-rw-r--r--.gitignore1
-rw-r--r--CREDITS1
-rw-r--r--Documentation/ABI/stable/sysfs-bus-usb 142
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio 19
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-frequency-ad9523 8
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio-frequency-adf4350 2
-rw-r--r--Documentation/ABI/testing/sysfs-bus-usb 89
-rw-r--r--Documentation/ABI/testing/sysfs-fs-f2fs 26
-rw-r--r--Documentation/DocBook/80211.tmpl1
-rw-r--r--Documentation/DocBook/drm.tmpl138
-rw-r--r--Documentation/DocBook/media/v4l/controls.xml168
-rw-r--r--Documentation/DocBook/media/v4l/lirc_device_interface.xml4
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt-nv16m.xml171
-rw-r--r--Documentation/DocBook/media/v4l/pixfmt.xml7
-rw-r--r--Documentation/DocBook/media/v4l/subdev-formats.xml611
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-create-bufs.xml41
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-dv-timings.xml6
-rw-r--r--Documentation/DocBook/media/v4l/vidioc-g-jpegcomp.xml4
-rw-r--r--Documentation/DocBook/media_api.tmpl10
-rw-r--r--Documentation/RCU/RTFP.txt858
-rw-r--r--Documentation/RCU/rcubarrier.txt12
-rw-r--r--Documentation/RCU/torture.txt10
-rw-r--r--Documentation/acpi/enumeration.txt16
-rw-r--r--Documentation/arm/Booting42
-rw-r--r--Documentation/arm/kernel_mode_neon.txt121
-rw-r--r--Documentation/cpu-freq/cpu-drivers.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/arch_timer.txt59
-rw-r--r--Documentation/devicetree/bindings/arm/atmel-adc.txt7
-rw-r--r--Documentation/devicetree/bindings/arm/l2cc.txt4
-rw-r--r--Documentation/devicetree/bindings/ata/ahci-platform.txt18
-rw-r--r--Documentation/devicetree/bindings/ata/sata_highbank.txt44
-rw-r--r--Documentation/devicetree/bindings/clock/imx27-clock.txt1
-rw-r--r--Documentation/devicetree/bindings/extcon/extcon-palmas.txt (renamed from Documentation/devicetree/bindings/extcon/extcon-twl.txt)6
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio.txt55
-rw-r--r--Documentation/devicetree/bindings/gpu/samsung-rotator.txt27
-rw-r--r--Documentation/devicetree/bindings/hid/hid-over-i2c.txt28
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-imx.txt5
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt12
-rw-r--r--Documentation/devicetree/bindings/iio/accel/bma180.txt24
-rw-r--r--Documentation/devicetree/bindings/iio/adc/nuvoton-nau7802.txt18
-rw-r--r--Documentation/devicetree/bindings/iio/light/apds9300.txt22
-rw-r--r--Documentation/devicetree/bindings/media/i2c/adv7343.txt48
-rw-r--r--Documentation/devicetree/bindings/media/i2c/ths8200.txt19
-rw-r--r--Documentation/devicetree/bindings/media/i2c/tvp7002.txt53
-rw-r--r--Documentation/devicetree/bindings/media/s5p-mfc.txt1
-rw-r--r--Documentation/devicetree/bindings/media/video-interfaces.txt2
-rw-r--r--Documentation/devicetree/bindings/misc/atmel-ssc.txt23
-rw-r--r--Documentation/devicetree/bindings/net/micrel-ksz9021.txt49
-rw-r--r--Documentation/devicetree/bindings/net/moxa,moxart-mac.txt21
-rw-r--r--Documentation/devicetree/bindings/net/stmmac.txt5
-rw-r--r--Documentation/devicetree/bindings/pci/designware-pcie.txt3
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt41
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt96
-rw-r--r--Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt3
-rw-r--r--Documentation/devicetree/bindings/pwm/atmel-tcb-pwm.txt8
-rw-r--r--Documentation/devicetree/bindings/pwm/imx-pwm.txt4
-rw-r--r--Documentation/devicetree/bindings/pwm/mxs-pwm.txt4
-rw-r--r--Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt5
-rw-r--r--Documentation/devicetree/bindings/pwm/nxp,pca9685-pwm.txt4
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-samsung.txt10
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-tiecap.txt8
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt8
-rw-r--r--Documentation/devicetree/bindings/pwm/pwm.txt7
-rw-r--r--Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt28
-rw-r--r--Documentation/devicetree/bindings/pwm/spear-pwm.txt5
-rw-r--r--Documentation/devicetree/bindings/pwm/ti,twl-pwm.txt4
-rw-r--r--Documentation/devicetree/bindings/pwm/ti,twl-pwmled.txt4
-rw-r--r--Documentation/devicetree/bindings/pwm/vt8500-pwm.txt8
-rw-r--r--Documentation/devicetree/bindings/regulator/88pm800.txt38
-rw-r--r--Documentation/devicetree/bindings/regulator/max8660.txt47
-rw-r--r--Documentation/devicetree/bindings/regulator/palmas-pmic.txt8
-rw-r--r--Documentation/devicetree/bindings/regulator/pfuze100.txt115
-rw-r--r--Documentation/devicetree/bindings/regulator/regulator.txt2
-rw-r--r--Documentation/devicetree/bindings/serial/arc-uart.txt (renamed from Documentation/devicetree/bindings/tty/serial/arc-uart.txt)0
-rw-r--r--Documentation/devicetree/bindings/serial/atmel-usart.txt (renamed from Documentation/devicetree/bindings/tty/serial/atmel-usart.txt)18
-rw-r--r--Documentation/devicetree/bindings/serial/efm32-uart.txt (renamed from Documentation/devicetree/bindings/tty/serial/efm32-uart.txt)0
-rw-r--r--Documentation/devicetree/bindings/serial/fsl-imx-uart.txt22
-rw-r--r--Documentation/devicetree/bindings/serial/fsl-lpuart.txt (renamed from Documentation/devicetree/bindings/tty/serial/fsl-lpuart.txt)0
-rw-r--r--Documentation/devicetree/bindings/serial/fsl-mxs-auart.txt (renamed from Documentation/devicetree/bindings/tty/serial/fsl-mxs-auart.txt)4
-rw-r--r--Documentation/devicetree/bindings/serial/mrvl,pxa-ssp.txt65
-rw-r--r--Documentation/devicetree/bindings/serial/nxp-lpc32xx-hsuart.txt (renamed from Documentation/devicetree/bindings/tty/serial/nxp-lpc32xx-hsuart.txt)0
-rw-r--r--Documentation/devicetree/bindings/serial/of-serial.txt (renamed from Documentation/devicetree/bindings/tty/serial/of-serial.txt)0
-rw-r--r--Documentation/devicetree/bindings/serial/qcom,msm-uart.txt25
-rw-r--r--Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt53
-rw-r--r--Documentation/devicetree/bindings/serial/sirf-uart.txt33
-rw-r--r--Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt (renamed from Documentation/devicetree/bindings/tty/serial/snps-dw-apb-uart.txt)0
-rw-r--r--Documentation/devicetree/bindings/serial/st-asc.txt18
-rw-r--r--Documentation/devicetree/bindings/serial/via,vt8500-uart.txt (renamed from Documentation/devicetree/bindings/tty/serial/via,vt8500-uart.txt)0
-rw-r--r--Documentation/devicetree/bindings/sound/ak4554.c11
-rw-r--r--Documentation/devicetree/bindings/sound/alc5632.txt19
-rw-r--r--Documentation/devicetree/bindings/sound/atmel-sam9x5-wm8731-audio.txt35
-rw-r--r--Documentation/devicetree/bindings/sound/atmel-wm8904.txt55
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,spdif.txt54
-rw-r--r--Documentation/devicetree/bindings/sound/fsl,ssi.txt (renamed from Documentation/devicetree/bindings/powerpc/fsl/ssi.txt)12
-rw-r--r--Documentation/devicetree/bindings/sound/imx-audio-spdif.txt34
-rw-r--r--Documentation/devicetree/bindings/sound/imx-audmux.txt9
-rw-r--r--Documentation/devicetree/bindings/sound/mrvl,pxa-ssp.txt28
-rw-r--r--Documentation/devicetree/bindings/sound/mrvl,pxa2xx-pcm.txt15
-rw-r--r--Documentation/devicetree/bindings/sound/mvebu-audio.txt29
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-alc5632.txt24
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5640.txt26
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8753.txt27
-rw-r--r--Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt24
-rw-r--r--Documentation/devicetree/bindings/sound/pcm1792a.txt18
-rw-r--r--Documentation/devicetree/bindings/sound/rt5640.txt20
-rw-r--r--Documentation/devicetree/bindings/sound/samsung-i2s.txt22
-rw-r--r--Documentation/devicetree/bindings/sound/soc-ac97link.txt28
-rw-r--r--Documentation/devicetree/bindings/sound/ti,pcm1681.txt15
-rw-r--r--Documentation/devicetree/bindings/sound/tlv320aic3x.txt9
-rw-r--r--Documentation/devicetree/bindings/sound/wm8731.txt9
-rw-r--r--Documentation/devicetree/bindings/sound/wm8753.txt24
-rw-r--r--Documentation/devicetree/bindings/sound/wm8903.txt19
-rw-r--r--Documentation/devicetree/bindings/sound/wm8994.txt4
-rw-r--r--Documentation/devicetree/bindings/spi/efm32-spi.txt34
-rw-r--r--Documentation/devicetree/bindings/spi/spi-bus.txt10
-rw-r--r--Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt42
-rw-r--r--Documentation/devicetree/bindings/spi/ti_qspi.txt22
-rw-r--r--Documentation/devicetree/bindings/timer/moxa,moxart-timer.txt17
-rw-r--r--Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt22
-rw-r--r--Documentation/devicetree/bindings/tty/serial/msm_serial.txt27
-rw-r--r--Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt34
-rw-r--r--Documentation/devicetree/bindings/usb/am33xx-usb.txt222
-rw-r--r--Documentation/devicetree/bindings/usb/dwc3.txt8
-rw-r--r--Documentation/devicetree/bindings/usb/generic.txt24
-rw-r--r--Documentation/devicetree/bindings/usb/nvidia,tegra20-usb-phy.txt17
-rw-r--r--Documentation/devicetree/bindings/usb/omap-usb.txt5
-rw-r--r--Documentation/devicetree/bindings/usb/samsung-hsotg.txt40
-rw-r--r--Documentation/devicetree/bindings/usb/usb-xhci.txt14
-rw-r--r--Documentation/devicetree/bindings/usb/usb3503.txt7
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/devicetree/bindings/video/simple-framebuffer.txt1
-rw-r--r--Documentation/driver-model/devres.txt2
-rw-r--r--Documentation/filesystems/ext3.txt7
-rw-r--r--Documentation/filesystems/ext4.txt7
-rw-r--r--Documentation/filesystems/f2fs.txt75
-rw-r--r--Documentation/hid/uhid.txt4
-rw-r--r--Documentation/hwmon/ads1015 8
-rw-r--r--Documentation/hwmon/htu21 46
-rw-r--r--Documentation/hwmon/k10temp 1
-rw-r--r--Documentation/i2c/busses/i2c-piix4 7
-rw-r--r--Documentation/i2c/instantiating-devices2
-rw-r--r--Documentation/input/gamepad.txt156
-rw-r--r--Documentation/ja_JP/HOWTO44
-rw-r--r--Documentation/kernel-parameters.txt67
-rw-r--r--Documentation/ko_KR/HOWTO25
-rw-r--r--Documentation/ko_KR/stable_api_nonsense.txt6
-rw-r--r--Documentation/laptops/asus-laptop.txt8
-rw-r--r--Documentation/laptops/sony-laptop.txt8
-rw-r--r--Documentation/laptops/thinkpad-acpi.txt73
-rw-r--r--Documentation/memory-barriers.txt10
-rw-r--r--Documentation/memory-hotplug.txt16
-rw-r--r--Documentation/networking/00-INDEX2
-rw-r--r--Documentation/networking/e100.txt4
-rw-r--r--Documentation/networking/e1000.txt12
-rw-r--r--Documentation/networking/e1000e.txt16
-rw-r--r--Documentation/networking/igb.txt67
-rw-r--r--Documentation/networking/igbvf.txt8
-rw-r--r--Documentation/networking/ip-sysctl.txt55
-rw-r--r--Documentation/networking/ixgb.txt14
-rw-r--r--Documentation/networking/ixgbe.txt109
-rw-r--r--Documentation/networking/ixgbevf.txt6
-rw-r--r--Documentation/networking/netdev-FAQ.txt224
-rw-r--r--Documentation/networking/openvswitch.txt40
-rw-r--r--Documentation/networking/packet_mmap.txt8
-rw-r--r--Documentation/networking/sctp.txt5
-rw-r--r--Documentation/networking/stmmac.txt3
-rw-r--r--Documentation/networking/tproxy.txt5
-rw-r--r--Documentation/pinctrl.txt93
-rw-r--r--Documentation/printk-formats.txt9
-rw-r--r--Documentation/scsi/LICENSE.qla4xxx2
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt1
-rw-r--r--Documentation/sound/alsa/HD-Audio.txt2
-rw-r--r--Documentation/sysctl/net.txt17
-rw-r--r--Documentation/timers/NO_HZ.txt44
-rw-r--r--Documentation/tpm/xen-tpmfront.txt113
-rw-r--r--Documentation/usb/URB.txt21
-rw-r--r--Documentation/usb/proc_usb_info.txt9
-rw-r--r--Documentation/video4linux/v4l2-controls.txt21
-rw-r--r--Documentation/virtual/kvm/cpuid.txt4
-rw-r--r--Documentation/virtual/kvm/hypercalls.txt14
-rw-r--r--Documentation/workqueue.txt90
-rw-r--r--Documentation/x86/x86_64/boot-options.txt5
-rw-r--r--MAINTAINERS161
-rw-r--r--Makefile2
-rw-r--r--arch/Kconfig6
-rw-r--r--arch/alpha/oprofile/common.c22
-rw-r--r--arch/arc/include/asm/entry.h1
-rw-r--r--arch/arc/lib/strchr-700.S10
-rw-r--r--arch/arm/Kconfig72
-rw-r--r--arch/arm/Kconfig.debug588
-rw-r--r--arch/arm/Makefile18
-rw-r--r--arch/arm/boot/dts/am335x-bone.dts29
-rw-r--r--arch/arm/boot/dts/am335x-evm.dts29
-rw-r--r--arch/arm/boot/dts/am335x-evmsk.dts19
-rw-r--r--arch/arm/boot/dts/am33xx.dtsi143
-rw-r--r--arch/arm/boot/dts/at91sam9n12ek.dts4
-rw-r--r--arch/arm/boot/dts/at91sam9x5ek.dtsi5
-rw-r--r--arch/arm/boot/dts/atlas6.dtsi28
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi9
-rw-r--r--arch/arm/boot/dts/exynos5440.dtsi2
-rw-r--r--arch/arm/boot/dts/imx28-apx4devkit.dts2
-rw-r--r--arch/arm/boot/dts/imx28-evk.dts3
-rw-r--r--arch/arm/boot/dts/imx28-m28evk.dts2
-rw-r--r--arch/arm/boot/dts/imx28.dtsi1
-rw-r--r--arch/arm/boot/dts/imx51-babbage.dts13
-rw-r--r--arch/arm/boot/dts/imx53-mba53.dts2
-rw-r--r--arch/arm/boot/dts/imx53.dtsi32
-rw-r--r--arch/arm/boot/dts/msm8660-surf.dts2
-rw-r--r--arch/arm/boot/dts/msm8960-cdp.dts6
-rw-r--r--arch/arm/boot/dts/omap5-uevm.dts78
-rw-r--r--arch/arm/boot/dts/omap5.dtsi2
-rw-r--r--arch/arm/boot/dts/prima2.dtsi16
-rw-r--r--arch/arm/boot/dts/sama5d3xmb.dtsi8
-rw-r--r--arch/arm/boot/dts/stih416-pinctrl.dtsi10
-rw-r--r--arch/arm/boot/dts/stih416.dtsi2
-rw-r--r--arch/arm/boot/dts/stih41x.dtsi2
-rw-r--r--arch/arm/boot/dts/tegra20-colibri-512.dtsi1
-rw-r--r--arch/arm/boot/dts/tegra20-seaboard.dts3
-rw-r--r--arch/arm/boot/dts/tegra20-trimslice.dts3
-rw-r--r--arch/arm/boot/dts/tegra20-whistler.dts6
-rw-r--r--arch/arm/boot/dts/tegra20.dtsi28
-rw-r--r--arch/arm/boot/dts/twl4030.dtsi6
-rw-r--r--arch/arm/boot/dts/vf610.dtsi8
-rw-r--r--arch/arm/boot/dts/wm8850-w70v2.dts3
-rw-r--r--arch/arm/common/edma.c1
-rw-r--r--arch/arm/common/mcpm_head.S2
-rw-r--r--arch/arm/common/vlock.S4
-rw-r--r--arch/arm/configs/bockw_defconfig7
-rw-r--r--arch/arm/configs/da8xx_omapl_defconfig2
-rw-r--r--arch/arm/configs/davinci_all_defconfig2
-rw-r--r--arch/arm/configs/keystone_defconfig1
-rw-r--r--arch/arm/configs/marzen_defconfig7
-rw-r--r--arch/arm/configs/multi_v7_defconfig6
-rw-r--r--arch/arm/configs/nhk8815_defconfig7
-rw-r--r--arch/arm/configs/omap2plus_defconfig13
-rw-r--r--arch/arm/configs/tegra_defconfig1
-rw-r--r--arch/arm/include/asm/a.out-core.h45
-rw-r--r--arch/arm/include/asm/arch_timer.h14
-rw-r--r--arch/arm/include/asm/assembler.h4
-rw-r--r--arch/arm/include/asm/barrier.h32
-rw-r--r--arch/arm/include/asm/cacheflush.h5
-rw-r--r--arch/arm/include/asm/cputype.h7
-rw-r--r--arch/arm/include/asm/dma-contiguous.h2
-rw-r--r--arch/arm/include/asm/elf.h6
-rw-r--r--arch/arm/include/asm/hardware/debug-8250.S29
-rw-r--r--arch/arm/include/asm/kvm_mmu.h2
-rw-r--r--arch/arm/include/asm/mach/arch.h4
-rw-r--r--arch/arm/include/asm/memblock.h3
-rw-r--r--arch/arm/include/asm/mmu.h3
-rw-r--r--arch/arm/include/asm/mmu_context.h20
-rw-r--r--arch/arm/include/asm/module.h2
-rw-r--r--arch/arm/include/asm/neon.h36
-rw-r--r--arch/arm/include/asm/page.h2
-rw-r--r--arch/arm/include/asm/pgtable.h2
-rw-r--r--arch/arm/include/asm/processor.h4
-rw-r--r--arch/arm/include/asm/prom.h4
-rw-r--r--arch/arm/include/asm/smp_plat.h3
-rw-r--r--arch/arm/include/asm/spinlock.h53
-rw-r--r--arch/arm/include/asm/switch_to.h10
-rw-r--r--arch/arm/include/asm/thread_info.h12
-rw-r--r--arch/arm/include/asm/tlb.h7
-rw-r--r--arch/arm/include/asm/tlbflush.h197
-rw-r--r--arch/arm/include/asm/types.h40
-rw-r--r--arch/arm/include/asm/v7m.h12
-rw-r--r--arch/arm/include/asm/virt.h12
-rw-r--r--arch/arm/include/asm/xor.h73
-rw-r--r--arch/arm/include/debug/8250.S54
-rw-r--r--arch/arm/include/debug/8250_32.S27
-rw-r--r--arch/arm/include/debug/bcm2835.S22
-rw-r--r--arch/arm/include/debug/cns3xxx.S19
-rw-r--r--arch/arm/include/debug/highbank.S17
-rw-r--r--arch/arm/include/debug/keystone.S43
-rw-r--r--arch/arm/include/debug/mvebu.S30
-rw-r--r--arch/arm/include/debug/mxs.S27
-rw-r--r--arch/arm/include/debug/nomadik.S20
-rw-r--r--arch/arm/include/debug/nspire.S28
-rw-r--r--arch/arm/include/debug/picoxcell.S19
-rw-r--r--arch/arm/include/debug/pl01x.S (renamed from arch/arm/include/asm/hardware/debug-pl01x.S)9
-rw-r--r--arch/arm/include/debug/pxa.S33
-rw-r--r--arch/arm/include/debug/rockchip.S42
-rw-r--r--arch/arm/include/debug/socfpga.S21
-rw-r--r--arch/arm/include/debug/sunxi.S27
-rw-r--r--arch/arm/include/debug/tegra.S29
-rw-r--r--arch/arm/include/debug/u300.S18
-rw-r--r--arch/arm/include/debug/ux500.S2
-rw-r--r--arch/arm/include/debug/vexpress.S48
-rw-r--r--arch/arm/include/uapi/asm/Kbuild1
-rw-r--r--arch/arm/include/uapi/asm/a.out.h34
-rw-r--r--arch/arm/kernel/Makefile2
-rw-r--r--arch/arm/kernel/atags.h5
-rw-r--r--arch/arm/kernel/atags_parse.c6
-rw-r--r--arch/arm/kernel/bios32.c5
-rw-r--r--arch/arm/kernel/devtree.c11
-rw-r--r--arch/arm/kernel/entry-armv.S106
-rw-r--r--arch/arm/kernel/entry-common.S4
-rw-r--r--arch/arm/kernel/entry-v7m.S2
-rw-r--r--arch/arm/kernel/fiq.c24
-rw-r--r--arch/arm/kernel/head-nommu.S1
-rw-r--r--arch/arm/kernel/head.S1
-rw-r--r--arch/arm/kernel/hyp-stub.S4
-rw-r--r--arch/arm/kernel/machine_kexec.c21
-rw-r--r--arch/arm/kernel/module.c8
-rw-r--r--arch/arm/kernel/perf_event.c10
-rw-r--r--arch/arm/kernel/perf_event_cpu.c3
-rw-r--r--arch/arm/kernel/process.c49
-rw-r--r--arch/arm/kernel/setup.c24
-rw-r--r--arch/arm/kernel/signal.c56
-rw-r--r--arch/arm/kernel/smp.c23
-rw-r--r--arch/arm/kernel/smp_tlb.c27
-rw-r--r--arch/arm/kernel/topology.c61
-rw-r--r--arch/arm/kernel/traps.c112
-rw-r--r--arch/arm/kernel/v7m.c19
-rw-r--r--arch/arm/kernel/vmlinux.lds.S17
-rw-r--r--arch/arm/kvm/arm.c4
-rw-r--r--arch/arm/kvm/coproc.c26
-rw-r--r--arch/arm/kvm/coproc.h3
-rw-r--r--arch/arm/kvm/coproc_a15.c6
-rw-r--r--arch/arm/kvm/init.S2
-rw-r--r--arch/arm/kvm/interrupts.S12
-rw-r--r--arch/arm/kvm/mmio.c3
-rw-r--r--arch/arm/kvm/mmu.c37
-rw-r--r--arch/arm/kvm/reset.c2
-rw-r--r--arch/arm/kvm/trace.h7
-rw-r--r--arch/arm/lib/Makefile6
-rw-r--r--arch/arm/lib/xor-neon.c42
-rw-r--r--arch/arm/mach-at91/at91sam9x5.c2
-rw-r--r--arch/arm/mach-at91/include/mach/at91_adc.h16
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c6
-rw-r--r--arch/arm/mach-davinci/board-dm355-leopard.c1
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c2
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm646x-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-neuros-osd2.c1
-rw-r--r--arch/arm/mach-davinci/cpuidle.c2
-rw-r--r--arch/arm/mach-davinci/dm355.c2
-rw-r--r--arch/arm/mach-davinci/dm365.c2
-rw-r--r--arch/arm/mach-davinci/include/mach/debug-macro.S65
-rw-r--r--arch/arm/mach-dove/common.c4
-rw-r--r--arch/arm/mach-dove/include/mach/debug-macro.S19
-rw-r--r--arch/arm/mach-ebsa110/include/mach/debug-macro.S22
-rw-r--r--arch/arm/mach-ep93xx/Kconfig14
-rw-r--r--arch/arm/mach-ep93xx/include/mach/debug-macro.S21
-rw-r--r--arch/arm/mach-ep93xx/include/mach/uncompress.h14
-rw-r--r--arch/arm/mach-exynos/Kconfig1
-rw-r--r--arch/arm/mach-exynos/Makefile2
-rw-r--r--arch/arm/mach-exynos/common.c26
-rw-r--r--arch/arm/mach-exynos/common.h1
-rw-r--r--arch/arm/mach-exynos/cpuidle.c1
-rw-r--r--arch/arm/mach-exynos/include/mach/memory.h5
-rw-r--r--arch/arm/mach-exynos/pm.c6
-rw-r--r--arch/arm/mach-footbridge/dc21285.c2
-rw-r--r--arch/arm/mach-footbridge/include/mach/debug-macro.S15
-rw-r--r--arch/arm/mach-gemini/include/mach/debug-macro.S21
-rw-r--r--arch/arm/mach-highbank/highbank.c7
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c5
-rw-r--r--arch/arm/mach-imx/clk-vf610.c2
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c3
-rw-r--r--arch/arm/mach-imx/mx27.h2
-rw-r--r--arch/arm/mach-integrator/include/mach/debug-macro.S20
-rw-r--r--arch/arm/mach-iop13xx/include/mach/debug-macro.S24
-rw-r--r--arch/arm/mach-iop32x/include/mach/debug-macro.S21
-rw-r--r--arch/arm/mach-iop33x/include/mach/debug-macro.S22
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/debug-macro.S26
-rw-r--r--arch/arm/mach-keystone/keystone.c2
-rw-r--r--arch/arm/mach-kirkwood/common.c24
-rw-r--r--arch/arm/mach-kirkwood/include/mach/debug-macro.S19
-rw-r--r--arch/arm/mach-lpc32xx/include/mach/debug-macro.S29
-rw-r--r--arch/arm/mach-msm/Kconfig3
-rw-r--r--arch/arm/mach-msm/devices-msm7x00.c6
-rw-r--r--arch/arm/mach-msm/devices-msm7x30.c2
-rw-r--r--arch/arm/mach-msm/devices-qsd8x50.c6
-rw-r--r--arch/arm/mach-msm/gpiomux-v1.c33
-rw-r--r--arch/arm/mach-msm/gpiomux.h10
-rw-r--r--arch/arm/mach-mv78xx0/include/mach/debug-macro.S19
-rw-r--r--arch/arm/mach-mvebu/platsmp.c51
-rw-r--r--arch/arm/mach-omap2/Kconfig2
-rw-r--r--arch/arm/mach-omap2/board-2430sdp.c57
-rw-r--r--arch/arm/mach-omap2/board-3430sdp.c83
-rw-r--r--arch/arm/mach-omap2/board-am3517evm.c113
-rw-r--r--arch/arm/mach-omap2/board-cm-t35.c100
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c96
-rw-r--r--arch/arm/mach-omap2/board-generic.c23
-rw-r--r--arch/arm/mach-omap2/board-h4.c48
-rw-r--r--arch/arm/mach-omap2/board-igep0020.c36
-rw-r--r--arch/arm/mach-omap2/board-ldp.c68
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c4
-rw-r--r--arch/arm/mach-omap2/board-omap3beagle.c60
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c91
-rw-r--r--arch/arm/mach-omap2/board-omap3pandora.c50
-rw-r--r--arch/arm/mach-omap2/board-omap3stalker.c61
-rw-r--r--arch/arm/mach-omap2/board-overo.c160
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c12
-rw-r--r--arch/arm/mach-omap2/board-rx51-video.c35
-rw-r--r--arch/arm/mach-omap2/board-rx51.c2
-rw-r--r--arch/arm/mach-omap2/board-zoom-display.c30
-rw-r--r--arch/arm/mach-omap2/display.c4
-rw-r--r--arch/arm/mach-omap2/dss-common.c244
-rw-r--r--arch/arm/mach-omap2/dss-common.h2
-rw-r--r--arch/arm/mach-omap2/i2c.c2
-rw-r--r--arch/arm/mach-omap2/omap_device.c18
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c2
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.h50
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c6
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_data.c3
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c9
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c5
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_54xx_data.c3
-rw-r--r--arch/arm/mach-omap2/serial.c11
-rw-r--r--arch/arm/mach-omap2/usb-host.c10
-rw-r--r--arch/arm/mach-omap2/usb-musb.c5
-rw-r--r--arch/arm/mach-orion5x/include/mach/debug-macro.S21
-rw-r--r--arch/arm/mach-prima2/common.c2
-rw-r--r--arch/arm/mach-pxa/em-x270.c17
-rw-r--r--arch/arm/mach-pxa/icontrol.c3
-rw-r--r--arch/arm/mach-pxa/mainstone.c3
-rw-r--r--arch/arm/mach-pxa/pcm990-baseboard.c3
-rw-r--r--arch/arm/mach-pxa/poodle.c4
-rw-r--r--arch/arm/mach-pxa/spitz.c4
-rw-r--r--arch/arm/mach-pxa/stargate2.c3
-rw-r--r--arch/arm/mach-pxa/zeus.c46
-rw-r--r--arch/arm/mach-realview/include/mach/debug-macro.S29
-rw-r--r--arch/arm/mach-rpc/include/mach/debug-macro.S23
-rw-r--r--arch/arm/mach-s3c24xx/clock-s3c2410.c161
-rw-r--r--arch/arm/mach-s3c24xx/clock-s3c2440.c3
-rw-r--r--arch/arm/mach-shmobile/board-armadillo800eva.c4
-rw-r--r--arch/arm/mach-shmobile/board-bockw.c50
-rw-r--r--arch/arm/mach-shmobile/board-lager.c2
-rw-r--r--arch/arm/mach-shmobile/board-marzen.c44
-rw-r--r--arch/arm/mach-shmobile/clock-r8a7778.c5
-rw-r--r--arch/arm/mach-shmobile/clock-r8a7779.c10
-rw-r--r--arch/arm/mach-shmobile/include/mach/r8a7778.h3
-rw-r--r--arch/arm/mach-shmobile/include/mach/r8a7779.h3
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7778.c34
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7779.c37
-rw-r--r--arch/arm/mach-spear/include/mach/debug-macro.S36
-rw-r--r--arch/arm/mach-spear/include/mach/spear.h2
-rw-r--r--arch/arm/mach-sti/Kconfig3
-rw-r--r--arch/arm/mach-sti/headsmp.S2
-rw-r--r--arch/arm/mach-tegra/tegra.c38
-rw-r--r--arch/arm/mach-ux500/Makefile1
-rw-r--r--arch/arm/mach-versatile/include/mach/debug-macro.S21
-rw-r--r--arch/arm/mach-zynq/common.c2
-rw-r--r--arch/arm/mm/Kconfig37
-rw-r--r--arch/arm/mm/cache-l2x0.c12
-rw-r--r--arch/arm/mm/cache-v7.S4
-rw-r--r--arch/arm/mm/context.c6
-rw-r--r--arch/arm/mm/dma-mapping.c7
-rw-r--r--arch/arm/mm/hugetlbpage.c43
-rw-r--r--arch/arm/mm/init.c5
-rw-r--r--arch/arm/mm/mmu.c61
-rw-r--r--arch/arm/mm/nommu.c2
-rw-r--r--arch/arm/mm/proc-feroceon.S26
-rw-r--r--arch/arm/mm/proc-v7-2level.S2
-rw-r--r--arch/arm/mm/proc-v7-3level.S2
-rw-r--r--arch/arm/mm/proc-v7.S27
-rw-r--r--arch/arm/mm/tlb-v7.S8
-rw-r--r--arch/arm/plat-pxa/ssp.c171
-rw-r--r--arch/arm/plat-samsung/Kconfig7
-rw-r--r--arch/arm/plat-samsung/Makefile2
-rw-r--r--arch/arm/plat-samsung/include/plat/clock.h5
-rw-r--r--arch/arm/plat-samsung/include/plat/pm.h8
-rw-r--r--arch/arm/plat-samsung/init.c5
-rw-r--r--arch/arm/plat-samsung/pm.c14
-rw-r--r--arch/arm/plat-samsung/s3c-dma-ops.c13
-rw-r--r--arch/arm/vfp/vfphw.S5
-rw-r--r--arch/arm/vfp/vfpmodule.c69
-rw-r--r--arch/arm/xen/enlighten.c3
-rw-r--r--arch/arm64/include/asm/arch_timer.h23
-rw-r--r--arch/arm64/include/asm/kvm_asm.h17
-rw-r--r--arch/arm64/include/asm/kvm_host.h2
-rw-r--r--arch/arm64/include/asm/thread_info.h4
-rw-r--r--arch/arm64/include/asm/tlb.h7
-rw-r--r--arch/arm64/include/asm/virt.h13
-rw-r--r--arch/arm64/kernel/entry.S2
-rw-r--r--arch/arm64/kernel/perf_event.c10
-rw-r--r--arch/arm64/kernel/process.c2
-rw-r--r--arch/arm64/kvm/hyp.S13
-rw-r--r--arch/arm64/kvm/sys_regs.c3
-rw-r--r--arch/avr32/boards/atngw100/mrmt.c1
-rw-r--r--arch/avr32/oprofile/op_model_avr32.c17
-rw-r--r--arch/frv/mb93090-mb00/pci-vdk.c2
-rw-r--r--arch/hexagon/Kconfig1
-rw-r--r--arch/ia64/Kconfig7
-rw-r--r--arch/ia64/configs/generic_defconfig2
-rw-r--r--arch/ia64/configs/gensparse_defconfig2
-rw-r--r--arch/ia64/configs/tiger_defconfig2
-rw-r--r--arch/ia64/configs/xen_domu_defconfig2
-rw-r--r--arch/ia64/include/asm/Kbuild1
-rw-r--r--arch/ia64/include/asm/spinlock.h5
-rw-r--r--arch/ia64/include/asm/tlb.h9
-rw-r--r--arch/ia64/kvm/kvm-ia64.c4
-rw-r--r--arch/m68k/amiga/platform.c2
-rw-r--r--arch/m68k/emu/natfeat.c27
-rw-r--r--arch/m68k/emu/nfblock.c4
-rw-r--r--arch/m68k/emu/nfcon.c8
-rw-r--r--arch/m68k/emu/nfeth.c7
-rw-r--r--arch/m68k/include/asm/div64.h9
-rw-r--r--arch/m68k/include/asm/irqflags.h6
-rw-r--r--arch/m68k/kernel/time.c2
-rw-r--r--arch/m68k/platform/coldfire/pci.c1
-rw-r--r--arch/m68k/q40/config.c2
-rw-r--r--arch/microblaze/Kconfig2
-rw-r--r--arch/microblaze/include/asm/prom.h3
-rw-r--r--arch/mips/Kconfig7
-rw-r--r--arch/mips/bcm47xx/Kconfig1
-rw-r--r--arch/mips/include/asm/cpu-features.h2
-rw-r--r--arch/mips/include/asm/mach-generic/spaces.h4
-rw-r--r--arch/mips/include/uapi/asm/siginfo.h7
-rw-r--r--arch/mips/kernel/bmips_vec.S6
-rw-r--r--arch/mips/kernel/smp-bmips.c22
-rw-r--r--arch/mips/kernel/vpe.c17
-rw-r--r--arch/mips/kvm/kvm_locore.S969
-rw-r--r--arch/mips/kvm/kvm_mips.c4
-rw-r--r--arch/mips/math-emu/cp1emu.c26
-rw-r--r--arch/mips/oprofile/common.c20
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c2
-rw-r--r--arch/mips/pci/pci.c1
-rw-r--r--arch/mips/pnx833x/common/platform.c2
-rw-r--r--arch/mips/powertv/asic/asic_devices.c3
-rw-r--r--arch/mips/sni/a20r.c1
-rw-r--r--arch/openrisc/Kconfig1
-rw-r--r--arch/openrisc/include/asm/prom.h3
-rw-r--r--arch/parisc/configs/c8000_defconfig279
-rw-r--r--arch/parisc/include/asm/parisc-device.h3
-rw-r--r--arch/parisc/kernel/cache.c135
-rw-r--r--arch/parisc/kernel/inventory.c1
-rw-r--r--arch/parisc/kernel/signal.c7
-rw-r--r--arch/parisc/kernel/signal32.c1
-rw-r--r--arch/parisc/kernel/sys32.h36
-rw-r--r--arch/parisc/kernel/sys_parisc32.c2
-rw-r--r--arch/powerpc/Kconfig9
-rw-r--r--arch/powerpc/configs/ppc64_defconfig2
-rw-r--r--arch/powerpc/configs/ppc64e_defconfig2
-rw-r--r--arch/powerpc/configs/pseries_defconfig2
-rw-r--r--arch/powerpc/include/asm/Kbuild1
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h38
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h4
-rw-r--r--arch/powerpc/include/asm/kvm_host.h14
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h25
-rw-r--r--arch/powerpc/include/asm/page.h10
-rw-r--r--arch/powerpc/include/asm/perf_event_server.h10
-rw-r--r--arch/powerpc/include/asm/processor.h4
-rw-r--r--arch/powerpc/include/asm/prom.h3
-rw-r--r--arch/powerpc/include/asm/reg.h31
-rw-r--r--arch/powerpc/include/asm/smp.h4
-rw-r--r--arch/powerpc/include/asm/switch_to.h9
-rw-r--r--arch/powerpc/include/uapi/asm/Kbuild1
-rw-r--r--arch/powerpc/include/uapi/asm/perf_event.h18
-rw-r--r--arch/powerpc/kernel/asm-offsets.c4
-rw-r--r--arch/powerpc/kernel/eeh.c2
-rw-r--r--arch/powerpc/kernel/entry_64.S36
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S5
-rw-r--r--arch/powerpc/kernel/iommu.c2
-rw-r--r--arch/powerpc/kernel/irq.c2
-rw-r--r--arch/powerpc/kernel/lparcfg.c22
-rw-r--r--arch/powerpc/kernel/pci-common.c8
-rw-r--r--arch/powerpc/kernel/process.c10
-rw-r--r--arch/powerpc/kernel/prom.c43
-rw-r--r--arch/powerpc/kernel/setup_64.c4
-rw-r--r--arch/powerpc/kernel/time.c2
-rw-r--r--arch/powerpc/kernel/tm.S20
-rw-r--r--arch/powerpc/kernel/traps.c58
-rw-r--r--arch/powerpc/kvm/Kconfig1
-rw-r--r--arch/powerpc/kvm/Makefile1
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu.c150
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c42
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c2
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv.c40
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c246
-rw-r--r--arch/powerpc/kvm/book3s_hv_cma.c240
-rw-r--r--arch/powerpc/kvm/book3s_hv_cma.h27
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c139
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S2
-rw-r--r--arch/powerpc/kvm/book3s_interrupts.S14
-rw-r--r--arch/powerpc/kvm/book3s_pr.c40
-rw-r--r--arch/powerpc/kvm/book3s_xics.c1
-rw-r--r--arch/powerpc/kvm/booke.c6
-rw-r--r--arch/powerpc/kvm/powerpc.c26
-rw-r--r--arch/powerpc/mm/numa.c59
-rw-r--r--arch/powerpc/oprofile/common.c28
-rw-r--r--arch/powerpc/perf/core-book3s.c2
-rw-r--r--arch/powerpc/perf/power7-events-list.h548
-rw-r--r--arch/powerpc/perf/power7-pmu.c148
-rw-r--r--arch/powerpc/perf/power8-pmu.c6
-rw-r--r--arch/powerpc/platforms/44x/warp.c1
-rw-r--r--arch/powerpc/platforms/ps3/time.c2
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c162
-rw-r--r--arch/powerpc/sysdev/rtc_cmos_setup.c2
-rw-r--r--arch/s390/Kconfig20
-rw-r--r--arch/s390/boot/compressed/Makefile9
-rw-r--r--arch/s390/boot/compressed/misc.c4
-rw-r--r--arch/s390/hypfs/hypfs.h13
-rw-r--r--arch/s390/hypfs/hypfs_dbfs.c2
-rw-r--r--arch/s390/hypfs/hypfs_diag.c50
-rw-r--r--arch/s390/hypfs/hypfs_vm.c65
-rw-r--r--arch/s390/hypfs/inode.c36
-rw-r--r--arch/s390/include/asm/airq.h67
-rw-r--r--arch/s390/include/asm/bitops.h14
-rw-r--r--arch/s390/include/asm/cio.h1
-rw-r--r--arch/s390/include/asm/cputime.h3
-rw-r--r--arch/s390/include/asm/hardirq.h5
-rw-r--r--arch/s390/include/asm/hugetlb.h135
-rw-r--r--arch/s390/include/asm/hw_irq.h17
-rw-r--r--arch/s390/include/asm/irq.h35
-rw-r--r--arch/s390/include/asm/kvm_host.h8
-rw-r--r--arch/s390/include/asm/mmu.h2
-rw-r--r--arch/s390/include/asm/mmu_context.h22
-rw-r--r--arch/s390/include/asm/page.h19
-rw-r--r--arch/s390/include/asm/pci.h54
-rw-r--r--arch/s390/include/asm/pci_insn.h12
-rw-r--r--arch/s390/include/asm/pci_io.h10
-rw-r--r--arch/s390/include/asm/pgtable.h648
-rw-r--r--arch/s390/include/asm/processor.h2
-rw-r--r--arch/s390/include/asm/serial.h6
-rw-r--r--arch/s390/include/asm/switch_to.h9
-rw-r--r--arch/s390/include/asm/tlb.h11
-rw-r--r--arch/s390/include/asm/tlbflush.h6
-rw-r--r--arch/s390/include/asm/vtime.h7
-rw-r--r--arch/s390/kernel/entry.S16
-rw-r--r--arch/s390/kernel/entry64.S11
-rw-r--r--arch/s390/kernel/irq.c160
-rw-r--r--arch/s390/kernel/kprobes.c21
-rw-r--r--arch/s390/kernel/nmi.c5
-rw-r--r--arch/s390/kernel/perf_event.c9
-rw-r--r--arch/s390/kernel/process.c1
-rw-r--r--arch/s390/kernel/ptrace.c8
-rw-r--r--arch/s390/kernel/setup.c1
-rw-r--r--arch/s390/kernel/suspend.c11
-rw-r--r--arch/s390/kernel/swsusp_asm64.S7
-rw-r--r--arch/s390/kernel/time.c1
-rw-r--r--arch/s390/kernel/vdso.c6
-rw-r--r--arch/s390/kernel/vtime.c1
-rw-r--r--arch/s390/kvm/diag.c17
-rw-r--r--arch/s390/kvm/gaccess.h12
-rw-r--r--arch/s390/kvm/kvm-s390.c48
-rw-r--r--arch/s390/kvm/kvm-s390.h10
-rw-r--r--arch/s390/kvm/priv.c36
-rw-r--r--arch/s390/lib/delay.c2
-rw-r--r--arch/s390/lib/uaccess_pt.c16
-rw-r--r--arch/s390/mm/dump_pagetables.c18
-rw-r--r--arch/s390/mm/gup.c6
-rw-r--r--arch/s390/mm/hugetlbpage.c124
-rw-r--r--arch/s390/mm/init.c1
-rw-r--r--arch/s390/mm/pageattr.c2
-rw-r--r--arch/s390/mm/pgtable.c266
-rw-r--r--arch/s390/mm/vmem.c15
-rw-r--r--arch/s390/oprofile/init.c37
-rw-r--r--arch/s390/pci/Makefile2
-rw-r--r--arch/s390/pci/pci.c575
-rw-r--r--arch/s390/pci/pci_clp.c146
-rw-r--r--arch/s390/pci/pci_dma.c16
-rw-r--r--arch/s390/pci/pci_event.c2
-rw-r--r--arch/s390/pci/pci_insn.c18
-rw-r--r--arch/s390/pci/pci_msi.c142
-rw-r--r--arch/s390/pci/pci_sysfs.c27
-rw-r--r--arch/score/Kconfig2
-rw-r--r--arch/sh/Kconfig6
-rw-r--r--arch/sh/boards/board-espt.c1
-rw-r--r--arch/sh/boards/board-sh7757lcr.c4
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c1
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c3
-rw-r--r--arch/sh/boards/mach-sh7763rdp/setup.c1
-rw-r--r--arch/sh/configs/sh03_defconfig2
-rw-r--r--arch/sh/drivers/pci/pci.c1
-rw-r--r--arch/sh/include/asm/tlb.h6
-rw-r--r--arch/sh/kernel/cpu/sh2/setup-sh7619.c11
-rw-r--r--arch/sh/kernel/cpu/shmobile/cpuidle.c4
-rw-r--r--arch/sparc/include/asm/switch_to_64.h4
-rw-r--r--arch/sparc/kernel/cpumap.c1
-rw-r--r--arch/sparc/kernel/entry.S2
-rw-r--r--arch/sparc/kernel/kgdb_64.c4
-rw-r--r--arch/sparc/kernel/ktlb.S3
-rw-r--r--arch/sparc/kernel/ptrace_64.c4
-rw-r--r--arch/sparc/kernel/setup_64.c12
-rw-r--r--arch/sparc/kernel/syscalls.S12
-rw-r--r--arch/sparc/kernel/trampoline_64.S2
-rw-r--r--arch/sparc/lib/ksyms.c9
-rw-r--r--arch/tile/gxio/iorpc_mpipe.c66
-rw-r--r--arch/tile/gxio/iorpc_mpipe_info.c18
-rw-r--r--arch/tile/gxio/mpipe.c43
-rw-r--r--arch/tile/include/asm/topology.h3
-rw-r--r--arch/tile/include/gxio/iorpc_mpipe.h14
-rw-r--r--arch/tile/include/gxio/iorpc_mpipe_info.h4
-rw-r--r--arch/tile/include/gxio/mpipe.h143
-rw-r--r--arch/tile/include/hv/drv_mpipe_intf.h3
-rw-r--r--arch/tile/kernel/pci_gx.c9
-rw-r--r--arch/um/include/asm/tlb.h6
-rw-r--r--arch/x86/Kconfig62
-rw-r--r--arch/x86/Makefile8
-rw-r--r--arch/x86/boot/boot.h1
-rw-r--r--arch/x86/boot/compressed/eboot.c2
-rw-r--r--arch/x86/boot/compressed/head_32.S31
-rw-r--r--arch/x86/boot/compressed/head_64.S1
-rw-r--r--arch/x86/boot/compressed/misc.c77
-rw-r--r--arch/x86/boot/printf.c2
-rw-r--r--arch/x86/ia32/ia32_signal.c2
-rw-r--r--arch/x86/ia32/ia32entry.S2
-rw-r--r--arch/x86/include/asm/acpi.h2
-rw-r--r--arch/x86/include/asm/alternative.h14
-rw-r--r--arch/x86/include/asm/apic.h2
-rw-r--r--arch/x86/include/asm/asm.h6
-rw-r--r--arch/x86/include/asm/bitops.h46
-rw-r--r--arch/x86/include/asm/bootparam_utils.h4
-rw-r--r--arch/x86/include/asm/checksum_32.h22
-rw-r--r--arch/x86/include/asm/checksum_64.h2
-rw-r--r--arch/x86/include/asm/cpufeature.h17
-rw-r--r--arch/x86/include/asm/e820.h2
-rw-r--r--arch/x86/include/asm/hw_irq.h120
-rw-r--r--arch/x86/include/asm/hypervisor.h2
-rw-r--r--arch/x86/include/asm/irq.h2
-rw-r--r--arch/x86/include/asm/kprobes.h10
-rw-r--r--arch/x86/include/asm/kvm_host.h14
-rw-r--r--arch/x86/include/asm/kvm_para.h38
-rw-r--r--arch/x86/include/asm/mce.h16
-rw-r--r--arch/x86/include/asm/microcode_amd.h2
-rw-r--r--arch/x86/include/asm/mmu_context.h20
-rw-r--r--arch/x86/include/asm/mutex_64.h30
-rw-r--r--arch/x86/include/asm/page_32_types.h2
-rw-r--r--arch/x86/include/asm/page_64_types.h5
-rw-r--r--arch/x86/include/asm/page_types.h5
-rw-r--r--arch/x86/include/asm/paravirt.h32
-rw-r--r--arch/x86/include/asm/paravirt_types.h17
-rw-r--r--arch/x86/include/asm/pgtable-2level.h48
-rw-r--r--arch/x86/include/asm/pgtable-3level.h3
-rw-r--r--arch/x86/include/asm/pgtable.h33
-rw-r--r--arch/x86/include/asm/pgtable_types.h17
-rw-r--r--arch/x86/include/asm/processor.h34
-rw-r--r--arch/x86/include/asm/pvclock.h1
-rw-r--r--arch/x86/include/asm/setup.h8
-rw-r--r--arch/x86/include/asm/special_insns.h2
-rw-r--r--arch/x86/include/asm/spinlock.h137
-rw-r--r--arch/x86/include/asm/spinlock_types.h16
-rw-r--r--arch/x86/include/asm/switch_to.h4
-rw-r--r--arch/x86/include/asm/sync_bitops.h24
-rw-r--r--arch/x86/include/asm/syscall.h3
-rw-r--r--arch/x86/include/asm/syscalls.h6
-rw-r--r--arch/x86/include/asm/sysfb.h98
-rw-r--r--arch/x86/include/asm/topology.h3
-rw-r--r--arch/x86/include/asm/traps.h6
-rw-r--r--arch/x86/include/asm/tsc.h1
-rw-r--r--arch/x86/include/asm/uaccess.h7
-rw-r--r--arch/x86/include/asm/vmx.h2
-rw-r--r--arch/x86/include/asm/vvar.h2
-rw-r--r--arch/x86/include/asm/xen/events.h1
-rw-r--r--arch/x86/include/asm/xen/hypervisor.h16
-rw-r--r--arch/x86/include/uapi/asm/kvm_para.h1
-rw-r--r--arch/x86/include/uapi/asm/vmx.h6
-rw-r--r--arch/x86/kernel/Makefile3
-rw-r--r--arch/x86/kernel/acpi/boot.c25
-rw-r--r--arch/x86/kernel/alternative.c155
-rw-r--r--arch/x86/kernel/amd_nb.c13
-rw-r--r--arch/x86/kernel/apic/apic.c12
-rw-r--r--arch/x86/kernel/apic/io_apic.c14
-rw-r--r--arch/x86/kernel/apm_32.c2
-rw-r--r--arch/x86/kernel/cpu/amd.c24
-rw-r--r--arch/x86/kernel/cpu/common.c4
-rw-r--r--arch/x86/kernel/cpu/hypervisor.c15
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-internal.h3
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-severity.c4
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c28
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_intel.c42
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c13
-rw-r--r--arch/x86/kernel/cpu/perf_event.c6
-rw-r--r--arch/x86/kernel/cpu/perf_event.h2
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c3
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c181
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c32
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c258
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.h10
-rw-r--r--arch/x86/kernel/cpu/vmware.c8
-rw-r--r--arch/x86/kernel/crash.c4
-rw-r--r--arch/x86/kernel/e820.c5
-rw-r--r--arch/x86/kernel/early-quirks.c14
-rw-r--r--arch/x86/kernel/head32.c2
-rw-r--r--arch/x86/kernel/head64.c2
-rw-r--r--arch/x86/kernel/head_32.S2
-rw-r--r--arch/x86/kernel/i387.c2
-rw-r--r--arch/x86/kernel/irq.c8
-rw-r--r--arch/x86/kernel/irq_work.c4
-rw-r--r--arch/x86/kernel/jump_label.c16
-rw-r--r--arch/x86/kernel/kprobes/common.h5
-rw-r--r--arch/x86/kernel/kprobes/core.c4
-rw-r--r--arch/x86/kernel/kprobes/opt.c115
-rw-r--r--arch/x86/kernel/kvm.c268
-rw-r--r--arch/x86/kernel/microcode_amd.c36
-rw-r--r--arch/x86/kernel/microcode_amd_early.c27
-rw-r--r--arch/x86/kernel/paravirt-spinlocks.c18
-rw-r--r--arch/x86/kernel/paravirt.c9
-rw-r--r--arch/x86/kernel/process.c2
-rw-r--r--arch/x86/kernel/process_32.c2
-rw-r--r--arch/x86/kernel/process_64.c4
-rw-r--r--arch/x86/kernel/pvclock.c44
-rw-r--r--arch/x86/kernel/setup.c27
-rw-r--r--arch/x86/kernel/signal.c12
-rw-r--r--arch/x86/kernel/smp.c12
-rw-r--r--arch/x86/kernel/sys_x86_64.c2
-rw-r--r--arch/x86/kernel/syscall_32.c2
-rw-r--r--arch/x86/kernel/syscall_64.c5
-rw-r--r--arch/x86/kernel/sysfb.c74
-rw-r--r--arch/x86/kernel/sysfb_efi.c214
-rw-r--r--arch/x86/kernel/sysfb_simplefb.c95
-rw-r--r--arch/x86/kernel/tboot.c10
-rw-r--r--arch/x86/kernel/traps.c4
-rw-r--r--arch/x86/kernel/tsc.c6
-rw-r--r--arch/x86/kvm/cpuid.c3
-rw-r--r--arch/x86/kvm/lapic.c38
-rw-r--r--arch/x86/kvm/mmu.c181
-rw-r--r--arch/x86/kvm/mmu.h2
-rw-r--r--arch/x86/kvm/paging_tmpl.h178
-rw-r--r--arch/x86/kvm/pmu.c25
-rw-r--r--arch/x86/kvm/vmx.c441
-rw-r--r--arch/x86/kvm/x86.c224
-rw-r--r--arch/x86/lib/csum-wrappers_64.c12
-rw-r--r--arch/x86/lib/usercopy_64.c2
-rw-r--r--arch/x86/lib/x86-opcode-map.txt42
-rw-r--r--arch/x86/mm/init.c4
-rw-r--r--arch/x86/mm/ioremap.c5
-rw-r--r--arch/x86/mm/mmap.c6
-rw-r--r--arch/x86/mm/srat.c11
-rw-r--r--arch/x86/oprofile/nmi_int.c18
-rw-r--r--arch/x86/oprofile/op_model_amd.c24
-rw-r--r--arch/x86/pci/acpi.c9
-rw-r--r--arch/x86/pci/i386.c4
-rw-r--r--arch/x86/pci/mmconfig-shared.c7
-rw-r--r--arch/x86/pci/mrst.c41
-rw-r--r--arch/x86/platform/ce4100/ce4100.c1
-rw-r--r--arch/x86/power/cpu.c8
-rw-r--r--arch/x86/power/hibernate_64.c12
-rw-r--r--arch/x86/tools/gen-insn-attr-x86.awk4
-rw-r--r--arch/x86/vdso/vclock_gettime.c16
-rw-r--r--arch/x86/xen/enlighten.c24
-rw-r--r--arch/x86/xen/irq.c25
-rw-r--r--arch/x86/xen/p2m.c22
-rw-r--r--arch/x86/xen/setup.c51
-rw-r--r--arch/x86/xen/smp.c19
-rw-r--r--arch/x86/xen/spinlock.c387
-rw-r--r--arch/x86/xen/xen-ops.h16
-rw-r--r--block/blk-cgroup.c49
-rw-r--r--block/blk-cgroup.h38
-rw-r--r--block/blk-core.c6
-rw-r--r--block/blk-throttle.c43
-rw-r--r--block/cfq-iosched.c90
-rw-r--r--drivers/accessibility/braille/braille_console.c9
-rw-r--r--drivers/acpi/Kconfig24
-rw-r--r--drivers/acpi/Makefile1
-rw-r--r--drivers/acpi/ac.c1
-rw-r--r--drivers/acpi/acpi_i2c.c103
-rw-r--r--drivers/acpi/acpi_pad.c14
-rw-r--r--drivers/acpi/acpi_platform.c24
-rw-r--r--drivers/acpi/acpi_processor.c24
-rw-r--r--drivers/acpi/acpica/acglobal.h7
-rw-r--r--drivers/acpi/acpica/aclocal.h4
-rw-r--r--drivers/acpi/acpica/acnamesp.h4
-rw-r--r--drivers/acpi/acpica/actables.h7
-rw-r--r--drivers/acpi/acpica/acutils.h4
-rw-r--r--drivers/acpi/acpica/evgpeinit.c11
-rw-r--r--drivers/acpi/acpica/exdump.c6
-rw-r--r--drivers/acpi/acpica/exoparg1.c48
-rw-r--r--drivers/acpi/acpica/hwesleep.c9
-rw-r--r--drivers/acpi/acpica/hwtimer.c13
-rw-r--r--drivers/acpi/acpica/nspredef.c16
-rw-r--r--drivers/acpi/acpica/nswalk.c26
-rw-r--r--drivers/acpi/acpica/nsxfeval.c16
-rw-r--r--drivers/acpi/acpica/nsxfname.c11
-rw-r--r--drivers/acpi/acpica/tbfadt.c4
-rw-r--r--drivers/acpi/acpica/tbxfroot.c12
-rw-r--r--drivers/acpi/acpica/uteval.c8
-rw-r--r--drivers/acpi/acpica/utglobal.c1
-rw-r--r--drivers/acpi/acpica/utosi.c77
-rw-r--r--drivers/acpi/acpica/utstring.c5
-rw-r--r--drivers/acpi/acpica/utxface.c29
-rw-r--r--drivers/acpi/apei/erst.c76
-rw-r--r--drivers/acpi/apei/ghes.c38
-rw-r--r--drivers/acpi/apei/hest.c39
-rw-r--r--drivers/acpi/battery.c23
-rw-r--r--drivers/acpi/bgrt.c27
-rw-r--r--drivers/acpi/blacklist.c30
-rw-r--r--drivers/acpi/bus.c142
-rw-r--r--drivers/acpi/button.c2
-rw-r--r--drivers/acpi/device_pm.c34
-rw-r--r--drivers/acpi/dock.c400
-rw-r--r--drivers/acpi/ec.c10
-rw-r--r--drivers/acpi/event.c106
-rw-r--r--drivers/acpi/fan.c4
-rw-r--r--drivers/acpi/glue.c220
-rw-r--r--drivers/acpi/internal.h3
-rw-r--r--drivers/acpi/numa.c2
-rw-r--r--drivers/acpi/osl.c86
-rw-r--r--drivers/acpi/pci_root.c67
-rw-r--r--drivers/acpi/pci_slot.c14
-rw-r--r--drivers/acpi/power.c6
-rw-r--r--drivers/acpi/proc.c8
-rw-r--r--drivers/acpi/processor_core.c2
-rw-r--r--drivers/acpi/processor_driver.c8
-rw-r--r--drivers/acpi/processor_perflib.c22
-rw-r--r--drivers/acpi/processor_thermal.c12
-rw-r--r--drivers/acpi/resource.c4
-rw-r--r--drivers/acpi/sbs.c15
-rw-r--r--drivers/acpi/scan.c272
-rw-r--r--drivers/acpi/sleep.c48
-rw-r--r--drivers/acpi/thermal.c49
-rw-r--r--drivers/acpi/utils.c70
-rw-r--r--drivers/acpi/video.c326
-rw-r--r--drivers/acpi/video_detect.c34
-rw-r--r--drivers/ata/Kconfig11
-rw-r--r--drivers/ata/Makefile1
-rw-r--r--drivers/ata/ahci.c17
-rw-r--r--drivers/ata/ahci_imx.c236
-rw-r--r--drivers/ata/ata_piix.c2
-rw-r--r--drivers/ata/libata-acpi.c282
-rw-r--r--drivers/ata/libata-core.c29
-rw-r--r--drivers/ata/libata-pmp.c12
-rw-r--r--drivers/ata/libata-scsi.c44
-rw-r--r--drivers/ata/libata-transport.c2
-rw-r--r--drivers/ata/libata-zpodd.c12
-rw-r--r--drivers/ata/libata.h19
-rw-r--r--drivers/ata/pata_acpi.c4
-rw-r--r--drivers/ata/pata_arasan_cf.c4
-rw-r--r--drivers/ata/pata_at32.c2
-rw-r--r--drivers/ata/pata_at91.c2
-rw-r--r--drivers/ata/pata_imx.c1
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c4
-rw-r--r--drivers/ata/pata_octeon_cf.c2
-rw-r--r--drivers/ata/pata_platform.c2
-rw-r--r--drivers/ata/pata_pxa.c2
-rw-r--r--drivers/ata/pata_samsung_cf.c10
-rw-r--r--drivers/ata/sata_fsl.c5
-rw-r--r--drivers/ata/sata_highbank.c78
-rw-r--r--drivers/ata/sata_inic162x.c14
-rw-r--r--drivers/ata/sata_mv.c26
-rw-r--r--drivers/ata/sata_rcar.c5
-rw-r--r--drivers/atm/he.c11
-rw-r--r--drivers/atm/nicstar.c26
-rw-r--r--drivers/base/Kconfig20
-rw-r--r--drivers/base/Makefile2
-rw-r--r--drivers/base/base.h10
-rw-r--r--drivers/base/bus.c60
-rw-r--r--drivers/base/class.c4
-rw-r--r--drivers/base/core.c150
-rw-r--r--drivers/base/cpu.c8
-rw-r--r--drivers/base/dma-buf.c2
-rw-r--r--drivers/base/dma-contiguous.c4
-rw-r--r--drivers/base/driver.c31
-rw-r--r--drivers/base/firmware_class.c24
-rw-r--r--drivers/base/memory.c268
-rw-r--r--drivers/base/platform.c14
-rw-r--r--drivers/base/power/main.c77
-rw-r--r--drivers/base/power/opp.c1
-rw-r--r--drivers/base/power/sysfs.c2
-rw-r--r--drivers/base/regmap/internal.h14
-rw-r--r--drivers/base/regmap/regcache-rbtree.c183
-rw-r--r--drivers/base/regmap/regcache.c78
-rw-r--r--drivers/base/regmap/regmap-debugfs.c6
-rw-r--r--drivers/base/regmap/regmap-irq.c25
-rw-r--r--drivers/base/regmap/regmap.c26
-rw-r--r--drivers/base/topology.c20
-rw-r--r--drivers/bcma/Kconfig10
-rw-r--r--drivers/bcma/driver_pci.c65
-rw-r--r--drivers/bcma/driver_pci_host.c6
-rw-r--r--drivers/bcma/main.c2
-rw-r--r--drivers/bcma/scan.c28
-rw-r--r--drivers/block/aoe/aoecmd.c17
-rw-r--r--drivers/block/rbd.c14
-rw-r--r--drivers/bluetooth/ath3k.c46
-rw-r--r--drivers/bluetooth/btmrvl_debugfs.c6
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c4
-rw-r--r--drivers/bluetooth/btusb.c18
-rw-r--r--drivers/bus/arm-cci.c28
-rw-r--r--drivers/char/agp/parisc-agp.c6
-rw-r--r--drivers/char/bsr.c18
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c1
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c77
-rw-r--r--drivers/char/pcmcia/synclink_cs.c26
-rw-r--r--drivers/char/sonypi.c5
-rw-r--r--drivers/char/tile-srom.c30
-rw-r--r--drivers/char/tpm/Kconfig12
-rw-r--r--drivers/char/tpm/Makefile1
-rw-r--r--drivers/char/tpm/xen-tpmfront.c473
-rw-r--r--drivers/char/virtio_console.c70
-rw-r--r--drivers/clk/samsung/clk-exynos4.c64
-rw-r--r--drivers/clk/zynq/clkc.c13
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/arm_arch_timer.c447
-rw-r--r--drivers/clocksource/cadence_ttc_timer.c13
-rw-r--r--drivers/clocksource/moxart_timer.c165
-rw-r--r--drivers/clocksource/sun4i_timer.c110
-rw-r--r--drivers/clocksource/time-orion.c2
-rw-r--r--drivers/cpufreq/Kconfig.arm36
-rw-r--r--drivers/cpufreq/Makefile2
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c12
-rw-r--r--drivers/cpufreq/arm_big_little_dt.c40
-rw-r--r--drivers/cpufreq/at32ap-cpufreq.c1
-rw-r--r--drivers/cpufreq/blackfin-cpufreq.c1
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c34
-rw-r--r--drivers/cpufreq/cpufreq-nforce2.c1
-rw-r--r--drivers/cpufreq/cpufreq.c743
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c38
-rw-r--r--drivers/cpufreq/cpufreq_governor.c38
-rw-r--r--drivers/cpufreq/cpufreq_governor.h24
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c86
-rw-r--r--drivers/cpufreq/cpufreq_performance.c3
-rw-r--r--drivers/cpufreq/cpufreq_powersave.c3
-rw-r--r--drivers/cpufreq/cpufreq_stats.c31
-rw-r--r--drivers/cpufreq/cris-artpec3-cpufreq.c1
-rw-r--r--drivers/cpufreq/cris-etraxfs-cpufreq.c1
-rw-r--r--drivers/cpufreq/e_powersaver.c5
-rw-r--r--drivers/cpufreq/elanfreq.c1
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c3
-rw-r--r--drivers/cpufreq/exynos-cpufreq.h21
-rw-r--r--drivers/cpufreq/exynos5440-cpufreq.c3
-rw-r--r--drivers/cpufreq/freq_table.c4
-rw-r--r--drivers/cpufreq/gx-suspmod.c5
-rw-r--r--drivers/cpufreq/highbank-cpufreq.c18
-rw-r--r--drivers/cpufreq/ia64-acpi-cpufreq.c5
-rw-r--r--drivers/cpufreq/imx6q-cpufreq.c21
-rw-r--r--drivers/cpufreq/intel_pstate.c13
-rw-r--r--drivers/cpufreq/kirkwood-cpufreq.c9
-rw-r--r--drivers/cpufreq/longhaul.c1
-rw-r--r--drivers/cpufreq/longrun.c1
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c12
-rw-r--r--drivers/cpufreq/maple-cpufreq.c24
-rw-r--r--drivers/cpufreq/mperf.c51
-rw-r--r--drivers/cpufreq/mperf.h9
-rw-r--r--drivers/cpufreq/p4-clockmod.c1
-rw-r--r--drivers/cpufreq/pasemi-cpufreq.c1
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c1
-rw-r--r--drivers/cpufreq/pmac32-cpufreq.c6
-rw-r--r--drivers/cpufreq/pmac64-cpufreq.c53
-rw-r--r--drivers/cpufreq/powernow-k6.c1
-rw-r--r--drivers/cpufreq/powernow-k7.c14
-rw-r--r--drivers/cpufreq/powernow-k8.c7
-rw-r--r--drivers/cpufreq/ppc-corenet-cpufreq.c1
-rw-r--r--drivers/cpufreq/ppc_cbe_cpufreq.c1
-rw-r--r--drivers/cpufreq/pxa2xx-cpufreq.c4
-rw-r--r--drivers/cpufreq/pxa3xx-cpufreq.c6
-rw-r--r--drivers/cpufreq/s3c2416-cpufreq.c1
-rw-r--r--drivers/cpufreq/s3c24xx-cpufreq.c8
-rw-r--r--drivers/cpufreq/s3c64xx-cpufreq.c1
-rw-r--r--drivers/cpufreq/sc520_freq.c1
-rw-r--r--drivers/cpufreq/sh-cpufreq.c1
-rw-r--r--drivers/cpufreq/sparc-us2e-cpufreq.c6
-rw-r--r--drivers/cpufreq/sparc-us3-cpufreq.c6
-rw-r--r--drivers/cpufreq/spear-cpufreq.c4
-rw-r--r--drivers/cpufreq/speedstep-centrino.c1
-rw-r--r--drivers/cpufreq/speedstep-ich.c1
-rw-r--r--drivers/cpufreq/speedstep-smi.c1
-rw-r--r--drivers/cpufreq/tegra-cpufreq.c4
-rw-r--r--drivers/cpufreq/unicore2-cpufreq.c2
-rw-r--r--drivers/cpuidle/Kconfig20
-rw-r--r--drivers/cpuidle/Kconfig.arm29
-rw-r--r--drivers/cpuidle/Makefile9
-rw-r--r--drivers/cpuidle/coupled.c129
-rw-r--r--drivers/cpuidle/cpuidle-calxeda.c2
-rw-r--r--drivers/cpuidle/cpuidle-kirkwood.c5
-rw-r--r--drivers/cpuidle/cpuidle-ux500.c (renamed from arch/arm/mach-ux500/cpuidle.c)19
-rw-r--r--drivers/cpuidle/cpuidle.c94
-rw-r--r--drivers/cpuidle/governors/ladder.c12
-rw-r--r--drivers/cpuidle/governors/menu.c214
-rw-r--r--drivers/cpuidle/sysfs.c101
-rw-r--r--drivers/devfreq/devfreq.c78
-rw-r--r--drivers/dma/Kconfig10
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/coh901318.c26
-rw-r--r--drivers/dma/cppi41.c1059
-rw-r--r--drivers/dma/dmaengine.c26
-rw-r--r--drivers/dma/pch_dma.c1
-rw-r--r--drivers/dma/pl330.c93
-rw-r--r--drivers/dma/sh/shdma.c4
-rw-r--r--drivers/edac/amd64_edac.c334
-rw-r--r--drivers/edac/amd64_edac.h60
-rw-r--r--drivers/edac/cpc925_edac.c2
-rw-r--r--drivers/edac/edac_mc_sysfs.c6
-rw-r--r--drivers/edac/i3200_edac.c3
-rw-r--r--drivers/edac/x38_edac.c3
-rw-r--r--drivers/extcon/Kconfig4
-rw-r--r--drivers/extcon/Makefile2
-rw-r--r--drivers/extcon/extcon-adc-jack.c3
-rw-r--r--drivers/extcon/extcon-arizona.c25
-rw-r--r--drivers/extcon/extcon-class.c16
-rw-r--r--drivers/extcon/extcon-gpio.c2
-rw-r--r--drivers/extcon/extcon-palmas.c134
-rw-r--r--drivers/extcon/of_extcon.c64
-rw-r--r--drivers/firewire/core-cdev.c3
-rw-r--r--drivers/firewire/ohci.c10
-rw-r--r--drivers/firmware/dcdbas.c19
-rw-r--r--drivers/firmware/dmi_scan.c14
-rw-r--r--drivers/firmware/efi/efi-pstore.c27
-rw-r--r--drivers/fmc/fmc-chardev.c9
-rw-r--r--drivers/fmc/fmc-write-eeprom.c4
-rw-r--r--drivers/gpio/gpio-msm-v1.c1
-rw-r--r--drivers/gpio/gpio-omap.c84
-rw-r--r--drivers/gpu/drm/Kconfig15
-rw-r--r--drivers/gpu/drm/Makefile5
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c5
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h3
-rw-r--r--drivers/gpu/drm/ast/ast_main.c9
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c6
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c5
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h3
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c9
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c6
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c51
-rw-r--r--drivers/gpu/drm/drm_bufs.c236
-rw-r--r--drivers/gpu/drm/drm_context.c81
-rw-r--r--drivers/gpu/drm/drm_crtc.c173
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c89
-rw-r--r--drivers/gpu/drm/drm_dma.c17
-rw-r--r--drivers/gpu/drm/drm_drv.c106
-rw-r--r--drivers/gpu/drm/drm_edid.c306
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c5
-rw-r--r--drivers/gpu/drm/drm_flip_work.c124
-rw-r--r--drivers/gpu/drm/drm_fops.c98
-rw-r--r--drivers/gpu/drm/drm_gem.c440
-rw-r--r--drivers/gpu/drm/drm_gem_cma_helper.c28
-rw-r--r--drivers/gpu/drm/drm_info.c6
-rw-r--r--drivers/gpu/drm/drm_ioctl.c62
-rw-r--r--drivers/gpu/drm/drm_irq.c5
-rw-r--r--drivers/gpu/drm/drm_memory.c2
-rw-r--r--drivers/gpu/drm/drm_mm.c229
-rw-r--r--drivers/gpu/drm/drm_modes.c58
-rw-r--r--drivers/gpu/drm/drm_pci.c35
-rw-r--r--drivers/gpu/drm/drm_platform.c16
-rw-r--r--drivers/gpu/drm/drm_prime.c190
-rw-r--r--drivers/gpu/drm/drm_proc.c209
-rw-r--r--drivers/gpu/drm/drm_scatter.c29
-rw-r--r--drivers/gpu/drm/drm_stub.c73
-rw-r--r--drivers/gpu/drm/drm_usb.c9
-rw-r--r--drivers/gpu/drm/drm_vm.c3
-rw-r--r--drivers/gpu/drm/drm_vma_manager.c436
-rw-r--r--drivers/gpu/drm/exynos/Kconfig6
-rw-r--r--drivers/gpu/drm/exynos/exynos_ddc.c14
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c38
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dmabuf.c37
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c20
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c266
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c57
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c53
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c35
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c118
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c88
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmiphy.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c10
-rw-r--r--drivers/gpu/drm/gma500/Makefile1
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c3
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.h12
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c57
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c920
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c154
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c89
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c71
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c31
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.h2
-rw-r--r--drivers/gpu/drm/gma500/gem.c39
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c776
-rw-r--r--drivers/gpu/drm/gma500/gma_display.h103
-rw-r--r--drivers/gpu/drm/gma500/gtt.c38
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.c15
-rw-r--r--drivers/gpu/drm/gma500/mdfld_dsi_output.h16
-rw-r--r--drivers/gpu/drm/gma500/mdfld_intel_display.c65
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c63
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c43
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c48
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c3
-rw-r--r--drivers/gpu/drm/gma500/psb_device.h (renamed from drivers/gpu/drm/gma500/psb_intel_display.h)13
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c21
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h7
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c944
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h44
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c75
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c56
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c485
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c3
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c3
-rw-r--r--drivers/gpu/drm/i810/i810_drv.h2
-rw-r--r--drivers/gpu/drm/i915/Makefile2
-rw-r--r--drivers/gpu/drm/i915/dvo_ch7xxx.c2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c986
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c143
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c322
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h603
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c759
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c34
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c73
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.c43
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c93
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c191
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c313
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c208
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c19
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c1019
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1523
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h166
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c71
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h45
-rw-r--r--drivers/gpu/drm/i915/intel_acpi.c14
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c38
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c78
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1591
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c518
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h148
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c61
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c23
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c305
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c40
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c24
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c21
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c1169
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c91
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h16
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c94
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c59
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c31
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c595
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c3
-rw-r--r--drivers/gpu/drm/mga/mga_drv.h2
-rw-r--r--drivers/gpu/drm/mga/mga_state.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c5
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c9
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c46
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c7
-rw-r--r--drivers/gpu/drm/msm/Kconfig34
-rw-r--r--drivers/gpu/drm/msm/Makefile30
-rw-r--r--drivers/gpu/drm/msm/NOTES69
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h1438
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h2193
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c502
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.h30
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h432
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c370
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h141
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h254
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h502
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h114
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h48
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c272
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h131
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h508
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c167
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c367
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_i2c.c281
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c141
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c214
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h50
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4.xml.h1061
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_crtc.c685
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c305
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_format.c56
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_irq.c203
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_kms.c365
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_kms.h194
-rw-r--r--drivers/gpu/drm/msm/mdp4/mdp4_plane.c243
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c776
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h213
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c202
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c258
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c597
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h99
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c412
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c463
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h124
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c61
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.h43
-rw-r--r--drivers/gpu/drm/nouveau/core/core/mm.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/core/printk.c19
-rw-r--r--drivers/gpu/drm/nouveau/core/core/ramht.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c52
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c15
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/dport.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c34
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv40.h3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c9
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c52
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/vp/nv98.c47
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/xtensa.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/math.h16
-rw-r--r--drivers/gpu/drm/nouveau/core/include/core/printk.h13
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/i2c.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mc.h8
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/timer.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/vm.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/os.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/therm.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/priv.h2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c12
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c22
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c14
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c34
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/base.c34
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c5
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/base.c19
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/fan.c20
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/priv.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/temp.c21
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/base.c7
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c35
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/base.c27
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c107
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c42
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c32
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c284
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.h9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c29
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioc32.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ioctl.h1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c14
-rw-r--r--drivers/gpu/drm/nouveau/nv17_fence.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv40_pm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fence.c14
-rw-r--r--drivers/gpu/drm/omapdrm/Makefile3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c12
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h7
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c74
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c52
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem_helpers.c169
-rw-r--r--drivers/gpu/drm/omapdrm/omap_plane.c51
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c42
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c70
-rw-r--r--drivers/gpu/drm/qxl/qxl_draw.c263
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h84
-rw-r--r--drivers/gpu/drm/qxl/qxl_dumb.c7
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c184
-rw-r--r--drivers/gpu/drm/qxl/qxl_fence.c10
-rw-r--r--drivers/gpu/drm/qxl/qxl_gem.c28
-rw-r--r--drivers/gpu/drm/qxl/qxl_image.c111
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c321
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.c71
-rw-r--r--drivers/gpu/drm/qxl/qxl_object.h8
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c212
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c6
-rw-r--r--drivers/gpu/drm/r128/r128_cce.c2
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c3
-rw-r--r--drivers/gpu/drm/r128/r128_drv.h2
-rw-r--r--drivers/gpu/drm/r128/r128_state.c2
-rw-r--r--drivers/gpu/drm/radeon/Makefile24
-rw-r--r--drivers/gpu/drm/radeon/atom.c5
-rw-r--r--drivers/gpu/drm/radeon/atombios.h615
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c45
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c11
-rw-r--r--drivers/gpu/drm/radeon/atombios_i2c.c16
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c23
-rw-r--r--drivers/gpu/drm/radeon/cayman_blit_shaders.c54
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c5243
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.h332
-rw-r--r--drivers/gpu/drm/radeon/ci_smc.c262
-rw-r--r--drivers/gpu/drm/radeon/cik.c3139
-rw-r--r--drivers/gpu/drm/radeon/cik_reg.h3
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c785
-rw-r--r--drivers/gpu/drm/radeon/cikd.h594
-rw-r--r--drivers/gpu/drm/radeon/clearstate_cayman.h2
-rw-r--r--drivers/gpu/drm/radeon/clearstate_ci.h944
-rw-r--r--drivers/gpu/drm/radeon/clearstate_evergreen.h2
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c20
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c278
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c538
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c729
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_shaders.c54
-rw-r--r--drivers/gpu/drm/radeon/evergreen_dma.c190
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c98
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h14
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c2645
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.h199
-rw-r--r--drivers/gpu/drm/radeon/kv_smc.c207
-rw-r--r--drivers/gpu/drm/radeon/ni.c385
-rw-r--r--drivers/gpu/drm/radeon/ni_dma.c338
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c38
-rw-r--r--drivers/gpu/drm/radeon/ppsmc.h57
-rw-r--r--drivers/gpu/drm/radeon/pptable.h682
-rw-r--r--drivers/gpu/drm/radeon/r100.c4
-rw-r--r--drivers/gpu/drm/radeon/r600.c811
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c60
-rw-r--r--drivers/gpu/drm/radeon/r600_blit.c31
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c785
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.h1
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c6
-rw-r--r--drivers/gpu/drm/radeon/r600_dma.c497
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c304
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.h6
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c150
-rw-r--r--drivers/gpu/drm/radeon/r600d.h39
-rw-r--r--drivers/gpu/drm/radeon/radeon.h281
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c1263
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h121
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c188
-rw-r--r--drivers/gpu/drm/radeon/radeon_blit_common.h44
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c159
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c67
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c50
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h5
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c97
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_ucode.h17
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c159
-rw-r--r--drivers/gpu/drm/radeon/rs400.c9
-rw-r--r--drivers/gpu/drm/radeon/rv6xx_dpm.c68
-rw-r--r--drivers/gpu/drm/radeon/rv770.c217
-rw-r--r--drivers/gpu/drm/radeon/rv770_dma.c101
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c48
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.h1
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h16
-rw-r--r--drivers/gpu/drm/radeon/si.c857
-rw-r--r--drivers/gpu/drm/radeon/si_dma.c235
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c180
-rw-r--r--drivers/gpu/drm/radeon/sid.h71
-rw-r--r--drivers/gpu/drm/radeon/smu7.h170
-rw-r--r--drivers/gpu/drm/radeon/smu7_discrete.h486
-rw-r--r--drivers/gpu/drm/radeon/smu7_fusion.h300
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c22
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.h3
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c7
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c436
-rw-r--r--drivers/gpu/drm/radeon/uvd_v2_2.c165
-rw-r--r--drivers/gpu/drm/radeon/uvd_v3_1.c55
-rw-r--r--drivers/gpu/drm/radeon/uvd_v4_2.c68
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig7
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile10
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c258
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.h13
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c176
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h63
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c202
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.h49
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c187
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.h50
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c165
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.h29
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c (renamed from drivers/gpu/drm/rcar-du/rcar_du_lvds.c)101
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h (renamed from drivers/gpu/drm/rcar-du/rcar_du_lvds.h)17
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c196
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h46
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c170
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.h26
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_regs.h94
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vgacon.c (renamed from drivers/gpu/drm/rcar-du/rcar_du_vga.c)65
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vgacon.h (renamed from drivers/gpu/drm/rcar-du/rcar_du_vga.h)15
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds_regs.h69
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c2
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c3
-rw-r--r--drivers/gpu/drm/savage/savage_drv.h2
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c3
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c3
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c3
-rw-r--r--drivers/gpu/drm/sis/sis_drv.h2
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c8
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c43
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c3
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_slave.c28
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_tfp410.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c102
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_manager.c41
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c231
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c3
-rw-r--r--drivers/gpu/drm/udl/udl_drv.h2
-rw-r--r--drivers/gpu/drm/udl/udl_gem.c63
-rw-r--r--drivers/gpu/drm/udl/udl_main.c4
-rw-r--r--drivers/gpu/drm/via/via_dma.c2
-rw-r--r--drivers/gpu/drm/via/via_drv.c3
-rw-r--r--drivers/gpu/drm/via/via_drv.h2
-rw-r--r--drivers/gpu/drm/via/via_mm.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c58
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c4
-rw-r--r--drivers/gpu/host1x/dev.c2
-rw-r--r--drivers/gpu/host1x/dev.h2
-rw-r--r--drivers/gpu/host1x/drm/dc.c2
-rw-r--r--drivers/gpu/host1x/drm/drm.c7
-rw-r--r--drivers/gpu/host1x/drm/gem.c16
-rw-r--r--drivers/gpu/host1x/drm/gem.h3
-rw-r--r--drivers/gpu/host1x/drm/hdmi.c27
-rw-r--r--drivers/gpu/host1x/drm/output.c2
-rw-r--r--drivers/gpu/host1x/drm/rgb.c14
-rw-r--r--drivers/gpu/host1x/job.c15
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c147
-rw-r--r--drivers/hid/Kconfig8
-rw-r--r--drivers/hid/Makefile1
-rw-r--r--drivers/hid/hid-a4tech.c21
-rw-r--r--drivers/hid/hid-apple.c16
-rw-r--r--drivers/hid/hid-core.c169
-rw-r--r--drivers/hid/hid-holtekff.c2
-rw-r--r--drivers/hid/hid-hyperv.c1
-rw-r--r--drivers/hid/hid-ids.h12
-rw-r--r--drivers/hid/hid-input.c92
-rw-r--r--drivers/hid/hid-kye.c45
-rw-r--r--drivers/hid/hid-logitech-dj.c67
-rw-r--r--drivers/hid/hid-logitech-dj.h1
-rw-r--r--drivers/hid/hid-magicmouse.c19
-rw-r--r--drivers/hid/hid-multitouch.c107
-rw-r--r--drivers/hid/hid-ntrig.c15
-rw-r--r--drivers/hid/hid-picolcd_cir.c3
-rw-r--r--drivers/hid/hid-picolcd_core.c2
-rw-r--r--drivers/hid/hid-picolcd_debugfs.c23
-rw-r--r--drivers/hid/hid-picolcd_fb.c6
-rw-r--r--drivers/hid/hid-pl.c10
-rw-r--r--drivers/hid/hid-roccat-arvo.c59
-rw-r--r--drivers/hid/hid-roccat-isku.c100
-rw-r--r--drivers/hid/hid-roccat-kone.c110
-rw-r--r--drivers/hid/hid-roccat-koneplus.c177
-rw-r--r--drivers/hid/hid-roccat-konepure.c70
-rw-r--r--drivers/hid/hid-roccat-kovaplus.c168
-rw-r--r--drivers/hid/hid-roccat-pyra.c158
-rw-r--r--drivers/hid/hid-roccat-savu.c58
-rw-r--r--drivers/hid/hid-sensor-hub.c58
-rw-r--r--drivers/hid/hid-sony.c12
-rw-r--r--drivers/hid/hid-speedlink.c11
-rw-r--r--drivers/hid/hid-wiimote-core.c41
-rw-r--r--drivers/hid/hid-wiimote-modules.c392
-rw-r--r--drivers/hid/hid-wiimote.h3
-rw-r--r--drivers/hid/hid-xinmo.c61
-rw-r--r--drivers/hid/hid-zydacron.c19
-rw-r--r--drivers/hid/hidraw.c82
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c77
-rw-r--r--drivers/hid/uhid.c26
-rw-r--r--drivers/hid/usbhid/hid-core.c83
-rw-r--r--drivers/hid/usbhid/hid-quirks.c2
-rw-r--r--drivers/hid/usbhid/usbhid.h3
-rw-r--r--drivers/hv/channel_mgmt.c89
-rw-r--r--drivers/hv/connection.c5
-rw-r--r--drivers/hv/hv_balloon.c26
-rw-r--r--drivers/hv/hv_kvp.c26
-rw-r--r--drivers/hv/hv_snapshot.c18
-rw-r--r--drivers/hv/hv_util.c22
-rw-r--r--drivers/hv/vmbus_drv.c9
-rw-r--r--drivers/hwmon/Kconfig18
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/acpi_power_meter.c4
-rw-r--r--drivers/hwmon/ads1015.c27
-rw-r--r--drivers/hwmon/ads7828.c2
-rw-r--r--drivers/hwmon/adt7462.c8
-rw-r--r--drivers/hwmon/adt7470.c6
-rw-r--r--drivers/hwmon/coretemp.c14
-rw-r--r--drivers/hwmon/ds620.c2
-rw-r--r--drivers/hwmon/f71805f.c2
-rw-r--r--drivers/hwmon/f71882fg.c2
-rw-r--r--drivers/hwmon/f75375s.c3
-rw-r--r--drivers/hwmon/g762.c2
-rw-r--r--drivers/hwmon/gpio-fan.c2
-rw-r--r--drivers/hwmon/htu21.c199
-rw-r--r--drivers/hwmon/i5k_amb.c4
-rw-r--r--drivers/hwmon/ibmaem.c4
-rw-r--r--drivers/hwmon/ibmpex.c4
-rw-r--r--drivers/hwmon/ina2xx.c4
-rw-r--r--drivers/hwmon/it87.c6
-rw-r--r--drivers/hwmon/k10temp.c3
-rw-r--r--drivers/hwmon/lm87.c4
-rw-r--r--drivers/hwmon/max197.c2
-rw-r--r--drivers/hwmon/max6639.c2
-rw-r--r--drivers/hwmon/max6697.c4
-rw-r--r--drivers/hwmon/mcp3021.c7
-rw-r--r--drivers/hwmon/nct6775.c2531
-rw-r--r--drivers/hwmon/ntc_thermistor.c2
-rw-r--r--drivers/hwmon/pc87427.c4
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c2
-rw-r--r--drivers/hwmon/s3c-hwmon.c6
-rw-r--r--drivers/hwmon/sht15.c6
-rw-r--r--drivers/hwmon/smsc47m1.c4
-rw-r--r--drivers/hwmon/w83627ehf.c22
-rw-r--r--drivers/hwmon/w83627hf.c6
-rw-r--r--drivers/i2c/busses/Kconfig2
-rw-r--r--drivers/i2c/busses/i2c-at91.c3
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c16
-rw-r--r--drivers/i2c/busses/i2c-cbus-gpio.c5
-rw-r--r--drivers/i2c/busses/i2c-cpm.c6
-rw-r--r--drivers/i2c/busses/i2c-davinci.c6
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c25
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h12
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c46
-rw-r--r--drivers/i2c/busses/i2c-gpio.c9
-rw-r--r--drivers/i2c/busses/i2c-i801.c2
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c4
-rw-r--r--drivers/i2c/busses/i2c-imx.c228
-rw-r--r--drivers/i2c/busses/i2c-ismt.c1
-rw-r--r--drivers/i2c/busses/i2c-kempld.c4
-rw-r--r--drivers/i2c/busses/i2c-mpc.c44
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c207
-rw-r--r--drivers/i2c/busses/i2c-mxs.c118
-rw-r--r--drivers/i2c/busses/i2c-nomadik.c5
-rw-r--r--drivers/i2c/busses/i2c-nuc900.c2
-rw-r--r--drivers/i2c/busses/i2c-ocores.c12
-rw-r--r--drivers/i2c/busses/i2c-octeon.c3
-rw-r--r--drivers/i2c/busses/i2c-omap.c27
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c2
-rw-r--r--drivers/i2c/busses/i2c-piix4.c41
-rw-r--r--drivers/i2c/busses/i2c-pnx.c8
-rw-r--r--drivers/i2c/busses/i2c-powermac.c14
-rw-r--r--drivers/i2c/busses/i2c-puv3.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c70
-rw-r--r--drivers/i2c/busses/i2c-rcar.c37
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c4
-rw-r--r--drivers/i2c/busses/i2c-s6000.c5
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c2
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c4
-rw-r--r--drivers/i2c/busses/i2c-sirf.c39
-rw-r--r--drivers/i2c/busses/i2c-stu300.c7
-rw-r--r--drivers/i2c/busses/i2c-tegra.c3
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c49
-rw-r--r--drivers/i2c/busses/i2c-versatile.c2
-rw-r--r--drivers/i2c/busses/i2c-wmt.c3
-rw-r--r--drivers/i2c/busses/i2c-xiic.c5
-rw-r--r--drivers/i2c/i2c-core.c206
-rw-r--r--drivers/i2c/i2c-mux.c3
-rw-r--r--drivers/i2c/i2c-smbus.c2
-rw-r--r--drivers/i2c/muxes/i2c-arb-gpio-challenge.c3
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c9
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c2
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c3
-rw-r--r--drivers/ide/gayle.c2
-rw-r--r--drivers/ide/ide-acpi.c5
-rw-r--r--drivers/ide/ide-ioctls.c4
-rw-r--r--drivers/ide/ide-park.c6
-rw-r--r--drivers/ide/ide_platform.c2
-rw-r--r--drivers/ide/palm_bk3710.c4
-rw-r--r--drivers/ide/sgiioc4.c2
-rw-r--r--drivers/ide/tx4938ide.c4
-rw-r--r--drivers/iio/Kconfig8
-rw-r--r--drivers/iio/Makefile3
-rw-r--r--drivers/iio/accel/Kconfig32
-rw-r--r--drivers/iio/accel/Makefile5
-rw-r--r--drivers/iio/accel/bma180.c676
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c43
-rw-r--r--drivers/iio/accel/kxsd9.c17
-rw-r--r--drivers/iio/accel/st_accel.h11
-rw-r--r--drivers/iio/accel/st_accel_core.c28
-rw-r--r--drivers/iio/accel/st_accel_i2c.c17
-rw-r--r--drivers/iio/accel/st_accel_spi.c17
-rw-r--r--drivers/iio/adc/Kconfig61
-rw-r--r--drivers/iio/adc/Makefile3
-rw-r--r--drivers/iio/adc/ad7266.c16
-rw-r--r--drivers/iio/adc/ad7298.c24
-rw-r--r--drivers/iio/adc/ad7476.c26
-rw-r--r--drivers/iio/adc/ad7791.c19
-rw-r--r--drivers/iio/adc/ad7793.c23
-rw-r--r--drivers/iio/adc/ad7887.c23
-rw-r--r--drivers/iio/adc/ad7923.c20
-rw-r--r--drivers/iio/adc/at91_adc.c113
-rw-r--r--drivers/iio/adc/exynos_adc.c23
-rw-r--r--drivers/iio/adc/lp8788_adc.c7
-rw-r--r--drivers/iio/adc/max1363.c15
-rw-r--r--drivers/iio/adc/mcp320x.c18
-rw-r--r--drivers/iio/adc/nau7802.c581
-rw-r--r--drivers/iio/adc/ti-adc081c.c18
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c43
-rw-r--r--drivers/iio/adc/twl6030-gpadc.c1013
-rw-r--r--drivers/iio/adc/viperboard_adc.c9
-rw-r--r--drivers/iio/amplifiers/Kconfig2
-rw-r--r--drivers/iio/amplifiers/Makefile1
-rw-r--r--drivers/iio/amplifiers/ad8366.c13
-rw-r--r--drivers/iio/common/Makefile1
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c41
-rw-r--r--drivers/iio/dac/Kconfig44
-rw-r--r--drivers/iio/dac/Makefile1
-rw-r--r--drivers/iio/dac/ad5064.c21
-rw-r--r--drivers/iio/dac/ad5360.c15
-rw-r--r--drivers/iio/dac/ad5380.c18
-rw-r--r--drivers/iio/dac/ad5421.c19
-rw-r--r--drivers/iio/dac/ad5446.c19
-rw-r--r--drivers/iio/dac/ad5449.c15
-rw-r--r--drivers/iio/dac/ad5504.c31
-rw-r--r--drivers/iio/dac/ad5624r_spi.c22
-rw-r--r--drivers/iio/dac/ad5686.c17
-rw-r--r--drivers/iio/dac/ad5755.c14
-rw-r--r--drivers/iio/dac/ad5764.c20
-rw-r--r--drivers/iio/dac/ad5791.c35
-rw-r--r--drivers/iio/dac/ad7303.c31
-rw-r--r--drivers/iio/dac/max517.c17
-rw-r--r--drivers/iio/dac/mcp4725.c177
-rw-r--r--drivers/iio/frequency/Kconfig1
-rw-r--r--drivers/iio/frequency/Makefile1
-rw-r--r--drivers/iio/frequency/ad9523.c17
-rw-r--r--drivers/iio/frequency/adf4350.c31
-rw-r--r--drivers/iio/gyro/Kconfig18
-rw-r--r--drivers/iio/gyro/Makefile2
-rw-r--r--drivers/iio/gyro/adis16080.c21
-rw-r--r--drivers/iio/gyro/adis16130.c23
-rw-r--r--drivers/iio/gyro/adis16136.c10
-rw-r--r--drivers/iio/gyro/adis16260.c (renamed from drivers/staging/iio/gyro/adis16260_core.c)319
-rw-r--r--drivers/iio/gyro/adxrs450.c15
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c41
-rw-r--r--drivers/iio/gyro/itg3200_core.c15
-rw-r--r--drivers/iio/gyro/st_gyro.h11
-rw-r--r--drivers/iio/gyro/st_gyro_core.c14
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c18
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c18
-rw-r--r--drivers/iio/iio_core_trigger.h7
-rw-r--r--drivers/iio/imu/Kconfig2
-rw-r--r--drivers/iio/imu/Makefile1
-rw-r--r--drivers/iio/imu/adis16400_core.c10
-rw-r--r--drivers/iio/imu/adis16480.c10
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c29
-rw-r--r--drivers/iio/industrialio-core.c51
-rw-r--r--drivers/iio/industrialio-trigger.c106
-rw-r--r--drivers/iio/light/Kconfig34
-rw-r--r--drivers/iio/light/Makefile4
-rw-r--r--drivers/iio/light/adjd_s311.c46
-rw-r--r--drivers/iio/light/apds9300.c512
-rw-r--r--drivers/iio/light/hid-sensor-als.c41
-rw-r--r--drivers/iio/light/lm3533-als.c7
-rw-r--r--drivers/iio/light/tsl2563.c25
-rw-r--r--drivers/iio/light/vcnl4000.c16
-rw-r--r--drivers/iio/magnetometer/Kconfig6
-rw-r--r--drivers/iio/magnetometer/Makefile1
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c43
-rw-r--r--drivers/iio/magnetometer/st_magn.h3
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c6
-rw-r--r--drivers/iio/magnetometer/st_magn_i2c.c17
-rw-r--r--drivers/iio/magnetometer/st_magn_spi.c17
-rw-r--r--drivers/iio/pressure/Kconfig14
-rw-r--r--drivers/iio/pressure/Makefile1
-rw-r--r--drivers/iio/pressure/st_pressure.h11
-rw-r--r--drivers/iio/pressure/st_pressure_core.c21
-rw-r--r--drivers/iio/pressure/st_pressure_i2c.c17
-rw-r--r--drivers/iio/pressure/st_pressure_spi.c17
-rw-r--r--drivers/iio/temperature/Kconfig16
-rw-r--r--drivers/iio/temperature/Makefile5
-rw-r--r--drivers/iio/temperature/tmp006.c291
-rw-r--r--drivers/iio/trigger/Kconfig4
-rw-r--r--drivers/iio/trigger/Makefile1
-rw-r--r--drivers/infiniband/core/cma.c75
-rw-r--r--drivers/infiniband/core/mad.c8
-rw-r--r--drivers/infiniband/core/uverbs.h4
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c250
-rw-r--r--drivers/infiniband/core/uverbs_main.c42
-rw-r--r--drivers/infiniband/core/verbs.c30
-rw-r--r--drivers/infiniband/hw/amso1100/c2_ae.c18
-rw-r--r--drivers/infiniband/hw/amso1100/c2_cm.c16
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c46
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/Kconfig2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c860
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c329
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c116
-rw-r--r--drivers/infiniband/hw/cxgb4/ev.c10
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h9
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c43
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h29
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c10
-rw-r--r--drivers/infiniband/hw/mlx4/main.c235
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h12
-rw-r--r--drivers/infiniband/hw/mlx5/main.c11
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c153
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c4
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c3
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h22
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_abi.h32
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c15
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c458
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.h13
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c7
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h210
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c883
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h6
-rw-r--r--drivers/infiniband/hw/qib/qib.h5
-rw-r--r--drivers/infiniband/hw/qib/qib_common.h32
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_mad.h3
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_sdma.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c909
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c76
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c11
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c9
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c22
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h73
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c148
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c231
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c292
-rw-r--r--drivers/input/gameport/gameport.c12
-rw-r--r--drivers/input/joystick/xpad.c1
-rw-r--r--drivers/input/mouse/bcm5974.c6
-rw-r--r--drivers/input/mouse/elantech.c44
-rw-r--r--drivers/input/mouse/elantech.h1
-rw-r--r--drivers/input/serio/Kconfig3
-rw-r--r--drivers/input/serio/serio.c21
-rw-r--r--drivers/input/tablet/wacom_wac.c10
-rw-r--r--drivers/irqchip/irq-sirfsoc.c18
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c12
-rw-r--r--drivers/isdn/mISDN/core.c64
-rw-r--r--drivers/isdn/mISDN/dsp_core.c4
-rw-r--r--drivers/leds/led-class.c38
-rw-r--r--drivers/macintosh/windfarm_rm31.c18
-rw-r--r--drivers/md/dm-cache-policy-mq.c16
-rw-r--r--drivers/md/dm-mpath.c16
-rw-r--r--drivers/md/raid10.c8
-rw-r--r--drivers/md/raid5.c15
-rw-r--r--drivers/md/raid5.h1
-rw-r--r--drivers/media/common/siano/Kconfig2
-rw-r--r--drivers/media/common/siano/smsdvb-main.c3
-rw-r--r--drivers/media/dvb-core/dvb-usb-ids.h2
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c16
-rw-r--r--drivers/media/i2c/Kconfig23
-rw-r--r--drivers/media/i2c/Makefile2
-rw-r--r--drivers/media/i2c/ad9389b.c163
-rw-r--r--drivers/media/i2c/adv7343.c89
-rw-r--r--drivers/media/i2c/adv7511.c1198
-rw-r--r--drivers/media/i2c/adv7604.c156
-rw-r--r--drivers/media/i2c/adv7842.c2946
-rw-r--r--drivers/media/i2c/ml86v7667.c7
-rw-r--r--drivers/media/i2c/mt9v032.c17
-rw-r--r--drivers/media/i2c/ov9650.c2
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c5
-rw-r--r--drivers/media/i2c/s5k6aa.c2
-rw-r--r--drivers/media/i2c/saa7115.c169
-rw-r--r--drivers/media/i2c/saa711x_regs.h19
-rw-r--r--drivers/media/i2c/smiapp-pll.c17
-rw-r--r--drivers/media/i2c/smiapp/smiapp-core.c31
-rw-r--r--drivers/media/i2c/soc_camera/mt9m111.c38
-rw-r--r--drivers/media/i2c/soc_camera/mt9t031.c7
-rw-r--r--drivers/media/i2c/ths7303.c6
-rw-r--r--drivers/media/i2c/ths8200.c123
-rw-r--r--drivers/media/i2c/tvp514x.c20
-rw-r--r--drivers/media/i2c/tvp7002.c73
-rw-r--r--drivers/media/media-entity.c14
-rw-r--r--drivers/media/pci/bt8xx/bttv-cards.c26
-rw-r--r--drivers/media/pci/bt8xx/bttvp.h3
-rw-r--r--drivers/media/pci/cx23885/Kconfig1
-rw-r--r--drivers/media/pci/cx23885/cx23885-av.c13
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c6
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c5
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c53
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.c5
-rw-r--r--drivers/media/pci/cx23885/cx23885-video.h26
-rw-r--r--drivers/media/pci/cx23885/cx23885.h2
-rw-r--r--drivers/media/pci/cx88/Kconfig11
-rw-r--r--drivers/media/pci/cx88/cx88.h2
-rw-r--r--drivers/media/platform/Kconfig12
-rw-r--r--drivers/media/platform/Makefile2
-rw-r--r--drivers/media/platform/blackfin/bfin_capture.c9
-rw-r--r--drivers/media/platform/coda.c1484
-rw-r--r--drivers/media/platform/coda.h107
-rw-r--r--drivers/media/platform/davinci/vpbe_display.c23
-rw-r--r--drivers/media/platform/davinci/vpbe_osd.c45
-rw-r--r--drivers/media/platform/davinci/vpbe_venc.c97
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c162
-rw-r--r--drivers/media/platform/davinci/vpif_capture.h2
-rw-r--r--drivers/media/platform/davinci/vpif_display.c221
-rw-r--r--drivers/media/platform/davinci/vpif_display.h3
-rw-r--r--drivers/media/platform/davinci/vpss.c62
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c22
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.h1
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-m2m.c1
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-i2c.c37
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-param.c4
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is-regs.c4
-rw-r--r--drivers/media/platform/exynos4-is/fimc-is.c3
-rw-r--r--drivers/media/platform/exynos4-is/fimc-isp.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c17
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c18
-rw-r--r--drivers/media/platform/marvell-ccic/cafe-driver.c4
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c325
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.h50
-rw-r--r--drivers/media/platform/marvell-ccic/mmp-driver.c278
-rw-r--r--drivers/media/platform/s3c-camif/camif-regs.c8
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c1
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc-v6.h4
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc-v7.h61
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c32
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c3
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_common.h23
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c12
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_dec.c88
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_enc.c150
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr.c2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c149
-rw-r--r--drivers/media/platform/s5p-tv/hdmi_drv.c3
-rw-r--r--drivers/media/platform/sh_veu.c2
-rw-r--r--drivers/media/platform/soc_camera/Kconfig8
-rw-r--r--drivers/media/platform/soc_camera/Makefile1
-rw-r--r--drivers/media/platform/soc_camera/mx3_camera.c67
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c1486
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c9
-rw-r--r--drivers/media/platform/soc_camera/soc_camera.c40
-rw-r--r--drivers/media/platform/vsp1/Makefile5
-rw-r--r--drivers/media/platform/vsp1/vsp1.h74
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c527
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.c181
-rw-r--r--drivers/media/platform/vsp1/vsp1_entity.h68
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.c238
-rw-r--r--drivers/media/platform/vsp1/vsp1_lif.h37
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h581
-rw-r--r--drivers/media/platform/vsp1/vsp1_rpf.c209
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.c124
-rw-r--r--drivers/media/platform/vsp1/vsp1_rwpf.h53
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.c346
-rw-r--r--drivers/media/platform/vsp1/vsp1_uds.h40
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c1069
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.h144
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c233
-rw-r--r--drivers/media/radio/Kconfig12
-rw-r--r--drivers/media/radio/Makefile1
-rw-r--r--drivers/media/radio/radio-aztech.c81
-rw-r--r--drivers/media/radio/radio-maxiradio.c15
-rw-r--r--drivers/media/radio/radio-sf16fmr2.c2
-rw-r--r--drivers/media/radio/radio-shark.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c11
-rw-r--r--drivers/media/radio/tea575x.c (renamed from sound/i2c/other/tea575x-tuner.c)21
-rw-r--r--drivers/media/rc/Kconfig3
-rw-r--r--drivers/media/rc/ene_ir.c30
-rw-r--r--drivers/media/rc/ene_ir.h2
-rw-r--r--drivers/media/rc/iguanair.c4
-rw-r--r--drivers/media/rc/ir-lirc-codec.c12
-rw-r--r--drivers/media/rc/lirc_dev.c10
-rw-r--r--drivers/media/rc/rc-main.c52
-rw-r--r--drivers/media/rc/redrat3.c120
-rw-r--r--drivers/media/rc/ttusbir.c1
-rw-r--r--drivers/media/rc/winbond-cir.c38
-rw-r--r--drivers/media/tuners/e4000.c82
-rw-r--r--drivers/media/tuners/e4000.h2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c40
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-dvb.c1
-rw-r--r--drivers/media/usb/cx231xx/cx231xx.h1
-rw-r--r--drivers/media/usb/dvb-usb-v2/Kconfig2
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb.h5
-rw-r--r--drivers/media/usb/dvb-usb-v2/dvb_usb_core.c134
-rw-r--r--drivers/media/usb/dvb-usb-v2/lmedm04.c2
-rw-r--r--drivers/media/usb/dvb-usb/dib0700_devices.c12
-rw-r--r--drivers/media/usb/dvb-usb/m920x.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-i2c.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c1
-rw-r--r--drivers/media/usb/gspca/Kconfig9
-rw-r--r--drivers/media/usb/gspca/Makefile2
-rw-r--r--drivers/media/usb/gspca/gspca.c6
-rw-r--r--drivers/media/usb/gspca/ov519.c32
-rw-r--r--drivers/media/usb/gspca/ov534.c3
-rw-r--r--drivers/media/usb/gspca/stk1135.c685
-rw-r--r--drivers/media/usb/gspca/stk1135.h57
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c11
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-video.c9
-rw-r--r--drivers/media/usb/s2255/s2255drv.c9
-rw-r--r--drivers/media/usb/stk1160/Kconfig16
-rw-r--r--drivers/media/usb/stk1160/stk1160-v4l.c6
-rw-r--r--drivers/media/usb/tlg2300/pd-main.c37
-rw-r--r--drivers/media/usb/usbtv/Kconfig2
-rw-r--r--drivers/media/usb/usbtv/usbtv.c151
-rw-r--r--drivers/media/v4l2-core/Makefile1
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c112
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c357
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c67
-rw-r--r--drivers/media/v4l2-core/v4l2-dev.c35
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c609
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c69
-rw-r--r--drivers/media/v4l2-core/v4l2-of.c13
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c269
-rw-r--r--drivers/mfd/db8500-prcmu.c4
-rw-r--r--drivers/mfd/rts5227.c2
-rw-r--r--drivers/misc/atmel-ssc.c19
-rw-r--r--drivers/misc/c2port/core.c83
-rw-r--r--drivers/misc/enclosure.c29
-rw-r--r--drivers/misc/hpilo.c4
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c26
-rw-r--r--drivers/misc/ics932s401.c4
-rw-r--r--drivers/misc/lkdtm.c63
-rw-r--r--drivers/misc/mei/amthif.c14
-rw-r--r--drivers/misc/mei/bus.c14
-rw-r--r--drivers/misc/mei/client.c15
-rw-r--r--drivers/misc/mei/client.h9
-rw-r--r--drivers/misc/mei/hbm.c2
-rw-r--r--drivers/misc/mei/hw-me.c23
-rw-r--r--drivers/misc/mei/init.c14
-rw-r--r--drivers/misc/mei/main.c22
-rw-r--r--drivers/misc/sram.c3
-rw-r--r--drivers/misc/ti-st/st_core.c2
-rw-r--r--drivers/misc/vmw_balloon.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_driver.c2
-rw-r--r--drivers/misc/vmw_vmci/vmci_driver.h7
-rw-r--r--drivers/misc/vmw_vmci/vmci_guest.c22
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c315
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.h18
-rw-r--r--drivers/mmc/core/core.c2
-rw-r--r--drivers/mmc/host/dw_mmc.c2
-rw-r--r--drivers/mmc/host/pxamci.c4
-rw-r--r--drivers/mmc/host/sdhci.c4
-rw-r--r--drivers/net/appletalk/cops.c2
-rw-r--r--drivers/net/appletalk/ltpc.c2
-rw-r--r--drivers/net/arcnet/arcnet.c2
-rw-r--r--drivers/net/bonding/bond_3ad.c52
-rw-r--r--drivers/net/bonding/bond_alb.c144
-rw-r--r--drivers/net/bonding/bond_alb.h3
-rw-r--r--drivers/net/bonding/bond_main.c969
-rw-r--r--drivers/net/bonding/bond_procfs.c12
-rw-r--r--drivers/net/bonding/bond_sysfs.c90
-rw-r--r--drivers/net/bonding/bonding.h96
-rw-r--r--drivers/net/caif/caif_serial.c4
-rw-r--r--drivers/net/can/at91_can.c2
-rw-r--r--drivers/net/can/c_can/c_can_platform.c2
-rw-r--r--drivers/net/can/flexcan.c83
-rw-r--r--drivers/net/can/mcp251x.c98
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c23
-rw-r--r--drivers/net/can/mscan/mscan.c25
-rw-r--r--drivers/net/can/mscan/mscan.h3
-rw-r--r--drivers/net/can/usb/esd_usb2.c10
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c2
-rw-r--r--drivers/net/can/usb/usb_8dev.c1
-rw-r--r--drivers/net/ethernet/8390/Kconfig2
-rw-r--r--drivers/net/ethernet/8390/ax88796.c6
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c6
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c12
-rw-r--r--drivers/net/ethernet/allwinner/Kconfig26
-rw-r--r--drivers/net/ethernet/amd/atarilance.c2
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c2
-rw-r--r--drivers/net/ethernet/amd/mvme147.c2
-rw-r--r--drivers/net/ethernet/amd/ni65.c2
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c6
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c2
-rw-r--r--drivers/net/ethernet/arc/emac_main.c6
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c.h3
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c40
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig2
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c23
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c317
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h54
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c117
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h11
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c53
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c210
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c309
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h18
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c507
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h39
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c95
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c221
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h41
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c189
-rw-r--r--drivers/net/ethernet/broadcom/cnic.h69
-rw-r--r--drivers/net/ethernet/broadcom/cnic_defs.h6
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h12
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c165
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h12
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c10
-rw-r--r--drivers/net/ethernet/brocade/bna/cna.h4
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c2
-rw-r--r--drivers/net/ethernet/cadence/macb.c53
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c195
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/sge.c107
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c287
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_msg.h17
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h23
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/Makefile3
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h55
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_api.c48
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_api.h30
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.h1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c257
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c329
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_res.h9
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.c10
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_dev.h1
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_devcmd.h176
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.c5
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_rq.h5
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.c3
-rw-r--r--drivers/net/ethernet/cisco/enic/vnic_wq.h14
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c2
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c8
-rw-r--r--drivers/net/ethernet/dlink/sundance.c14
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h76
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c500
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h103
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c25
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c783
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.c8
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.h4
-rw-r--r--drivers/net/ethernet/ethoc.c4
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c7
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c8
-rw-r--r--drivers/net/ethernet/freescale/fec.h4
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c284
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx_phy.c4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c21
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c4
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c168
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h16
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c74
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c4
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.h4
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c12
-rw-r--r--drivers/net/ethernet/ibm/emac/mal.c4
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c4
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h19
-rw-r--r--drivers/net/ethernet/icplus/ipg.c2
-rw-r--r--drivers/net/ethernet/intel/e100.c15
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c8
-rw-r--r--drivers/net/ethernet/intel/e1000e/82571.c6
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c107
-rw-r--r--drivers/net/ethernet/intel/e1000e/hw.h11
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c140
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h6
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c130
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c198
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h42
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c155
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c11
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c80
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c31
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h8
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c132
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c151
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c8
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h19
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c34
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c321
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c157
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c133
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c186
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c542
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h46
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c105
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h14
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c4
-rw-r--r--drivers/net/ethernet/jme.c2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c4
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c121
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c21
-rw-r--r--drivers/net/ethernet/marvell/skge.c68
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c177
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c115
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c37
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c1
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c12
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c4
-rw-r--r--drivers/net/ethernet/moxa/Kconfig30
-rw-r--r--drivers/net/ethernet/moxa/Makefile5
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c559
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.h330
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c213
-rw-r--r--drivers/net/ethernet/netx-eth.c2
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c15
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/Kconfig2
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h15
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c1
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c67
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c98
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h1
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c20
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.h2
-rw-r--r--drivers/net/ethernet/qlogic/Kconfig11
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic.h1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c4
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/Makefile2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h319
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c878
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h52
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c388
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c40
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c253
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c1179
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h41
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c308
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c80
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h13
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c39
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c280
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c483
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c15
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c169
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c63
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c25
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h2
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c49
-rw-r--r--drivers/net/ethernet/realtek/r8169.c17
-rw-r--r--drivers/net/ethernet/renesas/Kconfig2
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c71
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h10
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c2
-rw-r--r--drivers/net/ethernet/sfc/Kconfig9
-rw-r--r--drivers/net/ethernet/sfc/Makefile7
-rw-r--r--drivers/net/ethernet/sfc/bitfield.h8
-rw-r--r--drivers/net/ethernet/sfc/ef10.c3043
-rw-r--r--drivers/net/ethernet/sfc/ef10_regs.h415
-rw-r--r--drivers/net/ethernet/sfc/efx.c500
-rw-r--r--drivers/net/ethernet/sfc/efx.h129
-rw-r--r--drivers/net/ethernet/sfc/enum.h10
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c399
-rw-r--r--drivers/net/ethernet/sfc/falcon.c1171
-rw-r--r--drivers/net/ethernet/sfc/falcon_boards.c4
-rw-r--r--drivers/net/ethernet/sfc/falcon_xmac.c362
-rw-r--r--drivers/net/ethernet/sfc/farch.c2942
-rw-r--r--drivers/net/ethernet/sfc/farch_regs.h (renamed from drivers/net/ethernet/sfc/regs.h)272
-rw-r--r--drivers/net/ethernet/sfc/filter.c1272
-rw-r--r--drivers/net/ethernet/sfc/filter.h238
-rw-r--r--drivers/net/ethernet/sfc/io.h50
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c1262
-rw-r--r--drivers/net/ethernet/sfc/mcdi.h313
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mac.c130
-rw-r--r--drivers/net/ethernet/sfc/mcdi_mon.c274
-rw-r--r--drivers/net/ethernet/sfc/mcdi_pcol.h5540
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port.c (renamed from drivers/net/ethernet/sfc/mcdi_phy.c)345
-rw-r--r--drivers/net/ethernet/sfc/mdio_10g.c2
-rw-r--r--drivers/net/ethernet/sfc/mdio_10g.h2
-rw-r--r--drivers/net/ethernet/sfc/mtd.c634
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h408
-rw-r--r--drivers/net/ethernet/sfc/nic.c1902
-rw-r--r--drivers/net/ethernet/sfc/nic.h539
-rw-r--r--drivers/net/ethernet/sfc/phy.h19
-rw-r--r--drivers/net/ethernet/sfc/ptp.c95
-rw-r--r--drivers/net/ethernet/sfc/qt202x_phy.c4
-rw-r--r--drivers/net/ethernet/sfc/rx.c176
-rw-r--r--drivers/net/ethernet/sfc/selftest.c15
-rw-r--r--drivers/net/ethernet/sfc/selftest.h4
-rw-r--r--drivers/net/ethernet/sfc/siena.c711
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c102
-rw-r--r--drivers/net/ethernet/sfc/spi.h99
-rw-r--r--drivers/net/ethernet/sfc/tenxpress.c2
-rw-r--r--drivers/net/ethernet/sfc/tx.c35
-rw-r--r--drivers/net/ethernet/sfc/txc43128_phy.c2
-rw-r--r--drivers/net/ethernet/sfc/vfdi.h2
-rw-r--r--drivers/net/ethernet/sfc/workarounds.h22
-rw-r--r--drivers/net/ethernet/sgi/meth.c5
-rw-r--r--drivers/net/ethernet/sis/sis190.c3
-rw-r--r--drivers/net/ethernet/sis/sis900.c34
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c2
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c2
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c116
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c30
-rw-r--r--drivers/net/ethernet/sun/niu.c10
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c4
-rw-r--r--drivers/net/ethernet/sun/sunhme.c12
-rw-r--r--drivers/net/ethernet/ti/cpmac.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw.c265
-rw-r--r--drivers/net/ethernet/ti/cpsw.h (renamed from include/linux/platform_data/cpsw.h)12
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c1
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c5
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c7
-rw-r--r--drivers/net/ethernet/tile/Kconfig11
-rw-r--r--drivers/net/ethernet/tile/tilegx.c1116
-rw-r--r--drivers/net/ethernet/tile/tilepro.c241
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c3
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.h1
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c10
-rw-r--r--drivers/net/ethernet/via/via-rhine.c2
-rw-r--r--drivers/net/ethernet/via/via-velocity.c24
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c2
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c12
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c14
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c1
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c4
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c2
-rw-r--r--drivers/net/fddi/defxx.c6
-rw-r--r--drivers/net/hyperv/netvsc_drv.c2
-rw-r--r--drivers/net/irda/ali-ircc.c8
-rw-r--r--drivers/net/irda/irtty-sir.c8
-rw-r--r--drivers/net/irda/nsc-ircc.c8
-rw-r--r--drivers/net/irda/pxaficp_ir.c2
-rw-r--r--drivers/net/irda/smsc-ircc2.c8
-rw-r--r--drivers/net/irda/via-ircc.c14
-rw-r--r--drivers/net/irda/w83977af_ir.c8
-rw-r--r--drivers/net/macvlan.c34
-rw-r--r--drivers/net/macvtap.c135
-rw-r--r--drivers/net/netconsole.c13
-rw-r--r--drivers/net/phy/mdio-gpio.c2
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c2
-rw-r--r--drivers/net/phy/mdio-mux-mmioreg.c2
-rw-r--r--drivers/net/phy/mdio-octeon.c4
-rw-r--r--drivers/net/phy/mdio-sun4i.c32
-rw-r--r--drivers/net/phy/micrel.c105
-rw-r--r--drivers/net/phy/realtek.c4
-rw-r--r--drivers/net/ppp/pptp.c12
-rw-r--r--drivers/net/team/team.c203
-rw-r--r--drivers/net/tun.c229
-rw-r--r--drivers/net/usb/Kconfig8
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/asix.h2
-rw-r--r--drivers/net/usb/asix_devices.c5
-rw-r--r--drivers/net/usb/ax88172a.c8
-rw-r--r--drivers/net/usb/ax88179_178a.c41
-rw-r--r--drivers/net/usb/cdc_mbim.c4
-rw-r--r--drivers/net/usb/hso.c15
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c971
-rw-r--r--drivers/net/usb/r815x.c62
-rw-r--r--drivers/net/usb/smsc75xx.c12
-rw-r--r--drivers/net/usb/sr9700.c560
-rw-r--r--drivers/net/usb/sr9700.h173
-rw-r--r--drivers/net/usb/usbnet.c103
-rw-r--r--drivers/net/veth.c1
-rw-r--r--drivers/net/virtio_net.c44
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c211
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h10
-rw-r--r--drivers/net/vxlan.c1339
-rw-r--r--drivers/net/wan/sbni.c2
-rw-r--r--drivers/net/wireless/airo.c2
-rw-r--r--drivers/net/wireless/ath/ath.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.c12
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c321
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h58
-rw-r--r--drivers/net/wireless/ath/ath10k/debug.c87
-rw-r--r--drivers/net/wireless/ath/ath10k/hif.h49
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.c61
-rw-r--r--drivers/net/wireless/ath/ath10k/htc.h8
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.c27
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h3
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c43
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_tx.c14
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c488
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.h1
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.c356
-rw-r--r--drivers/net/wireless/ath/ath10k/pci.h15
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c127
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h24
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c59
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c24
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c25
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/testmode.c3
-rw-r--r--drivers/net/wireless/ath/ath6kl/testmode.h7
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig12
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.h13
-rw-r--r--drivers/net/wireless/ath/ath9k/antenna.c672
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c67
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c31
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c39
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c190
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h68
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c67
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c310
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h33
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c51
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c115
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c71
-rw-r--r--drivers/net/wireless/ath/ath9k/link.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c48
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c157
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h7
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c494
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c573
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c3
-rw-r--r--drivers/net/wireless/ath/carl9170/rx.c4
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile3
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c8
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/trace.h22
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c160
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.h20
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h27
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c22
-rw-r--r--drivers/net/wireless/b43/dma.c6
-rw-r--r--drivers/net/wireless/b43/main.c14
-rw-r--r--drivers/net/wireless/b43legacy/dma.c7
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c16
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c1
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h31
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c281
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c481
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fweh.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h21
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c236
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/p2p.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c1
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c70
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c21
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.c6
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/debug.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c15
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c16
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c399
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c405
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h1
-rw-r--r--drivers/net/wireless/cw1200/bh.c4
-rw-r--r--drivers/net/wireless/cw1200/main.c2
-rw-r--r--drivers/net/wireless/cw1200/sta.c7
-rw-r--r--drivers/net/wireless/cw1200/txrx.c2
-rw-r--r--drivers/net/wireless/cw1200/wsm.h2
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c4
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c6
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c1
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c31
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c36
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c1
-rw-r--r--drivers/net/wireless/iwlegacy/common.c1
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig30
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/agn.h6
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/debugfs.c15
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/dev.h7
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c177
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/main.c64
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c6
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c6
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/scan.c105
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c67
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h25
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/bt-coex.c162
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/constants.h80
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c232
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c277
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h49
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h147
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h255
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c55
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c147
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h113
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c58
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c383
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power_legacy.c319
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c27
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c653
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h80
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c158
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c21
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c37
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c136
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tt.c32
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c13
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c23
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c31
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h1
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c45
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c149
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c60
-rw-r--r--drivers/net/wireless/libertas/mesh.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c15
-rw-r--r--drivers/net/wireless/mwifiex/11n.c16
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c7
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c181
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c45
-rw-r--r--drivers/net/wireless/mwifiex/decl.h12
-rw-r--r--drivers/net/wireless/mwifiex/fw.h72
-rw-r--r--drivers/net/wireless/mwifiex/ie.c2
-rw-r--r--drivers/net/wireless/mwifiex/init.c25
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h43
-rw-r--r--drivers/net/wireless/mwifiex/join.c7
-rw-r--r--drivers/net/wireless/mwifiex/main.c119
-rw-r--r--drivers/net/wireless/mwifiex/main.h12
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c39
-rw-r--r--drivers/net/wireless/mwifiex/scan.c63
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c304
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h3
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c77
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c4
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c10
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c15
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c49
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c130
-rw-r--r--drivers/net/wireless/mwifiex/uap_txrx.c70
-rw-r--r--drivers/net/wireless/mwifiex/usb.c58
-rw-r--r--drivers/net/wireless/mwifiex/util.c4
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c16
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig8
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h279
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c1658
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c19
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c43
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c18
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c6
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/grf5101.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/grf5101.h2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/max2820.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/max2820.h2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8225.c4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/sa2400.c2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/sa2400.h2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c6
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8187.h4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8225.c4
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8225.h4
-rw-r--r--drivers/net/wireless/rtl818x/rtl818x.h4
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig72
-rw-r--r--drivers/net/wireless/rtlwifi/Makefile10
-rw-r--r--drivers/net/wireless/rtlwifi/base.c21
-rw-r--r--drivers/net/wireless/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/rtlwifi/core.c1
-rw-r--r--drivers/net/wireless/rtlwifi/debug.c1
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c1
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c22
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c34
-rw-r--r--drivers/net/wireless/rtlwifi/ps.h1
-rw-r--r--drivers/net/wireless/rtlwifi/rc.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/trx.c9
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.h3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.h3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.c20
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c9
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c13
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/testmode.h3
-rw-r--r--drivers/net/wireless/zd1201.c12
-rw-r--r--drivers/net/xen-netback/common.h150
-rw-r--r--drivers/net/xen-netback/interface.c135
-rw-r--r--drivers/net/xen-netback/netback.c833
-rw-r--r--drivers/nfc/nfcsim.c6
-rw-r--r--drivers/nfc/pn533.c389
-rw-r--r--drivers/nfc/pn544/i2c.c360
-rw-r--r--drivers/nfc/pn544/mei.c2
-rw-r--r--drivers/nfc/pn544/pn544.c20
-rw-r--r--drivers/nfc/pn544/pn544.h7
-rw-r--r--drivers/of/Kconfig6
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/base.c121
-rw-r--r--drivers/of/fdt.c2
-rw-r--r--drivers/of/of_i2c.c114
-rw-r--r--drivers/oprofile/oprof.h3
-rw-r--r--drivers/oprofile/oprofile_files.c26
-rw-r--r--drivers/oprofile/oprofile_perf.c16
-rw-r--r--drivers/oprofile/oprofile_stats.c24
-rw-r--r--drivers/oprofile/oprofile_stats.h3
-rw-r--r--drivers/oprofile/oprofilefs.c44
-rw-r--r--drivers/parisc/iosapic.c38
-rw-r--r--drivers/parisc/lba_pci.c1
-rw-r--r--drivers/parport/Kconfig2
-rw-r--r--drivers/parport/parport_amiga.c1
-rw-r--r--drivers/pci/access.c26
-rw-r--r--drivers/pci/bus.c19
-rw-r--r--drivers/pci/host/Kconfig1
-rw-r--r--drivers/pci/host/Makefile3
-rw-r--r--drivers/pci/host/pci-exynos.c552
-rw-r--r--drivers/pci/host/pci-mvebu.c34
-rw-r--r--drivers/pci/host/pcie-designware.c1044
-rw-r--r--drivers/pci/host/pcie-designware.h65
-rw-r--r--drivers/pci/hotplug/Kconfig7
-rw-r--r--drivers/pci/hotplug/acpiphp.h55
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c18
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c1036
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c3
-rw-r--r--drivers/pci/hotplug/pciehp.h1
-rw-r--r--drivers/pci/hotplug/pciehp_core.c12
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c31
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c9
-rw-r--r--drivers/pci/hotplug/pcihp_slot.c5
-rw-r--r--drivers/pci/hotplug/s390_pci_hpc.c63
-rw-r--r--drivers/pci/iov.c23
-rw-r--r--drivers/pci/pci-acpi.c33
-rw-r--r--drivers/pci/pci-driver.c43
-rw-r--r--drivers/pci/pci-sysfs.c32
-rw-r--r--drivers/pci/pci.c616
-rw-r--r--drivers/pci/pci.h5
-rw-r--r--drivers/pci/pcie/Kconfig7
-rw-r--r--drivers/pci/pcie/aer/aerdrv.c2
-rw-r--r--drivers/pci/pcie/aer/aerdrv.h1
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c35
-rw-r--r--drivers/pci/probe.c84
-rw-r--r--drivers/pci/quirks.c147
-rw-r--r--drivers/pci/setup-bus.c245
-rw-r--r--drivers/pcmcia/cardbus.c1
-rw-r--r--drivers/pinctrl/Kconfig15
-rw-r--r--drivers/pinctrl/Makefile3
-rw-r--r--drivers/pinctrl/core.c72
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-dove.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-mvebu.c39
-rw-r--r--drivers/pinctrl/pinconf-generic.c96
-rw-r--r--drivers/pinctrl/pinconf.c73
-rw-r--r--drivers/pinctrl/pinctrl-abx500.c193
-rw-r--r--drivers/pinctrl/pinctrl-at91.c68
-rw-r--r--drivers/pinctrl/pinctrl-baytrail.c41
-rw-r--r--drivers/pinctrl/pinctrl-bcm2835.c43
-rw-r--r--drivers/pinctrl/pinctrl-exynos.c58
-rw-r--r--drivers/pinctrl/pinctrl-exynos5440.c113
-rw-r--r--drivers/pinctrl/pinctrl-falcon.c70
-rw-r--r--drivers/pinctrl/pinctrl-imx.c179
-rw-r--r--drivers/pinctrl/pinctrl-imx.h36
-rw-r--r--drivers/pinctrl/pinctrl-mxs.c91
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c427
-rw-r--r--drivers/pinctrl/pinctrl-palmas.c1095
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c102
-rw-r--r--drivers/pinctrl/pinctrl-s3c24xx.c2
-rw-r--r--drivers/pinctrl/pinctrl-samsung.c29
-rw-r--r--drivers/pinctrl/pinctrl-samsung.h1
-rw-r--r--drivers/pinctrl/pinctrl-single.c35
-rw-r--r--drivers/pinctrl/pinctrl-st.c27
-rw-r--r--drivers/pinctrl/pinctrl-sunxi-pins.h1848
-rw-r--r--drivers/pinctrl/pinctrl-sunxi.c141
-rw-r--r--drivers/pinctrl/pinctrl-sunxi.h2
-rw-r--r--drivers/pinctrl/pinctrl-tegra.c221
-rw-r--r--drivers/pinctrl/pinctrl-tz1090-pdc.c157
-rw-r--r--drivers/pinctrl/pinctrl-tz1090.c162
-rw-r--r--drivers/pinctrl/pinctrl-u300.c21
-rw-r--r--drivers/pinctrl/pinctrl-utils.c142
-rw-r--r--drivers/pinctrl/pinctrl-utils.h43
-rw-r--r--drivers/pinctrl/pinctrl-xway.c149
-rw-r--r--drivers/pinctrl/pinmux.c51
-rw-r--r--drivers/pinctrl/sh-pfc/core.c101
-rw-r--r--drivers/pinctrl/sh-pfc/core.h10
-rw-r--r--drivers/pinctrl/sh-pfc/gpio.c45
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a73a4.c158
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7740.c48
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7778.c55
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7779.c168
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7790.c1744
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7203.c202
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7264.c246
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7269.c285
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7372.c57
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh73a0.c173
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7720.c701
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7722.c747
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7723.c381
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7724.c1093
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7734.c60
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7757.c709
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7785.c700
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-sh7786.c383
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-shx3.c423
-rw-r--r--drivers/pinctrl/sh-pfc/pinctrl.c91
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h277
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas6.c28
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c10
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c13
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear310.c2
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c54
-rw-r--r--drivers/platform/olpc/olpc-ec.c2
-rw-r--r--drivers/platform/x86/asus-laptop.c1
-rw-r--r--drivers/platform/x86/eeepc-laptop.c1
-rw-r--r--drivers/platform/x86/fujitsu-laptop.c4
-rw-r--r--drivers/platform/x86/hp-wmi.c16
-rw-r--r--drivers/platform/x86/panasonic-laptop.c3
-rw-r--r--drivers/platform/x86/samsung-q10.c2
-rw-r--r--drivers/platform/x86/sony-laptop.c12
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c40
-rw-r--r--drivers/platform/x86/wmi.c10
-rw-r--r--drivers/pnp/driver.c21
-rw-r--r--drivers/pnp/pnpacpi/core.c6
-rw-r--r--drivers/pps/pps.c2
-rw-r--r--drivers/pps/sysfs.c55
-rw-r--r--drivers/ptp/ptp_clock.c2
-rw-r--r--drivers/ptp/ptp_private.h2
-rw-r--r--drivers/ptp/ptp_sysfs.c51
-rw-r--r--drivers/pwm/core.c7
-rw-r--r--drivers/pwm/pwm-lpc32xx.c3
-rw-r--r--drivers/pwm/pwm-mxs.c8
-rw-r--r--drivers/pwm/pwm-pxa.c12
-rw-r--r--drivers/pwm/pwm-renesas-tpu.c46
-rw-r--r--drivers/pwm/pwm-spear.c7
-rw-r--r--drivers/pwm/pwm-tiecap.c2
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c6
-rw-r--r--drivers/pwm/sysfs.c21
-rw-r--r--drivers/rapidio/rio.c4
-rw-r--r--drivers/regulator/88pm800.c383
-rw-r--r--drivers/regulator/88pm8607.c2
-rw-r--r--drivers/regulator/Kconfig284
-rw-r--r--drivers/regulator/Makefile8
-rw-r--r--drivers/regulator/aat2870-regulator.c2
-rw-r--r--drivers/regulator/ab3100.c2
-rw-r--r--drivers/regulator/ad5398.c2
-rw-r--r--drivers/regulator/as3711-regulator.c163
-rw-r--r--drivers/regulator/core.c472
-rw-r--r--drivers/regulator/da903x.c47
-rw-r--r--drivers/regulator/da9052-regulator.c2
-rw-r--r--drivers/regulator/da9055-regulator.c2
-rw-r--r--drivers/regulator/da9063-regulator.c934
-rw-r--r--drivers/regulator/da9210-regulator.c196
-rw-r--r--drivers/regulator/da9210-regulator.h288
-rw-r--r--drivers/regulator/fan53555.c4
-rw-r--r--drivers/regulator/fixed.c2
-rw-r--r--drivers/regulator/gpio-regulator.c2
-rw-r--r--drivers/regulator/helpers.c447
-rw-r--r--drivers/regulator/isl6271a-regulator.c2
-rw-r--r--drivers/regulator/lp3971.c2
-rw-r--r--drivers/regulator/lp3972.c2
-rw-r--r--drivers/regulator/lp872x.c11
-rw-r--r--drivers/regulator/lp8755.c3
-rw-r--r--drivers/regulator/max1586.c2
-rw-r--r--drivers/regulator/max8649.c2
-rw-r--r--drivers/regulator/max8660.c108
-rw-r--r--drivers/regulator/max8925-regulator.c2
-rw-r--r--drivers/regulator/max8952.c2
-rw-r--r--drivers/regulator/max8973-regulator.c2
-rw-r--r--drivers/regulator/of_regulator.c12
-rw-r--r--drivers/regulator/palmas-regulator.c32
-rw-r--r--drivers/regulator/pcap-regulator.c2
-rw-r--r--drivers/regulator/pcf50633-regulator.c2
-rw-r--r--drivers/regulator/pfuze100-regulator.c445
-rw-r--r--drivers/regulator/s2mps11.c245
-rw-r--r--drivers/regulator/ti-abb-regulator.c10
-rw-r--r--drivers/regulator/tps51632-regulator.c2
-rw-r--r--drivers/regulator/tps62360-regulator.c2
-rw-r--r--drivers/regulator/tps65023-regulator.c2
-rw-r--r--drivers/regulator/tps65217-regulator.c182
-rw-r--r--drivers/regulator/tps6524x-regulator.c2
-rw-r--r--drivers/regulator/tps65912-regulator.c39
-rw-r--r--drivers/regulator/twl-regulator.c2
-rw-r--r--drivers/regulator/userspace-consumer.c2
-rw-r--r--drivers/regulator/virtual.c2
-rw-r--r--drivers/regulator/wm831x-dcdc.c8
-rw-r--r--drivers/regulator/wm831x-isink.c2
-rw-r--r--drivers/regulator/wm831x-ldo.c110
-rw-r--r--drivers/regulator/wm8350-regulator.c57
-rw-r--r--drivers/regulator/wm8400-regulator.c52
-rw-r--r--drivers/regulator/wm8994-regulator.c2
-rw-r--r--drivers/remoteproc/da8xx_remoteproc.c2
-rw-r--r--drivers/rtc/rtc-da9052.c2
-rw-r--r--drivers/rtc/rtc-isl12022.c2
-rw-r--r--drivers/rtc/rtc-m48t35.c2
-rw-r--r--drivers/rtc/rtc-pcf8563.c2
-rw-r--r--drivers/rtc/rtc-pcf8583.c2
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c35
-rw-r--r--drivers/rtc/rtc-sysfs.c48
-rw-r--r--drivers/rtc/rtc-twl.c3
-rw-r--r--drivers/s390/block/dasd.c6
-rw-r--r--drivers/s390/block/dasd_devmap.c8
-rw-r--r--drivers/s390/block/dasd_eckd.c54
-rw-r--r--drivers/s390/block/dasd_erp.c14
-rw-r--r--drivers/s390/char/sclp_config.c2
-rw-r--r--drivers/s390/cio/airq.c174
-rw-r--r--drivers/s390/cio/ccwgroup.c2
-rw-r--r--drivers/s390/cio/cio.c46
-rw-r--r--drivers/s390/cio/cio.h3
-rw-r--r--drivers/s390/cio/cmf.c2
-rw-r--r--drivers/s390/cio/css.c4
-rw-r--r--drivers/s390/cio/css.h2
-rw-r--r--drivers/s390/cio/device.c2
-rw-r--r--drivers/s390/net/qeth_l3_sys.c2
-rw-r--r--drivers/s390/scsi/zfcp_aux.c4
-rw-r--r--drivers/s390/scsi/zfcp_erp.c29
-rw-r--r--drivers/s390/scsi/zfcp_ext.h2
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c12
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c12
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c26
-rw-r--r--drivers/scsi/Kconfig2
-rw-r--r--drivers/scsi/Makefile1
-rw-r--r--drivers/scsi/bfa/bfad.c6
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_constants.h2
-rw-r--r--drivers/scsi/bnx2i/57xx_iscsi_hsi.h14
-rw-r--r--drivers/scsi/bnx2i/bnx2i.h2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_init.c18
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/bnx2i/bnx2i_sysfs.c2
-rw-r--r--drivers/scsi/eata_pio.c2
-rw-r--r--drivers/scsi/esas2r/Kconfig5
-rw-r--r--drivers/scsi/esas2r/Makefile5
-rw-r--r--drivers/scsi/esas2r/atioctl.h1254
-rw-r--r--drivers/scsi/esas2r/atvda.h1319
-rw-r--r--drivers/scsi/esas2r/esas2r.h1441
-rw-r--r--drivers/scsi/esas2r/esas2r_disc.c1189
-rw-r--r--drivers/scsi/esas2r/esas2r_flash.c1512
-rw-r--r--drivers/scsi/esas2r/esas2r_init.c1773
-rw-r--r--drivers/scsi/esas2r/esas2r_int.c941
-rw-r--r--drivers/scsi/esas2r/esas2r_io.c880
-rw-r--r--drivers/scsi/esas2r/esas2r_ioctl.c2110
-rw-r--r--drivers/scsi/esas2r/esas2r_log.c254
-rw-r--r--drivers/scsi/esas2r/esas2r_log.h118
-rw-r--r--drivers/scsi/esas2r/esas2r_main.c2032
-rw-r--r--drivers/scsi/esas2r/esas2r_targdb.c306
-rw-r--r--drivers/scsi/esas2r/esas2r_vda.c521
-rw-r--r--drivers/scsi/esp_scsi.c14
-rw-r--r--drivers/scsi/esp_scsi.h1
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/fnic_main.c22
-rw-r--r--drivers/scsi/hpsa.c8
-rw-r--r--drivers/scsi/hpsa.h2
-rw-r--r--drivers/scsi/ipr.c14
-rw-r--r--drivers/scsi/ipr.h7
-rw-r--r--drivers/scsi/isci/port_config.c2
-rw-r--r--drivers/scsi/isci/request.c2
-rw-r--r--drivers/scsi/isci/task.c9
-rw-r--r--drivers/scsi/libiscsi.c109
-rw-r--r--drivers/scsi/lpfc/lpfc.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c57
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c8
-rw-r--r--drivers/scsi/lpfc/lpfc_ct.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_disc.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c2
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h13
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c42
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c7
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c12
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c147
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.h7
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h6
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c5
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c20
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h7
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h10
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_init.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_ioc.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_raid.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_sas.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_tool.h2
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_type.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c40
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h10
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_config.c2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c14
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_debug.h2
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c82
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c7
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_base.c41
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c1
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_transport.c5
-rw-r--r--drivers/scsi/mvsas/mv_sas.c11
-rw-r--r--drivers/scsi/mvsas/mv_sas.h1
-rw-r--r--drivers/scsi/osd/osd_uld.c13
-rw-r--r--drivers/scsi/pm8001/pm8001_hwi.c4
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c5
-rw-r--r--drivers/scsi/pm8001/pm80xx_hwi.c4
-rw-r--r--drivers/scsi/qla2xxx/Makefile2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c81
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c43
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c58
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h39
-rw-r--r--drivers/scsi/qla2xxx/qla_fw.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h74
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c97
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c86
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c28
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c91
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c297
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.c164
-rw-r--r--drivers/scsi/qla2xxx/qla_mr.h41
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c117
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h10
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.c3716
-rw-r--r--drivers/scsi/qla2xxx/qla_nx2.h551
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c213
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c162
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c69
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h4
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c14
-rw-r--r--drivers/scsi/qla4xxx/ql4_83xx.c12
-rw-r--r--drivers/scsi/qla4xxx/ql4_83xx.h36
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c90
-rw-r--r--drivers/scsi/qla4xxx/ql4_bsg.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_dbg.c13
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h29
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h10
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h10
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c6
-rw-r--r--drivers/scsi/qla4xxx/ql4_inline.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c65
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c171
-rw-r--r--drivers/scsi/qla4xxx/ql4_nvram.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_nvram.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c297
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c357
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h4
-rw-r--r--drivers/scsi/scsi.c3
-rw-r--r--drivers/scsi/scsi_debug.c14
-rw-r--r--drivers/scsi/scsi_error.c128
-rw-r--r--drivers/scsi/scsi_lib.c70
-rw-r--r--drivers/scsi/scsi_sysfs.c10
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c123
-rw-r--r--drivers/scsi/sd.c171
-rw-r--r--drivers/scsi/sg.c176
-rw-r--r--drivers/scsi/st.c27
-rw-r--r--drivers/scsi/storvsc_drv.c1
-rw-r--r--drivers/scsi/ufs/ufs.h155
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c99
-rw-r--r--drivers/scsi/ufs/ufshcd-pltfrm.c57
-rw-r--r--drivers/scsi/ufs/ufshcd.c1170
-rw-r--r--drivers/scsi/ufs/ufshcd.h59
-rw-r--r--drivers/scsi/ufs/ufshci.h2
-rw-r--r--drivers/scsi/virtio_scsi.c2
-rw-r--r--drivers/spi/Kconfig63
-rw-r--r--drivers/spi/Makefile4
-rw-r--r--drivers/spi/spi-altera.c48
-rw-r--r--drivers/spi/spi-ath79.c2
-rw-r--r--drivers/spi/spi-atmel.c36
-rw-r--r--drivers/spi/spi-au1550.c2
-rw-r--r--drivers/spi/spi-bcm2835.c10
-rw-r--r--drivers/spi/spi-bcm63xx.c37
-rw-r--r--drivers/spi/spi-bfin-sport.c2
-rw-r--r--drivers/spi/spi-bfin-v3.c965
-rw-r--r--drivers/spi/spi-bfin5xx.c2
-rw-r--r--drivers/spi/spi-bitbang.c260
-rw-r--r--drivers/spi/spi-clps711x.c6
-rw-r--r--drivers/spi/spi-coldfire-qspi.c27
-rw-r--r--drivers/spi/spi-davinci.c6
-rw-r--r--drivers/spi/spi-efm32.c516
-rw-r--r--drivers/spi/spi-ep93xx.c355
-rw-r--r--drivers/spi/spi-fsl-dspi.c557
-rw-r--r--drivers/spi/spi-fsl-espi.c4
-rw-r--r--drivers/spi/spi-fsl-lib.c2
-rw-r--r--drivers/spi/spi-fsl-spi.c15
-rw-r--r--drivers/spi/spi-gpio.c4
-rw-r--r--drivers/spi/spi-imx.c76
-rw-r--r--drivers/spi/spi-mpc512x-psc.c50
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c2
-rw-r--r--drivers/spi/spi-mxs.c28
-rw-r--r--drivers/spi/spi-nuc900.c17
-rw-r--r--drivers/spi/spi-oc-tiny.c24
-rw-r--r--drivers/spi/spi-octeon.c49
-rw-r--r--drivers/spi/spi-omap-100k.c278
-rw-r--r--drivers/spi/spi-omap2-mcspi.c22
-rw-r--r--drivers/spi/spi-orion.c23
-rw-r--r--drivers/spi/spi-pl022.c28
-rw-r--r--drivers/spi/spi-pxa2xx.c21
-rw-r--r--drivers/spi/spi-rspi.c21
-rw-r--r--drivers/spi/spi-s3c24xx.c6
-rw-r--r--drivers/spi/spi-s3c64xx.c120
-rw-r--r--drivers/spi/spi-sh-hspi.c20
-rw-r--r--drivers/spi/spi-sh-msiof.c20
-rw-r--r--drivers/spi/spi-sh-sci.c2
-rw-r--r--drivers/spi/spi-sirf.c233
-rw-r--r--drivers/spi/spi-tegra114.c18
-rw-r--r--drivers/spi/spi-tegra20-sflash.c8
-rw-r--r--drivers/spi/spi-tegra20-slink.c8
-rw-r--r--drivers/spi/spi-ti-qspi.c574
-rw-r--r--drivers/spi/spi-ti-ssp.c2
-rw-r--r--drivers/spi/spi-tle62x0.c5
-rw-r--r--drivers/spi/spi-topcliff-pch.c2
-rw-r--r--drivers/spi/spi-txx9.c2
-rw-r--r--drivers/spi/spi-xilinx.c188
-rw-r--r--drivers/spi/spi.c137
-rw-r--r--drivers/spi/spidev.c2
-rw-r--r--drivers/ssb/Kconfig2
-rw-r--r--drivers/ssb/driver_chipcommon_sflash.c8
-rw-r--r--drivers/staging/Kconfig14
-rw-r--r--drivers/staging/Makefile7
-rw-r--r--drivers/staging/android/binder.c32
-rw-r--r--drivers/staging/android/binder.h48
-rw-r--r--drivers/staging/android/logger.c4
-rw-r--r--drivers/staging/android/sw_sync.c2
-rw-r--r--drivers/staging/android/sync.c2
-rw-r--r--drivers/staging/android/timed_output.c27
-rw-r--r--drivers/staging/asus_oled/Kconfig6
-rw-r--r--drivers/staging/asus_oled/Makefile1
-rw-r--r--drivers/staging/asus_oled/README156
-rw-r--r--drivers/staging/asus_oled/TODO10
-rw-r--r--drivers/staging/asus_oled/asus_oled.c847
-rw-r--r--drivers/staging/asus_oled/linux.txt33
-rw-r--r--drivers/staging/asus_oled/linux_f.txt18
-rw-r--r--drivers/staging/asus_oled/linux_fr.txt33
-rw-r--r--drivers/staging/asus_oled/tux.txt33
-rw-r--r--drivers/staging/asus_oled/tux_r.txt33
-rw-r--r--drivers/staging/asus_oled/tux_r2.txt33
-rw-r--r--drivers/staging/asus_oled/zig.txt33
-rw-r--r--drivers/staging/bcm/Bcmchar.c4
-rw-r--r--drivers/staging/bcm/DDRInit.c54
-rw-r--r--drivers/staging/bcm/Ioctl.h2
-rw-r--r--drivers/staging/bcm/LeakyBucket.c262
-rw-r--r--drivers/staging/bcm/Misc.c2
-rw-r--r--drivers/staging/bcm/Qos.c538
-rw-r--r--drivers/staging/bcm/Version.h29
-rw-r--r--drivers/staging/bcm/headers.h3
-rw-r--r--drivers/staging/bcm/nvm.c2
-rw-r--r--drivers/staging/comedi/Kconfig44
-rw-r--r--drivers/staging/comedi/TODO2
-rw-r--r--drivers/staging/comedi/comedi_buf.c2
-rw-r--r--drivers/staging/comedi/comedi_fops.c72
-rw-r--r--drivers/staging/comedi/comedi_internal.h1
-rw-r--r--drivers/staging/comedi/comedidev.h23
-rw-r--r--drivers/staging/comedi/comedilib.h7
-rw-r--r--drivers/staging/comedi/drivers.c96
-rw-r--r--drivers/staging/comedi/drivers/8255.c45
-rw-r--r--drivers/staging/comedi/drivers/8255_pci.c4
-rw-r--r--drivers/staging/comedi/drivers/Makefile3
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_82x54.c1068
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Chrono.c2050
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Dig_io.c1037
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_INCCPT.c5461
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Inp_cpt.c866
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Pwm.c3582
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Ssi.c845
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.c2065
-rw-r--r--drivers/staging/comedi/drivers/addi-data/APCI1710_Ttl.c1044
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.c3
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.h169
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_eeprom.c2
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_APCI1710.c1314
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_035.c1
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1032.c4
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1500.c1
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1516.c4
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1564.c1
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_16xx.c43
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_1710.c99
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2032.c2
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_2200.c1
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3120.c4
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3200.c1
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3501.c4
-rw-r--r--drivers/staging/comedi/drivers/addi_apci_3xxx.c40
-rw-r--r--drivers/staging/comedi/drivers/addi_watchdog.c1
-rw-r--r--drivers/staging/comedi/drivers/adl_pci6208.c4
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7x3x.c1
-rw-r--r--drivers/staging/comedi/drivers/adl_pci8164.c1
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c4
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9118.c7
-rw-r--r--drivers/staging/comedi/drivers/adq12b.c6
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c4
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1723.c47
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1724.c11
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c4
-rw-r--r--drivers/staging/comedi/drivers/aio_aio12_8.c5
-rw-r--r--drivers/staging/comedi/drivers/aio_iiro_16.c2
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200.c6
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200_common.c44
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200_pci.c5
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc236.c7
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc263.c1
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c7
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c4
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci263.c1
-rw-r--r--drivers/staging/comedi/drivers/c6xdigio.c2
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c36
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas.c4
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c29
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidda.c4
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c5
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdda.c4
-rw-r--r--drivers/staging/comedi/drivers/comedi_bond.c367
-rw-r--r--drivers/staging/comedi/drivers/comedi_fc.c1
-rw-r--r--drivers/staging/comedi/drivers/comedi_parport.c5
-rw-r--r--drivers/staging/comedi/drivers/comedi_test.c4
-rw-r--r--drivers/staging/comedi/drivers/contec_pci_dio.c1
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c4
-rw-r--r--drivers/staging/comedi/drivers/das08.c2
-rw-r--r--drivers/staging/comedi/drivers/das08_cs.c6
-rw-r--r--drivers/staging/comedi/drivers/das08_isa.c4
-rw-r--r--drivers/staging/comedi/drivers/das08_pci.c4
-rw-r--r--drivers/staging/comedi/drivers/das16.c1999
-rw-r--r--drivers/staging/comedi/drivers/das16m1.c5
-rw-r--r--drivers/staging/comedi/drivers/das1800.c5
-rw-r--r--drivers/staging/comedi/drivers/das6402.c6
-rw-r--r--drivers/staging/comedi/drivers/das800.c5
-rw-r--r--drivers/staging/comedi/drivers/dmm32at.c41
-rw-r--r--drivers/staging/comedi/drivers/dt2801.c36
-rw-r--r--drivers/staging/comedi/drivers/dt2811.c6
-rw-r--r--drivers/staging/comedi/drivers/dt2814.c5
-rw-r--r--drivers/staging/comedi/drivers/dt2815.c5
-rw-r--r--drivers/staging/comedi/drivers/dt2817.c33
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c36
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c45
-rw-r--r--drivers/staging/comedi/drivers/dt9812.c27
-rw-r--r--drivers/staging/comedi/drivers/dyna_pci10xx.c5
-rw-r--r--drivers/staging/comedi/drivers/fl512.c5
-rw-r--r--drivers/staging/comedi/drivers/gsc_hpdi.c37
-rw-r--r--drivers/staging/comedi/drivers/icp_multi.c4
-rw-r--r--drivers/staging/comedi/drivers/ii_pci20kc.c1015
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c4
-rw-r--r--drivers/staging/comedi/drivers/ke_counter.c1
-rw-r--r--drivers/staging/comedi/drivers/me4000.c127
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c54
-rw-r--r--drivers/staging/comedi/drivers/mite.c1
-rw-r--r--drivers/staging/comedi/drivers/mite.h1
-rw-r--r--drivers/staging/comedi/drivers/mpc624.c5
-rw-r--r--drivers/staging/comedi/drivers/multiq3.c6
-rw-r--r--drivers/staging/comedi/drivers/ni_6527.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_65xx.c5
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_670x.c30
-rw-r--r--drivers/staging/comedi/drivers/ni_at_a2150.c7
-rw-r--r--drivers/staging/comedi/drivers/ni_at_ao.c61
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_atmio16d.c28
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_700.c28
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c1
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c283
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c5
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_isadma.c226
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_isadma.h57
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_pci.c5
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_regs.h75
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c61
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_cs.c1
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c33
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c1
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c3
-rw-r--r--drivers/staging/comedi/drivers/ni_tiocmd.c1
-rw-r--r--drivers/staging/comedi/drivers/pcl711.c5
-rw-r--r--drivers/staging/comedi/drivers/pcl724.c13
-rw-r--r--drivers/staging/comedi/drivers/pcl726.c6
-rw-r--r--drivers/staging/comedi/drivers/pcl730.c3
-rw-r--r--drivers/staging/comedi/drivers/pcl812.c6
-rw-r--r--drivers/staging/comedi/drivers/pcl816.c5
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c5
-rw-r--r--drivers/staging/comedi/drivers/pcm3724.c52
-rw-r--r--drivers/staging/comedi/drivers/pcmad.c1
-rw-r--r--drivers/staging/comedi/drivers/pcmda12.c4
-rw-r--r--drivers/staging/comedi/drivers/pcmmio.c86
-rw-r--r--drivers/staging/comedi/drivers/pcmuio.c39
-rw-r--r--drivers/staging/comedi/drivers/poc.c6
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c4
-rw-r--r--drivers/staging/comedi/drivers/rtd520.c24
-rw-r--r--drivers/staging/comedi/drivers/rti800.c7
-rw-r--r--drivers/staging/comedi/drivers/rti802.c6
-rw-r--r--drivers/staging/comedi/drivers/s526.c50
-rw-r--r--drivers/staging/comedi/drivers/s626.c27
-rw-r--r--drivers/staging/comedi/drivers/s626.h2
-rw-r--r--drivers/staging/comedi/drivers/serial2002.c5
-rw-r--r--drivers/staging/comedi/drivers/skel.c49
-rw-r--r--drivers/staging/comedi/drivers/ssv_dnp.c79
-rw-r--r--drivers/staging/comedi/drivers/unioxx5.c4
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c2485
-rw-r--r--drivers/staging/comedi/drivers/usbduxfast.c3
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c317
-rw-r--r--drivers/staging/comedi/drivers/vmk80xx.c3
-rw-r--r--drivers/staging/comedi/kcomedilib/kcomedilib_main.c60
-rw-r--r--drivers/staging/comedi/proc.c2
-rw-r--r--drivers/staging/comedi/range.c43
-rw-r--r--drivers/staging/crystalhd/crystalhd_cmds.c3
-rw-r--r--drivers/staging/crystalhd/crystalhd_hw.c7
-rw-r--r--drivers/staging/crystalhd/crystalhd_lnx.c3
-rw-r--r--drivers/staging/crystalhd/crystalhd_misc.h4
-rw-r--r--drivers/staging/cxt1e1/Makefile1
-rw-r--r--drivers/staging/cxt1e1/comet.c23
-rw-r--r--drivers/staging/cxt1e1/functions.c9
-rw-r--r--drivers/staging/cxt1e1/hwprobe.c8
-rw-r--r--drivers/staging/cxt1e1/linux.c52
-rw-r--r--drivers/staging/cxt1e1/musycc.c15
-rw-r--r--drivers/staging/cxt1e1/pmc93x6_eeprom.c15
-rw-r--r--drivers/staging/cxt1e1/pmcc4_drv.c11
-rw-r--r--drivers/staging/cxt1e1/sbeid.c7
-rw-r--r--drivers/staging/dgap/Kconfig6
-rw-r--r--drivers/staging/dgap/Makefile9
-rw-r--r--drivers/staging/dgap/dgap_conf.h290
-rw-r--r--drivers/staging/dgap/dgap_downld.h69
-rw-r--r--drivers/staging/dgap/dgap_driver.c1048
-rw-r--r--drivers/staging/dgap/dgap_driver.h618
-rw-r--r--drivers/staging/dgap/dgap_fep5.c1953
-rw-r--r--drivers/staging/dgap/dgap_fep5.h253
-rw-r--r--drivers/staging/dgap/dgap_kcompat.h93
-rw-r--r--drivers/staging/dgap/dgap_parse.c1371
-rw-r--r--drivers/staging/dgap/dgap_parse.h35
-rw-r--r--drivers/staging/dgap/dgap_pci.h92
-rw-r--r--drivers/staging/dgap/dgap_sysfs.c793
-rw-r--r--drivers/staging/dgap/dgap_sysfs.h48
-rw-r--r--drivers/staging/dgap/dgap_trace.c185
-rw-r--r--drivers/staging/dgap/dgap_trace.h36
-rw-r--r--drivers/staging/dgap/dgap_tty.c3597
-rw-r--r--drivers/staging/dgap/dgap_tty.h39
-rw-r--r--drivers/staging/dgap/dgap_types.h36
-rw-r--r--drivers/staging/dgap/digi.h376
-rw-r--r--drivers/staging/dgap/downld.c798
-rw-r--r--drivers/staging/dgnc/Kconfig6
-rw-r--r--drivers/staging/dgnc/Makefile7
-rw-r--r--drivers/staging/dgnc/TODO17
-rw-r--r--drivers/staging/dgnc/dgnc_cls.c1409
-rw-r--r--drivers/staging/dgnc/dgnc_cls.h90
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c958
-rw-r--r--drivers/staging/dgnc/dgnc_driver.h563
-rw-r--r--drivers/staging/dgnc/dgnc_kcompat.h93
-rw-r--r--drivers/staging/dgnc/dgnc_mgmt.c305
-rw-r--r--drivers/staging/dgnc/dgnc_mgmt.h31
-rw-r--r--drivers/staging/dgnc/dgnc_neo.c1974
-rw-r--r--drivers/staging/dgnc/dgnc_neo.h157
-rw-r--r--drivers/staging/dgnc/dgnc_pci.h75
-rw-r--r--drivers/staging/dgnc/dgnc_sysfs.c756
-rw-r--r--drivers/staging/dgnc/dgnc_sysfs.h49
-rw-r--r--drivers/staging/dgnc/dgnc_trace.c184
-rw-r--r--drivers/staging/dgnc/dgnc_trace.h44
-rw-r--r--drivers/staging/dgnc/dgnc_tty.c3544
-rw-r--r--drivers/staging/dgnc/dgnc_tty.h42
-rw-r--r--drivers/staging/dgnc/dgnc_types.h36
-rw-r--r--drivers/staging/dgnc/digi.h416
-rw-r--r--drivers/staging/dgnc/dpacompat.h115
-rw-r--r--drivers/staging/dgrp/dgrp_driver.c14
-rw-r--r--drivers/staging/dgrp/dgrp_tty.c2
-rw-r--r--drivers/staging/dwc2/Kconfig1
-rw-r--r--drivers/staging/dwc2/core.c449
-rw-r--r--drivers/staging/dwc2/core.h221
-rw-r--r--drivers/staging/dwc2/core_intr.c4
-rw-r--r--drivers/staging/dwc2/hcd.c281
-rw-r--r--drivers/staging/dwc2/hcd.h45
-rw-r--r--drivers/staging/dwc2/hcd_ddma.c31
-rw-r--r--drivers/staging/dwc2/hcd_intr.c129
-rw-r--r--drivers/staging/dwc2/hcd_queue.c22
-rw-r--r--drivers/staging/dwc2/hw.h156
-rw-r--r--drivers/staging/dwc2/pci.c4
-rw-r--r--drivers/staging/et131x/README1
-rw-r--r--drivers/staging/frontier/alphatrack.c2
-rw-r--r--drivers/staging/frontier/tranzport.c10
-rw-r--r--drivers/staging/gdm724x/Kconfig15
-rw-r--r--drivers/staging/gdm724x/Makefile7
-rw-r--r--drivers/staging/gdm724x/TODO16
-rw-r--r--drivers/staging/gdm724x/gdm_endian.c67
-rw-r--r--drivers/staging/gdm724x/gdm_endian.h49
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c877
-rw-r--r--drivers/staging/gdm724x/gdm_lte.h81
-rw-r--r--drivers/staging/gdm724x/gdm_mux.c690
-rw-r--r--drivers/staging/gdm724x/gdm_mux.h95
-rw-r--r--drivers/staging/gdm724x/gdm_tty.c343
-rw-r--r--drivers/staging/gdm724x/gdm_tty.h71
-rw-r--r--drivers/staging/gdm724x/gdm_usb.c1049
-rw-r--r--drivers/staging/gdm724x/gdm_usb.h109
-rw-r--r--drivers/staging/gdm724x/hci.h55
-rw-r--r--drivers/staging/gdm724x/hci_packet.h93
-rw-r--r--drivers/staging/gdm724x/netlink_k.c149
-rw-r--r--drivers/staging/gdm724x/netlink_k.h25
-rw-r--r--drivers/staging/gdm72xx/gdm_qos.c2
-rw-r--r--drivers/staging/iio/Documentation/device.txt4
-rw-r--r--drivers/staging/iio/accel/adis16201_core.c17
-rw-r--r--drivers/staging/iio/accel/adis16203_core.c16
-rw-r--r--drivers/staging/iio/accel/adis16204_core.c16
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c16
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c14
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c16
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c15
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c15
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c45
-rw-r--r--drivers/staging/iio/adc/spear_adc.c30
-rw-r--r--drivers/staging/iio/addac/adt7316.c27
-rw-r--r--drivers/staging/iio/cdc/ad7150.c36
-rw-r--r--drivers/staging/iio/cdc/ad7152.c16
-rw-r--r--drivers/staging/iio/cdc/ad7746.c18
-rw-r--r--drivers/staging/iio/gyro/Kconfig12
-rw-r--r--drivers/staging/iio/gyro/Makefile3
-rw-r--r--drivers/staging/iio/gyro/adis16060_core.c17
-rw-r--r--drivers/staging/iio/gyro/adis16260.h98
-rw-r--r--drivers/staging/iio/gyro/adis16260_platform_data.h19
-rw-r--r--drivers/staging/iio/light/isl29018.c16
-rw-r--r--drivers/staging/iio/light/isl29028.c13
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843.c71
-rw-r--r--drivers/staging/iio/meter/ade7753.c18
-rw-r--r--drivers/staging/iio/meter/ade7754.c19
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c18
-rw-r--r--drivers/staging/iio/meter/ade7758_ring.c2
-rw-r--r--drivers/staging/iio/meter/ade7759.c18
-rw-r--r--drivers/staging/iio/meter/ade7854.c19
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c18
-rw-r--r--drivers/staging/imx-drm/Kconfig5
-rw-r--r--drivers/staging/imx-drm/TODO1
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c26
-rw-r--r--drivers/staging/imx-drm/imx-ldb.c3
-rw-r--r--drivers/staging/imx-drm/imx-tve.c27
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-common.c17
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-dc.c5
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-di.c4
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-dp.c13
-rw-r--r--drivers/staging/imx-drm/ipuv3-crtc.c4
-rw-r--r--drivers/staging/imx-drm/parallel-display.c1
-rw-r--r--drivers/staging/keucr/scsiglue.c13
-rw-r--r--drivers/staging/line6/driver.c9
-rw-r--r--drivers/staging/line6/driver.h3
-rw-r--r--drivers/staging/line6/pcm.c27
-rw-r--r--drivers/staging/line6/pod.c21
-rw-r--r--drivers/staging/lustre/Makefile2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/bitmap.h4
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/curproc.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h46
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h46
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h8
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h68
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h24
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h7
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/kp30.h59
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h3
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h9
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-lock.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h4
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-prim.h158
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-tcpip.h15
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h1
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/portals_compat25.h15
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/lucache.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/params_tree.h4
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-types.h6
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lnetst.h2
-rw-r--r--drivers/staging/lustre/include/linux/lnet/ptllnd.h4
-rw-r--r--drivers/staging/lustre/lnet/Makefile2
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c10
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h1
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c14
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c2
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c60
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c112
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c11
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h3
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c62
-rw-r--r--drivers/staging/lustre/lnet/lnet/Makefile2
-rw-r--r--drivers/staging/lustre/lnet/lnet/acceptor.c62
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-errno.c39
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c7
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-eq.c14
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-msg.c13
-rw-r--r--drivers/staging/lustre/lnet/lnet/module.c9
-rw-r--r--drivers/staging/lustre/lnet/lnet/router_proc.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c2
-rw-r--r--drivers/staging/lustre/lnet/selftest/module.c4
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c6
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.c2
-rw-r--r--drivers/staging/lustre/lustre/Kconfig13
-rw-r--r--drivers/staging/lustre/lustre/Makefile4
-rw-r--r--drivers/staging/lustre/lustre/fid/Makefile2
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_handler.c661
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_internal.h36
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_lib.c14
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c164
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_store.c259
-rw-r--r--drivers/staging/lustre/lustre/fid/lproc_fid.c28
-rw-r--r--drivers/staging/lustre/lustre/fld/Makefile2
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c45
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_handler.c447
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_index.c426
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h31
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c90
-rw-r--r--drivers/staging/lustre/lustre/fld/lproc_fld.c217
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h18
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lprocfs_status.h1
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_compat25.h103
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_fsfilt.h12
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_handles.h1
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_lib.h10
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_lite.h2
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_net.h1
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h2
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_quota.h1
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lustre_user.h19
-rw-r--r--drivers/staging/lustre/lustre/include/linux/lvfs.h8
-rw-r--r--drivers/staging/lustre/lustre/include/linux/obd.h5
-rw-r--r--drivers/staging/lustre/lustre/include/lprocfs_status.h73
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h25
-rw-r--r--drivers/staging/lustre/lustre/include/lu_ref.h14
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_errno.h215
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h82
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h12
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_cfg.h30
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h56
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h204
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm_flags.h460
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h125
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fld.h45
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_idmap.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h5
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_log.h32
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h12
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdt.h84
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h47
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_quota.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_sec.h2
-rw-r--r--drivers/staging/lustre/lustre/include/md_object.h7
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h185
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h392
-rw-r--r--drivers/staging/lustre/lustre/include/obd_lov.h10
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h1
-rw-r--r--drivers/staging/lustre/lustre/lclient/glimpse.c19
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_cl.c43
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_misc.c8
-rw-r--r--drivers/staging/lustre/lustre/ldlm/interval_tree.c50
-rw-r--r--drivers/staging/lustre/lustre/ldlm/l_lock.c8
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c10
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c66
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h49
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c79
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c394
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c120
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_pool.c137
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c181
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c95
-rw-r--r--drivers/staging/lustre/lustre/libcfs/Makefile2
-rw-r--r--drivers/staging/lustre/lustre/libcfs/debug.c19
-rw-r--r--drivers/staging/lustre/lustre/libcfs/hash.c24
-rw-r--r--drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c15
-rw-r--r--drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c3
-rw-r--r--drivers/staging/lustre/lustre/libcfs/libcfs_lock.c3
-rw-r--r--drivers/staging/lustre/lustre/libcfs/libcfs_mem.c3
-rw-r--r--drivers/staging/lustre/lustre/libcfs/libcfs_string.c49
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c46
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c2
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c21
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c69
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-module.c31
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c21
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c4
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c5
-rw-r--r--drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c16
-rw-r--r--drivers/staging/lustre/lustre/libcfs/module.c28
-rw-r--r--drivers/staging/lustre/lustre/libcfs/nidstrings.c14
-rw-r--r--drivers/staging/lustre/lustre/libcfs/tracefile.c13
-rw-r--r--drivers/staging/lustre/lustre/libcfs/upcall_cache.c22
-rw-r--r--drivers/staging/lustre/lustre/libcfs/watchdog.c516
-rw-r--r--drivers/staging/lustre/lustre/libcfs/workitem.c3
-rw-r--r--drivers/staging/lustre/lustre/llite/Makefile2
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c80
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c251
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c454
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_capa.c26
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_close.c23
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h32
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c172
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c67
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_nfs.c76
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c15
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c38
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c97
-rw-r--r--drivers/staging/lustre/lustre/llite/remote_perm.c27
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c44
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c35
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c88
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c3
-rw-r--r--drivers/staging/lustre/lustre/llite/symlink.c16
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c5
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c59
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_lock.c3
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_object.c8
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c10
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c48
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_fld.c7
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_intent.c38
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c634
-rw-r--r--drivers/staging/lustre/lustre/lmv/lproc_lmv.c1
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h13
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c33
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c23
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c111
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_lock.c69
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_log.c16
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c10
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c300
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c112
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_offset.c5
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c82
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_page.c13
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c40
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c157
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_dev.c12
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_lock.c31
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_object.c14
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_page.c3
-rw-r--r--drivers/staging/lustre/lustre/lov/lproc_lov.c1
-rw-r--r--drivers/staging/lustre/lustre/lvfs/fsfilt.c5
-rw-r--r--drivers/staging/lustre/lustre/lvfs/fsfilt_ext3.c1
-rw-r--r--drivers/staging/lustre/lustre/lvfs/lvfs_linux.c15
-rw-r--r--drivers/staging/lustre/lustre/mdc/lproc_mdc.c10
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c29
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c151
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_reint.c46
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c324
-rw-r--r--drivers/staging/lustre/lustre/mgc/libmgc.c19
-rw-r--r--drivers/staging/lustre/lustre/mgc/lproc_mgc.c1
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c169
-rw-r--r--drivers/staging/lustre/lustre/obdclass/Makefile2
-rw-r--r--drivers/staging/lustre/lustre/obdclass/acl.c29
-rw-r--r--drivers/staging/lustre/lustre/obdclass/capa.c61
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c152
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_lock.c144
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c21
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c108
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c35
-rw-r--r--drivers/staging/lustre/lustre/obdclass/dt_object.c28
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c79
-rw-r--r--drivers/staging/lustre/lustre/obdclass/idmap.c21
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c48
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c10
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c124
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_cat.c92
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_ioctl.c79
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_lvfs.c99
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_obd.c37
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_osd.c121
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_swab.c28
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog_test.c89
-rw-r--r--drivers/staging/lustre/lustre/obdclass/local_storage.c46
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_jobstats.c562
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c61
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c46
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_ucred.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_handles.c8
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_peer.c3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/md_attrs.c17
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c148
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c134
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obdo.c16
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo.c47
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c248
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c3
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c141
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h4
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_dev.c11
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c28
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c146
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c3
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c28
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_quota.c31
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c326
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/Makefile1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c161
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/connection.c24
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/errno.c380
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c12
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_api.h2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_bulk.c54
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_cli_upcall.c27
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c47
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c124
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_pipefs.c53
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_rawobj.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/gss_svc_upcall.c12
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/lproc_gss.c6
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c219
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c61
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_client.c11
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_net.c3
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/llog_server.c46
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c15
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/niobuf.c38
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c105
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c34
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pinger.c82
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c11
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c37
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/recover.c23
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec.c123
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c4
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_config.c47
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_gc.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c90
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c170
-rw-r--r--drivers/staging/media/Kconfig2
-rw-r--r--drivers/staging/media/Makefile1
-rw-r--r--drivers/staging/media/lirc/lirc_igorplugusb.c56
-rw-r--r--drivers/staging/media/msi3101/Kconfig3
-rw-r--r--drivers/staging/media/msi3101/Makefile1
-rw-r--r--drivers/staging/media/msi3101/sdr-msi3101.c1931
-rw-r--r--drivers/staging/nvec/nvec.c5
-rw-r--r--drivers/staging/octeon-usb/Kconfig2
-rw-r--r--drivers/staging/octeon-usb/cvmx-usb.c5339
-rw-r--r--drivers/staging/octeon-usb/cvmx-usb.h1157
-rw-r--r--drivers/staging/octeon-usb/cvmx-usbcx-defs.h23
-rw-r--r--drivers/staging/octeon-usb/cvmx-usbnx-defs.h2
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c33
-rw-r--r--drivers/staging/olpc_dcon/Kconfig11
-rw-r--r--drivers/staging/olpc_dcon/TODO11
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c24
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h27
-rw-r--r--drivers/staging/ozwpan/Makefile (renamed from drivers/staging/ozwpan/Kbuild)6
-rw-r--r--drivers/staging/ozwpan/ozcdev.c134
-rw-r--r--drivers/staging/ozwpan/ozconfig.h26
-rw-r--r--drivers/staging/ozwpan/ozdbg.h54
-rw-r--r--drivers/staging/ozwpan/ozeltbuf.c80
-rw-r--r--drivers/staging/ozwpan/ozhcd.c722
-rw-r--r--drivers/staging/ozwpan/ozhcd.h4
-rw-r--r--drivers/staging/ozwpan/ozmain.c18
-rw-r--r--drivers/staging/ozwpan/ozpd.c276
-rw-r--r--drivers/staging/ozwpan/ozpd.h21
-rw-r--r--drivers/staging/ozwpan/ozproto.c526
-rw-r--r--drivers/staging/ozwpan/ozproto.h32
-rw-r--r--drivers/staging/ozwpan/oztrace.c36
-rw-r--r--drivers/staging/ozwpan/oztrace.h35
-rw-r--r--drivers/staging/ozwpan/ozurbparanoia.c23
-rw-r--r--drivers/staging/ozwpan/ozurbparanoia.h4
-rw-r--r--drivers/staging/ozwpan/ozusbsvc.c76
-rw-r--r--drivers/staging/ozwpan/ozusbsvc1.c64
-rw-r--r--drivers/staging/quickstart/quickstart.c21
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c2
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c2
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c2
-rw-r--r--drivers/staging/rtl8187se/r8180.h2
-rw-r--r--drivers/staging/rtl8187se/r8180_93cx6.h2
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c10
-rw-r--r--drivers/staging/rtl8187se/r8180_hw.h2
-rw-r--r--drivers/staging/rtl8187se/r8180_rtl8225.h2
-rw-r--r--drivers/staging/rtl8187se/r8180_rtl8225z2.c2
-rw-r--r--drivers/staging/rtl8187se/r8180_wx.c2
-rw-r--r--drivers/staging/rtl8187se/r8180_wx.h2
-rw-r--r--drivers/staging/rtl8188eu/Kconfig29
-rw-r--r--drivers/staging/rtl8188eu/Makefile70
-rw-r--r--drivers/staging/rtl8188eu/TODO15
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c1988
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_br_ext.c1199
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c2364
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_debug.c948
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c875
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c1640
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_io.c329
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ioctl_set.c1169
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_iol.c209
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_led.c1692
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c2442
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c8481
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mp.c997
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c1508
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_p2p.c2064
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c662
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c2299
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_rf.c89
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c1779
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sreset.c79
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c655
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c1689
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c2447
-rw-r--r--drivers/staging/rtl8188eu/hal/Hal8188EFWImg_CE.c1761
-rw-r--r--drivers/staging/rtl8188eu/hal/Hal8188EPwrSeq.c86
-rw-r--r--drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c760
-rw-r--r--drivers/staging/rtl8188eu/hal/HalHWImg8188E_BB.c721
-rw-r--r--drivers/staging/rtl8188eu/hal/HalHWImg8188E_MAC.c231
-rw-r--r--drivers/staging/rtl8188eu/hal/HalHWImg8188E_RF.c269
-rw-r--r--drivers/staging/rtl8188eu/hal/HalPhyRf.c49
-rw-r--r--drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c1928
-rw-r--r--drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c132
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_com.c381
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_intf.c464
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c2171
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_HWConfig.c596
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_RTL8188E.c399
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c130
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_debug.c32
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_interface.c203
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c779
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c268
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c2378
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_mp.c860
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c1144
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c572
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c202
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_sreset.c80
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c91
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_led.c111
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c138
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c706
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c2346
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_ops_linux.c726
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EFWImg_CE.h28
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h276
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h1094
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPwrSeq.h176
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h75
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EReg.h46
-rw-r--r--drivers/staging/rtl8188eu/include/HalHWImg8188E_BB.h44
-rw-r--r--drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h34
-rw-r--r--drivers/staging/rtl8188eu/include/HalHWImg8188E_MAC.h30
-rw-r--r--drivers/staging/rtl8188eu/include/HalHWImg8188E_RF.h30
-rw-r--r--drivers/staging/rtl8188eu/include/HalPhyRf.h30
-rw-r--r--drivers/staging/rtl8188eu/include/HalPhyRf_8188e.h63
-rw-r--r--drivers/staging/rtl8188eu/include/HalPwrSeqCmd.h128
-rw-r--r--drivers/staging/rtl8188eu/include/HalVerDef.h167
-rw-r--r--drivers/staging/rtl8188eu/include/basic_types.h184
-rw-r--r--drivers/staging/rtl8188eu/include/cmd_osdep.h32
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h334
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types_linux.h24
-rw-r--r--drivers/staging/rtl8188eu/include/ethernet.h42
-rw-r--r--drivers/staging/rtl8188eu/include/h2clbk.h35
-rw-r--r--drivers/staging/rtl8188eu/include/hal_com.h173
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h426
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211.h1274
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211_ext.h290
-rw-r--r--drivers/staging/rtl8188eu/include/if_ether.h111
-rw-r--r--drivers/staging/rtl8188eu/include/ioctl_cfg80211.h107
-rw-r--r--drivers/staging/rtl8188eu/include/ip.h126
-rw-r--r--drivers/staging/rtl8188eu/include/mlme_osdep.h35
-rw-r--r--drivers/staging/rtl8188eu/include/mp_custom_oid.h352
-rw-r--r--drivers/staging/rtl8188eu/include/nic_spec.h44
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h1198
-rw-r--r--drivers/staging/rtl8188eu/include/odm_HWConfig.h132
-rw-r--r--drivers/staging/rtl8188eu/include/odm_RTL8188E.h56
-rw-r--r--drivers/staging/rtl8188eu/include/odm_RegConfig8188E.h43
-rw-r--r--drivers/staging/rtl8188eu/include/odm_RegDefine11AC.h54
-rw-r--r--drivers/staging/rtl8188eu/include/odm_RegDefine11N.h171
-rw-r--r--drivers/staging/rtl8188eu/include/odm_debug.h145
-rw-r--r--drivers/staging/rtl8188eu/include/odm_interface.h164
-rw-r--r--drivers/staging/rtl8188eu/include/odm_precomp.h104
-rw-r--r--drivers/staging/rtl8188eu/include/odm_reg.h119
-rw-r--r--drivers/staging/rtl8188eu/include/odm_types.h62
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_intf.h83
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h547
-rw-r--r--drivers/staging/rtl8188eu/include/recv_osdep.h56
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_cmd.h122
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_dm.h62
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h487
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_led.h35
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_recv.h69
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_rf.h36
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_spec.h1439
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_sreset.h31
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_xmit.h178
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_android.h64
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ap.h65
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_br_ext.h66
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_cmd.h991
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_debug.h290
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_eeprom.h130
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_efuse.h150
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_event.h115
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ht.h44
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_io.h387
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl.h124
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h79
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl_set.h50
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_iol.h84
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_led.h197
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h655
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h877
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mp.h495
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mp_ioctl.h340
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h1084
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_p2p.h135
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_pwrctrl.h283
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_qos.h30
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_recv.h485
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_rf.h146
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_security.h383
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_sreset.h50
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_version.h1
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_xmit.h384
-rw-r--r--drivers/staging/rtl8188eu/include/sta_info.h384
-rw-r--r--drivers/staging/rtl8188eu/include/usb_hal.h26
-rw-r--r--drivers/staging/rtl8188eu/include/usb_ops.h115
-rw-r--r--drivers/staging/rtl8188eu/include/usb_ops_linux.h55
-rw-r--r--drivers/staging/rtl8188eu/include/usb_osintf.h45
-rw-r--r--drivers/staging/rtl8188eu/include/usb_vendor_req.h52
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h1127
-rw-r--r--drivers/staging/rtl8188eu/include/wlan_bssdef.h347
-rw-r--r--drivers/staging/rtl8188eu/include/xmit_osdep.h67
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c8222
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mlme_linux.c246
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c1251
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c815
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c261
-rw-r--r--drivers/staging/rtl8188eu/os_dep/rtw_android.c293
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c892
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c288
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c290
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c7
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h8
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.h2
-rw-r--r--drivers/staging/rtl8192e/rtllib.h2
-rw-r--r--drivers/staging/rtl8192e/rtllib_debug.h2
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c3
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c6
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c2
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c2
-rw-r--r--drivers/staging/rtl8192u/authors2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211.h2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_module.c3
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c2
-rw-r--r--drivers/staging/rtl8192u/r8180_93cx6.c2
-rw-r--r--drivers/staging/rtl8192u/r8180_93cx6.h2
-rw-r--r--drivers/staging/rtl8192u/r8180_pm.c2
-rw-r--r--drivers/staging/rtl8192u/r8180_pm.h2
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.h2
-rw-r--r--drivers/staging/rtl8192u/r8192U.h2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_hw.h2
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.h2
-rw-r--r--drivers/staging/rtl8192u/r819xU_cmdpkt.c609
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c8
-rw-r--r--drivers/staging/silicom/bpctl_mod.c582
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/cmm.h2
-rw-r--r--drivers/staging/tidspbridge/include/dspbridge/proc.h2
-rw-r--r--drivers/staging/tidspbridge/pmgr/dbll.c7
-rw-r--r--drivers/staging/usbip/stub_dev.c6
-rw-r--r--drivers/staging/usbip/usbip_common.c11
-rw-r--r--drivers/staging/usbip/userspace/libsrc/vhci_driver.c2
-rw-r--r--drivers/staging/usbip/userspace/src/usbip.c15
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_attach.c6
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_list.c6
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_network.c30
-rw-r--r--drivers/staging/usbip/userspace/src/usbip_network.h5
-rw-r--r--drivers/staging/usbip/userspace/src/usbipd.c141
-rw-r--r--drivers/staging/usbip/vhci_sysfs.c4
-rw-r--r--drivers/staging/vt6655/device_main.c4
-rw-r--r--drivers/staging/vt6655/hostap.c2
-rw-r--r--drivers/staging/vt6655/ioctl.c2
-rw-r--r--drivers/staging/vt6655/wpactl.c2
-rw-r--r--drivers/staging/vt6656/baseband.c80
-rw-r--r--drivers/staging/vt6656/baseband.h10
-rw-r--r--drivers/staging/vt6656/card.c78
-rw-r--r--drivers/staging/vt6656/desc.h224
-rw-r--r--drivers/staging/vt6656/device.h51
-rw-r--r--drivers/staging/vt6656/device_cfg.h14
-rw-r--r--drivers/staging/vt6656/dpc.c16
-rw-r--r--drivers/staging/vt6656/dpc.h4
-rw-r--r--drivers/staging/vt6656/main_usb.c23
-rw-r--r--drivers/staging/vt6656/rxtx.c1536
-rw-r--r--drivers/staging/vt6656/rxtx.h761
-rw-r--r--drivers/staging/vt6656/usbpipe.c12
-rw-r--r--drivers/staging/vt6656/usbpipe.h5
-rw-r--r--drivers/staging/vt6656/wmgr.c2
-rw-r--r--drivers/staging/winbond/mds.c56
-rw-r--r--drivers/staging/winbond/mds_f.h13
-rw-r--r--drivers/staging/winbond/phy_calibration.h1
-rw-r--r--drivers/staging/winbond/wb35reg.c36
-rw-r--r--drivers/staging/wlags49_h2/Makefile2
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_internal.h1
-rw-r--r--drivers/staging/wlags49_h2/wl_sysfs.c138
-rw-r--r--drivers/staging/wlags49_h2/wl_sysfs.h7
-rw-r--r--drivers/staging/wlags49_h25/Makefile3
-rw-r--r--drivers/staging/wlags49_h25/wl_sysfs.c2
-rw-r--r--drivers/staging/wlags49_h25/wl_sysfs.h2
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c12
-rw-r--r--drivers/staging/xgifb/vb_init.c36
-rw-r--r--drivers/staging/xgifb/vb_setmode.c333
-rw-r--r--drivers/staging/xgifb/vb_setmode.h11
-rw-r--r--drivers/staging/xillybus/Kconfig32
-rw-r--r--drivers/staging/xillybus/Makefile7
-rw-r--r--drivers/staging/xillybus/README403
-rw-r--r--drivers/staging/xillybus/TODO5
-rw-r--r--drivers/staging/xillybus/xillybus.h182
-rw-r--r--drivers/staging/xillybus/xillybus_core.c2345
-rw-r--r--drivers/staging/xillybus/xillybus_of.c212
-rw-r--r--drivers/staging/xillybus/xillybus_pcie.c262
-rw-r--r--drivers/staging/zcache/Kconfig59
-rw-r--r--drivers/staging/zcache/Makefile8
-rw-r--r--drivers/staging/zcache/TODO64
-rw-r--r--drivers/staging/zcache/debug.c107
-rw-r--r--drivers/staging/zcache/debug.h305
-rw-r--r--drivers/staging/zcache/ramster.h59
-rw-r--r--drivers/staging/zcache/ramster/debug.c68
-rw-r--r--drivers/staging/zcache/ramster/debug.h145
-rw-r--r--drivers/staging/zcache/ramster/heartbeat.c462
-rw-r--r--drivers/staging/zcache/ramster/heartbeat.h87
-rw-r--r--drivers/staging/zcache/ramster/masklog.c155
-rw-r--r--drivers/staging/zcache/ramster/masklog.h220
-rw-r--r--drivers/staging/zcache/ramster/nodemanager.c996
-rw-r--r--drivers/staging/zcache/ramster/nodemanager.h88
-rw-r--r--drivers/staging/zcache/ramster/r2net.c414
-rw-r--r--drivers/staging/zcache/ramster/ramster-howto.txt366
-rw-r--r--drivers/staging/zcache/ramster/ramster.c925
-rw-r--r--drivers/staging/zcache/ramster/ramster.h161
-rw-r--r--drivers/staging/zcache/ramster/ramster_nodemanager.h41
-rw-r--r--drivers/staging/zcache/ramster/tcp.c2248
-rw-r--r--drivers/staging/zcache/ramster/tcp.h159
-rw-r--r--drivers/staging/zcache/ramster/tcp_internal.h248
-rw-r--r--drivers/staging/zcache/tmem.c898
-rw-r--r--drivers/staging/zcache/tmem.h259
-rw-r--r--drivers/staging/zcache/zbud.c1066
-rw-r--r--drivers/staging/zcache/zbud.h33
-rw-r--r--drivers/staging/zcache/zcache-main.c1941
-rw-r--r--drivers/staging/zcache/zcache.h53
-rw-r--r--drivers/staging/zram/zram_drv.c101
-rw-r--r--drivers/staging/zram/zram_drv.h10
-rw-r--r--drivers/staging/zsmalloc/zsmalloc-main.c4
-rw-r--r--drivers/target/iscsi/iscsi_target.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c9
-rw-r--r--drivers/target/target_core_spc.c9
-rw-r--r--drivers/target/target_core_transport.c11
-rw-r--r--drivers/tty/amiserial.c2
-rw-r--r--drivers/tty/hvc/hvc_console.c11
-rw-r--r--drivers/tty/hvc/hvc_console.h3
-rw-r--r--drivers/tty/hvc/hvc_iucv.c64
-rw-r--r--drivers/tty/hvc/hvc_xen.c6
-rw-r--r--drivers/tty/hvc/hvsi_lib.c4
-rw-r--r--drivers/tty/n_gsm.c29
-rw-r--r--drivers/tty/n_tty.c1414
-rw-r--r--drivers/tty/pty.c18
-rw-r--r--drivers/tty/serial/8250/8250_core.c2
-rw-r--r--drivers/tty/serial/8250/8250_dw.c34
-rw-r--r--drivers/tty/serial/8250/8250_early.c3
-rw-r--r--drivers/tty/serial/8250/8250_em.c27
-rw-r--r--drivers/tty/serial/8250/8250_gsc.c3
-rw-r--r--drivers/tty/serial/8250/8250_pci.c15
-rw-r--r--drivers/tty/serial/8250/Kconfig2
-rw-r--r--drivers/tty/serial/Kconfig35
-rw-r--r--drivers/tty/serial/Makefile1
-rw-r--r--drivers/tty/serial/altera_jtaguart.c5
-rw-r--r--drivers/tty/serial/altera_uart.c4
-rw-r--r--drivers/tty/serial/amba-pl010.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c18
-rw-r--r--drivers/tty/serial/apbuart.c2
-rw-r--r--drivers/tty/serial/ar933x_uart.c111
-rw-r--r--drivers/tty/serial/arc_uart.c37
-rw-r--r--drivers/tty/serial/atmel_serial.c857
-rw-r--r--drivers/tty/serial/bcm63xx_uart.c3
-rw-r--r--drivers/tty/serial/bfin_sport_uart.c14
-rw-r--r--drivers/tty/serial/bfin_uart.c21
-rw-r--r--drivers/tty/serial/clps711x.c11
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c28
-rw-r--r--drivers/tty/serial/efm32-uart.c29
-rw-r--r--drivers/tty/serial/fsl_lpuart.c7
-rw-r--r--drivers/tty/serial/icom.c103
-rw-r--r--drivers/tty/serial/ifx6x60.c2
-rw-r--r--drivers/tty/serial/imx.c520
-rw-r--r--drivers/tty/serial/ioc4_serial.c4
-rw-r--r--drivers/tty/serial/lantiq.c7
-rw-r--r--drivers/tty/serial/lpc32xx_hs.c7
-rw-r--r--drivers/tty/serial/m32r_sio.c3
-rw-r--r--drivers/tty/serial/max3100.c2
-rw-r--r--drivers/tty/serial/max310x.c991
-rw-r--r--drivers/tty/serial/mcf.c5
-rw-r--r--drivers/tty/serial/mfd.c14
-rw-r--r--drivers/tty/serial/mpsc.c15
-rw-r--r--drivers/tty/serial/mrst_max3110.c4
-rw-r--r--drivers/tty/serial/msm_serial.c276
-rw-r--r--drivers/tty/serial/msm_serial.h19
-rw-r--r--drivers/tty/serial/msm_serial_hs.c2
-rw-r--r--drivers/tty/serial/mxs-auart.c54
-rw-r--r--drivers/tty/serial/netx-serial.c8
-rw-r--r--drivers/tty/serial/nwpserial.c3
-rw-r--r--drivers/tty/serial/omap-serial.c214
-rw-r--r--drivers/tty/serial/pch_uart.c84
-rw-r--r--drivers/tty/serial/pmac_zilog.c1
-rw-r--r--drivers/tty/serial/pnx8xxx_uart.c5
-rw-r--r--drivers/tty/serial/pxa.c33
-rw-r--r--drivers/tty/serial/rp2.c2
-rw-r--r--drivers/tty/serial/sa1100.c5
-rw-r--r--drivers/tty/serial/samsung.c9
-rw-r--r--drivers/tty/serial/samsung.h3
-rw-r--r--drivers/tty/serial/sc26xx.c2
-rw-r--r--drivers/tty/serial/sccnxp.c339
-rw-r--r--drivers/tty/serial/serial-tegra.c16
-rw-r--r--drivers/tty/serial/serial_core.c4
-rw-r--r--drivers/tty/serial/serial_txx9.c2
-rw-r--r--drivers/tty/serial/sh-sci.c4
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c1195
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h501
-rw-r--r--drivers/tty/serial/st-asc.c932
-rw-r--r--drivers/tty/serial/timbuart.c4
-rw-r--r--drivers/tty/serial/vr41xx_siu.c2
-rw-r--r--drivers/tty/serial/vt8500_serial.c3
-rw-r--r--drivers/tty/synclink.c130
-rw-r--r--drivers/tty/synclinkmp.c2
-rw-r--r--drivers/tty/tty_buffer.c417
-rw-r--r--drivers/tty/tty_io.c33
-rw-r--r--drivers/tty/tty_ioctl.c90
-rw-r--r--drivers/tty/tty_ldisc.c461
-rw-r--r--drivers/tty/tty_port.c5
-rw-r--r--drivers/tty/vt/keyboard.c21
-rw-r--r--drivers/tty/vt/selection.c8
-rw-r--r--drivers/tty/vt/vt.c8
-rw-r--r--drivers/uio/Kconfig21
-rw-r--r--drivers/uio/Makefile2
-rw-r--r--drivers/uio/uio.c62
-rw-r--r--drivers/uio/uio_dmem_genirq.c2
-rw-r--r--drivers/uio/uio_mf624.c247
-rw-r--r--drivers/uio/uio_pdrv.c113
-rw-r--r--drivers/uio/uio_pdrv_genirq.c2
-rw-r--r--drivers/uio/uio_pruss.c3
-rw-r--r--drivers/usb/Kconfig17
-rw-r--r--drivers/usb/Makefile3
-rw-r--r--drivers/usb/atm/Makefile3
-rw-r--r--drivers/usb/atm/speedtch.c2
-rw-r--r--drivers/usb/atm/usbatm.c49
-rw-r--r--drivers/usb/atm/usbatm.h35
-rw-r--r--drivers/usb/c67x00/c67x00-drv.c4
-rw-r--r--drivers/usb/chipidea/Kconfig7
-rw-r--r--drivers/usb/chipidea/Makefile2
-rw-r--r--drivers/usb/chipidea/bits.h14
-rw-r--r--drivers/usb/chipidea/ci.h8
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c110
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.h17
-rw-r--r--drivers/usb/chipidea/ci_hdrc_msm.c1
-rw-r--r--drivers/usb/chipidea/core.c197
-rw-r--r--drivers/usb/chipidea/host.c31
-rw-r--r--drivers/usb/chipidea/host.h6
-rw-r--r--drivers/usb/chipidea/otg.c120
-rw-r--r--drivers/usb/chipidea/otg.h35
-rw-r--r--drivers/usb/chipidea/udc.c78
-rw-r--r--drivers/usb/chipidea/udc.h6
-rw-r--r--drivers/usb/chipidea/usbmisc_imx.c95
-rw-r--r--drivers/usb/class/cdc-acm.c2
-rw-r--r--drivers/usb/class/cdc-wdm.c13
-rw-r--r--drivers/usb/class/usbtmc.c70
-rw-r--r--drivers/usb/core/buffer.c5
-rw-r--r--drivers/usb/core/config.c3
-rw-r--r--drivers/usb/core/devio.c6
-rw-r--r--drivers/usb/core/driver.c46
-rw-r--r--drivers/usb/core/endpoint.c37
-rw-r--r--drivers/usb/core/file.c2
-rw-r--r--drivers/usb/core/hcd-pci.c2
-rw-r--r--drivers/usb/core/hcd.c227
-rw-r--r--drivers/usb/core/hub.c288
-rw-r--r--drivers/usb/core/hub.h3
-rw-r--r--drivers/usb/core/message.c62
-rw-r--r--drivers/usb/core/port.c20
-rw-r--r--drivers/usb/core/quirks.c6
-rw-r--r--drivers/usb/core/sysfs.c303
-rw-r--r--drivers/usb/core/urb.c43
-rw-r--r--drivers/usb/core/usb.c60
-rw-r--r--drivers/usb/dwc3/Kconfig35
-rw-r--r--drivers/usb/dwc3/Makefile13
-rw-r--r--drivers/usb/dwc3/core.c199
-rw-r--r--drivers/usb/dwc3/core.h57
-rw-r--r--drivers/usb/dwc3/debug.h34
-rw-r--r--drivers/usb/dwc3/debugfs.c34
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c22
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c167
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c54
-rw-r--r--drivers/usb/dwc3/ep0.c49
-rw-r--r--drivers/usb/dwc3/gadget.c258
-rw-r--r--drivers/usb/dwc3/gadget.h34
-rw-r--r--drivers/usb/dwc3/host.c34
-rw-r--r--drivers/usb/dwc3/io.h34
-rw-r--r--drivers/usb/dwc3/platform_data.h27
-rw-r--r--drivers/usb/gadget/Kconfig29
-rw-r--r--drivers/usb/gadget/Makefile1
-rw-r--r--drivers/usb/gadget/amd5536udc.c4
-rw-r--r--drivers/usb/gadget/at91_udc.c41
-rw-r--r--drivers/usb/gadget/at91_udc.h2
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c26
-rw-r--r--drivers/usb/gadget/bcm63xx_udc.c2
-rw-r--r--drivers/usb/gadget/composite.c8
-rw-r--r--drivers/usb/gadget/configfs.c4
-rw-r--r--drivers/usb/gadget/dummy_hcd.c8
-rw-r--r--drivers/usb/gadget/ether.c14
-rw-r--r--drivers/usb/gadget/f_acm.c1
-rw-r--r--drivers/usb/gadget/f_ecm.c7
-rw-r--r--drivers/usb/gadget/f_eem.c7
-rw-r--r--drivers/usb/gadget/f_fs.c2
-rw-r--r--drivers/usb/gadget/f_mass_storage.c18
-rw-r--r--drivers/usb/gadget/f_ncm.c7
-rw-r--r--drivers/usb/gadget/f_phonet.c9
-rw-r--r--drivers/usb/gadget/f_rndis.c7
-rw-r--r--drivers/usb/gadget/f_subset.c7
-rw-r--r--drivers/usb/gadget/f_uac1.c4
-rw-r--r--drivers/usb/gadget/fotg210-udc.c8
-rw-r--r--drivers/usb/gadget/fsl_mxc_udc.c4
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c6
-rw-r--r--drivers/usb/gadget/fusb300_udc.c2
-rw-r--r--drivers/usb/gadget/goku_udc.c2
-rw-r--r--drivers/usb/gadget/hid.c2
-rw-r--r--drivers/usb/gadget/imx_udc.c1544
-rw-r--r--drivers/usb/gadget/imx_udc.h351
-rw-r--r--drivers/usb/gadget/inode.c4
-rw-r--r--drivers/usb/gadget/m66592-udc.c4
-rw-r--r--drivers/usb/gadget/multi.c10
-rw-r--r--drivers/usb/gadget/mv_u3d_core.c16
-rw-r--r--drivers/usb/gadget/mv_udc_core.c4
-rw-r--r--drivers/usb/gadget/net2272.c4
-rw-r--r--drivers/usb/gadget/net2280.c18
-rw-r--r--drivers/usb/gadget/omap_udc.c2
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c2
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c2
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c2
-rw-r--r--drivers/usb/gadget/rndis.c4
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c14
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c4
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c2
-rw-r--r--drivers/usb/gadget/storage_common.c27
-rw-r--r--drivers/usb/gadget/u_uac1.c2
-rw-r--r--drivers/usb/gadget/udc-core.c32
-rw-r--r--drivers/usb/gadget/uvc_queue.c6
-rw-r--r--drivers/usb/host/Kconfig63
-rw-r--r--drivers/usb/host/Makefile5
-rw-r--r--drivers/usb/host/ehci-dbg.c9
-rw-r--r--drivers/usb/host/ehci-fsl.c18
-rw-r--r--drivers/usb/host/ehci-grlib.c13
-rw-r--r--drivers/usb/host/ehci-hcd.c33
-rw-r--r--drivers/usb/host/ehci-hub.c233
-rw-r--r--drivers/usb/host/ehci-mem.c1
-rw-r--r--drivers/usb/host/ehci-mv.c4
-rw-r--r--drivers/usb/host/ehci-mxc.c16
-rw-r--r--drivers/usb/host/ehci-octeon.c2
-rw-r--r--drivers/usb/host/ehci-omap.c14
-rw-r--r--drivers/usb/host/ehci-orion.c2
-rw-r--r--drivers/usb/host/ehci-pci.c42
-rw-r--r--drivers/usb/host/ehci-platform.c12
-rw-r--r--drivers/usb/host/ehci-pmcmsp.c2
-rw-r--r--drivers/usb/host/ehci-ppc-of.c13
-rw-r--r--drivers/usb/host/ehci-ps3.c2
-rw-r--r--drivers/usb/host/ehci-q.c119
-rw-r--r--drivers/usb/host/ehci-s5p.c12
-rw-r--r--drivers/usb/host/ehci-sched.c118
-rw-r--r--drivers/usb/host/ehci-sead3.c2
-rw-r--r--drivers/usb/host/ehci-sh.c4
-rw-r--r--drivers/usb/host/ehci-tegra.c95
-rw-r--r--drivers/usb/host/ehci-tilegx.c6
-rw-r--r--drivers/usb/host/ehci-timer.c34
-rw-r--r--drivers/usb/host/ehci-w90x900.c2
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c19
-rw-r--r--drivers/usb/host/ehci.h17
-rw-r--r--drivers/usb/host/fotg210-hcd.c6049
-rw-r--r--drivers/usb/host/fotg210.h750
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c4
-rw-r--r--drivers/usb/host/hwa-hc.c7
-rw-r--r--drivers/usb/host/imx21-hcd.c2
-rw-r--r--drivers/usb/host/isp116x-hcd.c2
-rw-r--r--drivers/usb/host/isp116x.h13
-rw-r--r--drivers/usb/host/isp1362-hcd.c85
-rw-r--r--drivers/usb/host/isp1362.h53
-rw-r--r--drivers/usb/host/isp1760-if.c2
-rw-r--r--drivers/usb/host/ohci-at91.c37
-rw-r--r--drivers/usb/host/ohci-da8xx.c8
-rw-r--r--drivers/usb/host/ohci-ep93xx.c137
-rw-r--r--drivers/usb/host/ohci-exynos.c2
-rw-r--r--drivers/usb/host/ohci-hcd.c2
-rw-r--r--drivers/usb/host/ohci-omap.c4
-rw-r--r--drivers/usb/host/ohci-omap3.c10
-rw-r--r--drivers/usb/host/ohci-pci.c9
-rw-r--r--drivers/usb/host/ohci-platform.c10
-rw-r--r--drivers/usb/host/ohci-ppc-of.c11
-rw-r--r--drivers/usb/host/ohci-pxa27x.c8
-rw-r--r--drivers/usb/host/ohci-s3c2410.c11
-rw-r--r--drivers/usb/host/ohci-tilegx.c4
-rw-r--r--drivers/usb/host/pci-quirks.c48
-rw-r--r--drivers/usb/host/pci-quirks.h4
-rw-r--r--drivers/usb/host/r8a66597-hcd.c6
-rw-r--r--drivers/usb/host/sl811-hcd.c109
-rw-r--r--drivers/usb/host/sl811.h21
-rw-r--r--drivers/usb/host/u132-hcd.c6
-rw-r--r--drivers/usb/host/xhci-dbg.c14
-rw-r--r--drivers/usb/host/xhci-ext-caps.h2
-rw-r--r--drivers/usb/host/xhci-hub.c228
-rw-r--r--drivers/usb/host/xhci-mem.c188
-rw-r--r--drivers/usb/host/xhci-pci.c29
-rw-r--r--drivers/usb/host/xhci-plat.c48
-rw-r--r--drivers/usb/host/xhci-ring.c115
-rw-r--r--drivers/usb/host/xhci-trace.c (renamed from arch/arm/kernel/signal.h)11
-rw-r--r--drivers/usb/host/xhci-trace.h151
-rw-r--r--drivers/usb/host/xhci.c396
-rw-r--r--drivers/usb/host/xhci.h18
-rw-r--r--drivers/usb/misc/Kconfig14
-rw-r--r--drivers/usb/misc/Makefile4
-rw-r--r--drivers/usb/misc/adutux.c202
-rw-r--r--drivers/usb/misc/ehset.c152
-rw-r--r--drivers/usb/misc/ldusb.c31
-rw-r--r--drivers/usb/misc/legousbtower.c124
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c1
-rw-r--r--drivers/usb/misc/usb3503.c284
-rw-r--r--drivers/usb/misc/usbtest.c9
-rw-r--r--drivers/usb/misc/uss720.c24
-rw-r--r--drivers/usb/musb/Kconfig9
-rw-r--r--drivers/usb/musb/Makefile4
-rw-r--r--drivers/usb/musb/am35x.c16
-rw-r--r--drivers/usb/musb/blackfin.c4
-rw-r--r--drivers/usb/musb/cppi_dma.c18
-rw-r--r--drivers/usb/musb/da8xx.c4
-rw-r--r--drivers/usb/musb/davinci.c4
-rw-r--r--drivers/usb/musb/musb_am335x.c55
-rw-r--r--drivers/usb/musb/musb_core.c31
-rw-r--r--drivers/usb/musb/musb_core.h8
-rw-r--r--drivers/usb/musb/musb_cppi41.c557
-rw-r--r--drivers/usb/musb/musb_dma.h21
-rw-r--r--drivers/usb/musb/musb_dsps.c395
-rw-r--r--drivers/usb/musb/musb_gadget.c102
-rw-r--r--drivers/usb/musb/musb_host.c2
-rw-r--r--drivers/usb/musb/musbhsdma.c17
-rw-r--r--drivers/usb/musb/omap2430.c15
-rw-r--r--drivers/usb/musb/tusb6010.c11
-rw-r--r--drivers/usb/musb/tusb6010_omap.c24
-rw-r--r--drivers/usb/musb/ux500.c2
-rw-r--r--drivers/usb/musb/ux500_dma.c25
-rw-r--r--drivers/usb/phy/Kconfig50
-rw-r--r--drivers/usb/phy/Makefile7
-rw-r--r--drivers/usb/phy/am35x-phy-control.h21
-rw-r--r--drivers/usb/phy/phy-am335x-control.c137
-rw-r--r--drivers/usb/phy/phy-am335x.c99
-rw-r--r--drivers/usb/phy/phy-fsl-usb.c12
-rw-r--r--drivers/usb/phy/phy-fsl-usb.h2
-rw-r--r--drivers/usb/phy/phy-fsm-usb.c2
-rw-r--r--drivers/usb/phy/phy-fsm-usb.h11
-rw-r--r--drivers/usb/phy/phy-generic.c (renamed from drivers/usb/phy/phy-nop.c)167
-rw-r--r--drivers/usb/phy/phy-generic.h20
-rw-r--r--drivers/usb/phy/phy-gpio-vbus-usb.c10
-rw-r--r--drivers/usb/phy/phy-isp1301-omap.c6
-rw-r--r--drivers/usb/phy/phy-msm-usb.c4
-rw-r--r--drivers/usb/phy/phy-mv-u3d-usb.c4
-rw-r--r--drivers/usb/phy/phy-mv-usb.c6
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c13
-rw-r--r--drivers/usb/phy/phy-omap-control.c3
-rw-r--r--drivers/usb/phy/phy-omap-usb2.c7
-rw-r--r--drivers/usb/phy/phy-omap-usb3.c87
-rw-r--r--drivers/usb/phy/phy-rcar-usb.c9
-rw-r--r--drivers/usb/phy/phy-samsung-usb2.c4
-rw-r--r--drivers/usb/phy/phy-samsung-usb3.c2
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c462
-rw-r--r--drivers/usb/phy/phy-twl4030-usb.c2
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c2
-rw-r--r--drivers/usb/renesas_usbhs/common.c6
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c10
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c6
-rw-r--r--drivers/usb/renesas_usbhs/pipe.h6
-rw-r--r--drivers/usb/serial/Kconfig77
-rw-r--r--drivers/usb/serial/Makefile8
-rw-r--r--drivers/usb/serial/bus.c19
-rw-r--r--drivers/usb/serial/console.c27
-rw-r--r--drivers/usb/serial/cp210x.c7
-rw-r--r--drivers/usb/serial/cypress_m8.c4
-rw-r--r--drivers/usb/serial/f81232.c3
-rw-r--r--drivers/usb/serial/flashloader.c39
-rw-r--r--drivers/usb/serial/ftdi_sio.c98
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h34
-rw-r--r--drivers/usb/serial/funsoft.c40
-rw-r--r--drivers/usb/serial/generic.c13
-rw-r--r--drivers/usb/serial/hp4x.c51
-rw-r--r--drivers/usb/serial/io_edgeport.c2
-rw-r--r--drivers/usb/serial/io_ti.c79
-rw-r--r--drivers/usb/serial/iuu_phoenix.c8
-rw-r--r--drivers/usb/serial/keyspan.c106
-rw-r--r--drivers/usb/serial/mos7720.c21
-rw-r--r--drivers/usb/serial/mos7840.c223
-rw-r--r--drivers/usb/serial/moto_modem.c48
-rw-r--r--drivers/usb/serial/option.c23
-rw-r--r--drivers/usb/serial/oti6858.c10
-rw-r--r--drivers/usb/serial/pl2303.c352
-rw-r--r--drivers/usb/serial/quatech2.c35
-rw-r--r--drivers/usb/serial/safe_serial.c51
-rw-r--r--drivers/usb/serial/siemens_mpi.c47
-rw-r--r--drivers/usb/serial/spcp8x5.c4
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c114
-rw-r--r--drivers/usb/serial/usb-serial-simple.c110
-rw-r--r--drivers/usb/serial/usb-serial.c14
-rw-r--r--drivers/usb/serial/usb_wwan.c20
-rw-r--r--drivers/usb/serial/vivopay-serial.c43
-rw-r--r--drivers/usb/serial/zio.c39
-rw-r--r--drivers/usb/storage/scsiglue.c16
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/usb/usb-common.c49
-rw-r--r--drivers/usb/usb-skeleton.c3
-rw-r--r--drivers/usb/wusbcore/rh.c28
-rw-r--r--drivers/usb/wusbcore/wa-hc.h15
-rw-r--r--drivers/usb/wusbcore/wa-rpipe.c21
-rw-r--r--drivers/usb/wusbcore/wa-xfer.c148
-rw-r--r--drivers/uwb/drp-ie.c4
-rw-r--r--drivers/uwb/hwa-rc.c16
-rw-r--r--drivers/uwb/pal.c40
-rw-r--r--drivers/vfio/pci/vfio_pci.c23
-rw-r--r--drivers/vfio/vfio.c37
-rw-r--r--drivers/vhost/net.c92
-rw-r--r--drivers/vhost/vhost.c56
-rw-r--r--drivers/video/Kconfig20
-rw-r--r--drivers/video/atmel_lcdfb.c8
-rw-r--r--drivers/video/aty/atyfb_base.c4
-rw-r--r--drivers/video/backlight/backlight.c44
-rw-r--r--drivers/video/backlight/hx8357.c269
-rw-r--r--drivers/video/backlight/lcd.c26
-rw-r--r--drivers/video/backlight/lp855x_bl.c2
-rw-r--r--drivers/video/backlight/max8925_bl.c41
-rw-r--r--drivers/video/console/Kconfig3
-rw-r--r--drivers/video/da8xx-fb.c387
-rw-r--r--drivers/video/efifb.c303
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi_lowlevel.c1
-rw-r--r--drivers/video/fbcmap.c7
-rw-r--r--drivers/video/fbmem.c29
-rw-r--r--drivers/video/hdmi.c141
-rw-r--r--drivers/video/hyperv_fb.c1
-rw-r--r--drivers/video/matrox/matroxfb_base.c3
-rw-r--r--drivers/video/mxsfb.c41
-rw-r--r--drivers/video/nuc900fb.c3
-rw-r--r--drivers/video/omap2/Kconfig1
-rw-r--r--drivers/video/omap2/Makefile1
-rw-r--r--drivers/video/omap2/displays-new/connector-analog-tv.c18
-rw-r--r--drivers/video/omap2/displays-new/encoder-tfp410.c14
-rw-r--r--drivers/video/omap2/displays-new/encoder-tpd12s015.c14
-rw-r--r--drivers/video/omap2/displays/Kconfig75
-rw-r--r--drivers/video/omap2/displays/Makefile11
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c798
-rw-r--r--drivers/video/omap2/displays/panel-generic-dpi.c744
-rw-r--r--drivers/video/omap2/displays/panel-lgphilips-lb035q02.c262
-rw-r--r--drivers/video/omap2/displays/panel-n8x0.c616
-rw-r--r--drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c290
-rw-r--r--drivers/video/omap2/displays/panel-picodlp.c559
-rw-r--r--drivers/video/omap2/displays/panel-picodlp.h288
-rw-r--r--drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c198
-rw-r--r--drivers/video/omap2/displays/panel-taal.c1551
-rw-r--r--drivers/video/omap2/displays/panel-tfp410.c353
-rw-r--r--drivers/video/omap2/displays/panel-tpo-td043mtea1.c596
-rw-r--r--drivers/video/omap2/dss/Kconfig1
-rw-r--r--drivers/video/omap2/dss/Makefile5
-rw-r--r--drivers/video/omap2/dss/apply.c4
-rw-r--r--drivers/video/omap2/dss/core.c326
-rw-r--r--drivers/video/omap2/dss/dpi.c121
-rw-r--r--drivers/video/omap2/dss/dsi.c275
-rw-r--r--drivers/video/omap2/dss/dss.h45
-rw-r--r--drivers/video/omap2/dss/hdmi.c312
-rw-r--r--drivers/video/omap2/dss/hdmi_panel.c414
-rw-r--r--drivers/video/omap2/dss/manager-sysfs.c8
-rw-r--r--drivers/video/omap2/dss/output.c22
-rw-r--r--drivers/video/omap2/dss/rfbi.c135
-rw-r--r--drivers/video/omap2/dss/sdi.c119
-rw-r--r--drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c42
-rw-r--r--drivers/video/omap2/dss/venc.c122
-rw-r--r--drivers/video/output.c20
-rw-r--r--drivers/video/sgivwfb.c2
-rw-r--r--drivers/video/sh7760fb.c2
-rw-r--r--drivers/video/simplefb.c58
-rw-r--r--drivers/video/vesafb.c55
-rw-r--r--drivers/video/vga16fb.c1
-rw-r--r--drivers/video/xilinxfb.c12
-rw-r--r--drivers/vme/boards/vme_vmivme7805.c2
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c10
-rw-r--r--drivers/vme/bridges/vme_tsi148.c4
-rw-r--r--drivers/w1/slaves/w1_ds2408.c174
-rw-r--r--drivers/w1/slaves/w1_ds2413.c72
-rw-r--r--drivers/w1/slaves/w1_ds2423.c27
-rw-r--r--drivers/w1/slaves/w1_ds2431.c43
-rw-r--r--drivers/w1/slaves/w1_ds2433.c47
-rw-r--r--drivers/w1/slaves/w1_ds2760.c35
-rw-r--r--drivers/w1/slaves/w1_ds2780.c36
-rw-r--r--drivers/w1/slaves/w1_ds2781.c36
-rw-r--r--drivers/w1/slaves/w1_ds28e04.c112
-rw-r--r--drivers/w1/slaves/w1_therm.c24
-rw-r--r--drivers/w1/w1.c164
-rw-r--r--drivers/w1/w1_family.h1
-rw-r--r--drivers/xen/Kconfig2
-rw-r--r--drivers/xen/Makefile5
-rw-r--r--drivers/xen/acpi.c41
-rw-r--r--drivers/xen/balloon.c74
-rw-r--r--drivers/xen/events.c43
-rw-r--r--drivers/xen/evtchn.c210
-rw-r--r--drivers/xen/gntdev.c11
-rw-r--r--drivers/xen/grant-table.c13
-rw-r--r--drivers/xen/privcmd.c83
-rw-r--r--drivers/xen/swiotlb-xen.c8
-rw-r--r--drivers/xen/xen-selfballoon.c54
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c19
-rw-r--r--fs/bfs/inode.c2
-rw-r--r--fs/bio.c22
-rw-r--r--fs/block_dev.c2
-rw-r--r--fs/btrfs/backref.c48
-rw-r--r--fs/btrfs/ctree.c1
-rw-r--r--fs/btrfs/dev-replace.c2
-rw-r--r--fs/btrfs/extent_io.c9
-rw-r--r--fs/btrfs/file.c64
-rw-r--r--fs/btrfs/inode.c54
-rw-r--r--fs/btrfs/transaction.c8
-rw-r--r--fs/btrfs/transaction.h2
-rw-r--r--fs/btrfs/tree-log.c5
-rw-r--r--fs/btrfs/volumes.c2
-rw-r--r--fs/cifs/cifsencrypt.c14
-rw-r--r--fs/cifs/cifsfs.c11
-rw-r--r--fs/cifs/cifsglob.h4
-rw-r--r--fs/cifs/cifsproto.h4
-rw-r--r--fs/cifs/connect.c7
-rw-r--r--fs/cifs/file.c3
-rw-r--r--fs/cifs/link.c84
-rw-r--r--fs/cifs/readdir.c8
-rw-r--r--fs/cifs/sess.c6
-rw-r--r--fs/cifs/smb1ops.c1
-rw-r--r--fs/cifs/smb2transport.c9
-rw-r--r--fs/dcache.c98
-rw-r--r--fs/debugfs/inode.c69
-rw-r--r--fs/direct-io.c126
-rw-r--r--fs/dlm/ast.c5
-rw-r--r--fs/dlm/user.c25
-rw-r--r--fs/efs/inode.c2
-rw-r--r--fs/eventpoll.c31
-rw-r--r--fs/exec.c4
-rw-r--r--fs/ext3/dir.c2
-rw-r--r--fs/ext3/super.c43
-rw-r--r--fs/ext4/balloc.c24
-rw-r--r--fs/ext4/dir.c2
-rw-r--r--fs/ext4/ext4.h70
-rw-r--r--fs/ext4/ext4_extents.h6
-rw-r--r--fs/ext4/ext4_jbd2.c8
-rw-r--r--fs/ext4/ext4_jbd2.h2
-rw-r--r--fs/ext4/extents.c298
-rw-r--r--fs/ext4/extents_status.c125
-rw-r--r--fs/ext4/extents_status.h51
-rw-r--r--fs/ext4/file.c23
-rw-r--r--fs/ext4/ialloc.c100
-rw-r--r--fs/ext4/indirect.c1
-rw-r--r--fs/ext4/inode.c403
-rw-r--r--fs/ext4/ioctl.c10
-rw-r--r--fs/ext4/mballoc.c49
-rw-r--r--fs/ext4/migrate.c4
-rw-r--r--fs/ext4/move_extent.c2
-rw-r--r--fs/ext4/namei.c35
-rw-r--r--fs/ext4/page-io.c30
-rw-r--r--fs/ext4/super.c83
-rw-r--r--fs/f2fs/checkpoint.c24
-rw-r--r--fs/f2fs/data.c28
-rw-r--r--fs/f2fs/debug.c34
-rw-r--r--fs/f2fs/dir.c19
-rw-r--r--fs/f2fs/f2fs.h106
-rw-r--r--fs/f2fs/file.c25
-rw-r--r--fs/f2fs/gc.c58
-rw-r--r--fs/f2fs/gc.h38
-rw-r--r--fs/f2fs/inode.c15
-rw-r--r--fs/f2fs/namei.c33
-rw-r--r--fs/f2fs/node.c100
-rw-r--r--fs/f2fs/node.h44
-rw-r--r--fs/f2fs/recovery.c29
-rw-r--r--fs/f2fs/segment.c41
-rw-r--r--fs/f2fs/segment.h6
-rw-r--r--fs/f2fs/super.c209
-rw-r--r--fs/f2fs/xattr.c289
-rw-r--r--fs/f2fs/xattr.h15
-rw-r--r--fs/fcntl.c4
-rw-r--r--fs/file_table.c6
-rw-r--r--fs/fuse/cuse.c13
-rw-r--r--fs/gfs2/glock.c8
-rw-r--r--fs/gfs2/glops.c18
-rw-r--r--fs/gfs2/inode.c6
-rw-r--r--fs/gfs2/main.c2
-rw-r--r--fs/hugetlbfs/inode.c18
-rw-r--r--fs/inode.c2
-rw-r--r--fs/isofs/inode.c16
-rw-r--r--fs/jbd/commit.c2
-rw-r--r--fs/jbd/journal.c18
-rw-r--r--fs/jbd2/commit.c6
-rw-r--r--fs/jbd2/journal.c5
-rw-r--r--fs/jbd2/recovery.c24
-rw-r--r--fs/jfs/jfs_dtree.c31
-rw-r--r--fs/lockd/clntlock.c13
-rw-r--r--fs/lockd/clntproc.c5
-rw-r--r--fs/namei.c274
-rw-r--r--fs/namespace.c4
-rw-r--r--fs/nfs/inode.c11
-rw-r--r--fs/nfs/nfs4proc.c8
-rw-r--r--fs/nfs/super.c4
-rw-r--r--fs/nfsd/nfs4proc.c2
-rw-r--r--fs/nfsd/nfs4state.c2
-rw-r--r--fs/nfsd/nfs4xdr.c19
-rw-r--r--fs/nfsd/vfs.c5
-rw-r--r--fs/nilfs2/segbuf.c5
-rw-r--r--fs/nilfs2/super.c26
-rw-r--r--fs/ocfs2/aops.c10
-rw-r--r--fs/ocfs2/dir.c4
-rw-r--r--fs/ocfs2/file.c6
-rw-r--r--fs/ocfs2/journal.h2
-rw-r--r--fs/ocfs2/move_extents.c2
-rw-r--r--fs/ocfs2/refcounttree.c58
-rw-r--r--fs/ocfs2/refcounttree.h6
-rw-r--r--fs/ocfs2/super.c2
-rw-r--r--fs/open.c13
-rw-r--r--fs/proc/fd.c2
-rw-r--r--fs/proc/generic.c2
-rw-r--r--fs/proc/inode.c16
-rw-r--r--fs/proc/root.c4
-rw-r--r--fs/proc/task_mmu.c31
-rw-r--r--fs/pstore/Kconfig2
-rw-r--r--fs/pstore/inode.c10
-rw-r--r--fs/pstore/internal.h5
-rw-r--r--fs/pstore/platform.c212
-rw-r--r--fs/pstore/ram.c47
-rw-r--r--fs/quota/dquot.c46
-rw-r--r--fs/reiserfs/bitmap.c22
-rw-r--r--fs/reiserfs/dir.c7
-rw-r--r--fs/reiserfs/fix_node.c26
-rw-r--r--fs/reiserfs/inode.c114
-rw-r--r--fs/reiserfs/ioctl.c7
-rw-r--r--fs/reiserfs/journal.c104
-rw-r--r--fs/reiserfs/lock.c43
-rw-r--r--fs/reiserfs/namei.c24
-rw-r--r--fs/reiserfs/prints.c5
-rw-r--r--fs/reiserfs/procfs.c99
-rw-r--r--fs/reiserfs/reiserfs.h36
-rw-r--r--fs/reiserfs/resize.c10
-rw-r--r--fs/reiserfs/stree.c74
-rw-r--r--fs/reiserfs/super.c78
-rw-r--r--fs/reiserfs/xattr.c46
-rw-r--r--fs/reiserfs/xattr_acl.c16
-rw-r--r--fs/stat.c11
-rw-r--r--fs/super.c18
-rw-r--r--fs/sysfs/bin.c13
-rw-r--r--fs/sysfs/dir.c41
-rw-r--r--fs/sysfs/file.c82
-rw-r--r--fs/sysfs/group.c92
-rw-r--r--fs/sysfs/inode.c21
-rw-r--r--fs/sysfs/mount.c2
-rw-r--r--fs/sysfs/symlink.c18
-rw-r--r--fs/sysfs/sysfs.h18
-rw-r--r--fs/udf/super.c342
-rw-r--r--fs/xfs/xfs_aops.c28
-rw-r--r--fs/xfs/xfs_aops.h3
-rw-r--r--fs/xfs/xfs_dinode.h3
-rw-r--r--fs/xfs/xfs_inode.c31
-rw-r--r--fs/xfs/xfs_log_recover.c13
-rw-r--r--include/acpi/acpi_bus.h42
-rw-r--r--include/acpi/acpi_drivers.h14
-rw-r--r--include/acpi/acpixf.h8
-rw-r--r--include/acpi/actypes.h21
-rw-r--r--include/acpi/video.h11
-rw-r--r--include/asm-generic/pgtable.h34
-rw-r--r--include/asm-generic/tlb.h2
-rw-r--r--include/asm-generic/vmlinux.lds.h7
-rw-r--r--include/asm-generic/vtime.h0
-rw-r--r--include/clocksource/arm_arch_timer.h10
-rw-r--r--include/drm/drmP.h254
-rw-r--r--include/drm/drm_agpsupport.h194
-rw-r--r--include/drm/drm_crtc.h85
-rw-r--r--include/drm/drm_dp_helper.h31
-rw-r--r--include/drm/drm_edid.h5
-rw-r--r--include/drm/drm_fb_cma_helper.h1
-rw-r--r--include/drm/drm_fixed.h14
-rw-r--r--include/drm/drm_flip_work.h76
-rw-r--r--include/drm/drm_gem_cma_helper.h8
-rw-r--r--include/drm/drm_mm.h142
-rw-r--r--include/drm/drm_pciids.h48
-rw-r--r--include/drm/drm_vma_manager.h257
-rw-r--r--include/drm/exynos_drm.h3
-rw-r--r--include/drm/i2c/tda998x.h30
-rw-r--r--include/drm/ttm/ttm_bo_api.h15
-rw-r--r--include/drm/ttm/ttm_bo_driver.h10
-rw-r--r--include/dt-bindings/clock/vf610-clock.h4
-rw-r--r--include/dt-bindings/pinctrl/am33xx.h2
-rw-r--r--include/dt-bindings/pwm/pwm.h14
-rw-r--r--include/dt-bindings/sound/fsl-imx-audmux.h56
-rw-r--r--include/kvm/arm_vgic.h2
-rw-r--r--include/linux/acpi.h8
-rw-r--r--include/linux/ata.h123
-rw-r--r--include/linux/atmel-ssc.h2
-rw-r--r--include/linux/atmel_serial.h2
-rw-r--r--include/linux/bcma/bcma.h17
-rw-r--r--include/linux/bcma/bcma_driver_pci.h24
-rw-r--r--include/linux/buffer_head.h2
-rw-r--r--include/linux/can/platform/mcp251x.h15
-rw-r--r--include/linux/cgroup.h303
-rw-r--r--include/linux/compat.h7
-rw-r--r--include/linux/context_tracking.h128
-rw-r--r--include/linux/context_tracking_state.h39
-rw-r--r--include/linux/cpu.h5
-rw-r--r--include/linux/cpufreq.h388
-rw-r--r--include/linux/cpuidle.h9
-rw-r--r--include/linux/dcache.h40
-rw-r--r--include/linux/debugfs.h7
-rw-r--r--include/linux/debugobjects.h6
-rw-r--r--include/linux/device.h33
-rw-r--r--include/linux/dm9000.h4
-rw-r--r--include/linux/dma-contiguous.h2
-rw-r--r--include/linux/dma-mapping.h5
-rw-r--r--include/linux/err.h5
-rw-r--r--include/linux/etherdevice.h15
-rw-r--r--include/linux/extcon/of_extcon.h31
-rw-r--r--include/linux/f2fs_fs.h20
-rw-r--r--include/linux/firewire.h1
-rw-r--r--include/linux/fs.h10
-rw-r--r--include/linux/fs_enet_pd.h6
-rw-r--r--include/linux/ftrace_event.h46
-rw-r--r--include/linux/hardirq.h117
-rw-r--r--include/linux/hdmi.h53
-rw-r--r--include/linux/hid-sensor-hub.h2
-rw-r--r--include/linux/hid-sensor-ids.h2
-rw-r--r--include/linux/hid.h16
-rw-r--r--include/linux/hidraw.h1
-rw-r--r--include/linux/hyperv.h31
-rw-r--r--include/linux/i2c.h24
-rw-r--r--include/linux/i2c/i2c-hid.h3
-rw-r--r--include/linux/i2c/pxa-i2c.h3
-rw-r--r--include/linux/ieee80211.h72
-rw-r--r--include/linux/if_team.h14
-rw-r--r--include/linux/igmp.h1
-rw-r--r--include/linux/iio/common/st_sensors.h14
-rw-r--r--include/linux/iio/iio.h54
-rw-r--r--include/linux/iio/sysfs.h5
-rw-r--r--include/linux/iio/trigger.h3
-rw-r--r--include/linux/inetdevice.h34
-rw-r--r--include/linux/ipv6.h4
-rw-r--r--include/linux/jbd.h17
-rw-r--r--include/linux/jiffies.h8
-rw-r--r--include/linux/jump_label.h28
-rw-r--r--include/linux/jump_label_ratelimit.h34
-rw-r--r--include/linux/kbd_kern.h3
-rw-r--r--include/linux/kernel.h2
-rw-r--r--include/linux/kobject.h4
-rw-r--r--include/linux/kvm_host.h13
-rw-r--r--include/linux/libata.h120
-rw-r--r--include/linux/llist.h23
-rw-r--r--include/linux/lockdep.h92
-rw-r--r--include/linux/lockref.h36
-rw-r--r--include/linux/memcontrol.h2
-rw-r--r--include/linux/memory.h14
-rw-r--r--include/linux/mfd/arizona/gpio.h96
-rw-r--r--include/linux/mfd/palmas.h52
-rw-r--r--include/linux/mfd/samsung/s2mps11.h11
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h137
-rw-r--r--include/linux/mfd/ti_am335x_tscadc.h16
-rw-r--r--include/linux/mfd/tps65217.h19
-rw-r--r--include/linux/mlx4/cmd.h1
-rw-r--r--include/linux/mlx4/device.h17
-rw-r--r--include/linux/mlx4/qp.h5
-rw-r--r--include/linux/mlx5/device.h42
-rw-r--r--include/linux/mlx5/driver.h11
-rw-r--r--include/linux/mm.h1
-rw-r--r--include/linux/mm_types.h1
-rw-r--r--include/linux/mod_devicetable.h7
-rw-r--r--include/linux/module.h6
-rw-r--r--include/linux/moduleparam.h13
-rw-r--r--include/linux/mv643xx_eth.h3
-rw-r--r--include/linux/namei.h1
-rw-r--r--include/linux/netdevice.h56
-rw-r--r--include/linux/netfilter.h15
-rw-r--r--include/linux/nodemask.h11
-rw-r--r--include/linux/nsproxy.h6
-rw-r--r--include/linux/of.h14
-rw-r--r--include/linux/of_device.h15
-rw-r--r--include/linux/of_i2c.h46
-rw-r--r--include/linux/olpc-ec.h1
-rw-r--r--include/linux/oprofile.h16
-rw-r--r--include/linux/pci-acpi.h10
-rw-r--r--include/linux/pci.h29
-rw-r--r--include/linux/pci_hotplug.h17
-rw-r--r--include/linux/pci_ids.h6
-rw-r--r--include/linux/percpu-defs.h5
-rw-r--r--include/linux/perf_event.h25
-rw-r--r--include/linux/pinctrl/pinconf-generic.h33
-rw-r--r--include/linux/pinctrl/pinconf.h6
-rw-r--r--include/linux/platform_data/asoc-s3c.h1
-rw-r--r--include/linux/platform_data/at91_adc.h4
-rw-r--r--include/linux/platform_data/brcmfmac-sdio.h6
-rw-r--r--include/linux/platform_data/camera-mx3.h4
-rw-r--r--include/linux/platform_data/camera-rcar.h25
-rw-r--r--include/linux/platform_data/efm32-spi.h14
-rw-r--r--include/linux/platform_data/max310x.h9
-rw-r--r--include/linux/platform_data/mmc-pxamci.h2
-rw-r--r--include/linux/platform_data/omap-abe-twl6040.h49
-rw-r--r--include/linux/platform_data/pinctrl-nomadik.h24
-rw-r--r--include/linux/platform_data/rcar-du.h34
-rw-r--r--include/linux/platform_data/serial-sccnxp.h3
-rw-r--r--include/linux/platform_data/simplefb.h64
-rw-r--r--include/linux/platform_data/st_sensors_pdata.h24
-rw-r--r--include/linux/platform_data/tegra_usb.h32
-rw-r--r--include/linux/platform_data/vsp1.h25
-rw-r--r--include/linux/pps_kernel.h2
-rw-r--r--include/linux/preempt_mask.h122
-rw-r--r--include/linux/printk.h2
-rw-r--r--include/linux/pstore.h6
-rw-r--r--include/linux/pxa2xx_ssp.h11
-rw-r--r--include/linux/quotaops.h15
-rw-r--r--include/linux/raid/pq.h5
-rw-r--r--include/linux/rculist.h5
-rw-r--r--include/linux/rcupdate.h26
-rw-r--r--include/linux/regmap.h12
-rw-r--r--include/linux/regulator/consumer.h32
-rw-r--r--include/linux/regulator/driver.h25
-rw-r--r--include/linux/regulator/fan53555.h1
-rw-r--r--include/linux/regulator/machine.h1
-rw-r--r--include/linux/regulator/max8660.h2
-rw-r--r--include/linux/regulator/pfuze100.h44
-rw-r--r--include/linux/sched.h18
-rw-r--r--include/linux/sh_eth.h10
-rw-r--r--include/linux/shdma-base.h4
-rw-r--r--include/linux/signal.h8
-rw-r--r--include/linux/skbuff.h21
-rw-r--r--include/linux/smsc911x.h3
-rw-r--r--include/linux/socket.h2
-rw-r--r--include/linux/spi/spi.h36
-rw-r--r--include/linux/spi/spi_bitbang.h5
-rw-r--r--include/linux/spinlock.h14
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/sunrpc/sched.h1
-rw-r--r--include/linux/swapops.h2
-rw-r--r--include/linux/syscalls.h5
-rw-r--r--include/linux/sysfs.h36
-rw-r--r--include/linux/tcp.h2
-rw-r--r--include/linux/tick.h49
-rw-r--r--include/linux/tty.h66
-rw-r--r--include/linux/tty_flip.h8
-rw-r--r--include/linux/tty_ldisc.h16
-rw-r--r--include/linux/usb.h43
-rw-r--r--include/linux/usb/chipidea.h7
-rw-r--r--include/linux/usb/dwc3-omap.h30
-rw-r--r--include/linux/usb/gadget.h4
-rw-r--r--include/linux/usb/hcd.h19
-rw-r--r--include/linux/usb/of.h10
-rw-r--r--include/linux/usb/phy.h18
-rw-r--r--include/linux/usb/tegra_usb_phy.h40
-rw-r--r--include/linux/usb/usb_phy_gen_xceiv.h (renamed from include/linux/usb/nop-usb-xceiv.h)4
-rw-r--r--include/linux/usb/usbnet.h4
-rw-r--r--include/linux/usb/wusb-wa.h1
-rw-r--r--include/linux/user_namespace.h1
-rw-r--r--include/linux/uwb/spec.h5
-rw-r--r--include/linux/vga_switcheroo.h13
-rw-r--r--include/linux/vmpressure.h9
-rw-r--r--include/linux/vtime.h74
-rw-r--r--include/linux/wait.h57
-rw-r--r--include/linux/workqueue.h7
-rw-r--r--include/media/adv7343.h20
-rw-r--r--include/media/adv7511.h48
-rw-r--r--include/media/adv7842.h226
-rw-r--r--include/media/davinci/vpif_types.h4
-rw-r--r--include/media/lirc_dev.h1
-rw-r--r--include/media/media-entity.h4
-rw-r--r--include/media/mt9v032.h4
-rw-r--r--include/media/rc-core.h4
-rw-r--r--include/media/saa7115.h77
-rw-r--r--include/media/smiapp.h1
-rw-r--r--include/media/tea575x.h (renamed from include/sound/tea575x-tuner.h)1
-rw-r--r--include/media/tveeprom.h4
-rw-r--r--include/media/v4l2-async.h36
-rw-r--r--include/media/v4l2-common.h14
-rw-r--r--include/media/v4l2-ctrls.h1
-rw-r--r--include/media/v4l2-dv-timings.h161
-rw-r--r--include/media/v4l2-mediabus.h3
-rw-r--r--include/media/v4l2-mem2mem.h13
-rw-r--r--include/media/v4l2-subdev.h13
-rw-r--r--include/media/videobuf2-core.h11
-rw-r--r--include/net/9p/transport.h3
-rw-r--r--include/net/act_api.h60
-rw-r--r--include/net/addrconf.h180
-rw-r--r--include/net/af_rxrpc.h35
-rw-r--r--include/net/af_unix.h17
-rw-r--r--include/net/af_vsock.h (renamed from net/vmw_vsock/af_vsock.h)0
-rw-r--r--include/net/arp.h30
-rw-r--r--include/net/ax25.h215
-rw-r--r--include/net/bluetooth/bluetooth.h8
-rw-r--r--include/net/bluetooth/hci.h7
-rw-r--r--include/net/bluetooth/hci_core.h10
-rw-r--r--include/net/bluetooth/sco.h1
-rw-r--r--include/net/busy_poll.h19
-rw-r--r--include/net/cfg80211.h239
-rw-r--r--include/net/checksum.h10
-rw-r--r--include/net/cls_cgroup.h6
-rw-r--r--include/net/dst.h12
-rw-r--r--include/net/fib_rules.h14
-rw-r--r--include/net/genetlink.h20
-rw-r--r--include/net/ieee80211_radiotap.h4
-rw-r--r--include/net/if_inet6.h9
-rw-r--r--include/net/ip.h12
-rw-r--r--include/net/ip6_fib.h2
-rw-r--r--include/net/ip6_route.h4
-rw-r--r--include/net/ip6_tunnel.h2
-rw-r--r--include/net/ip_tunnels.h25
-rw-r--r--include/net/ipv6.h3
-rw-r--r--include/net/irda/irlan_common.h3
-rw-r--r--include/net/llc_if.h30
-rw-r--r--include/net/mac80211.h193
-rw-r--r--include/net/mld.h51
-rw-r--r--include/net/ndisc.h7
-rw-r--r--include/net/neighbour.h98
-rw-r--r--include/net/net_namespace.h37
-rw-r--r--include/net/netfilter/nf_conntrack.h9
-rw-r--r--include/net/netfilter/nf_conntrack_extend.h6
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h7
-rw-r--r--include/net/netfilter/nf_conntrack_seqadj.h51
-rw-r--r--include/net/netfilter/nf_conntrack_synproxy.h77
-rw-r--r--include/net/netfilter/nf_nat.h10
-rw-r--r--include/net/netfilter/nf_nat_helper.h19
-rw-r--r--include/net/netfilter/nf_tproxy_core.h210
-rw-r--r--include/net/netfilter/nfnetlink_queue.h8
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/netprio_cgroup.h10
-rw-r--r--include/net/nfc/hci.h2
-rw-r--r--include/net/nfc/nfc.h7
-rw-r--r--include/net/pkt_cls.h42
-rw-r--r--include/net/pkt_sched.h53
-rw-r--r--include/net/route.h8
-rw-r--r--include/net/sch_generic.h63
-rw-r--r--include/net/sctp/auth.h8
-rw-r--r--include/net/sctp/checksum.h23
-rw-r--r--include/net/sctp/command.h18
-rw-r--r--include/net/sctp/constants.h8
-rw-r--r--include/net/sctp/sctp.h10
-rw-r--r--include/net/sctp/sm.h8
-rw-r--r--include/net/sctp/structs.h29
-rw-r--r--include/net/sctp/tsnmap.h8
-rw-r--r--include/net/sctp/ulpevent.h8
-rw-r--r--include/net/sctp/ulpqueue.h8
-rw-r--r--include/net/sock.h31
-rw-r--r--include/net/tcp.h44
-rw-r--r--include/net/udp.h1
-rw-r--r--include/net/vsock_addr.h (renamed from net/vmw_vsock/vsock_addr.h)0
-rw-r--r--include/net/vxlan.h40
-rw-r--r--include/net/xfrm.h8
-rw-r--r--include/rdma/ib_verbs.h128
-rw-r--r--include/rdma/iw_cm.h8
-rw-r--r--include/scsi/iscsi_if.h32
-rw-r--r--include/scsi/libiscsi.h31
-rw-r--r--include/scsi/scsi.h3
-rw-r--r--include/scsi/scsi_device.h13
-rw-r--r--include/sound/core.h8
-rw-r--r--include/sound/pxa2xx-lib.h7
-rw-r--r--include/sound/rcar_snd.h84
-rw-r--r--include/sound/soc-dapm.h201
-rw-r--r--include/sound/soc-dpcm.h2
-rw-r--r--include/sound/soc.h47
-rw-r--r--include/trace/events/context_tracking.h58
-rw-r--r--include/trace/events/ext4.h29
-rw-r--r--include/trace/events/power.h37
-rw-r--r--include/trace/events/rcu.h82
-rw-r--r--include/trace/events/sched.h22
-rw-r--r--include/trace/ftrace.h33
-rw-r--r--include/uapi/drm/Kbuild1
-rw-r--r--include/uapi/drm/drm.h3
-rw-r--r--include/uapi/drm/drm_mode.h16
-rw-r--r--include/uapi/drm/i915_drm.h49
-rw-r--r--include/uapi/drm/msm_drm.h207
-rw-r--r--include/uapi/drm/radeon_drm.h2
-rw-r--r--include/uapi/linux/Kbuild1
-rw-r--r--include/uapi/linux/can/gw.h9
-rw-r--r--include/uapi/linux/cm4000_cs.h1
-rw-r--r--include/uapi/linux/dn.h3
-rw-r--r--include/uapi/linux/fib_rules.h4
-rw-r--r--include/uapi/linux/fiemap.h1
-rw-r--r--include/uapi/linux/firewire-cdev.h4
-rw-r--r--include/uapi/linux/icmpv6.h2
-rw-r--r--include/uapi/linux/if_bridge.h3
-rw-r--r--include/uapi/linux/if_link.h3
-rw-r--r--include/uapi/linux/if_packet.h1
-rw-r--r--include/uapi/linux/if_pppox.h2
-rw-r--r--include/uapi/linux/if_tun.h6
-rw-r--r--include/uapi/linux/in.h49
-rw-r--r--include/uapi/linux/in6.h36
-rw-r--r--include/uapi/linux/input.h25
-rw-r--r--include/uapi/linux/ip.h36
-rw-r--r--include/uapi/linux/ipv6.h3
-rw-r--r--include/uapi/linux/kvm.h1
-rw-r--r--include/uapi/linux/kvm_para.h1
-rw-r--r--include/uapi/linux/libc-compat.h103
-rw-r--r--include/uapi/linux/netfilter/Kbuild2
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_common.h3
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_conntrack.h15
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_queue.h1
-rw-r--r--include/uapi/linux/netfilter/xt_HMARK.h (renamed from include/linux/netfilter/xt_HMARK.h)0
-rw-r--r--include/uapi/linux/netfilter/xt_SYNPROXY.h16
-rw-r--r--include/uapi/linux/netfilter/xt_rpfilter.h (renamed from include/linux/netfilter/xt_rpfilter.h)0
-rw-r--r--include/uapi/linux/netfilter_bridge/ebt_802_3.h5
-rw-r--r--include/uapi/linux/netfilter_ipv4/ipt_CLUSTERIP.h3
-rw-r--r--include/uapi/linux/nfc.h26
-rw-r--r--include/uapi/linux/nl80211.h199
-rw-r--r--include/uapi/linux/openvswitch.h26
-rw-r--r--include/uapi/linux/pci_regs.h113
-rw-r--r--include/uapi/linux/perf_event.h123
-rw-r--r--include/uapi/linux/pkt_sched.h51
-rw-r--r--include/uapi/linux/sctp.h2
-rw-r--r--include/uapi/linux/serial_core.h3
-rw-r--r--include/uapi/linux/snmp.h6
-rw-r--r--include/uapi/linux/tcp.h1
-rw-r--r--include/uapi/linux/uhid.h4
-rw-r--r--include/uapi/linux/usb/ch11.h11
-rw-r--r--include/uapi/linux/v4l2-controls.h29
-rw-r--r--include/uapi/linux/v4l2-dv-timings.h8
-rw-r--r--include/uapi/linux/v4l2-mediabus.h6
-rw-r--r--include/uapi/linux/videodev2.h12
-rw-r--r--include/uapi/linux/virtio_net.h6
-rw-r--r--include/uapi/linux/wimax/i2400m.h4
-rw-r--r--include/uapi/rdma/ib_user_verbs.h99
-rw-r--r--include/uapi/sound/hdspm.h2
-rw-r--r--include/video/da8xx-fb.h5
-rw-r--r--include/video/omap-panel-data.h118
-rw-r--r--include/video/omapdss.h106
-rw-r--r--include/xen/acpi.h8
-rw-r--r--include/xen/balloon.h3
-rw-r--r--include/xen/interface/io/tpmif.h52
-rw-r--r--include/xen/interface/platform.h7
-rw-r--r--include/xen/interface/vcpu.h2
-rw-r--r--init/Kconfig31
-rw-r--r--init/main.c2
-rw-r--r--ipc/msg.c17
-rw-r--r--kernel/Makefile3
-rw-r--r--kernel/cgroup.c1656
-rw-r--r--kernel/cgroup_freezer.c155
-rw-r--r--kernel/context_tracking.c125
-rw-r--r--kernel/cpu.c9
-rw-r--r--kernel/cpuset.c337
-rw-r--r--kernel/events/callchain.c3
-rw-r--r--kernel/events/core.c410
-rw-r--r--kernel/fork.c11
-rw-r--r--kernel/freezer.c2
-rw-r--r--kernel/hung_task.c13
-rw-r--r--kernel/jump_label.c1
-rw-r--r--kernel/lglock.c12
-rw-r--r--kernel/module.c30
-rw-r--r--kernel/mutex.c47
-rw-r--r--kernel/nsproxy.c27
-rw-r--r--kernel/params.c22
-rw-r--r--kernel/pid_namespace.c4
-rw-r--r--kernel/power/hibernate.c2
-rw-r--r--kernel/power/process.c11
-rw-r--r--kernel/power/qos.c20
-rw-r--r--kernel/power/suspend.c4
-rw-r--r--kernel/printk/Makefile2
-rw-r--r--kernel/printk/braille.c49
-rw-r--r--kernel/printk/braille.h48
-rw-r--r--kernel/printk/console_cmdline.h14
-rw-r--r--kernel/printk/printk.c (renamed from kernel/printk.c)190
-rw-r--r--kernel/ptrace.c1
-rw-r--r--kernel/rcu.h12
-rw-r--r--kernel/rcupdate.c102
-rw-r--r--kernel/rcutiny.c2
-rw-r--r--kernel/rcutiny_plugin.h2
-rw-r--r--kernel/rcutorture.c396
-rw-r--r--kernel/rcutree.c255
-rw-r--r--kernel/rcutree.h19
-rw-r--r--kernel/rcutree_plugin.h460
-rw-r--r--kernel/sched/core.c252
-rw-r--r--kernel/sched/cpuacct.c51
-rw-r--r--kernel/sched/cpupri.c4
-rw-r--r--kernel/sched/cputime.c74
-rw-r--r--kernel/sched/fair.c630
-rw-r--r--kernel/sched/sched.h14
-rw-r--r--kernel/smp.c16
-rw-r--r--kernel/sysctl.c6
-rw-r--r--kernel/time/Kconfig51
-rw-r--r--kernel/time/sched_clock.c2
-rw-r--r--kernel/time/tick-sched.c69
-rw-r--r--kernel/time/timer_list.c41
-rw-r--r--kernel/trace/ftrace.c105
-rw-r--r--kernel/trace/trace.c224
-rw-r--r--kernel/trace/trace.h11
-rw-r--r--kernel/trace/trace_events.c200
-rw-r--r--kernel/trace/trace_events_filter.c17
-rw-r--r--kernel/trace/trace_kprobe.c21
-rw-r--r--kernel/trace/trace_printk.c19
-rw-r--r--kernel/trace/trace_uprobe.c51
-rw-r--r--kernel/user_namespace.c17
-rw-r--r--kernel/wait.c3
-rw-r--r--kernel/watchdog.c8
-rw-r--r--kernel/workqueue.c94
-rw-r--r--lib/Kconfig10
-rw-r--r--lib/Kconfig.debug19
-rw-r--r--lib/Makefile1
-rw-r--r--lib/debugobjects.c20
-rw-r--r--lib/dump_stack.c4
-rw-r--r--lib/dynamic_debug.c2
-rw-r--r--lib/earlycpio.c27
-rw-r--r--lib/kobject.c22
-rw-r--r--lib/lockref.c128
-rw-r--r--lib/lz4/lz4_compress.c4
-rw-r--r--lib/lz4/lz4_decompress.c6
-rw-r--r--lib/lz4/lz4hc_compress.c4
-rw-r--r--lib/raid6/.gitignore1
-rw-r--r--lib/raid6/Makefile40
-rw-r--r--lib/raid6/algos.c6
-rw-r--r--lib/raid6/neon.c58
-rw-r--r--lib/raid6/neon.uc80
-rw-r--r--lib/raid6/test/Makefile26
-rw-r--r--lib/swiotlb.c8
-rw-r--r--lib/vsprintf.c82
-rw-r--r--mm/Kconfig24
-rw-r--r--mm/backing-dev.c19
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/fremap.c11
-rw-r--r--mm/huge_memory.c4
-rw-r--r--mm/hugetlb.c2
-rw-r--r--mm/hugetlb_cgroup.c69
-rw-r--r--mm/memcontrol.c229
-rw-r--r--mm/memory-failure.c5
-rw-r--r--mm/memory.c50
-rw-r--r--mm/mempolicy.c6
-rw-r--r--mm/mmap.c6
-rw-r--r--mm/mremap.c21
-rw-r--r--mm/oom_kill.c6
-rw-r--r--mm/rmap.c17
-rw-r--r--mm/shmem.c18
-rw-r--r--mm/slab.h2
-rw-r--r--mm/slub.c3
-rw-r--r--mm/swap.c29
-rw-r--r--mm/swapfile.c19
-rw-r--r--mm/vmpressure.c53
-rw-r--r--mm/zbud.c2
-rw-r--r--net/8021q/vlan.c13
-rw-r--r--net/8021q/vlan_core.c7
-rw-r--r--net/8021q/vlan_dev.c8
-rw-r--r--net/9p/client.c9
-rw-r--r--net/9p/trans_rdma.c11
-rw-r--r--net/Kconfig4
-rw-r--r--net/appletalk/atalk_proc.c2
-rw-r--r--net/batman-adv/bat_iv_ogm.c32
-rw-r--r--net/batman-adv/bridge_loop_avoidance.c2
-rw-r--r--net/batman-adv/gateway_client.c40
-rw-r--r--net/batman-adv/gateway_client.h4
-rw-r--r--net/batman-adv/icmp_socket.c1
-rw-r--r--net/batman-adv/main.c58
-rw-r--r--net/batman-adv/main.h5
-rw-r--r--net/batman-adv/routing.c20
-rw-r--r--net/batman-adv/send.c1
-rw-r--r--net/batman-adv/soft-interface.c11
-rw-r--r--net/batman-adv/sysfs.c4
-rw-r--r--net/batman-adv/translation-table.c5
-rw-r--r--net/batman-adv/unicast.c25
-rw-r--r--net/batman-adv/vis.c2
-rw-r--r--net/bluetooth/hci_conn.c62
-rw-r--r--net/bluetooth/hci_core.c40
-rw-r--r--net/bluetooth/hci_event.c29
-rw-r--r--net/bluetooth/hci_sysfs.c2
-rw-r--r--net/bluetooth/hidp/core.c55
-rw-r--r--net/bluetooth/l2cap_core.c3
-rw-r--r--net/bluetooth/rfcomm/tty.c271
-rw-r--r--net/bluetooth/sco.c85
-rw-r--r--net/bridge/br_device.c15
-rw-r--r--net/bridge/br_fdb.c10
-rw-r--r--net/bridge/br_if.c6
-rw-r--r--net/bridge/br_input.c3
-rw-r--r--net/bridge/br_mdb.c20
-rw-r--r--net/bridge/br_multicast.c294
-rw-r--r--net/bridge/br_netlink.c4
-rw-r--r--net/bridge/br_notify.c5
-rw-r--r--net/bridge/br_private.h79
-rw-r--r--net/bridge/br_sysfs_br.c2
-rw-r--r--net/bridge/br_vlan.c4
-rw-r--r--net/bridge/netfilter/ebtable_broute.c2
-rw-r--r--net/bridge/netfilter/ebtable_filter.c2
-rw-r--r--net/bridge/netfilter/ebtable_nat.c2
-rw-r--r--net/caif/cfctrl.c3
-rw-r--r--net/can/gw.c35
-rw-r--r--net/ceph/messenger.c2
-rw-r--r--net/core/datagram.c72
-rw-r--r--net/core/dev.c371
-rw-r--r--net/core/fib_rules.c25
-rw-r--r--net/core/flow_dissector.c18
-rw-r--r--net/core/iovec.c24
-rw-r--r--net/core/neighbour.c41
-rw-r--r--net/core/net-sysfs.c157
-rw-r--r--net/core/netprio_cgroup.c72
-rw-r--r--net/core/pktgen.c61
-rw-r--r--net/core/rtnetlink.c33
-rw-r--r--net/core/scm.c2
-rw-r--r--net/core/skbuff.c24
-rw-r--r--net/core/sock.c172
-rw-r--r--net/core/stream.c2
-rw-r--r--net/core/sysctl_net_core.c38
-rw-r--r--net/dccp/proto.c4
-rw-r--r--net/dsa/slave.c2
-rw-r--r--net/ieee802154/6lowpan.c286
-rw-r--r--net/ieee802154/6lowpan.h20
-rw-r--r--net/ieee802154/wpan-class.c23
-rw-r--r--net/ipv4/Kconfig16
-rw-r--r--net/ipv4/af_inet.c12
-rw-r--r--net/ipv4/arp.c2
-rw-r--r--net/ipv4/devinet.c21
-rw-r--r--net/ipv4/esp4.c2
-rw-r--r--net/ipv4/fib_rules.c25
-rw-r--r--net/ipv4/fib_trie.c7
-rw-r--r--net/ipv4/igmp.c80
-rw-r--r--net/ipv4/ip_gre.c6
-rw-r--r--net/ipv4/ip_input.c8
-rw-r--r--net/ipv4/ip_output.c8
-rw-r--r--net/ipv4/ip_tunnel.c71
-rw-r--r--net/ipv4/ip_tunnel_core.c14
-rw-r--r--net/ipv4/ip_vti.c528
-rw-r--r--net/ipv4/ipip.c8
-rw-r--r--net/ipv4/ipmr.c18
-rw-r--r--net/ipv4/netfilter/Kconfig13
-rw-r--r--net/ipv4/netfilter/Makefile1
-rw-r--r--net/ipv4/netfilter/arptable_filter.c2
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c2
-rw-r--r--net/ipv4/netfilter/ipt_REJECT.c21
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c476
-rw-r--r--net/ipv4/netfilter/iptable_filter.c2
-rw-r--r--net/ipv4/netfilter/iptable_mangle.c2
-rw-r--r--net/ipv4/netfilter/iptable_nat.c2
-rw-r--r--net/ipv4/netfilter/iptable_raw.c2
-rw-r--r--net/ipv4/netfilter/iptable_security.c2
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c7
-rw-r--r--net/ipv4/ping.c2
-rw-r--r--net/ipv4/proc.c9
-rw-r--r--net/ipv4/raw.c5
-rw-r--r--net/ipv4/route.c24
-rw-r--r--net/ipv4/syncookies.c29
-rw-r--r--net/ipv4/sysctl_net_ipv4.c23
-rw-r--r--net/ipv4/tcp.c53
-rw-r--r--net/ipv4/tcp_cubic.c12
-rw-r--r--net/ipv4/tcp_fastopen.c13
-rw-r--r--net/ipv4/tcp_input.c210
-rw-r--r--net/ipv4/tcp_ipv4.c32
-rw-r--r--net/ipv4/tcp_memcontrol.c12
-rw-r--r--net/ipv4/tcp_metrics.c42
-rw-r--r--net/ipv4/tcp_minisocks.c8
-rw-r--r--net/ipv4/tcp_output.c9
-rw-r--r--net/ipv4/tcp_probe.c87
-rw-r--r--net/ipv4/udp.c18
-rw-r--r--net/ipv4/xfrm4_output.c16
-rw-r--r--net/ipv4/xfrm4_state.c1
-rw-r--r--net/ipv6/addrconf.c212
-rw-r--r--net/ipv6/addrconf_core.c50
-rw-r--r--net/ipv6/addrlabel.c48
-rw-r--r--net/ipv6/af_inet6.c15
-rw-r--r--net/ipv6/ah6.c2
-rw-r--r--net/ipv6/datagram.c2
-rw-r--r--net/ipv6/esp6.c4
-rw-r--r--net/ipv6/fib6_rules.c37
-rw-r--r--net/ipv6/icmp.c12
-rw-r--r--net/ipv6/ip6_fib.c57
-rw-r--r--net/ipv6/ip6_gre.c19
-rw-r--r--net/ipv6/ip6_input.c6
-rw-r--r--net/ipv6/ip6_offload.c4
-rw-r--r--net/ipv6/ip6_output.c28
-rw-r--r--net/ipv6/ip6_tunnel.c52
-rw-r--r--net/ipv6/ip6mr.c19
-rw-r--r--net/ipv6/ipcomp6.c2
-rw-r--r--net/ipv6/mcast.c289
-rw-r--r--net/ipv6/ndisc.c51
-rw-r--r--net/ipv6/netfilter/Kconfig13
-rw-r--r--net/ipv6/netfilter/Makefile3
-rw-r--r--net/ipv6/netfilter/ip6t_MASQUERADE.c2
-rw-r--r--net/ipv6/netfilter/ip6t_REJECT.c20
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c499
-rw-r--r--net/ipv6/netfilter/ip6table_filter.c2
-rw-r--r--net/ipv6/netfilter/ip6table_mangle.c2
-rw-r--r--net/ipv6/netfilter/ip6table_nat.c2
-rw-r--r--net/ipv6/netfilter/ip6table_raw.c2
-rw-r--r--net/ipv6/netfilter/ip6table_security.c2
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c7
-rw-r--r--net/ipv6/output_core.c48
-rw-r--r--net/ipv6/proc.c4
-rw-r--r--net/ipv6/raw.c10
-rw-r--r--net/ipv6/reassembly.c5
-rw-r--r--net/ipv6/route.c128
-rw-r--r--net/ipv6/sit.c26
-rw-r--r--net/ipv6/syncookies.c25
-rw-r--r--net/ipv6/tcp_ipv6.c15
-rw-r--r--net/ipv6/udp_offload.c105
-rw-r--r--net/ipv6/xfrm6_output.c21
-rw-r--r--net/ipv6/xfrm6_state.c1
-rw-r--r--net/ipx/ipx_proc.c2
-rw-r--r--net/irda/irttp.c50
-rw-r--r--net/key/af_key.c18
-rw-r--r--net/llc/af_llc.c6
-rw-r--r--net/llc/llc_conn.c6
-rw-r--r--net/llc/llc_proc.c2
-rw-r--r--net/llc/llc_sap.c4
-rw-r--r--net/mac80211/cfg.c249
-rw-r--r--net/mac80211/chan.c58
-rw-r--r--net/mac80211/debugfs_sta.c9
-rw-r--r--net/mac80211/driver-ops.h13
-rw-r--r--net/mac80211/ht.c53
-rw-r--r--net/mac80211/ibss.c380
-rw-r--r--net/mac80211/ieee80211_i.h70
-rw-r--r--net/mac80211/iface.c30
-rw-r--r--net/mac80211/key.c154
-rw-r--r--net/mac80211/led.c19
-rw-r--r--net/mac80211/led.h4
-rw-r--r--net/mac80211/main.c18
-rw-r--r--net/mac80211/mesh.c10
-rw-r--r--net/mac80211/mesh_plink.c2
-rw-r--r--net/mac80211/mesh_ps.c4
-rw-r--r--net/mac80211/mlme.c172
-rw-r--r--net/mac80211/pm.c7
-rw-r--r--net/mac80211/rate.c69
-rw-r--r--net/mac80211/rate.h22
-rw-r--r--net/mac80211/rc80211_minstrel.c36
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c30
-rw-r--r--net/mac80211/rc80211_pid_algo.c1
-rw-r--r--net/mac80211/rx.c514
-rw-r--r--net/mac80211/scan.c72
-rw-r--r--net/mac80211/status.c90
-rw-r--r--net/mac80211/trace.h26
-rw-r--r--net/mac80211/tx.c122
-rw-r--r--net/mac80211/util.c218
-rw-r--r--net/netfilter/Kconfig26
-rw-r--r--net/netfilter/Makefile6
-rw-r--r--net/netfilter/core.c7
-rw-r--r--net/netfilter/ipvs/ip_vs_lblcr.c8
-rw-r--r--net/netfilter/ipvs/ip_vs_proto_sctp.c23
-rw-r--r--net/netfilter/ipvs/ip_vs_sh.c6
-rw-r--r--net/netfilter/nf_conntrack_core.c89
-rw-r--r--net/netfilter/nf_conntrack_expect.c5
-rw-r--r--net/netfilter/nf_conntrack_labels.c4
-rw-r--r--net/netfilter/nf_conntrack_netlink.c384
-rw-r--r--net/netfilter/nf_conntrack_proto.c4
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c48
-rw-r--r--net/netfilter/nf_conntrack_seqadj.c238
-rw-r--r--net/netfilter/nf_nat_core.c22
-rw-r--r--net/netfilter/nf_nat_helper.c230
-rw-r--r--net/netfilter/nf_nat_proto_sctp.c8
-rw-r--r--net/netfilter/nf_nat_sip.c3
-rw-r--r--net/netfilter/nf_synproxy_core.c432
-rw-r--r--net/netfilter/nf_tproxy_core.c62
-rw-r--r--net/netfilter/nfnetlink_log.c6
-rw-r--r--net/netfilter/nfnetlink_queue_core.c16
-rw-r--r--net/netfilter/nfnetlink_queue_ct.c23
-rw-r--r--net/netfilter/xt_TCPMSS.c30
-rw-r--r--net/netfilter/xt_TCPOPTSTRIP.c10
-rw-r--r--net/netfilter/xt_TPROXY.c169
-rw-r--r--net/netfilter/xt_addrtype.c2
-rw-r--r--net/netfilter/xt_socket.c76
-rw-r--r--net/netlabel/netlabel_cipso_v4.c4
-rw-r--r--net/netlabel/netlabel_domainhash.c104
-rw-r--r--net/netlabel/netlabel_domainhash.h46
-rw-r--r--net/netlabel/netlabel_kapi.c88
-rw-r--r--net/netlabel/netlabel_mgmt.c44
-rw-r--r--net/netlabel/netlabel_unlabeled.c2
-rw-r--r--net/netlink/af_netlink.c101
-rw-r--r--net/netlink/af_netlink.h3
-rw-r--r--net/netlink/genetlink.c71
-rw-r--r--net/nfc/core.c38
-rw-r--r--net/nfc/hci/core.c10
-rw-r--r--net/nfc/nci/Kconfig1
-rw-r--r--net/nfc/netlink.c105
-rw-r--r--net/nfc/nfc.h7
-rw-r--r--net/openvswitch/Kconfig14
-rw-r--r--net/openvswitch/Makefile9
-rw-r--r--net/openvswitch/actions.c46
-rw-r--r--net/openvswitch/datapath.c179
-rw-r--r--net/openvswitch/datapath.h6
-rw-r--r--net/openvswitch/flow.c1488
-rw-r--r--net/openvswitch/flow.h89
-rw-r--r--net/openvswitch/vport-gre.c7
-rw-r--r--net/openvswitch/vport-netdev.c20
-rw-r--r--net/openvswitch/vport-vxlan.c204
-rw-r--r--net/openvswitch/vport.c6
-rw-r--r--net/openvswitch/vport.h1
-rw-r--r--net/packet/af_packet.c67
-rw-r--r--net/phonet/socket.c2
-rw-r--r--net/rfkill/core.c90
-rw-r--r--net/rfkill/rfkill-regulator.c8
-rw-r--r--net/sched/Kconfig14
-rw-r--r--net/sched/Makefile1
-rw-r--r--net/sched/cls_cgroup.c39
-rw-r--r--net/sched/sch_api.c94
-rw-r--r--net/sched/sch_atm.c1
-rw-r--r--net/sched/sch_cbq.c1
-rw-r--r--net/sched/sch_fq.c793
-rw-r--r--net/sched/sch_generic.c28
-rw-r--r--net/sched/sch_htb.c15
-rw-r--r--net/sched/sch_mq.c2
-rw-r--r--net/sched/sch_mqprio.c2
-rw-r--r--net/sched/sch_netem.c5
-rw-r--r--net/sctp/associola.c12
-rw-r--r--net/sctp/auth.c8
-rw-r--r--net/sctp/bind_addr.c8
-rw-r--r--net/sctp/chunk.c12
-rw-r--r--net/sctp/command.c8
-rw-r--r--net/sctp/debug.c8
-rw-r--r--net/sctp/endpointola.c8
-rw-r--r--net/sctp/input.c18
-rw-r--r--net/sctp/inqueue.c8
-rw-r--r--net/sctp/ipv6.c10
-rw-r--r--net/sctp/objcnt.c8
-rw-r--r--net/sctp/output.c8
-rw-r--r--net/sctp/outqueue.c8
-rw-r--r--net/sctp/primitive.c8
-rw-r--r--net/sctp/probe.c27
-rw-r--r--net/sctp/proc.c12
-rw-r--r--net/sctp/protocol.c10
-rw-r--r--net/sctp/sm_make_chunk.c133
-rw-r--r--net/sctp/sm_sideeffect.c8
-rw-r--r--net/sctp/sm_statefuns.c8
-rw-r--r--net/sctp/sm_statetable.c8
-rw-r--r--net/sctp/socket.c8
-rw-r--r--net/sctp/ssnmap.c8
-rw-r--r--net/sctp/sysctl.c8
-rw-r--r--net/sctp/transport.c12
-rw-r--r--net/sctp/tsnmap.c8
-rw-r--r--net/sctp/ulpevent.c8
-rw-r--r--net/sctp/ulpqueue.c8
-rw-r--r--net/socket.c2
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_upcall.c3
-rw-r--r--net/sunrpc/auth_gss/gss_rpc_xdr.c9
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c2
-rw-r--r--net/sunrpc/clnt.c4
-rw-r--r--net/sunrpc/netns.h1
-rw-r--r--net/sunrpc/rpcb_clnt.c48
-rw-r--r--net/sunrpc/svcsock.c6
-rw-r--r--net/sunrpc/xdr.c9
-rw-r--r--net/sunrpc/xprtsock.c2
-rw-r--r--net/tipc/bearer.c9
-rw-r--r--net/tipc/server.c15
-rw-r--r--net/tipc/socket.c4
-rw-r--r--net/unix/af_unix.c70
-rw-r--r--net/vmw_vsock/af_vsock.c5
-rw-r--r--net/vmw_vsock/vmci_transport.c2
-rw-r--r--net/vmw_vsock/vmci_transport.h4
-rw-r--r--net/vmw_vsock/vsock_addr.c3
-rw-r--r--net/wireless/core.c10
-rw-r--r--net/wireless/core.h2
-rw-r--r--net/wireless/mesh.c5
-rw-r--r--net/wireless/mlme.c4
-rw-r--r--net/wireless/nl80211.c599
-rw-r--r--net/wireless/nl80211.h4
-rw-r--r--net/wireless/rdev-ops.h17
-rw-r--r--net/wireless/reg.c7
-rw-r--r--net/wireless/scan.c35
-rw-r--r--net/wireless/sme.c39
-rw-r--r--net/wireless/sysfs.c25
-rw-r--r--net/wireless/trace.h53
-rw-r--r--net/wireless/util.c14
-rw-r--r--net/x25/x25_facilities.c4
-rw-r--r--net/xfrm/xfrm_output.c21
-rw-r--r--net/xfrm/xfrm_policy.c21
-rw-r--r--net/xfrm/xfrm_state.c22
-rw-r--r--samples/hidraw/.gitignore1
-rw-r--r--samples/uhid/uhid-example.c123
-rw-r--r--scripts/coccinelle/api/ptr_ret.cocci10
-rw-r--r--scripts/mod/modpost.c1
-rw-r--r--security/apparmor/lsm.c2
-rw-r--r--security/device_cgroup.c65
-rw-r--r--security/selinux/include/xfrm.h7
-rw-r--r--security/smack/smack_lsm.c24
-rw-r--r--sound/arm/pxa2xx-ac97.c26
-rw-r--r--sound/arm/pxa2xx-pcm-lib.c52
-rw-r--r--sound/arm/pxa2xx-pcm.c5
-rw-r--r--sound/arm/pxa2xx-pcm.h6
-rw-r--r--sound/core/Kconfig3
-rw-r--r--sound/core/Makefile3
-rw-r--r--sound/core/compress_offload.c2
-rw-r--r--sound/core/pcm_dmaengine.c (renamed from sound/soc/soc-dmaengine-pcm.c)0
-rw-r--r--sound/core/pcm_lib.c4
-rw-r--r--sound/drivers/dummy.c2
-rw-r--r--sound/firewire/speakers.c4
-rw-r--r--sound/i2c/other/Makefile2
-rw-r--r--sound/isa/gus/interwave.c3
-rw-r--r--sound/isa/opti9xx/opti92x-ad1848.c8
-rw-r--r--sound/oss/dmabuf.c3
-rw-r--r--sound/pci/Kconfig12
-rw-r--r--sound/pci/es1968.c2
-rw-r--r--sound/pci/fm801.c2
-rw-r--r--sound/pci/hda/Kconfig9
-rw-r--r--sound/pci/hda/hda_auto_parser.c2
-rw-r--r--sound/pci/hda/hda_codec.c64
-rw-r--r--sound/pci/hda/hda_codec.h21
-rw-r--r--sound/pci/hda/hda_generic.c85
-rw-r--r--sound/pci/hda/hda_generic.h1
-rw-r--r--sound/pci/hda/hda_hwdep.c6
-rw-r--r--sound/pci/hda/hda_intel.c70
-rw-r--r--sound/pci/hda/hda_jack.c22
-rw-r--r--sound/pci/hda/hda_jack.h13
-rw-r--r--sound/pci/hda/hda_proc.c33
-rw-r--r--sound/pci/hda/patch_analog.c4528
-rw-r--r--sound/pci/hda/patch_conexant.c79
-rw-r--r--sound/pci/hda/patch_hdmi.c64
-rw-r--r--sound/pci/hda/patch_realtek.c202
-rw-r--r--sound/pci/hda/patch_sigmatel.c28
-rw-r--r--sound/pci/hda/patch_via.c2
-rw-r--r--sound/pci/rme96.c307
-rw-r--r--sound/pci/rme9652/hdspm.c779
-rw-r--r--sound/soc/Kconfig5
-rw-r--r--sound/soc/Makefile4
-rw-r--r--sound/soc/atmel/Kconfig21
-rw-r--r--sound/soc/atmel/Makefile4
-rw-r--r--sound/soc/atmel/atmel-pcm-dma.c118
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c36
-rw-r--r--sound/soc/atmel/atmel_wm8904.c254
-rw-r--r--sound/soc/atmel/sam9x5_wm8731.c208
-rw-r--r--sound/soc/au1x/ac97c.c2
-rw-r--r--sound/soc/au1x/db1200.c4
-rw-r--r--sound/soc/au1x/psc-ac97.c3
-rw-r--r--sound/soc/blackfin/bf5xx-ac97.c5
-rw-r--r--sound/soc/blackfin/bf5xx-ac97.h2
-rw-r--r--sound/soc/cirrus/ep93xx-ac97.c7
-rw-r--r--sound/soc/cirrus/ep93xx-i2s.c5
-rw-r--r--sound/soc/codecs/Kconfig19
-rw-r--r--sound/soc/codecs/Makefile8
-rw-r--r--sound/soc/codecs/ac97.c15
-rw-r--r--sound/soc/codecs/ad1980.c43
-rw-r--r--sound/soc/codecs/ad73311.c22
-rw-r--r--sound/soc/codecs/adau1701.c25
-rw-r--r--sound/soc/codecs/adav80x.c13
-rw-r--r--sound/soc/codecs/ads117x.c29
-rw-r--r--sound/soc/codecs/ak4104.c34
-rw-r--r--sound/soc/codecs/ak4554.c106
-rw-r--r--sound/soc/codecs/ak5386.c17
-rw-r--r--sound/soc/codecs/arizona.c69
-rw-r--r--sound/soc/codecs/arizona.h5
-rw-r--r--sound/soc/codecs/bt-sco.c22
-rw-r--r--sound/soc/codecs/cs4270.c20
-rw-r--r--sound/soc/codecs/cs4271.c30
-rw-r--r--sound/soc/codecs/cs42l52.c5
-rw-r--r--sound/soc/codecs/dmic.c17
-rw-r--r--sound/soc/codecs/hdmi.c30
-rw-r--r--sound/soc/codecs/lm4857.c107
-rw-r--r--sound/soc/codecs/max9768.c16
-rw-r--r--sound/soc/codecs/max98088.c2
-rw-r--r--sound/soc/codecs/max98090.c10
-rw-r--r--sound/soc/codecs/max9877.c294
-rw-r--r--sound/soc/codecs/mc13783.c1
-rw-r--r--sound/soc/codecs/pcm1681.c339
-rw-r--r--sound/soc/codecs/pcm1792a.c257
-rw-r--r--sound/soc/codecs/pcm1792a.h26
-rw-r--r--sound/soc/codecs/pcm3008.c150
-rw-r--r--sound/soc/codecs/rt5640.c235
-rw-r--r--sound/soc/codecs/rt5640.h12
-rw-r--r--sound/soc/codecs/sgtl5000.c31
-rw-r--r--sound/soc/codecs/si476x.c14
-rw-r--r--sound/soc/codecs/spdif_receiver.c17
-rw-r--r--sound/soc/codecs/spdif_transmitter.c18
-rw-r--r--sound/soc/codecs/ssm2602.c3
-rw-r--r--sound/soc/codecs/sta32x.c10
-rw-r--r--sound/soc/codecs/tlv320aic26.c51
-rw-r--r--sound/soc/codecs/tlv320aic32x4.c22
-rw-r--r--sound/soc/codecs/tlv320aic3x.c56
-rw-r--r--sound/soc/codecs/twl4030.c2
-rw-r--r--sound/soc/codecs/twl6040.c7
-rw-r--r--sound/soc/codecs/uda134x.c88
-rw-r--r--sound/soc/codecs/wl1273.c17
-rw-r--r--sound/soc/codecs/wm0010.c36
-rw-r--r--sound/soc/codecs/wm5102.c53
-rw-r--r--sound/soc/codecs/wm5110.c35
-rw-r--r--sound/soc/codecs/wm8350.c6
-rw-r--r--sound/soc/codecs/wm8727.c17
-rw-r--r--sound/soc/codecs/wm8731.c60
-rw-r--r--sound/soc/codecs/wm8753.c5
-rw-r--r--sound/soc/codecs/wm8782.c17
-rw-r--r--sound/soc/codecs/wm8903.c4
-rw-r--r--sound/soc/codecs/wm8904.c3
-rw-r--r--sound/soc/codecs/wm8960.c10
-rw-r--r--sound/soc/codecs/wm8962.c9
-rw-r--r--sound/soc/codecs/wm8994.c39
-rw-r--r--sound/soc/codecs/wm8995.c5
-rw-r--r--sound/soc/codecs/wm8997.c1175
-rw-r--r--sound/soc/codecs/wm8997.h23
-rw-r--r--sound/soc/codecs/wm_adsp.c124
-rw-r--r--sound/soc/codecs/wm_adsp.h3
-rw-r--r--sound/soc/codecs/wm_hubs.c8
-rw-r--r--sound/soc/dwc/designware_i2s.c5
-rw-r--r--sound/soc/fsl/Kconfig23
-rw-r--r--sound/soc/fsl/Makefile4
-rw-r--r--sound/soc/fsl/fsl_spdif.c1225
-rw-r--r--sound/soc/fsl/fsl_spdif.h191
-rw-r--r--sound/soc/fsl/fsl_ssi.c501
-rw-r--r--sound/soc/fsl/imx-audmux.c78
-rw-r--r--sound/soc/fsl/imx-audmux.h52
-rw-r--r--sound/soc/fsl/imx-mc13783.c1
-rw-r--r--sound/soc/fsl/imx-pcm-dma.c4
-rw-r--r--sound/soc/fsl/imx-pcm-fiq.c20
-rw-r--r--sound/soc/fsl/imx-pcm.h26
-rw-r--r--sound/soc/fsl/imx-sgtl5000.c6
-rw-r--r--sound/soc/fsl/imx-spdif.c148
-rw-r--r--sound/soc/fsl/imx-ssi.c11
-rw-r--r--sound/soc/fsl/imx-ssi.h1
-rw-r--r--sound/soc/fsl/imx-wm8962.c5
-rw-r--r--sound/soc/generic/simple-card.c2
-rw-r--r--sound/soc/kirkwood/Kconfig13
-rw-r--r--sound/soc/kirkwood/Makefile4
-rw-r--r--sound/soc/kirkwood/kirkwood-dma.c108
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c93
-rw-r--r--sound/soc/kirkwood/kirkwood-openrd.c6
-rw-r--r--sound/soc/kirkwood/kirkwood-t5325.c6
-rw-r--r--sound/soc/kirkwood/kirkwood.h11
-rw-r--r--sound/soc/mxs/Kconfig3
-rw-r--r--sound/soc/mxs/mxs-saif.c1
-rw-r--r--sound/soc/mxs/mxs-sgtl5000.c32
-rw-r--r--sound/soc/nuc900/nuc900-ac97.c3
-rw-r--r--sound/soc/omap/Kconfig8
-rw-r--r--sound/soc/omap/mcbsp.c2
-rw-r--r--sound/soc/omap/omap-abe-twl6040.c133
-rw-r--r--sound/soc/omap/omap-dmic.c9
-rw-r--r--sound/soc/omap/omap-mcbsp.c5
-rw-r--r--sound/soc/omap/omap-mcpdm.c3
-rw-r--r--sound/soc/pxa/Kconfig2
-rw-r--r--sound/soc/pxa/brownstone.c1
-rw-r--r--sound/soc/pxa/mioa701_wm9713.c7
-rw-r--r--sound/soc/pxa/mmp-pcm.c7
-rw-r--r--sound/soc/pxa/mmp-sspa.c15
-rw-r--r--sound/soc/pxa/pxa-ssp.c76
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c67
-rw-r--r--sound/soc/pxa/pxa2xx-i2s.c28
-rw-r--r--sound/soc/pxa/pxa2xx-pcm.c21
-rw-r--r--sound/soc/pxa/ttc-dkb.c1
-rw-r--r--sound/soc/s6000/s6105-ipcam.c2
-rw-r--r--sound/soc/samsung/ac97.c11
-rw-r--r--sound/soc/samsung/dma.c19
-rw-r--r--sound/soc/samsung/dma.h4
-rw-r--r--sound/soc/samsung/i2s-regs.h51
-rw-r--r--sound/soc/samsung/i2s.c193
-rw-r--r--sound/soc/samsung/pcm.c4
-rw-r--r--sound/soc/samsung/s3c2412-i2s.c4
-rw-r--r--sound/soc/samsung/s3c24xx-i2s.c4
-rw-r--r--sound/soc/samsung/smdk_wm8994.c58
-rw-r--r--sound/soc/samsung/spdif.c12
-rw-r--r--sound/soc/sh/Kconfig7
-rw-r--r--sound/soc/sh/Makefile3
-rw-r--r--sound/soc/sh/fsi.c51
-rw-r--r--sound/soc/sh/rcar/Makefile2
-rw-r--r--sound/soc/sh/rcar/adg.c234
-rw-r--r--sound/soc/sh/rcar/core.c861
-rw-r--r--sound/soc/sh/rcar/gen.c280
-rw-r--r--sound/soc/sh/rcar/rsnd.h302
-rw-r--r--sound/soc/sh/rcar/scu.c236
-rw-r--r--sound/soc/sh/rcar/ssi.c728
-rw-r--r--sound/soc/soc-compress.c13
-rw-r--r--sound/soc/soc-core.c257
-rw-r--r--sound/soc/soc-dapm.c825
-rw-r--r--sound/soc/soc-io.c2
-rw-r--r--sound/soc/soc-jack.c4
-rw-r--r--sound/soc/soc-pcm.c25
-rw-r--r--sound/soc/spear/Kconfig2
-rw-r--r--sound/soc/tegra/Kconfig14
-rw-r--r--sound/soc/tegra/tegra20_ac97.c14
-rw-r--r--sound/soc/tegra/tegra20_spdif.c4
-rw-r--r--sound/soc/tegra/tegra30_i2s.c2
-rw-r--r--sound/soc/tegra/tegra_alc5632.c2
-rw-r--r--sound/soc/tegra/tegra_rt5640.c1
-rw-r--r--sound/soc/tegra/tegra_wm8753.c2
-rw-r--r--sound/soc/tegra/trimslice.c2
-rw-r--r--sound/soc/txx9/txx9aclc-ac97.c3
-rw-r--r--sound/soc/ux500/mop500.c1
-rw-r--r--sound/usb/6fire/comm.c38
-rw-r--r--sound/usb/6fire/comm.h2
-rw-r--r--sound/usb/6fire/firmware.c4
-rw-r--r--sound/usb/6fire/midi.c16
-rw-r--r--sound/usb/6fire/midi.h6
-rw-r--r--sound/usb/6fire/pcm.c43
-rw-r--r--sound/usb/6fire/pcm.h2
-rw-r--r--sound/usb/endpoint.c16
-rw-r--r--sound/usb/hiface/pcm.c2
-rw-r--r--sound/usb/mixer.c1
-rw-r--r--sound/usb/pcm.c243
-rw-r--r--sound/usb/quirks.c6
-rw-r--r--sound/usb/usx2y/usbusx2y.c8
-rw-r--r--tools/hv/hv_kvp_daemon.c49
-rw-r--r--tools/hv/hv_vss_daemon.c59
-rw-r--r--tools/lib/lk/Makefile15
-rw-r--r--tools/lib/traceevent/Makefile20
-rw-r--r--tools/lib/traceevent/event-parse.c7
-rw-r--r--tools/lib/traceevent/event-parse.h15
-rw-r--r--tools/lib/traceevent/kbuffer-parse.c732
-rw-r--r--tools/lib/traceevent/kbuffer.h67
-rw-r--r--tools/lib/traceevent/trace-seq.c13
-rw-r--r--tools/perf/Documentation/perf-diff.txt79
-rw-r--r--tools/perf/Documentation/perf-kvm.txt46
-rw-r--r--tools/perf/Documentation/perf-list.txt6
-rw-r--r--tools/perf/Documentation/perf-report.txt13
-rw-r--r--tools/perf/Documentation/perf-stat.txt5
-rw-r--r--tools/perf/Documentation/perf-top.txt5
-rw-r--r--tools/perf/Documentation/perf-trace.txt24
-rw-r--r--tools/perf/Makefile21
-rw-r--r--tools/perf/arch/x86/Makefile2
-rw-r--r--tools/perf/arch/x86/util/tsc.c59
-rw-r--r--tools/perf/arch/x86/util/tsc.h20
-rw-r--r--tools/perf/bench/mem-memcpy.c2
-rw-r--r--tools/perf/builtin-annotate.c5
-rw-r--r--tools/perf/builtin-diff.c664
-rw-r--r--tools/perf/builtin-inject.c53
-rw-r--r--tools/perf/builtin-kmem.c5
-rw-r--r--tools/perf/builtin-kvm.c754
-rw-r--r--tools/perf/builtin-list.c3
-rw-r--r--tools/perf/builtin-lock.c3
-rw-r--r--tools/perf/builtin-mem.c6
-rw-r--r--tools/perf/builtin-record.c13
-rw-r--r--tools/perf/builtin-report.c88
-rw-r--r--tools/perf/builtin-sched.c161
-rw-r--r--tools/perf/builtin-script.c42
-rw-r--r--tools/perf/builtin-stat.c24
-rw-r--r--tools/perf/builtin-timechart.c176
-rw-r--r--tools/perf/builtin-top.c36
-rw-r--r--tools/perf/builtin-trace.c721
-rw-r--r--tools/perf/config/Makefile5
-rw-r--r--tools/perf/perf.h3
-rwxr-xr-xtools/perf/python/twatch.py2
-rw-r--r--tools/perf/tests/attr/test-record-group-sampling36
-rw-r--r--tools/perf/tests/builtin-test.c18
-rw-r--r--tools/perf/tests/code-reading.c572
-rw-r--r--tools/perf/tests/dso-data.c8
-rw-r--r--tools/perf/tests/evsel-tp-sched.c4
-rw-r--r--tools/perf/tests/hists_link.c27
-rw-r--r--tools/perf/tests/keep-tracking.c154
-rw-r--r--tools/perf/tests/make67
-rw-r--r--tools/perf/tests/mmap-basic.c2
-rw-r--r--tools/perf/tests/parse-events.c190
-rw-r--r--tools/perf/tests/perf-time-to-tsc.c177
-rw-r--r--tools/perf/tests/sample-parsing.c316
-rw-r--r--tools/perf/tests/tests.h12
-rw-r--r--tools/perf/tests/vmlinux-kallsyms.c49
-rw-r--r--tools/perf/ui/browsers/annotate.c20
-rw-r--r--tools/perf/ui/browsers/hists.c18
-rw-r--r--tools/perf/ui/gtk/hists.c128
-rw-r--r--tools/perf/ui/hist.c258
-rw-r--r--tools/perf/ui/setup.c1
-rw-r--r--tools/perf/ui/stdio/hist.c45
-rw-r--r--tools/perf/util/annotate.c60
-rw-r--r--tools/perf/util/build-id.c11
-rw-r--r--tools/perf/util/callchain.c15
-rw-r--r--tools/perf/util/callchain.h11
-rw-r--r--tools/perf/util/cpumap.h2
-rw-r--r--tools/perf/util/dso.c10
-rw-r--r--tools/perf/util/dso.h17
-rw-r--r--tools/perf/util/event.c31
-rw-r--r--tools/perf/util/event.h41
-rw-r--r--tools/perf/util/evlist.c294
-rw-r--r--tools/perf/util/evlist.h21
-rw-r--r--tools/perf/util/evsel.c581
-rw-r--r--tools/perf/util/evsel.h19
-rw-r--r--tools/perf/util/header.c161
-rw-r--r--tools/perf/util/header.h40
-rw-r--r--tools/perf/util/hist.c4
-rw-r--r--tools/perf/util/hist.h26
-rw-r--r--tools/perf/util/include/linux/string.h1
-rw-r--r--tools/perf/util/machine.c155
-rw-r--r--tools/perf/util/machine.h14
-rw-r--r--tools/perf/util/map.c67
-rw-r--r--tools/perf/util/map.h13
-rw-r--r--tools/perf/util/parse-events.c174
-rw-r--r--tools/perf/util/parse-events.h11
-rw-r--r--tools/perf/util/parse-events.l4
-rw-r--r--tools/perf/util/parse-events.y62
-rw-r--r--tools/perf/util/pmu.c87
-rw-r--r--tools/perf/util/pmu.h5
-rw-r--r--tools/perf/util/python.c21
-rw-r--r--tools/perf/util/record.c108
-rw-r--r--tools/perf/util/scripting-engines/trace-event-perl.c14
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c9
-rw-r--r--tools/perf/util/session.c241
-rw-r--r--tools/perf/util/session.h14
-rw-r--r--tools/perf/util/sort.c12
-rw-r--r--tools/perf/util/sort.h13
-rw-r--r--tools/perf/util/stat.c6
-rw-r--r--tools/perf/util/stat.h9
-rw-r--r--tools/perf/util/string.c24
-rw-r--r--tools/perf/util/symbol-elf.c174
-rw-r--r--tools/perf/util/symbol-minimal.c7
-rw-r--r--tools/perf/util/symbol.c285
-rw-r--r--tools/perf/util/symbol.h5
-rw-r--r--tools/perf/util/thread.c11
-rw-r--r--tools/perf/util/thread.h23
-rw-r--r--tools/perf/util/tool.h10
-rw-r--r--tools/perf/util/top.h2
-rw-r--r--tools/perf/util/trace-event-info.c96
-rw-r--r--tools/perf/util/trace-event-parse.c6
-rw-r--r--tools/perf/util/trace-event-read.c52
-rw-r--r--tools/perf/util/trace-event-scripting.c3
-rw-r--r--tools/perf/util/trace-event.h21
-rw-r--r--tools/perf/util/unwind.c2
-rw-r--r--tools/perf/util/util.c92
-rw-r--r--tools/perf/util/util.h5
-rw-r--r--virt/kvm/arm/vgic.c22
-rw-r--r--virt/kvm/eventfd.c20
-rw-r--r--virt/kvm/kvm_main.c156
5744 files changed, 402699 insertions, 170658 deletions
diff --git a/.gitignore b/.gitignore
index 3b8b9b33be3..7e9932e5547 100644
--- a/.gitignore
+++ b/.gitignore
@@ -29,6 +29,7 @@ modules.builtin
*.bz2
*.lzma
*.xz
+*.lz4
*.lzo
*.patch
*.gcno
diff --git a/CREDITS b/CREDITS
index 33a2f2d8300..9416a9a8b95 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1119,6 +1119,7 @@ D: author of userfs filesystem
D: Improved mmap and munmap handling
D: General mm minor tidyups
D: autofs v4 maintainer
+D: Xen subsystem
S: 987 Alabama St
S: San Francisco
S: CA, 94110
diff --git a/Documentation/ABI/stable/sysfs-bus-usb b/Documentation/ABI/stable/sysfs-bus-usb
new file mode 100644
index 00000000000..2be603c52a2
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-bus-usb
@@ -0,0 +1,142 @@
+What: /sys/bus/usb/devices/.../power/persist
+Date: May 2007
+KernelVersion: 2.6.23
+Contact: Alan Stern <stern@rowland.harvard.edu>
+Description:
+ If CONFIG_USB_PERSIST is set, then each USB device directory
+ will contain a file named power/persist. The file holds a
+ boolean value (0 or 1) indicating whether or not the
+ "USB-Persist" facility is enabled for the device. Since the
+ facility is inherently dangerous, it is disabled by default
+ for all devices except hubs. For more information, see
+ Documentation/usb/persist.txt.
+
+What: /sys/bus/usb/devices/.../power/autosuspend
+Date: March 2007
+KernelVersion: 2.6.21
+Contact: Alan Stern <stern@rowland.harvard.edu>
+Description:
+ Each USB device directory will contain a file named
+ power/autosuspend. This file holds the time (in seconds)
+ the device must be idle before it will be autosuspended.
+ 0 means the device will be autosuspended as soon as
+ possible. Negative values will prevent the device from
+ being autosuspended at all, and writing a negative value
+ will resume the device if it is already suspended.
+
+ The autosuspend delay for newly-created devices is set to
+ the value of the usbcore.autosuspend module parameter.
+
+What: /sys/bus/usb/device/.../power/connected_duration
+Date: January 2008
+KernelVersion: 2.6.25
+Contact: Sarah Sharp <sarah.a.sharp@intel.com>
+Description:
+ If CONFIG_PM_RUNTIME is enabled then this file
+ is present. When read, it returns the total time (in msec)
+ that the USB device has been connected to the machine. This
+ file is read-only.
+Users:
+ PowerTOP <power@bughost.org>
+ http://www.lesswatts.org/projects/powertop/
+
+What: /sys/bus/usb/device/.../power/active_duration
+Date: January 2008
+KernelVersion: 2.6.25
+Contact: Sarah Sharp <sarah.a.sharp@intel.com>
+Description:
+ If CONFIG_PM_RUNTIME is enabled then this file
+ is present. When read, it returns the total time (in msec)
+ that the USB device has been active, i.e. not in a suspended
+ state. This file is read-only.
+
+ Tools can use this file and the connected_duration file to
+ compute the percentage of time that a device has been active.
+ For example,
+ echo $((100 * `cat active_duration` / `cat connected_duration`))
+ will give an integer percentage. Note that this does not
+ account for counter wrap.
+Users:
+ PowerTOP <power@bughost.org>
+ http://www.lesswatts.org/projects/powertop/
+
+What: /sys/bus/usb/devices/<busnum>-<port[.port]>...:<config num>-<interface num>/supports_autosuspend
+Date: January 2008
+KernelVersion: 2.6.27
+Contact: Sarah Sharp <sarah.a.sharp@intel.com>
+Description:
+ When read, this file returns 1 if the interface driver
+ for this interface supports autosuspend. It also
+ returns 1 if no driver has claimed this interface, as an
+ unclaimed interface will not stop the device from being
+ autosuspended if all other interface drivers are idle.
+ The file returns 0 if autosuspend support has not been
+ added to the driver.
+Users:
+ USB PM tool
+ git://git.moblin.org/users/sarah/usb-pm-tool/
+
+What: /sys/bus/usb/device/.../avoid_reset_quirk
+Date: December 2009
+Contact: Oliver Neukum <oliver@neukum.org>
+Description:
+ Writing 1 to this file tells the kernel that this
+ device will morph into another mode when it is reset.
+ Drivers will not use reset for error handling for
+ such devices.
+Users:
+ usb_modeswitch
+
+What: /sys/bus/usb/devices/.../devnum
+KernelVersion: since at least 2.6.18
+Description:
+ Device address on the USB bus.
+Users:
+ libusb
+
+What: /sys/bus/usb/devices/.../bConfigurationValue
+KernelVersion: since at least 2.6.18
+Description:
+ bConfigurationValue of the *active* configuration for the
+ device. Writing 0 or -1 to bConfigurationValue will reset the
+ active configuration (unconfigure the device). Writing
+ another value will change the active configuration.
+
+ Note that some devices, in violation of the USB spec, have a
+ configuration with a value equal to 0. Writing 0 to
+ bConfigurationValue for these devices will install that
+ configuration, rather than unconfigure the device.
+
+ Writing -1 will always unconfigure the device.
+Users:
+ libusb
+
+What: /sys/bus/usb/devices/.../busnum
+KernelVersion: 2.6.22
+Description:
+ Bus-number of the USB-bus the device is connected to.
+Users:
+ libusb
+
+What: /sys/bus/usb/devices/.../descriptors
+KernelVersion: 2.6.26
+Description:
+ Binary file containing cached descriptors of the device. The
+ binary data consists of the device descriptor followed by the
+ descriptors for each configuration of the device.
+ Note that the wTotalLength of the config descriptors can not
+ be trusted, as the device may have a smaller config descriptor
+ than it advertises. The bLength field of each (sub) descriptor
+ can be trusted, and can be used to seek forward one (sub)
+ descriptor at a time until the next config descriptor is found.
+ All descriptors read from this file are in bus-endian format.
+Users:
+ libusb
+
+What: /sys/bus/usb/devices/.../speed
+KernelVersion: since at least 2.6.18
+Description:
+ Speed at which the device is connected to the usb-host, in
+ Mbit / second, i.e. one of 1.5 / 12 / 480 / 5000.
+Users:
+ libusb
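
A minimal user-space sketch of the bLength walk recommended above (illustrative only; the device path "1-1" and the fixed 4 KiB buffer are placeholder assumptions, and real code would size the buffer to the file and handle short reads):

#include <stdio.h>
#include <stdint.h>

#define USB_DT_CONFIG 0x02  /* bDescriptorType of a configuration descriptor */

/* Walk the cached descriptors blob one descriptor at a time using bLength. */
static void walk_descriptors(const uint8_t *buf, size_t len)
{
    size_t off = 0;

    while (off + 2 <= len) {
        uint8_t blen  = buf[off];      /* bLength */
        uint8_t dtype = buf[off + 1];  /* bDescriptorType */

        if (blen < 2 || off + blen > len)
            break;                     /* malformed or truncated descriptor */

        if (dtype == USB_DT_CONFIG)
            printf("config descriptor at offset %zu\n", off);

        off += blen;                   /* seek forward one (sub) descriptor */
    }
}

int main(void)
{
    uint8_t buf[4096];
    size_t n;
    FILE *f = fopen("/sys/bus/usb/devices/1-1/descriptors", "rb");

    if (!f)
        return 1;
    n = fread(buf, 1, sizeof(buf), f);
    fclose(f);
    walk_descriptors(buf, n);
    return 0;
}

Stepping by bLength this way keeps the walk robust even when a device reports a wTotalLength larger than the data it actually returns.
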
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index dda81ffae5c..39c8de0e53d 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -351,6 +351,7 @@ Description:
6kohm_to_gnd: connected to ground via a 6kOhm resistor,
20kohm_to_gnd: connected to ground via a 20kOhm resistor,
100kohm_to_gnd: connected to ground via an 100kOhm resistor,
+ 500kohm_to_gnd: connected to ground via a 500kOhm resistor,
three_state: left floating.
For a list of available output power down options read
outX_powerdown_mode_available. If Y is not present the
@@ -792,3 +793,21 @@ Contact: linux-iio@vger.kernel.org
Description:
This attribute is used to read the amount of quadrature error
present in the device at a given time.
+
+What: /sys/.../iio:deviceX/in_accelX_power_mode
+KernelVersion: 3.11
+Contact: linux-iio@vger.kernel.org
+Description:
+ Specifies the chip power mode.
+ low_noise: reduce noise level from ADC,
+ low_power: enable low current consumption.
+ For a list of available output power modes read
+ in_accel_power_mode_available.
+
+What: /sys/bus/iio/devices/iio:deviceX/store_eeprom
+KernelVersion: 3.4.0
+Contact: linux-iio@vger.kernel.org
+Description:
+ Writing '1' stores the current device configuration into
+ on-chip EEPROM. After power-up or chip reset the device will
+ automatically load the saved configuration.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-frequency-ad9523 b/Documentation/ABI/testing/sysfs-bus-iio-frequency-ad9523
index 2ce9c3f68ee..a91aeabe7b2 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-frequency-ad9523
+++ b/Documentation/ABI/testing/sysfs-bus-iio-frequency-ad9523
@@ -18,14 +18,6 @@ Description:
Reading returns either '1' or '0'. '1' means that the
pllY is locked.
-What: /sys/bus/iio/devices/iio:deviceX/store_eeprom
-KernelVersion: 3.4.0
-Contact: linux-iio@vger.kernel.org
-Description:
- Writing '1' stores the current device configuration into
- on-chip EEPROM. After power-up or chip reset the device will
- automatically load the saved configuration.
-
What: /sys/bus/iio/devices/iio:deviceX/sync_dividers
KernelVersion: 3.4.0
Contact: linux-iio@vger.kernel.org
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-frequency-adf4350 b/Documentation/ABI/testing/sysfs-bus-iio-frequency-adf4350
index d89aded01c5..1254457a726 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-frequency-adf4350
+++ b/Documentation/ABI/testing/sysfs-bus-iio-frequency-adf4350
@@ -18,4 +18,4 @@ Description:
adjust the reference frequency accordingly.
The value written has no effect until out_altvoltageY_frequency
is updated. Consider to use out_altvoltageY_powerdown to power
- down the PLL and it's RFOut buffers during REFin changes.
+ down the PLL and its RFOut buffers during REFin changes.
diff --git a/Documentation/ABI/testing/sysfs-bus-usb b/Documentation/ABI/testing/sysfs-bus-usb
index 9759b8c9133..1430f584b26 100644
--- a/Documentation/ABI/testing/sysfs-bus-usb
+++ b/Documentation/ABI/testing/sysfs-bus-usb
@@ -1,81 +1,3 @@
-What: /sys/bus/usb/devices/.../power/autosuspend
-Date: March 2007
-KernelVersion: 2.6.21
-Contact: Alan Stern <stern@rowland.harvard.edu>
-Description:
- Each USB device directory will contain a file named
- power/autosuspend. This file holds the time (in seconds)
- the device must be idle before it will be autosuspended.
- 0 means the device will be autosuspended as soon as
- possible. Negative values will prevent the device from
- being autosuspended at all, and writing a negative value
- will resume the device if it is already suspended.
-
- The autosuspend delay for newly-created devices is set to
- the value of the usbcore.autosuspend module parameter.
-
-What: /sys/bus/usb/devices/.../power/persist
-Date: May 2007
-KernelVersion: 2.6.23
-Contact: Alan Stern <stern@rowland.harvard.edu>
-Description:
- If CONFIG_USB_PERSIST is set, then each USB device directory
- will contain a file named power/persist. The file holds a
- boolean value (0 or 1) indicating whether or not the
- "USB-Persist" facility is enabled for the device. Since the
- facility is inherently dangerous, it is disabled by default
- for all devices except hubs. For more information, see
- Documentation/usb/persist.txt.
-
-What: /sys/bus/usb/device/.../power/connected_duration
-Date: January 2008
-KernelVersion: 2.6.25
-Contact: Sarah Sharp <sarah.a.sharp@intel.com>
-Description:
- If CONFIG_PM_RUNTIME is enabled then this file
- is present. When read, it returns the total time (in msec)
- that the USB device has been connected to the machine. This
- file is read-only.
-Users:
- PowerTOP <power@bughost.org>
- http://www.lesswatts.org/projects/powertop/
-
-What: /sys/bus/usb/device/.../power/active_duration
-Date: January 2008
-KernelVersion: 2.6.25
-Contact: Sarah Sharp <sarah.a.sharp@intel.com>
-Description:
- If CONFIG_PM_RUNTIME is enabled then this file
- is present. When read, it returns the total time (in msec)
- that the USB device has been active, i.e. not in a suspended
- state. This file is read-only.
-
- Tools can use this file and the connected_duration file to
- compute the percentage of time that a device has been active.
- For example,
- echo $((100 * `cat active_duration` / `cat connected_duration`))
- will give an integer percentage. Note that this does not
- account for counter wrap.
-Users:
- PowerTOP <power@bughost.org>
- http://www.lesswatts.org/projects/powertop/
-
-What: /sys/bus/usb/device/<busnum>-<devnum>...:<config num>-<interface num>/supports_autosuspend
-Date: January 2008
-KernelVersion: 2.6.27
-Contact: Sarah Sharp <sarah.a.sharp@intel.com>
-Description:
- When read, this file returns 1 if the interface driver
- for this interface supports autosuspend. It also
- returns 1 if no driver has claimed this interface, as an
- unclaimed interface will not stop the device from being
- autosuspended if all other interface drivers are idle.
- The file returns 0 if autosuspend support has not been
- added to the driver.
-Users:
- USB PM tool
- git://git.moblin.org/users/sarah/usb-pm-tool/
-
What: /sys/bus/usb/device/.../authorized
Date: July 2008
KernelVersion: 2.6.26
@@ -172,17 +94,6 @@ Description:
device IDs, exactly like reading from the entry
"/sys/bus/usb/drivers/.../new_id"
-What: /sys/bus/usb/device/.../avoid_reset_quirk
-Date: December 2009
-Contact: Oliver Neukum <oliver@neukum.org>
-Description:
- Writing 1 to this file tells the kernel that this
- device will morph into another mode when it is reset.
- Drivers will not use reset for error handling for
- such devices.
-Users:
- usb_modeswitch
-
What: /sys/bus/usb/devices/.../power/usb2_hardware_lpm
Date: September 2011
Contact: Andiry Xu <andiry.xu@amd.com>
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
new file mode 100644
index 00000000000..31942efcaf0
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -0,0 +1,26 @@
+What: /sys/fs/f2fs/<disk>/gc_max_sleep_time
+Date: July 2013
+Contact: "Namjae Jeon" <namjae.jeon@samsung.com>
+Description:
+ Controls the maximum sleep time for gc_thread. Time
+ is in milliseconds.
+
+What: /sys/fs/f2fs/<disk>/gc_min_sleep_time
+Date: July 2013
+Contact: "Namjae Jeon" <namjae.jeon@samsung.com>
+Description:
+ Controls the minimum sleep time for gc_thread. Time
+ is in milliseconds.
+
+What: /sys/fs/f2fs/<disk>/gc_no_gc_sleep_time
+Date: July 2013
+Contact: "Namjae Jeon" <namjae.jeon@samsung.com>
+Description:
+ Controls the default sleep time for gc_thread. Time
+ is in milliseconds.
+
+What: /sys/fs/f2fs/<disk>/gc_idle
+Date: July 2013
+Contact: "Namjae Jeon" <namjae.jeon@samsung.com>
+Description:
+ Controls the victim selection policy for garbage collection.
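
These gc_* attributes are ordinary sysfs files, so tuning the garbage-collection thread from user space is a single write per attribute. A minimal sketch, assuming the volume's sysfs directory is /sys/fs/f2fs/sda1 and the caller has write permission there:

#include <stdio.h>

/* Write a millisecond value to one of the gc_*_sleep_time attributes. */
static int set_f2fs_gc_tunable(const char *attr, unsigned int msec)
{
    char path[256];
    FILE *f;

    snprintf(path, sizeof(path), "/sys/fs/f2fs/sda1/%s", attr);
    f = fopen(path, "w");
    if (!f)
        return -1;
    fprintf(f, "%u\n", msec);
    return fclose(f);
}

int main(void)
{
    /* e.g. let gc_thread sleep at most 60 seconds between passes */
    return set_f2fs_gc_tunable("gc_max_sleep_time", 60000);
}
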
diff --git a/Documentation/DocBook/80211.tmpl b/Documentation/DocBook/80211.tmpl
index 49267ea9756..f403ec3c5c9 100644
--- a/Documentation/DocBook/80211.tmpl
+++ b/Documentation/DocBook/80211.tmpl
@@ -325,6 +325,7 @@
<title>functions/definitions</title>
!Finclude/net/mac80211.h ieee80211_rx_status
!Finclude/net/mac80211.h mac80211_rx_flags
+!Finclude/net/mac80211.h mac80211_tx_info_flags
!Finclude/net/mac80211.h mac80211_tx_control_flags
!Finclude/net/mac80211.h mac80211_rate_control_flags
!Finclude/net/mac80211.h ieee80211_tx_rate
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 7d1278e7a43..ed1d6d28902 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -156,13 +156,6 @@
</para></listitem>
</varlistentry>
<varlistentry>
- <term>DRIVER_USE_MTRR</term>
- <listitem><para>
- Driver uses MTRR interface for mapping memory, the DRM core will
- manage MTRR resources. Deprecated.
- </para></listitem>
- </varlistentry>
- <varlistentry>
<term>DRIVER_PCI_DMA</term>
<listitem><para>
Driver is capable of PCI DMA, mapping of PCI DMA buffers to
@@ -195,28 +188,6 @@
</para></listitem>
</varlistentry>
<varlistentry>
- <term>DRIVER_IRQ_VBL</term>
- <listitem><para>Unused. Deprecated.</para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_DMA_QUEUE</term>
- <listitem><para>
- Should be set if the driver queues DMA requests and completes them
- asynchronously. Deprecated.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_FB_DMA</term>
- <listitem><para>
- Driver supports DMA to/from the framebuffer, mapping of frambuffer
- DMA buffers to userspace will be supported. Deprecated.
- </para></listitem>
- </varlistentry>
- <varlistentry>
- <term>DRIVER_IRQ_VBL2</term>
- <listitem><para>Unused. Deprecated.</para></listitem>
- </varlistentry>
- <varlistentry>
<term>DRIVER_GEM</term>
<listitem><para>
Driver use the GEM memory manager.
@@ -234,6 +205,12 @@
Driver implements DRM PRIME buffer sharing.
</para></listitem>
</varlistentry>
+ <varlistentry>
+ <term>DRIVER_RENDER</term>
+ <listitem><para>
+ Driver supports dedicated render nodes.
+ </para></listitem>
+ </varlistentry>
</variablelist>
</sect3>
<sect3>
@@ -2212,6 +2189,18 @@ void intel_crt_init(struct drm_device *dev)
!Iinclude/drm/drm_rect.h
!Edrivers/gpu/drm/drm_rect.c
</sect2>
+ <sect2>
+ <title>Flip-work Helper Reference</title>
+!Pinclude/drm/drm_flip_work.h flip utils
+!Iinclude/drm/drm_flip_work.h
+!Edrivers/gpu/drm/drm_flip_work.c
+ </sect2>
+ <sect2>
+ <title>VMA Offset Manager</title>
+!Pdrivers/gpu/drm/drm_vma_manager.c vma offset manager
+!Edrivers/gpu/drm/drm_vma_manager.c
+!Iinclude/drm/drm_vma_manager.h
+ </sect2>
</sect1>
<!-- Internals: kms properties -->
@@ -2422,18 +2411,18 @@ void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
</abstract>
<para>
The <methodname>firstopen</methodname> method is called by the DRM core
- when an application opens a device that has no other opened file handle.
- Similarly the <methodname>lastclose</methodname> method is called when
- the last application holding a file handle opened on the device closes
- it. Both methods are mostly used for UMS (User Mode Setting) drivers to
- acquire and release device resources which should be done in the
- <methodname>load</methodname> and <methodname>unload</methodname>
- methods for KMS drivers.
+ for legacy UMS (User Mode Setting) drivers only when an application
+ opens a device that has no other opened file handle. UMS drivers can
+ implement it to acquire device resources. KMS drivers can't use the
+ method and must acquire resources in the <methodname>load</methodname>
+ method instead.
</para>
<para>
- Note that the <methodname>lastclose</methodname> method is also called
- at module unload time or, for hot-pluggable devices, when the device is
- unplugged. The <methodname>firstopen</methodname> and
+ Similarly the <methodname>lastclose</methodname> method is called when
+ the last application holding a file handle opened on the device closes
+ it, for both UMS and KMS drivers. Additionally, the method is also
+ called at module unload time or, for hot-pluggable devices, when the
+ device is unplugged. The <methodname>firstopen</methodname> and
<methodname>lastclose</methodname> calls can thus be unbalanced.
</para>
<para>
@@ -2462,7 +2451,12 @@ void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
<para>
The <methodname>lastclose</methodname> method should restore CRTC and
plane properties to default value, so that a subsequent open of the
- device will not inherit state from the previous user.
+ device will not inherit state from the previous user. It can also be
+ used to execute delayed power switching state changes, e.g. in
+ conjunction with the vga-switcheroo infrastructure. Beyond that KMS
+ drivers should not do any further cleanup. Only legacy UMS drivers might
+ need to clean up device state so that the vga console or an independent
+ fbdev driver could take over.
</para>
</sect2>
<sect2>
@@ -2498,7 +2492,6 @@ void (*postclose) (struct drm_device *, struct drm_file *);</synopsis>
<programlisting>
.poll = drm_poll,
.read = drm_read,
- .fasync = drm_fasync,
.llseek = no_llseek,
</programlisting>
</para>
@@ -2657,6 +2650,69 @@ int (*resume) (struct drm_device *);</synopsis>
info, since man pages should cover the rest.
</para>
+ <!-- External: render nodes -->
+
+ <sect1>
+ <title>Render nodes</title>
+ <para>
+ DRM core provides multiple character-devices for user-space to use.
+ Depending on which device is opened, user-space can perform a different
+ set of operations (mainly ioctls). The primary node is always created
+ and called <term>card&lt;num&gt;</term>. Additionally, a currently
+ unused control node, called <term>controlD&lt;num&gt;</term> is also
+ created. The primary node provides all legacy operations and
+ historically was the only interface used by userspace. With KMS, the
+ control node was introduced. However, the planned KMS control interface
+ has never been written and so the control node stays unused to date.
+ </para>
+ <para>
+ With the increased use of offscreen renderers and GPGPU applications,
+ clients no longer require running compositors or graphics servers to
+ make use of a GPU. But the DRM API required unprivileged clients to
+ authenticate to a DRM-Master prior to getting GPU access. To avoid this
+ step and to grant clients GPU access without authenticating, render
+ nodes were introduced. Render nodes solely serve render clients, that
+ is, no modesetting or privileged ioctls can be issued on render nodes.
+ Only non-global rendering commands are allowed. If a driver supports
+ render nodes, it must advertise it via the <term>DRIVER_RENDER</term>
+ DRM driver capability. If not supported, the primary node must be used
+ for render clients together with the legacy drmAuth authentication
+ procedure.
+ </para>
+ <para>
+ If a driver advertises render node support, DRM core will create a
+ separate render node called <term>renderD&lt;num&gt;</term>. There will
+ be one render node per device. No ioctls except PRIME-related ioctls
+ will be allowed on this node. Especially <term>GEM_OPEN</term> will be
+ explicitly prohibited. Render nodes are designed to avoid the
+ buffer-leaks, which occur if clients guess the flink names or mmap
+ offsets on the legacy interface. Additionally to this basic interface,
+ drivers must mark their driver-dependent render-only ioctls as
+ <term>DRM_RENDER_ALLOW</term> so render clients can use them. Driver
+ authors must be careful not to allow any privileged ioctls on render
+ nodes.
+ </para>
+ <para>
+ With render nodes, user-space can now control access to the render node
+ via basic file-system access-modes. A running graphics server which
+ authenticates clients on the privileged primary/legacy node is no longer
+ required. Instead, a client can open the render node and is immediately
+ granted GPU access. Communication between clients (or servers) is done
+ via PRIME. FLINK from render node to legacy node is not supported. New
+ clients must not use the insecure FLINK interface.
+ </para>
+ <para>
+ Besides dropping all modeset/global ioctls, render nodes also drop the
+ DRM-Master concept. There is no reason to associate render clients with
+ a DRM-Master as they are independent of any graphics server. Besides,
+ they must work without any running master, anyway.
+ Drivers must be able to run without a master object if they support
+ render nodes. If, on the other hand, a driver requires shared state
+ between clients which is visible to user-space and accessible beyond
+ open-file boundaries, they cannot support render nodes.
+ </para>
+ </sect1>
+
<!-- External: vblank handling -->
<sect1>
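
The unprivileged path described in the render-nodes section above can be sketched from user space in a few lines: open the render node directly and query the driver, with no DRM-Master authentication step. The node name renderD128, the include path for the uapi drm.h header, and the assumption that DRM_IOCTL_VERSION is among the render-allowed ioctls are system-dependent details, not guarantees:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/drm.h>   /* may be <libdrm/drm.h> depending on the distribution */

int main(void)
{
    char name[64] = "";            /* zero-filled so the result stays NUL-terminated */
    struct drm_version v;
    int fd = open("/dev/dri/renderD128", O_RDWR);

    if (fd < 0)
        return 1;                  /* no render node, or no access rights */

    memset(&v, 0, sizeof(v));
    v.name = name;
    v.name_len = sizeof(name) - 1;

    /* A read-only query; modeset or other privileged ioctls would be rejected here. */
    if (ioctl(fd, DRM_IOCTL_VERSION, &v) == 0)
        printf("driver: %s\n", name);

    close(fd);
    return 0;
}

Any sharing with other clients would then go through PRIME file descriptors rather than FLINK names, as the section above requires.
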
diff --git a/Documentation/DocBook/media/v4l/controls.xml b/Documentation/DocBook/media/v4l/controls.xml
index c2fc9ec1417..7a3b49b3cc3 100644
--- a/Documentation/DocBook/media/v4l/controls.xml
+++ b/Documentation/DocBook/media/v4l/controls.xml
@@ -722,17 +722,22 @@ for more details.</para>
</section>
<section id="mpeg-controls">
- <title>MPEG Control Reference</title>
+ <title>Codec Control Reference</title>
- <para>Below all controls within the MPEG control class are
+ <para>Below all controls within the Codec control class are
described. First the generic controls, then controls specific for
certain hardware.</para>
+ <para>Note: These controls are applicable to all codecs and
+not just MPEG. The defines are prefixed with V4L2_CID_MPEG/V4L2_MPEG
+as the controls were originally made for MPEG codecs and later
+extended to cover all encoding formats.</para>
+
<section>
- <title>Generic MPEG Controls</title>
+ <title>Generic Codec Controls</title>
<table pgwide="1" frame="none" id="mpeg-control-id">
- <title>MPEG Control IDs</title>
+ <title>Codec Control IDs</title>
<tgroup cols="4">
<colspec colname="c1" colwidth="1*" />
<colspec colname="c2" colwidth="6*" />
@@ -752,7 +757,7 @@ certain hardware.</para>
<row>
<entry spanname="id"><constant>V4L2_CID_MPEG_CLASS</constant>&nbsp;</entry>
<entry>class</entry>
- </row><row><entry spanname="descr">The MPEG class
+ </row><row><entry spanname="descr">The Codec class
descriptor. Calling &VIDIOC-QUERYCTRL; for this control will return a
description of this control class. This description can be used as the
caption of a Tab page in a GUI, for example.</entry>
@@ -3009,6 +3014,159 @@ in by the application. 0 = do not insert, 1 = insert packets.</entry>
</tgroup>
</table>
</section>
+
+ <section>
+ <title>VPX Control Reference</title>
+
+ <para>The VPX controls include controls for encoding parameters
+ of the VPx video codecs.</para>
+
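+ <para>The sketch below shows one way such a control could be set via the
+VIDIOC_S_EXT_CTRLS ioctl; the file descriptor is assumed to refer to an
+already opened VP8 encoder device.</para>
+
+ <programlisting>
+#include &lt;string.h&gt;
+#include &lt;sys/ioctl.h&gt;
+#include &lt;linux/videodev2.h&gt;
+
+/* Request four token partitions on an already opened VP8 encoder. */
+static int set_vp8_partitions(int fd)
+{
+	struct v4l2_ext_control ctrl;
+	struct v4l2_ext_controls ctrls;
+
+	memset(&amp;ctrl, 0, sizeof(ctrl));
+	memset(&amp;ctrls, 0, sizeof(ctrls));
+
+	ctrl.id = V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS;
+	ctrl.value = V4L2_CID_MPEG_VIDEO_VPX_4_PARTITIONS;
+
+	ctrls.ctrl_class = V4L2_CTRL_CLASS_MPEG;
+	ctrls.count = 1;
+	ctrls.controls = &amp;ctrl;
+
+	return ioctl(fd, VIDIOC_S_EXT_CTRLS, &amp;ctrls);
+}
+ </programlisting>
+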
+ <table pgwide="1" frame="none" id="vpx-control-id">
+ <title>VPX Control IDs</title>
+
+ <tgroup cols="4">
+ <colspec colname="c1" colwidth="1*" />
+ <colspec colname="c2" colwidth="6*" />
+ <colspec colname="c3" colwidth="2*" />
+ <colspec colname="c4" colwidth="6*" />
+ <spanspec namest="c1" nameend="c2" spanname="id" />
+ <spanspec namest="c2" nameend="c4" spanname="descr" />
+ <thead>
+ <row>
+ <entry spanname="id" align="left">ID</entry>
+ <entry align="left">Type</entry>
+ </row><row rowsep="1"><entry spanname="descr" align="left">Description</entry>
+ </row>
+ </thead>
+ <tbody valign="top">
+ <row><entry></entry></row>
+
+ <row><entry></entry></row>
+ <row id="v4l2-vpx-num-partitions">
+ <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS</constant></entry>
+ <entry>enum v4l2_vp8_num_partitions</entry>
+ </row>
+ <row><entry spanname="descr">The number of token partitions to use in VP8 encoder.
+Possible values are:</entry>
+ </row>
+ <row>
+ <entrytbl spanname="descr" cols="2">
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_CID_MPEG_VIDEO_VPX_1_PARTITION</constant></entry>
+ <entry>1 coefficient partition</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_CID_MPEG_VIDEO_VPX_2_PARTITIONS</constant></entry>
+ <entry>2 coefficient partitions</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_CID_MPEG_VIDEO_VPX_4_PARTITIONS</constant></entry>
+ <entry>4 coefficient partitions</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_CID_MPEG_VIDEO_VPX_8_PARTITIONS</constant></entry>
+ <entry>8 coefficient partitions</entry>
+ </row>
+ </tbody>
+ </entrytbl>
+ </row>
+
+ <row><entry></entry></row>
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4</constant></entry>
+ <entry>boolean</entry>
+ </row>
+ <row><entry spanname="descr">Setting this prevents intra 4x4 mode in the intra mode decision.</entry>
+ </row>
+
+ <row><entry></entry></row>
+ <row id="v4l2-vpx-num-ref-frames">
+ <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES</constant></entry>
+ <entry>enum v4l2_vp8_num_ref_frames</entry>
+ </row>
+ <row><entry spanname="descr">The number of reference pictures for encoding P frames.
+Possible values are:</entry>
+ </row>
+ <row>
+ <entrytbl spanname="descr" cols="2">
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_CID_MPEG_VIDEO_VPX_1_REF_FRAME</constant></entry>
+ <entry>The last encoded frame will be searched</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_CID_MPEG_VIDEO_VPX_2_REF_FRAME</constant></entry>
+ <entry>Two frames will be searched among the last encoded frame, the golden frame
+and the alternate reference (altref) frame. The encoder implementation will decide which two are chosen.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_CID_MPEG_VIDEO_VPX_3_REF_FRAME</constant></entry>
+ <entry>The last encoded frame, the golden frame and the altref frame will be searched.</entry>
+ </row>
+ </tbody>
+ </entrytbl>
+ </row>
+
+ <row><entry></entry></row>
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL</constant></entry>
+ <entry>integer</entry>
+ </row>
+ <row><entry spanname="descr">Indicates the loop filter level. The adjustment of the loop
+filter level is done via a delta value against a baseline loop filter value.</entry>
+ </row>
+
+ <row><entry></entry></row>
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS</constant></entry>
+ <entry>integer</entry>
+ </row>
+ <row><entry spanname="descr">This parameter affects the loop filter. Anything above
+zero weakens the deblocking effect on the loop filter.</entry>
+ </row>
+
+ <row><entry></entry></row>
+ <row>
+ <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD</constant></entry>
+ <entry>integer</entry>
+ </row>
+ <row><entry spanname="descr">Sets the refresh period for the golden frame. The period is defined
+in number of frames. For a value of 'n', every nth frame starting from the first key frame will be taken as a golden frame.
+For example, for an encoding sequence of 0, 1, 2, 3, 4, 5, 6, 7 where the golden frame refresh period is set to 4, frames
+0, 4, 8, etc. will be taken as golden frames, since frame 0 is always a key frame.</entry>
+ </row>
+
+ <row><entry></entry></row>
+ <row id="v4l2-vpx-golden-frame-sel">
+ <entry spanname="id"><constant>V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL</constant></entry>
+ <entry>enum v4l2_vp8_golden_frame_sel</entry>
+ </row>
+ <row><entry spanname="descr">Selects the golden frame for encoding.
+Possible values are:</entry>
+ </row>
+ <row>
+ <entrytbl spanname="descr" cols="2">
+ <tbody valign="top">
+ <row>
+ <entry><constant>V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV</constant></entry>
+ <entry>Use the (n-2)th frame as the golden frame, where 'n' is the current frame index.</entry>
+ </row>
+ <row>
+ <entry><constant>V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_REF_PERIOD</constant></entry>
+ <entry>Use the frame indicated by
+V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD as the golden frame.</entry>
+ </row>
+ </tbody>
+ </entrytbl>
+ </row>
+
+ <row><entry></entry></row>
+ </tbody>
+ </tgroup>
+ </table>
+
+ </section>
</section>
<section id="camera-controls">
diff --git a/Documentation/DocBook/media/v4l/lirc_device_interface.xml b/Documentation/DocBook/media/v4l/lirc_device_interface.xml
index 8d7eb6bf631..34cada2ca71 100644
--- a/Documentation/DocBook/media/v4l/lirc_device_interface.xml
+++ b/Documentation/DocBook/media/v4l/lirc_device_interface.xml
@@ -46,7 +46,9 @@ describing an IR signal are read from the chardev.</para>
values. Pulses and spaces are only marked implicitly by their position. The
data must start and end with a pulse, therefore, the data must always include
an uneven number of samples. The write function must block until the data has
-been transmitted by the hardware.</para>
+been transmitted by the hardware. If more data is provided than the hardware
+can send, the driver returns EINVAL.</para>
+
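+<para>For illustration only, a minimal transmit sketch is shown below; the
+device path and the sample durations are arbitrary examples, and the samples
+are assumed to be unsigned int values in microseconds as described above.</para>
+
+<programlisting>
+#include &lt;fcntl.h&gt;
+#include &lt;unistd.h&gt;
+
+/* Odd number of samples, starting and ending with a pulse. */
+static unsigned int samples[] = { 9000, 4500, 560, 560, 560 };
+
+int send_ir(void)
+{
+	int fd = open("/dev/lirc0", O_WRONLY);
+	ssize_t ret;
+
+	if (fd &lt; 0)
+		return -1;
+	/* Blocks until the hardware has transmitted the data; fails
+	 * with EINVAL if more data is provided than it can send. */
+	ret = write(fd, samples, sizeof(samples));
+	close(fd);
+	return ret &lt; 0 ? -1 : 0;
+}
+</programlisting>
+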
</section>
<section id="lirc_ioctl">
diff --git a/Documentation/DocBook/media/v4l/pixfmt-nv16m.xml b/Documentation/DocBook/media/v4l/pixfmt-nv16m.xml
new file mode 100644
index 00000000000..c51d5a4cda0
--- /dev/null
+++ b/Documentation/DocBook/media/v4l/pixfmt-nv16m.xml
@@ -0,0 +1,171 @@
+ <refentry>
+ <refmeta>
+ <refentrytitle>V4L2_PIX_FMT_NV16M ('NM16'), V4L2_PIX_FMT_NV61M ('NM61')</refentrytitle>
+ &manvol;
+ </refmeta>
+ <refnamediv>
+ <refname id="V4L2-PIX-FMT-NV16M"><constant>V4L2_PIX_FMT_NV16M</constant></refname>
+ <refname id="V4L2-PIX-FMT-NV61M"><constant>V4L2_PIX_FMT_NV61M</constant></refname>
+ <refpurpose>Variation of <constant>V4L2_PIX_FMT_NV16</constant> and <constant>V4L2_PIX_FMT_NV61</constant> with planes
+ non-contiguous in memory. </refpurpose>
+ </refnamediv>
+ <refsect1>
+ <title>Description</title>
+
+ <para>This is a multi-planar, two-plane version of the YUV 4:2:2 format.
+The three components are separated into two sub-images or planes.
+<constant>V4L2_PIX_FMT_NV16M</constant> differs from <constant>V4L2_PIX_FMT_NV16
+</constant> in that the two planes are non-contiguous in memory, i.e. the chroma
+plane does not necessarily immediately follow the luma plane.
+The luminance data occupies the first plane. The Y plane has one byte per pixel.
+In the second plane there is chrominance data with alternating chroma samples.
+The CbCr plane is the same width and height, in bytes, as the Y plane.
+Each CbCr pair belongs to two pixels. For example,
+Cb<subscript>0</subscript>/Cr<subscript>0</subscript> belongs to
+Y'<subscript>00</subscript>, Y'<subscript>01</subscript>.
+<constant>V4L2_PIX_FMT_NV61M</constant> is the same as <constant>V4L2_PIX_FMT_NV16M</constant>
+except that the Cb and Cr bytes are swapped, i.e. the CrCb plane starts with a Cr byte.</para>
+
+ <para><constant>V4L2_PIX_FMT_NV16M</constant> and
+<constant>V4L2_PIX_FMT_NV61M</constant> are intended to be used only in drivers
+and applications that support the multi-planar API, described in
+<xref linkend="planar-apis"/>. </para>
+
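+ <para>As a non-normative sketch, an application could select this format as
+follows; the resolution is only an example and the file descriptor is assumed
+to refer to an already opened multi-planar capture device.</para>
+
+ <programlisting>
+#include &lt;string.h&gt;
+#include &lt;sys/ioctl.h&gt;
+#include &lt;linux/videodev2.h&gt;
+
+/* Request 1280x720 V4L2_PIX_FMT_NV16M: plane 0 holds Y, plane 1 CbCr. */
+static int set_nv16m(int fd)
+{
+	struct v4l2_format fmt;
+
+	memset(&amp;fmt, 0, sizeof(fmt));
+	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+	fmt.fmt.pix_mp.width = 1280;
+	fmt.fmt.pix_mp.height = 720;
+	fmt.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV16M;
+	fmt.fmt.pix_mp.num_planes = 2;
+
+	return ioctl(fd, VIDIOC_S_FMT, &amp;fmt);
+}
+ </programlisting>
+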
+ <example>
+ <title><constant>V4L2_PIX_FMT_NV16M</constant> 4 &times; 4 pixel image</title>
+
+ <formalpara>
+ <title>Byte Order.</title>
+ <para>Each cell is one byte.
+ <informaltable frame="none">
+ <tgroup cols="5" align="center">
+ <colspec align="left" colwidth="2*" />
+ <tbody valign="top">
+ <row>
+ <entry>start0&nbsp;+&nbsp;0:</entry>
+ <entry>Y'<subscript>00</subscript></entry>
+ <entry>Y'<subscript>01</subscript></entry>
+ <entry>Y'<subscript>02</subscript></entry>
+ <entry>Y'<subscript>03</subscript></entry>
+ </row>
+ <row>
+ <entry>start0&nbsp;+&nbsp;4:</entry>
+ <entry>Y'<subscript>10</subscript></entry>
+ <entry>Y'<subscript>11</subscript></entry>
+ <entry>Y'<subscript>12</subscript></entry>
+ <entry>Y'<subscript>13</subscript></entry>
+ </row>
+ <row>
+ <entry>start0&nbsp;+&nbsp;8:</entry>
+ <entry>Y'<subscript>20</subscript></entry>
+ <entry>Y'<subscript>21</subscript></entry>
+ <entry>Y'<subscript>22</subscript></entry>
+ <entry>Y'<subscript>23</subscript></entry>
+ </row>
+ <row>
+ <entry>start0&nbsp;+&nbsp;12:</entry>
+ <entry>Y'<subscript>30</subscript></entry>
+ <entry>Y'<subscript>31</subscript></entry>
+ <entry>Y'<subscript>32</subscript></entry>
+ <entry>Y'<subscript>33</subscript></entry>
+ </row>
+ <row>
+ <entry></entry>
+ </row>
+ <row>
+ <entry>start1&nbsp;+&nbsp;0:</entry>
+ <entry>Cb<subscript>00</subscript></entry>
+ <entry>Cr<subscript>00</subscript></entry>
+ <entry>Cb<subscript>02</subscript></entry>
+ <entry>Cr<subscript>02</subscript></entry>
+ </row>
+ <row>
+ <entry>start1&nbsp;+&nbsp;4:</entry>
+ <entry>Cb<subscript>10</subscript></entry>
+ <entry>Cr<subscript>10</subscript></entry>
+ <entry>Cb<subscript>12</subscript></entry>
+ <entry>Cr<subscript>12</subscript></entry>
+ </row>
+ <row>
+ <entry>start1&nbsp;+&nbsp;8:</entry>
+ <entry>Cb<subscript>20</subscript></entry>
+ <entry>Cr<subscript>20</subscript></entry>
+ <entry>Cb<subscript>22</subscript></entry>
+ <entry>Cr<subscript>22</subscript></entry>
+ </row>
+ <row>
+ <entry>start1&nbsp;+&nbsp;12:</entry>
+ <entry>Cb<subscript>30</subscript></entry>
+ <entry>Cr<subscript>30</subscript></entry>
+ <entry>Cb<subscript>32</subscript></entry>
+ <entry>Cr<subscript>32</subscript></entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </informaltable>
+ </para>
+ </formalpara>
+
+ <formalpara>
+ <title>Color Sample Location.</title>
+ <para>
+ <informaltable frame="none">
+ <tgroup cols="7" align="center">
+ <tbody valign="top">
+ <row>
+ <entry></entry>
+ <entry>0</entry><entry></entry><entry>1</entry><entry></entry>
+ <entry>2</entry><entry></entry><entry>3</entry>
+ </row>
+ <row>
+ <entry>0</entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry><entry>C</entry><entry></entry><entry></entry>
+ <entry></entry><entry>C</entry><entry></entry>
+ </row>
+ <row>
+ <entry>1</entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry><entry>C</entry><entry></entry><entry></entry>
+ <entry></entry><entry>C</entry><entry></entry>
+ </row>
+ <row>
+ <entry></entry>
+ </row>
+ <row>
+ <entry>2</entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry><entry>C</entry><entry></entry><entry></entry>
+ <entry></entry><entry>C</entry><entry></entry>
+ </row>
+ <row>
+ <entry>3</entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry><entry></entry>
+ <entry>Y</entry><entry></entry><entry>Y</entry>
+ </row>
+ <row>
+ <entry></entry>
+ <entry></entry><entry>C</entry><entry></entry><entry></entry>
+ <entry></entry><entry>C</entry><entry></entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </informaltable>
+ </para>
+ </formalpara>
+ </example>
+ </refsect1>
+ </refentry>
diff --git a/Documentation/DocBook/media/v4l/pixfmt.xml b/Documentation/DocBook/media/v4l/pixfmt.xml
index 99b8d2ad6e4..72d72bd67d0 100644
--- a/Documentation/DocBook/media/v4l/pixfmt.xml
+++ b/Documentation/DocBook/media/v4l/pixfmt.xml
@@ -391,9 +391,9 @@ clamp (double x)
else return r;
}
-y1 = (255 / 219.0) * (Y1 - 16);
-pb = (255 / 224.0) * (Cb - 128);
-pr = (255 / 224.0) * (Cr - 128);
+y1 = (Y1 - 16) / 219.0;
+pb = (Cb - 128) / 224.0;
+pr = (Cr - 128) / 224.0;
r = 1.0 * y1 + 0 * pb + 1.402 * pr;
g = 1.0 * y1 - 0.344 * pb - 0.714 * pr;
@@ -718,6 +718,7 @@ information.</para>
&sub-nv12m;
&sub-nv12mt;
&sub-nv16;
+ &sub-nv16m;
&sub-nv24;
&sub-m420;
</section>
diff --git a/Documentation/DocBook/media/v4l/subdev-formats.xml b/Documentation/DocBook/media/v4l/subdev-formats.xml
index adc61982df7..f72c1cc93a9 100644
--- a/Documentation/DocBook/media/v4l/subdev-formats.xml
+++ b/Documentation/DocBook/media/v4l/subdev-formats.xml
@@ -97,31 +97,39 @@
<colspec colname="id" align="left" />
<colspec colname="code" align="center"/>
<colspec colname="bit" />
- <colspec colnum="4" colname="b23" align="center" />
- <colspec colnum="5" colname="b22" align="center" />
- <colspec colnum="6" colname="b21" align="center" />
- <colspec colnum="7" colname="b20" align="center" />
- <colspec colnum="8" colname="b19" align="center" />
- <colspec colnum="9" colname="b18" align="center" />
- <colspec colnum="10" colname="b17" align="center" />
- <colspec colnum="11" colname="b16" align="center" />
- <colspec colnum="12" colname="b15" align="center" />
- <colspec colnum="13" colname="b14" align="center" />
- <colspec colnum="14" colname="b13" align="center" />
- <colspec colnum="15" colname="b12" align="center" />
- <colspec colnum="16" colname="b11" align="center" />
- <colspec colnum="17" colname="b10" align="center" />
- <colspec colnum="18" colname="b09" align="center" />
- <colspec colnum="19" colname="b08" align="center" />
- <colspec colnum="20" colname="b07" align="center" />
- <colspec colnum="21" colname="b06" align="center" />
- <colspec colnum="22" colname="b05" align="center" />
- <colspec colnum="23" colname="b04" align="center" />
- <colspec colnum="24" colname="b03" align="center" />
- <colspec colnum="25" colname="b02" align="center" />
- <colspec colnum="26" colname="b01" align="center" />
- <colspec colnum="27" colname="b00" align="center" />
- <spanspec namest="b23" nameend="b00" spanname="b0" />
+ <colspec colnum="4" colname="b31" align="center" />
+ <colspec colnum="5" colname="b20" align="center" />
+ <colspec colnum="6" colname="b29" align="center" />
+ <colspec colnum="7" colname="b28" align="center" />
+ <colspec colnum="8" colname="b27" align="center" />
+ <colspec colnum="9" colname="b26" align="center" />
+ <colspec colnum="10" colname="b25" align="center" />
+ <colspec colnum="11" colname="b24" align="center" />
+ <colspec colnum="12" colname="b23" align="center" />
+ <colspec colnum="13" colname="b22" align="center" />
+ <colspec colnum="14" colname="b21" align="center" />
+ <colspec colnum="15" colname="b20" align="center" />
+ <colspec colnum="16" colname="b19" align="center" />
+ <colspec colnum="17" colname="b18" align="center" />
+ <colspec colnum="18" colname="b17" align="center" />
+ <colspec colnum="19" colname="b16" align="center" />
+ <colspec colnum="20" colname="b15" align="center" />
+ <colspec colnum="21" colname="b14" align="center" />
+ <colspec colnum="22" colname="b13" align="center" />
+ <colspec colnum="23" colname="b12" align="center" />
+ <colspec colnum="24" colname="b11" align="center" />
+ <colspec colnum="25" colname="b10" align="center" />
+ <colspec colnum="26" colname="b09" align="center" />
+ <colspec colnum="27" colname="b08" align="center" />
+ <colspec colnum="28" colname="b07" align="center" />
+ <colspec colnum="29" colname="b06" align="center" />
+ <colspec colnum="30" colname="b05" align="center" />
+ <colspec colnum="31" colname="b04" align="center" />
+ <colspec colnum="32" colname="b03" align="center" />
+ <colspec colnum="33" colname="b02" align="center" />
+ <colspec colnum="34" colname="b01" align="center" />
+ <colspec colnum="35" colname="b00" align="center" />
+ <spanspec namest="b31" nameend="b00" spanname="b0" />
<thead>
<row>
<entry>Identifier</entry>
@@ -133,6 +141,14 @@
<entry></entry>
<entry></entry>
<entry>Bit</entry>
+ <entry>31</entry>
+ <entry>30</entry>
+ <entry>29</entry>
+ <entry>28</entry>
+ <entry>27</entry>
+ <entry>26</entry>
+ <entry>25</entry>
+ <entry>24</entry>
<entry>23</entry>
<entry>22</entry>
<entry>21</entry>
@@ -164,7 +180,7 @@
<entry>V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE</entry>
<entry>0x1001</entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-24;
<entry>0</entry>
<entry>0</entry>
<entry>0</entry>
@@ -178,7 +194,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-24;
<entry>g<subscript>3</subscript></entry>
<entry>g<subscript>2</subscript></entry>
<entry>g<subscript>1</subscript></entry>
@@ -192,7 +208,7 @@
<entry>V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE</entry>
<entry>0x1002</entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-24;
<entry>g<subscript>3</subscript></entry>
<entry>g<subscript>2</subscript></entry>
<entry>g<subscript>1</subscript></entry>
@@ -206,7 +222,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-24;
<entry>0</entry>
<entry>0</entry>
<entry>0</entry>
@@ -220,7 +236,7 @@
<entry>V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE</entry>
<entry>0x1003</entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-24;
<entry>0</entry>
<entry>r<subscript>4</subscript></entry>
<entry>r<subscript>3</subscript></entry>
@@ -234,7 +250,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-24;
<entry>g<subscript>2</subscript></entry>
<entry>g<subscript>1</subscript></entry>
<entry>g<subscript>0</subscript></entry>
@@ -248,7 +264,7 @@
<entry>V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE</entry>
<entry>0x1004</entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-24;
<entry>g<subscript>2</subscript></entry>
<entry>g<subscript>1</subscript></entry>
<entry>g<subscript>0</subscript></entry>
@@ -262,7 +278,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-24;
<entry>0</entry>
<entry>r<subscript>4</subscript></entry>
<entry>r<subscript>3</subscript></entry>
@@ -276,7 +292,7 @@
<entry>V4L2_MBUS_FMT_BGR565_2X8_BE</entry>
<entry>0x1005</entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-24;
<entry>b<subscript>4</subscript></entry>
<entry>b<subscript>3</subscript></entry>
<entry>b<subscript>2</subscript></entry>
@@ -290,7 +306,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-24;
<entry>g<subscript>2</subscript></entry>
<entry>g<subscript>1</subscript></entry>
<entry>g<subscript>0</subscript></entry>
@@ -304,7 +320,7 @@
<entry>V4L2_MBUS_FMT_BGR565_2X8_LE</entry>
<entry>0x1006</entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-24;
<entry>g<subscript>2</subscript></entry>
<entry>g<subscript>1</subscript></entry>
<entry>g<subscript>0</subscript></entry>
@@ -318,7 +334,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-24;
<entry>b<subscript>4</subscript></entry>
<entry>b<subscript>3</subscript></entry>
<entry>b<subscript>2</subscript></entry>
@@ -332,7 +348,7 @@
<entry>V4L2_MBUS_FMT_RGB565_2X8_BE</entry>
<entry>0x1007</entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-24;
<entry>r<subscript>4</subscript></entry>
<entry>r<subscript>3</subscript></entry>
<entry>r<subscript>2</subscript></entry>
@@ -346,7 +362,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-24;
<entry>g<subscript>2</subscript></entry>
<entry>g<subscript>1</subscript></entry>
<entry>g<subscript>0</subscript></entry>
@@ -360,7 +376,7 @@
<entry>V4L2_MBUS_FMT_RGB565_2X8_LE</entry>
<entry>0x1008</entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-24;
<entry>g<subscript>2</subscript></entry>
<entry>g<subscript>1</subscript></entry>
<entry>g<subscript>0</subscript></entry>
@@ -374,7 +390,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-16;
+ &dash-ent-24;
<entry>r<subscript>4</subscript></entry>
<entry>r<subscript>3</subscript></entry>
<entry>r<subscript>2</subscript></entry>
@@ -388,12 +404,7 @@
<entry>V4L2_MBUS_FMT_RGB666_1X18</entry>
<entry>0x1009</entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-14;
<entry>r<subscript>5</subscript></entry>
<entry>r<subscript>4</subscript></entry>
<entry>r<subscript>3</subscript></entry>
@@ -417,6 +428,7 @@
<entry>V4L2_MBUS_FMT_RGB888_1X24</entry>
<entry>0x100a</entry>
<entry></entry>
+ &dash-ent-8;
<entry>r<subscript>7</subscript></entry>
<entry>r<subscript>6</subscript></entry>
<entry>r<subscript>5</subscript></entry>
@@ -446,9 +458,7 @@
<entry>V4L2_MBUS_FMT_RGB888_2X12_BE</entry>
<entry>0x100b</entry>
<entry></entry>
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-20;
<entry>r<subscript>7</subscript></entry>
<entry>r<subscript>6</subscript></entry>
<entry>r<subscript>5</subscript></entry>
@@ -466,9 +476,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-20;
<entry>g<subscript>3</subscript></entry>
<entry>g<subscript>2</subscript></entry>
<entry>g<subscript>1</subscript></entry>
@@ -486,9 +494,7 @@
<entry>V4L2_MBUS_FMT_RGB888_2X12_LE</entry>
<entry>0x100c</entry>
<entry></entry>
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-20;
<entry>g<subscript>3</subscript></entry>
<entry>g<subscript>2</subscript></entry>
<entry>g<subscript>1</subscript></entry>
@@ -506,9 +512,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-20;
<entry>r<subscript>7</subscript></entry>
<entry>r<subscript>6</subscript></entry>
<entry>r<subscript>5</subscript></entry>
@@ -522,6 +526,43 @@
<entry>g<subscript>5</subscript></entry>
<entry>g<subscript>4</subscript></entry>
</row>
+ <row id="V4L2-MBUS-FMT-ARGB888-1X32">
+ <entry>V4L2_MBUS_FMT_ARGB888_1X32</entry>
+ <entry>0x100d</entry>
+ <entry></entry>
+ <entry>a<subscript>7</subscript></entry>
+ <entry>a<subscript>6</subscript></entry>
+ <entry>a<subscript>5</subscript></entry>
+ <entry>a<subscript>4</subscript></entry>
+ <entry>a<subscript>3</subscript></entry>
+ <entry>a<subscript>2</subscript></entry>
+ <entry>a<subscript>1</subscript></entry>
+ <entry>a<subscript>0</subscript></entry>
+ <entry>r<subscript>7</subscript></entry>
+ <entry>r<subscript>6</subscript></entry>
+ <entry>r<subscript>5</subscript></entry>
+ <entry>r<subscript>4</subscript></entry>
+ <entry>r<subscript>3</subscript></entry>
+ <entry>r<subscript>2</subscript></entry>
+ <entry>r<subscript>1</subscript></entry>
+ <entry>r<subscript>0</subscript></entry>
+ <entry>g<subscript>7</subscript></entry>
+ <entry>g<subscript>6</subscript></entry>
+ <entry>g<subscript>5</subscript></entry>
+ <entry>g<subscript>4</subscript></entry>
+ <entry>g<subscript>3</subscript></entry>
+ <entry>g<subscript>2</subscript></entry>
+ <entry>g<subscript>1</subscript></entry>
+ <entry>g<subscript>0</subscript></entry>
+ <entry>b<subscript>7</subscript></entry>
+ <entry>b<subscript>6</subscript></entry>
+ <entry>b<subscript>5</subscript></entry>
+ <entry>b<subscript>4</subscript></entry>
+ <entry>b<subscript>3</subscript></entry>
+ <entry>b<subscript>2</subscript></entry>
+ <entry>b<subscript>1</subscript></entry>
+ <entry>b<subscript>0</subscript></entry>
+ </row>
</tbody>
</tgroup>
</table>
@@ -1149,6 +1190,7 @@
<listitem><para>y<subscript>x</subscript> for luma component bit number x</para></listitem>
<listitem><para>u<subscript>x</subscript> for blue chroma component bit number x</para></listitem>
<listitem><para>v<subscript>x</subscript> for red chroma component bit number x</para></listitem>
+ <listitem><para>a<subscript>x</subscript> for alpha component bit number x</para></listitem>
<listitem><para>- for non-available bits (for positions higher than the bus width)</para></listitem>
<listitem><para>d for dummy bits</para></listitem>
</itemizedlist>
@@ -1159,37 +1201,39 @@
<colspec colname="id" align="left" />
<colspec colname="code" align="center"/>
<colspec colname="bit" />
- <colspec colnum="4" colname="b29" align="center" />
- <colspec colnum="5" colname="b28" align="center" />
- <colspec colnum="6" colname="b27" align="center" />
- <colspec colnum="7" colname="b26" align="center" />
- <colspec colnum="8" colname="b25" align="center" />
- <colspec colnum="9" colname="b24" align="center" />
- <colspec colnum="10" colname="b23" align="center" />
- <colspec colnum="11" colname="b22" align="center" />
- <colspec colnum="12" colname="b21" align="center" />
- <colspec colnum="13" colname="b20" align="center" />
- <colspec colnum="14" colname="b19" align="center" />
- <colspec colnum="15" colname="b18" align="center" />
- <colspec colnum="16" colname="b17" align="center" />
- <colspec colnum="17" colname="b16" align="center" />
- <colspec colnum="18" colname="b15" align="center" />
- <colspec colnum="19" colname="b14" align="center" />
- <colspec colnum="20" colname="b13" align="center" />
- <colspec colnum="21" colname="b12" align="center" />
- <colspec colnum="22" colname="b11" align="center" />
- <colspec colnum="23" colname="b10" align="center" />
- <colspec colnum="24" colname="b09" align="center" />
- <colspec colnum="25" colname="b08" align="center" />
- <colspec colnum="26" colname="b07" align="center" />
- <colspec colnum="27" colname="b06" align="center" />
- <colspec colnum="28" colname="b05" align="center" />
- <colspec colnum="29" colname="b04" align="center" />
- <colspec colnum="30" colname="b03" align="center" />
- <colspec colnum="31" colname="b02" align="center" />
- <colspec colnum="32" colname="b01" align="center" />
- <colspec colnum="33" colname="b00" align="center" />
- <spanspec namest="b29" nameend="b00" spanname="b0" />
+ <colspec colnum="4" colname="b31" align="center" />
+ <colspec colnum="5" colname="b20" align="center" />
+ <colspec colnum="6" colname="b29" align="center" />
+ <colspec colnum="7" colname="b28" align="center" />
+ <colspec colnum="8" colname="b27" align="center" />
+ <colspec colnum="9" colname="b26" align="center" />
+ <colspec colnum="10" colname="b25" align="center" />
+ <colspec colnum="11" colname="b24" align="center" />
+ <colspec colnum="12" colname="b23" align="center" />
+ <colspec colnum="13" colname="b22" align="center" />
+ <colspec colnum="14" colname="b21" align="center" />
+ <colspec colnum="15" colname="b20" align="center" />
+ <colspec colnum="16" colname="b19" align="center" />
+ <colspec colnum="17" colname="b18" align="center" />
+ <colspec colnum="18" colname="b17" align="center" />
+ <colspec colnum="19" colname="b16" align="center" />
+ <colspec colnum="20" colname="b15" align="center" />
+ <colspec colnum="21" colname="b14" align="center" />
+ <colspec colnum="22" colname="b13" align="center" />
+ <colspec colnum="23" colname="b12" align="center" />
+ <colspec colnum="24" colname="b11" align="center" />
+ <colspec colnum="25" colname="b10" align="center" />
+ <colspec colnum="26" colname="b09" align="center" />
+ <colspec colnum="27" colname="b08" align="center" />
+ <colspec colnum="28" colname="b07" align="center" />
+ <colspec colnum="29" colname="b06" align="center" />
+ <colspec colnum="30" colname="b05" align="center" />
+ <colspec colnum="31" colname="b04" align="center" />
+ <colspec colnum="32" colname="b03" align="center" />
+ <colspec colnum="33" colname="b02" align="center" />
+ <colspec colnum="34" colname="b01" align="center" />
+ <colspec colnum="35" colname="b00" align="center" />
+ <spanspec namest="b31" nameend="b00" spanname="b0" />
<thead>
<row>
<entry>Identifier</entry>
@@ -1201,6 +1245,8 @@
<entry></entry>
<entry></entry>
<entry>Bit</entry>
+ <entry>31</entry>
+ <entry>30</entry>
<entry>29</entry>
<entry>28</entry>
<entry>27</entry>
@@ -1238,10 +1284,7 @@
<entry>V4L2_MBUS_FMT_Y8_1X8</entry>
<entry>0x2001</entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1255,18 +1298,7 @@
<entry>V4L2_MBUS_FMT_UV8_1X8</entry>
<entry>0x2015</entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>u<subscript>7</subscript></entry>
<entry>u<subscript>6</subscript></entry>
<entry>u<subscript>5</subscript></entry>
@@ -1280,18 +1312,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>v<subscript>7</subscript></entry>
<entry>v<subscript>6</subscript></entry>
<entry>v<subscript>5</subscript></entry>
@@ -1305,10 +1326,7 @@
<entry>V4L2_MBUS_FMT_UYVY8_1_5X8</entry>
<entry>0x2002</entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>u<subscript>7</subscript></entry>
<entry>u<subscript>6</subscript></entry>
<entry>u<subscript>5</subscript></entry>
@@ -1322,10 +1340,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1339,10 +1354,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1356,10 +1368,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>v<subscript>7</subscript></entry>
<entry>v<subscript>6</subscript></entry>
<entry>v<subscript>5</subscript></entry>
@@ -1373,10 +1382,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1390,10 +1396,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1407,10 +1410,7 @@
<entry>V4L2_MBUS_FMT_VYUY8_1_5X8</entry>
<entry>0x2003</entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>v<subscript>7</subscript></entry>
<entry>v<subscript>6</subscript></entry>
<entry>v<subscript>5</subscript></entry>
@@ -1424,10 +1424,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1441,10 +1438,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1458,10 +1452,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>u<subscript>7</subscript></entry>
<entry>u<subscript>6</subscript></entry>
<entry>u<subscript>5</subscript></entry>
@@ -1475,10 +1466,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1492,10 +1480,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1509,10 +1494,7 @@
<entry>V4L2_MBUS_FMT_YUYV8_1_5X8</entry>
<entry>0x2004</entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1526,10 +1508,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1543,10 +1522,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>u<subscript>7</subscript></entry>
<entry>u<subscript>6</subscript></entry>
<entry>u<subscript>5</subscript></entry>
@@ -1560,10 +1536,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1577,10 +1550,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1594,10 +1564,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>v<subscript>7</subscript></entry>
<entry>v<subscript>6</subscript></entry>
<entry>v<subscript>5</subscript></entry>
@@ -1611,10 +1578,7 @@
<entry>V4L2_MBUS_FMT_YVYU8_1_5X8</entry>
<entry>0x2005</entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1628,10 +1592,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1645,10 +1606,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>v<subscript>7</subscript></entry>
<entry>v<subscript>6</subscript></entry>
<entry>v<subscript>5</subscript></entry>
@@ -1662,10 +1620,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1679,10 +1634,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1696,10 +1648,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>u<subscript>7</subscript></entry>
<entry>u<subscript>6</subscript></entry>
<entry>u<subscript>5</subscript></entry>
@@ -1713,10 +1662,7 @@
<entry>V4L2_MBUS_FMT_UYVY8_2X8</entry>
<entry>0x2006</entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>u<subscript>7</subscript></entry>
<entry>u<subscript>6</subscript></entry>
<entry>u<subscript>5</subscript></entry>
@@ -1730,10 +1676,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1747,10 +1690,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>v<subscript>7</subscript></entry>
<entry>v<subscript>6</subscript></entry>
<entry>v<subscript>5</subscript></entry>
@@ -1764,10 +1704,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1781,10 +1718,7 @@
<entry>V4L2_MBUS_FMT_VYUY8_2X8</entry>
<entry>0x2007</entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>v<subscript>7</subscript></entry>
<entry>v<subscript>6</subscript></entry>
<entry>v<subscript>5</subscript></entry>
@@ -1798,10 +1732,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1815,10 +1746,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>u<subscript>7</subscript></entry>
<entry>u<subscript>6</subscript></entry>
<entry>u<subscript>5</subscript></entry>
@@ -1832,10 +1760,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1849,10 +1774,7 @@
<entry>V4L2_MBUS_FMT_YUYV8_2X8</entry>
<entry>0x2008</entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1866,10 +1788,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>u<subscript>7</subscript></entry>
<entry>u<subscript>6</subscript></entry>
<entry>u<subscript>5</subscript></entry>
@@ -1883,10 +1802,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1900,10 +1816,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>v<subscript>7</subscript></entry>
<entry>v<subscript>6</subscript></entry>
<entry>v<subscript>5</subscript></entry>
@@ -1917,10 +1830,7 @@
<entry>V4L2_MBUS_FMT_YVYU8_2X8</entry>
<entry>0x2009</entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1934,10 +1844,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>v<subscript>7</subscript></entry>
<entry>v<subscript>6</subscript></entry>
<entry>v<subscript>5</subscript></entry>
@@ -1951,10 +1858,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -1968,10 +1872,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-24;
<entry>u<subscript>7</subscript></entry>
<entry>u<subscript>6</subscript></entry>
<entry>u<subscript>5</subscript></entry>
@@ -1985,8 +1886,7 @@
<entry>V4L2_MBUS_FMT_Y10_1X10</entry>
<entry>0x200a</entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
+ &dash-ent-22;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2002,8 +1902,7 @@
<entry>V4L2_MBUS_FMT_YUYV10_2X10</entry>
<entry>0x200b</entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
+ &dash-ent-22;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2019,8 +1918,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
+ &dash-ent-22;
<entry>u<subscript>9</subscript></entry>
<entry>u<subscript>8</subscript></entry>
<entry>u<subscript>7</subscript></entry>
@@ -2036,8 +1934,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
+ &dash-ent-22;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2053,8 +1950,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
+ &dash-ent-22;
<entry>v<subscript>9</subscript></entry>
<entry>v<subscript>8</subscript></entry>
<entry>v<subscript>7</subscript></entry>
@@ -2070,8 +1966,7 @@
<entry>V4L2_MBUS_FMT_YVYU10_2X10</entry>
<entry>0x200c</entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
+ &dash-ent-22;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2087,8 +1982,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
+ &dash-ent-22;
<entry>v<subscript>9</subscript></entry>
<entry>v<subscript>8</subscript></entry>
<entry>v<subscript>7</subscript></entry>
@@ -2104,8 +1998,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
+ &dash-ent-22;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2121,8 +2014,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- &dash-ent-10;
+ &dash-ent-22;
<entry>u<subscript>9</subscript></entry>
<entry>u<subscript>8</subscript></entry>
<entry>u<subscript>7</subscript></entry>
@@ -2138,15 +2030,7 @@
<entry>V4L2_MBUS_FMT_Y12_1X12</entry>
<entry>0x2013</entry>
<entry></entry>
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-20;
<entry>y<subscript>11</subscript></entry>
<entry>y<subscript>10</subscript></entry>
<entry>y<subscript>9</subscript></entry>
@@ -2164,11 +2048,7 @@
<entry>V4L2_MBUS_FMT_UYVY8_1X16</entry>
<entry>0x200f</entry>
<entry></entry>
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-16;
<entry>u<subscript>7</subscript></entry>
<entry>u<subscript>6</subscript></entry>
<entry>u<subscript>5</subscript></entry>
@@ -2190,11 +2070,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-16;
<entry>v<subscript>7</subscript></entry>
<entry>v<subscript>6</subscript></entry>
<entry>v<subscript>5</subscript></entry>
@@ -2216,11 +2092,7 @@
<entry>V4L2_MBUS_FMT_VYUY8_1X16</entry>
<entry>0x2010</entry>
<entry></entry>
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-16;
<entry>v<subscript>7</subscript></entry>
<entry>v<subscript>6</subscript></entry>
<entry>v<subscript>5</subscript></entry>
@@ -2242,11 +2114,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-16;
<entry>u<subscript>7</subscript></entry>
<entry>u<subscript>6</subscript></entry>
<entry>u<subscript>5</subscript></entry>
@@ -2268,11 +2136,7 @@
<entry>V4L2_MBUS_FMT_YUYV8_1X16</entry>
<entry>0x2011</entry>
<entry></entry>
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-16;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2294,11 +2158,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-16;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2320,11 +2180,7 @@
<entry>V4L2_MBUS_FMT_YVYU8_1X16</entry>
<entry>0x2012</entry>
<entry></entry>
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-16;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2346,11 +2202,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-16;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2372,10 +2224,7 @@
<entry>V4L2_MBUS_FMT_YDYUYDYV8_1X16</entry>
<entry>0x2014</entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-16;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2397,10 +2246,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-16;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2422,10 +2268,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-16;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2447,10 +2290,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
- <entry>-</entry>
+ &dash-ent-16;
<entry>y<subscript>7</subscript></entry>
<entry>y<subscript>6</subscript></entry>
<entry>y<subscript>5</subscript></entry>
@@ -2472,7 +2312,7 @@
<entry>V4L2_MBUS_FMT_YUYV10_1X20</entry>
<entry>0x200d</entry>
<entry></entry>
- &dash-ent-10;
+ &dash-ent-12;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2498,7 +2338,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
+ &dash-ent-12;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2524,7 +2364,7 @@
<entry>V4L2_MBUS_FMT_YVYU10_1X20</entry>
<entry>0x200e</entry>
<entry></entry>
- &dash-ent-10;
+ &dash-ent-12;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2550,7 +2390,7 @@
<entry></entry>
<entry></entry>
<entry></entry>
- &dash-ent-10;
+ &dash-ent-12;
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2574,8 +2414,10 @@
</row>
<row id="V4L2-MBUS-FMT-YUV10-1X30">
<entry>V4L2_MBUS_FMT_YUV10_1X30</entry>
- <entry>0x2014</entry>
+ <entry>0x2016</entry>
<entry></entry>
+ <entry>-</entry>
+ <entry>-</entry>
<entry>y<subscript>9</subscript></entry>
<entry>y<subscript>8</subscript></entry>
<entry>y<subscript>7</subscript></entry>
@@ -2607,6 +2449,43 @@
<entry>v<subscript>1</subscript></entry>
<entry>v<subscript>0</subscript></entry>
</row>
+ <row id="V4L2-MBUS-FMT-AYUV8-1X32">
+ <entry>V4L2_MBUS_FMT_AYUV8_1X32</entry>
+ <entry>0x2017</entry>
+ <entry></entry>
+ <entry>a<subscript>7</subscript></entry>
+ <entry>a<subscript>6</subscript></entry>
+ <entry>a<subscript>5</subscript></entry>
+ <entry>a<subscript>4</subscript></entry>
+ <entry>a<subscript>3</subscript></entry>
+ <entry>a<subscript>2</subscript></entry>
+ <entry>a<subscript>1</subscript></entry>
+ <entry>a<subscript>0</subscript></entry>
+ <entry>y<subscript>7</subscript></entry>
+ <entry>y<subscript>6</subscript></entry>
+ <entry>y<subscript>5</subscript></entry>
+ <entry>y<subscript>4</subscript></entry>
+ <entry>y<subscript>3</subscript></entry>
+ <entry>y<subscript>2</subscript></entry>
+ <entry>y<subscript>1</subscript></entry>
+ <entry>y<subscript>0</subscript></entry>
+ <entry>u<subscript>7</subscript></entry>
+ <entry>u<subscript>6</subscript></entry>
+ <entry>u<subscript>5</subscript></entry>
+ <entry>u<subscript>4</subscript></entry>
+ <entry>u<subscript>3</subscript></entry>
+ <entry>u<subscript>2</subscript></entry>
+ <entry>u<subscript>1</subscript></entry>
+ <entry>u<subscript>0</subscript></entry>
+ <entry>v<subscript>7</subscript></entry>
+ <entry>v<subscript>6</subscript></entry>
+ <entry>v<subscript>5</subscript></entry>
+ <entry>v<subscript>4</subscript></entry>
+ <entry>v<subscript>3</subscript></entry>
+ <entry>v<subscript>2</subscript></entry>
+ <entry>v<subscript>1</subscript></entry>
+ <entry>v<subscript>0</subscript></entry>
+ </row>
</tbody>
</tgroup>
</table>
diff --git a/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml b/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
index cd994367243..9b700a5f4df 100644
--- a/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-create-bufs.xml
@@ -62,18 +62,29 @@ addition to the <constant>VIDIOC_REQBUFS</constant> ioctl, when a tighter
control over buffers is required. This ioctl can be called multiple times to
create buffers of different sizes.</para>
- <para>To allocate device buffers applications initialize relevant fields of
-the <structname>v4l2_create_buffers</structname> structure. They set the
-<structfield>type</structfield> field in the
-&v4l2-format; structure, embedded in this
-structure, to the respective stream or buffer type.
-<structfield>count</structfield> must be set to the number of required buffers.
-<structfield>memory</structfield> specifies the required I/O method. The
-<structfield>format</structfield> field shall typically be filled in using
-either the <constant>VIDIOC_TRY_FMT</constant> or
-<constant>VIDIOC_G_FMT</constant> ioctl(). Additionally, applications can adjust
-<structfield>sizeimage</structfield> fields to fit their specific needs. The
-<structfield>reserved</structfield> array must be zeroed.</para>
+ <para>To allocate the device buffers, applications must initialize the
+relevant fields of the <structname>v4l2_create_buffers</structname> structure.
+The <structfield>count</structfield> field must be set to the number of
+requested buffers, the <structfield>memory</structfield> field specifies the
+requested I/O method and the <structfield>reserved</structfield> array must be
+zeroed.</para>
+
+ <para>The <structfield>format</structfield> field specifies the image format
+that the buffers must be able to handle. The application has to fill in this
+&v4l2-format;. Usually this will be done using the
+<constant>VIDIOC_TRY_FMT</constant> or <constant>VIDIOC_G_FMT</constant> ioctl()
+to ensure that the requested format is supported by the driver. Unsupported
+formats will result in an error.</para>
+
+ <para>The buffers created by this ioctl will be at least as large as the size
+defined by the <structfield>format.pix.sizeimage</structfield> field. If the
+<structfield>format.pix.sizeimage</structfield> field is less than the minimum
+required for the given format, then <structfield>sizeimage</structfield> will be
+increased by the driver to that minimum to allocate the buffers. If it is
+larger, then the value will be used as-is. The same applies to the
+<structfield>sizeimage</structfield> field of the
+<structname>v4l2_plane_pix_format</structname> structure in the case of
+multiplanar formats.</para>
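+
+ <para>The sequence below is a minimal, non-normative sketch of the above; it
+assumes an already opened single-planar capture device and memory-mapped
+I/O.</para>
+
+ <programlisting>
+#include &lt;string.h&gt;
+#include &lt;sys/ioctl.h&gt;
+#include &lt;linux/videodev2.h&gt;
+
+/* Allocate four extra buffers matching the currently active format. */
+static int create_extra_buffers(int fd)
+{
+	struct v4l2_create_buffers create;
+
+	memset(&amp;create, 0, sizeof(create));	/* also zeroes 'reserved' */
+	create.count = 4;
+	create.memory = V4L2_MEMORY_MMAP;
+	create.format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+
+	/* Fill in the format the new buffers must be able to hold. */
+	if (ioctl(fd, VIDIOC_G_FMT, &amp;create.format) &lt; 0)
+		return -1;
+
+	/* On success, create.index holds the index of the first new buffer. */
+	return ioctl(fd, VIDIOC_CREATE_BUFS, &amp;create);
+}
+ </programlisting>
+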
<para>When the ioctl is called with a pointer to this structure the driver
will attempt to allocate up to the requested number of buffers and store the
@@ -144,9 +155,9 @@ mapped</link> I/O.</para>
<varlistentry>
<term><errorcode>EINVAL</errorcode></term>
<listitem>
- <para>The buffer type (<structfield>type</structfield> field) or the
-requested I/O method (<structfield>memory</structfield>) is not
-supported.</para>
+ <para>The buffer type (<structfield>format.type</structfield> field),
+requested I/O method (<structfield>memory</structfield>) or format
+(<structfield>format</structfield> field) is not valid.</para>
</listitem>
</varlistentry>
</variablelist>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-dv-timings.xml b/Documentation/DocBook/media/v4l/vidioc-g-dv-timings.xml
index 72369707bd7..c4336577ff0 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-dv-timings.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-dv-timings.xml
@@ -156,19 +156,19 @@ bit 0 (V4L2_DV_VSYNC_POS_POL) is for vertical sync polarity and bit 1 (V4L2_DV_H
<entry>__u32</entry>
<entry><structfield>il_vfrontporch</structfield></entry>
<entry>Vertical front porch in lines for the even field (aka field 2) of
- interlaced field formats.</entry>
+ interlaced field formats. Must be 0 for progressive formats.</entry>
</row>
<row>
<entry>__u32</entry>
<entry><structfield>il_vsync</structfield></entry>
<entry>Vertical sync length in lines for the even field (aka field 2) of
- interlaced field formats.</entry>
+ interlaced field formats. Must be 0 for progressive formats.</entry>
</row>
<row>
<entry>__u32</entry>
<entry><structfield>il_vbackporch</structfield></entry>
<entry>Vertical back porch in lines for the even field (aka field 2) of
- interlaced field formats.</entry>
+ interlaced field formats. Must be 0 for progressive formats.</entry>
</row>
<row>
<entry>__u32</entry>
diff --git a/Documentation/DocBook/media/v4l/vidioc-g-jpegcomp.xml b/Documentation/DocBook/media/v4l/vidioc-g-jpegcomp.xml
index 48748499c09..098ff483802 100644
--- a/Documentation/DocBook/media/v4l/vidioc-g-jpegcomp.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-g-jpegcomp.xml
@@ -92,8 +92,8 @@ to add them.</para>
<entry>int</entry>
<entry><structfield>quality</structfield></entry>
<entry>Deprecated. If <link linkend="jpeg-quality-control"><constant>
- V4L2_CID_JPEG_IMAGE_QUALITY</constant></link> control is exposed by
- a driver applications should use it instead and ignore this field.
+ V4L2_CID_JPEG_COMPRESSION_QUALITY</constant></link> control is exposed
+ by a driver, applications should use it instead and ignore this field.
</entry>
</row>
<row>
diff --git a/Documentation/DocBook/media_api.tmpl b/Documentation/DocBook/media_api.tmpl
index 6a8b7158697..4c8d282545a 100644
--- a/Documentation/DocBook/media_api.tmpl
+++ b/Documentation/DocBook/media_api.tmpl
@@ -1,6 +1,6 @@
<?xml version="1.0"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
- "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" [
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+ "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" [
<!ENTITY % media-entities SYSTEM "./media-entities.tmpl"> %media-entities;
<!ENTITY media-indices SYSTEM "./media-indices.tmpl">
@@ -22,8 +22,14 @@
<!-- LinuxTV v4l-dvb repository. -->
<!ENTITY v4l-dvb "<ulink url='http://linuxtv.org/repo/'>http://linuxtv.org/repo/</ulink>">
+<!ENTITY dash-ent-8 "<entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry>">
<!ENTITY dash-ent-10 "<entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry>">
+<!ENTITY dash-ent-12 "<entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry>">
+<!ENTITY dash-ent-14 "<entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry>">
<!ENTITY dash-ent-16 "<entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry>">
+<!ENTITY dash-ent-20 "<entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry>">
+<!ENTITY dash-ent-22 "<entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry>">
+<!ENTITY dash-ent-24 "<entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry><entry>-</entry>">
]>
<book id="media_api">
diff --git a/Documentation/RCU/RTFP.txt b/Documentation/RCU/RTFP.txt
index 7f40c72a9c5..273e654d7d0 100644
--- a/Documentation/RCU/RTFP.txt
+++ b/Documentation/RCU/RTFP.txt
@@ -39,7 +39,7 @@ in read-mostly situations. This algorithm does take pains to avoid
write-side contention and parallelize the other write-side overheads by
providing a fine-grained locking design, however, it would be interesting
to see how much of the performance advantage reported in 1990 remains
-in 2004.
+today.
At about this same time, Adams [Adams91] described ``chaotic relaxation'',
where the normal barriers between successive iterations of convergent
@@ -86,9 +86,9 @@ DYNIX/ptx kernel. The corresponding conference paper appeared in 1998
[McKenney98].
In 1999, the Tornado and K42 groups described their "generations"
-mechanism, which quite similar to RCU [Gamsa99]. These operating systems
-made pervasive use of RCU in place of "existence locks", which greatly
-simplifies locking hierarchies.
+mechanism, which is quite similar to RCU [Gamsa99]. These operating
+systems made pervasive use of RCU in place of "existence locks", which
+greatly simplifies locking hierarchies and helps avoid deadlocks.
2001 saw the first RCU presentation involving Linux [McKenney01a]
at OLS. The resulting abundance of RCU patches was presented the
@@ -106,8 +106,11 @@ these techniques still impose significant read-side overhead in the
form of memory barriers. Researchers at Sun worked along similar lines
in the same timeframe [HerlihyLM02]. These techniques can be thought
of as inside-out reference counts, where the count is represented by the
-number of hazard pointers referencing a given data structure (rather than
-the more conventional counter field within the data structure itself).
+number of hazard pointers referencing a given data structure rather than
+the more conventional counter field within the data structure itself.
+The key advantage of inside-out reference counts is that they can be
+stored in immortal variables, thus allowing races between access and
+deletion to be avoided.
By the same token, RCU can be thought of as a "bulk reference count",
where some form of reference counter covers all reference by a given CPU
@@ -179,7 +182,25 @@ tree using software transactional memory to protect concurrent updates
(strange, but true!) [PhilHoward2011RCUTMRBTree], yet another variant of
RCU-protected resizeable hash tables [Triplett:2011:RPHash], the 3.0 RCU
trainwreck [PaulEMcKenney2011RCU3.0trainwreck], and Neil Brown's "Meet the
-Lockers" LWN article [NeilBrown2011MeetTheLockers].
+Lockers" LWN article [NeilBrown2011MeetTheLockers]. Some academic
+work looked at debugging uses of RCU [Seyster:2011:RFA:2075416.2075425].
+
+In 2012, Josh Triplett received his Ph.D. with his dissertation
+covering RCU-protected resizable hash tables and the relationship
+between memory barriers and read-side traversal order: If the updater
+is making changes in the opposite direction from the read-side traversal
+order, the updater need only execute a memory-barrier instruction,
+but if in the same direction, the updater needs to wait for a grace
+period between the individual updates [JoshTriplettPhD]. Also in 2012,
+after seventeen years of attempts, an RCU paper made it into a top-flight
+academic journal, IEEE Transactions on Parallel and Distributed Systems
+[MathieuDesnoyers2012URCU]. A group of researchers in Spain applied
+user-level RCU to crowd simulation [GuillermoVigueras2012RCUCrowd], and
+another group of researchers in Europe produced a formal description of
+RCU based on separation logic [AlexeyGotsman2012VerifyGraceExtended],
+which was published in the 2013 European Symposium on Programming
+[AlexeyGotsman2013ESOPRCU].
+
Bibtex Entries
@@ -193,13 +214,12 @@ Bibtex Entries
,volume="5"
,number="3"
,pages="354-382"
-,note="Available:
-\url{http://portal.acm.org/citation.cfm?id=320619&dl=GUIDE,}
-[Viewed December 3, 2007]"
,annotation={
Use garbage collector to clean up data after everyone is done with it.
.
Oldest use of something vaguely resembling RCU that I have found.
+ http://portal.acm.org/citation.cfm?id=320619&dl=GUIDE,
+ [Viewed December 3, 2007]
}
}
@@ -309,7 +329,7 @@ for Programming Languages and Operating Systems}"
,doi = {http://doi.acm.org/10.1145/42392.42399}
,publisher = {ACM}
,address = {New York, NY, USA}
-,annotation= {
+,annotation={
At the top of page 307: "Conflicts with deposits and withdrawals
are necessary if the reported total is to be up to date. They
could be avoided by having total return a sum that is slightly
@@ -346,8 +366,9 @@ for Programming Languages and Operating Systems}"
}
}
-@Book{Adams91
-,Author="Gregory R. Adams"
+# Was Adams91, see also syncrefs.bib.
+@Book{Andrews91textbook
+,Author="Gregory R. Andrews"
,title="Concurrent Programming, Principles, and Practices"
,Publisher="Benjamin Cummins"
,Year="1991"
@@ -398,39 +419,39 @@ for Programming Languages and Operating Systems}"
}
}
-@conference{Pu95a,
-Author = "Calton Pu and Tito Autrey and Andrew Black and Charles Consel and
+@conference{Pu95a
+,Author = "Calton Pu and Tito Autrey and Andrew Black and Charles Consel and
Crispin Cowan and Jon Inouye and Lakshmi Kethana and Jonathan Walpole and
-Ke Zhang",
-Title = "Optimistic Incremental Specialization: Streamlining a Commercial
-Operating System",
-Booktitle = "15\textsuperscript{th} ACM Symposium on
-Operating Systems Principles (SOSP'95)",
-address = "Copper Mountain, CO",
-month="December",
-year="1995",
-pages="314-321",
-annotation="
+Ke Zhang"
+,Title = "Optimistic Incremental Specialization: Streamlining a Commercial
+Operating System"
+,Booktitle = "15\textsuperscript{th} ACM Symposium on
+Operating Systems Principles (SOSP'95)"
+,address = "Copper Mountain, CO"
+,month="December"
+,year="1995"
+,pages="314-321"
+,annotation={
Uses a replugger, but with a flag to signal when people are
using the resource at hand. Only one reader at a time.
-"
-}
-
-@conference{Cowan96a,
-Author = "Crispin Cowan and Tito Autrey and Charles Krasic and
-Calton Pu and Jonathan Walpole",
-Title = "Fast Concurrent Dynamic Linking for an Adaptive Operating System",
-Booktitle = "International Conference on Configurable Distributed Systems
-(ICCDS'96)",
-address = "Annapolis, MD",
-month="May",
-year="1996",
-pages="108",
-isbn="0-8186-7395-8",
-annotation="
+}
+}
+
+@conference{Cowan96a
+,Author = "Crispin Cowan and Tito Autrey and Charles Krasic and
+Calton Pu and Jonathan Walpole"
+,Title = "Fast Concurrent Dynamic Linking for an Adaptive Operating System"
+,Booktitle = "International Conference on Configurable Distributed Systems
+(ICCDS'96)"
+,address = "Annapolis, MD"
+,month="May"
+,year="1996"
+,pages="108"
+,isbn="0-8186-7395-8"
+,annotation={
Uses a replugger, but with a counter to signal when people are
using the resource at hand. Allows multiple readers.
-"
+}
}
@techreport{Slingwine95
@@ -493,14 +514,13 @@ Problems"
,Year="1998"
,pages="509-518"
,Address="Las Vegas, NV"
-,note="Available:
-\url{http://www.rdrop.com/users/paulmck/RCU/rclockpdcsproof.pdf}
-[Viewed December 3, 2007]"
,annotation={
Describes and analyzes RCU mechanism in DYNIX/ptx. Describes
application to linked list update and log-buffer flushing.
Defines 'quiescent state'. Includes both measured and analytic
evaluation.
+ http://www.rdrop.com/users/paulmck/RCU/rclockpdcsproof.pdf
+ [Viewed December 3, 2007]
}
}
@@ -514,13 +534,12 @@ Operating System Design and Implementation}"
,Year="1999"
,pages="87-100"
,Address="New Orleans, LA"
-,note="Available:
-\url{http://www.usenix.org/events/osdi99/full_papers/gamsa/gamsa.pdf}
-[Viewed August 30, 2006]"
,annotation={
Use of RCU-like facility in K42/Tornado. Another independent
invention of RCU.
See especially pages 7-9 (Section 5).
+ http://www.usenix.org/events/osdi99/full_papers/gamsa/gamsa.pdf
+ [Viewed August 30, 2006]
}
}
@@ -611,9 +630,9 @@ Orran Krieger and Rusty Russell and Dipankar Sarma and Maneesh Soni"
,note="Available:
\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=100259266316456&w=2}
[Viewed June 23, 2004]"
-,annotation="
+,annotation={
Memory-barrier and Alpha thread. 100 messages, not too bad...
-"
+}
}
@unpublished{Spraul01
@@ -624,10 +643,10 @@ Orran Krieger and Rusty Russell and Dipankar Sarma and Maneesh Soni"
,note="Available:
\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=100264675012867&w=2}
[Viewed June 23, 2004]"
-,annotation="
+,annotation={
Suggested burying memory barriers in Linux's list-manipulation
primitives.
-"
+}
}
@unpublished{LinusTorvalds2001a
@@ -638,6 +657,8 @@ Orran Krieger and Rusty Russell and Dipankar Sarma and Maneesh Soni"
,note="Available:
\url{http://lkml.org/lkml/2001/10/13/105}
[Viewed August 21, 2004]"
+,annotation={
+}
}
@unpublished{Blanchard02a
@@ -657,10 +678,10 @@ Orran Krieger and Rusty Russell and Dipankar Sarma and Maneesh Soni"
,Month="June"
,Year="2002"
,pages="289-300"
-,annotation="
+,annotation={
Measured scalability of Linux 2.4 kernel's directory-entry cache
(dcache), and measured some scalability enhancements.
-"
+}
}
@Conference{McKenney02a
@@ -674,10 +695,10 @@ Andrea Arcangeli and Andi Kleen and Orran Krieger and Rusty Russell"
,note="Available:
\url{http://www.linux.org.uk/~ajh/ols2002_proceedings.pdf.gz}
[Viewed June 23, 2004]"
-,annotation="
+,annotation={
Presented and compared a number of RCU implementations for the
Linux kernel.
-"
+}
}
@unpublished{Sarma02a
@@ -688,9 +709,9 @@ Andrea Arcangeli and Andi Kleen and Orran Krieger and Rusty Russell"
,note="Available:
\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=102645767914212&w=2}
[Viewed June 23, 2004]"
-,annotation="
+,annotation={
Compare fastwalk and RCU for dcache. RCU won.
-"
+}
}
@unpublished{Barbieri02
@@ -701,9 +722,9 @@ Andrea Arcangeli and Andi Kleen and Orran Krieger and Rusty Russell"
,note="Available:
\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=103082050621241&w=2}
[Viewed: June 23, 2004]"
-,annotation="
+,annotation={
Suggested RCU for vfs\_shared\_cred.
-"
+}
}
@unpublished{Dickins02a
@@ -722,10 +743,10 @@ Andrea Arcangeli and Andi Kleen and Orran Krieger and Rusty Russell"
,note="Available:
\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=103462075416638&w=2}
[Viewed June 23, 2004]"
-,annotation="
+,annotation={
Performance of dcache RCU on kernbench for 16x NUMA-Q and 1x,
2x, and 4x systems. RCU does no harm, and helps on 16x.
-"
+}
}
@unpublished{LinusTorvalds2003a
@@ -736,14 +757,14 @@ Andrea Arcangeli and Andi Kleen and Orran Krieger and Rusty Russell"
,note="Available:
\url{http://lkml.org/lkml/2003/3/9/205}
[Viewed March 13, 2006]"
-,annotation="
+,annotation={
Linus suggests replacing brlock with RCU and/or seqlocks:
.
'It's entirely possible that the current user could be replaced
by RCU and/or seqlocks, and we could get rid of brlocks entirely.'
.
Steve Hemminger responds by replacing them with RCU.
-"
+}
}
@article{Appavoo03a
@@ -758,9 +779,9 @@ B. Rosenburg and M. Stumm and J. Xenidis"
,volume="42"
,number="1"
,pages="60-76"
-,annotation="
+,annotation={
Use of RCU to enable hot-swapping for autonomic behavior in K42.
-"
+}
}
@unpublished{Seigh03
@@ -769,9 +790,9 @@ B. Rosenburg and M. Stumm and J. Xenidis"
,Year="2003"
,Month="March"
,note="email correspondence"
-,annotation="
+,annotation={
Described the relationship of the VM/XA passive serialization to RCU.
-"
+}
}
@Conference{Arcangeli03
@@ -785,14 +806,12 @@ Dipankar Sarma"
,year="2003"
,month="June"
,pages="297-310"
-,note="Available:
-\url{http://www.rdrop.com/users/paulmck/RCU/rcu.FREENIX.2003.06.14.pdf}
-[Viewed November 21, 2007]"
-,annotation="
+,annotation={
Compared updated RCU implementations for the Linux kernel, and
described System V IPC use of RCU, including order-of-magnitude
performance improvements.
-"
+ http://www.rdrop.com/users/paulmck/RCU/rcu.FREENIX.2003.06.14.pdf
+}
}
@Conference{Soules03a
@@ -820,10 +839,10 @@ Michal Ostrowski and Bryan Rosenburg and Jimi Xenidis"
,note="Available:
\url{http://www.linuxjournal.com/article/6993}
[Viewed November 14, 2007]"
-,annotation="
+,annotation={
Reader-friendly intro to RCU, with the infamous old-man-and-brat
cartoon.
-"
+}
}
@unpublished{Sarma03a
@@ -832,7 +851,9 @@ Michal Ostrowski and Bryan Rosenburg and Jimi Xenidis"
,month="December"
,year="2003"
,note="Message ID: 20031222180114.GA2248@in.ibm.com"
-,annotation="dipankar/ct.2004.03.27/RCUll.2003.12.22.patch"
+,annotation={
+ dipankar/ct.2004.03.27/RCUll.2003.12.22.patch
+}
}
@techreport{Friedberg03a
@@ -844,11 +865,11 @@ Michal Ostrowski and Bryan Rosenburg and Jimi Xenidis"
,number="US Patent 6,662,184"
,month="December"
,pages="112"
-,annotation="
+,annotation={
Applies RCU to a wildcard-search Patricia tree in order to permit
synchronization-free lookup. RCU is used to retain removed nodes
for a grace period before freeing them.
-"
+}
}
@article{McKenney04a
@@ -860,12 +881,11 @@ Michal Ostrowski and Bryan Rosenburg and Jimi Xenidis"
,volume="1"
,number="118"
,pages="38-46"
-,note="Available:
-\url{http://www.linuxjournal.com/node/7124}
-[Viewed December 26, 2010]"
-,annotation="
+,annotation={
Reader friendly intro to dcache and RCU.
-"
+ http://www.linuxjournal.com/node/7124
+ [Viewed December 26, 2010]
+}
}
@Conference{McKenney04b
@@ -879,10 +899,10 @@ Michal Ostrowski and Bryan Rosenburg and Jimi Xenidis"
\url{http://www.linux.org.au/conf/2004/abstracts.html#90}
\url{http://www.rdrop.com/users/paulmck/RCU/lockperf.2004.01.17a.pdf}
[Viewed June 23, 2004]"
-,annotation="
+,annotation={
Compares performance of RCU to that of other locking primitives
over a number of CPUs (x86, Opteron, Itanium, and PPC).
-"
+}
}
@unpublished{Sarma04a
@@ -891,7 +911,9 @@ Michal Ostrowski and Bryan Rosenburg and Jimi Xenidis"
,month="March"
,year="2004"
,note="\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=108003746402892&w=2}"
-,annotation="Head of thread: dipankar/2004.03.23/rcu-low-lat.1.patch"
+,annotation={
+ Head of thread: dipankar/2004.03.23/rcu-low-lat.1.patch
+}
}
@unpublished{Sarma04b
@@ -900,7 +922,9 @@ Michal Ostrowski and Bryan Rosenburg and Jimi Xenidis"
,month="March"
,year="2004"
,note="\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=108016474829546&w=2}"
-,annotation="dipankar/rcuth.2004.03.24/rcu-throttle.patch"
+,annotation={
+ dipankar/rcuth.2004.03.24/rcu-throttle.patch
+}
}
@unpublished{Spraul04a
@@ -911,9 +935,9 @@ Michal Ostrowski and Bryan Rosenburg and Jimi Xenidis"
,note="Available:
\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=108546407726602&w=2}
[Viewed June 23, 2004]"
-,annotation="
+,annotation={
Hierarchical-bitmap patch for RCU infrastructure.
-"
+}
}
@unpublished{Steiner04a
@@ -950,10 +974,12 @@ Realtime Applications"
,year="2004"
,month="June"
,pages="182-191"
-,annotation="
+,annotation={
Describes and compares a number of modifications to the Linux RCU
implementation that make it friendly to realtime applications.
-"
+ https://www.usenix.org/conference/2004-usenix-annual-technical-conference/making-rcu-safe-deep-sub-millisecond-response
+ [Viewed July 26, 2012]
+}
}
@phdthesis{PaulEdwardMcKenneyPhD
@@ -964,14 +990,13 @@ in Operating System Kernels"
,school="OGI School of Science and Engineering at
Oregon Health and Sciences University"
,year="2004"
-,note="Available:
-\url{http://www.rdrop.com/users/paulmck/RCU/RCUdissertation.2004.07.14e1.pdf}
-[Viewed October 15, 2004]"
-,annotation="
+,annotation={
Describes RCU implementations and presents design patterns
corresponding to common uses of RCU in several operating-system
kernels.
-"
+ http://www.rdrop.com/users/paulmck/RCU/RCUdissertation.2004.07.14e1.pdf
+ [Viewed October 15, 2004]
+}
}
@unpublished{PaulEMcKenney2004rcu:dereference
@@ -982,9 +1007,9 @@ Oregon Health and Sciences University"
,note="Available:
\url{http://lkml.org/lkml/2004/8/6/237}
[Viewed June 8, 2010]"
-,annotation="
+,annotation={
Introduce rcu_dereference().
-"
+}
}
@unpublished{JimHouston04a
@@ -995,11 +1020,11 @@ Oregon Health and Sciences University"
,note="Available:
\url{http://lkml.org/lkml/2004/8/30/87}
[Viewed February 17, 2005]"
-,annotation="
+,annotation={
Uses active code in rcu_read_lock() and rcu_read_unlock() to
make RCU happen, allowing RCU to function on CPUs that do not
receive a scheduling-clock interrupt.
-"
+}
}
@unpublished{TomHart04a
@@ -1010,9 +1035,9 @@ Oregon Health and Sciences University"
,note="Available:
\url{http://www.cs.toronto.edu/~tomhart/masters_thesis.html}
[Viewed October 15, 2004]"
-,annotation="
+,annotation={
Proposes comparing RCU to lock-free methods for the Linux kernel.
-"
+}
}
@unpublished{Vaddagiri04a
@@ -1023,9 +1048,9 @@ Oregon Health and Sciences University"
,note="Available:
\url{http://marc.theaimsgroup.com/?t=109395731700004&r=1&w=2}
[Viewed October 18, 2004]"
-,annotation="
+,annotation={
Srivatsa's RCU patch for tcp_ehash lookup.
-"
+}
}
@unpublished{Thirumalai04a
@@ -1036,9 +1061,9 @@ Oregon Health and Sciences University"
,note="Available:
\url{http://marc.theaimsgroup.com/?t=109144217400003&r=1&w=2}
[Viewed October 18, 2004]"
-,annotation="
+,annotation={
Ravikiran's lockfree FD patch.
-"
+}
}
@unpublished{Thirumalai04b
@@ -1049,9 +1074,9 @@ Oregon Health and Sciences University"
,note="Available:
\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=109152521410459&w=2}
[Viewed October 18, 2004]"
-,annotation="
+,annotation={
Ravikiran's lockfree FD patch.
-"
+}
}
@unpublished{PaulEMcKenney2004rcu:assign:pointer
@@ -1062,9 +1087,9 @@ Oregon Health and Sciences University"
,note="Available:
\url{http://lkml.org/lkml/2004/10/23/241}
[Viewed June 8, 2010]"
-,annotation="
+,annotation={
Introduce rcu_assign_pointer().
-"
+}
}
@unpublished{JamesMorris04a
@@ -1073,12 +1098,12 @@ Oregon Health and Sciences University"
,day="15"
,month="November"
,year="2004"
-,note="Available:
-\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=110054979416004&w=2}
-[Viewed December 10, 2004]"
-,annotation="
+,note="\url{http://marc.theaimsgroup.com/?l=linux-kernel&m=110054979416004&w=2}"
+,annotation={
James Morris posts Kaigai Kohei's patch to LKML.
-"
+ [Viewed December 10, 2004]
+ Kaigai's patch is at https://lkml.org/lkml/2004/9/27/52
+}
}
@unpublished{JamesMorris04b
@@ -1089,9 +1114,9 @@ Oregon Health and Sciences University"
,note="Available:
\url{http://www.livejournal.com/users/james_morris/2153.html}
[Viewed December 10, 2004]"
-,annotation="
+,annotation={
RCU helps SELinux performance. ;-) Made LWN.
-"
+}
}
@unpublished{PaulMcKenney2005RCUSemantics
@@ -1103,9 +1128,9 @@ Oregon Health and Sciences University"
,note="Available:
\url{http://www.rdrop.com/users/paulmck/RCU/rcu-semantics.2005.01.30a.pdf}
[Viewed December 6, 2009]"
-,annotation="
+,annotation={
Early derivation of RCU semantics.
-"
+}
}
@unpublished{PaulMcKenney2005e
@@ -1117,10 +1142,10 @@ Oregon Health and Sciences University"
,note="Available:
\url{http://lkml.org/lkml/2005/3/17/199}
[Viewed September 5, 2005]"
-,annotation="
+,annotation={
First posting showing how RCU can be safely adapted for
preemptable RCU read side critical sections.
-"
+}
}
@unpublished{EsbenNeilsen2005a
@@ -1132,12 +1157,12 @@ Oregon Health and Sciences University"
,note="Available:
\url{http://lkml.org/lkml/2005/3/18/122}
[Viewed March 30, 2006]"
-,annotation="
+,annotation={
Esben Neilsen suggests read-side suppression of grace-period
processing for crude-but-workable realtime RCU. The downside
- is indefinite grace periods...But this is OK for experimentation
+ is indefinite grace periods... But this is OK for experimentation
and testing.
-"
+}
}
@unpublished{TomHart05a
@@ -1149,10 +1174,10 @@ Data Structures"
,note="Available:
\url{ftp://ftp.cs.toronto.edu/csrg-technical-reports/515/}
[Viewed March 4, 2005]"
-,annotation="
+,annotation={
Comparison of RCU, QBSR, and EBSR. RCU wins for read-mostly
workloads. ;-)
-"
+}
}
@unpublished{JonCorbet2005DeprecateSyncKernel
@@ -1164,10 +1189,10 @@ Data Structures"
,note="Available:
\url{http://lwn.net/Articles/134484/}
[Viewed May 3, 2005]"
-,annotation="
+,annotation={
Jon Corbet describes deprecation of synchronize_kernel()
in favor of synchronize_rcu() and synchronize_sched().
-"
+}
}
@unpublished{PaulMcKenney05a
@@ -1178,10 +1203,10 @@ Data Structures"
,note="Available:
\url{http://lkml.org/lkml/2005/5/9/185}
[Viewed May 13, 2005]"
-,annotation="
+,annotation={
First publication of working lock-based deferred free patches
for the CONFIG_PREEMPT_RT environment.
-"
+}
}
@conference{PaulMcKenney05b
@@ -1194,10 +1219,10 @@ Data Structures"
,note="Available:
\url{http://www.rdrop.com/users/paulmck/RCU/realtimeRCU.2005.04.23a.pdf}
[Viewed May 13, 2005]"
-,annotation="
+,annotation={
Realtime turns into making RCU yet more realtime friendly.
http://lca2005.linux.org.au/Papers/Paul%20McKenney/Towards%20Hard%20Realtime%20Response%20from%20the%20Linux%20Kernel/LKS.2005.04.22a.pdf
-"
+}
}
@unpublished{PaulEMcKenneyHomePage
@@ -1208,9 +1233,9 @@ Data Structures"
,note="Available:
\url{http://www.rdrop.com/users/paulmck/}
[Viewed May 25, 2005]"
-,annotation="
+,annotation={
Paul McKenney's home page.
-"
+}
}
@unpublished{PaulEMcKenneyRCUPage
@@ -1221,9 +1246,9 @@ Data Structures"
,note="Available:
\url{http://www.rdrop.com/users/paulmck/RCU}
[Viewed May 25, 2005]"
-,annotation="
+,annotation={
Paul McKenney's RCU page.
-"
+}
}
@unpublished{JosephSeigh2005a
@@ -1232,10 +1257,10 @@ Data Structures"
,month="July"
,year="2005"
,note="Personal communication"
-,annotation="
+,annotation={
Joe Seigh announcing his atomic-ptr-plus project.
http://sourceforge.net/projects/atomic-ptr-plus/
-"
+}
}
@unpublished{JosephSeigh2005b
@@ -1247,9 +1272,9 @@ Data Structures"
,note="Available:
\url{http://sourceforge.net/projects/atomic-ptr-plus/}
[Viewed August 8, 2005]"
-,annotation="
+,annotation={
Joe Seigh's atomic-ptr-plus project.
-"
+}
}
@unpublished{PaulMcKenney2005c
@@ -1261,9 +1286,9 @@ Data Structures"
,note="Available:
\url{http://lkml.org/lkml/2005/8/1/155}
[Viewed March 14, 2006]"
-,annotation="
+,annotation={
First operating counter-based realtime RCU patch posted to LKML.
-"
+}
}
@unpublished{PaulMcKenney2005d
@@ -1275,11 +1300,11 @@ Data Structures"
,note="Available:
\url{http://lkml.org/lkml/2005/8/8/108}
[Viewed March 14, 2006]"
-,annotation="
+,annotation={
First operating counter-based realtime RCU patch posted to LKML,
but fixed so that various unusual combinations of configuration
parameters all function properly.
-"
+}
}
@unpublished{PaulMcKenney2005rcutorture
@@ -1291,9 +1316,25 @@ Data Structures"
,note="Available:
\url{http://lkml.org/lkml/2005/10/1/70}
[Viewed March 14, 2006]"
-,annotation="
+,annotation={
First rcutorture patch.
-"
+}
+}
+
+@unpublished{DavidSMiller2006HashedLocking
+,Author="David S. Miller"
+,Title="Re: [{PATCH}, {RFC}] {RCU} : {OOM} avoidance and lower latency"
+,month="January"
+,day="6"
+,year="2006"
+,note="Available:
+\url{https://lkml.org/lkml/2006/1/7/22}
+[Viewed February 29, 2012]"
+,annotation={
+ David Miller's view on hashed arrays of locks: used to really
+	like it, but every time he saw an opportunity for this technique,
+ something else always proved superior. Partitioning or RCU. ;-)
+}
}
@conference{ThomasEHart2006a
@@ -1309,10 +1350,10 @@ Distributed Processing Symposium"
,note="Available:
\url{http://www.rdrop.com/users/paulmck/RCU/hart_ipdps06.pdf}
[Viewed April 28, 2008]"
-,annotation="
+,annotation={
Compares QSBR, HPBR, EBR, and lock-free reference counting.
http://www.cs.toronto.edu/~tomhart/perflab/ipdps06.tgz
-"
+}
}
@unpublished{NickPiggin2006radixtree
@@ -1324,9 +1365,9 @@ Distributed Processing Symposium"
,note="Available:
\url{http://lkml.org/lkml/2006/6/20/238}
[Viewed March 25, 2008]"
-,annotation="
+,annotation={
RCU-protected radix tree.
-"
+}
}
@Conference{PaulEMcKenney2006b
@@ -1341,9 +1382,9 @@ Suparna Bhattacharya"
\url{http://www.linuxsymposium.org/2006/view_abstract.php?content_key=184}
\url{http://www.rdrop.com/users/paulmck/RCU/OLSrtRCU.2006.08.11a.pdf}
[Viewed January 1, 2007]"
-,annotation="
+,annotation={
Described how to improve the -rt implementation of realtime RCU.
-"
+}
}
@unpublished{WikipediaRCU
@@ -1354,12 +1395,11 @@ Canis Rufus and Zoicon5 and Anome and Hal Eisen"
,month="July"
,day="8"
,year="2006"
-,note="Available:
-\url{http://en.wikipedia.org/wiki/Read-copy-update}
-[Viewed August 21, 2006]"
-,annotation="
+,note="\url{http://en.wikipedia.org/wiki/Read-copy-update}"
+,annotation={
Wikipedia RCU page as of July 8 2006.
-"
+ [Viewed August 21, 2006]
+}
}
@Conference{NickPiggin2006LocklessPageCache
@@ -1372,9 +1412,9 @@ Canis Rufus and Zoicon5 and Anome and Hal Eisen"
,note="Available:
\url{http://www.linuxsymposium.org/2006/view_abstract.php?content_key=184}
[Viewed January 11, 2009]"
-,annotation="
+,annotation={
Uses RCU-protected radix tree for a lockless page cache.
-"
+}
}
@unpublished{PaulEMcKenney2006c
@@ -1388,9 +1428,9 @@ Canis Rufus and Zoicon5 and Anome and Hal Eisen"
Revised:
\url{http://www.rdrop.com/users/paulmck/RCU/srcu.2007.01.14a.pdf}
[Viewed August 21, 2006]"
-,annotation="
+,annotation={
LWN article introducing SRCU.
-"
+}
}
@unpublished{RobertOlsson2006a
@@ -1399,12 +1439,11 @@ Revised:
,month="August"
,day="18"
,year="2006"
-,note="Available:
-\url{http://www.nada.kth.se/~snilsson/publications/TRASH/trash.pdf}
-[Viewed March 4, 2011]"
-,annotation="
+,note="\url{http://www.nada.kth.se/~snilsson/publications/TRASH/trash.pdf}"
+,annotation={
RCU-protected dynamic trie-hash combination.
-"
+ [Viewed March 4, 2011]
+}
}
@unpublished{ChristophHellwig2006RCU2SRCU
@@ -1426,10 +1465,10 @@ Revised:
,note="Available:
\url{http://www.rdrop.com/users/paulmck/RCU/linuxusage.html}
[Viewed January 14, 2007]"
-,annotation="
+,annotation={
Paul McKenney's RCU page showing graphs plotting Linux-kernel
usage of RCU.
-"
+}
}
@unpublished{PaulEMcKenneyRCUusageRawDataPage
@@ -1440,10 +1479,10 @@ Revised:
,note="Available:
\url{http://www.rdrop.com/users/paulmck/RCU/linuxusage/rculocktab.html}
[Viewed January 14, 2007]"
-,annotation="
+,annotation={
Paul McKenney's RCU page showing Linux usage of RCU in tabular
form, with links to corresponding cscope databases.
-"
+}
}
@unpublished{GauthamShenoy2006RCUrwlock
@@ -1455,13 +1494,13 @@ Revised:
,note="Available:
\url{http://lkml.org/lkml/2006/10/26/73}
[Viewed January 26, 2009]"
-,annotation="
+,annotation={
RCU-based reader-writer lock that allows readers to proceed with
no memory barriers or atomic instruction in absence of writers.
	If writers do show up, readers must of course wait as required by
the semantics of reader-writer locking. This is a recursive
lock.
-"
+}
}
@unpublished{JensAxboe2006SlowSRCU
@@ -1474,11 +1513,11 @@ Revised:
,note="Available:
\url{http://lkml.org/lkml/2006/11/17/56}
[Viewed May 28, 2007]"
-,annotation="
+,annotation={
SRCU's grace periods are too slow for Jens, even after a
factor-of-three speedup.
Sped-up version of SRCU at http://lkml.org/lkml/2006/11/17/359.
-"
+}
}
@unpublished{OlegNesterov2006QRCU
@@ -1491,10 +1530,10 @@ Revised:
,note="Available:
\url{http://lkml.org/lkml/2006/11/19/69}
[Viewed May 28, 2007]"
-,annotation="
+,annotation={
First cut of QRCU. Expanded/corrected versions followed.
Used to be OlegNesterov2007QRCU, now time-corrected.
-"
+}
}
@unpublished{OlegNesterov2006aQRCU
@@ -1506,10 +1545,10 @@ Revised:
,note="Available:
\url{http://lkml.org/lkml/2006/11/29/330}
[Viewed November 26, 2008]"
-,annotation="
+,annotation={
Expanded/corrected version of QRCU.
Used to be OlegNesterov2007aQRCU, now time-corrected.
-"
+}
}
@unpublished{EvgeniyPolyakov2006RCUslowdown
@@ -1521,10 +1560,10 @@ Revised:
,note="Available:
\url{http://www.ioremap.net/node/41}
[Viewed October 28, 2008]"
-,annotation="
+,annotation={
Using RCU as a pure delay leads to a 2.5x slowdown in skbs in
the Linux kernel.
-"
+}
}
@inproceedings{ChrisMatthews2006ClusteredObjectsRCU
@@ -1541,7 +1580,8 @@ Revised:
,annotation={
Uses K42's RCU-like functionality to manage clustered-object
lifetimes.
-}}
+}
+}
@article{DilmaDaSilva2006K42
,author = {Silva, Dilma Da and Krieger, Orran and Wisniewski, Robert W. and Waterland, Amos and Tam, David and Baumann, Andrew}
@@ -1557,7 +1597,8 @@ Revised:
,address = {New York, NY, USA}
,annotation={
Describes relationship of K42 generations to RCU.
-}}
+}
+}
# CoreyMinyard2007list_splice_rcu
@unpublished{CoreyMinyard2007list:splice:rcu
@@ -1569,9 +1610,9 @@ Revised:
,note="Available:
\url{http://lkml.org/lkml/2007/1/3/112}
[Viewed May 28, 2007]"
-,annotation="
+,annotation={
Patch for list_splice_rcu().
-"
+}
}
@unpublished{PaulEMcKenney2007rcubarrier
@@ -1583,9 +1624,9 @@ Revised:
,note="Available:
\url{http://lwn.net/Articles/217484/}
[Viewed November 22, 2007]"
-,annotation="
+,annotation={
LWN article introducing the rcu_barrier() primitive.
-"
+}
}
@unpublished{PeterZijlstra2007SyncBarrier
@@ -1597,10 +1638,10 @@ Revised:
,note="Available:
\url{http://lkml.org/lkml/2007/1/28/34}
[Viewed March 27, 2008]"
-,annotation="
+,annotation={
RCU-like implementation for frequent updaters and rare readers(!).
Subsumed into QRCU. Maybe...
-"
+}
}
@unpublished{PaulEMcKenney2007BoostRCU
@@ -1609,14 +1650,13 @@ Revised:
,month="February"
,day="5"
,year="2007"
-,note="Available:
-\url{http://lwn.net/Articles/220677/}
-Revised:
-\url{http://www.rdrop.com/users/paulmck/RCU/RCUbooststate.2007.04.16a.pdf}
-[Viewed September 7, 2007]"
-,annotation="
+,note="\url{http://lwn.net/Articles/220677/}"
+,annotation={
LWN article introducing RCU priority boosting.
-"
+ Revised:
+ http://www.rdrop.com/users/paulmck/RCU/RCUbooststate.2007.04.16a.pdf
+ [Viewed September 7, 2007]
+}
}
@unpublished{PaulMcKenney2007QRCUpatch
@@ -1628,9 +1668,9 @@ Revised:
,note="Available:
\url{http://lkml.org/lkml/2007/2/25/18}
[Viewed March 27, 2008]"
-,annotation="
+,annotation={
Patch for QRCU supplying lock-free fast path.
-"
+}
}
@article{JonathanAppavoo2007K42RCU
@@ -1647,7 +1687,8 @@ Revised:
,address = {New York, NY, USA}
,annotation={
Role of RCU in K42.
-}}
+}
+}
@conference{RobertOlsson2007Trash
,Author="Robert Olsson and Stefan Nilsson"
@@ -1658,9 +1699,9 @@ Revised:
,note="Available:
\url{http://ieeexplore.ieee.org/xpl/freeabs_all.jsp?arnumber=4281239}
[Viewed October 1, 2010]"
-,annotation="
+,annotation={
RCU-protected dynamic trie-hash combination.
-"
+}
}
@conference{PeterZijlstra2007ConcurrentPagecacheRCU
@@ -1673,10 +1714,10 @@ Revised:
,note="Available:
\url{http://ols.108.redhat.com/2007/Reprints/zijlstra-Reprint.pdf}
[Viewed April 14, 2008]"
-,annotation="
+,annotation={
Page-cache modifications permitting RCU readers and concurrent
updates.
-"
+}
}
@unpublished{PaulEMcKenney2007whatisRCU
@@ -1701,11 +1742,11 @@ Revised:
,note="Available:
\url{http://lwn.net/Articles/243851/}
[Viewed September 8, 2007]"
-,annotation="
+,annotation={
LWN article describing Promela and spin, and also using Oleg
Nesterov's QRCU as an example (with Paul McKenney's fastpath).
Merged patch at: http://lkml.org/lkml/2007/2/25/18
-"
+}
}
@unpublished{PaulEMcKenney2007WG21DDOatomics
@@ -1714,12 +1755,12 @@ Revised:
,month="August"
,day="3"
,year="2007"
-,note="Preprint:
+,note="Available:
\url{http://open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2664.htm}
[Viewed December 7, 2009]"
-,annotation="
+,annotation={
RCU for C++, parts 1 and 2.
-"
+}
}
@unpublished{PaulEMcKenney2007WG21DDOannotation
@@ -1728,12 +1769,12 @@ Revised:
,month="September"
,day="18"
,year="2008"
-,note="Preprint:
+,note="Available:
\url{http://open-std.org/jtc1/sc22/wg21/docs/papers/2008/n2782.htm}
[Viewed December 7, 2009]"
-,annotation="
+,annotation={
RCU for C++, part 2, updated many times.
-"
+}
}
@unpublished{PaulEMcKenney2007PreemptibleRCUPatch
@@ -1745,10 +1786,10 @@ Revised:
,note="Available:
\url{http://lkml.org/lkml/2007/9/10/213}
[Viewed October 25, 2007]"
-,annotation="
+,annotation={
Final patch for preemptable RCU to -rt. (Later patches were
to mainline, eventually incorporated.)
-"
+}
}
@unpublished{PaulEMcKenney2007PreemptibleRCU
@@ -1760,9 +1801,9 @@ Revised:
,note="Available:
\url{http://lwn.net/Articles/253651/}
[Viewed October 25, 2007]"
-,annotation="
+,annotation={
LWN article describing the design of preemptible RCU.
-"
+}
}
@article{ThomasEHart2007a
@@ -1783,6 +1824,7 @@ Revised:
}
}
+# MathieuDesnoyers2007call_rcu_schedNeeded
@unpublished{MathieuDesnoyers2007call:rcu:schedNeeded
,Author="Mathieu Desnoyers"
,Title="Re: [patch 1/2] {Linux} Kernel Markers - Support Multiple Probes"
@@ -1792,9 +1834,9 @@ Revised:
,note="Available:
\url{http://lkml.org/lkml/2007/12/20/244}
[Viewed March 27, 2008]"
-,annotation="
+,annotation={
Request for call_rcu_sched() and rcu_barrier_sched().
-"
+}
}
@@ -1815,11 +1857,11 @@ Revised:
,note="Available:
\url{http://lwn.net/Articles/262464/}
[Viewed December 27, 2007]"
-,annotation="
+,annotation={
Lays out the three basic components of RCU: (1) publish-subscribe,
	(2) wait for pre-existing readers to complete, and (3) maintain
multiple versions.
-"
+}
}
@unpublished{PaulEMcKenney2008WhatIsRCUUsage
@@ -1831,7 +1873,7 @@ Revised:
,note="Available:
\url{http://lwn.net/Articles/263130/}
[Viewed January 4, 2008]"
-,annotation="
+,annotation={
Lays out six uses of RCU:
1. RCU is a Reader-Writer Lock Replacement
2. RCU is a Restricted Reference-Counting Mechanism
@@ -1839,7 +1881,7 @@ Revised:
4. RCU is a Poor Man's Garbage Collector
5. RCU is a Way of Providing Existence Guarantees
6. RCU is a Way of Waiting for Things to Finish
-"
+}
}
@unpublished{PaulEMcKenney2008WhatIsRCUAPI
@@ -1851,10 +1893,10 @@ Revised:
,note="Available:
\url{http://lwn.net/Articles/264090/}
[Viewed January 10, 2008]"
-,annotation="
+,annotation={
Gives an overview of the Linux-kernel RCU API and a brief annotated RCU
bibliography.
-"
+}
}
#
@@ -1872,10 +1914,10 @@ Revised:
,note="Available:
\url{http://lkml.org/lkml/2008/1/29/208}
[Viewed March 27, 2008]"
-,annotation="
+,annotation={
Patch that prevents preemptible RCU from unnecessarily waking
up dynticks-idle CPUs.
-"
+}
}
@unpublished{PaulEMcKenney2008LKMLDependencyOrdering
@@ -1887,9 +1929,9 @@ Revised:
,note="Available:
\url{http://lkml.org/lkml/2008/2/2/255}
[Viewed October 18, 2008]"
-,annotation="
+,annotation={
Explanation of compilers violating dependency ordering.
-"
+}
}
@Conference{PaulEMcKenney2008Beijing
@@ -1916,24 +1958,26 @@ lot of {Linux} into your technology!!!"
,note="Available:
\url{http://lwn.net/Articles/279077/}
[Viewed April 24, 2008]"
-,annotation="
+,annotation={
Describes use of Promela and Spin to validate (and fix!) the
dynticks/RCU interface.
-"
+}
}
@article{DinakarGuniguntala2008IBMSysJ
,author="D. Guniguntala and P. E. McKenney and J. Triplett and J. Walpole"
,title="The read-copy-update mechanism for supporting real-time applications on shared-memory multiprocessor systems with {Linux}"
,Year="2008"
-,Month="April-June"
+,Month="May"
,journal="IBM Systems Journal"
,volume="47"
,number="2"
,pages="221-236"
-,annotation="
+,annotation={
RCU, realtime RCU, sleepable RCU, performance.
-"
+ http://www.research.ibm.com/journal/sj/472/guniguntala.pdf
+ [Viewed April 24, 2008]
+}
}
@unpublished{LaiJiangshan2008NewClassicAlgorithm
@@ -1945,11 +1989,11 @@ lot of {Linux} into your technology!!!"
,note="Available:
\url{http://lkml.org/lkml/2008/6/2/539}
[Viewed December 10, 2008]"
-,annotation="
+,annotation={
Updated RCU classic algorithm. Introduced multi-tailed list
for RCU callbacks and also pulling common code into
__call_rcu().
-"
+}
}
@article{PaulEMcKenney2008RCUOSR
@@ -1966,6 +2010,7 @@ lot of {Linux} into your technology!!!"
,address="New York, NY, USA"
,annotation={
Linux changed RCU to a far greater degree than RCU has changed Linux.
+ http://portal.acm.org/citation.cfm?doid=1400097.1400099
}
}
@@ -1978,10 +2023,10 @@ lot of {Linux} into your technology!!!"
,note="Available:
\url{http://lkml.org/lkml/2008/8/21/336}
[Viewed December 8, 2008]"
-,annotation="
+,annotation={
State-based RCU. One key thing that this patch does is to
separate the dynticks handling of NMIs and IRQs.
-"
+}
}
@unpublished{ManfredSpraul2008dyntickIRQNMI
@@ -1993,12 +2038,13 @@ lot of {Linux} into your technology!!!"
,note="Available:
\url{http://lkml.org/lkml/2008/9/6/86}
[Viewed December 8, 2008]"
-,annotation="
+,annotation={
Manfred notes a fix required to my attempt to separate irq
and NMI processing for hierarchical RCU's dynticks interface.
-"
+}
}
+# Was PaulEMcKenney2011cyclicRCU
@techreport{PaulEMcKenney2008cyclicRCU
,author="Paul E. McKenney"
,title="Efficient Support of Consistent Cyclic Search With Read-Copy Update"
@@ -2008,11 +2054,11 @@ lot of {Linux} into your technology!!!"
,number="US Patent 7,426,511"
,month="September"
,pages="23"
-,annotation="
+,annotation={
Maintains an additional level of indirection to allow
readers to confine themselves to the desired snapshot of the
data structure. Only permits one update at a time.
-"
+}
}
@unpublished{PaulEMcKenney2008HierarchicalRCU
@@ -2021,13 +2067,12 @@ lot of {Linux} into your technology!!!"
,month="November"
,day="3"
,year="2008"
-,note="Available:
-\url{http://lwn.net/Articles/305782/}
-[Viewed November 6, 2008]"
-,annotation="
+,note="\url{http://lwn.net/Articles/305782/}"
+,annotation={
RCU with combining-tree-based grace-period detection,
permitting it to handle thousands of CPUs.
-"
+ [Viewed November 6, 2008]
+}
}
@unpublished{PaulEMcKenney2009BloatwatchRCU
@@ -2039,10 +2084,10 @@ lot of {Linux} into your technology!!!"
,note="Available:
\url{http://lkml.org/lkml/2009/1/14/449}
[Viewed January 15, 2009]"
-,annotation="
+,annotation={
Small-footprint implementation of RCU for uniprocessor
embedded applications -- and also for exposition purposes.
-"
+}
}
@conference{PaulEMcKenney2009MaliciousURCU
@@ -2055,9 +2100,9 @@ lot of {Linux} into your technology!!!"
,note="Available:
\url{http://www.rdrop.com/users/paulmck/RCU/urcutorture.2009.01.22a.pdf}
[Viewed February 2, 2009]"
-,annotation="
+,annotation={
Realtime RCU and torture-testing RCU uses.
-"
+}
}
@unpublished{MathieuDesnoyers2009URCU
@@ -2066,16 +2111,14 @@ lot of {Linux} into your technology!!!"
,month="February"
,day="5"
,year="2009"
-,note="Available:
-\url{http://lkml.org/lkml/2009/2/5/572}
-\url{http://lttng.org/urcu}
-[Viewed February 20, 2009]"
-,annotation="
+,note="\url{http://lttng.org/urcu}"
+,annotation={
Mathieu Desnoyers's user-space RCU implementation.
git://lttng.org/userspace-rcu.git
http://lttng.org/cgi-bin/gitweb.cgi?p=userspace-rcu.git
http://lttng.org/urcu
-"
+ http://lkml.org/lkml/2009/2/5/572
+}
}
@unpublished{PaulEMcKenney2009LWNBloatWatchRCU
@@ -2087,9 +2130,24 @@ lot of {Linux} into your technology!!!"
,note="Available:
\url{http://lwn.net/Articles/323929/}
[Viewed March 20, 2009]"
-,annotation="
+,annotation={
Uniprocessor assumptions allow simplified RCU implementation.
-"
+}
+}
+
+@unpublished{EvgeniyPolyakov2009EllipticsNetwork
+,Author="Evgeniy Polyakov"
+,Title="The Elliptics Network"
+,month="April"
+,day="17"
+,year="2009"
+,note="Available:
+\url{http://www.ioremap.net/projects/elliptics}
+[Viewed April 30, 2009]"
+,annotation={
+ Distributed hash table with transactions, using elliptic
+ hash functions to distribute data.
+}
}
@unpublished{PaulEMcKenney2009expeditedRCU
@@ -2101,9 +2159,9 @@ lot of {Linux} into your technology!!!"
,note="Available:
\url{http://lkml.org/lkml/2009/6/25/306}
[Viewed August 16, 2009]"
-,annotation="
+,annotation={
First posting of expedited RCU to be accepted into -tip.
-"
+}
}
@unpublished{PaulEMcKenney2009fastRTRCU
@@ -2115,21 +2173,21 @@ lot of {Linux} into your technology!!!"
,note="Available:
\url{http://lkml.org/lkml/2009/7/23/294}
[Viewed August 15, 2009]"
-,annotation="
+,annotation={
First posting of simple and fast preemptable RCU.
-"
+}
}
-@InProceedings{JoshTriplett2009RPHash
+@unpublished{JoshTriplett2009RPHash
,Author="Josh Triplett"
,Title="Scalable concurrent hash tables via relativistic programming"
,month="September"
,year="2009"
-,booktitle="Linux Plumbers Conference 2009"
-,annotation="
+,note="Linux Plumbers Conference presentation"
+,annotation={
RP fun with hash tables.
- See also JoshTriplett2010RPHash
-"
+ Superseded by JoshTriplett2010RPHash
+}
}
@phdthesis{MathieuDesnoyersPhD
@@ -2154,9 +2212,9 @@ lot of {Linux} into your technology!!!"
,note="Available:
\url{http://wiki.cs.pdx.edu/rp/}
[Viewed December 9, 2009]"
-,annotation="
+,annotation={
Main Relativistic Programming Wiki.
-"
+}
}
@conference{PaulEMcKenney2009DeterministicRCU
@@ -2180,9 +2238,9 @@ lot of {Linux} into your technology!!!"
,note="Available:
\url{http://paulmck.livejournal.com/14639.html}
[Viewed June 4, 2010]"
-,annotation="
+,annotation={
Day-one bug in Tree RCU that took forever to track down.
-"
+}
}
@unpublished{MathieuDesnoyers2009defer:rcu
@@ -2193,10 +2251,10 @@ lot of {Linux} into your technology!!!"
,note="Available:
\url{http://lkml.org/lkml/2009/10/18/129}
[Viewed December 29, 2009]"
-,annotation="
+,annotation={
Mathieu proposed defer_rcu() with fixed-size per-thread pool
of RCU callbacks.
-"
+}
}
@unpublished{MathieuDesnoyers2009VerifPrePub
@@ -2205,10 +2263,10 @@ lot of {Linux} into your technology!!!"
,month="December"
,year="2009"
,note="Submitted to IEEE TPDS"
-,annotation="
+,annotation={
OOMem model for Mathieu's user-level RCU mechanical proof of
correctness.
-"
+}
}
@unpublished{MathieuDesnoyers2009URCUPrePub
@@ -2216,15 +2274,15 @@ lot of {Linux} into your technology!!!"
,Title="User-Level Implementations of Read-Copy Update"
,month="December"
,year="2010"
-,url=\url{http://www.computer.org/csdl/trans/td/2012/02/ttd2012020375-abs.html}
-,annotation="
+,url={\url{http://www.computer.org/csdl/trans/td/2012/02/ttd2012020375-abs.html}}
+,annotation={
RCU overview, desiderata, semi-formal semantics, user-level RCU
usage scenarios, three classes of RCU implementation, wait-free
RCU updates, RCU grace-period batching, update overhead,
http://www.rdrop.com/users/paulmck/RCU/urcu-main-accepted.2011.08.30a.pdf
http://www.rdrop.com/users/paulmck/RCU/urcu-supp-accepted.2011.08.30a.pdf
Superseded by MathieuDesnoyers2012URCU.
-"
+}
}
@inproceedings{HariKannan2009DynamicAnalysisRCU
@@ -2240,7 +2298,8 @@ lot of {Linux} into your technology!!!"
,address = {New York, NY, USA}
,annotation={
Uses RCU to protect metadata used in dynamic analysis.
-}}
+}
+}
@conference{PaulEMcKenney2010SimpleOptRCU
,Author="Paul E. McKenney"
@@ -2252,10 +2311,10 @@ lot of {Linux} into your technology!!!"
,note="Available:
\url{http://www.rdrop.com/users/paulmck/RCU/SimplicityThruOptimization.2010.01.21f.pdf}
[Viewed October 10, 2010]"
-,annotation="
+,annotation={
TREE_PREEMPT_RCU optimizations greatly simplified the old
PREEMPT_RCU implementation.
-"
+}
}
@unpublished{PaulEMcKenney2010LockdepRCU
@@ -2264,12 +2323,11 @@ lot of {Linux} into your technology!!!"
,month="February"
,year="2010"
,day="1"
-,note="Available:
-\url{https://lwn.net/Articles/371986/}
-[Viewed June 4, 2010]"
-,annotation="
+,note="\url{https://lwn.net/Articles/371986/}"
+,annotation={
CONFIG_PROVE_RCU, or at least an early version.
-"
+ [Viewed June 4, 2010]
+}
}
@unpublished{AviKivity2010KVM2RCU
@@ -2280,10 +2338,10 @@ lot of {Linux} into your technology!!!"
,note="Available:
\url{http://www.mail-archive.com/kvm@vger.kernel.org/msg28640.html}
[Viewed March 20, 2010]"
-,annotation="
+,annotation={
Use of RCU permits KVM to increase the size of guest OSes from
16 CPUs to 64 CPUs.
-"
+}
}
@unpublished{HerbertXu2010RCUResizeHash
@@ -2297,7 +2355,19 @@ lot of {Linux} into your technology!!!"
,annotation={
Use a pair of list_head structures to support RCU-protected
resizable hash tables.
-}}
+}
+}
+
+@mastersthesis{AbhinavDuggal2010Masters
+,author="Abhinav Duggal"
+,title="Stopping Data Races Using Redflag"
+,school="Stony Brook University"
+,year="2010"
+,annotation={
+ Data-race detector incorporating RCU.
+ http://www.filesystems.org/docs/abhinav-thesis/abhinav_thesis.pdf
+}
+}
@article{JoshTriplett2010RPHash
,author="Josh Triplett and Paul E. McKenney and Jonathan Walpole"
@@ -2310,7 +2380,8 @@ lot of {Linux} into your technology!!!"
,annotation={
RP fun with hash tables.
http://portal.acm.org/citation.cfm?id=1842733.1842750
-}}
+}
+}
@unpublished{PaulEMcKenney2010RCUAPI
,Author="Paul E. McKenney"
@@ -2318,12 +2389,11 @@ lot of {Linux} into your technology!!!"
,month="December"
,day="8"
,year="2010"
-,note="Available:
-\url{http://lwn.net/Articles/418853/}
-[Viewed December 8, 2010]"
-,annotation="
+,note="\url{http://lwn.net/Articles/418853/}"
+,annotation={
Includes updated software-engineering features.
-"
+ [Viewed December 8, 2010]
+}
}
@mastersthesis{AndrejPodzimek2010masters
@@ -2338,7 +2408,8 @@ lot of {Linux} into your technology!!!"
Reviews RCU implementations and creates a few for OpenSolaris.
Drives quiescent-state detection from RCU read-side primitives,
in a manner roughly similar to that of Jim Houston.
-}}
+}
+}
@unpublished{LinusTorvalds2011Linux2:6:38:rc1:NPigginVFS
,Author="Linus Torvalds"
@@ -2358,7 +2429,8 @@ lot of {Linux} into your technology!!!"
of the most expensive parts of path component lookup, which was the
d_lock on every component lookup. So I'm seeing improvements of 30-50%
on some seriously pathname-lookup intensive loads."
-}}
+}
+}
@techreport{JoshTriplett2011RPScalableCorrectOrdering
,author = {Josh Triplett and Philip W. Howard and Paul E. McKenney and Jonathan Walpole}
@@ -2392,12 +2464,12 @@ lot of {Linux} into your technology!!!"
,number="US Patent 7,953,778"
,month="May"
,pages="34"
-,annotation="
+,annotation={
Maintains an array of generation numbers to track in-flight
updates and keeps an additional level of indirection to allow
readers to confine themselves to the desired snapshot of the
data structure.
-"
+}
}
@inproceedings{Triplett:2011:RPHash
@@ -2408,7 +2480,7 @@ lot of {Linux} into your technology!!!"
,year = {2011}
,pages = {145--158}
,numpages = {14}
-,url={http://www.usenix.org/event/atc11/tech/final_files/atc11_proceedings.pdf}
+,url={http://www.usenix.org/event/atc11/tech/final_files/Triplett.pdf}
,publisher = {The USENIX Association}
,address = {Portland, OR USA}
}
@@ -2419,27 +2491,58 @@ lot of {Linux} into your technology!!!"
,month="July"
,day="27"
,year="2011"
-,note="Available:
-\url{http://lwn.net/Articles/453002/}
-[Viewed July 27, 2011]"
-,annotation="
+,note="\url{http://lwn.net/Articles/453002/}"
+,annotation={
Analysis of the RCU trainwreck in Linux kernel 3.0.
-"
+ [Viewed July 27, 2011]
+}
}
@unpublished{NeilBrown2011MeetTheLockers
,Author="Neil Brown"
-,Title="Meet the Lockers"
+,Title="Meet the {Lockers}"
,month="August"
,day="3"
,year="2011"
,note="Available:
\url{http://lwn.net/Articles/453685/}
[Viewed September 2, 2011]"
-,annotation="
+,annotation={
The Locker family as an analogy for locking, reference counting,
RCU, and seqlock.
-"
+}
+}
+
+@inproceedings{Seyster:2011:RFA:2075416.2075425
+,author = {Seyster, Justin and Radhakrishnan, Prabakar and Katoch, Samriti and Duggal, Abhinav and Stoller, Scott D. and Zadok, Erez}
+,title = {Redflag: a framework for analysis of Kernel-level concurrency}
+,booktitle = {Proceedings of the 11th international conference on Algorithms and architectures for parallel processing - Volume Part I}
+,series = {ICA3PP'11}
+,year = {2011}
+,isbn = {978-3-642-24649-4}
+,location = {Melbourne, Australia}
+,pages = {66--79}
+,numpages = {14}
+,url = {http://dl.acm.org/citation.cfm?id=2075416.2075425}
+,acmid = {2075425}
+,publisher = {Springer-Verlag}
+,address = {Berlin, Heidelberg}
+}
+
+@phdthesis{JoshTriplettPhD
+,author="Josh Triplett"
+,title="Relativistic Causal Ordering: A Memory Model for Scalable Concurrent Data Structures"
+,school="Portland State University"
+,year="2012"
+,annotation={
+ RCU-protected hash tables, barriers vs. read-side traversal order.
+ .
+ If the updater is making changes in the opposite direction from
+	the read-side traversal order, the updater need only execute a
+ memory-barrier instruction, but if in the same direction, the
+ updater needs to wait for a grace period between the individual
+ updates.
+}
}
@article{MathieuDesnoyers2012URCU
@@ -2459,5 +2562,150 @@ lot of {Linux} into your technology!!!"
RCU updates, RCU grace-period batching, update overhead,
http://www.rdrop.com/users/paulmck/RCU/urcu-main-accepted.2011.08.30a.pdf
http://www.rdrop.com/users/paulmck/RCU/urcu-supp-accepted.2011.08.30a.pdf
+ http://www.computer.org/cms/Computer.org/dl/trans/td/2012/02/extras/ttd2012020375s.pdf
+}
+}
+
+@inproceedings{AustinClements2012RCULinux:mmapsem
+,author = {Austin Clements and Frans Kaashoek and Nickolai Zeldovich}
+,title = {Scalable Address Spaces Using {RCU} Balanced Trees}
+,booktitle = {Architectural Support for Programming Languages and Operating Systems (ASPLOS 2012)}
+,month = {March}
+,year = {2012}
+,pages = {199--210}
+,numpages = {12}
+,publisher = {ACM}
+,address = {London, UK}
+,url="http://people.csail.mit.edu/nickolai/papers/clements-bonsai.pdf"
+}
+
+@unpublished{PaulEMcKenney2012ELCbattery
+,Author="Paul E. McKenney"
+,Title="Making {RCU} Safe For Battery-Powered Devices"
+,month="February"
+,day="15"
+,year="2012"
+,note="Available:
+\url{http://www.rdrop.com/users/paulmck/RCU/RCUdynticks.2012.02.15b.pdf}
+[Viewed March 1, 2012]"
+,annotation={
+ RCU_FAST_NO_HZ, round 2.
+}
+}
+
+@article{GuillermoVigueras2012RCUCrowd
+,author = {Vigueras, Guillermo and Ordu\~{n}a, Juan M. and Lozano, Miguel}
+,day = {25}
+,doi = {10.1007/s11227-012-0766-x}
+,issn = {0920-8542}
+,journal = {The Journal of Supercomputing}
+,keywords = {linux, simulation}
+,month = apr
+,posted-at = {2012-05-03 09:12:04}
+,priority = {2}
+,title = {{A Read-Copy Update based parallel server for distributed crowd simulations}}
+,url = {http://dx.doi.org/10.1007/s11227-012-0766-x}
+,year = {2012}
+}
+
+
+@unpublished{JonCorbet2012ACCESS:ONCE
+,Author="Jon Corbet"
+,Title="{ACCESS\_ONCE()}"
+,month="August"
+,day="1"
+,year="2012"
+,note="\url{http://lwn.net/Articles/508991/}"
+,annotation={
+ A couple of simple specific compiler optimizations that motivate
+ ACCESS_ONCE().
+}
+}
+
+@unpublished{AlexeyGotsman2012VerifyGraceExtended
+,Author="Alexey Gotsman and Noam Rinetzky and Hongseok Yang"
+,Title="Verifying Highly Concurrent Algorithms with Grace (extended version)"
+,month="July"
+,day="10"
+,year="2012"
+,note="\url{http://software.imdea.org/~gotsman/papers/recycling-esop13-ext.pdf}"
+,annotation={
+ Separation-logic formulation of RCU uses.
+}
+}
+
+@unpublished{PaulMcKenney2012RCUUsage
+,Author="Paul E. McKenney and Silas Boyd-Wickizer and Jonathan Walpole"
+,Title="{RCU} Usage In the Linux Kernel: One Decade Later"
+,month="September"
+,day="17"
+,year="2012"
+,url="http://rdrop.com/users/paulmck/techreports/survey.2012.09.17a.pdf"
+,note="Technical report paulmck.2012.09.17"
+,annotation={
+	Overview of RCU usage in the Linux kernel.
+}
+}
+
+@unpublished{JonCorbet2012NOCB
+,Author="Jon Corbet"
+,Title="Relocating RCU callbacks"
+,month="October"
+,day="31"
+,year="2012"
+,note="\url{http://lwn.net/Articles/522262/}"
+,annotation={
+ Overview of the first variant of no-CBs CPUs for RCU.
+}
+}
+
+@phdthesis{JustinSeyster2012PhD
+,author="Justin Seyster"
+,title="Runtime Verification of Kernel-Level Concurrency Using Compiler-Based Instrumentation"
+,school="Stony Brook University"
+,year="2012"
+,annotation={
+ Looking for data races, including those involving RCU.
+ Proposal:
+ http://www.fsl.cs.sunysb.edu/docs/jseyster-proposal/redflag.pdf
+ Dissertation:
+ http://www.fsl.cs.sunysb.edu/docs/jseyster-dissertation/redflag.pdf
+}
+}
+
+@unpublished{PaulEMcKenney2013RCUUsage
+,Author="Paul E. McKenney and Silas Boyd-Wickizer and Jonathan Walpole"
+,Title="{RCU} Usage in the {Linux} Kernel: One Decade Later"
+,month="February"
+,day="24"
+,year="2013"
+,note="\url{http://rdrop.com/users/paulmck/techreports/RCUUsage.2013.02.24a.pdf}"
+,annotation={
+ Usage of RCU within the Linux kernel.
+}
+}
+
+@inproceedings{AlexeyGotsman2013ESOPRCU
+,author = {Alexey Gotsman and Noam Rinetzky and Hongseok Yang}
+,title = {Verifying concurrent memory reclamation algorithms with grace}
+,booktitle = {ESOP'13: European Symposium on Programming}
+,year = {2013}
+,pages = {249--269}
+,publisher = {Springer}
+,address = {Rome, Italy}
+,annotation={
+ http://software.imdea.org/~gotsman/papers/recycling-esop13.pdf
+}
+}
+
+@unpublished{PaulEMcKenney2013NoTinyPreempt
+,Author="Paul E. McKenney"
+,Title="Simplifying RCU"
+,month="March"
+,day="6"
+,year="2013"
+,note="\url{http://lwn.net/Articles/541037/}"
+,annotation={
+ Getting rid of TINY_PREEMPT_RCU.
}
}
diff --git a/Documentation/RCU/rcubarrier.txt b/Documentation/RCU/rcubarrier.txt
index 2e319d1b9ef..b10cfe711e6 100644
--- a/Documentation/RCU/rcubarrier.txt
+++ b/Documentation/RCU/rcubarrier.txt
@@ -70,10 +70,14 @@ in realtime kernels in order to avoid excessive scheduling latencies.
rcu_barrier()
-We instead need the rcu_barrier() primitive. This primitive is similar
-to synchronize_rcu(), but instead of waiting solely for a grace
-period to elapse, it also waits for all outstanding RCU callbacks to
-complete. Pseudo-code using rcu_barrier() is as follows:
+We instead need the rcu_barrier() primitive. Rather than waiting for
+a grace period to elapse, rcu_barrier() waits for all outstanding RCU
+callbacks to complete. Please note that rcu_barrier() does -not- imply
+synchronize_rcu(); in particular, if there are no RCU callbacks queued
+anywhere, rcu_barrier() is within its rights to return immediately,
+without waiting for a grace period to elapse.
+
+Pseudo-code using rcu_barrier() is as follows:
1. Prevent any new RCU callbacks from being posted.
2. Execute rcu_barrier().
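As an aside, a minimal sketch (not part of the patch) rendering the two
pseudo-code steps above as a module-exit path; example_stop_new_callbacks()
stands in for the module-specific step 1 and is hypothetical:

	#include <linux/module.h>
	#include <linux/rcupdate.h>

	/* Hypothetical step 1: stop posting new call_rcu() callbacks, for
	 * example by unregistering whatever hooks invoke call_rcu().
	 */
	static void example_stop_new_callbacks(void)
	{
	}

	static void __exit example_exit(void)
	{
		example_stop_new_callbacks();	/* 1. no new callbacks          */
		rcu_barrier();			/* 2. wait for outstanding ones */
	}
	module_exit(example_exit);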
diff --git a/Documentation/RCU/torture.txt b/Documentation/RCU/torture.txt
index d8a50238739..dac02a6219b 100644
--- a/Documentation/RCU/torture.txt
+++ b/Documentation/RCU/torture.txt
@@ -42,6 +42,16 @@ fqs_holdoff Holdoff time (in microseconds) between consecutive calls
fqs_stutter Wait time (in seconds) between consecutive bursts
of calls to force_quiescent_state().
+gp_normal Make the fake writers use normal synchronous grace-period
+ primitives.
+
+gp_exp Make the fake writers use expedited synchronous grace-period
+ primitives. If both gp_normal and gp_exp are set, or
+ if neither gp_normal nor gp_exp are set, then randomly
+ choose the primitive so that about 50% are normal and
+	50% expedited. By default, neither is set, which
+	gives the best overall test coverage.
+
irqreader Says to invoke RCU readers from irq level. This is currently
done via timers. Defaults to "1" for variants of RCU that
permit this. (Or, more accurately, variants of RCU that do
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt
index 64139a189a4..aca4e69121b 100644
--- a/Documentation/acpi/enumeration.txt
+++ b/Documentation/acpi/enumeration.txt
@@ -228,19 +228,9 @@ ACPI handle like:
I2C serial bus support
~~~~~~~~~~~~~~~~~~~~~~
The slaves behind an I2C bus controller only need to add the ACPI IDs, just as
-with the platform and SPI drivers. However the I2C bus controller driver
-needs to call acpi_i2c_register_devices() after it has added the adapter.
-
-An I2C bus (controller) driver does:
-
- ...
- ret = i2c_add_numbered_adapter(adapter);
- if (ret)
- /* handle error */
-
- of_i2c_register_devices(adapter);
- /* Enumerate the slave devices behind this bus via ACPI */
- acpi_i2c_register_devices(adapter);
+with the platform and SPI drivers. The I2C core automatically enumerates
+any slave devices behind the controller device once the adapter is
+registered.
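+
+In other words, the controller driver's registration path reduces to something
+like the following (a minimal sketch based on the snippet removed above; error
+handling and the rest of the probe routine are elided):
+
+	ret = i2c_add_numbered_adapter(adapter);
+	if (ret)
+		return ret;
+	/* slave devices described in ACPI are now enumerated automatically */
+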
Below is an example of how to add ACPI support to the existing mpu3050
input driver:
diff --git a/Documentation/arm/Booting b/Documentation/arm/Booting
index 0c1f475fdf3..371814a3671 100644
--- a/Documentation/arm/Booting
+++ b/Documentation/arm/Booting
@@ -18,7 +18,8 @@ following:
2. Initialise one serial port.
3. Detect the machine type.
4. Setup the kernel tagged list.
-5. Call the kernel image.
+5. Load initramfs.
+6. Call the kernel image.
1. Setup and initialise RAM
@@ -120,12 +121,27 @@ tagged list.
The boot loader must pass at a minimum the size and location of the
system memory, and the root filesystem location. The dtb must be
placed in a region of memory where the kernel decompressor will not
-overwrite it. The recommended placement is in the first 16KiB of RAM
-with the caveat that it may not be located at physical address 0 since
-the kernel interprets a value of 0 in r2 to mean neither a tagged list
-nor a dtb were passed.
+overwrite it, whilst remaining within the region which will be covered
+by the kernel's low-memory mapping.
-5. Calling the kernel image
+A safe location is just above the 128MiB boundary from the start of RAM.
+
+5. Load initramfs.
+------------------
+
+Existing boot loaders: OPTIONAL
+New boot loaders: OPTIONAL
+
+If an initramfs is in use then, as with the dtb, it must be placed in
+a region of memory where the kernel decompressor will not overwrite it
+while also within the region which will be covered by the kernel's
+low-memory mapping.
+
+A safe location is just above the device tree blob which itself will
+be loaded just above the 128MiB boundary from the start of RAM as
+recommended above.
+
+6. Calling the kernel image
---------------------------
Existing boot loaders: MANDATORY
@@ -136,11 +152,17 @@ is stored in flash, and is linked correctly to be run from flash,
then it is legal for the boot loader to call the zImage in flash
directly.
-The zImage may also be placed in system RAM (at any location) and
-called there. Note that the kernel uses 16K of RAM below the image
-to store page tables. The recommended placement is 32KiB into RAM.
+The zImage may also be placed in system RAM and called there. The
+kernel should be placed in the first 128MiB of RAM. It is recommended
+that it is loaded above 32MiB in order to avoid the need to relocate
+prior to decompression, which will make the boot process slightly
+faster.
+
+When booting a raw (non-zImage) kernel the constraints are tighter.
+In this case the kernel must be loaded at an offset into system RAM
+equal to TEXT_OFFSET - PAGE_OFFSET.
-In either case, the following conditions must be met:
+In any case, the following conditions must be met:
- Quiesce all DMA capable devices so that memory does not get
corrupted by bogus network packets or disk data. This will save
diff --git a/Documentation/arm/kernel_mode_neon.txt b/Documentation/arm/kernel_mode_neon.txt
new file mode 100644
index 00000000000..525452726d3
--- /dev/null
+++ b/Documentation/arm/kernel_mode_neon.txt
@@ -0,0 +1,121 @@
+Kernel mode NEON
+================
+
+TL;DR summary
+-------------
+* Use only NEON instructions, or VFP instructions that don't rely on support
+ code
+* Isolate your NEON code in a separate compilation unit, and compile it with
+ '-mfpu=neon -mfloat-abi=softfp'
+* Put kernel_neon_begin() and kernel_neon_end() calls around the calls into your
+ NEON code
+* Don't sleep in your NEON code, and be aware that it will be executed with
+ preemption disabled
+
+
+Introduction
+------------
+It is possible to use NEON instructions (and in some cases, VFP instructions) in
+code that runs in kernel mode. However, for performance reasons, the NEON/VFP
+register file is not preserved and restored at every context switch or taken
+exception like the normal register file is, so some manual intervention is
+required. Furthermore, special care is required for code that may sleep [i.e.,
+may call schedule()], as NEON or VFP instructions will be executed in a
+non-preemptible section for reasons outlined below.
+
+
+Lazy preserve and restore
+-------------------------
+The NEON/VFP register file is managed using lazy preserve (on UP systems) and
+lazy restore (on both SMP and UP systems). This means that the register file is
+kept 'live', and is only preserved and restored when multiple tasks are
+contending for the NEON/VFP unit (or, in the SMP case, when a task migrates to
+another core). Lazy restore is implemented by disabling the NEON/VFP unit after
+every context switch, resulting in a trap when subsequently a NEON/VFP
+instruction is issued, allowing the kernel to step in and perform the restore if
+necessary.
+
+Any use of the NEON/VFP unit in kernel mode should not interfere with this, so
+it is required to do an 'eager' preserve of the NEON/VFP register file, and
+enable the NEON/VFP unit explicitly so no exceptions are generated on first
+subsequent use. This is handled by the function kernel_neon_begin(), which
+should be called before any kernel mode NEON or VFP instructions are issued.
+Likewise, the NEON/VFP unit should be disabled again after use to make sure user
+mode will hit the lazy restore trap upon next use. This is handled by the
+function kernel_neon_end().
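+
+A typical call site therefore looks roughly like this (a minimal sketch;
+my_neon_memcpy() stands for your own routine, built in a separate compilation
+unit as described further down):
+
+	#include <asm/neon.h>
+
+	void my_neon_wrapper(void *dst, const void *src, int len)
+	{
+		kernel_neon_begin();
+		my_neon_memcpy(dst, src, len);
+		kernel_neon_end();
+	}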
+
+
+Interruptions in kernel mode
+----------------------------
+For reasons of performance and simplicity, it was decided that there shall be no
+preserve/restore mechanism for the kernel mode NEON/VFP register contents. This
+implies that interruptions of a kernel mode NEON section can only be allowed if
+they are guaranteed not to touch the NEON/VFP registers. For this reason, the
+following rules and restrictions apply in the kernel:
+* NEON/VFP code is not allowed in interrupt context;
+* NEON/VFP code is not allowed to sleep;
+* NEON/VFP code is executed with preemption disabled.
+
+If latency is a concern, it is possible to put back to back calls to
+kernel_neon_end() and kernel_neon_begin() in places in your code where none of
+the NEON registers are live. (Additional calls to kernel_neon_begin() should be
+reasonably cheap if no context switch occurred in the meantime.)
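+
+For instance, a long-running operation could be split into chunks so that the
+kernel can be preempted between them (a sketch; nchunks, chunks[] and
+do_chunk_neon() are placeholders for your own data and NEON routine):
+
+	int i;
+
+	for (i = 0; i < nchunks; i++) {
+		kernel_neon_begin();
+		do_chunk_neon(&chunks[i]);
+		kernel_neon_end();	/* back to back with the next begin */
+	}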
+
+
+VFP and support code
+--------------------
+Earlier versions of VFP (prior to version 3) rely on software support for things
+like IEEE-754 compliant underflow handling etc. When the VFP unit needs such
+software assistance, it signals the kernel by raising an undefined instruction
+exception. The kernel responds by inspecting the VFP control registers and the
+current instruction and arguments, and emulates the instruction in software.
+
+Such software assistance is currently not implemented for VFP instructions
+executed in kernel mode. If such a condition is encountered, the kernel will
+fail and generate an OOPS.
+
+
+Separating NEON code from ordinary code
+---------------------------------------
+The compiler is not aware of the special significance of kernel_neon_begin() and
+kernel_neon_end(), i.e., that it is only allowed to issue NEON/VFP instructions
+between calls to these respective functions. Furthermore, GCC may generate NEON
+instructions of its own at -O3 level if -mfpu=neon is selected, and even if the
+kernel is currently compiled at -O2, future changes may result in NEON/VFP
+instructions appearing in unexpected places if no special care is taken.
+
+Therefore, the recommended and only supported way of using NEON/VFP in the
+kernel is by adhering to the following rules:
+* isolate the NEON code in a separate compilation unit and compile it with
+ '-mfpu=neon -mfloat-abi=softfp';
+* issue the calls to kernel_neon_begin(), kernel_neon_end() as well as the calls
+ into the unit containing the NEON code from a compilation unit which is *not*
+ built with the GCC flag '-mfpu=neon' set.
+
+As the kernel is compiled with '-msoft-float', the above will guarantee that
+both NEON and VFP instructions will only ever appear in designated compilation
+units at any optimization level.
+
+
+NEON assembler
+--------------
+NEON assembler is supported with no additional caveats as long as the rules
+above are followed.
+
+
+NEON code generated by GCC
+--------------------------
+The GCC option -ftree-vectorize (implied by -O3) tries to exploit implicit
+parallelism, and generates NEON code from ordinary C source code. This is fully
+supported as long as the rules above are followed.
+
+
+NEON intrinsics
+---------------
+NEON intrinsics are also supported. However, as code using NEON intrinsics
+relies on the GCC header <arm_neon.h> (which #includes <stdint.h>), you should
+observe the following in addition to the rules above (see the sketch after
+this list):
+* Compile the unit containing the NEON intrinsics with '-ffreestanding' so GCC
+ uses its builtin version of <stdint.h> (this is a C99 header which the kernel
+ does not supply);
+* Include <arm_neon.h> last, or at least after <linux/types.h>
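+
+For example, a file containing NEON intrinsics might start as follows (a
+sketch; the kbuild rules for this file are assumed to add '-mfpu=neon
+-mfloat-abi=softfp -ffreestanding'):
+
+	#include <linux/types.h>
+	#include <arm_neon.h>	/* last, after <linux/types.h> */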
diff --git a/Documentation/cpu-freq/cpu-drivers.txt b/Documentation/cpu-freq/cpu-drivers.txt
index 19fa98e07bf..40282e61791 100644
--- a/Documentation/cpu-freq/cpu-drivers.txt
+++ b/Documentation/cpu-freq/cpu-drivers.txt
@@ -50,8 +50,6 @@ What shall this struct cpufreq_driver contain?
cpufreq_driver.name - The name of this driver.
-cpufreq_driver.owner - THIS_MODULE;
-
cpufreq_driver.init - A pointer to the per-CPU initialization
function.
diff --git a/Documentation/devicetree/bindings/arm/arch_timer.txt b/Documentation/devicetree/bindings/arm/arch_timer.txt
index 20746e5abe6..06fc7602593 100644
--- a/Documentation/devicetree/bindings/arm/arch_timer.txt
+++ b/Documentation/devicetree/bindings/arm/arch_timer.txt
@@ -1,10 +1,14 @@
* ARM architected timer
-ARM cores may have a per-core architected timer, which provides per-cpu timers.
+ARM cores may have a per-core architected timer, which provides per-cpu timers,
+or a memory mapped architected timer, which provides up to 8 frames with a
+physical and optional virtual timer per frame.
-The timer is attached to a GIC to deliver its per-processor interrupts.
+The per-core architected timer is attached to a GIC to deliver its
+per-processor interrupts via PPIs. The memory mapped timer is attached to a GIC
+to deliver its interrupts via SPIs.
-** Timer node properties:
+** CP15 Timer node properties:
- compatible : Should at least contain one of
"arm,armv7-timer"
@@ -26,3 +30,52 @@ Example:
<1 10 0xf08>;
clock-frequency = <100000000>;
};
+
+** Memory mapped timer node properties:
+
+- compatible : Should at least contain "arm,armv7-timer-mem".
+
+- clock-frequency : The frequency of the main counter, in Hz. Optional.
+
+- reg : The control frame base address.
+
+Note that #address-cells, #size-cells, and ranges shall be present to ensure
+the CPU can address a frame's registers.
+
+A timer node has up to 8 frame sub-nodes, each with the following properties:
+
+- frame-number: 0 to 7.
+
+- interrupts : Interrupt list for physical and virtual timers in that order.
+ The virtual timer interrupt is optional.
+
+- reg : The first and second view base addresses in that order. The second view
+ base address is optional.
+
+- status : "disabled" indicates the frame is not available for use. Optional.
+
+Example:
+
+ timer@f0000000 {
+ compatible = "arm,armv7-timer-mem";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+ reg = <0xf0000000 0x1000>;
+ clock-frequency = <50000000>;
+
+ frame@f0001000 {
+ frame-number = <0>;
+ interrupts = <0 13 0x8>,
+ <0 14 0x8>;
+ reg = <0xf0001000 0x1000>,
+ <0xf0002000 0x1000>;
+ };
+
+ frame@f0003000 {
+ frame-number = <1>;
+ interrupts = <0 15 0x8>;
+ reg = <0xf0003000 0x1000>;
+ status = "disabled";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/arm/atmel-adc.txt b/Documentation/devicetree/bindings/arm/atmel-adc.txt
index 16769d9cedd..723c205cb10 100644
--- a/Documentation/devicetree/bindings/arm/atmel-adc.txt
+++ b/Documentation/devicetree/bindings/arm/atmel-adc.txt
@@ -1,18 +1,15 @@
* AT91's Analog to Digital Converter (ADC)
Required properties:
- - compatible: Should be "atmel,at91sam9260-adc"
+ - compatible: Should be "atmel,<chip>-adc"
+ <chip> can be "at91sam9260", "at91sam9g45" or "at91sam9x5"
- reg: Should contain ADC registers location and length
- interrupts: Should contain the IRQ line for the ADC
- - atmel,adc-channel-base: Offset of the first channel data register
- atmel,adc-channels-used: Bitmask of the channels muxed and enabled for this
device
- - atmel,adc-drdy-mask: Mask of the DRDY interruption in the ADC
- atmel,adc-num-channels: Number of channels available in the ADC
- atmel,adc-startup-time: Startup Time of the ADC in microseconds as
defined in the datasheet
- - atmel,adc-status-register: Offset of the Interrupt Status Register
- - atmel,adc-trigger-register: Offset of the Trigger Register
- atmel,adc-vref: Reference voltage in millivolts for the conversions
- atmel,adc-res: List of resolution in bits supported by the ADC. List size
must be two at least.
diff --git a/Documentation/devicetree/bindings/arm/l2cc.txt b/Documentation/devicetree/bindings/arm/l2cc.txt
index 69ddf9fad2d..c0c7626fd0f 100644
--- a/Documentation/devicetree/bindings/arm/l2cc.txt
+++ b/Documentation/devicetree/bindings/arm/l2cc.txt
@@ -16,9 +16,11 @@ Required properties:
performs the same operation).
"marvell,"aurora-outer-cache: Marvell Controller designed to be
compatible with the ARM one with outer cache mode.
- "bcm,bcm11351-a2-pl310-cache": For Broadcom bcm11351 chipset where an
+ "brcm,bcm11351-a2-pl310-cache": For Broadcom bcm11351 chipset where an
offset needs to be added to the address before passing down to the L2
cache controller
+ "bcm,bcm11351-a2-pl310-cache": DEPRECATED by
+ "brcm,bcm11351-a2-pl310-cache"
- cache-unified : Specifies the cache is a unified cache.
- cache-level : Should be set to 2 for a level 2 cache.
- reg : Physical base address and size of cache controller's memory mapped
diff --git a/Documentation/devicetree/bindings/ata/ahci-platform.txt b/Documentation/devicetree/bindings/ata/ahci-platform.txt
index 3ec0c5c4f0e..89de1564950 100644
--- a/Documentation/devicetree/bindings/ata/ahci-platform.txt
+++ b/Documentation/devicetree/bindings/ata/ahci-platform.txt
@@ -4,27 +4,17 @@ SATA nodes are defined to describe on-chip Serial ATA controllers.
Each SATA controller should have its own node.
Required properties:
-- compatible : compatible list, contains "calxeda,hb-ahci" or "snps,spear-ahci"
+- compatible : compatible list, contains "snps,spear-ahci"
- interrupts : <interrupt mapping for SATA IRQ>
- reg : <registers mapping>
Optional properties:
-- calxeda,port-phys: phandle-combophy and lane assignment, which maps each
- SATA port to a combophy and a lane within that
- combophy
-- calxeda,sgpio-gpio: phandle-gpio bank, bit offset, and default on or off,
- which indicates that the driver supports SGPIO
- indicator lights using the indicated GPIOs
-- calxeda,led-order : a u32 array that map port numbers to offsets within the
- SGPIO bitstream.
- dma-coherent : Present if dma operations are coherent
Example:
sata@ffe08000 {
- compatible = "calxeda,hb-ahci";
- reg = <0xffe08000 0x1000>;
- interrupts = <115>;
- calxeda,port-phys = <&combophy5 0 &combophy0 0 &combophy0 1
- &combophy0 2 &combophy0 3>;
+ compatible = "snps,spear-ahci";
+ reg = <0xffe08000 0x1000>;
+ interrupts = <115>;
};
diff --git a/Documentation/devicetree/bindings/ata/sata_highbank.txt b/Documentation/devicetree/bindings/ata/sata_highbank.txt
new file mode 100644
index 00000000000..aa83407cb7a
--- /dev/null
+++ b/Documentation/devicetree/bindings/ata/sata_highbank.txt
@@ -0,0 +1,44 @@
+* Calxeda AHCI SATA Controller
+
+SATA nodes are defined to describe on-chip Serial ATA controllers.
+The Calxeda SATA controller mostly conforms to the AHCI interface
+with some special extensions to add functionality.
+Each SATA controller should have its own node.
+
+Required properties:
+- compatible : compatible list, contains "calxeda,hb-ahci"
+- interrupts : <interrupt mapping for SATA IRQ>
+- reg : <registers mapping>
+
+Optional properties:
+- dma-coherent : Present if dma operations are coherent
+- calxeda,port-phys : phandle-combophy and lane assignment, which maps each
+ SATA port to a combophy and a lane within that
+ combophy
+- calxeda,sgpio-gpio: phandle-gpio bank, bit offset, and default on or off,
+ which indicates that the driver supports SGPIO
+ indicator lights using the indicated GPIOs
+- calxeda,led-order : a u32 array that maps port numbers to offsets within the
+ SGPIO bitstream.
+- calxeda,tx-atten : a u32 array that contains TX attenuation override
+ codes, one per port. The upper 3 bytes are always
+ 0 and thus ignored.
+- calxeda,pre-clocks : a u32 that indicates the number of additional clock
+ cycles to transmit before sending an SGPIO pattern
+- calxeda,post-clocks: a u32 that indicates the number of additional clock
+ cycles to transmit after sending an SGPIO pattern
+
+Example:
+ sata@ffe08000 {
+ compatible = "calxeda,hb-ahci";
+ reg = <0xffe08000 0x1000>;
+ interrupts = <115>;
+ dma-coherent;
+ calxeda,port-phys = <&combophy5 0 &combophy0 0 &combophy0 1
+ &combophy0 2 &combophy0 3>;
+ calxeda,sgpio-gpio = <&gpioh 5 1 &gpioh 6 1 &gpioh 7 1>;
+ calxeda,led-order = <4 0 1 2 3>;
+ calxeda,tx-atten = <0xff 22 0xff 0xff 23>;
+ calxeda,pre-clocks = <10>;
+ calxeda,post-clocks = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/imx27-clock.txt b/Documentation/devicetree/bindings/clock/imx27-clock.txt
index ab1a56e9de9..7a207039373 100644
--- a/Documentation/devicetree/bindings/clock/imx27-clock.txt
+++ b/Documentation/devicetree/bindings/clock/imx27-clock.txt
@@ -98,6 +98,7 @@ clocks and IDs.
fpm 83
mpll_osc_sel 84
mpll_sel 85
+ spll_gate 86
Examples:
diff --git a/Documentation/devicetree/bindings/extcon/extcon-twl.txt b/Documentation/devicetree/bindings/extcon/extcon-palmas.txt
index 58f531ab4df..7dab6a8f4a0 100644
--- a/Documentation/devicetree/bindings/extcon/extcon-twl.txt
+++ b/Documentation/devicetree/bindings/extcon/extcon-palmas.txt
@@ -1,15 +1,15 @@
-EXTCON FOR TWL CHIPS
+EXTCON FOR PALMAS/TWL CHIPS
PALMAS USB COMPARATOR
Required Properties:
- compatible : Should be "ti,palmas-usb" or "ti,twl6035-usb"
- - vbus-supply : phandle to the regulator device tree node.
Optional Properties:
- ti,wakeup : To enable the wakeup comparator in probe
+ - ti,enable-id-detection: Perform ID detection.
+ - ti,enable-vbus-detection: Perform VBUS detection.
palmas-usb {
compatible = "ti,twl6035-usb", "ti,palmas-usb";
- vbus-supply = <&smps10_reg>;
ti,wakeup;
};
diff --git a/Documentation/devicetree/bindings/gpio/gpio.txt b/Documentation/devicetree/bindings/gpio/gpio.txt
index d933af37069..6cec6ff20d2 100644
--- a/Documentation/devicetree/bindings/gpio/gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/gpio.txt
@@ -75,23 +75,36 @@ Example of two SOC GPIO banks defined as gpio-controller nodes:
gpio-controller;
};
-2.1) gpio-controller and pinctrl subsystem
-------------------------------------------
+2.1) gpio- and pin-controller interaction
+-----------------------------------------
-gpio-controller on a SOC might be tightly coupled with the pinctrl
-subsystem, in the sense that the pins can be used by other functions
-together with optional gpio feature.
+Some or all of the GPIOs provided by a GPIO controller may be routed to pins
+on the package via a pin controller. This allows muxing those pins between
+GPIO and other functions.
-While the pin allocation is totally managed by the pin ctrl subsystem,
-gpio (under gpiolib) is still maintained by gpio drivers. It may happen
-that different pin ranges in a SoC is managed by different gpio drivers.
+It is useful to represent which GPIOs correspond to which pins on which pin
+controllers. The gpio-ranges property described below represents this, and
+contains information structures as follows:
-This makes it logical to let gpio drivers announce their pin ranges to
-the pin ctrl subsystem and call 'pinctrl_request_gpio' in order to
-request the corresponding pin before any gpio usage.
+ gpio-range-list ::= <single-gpio-range> [gpio-range-list]
+ single-gpio-range ::=
+ <pinctrl-phandle> <gpio-base> <pinctrl-base> <count>
+ pinctrl-phandle : phandle to the pin controller node.
+ gpio-base : Base GPIO ID in the GPIO controller
+ pinctrl-base : Base pinctrl pin ID in the pin controller
+ count : The number of GPIOs/pins in this range
-For this, the gpio controller can use a pinctrl phandle and pins to
-announce the pinrange to the pin ctrl subsystem. For example,
+The "pin controller node" mentioned above must conform to the bindings
+described in ../pinctrl/pinctrl-bindings.txt.
+
+Previous versions of this binding required all pin controller nodes that
+were referenced by any gpio-ranges property to contain a property named
+#gpio-range-cells with value <3>. This requirement is now deprecated.
+However, that property may still exist in older device trees for
+compatibility reasons, and would still be required even in new device
+trees that need to be compatible with older software.
+
+Example:
qe_pio_e: gpio-controller@1460 {
#gpio-cells = <2>;
@@ -99,16 +112,8 @@ announce the pinrange to the pin ctrl subsystem. For example,
reg = <0x1460 0x18>;
gpio-controller;
gpio-ranges = <&pinctrl1 0 20 10>, <&pinctrl2 10 50 20>;
+ };
- }
-
-where,
- &pinctrl1 and &pinctrl2 is the phandle to the pinctrl DT node.
-
- Next values specify the base pin and number of pins for the range
- handled by 'qe_pio_e' gpio. In the given example from base pin 20 to
- pin 29 under pinctrl1 with gpio offset 0 and pin 50 to pin 69 under
- pinctrl2 with gpio offset 10 is handled by this gpio controller.
-
-The pinctrl node must have "#gpio-range-cells" property to show number of
-arguments to pass with phandle from gpio controllers node.
+Here, a single GPIO controller has GPIOs 0..9 routed to pin controller
+pinctrl1's pins 20..29, and GPIOs 10..19 routed to pin controller pinctrl2's
+pins 50..59.
diff --git a/Documentation/devicetree/bindings/gpu/samsung-rotator.txt b/Documentation/devicetree/bindings/gpu/samsung-rotator.txt
new file mode 100644
index 00000000000..82cd1ed0be9
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/samsung-rotator.txt
@@ -0,0 +1,27 @@
+* Samsung Image Rotator
+
+Required properties:
+ - compatible : value should be one of the following:
+ (a) "samsung,exynos4210-rotator" for Rotator IP in Exynos4210
+ (b) "samsung,exynos4212-rotator" for Rotator IP in Exynos4212/4412
+ (c) "samsung,exynos5250-rotator" for Rotator IP in Exynos5250
+
+ - reg : Physical base address of the IP registers and length of memory
+ mapped region.
+
+ - interrupts : Interrupt specifier for rotator interrupt, according to format
+ specific to interrupt parent.
+
+ - clocks : Clock specifier for rotator clock, according to generic clock
+ bindings. (See Documentation/devicetree/bindings/clock/exynos*.txt)
+
+ - clock-names : Names of clocks. For exynos rotator, it should be "rotator".
+
+Example:
+ rotator@12810000 {
+ compatible = "samsung,exynos4210-rotator";
+ reg = <0x12810000 0x1000>;
+ interrupts = <0 83 0>;
+ clocks = <&clock 278>;
+ clock-names = "rotator";
+ };
diff --git a/Documentation/devicetree/bindings/hid/hid-over-i2c.txt b/Documentation/devicetree/bindings/hid/hid-over-i2c.txt
new file mode 100644
index 00000000000..488edcb264c
--- /dev/null
+++ b/Documentation/devicetree/bindings/hid/hid-over-i2c.txt
@@ -0,0 +1,28 @@
+* HID over I2C Device-Tree bindings
+
+HID over I2C provides support for various Human Interface Devices over the
+I2C bus. These devices can be, for example, touchpads, keyboards, touch screens
+or sensors.
+
+The specification has been written by Microsoft and is currently available here:
+http://msdn.microsoft.com/en-us/library/windows/hardware/hh852380.aspx
+
+If this binding is used, the kernel module i2c-hid will handle the communication
+with the device and the generic hid core layer will handle the protocol.
+
+Required properties:
+- compatible: must be "hid-over-i2c"
+- reg: i2c slave address
+- hid-descr-addr: HID descriptor address
+- interrupt-parent: the phandle for the interrupt controller
+- interrupts: interrupt line
+
+Example:
+
+ i2c-hid-dev@2c {
+ compatible = "hid-over-i2c";
+ reg = <0x2c>;
+ hid-descr-addr = <0x0020>;
+ interrupt-parent = <&gpx3>;
+ interrupts = <3 2>;
+ };
diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx.txt b/Documentation/devicetree/bindings/i2c/i2c-imx.txt
index 3614242e773..4a8513e4474 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-imx.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-imx.txt
@@ -1,7 +1,10 @@
* Freescale Inter IC (I2C) and High Speed Inter IC (HS-I2C) for i.MX
Required properties:
-- compatible : Should be "fsl,<chip>-i2c"
+- compatible :
+ - "fsl,imx1-i2c" for I2C compatible with the one integrated on i.MX1 SoC
+ - "fsl,imx21-i2c" for I2C compatible with the one integrated on i.MX21 SoC
+ - "fsl,vf610-i2c" for I2C compatible with the one integrated on Vybrid vf610 SoC
- reg : Should contain I2C/HS-I2C registers location and length
- interrupts : Should contain I2C/HS-I2C interrupt
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt b/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
index a1ee681942c..82e8f6f1717 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
@@ -4,7 +4,8 @@
Required properties :
- reg : Offset and length of the register set for the device
- - compatible : Should be "marvell,mv64xxx-i2c"
+ - compatible : Should be "marvell,mv64xxx-i2c" or "allwinner,sun4i-i2c"
+ or "marvell,mv78230-i2c"
- interrupts : The interrupt number
Optional properties :
@@ -20,3 +21,12 @@ Examples:
interrupts = <29>;
clock-frequency = <100000>;
};
+
+For the Armada XP:
+
+ i2c@11000 {
+ compatible = "marvell,mv78230-i2c", "marvell,mv64xxx-i2c";
+ reg = <0x11000 0x100>;
+ interrupts = <29>;
+ clock-frequency = <100000>;
+ };
diff --git a/Documentation/devicetree/bindings/iio/accel/bma180.txt b/Documentation/devicetree/bindings/iio/accel/bma180.txt
new file mode 100644
index 00000000000..c5933573e0f
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/accel/bma180.txt
@@ -0,0 +1,24 @@
+* Bosch BMA180 triaxial acceleration sensor
+
+http://omapworld.com/BMA180_111_1002839.pdf
+
+Required properties:
+
+ - compatible : should be "bosch,bma180"
+ - reg : the I2C address of the sensor
+
+Optional properties:
+
+ - interrupt-parent : should be the phandle for the interrupt controller
+
+ - interrupts : interrupt mapping for GPIO IRQ; it should be configured with
+ flags IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING
+
+Example:
+
+bma180@40 {
+ compatible = "bosch,bma180";
+ reg = <0x40>;
+ interrupt-parent = <&gpio6>;
+ interrupts = <18 (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_EDGE_RISING)>;
+};
diff --git a/Documentation/devicetree/bindings/iio/adc/nuvoton-nau7802.txt b/Documentation/devicetree/bindings/iio/adc/nuvoton-nau7802.txt
new file mode 100644
index 00000000000..e9582e6fe35
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/nuvoton-nau7802.txt
@@ -0,0 +1,18 @@
+* Nuvoton NAU7802 Analog to Digital Converter (ADC)
+
+Required properties:
+ - compatible: Should be "nuvoton,nau7802"
+ - reg: Should contain the ADC I2C address
+
+Optional properties:
+ - nuvoton,vldo: Internal reference voltage in millivolts to be
+ configured. Valid values are between 2400 mV and 4500 mV.
+ - interrupts: IRQ line for the ADC. If not used, the driver will use
+ polling.
+
+Example:
+adc2: nau7802@2a {
+ compatible = "nuvoton,nau7802";
+ reg = <0x2a>;
+ nuvoton,vldo = <3000>;
+};
diff --git a/Documentation/devicetree/bindings/iio/light/apds9300.txt b/Documentation/devicetree/bindings/iio/light/apds9300.txt
new file mode 100644
index 00000000000..d6f66c73ddb
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/light/apds9300.txt
@@ -0,0 +1,22 @@
+* Avago APDS9300 ambient light sensor
+
+http://www.avagotech.com/docs/AV02-1077EN
+
+Required properties:
+
+ - compatible : should be "avago,apds9300"
+ - reg : the I2C address of the sensor
+
+Optional properties:
+
+ - interrupt-parent : should be the phandle for the interrupt controller
+ - interrupts : interrupt mapping for GPIO IRQ
+
+Example:
+
+apds9300@39 {
+ compatible = "avago,apds9300";
+ reg = <0x39>;
+ interrupt-parent = <&gpio2>;
+ interrupts = <29 8>;
+};
diff --git a/Documentation/devicetree/bindings/media/i2c/adv7343.txt b/Documentation/devicetree/bindings/media/i2c/adv7343.txt
new file mode 100644
index 00000000000..5653bc2428b
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/adv7343.txt
@@ -0,0 +1,48 @@
+* Analog Devices adv7343 video encoder
+
+The ADV7343 is a high speed, digital-to-analog video encoder in a 64-lead LQFP
+package. Six high speed, 3.3 V, 11-bit video DACs provide support for composite
+(CVBS), S-Video (Y-C), and component (YPrPb/RGB) analog outputs in standard
+definition (SD), enhanced definition (ED), or high definition (HD) video
+formats.
+
+Required Properties :
+- compatible: Must be "adi,adv7343"
+
+Optional Properties :
+- adi,power-mode-sleep-mode: when enabled, the current consumption is reduced
+ to the microampere level. All DACs and the internal
+ PLL circuit are disabled.
+- adi,power-mode-pll-ctrl: PLL and oversampling control. This control allows
+ internal PLL 1 circuit to be powered down and the
+ oversampling to be switched off.
+- ad,adv7343-power-mode-dac: array configuring the power on/off state of
+ DACs 1..6, 0 = OFF and 1 = ON. Default value when
+ this property is not specified is <0 0 0 0 0 0>.
+- ad,adv7343-sd-config-dac-out: array configuring SD DAC outputs 1 and 2,
+ 0 = OFF and 1 = ON. Default value when this
+ property is not specified is <0 0>.
+
+Example:
+
+i2c0@1c22000 {
+ ...
+ ...
+
+ adv7343@2a {
+ compatible = "adi,adv7343";
+ reg = <0x2a>;
+
+ port {
+ adv7343_1: endpoint {
+ adi,power-mode-sleep-mode;
+ adi,power-mode-pll-ctrl;
+ /* Use DAC1..3, DAC6 */
+ adi,dac-enable = <1 1 1 0 0 1>;
+ /* Use SD DAC output 1 */
+ adi,sd-dac-enable = <1 0>;
+ };
+ };
+ };
+ ...
+};
diff --git a/Documentation/devicetree/bindings/media/i2c/ths8200.txt b/Documentation/devicetree/bindings/media/i2c/ths8200.txt
new file mode 100644
index 00000000000..285f6ae7dfa
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/ths8200.txt
@@ -0,0 +1,19 @@
+* Texas Instruments THS8200 video encoder
+
+The ths8200 device is a digital-to-analog converter used in DVD players, video
+recorders, and set-top boxes.
+
+Required Properties :
+- compatible : value must be "ti,ths8200"
+
+Example:
+
+ i2c0@1c22000 {
+ ...
+ ...
+ ths8200@5c {
+ compatible = "ti,ths8200";
+ reg = <0x5c>;
+ };
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/media/i2c/tvp7002.txt b/Documentation/devicetree/bindings/media/i2c/tvp7002.txt
new file mode 100644
index 00000000000..5f28b5d9abc
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/i2c/tvp7002.txt
@@ -0,0 +1,53 @@
+* Texas Instruments TVP7002 video decoder
+
+The TVP7002 device supports digitizing of video and graphics signals in RGB and
+YPbPr color spaces.
+
+Required Properties :
+- compatible : Must be "ti,tvp7002"
+
+Optional Properties:
+- hsync-active: HSYNC Polarity configuration for the bus. Default value when
+ this property is not specified is <0>.
+
+- vsync-active: VSYNC Polarity configuration for the bus. Default value when
+ this property is not specified is <0>.
+
+- pclk-sample: Clock polarity of the bus. Default value when this property is
+ not specified is <0>.
+
+- sync-on-green-active: Active state of Sync-on-green signal property of the
+ endpoint.
+ 0 = Normal Operation (Active Low, Default)
+ 1 = Inverted operation
+
+- field-even-active: Active-high Field ID output polarity control of the bus.
+ Under normal operation, the field ID output is set to logic 1 for an odd field
+ (field 1) and set to logic 0 for an even field (field 0).
+ 0 = Normal Operation (Active Low, Default)
+ 1 = FID output polarity inverted
+
+For further information about the port node, refer to
+Documentation/devicetree/bindings/media/video-interfaces.txt.
+
+Example:
+
+ i2c0@1c22000 {
+ ...
+ ...
+ tvp7002@5c {
+ compatible = "ti,tvp7002";
+ reg = <0x5c>;
+
+ port {
+ tvp7002_1: endpoint {
+ hsync-active = <1>;
+ vsync-active = <1>;
+ pclk-sample = <0>;
+ sync-on-green-active = <1>;
+ field-even-active = <0>;
+ };
+ };
+ };
+ ...
+ };
diff --git a/Documentation/devicetree/bindings/media/s5p-mfc.txt b/Documentation/devicetree/bindings/media/s5p-mfc.txt
index df37b0230c7..36bd2d6725c 100644
--- a/Documentation/devicetree/bindings/media/s5p-mfc.txt
+++ b/Documentation/devicetree/bindings/media/s5p-mfc.txt
@@ -10,6 +10,7 @@ Required properties:
- compatible : value should be either one among the following
(a) "samsung,mfc-v5" for MFC v5 present in Exynos4 SoCs
(b) "samsung,mfc-v6" for MFC v6 present in Exynos5 SoCs
+ (b) "samsung,mfc-v7" for MFC v7 present in Exynos5420 SoC
- reg : Physical base address of the IP registers and length of memory
mapped region.
diff --git a/Documentation/devicetree/bindings/media/video-interfaces.txt b/Documentation/devicetree/bindings/media/video-interfaces.txt
index e022d2dc496..ce719f89dd1 100644
--- a/Documentation/devicetree/bindings/media/video-interfaces.txt
+++ b/Documentation/devicetree/bindings/media/video-interfaces.txt
@@ -88,6 +88,8 @@ Optional endpoint properties
- field-even-active: field signal level during the even field data transmission.
- pclk-sample: sample data on rising (1) or falling (0) edge of the pixel clock
signal.
+- sync-on-green-active: active state of Sync-on-green (SoG) signal, 0/1 for
+ LOW/HIGH respectively.
- data-lanes: an array of physical data lane indexes. Position of an entry
determines the logical lane number, while the value of an entry indicates
physical lane, e.g. for 2-lane MIPI CSI-2 bus we could have
diff --git a/Documentation/devicetree/bindings/misc/atmel-ssc.txt b/Documentation/devicetree/bindings/misc/atmel-ssc.txt
index 38e51ad2e07..a45ae08c8ed 100644
--- a/Documentation/devicetree/bindings/misc/atmel-ssc.txt
+++ b/Documentation/devicetree/bindings/misc/atmel-ssc.txt
@@ -7,9 +7,30 @@ Required properties:
- reg: Should contain SSC registers location and length
- interrupts: Should contain SSC interrupt
-Example:
+
+Required properties for devices compatible with "atmel,at91sam9g45-ssc":
+- dmas: DMA specifier, consisting of a phandle to DMA controller node,
+ the memory interface and SSC DMA channel ID (for tx and rx).
+ See Documentation/devicetree/bindings/dma/atmel-dma.txt for details.
+- dma-names: Must be "tx", "rx".
+
+Examples:
+- PDC transfer:
ssc0: ssc@fffbc000 {
compatible = "atmel,at91rm9200-ssc";
reg = <0xfffbc000 0x4000>;
interrupts = <14 4 5>;
};
+
+- DMA transfer:
+ssc0: ssc@f0010000 {
+ compatible = "atmel,at91sam9g45-ssc";
+ reg = <0xf0010000 0x4000>;
+ interrupts = <28 4 5>;
+ dmas = <&dma0 1 13>,
+ <&dma0 1 14>;
+ dma-names = "tx", "rx";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>;
+ status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/net/micrel-ksz9021.txt b/Documentation/devicetree/bindings/net/micrel-ksz9021.txt
new file mode 100644
index 00000000000..997a63f1aea
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/micrel-ksz9021.txt
@@ -0,0 +1,49 @@
+Micrel KSZ9021 Gigabit Ethernet PHY
+
+Some boards require special tuning values, particularly when it comes to
+clock delays. You can specify clock delay values by adding
+micrel-specific properties to an Ethernet OF device node.
+
+All skew control options are specified in picoseconds. The minimum
+value is 0, and the maximum value is 3000.
+
+Optional properties:
+ - rxc-skew-ps : Skew control of RXC pad
+ - rxdv-skew-ps : Skew control of RX CTL pad
+ - txc-skew-ps : Skew control of TXC pad
+ - txen-skew-ps : Skew control of TX_CTL pad
+ - rxd0-skew-ps : Skew control of RX data 0 pad
+ - rxd1-skew-ps : Skew control of RX data 1 pad
+ - rxd2-skew-ps : Skew control of RX data 2 pad
+ - rxd3-skew-ps : Skew control of RX data 3 pad
+ - txd0-skew-ps : Skew control of TX data 0 pad
+ - txd1-skew-ps : Skew control of TX data 1 pad
+ - txd2-skew-ps : Skew control of TX data 2 pad
+ - txd3-skew-ps : Skew control of TX data 3 pad
+
+Examples:
+
+ /* Attach to an Ethernet device with autodetected PHY */
+ &enet {
+ rxc-skew-ps = <3000>;
+ rxdv-skew-ps = <0>;
+ txc-skew-ps = <3000>;
+ txen-skew-ps = <0>;
+ status = "okay";
+ };
+
+ /* Attach to an explicitly-specified PHY */
+ mdio {
+ phy0: ethernet-phy@0 {
+ rxc-skew-ps = <3000>;
+ rxdv-skew-ps = <0>;
+ txc-skew-ps = <3000>;
+ txen-skew-ps = <0>;
+ reg = <0>;
+ };
+ };
+ ethernet@70000 {
+ status = "okay";
+ phy = <&phy0>;
+ phy-mode = "rgmii-id";
+ };
diff --git a/Documentation/devicetree/bindings/net/moxa,moxart-mac.txt b/Documentation/devicetree/bindings/net/moxa,moxart-mac.txt
new file mode 100644
index 00000000000..583418b2c12
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/moxa,moxart-mac.txt
@@ -0,0 +1,21 @@
+MOXA ART Ethernet Controller
+
+Required properties:
+
+- compatible : Must be "moxa,moxart-mac"
+- reg : Should contain register location and length
+- interrupts : Should contain the mac interrupt number
+
+Example:
+
+ mac0: mac@90900000 {
+ compatible = "moxa,moxart-mac";
+ reg = <0x90900000 0x100>;
+ interrupts = <25 0>;
+ };
+
+ mac1: mac@92000000 {
+ compatible = "moxa,moxart-mac";
+ reg = <0x92000000 0x100>;
+ interrupts = <27 0>;
+ };
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index 261c563b5f0..eba0e5e59eb 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -22,6 +22,11 @@ Required properties:
- snps,pbl Programmable Burst Length
- snps,fixed-burst Program the DMA to use the fixed burst mode
- snps,mixed-burst Program the DMA to use the mixed burst mode
+- snps,force_thresh_dma_mode Force DMA to use the threshold mode for
+ both tx and rx
+- snps,force_sf_dma_mode Force DMA to use the Store and Forward
+ mode for both tx and rx. This flag is
+ ignored if force_thresh_dma_mode is set.
Optional properties:
- mac-address: 6 bytes, mac address
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index e2371f5cdeb..eabcb4b5db6 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -18,6 +18,7 @@ Required properties:
- interrupt-map-mask and interrupt-map: standard PCI properties
to define the mapping of the PCIe interface to interrupt
numbers.
+- num-lanes: number of lanes to use
- reset-gpio: gpio pin number of power good signal
Example:
@@ -41,6 +42,7 @@ SoC specific DT Entry:
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0x0 0 &gic 53>;
+ num-lanes = <4>;
};
pcie@2a0000 {
@@ -60,6 +62,7 @@ SoC specific DT Entry:
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0x0 0 &gic 56>;
+ num-lanes = <4>;
};
Board specific DT Entry:
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
index aeb3c995cc0..1958ca9f9e5 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
@@ -127,21 +127,20 @@ whether there is any interaction between the child and intermediate parent
nodes, is again defined entirely by the binding for the individual pin
controller device.
-== Using generic pinconfig options ==
+== Generic pin configuration node content ==
-Generic pinconfig parameters can be used by defining a separate node containing
-the applicable parameters (and optional values), like:
+Many data items that are represented in a pin configuration node are common
+and generic. Pin control bindings should use the properties defined below
+where they are applicable; not all of these properties are relevant or useful
+for all hardware or binding structures. Each individual binding document
+should state which of these generic properties, if any, are used, and the
+structure of the DT nodes that contain these properties.
-pcfg_pull_up: pcfg_pull_up {
- bias-pull-up;
- drive-strength = <20>;
-};
-
-This node should then be referenced in the appropriate pinctrl node as a phandle
-and parsed in the driver using the pinconf_generic_parse_dt_config function.
-
-Supported configuration parameters are:
+Supported generic properties are:
+pins - the list of pins that properties in the node
+ apply to
+function - the mux function to select
bias-disable - disable any pin bias
bias-high-impedance - high impedance mode ("third-state", "floating")
bias-bus-hold - latch weakly
@@ -160,7 +159,21 @@ low-power-disable - disable low power mode
output-low - set the pin to output mode with low level
output-high - set the pin to output mode with high level
-Arguments for parameters:
+Some of the generic properties take arguments. For those that do, the
+arguments are described below.
+
+- pins takes a list of pin names or IDs as a required argument. The specific
+ binding for the hardware defines:
+ - Whether the entries are integers or strings, and their meaning.
+
+- function takes a list of function names/IDs as a required argument. The
+ specific binding for the hardware defines:
+ - Whether the entries are integers or strings, and their meaning.
+ - Whether only a single entry is allowed (which is applied to all entries
+ in the pins property), or whether there may alternatively be one entry per
+ entry in the pins property, in which case the list lengths must match, and
+ for each list index i, the function at list index i is applied to the pin
+ at list index i.
- bias-pull-up, -down and -pin-default take the pull strength in Ohm as an
optional argument on hardware supporting it. bias-disable will disable the pull.
@@ -170,7 +183,5 @@ Arguments for parameters:
- input-debounce takes the debounce time in usec as argument
or 0 to disable debouncing
-All parameters not listed here, do not take an argument.
-
More in-depth documentation on these parameters can be found in
<include/linux/pinctrl/pinconf-generic.h>
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt
new file mode 100644
index 00000000000..734d9b04d53
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-palmas.txt
@@ -0,0 +1,96 @@
+Palmas Pincontrol bindings
+
+The pins of the Palmas device can be muxed between different options and
+configured for pull up/down, open drain, etc.
+
+Required properties:
+- compatible: It must be one of the following:
+ - "ti,palmas-pinctrl" for the Palmas series pin controller.
+ - "ti,tps65913-pinctrl" for the Palmas series device TPS65913.
+ - "ti,tps80036-pinctrl" for the Palmas series device TPS80036.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+Palmas's pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+list of pins. This configuration can include the mux function to select on
+those pin(s), and various pin configuration parameters, such as pull-up,
+open drain.
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function.
+
+Optional properties:
+- ti,palmas-enable-dvfs1: Enable DVFS1. Configure pins for DVFS1 mode.
+ Selects the primary or secondary function associated with the
+ I2C2_SCL_SCE and I2C2_SDA_SDO pins/pads for the DVFS1 interface.
+- ti,palmas-enable-dvfs2: Enable DVFS2. Configure pins for DVFS2 mode.
+ Selects the primary or secondary function associated with the
+ GPADC_START and SYSEN2 pins/pads for the DVFS2 interface.
+
+This binding uses the following generic properties as defined in
+pinctrl-bindings.txt:
+
+Required: pins
+Optional: function, bias-disable, bias-pull-up, bias-pull-down,
+ bias-pin-default, drive-open-drain.
+
+Note that many of these properties are only valid for certain specific pins.
+See the Palmas device datasheet for complete details regarding which pins
+support which functionality.
+
+Valid values for pin names are:
+ gpio0, gpio1, gpio2, gpio3, gpio4, gpio5, gpio6, gpio7, gpio8, gpio9,
+ gpio10, gpio11, gpio12, gpio13, gpio14, gpio15, vac, powergood,
+ nreswarm, pwrdown, gpadc_start, reset_in, nsleep, enable1, enable2,
+ int.
+
+Valid values for function names are:
+ gpio, led, pwm, regen, sysen, clk32kgaudio, id, vbus_det, chrg_det,
+ vac, vacok, powergood, usb_psel, msecure, pwrhold, int, nreswarm,
+ simrsto, simrsti, low_vbat, wireless_chrg1, rcm, pwrdown, gpadc_start,
+ reset_in, nsleep, enable.
+
+There are 4 special functions: opt0, opt1, opt2 and opt3. If any of these
+functions is selected, the pin's register is written directly with 0, 1, 2
+or 3 respectively, provided that value is valid for the pin or list of pins.
+
+Example:
+ palmas: tps65913 {
+ ....
+ pinctrl {
+ compatible = "ti,tps65913-pinctrl";
+ ti,palmas-enable-dvfs1;
+ pinctrl-names = "default";
+ pinctrl-0 = <&palmas_pins_state>;
+
+ palmas_pins_state: pinmux {
+ gpio0 {
+ pins = "gpio0";
+ function = "id";
+ bias-pull-up;
+ };
+
+ vac {
+ pins = "vac";
+ function = "vacok";
+ bias-pull-down;
+ };
+
+ gpio5 {
+ pins = "gpio5";
+ function = "opt0";
+ drive-open-drain = <1>;
+ };
+ };
+ };
+ ....
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
index 36281e7a2a4..257677de3e6 100644
--- a/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/samsung-pinctrl.txt
@@ -12,6 +12,7 @@ Required Properties:
- "samsung,s3c2440-pinctrl": for S3C2440-compatible pin-controller,
- "samsung,s3c2450-pinctrl": for S3C2450-compatible pin-controller,
- "samsung,s3c64xx-pinctrl": for S3C64xx-compatible pin-controller,
+ - "samsung,s5pv210-pinctrl": for S5PV210-compatible pin-controller,
- "samsung,exynos4210-pinctrl": for Exynos4210 compatible pin-controller.
- "samsung,exynos4x12-pinctrl": for Exynos4x12 compatible pin-controller.
- "samsung,exynos5250-pinctrl": for Exynos5250 compatible pin-controller.
@@ -128,7 +129,7 @@ B. External Wakeup Interrupts: For supporting external wakeup interrupts, a
- samsung,s3c64xx-wakeup-eint: represents wakeup interrupt controller
found on Samsung S3C64xx SoCs,
- samsung,exynos4210-wakeup-eint: represents wakeup interrupt controller
- found on Samsung Exynos4210 SoC.
+ found on Samsung Exynos4210 and S5PC110/S5PV210 SoCs.
- interrupt-parent: phandle of the interrupt parent to which the external
wakeup interrupts are forwarded to.
- interrupts: interrupt used by multiplexed wakeup interrupts.
diff --git a/Documentation/devicetree/bindings/pwm/atmel-tcb-pwm.txt b/Documentation/devicetree/bindings/pwm/atmel-tcb-pwm.txt
index de0eaed8665..8031148bcf8 100644
--- a/Documentation/devicetree/bindings/pwm/atmel-tcb-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/atmel-tcb-pwm.txt
@@ -2,11 +2,9 @@ Atmel TCB PWM controller
Required properties:
- compatible: should be "atmel,tcb-pwm"
-- #pwm-cells: Should be 3. The first cell specifies the per-chip index
- of the PWM to use, the second cell is the period in nanoseconds and
- bit 0 in the third cell is used to encode the polarity of PWM output.
- Set bit 0 of the third cell in PWM specifier to 1 for inverse polarity &
- set to 0 for normal polarity.
+- #pwm-cells: should be 3. See pwm.txt in this directory for a description of
+ the cells format. The only third cell flag supported by this binding is
+ PWM_POLARITY_INVERTED.
- tc-block: The Timer Counter block to use as a PWM chip.
Example:
diff --git a/Documentation/devicetree/bindings/pwm/imx-pwm.txt b/Documentation/devicetree/bindings/pwm/imx-pwm.txt
index 8522bfbccfd..b50d7a6d9d7 100644
--- a/Documentation/devicetree/bindings/pwm/imx-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/imx-pwm.txt
@@ -3,8 +3,8 @@ Freescale i.MX PWM controller
Required properties:
- compatible: should be "fsl,<soc>-pwm"
- reg: physical base address and length of the controller's registers
-- #pwm-cells: should be 2. The first cell specifies the per-chip index
- of the PWM to use and the second cell is the period in nanoseconds.
+- #pwm-cells: should be 2. See pwm.txt in this directory for a description of
+ the cells format.
- interrupts: The interrupt for the pwm controller
Example:
diff --git a/Documentation/devicetree/bindings/pwm/mxs-pwm.txt b/Documentation/devicetree/bindings/pwm/mxs-pwm.txt
index 9e3f8f1d46a..96cdde5f620 100644
--- a/Documentation/devicetree/bindings/pwm/mxs-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/mxs-pwm.txt
@@ -3,8 +3,8 @@ Freescale MXS PWM controller
Required properties:
- compatible: should be "fsl,imx23-pwm"
- reg: physical base address and length of the controller's registers
-- #pwm-cells: should be 2. The first cell specifies the per-chip index
- of the PWM to use and the second cell is the period in nanoseconds.
+- #pwm-cells: should be 2. See pwm.txt in this directory for a description of
+ the cells format.
- fsl,pwm-number: the number of PWM devices
Example:
diff --git a/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt b/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt
index 01438ecd662..c3fc57af877 100644
--- a/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/nvidia,tegra20-pwm.txt
@@ -5,9 +5,8 @@ Required properties:
- "nvidia,tegra20-pwm"
- "nvidia,tegra30-pwm"
- reg: physical base address and length of the controller's registers
-- #pwm-cells: On Tegra the number of cells used to specify a PWM is 2. The
- first cell specifies the per-chip index of the PWM to use and the second
- cell is the period in nanoseconds.
+- #pwm-cells: should be 2. See pwm.txt in this directory for a description of
+ the cells format.
Example:
diff --git a/Documentation/devicetree/bindings/pwm/nxp,pca9685-pwm.txt b/Documentation/devicetree/bindings/pwm/nxp,pca9685-pwm.txt
index 1e3dfe7a489..f84ec9d291e 100644
--- a/Documentation/devicetree/bindings/pwm/nxp,pca9685-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/nxp,pca9685-pwm.txt
@@ -3,8 +3,8 @@ NXP PCA9685 16-channel 12-bit PWM LED controller
Required properties:
- compatible: "nxp,pca9685-pwm"
- - #pwm-cells: should be 2. The first cell specifies the per-chip index
- of the PWM to use and the second cell is the period in nanoseconds.
+ - #pwm-cells: Should be 2. See pwm.txt in this directory for a description of
+ the cells format.
The index 16 is the ALLCALL channel, that sets all PWM channels at the same
time.
diff --git a/Documentation/devicetree/bindings/pwm/pwm-samsung.txt b/Documentation/devicetree/bindings/pwm/pwm-samsung.txt
index ac67c687a32..4caa1a78863 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-samsung.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-samsung.txt
@@ -19,13 +19,9 @@ Required properties:
- reg: base address and size of register area
- interrupts: list of timer interrupts (one interrupt per timer, starting at
timer 0)
-- #pwm-cells: number of cells used for PWM specifier - must be 3
- the specifier format is as follows:
- - phandle to PWM controller node
- - index of PWM channel (from 0 to 4)
- - PWM signal period in nanoseconds
- - bitmask of optional PWM flags:
- 0x1 - invert PWM signal
+- #pwm-cells: should be 3. See pwm.txt in this directory for a description of
+ the cells format. The only third cell flag supported by this binding is
+ PWM_POLARITY_INVERTED.
Optional properties:
- samsung,pwm-outputs: list of PWM channels used as PWM outputs on particular
diff --git a/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt b/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt
index 681afad7377..fb81179dce3 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-tiecap.txt
@@ -4,11 +4,9 @@ Required properties:
- compatible: Must be "ti,<soc>-ecap".
for am33xx - compatible = "ti,am33xx-ecap";
for da850 - compatible = "ti,da850-ecap", "ti,am33xx-ecap";
-- #pwm-cells: Should be 3. Number of cells being used to specify PWM property.
- First cell specifies the per-chip index of the PWM to use, the second
- cell is the period in nanoseconds and bit 0 in the third cell is used to
- encode the polarity of PWM output. Set bit 0 of the third in PWM specifier
- to 1 for inverse polarity & set to 0 for normal polarity.
+- #pwm-cells: should be 3. See pwm.txt in this directory for a description of
+ the cells format. The PWM channel index ranges from 0 to 4. The only third
+ cell flag supported by this binding is PWM_POLARITY_INVERTED.
- reg: physical base address and size of the registers map.
Optional properties:
diff --git a/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt b/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt
index 337c6fc65d3..9c100b2c5b2 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-tiehrpwm.txt
@@ -4,11 +4,9 @@ Required properties:
- compatible: Must be "ti,<soc>-ehrpwm".
for am33xx - compatible = "ti,am33xx-ehrpwm";
for da850 - compatible = "ti,da850-ehrpwm", "ti,am33xx-ehrpwm";
-- #pwm-cells: Should be 3. Number of cells being used to specify PWM property.
- First cell specifies the per-chip index of the PWM to use, the second
- cell is the period in nanoseconds and bit 0 in the third cell is used to
- encode the polarity of PWM output. Set bit 0 of the third in PWM specifier
- to 1 for inverse polarity & set to 0 for normal polarity.
+- #pwm-cells: should be 3. See pwm.txt in this directory for a description of
+ the cells format. The only third cell flag supported by this binding is
+ PWM_POLARITY_INVERTED.
- reg: physical base address and size of the registers map.
Optional properties:
diff --git a/Documentation/devicetree/bindings/pwm/pwm.txt b/Documentation/devicetree/bindings/pwm/pwm.txt
index 06e67247859..8556263b850 100644
--- a/Documentation/devicetree/bindings/pwm/pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm.txt
@@ -43,13 +43,14 @@ because the name "backlight" would be used as fallback anyway.
pwm-specifier typically encodes the chip-relative PWM number and the PWM
period in nanoseconds.
-Optionally, the pwm-specifier can encode a number of flags in a third cell:
-- bit 0: PWM signal polarity (0: normal polarity, 1: inverse polarity)
+Optionally, the pwm-specifier can encode a number of flags (defined in
+<dt-bindings/pwm/pwm.h>) in a third cell:
+- PWM_POLARITY_INVERTED: invert the PWM signal polarity
Example with optional PWM specifier for inverse polarity
bl: backlight {
- pwms = <&pwm 0 5000000 1>;
+ pwms = <&pwm 0 5000000 PWM_POLARITY_INVERTED>;
pwm-names = "backlight";
};
diff --git a/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt
new file mode 100644
index 00000000000..b067e84a94b
--- /dev/null
+++ b/Documentation/devicetree/bindings/pwm/renesas,tpu-pwm.txt
@@ -0,0 +1,28 @@
+* Renesas R-Car Timer Pulse Unit PWM Controller
+
+Required Properties:
+
+ - compatible: should be one of the following.
+ - "renesas,tpu-r8a73a4": for R8A77A4 (R-Mobile APE6) compatible PWM controller.
+ - "renesas,tpu-r8a7740": for R8A7740 (R-Mobile A1) compatible PWM controller.
+ - "renesas,tpu-r8a7790": for R8A7790 (R-Car H2) compatible PWM controller.
+ - "renesas,tpu-sh7372": for SH7372 (SH-Mobile AP4) compatible PWM controller.
+ - "renesas,tpu": for generic R-Car TPU PWM controller.
+
+ - reg: Base address and length of each memory resource used by the PWM
+ controller hardware module.
+
+ - #pwm-cells: should be 3. See pwm.txt in this directory for a description of
+ the cells format. The only third cell flag supported by this binding is
+ PWM_POLARITY_INVERTED.
+
+Please refer to pwm.txt in this directory for details of the common PWM bindings
+used by client devices.
+
+Example: R8A7740 (R-Mobile A1) TPU controller node
+
+ tpu: pwm@e6600000 {
+ compatible = "renesas,tpu-r8a7740", "renesas,tpu";
+ reg = <0xe6600000 0x100>;
+ #pwm-cells = <3>;
+ };
diff --git a/Documentation/devicetree/bindings/pwm/spear-pwm.txt b/Documentation/devicetree/bindings/pwm/spear-pwm.txt
index 3ac779d8338..b486de2c3fe 100644
--- a/Documentation/devicetree/bindings/pwm/spear-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/spear-pwm.txt
@@ -5,9 +5,8 @@ Required properties:
- "st,spear320-pwm"
- "st,spear1340-pwm"
- reg: physical base address and length of the controller's registers
-- #pwm-cells: number of cells used to specify PWM which is fixed to 2 on
- SPEAr. The first cell specifies the per-chip index of the PWM to use and
- the second cell is the period in nanoseconds.
+- #pwm-cells: should be 2. See pwm.txt in this directory for a description of
+ the cells format.
Example:
diff --git a/Documentation/devicetree/bindings/pwm/ti,twl-pwm.txt b/Documentation/devicetree/bindings/pwm/ti,twl-pwm.txt
index 2943ee5fce0..4e32bee1120 100644
--- a/Documentation/devicetree/bindings/pwm/ti,twl-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/ti,twl-pwm.txt
@@ -6,8 +6,8 @@ On TWL6030 series: PWM0 and PWM1
Required properties:
- compatible: "ti,twl4030-pwm" or "ti,twl6030-pwm"
-- #pwm-cells: should be 2. The first cell specifies the per-chip index
- of the PWM to use and the second cell is the period in nanoseconds.
+- #pwm-cells: should be 2. See pwm.txt in this directory for a description of
+ the cells format.
Example:
diff --git a/Documentation/devicetree/bindings/pwm/ti,twl-pwmled.txt b/Documentation/devicetree/bindings/pwm/ti,twl-pwmled.txt
index cb64f3acc10..9f4b4609078 100644
--- a/Documentation/devicetree/bindings/pwm/ti,twl-pwmled.txt
+++ b/Documentation/devicetree/bindings/pwm/ti,twl-pwmled.txt
@@ -6,8 +6,8 @@ On TWL6030 series: LED PWM (mainly used as charging indicator LED)
Required properties:
- compatible: "ti,twl4030-pwmled" or "ti,twl6030-pwmled"
-- #pwm-cells: should be 2. The first cell specifies the per-chip index
- of the PWM to use and the second cell is the period in nanoseconds.
+- #pwm-cells: should be 2. See pwm.txt in this directory for a description of
+ the cells format.
Example:
diff --git a/Documentation/devicetree/bindings/pwm/vt8500-pwm.txt b/Documentation/devicetree/bindings/pwm/vt8500-pwm.txt
index d21d82d2985..a76390e6df2 100644
--- a/Documentation/devicetree/bindings/pwm/vt8500-pwm.txt
+++ b/Documentation/devicetree/bindings/pwm/vt8500-pwm.txt
@@ -3,11 +3,9 @@ VIA/Wondermedia VT8500/WM8xxx series SoC PWM controller
Required properties:
- compatible: should be "via,vt8500-pwm"
- reg: physical base address and length of the controller's registers
-- #pwm-cells: Should be 3. Number of cells being used to specify PWM property.
- First cell specifies the per-chip index of the PWM to use, the second
- cell is the period in nanoseconds and bit 0 in the third cell is used to
- encode the polarity of PWM output. Set bit 0 of the third in PWM specifier
- to 1 for inverse polarity & set to 0 for normal polarity.
+- #pwm-cells: should be 3. See pwm.txt in this directory for a description of
+ the cells format. The only third cell flag supported by this binding is
+ PWM_POLARITY_INVERTED.
- clocks: phandle to the PWM source clock
Example:
diff --git a/Documentation/devicetree/bindings/regulator/88pm800.txt b/Documentation/devicetree/bindings/regulator/88pm800.txt
new file mode 100644
index 00000000000..e8a54c2a582
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/88pm800.txt
@@ -0,0 +1,38 @@
+Marvell 88PM800 regulator
+
+Required properties:
+- compatible: "marvell,88pm800"
+- reg: I2C slave address
+- regulators: A node that houses a sub-node for each regulator within the
+ device. Each sub-node is identified using the node's name (or the deprecated
+ regulator-compatible property if present), with valid values listed below.
+ The content of each sub-node is defined by the standard binding for
+ regulators; see regulator.txt.
+
+The valid names for regulators are:
+
+ buck1, buck2, buck3, buck4, buck5, ldo1, ldo2, ldo3, ldo4, ldo5, ldo6, ldo7,
+ ldo8, ldo9, ldo10, ldo11, ldo12, ldo13, ldo14, ldo15, ldo16, ldo17, ldo18, ldo19
+
+Example:
+
+ pmic: 88pm800@31 {
+ compatible = "marvell,88pm800";
+ reg = <0x31>;
+
+ regulators {
+ buck1 {
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <3950000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+ ldo1 {
+ regulator-min-microvolt = <600000>;
+ regulator-max-microvolt = <15000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+...
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/max8660.txt b/Documentation/devicetree/bindings/regulator/max8660.txt
new file mode 100644
index 00000000000..8ba994d8a14
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/max8660.txt
@@ -0,0 +1,47 @@
+Maxim MAX8660 voltage regulator
+
+Required properties:
+- compatible: must be one of "maxim,max8660", "maxim,max8661"
+- reg: I2C slave address, usually 0x34
+- any required generic properties defined in regulator.txt
+
+Example:
+
+ i2c_master {
+ max8660@34 {
+ compatible = "maxim,max8660";
+ reg = <0x34>;
+
+ regulators {
+ regulator@0 {
+ regulator-compatible= "V3(DCDC)";
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ regulator@1 {
+ regulator-compatible= "V4(DCDC)";
+ regulator-min-microvolt = <725000>;
+ regulator-max-microvolt = <1800000>;
+ };
+
+ regulator@2 {
+ regulator-compatible= "V5(LDO)";
+ regulator-min-microvolt = <1700000>;
+ regulator-max-microvolt = <2000000>;
+ };
+
+ regulator@3 {
+ regulator-compatible= "V6(LDO)";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ regulator@4 {
+ regulator-compatible= "V7(LDO)";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/palmas-pmic.txt b/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
index d5a308629c5..a22e4c70db5 100644
--- a/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
+++ b/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
@@ -25,15 +25,14 @@ Optional nodes:
Additional custom properties are listed below.
For ti,palmas-pmic - smps12, smps123, smps3 depending on OTP,
- smps45, smps457, smps7 depending on variant, smps6, smps[8-10],
- ldo[1-9], ldoln, ldousb.
+ smps45, smps457, smps7 depending on variant, smps6, smps[8-9],
+ smps10_out2, smps10_out1, ldo[1-9], ldoln, ldousb.
Optional sub-node properties:
ti,warm-reset - maintain voltage during warm reset(boolean)
ti,roof-floor - control voltage selection by pin(boolean)
- ti,sleep-mode - mode to adopt in pmic sleep 0 - off, 1 - auto,
+ ti,mode-sleep - mode to adopt in pmic sleep 0 - off, 1 - auto,
2 - eco, 3 - forced pwm
- ti,tstep - slope control 0 - Jump, 1 10mV/us, 2 5mV/us, 3 2.5mV/us
ti,smps-range - OTP has the wrong range set for the hardware so override
0 - low range, 1 - high range.
@@ -59,7 +58,6 @@ pmic {
ti,warm-reset;
ti,roof-floor;
ti,mode-sleep = <0>;
- ti,tstep = <0>;
ti,smps-range = <1>;
};
diff --git a/Documentation/devicetree/bindings/regulator/pfuze100.txt b/Documentation/devicetree/bindings/regulator/pfuze100.txt
new file mode 100644
index 00000000000..fc989b2e805
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/pfuze100.txt
@@ -0,0 +1,115 @@
+PFUZE100 family of regulators
+
+Required properties:
+- compatible: "fsl,pfuze100"
+- reg: I2C slave address
+
+Required child node:
+- regulators: This is the list of child nodes that specify the regulator
+  initialization data for defined regulators. Please refer to
+  Documentation/devicetree/bindings/regulator/regulator.txt.
+
+ The valid names for regulators are:
+ sw1ab,sw1c,sw2,sw3a,sw3b,sw4,swbst,vsnvs,vrefddr,vgen1~vgen6
+
+Each regulator is defined using the standard binding for regulators.
+
+Example:
+
+ pmic: pfuze100@08 {
+ compatible = "fsl,pfuze100";
+ reg = <0x08>;
+
+ regulators {
+ sw1a_reg: sw1ab {
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1875000>;
+ regulator-boot-on;
+ regulator-always-on;
+ regulator-ramp-delay = <6250>;
+ };
+
+ sw1c_reg: sw1c {
+ regulator-min-microvolt = <300000>;
+ regulator-max-microvolt = <1875000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw2_reg: sw2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3a_reg: sw3a {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1975000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw3b_reg: sw3b {
+ regulator-min-microvolt = <400000>;
+ regulator-max-microvolt = <1975000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ sw4_reg: sw4 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ swbst_reg: swbst {
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5150000>;
+ };
+
+ snvs_reg: vsnvs {
+ regulator-min-microvolt = <1000000>;
+ regulator-max-microvolt = <3000000>;
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vref_reg: vrefddr {
+ regulator-boot-on;
+ regulator-always-on;
+ };
+
+ vgen1_reg: vgen1 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1550000>;
+ };
+
+ vgen2_reg: vgen2 {
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1550000>;
+ };
+
+ vgen3_reg: vgen3 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ };
+
+ vgen4_reg: vgen4 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen5_reg: vgen5 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ vgen6_reg: vgen6 {
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/regulator/regulator.txt b/Documentation/devicetree/bindings/regulator/regulator.txt
index 48a3b8e5d6b..2bd8f097876 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/regulator.txt
@@ -12,6 +12,8 @@ Optional properties:
- regulator-allow-bypass: allow the regulator to go into bypass mode
- <name>-supply: phandle to the parent supply/regulator node
- regulator-ramp-delay: ramp delay for regulator(in uV/uS)
+  For hardware which supports disabling the ramp rate, this property should
+  be explicitly initialised to zero (regulator-ramp-delay = <0>) to disable
+  the ramp delay.
Deprecated properties:
- regulator-compatible: If a regulator chip contains multiple
diff --git a/Documentation/devicetree/bindings/tty/serial/arc-uart.txt b/Documentation/devicetree/bindings/serial/arc-uart.txt
index 5cae2eb686f..5cae2eb686f 100644
--- a/Documentation/devicetree/bindings/tty/serial/arc-uart.txt
+++ b/Documentation/devicetree/bindings/serial/arc-uart.txt
diff --git a/Documentation/devicetree/bindings/tty/serial/atmel-usart.txt b/Documentation/devicetree/bindings/serial/atmel-usart.txt
index a49d9a1d4cc..2191dcb9f1d 100644
--- a/Documentation/devicetree/bindings/tty/serial/atmel-usart.txt
+++ b/Documentation/devicetree/bindings/serial/atmel-usart.txt
@@ -10,13 +10,18 @@ Required properties:
Optional properties:
- atmel,use-dma-rx: use of PDC or DMA for receiving data
- atmel,use-dma-tx: use of PDC or DMA for transmitting data
+- DMA bindings for DMA transfers:
+  - dmas: DMA specifier, consisting of a phandle to the DMA controller node,
+    the memory peripheral interface and USART DMA channel ID, and the FIFO
+    configuration. Refer to dma.txt and atmel-dma.txt for details.
+  - dma-names: "rx" for the RX channel, "tx" for the TX channel.
<chip> compatible description:
- at91rm9200: legacy USART support
- at91sam9260: generic USART implementation for SAM9 SoCs
Example:
-
+- use PDC:
usart0: serial@fff8c000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xfff8c000 0x4000>;
@@ -25,3 +30,14 @@ Example:
atmel,use-dma-tx;
};
+- use DMA:
+ usart0: serial@f001c000 {
+ compatible = "atmel,at91sam9260-usart";
+ reg = <0xf001c000 0x100>;
+ interrupts = <12 4 5>;
+ atmel,use-dma-rx;
+ atmel,use-dma-tx;
+ dmas = <&dma0 2 0x3>,
+ <&dma0 2 0x204>;
+ dma-names = "tx", "rx";
+ };
diff --git a/Documentation/devicetree/bindings/tty/serial/efm32-uart.txt b/Documentation/devicetree/bindings/serial/efm32-uart.txt
index 8e080b893b4..8e080b893b4 100644
--- a/Documentation/devicetree/bindings/tty/serial/efm32-uart.txt
+++ b/Documentation/devicetree/bindings/serial/efm32-uart.txt
diff --git a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
index c58573b5b1a..35ae1fb3537 100644
--- a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
+++ b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
@@ -1,35 +1,29 @@
-* Freescale i.MX UART controller
+* Freescale i.MX Universal Asynchronous Receiver/Transmitter (UART)
Required properties:
-- compatible : should be "fsl,imx21-uart"
+- compatible : Should be "fsl,<soc>-uart"
- reg : Address and length of the register set for the device
-- interrupts : Should contain UART interrupt number
+- interrupts : Should contain uart interrupt
Optional properties:
-- fsl,uart-has-rtscts: indicate that RTS/CTS signals are used
+- fsl,uart-has-rtscts : Indicate the uart has rts and cts
+- fsl,irda-mode : Indicate the uart supports irda mode
+- fsl,dte-mode : Indicate the uart works in DTE mode. The uart works
+  in DCE mode by default.
Note: Each uart controller should have an alias correctly numbered
in "aliases" node.
Example:
-- From imx51.dtsi:
aliases {
serial0 = &uart1;
- serial1 = &uart2;
- serial2 = &uart3;
};
uart1: serial@73fbc000 {
compatible = "fsl,imx51-uart", "fsl,imx21-uart";
reg = <0x73fbc000 0x4000>;
interrupts = <31>;
- status = "disabled";
-}
-
-- From imx51-babbage.dts:
-uart1: serial@73fbc000 {
fsl,uart-has-rtscts;
- status = "okay";
+ fsl,dte-mode;
};
-
diff --git a/Documentation/devicetree/bindings/tty/serial/fsl-lpuart.txt b/Documentation/devicetree/bindings/serial/fsl-lpuart.txt
index 6fd1dd1638d..6fd1dd1638d 100644
--- a/Documentation/devicetree/bindings/tty/serial/fsl-lpuart.txt
+++ b/Documentation/devicetree/bindings/serial/fsl-lpuart.txt
diff --git a/Documentation/devicetree/bindings/tty/serial/fsl-mxs-auart.txt b/Documentation/devicetree/bindings/serial/fsl-mxs-auart.txt
index 2c00ec64628..59a40f18d55 100644
--- a/Documentation/devicetree/bindings/tty/serial/fsl-mxs-auart.txt
+++ b/Documentation/devicetree/bindings/serial/fsl-mxs-auart.txt
@@ -10,6 +10,10 @@ Required properties:
Refer to dma.txt and fsl-mxs-dma.txt for details.
- dma-names: "rx" for RX channel, "tx" for TX channel.
+Optional properties:
+- fsl,uart-has-rtscts : Indicate the UART has RTS and CTS lines;
+  enabling it also enables DMA support for this UART.
+
Example:
auart0: serial@8006a000 {
compatible = "fsl,imx28-auart", "fsl,imx23-auart";
diff --git a/Documentation/devicetree/bindings/serial/mrvl,pxa-ssp.txt b/Documentation/devicetree/bindings/serial/mrvl,pxa-ssp.txt
new file mode 100644
index 00000000000..669b8140dd7
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/mrvl,pxa-ssp.txt
@@ -0,0 +1,65 @@
+Device tree bindings for Marvell PXA SSP ports
+
+Required properties:
+
+ - compatible: Must be one of
+ mrvl,pxa25x-ssp
+ mvrl,pxa25x-nssp
+ mrvl,pxa27x-ssp
+ mrvl,pxa3xx-ssp
+ mvrl,pxa168-ssp
+ mrvl,pxa910-ssp
+ mrvl,ce4100-ssp
+ mrvl,lpss-ssp
+
+ - reg: The memory base
+ - dmas: Two dma phandles, one for rx, one for tx
+ - dma-names: Must be "rx", "tx"
+
+
+Example for PXA3xx:
+
+ ssp0: ssp@41000000 {
+ compatible = "mrvl,pxa3xx-ssp";
+ reg = <0x41000000 0x40>;
+ ssp-id = <1>;
+ interrupts = <24>;
+ clock-names = "pxa27x-ssp.0";
+ dmas = <&dma 13
+ &dma 14>;
+ dma-names = "rx", "tx";
+ };
+
+ ssp1: ssp@41700000 {
+ compatible = "mrvl,pxa3xx-ssp";
+ reg = <0x41700000 0x40>;
+ ssp-id = <2>;
+ interrupts = <16>;
+ clock-names = "pxa27x-ssp.1";
+ dmas = <&dma 15
+ &dma 16>;
+ dma-names = "rx", "tx";
+ };
+
+ ssp2: ssp@41900000 {
+ compatible = "mrvl,pxa3xx-ssp";
+ reg = <0x41900000 0x40>;
+ ssp-id = <3>;
+ interrupts = <0>;
+ clock-names = "pxa27x-ssp.2";
+ dmas = <&dma 66
+ &dma 67>;
+ dma-names = "rx", "tx";
+ };
+
+ ssp3: ssp@41a00000 {
+ compatible = "mrvl,pxa3xx-ssp";
+ reg = <0x41a00000 0x40>;
+ ssp-id = <4>;
+ interrupts = <13>;
+ clock-names = "pxa27x-ssp.3";
+ dmas = <&dma 2
+ &dma 3>;
+ dma-names = "rx", "tx";
+ };
+
diff --git a/Documentation/devicetree/bindings/tty/serial/nxp-lpc32xx-hsuart.txt b/Documentation/devicetree/bindings/serial/nxp-lpc32xx-hsuart.txt
index 0d439dfc1aa..0d439dfc1aa 100644
--- a/Documentation/devicetree/bindings/tty/serial/nxp-lpc32xx-hsuart.txt
+++ b/Documentation/devicetree/bindings/serial/nxp-lpc32xx-hsuart.txt
diff --git a/Documentation/devicetree/bindings/tty/serial/of-serial.txt b/Documentation/devicetree/bindings/serial/of-serial.txt
index 1928a3e83cd..1928a3e83cd 100644
--- a/Documentation/devicetree/bindings/tty/serial/of-serial.txt
+++ b/Documentation/devicetree/bindings/serial/of-serial.txt
diff --git a/Documentation/devicetree/bindings/serial/qcom,msm-uart.txt b/Documentation/devicetree/bindings/serial/qcom,msm-uart.txt
new file mode 100644
index 00000000000..ce8c9016195
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/qcom,msm-uart.txt
@@ -0,0 +1,25 @@
+* MSM Serial UART
+
+The MSM serial UART hardware is designed for low-speed use cases where a
+dma-engine isn't needed. From a software perspective it's mostly compatible
+with the MSM serial UARTDM except that it only supports reading and writing one
+character at a time.
+
+Required properties:
+- compatible: Should contain "qcom,msm-uart"
+- reg: Should contain UART register location and length.
+- interrupts: Should contain UART interrupt.
+- clocks: Should contain the core clock.
+- clock-names: Should be "core".
+
+Example:
+
+A uart device at 0xa9c00000 with interrupt 11.
+
+serial@a9c00000 {
+ compatible = "qcom,msm-uart";
+ reg = <0xa9c00000 0x1000>;
+ interrupts = <11>;
+ clocks = <&uart_cxc>;
+ clock-names = "core";
+};
diff --git a/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt b/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt
new file mode 100644
index 00000000000..ffa5b784c66
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/qcom,msm-uartdm.txt
@@ -0,0 +1,53 @@
+* MSM Serial UARTDM
+
+The MSM serial UARTDM hardware is designed for high-speed use cases where the
+transmit and/or receive channels can be offloaded to a dma-engine. From a
+software perspective it's mostly compatible with the MSM serial UART except
+that it supports reading and writing multiple characters at a time.
+
+Required properties:
+- compatible: Should contain at least "qcom,msm-uartdm".
+ A more specific property should be specified as follows depending
+ on the version:
+ "qcom,msm-uartdm-v1.1"
+ "qcom,msm-uartdm-v1.2"
+ "qcom,msm-uartdm-v1.3"
+ "qcom,msm-uartdm-v1.4"
+- reg: Should contain UART register locations and lengths. The first
+ register shall specify the main control registers. An optional second
+ register location shall specify the GSBI control region.
+ "qcom,msm-uartdm-v1.3" is the only compatible value that might
+ need the GSBI control region.
+- interrupts: Should contain UART interrupt.
+- clocks: Should contain the core clock and the AHB clock.
+- clock-names: Should be "core" for the core clock and "iface" for the
+ AHB clock.
+
+Optional properties:
+- dmas: Should contain dma specifiers for transmit and receive channels
+- dma-names: Should contain "tx" for transmit and "rx" for receive channels
+
+Examples:
+
+A uartdm v1.4 device with dma capabilities.
+
+serial@f991e000 {
+ compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
+ reg = <0xf991e000 0x1000>;
+ interrupts = <0 108 0x0>;
+ clocks = <&blsp1_uart2_apps_cxc>, <&blsp1_ahb_cxc>;
+ clock-names = "core", "iface";
+ dmas = <&dma0 0>, <&dma0 1>;
+ dma-names = "tx", "rx";
+};
+
+A uartdm v1.3 device without dma capabilities and part of a GSBI complex.
+
+serial@19c40000 {
+ compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
+ reg = <0x19c40000 0x1000>,
+ <0x19c00000 0x1000>;
+ interrupts = <0 195 0x0>;
+ clocks = <&gsbi5_uart_cxc>, <&gsbi5_ahb_cxc>;
+ clock-names = "core", "iface";
+};
diff --git a/Documentation/devicetree/bindings/serial/sirf-uart.txt b/Documentation/devicetree/bindings/serial/sirf-uart.txt
new file mode 100644
index 00000000000..a2dfc6522a9
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/sirf-uart.txt
@@ -0,0 +1,33 @@
+* CSR SiRFprimaII/atlasVI Universal Synchronous Asynchronous Receiver/Transmitter *
+
+Required properties:
+- compatible : Should be "sirf,prima2-uart" or "sirf, prima2-usp-uart"
+- reg : Offset and length of the register set for the device
+- interrupts : Should contain uart interrupt
+- fifosize : Should define hardware rx/tx fifo size
+- clocks : Should contain uart clock number
+
+Optional properties:
+- sirf,uart-has-rtscts: the hardware has RTS/CTS flow control pins
+- rts-gpios: RTS pin for USP-based UART if sirf,uart-has-rtscts is true
+- cts-gpios: CTS pin for USP-based UART if sirf,uart-has-rtscts is true
+
+Example:
+
+uart0: uart@b0050000 {
+ cell-index = <0>;
+ compatible = "sirf,prima2-uart";
+ reg = <0xb0050000 0x1000>;
+ interrupts = <17>;
+ fifosize = <128>;
+ clocks = <&clks 13>;
+};
+
+In the board-specific dts, rts-gpios and cts-gpios can be added like
+
+usp@b0090000 {
+ compatible = "sirf,prima2-usp-uart";
+ sirf,uart-has-rtscts;
+ rts-gpios = <&gpio 15 0>;
+ cts-gpios = <&gpio 46 0>;
+};
diff --git a/Documentation/devicetree/bindings/tty/serial/snps-dw-apb-uart.txt b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt
index f13f1c5be91..f13f1c5be91 100644
--- a/Documentation/devicetree/bindings/tty/serial/snps-dw-apb-uart.txt
+++ b/Documentation/devicetree/bindings/serial/snps-dw-apb-uart.txt
diff --git a/Documentation/devicetree/bindings/serial/st-asc.txt b/Documentation/devicetree/bindings/serial/st-asc.txt
new file mode 100644
index 00000000000..75d877f5968
--- /dev/null
+++ b/Documentation/devicetree/bindings/serial/st-asc.txt
@@ -0,0 +1,18 @@
+* st-asc (Serial Port)
+
+Required properties:
+- compatible : Should be "st,asc".
+- reg, reg-names, interrupts, interrupt-names : Standard way to define device
+  resources with names. Look in
+ Documentation/devicetree/bindings/resource-names.txt
+
+Optional properties:
+- st,hw-flow-ctrl : bool flag to enable hardware flow control.
+- st,force-m1 : bool flag to force the asc to be in Mode-1, recommended
+  for high bit rates (above 19.2K).
+Example:
+serial@fe440000{
+ compatible = "st,asc";
+ reg = <0xfe440000 0x2c>;
+ interrupts = <0 209 0>;
+};
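+
+With both optional properties enabled, the same node might look like this
+(an illustrative sketch only, not taken from a real board):
+
+serial@fe440000{
+	compatible = "st,asc";
+	reg = <0xfe440000 0x2c>;
+	interrupts = <0 209 0>;
+	st,hw-flow-ctrl;
+	st,force-m1;
+};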
diff --git a/Documentation/devicetree/bindings/tty/serial/via,vt8500-uart.txt b/Documentation/devicetree/bindings/serial/via,vt8500-uart.txt
index 5feef1ef167..5feef1ef167 100644
--- a/Documentation/devicetree/bindings/tty/serial/via,vt8500-uart.txt
+++ b/Documentation/devicetree/bindings/serial/via,vt8500-uart.txt
diff --git a/Documentation/devicetree/bindings/sound/ak4554.c b/Documentation/devicetree/bindings/sound/ak4554.c
new file mode 100644
index 00000000000..934fa02754b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ak4554.c
@@ -0,0 +1,11 @@
+AK4554 ADC/DAC
+
+Required properties:
+
+ - compatible : "asahi-kasei,ak4554"
+
+Example:
+
+ak4554-adc-dac {
+ compatible = "asahi-kasei,ak4554";
+};
diff --git a/Documentation/devicetree/bindings/sound/alc5632.txt b/Documentation/devicetree/bindings/sound/alc5632.txt
index 8608f747dcf..ffd886d110b 100644
--- a/Documentation/devicetree/bindings/sound/alc5632.txt
+++ b/Documentation/devicetree/bindings/sound/alc5632.txt
@@ -13,6 +13,25 @@ Required properties:
- #gpio-cells : Should be two. The first cell is the pin number and the
second cell is used to specify optional parameters (currently unused).
+Pins on the device (for linking into audio routes):
+
+ * SPK_OUTP
+ * SPK_OUTN
+ * HP_OUT_L
+ * HP_OUT_R
+ * AUX_OUT_P
+ * AUX_OUT_N
+ * LINE_IN_L
+ * LINE_IN_R
+ * PHONE_P
+ * PHONE_N
+ * MIC1_P
+ * MIC1_N
+ * MIC2_P
+ * MIC2_N
+ * MICBIAS1
+ * DMICDAT
+
Example:
alc5632: alc5632@1e {
diff --git a/Documentation/devicetree/bindings/sound/atmel-sam9x5-wm8731-audio.txt b/Documentation/devicetree/bindings/sound/atmel-sam9x5-wm8731-audio.txt
new file mode 100644
index 00000000000..0720857089a
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/atmel-sam9x5-wm8731-audio.txt
@@ -0,0 +1,35 @@
+* Atmel at91sam9x5ek wm8731 audio complex
+
+Required properties:
+ - compatible: "atmel,sam9x5-wm8731-audio"
+ - atmel,model: The user-visible name of this sound complex.
+ - atmel,ssc-controller: The phandle of the SSC controller
+ - atmel,audio-codec: The phandle of the WM8731 audio codec
+ - atmel,audio-routing: A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the connection's sink,
+ the second being the connection's source.
+
+Available audio endpoints for the audio-routing table:
+
+Board connectors:
+ * Headphone Jack
+ * Line In Jack
+
+wm8731 pins:
+cf Documentation/devicetree/bindings/sound/wm8731.txt
+
+Example:
+sound {
+ compatible = "atmel,sam9x5-wm8731-audio";
+
+ atmel,model = "wm8731 @ AT91SAM9X5EK";
+
+ atmel,audio-routing =
+ "Headphone Jack", "RHPOUT",
+ "Headphone Jack", "LHPOUT",
+ "LLINEIN", "Line In Jack",
+ "RLINEIN", "Line In Jack";
+
+ atmel,ssc-controller = <&ssc0>;
+ atmel,audio-codec = <&wm8731>;
+};
diff --git a/Documentation/devicetree/bindings/sound/atmel-wm8904.txt b/Documentation/devicetree/bindings/sound/atmel-wm8904.txt
new file mode 100644
index 00000000000..8bbe50c884b
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/atmel-wm8904.txt
@@ -0,0 +1,55 @@
+Atmel ASoC driver with wm8904 audio codec complex
+
+Required properties:
+ - compatible: "atmel,asoc-wm8904"
+ - atmel,model: The user-visible name of this sound complex.
+ - atmel,audio-routing: A list of the connections between audio components.
+ Each entry is a pair of strings, the first being the connection's sink,
+ the second being the connection's source. Valid names for sources and
+ sinks are the WM8904's pins, and the jacks on the board:
+
+ WM8904 pins:
+
+ * IN1L
+ * IN1R
+ * IN2L
+ * IN2R
+ * IN3L
+ * IN3R
+ * HPOUTL
+ * HPOUTR
+ * LINEOUTL
+ * LINEOUTR
+ * MICBIAS
+
+ Board connectors:
+
+ * Headphone Jack
+ * Line In Jack
+ * Mic
+
+ - atmel,ssc-controller: The phandle of the SSC controller
+ - atmel,audio-codec: The phandle of the WM8904 audio codec
+
+Optional properties:
+ - pinctrl-names, pinctrl-0: Please refer to pinctrl-bindings.txt
+
+Example:
+sound {
+ compatible = "atmel,asoc-wm8904";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_pck0_as_mck>;
+
+ atmel,model = "wm8904 @ AT91SAM9N12EK";
+
+ atmel,audio-routing =
+ "Headphone Jack", "HPOUTL",
+ "Headphone Jack", "HPOUTR",
+ "IN2L", "Line In Jack",
+ "IN2R", "Line In Jack",
+ "Mic", "MICBIAS",
+ "IN1L", "Mic";
+
+ atmel,ssc-controller = <&ssc0>;
+ atmel,audio-codec = <&wm8904>;
+};
diff --git a/Documentation/devicetree/bindings/sound/fsl,spdif.txt b/Documentation/devicetree/bindings/sound/fsl,spdif.txt
new file mode 100644
index 00000000000..f2ae335670f
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/fsl,spdif.txt
@@ -0,0 +1,54 @@
+Freescale Sony/Philips Digital Interface Format (S/PDIF) Controller
+
+The Freescale S/PDIF audio block is a stereo transceiver that allows the
+processor to receive and transmit digital audio via a coaxial cable or
+a fibre cable.
+
+Required properties:
+
+ - compatible : Compatible list, must contain "fsl,imx35-spdif".
+
+ - reg : Offset and length of the register set for the device.
+
+ - interrupts : Contains the spdif interrupt.
+
+ - dmas : Generic dma devicetree binding as described in
+ Documentation/devicetree/bindings/dma/dma.txt.
+
+ - dma-names : Two dmas have to be defined, "tx" and "rx".
+
+ - clocks : Contains an entry for each entry in clock-names.
+
+ - clock-names : Includes the following entries:
+ "core" The core clock of spdif controller
+ "rxtx<0-7>" Clock source list for tx and rx clock.
+ This clock list should be identical to
+ the source list connecting to the spdif
+ clock mux in "SPDIF Transceiver Clock
+ Diagram" of SoC reference manual. It
+ can also be referred to TxClk_Source
+ bit of register SPDIF_STC.
+
+Example:
+
+spdif: spdif@02004000 {
+ compatible = "fsl,imx35-spdif";
+ reg = <0x02004000 0x4000>;
+ interrupts = <0 52 0x04>;
+ dmas = <&sdma 14 18 0>,
+ <&sdma 15 18 0>;
+ dma-names = "rx", "tx";
+
+ clocks = <&clks 197>, <&clks 3>,
+ <&clks 197>, <&clks 107>,
+ <&clks 0>, <&clks 118>,
+ <&clks 62>, <&clks 139>,
+ <&clks 0>;
+ clock-names = "core", "rxtx0",
+ "rxtx1", "rxtx2",
+ "rxtx3", "rxtx4",
+ "rxtx5", "rxtx6",
+ "rxtx7";
+
+ status = "okay";
+};
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/ssi.txt b/Documentation/devicetree/bindings/sound/fsl,ssi.txt
index 5ff76c9c57d..4303b6ab620 100644
--- a/Documentation/devicetree/bindings/powerpc/fsl/ssi.txt
+++ b/Documentation/devicetree/bindings/sound/fsl,ssi.txt
@@ -43,10 +43,22 @@ Required properties:
together. This would still allow different sample sizes,
but not different sample rates.
+If AC97 is used, the AC97 link bindings are also required. See
+Documentation/devicetree/bindings/sound/soc-ac97link.txt for the necessary
+bindings.
+
Optional properties:
- codec-handle: Phandle to a 'codec' node that defines an audio
codec connected to this SSI. This node is typically
a child of an I2C or other control node.
+- fsl,fiq-stream-filter: Bool property. Disable DMA and use the FIQ instead to
+ filter the codec stream. This is necessary for some boards
+ where an incompatible codec is connected to this SSI, e.g.
+ on pca100 and pcm043.
+- dmas: Generic dma devicetree binding as described in
+ Documentation/devicetree/bindings/dma/dma.txt.
+- dma-names: Two dmas have to be defined, "tx" and "rx", if fsl,imx-fiq
+ is not defined.
Child 'codec' node required properties:
- compatible: Compatible list, contains the name of the codec
diff --git a/Documentation/devicetree/bindings/sound/imx-audio-spdif.txt b/Documentation/devicetree/bindings/sound/imx-audio-spdif.txt
new file mode 100644
index 00000000000..7d13479f9c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/imx-audio-spdif.txt
@@ -0,0 +1,34 @@
+Freescale i.MX audio complex with S/PDIF transceiver
+
+Required properties:
+
+ - compatible : "fsl,imx-audio-spdif"
+
+ - model : The user-visible name of this sound complex
+
+ - spdif-controller : The phandle of the i.MX S/PDIF controller
+
+
+Optional properties:
+
+ - spdif-out : This is a boolean property. If present, the transmitting
+ function of S/PDIF will be enabled, indicating there's a physical
+   S/PDIF out connector/jack on the board or it is connected to some
+ other IP block, such as an HDMI encoder/display-controller.
+
+ - spdif-in : This is a boolean property. If present, the receiving
+ function of S/PDIF will be enabled, indicating there's a physical
+ S/PDIF in connector/jack on the board.
+
+* Note: At least one of these two properties should be set in the DT binding.
+
+
+Example:
+
+sound-spdif {
+ compatible = "fsl,imx-audio-spdif";
+ model = "imx-spdif";
+ spdif-controller = <&spdif>;
+ spdif-out;
+ spdif-in;
+};
diff --git a/Documentation/devicetree/bindings/sound/imx-audmux.txt b/Documentation/devicetree/bindings/sound/imx-audmux.txt
index 215aa981721..f88a00e54c6 100644
--- a/Documentation/devicetree/bindings/sound/imx-audmux.txt
+++ b/Documentation/devicetree/bindings/sound/imx-audmux.txt
@@ -5,6 +5,15 @@ Required properties:
or "fsl,imx31-audmux" for the version firstly used on i.MX31.
- reg : Should contain AUDMUX registers location and length
+An initial configuration can be set up using child nodes.
+
+Required properties of optional child nodes:
+- fsl,audmux-port : Integer of the audmux port that is configured by this
+ child node.
+- fsl,port-config : List of configuration options for the specific port. For
+ imx31-audmux and above, it is a list of tuples <ptcr pdcr>. For
+ imx21-audmux it is a list of pcr values.
+
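+A child node configuring audmux port 0 might look like the sketch below
+(the port number and the ptcr/pdcr values are placeholders, not a
+recommended configuration):
+
+	mux-ssi0 {
+		fsl,audmux-port = <0>;
+		fsl,port-config = <0x80000000 0x00000000>;
+	};
+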
Example:
audmux@021d8000 {
diff --git a/Documentation/devicetree/bindings/sound/mrvl,pxa-ssp.txt b/Documentation/devicetree/bindings/sound/mrvl,pxa-ssp.txt
new file mode 100644
index 00000000000..74c9ba6c282
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mrvl,pxa-ssp.txt
@@ -0,0 +1,28 @@
+Marvell PXA SSP CPU DAI bindings
+
+Required properties:
+
+ compatible Must be "mrvl,pxa-ssp-dai"
+ port A phandle reference to a PXA ssp upstream device
+
+Example:
+
+ /* upstream device */
+
+ ssp0: ssp@41000000 {
+ compatible = "mrvl,pxa3xx-ssp";
+ reg = <0x41000000 0x40>;
+ interrupts = <24>;
+ clock-names = "pxa27x-ssp.0";
+ dmas = <&dma 13
+ &dma 14>;
+ dma-names = "rx", "tx";
+ };
+
+ /* DAI as user */
+
+ ssp_dai0: ssp_dai@0 {
+ compatible = "mrvl,pxa-ssp-dai";
+ port = <&ssp0>;
+ };
+
diff --git a/Documentation/devicetree/bindings/sound/mrvl,pxa2xx-pcm.txt b/Documentation/devicetree/bindings/sound/mrvl,pxa2xx-pcm.txt
new file mode 100644
index 00000000000..551fbb8348c
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mrvl,pxa2xx-pcm.txt
@@ -0,0 +1,15 @@
+DT bindings for ARM PXA2xx PCM platform driver
+
+This is just a dummy driver that registers the PXA ASoC platform driver.
+It does not have any resources assigned.
+
+Required properties:
+
+ - compatible 'mrvl,pxa-pcm-audio'
+
+Example:
+
+ pxa_pcm_audio: snd_soc_pxa_audio {
+ compatible = "mrvl,pxa-pcm-audio";
+ };
+
diff --git a/Documentation/devicetree/bindings/sound/mvebu-audio.txt b/Documentation/devicetree/bindings/sound/mvebu-audio.txt
new file mode 100644
index 00000000000..7e5fd37c1b3
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/mvebu-audio.txt
@@ -0,0 +1,29 @@
+* mvebu (Kirkwood, Dove, Armada 370) audio controller
+
+Required properties:
+
+- compatible: "marvell,mvebu-audio"
+
+- reg: physical base address of the controller and length of memory mapped
+ region.
+
+- interrupts: list of two irq numbers.
+ The first irq is used for data flow and the second one is used for errors.
+
+- clocks: one or two phandles.
+ The first one is mandatory and defines the internal clock.
+ The second one is optional and defines an external clock.
+
+- clock-names: names associated with the clocks:
+ "internal" for the internal clock
+ "extclk" for the external clock
+
+Example:
+
+i2s1: audio-controller@b4000 {
+ compatible = "marvell,mvebu-audio";
+ reg = <0xb4000 0x2210>;
+ interrupts = <21>, <22>;
+ clocks = <&gate_clk 13>;
+ clock-names = "internal";
+};
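+
+A board that also feeds the controller from an external clock might extend
+the example as follows (the &extclk_audio phandle is a placeholder):
+
+i2s1: audio-controller@b4000 {
+	compatible = "marvell,mvebu-audio";
+	reg = <0xb4000 0x2210>;
+	interrupts = <21>, <22>;
+	clocks = <&gate_clk 13>, <&extclk_audio>;
+	clock-names = "internal", "extclk";
+};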
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-alc5632.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-alc5632.txt
index 05ffecb5710..8b8903ef080 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-alc5632.txt
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-alc5632.txt
@@ -11,28 +11,8 @@ Required properties:
- nvidia,audio-routing : A list of the connections between audio components.
Each entry is a pair of strings, the first being the connection's sink,
the second being the connection's source. Valid names for sources and
- sinks are the ALC5632's pins:
-
- ALC5632 pins:
-
- * SPK_OUTP
- * SPK_OUTN
- * HP_OUT_L
- * HP_OUT_R
- * AUX_OUT_P
- * AUX_OUT_N
- * LINE_IN_L
- * LINE_IN_R
- * PHONE_P
- * PHONE_N
- * MIC1_P
- * MIC1_N
- * MIC2_P
- * MIC2_N
- * MICBIAS1
- * DMICDAT
-
- Board connectors:
+ sinks are the ALC5632's pins as documented in the binding for the device
+ and:
* Headset Stereophone
* Int Spk
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5640.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5640.txt
index d130818700b..dc6224994d6 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5640.txt
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-rt5640.txt
@@ -11,32 +11,12 @@ Required properties:
- nvidia,audio-routing : A list of the connections between audio components.
Each entry is a pair of strings, the first being the connection's sink,
the second being the connection's source. Valid names for sources and
- sinks are the RT5640's pins, and the jacks on the board:
-
- RT5640 pins:
-
- * DMIC1
- * DMIC2
- * MICBIAS1
- * IN1P
- * IN1R
- * IN2P
- * IN2R
- * HPOL
- * HPOR
- * LOUTL
- * LOUTR
- * MONOP
- * MONON
- * SPOLP
- * SPOLN
- * SPORP
- * SPORN
-
- Board connectors:
+ sinks are the RT5640's pins (as documented in its binding), and the jacks
+ on the board:
* Headphones
* Speakers
+ * Mic Jack
- nvidia,i2s-controller : The phandle of the Tegra I2S controller that's
connected to the CODEC.
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8753.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8753.txt
index d14510613a7..aab6ce0ad2f 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8753.txt
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8753.txt
@@ -11,31 +11,8 @@ Required properties:
- nvidia,audio-routing : A list of the connections between audio components.
Each entry is a pair of strings, the first being the connection's sink,
the second being the connection's source. Valid names for sources and
- sinks are the WM8753's pins, and the jacks on the board:
-
- WM8753 pins:
-
- * LOUT1
- * LOUT2
- * ROUT1
- * ROUT2
- * MONO1
- * MONO2
- * OUT3
- * OUT4
- * LINE1
- * LINE2
- * RXP
- * RXN
- * ACIN
- * ACOP
- * MIC1N
- * MIC1
- * MIC2N
- * MIC2
- * Mic Bias
-
- Board connectors:
+ sinks are the WM8753's pins as documented in the binding for the WM8753,
+ and the jacks on the board:
* Headphone Jack
* Mic Jack
diff --git a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt
index 3bf722deb72..4b44dfb6ca0 100644
--- a/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt
+++ b/Documentation/devicetree/bindings/sound/nvidia,tegra-audio-wm8903.txt
@@ -11,28 +11,8 @@ Required properties:
- nvidia,audio-routing : A list of the connections between audio components.
Each entry is a pair of strings, the first being the connection's sink,
the second being the connection's source. Valid names for sources and
- sinks are the WM8903's pins, and the jacks on the board:
-
- WM8903 pins:
-
- * IN1L
- * IN1R
- * IN2L
- * IN2R
- * IN3L
- * IN3R
- * DMICDAT
- * HPOUTL
- * HPOUTR
- * LINEOUTL
- * LINEOUTR
- * LOP
- * LON
- * ROP
- * RON
- * MICBIAS
-
- Board connectors:
+ sinks are the WM8903's pins (documented in the WM8903 binding document),
+ and the jacks on the board:
* Headphone Jack
* Int Spk
diff --git a/Documentation/devicetree/bindings/sound/pcm1792a.txt b/Documentation/devicetree/bindings/sound/pcm1792a.txt
new file mode 100644
index 00000000000..970ba1ed576
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/pcm1792a.txt
@@ -0,0 +1,18 @@
+Texas Instruments pcm1792a DT bindings
+
+This driver supports the SPI bus.
+
+Required properties:
+
+ - compatible: "ti,pcm1792a"
+
+For required properties on SPI, please consult
+Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Examples:
+
+ codec_spi: 1792a@0 {
+ compatible = "ti,pcm1792a";
+ spi-max-frequency = <600000>;
+ };
+
diff --git a/Documentation/devicetree/bindings/sound/rt5640.txt b/Documentation/devicetree/bindings/sound/rt5640.txt
index 005bcb24d72..068a1141b06 100644
--- a/Documentation/devicetree/bindings/sound/rt5640.txt
+++ b/Documentation/devicetree/bindings/sound/rt5640.txt
@@ -18,6 +18,26 @@ Optional properties:
- realtek,ldo1-en-gpios : The GPIO that controls the CODEC's LDO1_EN pin.
+Pins on the device (for linking into audio routes):
+
+ * DMIC1
+ * DMIC2
+ * MICBIAS1
+ * IN1P
+ * IN1R
+ * IN2P
+ * IN2R
+ * HPOL
+ * HPOR
+ * LOUTL
+ * LOUTR
+ * MONOP
+ * MONON
+ * SPOLP
+ * SPOLN
+ * SPORP
+ * SPORN
+
Example:
rt5640 {
diff --git a/Documentation/devicetree/bindings/sound/samsung-i2s.txt b/Documentation/devicetree/bindings/sound/samsung-i2s.txt
index 025e66b85a4..7386d444ada 100644
--- a/Documentation/devicetree/bindings/sound/samsung-i2s.txt
+++ b/Documentation/devicetree/bindings/sound/samsung-i2s.txt
@@ -2,7 +2,15 @@
Required SoC Specific Properties:
-- compatible : "samsung,i2s-v5"
+- compatible : should be one of the following.
+ - samsung,s3c6410-i2s: for 8/16/24bit stereo I2S.
+   - samsung,s5pv210-i2s: for 8/16/24bit multichannel (5.1) I2S with
+     secondary fifo, s/w reset control and internal mux for root clk src.
+   - samsung,exynos5420-i2s: for 8/16/24bit multichannel (7.1) I2S with
+     secondary fifo, s/w reset control, internal mux for root clk src and
+     TDM support. TDM (time division multiplexing) allows transfer of
+     multi-channel audio data over a single data line.
+
- reg: physical base address of the controller and length of memory mapped
region.
- dmas: list of DMA controller phandle and DMA request line ordered pairs.
@@ -21,13 +29,6 @@ Required SoC Specific Properties:
Optional SoC Specific Properties:
-- samsung,supports-6ch: If the I2S Primary sound source has 5.1 Channel
- support, this flag is enabled.
-- samsung,supports-rstclr: This flag should be set if I2S software reset bit
- control is required. When this flag is set I2S software reset bit will be
- enabled or disabled based on need.
-- samsung,supports-secdai:If I2S block has a secondary FIFO and internal DMA,
- then this flag is enabled.
- samsung,idma-addr: Internal DMA register base address of the audio
sub system(used in secondary sound source).
- pinctrl-0: Should specify pin control groups used for this controller.
@@ -36,7 +37,7 @@ Optional SoC Specific Properties:
Example:
i2s0: i2s@03830000 {
- compatible = "samsung,i2s-v5";
+ compatible = "samsung,s5pv210-i2s";
reg = <0x03830000 0x100>;
dmas = <&pdma0 10
&pdma0 9
@@ -46,9 +47,6 @@ i2s0: i2s@03830000 {
<&clock_audss EXYNOS_I2S_BUS>,
<&clock_audss EXYNOS_SCLK_I2S>;
clock-names = "iis", "i2s_opclk0", "i2s_opclk1";
- samsung,supports-6ch;
- samsung,supports-rstclr;
- samsung,supports-secdai;
samsung,idma-addr = <0x03000000>;
pinctrl-names = "default";
pinctrl-0 = <&i2s0_bus>;
diff --git a/Documentation/devicetree/bindings/sound/soc-ac97link.txt b/Documentation/devicetree/bindings/sound/soc-ac97link.txt
new file mode 100644
index 00000000000..80152a87f23
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/soc-ac97link.txt
@@ -0,0 +1,28 @@
+AC97 link bindings
+
+These bindings can be included within any other device node.
+
+Required properties:
+ - pinctrl-names: Has to contain the following states to set up the correct
+ pinmuxing for the used gpios:
+ "ac97-running": AC97-link is active
+ "ac97-reset": AC97-link reset state
+ "ac97-warm-reset": AC97-link warm reset state
+ - ac97-gpios: List of gpio phandles with args in the order ac97-sync,
+ ac97-sdata, ac97-reset
+
+
+Example:
+
+ssi {
+ ...
+
+ pinctrl-names = "default", "ac97-running", "ac97-reset", "ac97-warm-reset";
+ pinctrl-0 = <&ac97link_running>;
+ pinctrl-1 = <&ac97link_running>;
+ pinctrl-2 = <&ac97link_reset>;
+ pinctrl-3 = <&ac97link_warm_reset>;
+ ac97-gpios = <&gpio3 20 0 &gpio3 22 0 &gpio3 28 0>;
+
+ ...
+};
diff --git a/Documentation/devicetree/bindings/sound/ti,pcm1681.txt b/Documentation/devicetree/bindings/sound/ti,pcm1681.txt
new file mode 100644
index 00000000000..4df17185ab8
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/ti,pcm1681.txt
@@ -0,0 +1,15 @@
+Texas Instruments PCM1681 8-channel PWM Processor
+
+Required properties:
+
+ - compatible: Should contain "ti,pcm1681".
+ - reg: The i2c address. Should contain <0x4c>.
+
+Examples:
+
+ i2c_bus {
+ pcm1681@4c {
+ compatible = "ti,pcm1681";
+ reg = <0x4c>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic3x.txt b/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
index f47c3f589fd..705a6b156c6 100644
--- a/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
+++ b/Documentation/devicetree/bindings/sound/tlv320aic3x.txt
@@ -3,7 +3,14 @@ Texas Instruments - tlv320aic3x Codec module
The tlv320aic3x serial control bus communicates through I2C protocols
Required properties:
-- compatible - "string" - "ti,tlv320aic3x"
+
+- compatible - "string" - One of:
+ "ti,tlv320aic3x" - Generic TLV320AIC3x device
+ "ti,tlv320aic33" - TLV320AIC33
+ "ti,tlv320aic3007" - TLV320AIC3007
+ "ti,tlv320aic3106" - TLV320AIC3106
+
+
- reg - <int> - I2C slave address
diff --git a/Documentation/devicetree/bindings/sound/wm8731.txt b/Documentation/devicetree/bindings/sound/wm8731.txt
index 15f70048469..236690e99b8 100644
--- a/Documentation/devicetree/bindings/sound/wm8731.txt
+++ b/Documentation/devicetree/bindings/sound/wm8731.txt
@@ -16,3 +16,12 @@ codec: wm8731@1a {
compatible = "wlf,wm8731";
reg = <0x1a>;
};
+
+Available audio endpoints for an audio-routing table:
+ * LOUT: Left Channel Line Output
+ * ROUT: Right Channel Line Output
+ * LHPOUT: Left Channel Headphone Output
+ * RHPOUT: Right Channel Headphone Output
+ * LLINEIN: Left Channel Line Input
+ * RLINEIN: Right Channel Line Input
+ * MICIN: Microphone Input
diff --git a/Documentation/devicetree/bindings/sound/wm8753.txt b/Documentation/devicetree/bindings/sound/wm8753.txt
index e65277a0fb6..8eee6128210 100644
--- a/Documentation/devicetree/bindings/sound/wm8753.txt
+++ b/Documentation/devicetree/bindings/sound/wm8753.txt
@@ -10,9 +10,31 @@ Required properties:
- reg : the I2C address of the device for I2C, the chip select
number for SPI.
+Pins on the device (for linking into audio routes):
+
+ * LOUT1
+ * LOUT2
+ * ROUT1
+ * ROUT2
+ * MONO1
+ * MONO2
+ * OUT3
+ * OUT4
+ * LINE1
+ * LINE2
+ * RXP
+ * RXN
+ * ACIN
+ * ACOP
+ * MIC1N
+ * MIC1
+ * MIC2N
+ * MIC2
+ * Mic Bias
+
Example:
-codec: wm8737@1a {
+codec: wm8753@1a {
compatible = "wlf,wm8753";
reg = <0x1a>;
};
diff --git a/Documentation/devicetree/bindings/sound/wm8903.txt b/Documentation/devicetree/bindings/sound/wm8903.txt
index f102cbc4269..94ec32c194b 100644
--- a/Documentation/devicetree/bindings/sound/wm8903.txt
+++ b/Documentation/devicetree/bindings/sound/wm8903.txt
@@ -28,6 +28,25 @@ Optional properties:
performed. If any entry has the value 0xffffffff, that GPIO's
configuration will not be modified.
+Pins on the device (for linking into audio routes):
+
+ * IN1L
+ * IN1R
+ * IN2L
+ * IN2R
+ * IN3L
+ * IN3R
+ * DMICDAT
+ * HPOUTL
+ * HPOUTR
+ * LINEOUTL
+ * LINEOUTR
+ * LOP
+ * LON
+ * ROP
+ * RON
+ * MICBIAS
+
Example:
codec: wm8903@1a {
diff --git a/Documentation/devicetree/bindings/sound/wm8994.txt b/Documentation/devicetree/bindings/sound/wm8994.txt
index f2f3e80934d..e045e90a092 100644
--- a/Documentation/devicetree/bindings/sound/wm8994.txt
+++ b/Documentation/devicetree/bindings/sound/wm8994.txt
@@ -32,6 +32,10 @@ Optional properties:
The second cell is the flags, encoded as the trigger masks from
Documentation/devicetree/bindings/interrupts.txt
+ - clocks : A list of up to two phandle and clock specifier pairs
+ - clock-names : A list of clock names sorted in the same order as clocks.
+ Valid clock names are "MCLK1" and "MCLK2".
+
- wlf,gpio-cfg : A list of GPIO configuration register values. If absent,
no configuration of these registers is performed. If any value is
over 0xffff then the register will be left as default. If present 11
diff --git a/Documentation/devicetree/bindings/spi/efm32-spi.txt b/Documentation/devicetree/bindings/spi/efm32-spi.txt
new file mode 100644
index 00000000000..a590ca51be7
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/efm32-spi.txt
@@ -0,0 +1,34 @@
+* Energy Micro EFM32 SPI
+
+Required properties:
+- #address-cells: see spi-bus.txt
+- #size-cells: see spi-bus.txt
+- compatible: should be "efm32,spi"
+- reg: Offset and length of the register set for the controller
+- interrupts: pair specifying rx and tx irq
+- clocks: phandle to the spi clock
+- cs-gpios: see spi-bus.txt
+- location: Value to write to the ROUTE register's LOCATION bitfield to configure the pinmux for the device, see datasheet for values.
+
+Example:
+
+spi1: spi@0x4000c400 { /* USART1 */
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "efm32,spi";
+ reg = <0x4000c400 0x400>;
+ interrupts = <15 16>;
+ clocks = <&cmu 20>;
+ cs-gpios = <&gpio 51 1>; // D3
+ location = <1>;
+ status = "ok";
+
+ ks8851@0 {
+ compatible = "ks8851";
+ spi-max-frequency = <6000000>;
+ reg = <0>;
+ interrupt-parent = <&boardfpga>;
+ interrupts = <4>;
+ status = "ok";
+ };
+};
diff --git a/Documentation/devicetree/bindings/spi/spi-bus.txt b/Documentation/devicetree/bindings/spi/spi-bus.txt
index 296015e3c63..800dafe5b01 100644
--- a/Documentation/devicetree/bindings/spi/spi-bus.txt
+++ b/Documentation/devicetree/bindings/spi/spi-bus.txt
@@ -55,6 +55,16 @@ contain the following properties.
chip select active high
- spi-3wire - (optional) Empty property indicating device requires
3-wire mode.
+- spi-tx-bus-width - (optional) The bus width (number of data wires) used
+                      for MOSI. Defaults to 1 if not present.
+- spi-rx-bus-width - (optional) The bus width (number of data wires) used
+                      for MISO. Defaults to 1 if not present.
+
+Some SPI controllers and devices support Dual and Quad SPI transfer modes,
+which transfer data over 2 wires (DUAL) or 4 wires (QUAD) instead of 1.
+The only values currently accepted for spi-tx-bus-width and
+spi-rx-bus-width are 1 (SINGLE), 2 (DUAL) and 4 (QUAD).
+Dual/Quad mode is not allowed when 3-wire mode is used.
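+
+For example, a slave device wired for quad transfers in both directions
+might set both properties as in the sketch below (the compatible string
+and frequency are placeholders):
+
+	flash@0 {
+		compatible = "vendor,example-qspi-flash";
+		reg = <0>;
+		spi-max-frequency = <20000000>;
+		spi-tx-bus-width = <4>;
+		spi-rx-bus-width = <4>;
+	};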
If a gpio chipselect is used for the SPI slave the gpio number will be passed
via the cs_gpio
diff --git a/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt b/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
new file mode 100644
index 00000000000..a1fb3035a42
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
@@ -0,0 +1,42 @@
+ARM Freescale DSPI controller
+
+Required properties:
+- compatible : "fsl,vf610-dspi"
+- reg : Offset and length of the register set for the device
+- interrupts : Should contain SPI controller interrupt
+- clocks: from common clock binding: handle to dspi clock.
+- clock-names: from common clock binding: Shall be "dspi".
+- pinctrl-0: pin control group to be used for this controller.
+- pinctrl-names: must contain a "default" entry.
+- spi-num-chipselects : the number of the chipselect signals.
+- bus-num : the slave chip chipselect signal number.
+Example:
+
+dspi0@4002c000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "fsl,vf610-dspi";
+ reg = <0x4002c000 0x1000>;
+ interrupts = <0 67 0x04>;
+ clocks = <&clks VF610_CLK_DSPI0>;
+ clock-names = "dspi";
+ spi-num-chipselects = <5>;
+ bus-num = <0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_dspi0_1>;
+ status = "okay";
+
+ sflash: at26df081a@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "atmel,at26df081a";
+ spi-max-frequency = <16000000>;
+ spi-cpol;
+ spi-cpha;
+ reg = <0>;
+ linux,modalias = "m25p80";
+ modal = "at26df081a";
+ };
+};
+
+
diff --git a/Documentation/devicetree/bindings/spi/ti_qspi.txt b/Documentation/devicetree/bindings/spi/ti_qspi.txt
new file mode 100644
index 00000000000..1f9641ade0b
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/ti_qspi.txt
@@ -0,0 +1,22 @@
+TI QSPI controller.
+
+Required properties:
+- compatible : should be "ti,dra7xxx-qspi" or "ti,am4372-qspi".
+- reg: Should contain QSPI registers location and length.
+- #address-cells, #size-cells : Must be present if the device has sub-nodes
+- ti,hwmods: Name of the hwmod associated to the QSPI
+
+Recommended properties:
+- spi-max-frequency: Definition as per
+ Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Example:
+
+qspi: qspi@4b300000 {
+ compatible = "ti,dra7xxx-qspi";
+ reg = <0x4b300000 0x100>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ spi-max-frequency = <25000000>;
+ ti,hwmods = "qspi";
+};
diff --git a/Documentation/devicetree/bindings/timer/moxa,moxart-timer.txt b/Documentation/devicetree/bindings/timer/moxa,moxart-timer.txt
new file mode 100644
index 00000000000..da2d510cae4
--- /dev/null
+++ b/Documentation/devicetree/bindings/timer/moxa,moxart-timer.txt
@@ -0,0 +1,17 @@
+MOXA ART timer
+
+Required properties:
+
+- compatible : Must be "moxa,moxart-timer"
+- reg : Should contain registers location and length
+- interrupts : Should contain the timer interrupt number
+- clocks : Should contain phandle for the clock that drives the counter
+
+Example:
+
+ timer: timer@98400000 {
+ compatible = "moxa,moxart-timer";
+ reg = <0x98400000 0x42>;
+ interrupts = <19 1>;
+ clocks = <&coreclk>;
+ };
diff --git a/Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt b/Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt
deleted file mode 100644
index c662eb36be2..00000000000
--- a/Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-* Freescale i.MX Universal Asynchronous Receiver/Transmitter (UART)
-
-Required properties:
-- compatible : Should be "fsl,<soc>-uart"
-- reg : Address and length of the register set for the device
-- interrupts : Should contain uart interrupt
-
-Optional properties:
-- fsl,uart-has-rtscts : Indicate the uart has rts and cts
-- fsl,irda-mode : Indicate the uart supports irda mode
-- fsl,dte-mode : Indicate the uart works in DTE mode. The uart works
- is DCE mode by default.
-
-Example:
-
-serial@73fbc000 {
- compatible = "fsl,imx51-uart", "fsl,imx21-uart";
- reg = <0x73fbc000 0x4000>;
- interrupts = <31>;
- fsl,uart-has-rtscts;
- fsl,dte-mode;
-};
diff --git a/Documentation/devicetree/bindings/tty/serial/msm_serial.txt b/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
deleted file mode 100644
index aef383eb887..00000000000
--- a/Documentation/devicetree/bindings/tty/serial/msm_serial.txt
+++ /dev/null
@@ -1,27 +0,0 @@
-* Qualcomm MSM UART
-
-Required properties:
-- compatible :
- - "qcom,msm-uart", and one of "qcom,msm-hsuart" or
- "qcom,msm-lsuart".
-- reg : offset and length of the register set for the device
- for the hsuart operating in compatible mode, there should be a
- second pair describing the gsbi registers.
-- interrupts : should contain the uart interrupt.
-
-There are two different UART blocks used in MSM devices,
-"qcom,msm-hsuart" and "qcom,msm-lsuart". The msm-serial driver is
-able to handle both of these, and matches against the "qcom,msm-uart"
-as the compatibility.
-
-The registers for the "qcom,msm-hsuart" device need to specify both
-register blocks, even for the common driver.
-
-Example:
-
- uart@19c400000 {
- compatible = "qcom,msm-hsuart", "qcom,msm-uart";
- reg = <0x19c40000 0x1000>,
- <0x19c00000 0x1000>;
- interrupts = <195>;
- };
diff --git a/Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt b/Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt
new file mode 100644
index 00000000000..c5e032c85bf
--- /dev/null
+++ b/Documentation/devicetree/bindings/tty/serial/qca,ar9330-uart.txt
@@ -0,0 +1,34 @@
+* Qualcomm Atheros AR9330 High-Speed UART
+
+Required properties:
+
+- compatible: Must be "qca,ar9330-uart"
+
+- reg: Specifies the physical base address of the controller and
+ the length of the memory mapped region.
+
+- interrupt-parent: The phandle for the interrupt controller that
+ services interrupts for this device.
+
+- interrupts: Specifies the interrupt source of the parent interrupt
+ controller. The format of the interrupt specifier depends on the
+ parent interrupt controller.
+
+Additional requirements:
+
+ Each UART port must have an alias correctly numbered in "aliases"
+ node.
+
+Example:
+
+ aliases {
+ serial0 = &uart0;
+ };
+
+ uart0: uart@18020000 {
+ compatible = "qca,ar9330-uart";
+ reg = <0x18020000 0x14>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <3>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/am33xx-usb.txt b/Documentation/devicetree/bindings/usb/am33xx-usb.txt
index dc9dc8c87f1..20c2ff2ba07 100644
--- a/Documentation/devicetree/bindings/usb/am33xx-usb.txt
+++ b/Documentation/devicetree/bindings/usb/am33xx-usb.txt
@@ -1,35 +1,197 @@
-AM33XX MUSB GLUE
- - compatible : Should be "ti,musb-am33xx"
- - reg : offset and length of register sets, first usbss, then for musb instances
- - interrupts : usbss, musb instance interrupts in order
- - ti,hwmods : must be "usb_otg_hs"
- - multipoint : Should be "1" indicating the musb controller supports
- multipoint. This is a MUSB configuration-specific setting.
- - num-eps : Specifies the number of endpoints. This is also a
- MUSB configuration-specific setting. Should be set to "16"
- - ram-bits : Specifies the ram address size. Should be set to "12"
- - port0-mode : Should be "3" to represent OTG. "1" signifies HOST and "2"
- represents PERIPHERAL.
- - port1-mode : Should be "1" to represent HOST. "3" signifies OTG and "2"
- represents PERIPHERAL.
- - power : Should be "250". This signifies the controller can supply up to
- 500mA when operating in host mode.
+AM33xx MUSB
+~~~~~~~~~~~
+- compatible: ti,am33xx-usb
+- reg: offset and length of the usbss register sets
+- ti,hwmods : must be "usb_otg_hs"
+
+The glue layer contains multiple child nodes. It is required to have
+at least a control module node, a USB node and a PHY node. The second
+USB node and its PHY node are optional. The DMA node is also optional.
+
+Reset module
+~~~~~~~~~~~~
+- compatible: ti,am335x-usb-ctrl-module
+- reg: offset and length of the "USB control registers" in the "Control
+ Module" block. A second offset and length for the USB wake up control
+ in the same memory block.
+- reg-names: "phy_ctrl" for the "USB control registers" and "wakeup" for
+ the USB wake up control register.
+
+USB PHY
+~~~~~~~
+- compatible: ti,am335x-usb-phy
+- reg: offset and length of the "USB PHY" register space
+- ti,ctrl_mod: reference to the "reset module" node
+- reg-names: phy
+The PHY should have a "phy" alias numbered properly in the "aliases"
+node.
+
+USB
+~~~
+- compatible: ti,musb-am33xx
+- reg: offset and length of "USB Controller Registers", and offset and
+ length of "USB Core" register space.
+- reg-names: "control" for the "USB Controller Registers" and "mc" for
+  the "USB Core" register space
+- interrupts: USB interrupt number
+- interrupt-names: mc
+- dr_mode: Should be one of "host", "peripheral" or "otg".
+- mentor,multipoint: Should be "1" indicating the musb controller supports
+ multipoint. This is a MUSB configuration-specific setting.
+- mentor,num-eps: Specifies the number of endpoints. This is also a
+ MUSB configuration-specific setting. Should be set to "16"
+- mentor,ram-bits: Specifies the ram address size. Should be set to "12"
+- mentor,power: Should be "500". This signifies the controller can supply up to
+ 500mA when operating in host mode.
+- phys: reference to the USB phy
+- dmas: specifies the dma channels
+- dma-names: specifies the names of the channels. Use "rxN" for receive
+ and "txN" for transmit endpoints. N specifies the endpoint number.
+
+The controller should have a "usb" alias numbered properly in the
+"aliases" node.
+
+DMA
+~~~
+- compatible: ti,am3359-cppi41
+- reg: offset and length of the following register spaces: USBSS, USB
+ CPPI DMA Controller, USB CPPI DMA Scheduler, USB Queue Manager
+- reg-names: glue, controller, scheduler, queuemgr
+- #dma-cells: should be set to 2. The first number represents the
+ endpoint number (0 … 14 for endpoints 1 … 15 on instance 0 and 15 … 29
+ for endpoints 1 … 15 on instance 1). The second number is 0 for RX and
+ 1 for TX transfers.
+- #dma-channels: should be set to 30 representing the 15 endpoints for
+ each USB instance.
Example:
+~~~~~~~~
+The following example contains all the nodes as used on am335x-evm:
+
+aliases {
+ usb0 = &usb0;
+ usb1 = &usb1;
+ phy0 = &usb0_phy;
+ phy1 = &usb1_phy;
+};
-usb@47400000 {
- compatible = "ti,musb-am33xx";
- reg = <0x47400000 0x1000 /* usbss */
- 0x47401000 0x800 /* musb instance 0 */
- 0x47401800 0x800>; /* musb instance 1 */
- interrupts = <17 /* usbss */
- 18 /* musb instance 0 */
- 19>; /* musb instance 1 */
- multipoint = <1>;
- num-eps = <16>;
- ram-bits = <12>;
- port0-mode = <3>;
- port1-mode = <3>;
- power = <250>;
+usb: usb@47400000 {
+ compatible = "ti,am33xx-usb";
+ reg = <0x47400000 0x1000>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
ti,hwmods = "usb_otg_hs";
+
+ ctrl_mod: control@44e10000 {
+ compatible = "ti,am335x-usb-ctrl-module";
+ reg = <0x44e10620 0x10
+ 0x44e10648 0x4>;
+ reg-names = "phy_ctrl", "wakeup";
+ };
+
+ usb0_phy: usb-phy@47401300 {
+ compatible = "ti,am335x-usb-phy";
+ reg = <0x47401300 0x100>;
+ reg-names = "phy";
+ ti,ctrl_mod = <&ctrl_mod>;
+ };
+
+ usb0: usb@47401000 {
+ compatible = "ti,musb-am33xx";
+ reg = <0x47401400 0x400
+ 0x47401000 0x200>;
+ reg-names = "mc", "control";
+
+ interrupts = <18>;
+ interrupt-names = "mc";
+ dr_mode = "otg";
+ mentor,multipoint = <1>;
+ mentor,num-eps = <16>;
+ mentor,ram-bits = <12>;
+ mentor,power = <500>;
+ phys = <&usb0_phy>;
+
+ dmas = <&cppi41dma 0 0 &cppi41dma 1 0
+ &cppi41dma 2 0 &cppi41dma 3 0
+ &cppi41dma 4 0 &cppi41dma 5 0
+ &cppi41dma 6 0 &cppi41dma 7 0
+ &cppi41dma 8 0 &cppi41dma 9 0
+ &cppi41dma 10 0 &cppi41dma 11 0
+ &cppi41dma 12 0 &cppi41dma 13 0
+ &cppi41dma 14 0 &cppi41dma 0 1
+ &cppi41dma 1 1 &cppi41dma 2 1
+ &cppi41dma 3 1 &cppi41dma 4 1
+ &cppi41dma 5 1 &cppi41dma 6 1
+ &cppi41dma 7 1 &cppi41dma 8 1
+ &cppi41dma 9 1 &cppi41dma 10 1
+ &cppi41dma 11 1 &cppi41dma 12 1
+ &cppi41dma 13 1 &cppi41dma 14 1>;
+ dma-names =
+ "rx1", "rx2", "rx3", "rx4", "rx5", "rx6", "rx7",
+ "rx8", "rx9", "rx10", "rx11", "rx12", "rx13",
+ "rx14", "rx15",
+ "tx1", "tx2", "tx3", "tx4", "tx5", "tx6", "tx7",
+ "tx8", "tx9", "tx10", "tx11", "tx12", "tx13",
+ "tx14", "tx15";
+ };
+
+ usb1_phy: usb-phy@47401b00 {
+ compatible = "ti,am335x-usb-phy";
+ reg = <0x47401b00 0x100>;
+ reg-names = "phy";
+ ti,ctrl_mod = <&ctrl_mod>;
+ };
+
+ usb1: usb@47401800 {
+ compatible = "ti,musb-am33xx";
+ reg = <0x47401c00 0x400
+ 0x47401800 0x200>;
+ reg-names = "mc", "control";
+ interrupts = <19>;
+ interrupt-names = "mc";
+ dr_mode = "host";
+ mentor,multipoint = <1>;
+ mentor,num-eps = <16>;
+ mentor,ram-bits = <12>;
+ mentor,power = <500>;
+ phys = <&usb1_phy>;
+
+ dmas = <&cppi41dma 15 0 &cppi41dma 16 0
+ &cppi41dma 17 0 &cppi41dma 18 0
+ &cppi41dma 19 0 &cppi41dma 20 0
+ &cppi41dma 21 0 &cppi41dma 22 0
+ &cppi41dma 23 0 &cppi41dma 24 0
+ &cppi41dma 25 0 &cppi41dma 26 0
+ &cppi41dma 27 0 &cppi41dma 28 0
+ &cppi41dma 29 0 &cppi41dma 15 1
+ &cppi41dma 16 1 &cppi41dma 17 1
+ &cppi41dma 18 1 &cppi41dma 19 1
+ &cppi41dma 20 1 &cppi41dma 21 1
+ &cppi41dma 22 1 &cppi41dma 23 1
+ &cppi41dma 24 1 &cppi41dma 25 1
+ &cppi41dma 26 1 &cppi41dma 27 1
+ &cppi41dma 28 1 &cppi41dma 29 1>;
+ dma-names =
+ "rx1", "rx2", "rx3", "rx4", "rx5", "rx6", "rx7",
+ "rx8", "rx9", "rx10", "rx11", "rx12", "rx13",
+ "rx14", "rx15",
+ "tx1", "tx2", "tx3", "tx4", "tx5", "tx6", "tx7",
+ "tx8", "tx9", "tx10", "tx11", "tx12", "tx13",
+ "tx14", "tx15";
+ };
+
+ cppi41dma: dma-controller@47402000 {
+ compatible = "ti,am3359-cppi41";
+ reg = <0x47400000 0x1000
+ 0x47402000 0x1000
+ 0x47403000 0x1000
+ 0x47404000 0x4000>;
+ reg-names = "glue", "controller", "scheduler", "queuemgr";
+ interrupts = <17>;
+ interrupt-names = "glue";
+ #dma-cells = <2>;
+ #dma-channels = <30>;
+ #dma-requests = <256>;
+ };
};
diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt
index 7a95c651ceb..e807635f9e1 100644
--- a/Documentation/devicetree/bindings/usb/dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/dwc3.txt
@@ -3,10 +3,12 @@ synopsys DWC3 CORE
DWC3- USB3 CONTROLLER
Required properties:
- - compatible: must be "synopsys,dwc3"
+ - compatible: must be "snps,dwc3"
- reg : Address and length of the register set for the device
- interrupts: Interrupts used by the dwc3 controller.
- - usb-phy : array of phandle for the PHY device
+ - usb-phy : array of phandle for the PHY device. The first element
+ in the array is expected to be a handle to the USB2/HS PHY and
+ the second element is expected to be a handle to the USB3/SS PHY
Optional properties:
- tx-fifo-resize: determines if the FIFO *has* to be reallocated.
@@ -14,7 +16,7 @@ Optional properties:
This is usually a subnode to DWC3 glue to which it is connected.
dwc3@4a030000 {
- compatible = "synopsys,dwc3";
+ compatible = "snps,dwc3";
reg = <0x4a030000 0xcfff>;
interrupts = <0 92 4>
usb-phy = <&usb2_phy>, <&usb3,phy>;
diff --git a/Documentation/devicetree/bindings/usb/generic.txt b/Documentation/devicetree/bindings/usb/generic.txt
new file mode 100644
index 00000000000..477d5bb5e51
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/generic.txt
@@ -0,0 +1,24 @@
+Generic USB Properties
+
+Optional properties:
+ - maximum-speed: tells USB controllers we want to work up to a certain
+ speed. Valid arguments are "super-speed", "high-speed",
+ "full-speed" and "low-speed". In case this isn't passed
+ via DT, USB controllers should default to their maximum
+ HW capability.
+ - dr_mode: tells Dual-Role USB controllers that we want to work in a
+ particular mode. Valid arguments are "host",
+ "peripheral" and "otg". In case this attribute isn't
+ passed via DT, USB DRD controllers should default to
+ OTG.
+
+These properties are attached to a USB controller node such as:
+
+dwc3@4a030000 {
+ compatible = "synopsys,dwc3";
+ reg = <0x4a030000 0xcfff>;
+ interrupts = <0 92 4>;
+ usb-phy = <&usb2_phy>, <&usb3_phy>;
+ maximum-speed = "super-speed";
+ dr_mode = "otg";
+};
diff --git a/Documentation/devicetree/bindings/usb/nvidia,tegra20-usb-phy.txt b/Documentation/devicetree/bindings/usb/nvidia,tegra20-usb-phy.txt
index c4c9e9e664a..ba797d3e632 100644
--- a/Documentation/devicetree/bindings/usb/nvidia,tegra20-usb-phy.txt
+++ b/Documentation/devicetree/bindings/usb/nvidia,tegra20-usb-phy.txt
@@ -3,7 +3,7 @@ Tegra SOC USB PHY
The device node for Tegra SOC USB PHY:
Required properties :
- - compatible : Should be "nvidia,tegra20-usb-phy".
+ - compatible : Should be "nvidia,tegra<chip>-usb-phy".
- reg : Defines the following set of registers, in the order listed:
- The PHY's own register set.
Always present.
@@ -24,17 +24,26 @@ Required properties :
Required properties for phy_type == ulpi:
- nvidia,phy-reset-gpio : The GPIO used to reset the PHY.
-Required PHY timing params for utmi phy:
+Required PHY timing params for utmi phy, for all chips:
- nvidia,hssync-start-delay : Number of 480 Mhz clock cycles to wait before
start of sync launches RxActive
- nvidia,elastic-limit : Variable FIFO Depth of elastic input store
- nvidia,idle-wait-delay : Number of 480 Mhz clock cycles of idle to wait
before declare IDLE.
- nvidia,term-range-adj : Range adjusment on terminations
- - nvidia,xcvr-setup : HS driver output control
+ - Either one of the following for HS driver output control:
+ - nvidia,xcvr-setup : integer, uses the provided value.
+ - nvidia,xcvr-setup-use-fuses : boolean, indicates that the value is read
+ from the on-chip fuses
+ If both are provided, nvidia,xcvr-setup-use-fuses takes precedence.
- nvidia,xcvr-lsfslew : LS falling slew rate control.
- nvidia,xcvr-lsrslew : LS rising slew rate control.
+Required PHY timing params for utmi phy, only on Tegra30 and above:
+ - nvidia,xcvr-hsslew : HS slew rate control.
+ - nvidia,hssquelch-level : HS squelch detector level.
+ - nvidia,hsdiscon-level : HS disconnect detector level.
+
Optional properties:
- nvidia,has-legacy-mode : boolean indicates whether this controller can
operate in legacy mode (as APX 2500 / 2600). In legacy mode some
@@ -48,5 +57,5 @@ Optional properties:
peripheral means it is device controller
otg means it can operate as either ("on the go")
-Required properties for dr_mode == otg:
+VBUS control (required for dr_mode == otg, optional for dr_mode == host):
- vbus-supply: regulator for VBUS
diff --git a/Documentation/devicetree/bindings/usb/omap-usb.txt b/Documentation/devicetree/bindings/usb/omap-usb.txt
index 57e71f6817d..9088ab09e20 100644
--- a/Documentation/devicetree/bindings/usb/omap-usb.txt
+++ b/Documentation/devicetree/bindings/usb/omap-usb.txt
@@ -53,6 +53,11 @@ OMAP DWC3 GLUE
It should be set to "1" for HW mode and "2" for SW mode.
- ranges: the child address space are mapped 1:1 onto the parent address space
+Optional Properties:
+ - extcon : phandle for the extcon device omap dwc3 uses to detect
+ connect/disconnect events.
+ - vbus-supply : phandle to the regulator device tree node if needed.
+
Sub-nodes:
The dwc3 core should be added as subnode to omap dwc3 glue.
- dwc3 :
diff --git a/Documentation/devicetree/bindings/usb/samsung-hsotg.txt b/Documentation/devicetree/bindings/usb/samsung-hsotg.txt
new file mode 100644
index 00000000000..b83d428a265
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/samsung-hsotg.txt
@@ -0,0 +1,40 @@
+Samsung High Speed USB OTG controller
+-------------------------------------
+
+The Samsung HSOTG IP can be found on Samsung SoCs, from S3C6400 onwards.
+It provides OTG-compliant USB 2.0 host and device functionality, with
+support for USB 2.0 high-speed (480 Mbps) and full-speed (12 Mbps)
+operation.
+
+Currently only device mode is supported.
+
+Binding details
+---------------
+
+Required properties:
+- compatible: "samsung,s3c6400-hsotg" should be used for all currently
+ supported SoCs,
+- interrupt-parent: phandle for the interrupt controller to which the
+ interrupt signal of the HSOTG block is routed,
+- interrupts: interrupt specifier for the HSOTG block, formatted
+ according to the bindings of the parent interrupt controller,
+- clocks: contains an array of clock specifiers:
+ - first entry: OTG clock
+- clock-names: contains array of clock names:
+ - first entry: must be "otg"
+- vusb_d-supply: phandle to voltage regulator of digital section,
+- vusb_a-supply: phandle to voltage regulator of analog section.
+
+Example
+-------
+
+ hsotg@12480000 {
+ compatible = "samsung,s3c6400-hsotg";
+ reg = <0x12480000 0x20000>;
+ interrupts = <0 71 0>;
+ clocks = <&clock 305>;
+ clock-names = "otg";
+ vusb_d-supply = <&vusb_reg>;
+ vusb_a-supply = <&vusbdac_reg>;
+ };
+
diff --git a/Documentation/devicetree/bindings/usb/usb-xhci.txt b/Documentation/devicetree/bindings/usb/usb-xhci.txt
new file mode 100644
index 00000000000..5752df0e17a
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/usb-xhci.txt
@@ -0,0 +1,14 @@
+USB xHCI controllers
+
+Required properties:
+ - compatible: should be "xhci-platform".
+ - reg: should contain address and length of the standard XHCI
+ register set for the device.
+ - interrupts: one XHCI interrupt should be described here.
+
+Example:
+ usb@f0931000 {
+ compatible = "xhci-platform";
+ reg = <0xf0931000 0x8c8>;
+ interrupts = <0x0 0x4e 0x0>;
+ };
diff --git a/Documentation/devicetree/bindings/usb/usb3503.txt b/Documentation/devicetree/bindings/usb/usb3503.txt
index 8c5be48b43c..a018da4a7ad 100644
--- a/Documentation/devicetree/bindings/usb/usb3503.txt
+++ b/Documentation/devicetree/bindings/usb/usb3503.txt
@@ -1,8 +1,11 @@
SMSC USB3503 High-Speed Hub Controller
Required properties:
-- compatible: Should be "smsc,usb3503".
-- reg: Specifies the i2c slave address, it should be 0x08.
+- compatible: Should be "smsc,usb3503" or "smsc,usb3503a".
+
+Optional properties:
+- reg: Specifies the I2C slave address; required if I2C is used, and
+ should be 0x08.
- connect-gpios: Should specify GPIO for connect.
- disabled-ports: Should specify the ports unused.
'1' or '2' or '3' are availe for this property to describe the port
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 366ce9b8724..ec4d713674f 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -11,6 +11,7 @@ amcc Applied Micro Circuits Corporation (APM, formally AMCC)
apm Applied Micro Circuits Corporation (APM)
arm ARM Ltd.
atmel Atmel Corporation
+avago Avago Technologies
bosch Bosch Sensortec GmbH
brcm Broadcom Corporation
cavium Cavium, Inc.
diff --git a/Documentation/devicetree/bindings/video/simple-framebuffer.txt b/Documentation/devicetree/bindings/video/simple-framebuffer.txt
index 3ea46058311..70c26f3a5b9 100644
--- a/Documentation/devicetree/bindings/video/simple-framebuffer.txt
+++ b/Documentation/devicetree/bindings/video/simple-framebuffer.txt
@@ -12,6 +12,7 @@ Required properties:
- stride: The number of bytes in each line of the framebuffer.
- format: The format of the framebuffer surface. Valid values are:
- r5g6b5 (16-bit pixels, d[15:11]=r, d[10:5]=g, d[4:0]=b).
+ - a8b8g8r8 (32-bit pixels, d[31:24]=a, d[23:16]=b, d[15:8]=g, d[7:0]=r).
Example:
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 1d323329298..fb57d85e731 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -240,6 +240,8 @@ MEM
IIO
devm_iio_device_alloc()
devm_iio_device_free()
+ devm_iio_trigger_alloc()
+ devm_iio_trigger_free()
IO region
devm_request_region()
diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt
index 293855e9500..7ed0d17d672 100644
--- a/Documentation/filesystems/ext3.txt
+++ b/Documentation/filesystems/ext3.txt
@@ -26,11 +26,12 @@ journal=inum When a journal already exists, this option is ignored.
Otherwise, it specifies the number of the inode which
will represent the ext3 file system's journal file.
+journal_path=path
journal_dev=devnum When the external journal device's major/minor numbers
- have changed, this option allows the user to specify
+ have changed, these options allow the user to specify
the new journal location. The journal device is
- identified through its new major/minor numbers encoded
- in devnum.
+ identified through either its new major/minor numbers
+ encoded in devnum, or via a path to the device.
norecovery Don't load the journal on mounting. Note that this forces
noload mount of inconsistent filesystem, which can lead to
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index a92c5aa8ce2..919a3293aaa 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -144,11 +144,12 @@ journal_async_commit Commit block can be written to disk without waiting
mount the device. This will enable 'journal_checksum'
internally.
+journal_path=path
journal_dev=devnum When the external journal device's major/minor numbers
- have changed, this option allows the user to specify
+ have changed, these options allow the user to specify
the new journal location. The journal device is
- identified through its new major/minor numbers encoded
- in devnum.
+ identified through either its new major/minor numbers
+ encoded in devnum, or via a path to the device.
norecovery Don't load the journal on mounting. Note that
noload if the filesystem was not unmounted cleanly,
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index b91e2f26b67..3cd27bed634 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -18,8 +18,8 @@ according to its internal geometry or flash memory management scheme, namely FTL
F2FS and its tools support various parameters not only for configuring on-disk
layout, but also for selecting allocation and cleaning algorithms.
-The file system formatting tool, "mkfs.f2fs", is available from the following
-git tree:
+The following git tree provides the file system formatting tool (mkfs.f2fs),
+a consistency checking tool (fsck.f2fs), and a debugging tool (dump.f2fs).
>> git://git.kernel.org/pub/scm/linux/kernel/git/jaegeuk/f2fs-tools.git
For reporting bugs and sending patches, please use the following mailing list:
@@ -133,6 +133,38 @@ f2fs. Each file shows the whole f2fs information.
- current memory footprint consumed by f2fs.
================================================================================
+SYSFS ENTRIES
+================================================================================
+
+Information about mounted f2fs file systems can be found in
+/sys/fs/f2fs. Each mounted filesystem will have a directory in
+/sys/fs/f2fs based on its device name (e.g., /sys/fs/f2fs/sda).
+The files in each per-device directory are shown in the table below.
+
+Files in /sys/fs/f2fs/<devname>
+(see also Documentation/ABI/testing/sysfs-fs-f2fs)
+..............................................................................
+ File Content
+
+ gc_max_sleep_time This tuning parameter controls the maximum sleep
+ time for the garbage collection thread. Time is
+ in milliseconds.
+
+ gc_min_sleep_time This tuning parameter controls the minimum sleep
+ time for the garbage collection thread. Time is
+ in milliseconds.
+
+ gc_no_gc_sleep_time This tuning parameter controls the default sleep
+ time for the garbage collection thread. Time is
+ in milliseconds.
+
+ gc_idle This parameter controls the selection of victim
+ policy for garbage collection. Setting gc_idle = 0
+ (default) will disable this option. Setting
+ gc_idle = 1 will select the Cost Benefit approach
+ and setting gc_idle = 2 will select the greedy approach.
+
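+As a quick illustration (not part of the f2fs tools themselves), the tunables
+above are set by writing plain integers to the corresponding files. The sketch
+below raises gc_max_sleep_time; the device name "sda" is only the example used
+above, and the value is in milliseconds as described in the table:
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          FILE *f = fopen("/sys/fs/f2fs/sda/gc_max_sleep_time", "w");
+
+          if (!f)
+                  return 1;
+          fprintf(f, "%d\n", 30000);   /* value in milliseconds */
+          fclose(f);
+          return 0;
+  }
+
+The same effect can be had by echoing the value into the file from a shell.
+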
+================================================================================
USAGE
================================================================================
@@ -149,8 +181,12 @@ USAGE
# mkfs.f2fs -l label /dev/block_device
# mount -t f2fs /dev/block_device /mnt/f2fs
-Format options
---------------
+mkfs.f2fs
+---------
+mkfs.f2fs formats a partition as an f2fs filesystem, building the basic
+on-disk layout.
+
+The options consist of:
-l [label] : Give a volume label, up to 512 unicode name.
-a [0 or 1] : Split start location of each area for heap-based allocation.
1 is set by default, which performs this.
@@ -164,6 +200,37 @@ Format options
-t [0 or 1] : Disable discard command or not.
1 is set by default, which conducts discard.
+fsck.f2fs
+---------
+fsck.f2fs is a tool that checks the consistency of an f2fs-formatted
+partition by examining whether the filesystem metadata and user data
+are cross-referenced correctly.
+Note that the initial version of the tool does not fix any inconsistencies.
+
+The options consist of:
+ -d debug level [default:0]
+
+dump.f2fs
+---------
+dump.f2fs shows the information of a specific inode and dumps the SSA and SIT
+areas to the files dump_ssa and dump_sit.
+
+dump.f2fs is used to debug the on-disk data structures of the f2fs filesystem.
+It shows the on-disk inode information recognized by a given inode number, and
+is able to dump all the SSA and SIT entries into predefined files, ./dump_ssa
+and ./dump_sit respectively.
+
+The options consist of:
+ -d debug level [default:0]
+ -i inode no (hex)
+ -s [SIT dump segno from #1~#2 (decimal), for all 0~-1]
+ -a [SSA dump segno from #1~#2 (decimal), for all 0~-1]
+
+Examples:
+# dump.f2fs -i [ino] /dev/sdx
+# dump.f2fs -s 0~-1 /dev/sdx (SIT dump)
+# dump.f2fs -a 0~-1 /dev/sdx (SSA dump)
+
================================================================================
DESIGN
================================================================================
diff --git a/Documentation/hid/uhid.txt b/Documentation/hid/uhid.txt
index 3c741214dfb..dc35a2b75ee 100644
--- a/Documentation/hid/uhid.txt
+++ b/Documentation/hid/uhid.txt
@@ -149,11 +149,13 @@ needs. Only UHID_OUTPUT and UHID_OUTPUT_EV have payloads.
is of type "struct uhid_data_req".
This may be received even though you haven't received UHID_OPEN, yet.
- UHID_OUTPUT_EV:
+ UHID_OUTPUT_EV (obsolete):
Same as UHID_OUTPUT but this contains a "struct input_event" as payload. This
is called for force-feedback, LED or similar events which are received through
an input device by the HID subsystem. You should convert this into raw reports
and send them to your device similar to events of type UHID_OUTPUT.
+ This is no longer sent by newer kernels. Instead, HID core converts it into a
+ raw output report and sends it via UHID_OUTPUT.
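+
+  As a hedged illustration only (not part of the uhid interface definition),
+  the sketch below shows how a user-space transport might drain /dev/uhid and
+  forward UHID_OUTPUT reports; uhid_write_report() stands in for whatever
+  transport-specific send routine your driver provides:
+
+  #include <linux/uhid.h>
+  #include <unistd.h>
+
+  /* Hypothetical transport hook supplied by your driver. */
+  extern void uhid_write_report(const __u8 *data, size_t len, __u8 rtype);
+
+  static int uhid_dispatch(int fd)
+  {
+          struct uhid_event ev;
+
+          if (read(fd, &ev, sizeof(ev)) < (ssize_t)sizeof(ev.type))
+                  return -1;
+          if (ev.type == UHID_OUTPUT)
+                  /* Forward the raw report to the device. */
+                  uhid_write_report(ev.u.output.data, ev.u.output.size,
+                                    ev.u.output.rtype);
+          return 0;
+  }
+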
UHID_FEATURE:
This event is sent if the kernel driver wants to perform a feature request as
diff --git a/Documentation/hwmon/ads1015 b/Documentation/hwmon/ads1015
index f6fe9c20373..063b80d857b 100644
--- a/Documentation/hwmon/ads1015
+++ b/Documentation/hwmon/ads1015
@@ -6,6 +6,10 @@ Supported chips:
Prefix: 'ads1015'
Datasheet: Publicly available at the Texas Instruments website :
http://focus.ti.com/lit/ds/symlink/ads1015.pdf
+ * Texas Instruments ADS1115
+ Prefix: 'ads1115'
+ Datasheet: Publicly available at the Texas Instruments website :
+ http://focus.ti.com/lit/ds/symlink/ads1115.pdf
Authors:
Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de>
@@ -13,9 +17,9 @@ Authors:
Description
-----------
-This driver implements support for the Texas Instruments ADS1015.
+This driver implements support for the Texas Instruments ADS1015/ADS1115.
-This device is a 12-bit A-D converter with 4 inputs.
+This device is a 12/16-bit A-D converter with 4 inputs.
The inputs can be used single ended or in certain differential combinations.
diff --git a/Documentation/hwmon/htu21 b/Documentation/hwmon/htu21
new file mode 100644
index 00000000000..f39a215fb6a
--- /dev/null
+++ b/Documentation/hwmon/htu21
@@ -0,0 +1,46 @@
+Kernel driver htu21
+===================
+
+Supported chips:
+ * Measurement Specialties HTU21D
+ Prefix: 'htu21'
+ Addresses scanned: none
+ Datasheet: Publicly available at the Measurement Specialties website
+ http://www.meas-spec.com/downloads/HTU21D.pdf
+
+
+Author:
+ William Markezana <william.markezana@meas-spec.com>
+
+Description
+-----------
+
+The HTU21D is a humidity and temperature sensor in a DFN package of
+only 3 x 3 mm footprint and 0.9 mm height.
+
+The devices communicate with the I2C protocol. All sensors are set to the
+same I2C address 0x40, so an entry with I2C_BOARD_INFO("htu21", 0x40) can
+be used in the board setup code.
+
+This driver does not auto-detect devices. You will have to instantiate the
+devices explicitly. Please see Documentation/i2c/instantiating-devices
+for details.
+
+sysfs-Interface
+---------------
+
+temp1_input - temperature input
+humidity1_input - humidity input
+
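+A minimal user-space sketch (illustration only, not part of the driver) that
+reads and scales these attributes; the hwmon0 path is just an assumption and
+depends on probe order:
+
+#include <stdio.h>
+
+static long read_attr(const char *path)
+{
+        FILE *f = fopen(path, "r");
+        long val = -1;
+
+        if (f) {
+                if (fscanf(f, "%ld", &val) != 1)
+                        val = -1;
+                fclose(f);
+        }
+        return val;
+}
+
+int main(void)
+{
+        long t = read_attr("/sys/class/hwmon/hwmon0/temp1_input");
+        long h = read_attr("/sys/class/hwmon/hwmon0/humidity1_input");
+
+        /* hwmon reports milli-degrees Celsius and milli-percent RH. */
+        printf("%.1f degC, %.1f %%RH\n", t / 1000.0, h / 1000.0);
+        return 0;
+}
+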
+Notes
+-----
+
+The driver uses the default resolution settings of 12 bit for humidity and 14
+bit for temperature, which results in typical measurement times of 11 ms for
+humidity and 44 ms for temperature. To keep self heating below 0.1 degree
+Celsius, the device should not be active for more than 10% of the time. For
+this reason, the driver performs no more than two measurements per second and
+reports cached information if polled more frequently.
+
+Different resolutions, the on-chip heater, using the CRC checksum and reading
+the serial number are not supported yet.
diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp
index 90956b61802..4dfdc8f8363 100644
--- a/Documentation/hwmon/k10temp
+++ b/Documentation/hwmon/k10temp
@@ -12,6 +12,7 @@ Supported chips:
* AMD Family 12h processors: "Llano" (E2/A4/A6/A8-Series)
* AMD Family 14h processors: "Brazos" (C/E/G/Z-Series)
* AMD Family 15h processors: "Bulldozer" (FX-Series), "Trinity"
+* AMD Family 16h processors: "Kabini"
Prefix: 'k10temp'
Addresses scanned: PCI space
diff --git a/Documentation/i2c/busses/i2c-piix4 b/Documentation/i2c/busses/i2c-piix4
index a370b2047cf..c097e0f020f 100644
--- a/Documentation/i2c/busses/i2c-piix4
+++ b/Documentation/i2c/busses/i2c-piix4
@@ -73,9 +73,10 @@ this driver on those mainboards.
The ServerWorks Southbridges, the Intel 440MX, and the Victory66 are
identical to the PIIX4 in I2C/SMBus support.
-The AMD SB700 and SP5100 chipsets implement two PIIX4-compatible SMBus
-controllers. If your BIOS initializes the secondary controller, it will
-be detected by this driver as an "Auxiliary SMBus Host Controller".
+The AMD SB700, SB800, SP5100 and Hudson-2 chipsets implement two
+PIIX4-compatible SMBus controllers. If your BIOS initializes the
+secondary controller, it will be detected by this driver as
+an "Auxiliary SMBus Host Controller".
If you own Force CPCI735 motherboard or other OSB4 based systems you may need
to change the SMBus Interrupt Select register so the SMBus controller uses
diff --git a/Documentation/i2c/instantiating-devices b/Documentation/i2c/instantiating-devices
index 22182660dda..c70e7a7638d 100644
--- a/Documentation/i2c/instantiating-devices
+++ b/Documentation/i2c/instantiating-devices
@@ -19,7 +19,7 @@ i2c_board_info which is registered by calling i2c_register_board_info().
Example (from omap2 h4):
-static struct i2c_board_info __initdata h4_i2c_board_info[] = {
+static struct i2c_board_info h4_i2c_board_info[] __initdata = {
{
I2C_BOARD_INFO("isp1301_omap", 0x2d),
.irq = OMAP_GPIO_IRQ(125),
diff --git a/Documentation/input/gamepad.txt b/Documentation/input/gamepad.txt
new file mode 100644
index 00000000000..8002c894c6b
--- /dev/null
+++ b/Documentation/input/gamepad.txt
@@ -0,0 +1,156 @@
+ Linux Gamepad API
+----------------------------------------------------------------------------
+
+1. Intro
+~~~~~~~~
+Linux provides many different input drivers for gamepad hardware. To avoid
+having user-space deal with different button-mappings for each gamepad, this
+document defines how gamepads are supposed to report their data.
+
+2. Geometry
+~~~~~~~~~~~
+As "gamepad" we define devices which roughly look like this:
+
+ ____________________________ __
+ / [__ZL__] [__ZR__] \ |
+ / [__ TL __] [__ TR __] \ | Front Triggers
+ __/________________________________\__ __|
+ / _ \ |
+ / /\ __ (N) \ |
+ / || __ |MO| __ _ _ \ | Main Pad
+ | <===DP===> |SE| |ST| (W) -|- (E) | |
+ \ || ___ ___ _ / |
+ /\ \/ / \ / \ (S) /\ __|
+ / \________ | LS | ____ | RS | ________/ \ |
+ | / \ \___/ / \ \___/ / \ | | Control Sticks
+ | / \_____/ \_____/ \ | __|
+ | / \ |
+ \_____/ \_____/
+
+ |________|______| |______|___________|
+ D-Pad Left Right Action Pad
+ Stick Stick
+
+ |_____________|
+ Menu Pad
+
+Most gamepads have the following features:
+ - Action-Pad
+ 4 buttons in a diamond shape (on the right side). The buttons are
+ differently labeled on most devices so we define them as NORTH,
+ SOUTH, WEST and EAST.
+ - D-Pad (Direction-pad)
+ 4 buttons (on the left side) that point up, down, left and right.
+ - Menu-Pad
+ Different layouts exist, but most of the time there are 2 buttons:
+ SELECT and START.
+ Furthermore, many gamepads have a fancy branded button that is used as a
+ special system-button. It often looks different from the other buttons and
+ is used to pop up system-menus or system-settings.
+ - Analog-Sticks
+ Analog-sticks provide freely moveable sticks to control directions. Not
+ all devices have both or any, but they are present most of the time.
+ Analog-sticks may also provide a digital button if you press them.
+ - Triggers
+ Triggers are located on the upper side of the pad, stacked vertically.
+ Not all devices provide them, but the upper buttons are normally named
+ Left- and Right-Triggers, the lower buttons Z-Left and Z-Right.
+ - Rumble
+ Many devices provide force-feedback features, but these are mostly just
+ simple rumble motors.
+
+3. Detection
+~~~~~~~~~~~~
+All gamepads that follow the protocol described here map BTN_GAMEPAD. This is
+an alias for BTN_SOUTH/BTN_A. It can be used to identify a gamepad as such.
+However, not all gamepads provide all features, so you need to test for all
+features that you need, first. How each feature is mapped is described below.
+
+Legacy drivers often don't comply with these rules. As we cannot change them
+for backwards-compatibility reasons, you need to provide fixup mappings in
+user-space yourself. Some of them might also provide module-options that
+change the mappings, so you can advise users to set these.
+
+All new gamepads are supposed to comply with this mapping. Please report any
+bugs, if they don't.
+
+There are a lot of less-featured/less-powerful devices out there, which re-use
+the buttons from this protocol. However, they try to do this in a compatible
+fashion. For example, the "Nintendo Wii Nunchuk" provides two trigger buttons
+and one analog stick. It reports them as if it were a gamepad with only one
+analog stick and two trigger buttons on the right side.
+But that means that if you only support "real" gamepads, you must test
+devices for _all_ reported events that you need. Otherwise, you will also get
+devices that report a small subset of the events.
+
+Devices that do not look or feel like a gamepad shall not report these
+events.
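+
+As an illustration only (a sketch, not part of this specification), user-space
+can probe for BTN_GAMEPAD and for the individual features it needs with the
+evdev EVIOCGBIT ioctl; the event node path below is just an example:
+
+    #include <fcntl.h>
+    #include <linux/input.h>
+    #include <stdio.h>
+    #include <sys/ioctl.h>
+
+    #define BITS_PER_LONG (8 * sizeof(unsigned long))
+    #define NLONGS(x) (((x) + BITS_PER_LONG - 1) / BITS_PER_LONG)
+
+    static int test_bit(const unsigned long *bits, unsigned int bit)
+    {
+        return !!(bits[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG)));
+    }
+
+    int main(void)
+    {
+        unsigned long keys[NLONGS(KEY_MAX + 1)] = { 0 };
+        unsigned long absbits[NLONGS(ABS_MAX + 1)] = { 0 };
+        int fd = open("/dev/input/event0", O_RDONLY);
+
+        if (fd < 0)
+            return 1;
+        ioctl(fd, EVIOCGBIT(EV_KEY, sizeof(keys)), keys);
+        ioctl(fd, EVIOCGBIT(EV_ABS, sizeof(absbits)), absbits);
+
+        if (!test_bit(keys, BTN_GAMEPAD)) {
+            printf("not a gamepad\n");
+            return 1;
+        }
+        /* Test every feature you actually rely on, e.g. both sticks. */
+        if (test_bit(absbits, ABS_X) && test_bit(absbits, ABS_RX))
+            printf("gamepad with two analog sticks\n");
+        return 0;
+    }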
+
+4. Events
+~~~~~~~~~
+Gamepads report the following events:
+
+Action-Pad:
+ Every gamepad device has at least 2 action buttons. This means that every
+ device reports BTN_SOUTH (which BTN_GAMEPAD is an alias for). Regardless
+ of the labels on the buttons, the codes are sent according to the
+ physical position of the buttons.
+ Please note that 2- and 3-button pads are fairly rare and old. You might
+ want to filter gamepads that do not report all four.
+ 2-Button Pad:
+ If only 2 action-buttons are present, they are reported as BTN_SOUTH and
+ BTN_EAST. For vertical layouts, the upper button is BTN_EAST. For
+ horizontal layouts, the button more on the right is BTN_EAST.
+ 3-Button Pad:
+ If only 3 action-buttons are present, they are reported as (from left
+ to right): BTN_WEST, BTN_SOUTH, BTN_EAST
+ If the buttons are aligned perfectly vertically, they are reported as
+ (from top down): BTN_WEST, BTN_SOUTH, BTN_EAST
+ 4-Button Pad:
+ If all 4 action-buttons are present, they can be aligned in two
+ different formations. If diamond-shaped, they are reported as BTN_NORTH,
+ BTN_WEST, BTN_SOUTH, BTN_EAST according to their physical location.
+ If rectangular-shaped, the upper-left button is BTN_NORTH, lower-left
+ is BTN_WEST, lower-right is BTN_SOUTH and upper-right is BTN_EAST.
+
+D-Pad:
+ Every gamepad provides a D-Pad with four directions: Up, Down, Left, Right
+ Some of these are available as digital buttons, some as analog buttons. Some
+ may even report both. The kernel does not convert between these so
+ applications should support both and choose what is more appropriate if
+ both are reported.
+ Digital buttons are reported as:
+ BTN_DPAD_*
+ Analog buttons are reported as:
+ ABS_HAT0X and ABS_HAT0Y
+
+Analog-Sticks:
+ The left analog-stick is reported as ABS_X, ABS_Y. The right analog stick is
+ reported as ABS_RX, ABS_RY. Zero, one or two sticks may be present.
+ If analog-sticks provide digital buttons, they are mapped accordingly as
+ BTN_THUMBL (first/left) and BTN_THUMBR (second/right).
+
+Triggers:
+ Trigger buttons can be available as digital or analog buttons or both. User-
+ space must correctly deal with any situation and choose the most appropriate
+ mode.
+ Upper trigger buttons are reported as BTN_TR or ABS_HAT1X (right) and BTN_TL
+ or ABS_HAT1Y (left). Lower trigger buttons are reported as BTN_TR2 or
+ ABS_HAT2X (right/ZR) and BTN_TL2 or ABS_HAT2Y (left/ZL).
+ If only one trigger-button combination is present (upper+lower), they are
+ reported as "right" triggers (BTN_TR/ABS_HAT1X).
+
+Menu-Pad:
+ Menu buttons are always digital and are mapped according to their location
+ instead of their labels. That is:
+ 1-button Pad: Mapped as BTN_START
+ 2-button Pad: Left button mapped as BTN_SELECT, right button mapped as
+ BTN_START
+ Many pads also have a third button which is branded or has a special symbol
+ and meaning. Such buttons are mapped as BTN_MODE. Examples are the Nintendo
+ "HOME" button, the XBox "X"-button or Sony "P" button.
+
+Rumble:
+ Rumble is advertised as FF_RUMBLE.
+
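+As a final sketch (again, illustration only), events arrive as ordinary
+struct input_event records and can be demultiplexed by type and code;
+BTN_SOUTH here is the BTN_SOUTH/BTN_A alias described above:
+
+    #include <linux/input.h>
+    #include <stdio.h>
+    #include <unistd.h>
+
+    static void gamepad_read(int fd)
+    {
+        struct input_event ev;
+
+        while (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev)) {
+            if (ev.type == EV_KEY && ev.code == BTN_SOUTH)
+                printf("south action button: %s\n",
+                       ev.value ? "pressed" : "released");
+            else if (ev.type == EV_ABS && ev.code == ABS_X)
+                printf("left stick X: %d\n", ev.value);
+            else if (ev.type == EV_ABS && ev.code == ABS_HAT0X)
+                printf("d-pad X: %d\n", ev.value);
+        }
+    }
+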
+----------------------------------------------------------------------------
+ Written 2013 by David Herrmann <dh.herrmann@gmail.com>
diff --git a/Documentation/ja_JP/HOWTO b/Documentation/ja_JP/HOWTO
index 050d37fe6d4..8148a47fc70 100644
--- a/Documentation/ja_JP/HOWTO
+++ b/Documentation/ja_JP/HOWTO
@@ -11,14 +11,14 @@ for non English (read: Japanese) speakers and is not intended as a
fork. So if you have any comments or updates for this file, please try
to update the original English file first.
-Last Updated: 2011/03/31
+Last Updated: 2013/07/19
==================================
This is a Japanese translation of the following file:
-linux-2.6.38/Documentation/HOWTO
+linux-3.10/Documentation/HOWTO
-Translation group: JF Project < http://www.linux.or.jp/JF/ >
-Translation date: 2011/3/28
+Translation group: JF Project < http://linuxjf.sourceforge.jp/ >
+Translation date: 2013/7/19
Translator: Tsugikazu Shibata <tshibata at ab dot jp dot nec dot com>
Proofreaders: Matsukura-san <nbh--mats at nifty dot com>
              Masanori Kobayasi-san <zap03216 at nifty dot ne dot jp>
@@ -245,7 +245,7 @@ the clean, consistent and maintainable code contained in the Linux kernel source tree
The source code can be browsed in a self-referential, indexed web form.
This up-to-date repository of wonderful kernel code can be found at:
-  http://sosdg.org/~qiyong/lxr/
+  http://lxr.linux.no/+trees
The development process
-----------------------
@@ -253,24 +253,24 @@ the clean, consistent and maintainable code contained in the Linux kernel source tree
The development process of the Linux kernel currently consists of a few
different main kernel "branches" and lots of subsystem-specific kernel
branches. These branches are:
-  - the main 2.6.x kernel tree
-  - the 2.6.x.y -stable kernel tree
-  - the 2.6.x -git kernel patches
+  - the main 3.x kernel tree
+  - the 3.x.y -stable kernel tree
+  - the 3.x -git kernel patches
   - the subsystem-specific kernel trees and patches
-  - the 2.6.x -next kernel tree for integration tests
+  - the 3.x -next kernel tree for integration tests
-2.6.x kernel tree
+3.x kernel tree
-----------------
-2.6.x kernels are maintained by Linus Torvalds, and can be found in the
-pub/linux/kernel/v2.6/ directory on kernel.org. Their development process
+3.x kernels are maintained by Linus Torvalds, and can be found in the
+pub/linux/kernel/v3.x/ directory on kernel.org. Their development process
is as follows:
 - As soon as a new kernel is released, a two-week window opens during
   which maintainers can send big diffs to Linus. Such diffs are usually
   patches that have already been included in the -next kernel for a few
   weeks. The preferred way to submit big changes is to use git (the
   kernel's source management tool; see
-  http://git-scm.com/ for details), but sending them as plain patch
+  http://git-scm.com/ for details), but sending them as plain patch
   files is also fine.
 - After two weeks a -rc1 kernel is released, and after this the focus is
@@ -302,20 +302,20 @@ Andrew Morton's posting to the Linux-kernel mailing list about kernel releases
is released according to the status of actually recognized bugs, and not
according to a plan decided in advance."
-2.6.x.y -stable kernel tree
+3.x.y -stable kernel tree
---------------------------
-Kernels whose version number consists of four parts are -stable kernels.
-They contain relatively small but important fixes for security problems
-and serious regressions found in the 2.6.x kernels.
+Kernels whose version number consists of three parts are -stable kernels.
+They contain relatively small but important fixes for security problems
+and serious regressions found in the 3.x kernels.
This is the branch recommended for users who want the most recent stable
kernel and are not interested in helping to test development/experimental
versions.
-If no 2.6.x.y kernel is available, then the highest numbered 2.6.x
+If no 3.x.y kernel is available, then the highest numbered 3.x
kernel is the current stable kernel.
-2.6.x.y kernels are maintained by the "stable" team <stable@kernel.org>,
+3.x.y kernels are maintained by the "stable" team <stable@kernel.org>,
and are released as needed. The usual release period is about two weeks,
but it can be a bit longer if there are no pressing problems. A
security-related problem, however, usually causes a release to happen
almost immediately.
@@ -324,7 +324,7 @@ Andrew Morton's posting to the Linux-kernel mailing list about kernel releases
file describes what kinds of changes are acceptable for the -stable tree
and how the release process works.
-2.6.x -git patches
+3.x -git patches
------------------
These are daily snapshots of Linus' kernel tree, which are managed in a git
@@ -358,14 +358,14 @@ patch queues published as quilt series are also used
can be followed there. Most of these patchwork sites are listed at
http://patchwork.kernel.org/ .
-The 2.6.x -next kernel tree for integration tests
+The 3.x -next kernel tree for integration tests
---------------------------------------------
-Before updates from the subsystem trees are merged into the mainline 2.6.x
+Before updates from the subsystem trees are merged into the mainline 3.x
tree, they need to be integration-tested. For this purpose a special
testing repository exists, into which virtually all the subsystem trees
are pulled on an almost daily basis:
-  http://git.kernel.org/?p=linux/kernel/git/sfr/linux-next.git
+  http://git.kernel.org/?p=linux/kernel/git/next/linux-next.git
   http://linux.f-seidel.de/linux-next/pmwiki/
In this way, the -next kernel gives a summary outlook of what is expected
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 15356aca938..479eeaf4402 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -235,10 +235,61 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
Format: To spoof as Windows 98: ="Microsoft Windows"
acpi_osi= [HW,ACPI] Modify list of supported OS interface strings
- acpi_osi="string1" # add string1 -- only one string
- acpi_osi="!string2" # remove built-in string2
+ acpi_osi="string1" # add string1
+ acpi_osi="!string2" # remove string2
+ acpi_osi=!* # remove all strings
+ acpi_osi=! # disable all built-in OS vendor
+ strings
acpi_osi= # disable all strings
+ 'acpi_osi=!' can be used in combination with single or
+ multiple 'acpi_osi="string1"' to support specific OS
+ vendor string(s). Note that such a command can only
+ affect the default state of the OS vendor strings; it
+ cannot affect the default state of the feature group
+ strings or the current state of the OS vendor strings,
+ so specifying it multiple times on the kernel command
+ line is meaningless. This command is useful when one
+ does not care about the state of the feature group
+ strings, which should be controlled by the OSPM.
+ Examples:
+ 1. 'acpi_osi=! acpi_osi="Windows 2000"' is equivalent
+ to 'acpi_osi="Windows 2000" acpi_osi=!'; both
+ can make '_OSI("Windows 2000")' TRUE.
+
+ 'acpi_osi=' cannot be used in combination with other
+ 'acpi_osi=' command lines; if it is used, the _OSI
+ method will not exist in the ACPI namespace. NOTE that
+ such a command can only affect the _OSI support state,
+ thus specifying it multiple times through the kernel
+ command line is also meaningless.
+ Examples:
+ 1. 'acpi_osi=' can make 'CondRefOf(_OSI, Local1)'
+ FALSE.
+
+ 'acpi_osi=!*' can be used in combination with single or
+ multiple 'acpi_osi="string1"' to support specific
+ string(s). Note that such a command can affect the
+ current state of both the OS vendor strings and the
+ feature group strings, so specifying it multiple times
+ through the kernel command line is meaningful. But it
+ may still not be able to affect the final state of a
+ string if there are quirks related to this string. This
+ command is useful when one wants to control the state
+ of the feature group strings to debug BIOS issues
+ related to the OSPM features.
+ Examples:
+ 1. 'acpi_osi="Module Device" acpi_osi=!*' can make
+ '_OSI("Module Device")' FALSE.
+ 2. 'acpi_osi=!* acpi_osi="Module Device"' can make
+ '_OSI("Module Device")' TRUE.
+ 3. 'acpi_osi=! acpi_osi=!* acpi_osi="Windows 2000"' is
+ equivalent to
+ 'acpi_osi=!* acpi_osi=! acpi_osi="Windows 2000"'
+ and
+ 'acpi_osi=!* acpi_osi="Windows 2000" acpi_osi=!',
+ they all will make '_OSI("Windows 2000")' TRUE.
+
acpi_pm_good [X86]
Override the pmtimer bug detection: force the kernel
to assume that this machine's pmtimer latches its value
@@ -2953,7 +3004,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
improve throughput, but will also increase the
amount of memory reserved for use by the client.
- swapaccount[=0|1]
+ swapaccount=[0|1]
[KNL] Enable accounting of swap in memory resource
controller if no parameter or 1 is given or disable
it if 0 is given (See Documentation/cgroups/memory.txt)
@@ -3322,6 +3373,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
them quite hard to use for exploits but
might break your system.
+ vt.color= [VT] Default text color.
+ Format: 0xYX, X = foreground, Y = background.
+ Default: 0x07 = light gray on black.
+
vt.cur_default= [VT] Default cursor shape.
Format: 0xCCBBAA, where AA, BB, and CC are the same as
the parameters of the <Esc>[?A;B;Cc escape sequence;
@@ -3361,6 +3416,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
overridden by individual drivers. 0 will hide
cursors, 1 will display them.
+ vt.italic= [VT] Default color for italic text; 0-15.
+ Default: 2 = green.
+
+ vt.underline= [VT] Default color for underlined text; 0-15.
+ Default: 3 = cyan.
+
watchdog timers [HW,WDT] For information on watchdog timers,
see Documentation/watchdog/watchdog-parameters.txt
or other driver-specific files in the
diff --git a/Documentation/ko_KR/HOWTO b/Documentation/ko_KR/HOWTO
index 2f48f205fed..680e6463595 100644
--- a/Documentation/ko_KR/HOWTO
+++ b/Documentation/ko_KR/HOWTO
@@ -182,8 +182,8 @@ Documentation/DocBook/ 디렉토리 ë‚´ì—ì„œ 만들어지며 PDF, Postscript, H
프로ì íŠ¸ë¥¼ ë´ì•¼ 한다.
http://kernelnewbies.org
ê·¸ê³³ì€ ê±°ì˜ ëª¨ë“  ì¢…ë¥˜ì˜ ê¸°ë³¸ì ì¸ ì»¤ë„ ê°œë°œ 질문들(질문하기 ì „ì— ë¨¼ì €
-ì•„ì¹´ì´ë¸Œë¥¼ 찾아ë´ë¼. ê³¼ê±°ì— ì´ë¯¸ 답변ë˜ì—ˆì„ ìˆ˜ë„ ìžˆë‹¤)ì„ í• ìˆ˜ìžˆëŠ” ë„움ì´
-ë ë§Œí•œ ë©”ì¼ë§ 리스트가 있다. ë˜í•œ 실시간으로 질문 할수 있는 IRC 채ë„ë„
+ì•„ì¹´ì´ë¸Œë¥¼ 찾아ë´ë¼. ê³¼ê±°ì— ì´ë¯¸ 답변ë˜ì—ˆì„ ìˆ˜ë„ ìžˆë‹¤)ì„ í•  수 있는 ë„움ì´
+ë ë§Œí•œ ë©”ì¼ë§ 리스트가 있다. ë˜í•œ 실시간으로 질문 í•  수 있는 IRC 채ë„ë„
가지고 있으며 리눅스 ì»¤ë„ ê°œë°œì„ ë°°ìš°ëŠ” ë° ìœ ìš©í•œ ë¬¸ì„œë“¤ì„ ë³´ìœ í•˜ê³  있다.
웹사ì´íŠ¸ëŠ” 코드구성, 서브시스템들, 그리고 현재 프로ì íŠ¸ë“¤
@@ -245,7 +245,7 @@ Documentation/DocBook/ 디렉토리 ë‚´ì—ì„œ 만들어지며 PDF, Postscript, H
ê²ƒì„ ê¸°ì–µí•´ë¼. 왜ëƒí•˜ë©´ ë³€ê²½ì´ ìžì²´ë‚´ì—서만 ë°œìƒí•˜ê³  ì¶”ê°€ëœ ì½”ë“œê°€
ë“œë¼ì´ë²„ ì™¸ë¶€ì˜ ë‹¤ë¥¸ 부분ì—는 ì˜í–¥ì„ 주지 않으므로 그런 변경ì€
회귀(ì—­ìžì£¼: ì´ì „ì—는 존재하지 않았지만 새로운 기능추가나 변경으로 ì¸í•´
- ìƒê²¨ë‚œ 버그)를 ì¼ìœ¼í‚¬ 만한 ìœ„í—˜ì„ ê°€ì§€ê³  있지 않기 때문ì´ë‹¤. -rc1ì´
+ ìƒê²¨ë‚œ 버그)를 ì¼ìœ¼í‚¬ 만한 ìœ„í—˜ì„ ê°€ì§€ê³  있지 않기 때문ì´ë‹¤. -rc1ì´
ë°°í¬ëœ ì´í›„ì— git를 사용하여 íŒ¨ì¹˜ë“¤ì„ Linusì—게 보낼수 있지만 패치들ì€
ê³µì‹ì ì¸ ë©”ì¼ë§ 리스트로 ë³´ë‚´ì„œ 검토를 ë°›ì„ í•„ìš”ê°€ 있다.
- 새로운 -rc는 Linusê°€ 현재 git treeê°€ 테스트 í•˜ê¸°ì— ì¶©ë¶„ížˆ ì•ˆì •ëœ ìƒíƒœì—
@@ -455,7 +455,7 @@ bugme-janitor ë©”ì¼ë§ 리스트(bugzillaì— ëª¨ë“  ë³€í™”ë“¤ì´ ì—¬ê¸°ì„œ ë©”ì
- ì˜ê²¬
- ë³€ê²½ì„ ìœ„í•œ 요구
- ë‹¹ìœ„ì„±ì„ ìœ„í•œ 요구
- - ê³ ìš”
+ - 침묵
기억하ë¼. ì´ê²ƒë“¤ì€ ì—¬ëŸ¬ë¶„ì˜ íŒ¨ì¹˜ê°€ 커ë„ë¡œ 들어가기 위한 과정ì´ë‹¤. 여러분ì˜
íŒ¨ì¹˜ë“¤ì€ ë¹„íŒê³¼ 다른 ì˜ê²¬ì„ ë°›ì„ ìˆ˜ 있고 ê·¸ê²ƒë“¤ì„ ê¸°ìˆ ì ì¸ 레벨로 í‰ê°€í•˜ê³ 
@@ -472,7 +472,7 @@ bugme-janitor ë©”ì¼ë§ 리스트(bugzillaì— ëª¨ë“  ë³€í™”ë“¤ì´ ì—¬ê¸°ì„œ ë©”ì
가능한한 가장 ì¢‹ì€ ê¸°ìˆ ì ì¸ í•´ë‹µì„ ì°¾ê³  있는 커뮤니티ì—서는 í•­ìƒ
ì–´ë–¤ 패치가 얼마나 좋ì€ì§€ì— 관하여 다른 ì˜ê²¬ë“¤ì´ ìžˆì„ ìˆ˜ 있다. 여러분ì€
협조ì ì´ì–´ì•¼ 하고 ê¸°êº¼ì´ ì—¬ëŸ¬ë¶„ì˜ ìƒê°ì„ ì»¤ë„ ë‚´ì— ë§žì¶”ì–´ì•¼ 한다. 아니면
-ì ì–´ë„ ì—¬ëŸ¬ë¶„ì˜ ê²ƒì´ ê°€ì¹˜ìžˆë‹¤ëŠ” ê²ƒì„ ì¤‘ëª…í•˜ì—¬ì•¼ 한다. ìž˜ëª»ëœ ê²ƒë„ ì—¬ëŸ¬ë¶„ì´
+ì ì–´ë„ ì—¬ëŸ¬ë¶„ì˜ ê²ƒì´ ê°€ì¹˜ìžˆë‹¤ëŠ” ê²ƒì„ ì¦ëª…하여야 한다. ìž˜ëª»ëœ ê²ƒë„ ì—¬ëŸ¬ë¶„ì´
올바른 ë°©í–¥ì˜ í•´ê²°ì±…ìœ¼ë¡œ ì´ëŒì–´ê°ˆ ì˜ì§€ê°€ 있다면 받아들여질 것ì´ë¼ëŠ” ì ì„
기억하ë¼.
@@ -488,21 +488,21 @@ bugme-janitor ë©”ì¼ë§ 리스트(bugzillaì— ëª¨ë“  ë³€í™”ë“¤ì´ ì—¬ê¸°ì„œ ë©”ì
ì»¤ë„ ì»¤ë®¤ë‹ˆí‹°ëŠ” 가장 전통ì ì¸ íšŒì‚¬ì˜ ê°œë°œ 환경과는 다르다. ì—¬ê¸°ì— ì—¬ëŸ¬ë¶„ë“¤ì˜
문제를 피하기 위한 목ë¡ì´ 있다.
ì—¬ëŸ¬ë¶„ë“¤ì´ ì œì•ˆí•œ ë³€ê²½ë“¤ì— ê´€í•˜ì—¬ ë§í•  ë•Œ ì¢‹ì€ ê²ƒë“¤ :
- - "ì´ê²ƒì€ 여러 ë¬¸ì œë“¤ì„ í•´ê²¹í•©ë‹ˆë‹¤."
- - "ì´ê²ƒì€ 2000 ë¼ì¸ì˜ 코드를 제거합니다."
+ - "ì´ê²ƒì€ 여러 ë¬¸ì œë“¤ì„ í•´ê²°í•©ë‹ˆë‹¤."
+ - "ì´ê²ƒì€ 2000 ë¼ì¸ì˜ 코드를 줄입니다."
- "ì´ê²ƒì€ ë‚´ê°€ ë§í•˜ë ¤ëŠ” ê²ƒì— ê´€í•´ 설명하는 패치입니다."
- - "나는 5ê°œì˜ ë‹¤ë¥¸ 아키í…ì³ì—ì„œ ê·¸ê²ƒì„ í…ŒìŠ¤íŠ¸í–ˆìŠ´ìœ¼ë¡œ..."
- - "ì—¬ê¸°ì— ì¼ë ¨ì˜ ìž‘ì€ íŒ¨ì¹˜ë“¤ì´ ìžˆìŠ´ìŒë¡œ..."
- - "ì´ê²ƒì€ ì¼ë°˜ì ì¸ 머신ì—ì„œ ì„±ëŠ¥ì„ í–¥ìƒì‹œí‚´ìœ¼ë¡œ..."
+ - "나는 5ê°œì˜ ë‹¤ë¥¸ 아키í…ì³ì—ì„œ ê·¸ê²ƒì„ í…ŒìŠ¤íŠ¸ 했으므로..."
+ - "ì—¬ê¸°ì— ì¼ë ¨ì˜ ìž‘ì€ íŒ¨ì¹˜ë“¤ì´ ìžˆìœ¼ë¯€ë¡œ..."
+ - "ì´ê²ƒì€ ì¼ë°˜ì ì¸ 머신ì—ì„œ ì„±ëŠ¥ì„ í–¥ìƒí•¨ìœ¼ë¡œ..."
ì—¬ëŸ¬ë¶„ë“¤ì´ ë§í•  ë•Œ 피해야 í•  좋지 ì•Šì€ ê²ƒë“¤ :
- - "우리를 ê·¸ê²ƒì„ AIT/ptx/Solarisì—ì„œ ì´ëŸ¬í•œ 방법으로 했다. 그러므로 ê·¸ê²ƒì€ ì¢‹ì€ ê²ƒìž„ì— í‹€ë¦½ì—†ë‹¤..."
+ - "우리는 ê·¸ê²ƒì„ AIX/ptx/Solarisì—ì„œ ì´ëŸ¬í•œ 방법으로 했다. 그러므로 ê·¸ê²ƒì€ ì¢‹ì€ ê²ƒìž„ì— í‹€ë¦¼ì—†ë‹¤..."
- "나는 20ë…„ë™ì•ˆ ì´ê²ƒì„ 해왔다. 그러므로..."
- "ì´ê²ƒì€ ëˆì„ 벌기위해 ë‚˜ì˜ íšŒì‚¬ê°€ 필요로 하는 것ì´ë‹¤."
- "ì´ê²ƒì€ ìš°ë¦¬ì˜ ì—”í„°í”„ë¼ì´ì¦ˆ ìƒí’ˆ ë¼ì¸ì„ 위한 것ì´ë‹¤."
- "ì—¬ê¸°ì— ë‚˜ì˜ ìƒê°ì„ ë§í•˜ê³  있는 1000 페ì´ì§€ 설계 문서가 있다."
- "나는 6달ë™ì•ˆ ì´ê²ƒì„ 했으니..."
- - "ì—¬ê¸°ì— 5000ë¼ì¸ 짜리 패치가 있으니..."
+ - "ì—¬ê¸°ì— 5000 ë¼ì¸ 짜리 패치가 있으니..."
- "나는 현재 ë’¤ì£½ë°•ì£½ì¸ ê²ƒì„ ìž¬ìž‘ì„±í–ˆë‹¤. 그리고 여기ì—..."
- "나는 마ê°ì‹œí•œì„ 가지고 있으므로 ì´ íŒ¨ì¹˜ëŠ” 지금 ì ìš©ë  필요가 있다."
@@ -574,6 +574,7 @@ Patì´ë¼ëŠ” ì´ë¦„ì„ ê°€ì§„ ì—¬ìžê°€ ìžˆì„ ìˆ˜ë„ ìžˆëŠ” 것ì´ë‹¤. 리눅ìŠ
ë˜í•œ 완성ë˜ì§€ 않았고 "ë‚˜ì¤‘ì— ìˆ˜ì •ë  ê²ƒì´ë‹¤." 와 ê°™ì€ ê²ƒë“¤ì„ í¬í•¨í•˜ëŠ”
íŒ¨ì¹˜ë“¤ì€ ë°›ì•„ë“¤ì—¬ì§€ì§€ ì•Šì„ ê²ƒì´ë¼ëŠ” ì ì„ 유ë…하ë¼.
+
ë³€ê²½ì„ ì •ë‹¹í™”í•´ë¼
-----------------
diff --git a/Documentation/ko_KR/stable_api_nonsense.txt b/Documentation/ko_KR/stable_api_nonsense.txt
index 8f2b0e1d98c..51f85ade419 100644
--- a/Documentation/ko_KR/stable_api_nonsense.txt
+++ b/Documentation/ko_KR/stable_api_nonsense.txt
@@ -106,12 +106,12 @@ Greg Kroah-Hartman <greg@kroah.com>
---------------------------------
리눅스 ì»¤ë„ ë“œë¼ì´ë²„를 계ì†í•´ì„œ ë©”ì¸ ì»¤ë„ íŠ¸ë¦¬ì— ë°˜ì˜í•˜ì§€ ì•Šê³ 
-유지보수하려고 하는 사름들과 ì´ ë¬¸ì œë¥¼ ë…¼ì˜í•˜ê²Œ ë˜ë©´ 훨씬 ë”
+유지보수하려고 하는 사람들과 ì´ ë¬¸ì œë¥¼ ë…¼ì˜í•˜ê²Œ ë˜ë©´ 훨씬 ë”
"ë…¼ëž€ì˜ ì—¬ì§€ê°€ 많ì€" 주제가 ë  ê²ƒì´ë‹¤.
리눅스 ì»¤ë„ ê°œë°œì€ ëŠìž„ì—†ì´ ë¹ ë¥¸ ì†ë„ë¡œ ì´ë£¨ì–´ì§€ê³  있으며 ê²°ì½”
ëŠìŠ¨í•´ì§„ ì ì´ 없다. ì»¤ë„ ê°œë°œìžë“¤ì´ 현재 ì¸í„°íŽ˜ì´ìŠ¤ë“¤ì—ì„œ 버그를
-발견하거나 무엇ì¸ê°€ 할수 있는 ë” ì¢‹ì€ ë°©ë²•ì„ ì°¾ê²Œ ë˜ì—ˆë‹¤ê³  하ìž.
+발견하거나 무엇ì¸ê°€ í•  수 있는 ë” ì¢‹ì€ ë°©ë²•ì„ ì°¾ê²Œ ë˜ì—ˆë‹¤ê³  하ìž.
ê·¸ë“¤ì´ ë°œê²¬í•œ ê²ƒì„ ì‹¤í–‰í•œë‹¤ë©´ ì•„ë§ˆë„ ë” ìž˜ ë™ìž‘하ë„ë¡ í˜„ìž¬ ì¸í„°íŽ˜ì´ìŠ¤ë“¤ì„
수정하게 ë  ê²ƒì´ë‹¤. ê·¸ë“¤ì´ ê·¸ëŸ° ì¼ì„ 하게ë˜ë©´ 함수 ì´ë¦„ë“¤ì€ ë³€í•˜ê²Œ ë˜ê³ ,
êµ¬ì¡°ì²´ë“¤ì€ ëŠ˜ì–´ë‚˜ê±°ë‚˜ 줄어들게 ë˜ê³ , 함수 파ë¼ë¯¸í„°ë“¤ì€ ìž¬ìž‘ì—…ë  ê²ƒì´ë‹¤.
@@ -174,7 +174,7 @@ GPLì„ ë”°ë¥´ëŠ” ë°°í¬ ë“œë¼ì´ë²„ì— ê´€í•´ 얘기하고 있다는 ê²ƒì„ ìƒ
ë™ìž‘하는 ê²ƒì„ ë³´ìž¥í•œë‹¤.
ë©”ì¸ ì»¤ë„ íŠ¸ë¦¬ì— ì—¬ëŸ¬ë¶„ì˜ ë“œë¼ì´ë²„를 ë°˜ì˜í•˜ë©´ 얻게 ë˜ëŠ” 장ì ë“¤ì€ 다ìŒê³¼ 같다.
- - ê´€ë¦¬ì˜ ë“œëŠ” 비용(ì›ëž˜ 개발ìžì˜)ì€ ì¤„ì–´ì¤„ë©´ì„œ ë“œë¼ì´ë²„ì˜ ì§ˆì€ í–¥ìƒë  것ì´ë‹¤.
+ - ê´€ë¦¬ì— ë“œëŠ” 비용(ì›ëž˜ 개발ìžì˜)ì€ ì¤„ì–´ì¤„ë©´ì„œ ë“œë¼ì´ë²„ì˜ ì§ˆì€ í–¥ìƒë  것ì´ë‹¤.
- 다른 개발ìžë“¤ì´ ì—¬ëŸ¬ë¶„ì˜ ë“œë¼ì´ë²„ì— ê¸°ëŠ¥ë“¤ì„ ì¶”ê°€ í•  것ì´ë‹¤.
- 다른 ì‚¬ëžŒë“¤ì€ ì—¬ëŸ¬ë¶„ì˜ ë“œë¼ì´ë²„ì— ë²„ê·¸ë¥¼ 발견하고 수정할 것ì´ë‹¤.
- 다른 ì‚¬ëžŒë“¤ì€ ì—¬ëŸ¬ë¶„ì˜ ë“œë¼ì´ë²„ì˜ ê°œì„ ì ì„ ì°¾ì„ ì¤„ 것ì´ë‹¤.
diff --git a/Documentation/laptops/asus-laptop.txt b/Documentation/laptops/asus-laptop.txt
index 69f9fb3701e..79a1bc675a8 100644
--- a/Documentation/laptops/asus-laptop.txt
+++ b/Documentation/laptops/asus-laptop.txt
@@ -8,8 +8,8 @@ http://acpi4asus.sf.net/
This driver provides support for extra features of ACPI-compatible ASUS laptops.
It may also support some MEDION, JVC or VICTOR laptops (such as MEDION 9675 or
- VICTOR XP7210 for example). It makes all the extra buttons generate standard
- ACPI events that go through /proc/acpi/events and input events (like keyboards).
+ VICTOR XP7210 for example). It makes all the extra buttons generate input
+ events (like keyboards).
On some models adds support for changing the display brightness and output,
switching the LCD backlight on and off, and most importantly, allows you to
blink those fancy LEDs intended for reporting mail and wireless status.
@@ -55,8 +55,8 @@ Usage
DSDT) to me.
That's all, now, all the events generated by the hotkeys of your laptop
- should be reported in your /proc/acpi/event entry. You can check with
- "acpi_listen".
+ should be reported via netlink events. You can check with
+ "acpi_genl monitor" (part of the acpica project).
Hotkeys are also reported as input keys (like keyboards) you can check
which key are supported using "xev" under X11.
diff --git a/Documentation/laptops/sony-laptop.txt b/Documentation/laptops/sony-laptop.txt
index 0d5ac7f5287..978b1e61515 100644
--- a/Documentation/laptops/sony-laptop.txt
+++ b/Documentation/laptops/sony-laptop.txt
@@ -12,10 +12,10 @@ Fn keys (hotkeys):
------------------
Some models report hotkeys through the SNC or SPIC devices, such events are
reported both through the ACPI subsystem as acpi events and through the INPUT
-subsystem. See the logs of acpid or /proc/acpi/event and
-/proc/bus/input/devices to find out what those events are and which input
-devices are created by the driver. Additionally, loading the driver with the
-debug option will report all events in the kernel log.
+subsystem. See the logs of /proc/bus/input/devices to find out what those
+events are and which input devices are created by the driver.
+Additionally, loading the driver with the debug option will report all events
+in the kernel log.
The "scancodes" passed to the input system (that can be remapped with udev)
are indexes to the table "sony_laptop_input_keycode_map" in the sony-laptop.c
diff --git a/Documentation/laptops/thinkpad-acpi.txt b/Documentation/laptops/thinkpad-acpi.txt
index cf7bc6cb971..86c52360ffe 100644
--- a/Documentation/laptops/thinkpad-acpi.txt
+++ b/Documentation/laptops/thinkpad-acpi.txt
@@ -329,20 +329,6 @@ sysfs notes:
This attribute has poll()/select() support.
- hotkey_report_mode:
- Returns the state of the procfs ACPI event report mode
- filter for hot keys. If it is set to 1 (the default),
- all hot key presses are reported both through the input
- layer and also as ACPI events through procfs (but not
- through netlink). If it is set to 2, hot key presses
- are reported only through the input layer.
-
- This attribute is read-only in kernels 2.6.23 or later,
- and read-write on earlier kernels.
-
- May return -EPERM (write access locked out by module
- parameter) or -EACCES (read-only).
-
wakeup_reason:
Set to 1 if the system is waking up because the user
requested a bay ejection. Set to 2 if the system is
@@ -518,24 +504,21 @@ SW_TABLET_MODE Tablet ThinkPads HKEY events 0x5009 and 0x500A
Non hotkey ACPI HKEY event map:
-------------------------------
-Events that are not propagated by the driver, except for legacy
-compatibility purposes when hotkey_report_mode is set to 1:
-
-0x5001 Lid closed
-0x5002 Lid opened
-0x5009 Tablet swivel: switched to tablet mode
-0x500A Tablet swivel: switched to normal mode
-0x7000 Radio Switch may have changed state
-
Events that are never propagated by the driver:
0x2304 System is waking up from suspend to undock
0x2305 System is waking up from suspend to eject bay
0x2404 System is waking up from hibernation to undock
0x2405 System is waking up from hibernation to eject bay
+0x5001 Lid closed
+0x5002 Lid opened
+0x5009 Tablet swivel: switched to tablet mode
+0x500A Tablet swivel: switched to normal mode
0x5010 Brightness level changed/control event
0x6000 KEYBOARD: Numlock key pressed
0x6005 KEYBOARD: Fn key pressed (TO BE VERIFIED)
+0x7000 Radio Switch may have changed state
+
Events that are propagated by the driver to userspace:
@@ -574,50 +557,6 @@ operating system is to force either an immediate suspend or hibernate
cycle, or a system shutdown. Obviously, something is very wrong if this
happens.
-Compatibility notes:
-
-ibm-acpi and thinkpad-acpi 0.15 (mainline kernels before 2.6.23) never
-supported the input layer, and sent events over the procfs ACPI event
-interface.
-
-To avoid sending duplicate events over the input layer and the ACPI
-event interface, thinkpad-acpi 0.16 implements a module parameter
-(hotkey_report_mode), and also a sysfs device attribute with the same
-name.
-
-Make no mistake here: userspace is expected to switch to using the input
-layer interface of thinkpad-acpi, together with the ACPI netlink event
-interface in kernels 2.6.23 and later, or with the ACPI procfs event
-interface in kernels 2.6.22 and earlier.
-
-If no hotkey_report_mode module parameter is specified (or it is set to
-zero), the driver defaults to mode 1 (see below), and on kernels 2.6.22
-and earlier, also allows one to change the hotkey_report_mode through
-sysfs. In kernels 2.6.23 and later, where the netlink ACPI event
-interface is available, hotkey_report_mode cannot be changed through
-sysfs (it is read-only).
-
-If the hotkey_report_mode module parameter is set to 1 or 2, it cannot
-be changed later through sysfs (any writes will return -EPERM to signal
-that hotkey_report_mode was locked. On 2.6.23 and later, where
-hotkey_report_mode cannot be changed at all, writes will return -EACCES).
-
-hotkey_report_mode set to 1 makes the driver export through the procfs
-ACPI event interface all hot key presses (which are *also* sent to the
-input layer). This is a legacy compatibility behaviour, and it is also
-the default mode of operation for the driver.
-
-hotkey_report_mode set to 2 makes the driver filter out the hot key
-presses from the procfs ACPI event interface, so these events will only
-be sent through the input layer. Userspace that has been updated to use
-the thinkpad-acpi input layer interface should set hotkey_report_mode to
-2.
-
-Hot key press events are never sent to the ACPI netlink event interface.
-Really up-to-date userspace under kernel 2.6.23 and later is to use the
-netlink interface and the input layer interface, and don't bother at all
-with hotkey_report_mode.
-
Brightness hotkey notes:
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index fa5d8a9ae20..c8c42e64e95 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -531,9 +531,10 @@ dependency barrier to make it work correctly. Consider the following bit of
code:
q = &a;
- if (p)
+ if (p) {
+ <data dependency barrier>
q = &b;
- <data dependency barrier>
+ }
x = *q;
This will not have the desired effect because there is no actual data
@@ -542,9 +543,10 @@ attempting to predict the outcome in advance. In such a case what's actually
required is:
q = &a;
- if (p)
+ if (p) {
+ <read barrier>
q = &b;
- <read barrier>
+ }
x = *q;
diff --git a/Documentation/memory-hotplug.txt b/Documentation/memory-hotplug.txt
index d7a9b0a90d4..58340d50f8a 100644
--- a/Documentation/memory-hotplug.txt
+++ b/Documentation/memory-hotplug.txt
@@ -210,13 +210,15 @@ If memory device is found, memory hotplug code will be called.
4.2 Notify memory hot-add event by hand
------------
-In some environments, especially virtualized environment, firmware will not
-notify memory hotplug event to the kernel. For such environment, "probe"
-interface is supported. This interface depends on CONFIG_ARCH_MEMORY_PROBE.
-
-Now, CONFIG_ARCH_MEMORY_PROBE is supported only by powerpc but it does not
-contain highly architecture codes. Please add config if you need "probe"
-interface.
+On powerpc, the firmware does not notify the kernel of memory hotplug events.
+Therefore, the "probe" interface is provided so that the event can be reported
+to the kernel by hand. This interface depends on CONFIG_ARCH_MEMORY_PROBE.
+
+CONFIG_ARCH_MEMORY_PROBE is supported on powerpc only. On x86, this config
+option is disabled by default since ACPI notifies the kernel of memory
+hotplug events and the kernel then performs the hotplug operation. Please
+enable this option if you need the "probe" interface for testing purposes
+on x86.
Probe interface is located at
/sys/devices/system/memory/probe
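
For illustration, a minimal user space helper that announces a hot-added range
might look like this (a sketch only: the start address 0x100000000 is made up,
use the real physical address of the new memory; it also needs
CONFIG_ARCH_MEMORY_PROBE and root privileges):

  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  int main(void)
  {
          const char *addr = "0x100000000\n";   /* hypothetical start address */
          int fd = open("/sys/devices/system/memory/probe", O_WRONLY);

          if (fd < 0) {
                  perror("open");
                  return 1;
          }
          /* writing the start physical address triggers the hot-add path */
          if (write(fd, addr, strlen(addr)) < 0)
                  perror("write");
          close(fd);
          return 0;
  }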
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index 32dfbd92412..18b64b2b8a6 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -124,6 +124,8 @@ multiqueue.txt
- HOWTO for multiqueue network device support.
netconsole.txt
- The network console module netconsole.ko: configuration and notes.
+netdev-FAQ.txt
+ - FAQ describing how to submit net changes to netdev mailing list.
netdev-features.txt
- Network interface features API description.
netdevices.txt
diff --git a/Documentation/networking/e100.txt b/Documentation/networking/e100.txt
index fcb6c71cdb6..13a32124bca 100644
--- a/Documentation/networking/e100.txt
+++ b/Documentation/networking/e100.txt
@@ -1,7 +1,7 @@
Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters
==============================================================
-November 15, 2005
+March 15, 2011
Contents
========
@@ -122,7 +122,7 @@ Additional Configurations
NOTE: This setting is not saved across reboots.
- Ethtool
+ ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
diff --git a/Documentation/networking/e1000.txt b/Documentation/networking/e1000.txt
index 71ca9585567..437b2099cce 100644
--- a/Documentation/networking/e1000.txt
+++ b/Documentation/networking/e1000.txt
@@ -1,8 +1,8 @@
-Linux* Base Driver for the Intel(R) PRO/1000 Family of Adapters
-===============================================================
+Linux* Base Driver for Intel(R) Ethernet Network Connection
+===========================================================
Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2010 Intel Corporation.
+Copyright(c) 1999 - 2013 Intel Corporation.
Contents
========
@@ -420,15 +420,15 @@ Additional Configurations
- The maximum MTU setting for Jumbo Frames is 16110. This value coincides
with the maximum Jumbo Frames size of 16128.
- - Using Jumbo Frames at 10 or 100 Mbps may result in poor performance or
- loss of link.
+ - Using Jumbo frames at 10 or 100 Mbps is not supported and may result in
+ poor performance or loss of link.
- Adapters based on the Intel(R) 82542 and 82573V/E controller do not
support Jumbo Frames. These correspond to the following product names:
Intel(R) PRO/1000 Gigabit Server Adapter
Intel(R) PRO/1000 PM Network Connection
- Ethtool
+ ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
diagnostics, as well as displaying statistical information. The ethtool
diff --git a/Documentation/networking/e1000e.txt b/Documentation/networking/e1000e.txt
index 97b5ba942eb..ad2d9f38ce1 100644
--- a/Documentation/networking/e1000e.txt
+++ b/Documentation/networking/e1000e.txt
@@ -1,8 +1,8 @@
-Linux* Driver for Intel(R) Network Connection
-=============================================
+Linux* Driver for Intel(R) Ethernet Network Connection
+======================================================
Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2010 Intel Corporation.
+Copyright(c) 1999 - 2013 Intel Corporation.
Contents
========
@@ -259,13 +259,16 @@ Additional Configurations
- The maximum MTU setting for Jumbo Frames is 9216. This value coincides
with the maximum Jumbo Frames size of 9234 bytes.
- - Using Jumbo Frames at 10 or 100 Mbps is not supported and may result in
+ - Using Jumbo frames at 10 or 100 Mbps is not supported and may result in
poor performance or loss of link.
- Some adapters limit Jumbo Frames sized packets to a maximum of
4096 bytes and some adapters do not support Jumbo Frames.
- Ethtool
+ - Jumbo Frames cannot be configured on an 82579-based Network device, if
+ MACSec is enabled on the system.
+
+ ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
diagnostics, as well as displaying statistical information. We
@@ -273,6 +276,9 @@ Additional Configurations
http://ftp.kernel.org/pub/software/network/ethtool/
+ NOTE: When validating enable/disable tests on some parts (82578, for example)
+ you need to add a few seconds between tests when working with ethtool.
+
Speed and Duplex
----------------
Speed and Duplex are configured through the ethtool* utility. For
diff --git a/Documentation/networking/igb.txt b/Documentation/networking/igb.txt
index 9a2a037194a..4ebbd659256 100644
--- a/Documentation/networking/igb.txt
+++ b/Documentation/networking/igb.txt
@@ -1,8 +1,8 @@
-Linux* Base Driver for Intel(R) Network Connection
-==================================================
+Linux* Base Driver for Intel(R) Ethernet Network Connection
+===========================================================
Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2010 Intel Corporation.
+Copyright(c) 1999 - 2013 Intel Corporation.
Contents
========
@@ -36,6 +36,53 @@ Default Value: 0
This parameter adds support for SR-IOV. It causes the driver to spawn up to
max_vfs worth of virtual function.
+QueuePairs
+----------
+Valid Range: 0-1
+Default Value: 1 (TX and RX will be paired onto one interrupt vector)
+
+If set to 0, when MSI-X is enabled, the TX and RX will attempt to occupy
+separate vectors.
+
+This option can be overridden to 1 if there are not sufficient interrupts
+available. This can occur if any combination of RSS, VMDQ, and max_vfs
+results in more than 4 queues being used.
+
+Node
+----
+Valid Range: 0-n
+Default Value: -1 (off)
+
+ 0 - n: where n is the number of the NUMA node that should be used to
+ allocate memory for this adapter port.
+ -1: uses the driver default of allocating memory on whichever processor is
+ running insmod/modprobe.
+
+ The Node parameter will allow you to pick which NUMA node you want to have
+ the adapter allocate memory from. All driver structures, in-memory queues,
+ and receive buffers will be allocated on the node specified. This parameter
+ is only useful when interrupt affinity is specified, otherwise some portion
+ of the time the interrupt could run on a different core than the memory is
+ allocated on, causing slower memory access and impacting throughput, CPU, or
+ both.
+
+EEE
+---
+Valid Range: 0-1
+Default Value: 1 (enabled)
+
+ A link between two EEE-compliant devices will result in periodic bursts of
+ data followed by long periods in which the link is in an idle state. This Low
+ Power Idle (LPI) state is supported in both 1Gbps and 100Mbps link speeds.
+ NOTE: EEE support requires autonegotiation.
+
+DMAC
+----
+Valid Range: 0-1
+Default Value: 1 (enabled)
+ Enables or disables DMA Coalescing feature.
+
+
Additional Configurations
=========================
@@ -55,10 +102,10 @@ Additional Configurations
- The maximum MTU setting for Jumbo Frames is 9216. This value coincides
with the maximum Jumbo Frames size of 9234 bytes.
- - Using Jumbo Frames at 10 or 100 Mbps may result in poor performance or
- loss of link.
+ - Using Jumbo frames at 10 or 100 Mbps is not supported and may result in
+ poor performance or loss of link.
- Ethtool
+ ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
diagnostics, as well as displaying statistical information. The latest
@@ -106,6 +153,14 @@ Additional Configurations
Where n=the VF that attempted to do the spoofing.
+ Setting MAC Address, VLAN and Rate Limit Using IProute2 Tool
+ ------------------------------------------------------------
+ You can set a MAC address of a Virtual Function (VF), a default VLAN and the
+ rate limit using the IProute2 tool. Download the latest version of the
+ iproute2 tool from Sourceforge if your version does not have all the
+ features you require.
+
+
Support
=======
diff --git a/Documentation/networking/igbvf.txt b/Documentation/networking/igbvf.txt
index cbfe4ee6553..40db17a6665 100644
--- a/Documentation/networking/igbvf.txt
+++ b/Documentation/networking/igbvf.txt
@@ -1,8 +1,8 @@
-Linux* Base Driver for Intel(R) Network Connection
-==================================================
+Linux* Base Driver for Intel(R) Ethernet Network Connection
+===========================================================
Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2010 Intel Corporation.
+Copyright(c) 1999 - 2013 Intel Corporation.
Contents
========
@@ -55,7 +55,7 @@ networking link on the left to search for your adapter:
Additional Configurations
=========================
- Ethtool
+ ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
diagnostics, as well as displaying statistical information. The ethtool
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 10742902146..a46d78583ae 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -440,6 +440,10 @@ tcp_syncookies - BOOLEAN
SYN flood warnings in logs not being really flooded, your server
is seriously misconfigured.
+ If you want to test what effect syncookies have on your
+ network connections, you can set this knob to 2 to enable
+ unconditional generation of syncookies.
+
tcp_fastopen - INTEGER
Enable TCP Fast Open feature (draft-ietf-tcpm-fastopen) to send data
in the opening SYN packet. To use this feature, the client application
@@ -478,6 +482,15 @@ tcp_syn_retries - INTEGER
tcp_timestamps - BOOLEAN
Enable timestamps as defined in RFC1323.
+tcp_min_tso_segs - INTEGER
+ Minimal number of segments per TSO frame.
+ Since linux-3.12, TCP does an automatic sizing of TSO frames,
+ depending on flow rate, instead of filling 64Kbyte packets.
+ For specific usages, it's possible to force TCP to build big
+ TSO frames. Note that the TCP stack might split overly large
+ TSO packets if the available window is too small.
+ Default: 2
+
tcp_tso_win_divisor - INTEGER
This allows control over what percentage of the congestion window
can be consumed by a single TSO frame.
@@ -516,6 +529,19 @@ tcp_wmem - vector of 3 INTEGERs: min, default, max
this value is ignored.
Default: between 64K and 4MB, depending on RAM size.
+tcp_notsent_lowat - UNSIGNED INTEGER
+ A TCP socket can control the amount of unsent bytes in its write queue,
+ thanks to the TCP_NOTSENT_LOWAT socket option. poll()/select()/epoll()
+ report POLLOUT events if the amount of unsent bytes is below a per
+ socket value, and if the write queue is not full. sendmsg() will
+ also not add new buffers if the limit is hit.
+
+ This global variable controls the amount of unsent data for
+ sockets not using TCP_NOTSENT_LOWAT. For these sockets, a change
+ to the global variable has immediate effect.
+
+ Default: UINT_MAX (0xFFFFFFFF)
+
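For a per-socket override, the TCP_NOTSENT_LOWAT socket option mentioned
above can be set directly; a minimal sketch (assumes a libc whose headers
define TCP_NOTSENT_LOWAT; the 128 KB threshold is an arbitrary example):

  #include <netinet/in.h>
  #include <netinet/tcp.h>
  #include <sys/socket.h>

  /* Report POLLOUT only while less than ~128 KB of unsent data is queued. */
  static int set_notsent_lowat(int fd)
  {
          unsigned int lowat = 128 * 1024;

          return setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT,
                            &lowat, sizeof(lowat));
  }
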
tcp_workaround_signed_windows - BOOLEAN
If set, assume no receipt of a window scaling option means the
remote TCP is broken and treats the window as a signed quantity.
@@ -1022,7 +1048,15 @@ disable_policy - BOOLEAN
disable_xfrm - BOOLEAN
Disable IPSEC encryption on this interface, whatever the policy
+igmpv2_unsolicited_report_interval - INTEGER
+ The interval in milliseconds in which the next unsolicited
+ IGMPv1 or IGMPv2 report retransmit will take place.
+ Default: 10000 (10 seconds)
+igmpv3_unsolicited_report_interval - INTEGER
+ The interval in milliseconds in which the next unsolicited
+ IGMPv3 report retransmit will take place.
+ Default: 1000 (1 second)
tag - INTEGER
Allows you to write a number, which can be used as required.
@@ -1314,6 +1348,27 @@ ndisc_notify - BOOLEAN
1 - Generate unsolicited neighbour advertisements when device is brought
up or hardware address changes.
+mldv1_unsolicited_report_interval - INTEGER
+ The interval in milliseconds in which the next unsolicited
+ MLDv1 report retransmit will take place.
+ Default: 10000 (10 seconds)
+
+mldv2_unsolicited_report_interval - INTEGER
+ The interval in milliseconds in which the next unsolicited
+ MLDv2 report retransmit will take place.
+ Default: 1000 (1 second)
+
+force_mld_version - INTEGER
+ 0 - (default) No enforcement of an MLD version, MLDv1 fallback allowed
+ 1 - Enforce to use MLD version 1
+ 2 - Enforce to use MLD version 2
+
+suppress_frag_ndisc - INTEGER
+ Control RFC 6980 (Security Implications of IPv6 Fragmentation
+ with IPv6 Neighbor Discovery) behavior:
+ 1 - (default) discard fragmented neighbor discovery packets
+ 0 - allow fragmented neighbor discovery packets
+
icmp/*:
ratelimit - INTEGER
Limit the maximal rates for sending ICMPv6 packets.
diff --git a/Documentation/networking/ixgb.txt b/Documentation/networking/ixgb.txt
index d75a1f9565b..1e0c045e89f 100644
--- a/Documentation/networking/ixgb.txt
+++ b/Documentation/networking/ixgb.txt
@@ -1,7 +1,7 @@
-Linux Base Driver for 10 Gigabit Intel(R) Network Connection
-=============================================================
+Linux Base Driver for 10 Gigabit Intel(R) Ethernet Network Connection
+=====================================================================
-October 9, 2007
+March 14, 2011
Contents
@@ -274,9 +274,9 @@ Additional Configurations
-------------------------------------------------
Configuring a network driver to load properly when the system is started is
distribution dependent. Typically, the configuration process involves adding
- an alias line to files in /etc/modprobe.d/ as well as editing other system
- startup scripts and/or configuration files. Many popular Linux distributions
- ship with tools to make these changes for you. To learn the proper way to
+ an alias line to /etc/modprobe.conf as well as editing other system startup
+ scripts and/or configuration files. Many popular Linux distributions ship
+ with tools to make these changes for you. To learn the proper way to
configure a network device for your system, refer to your distribution
documentation. If during this process you are asked for the driver or module
name, the name for the Linux Base Driver for the Intel 10GbE Family of
@@ -306,7 +306,7 @@ Additional Configurations
with the maximum Jumbo Frames size of 16128.
- Ethtool
+ ethtool
-------
The driver utilizes the ethtool interface for driver configuration and
diagnostics, as well as displaying statistical information. The ethtool
diff --git a/Documentation/networking/ixgbe.txt b/Documentation/networking/ixgbe.txt
index af77ed3c417..96cccebb839 100644
--- a/Documentation/networking/ixgbe.txt
+++ b/Documentation/networking/ixgbe.txt
@@ -1,8 +1,9 @@
-Linux Base Driver for 10 Gigabit PCI Express Intel(R) Network Connection
-========================================================================
+Linux* Base Driver for the Intel(R) Ethernet 10 Gigabit PCI Express Family of
+Adapters
+=============================================================================
-Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2010 Intel Corporation.
+Intel 10 Gigabit Linux driver.
+Copyright(c) 1999 - 2013 Intel Corporation.
Contents
========
@@ -16,8 +17,8 @@ Contents
Identifying Your Adapter
========================
-The driver in this release is compatible with 82598 and 82599-based Intel
-Network Connections.
+The driver in this release is compatible with 82598, 82599 and X540-based
+Intel Network Connections.
For more information on how to identify your adapter, go to the Adapter &
Driver ID Guide at:
@@ -72,7 +73,7 @@ cables that comply with SFF-8431 v4.1 and SFF-8472 v10.4 specifications.
Laser turns off for SFP+ when ifconfig down
-------------------------------------------
"ifconfig down" turns off the laser for 82599-based SFP+ fiber adapters.
-"ifconfig up" turns on the later.
+"ifconfig up" turns on the laser.
82598-BASED ADAPTERS
@@ -118,6 +119,93 @@ NOTE: For 82598 backplane cards entering 1 gig mode, flow control default
behavior is changed to off. Flow control in 1 gig mode on these devices can
lead to Tx hangs.
+Intel(R) Ethernet Flow Director
+-------------------------------
+Supports advanced filters that direct receive packets by their flows to
+different queues. Enables tight control on routing a flow in the platform.
+Matches flows and CPU cores for flow affinity. Supports multiple parameters
+for flexible flow classification and load balancing.
+
+Flow director is enabled only if the kernel is multiple TX queue capable.
+
+An included script (set_irq_affinity.sh) automates setting the IRQ to CPU
+affinity.
+
+You can verify that the driver is using Flow Director by looking at the counters
+in ethtool: fdir_miss and fdir_match.
+
+Other ethtool Commands:
+To enable Flow Director
+ ethtool -K ethX ntuple on
+To add a filter
+ Use -U switch. e.g., ethtool -U ethX flow-type tcp4 src-ip 0x178000a
+ action 1
+To see the list of filters currently present:
+ ethtool -u ethX
+
+Perfect Filter: Perfect filter is an interface to load the filter table that
+funnels all flow into queue_0 unless an alternative queue is specified using
+"action". In that case, any flow that matches the filter criteria will be
+directed to the appropriate queue.
+
+If the queue is defined as -1, filter will drop matching packets.
+
+To account for filter matches and misses, there are two stats in ethtool:
+fdir_match and fdir_miss. In addition, rx_queue_N_packets shows the number of
+packets processed by the Nth queue.
+
+NOTE: Receive Packet Steering (RPS) and Receive Flow Steering (RFS) are not
+compatible with Flow Director. If Flow Director is enabled, these will be
+disabled.
+
+The following three parameters impact Flow Director.
+
+FdirMode
+--------
+Valid Range: 0-2 (0=off, 1=ATR, 2=Perfect filter mode)
+Default Value: 1
+
+ Flow Director filtering modes.
+
+FdirPballoc
+-----------
+Valid Range: 0-2 (0=64k, 1=128k, 2=256k)
+Default Value: 0
+
+ Flow Director allocated packet buffer size.
+
+AtrSampleRate
+--------------
+Valid Range: 1-100
+Default Value: 20
+
+ Software ATR Tx packet sample rate. For example, when set to 20, every 20th
+ packet is sampled to see if it will create a new flow.
+
+Node
+----
+Valid Range: 0-n
+Default Value: 1 (off)
+
+ 0 - n: where n is the number of NUMA nodes (i.e. 0 - 3) currently online in
+ your system
+ 1: turns this option off
+
+ The Node parameter will allow you to pick which NUMA node you want to have
+ the adapter allocate memory on.
+
+max_vfs
+-------
+Valid Range: 1-63
+Default Value: 0
+
+ If the value is greater than 0 it will also force the VMDq parameter to be 1
+ or more.
+
+ This parameter adds support for SR-IOV. It causes the driver to spawn up to
+ max_vfs worth of virtual functions.
+
+
Additional Configurations
=========================
@@ -221,9 +309,10 @@ http://www.redhat.com/promo/summit/2008/downloads/pdf/Thursday/Mark_Wagner.pdf
Known Issues
============
- Enabling SR-IOV in a 32-bit Microsoft* Windows* Server 2008 Guest OS using
- Intel (R) 82576-based GbE or Intel (R) 82599-based 10GbE controller under KVM
- -----------------------------------------------------------------------------
+ Enabling SR-IOV in a 32-bit or 64-bit Microsoft* Windows* Server 2008/R2
+ Guest OS using Intel (R) 82576-based GbE or Intel (R) 82599-based 10GbE
+ controller under KVM
+ ------------------------------------------------------------------------
KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM. This
includes traditional PCIe devices, as well as SR-IOV-capable devices using
Intel 82576-based and 82599-based controllers.
diff --git a/Documentation/networking/ixgbevf.txt b/Documentation/networking/ixgbevf.txt
index 5a91a41fa94..53d8d2a5a6a 100644
--- a/Documentation/networking/ixgbevf.txt
+++ b/Documentation/networking/ixgbevf.txt
@@ -1,8 +1,8 @@
-Linux* Base Driver for Intel(R) Network Connection
-==================================================
+Linux* Base Driver for Intel(R) Ethernet Network Connection
+===========================================================
Intel Gigabit Linux driver.
-Copyright(c) 1999 - 2010 Intel Corporation.
+Copyright(c) 1999 - 2013 Intel Corporation.
Contents
========
diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt
new file mode 100644
index 00000000000..d9112f01c44
--- /dev/null
+++ b/Documentation/networking/netdev-FAQ.txt
@@ -0,0 +1,224 @@
+
+Information you need to know about netdev
+-----------------------------------------
+
+Q: What is netdev?
+
+A: It is a mailing list for all network related linux stuff. This includes
+ anything found under net/ (i.e. core code like IPv6) and drivers/net
+ (i.e. hardware specific drivers) in the linux source tree.
+
+ Note that some subsystems (e.g. wireless drivers) which have a high volume
+ of traffic have their own specific mailing lists.
+
+ The netdev list is managed (like many other linux mailing lists) through
+ VGER ( http://vger.kernel.org/ ) and archives can be found below:
+
+ http://marc.info/?l=linux-netdev
+ http://www.spinics.net/lists/netdev/
+
+ Aside from subsystems like that mentioned above, all network related linux
+ development (i.e. RFC, review, comments, etc) takes place on netdev.
+
+Q: How do the changes posted to netdev make their way into linux?
+
+A: There are always two trees (git repositories) in play. Both are driven
+ by David Miller, the main network maintainer. There is the "net" tree,
+ and the "net-next" tree. As you can probably guess from the names, the
+ net tree is for fixes to existing code already in the mainline tree from
+ Linus, and net-next is where the new code goes for the future release.
+ You can find the trees here:
+
+ http://git.kernel.org/?p=linux/kernel/git/davem/net.git
+ http://git.kernel.org/?p=linux/kernel/git/davem/net-next.git
+
+Q: How often do changes from these trees make it to the mainline Linus tree?
+
+A: To understand this, you need to know a bit of background information
+ on the cadence of linux development. Each new release starts off with
+ a two week "merge window" where the main maintainers feed their new
+ stuff to Linus for merging into the mainline tree. After the two weeks,
+ the merge window is closed, and it is called/tagged "-rc1". No new
+ features get mainlined after this -- only fixes to the rc1 content
+ are expected. After roughly a week of collecting fixes to the rc1
+ content, rc2 is released. This repeats on a roughly weekly basis
+ until rc7 (typically; sometimes rc6 if things are quiet, or rc8 if
+ things are in a state of churn), and a week after the last vX.Y-rcN
+ was done, the official "vX.Y" is released.
+
+ Relating that to netdev: At the beginning of the 2 week merge window,
+ the net-next tree will be closed - no new changes/features. The
+ accumulated new content of the past ~10 weeks will be passed onto
+ mainline/Linus via a pull request for vX.Y -- at the same time,
+ the "net" tree will start accumulating fixes for this pulled content
+ relating to vX.Y
+
+ An announcement indicating when net-next has been closed is usually
+ sent to netdev, but knowing the above, you can predict that in advance.
+
+ IMPORTANT: Do not send new net-next content to netdev during the
+ period during which net-next tree is closed.
+
+ Shortly after the two weeks have passed, (and vX.Y-rc1 is released) the
+ tree for net-next reopens to collect content for the next (vX.Y+1) release.
+
+ If you aren't subscribed to netdev and/or are simply unsure if net-next
+ has re-opened yet, simply check the net-next git repository link above for
+ any new networking related commits.
+
+ The "net" tree continues to collect fixes for the vX.Y content, and
+ is fed back to Linus at regular (~weekly) intervals. Meaning that the
+ focus for "net" is on stablilization and bugfixes.
+
+ Finally, the vX.Y gets released, and the whole cycle starts over.
+
+Q: So where are we now in this cycle?
+
+A: Load the mainline (Linus) page here:
+
+ http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git
+
+ and note the top of the "tags" section. If it is rc1, it is early
+ in the dev cycle. If it was tagged rc7 a week ago, then a release
+ is probably imminent.
+
+Q: How do I indicate which tree (net vs. net-next) my patch should be in?
+
+A: Firstly, think whether you have a bug fix or new "next-like" content.
+ Then once decided, assuming that you use git, use the prefix flag, i.e.
+
+ git format-patch --subject-prefix='PATCH net-next' start..finish
+
+ Use "net" instead of "net-next" (always lower case) in the above for
+ bug-fix net content. If you don't use git, then note the only magic in
+ the above is just the subject text of the outgoing e-mail, and you can
+ manually change it yourself with whatever MUA you are comfortable with.
+
+Q: I sent a patch and I'm wondering what happened to it. How can I tell
+ whether it got merged?
+
+A: Start by looking at the main patchworks queue for netdev:
+
+ http://patchwork.ozlabs.org/project/netdev/list/
+
+ The "State" field will tell you exactly where things are at with
+ your patch.
+
+Q: The above only says "Under Review". How can I find out more?
+
+A: Generally speaking, the patches get triaged quickly (in less than 48h).
+ So be patient. Asking the maintainer for status updates on your
+ patch is a good way to ensure your patch is ignored or pushed to
+ the bottom of the priority list.
+
+Q: How can I tell what patches are queued up for backporting to the
+ various stable releases?
+
+A: Normally Greg Kroah-Hartman collects stable commits himself, but
+ for networking, Dave collects up patches he deems critical for the
+ networking subsystem, and then hands them off to Greg.
+
+ There is a patchworks queue that you can see here:
+ http://patchwork.ozlabs.org/bundle/davem/stable/?state=*
+
+ It contains the patches which Dave has selected, but not yet handed
+ off to Greg. If Greg already has the patch, then it will be here:
+ http://git.kernel.org/cgit/linux/kernel/git/stable/stable-queue.git
+
+ A quick way to find whether the patch is in this stable-queue is
+ to simply clone the repo, and then git grep the mainline commit ID, e.g.
+
+ stable-queue$ git grep -l 284041ef21fdf2e
+ releases/3.0.84/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
+ releases/3.4.51/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
+ releases/3.9.8/ipv6-fix-possible-crashes-in-ip6_cork_release.patch
+ stable/stable-queue$
+
+Q: I see a network patch and I think it should be backported to stable.
+ Should I request it via "stable@vger.kernel.org" like the references in
+ the kernel's Documentation/stable_kernel_rules.txt file say?
+
+A: No, not for networking. Check the stable queues as per above 1st to see
+ if it is already queued. If not, then send a mail to netdev, listing
+ the upstream commit ID and why you think it should be a stable candidate.
+
+ Before you jump to go do the above, do note that the normal stable rules
+ in Documentation/stable_kernel_rules.txt still apply. So you need to
+ explicitly indicate why it is a critical fix and exactly what users are
+ impacted. In addition, you need to convince yourself that you _really_
+ think it has been overlooked, vs. having been considered and rejected.
+
+ Generally speaking, the longer it has had a chance to "soak" in mainline,
+ the better the odds that it is an OK candidate for stable. So scrambling
+ to request a commit be added the day after it appears should be avoided.
+
+Q: I have created a network patch and I think it should be backported to
+ stable. Should I add a "Cc: stable@vger.kernel.org" like the references
+ in the kernel's Documentation/ directory say?
+
+A: No. See above answer. In short, if you think it really belongs in
+ stable, then ensure you write a decent commit log that describes who
+ gets impacted by the bugfix and how it manifests itself, and when the
+ bug was introduced. If you do that properly, then the commit will
+ get handled appropriately and most likely get put in the patchworks
+ stable queue if it really warrants it.
+
+ If you think there is some valid information relating to it being in
+ stable that does _not_ belong in the commit log, then use the three
+ dash marker line as described in Documentation/SubmittingPatches to
+ temporarily embed that information into the patch that you send.
+
+Q: Someone said that the comment style and coding convention is different
+ for the networking content. Is this true?
+
+A: Yes, in a largely trivial way. Instead of this:
+
+ /*
+ * foobar blah blah blah
+ * another line of text
+ */
+
+ it is requested that you make it look like this:
+
+ /* foobar blah blah blah
+ * another line of text
+ */
+
+Q: I am working in existing code that has the former comment style and not the
+ latter. Should I submit new code in the former style or the latter?
+
+A: Make it the latter style, so that eventually all code in the domain of
+ netdev is of this format.
+
+Q: I found a bug that might have possible security implications or similar.
+ Should I mail the main netdev maintainer off-list?
+
+A: No. The current netdev maintainer has consistently requested that people
+ use the mailing lists and not reach out directly. If you aren't OK with
+ that, then perhaps consider mailing "security@kernel.org" or reading about
+ http://oss-security.openwall.org/wiki/mailing-lists/distros
+ as possible alternative mechanisms.
+
+Q: What level of testing is expected before I submit my change?
+
+A: If your changes are against net-next, the expectation is that you
+ have tested by layering your changes on top of net-next. Ideally you
+ will have done run-time testing specific to your change, but at a
+ minimum, your changes should survive an "allyesconfig" and an
+ "allmodconfig" build without new warnings or failures.
+
+Q: Any other tips to help ensure my net/net-next patch gets OK'd?
+
+A: Attention to detail. Re-read your own work as if you were the
+ reviewer. You can start with using checkpatch.pl, perhaps even
+ with the "--strict" flag. But do not be mindlessly robotic in
+ doing so. If your change is a bug-fix, make sure your commit log
+ indicates the end-user visible symptom, the underlying reason as
+ to why it happens, and then if necessary, explain why the fix proposed
+ is the best way to get things done. Don't mangle whitespace, and as
+ is common, don't mis-indent function arguments that span multiple lines.
+ If it is your 1st patch, mail it to yourself so you can test apply
+ it to an unpatched tree to confirm infrastructure didn't mangle it.
+
+ Finally, go back and read Documentation/SubmittingPatches to be
+ sure you are not repeating some common mistake documented there.
diff --git a/Documentation/networking/openvswitch.txt b/Documentation/networking/openvswitch.txt
index 8fa2dd1e792..37c20ee2455 100644
--- a/Documentation/networking/openvswitch.txt
+++ b/Documentation/networking/openvswitch.txt
@@ -91,6 +91,46 @@ Often we ellipsize arguments not important to the discussion, e.g.:
in_port(1), eth(...), eth_type(0x0800), ipv4(...), tcp(...)
+Wildcarded flow key format
+--------------------------
+
+A wildcarded flow is described with two sequences of Netlink attributes
+passed over the Netlink socket. A flow key, exactly as described above, and an
+optional corresponding flow mask.
+
+A wildcarded flow can represent a group of exact match flows. Each '1' bit
+in the mask specifies an exact match with the corresponding bit in the flow key.
+A '0' bit specifies a don't-care bit, which will match either a '1' or '0' bit
+of an incoming packet. Using wildcarded flows can improve the flow set up rate
+by reducing the number of new flows that need to be processed by user space.
+
+Support for the mask Netlink attribute is optional for both the kernel and user
+space program. The kernel can ignore the mask attribute, installing an exact
+match flow, or reduce the number of don't care bits in the kernel to less than
+what was specified by the user space program. In this case, variations in bits
+that the kernel does not implement will simply result in additional flow setups.
+The kernel module will also work with user space programs that neither support
+nor supply flow mask attributes.
+
+Since the kernel may ignore or modify wildcard bits, it can be difficult for
+the userspace program to know exactly what matches are installed. There are
+two possible approaches: reactively install flows as they miss the kernel
+flow table (and therefore not attempt to determine wildcard changes at all)
+or use the kernel's response messages to determine the installed wildcards.
+
+When interacting with userspace, the kernel should maintain the match portion
+of the key exactly as originally installed. This provides a handle to
+identify the flow for all future operations. However, when reporting the
+mask of an installed flow, the mask should include any restrictions imposed
+by the kernel.
+
+The behavior when using overlapping wildcarded flows is undefined. It is the
+responsibility of the user space program to ensure that any incoming packet
+can match at most one flow, wildcarded or not. The current implementation
+performs best-effort detection of overlapping wildcarded flows and may reject
+some but not all of them. However, this behavior may change in future versions.
+
+
Basic rule for evolving flow keys
---------------------------------
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 8572796b1eb..c01223628a8 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -543,6 +543,14 @@ TPACKET_V2 --> TPACKET_V3:
In the AF_PACKET fanout mode, packet reception can be load balanced among
processes. This also works in combination with mmap(2) on packet sockets.
+Currently implemented fanout policies are:
+
+ - PACKET_FANOUT_HASH: schedule to socket by skb's rxhash
+ - PACKET_FANOUT_LB: schedule to socket by round-robin
+ - PACKET_FANOUT_CPU: schedule to socket by CPU packet arrives on
+ - PACKET_FANOUT_RND: schedule to socket by random selection
+ - PACKET_FANOUT_ROLLOVER: if one socket is full, rollover to another
+
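Joining a fanout group is a single setsockopt() per packet socket; a hedged
sketch (group id 42 and the HASH policy are arbitrary illustration choices):

  #include <linux/if_packet.h>
  #include <sys/socket.h>

  /* Add an AF_PACKET socket to fanout group 42, load balancing by rxhash.
   * The policy is encoded in the upper 16 bits, the group id in the lower.
   */
  static int join_fanout(int fd)
  {
          int fanout_arg = 42 | (PACKET_FANOUT_HASH << 16);

          return setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
                            &fanout_arg, sizeof(fanout_arg));
  }
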
Minimal example code by David S. Miller (try things like "./test eth0 hash",
"./test eth0 lb", etc.):
diff --git a/Documentation/networking/sctp.txt b/Documentation/networking/sctp.txt
index 0c790a76910..97b810ca908 100644
--- a/Documentation/networking/sctp.txt
+++ b/Documentation/networking/sctp.txt
@@ -19,7 +19,6 @@ of SCTP that is RFC 2960 compliant and provides an programming interface
referred to as the UDP-style API of the Sockets Extensions for SCTP, as
proposed in IETF Internet-Drafts.
-
Caveats:
-lksctp can be built as statically or as a module. However, be aware that
@@ -33,6 +32,4 @@ For more information, please visit the lksctp project website:
http://www.sf.net/projects/lksctp
Or contact the lksctp developers through the mailing list:
- <lksctp-developers@lists.sourceforge.net>
-
-
+ <linux-sctp@vger.kernel.org>
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index 654d2e55c8c..457b8bbafb0 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -123,6 +123,7 @@ struct plat_stmmacenet_data {
int bugged_jumbo;
int pmt;
int force_sf_dma_mode;
+ int force_thresh_dma_mode;
int riwt_off;
void (*fix_mac_speed)(void *priv, unsigned int speed);
void (*bus_setup)(void __iomem *ioaddr);
@@ -159,6 +160,8 @@ Where:
o pmt: core has the embedded power module (optional).
o force_sf_dma_mode: force DMA to use the Store and Forward mode
instead of the Threshold.
+ o force_thresh_dma_mode: force DMA to use the Threshold mode instead of
+ the Store and Forward mode.
o riwt_off: force to disable the RX watchdog feature and switch to NAPI mode.
o fix_mac_speed: this callback is used for modifying some syscfg registers
(on ST SoCs) according to the link speed negotiated by the
diff --git a/Documentation/networking/tproxy.txt b/Documentation/networking/tproxy.txt
index 7b5996d9357..ec11429e1d4 100644
--- a/Documentation/networking/tproxy.txt
+++ b/Documentation/networking/tproxy.txt
@@ -2,9 +2,8 @@ Transparent proxy support
=========================
This feature adds Linux 2.2-like transparent proxy support to current kernels.
-To use it, enable NETFILTER_TPROXY, the socket match and the TPROXY target in
-your kernel config. You will need policy routing too, so be sure to enable that
-as well.
+To use it, enable the socket match and the TPROXY target in your kernel config.
+You will need policy routing too, so be sure to enable that as well.
1. Making non-local sockets work
diff --git a/Documentation/pinctrl.txt b/Documentation/pinctrl.txt
index e3f322a4b35..c0ffd30eb55 100644
--- a/Documentation/pinctrl.txt
+++ b/Documentation/pinctrl.txt
@@ -81,7 +81,7 @@ int __init foo_probe(void)
struct pinctrl_dev *pctl;
pctl = pinctrl_register(&foo_desc, <PARENT>, NULL);
- if (IS_ERR(pctl))
+ if (!pctl)
pr_err("could not register foo pin driver\n");
}
@@ -795,18 +795,97 @@ special GPIO-handler is registered.
GPIO mode pitfalls
==================
-Sometime the developer may be confused by a datasheet talking about a pin
-being possible to set into "GPIO mode". It appears that what hardware
-engineers mean with "GPIO mode" is not necessarily the use case that is
-implied in the kernel interface <linux/gpio.h>: a pin that you grab from
-kernel code and then either listen for input or drive high/low to
-assert/deassert some external line.
+Due to the naming conventions used by hardware engineers, where "GPIO"
+is taken to mean different things than what the kernel does, the developer
+may be confused by a datasheet talking about a pin being possible to set
+into "GPIO mode". It appears that what hardware engineers mean with
+"GPIO mode" is not necessarily the use case that is implied in the kernel
+interface <linux/gpio.h>: a pin that you grab from kernel code and then
+either listen for input or drive high/low to assert/deassert some
+external line.
Rather hardware engineers think that "GPIO mode" means that you can
software-control a few electrical properties of the pin that you would
not be able to control if the pin was in some other mode, such as muxed in
for a device.
+The GPIO portions of a pin and its relation to a certain pin controller
+configuration and muxing logic can be constructed in several ways. Here
+are two examples:
+
+(A)
+ pin config
+ logic regs
+ | +- SPI
+ Physical pins --- pad --- pinmux -+- I2C
+ | +- mmc
+ | +- GPIO
+ pin
+ multiplex
+ logic regs
+
+Here some electrical properties of the pin can be configured no matter
+whether the pin is used for GPIO or not. If you multiplex a GPIO onto a
+pin, you can also drive it high/low from "GPIO" registers.
+Alternatively, the pin can be controlled by a certain peripheral, while
+still applying desired pin config properties. GPIO functionality is thus
+orthogonal to any other device using the pin.
+
+In this arrangement the registers for the GPIO portions of the pin controller,
+or the registers for the GPIO hardware module are likely to reside in a
+separate memory range only intended for GPIO driving, and the register
+range dealing with pin config and pin multiplexing get placed into a
+different memory range and a separate section of the data sheet.
+
+(B)
+
+ pin config
+ logic regs
+ | +- SPI
+ Physical pins --- pad --- pinmux -+- I2C
+ | | +- mmc
+ | |
+ GPIO pin
+ multiplex
+ logic regs
+
+In this arrangement, the GPIO functionality can always be enabled, such that
+e.g. a GPIO input can be used to "spy" on the SPI/I2C/MMC signal while it is
+pulsed out. It is likely possible to disrupt the traffic on the pin by doing
+wrong things on the GPIO block, as it is never really disconnected. It is
+possible that the GPIO, pin config and pin multiplex registers are placed into
+the same memory range and the same section of the data sheet, although that
+need not be the case.
+
+From a kernel point of view, however, these are different aspects of the
+hardware and shall be put into different subsystems:
+
+- Registers (or fields within registers) that control electrical
+ properties of the pin such as biasing and drive strength should be
+ exposed through the pinctrl subsystem, as "pin configuration" settings.
+
+- Registers (or fields within registers) that control muxing of signals
+ from various other HW blocks (e.g. I2C, MMC, or GPIO) onto pins should
+ be exposed through the pinctrl subsystem, as mux functions.
+
+- Registers (or fields within registers) that control GPIO functionality
+ such as setting a GPIO's output value, reading a GPIO's input value, or
+ setting GPIO pin direction should be exposed through the GPIO subsystem,
+ and if they also support interrupt capabilities, through the irqchip
+ abstraction.
+
+Depending on the exact HW register design, some functions exposed by the
+GPIO subsystem may call into the pinctrl subsystem in order to
+co-ordinate register settings across HW modules. In particular, this may
+be needed for HW with separate GPIO and pin controller HW modules, where
+e.g. GPIO direction is determined by a register in the pin controller HW
+module rather than the GPIO HW module.
+
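For instance, a GPIO driver for such hardware might implement its direction
callback by delegating to pinctrl; a hedged sketch (the foo_ names are
invented, and it assumes the pinctrl consumer helpers are available to the
driver):

  #include <linux/gpio.h>
  #include <linux/pinctrl/consumer.h>

  /* The direction register lives in the pin controller, so the gpio_chip
   * callback simply forwards the request to the pinctrl subsystem.
   */
  static int foo_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
  {
          return pinctrl_gpio_direction_input(chip->base + offset);
  }
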
+Electrical properties of the pin such as biasing and drive strength
+may be placed at some pin-specific register in all cases or as part
+of the GPIO register in case (B) especially. This doesn't mean that such
+properties necessarily pertain to what the Linux kernel calls "GPIO".
+
Example: a pin is usually muxed in to be used as a UART TX line. But during
system sleep, we need to put this pin into "GPIO mode" and ground it.
diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
index 44fc924ad00..445ad743ec8 100644
--- a/Documentation/printk-formats.txt
+++ b/Documentation/printk-formats.txt
@@ -168,6 +168,15 @@ UUID/GUID addresses:
Where no additional specifiers are used the default little endian
order with lower case hex characters will be printed.
+dentry names:
+ %pd{,2,3,4}
+ %pD{,2,3,4}
+
+ For printing dentry name; if we race with d_move(), the name might be
+ a mix of old and new ones, but it won't oops. %pd dentry is a safer
+ equivalent of %s dentry->d_name.name we used to use, %pd<n> prints
+ n last components. %pD does the same thing for struct file.
+
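For instance, a filesystem might log the last component of a dentry and of an
open file like this (a sketch; the surrounding function is hypothetical):

  #include <linux/dcache.h>
  #include <linux/fs.h>
  #include <linux/printk.h>

  /* %pd/%pD avoid dereferencing d_name.name while a rename races. */
  static void foo_log_open(struct dentry *dentry, struct file *file)
  {
          pr_info("opening %pd via %pD2\n", dentry, file);
  }
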
struct va_format:
%pV
diff --git a/Documentation/scsi/LICENSE.qla4xxx b/Documentation/scsi/LICENSE.qla4xxx
index 78c169f0d7c..fcc27ad27d7 100644
--- a/Documentation/scsi/LICENSE.qla4xxx
+++ b/Documentation/scsi/LICENSE.qla4xxx
@@ -1,4 +1,4 @@
-Copyright (c) 2003-2012 QLogic Corporation
+Copyright (c) 2003-2013 QLogic Corporation
QLogic Linux iSCSI Driver
This program includes a device driver for Linux 3.x.
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 809d72b8eff..a46ddb85e83 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -244,6 +244,7 @@ STAC9227/9228/9229/927x
5stack-no-fp D965 5stack without front panel
dell-3stack Dell Dimension E520
dell-bios Fixes with Dell BIOS setup
+ dell-bios-amic Fixes with Dell BIOS setup including analog mic
volknob Fixes with volume-knob widget 0x24
auto BIOS setup (default)
diff --git a/Documentation/sound/alsa/HD-Audio.txt b/Documentation/sound/alsa/HD-Audio.txt
index c3c912d023c..42a0a39b77e 100644
--- a/Documentation/sound/alsa/HD-Audio.txt
+++ b/Documentation/sound/alsa/HD-Audio.txt
@@ -454,6 +454,8 @@ The generic parser supports the following hints:
- need_dac_fix (bool): limits the DACs depending on the channel count
- primary_hp (bool): probe headphone jacks as the primary outputs;
default true
+- multi_io (bool): try probing multi-I/O config (e.g. shared
+ line-in/surround, mic/clfe jacks)
- multi_cap_vol (bool): provide multiple capture volumes
- inv_dmic_split (bool): provide split internal mic volume/switch for
phase-inverted digital mics
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index 1c15043aaee..9a0319a8247 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -50,9 +50,22 @@ The maximum number of packets that kernel can handle on a NAPI interrupt,
it's a Per-CPU variable.
Default: 64
+default_qdisc
+--------------
+
+The default queuing discipline to use for network devices. This allows
+overriding the default queue discipline of pfifo_fast with an
+alternative. Since the default queuing discipline is created with no
+additional parameters, it is best suited to queuing disciplines that
+work well without configuration, such as stochastic fair queue (sfq),
+CoDel (codel) or fair queue CoDel (fq_codel). Don't use queuing disciplines
+like Hierarchical Token Bucket or Deficit Round Robin which require setting
+up classes and bandwidths.
+Default: pfifo_fast
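
A sufficiently privileged process can switch the default before new devices
are created; a hedged C sketch (fq_codel is just one of the parameterless
disciplines named above, written to the standard net.core.default_qdisc
sysctl path):

  #include <stdio.h>

  /* Replace the system-wide default qdisc; only devices created after
   * the write pick up the new default.
   */
  static int set_default_qdisc(const char *qdisc)
  {
          FILE *f = fopen("/proc/sys/net/core/default_qdisc", "w");

          if (!f)
                  return -1;
          fprintf(f, "%s\n", qdisc);
          return fclose(f);
  }

For example, set_default_qdisc("fq_codel") followed by creating a new
interface should give that interface an fq_codel root qdisc.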
+
busy_read
----------------
-Low latency busy poll timeout for socket reads. (needs CONFIG_NET_LL_RX_POLL)
+Low latency busy poll timeout for socket reads. (needs CONFIG_NET_RX_BUSY_POLL)
Approximate time in us to busy loop waiting for packets on the device queue.
This sets the default value of the SO_BUSY_POLL socket option.
Can be set or overridden per socket by setting socket option SO_BUSY_POLL,
@@ -63,7 +76,7 @@ Default: 0 (off)
busy_poll
----------------
-Low latency busy poll timeout for poll and select. (needs CONFIG_NET_LL_RX_POLL)
+Low latency busy poll timeout for poll and select. (needs CONFIG_NET_RX_BUSY_POLL)
Approximate time in us to busy loop waiting for events.
Recommended value depends on the number of sockets you poll on.
For several sockets 50, for several hundreds 100.
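
Per socket, the SO_BUSY_POLL option mentioned above overrides these sysctls;
a minimal sketch (assumes headers that define SO_BUSY_POLL; 50 microseconds
is an arbitrary example value):

  #include <sys/socket.h>

  /* Busy-poll this socket's device queue for up to 50 us per read/poll.
   * Needs a kernel built with CONFIG_NET_RX_BUSY_POLL.
   */
  static int enable_busy_poll(int fd)
  {
          int usecs = 50;

          return setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL,
                            &usecs, sizeof(usecs));
  }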
diff --git a/Documentation/timers/NO_HZ.txt b/Documentation/timers/NO_HZ.txt
index 88697584242..cca122f2512 100644
--- a/Documentation/timers/NO_HZ.txt
+++ b/Documentation/timers/NO_HZ.txt
@@ -24,8 +24,8 @@ There are three main ways of managing scheduling-clock interrupts
workloads, you will normally -not- want this option.
These three cases are described in the following three sections, followed
-by a third section on RCU-specific considerations and a fourth and final
-section listing known issues.
+by a third section on RCU-specific considerations, a fourth section
+discussing testing, and a fifth and final section listing known issues.
NEVER OMIT SCHEDULING-CLOCK TICKS
@@ -121,14 +121,15 @@ boot parameter specifies the adaptive-ticks CPUs. For example,
"nohz_full=1,6-8" says that CPUs 1, 6, 7, and 8 are to be adaptive-ticks
CPUs. Note that you are prohibited from marking all of the CPUs as
adaptive-tick CPUs: At least one non-adaptive-tick CPU must remain
-online to handle timekeeping tasks in order to ensure that system calls
-like gettimeofday() returns accurate values on adaptive-tick CPUs.
-(This is not an issue for CONFIG_NO_HZ_IDLE=y because there are no
-running user processes to observe slight drifts in clock rate.)
-Therefore, the boot CPU is prohibited from entering adaptive-ticks
-mode. Specifying a "nohz_full=" mask that includes the boot CPU will
-result in a boot-time error message, and the boot CPU will be removed
-from the mask.
+online to handle timekeeping tasks in order to ensure that system
+calls like gettimeofday() return accurate values on adaptive-tick CPUs.
+(This is not an issue for CONFIG_NO_HZ_IDLE=y because there are no running
+user processes to observe slight drifts in clock rate.) Therefore, the
+boot CPU is prohibited from entering adaptive-ticks mode. Specifying a
+"nohz_full=" mask that includes the boot CPU will result in a boot-time
+error message, and the boot CPU will be removed from the mask. Note that
+this means that your system must have at least two CPUs in order for
+CONFIG_NO_HZ_FULL=y to do anything for you.
Alternatively, the CONFIG_NO_HZ_FULL_ALL=y Kconfig parameter specifies
that all CPUs other than the boot CPU are adaptive-ticks CPUs. This
@@ -232,6 +233,29 @@ scheduler will decide where to run them, which might or might not be
where you want them to run.
+TESTING
+
+So you enable all the OS-jitter features described in this document,
+but do not see any change in your workload's behavior. Is this because
+your workload isn't affected that much by OS jitter, or is it because
+something else is in the way? This section helps answer this question
+by providing a simple OS-jitter test suite, which is available on branch
+master of the following git archive:
+
+git://git.kernel.org/pub/scm/linux/kernel/git/frederic/dynticks-testing.git
+
+Clone this archive and follow the instructions in the README file.
+This test procedure will produce a trace that will allow you to evaluate
+whether or not you have succeeded in removing OS jitter from your system.
+If this trace shows that you have removed OS jitter as much as is
+possible, then you can conclude that your workload is not all that
+sensitive to OS jitter.
+
+Note: this test requires that your system have at least two CPUs.
+We do not currently have a good way to remove OS jitter from single-CPU
+systems.
+
+
KNOWN ISSUES
o Dyntick-idle slows transitions to and from idle slightly.
diff --git a/Documentation/tpm/xen-tpmfront.txt b/Documentation/tpm/xen-tpmfront.txt
new file mode 100644
index 00000000000..69346de87ff
--- /dev/null
+++ b/Documentation/tpm/xen-tpmfront.txt
@@ -0,0 +1,113 @@
+Virtual TPM interface for Xen
+
+Authors: Matthew Fioravante (JHUAPL), Daniel De Graaf (NSA)
+
+This document describes the virtual Trusted Platform Module (vTPM) subsystem for
+Xen. The reader is assumed to have familiarity with building and installing Xen,
+Linux, and a basic understanding of the TPM and vTPM concepts.
+
+INTRODUCTION
+
+The goal of this work is to provide a TPM functionality to a virtual guest
+operating system (in Xen terms, a DomU). This allows programs to interact with
+a TPM in a virtual system the same way they interact with a TPM on the physical
+system. Each guest gets its own unique, emulated, software TPM. However, each
+of the vTPM's secrets (Keys, NVRAM, etc) are managed by a vTPM Manager domain,
+which seals the secrets to the Physical TPM. If the process of creating each of
+these domains (manager, vTPM, and guest) is trusted, the vTPM subsystem extends
+the chain of trust rooted in the hardware TPM to virtual machines in Xen. Each
+major component of vTPM is implemented as a separate domain, providing secure
+separation guaranteed by the hypervisor. The vTPM domains are implemented in
+mini-os to reduce memory and processor overhead.
+
+This mini-os vTPM subsystem was built on top of the previous vTPM work done by
+IBM and Intel corporation.
+
+
+DESIGN OVERVIEW
+---------------
+
+The architecture of vTPM is described below:
+
++------------------+
+| Linux DomU | ...
+| | ^ |
+| v | |
+| xen-tpmfront |
++------------------+
+ | ^
+ v |
++------------------+
+| mini-os/tpmback |
+| | ^ |
+| v | |
+| vtpm-stubdom | ...
+| | ^ |
+| v | |
+| mini-os/tpmfront |
++------------------+
+ | ^
+ v |
++------------------+
+| mini-os/tpmback |
+| | ^ |
+| v | |
+| vtpmmgr-stubdom |
+| | ^ |
+| v | |
+| mini-os/tpm_tis |
++------------------+
+ | ^
+ v |
++------------------+
+| Hardware TPM |
++------------------+
+
+ * Linux DomU: The Linux based guest that wants to use a vTPM. There may be
+ more than one of these.
+
+ * xen-tpmfront.ko: Linux kernel virtual TPM frontend driver. This driver
+ provides vTPM access to a Linux-based DomU.
+
+ * mini-os/tpmback: Mini-os TPM backend driver. The Linux frontend driver
+ connects to this backend driver to facilitate communications
+ between the Linux DomU and its vTPM. This driver is also
+ used by vtpmmgr-stubdom to communicate with vtpm-stubdom.
+
+ * vtpm-stubdom: A mini-os stub domain that implements a vTPM. There is a
+ one to one mapping between running vtpm-stubdom instances and
+ logical vtpms on the system. The vTPM Platform Configuration
+ Registers (PCRs) are normally all initialized to zero.
+
+ * mini-os/tpmfront: Mini-os TPM frontend driver. The vTPM mini-os domain
+ vtpm-stubdom uses this driver to communicate with
+ vtpmmgr-stubdom. This driver is also used in mini-os
+ domains such as pv-grub that talk to the vTPM domain.
+
+ * vtpmmgr-stubdom: A mini-os domain that implements the vTPM manager. There is
+ only one vTPM manager and it should be running during the
+ entire lifetime of the machine. This domain regulates
+ access to the physical TPM on the system and secures the
+ persistent state of each vTPM.
+
+ * mini-os/tpm_tis: Mini-os TPM version 1.2 TPM Interface Specification (TIS)
+ driver. This driver is used by vtpmmgr-stubdom to talk directly to
+ the hardware TPM. Communication is facilitated by mapping
+ hardware memory pages into vtpmmgr-stubdom.
+
+ * Hardware TPM: The physical TPM that is soldered onto the motherboard.
+
+
+INTEGRATION WITH XEN
+--------------------
+
+Support for the vTPM driver was added in Xen using the libxl toolstack in Xen
+4.3. See the Xen documentation (docs/misc/vtpm.txt) for details on setting up
+the vTPM and vTPM Manager stub domains. Once the stub domains are running, a
+vTPM device is set up in the same manner as a disk or network device in the
+domain's configuration file.
+
+In order to use features such as IMA that require a TPM to be loaded prior to
+the initrd, the xen-tpmfront driver must be compiled in to the kernel. If not
+using such features, the driver can be compiled as a module and will be loaded
+as usual.
diff --git a/Documentation/usb/URB.txt b/Documentation/usb/URB.txt
index 00d2c644068..50da0d45544 100644
--- a/Documentation/usb/URB.txt
+++ b/Documentation/usb/URB.txt
@@ -195,13 +195,12 @@ by the completion handler.
The handler is of the following type:
- typedef void (*usb_complete_t)(struct urb *, struct pt_regs *)
+ typedef void (*usb_complete_t)(struct urb *)
-I.e., it gets the URB that caused the completion call, plus the
-register values at the time of the corresponding interrupt (if any).
-In the completion handler, you should have a look at urb->status to
-detect any USB errors. Since the context parameter is included in the URB,
-you can pass information to the completion handler.
+I.e., it gets the URB that caused the completion call. In the completion
+handler, you should have a look at urb->status to detect any USB errors.
+Since the context parameter is included in the URB, you can pass
+information to the completion handler.
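
A hedged sketch of such a handler (the foo_ name is hypothetical; the real
work done on success is driver specific):

  #include <linux/usb.h>

  /* Runs in atomic context (see the WARNING below), so it must not sleep. */
  static void foo_urb_complete(struct urb *urb)
  {
          if (urb->status) {
                  dev_err(&urb->dev->dev, "urb failed: %d\n", urb->status);
                  return;
          }

          /* Success: process urb->transfer_buffer and/or resubmit the URB.
           * urb->context carries whatever the driver stored at submit time.
           */
  }
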
Note that even when an error (or unlink) is reported, data may have been
transferred. That's because USB transfers are packetized; it might take
@@ -210,12 +209,12 @@ have transferred successfully before the completion was called.
NOTE: ***** WARNING *****
-NEVER SLEEP IN A COMPLETION HANDLER. These are normally called
-during hardware interrupt processing. If you can, defer substantial
-work to a tasklet (bottom half) to keep system latencies low. You'll
-probably need to use spinlocks to protect data structures you manipulate
-in completion handlers.
+NEVER SLEEP IN A COMPLETION HANDLER. These are often called in atomic
+context.
+In the current kernel, completion handlers run with local interrupts
+disabled, but in the future this will be changed, so don't assume that
+local IRQs are always disabled inside completion handlers.
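
As an illustration, a minimal completion handler might look like the sketch
below (the driver structure, its work item and the debug message are invented;
it assumes <linux/usb.h> and <linux/workqueue.h>):

    static void my_urb_complete(struct urb *urb)
    {
            struct my_device *dev = urb->context;  /* context passed at submit time */

            switch (urb->status) {
            case 0:                         /* success */
                    break;
            case -ENOENT:                   /* killed by usb_kill_urb() */
            case -ECONNRESET:               /* unlinked asynchronously */
            case -ESHUTDOWN:                /* device went away */
                    return;
            default:                        /* other error; data may be partial */
                    dev_dbg(&urb->dev->dev, "urb status %d\n", urb->status);
                    break;
            }

            /* never sleep here; defer substantial work */
            schedule_work(&dev->rx_work);
    }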
1.8. How to do isochronous (ISO) transfers?
diff --git a/Documentation/usb/proc_usb_info.txt b/Documentation/usb/proc_usb_info.txt
index c9c3f0f5ad7..98be9198267 100644
--- a/Documentation/usb/proc_usb_info.txt
+++ b/Documentation/usb/proc_usb_info.txt
@@ -54,9 +54,12 @@ it and 002/048 sometime later.
These files can be read as binary data. The binary data consists
of first the device descriptor, then the descriptors for each
-configuration of the device. Multi-byte fields in the device and
-configuration descriptors, but not other descriptors, are converted
-to host endianness by the kernel. This information is also shown
+configuration of the device. Multi-byte fields in the device descriptor
+are converted to host endianness by the kernel. The configuration
+descriptors are in bus-endian format! The configuration descriptors
+are wTotalLength bytes apart. If a device returns less configuration
+descriptor data than indicated by wTotalLength there will be a hole in
+the file for the missing bytes. This information is also shown
in text form by the /proc/bus/usb/devices file, described later.
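
For example, a user-space tool could walk the configuration descriptors by
following wTotalLength, roughly like this (a sketch: the device path is an
example and error handling is minimal):

    #include <stdio.h>
    #include <stdint.h>

    /* wTotalLength sits at offset 2 of each configuration descriptor and is
       little-endian ("bus endian") regardless of the host byte order. */
    static uint16_t get_le16(const unsigned char *p)
    {
            return (uint16_t)(p[0] | (p[1] << 8));
    }

    int main(void)
    {
            unsigned char buf[65536];
            FILE *f = fopen("/proc/bus/usb/001/002", "rb");  /* example device */
            size_t len, off = 18;           /* device descriptor is 18 bytes */

            if (!f)
                    return 1;
            len = fread(buf, 1, sizeof(buf), f);
            fclose(f);

            while (off + 4 <= len) {
                    uint16_t total = get_le16(buf + off + 2);

                    printf("configuration at offset %zu, wTotalLength %u\n",
                           off, total);
                    if (total < 9)          /* malformed; avoid looping */
                            break;
                    off += total;           /* next configuration, if any */
            }
            return 0;
    }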
These files may also be used to write user-level drivers for the USB
diff --git a/Documentation/video4linux/v4l2-controls.txt b/Documentation/video4linux/v4l2-controls.txt
index 676f8736602..06cf3ac8363 100644
--- a/Documentation/video4linux/v4l2-controls.txt
+++ b/Documentation/video4linux/v4l2-controls.txt
@@ -124,26 +124,27 @@ You add non-menu controls by calling v4l2_ctrl_new_std:
const struct v4l2_ctrl_ops *ops,
u32 id, s32 min, s32 max, u32 step, s32 def);
-Menu controls are added by calling v4l2_ctrl_new_std_menu:
+Menu and integer menu controls are added by calling v4l2_ctrl_new_std_menu:
struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
u32 id, s32 max, s32 skip_mask, s32 def);
-Or alternatively for integer menu controls, by calling v4l2_ctrl_new_int_menu:
+Menu controls with a driver specific menu are added by calling
+v4l2_ctrl_new_std_menu_items:
+
+ struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(
+ struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops, u32 id, s32 max,
+ s32 skip_mask, s32 def, const char * const *qmenu);
+
+Integer menu controls with a driver specific menu can be added by calling
+v4l2_ctrl_new_int_menu:
struct v4l2_ctrl *v4l2_ctrl_new_int_menu(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
u32 id, s32 max, s32 def, const s64 *qmenu_int);
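
For illustration, a driver-defined standard menu might be registered like this
(a sketch; the handler, ops and menu strings are invented, while the call
matches the v4l2_ctrl_new_std_menu_items() prototype above):

    static const char * const foo_test_pattern_menu[] = {
            "Disabled",
            "Vertical colour bars",
            "Solid black",
    };

    /* in the driver's control setup, after v4l2_ctrl_handler_init() */
    v4l2_ctrl_new_std_menu_items(&foo->hdl, &foo_ctrl_ops,
                    V4L2_CID_TEST_PATTERN,
                    ARRAY_SIZE(foo_test_pattern_menu) - 1,
                    0, 0, foo_test_pattern_menu);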
-Standard menu controls with a driver specific menu are added by calling
-v4l2_ctrl_new_std_menu_items:
-
- struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(
- struct v4l2_ctrl_handler *hdl,
- const struct v4l2_ctrl_ops *ops, u32 id, s32 max,
- s32 skip_mask, s32 def, const char * const *qmenu);
-
These functions are typically called right after the v4l2_ctrl_handler_init:
static const s64 exp_bias_qmenu[] = {
diff --git a/Documentation/virtual/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt
index 83afe65d496..22ff659bc0f 100644
--- a/Documentation/virtual/kvm/cpuid.txt
+++ b/Documentation/virtual/kvm/cpuid.txt
@@ -43,6 +43,10 @@ KVM_FEATURE_CLOCKSOURCE2 || 3 || kvmclock available at msrs
KVM_FEATURE_ASYNC_PF || 4 || async pf can be enabled by
|| || writing to msr 0x4b564d02
------------------------------------------------------------------------------
+KVM_FEATURE_PV_UNHALT || 7 || guest checks this feature bit
+ || || before enabling paravirtualized
+ || || spinlock support.
+------------------------------------------------------------------------------
KVM_FEATURE_CLOCKSOURCE_STABLE_BIT || 24 || host will warn if no guest-side
|| || per-cpu warps are expected in
|| || kvmclock.
diff --git a/Documentation/virtual/kvm/hypercalls.txt b/Documentation/virtual/kvm/hypercalls.txt
index ea113b5d87a..022198e389d 100644
--- a/Documentation/virtual/kvm/hypercalls.txt
+++ b/Documentation/virtual/kvm/hypercalls.txt
@@ -64,3 +64,17 @@ Purpose: To enable communication between the hypervisor and guest there is a
shared page that contains parts of supervisor visible register state.
The guest can map this shared page to access its supervisor register through
memory using this hypercall.
+
+5. KVM_HC_KICK_CPU
+------------------------
+Architecture: x86
+Status: active
+Purpose: Hypercall used to wake up a vcpu from HLT state
+Usage example: A vcpu of a paravirtualized guest that is busy-waiting in guest
+kernel mode for an event to occur (e.g. a spinlock to become available) can
+execute the HLT instruction once it has busy-waited for more than a threshold
+time interval. Execution of the HLT instruction causes the hypervisor to put
+the vcpu to sleep until an appropriate event occurs. Another vcpu of the same
+guest can wake up the sleeping vcpu by issuing the KVM_HC_KICK_CPU hypercall,
+specifying the APIC ID (a1) of the vcpu to be woken up. An additional argument
+(a0) is reserved for future use.
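
For illustration, the guest side pairs the CPUID check with the hypercall
roughly as follows (a sketch modelled on the x86 guest code; the wrapper names
are illustrative):

    /* a0 (flags) is currently unused, a1 carries the target APIC ID */
    static void kick_vcpu(int cpu)
    {
            u32 apicid = per_cpu(x86_cpu_to_apicid, cpu);

            kvm_hypercall2(KVM_HC_KICK_CPU, 0, apicid);
    }

    static bool pv_unhalt_supported(void)
    {
            /* guests are expected to check the feature bit first */
            return kvm_para_has_feature(KVM_FEATURE_PV_UNHALT);
    }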
diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt
index a6ab4b62d92..f81a65b54c2 100644
--- a/Documentation/workqueue.txt
+++ b/Documentation/workqueue.txt
@@ -85,32 +85,31 @@ workqueue.
Special purpose threads, called worker threads, execute the functions
off of the queue, one after the other. If no work is queued, the
worker threads become idle. These worker threads are managed in so
-called thread-pools.
+called worker-pools.
The cmwq design differentiates between the user-facing workqueues that
subsystems and drivers queue work items on and the backend mechanism
-which manages thread-pools and processes the queued work items.
+which manages worker-pools and processes the queued work items.
-The backend is called gcwq. There is one gcwq for each possible CPU
-and one gcwq to serve work items queued on unbound workqueues. Each
-gcwq has two thread-pools - one for normal work items and the other
-for high priority ones.
+There are two worker-pools, one for normal work items and the other
+for high priority ones, for each possible CPU and some extra
+worker-pools to serve work items queued on unbound workqueues - the
+number of these backing pools is dynamic.
Subsystems and drivers can create and queue work items through special
workqueue API functions as they see fit. They can influence some
aspects of the way the work items are executed by setting flags on the
workqueue they are putting the work item on. These flags include
-things like CPU locality, reentrancy, concurrency limits, priority and
-more. To get a detailed overview refer to the API description of
+things like CPU locality, concurrency limits, priority and more. To
+get a detailed overview refer to the API description of
alloc_workqueue() below.
-When a work item is queued to a workqueue, the target gcwq and
-thread-pool is determined according to the queue parameters and
-workqueue attributes and appended on the shared worklist of the
-thread-pool. For example, unless specifically overridden, a work item
-of a bound workqueue will be queued on the worklist of either normal
-or highpri thread-pool of the gcwq that is associated to the CPU the
-issuer is running on.
+When a work item is queued to a workqueue, the target worker-pool is
+determined according to the queue parameters and workqueue attributes
+and appended on the shared worklist of the worker-pool. For example,
+unless specifically overridden, a work item of a bound workqueue will
+be queued on the worklist of either normal or highpri worker-pool that
+is associated to the CPU the issuer is running on.
For any worker pool implementation, managing the concurrency level
(how many execution contexts are active) is an important issue. cmwq
@@ -118,14 +117,14 @@ tries to keep the concurrency at a minimal but sufficient level.
Minimal to save resources and sufficient in that the system is used at
its full capacity.
-Each thread-pool bound to an actual CPU implements concurrency
-management by hooking into the scheduler. The thread-pool is notified
+Each worker-pool bound to an actual CPU implements concurrency
+management by hooking into the scheduler. The worker-pool is notified
whenever an active worker wakes up or sleeps and keeps track of the
number of the currently runnable workers. Generally, work items are
not expected to hog a CPU and consume many cycles. That means
maintaining just enough concurrency to prevent work processing from
stalling should be optimal. As long as there are one or more runnable
-workers on the CPU, the thread-pool doesn't start execution of a new
+workers on the CPU, the worker-pool doesn't start execution of a new
work, but, when the last running worker goes to sleep, it immediately
schedules a new worker so that the CPU doesn't sit idle while there
are pending work items. This allows using a minimal number of workers
@@ -135,19 +134,20 @@ Keeping idle workers around doesn't cost other than the memory space
for kthreads, so cmwq holds onto idle ones for a while before killing
them.
-For an unbound wq, the above concurrency management doesn't apply and
-the thread-pools for the pseudo unbound CPU try to start executing all
-work items as soon as possible. The responsibility of regulating
-concurrency level is on the users. There is also a flag to mark a
-bound wq to ignore the concurrency management. Please refer to the
-API section for details.
+For unbound workqueues, the number of backing pools is dynamic.
+An unbound workqueue can be assigned custom attributes using
+apply_workqueue_attrs() and the workqueue will automatically create
+backing worker pools matching the attributes. The responsibility of
+regulating concurrency level is on the users. There is also a flag to
+mark a bound wq to ignore the concurrency management. Please refer to
+the API section for details.
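
A sketch of that interface (details such as the gfp argument of
alloc_workqueue_attrs() vary between kernel versions; the queue name and
attribute values are arbitrary):

    struct workqueue_struct *wq = alloc_workqueue("my_unbound", WQ_UNBOUND, 0);
    struct workqueue_attrs *attrs = alloc_workqueue_attrs(GFP_KERNEL);

    if (wq && attrs) {
            attrs->nice = -5;                 /* elevated worker priority */
            cpumask_copy(attrs->cpumask, cpu_online_mask);
            apply_workqueue_attrs(wq, attrs); /* matching pools are created */
    }
    free_workqueue_attrs(attrs);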
Forward progress guarantee relies on that workers can be created when
more execution contexts are necessary, which in turn is guaranteed
through the use of rescue workers. All work items which might be used
on code paths that handle memory reclaim are required to be queued on
wq's that have a rescue-worker reserved for execution under memory
-pressure. Else it is possible that the thread-pool deadlocks waiting
+pressure. Else it is possible that the worker-pool deadlocks waiting
for execution contexts to free up.
@@ -166,25 +166,15 @@ resources, scheduled and executed.
@flags:
- WQ_NON_REENTRANT
-
- By default, a wq guarantees non-reentrance only on the same
- CPU. A work item may not be executed concurrently on the same
- CPU by multiple workers but is allowed to be executed
- concurrently on multiple CPUs. This flag makes sure
- non-reentrance is enforced across all CPUs. Work items queued
- to a non-reentrant wq are guaranteed to be executed by at most
- one worker system-wide at any given time.
-
WQ_UNBOUND
- Work items queued to an unbound wq are served by a special
- gcwq which hosts workers which are not bound to any specific
- CPU. This makes the wq behave as a simple execution context
- provider without concurrency management. The unbound gcwq
- tries to start execution of work items as soon as possible.
- Unbound wq sacrifices locality but is useful for the following
- cases.
+ Work items queued to an unbound wq are served by the special
+ worker-pools which host workers which are not bound to any
+ specific CPU. This makes the wq behave as a simple execution
+ context provider without concurrency management. The unbound
+ worker-pools try to start execution of work items as soon as
+ possible. Unbound wq sacrifices locality but is useful for
+ the following cases.
* Wide fluctuation in the concurrency level requirement is
expected and using bound wq may end up creating large number
@@ -209,10 +199,10 @@ resources, scheduled and executed.
WQ_HIGHPRI
Work items of a highpri wq are queued to the highpri
- thread-pool of the target gcwq. Highpri thread-pools are
+ worker-pool of the target cpu. Highpri worker-pools are
served by worker threads with elevated nice level.
- Note that normal and highpri thread-pools don't interact with
+ Note that normal and highpri worker-pools don't interact with
each other. Each maintain its separate pool of workers and
implements concurrency management among its workers.
@@ -221,7 +211,7 @@ resources, scheduled and executed.
Work items of a CPU intensive wq do not contribute to the
concurrency level. In other words, runnable CPU intensive
work items will not prevent other work items in the same
- thread-pool from starting execution. This is useful for bound
+ worker-pool from starting execution. This is useful for bound
work items which are expected to hog CPU cycles so that their
execution is regulated by the system scheduler.
@@ -233,6 +223,10 @@ resources, scheduled and executed.
This flag is meaningless for unbound wq.
+Note that the flag WQ_NON_REENTRANT no longer exists as all workqueues
+are now non-reentrant - any work item is guaranteed to be executed by
+at most one worker system-wide at any given time.
+
@max_active:
@max_active determines the maximum number of execution contexts per
@@ -254,9 +248,9 @@ recommended.
Some users depend on the strict execution ordering of ST wq. The
combination of @max_active of 1 and WQ_UNBOUND is used to achieve this
-behavior. Work items on such wq are always queued to the unbound gcwq
-and only one work item can be active at any given time thus achieving
-the same ordering property as ST wq.
+behavior. Work items on such wq are always queued to the unbound
+worker-pools and only one work item can be active at any given time thus
+achieving the same ordering property as ST wq.
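
For example, a strictly ordered queue can be created like this (the name is
arbitrary; alloc_ordered_workqueue() is the usual shorthand for the same
thing):

    /* at most one work item active, executed in queueing order */
    struct workqueue_struct *ordered = alloc_workqueue("my_ordered",
                                                       WQ_UNBOUND, 1);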
5. Example Execution Scenarios
diff --git a/Documentation/x86/x86_64/boot-options.txt b/Documentation/x86/x86_64/boot-options.txt
index e9e8ddbbf37..1228b22e142 100644
--- a/Documentation/x86/x86_64/boot-options.txt
+++ b/Documentation/x86/x86_64/boot-options.txt
@@ -176,6 +176,11 @@ ACPI
acpi=noirq Don't route interrupts
+ acpi=nocmcff Disable firmware first mode for corrected errors. This
+ disables parsing the HEST CMC error source to check if
+ firmware has set the FF flag. This may result in
+ duplicate corrected error reports.
+
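+
For example (illustrative; appended to whatever command line the system
already uses):

    acpi=nocmcff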
PCI
pci=off Don't use PCI
diff --git a/MAINTAINERS b/MAINTAINERS
index f817ae196a8..caa7c3a1fef 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -580,12 +580,24 @@ L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/i2c/ad9389b*
+ANALOG DEVICES INC ADV7511 DRIVER
+M: Hans Verkuil <hans.verkuil@cisco.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/i2c/adv7511*
+
ANALOG DEVICES INC ADV7604 DRIVER
M: Hans Verkuil <hans.verkuil@cisco.com>
L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/i2c/adv7604*
+ANALOG DEVICES INC ADV7842 DRIVER
+M: Hans Verkuil <hans.verkuil@cisco.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/i2c/adv7842*
+
ANALOG DEVICES INC ASOC CODEC DRIVERS
M: Lars-Peter Clausen <lars@metafoo.de>
L: device-drivers-devel@blackfin.uclinux.org
@@ -595,6 +607,7 @@ S: Supported
F: sound/soc/codecs/adau*
F: sound/soc/codecs/adav*
F: sound/soc/codecs/ad1*
+F: sound/soc/codecs/ad7*
F: sound/soc/codecs/ssm*
F: sound/soc/codecs/sigmadsp.*
@@ -638,6 +651,12 @@ S: Maintained
F: drivers/net/appletalk/
F: net/appletalk/
+APTINA CAMERA SENSOR PLL
+M: Laurent Pinchart <Laurent.pinchart@ideasonboard.com>
+L: linux-media@vger.kernel.org
+S: Maintained
+F: drivers/media/i2c/aptina-pll.*
+
ARASAN COMPACT FLASH PATA CONTROLLER
M: Viresh Kumar <viresh.linux@gmail.com>
L: linux-ide@vger.kernel.org
@@ -813,7 +832,7 @@ F: arch/arm/mach-prima2/
F: drivers/dma/sirf-dma.c
F: drivers/i2c/busses/i2c-sirf.c
F: drivers/mmc/host/sdhci-sirf.c
-F: drivers/pinctrl/pinctrl-sirf.c
+F: drivers/pinctrl/sirf/
F: drivers/spi/spi-sirf.c
ARM/EBSA110 MACHINE SUPPORT
@@ -965,6 +984,12 @@ M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE
+M: Santosh Shilimkar <santosh.shilimkar@ti.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/mach-keystone/
+
ARM/LOGICPD PXA270 MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1259,7 +1284,6 @@ F: drivers/rtc/rtc-coh901331.c
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
ARM/Ux500 ARM ARCHITECTURE
-M: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
@@ -1406,7 +1430,7 @@ ATHEROS ATH6KL WIRELESS DRIVER
M: Kalle Valo <kvalo@qca.qualcomm.com>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org/en/users/Drivers/ath6kl
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/ath6kl.git
+T: git git://github.com/kvalo/ath.git
S: Supported
F: drivers/net/wireless/ath/ath6kl/
@@ -1542,6 +1566,13 @@ W: http://atmelwlandriver.sourceforge.net/
S: Maintained
F: drivers/net/wireless/atmel*
+ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
+M: Bradley Grove <linuxdrivers@attotech.com>
+L: linux-scsi@vger.kernel.org
+W: http://www.attotech.com
+S: Supported
+F: drivers/scsi/esas2r
+
AUDIT SUBSYSTEM
M: Al Viro <viro@zeniv.linux.org.uk>
M: Eric Paris <eparis@redhat.com>
@@ -1818,6 +1849,12 @@ L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/bnx2fc/
+BROADCOM BNX2I 1/10 GIGABIT iSCSI DRIVER
+M: Eddie Wai <eddie.wai@broadcom.com>
+L: linux-scsi@vger.kernel.org
+S: Supported
+F: drivers/scsi/bnx2i/
+
BROADCOM SPECIFIC AMBA DRIVER (BCMA)
M: Rafał Miłecki <zajec5@gmail.com>
L: linux-wireless@vger.kernel.org
@@ -2071,7 +2108,8 @@ F: drivers/usb/chipidea/
CISCO VIC ETHERNET NIC DRIVER
M: Christian Benvenuti <benve@cisco.com>
-M: Roopa Prabhu <roprabhu@cisco.com>
+M: Sujith Sankar <ssujith@cisco.com>
+M: Govindarajulu Varadarajan <govindarajulu90@gmail.com>
M: Neel Patel <neepatel@cisco.com>
M: Nishank Trivedi <nistrive@cisco.com>
S: Supported
@@ -2107,6 +2145,13 @@ M: Russell King <linux@arm.linux.org.uk>
S: Maintained
F: include/linux/clk.h
+CLOCKSOURCE, CLOCKEVENT DRIVERS
+M: Daniel Lezcano <daniel.lezcano@linaro.org>
+M: Thomas Gleixner <tglx@linutronix.de>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
+S: Supported
+F: drivers/clocksource
+
CISCO FCOE HBA DRIVER
M: Hiral Patel <hiralpat@cisco.com>
M: Suma Ramars <sramars@cisco.com>
@@ -2871,7 +2916,7 @@ F: drivers/media/usb/dvb-usb-v2/dvb_usb*
F: drivers/media/usb/dvb-usb-v2/usb_urb.c
DYNAMIC DEBUG
-M: Jason Baron <jbaron@redhat.com>
+M: Jason Baron <jbaron@akamai.com>
S: Maintained
F: lib/dynamic_debug.c
F: include/linux/dynamic_debug.h
@@ -4360,7 +4405,7 @@ F: drivers/net/wireless/iwlegacy/
INTEL WIRELESS WIFI LINK (iwlwifi)
M: Johannes Berg <johannes.berg@intel.com>
-M: Wey-Yi Guy <wey-yi.w.guy@intel.com>
+M: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
M: Intel Linux Wireless <ilw@linux.intel.com>
L: linux-wireless@vger.kernel.org
W: http://intellinuxwireless.org
@@ -5492,7 +5537,7 @@ L: platform-driver-x86@vger.kernel.org
S: Supported
F: drivers/platform/x86/msi-wmi.c
-MT9M032 SENSOR DRIVER
+MT9M032 APTINA SENSOR DRIVER
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
@@ -5500,7 +5545,7 @@ S: Maintained
F: drivers/media/i2c/mt9m032.c
F: include/media/mt9m032.h
-MT9P031 SENSOR DRIVER
+MT9P031 APTINA CAMERA SENSOR
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
@@ -5508,7 +5553,7 @@ S: Maintained
F: drivers/media/i2c/mt9p031.c
F: include/media/mt9p031.h
-MT9T001 SENSOR DRIVER
+MT9T001 APTINA CAMERA SENSOR
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
@@ -5516,7 +5561,7 @@ S: Maintained
F: drivers/media/i2c/mt9t001.c
F: include/media/mt9t001.h
-MT9V032 SENSOR DRIVER
+MT9V032 APTINA CAMERA SENSOR
M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
L: linux-media@vger.kernel.org
T: git git://linuxtv.org/media_tree.git
@@ -5576,9 +5621,9 @@ S: Maintained
F: drivers/media/tuners/mxl5007t.*
MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M: Andrew Gallatin <gallatin@myri.com>
+M: Hyong-Youb Kim <hykim@myri.com>
L: netdev@vger.kernel.org
-W: http://www.myri.com/scs/download-Myri10GE.html
+W: https://www.myricom.com/support/downloads/myri10ge.html
S: Supported
F: drivers/net/ethernet/myricom/myri10ge/
@@ -5787,7 +5832,7 @@ M: Aloisio Almeida Jr <aloisio.almeida@openbossa.org>
M: Samuel Ortiz <sameo@linux.intel.com>
L: linux-wireless@vger.kernel.org
L: linux-nfc@lists.01.org (moderated for non-subscribers)
-S: Maintained
+S: Supported
F: net/nfc/
F: include/net/nfc/
F: include/uapi/linux/nfc.h
@@ -5879,7 +5924,7 @@ F: drivers/i2c/busses/i2c-omap.c
F: include/linux/i2c-omap.h
OMAP DEVICE TREE SUPPORT
-M: Benoît Cousson <b-cousson@ti.com>
+M: Benoît Cousson <bcousson@baylibre.com>
M: Tony Lindgren <tony@atomide.com>
L: linux-omap@vger.kernel.org
L: devicetree@vger.kernel.org
@@ -5959,14 +6004,14 @@ S: Maintained
F: drivers/char/hw_random/omap-rng.c
OMAP HWMOD SUPPORT
-M: Benoît Cousson <b-cousson@ti.com>
+M: Benoît Cousson <bcousson@baylibre.com>
M: Paul Walmsley <paul@pwsan.com>
L: linux-omap@vger.kernel.org
S: Maintained
F: arch/arm/mach-omap2/omap_hwmod.*
OMAP HWMOD DATA FOR OMAP4-BASED DEVICES
-M: Benoît Cousson <b-cousson@ti.com>
+M: Benoît Cousson <bcousson@baylibre.com>
L: linux-omap@vger.kernel.org
S: Maintained
F: arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -6061,7 +6106,7 @@ M: Rob Herring <rob.herring@calxeda.com>
M: Pawel Moll <pawel.moll@arm.com>
M: Mark Rutland <mark.rutland@arm.com>
M: Stephen Warren <swarren@wwwdotorg.org>
-M: Ian Campbell <ian.campbell@citrix.com>
+M: Ian Campbell <ijc+devicetree@hellion.org.uk>
L: devicetree@vger.kernel.org
S: Maintained
F: Documentation/devicetree/
@@ -6671,11 +6716,11 @@ F: Documentation/scsi/LICENSE.qla2xxx
F: drivers/scsi/qla2xxx/
QLOGIC QLA4XXX iSCSI DRIVER
-M: Ravi Anand <ravi.anand@qlogic.com>
M: Vikas Chaudhary <vikas.chaudhary@qlogic.com>
M: iscsi-driver@qlogic.com
L: linux-scsi@vger.kernel.org
S: Supported
+F: Documentation/scsi/LICENSE.qla4xxx
F: drivers/scsi/qla4xxx/
QLOGIC QLA3XXX NETWORK DRIVER
@@ -6726,6 +6771,14 @@ T: git git://linuxtv.org/anttip/media_tree.git
S: Maintained
F: drivers/media/tuners/qt1010*
+QUALCOMM ATHEROS ATH10K WIRELESS DRIVER
+M: Kalle Valo <kvalo@qca.qualcomm.com>
+L: ath10k@lists.infradead.org
+W: http://wireless.kernel.org/en/users/Drivers/ath10k
+T: git git://github.com/kvalo/ath.git
+S: Supported
+F: drivers/net/wireless/ath/ath10k/
+
QUALCOMM HEXAGON ARCHITECTURE
M: Richard Kuo <rkuo@codeaurora.org>
L: linux-hexagon@vger.kernel.org
@@ -6908,6 +6961,14 @@ M: Maxim Levitsky <maximlevitsky@gmail.com>
S: Maintained
F: drivers/memstick/host/r592.*
+ROCCAT DRIVERS
+M: Stefan Achatz <erazor_de@users.sourceforge.net>
+W: http://sourceforge.net/projects/roccat/
+S: Maintained
+F: drivers/hid/hid-roccat*
+F: include/linux/hid-roccat*
+F: Documentation/ABI/*/sysfs-driver-hid-roccat*
+
ROCKETPORT DRIVER
P: Comtrol Corp.
W: http://www.comtrol.com
@@ -7130,7 +7191,7 @@ S: Maintained
F: include/linux/mmc/dw_mmc.h
F: drivers/mmc/host/dw_mmc*
-TIMEKEEPING, NTP
+TIMEKEEPING, CLOCKSOURCE CORE, NTP
M: John Stultz <john.stultz@linaro.org>
M: Thomas Gleixner <tglx@linutronix.de>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
@@ -7143,7 +7204,6 @@ F: include/uapi/linux/timex.h
F: kernel/time/clocksource.c
F: kernel/time/time*.c
F: kernel/time/ntp.c
-F: drivers/clocksource
TLG2300 VIDEO4LINUX-2 DRIVER
M: Huang Shijie <shijie8@gmail.com>
@@ -7223,6 +7283,7 @@ W: http://lksctp.sourceforge.net
S: Maintained
F: Documentation/networking/sctp.txt
F: include/linux/sctp.h
+F: include/uapi/linux/sctp.h
F: include/net/sctp/
F: net/sctp/
@@ -7353,7 +7414,6 @@ F: drivers/net/ethernet/sfc/
SGI GRU DRIVER
M: Dimitri Sivanich <sivanich@sgi.com>
-M: Robin Holt <holt@sgi.com>
S: Maintained
F: drivers/misc/sgi-gru/
@@ -7373,7 +7433,8 @@ S: Maintained for 2.6.
F: Documentation/sgi-visws.txt
SGI XP/XPC/XPNET DRIVER
-M: Robin Holt <holt@sgi.com>
+M: Cliff Whickman <cpw@sgi.com>
+M: Robin Holt <robinmholt@gmail.com>
S: Maintained
F: drivers/misc/sgi-xp/
@@ -7662,6 +7723,17 @@ F: include/sound/
F: include/uapi/sound/
F: sound/
+SOUND - COMPRESSED AUDIO
+M: Vinod Koul <vinod.koul@intel.com>
+L: alsa-devel@alsa-project.org (moderated for non-subscribers)
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound.git
+S: Supported
+F: Documentation/sound/alsa/compress_offload.txt
+F: include/sound/compress_driver.h
+F: include/uapi/sound/compress_*
+F: sound/core/compress_offload.c
+F: sound/soc/soc-compress.c
+
SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
M: Liam Girdwood <lgirdwood@gmail.com>
M: Mark Brown <broonie@kernel.org>
@@ -7669,6 +7741,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
W: http://alsa-project.org/main/index.php/ASoC
S: Supported
+F: Documentation/sound/alsa/soc/
F: sound/soc/
F: include/sound/soc*
@@ -7818,7 +7891,7 @@ F: drivers/staging/asus_oled/
STAGING - COMEDI
M: Ian Abbott <abbotti@mev.co.uk>
-M: Mori Hess <fmhess@users.sourceforge.net>
+M: H Hartley Sweeten <hsweeten@visionengravers.com>
S: Odd Fixes
F: drivers/staging/comedi/
@@ -7877,11 +7950,11 @@ S: Maintained
F: drivers/staging/nvec/
STAGING - OLPC SECONDARY DISPLAY CONTROLLER (DCON)
-M: Andres Salomon <dilinger@queued.net>
-M: Chris Ball <cjb@laptop.org>
+M: Jens Frederich <jfrederich@gmail.com>
+M: Daniel Drake <dsd@laptop.org>
M: Jon Nettleton <jon.nettleton@gmail.com>
W: http://wiki.laptop.org/go/DCON
-S: Odd Fixes
+S: Maintained
F: drivers/staging/olpc_dcon/
STAGING - OZMO DEVICES USB OVER WIFI DRIVER
@@ -7959,6 +8032,12 @@ F: arch/m68k/sun3*/
F: arch/m68k/include/asm/sun3*
F: drivers/net/ethernet/i825xx/sun3*
+SUNDANCE NETWORK DRIVER
+M: Denis Kirjanov <kda@linux-powerpc.org>
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/dlink/sundance.c
+
SUPERH
M: Paul Mundt <lethal@linux-sh.org>
L: linux-sh@vger.kernel.org
@@ -8270,7 +8349,7 @@ S: Maintained
F: sound/soc/codecs/twl4030*
TI WILINK WIRELESS DRIVERS
-M: Luciano Coelho <coelho@ti.com>
+M: Luciano Coelho <luca@coelho.fi>
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org/en/users/Drivers/wl12xx
W: http://wireless.kernel.org/en/users/Drivers/wl1251
@@ -8656,6 +8735,11 @@ T: git git://git.alsa-project.org/alsa-kernel.git
S: Maintained
F: sound/usb/midi.*
+USB NETWORKING DRIVERS
+L: linux-usb@vger.kernel.org
+S: Odd Fixes
+F: drivers/net/usb/
+
USB OHCI DRIVER
M: Alan Stern <stern@rowland.harvard.edu>
L: linux-usb@vger.kernel.org
@@ -8785,7 +8869,6 @@ W: http://www.linux-usb.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git
S: Supported
F: Documentation/usb/
-F: drivers/net/usb/
F: drivers/usb/
F: include/linux/usb.h
F: include/linux/usb/
@@ -9008,6 +9091,12 @@ F: drivers/staging/vme/
F: drivers/vme/
F: include/linux/vme*
+VMWARE HYPERVISOR INTERFACE
+M: Alok Kataria <akataria@vmware.com>
+L: virtualization@lists.linux-foundation.org
+S: Supported
+F: arch/x86/kernel/cpu/vmware.c
+
VMWARE VMXNET3 ETHERNET DRIVER
M: Shreyas Bhatewara <sbhatewara@vmware.com>
M: "VMware, Inc." <pv-drivers@vmware.com>
@@ -9229,9 +9318,9 @@ F: drivers/media/tuners/tuner-xc2028.*
XEN HYPERVISOR INTERFACE
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-M: Jeremy Fitzhardinge <jeremy@goop.org>
-L: xen-devel@lists.xensource.com (moderated for non-subscribers)
-L: virtualization@lists.linux-foundation.org
+M: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+M: David Vrabel <david.vrabel@citrix.com>
+L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
F: arch/x86/xen/
F: drivers/*/xen-*front.c
@@ -9242,35 +9331,35 @@ F: include/uapi/xen/
XEN HYPERVISOR ARM
M: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
-L: xen-devel@lists.xensource.com (moderated for non-subscribers)
+L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
F: arch/arm/xen/
F: arch/arm/include/asm/xen/
XEN HYPERVISOR ARM64
M: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
-L: xen-devel@lists.xensource.com (moderated for non-subscribers)
+L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
F: arch/arm64/xen/
F: arch/arm64/include/asm/xen/
XEN NETWORK BACKEND DRIVER
M: Ian Campbell <ian.campbell@citrix.com>
-L: xen-devel@lists.xensource.com (moderated for non-subscribers)
+L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/xen-netback/*
XEN PCI SUBSYSTEM
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L: xen-devel@lists.xensource.com (moderated for non-subscribers)
+L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
F: arch/x86/pci/*xen*
F: drivers/pci/*xen*
XEN SWIOTLB SUBSYSTEM
M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L: xen-devel@lists.xensource.com (moderated for non-subscribers)
+L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
S: Supported
F: arch/x86/xen/*swiotlb*
F: drivers/xen/*swiotlb*
diff --git a/Makefile b/Makefile
index a35f72a420c..fe8204be566 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 11
SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION =
NAME = Linux for Workgroups
# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 8d2ae24b9f4..1feb169274f 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -407,6 +407,12 @@ config CLONE_BACKWARDS2
help
Architecture has the first two arguments of clone(2) swapped.
+config CLONE_BACKWARDS3
+ bool
+ help
+ Architecture has tls passed as the 3rd argument of clone(2),
+ not the 5th one.
+
config ODD_RT_SIGACTION
bool
help
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
index b8ce18f485d..310a4ce1dcc 100644
--- a/arch/alpha/oprofile/common.c
+++ b/arch/alpha/oprofile/common.c
@@ -106,7 +106,7 @@ op_axp_stop(void)
}
static int
-op_axp_create_files(struct super_block *sb, struct dentry *root)
+op_axp_create_files(struct dentry *root)
{
int i;
@@ -115,23 +115,23 @@ op_axp_create_files(struct super_block *sb, struct dentry *root)
char buf[4];
snprintf(buf, sizeof buf, "%d", i);
- dir = oprofilefs_mkdir(sb, root, buf);
+ dir = oprofilefs_mkdir(root, buf);
- oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
- oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
- oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
+ oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
+ oprofilefs_create_ulong(dir, "event", &ctr[i].event);
+ oprofilefs_create_ulong(dir, "count", &ctr[i].count);
/* Dummies. */
- oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
- oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
- oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
+ oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
+ oprofilefs_create_ulong(dir, "user", &ctr[i].user);
+ oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
}
if (model->can_set_proc_mode) {
- oprofilefs_create_ulong(sb, root, "enable_pal",
+ oprofilefs_create_ulong(root, "enable_pal",
&sys.enable_pal);
- oprofilefs_create_ulong(sb, root, "enable_kernel",
+ oprofilefs_create_ulong(root, "enable_kernel",
&sys.enable_kernel);
- oprofilefs_create_ulong(sb, root, "enable_user",
+ oprofilefs_create_ulong(root, "enable_user",
&sys.enable_user);
}
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index 8943c028d4b..df57611652e 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -38,6 +38,7 @@
#include <asm/ptrace.h>
#include <asm/processor.h> /* For VMALLOC_START */
#include <asm/thread_info.h> /* For THREAD_SIZE */
+#include <asm/mmu.h>
/* Note on the LD/ST addr modes with addr reg wback
*
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
index 99c10475d47..9c548c7cf00 100644
--- a/arch/arc/lib/strchr-700.S
+++ b/arch/arc/lib/strchr-700.S
@@ -39,9 +39,18 @@ ARC_ENTRY strchr
ld.a r2,[r0,4]
sub r12,r6,r7
bic r12,r12,r6
+#ifdef __LITTLE_ENDIAN__
and r7,r12,r4
breq r7,0,.Loop ; For speed, we want this branch to be unaligned.
b .Lfound_char ; Likewise this one.
+#else
+ and r12,r12,r4
+ breq r12,0,.Loop ; For speed, we want this branch to be unaligned.
+ lsr_s r12,r12,7
+ bic r2,r7,r6
+ b.d .Lfound_char_b
+ and_s r2,r2,r12
+#endif
; /* We require this code address to be unaligned for speed... */
.Laligned:
ld_s r2,[r0]
@@ -95,6 +104,7 @@ ARC_ENTRY strchr
lsr r7,r7,7
bic r2,r7,r6
+.Lfound_char_b:
norm r2,r2
sub_s r0,r0,4
asr_s r2,r2,3
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ba412e02ec0..5d1f5704a28 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -20,7 +20,6 @@ config ARM
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select HARDIRQS_SW_RESEND
- select HAVE_AOUT
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_SECCOMP_FILTER
@@ -53,6 +52,7 @@ config ARM
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16
+ select IRQ_FORCED_THREADING
select KTIME_SCALAR
select PERF_USE_VMALLOC
select RTC_LIB
@@ -218,7 +218,8 @@ config VECTORS_BASE
default DRAM_BASE if REMAP_VECTORS_TO_RAM
default 0x00000000
help
- The base address of exception vectors.
+ The base address of exception vectors. This must be two pages
+ in size.
config ARM_PATCH_PHYS_VIRT
bool "Patch physical to virtual translations at runtime" if EMBEDDED
@@ -1372,6 +1373,15 @@ config ARM_ERRATA_798181
which sends an IPI to the CPUs that are running the same ASID
as the one being invalidated.
+config ARM_ERRATA_773022
+ bool "ARM errata: incorrect instructions may be executed from loop buffer"
+ depends on CPU_V7
+ help
+ This option enables the workaround for the 773022 Cortex-A15
+ (up to r0p4) erratum. In certain rare sequences of code, the
+ loop buffer may deliver incorrect instructions. This
+ workaround disables the loop buffer to avoid the erratum.
+
endmenu
source "arch/arm/common/Kconfig"
@@ -1600,8 +1610,7 @@ config LOCAL_TIMERS
config ARCH_NR_GPIO
int
default 1024 if ARCH_SHMOBILE || ARCH_TEGRA
- default 512 if SOC_OMAP5
- default 512 if ARCH_KEYSTONE
+ default 512 if ARCH_EXYNOS || ARCH_KEYSTONE || SOC_OMAP5
default 392 if ARCH_U8500
default 352 if ARCH_VT8500
default 288 if ARCH_SUNXI
@@ -1614,13 +1623,49 @@ config ARCH_NR_GPIO
source kernel/Kconfig.preempt
-config HZ
+config HZ_FIXED
int
default 200 if ARCH_EBSA110 || ARCH_S3C24XX || ARCH_S5P64X0 || \
ARCH_S5PV210 || ARCH_EXYNOS4
default AT91_TIMER_HZ if ARCH_AT91
default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
- default 100
+
+choice
+ depends on !HZ_FIXED
+ prompt "Timer frequency"
+
+config HZ_100
+ bool "100 Hz"
+
+config HZ_200
+ bool "200 Hz"
+
+config HZ_250
+ bool "250 Hz"
+
+config HZ_300
+ bool "300 Hz"
+
+config HZ_500
+ bool "500 Hz"
+
+config HZ_1000
+ bool "1000 Hz"
+
+endchoice
+
+config HZ
+ int
+ default HZ_FIXED if HZ_FIXED
+ default 100 if HZ_100
+ default 200 if HZ_200
+ default 250 if HZ_250
+ default 300 if HZ_300
+ default 500 if HZ_500
+ default 1000
+
+config SCHED_HRTICK
+ def_bool HIGH_RES_TIMERS
config SCHED_HRTICK
def_bool HIGH_RES_TIMERS
@@ -1757,6 +1802,9 @@ config HAVE_ARCH_TRANSPARENT_HUGEPAGE
def_bool y
depends on ARM_LPAE
+config ARCH_WANT_GENERAL_HUGETLB
+ def_bool y
+
source "mm/Kconfig"
config FORCE_MAX_ZONEORDER
@@ -2065,8 +2113,7 @@ config KEXEC
It is an ongoing process to be certain the hardware in a machine
is properly shutdown, so do not be surprised if this code does not
- initially work for you. It may help to enable device hotplugging
- support.
+ initially work for you.
config ATAGS_PROC
bool "Export atags in procfs"
@@ -2176,6 +2223,13 @@ config NEON
Say Y to include support code for NEON, the ARMv7 Advanced SIMD
Extension.
+config KERNEL_MODE_NEON
+ bool "Support for NEON in kernel mode"
+ default n
+ depends on NEON
+ help
+ Say Y to include support for NEON in kernel mode.
+
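+
For reference, kernel code built with this option brackets NEON use roughly
like this (a sketch; the accelerated routine is hypothetical):

    #include <asm/neon.h>

    kernel_neon_begin();
    /* NEON registers may be used here; sleeping is not allowed */
    my_neon_copy(dst, src, len);        /* hypothetical NEON routine */
    kernel_neon_end();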
endmenu
menu "Userspace binary formats"
@@ -2200,7 +2254,7 @@ source "kernel/power/Kconfig"
config ARCH_SUSPEND_POSSIBLE
depends on !ARCH_S5PC100
- depends on CPU_ARM920T || CPU_ARM926T || CPU_SA1100 || \
+ depends on CPU_ARM920T || CPU_ARM926T || CPU_FEROCEON || CPU_SA1100 || \
CPU_V6 || CPU_V6K || CPU_V7 || CPU_XSC3 || CPU_XSCALE || CPU_MOHAWK
def_bool y
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index e401a766c0b..4137529850c 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -92,6 +92,7 @@ choice
config DEBUG_BCM2835
bool "Kernel low-level debugging on BCM2835 PL011 UART"
depends on ARCH_BCM2835
+ select DEBUG_UART_PL01X
config DEBUG_CLPS711X_UART1
bool "Kernel low-level debugging messages via UART1"
@@ -110,6 +111,7 @@ choice
config DEBUG_CNS3XXX
bool "Kernel Kernel low-level debugging on Cavium Networks CNS3xxx"
depends on ARCH_CNS3XXX
+ select DEBUG_UART_PL01X
help
Say Y here if you want the debug print routines to direct
their output to the CNS3xxx UART0.
@@ -117,6 +119,7 @@ choice
config DEBUG_DAVINCI_DA8XX_UART1
bool "Kernel low-level debugging on DaVinci DA8XX using UART1"
depends on ARCH_DAVINCI_DA8XX
+ select DEBUG_UART_8250
help
Say Y here if you want the debug print routines to direct
their output to UART1 serial port on DaVinci DA8XX devices.
@@ -124,6 +127,7 @@ choice
config DEBUG_DAVINCI_DA8XX_UART2
bool "Kernel low-level debugging on DaVinci DA8XX using UART2"
depends on ARCH_DAVINCI_DA8XX
+ select DEBUG_UART_8250
help
Say Y here if you want the debug print routines to direct
their output to UART2 serial port on DaVinci DA8XX devices.
@@ -131,6 +135,7 @@ choice
config DEBUG_DAVINCI_DMx_UART0
bool "Kernel low-level debugging on DaVinci DMx using UART0"
depends on ARCH_DAVINCI_DMx
+ select DEBUG_UART_8250
help
Say Y here if you want the debug print routines to direct
their output to UART0 serial port on DaVinci DMx devices.
@@ -138,6 +143,7 @@ choice
config DEBUG_DAVINCI_TNETV107X_UART1
bool "Kernel low-level debugging on DaVinci TNETV107x using UART1"
depends on ARCH_DAVINCI_TNETV107X
+ select DEBUG_UART_8250
help
Say Y here if you want the debug print routines to direct
their output to UART1 serial port on DaVinci TNETV107X
@@ -174,9 +180,26 @@ choice
Say Y here if you want the debug print routines to direct
their output to the 8250 at PCI COM1.
+ config DEBUG_HI3620_UART
+ bool "Hisilicon HI3620 Debug UART"
+ depends on ARCH_HI3xxx
+ select DEBUG_UART_PL01X
+ help
+ Say Y here if you want kernel low-level debugging support
+ on HI3620 UART.
+
+ config DEBUG_HI3716_UART
+ bool "Hisilicon Hi3716 Debug UART"
+ depends on ARCH_HI3xxx
+ select DEBUG_UART_PL01X
+ help
+ Say Y here if you want kernel low-level debugging support
+ on HI3716 UART.
+
config DEBUG_HIGHBANK_UART
bool "Kernel low-level debugging messages via Highbank UART"
depends on ARCH_HIGHBANK
+ select DEBUG_UART_PL01X
help
Say Y here if you want the debug print routines to direct
their output to the UART on Highbank based devices.
@@ -191,6 +214,7 @@ choice
config DEBUG_IMX23_UART
bool "i.MX23 Debug UART"
depends on SOC_IMX23
+ select DEBUG_UART_PL01X
help
Say Y here if you want kernel low-level debugging support
on i.MX23.
@@ -212,6 +236,7 @@ choice
config DEBUG_IMX28_UART
bool "i.MX28 Debug UART"
depends on SOC_IMX28
+ select DEBUG_UART_PL01X
help
Say Y here if you want kernel low-level debugging support
on i.MX28.
@@ -261,6 +286,7 @@ choice
config DEBUG_KEYSTONE_UART0
bool "Kernel low-level debugging on KEYSTONE2 using UART0"
depends on ARCH_KEYSTONE
+ select DEBUG_UART_8250
help
Say Y here if you want the debug print routines to direct
their output to UART0 serial port on KEYSTONE2 devices.
@@ -268,6 +294,7 @@ choice
config DEBUG_KEYSTONE_UART1
bool "Kernel low-level debugging on KEYSTONE2 using UART1"
depends on ARCH_KEYSTONE
+ select DEBUG_UART_8250
help
Say Y here if you want the debug print routines to direct
their output to UART1 serial port on KEYSTONE2 devices.
@@ -275,6 +302,7 @@ choice
config DEBUG_MMP_UART2
bool "Kernel low-level debugging message via MMP UART2"
depends on ARCH_MMP
+ select DEBUG_UART_8250
help
Say Y here if you want kernel low-level debugging support
on MMP UART2.
@@ -282,6 +310,7 @@ choice
config DEBUG_MMP_UART3
bool "Kernel low-level debugging message via MMP UART3"
depends on ARCH_MMP
+ select DEBUG_UART_8250
help
Say Y here if you want kernel low-level debugging support
on MMP UART3.
@@ -326,6 +355,7 @@ choice
config DEBUG_MVEBU_UART
bool "Kernel low-level debugging messages via MVEBU UART (old bootloaders)"
depends on ARCH_MVEBU
+ select DEBUG_UART_8250
help
Say Y here if you want kernel low-level debugging support
on MVEBU based platforms.
@@ -344,6 +374,7 @@ choice
config DEBUG_MVEBU_UART_ALTERNATE
bool "Kernel low-level debugging messages via MVEBU UART (new bootloaders)"
depends on ARCH_MVEBU
+ select DEBUG_UART_8250
help
Say Y here if you want kernel low-level debugging support
on MVEBU based platforms.
@@ -358,6 +389,7 @@ choice
config DEBUG_NOMADIK_UART
bool "Kernel low-level debugging messages via NOMADIK UART"
depends on ARCH_NOMADIK
+ select DEBUG_UART_PL01X
help
Say Y here if you want kernel low-level debugging support
on NOMADIK based platforms.
@@ -365,6 +397,7 @@ choice
config DEBUG_NSPIRE_CLASSIC_UART
bool "Kernel low-level debugging via TI-NSPIRE 8250 UART"
depends on ARCH_NSPIRE
+ select DEBUG_UART_8250
help
Say Y here if you want kernel low-level debugging support
on TI-NSPIRE classic models.
@@ -372,20 +405,82 @@ choice
config DEBUG_NSPIRE_CX_UART
bool "Kernel low-level debugging via TI-NSPIRE PL011 UART"
depends on ARCH_NSPIRE
+ select DEBUG_UART_PL01X
help
Say Y here if you want kernel low-level debugging support
on TI-NSPIRE CX models.
- config DEBUG_OMAP2PLUS_UART
- bool "Kernel low-level debugging messages via OMAP2PLUS UART"
+ config DEBUG_OMAP2UART1
+ bool "OMAP2/3/4 UART1 (omap2/3 sdp boards and some omap3 boards)"
depends on ARCH_OMAP2PLUS
+ select DEBUG_OMAP2PLUS_UART
help
- Say Y here if you want kernel low-level debugging support
- on OMAP2PLUS based platforms.
+ This covers at least h4, 2430sdp, 3430sdp, 3630sdp,
+ omap3 torpedo and 3530 lv som.
+
+ config DEBUG_OMAP2UART2
+ bool "Kernel low-level debugging messages via OMAP2/3/4 UART2"
+ depends on ARCH_OMAP2PLUS
+ select DEBUG_OMAP2PLUS_UART
+
+ config DEBUG_OMAP2UART3
+ bool "Kernel low-level debugging messages via OMAP2 UART3 (n8x0)"
+ depends on ARCH_OMAP2PLUS
+ select DEBUG_OMAP2PLUS_UART
+
+ config DEBUG_OMAP3UART3
+ bool "Kernel low-level debugging messages via OMAP3 UART3 (most omap3 boards)"
+ depends on ARCH_OMAP2PLUS
+ select DEBUG_OMAP2PLUS_UART
+ help
+ This covers at least cm_t3x, beagle, crane, devkit8000,
+ igep00x0, ldp, n900, n9(50), pandora, overo, touchbook,
+ and 3517evm.
+
+ config DEBUG_OMAP4UART3
+ bool "Kernel low-level debugging messages via OMAP4/5 UART3 (omap4 blaze, panda, omap5 sevm)"
+ depends on ARCH_OMAP2PLUS
+ select DEBUG_OMAP2PLUS_UART
+
+ config DEBUG_OMAP3UART4
+ bool "Kernel low-level debugging messages via OMAP36XX UART4"
+ depends on ARCH_OMAP2PLUS
+ select DEBUG_OMAP2PLUS_UART
+
+ config DEBUG_OMAP4UART4
+ bool "Kernel low-level debugging messages via OMAP4/5 UART4"
+ depends on ARCH_OMAP2PLUS
+ select DEBUG_OMAP2PLUS_UART
+
+ config DEBUG_TI81XXUART1
+ bool "Kernel low-level debugging messages via TI81XX UART1 (ti8148evm)"
+ depends on ARCH_OMAP2PLUS
+ select DEBUG_OMAP2PLUS_UART
+
+ config DEBUG_TI81XXUART2
+ bool "Kernel low-level debugging messages via TI81XX UART2"
+ depends on ARCH_OMAP2PLUS
+ select DEBUG_OMAP2PLUS_UART
+
+ config DEBUG_TI81XXUART3
+ bool "Kernel low-level debugging messages via TI81XX UART3 (ti8168evm)"
+ depends on ARCH_OMAP2PLUS
+ select DEBUG_OMAP2PLUS_UART
+
+ config DEBUG_AM33XXUART1
+ bool "Kernel low-level debugging messages via AM33XX UART1"
+ depends on ARCH_OMAP2PLUS
+ select DEBUG_OMAP2PLUS_UART
+
+ config DEBUG_ZOOM_UART
+ bool "Kernel low-level debugging messages via Zoom2/3 UART"
+ depends on ARCH_OMAP2PLUS
+ select DEBUG_OMAP2PLUS_UART
config DEBUG_PICOXCELL_UART
depends on ARCH_PICOXCELL
bool "Use PicoXcell UART for low-level debug"
+ select DEBUG_UART_8250
help
Say Y here if you want kernel low-level debugging support
on PicoXcell based platforms.
@@ -393,6 +488,7 @@ choice
config DEBUG_PXA_UART1
depends on ARCH_PXA
bool "Use PXA UART1 for low-level debug"
+ select DEBUG_UART_8250
help
Say Y here if you want kernel low-level debugging support
on PXA UART1.
@@ -400,6 +496,7 @@ choice
config DEBUG_REALVIEW_STD_PORT
bool "RealView Default UART"
depends on ARCH_REALVIEW
+ select DEBUG_UART_PL01X
help
Say Y here if you want the debug print routines to direct
their output to the serial port on RealView EB, PB11MP, PBA8
@@ -408,14 +505,64 @@ choice
config DEBUG_REALVIEW_PB1176_PORT
bool "RealView PB1176 UART"
depends on MACH_REALVIEW_PB1176
+ select DEBUG_UART_PL01X
help
Say Y here if you want the debug print routines to direct
their output to the standard serial port on the RealView
PB1176 platform.
- config DEBUG_ROCKCHIP_UART
- bool "Kernel low-level debugging messages via Rockchip UART"
+ config DEBUG_RK29_UART0
+ bool "Kernel low-level debugging messages via Rockchip RK29 UART0"
+ depends on ARCH_ROCKCHIP
+ select DEBUG_UART_8250
+ help
+ Say Y here if you want kernel low-level debugging support
+ on Rockchip based platforms.
+
+ config DEBUG_RK29_UART1
+ bool "Kernel low-level debugging messages via Rockchip RK29 UART1"
depends on ARCH_ROCKCHIP
+ select DEBUG_UART_8250
+ help
+ Say Y here if you want kernel low-level debugging support
+ on Rockchip based platforms.
+
+ config DEBUG_RK29_UART2
+ bool "Kernel low-level debugging messages via Rockchip RK29 UART2"
+ depends on ARCH_ROCKCHIP
+ select DEBUG_UART_8250
+ help
+ Say Y here if you want kernel low-level debugging support
+ on Rockchip based platforms.
+
+ config DEBUG_RK3X_UART0
+ bool "Kernel low-level debugging messages via Rockchip RK3X UART0"
+ depends on ARCH_ROCKCHIP
+ select DEBUG_UART_8250
+ help
+ Say Y here if you want kernel low-level debugging support
+ on Rockchip based platforms.
+
+ config DEBUG_RK3X_UART1
+ bool "Kernel low-level debugging messages via Rockchip RK3X UART1"
+ depends on ARCH_ROCKCHIP
+ select DEBUG_UART_8250
+ help
+ Say Y here if you want kernel low-level debugging support
+ on Rockchip based platforms.
+
+ config DEBUG_RK3X_UART2
+ bool "Kernel low-level debugging messages via Rockchip RK3X UART2"
+ depends on ARCH_ROCKCHIP
+ select DEBUG_UART_8250
+ help
+ Say Y here if you want kernel low-level debugging support
+ on Rockchip based platforms.
+
+ config DEBUG_RK3X_UART3
+ bool "Kernel low-level debugging messages via Rockchip RK3X UART3"
+ depends on ARCH_ROCKCHIP
+ select DEBUG_UART_8250
help
Say Y here if you want kernel low-level debugging support
on Rockchip based platforms.
@@ -471,6 +618,7 @@ choice
config DEBUG_SOCFPGA_UART
depends on ARCH_SOCFPGA
bool "Use SOCFPGA UART for low-level debug"
+ select DEBUG_UART_8250
help
Say Y here if you want kernel low-level debugging support
on SOCFPGA based platforms.
@@ -478,6 +626,7 @@ choice
config DEBUG_SUNXI_UART0
bool "Kernel low-level debugging messages via sunXi UART0"
depends on ARCH_SUNXI
+ select DEBUG_UART_8250
help
Say Y here if you want kernel low-level debugging support
on Allwinner A1X based platforms on the UART0.
@@ -485,13 +634,59 @@ choice
config DEBUG_SUNXI_UART1
bool "Kernel low-level debugging messages via sunXi UART1"
depends on ARCH_SUNXI
+ select DEBUG_UART_8250
help
Say Y here if you want kernel low-level debugging support
on Allwinner A1X based platforms on the UART1.
- config DEBUG_TEGRA_UART
+ config TEGRA_DEBUG_UART_AUTO_ODMDATA
+ bool "Kernel low-level debugging messages via Tegra UART via ODMDATA"
+ depends on ARCH_TEGRA
+ select DEBUG_TEGRA_UART
+ help
+ Automatically determines which UART to use for low-level
+ debug based on the ODMDATA value. This value is part of
+ the BCT, and is written to the boot memory device using
+ nvflash, or other flashing tool. When bits 19:18 are 3,
+ then bits 17:15 indicate which UART to use; 0/1/2/3/4
+ are UART A/B/C/D/E.
+
+ config TEGRA_DEBUG_UARTA
+ bool "Kernel low-level debugging messages via Tegra UART A"
+ depends on ARCH_TEGRA
+ select DEBUG_TEGRA_UART
+ help
+ Say Y here if you want kernel low-level debugging support
+ on Tegra based platforms.
+
+ config TEGRA_DEBUG_UARTB
+ bool "Kernel low-level debugging messages via Tegra UART B"
+ depends on ARCH_TEGRA
+ select DEBUG_TEGRA_UART
+ help
+ Say Y here if you want kernel low-level debugging support
+ on Tegra based platforms.
+
+ config TEGRA_DEBUG_UARTC
+ bool "Kernel low-level debugging messages via Tegra UART C"
+ depends on ARCH_TEGRA
+ select DEBUG_TEGRA_UART
+ help
+ Say Y here if you want kernel low-level debugging support
+ on Tegra based platforms.
+
+ config TEGRA_DEBUG_UARTD
+ bool "Kernel low-level debugging messages via Tegra UART D"
depends on ARCH_TEGRA
- bool "Use Tegra UART for low-level debug"
+ select DEBUG_TEGRA_UART
+ help
+ Say Y here if you want kernel low-level debugging support
+ on Tegra based platforms.
+
+ config TEGRA_DEBUG_UARTE
+ bool "Kernel low-level debugging messages via Tegra UART E"
+ depends on ARCH_TEGRA
+ select DEBUG_TEGRA_UART
help
Say Y here if you want kernel low-level debugging support
on Tegra based platforms.
@@ -510,19 +705,32 @@ choice
Say Y here if you want the debug print routines to direct
their output to the uart1 port on SiRFmarco devices.
- config DEBUG_STI_UART
+ config STIH41X_DEBUG_ASC2
+ bool "Use StiH415/416 ASC2 UART for low-level debug"
+ depends on ARCH_STI
+ select DEBUG_STI_UART
+ help
+ Say Y here if you want kernel low-level debugging support
+ on STiH415/416 based platforms like b2000, which has
+ default UART wired up to ASC2.
+
+ If unsure, say N.
+
+ config STIH41X_DEBUG_SBC_ASC1
+ bool "Use StiH415/416 SBC ASC1 UART for low-level debug"
depends on ARCH_STI
- bool "Use StiH415/416 ASC for low-level debug"
+ select DEBUG_STI_UART
help
Say Y here if you want kernel low-level debugging support
- on StiH415/416 based platforms like B2000, B2020.
- It support UART2 and SBC_UART1.
+ on STiH415/416 based platforms like b2020, which has
+ default UART wired up to SBC ASC1.
If unsure, say N.
config DEBUG_U300_UART
bool "Kernel low-level debugging messages via U300 UART0"
depends on ARCH_U300
+ select DEBUG_UART_PL01X
help
Say Y here if you want the debug print routines to direct
their output to the uart port on U300 devices.
@@ -548,6 +756,7 @@ choice
config DEBUG_VEXPRESS_UART0_CA9
bool "Use PL011 UART0 at 0x10009000 (V2P-CA9 core tile)"
depends on ARCH_VEXPRESS
+ select DEBUG_UART_PL01X
help
This option selects UART0 at 0x10009000. Except for custom models,
this applies only to the V2P-CA9 tile.
@@ -555,6 +764,7 @@ choice
config DEBUG_VEXPRESS_UART0_RS1
bool "Use PL011 UART0 at 0x1c090000 (RS1 complaint tiles)"
depends on ARCH_VEXPRESS
+ select DEBUG_UART_PL01X
help
This option selects UART0 at 0x1c090000. This applies to most
of the tiles using the RS1 memory map, including all new A-class
@@ -563,6 +773,7 @@ choice
config DEBUG_VEXPRESS_UART0_CRX
bool "Use PL011 UART0 at 0xb0090000 (Cortex-R compliant tiles)"
depends on ARCH_VEXPRESS && !MMU
+ select DEBUG_UART_PL01X
help
This option selects UART0 at 0xb0090000. This is appropriate for
Cortex-R series tiles and SMMs, such as Cortex-R5 and Cortex-R7
@@ -579,7 +790,7 @@ choice
depends on !ARCH_MULTIPLATFORM
help
Say Y here if your platform doesn't provide a UART option
- below. This relies on your platform choosing the right UART
+ above. This relies on your platform choosing the right UART
definition internally in order for low-level debugging to
work.
@@ -610,11 +821,41 @@ choice
For more details about semihosting, please see
chapter 8 of DUI0203I_rvct_developer_guide.pdf from ARM Ltd.
+ config DEBUG_LL_UART_8250
+ bool "Kernel low-level debugging via 8250 UART"
+ help
+ Say Y here if you wish the debug print routines to direct
+ their output to an 8250 UART. You can use this option
+ to provide the parameters for the 8250 UART rather than
+ selecting one of the platform specific options above if
+ you know the parameters for the port.
+
+ This option is preferred over the platform specific
+ options; the platform specific options are deprecated
+ and will soon be removed.
+
+ config DEBUG_LL_UART_PL01X
+ bool "Kernel low-level debugging via ARM Ltd PL01x Primecell UART"
+ help
+ Say Y here if you wish the debug print routines to direct
+ their output to a PL01x Primecell UART. You can use
+ this option to provide the parameters for the UART
+ rather than selecting one of the platform specific
+ options above if you know the parameters for the port.
+
+ This option is preferred over the platform specific
+ options; the platform specific options are deprecated
+ and will soon be removed.
+
endchoice
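
As an example, the generic 8250 option might be combined with the address
options defined below along these lines in a .config (illustrative values; the
PL01x case is analogous):

    CONFIG_DEBUG_LL=y
    CONFIG_DEBUG_LL_UART_8250=y
    CONFIG_DEBUG_UART_PHYS=0x44e09000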
config DEBUG_EXYNOS_UART
bool
+config DEBUG_OMAP2PLUS_UART
+ bool
+ depends on ARCH_OMAP2PLUS
+
config DEBUG_IMX_UART_PORT
int "i.MX Debug UART Port Selection" if DEBUG_IMX1_UART || \
DEBUG_IMX25_UART || \
@@ -631,140 +872,19 @@ config DEBUG_IMX_UART_PORT
Choose UART port on which kernel low-level debug messages
should be output.
-choice
- prompt "Low-level debug console UART"
- depends on DEBUG_OMAP2PLUS_UART
-
- config DEBUG_OMAP2UART1
- bool "OMAP2/3/4 UART1 (omap2/3 sdp boards and some omap3 boards)"
- help
- This covers at least h4, 2430sdp, 3430sdp, 3630sdp,
- omap3 torpedo and 3530 lv som.
-
- config DEBUG_OMAP2UART2
- bool "OMAP2/3/4 UART2"
-
- config DEBUG_OMAP2UART3
- bool "OMAP2 UART3 (n8x0)"
-
- config DEBUG_OMAP3UART3
- bool "OMAP3 UART3 (most omap3 boards)"
- help
- This covers at least cm_t3x, beagle, crane, devkit8000,
- igep00x0, ldp, n900, n9(50), pandora, overo, touchbook,
- and 3517evm.
-
- config DEBUG_OMAP4UART3
- bool "OMAP4/5 UART3 (omap4 blaze, panda, omap5 sevm)"
-
- config DEBUG_OMAP3UART4
- bool "OMAP36XX UART4"
-
- config DEBUG_OMAP4UART4
- bool "OMAP4/5 UART4"
-
- config DEBUG_TI81XXUART1
- bool "TI81XX UART1 (ti8148evm)"
-
- config DEBUG_TI81XXUART2
- bool "TI81XX UART2"
-
- config DEBUG_TI81XXUART3
- bool "TI81XX UART3 (ti8168evm)"
-
- config DEBUG_AM33XXUART1
- bool "AM33XX UART1"
-
- config DEBUG_ZOOM_UART
- bool "Zoom2/3 UART"
-endchoice
-
-choice
- prompt "Low-level debug console UART"
- depends on DEBUG_ROCKCHIP_UART
-
- config DEBUG_RK29_UART0
- bool "RK29 UART0"
-
- config DEBUG_RK29_UART1
- bool "RK29 UART1"
-
- config DEBUG_RK29_UART2
- bool "RK29 UART2"
-
- config DEBUG_RK3X_UART0
- bool "RK3X UART0"
-
- config DEBUG_RK3X_UART1
- bool "RK3X UART1"
-
- config DEBUG_RK3X_UART2
- bool "RK3X UART2"
-
- config DEBUG_RK3X_UART3
- bool "RK3X UART3"
-endchoice
-
-choice
- prompt "Low-level debug console UART"
- depends on DEBUG_LL && DEBUG_TEGRA_UART
-
- config TEGRA_DEBUG_UART_AUTO_ODMDATA
- bool "Via ODMDATA"
- help
- Automatically determines which UART to use for low-level debug based
- on the ODMDATA value. This value is part of the BCT, and is written
- to the boot memory device using nvflash, or other flashing tool.
- When bits 19:18 are 3, then bits 17:15 indicate which UART to use;
- 0/1/2/3/4 are UART A/B/C/D/E.
-
- config TEGRA_DEBUG_UARTA
- bool "UART A"
-
- config TEGRA_DEBUG_UARTB
- bool "UART B"
-
- config TEGRA_DEBUG_UARTC
- bool "UART C"
-
- config TEGRA_DEBUG_UARTD
- bool "UART D"
-
- config TEGRA_DEBUG_UARTE
- bool "UART E"
-
-endchoice
-
-choice
- prompt "Low-level debug console UART"
- depends on DEBUG_LL && DEBUG_STI_UART
-
- config STIH41X_DEBUG_ASC2
- bool "ASC2 UART"
- help
- Say Y here if you want kernel low-level debugging support
- on STiH415/416 based platforms like b2000, which has
- default UART wired up to ASC2.
-
- If unsure, say N.
-
- config STIH41X_DEBUG_SBC_ASC1
- bool "SBC ASC1 UART"
- help
- Say Y here if you want kernel low-level debugging support
- on STiH415/416 based platforms like b2020. which has
- default UART wired up to SBC ASC1.
-
- If unsure, say N.
+config DEBUG_TEGRA_UART
+ bool
+ depends on ARCH_TEGRA
-endchoice
+config DEBUG_STI_UART
+ bool
+ depends on ARCH_STI
config DEBUG_LL_INCLUDE
string
- default "debug/bcm2835.S" if DEBUG_BCM2835
- default "debug/cns3xxx.S" if DEBUG_CNS3XXX
+ default "debug/8250.S" if DEBUG_LL_UART_8250 || DEBUG_UART_8250
+ default "debug/pl01x.S" if DEBUG_LL_UART_PL01X || DEBUG_UART_PL01X
default "debug/exynos.S" if DEBUG_EXYNOS_UART
- default "debug/highbank.S" if DEBUG_HIGHBANK_UART
default "debug/icedcc.S" if DEBUG_ICEDCC
default "debug/imx.S" if DEBUG_IMX1_UART || \
DEBUG_IMX25_UART || \
@@ -775,38 +895,180 @@ config DEBUG_LL_INCLUDE
DEBUG_IMX53_UART ||\
DEBUG_IMX6Q_UART || \
DEBUG_IMX6SL_UART
- default "debug/keystone.S" if DEBUG_KEYSTONE_UART0 || \
- DEBUG_KEYSTONE_UART1
- default "debug/mvebu.S" if DEBUG_MVEBU_UART || \
- DEBUG_MVEBU_UART_ALTERNATE
- default "debug/mxs.S" if DEBUG_IMX23_UART || DEBUG_IMX28_UART
- default "debug/nomadik.S" if DEBUG_NOMADIK_UART
- default "debug/nspire.S" if DEBUG_NSPIRE_CX_UART || \
- DEBUG_NSPIRE_CLASSIC_UART
default "debug/omap2plus.S" if DEBUG_OMAP2PLUS_UART
- default "debug/picoxcell.S" if DEBUG_PICOXCELL_UART
- default "debug/pxa.S" if DEBUG_PXA_UART1 || DEBUG_MMP_UART2 || \
- DEBUG_MMP_UART3
- default "debug/rockchip.S" if DEBUG_ROCKCHIP_UART
default "debug/sirf.S" if DEBUG_SIRFPRIMA2_UART1 || DEBUG_SIRFMARCO_UART1
- default "debug/socfpga.S" if DEBUG_SOCFPGA_UART
default "debug/sti.S" if DEBUG_STI_UART
- default "debug/sunxi.S" if DEBUG_SUNXI_UART0 || DEBUG_SUNXI_UART1
default "debug/tegra.S" if DEBUG_TEGRA_UART
- default "debug/u300.S" if DEBUG_U300_UART
default "debug/ux500.S" if DEBUG_UX500_UART
- default "debug/vexpress.S" if DEBUG_VEXPRESS_UART0_DETECT || \
- DEBUG_VEXPRESS_UART0_CA9 || DEBUG_VEXPRESS_UART0_RS1 || \
- DEBUG_VEXPRESS_UART0_CRX
+ default "debug/vexpress.S" if DEBUG_VEXPRESS_UART0_DETECT
default "debug/vt8500.S" if DEBUG_VT8500_UART0
default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
default "mach/debug-macro.S"
+# Compatibility options for PL01x
+config DEBUG_UART_PL01X
+ def_bool ARCH_EP93XX || \
+ ARCH_INTEGRATOR || \
+ ARCH_SPEAR3XX || \
+ ARCH_SPEAR6XX || \
+ ARCH_SPEAR13XX || \
+ ARCH_VERSATILE
+
+# Compatibility options for 8250
+config DEBUG_UART_8250
+ def_bool ARCH_DOVE || ARCH_EBSA110 || \
+ (FOOTBRIDGE && !DEBUG_DC21285_PORT) || \
+ ARCH_GEMINI || ARCH_IOP13XX || ARCH_IOP32X || \
+ ARCH_IOP33X || ARCH_IXP4XX || ARCH_KIRKWOOD || \
+ ARCH_LPC32XX || ARCH_MV78XX0 || ARCH_ORION5X || ARCH_RPC
+
+config DEBUG_UART_PHYS
+ hex "Physical base address of debug UART"
+ default 0x01c20000 if DEBUG_DAVINCI_DMx_UART0
+ default 0x01c28000 if DEBUG_SUNXI_UART0
+ default 0x01c28400 if DEBUG_SUNXI_UART1
+ default 0x01d0c000 if DEBUG_DAVINCI_DA8XX_UART1
+ default 0x01d0d000 if DEBUG_DAVINCI_DA8XX_UART2
+ default 0x02530c00 if DEBUG_KEYSTONE_UART0
+ default 0x02531000 if DEBUG_KEYSTONE_UART1
+ default 0x03010fe0 if ARCH_RPC
+ default 0x08108300 if DEBUG_DAVINCI_TNETV107X_UART1
+ default 0x10009000 if DEBUG_REALVIEW_STD_PORT || DEBUG_CNS3XXX || \
+ DEBUG_VEXPRESS_UART0_CA9
+ default 0x1010c000 if DEBUG_REALVIEW_PB1176_PORT
+ default 0x10124000 if DEBUG_RK3X_UART0
+ default 0x10126000 if DEBUG_RK3X_UART1
+ default 0x101f1000 if ARCH_VERSATILE
+ default 0x101fb000 if DEBUG_NOMADIK_UART
+ default 0x16000000 if ARCH_INTEGRATOR
+ default 0x1c090000 if DEBUG_VEXPRESS_UART0_RS1
+ default 0x20060000 if DEBUG_RK29_UART0
+ default 0x20064000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
+ default 0x20068000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
+ default 0x20201000 if DEBUG_BCM2835
+ default 0x40090000 if ARCH_LPC32XX
+ default 0x40100000 if DEBUG_PXA_UART1
+ default 0x42000000 if ARCH_GEMINI
+ default 0x7c0003f8 if FOOTBRIDGE
+ default 0x80230000 if DEBUG_PICOXCELL_UART
+ default 0x80070000 if DEBUG_IMX23_UART
+ default 0x80074000 if DEBUG_IMX28_UART
+ default 0x808c0000 if ARCH_EP93XX
+ default 0x90020000 if DEBUG_NSPIRE_CLASSIC_UART || DEBUG_NSPIRE_CX_UART
+ default 0xb0090000 if DEBUG_VEXPRESS_UART0_CRX
+ default 0xc0013000 if DEBUG_U300_UART
+ default 0xc8000000 if ARCH_IXP4XX && !CPU_BIG_ENDIAN
+ default 0xc8000003 if ARCH_IXP4XX && CPU_BIG_ENDIAN
+ default 0xd0000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX
+ default 0xd0012000 if DEBUG_MVEBU_UART
+ default 0xd4017000 if DEBUG_MMP_UART2
+ default 0xd4018000 if DEBUG_MMP_UART3
+ default 0xe0000000 if ARCH_SPEAR13XX
+ default 0xf0000be0 if ARCH_EBSA110
+ default 0xf1012000 if DEBUG_MVEBU_UART_ALTERNATE
+ default 0xf1012000 if ARCH_DOVE || ARCH_KIRKWOOD || ARCH_MV78XX0 || \
+ ARCH_ORION5X
+ default 0xf8b00000 if DEBUG_HI3716_UART
+ default 0xfcb00000 if DEBUG_HI3620_UART
+ default 0xfe800000 if ARCH_IOP32X
+ default 0xffc02000 if DEBUG_SOCFPGA_UART
+ default 0xffd82340 if ARCH_IOP13XX
+ default 0xfff36000 if DEBUG_HIGHBANK_UART
+ default 0xfffff700 if ARCH_IOP33X
+ depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
+ DEBUG_UART_8250 || DEBUG_UART_PL01X
+
+config DEBUG_UART_VIRT
+ hex "Virtual base address of debug UART"
+ default 0xe0010fe0 if ARCH_RPC
+ default 0xf0000be0 if ARCH_EBSA110
+ default 0xf0009000 if DEBUG_CNS3XXX
+ default 0xf01fb000 if DEBUG_NOMADIK_UART
+ default 0xf0201000 if DEBUG_BCM2835
+ default 0xf11f1000 if ARCH_VERSATILE
+ default 0xf1600000 if ARCH_INTEGRATOR
+ default 0xf1c28000 if DEBUG_SUNXI_UART0
+ default 0xf1c28400 if DEBUG_SUNXI_UART1
+ default 0xf2100000 if DEBUG_PXA_UART1
+ default 0xf4090000 if ARCH_LPC32XX
+ default 0xf4200000 if ARCH_GEMINI
+ default 0xf8009000 if DEBUG_VEXPRESS_UART0_CA9
+ default 0xf8090000 if DEBUG_VEXPRESS_UART0_RS1
+ default 0xfb009000 if DEBUG_REALVIEW_STD_PORT
+ default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT
+ default 0xfd000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX
+ default 0xfd000000 if ARCH_SPEAR13XX
+ default 0xfd012000 if ARCH_MV78XX0
+ default 0xfde12000 if ARCH_DOVE
+ default 0xfe012000 if ARCH_ORION5X
+ default 0xfe017000 if DEBUG_MMP_UART2
+ default 0xfe018000 if DEBUG_MMP_UART3
+ default 0xfe100000 if DEBUG_IMX23_UART || DEBUG_IMX28_UART
+ default 0xfe230000 if DEBUG_PICOXCELL_UART
+ default 0xfe800000 if ARCH_IOP32X
+ default 0xfeb00000 if DEBUG_HI3620_UART || DEBUG_HI3716_UART
+ default 0xfeb24000 if DEBUG_RK3X_UART0
+ default 0xfeb26000 if DEBUG_RK3X_UART1
+ default 0xfeb30c00 if DEBUG_KEYSTONE_UART0
+ default 0xfeb31000 if DEBUG_KEYSTONE_UART1
+ default 0xfec12000 if DEBUG_MVEBU_UART || DEBUG_MVEBU_UART_ALTERNATE
+ default 0xfed60000 if DEBUG_RK29_UART0
+ default 0xfed64000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
+ default 0xfed68000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
+ default 0xfec02000 if DEBUG_SOCFPGA_UART
+ default 0xfec20000 if DEBUG_DAVINCI_DMx_UART0
+ default 0xfed0c000 if DEBUG_DAVINCI_DA8XX_UART1
+ default 0xfed0d000 if DEBUG_DAVINCI_DA8XX_UART2
+ default 0xfed12000 if ARCH_KIRKWOOD
+ default 0xfedc0000 if ARCH_EP93XX
+ default 0xfee003f8 if FOOTBRIDGE
+ default 0xfee08300 if DEBUG_DAVINCI_TNETV107X_UART1
+ default 0xfee20000 if DEBUG_NSPIRE_CLASSIC_UART || DEBUG_NSPIRE_CX_UART
+ default 0xfef36000 if DEBUG_HIGHBANK_UART
+ default 0xfee82340 if ARCH_IOP13XX
+ default 0xfef00000 if ARCH_IXP4XX && !CPU_BIG_ENDIAN
+ default 0xfef00003 if ARCH_IXP4XX && CPU_BIG_ENDIAN
+ default 0xfefff700 if ARCH_IOP33X
+ default 0xff003000 if DEBUG_U300_UART
+ default DEBUG_UART_PHYS if !MMU
+ depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
+ DEBUG_UART_8250 || DEBUG_UART_PL01X
+
+config DEBUG_UART_8250_SHIFT
+ int "Register offset shift for the 8250 debug UART"
+ depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250
+ default 0 if FOOTBRIDGE || ARCH_IOP32X
+ default 2
+
+config DEBUG_UART_8250_WORD
+ bool "Use 32-bit accesses for 8250 UART"
+ depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250
+ depends on DEBUG_UART_8250_SHIFT >= 2
+ default y if DEBUG_PICOXCELL_UART || DEBUG_SOCFPGA_UART || \
+ ARCH_KEYSTONE || \
+ DEBUG_DAVINCI_DMx_UART0 || DEBUG_DAVINCI_DA8XX_UART1 || \
+ DEBUG_DAVINCI_DA8XX_UART2 || DEBUG_DAVINCI_TNETV107X_UART1
+
+config DEBUG_UART_8250_FLOW_CONTROL
+ bool "Enable flow control for 8250 UART"
+ depends on DEBUG_LL_UART_8250 || DEBUG_UART_8250
+ default y if ARCH_EBSA110 || FOOTBRIDGE || ARCH_GEMINI || ARCH_RPC
+
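Taken together, the new generic options amount to a simple polled output routine: a fixed MMIO base (DEBUG_UART_VIRT), a register stride of 1 << DEBUG_UART_8250_SHIFT, optionally 32-bit accesses (DEBUG_UART_8250_WORD), and optional CTS polling (DEBUG_UART_8250_FLOW_CONTROL). The kernel's real implementation is the assembly in debug/8250.S; the bare-metal style C sketch below only illustrates how the pieces are meant to combine. The base address and shift are placeholders (the base happens to match the Kirkwood/Dove default listed above), FLOW_CONTROL stands in for DEBUG_UART_8250_FLOW_CONTROL, and the register indices are the standard 16550 ones also used by the debug-8250.S file removed later in this patch.

/*
 * Hedged sketch, not the kernel's debug/8250.S (which is assembly).
 * All configuration values here are placeholders.
 */
#include <stdint.h>

#define UART_BASE	((uintptr_t)0xf1012000)	/* CONFIG_DEBUG_UART_VIRT stand-in */
#define UART_SHIFT	2			/* CONFIG_DEBUG_UART_8250_SHIFT stand-in */

/* Standard 16550 register indices and status bits. */
#define UART_TX		0
#define UART_LSR	5
#define UART_MSR	6
#define UART_LSR_THRE	0x20
#define UART_LSR_TEMT	0x40
#define UART_MSR_CTS	0x10

static inline volatile uint32_t *uart_reg(int reg)
{
	/* 32-bit accesses, as DEBUG_UART_8250_WORD would select. */
	return (volatile uint32_t *)(UART_BASE + ((uintptr_t)reg << UART_SHIFT));
}

static void debug_putc(char c)
{
#ifdef FLOW_CONTROL
	/* Optional CTS polling (DEBUG_UART_8250_FLOW_CONTROL). */
	while (!(*uart_reg(UART_MSR) & UART_MSR_CTS))
		;
#endif
	*uart_reg(UART_TX) = (uint32_t)(unsigned char)c;

	/* Wait until the transmitter is completely idle again. */
	while ((*uart_reg(UART_LSR) & (UART_LSR_TEMT | UART_LSR_THRE)) !=
	       (UART_LSR_TEMT | UART_LSR_THRE))
		;
}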
config DEBUG_UNCOMPRESS
bool
- default y if ARCH_MULTIPLATFORM && DEBUG_LL && \
- !DEBUG_OMAP2PLUS_UART && \
- !DEBUG_TEGRA_UART
+ depends on ARCH_MULTIPLATFORM
+ default y if DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \
+ (!DEBUG_TEGRA_UART || !ZBOOT_ROM)
+ help
+ This option influences the normal decompressor output for
+ multiplatform kernels. Normally, multiplatform kernels disable
+ decompressor output because it is not possible to know where to
+ send the decompressor output.
+
+ When this option is set, the selected DEBUG_LL output method
+ will be re-used for normal decompressor output on multiplatform
+ kernels.
+
config UNCOMPRESS_INCLUDE
string
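For context, the effect of DEBUG_UNCOMPRESS described in the help text above can be pictured as follows. This is a hedged sketch of the idea only, assuming the decompressor's putc() is the hook that gets redirected to the selected DEBUG_LL output method; it is not the exact kernel header.

/*
 * Sketch under the assumption stated above; not the exact kernel header.
 */
#ifdef CONFIG_DEBUG_UNCOMPRESS
/* Forward characters to the DEBUG_LL output method selected above. */
extern void putc(int c);
#else
/* Multiplatform default: decompressor output is silently dropped. */
static inline void putc(int c) { (void)c; }
#endif

static inline void flush(void) { }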
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index c0ac0f5e5e5..6fd2ceae305 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -153,6 +153,7 @@ machine-$(CONFIG_ARCH_DAVINCI) += davinci
machine-$(CONFIG_ARCH_DOVE) += dove
machine-$(CONFIG_ARCH_EBSA110) += ebsa110
machine-$(CONFIG_ARCH_EP93XX) += ep93xx
+machine-$(CONFIG_ARCH_EXYNOS) += exynos
machine-$(CONFIG_ARCH_GEMINI) += gemini
machine-$(CONFIG_ARCH_HIGHBANK) += highbank
machine-$(CONFIG_ARCH_INTEGRATOR) += integrator
@@ -160,15 +161,16 @@ machine-$(CONFIG_ARCH_IOP13XX) += iop13xx
machine-$(CONFIG_ARCH_IOP32X) += iop32x
machine-$(CONFIG_ARCH_IOP33X) += iop33x
machine-$(CONFIG_ARCH_IXP4XX) += ixp4xx
+machine-$(CONFIG_ARCH_KEYSTONE) += keystone
machine-$(CONFIG_ARCH_KIRKWOOD) += kirkwood
machine-$(CONFIG_ARCH_KS8695) += ks8695
machine-$(CONFIG_ARCH_LPC32XX) += lpc32xx
machine-$(CONFIG_ARCH_MMP) += mmp
machine-$(CONFIG_ARCH_MSM) += msm
machine-$(CONFIG_ARCH_MV78XX0) += mv78xx0
+machine-$(CONFIG_ARCH_MVEBU) += mvebu
machine-$(CONFIG_ARCH_MXC) += imx
machine-$(CONFIG_ARCH_MXS) += mxs
-machine-$(CONFIG_ARCH_MVEBU) += mvebu
machine-$(CONFIG_ARCH_NETX) += netx
machine-$(CONFIG_ARCH_NOMADIK) += nomadik
machine-$(CONFIG_ARCH_NSPIRE) += nspire
@@ -176,7 +178,6 @@ machine-$(CONFIG_ARCH_OMAP1) += omap1
machine-$(CONFIG_ARCH_OMAP2PLUS) += omap2
machine-$(CONFIG_ARCH_ORION5X) += orion5x
machine-$(CONFIG_ARCH_PICOXCELL) += picoxcell
-machine-$(CONFIG_ARCH_SIRF) += prima2
machine-$(CONFIG_ARCH_PXA) += pxa
machine-$(CONFIG_ARCH_REALVIEW) += realview
machine-$(CONFIG_ARCH_ROCKCHIP) += rockchip
@@ -186,25 +187,24 @@ machine-$(CONFIG_ARCH_S3C64XX) += s3c64xx
machine-$(CONFIG_ARCH_S5P64X0) += s5p64x0
machine-$(CONFIG_ARCH_S5PC100) += s5pc100
machine-$(CONFIG_ARCH_S5PV210) += s5pv210
-machine-$(CONFIG_ARCH_EXYNOS) += exynos
machine-$(CONFIG_ARCH_SA1100) += sa1100
machine-$(CONFIG_ARCH_SHARK) += shark
machine-$(CONFIG_ARCH_SHMOBILE) += shmobile
+machine-$(CONFIG_ARCH_SIRF) += prima2
+machine-$(CONFIG_ARCH_SOCFPGA) += socfpga
+machine-$(CONFIG_ARCH_STI) += sti
+machine-$(CONFIG_ARCH_SUNXI) += sunxi
machine-$(CONFIG_ARCH_TEGRA) += tegra
machine-$(CONFIG_ARCH_U300) += u300
machine-$(CONFIG_ARCH_U8500) += ux500
machine-$(CONFIG_ARCH_VERSATILE) += versatile
machine-$(CONFIG_ARCH_VEXPRESS) += vexpress
+machine-$(CONFIG_ARCH_VIRT) += virt
machine-$(CONFIG_ARCH_VT8500) += vt8500
machine-$(CONFIG_ARCH_W90X900) += w90x900
+machine-$(CONFIG_ARCH_ZYNQ) += zynq
machine-$(CONFIG_FOOTBRIDGE) += footbridge
-machine-$(CONFIG_ARCH_SOCFPGA) += socfpga
machine-$(CONFIG_PLAT_SPEAR) += spear
-machine-$(CONFIG_ARCH_STI) += sti
-machine-$(CONFIG_ARCH_VIRT) += virt
-machine-$(CONFIG_ARCH_ZYNQ) += zynq
-machine-$(CONFIG_ARCH_SUNXI) += sunxi
-machine-$(CONFIG_ARCH_KEYSTONE) += keystone
# Platform directory name. This list is sorted alphanumerically
# by CONFIG_* macro name.
diff --git a/arch/arm/boot/dts/am335x-bone.dts b/arch/arm/boot/dts/am335x-bone.dts
index 444b4ede0d6..d318987d44a 100644
--- a/arch/arm/boot/dts/am335x-bone.dts
+++ b/arch/arm/boot/dts/am335x-bone.dts
@@ -120,6 +120,35 @@
status = "okay";
};
+ musb: usb@47400000 {
+ status = "okay";
+
+ control@44e10000 {
+ status = "okay";
+ };
+
+ usb-phy@47401300 {
+ status = "okay";
+ };
+
+ usb-phy@47401b00 {
+ status = "okay";
+ };
+
+ usb@47401000 {
+ status = "okay";
+ };
+
+ usb@47401800 {
+ status = "okay";
+ dr_mode = "host";
+ };
+
+ dma-controller@07402000 {
+ status = "okay";
+ };
+ };
+
i2c0: i2c@44e0b000 {
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins>;
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index 3aee1a43782..e8ec8756e49 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -171,6 +171,35 @@
};
};
+ musb: usb@47400000 {
+ status = "okay";
+
+ control@44e10000 {
+ status = "okay";
+ };
+
+ usb-phy@47401300 {
+ status = "okay";
+ };
+
+ usb-phy@47401b00 {
+ status = "okay";
+ };
+
+ usb@47401000 {
+ status = "okay";
+ };
+
+ usb@47401800 {
+ status = "okay";
+ dr_mode = "host";
+ };
+
+ dma-controller@07402000 {
+ status = "okay";
+ };
+ };
+
i2c1: i2c@4802a000 {
pinctrl-names = "default";
pinctrl-0 = <&i2c1_pins>;
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 0c8ad173d2b..4f339fa91c5 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -14,6 +14,7 @@
/dts-v1/;
#include "am33xx.dtsi"
+#include <dt-bindings/pwm/pwm.h>
/ {
model = "TI AM335x EVM-SK";
@@ -207,6 +208,22 @@
};
};
+ musb: usb@47400000 {
+ status = "okay";
+
+ control@44e10000 {
+ status = "okay";
+ };
+
+ usb-phy@47401300 {
+ status = "okay";
+ };
+
+ usb@47401000 {
+ status = "okay";
+ };
+ };
+
epwmss2: epwmss@48304000 {
status = "okay";
@@ -298,7 +315,7 @@
backlight {
compatible = "pwm-backlight";
- pwms = <&ecap2 0 50000 1>;
+ pwms = <&ecap2 0 50000 PWM_POLARITY_INVERTED>;
brightness-levels = <0 58 61 66 75 90 125 170 255>;
default-brightness-level = <8>;
};
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 38b446ba1ce..f9c5da9c7fe 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -26,6 +26,10 @@
serial5 = &uart5;
d_can0 = &dcan0;
d_can1 = &dcan1;
+ usb0 = &usb0;
+ usb1 = &usb1;
+ phy0 = &usb0_phy;
+ phy1 = &usb1_phy;
};
cpus {
@@ -333,21 +337,132 @@
status = "disabled";
};
- usb@47400000 {
- compatible = "ti,musb-am33xx";
- reg = <0x47400000 0x1000 /* usbss */
- 0x47401000 0x800 /* musb instance 0 */
- 0x47401800 0x800>; /* musb instance 1 */
- interrupts = <17 /* usbss */
- 18 /* musb instance 0 */
- 19>; /* musb instance 1 */
- multipoint = <1>;
- num-eps = <16>;
- ram-bits = <12>;
- port0-mode = <3>;
- port1-mode = <3>;
- power = <250>;
+ usb: usb@47400000 {
+ compatible = "ti,am33xx-usb";
+ reg = <0x47400000 0x1000>;
+ ranges;
+ #address-cells = <1>;
+ #size-cells = <1>;
ti,hwmods = "usb_otg_hs";
+ status = "disabled";
+
+ ctrl_mod: control@44e10000 {
+ compatible = "ti,am335x-usb-ctrl-module";
+ reg = <0x44e10620 0x10
+ 0x44e10648 0x4>;
+ reg-names = "phy_ctrl", "wakeup";
+ status = "disabled";
+ };
+
+ usb0_phy: usb-phy@47401300 {
+ compatible = "ti,am335x-usb-phy";
+ reg = <0x47401300 0x100>;
+ reg-names = "phy";
+ status = "disabled";
+ ti,ctrl_mod = <&ctrl_mod>;
+ };
+
+ usb0: usb@47401000 {
+ compatible = "ti,musb-am33xx";
+ status = "disabled";
+ reg = <0x47401400 0x400
+ 0x47401000 0x200>;
+ reg-names = "mc", "control";
+
+ interrupts = <18>;
+ interrupt-names = "mc";
+ dr_mode = "otg";
+ mentor,multipoint = <1>;
+ mentor,num-eps = <16>;
+ mentor,ram-bits = <12>;
+ mentor,power = <500>;
+ phys = <&usb0_phy>;
+
+ dmas = <&cppi41dma 0 0 &cppi41dma 1 0
+ &cppi41dma 2 0 &cppi41dma 3 0
+ &cppi41dma 4 0 &cppi41dma 5 0
+ &cppi41dma 6 0 &cppi41dma 7 0
+ &cppi41dma 8 0 &cppi41dma 9 0
+ &cppi41dma 10 0 &cppi41dma 11 0
+ &cppi41dma 12 0 &cppi41dma 13 0
+ &cppi41dma 14 0 &cppi41dma 0 1
+ &cppi41dma 1 1 &cppi41dma 2 1
+ &cppi41dma 3 1 &cppi41dma 4 1
+ &cppi41dma 5 1 &cppi41dma 6 1
+ &cppi41dma 7 1 &cppi41dma 8 1
+ &cppi41dma 9 1 &cppi41dma 10 1
+ &cppi41dma 11 1 &cppi41dma 12 1
+ &cppi41dma 13 1 &cppi41dma 14 1>;
+ dma-names =
+ "rx1", "rx2", "rx3", "rx4", "rx5", "rx6", "rx7",
+ "rx8", "rx9", "rx10", "rx11", "rx12", "rx13",
+ "rx14", "rx15",
+ "tx1", "tx2", "tx3", "tx4", "tx5", "tx6", "tx7",
+ "tx8", "tx9", "tx10", "tx11", "tx12", "tx13",
+ "tx14", "tx15";
+ };
+
+ usb1_phy: usb-phy@47401b00 {
+ compatible = "ti,am335x-usb-phy";
+ reg = <0x47401b00 0x100>;
+ reg-names = "phy";
+ status = "disabled";
+ ti,ctrl_mod = <&ctrl_mod>;
+ };
+
+ usb1: usb@47401800 {
+ compatible = "ti,musb-am33xx";
+ status = "disabled";
+ reg = <0x47401c00 0x400
+ 0x47401800 0x200>;
+ reg-names = "mc", "control";
+ interrupts = <19>;
+ interrupt-names = "mc";
+ dr_mode = "otg";
+ mentor,multipoint = <1>;
+ mentor,num-eps = <16>;
+ mentor,ram-bits = <12>;
+ mentor,power = <500>;
+ phys = <&usb1_phy>;
+
+ dmas = <&cppi41dma 15 0 &cppi41dma 16 0
+ &cppi41dma 17 0 &cppi41dma 18 0
+ &cppi41dma 19 0 &cppi41dma 20 0
+ &cppi41dma 21 0 &cppi41dma 22 0
+ &cppi41dma 23 0 &cppi41dma 24 0
+ &cppi41dma 25 0 &cppi41dma 26 0
+ &cppi41dma 27 0 &cppi41dma 28 0
+ &cppi41dma 29 0 &cppi41dma 15 1
+ &cppi41dma 16 1 &cppi41dma 17 1
+ &cppi41dma 18 1 &cppi41dma 19 1
+ &cppi41dma 20 1 &cppi41dma 21 1
+ &cppi41dma 22 1 &cppi41dma 23 1
+ &cppi41dma 24 1 &cppi41dma 25 1
+ &cppi41dma 26 1 &cppi41dma 27 1
+ &cppi41dma 28 1 &cppi41dma 29 1>;
+ dma-names =
+ "rx1", "rx2", "rx3", "rx4", "rx5", "rx6", "rx7",
+ "rx8", "rx9", "rx10", "rx11", "rx12", "rx13",
+ "rx14", "rx15",
+ "tx1", "tx2", "tx3", "tx4", "tx5", "tx6", "tx7",
+ "tx8", "tx9", "tx10", "tx11", "tx12", "tx13",
+ "tx14", "tx15";
+ };
+
+ cppi41dma: dma-controller@07402000 {
+ compatible = "ti,am3359-cppi41";
+ reg = <0x47400000 0x1000
+ 0x47402000 0x1000
+ 0x47403000 0x1000
+ 0x47404000 0x4000>;
+ reg-names = "glue", "controller", "scheduler", "queuemgr";
+ interrupts = <17>;
+ interrupt-names = "glue";
+ #dma-cells = <2>;
+ #dma-channels = <30>;
+ #dma-requests = <256>;
+ status = "disabled";
+ };
};
epwmss0: epwmss@48300000 {
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index d59b70c6a6a..3d77dbe406f 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -14,11 +14,11 @@
compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9";
chosen {
- bootargs = "mem=128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
+ bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
};
memory {
- reg = <0x20000000 0x10000000>;
+ reg = <0x20000000 0x8000000>;
};
clocks {
diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi
index b753855b205..49e3c45818c 100644
--- a/arch/arm/boot/dts/at91sam9x5ek.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi
@@ -94,8 +94,9 @@
usb0: ohci@00600000 {
status = "okay";
- num-ports = <2>;
- atmel,vbus-gpio = <&pioD 19 GPIO_ACTIVE_LOW
+ num-ports = <3>;
+ atmel,vbus-gpio = <0 /* &pioD 18 GPIO_ACTIVE_LOW *//* Activate to have access to port A */
+ &pioD 19 GPIO_ACTIVE_LOW
&pioD 20 GPIO_ACTIVE_LOW
>;
};
diff --git a/arch/arm/boot/dts/atlas6.dtsi b/arch/arm/boot/dts/atlas6.dtsi
index 9866cd736de..8678e0c1111 100644
--- a/arch/arm/boot/dts/atlas6.dtsi
+++ b/arch/arm/boot/dts/atlas6.dtsi
@@ -329,6 +329,12 @@
sirf,function = "uart0";
};
};
+ uart0_noflow_pins_a: uart0@1 {
+ uart {
+ sirf,pins = "uart0_nostreamctrlgrp";
+ sirf,function = "uart0_nostreamctrl";
+ };
+ };
uart1_pins_a: uart1@0 {
uart {
sirf,pins = "uart1grp";
@@ -485,6 +491,12 @@
sirf,function = "usp0";
};
};
+ usp0_uart_nostreamctrl_pins_a: usp0@1 {
+ usp0 {
+ sirf,pins = "usp0_uart_nostreamctrl_grp";
+ sirf,function = "usp0_uart_nostreamctrl";
+ };
+ };
usp1_pins_a: usp1@0 {
usp1 {
sirf,pins = "usp1grp";
@@ -515,16 +527,16 @@
sirf,function = "pulse_count";
};
};
- cko0_rst_pins_a: cko0_rst@0 {
- cko0_rst {
- sirf,pins = "cko0_rstgrp";
- sirf,function = "cko0_rst";
+ cko0_pins_a: cko0@0 {
+ cko0 {
+ sirf,pins = "cko0grp";
+ sirf,function = "cko0";
};
};
- cko1_rst_pins_a: cko1_rst@0 {
- cko1_rst {
- sirf,pins = "cko1_rstgrp";
- sirf,function = "cko1_rst";
+ cko1_pins_a: cko1@0 {
+ cko1 {
+ sirf,pins = "cko1grp";
+ sirf,function = "cko1";
};
};
};
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index ef57277fc38..376090f0723 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -405,7 +405,7 @@
};
i2s0: i2s@03830000 {
- compatible = "samsung,i2s-v5";
+ compatible = "samsung,s5pv210-i2s";
reg = <0x03830000 0x100>;
dmas = <&pdma0 10
&pdma0 9
@@ -415,16 +415,13 @@
<&clock_audss EXYNOS_I2S_BUS>,
<&clock_audss EXYNOS_SCLK_I2S>;
clock-names = "iis", "i2s_opclk0", "i2s_opclk1";
- samsung,supports-6ch;
- samsung,supports-rstclr;
- samsung,supports-secdai;
samsung,idma-addr = <0x03000000>;
pinctrl-names = "default";
pinctrl-0 = <&i2s0_bus>;
};
i2s1: i2s@12D60000 {
- compatible = "samsung,i2s-v5";
+ compatible = "samsung,s3c6410-i2s";
reg = <0x12D60000 0x100>;
dmas = <&pdma1 12
&pdma1 11>;
@@ -436,7 +433,7 @@
};
i2s2: i2s@12D70000 {
- compatible = "samsung,i2s-v5";
+ compatible = "samsung,s3c6410-i2s";
reg = <0x12D70000 0x100>;
dmas = <&pdma0 12
&pdma0 11>;
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi
index ff7f5d85584..586134e2a38 100644
--- a/arch/arm/boot/dts/exynos5440.dtsi
+++ b/arch/arm/boot/dts/exynos5440.dtsi
@@ -248,6 +248,7 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0x0 0 &gic 53>;
+ num-lanes = <4>;
};
pcie@2a0000 {
@@ -267,5 +268,6 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0x0 0 &gic 56>;
+ num-lanes = <4>;
};
};
diff --git a/arch/arm/boot/dts/imx28-apx4devkit.dts b/arch/arm/boot/dts/imx28-apx4devkit.dts
index 43bf3c796cb..0e7fed47bd8 100644
--- a/arch/arm/boot/dts/imx28-apx4devkit.dts
+++ b/arch/arm/boot/dts/imx28-apx4devkit.dts
@@ -147,7 +147,7 @@
reg = <0x0a>;
VDDA-supply = <&reg_3p3v>;
VDDIO-supply = <&reg_3p3v>;
-
+ clocks = <&saif0>;
};
pcf8563: rtc@51 {
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
index 1f0d38d7b16..15715d921d1 100644
--- a/arch/arm/boot/dts/imx28-evk.dts
+++ b/arch/arm/boot/dts/imx28-evk.dts
@@ -195,7 +195,7 @@
reg = <0x0a>;
VDDA-supply = <&reg_3p3v>;
VDDIO-supply = <&reg_3p3v>;
-
+ clocks = <&saif0>;
};
at24@51 {
@@ -220,6 +220,7 @@
auart0: serial@8006a000 {
pinctrl-names = "default";
pinctrl-0 = <&auart0_pins_a>;
+ fsl,uart-has-rtscts;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx28-m28evk.dts b/arch/arm/boot/dts/imx28-m28evk.dts
index 880df2f13be..44d9da57736 100644
--- a/arch/arm/boot/dts/imx28-m28evk.dts
+++ b/arch/arm/boot/dts/imx28-m28evk.dts
@@ -184,7 +184,7 @@
reg = <0x0a>;
VDDA-supply = <&reg_3p3v>;
VDDIO-supply = <&reg_3p3v>;
-
+ clocks = <&saif0>;
};
eeprom: eeprom@51 {
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 6a8acb01b1d..9524a057128 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -837,6 +837,7 @@
compatible = "fsl,imx28-saif";
reg = <0x80042000 0x2000>;
interrupts = <59 80>;
+ #clock-cells = <0>;
clocks = <&clks 53>;
dmas = <&dma_apbx 4>;
dma-names = "rx-tx";
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index 6dd9486c755..ad3471ca17c 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -61,6 +61,16 @@
mux-int-port = <2>;
mux-ext-port = <3>;
};
+
+ clocks {
+ clk_26M: codec_clock {
+ compatible = "fixed-clock";
+ reg=<0>;
+ #clock-cells = <0>;
+ clock-frequency = <26000000>;
+ gpios = <&gpio4 26 1>;
+ };
+ };
};
&esdhc1 {
@@ -229,6 +239,7 @@
MX51_PAD_EIM_A27__GPIO2_21 0x5
MX51_PAD_CSPI1_SS0__GPIO4_24 0x85
MX51_PAD_CSPI1_SS1__GPIO4_25 0x85
+ MX51_PAD_CSPI1_RDY__GPIO4_26 0x80000000
>;
};
};
@@ -255,7 +266,7 @@
sgtl5000: codec@0a {
compatible = "fsl,sgtl5000";
reg = <0x0a>;
- clock-frequency = <26000000>;
+ clocks = <&clk_26M>;
VDDA-supply = <&vdig_reg>;
VDDIO-supply = <&vvideo_reg>;
};
diff --git a/arch/arm/boot/dts/imx53-mba53.dts b/arch/arm/boot/dts/imx53-mba53.dts
index aaa33bc99f7..a6309026794 100644
--- a/arch/arm/boot/dts/imx53-mba53.dts
+++ b/arch/arm/boot/dts/imx53-mba53.dts
@@ -27,7 +27,7 @@
backlight {
compatible = "pwm-backlight";
- pwms = <&pwm2 0 50000 0 0>;
+ pwms = <&pwm2 0 50000>;
brightness-levels = <0 24 28 32 36 40 44 48 52 56 60 64 68 72 76 80 84 88 92 96 100>;
default-brightness-level = <10>;
enable-gpios = <&gpio7 7 0>;
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 3895fbba8fc..569aa9f2c4e 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -725,15 +725,15 @@
uart1 {
pinctrl_uart1_1: uart1grp-1 {
fsl,pins = <
- MX53_PAD_CSI0_DAT10__UART1_TXD_MUX 0x1c5
- MX53_PAD_CSI0_DAT11__UART1_RXD_MUX 0x1c5
+ MX53_PAD_CSI0_DAT10__UART1_TXD_MUX 0x1e4
+ MX53_PAD_CSI0_DAT11__UART1_RXD_MUX 0x1e4
>;
};
pinctrl_uart1_2: uart1grp-2 {
fsl,pins = <
- MX53_PAD_PATA_DIOW__UART1_TXD_MUX 0x1c5
- MX53_PAD_PATA_DMACK__UART1_RXD_MUX 0x1c5
+ MX53_PAD_PATA_DIOW__UART1_TXD_MUX 0x1e4
+ MX53_PAD_PATA_DMACK__UART1_RXD_MUX 0x1e4
>;
};
@@ -748,8 +748,8 @@
uart2 {
pinctrl_uart2_1: uart2grp-1 {
fsl,pins = <
- MX53_PAD_PATA_BUFFER_EN__UART2_RXD_MUX 0x1c5
- MX53_PAD_PATA_DMARQ__UART2_TXD_MUX 0x1c5
+ MX53_PAD_PATA_BUFFER_EN__UART2_RXD_MUX 0x1e4
+ MX53_PAD_PATA_DMARQ__UART2_TXD_MUX 0x1e4
>;
};
@@ -766,17 +766,17 @@
uart3 {
pinctrl_uart3_1: uart3grp-1 {
fsl,pins = <
- MX53_PAD_PATA_CS_0__UART3_TXD_MUX 0x1c5
- MX53_PAD_PATA_CS_1__UART3_RXD_MUX 0x1c5
- MX53_PAD_PATA_DA_1__UART3_CTS 0x1c5
- MX53_PAD_PATA_DA_2__UART3_RTS 0x1c5
+ MX53_PAD_PATA_CS_0__UART3_TXD_MUX 0x1e4
+ MX53_PAD_PATA_CS_1__UART3_RXD_MUX 0x1e4
+ MX53_PAD_PATA_DA_1__UART3_CTS 0x1e4
+ MX53_PAD_PATA_DA_2__UART3_RTS 0x1e4
>;
};
pinctrl_uart3_2: uart3grp-2 {
fsl,pins = <
- MX53_PAD_PATA_CS_0__UART3_TXD_MUX 0x1c5
- MX53_PAD_PATA_CS_1__UART3_RXD_MUX 0x1c5
+ MX53_PAD_PATA_CS_0__UART3_TXD_MUX 0x1e4
+ MX53_PAD_PATA_CS_1__UART3_RXD_MUX 0x1e4
>;
};
@@ -785,8 +785,8 @@
uart4 {
pinctrl_uart4_1: uart4grp-1 {
fsl,pins = <
- MX53_PAD_KEY_COL0__UART4_TXD_MUX 0x1c5
- MX53_PAD_KEY_ROW0__UART4_RXD_MUX 0x1c5
+ MX53_PAD_KEY_COL0__UART4_TXD_MUX 0x1e4
+ MX53_PAD_KEY_ROW0__UART4_RXD_MUX 0x1e4
>;
};
};
@@ -794,8 +794,8 @@
uart5 {
pinctrl_uart5_1: uart5grp-1 {
fsl,pins = <
- MX53_PAD_KEY_COL1__UART5_TXD_MUX 0x1c5
- MX53_PAD_KEY_ROW1__UART5_RXD_MUX 0x1c5
+ MX53_PAD_KEY_COL1__UART5_TXD_MUX 0x1e4
+ MX53_PAD_KEY_ROW1__UART5_RXD_MUX 0x1e4
>;
};
};
diff --git a/arch/arm/boot/dts/msm8660-surf.dts b/arch/arm/boot/dts/msm8660-surf.dts
index cdc010e0f93..386d4287021 100644
--- a/arch/arm/boot/dts/msm8660-surf.dts
+++ b/arch/arm/boot/dts/msm8660-surf.dts
@@ -38,7 +38,7 @@
};
serial@19c40000 {
- compatible = "qcom,msm-hsuart", "qcom,msm-uart";
+ compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
reg = <0x19c40000 0x1000>,
<0x19c00000 0x1000>;
interrupts = <0 195 0x0>;
diff --git a/arch/arm/boot/dts/msm8960-cdp.dts b/arch/arm/boot/dts/msm8960-cdp.dts
index db2060c4654..93e9f7e0b7a 100644
--- a/arch/arm/boot/dts/msm8960-cdp.dts
+++ b/arch/arm/boot/dts/msm8960-cdp.dts
@@ -26,7 +26,7 @@
cpu-offset = <0x80000>;
};
- msmgpio: gpio@fd510000 {
+ msmgpio: gpio@800000 {
compatible = "qcom,msm-gpio";
gpio-controller;
#gpio-cells = <2>;
@@ -34,11 +34,11 @@
interrupts = <0 32 0x4>;
interrupt-controller;
#interrupt-cells = <2>;
- reg = <0xfd510000 0x4000>;
+ reg = <0x800000 0x4000>;
};
serial@16440000 {
- compatible = "qcom,msm-hsuart", "qcom,msm-uart";
+ compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
reg = <0x16440000 0x1000>,
<0x16400000 0x1000>;
interrupts = <0 154 0x0>;
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 08b72678abf..65d7b601651 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -235,7 +235,7 @@
};
&mmc1 {
- vmmc-supply = <&vmmcsd_fixed>;
+ vmmc-supply = <&ldo9_reg>;
bus-width = <4>;
};
@@ -282,6 +282,7 @@
regulators {
smps123_reg: smps123 {
+ /* VDD_OPP_MPU */
regulator-name = "smps123";
regulator-min-microvolt = < 600000>;
regulator-max-microvolt = <1500000>;
@@ -290,6 +291,7 @@
};
smps45_reg: smps45 {
+ /* VDD_OPP_MM */
regulator-name = "smps45";
regulator-min-microvolt = < 600000>;
regulator-max-microvolt = <1310000>;
@@ -298,6 +300,7 @@
};
smps6_reg: smps6 {
+ /* VDD_DDR3 - over VDD_SMPS6 */
regulator-name = "smps6";
regulator-min-microvolt = <1200000>;
regulator-max-microvolt = <1200000>;
@@ -306,6 +309,7 @@
};
smps7_reg: smps7 {
+ /* VDDS_1v8_OMAP over VDDS_1v8_MAIN */
regulator-name = "smps7";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -314,6 +318,7 @@
};
smps8_reg: smps8 {
+ /* VDD_OPP_CORE */
regulator-name = "smps8";
regulator-min-microvolt = < 600000>;
regulator-max-microvolt = <1310000>;
@@ -322,15 +327,15 @@
};
smps9_reg: smps9 {
+ /* VDDA_2v1_AUD over VDD_2v1 */
regulator-name = "smps9";
regulator-min-microvolt = <2100000>;
regulator-max-microvolt = <2100000>;
- regulator-always-on;
- regulator-boot-on;
ti,smps-range = <0x80>;
};
smps10_reg: smps10 {
+ /* VBUS_5V_OTG */
regulator-name = "smps10";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
@@ -339,38 +344,40 @@
};
ldo1_reg: ldo1 {
+ /* VDDAPHY_CAM: vdda_csiport */
regulator-name = "ldo1";
- regulator-min-microvolt = <2800000>;
- regulator-max-microvolt = <2800000>;
- regulator-always-on;
- regulator-boot-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1800000>;
};
ldo2_reg: ldo2 {
+ /* VCC_2V8_DISP: Does not go anywhere */
regulator-name = "ldo2";
- regulator-min-microvolt = <2900000>;
- regulator-max-microvolt = <2900000>;
- regulator-always-on;
- regulator-boot-on;
+ regulator-min-microvolt = <2800000>;
+ regulator-max-microvolt = <2800000>;
+ /* Unused */
+ status = "disabled";
};
ldo3_reg: ldo3 {
+ /* VDDAPHY_MDM: vdda_lli */
regulator-name = "ldo3";
- regulator-min-microvolt = <3000000>;
- regulator-max-microvolt = <3000000>;
- regulator-always-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1500000>;
regulator-boot-on;
+ /* Only if Modem is used */
+ status = "disabled";
};
ldo4_reg: ldo4 {
+ /* VDDAPHY_DISP: vdda_dsiport/hdmi */
regulator-name = "ldo4";
- regulator-min-microvolt = <2200000>;
- regulator-max-microvolt = <2200000>;
- regulator-always-on;
- regulator-boot-on;
+ regulator-min-microvolt = <1500000>;
+ regulator-max-microvolt = <1800000>;
};
ldo5_reg: ldo5 {
+ /* VDDA_1V8_PHY: usb/sata/hdmi.. */
regulator-name = "ldo5";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -379,38 +386,43 @@
};
ldo6_reg: ldo6 {
+ /* VDDS_1V2_WKUP: hsic/ldo_emu_wkup */
regulator-name = "ldo6";
- regulator-min-microvolt = <1500000>;
- regulator-max-microvolt = <1500000>;
+ regulator-min-microvolt = <1200000>;
+ regulator-max-microvolt = <1200000>;
regulator-always-on;
regulator-boot-on;
};
ldo7_reg: ldo7 {
+ /* VDD_VPP: vpp1 */
regulator-name = "ldo7";
- regulator-min-microvolt = <1500000>;
- regulator-max-microvolt = <1500000>;
- regulator-always-on;
- regulator-boot-on;
+ regulator-min-microvolt = <2000000>;
+ regulator-max-microvolt = <2000000>;
+ /* Only for efuse reprogramming! */
+ status = "disabled";
};
ldo8_reg: ldo8 {
+ /* VDD_3v0: Does not go anywhere */
regulator-name = "ldo8";
- regulator-min-microvolt = <1500000>;
- regulator-max-microvolt = <1500000>;
- regulator-always-on;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
regulator-boot-on;
+ /* Unused */
+ status = "disabled";
};
ldo9_reg: ldo9 {
+ /* VCC_DV_SDIO: vdds_sdcard */
regulator-name = "ldo9";
regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <3300000>;
- regulator-always-on;
+ regulator-max-microvolt = <3000000>;
regulator-boot-on;
};
ldoln_reg: ldoln {
+ /* VDDA_1v8_REF: vdds_osc/mm_l4per.. */
regulator-name = "ldoln";
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -419,12 +431,20 @@
};
ldousb_reg: ldousb {
+ /* VDDA_3V_USB: VDDA_USBHS33 */
regulator-name = "ldousb";
regulator-min-microvolt = <3250000>;
regulator-max-microvolt = <3250000>;
regulator-always-on;
regulator-boot-on;
};
+
+ regen3_reg: regen3 {
+ /* REGEN3 controls LDO9 supply to card */
+ regulator-name = "regen3";
+ regulator-always-on;
+ regulator-boot-on;
+ };
};
};
};
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index e643620417a..07be2cd7b31 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -644,7 +644,7 @@
utmi-mode = <2>;
ranges;
dwc3@4a030000 {
- compatible = "synopsys,dwc3";
+ compatible = "snps,dwc3";
reg = <0x4a030000 0x1000>;
interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
usb-phy = <&usb2_phy>, <&usb3_phy>;
diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi
index 05e9489cf95..bbeb623fc2c 100644
--- a/arch/arm/boot/dts/prima2.dtsi
+++ b/arch/arm/boot/dts/prima2.dtsi
@@ -515,16 +515,16 @@
sirf,function = "pulse_count";
};
};
- cko0_rst_pins_a: cko0_rst@0 {
- cko0_rst {
- sirf,pins = "cko0_rstgrp";
- sirf,function = "cko0_rst";
+ cko0_pins_a: cko0@0 {
+ cko0 {
+ sirf,pins = "cko0grp";
+ sirf,function = "cko0";
};
};
- cko1_rst_pins_a: cko1_rst@0 {
- cko1_rst {
- sirf,pins = "cko1_rstgrp";
- sirf,function = "cko1_rst";
+ cko1_pins_a: cko1@0 {
+ cko1 {
+ sirf,pins = "cko1grp";
+ sirf,function = "cko1";
};
};
};
diff --git a/arch/arm/boot/dts/sama5d3xmb.dtsi b/arch/arm/boot/dts/sama5d3xmb.dtsi
index 8a9e05d8a4b..dba739b6ef3 100644
--- a/arch/arm/boot/dts/sama5d3xmb.dtsi
+++ b/arch/arm/boot/dts/sama5d3xmb.dtsi
@@ -81,6 +81,14 @@
macb1: ethernet@f802c000 {
phy-mode = "rmii";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy0: ethernet-phy@1 {
+ interrupt-parent = <&pioE>;
+ interrupts = <30 IRQ_TYPE_EDGE_FALLING>;
+ reg = <1>;
+ };
};
pinctrl@fffff200 {
diff --git a/arch/arm/boot/dts/stih416-pinctrl.dtsi b/arch/arm/boot/dts/stih416-pinctrl.dtsi
index 957b21a71b4..0f246c97926 100644
--- a/arch/arm/boot/dts/stih416-pinctrl.dtsi
+++ b/arch/arm/boot/dts/stih416-pinctrl.dtsi
@@ -166,6 +166,15 @@
reg = <0x9000 0x100>;
st,bank-name = "PIO31";
};
+
+ serial2-oe {
+ pinctrl_serial2_oe: serial2-1 {
+ st,pins {
+ output-enable = <&PIO11 3 ALT2 OUT>;
+ };
+ };
+ };
+
};
pin-controller-rear {
@@ -218,7 +227,6 @@
st,pins {
tx = <&PIO17 4 ALT2 OUT>;
rx = <&PIO17 5 ALT2 IN>;
- output-enable = <&PIO11 3 ALT2 OUT>;
};
};
};
diff --git a/arch/arm/boot/dts/stih416.dtsi b/arch/arm/boot/dts/stih416.dtsi
index 3cecd9689a4..1a0326ea7d0 100644
--- a/arch/arm/boot/dts/stih416.dtsi
+++ b/arch/arm/boot/dts/stih416.dtsi
@@ -79,7 +79,7 @@
interrupts = <0 197 0>;
clocks = <&CLK_S_ICN_REG_0>;
pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_serial2>;
+ pinctrl-0 = <&pinctrl_serial2 &pinctrl_serial2_oe>;
};
/* SBC_UART1 */
diff --git a/arch/arm/boot/dts/stih41x.dtsi b/arch/arm/boot/dts/stih41x.dtsi
index 7321403cab8..f5b9898d9c6 100644
--- a/arch/arm/boot/dts/stih41x.dtsi
+++ b/arch/arm/boot/dts/stih41x.dtsi
@@ -6,10 +6,12 @@
#address-cells = <1>;
#size-cells = <0>;
cpu@0 {
+ device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <0>;
};
cpu@1 {
+ device_type = "cpu";
compatible = "arm,cortex-a9";
reg = <1>;
};
diff --git a/arch/arm/boot/dts/tegra20-colibri-512.dtsi b/arch/arm/boot/dts/tegra20-colibri-512.dtsi
index 2fcb3f2ca16..5592be6f2f7 100644
--- a/arch/arm/boot/dts/tegra20-colibri-512.dtsi
+++ b/arch/arm/boot/dts/tegra20-colibri-512.dtsi
@@ -457,6 +457,7 @@
};
usb-phy@c5004000 {
+ status = "okay";
nvidia,phy-reset-gpio = <&gpio TEGRA_GPIO(V, 1)
GPIO_ACTIVE_LOW>;
};
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index 365760b33a2..c8242533268 100644
--- a/arch/arm/boot/dts/tegra20-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -566,7 +566,6 @@
usb@c5000000 {
status = "okay";
- nvidia,vbus-gpio = <&gpio TEGRA_GPIO(D, 0) GPIO_ACTIVE_HIGH>;
dr_mode = "otg";
};
@@ -830,6 +829,8 @@
regulator-max-microvolt = <5000000>;
enable-active-high;
gpio = <&gpio 24 0>; /* PD0 */
+ regulator-always-on;
+ regulator-boot-on;
};
};
diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts
index ed4b901b022..1e9d33adb92 100644
--- a/arch/arm/boot/dts/tegra20-trimslice.dts
+++ b/arch/arm/boot/dts/tegra20-trimslice.dts
@@ -312,7 +312,6 @@
usb@c5000000 {
status = "okay";
- nvidia,vbus-gpio = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_HIGH>;
};
usb-phy@c5000000 {
@@ -412,6 +411,8 @@
regulator-max-microvolt = <5000000>;
enable-active-high;
gpio = <&gpio 170 0>; /* PV2 */
+ regulator-always-on;
+ regulator-boot-on;
};
};
diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts
index ab67c94db28..c703197dca6 100644
--- a/arch/arm/boot/dts/tegra20-whistler.dts
+++ b/arch/arm/boot/dts/tegra20-whistler.dts
@@ -509,7 +509,6 @@
usb@c5000000 {
status = "okay";
- nvidia,vbus-gpio = <&tca6416 0 GPIO_ACTIVE_HIGH>;
};
usb-phy@c5000000 {
@@ -519,7 +518,6 @@
usb@c5008000 {
status = "okay";
- nvidia,vbus-gpio = <&tca6416 1 GPIO_ACTIVE_HIGH>;
};
usb-phy@c5008000 {
@@ -588,6 +586,8 @@
regulator-max-microvolt = <5000000>;
enable-active-high;
gpio = <&tca6416 0 0>; /* GPIO_PMU0 */
+ regulator-always-on;
+ regulator-boot-on;
};
vbus3_reg: regulator@3 {
@@ -598,6 +598,8 @@
regulator-max-microvolt = <5000000>;
enable-active-high;
gpio = <&tca6416 1 0>; /* GPIO_PMU1 */
+ regulator-always-on;
+ regulator-boot-on;
};
};
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 9653fd8288d..e4570834512 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -477,13 +477,13 @@
<&tegra_car TEGRA20_CLK_USBD>;
clock-names = "reg", "pll_u", "timer", "utmi-pads";
nvidia,has-legacy-mode;
- hssync_start_delay = <9>;
- idle_wait_delay = <17>;
- elastic_limit = <16>;
- term_range_adj = <6>;
- xcvr_setup = <9>;
- xcvr_lsfslew = <1>;
- xcvr_lsrslew = <1>;
+ nvidia,hssync-start-delay = <9>;
+ nvidia,idle-wait-delay = <17>;
+ nvidia,elastic-limit = <16>;
+ nvidia,term-range-adj = <6>;
+ nvidia,xcvr-setup = <9>;
+ nvidia,xcvr-lsfslew = <1>;
+ nvidia,xcvr-lsrslew = <1>;
status = "disabled";
};
@@ -527,13 +527,13 @@
<&tegra_car TEGRA20_CLK_CLK_M>,
<&tegra_car TEGRA20_CLK_USBD>;
clock-names = "reg", "pll_u", "timer", "utmi-pads";
- hssync_start_delay = <9>;
- idle_wait_delay = <17>;
- elastic_limit = <16>;
- term_range_adj = <6>;
- xcvr_setup = <9>;
- xcvr_lsfslew = <2>;
- xcvr_lsrslew = <2>;
+ nvidia,hssync-start-delay = <9>;
+ nvidia,idle-wait-delay = <17>;
+ nvidia,elastic-limit = <16>;
+ nvidia,term-range-adj = <6>;
+ nvidia,xcvr-setup = <9>;
+ nvidia,xcvr-lsfslew = <2>;
+ nvidia,xcvr-lsrslew = <2>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/twl4030.dtsi b/arch/arm/boot/dts/twl4030.dtsi
index b3034da00a3..ae6a17aed9e 100644
--- a/arch/arm/boot/dts/twl4030.dtsi
+++ b/arch/arm/boot/dts/twl4030.dtsi
@@ -47,6 +47,12 @@
regulator-max-microvolt = <3150000>;
};
+ vmmc2: regulator-vmmc2 {
+ compatible = "ti,twl4030-vmmc2";
+ regulator-min-microvolt = <1850000>;
+ regulator-max-microvolt = <3150000>;
+ };
+
vusb1v5: regulator-vusb1v5 {
compatible = "ti,twl4030-vusb1v5";
};
diff --git a/arch/arm/boot/dts/vf610.dtsi b/arch/arm/boot/dts/vf610.dtsi
index e1eb7dadda8..67d929cf980 100644
--- a/arch/arm/boot/dts/vf610.dtsi
+++ b/arch/arm/boot/dts/vf610.dtsi
@@ -442,8 +442,8 @@
compatible = "fsl,mvf600-fec";
reg = <0x400d0000 0x1000>;
interrupts = <0 78 0x04>;
- clocks = <&clks VF610_CLK_ENET>,
- <&clks VF610_CLK_ENET>,
+ clocks = <&clks VF610_CLK_ENET0>,
+ <&clks VF610_CLK_ENET0>,
<&clks VF610_CLK_ENET>;
clock-names = "ipg", "ahb", "ptp";
status = "disabled";
@@ -453,8 +453,8 @@
compatible = "fsl,mvf600-fec";
reg = <0x400d1000 0x1000>;
interrupts = <0 79 0x04>;
- clocks = <&clks VF610_CLK_ENET>,
- <&clks VF610_CLK_ENET>,
+ clocks = <&clks VF610_CLK_ENET1>,
+ <&clks VF610_CLK_ENET1>,
<&clks VF610_CLK_ENET>;
clock-names = "ipg", "ahb", "ptp";
status = "disabled";
diff --git a/arch/arm/boot/dts/wm8850-w70v2.dts b/arch/arm/boot/dts/wm8850-w70v2.dts
index 90e913fb64b..7a563d2523b 100644
--- a/arch/arm/boot/dts/wm8850-w70v2.dts
+++ b/arch/arm/boot/dts/wm8850-w70v2.dts
@@ -11,13 +11,14 @@
/dts-v1/;
/include/ "wm8850.dtsi"
+#include <dt-bindings/pwm/pwm.h>
/ {
model = "Wondermedia WM8850-W70v2 Tablet";
backlight {
compatible = "pwm-backlight";
- pwms = <&pwm 0 50000 1>; /* duty inverted */
+ pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
brightness-levels = <0 40 60 80 100 130 190 255>;
default-brightness-level = <5>;
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index a432e6c1dac..39ad030ac0c 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -26,7 +26,6 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/edma.h>
-#include <linux/err.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S
index 80f033614a1..39c96df3477 100644
--- a/arch/arm/common/mcpm_head.S
+++ b/arch/arm/common/mcpm_head.S
@@ -151,7 +151,7 @@ mcpm_setup_leave:
mov r0, #INBOUND_NOT_COMING_UP
strb r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
- dsb
+ dsb st
sev
mov r0, r11
diff --git a/arch/arm/common/vlock.S b/arch/arm/common/vlock.S
index ff198583f68..8b7df283fed 100644
--- a/arch/arm/common/vlock.S
+++ b/arch/arm/common/vlock.S
@@ -42,7 +42,7 @@
dmb
mov \rscratch, #0
strb \rscratch, [\rbase, \rcpu]
- dsb
+ dsb st
sev
.endm
@@ -102,7 +102,7 @@ ENTRY(vlock_unlock)
dmb
mov r1, #VLOCK_OWNER_NONE
strb r1, [r0, #VLOCK_OWNER_OFFSET]
- dsb
+ dsb st
sev
bx lr
ENDPROC(vlock_unlock)
diff --git a/arch/arm/configs/bockw_defconfig b/arch/arm/configs/bockw_defconfig
index 845f5cdf62b..e7e94948d19 100644
--- a/arch/arm/configs/bockw_defconfig
+++ b/arch/arm/configs/bockw_defconfig
@@ -82,6 +82,13 @@ CONFIG_SERIAL_SH_SCI_CONSOLE=y
# CONFIG_HWMON is not set
CONFIG_I2C=y
CONFIG_I2C_RCAR=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SOC_CAMERA=y
+CONFIG_VIDEO_RCAR_VIN=y
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+CONFIG_VIDEO_ML86V7667=y
CONFIG_SPI=y
CONFIG_SPI_SH_HSPI=y
CONFIG_USB=y
diff --git a/arch/arm/configs/da8xx_omapl_defconfig b/arch/arm/configs/da8xx_omapl_defconfig
index 7c868139bdb..1571bea48be 100644
--- a/arch/arm/configs/da8xx_omapl_defconfig
+++ b/arch/arm/configs/da8xx_omapl_defconfig
@@ -102,6 +102,8 @@ CONFIG_SND_SOC=m
CONFIG_SND_DAVINCI_SOC=m
# CONFIG_HID_SUPPORT is not set
# CONFIG_USB_SUPPORT is not set
+CONFIG_DMADEVICES=y
+CONFIG_TI_EDMA=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_XFS_FS=m
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index c86fd75e181..ab2f7378352 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -162,6 +162,8 @@ CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=m
CONFIG_LEDS_TRIGGER_HEARTBEAT=m
CONFIG_RTC_CLASS=y
+CONFIG_DMADEVICES=y
+CONFIG_TI_EDMA=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
CONFIG_XFS_FS=m
diff --git a/arch/arm/configs/keystone_defconfig b/arch/arm/configs/keystone_defconfig
index 62e968cac9d..1f36b823905 100644
--- a/arch/arm/configs/keystone_defconfig
+++ b/arch/arm/configs/keystone_defconfig
@@ -104,6 +104,7 @@ CONFIG_IP_SCTP=y
CONFIG_VLAN_8021Q=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CMA=y
+CONFIG_DMA_CMA=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/arm/configs/marzen_defconfig b/arch/arm/configs/marzen_defconfig
index 494e70aeb9e..c50e52be446 100644
--- a/arch/arm/configs/marzen_defconfig
+++ b/arch/arm/configs/marzen_defconfig
@@ -84,6 +84,13 @@ CONFIG_GPIO_RCAR=y
CONFIG_THERMAL=y
CONFIG_RCAR_THERMAL=y
CONFIG_SSB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_SOC_CAMERA=y
+CONFIG_VIDEO_RCAR_VIN=y
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+CONFIG_VIDEO_ADV7180=y
CONFIG_USB=y
CONFIG_USB_RCAR_PHY=y
CONFIG_MMC=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index fe0bdc361d2..6e572c64cf5 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -53,6 +53,7 @@ CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_OMAP_OCP2SCP=y
CONFIG_BLK_DEV_SD=y
CONFIG_ATA=y
CONFIG_SATA_AHCI_PLATFORM=y
@@ -61,6 +62,7 @@ CONFIG_SATA_MV=y
CONFIG_NETDEVICES=y
CONFIG_SUN4I_EMAC=y
CONFIG_NET_CALXEDA_XGMAC=y
+CONFIG_KS8851=y
CONFIG_SMSC911X=y
CONFIG_STMMAC_ETH=y
CONFIG_MDIO_SUN4I=y
@@ -89,6 +91,7 @@ CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_I2C_SIRF=y
CONFIG_I2C_TEGRA=y
CONFIG_SPI=y
+CONFIG_SPI_OMAP24XX=y
CONFIG_SPI_PL022=y
CONFIG_SPI_SIRF=y
CONFIG_SPI_TEGRA114=y
@@ -111,11 +114,12 @@ CONFIG_FB_SIMPLE=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_MXC=y
CONFIG_USB_EHCI_TEGRA=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_ISP1760_HCD=y
CONFIG_USB_STORAGE=y
+CONFIG_USB_CHIPIDEA=y
+CONFIG_USB_CHIPIDEA_HOST=y
CONFIG_AB8500_USB=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_OMAP_USB2=y
diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig
index 35f8cf299fa..263ae3869e3 100644
--- a/arch/arm/configs/nhk8815_defconfig
+++ b/arch/arm/configs/nhk8815_defconfig
@@ -1,6 +1,8 @@
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
@@ -48,7 +50,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_MTD=y
CONFIG_MTD_TESTS=m
CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_NAND_ECC_SMC=y
CONFIG_MTD_NAND=y
@@ -94,8 +95,10 @@ CONFIG_I2C_GPIO=y
CONFIG_I2C_NOMADIK=y
CONFIG_DEBUG_GPIO=y
# CONFIG_HWMON is not set
+CONFIG_REGULATOR=y
CONFIG_MMC=y
-CONFIG_MMC_CLKGATE=y
+CONFIG_MMC_UNSAFE_RESUME=y
+# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_ARMMMCI=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 5339e6a4d63..056b27aafbe 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -78,6 +78,7 @@ CONFIG_MAC80211_RC_PID=y
CONFIG_MAC80211_RC_DEFAULT_PID=y
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_CMA=y
+CONFIG_DMA_CMA=y
CONFIG_CONNECTOR=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
@@ -185,13 +186,11 @@ CONFIG_OMAP2_DSS_RFBI=y
CONFIG_OMAP2_DSS_SDI=y
CONFIG_OMAP2_DSS_DSI=y
CONFIG_FB_OMAP2=m
-CONFIG_PANEL_GENERIC_DPI=m
-CONFIG_PANEL_TFP410=m
-CONFIG_PANEL_SHARP_LS037V7DW01=m
-CONFIG_PANEL_NEC_NL8048HL11_01B=m
-CONFIG_PANEL_TAAL=m
-CONFIG_PANEL_TPO_TD043MTEA1=m
-CONFIG_PANEL_ACX565AKM=m
+CONFIG_DISPLAY_ENCODER_TFP410=m
+CONFIG_DISPLAY_ENCODER_TPD12S015=m
+CONFIG_DISPLAY_CONNECTOR_DVI=m
+CONFIG_DISPLAY_CONNECTOR_HDMI=m
+CONFIG_DISPLAY_PANEL_DPI=m
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_PLATFORM=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 1effb43dab8..92d0a149aeb 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -79,6 +79,7 @@ CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
CONFIG_CMA=y
+CONFIG_DMA_CMA=y
CONFIG_MTD=y
CONFIG_MTD_M25P80=y
CONFIG_PROC_DEVICETREE=y
diff --git a/arch/arm/include/asm/a.out-core.h b/arch/arm/include/asm/a.out-core.h
deleted file mode 100644
index 92f10cb5c70..00000000000
--- a/arch/arm/include/asm/a.out-core.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* a.out coredump register dumper
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#ifndef _ASM_A_OUT_CORE_H
-#define _ASM_A_OUT_CORE_H
-
-#ifdef __KERNEL__
-
-#include <linux/user.h>
-#include <linux/elfcore.h>
-
-/*
- * fill in the user structure for an a.out core dump
- */
-static inline void aout_dump_thread(struct pt_regs *regs, struct user *dump)
-{
- struct task_struct *tsk = current;
-
- dump->magic = CMAGIC;
- dump->start_code = tsk->mm->start_code;
- dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);
-
- dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
- dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
- dump->u_ssize = 0;
-
- memset(dump->u_debugreg, 0, sizeof(dump->u_debugreg));
-
- if (dump->start_stack < 0x04000000)
- dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;
-
- dump->regs = *regs;
- dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
-}
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_A_OUT_CORE_H */
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index e406d575c94..5665134bfa3 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -17,7 +17,8 @@ int arch_timer_arch_init(void);
* nicely work out which register we want, and chuck away the rest of
* the code. At least it does so with a recent GCC (4.6.3).
*/
-static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
+static __always_inline
+void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
{
if (access == ARCH_TIMER_PHYS_ACCESS) {
switch (reg) {
@@ -28,9 +29,7 @@ static inline void arch_timer_reg_write(const int access, const int reg, u32 val
asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
break;
}
- }
-
- if (access == ARCH_TIMER_VIRT_ACCESS) {
+ } else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
@@ -44,7 +43,8 @@ static inline void arch_timer_reg_write(const int access, const int reg, u32 val
isb();
}
-static inline u32 arch_timer_reg_read(const int access, const int reg)
+static __always_inline
+u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
u32 val = 0;
@@ -57,9 +57,7 @@ static inline u32 arch_timer_reg_read(const int access, const int reg)
asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
break;
}
- }
-
- if (access == ARCH_TIMER_VIRT_ACCESS) {
+ } else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
case ARCH_TIMER_REG_CTRL:
asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index a5fef710af3..fcc1b5bf697 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -220,9 +220,9 @@
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
.ifeqs "\mode","arm"
- ALT_SMP(dmb)
+ ALT_SMP(dmb ish)
.else
- ALT_SMP(W(dmb))
+ ALT_SMP(W(dmb) ish)
.endif
#elif __LINUX_ARM_ARCH__ == 6
ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb
diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
index 8dcd9c702d9..60f15e274e6 100644
--- a/arch/arm/include/asm/barrier.h
+++ b/arch/arm/include/asm/barrier.h
@@ -14,27 +14,27 @@
#endif
#if __LINUX_ARM_ARCH__ >= 7
-#define isb() __asm__ __volatile__ ("isb" : : : "memory")
-#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
-#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
+#define isb(option) __asm__ __volatile__ ("isb " #option : : : "memory")
+#define dsb(option) __asm__ __volatile__ ("dsb " #option : : : "memory")
+#define dmb(option) __asm__ __volatile__ ("dmb " #option : : : "memory")
#elif defined(CONFIG_CPU_XSC3) || __LINUX_ARM_ARCH__ == 6
-#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
: : "r" (0) : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
: : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
+#define dmb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
: : "r" (0) : "memory")
#elif defined(CONFIG_CPU_FA526)
-#define isb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
+#define isb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c5, 4" \
: : "r" (0) : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
: : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("" : : : "memory")
+#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
#else
-#define isb() __asm__ __volatile__ ("" : : : "memory")
-#define dsb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
+#define isb(x) __asm__ __volatile__ ("" : : : "memory")
+#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \
: : "r" (0) : "memory")
-#define dmb() __asm__ __volatile__ ("" : : : "memory")
+#define dmb(x) __asm__ __volatile__ ("" : : : "memory")
#endif
#ifdef CONFIG_ARCH_HAS_BARRIERS
@@ -42,7 +42,7 @@
#elif defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP)
#define mb() do { dsb(); outer_sync(); } while (0)
#define rmb() dsb()
-#define wmb() mb()
+#define wmb() do { dsb(st); outer_sync(); } while (0)
#else
#define mb() barrier()
#define rmb() barrier()
@@ -54,9 +54,9 @@
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#else
-#define smp_mb() dmb()
-#define smp_rmb() dmb()
-#define smp_wmb() dmb()
+#define smp_mb() dmb(ish)
+#define smp_rmb() smp_mb()
+#define smp_wmb() dmb(ishst)
#endif
#define read_barrier_depends() do { } while(0)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 17d0ae8672f..15f2d5bf887 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -268,8 +268,7 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
* Harvard caches are synchronised for the user space address range.
* This is used for the ARM private sys_cacheflush system call.
*/
-#define flush_cache_user_range(start,end) \
- __cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
+#define flush_cache_user_range(s,e) __cpuc_coherent_user_range(s,e)
/*
* Perform necessary cache operations to ensure that data previously
@@ -352,7 +351,7 @@ static inline void flush_cache_vmap(unsigned long start, unsigned long end)
* set_pte_at() called from vmap_pte_range() does not
* have a DSB after cleaning the cache line.
*/
- dsb();
+ dsb(ishst);
}
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 8c25dc4e985..9672e978d50 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -89,13 +89,18 @@ extern unsigned int processor_id;
__val; \
})
+/*
+ * The memory clobber prevents gcc 4.5 from reordering the mrc before
+ * any is_smp() tests, which can cause undefined instruction aborts on
+ * ARM1136 r0 due to the missing extended CP15 registers.
+ */
#define read_cpuid_ext(ext_reg) \
({ \
unsigned int __val; \
asm("mrc p15, 0, %0, c0, " ext_reg \
: "=r" (__val) \
: \
- : "cc"); \
+ : "memory"); \
__val; \
})
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
index 3ed37b4d93d..e072bb2ba1b 100644
--- a/arch/arm/include/asm/dma-contiguous.h
+++ b/arch/arm/include/asm/dma-contiguous.h
@@ -2,7 +2,7 @@
#define ASMARM_DMA_CONTIGUOUS_H
#ifdef __KERNEL__
-#ifdef CONFIG_CMA
+#ifdef CONFIG_DMA_CMA
#include <linux/types.h>
#include <asm-generic/dma-contiguous.h>
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 38050b1c480..56211f2084e 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -130,4 +130,10 @@ struct mm_struct;
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
+#ifdef CONFIG_MMU
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+int arch_setup_additional_pages(struct linux_binprm *, int);
+#endif
+
#endif
diff --git a/arch/arm/include/asm/hardware/debug-8250.S b/arch/arm/include/asm/hardware/debug-8250.S
deleted file mode 100644
index 22c689255e6..00000000000
--- a/arch/arm/include/asm/hardware/debug-8250.S
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * arch/arm/include/asm/hardware/debug-8250.S
- *
- * Copyright (C) 1994-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/serial_reg.h>
-
- .macro senduart,rd,rx
- strb \rd, [\rx, #UART_TX << UART_SHIFT]
- .endm
-
- .macro busyuart,rd,rx
-1002: ldrb \rd, [\rx, #UART_LSR << UART_SHIFT]
- and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
- teq \rd, #UART_LSR_TEMT | UART_LSR_THRE
- bne 1002b
- .endm
-
- .macro waituart,rd,rx
-#ifdef FLOW_CONTROL
-1001: ldrb \rd, [\rx, #UART_MSR << UART_SHIFT]
- tst \rd, #UART_MSR_CTS
- beq 1001b
-#endif
- .endm
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 472ac709100..9b28c41f4ba 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -64,7 +64,7 @@ void kvm_clear_hyp_idmap(void);
static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
- pte_val(*pte) = new_pte;
+ *pte = new_pte;
/*
* flush_pmd_entry just takes a void pointer and cleans the necessary
* cache entries, so we can reuse the function for ptes.
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 441efc491b5..69b879ac028 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -65,12 +65,12 @@ struct machine_desc {
/*
* Current machine - only accessible during boot.
*/
-extern struct machine_desc *machine_desc;
+extern const struct machine_desc *machine_desc;
/*
* Machine type table - also only accessible during boot
*/
-extern struct machine_desc __arch_info_begin[], __arch_info_end[];
+extern const struct machine_desc __arch_info_begin[], __arch_info_end[];
#define for_each_machine_desc(p) \
for (p = __arch_info_begin; p < __arch_info_end; p++)
diff --git a/arch/arm/include/asm/memblock.h b/arch/arm/include/asm/memblock.h
index 00ca5f92648..c2f5102ae65 100644
--- a/arch/arm/include/asm/memblock.h
+++ b/arch/arm/include/asm/memblock.h
@@ -4,8 +4,7 @@
struct meminfo;
struct machine_desc;
-extern void arm_memblock_init(struct meminfo *, struct machine_desc *);
-
+void arm_memblock_init(struct meminfo *, const struct machine_desc *);
phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align);
#endif
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index e3d55547e75..6f18da09668 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -6,8 +6,11 @@
typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
atomic64_t id;
+#else
+ int switch_pending;
#endif
unsigned int vmalloc_seq;
+ unsigned long sigpage;
} mm_context_t;
#ifdef CONFIG_CPU_HAS_ASID
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index b5792b7fd8d..9b32f76bb0d 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -56,7 +56,7 @@ static inline void check_and_switch_context(struct mm_struct *mm,
* on non-ASID CPUs, the old mm will remain valid until the
* finish_arch_post_lock_switch() call.
*/
- set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
+ mm->context.switch_pending = 1;
else
cpu_switch_mm(mm->pgd, mm);
}
@@ -65,9 +65,21 @@ static inline void check_and_switch_context(struct mm_struct *mm,
finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
- if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
- struct mm_struct *mm = current->mm;
- cpu_switch_mm(mm->pgd, mm);
+ struct mm_struct *mm = current->mm;
+
+ if (mm && mm->context.switch_pending) {
+ /*
+ * Preemption must be disabled during cpu_switch_mm() as we
+ * have some stateful cache flush implementations. Check
+ * switch_pending again in case we were preempted and the
+ * switch to this mm was already done.
+ */
+ preempt_disable();
+ if (mm->context.switch_pending) {
+ mm->context.switch_pending = 0;
+ cpu_switch_mm(mm->pgd, mm);
+ }
+ preempt_enable_no_resched();
}
}
diff --git a/arch/arm/include/asm/module.h b/arch/arm/include/asm/module.h
index 0d3a28dbc8e..ed690c49ef9 100644
--- a/arch/arm/include/asm/module.h
+++ b/arch/arm/include/asm/module.h
@@ -12,6 +12,8 @@ enum {
ARM_SEC_CORE,
ARM_SEC_EXIT,
ARM_SEC_DEVEXIT,
+ ARM_SEC_HOT,
+ ARM_SEC_UNLIKELY,
ARM_SEC_MAX,
};
diff --git a/arch/arm/include/asm/neon.h b/arch/arm/include/asm/neon.h
new file mode 100644
index 00000000000..8f730fe7009
--- /dev/null
+++ b/arch/arm/include/asm/neon.h
@@ -0,0 +1,36 @@
+/*
+ * linux/arch/arm/include/asm/neon.h
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/hwcap.h>
+
+#define cpu_has_neon() (!!(elf_hwcap & HWCAP_NEON))
+
+#ifdef __ARM_NEON__
+
+/*
+ * If you are affected by the BUILD_BUG below, it probably means that you are
+ * using NEON code /and/ calling the kernel_neon_begin() function from the same
+ * compilation unit. To prevent issues that may arise from GCC reordering or
+ * generating(1) NEON instructions outside of these begin/end functions, the
+ * only supported way of using NEON code in the kernel is by isolating it in a
+ * separate compilation unit, and calling it from another unit from inside a
+ * kernel_neon_begin/kernel_neon_end pair.
+ *
+ * (1) Current GCC (4.7) might generate NEON instructions at O3 level if
+ * -mfpu=neon is set.
+ */
+
+#define kernel_neon_begin() \
+ BUILD_BUG_ON_MSG(1, "kernel_neon_begin() called from NEON code")
+
+#else
+void kernel_neon_begin(void);
+#endif
+void kernel_neon_end(void);
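
A minimal sketch of the usage this header prescribes: the NEON body lives in its own file built with -mfpu=neon, while the caller sits in an ordinary unit and brackets the call. my_neon_fill()/my_plain_fill() are hypothetical helpers, and the in_interrupt() guard mirrors the fallback pattern the xor code uses later in this series.

    /* glue.c -- built without -mfpu=neon */
    #include <linux/hardirq.h>
    #include <asm/neon.h>

    void my_neon_fill(void *dst, int c, unsigned long len);   /* NEON unit */
    void my_plain_fill(void *dst, int c, unsigned long len);  /* C fallback */

    void fast_fill(void *dst, int c, unsigned long len)
    {
            if (!cpu_has_neon() || in_interrupt()) {
                    my_plain_fill(dst, c, len);     /* NEON unusable here */
                    return;
            }
            kernel_neon_begin();            /* preserves user VFP/NEON state */
            my_neon_fill(dst, c, len);
            kernel_neon_end();
    }
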
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 6363f3d1d50..4355f0ec44d 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -142,7 +142,9 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, const void *from);
+#ifdef CONFIG_KUSER_HELPERS
#define __HAVE_ARCH_GATE_AREA 1
+#endif
#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level-types.h>
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 04aeb02d2e1..be956dbf6ba 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -100,7 +100,7 @@ extern pgprot_t pgprot_s2_device;
#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP)
#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
-#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_USER | L_PTE_S2_RDONLY)
+#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDWR)
#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 06e7d509eaa..413f3876341 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -54,7 +54,6 @@ struct thread_struct {
#define start_thread(regs,pc,sp) \
({ \
- unsigned long *stack = (unsigned long *)sp; \
memset(regs->uregs, 0, sizeof(regs->uregs)); \
if (current->personality & ADDR_LIMIT_32BIT) \
regs->ARM_cpsr = USR_MODE; \
@@ -65,9 +64,6 @@ struct thread_struct {
regs->ARM_cpsr |= PSR_ENDSTATE; \
regs->ARM_pc = pc & ~1; /* pc */ \
regs->ARM_sp = sp; /* sp */ \
- regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
- regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
- regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
nommu_start_thread(regs); \
})
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h
index a219227c3e4..4a2985e2196 100644
--- a/arch/arm/include/asm/prom.h
+++ b/arch/arm/include/asm/prom.h
@@ -15,13 +15,13 @@
#ifdef CONFIG_OF
-extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
+extern const struct machine_desc *setup_machine_fdt(unsigned int dt_phys);
extern void arm_dt_memblock_reserve(void);
extern void __init arm_dt_init_cpu_maps(void);
#else /* CONFIG_OF */
-static inline struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
+static inline const struct machine_desc *setup_machine_fdt(unsigned int dt_phys)
{
return NULL;
}
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 6462a721ebd..a252c0bfacf 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -88,4 +88,7 @@ static inline u32 mpidr_hash_size(void)
{
return 1 << mpidr_hash.bits;
}
+
+extern int platform_can_cpu_hotplug(void);
+
#endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index f8b8965666e..4f2c28060c9 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -46,7 +46,7 @@ static inline void dsb_sev(void)
{
#if __LINUX_ARM_ARCH__ >= 7
__asm__ __volatile__ (
- "dsb\n"
+ "dsb ishst\n"
SEV
);
#else
@@ -107,7 +107,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
" subs %1, %0, %0, ror #16\n"
" addeq %0, %0, %4\n"
" strexeq %2, %0, [%3]"
- : "=&r" (slock), "=&r" (contended), "=r" (res)
+ : "=&r" (slock), "=&r" (contended), "=&r" (res)
: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
: "cc");
} while (res);
@@ -168,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
- unsigned long tmp;
+ unsigned long contended, res;
- __asm__ __volatile__(
-" ldrex %0, [%1]\n"
-" teq %0, #0\n"
-" strexeq %0, %2, [%1]"
- : "=&r" (tmp)
- : "r" (&rw->lock), "r" (0x80000000)
- : "cc");
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%2]\n"
+ " mov %1, #0\n"
+ " teq %0, #0\n"
+ " strexeq %1, %3, [%2]"
+ : "=&r" (contended), "=&r" (res)
+ : "r" (&rw->lock), "r" (0x80000000)
+ : "cc");
+ } while (res);
- if (tmp == 0) {
+ if (!contended) {
smp_mb();
return 1;
} else {
@@ -254,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
- unsigned long tmp, tmp2 = 1;
+ unsigned long contended, res;
- __asm__ __volatile__(
-" ldrex %0, [%2]\n"
-" adds %0, %0, #1\n"
-" strexpl %1, %0, [%2]\n"
- : "=&r" (tmp), "+r" (tmp2)
- : "r" (&rw->lock)
- : "cc");
+ do {
+ __asm__ __volatile__(
+ " ldrex %0, [%2]\n"
+ " mov %1, #0\n"
+ " adds %0, %0, #1\n"
+ " strexpl %1, %0, [%2]"
+ : "=&r" (contended), "=&r" (res)
+ : "r" (&rw->lock)
+ : "cc");
+ } while (res);
- smp_mb();
- return tmp2 == 0;
+ /* If the lock is negative, then it is already held for write. */
+ if (contended < 0x80000000) {
+ smp_mb();
+ return 1;
+ } else {
+ return 0;
+ }
}
/* read_can_lock - would read_trylock() succeed? */
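
The rewritten trylocks retry only when the exclusive store fails (res != 0) and judge contention from the value that was loaded. A conceptual user-space rendering of the same policy, using GCC atomic builtins instead of ldrex/strex, is sketched below purely to make the control flow explicit; it is not the kernel implementation.

    static int sketch_read_trylock(unsigned long *lock)
    {
            unsigned long old;

            do {
                    old = __atomic_load_n(lock, __ATOMIC_RELAXED);
                    if (old & 0x80000000UL)         /* held for write */
                            return 0;
                    /* weak CAS ~ strex: may fail spuriously, so retry */
            } while (!__atomic_compare_exchange_n(lock, &old, old + 1, 1,
                            __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
            return 1;
    }
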
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index fa09e6b49bf..c99e259469f 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -4,6 +4,16 @@
#include <linux/thread_info.h>
/*
+ * For v7 SMP cores running a preemptible kernel we may be pre-empted
+ * during a TLB maintenance operation, so execute an inner-shareable dsb
+ * to ensure that the maintenance completes in case we migrate to another
+ * CPU.
+ */
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7)
+#define finish_arch_switch(prev) dsb(ish)
+#endif
+
+/*
* switch_to(prev, next) should switch from task `prev' to `next'
* `prev' will never be the same as `next'. schedule() itself
* contains the memory barrier to tell GCC not to cache `current'.
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 214d4158089..df5e13d64f2 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -43,6 +43,16 @@ struct cpu_context_save {
__u32 extra[2]; /* Xscale 'acc' register, etc */
};
+struct arm_restart_block {
+ union {
+ /* For user cache flushing */
+ struct {
+ unsigned long start;
+ unsigned long end;
+ } cache;
+ };
+};
+
/*
* low level task data that entry.S needs immediate access to.
* __switch_to() assumes cpu_context follows immediately after cpu_domain.
@@ -68,6 +78,7 @@ struct thread_info {
unsigned long thumbee_state; /* ThumbEE Handler Base register */
#endif
struct restart_block restart_block;
+ struct arm_restart_block arm_restart_block;
};
#define INIT_THREAD_INFO(tsk) \
@@ -156,7 +167,6 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 20
-#define TIF_SWITCH_MM 22 /* deferred switch_mm */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 46e7cfb3e72..0baf7f0d939 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -43,6 +43,7 @@ struct mmu_gather {
struct mm_struct *mm;
unsigned int fullmm;
struct vm_area_struct *vma;
+ unsigned long start, end;
unsigned long range_start;
unsigned long range_end;
unsigned int nr;
@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = fullmm;
+ tlb->fullmm = !(start | (end+1));
+ tlb->start = start;
+ tlb->end = end;
tlb->vma = NULL;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;
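
The new fullmm test relies on the convention that a full address-space teardown passes start = 0 and end = ~0UL, so (end + 1) wraps to zero. A self-contained check of that arithmetic:

    #include <assert.h>

    int main(void)
    {
            unsigned long start = 0, end = ~0UL;    /* full-mm teardown */
            assert(!(start | (end + 1)) == 1);      /* fullmm set */

            start = 0x8000; end = 0x9000;           /* ordinary munmap() range */
            assert(!(start | (end + 1)) == 0);      /* fullmm clear */
            return 0;
    }
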
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index fdbb9e36974..38960264040 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -319,67 +319,110 @@ extern struct cpu_tlb_fns cpu_tlb;
#define tlb_op(f, regs, arg) __tlb_op(f, "p15, 0, %0, " regs, arg)
#define tlb_l2_op(f, regs, arg) __tlb_op(f, "p15, 1, %0, " regs, arg)
-static inline void local_flush_tlb_all(void)
+static inline void __local_flush_tlb_all(void)
{
const int zero = 0;
const unsigned int __tlb_flag = __cpu_tlb_flags;
- if (tlb_flag(TLB_WB))
- dsb();
-
tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
- tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);
+}
+
+static inline void local_flush_tlb_all(void)
+{
+ const int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_WB))
+ dsb(nshst);
+
+ __local_flush_tlb_all();
+ tlb_op(TLB_V7_UIS_FULL, "c8, c7, 0", zero);
if (tlb_flag(TLB_BARRIER)) {
- dsb();
+ dsb(nsh);
isb();
}
}
-static inline void local_flush_tlb_mm(struct mm_struct *mm)
+static inline void __flush_tlb_all(void)
{
const int zero = 0;
- const int asid = ASID(mm);
const unsigned int __tlb_flag = __cpu_tlb_flags;
if (tlb_flag(TLB_WB))
- dsb();
+ dsb(ishst);
+
+ __local_flush_tlb_all();
+ tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero);
+
+ if (tlb_flag(TLB_BARRIER)) {
+ dsb(ish);
+ isb();
+ }
+}
+
+static inline void __local_flush_tlb_mm(struct mm_struct *mm)
+{
+ const int zero = 0;
+ const int asid = ASID(mm);
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
- if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
+ if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
}
- put_cpu();
}
tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid);
tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid);
tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid);
+}
+
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ const int asid = ASID(mm);
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_WB))
+ dsb(nshst);
+
+ __local_flush_tlb_mm(mm);
+ tlb_op(TLB_V7_UIS_ASID, "c8, c7, 2", asid);
+
+ if (tlb_flag(TLB_BARRIER))
+ dsb(nsh);
+}
+
+static inline void __flush_tlb_mm(struct mm_struct *mm)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_WB))
+ dsb(ishst);
+
+ __local_flush_tlb_mm(mm);
#ifdef CONFIG_ARM_ERRATA_720789
- tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", zero);
+ tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", 0);
#else
- tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", asid);
+ tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", ASID(mm));
#endif
if (tlb_flag(TLB_BARRIER))
- dsb();
+ dsb(ish);
}
static inline void
-local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
const int zero = 0;
const unsigned int __tlb_flag = __cpu_tlb_flags;
uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
- if (tlb_flag(TLB_WB))
- dsb();
-
if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
@@ -392,6 +435,36 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr);
tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr);
tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr);
+}
+
+static inline void
+local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
+
+ if (tlb_flag(TLB_WB))
+ dsb(nshst);
+
+ __local_flush_tlb_page(vma, uaddr);
+ tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", uaddr);
+
+ if (tlb_flag(TLB_BARRIER))
+ dsb(nsh);
+}
+
+static inline void
+__flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm);
+
+ if (tlb_flag(TLB_WB))
+ dsb(ishst);
+
+ __local_flush_tlb_page(vma, uaddr);
#ifdef CONFIG_ARM_ERRATA_720789
tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK);
#else
@@ -399,19 +472,14 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
#endif
if (tlb_flag(TLB_BARRIER))
- dsb();
+ dsb(ish);
}
-static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
+static inline void __local_flush_tlb_kernel_page(unsigned long kaddr)
{
const int zero = 0;
const unsigned int __tlb_flag = __cpu_tlb_flags;
- kaddr &= PAGE_MASK;
-
- if (tlb_flag(TLB_WB))
- dsb();
-
tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
@@ -421,38 +489,103 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr);
tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr);
tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr);
+}
+
+static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ kaddr &= PAGE_MASK;
+
+ if (tlb_flag(TLB_WB))
+ dsb(nshst);
+
+ __local_flush_tlb_kernel_page(kaddr);
+ tlb_op(TLB_V7_UIS_PAGE, "c8, c7, 1", kaddr);
+
+ if (tlb_flag(TLB_BARRIER)) {
+ dsb(nsh);
+ isb();
+ }
+}
+
+static inline void __flush_tlb_kernel_page(unsigned long kaddr)
+{
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ kaddr &= PAGE_MASK;
+
+ if (tlb_flag(TLB_WB))
+ dsb(ishst);
+
+ __local_flush_tlb_kernel_page(kaddr);
tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr);
if (tlb_flag(TLB_BARRIER)) {
- dsb();
+ dsb(ish);
isb();
}
}
+/*
+ * Branch predictor maintenance is paired with full TLB invalidation, so
+ * there is no need for any barriers here.
+ */
+static inline void __local_flush_bp_all(void)
+{
+ const int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ if (tlb_flag(TLB_V6_BP))
+ asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
+}
+
static inline void local_flush_bp_all(void)
{
const int zero = 0;
const unsigned int __tlb_flag = __cpu_tlb_flags;
+ __local_flush_bp_all();
if (tlb_flag(TLB_V7_UIS_BP))
- asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
- else if (tlb_flag(TLB_V6_BP))
asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
+}
- if (tlb_flag(TLB_BARRIER))
- isb();
+static inline void __flush_bp_all(void)
+{
+ const int zero = 0;
+ const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+ __local_flush_bp_all();
+ if (tlb_flag(TLB_V7_UIS_BP))
+ asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
}
+#include <asm/cputype.h>
#ifdef CONFIG_ARM_ERRATA_798181
+static inline int erratum_a15_798181(void)
+{
+ unsigned int midr = read_cpuid_id();
+
+ /* Cortex-A15 r0p0..r3p2 affected */
+ if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
+ return 0;
+ return 1;
+}
+
static inline void dummy_flush_tlb_a15_erratum(void)
{
/*
* Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
*/
asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
- dsb();
+ dsb(ish);
}
#else
+static inline int erratum_a15_798181(void)
+{
+ return 0;
+}
+
static inline void dummy_flush_tlb_a15_erratum(void)
{
}
@@ -479,7 +612,7 @@ static inline void flush_pmd_entry(void *pmd)
tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd);
if (tlb_flag(TLB_WB))
- dsb();
+ dsb(ishst);
}
static inline void clean_pmd_entry(void *pmd)
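
erratum_a15_798181() exists so the broadcast TLB paths can gate the dummy TLBIMVAIS workaround. A hedged sketch of the call pattern is below; apart from the two helpers defined above, the names (tlb_ops_need_broadcast(), ipi_flush_tlb_mm) are assumptions about the smp_tlb.c side of this series.

    static void flush_tlb_mm_sketch(struct mm_struct *mm)
    {
            if (tlb_ops_need_broadcast())
                    on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
            else
                    __flush_tlb_mm(mm);             /* inner-shareable variant above */

            if (erratum_a15_798181())
                    dummy_flush_tlb_a15_erratum();  /* the real code also IPIs the
                                                       other cores to do the same */
    }
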
diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/asm/types.h
new file mode 100644
index 00000000000..a53cdb8f068
--- /dev/null
+++ b/arch/arm/include/asm/types.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_TYPES_H
+#define _ASM_TYPES_H
+
+#include <asm-generic/int-ll64.h>
+
+/*
+ * The C99 types uintXX_t that are usually defined in 'stdint.h' are not as
+ * unambiguous on ARM as you would expect. For the types below, there is a
+ * difference on ARM between GCC built for bare metal ARM, GCC built for glibc
+ * and the kernel itself, which results in build errors if you try to build with
+ * -ffreestanding and include 'stdint.h' (such as when you include 'arm_neon.h'
+ * in order to use NEON intrinsics)
+ *
+ * As the typedefs for these types in 'stdint.h' are based on builtin defines
+ * supplied by GCC, we can tweak these to align with the kernel's idea of those
+ * types, so 'linux/types.h' and 'stdint.h' can be safely included from the same
+ * source file (provided that -ffreestanding is used).
+ *
+ * int32_t uint32_t uintptr_t
+ * bare metal GCC long unsigned long unsigned int
+ * glibc GCC int unsigned int unsigned int
+ * kernel int unsigned int unsigned long
+ */
+
+#ifdef __INT32_TYPE__
+#undef __INT32_TYPE__
+#define __INT32_TYPE__ int
+#endif
+
+#ifdef __UINT32_TYPE__
+#undef __UINT32_TYPE__
+#define __UINT32_TYPE__ unsigned int
+#endif
+
+#ifdef __UINTPTR_TYPE__
+#undef __UINTPTR_TYPE__
+#define __UINTPTR_TYPE__ unsigned long
+#endif
+
+#endif /* _ASM_TYPES_H */
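
With the builtin types realigned, a kernel NEON unit can include both <linux/types.h> and <arm_neon.h> under -ffreestanding without conflicting typedefs. A hypothetical example (file and function names are illustrative):

    /* neon_core.c, compiled with -mfpu=neon -ffreestanding */
    #include <linux/types.h>        /* kernel view: uintptr_t is unsigned long */
    #include <arm_neon.h>           /* pulls in stdint.h; now consistent */

    void my_neon_xor(uint32_t *dst, const uint32_t *src, int words)
    {
            for (; words >= 4; words -= 4, dst += 4, src += 4)
                    vst1q_u32(dst, veorq_u32(vld1q_u32(dst), vld1q_u32(src)));
            for (; words > 0; words--)
                    *dst++ ^= *src++;
    }
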
diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
index fa88d09fa3d..615781c6162 100644
--- a/arch/arm/include/asm/v7m.h
+++ b/arch/arm/include/asm/v7m.h
@@ -15,6 +15,10 @@
#define V7M_SCB_VTOR 0x08
+#define V7M_SCB_AIRCR 0x0c
+#define V7M_SCB_AIRCR_VECTKEY (0x05fa << 16)
+#define V7M_SCB_AIRCR_SYSRESETREQ (1 << 2)
+
#define V7M_SCB_SCR 0x10
#define V7M_SCB_SCR_SLEEPDEEP (1 << 2)
@@ -42,3 +46,11 @@
*/
#define EXC_RET_STACK_MASK 0x00000004
#define EXC_RET_THREADMODE_PROCESSSTACK 0xfffffffd
+
+#ifndef __ASSEMBLY__
+
+enum reboot_mode;
+
+void armv7m_restart(enum reboot_mode mode, const char *cmd);
+
+#endif /* __ASSEMBLY__ */
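
The AIRCR definitions above exist so the kernel can request a system reset on ARMv7-M. The new arch/arm/kernel/v7m.o is expected to implement armv7m_restart() along the following lines; treat the exact register access (BASEADDR_V7M_SCB also comes from this header) as a sketch rather than a quote of the patch.

    void armv7m_restart(enum reboot_mode mode, const char *cmd)
    {
            dsb();
            __raw_writel(V7M_SCB_AIRCR_VECTKEY | V7M_SCB_AIRCR_SYSRESETREQ,
                         BASEADDR_V7M_SCB + V7M_SCB_AIRCR);
            dsb();
    }
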
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index 50af92bac73..4371f45c578 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -29,6 +29,7 @@
#define BOOT_CPU_MODE_MISMATCH PSR_N_BIT
#ifndef __ASSEMBLY__
+#include <asm/cacheflush.h>
#ifdef CONFIG_ARM_VIRT_EXT
/*
@@ -41,10 +42,21 @@
*/
extern int __boot_cpu_mode;
+static inline void sync_boot_mode(void)
+{
+ /*
+ * As secondaries write to __boot_cpu_mode with caches disabled, we
+ * must flush the corresponding cache entries to ensure the visibility
+ * of their writes.
+ */
+ sync_cache_r(&__boot_cpu_mode);
+}
+
void __hyp_set_vectors(unsigned long phys_vector_base);
unsigned long __hyp_get_vectors(void);
#else
#define __boot_cpu_mode (SVC_MODE)
+#define sync_boot_mode()
#endif
#ifndef ZIMAGE
diff --git a/arch/arm/include/asm/xor.h b/arch/arm/include/asm/xor.h
index 7604673dc42..4ffb26d4cad 100644
--- a/arch/arm/include/asm/xor.h
+++ b/arch/arm/include/asm/xor.h
@@ -7,7 +7,10 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/hardirq.h>
#include <asm-generic/xor.h>
+#include <asm/hwcap.h>
+#include <asm/neon.h>
#define __XOR(a1, a2) a1 ^= a2
@@ -138,4 +141,74 @@ static struct xor_block_template xor_block_arm4regs = {
xor_speed(&xor_block_arm4regs); \
xor_speed(&xor_block_8regs); \
xor_speed(&xor_block_32regs); \
+ NEON_TEMPLATES; \
} while (0)
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+extern struct xor_block_template const xor_block_neon_inner;
+
+static void
+xor_neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ if (in_interrupt()) {
+ xor_arm4regs_2(bytes, p1, p2);
+ } else {
+ kernel_neon_begin();
+ xor_block_neon_inner.do_2(bytes, p1, p2);
+ kernel_neon_end();
+ }
+}
+
+static void
+xor_neon_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ if (in_interrupt()) {
+ xor_arm4regs_3(bytes, p1, p2, p3);
+ } else {
+ kernel_neon_begin();
+ xor_block_neon_inner.do_3(bytes, p1, p2, p3);
+ kernel_neon_end();
+ }
+}
+
+static void
+xor_neon_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ if (in_interrupt()) {
+ xor_arm4regs_4(bytes, p1, p2, p3, p4);
+ } else {
+ kernel_neon_begin();
+ xor_block_neon_inner.do_4(bytes, p1, p2, p3, p4);
+ kernel_neon_end();
+ }
+}
+
+static void
+xor_neon_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ if (in_interrupt()) {
+ xor_arm4regs_5(bytes, p1, p2, p3, p4, p5);
+ } else {
+ kernel_neon_begin();
+ xor_block_neon_inner.do_5(bytes, p1, p2, p3, p4, p5);
+ kernel_neon_end();
+ }
+}
+
+static struct xor_block_template xor_block_neon = {
+ .name = "neon",
+ .do_2 = xor_neon_2,
+ .do_3 = xor_neon_3,
+ .do_4 = xor_neon_4,
+ .do_5 = xor_neon_5
+};
+
+#define NEON_TEMPLATES \
+ do { if (cpu_has_neon()) xor_speed(&xor_block_neon); } while (0)
+#else
+#define NEON_TEMPLATES
+#endif
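
xor_block_neon_inner is expected to come from a separate object built with -mfpu=neon; the wrappers above only bracket it with kernel_neon_begin()/kernel_neon_end() and fall back to the ARM templates in interrupt context. A sketch of the minimal shape such a unit takes (the real tree adds arch/arm/lib/xor-neon.c for this; everything here other than the exported symbol name is an assumption):

    #include <linux/raid/xor.h>

    static void neon_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
    {
            unsigned long i, words = bytes / sizeof(unsigned long);

            for (i = 0; i < words; i++)     /* auto-vectorised at -O2 with -mfpu=neon */
                    p1[i] ^= p2[i];
    }

    /* .do_3/.do_4/.do_5 follow the same pattern */
    struct xor_block_template const xor_block_neon_inner = {
            .name = "__inner_neon__",
            .do_2 = neon_2,
    };
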
diff --git a/arch/arm/include/debug/8250.S b/arch/arm/include/debug/8250.S
new file mode 100644
index 00000000000..7a2baf913aa
--- /dev/null
+++ b/arch/arm/include/debug/8250.S
@@ -0,0 +1,54 @@
+/*
+ * arch/arm/include/debug/8250.S
+ *
+ * Copyright (C) 1994-2013 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/serial_reg.h>
+
+ .macro addruart, rp, rv, tmp
+ ldr \rp, =CONFIG_DEBUG_UART_PHYS
+ ldr \rv, =CONFIG_DEBUG_UART_VIRT
+ .endm
+
+#ifdef CONFIG_DEBUG_UART_8250_WORD
+ .macro store, rd, rx:vararg
+ str \rd, \rx
+ .endm
+
+ .macro load, rd, rx:vararg
+ ldr \rd, \rx
+ .endm
+#else
+ .macro store, rd, rx:vararg
+ strb \rd, \rx
+ .endm
+
+ .macro load, rd, rx:vararg
+ ldrb \rd, \rx
+ .endm
+#endif
+
+#define UART_SHIFT CONFIG_DEBUG_UART_8250_SHIFT
+
+ .macro senduart,rd,rx
+ store \rd, [\rx, #UART_TX << UART_SHIFT]
+ .endm
+
+ .macro busyuart,rd,rx
+1002: load \rd, [\rx, #UART_LSR << UART_SHIFT]
+ and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
+ teq \rd, #UART_LSR_TEMT | UART_LSR_THRE
+ bne 1002b
+ .endm
+
+ .macro waituart,rd,rx
+#ifdef CONFIG_DEBUG_UART_8250_FLOW_CONTROL
+1001: load \rd, [\rx, #UART_MSR << UART_SHIFT]
+ tst \rd, #UART_MSR_CTS
+ beq 1001b
+#endif
+ .endm
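
The per-platform constants that the deleted debug headers used to hard-code now come from Kconfig. As an illustration (symbol spellings follow this series' Kconfig.debug rework and should be treated as assumptions), a Keystone UART0 setup using the addresses from the removed debug/keystone.S below would look roughly like:

    CONFIG_DEBUG_LL=y
    CONFIG_DEBUG_UART_8250=y
    CONFIG_DEBUG_UART_PHYS=0x02530c00
    CONFIG_DEBUG_UART_VIRT=0xfeb30c00
    CONFIG_DEBUG_UART_8250_SHIFT=2
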
diff --git a/arch/arm/include/debug/8250_32.S b/arch/arm/include/debug/8250_32.S
deleted file mode 100644
index 8db01eeabbb..00000000000
--- a/arch/arm/include/debug/8250_32.S
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Copyright (c) 2011 Picochip Ltd., Jamie Iles
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Derived from arch/arm/mach-davinci/include/mach/debug-macro.S to use 32-bit
- * accesses to the 8250.
- */
-
-#include <linux/serial_reg.h>
-
- .macro senduart,rd,rx
- str \rd, [\rx, #UART_TX << UART_SHIFT]
- .endm
-
- .macro busyuart,rd,rx
-1002: ldr \rd, [\rx, #UART_LSR << UART_SHIFT]
- and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
- teq \rd, #UART_LSR_TEMT | UART_LSR_THRE
- bne 1002b
- .endm
-
- /* The UART's don't have any flow control IO's wired up. */
- .macro waituart,rd,rx
- .endm
diff --git a/arch/arm/include/debug/bcm2835.S b/arch/arm/include/debug/bcm2835.S
deleted file mode 100644
index aed9199bd84..00000000000
--- a/arch/arm/include/debug/bcm2835.S
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Debugging macro include header
- *
- * Copyright (C) 2010 Broadcom
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#define BCM2835_DEBUG_PHYS 0x20201000
-#define BCM2835_DEBUG_VIRT 0xf0201000
-
- .macro addruart, rp, rv, tmp
- ldr \rp, =BCM2835_DEBUG_PHYS
- ldr \rv, =BCM2835_DEBUG_VIRT
- .endm
-
-#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/debug/cns3xxx.S b/arch/arm/include/debug/cns3xxx.S
deleted file mode 100644
index d04c150baa1..00000000000
--- a/arch/arm/include/debug/cns3xxx.S
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Debugging macro include header
- *
- * Copyright 1994-1999 Russell King
- * Copyright 2008 Cavium Networks
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- */
-
- .macro addruart,rp,rv,tmp
- mov \rp, #0x00009000
- orr \rv, \rp, #0xf0000000 @ virtual base
- orr \rp, \rp, #0x10000000
- .endm
-
-#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/debug/highbank.S b/arch/arm/include/debug/highbank.S
deleted file mode 100644
index 8cad4322a5a..00000000000
--- a/arch/arm/include/debug/highbank.S
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
- .macro addruart,rp,rv,tmp
- ldr \rv, =0xfee36000
- ldr \rp, =0xfff36000
- .endm
-
-#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/debug/keystone.S b/arch/arm/include/debug/keystone.S
deleted file mode 100644
index 9aef9ba3f4f..00000000000
--- a/arch/arm/include/debug/keystone.S
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Early serial debug output macro for Keystone SOCs
- *
- * Copyright 2013 Texas Instruments, Inc.
- * Santosh Shilimkar <santosh.shilimkar@ti.com>
- *
- * Based on RMKs low level debug code.
- * Copyright (C) 1994-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/serial_reg.h>
-
-#define UART_SHIFT 2
-#if defined(CONFIG_DEBUG_KEYSTONE_UART0)
-#define UART_PHYS 0x02530c00
-#define UART_VIRT 0xfeb30c00
-#elif defined(CONFIG_DEBUG_KEYSTONE_UART1)
-#define UART_PHYS 0x02531000
-#define UART_VIRT 0xfeb31000
-#endif
-
- .macro addruart, rp, rv, tmp
- ldr \rv, =UART_VIRT @ physical base address
- ldr \rp, =UART_PHYS @ virtual base address
- .endm
-
- .macro senduart,rd,rx
- str \rd, [\rx, #UART_TX << UART_SHIFT]
- .endm
-
- .macro busyuart,rd,rx
-1002: ldr \rd, [\rx, #UART_LSR << UART_SHIFT]
- and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
- teq \rd, #UART_LSR_TEMT | UART_LSR_THRE
- bne 1002b
- .endm
-
- .macro waituart,rd,rx
- .endm
diff --git a/arch/arm/include/debug/mvebu.S b/arch/arm/include/debug/mvebu.S
deleted file mode 100644
index 6517311a1c9..00000000000
--- a/arch/arm/include/debug/mvebu.S
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Early serial output macro for Marvell SoC
- *
- * Copyright (C) 2012 Marvell
- *
- * Lior Amsalem <alior@marvell.com>
- * Gregory Clement <gregory.clement@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#ifdef CONFIG_DEBUG_MVEBU_UART_ALTERNATE
-#define ARMADA_370_XP_REGS_PHYS_BASE 0xf1000000
-#else
-#define ARMADA_370_XP_REGS_PHYS_BASE 0xd0000000
-#endif
-
-#define ARMADA_370_XP_REGS_VIRT_BASE 0xfec00000
-
- .macro addruart, rp, rv, tmp
- ldr \rp, =ARMADA_370_XP_REGS_PHYS_BASE
- ldr \rv, =ARMADA_370_XP_REGS_VIRT_BASE
- orr \rp, \rp, #0x00012000
- orr \rv, \rv, #0x00012000
- .endm
-
-#define UART_SHIFT 2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/include/debug/mxs.S b/arch/arm/include/debug/mxs.S
deleted file mode 100644
index d86951551ca..00000000000
--- a/arch/arm/include/debug/mxs.S
+++ /dev/null
@@ -1,27 +0,0 @@
-/* arch/arm/mach-mxs/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#ifdef CONFIG_DEBUG_IMX23_UART
-#define UART_PADDR 0x80070000
-#elif defined (CONFIG_DEBUG_IMX28_UART)
-#define UART_PADDR 0x80074000
-#endif
-
-#define UART_VADDR 0xfe100000
-
- .macro addruart, rp, rv, tmp
- ldr \rp, =UART_PADDR @ physical
- ldr \rv, =UART_VADDR @ virtual
- .endm
-
-#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/debug/nomadik.S b/arch/arm/include/debug/nomadik.S
deleted file mode 100644
index 735417922ce..00000000000
--- a/arch/arm/include/debug/nomadik.S
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-*/
-
- .macro addruart, rp, rv, tmp
- mov \rp, #0x00100000
- add \rp, \rp, #0x000fb000
- add \rv, \rp, #0xf0000000 @ virtual base
- add \rp, \rp, #0x10000000 @ physical base address
- .endm
-
-#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/debug/nspire.S b/arch/arm/include/debug/nspire.S
deleted file mode 100644
index 886fd276fcb..00000000000
--- a/arch/arm/include/debug/nspire.S
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * linux/arch/arm/include/debug/nspire.S
- *
- * Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2, as
- * published by the Free Software Foundation.
- *
- */
-
-#define NSPIRE_EARLY_UART_PHYS_BASE 0x90020000
-#define NSPIRE_EARLY_UART_VIRT_BASE 0xfee20000
-
-.macro addruart, rp, rv, tmp
- ldr \rp, =(NSPIRE_EARLY_UART_PHYS_BASE) @ physical base address
- ldr \rv, =(NSPIRE_EARLY_UART_VIRT_BASE) @ virtual base address
-.endm
-
-
-#ifdef CONFIG_DEBUG_NSPIRE_CX_UART
-#include <asm/hardware/debug-pl01x.S>
-#endif
-
-#ifdef CONFIG_DEBUG_NSPIRE_CLASSIC_UART
-#define UART_SHIFT 2
-#include <asm/hardware/debug-8250.S>
-#endif
diff --git a/arch/arm/include/debug/picoxcell.S b/arch/arm/include/debug/picoxcell.S
deleted file mode 100644
index bc1f07c49cd..00000000000
--- a/arch/arm/include/debug/picoxcell.S
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2011 Picochip Ltd., Jamie Iles
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#define UART_SHIFT 2
-#define PICOXCELL_UART1_BASE 0x80230000
-#define PHYS_TO_IO(x) (((x) & 0x00ffffff) | 0xfe000000)
-
- .macro addruart, rp, rv, tmp
- ldr \rv, =PHYS_TO_IO(PICOXCELL_UART1_BASE)
- ldr \rp, =PICOXCELL_UART1_BASE
- .endm
-
-#include "8250_32.S"
diff --git a/arch/arm/include/asm/hardware/debug-pl01x.S b/arch/arm/include/debug/pl01x.S
index f9fd083eff6..37c6895b87e 100644
--- a/arch/arm/include/asm/hardware/debug-pl01x.S
+++ b/arch/arm/include/debug/pl01x.S
@@ -1,4 +1,4 @@
-/* arch/arm/include/asm/hardware/debug-pl01x.S
+/* arch/arm/include/debug/pl01x.S
*
* Debugging macro include header
*
@@ -12,6 +12,13 @@
*/
#include <linux/amba/serial.h>
+#ifdef CONFIG_DEBUG_UART_PHYS
+ .macro addruart, rp, rv, tmp
+ ldr \rp, =CONFIG_DEBUG_UART_PHYS
+ ldr \rv, =CONFIG_DEBUG_UART_VIRT
+ .endm
+#endif
+
.macro senduart,rd,rx
strb \rd, [\rx, #UART01x_DR]
.endm
diff --git a/arch/arm/include/debug/pxa.S b/arch/arm/include/debug/pxa.S
deleted file mode 100644
index e1e795aa3d7..00000000000
--- a/arch/arm/include/debug/pxa.S
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Early serial output macro for Marvell PXA/MMP SoC
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * Copyright (C) 2013 Haojian Zhuang
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#if defined(CONFIG_DEBUG_PXA_UART1)
-#define PXA_UART_REG_PHYS_BASE 0x40100000
-#define PXA_UART_REG_VIRT_BASE 0xf2100000
-#elif defined(CONFIG_DEBUG_MMP_UART2)
-#define PXA_UART_REG_PHYS_BASE 0xd4017000
-#define PXA_UART_REG_VIRT_BASE 0xfe017000
-#elif defined(CONFIG_DEBUG_MMP_UART3)
-#define PXA_UART_REG_PHYS_BASE 0xd4018000
-#define PXA_UART_REG_VIRT_BASE 0xfe018000
-#else
-#error "Select uart for DEBUG_LL"
-#endif
-
- .macro addruart, rp, rv, tmp
- ldr \rp, =PXA_UART_REG_PHYS_BASE
- ldr \rv, =PXA_UART_REG_VIRT_BASE
- .endm
-
-#define UART_SHIFT 2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/include/debug/rockchip.S b/arch/arm/include/debug/rockchip.S
deleted file mode 100644
index cfd883e6958..00000000000
--- a/arch/arm/include/debug/rockchip.S
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Early serial output macro for Rockchip SoCs
- *
- * Copyright (C) 2012 Maxime Ripard
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#if defined(CONFIG_DEBUG_RK29_UART0)
-#define ROCKCHIP_UART_DEBUG_PHYS_BASE 0x20060000
-#define ROCKCHIP_UART_DEBUG_VIRT_BASE 0xfed60000
-#elif defined(CONFIG_DEBUG_RK29_UART1)
-#define ROCKCHIP_UART_DEBUG_PHYS_BASE 0x20064000
-#define ROCKCHIP_UART_DEBUG_VIRT_BASE 0xfed64000
-#elif defined(CONFIG_DEBUG_RK29_UART2)
-#define ROCKCHIP_UART_DEBUG_PHYS_BASE 0x20068000
-#define ROCKCHIP_UART_DEBUG_VIRT_BASE 0xfed68000
-#elif defined(CONFIG_DEBUG_RK3X_UART0)
-#define ROCKCHIP_UART_DEBUG_PHYS_BASE 0x10124000
-#define ROCKCHIP_UART_DEBUG_VIRT_BASE 0xfeb24000
-#elif defined(CONFIG_DEBUG_RK3X_UART1)
-#define ROCKCHIP_UART_DEBUG_PHYS_BASE 0x10126000
-#define ROCKCHIP_UART_DEBUG_VIRT_BASE 0xfeb26000
-#elif defined(CONFIG_DEBUG_RK3X_UART2)
-#define ROCKCHIP_UART_DEBUG_PHYS_BASE 0x20064000
-#define ROCKCHIP_UART_DEBUG_VIRT_BASE 0xfed64000
-#elif defined(CONFIG_DEBUG_RK3X_UART3)
-#define ROCKCHIP_UART_DEBUG_PHYS_BASE 0x20068000
-#define ROCKCHIP_UART_DEBUG_VIRT_BASE 0xfed68000
-#endif
-
- .macro addruart, rp, rv, tmp
- ldr \rp, =ROCKCHIP_UART_DEBUG_PHYS_BASE
- ldr \rv, =ROCKCHIP_UART_DEBUG_VIRT_BASE
- .endm
-
-#define UART_SHIFT 2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/include/debug/socfpga.S b/arch/arm/include/debug/socfpga.S
deleted file mode 100644
index 966b2f99494..00000000000
--- a/arch/arm/include/debug/socfpga.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#define UART_SHIFT 2
-#define DEBUG_LL_UART_OFFSET 0x00002000
-
- .macro addruart, rp, rv, tmp
- mov \rp, #DEBUG_LL_UART_OFFSET
- orr \rp, \rp, #0x00c00000
- orr \rv, \rp, #0xfe000000 @ virtual base
- orr \rp, \rp, #0xff000000 @ physical base
- .endm
-
-#include "8250_32.S"
-
diff --git a/arch/arm/include/debug/sunxi.S b/arch/arm/include/debug/sunxi.S
deleted file mode 100644
index 04eb56d5db2..00000000000
--- a/arch/arm/include/debug/sunxi.S
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * Early serial output macro for Allwinner A1X SoCs
- *
- * Copyright (C) 2012 Maxime Ripard
- *
- * Maxime Ripard <maxime.ripard@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#if defined(CONFIG_DEBUG_SUNXI_UART0)
-#define SUNXI_UART_DEBUG_PHYS_BASE 0x01c28000
-#define SUNXI_UART_DEBUG_VIRT_BASE 0xf1c28000
-#elif defined(CONFIG_DEBUG_SUNXI_UART1)
-#define SUNXI_UART_DEBUG_PHYS_BASE 0x01c28400
-#define SUNXI_UART_DEBUG_VIRT_BASE 0xf1c28400
-#endif
-
- .macro addruart, rp, rv, tmp
- ldr \rp, =SUNXI_UART_DEBUG_PHYS_BASE
- ldr \rv, =SUNXI_UART_DEBUG_VIRT_BASE
- .endm
-
-#define UART_SHIFT 2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/include/debug/tegra.S b/arch/arm/include/debug/tegra.S
index 883d7c22fd9..be6a720dd18 100644
--- a/arch/arm/include/debug/tegra.S
+++ b/arch/arm/include/debug/tegra.S
@@ -221,3 +221,32 @@
1002:
#endif
.endm
+
+/*
+ * Storage for the state maintained by the macros above.
+ *
+ * In the kernel proper, this data is located in arch/arm/mach-tegra/common.c.
+ * That's because this header is included from multiple files, and we only
+ * want a single copy of the data. In particular, the UART probing code above
+ * assumes it's running using physical addresses. This is true when this file
+ * is included from head.o, but not when included from debug.o. So we need
+ * to share the probe results between the two copies, rather than having
+ * to re-run the probing again later.
+ *
+ * In the decompressor, we put the symbol/storage right here, since common.c
+ * isn't included in the decompressor build. This symbol gets put in .text
+ * even though it's really data, since .data is discarded from the
+ * decompressor. Luckily, .text is writeable in the decompressor, unless
+ * CONFIG_ZBOOT_ROM. That dependency is handled in arch/arm/Kconfig.debug.
+ */
+#if defined(ZIMAGE)
+tegra_uart_config:
+ /* Debug UART initialization required */
+ .word 1
+ /* Debug UART physical address */
+ .word 0
+ /* Debug UART virtual address */
+ .word 0
+ /* Scratch space for debug macro */
+ .word 0
+#endif
diff --git a/arch/arm/include/debug/u300.S b/arch/arm/include/debug/u300.S
deleted file mode 100644
index 6f04f08a203..00000000000
--- a/arch/arm/include/debug/u300.S
+++ /dev/null
@@ -1,18 +0,0 @@
-/*
- * Copyright (C) 2006-2013 ST-Ericsson AB
- * License terms: GNU General Public License (GPL) version 2
- * Debugging macro include header.
- * Author: Linus Walleij <linus.walleij@stericsson.com>
- */
-#define U300_SLOW_PER_PHYS_BASE 0xc0010000
-#define U300_SLOW_PER_VIRT_BASE 0xff000000
-
- .macro addruart, rp, rv, tmp
- /* If we move the address using MMU, use this. */
- ldr \rp, = U300_SLOW_PER_PHYS_BASE @ MMU off, physical address
- ldr \rv, = U300_SLOW_PER_VIRT_BASE @ MMU on, virtual address
- orr \rp, \rp, #0x00003000
- orr \rv, \rv, #0x00003000
- .endm
-
-#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/include/debug/ux500.S b/arch/arm/include/debug/ux500.S
index fbd24beeb1f..aa7f63a8b5e 100644
--- a/arch/arm/include/debug/ux500.S
+++ b/arch/arm/include/debug/ux500.S
@@ -45,4 +45,4 @@
ldr \rv, =UART_VIRT_BASE @ yes, virtual address
.endm
-#include <asm/hardware/debug-pl01x.S>
+#include <debug/pl01x.S>
diff --git a/arch/arm/include/debug/vexpress.S b/arch/arm/include/debug/vexpress.S
index acafb229e2b..524acd5a223 100644
--- a/arch/arm/include/debug/vexpress.S
+++ b/arch/arm/include/debug/vexpress.S
@@ -47,51 +47,5 @@
.endm
-#include <asm/hardware/debug-pl01x.S>
-
-#elif defined(CONFIG_DEBUG_VEXPRESS_UART0_CA9)
-
- .macro addruart,rp,rv,tmp
- mov \rp, #DEBUG_LL_UART_OFFSET
- orr \rv, \rp, #DEBUG_LL_VIRT_BASE
- orr \rp, \rp, #DEBUG_LL_PHYS_BASE
- .endm
-
-#include <asm/hardware/debug-pl01x.S>
-
-#elif defined(CONFIG_DEBUG_VEXPRESS_UART0_RS1)
-
- .macro addruart,rp,rv,tmp
- mov \rp, #DEBUG_LL_UART_OFFSET_RS1
- orr \rv, \rp, #DEBUG_LL_VIRT_BASE
- orr \rp, \rp, #DEBUG_LL_PHYS_BASE_RS1
- .endm
-
-#include <asm/hardware/debug-pl01x.S>
-
-#elif defined(CONFIG_DEBUG_VEXPRESS_UART0_CRX)
-
- .macro addruart,rp,tmp,tmp2
- ldr \rp, =DEBUG_LL_UART_PHYS_CRX
- .endm
-
-#include <asm/hardware/debug-pl01x.S>
-
-#else /* CONFIG_DEBUG_LL_UART_NONE */
-
- .macro addruart, rp, rv, tmp
- /* Safe dummy values */
- mov \rp, #0
- mov \rv, #DEBUG_LL_VIRT_BASE
- .endm
-
- .macro senduart,rd,rx
- .endm
-
- .macro waituart,rd,rx
- .endm
-
- .macro busyuart,rd,rx
- .endm
-
+#include <debug/pl01x.S>
#endif
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 47bcb2d254a..18d76fd5a2a 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -1,7 +1,6 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-header-y += a.out.h
header-y += byteorder.h
header-y += fcntl.h
header-y += hwcap.h
diff --git a/arch/arm/include/uapi/asm/a.out.h b/arch/arm/include/uapi/asm/a.out.h
deleted file mode 100644
index 083894b2e3b..00000000000
--- a/arch/arm/include/uapi/asm/a.out.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#ifndef __ARM_A_OUT_H__
-#define __ARM_A_OUT_H__
-
-#include <linux/personality.h>
-#include <linux/types.h>
-
-struct exec
-{
- __u32 a_info; /* Use macros N_MAGIC, etc for access */
- __u32 a_text; /* length of text, in bytes */
- __u32 a_data; /* length of data, in bytes */
- __u32 a_bss; /* length of uninitialized data area for file, in bytes */
- __u32 a_syms; /* length of symbol table data in file, in bytes */
- __u32 a_entry; /* start address */
- __u32 a_trsize; /* length of relocation info for text, in bytes */
- __u32 a_drsize; /* length of relocation info for data, in bytes */
-};
-
-/*
- * This is always the same
- */
-#define N_TXTADDR(a) (0x00008000)
-
-#define N_TRSIZE(a) ((a).a_trsize)
-#define N_DRSIZE(a) ((a).a_drsize)
-#define N_SYMSIZE(a) ((a).a_syms)
-
-#define M_ARM 103
-
-#ifndef LIBRARY_START_TEXT
-#define LIBRARY_START_TEXT (0x00c00000)
-#endif
-
-#endif /* __A_OUT_GNU_H__ */
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 86d10dd47dc..5140df5f23a 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -24,7 +24,7 @@ obj-$(CONFIG_ATAGS_PROC) += atags_proc.o
obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o
ifeq ($(CONFIG_CPU_V7M),y)
-obj-y += entry-v7m.o
+obj-y += entry-v7m.o v7m.o
else
obj-y += entry-armv.o
endif
diff --git a/arch/arm/kernel/atags.h b/arch/arm/kernel/atags.h
index 9edc9692332..ec4164da6e3 100644
--- a/arch/arm/kernel/atags.h
+++ b/arch/arm/kernel/atags.h
@@ -7,9 +7,10 @@ static inline void save_atags(struct tag *tags) { }
void convert_to_tag_list(struct tag *tags);
#ifdef CONFIG_ATAGS
-struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr);
+const struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer,
+ unsigned int machine_nr);
#else
-static inline struct machine_desc *
+static inline const struct machine_desc *
setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
{
early_print("no ATAGS support: can't continue\n");
diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c
index 14512e6931d..8c14de8180c 100644
--- a/arch/arm/kernel/atags_parse.c
+++ b/arch/arm/kernel/atags_parse.c
@@ -178,11 +178,11 @@ static void __init squash_mem_tags(struct tag *tag)
tag->hdr.tag = ATAG_NONE;
}
-struct machine_desc * __init setup_machine_tags(phys_addr_t __atags_pointer,
- unsigned int machine_nr)
+const struct machine_desc * __init
+setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
{
struct tag *tags = (struct tag *)&default_tags;
- struct machine_desc *mdesc = NULL, *p;
+ const struct machine_desc *mdesc = NULL, *p;
char *from = default_command_line;
default_tags.mem.start = PHYS_OFFSET;
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 261fcc82616..88e14d74b6d 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -525,11 +525,6 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
* Assign resources.
*/
pci_bus_assign_resources(bus);
-
- /*
- * Enable bridges
- */
- pci_enable_bridges(bus);
}
/*
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 5859c8bc727..f35906b3d8c 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -169,6 +169,11 @@ void __init arm_dt_init_cpu_maps(void)
}
}
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+ return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu);
+}
+
/**
* setup_machine_fdt - Machine setup when an dtb was passed to the kernel
* @dt_phys: physical address of dt blob
@@ -176,10 +181,10 @@ void __init arm_dt_init_cpu_maps(void)
* If a dtb was passed to the kernel in r2, then use it to choose the
* correct machine_desc and to setup the system.
*/
-struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
+const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
{
struct boot_param_header *devtree;
- struct machine_desc *mdesc, *mdesc_best = NULL;
+ const struct machine_desc *mdesc, *mdesc_best = NULL;
unsigned int score, mdesc_score = ~1;
unsigned long dt_root;
const char *model;
@@ -188,7 +193,7 @@ struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
DT_MACHINE_START(GENERIC_DT, "Generic DT based system")
MACHINE_END
- mdesc_best = (struct machine_desc *)&__mach_desc_GENERIC_DT;
+ mdesc_best = &__mach_desc_GENERIC_DT;
#endif
if (!dt_phys)
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index a39cfc2a1f9..9cbe70c8b0e 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -357,7 +357,8 @@ ENDPROC(__pabt_svc)
.endm
.macro kuser_cmpxchg_check
-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
+ !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
@@ -742,6 +743,18 @@ ENDPROC(__switch_to)
#endif
.endm
+ .macro kuser_pad, sym, size
+ .if (. - \sym) & 3
+ .rept 4 - (. - \sym) & 3
+ .byte 0
+ .endr
+ .endif
+ .rept (\size - (. - \sym)) / 4
+ .word 0xe7fddef1
+ .endr
+ .endm
+
+#ifdef CONFIG_KUSER_HELPERS
.align 5
.globl __kuser_helper_start
__kuser_helper_start:
@@ -832,18 +845,13 @@ kuser_cmpxchg64_fixup:
#error "incoherent kernel configuration"
#endif
- /* pad to next slot */
- .rept (16 - (. - __kuser_cmpxchg64)/4)
- .word 0
- .endr
-
- .align 5
+ kuser_pad __kuser_cmpxchg64, 64
__kuser_memory_barrier: @ 0xffff0fa0
smp_dmb arm
usr_ret lr
- .align 5
+ kuser_pad __kuser_memory_barrier, 32
__kuser_cmpxchg: @ 0xffff0fc0
@@ -916,13 +924,14 @@ kuser_cmpxchg32_fixup:
#endif
- .align 5
+ kuser_pad __kuser_cmpxchg, 32
__kuser_get_tls: @ 0xffff0fe0
ldr r0, [pc, #(16 - 8)] @ read TLS, set in kuser_get_tls_init
usr_ret lr
mrc p15, 0, r0, c13, c0, 3 @ 0xffff0fe8 hardware TLS code
- .rep 4
+ kuser_pad __kuser_get_tls, 16
+ .rep 3
.word 0 @ 0xffff0ff0 software TLS value, then
.endr @ pad up to __kuser_helper_version
@@ -932,14 +941,16 @@ __kuser_helper_version: @ 0xffff0ffc
.globl __kuser_helper_end
__kuser_helper_end:
+#endif
+
THUMB( .thumb )
/*
* Vector stubs.
*
- * This code is copied to 0xffff0200 so we can use branches in the
- * vectors, rather than ldr's. Note that this code must not
- * exceed 0x300 bytes.
+ * This code is copied to 0xffff1000 so we can use branches in the
+ * vectors, rather than ldr's. Note that this code must not exceed
+ * a page size.
*
* Common stub entry macro:
* Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
@@ -986,8 +997,17 @@ ENDPROC(vector_\name)
1:
.endm
- .globl __stubs_start
+ .section .stubs, "ax", %progbits
__stubs_start:
+ @ This must be the first word
+ .word vector_swi
+
+vector_rst:
+ ARM( swi SYS_ERROR0 )
+ THUMB( svc #0 )
+ THUMB( nop )
+ b vector_und
+
/*
* Interrupt dispatcher
*/
@@ -1082,6 +1102,16 @@ __stubs_start:
.align 5
/*=============================================================================
+ * Address exception handler
+ *-----------------------------------------------------------------------------
+ * These aren't too critical.
+ * (they're not supposed to happen, and won't happen in 32-bit data mode).
+ */
+
+vector_addrexcptn:
+ b vector_addrexcptn
+
+/*=============================================================================
* Undefined FIQs
*-----------------------------------------------------------------------------
* Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
@@ -1094,45 +1124,19 @@ __stubs_start:
vector_fiq:
subs pc, lr, #4
-/*=============================================================================
- * Address exception handler
- *-----------------------------------------------------------------------------
- * These aren't too critical.
- * (they're not supposed to happen, and won't happen in 32-bit data mode).
- */
-
-vector_addrexcptn:
- b vector_addrexcptn
-
-/*
- * We group all the following data together to optimise
- * for CPUs with separate I & D caches.
- */
- .align 5
-
-.LCvswi:
- .word vector_swi
-
- .globl __stubs_end
-__stubs_end:
-
- .equ stubs_offset, __vectors_start + 0x200 - __stubs_start
+ .globl vector_fiq_offset
+ .equ vector_fiq_offset, vector_fiq
- .globl __vectors_start
+ .section .vectors, "ax", %progbits
__vectors_start:
- ARM( swi SYS_ERROR0 )
- THUMB( svc #0 )
- THUMB( nop )
- W(b) vector_und + stubs_offset
- W(ldr) pc, .LCvswi + stubs_offset
- W(b) vector_pabt + stubs_offset
- W(b) vector_dabt + stubs_offset
- W(b) vector_addrexcptn + stubs_offset
- W(b) vector_irq + stubs_offset
- W(b) vector_fiq + stubs_offset
-
- .globl __vectors_end
-__vectors_end:
+ W(b) vector_rst
+ W(b) vector_und
+ W(ldr) pc, __vectors_start + 0x1000
+ W(b) vector_pabt
+ W(b) vector_dabt
+ W(b) vector_addrexcptn
+ W(b) vector_irq
+ W(b) vector_fiq
.data
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 94104bf6971..74ad15d1a06 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -442,10 +442,10 @@ local_restart:
ldrcc pc, [tbl, scno, lsl #2] @ call sys_* routine
add r1, sp, #S_OFF
-2: mov why, #0 @ no longer a real syscall
cmp scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back
- bcs arm_syscall
+ bcs arm_syscall
+2: mov why, #0 @ no longer a real syscall
b sys_ni_syscall @ not private func
#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index e00621f1403..52b26432c9a 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -49,7 +49,7 @@ __irq_entry:
mov r1, sp
stmdb sp!, {lr}
@ routine called with r0 = irq number, r1 = struct pt_regs *
- bl nvic_do_IRQ
+ bl nvic_handle_irq
pop {lr}
@
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 2adda11f712..918875d96d5 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -47,6 +47,11 @@
#include <asm/irq.h>
#include <asm/traps.h>
+#define FIQ_OFFSET ({ \
+ extern void *vector_fiq_offset; \
+ (unsigned)&vector_fiq_offset; \
+ })
+
static unsigned long no_fiq_insn;
/* Default reacquire function
@@ -79,14 +84,14 @@ int show_fiq_list(struct seq_file *p, int prec)
void set_fiq_handler(void *start, unsigned int length)
{
-#if defined(CONFIG_CPU_USE_DOMAINS)
- memcpy((void *)0xffff001c, start, length);
-#else
- memcpy(vectors_page + 0x1c, start, length);
-#endif
- flush_icache_range(0xffff001c, 0xffff001c + length);
- if (!vectors_high())
- flush_icache_range(0x1c, 0x1c + length);
+ void *base = vectors_page;
+ unsigned offset = FIQ_OFFSET;
+
+ memcpy(base + offset, start, length);
+ if (!cache_is_vipt_nonaliasing())
+ flush_icache_range((unsigned long)base + offset,
+ (unsigned long)base + offset + length);
+ flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
}
int claim_fiq(struct fiq_handler *f)
@@ -144,6 +149,7 @@ EXPORT_SYMBOL(disable_fiq);
void __init init_FIQ(int start)
{
- no_fiq_insn = *(unsigned long *)0xffff001c;
+ unsigned offset = FIQ_OFFSET;
+ no_fiq_insn = *(unsigned long *)(0xffff0000 + offset);
fiq_start = start;
}
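
The driver-facing FIQ API is unchanged by this rework; only where the handler bytes land (FIQ_OFFSET into the vectors page) moves. For reference, a hedged sketch of the usual installation sequence, with hypothetical handler symbols:

    #include <asm/fiq.h>

    extern unsigned char my_fiq_start, my_fiq_end;          /* asm routine */
    static struct fiq_handler my_fh = { .name = "my-driver-fiq" };

    static int my_install_fiq(int fiq_irq)
    {
            if (claim_fiq(&my_fh))
                    return -EBUSY;
            set_fiq_handler(&my_fiq_start, &my_fiq_end - &my_fiq_start);
            enable_fiq(fiq_irq);
            return 0;
    }
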
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index b361de14375..14235ba64a9 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -87,6 +87,7 @@ ENTRY(stext)
ENDPROC(stext)
#ifdef CONFIG_SMP
+ .text
ENTRY(secondary_startup)
/*
* Common entry point for secondary CPUs.
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 9cf6063020a..2c7cc1e0347 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -343,6 +343,7 @@ __turn_mmu_on_loc:
.long __turn_mmu_on_end
#if defined(CONFIG_SMP)
+ .text
ENTRY(secondary_startup)
/*
* Common entry point for secondary CPUs.
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 4910232c483..797b1a6a490 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -56,8 +56,8 @@ ENTRY(__boot_cpu_mode)
ldr \reg3, [\reg2]
ldr \reg1, [\reg2, \reg3]
cmp \mode, \reg1 @ matches primary CPU boot mode?
- orrne r7, r7, #BOOT_CPU_MODE_MISMATCH
- strne r7, [r5, r6] @ record what happened and give up
+ orrne \reg1, \reg1, #BOOT_CPU_MODE_MISMATCH
+ strne \reg1, [\reg2, \reg3] @ record what happened and give up
.endm
#else /* ZIMAGE */
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 4fb074c446b..57221e349a7 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -15,6 +15,7 @@
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/mach-types.h>
+#include <asm/smp_plat.h>
#include <asm/system_misc.h>
extern const unsigned char relocate_new_kernel[];
@@ -39,6 +40,14 @@ int machine_kexec_prepare(struct kimage *image)
int i, err;
/*
+ * Validate that if the current HW supports SMP, then the SW supports
+ * and implements CPU hotplug for the current HW. If not, we won't be
+ * able to kexec reliably, so fail the prepare operation.
+ */
+ if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
+ return -EINVAL;
+
+ /*
* No segment at default ATAGs address. try to locate
* a dtb using magic.
*/
@@ -73,6 +82,7 @@ void machine_crash_nonpanic_core(void *unused)
crash_save_cpu(&regs, smp_processor_id());
flush_cache_all();
+ set_cpu_online(smp_processor_id(), false);
atomic_dec(&waiting_for_crash_ipi);
while (1)
cpu_relax();
@@ -134,10 +144,13 @@ void machine_kexec(struct kimage *image)
unsigned long reboot_code_buffer_phys;
void *reboot_code_buffer;
- if (num_online_cpus() > 1) {
- pr_err("kexec: error: multiple CPUs still online\n");
- return;
- }
+ /*
+ * This can only happen if machine_shutdown() failed to disable some
+ * CPU, and that can only happen if the checks in
+ * machine_kexec_prepare() were not correct. If this fails, we can't
+ * reliably kexec anyway, so BUG_ON is appropriate.
+ */
+ BUG_ON(num_online_cpus() > 1);
page_list = image->head & PAGE_MASK;
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 85c3fb6c93c..084dc889698 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -292,12 +292,20 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
maps[ARM_SEC_CORE].unw_sec = s;
else if (strcmp(".ARM.exidx.exit.text", secname) == 0)
maps[ARM_SEC_EXIT].unw_sec = s;
+ else if (strcmp(".ARM.exidx.text.unlikely", secname) == 0)
+ maps[ARM_SEC_UNLIKELY].unw_sec = s;
+ else if (strcmp(".ARM.exidx.text.hot", secname) == 0)
+ maps[ARM_SEC_HOT].unw_sec = s;
else if (strcmp(".init.text", secname) == 0)
maps[ARM_SEC_INIT].txt_sec = s;
else if (strcmp(".text", secname) == 0)
maps[ARM_SEC_CORE].txt_sec = s;
else if (strcmp(".exit.text", secname) == 0)
maps[ARM_SEC_EXIT].txt_sec = s;
+ else if (strcmp(".text.unlikely", secname) == 0)
+ maps[ARM_SEC_UNLIKELY].txt_sec = s;
+ else if (strcmp(".text.hot", secname) == 0)
+ maps[ARM_SEC_HOT].txt_sec = s;
}
for (i = 0; i < ARM_SEC_MAX; i++)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d9f5cd4e533..e186ee1e63f 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
- int mapping = (*event_map)[config];
+ int mapping;
+
+ if (config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+
+ mapping = (*event_map)[config];
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
@@ -253,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu *leader_pmu = event->group_leader->pmu;
+ if (is_software_event(event))
+ return 1;
+
if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
return 1;
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index aebe0e99c15..8d6147b2001 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -118,7 +118,8 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
continue;
}
- err = request_irq(irq, handler, IRQF_NOBALANCING, "arm-pmu",
+ err = request_irq(irq, handler,
+ IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
cpu_pmu);
if (err) {
pr_err("unable to request IRQ%d for ARM PMU counters\n",
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index d3ca4f6915a..94f6b05f9e2 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -197,6 +197,7 @@ void machine_shutdown(void)
*/
void machine_halt(void)
{
+ local_irq_disable();
smp_send_stop();
local_irq_disable();
@@ -211,6 +212,7 @@ void machine_halt(void)
*/
void machine_power_off(void)
{
+ local_irq_disable();
smp_send_stop();
if (pm_power_off)
@@ -230,6 +232,7 @@ void machine_power_off(void)
*/
void machine_restart(char *cmd)
{
+ local_irq_disable();
smp_send_stop();
arm_pm_restart(reboot_mode, cmd);
@@ -426,10 +429,11 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
}
#ifdef CONFIG_MMU
+#ifdef CONFIG_KUSER_HELPERS
/*
* The vectors page is always readable from user space for the
- * atomic helpers and the signal restart code. Insert it into the
- * gate_vma so that it is visible through ptrace and /proc/<pid>/mem.
+ * atomic helpers. Insert it into the gate_vma so that it is visible
+ * through ptrace and /proc/<pid>/mem.
*/
static struct vm_area_struct gate_vma = {
.vm_start = 0xffff0000,
@@ -458,9 +462,48 @@ int in_gate_area_no_mm(unsigned long addr)
{
return in_gate_area(NULL, addr);
}
+#define is_gate_vma(vma) ((vma) == &gate_vma)
+#else
+#define is_gate_vma(vma) 0
+#endif
const char *arch_vma_name(struct vm_area_struct *vma)
{
- return (vma == &gate_vma) ? "[vectors]" : NULL;
+ return is_gate_vma(vma) ? "[vectors]" :
+ (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
+ "[sigpage]" : NULL;
+}
+
+static struct page *signal_page;
+extern struct page *get_signal_page(void);
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long addr;
+ int ret;
+
+ if (!signal_page)
+ signal_page = get_signal_page();
+ if (!signal_page)
+ return -ENOMEM;
+
+ down_write(&mm->mmap_sem);
+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
+ if (IS_ERR_VALUE(addr)) {
+ ret = addr;
+ goto up_fail;
+ }
+
+ ret = install_special_mapping(mm, addr, PAGE_SIZE,
+ VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+ &signal_page);
+
+ if (ret == 0)
+ mm->context.sigpage = addr;
+
+ up_fail:
+ up_write(&mm->mmap_sem);
+ return ret;
}
#endif
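
With this change each mm gets its own signal-return page and arch_vma_name() reports it as "[sigpage]" next to the kuser "[vectors]" mapping. A small check program for a 32-bit ARM kernel carrying this change; the program itself is plain C and simply scans /proc/self/maps:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* Print the special mappings named by arch_vma_name(). */
		if (strstr(line, "[sigpage]") || strstr(line, "[vectors]"))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}
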
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 63af9a7ae51..0e1e2b3afa4 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -72,10 +72,10 @@ static int __init fpe_setup(char *line)
__setup("fpe=", fpe_setup);
#endif
-extern void paging_init(struct machine_desc *desc);
+extern void paging_init(const struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
-extern void setup_dma_zone(struct machine_desc *desc);
+extern void setup_dma_zone(const struct machine_desc *desc);
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
@@ -139,7 +139,7 @@ EXPORT_SYMBOL(elf_platform);
static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
-struct machine_desc *machine_desc __initdata;
+const struct machine_desc *machine_desc __initdata;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
@@ -607,7 +607,7 @@ static void __init setup_processor(void)
void __init dump_machine_table(void)
{
- struct machine_desc *p;
+ const struct machine_desc *p;
early_print("Available machine support:\n\nID (hex)\tNAME\n");
for_each_machine_desc(p)
@@ -694,7 +694,7 @@ static int __init early_mem(char *p)
}
early_param("mem", early_mem);
-static void __init request_standard_resources(struct machine_desc *mdesc)
+static void __init request_standard_resources(const struct machine_desc *mdesc)
{
struct memblock_region *region;
struct resource *res;
@@ -836,6 +836,8 @@ static int __init meminfo_cmp(const void *_a, const void *_b)
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
+ sync_boot_mode();
+
if (is_hyp_mode_available()) {
pr_info("CPU: All CPU(s) started in HYP mode.\n");
pr_info("CPU: Virtualization extensions available.\n");
@@ -850,7 +852,7 @@ void __init hyp_mode_check(void)
void __init setup_arch(char **cmdline_p)
{
- struct machine_desc *mdesc;
+ const struct machine_desc *mdesc;
setup_processor();
mdesc = setup_machine_fdt(__atags_pointer);
@@ -971,6 +973,7 @@ static const char *hwcap_str[] = {
"vfpv4",
"idiva",
"idivt",
+ "vfpd32",
"lpae",
NULL
};
@@ -991,15 +994,6 @@ static int c_show(struct seq_file *m, void *v)
seq_printf(m, "model name\t: %s rev %d (%s)\n",
cpu_name, cpuid & 15, elf_platform);
-#if defined(CONFIG_SMP)
- seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
- per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
- (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
-#else
- seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
- loops_per_jiffy / (500000/HZ),
- (loops_per_jiffy / (5000/HZ)) % 100);
-#endif
/* dump out the processor features */
seq_puts(m, "Features\t: ");
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 1c16c35c271..ab330422527 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
#include <linux/errno.h>
+#include <linux/random.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
@@ -15,12 +16,11 @@
#include <asm/elf.h>
#include <asm/cacheflush.h>
+#include <asm/traps.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>
-#include "signal.h"
-
/*
* For ARM syscalls, we encode the syscall number into the instruction.
*/
@@ -40,11 +40,13 @@
#define SWI_THUMB_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define SWI_THUMB_RT_SIGRETURN (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))
-const unsigned long sigreturn_codes[7] = {
+static const unsigned long sigreturn_codes[7] = {
MOV_R7_NR_SIGRETURN, SWI_SYS_SIGRETURN, SWI_THUMB_SIGRETURN,
MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
};
+static unsigned long signal_return_offset;
+
#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
@@ -400,14 +402,20 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
__put_user(sigreturn_codes[idx+1], rc+1))
return 1;
- if ((cpsr & MODE32_BIT) && !IS_ENABLED(CONFIG_ARM_MPU)) {
+#ifdef CONFIG_MMU
+ if (cpsr & MODE32_BIT) {
+ struct mm_struct *mm = current->mm;
+
/*
- * 32-bit code can use the new high-page
- * signal return code support except when the MPU has
- * protected the vectors page from PL0
+ * 32-bit code can use the signal return page
+ * except when the MPU has protected the vectors
+ * page from PL0
*/
- retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
- } else {
+ retcode = mm->context.sigpage + signal_return_offset +
+ (idx << 2) + thumb;
+ } else
+#endif
+ {
/*
* Ensure that the instruction cache sees
* the return code written onto the stack.
@@ -608,3 +616,33 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
} while (thread_flags & _TIF_WORK_MASK);
return 0;
}
+
+struct page *get_signal_page(void)
+{
+ unsigned long ptr;
+ unsigned offset;
+ struct page *page;
+ void *addr;
+
+ page = alloc_pages(GFP_KERNEL, 0);
+
+ if (!page)
+ return NULL;
+
+ addr = page_address(page);
+
+ /* Give the signal return code some randomness */
+ offset = 0x200 + (get_random_int() & 0x7fc);
+ signal_return_offset = offset;
+
+ /*
+ * Copy signal return handlers into the vector page, and
+ * set sigreturn to be a pointer to these.
+ */
+ memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
+
+ ptr = (unsigned long)addr + offset;
+ flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+
+ return page;
+}
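
get_signal_page() places the sigreturn trampolines at 0x200 + (get_random_int() & 0x7fc): the mask keeps the offset word aligned and no larger than 0x9fc, so the seven 4-byte codes always fit comfortably inside a 4 KiB page. A quick exhaustive check of that arithmetic, with the page size and entry count taken from the surrounding code:

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define NCODES		7u		/* sigreturn_codes entries */

int main(void)
{
	unsigned r, min = ~0u, max = 0;

	/* Exhaust the 0x7fc mask: every offset is word aligned and the
	 * copied codes always stay inside one 4 KiB page. */
	for (r = 0; r <= 0x7fc; r += 4) {
		unsigned offset = 0x200 + (r & 0x7fc);

		assert((offset & 3) == 0);
		assert(offset + NCODES * 4 <= PAGE_SIZE);
		if (offset < min) min = offset;
		if (offset > max) max = offset;
	}
	printf("offset range: %#x..%#x\n", min, max);	/* 0x200..0x9fc */
	return 0;
}
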
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c2b4f8f0be9..92d10e50374 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -145,6 +145,16 @@ int boot_secondary(unsigned int cpu, struct task_struct *idle)
return -ENOSYS;
}
+int platform_can_cpu_hotplug(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ if (smp_ops.cpu_kill)
+ return 1;
+#endif
+
+ return 0;
+}
+
#ifdef CONFIG_HOTPLUG_CPU
static void percpu_timer_stop(void);
@@ -388,17 +398,8 @@ asmlinkage void secondary_start_kernel(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
- int cpu;
- unsigned long bogosum = 0;
-
- for_each_online_cpu(cpu)
- bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
-
- printk(KERN_INFO "SMP: Total of %d processors activated "
- "(%lu.%02lu BogoMIPS).\n",
- num_online_cpus(),
- bogosum / (500000/HZ),
- (bogosum / (5000/HZ)) % 100);
+ printk(KERN_INFO "SMP: Total of %d processors activated.\n",
+ num_online_cpus());
hyp_mode_check();
}
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index a98b62dca2f..83ccca303df 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -70,23 +70,6 @@ static inline void ipi_flush_bp_all(void *ignored)
local_flush_bp_all();
}
-#ifdef CONFIG_ARM_ERRATA_798181
-static int erratum_a15_798181(void)
-{
- unsigned int midr = read_cpuid_id();
-
- /* Cortex-A15 r0p0..r3p2 affected */
- if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
- return 0;
- return 1;
-}
-#else
-static int erratum_a15_798181(void)
-{
- return 0;
-}
-#endif
-
static void ipi_flush_tlb_a15_erratum(void *arg)
{
dmb();
@@ -121,7 +104,7 @@ void flush_tlb_all(void)
if (tlb_ops_need_broadcast())
on_each_cpu(ipi_flush_tlb_all, NULL, 1);
else
- local_flush_tlb_all();
+ __flush_tlb_all();
broadcast_tlb_a15_erratum();
}
@@ -130,7 +113,7 @@ void flush_tlb_mm(struct mm_struct *mm)
if (tlb_ops_need_broadcast())
on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
else
- local_flush_tlb_mm(mm);
+ __flush_tlb_mm(mm);
broadcast_tlb_mm_a15_erratum(mm);
}
@@ -143,7 +126,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,
&ta, 1);
} else
- local_flush_tlb_page(vma, uaddr);
+ __flush_tlb_page(vma, uaddr);
broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}
@@ -154,7 +137,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
ta.ta_start = kaddr;
on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
} else
- local_flush_tlb_kernel_page(kaddr);
+ __flush_tlb_kernel_page(kaddr);
broadcast_tlb_a15_erratum();
}
@@ -190,5 +173,5 @@ void flush_bp_all(void)
if (tlb_ops_need_broadcast())
on_each_cpu(ipi_flush_bp_all, NULL, 1);
else
- local_flush_bp_all();
+ __flush_bp_all();
}
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index c5a59546a25..85a87370f14 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -74,12 +74,8 @@ struct cpu_efficiency table_efficiency[] = {
{NULL, },
};
-struct cpu_capacity {
- unsigned long hwid;
- unsigned long capacity;
-};
-
-struct cpu_capacity *cpu_capacity;
+unsigned long *__cpu_capacity;
+#define cpu_capacity(cpu) __cpu_capacity[cpu]
unsigned long middle_capacity = 1;
@@ -100,15 +96,19 @@ static void __init parse_dt_topology(void)
unsigned long capacity = 0;
int alloc_size, cpu = 0;
- alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity);
- cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT);
+ alloc_size = nr_cpu_ids * sizeof(*__cpu_capacity);
+ __cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT);
- while ((cn = of_find_node_by_type(cn, "cpu"))) {
- const u32 *rate, *reg;
+ for_each_possible_cpu(cpu) {
+ const u32 *rate;
int len;
- if (cpu >= num_possible_cpus())
- break;
+ /* too early to use cpu->of_node */
+ cn = of_get_cpu_node(cpu, NULL);
+ if (!cn) {
+ pr_err("missing device node for CPU %d\n", cpu);
+ continue;
+ }
for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
if (of_device_is_compatible(cn, cpu_eff->compatible))
@@ -124,12 +124,6 @@ static void __init parse_dt_topology(void)
continue;
}
- reg = of_get_property(cn, "reg", &len);
- if (!reg || len != 4) {
- pr_err("%s missing reg property\n", cn->full_name);
- continue;
- }
-
capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
/* Save min capacity of the system */
@@ -140,13 +134,9 @@ static void __init parse_dt_topology(void)
if (capacity > max_capacity)
max_capacity = capacity;
- cpu_capacity[cpu].capacity = capacity;
- cpu_capacity[cpu++].hwid = be32_to_cpup(reg);
+ cpu_capacity(cpu) = capacity;
}
- if (cpu < num_possible_cpus())
- cpu_capacity[cpu].hwid = (unsigned long)(-1);
-
/* If min and max capacities are equals, we bypass the update of the
* cpu_scale because all CPUs have the same capacity. Otherwise, we
* compute a middle_capacity factor that will ensure that the capacity
@@ -154,9 +144,7 @@ static void __init parse_dt_topology(void)
* SCHED_POWER_SCALE, which is the default value, but with the
* constraint explained near table_efficiency[].
*/
- if (min_capacity == max_capacity)
- cpu_capacity[0].hwid = (unsigned long)(-1);
- else if (4*max_capacity < (3*(max_capacity + min_capacity)))
+ if (4*max_capacity < (3*(max_capacity + min_capacity)))
middle_capacity = (min_capacity + max_capacity)
>> (SCHED_POWER_SHIFT+1);
else
@@ -170,23 +158,12 @@ static void __init parse_dt_topology(void)
* boot. The update of all CPUs is in O(n^2) for heteregeneous system but the
* function returns directly for SMP system.
*/
-void update_cpu_power(unsigned int cpu, unsigned long hwid)
+void update_cpu_power(unsigned int cpu)
{
- unsigned int idx = 0;
-
- /* look for the cpu's hwid in the cpu capacity table */
- for (idx = 0; idx < num_possible_cpus(); idx++) {
- if (cpu_capacity[idx].hwid == hwid)
- break;
-
- if (cpu_capacity[idx].hwid == -1)
- return;
- }
-
- if (idx == num_possible_cpus())
+ if (!cpu_capacity(cpu))
return;
- set_power_scale(cpu, cpu_capacity[idx].capacity / middle_capacity);
+ set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);
printk(KERN_INFO "CPU%u: update cpu_power %lu\n",
cpu, arch_scale_freq_power(NULL, cpu));
@@ -194,7 +171,7 @@ void update_cpu_power(unsigned int cpu, unsigned long hwid)
#else
static inline void parse_dt_topology(void) {}
-static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {}
+static inline void update_cpu_power(unsigned int cpuid) {}
#endif
/*
@@ -281,7 +258,7 @@ void store_cpu_topology(unsigned int cpuid)
update_siblings_masks(cpuid);
- update_cpu_power(cpuid, mpidr & MPIDR_HWID_BITMASK);
+ update_cpu_power(cpuid);
printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
cpuid, cpu_topology[cpuid].thread_id,
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cab094c234e..8fcda140358 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -35,8 +35,6 @@
#include <asm/tls.h>
#include <asm/system_misc.h>
-#include "signal.h"
-
static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
void *vectors_page;
@@ -499,28 +497,64 @@ static int bad_syscall(int n, struct pt_regs *regs)
return regs->ARM_r0;
}
+static long do_cache_op_restart(struct restart_block *);
+
static inline int
-do_cache_op(unsigned long start, unsigned long end, int flags)
+__do_cache_op(unsigned long start, unsigned long end)
{
- struct mm_struct *mm = current->active_mm;
- struct vm_area_struct *vma;
+ int ret;
+ unsigned long chunk = PAGE_SIZE;
+
+ do {
+ if (signal_pending(current)) {
+ struct thread_info *ti = current_thread_info();
+
+ ti->restart_block = (struct restart_block) {
+ .fn = do_cache_op_restart,
+ };
+
+ ti->arm_restart_block = (struct arm_restart_block) {
+ {
+ .cache = {
+ .start = start,
+ .end = end,
+ },
+ },
+ };
+
+ return -ERESTART_RESTARTBLOCK;
+ }
+
+ ret = flush_cache_user_range(start, start + chunk);
+ if (ret)
+ return ret;
+
+ cond_resched();
+ start += chunk;
+ } while (start < end);
+
+ return 0;
+}
+static long do_cache_op_restart(struct restart_block *unused)
+{
+ struct arm_restart_block *restart_block;
+
+ restart_block = &current_thread_info()->arm_restart_block;
+ return __do_cache_op(restart_block->cache.start,
+ restart_block->cache.end);
+}
+
+static inline int
+do_cache_op(unsigned long start, unsigned long end, int flags)
+{
if (end < start || flags)
return -EINVAL;
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, start);
- if (vma && vma->vm_start < end) {
- if (start < vma->vm_start)
- start = vma->vm_start;
- if (end > vma->vm_end)
- end = vma->vm_end;
+ if (!access_ok(VERIFY_READ, start, end - start))
+ return -EFAULT;
- up_read(&mm->mmap_sem);
- return flush_cache_user_range(start, end);
- }
- up_read(&mm->mmap_sem);
- return -EINVAL;
+ return __do_cache_op(start, end);
}
/*
@@ -800,15 +834,26 @@ void __init trap_init(void)
return;
}
-static void __init kuser_get_tls_init(unsigned long vectors)
+#ifdef CONFIG_KUSER_HELPERS
+static void __init kuser_init(void *vectors)
{
+ extern char __kuser_helper_start[], __kuser_helper_end[];
+ int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+
+ memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+
/*
* vectors + 0xfe0 = __kuser_get_tls
* vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
*/
if (tls_emu || has_tls_reg)
- memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4);
+ memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
}
+#else
+static void __init kuser_init(void *vectors)
+{
+}
+#endif
void __init early_trap_init(void *vectors_base)
{
@@ -816,33 +861,30 @@ void __init early_trap_init(void *vectors_base)
unsigned long vectors = (unsigned long)vectors_base;
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
- extern char __kuser_helper_start[], __kuser_helper_end[];
- int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+ unsigned i;
vectors_page = vectors_base;
/*
+ * Poison the vectors page with an undefined instruction. This
+ * instruction is chosen to be undefined for both ARM and Thumb
+ * ISAs. The Thumb version is an undefined instruction with a
+ * branch back to the undefined instruction.
+ */
+ for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
+ ((u32 *)vectors_base)[i] = 0xe7fddef1;
+
+ /*
* Copy the vectors, stubs and kuser helpers (in entry-armv.S)
* into the vector page, mapped at 0xffff0000, and ensure these
* are visible to the instruction stream.
*/
memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
- memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
- memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
+ memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
- /*
- * Do processor specific fixups for the kuser helpers
- */
- kuser_get_tls_init(vectors);
-
- /*
- * Copy signal return handlers into the vector page, and
- * set sigreturn to be a pointer to these.
- */
- memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
- sigreturn_codes, sizeof(sigreturn_codes));
+ kuser_init(vectors_base);
- flush_icache_range(vectors, vectors + PAGE_SIZE);
+ flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
#else /* ifndef CONFIG_CPU_V7M */
/*
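
do_cache_op() no longer walks VMAs under mmap_sem; __do_cache_op() flushes the user range one page at a time and, if a signal is pending, parks the remaining range in a restart block and returns -ERESTART_RESTARTBLOCK so the flush resumes later instead of delaying signal delivery. A rough user-space analog of that resumable-chunk pattern; the chunk size and the interrupted() hook are stand-ins, not the kernel interfaces:

#include <stdbool.h>
#include <stdio.h>

#define CHUNK 4096UL

struct restart_state {
	unsigned long start, end;	/* where to pick up again */
};

/* Process [start, end) in CHUNK-sized steps; return false and record the
 * resume point if interrupted() fires, mirroring __do_cache_op(). */
static bool process_range(unsigned long start, unsigned long end,
			  bool (*interrupted)(void), struct restart_state *rs)
{
	while (start < end) {
		if (interrupted()) {
			rs->start = start;
			rs->end = end;
			return false;
		}
		/* ... operate on [start, start + CHUNK) here ... */
		start += CHUNK;
	}
	return true;
}

static int calls;
static bool fake_signal(void) { return ++calls == 3; }	/* interrupt once */

int main(void)
{
	struct restart_state rs;

	if (!process_range(0, 10 * CHUNK, fake_signal, &rs))
		printf("interrupted, resuming at %#lx\n", rs.start);
	/* second pass: fake_signal() stays false, so the range completes */
	printf("done: %d\n", process_range(rs.start, rs.end, fake_signal, &rs));
	return 0;
}
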
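
The same traps.c rework makes the kuser helpers a config option (CONFIG_KUSER_HELPERS) while keeping them at their fixed addresses at the top of the vectors page; the comment above pins __kuser_get_tls at vectors + 0xfe0, i.e. 0xffff0fe0. A user-space sketch that calls it, usable only on a 32-bit ARM kernel that still provides the helpers (it will fault anywhere else); the typedef and the zero-argument, pointer-returning signature follow the kernel's user-helpers documentation rather than this diff:

#include <stdio.h>

/* ARM-only: the kuser helpers sit at the top of the vectors page. */
typedef void *(*kuser_get_tls_fn)(void);
#define kuser_get_tls ((kuser_get_tls_fn)0xffff0fe0)

int main(void)
{
	printf("TLS pointer: %p\n", kuser_get_tls());
	return 0;
}
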
diff --git a/arch/arm/kernel/v7m.c b/arch/arm/kernel/v7m.c
new file mode 100644
index 00000000000..4d2cba94f5c
--- /dev/null
+++ b/arch/arm/kernel/v7m.c
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2013 Uwe Kleine-Koenig for Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#include <linux/io.h>
+#include <linux/reboot.h>
+#include <asm/barrier.h>
+#include <asm/v7m.h>
+
+void armv7m_restart(enum reboot_mode mode, const char *cmd)
+{
+ dsb();
+ __raw_writel(V7M_SCB_AIRCR_VECTKEY | V7M_SCB_AIRCR_SYSRESETREQ,
+ BASEADDR_V7M_SCB + V7M_SCB_AIRCR);
+ dsb();
+}
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index fa25e4e425f..7bcee5c9b60 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -148,6 +148,23 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_begin = .;
#endif
+ /*
+ * The vectors and stubs are relocatable code, and the
+ * only thing that matters is their relative offsets
+ */
+ __vectors_start = .;
+ .vectors 0 : AT(__vectors_start) {
+ *(.vectors)
+ }
+ . = __vectors_start + SIZEOF(.vectors);
+ __vectors_end = .;
+
+ __stubs_start = .;
+ .stubs 0x1000 : AT(__stubs_start) {
+ *(.stubs)
+ }
+ . = __stubs_start + SIZEOF(.stubs);
+ __stubs_end = .;
INIT_TEXT_SECTION(8)
.exit.text : {
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 741f66a2edb..9c697db2787 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -219,6 +219,10 @@ long kvm_arch_dev_ioctl(struct file *filp,
return -EINVAL;
}
+void kvm_arch_memslots_updated(struct kvm *kvm)
+{
+}
+
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_userspace_memory_region *mem,
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 4a519907043..db9cf692d4d 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -146,7 +146,11 @@ static bool pm_fake(struct kvm_vcpu *vcpu,
#define access_pmintenclr pm_fake
/* Architected CP15 registers.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ * registers preceding 32-bit ones.
*/
static const struct coproc_reg cp15_regs[] = {
/* CSSELR: swapped by interrupt.S. */
@@ -154,8 +158,8 @@ static const struct coproc_reg cp15_regs[] = {
NULL, reset_unknown, c0_CSSELR },
/* TTBR0/TTBR1: swapped by interrupt.S. */
- { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
- { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
+ { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 },
+ { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 },
/* TTBCR: swapped by interrupt.S. */
{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32,
@@ -182,7 +186,7 @@ static const struct coproc_reg cp15_regs[] = {
NULL, reset_unknown, c6_IFAR },
/* PAR swapped by interrupt.S */
- { CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
+ { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR },
/*
* DC{C,I,CI}SW operations:
@@ -399,12 +403,13 @@ static bool index_to_params(u64 id, struct coproc_params *params)
| KVM_REG_ARM_OPC1_MASK))
return false;
params->is_64bit = true;
- params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
+ /* CRm to CRn: see cp15_to_index for details */
+ params->CRn = ((id & KVM_REG_ARM_CRM_MASK)
>> KVM_REG_ARM_CRM_SHIFT);
params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
>> KVM_REG_ARM_OPC1_SHIFT);
params->Op2 = 0;
- params->CRn = 0;
+ params->CRm = 0;
return true;
default:
return false;
@@ -898,7 +903,14 @@ static u64 cp15_to_index(const struct coproc_reg *reg)
if (reg->is_64) {
val |= KVM_REG_SIZE_U64;
val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
- val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT);
+ /*
+ * CRn always denotes the primary coproc. reg. nr. for the
+ * in-kernel representation, but the user space API uses the
+ * CRm for the encoding, because it is modelled after the
+ * MRRC/MCRR instructions: see the ARM ARM rev. c page
+ * B3-1445
+ */
+ val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT);
} else {
val |= KVM_REG_SIZE_U32;
val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index b7301d3e479..0461d5c8d3d 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -135,6 +135,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
return -1;
if (i1->CRn != i2->CRn)
return i1->CRn - i2->CRn;
+ if (i1->is_64 != i2->is_64)
+ return i2->is_64 - i1->is_64;
if (i1->CRm != i2->CRm)
return i1->CRm - i2->CRm;
if (i1->Op1 != i2->Op1)
@@ -145,6 +147,7 @@ static inline int cmp_reg(const struct coproc_reg *i1,
#define CRn(_x) .CRn = _x
#define CRm(_x) .CRm = _x
+#define CRm64(_x) .CRn = _x, .CRm = 0
#define Op1(_x) .Op1 = _x
#define Op2(_x) .Op2 = _x
#define is64 .is_64 = true
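
cmp_reg() gains an is_64 comparison so that, for the same CRn, 64-bit entries sort ahead of 32-bit ones, which is what the updated "Must be sorted ascending ... 64-bit registers preceding 32-bit ones" comments in coproc.c and coproc_a15.c assume. A stripped-down model of the comparator, with the struct reduced to the fields used here:

#include <stdbool.h>
#include <stdio.h>

struct reg { int CRn, CRm, Op1, Op2; bool is_64; };

/* Mirrors the updated cmp_reg(): CRn first, then 64-bit before 32-bit,
 * then CRm, Op1, Op2. */
static int cmp_reg(const struct reg *a, const struct reg *b)
{
	if (a->CRn != b->CRn)
		return a->CRn - b->CRn;
	if (a->is_64 != b->is_64)
		return b->is_64 - a->is_64;	/* true (1) sorts first */
	if (a->CRm != b->CRm)
		return a->CRm - b->CRm;
	if (a->Op1 != b->Op1)
		return a->Op1 - b->Op1;
	return a->Op2 - b->Op2;
}

int main(void)
{
	struct reg ttbr0_64 = { .CRn = 2, .is_64 = true };
	struct reg ttbcr_32 = { .CRn = 2, .CRm = 0, .Op2 = 2 };

	/* negative: the 64-bit TTBR0 entry must precede the 32-bit TTBCR */
	printf("%d\n", cmp_reg(&ttbr0_64, &ttbcr_32));
	return 0;
}
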
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
index 685063a6d0c..cf93472b9dd 100644
--- a/arch/arm/kvm/coproc_a15.c
+++ b/arch/arm/kvm/coproc_a15.c
@@ -114,7 +114,11 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
/*
* A15-specific CP15 registers.
- * Important: Must be sorted ascending by CRn, CRM, Op1, Op2
+ * CRn denotes the primary register number, but is copied to the CRm in the
+ * user space API for 64-bit register access in line with the terminology used
+ * in the ARM ARM.
+ * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
+ * registers preceding 32-bit ones.
*/
static const struct coproc_reg a15_regs[] = {
/* MPIDR: we use VMPIDR for guest access. */
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index f048338135f..1b9844d369c 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -142,7 +142,7 @@ target: @ We're now in the trampoline code, switch page tables
@ Invalidate the old TLBs
mcr p15, 4, r0, c8, c7, 0 @ TLBIALLH
- dsb
+ dsb ish
eret
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index 16cd4ba5d7f..ddc15539bad 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -55,7 +55,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
mcrr p15, 6, r2, r3, c2 @ Write VTTBR
isb
mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored)
- dsb
+ dsb ish
isb
mov r2, #0
mov r3, #0
@@ -79,7 +79,7 @@ ENTRY(__kvm_flush_vm_context)
mcr p15, 4, r0, c8, c3, 4
/* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
mcr p15, 0, r0, c7, c1, 0
- dsb
+ dsb ish
isb @ Not necessary if followed by eret
bx lr
@@ -492,10 +492,10 @@ __kvm_hyp_code_end:
.section ".rodata"
und_die_str:
- .ascii "unexpected undefined exception in Hyp mode at: %#08x"
+ .ascii "unexpected undefined exception in Hyp mode at: %#08x\n"
pabt_die_str:
- .ascii "unexpected prefetch abort in Hyp mode at: %#08x"
+ .ascii "unexpected prefetch abort in Hyp mode at: %#08x\n"
dabt_die_str:
- .ascii "unexpected data abort in Hyp mode at: %#08x"
+ .ascii "unexpected data abort in Hyp mode at: %#08x\n"
svc_die_str:
- .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x"
+ .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x\n"
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index b8e06b7a283..0c25d9487d5 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -63,7 +63,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_exit_mmio *mmio)
{
- unsigned long rt, len;
+ unsigned long rt;
+ int len;
bool is_write, sign_extend;
if (kvm_vcpu_dabt_isextabt(vcpu)) {
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index ca6bea4859b..b0de86b56c1 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -85,6 +85,12 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
return p;
}
+static bool page_empty(void *ptr)
+{
+ struct page *ptr_page = virt_to_page(ptr);
+ return page_count(ptr_page) == 1;
+}
+
static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
{
pmd_t *pmd_table = pmd_offset(pud, 0);
@@ -103,12 +109,6 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
put_page(virt_to_page(pmd));
}
-static bool pmd_empty(pmd_t *pmd)
-{
- struct page *pmd_page = virt_to_page(pmd);
- return page_count(pmd_page) == 1;
-}
-
static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
{
if (pte_present(*pte)) {
@@ -118,12 +118,6 @@ static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr)
}
}
-static bool pte_empty(pte_t *pte)
-{
- struct page *pte_page = virt_to_page(pte);
- return page_count(pte_page) == 1;
-}
-
static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
unsigned long long start, u64 size)
{
@@ -132,37 +126,37 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
pmd_t *pmd;
pte_t *pte;
unsigned long long addr = start, end = start + size;
- u64 range;
+ u64 next;
while (addr < end) {
pgd = pgdp + pgd_index(addr);
pud = pud_offset(pgd, addr);
if (pud_none(*pud)) {
- addr += PUD_SIZE;
+ addr = pud_addr_end(addr, end);
continue;
}
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd)) {
- addr += PMD_SIZE;
+ addr = pmd_addr_end(addr, end);
continue;
}
pte = pte_offset_kernel(pmd, addr);
clear_pte_entry(kvm, pte, addr);
- range = PAGE_SIZE;
+ next = addr + PAGE_SIZE;
/* If we emptied the pte, walk back up the ladder */
- if (pte_empty(pte)) {
+ if (page_empty(pte)) {
clear_pmd_entry(kvm, pmd, addr);
- range = PMD_SIZE;
- if (pmd_empty(pmd)) {
+ next = pmd_addr_end(addr, end);
+ if (page_empty(pmd) && !page_empty(pud)) {
clear_pud_entry(kvm, pud, addr);
- range = PUD_SIZE;
+ next = pud_addr_end(addr, end);
}
}
- addr += range;
+ addr = next;
}
}
@@ -495,7 +489,6 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
- kvm_set_s2pte_writable(&pte);
ret = mmu_topup_memory_cache(&cache, 2, 2);
if (ret)
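
unmap_range() now steps with pud_addr_end()/pmd_addr_end() rather than adding a fixed PUD_SIZE/PMD_SIZE, so skipping an empty entry can never overshoot the requested end. Those macros clamp to min(next boundary, end); a generic sketch of that calculation, under the assumption of power-of-two block sizes (the 2 MiB figure is only an example):

#include <stdio.h>

/* Same shape as the kernel's p?d_addr_end() macros: round addr up to the
 * next 'size' boundary, but never step past 'end'.  The -1 trick keeps
 * the comparison correct if the boundary wraps to 0 at the top of the
 * address space. */
static unsigned long long addr_end(unsigned long long addr,
				   unsigned long long end,
				   unsigned long long size)
{
	unsigned long long next = (addr + size) & ~(size - 1);

	return (next - 1 < end - 1) ? next : end;
}

int main(void)
{
	const unsigned long long SZ_2M = 2ULL << 20;

	/* end falls inside the current 2 MiB block: stop at end */
	printf("%#llx\n", addr_end(0x40100000ULL, 0x40180000ULL, SZ_2M));
	/* end is far away: step to the next 2 MiB boundary */
	printf("%#llx\n", addr_end(0x40100000ULL, 0x80000000ULL, SZ_2M));
	return 0;
}

This is why the walk above can 'continue' over holes without ever advancing addr beyond end.
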
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index b7840e7aa45..71e08baee20 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -40,7 +40,7 @@ static struct kvm_regs a15_regs_reset = {
};
static const struct kvm_irq_level a15_vtimer_irq = {
- .irq = 27,
+ { .irq = 27 },
.level = 1,
};
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index a8e73ed5ad5..b1d640f7862 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -59,10 +59,9 @@ TRACE_EVENT(kvm_guest_fault,
__entry->ipa = ipa;
),
- TP_printk("guest fault at PC %#08lx (hxfar %#08lx, "
- "ipa %#16llx, hsr %#08lx",
- __entry->vcpu_pc, __entry->hxfar,
- __entry->ipa, __entry->hsr)
+ TP_printk("ipa %#llx, hsr %#08lx, hxfar %#08lx, pc %#08lx",
+ __entry->ipa, __entry->hsr,
+ __entry->hxfar, __entry->vcpu_pc)
);
TRACE_EVENT(kvm_irq_line,
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index af72969820b..aaf3a873113 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -45,3 +45,9 @@ lib-$(CONFIG_ARCH_SHARK) += io-shark.o
$(obj)/csumpartialcopy.o: $(obj)/csumpartialcopygeneric.S
$(obj)/csumpartialcopyuser.o: $(obj)/csumpartialcopygeneric.S
+
+ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
+ NEON_FLAGS := -mfloat-abi=softfp -mfpu=neon
+ CFLAGS_xor-neon.o += $(NEON_FLAGS)
+ lib-$(CONFIG_XOR_BLOCKS) += xor-neon.o
+endif
diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c
new file mode 100644
index 00000000000..f485e5a2af4
--- /dev/null
+++ b/arch/arm/lib/xor-neon.c
@@ -0,0 +1,42 @@
+/*
+ * linux/arch/arm/lib/xor-neon.c
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/raid/xor.h>
+
+#ifndef __ARM_NEON__
+#error You should compile this file with '-mfloat-abi=softfp -mfpu=neon'
+#endif
+
+/*
+ * Pull in the reference implementations while instructing GCC (through
+ * -ftree-vectorize) to attempt to exploit implicit parallelism and emit
+ * NEON instructions.
+ */
+#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
+#pragma GCC optimize "tree-vectorize"
+#else
+/*
+ * While older versions of GCC do not generate incorrect code, they fail to
+ * recognize the parallel nature of these functions, and emit plain ARM code,
+ * which is known to be slower than the optimized ARM code in asm-arm/xor.h.
+ */
+#warning This code requires at least version 4.6 of GCC
+#endif
+
+#pragma GCC diagnostic ignored "-Wunused-variable"
+#include <asm-generic/xor.h>
+
+struct xor_block_template const xor_block_neon_inner = {
+ .name = "__inner_neon__",
+ .do_2 = xor_8regs_2,
+ .do_3 = xor_8regs_3,
+ .do_4 = xor_8regs_4,
+ .do_5 = xor_8regs_5,
+};
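
xor-neon.c relies on GCC's auto-vectorizer: the generic xor_8regs_* routines from asm-generic/xor.h are rebuilt with -mfpu=neon and the tree-vectorize pragma so the plain C loops come out as NEON code. The same mechanism on a minimal xor loop, buildable with any GCC >= 4.6 at -O2 (add -mfpu=neon -mfloat-abi=softfp on 32-bit ARM and inspect the assembly to see the vector instructions); xor_blocks here is an illustrative stand-in, not one of the kernel's templates:

#include <stdio.h>

#if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
#pragma GCC optimize "tree-vectorize"
#endif

/* The vectorizer turns this byte-wise loop into SIMD loads/stores when
 * built with optimization enabled. */
static void xor_blocks(unsigned long n, unsigned char *dst,
		       const unsigned char *src)
{
	unsigned long i;

	for (i = 0; i < n; i++)
		dst[i] ^= src[i];
}

int main(void)
{
	unsigned char a[64], b[64];
	unsigned long i;

	for (i = 0; i < sizeof(a); i++) {
		a[i] = (unsigned char)i;
		b[i] = 0xff;
	}
	xor_blocks(sizeof(a), a, b);
	printf("a[0]=%#x a[63]=%#x\n", a[0], a[63]);	/* 0xff, 0xc0 */
	return 0;
}
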
diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c
index 2abee6626aa..916e5a14291 100644
--- a/arch/arm/mach-at91/at91sam9x5.c
+++ b/arch/arm/mach-at91/at91sam9x5.c
@@ -227,6 +227,8 @@ static struct clk_lookup periph_clocks_lookups[] = {
CLKDEV_CON_DEV_ID("usart", "f8020000.serial", &usart1_clk),
CLKDEV_CON_DEV_ID("usart", "f8024000.serial", &usart2_clk),
CLKDEV_CON_DEV_ID("usart", "f8028000.serial", &usart3_clk),
+ CLKDEV_CON_DEV_ID("usart", "f8040000.serial", &uart0_clk),
+ CLKDEV_CON_DEV_ID("usart", "f8044000.serial", &uart1_clk),
CLKDEV_CON_DEV_ID("t0_clk", "f8008000.timer", &tcb0_clk),
CLKDEV_CON_DEV_ID("t0_clk", "f800c000.timer", &tcb0_clk),
CLKDEV_CON_DEV_ID("mci_clk", "f0008000.mmc", &mmc0_clk),
diff --git a/arch/arm/mach-at91/include/mach/at91_adc.h b/arch/arm/mach-at91/include/mach/at91_adc.h
index 8e7ed5c9081..048a57f76bd 100644
--- a/arch/arm/mach-at91/include/mach/at91_adc.h
+++ b/arch/arm/mach-at91/include/mach/at91_adc.h
@@ -28,9 +28,12 @@
#define AT91_ADC_TRGSEL_EXTERNAL (6 << 1)
#define AT91_ADC_LOWRES (1 << 4) /* Low Resolution */
#define AT91_ADC_SLEEP (1 << 5) /* Sleep Mode */
-#define AT91_ADC_PRESCAL (0x3f << 8) /* Prescalar Rate Selection */
+#define AT91_ADC_PRESCAL_9260 (0x3f << 8) /* Prescalar Rate Selection */
+#define AT91_ADC_PRESCAL_9G45 (0xff << 8)
#define AT91_ADC_PRESCAL_(x) ((x) << 8)
-#define AT91_ADC_STARTUP (0x1f << 16) /* Startup Up Time */
+#define AT91_ADC_STARTUP_9260 (0x1f << 16) /* Startup Up Time */
+#define AT91_ADC_STARTUP_9G45 (0x7f << 16)
+#define AT91_ADC_STARTUP_9X5 (0xf << 16)
#define AT91_ADC_STARTUP_(x) ((x) << 16)
#define AT91_ADC_SHTIM (0xf << 24) /* Sample & Hold Time */
#define AT91_ADC_SHTIM_(x) ((x) << 24)
@@ -48,6 +51,9 @@
#define AT91_ADC_ENDRX (1 << 18) /* End of RX Buffer */
#define AT91_ADC_RXFUFF (1 << 19) /* RX Buffer Full */
+#define AT91_ADC_SR_9X5 0x30 /* Status Register for 9x5 */
+#define AT91_ADC_SR_DRDY_9X5 (1 << 24) /* Data Ready */
+
#define AT91_ADC_LCDR 0x20 /* Last Converted Data Register */
#define AT91_ADC_LDATA (0x3ff)
@@ -58,4 +64,10 @@
#define AT91_ADC_CHR(n) (0x30 + ((n) * 4)) /* Channel Data Register N */
#define AT91_ADC_DATA (0x3ff)
+#define AT91_ADC_CDR0_9X5 (0x50) /* Channel Data Register 0 for 9X5 */
+
+#define AT91_ADC_TRGR_9260 AT91_ADC_MR
+#define AT91_ADC_TRGR_9G45 0x08
+#define AT91_ADC_TRGR_9X5 0xC0
+
#endif
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index bea6793a7ed..9f09f45835f 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -1249,12 +1249,10 @@ static struct vpif_capture_config da850_vpif_capture_config = {
static struct adv7343_platform_data adv7343_pdata = {
.mode_config = {
- .dac_3 = 1,
- .dac_2 = 1,
- .dac_1 = 1,
+ .dac = { 1, 1, 1 },
},
.sd_config = {
- .sd_dac_out1 = 1,
+ .sd_dac_out = { 1 },
},
};
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index dff4ddc5ef8..139e42da25f 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -75,6 +75,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
.parts = davinci_nand_partitions,
.nr_parts = ARRAY_SIZE(davinci_nand_partitions),
.ecc_mode = NAND_ECC_HW_SYNDROME,
+ .ecc_bits = 4,
.bbt_options = NAND_BBT_USE_FLASH,
};
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index afbc439f11d..4cdb61c5445 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -505,7 +505,7 @@ static struct vpbe_output dm365evm_vpbe_outputs[] = {
/*
* Amplifiers on the board
*/
-struct ths7303_platform_data ths7303_pdata = {
+static struct ths7303_platform_data ths7303_pdata = {
.ch_1 = 3,
.ch_2 = 3,
.ch_3 = 3,
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index a33686a6fbb..fa4bfaf952d 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -153,6 +153,7 @@ static struct davinci_nand_pdata davinci_evm_nandflash_data = {
.parts = davinci_evm_nandflash_partition,
.nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition),
.ecc_mode = NAND_ECC_HW,
+ .ecc_bits = 1,
.bbt_options = NAND_BBT_USE_FLASH,
.timing = &davinci_evm_nandflash_timing,
};
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index fbb8e5ab1dc..0c005e876ca 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -90,6 +90,7 @@ static struct davinci_nand_pdata davinci_nand_data = {
.parts = davinci_nand_partitions,
.nr_parts = ARRAY_SIZE(davinci_nand_partitions),
.ecc_mode = NAND_ECC_HW,
+ .ecc_bits = 1,
.options = 0,
};
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index 2bc112adf56..808233b60e3 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -88,6 +88,7 @@ static struct davinci_nand_pdata davinci_ntosd2_nandflash_data = {
.parts = davinci_ntosd2_nandflash_partition,
.nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition),
.ecc_mode = NAND_ECC_HW,
+ .ecc_bits = 1,
.bbt_options = NAND_BBT_USE_FLASH,
};
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c
index 36aef3a7ded..f1ac1c94ac0 100644
--- a/arch/arm/mach-davinci/cpuidle.c
+++ b/arch/arm/mach-davinci/cpuidle.c
@@ -65,7 +65,7 @@ static struct cpuidle_driver davinci_idle_driver = {
.states[1] = {
.enter = davinci_enter_idle,
.exit_latency = 10,
- .target_residency = 100000,
+ .target_residency = 10000,
.flags = CPUIDLE_FLAG_TIME_VALID,
.name = "DDR SR",
.desc = "WFI and DDR Self Refresh",
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 42ef53f62c6..86100d17969 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -860,7 +860,7 @@ static struct platform_device dm355_vpbe_display = {
},
};
-struct venc_platform_data dm355_venc_pdata = {
+static struct venc_platform_data dm355_venc_pdata = {
.setup_pinmux = dm355_vpbe_setup_pinmux,
.setup_clock = dm355_venc_setup_clock,
};
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index fa7af5eda52..dad28029ba9 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -1349,7 +1349,7 @@ static struct platform_device dm365_vpbe_display = {
},
};
-struct venc_platform_data dm365_venc_pdata = {
+static struct venc_platform_data dm365_venc_pdata = {
.setup_pinmux = dm365_vpbe_setup_pinmux,
.setup_clock = dm365_venc_setup_clock,
};
diff --git a/arch/arm/mach-davinci/include/mach/debug-macro.S b/arch/arm/mach-davinci/include/mach/debug-macro.S
deleted file mode 100644
index b18b8ebc650..00000000000
--- a/arch/arm/mach-davinci/include/mach/debug-macro.S
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Debugging macro for DaVinci
- *
- * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
- *
- * 2007 (c) MontaVista Software, Inc. This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- */
-
-/* Modifications
- * Jan 2009 Chaithrika U S Added senduart, busyuart, waituart
- * macros, based on debug-8250.S file
- * but using 32-bit accesses required for
- * some davinci devices.
- */
-
-#include <linux/serial_reg.h>
-
-#include <mach/serial.h>
-
-#define UART_SHIFT 2
-
-#if defined(CONFIG_DEBUG_DAVINCI_DMx_UART0)
-#define UART_BASE DAVINCI_UART0_BASE
-#elif defined(CONFIG_DEBUG_DAVINCI_DA8XX_UART1)
-#define UART_BASE DA8XX_UART1_BASE
-#elif defined(CONFIG_DEBUG_DAVINCI_DA8XX_UART2)
-#define UART_BASE DA8XX_UART2_BASE
-#elif defined(CONFIG_DEBUG_DAVINCI_TNETV107X_UART1)
-#define UART_BASE TNETV107X_UART2_BASE
-#define UART_VIRTBASE TNETV107X_UART2_VIRT
-#else
-#error "Select a specifc port for DEBUG_LL"
-#endif
-
-#ifndef UART_VIRTBASE
-#define UART_VIRTBASE IO_ADDRESS(UART_BASE)
-#endif
-
- .macro addruart, rp, rv, tmp
- ldr \rp, =UART_BASE
- ldr \rv, =UART_VIRTBASE
- .endm
-
- .macro senduart,rd,rx
- str \rd, [\rx, #UART_TX << UART_SHIFT]
- .endm
-
- .macro busyuart,rd,rx
-1002: ldr \rd, [\rx, #UART_LSR << UART_SHIFT]
- and \rd, \rd, #UART_LSR_TEMT | UART_LSR_THRE
- teq \rd, #UART_LSR_TEMT | UART_LSR_THRE
- bne 1002b
- .endm
-
- .macro waituart,rd,rx
-#ifdef FLOW_CONTROL
-1001: ldr \rd, [\rx, #UART_MSR << UART_SHIFT]
- tst \rd, #UART_MSR_CTS
- beq 1001b
-#endif
- .endm
-
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index 00247c77131..304f069ebf5 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -108,8 +108,8 @@ static void __init dove_clk_init(void)
orion_clkdev_add(NULL, "sdhci-dove.1", sdio1);
orion_clkdev_add(NULL, "orion_nand", nand);
orion_clkdev_add(NULL, "cafe1000-ccic.0", camera);
- orion_clkdev_add(NULL, "kirkwood-i2s.0", i2s0);
- orion_clkdev_add(NULL, "kirkwood-i2s.1", i2s1);
+ orion_clkdev_add(NULL, "mvebu-audio.0", i2s0);
+ orion_clkdev_add(NULL, "mvebu-audio.1", i2s1);
orion_clkdev_add(NULL, "mv_crypto", crypto);
orion_clkdev_add(NULL, "dove-ac97", ac97);
orion_clkdev_add(NULL, "dove-pdma", pdma);
diff --git a/arch/arm/mach-dove/include/mach/debug-macro.S b/arch/arm/mach-dove/include/mach/debug-macro.S
deleted file mode 100644
index 5929cbc5916..00000000000
--- a/arch/arm/mach-dove/include/mach/debug-macro.S
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-dove/include/mach/debug-macro.S
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <mach/bridge-regs.h>
-
- .macro addruart, rp, rv, tmp
- ldr \rp, =DOVE_SB_REGS_PHYS_BASE
- ldr \rv, =DOVE_SB_REGS_VIRT_BASE
- orr \rp, \rp, #0x00012000
- orr \rv, \rv, #0x00012000
- .endm
-
-#define UART_SHIFT 2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-ebsa110/include/mach/debug-macro.S b/arch/arm/mach-ebsa110/include/mach/debug-macro.S
deleted file mode 100644
index bb02c05e681..00000000000
--- a/arch/arm/mach-ebsa110/include/mach/debug-macro.S
+++ /dev/null
@@ -1,22 +0,0 @@
-/* arch/arm/mach-ebsa110/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-**/
-
- .macro addruart, rp, rv, tmp
- mov \rp, #0xf0000000
- orr \rp, \rp, #0x00000be0
- mov \rp, \rv
- .endm
-
-#define UART_SHIFT 2
-#define FLOW_CONTROL
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-ep93xx/Kconfig b/arch/arm/mach-ep93xx/Kconfig
index fe3c1fa5462..93e54fd4e3d 100644
--- a/arch/arm/mach-ep93xx/Kconfig
+++ b/arch/arm/mach-ep93xx/Kconfig
@@ -194,20 +194,6 @@ config MACH_VISION_EP9307
Say 'Y' here if you want your kernel to support the
Vision Engraving Systems EP9307 SoM.
-choice
- prompt "Select a UART for early kernel messages"
-
-config EP93XX_EARLY_UART1
- bool "UART1"
-
-config EP93XX_EARLY_UART2
- bool "UART2"
-
-config EP93XX_EARLY_UART3
- bool "UART3"
-
-endchoice
-
endmenu
endif
diff --git a/arch/arm/mach-ep93xx/include/mach/debug-macro.S b/arch/arm/mach-ep93xx/include/mach/debug-macro.S
deleted file mode 100644
index af54e43132c..00000000000
--- a/arch/arm/mach-ep93xx/include/mach/debug-macro.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * arch/arm/mach-ep93xx/include/mach/debug-macro.S
- * Debugging macro include header
- *
- * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- */
-#include <mach/ep93xx-regs.h>
-
- .macro addruart, rp, rv, tmp
- ldr \rp, =EP93XX_APB_PHYS_BASE @ Physical base
- ldr \rv, =EP93XX_APB_VIRT_BASE @ virtual base
- orr \rp, \rp, #0x000c0000
- orr \rv, \rv, #0x000c0000
- .endm
-
-#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/mach-ep93xx/include/mach/uncompress.h b/arch/arm/mach-ep93xx/include/mach/uncompress.h
index b5cc77d2380..03c42e5400d 100644
--- a/arch/arm/mach-ep93xx/include/mach/uncompress.h
+++ b/arch/arm/mach-ep93xx/include/mach/uncompress.h
@@ -31,18 +31,8 @@ static void __raw_writel(unsigned int value, unsigned int ptr)
*((volatile unsigned int *)ptr) = value;
}
-#if defined(CONFIG_EP93XX_EARLY_UART1)
-#define UART_BASE EP93XX_UART1_PHYS_BASE
-#elif defined(CONFIG_EP93XX_EARLY_UART2)
-#define UART_BASE EP93XX_UART2_PHYS_BASE
-#elif defined(CONFIG_EP93XX_EARLY_UART3)
-#define UART_BASE EP93XX_UART3_PHYS_BASE
-#else
-#define UART_BASE EP93XX_UART1_PHYS_BASE
-#endif
-
-#define PHYS_UART_DATA (UART_BASE + 0x00)
-#define PHYS_UART_FLAG (UART_BASE + 0x18)
+#define PHYS_UART_DATA (CONFIG_DEBUG_UART_PHYS + 0x00)
+#define PHYS_UART_FLAG (CONFIG_DEBUG_UART_PHYS + 0x18)
#define UART_FLAG_TXFF 0x20
static inline void putc(int c)
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 855d4a7b462..5952e68c76c 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -92,6 +92,7 @@ config SOC_EXYNOS5440
bool "SAMSUNG EXYNOS5440"
default y
depends on ARCH_EXYNOS5
+ select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
select ARCH_HAS_OPP
select HAVE_ARM_ARCH_TIMER
select AUTO_ZRELADDR
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
index e970a7a4e27..53696154aea 100644
--- a/arch/arm/mach-exynos/Makefile
+++ b/arch/arm/mach-exynos/Makefile
@@ -14,7 +14,7 @@ obj- :=
obj-$(CONFIG_ARCH_EXYNOS) += common.o
-obj-$(CONFIG_PM) += pm.o
+obj-$(CONFIG_S5P_PM) += pm.o
obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o
obj-$(CONFIG_CPU_IDLE) += cpuidle.o
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
index 164685bd25c..ba95e5db250 100644
--- a/arch/arm/mach-exynos/common.c
+++ b/arch/arm/mach-exynos/common.c
@@ -58,7 +58,6 @@ static const char name_exynos5440[] = "EXYNOS5440";
static void exynos4_map_io(void);
static void exynos5_map_io(void);
-static void exynos5440_map_io(void);
static int exynos_init(void);
static struct cpu_table cpu_ids[] __initdata = {
@@ -95,7 +94,6 @@ static struct cpu_table cpu_ids[] __initdata = {
}, {
.idcode = EXYNOS5440_SOC_ID,
.idmask = EXYNOS5_SOC_MASK,
- .map_io = exynos5440_map_io,
.init = exynos_init,
.name = name_exynos5440,
},
@@ -150,11 +148,6 @@ static struct map_desc exynos4_iodesc[] __initdata = {
.length = SZ_64K,
.type = MT_DEVICE,
}, {
- .virtual = (unsigned long)S3C_VA_UART,
- .pfn = __phys_to_pfn(EXYNOS4_PA_UART),
- .length = SZ_512K,
- .type = MT_DEVICE,
- }, {
.virtual = (unsigned long)S5P_VA_CMU,
.pfn = __phys_to_pfn(EXYNOS4_PA_CMU),
.length = SZ_128K,
@@ -268,20 +261,6 @@ static struct map_desc exynos5_iodesc[] __initdata = {
.pfn = __phys_to_pfn(EXYNOS5_PA_PMU),
.length = SZ_64K,
.type = MT_DEVICE,
- }, {
- .virtual = (unsigned long)S3C_VA_UART,
- .pfn = __phys_to_pfn(EXYNOS5_PA_UART),
- .length = SZ_512K,
- .type = MT_DEVICE,
- },
-};
-
-static struct map_desc exynos5440_iodesc0[] __initdata = {
- {
- .virtual = (unsigned long)S3C_VA_UART,
- .pfn = __phys_to_pfn(EXYNOS5440_PA_UART0),
- .length = SZ_512K,
- .type = MT_DEVICE,
},
};
@@ -388,11 +367,6 @@ static void __init exynos5_map_io(void)
iotable_init(exynos5250_iodesc, ARRAY_SIZE(exynos5250_iodesc));
}
-static void __init exynos5440_map_io(void)
-{
- iotable_init(exynos5440_iodesc0, ARRAY_SIZE(exynos5440_iodesc0));
-}
-
void __init exynos_init_time(void)
{
of_clk_init(NULL);
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index 3e156bcddcb..972490fc09d 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -97,6 +97,5 @@ struct exynos_pmu_conf {
};
extern void exynos_sys_powerdown_conf(enum sys_powerdown mode);
-extern void s3c_cpu_resume(void);
#endif /* __ARCH_ARM_MACH_EXYNOS_COMMON_H */
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index 17a18ff3d71..225ee8431c7 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -25,6 +25,7 @@
#include <mach/regs-pmu.h>
#include <plat/cpu.h>
+#include <plat/pm.h>
#include "common.h"
diff --git a/arch/arm/mach-exynos/include/mach/memory.h b/arch/arm/mach-exynos/include/mach/memory.h
index 374ef2cf715..2a4cdb7cb32 100644
--- a/arch/arm/mach-exynos/include/mach/memory.h
+++ b/arch/arm/mach-exynos/include/mach/memory.h
@@ -15,8 +15,13 @@
#define PLAT_PHYS_OFFSET UL(0x40000000)
+#ifndef CONFIG_ARM_LPAE
/* Maximum of 256MiB in one bank */
#define MAX_PHYSMEM_BITS 32
#define SECTION_SIZE_BITS 28
+#else
+#define MAX_PHYSMEM_BITS 36
+#define SECTION_SIZE_BITS 31
+#endif
#endif /* __ASM_ARCH_MEMORY_H */
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 41c20692a13..c679db57726 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -217,6 +217,9 @@ static __init int exynos_pm_drvinit(void)
struct clk *pll_base;
unsigned int tmp;
+ if (soc_is_exynos5440())
+ return 0;
+
s3c_pm_init();
/* All wakeup disable */
@@ -340,6 +343,9 @@ static struct syscore_ops exynos_pm_syscore_ops = {
static __init int exynos_pm_syscore_init(void)
{
+ if (soc_is_exynos5440())
+ return 0;
+
register_syscore_ops(&exynos_pm_syscore_ops);
return 0;
}
diff --git a/arch/arm/mach-footbridge/dc21285.c b/arch/arm/mach-footbridge/dc21285.c
index a7cd2cf5e08..3490a24f969 100644
--- a/arch/arm/mach-footbridge/dc21285.c
+++ b/arch/arm/mach-footbridge/dc21285.c
@@ -276,8 +276,6 @@ int __init dc21285_setup(int nr, struct pci_sys_data *sys)
sys->mem_offset = DC21285_PCI_MEM;
- pci_ioremap_io(0, DC21285_PCI_IO);
-
pci_add_resource_offset(&sys->resources, &res[0], sys->mem_offset);
pci_add_resource_offset(&sys->resources, &res[1], sys->mem_offset);
diff --git a/arch/arm/mach-footbridge/include/mach/debug-macro.S b/arch/arm/mach-footbridge/include/mach/debug-macro.S
index c169f0c99b2..02247f313e9 100644
--- a/arch/arm/mach-footbridge/include/mach/debug-macro.S
+++ b/arch/arm/mach-footbridge/include/mach/debug-macro.S
@@ -13,20 +13,6 @@
#include <asm/hardware/dec21285.h>
-#ifndef CONFIG_DEBUG_DC21285_PORT
- /* For NetWinder debugging */
- .macro addruart, rp, rv, tmp
- mov \rp, #0x000003f8
- orr \rv, \rp, #0xfe000000 @ virtual
- orr \rv, \rv, #0x00e00000 @ virtual
- orr \rp, \rp, #0x7c000000 @ physical
- .endm
-
-#define UART_SHIFT 0
-#define FLOW_CONTROL
-#include <asm/hardware/debug-8250.S>
-
-#else
#include <mach/hardware.h>
/* For EBSA285 debugging */
.equ dc21285_high, ARMCSR_BASE & 0xff000000
@@ -54,4 +40,3 @@
.macro waituart,rd,rx
.endm
-#endif
diff --git a/arch/arm/mach-gemini/include/mach/debug-macro.S b/arch/arm/mach-gemini/include/mach/debug-macro.S
deleted file mode 100644
index 837670763b8..00000000000
--- a/arch/arm/mach-gemini/include/mach/debug-macro.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Copyright (C) 2001-2006 Storlink, Corp.
- * Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <mach/hardware.h>
-
- .macro addruart, rp, rv, tmp
- ldr \rp, =GEMINI_UART_BASE @ physical
- ldr \rv, =IO_ADDRESS(GEMINI_UART_BASE) @ virtual
- .endm
-
-#define UART_SHIFT 2
-#define FLOW_CONTROL
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index dc5d6becd8c..88815795fe2 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -115,6 +115,7 @@ static int highbank_platform_notifier(struct notifier_block *nb,
{
struct resource *res;
int reg = -1;
+ u32 val;
struct device *dev = __dev;
if (event != BUS_NOTIFY_ADD_DEVICE)
@@ -141,10 +142,10 @@ static int highbank_platform_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
if (of_property_read_bool(dev->of_node, "dma-coherent")) {
- writel(0xff31, sregs_base + reg);
+ val = readl(sregs_base + reg);
+ writel(val | 0xff01, sregs_base + reg);
set_dma_ops(dev, &arm_coherent_dma_ops);
- } else
- writel(0, sregs_base + reg);
+ }
return NOTIFY_OK;
}
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 4282e99f5ca..86567d980b0 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -199,7 +199,8 @@ static const char *pcie_axi_sels[] = { "axi", "ahb", };
static const char *ssi_sels[] = { "pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_post_div", };
static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
static const char *enfc_sels[] = { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", };
-static const char *emi_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", };
+static const char *emi_sels[] = { "pll2_pfd2_396m", "pll3_usb_otg", "axi", "pll2_pfd0_352m", };
+static const char *emi_slow_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", };
static const char *vdo_axi_sels[] = { "axi", "ahb", };
static const char *vpu_axi_sels[] = { "axi", "pll2_pfd2_396m", "pll2_pfd0_352m", };
static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video_div",
@@ -392,7 +393,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
clk[usdhc4_sel] = imx_clk_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels));
clk[enfc_sel] = imx_clk_mux("enfc_sel", base + 0x2c, 16, 2, enfc_sels, ARRAY_SIZE(enfc_sels));
clk[emi_sel] = imx_clk_mux("emi_sel", base + 0x1c, 27, 2, emi_sels, ARRAY_SIZE(emi_sels));
- clk[emi_slow_sel] = imx_clk_mux("emi_slow_sel", base + 0x1c, 29, 2, emi_sels, ARRAY_SIZE(emi_sels));
+ clk[emi_slow_sel] = imx_clk_mux("emi_slow_sel", base + 0x1c, 29, 2, emi_slow_sels, ARRAY_SIZE(emi_slow_sels));
clk[vdo_axi_sel] = imx_clk_mux("vdo_axi_sel", base + 0x18, 11, 1, vdo_axi_sels, ARRAY_SIZE(vdo_axi_sels));
clk[vpu_axi_sel] = imx_clk_mux("vpu_axi_sel", base + 0x18, 14, 2, vpu_axi_sels, ARRAY_SIZE(vpu_axi_sels));
clk[cko1_sel] = imx_clk_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels));
diff --git a/arch/arm/mach-imx/clk-vf610.c b/arch/arm/mach-imx/clk-vf610.c
index d617c0b7c80..b169a396d93 100644
--- a/arch/arm/mach-imx/clk-vf610.c
+++ b/arch/arm/mach-imx/clk-vf610.c
@@ -183,6 +183,8 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
clk[VF610_CLK_ENET_TS_SEL] = imx_clk_mux("enet_ts_sel", CCM_CSCMR2, 0, 3, enet_ts_sels, 7);
clk[VF610_CLK_ENET] = imx_clk_gate("enet", "enet_sel", CCM_CSCDR1, 24);
clk[VF610_CLK_ENET_TS] = imx_clk_gate("enet_ts", "enet_ts_sel", CCM_CSCDR1, 23);
+ clk[VF610_CLK_ENET0] = imx_clk_gate2("enet0", "ipg_bus", CCM_CCGR9, CCM_CCGRx_CGn(0));
+ clk[VF610_CLK_ENET1] = imx_clk_gate2("enet1", "ipg_bus", CCM_CCGR9, CCM_CCGRx_CGn(1));
clk[VF610_CLK_PIT] = imx_clk_gate2("pit", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(7));
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 7be13f8e69a..a02f275a198 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -254,13 +254,12 @@ static void __init imx6q_opp_init(struct device *cpu_dev)
{
struct device_node *np;
- np = of_find_node_by_path("/cpus/cpu@0");
+ np = of_node_get(cpu_dev->of_node);
if (!np) {
pr_warn("failed to find cpu0 node\n");
return;
}
- cpu_dev->of_node = np;
if (of_init_opp_table(cpu_dev)) {
pr_warn("failed to init OPP table\n");
goto put_node;
diff --git a/arch/arm/mach-imx/mx27.h b/arch/arm/mach-imx/mx27.h
index e074616d54c..8a65f192e7f 100644
--- a/arch/arm/mach-imx/mx27.h
+++ b/arch/arm/mach-imx/mx27.h
@@ -135,7 +135,7 @@
#define MX27_INT_GPT4 (NR_IRQS_LEGACY + 4)
#define MX27_INT_RTIC (NR_IRQS_LEGACY + 5)
#define MX27_INT_CSPI3 (NR_IRQS_LEGACY + 6)
-#define MX27_INT_SDHC (NR_IRQS_LEGACY + 7)
+#define MX27_INT_MSHC (NR_IRQS_LEGACY + 7)
#define MX27_INT_GPIO (NR_IRQS_LEGACY + 8)
#define MX27_INT_SDHC3 (NR_IRQS_LEGACY + 9)
#define MX27_INT_SDHC2 (NR_IRQS_LEGACY + 10)
diff --git a/arch/arm/mach-integrator/include/mach/debug-macro.S b/arch/arm/mach-integrator/include/mach/debug-macro.S
deleted file mode 100644
index 411b116077e..00000000000
--- a/arch/arm/mach-integrator/include/mach/debug-macro.S
+++ /dev/null
@@ -1,20 +0,0 @@
-/* arch/arm/mach-integrator/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-*/
-
- .macro addruart, rp, rv, tmp
- mov \rp, #0x16000000 @ physical base address
- mov \rv, #0xf0000000 @ virtual base
- add \rv, \rv, #0x16000000 >> 4
- .endm
-
-#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/mach-iop13xx/include/mach/debug-macro.S b/arch/arm/mach-iop13xx/include/mach/debug-macro.S
deleted file mode 100644
index d869a6f67e5..00000000000
--- a/arch/arm/mach-iop13xx/include/mach/debug-macro.S
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * arch/arm/mach-iop13xx/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
- .macro addruart, rp, rv, tmp
- mov \rp, #0x00002300
- orr \rp, \rp, #0x00000040
- orr \rv, \rp, #0xfe000000 @ virtual
- orr \rv, \rv, #0x00e80000
- orr \rp, \rp, #0xff000000 @ physical
- orr \rp, \rp, #0x00d80000
- .endm
-
-#define UART_SHIFT 2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-iop32x/include/mach/debug-macro.S b/arch/arm/mach-iop32x/include/mach/debug-macro.S
deleted file mode 100644
index 363bdf90b34..00000000000
--- a/arch/arm/mach-iop32x/include/mach/debug-macro.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * arch/arm/mach-iop32x/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
- .macro addruart, rp, rv, tmp
- mov \rp, #0xfe000000 @ physical as well as virtual
- orr \rp, \rp, #0x00800000 @ location of the UART
- mov \rv, \rp
- .endm
-
-#define UART_SHIFT 0
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-iop33x/include/mach/debug-macro.S b/arch/arm/mach-iop33x/include/mach/debug-macro.S
deleted file mode 100644
index 361be1f6026..00000000000
--- a/arch/arm/mach-iop33x/include/mach/debug-macro.S
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * arch/arm/mach-iop33x/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
- .macro addruart, rp, rv, tmp
- mov \rp, #0x00ff0000
- orr \rp, \rp, #0x0000f700
- orr \rv, #0xfe000000 @ virtual
- orr \rp, #0xff000000 @ physical
- .endm
-
-#define UART_SHIFT 2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-ixp4xx/include/mach/debug-macro.S b/arch/arm/mach-ixp4xx/include/mach/debug-macro.S
deleted file mode 100644
index ff686cbc5df..00000000000
--- a/arch/arm/mach-ixp4xx/include/mach/debug-macro.S
+++ /dev/null
@@ -1,26 +0,0 @@
-/* arch/arm/mach-ixp4xx/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
- .macro addruart, rp, rv, tmp
-#ifdef __ARMEB__
- mov \rp, #3 @ Uart regs are at off set of 3 if
- @ byte writes used - Big Endian.
-#else
- mov \rp, #0
-#endif
- orr \rv, \rp, #0xfe000000 @ virtual
- orr \rv, \rv, #0x00f00000
- orr \rp, \rp, #0xc8000000 @ physical
- .endm
-
-#define UART_SHIFT 2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c
index fe4d9ff93a7..b661c5c2870 100644
--- a/arch/arm/mach-keystone/keystone.c
+++ b/arch/arm/mach-keystone/keystone.c
@@ -49,7 +49,7 @@ static const char *keystone_match[] __initconst = {
NULL,
};
-void keystone_restart(char mode, const char *cmd)
+void keystone_restart(enum reboot_mode mode, const char *cmd)
{
u32 val;
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index e9238b5567e..1663de09098 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -264,7 +264,7 @@ void __init kirkwood_clk_init(void)
orion_clkdev_add(NULL, MV_XOR_NAME ".1", xor1);
orion_clkdev_add("0", "pcie", pex0);
orion_clkdev_add("1", "pcie", pex1);
- orion_clkdev_add(NULL, "kirkwood-i2s", audio);
+ orion_clkdev_add(NULL, "mvebu-audio", audio);
orion_clkdev_add(NULL, MV64XXX_I2C_CTLR_NAME ".0", runit);
orion_clkdev_add(NULL, MV64XXX_I2C_CTLR_NAME ".1", runit);
@@ -560,7 +560,7 @@ void __init kirkwood_timer_init(void)
/*****************************************************************************
* Audio
****************************************************************************/
-static struct resource kirkwood_i2s_resources[] = {
+static struct resource kirkwood_audio_resources[] = {
[0] = {
.start = AUDIO_PHYS_BASE,
.end = AUDIO_PHYS_BASE + SZ_16K - 1,
@@ -573,29 +573,23 @@ static struct resource kirkwood_i2s_resources[] = {
},
};
-static struct kirkwood_asoc_platform_data kirkwood_i2s_data = {
+static struct kirkwood_asoc_platform_data kirkwood_audio_data = {
.burst = 128,
};
-static struct platform_device kirkwood_i2s_device = {
- .name = "kirkwood-i2s",
+static struct platform_device kirkwood_audio_device = {
+ .name = "mvebu-audio",
.id = -1,
- .num_resources = ARRAY_SIZE(kirkwood_i2s_resources),
- .resource = kirkwood_i2s_resources,
+ .num_resources = ARRAY_SIZE(kirkwood_audio_resources),
+ .resource = kirkwood_audio_resources,
.dev = {
- .platform_data = &kirkwood_i2s_data,
+ .platform_data = &kirkwood_audio_data,
},
};
-static struct platform_device kirkwood_pcm_device = {
- .name = "kirkwood-pcm-audio",
- .id = -1,
-};
-
void __init kirkwood_audio_init(void)
{
- platform_device_register(&kirkwood_i2s_device);
- platform_device_register(&kirkwood_pcm_device);
+ platform_device_register(&kirkwood_audio_device);
}
/*****************************************************************************
diff --git a/arch/arm/mach-kirkwood/include/mach/debug-macro.S b/arch/arm/mach-kirkwood/include/mach/debug-macro.S
deleted file mode 100644
index f785d401a60..00000000000
--- a/arch/arm/mach-kirkwood/include/mach/debug-macro.S
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-kirkwood/include/mach/debug-macro.S
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <mach/bridge-regs.h>
-
- .macro addruart, rp, rv, tmp
- ldr \rp, =KIRKWOOD_REGS_PHYS_BASE
- ldr \rv, =KIRKWOOD_REGS_VIRT_BASE
- orr \rp, \rp, #0x00012000
- orr \rv, \rv, #0x00012000
- .endm
-
-#define UART_SHIFT 2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-lpc32xx/include/mach/debug-macro.S b/arch/arm/mach-lpc32xx/include/mach/debug-macro.S
deleted file mode 100644
index 351bd6c8490..00000000000
--- a/arch/arm/mach-lpc32xx/include/mach/debug-macro.S
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * arch/arm/mach-lpc32xx/include/mach/debug-macro.S
- *
- * Author: Kevin Wells <kevin.wells@nxp.com>
- *
- * Copyright (C) 2010 NXP Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-/*
- * Debug output is hardcoded to standard UART 5
-*/
-
- .macro addruart, rp, rv, tmp
- ldreq \rp, =0x40090000
- ldrne \rv, =0xF4090000
- .endm
-
-#define UART_SHIFT 2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 614e41e7881..905efc8cac7 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -121,8 +121,7 @@ config MSM_SMD
bool
config MSM_GPIOMUX
- depends on !(ARCH_MSM8X60 || ARCH_MSM8960)
- bool "MSM V1 TLMM GPIOMUX architecture"
+ bool
help
Support for MSM V1 TLMM GPIOMUX architecture.
diff --git a/arch/arm/mach-msm/devices-msm7x00.c b/arch/arm/mach-msm/devices-msm7x00.c
index 6d50fb96486..d83404d4b32 100644
--- a/arch/arm/mach-msm/devices-msm7x00.c
+++ b/arch/arm/mach-msm/devices-msm7x00.c
@@ -456,9 +456,9 @@ static struct clk_pcom_desc msm_clocks_7x01a[] = {
CLK_PCOM("tsif_ref_clk", TSIF_REF_CLK, NULL, 0),
CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0),
CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0),
- CLK_PCOM("uart_clk", UART1_CLK, "msm_serial.0", OFF),
- CLK_PCOM("uart_clk", UART2_CLK, "msm_serial.1", 0),
- CLK_PCOM("uart_clk", UART3_CLK, "msm_serial.2", OFF),
+ CLK_PCOM("core", UART1_CLK, "msm_serial.0", OFF),
+ CLK_PCOM("core", UART2_CLK, "msm_serial.1", 0),
+ CLK_PCOM("core", UART3_CLK, "msm_serial.2", OFF),
CLK_PCOM("uart1dm_clk", UART1DM_CLK, NULL, OFF),
CLK_PCOM("uart2dm_clk", UART2DM_CLK, NULL, 0),
CLK_PCOM("usb_hs_clk", USB_HS_CLK, "msm_hsusb", OFF),
diff --git a/arch/arm/mach-msm/devices-msm7x30.c b/arch/arm/mach-msm/devices-msm7x30.c
index d4db75acff5..14e286948f6 100644
--- a/arch/arm/mach-msm/devices-msm7x30.c
+++ b/arch/arm/mach-msm/devices-msm7x30.c
@@ -211,7 +211,7 @@ static struct clk_pcom_desc msm_clocks_7x30[] = {
CLK_PCOM("spi_pclk", SPI_P_CLK, NULL, 0),
CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0),
CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0),
- CLK_PCOM("uart_clk", UART2_CLK, "msm_serial.1", 0),
+ CLK_PCOM("core", UART2_CLK, "msm_serial.1", 0),
CLK_PCOM("usb_phy_clk", USB_PHY_CLK, NULL, 0),
CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF),
CLK_PCOM("usb_hs_pclk", USB_HS_P_CLK, NULL, OFF),
diff --git a/arch/arm/mach-msm/devices-qsd8x50.c b/arch/arm/mach-msm/devices-qsd8x50.c
index f5518112284..2ed89b25d30 100644
--- a/arch/arm/mach-msm/devices-qsd8x50.c
+++ b/arch/arm/mach-msm/devices-qsd8x50.c
@@ -358,9 +358,9 @@ static struct clk_pcom_desc msm_clocks_8x50[] = {
CLK_PCOM("tsif_ref_clk", TSIF_REF_CLK, NULL, 0),
CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0),
CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0),
- CLK_PCOM("uart_clk", UART1_CLK, NULL, OFF),
- CLK_PCOM("uart_clk", UART2_CLK, NULL, 0),
- CLK_PCOM("uart_clk", UART3_CLK, "msm_serial.2", OFF),
+ CLK_PCOM("core", UART1_CLK, NULL, OFF),
+ CLK_PCOM("core", UART2_CLK, NULL, 0),
+ CLK_PCOM("core", UART3_CLK, "msm_serial.2", OFF),
CLK_PCOM("uartdm_clk", UART1DM_CLK, NULL, OFF),
CLK_PCOM("uartdm_clk", UART2DM_CLK, NULL, 0),
CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF),
diff --git a/arch/arm/mach-msm/gpiomux-v1.c b/arch/arm/mach-msm/gpiomux-v1.c
deleted file mode 100644
index 27de2abd714..00000000000
--- a/arch/arm/mach-msm/gpiomux-v1.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#include <linux/kernel.h>
-#include "gpiomux.h"
-#include "proc_comm.h"
-
-void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val)
-{
- unsigned tlmm_config = (val & ~GPIOMUX_CTL_MASK) |
- ((gpio & 0x3ff) << 4);
- unsigned tlmm_disable = 0;
- int rc;
-
- rc = msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
- &tlmm_config, &tlmm_disable);
- if (rc)
- pr_err("%s: unexpected proc_comm failure %d: %08x %08x\n",
- __func__, rc, tlmm_config, tlmm_disable);
-}
diff --git a/arch/arm/mach-msm/gpiomux.h b/arch/arm/mach-msm/gpiomux.h
index 8e82f41a892..4410d7766f9 100644
--- a/arch/arm/mach-msm/gpiomux.h
+++ b/arch/arm/mach-msm/gpiomux.h
@@ -73,16 +73,6 @@ extern struct msm_gpiomux_config msm_gpiomux_configs[GPIOMUX_NGPIOS];
int msm_gpiomux_write(unsigned gpio,
gpiomux_config_t active,
gpiomux_config_t suspended);
-
-/* Architecture-internal function for use by the framework only.
- * This function can assume the following:
- * - the gpio value has passed a bounds-check
- * - the gpiomux spinlock has been obtained
- *
- * This function is not for public consumption. External users
- * should use msm_gpiomux_write.
- */
-void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val);
#else
static inline int msm_gpiomux_write(unsigned gpio,
gpiomux_config_t active,
diff --git a/arch/arm/mach-mv78xx0/include/mach/debug-macro.S b/arch/arm/mach-mv78xx0/include/mach/debug-macro.S
deleted file mode 100644
index a7df02b049b..00000000000
--- a/arch/arm/mach-mv78xx0/include/mach/debug-macro.S
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * arch/arm/mach-mv78xx0/include/mach/debug-macro.S
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <mach/mv78xx0.h>
-
- .macro addruart, rp, rv, tmp
- ldr \rp, =MV78XX0_REGS_PHYS_BASE
- ldr \rv, =MV78XX0_REGS_VIRT_BASE
- orr \rp, \rp, #0x00012000
- orr \rv, \rv, #0x00012000
- .endm
-
-#define UART_SHIFT 2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-mvebu/platsmp.c b/arch/arm/mach-mvebu/platsmp.c
index ce81d303140..594b63db421 100644
--- a/arch/arm/mach-mvebu/platsmp.c
+++ b/arch/arm/mach-mvebu/platsmp.c
@@ -29,45 +29,40 @@
#include "pmsu.h"
#include "coherency.h"
+static struct clk *__init get_cpu_clk(int cpu)
+{
+ struct clk *cpu_clk;
+ struct device_node *np = of_get_cpu_node(cpu, NULL);
+
+ if (WARN(!np, "missing cpu node\n"))
+ return NULL;
+ cpu_clk = of_clk_get(np, 0);
+ if (WARN_ON(IS_ERR(cpu_clk)))
+ return NULL;
+ return cpu_clk;
+}
+
void __init set_secondary_cpus_clock(void)
{
- int thiscpu;
+ int thiscpu, cpu;
unsigned long rate;
- struct clk *cpu_clk = NULL;
- struct device_node *np = NULL;
+ struct clk *cpu_clk;
thiscpu = smp_processor_id();
- for_each_node_by_type(np, "cpu") {
- int err;
- int cpu;
-
- err = of_property_read_u32(np, "reg", &cpu);
- if (WARN_ON(err))
- return;
-
- if (cpu == thiscpu) {
- cpu_clk = of_clk_get(np, 0);
- break;
- }
- }
- if (WARN_ON(IS_ERR(cpu_clk)))
+ cpu_clk = get_cpu_clk(thiscpu);
+ if (!cpu_clk)
return;
clk_prepare_enable(cpu_clk);
rate = clk_get_rate(cpu_clk);
/* set all the other CPU clk to the same rate than the boot CPU */
- for_each_node_by_type(np, "cpu") {
- int err;
- int cpu;
-
- err = of_property_read_u32(np, "reg", &cpu);
- if (WARN_ON(err))
+ for_each_possible_cpu(cpu) {
+ if (cpu == thiscpu)
+ continue;
+ cpu_clk = get_cpu_clk(cpu);
+ if (!cpu_clk)
return;
-
- if (cpu != thiscpu) {
- cpu_clk = of_clk_get(np, 0);
- clk_set_rate(cpu_clk, rate);
- }
+ clk_set_rate(cpu_clk, rate);
}
}
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 627fa7e41fb..3eed0006d18 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -62,7 +62,7 @@ config SOC_OMAP5
select HAVE_SMP
select COMMON_CLK
select HAVE_ARM_ARCH_TIMER
- select ARM_ERRATA_798181
+ select ARM_ERRATA_798181 if SMP
config SOC_AM33XX
bool "AM33XX support"
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index 244d8a5aa54..c711ad6ac06 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -100,39 +100,52 @@ static struct platform_device sdp2430_flash_device = {
.resource = &sdp2430_flash_resource,
};
-static struct platform_device *sdp2430_devices[] __initdata = {
- &sdp2430_flash_device,
-};
-
/* LCD */
#define SDP2430_LCD_PANEL_BACKLIGHT_GPIO 91
#define SDP2430_LCD_PANEL_ENABLE_GPIO 154
-static struct panel_generic_dpi_data sdp2430_panel_data = {
- .name = "nec_nl2432dr22-11b",
- .num_gpios = 2,
- .gpios = {
- SDP2430_LCD_PANEL_ENABLE_GPIO,
- SDP2430_LCD_PANEL_BACKLIGHT_GPIO,
- },
+static const struct display_timing sdp2430_lcd_videomode = {
+ .pixelclock = { 0, 5400000, 0 },
+
+ .hactive = { 0, 240, 0 },
+ .hfront_porch = { 0, 3, 0 },
+ .hback_porch = { 0, 39, 0 },
+ .hsync_len = { 0, 3, 0 },
+
+ .vactive = { 0, 320, 0 },
+ .vfront_porch = { 0, 2, 0 },
+ .vback_porch = { 0, 7, 0 },
+ .vsync_len = { 0, 1, 0 },
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
-static struct omap_dss_device sdp2430_lcd_device = {
- .name = "lcd",
- .driver_name = "generic_dpi_panel",
- .type = OMAP_DISPLAY_TYPE_DPI,
- .phy.dpi.data_lines = 16,
- .data = &sdp2430_panel_data,
+static struct panel_dpi_platform_data sdp2430_lcd_pdata = {
+ .name = "lcd",
+ .source = "dpi.0",
+
+ .data_lines = 16,
+
+ .display_timing = &sdp2430_lcd_videomode,
+
+ .enable_gpio = SDP2430_LCD_PANEL_ENABLE_GPIO,
+ .backlight_gpio = SDP2430_LCD_PANEL_BACKLIGHT_GPIO,
};
-static struct omap_dss_device *sdp2430_dss_devices[] = {
- &sdp2430_lcd_device,
+static struct platform_device sdp2430_lcd_device = {
+ .name = "panel-dpi",
+ .id = 0,
+ .dev.platform_data = &sdp2430_lcd_pdata,
};
static struct omap_dss_board_info sdp2430_dss_data = {
- .num_devices = ARRAY_SIZE(sdp2430_dss_devices),
- .devices = sdp2430_dss_devices,
- .default_device = &sdp2430_lcd_device,
+ .default_display_name = "lcd",
+};
+
+static struct platform_device *sdp2430_devices[] __initdata = {
+ &sdp2430_flash_device,
+ &sdp2430_lcd_device,
};
#if IS_ENABLED(CONFIG_SMC91X)
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 23b004afa3f..d95d0ef1354 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -126,53 +126,65 @@ static void __init sdp3430_display_init(void)
}
-static struct panel_sharp_ls037v7dw01_data sdp3430_lcd_data = {
- .resb_gpio = SDP3430_LCD_PANEL_ENABLE_GPIO,
- .ini_gpio = -1,
- .mo_gpio = -1,
- .lr_gpio = -1,
- .ud_gpio = -1,
+static struct panel_sharp_ls037v7dw01_platform_data sdp3430_lcd_pdata = {
+ .name = "lcd",
+ .source = "dpi.0",
+
+ .data_lines = 16,
+
+ .resb_gpio = SDP3430_LCD_PANEL_ENABLE_GPIO,
+ .ini_gpio = -1,
+ .mo_gpio = -1,
+ .lr_gpio = -1,
+ .ud_gpio = -1,
+};
+
+static struct platform_device sdp3430_lcd_device = {
+ .name = "panel-sharp-ls037v7dw01",
+ .id = 0,
+ .dev.platform_data = &sdp3430_lcd_pdata,
};
-static struct omap_dss_device sdp3430_lcd_device = {
- .name = "lcd",
- .driver_name = "sharp_ls_panel",
- .type = OMAP_DISPLAY_TYPE_DPI,
- .phy.dpi.data_lines = 16,
- .data = &sdp3430_lcd_data,
+static struct connector_dvi_platform_data sdp3430_dvi_connector_pdata = {
+ .name = "dvi",
+ .source = "tfp410.0",
+ .i2c_bus_num = -1,
};
-static struct tfp410_platform_data dvi_panel = {
- .power_down_gpio = -1,
- .i2c_bus_num = -1,
+static struct platform_device sdp3430_dvi_connector_device = {
+ .name = "connector-dvi",
+ .id = 0,
+ .dev.platform_data = &sdp3430_dvi_connector_pdata,
};
-static struct omap_dss_device sdp3430_dvi_device = {
- .name = "dvi",
- .type = OMAP_DISPLAY_TYPE_DPI,
- .driver_name = "tfp410",
- .data = &dvi_panel,
- .phy.dpi.data_lines = 24,
+static struct encoder_tfp410_platform_data sdp3430_tfp410_pdata = {
+ .name = "tfp410.0",
+ .source = "dpi.0",
+ .data_lines = 24,
+ .power_down_gpio = -1,
};
-static struct omap_dss_device sdp3430_tv_device = {
- .name = "tv",
- .driver_name = "venc",
- .type = OMAP_DISPLAY_TYPE_VENC,
- .phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+static struct platform_device sdp3430_tfp410_device = {
+ .name = "tfp410",
+ .id = 0,
+ .dev.platform_data = &sdp3430_tfp410_pdata,
};
+static struct connector_atv_platform_data sdp3430_tv_pdata = {
+ .name = "tv",
+ .source = "venc.0",
+ .connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
+ .invert_polarity = false,
+};
-static struct omap_dss_device *sdp3430_dss_devices[] = {
- &sdp3430_lcd_device,
- &sdp3430_dvi_device,
- &sdp3430_tv_device,
+static struct platform_device sdp3430_tv_connector_device = {
+ .name = "connector-analog-tv",
+ .id = 0,
+ .dev.platform_data = &sdp3430_tv_pdata,
};
static struct omap_dss_board_info sdp3430_dss_data = {
- .num_devices = ARRAY_SIZE(sdp3430_dss_devices),
- .devices = sdp3430_dss_devices,
- .default_device = &sdp3430_lcd_device,
+ .default_display_name = "lcd",
};
static struct omap2_hsmmc_info mmc[] = {
@@ -583,6 +595,11 @@ static void __init omap_3430sdp_init(void)
omap_hsmmc_init(mmc);
omap3430_i2c_init();
omap_display_init(&sdp3430_dss_data);
+ platform_device_register(&sdp3430_lcd_device);
+ platform_device_register(&sdp3430_tfp410_device);
+ platform_device_register(&sdp3430_dvi_connector_device);
+ platform_device_register(&sdp3430_tv_connector_device);
+
if (omap_rev() > OMAP3430_REV_ES1_0)
gpio_pendown = SDP3430_TS_GPIO_IRQ_SDPV2;
else
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index d63f14b534b..8cc2c9e9fb0 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -120,56 +120,95 @@ static int __init am3517_evm_i2c_init(void)
return 0;
}
-static struct panel_generic_dpi_data lcd_panel = {
- .name = "sharp_lq",
- .num_gpios = 3,
- .gpios = {
- LCD_PANEL_PWR,
- LCD_PANEL_BKLIGHT_PWR,
- LCD_PANEL_PWM,
- },
+static const struct display_timing am3517_evm_lcd_videomode = {
+ .pixelclock = { 0, 9000000, 0 },
+
+ .hactive = { 0, 480, 0 },
+ .hfront_porch = { 0, 3, 0 },
+ .hback_porch = { 0, 2, 0 },
+ .hsync_len = { 0, 42, 0 },
+
+ .vactive = { 0, 272, 0 },
+ .vfront_porch = { 0, 3, 0 },
+ .vback_porch = { 0, 2, 0 },
+ .vsync_len = { 0, 11, 0 },
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_LOW | DISPLAY_FLAGS_PIXDATA_POSEDGE,
+};
+
+static struct panel_dpi_platform_data am3517_evm_lcd_pdata = {
+ .name = "lcd",
+ .source = "dpi.0",
+
+ .data_lines = 16,
+
+ .display_timing = &am3517_evm_lcd_videomode,
+
+ .enable_gpio = LCD_PANEL_PWR,
+ .backlight_gpio = LCD_PANEL_BKLIGHT_PWR,
+};
+
+static struct platform_device am3517_evm_lcd_device = {
+ .name = "panel-dpi",
+ .id = 0,
+ .dev.platform_data = &am3517_evm_lcd_pdata,
};
-static struct omap_dss_device am3517_evm_lcd_device = {
- .type = OMAP_DISPLAY_TYPE_DPI,
- .name = "lcd",
- .driver_name = "generic_dpi_panel",
- .data = &lcd_panel,
- .phy.dpi.data_lines = 16,
+static struct connector_dvi_platform_data am3517_evm_dvi_connector_pdata = {
+ .name = "dvi",
+ .source = "tfp410.0",
+ .i2c_bus_num = -1,
};
-static struct omap_dss_device am3517_evm_tv_device = {
- .type = OMAP_DISPLAY_TYPE_VENC,
- .name = "tv",
- .driver_name = "venc",
- .phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+static struct platform_device am3517_evm_dvi_connector_device = {
+ .name = "connector-dvi",
+ .id = 0,
+ .dev.platform_data = &am3517_evm_dvi_connector_pdata,
};
-static struct tfp410_platform_data dvi_panel = {
- .power_down_gpio = -1,
- .i2c_bus_num = -1,
+static struct encoder_tfp410_platform_data am3517_evm_tfp410_pdata = {
+ .name = "tfp410.0",
+ .source = "dpi.0",
+ .data_lines = 24,
+ .power_down_gpio = -1,
};
-static struct omap_dss_device am3517_evm_dvi_device = {
- .type = OMAP_DISPLAY_TYPE_DPI,
- .name = "dvi",
- .driver_name = "tfp410",
- .data = &dvi_panel,
- .phy.dpi.data_lines = 24,
+static struct platform_device am3517_evm_tfp410_device = {
+ .name = "tfp410",
+ .id = 0,
+ .dev.platform_data = &am3517_evm_tfp410_pdata,
};
-static struct omap_dss_device *am3517_evm_dss_devices[] = {
- &am3517_evm_lcd_device,
- &am3517_evm_tv_device,
- &am3517_evm_dvi_device,
+static struct connector_atv_platform_data am3517_evm_tv_pdata = {
+ .name = "tv",
+ .source = "venc.0",
+ .connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
+ .invert_polarity = false,
+};
+
+static struct platform_device am3517_evm_tv_connector_device = {
+ .name = "connector-analog-tv",
+ .id = 0,
+ .dev.platform_data = &am3517_evm_tv_pdata,
};
static struct omap_dss_board_info am3517_evm_dss_data = {
- .num_devices = ARRAY_SIZE(am3517_evm_dss_devices),
- .devices = am3517_evm_dss_devices,
- .default_device = &am3517_evm_lcd_device,
+ .default_display_name = "lcd",
};
+static void __init am3517_evm_display_init(void)
+{
+ gpio_request_one(LCD_PANEL_PWM, GPIOF_OUT_INIT_HIGH, "lcd panel pwm");
+
+ omap_display_init(&am3517_evm_dss_data);
+
+ platform_device_register(&am3517_evm_tfp410_device);
+ platform_device_register(&am3517_evm_dvi_connector_device);
+ platform_device_register(&am3517_evm_lcd_device);
+ platform_device_register(&am3517_evm_tv_connector_device);
+}
+
/*
* Board initialization
*/
@@ -295,7 +334,9 @@ static void __init am3517_evm_init(void)
omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
am3517_evm_i2c_init();
- omap_display_init(&am3517_evm_dss_data);
+
+ am3517_evm_display_init();
+
omap_serial_init();
omap_sdrc_init(NULL, NULL);
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index d4622ed2625..33d159e2386 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -190,52 +190,81 @@ static inline void cm_t35_init_nand(void) {}
#define CM_T35_LCD_BL_GPIO 58
#define CM_T35_DVI_EN_GPIO 54
-static struct panel_generic_dpi_data lcd_panel = {
- .name = "toppoly_tdo35s",
- .num_gpios = 1,
- .gpios = {
- CM_T35_LCD_BL_GPIO,
- },
+static const struct display_timing cm_t35_lcd_videomode = {
+ .pixelclock = { 0, 26000000, 0 },
+
+ .hactive = { 0, 480, 0 },
+ .hfront_porch = { 0, 104, 0 },
+ .hback_porch = { 0, 8, 0 },
+ .hsync_len = { 0, 8, 0 },
+
+ .vactive = { 0, 640, 0 },
+ .vfront_porch = { 0, 4, 0 },
+ .vback_porch = { 0, 2, 0 },
+ .vsync_len = { 0, 2, 0 },
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_NEGEDGE,
+};
+
+static struct panel_dpi_platform_data cm_t35_lcd_pdata = {
+ .name = "lcd",
+ .source = "dpi.0",
+
+ .data_lines = 18,
+
+ .display_timing = &cm_t35_lcd_videomode,
+
+ .enable_gpio = -1,
+ .backlight_gpio = CM_T35_LCD_BL_GPIO,
+};
+
+static struct platform_device cm_t35_lcd_device = {
+ .name = "panel-dpi",
+ .id = 0,
+ .dev.platform_data = &cm_t35_lcd_pdata,
};
-static struct omap_dss_device cm_t35_lcd_device = {
- .name = "lcd",
- .type = OMAP_DISPLAY_TYPE_DPI,
- .driver_name = "generic_dpi_panel",
- .data = &lcd_panel,
- .phy.dpi.data_lines = 18,
+static struct connector_dvi_platform_data cm_t35_dvi_connector_pdata = {
+ .name = "dvi",
+ .source = "tfp410.0",
+ .i2c_bus_num = -1,
};
-static struct tfp410_platform_data dvi_panel = {
- .power_down_gpio = CM_T35_DVI_EN_GPIO,
- .i2c_bus_num = -1,
+static struct platform_device cm_t35_dvi_connector_device = {
+ .name = "connector-dvi",
+ .id = 0,
+ .dev.platform_data = &cm_t35_dvi_connector_pdata,
};
-static struct omap_dss_device cm_t35_dvi_device = {
- .name = "dvi",
- .type = OMAP_DISPLAY_TYPE_DPI,
- .driver_name = "tfp410",
- .data = &dvi_panel,
- .phy.dpi.data_lines = 24,
+static struct encoder_tfp410_platform_data cm_t35_tfp410_pdata = {
+ .name = "tfp410.0",
+ .source = "dpi.0",
+ .data_lines = 24,
+ .power_down_gpio = CM_T35_DVI_EN_GPIO,
};
-static struct omap_dss_device cm_t35_tv_device = {
- .name = "tv",
- .driver_name = "venc",
- .type = OMAP_DISPLAY_TYPE_VENC,
- .phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+static struct platform_device cm_t35_tfp410_device = {
+ .name = "tfp410",
+ .id = 0,
+ .dev.platform_data = &cm_t35_tfp410_pdata,
};
-static struct omap_dss_device *cm_t35_dss_devices[] = {
- &cm_t35_lcd_device,
- &cm_t35_dvi_device,
- &cm_t35_tv_device,
+static struct connector_atv_platform_data cm_t35_tv_pdata = {
+ .name = "tv",
+ .source = "venc.0",
+ .connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
+ .invert_polarity = false,
+};
+
+static struct platform_device cm_t35_tv_connector_device = {
+ .name = "connector-analog-tv",
+ .id = 0,
+ .dev.platform_data = &cm_t35_tv_pdata,
};
static struct omap_dss_board_info cm_t35_dss_data = {
- .num_devices = ARRAY_SIZE(cm_t35_dss_devices),
- .devices = cm_t35_dss_devices,
- .default_device = &cm_t35_dvi_device,
+ .default_display_name = "dvi",
};
static struct omap2_mcspi_device_config tdo24m_mcspi_config = {
@@ -280,6 +309,11 @@ static void __init cm_t35_init_display(void)
pr_err("CM-T35: failed to register DSS device\n");
gpio_free(CM_T35_LCD_EN_GPIO);
}
+
+ platform_device_register(&cm_t35_tfp410_device);
+ platform_device_register(&cm_t35_dvi_connector_device);
+ platform_device_register(&cm_t35_lcd_device);
+ platform_device_register(&cm_t35_tv_connector_device);
}
static struct regulator_consumer_supply cm_t35_vmmc1_supply[] = {
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index f1d91ba5d1a..cdc4fb9960a 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -112,50 +112,81 @@ static struct regulator_consumer_supply devkit8000_vio_supply[] = {
REGULATOR_SUPPLY("vcc", "spi2.0"),
};
-static struct panel_generic_dpi_data lcd_panel = {
- .name = "innolux_at070tn83",
- /* gpios filled in code */
+static const struct display_timing devkit8000_lcd_videomode = {
+ .pixelclock = { 0, 40000000, 0 },
+
+ .hactive = { 0, 800, 0 },
+ .hfront_porch = { 0, 1, 0 },
+ .hback_porch = { 0, 1, 0 },
+ .hsync_len = { 0, 48, 0 },
+
+ .vactive = { 0, 480, 0 },
+ .vfront_porch = { 0, 12, 0 },
+ .vback_porch = { 0, 25, 0 },
+ .vsync_len = { 0, 3, 0 },
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
-static struct omap_dss_device devkit8000_lcd_device = {
+static struct panel_dpi_platform_data devkit8000_lcd_pdata = {
.name = "lcd",
- .type = OMAP_DISPLAY_TYPE_DPI,
- .driver_name = "generic_dpi_panel",
- .data = &lcd_panel,
- .phy.dpi.data_lines = 24,
+ .source = "dpi.0",
+
+ .data_lines = 24,
+
+ .display_timing = &devkit8000_lcd_videomode,
+
+ .enable_gpio = -1, /* filled in code */
+ .backlight_gpio = -1,
};
-static struct tfp410_platform_data dvi_panel = {
- .power_down_gpio = -1,
- .i2c_bus_num = 1,
+static struct platform_device devkit8000_lcd_device = {
+ .name = "panel-dpi",
+ .id = 0,
+ .dev.platform_data = &devkit8000_lcd_pdata,
};
-static struct omap_dss_device devkit8000_dvi_device = {
+static struct connector_dvi_platform_data devkit8000_dvi_connector_pdata = {
.name = "dvi",
- .type = OMAP_DISPLAY_TYPE_DPI,
- .driver_name = "tfp410",
- .data = &dvi_panel,
- .phy.dpi.data_lines = 24,
+ .source = "tfp410.0",
+ .i2c_bus_num = 1,
};
-static struct omap_dss_device devkit8000_tv_device = {
- .name = "tv",
- .driver_name = "venc",
- .type = OMAP_DISPLAY_TYPE_VENC,
- .phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+static struct platform_device devkit8000_dvi_connector_device = {
+ .name = "connector-dvi",
+ .id = 0,
+ .dev.platform_data = &devkit8000_dvi_connector_pdata,
};
+static struct encoder_tfp410_platform_data devkit8000_tfp410_pdata = {
+ .name = "tfp410.0",
+ .source = "dpi.0",
+ .data_lines = 24,
+ .power_down_gpio = -1, /* filled in code */
+};
-static struct omap_dss_device *devkit8000_dss_devices[] = {
- &devkit8000_lcd_device,
- &devkit8000_dvi_device,
- &devkit8000_tv_device,
+static struct platform_device devkit8000_tfp410_device = {
+ .name = "tfp410",
+ .id = 0,
+ .dev.platform_data = &devkit8000_tfp410_pdata,
+};
+
+static struct connector_atv_platform_data devkit8000_tv_pdata = {
+ .name = "tv",
+ .source = "venc.0",
+ .connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
+ .invert_polarity = false,
+};
+
+static struct platform_device devkit8000_tv_connector_device = {
+ .name = "connector-analog-tv",
+ .id = 0,
+ .dev.platform_data = &devkit8000_tv_pdata,
};
static struct omap_dss_board_info devkit8000_dss_data = {
- .num_devices = ARRAY_SIZE(devkit8000_dss_devices),
- .devices = devkit8000_dss_devices,
- .default_device = &devkit8000_lcd_device,
+ .default_display_name = "lcd",
};
static uint32_t board_keymap[] = {
@@ -204,11 +235,10 @@ static int devkit8000_twl_gpio_setup(struct device *dev,
gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1;
/* TWL4030_GPIO_MAX + 0 is "LCD_PWREN" (out, active high) */
- lcd_panel.num_gpios = 1;
- lcd_panel.gpios[0] = gpio + TWL4030_GPIO_MAX + 0;
+ devkit8000_lcd_pdata.enable_gpio = gpio + TWL4030_GPIO_MAX + 0;
/* gpio + 7 is "DVI_PD" (out, active low) */
- dvi_panel.power_down_gpio = gpio + 7;
+ devkit8000_tfp410_pdata.power_down_gpio = gpio + 7;
return 0;
}
@@ -413,6 +443,10 @@ static struct platform_device *devkit8000_devices[] __initdata = {
&leds_gpio,
&keys_gpio,
&omap_dm9000_dev,
+ &devkit8000_lcd_device,
+ &devkit8000_tfp410_device,
+ &devkit8000_dvi_connector_device,
+ &devkit8000_tv_connector_device,
};
static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index e5fbfed69aa..be5d005ebad 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -15,6 +15,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/irqdomain.h>
+#include <linux/clk.h>
#include <asm/mach/arch.h>
@@ -35,6 +36,21 @@ static struct of_device_id omap_dt_match_table[] __initdata = {
{ }
};
+/*
+ * Create alias for USB host PHY clock.
+ * Remove this when clock phandle can be provided via DT
+ */
+static void __init legacy_init_ehci_clk(char *clkname)
+{
+ int ret;
+
+ ret = clk_add_alias("main_clk", NULL, clkname, NULL);
+ if (ret) {
+ pr_err("%s:Failed to add main_clk alias to %s :%d\n",
+ __func__, clkname, ret);
+ }
+}
+
static void __init omap_generic_init(void)
{
omap_sdrc_init(NULL, NULL);
@@ -45,10 +61,15 @@ static void __init omap_generic_init(void)
* HACK: call display setup code for selected boards to enable omapdss.
* This will be removed when omapdss supports DT.
*/
- if (of_machine_is_compatible("ti,omap4-panda"))
+ if (of_machine_is_compatible("ti,omap4-panda")) {
omap4_panda_display_init_of();
+ legacy_init_ehci_clk("auxclk3_ck");
+
+ }
else if (of_machine_is_compatible("ti,omap4-sdp"))
omap_4430sdp_display_init_of();
+ else if (of_machine_is_compatible("ti,omap5-uevm"))
+ legacy_init_ehci_clk("auxclk1_ck");
}
#ifdef CONFIG_SOC_OMAP2420
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 69c0acf5aa6..87e41a8b8d4 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -194,30 +194,48 @@ static struct platform_device h4_flash_device = {
.resource = &h4_flash_resource,
};
-static struct platform_device *h4_devices[] __initdata = {
- &h4_flash_device,
+static const struct display_timing cm_t35_lcd_videomode = {
+ .pixelclock = { 0, 6250000, 0 },
+
+ .hactive = { 0, 240, 0 },
+ .hfront_porch = { 0, 15, 0 },
+ .hback_porch = { 0, 60, 0 },
+ .hsync_len = { 0, 15, 0 },
+
+ .vactive = { 0, 320, 0 },
+ .vfront_porch = { 0, 1, 0 },
+ .vback_porch = { 0, 1, 0 },
+ .vsync_len = { 0, 1, 0 },
+
+ .flags = DISPLAY_FLAGS_HSYNC_HIGH | DISPLAY_FLAGS_VSYNC_HIGH |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
-static struct panel_generic_dpi_data h4_panel_data = {
- .name = "h4",
+static struct panel_dpi_platform_data cm_t35_lcd_pdata = {
+ .name = "lcd",
+ .source = "dpi.0",
+
+ .data_lines = 16,
+
+ .display_timing = &cm_t35_lcd_videomode,
+
+ .enable_gpio = -1,
+ .backlight_gpio = -1,
};
-static struct omap_dss_device h4_lcd_device = {
- .name = "lcd",
- .driver_name = "generic_dpi_panel",
- .type = OMAP_DISPLAY_TYPE_DPI,
- .phy.dpi.data_lines = 16,
- .data = &h4_panel_data,
+static struct platform_device cm_t35_lcd_device = {
+ .name = "panel-dpi",
+ .id = 0,
+ .dev.platform_data = &cm_t35_lcd_pdata,
};
-static struct omap_dss_device *h4_dss_devices[] = {
- &h4_lcd_device,
+static struct platform_device *h4_devices[] __initdata = {
+ &h4_flash_device,
+ &cm_t35_lcd_device,
};
static struct omap_dss_board_info h4_dss_data = {
- .num_devices = ARRAY_SIZE(h4_dss_devices),
- .devices = h4_dss_devices,
- .default_device = &h4_lcd_device,
+ .default_display_name = "lcd",
};
/* 2420 Sysboot setup (2430 is different) */
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 87e65dde8e1..06dbb2d3d38 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -429,31 +429,39 @@ static struct twl4030_gpio_platform_data igep_twl4030_gpio_pdata = {
.setup = igep_twl_gpio_setup,
};
-static struct tfp410_platform_data dvi_panel = {
- .i2c_bus_num = 3,
- .power_down_gpio = IGEP2_GPIO_DVI_PUP,
+static struct connector_dvi_platform_data omap3stalker_dvi_connector_pdata = {
+ .name = "dvi",
+ .source = "tfp410.0",
+ .i2c_bus_num = 3,
};
-static struct omap_dss_device igep2_dvi_device = {
- .type = OMAP_DISPLAY_TYPE_DPI,
- .name = "dvi",
- .driver_name = "tfp410",
- .data = &dvi_panel,
- .phy.dpi.data_lines = 24,
+static struct platform_device omap3stalker_dvi_connector_device = {
+ .name = "connector-dvi",
+ .id = 0,
+ .dev.platform_data = &omap3stalker_dvi_connector_pdata,
};
-static struct omap_dss_device *igep2_dss_devices[] = {
- &igep2_dvi_device
+static struct encoder_tfp410_platform_data omap3stalker_tfp410_pdata = {
+ .name = "tfp410.0",
+ .source = "dpi.0",
+ .data_lines = 24,
+ .power_down_gpio = IGEP2_GPIO_DVI_PUP,
+};
+
+static struct platform_device omap3stalker_tfp410_device = {
+ .name = "tfp410",
+ .id = 0,
+ .dev.platform_data = &omap3stalker_tfp410_pdata,
};
static struct omap_dss_board_info igep2_dss_data = {
- .num_devices = ARRAY_SIZE(igep2_dss_devices),
- .devices = igep2_dss_devices,
- .default_device = &igep2_dvi_device,
+ .default_display_name = "dvi",
};
static struct platform_device *igep_devices[] __initdata = {
&igep_vwlan_device,
+ &omap3stalker_tfp410_device,
+ &omap3stalker_dvi_connector_device,
};
static int igep2_keymap[] = {
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 62e4f701b63..dd8da2c5399 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -184,45 +184,70 @@ static inline void __init ldp_init_smsc911x(void)
#define LCD_PANEL_RESET_GPIO 55
#define LCD_PANEL_QVGA_GPIO 56
-static struct panel_generic_dpi_data ldp_panel_data = {
- .name = "nec_nl2432dr22-11b",
- .num_gpios = 4,
- /* gpios filled in code */
+static const struct display_timing ldp_lcd_videomode = {
+ .pixelclock = { 0, 5400000, 0 },
+
+ .hactive = { 0, 240, 0 },
+ .hfront_porch = { 0, 3, 0 },
+ .hback_porch = { 0, 39, 0 },
+ .hsync_len = { 0, 3, 0 },
+
+ .vactive = { 0, 320, 0 },
+ .vfront_porch = { 0, 2, 0 },
+ .vback_porch = { 0, 7, 0 },
+ .vsync_len = { 0, 1, 0 },
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
-static struct omap_dss_device ldp_lcd_device = {
- .name = "lcd",
- .driver_name = "generic_dpi_panel",
- .type = OMAP_DISPLAY_TYPE_DPI,
- .phy.dpi.data_lines = 18,
- .data = &ldp_panel_data,
+static struct panel_dpi_platform_data ldp_lcd_pdata = {
+ .name = "lcd",
+ .source = "dpi.0",
+
+ .data_lines = 18,
+
+ .display_timing = &ldp_lcd_videomode,
+
+ .enable_gpio = -1, /* filled in code */
+ .backlight_gpio = -1, /* filled in code */
};
-static struct omap_dss_device *ldp_dss_devices[] = {
- &ldp_lcd_device,
+static struct platform_device ldp_lcd_device = {
+ .name = "panel-dpi",
+ .id = 0,
+ .dev.platform_data = &ldp_lcd_pdata,
};
static struct omap_dss_board_info ldp_dss_data = {
- .num_devices = ARRAY_SIZE(ldp_dss_devices),
- .devices = ldp_dss_devices,
- .default_device = &ldp_lcd_device,
+ .default_display_name = "lcd",
};
static void __init ldp_display_init(void)
{
- ldp_panel_data.gpios[2] = LCD_PANEL_RESET_GPIO;
- ldp_panel_data.gpios[3] = LCD_PANEL_QVGA_GPIO;
+ int r;
+
+ static struct gpio gpios[] __initdata = {
+ {LCD_PANEL_RESET_GPIO, GPIOF_OUT_INIT_HIGH, "LCD RESET"},
+ {LCD_PANEL_QVGA_GPIO, GPIOF_OUT_INIT_HIGH, "LCD QVGA"},
+ };
+
+ r = gpio_request_array(gpios, ARRAY_SIZE(gpios));
+ if (r) {
+ pr_err("Cannot request LCD GPIOs, error %d\n", r);
+ return;
+ }
omap_display_init(&ldp_dss_data);
}
static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio)
{
- ldp_panel_data.gpios[0] = gpio + 7;
- ldp_panel_data.gpio_invert[0] = true;
+ /* LCD enable GPIO */
+ ldp_lcd_pdata.enable_gpio = gpio + 7;
- ldp_panel_data.gpios[1] = gpio + 15;
- ldp_panel_data.gpio_invert[1] = true;
+ /* Backlight enable GPIO */
+ ldp_lcd_pdata.backlight_gpio = gpio + 15;
return 0;
}
@@ -322,6 +347,7 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
static struct platform_device *ldp_devices[] __initdata = {
&ldp_gpio_keys_device,
+ &ldp_lcd_device,
};
#ifdef CONFIG_OMAP_MUX
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index f6eeb87e4e9..827d15009a8 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -122,11 +122,7 @@ static struct musb_hdrc_config musb_config = {
};
static struct musb_hdrc_platform_data tusb_data = {
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
.mode = MUSB_OTG,
-#else
- .mode = MUSB_HOST,
-#endif
.set_power = tusb_set_power,
.min_power = 25, /* x2 = 50 mA drawn from VBUS as peripheral */
.power = 100, /* Max 100 mA VBUS for host mode */
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 04c11655541..f26918467ef 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -33,7 +33,7 @@
#include <linux/mtd/nand.h>
#include <linux/mmc/host.h>
#include <linux/usb/phy.h>
-#include <linux/usb/nop-usb-xceiv.h>
+#include <linux/usb/usb_phy_gen_xceiv.h>
#include <linux/regulator/machine.h>
#include <linux/i2c/twl.h>
@@ -225,35 +225,46 @@ static struct mtd_partition omap3beagle_nand_partitions[] = {
/* DSS */
-static struct tfp410_platform_data dvi_panel = {
- .i2c_bus_num = 3,
- .power_down_gpio = -1,
+static struct connector_dvi_platform_data beagle_dvi_connector_pdata = {
+ .name = "dvi",
+ .source = "tfp410.0",
+ .i2c_bus_num = 3,
};
-static struct omap_dss_device beagle_dvi_device = {
- .type = OMAP_DISPLAY_TYPE_DPI,
- .name = "dvi",
- .driver_name = "tfp410",
- .data = &dvi_panel,
- .phy.dpi.data_lines = 24,
+static struct platform_device beagle_dvi_connector_device = {
+ .name = "connector-dvi",
+ .id = 0,
+ .dev.platform_data = &beagle_dvi_connector_pdata,
};
-static struct omap_dss_device beagle_tv_device = {
+static struct encoder_tfp410_platform_data beagle_tfp410_pdata = {
+ .name = "tfp410.0",
+ .source = "dpi.0",
+ .data_lines = 24,
+ .power_down_gpio = -1,
+};
+
+static struct platform_device beagle_tfp410_device = {
+ .name = "tfp410",
+ .id = 0,
+ .dev.platform_data = &beagle_tfp410_pdata,
+};
+
+static struct connector_atv_platform_data beagle_tv_pdata = {
.name = "tv",
- .driver_name = "venc",
- .type = OMAP_DISPLAY_TYPE_VENC,
- .phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+ .source = "venc.0",
+ .connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
+ .invert_polarity = false,
};
-static struct omap_dss_device *beagle_dss_devices[] = {
- &beagle_dvi_device,
- &beagle_tv_device,
+static struct platform_device beagle_tv_connector_device = {
+ .name = "connector-analog-tv",
+ .id = 0,
+ .dev.platform_data = &beagle_tv_pdata,
};
static struct omap_dss_board_info beagle_dss_data = {
- .num_devices = ARRAY_SIZE(beagle_dss_devices),
- .devices = beagle_dss_devices,
- .default_device = &beagle_dvi_device,
+ .default_display_name = "dvi",
};
#include "sdram-micron-mt46h32m32lf-6.h"
@@ -279,7 +290,7 @@ static struct regulator_consumer_supply beagle_vsim_supply[] = {
static struct gpio_led gpio_leds[];
/* PHY's VCC regulator might be added later, so flag that we need it */
-static struct nop_usb_xceiv_platform_data hsusb2_phy_data = {
+static struct usb_phy_gen_xceiv_platform_data hsusb2_phy_data = {
.needs_vcc = true,
};
@@ -332,7 +343,11 @@ static int beagle_twl_gpio_setup(struct device *dev,
if (gpio_request_one(gpio + 1, GPIOF_IN, "EHCI_nOC"))
pr_err("%s: unable to configure EHCI_nOC\n", __func__);
}
- dvi_panel.power_down_gpio = beagle_config.dvi_pd_gpio;
+ beagle_tfp410_pdata.power_down_gpio = beagle_config.dvi_pd_gpio;
+
+ platform_device_register(&beagle_tfp410_device);
+ platform_device_register(&beagle_dvi_connector_device);
+ platform_device_register(&beagle_tv_connector_device);
/* TWL4030_GPIO_MAX i.e. LED_GPO controls HS USB Port 2 power */
phy_data[0].vcc_gpio = gpio + TWL4030_GPIO_MAX;
@@ -547,6 +562,7 @@ static void __init omap3_beagle_init(void)
if (gpio_is_valid(beagle_config.dvi_pd_gpio))
omap_mux_init_gpio(beagle_config.dvi_pd_gpio, OMAP_PIN_OUTPUT);
omap_display_init(&beagle_dss_data);
+
omap_serial_init();
omap_sdrc_init(mt46h32m32lf6_sdrc_params,
mt46h32m32lf6_sdrc_params);
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index 8c026269bac..18143873346 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -33,7 +33,7 @@
#include <linux/i2c/twl.h>
#include <linux/usb/otg.h>
#include <linux/usb/musb.h>
-#include <linux/usb/nop-usb-xceiv.h>
+#include <linux/usb/usb_phy_gen_xceiv.h>
#include <linux/smsc911x.h>
#include <linux/wl12xx.h>
@@ -166,14 +166,6 @@ static inline void __init omap3evm_init_smsc911x(void) { return; }
*/
#define OMAP3EVM_DVI_PANEL_EN_GPIO 199
-static struct panel_sharp_ls037v7dw01_data omap3_evm_lcd_data = {
- .resb_gpio = OMAP3EVM_LCD_PANEL_RESB,
- .ini_gpio = OMAP3EVM_LCD_PANEL_INI,
- .mo_gpio = OMAP3EVM_LCD_PANEL_QVGA,
- .lr_gpio = OMAP3EVM_LCD_PANEL_LR,
- .ud_gpio = OMAP3EVM_LCD_PANEL_UD,
-};
-
#ifdef CONFIG_BROKEN
static void __init omap3_evm_display_init(void)
{
@@ -196,44 +188,65 @@ static void __init omap3_evm_display_init(void)
}
#endif
-static struct omap_dss_device omap3_evm_lcd_device = {
- .name = "lcd",
- .driver_name = "sharp_ls_panel",
- .type = OMAP_DISPLAY_TYPE_DPI,
- .phy.dpi.data_lines = 18,
- .data = &omap3_evm_lcd_data,
+static struct panel_sharp_ls037v7dw01_platform_data omap3_evm_lcd_pdata = {
+ .name = "lcd",
+ .source = "dpi.0",
+
+ .data_lines = 18,
+
+ .resb_gpio = OMAP3EVM_LCD_PANEL_RESB,
+ .ini_gpio = OMAP3EVM_LCD_PANEL_INI,
+ .mo_gpio = OMAP3EVM_LCD_PANEL_QVGA,
+ .lr_gpio = OMAP3EVM_LCD_PANEL_LR,
+ .ud_gpio = OMAP3EVM_LCD_PANEL_UD,
+};
+
+static struct platform_device omap3_evm_lcd_device = {
+ .name = "panel-sharp-ls037v7dw01",
+ .id = 0,
+ .dev.platform_data = &omap3_evm_lcd_pdata,
+};
+
+static struct connector_dvi_platform_data omap3_evm_dvi_connector_pdata = {
+ .name = "dvi",
+ .source = "tfp410.0",
+ .i2c_bus_num = -1,
+};
+
+static struct platform_device omap3_evm_dvi_connector_device = {
+ .name = "connector-dvi",
+ .id = 0,
+ .dev.platform_data = &omap3_evm_dvi_connector_pdata,
};
-static struct omap_dss_device omap3_evm_tv_device = {
- .name = "tv",
- .driver_name = "venc",
- .type = OMAP_DISPLAY_TYPE_VENC,
- .phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+static struct encoder_tfp410_platform_data omap3_evm_tfp410_pdata = {
+ .name = "tfp410.0",
+ .source = "dpi.0",
+ .data_lines = 24,
+ .power_down_gpio = OMAP3EVM_DVI_PANEL_EN_GPIO,
};
-static struct tfp410_platform_data dvi_panel = {
- .power_down_gpio = OMAP3EVM_DVI_PANEL_EN_GPIO,
- .i2c_bus_num = -1,
+static struct platform_device omap3_evm_tfp410_device = {
+ .name = "tfp410",
+ .id = 0,
+ .dev.platform_data = &omap3_evm_tfp410_pdata,
};
-static struct omap_dss_device omap3_evm_dvi_device = {
- .name = "dvi",
- .type = OMAP_DISPLAY_TYPE_DPI,
- .driver_name = "tfp410",
- .data = &dvi_panel,
- .phy.dpi.data_lines = 24,
+static struct connector_atv_platform_data omap3_evm_tv_pdata = {
+ .name = "tv",
+ .source = "venc.0",
+ .connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
+ .invert_polarity = false,
};
-static struct omap_dss_device *omap3_evm_dss_devices[] = {
- &omap3_evm_lcd_device,
- &omap3_evm_tv_device,
- &omap3_evm_dvi_device,
+static struct platform_device omap3_evm_tv_connector_device = {
+ .name = "connector-analog-tv",
+ .id = 0,
+ .dev.platform_data = &omap3_evm_tv_pdata,
};
static struct omap_dss_board_info omap3_evm_dss_data = {
- .num_devices = ARRAY_SIZE(omap3_evm_dss_devices),
- .devices = omap3_evm_dss_devices,
- .default_device = &omap3_evm_lcd_device,
+ .default_display_name = "lcd",
};
static struct regulator_consumer_supply omap3evm_vmmc1_supply[] = {
@@ -468,7 +481,7 @@ struct wl12xx_platform_data omap3evm_wlan_data __initdata = {
static struct regulator_consumer_supply omap3evm_vaux2_supplies[] = {
REGULATOR_SUPPLY("VDD_CSIPHY1", "omap3isp"), /* OMAP ISP */
REGULATOR_SUPPLY("VDD_CSIPHY2", "omap3isp"), /* OMAP ISP */
- REGULATOR_SUPPLY("vcc", "nop_usb_xceiv.2"), /* hsusb port 2 */
+ REGULATOR_SUPPLY("vcc", "usb_phy_gen_xceiv.2"), /* hsusb port 2 */
REGULATOR_SUPPLY("vaux2", NULL),
};
@@ -678,6 +691,10 @@ static void __init omap3_evm_init(void)
omap3_evm_i2c_init();
omap_display_init(&omap3_evm_dss_data);
+ platform_device_register(&omap3_evm_lcd_device);
+ platform_device_register(&omap3_evm_tfp410_device);
+ platform_device_register(&omap3_evm_dvi_connector_device);
+ platform_device_register(&omap3_evm_tv_connector_device);
omap_serial_init();
omap_sdrc_init(mt46h32m32lf6_sdrc_params, NULL);
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index b1547a0edfc..de1bc6bbe58 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -231,34 +231,21 @@ static struct twl4030_keypad_data pandora_kp_data = {
.rep = 1,
};
-static struct panel_tpo_td043_data lcd_data = {
- .nreset_gpio = 157,
-};
-
-static struct omap_dss_device pandora_lcd_device = {
- .name = "lcd",
- .driver_name = "tpo_td043mtea1_panel",
- .type = OMAP_DISPLAY_TYPE_DPI,
- .phy.dpi.data_lines = 24,
- .data = &lcd_data,
-};
-
-static struct omap_dss_device pandora_tv_device = {
- .name = "tv",
- .driver_name = "venc",
- .type = OMAP_DISPLAY_TYPE_VENC,
- .phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+static struct connector_atv_platform_data pandora_tv_pdata = {
+ .name = "tv",
+ .source = "venc.0",
+ .connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
+ .invert_polarity = false,
};
-static struct omap_dss_device *pandora_dss_devices[] = {
- &pandora_lcd_device,
- &pandora_tv_device,
+static struct platform_device pandora_tv_connector_device = {
+ .name = "connector-analog-tv",
+ .id = 0,
+ .dev.platform_data = &pandora_tv_pdata,
};
static struct omap_dss_board_info pandora_dss_data = {
- .num_devices = ARRAY_SIZE(pandora_dss_devices),
- .devices = pandora_dss_devices,
- .default_device = &pandora_lcd_device,
+ .default_display_name = "lcd",
};
static void pandora_wl1251_init_card(struct mmc_card *card)
@@ -348,11 +335,11 @@ static struct regulator_consumer_supply pandora_vdds_supplies[] = {
};
static struct regulator_consumer_supply pandora_vcc_lcd_supply[] = {
- REGULATOR_SUPPLY("vcc", "display0"),
+ REGULATOR_SUPPLY("vcc", "spi1.1"),
};
static struct regulator_consumer_supply pandora_usb_phy_supply[] = {
- REGULATOR_SUPPLY("vcc", "nop_usb_xceiv.2"), /* hsusb port 2 */
+ REGULATOR_SUPPLY("vcc", "usb_phy_gen_xceiv.2"), /* hsusb port 2 */
};
/* ads7846 on SPI and 2 nub controllers on I2C */
@@ -529,13 +516,21 @@ static int __init omap3pandora_i2c_init(void)
return 0;
}
+static struct panel_tpo_td043mtea1_platform_data pandora_lcd_pdata = {
+ .name = "lcd",
+ .source = "dpi.0",
+
+ .data_lines = 24,
+ .nreset_gpio = 157,
+};
+
static struct spi_board_info omap3pandora_spi_board_info[] __initdata = {
{
- .modalias = "tpo_td043mtea1_panel_spi",
+ .modalias = "panel-tpo-td043mtea1",
.bus_num = 1,
.chip_select = 1,
.max_speed_hz = 375000,
- .platform_data = &pandora_lcd_device,
+ .platform_data = &pandora_lcd_pdata,
}
};
@@ -580,6 +575,7 @@ static struct platform_device *omap3pandora_devices[] __initdata = {
&pandora_keys_gpio,
&pandora_vwlan_device,
&pandora_backlight,
+ &pandora_tv_connector_device,
};
static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index d37e6b187ae..ba8342fef79 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -93,40 +93,50 @@ static void __init omap3_stalker_display_init(void)
{
return;
}
+static struct connector_dvi_platform_data omap3stalker_dvi_connector_pdata = {
+ .name = "dvi",
+ .source = "tfp410.0",
+ .i2c_bus_num = -1,
+};
-static struct omap_dss_device omap3_stalker_tv_device = {
- .name = "tv",
- .driver_name = "venc",
- .type = OMAP_DISPLAY_TYPE_VENC,
-#if defined(CONFIG_OMAP2_VENC_OUT_TYPE_SVIDEO)
- .phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
-#elif defined(CONFIG_OMAP2_VENC_OUT_TYPE_COMPOSITE)
- .u.venc.type = OMAP_DSS_VENC_TYPE_COMPOSITE,
-#endif
+static struct platform_device omap3stalker_dvi_connector_device = {
+ .name = "connector-dvi",
+ .id = 0,
+ .dev.platform_data = &omap3stalker_dvi_connector_pdata,
};
-static struct tfp410_platform_data dvi_panel = {
- .power_down_gpio = DSS_ENABLE_GPIO,
- .i2c_bus_num = -1,
+static struct encoder_tfp410_platform_data omap3stalker_tfp410_pdata = {
+ .name = "tfp410.0",
+ .source = "dpi.0",
+ .data_lines = 24,
+ .power_down_gpio = DSS_ENABLE_GPIO,
};
-static struct omap_dss_device omap3_stalker_dvi_device = {
- .name = "dvi",
- .type = OMAP_DISPLAY_TYPE_DPI,
- .driver_name = "tfp410",
- .data = &dvi_panel,
- .phy.dpi.data_lines = 24,
+static struct platform_device omap3stalker_tfp410_device = {
+ .name = "tfp410",
+ .id = 0,
+ .dev.platform_data = &omap3stalker_tfp410_pdata,
+};
+
+static struct connector_atv_platform_data omap3stalker_tv_pdata = {
+ .name = "tv",
+ .source = "venc.0",
+#if defined(CONFIG_OMAP2_VENC_OUT_TYPE_SVIDEO)
+ .connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
+#elif defined(CONFIG_OMAP2_VENC_OUT_TYPE_COMPOSITE)
+ .connector_type = OMAP_DSS_VENC_TYPE_COMPOSITE,
+#endif
+ .invert_polarity = false,
};
-static struct omap_dss_device *omap3_stalker_dss_devices[] = {
- &omap3_stalker_tv_device,
- &omap3_stalker_dvi_device,
+static struct platform_device omap3stalker_tv_connector_device = {
+ .name = "connector-analog-tv",
+ .id = 0,
+ .dev.platform_data = &omap3stalker_tv_pdata,
};
static struct omap_dss_board_info omap3_stalker_dss_data = {
- .num_devices = ARRAY_SIZE(omap3_stalker_dss_devices),
- .devices = omap3_stalker_dss_devices,
- .default_device = &omap3_stalker_dvi_device,
+ .default_display_name = "dvi",
};
static struct regulator_consumer_supply omap3stalker_vmmc1_supply[] = {
@@ -356,6 +366,9 @@ static struct usbhs_phy_data phy_data[] __initdata = {
static struct platform_device *omap3_stalker_devices[] __initdata = {
&keys_gpio,
+ &omap3stalker_tfp410_device,
+ &omap3stalker_dvi_connector_device,
+ &omap3stalker_tv_connector_device,
};
static struct usbhs_omap_platform_data usbhs_bdata __initdata = {
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 5748b5d06c2..f6d38411191 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -72,6 +72,9 @@
#define OVERO_SMSC911X2_CS 4
#define OVERO_SMSC911X2_GPIO 65
+/* whether to register LCD35 instead of LCD43 */
+static bool overo_use_lcd35;
+
#if defined(CONFIG_TOUCHSCREEN_ADS7846) || \
defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
@@ -149,78 +152,94 @@ static inline void __init overo_init_smsc911x(void) { return; }
#define OVERO_GPIO_LCD_EN 144
#define OVERO_GPIO_LCD_BL 145
-static struct tfp410_platform_data dvi_panel = {
- .i2c_bus_num = 3,
- .power_down_gpio = -1,
+static struct connector_atv_platform_data overo_tv_pdata = {
+ .name = "tv",
+ .source = "venc.0",
+ .connector_type = OMAP_DSS_VENC_TYPE_SVIDEO,
+ .invert_polarity = false,
};
-static struct omap_dss_device overo_dvi_device = {
- .name = "dvi",
- .type = OMAP_DISPLAY_TYPE_DPI,
- .driver_name = "tfp410",
- .data = &dvi_panel,
- .phy.dpi.data_lines = 24,
+static struct platform_device overo_tv_connector_device = {
+ .name = "connector-analog-tv",
+ .id = 0,
+ .dev.platform_data = &overo_tv_pdata,
};
-static struct omap_dss_device overo_tv_device = {
- .name = "tv",
- .driver_name = "venc",
- .type = OMAP_DISPLAY_TYPE_VENC,
- .phy.venc.type = OMAP_DSS_VENC_TYPE_SVIDEO,
+static const struct display_timing overo_lcd43_videomode = {
+ .pixelclock = { 0, 9200000, 0 },
+
+ .hactive = { 0, 480, 0 },
+ .hfront_porch = { 0, 8, 0 },
+ .hback_porch = { 0, 4, 0 },
+ .hsync_len = { 0, 41, 0 },
+
+ .vactive = { 0, 272, 0 },
+ .vfront_porch = { 0, 4, 0 },
+ .vback_porch = { 0, 2, 0 },
+ .vsync_len = { 0, 10, 0 },
+
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE,
};
-static struct panel_generic_dpi_data lcd43_panel = {
- .name = "samsung_lte430wq_f0c",
- .num_gpios = 2,
- .gpios = {
- OVERO_GPIO_LCD_EN,
- OVERO_GPIO_LCD_BL
- },
+static struct panel_dpi_platform_data overo_lcd43_pdata = {
+ .name = "lcd43",
+ .source = "dpi.0",
+
+ .data_lines = 24,
+
+ .display_timing = &overo_lcd43_videomode,
+
+ .enable_gpio = OVERO_GPIO_LCD_EN,
+ .backlight_gpio = OVERO_GPIO_LCD_BL,
};
-static struct omap_dss_device overo_lcd43_device = {
- .name = "lcd43",
- .type = OMAP_DISPLAY_TYPE_DPI,
- .driver_name = "generic_dpi_panel",
- .data = &lcd43_panel,
- .phy.dpi.data_lines = 24,
+static struct platform_device overo_lcd43_device = {
+ .name = "panel-dpi",
+ .id = 0,
+ .dev.platform_data = &overo_lcd43_pdata,
};
-#if defined(CONFIG_PANEL_LGPHILIPS_LB035Q02) || \
- defined(CONFIG_PANEL_LGPHILIPS_LB035Q02_MODULE)
-static struct panel_generic_dpi_data lcd35_panel = {
- .num_gpios = 2,
- .gpios = {
- OVERO_GPIO_LCD_EN,
- OVERO_GPIO_LCD_BL
- },
+static struct connector_dvi_platform_data overo_dvi_connector_pdata = {
+ .name = "dvi",
+ .source = "tfp410.0",
+ .i2c_bus_num = 3,
};
-static struct omap_dss_device overo_lcd35_device = {
- .type = OMAP_DISPLAY_TYPE_DPI,
- .name = "lcd35",
- .driver_name = "lgphilips_lb035q02_panel",
- .phy.dpi.data_lines = 24,
- .data = &lcd35_panel,
+static struct platform_device overo_dvi_connector_device = {
+ .name = "connector-dvi",
+ .id = 0,
+ .dev.platform_data = &overo_dvi_connector_pdata,
};
-#endif
-static struct omap_dss_device *overo_dss_devices[] = {
- &overo_dvi_device,
- &overo_tv_device,
-#if defined(CONFIG_PANEL_LGPHILIPS_LB035Q02) || \
- defined(CONFIG_PANEL_LGPHILIPS_LB035Q02_MODULE)
- &overo_lcd35_device,
-#endif
- &overo_lcd43_device,
+static struct encoder_tfp410_platform_data overo_tfp410_pdata = {
+ .name = "tfp410.0",
+ .source = "dpi.0",
+ .data_lines = 24,
+ .power_down_gpio = -1,
+};
+
+static struct platform_device overo_tfp410_device = {
+ .name = "tfp410",
+ .id = 0,
+ .dev.platform_data = &overo_tfp410_pdata,
};
static struct omap_dss_board_info overo_dss_data = {
- .num_devices = ARRAY_SIZE(overo_dss_devices),
- .devices = overo_dss_devices,
- .default_device = &overo_dvi_device,
+ .default_display_name = "lcd43",
};
+static void __init overo_display_init(void)
+{
+ omap_display_init(&overo_dss_data);
+
+ if (!overo_use_lcd35)
+ platform_device_register(&overo_lcd43_device);
+ platform_device_register(&overo_tfp410_device);
+ platform_device_register(&overo_dvi_connector_device);
+ platform_device_register(&overo_tv_connector_device);
+}
+
static struct mtd_partition overo_nand_partitions[] = {
{
.name = "xloader",
@@ -408,24 +427,41 @@ static int __init overo_i2c_init(void)
return 0;
}
+static struct panel_lb035q02_platform_data overo_lcd35_pdata = {
+ .name = "lcd35",
+ .source = "dpi.0",
+
+ .data_lines = 24,
+
+ .enable_gpio = OVERO_GPIO_LCD_EN,
+ .backlight_gpio = OVERO_GPIO_LCD_BL,
+};
+
+/*
+ * NOTE: Register either the lgphilips (lcd35) panel or the lcd43 panel; the
+ * choice is based on the overo_use_lcd35 flag. If new SPI devices are added
+ * here, extra work is needed so that only the lgphilips panel entry is
+ * affected by overo_use_lcd35.
+ */
static struct spi_board_info overo_spi_board_info[] __initdata = {
-#if defined(CONFIG_PANEL_LGPHILIPS_LB035Q02) || \
- defined(CONFIG_PANEL_LGPHILIPS_LB035Q02_MODULE)
{
- .modalias = "lgphilips_lb035q02_panel-spi",
+ .modalias = "panel_lgphilips_lb035q02",
.bus_num = 1,
.chip_select = 1,
.max_speed_hz = 500000,
.mode = SPI_MODE_3,
+ .platform_data = &overo_lcd35_pdata,
},
-#endif
};
static int __init overo_spi_init(void)
{
overo_ads7846_init();
- spi_register_board_info(overo_spi_board_info,
- ARRAY_SIZE(overo_spi_board_info));
+
+ if (overo_use_lcd35) {
+ spi_register_board_info(overo_spi_board_info,
+ ARRAY_SIZE(overo_spi_board_info));
+ }
return 0;
}
@@ -463,11 +499,13 @@ static void __init overo_init(void)
{
int ret;
+ if (strstr(boot_command_line, "omapdss.def_disp=lcd35"))
+ overo_use_lcd35 = true;
+
regulator_register_fixed(0, dummy_supplies, ARRAY_SIZE(dummy_supplies));
omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
overo_i2c_init();
omap_hsmmc_init(mmc);
- omap_display_init(&overo_dss_data);
omap_serial_init();
omap_sdrc_init(mt46h32m32lf6_sdrc_params,
mt46h32m32lf6_sdrc_params);
@@ -484,6 +522,8 @@ static void __init overo_init(void)
overo_init_keys();
omap_twl4030_audio_init("overo", NULL);
+ overo_display_init();
+
/* Ensure SDRC pins are mux'd for self-refresh */
omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 9c2dd102fbb..c3270c0f1fc 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -45,6 +45,8 @@
#include <linux/platform_data/tsl2563.h>
#include <linux/lis3lv02d.h>
+#include <video/omap-panel-data.h>
+
#if defined(CONFIG_IR_RX51) || defined(CONFIG_IR_RX51_MODULE)
#include <media/ir-rx51.h>
#endif
@@ -226,6 +228,15 @@ static struct lp55xx_platform_data rx51_lp5523_platform_data = {
};
#endif
+#define RX51_LCD_RESET_GPIO 90
+
+static struct panel_acx565akm_platform_data acx_pdata = {
+ .name = "lcd",
+ .source = "sdi.0",
+ .reset_gpio = RX51_LCD_RESET_GPIO,
+ .datapairs = 2,
+};
+
static struct omap2_mcspi_device_config wl1251_mcspi_config = {
.turbo_mode = 0,
};
@@ -254,6 +265,7 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = {
.chip_select = 2,
.max_speed_hz = 6000000,
.controller_data = &mipid_mcspi_config,
+ .platform_data = &acx_pdata,
},
[RX51_SPI_TSC2005] = {
.modalias = "tsc2005",
diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c
index bdd1e3a179e..43a90c8d683 100644
--- a/arch/arm/mach-omap2/board-rx51-video.c
+++ b/arch/arm/mach-omap2/board-rx51-video.c
@@ -29,34 +29,21 @@
#if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE)
-static struct panel_acx565akm_data lcd_data = {
- .reset_gpio = RX51_LCD_RESET_GPIO,
+static struct connector_atv_platform_data rx51_tv_pdata = {
+ .name = "tv",
+ .source = "venc.0",
+ .connector_type = OMAP_DSS_VENC_TYPE_COMPOSITE,
+ .invert_polarity = false,
};
-static struct omap_dss_device rx51_lcd_device = {
- .name = "lcd",
- .driver_name = "panel-acx565akm",
- .type = OMAP_DISPLAY_TYPE_SDI,
- .phy.sdi.datapairs = 2,
- .data = &lcd_data,
-};
-
-static struct omap_dss_device rx51_tv_device = {
- .name = "tv",
- .type = OMAP_DISPLAY_TYPE_VENC,
- .driver_name = "venc",
- .phy.venc.type = OMAP_DSS_VENC_TYPE_COMPOSITE,
-};
-
-static struct omap_dss_device *rx51_dss_devices[] = {
- &rx51_lcd_device,
- &rx51_tv_device,
+static struct platform_device rx51_tv_connector_device = {
+ .name = "connector-analog-tv",
+ .id = 0,
+ .dev.platform_data = &rx51_tv_pdata,
};
static struct omap_dss_board_info rx51_dss_board_info = {
- .num_devices = ARRAY_SIZE(rx51_dss_devices),
- .devices = rx51_dss_devices,
- .default_device = &rx51_lcd_device,
+ .default_display_name = "lcd",
};
static int __init rx51_video_init(void)
@@ -71,6 +58,8 @@ static int __init rx51_video_init(void)
omap_display_init(&rx51_dss_board_info);
+ platform_device_register(&rx51_tv_connector_device);
+
return 0;
}
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index d2ea68ea678..7735105561d 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -85,7 +85,7 @@ static struct omap_board_mux board_mux[] __initdata = {
static struct omap_musb_board_data musb_board_data = {
.interface_type = MUSB_INTERFACE_ULPI,
- .mode = MUSB_PERIPHERAL,
+ .mode = MUSB_OTG,
.power = 0,
};
diff --git a/arch/arm/mach-omap2/board-zoom-display.c b/arch/arm/mach-omap2/board-zoom-display.c
index c2a079cb76f..3d8ecc1e05b 100644
--- a/arch/arm/mach-omap2/board-zoom-display.c
+++ b/arch/arm/mach-omap2/board-zoom-display.c
@@ -25,32 +25,23 @@
#define LCD_PANEL_RESET_GPIO_PILOT 55
#define LCD_PANEL_QVGA_GPIO 56
-static struct panel_nec_nl8048_data zoom_lcd_data = {
- /* res_gpio filled in code */
- .qvga_gpio = LCD_PANEL_QVGA_GPIO,
-};
+static struct panel_nec_nl8048hl11_platform_data zoom_lcd_pdata = {
+ .name = "lcd",
+ .source = "dpi.0",
-static struct omap_dss_device zoom_lcd_device = {
- .name = "lcd",
- .driver_name = "NEC_8048_panel",
- .type = OMAP_DISPLAY_TYPE_DPI,
- .phy.dpi.data_lines = 24,
- .data = &zoom_lcd_data,
-};
+ .data_lines = 24,
-static struct omap_dss_device *zoom_dss_devices[] = {
- &zoom_lcd_device,
+ .res_gpio = -1, /* filled in code */
+ .qvga_gpio = LCD_PANEL_QVGA_GPIO,
};
static struct omap_dss_board_info zoom_dss_data = {
- .num_devices = ARRAY_SIZE(zoom_dss_devices),
- .devices = zoom_dss_devices,
- .default_device = &zoom_lcd_device,
+ .default_display_name = "lcd",
};
static void __init zoom_lcd_panel_init(void)
{
- zoom_lcd_data.res_gpio = (omap_rev() > OMAP3430_REV_ES3_0) ?
+ zoom_lcd_pdata.res_gpio = (omap_rev() > OMAP3430_REV_ES3_0) ?
LCD_PANEL_RESET_GPIO_PROD :
LCD_PANEL_RESET_GPIO_PILOT;
}
@@ -61,19 +52,20 @@ static struct omap2_mcspi_device_config dss_lcd_mcspi_config = {
static struct spi_board_info nec_8048_spi_board_info[] __initdata = {
[0] = {
- .modalias = "nec_8048_spi",
+ .modalias = "panel-nec-nl8048hl11",
.bus_num = 1,
.chip_select = 2,
.max_speed_hz = 375000,
.controller_data = &dss_lcd_mcspi_config,
+ .platform_data = &zoom_lcd_pdata,
},
};
void __init zoom_display_init(void)
{
omap_display_init(&zoom_dss_data);
+ zoom_lcd_panel_init();
spi_register_board_info(nec_8048_spi_board_info,
ARRAY_SIZE(nec_8048_spi_board_info));
- zoom_lcd_panel_init();
}
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index ff37be1f6f9..03a0516c7f6 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -400,7 +400,7 @@ int __init omap_display_init(struct omap_dss_board_info *board_data)
/* Create devices for DPI and SDI */
- pdev = create_simple_dss_pdev("omapdss_dpi", -1,
+ pdev = create_simple_dss_pdev("omapdss_dpi", 0,
board_data, sizeof(*board_data), dss_pdev);
if (IS_ERR(pdev)) {
pr_err("Could not build platform_device for omapdss_dpi\n");
@@ -408,7 +408,7 @@ int __init omap_display_init(struct omap_dss_board_info *board_data)
}
if (cpu_is_omap34xx()) {
- pdev = create_simple_dss_pdev("omapdss_sdi", -1,
+ pdev = create_simple_dss_pdev("omapdss_sdi", 0,
board_data, sizeof(*board_data), dss_pdev);
if (IS_ERR(pdev)) {
pr_err("Could not build platform_device for omapdss_sdi\n");
diff --git a/arch/arm/mach-omap2/dss-common.c b/arch/arm/mach-omap2/dss-common.c
index 393aeefaebb..bf89effa4c9 100644
--- a/arch/arm/mach-omap2/dss-common.c
+++ b/arch/arm/mach-omap2/dss-common.c
@@ -25,6 +25,7 @@
#include <linux/kernel.h>
#include <linux/gpio.h>
+#include <linux/platform_device.h>
#include <video/omapdss.h>
#include <video/omap-panel-data.h>
@@ -37,70 +38,76 @@
#define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */
#define HDMI_GPIO_HPD 63 /* Hotplug detect */
-/* Display DVI */
#define PANDA_DVI_TFP410_POWER_DOWN_GPIO 0
-/* Using generic display panel */
-static struct tfp410_platform_data omap4_dvi_panel = {
- .i2c_bus_num = 3,
- .power_down_gpio = PANDA_DVI_TFP410_POWER_DOWN_GPIO,
+/* DVI Connector */
+static struct connector_dvi_platform_data omap4_panda_dvi_connector_pdata = {
+ .name = "dvi",
+ .source = "tfp410.0",
+ .i2c_bus_num = 2,
};
-static struct omap_dss_device omap4_panda_dvi_device = {
- .type = OMAP_DISPLAY_TYPE_DPI,
- .name = "dvi",
- .driver_name = "tfp410",
- .data = &omap4_dvi_panel,
- .phy.dpi.data_lines = 24,
- .channel = OMAP_DSS_CHANNEL_LCD2,
+static struct platform_device omap4_panda_dvi_connector_device = {
+ .name = "connector-dvi",
+ .id = 0,
+ .dev.platform_data = &omap4_panda_dvi_connector_pdata,
};
-static struct omap_dss_hdmi_data omap4_panda_hdmi_data = {
+/* TFP410 DPI-to-DVI chip */
+static struct encoder_tfp410_platform_data omap4_panda_tfp410_pdata = {
+ .name = "tfp410.0",
+ .source = "dpi.0",
+ .data_lines = 24,
+ .power_down_gpio = PANDA_DVI_TFP410_POWER_DOWN_GPIO,
+};
+
+static struct platform_device omap4_panda_tfp410_device = {
+ .name = "tfp410",
+ .id = 0,
+ .dev.platform_data = &omap4_panda_tfp410_pdata,
+};
+
+/* HDMI Connector */
+static struct connector_hdmi_platform_data omap4_panda_hdmi_connector_pdata = {
+ .name = "hdmi",
+ .source = "tpd12s015.0",
+};
+
+static struct platform_device omap4_panda_hdmi_connector_device = {
+ .name = "connector-hdmi",
+ .id = 0,
+ .dev.platform_data = &omap4_panda_hdmi_connector_pdata,
+};
+
+/* TPD12S015 HDMI ESD protection & level shifter chip */
+static struct encoder_tpd12s015_platform_data omap4_panda_tpd_pdata = {
+ .name = "tpd12s015.0",
+ .source = "hdmi.0",
+
.ct_cp_hpd_gpio = HDMI_GPIO_CT_CP_HPD,
.ls_oe_gpio = HDMI_GPIO_LS_OE,
.hpd_gpio = HDMI_GPIO_HPD,
};
-static struct omap_dss_device omap4_panda_hdmi_device = {
- .name = "hdmi",
- .driver_name = "hdmi_panel",
- .type = OMAP_DISPLAY_TYPE_HDMI,
- .channel = OMAP_DSS_CHANNEL_DIGIT,
- .data = &omap4_panda_hdmi_data,
-};
-
-static struct omap_dss_device *omap4_panda_dss_devices[] = {
- &omap4_panda_dvi_device,
- &omap4_panda_hdmi_device,
+static struct platform_device omap4_panda_tpd_device = {
+ .name = "tpd12s015",
+ .id = 0,
+ .dev.platform_data = &omap4_panda_tpd_pdata,
};
static struct omap_dss_board_info omap4_panda_dss_data = {
- .num_devices = ARRAY_SIZE(omap4_panda_dss_devices),
- .devices = omap4_panda_dss_devices,
- .default_device = &omap4_panda_dvi_device,
+ .default_display_name = "dvi",
};
-void __init omap4_panda_display_init(void)
+void __init omap4_panda_display_init_of(void)
{
omap_display_init(&omap4_panda_dss_data);
- /*
- * OMAP4460SDP/Blaze and OMAP4430 ES2.3 SDP/Blaze boards and
- * later have external pull up on the HDMI I2C lines
- */
- if (cpu_is_omap446x() || omap_rev() > OMAP4430_REV_ES2_2)
- omap_hdmi_init(OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP);
- else
- omap_hdmi_init(0);
-
- omap_mux_init_gpio(HDMI_GPIO_LS_OE, OMAP_PIN_OUTPUT);
- omap_mux_init_gpio(HDMI_GPIO_CT_CP_HPD, OMAP_PIN_OUTPUT);
- omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT_PULLDOWN);
-}
+ platform_device_register(&omap4_panda_tfp410_device);
+ platform_device_register(&omap4_panda_dvi_connector_device);
-void __init omap4_panda_display_init_of(void)
-{
- omap_display_init(&omap4_panda_dss_data);
+ platform_device_register(&omap4_panda_tpd_device);
+ platform_device_register(&omap4_panda_hdmi_connector_device);
}
@@ -109,93 +116,73 @@ void __init omap4_panda_display_init_of(void)
#define DISPLAY_SEL_GPIO 59 /* LCD2/PicoDLP switch */
#define DLP_POWER_ON_GPIO 40
-static struct nokia_dsi_panel_data dsi1_panel = {
- .name = "taal",
- .reset_gpio = 102,
- .use_ext_te = false,
- .ext_te_gpio = 101,
- .esd_interval = 0,
- .pin_config = {
- .num_pins = 6,
- .pins = { 0, 1, 2, 3, 4, 5 },
- },
-};
-
-static struct omap_dss_device sdp4430_lcd_device = {
- .name = "lcd",
- .driver_name = "taal",
- .type = OMAP_DISPLAY_TYPE_DSI,
- .data = &dsi1_panel,
- .phy.dsi = {
- .module = 0,
+static struct panel_dsicm_platform_data dsi1_panel = {
+ .name = "lcd",
+ .source = "dsi.0",
+ .reset_gpio = 102,
+ .use_ext_te = false,
+ .ext_te_gpio = 101,
+ .pin_config = {
+ .num_pins = 6,
+ .pins = { 0, 1, 2, 3, 4, 5 },
},
- .channel = OMAP_DSS_CHANNEL_LCD,
};
-static struct nokia_dsi_panel_data dsi2_panel = {
- .name = "taal",
- .reset_gpio = 104,
- .use_ext_te = false,
- .ext_te_gpio = 103,
- .esd_interval = 0,
- .pin_config = {
- .num_pins = 6,
- .pins = { 0, 1, 2, 3, 4, 5 },
- },
+static struct platform_device sdp4430_lcd_device = {
+ .name = "panel-dsi-cm",
+ .id = 0,
+ .dev.platform_data = &dsi1_panel,
};
-static struct omap_dss_device sdp4430_lcd2_device = {
- .name = "lcd2",
- .driver_name = "taal",
- .type = OMAP_DISPLAY_TYPE_DSI,
- .data = &dsi2_panel,
- .phy.dsi = {
-
- .module = 1,
+static struct panel_dsicm_platform_data dsi2_panel = {
+ .name = "lcd2",
+ .source = "dsi.1",
+ .reset_gpio = 104,
+ .use_ext_te = false,
+ .ext_te_gpio = 103,
+ .pin_config = {
+ .num_pins = 6,
+ .pins = { 0, 1, 2, 3, 4, 5 },
},
- .channel = OMAP_DSS_CHANNEL_LCD2,
};
-static struct omap_dss_hdmi_data sdp4430_hdmi_data = {
- .ct_cp_hpd_gpio = HDMI_GPIO_CT_CP_HPD,
- .ls_oe_gpio = HDMI_GPIO_LS_OE,
- .hpd_gpio = HDMI_GPIO_HPD,
+static struct platform_device sdp4430_lcd2_device = {
+ .name = "panel-dsi-cm",
+ .id = 1,
+ .dev.platform_data = &dsi2_panel,
};
-static struct omap_dss_device sdp4430_hdmi_device = {
- .name = "hdmi",
- .driver_name = "hdmi_panel",
- .type = OMAP_DISPLAY_TYPE_HDMI,
- .channel = OMAP_DSS_CHANNEL_DIGIT,
- .data = &sdp4430_hdmi_data,
+/* HDMI Connector */
+static struct connector_hdmi_platform_data sdp4430_hdmi_connector_pdata = {
+ .name = "hdmi",
+ .source = "tpd12s015.0",
};
-static struct picodlp_panel_data sdp4430_picodlp_pdata = {
- .picodlp_adapter_id = 2,
- .emu_done_gpio = 44,
- .pwrgood_gpio = 45,
+static struct platform_device sdp4430_hdmi_connector_device = {
+ .name = "connector-hdmi",
+ .id = 0,
+ .dev.platform_data = &sdp4430_hdmi_connector_pdata,
};
-static struct omap_dss_device sdp4430_picodlp_device = {
- .name = "picodlp",
- .driver_name = "picodlp_panel",
- .type = OMAP_DISPLAY_TYPE_DPI,
- .phy.dpi.data_lines = 24,
- .channel = OMAP_DSS_CHANNEL_LCD2,
- .data = &sdp4430_picodlp_pdata,
+/* TPD12S015 HDMI ESD protection & level shifter chip */
+static struct encoder_tpd12s015_platform_data sdp4430_tpd_pdata = {
+ .name = "tpd12s015.0",
+ .source = "hdmi.0",
+
+ .ct_cp_hpd_gpio = HDMI_GPIO_CT_CP_HPD,
+ .ls_oe_gpio = HDMI_GPIO_LS_OE,
+ .hpd_gpio = HDMI_GPIO_HPD,
};
-static struct omap_dss_device *sdp4430_dss_devices[] = {
- &sdp4430_lcd_device,
- &sdp4430_lcd2_device,
- &sdp4430_hdmi_device,
- &sdp4430_picodlp_device,
+static struct platform_device sdp4430_tpd_device = {
+ .name = "tpd12s015",
+ .id = 0,
+ .dev.platform_data = &sdp4430_tpd_pdata,
};
+
static struct omap_dss_board_info sdp4430_dss_data = {
- .num_devices = ARRAY_SIZE(sdp4430_dss_devices),
- .devices = sdp4430_dss_devices,
- .default_device = &sdp4430_lcd_device,
+ .default_display_name = "lcd",
};
/*
@@ -204,7 +191,7 @@ static struct omap_dss_board_info sdp4430_dss_data = {
* used by picodlp on the 4430sdp platform. Keep this gpio disabled as LCD2 is
* selected by default
*/
-void __init omap_4430sdp_display_init(void)
+void __init omap_4430sdp_display_init_of(void)
{
int r;
@@ -219,33 +206,10 @@ void __init omap_4430sdp_display_init(void)
pr_err("%s: Could not get DLP POWER ON GPIO\n", __func__);
omap_display_init(&sdp4430_dss_data);
- /*
- * OMAP4460SDP/Blaze and OMAP4430 ES2.3 SDP/Blaze boards and
- * later have external pull up on the HDMI I2C lines
- */
- if (cpu_is_omap446x() || omap_rev() > OMAP4430_REV_ES2_2)
- omap_hdmi_init(OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP);
- else
- omap_hdmi_init(0);
-
- omap_mux_init_gpio(HDMI_GPIO_LS_OE, OMAP_PIN_OUTPUT);
- omap_mux_init_gpio(HDMI_GPIO_CT_CP_HPD, OMAP_PIN_OUTPUT);
- omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT_PULLDOWN);
-}
-
-void __init omap_4430sdp_display_init_of(void)
-{
- int r;
- r = gpio_request_one(DISPLAY_SEL_GPIO, GPIOF_OUT_INIT_HIGH,
- "display_sel");
- if (r)
- pr_err("%s: Could not get display_sel GPIO\n", __func__);
-
- r = gpio_request_one(DLP_POWER_ON_GPIO, GPIOF_OUT_INIT_LOW,
- "DLP POWER ON");
- if (r)
- pr_err("%s: Could not get DLP POWER ON GPIO\n", __func__);
+ platform_device_register(&sdp4430_lcd_device);
+ platform_device_register(&sdp4430_lcd2_device);
- omap_display_init(&sdp4430_dss_data);
+ platform_device_register(&sdp4430_tpd_device);
+ platform_device_register(&sdp4430_hdmi_connector_device);
}
diff --git a/arch/arm/mach-omap2/dss-common.h b/arch/arm/mach-omap2/dss-common.h
index 915f6fff510..c28fe3c0358 100644
--- a/arch/arm/mach-omap2/dss-common.h
+++ b/arch/arm/mach-omap2/dss-common.h
@@ -6,9 +6,7 @@
* This file will be removed when DSS supports DT.
*/
-void __init omap4_panda_display_init(void);
void __init omap4_panda_display_init_of(void);
-void __init omap_4430sdp_display_init(void);
void __init omap_4430sdp_display_init_of(void);
#endif
diff --git a/arch/arm/mach-omap2/i2c.c b/arch/arm/mach-omap2/i2c.c
index d940e53dd9f..b456b4471f3 100644
--- a/arch/arm/mach-omap2/i2c.c
+++ b/arch/arm/mach-omap2/i2c.c
@@ -181,7 +181,7 @@ int __init omap_i2c_add_bus(struct omap_i2c_bus_platform_data *i2c_pdata,
sizeof(struct omap_i2c_bus_platform_data));
WARN(IS_ERR(pdev), "Could not build omap_device for %s\n", name);
- return PTR_RET(pdev);
+ return PTR_ERR_OR_ZERO(pdev);
}
static int __init omap_i2c_cmdline(void)
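
PTR_ERR_OR_ZERO() is the drop-in replacement for the old PTR_RET() helper used above. A short sketch of the idiom; check_build_result() is only an illustrative wrapper:

	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int check_build_result(struct platform_device *pdev)
	{
		/* equivalent to: return IS_ERR(pdev) ? PTR_ERR(pdev) : 0; */
		return PTR_ERR_OR_ZERO(pdev);
	}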
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 5cc92874be7..f99f68e1e85 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -129,6 +129,7 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
const char *oh_name;
int oh_cnt, i, ret = 0;
+ bool device_active = false;
oh_cnt = of_property_count_strings(node, "ti,hwmods");
if (oh_cnt <= 0) {
@@ -152,6 +153,8 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
goto odbfd_exit1;
}
hwmods[i] = oh;
+ if (oh->flags & HWMOD_INIT_NO_IDLE)
+ device_active = true;
}
od = omap_device_alloc(pdev, hwmods, oh_cnt);
@@ -172,6 +175,11 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
pdev->dev.pm_domain = &omap_device_pm_domain;
+ if (device_active) {
+ omap_device_enable(pdev);
+ pm_runtime_set_active(&pdev->dev);
+ }
+
odbfd_exit1:
kfree(hwmods);
odbfd_exit:
@@ -842,6 +850,7 @@ static int __init omap_device_late_idle(struct device *dev, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct omap_device *od = to_omap_device(pdev);
+ int i;
if (!od)
return 0;
@@ -850,6 +859,15 @@ static int __init omap_device_late_idle(struct device *dev, void *data)
* If omap_device state is enabled, but has no driver bound,
* idle it.
*/
+
+ /*
+ * Some devices (like memory controllers) are always kept
+ * enabled, and should not be idled even with no drivers.
+ */
+ for (i = 0; i < od->hwmods_cnt; i++)
+ if (od->hwmods[i]->flags & HWMOD_INIT_NO_IDLE)
+ return 0;
+
if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) {
if (od->_state == OMAP_DEVICE_STATE_ENABLED) {
dev_warn(dev, "%s: enabled but no driver. Idling\n",
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 7341eff63f5..7f4db12b145 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2386,7 +2386,7 @@ static void __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data)
np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh);
if (np)
- va_start = of_iomap(np, 0);
+ va_start = of_iomap(np, oh->mpu_rt_idx);
} else {
va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start);
}
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index aab33fd814c..e1482a9b3bc 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -95,6 +95,54 @@ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type3;
#define MODULEMODE_HWCTRL 1
#define MODULEMODE_SWCTRL 2
+#define DEBUG_OMAP2UART1_FLAGS 0
+#define DEBUG_OMAP2UART2_FLAGS 0
+#define DEBUG_OMAP2UART3_FLAGS 0
+#define DEBUG_OMAP3UART3_FLAGS 0
+#define DEBUG_OMAP3UART4_FLAGS 0
+#define DEBUG_OMAP4UART3_FLAGS 0
+#define DEBUG_OMAP4UART4_FLAGS 0
+#define DEBUG_TI81XXUART1_FLAGS 0
+#define DEBUG_TI81XXUART2_FLAGS 0
+#define DEBUG_TI81XXUART3_FLAGS 0
+#define DEBUG_AM33XXUART1_FLAGS 0
+
+#define DEBUG_OMAPUART_FLAGS (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET)
+
+#if defined(CONFIG_DEBUG_OMAP2UART1)
+#undef DEBUG_OMAP2UART1_FLAGS
+#define DEBUG_OMAP2UART1_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP2UART2)
+#undef DEBUG_OMAP2UART2_FLAGS
+#define DEBUG_OMAP2UART2_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP2UART3)
+#undef DEBUG_OMAP2UART3_FLAGS
+#define DEBUG_OMAP2UART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP3UART3)
+#undef DEBUG_OMAP3UART3_FLAGS
+#define DEBUG_OMAP3UART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP3UART4)
+#undef DEBUG_OMAP3UART4_FLAGS
+#define DEBUG_OMAP3UART4_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP4UART3)
+#undef DEBUG_OMAP4UART3_FLAGS
+#define DEBUG_OMAP4UART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP4UART4)
+#undef DEBUG_OMAP4UART4_FLAGS
+#define DEBUG_OMAP4UART4_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_TI81XXUART1)
+#undef DEBUG_TI81XXUART1_FLAGS
+#define DEBUG_TI81XXUART1_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_TI81XXUART2)
+#undef DEBUG_TI81XXUART2_FLAGS
+#define DEBUG_TI81XXUART2_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_TI81XXUART3)
+#undef DEBUG_TI81XXUART3_FLAGS
+#define DEBUG_TI81XXUART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_AM33XXUART1)
+#undef DEBUG_AM33XXUART1_FLAGS
+#define DEBUG_AM33XXUART1_FLAGS DEBUG_OMAPUART_FLAGS
+#endif
/**
* struct omap_hwmod_mux_info - hwmod specific mux configuration
@@ -568,6 +616,7 @@ struct omap_hwmod_link {
* @voltdm: pointer to voltage domain (filled in at runtime)
* @dev_attr: arbitrary device attributes that can be passed to the driver
* @_sysc_cache: internal-use hwmod flags
+ * @mpu_rt_idx: index of device address space for register target (for DT boot)
* @_mpu_rt_va: cached register target start address (internal use)
* @_mpu_port: cached MPU register target slave (internal use)
* @opt_clks_cnt: number of @opt_clks
@@ -617,6 +666,7 @@ struct omap_hwmod {
struct list_head node;
struct omap_hwmod_ocp_if *_mpu_port;
u16 flags;
+ u8 mpu_rt_idx;
u8 response_lat;
u8 rst_lines_cnt;
u8 opt_clks_cnt;
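
The DEBUG_*UART_FLAGS block added above follows a single idiom: every per-UART macro defaults to 0 and is unconditionally OR'd into the hwmod .flags, so only the UART selected as the DEBUG_LL console via Kconfig picks up HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET. A sketch of the same idiom with a made-up UART and Kconfig symbol:

	#define DEBUG_EXAMPLEUART_FLAGS	0	/* hypothetical UART, no flags by default */

	#if defined(CONFIG_DEBUG_EXAMPLEUART)	/* hypothetical Kconfig symbol */
	#undef DEBUG_EXAMPLEUART_FLAGS
	#define DEBUG_EXAMPLEUART_FLAGS	(HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET)
	#endif

	/* in the hwmod data the macro is always OR'd in:
	 *	.flags = DEBUG_EXAMPLEUART_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
	 */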
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index d05fc7b5456..56cebb05509 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -512,7 +512,7 @@ struct omap_hwmod omap2xxx_uart1_hwmod = {
.mpu_irqs = omap2_uart1_mpu_irqs,
.sdma_reqs = omap2_uart1_sdma_reqs,
.main_clk = "uart1_fck",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_OMAP2UART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
@@ -532,7 +532,7 @@ struct omap_hwmod omap2xxx_uart2_hwmod = {
.mpu_irqs = omap2_uart2_mpu_irqs,
.sdma_reqs = omap2_uart2_sdma_reqs,
.main_clk = "uart2_fck",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_OMAP2UART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
@@ -552,7 +552,7 @@ struct omap_hwmod omap2xxx_uart3_hwmod = {
.mpu_irqs = omap2_uart3_mpu_irqs,
.sdma_reqs = omap2_uart3_sdma_reqs,
.main_clk = "uart3_fck",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_OMAP2UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index 28bbd56346a..eb2f3b93b51 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -562,6 +562,7 @@ static struct omap_hwmod am33xx_cpgmac0_hwmod = {
.clkdm_name = "cpsw_125mhz_clkdm",
.flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
.main_clk = "cpsw_125mhz_gclk",
+ .mpu_rt_idx = 1,
.prcm = {
.omap4 = {
.clkctrl_offs = AM33XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET,
@@ -1512,7 +1513,7 @@ static struct omap_hwmod am33xx_uart1_hwmod = {
.name = "uart1",
.class = &uart_class,
.clkdm_name = "l4_wkup_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_AM33XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.main_clk = "dpll_per_m2_div4_wkupdm_ck",
.prcm = {
.omap4 = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index f7a3df2fb57..0c3a427da54 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -490,7 +490,7 @@ static struct omap_hwmod omap3xxx_uart1_hwmod = {
.mpu_irqs = omap2_uart1_mpu_irqs,
.sdma_reqs = omap2_uart1_sdma_reqs,
.main_clk = "uart1_fck",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_TI81XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
@@ -509,7 +509,7 @@ static struct omap_hwmod omap3xxx_uart2_hwmod = {
.mpu_irqs = omap2_uart2_mpu_irqs,
.sdma_reqs = omap2_uart2_sdma_reqs,
.main_clk = "uart2_fck",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_TI81XXUART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = CORE_MOD,
@@ -528,7 +528,8 @@ static struct omap_hwmod omap3xxx_uart3_hwmod = {
.mpu_irqs = omap2_uart3_mpu_irqs,
.sdma_reqs = omap2_uart3_sdma_reqs,
.main_clk = "uart3_fck",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_OMAP3UART3_FLAGS | DEBUG_TI81XXUART3_FLAGS |
+ HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = OMAP3430_PER_MOD,
@@ -558,7 +559,7 @@ static struct omap_hwmod omap36xx_uart4_hwmod = {
.mpu_irqs = uart4_mpu_irqs,
.sdma_reqs = uart4_sdma_reqs,
.main_clk = "uart4_fck",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_OMAP3UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.prcm = {
.omap2 = {
.module_offs = OMAP3430_PER_MOD,
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index d04b5e60fdb..9c3b504477d 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2858,8 +2858,7 @@ static struct omap_hwmod omap44xx_uart3_hwmod = {
.name = "uart3",
.class = &omap44xx_uart_hwmod_class,
.clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET |
- HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_OMAP4UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.main_clk = "func_48m_fclk",
.prcm = {
.omap4 = {
@@ -2875,7 +2874,7 @@ static struct omap_hwmod omap44xx_uart4_hwmod = {
.name = "uart4",
.class = &omap44xx_uart_hwmod_class,
.clkdm_name = "l4_per_clkdm",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = DEBUG_OMAP4UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
.main_clk = "func_48m_fclk",
.prcm = {
.omap4 = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index f37ae96b70a..3c70f5c1860 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -1375,7 +1375,7 @@ static struct omap_hwmod omap54xx_uart3_hwmod = {
.name = "uart3",
.class = &omap54xx_uart_hwmod_class,
.clkdm_name = "l4per_clkdm",
- .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+ .flags = DEBUG_OMAP4UART3_FLAGS,
.main_clk = "func_48m_fclk",
.prcm = {
.omap4 = {
@@ -1391,6 +1391,7 @@ static struct omap_hwmod omap54xx_uart4_hwmod = {
.name = "uart4",
.class = &omap54xx_uart_hwmod_class,
.clkdm_name = "l4per_clkdm",
+ .flags = DEBUG_OMAP4UART4_FLAGS,
.main_clk = "func_48m_fclk",
.prcm = {
.omap4 = {
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 3a674de6cb6..a388f8c1bcb 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -208,17 +208,6 @@ static int __init omap_serial_early_init(void)
pr_info("%s used as console in debug mode: uart%d clocks will not be gated",
uart_name, uart->num);
}
-
- /*
- * omap-uart can be used for earlyprintk logs
- * So if omap-uart is used as console then prevent
- * uart reset and idle to get logs from omap-uart
- * until uart console driver is available to take
- * care for console messages.
- * Idling or resetting omap-uart while printing logs
- * early boot logs can stall the boot-up.
- */
- oh->flags |= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET;
}
} while (1);
diff --git a/arch/arm/mach-omap2/usb-host.c b/arch/arm/mach-omap2/usb-host.c
index 2eb19d4d0aa..e83a6a4b184 100644
--- a/arch/arm/mach-omap2/usb-host.c
+++ b/arch/arm/mach-omap2/usb-host.c
@@ -28,7 +28,7 @@
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/usb/phy.h>
-#include <linux/usb/nop-usb-xceiv.h>
+#include <linux/usb/usb_phy_gen_xceiv.h>
#include "soc.h"
#include "omap_device.h"
@@ -349,7 +349,7 @@ static struct fixed_voltage_config hsusb_reg_config = {
/* .init_data filled later */
};
-static const char *nop_name = "nop_usb_xceiv"; /* NOP PHY driver */
+static const char *nop_name = "usb_phy_gen_xceiv"; /* NOP PHY driver */
static const char *reg_name = "reg-fixed-voltage"; /* Regulator driver */
/**
@@ -460,9 +460,9 @@ int usbhs_init_phys(struct usbhs_phy_data *phy, int num_phys)
pdevinfo.name = nop_name;
pdevinfo.id = phy->port;
pdevinfo.data = phy->platform_data;
- pdevinfo.size_data = sizeof(struct nop_usb_xceiv_platform_data);
-
- scnprintf(phy_id, MAX_STR, "nop_usb_xceiv.%d",
+ pdevinfo.size_data =
+ sizeof(struct usb_phy_gen_xceiv_platform_data);
+ scnprintf(phy_id, MAX_STR, "usb_phy_gen_xceiv.%d",
phy->port);
pdev = platform_device_register_full(&pdevinfo);
if (IS_ERR(pdev)) {
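
The PHY registration above uses the platform_device_info / platform_device_register_full() pair. A condensed sketch of that call shape for the renamed NOP PHY; register_nop_phy() and its arguments are illustrative only:

	#include <linux/platform_device.h>

	static struct platform_device *register_nop_phy(int port, void *pdata,
							size_t pdata_size)
	{
		struct platform_device_info pdevinfo = {
			.name		= "usb_phy_gen_xceiv",
			.id		= port,
			.data		= pdata,	/* copied by the core */
			.size_data	= pdata_size,
		};

		return platform_device_register_full(&pdevinfo);
	}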
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 8c4de2708cf..bc897231bd1 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -38,11 +38,8 @@ static struct musb_hdrc_config musb_config = {
};
static struct musb_hdrc_platform_data musb_plat = {
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
.mode = MUSB_OTG,
-#else
- .mode = MUSB_HOST,
-#endif
+
/* .clock is set dynamically */
.config = &musb_config,
diff --git a/arch/arm/mach-orion5x/include/mach/debug-macro.S b/arch/arm/mach-orion5x/include/mach/debug-macro.S
deleted file mode 100644
index f340ed8f8dd..00000000000
--- a/arch/arm/mach-orion5x/include/mach/debug-macro.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * arch/arm/mach-orion5x/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <mach/orion5x.h>
-
- .macro addruart, rp, rv, tmp
- ldr \rp, =ORION5X_REGS_PHYS_BASE
- ldr \rv, =ORION5X_REGS_VIRT_BASE
- orr \rp, \rp, #0x00012000
- orr \rv, \rv, #0x00012000
- .endm
-
-#define UART_SHIFT 2
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-prima2/common.c b/arch/arm/mach-prima2/common.c
index 2c70f74fed5..e110b6d4ae8 100644
--- a/arch/arm/mach-prima2/common.c
+++ b/arch/arm/mach-prima2/common.c
@@ -42,7 +42,6 @@ static const char *atlas6_dt_match[] __initdata = {
DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)")
/* Maintainer: Barry Song <baohua.song@csr.com> */
- .nr_irqs = 128,
.map_io = sirfsoc_map_io,
.init_time = sirfsoc_init_time,
.init_late = sirfsoc_init_late,
@@ -59,7 +58,6 @@ static const char *prima2_dt_match[] __initdata = {
DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)")
/* Maintainer: Barry Song <baohua.song@csr.com> */
- .nr_irqs = 128,
.map_io = sirfsoc_map_io,
.init_time = sirfsoc_init_time,
.dma_zone_size = SZ_256M,
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index f6726bb4eb9..3a3362fa793 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -477,16 +477,24 @@ static int em_x270_usb_hub_init(void)
/* USB Hub power-on and reset */
gpio_direction_output(usb_hub_reset, 1);
gpio_direction_output(GPIO9_USB_VBUS_EN, 0);
- regulator_enable(em_x270_usb_ldo);
+ err = regulator_enable(em_x270_usb_ldo);
+ if (err)
+ goto err_free_rst_gpio;
+
gpio_set_value(usb_hub_reset, 0);
gpio_set_value(usb_hub_reset, 1);
regulator_disable(em_x270_usb_ldo);
- regulator_enable(em_x270_usb_ldo);
+ err = regulator_enable(em_x270_usb_ldo);
+ if (err)
+ goto err_free_rst_gpio;
+
gpio_set_value(usb_hub_reset, 0);
gpio_set_value(GPIO9_USB_VBUS_EN, 1);
return 0;
+err_free_rst_gpio:
+ gpio_free(usb_hub_reset);
err_free_vbus_gpio:
gpio_free(GPIO9_USB_VBUS_EN);
err_free_usb_ldo:
@@ -592,7 +600,7 @@ err_irq:
return err;
}
-static void em_x270_mci_setpower(struct device *dev, unsigned int vdd)
+static int em_x270_mci_setpower(struct device *dev, unsigned int vdd)
{
struct pxamci_platform_data* p_d = dev->platform_data;
@@ -600,10 +608,11 @@ static void em_x270_mci_setpower(struct device *dev, unsigned int vdd)
int vdd_uV = (2000 + (vdd - __ffs(MMC_VDD_20_21)) * 100) * 1000;
regulator_set_voltage(em_x270_sdio_ldo, vdd_uV, vdd_uV);
- regulator_enable(em_x270_sdio_ldo);
+ return regulator_enable(em_x270_sdio_ldo);
} else {
regulator_disable(em_x270_sdio_ldo);
}
+ return 0;
}
static void em_x270_mci_exit(struct device *dev, void *data)
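
The hunks above convert the pxamci setpower hook to return an int so that a regulator_enable() failure (the call is __must_check) can propagate to the MMC core instead of being silently dropped. A stripped-down sketch of the new callback shape; example_ldo stands in for the board's SDIO regulator:

	#include <linux/device.h>
	#include <linux/regulator/consumer.h>

	static struct regulator *example_ldo;

	static int example_mci_setpower(struct device *dev, unsigned int vdd)
	{
		if (vdd)
			return regulator_enable(example_ldo);

		regulator_disable(example_ldo);
		return 0;
	}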
diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c
index fe31bfcbb8d..c98511c5abd 100644
--- a/arch/arm/mach-pxa/icontrol.c
+++ b/arch/arm/mach-pxa/icontrol.c
@@ -73,9 +73,6 @@ static struct pxa2xx_spi_chip mcp251x_chip_info4 = {
static struct mcp251x_platform_data mcp251x_info = {
.oscillator_frequency = 16E6,
- .board_specific_setup = NULL,
- .power_enable = NULL,
- .transceiver_enable = NULL
};
static struct spi_board_info mcp251x_board_info[] = {
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index d2c65231837..dd70343c870 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -408,7 +408,7 @@ static int mainstone_mci_init(struct device *dev, irq_handler_t mstone_detect_in
return err;
}
-static void mainstone_mci_setpower(struct device *dev, unsigned int vdd)
+static int mainstone_mci_setpower(struct device *dev, unsigned int vdd)
{
struct pxamci_platform_data* p_d = dev->platform_data;
@@ -420,6 +420,7 @@ static void mainstone_mci_setpower(struct device *dev, unsigned int vdd)
printk(KERN_DEBUG "%s: off\n", __func__);
MST_MSCWR1 &= ~MST_MSCWR1_MMC_ON;
}
+ return 0;
}
static void mainstone_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-pxa/pcm990-baseboard.c b/arch/arm/mach-pxa/pcm990-baseboard.c
index fb7f1d1627d..13e5b00eae9 100644
--- a/arch/arm/mach-pxa/pcm990-baseboard.c
+++ b/arch/arm/mach-pxa/pcm990-baseboard.c
@@ -335,7 +335,7 @@ static int pcm990_mci_init(struct device *dev, irq_handler_t mci_detect_int,
return err;
}
-static void pcm990_mci_setpower(struct device *dev, unsigned int vdd)
+static int pcm990_mci_setpower(struct device *dev, unsigned int vdd)
{
struct pxamci_platform_data *p_d = dev->platform_data;
u8 val;
@@ -348,6 +348,7 @@ static void pcm990_mci_setpower(struct device *dev, unsigned int vdd)
val &= ~PCM990_CTRL_MMC2PWR;
pcm990_cpld_writeb(PCM990_CTRL_MMC2PWR, PCM990_CTRL_REG5);
+ return 0;
}
static void pcm990_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-pxa/poodle.c b/arch/arm/mach-pxa/poodle.c
index 711d37e26bd..aedf053a1de 100644
--- a/arch/arm/mach-pxa/poodle.c
+++ b/arch/arm/mach-pxa/poodle.c
@@ -258,7 +258,7 @@ err_free_2:
return err;
}
-static void poodle_mci_setpower(struct device *dev, unsigned int vdd)
+static int poodle_mci_setpower(struct device *dev, unsigned int vdd)
{
struct pxamci_platform_data* p_d = dev->platform_data;
@@ -270,6 +270,8 @@ static void poodle_mci_setpower(struct device *dev, unsigned int vdd)
gpio_set_value(POODLE_GPIO_SD_PWR1, 0);
gpio_set_value(POODLE_GPIO_SD_PWR, 0);
}
+
+ return 0;
}
static void poodle_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 2125df0444e..4c29173026e 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -598,7 +598,7 @@ static inline void spitz_spi_init(void) {}
* NOTE: The card detect interrupt isn't debounced so we delay it by 250ms to
* give the card a chance to fully insert/eject.
*/
-static void spitz_mci_setpower(struct device *dev, unsigned int vdd)
+static int spitz_mci_setpower(struct device *dev, unsigned int vdd)
{
struct pxamci_platform_data* p_d = dev->platform_data;
@@ -606,6 +606,8 @@ static void spitz_mci_setpower(struct device *dev, unsigned int vdd)
spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, SCOOP_CPR_SD_3V);
else
spitz_card_pwr_ctrl(SCOOP_CPR_SD_3V, 0x0);
+
+ return 0;
}
static struct pxamci_platform_data spitz_mci_platform_data = {
diff --git a/arch/arm/mach-pxa/stargate2.c b/arch/arm/mach-pxa/stargate2.c
index 88fde43c948..62aea3e835f 100644
--- a/arch/arm/mach-pxa/stargate2.c
+++ b/arch/arm/mach-pxa/stargate2.c
@@ -734,9 +734,10 @@ static int stargate2_mci_init(struct device *dev,
*
* Very simple control. Either it is on or off and is controlled by
* a gpio pin */
-static void stargate2_mci_setpower(struct device *dev, unsigned int vdd)
+static int stargate2_mci_setpower(struct device *dev, unsigned int vdd)
{
gpio_set_value(SG2_SD_POWER_ENABLE, !!vdd);
+ return 0;
}
static void stargate2_mci_exit(struct device *dev, void *data)
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index f5d43643456..04a0aea2387 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -29,6 +29,8 @@
#include <linux/i2c/pca953x.h>
#include <linux/apm-emulation.h>
#include <linux/can/platform/mcp251x.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
#include <asm/mach-types.h>
#include <asm/suspend.h>
@@ -391,33 +393,34 @@ static struct pxa2xx_spi_master pxa2xx_spi_ssp3_master_info = {
};
/* CAN bus on SPI */
-static int zeus_mcp2515_setup(struct spi_device *sdev)
-{
- int err;
-
- err = gpio_request(ZEUS_CAN_SHDN_GPIO, "CAN shutdown");
- if (err)
- return err;
+static struct regulator_consumer_supply can_regulator_consumer =
+ REGULATOR_SUPPLY("vdd", "spi3.0");
- err = gpio_direction_output(ZEUS_CAN_SHDN_GPIO, 1);
- if (err) {
- gpio_free(ZEUS_CAN_SHDN_GPIO);
- return err;
- }
+static struct regulator_init_data can_regulator_init_data = {
+ .constraints = {
+ .valid_ops_mask = REGULATOR_CHANGE_STATUS,
+ },
+ .consumer_supplies = &can_regulator_consumer,
+ .num_consumer_supplies = 1,
+};
- return 0;
-}
+static struct fixed_voltage_config can_regulator_pdata = {
+ .supply_name = "CAN_SHDN",
+ .microvolts = 3300000,
+ .gpio = ZEUS_CAN_SHDN_GPIO,
+ .init_data = &can_regulator_init_data,
+};
-static int zeus_mcp2515_transceiver_enable(int enable)
-{
- gpio_set_value(ZEUS_CAN_SHDN_GPIO, !enable);
- return 0;
-}
+static struct platform_device can_regulator_device = {
+ .name = "reg-fixed-volage",
+ .id = -1,
+ .dev = {
+ .platform_data = &can_regulator_pdata,
+ },
+};
static struct mcp251x_platform_data zeus_mcp2515_pdata = {
.oscillator_frequency = 16*1000*1000,
- .board_specific_setup = zeus_mcp2515_setup,
- .power_enable = zeus_mcp2515_transceiver_enable,
};
static struct spi_board_info zeus_spi_board_info[] = {
@@ -516,6 +519,7 @@ static struct platform_device *zeus_devices[] __initdata = {
&zeus_leds_device,
&zeus_pcmcia_device,
&zeus_max6369_device,
+ &can_regulator_device,
};
/* AC'97 */
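
With the REGULATOR_SUPPLY("vdd", "spi3.0") mapping added above, the CAN transceiver's shutdown GPIO is now driven through the fixed regulator instead of the removed board_specific_setup/power_enable hooks. A sketch of what the consumer side of that mapping looks like; this is illustrative only, not the actual mcp251x driver code:

	#include <linux/err.h>
	#include <linux/regulator/consumer.h>
	#include <linux/spi/spi.h>

	static int example_can_power_on(struct spi_device *spi)
	{
		struct regulator *vdd = devm_regulator_get(&spi->dev, "vdd");

		if (IS_ERR(vdd))
			return PTR_ERR(vdd);

		/* toggles CAN_SHDN via the reg-fixed-voltage device */
		return regulator_enable(vdd);
	}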
diff --git a/arch/arm/mach-realview/include/mach/debug-macro.S b/arch/arm/mach-realview/include/mach/debug-macro.S
deleted file mode 100644
index 8cc372dc66a..00000000000
--- a/arch/arm/mach-realview/include/mach/debug-macro.S
+++ /dev/null
@@ -1,29 +0,0 @@
-/* arch/arm/mach-realview/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifdef CONFIG_DEBUG_REALVIEW_STD_PORT
-#define DEBUG_LL_UART_OFFSET 0x00009000
-#elif defined(CONFIG_DEBUG_REALVIEW_PB1176_PORT)
-#define DEBUG_LL_UART_OFFSET 0x0010c000
-#endif
-
-#ifndef DEBUG_LL_UART_OFFSET
-#error "Unknown RealView platform"
-#endif
-
- .macro addruart, rp, rv, tmp
- mov \rp, #DEBUG_LL_UART_OFFSET
- orr \rv, \rp, #0xfb000000 @ virtual base
- orr \rp, \rp, #0x10000000 @ physical base
- .endm
-
-#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/mach-rpc/include/mach/debug-macro.S b/arch/arm/mach-rpc/include/mach/debug-macro.S
deleted file mode 100644
index 6d28cc99b12..00000000000
--- a/arch/arm/mach-rpc/include/mach/debug-macro.S
+++ /dev/null
@@ -1,23 +0,0 @@
-/* arch/arm/mach-rpc/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-*/
-
- .macro addruart, rp, rv, tmp
- mov \rp, #0x00010000
- orr \rp, \rp, #0x00000fe0
- orr \rv, \rp, #0xe0000000 @ virtual
- orr \rp, \rp, #0x03000000 @ physical
- .endm
-
-#define UART_SHIFT 2
-#define FLOW_CONTROL
-#include <asm/hardware/debug-8250.S>
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2410.c b/arch/arm/mach-s3c24xx/clock-s3c2410.c
index 34fffdf6fc1..564553694b5 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2410.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2410.c
@@ -119,66 +119,101 @@ static struct clk init_clocks_off[] = {
}
};
-static struct clk init_clocks[] = {
- {
- .name = "lcd",
- .parent = &clk_h,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_LCDC,
- }, {
- .name = "gpio",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_GPIO,
- }, {
- .name = "usb-host",
- .parent = &clk_h,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_USBH,
- }, {
- .name = "usb-device",
- .parent = &clk_h,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_USBD,
- }, {
- .name = "timers",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_PWMT,
- }, {
- .name = "uart",
- .devname = "s3c2410-uart.0",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_UART0,
- }, {
- .name = "uart",
- .devname = "s3c2410-uart.1",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_UART1,
- }, {
- .name = "uart",
- .devname = "s3c2410-uart.2",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_UART2,
- }, {
- .name = "rtc",
- .parent = &clk_p,
- .enable = s3c2410_clkcon_enable,
- .ctrlbit = S3C2410_CLKCON_RTC,
- }, {
- .name = "watchdog",
- .parent = &clk_p,
- .ctrlbit = 0,
- }, {
- .name = "usb-bus-host",
- .parent = &clk_usb_bus,
- }, {
- .name = "usb-bus-gadget",
- .parent = &clk_usb_bus,
- },
+static struct clk clk_lcd = {
+ .name = "lcd",
+ .parent = &clk_h,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_LCDC,
+};
+
+static struct clk clk_gpio = {
+ .name = "gpio",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_GPIO,
+};
+
+static struct clk clk_usb_host = {
+ .name = "usb-host",
+ .parent = &clk_h,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_USBH,
+};
+
+static struct clk clk_usb_device = {
+ .name = "usb-device",
+ .parent = &clk_h,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_USBD,
+};
+
+static struct clk clk_timers = {
+ .name = "timers",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_PWMT,
+};
+
+struct clk s3c24xx_clk_uart0 = {
+ .name = "uart",
+ .devname = "s3c2410-uart.0",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_UART0,
+};
+
+struct clk s3c24xx_clk_uart1 = {
+ .name = "uart",
+ .devname = "s3c2410-uart.1",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_UART1,
+};
+
+struct clk s3c24xx_clk_uart2 = {
+ .name = "uart",
+ .devname = "s3c2410-uart.2",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_UART2,
+};
+
+static struct clk clk_rtc = {
+ .name = "rtc",
+ .parent = &clk_p,
+ .enable = s3c2410_clkcon_enable,
+ .ctrlbit = S3C2410_CLKCON_RTC,
+};
+
+static struct clk clk_watchdog = {
+ .name = "watchdog",
+ .parent = &clk_p,
+ .ctrlbit = 0,
+};
+
+static struct clk clk_usb_bus_host = {
+ .name = "usb-bus-host",
+ .parent = &clk_usb_bus,
+};
+
+static struct clk clk_usb_bus_gadget = {
+ .name = "usb-bus-gadget",
+ .parent = &clk_usb_bus,
+};
+
+static struct clk *init_clocks[] = {
+ &clk_lcd,
+ &clk_gpio,
+ &clk_usb_host,
+ &clk_usb_device,
+ &clk_timers,
+ &s3c24xx_clk_uart0,
+ &s3c24xx_clk_uart1,
+ &s3c24xx_clk_uart2,
+ &clk_rtc,
+ &clk_watchdog,
+ &clk_usb_bus_host,
+ &clk_usb_bus_gadget,
};
/* s3c2410_baseclk_add()
@@ -195,7 +230,6 @@ int __init s3c2410_baseclk_add(void)
{
unsigned long clkslow = __raw_readl(S3C2410_CLKSLOW);
unsigned long clkcon = __raw_readl(S3C2410_CLKCON);
- struct clk *clkp;
struct clk *xtal;
int ret;
int ptr;
@@ -207,8 +241,9 @@ int __init s3c2410_baseclk_add(void)
/* register clocks from clock array */
- clkp = init_clocks;
- for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++, clkp++) {
+ for (ptr = 0; ptr < ARRAY_SIZE(init_clocks); ptr++) {
+ struct clk *clkp = init_clocks[ptr];
+
/* ensure that we note the clock state */
clkp->usage = clkcon & clkp->ctrlbit ? 1 : 0;
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2440.c b/arch/arm/mach-s3c24xx/clock-s3c2440.c
index 1069b568082..aaf006d1d6d 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2440.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2440.c
@@ -166,6 +166,9 @@ static struct clk_lookup s3c2440_clk_lookup[] = {
CLKDEV_INIT(NULL, "clk_uart_baud1", &s3c24xx_uclk),
CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
CLKDEV_INIT(NULL, "clk_uart_baud3", &s3c2440_clk_fclk_n),
+ CLKDEV_INIT("s3c2440-uart.0", "uart", &s3c24xx_clk_uart0),
+ CLKDEV_INIT("s3c2440-uart.1", "uart", &s3c24xx_clk_uart1),
+ CLKDEV_INIT("s3c2440-uart.2", "uart", &s3c24xx_clk_uart2),
CLKDEV_INIT("s3c2440-camif", "camera", &s3c2440_clk_cam_upll),
};
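
The CLKDEV_INIT() entries added above are what let a consumer's clk_get() resolve to the newly exported UART clocks. A small sketch of the lookup from the consumer side, for illustration only:

	#include <linux/clk.h>
	#include <linux/err.h>

	static int example_uart_clk_enable(struct device *dev)
	{
		/* matches the ("s3c2440-uart.N", "uart") lookup entries */
		struct clk *clk = clk_get(dev, "uart");

		if (IS_ERR(clk))
			return PTR_ERR(clk);

		return clk_enable(clk);
	}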
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index e115f674210..3a6ffa250fb 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -358,7 +358,6 @@ static struct platform_device usbhsf_device = {
static struct sh_eth_plat_data sh_eth_platdata = {
.phy = 0x00, /* LAN8710A */
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_GIGABIT,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
@@ -1162,9 +1161,6 @@ static void __init eva_init(void)
gpio_request_one(61, GPIOF_OUT_INIT_HIGH, NULL); /* LCDDON */
gpio_request_one(202, GPIOF_OUT_INIT_LOW, NULL); /* LCD0_LED_CONT */
- /* Touchscreen */
- gpio_request_one(166, GPIOF_OUT_INIT_HIGH, NULL); /* TP_RST_B */
-
/* GETHER */
gpio_request_one(18, GPIOF_OUT_INIT_HIGH, NULL); /* PHY_RST */
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index d5554646916..35dd7f201a1 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -3,6 +3,7 @@
*
* Copyright (C) 2013 Renesas Solutions Corp.
* Copyright (C) 2013 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ * Copyright (C) 2013 Cogent Embedded, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,6 +29,7 @@
#include <linux/smsc911x.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
+#include <media/soc_camera.h>
#include <mach/common.h>
#include <mach/irqs.h>
#include <mach/r8a7778.h>
@@ -89,7 +91,6 @@ static struct sh_mobile_sdhi_info sdhi0_info = {
static struct sh_eth_plat_data ether_platform_data __initdata = {
.phy = 0x01,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_FAST_RCAR,
.phy_interface = PHY_INTERFACE_MODE_RMII,
/*
* Although the LINK signal is available on the board, it's connected to
@@ -143,6 +144,25 @@ static struct sh_mmcif_plat_data sh_mmcif_plat = {
MMC_CAP_NEEDS_POLL,
};
+static struct rcar_vin_platform_data vin_platform_data __initdata = {
+ .flags = RCAR_VIN_BT656,
+};
+
+/* In the default configuration both decoders reside on I2C bus 0 */
+#define BOCKW_CAMERA(idx) \
+static struct i2c_board_info camera##idx##_info = { \
+ I2C_BOARD_INFO("ml86v7667", 0x41 + 2 * (idx)), \
+}; \
+ \
+static struct soc_camera_link iclink##idx##_ml86v7667 __initdata = { \
+ .bus_id = idx, \
+ .i2c_adapter_id = 0, \
+ .board_info = &camera##idx##_info, \
+}
+
+BOCKW_CAMERA(0);
+BOCKW_CAMERA(1);
+
static const struct pinctrl_map bockw_pinctrl_map[] = {
/* Ether */
PIN_MAP_MUX_GROUP_DEFAULT("r8a777x-ether", "pfc-r8a7778",
@@ -167,7 +187,23 @@ static const struct pinctrl_map bockw_pinctrl_map[] = {
"usb1", "usb1"),
/* SDHI0 */
PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
- "sdhi0", "sdhi0"),
+ "sdhi0_data4", "sdhi0"),
+ PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
+ "sdhi0_ctrl", "sdhi0"),
+ PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
+ "sdhi0_cd", "sdhi0"),
+ PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
+ "sdhi0_wp", "sdhi0"),
+ /* VIN0 */
+ PIN_MAP_MUX_GROUP_DEFAULT("r8a7778-vin.0", "pfc-r8a7778",
+ "vin0_clk", "vin0"),
+ PIN_MAP_MUX_GROUP_DEFAULT("r8a7778-vin.0", "pfc-r8a7778",
+ "vin0_data8", "vin0"),
+ /* VIN1 */
+ PIN_MAP_MUX_GROUP_DEFAULT("r8a7778-vin.1", "pfc-r8a7778",
+ "vin1_clk", "vin1"),
+ PIN_MAP_MUX_GROUP_DEFAULT("r8a7778-vin.1", "pfc-r8a7778",
+ "vin1_data8", "vin1"),
};
#define FPGA 0x18200000
@@ -186,6 +222,16 @@ static void __init bockw_init(void)
r8a7778_add_i2c_device(0);
r8a7778_add_hspi_device(0);
r8a7778_add_mmc_device(&sh_mmcif_plat);
+ r8a7778_add_vin_device(0, &vin_platform_data);
+ /* VIN1 has a pin conflict with Ether */
+ if (!IS_ENABLED(CONFIG_SH_ETH))
+ r8a7778_add_vin_device(1, &vin_platform_data);
+ platform_device_register_data(&platform_bus, "soc-camera-pdrv", 0,
+ &iclink0_ml86v7667,
+ sizeof(iclink0_ml86v7667));
+ platform_device_register_data(&platform_bus, "soc-camera-pdrv", 1,
+ &iclink1_ml86v7667,
+ sizeof(iclink1_ml86v7667));
i2c_register_board_info(0, i2c0_devices,
ARRAY_SIZE(i2c0_devices));
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index d73e21d3ea8..8d6bd5c5efb 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -59,7 +59,7 @@ static __initdata struct gpio_led_platform_data lager_leds_pdata = {
#define GPIO_KEY(c, g, d, ...) \
{ .code = c, .gpio = g, .desc = d, .active_low = 1 }
-static __initdata struct gpio_keys_button gpio_buttons[] = {
+static struct gpio_keys_button gpio_buttons[] = {
GPIO_KEY(KEY_4, RCAR_GP_PIN(1, 28), "SW2-pin4"),
GPIO_KEY(KEY_3, RCAR_GP_PIN(1, 26), "SW2-pin3"),
GPIO_KEY(KEY_2, RCAR_GP_PIN(1, 24), "SW2-pin2"),
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index a7d1010505b..ca7fb2e63c6 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -1,8 +1,9 @@
/*
* marzen board support
*
- * Copyright (C) 2011 Renesas Solutions Corp.
+ * Copyright (C) 2011, 2013 Renesas Solutions Corp.
* Copyright (C) 2011 Magnus Damm
+ * Copyright (C) 2013 Cogent Embedded, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -37,6 +38,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/mfd/tmio.h>
+#include <media/soc_camera.h>
#include <mach/hardware.h>
#include <mach/r8a7779.h>
#include <mach/common.h>
@@ -178,12 +180,40 @@ static struct platform_device leds_device = {
},
};
+static struct rcar_vin_platform_data vin_platform_data __initdata = {
+ .flags = RCAR_VIN_BT656,
+};
+
+#define MARZEN_CAMERA(idx) \
+static struct i2c_board_info camera##idx##_info = { \
+ I2C_BOARD_INFO("adv7180", 0x20 + (idx)), \
+}; \
+ \
+static struct soc_camera_link iclink##idx##_adv7180 = { \
+ .bus_id = 1 + 2 * (idx), \
+ .i2c_adapter_id = 0, \
+ .board_info = &camera##idx##_info, \
+}; \
+ \
+static struct platform_device camera##idx##_device = { \
+ .name = "soc-camera-pdrv", \
+ .id = idx, \
+ .dev = { \
+ .platform_data = &iclink##idx##_adv7180, \
+ }, \
+};
+
+MARZEN_CAMERA(0);
+MARZEN_CAMERA(1);
+
static struct platform_device *marzen_devices[] __initdata = {
&eth_device,
&sdhi0_device,
&thermal_device,
&hspi_device,
&leds_device,
+ &camera0_device,
+ &camera1_device,
};
static const struct pinctrl_map marzen_pinctrl_map[] = {
@@ -219,6 +249,16 @@ static const struct pinctrl_map marzen_pinctrl_map[] = {
/* USB2 */
PIN_MAP_MUX_GROUP_DEFAULT("ehci-platform.1", "pfc-r8a7779",
"usb2", "usb2"),
+ /* VIN1 */
+ PIN_MAP_MUX_GROUP_DEFAULT("r8a7779-vin.1", "pfc-r8a7779",
+ "vin1_clk", "vin1"),
+ PIN_MAP_MUX_GROUP_DEFAULT("r8a7779-vin.1", "pfc-r8a7779",
+ "vin1_data8", "vin1"),
+ /* VIN3 */
+ PIN_MAP_MUX_GROUP_DEFAULT("r8a7779-vin.3", "pfc-r8a7779",
+ "vin3_clk", "vin3"),
+ PIN_MAP_MUX_GROUP_DEFAULT("r8a7779-vin.3", "pfc-r8a7779",
+ "vin3_data8", "vin3"),
};
static void __init marzen_init(void)
@@ -235,6 +275,8 @@ static void __init marzen_init(void)
r8a7779_add_standard_devices();
r8a7779_add_usb_phy_device(&usb_phy_platform_data);
+ r8a7779_add_vin_device(1, &vin_platform_data);
+ r8a7779_add_vin_device(3, &vin_platform_data);
platform_add_devices(marzen_devices, ARRAY_SIZE(marzen_devices));
}
diff --git a/arch/arm/mach-shmobile/clock-r8a7778.c b/arch/arm/mach-shmobile/clock-r8a7778.c
index a0e9eb72e46..c4bf2d8fb11 100644
--- a/arch/arm/mach-shmobile/clock-r8a7778.c
+++ b/arch/arm/mach-shmobile/clock-r8a7778.c
@@ -106,6 +106,7 @@ enum {
MSTP331,
MSTP323, MSTP322, MSTP321,
MSTP114,
+ MSTP110, MSTP109,
MSTP100,
MSTP030,
MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024, MSTP023, MSTP022, MSTP021,
@@ -119,6 +120,8 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP322] = SH_CLK_MSTP32(&p_clk, MSTPCR3, 22, 0), /* SDHI1 */
[MSTP321] = SH_CLK_MSTP32(&p_clk, MSTPCR3, 21, 0), /* SDHI2 */
[MSTP114] = SH_CLK_MSTP32(&p_clk, MSTPCR1, 14, 0), /* Ether */
+ [MSTP110] = SH_CLK_MSTP32(&s_clk, MSTPCR1, 10, 0), /* VIN0 */
+ [MSTP109] = SH_CLK_MSTP32(&s_clk, MSTPCR1, 9, 0), /* VIN1 */
[MSTP100] = SH_CLK_MSTP32(&p_clk, MSTPCR1, 0, 0), /* USB0/1 */
[MSTP030] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 30, 0), /* I2C0 */
[MSTP029] = SH_CLK_MSTP32(&p_clk, MSTPCR0, 29, 0), /* I2C1 */
@@ -146,6 +149,8 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP322]), /* SDHI1 */
CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP321]), /* SDHI2 */
CLKDEV_DEV_ID("r8a777x-ether", &mstp_clks[MSTP114]), /* Ether */
+ CLKDEV_DEV_ID("r8a7778-vin.0", &mstp_clks[MSTP110]), /* VIN0 */
+ CLKDEV_DEV_ID("r8a7778-vin.1", &mstp_clks[MSTP109]), /* VIN1 */
CLKDEV_DEV_ID("ehci-platform", &mstp_clks[MSTP100]), /* USB EHCI port0/1 */
CLKDEV_DEV_ID("ohci-platform", &mstp_clks[MSTP100]), /* USB OHCI port0/1 */
CLKDEV_DEV_ID("i2c-rcar.0", &mstp_clks[MSTP030]), /* I2C0 */
diff --git a/arch/arm/mach-shmobile/clock-r8a7779.c b/arch/arm/mach-shmobile/clock-r8a7779.c
index 10340f5becb..bd6ad922eb7 100644
--- a/arch/arm/mach-shmobile/clock-r8a7779.c
+++ b/arch/arm/mach-shmobile/clock-r8a7779.c
@@ -112,7 +112,9 @@ static struct clk *main_clks[] = {
};
enum { MSTP323, MSTP322, MSTP321, MSTP320,
+ MSTP120,
MSTP116, MSTP115, MSTP114,
+ MSTP110, MSTP109, MSTP108,
MSTP103, MSTP101, MSTP100,
MSTP030,
MSTP029, MSTP028, MSTP027, MSTP026, MSTP025, MSTP024, MSTP023, MSTP022, MSTP021,
@@ -125,9 +127,13 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP322] = SH_CLK_MSTP32(&clkp_clk, MSTPCR3, 22, 0), /* SDHI1 */
[MSTP321] = SH_CLK_MSTP32(&clkp_clk, MSTPCR3, 21, 0), /* SDHI2 */
[MSTP320] = SH_CLK_MSTP32(&clkp_clk, MSTPCR3, 20, 0), /* SDHI3 */
+ [MSTP120] = SH_CLK_MSTP32(&clks_clk, MSTPCR1, 20, 0), /* VIN3 */
[MSTP116] = SH_CLK_MSTP32(&clkp_clk, MSTPCR1, 16, 0), /* PCIe */
[MSTP115] = SH_CLK_MSTP32(&clkp_clk, MSTPCR1, 15, 0), /* SATA */
[MSTP114] = SH_CLK_MSTP32(&clkp_clk, MSTPCR1, 14, 0), /* Ether */
+ [MSTP110] = SH_CLK_MSTP32(&clks_clk, MSTPCR1, 10, 0), /* VIN0 */
+ [MSTP109] = SH_CLK_MSTP32(&clks_clk, MSTPCR1, 9, 0), /* VIN1 */
+ [MSTP108] = SH_CLK_MSTP32(&clks_clk, MSTPCR1, 8, 0), /* VIN2 */
[MSTP103] = SH_CLK_MSTP32(&clks_clk, MSTPCR1, 3, 0), /* DU */
[MSTP101] = SH_CLK_MSTP32(&clkp_clk, MSTPCR1, 1, 0), /* USB2 */
[MSTP100] = SH_CLK_MSTP32(&clkp_clk, MSTPCR1, 0, 0), /* USB0/1 */
@@ -162,10 +168,14 @@ static struct clk_lookup lookups[] = {
CLKDEV_CON_ID("peripheral_clk", &clkp_clk),
/* MSTP32 clocks */
+ CLKDEV_DEV_ID("r8a7779-vin.3", &mstp_clks[MSTP120]), /* VIN3 */
CLKDEV_DEV_ID("rcar-pcie", &mstp_clks[MSTP116]), /* PCIe */
CLKDEV_DEV_ID("sata_rcar", &mstp_clks[MSTP115]), /* SATA */
CLKDEV_DEV_ID("fc600000.sata", &mstp_clks[MSTP115]), /* SATA w/DT */
CLKDEV_DEV_ID("r8a777x-ether", &mstp_clks[MSTP114]), /* Ether */
+ CLKDEV_DEV_ID("r8a7779-vin.0", &mstp_clks[MSTP110]), /* VIN0 */
+ CLKDEV_DEV_ID("r8a7779-vin.1", &mstp_clks[MSTP109]), /* VIN1 */
+ CLKDEV_DEV_ID("r8a7779-vin.2", &mstp_clks[MSTP108]), /* VIN2 */
CLKDEV_DEV_ID("ehci-platform.1", &mstp_clks[MSTP101]), /* USB EHCI port2 */
CLKDEV_DEV_ID("ohci-platform.1", &mstp_clks[MSTP101]), /* USB OHCI port2 */
CLKDEV_DEV_ID("ehci-platform.0", &mstp_clks[MSTP100]), /* USB EHCI port0/1 */
diff --git a/arch/arm/mach-shmobile/include/mach/r8a7778.h b/arch/arm/mach-shmobile/include/mach/r8a7778.h
index 851d027a2f0..a7c6d151cdd 100644
--- a/arch/arm/mach-shmobile/include/mach/r8a7778.h
+++ b/arch/arm/mach-shmobile/include/mach/r8a7778.h
@@ -22,6 +22,7 @@
#include <linux/mmc/sh_mobile_sdhi.h>
#include <linux/sh_eth.h>
#include <linux/platform_data/usb-rcar-phy.h>
+#include <linux/platform_data/camera-rcar.h>
extern void r8a7778_add_standard_devices(void);
extern void r8a7778_add_standard_devices_dt(void);
@@ -30,6 +31,8 @@ extern void r8a7778_add_usb_phy_device(struct rcar_phy_platform_data *pdata);
extern void r8a7778_add_i2c_device(int id);
extern void r8a7778_add_hspi_device(int id);
extern void r8a7778_add_mmc_device(struct sh_mmcif_plat_data *info);
+extern void r8a7778_add_vin_device(int id,
+ struct rcar_vin_platform_data *pdata);
extern void r8a7778_init_late(void);
extern void r8a7778_init_delay(void);
diff --git a/arch/arm/mach-shmobile/include/mach/r8a7779.h b/arch/arm/mach-shmobile/include/mach/r8a7779.h
index fc47073c7ba..6d2b6417fe2 100644
--- a/arch/arm/mach-shmobile/include/mach/r8a7779.h
+++ b/arch/arm/mach-shmobile/include/mach/r8a7779.h
@@ -5,6 +5,7 @@
#include <linux/pm_domain.h>
#include <linux/sh_eth.h>
#include <linux/platform_data/usb-rcar-phy.h>
+#include <linux/platform_data/camera-rcar.h>
struct platform_device;
@@ -35,6 +36,8 @@ extern void r8a7779_add_standard_devices(void);
extern void r8a7779_add_standard_devices_dt(void);
extern void r8a7779_add_ether_device(struct sh_eth_plat_data *pdata);
extern void r8a7779_add_usb_phy_device(struct rcar_phy_platform_data *pdata);
+extern void r8a7779_add_vin_device(int idx,
+ struct rcar_vin_platform_data *pdata);
extern void r8a7779_init_late(void);
extern void r8a7779_clock_init(void);
extern void r8a7779_pinmux_init(void);
diff --git a/arch/arm/mach-shmobile/setup-r8a7778.c b/arch/arm/mach-shmobile/setup-r8a7778.c
index 80c20392ad7..0174f059eac 100644
--- a/arch/arm/mach-shmobile/setup-r8a7778.c
+++ b/arch/arm/mach-shmobile/setup-r8a7778.c
@@ -333,6 +333,40 @@ void __init r8a7778_add_mmc_device(struct sh_mmcif_plat_data *info)
info, sizeof(*info));
}
+/* VIN */
+#define R8A7778_VIN(idx) \
+static struct resource vin##idx##_resources[] __initdata = { \
+ DEFINE_RES_MEM(0xffc50000 + 0x1000 * (idx), 0x1000), \
+ DEFINE_RES_IRQ(gic_iid(0x5a)), \
+}; \
+ \
+static struct platform_device_info vin##idx##_info __initdata = { \
+ .parent = &platform_bus, \
+ .name = "r8a7778-vin", \
+ .id = idx, \
+ .res = vin##idx##_resources, \
+ .num_res = ARRAY_SIZE(vin##idx##_resources), \
+ .dma_mask = DMA_BIT_MASK(32), \
+}
+
+R8A7778_VIN(0);
+R8A7778_VIN(1);
+
+static struct platform_device_info *vin_info_table[] __initdata = {
+ &vin0_info,
+ &vin1_info,
+};
+
+void __init r8a7778_add_vin_device(int id, struct rcar_vin_platform_data *pdata)
+{
+ BUG_ON(id < 0 || id > 1);
+
+ vin_info_table[id]->data = pdata;
+ vin_info_table[id]->size_data = sizeof(*pdata);
+
+ platform_device_register_full(vin_info_table[id]);
+}
+
void __init r8a7778_add_standard_devices(void)
{
int i;
diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c
index 398687761f5..3d892889550 100644
--- a/arch/arm/mach-shmobile/setup-r8a7779.c
+++ b/arch/arm/mach-shmobile/setup-r8a7779.c
@@ -559,6 +559,33 @@ static struct resource ether_resources[] = {
},
};
+#define R8A7779_VIN(idx) \
+static struct resource vin##idx##_resources[] __initdata = { \
+ DEFINE_RES_MEM(0xffc50000 + 0x1000 * (idx), 0x1000), \
+ DEFINE_RES_IRQ(gic_iid(0x5f + (idx))), \
+}; \
+ \
+static struct platform_device_info vin##idx##_info __initdata = { \
+ .parent = &platform_bus, \
+ .name = "r8a7779-vin", \
+ .id = idx, \
+ .res = vin##idx##_resources, \
+ .num_res = ARRAY_SIZE(vin##idx##_resources), \
+ .dma_mask = DMA_BIT_MASK(32), \
+}
+
+R8A7779_VIN(0);
+R8A7779_VIN(1);
+R8A7779_VIN(2);
+R8A7779_VIN(3);
+
+static struct platform_device_info *vin_info_table[] __initdata = {
+ &vin0_info,
+ &vin1_info,
+ &vin2_info,
+ &vin3_info,
+};
+
static struct platform_device *r8a7779_devices_dt[] __initdata = {
&scif0_device,
&scif1_device,
@@ -610,6 +637,16 @@ void __init r8a7779_add_usb_phy_device(struct rcar_phy_platform_data *pdata)
pdata, sizeof(*pdata));
}
+void __init r8a7779_add_vin_device(int id, struct rcar_vin_platform_data *pdata)
+{
+ BUG_ON(id < 0 || id > 3);
+
+ vin_info_table[id]->data = pdata;
+ vin_info_table[id]->size_data = sizeof(*pdata);
+
+ platform_device_register_full(vin_info_table[id]);
+}
+
/* do nothing for !CONFIG_SMP or !CONFIG_HAVE_TWD */
void __init __weak r8a7779_register_twd(void) { }
diff --git a/arch/arm/mach-spear/include/mach/debug-macro.S b/arch/arm/mach-spear/include/mach/debug-macro.S
deleted file mode 100644
index 75b05ad0fba..00000000000
--- a/arch/arm/mach-spear/include/mach/debug-macro.S
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * arch/arm/plat-spear/include/plat/debug-macro.S
- *
- * Debugging macro include header for spear platform
- *
- * Copyright (C) 2009 ST Microelectronics
- * Viresh Kumar <viresh.linux@gmail.com>
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/amba/serial.h>
-#include <mach/spear.h>
-
- .macro addruart, rp, rv, tmp
- mov \rp, #SPEAR_DBG_UART_BASE @ Physical base
- mov \rv, #VA_SPEAR_DBG_UART_BASE @ Virtual base
- .endm
-
- .macro senduart, rd, rx
- strb \rd, [\rx, #UART01x_DR] @ ASC_TX_BUFFER
- .endm
-
- .macro waituart, rd, rx
-1001: ldr \rd, [\rx, #UART01x_FR] @ FLAG REGISTER
- tst \rd, #UART01x_FR_TXFF @ TX_FULL
- bne 1001b
- .endm
-
- .macro busyuart, rd, rx
-1002: ldr \rd, [\rx, #UART01x_FR] @ FLAG REGISTER
- tst \rd, #UART011_FR_TXFE @ TX_EMPTY
- beq 1002b
- .endm
diff --git a/arch/arm/mach-spear/include/mach/spear.h b/arch/arm/mach-spear/include/mach/spear.h
index cf3a5369eec..5cdc53d9b65 100644
--- a/arch/arm/mach-spear/include/mach/spear.h
+++ b/arch/arm/mach-spear/include/mach/spear.h
@@ -39,7 +39,6 @@
/* Debug uart for linux, will be used for debug and uncompress messages */
#define SPEAR_DBG_UART_BASE SPEAR_ICM1_UART_BASE
-#define VA_SPEAR_DBG_UART_BASE VA_SPEAR_ICM1_UART_BASE
/* Sysctl base for spear platform */
#define SPEAR_SYS_CTRL_BASE SPEAR_ICM3_SYS_CTRL_BASE
@@ -86,7 +85,6 @@
/* Debug uart for linux, will be used for debug and uncompress messages */
#define SPEAR_DBG_UART_BASE UART_BASE
-#define VA_SPEAR_DBG_UART_BASE VA_UART_BASE
#endif /* SPEAR13XX */
diff --git a/arch/arm/mach-sti/Kconfig b/arch/arm/mach-sti/Kconfig
index d04e3bfe191..835833e3c4f 100644
--- a/arch/arm/mach-sti/Kconfig
+++ b/arch/arm/mach-sti/Kconfig
@@ -11,8 +11,9 @@ menuconfig ARCH_STI
select HAVE_SMP
select HAVE_ARM_SCU if SMP
select ARCH_REQUIRE_GPIOLIB
- select ARM_ERRATA_720789
select ARM_ERRATA_754322
+ select ARM_ERRATA_764369
+ select ARM_ERRATA_775420
select PL310_ERRATA_753970 if CACHE_PL310
select PL310_ERRATA_769419 if CACHE_PL310
help
diff --git a/arch/arm/mach-sti/headsmp.S b/arch/arm/mach-sti/headsmp.S
index 78ebc7559f5..4c09bae86ed 100644
--- a/arch/arm/mach-sti/headsmp.S
+++ b/arch/arm/mach-sti/headsmp.S
@@ -16,8 +16,6 @@
#include <linux/linkage.h>
#include <linux/init.h>
- __INIT
-
/*
* ST specific entry point for secondary CPUs. This provides
* a "holding pen" into which all secondary cores are held until we're
diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c
index 0d1e4128d46..fc97cfd5276 100644
--- a/arch/arm/mach-tegra/tegra.c
+++ b/arch/arm/mach-tegra/tegra.c
@@ -29,7 +29,6 @@
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/pda_power.h>
-#include <linux/platform_data/tegra_usb.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
@@ -46,40 +45,6 @@
#include "fuse.h"
#include "iomap.h"
-static struct tegra_ehci_platform_data tegra_ehci1_pdata = {
- .operating_mode = TEGRA_USB_OTG,
- .power_down_on_bus_suspend = 1,
- .vbus_gpio = -1,
-};
-
-static struct tegra_ulpi_config tegra_ehci2_ulpi_phy_config = {
- .reset_gpio = -1,
- .clk = "cdev2",
-};
-
-static struct tegra_ehci_platform_data tegra_ehci2_pdata = {
- .phy_config = &tegra_ehci2_ulpi_phy_config,
- .operating_mode = TEGRA_USB_HOST,
- .power_down_on_bus_suspend = 1,
- .vbus_gpio = -1,
-};
-
-static struct tegra_ehci_platform_data tegra_ehci3_pdata = {
- .operating_mode = TEGRA_USB_HOST,
- .power_down_on_bus_suspend = 1,
- .vbus_gpio = -1,
-};
-
-static struct of_dev_auxdata tegra20_auxdata_lookup[] __initdata = {
- OF_DEV_AUXDATA("nvidia,tegra20-ehci", 0xC5000000, "tegra-ehci.0",
- &tegra_ehci1_pdata),
- OF_DEV_AUXDATA("nvidia,tegra20-ehci", 0xC5004000, "tegra-ehci.1",
- &tegra_ehci2_pdata),
- OF_DEV_AUXDATA("nvidia,tegra20-ehci", 0xC5008000, "tegra-ehci.2",
- &tegra_ehci3_pdata),
- {}
-};
-
static void __init tegra_dt_init(void)
{
struct soc_device_attribute *soc_dev_attr;
@@ -112,8 +77,7 @@ static void __init tegra_dt_init(void)
* devices
*/
out:
- of_platform_populate(NULL, of_default_bus_match_table,
- tegra20_auxdata_lookup, parent);
+ of_platform_populate(NULL, of_default_bus_match_table, NULL, parent);
}
static void __init trimslice_init(void)
diff --git a/arch/arm/mach-ux500/Makefile b/arch/arm/mach-ux500/Makefile
index bf9b6be5b18..fe1f3e26b88 100644
--- a/arch/arm/mach-ux500/Makefile
+++ b/arch/arm/mach-ux500/Makefile
@@ -4,7 +4,6 @@
obj-y := cpu.o devices.o devices-common.o \
id.o usb.o timer.o pm.o
-obj-$(CONFIG_CPU_IDLE) += cpuidle.o
obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o
obj-$(CONFIG_MACH_MOP500) += board-mop500.o board-mop500-sdi.o \
diff --git a/arch/arm/mach-versatile/include/mach/debug-macro.S b/arch/arm/mach-versatile/include/mach/debug-macro.S
deleted file mode 100644
index d0fbd7f1cb0..00000000000
--- a/arch/arm/mach-versatile/include/mach/debug-macro.S
+++ /dev/null
@@ -1,21 +0,0 @@
-/* arch/arm/mach-versatile/include/mach/debug-macro.S
- *
- * Debugging macro include header
- *
- * Copyright (C) 1994-1999 Russell King
- * Moved from linux/arch/arm/kernel/debug.S by Ben Dooks
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
-*/
-
- .macro addruart, rp, rv, tmp
- mov \rp, #0x001F0000
- orr \rp, \rp, #0x00001000
- orr \rv, \rp, #0xf1000000 @ virtual base
- orr \rp, \rp, #0x10000000 @ physical base
- .endm
-
-#include <asm/hardware/debug-pl01x.S>
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 5b799c29886..5f252569c68 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -91,7 +91,7 @@ static void __init zynq_map_io(void)
zynq_scu_map_io();
}
-static void zynq_system_reset(char mode, const char *cmd)
+static void zynq_system_reset(enum reboot_mode mode, const char *cmd)
{
zynq_slcr_system_reset();
}
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 6cacdc8dd65..cd2c88e7a8f 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -421,24 +421,28 @@ config CPU_32v3
select CPU_USE_DOMAINS if MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select TLS_REG_EMUL if SMP || !MMU
+ select NEED_KUSER_HELPERS
config CPU_32v4
bool
select CPU_USE_DOMAINS if MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select TLS_REG_EMUL if SMP || !MMU
+ select NEED_KUSER_HELPERS
config CPU_32v4T
bool
select CPU_USE_DOMAINS if MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select TLS_REG_EMUL if SMP || !MMU
+ select NEED_KUSER_HELPERS
config CPU_32v5
bool
select CPU_USE_DOMAINS if MMU
select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
select TLS_REG_EMUL if SMP || !MMU
+ select NEED_KUSER_HELPERS
config CPU_32v6
bool
@@ -776,6 +780,7 @@ config CPU_BPREDICT_DISABLE
config TLS_REG_EMUL
bool
+ select NEED_KUSER_HELPERS
help
An SMP system using a pre-ARMv6 processor (there are apparently
a few prototypes like that in existence) and therefore access to
@@ -783,11 +788,43 @@ config TLS_REG_EMUL
config NEEDS_SYSCALL_FOR_CMPXCHG
bool
+ select NEED_KUSER_HELPERS
help
SMP on a pre-ARMv6 processor? Well OK then.
Forget about fast user space cmpxchg support.
It is just not possible.
+config NEED_KUSER_HELPERS
+ bool
+
+config KUSER_HELPERS
+ bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+ default y
+ help
+ Warning: disabling this option may break user programs.
+
+ Provide kuser helpers in the vector page. The kernel provides
+ helper code to userspace in read only form at a fixed location
+ in the high vector page to allow userspace to be independent of
+ the CPU type fitted to the system. This permits binaries to be
+ run on ARMv4 through to ARMv7 without modification.
+
+ See Documentation/arm/kernel_user_helpers.txt for details.
+
+ However, the fixed address nature of these helpers can be used
+ by ROP (return orientated programming) authors when creating
+ exploits.
+
+ If all of the binaries and libraries which run on your platform
+ are built specifically for your platform, and make no use of
+ these helpers, then you can turn this option off to hinder
+ such exploits. However, in that case, if a binary or library
+ relying on those helpers is run, it will receive a SIGILL signal,
+ which will terminate the program.
+
+ Say N here only if you are absolutely certain that you do not
+ need these helpers; otherwise, the safe option is to say Y.
+
config DMA_CACHE_RWFO
bool "Enable read/write for ownership DMA cache maintenance"
depends on CPU_V6K && SMP
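The KUSER_HELPERS help text above describes helper code placed at fixed addresses in the high vector page for userspace to call. As a minimal sketch of that ABI, assuming the documented __kernel_cmpxchg entry point at 0xffff0fc0 from Documentation/arm/kernel_user_helpers.txt, a userspace binary might wrap it as below; atomic_add_user is a hypothetical name, and with KUSER_HELPERS=n the call raises SIGILL.

typedef int (kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kuser_cmpxchg (*(kuser_cmpxchg_t *)0xffff0fc0)	/* documented fixed address */

static int atomic_add_user(volatile int *ptr, int val)		/* hypothetical helper */
{
	int old, new;

	do {
		old = *ptr;
		new = old + val;
	} while (__kuser_cmpxchg(old, new, ptr));		/* returns 0 on success */

	return new;
}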
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index d70e0aba0c9..447da6ffadd 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -290,7 +290,7 @@ static void l2x0_disable(void)
raw_spin_lock_irqsave(&l2x0_lock, flags);
__l2x0_flush_all();
writel_relaxed(0, l2x0_base + L2X0_CTRL);
- dsb();
+ dsb(st);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
@@ -417,9 +417,9 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
outer_cache.disable = l2x0_disable;
}
- printk(KERN_INFO "%s cache controller enabled\n", type);
- printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
- ways, cache_id, aux, l2x0_size);
+ pr_info("%s cache controller enabled\n", type);
+ pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
+ ways, cache_id, aux, l2x0_size >> 10);
}
#ifdef CONFIG_OF
@@ -929,7 +929,9 @@ static const struct of_device_id l2x0_ids[] __initconst = {
.data = (void *)&aurora_no_outer_data},
{ .compatible = "marvell,aurora-outer-cache",
.data = (void *)&aurora_with_outer_data},
- { .compatible = "bcm,bcm11351-a2-pl310-cache",
+ { .compatible = "brcm,bcm11351-a2-pl310-cache",
+ .data = (void *)&bcm_l2x0_data},
+ { .compatible = "bcm,bcm11351-a2-pl310-cache", /* deprecated name */
.data = (void *)&bcm_l2x0_data},
{}
};
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 515b00064da..b5c467a65c2 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -282,7 +282,7 @@ ENTRY(v7_coherent_user_range)
add r12, r12, r2
cmp r12, r1
blo 1b
- dsb
+ dsb ishst
icache_line_size r2, r3
sub r3, r2, #1
bic r12, r0, r3
@@ -294,7 +294,7 @@ ENTRY(v7_coherent_user_range)
mov r0, #0
ALT_SMP(mcr p15, 0, r0, c7, c1, 6) @ invalidate BTB Inner Shareable
ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB
- dsb
+ dsb ishst
isb
mov pc, lr
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b55b1015724..84e6f772e20 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -162,10 +162,7 @@ static void flush_context(unsigned int cpu)
}
/* Queue a TLB invalidate and flush the I-cache if necessary. */
- if (!tlb_ops_need_broadcast())
- cpumask_set_cpu(cpu, &tlb_flush_pending);
- else
- cpumask_setall(&tlb_flush_pending);
+ cpumask_setall(&tlb_flush_pending);
if (icache_is_vivt_asid_tagged())
__flush_icache_all();
@@ -245,7 +242,6 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
local_flush_bp_all();
local_flush_tlb_all();
- dummy_flush_tlb_a15_erratum();
}
atomic64_set(&per_cpu(active_asids, cpu), asid);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 7f9b1798c6c..f5e1a847171 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -358,7 +358,7 @@ static int __init atomic_pool_init(void)
if (!pages)
goto no_pages;
- if (IS_ENABLED(CONFIG_CMA))
+ if (IS_ENABLED(CONFIG_DMA_CMA))
ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
atomic_pool_init);
else
@@ -455,7 +455,6 @@ static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
unsigned end = start + size;
apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
- dsb();
flush_tlb_kernel_range(start, end);
}
@@ -670,7 +669,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
addr = __alloc_simple_buffer(dev, size, gfp, &page);
else if (!(gfp & __GFP_WAIT))
addr = __alloc_from_pool(size, &page);
- else if (!IS_ENABLED(CONFIG_CMA))
+ else if (!IS_ENABLED(CONFIG_DMA_CMA))
addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
else
addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
@@ -759,7 +758,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
__dma_free_buffer(page, size);
} else if (__free_from_pool(cpu_addr, size)) {
return;
- } else if (!IS_ENABLED(CONFIG_CMA)) {
+ } else if (!IS_ENABLED(CONFIG_DMA_CMA)) {
__dma_free_remap(cpu_addr, size);
__dma_free_buffer(page, size);
} else {
diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
index 3d1e4a205b0..66781bf3407 100644
--- a/arch/arm/mm/hugetlbpage.c
+++ b/arch/arm/mm/hugetlbpage.c
@@ -36,22 +36,6 @@
* of type casting from pmd_t * to pte_t *.
*/
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd = NULL;
-
- pgd = pgd_offset(mm, addr);
- if (pgd_present(*pgd)) {
- pud = pud_offset(pgd, addr);
- if (pud_present(*pud))
- pmd = pmd_offset(pud, addr);
- }
-
- return (pte_t *)pmd;
-}
-
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
int write)
{
@@ -68,33 +52,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
return 0;
}
-pte_t *huge_pte_alloc(struct mm_struct *mm,
- unsigned long addr, unsigned long sz)
-{
- pgd_t *pgd;
- pud_t *pud;
- pte_t *pte = NULL;
-
- pgd = pgd_offset(mm, addr);
- pud = pud_alloc(mm, pgd, addr);
- if (pud)
- pte = (pte_t *)pmd_alloc(mm, pud, addr);
-
- return pte;
-}
-
-struct page *
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int write)
-{
- struct page *page;
-
- page = pte_page(*(pte_t *)pmd);
- if (page)
- page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
- return page;
-}
-
int pmd_huge(pmd_t pmd)
{
return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 15225d829d7..2958e74fc42 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -231,7 +231,7 @@ static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
}
#endif
-void __init setup_dma_zone(struct machine_desc *mdesc)
+void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
if (mdesc->dma_zone_size) {
@@ -335,7 +335,8 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
return phys;
}
-void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
+void __init arm_memblock_init(struct meminfo *mi,
+ const struct machine_desc *mdesc)
{
int i;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4f56617a239..b1d17eeb59b 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -989,6 +989,7 @@ phys_addr_t arm_lowmem_limit __initdata = 0;
void __init sanity_check_meminfo(void)
{
+ phys_addr_t memblock_limit = 0;
int i, j, highmem = 0;
phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
@@ -1052,9 +1053,32 @@ void __init sanity_check_meminfo(void)
bank->size = size_limit;
}
#endif
- if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
- arm_lowmem_limit = bank->start + bank->size;
+ if (!bank->highmem) {
+ phys_addr_t bank_end = bank->start + bank->size;
+ if (bank_end > arm_lowmem_limit)
+ arm_lowmem_limit = bank_end;
+
+ /*
+ * Find the first non-section-aligned page, and point
+ * memblock_limit at it. This relies on rounding the
+ * limit down to be section-aligned, which happens at
+ * the end of this function.
+ *
+ * With this algorithm, the start or end of almost any
+ * bank can be non-section-aligned. The only exception
+ * is that the start of the bank 0 must be section-
+ * aligned, since otherwise memory would need to be
+ * allocated when mapping the start of bank 0, which
+ * occurs before any free memory is mapped.
+ */
+ if (!memblock_limit) {
+ if (!IS_ALIGNED(bank->start, SECTION_SIZE))
+ memblock_limit = bank->start;
+ else if (!IS_ALIGNED(bank_end, SECTION_SIZE))
+ memblock_limit = bank_end;
+ }
+ }
j++;
}
#ifdef CONFIG_HIGHMEM
@@ -1079,7 +1103,18 @@ void __init sanity_check_meminfo(void)
#endif
meminfo.nr_banks = j;
high_memory = __va(arm_lowmem_limit - 1) + 1;
- memblock_set_current_limit(arm_lowmem_limit);
+
+ /*
+ * Round the memblock limit down to a section size. This
+ * helps to ensure that we will allocate memory from the
+ * last full section, which should be mapped.
+ */
+ if (memblock_limit)
+ memblock_limit = round_down(memblock_limit, SECTION_SIZE);
+ if (!memblock_limit)
+ memblock_limit = arm_lowmem_limit;
+
+ memblock_set_current_limit(memblock_limit);
}
static inline void prepare_page_table(void)
@@ -1151,7 +1186,7 @@ void __init arm_mm_memblock_reserve(void)
* called function. This means you can't use any function or debugging
* method which may touch any device, otherwise the kernel _will_ crash.
*/
-static void __init devicemaps_init(struct machine_desc *mdesc)
+static void __init devicemaps_init(const struct machine_desc *mdesc)
{
struct map_desc map;
unsigned long addr;
@@ -1160,7 +1195,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
/*
* Allocate the vector page early.
*/
- vectors = early_alloc(PAGE_SIZE);
+ vectors = early_alloc(PAGE_SIZE * 2);
early_trap_init(vectors);
@@ -1205,15 +1240,27 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
map.pfn = __phys_to_pfn(virt_to_phys(vectors));
map.virtual = 0xffff0000;
map.length = PAGE_SIZE;
+#ifdef CONFIG_KUSER_HELPERS
map.type = MT_HIGH_VECTORS;
+#else
+ map.type = MT_LOW_VECTORS;
+#endif
create_mapping(&map);
if (!vectors_high()) {
map.virtual = 0;
+ map.length = PAGE_SIZE * 2;
map.type = MT_LOW_VECTORS;
create_mapping(&map);
}
+ /* Now create a kernel read-only mapping */
+ map.pfn += 1;
+ map.virtual = 0xffff0000 + PAGE_SIZE;
+ map.length = PAGE_SIZE;
+ map.type = MT_LOW_VECTORS;
+ create_mapping(&map);
+
/*
* Ask the machine support to map in the statically mapped devices.
*/
@@ -1272,12 +1319,10 @@ static void __init map_lowmem(void)
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
*/
-void __init paging_init(struct machine_desc *mdesc)
+void __init paging_init(const struct machine_desc *mdesc)
{
void *zero_page;
- memblock_set_current_limit(arm_lowmem_limit);
-
build_mem_type_table();
prepare_page_table();
map_lowmem();
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 1fa50100ab6..34d4ab217ba 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -299,7 +299,7 @@ void __init sanity_check_meminfo(void)
* paging_init() sets up the page tables, initialises the zone memory
* maps, and sets up the zero page, bad page and bad page tables.
*/
-void __init paging_init(struct machine_desc *mdesc)
+void __init paging_init(const struct machine_desc *mdesc)
{
early_trap_init((void *)CONFIG_VECTORS_BASE);
mpu_setup();
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index d5146b98c8d..db79b62c92f 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -514,6 +514,32 @@ ENTRY(cpu_feroceon_set_pte_ext)
#endif
mov pc, lr
+/* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */
+.globl cpu_feroceon_suspend_size
+.equ cpu_feroceon_suspend_size, 4 * 3
+#ifdef CONFIG_ARM_CPU_SUSPEND
+ENTRY(cpu_feroceon_do_suspend)
+ stmfd sp!, {r4 - r6, lr}
+ mrc p15, 0, r4, c13, c0, 0 @ PID
+ mrc p15, 0, r5, c3, c0, 0 @ Domain ID
+ mrc p15, 0, r6, c1, c0, 0 @ Control register
+ stmia r0, {r4 - r6}
+ ldmfd sp!, {r4 - r6, pc}
+ENDPROC(cpu_feroceon_do_suspend)
+
+ENTRY(cpu_feroceon_do_resume)
+ mov ip, #0
+ mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs
+ mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches
+ ldmia r0, {r4 - r6}
+ mcr p15, 0, r4, c13, c0, 0 @ PID
+ mcr p15, 0, r5, c3, c0, 0 @ Domain ID
+ mcr p15, 0, r1, c2, c0, 0 @ TTB address
+ mov r0, r6 @ control register
+ b cpu_resume_mmu
+ENDPROC(cpu_feroceon_do_resume)
+#endif
+
.type __feroceon_setup, #function
__feroceon_setup:
mov r0, #0
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index f64afb9f1bd..bdd3be4be77 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -110,7 +110,7 @@ ENTRY(cpu_v7_set_pte_ext)
ARM( str r3, [r0, #2048]! )
THUMB( add r0, r0, #2048 )
THUMB( str r3, [r0] )
- ALT_SMP(mov pc,lr)
+ ALT_SMP(W(nop))
ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
#endif
mov pc, lr
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index c36ac69488c..01a719e18bb 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -81,7 +81,7 @@ ENTRY(cpu_v7_set_pte_ext)
tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
orreq r2, #L_PTE_RDONLY
1: strd r2, r3, [r0]
- ALT_SMP(mov pc, lr)
+ ALT_SMP(W(nop))
ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
#endif
mov pc, lr
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 5c6d5a3050e..c63d9bdee51 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -75,14 +75,15 @@ ENTRY(cpu_v7_do_idle)
ENDPROC(cpu_v7_do_idle)
ENTRY(cpu_v7_dcache_clean_area)
- ALT_SMP(mov pc, lr) @ MP extensions imply L1 PTW
- ALT_UP(W(nop))
- dcache_line_size r2, r3
-1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
+ ALT_SMP(W(nop)) @ MP extensions imply L1 PTW
+ ALT_UP_B(1f)
+ mov pc, lr
+1: dcache_line_size r2, r3
+2: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, r2
subs r1, r1, r2
- bhi 1b
- dsb
+ bhi 2b
+ dsb ishst
mov pc, lr
ENDPROC(cpu_v7_dcache_clean_area)
@@ -329,7 +330,19 @@ __v7_setup:
1:
#endif
-3: mov r10, #0
+ /* Cortex-A15 Errata */
+3: ldr r10, =0x00000c0f @ Cortex-A15 primary part number
+ teq r0, r10
+ bne 4f
+
+#ifdef CONFIG_ARM_ERRATA_773022
+ cmp r6, #0x4 @ only present up to r0p4
+ mrcle p15, 0, r10, c1, c0, 1 @ read aux control register
+ orrle r10, r10, #1 << 1 @ disable loop buffer
+ mcrle p15, 0, r10, c1, c0, 1 @ write aux control register
+#endif
+
+4: mov r10, #0
mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
dsb
#ifdef CONFIG_MMU
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index ea94765acf9..355308767ba 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -35,7 +35,7 @@
ENTRY(v7wbi_flush_user_tlb_range)
vma_vm_mm r3, r2 @ get vma->vm_mm
mmid r3, r3 @ get vm_mm->context.id
- dsb
+ dsb ish
mov r0, r0, lsr #PAGE_SHIFT @ align address
mov r1, r1, lsr #PAGE_SHIFT
asid r3, r3 @ mask ASID
@@ -56,7 +56,7 @@ ENTRY(v7wbi_flush_user_tlb_range)
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
- dsb
+ dsb ish
mov pc, lr
ENDPROC(v7wbi_flush_user_tlb_range)
@@ -69,7 +69,7 @@ ENDPROC(v7wbi_flush_user_tlb_range)
* - end - end address (exclusive, may not be aligned)
*/
ENTRY(v7wbi_flush_kern_tlb_range)
- dsb
+ dsb ish
mov r0, r0, lsr #PAGE_SHIFT @ align address
mov r1, r1, lsr #PAGE_SHIFT
mov r0, r0, lsl #PAGE_SHIFT
@@ -84,7 +84,7 @@ ENTRY(v7wbi_flush_kern_tlb_range)
add r0, r0, #PAGE_SZ
cmp r0, r1
blo 1b
- dsb
+ dsb ish
isb
mov pc, lr
ENDPROC(v7wbi_flush_kern_tlb_range)
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index 8e11e96eab5..c83f27b6bdd 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -30,6 +30,8 @@
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <asm/irq.h>
#include <mach/hardware.h>
@@ -60,6 +62,30 @@ struct ssp_device *pxa_ssp_request(int port, const char *label)
}
EXPORT_SYMBOL(pxa_ssp_request);
+struct ssp_device *pxa_ssp_request_of(const struct device_node *of_node,
+ const char *label)
+{
+ struct ssp_device *ssp = NULL;
+
+ mutex_lock(&ssp_lock);
+
+ list_for_each_entry(ssp, &ssp_list, node) {
+ if (ssp->of_node == of_node && ssp->use_count == 0) {
+ ssp->use_count++;
+ ssp->label = label;
+ break;
+ }
+ }
+
+ mutex_unlock(&ssp_lock);
+
+ if (&ssp->node == &ssp_list)
+ return NULL;
+
+ return ssp;
+}
+EXPORT_SYMBOL(pxa_ssp_request_of);
+
void pxa_ssp_free(struct ssp_device *ssp)
{
mutex_lock(&ssp_lock);
@@ -72,96 +98,126 @@ void pxa_ssp_free(struct ssp_device *ssp)
}
EXPORT_SYMBOL(pxa_ssp_free);
+#ifdef CONFIG_OF
+static const struct of_device_id pxa_ssp_of_ids[] = {
+ { .compatible = "mrvl,pxa25x-ssp", .data = (void *) PXA25x_SSP },
+ { .compatible = "mvrl,pxa25x-nssp", .data = (void *) PXA25x_NSSP },
+ { .compatible = "mrvl,pxa27x-ssp", .data = (void *) PXA27x_SSP },
+ { .compatible = "mrvl,pxa3xx-ssp", .data = (void *) PXA3xx_SSP },
+ { .compatible = "mvrl,pxa168-ssp", .data = (void *) PXA168_SSP },
+ { .compatible = "mrvl,pxa910-ssp", .data = (void *) PXA910_SSP },
+ { .compatible = "mrvl,ce4100-ssp", .data = (void *) CE4100_SSP },
+ { .compatible = "mrvl,lpss-ssp", .data = (void *) LPSS_SSP },
+ { },
+};
+MODULE_DEVICE_TABLE(of, pxa_ssp_of_ids);
+#endif
+
static int pxa_ssp_probe(struct platform_device *pdev)
{
- const struct platform_device_id *id = platform_get_device_id(pdev);
struct resource *res;
struct ssp_device *ssp;
- int ret = 0;
+ struct device *dev = &pdev->dev;
- ssp = kzalloc(sizeof(struct ssp_device), GFP_KERNEL);
- if (ssp == NULL) {
- dev_err(&pdev->dev, "failed to allocate memory");
+ ssp = devm_kzalloc(dev, sizeof(struct ssp_device), GFP_KERNEL);
+ if (ssp == NULL)
return -ENOMEM;
- }
- ssp->pdev = pdev;
- ssp->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(ssp->clk)) {
- ret = PTR_ERR(ssp->clk);
- goto err_free;
- }
+ ssp->pdev = pdev;
- res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "no SSP RX DRCMR defined\n");
- ret = -ENODEV;
- goto err_free_clk;
- }
- ssp->drcmr_rx = res->start;
+ ssp->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(ssp->clk))
+ return PTR_ERR(ssp->clk);
+
+ if (dev->of_node) {
+ struct of_phandle_args dma_spec;
+ struct device_node *np = dev->of_node;
+
+ /*
+ * FIXME: we should allocate the DMA channel from this
+ * context and pass the channel down to the ssp users.
+ * For now, we lookup the rx and tx indices manually
+ */
+
+ /* rx */
+ of_parse_phandle_with_args(np, "dmas", "#dma-cells",
+ 0, &dma_spec);
+ ssp->drcmr_rx = dma_spec.args[0];
+ of_node_put(dma_spec.np);
+
+ /* tx */
+ of_parse_phandle_with_args(np, "dmas", "#dma-cells",
+ 1, &dma_spec);
+ ssp->drcmr_tx = dma_spec.args[0];
+ of_node_put(dma_spec.np);
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (res == NULL) {
+ dev_err(dev, "no SSP RX DRCMR defined\n");
+ return -ENODEV;
+ }
+ ssp->drcmr_rx = res->start;
- res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
- if (res == NULL) {
- dev_err(&pdev->dev, "no SSP TX DRCMR defined\n");
- ret = -ENODEV;
- goto err_free_clk;
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (res == NULL) {
+ dev_err(dev, "no SSP TX DRCMR defined\n");
+ return -ENODEV;
+ }
+ ssp->drcmr_tx = res->start;
}
- ssp->drcmr_tx = res->start;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
- dev_err(&pdev->dev, "no memory resource defined\n");
- ret = -ENODEV;
- goto err_free_clk;
+ dev_err(dev, "no memory resource defined\n");
+ return -ENODEV;
}
- res = request_mem_region(res->start, resource_size(res),
- pdev->name);
+ res = devm_request_mem_region(dev, res->start, resource_size(res),
+ pdev->name);
if (res == NULL) {
- dev_err(&pdev->dev, "failed to request memory resource\n");
- ret = -EBUSY;
- goto err_free_clk;
+ dev_err(dev, "failed to request memory resource\n");
+ return -EBUSY;
}
ssp->phys_base = res->start;
- ssp->mmio_base = ioremap(res->start, resource_size(res));
+ ssp->mmio_base = devm_ioremap(dev, res->start, resource_size(res));
if (ssp->mmio_base == NULL) {
- dev_err(&pdev->dev, "failed to ioremap() registers\n");
- ret = -ENODEV;
- goto err_free_mem;
+ dev_err(dev, "failed to ioremap() registers\n");
+ return -ENODEV;
}
ssp->irq = platform_get_irq(pdev, 0);
if (ssp->irq < 0) {
- dev_err(&pdev->dev, "no IRQ resource defined\n");
- ret = -ENODEV;
- goto err_free_io;
+ dev_err(dev, "no IRQ resource defined\n");
+ return -ENODEV;
+ }
+
+ if (dev->of_node) {
+ const struct of_device_id *id =
+ of_match_device(of_match_ptr(pxa_ssp_of_ids), dev);
+ ssp->type = (int) id->data;
+ } else {
+ const struct platform_device_id *id =
+ platform_get_device_id(pdev);
+ ssp->type = (int) id->driver_data;
+
+ /* PXA2xx/3xx SSP ports starts from 1 and the internal pdev->id
+ * starts from 0, do a translation here
+ */
+ ssp->port_id = pdev->id + 1;
}
- /* PXA2xx/3xx SSP ports starts from 1 and the internal pdev->id
- * starts from 0, do a translation here
- */
- ssp->port_id = pdev->id + 1;
ssp->use_count = 0;
- ssp->type = (int)id->driver_data;
+ ssp->of_node = dev->of_node;
mutex_lock(&ssp_lock);
list_add(&ssp->node, &ssp_list);
mutex_unlock(&ssp_lock);
platform_set_drvdata(pdev, ssp);
- return 0;
-err_free_io:
- iounmap(ssp->mmio_base);
-err_free_mem:
- release_mem_region(res->start, resource_size(res));
-err_free_clk:
- clk_put(ssp->clk);
-err_free:
- kfree(ssp);
- return ret;
+ return 0;
}
static int pxa_ssp_remove(struct platform_device *pdev)
@@ -201,8 +257,9 @@ static struct platform_driver pxa_ssp_driver = {
.probe = pxa_ssp_probe,
.remove = pxa_ssp_remove,
.driver = {
- .owner = THIS_MODULE,
- .name = "pxa2xx-ssp",
+ .owner = THIS_MODULE,
+ .name = "pxa2xx-ssp",
+ .of_match_table = of_match_ptr(pxa_ssp_of_ids),
},
.id_table = ssp_id_table,
};
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index 3dc5cbea86c..a5b5ff6e68d 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -29,6 +29,13 @@ config PLAT_S5P
help
Base platform code for Samsung's S5P series SoC.
+config SAMSUNG_PM
+ bool
+ depends on PM && (PLAT_S3C24XX || ARCH_S3C64XX || ARCH_S5P64X0 || S5P_PM)
+ default y
+ help
+ Base platform power management code for samsung code
+
if PLAT_SAMSUNG
# boot configurations
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index 98d07d8fc7a..199bbe304d0 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -51,7 +51,7 @@ obj-$(CONFIG_SAMSUNG_DMADEV) += dma-ops.o
# PM support
-obj-$(CONFIG_PM) += pm.o
+obj-$(CONFIG_SAMSUNG_PM) += pm.o
obj-$(CONFIG_SAMSUNG_PM_GPIO) += pm-gpio.o
obj-$(CONFIG_SAMSUNG_PM_CHECK) += pm-check.o
diff --git a/arch/arm/plat-samsung/include/plat/clock.h b/arch/arm/plat-samsung/include/plat/clock.h
index a62753dc15b..df45d6edc98 100644
--- a/arch/arm/plat-samsung/include/plat/clock.h
+++ b/arch/arm/plat-samsung/include/plat/clock.h
@@ -83,6 +83,11 @@ extern struct clk clk_ext;
extern struct clksrc_clk clk_epllref;
extern struct clksrc_clk clk_esysclk;
+/* S3C24XX UART clocks */
+extern struct clk s3c24xx_clk_uart0;
+extern struct clk s3c24xx_clk_uart1;
+extern struct clk s3c24xx_clk_uart2;
+
/* S3C64XX specific clocks */
extern struct clk clk_h2;
extern struct clk clk_27m;
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h
index 5d47ca35cab..6bc1a8f471e 100644
--- a/arch/arm/plat-samsung/include/plat/pm.h
+++ b/arch/arm/plat-samsung/include/plat/pm.h
@@ -19,7 +19,7 @@
struct device;
-#ifdef CONFIG_PM
+#ifdef CONFIG_SAMSUNG_PM
extern __init int s3c_pm_init(void);
extern __init int s3c64xx_pm_init(void);
@@ -58,8 +58,6 @@ extern unsigned char pm_uart_udivslot; /* true to save UART UDIVSLOT */
/* from sleep.S */
-extern void s3c_cpu_resume(void);
-
extern int s3c2410_cpu_suspend(unsigned long);
/* sleep save info */
@@ -106,12 +104,14 @@ extern void s3c_pm_do_save(struct sleep_save *ptr, int count);
extern void s3c_pm_do_restore(struct sleep_save *ptr, int count);
extern void s3c_pm_do_restore_core(struct sleep_save *ptr, int count);
-#ifdef CONFIG_PM
+#ifdef CONFIG_SAMSUNG_PM
extern int s3c_irq_wake(struct irq_data *data, unsigned int state);
extern int s3c_irqext_wake(struct irq_data *data, unsigned int state);
+extern void s3c_cpu_resume(void);
#else
#define s3c_irq_wake NULL
#define s3c_irqext_wake NULL
+#define s3c_cpu_resume NULL
#endif
/* PM debug functions */
diff --git a/arch/arm/plat-samsung/init.c b/arch/arm/plat-samsung/init.c
index 3e5c4619caa..50a3ea0037d 100644
--- a/arch/arm/plat-samsung/init.c
+++ b/arch/arm/plat-samsung/init.c
@@ -55,12 +55,13 @@ void __init s3c_init_cpu(unsigned long idcode,
printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode);
- if (cpu->map_io == NULL || cpu->init == NULL) {
+ if (cpu->init == NULL) {
printk(KERN_ERR "CPU %s support not enabled\n", cpu->name);
panic("Unsupported Samsung CPU");
}
- cpu->map_io();
+ if (cpu->map_io)
+ cpu->map_io();
}
/* s3c24xx_init_clocks
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index ea361364245..d0c23010b69 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -80,7 +80,7 @@ unsigned char pm_uart_udivslot;
#ifdef CONFIG_SAMSUNG_PM_DEBUG
-static struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS];
+static struct pm_uart_save uart_save;
static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save)
{
@@ -101,11 +101,7 @@ static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save)
static void s3c_pm_save_uarts(void)
{
- struct pm_uart_save *save = uart_save;
- unsigned int uart;
-
- for (uart = 0; uart < CONFIG_SERIAL_SAMSUNG_UARTS; uart++, save++)
- s3c_pm_save_uart(uart, save);
+ s3c_pm_save_uart(CONFIG_DEBUG_S3C_UART, &uart_save);
}
static void s3c_pm_restore_uart(unsigned int uart, struct pm_uart_save *save)
@@ -126,11 +122,7 @@ static void s3c_pm_restore_uart(unsigned int uart, struct pm_uart_save *save)
static void s3c_pm_restore_uarts(void)
{
- struct pm_uart_save *save = uart_save;
- unsigned int uart;
-
- for (uart = 0; uart < CONFIG_SERIAL_SAMSUNG_UARTS; uart++, save++)
- s3c_pm_restore_uart(uart, save);
+ s3c_pm_restore_uart(CONFIG_DEBUG_S3C_UART, &uart_save);
}
#else
static void s3c_pm_save_uarts(void) { }
diff --git a/arch/arm/plat-samsung/s3c-dma-ops.c b/arch/arm/plat-samsung/s3c-dma-ops.c
index 0cc40aea3f5..98b10ba67dc 100644
--- a/arch/arm/plat-samsung/s3c-dma-ops.c
+++ b/arch/arm/plat-samsung/s3c-dma-ops.c
@@ -82,7 +82,8 @@ static int s3c_dma_config(unsigned ch, struct samsung_dma_config *param)
static int s3c_dma_prepare(unsigned ch, struct samsung_dma_prep *param)
{
struct cb_data *data;
- int len = (param->cap == DMA_CYCLIC) ? param->period : param->len;
+ dma_addr_t pos = param->buf;
+ dma_addr_t end = param->buf + param->len;
list_for_each_entry(data, &dma_list, node)
if (data->ch == ch)
@@ -94,7 +95,15 @@ static int s3c_dma_prepare(unsigned ch, struct samsung_dma_prep *param)
data->fp_param = param->fp_param;
}
- s3c2410_dma_enqueue(ch, (void *)data, param->buf, len);
+ if (param->cap != DMA_CYCLIC) {
+ s3c2410_dma_enqueue(ch, (void *)data, param->buf, param->len);
+ return 0;
+ }
+
+ while (pos < end) {
+ s3c2410_dma_enqueue(ch, (void *)data, pos, param->period);
+ pos += param->period;
+ }
return 0;
}
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index 8d10dc8a1e1..3e5d3115a2a 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -78,6 +78,11 @@
ENTRY(vfp_support_entry)
DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
+ ldr r3, [sp, #S_PSR] @ Neither lazy restore nor FP exceptions
+ and r3, r3, #MODE_MASK @ are supported in kernel mode
+ teq r3, #USR_MODE
+ bne vfp_kmode_exception @ Returns through lr
+
VFPFMRX r1, FPEXC @ Is the VFP enabled?
DBGSTR1 "fpexc %08x", r1
tst r1, #FPEXC_EN
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 5dfbb0b8e7f..52b8f40b1c7 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/user.h>
+#include <linux/export.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
@@ -648,6 +649,72 @@ static int vfp_hotplug(struct notifier_block *b, unsigned long action,
return NOTIFY_OK;
}
+void vfp_kmode_exception(void)
+{
+ /*
+ * If we reach this point, a floating point exception has been raised
+ * while running in kernel mode. If the NEON/VFP unit was enabled at the
+ * time, it means a VFP instruction has been issued that requires
+ * software assistance to complete, something which is not currently
+ * supported in kernel mode.
+ * If the NEON/VFP unit was disabled, and the location pointed to below
+ * is properly preceded by a call to kernel_neon_begin(), something has
+ * caused the task to be scheduled out and back in again. In this case,
+ * rebuilding and running with CONFIG_DEBUG_ATOMIC_SLEEP enabled should
+ * be helpful in localizing the problem.
+ */
+ if (fmrx(FPEXC) & FPEXC_EN)
+ pr_crit("BUG: unsupported FP instruction in kernel mode\n");
+ else
+ pr_crit("BUG: FP instruction issued in kernel mode with FP unit disabled\n");
+}
+
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+/*
+ * Kernel-side NEON support functions
+ */
+void kernel_neon_begin(void)
+{
+ struct thread_info *thread = current_thread_info();
+ unsigned int cpu;
+ u32 fpexc;
+
+ /*
+ * Kernel mode NEON is only allowed outside of interrupt context
+ * with preemption disabled. This will make sure that the kernel
+ * mode NEON register contents never need to be preserved.
+ */
+ BUG_ON(in_interrupt());
+ cpu = get_cpu();
+
+ fpexc = fmrx(FPEXC) | FPEXC_EN;
+ fmxr(FPEXC, fpexc);
+
+ /*
+ * Save the userland NEON/VFP state. Under UP,
+ * the owner could be a task other than 'current'
+ */
+ if (vfp_state_in_hw(cpu, thread))
+ vfp_save_state(&thread->vfpstate, fpexc);
+#ifndef CONFIG_SMP
+ else if (vfp_current_hw_state[cpu] != NULL)
+ vfp_save_state(vfp_current_hw_state[cpu], fpexc);
+#endif
+ vfp_current_hw_state[cpu] = NULL;
+}
+EXPORT_SYMBOL(kernel_neon_begin);
+
+void kernel_neon_end(void)
+{
+ /* Disable the NEON/VFP unit. */
+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+ put_cpu();
+}
+EXPORT_SYMBOL(kernel_neon_end);
+
+#endif /* CONFIG_KERNEL_MODE_NEON */
+
/*
* VFP support code initialisation.
*/
@@ -731,4 +798,4 @@ static int __init vfp_init(void)
return 0;
}
-late_initcall(vfp_init);
+core_initcall(vfp_init);
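The kernel_neon_begin()/kernel_neon_end() pair added above brackets kernel code that issues NEON/VFP instructions: begin enables the unit, saves any live userland state and disables preemption via get_cpu(); end disables the unit again. A hedged usage sketch, assuming <asm/neon.h> carries the declarations and my_neon_memcpy() is a hypothetical NEON routine:

#include <asm/neon.h>

extern void my_neon_memcpy(void *dst, const void *src, int len);	/* hypothetical NEON routine */

static void copy_with_neon(void *dst, const void *src, int len)
{
	/* Not allowed in interrupt context; NEON state does not survive
	 * outside the bracketed region. */
	kernel_neon_begin();		/* enable unit, save user NEON/VFP state */
	my_neon_memcpy(dst, src, len);	/* NEON instructions are legal only here */
	kernel_neon_end();		/* disable the unit again */
}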
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index f71c37edca2..8a6295c8620 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -170,9 +170,10 @@ static void __init xen_percpu_init(void *unused)
per_cpu(xen_vcpu, cpu) = vcpup;
enable_percpu_irq(xen_events_irq, 0);
+ put_cpu();
}
-static void xen_restart(char str, const char *cmd)
+static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
{
struct sched_shutdown r = { .reason = SHUTDOWN_reboot };
int rc;
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index 98abd476992..c9f1d2816c2 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -26,7 +26,13 @@
#include <clocksource/arm_arch_timer.h>
-static inline void arch_timer_reg_write(int access, int reg, u32 val)
+/*
+ * These register accessors are marked inline so the compiler can
+ * nicely work out which register we want, and chuck away the rest of
+ * the code.
+ */
+static __always_inline
+void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
{
if (access == ARCH_TIMER_PHYS_ACCESS) {
switch (reg) {
@@ -36,8 +42,6 @@ static inline void arch_timer_reg_write(int access, int reg, u32 val)
case ARCH_TIMER_REG_TVAL:
asm volatile("msr cntp_tval_el0, %0" : : "r" (val));
break;
- default:
- BUILD_BUG();
}
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
@@ -47,17 +51,14 @@ static inline void arch_timer_reg_write(int access, int reg, u32 val)
case ARCH_TIMER_REG_TVAL:
asm volatile("msr cntv_tval_el0, %0" : : "r" (val));
break;
- default:
- BUILD_BUG();
}
- } else {
- BUILD_BUG();
}
isb();
}
-static inline u32 arch_timer_reg_read(int access, int reg)
+static __always_inline
+u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
{
u32 val;
@@ -69,8 +70,6 @@ static inline u32 arch_timer_reg_read(int access, int reg)
case ARCH_TIMER_REG_TVAL:
asm volatile("mrs %0, cntp_tval_el0" : "=r" (val));
break;
- default:
- BUILD_BUG();
}
} else if (access == ARCH_TIMER_VIRT_ACCESS) {
switch (reg) {
@@ -80,11 +79,7 @@ static inline u32 arch_timer_reg_read(int access, int reg)
case ARCH_TIMER_REG_TVAL:
asm volatile("mrs %0, cntv_tval_el0" : "=r" (val));
break;
- default:
- BUILD_BUG();
}
- } else {
- BUILD_BUG();
}
return val;
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index c92de4163eb..b25763bc0ec 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -42,14 +42,15 @@
#define TPIDR_EL1 18 /* Thread ID, Privileged */
#define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */
#define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */
+#define PAR_EL1 21 /* Physical Address Register */
/* 32bit specific registers. Keep them at the end of the range */
-#define DACR32_EL2 21 /* Domain Access Control Register */
-#define IFSR32_EL2 22 /* Instruction Fault Status Register */
-#define FPEXC32_EL2 23 /* Floating-Point Exception Control Register */
-#define DBGVCR32_EL2 24 /* Debug Vector Catch Register */
-#define TEECR32_EL1 25 /* ThumbEE Configuration Register */
-#define TEEHBR32_EL1 26 /* ThumbEE Handler Base Register */
-#define NR_SYS_REGS 27
+#define DACR32_EL2 22 /* Domain Access Control Register */
+#define IFSR32_EL2 23 /* Instruction Fault Status Register */
+#define FPEXC32_EL2 24 /* Floating-Point Exception Control Register */
+#define DBGVCR32_EL2 25 /* Debug Vector Catch Register */
+#define TEECR32_EL1 26 /* ThumbEE Configuration Register */
+#define TEEHBR32_EL1 27 /* ThumbEE Handler Base Register */
+#define NR_SYS_REGS 28
/* 32bit mapping */
#define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
@@ -69,6 +70,8 @@
#define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
#define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */
#define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */
+#define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */
+#define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */
#define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */
#define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */
#define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 644d7395686..0859a4ddd1e 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -129,7 +129,7 @@ struct kvm_vcpu_arch {
struct kvm_mmu_memory_cache mmu_page_cache;
/* Target CPU and feature flags */
- u32 target;
+ int target;
DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
/* Detect first run of a vcpu */
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 3659e460071..23a3c4791d8 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -24,10 +24,10 @@
#include <linux/compiler.h>
#ifndef CONFIG_ARM64_64K_PAGES
-#define THREAD_SIZE_ORDER 1
+#define THREAD_SIZE_ORDER 2
#endif
-#define THREAD_SIZE 8192
+#define THREAD_SIZE 16384
#define THREAD_START_SP (THREAD_SIZE - 16)
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 46b3beb4b77..717031a762c 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -35,6 +35,7 @@ struct mmu_gather {
struct mm_struct *mm;
unsigned int fullmm;
struct vm_area_struct *vma;
+ unsigned long start, end;
unsigned long range_start;
unsigned long range_end;
unsigned int nr;
@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = fullmm;
+ tlb->fullmm = !(start | (end+1));
+ tlb->start = start;
+ tlb->end = end;
tlb->vma = NULL;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;
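
The reworked tlb_gather_mmu() above drops the explicit fullmm flag and derives it from the range instead: a full-address-space teardown is requested as start = 0, end = -1, which is the only combination for which (start | (end + 1)) is zero. A minimal userspace sketch of that test (names are illustrative, not kernel code):

#include <stdio.h>

/*
 * Illustrative only: mirrors the fullmm detection in the patched
 * tlb_gather_mmu().  A full-mm flush is requested with start == 0 and
 * end == ~0UL, the only inputs for which (start | (end + 1)) is zero.
 */
static int is_full_mm_flush(unsigned long start, unsigned long end)
{
	return !(start | (end + 1));
}

int main(void)
{
	printf("%d\n", is_full_mm_flush(0, ~0UL));        /* 1: full flush   */
	printf("%d\n", is_full_mm_flush(0x1000, 0x2000)); /* 0: ranged flush */
	return 0;
}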
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 439827271e3..26e310c5434 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -21,6 +21,7 @@
#define BOOT_CPU_MODE_EL2 (0x0e12b007)
#ifndef __ASSEMBLY__
+#include <asm/cacheflush.h>
/*
* __boot_cpu_mode records what mode CPUs were booted in.
@@ -36,9 +37,20 @@ extern u32 __boot_cpu_mode[2];
void __hyp_set_vectors(phys_addr_t phys_vector_base);
phys_addr_t __hyp_get_vectors(void);
+static inline void sync_boot_mode(void)
+{
+ /*
+ * As secondaries write to __boot_cpu_mode with caches disabled, we
+ * must flush the corresponding cache entries to ensure the visibility
+ * of their writes.
+ */
+ __flush_dcache_area(__boot_cpu_mode, sizeof(__boot_cpu_mode));
+}
+
/* Reports the availability of HYP mode */
static inline bool is_hyp_mode_available(void)
{
+ sync_boot_mode();
return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
__boot_cpu_mode[1] == BOOT_CPU_MODE_EL2);
}
@@ -46,6 +58,7 @@ static inline bool is_hyp_mode_available(void)
/* Check if the bootloader has booted CPUs in different modes */
static inline bool is_hyp_mode_mismatched(void)
{
+ sync_boot_mode();
return __boot_cpu_mode[0] != __boot_cpu_mode[1];
}
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 1d1314280a0..6ad781b21c0 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -121,7 +121,7 @@
.macro get_thread_info, rd
mov \rd, sp
- and \rd, \rd, #~((1 << 13) - 1) // top of 8K stack
+ and \rd, \rd, #~(THREAD_SIZE - 1) // top of stack
.endm
/*
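
With THREAD_SIZE raised from 8K to 16K in the thread_info.h hunk, the get_thread_info macro above now masks the stack pointer with ~(THREAD_SIZE - 1) instead of a hard-coded 8K mask. A hedged C equivalent of that masking, assuming only that kernel stacks stay power-of-two sized and aligned with thread_info at their base:

#include <stdio.h>

#define THREAD_SIZE 16384UL	/* value set in the thread_info.h hunk above */

/*
 * Illustrative equivalent of the get_thread_info macro: clearing the low
 * bits of the stack pointer yields the base of the current power-of-two
 * sized and aligned kernel stack, where struct thread_info lives.
 */
static unsigned long thread_info_base(unsigned long sp)
{
	return sp & ~(THREAD_SIZE - 1);
}

int main(void)
{
	unsigned long sp = 0xffff000012345a70UL;	/* made-up stack pointer */

	printf("0x%lx\n", thread_info_base(sp));	/* 0xffff000012344000 */
	return 0;
}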
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 9ba33c40cdf..12e6ccb8869 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -107,7 +107,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
static int
armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
- int mapping = (*event_map)[config];
+ int mapping;
+
+ if (config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+
+ mapping = (*event_map)[config];
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
@@ -317,6 +322,9 @@ validate_event(struct pmu_hw_events *hw_events,
struct hw_perf_event fake_event = event->hw;
struct pmu *leader_pmu = event->group_leader->pmu;
+ if (is_software_event(event))
+ return 1;
+
if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
return 1;
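
The two perf_event.c hunks above harden armpmu event mapping: config arrives straight from userspace via perf_event_open(), so it is range-checked before being used as an array index, and software events are accepted early in validate_event() instead of being compared against the hardware PMU. A userspace sketch of the bounds-checked mapping (the array size and the HW_OP_UNSUPPORTED value are stand-ins):

#include <errno.h>
#include <stdio.h>

#define PERF_COUNT_HW_MAX  10		/* stand-in size, not the real enum  */
#define HW_OP_UNSUPPORTED  0xFFFF	/* stand-in "no such counter" marker */

/*
 * Sketch of the hardened mapping: reject out-of-range config values
 * before they index the map, since config cannot be trusted.
 */
static int map_event(const unsigned int event_map[PERF_COUNT_HW_MAX],
		     unsigned long long config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = event_map[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

int main(void)
{
	unsigned int map[PERF_COUNT_HW_MAX] = { 0x11, HW_OP_UNSUPPORTED };

	printf("%d\n", map_event(map, 0));	/* 17: usable mapping    */
	printf("%d\n", map_event(map, 1));	/* -ENOENT: unsupported  */
	printf("%d\n", map_event(map, 1000));	/* -EINVAL: out of range */
	return 0;
}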
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 1788bf6b471..57fb55c44c9 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -81,7 +81,7 @@ void soft_restart(unsigned long addr)
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);
-void (*arm_pm_restart)(char str, const char *cmd);
+void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
EXPORT_SYMBOL_GPL(arm_pm_restart);
void arch_cpu_idle_prepare(void)
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index ff985e3d8b7..1ac0bbbdddb 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -214,6 +214,7 @@ __kvm_hyp_code_start:
mrs x21, tpidr_el1
mrs x22, amair_el1
mrs x23, cntkctl_el1
+ mrs x24, par_el1
stp x4, x5, [x3]
stp x6, x7, [x3, #16]
@@ -225,6 +226,7 @@ __kvm_hyp_code_start:
stp x18, x19, [x3, #112]
stp x20, x21, [x3, #128]
stp x22, x23, [x3, #144]
+ str x24, [x3, #160]
.endm
.macro restore_sysregs
@@ -243,6 +245,7 @@ __kvm_hyp_code_start:
ldp x18, x19, [x3, #112]
ldp x20, x21, [x3, #128]
ldp x22, x23, [x3, #144]
+ ldr x24, [x3, #160]
msr vmpidr_el2, x4
msr csselr_el1, x5
@@ -264,6 +267,7 @@ __kvm_hyp_code_start:
msr tpidr_el1, x21
msr amair_el1, x22
msr cntkctl_el1, x23
+ msr par_el1, x24
.endm
.macro skip_32bit_state tmp, target
@@ -600,6 +604,8 @@ END(__kvm_vcpu_run)
// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
+ dsb ishst
+
kern_hyp_va x0
ldr x2, [x0, #KVM_VTTBR]
msr vttbr_el2, x2
@@ -621,6 +627,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
ENDPROC(__kvm_tlb_flush_vmid_ipa)
ENTRY(__kvm_flush_vm_context)
+ dsb ishst
tlbi alle1is
ic ialluis
dsb sy
@@ -753,6 +760,10 @@ el1_trap:
*/
tbnz x1, #7, 1f // S1PTW is set
+ /* Preserve PAR_EL1 */
+ mrs x3, par_el1
+ push x3, xzr
+
/*
* Permission fault, HPFAR_EL2 is invalid.
* Resolve the IPA the hard way using the guest VA.
@@ -766,6 +777,8 @@ el1_trap:
/* Read result */
mrs x3, par_el1
+ pop x0, xzr // Restore PAR_EL1 from the stack
+ msr par_el1, x0
tbnz x3, #0, 3f // Bail out if we failed the translation
ubfx x3, x3, #12, #36 // Extract IPA
lsl x3, x3, #4 // and present it like HPFAR
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 94923609753..02e9d09e1d8 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -211,6 +211,9 @@ static const struct sys_reg_desc sys_reg_descs[] = {
/* FAR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
NULL, reset_unknown, FAR_EL1 },
+ /* PAR_EL1 */
+ { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
+ NULL, reset_unknown, PAR_EL1 },
/* PMINTENSET_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
diff --git a/arch/avr32/boards/atngw100/mrmt.c b/arch/avr32/boards/atngw100/mrmt.c
index f9143196345..7de083d19b7 100644
--- a/arch/avr32/boards/atngw100/mrmt.c
+++ b/arch/avr32/boards/atngw100/mrmt.c
@@ -150,7 +150,6 @@ static struct ac97c_platform_data __initdata ac97c0_data = {
static struct platform_device rmt_ts_device = {
.name = "ucb1400_ts",
.id = -1,
- }
};
#endif
diff --git a/arch/avr32/oprofile/op_model_avr32.c b/arch/avr32/oprofile/op_model_avr32.c
index f74b7809e08..08308be2c02 100644
--- a/arch/avr32/oprofile/op_model_avr32.c
+++ b/arch/avr32/oprofile/op_model_avr32.c
@@ -97,8 +97,7 @@ static irqreturn_t avr32_perf_counter_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int avr32_perf_counter_create_files(struct super_block *sb,
- struct dentry *root)
+static int avr32_perf_counter_create_files(struct dentry *root)
{
struct dentry *dir;
unsigned int i;
@@ -106,21 +105,21 @@ static int avr32_perf_counter_create_files(struct super_block *sb,
for (i = 0; i < NR_counter; i++) {
snprintf(filename, sizeof(filename), "%u", i);
- dir = oprofilefs_mkdir(sb, root, filename);
+ dir = oprofilefs_mkdir(root, filename);
- oprofilefs_create_ulong(sb, dir, "enabled",
+ oprofilefs_create_ulong(dir, "enabled",
&counter[i].enabled);
- oprofilefs_create_ulong(sb, dir, "event",
+ oprofilefs_create_ulong(dir, "event",
&counter[i].event);
- oprofilefs_create_ulong(sb, dir, "count",
+ oprofilefs_create_ulong(dir, "count",
&counter[i].count);
/* Dummy entries */
- oprofilefs_create_ulong(sb, dir, "kernel",
+ oprofilefs_create_ulong(dir, "kernel",
&counter[i].kernel);
- oprofilefs_create_ulong(sb, dir, "user",
+ oprofilefs_create_ulong(dir, "user",
&counter[i].user);
- oprofilefs_create_ulong(sb, dir, "unit_mask",
+ oprofilefs_create_ulong(dir, "unit_mask",
&counter[i].unit_mask);
}
diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c
index 0aa35f0eb0d..deb67843693 100644
--- a/arch/frv/mb93090-mb00/pci-vdk.c
+++ b/arch/frv/mb93090-mb00/pci-vdk.c
@@ -320,7 +320,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
* are examined.
*/
-void __init pcibios_fixup_bus(struct pci_bus *bus)
+void pcibios_fixup_bus(struct pci_bus *bus)
{
#if 0
printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number);
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 33a97929d05..77d442ab28c 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -158,6 +158,7 @@ source "kernel/Kconfig.hz"
endmenu
source "init/Kconfig"
+source "kernel/Kconfig.freezer"
source "drivers/Kconfig"
source "fs/Kconfig"
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 5a768ad8e89..56664226632 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -43,6 +43,7 @@ config IA64
select SYSCTL_ARCH_UNALIGN_NO_WARN
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
+ select ARCH_USE_CMPXCHG_LOCKREF
default y
help
The Itanium Processor Family is Intel's 64-bit successor to
@@ -565,9 +566,9 @@ config KEXEC
It is an ongoing process to be certain the hardware in a machine
is properly shutdown, so do not be surprised if this code does not
- initially work for you. It may help to enable device hotplugging
- support. As of this writing the exact hardware interface is
- strongly in flux, so no good recommendation can be made.
+ initially work for you. As of this writing the exact hardware
+ interface is strongly in flux, so no good recommendation can be
+ made.
config CRASH_DUMP
bool "kernel crash dumps"
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 7913695b2fc..efbd2929aeb 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -31,7 +31,7 @@ CONFIG_ACPI_FAN=m
CONFIG_ACPI_DOCK=y
CONFIG_ACPI_PROCESSOR=m
CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_ACPI=m
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index f8e91336542..f64980dd20c 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -25,7 +25,7 @@ CONFIG_ACPI_BUTTON=m
CONFIG_ACPI_FAN=m
CONFIG_ACPI_PROCESSOR=m
CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_ACPI=m
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index a5a9e02e60a..0f4e9e41f13 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -31,7 +31,7 @@ CONFIG_ACPI_BUTTON=m
CONFIG_ACPI_FAN=m
CONFIG_ACPI_PROCESSOR=m
CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_ACPI=m
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/ia64/configs/xen_domu_defconfig b/arch/ia64/configs/xen_domu_defconfig
index 37b9b422caa..b025acfde5c 100644
--- a/arch/ia64/configs/xen_domu_defconfig
+++ b/arch/ia64/configs/xen_domu_defconfig
@@ -32,7 +32,7 @@ CONFIG_ACPI_BUTTON=m
CONFIG_ACPI_FAN=m
CONFIG_ACPI_PROCESSOR=m
CONFIG_ACPI_CONTAINER=m
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_ACPI=m
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
index 05b03ecd793..a3456f34f67 100644
--- a/arch/ia64/include/asm/Kbuild
+++ b/arch/ia64/include/asm/Kbuild
@@ -3,3 +3,4 @@ generic-y += clkdev.h
generic-y += exec.h
generic-y += kvm_para.h
generic-y += trace_clock.h
+generic-y += vtime.h
\ No newline at end of file
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 54ff557d474..45698cd15b7 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -102,6 +102,11 @@ static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return !(((lock.lock >> TICKET_SHIFT) ^ lock.lock) & TICKET_MASK);
+}
+
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
return __ticket_spin_is_locked(lock);
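
The new arch_spin_value_unlocked() above tests an ia64 ticket lock from a plain copy of the lock word: one field of the word is the "now serving" ticket and the other is the "next ticket to hand out", and the lock is free exactly when the two agree. A sketch with a stand-in field layout (the real TICKET_SHIFT/TICKET_MASK values live elsewhere in this header):

#include <stdio.h>

/*
 * Stand-in layout: low half holds "now serving", high half holds the
 * "next ticket".  The real ia64 header defines its own shift and mask;
 * the point is only that the lock is unlocked when the fields match.
 */
#define TICKET_SHIFT	16
#define TICKET_MASK	0xffffu

static int spin_value_unlocked(unsigned int lock)
{
	return !(((lock >> TICKET_SHIFT) ^ lock) & TICKET_MASK);
}

int main(void)
{
	printf("%d\n", spin_value_unlocked(0x00050005));	/* 1: fields agree    */
	printf("%d\n", spin_value_unlocked(0x00060005));	/* 0: a waiter queued */
	return 0;
}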
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index ef3a9de0195..bc5efc7c3f3 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -22,7 +22,7 @@
* unmapping a portion of the virtual address space, these hooks are called according to
* the following template:
*
- * tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM
+ * tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM
* {
* for each vma that needs a shootdown do {
* tlb_start_vma(tlb, vma);
@@ -58,6 +58,7 @@ struct mmu_gather {
unsigned int max;
unsigned char fullmm; /* non-zero means full mm flush */
unsigned char need_flush; /* really unmapped some PTEs? */
+ unsigned long start, end;
unsigned long start_addr;
unsigned long end_addr;
struct page **pages;
@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
tlb->max = ARRAY_SIZE(tlb->local);
tlb->pages = tlb->local;
tlb->nr = 0;
- tlb->fullmm = full_mm_flush;
+ tlb->fullmm = !(start | (end+1));
+ tlb->start = start;
+ tlb->end = end;
tlb->start_addr = ~0UL;
}
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 5b2dc0d10c8..bdfd8789b37 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1560,6 +1560,10 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
return 0;
}
+void kvm_arch_memslots_updated(struct kvm *kvm)
+{
+}
+
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_userspace_memory_region *mem,
diff --git a/arch/m68k/amiga/platform.c b/arch/m68k/amiga/platform.c
index 6083088c0cc..dacd9f911f7 100644
--- a/arch/m68k/amiga/platform.c
+++ b/arch/m68k/amiga/platform.c
@@ -56,7 +56,7 @@ static int __init amiga_init_bus(void)
n = AMIGAHW_PRESENT(ZORRO3) ? 4 : 2;
pdev = platform_device_register_simple("amiga-zorro", -1,
zorro_resources, n);
- return PTR_RET(pdev);
+ return PTR_ERR_OR_ZERO(pdev);
}
subsys_initcall(amiga_init_bus);
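
This and the later m68k hunks replace PTR_RET() with its clearer alias PTR_ERR_OR_ZERO(): if platform_device_register_simple() handed back an error pointer, return the encoded errno, otherwise return 0. A userspace model of that helper (the MAX_ERRNO encoding is the usual kernel convention, reproduced here only for illustration):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/*
 * Userspace model of the helpers involved: an "error pointer" encodes a
 * negative errno in the top MAX_ERRNO values of the address space, and
 * PTR_ERR_OR_ZERO() (the clearer spelling of the old PTR_RET()) turns
 * such a pointer into that errno, or into 0 for a valid pointer.
 */
static int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static long ptr_err_or_zero(const void *ptr)
{
	return is_err(ptr) ? (long)ptr : 0;
}

int main(void)
{
	int x = 0;
	void *ok = &x;
	void *err = (void *)(long)-ENODEV;

	printf("%ld %ld\n", ptr_err_or_zero(ok), ptr_err_or_zero(err));	/* 0 -19 */
	return 0;
}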
diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c
index 2291a7d69d4..121a6660ad4 100644
--- a/arch/m68k/emu/natfeat.c
+++ b/arch/m68k/emu/natfeat.c
@@ -18,9 +18,11 @@
#include <asm/machdep.h>
#include <asm/natfeat.h>
+extern long nf_get_id_phys(unsigned long feature_name);
+
asm("\n"
-" .global nf_get_id,nf_call\n"
-"nf_get_id:\n"
+" .global nf_get_id_phys,nf_call\n"
+"nf_get_id_phys:\n"
" .short 0x7300\n"
" rts\n"
"nf_call:\n"
@@ -29,12 +31,25 @@ asm("\n"
"1: moveq.l #0,%d0\n"
" rts\n"
" .section __ex_table,\"a\"\n"
-" .long nf_get_id,1b\n"
+" .long nf_get_id_phys,1b\n"
" .long nf_call,1b\n"
" .previous");
-EXPORT_SYMBOL_GPL(nf_get_id);
EXPORT_SYMBOL_GPL(nf_call);
+long nf_get_id(const char *feature_name)
+{
+ /* feature_name may be in vmalloc()ed memory, so make a copy */
+ char name_copy[32];
+ size_t n;
+
+ n = strlcpy(name_copy, feature_name, sizeof(name_copy));
+ if (n >= sizeof(name_copy))
+ return 0;
+
+ return nf_get_id_phys(virt_to_phys(name_copy));
+}
+EXPORT_SYMBOL_GPL(nf_get_id);
+
void nfprint(const char *fmt, ...)
{
static char buf[256];
@@ -43,7 +58,7 @@ void nfprint(const char *fmt, ...)
va_start(ap, fmt);
n = vsnprintf(buf, 256, fmt, ap);
- nf_call(nf_get_id("NF_STDERR"), buf);
+ nf_call(nf_get_id("NF_STDERR"), virt_to_phys(buf));
va_end(ap);
}
@@ -68,7 +83,7 @@ void nf_init(void)
id = nf_get_id("NF_NAME");
if (!id)
return;
- nf_call(id, buf, 256);
+ nf_call(id, virt_to_phys(buf), 256);
buf[255] = 0;
pr_info("NatFeats found (%s, %lu.%lu)\n", buf, version >> 16,
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index e3011338ab4..0721858fbd1 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -41,8 +41,8 @@ static inline s32 nfhd_read_write(u32 major, u32 minor, u32 rwflag, u32 recno,
static inline s32 nfhd_get_capacity(u32 major, u32 minor, u32 *blocks,
u32 *blocksize)
{
- return nf_call(nfhd_id + NFHD_GET_CAPACITY, major, minor, blocks,
- blocksize);
+ return nf_call(nfhd_id + NFHD_GET_CAPACITY, major, minor,
+ virt_to_phys(blocks), virt_to_phys(blocksize));
}
static LIST_HEAD(nfhd_list);
diff --git a/arch/m68k/emu/nfcon.c b/arch/m68k/emu/nfcon.c
index 6685bf45c2c..57e8c8fb5eb 100644
--- a/arch/m68k/emu/nfcon.c
+++ b/arch/m68k/emu/nfcon.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/uaccess.h>
+#include <linux/io.h>
#include <asm/natfeat.h>
@@ -25,17 +26,18 @@ static struct tty_driver *nfcon_tty_driver;
static void nfputs(const char *str, unsigned int count)
{
char buf[68];
+ unsigned long phys = virt_to_phys(buf);
buf[64] = 0;
while (count > 64) {
memcpy(buf, str, 64);
- nf_call(stderr_id, buf);
+ nf_call(stderr_id, phys);
str += 64;
count -= 64;
}
memcpy(buf, str, count);
buf[count] = 0;
- nf_call(stderr_id, buf);
+ nf_call(stderr_id, phys);
}
static void nfcon_write(struct console *con, const char *str,
@@ -79,7 +81,7 @@ static int nfcon_tty_put_char(struct tty_struct *tty, unsigned char ch)
{
char temp[2] = { ch, 0 };
- nf_call(stderr_id, temp);
+ nf_call(stderr_id, virt_to_phys(temp));
return 1;
}
diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c
index 695cd737a42..a0985fd088d 100644
--- a/arch/m68k/emu/nfeth.c
+++ b/arch/m68k/emu/nfeth.c
@@ -195,7 +195,8 @@ static struct net_device * __init nfeth_probe(int unit)
char mac[ETH_ALEN], host_ip[32], local_ip[32];
int err;
- if (!nf_call(nfEtherID + XIF_GET_MAC, unit, mac, ETH_ALEN))
+ if (!nf_call(nfEtherID + XIF_GET_MAC, unit, virt_to_phys(mac),
+ ETH_ALEN))
return NULL;
dev = alloc_etherdev(sizeof(struct nfeth_private));
@@ -217,9 +218,9 @@ static struct net_device * __init nfeth_probe(int unit)
}
nf_call(nfEtherID + XIF_GET_IPHOST, unit,
- host_ip, sizeof(host_ip));
+ virt_to_phys(host_ip), sizeof(host_ip));
nf_call(nfEtherID + XIF_GET_IPATARI, unit,
- local_ip, sizeof(local_ip));
+ virt_to_phys(local_ip), sizeof(local_ip));
netdev_info(dev, KBUILD_MODNAME " addr:%s (%s) HWaddr:%pM\n", host_ip,
local_ip, mac);
diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h
index 444ea8a09e9..ef881cfbbca 100644
--- a/arch/m68k/include/asm/div64.h
+++ b/arch/m68k/include/asm/div64.h
@@ -15,16 +15,17 @@
unsigned long long n64; \
} __n; \
unsigned long __rem, __upper; \
+ unsigned long __base = (base); \
\
__n.n64 = (n); \
if ((__upper = __n.n32[0])) { \
asm ("divul.l %2,%1:%0" \
- : "=d" (__n.n32[0]), "=d" (__upper) \
- : "d" (base), "0" (__n.n32[0])); \
+ : "=d" (__n.n32[0]), "=d" (__upper) \
+ : "d" (__base), "0" (__n.n32[0])); \
} \
asm ("divu.l %2,%1:%0" \
- : "=d" (__n.n32[1]), "=d" (__rem) \
- : "d" (base), "1" (__upper), "0" (__n.n32[1])); \
+ : "=d" (__n.n32[1]), "=d" (__rem) \
+ : "d" (__base), "1" (__upper), "0" (__n.n32[1])); \
(n) = __n.n64; \
__rem; \
})
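
The do_div() hunk above latches the macro argument in a local __base before the two asm statements, so the caller's expression is evaluated exactly once; previously base was expanded into both asm blocks, and an argument with side effects would have run twice. A small demonstration of that macro hazard and of the fix pattern (GNU statement expression, as the kernel itself uses):

#include <stdio.h>

static int evaluations;

static unsigned long get_base(void)
{
	evaluations++;
	return 10;
}

/*
 * Naive macro: 'base' is expanded, and therefore evaluated, twice; the
 * same hazard the do_div() hunk above removes, where 'base' used to
 * feed two separate asm statements.
 */
#define MOD_TWICE(n, base)	(((n) / (base)) * 0 + (n) % (base))

/* Fixed pattern from the hunk: latch the argument in a local first. */
#define MOD_ONCE(n, base) ({			\
	unsigned long __b = (base);		\
	(n) % __b;				\
})

int main(void)
{
	unsigned long r;

	r = MOD_TWICE(23UL, get_base());
	printf("%lu evaluations=%d\n", r, evaluations);	/* 3 evaluations=2 */

	evaluations = 0;
	r = MOD_ONCE(23UL, get_base());
	printf("%lu evaluations=%d\n", r, evaluations);	/* 3 evaluations=1 */
	return 0;
}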
diff --git a/arch/m68k/include/asm/irqflags.h b/arch/m68k/include/asm/irqflags.h
index 7ef4115b8c4..a823cd73dc0 100644
--- a/arch/m68k/include/asm/irqflags.h
+++ b/arch/m68k/include/asm/irqflags.h
@@ -3,7 +3,7 @@
#include <linux/types.h>
#ifdef CONFIG_MMU
-#include <linux/hardirq.h>
+#include <linux/preempt_mask.h>
#endif
#include <linux/preempt.h>
#include <asm/thread_info.h>
@@ -67,6 +67,10 @@ static inline void arch_local_irq_restore(unsigned long flags)
static inline bool arch_irqs_disabled_flags(unsigned long flags)
{
+ if (MACH_IS_ATARI) {
+ /* Ignore HSYNC = ipl 2 on Atari */
+ return (flags & ~(ALLOWINT | 0x200)) != 0;
+ }
return (flags & ~ALLOWINT) != 0;
}
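
The irqflags change above teaches arch_irqs_disabled_flags() that on Atari the kernel deliberately keeps the HSYNC interrupt (IPL 2) masked, so a saved status register showing IPL 2 should still count as "interrupts enabled". A hedged model with stand-in mask values (ALLOWINT and the IPL encoding are simplified here, not the real m68k definitions):

#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative stand-ins: bits 8-10 of the m68k status register hold the
 * interrupt priority level; ALLOWINT here is simply "everything except
 * the IPL field", and 0x200 is IPL 2, the Atari HSYNC level.
 */
#define IPL_FIELD	0x0700u
#define ALLOWINT	(~IPL_FIELD)
#define IPL_HSYNC	0x0200u

static bool irqs_disabled_flags(unsigned int flags, bool is_atari)
{
	if (is_atari)
		return (flags & ~(ALLOWINT | IPL_HSYNC)) != 0;
	return (flags & ~ALLOWINT) != 0;
}

int main(void)
{
	/* IPL 2: treated as enabled on Atari, disabled elsewhere */
	printf("%d %d\n",
	       irqs_disabled_flags(IPL_HSYNC, true),
	       irqs_disabled_flags(IPL_HSYNC, false));	/* 0 1 */
	return 0;
}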
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index bea6bcf8f9b..7eb9792009f 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -90,7 +90,7 @@ static int __init rtc_init(void)
return -ENODEV;
pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
- return PTR_RET(pdev);
+ return PTR_ERR_OR_ZERO(pdev);
}
module_init(rtc_init);
diff --git a/arch/m68k/platform/coldfire/pci.c b/arch/m68k/platform/coldfire/pci.c
index b33f97a13e6..df9679238b6 100644
--- a/arch/m68k/platform/coldfire/pci.c
+++ b/arch/m68k/platform/coldfire/pci.c
@@ -319,7 +319,6 @@ static int __init mcf_pci_init(void)
pci_fixup_irqs(pci_common_swizzle, mcf_pci_map_irq);
pci_bus_size_bridges(rootbus);
pci_bus_assign_resources(rootbus);
- pci_enable_bridges(rootbus);
return 0;
}
diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
index 658542b914f..078bb744b5f 100644
--- a/arch/m68k/q40/config.c
+++ b/arch/m68k/q40/config.c
@@ -338,6 +338,6 @@ static __init int q40_add_kbd_device(void)
return -ENODEV;
pdev = platform_device_register_simple("q40kbd", -1, NULL, 0);
- return PTR_RET(pdev);
+ return PTR_ERR_OR_ZERO(pdev);
}
arch_initcall(q40_add_kbd_device);
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index d22a4ecffff..4fab52294d9 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -28,7 +28,7 @@ config MICROBLAZE
select GENERIC_CLOCKEVENTS
select GENERIC_IDLE_POLL_SETUP
select MODULES_USE_ELF_RELA
- select CLONE_BACKWARDS
+ select CLONE_BACKWARDS3
config SWAP
def_bool n
diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h
index 20c5e8e5121..9977816c5ad 100644
--- a/arch/microblaze/include/asm/prom.h
+++ b/arch/microblaze/include/asm/prom.h
@@ -50,9 +50,6 @@ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
extern void kdump_move_device_tree(void);
-/* CPU OF node matching */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
-
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index c3abed33230..dccd7cec442 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -114,6 +114,7 @@ config BCM47XX
select FW_CFE
select HW_HAS_PCI
select IRQ_CPU
+ select SYS_HAS_CPU_MIPS32_R1
select NO_EXCEPT_FILL
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
@@ -2304,9 +2305,9 @@ config KEXEC
It is an ongoing process to be certain the hardware in a machine
is properly shutdown, so do not be surprised if this code does not
- initially work for you. It may help to enable device hotplugging
- support. As of this writing the exact hardware interface is
- strongly in flux, so no good recommendation can be made.
+ initially work for you. As of this writing the exact hardware
+ interface is strongly in flux, so no good recommendation can be
+ made.
config CRASH_DUMP
bool "Kernel crash dumps"
diff --git a/arch/mips/bcm47xx/Kconfig b/arch/mips/bcm47xx/Kconfig
index ba611927749..2b8b118398c 100644
--- a/arch/mips/bcm47xx/Kconfig
+++ b/arch/mips/bcm47xx/Kconfig
@@ -2,7 +2,6 @@ if BCM47XX
config BCM47XX_SSB
bool "SSB Support for Broadcom BCM47XX"
- select SYS_HAS_CPU_MIPS32_R1
select SSB
select SSB_DRIVER_MIPS
select SSB_DRIVER_EXTIF
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 1dc086087a7..fa44f3ec530 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -17,6 +17,8 @@
#define current_cpu_type() current_cpu_data.cputype
#endif
+#define boot_cpu_type() cpu_data[0].cputype
+
/*
* SMP assumption: Options of CPU 0 are a superset of all processors.
* This is true for all known MIPS systems.
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index 5b2f2e68e57..9488fa5f886 100644
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -25,8 +25,12 @@
#else
#define CAC_BASE _AC(0x80000000, UL)
#endif
+#ifndef IO_BASE
#define IO_BASE _AC(0xa0000000, UL)
+#endif
+#ifndef UNCAC_BASE
#define UNCAC_BASE _AC(0xa0000000, UL)
+#endif
#ifndef MAP_BASE
#ifdef CONFIG_KVM_GUEST
diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
index b7a23064841..88e292b7719 100644
--- a/arch/mips/include/uapi/asm/siginfo.h
+++ b/arch/mips/include/uapi/asm/siginfo.h
@@ -25,11 +25,12 @@ struct siginfo;
/*
* Careful to keep union _sifields from shifting ...
*/
-#if __SIZEOF_LONG__ == 4
+#if _MIPS_SZLONG == 32
#define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int))
-#endif
-#if __SIZEOF_LONG__ == 8
+#elif _MIPS_SZLONG == 64
#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
+#else
+#error _MIPS_SZLONG neither 32 nor 64
#endif
#include <asm-generic/siginfo.h>
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
index f739aedcb50..bd79c4f9bff 100644
--- a/arch/mips/kernel/bmips_vec.S
+++ b/arch/mips/kernel/bmips_vec.S
@@ -54,7 +54,11 @@ LEAF(bmips_smp_movevec)
/* set up CPU1 CBR; move BASE to 0xa000_0000 */
li k0, 0xff400000
mtc0 k0, $22, 6
- li k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_1
+ /* set up relocation vector address based on thread ID */
+ mfc0 k1, $22, 3
+ srl k1, 16
+ andi k1, 0x8000
+ or k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_0
or k0, k1
li k1, 0xa0080000
sw k1, 0(k0)
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index c0bb4d59076..126da74d4c5 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -66,6 +66,8 @@ static void __init bmips_smp_setup(void)
int i, cpu = 1, boot_cpu = 0;
#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
+ int cpu_hw_intr;
+
/* arbitration priority */
clear_c0_brcm_cmt_ctrl(0x30);
@@ -79,15 +81,13 @@ static void __init bmips_smp_setup(void)
* MIPS interrupts 0,1 (SW INT 0,1) cross over to the other thread
* MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
* MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
- *
- * If booting from TP1, leave the existing CMT interrupt routing
- * such that TP0 responds to SW1 and TP1 responds to SW0.
*/
if (boot_cpu == 0)
- change_c0_brcm_cmt_intr(0xf8018000,
- (0x02 << 27) | (0x03 << 15));
+ cpu_hw_intr = 0x02;
else
- change_c0_brcm_cmt_intr(0xf8018000, (0x1d << 27));
+ cpu_hw_intr = 0x1d;
+
+ change_c0_brcm_cmt_intr(0xf8018000, (cpu_hw_intr << 27) | (0x03 << 15));
/* single core, 2 threads (2 pipelines) */
max_cpus = 2;
@@ -202,9 +202,15 @@ static void bmips_init_secondary(void)
#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
void __iomem *cbr = BMIPS_GET_CBR();
unsigned long old_vec;
+ unsigned long relo_vector;
+ int boot_cpu;
+
+ boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
+ relo_vector = boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0 :
+ BMIPS_RELO_VECTOR_CONTROL_1;
- old_vec = __raw_readl(cbr + BMIPS_RELO_VECTOR_CONTROL_1);
- __raw_writel(old_vec & ~0x20000000, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
+ old_vec = __raw_readl(cbr + relo_vector);
+ __raw_writel(old_vec & ~0x20000000, cbr + relo_vector);
clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0);
#elif defined(CONFIG_CPU_BMIPS5000)
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 1765bab000a..faf84c5f262 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -1335,8 +1335,9 @@ static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
return len;
}
+static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);
-static ssize_t show_ntcs(struct device *cd, struct device_attribute *attr,
+static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
char *buf)
{
struct vpe *vpe = get_vpe(tclimit);
@@ -1344,7 +1345,7 @@ static ssize_t show_ntcs(struct device *cd, struct device_attribute *attr,
return sprintf(buf, "%d\n", vpe->ntcs);
}
-static ssize_t store_ntcs(struct device *dev, struct device_attribute *attr,
+static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
struct vpe *vpe = get_vpe(tclimit);
@@ -1365,12 +1366,14 @@ static ssize_t store_ntcs(struct device *dev, struct device_attribute *attr,
out_einval:
return -EINVAL;
}
+static DEVICE_ATTR_RW(ntcs);
-static struct device_attribute vpe_class_attributes[] = {
- __ATTR(kill, S_IWUSR, NULL, store_kill),
- __ATTR(ntcs, S_IRUGO | S_IWUSR, show_ntcs, store_ntcs),
- {}
+static struct attribute *vpe_attrs[] = {
+ &dev_attr_kill.attr,
+ &dev_attr_ntcs.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(vpe);
static void vpe_device_release(struct device *cd)
{
@@ -1381,7 +1384,7 @@ struct class vpe_class = {
.name = "vpe",
.owner = THIS_MODULE,
.dev_release = vpe_device_release,
- .dev_attrs = vpe_class_attributes,
+ .dev_groups = vpe_groups,
};
struct device vpe_device;
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
index dca2aa66599..bbace092ad0 100644
--- a/arch/mips/kvm/kvm_locore.S
+++ b/arch/mips/kvm/kvm_locore.S
@@ -1,13 +1,13 @@
/*
-* This file is subject to the terms and conditions of the GNU General Public
-* License. See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* Main entry point for the guest, exception handling.
-*
-* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Main entry point for the guest, exception handling.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
#include <asm/asm.h>
#include <asm/asmmacro.h>
@@ -55,195 +55,193 @@
* a0: run
* a1: vcpu
*/
+ .set noreorder
+ .set noat
FEXPORT(__kvm_mips_vcpu_run)
- .set push
- .set noreorder
- .set noat
-
- /* k0/k1 not being used in host kernel context */
- addiu k1,sp, -PT_SIZE
- LONG_S $0, PT_R0(k1)
- LONG_S $1, PT_R1(k1)
- LONG_S $2, PT_R2(k1)
- LONG_S $3, PT_R3(k1)
-
- LONG_S $4, PT_R4(k1)
- LONG_S $5, PT_R5(k1)
- LONG_S $6, PT_R6(k1)
- LONG_S $7, PT_R7(k1)
-
- LONG_S $8, PT_R8(k1)
- LONG_S $9, PT_R9(k1)
- LONG_S $10, PT_R10(k1)
- LONG_S $11, PT_R11(k1)
- LONG_S $12, PT_R12(k1)
- LONG_S $13, PT_R13(k1)
- LONG_S $14, PT_R14(k1)
- LONG_S $15, PT_R15(k1)
- LONG_S $16, PT_R16(k1)
- LONG_S $17, PT_R17(k1)
-
- LONG_S $18, PT_R18(k1)
- LONG_S $19, PT_R19(k1)
- LONG_S $20, PT_R20(k1)
- LONG_S $21, PT_R21(k1)
- LONG_S $22, PT_R22(k1)
- LONG_S $23, PT_R23(k1)
- LONG_S $24, PT_R24(k1)
- LONG_S $25, PT_R25(k1)
+ /* k0/k1 not being used in host kernel context */
+ INT_ADDIU k1, sp, -PT_SIZE
+ LONG_S $0, PT_R0(k1)
+ LONG_S $1, PT_R1(k1)
+ LONG_S $2, PT_R2(k1)
+ LONG_S $3, PT_R3(k1)
+
+ LONG_S $4, PT_R4(k1)
+ LONG_S $5, PT_R5(k1)
+ LONG_S $6, PT_R6(k1)
+ LONG_S $7, PT_R7(k1)
+
+ LONG_S $8, PT_R8(k1)
+ LONG_S $9, PT_R9(k1)
+ LONG_S $10, PT_R10(k1)
+ LONG_S $11, PT_R11(k1)
+ LONG_S $12, PT_R12(k1)
+ LONG_S $13, PT_R13(k1)
+ LONG_S $14, PT_R14(k1)
+ LONG_S $15, PT_R15(k1)
+ LONG_S $16, PT_R16(k1)
+ LONG_S $17, PT_R17(k1)
+
+ LONG_S $18, PT_R18(k1)
+ LONG_S $19, PT_R19(k1)
+ LONG_S $20, PT_R20(k1)
+ LONG_S $21, PT_R21(k1)
+ LONG_S $22, PT_R22(k1)
+ LONG_S $23, PT_R23(k1)
+ LONG_S $24, PT_R24(k1)
+ LONG_S $25, PT_R25(k1)
/* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
- LONG_S $28, PT_R28(k1)
- LONG_S $29, PT_R29(k1)
- LONG_S $30, PT_R30(k1)
- LONG_S $31, PT_R31(k1)
+ LONG_S $28, PT_R28(k1)
+ LONG_S $29, PT_R29(k1)
+ LONG_S $30, PT_R30(k1)
+ LONG_S $31, PT_R31(k1)
- /* Save hi/lo */
- mflo v0
- LONG_S v0, PT_LO(k1)
- mfhi v1
- LONG_S v1, PT_HI(k1)
+ /* Save hi/lo */
+ mflo v0
+ LONG_S v0, PT_LO(k1)
+ mfhi v1
+ LONG_S v1, PT_HI(k1)
/* Save host status */
- mfc0 v0, CP0_STATUS
- LONG_S v0, PT_STATUS(k1)
+ mfc0 v0, CP0_STATUS
+ LONG_S v0, PT_STATUS(k1)
/* Save host ASID, shove it into the BVADDR location */
- mfc0 v1,CP0_ENTRYHI
- andi v1, 0xff
- LONG_S v1, PT_HOST_ASID(k1)
+ mfc0 v1, CP0_ENTRYHI
+ andi v1, 0xff
+ LONG_S v1, PT_HOST_ASID(k1)
- /* Save DDATA_LO, will be used to store pointer to vcpu */
- mfc0 v1, CP0_DDATA_LO
- LONG_S v1, PT_HOST_USERLOCAL(k1)
+ /* Save DDATA_LO, will be used to store pointer to vcpu */
+ mfc0 v1, CP0_DDATA_LO
+ LONG_S v1, PT_HOST_USERLOCAL(k1)
- /* DDATA_LO has pointer to vcpu */
- mtc0 a1,CP0_DDATA_LO
+ /* DDATA_LO has pointer to vcpu */
+ mtc0 a1, CP0_DDATA_LO
- /* Offset into vcpu->arch */
- addiu k1, a1, VCPU_HOST_ARCH
+ /* Offset into vcpu->arch */
+ INT_ADDIU k1, a1, VCPU_HOST_ARCH
- /* Save the host stack to VCPU, used for exception processing when we exit from the Guest */
- LONG_S sp, VCPU_HOST_STACK(k1)
+ /*
+ * Save the host stack to VCPU, used for exception processing
+ * when we exit from the Guest
+ */
+ LONG_S sp, VCPU_HOST_STACK(k1)
- /* Save the kernel gp as well */
- LONG_S gp, VCPU_HOST_GP(k1)
+ /* Save the kernel gp as well */
+ LONG_S gp, VCPU_HOST_GP(k1)
/* Setup status register for running the guest in UM, interrupts are disabled */
- li k0,(ST0_EXL | KSU_USER| ST0_BEV)
- mtc0 k0,CP0_STATUS
- ehb
-
- /* load up the new EBASE */
- LONG_L k0, VCPU_GUEST_EBASE(k1)
- mtc0 k0,CP0_EBASE
-
- /* Now that the new EBASE has been loaded, unset BEV, set interrupt mask as it was
- * but make sure that timer interrupts are enabled
- */
- li k0,(ST0_EXL | KSU_USER | ST0_IE)
- andi v0, v0, ST0_IM
- or k0, k0, v0
- mtc0 k0,CP0_STATUS
- ehb
+ li k0, (ST0_EXL | KSU_USER | ST0_BEV)
+ mtc0 k0, CP0_STATUS
+ ehb
+
+ /* load up the new EBASE */
+ LONG_L k0, VCPU_GUEST_EBASE(k1)
+ mtc0 k0, CP0_EBASE
+
+ /*
+ * Now that the new EBASE has been loaded, unset BEV, set
+ * interrupt mask as it was but make sure that timer interrupts
+ * are enabled
+ */
+ li k0, (ST0_EXL | KSU_USER | ST0_IE)
+ andi v0, v0, ST0_IM
+ or k0, k0, v0
+ mtc0 k0, CP0_STATUS
+ ehb
/* Set Guest EPC */
- LONG_L t0, VCPU_PC(k1)
- mtc0 t0, CP0_EPC
+ LONG_L t0, VCPU_PC(k1)
+ mtc0 t0, CP0_EPC
FEXPORT(__kvm_mips_load_asid)
- /* Set the ASID for the Guest Kernel */
- sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
- /* addresses shift to 0x80000000 */
- bltz t0, 1f /* If kernel */
- addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
- addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */
+ /* Set the ASID for the Guest Kernel */
+ INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
+ /* addresses shift to 0x80000000 */
+ bltz t0, 1f /* If kernel */
+ INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
+ INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
1:
- /* t1: contains the base of the ASID array, need to get the cpu id */
- LONG_L t2, TI_CPU($28) /* smp_processor_id */
- sll t2, t2, 2 /* x4 */
- addu t3, t1, t2
- LONG_L k0, (t3)
- andi k0, k0, 0xff
- mtc0 k0,CP0_ENTRYHI
- ehb
-
- /* Disable RDHWR access */
- mtc0 zero, CP0_HWRENA
-
- /* Now load up the Guest Context from VCPU */
- LONG_L $1, VCPU_R1(k1)
- LONG_L $2, VCPU_R2(k1)
- LONG_L $3, VCPU_R3(k1)
-
- LONG_L $4, VCPU_R4(k1)
- LONG_L $5, VCPU_R5(k1)
- LONG_L $6, VCPU_R6(k1)
- LONG_L $7, VCPU_R7(k1)
-
- LONG_L $8, VCPU_R8(k1)
- LONG_L $9, VCPU_R9(k1)
- LONG_L $10, VCPU_R10(k1)
- LONG_L $11, VCPU_R11(k1)
- LONG_L $12, VCPU_R12(k1)
- LONG_L $13, VCPU_R13(k1)
- LONG_L $14, VCPU_R14(k1)
- LONG_L $15, VCPU_R15(k1)
- LONG_L $16, VCPU_R16(k1)
- LONG_L $17, VCPU_R17(k1)
- LONG_L $18, VCPU_R18(k1)
- LONG_L $19, VCPU_R19(k1)
- LONG_L $20, VCPU_R20(k1)
- LONG_L $21, VCPU_R21(k1)
- LONG_L $22, VCPU_R22(k1)
- LONG_L $23, VCPU_R23(k1)
- LONG_L $24, VCPU_R24(k1)
- LONG_L $25, VCPU_R25(k1)
-
- /* k0/k1 loaded up later */
-
- LONG_L $28, VCPU_R28(k1)
- LONG_L $29, VCPU_R29(k1)
- LONG_L $30, VCPU_R30(k1)
- LONG_L $31, VCPU_R31(k1)
-
- /* Restore hi/lo */
- LONG_L k0, VCPU_LO(k1)
- mtlo k0
-
- LONG_L k0, VCPU_HI(k1)
- mthi k0
+ /* t1: contains the base of the ASID array, need to get the cpu id */
+ LONG_L t2, TI_CPU($28) /* smp_processor_id */
+ INT_SLL t2, t2, 2 /* x4 */
+ REG_ADDU t3, t1, t2
+ LONG_L k0, (t3)
+ andi k0, k0, 0xff
+ mtc0 k0, CP0_ENTRYHI
+ ehb
+
+ /* Disable RDHWR access */
+ mtc0 zero, CP0_HWRENA
+
+ /* Now load up the Guest Context from VCPU */
+ LONG_L $1, VCPU_R1(k1)
+ LONG_L $2, VCPU_R2(k1)
+ LONG_L $3, VCPU_R3(k1)
+
+ LONG_L $4, VCPU_R4(k1)
+ LONG_L $5, VCPU_R5(k1)
+ LONG_L $6, VCPU_R6(k1)
+ LONG_L $7, VCPU_R7(k1)
+
+ LONG_L $8, VCPU_R8(k1)
+ LONG_L $9, VCPU_R9(k1)
+ LONG_L $10, VCPU_R10(k1)
+ LONG_L $11, VCPU_R11(k1)
+ LONG_L $12, VCPU_R12(k1)
+ LONG_L $13, VCPU_R13(k1)
+ LONG_L $14, VCPU_R14(k1)
+ LONG_L $15, VCPU_R15(k1)
+ LONG_L $16, VCPU_R16(k1)
+ LONG_L $17, VCPU_R17(k1)
+ LONG_L $18, VCPU_R18(k1)
+ LONG_L $19, VCPU_R19(k1)
+ LONG_L $20, VCPU_R20(k1)
+ LONG_L $21, VCPU_R21(k1)
+ LONG_L $22, VCPU_R22(k1)
+ LONG_L $23, VCPU_R23(k1)
+ LONG_L $24, VCPU_R24(k1)
+ LONG_L $25, VCPU_R25(k1)
+
+ /* k0/k1 loaded up later */
+
+ LONG_L $28, VCPU_R28(k1)
+ LONG_L $29, VCPU_R29(k1)
+ LONG_L $30, VCPU_R30(k1)
+ LONG_L $31, VCPU_R31(k1)
+
+ /* Restore hi/lo */
+ LONG_L k0, VCPU_LO(k1)
+ mtlo k0
+
+ LONG_L k0, VCPU_HI(k1)
+ mthi k0
FEXPORT(__kvm_mips_load_k0k1)
/* Restore the guest's k0/k1 registers */
- LONG_L k0, VCPU_R26(k1)
- LONG_L k1, VCPU_R27(k1)
+ LONG_L k0, VCPU_R26(k1)
+ LONG_L k1, VCPU_R27(k1)
- /* Jump to guest */
+ /* Jump to guest */
eret
- .set pop
VECTOR(MIPSX(exception), unknown)
/*
* Find out what mode we came from and jump to the proper handler.
*/
- .set push
- .set noat
- .set noreorder
- mtc0 k0, CP0_ERROREPC #01: Save guest k0
- ehb #02:
-
- mfc0 k0, CP0_EBASE #02: Get EBASE
- srl k0, k0, 10 #03: Get rid of CPUNum
- sll k0, k0, 10 #04
- LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000
- addiu k0, k0, 0x2000 #06: Exception handler is installed @ offset 0x2000
- j k0 #07: jump to the function
- nop #08: branch delay slot
- .set push
+ mtc0 k0, CP0_ERROREPC #01: Save guest k0
+ ehb #02:
+
+ mfc0 k0, CP0_EBASE #02: Get EBASE
+ INT_SRL k0, k0, 10 #03: Get rid of CPUNum
+ INT_SLL k0, k0, 10 #04
+ LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000
+ INT_ADDIU k0, k0, 0x2000 #06: Exception handler is installed @ offset 0x2000
+ j k0 #07: jump to the function
+ nop #08: branch delay slot
VECTOR_END(MIPSX(exceptionEnd))
.end MIPSX(exception)
@@ -253,329 +251,327 @@ VECTOR_END(MIPSX(exceptionEnd))
*
*/
NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
- .set push
- .set noat
- .set noreorder
-
- /* Get the VCPU pointer from DDTATA_LO */
- mfc0 k1, CP0_DDATA_LO
- addiu k1, k1, VCPU_HOST_ARCH
-
- /* Start saving Guest context to VCPU */
- LONG_S $0, VCPU_R0(k1)
- LONG_S $1, VCPU_R1(k1)
- LONG_S $2, VCPU_R2(k1)
- LONG_S $3, VCPU_R3(k1)
- LONG_S $4, VCPU_R4(k1)
- LONG_S $5, VCPU_R5(k1)
- LONG_S $6, VCPU_R6(k1)
- LONG_S $7, VCPU_R7(k1)
- LONG_S $8, VCPU_R8(k1)
- LONG_S $9, VCPU_R9(k1)
- LONG_S $10, VCPU_R10(k1)
- LONG_S $11, VCPU_R11(k1)
- LONG_S $12, VCPU_R12(k1)
- LONG_S $13, VCPU_R13(k1)
- LONG_S $14, VCPU_R14(k1)
- LONG_S $15, VCPU_R15(k1)
- LONG_S $16, VCPU_R16(k1)
- LONG_S $17,VCPU_R17(k1)
- LONG_S $18, VCPU_R18(k1)
- LONG_S $19, VCPU_R19(k1)
- LONG_S $20, VCPU_R20(k1)
- LONG_S $21, VCPU_R21(k1)
- LONG_S $22, VCPU_R22(k1)
- LONG_S $23, VCPU_R23(k1)
- LONG_S $24, VCPU_R24(k1)
- LONG_S $25, VCPU_R25(k1)
-
- /* Guest k0/k1 saved later */
-
- LONG_S $28, VCPU_R28(k1)
- LONG_S $29, VCPU_R29(k1)
- LONG_S $30, VCPU_R30(k1)
- LONG_S $31, VCPU_R31(k1)
-
- /* We need to save hi/lo and restore them on
- * the way out
- */
- mfhi t0
- LONG_S t0, VCPU_HI(k1)
-
- mflo t0
- LONG_S t0, VCPU_LO(k1)
-
- /* Finally save guest k0/k1 to VCPU */
- mfc0 t0, CP0_ERROREPC
- LONG_S t0, VCPU_R26(k1)
-
- /* Get GUEST k1 and save it in VCPU */
- la t1, ~0x2ff
- mfc0 t0, CP0_EBASE
- and t0, t0, t1
- LONG_L t0, 0x3000(t0)
- LONG_S t0, VCPU_R27(k1)
-
- /* Now that context has been saved, we can use other registers */
-
- /* Restore vcpu */
- mfc0 a1, CP0_DDATA_LO
- move s1, a1
-
- /* Restore run (vcpu->run) */
- LONG_L a0, VCPU_RUN(a1)
- /* Save pointer to run in s0, will be saved by the compiler */
- move s0, a0
-
-
- /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process the exception */
- mfc0 k0,CP0_EPC
- LONG_S k0, VCPU_PC(k1)
-
- mfc0 k0, CP0_BADVADDR
- LONG_S k0, VCPU_HOST_CP0_BADVADDR(k1)
-
- mfc0 k0, CP0_CAUSE
- LONG_S k0, VCPU_HOST_CP0_CAUSE(k1)
-
- mfc0 k0, CP0_ENTRYHI
- LONG_S k0, VCPU_HOST_ENTRYHI(k1)
-
- /* Now restore the host state just enough to run the handlers */
-
- /* Swtich EBASE to the one used by Linux */
- /* load up the host EBASE */
- mfc0 v0, CP0_STATUS
-
- .set at
- or k0, v0, ST0_BEV
- .set noat
-
- mtc0 k0, CP0_STATUS
- ehb
-
- LONG_L k0, VCPU_HOST_EBASE(k1)
- mtc0 k0,CP0_EBASE
-
-
- /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
- .set at
- and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
- or v0, v0, ST0_CU0
- .set noat
- mtc0 v0, CP0_STATUS
- ehb
-
- /* Load up host GP */
- LONG_L gp, VCPU_HOST_GP(k1)
-
- /* Need a stack before we can jump to "C" */
- LONG_L sp, VCPU_HOST_STACK(k1)
-
- /* Saved host state */
- addiu sp,sp, -PT_SIZE
+	/* Get the VCPU pointer from DDATA_LO */
+ mfc0 k1, CP0_DDATA_LO
+ INT_ADDIU k1, k1, VCPU_HOST_ARCH
+
+ /* Start saving Guest context to VCPU */
+ LONG_S $0, VCPU_R0(k1)
+ LONG_S $1, VCPU_R1(k1)
+ LONG_S $2, VCPU_R2(k1)
+ LONG_S $3, VCPU_R3(k1)
+ LONG_S $4, VCPU_R4(k1)
+ LONG_S $5, VCPU_R5(k1)
+ LONG_S $6, VCPU_R6(k1)
+ LONG_S $7, VCPU_R7(k1)
+ LONG_S $8, VCPU_R8(k1)
+ LONG_S $9, VCPU_R9(k1)
+ LONG_S $10, VCPU_R10(k1)
+ LONG_S $11, VCPU_R11(k1)
+ LONG_S $12, VCPU_R12(k1)
+ LONG_S $13, VCPU_R13(k1)
+ LONG_S $14, VCPU_R14(k1)
+ LONG_S $15, VCPU_R15(k1)
+ LONG_S $16, VCPU_R16(k1)
+ LONG_S $17, VCPU_R17(k1)
+ LONG_S $18, VCPU_R18(k1)
+ LONG_S $19, VCPU_R19(k1)
+ LONG_S $20, VCPU_R20(k1)
+ LONG_S $21, VCPU_R21(k1)
+ LONG_S $22, VCPU_R22(k1)
+ LONG_S $23, VCPU_R23(k1)
+ LONG_S $24, VCPU_R24(k1)
+ LONG_S $25, VCPU_R25(k1)
+
+ /* Guest k0/k1 saved later */
+
+ LONG_S $28, VCPU_R28(k1)
+ LONG_S $29, VCPU_R29(k1)
+ LONG_S $30, VCPU_R30(k1)
+ LONG_S $31, VCPU_R31(k1)
+
+ /* We need to save hi/lo and restore them on
+ * the way out
+ */
+ mfhi t0
+ LONG_S t0, VCPU_HI(k1)
+
+ mflo t0
+ LONG_S t0, VCPU_LO(k1)
+
+ /* Finally save guest k0/k1 to VCPU */
+ mfc0 t0, CP0_ERROREPC
+ LONG_S t0, VCPU_R26(k1)
+
+ /* Get GUEST k1 and save it in VCPU */
+ PTR_LI t1, ~0x2ff
+ mfc0 t0, CP0_EBASE
+ and t0, t0, t1
+ LONG_L t0, 0x3000(t0)
+ LONG_S t0, VCPU_R27(k1)
+
+ /* Now that context has been saved, we can use other registers */
+
+ /* Restore vcpu */
+ mfc0 a1, CP0_DDATA_LO
+ move s1, a1
+
+ /* Restore run (vcpu->run) */
+ LONG_L a0, VCPU_RUN(a1)
+ /* Save pointer to run in s0, will be saved by the compiler */
+ move s0, a0
+
+ /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to
+ * process the exception */
+ mfc0 k0,CP0_EPC
+ LONG_S k0, VCPU_PC(k1)
+
+ mfc0 k0, CP0_BADVADDR
+ LONG_S k0, VCPU_HOST_CP0_BADVADDR(k1)
+
+ mfc0 k0, CP0_CAUSE
+ LONG_S k0, VCPU_HOST_CP0_CAUSE(k1)
+
+ mfc0 k0, CP0_ENTRYHI
+ LONG_S k0, VCPU_HOST_ENTRYHI(k1)
+
+ /* Now restore the host state just enough to run the handlers */
+
+	/* Switch EBASE to the one used by Linux */
+ /* load up the host EBASE */
+ mfc0 v0, CP0_STATUS
+
+ .set at
+ or k0, v0, ST0_BEV
+ .set noat
+
+ mtc0 k0, CP0_STATUS
+ ehb
+
+ LONG_L k0, VCPU_HOST_EBASE(k1)
+ mtc0 k0,CP0_EBASE
+
- /* XXXKYMA do we need to load the host ASID, maybe not because the
- * kernel entries are marked GLOBAL, need to verify
- */
+ /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
+ .set at
+ and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
+ or v0, v0, ST0_CU0
+ .set noat
+ mtc0 v0, CP0_STATUS
+ ehb
+
+ /* Load up host GP */
+ LONG_L gp, VCPU_HOST_GP(k1)
+
+ /* Need a stack before we can jump to "C" */
+ LONG_L sp, VCPU_HOST_STACK(k1)
+
+ /* Saved host state */
+ INT_ADDIU sp, sp, -PT_SIZE
- /* Restore host DDATA_LO */
- LONG_L k0, PT_HOST_USERLOCAL(sp)
- mtc0 k0, CP0_DDATA_LO
+ /* XXXKYMA do we need to load the host ASID, maybe not because the
+ * kernel entries are marked GLOBAL, need to verify
+ */
- /* Restore RDHWR access */
- la k0, 0x2000000F
- mtc0 k0, CP0_HWRENA
+ /* Restore host DDATA_LO */
+ LONG_L k0, PT_HOST_USERLOCAL(sp)
+ mtc0 k0, CP0_DDATA_LO
- /* Jump to handler */
+ /* Restore RDHWR access */
+ PTR_LI k0, 0x2000000F
+ mtc0 k0, CP0_HWRENA
+
+ /* Jump to handler */
FEXPORT(__kvm_mips_jump_to_handler)
- /* XXXKYMA: not sure if this is safe, how large is the stack?? */
- /* Now jump to the kvm_mips_handle_exit() to see if we can deal with this in the kernel */
- la t9,kvm_mips_handle_exit
- jalr.hb t9
- addiu sp,sp, -CALLFRAME_SIZ /* BD Slot */
-
- /* Return from handler Make sure interrupts are disabled */
- di
- ehb
-
- /* XXXKYMA: k0/k1 could have been blown away if we processed an exception
- * while we were handling the exception from the guest, reload k1
- */
- move k1, s1
- addiu k1, k1, VCPU_HOST_ARCH
-
- /* Check return value, should tell us if we are returning to the host (handle I/O etc)
- * or resuming the guest
- */
- andi t0, v0, RESUME_HOST
- bnez t0, __kvm_mips_return_to_host
- nop
+ /* XXXKYMA: not sure if this is safe, how large is the stack??
+ * Now jump to the kvm_mips_handle_exit() to see if we can deal
+ * with this in the kernel */
+ PTR_LA t9, kvm_mips_handle_exit
+ jalr.hb t9
+ INT_ADDIU sp, sp, -CALLFRAME_SIZ /* BD Slot */
+
+ /* Return from handler Make sure interrupts are disabled */
+ di
+ ehb
+
+ /* XXXKYMA: k0/k1 could have been blown away if we processed
+ * an exception while we were handling the exception from the
+ * guest, reload k1
+ */
+
+ move k1, s1
+ INT_ADDIU k1, k1, VCPU_HOST_ARCH
+
+ /* Check return value, should tell us if we are returning to the
+	 * host (handle I/O etc) or resuming the guest
+ */
+ andi t0, v0, RESUME_HOST
+ bnez t0, __kvm_mips_return_to_host
+ nop
__kvm_mips_return_to_guest:
- /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
- mtc0 s1, CP0_DDATA_LO
-
- /* Load up the Guest EBASE to minimize the window where BEV is set */
- LONG_L t0, VCPU_GUEST_EBASE(k1)
-
- /* Switch EBASE back to the one used by KVM */
- mfc0 v1, CP0_STATUS
- .set at
- or k0, v1, ST0_BEV
- .set noat
- mtc0 k0, CP0_STATUS
- ehb
- mtc0 t0,CP0_EBASE
-
- /* Setup status register for running guest in UM */
- .set at
- or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
- and v1, v1, ~ST0_CU0
- .set noat
- mtc0 v1, CP0_STATUS
- ehb
+ /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
+ mtc0 s1, CP0_DDATA_LO
+ /* Load up the Guest EBASE to minimize the window where BEV is set */
+ LONG_L t0, VCPU_GUEST_EBASE(k1)
+
+ /* Switch EBASE back to the one used by KVM */
+ mfc0 v1, CP0_STATUS
+ .set at
+ or k0, v1, ST0_BEV
+ .set noat
+ mtc0 k0, CP0_STATUS
+ ehb
+ mtc0 t0, CP0_EBASE
+
+ /* Setup status register for running guest in UM */
+ .set at
+ or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
+ and v1, v1, ~ST0_CU0
+ .set noat
+ mtc0 v1, CP0_STATUS
+ ehb
/* Set Guest EPC */
- LONG_L t0, VCPU_PC(k1)
- mtc0 t0, CP0_EPC
-
- /* Set the ASID for the Guest Kernel */
- sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
- /* addresses shift to 0x80000000 */
- bltz t0, 1f /* If kernel */
- addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
- addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */
+ LONG_L t0, VCPU_PC(k1)
+ mtc0 t0, CP0_EPC
+
+ /* Set the ASID for the Guest Kernel */
+ INT_SLL t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */
+ /* addresses shift to 0x80000000 */
+ bltz t0, 1f /* If kernel */
+ INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
+ INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
1:
- /* t1: contains the base of the ASID array, need to get the cpu id */
- LONG_L t2, TI_CPU($28) /* smp_processor_id */
- sll t2, t2, 2 /* x4 */
- addu t3, t1, t2
- LONG_L k0, (t3)
- andi k0, k0, 0xff
- mtc0 k0,CP0_ENTRYHI
- ehb
-
- /* Disable RDHWR access */
- mtc0 zero, CP0_HWRENA
-
- /* load the guest context from VCPU and return */
- LONG_L $0, VCPU_R0(k1)
- LONG_L $1, VCPU_R1(k1)
- LONG_L $2, VCPU_R2(k1)
- LONG_L $3, VCPU_R3(k1)
- LONG_L $4, VCPU_R4(k1)
- LONG_L $5, VCPU_R5(k1)
- LONG_L $6, VCPU_R6(k1)
- LONG_L $7, VCPU_R7(k1)
- LONG_L $8, VCPU_R8(k1)
- LONG_L $9, VCPU_R9(k1)
- LONG_L $10, VCPU_R10(k1)
- LONG_L $11, VCPU_R11(k1)
- LONG_L $12, VCPU_R12(k1)
- LONG_L $13, VCPU_R13(k1)
- LONG_L $14, VCPU_R14(k1)
- LONG_L $15, VCPU_R15(k1)
- LONG_L $16, VCPU_R16(k1)
- LONG_L $17, VCPU_R17(k1)
- LONG_L $18, VCPU_R18(k1)
- LONG_L $19, VCPU_R19(k1)
- LONG_L $20, VCPU_R20(k1)
- LONG_L $21, VCPU_R21(k1)
- LONG_L $22, VCPU_R22(k1)
- LONG_L $23, VCPU_R23(k1)
- LONG_L $24, VCPU_R24(k1)
- LONG_L $25, VCPU_R25(k1)
-
- /* $/k1 loaded later */
- LONG_L $28, VCPU_R28(k1)
- LONG_L $29, VCPU_R29(k1)
- LONG_L $30, VCPU_R30(k1)
- LONG_L $31, VCPU_R31(k1)
+ /* t1: contains the base of the ASID array, need to get the cpu id */
+ LONG_L t2, TI_CPU($28) /* smp_processor_id */
+ INT_SLL t2, t2, 2 /* x4 */
+ REG_ADDU t3, t1, t2
+ LONG_L k0, (t3)
+ andi k0, k0, 0xff
+ mtc0 k0,CP0_ENTRYHI
+ ehb
+
+ /* Disable RDHWR access */
+ mtc0 zero, CP0_HWRENA
+
+ /* load the guest context from VCPU and return */
+ LONG_L $0, VCPU_R0(k1)
+ LONG_L $1, VCPU_R1(k1)
+ LONG_L $2, VCPU_R2(k1)
+ LONG_L $3, VCPU_R3(k1)
+ LONG_L $4, VCPU_R4(k1)
+ LONG_L $5, VCPU_R5(k1)
+ LONG_L $6, VCPU_R6(k1)
+ LONG_L $7, VCPU_R7(k1)
+ LONG_L $8, VCPU_R8(k1)
+ LONG_L $9, VCPU_R9(k1)
+ LONG_L $10, VCPU_R10(k1)
+ LONG_L $11, VCPU_R11(k1)
+ LONG_L $12, VCPU_R12(k1)
+ LONG_L $13, VCPU_R13(k1)
+ LONG_L $14, VCPU_R14(k1)
+ LONG_L $15, VCPU_R15(k1)
+ LONG_L $16, VCPU_R16(k1)
+ LONG_L $17, VCPU_R17(k1)
+ LONG_L $18, VCPU_R18(k1)
+ LONG_L $19, VCPU_R19(k1)
+ LONG_L $20, VCPU_R20(k1)
+ LONG_L $21, VCPU_R21(k1)
+ LONG_L $22, VCPU_R22(k1)
+ LONG_L $23, VCPU_R23(k1)
+ LONG_L $24, VCPU_R24(k1)
+ LONG_L $25, VCPU_R25(k1)
+
+ /* $/k1 loaded later */
+ LONG_L $28, VCPU_R28(k1)
+ LONG_L $29, VCPU_R29(k1)
+ LONG_L $30, VCPU_R30(k1)
+ LONG_L $31, VCPU_R31(k1)
FEXPORT(__kvm_mips_skip_guest_restore)
- LONG_L k0, VCPU_HI(k1)
- mthi k0
+ LONG_L k0, VCPU_HI(k1)
+ mthi k0
- LONG_L k0, VCPU_LO(k1)
- mtlo k0
+ LONG_L k0, VCPU_LO(k1)
+ mtlo k0
- LONG_L k0, VCPU_R26(k1)
- LONG_L k1, VCPU_R27(k1)
+ LONG_L k0, VCPU_R26(k1)
+ LONG_L k1, VCPU_R27(k1)
- eret
+ eret
__kvm_mips_return_to_host:
- /* EBASE is already pointing to Linux */
- LONG_L k1, VCPU_HOST_STACK(k1)
- addiu k1,k1, -PT_SIZE
-
- /* Restore host DDATA_LO */
- LONG_L k0, PT_HOST_USERLOCAL(k1)
- mtc0 k0, CP0_DDATA_LO
-
- /* Restore host ASID */
- LONG_L k0, PT_HOST_ASID(sp)
- andi k0, 0xff
- mtc0 k0,CP0_ENTRYHI
- ehb
-
- /* Load context saved on the host stack */
- LONG_L $0, PT_R0(k1)
- LONG_L $1, PT_R1(k1)
-
- /* r2/v0 is the return code, shift it down by 2 (arithmetic) to recover the err code */
- sra k0, v0, 2
- move $2, k0
-
- LONG_L $3, PT_R3(k1)
- LONG_L $4, PT_R4(k1)
- LONG_L $5, PT_R5(k1)
- LONG_L $6, PT_R6(k1)
- LONG_L $7, PT_R7(k1)
- LONG_L $8, PT_R8(k1)
- LONG_L $9, PT_R9(k1)
- LONG_L $10, PT_R10(k1)
- LONG_L $11, PT_R11(k1)
- LONG_L $12, PT_R12(k1)
- LONG_L $13, PT_R13(k1)
- LONG_L $14, PT_R14(k1)
- LONG_L $15, PT_R15(k1)
- LONG_L $16, PT_R16(k1)
- LONG_L $17, PT_R17(k1)
- LONG_L $18, PT_R18(k1)
- LONG_L $19, PT_R19(k1)
- LONG_L $20, PT_R20(k1)
- LONG_L $21, PT_R21(k1)
- LONG_L $22, PT_R22(k1)
- LONG_L $23, PT_R23(k1)
- LONG_L $24, PT_R24(k1)
- LONG_L $25, PT_R25(k1)
-
- /* Host k0/k1 were not saved */
-
- LONG_L $28, PT_R28(k1)
- LONG_L $29, PT_R29(k1)
- LONG_L $30, PT_R30(k1)
-
- LONG_L k0, PT_HI(k1)
- mthi k0
-
- LONG_L k0, PT_LO(k1)
- mtlo k0
-
- /* Restore RDHWR access */
- la k0, 0x2000000F
- mtc0 k0, CP0_HWRENA
-
-
- /* Restore RA, which is the address we will return to */
- LONG_L ra, PT_R31(k1)
- j ra
- nop
-
- .set pop
+ /* EBASE is already pointing to Linux */
+ LONG_L k1, VCPU_HOST_STACK(k1)
+ INT_ADDIU k1,k1, -PT_SIZE
+
+ /* Restore host DDATA_LO */
+ LONG_L k0, PT_HOST_USERLOCAL(k1)
+ mtc0 k0, CP0_DDATA_LO
+
+ /* Restore host ASID */
+ LONG_L k0, PT_HOST_ASID(sp)
+ andi k0, 0xff
+ mtc0 k0,CP0_ENTRYHI
+ ehb
+
+ /* Load context saved on the host stack */
+ LONG_L $0, PT_R0(k1)
+ LONG_L $1, PT_R1(k1)
+
+ /* r2/v0 is the return code, shift it down by 2 (arithmetic)
+ * to recover the err code */
+ INT_SRA k0, v0, 2
+ move $2, k0
+
+ LONG_L $3, PT_R3(k1)
+ LONG_L $4, PT_R4(k1)
+ LONG_L $5, PT_R5(k1)
+ LONG_L $6, PT_R6(k1)
+ LONG_L $7, PT_R7(k1)
+ LONG_L $8, PT_R8(k1)
+ LONG_L $9, PT_R9(k1)
+ LONG_L $10, PT_R10(k1)
+ LONG_L $11, PT_R11(k1)
+ LONG_L $12, PT_R12(k1)
+ LONG_L $13, PT_R13(k1)
+ LONG_L $14, PT_R14(k1)
+ LONG_L $15, PT_R15(k1)
+ LONG_L $16, PT_R16(k1)
+ LONG_L $17, PT_R17(k1)
+ LONG_L $18, PT_R18(k1)
+ LONG_L $19, PT_R19(k1)
+ LONG_L $20, PT_R20(k1)
+ LONG_L $21, PT_R21(k1)
+ LONG_L $22, PT_R22(k1)
+ LONG_L $23, PT_R23(k1)
+ LONG_L $24, PT_R24(k1)
+ LONG_L $25, PT_R25(k1)
+
+ /* Host k0/k1 were not saved */
+
+ LONG_L $28, PT_R28(k1)
+ LONG_L $29, PT_R29(k1)
+ LONG_L $30, PT_R30(k1)
+
+ LONG_L k0, PT_HI(k1)
+ mthi k0
+
+ LONG_L k0, PT_LO(k1)
+ mtlo k0
+
+ /* Restore RDHWR access */
+ PTR_LI k0, 0x2000000F
+ mtc0 k0, CP0_HWRENA
+
+
+ /* Restore RA, which is the address we will return to */
+ LONG_L ra, PT_R31(k1)
+ j ra
+ nop
+
VECTOR_END(MIPSX(GuestExceptionEnd))
.end MIPSX(GuestException)
@@ -627,24 +623,23 @@ MIPSX(exceptions):
#define HW_SYNCI_Step $1
LEAF(MIPSX(SyncICache))
- .set push
+ .set push
.set mips32r2
- beq a1, zero, 20f
- nop
- addu a1, a0, a1
- rdhwr v0, HW_SYNCI_Step
- beq v0, zero, 20f
- nop
-
+ beq a1, zero, 20f
+ nop
+ REG_ADDU a1, a0, a1
+ rdhwr v0, HW_SYNCI_Step
+ beq v0, zero, 20f
+ nop
10:
- synci 0(a0)
- addu a0, a0, v0
- sltu v1, a0, a1
- bne v1, zero, 10b
- nop
- sync
+ synci 0(a0)
+ REG_ADDU a0, a0, v0
+ sltu v1, a0, a1
+ bne v1, zero, 10b
+ nop
+ sync
20:
- jr.hb ra
- nop
- .set pop
+ jr.hb ra
+ nop
+ .set pop
END(MIPSX(SyncICache))
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index dd203e59e6f..a7b044536de 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -208,6 +208,10 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
return 0;
}
+void kvm_arch_memslots_updated(struct kvm *kvm)
+{
+}
+
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_userspace_memory_region *mem,
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index e773659ccf9..46048d24328 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -803,6 +803,32 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
dec_insn.next_pc_inc;
return 1;
break;
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ case lwc2_op: /* This is bbit0 on Octeon */
+ if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0)
+ *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc + 8;
+ return 1;
+ case ldc2_op: /* This is bbit032 on Octeon */
+ if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0)
+ *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc + 8;
+ return 1;
+ case swc2_op: /* This is bbit1 on Octeon */
+ if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
+ *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc + 8;
+ return 1;
+ case sdc2_op: /* This is bbit132 on Octeon */
+ if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32)))
+ *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc + 8;
+ return 1;
+#endif
case cop0_op:
case cop1_op:
case cop2_op:
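
The Octeon cases added to isBranchInstr() above decode the bbit* branch-on-bit instructions: when the tested bit has the wanted value, the continuation PC is EPC + 4 plus the sign-extended 16-bit offset shifted left by 2, otherwise execution resumes past the delay slot at EPC + 8. A small sketch of that target arithmetic (plain C, made-up inputs, no kernel types):

#include <stdint.h>
#include <stdio.h>

/*
 * Continuation PC for a "branch on bit" style instruction: if the
 * selected bit of reg has the wanted value, the target is
 * EPC + 4 + (sign-extended 16-bit offset << 2); otherwise execution
 * resumes after the delay slot at EPC + 8.  Mirrors the Octeon bbit0
 * case in the hunk above.
 */
static uint64_t bbit_contpc(uint64_t epc, uint64_t reg, int bit,
			    int want_set, int16_t simmediate)
{
	int bit_set = (reg >> bit) & 1;

	if (bit_set == want_set)
		return epc + 4 + ((int64_t)simmediate << 2);
	return epc + 8;
}

int main(void)
{
	uint64_t epc = 0xffffffff80100000ULL;

	/* bbit0 with bit 0 clear: branch taken, offset of -2 instructions */
	printf("0x%llx\n",
	       (unsigned long long)bbit_contpc(epc, 0x10, 0, 0, -2));
	/* same instruction with bit 0 set: fall through past the delay slot */
	printf("0x%llx\n",
	       (unsigned long long)bbit_contpc(epc, 0x11, 0, 0, -2));
	return 0;
}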
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index af763e838fd..5e5424753b5 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -33,7 +33,7 @@ static int op_mips_setup(void)
return 0;
}
-static int op_mips_create_files(struct super_block *sb, struct dentry *root)
+static int op_mips_create_files(struct dentry *root)
{
int i;
@@ -42,16 +42,16 @@ static int op_mips_create_files(struct super_block *sb, struct dentry *root)
char buf[4];
snprintf(buf, sizeof buf, "%d", i);
- dir = oprofilefs_mkdir(sb, root, buf);
-
- oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
- oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
- oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
- oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
- oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
- oprofilefs_create_ulong(sb, dir, "exl", &ctr[i].exl);
+ dir = oprofilefs_mkdir(root, buf);
+
+ oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
+ oprofilefs_create_ulong(dir, "event", &ctr[i].event);
+ oprofilefs_create_ulong(dir, "count", &ctr[i].count);
+ oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
+ oprofilefs_create_ulong(dir, "user", &ctr[i].user);
+ oprofilefs_create_ulong(dir, "exl", &ctr[i].exl);
/* Dummy. */
- oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
+ oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
}
return 0;
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index e4b1140cdae..3a2b6e9f25c 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -166,7 +166,7 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr)
reg.control[i] |= M_PERFCTL_USER;
if (ctr[i].exl)
reg.control[i] |= M_PERFCTL_EXL;
- if (current_cpu_type() == CPU_XLR)
+ if (boot_cpu_type() == CPU_XLR)
reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS;
reg.counter[i] = 0x80000000 - ctr[i].count;
}
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 594e60d6a43..33e7aa52d9c 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -113,7 +113,6 @@ static void pcibios_scanbus(struct pci_controller *hose)
if (!pci_has_flag(PCI_PROBE_ONLY)) {
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
- pci_enable_bridges(bus);
}
}
}
diff --git a/arch/mips/pnx833x/common/platform.c b/arch/mips/pnx833x/common/platform.c
index d22dc0d6f28..2b7e837dc2e 100644
--- a/arch/mips/pnx833x/common/platform.c
+++ b/arch/mips/pnx833x/common/platform.c
@@ -206,11 +206,13 @@ static struct resource pnx833x_ethernet_resources[] = {
.end = PNX8335_IP3902_PORTS_END,
.flags = IORESOURCE_MEM,
},
+#ifdef CONFIG_SOC_PNX8335
[1] = {
.start = PNX8335_PIC_ETHERNET_INT,
.end = PNX8335_PIC_ETHERNET_INT,
.flags = IORESOURCE_IRQ,
},
+#endif
};
static struct platform_device pnx833x_ethernet_device = {
diff --git a/arch/mips/powertv/asic/asic_devices.c b/arch/mips/powertv/asic/asic_devices.c
index 9f64c238780..0238af1ba50 100644
--- a/arch/mips/powertv/asic/asic_devices.c
+++ b/arch/mips/powertv/asic/asic_devices.c
@@ -529,8 +529,7 @@ EXPORT_SYMBOL(asic_resource_get);
*/
void platform_release_memory(void *ptr, int size)
{
- free_reserved_area((unsigned long)ptr, (unsigned long)(ptr + size),
- -1, NULL);
+ free_reserved_area(ptr, ptr + size, -1, NULL);
}
EXPORT_SYMBOL(platform_release_memory);
diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c
index dd0ab982d77..f9407e17047 100644
--- a/arch/mips/sni/a20r.c
+++ b/arch/mips/sni/a20r.c
@@ -122,7 +122,6 @@ static struct resource sc26xx_rsrc[] = {
static struct sccnxp_pdata sccnxp_data = {
.reg_shift = 2,
- .frequency = 3686400,
.mctrl_cfg[0] = MCTRL_SIG(DTR_OP, LINE_OP7) |
MCTRL_SIG(RTS_OP, LINE_OP3) |
MCTRL_SIG(DSR_IP, LINE_IP5) |
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 99dbab1c59a..d60bf98fa5c 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -55,6 +55,7 @@ config GENERIC_CSUM
source "init/Kconfig"
+source "kernel/Kconfig.freezer"
menu "Processor type and features"
diff --git a/arch/openrisc/include/asm/prom.h b/arch/openrisc/include/asm/prom.h
index bbb34e5343a..eb59bfe23e8 100644
--- a/arch/openrisc/include/asm/prom.h
+++ b/arch/openrisc/include/asm/prom.h
@@ -44,9 +44,6 @@ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
extern void kdump_move_device_tree(void);
-/* CPU OF node matching */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
-
/* Get the MAC address */
extern const void *of_get_mac_address(struct device_node *np);
diff --git a/arch/parisc/configs/c8000_defconfig b/arch/parisc/configs/c8000_defconfig
new file mode 100644
index 00000000000..f1100636129
--- /dev/null
+++ b/arch/parisc/configs/c8000_defconfig
@@ -0,0 +1,279 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_RD_BZIP2=y
+CONFIG_RD_LZMA=y
+CONFIG_RD_LZO=y
+CONFIG_EXPERT=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_PA8X00=y
+CONFIG_MLONGCALLS=y
+CONFIG_64BIT=y
+CONFIG_SMP=y
+CONFIG_PREEMPT=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_IOMMU_CCIO=y
+CONFIG_PCI=y
+CONFIG_PCI_LBA=y
+# CONFIG_SUPERIO is not set
+# CONFIG_CHASSIS_LCD_LED is not set
+# CONFIG_PDC_CHASSIS is not set
+# CONFIG_PDC_CHASSIS_WARN is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=m
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_NET_KEY=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_INET_DIAG=m
+# CONFIG_IPV6 is not set
+CONFIG_IP_DCCP=m
+# CONFIG_IP_DCCP_CCID3 is not set
+CONFIG_TIPC=m
+CONFIG_LLC2=m
+CONFIG_DNS_RESOLVER=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_STANDALONE is not set
+CONFIG_PARPORT=y
+CONFIG_PARPORT_PC=y
+CONFIG_PARPORT_PC_FIFO=y
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_SX8=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=6144
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_WCACHE=y
+CONFIG_ATA_OVER_ETH=m
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_PLATFORM=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_SIIMAGE=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_FC_ATTRS=y
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=y
+CONFIG_FUSION_SAS=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=m
+CONFIG_NETCONSOLE=m
+CONFIG_TUN=y
+CONFIG_E1000=y
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+# CONFIG_WLAN is not set
+CONFIG_INPUT_FF_MEMLESS=m
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_HIL_OLD is not set
+# CONFIG_KEYBOARD_HIL is not set
+CONFIG_MOUSE_PS2=m
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_CM109=m
+CONFIG_SERIO_SERPORT=m
+CONFIG_SERIO_PARKBD=m
+CONFIG_SERIO_GSCPS2=m
+# CONFIG_HP_SDC is not set
+CONFIG_SERIO_PCIPS2=m
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=m
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=8
+CONFIG_SERIAL_8250_RUNTIME_UARTS=8
+CONFIG_SERIAL_8250_EXTENDED=y
+# CONFIG_SERIAL_MUX is not set
+CONFIG_SERIAL_JSM=m
+CONFIG_PRINTER=y
+CONFIG_HW_RANDOM=y
+CONFIG_RAW_DRIVER=m
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_SSB=m
+CONFIG_SSB_DRIVER_PCICORE=y
+CONFIG_AGP=y
+CONFIG_AGP_PARISC=y
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_FOREIGN_ENDIAN=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+# CONFIG_FB_STI is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_STI_CONSOLE is not set
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+# CONFIG_LOGO_LINUX_CLUT224 is not set
+CONFIG_SOUND=m
+CONFIG_SND=m
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_MIXER_OSS=m
+CONFIG_SND_PCM_OSS=m
+CONFIG_SND_SEQUENCER_OSS=y
+CONFIG_SND_VERBOSE_PRINTK=y
+CONFIG_SND_AD1889=m
+# CONFIG_SND_USB is not set
+# CONFIG_SND_GSC is not set
+CONFIG_HID_A4TECH=m
+CONFIG_HID_APPLE=m
+CONFIG_HID_BELKIN=m
+CONFIG_HID_CHERRY=m
+CONFIG_HID_CHICONY=m
+CONFIG_HID_CYPRESS=m
+CONFIG_HID_DRAGONRISE=m
+CONFIG_HID_EZKEY=m
+CONFIG_HID_KYE=m
+CONFIG_HID_GYRATION=m
+CONFIG_HID_TWINHAN=m
+CONFIG_HID_KENSINGTON=m
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MONTEREY=m
+CONFIG_HID_NTRIG=m
+CONFIG_HID_ORTEK=m
+CONFIG_HID_PANTHERLORD=m
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_SAMSUNG=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_GREENASIA=m
+CONFIG_HID_SMARTJOYPLUS=m
+CONFIG_HID_TOPSEED=m
+CONFIG_HID_THRUSTMASTER=m
+CONFIG_HID_ZEROPLUS=m
+CONFIG_USB_HID=m
+CONFIG_USB=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT2_FS_POSIX_ACL=y
+CONFIG_EXT2_FS_SECURITY=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_EXT4_FS=m
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V1=m
+CONFIG_QFMT_V2=m
+CONFIG_AUTOFS4_FS=m
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_NFS_FS=m
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=m
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_SLAB=y
+CONFIG_DEBUG_SLAB_LEAK=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_RT_MUTEX_TESTER=y
+CONFIG_PROVE_RCU_DELAY=y
+CONFIG_DEBUG_BLOCK_EXT_DEVT=y
+CONFIG_LATENCYTOP=y
+CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
+CONFIG_KEYS=y
+# CONFIG_CRYPTO_HW is not set
+CONFIG_FONTS=y
diff --git a/arch/parisc/include/asm/parisc-device.h b/arch/parisc/include/asm/parisc-device.h
index 9afdad6c2ff..eaf4dc1c729 100644
--- a/arch/parisc/include/asm/parisc-device.h
+++ b/arch/parisc/include/asm/parisc-device.h
@@ -23,6 +23,7 @@ struct parisc_device {
/* generic info returned from pdc_pat_cell_module() */
unsigned long mod_info; /* PAT specific - Misc Module info */
unsigned long pmod_loc; /* physical Module location */
+ unsigned long mod0;
#endif
u64 dma_mask; /* DMA mask for I/O */
struct device dev;
@@ -61,4 +62,6 @@ parisc_get_drvdata(struct parisc_device *d)
extern struct bus_type parisc_bus_type;
+int iosapic_serial_irq(struct parisc_device *dev);
+
#endif /*_ASM_PARISC_PARISC_DEVICE_H_*/
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 2e65aa54bd1..c035673209f 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -71,18 +71,27 @@ flush_cache_all_local(void)
}
EXPORT_SYMBOL(flush_cache_all_local);
+/* Virtual address of pfn. */
+#define pfn_va(pfn) __va(PFN_PHYS(pfn))
+
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
- struct page *page = pte_page(*ptep);
+ unsigned long pfn = pte_pfn(*ptep);
+ struct page *page;
- if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
- test_bit(PG_dcache_dirty, &page->flags)) {
+ /* We don't have pte special. As a result, we can be called with
+ an invalid pfn and we don't need to flush the kernel dcache page.
+ This occurs with FireGL card in C8000. */
+ if (!pfn_valid(pfn))
+ return;
- flush_kernel_dcache_page(page);
+ page = pfn_to_page(pfn);
+ if (page_mapping(page) && test_bit(PG_dcache_dirty, &page->flags)) {
+ flush_kernel_dcache_page_addr(pfn_va(pfn));
clear_bit(PG_dcache_dirty, &page->flags);
} else if (parisc_requires_coherency())
- flush_kernel_dcache_page(page);
+ flush_kernel_dcache_page_addr(pfn_va(pfn));
}
void
@@ -495,44 +504,42 @@ static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
void flush_cache_mm(struct mm_struct *mm)
{
+ struct vm_area_struct *vma;
+ pgd_t *pgd;
+
/* Flushing the whole cache on each cpu takes forever on
rp3440, etc. So, avoid it if the mm isn't too big. */
- if (mm_total_size(mm) < parisc_cache_flush_threshold) {
- struct vm_area_struct *vma;
-
- if (mm->context == mfsp(3)) {
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- flush_user_dcache_range_asm(vma->vm_start,
- vma->vm_end);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_range_asm(
- vma->vm_start, vma->vm_end);
- }
- } else {
- pgd_t *pgd = mm->pgd;
-
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- unsigned long addr;
-
- for (addr = vma->vm_start; addr < vma->vm_end;
- addr += PAGE_SIZE) {
- pte_t *ptep = get_ptep(pgd, addr);
- if (ptep != NULL) {
- pte_t pte = *ptep;
- __flush_cache_page(vma, addr,
- page_to_phys(pte_page(pte)));
- }
- }
- }
+ if (mm_total_size(mm) >= parisc_cache_flush_threshold) {
+ flush_cache_all();
+ return;
+ }
+
+ if (mm->context == mfsp(3)) {
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
+ if ((vma->vm_flags & VM_EXEC) == 0)
+ continue;
+ flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
}
return;
}
-#ifdef CONFIG_SMP
- flush_cache_all();
-#else
- flush_cache_all_local();
-#endif
+ pgd = mm->pgd;
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ unsigned long addr;
+
+ for (addr = vma->vm_start; addr < vma->vm_end;
+ addr += PAGE_SIZE) {
+ unsigned long pfn;
+ pte_t *ptep = get_ptep(pgd, addr);
+ if (!ptep)
+ continue;
+ pfn = pte_pfn(*ptep);
+ if (!pfn_valid(pfn))
+ continue;
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
+ }
+ }
}
void
@@ -556,33 +563,32 @@ flush_user_icache_range(unsigned long start, unsigned long end)
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
+ unsigned long addr;
+ pgd_t *pgd;
+
BUG_ON(!vma->vm_mm->context);
- if ((end - start) < parisc_cache_flush_threshold) {
- if (vma->vm_mm->context == mfsp(3)) {
- flush_user_dcache_range_asm(start, end);
- if (vma->vm_flags & VM_EXEC)
- flush_user_icache_range_asm(start, end);
- } else {
- unsigned long addr;
- pgd_t *pgd = vma->vm_mm->pgd;
-
- for (addr = start & PAGE_MASK; addr < end;
- addr += PAGE_SIZE) {
- pte_t *ptep = get_ptep(pgd, addr);
- if (ptep != NULL) {
- pte_t pte = *ptep;
- flush_cache_page(vma,
- addr, pte_pfn(pte));
- }
- }
- }
- } else {
-#ifdef CONFIG_SMP
+ if ((end - start) >= parisc_cache_flush_threshold) {
flush_cache_all();
-#else
- flush_cache_all_local();
-#endif
+ return;
+ }
+
+ if (vma->vm_mm->context == mfsp(3)) {
+ flush_user_dcache_range_asm(start, end);
+ if (vma->vm_flags & VM_EXEC)
+ flush_user_icache_range_asm(start, end);
+ return;
+ }
+
+ pgd = vma->vm_mm->pgd;
+ for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
+ unsigned long pfn;
+ pte_t *ptep = get_ptep(pgd, addr);
+ if (!ptep)
+ continue;
+ pfn = pte_pfn(*ptep);
+ if (pfn_valid(pfn))
+ __flush_cache_page(vma, addr, PFN_PHYS(pfn));
}
}
@@ -591,9 +597,10 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
{
BUG_ON(!vma->vm_mm->context);
- flush_tlb_page(vma, vmaddr);
- __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
-
+ if (pfn_valid(pfn)) {
+ flush_tlb_page(vma, vmaddr);
+ __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
+ }
}
#ifdef CONFIG_PARISC_TMPALIAS
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c
index 3295ef4a185..f0b6722fc70 100644
--- a/arch/parisc/kernel/inventory.c
+++ b/arch/parisc/kernel/inventory.c
@@ -211,6 +211,7 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
/* REVISIT: who is the consumer of this? not sure yet... */
dev->mod_info = pa_pdc_cell->mod_info; /* pass to PAT_GET_ENTITY() */
dev->pmod_loc = pa_pdc_cell->mod_location;
+ dev->mod0 = pa_pdc_cell->mod[0];
register_parisc_device(dev); /* advertise device */
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 35c5bf1307a..1cba8f29bb4 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -56,13 +56,6 @@
#define A(__x) ((unsigned long)(__x))
/*
- * Atomically swap in the new signal mask, and wait for a signal.
- */
-#ifdef CONFIG_64BIT
-#include "sys32.h"
-#endif
-
-/*
* Do a signal return - restore sigcontext.
*/
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
index 33eca1b0492..6c6a271a614 100644
--- a/arch/parisc/kernel/signal32.c
+++ b/arch/parisc/kernel/signal32.c
@@ -34,7 +34,6 @@
#include <asm/uaccess.h>
#include "signal32.h"
-#include "sys32.h"
#define DEBUG_COMPAT_SIG 0
#define DEBUG_COMPAT_SIG_LEVEL 2
diff --git a/arch/parisc/kernel/sys32.h b/arch/parisc/kernel/sys32.h
deleted file mode 100644
index 60dd470f39f..00000000000
--- a/arch/parisc/kernel/sys32.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright (C) 2002 Richard Hirst <rhirst at parisc-linux.org>
- * Copyright (C) 2003 James Bottomley <jejb at parisc-linux.org>
- * Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _PARISC64_KERNEL_SYS32_H
-#define _PARISC64_KERNEL_SYS32_H
-
-#include <linux/compat.h>
-
-/* Call a kernel syscall which will use kernel space instead of user
- * space for its copy_to/from_user.
- */
-#define KERNEL_SYSCALL(ret, syscall, args...) \
-{ \
- mm_segment_t old_fs = get_fs(); \
- set_fs(KERNEL_DS); \
- ret = syscall(args); \
- set_fs (old_fs); \
-}
-
-#endif
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index a134ff4da12..bb9f3b64de5 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -42,8 +42,6 @@
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
-#include "sys32.h"
-
#undef DEBUG
#ifdef DEBUG
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3bf72cd2c8f..5aecda05e0d 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -369,9 +369,9 @@ config KEXEC
It is an ongoing process to be certain the hardware in a machine
is properly shutdown, so do not be surprised if this code does not
- initially work for you. It may help to enable device hotplugging
- support. As of this writing the exact hardware interface is
- strongly in flux, so no good recommendation can be made.
+ initially work for you. As of this writing the exact hardware
+ interface is strongly in flux, so no good recommendation can be
+ made.
config CRASH_DUMP
bool "Build a kdump crash kernel"
@@ -566,7 +566,7 @@ config SCHED_SMT
config PPC_DENORMALISATION
bool "PowerPC denormalisation exception handling"
depends on PPC_BOOK3S_64
- default "n"
+ default "y" if PPC_POWERNV
---help---
Add support for handling denormalisation of single precision
values. Useful for bare metal only. If unsure say Y here.
@@ -979,6 +979,7 @@ config RELOCATABLE
must live at a different physical address than the primary
kernel.
+# This value must have zeroes in the bottom 60 bits otherwise lots will break
config PAGE_OFFSET
hex
default "0xc000000000000000"
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index c86fcb92358..0e8cfd09da2 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -58,7 +58,7 @@ CONFIG_SCHED_SMT=y
CONFIG_PPC_DENORMALISATION=y
CONFIG_PCCARD=y
CONFIG_ELECTRA_CF=y
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_RPA=m
CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
CONFIG_PACKET=y
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig
index 4b20f76172e..0085dc4642c 100644
--- a/arch/powerpc/configs/ppc64e_defconfig
+++ b/arch/powerpc/configs/ppc64e_defconfig
@@ -32,7 +32,7 @@ CONFIG_IRQ_ALL_CPUS=y
CONFIG_SPARSEMEM_MANUAL=y
CONFIG_PCI_MSI=y
CONFIG_PCCARD=y
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=m
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index bea8587c3af..1d4b9763895 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -53,7 +53,7 @@ CONFIG_PPC_64K_PAGES=y
CONFIG_PPC_SUBPAGE_PROT=y
CONFIG_SCHED_SMT=y
CONFIG_PPC_DENORMALISATION=y
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_RPA=m
CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
CONFIG_PACKET=y
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 650757c300d..704e6f10ae8 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -2,3 +2,4 @@
generic-y += clkdev.h
generic-y += rwsem.h
generic-y += trace_clock.h
+generic-y += vtime.h
\ No newline at end of file
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 08891d07aeb..fa19e2f1a87 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -334,6 +334,27 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
return r;
}
+/*
+ * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
+ * Because the sc instruction sets SRR0 to point to the following
+ * instruction, we have to fetch from pc - 4.
+ */
+static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
+{
+ ulong pc = kvmppc_get_pc(vcpu) - 4;
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ u32 r;
+
+ /* Load the instruction manually if it failed to do so in the
+ * exit path */
+ if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
+ kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
+
+ r = svcpu->last_inst;
+ svcpu_put(svcpu);
+ return r;
+}
+
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
@@ -446,6 +467,23 @@ static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
return vcpu->arch.last_inst;
}
+/*
+ * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
+ * Because the sc instruction sets SRR0 to point to the following
+ * instruction, we have to fetch from pc - 4.
+ */
+static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
+{
+ ulong pc = kvmppc_get_pc(vcpu) - 4;
+
+ /* Load the instruction manually if it failed to do so in the
+ * exit path */
+ if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
+ kvmppc_ld(vcpu, &pc, sizeof(u32), &vcpu->arch.last_inst, false);
+
+ return vcpu->arch.last_inst;
+}
+
static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault_dar;
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index a1ecb14e444..86d638a3b35 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -37,7 +37,7 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
#ifdef CONFIG_KVM_BOOK3S_64_HV
#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
-extern int kvm_hpt_order; /* order of preallocated HPTs */
+extern unsigned long kvm_rma_pages;
#endif
#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */
@@ -100,7 +100,7 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
/* (masks depend on page size) */
rb |= 0x1000; /* page encoding in LP field */
rb |= (va_low & 0x7f) << 16; /* 7b of VA in AVA/LP field */
- rb |= (va_low & 0xfe); /* AVAL field (P7 doesn't seem to care) */
+ rb |= ((va_low << 4) & 0xf0); /* AVAL field (P7 doesn't seem to care) */
}
} else {
/* 4kB page */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index af326cde7cb..33283532e9d 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -183,13 +183,9 @@ struct kvmppc_spapr_tce_table {
struct page *pages[0];
};
-struct kvmppc_linear_info {
- void *base_virt;
- unsigned long base_pfn;
- unsigned long npages;
- struct list_head list;
- atomic_t use_count;
- int type;
+struct kvm_rma_info {
+ atomic_t use_count;
+ unsigned long base_pfn;
};
/* XICS components, defined in book3s_xics.c */
@@ -246,7 +242,7 @@ struct kvm_arch {
int tlbie_lock;
unsigned long lpcr;
unsigned long rmor;
- struct kvmppc_linear_info *rma;
+ struct kvm_rma_info *rma;
unsigned long vrma_slb_v;
int rma_setup_done;
int using_mmu_notifiers;
@@ -259,7 +255,7 @@ struct kvm_arch {
spinlock_t slot_phys_lock;
cpumask_t need_tlb_flush;
struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
- struct kvmppc_linear_info *hpt_li;
+ int hpt_cma_alloc;
#endif /* CONFIG_KVM_BOOK3S_64_HV */
#ifdef CONFIG_PPC_BOOK3S_64
struct list_head spapr_tce_tables;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index a5287fe03d7..b15554a26c2 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -137,10 +137,10 @@ extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce);
extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
struct kvm_allocate_rma *rma);
-extern struct kvmppc_linear_info *kvm_alloc_rma(void);
-extern void kvm_release_rma(struct kvmppc_linear_info *ri);
-extern struct kvmppc_linear_info *kvm_alloc_hpt(void);
-extern void kvm_release_hpt(struct kvmppc_linear_info *li);
+extern struct kvm_rma_info *kvm_alloc_rma(void);
+extern void kvm_release_rma(struct kvm_rma_info *ri);
+extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
+extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
@@ -261,6 +261,7 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
struct openpic;
#ifdef CONFIG_KVM_BOOK3S_64_HV
+extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
paca[cpu].kvm_hstate.xics_phys = addr;
@@ -281,13 +282,12 @@ static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
}
extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu);
-extern void kvm_linear_init(void);
#else
-static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
+static inline void __init kvm_cma_reserve(void)
{}
-static inline void kvm_linear_init(void)
+static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}
static inline u32 kvmppc_get_xics_latch(void)
@@ -394,10 +394,15 @@ static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
}
}
-/* Please call after prepare_to_enter. This function puts the lazy ee state
- back to normal mode, without actually enabling interrupts. */
-static inline void kvmppc_lazy_ee_enable(void)
+/*
+ * Please call after prepare_to_enter. This function puts the lazy ee and irq
+ * disabled tracking state back to normal mode, without actually enabling
+ * interrupts.
+ */
+static inline void kvmppc_fix_ee_before_entry(void)
{
+ trace_hardirqs_on();
+
#ifdef CONFIG_PPC64
/* Only need to enable IRQs by hard enabling them after this */
local_paca->irq_happened = 0;
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 988c812aab5..b9f426212d3 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -211,9 +211,19 @@ extern long long virt_phys_offset;
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET)
#else
+#ifdef CONFIG_PPC64
+/*
+ * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
+ * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
+ */
+#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET))
+#define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL)
+
+#else /* 32-bit, non book E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
+#endif
/*
* Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
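/*
 * Editorial aside (not part of the patch): why the PPC64 __va()/__pa() switch
 * from +/- to |/& above is safe.  PAGE_OFFSET defaults to 0xc000000000000000
 * and, as the Kconfig comment added earlier in this diff says, must have zeroes
 * in its bottom 60 bits; for physical addresses below 1 << 60 the OR and AND
 * therefore never carry or borrow and give the same results as add/subtract,
 * while sidestepping the gcc -mcmodel=medium miscompile noted in the comment.
 * A standalone sketch, with SKETCH_PAGE_OFFSET standing in for PAGE_OFFSET.
 */
#include <assert.h>
#include <stdint.h>

#define SKETCH_PAGE_OFFSET 0xc000000000000000ULL

int main(void)
{
	uint64_t pa = 0x0000000012345000ULL;	/* sample physical address */
	uint64_t va_add = pa + SKETCH_PAGE_OFFSET;
	uint64_t va_or  = pa | SKETCH_PAGE_OFFSET;

	assert(va_add == va_or);			/* __va() equivalence */
	assert(va_or - SKETCH_PAGE_OFFSET ==
	       (va_or & 0x0fffffffffffffffULL));	/* __pa() equivalence */
	return 0;
}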
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 2dd7bfc459b..3fd2f1b6f90 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include <asm/hw_irq.h>
#include <linux/device.h>
+#include <uapi/asm/perf_event.h>
#define MAX_HWEVENTS 8
#define MAX_EVENT_ALTERNATIVES 8
@@ -69,11 +70,6 @@ struct power_pmu {
#define PPMU_LIMITED_PMC_REQD 2 /* have to put this on a limited PMC */
#define PPMU_ONLY_COUNT_RUN 4 /* only counting in run state */
-/*
- * We use the event config bit 63 as a flag to request EBB.
- */
-#define EVENT_CONFIG_EBB_SHIFT 63
-
extern int register_power_pmu(struct power_pmu *);
struct pt_regs;
@@ -142,11 +138,11 @@ extern ssize_t power_events_sysfs_show(struct device *dev,
#define EVENT_PTR(_id, _suffix) &EVENT_VAR(_id, _suffix).attr.attr
#define EVENT_ATTR(_name, _id, _suffix) \
- PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_PM_##_id, \
+ PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_##_id, \
power_events_sysfs_show)
#define GENERIC_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _g)
#define GENERIC_EVENT_PTR(_id) EVENT_PTR(_id, _g)
-#define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(PM_##_name, _id, _p)
+#define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _p)
#define POWER_EVENT_PTR(_id) EVENT_PTR(_id, _p)
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 47a35b08b96..e378cccfca5 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -247,6 +247,10 @@ struct thread_struct {
unsigned long tm_orig_msr; /* Thread's MSR on ctx switch */
struct pt_regs ckpt_regs; /* Checkpointed registers */
+ unsigned long tm_tar;
+ unsigned long tm_ppr;
+ unsigned long tm_dscr;
+
/*
* Transactional FP and VSX 0-31 register set.
* NOTE: the sense of these is the opposite of the integer ckpt_regs!
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index bc2da154f68..ac204e02292 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -43,9 +43,6 @@ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop,
extern void kdump_move_device_tree(void);
-/* CPU OF node matching */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
-
/* cache lookup */
struct device_node *of_find_next_cache_node(struct device_node *np);
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index a6840e4e24f..99222e27f17 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -254,19 +254,28 @@
#define SPRN_HRMOR 0x139 /* Real mode offset register */
#define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */
#define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */
+/* HFSCR and FSCR bit numbers are the same */
+#define FSCR_TAR_LG 8 /* Enable Target Address Register */
+#define FSCR_EBB_LG 7 /* Enable Event Based Branching */
+#define FSCR_TM_LG 5 /* Enable Transactional Memory */
+#define FSCR_PM_LG 4 /* Enable prob/priv access to PMU SPRs */
+#define FSCR_BHRB_LG 3 /* Enable Branch History Rolling Buffer*/
+#define FSCR_DSCR_LG 2 /* Enable Data Stream Control Register */
+#define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */
+#define FSCR_FP_LG 0 /* Enable Floating Point */
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
-#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
-#define FSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */
-#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
+#define FSCR_TAR __MASK(FSCR_TAR_LG)
+#define FSCR_EBB __MASK(FSCR_EBB_LG)
+#define FSCR_DSCR __MASK(FSCR_DSCR_LG)
#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
-#define HFSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
-#define HFSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */
-#define HFSCR_TM (1 << (63-58)) /* Enable Transactional Memory */
-#define HFSCR_PM (1 << (63-60)) /* Enable prob/priv access to PMU SPRs */
-#define HFSCR_BHRB (1 << (63-59)) /* Enable Branch History Rolling Buffer*/
-#define HFSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
-#define HFSCR_VECVSX (1 << (63-62)) /* Enable VMX/VSX */
-#define HFSCR_FP (1 << (63-63)) /* Enable Floating Point */
+#define HFSCR_TAR __MASK(FSCR_TAR_LG)
+#define HFSCR_EBB __MASK(FSCR_EBB_LG)
+#define HFSCR_TM __MASK(FSCR_TM_LG)
+#define HFSCR_PM __MASK(FSCR_PM_LG)
+#define HFSCR_BHRB __MASK(FSCR_BHRB_LG)
+#define HFSCR_DSCR __MASK(FSCR_DSCR_LG)
+#define HFSCR_VECVSX __MASK(FSCR_VECVSX_LG)
+#define HFSCR_FP __MASK(FSCR_FP_LG)
#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
#define LPCR_VPM0 (1ul << (63-0))
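/*
 * Editorial aside (not part of the patch): the FSCR/HFSCR rewrite above keeps
 * the bit positions unchanged.  The old IBM-style "1 << (63 - n)" masks and the
 * new __MASK(<facility>_LG) forms are the same constants, assuming __MASK(x)
 * expands to (1UL << (x)) as defined elsewhere in reg.h.  The _LG numbers also
 * double as indices into facility_strings[] in the traps.c hunk later in this
 * diff.  SKETCH_MASK below is a stand-in used only for this check.
 */
#include <assert.h>

#define SKETCH_MASK(x)	(1UL << (x))
#define FSCR_TAR_LG	8
#define FSCR_DSCR_LG	2
#define FSCR_FP_LG	0

static_assert(SKETCH_MASK(FSCR_TAR_LG)  == (1UL << (63 - 55)), "TAR mask");
static_assert(SKETCH_MASK(FSCR_DSCR_LG) == (1UL << (63 - 61)), "DSCR mask");
static_assert(SKETCH_MASK(FSCR_FP_LG)   == (1UL << (63 - 63)), "FP mask");

int main(void) { return 0; }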
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index ffbaabebcdc..48cfc858abd 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -145,6 +145,10 @@ extern void __cpu_die(unsigned int cpu);
#define smp_setup_cpu_maps()
static inline void inhibit_secondary_onlining(void) {}
static inline void uninhibit_secondary_onlining(void) {}
+static inline const struct cpumask *cpu_sibling_mask(int cpu)
+{
+ return cpumask_of(cpu);
+}
#endif /* CONFIG_SMP */
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 49a13e0ef23..294c2cedcf7 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -15,6 +15,15 @@ extern struct task_struct *__switch_to(struct task_struct *,
struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
struct thread_struct *next);
+#ifdef CONFIG_PPC_BOOK3S_64
+static inline void save_tar(struct thread_struct *prev)
+{
+ if (cpu_has_feature(CPU_FTR_ARCH_207S))
+ prev->tar = mfspr(SPRN_TAR);
+}
+#else
+static inline void save_tar(struct thread_struct *prev) {}
+#endif
extern void giveup_fpu(struct task_struct *);
extern void load_up_fpu(void);
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild
index 5182c8622b5..48be855ef37 100644
--- a/arch/powerpc/include/uapi/asm/Kbuild
+++ b/arch/powerpc/include/uapi/asm/Kbuild
@@ -20,6 +20,7 @@ header-y += mman.h
header-y += msgbuf.h
header-y += nvram.h
header-y += param.h
+header-y += perf_event.h
header-y += poll.h
header-y += posix_types.h
header-y += ps3fb.h
diff --git a/arch/powerpc/include/uapi/asm/perf_event.h b/arch/powerpc/include/uapi/asm/perf_event.h
new file mode 100644
index 00000000000..80a4d40cf5b
--- /dev/null
+++ b/arch/powerpc/include/uapi/asm/perf_event.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright 2013 Michael Ellerman, IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2 of the
+ * License.
+ */
+
+#ifndef _UAPI_ASM_POWERPC_PERF_EVENT_H
+#define _UAPI_ASM_POWERPC_PERF_EVENT_H
+
+/*
+ * We use bit 63 of perf_event_attr.config as a flag to request EBB.
+ */
+#define PERF_EVENT_CONFIG_EBB_SHIFT 63
+
+#endif /* _UAPI_ASM_POWERPC_PERF_EVENT_H */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index c7e8afc2ead..d8958be5f31 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -138,6 +138,9 @@ int main(void)
DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar));
DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr));
DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar));
+ DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar));
+ DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr));
+ DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr));
DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs));
DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct,
transact_vr[0]));
@@ -451,6 +454,7 @@ int main(void)
DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
#endif
+ DEFINE(VCPU_SHARED_SPRG3, offsetof(struct kvm_vcpu_arch_shared, sprg3));
DEFINE(VCPU_SHARED_SPRG4, offsetof(struct kvm_vcpu_arch_shared, sprg4));
DEFINE(VCPU_SHARED_SPRG5, offsetof(struct kvm_vcpu_arch_shared, sprg5));
DEFINE(VCPU_SHARED_SPRG6, offsetof(struct kvm_vcpu_arch_shared, sprg6));
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index ea9414c8088..55593ee2d5a 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -1061,7 +1061,7 @@ static const struct file_operations proc_eeh_operations = {
static int __init eeh_init_proc(void)
{
- if (machine_is(pseries))
+ if (machine_is(pseries) || machine_is(powernv))
proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations);
return 0;
}
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index ab15b8d057a..2bd0b885b0f 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -449,15 +449,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#ifdef CONFIG_PPC_BOOK3S_64
BEGIN_FTR_SECTION
- /*
- * Back up the TAR across context switches. Note that the TAR is not
- * available for use in the kernel. (To provide this, the TAR should
- * be backed up/restored on exception entry/exit instead, and be in
- * pt_regs. FIXME, this should be in pt_regs anyway (for debug).)
- */
- mfspr r0,SPRN_TAR
- std r0,THREAD_TAR(r3)
-
/* Event based branch registers */
mfspr r0, SPRN_BESCR
std r0, THREAD_BESCR(r3)
@@ -584,9 +575,34 @@ BEGIN_FTR_SECTION
ld r7,DSCR_DEFAULT@toc(2)
ld r0,THREAD_DSCR(r4)
cmpwi r6,0
+ li r8, FSCR_DSCR
bne 1f
ld r0,0(r7)
-1: cmpd r0,r25
+ b 3f
+1:
+ BEGIN_FTR_SECTION_NESTED(70)
+ mfspr r6, SPRN_FSCR
+ or r6, r6, r8
+ mtspr SPRN_FSCR, r6
+ BEGIN_FTR_SECTION_NESTED(69)
+ mfspr r6, SPRN_HFSCR
+ or r6, r6, r8
+ mtspr SPRN_HFSCR, r6
+ END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
+ b 4f
+ END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
+3:
+ BEGIN_FTR_SECTION_NESTED(70)
+ mfspr r6, SPRN_FSCR
+ andc r6, r6, r8
+ mtspr SPRN_FSCR, r6
+ BEGIN_FTR_SECTION_NESTED(69)
+ mfspr r6, SPRN_HFSCR
+ andc r6, r6, r8
+ mtspr SPRN_HFSCR, r6
+ END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69)
+ END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
+4: cmpd r0,r25
beq 2f
mtspr SPRN_DSCR,r0
2:
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 4e00d223b2e..902ca3c6b4b 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -848,7 +848,7 @@ hv_facility_unavailable_relon_trampoline:
. = 0x4f80
SET_SCRATCH0(r13)
EXCEPTION_PROLOG_0(PACA_EXGEN)
- b facility_unavailable_relon_hv
+ b hv_facility_unavailable_relon_hv
STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
#ifdef CONFIG_PPC_DENORMALISATION
@@ -1175,6 +1175,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
b .ret_from_except
STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
+ STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception)
.align 7
.globl __end_handlers
@@ -1188,7 +1189,7 @@ __end_handlers:
STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
- STD_RELON_EXCEPTION_HV_OOL(0xf80, facility_unavailable)
+ STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index b20ff173a67..0adab06ce5c 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -105,7 +105,7 @@ static int __init fail_iommu_debugfs(void)
struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
NULL, &fail_iommu);
- return PTR_RET(dir);
+ return PTR_ERR_OR_ZERO(dir);
}
late_initcall(fail_iommu_debugfs);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 2e51cde616d..c69440cef7a 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -362,7 +362,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
seq_printf(p, " Spurious interrupts\n");
- seq_printf(p, "%*s: ", prec, "CNT");
+ seq_printf(p, "%*s: ", prec, "PMI");
for_each_online_cpu(j)
seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
seq_printf(p, " Performance monitoring interrupts\n");
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index d92f3871e9c..e2a0a162299 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -35,7 +35,13 @@
#include <asm/vdso_datapage.h>
#include <asm/vio.h>
#include <asm/mmu.h>
+#include <asm/machdep.h>
+
+/*
+ * This isn't a module but we expose that to userspace
+ * via /proc so leave the definitions here
+ */
#define MODULE_VERS "1.9"
#define MODULE_NAME "lparcfg"
@@ -418,7 +424,8 @@ static void parse_em_data(struct seq_file *m)
{
unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
- if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
+ if (firmware_has_feature(FW_FEATURE_LPAR) &&
+ plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
}
@@ -677,7 +684,6 @@ static int lparcfg_open(struct inode *inode, struct file *file)
}
static const struct file_operations lparcfg_fops = {
- .owner = THIS_MODULE,
.read = seq_read,
.write = lparcfg_write,
.open = lparcfg_open,
@@ -699,14 +705,4 @@ static int __init lparcfg_init(void)
}
return 0;
}
-
-static void __exit lparcfg_cleanup(void)
-{
- remove_proc_subtree("powerpc/lparcfg", NULL);
-}
-
-module_init(lparcfg_init);
-module_exit(lparcfg_cleanup);
-MODULE_DESCRIPTION("Interface for LPAR configuration data");
-MODULE_AUTHOR("Dave Engebretsen");
-MODULE_LICENSE("GPL");
+machine_device_initcall(pseries, lparcfg_init);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 7d22a675fe1..2b4a9a4db7d 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1674,12 +1674,8 @@ void pcibios_scan_phb(struct pci_controller *hose)
/* Configure PCI Express settings */
if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
struct pci_bus *child;
- list_for_each_entry(child, &bus->children, node) {
- struct pci_dev *self = child->self;
- if (!self)
- continue;
- pcie_bus_configure_settings(child, self->pcie_mpss);
- }
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
}
}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index c517dbe705f..8083be20fe5 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -600,6 +600,16 @@ struct task_struct *__switch_to(struct task_struct *prev,
struct ppc64_tlb_batch *batch;
#endif
+ /* Back up the TAR across context switches.
+ * Note that the TAR is not available for use in the kernel. (To
+ * provide this, the TAR should be backed up/restored on exception
+ * entry/exit instead, and be in pt_regs. FIXME, this should be in
+ * pt_regs anyway (for debug).)
+ * Save the TAR here before we do treclaim/trecheckpoint as these
+ * will change the TAR.
+ */
+ save_tar(&prev->thread);
+
__switch_to_tm(prev);
#ifdef CONFIG_SMP
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index eb23ac92abb..1c14cd4a5e0 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -865,49 +865,10 @@ static int __init prom_reconfig_setup(void)
__initcall(prom_reconfig_setup);
#endif
-/* Find the device node for a given logical cpu number, also returns the cpu
- * local thread number (index in ibm,interrupt-server#s) if relevant and
- * asked for (non NULL)
- */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
- int hardid;
- struct device_node *np;
-
- hardid = get_hard_smp_processor_id(cpu);
-
- for_each_node_by_type(np, "cpu") {
- const u32 *intserv;
- unsigned int plen, t;
-
- /* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
- * fallback to "reg" property and assume no threads
- */
- intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
- &plen);
- if (intserv == NULL) {
- const u32 *reg = of_get_property(np, "reg", NULL);
- if (reg == NULL)
- continue;
- if (*reg == hardid) {
- if (thread)
- *thread = 0;
- return np;
- }
- } else {
- plen /= sizeof(u32);
- for (t = 0; t < plen; t++) {
- if (hardid == intserv[t]) {
- if (thread)
- *thread = t;
- return np;
- }
- }
- }
- }
- return NULL;
+ return (int)phys_id == get_hard_smp_processor_id(cpu);
}
-EXPORT_SYMBOL(of_get_cpu_node);
#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
static struct debugfs_blob_wrapper flat_dt_blob;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 389fb8077cc..fe6a58c9f0b 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -229,6 +229,8 @@ void __init early_setup(unsigned long dt_ptr)
/* Initialize the hash table or TLB handling */
early_init_mmu();
+ kvm_cma_reserve();
+
/*
* Reserve any gigantic pages requested on the command line.
* memblock needs to have been initialized by the time this is
@@ -609,8 +611,6 @@ void __init setup_arch(char **cmdline_p)
/* Initialize the MMU context management stuff */
mmu_context_init();
- kvm_linear_init();
-
/* Interrupt code needs to be 64K-aligned */
if ((unsigned long)_stext & 0xffff)
panic("Kernelbase not 64K-aligned (0x%lx)!\n",
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 65ab9e90937..cdcc156865e 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -1049,7 +1049,7 @@ static int __init rtc_init(void)
pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
- return PTR_RET(pdev);
+ return PTR_ERR_OR_ZERO(pdev);
}
module_init(rtc_init);
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 51be8fb2480..0554d1f6d70 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -233,6 +233,16 @@ dont_backup_fp:
std r5, _CCR(r7)
std r6, _XER(r7)
+
+ /* ******************** TAR, PPR, DSCR ********** */
+ mfspr r3, SPRN_TAR
+ mfspr r4, SPRN_PPR
+ mfspr r5, SPRN_DSCR
+
+ std r3, THREAD_TM_TAR(r12)
+ std r4, THREAD_TM_PPR(r12)
+ std r5, THREAD_TM_DSCR(r12)
+
/* MSR and flags: We don't change CRs, and we don't need to alter
* MSR.
*/
@@ -347,6 +357,16 @@ dont_restore_fp:
mtmsr r6 /* FP/Vec off again! */
restore_gprs:
+
+ /* ******************** TAR, PPR, DSCR ********** */
+ ld r4, THREAD_TM_TAR(r3)
+ ld r5, THREAD_TM_PPR(r3)
+ ld r6, THREAD_TM_DSCR(r3)
+
+ mtspr SPRN_TAR, r4
+ mtspr SPRN_PPR, r5
+ mtspr SPRN_DSCR, r6
+
/* ******************** CR,LR,CCR,MSR ********** */
ld r3, _CTR(r7)
ld r4, _LINK(r7)
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index bf33c22e38a..e435bc089ea 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -44,9 +44,7 @@
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/pmc.h>
-#ifdef CONFIG_PPC32
#include <asm/reg.h>
-#endif
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
@@ -1296,43 +1294,54 @@ void vsx_unavailable_exception(struct pt_regs *regs)
die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
}
+#ifdef CONFIG_PPC64
void facility_unavailable_exception(struct pt_regs *regs)
{
static char *facility_strings[] = {
- "FPU",
- "VMX/VSX",
- "DSCR",
- "PMU SPRs",
- "BHRB",
- "TM",
- "AT",
- "EBB",
- "TAR",
+ [FSCR_FP_LG] = "FPU",
+ [FSCR_VECVSX_LG] = "VMX/VSX",
+ [FSCR_DSCR_LG] = "DSCR",
+ [FSCR_PM_LG] = "PMU SPRs",
+ [FSCR_BHRB_LG] = "BHRB",
+ [FSCR_TM_LG] = "TM",
+ [FSCR_EBB_LG] = "EBB",
+ [FSCR_TAR_LG] = "TAR",
};
- char *facility, *prefix;
+ char *facility = "unknown";
u64 value;
+ u8 status;
+ bool hv;
- if (regs->trap == 0xf60) {
- value = mfspr(SPRN_FSCR);
- prefix = "";
- } else {
+ hv = (regs->trap == 0xf80);
+ if (hv)
value = mfspr(SPRN_HFSCR);
- prefix = "Hypervisor ";
+ else
+ value = mfspr(SPRN_FSCR);
+
+ status = value >> 56;
+ if (status == FSCR_DSCR_LG) {
+ /* User is accessing the DSCR. Set the inherit bit and allow
+ * the user to set it directly in future by setting via the
+ * H/FSCR DSCR bit.
+ */
+ current->thread.dscr_inherit = 1;
+ if (hv)
+ mtspr(SPRN_HFSCR, value | HFSCR_DSCR);
+ else
+ mtspr(SPRN_FSCR, value | FSCR_DSCR);
+ return;
}
- value = value >> 56;
+ if ((status < ARRAY_SIZE(facility_strings)) &&
+ facility_strings[status])
+ facility = facility_strings[status];
/* We restore the interrupt state now */
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
- if (value < ARRAY_SIZE(facility_strings))
- facility = facility_strings[value];
- else
- facility = "unknown";
-
pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
- prefix, facility, regs->nip, regs->msr);
+ hv ? "Hypervisor " : "", facility, regs->nip, regs->msr);
if (user_mode(regs)) {
_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
@@ -1341,6 +1350,7 @@ void facility_unavailable_exception(struct pt_regs *regs)
die("Unexpected facility unavailable exception", regs, SIGABRT);
}
+#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index eb643f86257..ffaef2cb101 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -72,6 +72,7 @@ config KVM_BOOK3S_64_HV
bool "KVM support for POWER7 and PPC970 using hypervisor mode in host"
depends on KVM_BOOK3S_64
select MMU_NOTIFIER
+ select CMA
---help---
Support running unmodified book3s_64 guest kernels in
virtual machines on POWER7 and PPC970 processors that have
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 008cd856c5b..6646c952c5e 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -81,6 +81,7 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
book3s_64_vio_hv.o \
book3s_hv_ras.o \
book3s_hv_builtin.o \
+ book3s_hv_cma.o \
$(kvm-book3s_64-builtin-xics-objs-y)
kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index 739bfbadb85..7e345e00661 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -182,10 +182,13 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
hva_t ptegp;
u64 pteg[16];
u64 avpn = 0;
+ u64 v, r;
+ u64 v_val, v_mask;
+ u64 eaddr_mask;
int i;
- u8 key = 0;
+ u8 pp, key = 0;
bool found = false;
- int second = 0;
+ bool second = false;
ulong mp_ea = vcpu->arch.magic_page_ea;
/* Magic page override */
@@ -208,8 +211,16 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
goto no_seg_found;
avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
+ v_val = avpn & HPTE_V_AVPN;
+
if (slbe->tb)
- avpn |= SLB_VSID_B_1T;
+ v_val |= SLB_VSID_B_1T;
+ if (slbe->large)
+ v_val |= HPTE_V_LARGE;
+ v_val |= HPTE_V_VALID;
+
+ v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID |
+ HPTE_V_SECONDARY;
do_second:
ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
@@ -227,91 +238,74 @@ do_second:
key = 4;
for (i=0; i<16; i+=2) {
- u64 v = pteg[i];
- u64 r = pteg[i+1];
-
- /* Valid check */
- if (!(v & HPTE_V_VALID))
- continue;
- /* Hash check */
- if ((v & HPTE_V_SECONDARY) != second)
- continue;
-
- /* AVPN compare */
- if (HPTE_V_COMPARE(avpn, v)) {
- u8 pp = (r & HPTE_R_PP) | key;
- int eaddr_mask = 0xFFF;
-
- gpte->eaddr = eaddr;
- gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu,
- eaddr,
- data);
- if (slbe->large)
- eaddr_mask = 0xFFFFFF;
- gpte->raddr = (r & HPTE_R_RPN) | (eaddr & eaddr_mask);
- gpte->may_execute = ((r & HPTE_R_N) ? false : true);
- gpte->may_read = false;
- gpte->may_write = false;
-
- switch (pp) {
- case 0:
- case 1:
- case 2:
- case 6:
- gpte->may_write = true;
- /* fall through */
- case 3:
- case 5:
- case 7:
- gpte->may_read = true;
- break;
- }
-
- dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
- "-> 0x%lx\n",
- eaddr, avpn, gpte->vpage, gpte->raddr);
+ /* Check all relevant fields of 1st dword */
+ if ((pteg[i] & v_mask) == v_val) {
found = true;
break;
}
}
- /* Update PTE R and C bits, so the guest's swapper knows we used the
- * page */
- if (found) {
- u32 oldr = pteg[i+1];
+ if (!found) {
+ if (second)
+ goto no_page_found;
+ v_val |= HPTE_V_SECONDARY;
+ second = true;
+ goto do_second;
+ }
- if (gpte->may_read) {
- /* Set the accessed flag */
- pteg[i+1] |= HPTE_R_R;
- }
- if (gpte->may_write) {
- /* Set the dirty flag */
- pteg[i+1] |= HPTE_R_C;
- } else {
- dprintk("KVM: Mapping read-only page!\n");
- }
+ v = pteg[i];
+ r = pteg[i+1];
+ pp = (r & HPTE_R_PP) | key;
+ eaddr_mask = 0xFFF;
+
+ gpte->eaddr = eaddr;
+ gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);
+ if (slbe->large)
+ eaddr_mask = 0xFFFFFF;
+ gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask);
+ gpte->may_execute = ((r & HPTE_R_N) ? false : true);
+ gpte->may_read = false;
+ gpte->may_write = false;
+
+ switch (pp) {
+ case 0:
+ case 1:
+ case 2:
+ case 6:
+ gpte->may_write = true;
+ /* fall through */
+ case 3:
+ case 5:
+ case 7:
+ gpte->may_read = true;
+ break;
+ }
- /* Write back into the PTEG */
- if (pteg[i+1] != oldr)
- copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
+ dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
+ "-> 0x%lx\n",
+ eaddr, avpn, gpte->vpage, gpte->raddr);
- if (!gpte->may_read)
- return -EPERM;
- return 0;
- } else {
- dprintk("KVM MMU: No PTE found (ea=0x%lx sdr1=0x%llx "
- "ptegp=0x%lx)\n",
- eaddr, to_book3s(vcpu)->sdr1, ptegp);
- for (i = 0; i < 16; i += 2)
- dprintk(" %02d: 0x%llx - 0x%llx (0x%llx)\n",
- i, pteg[i], pteg[i+1], avpn);
-
- if (!second) {
- second = HPTE_V_SECONDARY;
- goto do_second;
- }
+ /* Update PTE R and C bits, so the guest's swapper knows we used the
+ * page */
+ if (gpte->may_read) {
+ /* Set the accessed flag */
+ r |= HPTE_R_R;
+ }
+ if (data && gpte->may_write) {
+ /* Set the dirty flag -- XXX even if not writing */
+ r |= HPTE_R_C;
+ }
+
+ /* Write back into the PTEG */
+ if (pteg[i+1] != r) {
+ pteg[i+1] = r;
+ copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
}
+ if (!gpte->may_read)
+ return -EPERM;
+ return 0;
+
no_page_found:
return -ENOENT;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 710d31317d8..043eec8461e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -37,6 +37,8 @@
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>
+#include "book3s_hv_cma.h"
+
/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970 63
@@ -52,8 +54,8 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
{
unsigned long hpt;
struct revmap_entry *rev;
- struct kvmppc_linear_info *li;
- long order = kvm_hpt_order;
+ struct page *page = NULL;
+ long order = KVM_DEFAULT_HPT_ORDER;
if (htab_orderp) {
order = *htab_orderp;
@@ -61,26 +63,23 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
order = PPC_MIN_HPT_ORDER;
}
+ kvm->arch.hpt_cma_alloc = 0;
/*
- * If the user wants a different size from default,
* try first to allocate it from the kernel page allocator.
+ * We keep the CMA reserved for failed allocation.
*/
- hpt = 0;
- if (order != kvm_hpt_order) {
- hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
- __GFP_NOWARN, order - PAGE_SHIFT);
- if (!hpt)
- --order;
- }
+ hpt = __get_free_pages(GFP_KERNEL | __GFP_ZERO | __GFP_REPEAT |
+ __GFP_NOWARN, order - PAGE_SHIFT);
/* Next try to allocate from the preallocated pool */
if (!hpt) {
- li = kvm_alloc_hpt();
- if (li) {
- hpt = (ulong)li->base_virt;
- kvm->arch.hpt_li = li;
- order = kvm_hpt_order;
- }
+ VM_BUG_ON(order < KVM_CMA_CHUNK_ORDER);
+ page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT));
+ if (page) {
+ hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
+ kvm->arch.hpt_cma_alloc = 1;
+ } else
+ --order;
}
/* Lastly try successively smaller sizes from the page allocator */
@@ -118,8 +117,8 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
return 0;
out_freehpt:
- if (kvm->arch.hpt_li)
- kvm_release_hpt(kvm->arch.hpt_li);
+ if (kvm->arch.hpt_cma_alloc)
+ kvm_release_hpt(page, 1 << (order - PAGE_SHIFT));
else
free_pages(hpt, order - PAGE_SHIFT);
return -ENOMEM;
@@ -165,8 +164,9 @@ void kvmppc_free_hpt(struct kvm *kvm)
{
kvmppc_free_lpid(kvm->arch.lpid);
vfree(kvm->arch.revmap);
- if (kvm->arch.hpt_li)
- kvm_release_hpt(kvm->arch.hpt_li);
+ if (kvm->arch.hpt_cma_alloc)
+ kvm_release_hpt(virt_to_page(kvm->arch.hpt_virt),
+ 1 << (kvm->arch.hpt_order - PAGE_SHIFT));
else
free_pages(kvm->arch.hpt_virt,
kvm->arch.hpt_order - PAGE_SHIFT);
@@ -1579,7 +1579,7 @@ int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf)
ctx->first_pass = 1;
rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY;
- ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag);
+ ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC);
if (ret < 0) {
kvm_put_kvm(kvm);
return ret;
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index b2d3f3b2de7..54cf9bc94da 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -136,7 +136,7 @@ long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
mutex_unlock(&kvm->lock);
return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
- stt, O_RDWR);
+ stt, O_RDWR | O_CLOEXEC);
fail:
if (stt) {
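
Several hunks in this series add O_CLOEXEC to the anon-inode descriptors KVM hands back (kvm-htab above, kvm-spapr-tce here, kvm-rma further down), so the fds are not leaked into children across exec(). The sketch below uses only standard POSIX calls to show how close-on-exec can be checked on a descriptor; /dev/null merely stands in for a KVM fd.

/* Illustrative sketch only -- not part of the patch above. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Report whether close-on-exec is set on fd, using the standard F_GETFD query. */
static void report_cloexec(int fd)
{
	int flags = fcntl(fd, F_GETFD);

	if (flags < 0) {
		perror("fcntl(F_GETFD)");
		return;
	}
	printf("fd %d: FD_CLOEXEC is %s\n", fd,
	       (flags & FD_CLOEXEC) ? "set" : "clear");
}

int main(void)
{
	int plain = open("/dev/null", O_RDWR);
	int cloexec = open("/dev/null", O_RDWR | O_CLOEXEC);

	report_cloexec(plain);		/* clear: inherited across exec() */
	report_cloexec(cloexec);	/* set: closed automatically on exec() */

	close(plain);
	close(cloexec);
	return 0;
}
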
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 1f6344c4408..360ce68c980 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -458,6 +458,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
case SPRN_PMC4_GEKKO:
case SPRN_WPAR_GEKKO:
case SPRN_MSSSR0:
+ case SPRN_DABR:
break;
unprivileged:
default:
@@ -555,6 +556,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
case SPRN_PMC4_GEKKO:
case SPRN_WPAR_GEKKO:
case SPRN_MSSSR0:
+ case SPRN_DABR:
*spr_val = 0;
break;
default:
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2efa9dde741..b0ee3bc9ca7 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -680,13 +680,12 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
+ struct kvm_sregs *sregs)
{
int i;
- sregs->pvr = vcpu->arch.pvr;
-
memset(sregs, 0, sizeof(struct kvm_sregs));
+ sregs->pvr = vcpu->arch.pvr;
for (i = 0; i < vcpu->arch.slb_max; i++) {
sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
@@ -696,7 +695,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
- struct kvm_sregs *sregs)
+ struct kvm_sregs *sregs)
{
int i, j;
@@ -1511,10 +1510,10 @@ static inline int lpcr_rmls(unsigned long rma_size)
static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- struct kvmppc_linear_info *ri = vma->vm_file->private_data;
struct page *page;
+ struct kvm_rma_info *ri = vma->vm_file->private_data;
- if (vmf->pgoff >= ri->npages)
+ if (vmf->pgoff >= kvm_rma_pages)
return VM_FAULT_SIGBUS;
page = pfn_to_page(ri->base_pfn + vmf->pgoff);
@@ -1536,7 +1535,7 @@ static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
static int kvm_rma_release(struct inode *inode, struct file *filp)
{
- struct kvmppc_linear_info *ri = filp->private_data;
+ struct kvm_rma_info *ri = filp->private_data;
kvm_release_rma(ri);
return 0;
@@ -1549,18 +1548,27 @@ static const struct file_operations kvm_rma_fops = {
long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
{
- struct kvmppc_linear_info *ri;
long fd;
+ struct kvm_rma_info *ri;
+ /*
+ * Only do this on PPC970 in HV mode
+ */
+ if (!cpu_has_feature(CPU_FTR_HVMODE) ||
+ !cpu_has_feature(CPU_FTR_ARCH_201))
+ return -EINVAL;
+
+ if (!kvm_rma_pages)
+ return -EINVAL;
ri = kvm_alloc_rma();
if (!ri)
return -ENOMEM;
- fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR);
+ fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR | O_CLOEXEC);
if (fd < 0)
kvm_release_rma(ri);
- ret->rma_size = ri->npages << PAGE_SHIFT;
+ ret->rma_size = kvm_rma_pages << PAGE_SHIFT;
return fd;
}
@@ -1725,7 +1733,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
int err = 0;
struct kvm *kvm = vcpu->kvm;
- struct kvmppc_linear_info *ri = NULL;
+ struct kvm_rma_info *ri = NULL;
unsigned long hva;
struct kvm_memory_slot *memslot;
struct vm_area_struct *vma;
@@ -1803,13 +1811,13 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
} else {
/* Set up to use an RMO region */
- rma_size = ri->npages;
+ rma_size = kvm_rma_pages;
if (rma_size > memslot->npages)
rma_size = memslot->npages;
rma_size <<= PAGE_SHIFT;
rmls = lpcr_rmls(rma_size);
err = -EINVAL;
- if (rmls < 0) {
+ if ((long)rmls < 0) {
pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
goto out_srcu;
}
@@ -1831,14 +1839,14 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
/* POWER7 */
lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
lpcr |= rmls << LPCR_RMLS_SH;
- kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
+ kvm->arch.rmor = ri->base_pfn << PAGE_SHIFT;
}
kvm->arch.lpcr = lpcr;
pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
/* Initialize phys addrs of pages in RMO */
- npages = ri->npages;
+ npages = kvm_rma_pages;
porder = __ilog2(npages);
physp = memslot->arch.slot_phys;
if (physp) {
@@ -1874,7 +1882,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
/* Allocate the guest's logical partition ID */
lpid = kvmppc_alloc_lpid();
- if (lpid < 0)
+ if ((long)lpid < 0)
return -ENOMEM;
kvm->arch.lpid = lpid;
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index ec0a9e5de10..8cd0daebb82 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -13,33 +13,34 @@
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/init.h>
+#include <linux/memblock.h>
+#include <linux/sizes.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
-#define KVM_LINEAR_RMA 0
-#define KVM_LINEAR_HPT 1
-
-static void __init kvm_linear_init_one(ulong size, int count, int type);
-static struct kvmppc_linear_info *kvm_alloc_linear(int type);
-static void kvm_release_linear(struct kvmppc_linear_info *ri);
-
-int kvm_hpt_order = KVM_DEFAULT_HPT_ORDER;
-EXPORT_SYMBOL_GPL(kvm_hpt_order);
-
-/*************** RMA *************/
-
+#include "book3s_hv_cma.h"
+/*
+ * Hash page table alignment on newer cpus (CPU_FTR_ARCH_206)
+ * should be a power of 2.
+ */
+#define HPT_ALIGN_PAGES ((1 << 18) >> PAGE_SHIFT) /* 256k */
+/*
+ * By default we reserve 5% of memory for hash pagetable allocation.
+ */
+static unsigned long kvm_cma_resv_ratio = 5;
/*
- * This maintains a list of RMAs (real mode areas) for KVM guests to use.
+ * We allocate RMAs (real mode areas) for KVM guests from the KVM CMA area.
* Each RMA has to be physically contiguous and of a size that the
* hardware supports. PPC970 and POWER7 support 64MB, 128MB and 256MB,
 * and other larger sizes. Since we are unlikely to be able to allocate that
* much physically contiguous memory after the system is up and running,
- * we preallocate a set of RMAs in early boot for KVM to use.
+ * we preallocate a set of RMAs in early boot using CMA.
+ * The RMA size should be a power of 2.
*/
-static unsigned long kvm_rma_size = 64 << 20; /* 64MB */
-static unsigned long kvm_rma_count;
+unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT; /* 128MB */
+EXPORT_SYMBOL_GPL(kvm_rma_pages);
/* Work out RMLS (real mode limit selector) field value for a given RMA size.
Assumes POWER7 or PPC970. */
@@ -69,165 +70,114 @@ static inline int lpcr_rmls(unsigned long rma_size)
static int __init early_parse_rma_size(char *p)
{
- if (!p)
- return 1;
+ unsigned long kvm_rma_size;
+ pr_debug("%s(%s)\n", __func__, p);
+ if (!p)
+ return -EINVAL;
kvm_rma_size = memparse(p, &p);
-
+ /*
+ * Check that the requested size is one supported in hardware
+ */
+ if (lpcr_rmls(kvm_rma_size) < 0) {
+ pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
+ return -EINVAL;
+ }
+ kvm_rma_pages = kvm_rma_size >> PAGE_SHIFT;
return 0;
}
early_param("kvm_rma_size", early_parse_rma_size);
-static int __init early_parse_rma_count(char *p)
+struct kvm_rma_info *kvm_alloc_rma()
{
- if (!p)
- return 1;
-
- kvm_rma_count = simple_strtoul(p, NULL, 0);
-
- return 0;
-}
-early_param("kvm_rma_count", early_parse_rma_count);
-
-struct kvmppc_linear_info *kvm_alloc_rma(void)
-{
- return kvm_alloc_linear(KVM_LINEAR_RMA);
+ struct page *page;
+ struct kvm_rma_info *ri;
+
+ ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
+ if (!ri)
+ return NULL;
+ page = kvm_alloc_cma(kvm_rma_pages, kvm_rma_pages);
+ if (!page)
+ goto err_out;
+ atomic_set(&ri->use_count, 1);
+ ri->base_pfn = page_to_pfn(page);
+ return ri;
+err_out:
+ kfree(ri);
+ return NULL;
}
EXPORT_SYMBOL_GPL(kvm_alloc_rma);
-void kvm_release_rma(struct kvmppc_linear_info *ri)
+void kvm_release_rma(struct kvm_rma_info *ri)
{
- kvm_release_linear(ri);
+ if (atomic_dec_and_test(&ri->use_count)) {
+ kvm_release_cma(pfn_to_page(ri->base_pfn), kvm_rma_pages);
+ kfree(ri);
+ }
}
EXPORT_SYMBOL_GPL(kvm_release_rma);
-/*************** HPT *************/
-
-/*
- * This maintains a list of big linear HPT tables that contain the GVA->HPA
- * memory mappings. If we don't reserve those early on, we might not be able
- * to get a big (usually 16MB) linear memory region from the kernel anymore.
- */
-
-static unsigned long kvm_hpt_count;
-
-static int __init early_parse_hpt_count(char *p)
+static int __init early_parse_kvm_cma_resv(char *p)
{
+ pr_debug("%s(%s)\n", __func__, p);
if (!p)
- return 1;
-
- kvm_hpt_count = simple_strtoul(p, NULL, 0);
-
- return 0;
+ return -EINVAL;
+ return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
-early_param("kvm_hpt_count", early_parse_hpt_count);
+early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
-struct kvmppc_linear_info *kvm_alloc_hpt(void)
+struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
- return kvm_alloc_linear(KVM_LINEAR_HPT);
+ unsigned long align_pages = HPT_ALIGN_PAGES;
+
+ /* Old CPUs require HPT aligned on a multiple of its size */
+ if (!cpu_has_feature(CPU_FTR_ARCH_206))
+ align_pages = nr_pages;
+ return kvm_alloc_cma(nr_pages, align_pages);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
-void kvm_release_hpt(struct kvmppc_linear_info *li)
+void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
- kvm_release_linear(li);
+ kvm_release_cma(page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);
-/*************** generic *************/
-
-static LIST_HEAD(free_linears);
-static DEFINE_SPINLOCK(linear_lock);
-
-static void __init kvm_linear_init_one(ulong size, int count, int type)
-{
- unsigned long i;
- unsigned long j, npages;
- void *linear;
- struct page *pg;
- const char *typestr;
- struct kvmppc_linear_info *linear_info;
-
- if (!count)
- return;
-
- typestr = (type == KVM_LINEAR_RMA) ? "RMA" : "HPT";
-
- npages = size >> PAGE_SHIFT;
- linear_info = alloc_bootmem(count * sizeof(struct kvmppc_linear_info));
- for (i = 0; i < count; ++i) {
- linear = alloc_bootmem_align(size, size);
- pr_debug("Allocated KVM %s at %p (%ld MB)\n", typestr, linear,
- size >> 20);
- linear_info[i].base_virt = linear;
- linear_info[i].base_pfn = __pa(linear) >> PAGE_SHIFT;
- linear_info[i].npages = npages;
- linear_info[i].type = type;
- list_add_tail(&linear_info[i].list, &free_linears);
- atomic_set(&linear_info[i].use_count, 0);
-
- pg = pfn_to_page(linear_info[i].base_pfn);
- for (j = 0; j < npages; ++j) {
- atomic_inc(&pg->_count);
- ++pg;
- }
- }
-}
-
-static struct kvmppc_linear_info *kvm_alloc_linear(int type)
-{
- struct kvmppc_linear_info *ri, *ret;
-
- ret = NULL;
- spin_lock(&linear_lock);
- list_for_each_entry(ri, &free_linears, list) {
- if (ri->type != type)
- continue;
-
- list_del(&ri->list);
- atomic_inc(&ri->use_count);
- memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
- ret = ri;
- break;
- }
- spin_unlock(&linear_lock);
- return ret;
-}
-
-static void kvm_release_linear(struct kvmppc_linear_info *ri)
-{
- if (atomic_dec_and_test(&ri->use_count)) {
- spin_lock(&linear_lock);
- list_add_tail(&ri->list, &free_linears);
- spin_unlock(&linear_lock);
-
- }
-}
-
-/*
- * Called at boot time while the bootmem allocator is active,
- * to allocate contiguous physical memory for the hash page
- * tables for guests.
+/**
+ * kvm_cma_reserve() - reserve area for kvm hash pagetable
+ *
+ * This function reserves memory from early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory.
*/
-void __init kvm_linear_init(void)
+void __init kvm_cma_reserve(void)
{
- /* HPT */
- kvm_linear_init_one(1 << kvm_hpt_order, kvm_hpt_count, KVM_LINEAR_HPT);
-
- /* RMA */
- /* Only do this on PPC970 in HV mode */
- if (!cpu_has_feature(CPU_FTR_HVMODE) ||
- !cpu_has_feature(CPU_FTR_ARCH_201))
- return;
-
- if (!kvm_rma_size || !kvm_rma_count)
- return;
-
- /* Check that the requested size is one supported in hardware */
- if (lpcr_rmls(kvm_rma_size) < 0) {
- pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
- return;
+ unsigned long align_size;
+ struct memblock_region *reg;
+ phys_addr_t selected_size = 0;
+ /*
+ * We cannot use memblock_phys_mem_size() here, because
+ * memblock_analyze() has not been called yet.
+ */
+ for_each_memblock(memory, reg)
+ selected_size += memblock_region_memory_end_pfn(reg) -
+ memblock_region_memory_base_pfn(reg);
+
+ selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
+ if (selected_size) {
+ pr_debug("%s: reserving %ld MiB for global area\n", __func__,
+ (unsigned long)selected_size / SZ_1M);
+ /*
+ * Old CPUs require the HPT to be aligned on a multiple of its size,
+ * so for them make the alignment the maximum size we could request.
+ */
+ if (!cpu_has_feature(CPU_FTR_ARCH_206))
+ align_size = __rounddown_pow_of_two(selected_size);
+ else
+ align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
+
+ align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
+ kvm_cma_declare_contiguous(selected_size, align_size);
}
-
- kvm_linear_init_one(kvm_rma_size, kvm_rma_count, KVM_LINEAR_RMA);
}
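
kvm_cma_reserve() above sizes the reservation as kvm_cma_resv_ratio percent of memory (5% by default), aligns it to HPT_ALIGN_PAGES on CPU_FTR_ARCH_206 machines or to the rounded-down reservation size on older CPUs, and never lets the alignment drop below the RMA size. A stand-alone sketch of that arithmetic; the 4K PAGE_SHIFT and the 16 GiB memory size are assumptions used only for the worked numbers.

/* Illustrative sketch only -- not part of the patch above. */
#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4K pages for illustration */
#define SZ_1M		(1UL << 20)
#define HPT_ALIGN	(1UL << 18)		/* 256K, as HPT_ALIGN_PAGES above */
#define RMA_BYTES	(1UL << 27)		/* 128MB, as kvm_rma_pages above */

static unsigned long rounddown_pow_of_two(unsigned long x)
{
	unsigned long r = 1;

	while (r <= x / 2)
		r <<= 1;
	return r;
}

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

int main(void)
{
	unsigned long total_mem = 16UL << 30;	/* pretend the machine has 16 GiB */
	unsigned long resv_ratio = 5;		/* default kvm_cma_resv_ratio */
	unsigned long selected_size, align_size;
	int new_cpu = 1;			/* CPU_FTR_ARCH_206 present? */

	/* 5% of memory, computed in pages and converted back to bytes as above. */
	selected_size = ((total_mem >> PAGE_SHIFT) * resv_ratio / 100) << PAGE_SHIFT;

	/* Old CPUs need the HPT aligned to its own size; newer ones only to 256K. */
	align_size = new_cpu ? HPT_ALIGN : rounddown_pow_of_two(selected_size);
	align_size = max_ul(RMA_BYTES, align_size);

	printf("reserve %lu MiB, aligned to %lu MiB\n",
	       selected_size / SZ_1M, align_size / SZ_1M);
	return 0;
}
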
diff --git a/arch/powerpc/kvm/book3s_hv_cma.c b/arch/powerpc/kvm/book3s_hv_cma.c
new file mode 100644
index 00000000000..d9d3d8553d5
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_cma.c
@@ -0,0 +1,240 @@
+/*
+ * Contiguous Memory Allocator for ppc KVM hash pagetable based on CMA
+ * for DMA mapping framework
+ *
+ * Copyright IBM Corporation, 2013
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version of the license.
+ *
+ */
+#define pr_fmt(fmt) "kvm_cma: " fmt
+
+#ifdef CONFIG_CMA_DEBUG
+#ifndef DEBUG
+# define DEBUG
+#endif
+#endif
+
+#include <linux/memblock.h>
+#include <linux/mutex.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+
+#include "book3s_hv_cma.h"
+
+struct kvm_cma {
+ unsigned long base_pfn;
+ unsigned long count;
+ unsigned long *bitmap;
+};
+
+static DEFINE_MUTEX(kvm_cma_mutex);
+static struct kvm_cma kvm_cma_area;
+
+/**
+ * kvm_cma_declare_contiguous() - reserve area for contiguous memory handling
+ * for kvm hash pagetable
+ * @size: Size of the reserved memory.
+ * @alignment: Alignment for the contiguous memory area
+ *
+ * This function reserves memory for the kvm cma area. It should be
+ * called by arch code while the early allocator (memblock or bootmem)
+ * is still active.
+ */
+long __init kvm_cma_declare_contiguous(phys_addr_t size, phys_addr_t alignment)
+{
+ long base_pfn;
+ phys_addr_t addr;
+ struct kvm_cma *cma = &kvm_cma_area;
+
+ pr_debug("%s(size %lx)\n", __func__, (unsigned long)size);
+
+ if (!size)
+ return -EINVAL;
+ /*
+ * Sanitise input arguments.
+ * We should be pageblock aligned for CMA.
+ */
+ alignment = max(alignment, (phys_addr_t)(PAGE_SIZE << pageblock_order));
+ size = ALIGN(size, alignment);
+ /*
+ * Reserve memory
+ * Use __memblock_alloc_base() since
+ * memblock_alloc_base() panic()s.
+ */
+ addr = __memblock_alloc_base(size, alignment, 0);
+ if (!addr) {
+ base_pfn = -ENOMEM;
+ goto err;
+ } else
+ base_pfn = PFN_DOWN(addr);
+
+ /*
+ * Each reserved area must be initialised later, when more kernel
+ * subsystems (like slab allocator) are available.
+ */
+ cma->base_pfn = base_pfn;
+ cma->count = size >> PAGE_SHIFT;
+ pr_info("CMA: reserved %ld MiB\n", (unsigned long)size / SZ_1M);
+ return 0;
+err:
+ pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
+ return base_pfn;
+}
+
+/**
+ * kvm_alloc_cma() - allocate pages from contiguous area
+ * @nr_pages: Requested number of pages.
+ * @align_pages: Requested alignment in number of pages
+ *
+ * This function allocates a memory buffer for the hash pagetable.
+ */
+struct page *kvm_alloc_cma(unsigned long nr_pages, unsigned long align_pages)
+{
+ int ret;
+ struct page *page = NULL;
+ struct kvm_cma *cma = &kvm_cma_area;
+ unsigned long chunk_count, nr_chunk;
+ unsigned long mask, pfn, pageno, start = 0;
+
+
+ if (!cma || !cma->count)
+ return NULL;
+
+ pr_debug("%s(cma %p, count %lu, align pages %lu)\n", __func__,
+ (void *)cma, nr_pages, align_pages);
+
+ if (!nr_pages)
+ return NULL;
+ /*
+ * Align the mask with the chunk size; each bitmap bit tracks one chunk of pages.
+ */
+ VM_BUG_ON(!is_power_of_2(align_pages));
+ mask = (align_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT)) - 1;
+ BUILD_BUG_ON(PAGE_SHIFT > KVM_CMA_CHUNK_ORDER);
+
+ chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+ nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+
+ mutex_lock(&kvm_cma_mutex);
+ for (;;) {
+ pageno = bitmap_find_next_zero_area(cma->bitmap, chunk_count,
+ start, nr_chunk, mask);
+ if (pageno >= chunk_count)
+ break;
+
+ pfn = cma->base_pfn + (pageno << (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT));
+ ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA);
+ if (ret == 0) {
+ bitmap_set(cma->bitmap, pageno, nr_chunk);
+ page = pfn_to_page(pfn);
+ memset(pfn_to_kaddr(pfn), 0, nr_pages << PAGE_SHIFT);
+ break;
+ } else if (ret != -EBUSY) {
+ break;
+ }
+ pr_debug("%s(): memory range at %p is busy, retrying\n",
+ __func__, pfn_to_page(pfn));
+ /* try again with a slightly different memory target */
+ start = pageno + mask + 1;
+ }
+ mutex_unlock(&kvm_cma_mutex);
+ pr_debug("%s(): returned %p\n", __func__, page);
+ return page;
+}
+
+/**
+ * kvm_release_cma() - release allocated pages for hash pagetable
+ * @pages: Allocated pages.
+ * @nr_pages: Number of allocated pages.
+ *
+ * This function releases memory allocated by kvm_alloc_cma().
+ * It returns false when the provided pages do not belong to the contiguous
+ * area, and true otherwise.
+ */
+bool kvm_release_cma(struct page *pages, unsigned long nr_pages)
+{
+ unsigned long pfn;
+ unsigned long nr_chunk;
+ struct kvm_cma *cma = &kvm_cma_area;
+
+ if (!cma || !pages)
+ return false;
+
+ pr_debug("%s(page %p count %lu)\n", __func__, (void *)pages, nr_pages);
+
+ pfn = page_to_pfn(pages);
+
+ if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
+ return false;
+
+ VM_BUG_ON(pfn + nr_pages > cma->base_pfn + cma->count);
+ nr_chunk = nr_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+
+ mutex_lock(&kvm_cma_mutex);
+ bitmap_clear(cma->bitmap,
+ (pfn - cma->base_pfn) >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT),
+ nr_chunk);
+ free_contig_range(pfn, nr_pages);
+ mutex_unlock(&kvm_cma_mutex);
+
+ return true;
+}
+
+static int __init kvm_cma_activate_area(unsigned long base_pfn,
+ unsigned long count)
+{
+ unsigned long pfn = base_pfn;
+ unsigned i = count >> pageblock_order;
+ struct zone *zone;
+
+ WARN_ON_ONCE(!pfn_valid(pfn));
+ zone = page_zone(pfn_to_page(pfn));
+ do {
+ unsigned j;
+ base_pfn = pfn;
+ for (j = pageblock_nr_pages; j; --j, pfn++) {
+ WARN_ON_ONCE(!pfn_valid(pfn));
+ /*
+ * alloc_contig_range requires the pfn range
+ * specified to be in the same zone. Make this
+ * simple by forcing the entire CMA resv range
+ * to be in the same zone.
+ */
+ if (page_zone(pfn_to_page(pfn)) != zone)
+ return -EINVAL;
+ }
+ init_cma_reserved_pageblock(pfn_to_page(base_pfn));
+ } while (--i);
+ return 0;
+}
+
+static int __init kvm_cma_init_reserved_areas(void)
+{
+ int bitmap_size, ret;
+ unsigned long chunk_count;
+ struct kvm_cma *cma = &kvm_cma_area;
+
+ pr_debug("%s()\n", __func__);
+ if (!cma->count)
+ return 0;
+ chunk_count = cma->count >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
+ bitmap_size = BITS_TO_LONGS(chunk_count) * sizeof(long);
+ cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!cma->bitmap)
+ return -ENOMEM;
+
+ ret = kvm_cma_activate_area(cma->base_pfn, cma->count);
+ if (ret)
+ goto error;
+ return 0;
+
+error:
+ kfree(cma->bitmap);
+ return ret;
+}
+core_initcall(kvm_cma_init_reserved_areas);
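
The allocator above does its bookkeeping in 256 KB chunks (KVM_CMA_CHUNK_ORDER = 18): one bitmap bit per chunk, with page counts and the alignment mask converted into chunk units before bitmap_find_next_zero_area() is consulted. A worked sketch of those conversions; the 64K PAGE_SHIFT and the sizes in main() are assumptions for illustration.

/* Illustrative sketch only -- not part of the patch above. */
#include <stdio.h>

#define PAGE_SHIFT		16	/* assumed 64K pages, common on ppc64 */
#define KVM_CMA_CHUNK_ORDER	18	/* 256K chunks, as in book3s_hv_cma.h */

int main(void)
{
	unsigned long area_pages  = 1UL << 17;	/* pretend an 8 GiB CMA area was reserved */
	unsigned long nr_pages    = 1UL << 12;	/* request: 256 MiB worth of pages */
	unsigned long align_pages = 1UL << 2;	/* HPT_ALIGN_PAGES with 64K pages */

	/* Same conversions as kvm_alloc_cma(): pages -> 256K chunks. */
	unsigned long chunk_count = area_pages  >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
	unsigned long nr_chunk    = nr_pages    >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
	unsigned long mask        = (align_pages >> (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT)) - 1;

	printf("bitmap bits: %lu, chunks needed: %lu, align mask: 0x%lx\n",
	       chunk_count, nr_chunk, mask);
	return 0;
}
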
diff --git a/arch/powerpc/kvm/book3s_hv_cma.h b/arch/powerpc/kvm/book3s_hv_cma.h
new file mode 100644
index 00000000000..655144f75fa
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_cma.h
@@ -0,0 +1,27 @@
+/*
+ * Contiguous Memory Allocator for ppc KVM hash pagetable based on CMA
+ * for DMA mapping framework
+ *
+ * Copyright IBM Corporation, 2013
+ * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version of the license.
+ *
+ */
+
+#ifndef __POWERPC_KVM_CMA_ALLOC_H__
+#define __POWERPC_KVM_CMA_ALLOC_H__
+/*
+ * Both RMA and hash page allocations will be a multiple of 256K.
+ */
+#define KVM_CMA_CHUNK_ORDER 18
+
+extern struct page *kvm_alloc_cma(unsigned long nr_pages,
+ unsigned long align_pages);
+extern bool kvm_release_cma(struct page *pages, unsigned long nr_pages);
+extern long kvm_cma_declare_contiguous(phys_addr_t size,
+ phys_addr_t alignment) __init;
+#endif
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index fc25689a9f3..45e30d6e462 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -383,6 +383,80 @@ static inline int try_lock_tlbie(unsigned int *lock)
return old == 0;
}
+/*
+ * tlbie/tlbiel is a bit different on the PPC970 compared to later
+ * processors such as POWER7; the large page bit is in the instruction
+ * not RB, and the top 16 bits and the bottom 12 bits of the VA
+ * in RB must be 0.
+ */
+static void do_tlbies_970(struct kvm *kvm, unsigned long *rbvalues,
+ long npages, int global, bool need_sync)
+{
+ long i;
+
+ if (global) {
+ while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
+ cpu_relax();
+ if (need_sync)
+ asm volatile("ptesync" : : : "memory");
+ for (i = 0; i < npages; ++i) {
+ unsigned long rb = rbvalues[i];
+
+ if (rb & 1) /* large page */
+ asm volatile("tlbie %0,1" : :
+ "r" (rb & 0x0000fffffffff000ul));
+ else
+ asm volatile("tlbie %0,0" : :
+ "r" (rb & 0x0000fffffffff000ul));
+ }
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+ kvm->arch.tlbie_lock = 0;
+ } else {
+ if (need_sync)
+ asm volatile("ptesync" : : : "memory");
+ for (i = 0; i < npages; ++i) {
+ unsigned long rb = rbvalues[i];
+
+ if (rb & 1) /* large page */
+ asm volatile("tlbiel %0,1" : :
+ "r" (rb & 0x0000fffffffff000ul));
+ else
+ asm volatile("tlbiel %0,0" : :
+ "r" (rb & 0x0000fffffffff000ul));
+ }
+ asm volatile("ptesync" : : : "memory");
+ }
+}
+
+static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
+ long npages, int global, bool need_sync)
+{
+ long i;
+
+ if (cpu_has_feature(CPU_FTR_ARCH_201)) {
+ /* PPC970 tlbie instruction is a bit different */
+ do_tlbies_970(kvm, rbvalues, npages, global, need_sync);
+ return;
+ }
+ if (global) {
+ while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
+ cpu_relax();
+ if (need_sync)
+ asm volatile("ptesync" : : : "memory");
+ for (i = 0; i < npages; ++i)
+ asm volatile(PPC_TLBIE(%1,%0) : :
+ "r" (rbvalues[i]), "r" (kvm->arch.lpid));
+ asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+ kvm->arch.tlbie_lock = 0;
+ } else {
+ if (need_sync)
+ asm volatile("ptesync" : : : "memory");
+ for (i = 0; i < npages; ++i)
+ asm volatile("tlbiel %0" : : "r" (rbvalues[i]));
+ asm volatile("ptesync" : : : "memory");
+ }
+}
+
long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
unsigned long pte_index, unsigned long avpn,
unsigned long *hpret)
@@ -408,19 +482,7 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
if (v & HPTE_V_VALID) {
hpte[0] &= ~HPTE_V_VALID;
rb = compute_tlbie_rb(v, hpte[1], pte_index);
- if (global_invalidates(kvm, flags)) {
- while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
- cpu_relax();
- asm volatile("ptesync" : : : "memory");
- asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
- : : "r" (rb), "r" (kvm->arch.lpid));
- asm volatile("ptesync" : : : "memory");
- kvm->arch.tlbie_lock = 0;
- } else {
- asm volatile("ptesync" : : : "memory");
- asm volatile("tlbiel %0" : : "r" (rb));
- asm volatile("ptesync" : : : "memory");
- }
+ do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
/* Read PTE low word after tlbie to get final R/C values */
remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
}
@@ -448,12 +510,11 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
unsigned long *hp, *hptes[4], tlbrb[4];
long int i, j, k, n, found, indexes[4];
unsigned long flags, req, pte_index, rcbits;
- long int local = 0;
+ int global;
long int ret = H_SUCCESS;
struct revmap_entry *rev, *revs[4];
- if (atomic_read(&kvm->online_vcpus) == 1)
- local = 1;
+ global = global_invalidates(kvm, 0);
for (i = 0; i < 4 && ret == H_SUCCESS; ) {
n = 0;
for (; i < 4; ++i) {
@@ -529,22 +590,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
break;
/* Now that we've collected a batch, do the tlbies */
- if (!local) {
- while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
- cpu_relax();
- asm volatile("ptesync" : : : "memory");
- for (k = 0; k < n; ++k)
- asm volatile(PPC_TLBIE(%1,%0) : :
- "r" (tlbrb[k]),
- "r" (kvm->arch.lpid));
- asm volatile("eieio; tlbsync; ptesync" : : : "memory");
- kvm->arch.tlbie_lock = 0;
- } else {
- asm volatile("ptesync" : : : "memory");
- for (k = 0; k < n; ++k)
- asm volatile("tlbiel %0" : : "r" (tlbrb[k]));
- asm volatile("ptesync" : : : "memory");
- }
+ do_tlbies(kvm, tlbrb, n, global, true);
/* Read PTE low words after tlbie to get final R/C values */
for (k = 0; k < n; ++k) {
@@ -603,19 +649,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
if (v & HPTE_V_VALID) {
rb = compute_tlbie_rb(v, r, pte_index);
hpte[0] = v & ~HPTE_V_VALID;
- if (global_invalidates(kvm, flags)) {
- while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
- cpu_relax();
- asm volatile("ptesync" : : : "memory");
- asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
- : : "r" (rb), "r" (kvm->arch.lpid));
- asm volatile("ptesync" : : : "memory");
- kvm->arch.tlbie_lock = 0;
- } else {
- asm volatile("ptesync" : : : "memory");
- asm volatile("tlbiel %0" : : "r" (rb));
- asm volatile("ptesync" : : : "memory");
- }
+ do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
/*
* If the host has this page as readonly but the guest
* wants to make it read/write, reduce the permissions.
@@ -686,13 +720,7 @@ void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
hptep[0] &= ~HPTE_V_VALID;
rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
- while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
- cpu_relax();
- asm volatile("ptesync" : : : "memory");
- asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
- : : "r" (rb), "r" (kvm->arch.lpid));
- asm volatile("ptesync" : : : "memory");
- kvm->arch.tlbie_lock = 0;
+ do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
@@ -706,12 +734,7 @@ void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
/* modify only the second-last byte, which contains the ref bit */
*((char *)hptep + 14) = rbyte;
- while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
- cpu_relax();
- asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
- : : "r" (rb), "r" (kvm->arch.lpid));
- asm volatile("ptesync" : : : "memory");
- kvm->arch.tlbie_lock = 0;
+ do_tlbies(kvm, &rb, 1, 1, false);
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index b02f91e4c70..60dce5bfab3 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1381,7 +1381,7 @@ hcall_try_real_mode:
cmpldi r3,hcall_real_table_end - hcall_real_table
bge guest_exit_cont
LOAD_REG_ADDR(r4, hcall_real_table)
- lwzx r3,r3,r4
+ lwax r3,r3,r4
cmpwi r3,0
beq guest_exit_cont
add r3,r3,r4
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 48cbbf86295..17cfae5497a 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -92,6 +92,11 @@ kvm_start_lightweight:
PPC_LL r3, VCPU_HFLAGS(r4)
rldicl r3, r3, 0, 63 /* r3 &= 1 */
stb r3, HSTATE_RESTORE_HID5(r13)
+
+ /* Load up guest SPRG3 value, since it's user readable */
+ ld r3, VCPU_SHARED(r4)
+ ld r3, VCPU_SHARED_SPRG3(r3)
+ mtspr SPRN_SPRG3, r3
#endif /* CONFIG_PPC_BOOK3S_64 */
PPC_LL r4, VCPU_SHADOW_MSR(r4) /* get shadow_msr */
@@ -123,6 +128,15 @@ kvmppc_handler_highmem:
/* R7 = vcpu */
PPC_LL r7, GPR4(r1)
+#ifdef CONFIG_PPC_BOOK3S_64
+ /*
+ * Reload kernel SPRG3 value.
+ * No need to save guest value as usermode can't modify SPRG3.
+ */
+ ld r3, PACA_SPRG3(r13)
+ mtspr SPRN_SPRG3, r3
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
PPC_STL r14, VCPU_GPR(R14)(r7)
PPC_STL r15, VCPU_GPR(R15)(r7)
PPC_STL r16, VCPU_GPR(R16)(r7)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 19498a567a8..27db1e66595 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -468,7 +468,8 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
* both the traditional FP registers and the added VSX
* registers into thread.fpr[].
*/
- giveup_fpu(current);
+ if (current->thread.regs->msr & MSR_FP)
+ giveup_fpu(current);
for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];
@@ -483,7 +484,8 @@ void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
- giveup_altivec(current);
+ if (current->thread.regs->msr & MSR_VEC)
+ giveup_altivec(current);
memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
vcpu->arch.vscr = t->vscr;
}
@@ -575,8 +577,6 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif
- current->thread.regs->msr |= msr;
-
if (msr & MSR_FP) {
for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
@@ -598,12 +598,32 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
#endif
}
+ current->thread.regs->msr |= msr;
vcpu->arch.guest_owned_ext |= msr;
kvmppc_recalc_shadow_msr(vcpu);
return RESUME_GUEST;
}
+/*
+ * Kernel code using FP or VMX could have flushed guest state to
+ * the thread_struct; if so, get it back now.
+ */
+static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
+{
+ unsigned long lost_ext;
+
+ lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
+ if (!lost_ext)
+ return;
+
+ if (lost_ext & MSR_FP)
+ kvmppc_load_up_fpu();
+ if (lost_ext & MSR_VEC)
+ kvmppc_load_up_altivec();
+ current->thread.regs->msr |= lost_ext;
+}
+
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int exit_nr)
{
@@ -772,7 +792,7 @@ program_interrupt:
}
case BOOK3S_INTERRUPT_SYSCALL:
if (vcpu->arch.papr_enabled &&
- (kvmppc_get_last_inst(vcpu) == 0x44000022) &&
+ (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
!(vcpu->arch.shared->msr & MSR_PR)) {
/* SC 1 papr hypercalls */
ulong cmd = kvmppc_get_gpr(vcpu, 3);
@@ -890,8 +910,9 @@ program_interrupt:
local_irq_enable();
r = s;
} else {
- kvmppc_lazy_ee_enable();
+ kvmppc_fix_ee_before_entry();
}
+ kvmppc_handle_lost_ext(vcpu);
}
trace_kvm_book3s_reenter(r, vcpu);
@@ -1047,11 +1068,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
if (err)
goto free_shadow_vcpu;
+ err = -ENOMEM;
p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
- /* the real shared page fills the last 4k of our page */
- vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096);
if (!p)
goto uninit_vcpu;
+ /* the real shared page fills the last 4k of our page */
+ vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
#ifdef CONFIG_PPC_BOOK3S_64
/* default to book3s_64 (970fx) */
@@ -1161,7 +1183,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
if (vcpu->arch.shared->msr & MSR_FP)
kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
- kvmppc_lazy_ee_enable();
+ kvmppc_fix_ee_before_entry();
ret = __kvmppc_vcpu_run(kvm_run, vcpu);
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 94c1dd46b83..a3a5cb8ee7e 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -19,6 +19,7 @@
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
+#include <asm/time.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index dcc94f01600..17722d82f1d 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -674,8 +674,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
goto out;
}
- kvm_guest_enter();
-
#ifdef CONFIG_PPC_FPU
/* Save userspace FPU state in stack */
enable_kernel_fp();
@@ -698,7 +696,7 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
kvmppc_load_guest_fp(vcpu);
#endif
- kvmppc_lazy_ee_enable();
+ kvmppc_fix_ee_before_entry();
ret = __kvmppc_vcpu_run(kvm_run, vcpu);
@@ -1168,7 +1166,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
local_irq_enable();
r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
} else {
- kvmppc_lazy_ee_enable();
+ kvmppc_fix_ee_before_entry();
}
}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 6316ee336e8..07c0106fab7 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -117,8 +117,6 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
kvm_guest_exit();
continue;
}
-
- trace_hardirqs_on();
#endif
kvm_guest_enter();
@@ -420,6 +418,10 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
return kvmppc_core_create_memslot(slot, npages);
}
+void kvm_arch_memslots_updated(struct kvm *kvm)
+{
+}
+
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_userspace_memory_region *mem,
@@ -823,39 +825,39 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
#endif
#ifdef CONFIG_KVM_MPIC
case KVM_CAP_IRQ_MPIC: {
- struct file *filp;
+ struct fd f;
struct kvm_device *dev;
r = -EBADF;
- filp = fget(cap->args[0]);
- if (!filp)
+ f = fdget(cap->args[0]);
+ if (!f.file)
break;
r = -EPERM;
- dev = kvm_device_from_filp(filp);
+ dev = kvm_device_from_filp(f.file);
if (dev)
r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
- fput(filp);
+ fdput(f);
break;
}
#endif
#ifdef CONFIG_KVM_XICS
case KVM_CAP_IRQ_XICS: {
- struct file *filp;
+ struct fd f;
struct kvm_device *dev;
r = -EBADF;
- filp = fget(cap->args[0]);
- if (!filp)
+ f = fdget(cap->args[0]);
+ if (!f.file)
break;
r = -EPERM;
- dev = kvm_device_from_filp(filp);
+ dev = kvm_device_from_filp(f.file);
if (dev)
r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
- fput(filp);
+ fdput(f);
break;
}
#endif /* CONFIG_KVM_XICS */
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 08397217e8a..5850798826c 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -27,6 +27,7 @@
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
@@ -1318,7 +1319,8 @@ static int update_cpu_associativity_changes_mask(void)
}
}
if (changed) {
- cpumask_set_cpu(cpu, changes);
+ cpumask_or(changes, changes, cpu_sibling_mask(cpu));
+ cpu = cpu_last_thread_sibling(cpu);
}
}
@@ -1426,7 +1428,7 @@ static int update_cpu_topology(void *data)
if (!data)
return -EINVAL;
- cpu = get_cpu();
+ cpu = smp_processor_id();
for (update = data; update; update = update->next) {
if (cpu != update->cpu)
@@ -1446,12 +1448,12 @@ static int update_cpu_topology(void *data)
*/
int arch_update_cpu_topology(void)
{
- unsigned int cpu, changed = 0;
+ unsigned int cpu, sibling, changed = 0;
struct topology_update_data *updates, *ud;
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
cpumask_t updated_cpus;
struct device *dev;
- int weight, i = 0;
+ int weight, new_nid, i = 0;
weight = cpumask_weight(&cpu_associativity_changes_mask);
if (!weight)
@@ -1464,19 +1466,46 @@ int arch_update_cpu_topology(void)
cpumask_clear(&updated_cpus);
for_each_cpu(cpu, &cpu_associativity_changes_mask) {
- ud = &updates[i++];
- ud->cpu = cpu;
- vphn_get_associativity(cpu, associativity);
- ud->new_nid = associativity_to_nid(associativity);
-
- if (ud->new_nid < 0 || !node_online(ud->new_nid))
- ud->new_nid = first_online_node;
+ /*
+ * If the siblings aren't flagged for changes, the updates list
+ * will be too short. Skip them on this update and flag them
+ * for the next update.
+ */
+ if (!cpumask_subset(cpu_sibling_mask(cpu),
+ &cpu_associativity_changes_mask)) {
+ pr_info("Sibling bits not set for associativity "
+ "change, cpu%d\n", cpu);
+ cpumask_or(&cpu_associativity_changes_mask,
+ &cpu_associativity_changes_mask,
+ cpu_sibling_mask(cpu));
+ cpu = cpu_last_thread_sibling(cpu);
+ continue;
+ }
- ud->old_nid = numa_cpu_lookup_table[cpu];
- cpumask_set_cpu(cpu, &updated_cpus);
+ /* Use associativity from first thread for all siblings */
+ vphn_get_associativity(cpu, associativity);
+ new_nid = associativity_to_nid(associativity);
+ if (new_nid < 0 || !node_online(new_nid))
+ new_nid = first_online_node;
+
+ if (new_nid == numa_cpu_lookup_table[cpu]) {
+ cpumask_andnot(&cpu_associativity_changes_mask,
+ &cpu_associativity_changes_mask,
+ cpu_sibling_mask(cpu));
+ cpu = cpu_last_thread_sibling(cpu);
+ continue;
+ }
- if (i < weight)
- ud->next = &updates[i];
+ for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
+ ud = &updates[i++];
+ ud->cpu = sibling;
+ ud->new_nid = new_nid;
+ ud->old_nid = numa_cpu_lookup_table[sibling];
+ cpumask_set_cpu(sibling, &updated_cpus);
+ if (i < weight)
+ ud->next = &updates[i];
+ }
+ cpu = cpu_last_thread_sibling(cpu);
}
stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
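
The reworked loop above handles a whole core at a time: it expands each flagged CPU to its sibling threads, then jumps to cpu_last_thread_sibling() so the outer for_each_cpu iterator moves straight to the next core. A small sketch of that iteration pattern; THREADS_PER_CORE and the plain integer loop are simplifications, not the kernel's cpumask API.

/* Illustrative sketch only -- not part of the patch above. */
#include <stdio.h>

#define THREADS_PER_CORE 4	/* assumed SMT4 for illustration */

/* Mirrors cpu_last_thread_sibling(): last thread of the same core. */
static int last_thread_sibling(int cpu)
{
	return (cpu / THREADS_PER_CORE) * THREADS_PER_CORE + THREADS_PER_CORE - 1;
}

int main(void)
{
	int cpu;

	/* Handle all siblings of a core together, then skip to its last thread. */
	for (cpu = 0; cpu < 16; cpu++) {
		int sibling, first = (cpu / THREADS_PER_CORE) * THREADS_PER_CORE;

		printf("cpu %d: update siblings", cpu);
		for (sibling = first; sibling <= last_thread_sibling(cpu); sibling++)
			printf(" %d", sibling);
		printf("\n");
		cpu = last_thread_sibling(cpu);	/* the loop's ++ moves to the next core */
	}
	return 0;
}
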
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index 4f51025f5b0..c77348c5d46 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -119,7 +119,7 @@ static void op_powerpc_stop(void)
model->global_stop();
}
-static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
+static int op_powerpc_create_files(struct dentry *root)
{
int i;
@@ -128,9 +128,9 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
* There is one mmcr0, mmcr1 and mmcra for setting the events for
* all of the counters.
*/
- oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0);
- oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1);
- oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra);
+ oprofilefs_create_ulong(root, "mmcr0", &sys.mmcr0);
+ oprofilefs_create_ulong(root, "mmcr1", &sys.mmcr1);
+ oprofilefs_create_ulong(root, "mmcra", &sys.mmcra);
#ifdef CONFIG_OPROFILE_CELL
/* create a file the user tool can check to see what level of profiling
 * support exists with this kernel. Initialize bit mask to indicate
@@ -142,7 +142,7 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
* If the file does not exist, then the kernel only supports SPU
* cycle profiling, PPU event and cycle profiling.
*/
- oprofilefs_create_ulong(sb, root, "cell_support", &sys.cell_support);
+ oprofilefs_create_ulong(root, "cell_support", &sys.cell_support);
sys.cell_support = 0x1; /* Note, the user OProfile tool must check
* that this bit is set before attempting to
 * use SPU event profiling. Older kernels
@@ -160,11 +160,11 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
char buf[4];
snprintf(buf, sizeof buf, "%d", i);
- dir = oprofilefs_mkdir(sb, root, buf);
+ dir = oprofilefs_mkdir(root, buf);
- oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled);
- oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event);
- oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count);
+ oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
+ oprofilefs_create_ulong(dir, "event", &ctr[i].event);
+ oprofilefs_create_ulong(dir, "count", &ctr[i].count);
/*
* Classic PowerPC doesn't support per-counter
@@ -173,14 +173,14 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root)
* Book-E style performance monitors, we do
* support them.
*/
- oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel);
- oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user);
+ oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
+ oprofilefs_create_ulong(dir, "user", &ctr[i].user);
- oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask);
+ oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
}
- oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel);
- oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user);
+ oprofilefs_create_ulong(root, "enable_kernel", &sys.enable_kernel);
+ oprofilefs_create_ulong(root, "enable_user", &sys.enable_user);
/* Default to tracing both kernel and user */
sys.enable_kernel = 1;
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 24a45f91c65..eeae308cf98 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -484,7 +484,7 @@ static bool is_ebb_event(struct perf_event *event)
* use bit 63 of the event code for something else if they wish.
*/
return (ppmu->flags & PPMU_EBB) &&
- ((event->attr.config >> EVENT_CONFIG_EBB_SHIFT) & 1);
+ ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
}
static int ebb_event_check(struct perf_event *event)
diff --git a/arch/powerpc/perf/power7-events-list.h b/arch/powerpc/perf/power7-events-list.h
new file mode 100644
index 00000000000..687790a2c0b
--- /dev/null
+++ b/arch/powerpc/perf/power7-events-list.h
@@ -0,0 +1,548 @@
+/*
+ * Performance counter support for POWER7 processors.
+ *
+ * Copyright 2013 Runzhen Wang, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+EVENT(PM_IC_DEMAND_L2_BR_ALL, 0x04898)
+EVENT(PM_GCT_UTIL_7_TO_10_SLOTS, 0x020a0)
+EVENT(PM_PMC2_SAVED, 0x10022)
+EVENT(PM_CMPLU_STALL_DFU, 0x2003c)
+EVENT(PM_VSU0_16FLOP, 0x0a0a4)
+EVENT(PM_MRK_LSU_DERAT_MISS, 0x3d05a)
+EVENT(PM_MRK_ST_CMPL, 0x10034)
+EVENT(PM_NEST_PAIR3_ADD, 0x40881)
+EVENT(PM_L2_ST_DISP, 0x46180)
+EVENT(PM_L2_CASTOUT_MOD, 0x16180)
+EVENT(PM_ISEG, 0x020a4)
+EVENT(PM_MRK_INST_TIMEO, 0x40034)
+EVENT(PM_L2_RCST_DISP_FAIL_ADDR, 0x36282)
+EVENT(PM_LSU1_DC_PREF_STREAM_CONFIRM, 0x0d0b6)
+EVENT(PM_IERAT_WR_64K, 0x040be)
+EVENT(PM_MRK_DTLB_MISS_16M, 0x4d05e)
+EVENT(PM_IERAT_MISS, 0x100f6)
+EVENT(PM_MRK_PTEG_FROM_LMEM, 0x4d052)
+EVENT(PM_FLOP, 0x100f4)
+EVENT(PM_THRD_PRIO_4_5_CYC, 0x040b4)
+EVENT(PM_BR_PRED_TA, 0x040aa)
+EVENT(PM_CMPLU_STALL_FXU, 0x20014)
+EVENT(PM_EXT_INT, 0x200f8)
+EVENT(PM_VSU_FSQRT_FDIV, 0x0a888)
+EVENT(PM_MRK_LD_MISS_EXPOSED_CYC, 0x1003e)
+EVENT(PM_LSU1_LDF, 0x0c086)
+EVENT(PM_IC_WRITE_ALL, 0x0488c)
+EVENT(PM_LSU0_SRQ_STFWD, 0x0c0a0)
+EVENT(PM_PTEG_FROM_RL2L3_MOD, 0x1c052)
+EVENT(PM_MRK_DATA_FROM_L31_SHR, 0x1d04e)
+EVENT(PM_DATA_FROM_L21_MOD, 0x3c046)
+EVENT(PM_VSU1_SCAL_DOUBLE_ISSUED, 0x0b08a)
+EVENT(PM_VSU0_8FLOP, 0x0a0a0)
+EVENT(PM_POWER_EVENT1, 0x1006e)
+EVENT(PM_DISP_CLB_HELD_BAL, 0x02092)
+EVENT(PM_VSU1_2FLOP, 0x0a09a)
+EVENT(PM_LWSYNC_HELD, 0x0209a)
+EVENT(PM_PTEG_FROM_DL2L3_SHR, 0x3c054)
+EVENT(PM_INST_FROM_L21_MOD, 0x34046)
+EVENT(PM_IERAT_XLATE_WR_16MPLUS, 0x040bc)
+EVENT(PM_IC_REQ_ALL, 0x04888)
+EVENT(PM_DSLB_MISS, 0x0d090)
+EVENT(PM_L3_MISS, 0x1f082)
+EVENT(PM_LSU0_L1_PREF, 0x0d0b8)
+EVENT(PM_VSU_SCALAR_SINGLE_ISSUED, 0x0b884)
+EVENT(PM_LSU1_DC_PREF_STREAM_CONFIRM_STRIDE, 0x0d0be)
+EVENT(PM_L2_INST, 0x36080)
+EVENT(PM_VSU0_FRSP, 0x0a0b4)
+EVENT(PM_FLUSH_DISP, 0x02082)
+EVENT(PM_PTEG_FROM_L2MISS, 0x4c058)
+EVENT(PM_VSU1_DQ_ISSUED, 0x0b09a)
+EVENT(PM_CMPLU_STALL_LSU, 0x20012)
+EVENT(PM_MRK_DATA_FROM_DMEM, 0x1d04a)
+EVENT(PM_LSU_FLUSH_ULD, 0x0c8b0)
+EVENT(PM_PTEG_FROM_LMEM, 0x4c052)
+EVENT(PM_MRK_DERAT_MISS_16M, 0x3d05c)
+EVENT(PM_THRD_ALL_RUN_CYC, 0x2000c)
+EVENT(PM_MEM0_PREFETCH_DISP, 0x20083)
+EVENT(PM_MRK_STALL_CMPLU_CYC_COUNT, 0x3003f)
+EVENT(PM_DATA_FROM_DL2L3_MOD, 0x3c04c)
+EVENT(PM_VSU_FRSP, 0x0a8b4)
+EVENT(PM_MRK_DATA_FROM_L21_MOD, 0x3d046)
+EVENT(PM_PMC1_OVERFLOW, 0x20010)
+EVENT(PM_VSU0_SINGLE, 0x0a0a8)
+EVENT(PM_MRK_PTEG_FROM_L3MISS, 0x2d058)
+EVENT(PM_MRK_PTEG_FROM_L31_SHR, 0x2d056)
+EVENT(PM_VSU0_VECTOR_SP_ISSUED, 0x0b090)
+EVENT(PM_VSU1_FEST, 0x0a0ba)
+EVENT(PM_MRK_INST_DISP, 0x20030)
+EVENT(PM_VSU0_COMPLEX_ISSUED, 0x0b096)
+EVENT(PM_LSU1_FLUSH_UST, 0x0c0b6)
+EVENT(PM_INST_CMPL, 0x00002)
+EVENT(PM_FXU_IDLE, 0x1000e)
+EVENT(PM_LSU0_FLUSH_ULD, 0x0c0b0)
+EVENT(PM_MRK_DATA_FROM_DL2L3_MOD, 0x3d04c)
+EVENT(PM_LSU_LMQ_SRQ_EMPTY_ALL_CYC, 0x3001c)
+EVENT(PM_LSU1_REJECT_LMQ_FULL, 0x0c0a6)
+EVENT(PM_INST_PTEG_FROM_L21_MOD, 0x3e056)
+EVENT(PM_INST_FROM_RL2L3_MOD, 0x14042)
+EVENT(PM_SHL_CREATED, 0x05082)
+EVENT(PM_L2_ST_HIT, 0x46182)
+EVENT(PM_DATA_FROM_DMEM, 0x1c04a)
+EVENT(PM_L3_LD_MISS, 0x2f082)
+EVENT(PM_FXU1_BUSY_FXU0_IDLE, 0x4000e)
+EVENT(PM_DISP_CLB_HELD_RES, 0x02094)
+EVENT(PM_L2_SN_SX_I_DONE, 0x36382)
+EVENT(PM_GRP_CMPL, 0x30004)
+EVENT(PM_STCX_CMPL, 0x0c098)
+EVENT(PM_VSU0_2FLOP, 0x0a098)
+EVENT(PM_L3_PREF_MISS, 0x3f082)
+EVENT(PM_LSU_SRQ_SYNC_CYC, 0x0d096)
+EVENT(PM_LSU_REJECT_ERAT_MISS, 0x20064)
+EVENT(PM_L1_ICACHE_MISS, 0x200fc)
+EVENT(PM_LSU1_FLUSH_SRQ, 0x0c0be)
+EVENT(PM_LD_REF_L1_LSU0, 0x0c080)
+EVENT(PM_VSU0_FEST, 0x0a0b8)
+EVENT(PM_VSU_VECTOR_SINGLE_ISSUED, 0x0b890)
+EVENT(PM_FREQ_UP, 0x4000c)
+EVENT(PM_DATA_FROM_LMEM, 0x3c04a)
+EVENT(PM_LSU1_LDX, 0x0c08a)
+EVENT(PM_PMC3_OVERFLOW, 0x40010)
+EVENT(PM_MRK_BR_MPRED, 0x30036)
+EVENT(PM_SHL_MATCH, 0x05086)
+EVENT(PM_MRK_BR_TAKEN, 0x10036)
+EVENT(PM_CMPLU_STALL_BRU, 0x4004e)
+EVENT(PM_ISLB_MISS, 0x0d092)
+EVENT(PM_CYC, 0x0001e)
+EVENT(PM_DISP_HELD_THERMAL, 0x30006)
+EVENT(PM_INST_PTEG_FROM_RL2L3_SHR, 0x2e054)
+EVENT(PM_LSU1_SRQ_STFWD, 0x0c0a2)
+EVENT(PM_GCT_NOSLOT_BR_MPRED, 0x4001a)
+EVENT(PM_1PLUS_PPC_CMPL, 0x100f2)
+EVENT(PM_PTEG_FROM_DMEM, 0x2c052)
+EVENT(PM_VSU_2FLOP, 0x0a898)
+EVENT(PM_GCT_FULL_CYC, 0x04086)
+EVENT(PM_MRK_DATA_FROM_L3_CYC, 0x40020)
+EVENT(PM_LSU_SRQ_S0_ALLOC, 0x0d09d)
+EVENT(PM_MRK_DERAT_MISS_4K, 0x1d05c)
+EVENT(PM_BR_MPRED_TA, 0x040ae)
+EVENT(PM_INST_PTEG_FROM_L2MISS, 0x4e058)
+EVENT(PM_DPU_HELD_POWER, 0x20006)
+EVENT(PM_RUN_INST_CMPL, 0x400fa)
+EVENT(PM_MRK_VSU_FIN, 0x30032)
+EVENT(PM_LSU_SRQ_S0_VALID, 0x0d09c)
+EVENT(PM_GCT_EMPTY_CYC, 0x20008)
+EVENT(PM_IOPS_DISP, 0x30014)
+EVENT(PM_RUN_SPURR, 0x10008)
+EVENT(PM_PTEG_FROM_L21_MOD, 0x3c056)
+EVENT(PM_VSU0_1FLOP, 0x0a080)
+EVENT(PM_SNOOP_TLBIE, 0x0d0b2)
+EVENT(PM_DATA_FROM_L3MISS, 0x2c048)
+EVENT(PM_VSU_SINGLE, 0x0a8a8)
+EVENT(PM_DTLB_MISS_16G, 0x1c05e)
+EVENT(PM_CMPLU_STALL_VECTOR, 0x2001c)
+EVENT(PM_FLUSH, 0x400f8)
+EVENT(PM_L2_LD_HIT, 0x36182)
+EVENT(PM_NEST_PAIR2_AND, 0x30883)
+EVENT(PM_VSU1_1FLOP, 0x0a082)
+EVENT(PM_IC_PREF_REQ, 0x0408a)
+EVENT(PM_L3_LD_HIT, 0x2f080)
+EVENT(PM_GCT_NOSLOT_IC_MISS, 0x2001a)
+EVENT(PM_DISP_HELD, 0x10006)
+EVENT(PM_L2_LD, 0x16080)
+EVENT(PM_LSU_FLUSH_SRQ, 0x0c8bc)
+EVENT(PM_BC_PLUS_8_CONV, 0x040b8)
+EVENT(PM_MRK_DATA_FROM_L31_MOD_CYC, 0x40026)
+EVENT(PM_CMPLU_STALL_VECTOR_LONG, 0x4004a)
+EVENT(PM_L2_RCST_BUSY_RC_FULL, 0x26282)
+EVENT(PM_TB_BIT_TRANS, 0x300f8)
+EVENT(PM_THERMAL_MAX, 0x40006)
+EVENT(PM_LSU1_FLUSH_ULD, 0x0c0b2)
+EVENT(PM_LSU1_REJECT_LHS, 0x0c0ae)
+EVENT(PM_LSU_LRQ_S0_ALLOC, 0x0d09f)
+EVENT(PM_L3_CO_L31, 0x4f080)
+EVENT(PM_POWER_EVENT4, 0x4006e)
+EVENT(PM_DATA_FROM_L31_SHR, 0x1c04e)
+EVENT(PM_BR_UNCOND, 0x0409e)
+EVENT(PM_LSU1_DC_PREF_STREAM_ALLOC, 0x0d0aa)
+EVENT(PM_PMC4_REWIND, 0x10020)
+EVENT(PM_L2_RCLD_DISP, 0x16280)
+EVENT(PM_THRD_PRIO_2_3_CYC, 0x040b2)
+EVENT(PM_MRK_PTEG_FROM_L2MISS, 0x4d058)
+EVENT(PM_IC_DEMAND_L2_BHT_REDIRECT, 0x04098)
+EVENT(PM_LSU_DERAT_MISS, 0x200f6)
+EVENT(PM_IC_PREF_CANCEL_L2, 0x04094)
+EVENT(PM_MRK_FIN_STALL_CYC_COUNT, 0x1003d)
+EVENT(PM_BR_PRED_CCACHE, 0x040a0)
+EVENT(PM_GCT_UTIL_1_TO_2_SLOTS, 0x0209c)
+EVENT(PM_MRK_ST_CMPL_INT, 0x30034)
+EVENT(PM_LSU_TWO_TABLEWALK_CYC, 0x0d0a6)
+EVENT(PM_MRK_DATA_FROM_L3MISS, 0x2d048)
+EVENT(PM_GCT_NOSLOT_CYC, 0x100f8)
+EVENT(PM_LSU_SET_MPRED, 0x0c0a8)
+EVENT(PM_FLUSH_DISP_TLBIE, 0x0208a)
+EVENT(PM_VSU1_FCONV, 0x0a0b2)
+EVENT(PM_DERAT_MISS_16G, 0x4c05c)
+EVENT(PM_INST_FROM_LMEM, 0x3404a)
+EVENT(PM_IC_DEMAND_L2_BR_REDIRECT, 0x0409a)
+EVENT(PM_CMPLU_STALL_SCALAR_LONG, 0x20018)
+EVENT(PM_INST_PTEG_FROM_L2, 0x1e050)
+EVENT(PM_PTEG_FROM_L2, 0x1c050)
+EVENT(PM_MRK_DATA_FROM_L21_SHR_CYC, 0x20024)
+EVENT(PM_MRK_DTLB_MISS_4K, 0x2d05a)
+EVENT(PM_VSU0_FPSCR, 0x0b09c)
+EVENT(PM_VSU1_VECT_DOUBLE_ISSUED, 0x0b082)
+EVENT(PM_MRK_PTEG_FROM_RL2L3_MOD, 0x1d052)
+EVENT(PM_MEM0_RQ_DISP, 0x10083)
+EVENT(PM_L2_LD_MISS, 0x26080)
+EVENT(PM_VMX_RESULT_SAT_1, 0x0b0a0)
+EVENT(PM_L1_PREF, 0x0d8b8)
+EVENT(PM_MRK_DATA_FROM_LMEM_CYC, 0x2002c)
+EVENT(PM_GRP_IC_MISS_NONSPEC, 0x1000c)
+EVENT(PM_PB_NODE_PUMP, 0x10081)
+EVENT(PM_SHL_MERGED, 0x05084)
+EVENT(PM_NEST_PAIR1_ADD, 0x20881)
+EVENT(PM_DATA_FROM_L3, 0x1c048)
+EVENT(PM_LSU_FLUSH, 0x0208e)
+EVENT(PM_LSU_SRQ_SYNC_COUNT, 0x0d097)
+EVENT(PM_PMC2_OVERFLOW, 0x30010)
+EVENT(PM_LSU_LDF, 0x0c884)
+EVENT(PM_POWER_EVENT3, 0x3006e)
+EVENT(PM_DISP_WT, 0x30008)
+EVENT(PM_CMPLU_STALL_REJECT, 0x40016)
+EVENT(PM_IC_BANK_CONFLICT, 0x04082)
+EVENT(PM_BR_MPRED_CR_TA, 0x048ae)
+EVENT(PM_L2_INST_MISS, 0x36082)
+EVENT(PM_CMPLU_STALL_ERAT_MISS, 0x40018)
+EVENT(PM_NEST_PAIR2_ADD, 0x30881)
+EVENT(PM_MRK_LSU_FLUSH, 0x0d08c)
+EVENT(PM_L2_LDST, 0x16880)
+EVENT(PM_INST_FROM_L31_SHR, 0x1404e)
+EVENT(PM_VSU0_FIN, 0x0a0bc)
+EVENT(PM_LARX_LSU, 0x0c894)
+EVENT(PM_INST_FROM_RMEM, 0x34042)
+EVENT(PM_DISP_CLB_HELD_TLBIE, 0x02096)
+EVENT(PM_MRK_DATA_FROM_DMEM_CYC, 0x2002e)
+EVENT(PM_BR_PRED_CR, 0x040a8)
+EVENT(PM_LSU_REJECT, 0x10064)
+EVENT(PM_GCT_UTIL_3_TO_6_SLOTS, 0x0209e)
+EVENT(PM_CMPLU_STALL_END_GCT_NOSLOT, 0x10028)
+EVENT(PM_LSU0_REJECT_LMQ_FULL, 0x0c0a4)
+EVENT(PM_VSU_FEST, 0x0a8b8)
+EVENT(PM_NEST_PAIR0_AND, 0x10883)
+EVENT(PM_PTEG_FROM_L3, 0x2c050)
+EVENT(PM_POWER_EVENT2, 0x2006e)
+EVENT(PM_IC_PREF_CANCEL_PAGE, 0x04090)
+EVENT(PM_VSU0_FSQRT_FDIV, 0x0a088)
+EVENT(PM_MRK_GRP_CMPL, 0x40030)
+EVENT(PM_VSU0_SCAL_DOUBLE_ISSUED, 0x0b088)
+EVENT(PM_GRP_DISP, 0x3000a)
+EVENT(PM_LSU0_LDX, 0x0c088)
+EVENT(PM_DATA_FROM_L2, 0x1c040)
+EVENT(PM_MRK_DATA_FROM_RL2L3_MOD, 0x1d042)
+EVENT(PM_LD_REF_L1, 0x0c880)
+EVENT(PM_VSU0_VECT_DOUBLE_ISSUED, 0x0b080)
+EVENT(PM_VSU1_2FLOP_DOUBLE, 0x0a08e)
+EVENT(PM_THRD_PRIO_6_7_CYC, 0x040b6)
+EVENT(PM_BC_PLUS_8_RSLV_TAKEN, 0x040ba)
+EVENT(PM_BR_MPRED_CR, 0x040ac)
+EVENT(PM_L3_CO_MEM, 0x4f082)
+EVENT(PM_LD_MISS_L1, 0x400f0)
+EVENT(PM_DATA_FROM_RL2L3_MOD, 0x1c042)
+EVENT(PM_LSU_SRQ_FULL_CYC, 0x1001a)
+EVENT(PM_TABLEWALK_CYC, 0x10026)
+EVENT(PM_MRK_PTEG_FROM_RMEM, 0x3d052)
+EVENT(PM_LSU_SRQ_STFWD, 0x0c8a0)
+EVENT(PM_INST_PTEG_FROM_RMEM, 0x3e052)
+EVENT(PM_FXU0_FIN, 0x10004)
+EVENT(PM_LSU1_L1_SW_PREF, 0x0c09e)
+EVENT(PM_PTEG_FROM_L31_MOD, 0x1c054)
+EVENT(PM_PMC5_OVERFLOW, 0x10024)
+EVENT(PM_LD_REF_L1_LSU1, 0x0c082)
+EVENT(PM_INST_PTEG_FROM_L21_SHR, 0x4e056)
+EVENT(PM_CMPLU_STALL_THRD, 0x1001c)
+EVENT(PM_DATA_FROM_RMEM, 0x3c042)
+EVENT(PM_VSU0_SCAL_SINGLE_ISSUED, 0x0b084)
+EVENT(PM_BR_MPRED_LSTACK, 0x040a6)
+EVENT(PM_MRK_DATA_FROM_RL2L3_MOD_CYC, 0x40028)
+EVENT(PM_LSU0_FLUSH_UST, 0x0c0b4)
+EVENT(PM_LSU_NCST, 0x0c090)
+EVENT(PM_BR_TAKEN, 0x20004)
+EVENT(PM_INST_PTEG_FROM_LMEM, 0x4e052)
+EVENT(PM_GCT_NOSLOT_BR_MPRED_IC_MISS, 0x4001c)
+EVENT(PM_DTLB_MISS_4K, 0x2c05a)
+EVENT(PM_PMC4_SAVED, 0x30022)
+EVENT(PM_VSU1_PERMUTE_ISSUED, 0x0b092)
+EVENT(PM_SLB_MISS, 0x0d890)
+EVENT(PM_LSU1_FLUSH_LRQ, 0x0c0ba)
+EVENT(PM_DTLB_MISS, 0x300fc)
+EVENT(PM_VSU1_FRSP, 0x0a0b6)
+EVENT(PM_VSU_VECTOR_DOUBLE_ISSUED, 0x0b880)
+EVENT(PM_L2_CASTOUT_SHR, 0x16182)
+EVENT(PM_DATA_FROM_DL2L3_SHR, 0x3c044)
+EVENT(PM_VSU1_STF, 0x0b08e)
+EVENT(PM_ST_FIN, 0x200f0)
+EVENT(PM_PTEG_FROM_L21_SHR, 0x4c056)
+EVENT(PM_L2_LOC_GUESS_WRONG, 0x26480)
+EVENT(PM_MRK_STCX_FAIL, 0x0d08e)
+EVENT(PM_LSU0_REJECT_LHS, 0x0c0ac)
+EVENT(PM_IC_PREF_CANCEL_HIT, 0x04092)
+EVENT(PM_L3_PREF_BUSY, 0x4f080)
+EVENT(PM_MRK_BRU_FIN, 0x2003a)
+EVENT(PM_LSU1_NCLD, 0x0c08e)
+EVENT(PM_INST_PTEG_FROM_L31_MOD, 0x1e054)
+EVENT(PM_LSU_NCLD, 0x0c88c)
+EVENT(PM_LSU_LDX, 0x0c888)
+EVENT(PM_L2_LOC_GUESS_CORRECT, 0x16480)
+EVENT(PM_THRESH_TIMEO, 0x10038)
+EVENT(PM_L3_PREF_ST, 0x0d0ae)
+EVENT(PM_DISP_CLB_HELD_SYNC, 0x02098)
+EVENT(PM_VSU_SIMPLE_ISSUED, 0x0b894)
+EVENT(PM_VSU1_SINGLE, 0x0a0aa)
+EVENT(PM_DATA_TABLEWALK_CYC, 0x3001a)
+EVENT(PM_L2_RC_ST_DONE, 0x36380)
+EVENT(PM_MRK_PTEG_FROM_L21_MOD, 0x3d056)
+EVENT(PM_LARX_LSU1, 0x0c096)
+EVENT(PM_MRK_DATA_FROM_RMEM, 0x3d042)
+EVENT(PM_DISP_CLB_HELD, 0x02090)
+EVENT(PM_DERAT_MISS_4K, 0x1c05c)
+EVENT(PM_L2_RCLD_DISP_FAIL_ADDR, 0x16282)
+EVENT(PM_SEG_EXCEPTION, 0x028a4)
+EVENT(PM_FLUSH_DISP_SB, 0x0208c)
+EVENT(PM_L2_DC_INV, 0x26182)
+EVENT(PM_PTEG_FROM_DL2L3_MOD, 0x4c054)
+EVENT(PM_DSEG, 0x020a6)
+EVENT(PM_BR_PRED_LSTACK, 0x040a2)
+EVENT(PM_VSU0_STF, 0x0b08c)
+EVENT(PM_LSU_FX_FIN, 0x10066)
+EVENT(PM_DERAT_MISS_16M, 0x3c05c)
+EVENT(PM_MRK_PTEG_FROM_DL2L3_MOD, 0x4d054)
+EVENT(PM_GCT_UTIL_11_PLUS_SLOTS, 0x020a2)
+EVENT(PM_INST_FROM_L3, 0x14048)
+EVENT(PM_MRK_IFU_FIN, 0x3003a)
+EVENT(PM_ITLB_MISS, 0x400fc)
+EVENT(PM_VSU_STF, 0x0b88c)
+EVENT(PM_LSU_FLUSH_UST, 0x0c8b4)
+EVENT(PM_L2_LDST_MISS, 0x26880)
+EVENT(PM_FXU1_FIN, 0x40004)
+EVENT(PM_SHL_DEALLOCATED, 0x05080)
+EVENT(PM_L2_SN_M_WR_DONE, 0x46382)
+EVENT(PM_LSU_REJECT_SET_MPRED, 0x0c8a8)
+EVENT(PM_L3_PREF_LD, 0x0d0ac)
+EVENT(PM_L2_SN_M_RD_DONE, 0x46380)
+EVENT(PM_MRK_DERAT_MISS_16G, 0x4d05c)
+EVENT(PM_VSU_FCONV, 0x0a8b0)
+EVENT(PM_ANY_THRD_RUN_CYC, 0x100fa)
+EVENT(PM_LSU_LMQ_FULL_CYC, 0x0d0a4)
+EVENT(PM_MRK_LSU_REJECT_LHS, 0x0d082)
+EVENT(PM_MRK_LD_MISS_L1_CYC, 0x4003e)
+EVENT(PM_MRK_DATA_FROM_L2_CYC, 0x20020)
+EVENT(PM_INST_IMC_MATCH_DISP, 0x30016)
+EVENT(PM_MRK_DATA_FROM_RMEM_CYC, 0x4002c)
+EVENT(PM_VSU0_SIMPLE_ISSUED, 0x0b094)
+EVENT(PM_CMPLU_STALL_DIV, 0x40014)
+EVENT(PM_MRK_PTEG_FROM_RL2L3_SHR, 0x2d054)
+EVENT(PM_VSU_FMA_DOUBLE, 0x0a890)
+EVENT(PM_VSU_4FLOP, 0x0a89c)
+EVENT(PM_VSU1_FIN, 0x0a0be)
+EVENT(PM_NEST_PAIR1_AND, 0x20883)
+EVENT(PM_INST_PTEG_FROM_RL2L3_MOD, 0x1e052)
+EVENT(PM_RUN_CYC, 0x200f4)
+EVENT(PM_PTEG_FROM_RMEM, 0x3c052)
+EVENT(PM_LSU_LRQ_S0_VALID, 0x0d09e)
+EVENT(PM_LSU0_LDF, 0x0c084)
+EVENT(PM_FLUSH_COMPLETION, 0x30012)
+EVENT(PM_ST_MISS_L1, 0x300f0)
+EVENT(PM_L2_NODE_PUMP, 0x36480)
+EVENT(PM_INST_FROM_DL2L3_SHR, 0x34044)
+EVENT(PM_MRK_STALL_CMPLU_CYC, 0x3003e)
+EVENT(PM_VSU1_DENORM, 0x0a0ae)
+EVENT(PM_MRK_DATA_FROM_L31_SHR_CYC, 0x20026)
+EVENT(PM_NEST_PAIR0_ADD, 0x10881)
+EVENT(PM_INST_FROM_L3MISS, 0x24048)
+EVENT(PM_EE_OFF_EXT_INT, 0x02080)
+EVENT(PM_INST_PTEG_FROM_DMEM, 0x2e052)
+EVENT(PM_INST_FROM_DL2L3_MOD, 0x3404c)
+EVENT(PM_PMC6_OVERFLOW, 0x30024)
+EVENT(PM_VSU_2FLOP_DOUBLE, 0x0a88c)
+EVENT(PM_TLB_MISS, 0x20066)
+EVENT(PM_FXU_BUSY, 0x2000e)
+EVENT(PM_L2_RCLD_DISP_FAIL_OTHER, 0x26280)
+EVENT(PM_LSU_REJECT_LMQ_FULL, 0x0c8a4)
+EVENT(PM_IC_RELOAD_SHR, 0x04096)
+EVENT(PM_GRP_MRK, 0x10031)
+EVENT(PM_MRK_ST_NEST, 0x20034)
+EVENT(PM_VSU1_FSQRT_FDIV, 0x0a08a)
+EVENT(PM_LSU0_FLUSH_LRQ, 0x0c0b8)
+EVENT(PM_LARX_LSU0, 0x0c094)
+EVENT(PM_IBUF_FULL_CYC, 0x04084)
+EVENT(PM_MRK_DATA_FROM_DL2L3_SHR_CYC, 0x2002a)
+EVENT(PM_LSU_DC_PREF_STREAM_ALLOC, 0x0d8a8)
+EVENT(PM_GRP_MRK_CYC, 0x10030)
+EVENT(PM_MRK_DATA_FROM_RL2L3_SHR_CYC, 0x20028)
+EVENT(PM_L2_GLOB_GUESS_CORRECT, 0x16482)
+EVENT(PM_LSU_REJECT_LHS, 0x0c8ac)
+EVENT(PM_MRK_DATA_FROM_LMEM, 0x3d04a)
+EVENT(PM_INST_PTEG_FROM_L3, 0x2e050)
+EVENT(PM_FREQ_DOWN, 0x3000c)
+EVENT(PM_PB_RETRY_NODE_PUMP, 0x30081)
+EVENT(PM_INST_FROM_RL2L3_SHR, 0x1404c)
+EVENT(PM_MRK_INST_ISSUED, 0x10032)
+EVENT(PM_PTEG_FROM_L3MISS, 0x2c058)
+EVENT(PM_RUN_PURR, 0x400f4)
+EVENT(PM_MRK_GRP_IC_MISS, 0x40038)
+EVENT(PM_MRK_DATA_FROM_L3, 0x1d048)
+EVENT(PM_CMPLU_STALL_DCACHE_MISS, 0x20016)
+EVENT(PM_PTEG_FROM_RL2L3_SHR, 0x2c054)
+EVENT(PM_LSU_FLUSH_LRQ, 0x0c8b8)
+EVENT(PM_MRK_DERAT_MISS_64K, 0x2d05c)
+EVENT(PM_INST_PTEG_FROM_DL2L3_MOD, 0x4e054)
+EVENT(PM_L2_ST_MISS, 0x26082)
+EVENT(PM_MRK_PTEG_FROM_L21_SHR, 0x4d056)
+EVENT(PM_LWSYNC, 0x0d094)
+EVENT(PM_LSU0_DC_PREF_STREAM_CONFIRM_STRIDE, 0x0d0bc)
+EVENT(PM_MRK_LSU_FLUSH_LRQ, 0x0d088)
+EVENT(PM_INST_IMC_MATCH_CMPL, 0x100f0)
+EVENT(PM_NEST_PAIR3_AND, 0x40883)
+EVENT(PM_PB_RETRY_SYS_PUMP, 0x40081)
+EVENT(PM_MRK_INST_FIN, 0x30030)
+EVENT(PM_MRK_PTEG_FROM_DL2L3_SHR, 0x3d054)
+EVENT(PM_INST_FROM_L31_MOD, 0x14044)
+EVENT(PM_MRK_DTLB_MISS_64K, 0x3d05e)
+EVENT(PM_LSU_FIN, 0x30066)
+EVENT(PM_MRK_LSU_REJECT, 0x40064)
+EVENT(PM_L2_CO_FAIL_BUSY, 0x16382)
+EVENT(PM_MEM0_WQ_DISP, 0x40083)
+EVENT(PM_DATA_FROM_L31_MOD, 0x1c044)
+EVENT(PM_THERMAL_WARN, 0x10016)
+EVENT(PM_VSU0_4FLOP, 0x0a09c)
+EVENT(PM_BR_MPRED_CCACHE, 0x040a4)
+EVENT(PM_CMPLU_STALL_IFU, 0x4004c)
+EVENT(PM_L1_DEMAND_WRITE, 0x0408c)
+EVENT(PM_FLUSH_BR_MPRED, 0x02084)
+EVENT(PM_MRK_DTLB_MISS_16G, 0x1d05e)
+EVENT(PM_MRK_PTEG_FROM_DMEM, 0x2d052)
+EVENT(PM_L2_RCST_DISP, 0x36280)
+EVENT(PM_CMPLU_STALL, 0x4000a)
+EVENT(PM_LSU_PARTIAL_CDF, 0x0c0aa)
+EVENT(PM_DISP_CLB_HELD_SB, 0x020a8)
+EVENT(PM_VSU0_FMA_DOUBLE, 0x0a090)
+EVENT(PM_FXU0_BUSY_FXU1_IDLE, 0x3000e)
+EVENT(PM_IC_DEMAND_CYC, 0x10018)
+EVENT(PM_MRK_DATA_FROM_L21_SHR, 0x3d04e)
+EVENT(PM_MRK_LSU_FLUSH_UST, 0x0d086)
+EVENT(PM_INST_PTEG_FROM_L3MISS, 0x2e058)
+EVENT(PM_VSU_DENORM, 0x0a8ac)
+EVENT(PM_MRK_LSU_PARTIAL_CDF, 0x0d080)
+EVENT(PM_INST_FROM_L21_SHR, 0x3404e)
+EVENT(PM_IC_PREF_WRITE, 0x0408e)
+EVENT(PM_BR_PRED, 0x0409c)
+EVENT(PM_INST_FROM_DMEM, 0x1404a)
+EVENT(PM_IC_PREF_CANCEL_ALL, 0x04890)
+EVENT(PM_LSU_DC_PREF_STREAM_CONFIRM, 0x0d8b4)
+EVENT(PM_MRK_LSU_FLUSH_SRQ, 0x0d08a)
+EVENT(PM_MRK_FIN_STALL_CYC, 0x1003c)
+EVENT(PM_L2_RCST_DISP_FAIL_OTHER, 0x46280)
+EVENT(PM_VSU1_DD_ISSUED, 0x0b098)
+EVENT(PM_PTEG_FROM_L31_SHR, 0x2c056)
+EVENT(PM_DATA_FROM_L21_SHR, 0x3c04e)
+EVENT(PM_LSU0_NCLD, 0x0c08c)
+EVENT(PM_VSU1_4FLOP, 0x0a09e)
+EVENT(PM_VSU1_8FLOP, 0x0a0a2)
+EVENT(PM_VSU_8FLOP, 0x0a8a0)
+EVENT(PM_LSU_LMQ_SRQ_EMPTY_CYC, 0x2003e)
+EVENT(PM_DTLB_MISS_64K, 0x3c05e)
+EVENT(PM_THRD_CONC_RUN_INST, 0x300f4)
+EVENT(PM_MRK_PTEG_FROM_L2, 0x1d050)
+EVENT(PM_PB_SYS_PUMP, 0x20081)
+EVENT(PM_VSU_FIN, 0x0a8bc)
+EVENT(PM_MRK_DATA_FROM_L31_MOD, 0x1d044)
+EVENT(PM_THRD_PRIO_0_1_CYC, 0x040b0)
+EVENT(PM_DERAT_MISS_64K, 0x2c05c)
+EVENT(PM_PMC2_REWIND, 0x30020)
+EVENT(PM_INST_FROM_L2, 0x14040)
+EVENT(PM_GRP_BR_MPRED_NONSPEC, 0x1000a)
+EVENT(PM_INST_DISP, 0x200f2)
+EVENT(PM_MEM0_RD_CANCEL_TOTAL, 0x30083)
+EVENT(PM_LSU0_DC_PREF_STREAM_CONFIRM, 0x0d0b4)
+EVENT(PM_L1_DCACHE_RELOAD_VALID, 0x300f6)
+EVENT(PM_VSU_SCALAR_DOUBLE_ISSUED, 0x0b888)
+EVENT(PM_L3_PREF_HIT, 0x3f080)
+EVENT(PM_MRK_PTEG_FROM_L31_MOD, 0x1d054)
+EVENT(PM_CMPLU_STALL_STORE, 0x2004a)
+EVENT(PM_MRK_FXU_FIN, 0x20038)
+EVENT(PM_PMC4_OVERFLOW, 0x10010)
+EVENT(PM_MRK_PTEG_FROM_L3, 0x2d050)
+EVENT(PM_LSU0_LMQ_LHR_MERGE, 0x0d098)
+EVENT(PM_BTAC_HIT, 0x0508a)
+EVENT(PM_L3_RD_BUSY, 0x4f082)
+EVENT(PM_LSU0_L1_SW_PREF, 0x0c09c)
+EVENT(PM_INST_FROM_L2MISS, 0x44048)
+EVENT(PM_LSU0_DC_PREF_STREAM_ALLOC, 0x0d0a8)
+EVENT(PM_L2_ST, 0x16082)
+EVENT(PM_VSU0_DENORM, 0x0a0ac)
+EVENT(PM_MRK_DATA_FROM_DL2L3_SHR, 0x3d044)
+EVENT(PM_BR_PRED_CR_TA, 0x048aa)
+EVENT(PM_VSU0_FCONV, 0x0a0b0)
+EVENT(PM_MRK_LSU_FLUSH_ULD, 0x0d084)
+EVENT(PM_BTAC_MISS, 0x05088)
+EVENT(PM_MRK_LD_MISS_EXPOSED_CYC_COUNT, 0x1003f)
+EVENT(PM_MRK_DATA_FROM_L2, 0x1d040)
+EVENT(PM_LSU_DCACHE_RELOAD_VALID, 0x0d0a2)
+EVENT(PM_VSU_FMA, 0x0a884)
+EVENT(PM_LSU0_FLUSH_SRQ, 0x0c0bc)
+EVENT(PM_LSU1_L1_PREF, 0x0d0ba)
+EVENT(PM_IOPS_CMPL, 0x10014)
+EVENT(PM_L2_SYS_PUMP, 0x36482)
+EVENT(PM_L2_RCLD_BUSY_RC_FULL, 0x46282)
+EVENT(PM_LSU_LMQ_S0_ALLOC, 0x0d0a1)
+EVENT(PM_FLUSH_DISP_SYNC, 0x02088)
+EVENT(PM_MRK_DATA_FROM_DL2L3_MOD_CYC, 0x4002a)
+EVENT(PM_L2_IC_INV, 0x26180)
+EVENT(PM_MRK_DATA_FROM_L21_MOD_CYC, 0x40024)
+EVENT(PM_L3_PREF_LDST, 0x0d8ac)
+EVENT(PM_LSU_SRQ_EMPTY_CYC, 0x40008)
+EVENT(PM_LSU_LMQ_S0_VALID, 0x0d0a0)
+EVENT(PM_FLUSH_PARTIAL, 0x02086)
+EVENT(PM_VSU1_FMA_DOUBLE, 0x0a092)
+EVENT(PM_1PLUS_PPC_DISP, 0x400f2)
+EVENT(PM_DATA_FROM_L2MISS, 0x200fe)
+EVENT(PM_SUSPENDED, 0x00000)
+EVENT(PM_VSU0_FMA, 0x0a084)
+EVENT(PM_CMPLU_STALL_SCALAR, 0x40012)
+EVENT(PM_STCX_FAIL, 0x0c09a)
+EVENT(PM_VSU0_FSQRT_FDIV_DOUBLE, 0x0a094)
+EVENT(PM_DC_PREF_DST, 0x0d0b0)
+EVENT(PM_VSU1_SCAL_SINGLE_ISSUED, 0x0b086)
+EVENT(PM_L3_HIT, 0x1f080)
+EVENT(PM_L2_GLOB_GUESS_WRONG, 0x26482)
+EVENT(PM_MRK_DFU_FIN, 0x20032)
+EVENT(PM_INST_FROM_L1, 0x04080)
+EVENT(PM_BRU_FIN, 0x10068)
+EVENT(PM_IC_DEMAND_REQ, 0x04088)
+EVENT(PM_VSU1_FSQRT_FDIV_DOUBLE, 0x0a096)
+EVENT(PM_VSU1_FMA, 0x0a086)
+EVENT(PM_MRK_LD_MISS_L1, 0x20036)
+EVENT(PM_VSU0_2FLOP_DOUBLE, 0x0a08c)
+EVENT(PM_LSU_DC_PREF_STRIDED_STREAM_CONFIRM, 0x0d8bc)
+EVENT(PM_INST_PTEG_FROM_L31_SHR, 0x2e056)
+EVENT(PM_MRK_LSU_REJECT_ERAT_MISS, 0x30064)
+EVENT(PM_MRK_DATA_FROM_L2MISS, 0x4d048)
+EVENT(PM_DATA_FROM_RL2L3_SHR, 0x1c04c)
+EVENT(PM_INST_FROM_PREF, 0x14046)
+EVENT(PM_VSU1_SQ, 0x0b09e)
+EVENT(PM_L2_LD_DISP, 0x36180)
+EVENT(PM_L2_DISP_ALL, 0x46080)
+EVENT(PM_THRD_GRP_CMPL_BOTH_CYC, 0x10012)
+EVENT(PM_VSU_FSQRT_FDIV_DOUBLE, 0x0a894)
+EVENT(PM_BR_MPRED, 0x400f6)
+EVENT(PM_INST_PTEG_FROM_DL2L3_SHR, 0x3e054)
+EVENT(PM_VSU_1FLOP, 0x0a880)
+EVENT(PM_HV_CYC, 0x2000a)
+EVENT(PM_MRK_LSU_FIN, 0x40032)
+EVENT(PM_MRK_DATA_FROM_RL2L3_SHR, 0x1d04c)
+EVENT(PM_DTLB_MISS_16M, 0x4c05e)
+EVENT(PM_LSU1_LMQ_LHR_MERGE, 0x0d09a)
+EVENT(PM_IFU_FIN, 0x40066)
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index d1821b8bbc4..56c67bca2f7 100644
--- a/arch/powerpc/perf/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
@@ -53,37 +53,13 @@
/*
* Power7 event codes.
*/
-#define PME_PM_CYC 0x1e
-#define PME_PM_GCT_NOSLOT_CYC 0x100f8
-#define PME_PM_CMPLU_STALL 0x4000a
-#define PME_PM_INST_CMPL 0x2
-#define PME_PM_LD_REF_L1 0xc880
-#define PME_PM_LD_MISS_L1 0x400f0
-#define PME_PM_BRU_FIN 0x10068
-#define PME_PM_BR_MPRED 0x400f6
-
-#define PME_PM_CMPLU_STALL_FXU 0x20014
-#define PME_PM_CMPLU_STALL_DIV 0x40014
-#define PME_PM_CMPLU_STALL_SCALAR 0x40012
-#define PME_PM_CMPLU_STALL_SCALAR_LONG 0x20018
-#define PME_PM_CMPLU_STALL_VECTOR 0x2001c
-#define PME_PM_CMPLU_STALL_VECTOR_LONG 0x4004a
-#define PME_PM_CMPLU_STALL_LSU 0x20012
-#define PME_PM_CMPLU_STALL_REJECT 0x40016
-#define PME_PM_CMPLU_STALL_ERAT_MISS 0x40018
-#define PME_PM_CMPLU_STALL_DCACHE_MISS 0x20016
-#define PME_PM_CMPLU_STALL_STORE 0x2004a
-#define PME_PM_CMPLU_STALL_THRD 0x1001c
-#define PME_PM_CMPLU_STALL_IFU 0x4004c
-#define PME_PM_CMPLU_STALL_BRU 0x4004e
-#define PME_PM_GCT_NOSLOT_IC_MISS 0x2001a
-#define PME_PM_GCT_NOSLOT_BR_MPRED 0x4001a
-#define PME_PM_GCT_NOSLOT_BR_MPRED_IC_MISS 0x4001c
-#define PME_PM_GRP_CMPL 0x30004
-#define PME_PM_1PLUS_PPC_CMPL 0x100f2
-#define PME_PM_CMPLU_STALL_DFU 0x2003c
-#define PME_PM_RUN_CYC 0x200f4
-#define PME_PM_RUN_INST_CMPL 0x400fa
+#define EVENT(_name, _code) \
+ PME_##_name = _code,
+
+enum {
+#include "power7-events-list.h"
+};
+#undef EVENT
/*
* Layout of constraint bits:
@@ -398,96 +374,36 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
};
-GENERIC_EVENT_ATTR(cpu-cycles, CYC);
-GENERIC_EVENT_ATTR(stalled-cycles-frontend, GCT_NOSLOT_CYC);
-GENERIC_EVENT_ATTR(stalled-cycles-backend, CMPLU_STALL);
-GENERIC_EVENT_ATTR(instructions, INST_CMPL);
-GENERIC_EVENT_ATTR(cache-references, LD_REF_L1);
-GENERIC_EVENT_ATTR(cache-misses, LD_MISS_L1);
-GENERIC_EVENT_ATTR(branch-instructions, BRU_FIN);
-GENERIC_EVENT_ATTR(branch-misses, BR_MPRED);
-
-POWER_EVENT_ATTR(CYC, CYC);
-POWER_EVENT_ATTR(GCT_NOSLOT_CYC, GCT_NOSLOT_CYC);
-POWER_EVENT_ATTR(CMPLU_STALL, CMPLU_STALL);
-POWER_EVENT_ATTR(INST_CMPL, INST_CMPL);
-POWER_EVENT_ATTR(LD_REF_L1, LD_REF_L1);
-POWER_EVENT_ATTR(LD_MISS_L1, LD_MISS_L1);
-POWER_EVENT_ATTR(BRU_FIN, BRU_FIN)
-POWER_EVENT_ATTR(BR_MPRED, BR_MPRED);
-
-POWER_EVENT_ATTR(CMPLU_STALL_FXU, CMPLU_STALL_FXU);
-POWER_EVENT_ATTR(CMPLU_STALL_DIV, CMPLU_STALL_DIV);
-POWER_EVENT_ATTR(CMPLU_STALL_SCALAR, CMPLU_STALL_SCALAR);
-POWER_EVENT_ATTR(CMPLU_STALL_SCALAR_LONG, CMPLU_STALL_SCALAR_LONG);
-POWER_EVENT_ATTR(CMPLU_STALL_VECTOR, CMPLU_STALL_VECTOR);
-POWER_EVENT_ATTR(CMPLU_STALL_VECTOR_LONG, CMPLU_STALL_VECTOR_LONG);
-POWER_EVENT_ATTR(CMPLU_STALL_LSU, CMPLU_STALL_LSU);
-POWER_EVENT_ATTR(CMPLU_STALL_REJECT, CMPLU_STALL_REJECT);
-
-POWER_EVENT_ATTR(CMPLU_STALL_ERAT_MISS, CMPLU_STALL_ERAT_MISS);
-POWER_EVENT_ATTR(CMPLU_STALL_DCACHE_MISS, CMPLU_STALL_DCACHE_MISS);
-POWER_EVENT_ATTR(CMPLU_STALL_STORE, CMPLU_STALL_STORE);
-POWER_EVENT_ATTR(CMPLU_STALL_THRD, CMPLU_STALL_THRD);
-POWER_EVENT_ATTR(CMPLU_STALL_IFU, CMPLU_STALL_IFU);
-POWER_EVENT_ATTR(CMPLU_STALL_BRU, CMPLU_STALL_BRU);
-POWER_EVENT_ATTR(GCT_NOSLOT_IC_MISS, GCT_NOSLOT_IC_MISS);
-
-POWER_EVENT_ATTR(GCT_NOSLOT_BR_MPRED, GCT_NOSLOT_BR_MPRED);
-POWER_EVENT_ATTR(GCT_NOSLOT_BR_MPRED_IC_MISS, GCT_NOSLOT_BR_MPRED_IC_MISS);
-POWER_EVENT_ATTR(GRP_CMPL, GRP_CMPL);
-POWER_EVENT_ATTR(1PLUS_PPC_CMPL, 1PLUS_PPC_CMPL);
-POWER_EVENT_ATTR(CMPLU_STALL_DFU, CMPLU_STALL_DFU);
-POWER_EVENT_ATTR(RUN_CYC, RUN_CYC);
-POWER_EVENT_ATTR(RUN_INST_CMPL, RUN_INST_CMPL);
+GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_GCT_NOSLOT_CYC);
+GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
+GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
+GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
+GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
+GENERIC_EVENT_ATTR(branch-instructions, PM_BRU_FIN);
+GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED);
+
+#define EVENT(_name, _code) POWER_EVENT_ATTR(_name, _name);
+#include "power7-events-list.h"
+#undef EVENT
+
+#define EVENT(_name, _code) POWER_EVENT_PTR(_name),
static struct attribute *power7_events_attr[] = {
- GENERIC_EVENT_PTR(CYC),
- GENERIC_EVENT_PTR(GCT_NOSLOT_CYC),
- GENERIC_EVENT_PTR(CMPLU_STALL),
- GENERIC_EVENT_PTR(INST_CMPL),
- GENERIC_EVENT_PTR(LD_REF_L1),
- GENERIC_EVENT_PTR(LD_MISS_L1),
- GENERIC_EVENT_PTR(BRU_FIN),
- GENERIC_EVENT_PTR(BR_MPRED),
-
- POWER_EVENT_PTR(CYC),
- POWER_EVENT_PTR(GCT_NOSLOT_CYC),
- POWER_EVENT_PTR(CMPLU_STALL),
- POWER_EVENT_PTR(INST_CMPL),
- POWER_EVENT_PTR(LD_REF_L1),
- POWER_EVENT_PTR(LD_MISS_L1),
- POWER_EVENT_PTR(BRU_FIN),
- POWER_EVENT_PTR(BR_MPRED),
-
- POWER_EVENT_PTR(CMPLU_STALL_FXU),
- POWER_EVENT_PTR(CMPLU_STALL_DIV),
- POWER_EVENT_PTR(CMPLU_STALL_SCALAR),
- POWER_EVENT_PTR(CMPLU_STALL_SCALAR_LONG),
- POWER_EVENT_PTR(CMPLU_STALL_VECTOR),
- POWER_EVENT_PTR(CMPLU_STALL_VECTOR_LONG),
- POWER_EVENT_PTR(CMPLU_STALL_LSU),
- POWER_EVENT_PTR(CMPLU_STALL_REJECT),
-
- POWER_EVENT_PTR(CMPLU_STALL_ERAT_MISS),
- POWER_EVENT_PTR(CMPLU_STALL_DCACHE_MISS),
- POWER_EVENT_PTR(CMPLU_STALL_STORE),
- POWER_EVENT_PTR(CMPLU_STALL_THRD),
- POWER_EVENT_PTR(CMPLU_STALL_IFU),
- POWER_EVENT_PTR(CMPLU_STALL_BRU),
- POWER_EVENT_PTR(GCT_NOSLOT_IC_MISS),
- POWER_EVENT_PTR(GCT_NOSLOT_BR_MPRED),
-
- POWER_EVENT_PTR(GCT_NOSLOT_BR_MPRED_IC_MISS),
- POWER_EVENT_PTR(GRP_CMPL),
- POWER_EVENT_PTR(1PLUS_PPC_CMPL),
- POWER_EVENT_PTR(CMPLU_STALL_DFU),
- POWER_EVENT_PTR(RUN_CYC),
- POWER_EVENT_PTR(RUN_INST_CMPL),
+ GENERIC_EVENT_PTR(PM_CYC),
+ GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC),
+ GENERIC_EVENT_PTR(PM_CMPLU_STALL),
+ GENERIC_EVENT_PTR(PM_INST_CMPL),
+ GENERIC_EVENT_PTR(PM_LD_REF_L1),
+ GENERIC_EVENT_PTR(PM_LD_MISS_L1),
+ GENERIC_EVENT_PTR(PM_BRU_FIN),
+ GENERIC_EVENT_PTR(PM_BR_MPRED),
+
+ #include "power7-events-list.h"
+ #undef EVENT
NULL
};
-
static struct attribute_group power7_pmu_events_group = {
.name = "events",
.attrs = power7_events_attr,
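The power7-pmu.c hunks above replace the hand-maintained PME_* defines and the long attribute lists with a single X-macro list: power7-events-list.h is included several times, each time under a different EVENT() definition, so every event is spelled out exactly once. A minimal sketch of the same pattern follows; the my-events-list.h header and the MY_*/my_events names are hypothetical and only illustrate the technique.

/* my-events-list.h would contain nothing but EVENT() lines, e.g.
 *	EVENT(CYCLES,       0x1e)
 *	EVENT(INSTRUCTIONS, 0x02)
 */

/* First expansion: turn the list into enum constants. */
#define EVENT(_name, _code) MY_##_name = _code,
enum {
#include "my-events-list.h"
};
#undef EVENT

/* Second expansion: turn the same list into a name/code table. */
#define EVENT(_name, _code) { #_name, _code },
static const struct { const char *name; unsigned int code; } my_events[] = {
#include "my-events-list.h"
};
#undef EVENT

In the patch itself the later expansions produce the POWER_EVENT_ATTR() definitions and the POWER_EVENT_PTR() entries of power7_events_attr[], so adding an event becomes a one-line change to the list header.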
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index 7466374d278..2ee4a707f0d 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -118,7 +118,7 @@
(EVENT_UNIT_MASK << EVENT_UNIT_SHIFT) | \
(EVENT_COMBINE_MASK << EVENT_COMBINE_SHIFT) | \
(EVENT_MARKED_MASK << EVENT_MARKED_SHIFT) | \
- (EVENT_EBB_MASK << EVENT_CONFIG_EBB_SHIFT) | \
+ (EVENT_EBB_MASK << PERF_EVENT_CONFIG_EBB_SHIFT) | \
EVENT_PSEL_MASK)
/* MMCRA IFM bits - POWER8 */
@@ -233,10 +233,10 @@ static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long
pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
- ebb = (event >> EVENT_CONFIG_EBB_SHIFT) & EVENT_EBB_MASK;
+ ebb = (event >> PERF_EVENT_CONFIG_EBB_SHIFT) & EVENT_EBB_MASK;
/* Clear the EBB bit in the event, so event checks work below */
- event &= ~(EVENT_EBB_MASK << EVENT_CONFIG_EBB_SHIFT);
+ event &= ~(EVENT_EBB_MASK << PERF_EVENT_CONFIG_EBB_SHIFT);
if (pmc) {
if (pmc > 6)
diff --git a/arch/powerpc/platforms/44x/warp.c b/arch/powerpc/platforms/44x/warp.c
index 4cfa49901c0..534574a97ec 100644
--- a/arch/powerpc/platforms/44x/warp.c
+++ b/arch/powerpc/platforms/44x/warp.c
@@ -16,7 +16,6 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/of_gpio.h>
-#include <linux/of_i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
diff --git a/arch/powerpc/platforms/ps3/time.c b/arch/powerpc/platforms/ps3/time.c
index cba1e6be68e..ce73ce86561 100644
--- a/arch/powerpc/platforms/ps3/time.c
+++ b/arch/powerpc/platforms/ps3/time.c
@@ -90,7 +90,7 @@ static int __init ps3_rtc_init(void)
pdev = platform_device_register_simple("rtc-ps3", -1, NULL, 0);
- return PTR_RET(pdev);
+ return PTR_ERR_OR_ZERO(pdev);
}
module_init(ps3_rtc_init);
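This call site, like rtc_cmos_setup.c and hypfs_dbfs.c further down, moves from PTR_RET() to the more descriptively named PTR_ERR_OR_ZERO(). As a sketch of the semantics (not the kernel's exact definition):

/* Collapse an ERR_PTR-encoded pointer into 0 or a negative errno. */
static inline int ptr_err_or_zero_sketch(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);	/* errno encoded in the pointer value */
	return 0;			/* valid (or NULL) pointer: success */
}

platform_device_register_simple() returns either a usable platform_device pointer or an ERR_PTR value, so the initcall can hand back 0 or the errno directly.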
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 9f8671a4455..d276cd3edd8 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -539,65 +539,6 @@ static int zip_oops(size_t text_len)
}
#ifdef CONFIG_PSTORE
-/* Derived from logfs_uncompress */
-int nvram_decompress(void *in, void *out, size_t inlen, size_t outlen)
-{
- int err, ret;
-
- ret = -EIO;
- err = zlib_inflateInit(&stream);
- if (err != Z_OK)
- goto error;
-
- stream.next_in = in;
- stream.avail_in = inlen;
- stream.total_in = 0;
- stream.next_out = out;
- stream.avail_out = outlen;
- stream.total_out = 0;
-
- err = zlib_inflate(&stream, Z_FINISH);
- if (err != Z_STREAM_END)
- goto error;
-
- err = zlib_inflateEnd(&stream);
- if (err != Z_OK)
- goto error;
-
- ret = stream.total_out;
-error:
- return ret;
-}
-
-static int unzip_oops(char *oops_buf, char *big_buf)
-{
- struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
- u64 timestamp = oops_hdr->timestamp;
- char *big_oops_data = NULL;
- char *oops_data_buf = NULL;
- size_t big_oops_data_sz;
- int unzipped_len;
-
- big_oops_data = big_buf + sizeof(struct oops_log_info);
- big_oops_data_sz = big_oops_buf_sz - sizeof(struct oops_log_info);
- oops_data_buf = oops_buf + sizeof(struct oops_log_info);
-
- unzipped_len = nvram_decompress(oops_data_buf, big_oops_data,
- oops_hdr->report_length,
- big_oops_data_sz);
-
- if (unzipped_len < 0) {
- pr_err("nvram: decompression failed; returned %d\n",
- unzipped_len);
- return -1;
- }
- oops_hdr = (struct oops_log_info *)big_buf;
- oops_hdr->version = OOPS_HDR_VERSION;
- oops_hdr->report_length = (u16) unzipped_len;
- oops_hdr->timestamp = timestamp;
- return 0;
-}
-
static int nvram_pstore_open(struct pstore_info *psi)
{
/* Reset the iterator to start reading partitions again */
@@ -613,7 +554,7 @@ static int nvram_pstore_open(struct pstore_info *psi)
* @part: pstore writes data to registered buffer in parts,
* part number will indicate the same.
* @count: Indicates oops count
- * @hsize: Size of header added by pstore
+ * @compressed: Flag to indicate the log is compressed
* @size: number of bytes written to the registered buffer
* @psi: registered pstore_info structure
*
@@ -624,7 +565,7 @@ static int nvram_pstore_open(struct pstore_info *psi)
static int nvram_pstore_write(enum pstore_type_id type,
enum kmsg_dump_reason reason,
u64 *id, unsigned int part, int count,
- size_t hsize, size_t size,
+ bool compressed, size_t size,
struct pstore_info *psi)
{
int rc;
@@ -640,30 +581,11 @@ static int nvram_pstore_write(enum pstore_type_id type,
oops_hdr->report_length = (u16) size;
oops_hdr->timestamp = get_seconds();
- if (big_oops_buf) {
- rc = zip_oops(size);
- /*
- * If compression fails copy recent log messages from
- * big_oops_buf to oops_data.
- */
- if (rc != 0) {
- size_t diff = size - oops_data_sz + hsize;
-
- if (size > oops_data_sz) {
- memcpy(oops_data, big_oops_buf, hsize);
- memcpy(oops_data + hsize, big_oops_buf + diff,
- oops_data_sz - hsize);
-
- oops_hdr->report_length = (u16) oops_data_sz;
- } else
- memcpy(oops_data, big_oops_buf, size);
- } else
- err_type = ERR_TYPE_KERNEL_PANIC_GZ;
- }
+ if (compressed)
+ err_type = ERR_TYPE_KERNEL_PANIC_GZ;
rc = nvram_write_os_partition(&oops_log_partition, oops_buf,
- (int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type,
- count);
+ (int) (sizeof(*oops_hdr) + size), err_type, count);
if (rc != 0)
return rc;
@@ -679,16 +601,15 @@ static int nvram_pstore_write(enum pstore_type_id type,
*/
static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
int *count, struct timespec *time, char **buf,
- struct pstore_info *psi)
+ bool *compressed, struct pstore_info *psi)
{
struct oops_log_info *oops_hdr;
unsigned int err_type, id_no, size = 0;
struct nvram_os_partition *part = NULL;
- char *buff = NULL, *big_buff = NULL;
- int rc, sig = 0;
+ char *buff = NULL;
+ int sig = 0;
loff_t p;
-read_partition:
read_type++;
switch (nvram_type_ids[read_type]) {
@@ -749,30 +670,32 @@ read_partition:
*id = id_no;
if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
- oops_hdr = (struct oops_log_info *)buff;
- *buf = buff + sizeof(*oops_hdr);
-
- if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) {
- big_buff = kmalloc(big_oops_buf_sz, GFP_KERNEL);
- if (!big_buff)
- return -ENOMEM;
-
- rc = unzip_oops(buff, big_buff);
+ size_t length, hdr_size;
- if (rc != 0) {
- kfree(buff);
- kfree(big_buff);
- goto read_partition;
- }
-
- oops_hdr = (struct oops_log_info *)big_buff;
- *buf = big_buff + sizeof(*oops_hdr);
- kfree(buff);
+ oops_hdr = (struct oops_log_info *)buff;
+ if (oops_hdr->version < OOPS_HDR_VERSION) {
+ /* Old format oops header had 2-byte record size */
+ hdr_size = sizeof(u16);
+ length = oops_hdr->version;
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ } else {
+ hdr_size = sizeof(*oops_hdr);
+ length = oops_hdr->report_length;
+ time->tv_sec = oops_hdr->timestamp;
+ time->tv_nsec = 0;
}
+ *buf = kmalloc(length, GFP_KERNEL);
+ if (*buf == NULL)
+ return -ENOMEM;
+ memcpy(*buf, buff + hdr_size, length);
+ kfree(buff);
- time->tv_sec = oops_hdr->timestamp;
- time->tv_nsec = 0;
- return oops_hdr->report_length;
+ if (err_type == ERR_TYPE_KERNEL_PANIC_GZ)
+ *compressed = true;
+ else
+ *compressed = false;
+ return length;
}
*buf = buff;
@@ -791,13 +714,8 @@ static int nvram_pstore_init(void)
{
int rc = 0;
- if (big_oops_buf) {
- nvram_pstore_info.buf = big_oops_buf;
- nvram_pstore_info.bufsize = big_oops_buf_sz;
- } else {
- nvram_pstore_info.buf = oops_data;
- nvram_pstore_info.bufsize = oops_data_sz;
- }
+ nvram_pstore_info.buf = oops_data;
+ nvram_pstore_info.bufsize = oops_data_sz;
rc = pstore_register(&nvram_pstore_info);
if (rc != 0)
@@ -836,6 +754,11 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
oops_data = oops_buf + sizeof(struct oops_log_info);
oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info);
+ rc = nvram_pstore_init();
+
+ if (!rc)
+ return;
+
/*
* Figure compression (preceded by elimination of each line's <n>
* severity prefix) will reduce the oops/panic report to at most
@@ -844,8 +767,8 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
big_oops_buf_sz = (oops_data_sz * 100) / 45;
big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
if (big_oops_buf) {
- stream.workspace = kmalloc(zlib_deflate_workspacesize(
- WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
+ stream.workspace = kmalloc(zlib_deflate_workspacesize(
+ WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
if (!stream.workspace) {
pr_err("nvram: No memory for compression workspace; "
"skipping compression of %s partition data\n",
@@ -859,11 +782,6 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists)
stream.workspace = NULL;
}
- rc = nvram_pstore_init();
-
- if (!rc)
- return;
-
rc = kmsg_dump_register(&nvram_kmsg_dumper);
if (rc != 0) {
pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc);
diff --git a/arch/powerpc/sysdev/rtc_cmos_setup.c b/arch/powerpc/sysdev/rtc_cmos_setup.c
index af79e1ea74b..af0f9beddca 100644
--- a/arch/powerpc/sysdev/rtc_cmos_setup.c
+++ b/arch/powerpc/sysdev/rtc_cmos_setup.c
@@ -62,7 +62,7 @@ static int __init add_rtc(void)
pd = platform_device_register_simple("rtc_cmos", -1,
&res[0], num_res);
- return PTR_RET(pd);
+ return PTR_ERR_OR_ZERO(pd);
}
fs_initcall(add_rtc);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 22f75b504f7..8b7892bf6d8 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -116,8 +116,10 @@ config S390
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_TRACE_MCOUNT_TEST
+ select HAVE_GENERIC_HARDIRQS
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
+ select HAVE_KERNEL_LZ4
select HAVE_KERNEL_LZMA
select HAVE_KERNEL_LZO
select HAVE_KERNEL_XZ
@@ -227,11 +229,12 @@ config MARCH_Z196
not work on older machines.
config MARCH_ZEC12
- bool "IBM zEC12"
+ bool "IBM zBC12 and zEC12"
select HAVE_MARCH_ZEC12_FEATURES if 64BIT
help
- Select this to enable optimizations for IBM zEC12 (2827 series). The
- kernel will be slightly faster but will not work on older machines.
+ Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and
+ 2827 series). The kernel will be slightly faster but will not work on
+ older machines.
endchoice
@@ -443,6 +446,16 @@ config PCI_NR_FUNCTIONS
This allows you to specify the maximum number of PCI functions which
this kernel will support.
+config PCI_NR_MSI
+ int "Maximum number of MSI interrupts (64-32768)"
+ range 64 32768
+ default "256"
+ help
+ This defines the number of virtual interrupts the kernel will
+ provide for MSI interrupts. If you configure your system to have
+	  too few, drivers will fail to allocate MSI interrupts for all
+ PCI devices.
+
source "drivers/pci/Kconfig"
source "drivers/pci/pcie/Kconfig"
source "drivers/pci/hotplug/Kconfig"
@@ -709,6 +722,7 @@ config S390_GUEST
def_bool y
prompt "s390 support for virtio devices"
depends on 64BIT
+ select TTY
select VIRTUALIZATION
select VIRTIO
select VIRTIO_CONSOLE
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile
index 3ad8f61c998..866ecbe670e 100644
--- a/arch/s390/boot/compressed/Makefile
+++ b/arch/s390/boot/compressed/Makefile
@@ -6,9 +6,9 @@
BITS := $(if $(CONFIG_64BIT),64,31)
-targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \
- vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo misc.o piggy.o \
- sizes.h head$(BITS).o
+targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2
+targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4
+targets += misc.o piggy.o sizes.h head$(BITS).o
KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
@@ -48,6 +48,7 @@ vmlinux.bin.all-y := $(obj)/vmlinux.bin
suffix-$(CONFIG_KERNEL_GZIP) := gz
suffix-$(CONFIG_KERNEL_BZIP2) := bz2
+suffix-$(CONFIG_KERNEL_LZ4) := lz4
suffix-$(CONFIG_KERNEL_LZMA) := lzma
suffix-$(CONFIG_KERNEL_LZO) := lzo
suffix-$(CONFIG_KERNEL_XZ) := xz
@@ -56,6 +57,8 @@ $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y)
$(call if_changed,gzip)
$(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y)
$(call if_changed,bzip2)
+$(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y)
+ $(call if_changed,lz4)
$(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y)
$(call if_changed,lzma)
$(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y)
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index c4c6a1cf221..57cbaff1f39 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -47,6 +47,10 @@ static unsigned long free_mem_end_ptr;
#include "../../../../lib/decompress_bunzip2.c"
#endif
+#ifdef CONFIG_KERNEL_LZ4
+#include "../../../../lib/decompress_unlz4.c"
+#endif
+
#ifdef CONFIG_KERNEL_LZMA
#include "../../../../lib/decompress_unlzma.c"
#endif
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index f41e0ef7fdf..79f2ac55253 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -18,26 +18,23 @@
#define UPDATE_FILE_MODE 0220
#define DIR_MODE 0550
-extern struct dentry *hypfs_mkdir(struct super_block *sb, struct dentry *parent,
- const char *name);
+extern struct dentry *hypfs_mkdir(struct dentry *parent, const char *name);
-extern struct dentry *hypfs_create_u64(struct super_block *sb,
- struct dentry *dir, const char *name,
+extern struct dentry *hypfs_create_u64(struct dentry *dir, const char *name,
__u64 value);
-extern struct dentry *hypfs_create_str(struct super_block *sb,
- struct dentry *dir, const char *name,
+extern struct dentry *hypfs_create_str(struct dentry *dir, const char *name,
char *string);
/* LPAR Hypervisor */
extern int hypfs_diag_init(void);
extern void hypfs_diag_exit(void);
-extern int hypfs_diag_create_files(struct super_block *sb, struct dentry *root);
+extern int hypfs_diag_create_files(struct dentry *root);
/* VM Hypervisor */
extern int hypfs_vm_init(void);
extern void hypfs_vm_exit(void);
-extern int hypfs_vm_create_files(struct super_block *sb, struct dentry *root);
+extern int hypfs_vm_create_files(struct dentry *root);
/* debugfs interface */
struct hypfs_dbfs_file;
diff --git a/arch/s390/hypfs/hypfs_dbfs.c b/arch/s390/hypfs/hypfs_dbfs.c
index bb5dd496614..17ab8b7b53c 100644
--- a/arch/s390/hypfs/hypfs_dbfs.c
+++ b/arch/s390/hypfs/hypfs_dbfs.c
@@ -105,7 +105,7 @@ void hypfs_dbfs_remove_file(struct hypfs_dbfs_file *df)
int hypfs_dbfs_init(void)
{
dbfs_dir = debugfs_create_dir("s390_hypfs", NULL);
- return PTR_RET(dbfs_dir);
+ return PTR_ERR_OR_ZERO(dbfs_dir);
}
void hypfs_dbfs_exit(void)
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 138893e5f73..5eeffeefae0 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -623,8 +623,7 @@ void hypfs_diag_exit(void)
* *******************************************
*/
-static int hypfs_create_cpu_files(struct super_block *sb,
- struct dentry *cpus_dir, void *cpu_info)
+static int hypfs_create_cpu_files(struct dentry *cpus_dir, void *cpu_info)
{
struct dentry *cpu_dir;
char buffer[TMP_SIZE];
@@ -632,30 +631,29 @@ static int hypfs_create_cpu_files(struct super_block *sb,
snprintf(buffer, TMP_SIZE, "%d", cpu_info__cpu_addr(diag204_info_type,
cpu_info));
- cpu_dir = hypfs_mkdir(sb, cpus_dir, buffer);
- rc = hypfs_create_u64(sb, cpu_dir, "mgmtime",
+ cpu_dir = hypfs_mkdir(cpus_dir, buffer);
+ rc = hypfs_create_u64(cpu_dir, "mgmtime",
cpu_info__acc_time(diag204_info_type, cpu_info) -
cpu_info__lp_time(diag204_info_type, cpu_info));
if (IS_ERR(rc))
return PTR_ERR(rc);
- rc = hypfs_create_u64(sb, cpu_dir, "cputime",
+ rc = hypfs_create_u64(cpu_dir, "cputime",
cpu_info__lp_time(diag204_info_type, cpu_info));
if (IS_ERR(rc))
return PTR_ERR(rc);
if (diag204_info_type == INFO_EXT) {
- rc = hypfs_create_u64(sb, cpu_dir, "onlinetime",
+ rc = hypfs_create_u64(cpu_dir, "onlinetime",
cpu_info__online_time(diag204_info_type,
cpu_info));
if (IS_ERR(rc))
return PTR_ERR(rc);
}
diag224_idx2name(cpu_info__ctidx(diag204_info_type, cpu_info), buffer);
- rc = hypfs_create_str(sb, cpu_dir, "type", buffer);
+ rc = hypfs_create_str(cpu_dir, "type", buffer);
return PTR_RET(rc);
}
-static void *hypfs_create_lpar_files(struct super_block *sb,
- struct dentry *systems_dir, void *part_hdr)
+static void *hypfs_create_lpar_files(struct dentry *systems_dir, void *part_hdr)
{
struct dentry *cpus_dir;
struct dentry *lpar_dir;
@@ -665,16 +663,16 @@ static void *hypfs_create_lpar_files(struct super_block *sb,
part_hdr__part_name(diag204_info_type, part_hdr, lpar_name);
lpar_name[LPAR_NAME_LEN] = 0;
- lpar_dir = hypfs_mkdir(sb, systems_dir, lpar_name);
+ lpar_dir = hypfs_mkdir(systems_dir, lpar_name);
if (IS_ERR(lpar_dir))
return lpar_dir;
- cpus_dir = hypfs_mkdir(sb, lpar_dir, "cpus");
+ cpus_dir = hypfs_mkdir(lpar_dir, "cpus");
if (IS_ERR(cpus_dir))
return cpus_dir;
cpu_info = part_hdr + part_hdr__size(diag204_info_type);
for (i = 0; i < part_hdr__rcpus(diag204_info_type, part_hdr); i++) {
int rc;
- rc = hypfs_create_cpu_files(sb, cpus_dir, cpu_info);
+ rc = hypfs_create_cpu_files(cpus_dir, cpu_info);
if (rc)
return ERR_PTR(rc);
cpu_info += cpu_info__size(diag204_info_type);
@@ -682,8 +680,7 @@ static void *hypfs_create_lpar_files(struct super_block *sb,
return cpu_info;
}
-static int hypfs_create_phys_cpu_files(struct super_block *sb,
- struct dentry *cpus_dir, void *cpu_info)
+static int hypfs_create_phys_cpu_files(struct dentry *cpus_dir, void *cpu_info)
{
struct dentry *cpu_dir;
char buffer[TMP_SIZE];
@@ -691,32 +688,31 @@ static int hypfs_create_phys_cpu_files(struct super_block *sb,
snprintf(buffer, TMP_SIZE, "%i", phys_cpu__cpu_addr(diag204_info_type,
cpu_info));
- cpu_dir = hypfs_mkdir(sb, cpus_dir, buffer);
+ cpu_dir = hypfs_mkdir(cpus_dir, buffer);
if (IS_ERR(cpu_dir))
return PTR_ERR(cpu_dir);
- rc = hypfs_create_u64(sb, cpu_dir, "mgmtime",
+ rc = hypfs_create_u64(cpu_dir, "mgmtime",
phys_cpu__mgm_time(diag204_info_type, cpu_info));
if (IS_ERR(rc))
return PTR_ERR(rc);
diag224_idx2name(phys_cpu__ctidx(diag204_info_type, cpu_info), buffer);
- rc = hypfs_create_str(sb, cpu_dir, "type", buffer);
+ rc = hypfs_create_str(cpu_dir, "type", buffer);
return PTR_RET(rc);
}
-static void *hypfs_create_phys_files(struct super_block *sb,
- struct dentry *parent_dir, void *phys_hdr)
+static void *hypfs_create_phys_files(struct dentry *parent_dir, void *phys_hdr)
{
int i;
void *cpu_info;
struct dentry *cpus_dir;
- cpus_dir = hypfs_mkdir(sb, parent_dir, "cpus");
+ cpus_dir = hypfs_mkdir(parent_dir, "cpus");
if (IS_ERR(cpus_dir))
return cpus_dir;
cpu_info = phys_hdr + phys_hdr__size(diag204_info_type);
for (i = 0; i < phys_hdr__cpus(diag204_info_type, phys_hdr); i++) {
int rc;
- rc = hypfs_create_phys_cpu_files(sb, cpus_dir, cpu_info);
+ rc = hypfs_create_phys_cpu_files(cpus_dir, cpu_info);
if (rc)
return ERR_PTR(rc);
cpu_info += phys_cpu__size(diag204_info_type);
@@ -724,7 +720,7 @@ static void *hypfs_create_phys_files(struct super_block *sb,
return cpu_info;
}
-int hypfs_diag_create_files(struct super_block *sb, struct dentry *root)
+int hypfs_diag_create_files(struct dentry *root)
{
struct dentry *systems_dir, *hyp_dir;
void *time_hdr, *part_hdr;
@@ -735,7 +731,7 @@ int hypfs_diag_create_files(struct super_block *sb, struct dentry *root)
if (IS_ERR(buffer))
return PTR_ERR(buffer);
- systems_dir = hypfs_mkdir(sb, root, "systems");
+ systems_dir = hypfs_mkdir(root, "systems");
if (IS_ERR(systems_dir)) {
rc = PTR_ERR(systems_dir);
goto err_out;
@@ -743,25 +739,25 @@ int hypfs_diag_create_files(struct super_block *sb, struct dentry *root)
time_hdr = (struct x_info_blk_hdr *)buffer;
part_hdr = time_hdr + info_blk_hdr__size(diag204_info_type);
for (i = 0; i < info_blk_hdr__npar(diag204_info_type, time_hdr); i++) {
- part_hdr = hypfs_create_lpar_files(sb, systems_dir, part_hdr);
+ part_hdr = hypfs_create_lpar_files(systems_dir, part_hdr);
if (IS_ERR(part_hdr)) {
rc = PTR_ERR(part_hdr);
goto err_out;
}
}
if (info_blk_hdr__flags(diag204_info_type, time_hdr) & LPAR_PHYS_FLG) {
- ptr = hypfs_create_phys_files(sb, root, part_hdr);
+ ptr = hypfs_create_phys_files(root, part_hdr);
if (IS_ERR(ptr)) {
rc = PTR_ERR(ptr);
goto err_out;
}
}
- hyp_dir = hypfs_mkdir(sb, root, "hyp");
+ hyp_dir = hypfs_mkdir(root, "hyp");
if (IS_ERR(hyp_dir)) {
rc = PTR_ERR(hyp_dir);
goto err_out;
}
- ptr = hypfs_create_str(sb, hyp_dir, "type", "LPAR Hypervisor");
+ ptr = hypfs_create_str(hyp_dir, "type", "LPAR Hypervisor");
if (IS_ERR(ptr)) {
rc = PTR_ERR(ptr);
goto err_out;
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index f364dcf77e8..24908ce149f 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -107,16 +107,15 @@ static void diag2fc_free(const void *data)
vfree(data);
}
-#define ATTRIBUTE(sb, dir, name, member) \
+#define ATTRIBUTE(dir, name, member) \
do { \
void *rc; \
- rc = hypfs_create_u64(sb, dir, name, member); \
+ rc = hypfs_create_u64(dir, name, member); \
if (IS_ERR(rc)) \
return PTR_ERR(rc); \
} while(0)
-static int hpyfs_vm_create_guest(struct super_block *sb,
- struct dentry *systems_dir,
+static int hpyfs_vm_create_guest(struct dentry *systems_dir,
struct diag2fc_data *data)
{
char guest_name[NAME_LEN + 1] = {};
@@ -130,46 +129,46 @@ static int hpyfs_vm_create_guest(struct super_block *sb,
memcpy(guest_name, data->guest_name, NAME_LEN);
EBCASC(guest_name, NAME_LEN);
strim(guest_name);
- guest_dir = hypfs_mkdir(sb, systems_dir, guest_name);
+ guest_dir = hypfs_mkdir(systems_dir, guest_name);
if (IS_ERR(guest_dir))
return PTR_ERR(guest_dir);
- ATTRIBUTE(sb, guest_dir, "onlinetime_us", data->el_time);
+ ATTRIBUTE(guest_dir, "onlinetime_us", data->el_time);
/* logical cpu information */
- cpus_dir = hypfs_mkdir(sb, guest_dir, "cpus");
+ cpus_dir = hypfs_mkdir(guest_dir, "cpus");
if (IS_ERR(cpus_dir))
return PTR_ERR(cpus_dir);
- ATTRIBUTE(sb, cpus_dir, "cputime_us", data->used_cpu);
- ATTRIBUTE(sb, cpus_dir, "capped", capped_value);
- ATTRIBUTE(sb, cpus_dir, "dedicated", dedicated_flag);
- ATTRIBUTE(sb, cpus_dir, "count", data->vcpus);
- ATTRIBUTE(sb, cpus_dir, "weight_min", data->cpu_min);
- ATTRIBUTE(sb, cpus_dir, "weight_max", data->cpu_max);
- ATTRIBUTE(sb, cpus_dir, "weight_cur", data->cpu_shares);
+ ATTRIBUTE(cpus_dir, "cputime_us", data->used_cpu);
+ ATTRIBUTE(cpus_dir, "capped", capped_value);
+ ATTRIBUTE(cpus_dir, "dedicated", dedicated_flag);
+ ATTRIBUTE(cpus_dir, "count", data->vcpus);
+ ATTRIBUTE(cpus_dir, "weight_min", data->cpu_min);
+ ATTRIBUTE(cpus_dir, "weight_max", data->cpu_max);
+ ATTRIBUTE(cpus_dir, "weight_cur", data->cpu_shares);
/* memory information */
- mem_dir = hypfs_mkdir(sb, guest_dir, "mem");
+ mem_dir = hypfs_mkdir(guest_dir, "mem");
if (IS_ERR(mem_dir))
return PTR_ERR(mem_dir);
- ATTRIBUTE(sb, mem_dir, "min_KiB", data->mem_min_kb);
- ATTRIBUTE(sb, mem_dir, "max_KiB", data->mem_max_kb);
- ATTRIBUTE(sb, mem_dir, "used_KiB", data->mem_used_kb);
- ATTRIBUTE(sb, mem_dir, "share_KiB", data->mem_share_kb);
+ ATTRIBUTE(mem_dir, "min_KiB", data->mem_min_kb);
+ ATTRIBUTE(mem_dir, "max_KiB", data->mem_max_kb);
+ ATTRIBUTE(mem_dir, "used_KiB", data->mem_used_kb);
+ ATTRIBUTE(mem_dir, "share_KiB", data->mem_share_kb);
/* samples */
- samples_dir = hypfs_mkdir(sb, guest_dir, "samples");
+ samples_dir = hypfs_mkdir(guest_dir, "samples");
if (IS_ERR(samples_dir))
return PTR_ERR(samples_dir);
- ATTRIBUTE(sb, samples_dir, "cpu_using", data->cpu_use_samp);
- ATTRIBUTE(sb, samples_dir, "cpu_delay", data->cpu_delay_samp);
- ATTRIBUTE(sb, samples_dir, "mem_delay", data->page_wait_samp);
- ATTRIBUTE(sb, samples_dir, "idle", data->idle_samp);
- ATTRIBUTE(sb, samples_dir, "other", data->other_samp);
- ATTRIBUTE(sb, samples_dir, "total", data->total_samp);
+ ATTRIBUTE(samples_dir, "cpu_using", data->cpu_use_samp);
+ ATTRIBUTE(samples_dir, "cpu_delay", data->cpu_delay_samp);
+ ATTRIBUTE(samples_dir, "mem_delay", data->page_wait_samp);
+ ATTRIBUTE(samples_dir, "idle", data->idle_samp);
+ ATTRIBUTE(samples_dir, "other", data->other_samp);
+ ATTRIBUTE(samples_dir, "total", data->total_samp);
return 0;
}
-int hypfs_vm_create_files(struct super_block *sb, struct dentry *root)
+int hypfs_vm_create_files(struct dentry *root)
{
struct dentry *dir, *file;
struct diag2fc_data *data;
@@ -181,38 +180,38 @@ int hypfs_vm_create_files(struct super_block *sb, struct dentry *root)
return PTR_ERR(data);
	/* Hypervisor Info */
- dir = hypfs_mkdir(sb, root, "hyp");
+ dir = hypfs_mkdir(root, "hyp");
if (IS_ERR(dir)) {
rc = PTR_ERR(dir);
goto failed;
}
- file = hypfs_create_str(sb, dir, "type", "z/VM Hypervisor");
+ file = hypfs_create_str(dir, "type", "z/VM Hypervisor");
if (IS_ERR(file)) {
rc = PTR_ERR(file);
goto failed;
}
/* physical cpus */
- dir = hypfs_mkdir(sb, root, "cpus");
+ dir = hypfs_mkdir(root, "cpus");
if (IS_ERR(dir)) {
rc = PTR_ERR(dir);
goto failed;
}
- file = hypfs_create_u64(sb, dir, "count", data->lcpus);
+ file = hypfs_create_u64(dir, "count", data->lcpus);
if (IS_ERR(file)) {
rc = PTR_ERR(file);
goto failed;
}
/* guests */
- dir = hypfs_mkdir(sb, root, "systems");
+ dir = hypfs_mkdir(root, "systems");
if (IS_ERR(dir)) {
rc = PTR_ERR(dir);
goto failed;
}
for (i = 0; i < count; i++) {
- rc = hpyfs_vm_create_guest(sb, dir, &(data[i]));
+ rc = hpyfs_vm_create_guest(dir, &(data[i]));
if (rc)
goto failed;
}
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 7a539f4f5e3..ddfe09b4513 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -28,8 +28,7 @@
#define HYPFS_MAGIC 0x687970 /* ASCII 'hyp' */
#define TMP_SIZE 64 /* size of temporary buffers */
-static struct dentry *hypfs_create_update_file(struct super_block *sb,
- struct dentry *dir);
+static struct dentry *hypfs_create_update_file(struct dentry *dir);
struct hypfs_sb_info {
kuid_t uid; /* uid used for files and dirs */
@@ -193,9 +192,9 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
}
hypfs_delete_tree(sb->s_root);
if (MACHINE_IS_VM)
- rc = hypfs_vm_create_files(sb, sb->s_root);
+ rc = hypfs_vm_create_files(sb->s_root);
else
- rc = hypfs_diag_create_files(sb, sb->s_root);
+ rc = hypfs_diag_create_files(sb->s_root);
if (rc) {
pr_err("Updating the hypfs tree failed\n");
hypfs_delete_tree(sb->s_root);
@@ -302,12 +301,12 @@ static int hypfs_fill_super(struct super_block *sb, void *data, int silent)
if (!root_dentry)
return -ENOMEM;
if (MACHINE_IS_VM)
- rc = hypfs_vm_create_files(sb, root_dentry);
+ rc = hypfs_vm_create_files(root_dentry);
else
- rc = hypfs_diag_create_files(sb, root_dentry);
+ rc = hypfs_diag_create_files(root_dentry);
if (rc)
return rc;
- sbi->update_file = hypfs_create_update_file(sb, root_dentry);
+ sbi->update_file = hypfs_create_update_file(root_dentry);
if (IS_ERR(sbi->update_file))
return PTR_ERR(sbi->update_file);
hypfs_update_update(sb);
@@ -334,8 +333,7 @@ static void hypfs_kill_super(struct super_block *sb)
kill_litter_super(sb);
}
-static struct dentry *hypfs_create_file(struct super_block *sb,
- struct dentry *parent, const char *name,
+static struct dentry *hypfs_create_file(struct dentry *parent, const char *name,
char *data, umode_t mode)
{
struct dentry *dentry;
@@ -347,7 +345,7 @@ static struct dentry *hypfs_create_file(struct super_block *sb,
dentry = ERR_PTR(-ENOMEM);
goto fail;
}
- inode = hypfs_make_inode(sb, mode);
+ inode = hypfs_make_inode(parent->d_sb, mode);
if (!inode) {
dput(dentry);
dentry = ERR_PTR(-ENOMEM);
@@ -373,24 +371,22 @@ fail:
return dentry;
}
-struct dentry *hypfs_mkdir(struct super_block *sb, struct dentry *parent,
- const char *name)
+struct dentry *hypfs_mkdir(struct dentry *parent, const char *name)
{
struct dentry *dentry;
- dentry = hypfs_create_file(sb, parent, name, NULL, S_IFDIR | DIR_MODE);
+ dentry = hypfs_create_file(parent, name, NULL, S_IFDIR | DIR_MODE);
if (IS_ERR(dentry))
return dentry;
hypfs_add_dentry(dentry);
return dentry;
}
-static struct dentry *hypfs_create_update_file(struct super_block *sb,
- struct dentry *dir)
+static struct dentry *hypfs_create_update_file(struct dentry *dir)
{
struct dentry *dentry;
- dentry = hypfs_create_file(sb, dir, "update", NULL,
+ dentry = hypfs_create_file(dir, "update", NULL,
S_IFREG | UPDATE_FILE_MODE);
/*
* We do not put the update file on the 'delete' list with
@@ -400,7 +396,7 @@ static struct dentry *hypfs_create_update_file(struct super_block *sb,
return dentry;
}
-struct dentry *hypfs_create_u64(struct super_block *sb, struct dentry *dir,
+struct dentry *hypfs_create_u64(struct dentry *dir,
const char *name, __u64 value)
{
char *buffer;
@@ -412,7 +408,7 @@ struct dentry *hypfs_create_u64(struct super_block *sb, struct dentry *dir,
if (!buffer)
return ERR_PTR(-ENOMEM);
dentry =
- hypfs_create_file(sb, dir, name, buffer, S_IFREG | REG_FILE_MODE);
+ hypfs_create_file(dir, name, buffer, S_IFREG | REG_FILE_MODE);
if (IS_ERR(dentry)) {
kfree(buffer);
return ERR_PTR(-ENOMEM);
@@ -421,7 +417,7 @@ struct dentry *hypfs_create_u64(struct super_block *sb, struct dentry *dir,
return dentry;
}
-struct dentry *hypfs_create_str(struct super_block *sb, struct dentry *dir,
+struct dentry *hypfs_create_str(struct dentry *dir,
const char *name, char *string)
{
char *buffer;
@@ -432,7 +428,7 @@ struct dentry *hypfs_create_str(struct super_block *sb, struct dentry *dir,
return ERR_PTR(-ENOMEM);
sprintf(buffer, "%s\n", string);
dentry =
- hypfs_create_file(sb, dir, name, buffer, S_IFREG | REG_FILE_MODE);
+ hypfs_create_file(dir, name, buffer, S_IFREG | REG_FILE_MODE);
if (IS_ERR(dentry)) {
kfree(buffer);
return ERR_PTR(-ENOMEM);
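The common thread of the hypfs hunks above: helpers that used to take both a super_block and a parent dentry now take only the dentry, since the superblock is reachable as parent->d_sb (see the hypfs_create_file() change). In miniature, and purely as an illustration with a hypothetical helper and simplified error handling:

static struct dentry *my_create_node(struct dentry *parent, const char *name,
				     umode_t mode)
{
	struct super_block *sb = parent->d_sb;	/* recovered from the dentry */
	struct inode *inode = new_inode(sb);
	struct dentry *dentry = d_alloc_name(parent, name);

	if (!inode || !dentry)
		return ERR_PTR(-ENOMEM);	/* sketch: no cleanup on failure */
	inode->i_mode = mode;
	d_instantiate(dentry, inode);
	return dentry;
}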
diff --git a/arch/s390/include/asm/airq.h b/arch/s390/include/asm/airq.h
index 4066cee0c2d..4bbb5957ed1 100644
--- a/arch/s390/include/asm/airq.h
+++ b/arch/s390/include/asm/airq.h
@@ -9,6 +9,8 @@
#ifndef _ASM_S390_AIRQ_H
#define _ASM_S390_AIRQ_H
+#include <linux/bit_spinlock.h>
+
struct airq_struct {
struct hlist_node list; /* Handler queueing. */
void (*handler)(struct airq_struct *); /* Thin-interrupt handler */
@@ -23,4 +25,69 @@ struct airq_struct {
int register_adapter_interrupt(struct airq_struct *airq);
void unregister_adapter_interrupt(struct airq_struct *airq);
+/* Adapter interrupt bit vector */
+struct airq_iv {
+ unsigned long *vector; /* Adapter interrupt bit vector */
+ unsigned long *avail; /* Allocation bit mask for the bit vector */
+ unsigned long *bitlock; /* Lock bit mask for the bit vector */
+ unsigned long *ptr; /* Pointer associated with each bit */
+ unsigned int *data; /* 32 bit value associated with each bit */
+ unsigned long bits; /* Number of bits in the vector */
+ unsigned long end; /* Number of highest allocated bit + 1 */
+ spinlock_t lock; /* Lock to protect alloc & free */
+};
+
+#define AIRQ_IV_ALLOC 1 /* Use an allocation bit mask */
+#define AIRQ_IV_BITLOCK 2 /* Allocate the lock bit mask */
+#define AIRQ_IV_PTR 4 /* Allocate the ptr array */
+#define AIRQ_IV_DATA 8 /* Allocate the data array */
+
+struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags);
+void airq_iv_release(struct airq_iv *iv);
+unsigned long airq_iv_alloc_bit(struct airq_iv *iv);
+void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit);
+unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
+ unsigned long end);
+
+static inline unsigned long airq_iv_end(struct airq_iv *iv)
+{
+ return iv->end;
+}
+
+static inline void airq_iv_lock(struct airq_iv *iv, unsigned long bit)
+{
+ const unsigned long be_to_le = BITS_PER_LONG - 1;
+ bit_spin_lock(bit ^ be_to_le, iv->bitlock);
+}
+
+static inline void airq_iv_unlock(struct airq_iv *iv, unsigned long bit)
+{
+ const unsigned long be_to_le = BITS_PER_LONG - 1;
+ bit_spin_unlock(bit ^ be_to_le, iv->bitlock);
+}
+
+static inline void airq_iv_set_data(struct airq_iv *iv, unsigned long bit,
+ unsigned int data)
+{
+ iv->data[bit] = data;
+}
+
+static inline unsigned int airq_iv_get_data(struct airq_iv *iv,
+ unsigned long bit)
+{
+ return iv->data[bit];
+}
+
+static inline void airq_iv_set_ptr(struct airq_iv *iv, unsigned long bit,
+ unsigned long ptr)
+{
+ iv->ptr[bit] = ptr;
+}
+
+static inline unsigned long airq_iv_get_ptr(struct airq_iv *iv,
+ unsigned long bit)
+{
+ return iv->ptr[bit];
+}
+
#endif /* _ASM_S390_AIRQ_H */
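The airq_iv interface added above is the adapter-interrupt bit vector that the later PCI hunks switch to in place of the per-device MSI map. A sketch of its intended use, with error handling trimmed and an arbitrary size, flag set and cookie value chosen for illustration:

static int example_airq_iv_use(void)
{
	struct airq_iv *iv;
	unsigned long bit;

	/* 256-bit vector with an allocation mask, per-bit locks and a
	 * 32-bit data word per bit */
	iv = airq_iv_create(256, AIRQ_IV_ALLOC | AIRQ_IV_BITLOCK | AIRQ_IV_DATA);
	if (!iv)
		return -ENOMEM;

	bit = airq_iv_alloc_bit(iv);		/* reserve one interrupt bit */
	airq_iv_set_data(iv, bit, 42);		/* attach a cookie to it */

	airq_iv_lock(iv, bit);			/* per-bit lock around updates */
	/* ... */
	airq_iv_unlock(iv, bit);

	airq_iv_free_bit(iv, bit);
	airq_iv_release(iv);
	return 0;
}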
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index 4d8604e311f..10135a38673 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -216,7 +216,7 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
asm volatile(
" oc %O0(1,%R0),%1"
- : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
+ : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
}
static inline void
@@ -244,7 +244,7 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
asm volatile(
" nc %O0(1,%R0),%1"
- : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
+ : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc");
}
static inline void
@@ -271,7 +271,7 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3);
asm volatile(
" xc %O0(1,%R0),%1"
- : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
+ : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc");
}
static inline void
@@ -301,7 +301,7 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
ch = *(unsigned char *) addr;
asm volatile(
" oc %O0(1,%R0),%1"
- : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
+ : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
: "cc", "memory");
return (ch >> (nr & 7)) & 1;
}
@@ -320,7 +320,7 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
ch = *(unsigned char *) addr;
asm volatile(
" nc %O0(1,%R0),%1"
- : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
+ : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7])
: "cc", "memory");
return (ch >> (nr & 7)) & 1;
}
@@ -339,7 +339,7 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
ch = *(unsigned char *) addr;
asm volatile(
" xc %O0(1,%R0),%1"
- : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
+ : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7])
: "cc", "memory");
return (ch >> (nr & 7)) & 1;
}
@@ -693,7 +693,7 @@ static inline int find_next_bit_left(const unsigned long *addr,
size -= offset;
p = addr + offset / BITS_PER_LONG;
if (bit) {
- set = __flo_word(0, *p & (~0UL << bit));
+ set = __flo_word(0, *p & (~0UL >> bit));
if (set >= size)
return size + offset;
if (set < BITS_PER_LONG)
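The constraint change in the hunks above ("=Q" to "+Q") matters because oc/nc/xc read and modify the target byte; declaring the operand write-only lets the compiler treat its previous value as dead. The same idea on another architecture, purely as an illustration (x86, hypothetical helper):

static inline void set_flag(unsigned char *byte, unsigned char mask)
{
	asm volatile("orb %1, %0"
		     : "+m" (*byte)	/* read AND written: needs "+", not "=" */
		     : "iq" (mask)
		     : "cc");
}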
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index ffb898961c8..d42625053c3 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -296,6 +296,7 @@ static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1,
return 0;
}
+void channel_subsystem_reinit(void);
extern void css_schedule_reprobe(void);
extern void reipl_ccw_dev(struct ccw_dev_id *id);
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index d2ff41370c0..f65bd363451 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -13,9 +13,6 @@
#include <asm/div64.h>
-#define __ARCH_HAS_VTIME_ACCOUNT
-#define __ARCH_HAS_VTIME_TASK_SWITCH
-
/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
typedef unsigned long long __nocast cputime_t;
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h
index 0c82ba86e99..a908d2941c5 100644
--- a/arch/s390/include/asm/hardirq.h
+++ b/arch/s390/include/asm/hardirq.h
@@ -20,4 +20,9 @@
#define HARDIRQ_BITS 8
+static inline void ack_bad_irq(unsigned int irq)
+{
+ printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
+}
+
#endif /* __ASM_HARDIRQ_H */
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h
index bd90359d6d2..11eae5f55b7 100644
--- a/arch/s390/include/asm/hugetlb.h
+++ b/arch/s390/include/asm/hugetlb.h
@@ -17,6 +17,9 @@
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);
+pte_t huge_ptep_get(pte_t *ptep);
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep);
/*
* If the arch doesn't supply something else, assume that hugepage
@@ -38,147 +41,75 @@ static inline int prepare_hugepage_range(struct file *file,
int arch_prepare_hugepage(struct page *page);
void arch_release_hugepage(struct page *page);
-static inline pte_t huge_pte_wrprotect(pte_t pte)
+static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
{
- pte_val(pte) |= _PAGE_RO;
- return pte;
+ pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY;
}
-static inline int huge_pte_none(pte_t pte)
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
{
- return (pte_val(pte) & _SEGMENT_ENTRY_INV) &&
- !(pte_val(pte) & _SEGMENT_ENTRY_RO);
+ huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
}
-static inline pte_t huge_ptep_get(pte_t *ptep)
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
{
- pte_t pte = *ptep;
- unsigned long mask;
-
- if (!MACHINE_HAS_HPAGE) {
- ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
- if (ptep) {
- mask = pte_val(pte) &
- (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
- pte = pte_mkhuge(*ptep);
- pte_val(pte) |= mask;
- }
+ int changed = !pte_same(huge_ptep_get(ptep), pte);
+ if (changed) {
+ huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
}
- return pte;
+ return changed;
}
-static inline void __pmd_csp(pmd_t *pmdp)
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
{
- register unsigned long reg2 asm("2") = pmd_val(*pmdp);
- register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
- _SEGMENT_ENTRY_INV;
- register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
-
- asm volatile(
- " csp %1,%3"
- : "=m" (*pmdp)
- : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
+ pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);
+ set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
}
-static inline void huge_ptep_invalidate(struct mm_struct *mm,
- unsigned long address, pte_t *ptep)
-{
- pmd_t *pmdp = (pmd_t *) ptep;
-
- if (MACHINE_HAS_IDTE)
- __pmd_idte(address, pmdp);
- else
- __pmd_csp(pmdp);
- pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
-}
-
-static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep)
-{
- pte_t pte = huge_ptep_get(ptep);
-
- huge_ptep_invalidate(mm, addr, ptep);
- return pte;
-}
-
-#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
-({ \
- int __changed = !pte_same(huge_ptep_get(__ptep), __entry); \
- if (__changed) { \
- huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep); \
- set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \
- } \
- __changed; \
-})
-
-#define huge_ptep_set_wrprotect(__mm, __addr, __ptep) \
-({ \
- pte_t __pte = huge_ptep_get(__ptep); \
- if (huge_pte_write(__pte)) { \
- huge_ptep_invalidate(__mm, __addr, __ptep); \
- set_huge_pte_at(__mm, __addr, __ptep, \
- huge_pte_wrprotect(__pte)); \
- } \
-})
-
-static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep)
+static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
{
- huge_ptep_invalidate(vma->vm_mm, address, ptep);
+ return mk_pte(page, pgprot);
}
-static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
+static inline int huge_pte_none(pte_t pte)
{
- pte_t pte;
- pmd_t pmd;
-
- pmd = mk_pmd_phys(page_to_phys(page), pgprot);
- pte_val(pte) = pmd_val(pmd);
- return pte;
+ return pte_none(pte);
}
static inline int huge_pte_write(pte_t pte)
{
- pmd_t pmd;
-
- pmd_val(pmd) = pte_val(pte);
- return pmd_write(pmd);
+ return pte_write(pte);
}
static inline int huge_pte_dirty(pte_t pte)
{
- /* No dirty bit in the segment table entry. */
- return 0;
+ return pte_dirty(pte);
}
static inline pte_t huge_pte_mkwrite(pte_t pte)
{
- pmd_t pmd;
-
- pmd_val(pmd) = pte_val(pte);
- pte_val(pte) = pmd_val(pmd_mkwrite(pmd));
- return pte;
+ return pte_mkwrite(pte);
}
static inline pte_t huge_pte_mkdirty(pte_t pte)
{
- /* No dirty bit in the segment table entry. */
- return pte;
+ return pte_mkdirty(pte);
}
-static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
+static inline pte_t huge_pte_wrprotect(pte_t pte)
{
- pmd_t pmd;
-
- pmd_val(pmd) = pte_val(pte);
- pte_val(pte) = pmd_val(pmd_modify(pmd, newprot));
- return pte;
+ return pte_wrprotect(pte);
}
-static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep)
+static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
{
- pmd_clear((pmd_t *) ptep);
+ return pte_modify(pte, newprot);
}
#endif /* _ASM_S390_HUGETLB_H */
diff --git a/arch/s390/include/asm/hw_irq.h b/arch/s390/include/asm/hw_irq.h
index 7e3d2586c1f..ee96a8b697f 100644
--- a/arch/s390/include/asm/hw_irq.h
+++ b/arch/s390/include/asm/hw_irq.h
@@ -4,19 +4,8 @@
#include <linux/msi.h>
#include <linux/pci.h>
-static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
-{
- return __irq_get_msi_desc(irq);
-}
-
-/* Must be called with msi map lock held */
-static inline int irq_set_msi_desc(unsigned int irq, struct msi_desc *msi)
-{
- if (!msi)
- return -EINVAL;
-
- msi->irq = irq;
- return 0;
-}
+void __init init_airq_interrupts(void);
+void __init init_cio_interrupts(void);
+void __init init_ext_interrupts(void);
#endif
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 87c17bfb296..1eaa3625803 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -1,17 +1,28 @@
#ifndef _ASM_IRQ_H
#define _ASM_IRQ_H
+#define EXT_INTERRUPT 1
+#define IO_INTERRUPT 2
+#define THIN_INTERRUPT 3
+
+#define NR_IRQS_BASE 4
+
+#ifdef CONFIG_PCI_NR_MSI
+# define NR_IRQS (NR_IRQS_BASE + CONFIG_PCI_NR_MSI)
+#else
+# define NR_IRQS NR_IRQS_BASE
+#endif
+
+/* This number is used when no interrupt has been assigned */
+#define NO_IRQ 0
+
+#ifndef __ASSEMBLY__
+
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/cache.h>
#include <linux/types.h>
-enum interruption_main_class {
- EXTERNAL_INTERRUPT,
- IO_INTERRUPT,
- NR_IRQS
-};
-
enum interruption_class {
IRQEXT_CLK,
IRQEXT_EXC,
@@ -72,14 +83,8 @@ void service_subclass_irq_unregister(void);
void measurement_alert_subclass_register(void);
void measurement_alert_subclass_unregister(void);
-#ifdef CONFIG_LOCKDEP
-# define disable_irq_nosync_lockdep(irq) disable_irq_nosync(irq)
-# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
- disable_irq_nosync(irq)
-# define disable_irq_lockdep(irq) disable_irq(irq)
-# define enable_irq_lockdep(irq) enable_irq(irq)
-# define enable_irq_lockdep_irqrestore(irq, flags) \
- enable_irq(irq)
-#endif
+#define irq_canonicalize(irq) (irq)
+
+#endif /* __ASSEMBLY__ */
#endif /* _ASM_IRQ_H */
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 3238d4004e8..e87ecaa2c56 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -274,6 +274,14 @@ struct kvm_arch{
int css_support;
};
+#define KVM_HVA_ERR_BAD (-1UL)
+#define KVM_HVA_ERR_RO_BAD (-2UL)
+
+static inline bool kvm_is_error_hva(unsigned long addr)
+{
+ return IS_ERR_VALUE(addr);
+}
+
extern int sie64a(struct kvm_s390_sie_block *, u64 *);
extern char sie_exit;
#endif
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 6340178748b..ff132ac64dd 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -12,8 +12,6 @@ typedef struct {
unsigned long asce_bits;
unsigned long asce_limit;
unsigned long vdso_base;
- /* Cloned contexts will be created with extended page tables. */
- unsigned int alloc_pgste:1;
/* The mmu context has extended page tables. */
unsigned int has_pgste:1;
} mm_context_t;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 084e7755ed9..9f973d8de90 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -21,24 +21,7 @@ static inline int init_new_context(struct task_struct *tsk,
#ifdef CONFIG_64BIT
mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
- if (current->mm && current->mm->context.alloc_pgste) {
- /*
- * alloc_pgste indicates, that any NEW context will be created
- * with extended page tables. The old context is unchanged. The
- * page table allocation and the page table operations will
- * look at has_pgste to distinguish normal and extended page
- * tables. The only way to create extended page tables is to
- * set alloc_pgste and then create a new context (e.g. dup_mm).
- * The page table allocation is called after init_new_context
- * and if has_pgste is set, it will create extended page
- * tables.
- */
- mm->context.has_pgste = 1;
- mm->context.alloc_pgste = 1;
- } else {
- mm->context.has_pgste = 0;
- mm->context.alloc_pgste = 0;
- }
+ mm->context.has_pgste = 0;
mm->context.asce_limit = STACK_TOP_MAX;
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
return 0;
@@ -77,8 +60,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
WARN_ON(atomic_read(&prev->context.attach_count) < 0);
atomic_inc(&next->context.attach_count);
/* Check for TLBs not flushed yet */
- if (next->context.flush_mm)
- __tlb_flush_mm(next);
+ __tlb_flush_mm_lazy(next);
}
#define enter_lazy_tlb(mm,tsk) do { } while (0)
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 5d64fb7619c..1e51f2915b2 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -32,16 +32,6 @@
void storage_key_init_range(unsigned long start, unsigned long end);
-static inline unsigned long pfmf(unsigned long function, unsigned long address)
-{
- asm volatile(
- " .insn rre,0xb9af0000,%[function],%[address]"
- : [address] "+a" (address)
- : [function] "d" (function)
- : "memory");
- return address;
-}
-
static inline void clear_page(void *page)
{
register unsigned long reg1 asm ("1") = 0;
@@ -150,15 +140,6 @@ static inline int page_reset_referenced(unsigned long addr)
#define _PAGE_FP_BIT 0x08 /* HW fetch protection bit */
#define _PAGE_ACC_BITS 0xf0 /* HW access control bits */
-/*
- * Test and clear referenced bit in storage key.
- */
-#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
-static inline int page_test_and_clear_young(unsigned long pfn)
-{
- return page_reset_referenced(pfn << PAGE_SHIFT);
-}
-
struct page;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 6e577ba0e5d..c290f13d1c4 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -6,6 +6,7 @@
/* must be set before including pci_clp.h */
#define PCI_BAR_COUNT 6
+#include <linux/pci.h>
#include <asm-generic/pci.h>
#include <asm-generic/pci-dma-compat.h>
#include <asm/pci_clp.h>
@@ -53,14 +54,9 @@ struct zpci_fmb {
atomic64_t unmapped_pages;
} __packed __aligned(16);
-struct msi_map {
- unsigned long irq;
- struct msi_desc *msi;
- struct hlist_node msi_chain;
-};
-
-#define ZPCI_NR_MSI_VECS 64
-#define ZPCI_MSI_MASK (ZPCI_NR_MSI_VECS - 1)
+#define ZPCI_MSI_VEC_BITS 11
+#define ZPCI_MSI_VEC_MAX (1 << ZPCI_MSI_VEC_BITS)
+#define ZPCI_MSI_VEC_MASK (ZPCI_MSI_VEC_MAX - 1)
enum zpci_state {
ZPCI_FN_STATE_RESERVED,
@@ -91,8 +87,7 @@ struct zpci_dev {
/* IRQ stuff */
u64 msi_addr; /* MSI address */
- struct zdev_irq_map *irq_map;
- struct msi_map *msi_map[ZPCI_NR_MSI_VECS];
+ struct airq_iv *aibv; /* adapter interrupt bit vector */
unsigned int aisb; /* number of the summary bit */
/* DMA stuff */
@@ -122,11 +117,6 @@ struct zpci_dev {
struct dentry *debugfs_perf;
};
-struct pci_hp_callback_ops {
- int (*create_slot) (struct zpci_dev *zdev);
- void (*remove_slot) (struct zpci_dev *zdev);
-};
-
static inline bool zdev_enabled(struct zpci_dev *zdev)
{
return (zdev->fh & (1UL << 31)) ? true : false;
@@ -146,32 +136,38 @@ int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64);
int zpci_unregister_ioat(struct zpci_dev *, u8);
/* CLP */
-int clp_find_pci_devices(void);
+int clp_scan_pci_devices(void);
+int clp_rescan_pci_devices(void);
+int clp_rescan_pci_devices_simple(void);
int clp_add_pci_device(u32, u32, int);
int clp_enable_fh(struct zpci_dev *, u8);
int clp_disable_fh(struct zpci_dev *);
-/* MSI */
-struct msi_desc *__irq_get_msi_desc(unsigned int);
-int zpci_msi_set_mask_bits(struct msi_desc *, u32, u32);
-int zpci_setup_msi_irq(struct zpci_dev *, struct msi_desc *, unsigned int, int);
-void zpci_teardown_msi_irq(struct zpci_dev *, struct msi_desc *);
-int zpci_msihash_init(void);
-void zpci_msihash_exit(void);
-
#ifdef CONFIG_PCI
/* Error handling and recovery */
void zpci_event_error(void *);
void zpci_event_availability(void *);
+void zpci_rescan(void);
#else /* CONFIG_PCI */
static inline void zpci_event_error(void *e) {}
static inline void zpci_event_availability(void *e) {}
+static inline void zpci_rescan(void) {}
#endif /* CONFIG_PCI */
+#ifdef CONFIG_HOTPLUG_PCI_S390
+int zpci_init_slot(struct zpci_dev *);
+void zpci_exit_slot(struct zpci_dev *);
+#else /* CONFIG_HOTPLUG_PCI_S390 */
+static inline int zpci_init_slot(struct zpci_dev *zdev)
+{
+ return 0;
+}
+static inline void zpci_exit_slot(struct zpci_dev *zdev) {}
+#endif /* CONFIG_HOTPLUG_PCI_S390 */
+
/* Helpers */
struct zpci_dev *get_zdev(struct pci_dev *);
struct zpci_dev *get_zdev_by_fid(u32);
-bool zpci_fid_present(u32);
/* sysfs */
int zpci_sysfs_add_device(struct device *);
@@ -181,14 +177,6 @@ void zpci_sysfs_remove_device(struct device *);
int zpci_dma_init(void);
void zpci_dma_exit(void);
-/* Hotplug */
-extern struct mutex zpci_list_lock;
-extern struct list_head zpci_list;
-extern unsigned int s390_pci_probe;
-
-void zpci_register_hp_ops(struct pci_hp_callback_ops *);
-void zpci_deregister_hp_ops(void);
-
/* FMB */
int zpci_fmb_enable_device(struct zpci_dev *);
int zpci_fmb_disable_device(struct zpci_dev *);
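
With the old 64-entry msi_map array gone, adapter interrupts are tracked in an airq_iv bit vector and MSI vector numbers simply live in the new 11-bit range. A small sketch of confining a vector number with ZPCI_MSI_VEC_MASK; the helper name is illustrative, not part of the patch:

#define ZPCI_MSI_VEC_BITS	11
#define ZPCI_MSI_VEC_MAX	(1 << ZPCI_MSI_VEC_BITS)	/* 2048 vectors */
#define ZPCI_MSI_VEC_MASK	(ZPCI_MSI_VEC_MAX - 1)

/* Illustrative only: keep a vector index inside the supported range. */
static inline unsigned int zpci_msi_vec(unsigned int hwirq)
{
	return hwirq & ZPCI_MSI_VEC_MASK;	/* 0 .. 2047 */
}
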
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h
index e6a2bdd4d70..df6eac9f0cb 100644
--- a/arch/s390/include/asm/pci_insn.h
+++ b/arch/s390/include/asm/pci_insn.h
@@ -79,11 +79,11 @@ struct zpci_fib {
} __packed;
-int s390pci_mod_fc(u64 req, struct zpci_fib *fib);
-int s390pci_refresh_trans(u64 fn, u64 addr, u64 range);
-int s390pci_load(u64 *data, u64 req, u64 offset);
-int s390pci_store(u64 data, u64 req, u64 offset);
-int s390pci_store_block(const u64 *data, u64 req, u64 offset);
-void set_irq_ctrl(u16 ctl, char *unused, u8 isc);
+int zpci_mod_fc(u64 req, struct zpci_fib *fib);
+int zpci_refresh_trans(u64 fn, u64 addr, u64 range);
+int zpci_load(u64 *data, u64 req, u64 offset);
+int zpci_store(u64 data, u64 req, u64 offset);
+int zpci_store_block(const u64 *data, u64 req, u64 offset);
+void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc);
#endif
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index 83a9caa6ae5..d194d544d69 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -36,7 +36,7 @@ static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \
u64 data; \
int rc; \
\
- rc = s390pci_load(&data, req, ZPCI_OFFSET(addr)); \
+ rc = zpci_load(&data, req, ZPCI_OFFSET(addr)); \
if (rc) \
data = -1ULL; \
return (RETTYPE) data; \
@@ -50,7 +50,7 @@ static inline void zpci_write_##VALTYPE(VALTYPE val, \
u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
u64 data = (VALTYPE) val; \
\
- s390pci_store(data, req, ZPCI_OFFSET(addr)); \
+ zpci_store(data, req, ZPCI_OFFSET(addr)); \
}
zpci_read(8, u64)
@@ -83,7 +83,7 @@ static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len
val = 0; /* let FW report error */
break;
}
- return s390pci_store(val, req, offset);
+ return zpci_store(val, req, offset);
}
static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
@@ -91,7 +91,7 @@ static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len)
u64 data;
int cc;
- cc = s390pci_load(&data, req, offset);
+ cc = zpci_load(&data, req, offset);
if (cc)
goto out;
@@ -115,7 +115,7 @@ out:
static inline int zpci_write_block(u64 req, const u64 *data, u64 offset)
{
- return s390pci_store_block(data, req, offset);
+ return zpci_store_block(data, req, offset);
}
static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 75fb726de91..9b60a36c348 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -217,63 +217,57 @@ extern unsigned long MODULES_END;
/* Hardware bits in the page table entry */
#define _PAGE_CO 0x100 /* HW Change-bit override */
-#define _PAGE_RO 0x200 /* HW read-only bit */
+#define _PAGE_PROTECT 0x200 /* HW read-only bit */
#define _PAGE_INVALID 0x400 /* HW invalid bit */
+#define _PAGE_LARGE 0x800 /* Bit to mark a large pte */
/* Software bits in the page table entry */
-#define _PAGE_SWT 0x001 /* SW pte type bit t */
-#define _PAGE_SWX 0x002 /* SW pte type bit x */
-#define _PAGE_SWC 0x004 /* SW pte changed bit */
-#define _PAGE_SWR 0x008 /* SW pte referenced bit */
-#define _PAGE_SWW 0x010 /* SW pte write bit */
-#define _PAGE_SPECIAL 0x020 /* SW associated with special page */
+#define _PAGE_PRESENT 0x001 /* SW pte present bit */
+#define _PAGE_TYPE 0x002 /* SW pte type bit */
+#define _PAGE_YOUNG 0x004 /* SW pte young bit */
+#define _PAGE_DIRTY 0x008 /* SW pte dirty bit */
+#define _PAGE_READ 0x010 /* SW pte read bit */
+#define _PAGE_WRITE 0x020 /* SW pte write bit */
+#define _PAGE_SPECIAL 0x040 /* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL
/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
- _PAGE_SWC | _PAGE_SWR)
-
-/* Six different types of pages. */
-#define _PAGE_TYPE_EMPTY 0x400
-#define _PAGE_TYPE_NONE 0x401
-#define _PAGE_TYPE_SWAP 0x403
-#define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */
-#define _PAGE_TYPE_RO 0x200
-#define _PAGE_TYPE_RW 0x000
-
-/*
- * Only four types for huge pages, using the invalid bit and protection bit
- * of a segment table entry.
- */
-#define _HPAGE_TYPE_EMPTY 0x020 /* _SEGMENT_ENTRY_INV */
-#define _HPAGE_TYPE_NONE 0x220
-#define _HPAGE_TYPE_RO 0x200 /* _SEGMENT_ENTRY_RO */
-#define _HPAGE_TYPE_RW 0x000
+ _PAGE_DIRTY | _PAGE_YOUNG)
/*
- * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
- * pte_none and pte_file to find out the pte type WITHOUT holding the page
- * table lock. ptep_clear_flush on the other hand uses ptep_clear_flush to
- * invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
- * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
- * This change is done while holding the lock, but the intermediate step
- * of a previously valid pte with the hw invalid bit set can be observed by
- * handle_pte_fault. That makes it necessary that all valid pte types with
- * the hw invalid bit set must be distinguishable from the four pte types
- * empty, none, swap and file.
+ * handle_pte_fault uses pte_present, pte_none and pte_file to find out the
+ * pte type WITHOUT holding the page table lock. The _PAGE_PRESENT bit
+ * is used to distinguish present from not-present ptes. It is changed only
+ * with the page table lock held.
+ *
+ * The following table gives the different possible bit combinations for
+ * the pte hardware and software bits in the last 12 bits of a pte:
*
- * irxt ipte irxt
- * _PAGE_TYPE_EMPTY 1000 -> 1000
- * _PAGE_TYPE_NONE 1001 -> 1001
- * _PAGE_TYPE_SWAP 1011 -> 1011
- * _PAGE_TYPE_FILE 11?1 -> 11?1
- * _PAGE_TYPE_RO 0100 -> 1100
- * _PAGE_TYPE_RW 0000 -> 1000
+ * 842100000000
+ * 000084210000
+ * 000000008421
+ * .IR...wrdytp
+ * empty .10...000000
+ * swap .10...xxxx10
+ * file .11...xxxxx0
+ * prot-none, clean, old .11...000001
+ * prot-none, clean, young .11...000101
+ * prot-none, dirty, old .10...001001
+ * prot-none, dirty, young .10...001101
+ * read-only, clean, old .11...010001
+ * read-only, clean, young .01...010101
+ * read-only, dirty, old .11...011001
+ * read-only, dirty, young .01...011101
+ * read-write, clean, old .11...110001
+ * read-write, clean, young .01...110101
+ * read-write, dirty, old .10...111001
+ * read-write, dirty, young .00...111101
*
- * pte_none is true for bits combinations 1000, 1010, 1100, 1110
- * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
- * pte_file is true for bits combinations 1101, 1111
- * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
+ * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
+ * pte_none is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
+ * pte_file is true for the bit pattern .11...xxxxx0, (pte & 0x601) == 0x600
+ * pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
*/
#ifndef CONFIG_64BIT
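
The bit patterns in the table above can be checked mechanically. A stand-alone classifier using only the mask/value pairs documented in the comment, with pte_t reduced to an unsigned long for the sketch:

#include <stdio.h>

#define _PAGE_PRESENT	0x001UL
#define _PAGE_TYPE	0x002UL
#define _PAGE_PROTECT	0x200UL
#define _PAGE_INVALID	0x400UL

static const char *pte_kind(unsigned long pte)
{
	if (pte & _PAGE_PRESENT)
		return "present";	/* (pte & 0x001) == 0x001 */
	if ((pte & 0x603UL) == _PAGE_INVALID)
		return "none";		/* (pte & 0x603) == 0x400 */
	if ((pte & 0x601UL) == (_PAGE_INVALID | _PAGE_PROTECT))
		return "file";		/* (pte & 0x601) == 0x600 */
	if ((pte & 0x603UL) == (_PAGE_INVALID | _PAGE_TYPE))
		return "swap";		/* (pte & 0x603) == 0x402 */
	return "invalid combination";
}

int main(void)
{
	/* empty, prot-none/clean/old (still a present pte), swap, file */
	printf("%s %s %s %s\n", pte_kind(0x400), pte_kind(0x601),
	       pte_kind(0x402), pte_kind(0x600));
	return 0;
}
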
@@ -286,14 +280,25 @@ extern unsigned long MODULES_END;
#define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */
/* Bits in the segment table entry */
+#define _SEGMENT_ENTRY_BITS 0x7fffffffUL /* Valid segment table bits */
#define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */
-#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
-#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
+#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */
+#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
#define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */
#define _SEGMENT_ENTRY_PTL 0x0f /* page table length */
+#define _SEGMENT_ENTRY_NONE _SEGMENT_ENTRY_PROTECT
#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL)
-#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV)
+#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
+
+/*
+ * Segment table entry encoding (I = invalid, R = read-only bit):
+ * ..R...I.....
+ * prot-none ..1...1.....
+ * read-only ..1...0.....
+ * read-write ..0...0.....
+ * empty ..0...1.....
+ */
/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS 0xf0000000UL
@@ -303,9 +308,7 @@ extern unsigned long MODULES_END;
#define PGSTE_HC_BIT 0x00200000UL
#define PGSTE_GR_BIT 0x00040000UL
#define PGSTE_GC_BIT 0x00020000UL
-#define PGSTE_UR_BIT 0x00008000UL
-#define PGSTE_UC_BIT 0x00004000UL /* user dirty (migration) */
-#define PGSTE_IN_BIT 0x00002000UL /* IPTE notify bit */
+#define PGSTE_IN_BIT 0x00008000UL /* IPTE notify bit */
#else /* CONFIG_64BIT */
@@ -324,8 +327,8 @@ extern unsigned long MODULES_END;
/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */
-#define _REGION_ENTRY_RO 0x200 /* region protection bit */
-#define _REGION_ENTRY_INV 0x20 /* invalid region table entry */
+#define _REGION_ENTRY_PROTECT 0x200 /* region protection bit */
+#define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
#define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */
@@ -333,29 +336,47 @@ extern unsigned long MODULES_END;
#define _REGION_ENTRY_LENGTH 0x03 /* region third length */
#define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
-#define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
+#define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
-#define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
+#define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
-#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)
+#define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */
#define _REGION3_ENTRY_RO 0x200 /* page protection bit */
#define _REGION3_ENTRY_CO 0x100 /* change-recording override */
/* Bits in the segment table entry */
+#define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL
+#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff1ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
-#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
-#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
+#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */
+#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
#define _SEGMENT_ENTRY (0)
-#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV)
+#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID)
#define _SEGMENT_ENTRY_LARGE 0x400 /* STE-format control, large page */
#define _SEGMENT_ENTRY_CO 0x100 /* change-recording override */
+#define _SEGMENT_ENTRY_SPLIT 0x001 /* THP splitting bit */
+#define _SEGMENT_ENTRY_YOUNG 0x002 /* SW segment young bit */
+#define _SEGMENT_ENTRY_NONE _SEGMENT_ENTRY_YOUNG
+
+/*
+ * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
+ * ..R...I...y.
+ * prot-none, old ..0...1...1.
+ * prot-none, young ..1...1...1.
+ * read-only, old ..1...1...0.
+ * read-only, young ..1...0...1.
+ * read-write, old ..0...1...0.
+ * read-write, young ..0...0...1.
+ * The segment table origin is used to distinguish empty (origin==0) from
+ * read-write, old segment table entries (origin!=0)
+ */
+
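
The segment-entry states in this table are what pmd_prot_none() and pmd_young() further down decode. A compact decoder over the three relevant bits, assuming only the constants defined in this hunk (PROTECT 0x200, INVALID 0x20, YOUNG 0x02):

#include <stdio.h>

#define _SEGMENT_ENTRY_PROTECT	0x200UL	/* R */
#define _SEGMENT_ENTRY_INVALID	0x020UL	/* I */
#define _SEGMENT_ENTRY_YOUNG	0x002UL	/* y */
#define _SEGMENT_ENTRY_NONE	_SEGMENT_ENTRY_YOUNG

static int seg_prot_none(unsigned long e)
{
	return (e & _SEGMENT_ENTRY_INVALID) && (e & _SEGMENT_ENTRY_NONE);
}

static int seg_young(unsigned long e)
{
	return seg_prot_none(e) ? !!(e & _SEGMENT_ENTRY_PROTECT)
				: !!(e & _SEGMENT_ENTRY_YOUNG);
}

int main(void)
{
	/* read-only, young: ..1...0...1.  ->  not prot-none, young */
	unsigned long e = _SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_YOUNG;

	printf("prot_none=%d young=%d\n", seg_prot_none(e), seg_young(e));
	return 0;
}
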
#define _SEGMENT_ENTRY_SPLIT_BIT 0 /* THP splitting bit number */
-#define _SEGMENT_ENTRY_SPLIT (1UL << _SEGMENT_ENTRY_SPLIT_BIT)
/* Set of bits not changed in pmd_modify */
#define _SEGMENT_CHG_MASK (_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
@@ -369,9 +390,7 @@ extern unsigned long MODULES_END;
#define PGSTE_HC_BIT 0x0020000000000000UL
#define PGSTE_GR_BIT 0x0004000000000000UL
#define PGSTE_GC_BIT 0x0002000000000000UL
-#define PGSTE_UR_BIT 0x0000800000000000UL
-#define PGSTE_UC_BIT 0x0000400000000000UL /* user dirty (migration) */
-#define PGSTE_IN_BIT 0x0000200000000000UL /* IPTE notify bit */
+#define PGSTE_IN_BIT 0x0000800000000000UL /* IPTE notify bit */
#endif /* CONFIG_64BIT */
@@ -386,14 +405,18 @@ extern unsigned long MODULES_END;
/*
* Page protection definitions.
*/
-#define PAGE_NONE __pgprot(_PAGE_TYPE_NONE)
-#define PAGE_RO __pgprot(_PAGE_TYPE_RO)
-#define PAGE_RW __pgprot(_PAGE_TYPE_RO | _PAGE_SWW)
-#define PAGE_RWC __pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC)
-
-#define PAGE_KERNEL PAGE_RWC
-#define PAGE_SHARED PAGE_KERNEL
-#define PAGE_COPY PAGE_RO
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID)
+#define PAGE_READ __pgprot(_PAGE_PRESENT | _PAGE_READ | \
+ _PAGE_INVALID | _PAGE_PROTECT)
+#define PAGE_WRITE __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _PAGE_INVALID | _PAGE_PROTECT)
+
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _PAGE_YOUNG | _PAGE_DIRTY)
+#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+ _PAGE_YOUNG | _PAGE_DIRTY)
+#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
+ _PAGE_PROTECT)
/*
* On s390 the page table entry has an invalid bit and a read-only bit.
@@ -402,35 +425,31 @@ extern unsigned long MODULES_END;
*/
/*xwr*/
#define __P000 PAGE_NONE
-#define __P001 PAGE_RO
-#define __P010 PAGE_RO
-#define __P011 PAGE_RO
-#define __P100 PAGE_RO
-#define __P101 PAGE_RO
-#define __P110 PAGE_RO
-#define __P111 PAGE_RO
+#define __P001 PAGE_READ
+#define __P010 PAGE_READ
+#define __P011 PAGE_READ
+#define __P100 PAGE_READ
+#define __P101 PAGE_READ
+#define __P110 PAGE_READ
+#define __P111 PAGE_READ
#define __S000 PAGE_NONE
-#define __S001 PAGE_RO
-#define __S010 PAGE_RW
-#define __S011 PAGE_RW
-#define __S100 PAGE_RO
-#define __S101 PAGE_RO
-#define __S110 PAGE_RW
-#define __S111 PAGE_RW
+#define __S001 PAGE_READ
+#define __S010 PAGE_WRITE
+#define __S011 PAGE_WRITE
+#define __S100 PAGE_READ
+#define __S101 PAGE_READ
+#define __S110 PAGE_WRITE
+#define __S111 PAGE_WRITE
/*
* Segment entry (large page) protection definitions.
*/
-#define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE)
-#define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO)
-#define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW)
-
-static inline int mm_exclusive(struct mm_struct *mm)
-{
- return likely(mm == current->active_mm &&
- atomic_read(&mm->context.attach_count) <= 1);
-}
+#define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_INVALID | \
+ _SEGMENT_ENTRY_NONE)
+#define SEGMENT_READ __pgprot(_SEGMENT_ENTRY_INVALID | \
+ _SEGMENT_ENTRY_PROTECT)
+#define SEGMENT_WRITE __pgprot(_SEGMENT_ENTRY_INVALID)
static inline int mm_has_pgste(struct mm_struct *mm)
{
@@ -467,7 +486,7 @@ static inline int pgd_none(pgd_t pgd)
{
if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
return 0;
- return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
+ return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}
static inline int pgd_bad(pgd_t pgd)
@@ -478,7 +497,7 @@ static inline int pgd_bad(pgd_t pgd)
* invalid for either table entry.
*/
unsigned long mask =
- ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
+ ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
return (pgd_val(pgd) & mask) != 0;
}
@@ -494,7 +513,7 @@ static inline int pud_none(pud_t pud)
{
if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
return 0;
- return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
+ return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
}
static inline int pud_large(pud_t pud)
@@ -512,7 +531,7 @@ static inline int pud_bad(pud_t pud)
* invalid for either table entry.
*/
unsigned long mask =
- ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
+ ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
return (pud_val(pud) & mask) != 0;
}
@@ -521,30 +540,36 @@ static inline int pud_bad(pud_t pud)
static inline int pmd_present(pmd_t pmd)
{
- unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO;
- return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE ||
- !(pmd_val(pmd) & _SEGMENT_ENTRY_INV);
+ return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
}
static inline int pmd_none(pmd_t pmd)
{
- return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) &&
- !(pmd_val(pmd) & _SEGMENT_ENTRY_RO);
+ return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}
static inline int pmd_large(pmd_t pmd)
{
#ifdef CONFIG_64BIT
- return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
#else
return 0;
#endif
}
+static inline int pmd_prot_none(pmd_t pmd)
+{
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) &&
+ (pmd_val(pmd) & _SEGMENT_ENTRY_NONE);
+}
+
static inline int pmd_bad(pmd_t pmd)
{
- unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
- return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
+#ifdef CONFIG_64BIT
+ if (pmd_large(pmd))
+ return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
+#endif
+ return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
@@ -563,31 +588,40 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
- return (pmd_val(pmd) & _SEGMENT_ENTRY_RO) == 0;
+ if (pmd_prot_none(pmd))
+ return 0;
+ return (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) == 0;
}
static inline int pmd_young(pmd_t pmd)
{
- return 0;
+ int young = 0;
+#ifdef CONFIG_64BIT
+ if (pmd_prot_none(pmd))
+ young = (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) != 0;
+ else
+ young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
+#endif
+ return young;
}
-static inline int pte_none(pte_t pte)
+static inline int pte_present(pte_t pte)
{
- return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
+ /* Bit pattern: (pte & 0x001) == 0x001 */
+ return (pte_val(pte) & _PAGE_PRESENT) != 0;
}
-static inline int pte_present(pte_t pte)
+static inline int pte_none(pte_t pte)
{
- unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
- return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
- (!(pte_val(pte) & _PAGE_INVALID) &&
- !(pte_val(pte) & _PAGE_SWT));
+ /* Bit pattern: pte == 0x400 */
+ return pte_val(pte) == _PAGE_INVALID;
}
static inline int pte_file(pte_t pte)
{
- unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
- return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
+ /* Bit pattern: (pte & 0x601) == 0x600 */
+ return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT))
+ == (_PAGE_INVALID | _PAGE_PROTECT);
}
static inline int pte_special(pte_t pte)
@@ -634,6 +668,15 @@ static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
#endif
}
+static inline pgste_t pgste_get(pte_t *ptep)
+{
+ unsigned long pgste = 0;
+#ifdef CONFIG_PGSTE
+ pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
+#endif
+ return __pgste(pgste);
+}
+
static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
@@ -644,33 +687,28 @@ static inline void pgste_set(pte_t *ptep, pgste_t pgste)
static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
- unsigned long address, bits;
- unsigned char skey;
+ unsigned long address, bits, skey;
if (pte_val(*ptep) & _PAGE_INVALID)
return pgste;
address = pte_val(*ptep) & PAGE_MASK;
- skey = page_get_storage_key(address);
+ skey = (unsigned long) page_get_storage_key(address);
bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
- /* Clear page changed & referenced bit in the storage key */
- if (bits & _PAGE_CHANGED)
+ if (!(pgste_val(pgste) & PGSTE_HC_BIT) && (bits & _PAGE_CHANGED)) {
+ /* Transfer dirty + referenced bit to host bits in pgste */
+ pgste_val(pgste) |= bits << 52;
page_set_storage_key(address, skey ^ bits, 0);
- else if (bits)
+ } else if (!(pgste_val(pgste) & PGSTE_HR_BIT) &&
+ (bits & _PAGE_REFERENCED)) {
+ /* Transfer referenced bit to host bit in pgste */
+ pgste_val(pgste) |= PGSTE_HR_BIT;
page_reset_referenced(address);
+ }
/* Transfer page changed & referenced bit to guest bits in pgste */
pgste_val(pgste) |= bits << 48; /* GR bit & GC bit */
- /* Get host changed & referenced bits from pgste */
- bits |= (pgste_val(pgste) & (PGSTE_HR_BIT | PGSTE_HC_BIT)) >> 52;
- /* Transfer page changed & referenced bit to kvm user bits */
- pgste_val(pgste) |= bits << 45; /* PGSTE_UR_BIT & PGSTE_UC_BIT */
- /* Clear relevant host bits in pgste. */
- pgste_val(pgste) &= ~(PGSTE_HR_BIT | PGSTE_HC_BIT);
- pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
/* Copy page access key and fetch protection bit to pgste */
- pgste_val(pgste) |=
- (unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
- /* Transfer referenced bit to pte */
- pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1;
+ pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
+ pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
return pgste;
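
The `bits << 52` and `bits << 48` shifts in pgste_update_all() work because the storage-key change/reference bits line up with the PGSTE host and guest bits once shifted. A quick check, assuming _PAGE_CHANGED is 0x02 and _PAGE_REFERENCED is 0x04 as in asm/page.h, and PGSTE_HR_BIT is 0x0040000000000000UL (not shown in this hunk):

#include <assert.h>

#define _PAGE_CHANGED		0x02UL
#define _PAGE_REFERENCED	0x04UL

#define PGSTE_HR_BIT		0x0040000000000000UL	/* assumed value */
#define PGSTE_HC_BIT		0x0020000000000000UL
#define PGSTE_GR_BIT		0x0004000000000000UL
#define PGSTE_GC_BIT		0x0002000000000000UL

int main(void)
{
	assert((_PAGE_CHANGED << 52) == PGSTE_HC_BIT);
	assert((_PAGE_REFERENCED << 52) == PGSTE_HR_BIT);
	assert((_PAGE_CHANGED << 48) == PGSTE_GC_BIT);
	assert((_PAGE_REFERENCED << 48) == PGSTE_GR_BIT);
	return 0;
}
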
@@ -679,24 +717,11 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
- int young;
-
if (pte_val(*ptep) & _PAGE_INVALID)
return pgste;
/* Get referenced bit from storage key */
- young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
- if (young)
- pgste_val(pgste) |= PGSTE_GR_BIT;
- /* Get host referenced bit from pgste */
- if (pgste_val(pgste) & PGSTE_HR_BIT) {
- pgste_val(pgste) &= ~PGSTE_HR_BIT;
- young = 1;
- }
- /* Transfer referenced bit to kvm user bits and pte */
- if (young) {
- pgste_val(pgste) |= PGSTE_UR_BIT;
- pte_val(*ptep) |= _PAGE_SWR;
- }
+ if (page_reset_referenced(pte_val(*ptep) & PAGE_MASK))
+ pgste_val(pgste) |= PGSTE_HR_BIT | PGSTE_GR_BIT;
#endif
return pgste;
}
@@ -723,13 +748,13 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
{
- if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) {
+ if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_WRITE)) {
/*
* Without enhanced suppression-on-protection force
* the dirty bit on for all writable ptes.
*/
- pte_val(entry) |= _PAGE_SWC;
- pte_val(entry) &= ~_PAGE_RO;
+ pte_val(entry) |= _PAGE_DIRTY;
+ pte_val(entry) &= ~_PAGE_PROTECT;
}
*ptep = entry;
}
@@ -841,21 +866,17 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
*/
static inline int pte_write(pte_t pte)
{
- return (pte_val(pte) & _PAGE_SWW) != 0;
+ return (pte_val(pte) & _PAGE_WRITE) != 0;
}
static inline int pte_dirty(pte_t pte)
{
- return (pte_val(pte) & _PAGE_SWC) != 0;
+ return (pte_val(pte) & _PAGE_DIRTY) != 0;
}
static inline int pte_young(pte_t pte)
{
-#ifdef CONFIG_PGSTE
- if (pte_val(pte) & _PAGE_SWR)
- return 1;
-#endif
- return 0;
+ return (pte_val(pte) & _PAGE_YOUNG) != 0;
}
/*
@@ -880,12 +901,12 @@ static inline void pud_clear(pud_t *pud)
static inline void pmd_clear(pmd_t *pmdp)
{
- pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+ pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
- pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+ pte_val(*ptep) = _PAGE_INVALID;
}
/*
@@ -896,55 +917,63 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_val(pte) &= _PAGE_CHG_MASK;
pte_val(pte) |= pgprot_val(newprot);
- if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW))
- pte_val(pte) &= ~_PAGE_RO;
+ /*
+ * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
+ * invalid bit set, clear it again for readable, young pages
+ */
+ if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
+ pte_val(pte) &= ~_PAGE_INVALID;
+ /*
+ * newprot for PAGE_READ and PAGE_WRITE has the page protection
+ * bit set, clear it again for writable, dirty pages
+ */
+ if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
+ pte_val(pte) &= ~_PAGE_PROTECT;
return pte;
}
static inline pte_t pte_wrprotect(pte_t pte)
{
- pte_val(pte) &= ~_PAGE_SWW;
- /* Do not clobber _PAGE_TYPE_NONE pages! */
- if (!(pte_val(pte) & _PAGE_INVALID))
- pte_val(pte) |= _PAGE_RO;
+ pte_val(pte) &= ~_PAGE_WRITE;
+ pte_val(pte) |= _PAGE_PROTECT;
return pte;
}
static inline pte_t pte_mkwrite(pte_t pte)
{
- pte_val(pte) |= _PAGE_SWW;
- if (pte_val(pte) & _PAGE_SWC)
- pte_val(pte) &= ~_PAGE_RO;
+ pte_val(pte) |= _PAGE_WRITE;
+ if (pte_val(pte) & _PAGE_DIRTY)
+ pte_val(pte) &= ~_PAGE_PROTECT;
return pte;
}
static inline pte_t pte_mkclean(pte_t pte)
{
- pte_val(pte) &= ~_PAGE_SWC;
- /* Do not clobber _PAGE_TYPE_NONE pages! */
- if (!(pte_val(pte) & _PAGE_INVALID))
- pte_val(pte) |= _PAGE_RO;
+ pte_val(pte) &= ~_PAGE_DIRTY;
+ pte_val(pte) |= _PAGE_PROTECT;
return pte;
}
static inline pte_t pte_mkdirty(pte_t pte)
{
- pte_val(pte) |= _PAGE_SWC;
- if (pte_val(pte) & _PAGE_SWW)
- pte_val(pte) &= ~_PAGE_RO;
+ pte_val(pte) |= _PAGE_DIRTY;
+ if (pte_val(pte) & _PAGE_WRITE)
+ pte_val(pte) &= ~_PAGE_PROTECT;
return pte;
}
static inline pte_t pte_mkold(pte_t pte)
{
-#ifdef CONFIG_PGSTE
- pte_val(pte) &= ~_PAGE_SWR;
-#endif
+ pte_val(pte) &= ~_PAGE_YOUNG;
+ pte_val(pte) |= _PAGE_INVALID;
return pte;
}
static inline pte_t pte_mkyoung(pte_t pte)
{
+ pte_val(pte) |= _PAGE_YOUNG;
+ if (pte_val(pte) & _PAGE_READ)
+ pte_val(pte) &= ~_PAGE_INVALID;
return pte;
}
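
The helpers above keep two invariants: _PAGE_INVALID is cleared only for readable, young ptes, and _PAGE_PROTECT is cleared only for writable, dirty ptes, so the first access and the first store of a page still fault and can be accounted. A reduced sketch of the dirty/write half, with pte_t simplified to an unsigned long:

#include <assert.h>

#define _PAGE_DIRTY	0x008UL
#define _PAGE_WRITE	0x020UL
#define _PAGE_PROTECT	0x200UL

static unsigned long pte_mkwrite(unsigned long pte)
{
	pte |= _PAGE_WRITE;
	if (pte & _PAGE_DIRTY)
		pte &= ~_PAGE_PROTECT;
	return pte;
}

static unsigned long pte_mkdirty(unsigned long pte)
{
	pte |= _PAGE_DIRTY;
	if (pte & _PAGE_WRITE)
		pte &= ~_PAGE_PROTECT;
	return pte;
}

int main(void)
{
	unsigned long pte = _PAGE_PROTECT;	/* clean, read-only */

	pte = pte_mkwrite(pte);			/* writable, still protected */
	assert(pte & _PAGE_PROTECT);
	pte = pte_mkdirty(pte);			/* first write: protection dropped */
	assert(!(pte & _PAGE_PROTECT));
	return 0;
}
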
@@ -957,7 +986,7 @@ static inline pte_t pte_mkspecial(pte_t pte)
#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
- pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
+ pte_val(pte) |= _PAGE_LARGE;
return pte;
}
#endif
@@ -974,8 +1003,8 @@ static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
if (mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep);
pgste = pgste_update_all(ptep, pgste);
- dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
- pgste_val(pgste) &= ~PGSTE_UC_BIT;
+ dirty = !!(pgste_val(pgste) & PGSTE_HC_BIT);
+ pgste_val(pgste) &= ~PGSTE_HC_BIT;
pgste_set_unlock(ptep, pgste);
return dirty;
}
@@ -994,59 +1023,75 @@ static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
if (mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep);
pgste = pgste_update_young(ptep, pgste);
- young = !!(pgste_val(pgste) & PGSTE_UR_BIT);
- pgste_val(pgste) &= ~PGSTE_UR_BIT;
+ young = !!(pgste_val(pgste) & PGSTE_HR_BIT);
+ pgste_val(pgste) &= ~PGSTE_HR_BIT;
pgste_set_unlock(ptep, pgste);
}
return young;
}
+static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
+{
+ if (!(pte_val(*ptep) & _PAGE_INVALID)) {
+#ifndef CONFIG_64BIT
+ /* pto must point to the start of the segment table */
+ pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
+#else
+ /* ipte in zarch mode can do the math */
+ pte_t *pto = ptep;
+#endif
+ asm volatile(
+ " ipte %2,%3"
+ : "=m" (*ptep) : "m" (*ptep),
+ "a" (pto), "a" (address));
+ }
+}
+
+static inline void ptep_flush_lazy(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ int active = (mm == current->active_mm) ? 1 : 0;
+
+ if (atomic_read(&mm->context.attach_count) > active)
+ __ptep_ipte(address, ptep);
+ else
+ mm->context.flush_mm = 1;
+}
+
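
ptep_flush_lazy() only issues the expensive IPTE when another CPU might still hold the mapping in its TLB; if the mm is attached nowhere else, the flush is merely recorded and done later by __tlb_flush_mm_lazy(). A stand-alone model of that decision; attach_count and active mirror the fields used above, the flush functions are placeholders:

#include <stdio.h>

static void flush_now(void)   { printf("ipte now\n"); }
static void flush_later(void) { printf("defer, set context.flush_mm\n"); }

static void ptep_flush_lazy(int attach_count, int is_active_mm)
{
	int active = is_active_mm ? 1 : 0;

	if (attach_count > active)
		flush_now();	/* someone else may hold a TLB entry */
	else
		flush_later();	/* only we use it: batch the flush */
}

int main(void)
{
	ptep_flush_lazy(1, 1);	/* exclusive user: defer */
	ptep_flush_lazy(2, 1);	/* shared mm: flush immediately */
	return 0;
}
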
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pgste_t pgste;
pte_t pte;
+ int young;
if (mm_has_pgste(vma->vm_mm)) {
pgste = pgste_get_lock(ptep);
- pgste = pgste_update_young(ptep, pgste);
- pte = *ptep;
- *ptep = pte_mkold(pte);
- pgste_set_unlock(ptep, pgste);
- return pte_young(pte);
+ pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste);
}
- return 0;
+
+ pte = *ptep;
+ __ptep_ipte(addr, ptep);
+ young = pte_young(pte);
+ pte = pte_mkold(pte);
+
+ if (mm_has_pgste(vma->vm_mm)) {
+ pgste_set_pte(ptep, pte);
+ pgste_set_unlock(ptep, pgste);
+ } else
+ *ptep = pte;
+
+ return young;
}
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
- /* No need to flush TLB
- * On s390 reference bits are in storage key and never in TLB
- * With virtualization we handle the reference bit, without we
- * we can simply return */
return ptep_test_and_clear_young(vma, address, ptep);
}
-static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
-{
- if (!(pte_val(*ptep) & _PAGE_INVALID)) {
-#ifndef CONFIG_64BIT
- /* pto must point to the start of the segment table */
- pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
-#else
- /* ipte in zarch mode can do the math */
- pte_t *pto = ptep;
-#endif
- asm volatile(
- " ipte %2,%3"
- : "=m" (*ptep) : "m" (*ptep),
- "a" (pto), "a" (address));
- }
-}
-
/*
* This is hard to understand. ptep_get_and_clear and ptep_clear_flush
* both clear the TLB for the unmapped pte. The reason is that
@@ -1067,16 +1112,14 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
pgste_t pgste;
pte_t pte;
- mm->context.flush_mm = 1;
if (mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep);
pgste = pgste_ipte_notify(mm, address, ptep, pgste);
}
pte = *ptep;
- if (!mm_exclusive(mm))
- __ptep_ipte(address, ptep);
- pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+ ptep_flush_lazy(mm, address, ptep);
+ pte_val(*ptep) = _PAGE_INVALID;
if (mm_has_pgste(mm)) {
pgste = pgste_update_all(&pte, pgste);
@@ -1093,15 +1136,14 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
pgste_t pgste;
pte_t pte;
- mm->context.flush_mm = 1;
if (mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep);
pgste_ipte_notify(mm, address, ptep, pgste);
}
pte = *ptep;
- if (!mm_exclusive(mm))
- __ptep_ipte(address, ptep);
+ ptep_flush_lazy(mm, address, ptep);
+ pte_val(*ptep) |= _PAGE_INVALID;
if (mm_has_pgste(mm)) {
pgste = pgste_update_all(&pte, pgste);
@@ -1117,7 +1159,7 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
pgste_t pgste;
if (mm_has_pgste(mm)) {
- pgste = *(pgste_t *)(ptep + PTRS_PER_PTE);
+ pgste = pgste_get(ptep);
pgste_set_key(ptep, pgste, pte);
pgste_set_pte(ptep, pte);
pgste_set_unlock(ptep, pgste);
@@ -1139,7 +1181,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
pte = *ptep;
__ptep_ipte(address, ptep);
- pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+ pte_val(*ptep) = _PAGE_INVALID;
if (mm_has_pgste(vma->vm_mm)) {
pgste = pgste_update_all(&pte, pgste);
@@ -1163,18 +1205,17 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
pgste_t pgste;
pte_t pte;
- if (mm_has_pgste(mm)) {
+ if (!full && mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep);
- if (!full)
- pgste = pgste_ipte_notify(mm, address, ptep, pgste);
+ pgste = pgste_ipte_notify(mm, address, ptep, pgste);
}
pte = *ptep;
if (!full)
- __ptep_ipte(address, ptep);
- pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+ ptep_flush_lazy(mm, address, ptep);
+ pte_val(*ptep) = _PAGE_INVALID;
- if (mm_has_pgste(mm)) {
+ if (!full && mm_has_pgste(mm)) {
pgste = pgste_update_all(&pte, pgste);
pgste_set_unlock(ptep, pgste);
}
@@ -1189,14 +1230,12 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
pte_t pte = *ptep;
if (pte_write(pte)) {
- mm->context.flush_mm = 1;
if (mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep);
pgste = pgste_ipte_notify(mm, address, ptep, pgste);
}
- if (!mm_exclusive(mm))
- __ptep_ipte(address, ptep);
+ ptep_flush_lazy(mm, address, ptep);
pte = pte_wrprotect(pte);
if (mm_has_pgste(mm)) {
@@ -1240,7 +1279,7 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
pte_t __pte;
pte_val(__pte) = physpage + pgprot_val(pgprot);
- return __pte;
+ return pte_mkyoung(__pte);
}
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
@@ -1248,10 +1287,8 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
unsigned long physpage = page_to_phys(page);
pte_t __pte = mk_pte_phys(physpage, pgprot);
- if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) {
- pte_val(__pte) |= _PAGE_SWC;
- pte_val(__pte) &= ~_PAGE_RO;
- }
+ if (pte_write(__pte) && PageDirty(page))
+ __pte = pte_mkdirty(__pte);
return __pte;
}
@@ -1313,7 +1350,7 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
unsigned long sto = (unsigned long) pmdp -
pmd_index(address) * sizeof(pmd_t);
- if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
+ if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) {
asm volatile(
" .insn rrf,0xb98e0000,%2,%3,0,0"
: "=m" (*pmdp)
@@ -1324,24 +1361,68 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
}
}
+static inline void __pmd_csp(pmd_t *pmdp)
+{
+ register unsigned long reg2 asm("2") = pmd_val(*pmdp);
+ register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
+ _SEGMENT_ENTRY_INVALID;
+ register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
+
+ asm volatile(
+ " csp %1,%3"
+ : "=m" (*pmdp)
+ : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
+}
+
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
/*
- * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx)
+ * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
* Convert to segment table entry format.
*/
if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
return pgprot_val(SEGMENT_NONE);
- if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
- return pgprot_val(SEGMENT_RO);
- return pgprot_val(SEGMENT_RW);
+ if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
+ return pgprot_val(SEGMENT_READ);
+ return pgprot_val(SEGMENT_WRITE);
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+#ifdef CONFIG_64BIT
+ if (pmd_prot_none(pmd)) {
+ pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+ } else {
+ pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
+ }
+#endif
+ return pmd;
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+#ifdef CONFIG_64BIT
+ if (pmd_prot_none(pmd)) {
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
+ } else {
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
+ pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
+ }
+#endif
+ return pmd;
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
+ int young;
+
+ young = pmd_young(pmd);
pmd_val(pmd) &= _SEGMENT_CHG_MASK;
pmd_val(pmd) |= massage_pgprot_pmd(newprot);
+ if (young)
+ pmd = pmd_mkyoung(pmd);
return pmd;
}
@@ -1349,18 +1430,29 @@ static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
pmd_t __pmd;
pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
- return __pmd;
+ return pmd_mkyoung(__pmd);
}
static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
- /* Do not clobber _HPAGE_TYPE_NONE pages! */
- if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV))
- pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
+ /* Do not clobber PROT_NONE segments! */
+ if (!pmd_prot_none(pmd))
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
+static inline void pmdp_flush_lazy(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ int active = (mm == current->active_mm) ? 1 : 0;
+
+ if ((atomic_read(&mm->context.attach_count) & 0xffff) > active)
+ __pmd_idte(address, pmdp);
+ else
+ mm->context.flush_mm = 1;
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PGTABLE_DEPOSIT
@@ -1378,7 +1470,7 @@ static inline int pmd_trans_splitting(pmd_t pmd)
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t entry)
{
- if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
+ if (!(pmd_val(entry) & _SEGMENT_ENTRY_INVALID) && MACHINE_HAS_EDAT1)
pmd_val(entry) |= _SEGMENT_ENTRY_CO;
*pmdp = entry;
}
@@ -1391,7 +1483,9 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd)
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
- pmd_val(pmd) |= _SEGMENT_ENTRY_RO;
+ /* Do not clobber PROT_NONE segments! */
+ if (!pmd_prot_none(pmd))
+ pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
return pmd;
}
@@ -1401,50 +1495,16 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd)
return pmd;
}
-static inline pmd_t pmd_mkold(pmd_t pmd)
-{
- /* No referenced bit in the segment table entry. */
- return pmd;
-}
-
-static inline pmd_t pmd_mkyoung(pmd_t pmd)
-{
- /* No referenced bit in the segment table entry. */
- return pmd;
-}
-
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
- unsigned long pmd_addr = pmd_val(*pmdp) & HPAGE_MASK;
- long tmp, rc;
- int counter;
+ pmd_t pmd;
- rc = 0;
- if (MACHINE_HAS_RRBM) {
- counter = PTRS_PER_PTE >> 6;
- asm volatile(
- "0: .insn rre,0xb9ae0000,%0,%3\n" /* rrbm */
- " ogr %1,%0\n"
- " la %3,0(%4,%3)\n"
- " brct %2,0b\n"
- : "=&d" (tmp), "+&d" (rc), "+d" (counter),
- "+a" (pmd_addr)
- : "a" (64 * 4096UL) : "cc");
- rc = !!rc;
- } else {
- counter = PTRS_PER_PTE;
- asm volatile(
- "0: rrbe 0,%2\n"
- " la %2,0(%3,%2)\n"
- " brc 12,1f\n"
- " lhi %0,1\n"
- "1: brct %1,0b\n"
- : "+d" (rc), "+d" (counter), "+a" (pmd_addr)
- : "a" (4096UL) : "cc");
- }
- return rc;
+ pmd = *pmdp;
+ __pmd_idte(address, pmdp);
+ *pmdp = pmd_mkold(pmd);
+ return pmd_young(pmd);
}
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
@@ -1510,10 +1570,8 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
* exception will occur instead of a page translation exception. The
* specification exception has the bad habit not to store necessary
* information in the lowcore.
- * Bit 21 and bit 22 are the page invalid bit and the page protection
- * bit. We set both to indicate a swapped page.
- * Bit 30 and 31 are used to distinguish the different page types. For
- * a swapped page these bits need to be zero.
+ * Bits 21, 22, 30 and 31 are used to indicate the page type.
+ * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
* This leaves the bits 1-19 and bits 24-29 to store type and offset.
* We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
* plus 24 for the offset.
@@ -1527,10 +1585,8 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
* exception will occur instead of a page translation exception. The
* specification exception has the bad habit not to store necessary
* information in the lowcore.
- * Bit 53 and bit 54 are the page invalid bit and the page protection
- * bit. We set both to indicate a swapped page.
- * Bit 62 and 63 are used to distinguish the different page types. For
- * a swapped page these bits need to be zero.
+ * Bits 53, 54, 62 and 63 are used to indicate the page type.
+ * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
* This leaves the bits 0-51 and bits 56-61 to store type and offset.
* We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
* plus 56 for the offset.
@@ -1547,7 +1603,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
pte_t pte;
offset &= __SWP_OFFSET_MASK;
- pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
+ pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) |
((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
return pte;
}
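
A swap entry built this way lands exactly on the documented pattern (pte & 0x603) == 0x402 and can never look present. A quick self-check, with the offset masking left out since its exact width depends on the 31/64-bit layout described above:

#include <assert.h>
#include <stdio.h>

#define _PAGE_PRESENT	0x001UL
#define _PAGE_TYPE	0x002UL
#define _PAGE_INVALID	0x400UL

static unsigned long mk_swap_pte_val(unsigned long type, unsigned long offset)
{
	return _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) |
	       ((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
}

int main(void)
{
	unsigned long pte = mk_swap_pte_val(3, 12345);

	assert((pte & 0x603UL) == 0x402UL);	/* swap pattern */
	assert(!(pte & _PAGE_PRESENT));		/* never pte_present() */
	printf("swap pte: %#lx\n", pte);
	return 0;
}
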
@@ -1570,7 +1626,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
#define pgoff_to_pte(__off) \
((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
- | _PAGE_TYPE_FILE })
+ | _PAGE_INVALID | _PAGE_PROTECT })
#endif /* !__ASSEMBLY__ */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index b0e6435b2f0..0eb37505cab 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -43,6 +43,7 @@ extern void execve_tail(void);
#ifndef CONFIG_64BIT
#define TASK_SIZE (1UL << 31)
+#define TASK_MAX_SIZE (1UL << 31)
#define TASK_UNMAPPED_BASE (1UL << 30)
#else /* CONFIG_64BIT */
@@ -51,6 +52,7 @@ extern void execve_tail(void);
#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
(1UL << 30) : (1UL << 41))
#define TASK_SIZE TASK_SIZE_OF(current)
+#define TASK_MAX_SIZE (1UL << 53)
#endif /* CONFIG_64BIT */
diff --git a/arch/s390/include/asm/serial.h b/arch/s390/include/asm/serial.h
new file mode 100644
index 00000000000..5b3e48ef534
--- /dev/null
+++ b/arch/s390/include/asm/serial.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_S390_SERIAL_H
+#define _ASM_S390_SERIAL_H
+
+#define BASE_BAUD 0
+
+#endif /* _ASM_S390_SERIAL_H */
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 80b6f11263c..6dbd559763c 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -8,6 +8,7 @@
#define __ASM_SWITCH_TO_H
#include <linux/thread_info.h>
+#include <asm/ptrace.h>
extern struct task_struct *__switch_to(void *, void *);
extern void update_cr_regs(struct task_struct *task);
@@ -68,12 +69,16 @@ static inline void restore_fp_regs(s390_fp_regs *fpregs)
static inline void save_access_regs(unsigned int *acrs)
{
- asm volatile("stam 0,15,%0" : "=Q" (*acrs));
+ typedef struct { int _[NUM_ACRS]; } acrstype;
+
+ asm volatile("stam 0,15,%0" : "=Q" (*(acrstype *)acrs));
}
static inline void restore_access_regs(unsigned int *acrs)
{
- asm volatile("lam 0,15,%0" : : "Q" (*acrs));
+ typedef struct { int _[NUM_ACRS]; } acrstype;
+
+ asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs));
}
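
The struct cast is the usual GCC idiom for telling the compiler that an inline-asm memory operand covers a whole array rather than just its first element, so none of the 16 backing words can be cached across the stam/lam. A generic, non-s390 illustration of the idiom, with an empty asm standing in for the real instruction:

#include <stdio.h>

#define NUM_ACRS 16

static void touch_all_acrs(unsigned int *acrs)
{
	/* One memory operand that spans all NUM_ACRS words: */
	typedef struct { unsigned int _[NUM_ACRS]; } acrstype;

	asm volatile("" : "+m" (*(acrstype *)acrs));
}

int main(void)
{
	unsigned int acrs[NUM_ACRS] = { 0 };

	touch_all_acrs(acrs);
	printf("%u\n", acrs[0]);
	return 0;
}
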
#define switch_to(prev,next,last) do { \
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index b75d7d68668..2cb846c4b37 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -32,6 +32,7 @@ struct mmu_gather {
struct mm_struct *mm;
struct mmu_table_batch *batch;
unsigned int fullmm;
+ unsigned long start, end;
};
struct mmu_table_batch {
@@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
static inline void tlb_gather_mmu(struct mmu_gather *tlb,
struct mm_struct *mm,
- unsigned int full_mm_flush)
+ unsigned long start,
+ unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = full_mm_flush;
+ tlb->start = start;
+ tlb->end = end;
+ tlb->fullmm = !(start | (end+1));
tlb->batch = NULL;
if (tlb->fullmm)
__tlb_flush_mm(mm);
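
With the new start/end arguments, fullmm is derived rather than passed in: it is non-zero only for the (0, -1) range the generic code uses when tearing down a whole address space. The arithmetic is easy to verify:

#include <stdio.h>

static unsigned int fullmm(unsigned long start, unsigned long end)
{
	return !(start | (end + 1));
}

int main(void)
{
	printf("%u\n", fullmm(0, ~0UL));	/* 1: whole-mm teardown */
	printf("%u\n", fullmm(0x1000, 0x8000)); /* 0: bounded range */
	printf("%u\n", fullmm(0, 0x8000));	/* 0: still a range */
	return 0;
}
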
@@ -59,13 +63,14 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
{
+ __tlb_flush_mm_lazy(tlb->mm);
tlb_table_flush(tlb);
}
static inline void tlb_finish_mmu(struct mmu_gather *tlb,
unsigned long start, unsigned long end)
{
- tlb_table_flush(tlb);
+ tlb_flush_mmu(tlb);
}
/*
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 6b32af30878..f9fef0425fe 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -86,7 +86,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
__tlb_flush_full(mm);
}
-static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
+static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
{
if (mm->context.flush_mm) {
__tlb_flush_mm(mm);
@@ -118,13 +118,13 @@ static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
static inline void flush_tlb_mm(struct mm_struct *mm)
{
- __tlb_flush_mm_cond(mm);
+ __tlb_flush_mm_lazy(mm);
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- __tlb_flush_mm_cond(vma->vm_mm);
+ __tlb_flush_mm_lazy(vma->vm_mm);
}
static inline void flush_tlb_kernel_range(unsigned long start,
diff --git a/arch/s390/include/asm/vtime.h b/arch/s390/include/asm/vtime.h
new file mode 100644
index 00000000000..af9896c53eb
--- /dev/null
+++ b/arch/s390/include/asm/vtime.h
@@ -0,0 +1,7 @@
+#ifndef _S390_VTIME_H
+#define _S390_VTIME_H
+
+#define __ARCH_HAS_VTIME_ACCOUNT
+#define __ARCH_HAS_VTIME_TASK_SWITCH
+
+#endif /* _S390_VTIME_H */
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index be7a408be7a..cc30d1fb000 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -18,6 +18,7 @@
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
+#include <asm/irq.h>
__PT_R0 = __PT_GPRS
__PT_R1 = __PT_GPRS + 4
@@ -435,6 +436,11 @@ io_skip:
io_loop:
l %r1,BASED(.Ldo_IRQ)
lr %r2,%r11 # pass pointer to pt_regs
+ lhi %r3,IO_INTERRUPT
+ tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
+ jz io_call
+ lhi %r3,THIN_INTERRUPT
+io_call:
basr %r14,%r1 # call do_IRQ
tm __LC_MACHINE_FLAGS+2,0x10 # MACHINE_FLAG_LPAR
jz io_return
@@ -584,9 +590,10 @@ ext_skip:
mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
TRACE_IRQS_OFF
+ l %r1,BASED(.Ldo_IRQ)
lr %r2,%r11 # pass pointer to pt_regs
- l %r1,BASED(.Ldo_extint)
- basr %r14,%r1 # call do_extint
+ lhi %r3,EXT_INTERRUPT
+ basr %r14,%r1 # call do_IRQ
j io_return
/*
@@ -879,13 +886,13 @@ cleanup_idle:
stm %r9,%r10,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
# prepare return psw
- n %r8,BASED(cleanup_idle_wait) # clear wait state bit
+ n %r8,BASED(cleanup_idle_wait) # clear irq & wait state bits
l %r9,24(%r11) # return from psw_idle
br %r14
cleanup_idle_insn:
.long psw_idle_lpsw + 0x80000000
cleanup_idle_wait:
- .long 0xfffdffff
+ .long 0xfcfdffff
/*
* Integer constants
@@ -902,7 +909,6 @@ cleanup_idle_wait:
.Ldo_machine_check: .long s390_do_machine_check
.Lhandle_mcck: .long s390_handle_mcck
.Ldo_IRQ: .long do_IRQ
-.Ldo_extint: .long do_extint
.Ldo_signal: .long do_signal
.Ldo_notify_resume: .long do_notify_resume
.Ldo_per_trap: .long do_per_trap
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 1c039d0c24c..2b2188b97c6 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -19,6 +19,7 @@
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
+#include <asm/irq.h>
__PT_R0 = __PT_GPRS
__PT_R1 = __PT_GPRS + 8
@@ -468,6 +469,11 @@ io_skip:
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
io_loop:
lgr %r2,%r11 # pass pointer to pt_regs
+ lghi %r3,IO_INTERRUPT
+ tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
+ jz io_call
+ lghi %r3,THIN_INTERRUPT
+io_call:
brasl %r14,do_IRQ
tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR
jz io_return
@@ -623,7 +629,8 @@ ext_skip:
TRACE_IRQS_OFF
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
lgr %r2,%r11 # pass pointer to pt_regs
- brasl %r14,do_extint
+ lghi %r3,EXT_INTERRUPT
+ brasl %r14,do_IRQ
j io_return
/*
@@ -922,7 +929,7 @@ cleanup_idle:
stg %r9,__LC_SYSTEM_TIMER
mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
# prepare return psw
- nihh %r8,0xfffd # clear wait state bit
+ nihh %r8,0xfcfd # clear irq & wait state bits
lg %r9,48(%r11) # return from psw_idle
br %r14
cleanup_idle_insn:
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 54b0995514e..b34ba0ea96a 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -22,6 +22,7 @@
#include <asm/cputime.h>
#include <asm/lowcore.h>
#include <asm/irq.h>
+#include <asm/hw_irq.h>
#include "entry.h"
DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
@@ -42,9 +43,10 @@ struct irq_class {
* Since the external and I/O interrupt fields are already sums we would end
* up with having a sum which accounts each interrupt twice.
*/
-static const struct irq_class irqclass_main_desc[NR_IRQS] = {
- [EXTERNAL_INTERRUPT] = {.name = "EXT"},
- [IO_INTERRUPT] = {.name = "I/O"}
+static const struct irq_class irqclass_main_desc[NR_IRQS_BASE] = {
+ [EXT_INTERRUPT] = {.name = "EXT"},
+ [IO_INTERRUPT] = {.name = "I/O"},
+ [THIN_INTERRUPT] = {.name = "AIO"},
};
/*
@@ -86,6 +88,28 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
[CPU_RST] = {.name = "RST", .desc = "[CPU] CPU Restart"},
};
+void __init init_IRQ(void)
+{
+ irq_reserve_irqs(0, THIN_INTERRUPT);
+ init_cio_interrupts();
+ init_airq_interrupts();
+ init_ext_interrupts();
+}
+
+void do_IRQ(struct pt_regs *regs, int irq)
+{
+ struct pt_regs *old_regs;
+
+ old_regs = set_irq_regs(regs);
+ irq_enter();
+ if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
+ /* Serve timer interrupts first. */
+ clock_comparator_work();
+ generic_handle_irq(irq);
+ irq_exit();
+ set_irq_regs(old_regs);
+}
+
/*
* show_interrupts is needed by /proc/interrupts.
*/
@@ -100,27 +124,36 @@ int show_interrupts(struct seq_file *p, void *v)
for_each_online_cpu(cpu)
seq_printf(p, "CPU%d ", cpu);
seq_putc(p, '\n');
+ goto out;
}
if (irq < NR_IRQS) {
+ if (irq >= NR_IRQS_BASE)
+ goto out;
seq_printf(p, "%s: ", irqclass_main_desc[irq].name);
for_each_online_cpu(cpu)
- seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[irq]);
+ seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
seq_putc(p, '\n');
- goto skip_arch_irqs;
+ goto out;
}
for (irq = 0; irq < NR_ARCH_IRQS; irq++) {
seq_printf(p, "%s: ", irqclass_sub_desc[irq].name);
for_each_online_cpu(cpu)
- seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).irqs[irq]);
+ seq_printf(p, "%10u ",
+ per_cpu(irq_stat, cpu).irqs[irq]);
if (irqclass_sub_desc[irq].desc)
seq_printf(p, " %s", irqclass_sub_desc[irq].desc);
seq_putc(p, '\n');
}
-skip_arch_irqs:
+out:
put_online_cpus();
return 0;
}
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+ return 0;
+}
+
/*
* Switch to the asynchronous interrupt stack for softirq execution.
*/
@@ -159,14 +192,6 @@ asmlinkage void do_softirq(void)
local_irq_restore(flags);
}
-#ifdef CONFIG_PROC_FS
-void init_irq_proc(void)
-{
- if (proc_mkdir("irq", NULL))
- create_prof_cpu_mask();
-}
-#endif
-
/*
* ext_int_hash[index] is the list head for all external interrupts that hash
* to this index.
@@ -183,14 +208,6 @@ struct ext_int_info {
/* ext_int_hash_lock protects the handler lists for external interrupts */
DEFINE_SPINLOCK(ext_int_hash_lock);
-static void __init init_external_interrupts(void)
-{
- int idx;
-
- for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
- INIT_LIST_HEAD(&ext_int_hash[idx]);
-}
-
static inline int ext_hash(u16 code)
{
return (code + (code >> 9)) & 0xff;
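
ext_hash() folds the 16-bit external interrupt code into one of 256 list heads; adding in the bits above bit 8 keeps codes that share a low byte from piling into the same bucket. A tiny demonstration with sample codes (only 0x1004, the clock comparator code tested in do_ext_interrupt(), is taken from the patch):

#include <stdio.h>

static int ext_hash(unsigned short code)
{
	return (code + (code >> 9)) & 0xff;
}

int main(void)
{
	printf("%#x -> %d\n", 0x1004, ext_hash(0x1004));
	printf("%#x -> %d\n", 0x1201, ext_hash(0x1201));
	printf("%#x -> %d\n", 0x2401, ext_hash(0x2401));
	return 0;
}
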
@@ -234,20 +251,13 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
}
EXPORT_SYMBOL(unregister_external_interrupt);
-void __irq_entry do_extint(struct pt_regs *regs)
+static irqreturn_t do_ext_interrupt(int irq, void *dummy)
{
+ struct pt_regs *regs = get_irq_regs();
struct ext_code ext_code;
- struct pt_regs *old_regs;
struct ext_int_info *p;
int index;
- old_regs = set_irq_regs(regs);
- irq_enter();
- if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) {
- /* Serve timer interrupts first. */
- clock_comparator_work();
- }
- kstat_incr_irqs_this_cpu(EXTERNAL_INTERRUPT, NULL);
ext_code = *(struct ext_code *) &regs->int_code;
if (ext_code.code != 0x1004)
__get_cpu_var(s390_idle).nohz_delay = 1;
@@ -259,13 +269,25 @@ void __irq_entry do_extint(struct pt_regs *regs)
p->handler(ext_code, regs->int_parm,
regs->int_parm_long);
rcu_read_unlock();
- irq_exit();
- set_irq_regs(old_regs);
+
+ return IRQ_HANDLED;
}
-void __init init_IRQ(void)
+static struct irqaction external_interrupt = {
+ .name = "EXT",
+ .handler = do_ext_interrupt,
+};
+
+void __init init_ext_interrupts(void)
{
- init_external_interrupts();
+ int idx;
+
+ for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
+ INIT_LIST_HEAD(&ext_int_hash[idx]);
+
+ irq_set_chip_and_handler(EXT_INTERRUPT,
+ &dummy_irq_chip, handle_percpu_irq);
+ setup_irq(EXT_INTERRUPT, &external_interrupt);
}
static DEFINE_SPINLOCK(sc_irq_lock);
@@ -313,69 +335,3 @@ void measurement_alert_subclass_unregister(void)
spin_unlock(&ma_subclass_lock);
}
EXPORT_SYMBOL(measurement_alert_subclass_unregister);
-
-#ifdef CONFIG_SMP
-void synchronize_irq(unsigned int irq)
-{
- /*
- * Not needed, the handler is protected by a lock and IRQs that occur
- * after the handler is deleted are just NOPs.
- */
-}
-EXPORT_SYMBOL_GPL(synchronize_irq);
-#endif
-
-#ifndef CONFIG_PCI
-
-/* Only PCI devices have dynamically-defined IRQ handlers */
-
-int request_irq(unsigned int irq, irq_handler_t handler,
- unsigned long irqflags, const char *devname, void *dev_id)
-{
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(request_irq);
-
-void free_irq(unsigned int irq, void *dev_id)
-{
- WARN_ON(1);
-}
-EXPORT_SYMBOL_GPL(free_irq);
-
-void enable_irq(unsigned int irq)
-{
- WARN_ON(1);
-}
-EXPORT_SYMBOL_GPL(enable_irq);
-
-void disable_irq(unsigned int irq)
-{
- WARN_ON(1);
-}
-EXPORT_SYMBOL_GPL(disable_irq);
-
-#endif /* !CONFIG_PCI */
-
-void disable_irq_nosync(unsigned int irq)
-{
- disable_irq(irq);
-}
-EXPORT_SYMBOL_GPL(disable_irq_nosync);
-
-unsigned long probe_irq_on(void)
-{
- return 0;
-}
-EXPORT_SYMBOL_GPL(probe_irq_on);
-
-int probe_irq_off(unsigned long val)
-{
- return 0;
-}
-EXPORT_SYMBOL_GPL(probe_irq_off);
-
-unsigned int probe_irq_mask(unsigned long val)
-{
- return val;
-}
-EXPORT_SYMBOL_GPL(probe_irq_mask);
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 3388b2b2a07..adbbe7f1cb0 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -105,14 +105,31 @@ static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
fixup |= FIXUP_RETURN_REGISTER;
break;
case 0xeb:
- if ((insn[2] & 0xff) == 0x44 || /* bxhg */
- (insn[2] & 0xff) == 0x45) /* bxleg */
+ switch (insn[2] & 0xff) {
+ case 0x44: /* bxhg */
+ case 0x45: /* bxleg */
fixup = FIXUP_BRANCH_NOT_TAKEN;
+ break;
+ }
break;
case 0xe3: /* bctg */
if ((insn[2] & 0xff) == 0x46)
fixup = FIXUP_BRANCH_NOT_TAKEN;
break;
+ case 0xec:
+ switch (insn[2] & 0xff) {
+ case 0xe5: /* clgrb */
+ case 0xe6: /* cgrb */
+ case 0xf6: /* crb */
+ case 0xf7: /* clrb */
+ case 0xfc: /* cgib */
+ case 0xfd: /* cglib */
+ case 0xfe: /* cib */
+ case 0xff: /* clib */
+ fixup = FIXUP_BRANCH_NOT_TAKEN;
+ break;
+ }
+ break;
}
return fixup;
}
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index 504175ebf8b..c4c03381987 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -214,10 +214,7 @@ static int notrace s390_revalidate_registers(struct mci *mci)
: "0", "cc");
#endif
/* Revalidate clock comparator register */
- if (S390_lowcore.clock_comparator == -1)
- set_clock_comparator(S390_lowcore.mcck_clock);
- else
- set_clock_comparator(S390_lowcore.clock_comparator);
+ set_clock_comparator(S390_lowcore.clock_comparator);
/* Check if old PSW is valid */
if (!mci->wp)
/*
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index a6fc037671b..500aa1029bc 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -52,12 +52,13 @@ static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs)
static bool is_in_guest(struct pt_regs *regs)
{
- unsigned long ip = instruction_pointer(regs);
-
if (user_mode(regs))
return false;
-
- return ip == (unsigned long) &sie_exit;
+#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
+ return instruction_pointer(regs) == (unsigned long) &sie_exit;
+#else
+ return false;
+#endif
}
static unsigned long guest_is_user_mode(struct pt_regs *regs)
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 2bc3eddae34..c5dbb335716 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -71,6 +71,7 @@ void arch_cpu_idle(void)
}
/* Halt the cpu and keep track of cpu time accounting. */
vtime_stop_cpu();
+ local_irq_enable();
}
void arch_cpu_idle_exit(void)
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index e9fadb04e3c..9556905bd3c 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -60,11 +60,11 @@ void update_cr_regs(struct task_struct *task)
__ctl_store(cr, 0, 2);
cr_new[1] = cr[1];
- /* Set or clear transaction execution TXC/PIFO bits 8 and 9. */
+ /* Set or clear transaction execution TXC bit 8. */
if (task->thread.per_flags & PER_FLAG_NO_TE)
- cr_new[0] = cr[0] & ~(3UL << 54);
+ cr_new[0] = cr[0] & ~(1UL << 55);
else
- cr_new[0] = cr[0] | (3UL << 54);
+ cr_new[0] = cr[0] | (1UL << 55);
/* Set or clear transaction execution TDC bits 62 and 63. */
cr_new[2] = cr[2] & ~3UL;
if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) {
@@ -1299,7 +1299,7 @@ int regs_query_register_offset(const char *name)
if (!name || *name != 'r')
return -EINVAL;
- if (strict_strtoul(name + 1, 10, &offset))
+ if (kstrtoul(name + 1, 10, &offset))
return -EINVAL;
if (offset >= NUM_GPRS)
return -EINVAL;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 497451ec5e2..aeed8a61fa0 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -994,6 +994,7 @@ static void __init setup_hwcaps(void)
strcpy(elf_platform, "z196");
break;
case 0x2827:
+ case 0x2828:
strcpy(elf_platform, "zEC12");
break;
}
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index c479d2f9605..737bff38e3e 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -10,6 +10,9 @@
#include <linux/suspend.h>
#include <linux/mm.h>
#include <asm/ctl_reg.h>
+#include <asm/ipl.h>
+#include <asm/cio.h>
+#include <asm/pci.h>
/*
* References to section boundaries
@@ -211,3 +214,11 @@ void restore_processor_state(void)
__ctl_set_bit(0,28);
local_mcck_enable();
}
+
+/* Called at the end of swsusp_arch_resume */
+void s390_early_resume(void)
+{
+ lgr_info_log();
+ channel_subsystem_reinit();
+ zpci_rescan();
+}
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S
index c487be4cfc8..6b09fdffbd2 100644
--- a/arch/s390/kernel/swsusp_asm64.S
+++ b/arch/s390/kernel/swsusp_asm64.S
@@ -281,11 +281,8 @@ restore_registers:
lghi %r2,0
brasl %r14,arch_set_page_states
- /* Log potential guest relocation */
- brasl %r14,lgr_info_log
-
- /* Reinitialize the channel subsystem */
- brasl %r14,channel_subsystem_reinit
+ /* Call arch specific early resume code */
+ brasl %r14,s390_early_resume
/* Return 0 */
lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 876546b9cfa..064c3082ab3 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -92,7 +92,6 @@ void clock_comparator_work(void)
struct clock_event_device *cd;
S390_lowcore.clock_comparator = -1ULL;
- set_clock_comparator(S390_lowcore.clock_comparator);
cd = &__get_cpu_var(comparators);
cd->event_handler(cd);
}
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index d7776281cb6..05d75c41313 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -63,7 +63,7 @@ static int __init vdso_setup(char *s)
else if (strncmp(s, "off", 4) == 0)
vdso_enabled = 0;
else {
- rc = strict_strtoul(s, 0, &val);
+ rc = kstrtoul(s, 0, &val);
vdso_enabled = rc ? 0 : !!val;
}
return !rc;
@@ -113,11 +113,11 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
PAGE_SIZE << SEGMENT_ORDER);
- clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY,
+ clear_table((unsigned long *) page_table, _PAGE_INVALID,
256*sizeof(unsigned long));
*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
- *(unsigned long *) page_table = _PAGE_RO + page_frame;
+ *(unsigned long *) page_table = _PAGE_PROTECT + page_frame;
psal = (u32 *) (page_table + 256*sizeof(unsigned long));
aste = psal + 32;
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 9b9c1b78ec6..abcfab55f99 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -19,6 +19,7 @@
#include <asm/irq_regs.h>
#include <asm/cputime.h>
#include <asm/vtimer.h>
+#include <asm/vtime.h>
#include <asm/irq.h>
#include "entry.h"
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 3074475c8ae..3a74d8af0d6 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -119,12 +119,21 @@ static int __diag_virtio_hypercall(struct kvm_vcpu *vcpu)
* The layout is as follows:
* - gpr 2 contains the subchannel id (passed as addr)
* - gpr 3 contains the virtqueue index (passed as datamatch)
+ * - gpr 4 optionally contains the index on the bus
*/
- ret = kvm_io_bus_write(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS,
- vcpu->run->s.regs.gprs[2],
- 8, &vcpu->run->s.regs.gprs[3]);
+ ret = kvm_io_bus_write_cookie(vcpu->kvm, KVM_VIRTIO_CCW_NOTIFY_BUS,
+ vcpu->run->s.regs.gprs[2],
+ 8, &vcpu->run->s.regs.gprs[3],
+ vcpu->run->s.regs.gprs[4]);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
- /* kvm_io_bus_write returns -EOPNOTSUPP if it found no match. */
+
+ /*
+ * Return cookie in gpr 2, but don't overwrite the register if the
+ * diagnose will be handled by userspace.
+ */
+ if (ret != -EOPNOTSUPP)
+ vcpu->run->s.regs.gprs[2] = ret;
+ /* kvm_io_bus_write_cookie returns -EOPNOTSUPP if it found no match. */
return ret < 0 ? ret : 0;
}
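
A sketch of the result handling added to __diag_virtio_hypercall() above (illustrative only; kvm_io_bus_write_cookie() itself is not mimicked, just its return value): the cookie or error lands in guest gpr 2 unless the write was not handled in the kernel (-EOPNOTSUPP), and the function itself reports 0 for anything non-negative:

    #include <errno.h>
    #include <stdio.h>

    static long gpr2;   /* stand-in for vcpu->run->s.regs.gprs[2] */

    /* Mirrors the result handling added in __diag_virtio_hypercall(). */
    static int fold_notify_result(int ret)
    {
            if (ret != -EOPNOTSUPP)
                    gpr2 = ret;             /* cookie or error becomes visible to the guest */
            return ret < 0 ? ret : 0;       /* handled -> 0, real error -> negative */
    }

    int main(void)
    {
            int samples[] = { 5, 0, -EOPNOTSUPP, -EINVAL };
            unsigned int i;

            for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                    int rc;

                    gpr2 = -1;
                    rc = fold_notify_result(samples[i]);
                    printf("bus returned %d -> rc %d, gpr2 %ld\n",
                           samples[i], rc, gpr2);
            }
            return 0;
    }
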
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 302e0e52b00..99d789e8a01 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -42,9 +42,11 @@ static inline void __user *__gptr_to_uptr(struct kvm_vcpu *vcpu,
({ \
__typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\
int __mask = sizeof(__typeof__(*(gptr))) - 1; \
- int __ret = PTR_RET((void __force *)__uptr); \
+ int __ret; \
\
- if (!__ret) { \
+ if (IS_ERR((void __force *)__uptr)) { \
+ __ret = PTR_ERR((void __force *)__uptr); \
+ } else { \
BUG_ON((unsigned long)__uptr & __mask); \
__ret = get_user(x, __uptr); \
} \
@@ -55,9 +57,11 @@ static inline void __user *__gptr_to_uptr(struct kvm_vcpu *vcpu,
({ \
__typeof__(gptr) __uptr = __gptr_to_uptr(vcpu, gptr, 1);\
int __mask = sizeof(__typeof__(*(gptr))) - 1; \
- int __ret = PTR_RET((void __force *)__uptr); \
+ int __ret; \
\
- if (!__ret) { \
+ if (IS_ERR((void __force *)__uptr)) { \
+ __ret = PTR_ERR((void __force *)__uptr); \
+ } else { \
BUG_ON((unsigned long)__uptr & __mask); \
__ret = put_user(x, __uptr); \
} \
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index ba694d2ba51..776dafe918d 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -28,6 +28,7 @@
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
+#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"
@@ -84,9 +85,15 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
-static unsigned long long *facilities;
+unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;
+/* test availability of vfacility */
+static inline int test_vfacility(unsigned long nr)
+{
+ return __test_facility(nr, (void *) vfacilities);
+}
+
/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
@@ -387,7 +394,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
vcpu->arch.sie_block->ecb = 6;
vcpu->arch.sie_block->ecb2 = 8;
vcpu->arch.sie_block->eca = 0xC1002001U;
- vcpu->arch.sie_block->fac = (int) (long) facilities;
+ vcpu->arch.sie_block->fac = (int) (long) vfacilities;
hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
(unsigned long) vcpu);
@@ -702,14 +709,25 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
return rc;
vcpu->arch.sie_block->icptcode = 0;
- preempt_disable();
- kvm_guest_enter();
- preempt_enable();
VCPU_EVENT(vcpu, 6, "entering sie flags %x",
atomic_read(&vcpu->arch.sie_block->cpuflags));
trace_kvm_s390_sie_enter(vcpu,
atomic_read(&vcpu->arch.sie_block->cpuflags));
+
+ /*
+ * As PF_VCPU is used in the fault handler, there must be no
+ * uaccess between guest_enter and guest_exit.
+ */
+ preempt_disable();
+ kvm_guest_enter();
+ preempt_enable();
rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
+ kvm_guest_exit();
+
+ VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
+ vcpu->arch.sie_block->icptcode);
+ trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
+
if (rc > 0)
rc = 0;
if (rc < 0) {
@@ -721,10 +739,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
}
- VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
- vcpu->arch.sie_block->icptcode);
- trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
- kvm_guest_exit();
memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
return rc;
@@ -1056,6 +1070,10 @@ int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
return 0;
}
+void kvm_arch_memslots_updated(struct kvm *kvm)
+{
+}
+
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
@@ -1122,20 +1140,20 @@ static int __init kvm_s390_init(void)
* to hold the maximum amount of facilities. On the other hand, we
* only set facilities that are known to work in KVM.
*/
- facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
- if (!facilities) {
+ vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
+ if (!vfacilities) {
kvm_exit();
return -ENOMEM;
}
- memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
- facilities[0] &= 0xff82fff3f47c0000ULL;
- facilities[1] &= 0x001c000000000000ULL;
+ memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
+ vfacilities[0] &= 0xff82fff3f47c0000UL;
+ vfacilities[1] &= 0x001c000000000000UL;
return 0;
}
static void __exit kvm_s390_exit(void)
{
- free_page((unsigned long) facilities);
+ free_page((unsigned long) vfacilities);
kvm_exit();
}
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index 028ca9fd215..dc99f1ca426 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -24,6 +24,9 @@
typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
+/* declare vfacilities extern */
+extern unsigned long *vfacilities;
+
/* negative values are error codes, positive values for internal conditions */
#define SIE_INTERCEPT_RERUNVCPU (1<<0)
#define SIE_INTERCEPT_UCONTROL (1<<1)
@@ -112,6 +115,13 @@ static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu)
return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}
+/* Set the condition code in the guest program status word */
+static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
+{
+ vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44);
+ vcpu->arch.sie_block->gpsw.mask |= cc << 44;
+}
+
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
void kvm_s390_tasklet(unsigned long parm);
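
For reference, the bit arithmetic performed by the new kvm_s390_set_psw_cc() helper above, which the priv.c hunks below switch to, in stand-alone form (the starting mask value is an arbitrary example, not taken from the patch):

    #include <stdio.h>

    /* Same two-bit field update as kvm_s390_set_psw_cc() above. */
    static unsigned long set_psw_cc(unsigned long mask, unsigned long cc)
    {
            mask &= ~(3UL << 44);   /* clear the condition-code bits */
            mask |= cc << 44;       /* insert the new cc (0..3) */
            return mask;
    }

    int main(void)
    {
            unsigned long mask = 0x0705100180000000UL;  /* arbitrary example mask */
            unsigned long cc;

            for (cc = 0; cc <= 3; cc++)
                    printf("cc=%lu -> mask=%016lx\n", cc, set_psw_cc(mask, cc));
            return 0;
    }
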
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 0da3e6eb6be..59200ee275e 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -16,6 +16,7 @@
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
+#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
@@ -163,8 +164,7 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
kfree(inti);
no_interrupt:
/* Set condition code and we're done. */
- vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
- vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
+ kvm_s390_set_psw_cc(vcpu, cc);
return 0;
}
@@ -219,15 +219,13 @@ static int handle_io_inst(struct kvm_vcpu *vcpu)
* Set condition code 3 to stop the guest from issuing channel
* I/O instructions.
*/
- vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
- vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
+ kvm_s390_set_psw_cc(vcpu, 3);
return 0;
}
}
static int handle_stfl(struct kvm_vcpu *vcpu)
{
- unsigned int facility_list;
int rc;
vcpu->stat.instruction_stfl++;
@@ -235,15 +233,13 @@ static int handle_stfl(struct kvm_vcpu *vcpu)
if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
- /* only pass the facility bits, which we can handle */
- facility_list = S390_lowcore.stfl_fac_list & 0xff82fff3;
-
rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
- &facility_list, sizeof(facility_list));
+ vfacilities, 4);
if (rc)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
- VCPU_EVENT(vcpu, 5, "store facility list value %x", facility_list);
- trace_kvm_s390_handle_stfl(vcpu, facility_list);
+ VCPU_EVENT(vcpu, 5, "store facility list value %x",
+ *(unsigned int *) vfacilities);
+ trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
return 0;
}
@@ -386,7 +382,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
if (fc > 3) {
- vcpu->arch.sie_block->gpsw.mask |= 3ul << 44; /* cc 3 */
+ kvm_s390_set_psw_cc(vcpu, 3);
return 0;
}
@@ -396,7 +392,7 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
if (fc == 0) {
vcpu->run->s.regs.gprs[0] = 3 << 28;
- vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); /* cc 0 */
+ kvm_s390_set_psw_cc(vcpu, 0);
return 0;
}
@@ -430,12 +426,11 @@ static int handle_stsi(struct kvm_vcpu *vcpu)
}
trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
free_page(mem);
- vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+ kvm_s390_set_psw_cc(vcpu, 0);
vcpu->run->s.regs.gprs[0] = 0;
return 0;
out_no_data:
- /* condition code 3 */
- vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
+ kvm_s390_set_psw_cc(vcpu, 3);
out_exception:
free_page(mem);
return rc;
@@ -493,12 +488,12 @@ static int handle_epsw(struct kvm_vcpu *vcpu)
kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);
/* This basically extracts the mask half of the psw. */
- vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
+ vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
if (reg2) {
- vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000;
+ vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
vcpu->run->s.regs.gprs[reg2] |=
- vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff;
+ vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
}
return 0;
}
@@ -532,8 +527,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/* Only provide non-quiescing support if the host supports it */
- if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
- S390_lowcore.stfl_fac_list & 0x00020000)
+ if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
/* No support for conditional-SSKE */
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index c61b9fad43c..57c87d7d7ed 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -44,7 +44,6 @@ static void __udelay_disabled(unsigned long long usecs)
do {
set_clock_comparator(end);
vtime_stop_cpu();
- local_irq_disable();
} while (get_tod_clock() < end);
lockdep_on();
__ctl_load(cr0, 0, 0);
@@ -64,7 +63,6 @@ static void __udelay_enabled(unsigned long long usecs)
set_clock_comparator(end);
}
vtime_stop_cpu();
- local_irq_disable();
if (clock_saved)
local_tick_enable(clock_saved);
} while (get_tod_clock() < end);
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 50ea137a2d3..1694d738b17 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -86,28 +86,28 @@ static unsigned long follow_table(struct mm_struct *mm,
switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
case _ASCE_TYPE_REGION1:
table = table + ((address >> 53) & 0x7ff);
- if (unlikely(*table & _REGION_ENTRY_INV))
+ if (unlikely(*table & _REGION_ENTRY_INVALID))
return -0x39UL;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
/* fallthrough */
case _ASCE_TYPE_REGION2:
table = table + ((address >> 42) & 0x7ff);
- if (unlikely(*table & _REGION_ENTRY_INV))
+ if (unlikely(*table & _REGION_ENTRY_INVALID))
return -0x3aUL;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
/* fallthrough */
case _ASCE_TYPE_REGION3:
table = table + ((address >> 31) & 0x7ff);
- if (unlikely(*table & _REGION_ENTRY_INV))
+ if (unlikely(*table & _REGION_ENTRY_INVALID))
return -0x3bUL;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
/* fallthrough */
case _ASCE_TYPE_SEGMENT:
table = table + ((address >> 20) & 0x7ff);
- if (unlikely(*table & _SEGMENT_ENTRY_INV))
+ if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
return -0x10UL;
if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
- if (write && (*table & _SEGMENT_ENTRY_RO))
+ if (write && (*table & _SEGMENT_ENTRY_PROTECT))
return -0x04UL;
return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
(address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
@@ -117,7 +117,7 @@ static unsigned long follow_table(struct mm_struct *mm,
table = table + ((address >> 12) & 0xff);
if (unlikely(*table & _PAGE_INVALID))
return -0x11UL;
- if (write && (*table & _PAGE_RO))
+ if (write && (*table & _PAGE_PROTECT))
return -0x04UL;
return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
}
@@ -130,13 +130,13 @@ static unsigned long follow_table(struct mm_struct *mm,
unsigned long *table = (unsigned long *)__pa(mm->pgd);
table = table + ((address >> 20) & 0x7ff);
- if (unlikely(*table & _SEGMENT_ENTRY_INV))
+ if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
return -0x10UL;
table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
table = table + ((address >> 12) & 0xff);
if (unlikely(*table & _PAGE_INVALID))
return -0x11UL;
- if (write && (*table & _PAGE_RO))
+ if (write && (*table & _PAGE_PROTECT))
return -0x04UL;
return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
}
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index 3ad65b04ac1..46d517c3c76 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -53,7 +53,7 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level)
seq_printf(m, "I\n");
return;
}
- seq_printf(m, "%s", pr & _PAGE_RO ? "RO " : "RW ");
+ seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : " ");
seq_putc(m, '\n');
}
@@ -105,12 +105,12 @@ static void note_page(struct seq_file *m, struct pg_state *st,
}
/*
- * The actual page table walker functions. In order to keep the implementation
- * of print_prot() short, we only check and pass _PAGE_INVALID and _PAGE_RO
- * flags to note_page() if a region, segment or page table entry is invalid or
- * read-only.
- * After all it's just a hint that the current level being walked contains an
- * invalid or read-only entry.
+ * The actual page table walker functions. In order to keep the
+ * implementation of print_prot() short, we only check and pass
+ * _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region,
+ * segment or page table entry is invalid or read-only.
+ * After all it's just a hint that the current level being walked
+ * contains an invalid or read-only entry.
*/
static void walk_pte_level(struct seq_file *m, struct pg_state *st,
pmd_t *pmd, unsigned long addr)
@@ -122,14 +122,14 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
st->current_address = addr;
pte = pte_offset_kernel(pmd, addr);
- prot = pte_val(*pte) & (_PAGE_RO | _PAGE_INVALID);
+ prot = pte_val(*pte) & (_PAGE_PROTECT | _PAGE_INVALID);
note_page(m, st, prot, 4);
addr += PAGE_SIZE;
}
}
#ifdef CONFIG_64BIT
-#define _PMD_PROT_MASK (_SEGMENT_ENTRY_RO | _SEGMENT_ENTRY_CO)
+#define _PMD_PROT_MASK (_SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_CO)
#else
#define _PMD_PROT_MASK 0
#endif
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c
index 1f5315d1215..5d758db27bd 100644
--- a/arch/s390/mm/gup.c
+++ b/arch/s390/mm/gup.c
@@ -24,7 +24,7 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
pte_t *ptep, pte;
struct page *page;
- mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
+ mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;
ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
do {
@@ -55,8 +55,8 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
struct page *head, *page, *tail;
int refs;
- result = write ? 0 : _SEGMENT_ENTRY_RO;
- mask = result | _SEGMENT_ENTRY_INV;
+ result = write ? 0 : _SEGMENT_ENTRY_PROTECT;
+ mask = result | _SEGMENT_ENTRY_INVALID;
if ((pmd_val(pmd) & mask) != result)
return 0;
VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 121089d5780..248445f9260 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -8,21 +8,127 @@
#include <linux/mm.h>
#include <linux/hugetlb.h>
+static inline pmd_t __pte_to_pmd(pte_t pte)
+{
+ int none, young, prot;
+ pmd_t pmd;
+
+ /*
+ * Convert encoding pte bits pmd bits
+ * .IR...wrdytp ..R...I...y.
+ * empty .10...000000 -> ..0...1...0.
+ * prot-none, clean, old .11...000001 -> ..0...1...1.
+ * prot-none, clean, young .11...000101 -> ..1...1...1.
+ * prot-none, dirty, old .10...001001 -> ..0...1...1.
+ * prot-none, dirty, young .10...001101 -> ..1...1...1.
+ * read-only, clean, old .11...010001 -> ..1...1...0.
+ * read-only, clean, young .01...010101 -> ..1...0...1.
+ * read-only, dirty, old .11...011001 -> ..1...1...0.
+ * read-only, dirty, young .01...011101 -> ..1...0...1.
+ * read-write, clean, old .11...110001 -> ..0...1...0.
+ * read-write, clean, young .01...110101 -> ..0...0...1.
+ * read-write, dirty, old .10...111001 -> ..0...1...0.
+ * read-write, dirty, young .00...111101 -> ..0...0...1.
+ * Huge ptes are dirty by definition, a clean pte is made dirty
+ * by the conversion.
+ */
+ if (pte_present(pte)) {
+ pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
+ if (pte_val(pte) & _PAGE_INVALID)
+ pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
+ none = (pte_val(pte) & _PAGE_PRESENT) &&
+ !(pte_val(pte) & _PAGE_READ) &&
+ !(pte_val(pte) & _PAGE_WRITE);
+ prot = (pte_val(pte) & _PAGE_PROTECT) &&
+ !(pte_val(pte) & _PAGE_WRITE);
+ young = pte_val(pte) & _PAGE_YOUNG;
+ if (none || young)
+ pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
+ if (prot || (none && young))
+ pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+ } else
+ pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
+ return pmd;
+}
+
+static inline pte_t __pmd_to_pte(pmd_t pmd)
+{
+ pte_t pte;
+
+ /*
+ * Convert encoding pmd bits pte bits
+ * ..R...I...y. .IR...wrdytp
+ * empty ..0...1...0. -> .10...000000
+ * prot-none, old ..0...1...1. -> .10...001001
+ * prot-none, young ..1...1...1. -> .10...001101
+ * read-only, old ..1...1...0. -> .11...011001
+ * read-only, young ..1...0...1. -> .01...011101
+ * read-write, old ..0...1...0. -> .10...111001
+ * read-write, young ..0...0...1. -> .00...111101
+ * Huge ptes are dirty by definition
+ */
+ if (pmd_present(pmd)) {
+ pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY |
+ (pmd_val(pmd) & PAGE_MASK);
+ if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
+ pte_val(pte) |= _PAGE_INVALID;
+ if (pmd_prot_none(pmd)) {
+ if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
+ pte_val(pte) |= _PAGE_YOUNG;
+ } else {
+ pte_val(pte) |= _PAGE_READ;
+ if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
+ pte_val(pte) |= _PAGE_PROTECT;
+ else
+ pte_val(pte) |= _PAGE_WRITE;
+ if (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)
+ pte_val(pte) |= _PAGE_YOUNG;
+ }
+ } else
+ pte_val(pte) = _PAGE_INVALID;
+ return pte;
+}
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *pteptr, pte_t pteval)
+ pte_t *ptep, pte_t pte)
{
- pmd_t *pmdp = (pmd_t *) pteptr;
- unsigned long mask;
+ pmd_t pmd;
+ pmd = __pte_to_pmd(pte);
if (!MACHINE_HAS_HPAGE) {
- pteptr = (pte_t *) pte_page(pteval)[1].index;
- mask = pte_val(pteval) &
- (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
- pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
+ pmd_val(pmd) |= pte_page(pte)[1].index;
+ } else
+ pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO;
+ *(pmd_t *) ptep = pmd;
+}
+
+pte_t huge_ptep_get(pte_t *ptep)
+{
+ unsigned long origin;
+ pmd_t pmd;
+
+ pmd = *(pmd_t *) ptep;
+ if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
+ origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
+ pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
+ pmd_val(pmd) |= *(unsigned long *) origin;
}
+ return __pmd_to_pte(pmd);
+}
- pmd_val(*pmdp) = pte_val(pteval);
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ pmd_t *pmdp = (pmd_t *) ptep;
+ pte_t pte = huge_ptep_get(ptep);
+
+ if (MACHINE_HAS_IDTE)
+ __pmd_idte(addr, pmdp);
+ else
+ __pmd_csp(pmdp);
+ pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
+ return pte;
}
int arch_prepare_hugepage(struct page *page)
@@ -58,7 +164,7 @@ void arch_release_hugepage(struct page *page)
ptep = (pte_t *) page[1].index;
if (!ptep)
return;
- clear_table((unsigned long *) ptep, _PAGE_TYPE_EMPTY,
+ clear_table((unsigned long *) ptep, _PAGE_INVALID,
PTRS_PER_PTE * sizeof(pte_t));
page_table_free(&init_mm, (unsigned long *) ptep);
page[1].index = 0;
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index ce36ea80e4f..ad446b0c55b 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -69,6 +69,7 @@ static void __init setup_zero_pages(void)
order = 2;
break;
case 0x2827: /* zEC12 */
+ case 0x2828: /* zEC12 */
default:
order = 5;
break;
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 80adfbf7506..990397420e6 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -118,7 +118,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
pte = pte_offset_kernel(pmd, address);
if (!enable) {
__ptep_ipte(address, pte);
- pte_val(*pte) = _PAGE_TYPE_EMPTY;
+ pte_val(*pte) = _PAGE_INVALID;
continue;
}
pte_val(*pte) = __pa(address);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index a8154a1a2c9..bf7c0dc64a7 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -161,7 +161,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
struct gmap_rmap *rmap;
struct page *page;
- if (*table & _SEGMENT_ENTRY_INV)
+ if (*table & _SEGMENT_ENTRY_INVALID)
return 0;
page = pfn_to_page(*table >> PAGE_SHIFT);
mp = (struct gmap_pgtable *) page->index;
@@ -172,7 +172,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
kfree(rmap);
break;
}
- *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+ *table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
return 1;
}
@@ -258,7 +258,7 @@ static int gmap_alloc_table(struct gmap *gmap,
return -ENOMEM;
new = (unsigned long *) page_to_phys(page);
crst_table_init(new, init);
- if (*table & _REGION_ENTRY_INV) {
+ if (*table & _REGION_ENTRY_INVALID) {
list_add(&page->lru, &gmap->crst_list);
*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
(*table & _REGION_ENTRY_TYPE_MASK);
@@ -292,22 +292,22 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
for (off = 0; off < len; off += PMD_SIZE) {
/* Walk the guest addr space page table */
table = gmap->table + (((to + off) >> 53) & 0x7ff);
- if (*table & _REGION_ENTRY_INV)
+ if (*table & _REGION_ENTRY_INVALID)
goto out;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 42) & 0x7ff);
- if (*table & _REGION_ENTRY_INV)
+ if (*table & _REGION_ENTRY_INVALID)
goto out;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 31) & 0x7ff);
- if (*table & _REGION_ENTRY_INV)
+ if (*table & _REGION_ENTRY_INVALID)
goto out;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 20) & 0x7ff);
/* Clear segment table entry in guest address space. */
flush |= gmap_unlink_segment(gmap, table);
- *table = _SEGMENT_ENTRY_INV;
+ *table = _SEGMENT_ENTRY_INVALID;
}
out:
spin_unlock(&gmap->mm->page_table_lock);
@@ -335,7 +335,7 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
if ((from | to | len) & (PMD_SIZE - 1))
return -EINVAL;
- if (len == 0 || from + len > PGDIR_SIZE ||
+ if (len == 0 || from + len > TASK_MAX_SIZE ||
from + len < from || to + len < to)
return -EINVAL;
@@ -345,17 +345,17 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
for (off = 0; off < len; off += PMD_SIZE) {
/* Walk the gmap address space page table */
table = gmap->table + (((to + off) >> 53) & 0x7ff);
- if ((*table & _REGION_ENTRY_INV) &&
+ if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
goto out_unmap;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 42) & 0x7ff);
- if ((*table & _REGION_ENTRY_INV) &&
+ if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
goto out_unmap;
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + (((to + off) >> 31) & 0x7ff);
- if ((*table & _REGION_ENTRY_INV) &&
+ if ((*table & _REGION_ENTRY_INVALID) &&
gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
goto out_unmap;
table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
@@ -363,7 +363,8 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from,
/* Store 'from' address in an invalid segment table entry. */
flush |= gmap_unlink_segment(gmap, table);
- *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
+ *table = (from + off) | (_SEGMENT_ENTRY_INVALID |
+ _SEGMENT_ENTRY_PROTECT);
}
spin_unlock(&gmap->mm->page_table_lock);
up_read(&gmap->mm->mmap_sem);
@@ -384,15 +385,15 @@ static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
unsigned long *table;
table = gmap->table + ((address >> 53) & 0x7ff);
- if (unlikely(*table & _REGION_ENTRY_INV))
+ if (unlikely(*table & _REGION_ENTRY_INVALID))
return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 42) & 0x7ff);
- if (unlikely(*table & _REGION_ENTRY_INV))
+ if (unlikely(*table & _REGION_ENTRY_INVALID))
return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 31) & 0x7ff);
- if (unlikely(*table & _REGION_ENTRY_INV))
+ if (unlikely(*table & _REGION_ENTRY_INVALID))
return ERR_PTR(-EFAULT);
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 20) & 0x7ff);
@@ -422,11 +423,11 @@ unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
return PTR_ERR(segment_ptr);
/* Convert the gmap address to an mm address. */
segment = *segment_ptr;
- if (!(segment & _SEGMENT_ENTRY_INV)) {
+ if (!(segment & _SEGMENT_ENTRY_INVALID)) {
page = pfn_to_page(segment >> PAGE_SHIFT);
mp = (struct gmap_pgtable *) page->index;
return mp->vmaddr | (address & ~PMD_MASK);
- } else if (segment & _SEGMENT_ENTRY_RO) {
+ } else if (segment & _SEGMENT_ENTRY_PROTECT) {
vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
return vmaddr | (address & ~PMD_MASK);
}
@@ -517,8 +518,8 @@ static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
mp = (struct gmap_pgtable *) page->index;
list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
- *rmap->entry =
- _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
+ *rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
+ _SEGMENT_ENTRY_PROTECT);
list_del(&rmap->list);
kfree(rmap);
flush = 1;
@@ -545,13 +546,13 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
/* Convert the gmap address to an mm address. */
while (1) {
segment = *segment_ptr;
- if (!(segment & _SEGMENT_ENTRY_INV)) {
+ if (!(segment & _SEGMENT_ENTRY_INVALID)) {
/* Page table is present */
page = pfn_to_page(segment >> PAGE_SHIFT);
mp = (struct gmap_pgtable *) page->index;
return mp->vmaddr | (address & ~PMD_MASK);
}
- if (!(segment & _SEGMENT_ENTRY_RO))
+ if (!(segment & _SEGMENT_ENTRY_PROTECT))
/* Nothing mapped in the gmap address space. */
break;
rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
@@ -586,25 +587,25 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
while (address < to) {
/* Walk the gmap address space page table */
table = gmap->table + ((address >> 53) & 0x7ff);
- if (unlikely(*table & _REGION_ENTRY_INV)) {
+ if (unlikely(*table & _REGION_ENTRY_INVALID)) {
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 42) & 0x7ff);
- if (unlikely(*table & _REGION_ENTRY_INV)) {
+ if (unlikely(*table & _REGION_ENTRY_INVALID)) {
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 31) & 0x7ff);
- if (unlikely(*table & _REGION_ENTRY_INV)) {
+ if (unlikely(*table & _REGION_ENTRY_INVALID)) {
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
table = table + ((address >> 20) & 0x7ff);
- if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
+ if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
@@ -687,7 +688,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
continue;
/* Set notification bit in the pgste of the pte */
entry = *ptep;
- if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_RO)) == 0) {
+ if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
pgste = pgste_get_lock(ptep);
pgste_val(pgste) |= PGSTE_IN_BIT;
pgste_set_unlock(ptep, pgste);
@@ -731,6 +732,11 @@ void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long addr, pte_t *pte)
spin_unlock(&gmap_notifier_lock);
}
+static inline int page_table_with_pgste(struct page *page)
+{
+ return atomic_read(&page->_mapcount) == 0;
+}
+
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
unsigned long vmaddr)
{
@@ -750,10 +756,11 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
mp->vmaddr = vmaddr & PMD_MASK;
INIT_LIST_HEAD(&mp->mapper);
page->index = (unsigned long) mp;
- atomic_set(&page->_mapcount, 3);
+ atomic_set(&page->_mapcount, 0);
table = (unsigned long *) page_to_phys(page);
- clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
- clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
+ clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
+ clear_table(table + PTRS_PER_PTE, PGSTE_HR_BIT | PGSTE_HC_BIT,
+ PAGE_SIZE/2);
return table;
}
@@ -791,26 +798,21 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
if (!(pte_val(*ptep) & _PAGE_INVALID)) {
- unsigned long address, bits;
- unsigned char skey;
+ unsigned long address, bits, skey;
address = pte_val(*ptep) & PAGE_MASK;
- skey = page_get_storage_key(address);
+ skey = (unsigned long) page_get_storage_key(address);
bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
+ skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
/* Set storage key ACC and FP */
- page_set_storage_key(address,
- (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)),
- !nq);
-
+ page_set_storage_key(address, skey, !nq);
/* Merge host changed & referenced into pgste */
pgste_val(new) |= bits << 52;
- /* Transfer skey changed & referenced bit to kvm user bits */
- pgste_val(new) |= bits << 45; /* PGSTE_UR_BIT & PGSTE_UC_BIT */
}
/* changing the guest storage key is considered a change of the page */
if ((pgste_val(new) ^ pgste_val(old)) &
(PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
- pgste_val(new) |= PGSTE_UC_BIT;
+ pgste_val(new) |= PGSTE_HC_BIT;
pgste_set_unlock(ptep, new);
pte_unmap_unlock(*ptep, ptl);
@@ -821,6 +823,11 @@ EXPORT_SYMBOL(set_guest_storage_key);
#else /* CONFIG_PGSTE */
+static inline int page_table_with_pgste(struct page *page)
+{
+ return 0;
+}
+
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
unsigned long vmaddr)
{
@@ -878,7 +885,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
pgtable_page_ctor(page);
atomic_set(&page->_mapcount, 1);
table = (unsigned long *) page_to_phys(page);
- clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+ clear_table(table, _PAGE_INVALID, PAGE_SIZE);
spin_lock_bh(&mm->context.list_lock);
list_add(&page->lru, &mm->context.pgtable_list);
} else {
@@ -897,12 +904,12 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
struct page *page;
unsigned int bit, mask;
- if (mm_has_pgste(mm)) {
+ page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ if (page_table_with_pgste(page)) {
gmap_disconnect_pgtable(mm, table);
return page_table_free_pgste(table);
}
/* Free 1K/2K page table fragment of a 4K page */
- page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
spin_lock_bh(&mm->context.list_lock);
if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
@@ -940,14 +947,14 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
unsigned int bit, mask;
mm = tlb->mm;
- if (mm_has_pgste(mm)) {
+ page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ if (page_table_with_pgste(page)) {
gmap_disconnect_pgtable(mm, table);
table = (unsigned long *) (__pa(table) | FRAG_MASK);
tlb_remove_table(tlb, table);
return;
}
bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
- page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
spin_lock_bh(&mm->context.list_lock);
if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
list_del(&page->lru);
@@ -1007,7 +1014,6 @@ void tlb_table_flush(struct mmu_gather *tlb)
struct mmu_table_batch **batch = &tlb->batch;
if (*batch) {
- __tlb_flush_mm(tlb->mm);
call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
*batch = NULL;
}
@@ -1017,11 +1023,12 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
struct mmu_table_batch **batch = &tlb->batch;
+ tlb->mm->context.flush_mm = 1;
if (*batch == NULL) {
*batch = (struct mmu_table_batch *)
__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
if (*batch == NULL) {
- __tlb_flush_mm(tlb->mm);
+ __tlb_flush_mm_lazy(tlb->mm);
tlb_remove_table_one(table);
return;
}
@@ -1029,40 +1036,124 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table)
}
(*batch)->tables[(*batch)->nr++] = table;
if ((*batch)->nr == MAX_TABLE_BATCH)
- tlb_table_flush(tlb);
+ tlb_flush_mmu(tlb);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-void thp_split_vma(struct vm_area_struct *vma)
+static inline void thp_split_vma(struct vm_area_struct *vma)
{
unsigned long addr;
- struct page *page;
- for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
- page = follow_page(vma, addr, FOLL_SPLIT);
- }
+ for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
+ follow_page(vma, addr, FOLL_SPLIT);
}
-void thp_split_mm(struct mm_struct *mm)
+static inline void thp_split_mm(struct mm_struct *mm)
{
- struct vm_area_struct *vma = mm->mmap;
+ struct vm_area_struct *vma;
- while (vma != NULL) {
+ for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
thp_split_vma(vma);
vma->vm_flags &= ~VM_HUGEPAGE;
vma->vm_flags |= VM_NOHUGEPAGE;
- vma = vma->vm_next;
}
+ mm->def_flags |= VM_NOHUGEPAGE;
+}
+#else
+static inline void thp_split_mm(struct mm_struct *mm)
+{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
+ struct mm_struct *mm, pud_t *pud,
+ unsigned long addr, unsigned long end)
+{
+ unsigned long next, *table, *new;
+ struct page *page;
+ pmd_t *pmd;
+
+ pmd = pmd_offset(pud, addr);
+ do {
+ next = pmd_addr_end(addr, end);
+again:
+ if (pmd_none_or_clear_bad(pmd))
+ continue;
+ table = (unsigned long *) pmd_deref(*pmd);
+ page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
+ if (page_table_with_pgste(page))
+ continue;
+ /* Allocate new page table with pgstes */
+ new = page_table_alloc_pgste(mm, addr);
+ if (!new) {
+ mm->context.has_pgste = 0;
+ continue;
+ }
+ spin_lock(&mm->page_table_lock);
+ if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
+ /* Nuke pmd entry pointing to the "short" page table */
+ pmdp_flush_lazy(mm, addr, pmd);
+ pmd_clear(pmd);
+ /* Copy ptes from old table to new table */
+ memcpy(new, table, PAGE_SIZE/2);
+ clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
+ /* Establish new table */
+ pmd_populate(mm, pmd, (pte_t *) new);
+ /* Free old table with rcu, there might be a walker! */
+ page_table_free_rcu(tlb, table);
+ new = NULL;
+ }
+ spin_unlock(&mm->page_table_lock);
+ if (new) {
+ page_table_free_pgste(new);
+ goto again;
+ }
+ } while (pmd++, addr = next, addr != end);
+
+ return addr;
+}
+
+static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
+ struct mm_struct *mm, pgd_t *pgd,
+ unsigned long addr, unsigned long end)
+{
+ unsigned long next;
+ pud_t *pud;
+
+ pud = pud_offset(pgd, addr);
+ do {
+ next = pud_addr_end(addr, end);
+ if (pud_none_or_clear_bad(pud))
+ continue;
+ next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
+ } while (pud++, addr = next, addr != end);
+
+ return addr;
+}
+
+static void page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
+ unsigned long addr, unsigned long end)
+{
+ unsigned long next;
+ pgd_t *pgd;
+
+ pgd = pgd_offset(mm, addr);
+ do {
+ next = pgd_addr_end(addr, end);
+ if (pgd_none_or_clear_bad(pgd))
+ continue;
+ next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
+ } while (pgd++, addr = next, addr != end);
+}
+
/*
* switch on pgstes for its userspace process (for kvm)
*/
int s390_enable_sie(void)
{
struct task_struct *tsk = current;
- struct mm_struct *mm, *old_mm;
+ struct mm_struct *mm = tsk->mm;
+ struct mmu_gather tlb;
/* Do we have switched amode? If no, we cannot do sie */
if (s390_user_mode == HOME_SPACE_MODE)
@@ -1072,57 +1163,16 @@ int s390_enable_sie(void)
if (mm_has_pgste(tsk->mm))
return 0;
- /* lets check if we are allowed to replace the mm */
- task_lock(tsk);
- if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
-#ifdef CONFIG_AIO
- !hlist_empty(&tsk->mm->ioctx_list) ||
-#endif
- tsk->mm != tsk->active_mm) {
- task_unlock(tsk);
- return -EINVAL;
- }
- task_unlock(tsk);
-
- /* we copy the mm and let dup_mm create the page tables with_pgstes */
- tsk->mm->context.alloc_pgste = 1;
- /* make sure that both mms have a correct rss state */
- sync_mm_rss(tsk->mm);
- mm = dup_mm(tsk);
- tsk->mm->context.alloc_pgste = 0;
- if (!mm)
- return -ENOMEM;
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ down_write(&mm->mmap_sem);
/* split thp mappings and disable thp for future mappings */
thp_split_mm(mm);
- mm->def_flags |= VM_NOHUGEPAGE;
-#endif
-
- /* Now lets check again if something happened */
- task_lock(tsk);
- if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
-#ifdef CONFIG_AIO
- !hlist_empty(&tsk->mm->ioctx_list) ||
-#endif
- tsk->mm != tsk->active_mm) {
- mmput(mm);
- task_unlock(tsk);
- return -EINVAL;
- }
-
- /* ok, we are alone. No ptrace, no threads, etc. */
- old_mm = tsk->mm;
- tsk->mm = tsk->active_mm = mm;
- preempt_disable();
- update_mm(mm, tsk);
- atomic_inc(&mm->context.attach_count);
- atomic_dec(&old_mm->context.attach_count);
- cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
- preempt_enable();
- task_unlock(tsk);
- mmput(old_mm);
- return 0;
+ /* Reallocate the page tables with pgstes */
+ mm->context.has_pgste = 1;
+ tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
+ page_table_realloc(&tlb, mm, 0, TASK_SIZE);
+ tlb_finish_mmu(&tlb, 0, TASK_SIZE);
+ up_write(&mm->mmap_sem);
+ return mm->context.has_pgste ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
@@ -1198,9 +1248,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
list_del(lh);
}
ptep = (pte_t *) pgtable;
- pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+ pte_val(*ptep) = _PAGE_INVALID;
ptep++;
- pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+ pte_val(*ptep) = _PAGE_INVALID;
return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 8b268fcc461..bcfb70b60be 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -69,7 +69,7 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address)
pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
if (!pte)
return NULL;
- clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
+ clear_table((unsigned long *) pte, _PAGE_INVALID,
PTRS_PER_PTE * sizeof(pte_t));
return pte;
}
@@ -101,7 +101,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
!(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) {
pud_val(*pu_dir) = __pa(address) |
_REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE |
- (ro ? _REGION_ENTRY_RO : 0);
+ (ro ? _REGION_ENTRY_PROTECT : 0);
address += PUD_SIZE;
continue;
}
@@ -118,7 +118,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
!(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) {
pmd_val(*pm_dir) = __pa(address) |
_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE |
- (ro ? _SEGMENT_ENTRY_RO : 0);
+ _SEGMENT_ENTRY_YOUNG |
+ (ro ? _SEGMENT_ENTRY_PROTECT : 0);
address += PMD_SIZE;
continue;
}
@@ -131,7 +132,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
}
pt_dir = pte_offset_kernel(pm_dir, address);
- pte_val(*pt_dir) = __pa(address) | (ro ? _PAGE_RO : 0);
+ pte_val(*pt_dir) = __pa(address) |
+ pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
address += PAGE_SIZE;
}
ret = 0;
@@ -154,7 +156,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
pte_t *pt_dir;
pte_t pte;
- pte_val(pte) = _PAGE_TYPE_EMPTY;
+ pte_val(pte) = _PAGE_INVALID;
while (address < end) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
@@ -255,7 +257,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
new_page =__pa(vmem_alloc_pages(0));
if (!new_page)
goto out;
- pte_val(*pt_dir) = __pa(new_page);
+ pte_val(*pt_dir) =
+ __pa(new_page) | pgprot_val(PAGE_KERNEL);
}
address += PAGE_SIZE;
}
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c
index ffeb17ce7f3..04e1b6a8536 100644
--- a/arch/s390/oprofile/init.c
+++ b/arch/s390/oprofile/init.c
@@ -346,16 +346,15 @@ static const struct file_operations timer_enabled_fops = {
};
-static int oprofile_create_hwsampling_files(struct super_block *sb,
- struct dentry *root)
+static int oprofile_create_hwsampling_files(struct dentry *root)
{
struct dentry *dir;
- dir = oprofilefs_mkdir(sb, root, "timer");
+ dir = oprofilefs_mkdir(root, "timer");
if (!dir)
return -EINVAL;
- oprofilefs_create_file(sb, dir, "enabled", &timer_enabled_fops);
+ oprofilefs_create_file(dir, "enabled", &timer_enabled_fops);
if (!hwsampler_available)
return 0;
@@ -376,17 +375,17 @@ static int oprofile_create_hwsampling_files(struct super_block *sb,
* and can only be set to 0.
*/
- dir = oprofilefs_mkdir(sb, root, "0");
+ dir = oprofilefs_mkdir(root, "0");
if (!dir)
return -EINVAL;
- oprofilefs_create_file(sb, dir, "enabled", &hwsampler_fops);
- oprofilefs_create_file(sb, dir, "event", &zero_fops);
- oprofilefs_create_file(sb, dir, "count", &hw_interval_fops);
- oprofilefs_create_file(sb, dir, "unit_mask", &zero_fops);
- oprofilefs_create_file(sb, dir, "kernel", &kernel_fops);
- oprofilefs_create_file(sb, dir, "user", &user_fops);
- oprofilefs_create_ulong(sb, dir, "hw_sdbt_blocks",
+ oprofilefs_create_file(dir, "enabled", &hwsampler_fops);
+ oprofilefs_create_file(dir, "event", &zero_fops);
+ oprofilefs_create_file(dir, "count", &hw_interval_fops);
+ oprofilefs_create_file(dir, "unit_mask", &zero_fops);
+ oprofilefs_create_file(dir, "kernel", &kernel_fops);
+ oprofilefs_create_file(dir, "user", &user_fops);
+ oprofilefs_create_ulong(dir, "hw_sdbt_blocks",
&oprofile_sdbt_blocks);
} else {
@@ -396,19 +395,19 @@ static int oprofile_create_hwsampling_files(struct super_block *sb,
* space tools. The /dev/oprofile/hwsampling fs is
* provided in that case.
*/
- dir = oprofilefs_mkdir(sb, root, "hwsampling");
+ dir = oprofilefs_mkdir(root, "hwsampling");
if (!dir)
return -EINVAL;
- oprofilefs_create_file(sb, dir, "hwsampler",
+ oprofilefs_create_file(dir, "hwsampler",
&hwsampler_fops);
- oprofilefs_create_file(sb, dir, "hw_interval",
+ oprofilefs_create_file(dir, "hw_interval",
&hw_interval_fops);
- oprofilefs_create_ro_ulong(sb, dir, "hw_min_interval",
+ oprofilefs_create_ro_ulong(dir, "hw_min_interval",
&oprofile_min_interval);
- oprofilefs_create_ro_ulong(sb, dir, "hw_max_interval",
+ oprofilefs_create_ro_ulong(dir, "hw_max_interval",
&oprofile_max_interval);
- oprofilefs_create_ulong(sb, dir, "hw_sdbt_blocks",
+ oprofilefs_create_ulong(dir, "hw_sdbt_blocks",
&oprofile_sdbt_blocks);
}
return 0;
@@ -440,7 +439,7 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops)
switch (id.machine) {
case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break;
case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break;
- case 0x2827: ops->cpu_type = "s390/zEC12"; break;
+ case 0x2827: case 0x2828: ops->cpu_type = "s390/zEC12"; break;
default: return -ENODEV;
}
}
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
index 086a2e37935..a9e1dc4ae44 100644
--- a/arch/s390/pci/Makefile
+++ b/arch/s390/pci/Makefile
@@ -2,5 +2,5 @@
# Makefile for the s390 PCI subsystem.
#
-obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o pci_sysfs.o \
+obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_sysfs.o \
pci_event.o pci_debug.o pci_insn.o
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index e2956ad39a4..f17a8343e36 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -42,45 +42,26 @@
#define SIC_IRQ_MODE_SINGLE 1
#define ZPCI_NR_DMA_SPACES 1
-#define ZPCI_MSI_VEC_BITS 6
#define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS
/* list of all detected zpci devices */
-LIST_HEAD(zpci_list);
-EXPORT_SYMBOL_GPL(zpci_list);
-DEFINE_MUTEX(zpci_list_lock);
-EXPORT_SYMBOL_GPL(zpci_list_lock);
+static LIST_HEAD(zpci_list);
+static DEFINE_SPINLOCK(zpci_list_lock);
-static struct pci_hp_callback_ops *hotplug_ops;
+static void zpci_enable_irq(struct irq_data *data);
+static void zpci_disable_irq(struct irq_data *data);
-static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
-static DEFINE_SPINLOCK(zpci_domain_lock);
-
-struct callback {
- irq_handler_t handler;
- void *data;
+static struct irq_chip zpci_irq_chip = {
+ .name = "zPCI",
+ .irq_unmask = zpci_enable_irq,
+ .irq_mask = zpci_disable_irq,
};
-struct zdev_irq_map {
- unsigned long aibv; /* AI bit vector */
- int msi_vecs; /* consecutive MSI-vectors used */
- int __unused;
- struct callback cb[ZPCI_NR_MSI_VECS]; /* callback handler array */
- spinlock_t lock; /* protect callbacks against de-reg */
-};
-
-struct intr_bucket {
- /* amap of adapters, one bit per dev, corresponds to one irq nr */
- unsigned long *alloc;
- /* AI summary bit, global page for all devices */
- unsigned long *aisb;
- /* pointer to aibv and callback data in zdev */
- struct zdev_irq_map *imap[ZPCI_NR_DEVICES];
- /* protects the whole bucket struct */
- spinlock_t lock;
-};
+static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
+static DEFINE_SPINLOCK(zpci_domain_lock);
-static struct intr_bucket *bucket;
+static struct airq_iv *zpci_aisb_iv;
+static struct airq_iv *zpci_aibv[ZPCI_NR_DEVICES];
/* Adapter interrupt definitions */
static void zpci_irq_handler(struct airq_struct *airq);
@@ -96,27 +77,8 @@ static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);
-/* highest irq summary bit */
-static int __read_mostly aisb_max;
-
-static struct kmem_cache *zdev_irq_cache;
static struct kmem_cache *zdev_fmb_cache;
-static inline int irq_to_msi_nr(unsigned int irq)
-{
- return irq & ZPCI_MSI_MASK;
-}
-
-static inline int irq_to_dev_nr(unsigned int irq)
-{
- return irq >> ZPCI_MSI_VEC_BITS;
-}
-
-static inline struct zdev_irq_map *get_imap(unsigned int irq)
-{
- return bucket->imap[irq_to_dev_nr(irq)];
-}
-
struct zpci_dev *get_zdev(struct pci_dev *pdev)
{
return (struct zpci_dev *) pdev->sysdata;
@@ -126,22 +88,17 @@ struct zpci_dev *get_zdev_by_fid(u32 fid)
{
struct zpci_dev *tmp, *zdev = NULL;
- mutex_lock(&zpci_list_lock);
+ spin_lock(&zpci_list_lock);
list_for_each_entry(tmp, &zpci_list, entry) {
if (tmp->fid == fid) {
zdev = tmp;
break;
}
}
- mutex_unlock(&zpci_list_lock);
+ spin_unlock(&zpci_list_lock);
return zdev;
}
-bool zpci_fid_present(u32 fid)
-{
- return (get_zdev_by_fid(fid) != NULL) ? true : false;
-}
-
static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus)
{
return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL;
@@ -160,8 +117,7 @@ int pci_proc_domain(struct pci_bus *bus)
EXPORT_SYMBOL_GPL(pci_proc_domain);
/* Modify PCI: Register adapter interruptions */
-static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
- u64 aibv)
+static int zpci_set_airq(struct zpci_dev *zdev)
{
u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
struct zpci_fib *fib;
@@ -172,14 +128,14 @@ static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb,
return -ENOMEM;
fib->isc = PCI_ISC;
- fib->noi = zdev->irq_map->msi_vecs;
fib->sum = 1; /* enable summary notifications */
- fib->aibv = aibv;
- fib->aibvo = 0; /* every function has its own page */
- fib->aisb = (u64) bucket->aisb + aisb / 8;
- fib->aisbo = aisb & ZPCI_MSI_MASK;
+ fib->noi = airq_iv_end(zdev->aibv);
+ fib->aibv = (unsigned long) zdev->aibv->vector;
+ fib->aibvo = 0; /* each zdev has its own interrupt vector */
+ fib->aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8;
+ fib->aisbo = zdev->aisb & 63;
- rc = s390pci_mod_fc(req, fib);
+ rc = zpci_mod_fc(req, fib);
pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi);
free_page((unsigned long) fib);
@@ -209,7 +165,7 @@ static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args
fib->iota = args->iota;
fib->fmb_addr = args->fmb_addr;
- rc = s390pci_mod_fc(req, fib);
+ rc = zpci_mod_fc(req, fib);
free_page((unsigned long) fib);
return rc;
}
@@ -234,7 +190,7 @@ int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
}
/* Modify PCI: Unregister adapter interruptions */
-static int zpci_unregister_airq(struct zpci_dev *zdev)
+static int zpci_clear_airq(struct zpci_dev *zdev)
{
struct mod_pci_args args = { 0, 0, 0, 0 };
@@ -283,7 +239,7 @@ static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
u64 data;
int rc;
- rc = s390pci_load(&data, req, offset);
+ rc = zpci_load(&data, req, offset);
if (!rc) {
data = data << ((8 - len) * 8);
data = le64_to_cpu(data);
@@ -301,25 +257,46 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
data = cpu_to_le64(data);
data = data >> ((8 - len) * 8);
- rc = s390pci_store(data, req, offset);
+ rc = zpci_store(data, req, offset);
return rc;
}
-void enable_irq(unsigned int irq)
+static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
+{
+ int offset, pos;
+ u32 mask_bits;
+
+ if (msi->msi_attrib.is_msix) {
+ offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL;
+ msi->masked = readl(msi->mask_base + offset);
+ writel(flag, msi->mask_base + offset);
+ } else if (msi->msi_attrib.maskbit) {
+ pos = (long) msi->mask_base;
+ pci_read_config_dword(msi->dev, pos, &mask_bits);
+ mask_bits &= ~(mask);
+ mask_bits |= flag & mask;
+ pci_write_config_dword(msi->dev, pos, mask_bits);
+ } else
+ return 0;
+
+ msi->msi_attrib.maskbit = !!flag;
+ return 1;
+}
+
+static void zpci_enable_irq(struct irq_data *data)
{
- struct msi_desc *msi = irq_get_msi_desc(irq);
+ struct msi_desc *msi = irq_get_msi_desc(data->irq);
zpci_msi_set_mask_bits(msi, 1, 0);
}
-EXPORT_SYMBOL_GPL(enable_irq);
-void disable_irq(unsigned int irq)
+static void zpci_disable_irq(struct irq_data *data)
{
- struct msi_desc *msi = irq_get_msi_desc(irq);
+ struct msi_desc *msi = irq_get_msi_desc(data->irq);
zpci_msi_set_mask_bits(msi, 1, 1);
}
-EXPORT_SYMBOL_GPL(disable_irq);
void pcibios_fixup_bus(struct pci_bus *bus)
{
@@ -404,152 +381,147 @@ static struct pci_ops pci_root_ops = {
.write = pci_write,
};
-/* store the last handled bit to implement fair scheduling of devices */
-static DEFINE_PER_CPU(unsigned long, next_sbit);
-
static void zpci_irq_handler(struct airq_struct *airq)
{
- unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit);
- int rescan = 0, max = aisb_max;
- struct zdev_irq_map *imap;
+ unsigned long si, ai;
+ struct airq_iv *aibv;
+ int irqs_on = 0;
inc_irq_stat(IRQIO_PCI);
- sbit = start;
-
-scan:
- /* find summary_bit */
- for_each_set_bit_left_cont(sbit, bucket->aisb, max) {
- clear_bit(63 - (sbit & 63), bucket->aisb + (sbit >> 6));
- last = sbit;
+ for (si = 0;;) {
+ /* Scan adapter summary indicator bit vector */
+ si = airq_iv_scan(zpci_aisb_iv, si, airq_iv_end(zpci_aisb_iv));
+ if (si == -1UL) {
+ if (irqs_on++)
+ /* End of second scan with interrupts on. */
+ break;
+ /* First scan complete, reenable interrupts. */
+ zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
+ si = 0;
+ continue;
+ }
- /* find vector bit */
- imap = bucket->imap[sbit];
- for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) {
+ /* Scan the adapter interrupt vector for this device. */
+ aibv = zpci_aibv[si];
+ for (ai = 0;;) {
+ ai = airq_iv_scan(aibv, ai, airq_iv_end(aibv));
+ if (ai == -1UL)
+ break;
inc_irq_stat(IRQIO_MSI);
- clear_bit(63 - mbit, &imap->aibv);
-
- spin_lock(&imap->lock);
- if (imap->cb[mbit].handler)
- imap->cb[mbit].handler(mbit,
- imap->cb[mbit].data);
- spin_unlock(&imap->lock);
+ airq_iv_lock(aibv, ai);
+ generic_handle_irq(airq_iv_get_data(aibv, ai));
+ airq_iv_unlock(aibv, ai);
}
}
-
- if (rescan)
- goto out;
-
- /* scan the skipped bits */
- if (start > 0) {
- sbit = 0;
- max = start;
- start = 0;
- goto scan;
- }
-
- /* enable interrupts again */
- set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
-
- /* check again to not lose initiative */
- rmb();
- max = aisb_max;
- sbit = find_first_bit_left(bucket->aisb, max);
- if (sbit != max) {
- rescan++;
- goto scan;
- }
-out:
- /* store next device bit to scan */
- __get_cpu_var(next_sbit) = (++last >= aisb_max) ? 0 : last;
}
-/* msi_vecs - number of requested interrupts, 0 place function to error state */
-static int zpci_setup_msi(struct pci_dev *pdev, int msi_vecs)
+int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
struct zpci_dev *zdev = get_zdev(pdev);
- unsigned int aisb, msi_nr;
+ unsigned int hwirq, irq, msi_vecs;
+ unsigned long aisb;
struct msi_desc *msi;
+ struct msi_msg msg;
int rc;
- /* store the number of used MSI vectors */
- zdev->irq_map->msi_vecs = min(msi_vecs, ZPCI_NR_MSI_VECS);
-
- spin_lock(&bucket->lock);
- aisb = find_first_zero_bit(bucket->alloc, PAGE_SIZE);
- /* alloc map exhausted? */
- if (aisb == PAGE_SIZE) {
- spin_unlock(&bucket->lock);
- return -EIO;
- }
- set_bit(aisb, bucket->alloc);
- spin_unlock(&bucket->lock);
+ pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
+ if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
+ return -EINVAL;
+ msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX);
+ msi_vecs = min_t(unsigned int, msi_vecs, CONFIG_PCI_NR_MSI);
+ /* Allocate adapter summary indicator bit */
+ rc = -EIO;
+ aisb = airq_iv_alloc_bit(zpci_aisb_iv);
+ if (aisb == -1UL)
+ goto out;
zdev->aisb = aisb;
- if (aisb + 1 > aisb_max)
- aisb_max = aisb + 1;
- /* wire up IRQ shortcut pointer */
- bucket->imap[zdev->aisb] = zdev->irq_map;
- pr_debug("%s: imap[%u] linked to %p\n", __func__, zdev->aisb, zdev->irq_map);
+ /* Create adapter interrupt vector */
+ rc = -ENOMEM;
+ zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK);
+ if (!zdev->aibv)
+ goto out_si;
- /* TODO: irq number 0 wont be found if we return less than requested MSIs.
- * ignore it for now and fix in common code.
- */
- msi_nr = aisb << ZPCI_MSI_VEC_BITS;
+ /* Wire up shortcut pointer */
+ zpci_aibv[aisb] = zdev->aibv;
+ /* Request MSI interrupts */
+ hwirq = 0;
list_for_each_entry(msi, &pdev->msi_list, list) {
- rc = zpci_setup_msi_irq(zdev, msi, msi_nr,
- aisb << ZPCI_MSI_VEC_BITS);
+ rc = -EIO;
+ irq = irq_alloc_desc(0); /* Alloc irq on node 0 */
+ if (irq == NO_IRQ)
+ goto out_msi;
+ rc = irq_set_msi_desc(irq, msi);
if (rc)
- return rc;
- msi_nr++;
+ goto out_msi;
+ irq_set_chip_and_handler(irq, &zpci_irq_chip,
+ handle_simple_irq);
+ msg.data = hwirq;
+ msg.address_lo = zdev->msi_addr & 0xffffffff;
+ msg.address_hi = zdev->msi_addr >> 32;
+ write_msi_msg(irq, &msg);
+ airq_iv_set_data(zdev->aibv, hwirq, irq);
+ hwirq++;
}
- rc = zpci_register_airq(zdev, aisb, (u64) &zdev->irq_map->aibv);
- if (rc) {
- clear_bit(aisb, bucket->alloc);
- dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
- return rc;
+ /* Enable adapter interrupts */
+ rc = zpci_set_airq(zdev);
+ if (rc)
+ goto out_msi;
+
+ return (msi_vecs == nvec) ? 0 : msi_vecs;
+
+out_msi:
+ list_for_each_entry(msi, &pdev->msi_list, list) {
+ if (hwirq-- == 0)
+ break;
+ irq_set_msi_desc(msi->irq, NULL);
+ irq_free_desc(msi->irq);
+ msi->msg.address_lo = 0;
+ msi->msg.address_hi = 0;
+ msi->msg.data = 0;
+ msi->irq = 0;
}
- return (zdev->irq_map->msi_vecs == msi_vecs) ?
- 0 : zdev->irq_map->msi_vecs;
+ zpci_aibv[aisb] = NULL;
+ airq_iv_release(zdev->aibv);
+out_si:
+ airq_iv_free_bit(zpci_aisb_iv, aisb);
+out:
+ dev_err(&pdev->dev, "register MSI failed with: %d\n", rc);
+ return rc;
}
-static void zpci_teardown_msi(struct pci_dev *pdev)
+void arch_teardown_msi_irqs(struct pci_dev *pdev)
{
struct zpci_dev *zdev = get_zdev(pdev);
struct msi_desc *msi;
- int aisb, rc;
+ int rc;
- rc = zpci_unregister_airq(zdev);
+ pr_info("%s: on pdev: %p\n", __func__, pdev);
+
+ /* Disable adapter interrupts */
+ rc = zpci_clear_airq(zdev);
if (rc) {
dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc);
return;
}
- msi = list_first_entry(&pdev->msi_list, struct msi_desc, list);
- aisb = irq_to_dev_nr(msi->irq);
-
- list_for_each_entry(msi, &pdev->msi_list, list)
- zpci_teardown_msi_irq(zdev, msi);
-
- clear_bit(aisb, bucket->alloc);
- if (aisb + 1 == aisb_max)
- aisb_max--;
-}
-
-int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
-{
- pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
- if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
- return -EINVAL;
- return zpci_setup_msi(pdev, nvec);
-}
+ /* Release MSI interrupts */
+ list_for_each_entry(msi, &pdev->msi_list, list) {
+ zpci_msi_set_mask_bits(msi, 1, 1);
+ irq_set_msi_desc(msi->irq, NULL);
+ irq_free_desc(msi->irq);
+ msi->msg.address_lo = 0;
+ msi->msg.address_hi = 0;
+ msi->msg.data = 0;
+ msi->irq = 0;
+ }
-void arch_teardown_msi_irqs(struct pci_dev *pdev)
-{
- pr_info("%s: on pdev: %p\n", __func__, pdev);
- zpci_teardown_msi(pdev);
+ zpci_aibv[zdev->aisb] = NULL;
+ airq_iv_release(zdev->aibv);
+ airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
}
static void zpci_map_resources(struct zpci_dev *zdev)
@@ -564,8 +536,6 @@ static void zpci_map_resources(struct zpci_dev *zdev)
continue;
pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0);
pdev->resource[i].end = pdev->resource[i].start + len - 1;
- pr_debug("BAR%i: -> start: %Lx end: %Lx\n",
- i, pdev->resource[i].start, pdev->resource[i].end);
}
}
@@ -589,162 +559,47 @@ struct zpci_dev *zpci_alloc_device(void)
/* Alloc memory for our private pci device data */
zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
- if (!zdev)
- return ERR_PTR(-ENOMEM);
-
- /* Alloc aibv & callback space */
- zdev->irq_map = kmem_cache_zalloc(zdev_irq_cache, GFP_KERNEL);
- if (!zdev->irq_map)
- goto error;
- WARN_ON((u64) zdev->irq_map & 0xff);
- return zdev;
-
-error:
- kfree(zdev);
- return ERR_PTR(-ENOMEM);
+ return zdev ? : ERR_PTR(-ENOMEM);
}
void zpci_free_device(struct zpci_dev *zdev)
{
- kmem_cache_free(zdev_irq_cache, zdev->irq_map);
kfree(zdev);
}
-/*
- * Too late for any s390 specific setup, since interrupts must be set up
- * already which requires DMA setup too and the pci scan will access the
- * config space, which only works if the function handle is enabled.
- */
-int pcibios_enable_device(struct pci_dev *pdev, int mask)
-{
- struct resource *res;
- u16 cmd;
- int i;
-
- pci_read_config_word(pdev, PCI_COMMAND, &cmd);
-
- for (i = 0; i < PCI_BAR_COUNT; i++) {
- res = &pdev->resource[i];
-
- if (res->flags & IORESOURCE_IO)
- return -EINVAL;
-
- if (res->flags & IORESOURCE_MEM)
- cmd |= PCI_COMMAND_MEMORY;
- }
- pci_write_config_word(pdev, PCI_COMMAND, cmd);
- return 0;
-}
-
int pcibios_add_platform_entries(struct pci_dev *pdev)
{
return zpci_sysfs_add_device(&pdev->dev);
}
-int zpci_request_irq(unsigned int irq, irq_handler_t handler, void *data)
-{
- int msi_nr = irq_to_msi_nr(irq);
- struct zdev_irq_map *imap;
- struct msi_desc *msi;
-
- msi = irq_get_msi_desc(irq);
- if (!msi)
- return -EIO;
-
- imap = get_imap(irq);
- spin_lock_init(&imap->lock);
-
- pr_debug("%s: register handler for IRQ:MSI %d:%d\n", __func__, irq >> 6, msi_nr);
- imap->cb[msi_nr].handler = handler;
- imap->cb[msi_nr].data = data;
-
- /*
- * The generic MSI code returns with the interrupt disabled on the
- * card, using the MSI mask bits. Firmware doesn't appear to unmask
- * at that level, so we do it here by hand.
- */
- zpci_msi_set_mask_bits(msi, 1, 0);
- return 0;
-}
-
-void zpci_free_irq(unsigned int irq)
-{
- struct zdev_irq_map *imap = get_imap(irq);
- int msi_nr = irq_to_msi_nr(irq);
- unsigned long flags;
-
- pr_debug("%s: for irq: %d\n", __func__, irq);
-
- spin_lock_irqsave(&imap->lock, flags);
- imap->cb[msi_nr].handler = NULL;
- imap->cb[msi_nr].data = NULL;
- spin_unlock_irqrestore(&imap->lock, flags);
-}
-
-int request_irq(unsigned int irq, irq_handler_t handler,
- unsigned long irqflags, const char *devname, void *dev_id)
-{
- pr_debug("%s: irq: %d handler: %p flags: %lx dev: %s\n",
- __func__, irq, handler, irqflags, devname);
-
- return zpci_request_irq(irq, handler, dev_id);
-}
-EXPORT_SYMBOL_GPL(request_irq);
-
-void free_irq(unsigned int irq, void *dev_id)
-{
- zpci_free_irq(irq);
-}
-EXPORT_SYMBOL_GPL(free_irq);
-
static int __init zpci_irq_init(void)
{
- int cpu, rc;
-
- bucket = kzalloc(sizeof(*bucket), GFP_KERNEL);
- if (!bucket)
- return -ENOMEM;
-
- bucket->aisb = (unsigned long *) get_zeroed_page(GFP_KERNEL);
- if (!bucket->aisb) {
- rc = -ENOMEM;
- goto out_aisb;
- }
-
- bucket->alloc = (unsigned long *) get_zeroed_page(GFP_KERNEL);
- if (!bucket->alloc) {
- rc = -ENOMEM;
- goto out_alloc;
- }
+ int rc;
rc = register_adapter_interrupt(&zpci_airq);
if (rc)
- goto out_ai;
+ goto out;
/* Set summary to 1 to be called every time for the ISC. */
*zpci_airq.lsi_ptr = 1;
- for_each_online_cpu(cpu)
- per_cpu(next_sbit, cpu) = 0;
+ rc = -ENOMEM;
+ zpci_aisb_iv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC);
+ if (!zpci_aisb_iv)
+ goto out_airq;
- spin_lock_init(&bucket->lock);
- set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
+ zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
return 0;
-out_ai:
- free_page((unsigned long) bucket->alloc);
-out_alloc:
- free_page((unsigned long) bucket->aisb);
-out_aisb:
- kfree(bucket);
+out_airq:
+ unregister_adapter_interrupt(&zpci_airq);
+out:
return rc;
}
static void zpci_irq_exit(void)
{
- free_page((unsigned long) bucket->alloc);
- free_page((unsigned long) bucket->aisb);
+ airq_iv_release(zpci_aisb_iv);
unregister_adapter_interrupt(&zpci_airq);
- kfree(bucket);
}
static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size,
@@ -801,16 +656,49 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
int pcibios_add_device(struct pci_dev *pdev)
{
struct zpci_dev *zdev = get_zdev(pdev);
+ struct resource *res;
+ int i;
+
+ zdev->pdev = pdev;
+ zpci_map_resources(zdev);
+
+ for (i = 0; i < PCI_BAR_COUNT; i++) {
+ res = &pdev->resource[i];
+ if (res->parent || !res->flags)
+ continue;
+ pci_claim_resource(pdev, i);
+ }
+
+ return 0;
+}
+
+int pcibios_enable_device(struct pci_dev *pdev, int mask)
+{
+ struct zpci_dev *zdev = get_zdev(pdev);
+ struct resource *res;
+ u16 cmd;
+ int i;
zdev->pdev = pdev;
zpci_debug_init_device(zdev);
zpci_fmb_enable_device(zdev);
zpci_map_resources(zdev);
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ for (i = 0; i < PCI_BAR_COUNT; i++) {
+ res = &pdev->resource[i];
+
+ if (res->flags & IORESOURCE_IO)
+ return -EINVAL;
+
+ if (res->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
return 0;
}
-void pcibios_release_device(struct pci_dev *pdev)
+void pcibios_disable_device(struct pci_dev *pdev)
{
struct zpci_dev *zdev = get_zdev(pdev);
@@ -898,6 +786,8 @@ int zpci_enable_device(struct zpci_dev *zdev)
rc = zpci_dma_init_device(zdev);
if (rc)
goto out_dma;
+
+ zdev->state = ZPCI_FN_STATE_ONLINE;
return 0;
out_dma:
@@ -926,18 +816,16 @@ int zpci_create_device(struct zpci_dev *zdev)
rc = zpci_enable_device(zdev);
if (rc)
goto out_free;
-
- zdev->state = ZPCI_FN_STATE_ONLINE;
}
rc = zpci_scan_bus(zdev);
if (rc)
goto out_disable;
- mutex_lock(&zpci_list_lock);
+ spin_lock(&zpci_list_lock);
list_add_tail(&zdev->entry, &zpci_list);
- if (hotplug_ops)
- hotplug_ops->create_slot(zdev);
- mutex_unlock(&zpci_list_lock);
+ spin_unlock(&zpci_list_lock);
+
+ zpci_init_slot(zdev);
return 0;
@@ -967,15 +855,10 @@ static inline int barsize(u8 size)
static int zpci_mem_init(void)
{
- zdev_irq_cache = kmem_cache_create("PCI_IRQ_cache", sizeof(struct zdev_irq_map),
- L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL);
- if (!zdev_irq_cache)
- goto error_zdev;
-
zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
16, 0, NULL);
if (!zdev_fmb_cache)
- goto error_fmb;
+ goto error_zdev;
/* TODO: use realloc */
zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),
@@ -986,8 +869,6 @@ static int zpci_mem_init(void)
error_iomap:
kmem_cache_destroy(zdev_fmb_cache);
-error_fmb:
- kmem_cache_destroy(zdev_irq_cache);
error_zdev:
return -ENOMEM;
}
@@ -995,28 +876,10 @@ error_zdev:
static void zpci_mem_exit(void)
{
kfree(zpci_iomap_start);
- kmem_cache_destroy(zdev_irq_cache);
kmem_cache_destroy(zdev_fmb_cache);
}
-void zpci_register_hp_ops(struct pci_hp_callback_ops *ops)
-{
- mutex_lock(&zpci_list_lock);
- hotplug_ops = ops;
- mutex_unlock(&zpci_list_lock);
-}
-EXPORT_SYMBOL_GPL(zpci_register_hp_ops);
-
-void zpci_deregister_hp_ops(void)
-{
- mutex_lock(&zpci_list_lock);
- hotplug_ops = NULL;
- mutex_unlock(&zpci_list_lock);
-}
-EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops);
-
-unsigned int s390_pci_probe;
-EXPORT_SYMBOL_GPL(s390_pci_probe);
+static unsigned int s390_pci_probe;
char * __init pcibios_setup(char *str)
{
@@ -1044,16 +907,12 @@ static int __init pci_base_init(void)
rc = zpci_debug_init();
if (rc)
- return rc;
+ goto out;
rc = zpci_mem_init();
if (rc)
goto out_mem;
- rc = zpci_msihash_init();
- if (rc)
- goto out_hash;
-
rc = zpci_irq_init();
if (rc)
goto out_irq;
@@ -1062,7 +921,7 @@ static int __init pci_base_init(void)
if (rc)
goto out_dma;
- rc = clp_find_pci_devices();
+ rc = clp_scan_pci_devices();
if (rc)
goto out_find;
@@ -1073,11 +932,15 @@ out_find:
out_dma:
zpci_irq_exit();
out_irq:
- zpci_msihash_exit();
-out_hash:
zpci_mem_exit();
out_mem:
zpci_debug_exit();
+out:
return rc;
}
-subsys_initcall(pci_base_init);
+subsys_initcall_sync(pci_base_init);
+
+void zpci_rescan(void)
+{
+ clp_rescan_pci_devices_simple();
+}
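
The rewritten handler drops the old per-CPU next_sbit bookkeeping in favour of a two-pass scan: one pass with the ISC masked, then interrupts are re-enabled and the summary vector is scanned once more so nothing that arrived in between is lost. A minimal sketch of that loop, reusing only the helpers visible in the hunk above (demo_handle_device() is a hypothetical stand-in for the per-device aibv walk):

	/* Sketch; assumes the same headers as arch/s390/pci/pci.c. */
	static void demo_handle_device(unsigned long si)
	{
		/* hypothetical per-device work: scan that device's own vector */
	}

	static void demo_scan_summary(struct airq_iv *sbv)
	{
		unsigned long si;
		int irqs_on = 0;

		for (si = 0;;) {
			si = airq_iv_scan(sbv, si, airq_iv_end(sbv));
			if (si == -1UL) {
				if (irqs_on++)
					break;	/* second pass done, stop */
				/* First pass done: re-enable the ISC, then
				 * rescan once to close the race with bits set
				 * while interrupts were still masked. */
				zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC);
				si = 0;
				continue;
			}
			demo_handle_device(si);
		}
	}
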
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 2e9539625d9..475563c3d1e 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -36,9 +36,9 @@ static inline u8 clp_instr(void *data)
return cc;
}
-static void *clp_alloc_block(void)
+static void *clp_alloc_block(gfp_t gfp_mask)
{
- return (void *) __get_free_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE));
+ return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
}
static void clp_free_block(void *ptr)
@@ -70,7 +70,7 @@ static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
struct clp_req_rsp_query_pci_grp *rrb;
int rc;
- rrb = clp_alloc_block();
+ rrb = clp_alloc_block(GFP_KERNEL);
if (!rrb)
return -ENOMEM;
@@ -113,7 +113,7 @@ static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh)
struct clp_req_rsp_query_pci *rrb;
int rc;
- rrb = clp_alloc_block();
+ rrb = clp_alloc_block(GFP_KERNEL);
if (!rrb)
return -ENOMEM;
@@ -179,9 +179,9 @@ error:
static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
{
struct clp_req_rsp_set_pci *rrb;
- int rc, retries = 1000;
+ int rc, retries = 100;
- rrb = clp_alloc_block();
+ rrb = clp_alloc_block(GFP_KERNEL);
if (!rrb)
return -ENOMEM;
@@ -199,7 +199,7 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command)
retries--;
if (retries < 0)
break;
- msleep(1);
+ msleep(20);
}
} while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);
@@ -245,49 +245,12 @@ int clp_disable_fh(struct zpci_dev *zdev)
return rc;
}
-static void clp_check_pcifn_entry(struct clp_fh_list_entry *entry)
+static int clp_list_pci(struct clp_req_rsp_list_pci *rrb,
+ void (*cb)(struct clp_fh_list_entry *entry))
{
- int present, rc;
-
- if (!entry->vendor_id)
- return;
-
- /* TODO: be a little bit more scalable */
- present = zpci_fid_present(entry->fid);
-
- if (present)
- pr_debug("%s: device %x already present\n", __func__, entry->fid);
-
- /* skip already used functions */
- if (present && entry->config_state)
- return;
-
- /* aev 306: function moved to stand-by state */
- if (present && !entry->config_state) {
- /*
- * The handle is already disabled, that means no iota/irq freeing via
- * the firmware interfaces anymore. Need to free resources manually
- * (DMA memory, debug, sysfs)...
- */
- zpci_stop_device(get_zdev_by_fid(entry->fid));
- return;
- }
-
- rc = clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
- if (rc)
- pr_err("Failed to add fid: 0x%x\n", entry->fid);
-}
-
-int clp_find_pci_devices(void)
-{
- struct clp_req_rsp_list_pci *rrb;
u64 resume_token = 0;
int entries, i, rc;
- rrb = clp_alloc_block();
- if (!rrb)
- return -ENOMEM;
-
do {
memset(rrb, 0, sizeof(*rrb));
rrb->request.hdr.len = sizeof(rrb->request);
@@ -316,12 +279,101 @@ int clp_find_pci_devices(void)
resume_token = rrb->response.resume_token;
for (i = 0; i < entries; i++)
- clp_check_pcifn_entry(&rrb->response.fh_list[i]);
+ cb(&rrb->response.fh_list[i]);
} while (resume_token);
pr_debug("Maximum number of supported PCI functions: %u\n",
rrb->response.max_fn);
out:
+ return rc;
+}
+
+static void __clp_add(struct clp_fh_list_entry *entry)
+{
+ if (!entry->vendor_id)
+ return;
+
+ clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
+}
+
+static void __clp_rescan(struct clp_fh_list_entry *entry)
+{
+ struct zpci_dev *zdev;
+
+ if (!entry->vendor_id)
+ return;
+
+ zdev = get_zdev_by_fid(entry->fid);
+ if (!zdev) {
+ clp_add_pci_device(entry->fid, entry->fh, entry->config_state);
+ return;
+ }
+
+ if (!entry->config_state) {
+ /*
+ * The handle is already disabled, that means no iota/irq freeing via
+ * the firmware interfaces anymore. Need to free resources manually
+ * (DMA memory, debug, sysfs)...
+ */
+ zpci_stop_device(zdev);
+ }
+}
+
+static void __clp_update(struct clp_fh_list_entry *entry)
+{
+ struct zpci_dev *zdev;
+
+ if (!entry->vendor_id)
+ return;
+
+ zdev = get_zdev_by_fid(entry->fid);
+ if (!zdev)
+ return;
+
+ zdev->fh = entry->fh;
+}
+
+int clp_scan_pci_devices(void)
+{
+ struct clp_req_rsp_list_pci *rrb;
+ int rc;
+
+ rrb = clp_alloc_block(GFP_KERNEL);
+ if (!rrb)
+ return -ENOMEM;
+
+ rc = clp_list_pci(rrb, __clp_add);
+
+ clp_free_block(rrb);
+ return rc;
+}
+
+int clp_rescan_pci_devices(void)
+{
+ struct clp_req_rsp_list_pci *rrb;
+ int rc;
+
+ rrb = clp_alloc_block(GFP_KERNEL);
+ if (!rrb)
+ return -ENOMEM;
+
+ rc = clp_list_pci(rrb, __clp_rescan);
+
+ clp_free_block(rrb);
+ return rc;
+}
+
+int clp_rescan_pci_devices_simple(void)
+{
+ struct clp_req_rsp_list_pci *rrb;
+ int rc;
+
+ rrb = clp_alloc_block(GFP_NOWAIT);
+ if (!rrb)
+ return -ENOMEM;
+
+ rc = clp_list_pci(rrb, __clp_update);
+
clp_free_block(rrb);
return rc;
}
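
With clp_list_pci() taking a callback, the three entry points above differ only in the walker they pass. A hedged sketch of how a further walker would plug in (the __clp_count walker and clp_count_pci_devices() are hypothetical and not part of this patch):

	static unsigned int demo_fn_count;	/* hypothetical counter */

	static void __clp_count(struct clp_fh_list_entry *entry)
	{
		if (entry->vendor_id)
			demo_fn_count++;
	}

	static int clp_count_pci_devices(void)	/* hypothetical helper */
	{
		struct clp_req_rsp_list_pci *rrb;
		int rc;

		rrb = clp_alloc_block(GFP_KERNEL);
		if (!rrb)
			return -ENOMEM;
		rc = clp_list_pci(rrb, __clp_count);
		clp_free_block(rrb);
		return rc ? rc : demo_fn_count;
	}
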
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index a2343c1f6e0..7e5573acb06 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -10,6 +10,7 @@
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>
@@ -170,8 +171,8 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
*/
goto no_refresh;
- rc = s390pci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
- nr_pages * PAGE_SIZE);
+ rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
+ nr_pages * PAGE_SIZE);
no_refresh:
spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
@@ -407,7 +408,6 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int zpci_dma_init_device(struct zpci_dev *zdev)
{
- unsigned int bitmap_order;
int rc;
spin_lock_init(&zdev->iommu_bitmap_lock);
@@ -421,12 +421,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
- bitmap_order = get_order(zdev->iommu_pages / 8);
- pr_info("iommu_size: 0x%lx iommu_pages: 0x%lx bitmap_order: %i\n",
- zdev->iommu_size, zdev->iommu_pages, bitmap_order);
-
- zdev->iommu_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
- bitmap_order);
+ zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
if (!zdev->iommu_bitmap) {
rc = -ENOMEM;
goto out_reg;
@@ -451,8 +446,7 @@ void zpci_dma_exit_device(struct zpci_dev *zdev)
{
zpci_unregister_ioat(zdev, 0);
dma_cleanup_tables(zdev);
- free_pages((unsigned long) zdev->iommu_bitmap,
- get_order(zdev->iommu_pages / 8));
+ vfree(zdev->iommu_bitmap);
zdev->iommu_bitmap = NULL;
zdev->next_bit = 0;
}
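
The move from __get_free_pages() to vzalloc() avoids a high-order, physically contiguous allocation for the IOMMU bitmap, which holds one bit per 4K IOVA page of the aperture. A rough, illustrative sketch of the sizing involved (numbers are examples only):

	/* Illustrative only: bitmap bytes needed for a given aperture size. */
	static unsigned long demo_iommu_bitmap_bytes(unsigned long iommu_size)
	{
		unsigned long pages = iommu_size >> PAGE_SHIFT;	/* 4K IOVA pages */

		return pages / 8;	/* one bit per page */
	}
	/* e.g. a 64 GiB aperture -> 16M pages -> a 2 MiB bitmap, which is an
	 * order-9 contiguous allocation with __get_free_pages() but only needs
	 * to be virtually contiguous with vzalloc()/vfree(). */
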
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index ec62e3a0dc0..0aecaf95484 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -69,7 +69,7 @@ static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf)
clp_add_pci_device(ccdf->fid, ccdf->fh, 0);
break;
case 0x0306:
- clp_find_pci_devices();
+ clp_rescan_pci_devices();
break;
default:
break;
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
index 22eeb9d7ffe..85267c058af 100644
--- a/arch/s390/pci/pci_insn.c
+++ b/arch/s390/pci/pci_insn.c
@@ -27,7 +27,7 @@ static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
return cc;
}
-int s390pci_mod_fc(u64 req, struct zpci_fib *fib)
+int zpci_mod_fc(u64 req, struct zpci_fib *fib)
{
u8 cc, status;
@@ -61,7 +61,7 @@ static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
return cc;
}
-int s390pci_refresh_trans(u64 fn, u64 addr, u64 range)
+int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
{
u8 cc, status;
@@ -78,7 +78,7 @@ int s390pci_refresh_trans(u64 fn, u64 addr, u64 range)
}
/* Set Interruption Controls */
-void set_irq_ctrl(u16 ctl, char *unused, u8 isc)
+void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc)
{
asm volatile (
" .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n"
@@ -109,7 +109,7 @@ static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
return cc;
}
-int s390pci_load(u64 *data, u64 req, u64 offset)
+int zpci_load(u64 *data, u64 req, u64 offset)
{
u8 status;
int cc;
@@ -125,7 +125,7 @@ int s390pci_load(u64 *data, u64 req, u64 offset)
__func__, cc, status, req, offset);
return (cc > 0) ? -EIO : cc;
}
-EXPORT_SYMBOL_GPL(s390pci_load);
+EXPORT_SYMBOL_GPL(zpci_load);
/* PCI Store */
static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
@@ -147,7 +147,7 @@ static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
return cc;
}
-int s390pci_store(u64 data, u64 req, u64 offset)
+int zpci_store(u64 data, u64 req, u64 offset)
{
u8 status;
int cc;
@@ -163,7 +163,7 @@ int s390pci_store(u64 data, u64 req, u64 offset)
__func__, cc, status, req, offset);
return (cc > 0) ? -EIO : cc;
}
-EXPORT_SYMBOL_GPL(s390pci_store);
+EXPORT_SYMBOL_GPL(zpci_store);
/* PCI Store Block */
static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
@@ -183,7 +183,7 @@ static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
return cc;
}
-int s390pci_store_block(const u64 *data, u64 req, u64 offset)
+int zpci_store_block(const u64 *data, u64 req, u64 offset)
{
u8 status;
int cc;
@@ -199,4 +199,4 @@ int s390pci_store_block(const u64 *data, u64 req, u64 offset)
__func__, cc, status, req, offset);
return (cc > 0) ? -EIO : cc;
}
-EXPORT_SYMBOL_GPL(s390pci_store_block);
+EXPORT_SYMBOL_GPL(zpci_store_block);
diff --git a/arch/s390/pci/pci_msi.c b/arch/s390/pci/pci_msi.c
deleted file mode 100644
index b097aed05a9..00000000000
--- a/arch/s390/pci/pci_msi.c
+++ /dev/null
@@ -1,142 +0,0 @@
-/*
- * Copyright IBM Corp. 2012
- *
- * Author(s):
- * Jan Glauber <jang@linux.vnet.ibm.com>
- */
-
-#define COMPONENT "zPCI"
-#define pr_fmt(fmt) COMPONENT ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/rculist.h>
-#include <linux/hash.h>
-#include <linux/pci.h>
-#include <linux/msi.h>
-#include <asm/hw_irq.h>
-
-/* mapping of irq numbers to msi_desc */
-static struct hlist_head *msi_hash;
-static const unsigned int msi_hash_bits = 8;
-#define MSI_HASH_BUCKETS (1U << msi_hash_bits)
-#define msi_hashfn(nr) hash_long(nr, msi_hash_bits)
-
-static DEFINE_SPINLOCK(msi_map_lock);
-
-struct msi_desc *__irq_get_msi_desc(unsigned int irq)
-{
- struct msi_map *map;
-
- hlist_for_each_entry_rcu(map,
- &msi_hash[msi_hashfn(irq)], msi_chain)
- if (map->irq == irq)
- return map->msi;
- return NULL;
-}
-
-int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
-{
- if (msi->msi_attrib.is_msix) {
- int offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL;
- msi->masked = readl(msi->mask_base + offset);
- writel(flag, msi->mask_base + offset);
- } else {
- if (msi->msi_attrib.maskbit) {
- int pos;
- u32 mask_bits;
-
- pos = (long) msi->mask_base;
- pci_read_config_dword(msi->dev, pos, &mask_bits);
- mask_bits &= ~(mask);
- mask_bits |= flag & mask;
- pci_write_config_dword(msi->dev, pos, mask_bits);
- } else {
- return 0;
- }
- }
-
- msi->msi_attrib.maskbit = !!flag;
- return 1;
-}
-
-int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi,
- unsigned int nr, int offset)
-{
- struct msi_map *map;
- struct msi_msg msg;
- int rc;
-
- map = kmalloc(sizeof(*map), GFP_KERNEL);
- if (map == NULL)
- return -ENOMEM;
-
- map->irq = nr;
- map->msi = msi;
- zdev->msi_map[nr & ZPCI_MSI_MASK] = map;
- INIT_HLIST_NODE(&map->msi_chain);
-
- pr_debug("%s hashing irq: %u to bucket nr: %llu\n",
- __func__, nr, msi_hashfn(nr));
- hlist_add_head_rcu(&map->msi_chain, &msi_hash[msi_hashfn(nr)]);
-
- spin_lock(&msi_map_lock);
- rc = irq_set_msi_desc(nr, msi);
- if (rc) {
- spin_unlock(&msi_map_lock);
- hlist_del_rcu(&map->msi_chain);
- kfree(map);
- zdev->msi_map[nr & ZPCI_MSI_MASK] = NULL;
- return rc;
- }
- spin_unlock(&msi_map_lock);
-
- msg.data = nr - offset;
- msg.address_lo = zdev->msi_addr & 0xffffffff;
- msg.address_hi = zdev->msi_addr >> 32;
- write_msi_msg(nr, &msg);
- return 0;
-}
-
-void zpci_teardown_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi)
-{
- int irq = msi->irq & ZPCI_MSI_MASK;
- struct msi_map *map;
-
- msi->msg.address_lo = 0;
- msi->msg.address_hi = 0;
- msi->msg.data = 0;
- msi->irq = 0;
- zpci_msi_set_mask_bits(msi, 1, 1);
-
- spin_lock(&msi_map_lock);
- map = zdev->msi_map[irq];
- hlist_del_rcu(&map->msi_chain);
- kfree(map);
- zdev->msi_map[irq] = NULL;
- spin_unlock(&msi_map_lock);
-}
-
-/*
- * The msi hash table has 256 entries which is good for 4..20
- * devices (a typical device allocates 10 + CPUs MSI's). Maybe make
- * the hash table size adjustable later.
- */
-int __init zpci_msihash_init(void)
-{
- unsigned int i;
-
- msi_hash = kmalloc(MSI_HASH_BUCKETS * sizeof(*msi_hash), GFP_KERNEL);
- if (!msi_hash)
- return -ENOMEM;
-
- for (i = 0; i < MSI_HASH_BUCKETS; i++)
- INIT_HLIST_HEAD(&msi_hash[i]);
- return 0;
-}
-
-void __init zpci_msihash_exit(void)
-{
- kfree(msi_hash);
-}
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
index e99a2557f18..cf8a12ff733 100644
--- a/arch/s390/pci/pci_sysfs.c
+++ b/arch/s390/pci/pci_sysfs.c
@@ -48,11 +48,38 @@ static ssize_t show_pfgid(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(pfgid, S_IRUGO, show_pfgid, NULL);
+static void recover_callback(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct zpci_dev *zdev = get_zdev(pdev);
+ int ret;
+
+ pci_stop_and_remove_bus_device(pdev);
+ ret = zpci_disable_device(zdev);
+ if (ret)
+ return;
+
+ ret = zpci_enable_device(zdev);
+ if (ret)
+ return;
+
+ pci_rescan_bus(zdev->bus);
+}
+
+static ssize_t store_recover(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int rc = device_schedule_callback(dev, recover_callback);
+ return rc ? rc : count;
+}
+static DEVICE_ATTR(recover, S_IWUSR, NULL, store_recover);
+
static struct device_attribute *zpci_dev_attrs[] = {
&dev_attr_function_id,
&dev_attr_function_handle,
&dev_attr_pchid,
&dev_attr_pfgid,
+ &dev_attr_recover,
NULL,
};
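
The store handler above defers the actual recovery through device_schedule_callback() so the remove/re-add runs outside the attribute's own sysfs write path. A sketch of the same pattern with hypothetical names:

	/* Hypothetical attribute following the same pattern as "recover". */
	static void demo_reset_callback(struct device *dev)
	{
		/* Runs later, outside the sysfs write; safe to remove/re-add dev. */
	}

	static ssize_t store_demo_reset(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
	{
		int rc = device_schedule_callback(dev, demo_reset_callback);

		return rc ? rc : count;
	}
	static DEVICE_ATTR(demo_reset, S_IWUSR, NULL, store_demo_reset);
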
diff --git a/arch/score/Kconfig b/arch/score/Kconfig
index c8def8bc902..5fc237581ca 100644
--- a/arch/score/Kconfig
+++ b/arch/score/Kconfig
@@ -87,6 +87,8 @@ config STACKTRACE_SUPPORT
source "init/Kconfig"
+source "kernel/Kconfig.freezer"
+
config MMU
def_bool y
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 1020dd85431..1018ed3a3ca 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -643,9 +643,9 @@ config KEXEC
It is an ongoing process to be certain the hardware in a machine
is properly shutdown, so do not be surprised if this code does not
- initially work for you. It may help to enable device hotplugging
- support. As of this writing the exact hardware interface is
- strongly in flux, so no good recommendation can be made.
+ initially work for you. As of this writing the exact hardware
+ interface is strongly in flux, so no good recommendation can be
+ made.
config CRASH_DUMP
bool "kernel crash dumps (EXPERIMENTAL)"
diff --git a/arch/sh/boards/board-espt.c b/arch/sh/boards/board-espt.c
index 4d94dff9015..7291e2f11a4 100644
--- a/arch/sh/boards/board-espt.c
+++ b/arch/sh/boards/board-espt.c
@@ -80,7 +80,6 @@ static struct resource sh_eth_resources[] = {
static struct sh_eth_plat_data sh7763_eth_pdata = {
.phy = 0,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_GIGABIT,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
diff --git a/arch/sh/boards/board-sh7757lcr.c b/arch/sh/boards/board-sh7757lcr.c
index 4f114d1cd01..25c5a932f9f 100644
--- a/arch/sh/boards/board-sh7757lcr.c
+++ b/arch/sh/boards/board-sh7757lcr.c
@@ -77,7 +77,6 @@ static struct resource sh_eth0_resources[] = {
static struct sh_eth_plat_data sh7757_eth0_pdata = {
.phy = 1,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_FAST_SH4,
.set_mdio_gate = sh7757_eth_set_mdio_gate,
};
@@ -106,7 +105,6 @@ static struct resource sh_eth1_resources[] = {
static struct sh_eth_plat_data sh7757_eth1_pdata = {
.phy = 1,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_FAST_SH4,
.set_mdio_gate = sh7757_eth_set_mdio_gate,
};
@@ -151,7 +149,6 @@ static struct resource sh_eth_giga0_resources[] = {
static struct sh_eth_plat_data sh7757_eth_giga0_pdata = {
.phy = 18,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_GIGABIT,
.set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
.phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
};
@@ -186,7 +183,6 @@ static struct resource sh_eth_giga1_resources[] = {
static struct sh_eth_plat_data sh7757_eth_giga1_pdata = {
.phy = 19,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_GIGABIT,
.set_mdio_gate = sh7757_eth_giga_set_mdio_gate,
.phy_interface = PHY_INTERFACE_MODE_RGMII_ID,
};
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 61fade0ffa9..a4f630f04ea 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -159,7 +159,6 @@ static struct resource sh_eth_resources[] = {
static struct sh_eth_plat_data sh_eth_plat = {
.phy = 0x1f, /* SMSC LAN8700 */
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_FAST_SH4,
.phy_interface = PHY_INTERFACE_MODE_MII,
.ether_link_active_low = 1
};
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index b70180ef3e2..21e4230659a 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -365,7 +365,7 @@ static struct platform_device keysc_device = {
static struct resource sh_eth_resources[] = {
[0] = {
.start = SH_ETH_ADDR,
- .end = SH_ETH_ADDR + 0x1FC,
+ .end = SH_ETH_ADDR + 0x1FC - 1,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -377,6 +377,7 @@ static struct resource sh_eth_resources[] = {
static struct sh_eth_plat_data sh_eth_plat = {
.phy = 0x1f, /* SMSC LAN8187 */
.edmac_endian = EDMAC_LITTLE_ENDIAN,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
};
static struct platform_device sh_eth_device = {
diff --git a/arch/sh/boards/mach-sh7763rdp/setup.c b/arch/sh/boards/mach-sh7763rdp/setup.c
index 50ba481fa24..2c8fb04685d 100644
--- a/arch/sh/boards/mach-sh7763rdp/setup.c
+++ b/arch/sh/boards/mach-sh7763rdp/setup.c
@@ -88,7 +88,6 @@ static struct resource sh_eth_resources[] = {
static struct sh_eth_plat_data sh7763_eth_pdata = {
.phy = 1,
.edmac_endian = EDMAC_LITTLE_ENDIAN,
- .register_type = SH_ETH_REG_GIGABIT,
.phy_interface = PHY_INTERFACE_MODE_MII,
};
diff --git a/arch/sh/configs/sh03_defconfig b/arch/sh/configs/sh03_defconfig
index 2051821724c..0cf4097b71e 100644
--- a/arch/sh/configs/sh03_defconfig
+++ b/arch/sh/configs/sh03_defconfig
@@ -22,7 +22,7 @@ CONFIG_PREEMPT=y
CONFIG_CMDLINE_OVERWRITE=y
CONFIG_CMDLINE="console=ttySC1,115200 mem=64M root=/dev/nfs"
CONFIG_PCI=y
-CONFIG_HOTPLUG_PCI=m
+CONFIG_HOTPLUG_PCI=y
CONFIG_BINFMT_MISC=y
CONFIG_NET=y
CONFIG_PACKET=y
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c
index 102f5d58b03..60ed3e1c4b7 100644
--- a/arch/sh/drivers/pci/pci.c
+++ b/arch/sh/drivers/pci/pci.c
@@ -69,7 +69,6 @@ static void pcibios_scanbus(struct pci_channel *hose)
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
- pci_enable_bridges(bus);
} else {
pci_free_resource_list(&resources);
}
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index e61d43d9f68..362192ed12f 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = full_mm_flush;
+ tlb->start = start;
+ tlb->end = end;
+ tlb->fullmm = !(start | (end+1));
init_tlb_gather(tlb);
}
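
The new fullmm derivation relies on the convention that a full-mm flush is requested as start = 0, end = ~0UL; end + 1 then wraps to 0 and the expression evaluates to 1, while any bounded range yields 0. An illustrative helper:

	/* Illustrative check of the fullmm derivation used above. */
	static inline int demo_is_fullmm(unsigned long start, unsigned long end)
	{
		/* true only for start == 0, end == ~0UL (end + 1 wraps to 0) */
		return !(start | (end + 1));
	}
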
diff --git a/arch/sh/kernel/cpu/sh2/setup-sh7619.c b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
index bb11e192517..4df4d4ffe39 100644
--- a/arch/sh/kernel/cpu/sh2/setup-sh7619.c
+++ b/arch/sh/kernel/cpu/sh2/setup-sh7619.c
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
+#include <linux/sh_eth.h>
#include <linux/sh_timer.h>
#include <linux/io.h>
@@ -110,10 +111,16 @@ static struct platform_device scif2_device = {
},
};
+static struct sh_eth_plat_data eth_platform_data = {
+ .phy = 1,
+ .edmac_endian = EDMAC_LITTLE_ENDIAN,
+ .phy_interface = PHY_INTERFACE_MODE_MII,
+};
+
static struct resource eth_resources[] = {
[0] = {
.start = 0xfb000000,
- .end = 0xfb0001c8,
+ .end = 0xfb0001c7,
.flags = IORESOURCE_MEM,
},
[1] = {
@@ -127,7 +134,7 @@ static struct platform_device eth_device = {
.name = "sh7619-ether",
.id = -1,
.dev = {
- .platform_data = (void *)1,
+ .platform_data = &eth_platform_data,
},
.num_resources = ARRAY_SIZE(eth_resources),
.resource = eth_resources,
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c
index d3062259211..e3abfd4277e 100644
--- a/arch/sh/kernel/cpu/shmobile/cpuidle.c
+++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c
@@ -91,13 +91,11 @@ static struct cpuidle_driver cpuidle_driver = {
int __init sh_mobile_setup_cpuidle(void)
{
- int ret;
-
if (sh_mobile_sleep_supported & SUSP_SH_SF)
cpuidle_driver.states[1].disabled = false;
if (sh_mobile_sleep_supported & SUSP_SH_STANDBY)
cpuidle_driver.states[2].disabled = false;
- return cpuidle_register(&cpuidle_driver);
+ return cpuidle_register(&cpuidle_driver, NULL);
}
diff --git a/arch/sparc/include/asm/switch_to_64.h b/arch/sparc/include/asm/switch_to_64.h
index c7de3323819..8d284801f23 100644
--- a/arch/sparc/include/asm/switch_to_64.h
+++ b/arch/sparc/include/asm/switch_to_64.h
@@ -48,8 +48,8 @@ do { save_and_clear_fpu(); \
"wrpr %%g0, 14, %%pil\n\t" \
"brz,pt %%o7, switch_to_pc\n\t" \
" mov %%g7, %0\n\t" \
- "sethi %%hi(ret_from_syscall), %%g1\n\t" \
- "jmpl %%g1 + %%lo(ret_from_syscall), %%g0\n\t" \
+ "sethi %%hi(ret_from_fork), %%g1\n\t" \
+ "jmpl %%g1 + %%lo(ret_from_fork), %%g0\n\t" \
" nop\n\t" \
".globl switch_to_pc\n\t" \
"switch_to_pc:\n\t" \
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c
index e4de74c2c9b..cb5d272d658 100644
--- a/arch/sparc/kernel/cpumap.c
+++ b/arch/sparc/kernel/cpumap.c
@@ -327,6 +327,7 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index)
case SUN4V_CHIP_NIAGARA3:
case SUN4V_CHIP_NIAGARA4:
case SUN4V_CHIP_NIAGARA5:
+ case SUN4V_CHIP_SPARC64X:
rover_inc_table = niagara_iterate_method;
break;
default:
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index e2a03004508..33c02b15f47 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -839,7 +839,7 @@ sys_sigreturn:
nop
call syscall_trace
- nop
+ mov 1, %o1
1:
/* We don't want to muck with user registers like a
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index c8759550799..53c0a82e603 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -42,7 +42,7 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
struct thread_info *t = task_thread_info(p);
extern unsigned int switch_to_pc;
- extern unsigned int ret_from_syscall;
+ extern unsigned int ret_from_fork;
struct reg_window *win;
unsigned long pc, cwp;
int i;
@@ -66,7 +66,7 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
gdb_regs[i] = 0;
if (t->new_child)
- pc = (unsigned long) &ret_from_syscall;
+ pc = (unsigned long) &ret_from_fork;
else
pc = (unsigned long) &switch_to_pc;
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index 0746e5e32b3..fde5a419cf2 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -25,11 +25,10 @@ kvmap_itlb:
*/
kvmap_itlb_4v:
-kvmap_itlb_nonlinear:
/* Catch kernel NULL pointer calls. */
sethi %hi(PAGE_SIZE), %g5
cmp %g4, %g5
- bleu,pn %xcc, kvmap_dtlb_longpath
+ blu,pn %xcc, kvmap_itlb_longpath
nop
KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 7ff45e4ba68..773c1f2983c 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
+#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/smp.h>
@@ -116,6 +117,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
preempt_enable();
}
+EXPORT_SYMBOL_GPL(flush_ptrace_access);
static int get_from_target(struct task_struct *target, unsigned long uaddr,
void *kbuf, int len)
@@ -1087,7 +1089,7 @@ asmlinkage void syscall_trace_leave(struct pt_regs *regs)
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
- trace_sys_exit(regs, regs->u_regs[UREG_G1]);
+ trace_sys_exit(regs, regs->u_regs[UREG_I0]);
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, 0);
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 13785547e43..3fdb455e331 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -499,12 +499,14 @@ static void __init init_sparc64_elf_hwcap(void)
sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
- sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= HWCAP_SPARC_BLKINIT;
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
- sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= HWCAP_SPARC_N2;
}
@@ -530,13 +532,15 @@ static void __init init_sparc64_elf_hwcap(void)
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
- sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
AV_SPARC_ASI_BLK_INIT |
AV_SPARC_POPC);
if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
- sun4v_chip_type == SUN4V_CHIP_NIAGARA5)
+ sun4v_chip_type == SUN4V_CHIP_NIAGARA5 ||
+ sun4v_chip_type == SUN4V_CHIP_SPARC64X)
cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
AV_SPARC_FMAF);
}
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index 22a1098961f..d950197a17e 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -98,8 +98,8 @@ sys_clone:
ba,pt %xcc, sparc_do_fork
add %sp, PTREGS_OFF, %o2
- .globl ret_from_syscall
-ret_from_syscall:
+ .globl ret_from_fork
+ret_from_fork:
/* Clear current_thread_info()->new_child. */
stb %g0, [%g6 + TI_NEW_CHILD]
call schedule_tail
@@ -152,7 +152,7 @@ linux_syscall_trace32:
srl %i4, 0, %o4
srl %i1, 0, %o1
srl %i2, 0, %o2
- ba,pt %xcc, 2f
+ ba,pt %xcc, 5f
srl %i3, 0, %o3
linux_syscall_trace:
@@ -182,13 +182,13 @@ linux_sparc_syscall32:
srl %i1, 0, %o1 ! IEU0 Group
ldx [%g6 + TI_FLAGS], %l0 ! Load
- srl %i5, 0, %o5 ! IEU1
+ srl %i3, 0, %o3 ! IEU0
srl %i2, 0, %o2 ! IEU0 Group
andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
bne,pn %icc, linux_syscall_trace32 ! CTI
mov %i0, %l5 ! IEU1
- call %l7 ! CTI Group brk forced
- srl %i3, 0, %o3 ! IEU0
+5: call %l7 ! CTI Group brk forced
+ srl %i5, 0, %o5 ! IEU1
ba,a,pt %xcc, 3f
/* Linux native system calls enter here... */
diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
index e0b1e13a073..ad4bde3bb61 100644
--- a/arch/sparc/kernel/trampoline_64.S
+++ b/arch/sparc/kernel/trampoline_64.S
@@ -129,7 +129,6 @@ startup_continue:
clr %l5
sethi %hi(num_kernel_image_mappings), %l6
lduw [%l6 + %lo(num_kernel_image_mappings)], %l6
- add %l6, 1, %l6
mov 15, %l7
BRANCH_IF_ANY_CHEETAH(g1,g5,2f)
@@ -222,7 +221,6 @@ niagara_lock_tlb:
clr %l5
sethi %hi(num_kernel_image_mappings), %l6
lduw [%l6 + %lo(num_kernel_image_mappings)], %l6
- add %l6, 1, %l6
1:
mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 0c4e35e522f..323335b9cd2 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -98,15 +98,6 @@ EXPORT_SYMBOL(___copy_from_user);
EXPORT_SYMBOL(___copy_in_user);
EXPORT_SYMBOL(__clear_user);
-/* RW semaphores */
-EXPORT_SYMBOL(__down_read);
-EXPORT_SYMBOL(__down_read_trylock);
-EXPORT_SYMBOL(__down_write);
-EXPORT_SYMBOL(__down_write_trylock);
-EXPORT_SYMBOL(__up_read);
-EXPORT_SYMBOL(__up_write);
-EXPORT_SYMBOL(__downgrade_write);
-
/* Atomic counter implementation. */
EXPORT_SYMBOL(atomic_add);
EXPORT_SYMBOL(atomic_add_ret);
diff --git a/arch/tile/gxio/iorpc_mpipe.c b/arch/tile/gxio/iorpc_mpipe.c
index 31b87bf8c02..4f8f3d619c4 100644
--- a/arch/tile/gxio/iorpc_mpipe.c
+++ b/arch/tile/gxio/iorpc_mpipe.c
@@ -387,6 +387,27 @@ int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac)
EXPORT_SYMBOL(gxio_mpipe_link_close_aux);
+struct link_set_attr_aux_param {
+ int mac;
+ uint32_t attr;
+ int64_t val;
+};
+
+int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t * context, int mac,
+ uint32_t attr, int64_t val)
+{
+ struct link_set_attr_aux_param temp;
+ struct link_set_attr_aux_param *params = &temp;
+
+ params->mac = mac;
+ params->attr = attr;
+ params->val = val;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_MPIPE_OP_LINK_SET_ATTR_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_link_set_attr_aux);
struct get_timestamp_aux_param {
uint64_t sec;
@@ -454,6 +475,51 @@ int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context,
EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_aux);
+struct adjust_timestamp_freq_param {
+ int32_t ppb;
+};
+
+int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t * context,
+ int32_t ppb)
+{
+ struct adjust_timestamp_freq_param temp;
+ struct adjust_timestamp_freq_param *params = &temp;
+
+ params->ppb = ppb;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params),
+ GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_adjust_timestamp_freq);
+
+struct config_edma_ring_blks_param {
+ unsigned int ering;
+ unsigned int max_blks;
+ unsigned int min_snf_blks;
+ unsigned int db;
+};
+
+int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t * context,
+ unsigned int ering, unsigned int max_blks,
+ unsigned int min_snf_blks, unsigned int db)
+{
+ struct config_edma_ring_blks_param temp;
+ struct config_edma_ring_blks_param *params = &temp;
+
+ params->ering = ering;
+ params->max_blks = max_blks;
+ params->min_snf_blks = min_snf_blks;
+ params->db = db;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params),
+ GXIO_MPIPE_OP_CONFIG_EDMA_RING_BLKS);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_config_edma_ring_blks);
+
struct arm_pollfd_param {
union iorpc_pollfd pollfd;
};
diff --git a/arch/tile/gxio/iorpc_mpipe_info.c b/arch/tile/gxio/iorpc_mpipe_info.c
index d0254aa60cb..64883aabeb9 100644
--- a/arch/tile/gxio/iorpc_mpipe_info.c
+++ b/arch/tile/gxio/iorpc_mpipe_info.c
@@ -16,6 +16,24 @@
#include "gxio/iorpc_mpipe_info.h"
+struct instance_aux_param {
+ _gxio_mpipe_link_name_t name;
+};
+
+int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context,
+ _gxio_mpipe_link_name_t name)
+{
+ struct instance_aux_param temp;
+ struct instance_aux_param *params = &temp;
+
+ params->name = name;
+
+ return hv_dev_pwrite(context->fd, 0, (HV_VirtAddr) params,
+ sizeof(*params), GXIO_MPIPE_INFO_OP_INSTANCE_AUX);
+}
+
+EXPORT_SYMBOL(gxio_mpipe_info_instance_aux);
+
struct enumerate_aux_param {
_gxio_mpipe_link_name_t name;
_gxio_mpipe_link_mac_t mac;
diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c
index e71c63390ac..5301a9ffbae 100644
--- a/arch/tile/gxio/mpipe.c
+++ b/arch/tile/gxio/mpipe.c
@@ -36,8 +36,14 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
int fd;
int i;
+ if (mpipe_index >= GXIO_MPIPE_INSTANCE_MAX)
+ return -EINVAL;
+
snprintf(file, sizeof(file), "mpipe/%d/iorpc", mpipe_index);
fd = hv_dev_open((HV_VirtAddr) file, 0);
+
+ context->fd = fd;
+
if (fd < 0) {
if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
return fd;
@@ -45,8 +51,6 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
return -ENODEV;
}
- context->fd = fd;
-
/* Map in the MMIO space. */
context->mmio_cfg_base = (void __force *)
iorpc_ioremap(fd, HV_MPIPE_CONFIG_MMIO_OFFSET,
@@ -64,12 +68,15 @@ int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
for (i = 0; i < 8; i++)
context->__stacks.stacks[i] = 255;
+ context->instance = mpipe_index;
+
return 0;
fast_failed:
iounmap((void __force __iomem *)(context->mmio_cfg_base));
cfg_failed:
hv_dev_close(context->fd);
+ context->fd = -1;
return -ENODEV;
}
@@ -383,7 +390,7 @@ EXPORT_SYMBOL_GPL(gxio_mpipe_iqueue_init);
int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
gxio_mpipe_context_t *context,
- unsigned int edma_ring_id,
+ unsigned int ering,
unsigned int channel,
void *mem, unsigned int mem_size,
unsigned int mem_flags)
@@ -394,7 +401,7 @@ int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
/* Offset used to read number of completed commands. */
MPIPE_EDMA_POST_REGION_ADDR_t offset;
- int result = gxio_mpipe_init_edma_ring(context, edma_ring_id, channel,
+ int result = gxio_mpipe_init_edma_ring(context, ering, channel,
mem, mem_size, mem_flags);
if (result < 0)
return result;
@@ -405,7 +412,7 @@ int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
offset.region =
MPIPE_MMIO_ADDR__REGION_VAL_EDMA -
MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
- offset.ring = edma_ring_id;
+ offset.ring = ering;
__gxio_dma_queue_init(&equeue->dma_queue,
context->mmio_fast_base + offset.word,
@@ -413,6 +420,9 @@ int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
equeue->edescs = mem;
equeue->mask_num_entries = num_entries - 1;
equeue->log2_num_entries = __builtin_ctz(num_entries);
+ equeue->context = context;
+ equeue->ering = ering;
+ equeue->channel = channel;
return 0;
}
@@ -493,6 +503,20 @@ static gxio_mpipe_context_t *_gxio_get_link_context(void)
return contextp;
}
+int gxio_mpipe_link_instance(const char *link_name)
+{
+ _gxio_mpipe_link_name_t name;
+ gxio_mpipe_context_t *context = _gxio_get_link_context();
+
+ if (!context)
+ return GXIO_ERR_NO_DEVICE;
+
+ strncpy(name.name, link_name, sizeof(name.name));
+ name.name[GXIO_MPIPE_LINK_NAME_LEN - 1] = '\0';
+
+ return gxio_mpipe_info_instance_aux(context, name);
+}
+
int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
{
int rv;
@@ -543,3 +567,12 @@ int gxio_mpipe_link_close(gxio_mpipe_link_t *link)
}
EXPORT_SYMBOL_GPL(gxio_mpipe_link_close);
+
+int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr,
+ int64_t val)
+{
+ return gxio_mpipe_link_set_attr_aux(link->context, link->mac, attr,
+ val);
+}
+
+EXPORT_SYMBOL_GPL(gxio_mpipe_link_set_attr);
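
A hedged usage sketch for the two new link helpers exported above; the gxio_mpipe_link_open() call mentioned in the comment is assumed from the existing gxio API and is not part of this hunk:

	/* Hypothetical caller: look up the mPIPE instance for a link by name,
	 * then set an attribute on a link already opened against ctx. */
	static int demo_configure_link(gxio_mpipe_context_t *ctx,
				       gxio_mpipe_link_t *link,
				       const char *name,
				       uint32_t attr, int64_t val)
	{
		int instance = gxio_mpipe_link_instance(name);

		if (instance < 0)
			return instance;	/* e.g. GXIO_ERR_NO_DEVICE */

		/* link is assumed opened beforehand, e.g. via
		 * gxio_mpipe_link_open(link, ctx, name, 0). */
		return gxio_mpipe_link_set_attr(link, attr, val);
	}
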
diff --git a/arch/tile/include/asm/topology.h b/arch/tile/include/asm/topology.h
index d5e86c9f74f..d15c0d8d550 100644
--- a/arch/tile/include/asm/topology.h
+++ b/arch/tile/include/asm/topology.h
@@ -89,9 +89,6 @@ static inline const struct cpumask *cpumask_of_node(int node)
#define topology_core_id(cpu) (cpu)
#define topology_core_cpumask(cpu) ((void)(cpu), cpu_online_mask)
#define topology_thread_cpumask(cpu) cpumask_of(cpu)
-
-/* indicates that pointers to the topology struct cpumask maps are valid */
-#define arch_provides_topology_pointers yes
#endif
#endif /* _ASM_TILE_TOPOLOGY_H */
diff --git a/arch/tile/include/gxio/iorpc_mpipe.h b/arch/tile/include/gxio/iorpc_mpipe.h
index 9d50fce1b1a..fdd07f88cfd 100644
--- a/arch/tile/include/gxio/iorpc_mpipe.h
+++ b/arch/tile/include/gxio/iorpc_mpipe.h
@@ -44,10 +44,13 @@
#define GXIO_MPIPE_OP_REGISTER_CLIENT_MEMORY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1210)
#define GXIO_MPIPE_OP_LINK_OPEN_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1211)
#define GXIO_MPIPE_OP_LINK_CLOSE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1212)
+#define GXIO_MPIPE_OP_LINK_SET_ATTR_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1213)
-#define GXIO_MPIPE_OP_GET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121e)
-#define GXIO_MPIPE_OP_SET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x121f)
-#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1220)
+#define GXIO_MPIPE_OP_GET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x121e)
+#define GXIO_MPIPE_OP_SET_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x121f)
+#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1220)
+#define GXIO_MPIPE_OP_CONFIG_EDMA_RING_BLKS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1221)
+#define GXIO_MPIPE_OP_ADJUST_TIMESTAMP_FREQ IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1222)
#define GXIO_MPIPE_OP_ARM_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9000)
#define GXIO_MPIPE_OP_CLOSE_POLLFD IORPC_OPCODE(IORPC_FORMAT_KERNEL_POLLFD, 0x9001)
#define GXIO_MPIPE_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
@@ -114,6 +117,8 @@ int gxio_mpipe_link_open_aux(gxio_mpipe_context_t * context,
int gxio_mpipe_link_close_aux(gxio_mpipe_context_t * context, int mac);
+int gxio_mpipe_link_set_attr_aux(gxio_mpipe_context_t * context, int mac,
+ uint32_t attr, int64_t val);
int gxio_mpipe_get_timestamp_aux(gxio_mpipe_context_t * context, uint64_t * sec,
uint64_t * nsec, uint64_t * cycles);
@@ -124,6 +129,9 @@ int gxio_mpipe_set_timestamp_aux(gxio_mpipe_context_t * context, uint64_t sec,
int gxio_mpipe_adjust_timestamp_aux(gxio_mpipe_context_t * context,
int64_t nsec);
+int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t * context,
+ int32_t ppb);
+
int gxio_mpipe_arm_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie);
int gxio_mpipe_close_pollfd(gxio_mpipe_context_t * context, int pollfd_cookie);
diff --git a/arch/tile/include/gxio/iorpc_mpipe_info.h b/arch/tile/include/gxio/iorpc_mpipe_info.h
index 0bcf3f71ce8..476c5e5ca22 100644
--- a/arch/tile/include/gxio/iorpc_mpipe_info.h
+++ b/arch/tile/include/gxio/iorpc_mpipe_info.h
@@ -27,11 +27,15 @@
#include <asm/pgtable.h>
+#define GXIO_MPIPE_INFO_OP_INSTANCE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1250)
#define GXIO_MPIPE_INFO_OP_ENUMERATE_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1251)
#define GXIO_MPIPE_INFO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
#define GXIO_MPIPE_INFO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
+int gxio_mpipe_info_instance_aux(gxio_mpipe_info_context_t * context,
+ _gxio_mpipe_link_name_t name);
+
int gxio_mpipe_info_enumerate_aux(gxio_mpipe_info_context_t * context,
unsigned int idx,
_gxio_mpipe_link_name_t * name,
diff --git a/arch/tile/include/gxio/mpipe.h b/arch/tile/include/gxio/mpipe.h
index b74f470ed11..e37cf4f0cff 100644
--- a/arch/tile/include/gxio/mpipe.h
+++ b/arch/tile/include/gxio/mpipe.h
@@ -220,6 +220,13 @@ typedef MPIPE_PDESC_t gxio_mpipe_idesc_t;
*/
typedef MPIPE_EDMA_DESC_t gxio_mpipe_edesc_t;
+/*
+ * Max # of mpipe instances. 2 currently.
+ */
+#define GXIO_MPIPE_INSTANCE_MAX HV_MPIPE_INSTANCE_MAX
+
+#define NR_MPIPE_MAX GXIO_MPIPE_INSTANCE_MAX
+
/* Get the "va" field from an "idesc".
*
* This is the address at which the ingress hardware copied the first
@@ -311,6 +318,9 @@ typedef struct {
/* File descriptor for calling up to Linux (and thus the HV). */
int fd;
+ /* Corresponding mpipe instance #. */
+ int instance;
+
/* The VA at which configuration registers are mapped. */
char *mmio_cfg_base;
@@ -810,7 +820,7 @@ extern int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
/* Initialize an eDMA ring, using the given memory and size.
*
* @param context An initialized mPIPE context.
- * @param ring The eDMA ring index.
+ * @param ering The eDMA ring index.
* @param channel The channel to use. This must be one of the channels
* associated with the context's set of open links.
* @param mem A physically contiguous region of memory to be filled
@@ -823,10 +833,37 @@ extern int gxio_mpipe_alloc_edma_rings(gxio_mpipe_context_t *context,
* ::GXIO_ERR_INVAL_MEMORY_SIZE on failure.
*/
extern int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context,
- unsigned int ring, unsigned int channel,
+ unsigned int ering, unsigned int channel,
void *mem, size_t mem_size,
unsigned int mem_flags);
+/* Set the "max_blks", "min_snf_blks", and "db" fields of
+ * ::MPIPE_EDMA_RG_INIT_DAT_THRESH_t for a given edma ring.
+ *
+ * The global pool of dynamic blocks will be automatically adjusted.
+ *
+ * This function should not be called after any egress has been done
+ * on the edma ring.
+ *
+ * Most applications should just use gxio_mpipe_equeue_set_snf_size().
+ *
+ * @param context An initialized mPIPE context.
+ * @param ering The eDMA ring index.
+ * @param max_blks The number of blocks to dedicate to the ring
+ * (normally min_snf_blks + 1). Must be greater than min_snf_blks.
+ * @param min_snf_blks The number of blocks which must be stored
+ * prior to starting to send the packet (normally 12).
+ * @param db Whether to allow use of dynamic blocks by the ring
+ * (normally 1).
+ *
+ * @return 0 on success, negative on error.
+ */
+extern int gxio_mpipe_config_edma_ring_blks(gxio_mpipe_context_t *context,
+ unsigned int ering,
+ unsigned int max_blks,
+ unsigned int min_snf_blks,
+ unsigned int db);
+
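/*
 * A minimal usage sketch of the low-level call above, assuming "ctx"
 * was set up with gxio_mpipe_init() and eDMA ring 0 has been
 * initialized but not yet used for egress; the values follow the
 * "normal" settings documented in the parameter list (max_blks =
 * min_snf_blks + 1, min_snf_blks = 12, dynamic blocks allowed).
 */
static int example_config_ering_blks(gxio_mpipe_context_t *ctx)
{
        return gxio_mpipe_config_edma_ring_blks(ctx, /* ering */ 0,
                                                /* max_blks */ 13,
                                                /* min_snf_blks */ 12,
                                                /* db */ 1);
}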
/*****************************************************************
* Classifier Program *
******************************************************************/
@@ -1288,15 +1325,39 @@ typedef struct {
/* The log2() of the number of entries. */
unsigned long log2_num_entries;
+ /* The context. */
+ gxio_mpipe_context_t *context;
+
+ /* The ering. */
+ unsigned int ering;
+
+ /* The channel. */
+ unsigned int channel;
+
} gxio_mpipe_equeue_t;
/* Initialize an "equeue".
*
- * Takes the equeue plus the same args as gxio_mpipe_init_edma_ring().
+ * This function uses gxio_mpipe_init_edma_ring() to initialize the
+ * underlying edma_ring using the provided arguments.
+ *
+ * @param equeue An egress queue to be initialized.
+ * @param context An initialized mPIPE context.
+ * @param ering The eDMA ring index.
+ * @param channel The channel to use. This must be one of the channels
+ * associated with the context's set of open links.
+ * @param mem A physically contiguous region of memory to be filled
+ * with a ring of ::gxio_mpipe_edesc_t structures.
+ * @param mem_size Number of bytes in the ring. Must be 512, 2048,
+ * 8192 or 65536, times 16 (i.e. sizeof(gxio_mpipe_edesc_t)).
+ * @param mem_flags ::gxio_mpipe_mem_flags_e memory flags.
+ *
+ * @return 0 on success, ::GXIO_MPIPE_ERR_BAD_EDMA_RING or
+ * ::GXIO_ERR_INVAL_MEMORY_SIZE on failure.
*/
extern int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
gxio_mpipe_context_t *context,
- unsigned int edma_ring_id,
+ unsigned int ering,
unsigned int channel,
void *mem, unsigned int mem_size,
unsigned int mem_flags);
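/*
 * A minimal setup sketch, assuming "ctx" was initialized with
 * gxio_mpipe_init() and "edescs" points to a physically contiguous
 * buffer of 512 descriptors (512 * 16 = 8192 bytes, one of the legal
 * sizes listed above); ering 0 and a mem_flags value of 0 are
 * placeholders for this illustration.
 */
static int example_equeue_setup(gxio_mpipe_equeue_t *equeue,
                                gxio_mpipe_context_t *ctx,
                                unsigned int channel, void *edescs)
{
        return gxio_mpipe_equeue_init(equeue, ctx, /* ering */ 0, channel,
                                      edescs,
                                      512 * sizeof(gxio_mpipe_edesc_t),
                                      /* mem_flags */ 0);
}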
@@ -1494,6 +1555,37 @@ static inline int gxio_mpipe_equeue_is_complete(gxio_mpipe_equeue_t *equeue,
completion_slot, update);
}
+/* Set the snf (store and forward) size for an equeue.
+ *
+ * The snf size for an equeue defaults to 1536, and encodes the size
+ * of the largest packet for which egress is guaranteed to avoid
+ * transmission underruns and/or corrupt checksums under heavy load.
+ *
+ * The snf size affects a global resource pool which cannot support,
+ * for example, all 24 equeues each requesting an snf size of 8K.
+ *
+ * To ensure that jumbo packets can be egressed properly, the snf size
+ * should be set to the size of the largest possible packet, which
+ * will usually be limited by the size of the app's largest buffer.
+ *
+ * This is a convenience wrapper around
+ * gxio_mpipe_config_edma_ring_blks().
+ *
+ * This function should not be called after any egress has been done
+ * on the equeue.
+ *
+ * @param equeue An egress queue initialized via gxio_mpipe_equeue_init().
+ * @param size The snf size, in bytes.
+ * @return Zero on success, negative error otherwise.
+ */
+static inline int gxio_mpipe_equeue_set_snf_size(gxio_mpipe_equeue_t *equeue,
+ size_t size)
+{
+ int blks = (size + 127) / 128;
+ return gxio_mpipe_config_edma_ring_blks(equeue->context, equeue->ering,
+ blks + 1, blks, 1);
+}
+
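/*
 * A short sketch of the wrapper above for a hypothetical jumbo-frame
 * application: a 9000-byte snf size rounds up to (9000 + 127) / 128 =
 * 71 blocks, so the call requests max_blks = 72, min_snf_blks = 71
 * and dynamic blocks enabled for the equeue's ring.
 */
static int example_enable_jumbo_snf(gxio_mpipe_equeue_t *equeue)
{
        return gxio_mpipe_equeue_set_snf_size(equeue, 9000);
}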
/*****************************************************************
* Link Management *
******************************************************************/
@@ -1634,6 +1726,24 @@ typedef struct {
uint8_t mac;
} gxio_mpipe_link_t;
+/* Translate a link name to the instance number of the mPIPE shim which is
+ * connected to that link. This call does not verify whether the link is
+ * currently available, and does not reserve any link resources;
+ * gxio_mpipe_link_open() must be called to perform those functions.
+ *
+ * Typically applications will call this function to translate a link name
+ * to an mPIPE instance number; call gxio_mpipe_init(), passing it that
+ * instance number, to initialize the mPIPE shim; and then call
+ * gxio_mpipe_link_open(), passing it the same link name plus the mPIPE
+ * context, to configure the link.
+ *
+ * @param link_name Name of the link; see @ref gxio_mpipe_link_names.
+ * @return The mPIPE instance number which is associated with the named
+ * link, or a negative error code (::GXIO_ERR_NO_DEVICE) if the link does
+ * not exist.
+ */
+extern int gxio_mpipe_link_instance(const char *link_name);
+
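/*
 * A sketch of the open sequence described above, assuming a link named
 * "xgbe0" exists on the system and the usual gxio_mpipe_init(context,
 * instance) and gxio_mpipe_link_open(link, context, name, flags)
 * signatures; error handling is reduced to simple early returns.
 */
static int example_open_link(gxio_mpipe_context_t *ctx,
                             gxio_mpipe_link_t *link)
{
        int instance = gxio_mpipe_link_instance("xgbe0");

        if (instance < 0)
                return instance;
        if (gxio_mpipe_init(ctx, instance) < 0)
                return -1;
        return gxio_mpipe_link_open(link, ctx, "xgbe0", 0);
}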
/* Retrieve one of this system's legal link names, and its MAC address.
*
* @param index Link name index. If a system supports N legal link names,
@@ -1697,6 +1807,17 @@ static inline int gxio_mpipe_link_channel(gxio_mpipe_link_t *link)
return link->channel;
}
+/* Set a link attribute.
+ *
+ * @param link A properly initialized link state object.
+ * @param attr An attribute from the set of @ref gxio_mpipe_link_attrs.
+ * @param val New value of the attribute.
+ * @return 0 if the attribute was successfully set, or a negative error
+ * code.
+ */
+extern int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr,
+ int64_t val);
+
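/*
 * A sketch of setting a link attribute, assuming that
 * GXIO_MPIPE_LINK_RECEIVE_JUMBO is one of the ::gxio_mpipe_link_attrs
 * provided by this build: ask an already-opened link to accept jumbo
 * frames.
 */
static int example_enable_jumbo_rx(gxio_mpipe_link_t *link)
{
        return gxio_mpipe_link_set_attr(link, GXIO_MPIPE_LINK_RECEIVE_JUMBO, 1);
}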
///////////////////////////////////////////////////////////////////
// Timestamp //
///////////////////////////////////////////////////////////////////
@@ -1733,4 +1854,18 @@ extern int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
extern int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context,
int64_t delta);
+/** Adjust the mPIPE timestamp clock frequency.
+ *
+ * @param context An initialized mPIPE context.
+ * @param ppb The adjustment, as a 32-bit signed value in parts per billion (PPB).
+ * The absolute value of ppb must be less than or equal to 1000000000.
+ * Values less than about 30000 will generally cause a GXIO_ERR_INVAL
+ * return due to the granularity of the hardware that converts reference
+ * clock cycles into seconds and nanoseconds.
+ * @return If the call was successful, zero; otherwise, a negative error
+ * code.
+ */
+extern int gxio_mpipe_adjust_timestamp_freq(gxio_mpipe_context_t* context,
+ int32_t ppb);
+
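/*
 * A sketch of a frequency trim, assuming "ctx" is an initialized mPIPE
 * context: slow the timestamp clock by 50000 parts per billion
 * (50 ppm). Per the note above, adjustments much smaller in magnitude
 * than about 30000 ppb may be rejected with GXIO_ERR_INVAL.
 */
static int example_slow_timestamp_clock(gxio_mpipe_context_t *ctx)
{
        return gxio_mpipe_adjust_timestamp_freq(ctx, -50000);
}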
#endif /* !_GXIO_MPIPE_H_ */
diff --git a/arch/tile/include/hv/drv_mpipe_intf.h b/arch/tile/include/hv/drv_mpipe_intf.h
index 6cdae3bf046..c97e416dd96 100644
--- a/arch/tile/include/hv/drv_mpipe_intf.h
+++ b/arch/tile/include/hv/drv_mpipe_intf.h
@@ -23,6 +23,9 @@
#include <arch/mpipe_constants.h>
+/** Number of mPIPE instances supported */
+#define HV_MPIPE_INSTANCE_MAX (2)
+
/** Number of buffer stacks (32). */
#define HV_MPIPE_NUM_BUFFER_STACKS \
(MPIPE_MMIO_INIT_DAT_GX36_1__BUFFER_STACK_MASK_WIDTH)
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index 11425633b2d..6640e7bbeaa 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -508,13 +508,8 @@ static void fixup_read_and_payload_sizes(struct pci_controller *controller)
rc_dev_cap.word);
/* Configure PCI Express MPS setting. */
- list_for_each_entry(child, &root_bus->children, node) {
- struct pci_dev *self = child->self;
- if (!self)
- continue;
-
- pcie_bus_configure_settings(child, self->pcie_mpss);
- }
+ list_for_each_entry(child, &root_bus->children, node)
+ pcie_bus_configure_settings(child);
/*
* Set the mac_config register in trio based on the MPS/MRS of the link.
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h
index 4febacd1a8a..29b0301c18a 100644
--- a/arch/um/include/asm/tlb.h
+++ b/arch/um/include/asm/tlb.h
@@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb)
}
static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = full_mm_flush;
+ tlb->start = start;
+ tlb->end = end;
+ tlb->fullmm = !(start | (end+1));
init_tlb_gather(tlb);
}
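/*
 * A minimal sketch of the full-mm test used above: the full flush is
 * encoded as the range (0, ~0UL), so (start | (end + 1)) is zero for
 * that range and nonzero for any partial range such as
 * (0x1000, 0x2000).
 */
static inline int example_is_fullmm(unsigned long start, unsigned long end)
{
        return !(start | (end + 1));
}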
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index b32ebf92b0c..5c0ed72c02a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -16,6 +16,7 @@ config X86_64
def_bool y
depends on 64BIT
select X86_DEV_DMA_OPS
+ select ARCH_USE_CMPXCHG_LOCKREF
### Arch settings
config X86
@@ -81,7 +82,6 @@ config X86
select HAVE_USER_RETURN_NOTIFIER
select ARCH_BINFMT_ELF_RANDOMIZE_PIE
select HAVE_ARCH_JUMP_LABEL
- select HAVE_TEXT_POKE_SMP
select HAVE_GENERIC_HARDIRQS
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
select SPARSE_IRQ
@@ -632,6 +632,7 @@ config PARAVIRT_DEBUG
config PARAVIRT_SPINLOCKS
bool "Paravirtualization layer for spinlocks"
depends on PARAVIRT && SMP
+ select UNINLINE_SPIN_UNLOCK
---help---
Paravirtualized spinlocks allow a pvops backend to replace the
spinlock implementation with something virtualization-friendly
@@ -656,6 +657,15 @@ config KVM_GUEST
underlying device model, the host provides the guest with
timing infrastructure such as time of day, and system time
+config KVM_DEBUG_FS
+ bool "Enable debug information for KVM Guests in debugfs"
+ depends on KVM_GUEST && DEBUG_FS
+ default n
+ ---help---
+ This option enables collection of various statistics for KVM guests.
+ Statistics are displayed in the debugfs filesystem. Enabling this option
+ may incur significant overhead.
+
source "arch/x86/lguest/Kconfig"
config PARAVIRT_TIME_ACCOUNTING
@@ -1344,8 +1354,12 @@ config ARCH_SELECT_MEMORY_MODEL
depends on ARCH_SPARSEMEM_ENABLE
config ARCH_MEMORY_PROBE
- def_bool y
+ bool "Enable sysfs memory/probe interface"
depends on X86_64 && MEMORY_HOTPLUG
+ help
+ This option enables a sysfs memory/probe interface for testing.
+ See Documentation/memory-hotplug.txt for more information.
+ If you are unsure how to answer this question, answer N.
config ARCH_PROC_KCORE_TEXT
def_bool y
@@ -1627,9 +1641,9 @@ config KEXEC
It is an ongoing process to be certain the hardware in a machine
is properly shutdown, so do not be surprised if this code does not
- initially work for you. It may help to enable device hotplugging
- support. As of this writing the exact hardware interface is
- strongly in flux, so no good recommendation can be made.
+ initially work for you. As of this writing the exact hardware
+ interface is strongly in flux, so no good recommendation can be
+ made.
config CRASH_DUMP
bool "kernel crash dumps"
@@ -1716,9 +1730,10 @@ config X86_NEED_RELOCS
depends on X86_32 && RELOCATABLE
config PHYSICAL_ALIGN
- hex "Alignment value to which kernel should be aligned" if X86_32
+ hex "Alignment value to which kernel should be aligned"
default "0x1000000"
- range 0x2000 0x1000000
+ range 0x2000 0x1000000 if X86_32
+ range 0x200000 0x1000000 if X86_64
---help---
This value puts the alignment restrictions on physical address
where kernel is loaded and run from. Kernel is compiled for an
@@ -1736,6 +1751,9 @@ config PHYSICAL_ALIGN
end result is that kernel runs from a physical address meeting
above alignment restrictions.
+ On 32-bit this value must be a multiple of 0x2000. On 64-bit
+ this value must be a multiple of 0x200000.
+
Don't change this unless you know what you are doing.
config HOTPLUG_CPU
@@ -2270,6 +2288,32 @@ config RAPIDIO
source "drivers/rapidio/Kconfig"
+config X86_SYSFB
+ bool "Mark VGA/VBE/EFI FB as generic system framebuffer"
+ help
+ Firmwares often provide initial graphics framebuffers so the BIOS,
+ bootloader or kernel can show basic video output during boot for
+ user guidance and debugging. Historically, x86 used the VESA BIOS
+ Extensions and EFI-framebuffers for this, which are mostly limited
+ to x86.
+ This option, if enabled, marks VGA/VBE/EFI framebuffers as generic
+ framebuffers so the new generic system-framebuffer drivers can be
+ used on x86. If the framebuffer is not compatible with the generic
+ modes, it is advertised as a fallback platform framebuffer so legacy
+ drivers like efifb, vesafb and uvesafb can pick it up.
+ If this option is not selected, all system framebuffers are always
+ marked as fallback platform framebuffers as usual.
+
+ Note: Legacy fbdev drivers, including vesafb, efifb, uvesafb, will
+ not be able to pick up generic system framebuffers if this option
+ is selected. You are highly encouraged to enable simplefb as a
+ replacement if you select this option. simplefb can correctly deal
+ with generic system framebuffers. But you should still keep vesafb
+ and others enabled as fallback if a system framebuffer is
+ incompatible with simplefb.
+
+ If unsure, say Y.
+
endmenu
@@ -2332,10 +2376,6 @@ config HAVE_ATOMIC_IOMAP
def_bool y
depends on X86_32
-config HAVE_TEXT_POKE_SMP
- bool
- select STOP_MACHINE if SMP
-
config X86_DEV_DMA_OPS
bool
depends on X86_64 || STA2X11
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 07639c656fc..41250fb3398 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -16,6 +16,10 @@ endif
# e.g.: obj-y += foo_$(BITS).o
export BITS
+ifdef CONFIG_X86_NEED_RELOCS
+ LDFLAGS_vmlinux := --emit-relocs
+endif
+
ifeq ($(CONFIG_X86_32),y)
BITS := 32
UTS_MACHINE := i386
@@ -25,10 +29,6 @@ ifeq ($(CONFIG_X86_32),y)
KBUILD_AFLAGS += $(biarch)
KBUILD_CFLAGS += $(biarch)
- ifdef CONFIG_RELOCATABLE
- LDFLAGS_vmlinux := --emit-relocs
- endif
-
KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
# Never want PIC in a 32-bit kernel, prevent breakage with GCC built
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index 5b7531966b8..ef72baeff48 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -355,6 +355,7 @@ int strncmp(const char *cs, const char *ct, size_t count);
size_t strnlen(const char *s, size_t maxlen);
unsigned int atou(const char *s);
unsigned long long simple_strtoull(const char *cp, char **endp, unsigned int base);
+size_t strlen(const char *s);
/* tty.c */
void puts(const char *);
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index d606463aa6d..b7388a425f0 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -225,7 +225,7 @@ static void low_free(unsigned long size, unsigned long addr)
unsigned long nr_pages;
nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;
- efi_call_phys2(sys_table->boottime->free_pages, addr, size);
+ efi_call_phys2(sys_table->boottime->free_pages, addr, nr_pages);
}
static void find_bits(unsigned long mask, u8 *pos, u8 *size)
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index 1e3184f6072..5d6f6891b18 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -181,8 +181,9 @@ relocated:
/*
* Do the decompression, and jump to the new kernel..
*/
- leal z_extract_offset_negative(%ebx), %ebp
/* push arguments for decompress_kernel: */
+ pushl $z_output_len /* decompressed length */
+ leal z_extract_offset_negative(%ebx), %ebp
pushl %ebp /* output address */
pushl $z_input_len /* input_len */
leal input_data(%ebx), %eax
@@ -191,33 +192,7 @@ relocated:
pushl %eax /* heap area */
pushl %esi /* real mode pointer */
call decompress_kernel
- addl $20, %esp
-
-#if CONFIG_RELOCATABLE
-/*
- * Find the address of the relocations.
- */
- leal z_output_len(%ebp), %edi
-
-/*
- * Calculate the delta between where vmlinux was compiled to run
- * and where it was actually loaded.
- */
- movl %ebp, %ebx
- subl $LOAD_PHYSICAL_ADDR, %ebx
- jz 2f /* Nothing to be done if loaded at compiled addr. */
-/*
- * Process relocations.
- */
-
-1: subl $4, %edi
- movl (%edi), %ecx
- testl %ecx, %ecx
- jz 2f
- addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
- jmp 1b
-2:
-#endif
+ addl $24, %esp
/*
* Jump to the decompressed kernel.
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 06e71c2c16b..c337422b575 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -338,6 +338,7 @@ relocated:
leaq input_data(%rip), %rdx /* input_data */
movl $z_input_len, %ecx /* input_len */
movq %rbp, %r8 /* output target address */
+ movq $z_output_len, %r9 /* decompressed length */
call decompress_kernel
popq %rsi
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 0319c88290a..434f077d2c4 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -271,6 +271,79 @@ static void error(char *x)
asm("hlt");
}
+#if CONFIG_X86_NEED_RELOCS
+static void handle_relocations(void *output, unsigned long output_len)
+{
+ int *reloc;
+ unsigned long delta, map, ptr;
+ unsigned long min_addr = (unsigned long)output;
+ unsigned long max_addr = min_addr + output_len;
+
+ /*
+ * Calculate the delta between where vmlinux was linked to load
+ * and where it was actually loaded.
+ */
+ delta = min_addr - LOAD_PHYSICAL_ADDR;
+ if (!delta) {
+ debug_putstr("No relocation needed... ");
+ return;
+ }
+ debug_putstr("Performing relocations... ");
+
+ /*
+ * The kernel contains a table of relocation addresses. Those
+ * addresses have the final load address of the kernel in virtual
+ * memory. We are currently working in the self map. So we need to
+ * create an adjustment for kernel memory addresses to the self map.
+ * This will involve subtracting out the base address of the kernel.
+ */
+ map = delta - __START_KERNEL_map;
+
+ /*
+ * Process relocations: 32 bit relocations first then 64 bit after.
+ * Two sets of binary relocations are added to the end of the kernel
+ * before compression. Each relocation table entry is the kernel
+ * address of the location which needs to be updated stored as a
+ * 32-bit value which is sign extended to 64 bits.
+ *
+ * Format is:
+ *
+ * kernel bits...
+ * 0 - zero terminator for 64 bit relocations
+ * 64 bit relocation repeated
+ * 0 - zero terminator for 32 bit relocations
+ * 32 bit relocation repeated
+ *
+ * So we work backwards from the end of the decompressed image.
+ */
+ for (reloc = output + output_len - sizeof(*reloc); *reloc; reloc--) {
+ int extended = *reloc;
+ extended += map;
+
+ ptr = (unsigned long)extended;
+ if (ptr < min_addr || ptr > max_addr)
+ error("32-bit relocation outside of kernel!\n");
+
+ *(uint32_t *)ptr += delta;
+ }
+#ifdef CONFIG_X86_64
+ for (reloc--; *reloc; reloc--) {
+ long extended = *reloc;
+ extended += map;
+
+ ptr = (unsigned long)extended;
+ if (ptr < min_addr || ptr > max_addr)
+ error("64-bit relocation outside of kernel!\n");
+
+ *(uint64_t *)ptr += delta;
+ }
+#endif
+}
+#else
+static inline void handle_relocations(void *output, unsigned long output_len)
+{ }
+#endif
+
static void parse_elf(void *output)
{
#ifdef CONFIG_X86_64
@@ -325,7 +398,8 @@ static void parse_elf(void *output)
asmlinkage void decompress_kernel(void *rmode, memptr heap,
unsigned char *input_data,
unsigned long input_len,
- unsigned char *output)
+ unsigned char *output,
+ unsigned long output_len)
{
real_mode = rmode;
@@ -365,6 +439,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
debug_putstr("\nDecompressing Linux... ");
decompress(input_data, input_len, NULL, NULL, output, NULL, error);
parse_elf(output);
+ handle_relocations(output, output_len);
debug_putstr("done.\nBooting the kernel.\n");
return;
}
diff --git a/arch/x86/boot/printf.c b/arch/x86/boot/printf.c
index cdac91ca55d..565083c16e5 100644
--- a/arch/x86/boot/printf.c
+++ b/arch/x86/boot/printf.c
@@ -55,7 +55,7 @@ static char *number(char *str, long num, int base, int size, int precision,
locase = (type & SMALL);
if (type & LEFT)
type &= ~ZEROPAD;
- if (base < 2 || base > 36)
+ if (base < 2 || base > 16)
return NULL;
c = (type & ZEROPAD) ? '0' : ' ';
sign = 0;
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index bccfca68430..665a730307f 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -457,7 +457,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
else
put_user_ex(0, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
- err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
+ compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
if (ksig->ka.sa.sa_flags & SA_RESTORER)
restorer = ksig->ka.sa.sa_restorer;
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 474dc1b59f7..4299eb05023 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -452,7 +452,7 @@ ia32_badsys:
CFI_ENDPROC
- .macro PTREGSCALL label, func, arg
+ .macro PTREGSCALL label, func
ALIGN
GLOBAL(\label)
leaq \func(%rip),%rax
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 2dfac58f3b1..b1977bad543 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -86,6 +86,7 @@ extern int acpi_pci_disabled;
extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
extern int acpi_fix_pin2_polarity;
+extern int acpi_disable_cmcff;
extern u8 acpi_sci_flags;
extern int acpi_sci_override_gsi;
@@ -168,6 +169,7 @@ static inline void arch_acpi_set_pdc_bits(u32 *buf)
#define acpi_lapic 0
#define acpi_ioapic 0
+#define acpi_disable_cmcff 0
static inline void acpi_noirq_set(void) { }
static inline void acpi_disable_pci(void) { }
static inline void disable_acpi(void) { }
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 58ed6d96a6a..0a3f9c9f98d 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -5,6 +5,7 @@
#include <linux/stddef.h>
#include <linux/stringify.h>
#include <asm/asm.h>
+#include <asm/ptrace.h>
/*
* Alternative inline assembly for SMP.
@@ -220,20 +221,11 @@ extern void *text_poke_early(void *addr, const void *opcode, size_t len);
* no thread can be preempted in the instructions being modified (no iret to an
* invalid instruction possible) or if the instructions are changed from a
* consistent state to another consistent state atomically.
- * More care must be taken when modifying code in the SMP case because of
- * Intel's errata. text_poke_smp() takes care that errata, but still
- * doesn't support NMI/MCE handler code modifying.
 * On the local CPU you need to be protected against NMI or MCE handlers seeing an
* inconsistent instruction while you patch.
*/
-struct text_poke_param {
- void *addr;
- const void *opcode;
- size_t len;
-};
-
extern void *text_poke(void *addr, const void *opcode, size_t len);
-extern void *text_poke_smp(void *addr, const void *opcode, size_t len);
-extern void text_poke_smp_batch(struct text_poke_param *params, int n);
+extern int poke_int3_handler(struct pt_regs *regs);
+extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
#endif /* _ASM_X86_ALTERNATIVE_H */
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index f8119b582c3..1d2091a226b 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -715,4 +715,6 @@ static inline void exiting_ack_irq(void)
ack_APIC_irq();
}
+extern void ioapic_zap_locks(void);
+
#endif /* _ASM_X86_APIC_H */
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 1c2d247f65c..4582e8e1cd1 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -3,21 +3,25 @@
#ifdef __ASSEMBLY__
# define __ASM_FORM(x) x
+# define __ASM_FORM_RAW(x) x
# define __ASM_FORM_COMMA(x) x,
#else
# define __ASM_FORM(x) " " #x " "
+# define __ASM_FORM_RAW(x) #x
# define __ASM_FORM_COMMA(x) " " #x ","
#endif
#ifdef CONFIG_X86_32
# define __ASM_SEL(a,b) __ASM_FORM(a)
+# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a)
#else
# define __ASM_SEL(a,b) __ASM_FORM(b)
+# define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b)
#endif
#define __ASM_SIZE(inst, ...) __ASM_SEL(inst##l##__VA_ARGS__, \
inst##q##__VA_ARGS__)
-#define __ASM_REG(reg) __ASM_SEL(e##reg, r##reg)
+#define __ASM_REG(reg) __ASM_SEL_RAW(e##reg, r##reg)
#define _ASM_PTR __ASM_SEL(.long, .quad)
#define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8)
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index 6dfd0195bb5..41639ce8fd6 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -15,6 +15,14 @@
#include <linux/compiler.h>
#include <asm/alternative.h>
+#if BITS_PER_LONG == 32
+# define _BITOPS_LONG_SHIFT 5
+#elif BITS_PER_LONG == 64
+# define _BITOPS_LONG_SHIFT 6
+#else
+# error "Unexpected BITS_PER_LONG"
+#endif
+
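/*
 * A small sketch of how the shift is used by the test_bit family
 * below: bit "nr" lives in word nr >> _BITOPS_LONG_SHIFT at position
 * nr & (BITS_PER_LONG - 1). On a 64-bit build (_BITOPS_LONG_SHIFT ==
 * 6), nr = 131 selects word 2, bit 3.
 */
static inline int example_test_bit_long(long nr, const unsigned long *addr)
{
        return (addr[nr >> _BITOPS_LONG_SHIFT] >>
                (nr & (BITS_PER_LONG - 1))) & 1;
}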
#define BIT_64(n) (U64_C(1) << (n))
/*
@@ -59,7 +67,7 @@
* restricted to acting on a single-word quantity.
*/
static __always_inline void
-set_bit(unsigned int nr, volatile unsigned long *addr)
+set_bit(long nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "orb %1,%0"
@@ -81,7 +89,7 @@ set_bit(unsigned int nr, volatile unsigned long *addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static inline void __set_bit(int nr, volatile unsigned long *addr)
+static inline void __set_bit(long nr, volatile unsigned long *addr)
{
asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
@@ -97,7 +105,7 @@ static inline void __set_bit(int nr, volatile unsigned long *addr)
* in order to ensure changes are visible on other processors.
*/
static __always_inline void
-clear_bit(int nr, volatile unsigned long *addr)
+clear_bit(long nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "andb %1,%0"
@@ -118,13 +126,13 @@ clear_bit(int nr, volatile unsigned long *addr)
* clear_bit() is atomic and implies release semantics before the memory
* operation. It can be used for an unlock.
*/
-static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
+static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
{
barrier();
clear_bit(nr, addr);
}
-static inline void __clear_bit(int nr, volatile unsigned long *addr)
+static inline void __clear_bit(long nr, volatile unsigned long *addr)
{
asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
}
@@ -141,7 +149,7 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr)
* No memory barrier is required here, because x86 cannot reorder stores past
* older loads. Same principle as spin_unlock.
*/
-static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
+static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
{
barrier();
__clear_bit(nr, addr);
@@ -159,7 +167,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr)
* If it's called on the same region of memory simultaneously, the effect
* may be that only one operation succeeds.
*/
-static inline void __change_bit(int nr, volatile unsigned long *addr)
+static inline void __change_bit(long nr, volatile unsigned long *addr)
{
asm volatile("btc %1,%0" : ADDR : "Ir" (nr));
}
@@ -173,7 +181,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void change_bit(int nr, volatile unsigned long *addr)
+static inline void change_bit(long nr, volatile unsigned long *addr)
{
if (IS_IMMEDIATE(nr)) {
asm volatile(LOCK_PREFIX "xorb %1,%0"
@@ -194,7 +202,7 @@ static inline void change_bit(int nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_set_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
@@ -212,7 +220,7 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
* This is the same as test_and_set_bit on x86.
*/
static __always_inline int
-test_and_set_bit_lock(int nr, volatile unsigned long *addr)
+test_and_set_bit_lock(long nr, volatile unsigned long *addr)
{
return test_and_set_bit(nr, addr);
}
@@ -226,7 +234,7 @@ test_and_set_bit_lock(int nr, volatile unsigned long *addr)
* If two examples of this operation race, one can appear to succeed
* but actually fail. You must protect multiple accesses with a lock.
*/
-static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_set_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
@@ -245,7 +253,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_clear_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
@@ -272,7 +280,7 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
* accessed from a hypervisor on the same CPU if running in a VM: don't change
* this without also updating arch/x86/kernel/kvm.c
*/
-static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
@@ -284,7 +292,7 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
}
/* WARNING: non atomic and it can be reordered! */
-static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_change_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
@@ -304,7 +312,7 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_change_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
@@ -315,13 +323,13 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
return oldbit;
}
-static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr)
+static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr)
{
- return ((1UL << (nr % BITS_PER_LONG)) &
- (addr[nr / BITS_PER_LONG])) != 0;
+ return ((1UL << (nr & (BITS_PER_LONG-1))) &
+ (addr[nr >> _BITOPS_LONG_SHIFT])) != 0;
}
-static inline int variable_test_bit(int nr, volatile const unsigned long *addr)
+static inline int variable_test_bit(long nr, volatile const unsigned long *addr)
{
int oldbit;
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index 653668d140f..4a8cb8d7cbd 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -35,9 +35,9 @@ static void sanitize_boot_params(struct boot_params *boot_params)
*/
if (boot_params->sentinel) {
/* fields in boot_params are left uninitialized, clear them */
- memset(&boot_params->olpc_ofw_header, 0,
+ memset(&boot_params->ext_ramdisk_image, 0,
(char *)&boot_params->efi_info -
- (char *)&boot_params->olpc_ofw_header);
+ (char *)&boot_params->ext_ramdisk_image);
memset(&boot_params->kbd_status, 0,
(char *)&boot_params->hdr -
(char *)&boot_params->kbd_status);
diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
index 46fc474fd81..f50de695173 100644
--- a/arch/x86/include/asm/checksum_32.h
+++ b/arch/x86/include/asm/checksum_32.h
@@ -49,9 +49,15 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
int len, __wsum sum,
int *err_ptr)
{
+ __wsum ret;
+
might_sleep();
- return csum_partial_copy_generic((__force void *)src, dst,
- len, sum, err_ptr, NULL);
+ stac();
+ ret = csum_partial_copy_generic((__force void *)src, dst,
+ len, sum, err_ptr, NULL);
+ clac();
+
+ return ret;
}
/*
@@ -176,10 +182,16 @@ static inline __wsum csum_and_copy_to_user(const void *src,
int len, __wsum sum,
int *err_ptr)
{
+ __wsum ret;
+
might_sleep();
- if (access_ok(VERIFY_WRITE, dst, len))
- return csum_partial_copy_generic(src, (__force void *)dst,
- len, sum, NULL, err_ptr);
+ if (access_ok(VERIFY_WRITE, dst, len)) {
+ stac();
+ ret = csum_partial_copy_generic(src, (__force void *)dst,
+ len, sum, NULL, err_ptr);
+ clac();
+ return ret;
+ }
if (len)
*err_ptr = -EFAULT;
diff --git a/arch/x86/include/asm/checksum_64.h b/arch/x86/include/asm/checksum_64.h
index 9bfdc41629e..e6fd8a026c7 100644
--- a/arch/x86/include/asm/checksum_64.h
+++ b/arch/x86/include/asm/checksum_64.h
@@ -133,7 +133,7 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
/* Do not call this directly. Use the wrappers below */
-extern __wsum csum_partial_copy_generic(const void *src, const void *dst,
+extern __visible __wsum csum_partial_copy_generic(const void *src, const void *dst,
int len, __wsum sum,
int *src_err_ptr, int *dst_err_ptr);
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 47538a61c91..d3f5c63078d 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -366,9 +366,10 @@ extern bool __static_cpu_has_safe(u16 bit);
*/
static __always_inline __pure bool __static_cpu_has(u16 bit)
{
-#if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
+#ifdef CC_HAVE_ASM_GOTO
#ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
+
/*
* Catch too early usage of this before alternatives
* have run.
@@ -384,6 +385,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
".previous\n"
/* skipping size check since replacement size = 0 */
: : "i" (X86_FEATURE_ALWAYS) : : t_warn);
+
#endif
asm goto("1: jmp %l[t_no]\n"
@@ -406,7 +408,9 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
warn_pre_alternatives();
return false;
#endif
-#else /* GCC_VERSION >= 40500 */
+
+#else /* CC_HAVE_ASM_GOTO */
+
u8 flag;
/* Open-coded due to __stringify() in ALTERNATIVE() */
asm volatile("1: movb $0,%0\n"
@@ -427,7 +431,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
".previous\n"
: "=qm" (flag) : "i" (bit));
return flag;
-#endif
+
+#endif /* CC_HAVE_ASM_GOTO */
}
#define static_cpu_has(bit) \
@@ -441,7 +446,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
{
-#if __GNUC__ > 4 || __GNUC_MINOR__ >= 5
+#ifdef CC_HAVE_ASM_GOTO
/*
* We need to spell the jumps to the compiler because, depending on the offset,
* the replacement jump can be bigger than the original jump, and this we cannot
@@ -475,7 +480,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
return false;
t_dynamic:
return __static_cpu_has_safe(bit);
-#else /* GCC_VERSION >= 40500 */
+#else
u8 flag;
/* Open-coded due to __stringify() in ALTERNATIVE() */
asm volatile("1: movb $2,%0\n"
@@ -511,7 +516,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
: "=qm" (flag)
: "i" (bit), "i" (X86_FEATURE_ALWAYS));
return (flag == 2 ? __static_cpu_has_safe(bit) : flag);
-#endif
+#endif /* CC_HAVE_ASM_GOTO */
}
#define static_cpu_has_safe(bit) \
diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
index cccd07fa5e3..779c2efe2e9 100644
--- a/arch/x86/include/asm/e820.h
+++ b/arch/x86/include/asm/e820.h
@@ -29,7 +29,7 @@ extern void e820_setup_gap(void);
extern int e820_search_gap(unsigned long *gapstart, unsigned long *gapsize,
unsigned long start_addr, unsigned long long end_addr);
struct setup_data;
-extern void parse_e820_ext(struct setup_data *data);
+extern void parse_e820_ext(u64 phys_addr, u32 data_len);
#if defined(CONFIG_X86_64) || \
(defined(CONFIG_X86_32) && defined(CONFIG_HIBERNATION))
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index e4ac559c4a2..92b3bae08b7 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -26,56 +26,56 @@
#include <asm/sections.h>
/* Interrupt handlers registered during init_IRQ */
-extern void apic_timer_interrupt(void);
-extern void x86_platform_ipi(void);
-extern void kvm_posted_intr_ipi(void);
-extern void error_interrupt(void);
-extern void irq_work_interrupt(void);
-
-extern void spurious_interrupt(void);
-extern void thermal_interrupt(void);
-extern void reschedule_interrupt(void);
-
-extern void invalidate_interrupt(void);
-extern void invalidate_interrupt0(void);
-extern void invalidate_interrupt1(void);
-extern void invalidate_interrupt2(void);
-extern void invalidate_interrupt3(void);
-extern void invalidate_interrupt4(void);
-extern void invalidate_interrupt5(void);
-extern void invalidate_interrupt6(void);
-extern void invalidate_interrupt7(void);
-extern void invalidate_interrupt8(void);
-extern void invalidate_interrupt9(void);
-extern void invalidate_interrupt10(void);
-extern void invalidate_interrupt11(void);
-extern void invalidate_interrupt12(void);
-extern void invalidate_interrupt13(void);
-extern void invalidate_interrupt14(void);
-extern void invalidate_interrupt15(void);
-extern void invalidate_interrupt16(void);
-extern void invalidate_interrupt17(void);
-extern void invalidate_interrupt18(void);
-extern void invalidate_interrupt19(void);
-extern void invalidate_interrupt20(void);
-extern void invalidate_interrupt21(void);
-extern void invalidate_interrupt22(void);
-extern void invalidate_interrupt23(void);
-extern void invalidate_interrupt24(void);
-extern void invalidate_interrupt25(void);
-extern void invalidate_interrupt26(void);
-extern void invalidate_interrupt27(void);
-extern void invalidate_interrupt28(void);
-extern void invalidate_interrupt29(void);
-extern void invalidate_interrupt30(void);
-extern void invalidate_interrupt31(void);
-
-extern void irq_move_cleanup_interrupt(void);
-extern void reboot_interrupt(void);
-extern void threshold_interrupt(void);
-
-extern void call_function_interrupt(void);
-extern void call_function_single_interrupt(void);
+extern asmlinkage void apic_timer_interrupt(void);
+extern asmlinkage void x86_platform_ipi(void);
+extern asmlinkage void kvm_posted_intr_ipi(void);
+extern asmlinkage void error_interrupt(void);
+extern asmlinkage void irq_work_interrupt(void);
+
+extern asmlinkage void spurious_interrupt(void);
+extern asmlinkage void thermal_interrupt(void);
+extern asmlinkage void reschedule_interrupt(void);
+
+extern asmlinkage void invalidate_interrupt(void);
+extern asmlinkage void invalidate_interrupt0(void);
+extern asmlinkage void invalidate_interrupt1(void);
+extern asmlinkage void invalidate_interrupt2(void);
+extern asmlinkage void invalidate_interrupt3(void);
+extern asmlinkage void invalidate_interrupt4(void);
+extern asmlinkage void invalidate_interrupt5(void);
+extern asmlinkage void invalidate_interrupt6(void);
+extern asmlinkage void invalidate_interrupt7(void);
+extern asmlinkage void invalidate_interrupt8(void);
+extern asmlinkage void invalidate_interrupt9(void);
+extern asmlinkage void invalidate_interrupt10(void);
+extern asmlinkage void invalidate_interrupt11(void);
+extern asmlinkage void invalidate_interrupt12(void);
+extern asmlinkage void invalidate_interrupt13(void);
+extern asmlinkage void invalidate_interrupt14(void);
+extern asmlinkage void invalidate_interrupt15(void);
+extern asmlinkage void invalidate_interrupt16(void);
+extern asmlinkage void invalidate_interrupt17(void);
+extern asmlinkage void invalidate_interrupt18(void);
+extern asmlinkage void invalidate_interrupt19(void);
+extern asmlinkage void invalidate_interrupt20(void);
+extern asmlinkage void invalidate_interrupt21(void);
+extern asmlinkage void invalidate_interrupt22(void);
+extern asmlinkage void invalidate_interrupt23(void);
+extern asmlinkage void invalidate_interrupt24(void);
+extern asmlinkage void invalidate_interrupt25(void);
+extern asmlinkage void invalidate_interrupt26(void);
+extern asmlinkage void invalidate_interrupt27(void);
+extern asmlinkage void invalidate_interrupt28(void);
+extern asmlinkage void invalidate_interrupt29(void);
+extern asmlinkage void invalidate_interrupt30(void);
+extern asmlinkage void invalidate_interrupt31(void);
+
+extern asmlinkage void irq_move_cleanup_interrupt(void);
+extern asmlinkage void reboot_interrupt(void);
+extern asmlinkage void threshold_interrupt(void);
+
+extern asmlinkage void call_function_interrupt(void);
+extern asmlinkage void call_function_single_interrupt(void);
#ifdef CONFIG_TRACING
/* Interrupt handlers registered during init_IRQ */
@@ -172,22 +172,18 @@ extern atomic_t irq_mis_count;
extern void eisa_set_level_irq(unsigned int irq);
/* SMP */
-extern void smp_apic_timer_interrupt(struct pt_regs *);
-extern void smp_spurious_interrupt(struct pt_regs *);
-extern void smp_x86_platform_ipi(struct pt_regs *);
-extern void smp_error_interrupt(struct pt_regs *);
+extern __visible void smp_apic_timer_interrupt(struct pt_regs *);
+extern __visible void smp_spurious_interrupt(struct pt_regs *);
+extern __visible void smp_x86_platform_ipi(struct pt_regs *);
+extern __visible void smp_error_interrupt(struct pt_regs *);
#ifdef CONFIG_X86_IO_APIC
extern asmlinkage void smp_irq_move_cleanup_interrupt(void);
#endif
#ifdef CONFIG_SMP
-extern void smp_reschedule_interrupt(struct pt_regs *);
-extern void smp_call_function_interrupt(struct pt_regs *);
-extern void smp_call_function_single_interrupt(struct pt_regs *);
-#ifdef CONFIG_X86_32
-extern void smp_invalidate_interrupt(struct pt_regs *);
-#else
-extern asmlinkage void smp_invalidate_interrupt(struct pt_regs *);
-#endif
+extern __visible void smp_reschedule_interrupt(struct pt_regs *);
+extern __visible void smp_call_function_interrupt(struct pt_regs *);
+extern __visible void smp_call_function_single_interrupt(struct pt_regs *);
+extern __visible void smp_invalidate_interrupt(struct pt_regs *);
#endif
extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index 2d4b5e6107c..e42f758a0fb 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -33,7 +33,7 @@ struct hypervisor_x86 {
const char *name;
/* Detection routine */
- bool (*detect)(void);
+ uint32_t (*detect)(void);
/* Adjust CPU feature bits (run once per CPU) */
void (*set_cpu_features)(struct cpuinfo_x86 *);
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 57873beb329..0ea10f27d61 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -33,7 +33,7 @@ extern void (*x86_platform_ipi_callback)(void);
extern void native_init_IRQ(void);
extern bool handle_irq(unsigned irq, struct pt_regs *regs);
-extern unsigned int do_IRQ(struct pt_regs *regs);
+extern __visible unsigned int do_IRQ(struct pt_regs *regs);
/* Interrupt vector management */
extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
index 5a6d2873f80..9454c167629 100644
--- a/arch/x86/include/asm/kprobes.h
+++ b/arch/x86/include/asm/kprobes.h
@@ -49,10 +49,10 @@ typedef u8 kprobe_opcode_t;
#define flush_insn_slot(p) do { } while (0)
/* optinsn template addresses */
-extern kprobe_opcode_t optprobe_template_entry;
-extern kprobe_opcode_t optprobe_template_val;
-extern kprobe_opcode_t optprobe_template_call;
-extern kprobe_opcode_t optprobe_template_end;
+extern __visible kprobe_opcode_t optprobe_template_entry;
+extern __visible kprobe_opcode_t optprobe_template_val;
+extern __visible kprobe_opcode_t optprobe_template_call;
+extern __visible kprobe_opcode_t optprobe_template_end;
#define MAX_OPTIMIZED_LENGTH (MAX_INSN_SIZE + RELATIVE_ADDR_SIZE)
#define MAX_OPTINSN_SIZE \
(((unsigned long)&optprobe_template_end - \
@@ -62,7 +62,7 @@ extern kprobe_opcode_t optprobe_template_end;
extern const int kretprobe_blacklist_size;
void arch_remove_kprobe(struct kprobe *p);
-void kretprobe_trampoline(void);
+asmlinkage void kretprobe_trampoline(void);
/* Architecture specific copy of original instruction*/
struct arch_specific_insn {
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f87f7fcefa0..c76ff74a98f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -286,6 +286,7 @@ struct kvm_mmu {
u64 *pae_root;
u64 *lm_root;
u64 rsvd_bits_mask[2][4];
+ u64 bad_mt_xwr;
/*
* Bitmap: bit set = last pte in walk
@@ -323,6 +324,7 @@ struct kvm_pmu {
u64 global_ovf_ctrl;
u64 counter_bitmask[2];
u64 global_ctrl_mask;
+ u64 reserved_bits;
u8 version;
struct kvm_pmc gp_counters[INTEL_PMC_MAX_GENERIC];
struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
@@ -511,6 +513,14 @@ struct kvm_vcpu_arch {
* instruction.
*/
bool write_fault_to_shadow_pgtable;
+
+ /* set at EPT violation at this point */
+ unsigned long exit_qualification;
+
+ /* pv related host specific info */
+ struct {
+ bool pv_unhalted;
+ } pv;
};
struct kvm_lpage_info {
@@ -802,8 +812,8 @@ extern u32 kvm_min_guest_tsc_khz;
extern u32 kvm_max_guest_tsc_khz;
enum emulation_result {
- EMULATE_DONE, /* no further processing */
- EMULATE_DO_MMIO, /* kvm_run filled with mmio request */
+ EMULATE_DONE, /* no further processing */
+ EMULATE_USER_EXIT, /* kvm_run ready for userspace exit */
EMULATE_FAIL, /* can't emulate this instruction */
};
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 695399f2d5e..1df11590975 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -85,26 +85,20 @@ static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
return ret;
}
-static inline bool kvm_para_available(void)
+static inline uint32_t kvm_cpuid_base(void)
{
- unsigned int eax, ebx, ecx, edx;
- char signature[13];
-
if (boot_cpu_data.cpuid_level < 0)
- return false; /* So we don't blow up on old processors */
+ return 0; /* So we don't blow up on old processors */
- if (cpu_has_hypervisor) {
- cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
- memcpy(signature + 0, &ebx, 4);
- memcpy(signature + 4, &ecx, 4);
- memcpy(signature + 8, &edx, 4);
- signature[12] = 0;
+ if (cpu_has_hypervisor)
+ return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);
- if (strcmp(signature, "KVMKVMKVM") == 0)
- return true;
- }
+ return 0;
+}
- return false;
+static inline bool kvm_para_available(void)
+{
+ return kvm_cpuid_base() != 0;
}
static inline unsigned int kvm_arch_para_features(void)
@@ -118,10 +112,20 @@ void kvm_async_pf_task_wait(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_pf_reason(void);
extern void kvm_disable_steal_time(void);
-#else
-#define kvm_guest_init() do { } while (0)
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+void __init kvm_spinlock_init(void);
+#else /* !CONFIG_PARAVIRT_SPINLOCKS */
+static inline void kvm_spinlock_init(void)
+{
+}
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+#else /* CONFIG_KVM_GUEST */
+#define kvm_guest_init() do {} while (0)
#define kvm_async_pf_task_wait(T) do {} while(0)
#define kvm_async_pf_task_wake(T) do {} while(0)
+
static inline u32 kvm_read_and_reset_pf_reason(void)
{
return 0;
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 29e3093bbd2..cbe6b9e404c 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -32,11 +32,20 @@
#define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
#define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
#define MCI_STATUS_AR (1ULL<<55) /* Action required */
-#define MCACOD 0xffff /* MCA Error Code */
+
+/*
+ * Note that the full MCACOD field of IA32_MCi_STATUS MSR is
+ * bits 15:0. But bit 12 is the 'F' bit, defined for corrected
+ * errors to indicate that errors are being filtered by hardware.
+ * We should mask out bit 12 when looking for specific signatures
+ * of uncorrected errors - so the F bit is deliberately skipped
+ * in this #define.
+ */
+#define MCACOD 0xefff /* MCA Error Code */
/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
-#define MCACOD_SCRUBMSK 0xfff0
+#define MCACOD_SCRUBMSK 0xeff0 /* Skip bit 12 ('F' bit) */
#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
#define MCACOD_DATA 0x0134 /* Data Load */
#define MCACOD_INSTR 0x0150 /* Instruction Fetch */
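/*
 * A short sketch of the masking described above, assuming "status"
 * holds a raw IA32_MCi_STATUS value: comparing through MCACOD ignores
 * the 'F' filtering bit (bit 12), so a filtered instruction-fetch
 * error still matches its signature.
 */
static inline int example_is_instr_fetch_error(u64 status)
{
        return (status & MCACOD) == MCACOD_INSTR;
}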
@@ -188,6 +197,9 @@ extern void register_mce_write_callback(ssize_t (*)(struct file *filp,
const char __user *ubuf,
size_t usize, loff_t *off));
+/* Disable CMCI/polling for MCA bank claimed by firmware */
+extern void mce_disable_bank(int bank);
+
/*
* Exception handler
*/
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h
index 50e5c58ced2..4c019179a57 100644
--- a/arch/x86/include/asm/microcode_amd.h
+++ b/arch/x86/include/asm/microcode_amd.h
@@ -59,7 +59,7 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table,
extern int __apply_microcode_amd(struct microcode_amd *mc_amd);
extern int apply_microcode_amd(int cpu);
-extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size);
+extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);
#ifdef CONFIG_MICROCODE_AMD_EARLY
#ifdef CONFIG_X86_32
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index cdbf3677610..be12c534fd5 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -45,22 +45,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
/* Re-load page tables */
load_cr3(next->pgd);
- /* stop flush ipis for the previous mm */
+ /* Stop flush ipis for the previous mm */
cpumask_clear_cpu(cpu, mm_cpumask(prev));
- /*
- * load the LDT, if the LDT is different:
- */
+ /* Load the LDT, if the LDT is different: */
if (unlikely(prev->context.ldt != next->context.ldt))
load_LDT_nolock(&next->context);
}
#ifdef CONFIG_SMP
- else {
+ else {
this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
- if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
- /* We were in lazy tlb mode and leave_mm disabled
+ if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
+ /*
+ * On established mms, the mm_cpumask is only changed
+ * from irq context, from ptep_clear_flush() while in
+ * lazy tlb mode, and here. Irqs are blocked during
+ * schedule, protecting us from simultaneous changes.
+ */
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+ /*
+ * We were in lazy tlb mode and leave_mm disabled
* tlb flush IPI delivery. We must reload CR3
* to make sure to use no freed page tables.
*/
diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h
index 2c543fff241..e7e6751648e 100644
--- a/arch/x86/include/asm/mutex_64.h
+++ b/arch/x86/include/asm/mutex_64.h
@@ -16,6 +16,20 @@
*
* Atomically decrements @v and calls <fail_fn> if the result is negative.
*/
+#ifdef CC_HAVE_ASM_GOTO
+static inline void __mutex_fastpath_lock(atomic_t *v,
+ void (*fail_fn)(atomic_t *))
+{
+ asm volatile goto(LOCK_PREFIX " decl %0\n"
+ " jns %l[exit]\n"
+ : : "m" (v->counter)
+ : "memory", "cc"
+ : exit);
+ fail_fn(v);
+exit:
+ return;
+}
+#else
#define __mutex_fastpath_lock(v, fail_fn) \
do { \
unsigned long dummy; \
@@ -32,6 +46,7 @@ do { \
: "rax", "rsi", "rdx", "rcx", \
"r8", "r9", "r10", "r11", "memory"); \
} while (0)
+#endif
/**
* __mutex_fastpath_lock_retval - try to take the lock by moving the count
@@ -56,6 +71,20 @@ static inline int __mutex_fastpath_lock_retval(atomic_t *count)
*
* Atomically increments @v and calls <fail_fn> if the result is nonpositive.
*/
+#ifdef CC_HAVE_ASM_GOTO
+static inline void __mutex_fastpath_unlock(atomic_t *v,
+ void (*fail_fn)(atomic_t *))
+{
+ asm volatile goto(LOCK_PREFIX " incl %0\n"
+ " jg %l[exit]\n"
+ : : "m" (v->counter)
+ : "memory", "cc"
+ : exit);
+ fail_fn(v);
+exit:
+ return;
+}
+#else
#define __mutex_fastpath_unlock(v, fail_fn) \
do { \
unsigned long dummy; \
@@ -72,6 +101,7 @@ do { \
: "rax", "rsi", "rdx", "rcx", \
"r8", "r9", "r10", "r11", "memory"); \
} while (0)
+#endif
#define __mutex_slowpath_needs_to_unlock() 1
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index ef17af01347..f48b17df422 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -15,6 +15,8 @@
*/
#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+#define __START_KERNEL_map __PAGE_OFFSET
+
#define THREAD_SIZE_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 6c896fbe21d..43dcd804ebd 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -32,11 +32,6 @@
*/
#define __PAGE_OFFSET _AC(0xffff880000000000, UL)
-#define __PHYSICAL_START ((CONFIG_PHYSICAL_START + \
- (CONFIG_PHYSICAL_ALIGN - 1)) & \
- ~(CONFIG_PHYSICAL_ALIGN - 1))
-
-#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
#define __START_KERNEL_map _AC(0xffffffff80000000, UL)
/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 54c97879195..f97fbe3abb6 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -33,6 +33,11 @@
(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define __PHYSICAL_START ALIGN(CONFIG_PHYSICAL_START, \
+ CONFIG_PHYSICAL_ALIGN)
+
+#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
+
#ifdef CONFIG_X86_64
#include <asm/page_64_types.h>
#else
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index cfdc9ee4c90..401f350ef71 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -712,36 +712,16 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
-static inline int arch_spin_is_locked(struct arch_spinlock *lock)
+static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
+ __ticket_t ticket)
{
- return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock);
+ PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
}
-static inline int arch_spin_is_contended(struct arch_spinlock *lock)
+static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
+ __ticket_t ticket)
{
- return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock);
-}
-#define arch_spin_is_contended arch_spin_is_contended
-
-static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
-{
- PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
-}
-
-static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock,
- unsigned long flags)
-{
- PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
-}
-
-static __always_inline int arch_spin_trylock(struct arch_spinlock *lock)
-{
- return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
-}
-
-static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
-{
- PVOP_VCALL1(pv_lock_ops.spin_unlock, lock);
+ PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
}
#endif
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 0db1fcac668..aab8f671b52 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -327,13 +327,15 @@ struct pv_mmu_ops {
};
struct arch_spinlock;
+#ifdef CONFIG_SMP
+#include <asm/spinlock_types.h>
+#else
+typedef u16 __ticket_t;
+#endif
+
struct pv_lock_ops {
- int (*spin_is_locked)(struct arch_spinlock *lock);
- int (*spin_is_contended)(struct arch_spinlock *lock);
- void (*spin_lock)(struct arch_spinlock *lock);
- void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
- int (*spin_trylock)(struct arch_spinlock *lock);
- void (*spin_unlock)(struct arch_spinlock *lock);
+ struct paravirt_callee_save lock_spinning;
+ void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
};
/* This contains all the paravirt structures: we get a convenient
@@ -387,7 +389,8 @@ extern struct pv_lock_ops pv_lock_ops;
/* Simple instruction patching code. */
#define DEF_NATIVE(ops, name, code) \
- extern const char start_##ops##_##name[], end_##ops##_##name[]; \
+ extern const char start_##ops##_##name[] __visible, \
+ end_##ops##_##name[] __visible; \
asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
unsigned paravirt_patch_nop(void);
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
index f2b489cf160..3bf2dd0cf61 100644
--- a/arch/x86/include/asm/pgtable-2level.h
+++ b/arch/x86/include/asm/pgtable-2level.h
@@ -55,9 +55,53 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif
+#ifdef CONFIG_MEM_SOFT_DIRTY
+
+/*
+ * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE, _PAGE_BIT_SOFT_DIRTY and
+ * _PAGE_BIT_PROTNONE are taken, split up the 28 bits of offset
+ * into this range.
+ */
+#define PTE_FILE_MAX_BITS 28
+#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1)
+#define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1)
+#define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1)
+#define PTE_FILE_SHIFT4 (_PAGE_BIT_SOFT_DIRTY + 1)
+#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
+#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
+#define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1)
+
+#define pte_to_pgoff(pte) \
+ ((((pte).pte_low >> (PTE_FILE_SHIFT1)) \
+ & ((1U << PTE_FILE_BITS1) - 1))) \
+ + ((((pte).pte_low >> (PTE_FILE_SHIFT2)) \
+ & ((1U << PTE_FILE_BITS2) - 1)) \
+ << (PTE_FILE_BITS1)) \
+ + ((((pte).pte_low >> (PTE_FILE_SHIFT3)) \
+ & ((1U << PTE_FILE_BITS3) - 1)) \
+ << (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
+ + ((((pte).pte_low >> (PTE_FILE_SHIFT4))) \
+ << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))
+
+#define pgoff_to_pte(off) \
+ ((pte_t) { .pte_low = \
+ ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \
+ + ((((off) >> PTE_FILE_BITS1) \
+ & ((1U << PTE_FILE_BITS2) - 1)) \
+ << PTE_FILE_SHIFT2) \
+ + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
+ & ((1U << PTE_FILE_BITS3) - 1)) \
+ << PTE_FILE_SHIFT3) \
+ + ((((off) >> \
+ (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))) \
+ << PTE_FILE_SHIFT4) \
+ + _PAGE_FILE })
+
+#else /* CONFIG_MEM_SOFT_DIRTY */
+
/*
* Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
- * split up the 29 bits of offset into this range:
+ * split up the 29 bits of offset into this range.
*/
#define PTE_FILE_MAX_BITS 29
#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1)
@@ -88,6 +132,8 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
<< PTE_FILE_SHIFT3) \
+ _PAGE_FILE })
+#endif /* CONFIG_MEM_SOFT_DIRTY */
+
/* Encode and de-code a swap entry */
#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
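The pte_to_pgoff()/pgoff_to_pte() pair above simply chops the file offset into fields that dodge the reserved pte bits and reassembles it on the way back. Below is a minimal user-space sketch of that round trip, assuming the conventional 32-bit x86 bit positions (_PAGE_BIT_PRESENT = 0, _PAGE_BIT_FILE = 6, _PAGE_BIT_PROTNONE = 8, _PAGE_BIT_SOFT_DIRTY = 11); the shift and width constants follow from those assumed positions rather than from this patch, and the real macros additionally OR in _PAGE_FILE.

/*
 * Sketch: pack a 28-bit file offset around reserved pte bits and unpack it
 * again, mirroring pte_to_pgoff()/pgoff_to_pte() (minus the _PAGE_FILE bit).
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SHIFT1  1	/* above _PAGE_BIT_PRESENT    (assumed bit 0)  */
#define SHIFT2  7	/* above _PAGE_BIT_FILE       (assumed bit 6)  */
#define SHIFT3  9	/* above _PAGE_BIT_PROTNONE   (assumed bit 8)  */
#define SHIFT4 12	/* above _PAGE_BIT_SOFT_DIRTY (assumed bit 11) */
#define BITS1 (SHIFT2 - SHIFT1 - 1)	/* 5 bits */
#define BITS2 (SHIFT3 - SHIFT2 - 1)	/* 1 bit  */
#define BITS3 (SHIFT4 - SHIFT3 - 1)	/* 2 bits */

static uint32_t pgoff_to_pte_low(uint32_t off)
{
	return ((off & ((1u << BITS1) - 1)) << SHIFT1)
	     + (((off >> BITS1) & ((1u << BITS2) - 1)) << SHIFT2)
	     + (((off >> (BITS1 + BITS2)) & ((1u << BITS3) - 1)) << SHIFT3)
	     + ((off >> (BITS1 + BITS2 + BITS3)) << SHIFT4);
}

static uint32_t pte_low_to_pgoff(uint32_t pte_low)
{
	return ((pte_low >> SHIFT1) & ((1u << BITS1) - 1))
	     + (((pte_low >> SHIFT2) & ((1u << BITS2) - 1)) << BITS1)
	     + (((pte_low >> SHIFT3) & ((1u << BITS3) - 1)) << (BITS1 + BITS2))
	     + ((pte_low >> SHIFT4) << (BITS1 + BITS2 + BITS3));
}

int main(void)
{
	uint32_t off;

	for (off = 0; off < (1u << 20); off += 12345)
		assert(pte_low_to_pgoff(pgoff_to_pte_low(off)) == off);
	printf("28-bit file offsets survive the round trip\n");
	return 0;
}

With those widths the low word carries 5 + 1 + 2 + 20 = 28 offset bits, which is where the PTE_FILE_MAX_BITS value of 28 comes from.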
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 4cc9f2b7cdc..81bb91b49a8 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -179,6 +179,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp)
/*
* Bits 0, 6 and 7 are taken in the low part of the pte,
* put the 32 bits of offset into the high part.
+ *
+ * For soft-dirty tracking, bit 11 is taken from
+ * the low part of the pte as well.
*/
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off) \
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 7dc305a4605..8d16befdec8 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -22,7 +22,8 @@
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+ __visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern spinlock_t pgd_lock;
@@ -314,6 +315,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+ return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+ return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+ return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+ return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_SOFT_DIRTY;
+}
+
/*
* Mask out unsupported bits in a present pgprot. Non-present pgprots
* can use those bits for other purposes, so leave them be.
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index c98ac63aae4..f4843e03113 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -61,12 +61,27 @@
* they do not conflict with each other.
*/
+#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_HIDDEN
+
#ifdef CONFIG_MEM_SOFT_DIRTY
-#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
#else
#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0))
#endif
+/*
+ * Tracking the soft dirty bit when a page goes out to swap is tricky.
+ * We need a bit which can be stored in the pte _and_ does not conflict
+ * with the swap entry format. On x86 bits 6 and 7 are *not* involved
+ * in swap entry computation, but bit 6 is used for nonlinear
+ * file mapping, so we borrow bit 7 for soft dirty tracking.
+ */
+#ifdef CONFIG_MEM_SOFT_DIRTY
+#define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE
+#else
+#define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0))
+#endif
+
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
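Together with the pte_swp_*soft_dirty() helpers added to pgtable.h above, the swap-time soft-dirty scheme boils down to setting, testing and clearing one spare pte bit. A tiny user-space sketch of that pattern follows, assuming _PAGE_SWP_SOFT_DIRTY sits at bit 7 (reusing _PAGE_PSE) as the comment above describes, and modelling pteval_t as a plain uint64_t.

/* Sketch: swap-time soft-dirty tracking as plain bit operations on a pte value. */
#include <assert.h>
#include <stdint.h>

typedef uint64_t pteval_t;

#define _PAGE_SWP_SOFT_DIRTY ((pteval_t)1 << 7)	/* assumed: reuses _PAGE_PSE, bit 7 */

static pteval_t pte_swp_mksoft_dirty(pteval_t pte)	{ return pte | _PAGE_SWP_SOFT_DIRTY; }
static pteval_t pte_swp_clear_soft_dirty(pteval_t pte)	{ return pte & ~_PAGE_SWP_SOFT_DIRTY; }
static int pte_swp_soft_dirty(pteval_t pte)		{ return !!(pte & _PAGE_SWP_SOFT_DIRTY); }

int main(void)
{
	pteval_t swp_pte = 0x12345600;	/* pretend swap entry; bits 6 and 7 left clear */

	swp_pte = pte_swp_mksoft_dirty(swp_pte);
	assert(pte_swp_soft_dirty(swp_pte));

	swp_pte = pte_swp_clear_soft_dirty(swp_pte);
	assert(!pte_swp_soft_dirty(swp_pte));
	return 0;
}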
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 24cf5aefb70..987c75ecc33 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -412,7 +412,7 @@ union irq_stack_union {
};
};
-DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union);
+DECLARE_PER_CPU_FIRST(union irq_stack_union, irq_stack_union) __visible;
DECLARE_INIT_PER_CPU(irq_stack_union);
DECLARE_PER_CPU(char *, irq_stack_ptr);
@@ -942,33 +942,19 @@ extern int set_tsc_mode(unsigned int val);
extern u16 amd_get_nb_id(int cpu);
-struct aperfmperf {
- u64 aperf, mperf;
-};
-
-static inline void get_aperfmperf(struct aperfmperf *am)
+static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
- WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF));
-
- rdmsrl(MSR_IA32_APERF, am->aperf);
- rdmsrl(MSR_IA32_MPERF, am->mperf);
-}
+ uint32_t base, eax, signature[3];
-#define APERFMPERF_SHIFT 10
+ for (base = 0x40000000; base < 0x40010000; base += 0x100) {
+ cpuid(base, &eax, &signature[0], &signature[1], &signature[2]);
-static inline
-unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
- struct aperfmperf *new)
-{
- u64 aperf = new->aperf - old->aperf;
- u64 mperf = new->mperf - old->mperf;
- unsigned long ratio = aperf;
-
- mperf >>= APERFMPERF_SHIFT;
- if (mperf)
- ratio = div64_u64(aperf, mperf);
+ if (!memcmp(sig, signature, 12) &&
+ (leaves == 0 || ((eax - base) >= leaves)))
+ return base;
+ }
- return ratio;
+ return 0;
}
extern unsigned long arch_align_stack(unsigned long sp);
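The new hypervisor_cpuid_base() above scans the 0x40000000-0x4000ffff CPUID range and compares the 12 signature bytes returned in EBX/ECX/EDX against the requested string. Here is a user-space sketch of the same scan, using GCC's <cpuid.h> __cpuid() macro in place of the kernel's cpuid() helper (so it only builds with GCC/clang on x86); the "XenVMMXenVMM" signature is just the example taken from the Xen hunk further down.

/* Sketch: find a hypervisor CPUID leaf base by signature, as hypervisor_cpuid_base() does. */
#include <cpuid.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
{
	uint32_t base, eax, signature[3];

	for (base = 0x40000000; base < 0x40010000; base += 0x100) {
		__cpuid(base, eax, signature[0], signature[1], signature[2]);

		if (!memcmp(sig, signature, 12) &&
		    (leaves == 0 || ((eax - base) >= leaves)))
			return base;
	}

	return 0;
}

int main(void)
{
	uint32_t base = hypervisor_cpuid_base("XenVMMXenVMM", 2);

	if (base)
		printf("Xen signature found at CPUID leaf 0x%x\n", base);
	else
		printf("no Xen signature in the hypervisor CPUID range\n");
	return 0;
}

On hardware with no hypervisor the 0x400000xx leaves typically echo the highest standard leaf (or zeros), so the signature never matches and the function falls through to 0.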
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 109a9dd5d45..be8269b00e2 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -93,7 +93,6 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
struct pvclock_vsyscall_time_info {
struct pvclock_vcpu_time_info pvti;
- u32 migrate_count;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index b7bf3505e1e..347555492da 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -6,6 +6,8 @@
#define COMMAND_LINE_SIZE 2048
+#include <linux/linkage.h>
+
#ifdef __i386__
#include <linux/pfn.h>
@@ -108,11 +110,11 @@ void *extend_brk(size_t size, size_t align);
extern void probe_roms(void);
#ifdef __i386__
-void __init i386_start_kernel(void);
+asmlinkage void __init i386_start_kernel(void);
#else
-void __init x86_64_start_kernel(char *real_mode);
-void __init x86_64_start_reservations(char *real_mode_data);
+asmlinkage void __init x86_64_start_kernel(char *real_mode);
+asmlinkage void __init x86_64_start_reservations(char *real_mode_data);
#endif /* __i386__ */
#endif /* _SETUP */
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index 2f4d924fe6c..645cad2c95f 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -101,7 +101,7 @@ static inline void native_wbinvd(void)
asm volatile("wbinvd": : :"memory");
}
-extern void native_load_gs_index(unsigned);
+extern asmlinkage void native_load_gs_index(unsigned);
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 33692eaabab..bf156ded74b 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -1,11 +1,14 @@
#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H
+#include <linux/jump_label.h>
#include <linux/atomic.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>
+#include <asm/bitops.h>
+
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
*
@@ -34,6 +37,36 @@
# define UNLOCK_LOCK_PREFIX
#endif
+/* How long a lock should spin before we consider blocking */
+#define SPIN_THRESHOLD (1 << 15)
+
+extern struct static_key paravirt_ticketlocks_enabled;
+static __always_inline bool static_key_false(struct static_key *key);
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+
+static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
+{
+ set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
+}
+
+#else /* !CONFIG_PARAVIRT_SPINLOCKS */
+static __always_inline void __ticket_lock_spinning(arch_spinlock_t *lock,
+ __ticket_t ticket)
+{
+}
+static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
+ __ticket_t ticket)
+{
+}
+
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
+
+static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.tickets.head == lock.tickets.tail;
+}
+
/*
* Ticket locks are conceptually two parts, one indicating the current head of
* the queue, and the other indicating the current tail. The lock is acquired
@@ -47,81 +80,101 @@
* in the high part, because a wide xadd increment of the low part would carry
* up and contaminate the high part.
*/
-static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
- register struct __raw_tickets inc = { .tail = 1 };
+ register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC };
inc = xadd(&lock->tickets, inc);
+ if (likely(inc.head == inc.tail))
+ goto out;
+ inc.tail &= ~TICKET_SLOWPATH_FLAG;
for (;;) {
- if (inc.head == inc.tail)
- break;
- cpu_relax();
- inc.head = ACCESS_ONCE(lock->tickets.head);
+ unsigned count = SPIN_THRESHOLD;
+
+ do {
+ if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
+ goto out;
+ cpu_relax();
+ } while (--count);
+ __ticket_lock_spinning(lock, inc.tail);
}
- barrier(); /* make sure nothing creeps before the lock is taken */
+out: barrier(); /* make sure nothing creeps before the lock is taken */
}
-static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
arch_spinlock_t old, new;
old.tickets = ACCESS_ONCE(lock->tickets);
- if (old.tickets.head != old.tickets.tail)
+ if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
return 0;
- new.head_tail = old.head_tail + (1 << TICKET_SHIFT);
+ new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
/* cmpxchg is a full barrier, so nothing can move before it */
return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
}
-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
+ arch_spinlock_t old)
{
- __add(&lock->tickets.head, 1, UNLOCK_LOCK_PREFIX);
+ arch_spinlock_t new;
+
+ BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
+
+ /* Perform the unlock on the "before" copy */
+ old.tickets.head += TICKET_LOCK_INC;
+
+ /* Clear the slowpath flag */
+ new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);
+
+ /*
+ * If the lock is uncontended, clear the flag - use cmpxchg in
+ * case it changes behind our back though.
+ */
+ if (new.tickets.head != new.tickets.tail ||
+ cmpxchg(&lock->head_tail, old.head_tail,
+ new.head_tail) != old.head_tail) {
+ /*
+ * Lock still has someone queued for it, so wake up an
+ * appropriate waiter.
+ */
+ __ticket_unlock_kick(lock, old.tickets.head);
+ }
}
-static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
- struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+ if (TICKET_SLOWPATH_FLAG &&
+ static_key_false(&paravirt_ticketlocks_enabled)) {
+ arch_spinlock_t prev;
- return tmp.tail != tmp.head;
-}
+ prev = *lock;
+ add_smp(&lock->tickets.head, TICKET_LOCK_INC);
-static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
-{
- struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+ /* add_smp() is a full mb() */
- return (__ticket_t)(tmp.tail - tmp.head) > 1;
+ if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
+ __ticket_unlock_slowpath(lock, prev);
+ } else
+ __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
}
-#ifndef CONFIG_PARAVIRT_SPINLOCKS
-
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
- return __ticket_spin_is_locked(lock);
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
- return __ticket_spin_is_contended(lock);
-}
-#define arch_spin_is_contended arch_spin_is_contended
+ struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
-static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
-{
- __ticket_spin_lock(lock);
+ return tmp.tail != tmp.head;
}
-static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
- return __ticket_spin_trylock(lock);
-}
+ struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
-static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
- __ticket_spin_unlock(lock);
+ return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
}
+#define arch_spin_is_contended arch_spin_is_contended
static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
unsigned long flags)
@@ -129,8 +182,6 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
arch_spin_lock(lock);
}
-#endif /* CONFIG_PARAVIRT_SPINLOCKS */
-
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
while (arch_spin_is_locked(lock))
@@ -233,8 +284,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
-/* The {read|write|spin}_lock() on x86 are full memory barriers. */
-static inline void smp_mb__after_lock(void) { }
-#define ARCH_HAS_SMP_MB_AFTER_LOCK
-
#endif /* _ASM_X86_SPINLOCK_H */
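With CONFIG_PARAVIRT_SPINLOCKS compiled out, the reworked arch_spin_lock()/arch_spin_unlock() above reduce to a plain ticket lock: tail is the next ticket handed out by xadd, head is the "now serving" counter, and a waiter that spins past SPIN_THRESHOLD calls into the (empty) slowpath hook. Below is a stripped-down user-space sketch of that fastpath with C11 atomics; the lock_spinning/unlock_kick stubs stand in for the paravirt hooks, and the TICKET_SLOWPATH_FLAG / TICKET_LOCK_INC = 2 bookkeeping is omitted.

/* Sketch: ticket spinlock fastpath - tickets handed out with an atomic add. */
#include <stdatomic.h>
#include <stdint.h>

#define SPIN_THRESHOLD (1 << 15)

struct ticket_lock {
	_Atomic uint16_t head;	/* "now serving" */
	_Atomic uint16_t tail;	/* next ticket to hand out */
};

/* Stand-ins for the paravirt hooks; empty, as in the !PARAVIRT_SPINLOCKS case. */
static void lock_spinning_stub(struct ticket_lock *lock, uint16_t ticket) { (void)lock; (void)ticket; }
static void unlock_kick_stub(struct ticket_lock *lock, uint16_t ticket) { (void)lock; (void)ticket; }

static void ticket_lock(struct ticket_lock *lock)
{
	uint16_t ticket = atomic_fetch_add_explicit(&lock->tail, 1, memory_order_relaxed);

	if (atomic_load_explicit(&lock->head, memory_order_acquire) == ticket)
		return;		/* uncontended fast path */

	for (;;) {
		unsigned int count = SPIN_THRESHOLD;

		do {
			if (atomic_load_explicit(&lock->head, memory_order_acquire) == ticket)
				return;
			/* cpu_relax() would go here in the kernel */
		} while (--count);

		lock_spinning_stub(lock, ticket);	/* paravirt: block instead of burning cycles */
	}
}

static void ticket_unlock(struct ticket_lock *lock)
{
	uint16_t next = atomic_load_explicit(&lock->head, memory_order_relaxed) + 1;

	atomic_store_explicit(&lock->head, next, memory_order_release);
	unlock_kick_stub(lock, next);			/* paravirt: wake the next waiter */
}

int main(void)
{
	struct ticket_lock lock = { 0, 0 };

	ticket_lock(&lock);
	ticket_unlock(&lock);
	return 0;
}

Unlock can be a plain release store here because only the lock holder ever advances head; add_smp() on lock->tickets.head plays the same role in the kernel version.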
diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h
index ad0ad07fc00..4f1bea19945 100644
--- a/arch/x86/include/asm/spinlock_types.h
+++ b/arch/x86/include/asm/spinlock_types.h
@@ -1,13 +1,17 @@
#ifndef _ASM_X86_SPINLOCK_TYPES_H
#define _ASM_X86_SPINLOCK_TYPES_H
-#ifndef __LINUX_SPINLOCK_TYPES_H
-# error "please don't include this file directly"
-#endif
-
#include <linux/types.h>
-#if (CONFIG_NR_CPUS < 256)
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define __TICKET_LOCK_INC 2
+#define TICKET_SLOWPATH_FLAG ((__ticket_t)1)
+#else
+#define __TICKET_LOCK_INC 1
+#define TICKET_SLOWPATH_FLAG ((__ticket_t)0)
+#endif
+
+#if (CONFIG_NR_CPUS < (256 / __TICKET_LOCK_INC))
typedef u8 __ticket_t;
typedef u16 __ticketpair_t;
#else
@@ -15,6 +19,8 @@ typedef u16 __ticket_t;
typedef u32 __ticketpair_t;
#endif
+#define TICKET_LOCK_INC ((__ticket_t)__TICKET_LOCK_INC)
+
#define TICKET_SHIFT (sizeof(__ticket_t) * 8)
typedef struct arch_spinlock {
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 4ec45b3abba..d7f3b3b78ac 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -2,8 +2,8 @@
#define _ASM_X86_SWITCH_TO_H
struct task_struct; /* one of the stranger aspects of C forward declarations */
-struct task_struct *__switch_to(struct task_struct *prev,
- struct task_struct *next);
+__visible struct task_struct *__switch_to(struct task_struct *prev,
+ struct task_struct *next);
struct tss_struct;
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
struct tss_struct *tss);
diff --git a/arch/x86/include/asm/sync_bitops.h b/arch/x86/include/asm/sync_bitops.h
index 9d09b4073b6..05af3b31d52 100644
--- a/arch/x86/include/asm/sync_bitops.h
+++ b/arch/x86/include/asm/sync_bitops.h
@@ -26,9 +26,9 @@
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void sync_set_bit(int nr, volatile unsigned long *addr)
+static inline void sync_set_bit(long nr, volatile unsigned long *addr)
{
- asm volatile("lock; btsl %1,%0"
+ asm volatile("lock; bts %1,%0"
: "+m" (ADDR)
: "Ir" (nr)
: "memory");
@@ -44,9 +44,9 @@ static inline void sync_set_bit(int nr, volatile unsigned long *addr)
* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
* in order to ensure changes are visible on other processors.
*/
-static inline void sync_clear_bit(int nr, volatile unsigned long *addr)
+static inline void sync_clear_bit(long nr, volatile unsigned long *addr)
{
- asm volatile("lock; btrl %1,%0"
+ asm volatile("lock; btr %1,%0"
: "+m" (ADDR)
: "Ir" (nr)
: "memory");
@@ -61,9 +61,9 @@ static inline void sync_clear_bit(int nr, volatile unsigned long *addr)
* Note that @nr may be almost arbitrarily large; this function is not
* restricted to acting on a single-word quantity.
*/
-static inline void sync_change_bit(int nr, volatile unsigned long *addr)
+static inline void sync_change_bit(long nr, volatile unsigned long *addr)
{
- asm volatile("lock; btcl %1,%0"
+ asm volatile("lock; btc %1,%0"
: "+m" (ADDR)
: "Ir" (nr)
: "memory");
@@ -77,11 +77,11 @@ static inline void sync_change_bit(int nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int sync_test_and_set_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
- asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0"
+ asm volatile("lock; bts %2,%1\n\tsbbl %0,%0"
: "=r" (oldbit), "+m" (ADDR)
: "Ir" (nr) : "memory");
return oldbit;
@@ -95,11 +95,11 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
- asm volatile("lock; btrl %2,%1\n\tsbbl %0,%0"
+ asm volatile("lock; btr %2,%1\n\tsbbl %0,%0"
: "=r" (oldbit), "+m" (ADDR)
: "Ir" (nr) : "memory");
return oldbit;
@@ -113,11 +113,11 @@ static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr)
* This operation is atomic and cannot be reordered.
* It also implies a memory barrier.
*/
-static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int sync_test_and_change_bit(long nr, volatile unsigned long *addr)
{
int oldbit;
- asm volatile("lock; btcl %2,%1\n\tsbbl %0,%0"
+ asm volatile("lock; btc %2,%1\n\tsbbl %0,%0"
: "=r" (oldbit), "+m" (ADDR)
: "Ir" (nr) : "memory");
return oldbit;
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index 2e188d68397..aea284b4131 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -20,7 +20,8 @@
#include <asm/thread_info.h> /* for TS_COMPAT */
#include <asm/unistd.h>
-extern const unsigned long sys_call_table[];
+typedef void (*sys_call_ptr_t)(void);
+extern const sys_call_ptr_t sys_call_table[];
/*
* Only the low 32 bits of orig_ax are meaningful, so we return int.
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 2917a6452c4..592a6a672e0 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -24,7 +24,7 @@ asmlinkage long sys_iopl(unsigned int);
asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
/* kernel/signal.c */
-long sys_rt_sigreturn(void);
+asmlinkage long sys_rt_sigreturn(void);
/* kernel/tls.c */
asmlinkage long sys_set_thread_area(struct user_desc __user *);
@@ -34,7 +34,7 @@ asmlinkage long sys_get_thread_area(struct user_desc __user *);
#ifdef CONFIG_X86_32
/* kernel/signal.c */
-unsigned long sys_sigreturn(void);
+asmlinkage unsigned long sys_sigreturn(void);
/* kernel/vm86_32.c */
asmlinkage long sys_vm86old(struct vm86_struct __user *);
@@ -44,7 +44,7 @@ asmlinkage long sys_vm86(unsigned long, unsigned long);
/* X86_64 only */
/* kernel/process_64.c */
-long sys_arch_prctl(int, unsigned long);
+asmlinkage long sys_arch_prctl(int, unsigned long);
/* kernel/sys_x86_64.c */
asmlinkage long sys_mmap(unsigned long, unsigned long, unsigned long,
diff --git a/arch/x86/include/asm/sysfb.h b/arch/x86/include/asm/sysfb.h
new file mode 100644
index 00000000000..2aeb3e25579
--- /dev/null
+++ b/arch/x86/include/asm/sysfb.h
@@ -0,0 +1,98 @@
+#ifndef _ARCH_X86_KERNEL_SYSFB_H
+#define _ARCH_X86_KERNEL_SYSFB_H
+
+/*
+ * Generic System Framebuffers on x86
+ * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_data/simplefb.h>
+#include <linux/screen_info.h>
+
+enum {
+ M_I17, /* 17-Inch iMac */
+ M_I20, /* 20-Inch iMac */
+ M_I20_SR, /* 20-Inch iMac (Santa Rosa) */
+ M_I24, /* 24-Inch iMac */
+ M_I24_8_1, /* 24-Inch iMac, 8,1th gen */
+ M_I24_10_1, /* 24-Inch iMac, 10,1th gen */
+ M_I27_11_1, /* 27-Inch iMac, 11,1th gen */
+ M_MINI, /* Mac Mini */
+ M_MINI_3_1, /* Mac Mini, 3,1th gen */
+ M_MINI_4_1, /* Mac Mini, 4,1th gen */
+ M_MB, /* MacBook */
+ M_MB_2, /* MacBook, 2nd rev. */
+ M_MB_3, /* MacBook, 3rd rev. */
+ M_MB_5_1, /* MacBook, 5th rev. */
+ M_MB_6_1, /* MacBook, 6th rev. */
+ M_MB_7_1, /* MacBook, 7th rev. */
+ M_MB_SR, /* MacBook, 2nd gen, (Santa Rosa) */
+ M_MBA, /* MacBook Air */
+ M_MBA_3, /* Macbook Air, 3rd rev */
+ M_MBP, /* MacBook Pro */
+ M_MBP_2, /* MacBook Pro 2nd gen */
+ M_MBP_2_2, /* MacBook Pro 2,2nd gen */
+ M_MBP_SR, /* MacBook Pro (Santa Rosa) */
+ M_MBP_4, /* MacBook Pro, 4th gen */
+ M_MBP_5_1, /* MacBook Pro, 5,1th gen */
+ M_MBP_5_2, /* MacBook Pro, 5,2th gen */
+ M_MBP_5_3, /* MacBook Pro, 5,3rd gen */
+ M_MBP_6_1, /* MacBook Pro, 6,1th gen */
+ M_MBP_6_2, /* MacBook Pro, 6,2th gen */
+ M_MBP_7_1, /* MacBook Pro, 7,1th gen */
+ M_MBP_8_2, /* MacBook Pro, 8,2nd gen */
+ M_UNKNOWN /* placeholder */
+};
+
+struct efifb_dmi_info {
+ char *optname;
+ unsigned long base;
+ int stride;
+ int width;
+ int height;
+ int flags;
+};
+
+#ifdef CONFIG_EFI
+
+extern struct efifb_dmi_info efifb_dmi_list[];
+void sysfb_apply_efi_quirks(void);
+
+#else /* CONFIG_EFI */
+
+static inline void sysfb_apply_efi_quirks(void)
+{
+}
+
+#endif /* CONFIG_EFI */
+
+#ifdef CONFIG_X86_SYSFB
+
+bool parse_mode(const struct screen_info *si,
+ struct simplefb_platform_data *mode);
+int create_simplefb(const struct screen_info *si,
+ const struct simplefb_platform_data *mode);
+
+#else /* CONFIG_X86_SYSFB */
+
+static inline bool parse_mode(const struct screen_info *si,
+ struct simplefb_platform_data *mode)
+{
+ return false;
+}
+
+static inline int create_simplefb(const struct screen_info *si,
+ const struct simplefb_platform_data *mode)
+{
+ return -EINVAL;
+}
+
+#endif /* CONFIG_X86_SYSFB */
+
+#endif /* _ARCH_X86_KERNEL_SYSFB_H */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 095b21507b6..d35f24e231c 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -124,9 +124,6 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
-
-/* indicates that pointers to the topology cpumask_t maps are valid */
-#define arch_provides_topology_pointers yes
#endif
static inline void arch_fix_phys_package_id(int num, u32 slot)
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index 88eae2aec61..7036cb60cd8 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -6,11 +6,7 @@
#include <asm/debugreg.h>
#include <asm/siginfo.h> /* TRAP_TRACE, ... */
-#ifdef CONFIG_X86_32
-#define dotraplinkage
-#else
-#define dotraplinkage asmlinkage
-#endif
+#define dotraplinkage __visible
asmlinkage void divide_error(void);
asmlinkage void debug(void);
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index c91e8b9d588..235be70d5bb 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -49,6 +49,7 @@ extern void tsc_init(void);
extern void mark_tsc_unstable(char *reason);
extern int unsynchronized_tsc(void);
extern int check_tsc_unstable(void);
+extern int check_tsc_disabled(void);
extern unsigned long native_calibrate_tsc(void);
extern int tsc_clocksource_reliable;
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 5ee26875bae..5838fa911aa 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -153,16 +153,19 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
* Careful: we have to cast the result to the type of the pointer
* for sign reasons.
*
- * The use of %edx as the register specifier is a bit of a
+ * The use of _ASM_DX as the register specifier is a bit of a
* simplification, as gcc only cares about it as the starting point
* and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
* (%ecx being the next register in gcc's x86 register sequence), and
* %rdx on 64 bits.
+ *
+ * Clang/LLVM cares about the size of the register, but still wants
+ * the base register for something that ends up being a pair.
*/
#define get_user(x, ptr) \
({ \
int __ret_gu; \
- register __inttype(*(ptr)) __val_gu asm("%edx"); \
+ register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
__chk_user_ptr(ptr); \
might_fault(); \
asm volatile("call __get_user_%P3" \
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index f3e01a2cbaa..966502d4682 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -387,6 +387,7 @@ enum vmcs_field {
#define VMX_EPT_EXTENT_INDIVIDUAL_ADDR 0
#define VMX_EPT_EXTENT_CONTEXT 1
#define VMX_EPT_EXTENT_GLOBAL 2
+#define VMX_EPT_EXTENT_SHIFT 24
#define VMX_EPT_EXECUTE_ONLY_BIT (1ull)
#define VMX_EPT_PAGE_WALK_4_BIT (1ull << 6)
@@ -394,6 +395,7 @@ enum vmcs_field {
#define VMX_EPTP_WB_BIT (1ull << 14)
#define VMX_EPT_2MB_PAGE_BIT (1ull << 16)
#define VMX_EPT_1GB_PAGE_BIT (1ull << 17)
+#define VMX_EPT_INVEPT_BIT (1ull << 20)
#define VMX_EPT_AD_BIT (1ull << 21)
#define VMX_EPT_EXTENT_CONTEXT_BIT (1ull << 25)
#define VMX_EPT_EXTENT_GLOBAL_BIT (1ull << 26)
diff --git a/arch/x86/include/asm/vvar.h b/arch/x86/include/asm/vvar.h
index de656ac2af4..d76ac40da20 100644
--- a/arch/x86/include/asm/vvar.h
+++ b/arch/x86/include/asm/vvar.h
@@ -35,7 +35,7 @@
#define DEFINE_VVAR(type, name) \
type name \
- __attribute__((section(".vvar_" #name), aligned(16)))
+ __attribute__((section(".vvar_" #name), aligned(16))) __visible
#define VVAR(name) (*vvaraddr_ ## name)
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index ca842f2769e..608a79d5a46 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -7,6 +7,7 @@ enum ipi_vector {
XEN_CALL_FUNCTION_SINGLE_VECTOR,
XEN_SPIN_UNLOCK_VECTOR,
XEN_IRQ_WORK_VECTOR,
+ XEN_NMI_VECTOR,
XEN_NR_IPIS,
};
diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h
index 125f344f06a..d866959e568 100644
--- a/arch/x86/include/asm/xen/hypervisor.h
+++ b/arch/x86/include/asm/xen/hypervisor.h
@@ -40,21 +40,7 @@ extern struct start_info *xen_start_info;
static inline uint32_t xen_cpuid_base(void)
{
- uint32_t base, eax, ebx, ecx, edx;
- char signature[13];
-
- for (base = 0x40000000; base < 0x40010000; base += 0x100) {
- cpuid(base, &eax, &ebx, &ecx, &edx);
- *(uint32_t *)(signature + 0) = ebx;
- *(uint32_t *)(signature + 4) = ecx;
- *(uint32_t *)(signature + 8) = edx;
- signature[12] = 0;
-
- if (!strcmp("XenVMMXenVMM", signature) && ((eax - base) >= 2))
- return base;
- }
-
- return 0;
+ return hypervisor_cpuid_base("XenVMMXenVMM", 2);
}
#ifdef CONFIG_XEN
diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h
index 06fdbd987e9..94dc8ca434e 100644
--- a/arch/x86/include/uapi/asm/kvm_para.h
+++ b/arch/x86/include/uapi/asm/kvm_para.h
@@ -23,6 +23,7 @@
#define KVM_FEATURE_ASYNC_PF 4
#define KVM_FEATURE_STEAL_TIME 5
#define KVM_FEATURE_PV_EOI 6
+#define KVM_FEATURE_PV_UNHALT 7
/* The last 8 bits are used to indicate how to interpret the flags field
* in pvclock structure. If no bits are set, all flags are ignored.
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index d651082c7cf..0e79420376e 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -65,6 +65,7 @@
#define EXIT_REASON_EOI_INDUCED 45
#define EXIT_REASON_EPT_VIOLATION 48
#define EXIT_REASON_EPT_MISCONFIG 49
+#define EXIT_REASON_INVEPT 50
#define EXIT_REASON_PREEMPTION_TIMER 52
#define EXIT_REASON_WBINVD 54
#define EXIT_REASON_XSETBV 55
@@ -106,12 +107,13 @@
{ EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \
{ EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \
{ EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \
+ { EXIT_REASON_INVEPT, "INVEPT" }, \
+ { EXIT_REASON_PREEMPTION_TIMER, "PREEMPTION_TIMER" }, \
{ EXIT_REASON_WBINVD, "WBINVD" }, \
{ EXIT_REASON_APIC_WRITE, "APIC_WRITE" }, \
{ EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \
{ EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
{ EXIT_REASON_INVD, "INVD" }, \
- { EXIT_REASON_INVPCID, "INVPCID" }, \
- { EXIT_REASON_PREEMPTION_TIMER, "PREEMPTION_TIMER" }
+ { EXIT_REASON_INVPCID, "INVPCID" }
#endif /* _UAPIVMX_H */
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 88d99ea7772..a5408b965c9 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -103,6 +103,9 @@ obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
obj-$(CONFIG_OF) += devicetree.o
obj-$(CONFIG_UPROBES) += uprobes.o
+obj-y += sysfb.o
+obj-$(CONFIG_X86_SYSFB) += sysfb_simplefb.o
+obj-$(CONFIG_EFI) += sysfb_efi.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
obj-$(CONFIG_TRACING) += tracepoint.o
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 2627a81253e..40c76604199 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -67,6 +67,7 @@ EXPORT_SYMBOL(acpi_pci_disabled);
int acpi_lapic;
int acpi_ioapic;
int acpi_strict;
+int acpi_disable_cmcff;
u8 acpi_sci_flags __initdata;
int acpi_sci_override_gsi __initdata;
@@ -141,16 +142,8 @@ static u32 irq_to_gsi(int irq)
}
/*
- * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END,
- * to map the target physical address. The problem is that set_fixmap()
- * provides a single page, and it is possible that the page is not
- * sufficient.
- * By using this area, we can map up to MAX_IO_APICS pages temporarily,
- * i.e. until the next __va_range() call.
- *
- * Important Safety Note: The fixed I/O APIC page numbers are *subtracted*
- * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and
- * count idx down while incrementing the phys address.
+ * This is just a simple wrapper around early_ioremap(),
+ * with sanity checks for phys == 0 and size == 0.
*/
char *__init __acpi_map_table(unsigned long phys, unsigned long size)
{
@@ -160,6 +153,7 @@ char *__init __acpi_map_table(unsigned long phys, unsigned long size)
return early_ioremap(phys, size);
}
+
void __init __acpi_unmap_table(char *map, unsigned long size)
{
if (!map || !size)
@@ -199,7 +193,7 @@ static void acpi_register_lapic(int id, u8 enabled)
{
unsigned int ver = 0;
- if (id >= (MAX_LOCAL_APIC-1)) {
+ if (id >= MAX_LOCAL_APIC) {
printk(KERN_INFO PREFIX "skipped apicid that is too big\n");
return;
}
@@ -1120,6 +1114,7 @@ int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
int ioapic;
int ioapic_pin;
struct io_apic_irq_attr irq_attr;
+ int ret;
if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
return gsi;
@@ -1149,7 +1144,9 @@ int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
set_io_apic_irq_attr(&irq_attr, ioapic, ioapic_pin,
trigger == ACPI_EDGE_SENSITIVE ? 0 : 1,
polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
- io_apic_set_pci_routing(dev, gsi_to_irq(gsi), &irq_attr);
+ ret = io_apic_set_pci_routing(dev, gsi_to_irq(gsi), &irq_attr);
+ if (ret < 0)
+ gsi = INT_MIN;
return gsi;
}
@@ -1626,6 +1623,10 @@ static int __init parse_acpi(char *arg)
/* "acpi=copy_dsdt" copys DSDT */
else if (strcmp(arg, "copy_dsdt") == 0) {
acpi_gbl_copy_dsdt_locally = 1;
+ }
+ /* "acpi=nocmcff" disables FF mode for corrected errors */
+ else if (strcmp(arg, "nocmcff") == 0) {
+ acpi_disable_cmcff = 1;
} else {
/* Core will printk when we return error. */
return -EINVAL;
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index c15cf9a25e2..15e8563e5c2 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -11,6 +11,7 @@
#include <linux/memory.h>
#include <linux/stop_machine.h>
#include <linux/slab.h>
+#include <linux/kdebug.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
@@ -596,97 +597,93 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
return addr;
}
-/*
- * Cross-modifying kernel text with stop_machine().
- * This code originally comes from immediate value.
- */
-static atomic_t stop_machine_first;
-static int wrote_text;
+static void do_sync_core(void *info)
+{
+ sync_core();
+}
-struct text_poke_params {
- struct text_poke_param *params;
- int nparams;
-};
+static bool bp_patching_in_progress;
+static void *bp_int3_handler, *bp_int3_addr;
-static int __kprobes stop_machine_text_poke(void *data)
+int poke_int3_handler(struct pt_regs *regs)
{
- struct text_poke_params *tpp = data;
- struct text_poke_param *p;
- int i;
+ /* bp_patching_in_progress */
+ smp_rmb();
- if (atomic_xchg(&stop_machine_first, 0)) {
- for (i = 0; i < tpp->nparams; i++) {
- p = &tpp->params[i];
- text_poke(p->addr, p->opcode, p->len);
- }
- smp_wmb(); /* Make sure other cpus see that this has run */
- wrote_text = 1;
- } else {
- while (!wrote_text)
- cpu_relax();
- smp_mb(); /* Load wrote_text before following execution */
- }
+ if (likely(!bp_patching_in_progress))
+ return 0;
- for (i = 0; i < tpp->nparams; i++) {
- p = &tpp->params[i];
- flush_icache_range((unsigned long)p->addr,
- (unsigned long)p->addr + p->len);
- }
- /*
- * Intel Archiecture Software Developer's Manual section 7.1.3 specifies
- * that a core serializing instruction such as "cpuid" should be
- * executed on _each_ core before the new instruction is made visible.
- */
- sync_core();
- return 0;
-}
+ if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
+ return 0;
+
+ /* set up the specified breakpoint handler */
+ regs->ip = (unsigned long) bp_int3_handler;
+
+ return 1;
-/**
- * text_poke_smp - Update instructions on a live kernel on SMP
- * @addr: address to modify
- * @opcode: source of the copy
- * @len: length to copy
- *
- * Modify multi-byte instruction by using stop_machine() on SMP. This allows
- * user to poke/set multi-byte text on SMP. Only non-NMI/MCE code modifying
- * should be allowed, since stop_machine() does _not_ protect code against
- * NMI and MCE.
- *
- * Note: Must be called under get_online_cpus() and text_mutex.
- */
-void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len)
-{
- struct text_poke_params tpp;
- struct text_poke_param p;
-
- p.addr = addr;
- p.opcode = opcode;
- p.len = len;
- tpp.params = &p;
- tpp.nparams = 1;
- atomic_set(&stop_machine_first, 1);
- wrote_text = 0;
- /* Use __stop_machine() because the caller already got online_cpus. */
- __stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
- return addr;
}
/**
- * text_poke_smp_batch - Update instructions on a live kernel on SMP
- * @params: an array of text_poke parameters
- * @n: the number of elements in params.
+ * text_poke_bp() -- update instructions on live kernel on SMP
+ * @addr: address to patch
+ * @opcode: opcode of new instruction
+ * @len: length to copy
+ * @handler: address to jump to when the temporary breakpoint is hit
*
- * Modify multi-byte instruction by using stop_machine() on SMP. Since the
- * stop_machine() is heavy task, it is better to aggregate text_poke requests
- * and do it once if possible.
+ * Modify a multi-byte instruction by using an int3 breakpoint on SMP.
+ * We completely avoid stop_machine() here, and achieve the
+ * synchronization using the int3 breakpoint instead.
*
- * Note: Must be called under get_online_cpus() and text_mutex.
+ * The way it is done:
+ * - add an int3 trap to the address that will be patched
+ * - sync cores
+ * - update all but the first byte of the patched range
+ * - sync cores
+ * - replace the first byte (int3) with the first byte of the
+ * replacement opcode
+ * - sync cores
+ *
+ * Note: must be called under text_mutex.
*/
-void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
+void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
{
- struct text_poke_params tpp = {.params = params, .nparams = n};
+ unsigned char int3 = 0xcc;
+
+ bp_int3_handler = handler;
+ bp_int3_addr = (u8 *)addr + sizeof(int3);
+ bp_patching_in_progress = true;
+ /*
+ * Corresponding read barrier in int3 notifier for
+ * making sure the in_progress flag is correctly ordered wrt.
+ * patching
+ */
+ smp_wmb();
+
+ text_poke(addr, &int3, sizeof(int3));
- atomic_set(&stop_machine_first, 1);
- wrote_text = 0;
- __stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask);
+ on_each_cpu(do_sync_core, NULL, 1);
+
+ if (len - sizeof(int3) > 0) {
+ /* patch all but the first byte */
+ text_poke((char *)addr + sizeof(int3),
+ (const char *) opcode + sizeof(int3),
+ len - sizeof(int3));
+ /*
+ * According to Intel, this core syncing is very likely
+ * not necessary and we'd be safe even without it. But
+ * better safe than sorry (plus there's not only Intel).
+ */
+ on_each_cpu(do_sync_core, NULL, 1);
+ }
+
+ /* patch the first byte */
+ text_poke(addr, opcode, sizeof(int3));
+
+ on_each_cpu(do_sync_core, NULL, 1);
+
+ bp_patching_in_progress = false;
+ smp_wmb();
+
+ return addr;
}
+
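The bp_patching_in_progress handshake in text_poke_bp()/poke_int3_handler() above is a publish-then-flag pattern: the handler address is stored before the flag is raised, and the reader's smp_rmb() pairs with the writer's smp_wmb(). The following is a small sketch of the same idea expressed with C11 release/acquire atomics instead of the kernel's explicit barriers; all names here are made up for the sketch.

/*
 * Sketch: publish a payload behind a flag with release/acquire ordering,
 * mirroring the bp_patching_in_progress / smp_wmb() / smp_rmb() pairing.
 * Writers are assumed to be serialized (text_mutex plays that role above).
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

static void *patch_handler;			/* payload: where the trap handler should jump */
static atomic_bool patching_in_progress;	/* flag the readers test first */

static void begin_patching(void *handler)
{
	patch_handler = handler;
	/* release: the payload is visible before the flag can be observed as true */
	atomic_store_explicit(&patching_in_progress, true, memory_order_release);
}

static void *handler_if_patching(void)
{
	/* acquire: seeing the flag guarantees we also see the payload written before it */
	if (!atomic_load_explicit(&patching_in_progress, memory_order_acquire))
		return NULL;
	return patch_handler;
}

static void end_patching(void)
{
	atomic_store_explicit(&patching_in_progress, false, memory_order_release);
}

int main(void)
{
	int dummy;

	begin_patching(&dummy);
	void *h = handler_if_patching();	/* &dummy once the flag is observed */
	end_patching();
	return h == &dummy ? 0 : 1;
}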
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 3048ded1b59..59554dca96e 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -20,6 +20,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
{}
};
@@ -27,6 +28,7 @@ EXPORT_SYMBOL(amd_nb_misc_ids);
static const struct pci_device_id amd_nb_link_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
{}
};
@@ -81,13 +83,20 @@ int amd_cache_northbridges(void)
next_northbridge(misc, amd_nb_misc_ids);
node_to_amd_nb(i)->link = link =
next_northbridge(link, amd_nb_link_ids);
- }
+ }
+ /* GART present only on Fam15h up to model 0fh */
if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
- boot_cpu_data.x86 == 0x15)
+ (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
amd_northbridges.flags |= AMD_NB_GART;
/*
+ * Check for L3 cache presence.
+ */
+ if (!cpuid_edx(0x80000006))
+ return 0;
+
+ /*
* Some CPU families support L3 Cache Index Disable. There are some
* limitations because of E382 and E388 on family 0x10.
*/
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index eca89c53a7f..a7eb82d9b01 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -913,7 +913,7 @@ static void local_apic_timer_interrupt(void)
* [ if a single-CPU system runs an SMP kernel then we call the local
* interrupt as well. Thus we cannot inline the local irq ... ]
*/
-void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
+__visible void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
@@ -932,7 +932,7 @@ void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
set_irq_regs(old_regs);
}
-void __irq_entry smp_trace_apic_timer_interrupt(struct pt_regs *regs)
+__visible void __irq_entry smp_trace_apic_timer_interrupt(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
@@ -1946,14 +1946,14 @@ static inline void __smp_spurious_interrupt(void)
"should never happen.\n", smp_processor_id());
}
-void smp_spurious_interrupt(struct pt_regs *regs)
+__visible void smp_spurious_interrupt(struct pt_regs *regs)
{
entering_irq();
__smp_spurious_interrupt();
exiting_irq();
}
-void smp_trace_spurious_interrupt(struct pt_regs *regs)
+__visible void smp_trace_spurious_interrupt(struct pt_regs *regs)
{
entering_irq();
trace_spurious_apic_entry(SPURIOUS_APIC_VECTOR);
@@ -2002,14 +2002,14 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
}
-void smp_error_interrupt(struct pt_regs *regs)
+__visible void smp_error_interrupt(struct pt_regs *regs)
{
entering_irq();
__smp_error_interrupt(regs);
exiting_irq();
}
-void smp_trace_error_interrupt(struct pt_regs *regs)
+__visible void smp_trace_error_interrupt(struct pt_regs *regs)
{
entering_irq();
trace_error_apic_entry(ERROR_APIC_VECTOR);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 9ed796ccc32..e63a5bd2a78 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1534,6 +1534,11 @@ void intel_ir_io_apic_print_entries(unsigned int apic,
}
}
+void ioapic_zap_locks(void)
+{
+ raw_spin_lock_init(&ioapic_lock);
+}
+
__apicdebuginit(void) print_IO_APIC(int ioapic_idx)
{
union IO_APIC_reg_00 reg_00;
@@ -3375,12 +3380,15 @@ int io_apic_setup_irq_pin_once(unsigned int irq, int node,
{
unsigned int ioapic_idx = attr->ioapic, pin = attr->ioapic_pin;
int ret;
+ struct IO_APIC_route_entry orig_entry;
/* Avoid redundant programming */
if (test_bit(pin, ioapics[ioapic_idx].pin_programmed)) {
- pr_debug("Pin %d-%d already programmed\n",
- mpc_ioapic_id(ioapic_idx), pin);
- return 0;
+ pr_debug("Pin %d-%d already programmed\n", mpc_ioapic_id(ioapic_idx), pin);
+ orig_entry = ioapic_read_entry(attr->ioapic, pin);
+ if (attr->trigger == orig_entry.trigger && attr->polarity == orig_entry.polarity)
+ return 0;
+ return -EBUSY;
}
ret = io_apic_setup_irq_pin(irq, node, attr);
if (!ret)
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 53a4e274484..3ab03430211 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -392,7 +392,7 @@ static struct cpuidle_device apm_cpuidle_device;
/*
* Local variables
*/
-static struct {
+__visible struct {
unsigned long offset;
unsigned short segment;
} apm_bios_entry;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index f654ecefea5..903a264af98 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -66,8 +66,8 @@ static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
* performance at the same time..
*/
-extern void vide(void);
-__asm__(".align 4\nvide: ret");
+extern __visible void vide(void);
+__asm__(".globl vide\n\t.align 4\nvide: ret");
static void init_amd_k5(struct cpuinfo_x86 *c)
{
@@ -512,7 +512,7 @@ static void early_init_amd(struct cpuinfo_x86 *c)
static const int amd_erratum_383[];
static const int amd_erratum_400[];
-static bool cpu_has_amd_erratum(const int *erratum);
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
static void init_amd(struct cpuinfo_x86 *c)
{
@@ -729,11 +729,11 @@ static void init_amd(struct cpuinfo_x86 *c)
value &= ~(1ULL << 24);
wrmsrl_safe(MSR_AMD64_BU_CFG2, value);
- if (cpu_has_amd_erratum(amd_erratum_383))
+ if (cpu_has_amd_erratum(c, amd_erratum_383))
set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH);
}
- if (cpu_has_amd_erratum(amd_erratum_400))
+ if (cpu_has_amd_erratum(c, amd_erratum_400))
set_cpu_bug(c, X86_BUG_AMD_APIC_C1E);
rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
@@ -878,23 +878,13 @@ static const int amd_erratum_400[] =
static const int amd_erratum_383[] =
AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
-static bool cpu_has_amd_erratum(const int *erratum)
+
+static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
- struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info);
int osvw_id = *erratum++;
u32 range;
u32 ms;
- /*
- * If called early enough that current_cpu_data hasn't been initialized
- * yet, fall back to boot_cpu_data.
- */
- if (cpu->x86 == 0)
- cpu = &boot_cpu_data;
-
- if (cpu->x86_vendor != X86_VENDOR_AMD)
- return false;
-
if (osvw_id >= 0 && osvw_id < 65536 &&
cpu_has(cpu, X86_FEATURE_OSVW)) {
u64 osvw_len;
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 25eb2747b06..2793d1f095a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1076,7 +1076,7 @@ struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
(unsigned long) debug_idt_table };
DEFINE_PER_CPU_FIRST(union irq_stack_union,
- irq_stack_union) __aligned(PAGE_SIZE);
+ irq_stack_union) __aligned(PAGE_SIZE) __visible;
/*
* The following four percpu variables are hot. Align current_task to
@@ -1093,7 +1093,7 @@ EXPORT_PER_CPU_SYMBOL(kernel_stack);
DEFINE_PER_CPU(char *, irq_stack_ptr) =
init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
-DEFINE_PER_CPU(unsigned int, irq_count) = -1;
+DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 87279212d31..36ce402a3fa 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -25,11 +25,6 @@
#include <asm/processor.h>
#include <asm/hypervisor.h>
-/*
- * Hypervisor detect order. This is specified explicitly here because
- * some hypervisors might implement compatibility modes for other
- * hypervisors and therefore need to be detected in specific sequence.
- */
static const __initconst struct hypervisor_x86 * const hypervisors[] =
{
#ifdef CONFIG_XEN_PVHVM
@@ -49,15 +44,19 @@ static inline void __init
detect_hypervisor_vendor(void)
{
const struct hypervisor_x86 *h, * const *p;
+ uint32_t pri, max_pri = 0;
for (p = hypervisors; p < hypervisors + ARRAY_SIZE(hypervisors); p++) {
h = *p;
- if (h->detect()) {
+ pri = h->detect();
+ if (pri != 0 && pri > max_pri) {
+ max_pri = pri;
x86_hyper = h;
- printk(KERN_INFO "Hypervisor detected: %s\n", h->name);
- break;
}
}
+
+ if (max_pri)
+ printk(KERN_INFO "Hypervisor detected: %s\n", x86_hyper->name);
}
void init_hypervisor(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h
index 5b7d4fa5d3b..09edd0b65fe 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-internal.h
+++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h
@@ -25,15 +25,18 @@ int mce_severity(struct mce *a, int tolerant, char **msg);
struct dentry *mce_get_debugfs_dir(void);
extern struct mce_bank *mce_banks;
+extern mce_banks_t mce_banks_ce_disabled;
#ifdef CONFIG_X86_MCE_INTEL
unsigned long mce_intel_adjust_timer(unsigned long interval);
void mce_intel_cmci_poll(void);
void mce_intel_hcpu_update(unsigned long cpu);
+void cmci_disable_bank(int bank);
#else
# define mce_intel_adjust_timer mce_adjust_timer_default
static inline void mce_intel_cmci_poll(void) { }
static inline void mce_intel_hcpu_update(unsigned long cpu) { }
+static inline void cmci_disable_bank(int bank) { }
#endif
void mce_timer_kick(unsigned long interval);
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index e2703520d12..c370e1c4468 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -111,8 +111,8 @@ static struct severity {
#ifdef CONFIG_MEMORY_FAILURE
MCESEV(
KEEP, "Action required but unaffected thread is continuable",
- SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR),
- MCGMASK(MCG_STATUS_RIPV, MCG_STATUS_RIPV)
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR, MCI_UC_SAR|MCI_ADDR),
+ MCGMASK(MCG_STATUS_RIPV|MCG_STATUS_EIPV, MCG_STATUS_RIPV)
),
MCESEV(
AR, "Action required: data load error in a user process",
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 87a65c939bc..b3218cdee95 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -97,6 +97,15 @@ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
};
+/*
+ * MCA banks controlled through firmware first for corrected errors.
+ * This is a global list of banks for which we won't enable CMCI and we
+ * won't poll. Firmware controls these banks and is responsible for
+ * reporting corrected errors through GHES. Uncorrected/recoverable
+ * errors are still notified through a machine check.
+ */
+mce_banks_t mce_banks_ce_disabled;
+
static DEFINE_PER_CPU(struct work_struct, mce_work);
static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
@@ -1935,6 +1944,25 @@ static struct miscdevice mce_chrdev_device = {
&mce_chrdev_ops,
};
+static void __mce_disable_bank(void *arg)
+{
+ int bank = *((int *)arg);
+ __clear_bit(bank, __get_cpu_var(mce_poll_banks));
+ cmci_disable_bank(bank);
+}
+
+void mce_disable_bank(int bank)
+{
+ if (bank >= mca_cfg.banks) {
+ pr_warn(FW_BUG
+ "Ignoring request to disable invalid MCA bank %d.\n",
+ bank);
+ return;
+ }
+ set_bit(bank, mce_banks_ce_disabled);
+ on_each_cpu(__mce_disable_bank, &bank, 1);
+}
+
/*
* mce=off Disables machine check
* mce=no_cmci Disables CMCI
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index d56405309dc..4cfe0458ca6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -203,6 +203,10 @@ static void cmci_discover(int banks)
if (test_bit(i, owned))
continue;
+ /* Skip banks in firmware first mode */
+ if (test_bit(i, mce_banks_ce_disabled))
+ continue;
+
rdmsrl(MSR_IA32_MCx_CTL2(i), val);
/* Already owned by someone else? */
@@ -271,6 +275,19 @@ void cmci_recheck(void)
local_irq_restore(flags);
}
+/* Caller must hold the lock on cmci_discover_lock */
+static void __cmci_disable_bank(int bank)
+{
+ u64 val;
+
+ if (!test_bit(bank, __get_cpu_var(mce_banks_owned)))
+ return;
+ rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
+ val &= ~MCI_CTL2_CMCI_EN;
+ wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
+ __clear_bit(bank, __get_cpu_var(mce_banks_owned));
+}
+
/*
* Disable CMCI on this CPU for all banks it owns when it goes down.
* This allows other CPUs to claim the banks on rediscovery.
@@ -280,20 +297,12 @@ void cmci_clear(void)
unsigned long flags;
int i;
int banks;
- u64 val;
if (!cmci_supported(&banks))
return;
raw_spin_lock_irqsave(&cmci_discover_lock, flags);
- for (i = 0; i < banks; i++) {
- if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
- continue;
- /* Disable CMCI */
- rdmsrl(MSR_IA32_MCx_CTL2(i), val);
- val &= ~MCI_CTL2_CMCI_EN;
- wrmsrl(MSR_IA32_MCx_CTL2(i), val);
- __clear_bit(i, __get_cpu_var(mce_banks_owned));
- }
+ for (i = 0; i < banks; i++)
+ __cmci_disable_bank(i);
raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}
@@ -327,6 +336,19 @@ void cmci_reenable(void)
cmci_discover(banks);
}
+void cmci_disable_bank(int bank)
+{
+ int banks;
+ unsigned long flags;
+
+ if (!cmci_supported(&banks))
+ return;
+
+ raw_spin_lock_irqsave(&cmci_discover_lock, flags);
+ __cmci_disable_bank(bank);
+ raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
+}
+
static void intel_init_cmci(void)
{
int banks;
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index 8f4be53ea04..71a39f3621b 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -27,20 +27,23 @@
struct ms_hyperv_info ms_hyperv;
EXPORT_SYMBOL_GPL(ms_hyperv);
-static bool __init ms_hyperv_platform(void)
+static uint32_t __init ms_hyperv_platform(void)
{
u32 eax;
u32 hyp_signature[3];
if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
- return false;
+ return 0;
cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
&eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);
- return eax >= HYPERV_CPUID_MIN &&
- eax <= HYPERV_CPUID_MAX &&
- !memcmp("Microsoft Hv", hyp_signature, 12);
+ if (eax >= HYPERV_CPUID_MIN &&
+ eax <= HYPERV_CPUID_MAX &&
+ !memcmp("Microsoft Hv", hyp_signature, 12))
+ return HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
+
+ return 0;
}
static cycle_t read_hv_clock(struct clocksource *arg)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index a7c7305030c..8355c84b972 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1884,6 +1884,7 @@ static struct pmu pmu = {
void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
{
userpg->cap_usr_time = 0;
+ userpg->cap_usr_time_zero = 0;
userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc;
userpg->pmc_width = x86_pmu.cntval_bits;
@@ -1897,6 +1898,11 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
userpg->time_mult = this_cpu_read(cyc2ns);
userpg->time_shift = CYC2NS_SCALE_FACTOR;
userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
+
+ if (sched_clock_stable && !check_tsc_disabled()) {
+ userpg->cap_usr_time_zero = 1;
+ userpg->time_zero = this_cpu_read(cyc2ns_offset);
+ }
}
/*
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 97e557bc4c9..cc16faae053 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -641,6 +641,8 @@ extern struct event_constraint intel_core2_pebs_event_constraints[];
extern struct event_constraint intel_atom_pebs_event_constraints[];
+extern struct event_constraint intel_slm_pebs_event_constraints[];
+
extern struct event_constraint intel_nehalem_pebs_event_constraints[];
extern struct event_constraint intel_westmere_pebs_event_constraints[];
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 4cbe03287b0..beeb7cc0704 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -347,8 +347,7 @@ static struct amd_nb *amd_alloc_nb(int cpu)
struct amd_nb *nb;
int i;
- nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
- cpu_to_node(cpu));
+ nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu));
if (!nb)
return NULL;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index fbc9210b45b..0abf6742a8b 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -81,7 +81,8 @@ static struct event_constraint intel_nehalem_event_constraints[] __read_mostly =
static struct extra_reg intel_nehalem_extra_regs[] __read_mostly =
{
- INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
+ /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
+ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
EVENT_EXTRA_END
};
@@ -143,8 +144,9 @@ static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
static struct extra_reg intel_westmere_extra_regs[] __read_mostly =
{
- INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
- INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
+ /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
+ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0),
+ INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1),
INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b),
EVENT_EXTRA_END
};
@@ -162,16 +164,27 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
EVENT_CONSTRAINT_END
};
+static struct event_constraint intel_slm_event_constraints[] __read_mostly =
+{
+ FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
+ FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
+ FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF */
+ FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
+ EVENT_CONSTRAINT_END
+};
+
static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
- INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
- INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
+ /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
+ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
+ INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
EVENT_EXTRA_END
};
static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
- INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
- INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
+ /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
+ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
+ INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
EVENT_EXTRA_END
};
@@ -882,6 +895,140 @@ static __initconst const u64 atom_hw_cache_event_ids
},
};
+static struct extra_reg intel_slm_extra_regs[] __read_mostly =
+{
+ /* must define OFFCORE_RSP_X first, see intel_fixup_er() */
+ INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffff, RSP_0),
+ INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffff, RSP_1),
+ EVENT_EXTRA_END
+};
+
+#define SLM_DMND_READ SNB_DMND_DATA_RD
+#define SLM_DMND_WRITE SNB_DMND_RFO
+#define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO)
+
+#define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM)
+#define SLM_LLC_ACCESS SNB_RESP_ANY
+#define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM)
+
+static __initconst const u64 slm_hw_cache_extra_regs
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
+ [ C(RESULT_MISS) ] = SLM_DMND_READ|SLM_LLC_MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
+ [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS,
+ [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS,
+ },
+ },
+};
+
+static __initconst const u64 slm_hw_cache_event_ids
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(L1I ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */
+ [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(LL ) ] = {
+ [ C(OP_READ) ] = {
+ /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
+ [ C(RESULT_ACCESS) ] = 0x01b7,
+ /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
+ [ C(RESULT_MISS) ] = 0x01b7,
+ },
+ [ C(OP_WRITE) ] = {
+ /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
+ [ C(RESULT_ACCESS) ] = 0x01b7,
+ /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
+ [ C(RESULT_MISS) ] = 0x01b7,
+ },
+ [ C(OP_PREFETCH) ] = {
+ /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
+ [ C(RESULT_ACCESS) ] = 0x01b7,
+ /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
+ [ C(RESULT_MISS) ] = 0x01b7,
+ },
+ },
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
+ [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(BPU ) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
+ [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
{
/* user explicitly requested branch sampling */
@@ -1301,11 +1448,11 @@ static void intel_fixup_er(struct perf_event *event, int idx)
if (idx == EXTRA_REG_RSP_0) {
event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
- event->hw.config |= 0x01b7;
+ event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
} else if (idx == EXTRA_REG_RSP_1) {
event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
- event->hw.config |= 0x01bb;
+ event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
}
}
@@ -2176,6 +2323,21 @@ __init int intel_pmu_init(void)
pr_cont("Atom events, ");
break;
+ case 55: /* Atom 22nm "Silvermont" */
+ memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
+ sizeof(hw_cache_extra_regs));
+
+ intel_pmu_lbr_init_atom();
+
+ x86_pmu.event_constraints = intel_slm_event_constraints;
+ x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
+ x86_pmu.extra_regs = intel_slm_extra_regs;
+ x86_pmu.er_flags |= ERF_HAS_RSP_1;
+ pr_cont("Silvermont events, ");
+ break;
+
case 37: /* 32 nm nehalem, "Clarkdale" */
case 44: /* 32 nm nehalem, "Gulftown" */
case 47: /* 32 nm Xeon E7 */
@@ -2270,6 +2432,7 @@ __init int intel_pmu_init(void)
case 70:
case 71:
case 63:
+ case 69:
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
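
The OFFCORE_RSP entries above now carry the full event+umask code (0x01b7, 0x01bb, and 0x02b7 on Silvermont), and intel_fixup_er() reads that code back out of the table instead of hard-coding it. A condensed sketch of the resulting steering is shown below; it is simplified from the hunk above, not a drop-in replacement:

	/* Simplified: reroute an offcore-response alias onto the extra reg it
	 * was assigned, taking the event code from the per-model table. */
	static void fixup_er_sketch(struct hw_perf_event *hwc, int idx)
	{
		hwc->config &= ~INTEL_ARCH_EVENT_MASK;		/* drop old event+umask   */
		hwc->config |= x86_pmu.extra_regs[idx].event;	/* 0x01b7, 0x01bb, 0x02b7 */
		hwc->extra_reg.reg = (idx == EXTRA_REG_RSP_0) ?
				     MSR_OFFCORE_RSP_0 : MSR_OFFCORE_RSP_1;
	}

This is also why each table keeps its OFFCORE_RSP entries first: the fixup indexes extra_regs[] directly by EXTRA_REG_RSP_0/1.
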
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 3065c57a63c..63438aad177 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -224,7 +224,7 @@ static int alloc_pebs_buffer(int cpu)
if (!x86_pmu.pebs)
return 0;
- buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
+ buffer = kzalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL, node);
if (unlikely(!buffer))
return -ENOMEM;
@@ -262,7 +262,7 @@ static int alloc_bts_buffer(int cpu)
if (!x86_pmu.bts)
return 0;
- buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node);
+ buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node);
if (unlikely(!buffer))
return -ENOMEM;
@@ -295,7 +295,7 @@ static int alloc_ds_buffer(int cpu)
int node = cpu_to_node(cpu);
struct debug_store *ds;
- ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node);
+ ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node);
if (unlikely(!ds))
return -ENOMEM;
@@ -517,6 +517,32 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
EVENT_CONSTRAINT_END
};
+struct event_constraint intel_slm_pebs_event_constraints[] = {
+ INTEL_UEVENT_CONSTRAINT(0x0103, 0x1), /* REHABQ.LD_BLOCK_ST_FORWARD_PS */
+ INTEL_UEVENT_CONSTRAINT(0x0803, 0x1), /* REHABQ.LD_SPLITS_PS */
+ INTEL_UEVENT_CONSTRAINT(0x0204, 0x1), /* MEM_UOPS_RETIRED.L2_HIT_LOADS_PS */
+ INTEL_UEVENT_CONSTRAINT(0x0404, 0x1), /* MEM_UOPS_RETIRED.L2_MISS_LOADS_PS */
+ INTEL_UEVENT_CONSTRAINT(0x0804, 0x1), /* MEM_UOPS_RETIRED.DTLB_MISS_LOADS_PS */
+ INTEL_UEVENT_CONSTRAINT(0x2004, 0x1), /* MEM_UOPS_RETIRED.HITM_PS */
+ INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY_PS */
+ INTEL_UEVENT_CONSTRAINT(0x00c4, 0x1), /* BR_INST_RETIRED.ALL_BRANCHES_PS */
+ INTEL_UEVENT_CONSTRAINT(0x7ec4, 0x1), /* BR_INST_RETIRED.JCC_PS */
+ INTEL_UEVENT_CONSTRAINT(0xbfc4, 0x1), /* BR_INST_RETIRED.FAR_BRANCH_PS */
+ INTEL_UEVENT_CONSTRAINT(0xebc4, 0x1), /* BR_INST_RETIRED.NON_RETURN_IND_PS */
+ INTEL_UEVENT_CONSTRAINT(0xf7c4, 0x1), /* BR_INST_RETIRED.RETURN_PS */
+ INTEL_UEVENT_CONSTRAINT(0xf9c4, 0x1), /* BR_INST_RETIRED.CALL_PS */
+ INTEL_UEVENT_CONSTRAINT(0xfbc4, 0x1), /* BR_INST_RETIRED.IND_CALL_PS */
+ INTEL_UEVENT_CONSTRAINT(0xfdc4, 0x1), /* BR_INST_RETIRED.REL_CALL_PS */
+ INTEL_UEVENT_CONSTRAINT(0xfec4, 0x1), /* BR_INST_RETIRED.TAKEN_JCC_PS */
+ INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_MISP_RETIRED.ALL_BRANCHES_PS */
+ INTEL_UEVENT_CONSTRAINT(0x7ec5, 0x1), /* BR_INST_MISP_RETIRED.JCC_PS */
+ INTEL_UEVENT_CONSTRAINT(0xebc5, 0x1), /* BR_INST_MISP_RETIRED.NON_RETURN_IND_PS */
+ INTEL_UEVENT_CONSTRAINT(0xf7c5, 0x1), /* BR_INST_MISP_RETIRED.RETURN_PS */
+ INTEL_UEVENT_CONSTRAINT(0xfbc5, 0x1), /* BR_INST_MISP_RETIRED.IND_CALL_PS */
+ INTEL_UEVENT_CONSTRAINT(0xfec5, 0x1), /* BR_INST_MISP_RETIRED.TAKEN_JCC_PS */
+ EVENT_CONSTRAINT_END
+};
+
struct event_constraint intel_nehalem_pebs_event_constraints[] = {
INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */
INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index cad791dbde9..fd8011ed4dc 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -6,6 +6,8 @@ static struct intel_uncore_type **pci_uncores = empty_uncore;
/* pci bus to socket mapping */
static int pcibus_to_physid[256] = { [0 ... 255] = -1, };
+static struct pci_dev *extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX];
+
static DEFINE_RAW_SPINLOCK(uncore_box_lock);
/* mask of cpus that collect uncore events */
@@ -45,6 +47,24 @@ DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7");
DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15");
DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23");
DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31");
+DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51");
+DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35");
+DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31");
+DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17");
+DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12");
+DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8");
+DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4");
+DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63");
+DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51");
+DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35");
+DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31");
+DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17");
+DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12");
+DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8");
+DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4");
+DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31");
+DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63");
static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
{
@@ -281,7 +301,7 @@ static struct attribute *snbep_uncore_cbox_formats_attr[] = {
};
static struct attribute *snbep_uncore_pcu_formats_attr[] = {
- &format_attr_event.attr,
+ &format_attr_event_ext.attr,
&format_attr_occ_sel.attr,
&format_attr_edge.attr,
&format_attr_inv.attr,
@@ -301,6 +321,24 @@ static struct attribute *snbep_uncore_qpi_formats_attr[] = {
&format_attr_edge.attr,
&format_attr_inv.attr,
&format_attr_thresh8.attr,
+ &format_attr_match_rds.attr,
+ &format_attr_match_rnid30.attr,
+ &format_attr_match_rnid4.attr,
+ &format_attr_match_dnid.attr,
+ &format_attr_match_mc.attr,
+ &format_attr_match_opc.attr,
+ &format_attr_match_vnw.attr,
+ &format_attr_match0.attr,
+ &format_attr_match1.attr,
+ &format_attr_mask_rds.attr,
+ &format_attr_mask_rnid30.attr,
+ &format_attr_mask_rnid4.attr,
+ &format_attr_mask_dnid.attr,
+ &format_attr_mask_mc.attr,
+ &format_attr_mask_opc.attr,
+ &format_attr_mask_vnw.attr,
+ &format_attr_mask0.attr,
+ &format_attr_mask1.attr,
NULL,
};
@@ -314,8 +352,8 @@ static struct uncore_event_desc snbep_uncore_imc_events[] = {
static struct uncore_event_desc snbep_uncore_qpi_events[] = {
INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"),
INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"),
- INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"),
- INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"),
+ INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"),
+ INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"),
{ /* end: all zeroes */ },
};
@@ -356,13 +394,16 @@ static struct intel_uncore_ops snbep_uncore_msr_ops = {
SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
};
+#define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \
+ .init_box = snbep_uncore_pci_init_box, \
+ .disable_box = snbep_uncore_pci_disable_box, \
+ .enable_box = snbep_uncore_pci_enable_box, \
+ .disable_event = snbep_uncore_pci_disable_event, \
+ .read_counter = snbep_uncore_pci_read_counter
+
static struct intel_uncore_ops snbep_uncore_pci_ops = {
- .init_box = snbep_uncore_pci_init_box,
- .disable_box = snbep_uncore_pci_disable_box,
- .enable_box = snbep_uncore_pci_enable_box,
- .disable_event = snbep_uncore_pci_disable_event,
- .enable_event = snbep_uncore_pci_enable_event,
- .read_counter = snbep_uncore_pci_read_counter,
+ SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
+ .enable_event = snbep_uncore_pci_enable_event,
};
static struct event_constraint snbep_uncore_cbox_constraints[] = {
@@ -726,6 +767,61 @@ static struct intel_uncore_type *snbep_msr_uncores[] = {
NULL,
};
+enum {
+ SNBEP_PCI_QPI_PORT0_FILTER,
+ SNBEP_PCI_QPI_PORT1_FILTER,
+};
+
+static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+
+ if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) {
+ reg1->idx = 0;
+ reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0;
+ reg1->config = event->attr.config1;
+ reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0;
+ reg2->config = event->attr.config2;
+ }
+ return 0;
+}
+
+static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
+{
+ struct pci_dev *pdev = box->pci_dev;
+ struct hw_perf_event *hwc = &event->hw;
+ struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
+ struct hw_perf_event_extra *reg2 = &hwc->branch_reg;
+
+ if (reg1->idx != EXTRA_REG_NONE) {
+ int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
+ struct pci_dev *filter_pdev = extra_pci_dev[box->phys_id][idx];
+ WARN_ON_ONCE(!filter_pdev);
+ if (filter_pdev) {
+ pci_write_config_dword(filter_pdev, reg1->reg,
+ (u32)reg1->config);
+ pci_write_config_dword(filter_pdev, reg1->reg + 4,
+ (u32)(reg1->config >> 32));
+ pci_write_config_dword(filter_pdev, reg2->reg,
+ (u32)reg2->config);
+ pci_write_config_dword(filter_pdev, reg2->reg + 4,
+ (u32)(reg2->config >> 32));
+ }
+ }
+
+ pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN);
+}
+
+static struct intel_uncore_ops snbep_uncore_qpi_ops = {
+ SNBEP_UNCORE_PCI_OPS_COMMON_INIT(),
+ .enable_event = snbep_qpi_enable_event,
+ .hw_config = snbep_qpi_hw_config,
+ .get_constraint = uncore_get_constraint,
+ .put_constraint = uncore_put_constraint,
+};
+
#define SNBEP_UNCORE_PCI_COMMON_INIT() \
.perf_ctr = SNBEP_PCI_PMON_CTR0, \
.event_ctl = SNBEP_PCI_PMON_CTL0, \
@@ -755,17 +851,18 @@ static struct intel_uncore_type snbep_uncore_imc = {
};
static struct intel_uncore_type snbep_uncore_qpi = {
- .name = "qpi",
- .num_counters = 4,
- .num_boxes = 2,
- .perf_ctr_bits = 48,
- .perf_ctr = SNBEP_PCI_PMON_CTR0,
- .event_ctl = SNBEP_PCI_PMON_CTL0,
- .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
- .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
- .ops = &snbep_uncore_pci_ops,
- .event_descs = snbep_uncore_qpi_events,
- .format_group = &snbep_uncore_qpi_format_group,
+ .name = "qpi",
+ .num_counters = 4,
+ .num_boxes = 2,
+ .perf_ctr_bits = 48,
+ .perf_ctr = SNBEP_PCI_PMON_CTR0,
+ .event_ctl = SNBEP_PCI_PMON_CTL0,
+ .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK,
+ .box_ctl = SNBEP_PCI_PMON_BOX_CTL,
+ .num_shared_regs = 1,
+ .ops = &snbep_uncore_qpi_ops,
+ .event_descs = snbep_uncore_qpi_events,
+ .format_group = &snbep_uncore_qpi_format_group,
};
@@ -807,43 +904,53 @@ static struct intel_uncore_type *snbep_pci_uncores[] = {
static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = {
{ /* Home Agent */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA),
- .driver_data = SNBEP_PCI_UNCORE_HA,
+ .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0),
},
{ /* MC Channel 0 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0),
- .driver_data = SNBEP_PCI_UNCORE_IMC,
+ .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0),
},
{ /* MC Channel 1 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1),
- .driver_data = SNBEP_PCI_UNCORE_IMC,
+ .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1),
},
{ /* MC Channel 2 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2),
- .driver_data = SNBEP_PCI_UNCORE_IMC,
+ .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2),
},
{ /* MC Channel 3 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3),
- .driver_data = SNBEP_PCI_UNCORE_IMC,
+ .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3),
},
{ /* QPI Port 0 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0),
- .driver_data = SNBEP_PCI_UNCORE_QPI,
+ .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0),
},
{ /* QPI Port 1 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1),
- .driver_data = SNBEP_PCI_UNCORE_QPI,
+ .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1),
},
{ /* R2PCIe */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE),
- .driver_data = SNBEP_PCI_UNCORE_R2PCIE,
+ .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0),
},
{ /* R3QPI Link 0 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0),
- .driver_data = SNBEP_PCI_UNCORE_R3QPI,
+ .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0),
},
{ /* R3QPI Link 1 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1),
- .driver_data = SNBEP_PCI_UNCORE_R3QPI,
+ .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1),
+ },
+ { /* QPI Port 0 filter */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86),
+ .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+ SNBEP_PCI_QPI_PORT0_FILTER),
+ },
+ { /* QPI Port 1 filter */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96),
+ .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+ SNBEP_PCI_QPI_PORT1_FILTER),
},
{ /* end: all zeroes */ }
};
@@ -1256,71 +1363,71 @@ static struct intel_uncore_type *ivt_pci_uncores[] = {
static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = {
{ /* Home Agent 0 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30),
- .driver_data = IVT_PCI_UNCORE_HA,
+ .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0),
},
{ /* Home Agent 1 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38),
- .driver_data = IVT_PCI_UNCORE_HA,
+ .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1),
},
{ /* MC0 Channel 0 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4),
- .driver_data = IVT_PCI_UNCORE_IMC,
+ .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0),
},
{ /* MC0 Channel 1 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5),
- .driver_data = IVT_PCI_UNCORE_IMC,
+ .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1),
},
{ /* MC0 Channel 3 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0),
- .driver_data = IVT_PCI_UNCORE_IMC,
+ .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2),
},
{ /* MC0 Channel 4 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1),
- .driver_data = IVT_PCI_UNCORE_IMC,
+ .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3),
},
{ /* MC1 Channel 0 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4),
- .driver_data = IVT_PCI_UNCORE_IMC,
+ .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4),
},
{ /* MC1 Channel 1 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5),
- .driver_data = IVT_PCI_UNCORE_IMC,
+ .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5),
},
{ /* MC1 Channel 3 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0),
- .driver_data = IVT_PCI_UNCORE_IMC,
+ .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6),
},
{ /* MC1 Channel 4 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1),
- .driver_data = IVT_PCI_UNCORE_IMC,
+ .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7),
},
{ /* QPI0 Port 0 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32),
- .driver_data = IVT_PCI_UNCORE_QPI,
+ .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0),
},
{ /* QPI0 Port 1 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33),
- .driver_data = IVT_PCI_UNCORE_QPI,
+ .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1),
},
{ /* QPI1 Port 2 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a),
- .driver_data = IVT_PCI_UNCORE_QPI,
+ .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2),
},
{ /* R2PCIe */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34),
- .driver_data = IVT_PCI_UNCORE_R2PCIE,
+ .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0),
},
{ /* R3QPI0 Link 0 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36),
- .driver_data = IVT_PCI_UNCORE_R3QPI,
+ .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0),
},
{ /* R3QPI0 Link 1 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37),
- .driver_data = IVT_PCI_UNCORE_R3QPI,
+ .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1),
},
{ /* R3QPI1 Link 2 */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e),
- .driver_data = IVT_PCI_UNCORE_R3QPI,
+ .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2),
},
{ /* end: all zeroes */ }
};
@@ -2606,7 +2713,7 @@ struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cp
size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg);
- box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu));
+ box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
if (!box)
return NULL;
@@ -3167,16 +3274,24 @@ static bool pcidrv_registered;
/*
* add a pci uncore device
*/
-static int uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev)
+static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct intel_uncore_pmu *pmu;
struct intel_uncore_box *box;
- int i, phys_id;
+ struct intel_uncore_type *type;
+ int phys_id;
phys_id = pcibus_to_physid[pdev->bus->number];
if (phys_id < 0)
return -ENODEV;
+ if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) {
+ extra_pci_dev[phys_id][UNCORE_PCI_DEV_IDX(id->driver_data)] = pdev;
+ pci_set_drvdata(pdev, NULL);
+ return 0;
+ }
+
+ type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
box = uncore_alloc_box(type, 0);
if (!box)
return -ENOMEM;
@@ -3185,21 +3300,11 @@ static int uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev)
* for performance monitoring unit with multiple boxes,
* each box has a different function id.
*/
- for (i = 0; i < type->num_boxes; i++) {
- pmu = &type->pmus[i];
- if (pmu->func_id == pdev->devfn)
- break;
- if (pmu->func_id < 0) {
- pmu->func_id = pdev->devfn;
- break;
- }
- pmu = NULL;
- }
-
- if (!pmu) {
- kfree(box);
- return -EINVAL;
- }
+ pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
+ if (pmu->func_id < 0)
+ pmu->func_id = pdev->devfn;
+ else
+ WARN_ON_ONCE(pmu->func_id != pdev->devfn);
box->phys_id = phys_id;
box->pci_dev = pdev;
@@ -3217,9 +3322,22 @@ static int uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev)
static void uncore_pci_remove(struct pci_dev *pdev)
{
struct intel_uncore_box *box = pci_get_drvdata(pdev);
- struct intel_uncore_pmu *pmu = box->pmu;
- int cpu, phys_id = pcibus_to_physid[pdev->bus->number];
+ struct intel_uncore_pmu *pmu;
+ int i, cpu, phys_id = pcibus_to_physid[pdev->bus->number];
+ box = pci_get_drvdata(pdev);
+ if (!box) {
+ for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
+ if (extra_pci_dev[phys_id][i] == pdev) {
+ extra_pci_dev[phys_id][i] = NULL;
+ break;
+ }
+ }
+ WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX);
+ return;
+ }
+
+ pmu = box->pmu;
if (WARN_ON_ONCE(phys_id != box->phys_id))
return;
@@ -3240,12 +3358,6 @@ static void uncore_pci_remove(struct pci_dev *pdev)
kfree(box);
}
-static int uncore_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- return uncore_pci_add(pci_uncores[id->driver_data], pdev);
-}
-
static int __init uncore_pci_init(void)
{
int ret;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index 47b3d00c9d8..a80ab71a883 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -12,6 +12,15 @@
#define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1)
+#define UNCORE_PCI_DEV_DATA(type, idx) ((type << 8) | idx)
+#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff)
+#define UNCORE_PCI_DEV_IDX(data) (data & 0xff)
+#define UNCORE_EXTRA_PCI_DEV 0xff
+#define UNCORE_EXTRA_PCI_DEV_MAX 2
+
+/* support up to 8 sockets */
+#define UNCORE_SOCKET_MAX 8
+
#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)
/* SNB event control */
@@ -108,6 +117,7 @@
(SNBEP_PMON_CTL_EV_SEL_MASK | \
SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \
SNBEP_PMON_CTL_EDGE_DET | \
+ SNBEP_PMON_CTL_EV_SEL_EXT | \
SNBEP_PMON_CTL_INVERT | \
SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \
SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \
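
UNCORE_PCI_DEV_DATA() and its decode helpers, added just above, pack an uncore box type and a box index into the PCI table's driver_data so that uncore_pci_probe() no longer has to search for a free pmu slot. A stand-alone round-trip of that encoding is shown below; the type value 1 is only an illustration, the real value comes from the SNBEP_PCI_UNCORE_* enum:

	#include <assert.h>
	#include <stdio.h>

	#define UNCORE_PCI_DEV_DATA(type, idx)	((type << 8) | idx)
	#define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
	#define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)

	int main(void)
	{
		/* e.g. the third IMC channel: type 1 (illustrative), box index 2 */
		unsigned long data = UNCORE_PCI_DEV_DATA(1, 2);

		assert(UNCORE_PCI_DEV_TYPE(data) == 1);
		assert(UNCORE_PCI_DEV_IDX(data) == 2);
		printf("driver_data=0x%lx type=%lu idx=%lu\n",
		       data, UNCORE_PCI_DEV_TYPE(data), UNCORE_PCI_DEV_IDX(data));
		return 0;
	}
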
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 7076878404e..628a059a9a0 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -93,7 +93,7 @@ static void __init vmware_platform_setup(void)
* serial key should be enough, as this will always have a VMware
* specific string when running under VMware hypervisor.
*/
-static bool __init vmware_platform(void)
+static uint32_t __init vmware_platform(void)
{
if (cpu_has_hypervisor) {
unsigned int eax;
@@ -102,12 +102,12 @@ static bool __init vmware_platform(void)
cpuid(CPUID_VMWARE_INFO_LEAF, &eax, &hyper_vendor_id[0],
&hyper_vendor_id[1], &hyper_vendor_id[2]);
if (!memcmp(hyper_vendor_id, "VMwareVMware", 12))
- return true;
+ return CPUID_VMWARE_INFO_LEAF;
} else if (dmi_available && dmi_name_in_serial("VMware") &&
__vmware_platform())
- return true;
+ return 1;
- return false;
+ return 0;
}
/*
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index 74467feb4dc..e0e0841eef4 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -128,7 +128,9 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
cpu_emergency_svm_disable();
lapic_shutdown();
-#if defined(CONFIG_X86_IO_APIC)
+#ifdef CONFIG_X86_IO_APIC
+ /* Prevent crash_kexec() from deadlocking on ioapic_lock. */
+ ioapic_zap_locks();
disable_IO_APIC();
#endif
#ifdef CONFIG_HPET_TIMER
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index d32abeabbda..174da5fc5a7 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -658,15 +658,18 @@ __init void e820_setup_gap(void)
* boot_params.e820_map, others are passed via SETUP_E820_EXT node of
* linked list of struct setup_data, which is parsed here.
*/
-void __init parse_e820_ext(struct setup_data *sdata)
+void __init parse_e820_ext(u64 phys_addr, u32 data_len)
{
int entries;
struct e820entry *extmap;
+ struct setup_data *sdata;
+ sdata = early_memremap(phys_addr, data_len);
entries = sdata->len / sizeof(struct e820entry);
extmap = (struct e820entry *)(sdata->data);
__append_e820_map(extmap, entries);
sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
+ early_iounmap(sdata, data_len);
printk(KERN_INFO "e820: extended physical RAM map:\n");
e820_print_map("extended");
}
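
parse_e820_ext() now takes a physical address and length and maps the setup_data entry itself with early_memremap(), since the SETUP_E820_EXT node may live in memory that is not yet covered by the early mappings. A simplified sketch of how a caller can walk the setup_data list this way is below; map-length clamping, error handling and the other SETUP_* cases are omitted:

	static void __init parse_setup_data_sketch(void)
	{
		u64 pa_data = boot_params.hdr.setup_data;

		while (pa_data) {
			struct setup_data *data = early_memremap(pa_data, sizeof(*data));
			u32 data_len = sizeof(*data) + data->len;	/* header + payload */
			u32 type = data->type;
			u64 next = data->next;

			early_iounmap(data, sizeof(*data));

			if (type == SETUP_E820_EXT)
				parse_e820_ext(pa_data, data_len);

			pa_data = next;
		}
	}
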
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 94ab6b90dd3..63bdb29b254 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -196,15 +196,23 @@ static void __init ati_bugs_contd(int num, int slot, int func)
static void __init intel_remapping_check(int num, int slot, int func)
{
u8 revision;
+ u16 device;
+ device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID);
revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID);
/*
- * Revision 0x13 of this chipset supports irq remapping
- * but has an erratum that breaks its behavior, flag it as such
+ * Revision 0x13 of every device id that triggers this quirk has
+ * a problem draining interrupts when irq remapping is enabled,
+ * and should be flagged as broken. Additionally, revisions 0x12
+ * and 0x22 of device id 0x3405 have the same problem.
*/
if (revision == 0x13)
set_irq_remapping_broken();
+ else if ((device == 0x3405) &&
+ ((revision == 0x12) ||
+ (revision == 0x22)))
+ set_irq_remapping_broken();
}
@@ -239,6 +247,8 @@ static struct chipset early_qrk[] __initdata = {
PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd },
{ PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST,
PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
+ { PCI_VENDOR_ID_INTEL, 0x3405, PCI_CLASS_BRIDGE_HOST,
+ PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
{ PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST,
PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check },
{}
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 138463a2487..06f87bece92 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -29,7 +29,7 @@ static void __init i386_default_early_setup(void)
reserve_ebda_region();
}
-void __init i386_start_kernel(void)
+asmlinkage void __init i386_start_kernel(void)
{
sanitize_boot_params(&boot_params);
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 55b67614ed9..1be8e43b669 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -137,7 +137,7 @@ static void __init copy_bootdata(char *real_mode_data)
}
}
-void __init x86_64_start_kernel(char * real_mode_data)
+asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
{
int i;
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 5dd87a89f01..81ba27679f1 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -409,6 +409,7 @@ enable_paging:
/*
* Check if it is 486
*/
+ movb $4,X86 # at least 486
cmpl $-1,X86_CPUID
je is486
@@ -436,7 +437,6 @@ enable_paging:
movl %edx,X86_CAPABILITY
is486:
- movb $4,X86
movl $0x50022,%ecx # set AM, WP, NE and MP
movl %cr0,%eax
andl $0x80000011,%eax # Save PG,PE,ET
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 202d24f0f7e..5d576ab3440 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -116,7 +116,7 @@ static void mxcsr_feature_mask_init(void)
if (cpu_has_fxsr) {
memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
- asm volatile("fxsave %0" : : "m" (fx_scratch));
+ asm volatile("fxsave %0" : "+m" (fx_scratch));
mask = fx_scratch.mxcsr_mask;
if (mask == 0)
mask = 0x0000ffbf;
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 3a8185c042a..22d0687e7fd 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -177,7 +177,7 @@ u64 arch_irq_stat(void)
* SMP cross-CPU interrupts have their own specific
* handlers).
*/
-unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
+__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
@@ -215,7 +215,7 @@ void __smp_x86_platform_ipi(void)
x86_platform_ipi_callback();
}
-void smp_x86_platform_ipi(struct pt_regs *regs)
+__visible void smp_x86_platform_ipi(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
@@ -229,7 +229,7 @@ void smp_x86_platform_ipi(struct pt_regs *regs)
/*
* Handler for POSTED_INTERRUPT_VECTOR.
*/
-void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
+__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
@@ -247,7 +247,7 @@ void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
}
#endif
-void smp_trace_x86_platform_ipi(struct pt_regs *regs)
+__visible void smp_trace_x86_platform_ipi(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c
index 636a55e4a13..1de84e3ab4e 100644
--- a/arch/x86/kernel/irq_work.c
+++ b/arch/x86/kernel/irq_work.c
@@ -22,14 +22,14 @@ static inline void __smp_irq_work_interrupt(void)
irq_work_run();
}
-void smp_irq_work_interrupt(struct pt_regs *regs)
+__visible void smp_irq_work_interrupt(struct pt_regs *regs)
{
irq_work_entering_irq();
__smp_irq_work_interrupt();
exiting_irq();
}
-void smp_trace_irq_work_interrupt(struct pt_regs *regs)
+__visible void smp_trace_irq_work_interrupt(struct pt_regs *regs)
{
irq_work_entering_irq();
trace_irq_work_entry(IRQ_WORK_VECTOR);
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
index 2889b3d4388..460f5d9ceeb 100644
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -37,7 +37,19 @@ static void __jump_label_transform(struct jump_entry *entry,
} else
memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
- (*poker)((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE);
+ /*
+ * Make text_poke_bp() a default fallback poker.
+ *
+ * At the time the change is made, simply ignore whether it is a
+ * nop -> jump or a jump -> nop transition, and always assume that
+ * the nop is the 'currently valid' instruction.
+ *
+ */
+ if (poker)
+ (*poker)((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE);
+ else
+ text_poke_bp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE,
+ (void *)entry->code + JUMP_LABEL_NOP_SIZE);
}
void arch_jump_label_transform(struct jump_entry *entry,
@@ -45,7 +57,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
{
get_online_cpus();
mutex_lock(&text_mutex);
- __jump_label_transform(entry, type, text_poke_smp);
+ __jump_label_transform(entry, type, NULL);
mutex_unlock(&text_mutex);
put_online_cpus();
}
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index 2e9d4b5af03..c6ee63f927a 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -82,14 +82,9 @@ extern void synthesize_reljump(void *from, void *to);
extern void synthesize_relcall(void *from, void *to);
#ifdef CONFIG_OPTPROBES
-extern int arch_init_optprobes(void);
extern int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter);
extern unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr);
#else /* !CONFIG_OPTPROBES */
-static inline int arch_init_optprobes(void)
-{
- return 0;
-}
static inline int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
{
return 0;
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 211bce44552..79a3f968287 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -661,7 +661,7 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
/*
* Called from kretprobe_trampoline
*/
-static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
+__visible __used __kprobes void *trampoline_handler(struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
@@ -1068,7 +1068,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
int __init arch_init_kprobes(void)
{
- return arch_init_optprobes();
+ return 0;
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 76dc6f09572..898160b42e4 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -88,9 +88,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long v
*(unsigned long *)addr = val;
}
-static void __used __kprobes kprobes_optinsn_template_holder(void)
-{
- asm volatile (
+asm (
".global optprobe_template_entry\n"
"optprobe_template_entry:\n"
#ifdef CONFIG_X86_64
@@ -129,7 +127,6 @@ static void __used __kprobes kprobes_optinsn_template_holder(void)
#endif
".global optprobe_template_end\n"
"optprobe_template_end:\n");
-}
#define TMPL_MOVE_IDX \
((long)&optprobe_template_val - (long)&optprobe_template_entry)
@@ -371,31 +368,6 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
return 0;
}
-#define MAX_OPTIMIZE_PROBES 256
-static struct text_poke_param *jump_poke_params;
-static struct jump_poke_buffer {
- u8 buf[RELATIVEJUMP_SIZE];
-} *jump_poke_bufs;
-
-static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
- u8 *insn_buf,
- struct optimized_kprobe *op)
-{
- s32 rel = (s32)((long)op->optinsn.insn -
- ((long)op->kp.addr + RELATIVEJUMP_SIZE));
-
- /* Backup instructions which will be replaced by jump address */
- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
- RELATIVE_ADDR_SIZE);
-
- insn_buf[0] = RELATIVEJUMP_OPCODE;
- *(s32 *)(&insn_buf[1]) = rel;
-
- tprm->addr = op->kp.addr;
- tprm->opcode = insn_buf;
- tprm->len = RELATIVEJUMP_SIZE;
-}
-
/*
* Replace breakpoints (int3) with relative jumps.
* Caller must call with locking kprobe_mutex and text_mutex.
@@ -403,37 +375,38 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
void __kprobes arch_optimize_kprobes(struct list_head *oplist)
{
struct optimized_kprobe *op, *tmp;
- int c = 0;
+ u8 insn_buf[RELATIVEJUMP_SIZE];
list_for_each_entry_safe(op, tmp, oplist, list) {
+ s32 rel = (s32)((long)op->optinsn.insn -
+ ((long)op->kp.addr + RELATIVEJUMP_SIZE));
+
WARN_ON(kprobe_disabled(&op->kp));
- /* Setup param */
- setup_optimize_kprobe(&jump_poke_params[c],
- jump_poke_bufs[c].buf, op);
+
+ /* Backup instructions which will be replaced by jump address */
+ memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
+ RELATIVE_ADDR_SIZE);
+
+ insn_buf[0] = RELATIVEJUMP_OPCODE;
+ *(s32 *)(&insn_buf[1]) = rel;
+
+ text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
+ op->optinsn.insn);
+
list_del_init(&op->list);
- if (++c >= MAX_OPTIMIZE_PROBES)
- break;
}
-
- /*
- * text_poke_smp doesn't support NMI/MCE code modifying.
- * However, since kprobes itself also doesn't support NMI/MCE
- * code probing, it's not a problem.
- */
- text_poke_smp_batch(jump_poke_params, c);
}
-static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm,
- u8 *insn_buf,
- struct optimized_kprobe *op)
+/* Replace a relative jump with a breakpoint (int3). */
+void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
{
+ u8 insn_buf[RELATIVEJUMP_SIZE];
+
/* Set int3 to first byte for kprobes */
insn_buf[0] = BREAKPOINT_INSTRUCTION;
memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
-
- tprm->addr = op->kp.addr;
- tprm->opcode = insn_buf;
- tprm->len = RELATIVEJUMP_SIZE;
+ text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE,
+ op->optinsn.insn);
}
/*
@@ -444,34 +417,11 @@ extern void arch_unoptimize_kprobes(struct list_head *oplist,
struct list_head *done_list)
{
struct optimized_kprobe *op, *tmp;
- int c = 0;
list_for_each_entry_safe(op, tmp, oplist, list) {
- /* Setup param */
- setup_unoptimize_kprobe(&jump_poke_params[c],
- jump_poke_bufs[c].buf, op);
+ arch_unoptimize_kprobe(op);
list_move(&op->list, done_list);
- if (++c >= MAX_OPTIMIZE_PROBES)
- break;
}
-
- /*
- * text_poke_smp doesn't support NMI/MCE code modifying.
- * However, since kprobes itself also doesn't support NMI/MCE
- * code probing, it's not a problem.
- */
- text_poke_smp_batch(jump_poke_params, c);
-}
-
-/* Replace a relative jump with a breakpoint (int3). */
-void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
-{
- u8 buf[RELATIVEJUMP_SIZE];
-
- /* Set int3 to first byte for kprobes */
- buf[0] = BREAKPOINT_INSTRUCTION;
- memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
- text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE);
}
int __kprobes
@@ -491,22 +441,3 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
}
return 0;
}
-
-int __kprobes arch_init_optprobes(void)
-{
- /* Allocate code buffer and parameter array */
- jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) *
- MAX_OPTIMIZE_PROBES, GFP_KERNEL);
- if (!jump_poke_bufs)
- return -ENOMEM;
-
- jump_poke_params = kmalloc(sizeof(struct text_poke_param) *
- MAX_OPTIMIZE_PROBES, GFP_KERNEL);
- if (!jump_poke_params) {
- kfree(jump_poke_bufs);
- jump_poke_bufs = NULL;
- return -ENOMEM;
- }
-
- return 0;
-}
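
arch_optimize_kprobes() now builds and installs each 5-byte relative jump directly with text_poke_bp() instead of batching text_poke_smp() calls. The displacement it stores is relative to the end of the jump instruction; here is a tiny stand-alone example of that arithmetic with made-up addresses:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define RELATIVEJUMP_OPCODE	0xe9
	#define RELATIVEJUMP_SIZE	5	/* opcode + 32-bit displacement */

	int main(void)
	{
		/* Hypothetical addresses, just to show the displacement math used
		 * when building the jump from op->kp.addr to op->optinsn.insn. */
		uint64_t kp_addr = 0xffffffff81000100ULL;	/* probed instruction */
		uint64_t insn    = 0xffffffffa0002000ULL;	/* optimized buffer   */
		int32_t rel = (int32_t)(insn - (kp_addr + RELATIVEJUMP_SIZE));
		uint8_t buf[RELATIVEJUMP_SIZE];

		buf[0] = RELATIVEJUMP_OPCODE;
		memcpy(&buf[1], &rel, sizeof(rel));	/* jmp rel32, relative to next insn */

		printf("rel32 = 0x%08x\n", (unsigned int)rel);
		return 0;
	}
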
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index a96d32cc55b..697b93af02d 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -34,6 +34,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
+#include <linux/debugfs.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
@@ -419,6 +420,7 @@ static void __init kvm_smp_prepare_boot_cpu(void)
WARN_ON(kvm_register_clock("primary cpu clock"));
kvm_guest_cpu_init();
native_smp_prepare_boot_cpu();
+ kvm_spinlock_init();
}
static void kvm_guest_cpu_online(void *dummy)
@@ -498,11 +500,9 @@ void __init kvm_guest_init(void)
#endif
}
-static bool __init kvm_detect(void)
+static uint32_t __init kvm_detect(void)
{
- if (!kvm_para_available())
- return false;
- return true;
+ return kvm_cpuid_base();
}
const struct hypervisor_x86 x86_hyper_kvm __refconst = {
@@ -523,3 +523,263 @@ static __init int activate_jump_labels(void)
return 0;
}
arch_initcall(activate_jump_labels);
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+
+/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
+static void kvm_kick_cpu(int cpu)
+{
+ int apicid;
+ unsigned long flags = 0;
+
+ apicid = per_cpu(x86_cpu_to_apicid, cpu);
+ kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
+}
+
+enum kvm_contention_stat {
+ TAKEN_SLOW,
+ TAKEN_SLOW_PICKUP,
+ RELEASED_SLOW,
+ RELEASED_SLOW_KICKED,
+ NR_CONTENTION_STATS
+};
+
+#ifdef CONFIG_KVM_DEBUG_FS
+#define HISTO_BUCKETS 30
+
+static struct kvm_spinlock_stats
+{
+ u32 contention_stats[NR_CONTENTION_STATS];
+ u32 histo_spin_blocked[HISTO_BUCKETS+1];
+ u64 time_blocked;
+} spinlock_stats;
+
+static u8 zero_stats;
+
+static inline void check_zero(void)
+{
+ u8 ret;
+ u8 old;
+
+ old = ACCESS_ONCE(zero_stats);
+ if (unlikely(old)) {
+ ret = cmpxchg(&zero_stats, old, 0);
+ /* This ensures that only one CPU resets the stats */
+ if (ret == old)
+ memset(&spinlock_stats, 0, sizeof(spinlock_stats));
+ }
+}
+
+static inline void add_stats(enum kvm_contention_stat var, u32 val)
+{
+ check_zero();
+ spinlock_stats.contention_stats[var] += val;
+}
+
+
+static inline u64 spin_time_start(void)
+{
+ return sched_clock();
+}
+
+static void __spin_time_accum(u64 delta, u32 *array)
+{
+ unsigned index;
+
+ index = ilog2(delta);
+ check_zero();
+
+ if (index < HISTO_BUCKETS)
+ array[index]++;
+ else
+ array[HISTO_BUCKETS]++;
+}
+
+static inline void spin_time_accum_blocked(u64 start)
+{
+ u32 delta;
+
+ delta = sched_clock() - start;
+ __spin_time_accum(delta, spinlock_stats.histo_spin_blocked);
+ spinlock_stats.time_blocked += delta;
+}
+
+static struct dentry *d_spin_debug;
+static struct dentry *d_kvm_debug;
+
+struct dentry *kvm_init_debugfs(void)
+{
+ d_kvm_debug = debugfs_create_dir("kvm", NULL);
+ if (!d_kvm_debug)
+ printk(KERN_WARNING "Could not create 'kvm' debugfs directory\n");
+
+ return d_kvm_debug;
+}
+
+static int __init kvm_spinlock_debugfs(void)
+{
+ struct dentry *d_kvm;
+
+ d_kvm = kvm_init_debugfs();
+ if (d_kvm == NULL)
+ return -ENOMEM;
+
+ d_spin_debug = debugfs_create_dir("spinlocks", d_kvm);
+
+ debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);
+
+ debugfs_create_u32("taken_slow", 0444, d_spin_debug,
+ &spinlock_stats.contention_stats[TAKEN_SLOW]);
+ debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
+ &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);
+
+ debugfs_create_u32("released_slow", 0444, d_spin_debug,
+ &spinlock_stats.contention_stats[RELEASED_SLOW]);
+ debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
+ &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);
+
+ debugfs_create_u64("time_blocked", 0444, d_spin_debug,
+ &spinlock_stats.time_blocked);
+
+ debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
+ spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
+
+ return 0;
+}
+fs_initcall(kvm_spinlock_debugfs);
+#else /* !CONFIG_KVM_DEBUG_FS */
+static inline void add_stats(enum kvm_contention_stat var, u32 val)
+{
+}
+
+static inline u64 spin_time_start(void)
+{
+ return 0;
+}
+
+static inline void spin_time_accum_blocked(u64 start)
+{
+}
+#endif /* CONFIG_KVM_DEBUG_FS */
+
+struct kvm_lock_waiting {
+ struct arch_spinlock *lock;
+ __ticket_t want;
+};
+
+/* cpus 'waiting' on a spinlock to become available */
+static cpumask_t waiting_cpus;
+
+/* Track spinlock on which a cpu is waiting */
+static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);
+
+static void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
+{
+ struct kvm_lock_waiting *w;
+ int cpu;
+ u64 start;
+ unsigned long flags;
+
+ if (in_nmi())
+ return;
+
+ w = &__get_cpu_var(klock_waiting);
+ cpu = smp_processor_id();
+ start = spin_time_start();
+
+ /*
+ * Make sure an interrupt handler can't upset things in a
+ * partially setup state.
+ */
+ local_irq_save(flags);
+
+ /*
+ * The ordering protocol on this is that the "lock" pointer
+ * may only be set non-NULL if the "want" ticket is correct.
+ * If we're updating "want", we must first clear "lock".
+ */
+ w->lock = NULL;
+ smp_wmb();
+ w->want = want;
+ smp_wmb();
+ w->lock = lock;
+
+ add_stats(TAKEN_SLOW, 1);
+
+ /*
+ * This uses set_bit, which is atomic but we should not rely on its
+ * reordering guarantees, so a barrier is needed after this call.
+ */
+ cpumask_set_cpu(cpu, &waiting_cpus);
+
+ barrier();
+
+ /*
+ * Mark entry to slowpath before doing the pickup test to make
+ * sure we don't deadlock with an unlocker.
+ */
+ __ticket_enter_slowpath(lock);
+
+ /*
+ * Check again to make sure the lock didn't become free while
+ * we weren't looking.
+ */
+ if (ACCESS_ONCE(lock->tickets.head) == want) {
+ add_stats(TAKEN_SLOW_PICKUP, 1);
+ goto out;
+ }
+
+ /*
+ * Halt until it's our turn and we have been kicked. We use a safe
+ * halt in the irq-enabled case so we don't hang if the lock info is
+ * overwritten in the irq slowpath and no spurious interrupt wakes us.
+ */
+ if (arch_irqs_disabled_flags(flags))
+ halt();
+ else
+ safe_halt();
+
+out:
+ cpumask_clear_cpu(cpu, &waiting_cpus);
+ w->lock = NULL;
+ local_irq_restore(flags);
+ spin_time_accum_blocked(start);
+}
+PV_CALLEE_SAVE_REGS_THUNK(kvm_lock_spinning);
+
+/* Kick vcpu waiting on @lock->head to reach value @ticket */
+static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
+{
+ int cpu;
+
+ add_stats(RELEASED_SLOW, 1);
+ for_each_cpu(cpu, &waiting_cpus) {
+ const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
+ if (ACCESS_ONCE(w->lock) == lock &&
+ ACCESS_ONCE(w->want) == ticket) {
+ add_stats(RELEASED_SLOW_KICKED, 1);
+ kvm_kick_cpu(cpu);
+ break;
+ }
+ }
+}
+
+/*
+ * Set up pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
+ */
+void __init kvm_spinlock_init(void)
+{
+ if (!kvm_para_available())
+ return;
+ /* Does host kernel support KVM_FEATURE_PV_UNHALT? */
+ if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
+ return;
+
+ printk(KERN_INFO "KVM setup paravirtual spinlock\n");
+
+ static_key_slow_inc(&paravirt_ticketlocks_enabled);
+
+ pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
+ pv_lock_ops.unlock_kick = kvm_unlock_kick;
+}
+#endif /* CONFIG_PARAVIRT_SPINLOCKS */
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 47ebb1dbfbc..7123b5df479 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -145,10 +145,9 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
return 0;
}
-static unsigned int verify_patch_size(int cpu, u32 patch_size,
+static unsigned int verify_patch_size(u8 family, u32 patch_size,
unsigned int size)
{
- struct cpuinfo_x86 *c = &cpu_data(cpu);
u32 max_size;
#define F1XH_MPB_MAX_SIZE 2048
@@ -156,7 +155,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size,
#define F15H_MPB_MAX_SIZE 4096
#define F16H_MPB_MAX_SIZE 3458
- switch (c->x86) {
+ switch (family) {
case 0x14:
max_size = F14H_MPB_MAX_SIZE;
break;
@@ -220,12 +219,13 @@ int apply_microcode_amd(int cpu)
return 0;
}
- if (__apply_microcode_amd(mc_amd))
+ if (__apply_microcode_amd(mc_amd)) {
pr_err("CPU%d: update failed for patch_level=0x%08x\n",
cpu, mc_amd->hdr.patch_id);
- else
- pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
- mc_amd->hdr.patch_id);
+ return -1;
+ }
+ pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
+ mc_amd->hdr.patch_id);
uci->cpu_sig.rev = mc_amd->hdr.patch_id;
c->microcode = mc_amd->hdr.patch_id;
@@ -276,9 +276,8 @@ static void cleanup(void)
* driver cannot continue functioning normally. In such cases, we tear
* down everything we've used up so far and exit.
*/
-static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
+static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
{
- struct cpuinfo_x86 *c = &cpu_data(cpu);
struct microcode_header_amd *mc_hdr;
struct ucode_patch *patch;
unsigned int patch_size, crnt_size, ret;
@@ -298,7 +297,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
/* check if patch is for the current family */
proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
- if (proc_fam != c->x86)
+ if (proc_fam != family)
return crnt_size;
if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
@@ -307,7 +306,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
return crnt_size;
}
- ret = verify_patch_size(cpu, patch_size, leftover);
+ ret = verify_patch_size(family, patch_size, leftover);
if (!ret) {
pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
return crnt_size;
@@ -338,7 +337,8 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover)
return crnt_size;
}
-static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t size)
+static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
+ size_t size)
{
enum ucode_state ret = UCODE_ERROR;
unsigned int leftover;
@@ -361,7 +361,7 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz
}
while (leftover) {
- crnt_size = verify_and_add_patch(cpu, fw, leftover);
+ crnt_size = verify_and_add_patch(family, fw, leftover);
if (crnt_size < 0)
return ret;
@@ -372,22 +372,22 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz
return UCODE_OK;
}
-enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size)
+enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
{
enum ucode_state ret;
/* free old equiv table */
free_equiv_cpu_table();
- ret = __load_microcode_amd(cpu, data, size);
+ ret = __load_microcode_amd(family, data, size);
if (ret != UCODE_OK)
cleanup();
#if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32)
/* save BSP's matching patch for early load */
- if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) {
- struct ucode_patch *p = find_patch(cpu);
+ if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) {
+ struct ucode_patch *p = find_patch(smp_processor_id());
if (p) {
memset(amd_bsp_mpb, 0, MPB_MAX_SIZE);
memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data),
@@ -440,7 +440,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device,
goto fw_release;
}
- ret = load_microcode_amd(cpu, fw->data, fw->size);
+ ret = load_microcode_amd(c->x86, fw->data, fw->size);
fw_release:
release_firmware(fw);
diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/microcode_amd_early.c
index 1d14ffee574..6073104ccaa 100644
--- a/arch/x86/kernel/microcode_amd_early.c
+++ b/arch/x86/kernel/microcode_amd_early.c
@@ -238,25 +238,17 @@ static void __init collect_cpu_sig_on_bsp(void *arg)
uci->cpu_sig.sig = cpuid_eax(0x00000001);
}
#else
-static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c,
- struct ucode_cpu_info *uci)
+void load_ucode_amd_ap(void)
{
+ unsigned int cpu = smp_processor_id();
+ struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
u32 rev, eax;
rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax);
eax = cpuid_eax(0x00000001);
- uci->cpu_sig.sig = eax;
uci->cpu_sig.rev = rev;
- c->microcode = rev;
- c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
-}
-
-void load_ucode_amd_ap(void)
-{
- unsigned int cpu = smp_processor_id();
-
- collect_cpu_info_amd_early(&cpu_data(cpu), ucode_cpu_info + cpu);
+ uci->cpu_sig.sig = eax;
if (cpu && !ucode_loaded) {
void *ucode;
@@ -265,8 +257,10 @@ void load_ucode_amd_ap(void)
return;
ucode = (void *)(initrd_start + ucode_offset);
- if (load_microcode_amd(0, ucode, ucode_size) != UCODE_OK)
+ eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+ if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK)
return;
+
ucode_loaded = true;
}
@@ -278,6 +272,8 @@ int __init save_microcode_in_initrd_amd(void)
{
enum ucode_state ret;
void *ucode;
+ u32 eax;
+
#ifdef CONFIG_X86_32
unsigned int bsp = boot_cpu_data.cpu_index;
struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
@@ -293,7 +289,10 @@ int __init save_microcode_in_initrd_amd(void)
return 0;
ucode = (void *)(initrd_start + ucode_offset);
- ret = load_microcode_amd(0, ucode, ucode_size);
+ eax = cpuid_eax(0x00000001);
+ eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
+
+ ret = load_microcode_amd(eax, ucode, ucode_size);
if (ret != UCODE_OK)
return -EINVAL;
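
Both early-load paths above derive the CPU family straight from CPUID leaf 1 instead of relying on cpu_data(): base family plus extended family, which is exact for the AMD families this driver handles. A stand-alone worked example with a made-up Family 15h CPUID value:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical CPUID(1).EAX for an AMD Family 15h part. */
		unsigned int eax = 0x00600f20;
		unsigned int family = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);

		/* base family 0xf + extended family 0x06 = 0x15 */
		printf("x86 family = 0x%x\n", family);
		return 0;
	}
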
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 676b8c77a97..bbb6c731634 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -4,25 +4,17 @@
*/
#include <linux/spinlock.h>
#include <linux/module.h>
+#include <linux/jump_label.h>
#include <asm/paravirt.h>
-static inline void
-default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
-{
- arch_spin_lock(lock);
-}
-
struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
- .spin_is_locked = __ticket_spin_is_locked,
- .spin_is_contended = __ticket_spin_is_contended,
-
- .spin_lock = __ticket_spin_lock,
- .spin_lock_flags = default_spin_lock_flags,
- .spin_trylock = __ticket_spin_trylock,
- .spin_unlock = __ticket_spin_unlock,
+ .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
+ .unlock_kick = paravirt_nop,
#endif
};
EXPORT_SYMBOL(pv_lock_ops);
+struct static_key paravirt_ticketlocks_enabled = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL(paravirt_ticketlocks_enabled);
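
pv_lock_ops is now reduced to lock_spinning and unlock_kick because the generic ticket-lock code keeps the fast path and only calls out once a ticket has spun too long. Below is a rough sketch of the shape of that lock path, not the kernel's actual arch_spin_lock(): the combined xadd, the slowpath-flag handling and the unlock side are all simplified away:

	/*
	 * Shape only, with simplified types; assumes __ticket_t tickets and a
	 * SPIN_THRESHOLD spin budget as in asm/spinlock_types.h.
	 */
	static __always_inline void ticket_lock_sketch(arch_spinlock_t *lock)
	{
		__ticket_t me = xadd(&lock->tickets.tail, TICKET_LOCK_INC);	/* grab a ticket */

		for (;;) {
			unsigned count = SPIN_THRESHOLD;

			do {
				if (ACCESS_ONCE(lock->tickets.head) == me)
					return;			/* our turn, lock taken */
				cpu_relax();
			} while (--count);

			/* Spun too long: let the backend halt us until kicked. */
			__ticket_lock_spinning(lock, me);	/* -> pv_lock_ops.lock_spinning */
		}
	}

The unlock side is symmetric: it bumps tickets.head and, when the slowpath flag is set, calls unlock_kick() with the new head so the matching waiter parked in kvm_lock_spinning() above gets woken.
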
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index cd6de64cc48..1b10af835c3 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -62,11 +62,6 @@ void __init default_banner(void)
pv_info.name);
}
-/* Simple instruction patching code. */
-#define DEF_NATIVE(ops, name, code) \
- extern const char start_##ops##_##name[], end_##ops##_##name[]; \
- asm("start_" #ops "_" #name ": " code "; end_" #ops "_" #name ":")
-
/* Undefined instruction for dealing with missing ops pointers. */
static const unsigned char ud2a[] = { 0x0f, 0x0b };
@@ -324,7 +319,7 @@ struct pv_time_ops pv_time_ops = {
.steal_clock = native_steal_clock,
};
-struct pv_irq_ops pv_irq_ops = {
+__visible struct pv_irq_ops pv_irq_ops = {
.save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
.restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
.irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
@@ -336,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
#endif
};
-struct pv_cpu_ops pv_cpu_ops = {
+__visible struct pv_cpu_ops pv_cpu_ops = {
.cpuid = native_cpuid,
.get_debugreg = native_get_debugreg,
.set_debugreg = native_set_debugreg,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 83369e5a1d2..c83516be105 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -36,7 +36,7 @@
* section. Since TSS's are completely CPU-local, we want them
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
*/
-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
+__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index f8adefca71d..884f98f6935 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(start_thread);
* the task-switch, and shows up in ret_from_fork in entry.S,
* for example.
*/
-__notrace_funcgraph struct task_struct *
+__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev = &prev_p->thread,
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 05646bab4ca..bb1dc51bab0 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -52,7 +52,7 @@
asmlinkage extern void ret_from_fork(void);
-DEFINE_PER_CPU(unsigned long, old_rsp);
+asmlinkage DEFINE_PER_CPU(unsigned long, old_rsp);
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
@@ -274,7 +274,7 @@ void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp)
* Kprobes not supported here. Set the probe on schedule instead.
* Function graph tracer not supported too.
*/
-__notrace_funcgraph struct task_struct *
+__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev = &prev_p->thread;
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 2cb9470ea85..a16bae3f83b 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -128,46 +128,7 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
}
-static struct pvclock_vsyscall_time_info *pvclock_vdso_info;
-
-static struct pvclock_vsyscall_time_info *
-pvclock_get_vsyscall_user_time_info(int cpu)
-{
- if (!pvclock_vdso_info) {
- BUG();
- return NULL;
- }
-
- return &pvclock_vdso_info[cpu];
-}
-
-struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu)
-{
- return &pvclock_get_vsyscall_user_time_info(cpu)->pvti;
-}
-
#ifdef CONFIG_X86_64
-static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l,
- void *v)
-{
- struct task_migration_notifier *mn = v;
- struct pvclock_vsyscall_time_info *pvti;
-
- pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu);
-
- /* this is NULL when pvclock vsyscall is not initialized */
- if (unlikely(pvti == NULL))
- return NOTIFY_DONE;
-
- pvti->migrate_count++;
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block pvclock_migrate = {
- .notifier_call = pvclock_task_migrate,
-};
-
/*
* Initialize the generic pvclock vsyscall state. This will allocate
* a/some page(s) for the per-vcpu pvclock information, set up a
@@ -181,17 +142,12 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE);
- pvclock_vdso_info = i;
-
for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
__set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
__pa(i) + (idx*PAGE_SIZE),
PAGE_KERNEL_VVAR);
}
-
- register_task_migration_notifier(&pvclock_migrate);
-
return 0;
}
#endif
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index f8ec57815c0..f0de6294b95 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -206,9 +206,9 @@ EXPORT_SYMBOL(boot_cpu_data);
#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
-unsigned long mmu_cr4_features;
+__visible unsigned long mmu_cr4_features;
#else
-unsigned long mmu_cr4_features = X86_CR4_PAE;
+__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
#endif
/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
@@ -426,25 +426,23 @@ static void __init reserve_initrd(void)
static void __init parse_setup_data(void)
{
struct setup_data *data;
- u64 pa_data;
+ u64 pa_data, pa_next;
pa_data = boot_params.hdr.setup_data;
while (pa_data) {
- u32 data_len, map_len;
+ u32 data_len, map_len, data_type;
map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK),
(u64)sizeof(struct setup_data));
data = early_memremap(pa_data, map_len);
data_len = data->len + sizeof(struct setup_data);
- if (data_len > map_len) {
- early_iounmap(data, map_len);
- data = early_memremap(pa_data, data_len);
- map_len = data_len;
- }
+ data_type = data->type;
+ pa_next = data->next;
+ early_iounmap(data, map_len);
- switch (data->type) {
+ switch (data_type) {
case SETUP_E820_EXT:
- parse_e820_ext(data);
+ parse_e820_ext(pa_data, data_len);
break;
case SETUP_DTB:
add_dtb(pa_data);
@@ -452,8 +450,7 @@ static void __init parse_setup_data(void)
default:
break;
}
- pa_data = data->next;
- early_iounmap(data, map_len);
+ pa_data = pa_next;
}
}
@@ -1070,7 +1067,7 @@ void __init setup_arch(char **cmdline_p)
cleanup_highmap();
- memblock.current_limit = ISA_END_ADDRESS;
+ memblock_set_current_limit(ISA_END_ADDRESS);
memblock_x86_fill();
/*
@@ -1103,7 +1100,7 @@ void __init setup_arch(char **cmdline_p)
setup_real_mode();
- memblock.current_limit = get_max_mapped();
+ memblock_set_current_limit(get_max_mapped());
dma_contiguous_reserve(0);
/*
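Aside, not part of the patch: the parse_setup_data() rework above copies data->type and data->next out of the early mapping and drops it with early_iounmap() before the entry is processed, instead of growing the mapping to cover the whole payload. A hedged, stand-alone sketch of that capture-then-unmap pattern, using hypothetical map()/unmap() helpers in place of early_memremap()/early_iounmap():

#include <stdio.h>
#include <stdint.h>

struct setup_blob {
        uint32_t type;
        uint64_t next;          /* "physical" address of the next blob; 0 ends the list */
};

/* Hypothetical stand-ins for the early fixmap mapping helpers. */
static struct setup_blob *map(uint64_t pa) { return (struct setup_blob *)(uintptr_t)pa; }
static void unmap(struct setup_blob *p)    { (void)p; }

static void walk(uint64_t pa)
{
        while (pa) {
                struct setup_blob *b = map(pa);
                uint32_t type = b->type;        /* capture what we need ... */
                uint64_t next = b->next;

                unmap(b);                       /* ... then drop the mapping */
                printf("blob at %#llx, type %u\n", (unsigned long long)pa, type);
                pa = next;                      /* safe: uses the captured value */
        }
}

int main(void)
{
        static struct setup_blob b2 = { .type = 2, .next = 0 };
        static struct setup_blob b1 = { .type = 1 };

        b1.next = (uint64_t)(uintptr_t)&b2;     /* fake chain for the demo */
        walk((uint64_t)(uintptr_t)&b1);
        return 0;
}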
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index cf913587d4d..9e5de6813e1 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -358,7 +358,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
else
put_user_ex(0, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
+ save_altstack_ex(&frame->uc.uc_stack, regs->sp);
/* Set up to return from userspace. */
restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
@@ -423,7 +423,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
else
put_user_ex(0, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
- err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
+ save_altstack_ex(&frame->uc.uc_stack, regs->sp);
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
@@ -490,7 +490,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
else
put_user_ex(0, &frame->uc.uc_flags);
put_user_ex(0, &frame->uc.uc_link);
- err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
+ compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
put_user_ex(0, &frame->uc.uc__pad0);
if (ksig->ka.sa.sa_flags & SA_RESTORER) {
@@ -533,7 +533,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
* Do a signal return; undo the signal stack.
*/
#ifdef CONFIG_X86_32
-unsigned long sys_sigreturn(void)
+asmlinkage unsigned long sys_sigreturn(void)
{
struct pt_regs *regs = current_pt_regs();
struct sigframe __user *frame;
@@ -562,7 +562,7 @@ badframe:
}
#endif /* CONFIG_X86_32 */
-long sys_rt_sigreturn(void)
+asmlinkage long sys_rt_sigreturn(void)
{
struct pt_regs *regs = current_pt_regs();
struct rt_sigframe __user *frame;
@@ -728,7 +728,7 @@ static void do_signal(struct pt_regs *regs)
* notification of userspace execution resumption
* - triggered by the TIF_WORK_MASK flags
*/
-void
+__visible void
do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
{
user_exit();
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index cdaa347dfca..7c3a5a61f2e 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -256,7 +256,7 @@ static inline void __smp_reschedule_interrupt(void)
scheduler_ipi();
}
-void smp_reschedule_interrupt(struct pt_regs *regs)
+__visible void smp_reschedule_interrupt(struct pt_regs *regs)
{
ack_APIC_irq();
__smp_reschedule_interrupt();
@@ -271,7 +271,7 @@ static inline void smp_entering_irq(void)
irq_enter();
}
-void smp_trace_reschedule_interrupt(struct pt_regs *regs)
+__visible void smp_trace_reschedule_interrupt(struct pt_regs *regs)
{
/*
* Need to call irq_enter() before calling the trace point.
@@ -295,14 +295,14 @@ static inline void __smp_call_function_interrupt(void)
inc_irq_stat(irq_call_count);
}
-void smp_call_function_interrupt(struct pt_regs *regs)
+__visible void smp_call_function_interrupt(struct pt_regs *regs)
{
smp_entering_irq();
__smp_call_function_interrupt();
exiting_irq();
}
-void smp_trace_call_function_interrupt(struct pt_regs *regs)
+__visible void smp_trace_call_function_interrupt(struct pt_regs *regs)
{
smp_entering_irq();
trace_call_function_entry(CALL_FUNCTION_VECTOR);
@@ -317,14 +317,14 @@ static inline void __smp_call_function_single_interrupt(void)
inc_irq_stat(irq_call_count);
}
-void smp_call_function_single_interrupt(struct pt_regs *regs)
+__visible void smp_call_function_single_interrupt(struct pt_regs *regs)
{
smp_entering_irq();
__smp_call_function_single_interrupt();
exiting_irq();
}
-void smp_trace_call_function_single_interrupt(struct pt_regs *regs)
+__visible void smp_trace_call_function_single_interrupt(struct pt_regs *regs)
{
smp_entering_irq();
trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
index dbded5aedb8..30277e27431 100644
--- a/arch/x86/kernel/sys_x86_64.c
+++ b/arch/x86/kernel/sys_x86_64.c
@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
*begin = new_begin;
}
} else {
- *begin = TASK_UNMAPPED_BASE;
+ *begin = current->mm->mmap_legacy_base;
*end = TASK_SIZE;
}
}
diff --git a/arch/x86/kernel/syscall_32.c b/arch/x86/kernel/syscall_32.c
index 147fcd4941c..e9bcd57d8a9 100644
--- a/arch/x86/kernel/syscall_32.c
+++ b/arch/x86/kernel/syscall_32.c
@@ -15,7 +15,7 @@ typedef asmlinkage void (*sys_call_ptr_t)(void);
extern asmlinkage void sys_ni_syscall(void);
-const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
+__visible const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
/*
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
diff --git a/arch/x86/kernel/syscall_64.c b/arch/x86/kernel/syscall_64.c
index 5c7f8c20da7..4ac730b37f0 100644
--- a/arch/x86/kernel/syscall_64.c
+++ b/arch/x86/kernel/syscall_64.c
@@ -4,6 +4,7 @@
#include <linux/sys.h>
#include <linux/cache.h>
#include <asm/asm-offsets.h>
+#include <asm/syscall.h>
#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
@@ -19,11 +20,9 @@
#define __SYSCALL_64(nr, sym, compat) [nr] = sym,
-typedef void (*sys_call_ptr_t)(void);
-
extern void sys_ni_syscall(void);
-const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
+asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
/*
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
diff --git a/arch/x86/kernel/sysfb.c b/arch/x86/kernel/sysfb.c
new file mode 100644
index 00000000000..193ec2ce46c
--- /dev/null
+++ b/arch/x86/kernel/sysfb.c
@@ -0,0 +1,74 @@
+/*
+ * Generic System Framebuffers on x86
+ * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Simple-Framebuffer support for x86 systems
+ * Create a platform-device for any available boot framebuffer. The
+ * simple-framebuffer platform device is already available on DT systems, so
+ * this module parses the global "screen_info" object and creates a suitable
+ * platform device compatible with the "simple-framebuffer" DT object. If
+ * the framebuffer is incompatible, we instead create a legacy
+ * "vesa-framebuffer", "efi-framebuffer" or "platform-framebuffer" device and
+ * pass the screen_info as platform_data. This allows legacy drivers
+ * to pick these devices up without messing with simple-framebuffer drivers.
+ * The global "screen_info" is still valid at all times.
+ *
+ * If CONFIG_X86_SYSFB is not selected, we never register "simple-framebuffer"
+ * platform devices, but only use legacy framebuffer devices for
+ * backwards compatibility.
+ *
+ * TODO: We set the dev_id field of all platform-devices to 0. This allows
+ * other x86 OF/DT parsers to create such devices, too. However, they must
+ * start at offset 1 for this to work.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/platform_data/simplefb.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
+#include <asm/sysfb.h>
+
+static __init int sysfb_init(void)
+{
+ struct screen_info *si = &screen_info;
+ struct simplefb_platform_data mode;
+ struct platform_device *pd;
+ const char *name;
+ bool compatible;
+ int ret;
+
+ sysfb_apply_efi_quirks();
+
+ /* try to create a simple-framebuffer device */
+ compatible = parse_mode(si, &mode);
+ if (compatible) {
+ ret = create_simplefb(si, &mode);
+ if (!ret)
+ return 0;
+ }
+
+ /* if the FB is incompatible, create a legacy framebuffer device */
+ if (si->orig_video_isVGA == VIDEO_TYPE_EFI)
+ name = "efi-framebuffer";
+ else if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
+ name = "vesa-framebuffer";
+ else
+ name = "platform-framebuffer";
+
+ pd = platform_device_register_resndata(NULL, name, 0,
+ NULL, 0, si, sizeof(*si));
+ return IS_ERR(pd) ? PTR_ERR(pd) : 0;
+}
+
+/* must execute after PCI subsystem for EFI quirks */
+device_initcall(sysfb_init);
diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
new file mode 100644
index 00000000000..b285d4e8c68
--- /dev/null
+++ b/arch/x86/kernel/sysfb_efi.c
@@ -0,0 +1,214 @@
+/*
+ * Generic System Framebuffers on x86
+ * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * EFI Quirks Copyright (c) 2006 Edgar Hucek <gimli@dark-green.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * EFI Quirks
+ * Several EFI systems do not correctly advertise their boot framebuffers.
+ * Hence, we use this static table of known broken machines and fix up the
+ * information so framebuffer drivers can load correctly.
+ */
+
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <linux/screen_info.h>
+#include <video/vga.h>
+#include <asm/sysfb.h>
+
+enum {
+ OVERRIDE_NONE = 0x0,
+ OVERRIDE_BASE = 0x1,
+ OVERRIDE_STRIDE = 0x2,
+ OVERRIDE_HEIGHT = 0x4,
+ OVERRIDE_WIDTH = 0x8,
+};
+
+struct efifb_dmi_info efifb_dmi_list[] = {
+ [M_I17] = { "i17", 0x80010000, 1472 * 4, 1440, 900, OVERRIDE_NONE },
+ [M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050, OVERRIDE_NONE }, /* guess */
+ [M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050, OVERRIDE_NONE },
+ [M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200, OVERRIDE_NONE }, /* guess */
+ [M_I24_8_1] = { "imac8", 0xc0060000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
+ [M_I24_10_1] = { "imac10", 0xc0010000, 2048 * 4, 1920, 1080, OVERRIDE_NONE },
+ [M_I27_11_1] = { "imac11", 0xc0010000, 2560 * 4, 2560, 1440, OVERRIDE_NONE },
+ [M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768, OVERRIDE_NONE },
+ [M_MINI_3_1] = { "mini31", 0x40010000, 1024 * 4, 1024, 768, OVERRIDE_NONE },
+ [M_MINI_4_1] = { "mini41", 0xc0010000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
+ [M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
+ [M_MB_5_1] = { "macbook51", 0x80010000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
+ [M_MB_6_1] = { "macbook61", 0x80010000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
+ [M_MB_7_1] = { "macbook71", 0x80010000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
+ [M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
+ /* 11" Macbook Air 3,1 passes the wrong stride */
+ [M_MBA_3] = { "mba3", 0, 2048 * 4, 0, 0, OVERRIDE_STRIDE },
+ [M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900, OVERRIDE_NONE },
+ [M_MBP_2] = { "mbp2", 0, 0, 0, 0, OVERRIDE_NONE }, /* placeholder */
+ [M_MBP_2_2] = { "mbp22", 0x80010000, 1472 * 4, 1440, 900, OVERRIDE_NONE },
+ [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900, OVERRIDE_NONE },
+ [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
+ [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900, OVERRIDE_NONE },
+ [M_MBP_5_2] = { "mbp52", 0xc0010000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
+ [M_MBP_5_3] = { "mbp53", 0xd0010000, 2048 * 4, 1440, 900, OVERRIDE_NONE },
+ [M_MBP_6_1] = { "mbp61", 0x90030000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
+ [M_MBP_6_2] = { "mbp62", 0x90030000, 2048 * 4, 1680, 1050, OVERRIDE_NONE },
+ [M_MBP_7_1] = { "mbp71", 0xc0010000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
+ [M_MBP_8_2] = { "mbp82", 0x90010000, 1472 * 4, 1440, 900, OVERRIDE_NONE },
+ [M_UNKNOWN] = { NULL, 0, 0, 0, 0, OVERRIDE_NONE }
+};
+
+#define choose_value(dmivalue, fwvalue, field, flags) ({ \
+ typeof(fwvalue) _ret_ = fwvalue; \
+ if ((flags) & (field)) \
+ _ret_ = dmivalue; \
+ else if ((fwvalue) == 0) \
+ _ret_ = dmivalue; \
+ _ret_; \
+ })
+
+static int __init efifb_set_system(const struct dmi_system_id *id)
+{
+ struct efifb_dmi_info *info = id->driver_data;
+
+ if (info->base == 0 && info->height == 0 && info->width == 0 &&
+ info->stride == 0)
+ return 0;
+
+ /* Trust the bootloader over the DMI tables */
+ if (screen_info.lfb_base == 0) {
+#if defined(CONFIG_PCI)
+ struct pci_dev *dev = NULL;
+ int found_bar = 0;
+#endif
+ if (info->base) {
+ screen_info.lfb_base = choose_value(info->base,
+ screen_info.lfb_base, OVERRIDE_BASE,
+ info->flags);
+
+#if defined(CONFIG_PCI)
+ /* make sure that the address in the table is actually
+ * on a VGA device's PCI BAR */
+
+ for_each_pci_dev(dev) {
+ int i;
+ if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+ continue;
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ resource_size_t start, end;
+
+ start = pci_resource_start(dev, i);
+ if (start == 0)
+ break;
+ end = pci_resource_end(dev, i);
+ if (screen_info.lfb_base >= start &&
+ screen_info.lfb_base < end) {
+ found_bar = 1;
+ }
+ }
+ }
+ if (!found_bar)
+ screen_info.lfb_base = 0;
+#endif
+ }
+ }
+ if (screen_info.lfb_base) {
+ screen_info.lfb_linelength = choose_value(info->stride,
+ screen_info.lfb_linelength, OVERRIDE_STRIDE,
+ info->flags);
+ screen_info.lfb_width = choose_value(info->width,
+ screen_info.lfb_width, OVERRIDE_WIDTH,
+ info->flags);
+ screen_info.lfb_height = choose_value(info->height,
+ screen_info.lfb_height, OVERRIDE_HEIGHT,
+ info->flags);
+ if (screen_info.orig_video_isVGA == 0)
+ screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
+ } else {
+ screen_info.lfb_linelength = 0;
+ screen_info.lfb_width = 0;
+ screen_info.lfb_height = 0;
+ screen_info.orig_video_isVGA = 0;
+ return 0;
+ }
+
+ printk(KERN_INFO "efifb: dmi detected %s - framebuffer at 0x%08x "
+ "(%dx%d, stride %d)\n", id->ident,
+ screen_info.lfb_base, screen_info.lfb_width,
+ screen_info.lfb_height, screen_info.lfb_linelength);
+
+ return 1;
+}
+
+#define EFIFB_DMI_SYSTEM_ID(vendor, name, enumid) \
+ { \
+ efifb_set_system, \
+ name, \
+ { \
+ DMI_MATCH(DMI_BIOS_VENDOR, vendor), \
+ DMI_MATCH(DMI_PRODUCT_NAME, name) \
+ }, \
+ &efifb_dmi_list[enumid] \
+ }
+
+static const struct dmi_system_id efifb_dmi_system_table[] __initconst = {
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac4,1", M_I17),
+ /* At least one of these two will be right; maybe both? */
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac5,1", M_I20),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac5,1", M_I20),
+ /* At least one of these two will be right; maybe both? */
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac8,1", M_I24_8_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac10,1", M_I24_10_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac11,1", M_I27_11_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini3,1", M_MINI_3_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini4,1", M_MINI_4_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB),
+ /* At least one of these two will be right; maybe both? */
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook2,1", M_MB),
+ /* At least one of these two will be right; maybe both? */
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook5,1", M_MB_5_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook6,1", M_MB_6_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook7,1", M_MB_7_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir3,1", M_MBA_3),
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP),
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,2", M_MBP_2_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,2", M_MBP_5_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,3", M_MBP_5_3),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,1", M_MBP_6_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,2", M_MBP_6_2),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro7,1", M_MBP_7_1),
+ EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro8,2", M_MBP_8_2),
+ {},
+};
+
+__init void sysfb_apply_efi_quirks(void)
+{
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
+ !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
+ dmi_check_system(efifb_dmi_system_table);
+}
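Aside, not part of the patch: the choose_value() macro above prefers the DMI table entry whenever its override flag is set, and otherwise only when the firmware reported zero; in every other case the firmware value is trusted. A small stand-alone sketch of that selection rule with illustrative numbers:

#include <stdio.h>

#define OVERRIDE_STRIDE 0x2
#define OVERRIDE_WIDTH  0x8

/* Same rule as the kernel macro: DMI value wins if the override flag is set
 * or if the firmware value is zero; otherwise keep the firmware value. */
static unsigned int choose_value(unsigned int dmivalue, unsigned int fwvalue,
                                 unsigned int field, unsigned int flags)
{
        if ((flags & field) || fwvalue == 0)
                return dmivalue;
        return fwvalue;
}

int main(void)
{
        /* MBA3-style fixup: the DMI stride overrides whatever firmware said. */
        printf("stride = %u\n", choose_value(2048 * 4, 1366 * 4,
                                             OVERRIDE_STRIDE, OVERRIDE_STRIDE));
        /* No override flag and a nonzero firmware width: firmware wins. */
        printf("width  = %u\n", choose_value(1440, 1280, OVERRIDE_WIDTH, 0));
        return 0;
}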
diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c
new file mode 100644
index 00000000000..22513e96b01
--- /dev/null
+++ b/arch/x86/kernel/sysfb_simplefb.c
@@ -0,0 +1,95 @@
+/*
+ * Generic System Framebuffers on x86
+ * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * simple-framebuffer probing
+ * Try to convert "screen_info" into a "simple-framebuffer" compatible mode.
+ * If the mode is incompatible, we return "false" and let the caller create
+ * legacy nodes instead.
+ */
+
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/platform_data/simplefb.h>
+#include <linux/platform_device.h>
+#include <linux/screen_info.h>
+#include <asm/sysfb.h>
+
+static const char simplefb_resname[] = "BOOTFB";
+static const struct simplefb_format formats[] = SIMPLEFB_FORMATS;
+
+/* try parsing x86 screen_info into a simple-framebuffer mode struct */
+__init bool parse_mode(const struct screen_info *si,
+ struct simplefb_platform_data *mode)
+{
+ const struct simplefb_format *f;
+ __u8 type;
+ unsigned int i;
+
+ type = si->orig_video_isVGA;
+ if (type != VIDEO_TYPE_VLFB && type != VIDEO_TYPE_EFI)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(formats); ++i) {
+ f = &formats[i];
+ if (si->lfb_depth == f->bits_per_pixel &&
+ si->red_size == f->red.length &&
+ si->red_pos == f->red.offset &&
+ si->green_size == f->green.length &&
+ si->green_pos == f->green.offset &&
+ si->blue_size == f->blue.length &&
+ si->blue_pos == f->blue.offset &&
+ si->rsvd_size == f->transp.length &&
+ si->rsvd_pos == f->transp.offset) {
+ mode->format = f->name;
+ mode->width = si->lfb_width;
+ mode->height = si->lfb_height;
+ mode->stride = si->lfb_linelength;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+__init int create_simplefb(const struct screen_info *si,
+ const struct simplefb_platform_data *mode)
+{
+ struct platform_device *pd;
+ struct resource res;
+ unsigned long len;
+
+ /* don't use lfb_size as it may contain the whole VMEM instead of only
+ * the part that is occupied by the framebuffer */
+ len = mode->height * mode->stride;
+ len = PAGE_ALIGN(len);
+ if (len > si->lfb_size << 16) {
+ printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
+ return -EINVAL;
+ }
+
+ /* setup IORESOURCE_MEM as framebuffer memory */
+ memset(&res, 0, sizeof(res));
+ res.flags = IORESOURCE_MEM;
+ res.name = simplefb_resname;
+ res.start = si->lfb_base;
+ res.end = si->lfb_base + len - 1;
+ if (res.end <= res.start)
+ return -EINVAL;
+
+ pd = platform_device_register_resndata(NULL, "simple-framebuffer", 0,
+ &res, 1, mode, sizeof(*mode));
+ if (IS_ERR(pd))
+ return PTR_ERR(pd);
+
+ return 0;
+}
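Aside, not part of the patch: parse_mode() above accepts the boot framebuffer only when the firmware-described pixel layout matches one of the SIMPLEFB_FORMATS entries field by field; otherwise sysfb_init() falls back to a legacy framebuffer device. A hedged sketch of that matching against a tiny, illustrative format table:

#include <stdio.h>

struct fmt {
        const char *name;
        unsigned int bpp;
        unsigned int r_off, r_len, g_off, g_len, b_off, b_len;
};

/* Illustrative table only; the kernel's SIMPLEFB_FORMATS carries more entries. */
static const struct fmt formats[] = {
        { "r5g6b5",   16, 11, 5,  5, 6, 0, 5 },
        { "x8r8g8b8", 32, 16, 8,  8, 8, 0, 8 },
};

static const char *match(unsigned int bpp,
                         unsigned int r_off, unsigned int r_len,
                         unsigned int g_off, unsigned int g_len,
                         unsigned int b_off, unsigned int b_len)
{
        unsigned int i;

        for (i = 0; i < sizeof(formats) / sizeof(formats[0]); i++) {
                const struct fmt *f = &formats[i];

                if (bpp == f->bpp &&
                    r_off == f->r_off && r_len == f->r_len &&
                    g_off == f->g_off && g_len == f->g_len &&
                    b_off == f->b_off && b_len == f->b_len)
                        return f->name;
        }
        return NULL;    /* caller would register a legacy device instead */
}

int main(void)
{
        const char *name = match(16, 11, 5, 5, 6, 0, 5);

        printf("matched: %s\n", name ? name : "(none)");
        return 0;
}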
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index addf7b58f4e..91a4496db43 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -301,6 +301,15 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
return 0;
}
+static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
+{
+ if (!tboot_enabled())
+ return 0;
+
+ pr_warning("tboot is not able to suspend on platforms with reduced hardware sleep (ACPIv5)");
+ return -ENODEV;
+}
+
static atomic_t ap_wfs_count;
static int tboot_wait_for_aps(int num_aps)
@@ -422,6 +431,7 @@ static __init int tboot_late_init(void)
#endif
acpi_os_set_prepare_sleep(&tboot_sleep);
+ acpi_os_set_prepare_extended_sleep(&tboot_extended_sleep);
return 0;
}
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 1b23a1c9274..8c8093b146c 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -58,6 +58,7 @@
#include <asm/mce.h>
#include <asm/fixmap.h>
#include <asm/mach_traps.h>
+#include <asm/alternative.h>
#ifdef CONFIG_X86_64
#include <asm/x86_init.h>
@@ -327,6 +328,9 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co
ftrace_int3_handler(regs))
return;
#endif
+ if (poke_int3_handler(regs))
+ return;
+
prev_state = exception_enter();
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 6ff49247edf..930e5d48f56 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -89,6 +89,12 @@ int check_tsc_unstable(void)
}
EXPORT_SYMBOL_GPL(check_tsc_unstable);
+int check_tsc_disabled(void)
+{
+ return tsc_disabled;
+}
+EXPORT_SYMBOL_GPL(check_tsc_disabled);
+
#ifdef CONFIG_X86_TSC
int __init notsc_setup(char *str)
{
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index a20ecb5b6cb..b110fe6c03d 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -413,7 +413,8 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
(1 << KVM_FEATURE_CLOCKSOURCE2) |
(1 << KVM_FEATURE_ASYNC_PF) |
(1 << KVM_FEATURE_PV_EOI) |
- (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT);
+ (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
+ (1 << KVM_FEATURE_PV_UNHALT);
if (sched_info_on())
entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index afc11245827..5439117d5c4 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -79,16 +79,6 @@ static inline void apic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
*((u32 *) (apic->regs + reg_off)) = val;
}
-static inline int apic_test_and_set_vector(int vec, void *bitmap)
-{
- return test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
-}
-
-static inline int apic_test_and_clear_vector(int vec, void *bitmap)
-{
- return test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
-}
-
static inline int apic_test_vector(int vec, void *bitmap)
{
return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
@@ -331,10 +321,10 @@ void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir)
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);
-static inline int apic_test_and_set_irr(int vec, struct kvm_lapic *apic)
+static inline void apic_set_irr(int vec, struct kvm_lapic *apic)
{
apic->irr_pending = true;
- return apic_test_and_set_vector(vec, apic->regs + APIC_IRR);
+ apic_set_vector(vec, apic->regs + APIC_IRR);
}
static inline int apic_search_irr(struct kvm_lapic *apic)
@@ -681,32 +671,28 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
if (unlikely(!apic_enabled(apic)))
break;
+ result = 1;
+
if (dest_map)
__set_bit(vcpu->vcpu_id, dest_map);
- if (kvm_x86_ops->deliver_posted_interrupt) {
- result = 1;
+ if (kvm_x86_ops->deliver_posted_interrupt)
kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
- } else {
- result = !apic_test_and_set_irr(vector, apic);
-
- if (!result) {
- if (trig_mode)
- apic_debug("level trig mode repeatedly "
- "for vector %d", vector);
- goto out;
- }
+ else {
+ apic_set_irr(vector, apic);
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
}
-out:
trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
- trig_mode, vector, !result);
+ trig_mode, vector, false);
break;
case APIC_DM_REMRD:
- apic_debug("Ignoring delivery mode 3\n");
+ result = 1;
+ vcpu->arch.pv.pv_unhalted = 1;
+ kvm_make_request(KVM_REQ_EVENT, vcpu);
+ kvm_vcpu_kick(vcpu);
break;
case APIC_DM_SMI:
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9e9285ae9b9..6e2d2c8f230 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -132,8 +132,8 @@ module_param(dbg, bool, 0644);
(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
* PT32_LEVEL_BITS))) - 1))
-#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
- | PT64_NX_MASK)
+#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
+ | shadow_x_mask | shadow_nx_mask)
#define ACC_EXEC_MASK 1
#define ACC_WRITE_MASK PT_WRITABLE_MASK
@@ -331,11 +331,6 @@ static int is_large_pte(u64 pte)
return pte & PT_PAGE_SIZE_MASK;
}
-static int is_dirty_gpte(unsigned long pte)
-{
- return pte & PT_DIRTY_MASK;
-}
-
static int is_rmap_spte(u64 pte)
{
return is_shadow_present_pte(pte);
@@ -2052,12 +2047,18 @@ static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
return __shadow_walk_next(iterator, *iterator->sptep);
}
-static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
+static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp, bool accessed)
{
u64 spte;
+ BUILD_BUG_ON(VMX_EPT_READABLE_MASK != PT_PRESENT_MASK ||
+ VMX_EPT_WRITABLE_MASK != PT_WRITABLE_MASK);
+
spte = __pa(sp->spt) | PT_PRESENT_MASK | PT_WRITABLE_MASK |
- shadow_user_mask | shadow_x_mask | shadow_accessed_mask;
+ shadow_user_mask | shadow_x_mask;
+
+ if (accessed)
+ spte |= shadow_accessed_mask;
mmu_spte_set(sptep, spte);
}
@@ -2574,14 +2575,6 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
mmu_free_roots(vcpu);
}
-static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level)
-{
- int bit7;
-
- bit7 = (gpte >> 7) & 1;
- return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) != 0;
-}
-
static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
bool no_dirty_log)
{
@@ -2594,26 +2587,6 @@ static pfn_t pte_prefetch_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
return gfn_to_pfn_memslot_atomic(slot, gfn);
}
-static bool prefetch_invalid_gpte(struct kvm_vcpu *vcpu,
- struct kvm_mmu_page *sp, u64 *spte,
- u64 gpte)
-{
- if (is_rsvd_bits_set(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
- goto no_present;
-
- if (!is_present_gpte(gpte))
- goto no_present;
-
- if (!(gpte & PT_ACCESSED_MASK))
- goto no_present;
-
- return false;
-
-no_present:
- drop_spte(vcpu->kvm, spte);
- return true;
-}
-
static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp,
u64 *start, u64 *end)
@@ -2710,7 +2683,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
iterator.level - 1,
1, ACC_ALL, iterator.sptep);
- link_shadow_page(iterator.sptep, sp);
+ link_shadow_page(iterator.sptep, sp, true);
}
}
return emulate;
@@ -2808,7 +2781,7 @@ exit:
return ret;
}
-static bool page_fault_can_be_fast(struct kvm_vcpu *vcpu, u32 error_code)
+static bool page_fault_can_be_fast(u32 error_code)
{
/*
* Do not fix the mmio spte with invalid generation number which
@@ -2861,7 +2834,7 @@ static bool fast_page_fault(struct kvm_vcpu *vcpu, gva_t gva, int level,
bool ret = false;
u64 spte = 0ull;
- if (!page_fault_can_be_fast(vcpu, error_code))
+ if (!page_fault_can_be_fast(error_code))
return false;
walk_shadow_page_lockless_begin(vcpu);
@@ -3209,6 +3182,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
mmu_sync_roots(vcpu);
spin_unlock(&vcpu->kvm->mmu_lock);
}
+EXPORT_SYMBOL_GPL(kvm_mmu_sync_roots);
static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
u32 access, struct x86_exception *exception)
@@ -3478,6 +3452,7 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
++vcpu->stat.tlb_flush;
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
+EXPORT_SYMBOL_GPL(kvm_mmu_flush_tlb);
static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
@@ -3501,18 +3476,6 @@ static void paging_free(struct kvm_vcpu *vcpu)
nonpaging_free(vcpu);
}
-static inline void protect_clean_gpte(unsigned *access, unsigned gpte)
-{
- unsigned mask;
-
- BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
-
- mask = (unsigned)~ACC_WRITE_MASK;
- /* Allow write access to dirty gptes */
- mask |= (gpte >> (PT_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) & PT_WRITABLE_MASK;
- *access &= mask;
-}
-
static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
unsigned access, int *nr_present)
{
@@ -3530,16 +3493,6 @@ static bool sync_mmio_spte(struct kvm *kvm, u64 *sptep, gfn_t gfn,
return false;
}
-static inline unsigned gpte_access(struct kvm_vcpu *vcpu, u64 gpte)
-{
- unsigned access;
-
- access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
- access &= ~(gpte >> PT64_NX_SHIFT);
-
- return access;
-}
-
static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte)
{
unsigned index;
@@ -3549,6 +3502,11 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gp
return mmu->last_pte_bitmap & (1 << index);
}
+#define PTTYPE_EPT 18 /* arbitrary */
+#define PTTYPE PTTYPE_EPT
+#include "paging_tmpl.h"
+#undef PTTYPE
+
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE
@@ -3563,6 +3521,8 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
int maxphyaddr = cpuid_maxphyaddr(vcpu);
u64 exb_bit_rsvd = 0;
+ context->bad_mt_xwr = 0;
+
if (!context->nx)
exb_bit_rsvd = rsvd_bits(63, 63);
switch (context->root_level) {
@@ -3618,7 +3578,40 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu,
}
}
-static void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
+static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *context, bool execonly)
+{
+ int maxphyaddr = cpuid_maxphyaddr(vcpu);
+ int pte;
+
+ context->rsvd_bits_mask[0][3] =
+ rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 7);
+ context->rsvd_bits_mask[0][2] =
+ rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
+ context->rsvd_bits_mask[0][1] =
+ rsvd_bits(maxphyaddr, 51) | rsvd_bits(3, 6);
+ context->rsvd_bits_mask[0][0] = rsvd_bits(maxphyaddr, 51);
+
+ /* large page */
+ context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
+ context->rsvd_bits_mask[1][2] =
+ rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 29);
+ context->rsvd_bits_mask[1][1] =
+ rsvd_bits(maxphyaddr, 51) | rsvd_bits(12, 20);
+ context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
+
+ for (pte = 0; pte < 64; pte++) {
+ int rwx_bits = pte & 7;
+ int mt = pte >> 3;
+ if (mt == 0x2 || mt == 0x3 || mt == 0x7 ||
+ rwx_bits == 0x2 || rwx_bits == 0x6 ||
+ (rwx_bits == 0x4 && !execonly))
+ context->bad_mt_xwr |= (1ull << pte);
+ }
+}
+
+static void update_permission_bitmask(struct kvm_vcpu *vcpu,
+ struct kvm_mmu *mmu, bool ept)
{
unsigned bit, byte, pfec;
u8 map;
@@ -3636,12 +3629,16 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu
w = bit & ACC_WRITE_MASK;
u = bit & ACC_USER_MASK;
- /* Not really needed: !nx will cause pte.nx to fault */
- x |= !mmu->nx;
- /* Allow supervisor writes if !cr0.wp */
- w |= !is_write_protection(vcpu) && !uf;
- /* Disallow supervisor fetches of user code if cr4.smep */
- x &= !(smep && u && !uf);
+ if (!ept) {
+ /* Not really needed: !nx will cause pte.nx to fault */
+ x |= !mmu->nx;
+ /* Allow supervisor writes if !cr0.wp */
+ w |= !is_write_protection(vcpu) && !uf;
+ /* Disallow supervisor fetches of user code if cr4.smep */
+ x &= !(smep && u && !uf);
+ } else
+ /* Not really needed: no U/S accesses on ept */
+ u = 1;
fault = (ff && !x) || (uf && !u) || (wf && !w);
map |= fault << bit;
@@ -3676,7 +3673,7 @@ static int paging64_init_context_common(struct kvm_vcpu *vcpu,
context->root_level = level;
reset_rsvds_bits_mask(vcpu, context);
- update_permission_bitmask(vcpu, context);
+ update_permission_bitmask(vcpu, context, false);
update_last_pte_bitmap(vcpu, context);
ASSERT(is_pae(vcpu));
@@ -3706,7 +3703,7 @@ static int paging32_init_context(struct kvm_vcpu *vcpu,
context->root_level = PT32_ROOT_LEVEL;
reset_rsvds_bits_mask(vcpu, context);
- update_permission_bitmask(vcpu, context);
+ update_permission_bitmask(vcpu, context, false);
update_last_pte_bitmap(vcpu, context);
context->new_cr3 = paging_new_cr3;
@@ -3768,7 +3765,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
context->gva_to_gpa = paging32_gva_to_gpa;
}
- update_permission_bitmask(vcpu, context);
+ update_permission_bitmask(vcpu, context, false);
update_last_pte_bitmap(vcpu, context);
return 0;
@@ -3800,6 +3797,33 @@ int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context)
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
+int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
+ bool execonly)
+{
+ ASSERT(vcpu);
+ ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
+
+ context->shadow_root_level = kvm_x86_ops->get_tdp_level();
+
+ context->nx = true;
+ context->new_cr3 = paging_new_cr3;
+ context->page_fault = ept_page_fault;
+ context->gva_to_gpa = ept_gva_to_gpa;
+ context->sync_page = ept_sync_page;
+ context->invlpg = ept_invlpg;
+ context->update_pte = ept_update_pte;
+ context->free = paging_free;
+ context->root_level = context->shadow_root_level;
+ context->root_hpa = INVALID_PAGE;
+ context->direct_map = false;
+
+ update_permission_bitmask(vcpu, context, true);
+ reset_rsvds_bits_mask_ept(vcpu, context, execonly);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_init_shadow_ept_mmu);
+
static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
{
int r = kvm_init_shadow_mmu(vcpu, vcpu->arch.walk_mmu);
@@ -3847,7 +3871,7 @@ static int init_kvm_nested_mmu(struct kvm_vcpu *vcpu)
g_context->gva_to_gpa = paging32_gva_to_gpa_nested;
}
- update_permission_bitmask(vcpu, g_context);
+ update_permission_bitmask(vcpu, g_context, false);
update_last_pte_bitmap(vcpu, g_context);
return 0;
@@ -3923,8 +3947,8 @@ static bool need_remote_flush(u64 old, u64 new)
return true;
if ((old ^ new) & PT64_BASE_ADDR_MASK)
return true;
- old ^= PT64_NX_MASK;
- new ^= PT64_NX_MASK;
+ old ^= shadow_nx_mask;
+ new ^= shadow_nx_mask;
return (old & ~new & PT64_PERM_MASK) != 0;
}
@@ -4182,7 +4206,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
switch (er) {
case EMULATE_DONE:
return 1;
- case EMULATE_DO_MMIO:
+ case EMULATE_USER_EXIT:
++vcpu->stat.mmio_exits;
/* fall through */
case EMULATE_FAIL:
@@ -4390,11 +4414,8 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
/*
* The very rare case: if the generation-number is round,
* zap all shadow pages.
- *
- * The max value is MMIO_MAX_GEN - 1 since it is not called
- * when mark memslot invalid.
*/
- if (unlikely(kvm_current_mmio_generation(kvm) >= (MMIO_MAX_GEN - 1))) {
+ if (unlikely(kvm_current_mmio_generation(kvm) >= MMIO_MAX_GEN)) {
printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n");
kvm_mmu_invalidate_zap_all_pages(kvm);
}
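Aside, not part of the patch: reset_rsvds_bits_mask_ept() above builds bad_mt_xwr with one bit per possible low-6-bit EPT PTE pattern (XWR permissions in bits 2:0, memory type in bits 5:3), flagging the reserved memory types 2, 3 and 7 and the write-without-read combinations, plus execute-only unless that is supported. A stand-alone sketch that reproduces the same bitmap:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint64_t build_bad_mt_xwr(bool execonly)
{
        uint64_t bad = 0;
        int pte;

        for (pte = 0; pte < 64; pte++) {
                int rwx_bits = pte & 7;         /* X|W|R permission bits */
                int mt = pte >> 3;              /* memory type */

                if (mt == 0x2 || mt == 0x3 || mt == 0x7 ||      /* reserved memory types */
                    rwx_bits == 0x2 || rwx_bits == 0x6 ||       /* write without read */
                    (rwx_bits == 0x4 && !execonly))             /* execute-only unsupported */
                        bad |= 1ull << pte;
        }
        return bad;
}

int main(void)
{
        printf("bad_mt_xwr (execonly=0) = %#llx\n",
               (unsigned long long)build_bad_mt_xwr(false));
        printf("bad_mt_xwr (execonly=1) = %#llx\n",
               (unsigned long long)build_bad_mt_xwr(true));
        return 0;
}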
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 5b59c573aba..77e044a0f5f 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -71,6 +71,8 @@ enum {
int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
int kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
+int kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
+ bool execonly);
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 7769699d48a..04333015917 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -23,6 +23,13 @@
* so the code in this file is compiled twice, once per pte size.
*/
+/*
+ * This is used to catch non-optimized PT_GUEST_(DIRTY|ACCESS)_SHIFT macro
+ * uses for EPT, which has no A/D paging bits.
+ */
+extern u64 __pure __using_nonexistent_pte_bit(void)
+ __compiletime_error("wrong use of PT_GUEST_(DIRTY|ACCESS)_SHIFT");
+
#if PTTYPE == 64
#define pt_element_t u64
#define guest_walker guest_walker64
@@ -32,6 +39,10 @@
#define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
#define PT_INDEX(addr, level) PT64_INDEX(addr, level)
#define PT_LEVEL_BITS PT64_LEVEL_BITS
+ #define PT_GUEST_ACCESSED_MASK PT_ACCESSED_MASK
+ #define PT_GUEST_DIRTY_MASK PT_DIRTY_MASK
+ #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
+ #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
#ifdef CONFIG_X86_64
#define PT_MAX_FULL_LEVELS 4
#define CMPXCHG cmpxchg
@@ -49,7 +60,26 @@
#define PT_INDEX(addr, level) PT32_INDEX(addr, level)
#define PT_LEVEL_BITS PT32_LEVEL_BITS
#define PT_MAX_FULL_LEVELS 2
+ #define PT_GUEST_ACCESSED_MASK PT_ACCESSED_MASK
+ #define PT_GUEST_DIRTY_MASK PT_DIRTY_MASK
+ #define PT_GUEST_DIRTY_SHIFT PT_DIRTY_SHIFT
+ #define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
#define CMPXCHG cmpxchg
+#elif PTTYPE == PTTYPE_EPT
+ #define pt_element_t u64
+ #define guest_walker guest_walkerEPT
+ #define FNAME(name) ept_##name
+ #define PT_BASE_ADDR_MASK PT64_BASE_ADDR_MASK
+ #define PT_LVL_ADDR_MASK(lvl) PT64_LVL_ADDR_MASK(lvl)
+ #define PT_LVL_OFFSET_MASK(lvl) PT64_LVL_OFFSET_MASK(lvl)
+ #define PT_INDEX(addr, level) PT64_INDEX(addr, level)
+ #define PT_LEVEL_BITS PT64_LEVEL_BITS
+ #define PT_GUEST_ACCESSED_MASK 0
+ #define PT_GUEST_DIRTY_MASK 0
+ #define PT_GUEST_DIRTY_SHIFT __using_nonexistent_pte_bit()
+ #define PT_GUEST_ACCESSED_SHIFT __using_nonexistent_pte_bit()
+ #define CMPXCHG cmpxchg64
+ #define PT_MAX_FULL_LEVELS 4
#else
#error Invalid PTTYPE value
#endif
@@ -80,6 +110,40 @@ static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
}
+static inline void FNAME(protect_clean_gpte)(unsigned *access, unsigned gpte)
+{
+ unsigned mask;
+
+ /* dirty bit is not supported, so no need to track it */
+ if (!PT_GUEST_DIRTY_MASK)
+ return;
+
+ BUILD_BUG_ON(PT_WRITABLE_MASK != ACC_WRITE_MASK);
+
+ mask = (unsigned)~ACC_WRITE_MASK;
+ /* Allow write access to dirty gptes */
+ mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &
+ PT_WRITABLE_MASK;
+ *access &= mask;
+}
+
+static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)
+{
+ int bit7 = (gpte >> 7) & 1, low6 = gpte & 0x3f;
+
+ return (gpte & mmu->rsvd_bits_mask[bit7][level-1]) |
+ ((mmu->bad_mt_xwr & (1ull << low6)) != 0);
+}
+
+static inline int FNAME(is_present_gpte)(unsigned long pte)
+{
+#if PTTYPE != PTTYPE_EPT
+ return is_present_gpte(pte);
+#else
+ return pte & 7;
+#endif
+}
+
static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
pt_element_t __user *ptep_user, unsigned index,
pt_element_t orig_pte, pt_element_t new_pte)
@@ -103,6 +167,42 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
return (ret != orig_pte);
}
+static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu,
+ struct kvm_mmu_page *sp, u64 *spte,
+ u64 gpte)
+{
+ if (FNAME(is_rsvd_bits_set)(&vcpu->arch.mmu, gpte, PT_PAGE_TABLE_LEVEL))
+ goto no_present;
+
+ if (!FNAME(is_present_gpte)(gpte))
+ goto no_present;
+
+ /* if the accessed bit is not supported, prefetch non-accessed gptes too */
+ if (PT_GUEST_ACCESSED_MASK && !(gpte & PT_GUEST_ACCESSED_MASK))
+ goto no_present;
+
+ return false;
+
+no_present:
+ drop_spte(vcpu->kvm, spte);
+ return true;
+}
+
+static inline unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, u64 gpte)
+{
+ unsigned access;
+#if PTTYPE == PTTYPE_EPT
+ access = ((gpte & VMX_EPT_WRITABLE_MASK) ? ACC_WRITE_MASK : 0) |
+ ((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
+ ACC_USER_MASK;
+#else
+ access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
+ access &= ~(gpte >> PT64_NX_SHIFT);
+#endif
+
+ return access;
+}
+
static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
struct kvm_mmu *mmu,
struct guest_walker *walker,
@@ -114,18 +214,23 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
gfn_t table_gfn;
int ret;
+ /* dirty/accessed bits are not supported, so no need to update them */
+ if (!PT_GUEST_DIRTY_MASK)
+ return 0;
+
for (level = walker->max_level; level >= walker->level; --level) {
pte = orig_pte = walker->ptes[level - 1];
table_gfn = walker->table_gfn[level - 1];
ptep_user = walker->ptep_user[level - 1];
index = offset_in_page(ptep_user) / sizeof(pt_element_t);
- if (!(pte & PT_ACCESSED_MASK)) {
+ if (!(pte & PT_GUEST_ACCESSED_MASK)) {
trace_kvm_mmu_set_accessed_bit(table_gfn, index, sizeof(pte));
- pte |= PT_ACCESSED_MASK;
+ pte |= PT_GUEST_ACCESSED_MASK;
}
- if (level == walker->level && write_fault && !is_dirty_gpte(pte)) {
+ if (level == walker->level && write_fault &&
+ !(pte & PT_GUEST_DIRTY_MASK)) {
trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
- pte |= PT_DIRTY_MASK;
+ pte |= PT_GUEST_DIRTY_MASK;
}
if (pte == orig_pte)
continue;
@@ -170,7 +275,7 @@ retry_walk:
if (walker->level == PT32E_ROOT_LEVEL) {
pte = mmu->get_pdptr(vcpu, (addr >> 30) & 3);
trace_kvm_mmu_paging_element(pte, walker->level);
- if (!is_present_gpte(pte))
+ if (!FNAME(is_present_gpte)(pte))
goto error;
--walker->level;
}
@@ -179,7 +284,7 @@ retry_walk:
ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
(mmu->get_cr3(vcpu) & CR3_NONPAE_RESERVED_BITS) == 0);
- accessed_dirty = PT_ACCESSED_MASK;
+ accessed_dirty = PT_GUEST_ACCESSED_MASK;
pt_access = pte_access = ACC_ALL;
++walker->level;
@@ -215,17 +320,17 @@ retry_walk:
trace_kvm_mmu_paging_element(pte, walker->level);
- if (unlikely(!is_present_gpte(pte)))
+ if (unlikely(!FNAME(is_present_gpte)(pte)))
goto error;
- if (unlikely(is_rsvd_bits_set(&vcpu->arch.mmu, pte,
- walker->level))) {
+ if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte,
+ walker->level))) {
errcode |= PFERR_RSVD_MASK | PFERR_PRESENT_MASK;
goto error;
}
accessed_dirty &= pte;
- pte_access = pt_access & gpte_access(vcpu, pte);
+ pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
walker->ptes[walker->level - 1] = pte;
} while (!is_last_gpte(mmu, walker->level, pte));
@@ -248,13 +353,15 @@ retry_walk:
walker->gfn = real_gpa >> PAGE_SHIFT;
if (!write_fault)
- protect_clean_gpte(&pte_access, pte);
+ FNAME(protect_clean_gpte)(&pte_access, pte);
else
/*
- * On a write fault, fold the dirty bit into accessed_dirty by
- * shifting it one place right.
+ * On a write fault, fold the dirty bit into accessed_dirty.
+ * For modes without A/D bit support, accessed_dirty is
+ * always clear.
*/
- accessed_dirty &= pte >> (PT_DIRTY_SHIFT - PT_ACCESSED_SHIFT);
+ accessed_dirty &= pte >>
+ (PT_GUEST_DIRTY_SHIFT - PT_GUEST_ACCESSED_SHIFT);
if (unlikely(!accessed_dirty)) {
ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, write_fault);
@@ -279,6 +386,25 @@ error:
walker->fault.vector = PF_VECTOR;
walker->fault.error_code_valid = true;
walker->fault.error_code = errcode;
+
+#if PTTYPE == PTTYPE_EPT
+ /*
+ * Use PFERR_RSVD_MASK in error_code to tell if an EPT
+ * misconfiguration needs to be injected. The detection is
+ * done by is_rsvd_bits_set() above.
+ *
+ * We set up the value of exit_qualification to inject:
+ * [2:0] - Derived from [2:0] of the real exit_qualification at EPT violation
+ * [5:3] - Calculated by the page walk of the guest EPT page tables
+ * [8:7] - Derived from [8:7] of the real exit_qualification
+ *
+ * The other bits are set to 0.
+ */
+ if (!(errcode & PFERR_RSVD_MASK)) {
+ vcpu->arch.exit_qualification &= 0x187;
+ vcpu->arch.exit_qualification |= ((pt_access & pte) & 0x7) << 3;
+ }
+#endif
walker->fault.address = addr;
walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu;
@@ -293,6 +419,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
access);
}
+#if PTTYPE != PTTYPE_EPT
static int FNAME(walk_addr_nested)(struct guest_walker *walker,
struct kvm_vcpu *vcpu, gva_t addr,
u32 access)
@@ -300,6 +427,7 @@ static int FNAME(walk_addr_nested)(struct guest_walker *walker,
return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
addr, access);
}
+#endif
static bool
FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
@@ -309,14 +437,14 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
gfn_t gfn;
pfn_t pfn;
- if (prefetch_invalid_gpte(vcpu, sp, spte, gpte))
+ if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
return false;
pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
gfn = gpte_to_gfn(gpte);
- pte_access = sp->role.access & gpte_access(vcpu, gpte);
- protect_clean_gpte(&pte_access, gpte);
+ pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+ FNAME(protect_clean_gpte)(&pte_access, gpte);
pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
no_dirty_log && (pte_access & ACC_WRITE_MASK));
if (is_error_pfn(pfn))
@@ -446,7 +574,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
goto out_gpte_changed;
if (sp)
- link_shadow_page(it.sptep, sp);
+ link_shadow_page(it.sptep, sp, PT_GUEST_ACCESSED_MASK);
}
for (;
@@ -466,7 +594,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
true, direct_access, it.sptep);
- link_shadow_page(it.sptep, sp);
+ link_shadow_page(it.sptep, sp, PT_GUEST_ACCESSED_MASK);
}
clear_sp_write_flooding_count(it.sptep);
@@ -727,6 +855,7 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
return gpa;
}
+#if PTTYPE != PTTYPE_EPT
static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
u32 access,
struct x86_exception *exception)
@@ -745,6 +874,7 @@ static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
return gpa;
}
+#endif
/*
* Using the cached information from sp->gfns is safe because:
@@ -785,15 +915,15 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
sizeof(pt_element_t)))
return -EINVAL;
- if (prefetch_invalid_gpte(vcpu, sp, &sp->spt[i], gpte)) {
+ if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
vcpu->kvm->tlbs_dirty++;
continue;
}
gfn = gpte_to_gfn(gpte);
pte_access = sp->role.access;
- pte_access &= gpte_access(vcpu, gpte);
- protect_clean_gpte(&pte_access, gpte);
+ pte_access &= FNAME(gpte_access)(vcpu, gpte);
+ FNAME(protect_clean_gpte)(&pte_access, gpte);
if (sync_mmio_spte(vcpu->kvm, &sp->spt[i], gfn, pte_access,
&nr_present))
@@ -830,3 +960,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
#undef gpte_to_gfn
#undef gpte_to_gfn_lvl
#undef CMPXCHG
+#undef PT_GUEST_ACCESSED_MASK
+#undef PT_GUEST_DIRTY_MASK
+#undef PT_GUEST_DIRTY_SHIFT
+#undef PT_GUEST_ACCESSED_SHIFT
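Aside, not part of the patch: the __using_nonexistent_pte_bit() declaration above relies on the kernel's __compiletime_error() pattern, in which any call that survives dead-code elimination turns into a build error; because PT_GUEST_DIRTY_MASK and PT_GUEST_ACCESSED_MASK are 0 for the EPT page-table type, the guarded branches are constant-folded away and the poisoned shift constants are never emitted. A minimal GCC sketch of the pattern (build with -O2 so the dead branch is actually eliminated):

/* Poisoned symbol: any call still present after optimization fails the build. */
extern int no_such_bit(void) __attribute__((error("A/D shift used without A/D bits")));

#define GUEST_DIRTY_MASK  0                     /* EPT-style: no dirty bit */
#define GUEST_DIRTY_SHIFT no_such_bit()         /* must never be evaluated */

static int fold_dirty(unsigned long pte)
{
        if (!GUEST_DIRTY_MASK)                  /* constant true: always taken */
                return 0;
        /* Dead code below is folded away at -O2, so no_such_bit() never materializes. */
        return (int)((pte >> GUEST_DIRTY_SHIFT) & 1);
}

int main(void)
{
        return fold_dirty(0);
}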
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index c53e797e736..5c4f63151b4 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -160,7 +160,7 @@ static void stop_counter(struct kvm_pmc *pmc)
static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
unsigned config, bool exclude_user, bool exclude_kernel,
- bool intr)
+ bool intr, bool in_tx, bool in_tx_cp)
{
struct perf_event *event;
struct perf_event_attr attr = {
@@ -173,6 +173,10 @@ static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
.exclude_kernel = exclude_kernel,
.config = config,
};
+ if (in_tx)
+ attr.config |= HSW_IN_TX;
+ if (in_tx_cp)
+ attr.config |= HSW_IN_TX_CHECKPOINTED;
attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
@@ -226,7 +230,9 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
ARCH_PERFMON_EVENTSEL_INV |
- ARCH_PERFMON_EVENTSEL_CMASK))) {
+ ARCH_PERFMON_EVENTSEL_CMASK |
+ HSW_IN_TX |
+ HSW_IN_TX_CHECKPOINTED))) {
config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
unit_mask);
if (config != PERF_COUNT_HW_MAX)
@@ -239,7 +245,9 @@ static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
reprogram_counter(pmc, type, config,
!(eventsel & ARCH_PERFMON_EVENTSEL_USR),
!(eventsel & ARCH_PERFMON_EVENTSEL_OS),
- eventsel & ARCH_PERFMON_EVENTSEL_INT);
+ eventsel & ARCH_PERFMON_EVENTSEL_INT,
+ (eventsel & HSW_IN_TX),
+ (eventsel & HSW_IN_TX_CHECKPOINTED));
}
static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
@@ -256,7 +264,7 @@ static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
arch_events[fixed_pmc_events[idx]].event_type,
!(en & 0x2), /* exclude user */
!(en & 0x1), /* exclude kernel */
- pmi);
+ pmi, false, false);
}
static inline u8 fixed_en_pmi(u64 ctrl, int idx)
@@ -408,7 +416,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
if (data == pmc->eventsel)
return 0;
- if (!(data & 0xffffffff00200000ull)) {
+ if (!(data & pmu->reserved_bits)) {
reprogram_gp_counter(pmc, data);
return 0;
}
@@ -450,6 +458,7 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
pmu->counter_bitmask[KVM_PMC_GP] = 0;
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
pmu->version = 0;
+ pmu->reserved_bits = 0xffffffff00200000ull;
entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
if (!entry)
@@ -478,6 +487,12 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
pmu->global_ctrl_mask = ~pmu->global_ctrl;
+
+ entry = kvm_find_cpuid_entry(vcpu, 7, 0);
+ if (entry &&
+ (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
+ (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
+ pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
}
void kvm_pmu_init(struct kvm_vcpu *vcpu)
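Aside, not part of the patch: kvm_pmu_cpuid_update() above starts from the previously hard-coded event-select reserved mask and XORs out the two Haswell TSX bits once HLE/RTM are advertised, so a guest may then set them and reprogram_gp_counter() forwards them as the HSW_IN_TX / HSW_IN_TX_CHECKPOINTED perf attributes. A hedged sketch of the mask arithmetic (bit positions 32 and 33 are assumed here from the perf_event definitions):

#include <stdio.h>
#include <stdint.h>

#define HSW_IN_TX               (1ULL << 32)    /* assumed position */
#define HSW_IN_TX_CHECKPOINTED  (1ULL << 33)    /* assumed position */

int main(void)
{
        uint64_t reserved = 0xffffffff00200000ull;      /* default reserved bits */
        int tsx_exposed = 1;                            /* pretend HLE/RTM are advertised */

        if (tsx_exposed)
                reserved ^= HSW_IN_TX | HSW_IN_TX_CHECKPOINTED;

        printf("reserved_bits = %#llx\n", (unsigned long long)reserved);
        /* An eventsel carrying only the TSX bits now passes the reserved check. */
        printf("TSX bits allowed: %s\n",
               (reserved & (HSW_IN_TX | HSW_IN_TX_CHECKPOINTED)) ? "no" : "yes");
        return 0;
}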
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 064d0be67ec..1f1da43ff2a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -373,6 +373,7 @@ struct nested_vmx {
* we must keep them pinned while L2 runs.
*/
struct page *apic_access_page;
+ u64 msr_ia32_feature_control;
};
#define POSTED_INTR_ON 0
@@ -711,10 +712,10 @@ static void nested_release_page_clean(struct page *page)
kvm_release_page_clean(page);
}
+static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
-static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
static void vmx_set_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg);
@@ -1039,12 +1040,16 @@ static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
(vmcs12->secondary_vm_exec_control & bit);
}
-static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12,
- struct kvm_vcpu *vcpu)
+static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
{
return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
}
+static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
+}
+
static inline bool is_exception(u32 intr_info)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
@@ -2155,6 +2160,7 @@ static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high;
static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high;
static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high;
static u32 nested_vmx_misc_low, nested_vmx_misc_high;
+static u32 nested_vmx_ept_caps;
static __init void nested_vmx_setup_ctls_msrs(void)
{
/*
@@ -2190,14 +2196,17 @@ static __init void nested_vmx_setup_ctls_msrs(void)
* If bit 55 of VMX_BASIC is off, bits 0-8 and 10, 11, 13, 14, 16 and
* 17 must be 1.
*/
+ rdmsr(MSR_IA32_VMX_EXIT_CTLS,
+ nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high);
nested_vmx_exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
/* Note that guest use of VM_EXIT_ACK_INTR_ON_EXIT is not supported. */
+ nested_vmx_exit_ctls_high &=
#ifdef CONFIG_X86_64
- nested_vmx_exit_ctls_high = VM_EXIT_HOST_ADDR_SPACE_SIZE;
-#else
- nested_vmx_exit_ctls_high = 0;
+ VM_EXIT_HOST_ADDR_SPACE_SIZE |
#endif
- nested_vmx_exit_ctls_high |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
+ VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT;
+ nested_vmx_exit_ctls_high |= (VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
+ VM_EXIT_LOAD_IA32_EFER);
/* entry controls */
rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
@@ -2205,8 +2214,12 @@ static __init void nested_vmx_setup_ctls_msrs(void)
/* If bit 55 of VMX_BASIC is off, bits 0-8 and 12 must be 1. */
nested_vmx_entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
nested_vmx_entry_ctls_high &=
- VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_IA32E_MODE;
- nested_vmx_entry_ctls_high |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
+#ifdef CONFIG_X86_64
+ VM_ENTRY_IA32E_MODE |
+#endif
+ VM_ENTRY_LOAD_IA32_PAT;
+ nested_vmx_entry_ctls_high |= (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR |
+ VM_ENTRY_LOAD_IA32_EFER);
/* cpu-based controls */
rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
@@ -2241,6 +2254,22 @@ static __init void nested_vmx_setup_ctls_msrs(void)
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
SECONDARY_EXEC_WBINVD_EXITING;
+ if (enable_ept) {
+ /* nested EPT: emulate EPT also to L1 */
+ nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT;
+ nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
+ VMX_EPTP_WB_BIT | VMX_EPT_INVEPT_BIT;
+ nested_vmx_ept_caps &= vmx_capability.ept;
+ /*
+ * Since invept is completely emulated, we support both global
+ * and context invalidation independent of what the host cpu
+ * supports.
+ */
+ nested_vmx_ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
+ VMX_EPT_EXTENT_CONTEXT_BIT;
+ } else
+ nested_vmx_ept_caps = 0;
+
/* miscellaneous data */
rdmsr(MSR_IA32_VMX_MISC, nested_vmx_misc_low, nested_vmx_misc_high);
nested_vmx_misc_low &= VMX_MISC_PREEMPTION_TIMER_RATE_MASK |
@@ -2282,8 +2311,11 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
switch (msr_index) {
case MSR_IA32_FEATURE_CONTROL:
- *pdata = 0;
- break;
+ if (nested_vmx_allowed(vcpu)) {
+ *pdata = to_vmx(vcpu)->nested.msr_ia32_feature_control;
+ break;
+ }
+ return 0;
case MSR_IA32_VMX_BASIC:
/*
* This MSR reports some information about VMX support. We
@@ -2346,8 +2378,8 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
nested_vmx_secondary_ctls_high);
break;
case MSR_IA32_VMX_EPT_VPID_CAP:
- /* Currently, no nested ept or nested vpid */
- *pdata = 0;
+ /* Currently, no nested vpid support */
+ *pdata = nested_vmx_ept_caps;
break;
default:
return 0;
@@ -2356,14 +2388,24 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
return 1;
}
-static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
+static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
+ u32 msr_index = msr_info->index;
+ u64 data = msr_info->data;
+ bool host_initialized = msr_info->host_initiated;
+
if (!nested_vmx_allowed(vcpu))
return 0;
- if (msr_index == MSR_IA32_FEATURE_CONTROL)
- /* TODO: the right thing. */
+ if (msr_index == MSR_IA32_FEATURE_CONTROL) {
+ if (!host_initialized &&
+ to_vmx(vcpu)->nested.msr_ia32_feature_control
+ & FEATURE_CONTROL_LOCKED)
+ return 0;
+ to_vmx(vcpu)->nested.msr_ia32_feature_control = data;
return 1;
+ }
+
/*
* No need to treat VMX capability MSRs specially: If we don't handle
* them, handle_wrmsr will #GP(0), which is correct (they are readonly)
@@ -2494,7 +2536,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
/* Otherwise falls through */
default:
- if (vmx_set_vmx_msr(vcpu, msr_index, data))
+ if (vmx_set_vmx_msr(vcpu, msr_info))
break;
msr = find_msr_entry(vmx, msr_index);
if (msr) {
@@ -5302,9 +5344,13 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
/* It is a write fault? */
error_code = exit_qualification & (1U << 1);
+ /* It is a fetch fault? */
+ error_code |= (exit_qualification & (1U << 2)) << 2;
/* ept page table is present? */
error_code |= (exit_qualification >> 3) & 0x1;
+ vcpu->arch.exit_qualification = exit_qualification;
+
return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
}
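
The two added lines in handle_ept_violation() fold the instruction-fetch bit of the exit qualification into the page-fault error code and stash the raw qualification for later use by nested EPT. A stand-alone sketch of the bit shuffling (not part of the patch):

#include <stdint.h>
#include <stdio.h>

static uint32_t ept_error_code(uint64_t exit_qualification)
{
	uint32_t error_code;

	error_code  = exit_qualification & (1U << 1);        /* write fault          */
	error_code |= (exit_qualification & (1U << 2)) << 2; /* fetch fault -> bit 4 */
	error_code |= (exit_qualification >> 3) & 0x1;       /* EPT entry present    */
	return error_code;
}

int main(void)
{
	/* instruction fetch through a present mapping */
	printf("%#x\n", (unsigned)ept_error_code((1 << 2) | (1 << 3)));	/* 0x11 */
	return 0;
}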
@@ -5438,7 +5484,8 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);
- if (err == EMULATE_DO_MMIO) {
+ if (err == EMULATE_USER_EXIT) {
+ ++vcpu->stat.mmio_exits;
ret = 0;
goto out;
}
@@ -5567,8 +5614,47 @@ static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
free_loaded_vmcs(&vmx->vmcs01);
}
+/*
+ * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
+ * set the success or error code of an emulated VMX instruction, as specified
+ * by Vol 2B, VMX Instruction Reference, "Conventions".
+ */
+static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
+{
+ vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
+ & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
+ X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
+}
+
+static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
+{
+ vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
+ & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
+ X86_EFLAGS_SF | X86_EFLAGS_OF))
+ | X86_EFLAGS_CF);
+}
+
static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
- u32 vm_instruction_error);
+ u32 vm_instruction_error)
+{
+ if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
+ /*
+ * failValid writes the error number to the current VMCS, which
+ * can't be done when there isn't a current VMCS.
+ */
+ nested_vmx_failInvalid(vcpu);
+ return;
+ }
+ vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
+ & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
+ X86_EFLAGS_SF | X86_EFLAGS_OF))
+ | X86_EFLAGS_ZF);
+ get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
+ /*
+ * We don't need to force a shadow sync because
+ * VM_INSTRUCTION_ERROR is not shadowed
+ */
+}
/*
* Emulate the VMXON instruction.
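
The three helpers moved above the VMXON handler implement the VMX "Conventions": VMsucceed clears the six arithmetic flags, VMfailInvalid leaves only CF set, and VMfailValid leaves only ZF set plus the error number written to the current VMCS. A user-space sketch of the same flag arithmetic (not part of the patch; flag encodings as in the architecture):

#include <stdint.h>
#include <stdio.h>

#define FL_CF 0x0001
#define FL_PF 0x0004
#define FL_AF 0x0010
#define FL_ZF 0x0040
#define FL_SF 0x0080
#define FL_OF 0x0800

#define FL_ARITH (FL_CF | FL_PF | FL_AF | FL_ZF | FL_SF | FL_OF)

static uint64_t vmx_succeed(uint64_t rflags)      { return rflags & ~FL_ARITH; }
static uint64_t vmx_fail_invalid(uint64_t rflags) { return (rflags & ~FL_ARITH) | FL_CF; }
static uint64_t vmx_fail_valid(uint64_t rflags)   { return (rflags & ~FL_ARITH) | FL_ZF; }

int main(void)
{
	printf("%#llx %#llx %#llx\n",
	       (unsigned long long)vmx_succeed(0x2 | FL_ARITH),   /* 0x2  */
	       (unsigned long long)vmx_fail_invalid(0x2),         /* 0x3  */
	       (unsigned long long)vmx_fail_valid(0x2));          /* 0x42 */
	return 0;
}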
@@ -5583,6 +5669,8 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
struct kvm_segment cs;
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs *shadow_vmcs;
+ const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED
+ | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
/* The Intel VMX Instruction Reference lists a bunch of bits that
* are prerequisite to running VMXON, most notably cr4.VMXE must be
@@ -5611,6 +5699,13 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
skip_emulated_instruction(vcpu);
return 1;
}
+
+ if ((vmx->nested.msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
+ != VMXON_NEEDED_FEATURES) {
+ kvm_inject_gp(vcpu, 0);
+ return 1;
+ }
+
if (enable_shadow_vmcs) {
shadow_vmcs = alloc_vmcs();
if (!shadow_vmcs)
@@ -5628,6 +5723,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
vmx->nested.vmxon = true;
skip_emulated_instruction(vcpu);
+ nested_vmx_succeed(vcpu);
return 1;
}
@@ -5712,6 +5808,7 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
return 1;
free_nested(to_vmx(vcpu));
skip_emulated_instruction(vcpu);
+ nested_vmx_succeed(vcpu);
return 1;
}
@@ -5768,48 +5865,6 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
return 0;
}
-/*
- * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
- * set the success or error code of an emulated VMX instruction, as specified
- * by Vol 2B, VMX Instruction Reference, "Conventions".
- */
-static void nested_vmx_succeed(struct kvm_vcpu *vcpu)
-{
- vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
- & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
- X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
-}
-
-static void nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
-{
- vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
- & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
- X86_EFLAGS_SF | X86_EFLAGS_OF))
- | X86_EFLAGS_CF);
-}
-
-static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
- u32 vm_instruction_error)
-{
- if (to_vmx(vcpu)->nested.current_vmptr == -1ull) {
- /*
- * failValid writes the error number to the current VMCS, which
- * can't be done there isn't a current VMCS.
- */
- nested_vmx_failInvalid(vcpu);
- return;
- }
- vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
- & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
- X86_EFLAGS_SF | X86_EFLAGS_OF))
- | X86_EFLAGS_ZF);
- get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
- /*
- * We don't need to force a shadow sync because
- * VM_INSTRUCTION_ERROR is not shadowed
- */
-}
-
/* Emulate the VMCLEAR instruction */
static int handle_vmclear(struct kvm_vcpu *vcpu)
{
@@ -5972,8 +6027,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
unsigned long field;
u64 field_value;
struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
- unsigned long *fields = (unsigned long *)shadow_read_write_fields;
- int num_fields = max_shadow_read_write_fields;
+ const unsigned long *fields = shadow_read_write_fields;
+ const int num_fields = max_shadow_read_write_fields;
vmcs_load(shadow_vmcs);
@@ -6002,12 +6057,11 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
{
- unsigned long *fields[] = {
- (unsigned long *)shadow_read_write_fields,
- (unsigned long *)shadow_read_only_fields
+ const unsigned long *fields[] = {
+ shadow_read_write_fields,
+ shadow_read_only_fields
};
- int num_lists = ARRAY_SIZE(fields);
- int max_fields[] = {
+ const int max_fields[] = {
max_shadow_read_write_fields,
max_shadow_read_only_fields
};
@@ -6018,7 +6072,7 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
vmcs_load(shadow_vmcs);
- for (q = 0; q < num_lists; q++) {
+ for (q = 0; q < ARRAY_SIZE(fields); q++) {
for (i = 0; i < max_fields[q]; i++) {
field = fields[q][i];
vmcs12_read_any(&vmx->vcpu, field, &field_value);
@@ -6248,6 +6302,74 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
return 1;
}
+/* Emulate the INVEPT instruction */
+static int handle_invept(struct kvm_vcpu *vcpu)
+{
+ u32 vmx_instruction_info, types;
+ unsigned long type;
+ gva_t gva;
+ struct x86_exception e;
+ struct {
+ u64 eptp, gpa;
+ } operand;
+ u64 eptp_mask = ((1ull << 51) - 1) & PAGE_MASK;
+
+ if (!(nested_vmx_secondary_ctls_high & SECONDARY_EXEC_ENABLE_EPT) ||
+ !(nested_vmx_ept_caps & VMX_EPT_INVEPT_BIT)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
+ if (!nested_vmx_check_permission(vcpu))
+ return 1;
+
+ if (!kvm_read_cr0_bits(vcpu, X86_CR0_PE)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+ }
+
+ vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
+ type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
+
+ types = (nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
+
+ if (!(types & (1UL << type))) {
+ nested_vmx_failValid(vcpu,
+ VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
+ return 1;
+ }
+
+ /* According to the Intel VMX instruction reference, the memory
+ * operand is read even if it isn't needed (e.g., for type==global)
+ */
+ if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+ vmx_instruction_info, &gva))
+ return 1;
+ if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
+ sizeof(operand), &e)) {
+ kvm_inject_page_fault(vcpu, &e);
+ return 1;
+ }
+
+ switch (type) {
+ case VMX_EPT_EXTENT_CONTEXT:
+ if ((operand.eptp & eptp_mask) !=
+ (nested_ept_get_cr3(vcpu) & eptp_mask))
+ break;
+ case VMX_EPT_EXTENT_GLOBAL:
+ kvm_mmu_sync_roots(vcpu);
+ kvm_mmu_flush_tlb(vcpu);
+ nested_vmx_succeed(vcpu);
+ break;
+ default:
+ BUG_ON(1);
+ break;
+ }
+
+ skip_emulated_instruction(vcpu);
+ return 1;
+}
+
/*
* The exit handlers return 1 if the exit was handled fully and guest execution
* may resume. Otherwise they set the kvm_run parameter to indicate what needs
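
handle_invept() above validates the requested invalidation type against the extents advertised in the EPT capabilities: "& 6" keeps the single-context (type 1) and global (type 2) bits, and the requested type is then tested against that set. A compressed illustration (not part of the patch; the shift value is an assumption used only here):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ASSUMED_EPT_EXTENT_SHIFT  24
#define INVEPT_SINGLE_CONTEXT      1
#define INVEPT_GLOBAL              2

static bool invept_type_supported(uint32_t ept_caps, unsigned long type)
{
	uint32_t types = (ept_caps >> ASSUMED_EPT_EXTENT_SHIFT) & 6;

	return types & (1UL << type);
}

int main(void)
{
	/* capabilities advertising only the global extent */
	uint32_t caps = 1u << (ASSUMED_EPT_EXTENT_SHIFT + INVEPT_GLOBAL);

	printf("%d %d\n",
	       invept_type_supported(caps, INVEPT_GLOBAL),          /* 1 */
	       invept_type_supported(caps, INVEPT_SINGLE_CONTEXT)); /* 0 */
	return 0;
}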
@@ -6292,6 +6414,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
[EXIT_REASON_PAUSE_INSTRUCTION] = handle_pause,
[EXIT_REASON_MWAIT_INSTRUCTION] = handle_invalid_op,
[EXIT_REASON_MONITOR_INSTRUCTION] = handle_invalid_op,
+ [EXIT_REASON_INVEPT] = handle_invept,
};
static const int kvm_vmx_max_exit_handlers =
@@ -6518,6 +6641,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
+ case EXIT_REASON_INVEPT:
/*
* VMX instructions trap unconditionally. This allows L1 to
* emulate them for its L2 guest, i.e., allows 3-level nesting!
@@ -6550,7 +6674,20 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
return nested_cpu_has2(vmcs12,
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
case EXIT_REASON_EPT_VIOLATION:
+ /*
+ * L0 always deals with the EPT violation. If nested EPT is
+ * used, and the nested mmu code discovers that the address is
+ * missing in the guest EPT table (EPT12), the EPT violation
+ * will be injected with nested_ept_inject_page_fault()
+ */
+ return 0;
case EXIT_REASON_EPT_MISCONFIG:
+ /*
+ * L2 never directly uses L1's EPT, but rather L0's own EPT
+ * table (shadow on EPT) or a merged EPT table that L0 built
+ * (EPT on EPT). So any problems with the structure of the
+ * table are L0's fault.
+ */
return 0;
case EXIT_REASON_PREEMPTION_TIMER:
return vmcs12->pin_based_vm_exec_control &
@@ -6638,7 +6775,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked &&
!(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(
- get_vmcs12(vcpu), vcpu)))) {
+ get_vmcs12(vcpu))))) {
if (vmx_interrupt_allowed(vcpu)) {
vmx->soft_vnmi_blocked = 0;
} else if (vmx->vnmi_blocked_time > 1000000000LL &&
@@ -7326,6 +7463,48 @@ static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
entry->ecx |= bit(X86_FEATURE_VMX);
}
+static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
+ struct x86_exception *fault)
+{
+ struct vmcs12 *vmcs12;
+ nested_vmx_vmexit(vcpu);
+ vmcs12 = get_vmcs12(vcpu);
+
+ if (fault->error_code & PFERR_RSVD_MASK)
+ vmcs12->vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
+ else
+ vmcs12->vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
+ vmcs12->exit_qualification = vcpu->arch.exit_qualification;
+ vmcs12->guest_physical_address = fault->address;
+}
+
+/* Callbacks for nested_ept_init_mmu_context: */
+
+static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
+{
+ /* return the page table to be shadowed - in our case, EPT12 */
+ return get_vmcs12(vcpu)->ept_pointer;
+}
+
+static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
+{
+ int r = kvm_init_shadow_ept_mmu(vcpu, &vcpu->arch.mmu,
+ nested_vmx_ept_caps & VMX_EPT_EXECUTE_ONLY_BIT);
+
+ vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
+ vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
+ vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;
+
+ vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
+
+ return r;
+}
+
+static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.walk_mmu = &vcpu->arch.mmu;
+}
+
/*
* prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
* L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
@@ -7388,7 +7567,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs12->guest_interruptibility_info);
vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
- vmcs_writel(GUEST_RFLAGS, vmcs12->guest_rflags);
+ vmx_set_rflags(vcpu, vmcs12->guest_rflags);
vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
vmcs12->guest_pending_dbg_exceptions);
vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
@@ -7508,15 +7687,24 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
- /* Note: IA32_MODE, LOAD_IA32_EFER are modified by vmx_set_efer below */
- vmcs_write32(VM_EXIT_CONTROLS,
- vmcs12->vm_exit_controls | vmcs_config.vmexit_ctrl);
- vmcs_write32(VM_ENTRY_CONTROLS, vmcs12->vm_entry_controls |
+ /* L2->L1 exit controls are emulated - the hardware exit is to L0 so
+ * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
+ * bits are further modified by vmx_set_efer() below.
+ */
+ vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
+
+ /* vmcs12's VM_ENTRY_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE are
+ * emulated by vmx_set_efer(), below.
+ */
+ vmcs_write32(VM_ENTRY_CONTROLS,
+ (vmcs12->vm_entry_controls & ~VM_ENTRY_LOAD_IA32_EFER &
+ ~VM_ENTRY_IA32E_MODE) |
(vmcs_config.vmentry_ctrl & ~VM_ENTRY_IA32E_MODE));
- if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)
+ if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) {
vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
- else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
+ vcpu->arch.pat = vmcs12->guest_ia32_pat;
+ } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT)
vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
@@ -7538,6 +7726,11 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmx_flush_tlb(vcpu);
}
+ if (nested_cpu_has_ept(vmcs12)) {
+ kvm_mmu_unload(vcpu);
+ nested_ept_init_mmu_context(vcpu);
+ }
+
if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)
vcpu->arch.efer = vmcs12->guest_ia32_efer;
else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
@@ -7565,6 +7758,16 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
kvm_set_cr3(vcpu, vmcs12->guest_cr3);
kvm_mmu_reset_context(vcpu);
+ /*
+ * L1 may access L2's PDPTRs, so save them to construct vmcs12
+ */
+ if (enable_ept) {
+ vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
+ vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
+ vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
+ vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
+ }
+
kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
kvm_register_write(vcpu, VCPU_REGS_RIP, vmcs12->guest_rip);
}
@@ -7887,6 +8090,22 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
vmcs12->guest_pending_dbg_exceptions =
vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
+ /*
+ * In some cases (usually, nested EPT), L2 is allowed to change its
+ * own CR3 without exiting. If it has changed it, we must keep it.
+ * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
+ * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
+ *
+ * Additionally, restore L2's PDPTR to vmcs12.
+ */
+ if (enable_ept) {
+ vmcs12->guest_cr3 = vmcs_read64(GUEST_CR3);
+ vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
+ vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
+ vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
+ vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
+ }
+
vmcs12->vm_entry_controls =
(vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
(vmcs_read32(VM_ENTRY_CONTROLS) & VM_ENTRY_IA32E_MODE);
@@ -7948,6 +8167,8 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
+ struct kvm_segment seg;
+
if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
vcpu->arch.efer = vmcs12->host_ia32_efer;
else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
@@ -7982,7 +8203,9 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
kvm_set_cr4(vcpu, vmcs12->host_cr4);
- /* shadow page tables on either EPT or shadow page tables */
+ if (nested_cpu_has_ept(vmcs12))
+ nested_ept_uninit_mmu_context(vcpu);
+
kvm_set_cr3(vcpu, vmcs12->host_cr3);
kvm_mmu_reset_context(vcpu);
@@ -8001,23 +8224,61 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
- vmcs_writel(GUEST_TR_BASE, vmcs12->host_tr_base);
- vmcs_writel(GUEST_GS_BASE, vmcs12->host_gs_base);
- vmcs_writel(GUEST_FS_BASE, vmcs12->host_fs_base);
- vmcs_write16(GUEST_ES_SELECTOR, vmcs12->host_es_selector);
- vmcs_write16(GUEST_CS_SELECTOR, vmcs12->host_cs_selector);
- vmcs_write16(GUEST_SS_SELECTOR, vmcs12->host_ss_selector);
- vmcs_write16(GUEST_DS_SELECTOR, vmcs12->host_ds_selector);
- vmcs_write16(GUEST_FS_SELECTOR, vmcs12->host_fs_selector);
- vmcs_write16(GUEST_GS_SELECTOR, vmcs12->host_gs_selector);
- vmcs_write16(GUEST_TR_SELECTOR, vmcs12->host_tr_selector);
-
- if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT)
+
+ if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
+ vcpu->arch.pat = vmcs12->host_ia32_pat;
+ }
if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
vmcs_write64(GUEST_IA32_PERF_GLOBAL_CTRL,
vmcs12->host_ia32_perf_global_ctrl);
+ /* Set L1 segment info according to Intel SDM
+ 27.5.2 Loading Host Segment and Descriptor-Table Registers */
+ seg = (struct kvm_segment) {
+ .base = 0,
+ .limit = 0xFFFFFFFF,
+ .selector = vmcs12->host_cs_selector,
+ .type = 11,
+ .present = 1,
+ .s = 1,
+ .g = 1
+ };
+ if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
+ seg.l = 1;
+ else
+ seg.db = 1;
+ vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
+ seg = (struct kvm_segment) {
+ .base = 0,
+ .limit = 0xFFFFFFFF,
+ .type = 3,
+ .present = 1,
+ .s = 1,
+ .db = 1,
+ .g = 1
+ };
+ seg.selector = vmcs12->host_ds_selector;
+ vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
+ seg.selector = vmcs12->host_es_selector;
+ vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
+ seg.selector = vmcs12->host_ss_selector;
+ vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
+ seg.selector = vmcs12->host_fs_selector;
+ seg.base = vmcs12->host_fs_base;
+ vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
+ seg.selector = vmcs12->host_gs_selector;
+ seg.base = vmcs12->host_gs_base;
+ vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
+ seg = (struct kvm_segment) {
+ .base = vmcs12->host_tr_base,
+ .limit = 0x67,
+ .selector = vmcs12->host_tr_selector,
+ .type = 11,
+ .present = 1
+ };
+ vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
+
kvm_set_dr(vcpu, 7, 0x400);
vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
}
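
prepare_vmcs02() in the diff above now merges L1's requested VM-entry controls with what L0 needs, stripping the EFER and IA32e-mode bits that are emulated through vmx_set_efer(). A minimal sketch of that merge (not part of the patch; the two bit encodings below are assumptions for illustration only):

#include <stdint.h>
#include <stdio.h>

#define BIT_IA32E_MODE   (1U << 9)   /* assumed encoding, illustration only */
#define BIT_LOAD_EFER    (1U << 15)  /* assumed encoding, illustration only */

static uint32_t merge_entry_controls(uint32_t l1_wants, uint32_t l0_needs)
{
	/* drop the emulated bits from L1's request, keep L0's other needs */
	return (l1_wants & ~BIT_LOAD_EFER & ~BIT_IA32E_MODE) |
	       (l0_needs & ~BIT_IA32E_MODE);
}

int main(void)
{
	/* L1 asks for EFER load and IA32e mode; both are stripped here */
	printf("%#x\n", merge_entry_controls(BIT_LOAD_EFER | BIT_IA32E_MODE,
					     BIT_IA32E_MODE | 0x4));	/* 0x4 */
	return 0;
}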
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d21bce50531..e5ca72a5cdb 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -682,17 +682,6 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
*/
}
- /*
- * Does the new cr3 value map to physical memory? (Note, we
- * catch an invalid cr3 even in real-mode, because it would
- * cause trouble later on when we turn on paging anyway.)
- *
- * A real CPU would silently accept an invalid cr3 and would
- * attempt to use it - with largely undefined (and often hard
- * to debug) behavior on the guest side.
- */
- if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
- return 1;
vcpu->arch.cr3 = cr3;
__set_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail);
vcpu->arch.mmu.new_cr3(vcpu);
@@ -850,7 +839,8 @@ static u32 msrs_to_save[] = {
#ifdef CONFIG_X86_64
MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
#endif
- MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA
+ MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA,
+ MSR_IA32_FEATURE_CONTROL
};
static unsigned num_msrs_to_save;
@@ -1457,6 +1447,29 @@ static void pvclock_update_vm_gtod_copy(struct kvm *kvm)
#endif
}
+static void kvm_gen_update_masterclock(struct kvm *kvm)
+{
+#ifdef CONFIG_X86_64
+ int i;
+ struct kvm_vcpu *vcpu;
+ struct kvm_arch *ka = &kvm->arch;
+
+ spin_lock(&ka->pvclock_gtod_sync_lock);
+ kvm_make_mclock_inprogress_request(kvm);
+ /* no guest entries from this point */
+ pvclock_update_vm_gtod_copy(kvm);
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
+
+ /* guest entries allowed */
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests);
+
+ spin_unlock(&ka->pvclock_gtod_sync_lock);
+#endif
+}
+
static int kvm_guest_time_update(struct kvm_vcpu *v)
{
unsigned long flags, this_tsc_khz;
@@ -3806,6 +3819,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
delta = user_ns.clock - now_ns;
local_irq_enable();
kvm->arch.kvmclock_offset = delta;
+ kvm_gen_update_masterclock(kvm);
break;
}
case KVM_GET_CLOCK: {
@@ -4955,6 +4969,97 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
static int complete_emulated_mmio(struct kvm_vcpu *vcpu);
static int complete_emulated_pio(struct kvm_vcpu *vcpu);
+static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
+ unsigned long *db)
+{
+ u32 dr6 = 0;
+ int i;
+ u32 enable, rwlen;
+
+ enable = dr7;
+ rwlen = dr7 >> 16;
+ for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4)
+ if ((enable & 3) && (rwlen & 15) == type && db[i] == addr)
+ dr6 |= (1 << i);
+ return dr6;
+}
+
+static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, int *r)
+{
+ struct kvm_run *kvm_run = vcpu->run;
+
+ /*
+ * Use the "raw" value to see if TF was passed to the processor.
+ * Note that the new value of the flags has not been saved yet.
+ *
+ * This is correct even for TF set by the guest, because "the
+ * processor will not generate this exception after the instruction
+ * that sets the TF flag".
+ */
+ unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
+
+ if (unlikely(rflags & X86_EFLAGS_TF)) {
+ if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+ kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1;
+ kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
+ kvm_run->debug.arch.exception = DB_VECTOR;
+ kvm_run->exit_reason = KVM_EXIT_DEBUG;
+ *r = EMULATE_USER_EXIT;
+ } else {
+ vcpu->arch.emulate_ctxt.eflags &= ~X86_EFLAGS_TF;
+ /*
+ * "Certain debug exceptions may clear bit 0-3. The
+ * remaining contents of the DR6 register are never
+ * cleared by the processor".
+ */
+ vcpu->arch.dr6 &= ~15;
+ vcpu->arch.dr6 |= DR6_BS;
+ kvm_queue_exception(vcpu, DB_VECTOR);
+ }
+ }
+}
+
+static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
+{
+ struct kvm_run *kvm_run = vcpu->run;
+ unsigned long eip = vcpu->arch.emulate_ctxt.eip;
+ u32 dr6 = 0;
+
+ if (unlikely(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) &&
+ (vcpu->arch.guest_debug_dr7 & DR7_BP_EN_MASK)) {
+ dr6 = kvm_vcpu_check_hw_bp(eip, 0,
+ vcpu->arch.guest_debug_dr7,
+ vcpu->arch.eff_db);
+
+ if (dr6 != 0) {
+ kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
+ kvm_run->debug.arch.pc = kvm_rip_read(vcpu) +
+ get_segment_base(vcpu, VCPU_SREG_CS);
+
+ kvm_run->debug.arch.exception = DB_VECTOR;
+ kvm_run->exit_reason = KVM_EXIT_DEBUG;
+ *r = EMULATE_USER_EXIT;
+ return true;
+ }
+ }
+
+ if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK)) {
+ dr6 = kvm_vcpu_check_hw_bp(eip, 0,
+ vcpu->arch.dr7,
+ vcpu->arch.db);
+
+ if (dr6 != 0) {
+ vcpu->arch.dr6 &= ~15;
+ vcpu->arch.dr6 |= dr6;
+ kvm_queue_exception(vcpu, DB_VECTOR);
+ *r = EMULATE_DONE;
+ return true;
+ }
+ }
+
+ return false;
+}
+
int x86_emulate_instruction(struct kvm_vcpu *vcpu,
unsigned long cr2,
int emulation_type,
@@ -4975,6 +5080,16 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
if (!(emulation_type & EMULTYPE_NO_DECODE)) {
init_emulate_ctxt(vcpu);
+
+ /*
+ * We will reenter on the same instruction since
+ * we do not set complete_userspace_io. This does not
+ * handle watchpoints yet; those would be handled in
+ * the emulate_ops.
+ */
+ if (kvm_vcpu_check_breakpoint(vcpu, &r))
+ return r;
+
ctxt->interruptibility = 0;
ctxt->have_exception = false;
ctxt->perm_ok = false;
@@ -5031,17 +5146,18 @@ restart:
inject_emulated_exception(vcpu);
r = EMULATE_DONE;
} else if (vcpu->arch.pio.count) {
- if (!vcpu->arch.pio.in)
+ if (!vcpu->arch.pio.in) {
+ /* FIXME: return into emulator if single-stepping. */
vcpu->arch.pio.count = 0;
- else {
+ } else {
writeback = false;
vcpu->arch.complete_userspace_io = complete_emulated_pio;
}
- r = EMULATE_DO_MMIO;
+ r = EMULATE_USER_EXIT;
} else if (vcpu->mmio_needed) {
if (!vcpu->mmio_is_write)
writeback = false;
- r = EMULATE_DO_MMIO;
+ r = EMULATE_USER_EXIT;
vcpu->arch.complete_userspace_io = complete_emulated_mmio;
} else if (r == EMULATION_RESTART)
goto restart;
@@ -5050,10 +5166,12 @@ restart:
if (writeback) {
toggle_interruptibility(vcpu, ctxt->interruptibility);
- kvm_set_rflags(vcpu, ctxt->eflags);
kvm_make_request(KVM_REQ_EVENT, vcpu);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
kvm_rip_write(vcpu, ctxt->eip);
+ if (r == EMULATE_DONE)
+ kvm_vcpu_check_singlestep(vcpu, &r);
+ kvm_set_rflags(vcpu, ctxt->eflags);
} else
vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
@@ -5347,7 +5465,7 @@ static struct notifier_block pvclock_gtod_notifier = {
int kvm_arch_init(void *opaque)
{
int r;
- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
+ struct kvm_x86_ops *ops = opaque;
if (kvm_x86_ops) {
printk(KERN_ERR "kvm: already loaded the other module\n");
@@ -5495,6 +5613,23 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
return 1;
}
+/*
+ * kvm_pv_kick_cpu_op: Kick a vcpu.
+ *
+ * @apicid - apicid of vcpu to be kicked.
+ */
+static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
+{
+ struct kvm_lapic_irq lapic_irq;
+
+ lapic_irq.shorthand = 0;
+ lapic_irq.dest_mode = 0;
+ lapic_irq.dest_id = apicid;
+
+ lapic_irq.delivery_mode = APIC_DM_REMRD;
+ kvm_irq_delivery_to_apic(kvm, 0, &lapic_irq, NULL);
+}
+
int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
{
unsigned long nr, a0, a1, a2, a3, ret;
@@ -5528,6 +5663,10 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
case KVM_HC_VAPIC_POLL_IRQ:
ret = 0;
break;
+ case KVM_HC_KICK_CPU:
+ kvm_pv_kick_cpu_op(vcpu->kvm, a0, a1);
+ ret = 0;
+ break;
default:
ret = -KVM_ENOSYS;
break;
@@ -5689,29 +5828,6 @@ static void process_nmi(struct kvm_vcpu *vcpu)
kvm_make_request(KVM_REQ_EVENT, vcpu);
}
-static void kvm_gen_update_masterclock(struct kvm *kvm)
-{
-#ifdef CONFIG_X86_64
- int i;
- struct kvm_vcpu *vcpu;
- struct kvm_arch *ka = &kvm->arch;
-
- spin_lock(&ka->pvclock_gtod_sync_lock);
- kvm_make_mclock_inprogress_request(kvm);
- /* no guest entries from this point */
- pvclock_update_vm_gtod_copy(kvm);
-
- kvm_for_each_vcpu(i, vcpu, kvm)
- set_bit(KVM_REQ_CLOCK_UPDATE, &vcpu->requests);
-
- /* guest entries allowed */
- kvm_for_each_vcpu(i, vcpu, kvm)
- clear_bit(KVM_REQ_MCLOCK_INPROGRESS, &vcpu->requests);
-
- spin_unlock(&ka->pvclock_gtod_sync_lock);
-#endif
-}
-
static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
{
u64 eoi_exit_bitmap[4];
@@ -5950,6 +6066,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
kvm_apic_accept_events(vcpu);
switch(vcpu->arch.mp_state) {
case KVM_MP_STATE_HALTED:
+ vcpu->arch.pv.pv_unhalted = false;
vcpu->arch.mp_state =
KVM_MP_STATE_RUNNABLE;
case KVM_MP_STATE_RUNNABLE:
@@ -6061,6 +6178,8 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
vcpu->mmio_needed = 0;
+
+ /* FIXME: return into emulator if single-stepping. */
if (vcpu->mmio_is_write)
return 1;
vcpu->mmio_read_completed = 1;
@@ -6249,7 +6368,12 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
struct kvm_mp_state *mp_state)
{
kvm_apic_accept_events(vcpu);
- mp_state->mp_state = vcpu->arch.mp_state;
+ if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
+ vcpu->arch.pv.pv_unhalted)
+ mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
+ else
+ mp_state->mp_state = vcpu->arch.mp_state;
+
return 0;
}
@@ -6770,6 +6894,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
BUG_ON(vcpu->kvm == NULL);
kvm = vcpu->kvm;
+ vcpu->arch.pv.pv_unhalted = false;
vcpu->arch.emulate_ctxt.ops = &emulate_ops;
if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_bsp(vcpu))
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
@@ -7019,6 +7144,15 @@ out_free:
return -ENOMEM;
}
+void kvm_arch_memslots_updated(struct kvm *kvm)
+{
+ /*
+ * memslots->generation has been incremented.
+ * mmio generation may have reached its maximum value.
+ */
+ kvm_mmu_invalidate_mmio_sptes(kvm);
+}
+
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_userspace_memory_region *mem,
@@ -7079,11 +7213,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
*/
if ((change != KVM_MR_DELETE) && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
kvm_mmu_slot_remove_write_access(kvm, mem->slot);
- /*
- * If memory slot is created, or moved, we need to clear all
- * mmio sptes.
- */
- kvm_mmu_invalidate_mmio_sptes(kvm);
}
void kvm_arch_flush_shadow_all(struct kvm *kvm)
@@ -7103,6 +7232,7 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
!vcpu->arch.apf.halted)
|| !list_empty_careful(&vcpu->async_pf.done)
|| kvm_apic_has_events(vcpu)
+ || vcpu->arch.pv.pv_unhalted
|| atomic_read(&vcpu->arch.nmi_queued) ||
(kvm_arch_interrupt_allowed(vcpu) &&
kvm_cpu_has_interrupt(vcpu));
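
kvm_vcpu_check_hw_bp(), added in the x86.c hunks above, walks the four debug-address slots: DR7[7:0] holds two enable bits per slot and DR7[31:16] a 4-bit R/W+LEN field per slot, and a hit sets the matching B0-B3 bit of the DR6 value to report. A stand-alone version of that walk (not part of the patch):

#include <stdint.h>
#include <stdio.h>

static uint32_t hw_bp_hits(unsigned long addr, uint32_t type, uint32_t dr7,
			   const unsigned long db[4])
{
	uint32_t dr6 = 0, enable = dr7, rwlen = dr7 >> 16;
	int i;

	for (i = 0; i < 4; i++, enable >>= 2, rwlen >>= 4)
		if ((enable & 3) && (rwlen & 15) == type && db[i] == addr)
			dr6 |= (1 << i);	/* record the hit in B0-B3 */
	return dr6;
}

int main(void)
{
	unsigned long db[4] = { 0x1000, 0, 0x1000, 0 };

	/* DR7: slots 0 and 2 globally enabled, type 0 (execute) */
	printf("%#x\n", hw_bp_hits(0x1000, 0, (2 << 0) | (2 << 4), db)); /* 0x5 */
	return 0;
}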
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index 25b7ae8d058..7609e0e421e 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -6,6 +6,7 @@
*/
#include <asm/checksum.h>
#include <linux/module.h>
+#include <asm/smap.h>
/**
* csum_partial_copy_from_user - Copy and checksum from user space.
@@ -52,8 +53,10 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
len -= 2;
}
}
+ stac();
isum = csum_partial_copy_generic((__force const void *)src,
dst, len, isum, errp, NULL);
+ clac();
if (unlikely(*errp))
goto out_err;
@@ -82,6 +85,8 @@ __wsum
csum_partial_copy_to_user(const void *src, void __user *dst,
int len, __wsum isum, int *errp)
{
+ __wsum ret;
+
might_sleep();
if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
@@ -105,8 +110,11 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
}
*errp = 0;
- return csum_partial_copy_generic(src, (void __force *)dst,
- len, isum, NULL, errp);
+ stac();
+ ret = csum_partial_copy_generic(src, (void __force *)dst,
+ len, isum, NULL, errp);
+ clac();
+ return ret;
}
EXPORT_SYMBOL(csum_partial_copy_to_user);
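
The csum_partial_copy_to_user() hunk above stores the checksum in a local so clac() can run before returning, keeping the SMAP window open only around the actual copy. A schematic of the pattern (not part of the patch; stac()/clac() are stubbed here, not the real kernel primitives):

#include <stdint.h>
#include <stdio.h>

static void stac(void) { /* open the user-access window (stub) */ }
static void clac(void) { /* close the user-access window (stub) */ }

static uint32_t copy_and_sum(const void *src, void *dst, int len,
			     uint32_t (*body)(const void *, void *, int))
{
	uint32_t ret;

	stac();			/* allow the copy to touch user memory */
	ret = body(src, dst, len);
	clac();			/* close the window before returning   */
	return ret;
}

static uint32_t fake_body(const void *src, void *dst, int len)
{
	(void)src; (void)dst;
	return (uint32_t)len;	/* stand-in for the real checksum copy */
}

int main(void)
{
	char src[8] = "payload", dst[8];

	printf("%u\n", copy_and_sum(src, dst, sizeof(src), fake_body));
	return 0;
}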
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 906fea31579..c905e89e19f 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -68,7 +68,7 @@ EXPORT_SYMBOL(copy_in_user);
* Since protection fault in copy_from/to_user is not a normal situation,
* it is not necessary to optimize tail handling.
*/
-unsigned long
+__visible unsigned long
copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
{
char c;
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 5d7e51f3fd2..533a85e3a07 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -1,10 +1,8 @@
# x86 Opcode Maps
#
# This is (mostly) based on following documentations.
-# - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2
-# (#325383-040US, October 2011)
-# - Intel(R) Advanced Vector Extensions Programming Reference
-# (#319433-011,JUNE 2011).
+# - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2C
+# (#326018-047US, June 2013)
#
#<Opcode maps>
# Table: table-name
@@ -29,6 +27,7 @@
# - (F3): the last prefix is 0xF3
# - (F2): the last prefix is 0xF2
# - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
+# - (66&F2): Both 0x66 and 0xF2 prefixes are specified.
Table: one byte opcode
Referrer:
@@ -246,8 +245,8 @@ c2: RETN Iw (f64)
c3: RETN
c4: LES Gz,Mp (i64) | VEX+2byte (Prefix)
c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix)
-c6: Grp11 Eb,Ib (1A)
-c7: Grp11 Ev,Iz (1A)
+c6: Grp11A Eb,Ib (1A)
+c7: Grp11B Ev,Iz (1A)
c8: ENTER Iw,Ib
c9: LEAVE (d64)
ca: RETF Iw
@@ -293,8 +292,8 @@ ef: OUT DX,eAX
# 0xf0 - 0xff
f0: LOCK (Prefix)
f1:
-f2: REPNE (Prefix)
-f3: REP/REPE (Prefix)
+f2: REPNE (Prefix) | XACQUIRE (Prefix)
+f3: REP/REPE (Prefix) | XRELEASE (Prefix)
f4: HLT
f5: CMC
f6: Grp3_1 Eb (1A)
@@ -326,7 +325,8 @@ AVXcode: 1
0a:
0b: UD2 (1B)
0c:
-0d: NOP Ev | GrpP
+# AMD's prefetch group. Intel supports prefetchw(/1) only.
+0d: GrpP
0e: FEMMS
# 3DNow! uses the last imm byte as opcode extension.
0f: 3DNow! Pq,Qq,Ib
@@ -729,12 +729,12 @@ dc: VAESENC Vdq,Hdq,Wdq (66),(v1)
dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1)
de: VAESDEC Vdq,Hdq,Wdq (66),(v1)
df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1)
-f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2)
-f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2)
+f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) | CRC32 Gd,Eb (66&F2)
+f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) | CRC32 Gd,Ew (66&F2)
f2: ANDN Gy,By,Ey (v)
f3: Grp17 (1A)
f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
-f6: MULX By,Gy,rDX,Ey (F2),(v)
+f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
EndTable
@@ -861,8 +861,8 @@ EndTable
GrpTable: Grp7
0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
-1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001)
-2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B)
+1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
+2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
3: LIDT Ms
4: SMSW Mw/Rv
5:
@@ -880,15 +880,21 @@ EndTable
GrpTable: Grp9
1: CMPXCHG8B/16B Mq/Mdq
6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B)
-7: VMPTRST Mq | VMPTRST Mq (F3)
+7: VMPTRST Mq | VMPTRST Mq (F3) | RDSEED Rv (11B)
EndTable
GrpTable: Grp10
EndTable
-GrpTable: Grp11
-# Note: the operands are given by group opcode
-0: MOV
+# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
+GrpTable: Grp11A
+0: MOV Eb,Ib
+7: XABORT Ib (000),(11B)
+EndTable
+
+GrpTable: Grp11B
+0: MOV Eb,Iz
+7: XBEGIN Jz (000),(11B)
EndTable
GrpTable: Grp12
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 2ec29ac78ae..04664cdb7fd 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -78,8 +78,8 @@ __ref void *alloc_low_pages(unsigned int num)
return __va(pfn << PAGE_SHIFT);
}
-/* need 4 4k for initial PMD_SIZE, 4k for 0-ISA_END_ADDRESS */
-#define INIT_PGT_BUF_SIZE (5 * PAGE_SIZE)
+/* need 3 4k for initial PMD_SIZE, 3 4k for 0-ISA_END_ADDRESS */
+#define INIT_PGT_BUF_SIZE (6 * PAGE_SIZE)
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
void __init early_alloc_pgt_buf(void)
{
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 0215e2c563e..799580cabc7 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -487,7 +487,7 @@ __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
unsigned long offset;
resource_size_t last_addr;
unsigned int nrpages;
- enum fixed_addresses idx0, idx;
+ enum fixed_addresses idx;
int i, slot;
WARN_ON(system_state != SYSTEM_BOOTING);
@@ -540,8 +540,7 @@ __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
/*
* Ok, go for it..
*/
- idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
- idx = idx0;
+ idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
while (nrpages > 0) {
early_set_fixmap(idx, phys_addr, prot);
phys_addr += PAGE_SIZE;
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 62c29a5bfe2..25e7e1372bb 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -112,11 +112,13 @@ static unsigned long mmap_legacy_base(void)
*/
void arch_pick_mmap_layout(struct mm_struct *mm)
{
+ mm->mmap_legacy_base = mmap_legacy_base();
+ mm->mmap_base = mmap_base();
+
if (mmap_is_legacy()) {
- mm->mmap_base = mmap_legacy_base();
+ mm->mmap_base = mm->mmap_legacy_base;
mm->get_unmapped_area = arch_get_unmapped_area;
} else {
- mm->mmap_base = mmap_base();
mm->get_unmapped_area = arch_get_unmapped_area_topdown;
}
}
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index cdd0da9dd53..266ca912f62 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -146,6 +146,7 @@ int __init
acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
{
u64 start, end;
+ u32 hotpluggable;
int node, pxm;
if (srat_disabled())
@@ -154,7 +155,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
goto out_err_bad_srat;
if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0)
goto out_err;
- if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info())
+ hotpluggable = ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE;
+ if (hotpluggable && !save_add_info())
goto out_err;
start = ma->base_address;
@@ -174,9 +176,10 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
node_set(node, numa_nodes_parsed);
- printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
- node, pxm,
- (unsigned long long) start, (unsigned long long) end - 1);
+ pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s\n",
+ node, pxm,
+ (unsigned long long) start, (unsigned long long) end - 1,
+ hotpluggable ? " hotplug" : "");
return 0;
out_err_bad_srat:
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 48768df2471..6890d8498e0 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -403,7 +403,7 @@ static void nmi_cpu_down(void *dummy)
nmi_cpu_shutdown(dummy);
}
-static int nmi_create_files(struct super_block *sb, struct dentry *root)
+static int nmi_create_files(struct dentry *root)
{
unsigned int i;
@@ -420,14 +420,14 @@ static int nmi_create_files(struct super_block *sb, struct dentry *root)
continue;
snprintf(buf, sizeof(buf), "%d", i);
- dir = oprofilefs_mkdir(sb, root, buf);
- oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
- oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
- oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
- oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
- oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
- oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
- oprofilefs_create_ulong(sb, dir, "extra", &counter_config[i].extra);
+ dir = oprofilefs_mkdir(root, buf);
+ oprofilefs_create_ulong(dir, "enabled", &counter_config[i].enabled);
+ oprofilefs_create_ulong(dir, "event", &counter_config[i].event);
+ oprofilefs_create_ulong(dir, "count", &counter_config[i].count);
+ oprofilefs_create_ulong(dir, "unit_mask", &counter_config[i].unit_mask);
+ oprofilefs_create_ulong(dir, "kernel", &counter_config[i].kernel);
+ oprofilefs_create_ulong(dir, "user", &counter_config[i].user);
+ oprofilefs_create_ulong(dir, "extra", &counter_config[i].extra);
}
return 0;
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index b2b94438ff0..50d86c0e9ba 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -454,16 +454,16 @@ static void init_ibs(void)
printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
}
-static int (*create_arch_files)(struct super_block *sb, struct dentry *root);
+static int (*create_arch_files)(struct dentry *root);
-static int setup_ibs_files(struct super_block *sb, struct dentry *root)
+static int setup_ibs_files(struct dentry *root)
{
struct dentry *dir;
int ret = 0;
/* architecture specific files */
if (create_arch_files)
- ret = create_arch_files(sb, root);
+ ret = create_arch_files(root);
if (ret)
return ret;
@@ -479,26 +479,26 @@ static int setup_ibs_files(struct super_block *sb, struct dentry *root)
ibs_config.max_cnt_op = 250000;
if (ibs_caps & IBS_CAPS_FETCHSAM) {
- dir = oprofilefs_mkdir(sb, root, "ibs_fetch");
- oprofilefs_create_ulong(sb, dir, "enable",
+ dir = oprofilefs_mkdir(root, "ibs_fetch");
+ oprofilefs_create_ulong(dir, "enable",
&ibs_config.fetch_enabled);
- oprofilefs_create_ulong(sb, dir, "max_count",
+ oprofilefs_create_ulong(dir, "max_count",
&ibs_config.max_cnt_fetch);
- oprofilefs_create_ulong(sb, dir, "rand_enable",
+ oprofilefs_create_ulong(dir, "rand_enable",
&ibs_config.rand_en);
}
if (ibs_caps & IBS_CAPS_OPSAM) {
- dir = oprofilefs_mkdir(sb, root, "ibs_op");
- oprofilefs_create_ulong(sb, dir, "enable",
+ dir = oprofilefs_mkdir(root, "ibs_op");
+ oprofilefs_create_ulong(dir, "enable",
&ibs_config.op_enabled);
- oprofilefs_create_ulong(sb, dir, "max_count",
+ oprofilefs_create_ulong(dir, "max_count",
&ibs_config.max_cnt_op);
if (ibs_caps & IBS_CAPS_OPCNT)
- oprofilefs_create_ulong(sb, dir, "dispatched_ops",
+ oprofilefs_create_ulong(dir, "dispatched_ops",
&ibs_config.dispatched_ops);
if (ibs_caps & IBS_CAPS_BRNTRGT)
- oprofilefs_create_ulong(sb, dir, "branch_target",
+ oprofilefs_create_ulong(dir, "branch_target",
&ibs_config.branch_target);
}
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index d641897a1f4..b30e937689d 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -568,13 +568,8 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
*/
if (bus) {
struct pci_bus *child;
- list_for_each_entry(child, &bus->children, node) {
- struct pci_dev *self = child->self;
- if (!self)
- continue;
-
- pcie_bus_configure_settings(child, self->pcie_mpss);
- }
+ list_for_each_entry(child, &bus->children, node)
+ pcie_bus_configure_settings(child);
}
if (bus && node != -1) {
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 94919e307f8..db6b1ab4325 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -210,6 +210,8 @@ static void pcibios_allocate_bridge_resources(struct pci_dev *dev)
r = &dev->resource[idx];
if (!r->flags)
continue;
+ if (r->parent) /* Already allocated */
+ continue;
if (!r->start || pci_claim_resource(dev, idx) < 0) {
/*
* Something is wrong with the region.
@@ -318,6 +320,8 @@ static void pcibios_allocate_dev_rom_resource(struct pci_dev *dev)
r = &dev->resource[PCI_ROM_RESOURCE];
if (!r->flags || !r->start)
return;
+ if (r->parent) /* Already allocated */
+ return;
if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) {
r->end -= r->start;
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 082e8812971..5596c7bdd32 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -700,7 +700,7 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed)
return -ENODEV;
- if (start > end)
+ if (start > end || !addr)
return -EINVAL;
mutex_lock(&pci_mmcfg_lock);
@@ -716,11 +716,6 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end,
return -EEXIST;
}
- if (!addr) {
- mutex_unlock(&pci_mmcfg_lock);
- return -EINVAL;
- }
-
rc = -EBUSY;
cfg = pci_mmconfig_alloc(seg, start, end, addr);
if (cfg == NULL) {
diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
index 6eb18c42a28..903fded5078 100644
--- a/arch/x86/pci/mrst.c
+++ b/arch/x86/pci/mrst.c
@@ -23,11 +23,11 @@
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/dmi.h>
+#include <linux/acpi.h>
+#include <linux/io.h>
+#include <linux/smp.h>
-#include <asm/acpi.h>
#include <asm/segment.h>
-#include <asm/io.h>
-#include <asm/smp.h>
#include <asm/pci_x86.h>
#include <asm/hw_irq.h>
#include <asm/io_apic.h>
@@ -43,7 +43,7 @@
#define PCI_FIXED_BAR_4_SIZE 0x14
#define PCI_FIXED_BAR_5_SIZE 0x1c
-static int pci_soc_mode = 0;
+static int pci_soc_mode;
/**
* fixed_bar_cap - return the offset of the fixed BAR cap if found
@@ -141,7 +141,8 @@ static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn,
*/
static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
{
- /* This is a workaround for A0 LNC bug where PCI status register does
+ /*
+ * This is a workaround for A0 LNC bug where PCI status register does
* not have new CAP bit set. can not be written by SW either.
*
* PCI header type in real LNC indicates a single function device, this
@@ -154,7 +155,7 @@ static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
|| devfn == PCI_DEVFN(0, 0)
|| devfn == PCI_DEVFN(3, 0)))
return 1;
- return 0; /* langwell on others */
+ return 0; /* Langwell on others */
}
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
@@ -172,7 +173,8 @@ static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
{
int offset;
- /* On MRST, there is no PCI ROM BAR, this will cause a subsequent read
+ /*
+ * On MRST, there is no PCI ROM BAR, this will cause a subsequent read
* to ROM BAR return 0 then being ignored.
*/
if (where == PCI_ROM_ADDRESS)
@@ -210,7 +212,8 @@ static int mrst_pci_irq_enable(struct pci_dev *dev)
pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
- /* MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to
+ /*
+ * MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to
* IOAPIC RTE entries, so we just enable RTE for the device.
*/
irq_attr.ioapic = mp_find_ioapic(dev->irq);
@@ -235,7 +238,7 @@ struct pci_ops pci_mrst_ops = {
*/
int __init pci_mrst_init(void)
{
- printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
+ pr_info("Intel MID platform detected, using MID PCI ops\n");
pci_mmcfg_late_init();
pcibios_enable_irq = mrst_pci_irq_enable;
pci_root_ops = pci_mrst_ops;
@@ -244,17 +247,21 @@ int __init pci_mrst_init(void)
return 1;
}
-/* Langwell devices are not true pci devices, they are not subject to 10 ms
- * d3 to d0 delay required by pci spec.
+/*
+ * Langwell devices are not true PCI devices; they are not subject to the
+ * 10 ms d3-to-d0 delay required by the PCI spec.
*/
static void pci_d3delay_fixup(struct pci_dev *dev)
{
- /* PCI fixups are effectively decided compile time. If we have a dual
- SoC/non-SoC kernel we don't want to mangle d3 on non SoC devices */
- if (!pci_soc_mode)
- return;
- /* true pci devices in lincroft should allow type 1 access, the rest
- * are langwell fake pci devices.
+ /*
+ * PCI fixups are effectively decided compile time. If we have a dual
+ * SoC/non-SoC kernel we don't want to mangle d3 on non-SoC devices.
+ */
+ if (!pci_soc_mode)
+ return;
+ /*
+ * True PCI devices in Lincroft should allow type 1 access, the rest
+ * are Langwell fake PCI devices.
*/
if (type1_access_ok(dev->bus->number, dev->devfn, PCI_DEVICE_ID))
return;
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index 643b8b5eee8..8244f5ec2f4 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/module.h>
+#include <linux/reboot.h>
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>
#include <linux/reboot.h>
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 1cf5b300305..424f4c97a44 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -25,10 +25,10 @@
#include <asm/cpu.h>
#ifdef CONFIG_X86_32
-unsigned long saved_context_ebx;
-unsigned long saved_context_esp, saved_context_ebp;
-unsigned long saved_context_esi, saved_context_edi;
-unsigned long saved_context_eflags;
+__visible unsigned long saved_context_ebx;
+__visible unsigned long saved_context_esp, saved_context_ebp;
+__visible unsigned long saved_context_esi, saved_context_edi;
+__visible unsigned long saved_context_eflags;
#endif
struct saved_context saved_context;
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index a0fde91c16c..304fca20d96 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -20,26 +20,26 @@
#include <asm/suspend.h>
/* References to section boundaries */
-extern const void __nosave_begin, __nosave_end;
+extern __visible const void __nosave_begin, __nosave_end;
/* Defined in hibernate_asm_64.S */
-extern int restore_image(void);
+extern asmlinkage int restore_image(void);
/*
* Address to jump to in the last phase of restore in order to get to the image
* kernel's text (this value is passed in the image header).
*/
-unsigned long restore_jump_address;
+unsigned long restore_jump_address __visible;
/*
* Value of the cr3 register from before the hibernation (this value is passed
* in the image header).
*/
-unsigned long restore_cr3;
+unsigned long restore_cr3 __visible;
-pgd_t *temp_level4_pgt;
+pgd_t *temp_level4_pgt __visible;
-void *relocated_restore_code;
+void *relocated_restore_code __visible;
static void *alloc_pgt_page(void *context)
{
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
index e6773dc8ac4..093a892026f 100644
--- a/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/arch/x86/tools/gen-insn-attr-x86.awk
@@ -68,7 +68,7 @@ BEGIN {
lprefix1_expr = "\\((66|!F3)\\)"
lprefix2_expr = "\\(F3\\)"
- lprefix3_expr = "\\((F2|!F3)\\)"
+ lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
lprefix_expr = "\\((66|F2|F3)\\)"
max_lprefix = 4
@@ -83,6 +83,8 @@ BEGIN {
prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ"
prefix_num["REPNE"] = "INAT_PFX_REPNE"
prefix_num["REP/REPE"] = "INAT_PFX_REPE"
+ prefix_num["XACQUIRE"] = "INAT_PFX_REPNE"
+ prefix_num["XRELEASE"] = "INAT_PFX_REPE"
prefix_num["LOCK"] = "INAT_PFX_LOCK"
prefix_num["SEG=CS"] = "INAT_PFX_CS"
prefix_num["SEG=DS"] = "INAT_PFX_DS"
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index c74436e687b..72074d52840 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -85,15 +85,18 @@ static notrace cycle_t vread_pvclock(int *mode)
cycle_t ret;
u64 last;
u32 version;
- u32 migrate_count;
u8 flags;
unsigned cpu, cpu1;
/*
- * When looping to get a consistent (time-info, tsc) pair, we
- * also need to deal with the possibility we can switch vcpus,
- * so make sure we always re-fetch time-info for the current vcpu.
+ * Note: hypervisor must guarantee that:
+ * 1. cpu ID number maps 1:1 to per-CPU pvclock time info.
+ * 2. the per-CPU pvclock time info is updated if the
+ * underlying CPU changes.
+ * 3. the version is increased whenever the underlying
+ * CPU changes.
+ *
*/
do {
cpu = __getcpu() & VGETCPU_CPU_MASK;
@@ -104,8 +107,6 @@ static notrace cycle_t vread_pvclock(int *mode)
pvti = get_pvti(cpu);
- migrate_count = pvti->migrate_count;
-
version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);
/*
@@ -117,8 +118,7 @@ static notrace cycle_t vread_pvclock(int *mode)
cpu1 = __getcpu() & VGETCPU_CPU_MASK;
} while (unlikely(cpu != cpu1 ||
(pvti->pvti.version & 1) ||
- pvti->pvti.version != version ||
- pvti->migrate_count != migrate_count));
+ pvti->pvti.version != version));
if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
*mode = VCLOCK_NONE;
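
With migrate_count gone, vread_pvclock() above relies solely on the version field: retry until the snapshot was taken on one CPU, with an even version, and with the version unchanged across the read. A simplified stand-alone sketch of that loop (not part of the patch; the structure and helpers are stand-ins, not the real pvclock ABI):

#include <stdint.h>
#include <stdio.h>

struct fake_pvti {
	volatile uint32_t version;	/* odd while the host is updating it */
	volatile uint64_t system_time;
};

static struct fake_pvti pvti_table[1];

static unsigned current_cpu(void)               { return 0; }              /* stand-in */
static struct fake_pvti *pvti_for(unsigned cpu) { return &pvti_table[cpu]; }

static uint64_t read_pvclock(void)
{
	struct fake_pvti *pvti;
	unsigned cpu, cpu1;
	uint32_t version;
	uint64_t t;

	do {
		cpu = current_cpu();
		pvti = pvti_for(cpu);
		version = pvti->version;
		t = pvti->system_time;		/* snapshot the time info  */
		cpu1 = current_cpu();
	} while (cpu != cpu1 ||			/* migrated mid-read       */
		 (pvti->version & 1) ||		/* update was in progress  */
		 pvti->version != version);	/* info changed under us   */
	return t;
}

int main(void)
{
	pvti_table[0].system_time = 42;
	printf("%llu\n", (unsigned long long)read_pvclock());
	return 0;
}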
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 193097ef3d7..2fc216dfbd9 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -427,8 +427,7 @@ static void __init xen_init_cpuid_mask(void)
if (!xen_initial_domain())
cpuid_leaf1_edx_mask &=
- ~((1 << X86_FEATURE_APIC) | /* disable local APIC */
- (1 << X86_FEATURE_ACPI)); /* disable ACPI */
+ ~((1 << X86_FEATURE_ACPI)); /* disable ACPI */
cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32));
@@ -735,8 +734,7 @@ static int cvt_gate_to_trap(int vector, const gate_desc *val,
addr = (unsigned long)xen_int3;
else if (addr == (unsigned long)stack_segment)
addr = (unsigned long)xen_stack_segment;
- else if (addr == (unsigned long)double_fault ||
- addr == (unsigned long)nmi) {
+ else if (addr == (unsigned long)double_fault) {
/* Don't need to handle these */
return 0;
#ifdef CONFIG_X86_MCE
@@ -747,7 +745,12 @@ static int cvt_gate_to_trap(int vector, const gate_desc *val,
*/
;
#endif
- } else {
+ } else if (addr == (unsigned long)nmi)
+ /*
+ * Use the native version as well.
+ */
+ ;
+ else {
/* Some other trap using IST? */
if (WARN_ON(val->ist != 0))
return 0;
@@ -1710,6 +1713,8 @@ static void __init xen_hvm_guest_init(void)
xen_hvm_init_shared_info();
+ xen_panic_handler_init();
+
if (xen_feature(XENFEAT_hvm_callback_vector))
xen_have_vector_callback = 1;
xen_hvm_smp_init();
@@ -1720,15 +1725,12 @@ static void __init xen_hvm_guest_init(void)
xen_hvm_init_mmu_ops();
}
-static bool __init xen_hvm_platform(void)
+static uint32_t __init xen_hvm_platform(void)
{
if (xen_pv_domain())
- return false;
-
- if (!xen_cpuid_base())
- return false;
+ return 0;
- return true;
+ return xen_cpuid_base();
}
bool xen_hvm_need_lapic(void)
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 01a4dc015ae..0da7f863056 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -47,23 +47,18 @@ static void xen_restore_fl(unsigned long flags)
/* convert from IF type flag */
flags = !(flags & X86_EFLAGS_IF);
- /* There's a one instruction preempt window here. We need to
- make sure we're don't switch CPUs between getting the vcpu
- pointer and updating the mask. */
+ /* See xen_irq_enable() for why preemption must be disabled. */
preempt_disable();
vcpu = this_cpu_read(xen_vcpu);
vcpu->evtchn_upcall_mask = flags;
- preempt_enable_no_resched();
-
- /* Doesn't matter if we get preempted here, because any
- pending event will get dealt with anyway. */
if (flags == 0) {
- preempt_check_resched();
barrier(); /* unmask then check (avoid races) */
if (unlikely(vcpu->evtchn_upcall_pending))
xen_force_evtchn_callback();
- }
+ preempt_enable();
+ } else
+ preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
@@ -82,10 +77,12 @@ static void xen_irq_enable(void)
{
struct vcpu_info *vcpu;
- /* We don't need to worry about being preempted here, since
- either a) interrupts are disabled, so no preemption, or b)
- the caller is confused and is trying to re-enable interrupts
- on an indeterminate processor. */
+ /*
+ * We may be preempted as soon as vcpu->evtchn_upcall_mask is
+ * cleared, so disable preemption to ensure we check for
+ * events on the VCPU we are still running on.
+ */
+ preempt_disable();
vcpu = this_cpu_read(xen_vcpu);
vcpu->evtchn_upcall_mask = 0;
@@ -96,6 +93,8 @@ static void xen_irq_enable(void)
barrier(); /* unmask then check (avoid races) */
if (unlikely(vcpu->evtchn_upcall_pending))
xen_force_evtchn_callback();
+
+ preempt_enable();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);
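The two irq.c hunks above boil down to a single rule: the upcall mask must be cleared and the pending flag checked on the same VCPU, so preemption stays disabled across both steps and is only re-enabled once any pending event has been forced. The following is a minimal illustrative sketch of that rule using the helpers already visible in this file; example_unmask_and_check() is a hypothetical name, the real entry points are xen_restore_fl() and xen_irq_enable() above.

	/* Illustration only: unmask, then test pending, on one VCPU. */
	static void example_unmask_and_check(void)
	{
		struct vcpu_info *vcpu;

		preempt_disable();			/* stay on this VCPU */
		vcpu = this_cpu_read(xen_vcpu);
		vcpu->evtchn_upcall_mask = 0;		/* unmask ...        */
		barrier();				/* ... then check    */
		if (unlikely(vcpu->evtchn_upcall_pending))
			xen_force_evtchn_callback();
		preempt_enable();			/* may reschedule    */
	}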
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 95fb2aa5927..0d4ec35895d 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -161,6 +161,7 @@
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
+#include <xen/balloon.h>
#include <xen/grant_table.h>
#include "multicalls.h"
@@ -967,7 +968,10 @@ int m2p_remove_override(struct page *page,
if (kmap_op != NULL) {
if (!PageHighMem(page)) {
struct multicall_space mcs;
- struct gnttab_unmap_grant_ref *unmap_op;
+ struct gnttab_unmap_and_replace *unmap_op;
+ struct page *scratch_page = get_balloon_scratch_page();
+ unsigned long scratch_page_address = (unsigned long)
+ __va(page_to_pfn(scratch_page) << PAGE_SHIFT);
/*
* It might be that we queued all the m2p grant table
@@ -990,21 +994,25 @@ int m2p_remove_override(struct page *page,
}
mcs = xen_mc_entry(
- sizeof(struct gnttab_unmap_grant_ref));
+ sizeof(struct gnttab_unmap_and_replace));
unmap_op = mcs.args;
unmap_op->host_addr = kmap_op->host_addr;
+ unmap_op->new_addr = scratch_page_address;
unmap_op->handle = kmap_op->handle;
- unmap_op->dev_bus_addr = 0;
MULTI_grant_table_op(mcs.mc,
- GNTTABOP_unmap_grant_ref, unmap_op, 1);
+ GNTTABOP_unmap_and_replace, unmap_op, 1);
xen_mc_issue(PARAVIRT_LAZY_MMU);
- set_pte_at(&init_mm, address, ptep,
- pfn_pte(pfn, PAGE_KERNEL));
- __flush_tlb_single(address);
+ mcs = __xen_mc_entry(0);
+ MULTI_update_va_mapping(mcs.mc, scratch_page_address,
+ pfn_pte(page_to_pfn(get_balloon_scratch_page()),
+ PAGE_KERNEL_RO), 0);
+ xen_mc_issue(PARAVIRT_LAZY_MMU);
+
kmap_op->host_addr = 0;
+ put_balloon_scratch_page();
}
}
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 056d11faef2..09f3059cb00 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -33,6 +33,9 @@
/* These are code, but not functions. Defined in entry.S */
extern const char xen_hypervisor_callback[];
extern const char xen_failsafe_callback[];
+#ifdef CONFIG_X86_64
+extern const char nmi[];
+#endif
extern void xen_sysenter_target(void);
extern void xen_syscall_target(void);
extern void xen_syscall32_target(void);
@@ -215,13 +218,19 @@ static void __init xen_set_identity_and_release_chunk(
unsigned long pfn;
/*
- * If the PFNs are currently mapped, the VA mapping also needs
- * to be updated to be 1:1.
+ * If the PFNs are currently mapped, clear the mappings
+ * (except for the ISA region which must be 1:1 mapped) to
+ * release the refcounts (in Xen) on the original frames.
*/
- for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
+ for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) {
+ pte_t pte = __pte_ma(0);
+
+ if (pfn < PFN_UP(ISA_END_ADDRESS))
+ pte = mfn_pte(pfn, PAGE_KERNEL_IO);
+
(void)HYPERVISOR_update_va_mapping(
- (unsigned long)__va(pfn << PAGE_SHIFT),
- mfn_pte(pfn, PAGE_KERNEL_IO), 0);
+ (unsigned long)__va(pfn << PAGE_SHIFT), pte, 0);
+ }
if (start_pfn < nr_pages)
*released += xen_release_chunk(
@@ -313,6 +322,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type)
e820_add_region(start, end - start, type);
}
+void xen_ignore_unusable(struct e820entry *list, size_t map_size)
+{
+ struct e820entry *entry;
+ unsigned int i;
+
+ for (i = 0, entry = list; i < map_size; i++, entry++) {
+ if (entry->type == E820_UNUSABLE)
+ entry->type = E820_RAM;
+ }
+}
+
/**
* machine_specific_memory_setup - Hook for machine specific memory setup.
**/
@@ -353,6 +373,17 @@ char * __init xen_memory_setup(void)
}
BUG_ON(rc);
+ /*
+ * Xen won't allow a 1:1 mapping to be created to UNUSABLE
+ * regions, so if we're using the machine memory map leave the
+ * region as RAM as it is in the pseudo-physical map.
+ *
+ * UNUSABLE regions in domUs are not handled and will need
+ * a patch in the future.
+ */
+ if (xen_initial_domain())
+ xen_ignore_unusable(map, memmap.nr_entries);
+
/* Make sure the Xen-supplied memory map is well-ordered. */
sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries);
@@ -525,7 +556,13 @@ void xen_enable_syscall(void)
}
#endif /* CONFIG_X86_64 */
}
-
+void __cpuinit xen_enable_nmi(void)
+{
+#ifdef CONFIG_X86_64
+ if (register_callback(CALLBACKTYPE_nmi, nmi))
+ BUG();
+#endif
+}
void __init xen_arch_setup(void)
{
xen_panic_handler_init();
@@ -543,7 +580,7 @@ void __init xen_arch_setup(void)
xen_enable_sysenter();
xen_enable_syscall();
-
+ xen_enable_nmi();
#ifdef CONFIG_ACPI
if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index ca92754eb84..9235842cd76 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -279,6 +279,7 @@ static void __init xen_smp_prepare_boot_cpu(void)
xen_filter_cpu_maps();
xen_setup_vcpu_info_placement();
+ xen_init_spinlocks();
}
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
@@ -572,6 +573,12 @@ static inline int xen_map_vector(int vector)
case IRQ_WORK_VECTOR:
xen_vector = XEN_IRQ_WORK_VECTOR;
break;
+#ifdef CONFIG_X86_64
+ case NMI_VECTOR:
+ case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */
+ xen_vector = XEN_NMI_VECTOR;
+ break;
+#endif
default:
xen_vector = -1;
printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
@@ -680,7 +687,6 @@ void __init xen_smp_init(void)
{
smp_ops = xen_smp_ops;
xen_fill_possible_map();
- xen_init_spinlocks();
}
static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
@@ -694,8 +700,15 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
int rc;
- rc = native_cpu_up(cpu, tidle);
- WARN_ON (xen_smp_intr_init(cpu));
+ /*
+ * xen_smp_intr_init() needs to run before native_cpu_up()
+ * so that IPI vectors are set up on the booting CPU before
+ * it is marked online in native_cpu_up().
+ */
+ rc = xen_smp_intr_init(cpu);
+ WARN_ON(rc);
+ if (!rc)
+ rc = native_cpu_up(cpu, tidle);
return rc;
}
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index cf3caee356b..0438b9324a7 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -17,45 +17,44 @@
#include "xen-ops.h"
#include "debugfs.h"
-#ifdef CONFIG_XEN_DEBUG_FS
-static struct xen_spinlock_stats
-{
- u64 taken;
- u32 taken_slow;
- u32 taken_slow_nested;
- u32 taken_slow_pickup;
- u32 taken_slow_spurious;
- u32 taken_slow_irqenable;
+enum xen_contention_stat {
+ TAKEN_SLOW,
+ TAKEN_SLOW_PICKUP,
+ TAKEN_SLOW_SPURIOUS,
+ RELEASED_SLOW,
+ RELEASED_SLOW_KICKED,
+ NR_CONTENTION_STATS
+};
- u64 released;
- u32 released_slow;
- u32 released_slow_kicked;
+#ifdef CONFIG_XEN_DEBUG_FS
#define HISTO_BUCKETS 30
- u32 histo_spin_total[HISTO_BUCKETS+1];
- u32 histo_spin_spinning[HISTO_BUCKETS+1];
+static struct xen_spinlock_stats
+{
+ u32 contention_stats[NR_CONTENTION_STATS];
u32 histo_spin_blocked[HISTO_BUCKETS+1];
-
- u64 time_total;
- u64 time_spinning;
u64 time_blocked;
} spinlock_stats;
static u8 zero_stats;
-static unsigned lock_timeout = 1 << 10;
-#define TIMEOUT lock_timeout
-
static inline void check_zero(void)
{
- if (unlikely(zero_stats)) {
- memset(&spinlock_stats, 0, sizeof(spinlock_stats));
- zero_stats = 0;
+ u8 ret;
+ u8 old = ACCESS_ONCE(zero_stats);
+ if (unlikely(old)) {
+ ret = cmpxchg(&zero_stats, old, 0);
+ /* This ensures only one fellow resets the stat */
+ if (ret == old)
+ memset(&spinlock_stats, 0, sizeof(spinlock_stats));
}
}
-#define ADD_STATS(elem, val) \
- do { check_zero(); spinlock_stats.elem += (val); } while(0)
+static inline void add_stats(enum xen_contention_stat var, u32 val)
+{
+ check_zero();
+ spinlock_stats.contention_stats[var] += val;
+}
static inline u64 spin_time_start(void)
{
@@ -74,22 +73,6 @@ static void __spin_time_accum(u64 delta, u32 *array)
array[HISTO_BUCKETS]++;
}
-static inline void spin_time_accum_spinning(u64 start)
-{
- u32 delta = xen_clocksource_read() - start;
-
- __spin_time_accum(delta, spinlock_stats.histo_spin_spinning);
- spinlock_stats.time_spinning += delta;
-}
-
-static inline void spin_time_accum_total(u64 start)
-{
- u32 delta = xen_clocksource_read() - start;
-
- __spin_time_accum(delta, spinlock_stats.histo_spin_total);
- spinlock_stats.time_total += delta;
-}
-
static inline void spin_time_accum_blocked(u64 start)
{
u32 delta = xen_clocksource_read() - start;
@@ -99,19 +82,15 @@ static inline void spin_time_accum_blocked(u64 start)
}
#else /* !CONFIG_XEN_DEBUG_FS */
#define TIMEOUT (1 << 10)
-#define ADD_STATS(elem, val) do { (void)(val); } while(0)
+static inline void add_stats(enum xen_contention_stat var, u32 val)
+{
+}
static inline u64 spin_time_start(void)
{
return 0;
}
-static inline void spin_time_accum_total(u64 start)
-{
-}
-static inline void spin_time_accum_spinning(u64 start)
-{
-}
static inline void spin_time_accum_blocked(u64 start)
{
}
@@ -134,227 +113,123 @@ typedef u16 xen_spinners_t;
asm(LOCK_PREFIX " decw %0" : "+m" ((xl)->spinners) : : "memory");
#endif
-struct xen_spinlock {
- unsigned char lock; /* 0 -> free; 1 -> locked */
- xen_spinners_t spinners; /* count of waiting cpus */
+struct xen_lock_waiting {
+ struct arch_spinlock *lock;
+ __ticket_t want;
};
-static int xen_spin_is_locked(struct arch_spinlock *lock)
-{
- struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
- return xl->lock != 0;
-}
-
-static int xen_spin_is_contended(struct arch_spinlock *lock)
-{
- struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
- /* Not strictly true; this is only the count of contended
- lock-takers entering the slow path. */
- return xl->spinners != 0;
-}
-
-static int xen_spin_trylock(struct arch_spinlock *lock)
-{
- struct xen_spinlock *xl = (struct xen_spinlock *)lock;
- u8 old = 1;
-
- asm("xchgb %b0,%1"
- : "+q" (old), "+m" (xl->lock) : : "memory");
-
- return old == 0;
-}
-
-static DEFINE_PER_CPU(char *, irq_name);
static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
-
-/*
- * Mark a cpu as interested in a lock. Returns the CPU's previous
- * lock of interest, in case we got preempted by an interrupt.
- */
-static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
-{
- struct xen_spinlock *prev;
-
- prev = __this_cpu_read(lock_spinners);
- __this_cpu_write(lock_spinners, xl);
-
- wmb(); /* set lock of interest before count */
-
- inc_spinners(xl);
-
- return prev;
-}
-
-/*
- * Mark a cpu as no longer interested in a lock. Restores previous
- * lock of interest (NULL for none).
- */
-static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
-{
- dec_spinners(xl);
- wmb(); /* decrement count before restoring lock */
- __this_cpu_write(lock_spinners, prev);
-}
+static DEFINE_PER_CPU(char *, irq_name);
+static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
+static cpumask_t waiting_cpus;
-static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable)
+static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
- struct xen_spinlock *xl = (struct xen_spinlock *)lock;
- struct xen_spinlock *prev;
int irq = __this_cpu_read(lock_kicker_irq);
- int ret;
+ struct xen_lock_waiting *w = &__get_cpu_var(lock_waiting);
+ int cpu = smp_processor_id();
u64 start;
+ unsigned long flags;
/* If kicker interrupts not initialized yet, just spin */
if (irq == -1)
- return 0;
+ return;
start = spin_time_start();
- /* announce we're spinning */
- prev = spinning_lock(xl);
+ /*
+ * Make sure an interrupt handler can't upset things in a
+ * partially setup state.
+ */
+ local_irq_save(flags);
+ /*
+ * We don't really care if we're overwriting some other
+ * (lock,want) pair, as that would mean that we're currently
+ * in an interrupt context, and the outer context had
+ * interrupts enabled. That has already kicked the VCPU out
+ * of xen_poll_irq(), so it will just return spuriously and
+ * retry with newly setup (lock,want).
+ *
+ * The ordering protocol on this is that the "lock" pointer
+ * may only be set non-NULL if the "want" ticket is correct.
+ * If we're updating "want", we must first clear "lock".
+ */
+ w->lock = NULL;
+ smp_wmb();
+ w->want = want;
+ smp_wmb();
+ w->lock = lock;
- ADD_STATS(taken_slow, 1);
- ADD_STATS(taken_slow_nested, prev != NULL);
+ /* This uses set_bit, which is atomic and therefore a barrier */
+ cpumask_set_cpu(cpu, &waiting_cpus);
+ add_stats(TAKEN_SLOW, 1);
- do {
- unsigned long flags;
+ /* clear pending */
+ xen_clear_irq_pending(irq);
- /* clear pending */
- xen_clear_irq_pending(irq);
+ /* Only check lock once pending cleared */
+ barrier();
- /* check again make sure it didn't become free while
- we weren't looking */
- ret = xen_spin_trylock(lock);
- if (ret) {
- ADD_STATS(taken_slow_pickup, 1);
+ /*
+ * Mark entry to slowpath before doing the pickup test to make
+ * sure we don't deadlock with an unlocker.
+ */
+ __ticket_enter_slowpath(lock);
- /*
- * If we interrupted another spinlock while it
- * was blocking, make sure it doesn't block
- * without rechecking the lock.
- */
- if (prev != NULL)
- xen_set_irq_pending(irq);
- goto out;
- }
+ /*
+ * check again to make sure it didn't become free while
+ * we weren't looking
+ */
+ if (ACCESS_ONCE(lock->tickets.head) == want) {
+ add_stats(TAKEN_SLOW_PICKUP, 1);
+ goto out;
+ }
- flags = arch_local_save_flags();
- if (irq_enable) {
- ADD_STATS(taken_slow_irqenable, 1);
- raw_local_irq_enable();
- }
+ /* Allow interrupts while blocked */
+ local_irq_restore(flags);
- /*
- * Block until irq becomes pending. If we're
- * interrupted at this point (after the trylock but
- * before entering the block), then the nested lock
- * handler guarantees that the irq will be left
- * pending if there's any chance the lock became free;
- * xen_poll_irq() returns immediately if the irq is
- * pending.
- */
- xen_poll_irq(irq);
+ /*
+ * If an interrupt happens here, it will leave the wakeup irq
+ * pending, which will cause xen_poll_irq() to return
+ * immediately.
+ */
- raw_local_irq_restore(flags);
+ /* Block until irq becomes pending (or perhaps a spurious wakeup) */
+ xen_poll_irq(irq);
+ add_stats(TAKEN_SLOW_SPURIOUS, !xen_test_irq_pending(irq));
- ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
- } while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
+ local_irq_save(flags);
kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
-
out:
- unspinning_lock(xl, prev);
- spin_time_accum_blocked(start);
-
- return ret;
-}
-
-static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable)
-{
- struct xen_spinlock *xl = (struct xen_spinlock *)lock;
- unsigned timeout;
- u8 oldval;
- u64 start_spin;
-
- ADD_STATS(taken, 1);
-
- start_spin = spin_time_start();
-
- do {
- u64 start_spin_fast = spin_time_start();
-
- timeout = TIMEOUT;
-
- asm("1: xchgb %1,%0\n"
- " testb %1,%1\n"
- " jz 3f\n"
- "2: rep;nop\n"
- " cmpb $0,%0\n"
- " je 1b\n"
- " dec %2\n"
- " jnz 2b\n"
- "3:\n"
- : "+m" (xl->lock), "=q" (oldval), "+r" (timeout)
- : "1" (1)
- : "memory");
+ cpumask_clear_cpu(cpu, &waiting_cpus);
+ w->lock = NULL;
- spin_time_accum_spinning(start_spin_fast);
+ local_irq_restore(flags);
- } while (unlikely(oldval != 0 &&
- (TIMEOUT == ~0 || !xen_spin_lock_slow(lock, irq_enable))));
-
- spin_time_accum_total(start_spin);
-}
-
-static void xen_spin_lock(struct arch_spinlock *lock)
-{
- __xen_spin_lock(lock, false);
-}
-
-static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags)
-{
- __xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
+ spin_time_accum_blocked(start);
}
+PV_CALLEE_SAVE_REGS_THUNK(xen_lock_spinning);
-static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
+static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
{
int cpu;
- ADD_STATS(released_slow, 1);
+ add_stats(RELEASED_SLOW, 1);
+
+ for_each_cpu(cpu, &waiting_cpus) {
+ const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);
- for_each_online_cpu(cpu) {
- /* XXX should mix up next cpu selection */
- if (per_cpu(lock_spinners, cpu) == xl) {
- ADD_STATS(released_slow_kicked, 1);
+ /* Make sure we read lock before want */
+ if (ACCESS_ONCE(w->lock) == lock &&
+ ACCESS_ONCE(w->want) == next) {
+ add_stats(RELEASED_SLOW_KICKED, 1);
xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+ break;
}
}
}
-static void xen_spin_unlock(struct arch_spinlock *lock)
-{
- struct xen_spinlock *xl = (struct xen_spinlock *)lock;
-
- ADD_STATS(released, 1);
-
- smp_wmb(); /* make sure no writes get moved after unlock */
- xl->lock = 0; /* release lock */
-
- /*
- * Make sure unlock happens before checking for waiting
- * spinners. We need a strong barrier to enforce the
- * write-read ordering to different memory locations, as the
- * CPU makes no implied guarantees about their ordering.
- */
- mb();
-
- if (unlikely(xl->spinners))
- xen_spin_unlock_slow(xl);
-}
-
static irqreturn_t dummy_handler(int irq, void *dev_id)
{
BUG();
@@ -408,6 +283,8 @@ void xen_uninit_lock_cpu(int cpu)
per_cpu(irq_name, cpu) = NULL;
}
+static bool xen_pvspin __initdata = true;
+
void __init xen_init_spinlocks(void)
{
/*
@@ -417,15 +294,23 @@ void __init xen_init_spinlocks(void)
if (xen_hvm_domain())
return;
- BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t));
+ if (!xen_pvspin) {
+ printk(KERN_DEBUG "xen: PV spinlocks disabled\n");
+ return;
+ }
- pv_lock_ops.spin_is_locked = xen_spin_is_locked;
- pv_lock_ops.spin_is_contended = xen_spin_is_contended;
- pv_lock_ops.spin_lock = xen_spin_lock;
- pv_lock_ops.spin_lock_flags = xen_spin_lock_flags;
- pv_lock_ops.spin_trylock = xen_spin_trylock;
- pv_lock_ops.spin_unlock = xen_spin_unlock;
+ static_key_slow_inc(&paravirt_ticketlocks_enabled);
+
+ pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
+ pv_lock_ops.unlock_kick = xen_unlock_kick;
+}
+
+static __init int xen_parse_nopvspin(char *arg)
+{
+ xen_pvspin = false;
+ return 0;
}
+early_param("xen_nopvspin", xen_parse_nopvspin);
#ifdef CONFIG_XEN_DEBUG_FS
@@ -442,37 +327,21 @@ static int __init xen_spinlock_debugfs(void)
debugfs_create_u8("zero_stats", 0644, d_spin_debug, &zero_stats);
- debugfs_create_u32("timeout", 0644, d_spin_debug, &lock_timeout);
-
- debugfs_create_u64("taken", 0444, d_spin_debug, &spinlock_stats.taken);
debugfs_create_u32("taken_slow", 0444, d_spin_debug,
- &spinlock_stats.taken_slow);
- debugfs_create_u32("taken_slow_nested", 0444, d_spin_debug,
- &spinlock_stats.taken_slow_nested);
+ &spinlock_stats.contention_stats[TAKEN_SLOW]);
debugfs_create_u32("taken_slow_pickup", 0444, d_spin_debug,
- &spinlock_stats.taken_slow_pickup);
+ &spinlock_stats.contention_stats[TAKEN_SLOW_PICKUP]);
debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
- &spinlock_stats.taken_slow_spurious);
- debugfs_create_u32("taken_slow_irqenable", 0444, d_spin_debug,
- &spinlock_stats.taken_slow_irqenable);
+ &spinlock_stats.contention_stats[TAKEN_SLOW_SPURIOUS]);
- debugfs_create_u64("released", 0444, d_spin_debug, &spinlock_stats.released);
debugfs_create_u32("released_slow", 0444, d_spin_debug,
- &spinlock_stats.released_slow);
+ &spinlock_stats.contention_stats[RELEASED_SLOW]);
debugfs_create_u32("released_slow_kicked", 0444, d_spin_debug,
- &spinlock_stats.released_slow_kicked);
+ &spinlock_stats.contention_stats[RELEASED_SLOW_KICKED]);
- debugfs_create_u64("time_spinning", 0444, d_spin_debug,
- &spinlock_stats.time_spinning);
debugfs_create_u64("time_blocked", 0444, d_spin_debug,
&spinlock_stats.time_blocked);
- debugfs_create_u64("time_total", 0444, d_spin_debug,
- &spinlock_stats.time_total);
- debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
- spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1);
- debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
- spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1);
debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
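The heart of the ticketlock rework above is the publication protocol described in xen_lock_spinning(): a waiter may expose a non-NULL lock pointer only after its ticket ("want") is valid, and the unlocker reads the two fields in the opposite order, so it can never act on a half-written pair. A condensed, side-by-side illustration of the two halves, lifted from the hunks above rather than adding new code:

	/* waiter side (xen_lock_spinning) */
	w->lock = NULL;		/* invalidate the pair first          */
	smp_wmb();
	w->want = want;		/* publish the ticket ...             */
	smp_wmb();
	w->lock = lock;		/* ... and only then the lock pointer */

	/* unlocker side (xen_unlock_kick) */
	if (ACCESS_ONCE(w->lock) == lock &&
	    ACCESS_ONCE(w->want) == next)
		xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);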
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 86782c5d7e2..95f8c614232 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -105,9 +105,9 @@ static inline void __init xen_init_apic(void)
/* Declare an asm function, along with symbols needed to make it
inlineable */
#define DECL_ASM(ret, name, ...) \
- ret name(__VA_ARGS__); \
- extern char name##_end[]; \
- extern char name##_reloc[] \
+ __visible ret name(__VA_ARGS__); \
+ extern char name##_end[] __visible; \
+ extern char name##_reloc[] __visible
DECL_ASM(void, xen_irq_enable_direct, void);
DECL_ASM(void, xen_irq_disable_direct, void);
@@ -115,11 +115,11 @@ DECL_ASM(unsigned long, xen_save_fl_direct, void);
DECL_ASM(void, xen_restore_fl_direct, unsigned long);
/* These are not functions, and cannot be called normally */
-void xen_iret(void);
-void xen_sysexit(void);
-void xen_sysret32(void);
-void xen_sysret64(void);
-void xen_adjust_exception_frame(void);
+__visible void xen_iret(void);
+__visible void xen_sysexit(void);
+__visible void xen_sysret32(void);
+__visible void xen_sysret64(void);
+__visible void xen_adjust_exception_frame(void);
extern int xen_panic_handler_init(void);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 290792a13e3..e90c7c164c8 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -437,10 +437,10 @@ struct request_list *__blk_queue_next_rl(struct request_list *rl,
return &blkg->rl;
}
-static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
- u64 val)
+static int blkcg_reset_stats(struct cgroup_subsys_state *css,
+ struct cftype *cftype, u64 val)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+ struct blkcg *blkcg = css_to_blkcg(css);
struct blkcg_gq *blkg;
int i;
@@ -614,15 +614,13 @@ u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
{
struct blkcg_policy *pol = blkcg_policy[pd->plid];
struct blkcg_gq *pos_blkg;
- struct cgroup *pos_cgrp;
- u64 sum;
+ struct cgroup_subsys_state *pos_css;
+ u64 sum = 0;
lockdep_assert_held(pd->blkg->q->queue_lock);
- sum = blkg_stat_read((void *)pd + off);
-
rcu_read_lock();
- blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
+ blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
struct blkg_stat *stat = (void *)pos_pd + off;
@@ -649,16 +647,14 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
{
struct blkcg_policy *pol = blkcg_policy[pd->plid];
struct blkcg_gq *pos_blkg;
- struct cgroup *pos_cgrp;
- struct blkg_rwstat sum;
+ struct cgroup_subsys_state *pos_css;
+ struct blkg_rwstat sum = { };
int i;
lockdep_assert_held(pd->blkg->q->queue_lock);
- sum = blkg_rwstat_read((void *)pd + off);
-
rcu_read_lock();
- blkg_for_each_descendant_pre(pos_blkg, pos_cgrp, pd_to_blkg(pd)) {
+ blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
struct blkg_rwstat *rwstat = (void *)pos_pd + off;
struct blkg_rwstat tmp;
@@ -765,18 +761,18 @@ struct cftype blkcg_files[] = {
/**
* blkcg_css_offline - cgroup css_offline callback
- * @cgroup: cgroup of interest
+ * @css: css of interest
*
- * This function is called when @cgroup is about to go away and responsible
- * for shooting down all blkgs associated with @cgroup. blkgs should be
+ * This function is called when @css is about to go away and responsible
+ * for shooting down all blkgs associated with @css. blkgs should be
* removed while holding both q and blkcg locks. As blkcg lock is nested
* inside q lock, this function performs reverse double lock dancing.
*
* This is the blkcg counterpart of ioc_release_fn().
*/
-static void blkcg_css_offline(struct cgroup *cgroup)
+static void blkcg_css_offline(struct cgroup_subsys_state *css)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+ struct blkcg *blkcg = css_to_blkcg(css);
spin_lock_irq(&blkcg->lock);
@@ -798,21 +794,21 @@ static void blkcg_css_offline(struct cgroup *cgroup)
spin_unlock_irq(&blkcg->lock);
}
-static void blkcg_css_free(struct cgroup *cgroup)
+static void blkcg_css_free(struct cgroup_subsys_state *css)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
+ struct blkcg *blkcg = css_to_blkcg(css);
if (blkcg != &blkcg_root)
kfree(blkcg);
}
-static struct cgroup_subsys_state *blkcg_css_alloc(struct cgroup *cgroup)
+static struct cgroup_subsys_state *
+blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
{
static atomic64_t id_seq = ATOMIC64_INIT(0);
struct blkcg *blkcg;
- struct cgroup *parent = cgroup->parent;
- if (!parent) {
+ if (!parent_css) {
blkcg = &blkcg_root;
goto done;
}
@@ -883,14 +879,15 @@ void blkcg_exit_queue(struct request_queue *q)
* of the main cic data structures. For now we allow a task to change
* its cgroup only if it's the only owner of its ioc.
*/
-static int blkcg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static int blkcg_can_attach(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset)
{
struct task_struct *task;
struct io_context *ioc;
int ret = 0;
/* task_lock() is needed to avoid races with exit_io_context() */
- cgroup_taskset_for_each(task, cgrp, tset) {
+ cgroup_taskset_for_each(task, css, tset) {
task_lock(task);
ioc = task->io_context;
if (ioc && atomic_read(&ioc->nr_tasks) > 1)
@@ -1127,7 +1124,7 @@ void blkcg_policy_unregister(struct blkcg_policy *pol)
/* kill the intf files first */
if (pol->cftypes)
- cgroup_rm_cftypes(&blkio_subsys, pol->cftypes);
+ cgroup_rm_cftypes(pol->cftypes);
/* unregister and update blkgs */
blkcg_policy[pol->plid] = NULL;
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 8056c03a338..ae6969a7ffd 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -179,22 +179,20 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
-static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup)
+static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
- return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
- struct blkcg, css);
+ return css ? container_of(css, struct blkcg, css) : NULL;
}
static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
- return container_of(task_subsys_state(tsk, blkio_subsys_id),
- struct blkcg, css);
+ return css_to_blkcg(task_css(tsk, blkio_subsys_id));
}
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
if (bio && bio->bi_css)
- return container_of(bio->bi_css, struct blkcg, css);
+ return css_to_blkcg(bio->bi_css);
return task_blkcg(current);
}
@@ -206,9 +204,7 @@ static inline struct blkcg *bio_blkcg(struct bio *bio)
*/
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
- struct cgroup *pcg = blkcg->css.cgroup->parent;
-
- return pcg ? cgroup_to_blkcg(pcg) : NULL;
+ return css_to_blkcg(css_parent(&blkcg->css));
}
/**
@@ -288,32 +284,33 @@ struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
/**
* blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
* @d_blkg: loop cursor pointing to the current descendant
- * @pos_cgrp: used for iteration
+ * @pos_css: used for iteration
* @p_blkg: target blkg to walk descendants of
*
* Walk @c_blkg through the descendants of @p_blkg. Must be used with RCU
* read locked. If called under either blkcg or queue lock, the iteration
* is guaranteed to include all and only online blkgs. The caller may
- * update @pos_cgrp by calling cgroup_rightmost_descendant() to skip
- * subtree.
+ * update @pos_css by calling css_rightmost_descendant() to skip subtree.
+ * @p_blkg is included in the iteration and the first node to be visited.
*/
-#define blkg_for_each_descendant_pre(d_blkg, pos_cgrp, p_blkg) \
- cgroup_for_each_descendant_pre((pos_cgrp), (p_blkg)->blkcg->css.cgroup) \
- if (((d_blkg) = __blkg_lookup(cgroup_to_blkcg(pos_cgrp), \
+#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \
+ css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \
+ if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
(p_blkg)->q, false)))
/**
* blkg_for_each_descendant_post - post-order walk of a blkg's descendants
* @d_blkg: loop cursor pointing to the current descendant
- * @pos_cgrp: used for iteration
+ * @pos_css: used for iteration
* @p_blkg: target blkg to walk descendants of
*
* Similar to blkg_for_each_descendant_pre() but performs post-order
- * traversal instead. Synchronization rules are the same.
+ * traversal instead. Synchronization rules are the same. @p_blkg is
+ * included in the iteration and the last node to be visited.
*/
-#define blkg_for_each_descendant_post(d_blkg, pos_cgrp, p_blkg) \
- cgroup_for_each_descendant_post((pos_cgrp), (p_blkg)->blkcg->css.cgroup) \
- if (((d_blkg) = __blkg_lookup(cgroup_to_blkcg(pos_cgrp), \
+#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \
+ css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
+ if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
(p_blkg)->q, false)))
/**
@@ -576,7 +573,6 @@ static inline int blkcg_activate_policy(struct request_queue *q,
static inline void blkcg_deactivate_policy(struct request_queue *q,
const struct blkcg_policy *pol) { }
-static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
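Nearly every block-cgroup hunk in this series is the same mechanical conversion: cftype handlers and cgroup callbacks now receive a struct cgroup_subsys_state directly and translate it with css_to_blkcg(), instead of receiving a struct cgroup and calling cgroup_to_blkcg(). A hypothetical handler showing the before/after shape (example_read() and its body are illustrative only; cfq_weight is borrowed from the cfq-iosched hunks below):

	/* before: handler keyed on the cgroup */
	static int example_read(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *sf)
	{
		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

		seq_printf(sf, "%u\n", blkcg->cfq_weight);
		return 0;
	}

	/* after: handler keyed on the css */
	static int example_read(struct cgroup_subsys_state *css,
				struct cftype *cft, struct seq_file *sf)
	{
		struct blkcg *blkcg = css_to_blkcg(css);

		seq_printf(sf, "%u\n", blkcg->cfq_weight);
		return 0;
	}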
diff --git a/block/blk-core.c b/block/blk-core.c
index 93a18d1d3da..c0450535834 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2318,6 +2318,12 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
case -ETIMEDOUT:
error_type = "timeout";
break;
+ case -ENOSPC:
+ error_type = "critical space allocation";
+ break;
+ case -ENODATA:
+ error_type = "critical medium";
+ break;
case -EIO:
default:
error_type = "I/O";
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 08a32dfd384..8331aba9426 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1293,10 +1293,10 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
return __blkg_prfill_rwstat(sf, pd, &rwstat);
}
-static int tg_print_cpu_rwstat(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *sf)
+static int tg_print_cpu_rwstat(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *sf)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkcg *blkcg = css_to_blkcg(css);
blkcg_print_blkgs(sf, blkcg, tg_prfill_cpu_rwstat, &blkcg_policy_throtl,
cft->private, true);
@@ -1325,31 +1325,31 @@ static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
return __blkg_prfill_u64(sf, pd, v);
}
-static int tg_print_conf_u64(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *sf)
+static int tg_print_conf_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *sf)
{
- blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_u64,
+ blkcg_print_blkgs(sf, css_to_blkcg(css), tg_prfill_conf_u64,
&blkcg_policy_throtl, cft->private, false);
return 0;
}
-static int tg_print_conf_uint(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *sf)
+static int tg_print_conf_uint(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *sf)
{
- blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp), tg_prfill_conf_uint,
+ blkcg_print_blkgs(sf, css_to_blkcg(css), tg_prfill_conf_uint,
&blkcg_policy_throtl, cft->private, false);
return 0;
}
-static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
- bool is_u64)
+static int tg_set_conf(struct cgroup_subsys_state *css, struct cftype *cft,
+ const char *buf, bool is_u64)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkcg *blkcg = css_to_blkcg(css);
struct blkg_conf_ctx ctx;
struct throtl_grp *tg;
struct throtl_service_queue *sq;
struct blkcg_gq *blkg;
- struct cgroup *pos_cgrp;
+ struct cgroup_subsys_state *pos_css;
int ret;
ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
@@ -1379,8 +1379,7 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
* restrictions in the whole hierarchy and allows them to bypass
* blk-throttle.
*/
- tg_update_has_rules(tg);
- blkg_for_each_descendant_pre(blkg, pos_cgrp, ctx.blkg)
+ blkg_for_each_descendant_pre(blkg, pos_css, ctx.blkg)
tg_update_has_rules(blkg_to_tg(blkg));
/*
@@ -1403,16 +1402,16 @@ static int tg_set_conf(struct cgroup *cgrp, struct cftype *cft, const char *buf,
return 0;
}
-static int tg_set_conf_u64(struct cgroup *cgrp, struct cftype *cft,
+static int tg_set_conf_u64(struct cgroup_subsys_state *css, struct cftype *cft,
const char *buf)
{
- return tg_set_conf(cgrp, cft, buf, true);
+ return tg_set_conf(css, cft, buf, true);
}
-static int tg_set_conf_uint(struct cgroup *cgrp, struct cftype *cft,
+static int tg_set_conf_uint(struct cgroup_subsys_state *css, struct cftype *cft,
const char *buf)
{
- return tg_set_conf(cgrp, cft, buf, false);
+ return tg_set_conf(css, cft, buf, false);
}
static struct cftype throtl_files[] = {
@@ -1623,7 +1622,7 @@ void blk_throtl_drain(struct request_queue *q)
{
struct throtl_data *td = q->td;
struct blkcg_gq *blkg;
- struct cgroup *pos_cgrp;
+ struct cgroup_subsys_state *pos_css;
struct bio *bio;
int rw;
@@ -1636,11 +1635,9 @@ void blk_throtl_drain(struct request_queue *q)
* better to walk service_queue tree directly but blkg walk is
* easier.
*/
- blkg_for_each_descendant_post(blkg, pos_cgrp, td->queue->root_blkg)
+ blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
- tg_drain_bios(&td_root_tg(td)->service_queue);
-
/* finally, transfer bios from top-level tg's into the td */
tg_drain_bios(&td->service_queue);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index d5bbdcfd0da..dabb9d02cf9 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1607,12 +1607,11 @@ static u64 cfqg_prfill_weight_device(struct seq_file *sf,
return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
}
-static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *sf)
+static int cfqg_print_weight_device(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *sf)
{
- blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
- cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
- false);
+ blkcg_print_blkgs(sf, css_to_blkcg(css), cfqg_prfill_weight_device,
+ &blkcg_policy_cfq, 0, false);
return 0;
}
@@ -1626,35 +1625,34 @@ static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
}
-static int cfqg_print_leaf_weight_device(struct cgroup *cgrp,
+static int cfqg_print_leaf_weight_device(struct cgroup_subsys_state *css,
struct cftype *cft,
struct seq_file *sf)
{
- blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
- cfqg_prfill_leaf_weight_device, &blkcg_policy_cfq, 0,
- false);
+ blkcg_print_blkgs(sf, css_to_blkcg(css), cfqg_prfill_leaf_weight_device,
+ &blkcg_policy_cfq, 0, false);
return 0;
}
-static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
+static int cfq_print_weight(struct cgroup_subsys_state *css, struct cftype *cft,
struct seq_file *sf)
{
- seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
+ seq_printf(sf, "%u\n", css_to_blkcg(css)->cfq_weight);
return 0;
}
-static int cfq_print_leaf_weight(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *sf)
+static int cfq_print_leaf_weight(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *sf)
{
- seq_printf(sf, "%u\n",
- cgroup_to_blkcg(cgrp)->cfq_leaf_weight);
+ seq_printf(sf, "%u\n", css_to_blkcg(css)->cfq_leaf_weight);
return 0;
}
-static int __cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
- const char *buf, bool is_leaf_weight)
+static int __cfqg_set_weight_device(struct cgroup_subsys_state *css,
+ struct cftype *cft, const char *buf,
+ bool is_leaf_weight)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkcg *blkcg = css_to_blkcg(css);
struct blkg_conf_ctx ctx;
struct cfq_group *cfqg;
int ret;
@@ -1680,22 +1678,22 @@ static int __cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
return ret;
}
-static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
- const char *buf)
+static int cfqg_set_weight_device(struct cgroup_subsys_state *css,
+ struct cftype *cft, const char *buf)
{
- return __cfqg_set_weight_device(cgrp, cft, buf, false);
+ return __cfqg_set_weight_device(css, cft, buf, false);
}
-static int cfqg_set_leaf_weight_device(struct cgroup *cgrp, struct cftype *cft,
- const char *buf)
+static int cfqg_set_leaf_weight_device(struct cgroup_subsys_state *css,
+ struct cftype *cft, const char *buf)
{
- return __cfqg_set_weight_device(cgrp, cft, buf, true);
+ return __cfqg_set_weight_device(css, cft, buf, true);
}
-static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
- bool is_leaf_weight)
+static int __cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 val, bool is_leaf_weight)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkcg *blkcg = css_to_blkcg(css);
struct blkcg_gq *blkg;
if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
@@ -1727,30 +1725,32 @@ static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
return 0;
}
-static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 val)
{
- return __cfq_set_weight(cgrp, cft, val, false);
+ return __cfq_set_weight(css, cft, val, false);
}
-static int cfq_set_leaf_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
+static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 val)
{
- return __cfq_set_weight(cgrp, cft, val, true);
+ return __cfq_set_weight(css, cft, val, true);
}
-static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
+static int cfqg_print_stat(struct cgroup_subsys_state *css, struct cftype *cft,
struct seq_file *sf)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkcg *blkcg = css_to_blkcg(css);
blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
cft->private, false);
return 0;
}
-static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *sf)
+static int cfqg_print_rwstat(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *sf)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkcg *blkcg = css_to_blkcg(css);
blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
cft->private, true);
@@ -1773,20 +1773,20 @@ static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
return __blkg_prfill_rwstat(sf, pd, &sum);
}
-static int cfqg_print_stat_recursive(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *sf)
+static int cfqg_print_stat_recursive(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *sf)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkcg *blkcg = css_to_blkcg(css);
blkcg_print_blkgs(sf, blkcg, cfqg_prfill_stat_recursive,
&blkcg_policy_cfq, cft->private, false);
return 0;
}
-static int cfqg_print_rwstat_recursive(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *sf)
+static int cfqg_print_rwstat_recursive(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *sf)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkcg *blkcg = css_to_blkcg(css);
blkcg_print_blkgs(sf, blkcg, cfqg_prfill_rwstat_recursive,
&blkcg_policy_cfq, cft->private, true);
@@ -1810,10 +1810,10 @@ static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
}
/* print avg_queue_size */
-static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *sf)
+static int cfqg_print_avg_queue_size(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *sf)
{
- struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
+ struct blkcg *blkcg = css_to_blkcg(css);
blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
&blkcg_policy_cfq, 0, false);
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index d21167bfc86..dc34a5b8bce 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -359,6 +359,9 @@ int braille_register_console(struct console *console, int index,
char *console_options, char *braille_options)
{
int ret;
+
+ if (!(console->flags & CON_BRL))
+ return 0;
if (!console_options)
/* Only support VisioBraille for now */
console_options = "57600o8";
@@ -374,15 +377,17 @@ int braille_register_console(struct console *console, int index,
braille_co = console;
register_keyboard_notifier(&keyboard_notifier_block);
register_vt_notifier(&vt_notifier_block);
- return 0;
+ return 1;
}
int braille_unregister_console(struct console *console)
{
if (braille_co != console)
return -EINVAL;
+ if (!(console->flags & CON_BRL))
+ return 0;
unregister_keyboard_notifier(&keyboard_notifier_block);
unregister_vt_notifier(&vt_notifier_block);
braille_co = NULL;
- return 0;
+ return 1;
}
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 100bd724f64..22327e6a723 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -91,24 +91,6 @@ config ACPI_EC_DEBUGFS
Thus this option is a debug option that helps to write ACPI drivers
and can be used to identify ACPI code or EC firmware bugs.
-config ACPI_PROC_EVENT
- bool "Deprecated /proc/acpi/event support"
- depends on PROC_FS
- default y
- help
- A user-space daemon, acpid, typically reads /proc/acpi/event
- and handles all ACPI-generated events.
-
- These events are now delivered to user-space either
- via the input layer or as netlink events.
-
- This build option enables the old code for legacy
- user-space implementation. After some time, this will
- be moved under CONFIG_ACPI_PROCFS, and then deleted.
-
- Say Y here to retain the old behaviour. Say N if your
- user-space is newer than kernel 2.6.23 (September 2007).
-
config ACPI_AC
tristate "AC Adapter"
depends on X86
@@ -180,12 +162,6 @@ config ACPI_DOCK
This driver supports ACPI-controlled docking stations and removable
drive bays such as the IBM Ultrabay and the Dell Module Bay.
-config ACPI_I2C
- def_tristate I2C
- depends on I2C
- help
- ACPI I2C enumeration support.
-
config ACPI_PROCESSOR
tristate "Processor"
select THERMAL
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 81dbeb83bb4..cdaf68b58b0 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -73,7 +73,6 @@ obj-$(CONFIG_ACPI_HED) += hed.o
obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o
obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o
obj-$(CONFIG_ACPI_BGRT) += bgrt.o
-obj-$(CONFIG_ACPI_I2C) += acpi_i2c.o
# processor has its own "processor." module_param namespace
processor-y := processor_driver.o processor_throttling.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 4f4e741d34b..f37beaa3275 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -267,7 +267,6 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
msleep(ac_sleep_before_get_state_ms);
acpi_ac_get_state(ac);
- acpi_bus_generate_proc_event(device, event, (u32) ac->state);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event,
(u32) ac->state);
diff --git a/drivers/acpi/acpi_i2c.c b/drivers/acpi/acpi_i2c.c
deleted file mode 100644
index a82c7626aa9..00000000000
--- a/drivers/acpi/acpi_i2c.c
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * ACPI I2C enumeration support
- *
- * Copyright (C) 2012, Intel Corporation
- * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/acpi.h>
-#include <linux/device.h>
-#include <linux/export.h>
-#include <linux/i2c.h>
-#include <linux/ioport.h>
-
-ACPI_MODULE_NAME("i2c");
-
-static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data)
-{
- struct i2c_board_info *info = data;
-
- if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
- struct acpi_resource_i2c_serialbus *sb;
-
- sb = &ares->data.i2c_serial_bus;
- if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
- info->addr = sb->slave_address;
- if (sb->access_mode == ACPI_I2C_10BIT_MODE)
- info->flags |= I2C_CLIENT_TEN;
- }
- } else if (info->irq < 0) {
- struct resource r;
-
- if (acpi_dev_resource_interrupt(ares, 0, &r))
- info->irq = r.start;
- }
-
- /* Tell the ACPI core to skip this resource */
- return 1;
-}
-
-static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
- void *data, void **return_value)
-{
- struct i2c_adapter *adapter = data;
- struct list_head resource_list;
- struct i2c_board_info info;
- struct acpi_device *adev;
- int ret;
-
- if (acpi_bus_get_device(handle, &adev))
- return AE_OK;
- if (acpi_bus_get_status(adev) || !adev->status.present)
- return AE_OK;
-
- memset(&info, 0, sizeof(info));
- info.acpi_node.handle = handle;
- info.irq = -1;
-
- INIT_LIST_HEAD(&resource_list);
- ret = acpi_dev_get_resources(adev, &resource_list,
- acpi_i2c_add_resource, &info);
- acpi_dev_free_resource_list(&resource_list);
-
- if (ret < 0 || !info.addr)
- return AE_OK;
-
- strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type));
- if (!i2c_new_device(adapter, &info)) {
- dev_err(&adapter->dev,
- "failed to add I2C device %s from ACPI\n",
- dev_name(&adev->dev));
- }
-
- return AE_OK;
-}
-
-/**
- * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter
- * @adapter: pointer to adapter
- *
- * Enumerate all I2C slave devices behind this adapter by walking the ACPI
- * namespace. When a device is found it will be added to the Linux device
- * model and bound to the corresponding ACPI handle.
- */
-void acpi_i2c_register_devices(struct i2c_adapter *adapter)
-{
- acpi_handle handle;
- acpi_status status;
-
- handle = ACPI_HANDLE(adapter->dev.parent);
- if (!handle)
- return;
-
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
- acpi_i2c_add_device, NULL,
- adapter, NULL);
- if (ACPI_FAILURE(status))
- dev_warn(&adapter->dev, "failed to enumerate I2C slaves\n");
-}
-EXPORT_SYMBOL_GPL(acpi_i2c_register_devices);
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 27bb6a91de5..fc6008fbce3 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -231,16 +231,19 @@ static struct task_struct *ps_tsks[NR_CPUS];
static unsigned int ps_tsk_num;
static int create_power_saving_task(void)
{
- int rc = -ENOMEM;
+ int rc;
ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
(void *)(unsigned long)ps_tsk_num,
"acpi_pad/%d", ps_tsk_num);
- rc = PTR_RET(ps_tsks[ps_tsk_num]);
- if (!rc)
- ps_tsk_num++;
- else
+
+ if (IS_ERR(ps_tsks[ps_tsk_num])) {
+ rc = PTR_ERR(ps_tsks[ps_tsk_num]);
ps_tsks[ps_tsk_num] = NULL;
+ } else {
+ rc = 0;
+ ps_tsk_num++;
+ }
return rc;
}
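The rewritten create_power_saving_task() above leans on the fact that kthread_run() reports failure through an ERR_PTR()-encoded pointer rather than NULL, so the result must be tested with IS_ERR()/PTR_ERR() before it is stored or used. A stripped-down sketch of that idiom; the arguments and thread name are placeholders, not part of the patch:

	struct task_struct *tsk;

	tsk = kthread_run(power_saving_thread, NULL, "acpi_pad/example");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);	/* e.g. -ENOMEM or -EINTR */
	/* tsk is a valid, already running kthread from here on */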
@@ -452,7 +455,6 @@ static void acpi_pad_notify(acpi_handle handle, u32 event,
switch (event) {
case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
acpi_pad_handle_notify(handle);
- acpi_bus_generate_proc_event(device, event, 0);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index fafec5ddf17..1bde12708f9 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -52,7 +52,7 @@ int acpi_create_platform_device(struct acpi_device *adev,
struct platform_device_info pdevinfo;
struct resource_list_entry *rentry;
struct list_head resource_list;
- struct resource *resources;
+ struct resource *resources = NULL;
int count;
/* If the ACPI node already has a physical device attached, skip it. */
@@ -61,20 +61,22 @@ int acpi_create_platform_device(struct acpi_device *adev,
INIT_LIST_HEAD(&resource_list);
count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
- if (count <= 0)
+ if (count < 0) {
return 0;
+ } else if (count > 0) {
+ resources = kmalloc(count * sizeof(struct resource),
+ GFP_KERNEL);
+ if (!resources) {
+ dev_err(&adev->dev, "No memory for resources\n");
+ acpi_dev_free_resource_list(&resource_list);
+ return -ENOMEM;
+ }
+ count = 0;
+ list_for_each_entry(rentry, &resource_list, node)
+ resources[count++] = rentry->res;
- resources = kmalloc(count * sizeof(struct resource), GFP_KERNEL);
- if (!resources) {
- dev_err(&adev->dev, "No memory for resources\n");
acpi_dev_free_resource_list(&resource_list);
- return -ENOMEM;
}
- count = 0;
- list_for_each_entry(rentry, &resource_list, node)
- resources[count++] = rentry->res;
-
- acpi_dev_free_resource_list(&resource_list);
memset(&pdevinfo, 0, sizeof(pdevinfo));
/*
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index fd6c51cc3ac..f29e06efa47 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -178,14 +178,17 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
return -ENODEV;
+ cpu_maps_update_begin();
+ cpu_hotplug_begin();
+
ret = acpi_map_lsapic(pr->handle, &pr->id);
if (ret)
- return ret;
+ goto out;
ret = arch_register_cpu(pr->id);
if (ret) {
acpi_unmap_lsapic(pr->id);
- return ret;
+ goto out;
}
/*
@@ -195,7 +198,11 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
*/
pr_info("CPU%d has been hot-added\n", pr->id);
pr->flags.need_hotplug_init = 1;
- return 0;
+
+out:
+ cpu_hotplug_done();
+ cpu_maps_update_done();
+ return ret;
}
#else
static inline int acpi_processor_hotadd_init(struct acpi_processor *pr)
@@ -451,13 +458,18 @@ static void acpi_processor_remove(struct acpi_device *device)
/* Clean up. */
per_cpu(processor_device_array, pr->id) = NULL;
per_cpu(processors, pr->id) = NULL;
- try_offline_node(cpu_to_node(pr->id));
+
+ cpu_maps_update_begin();
+ cpu_hotplug_begin();
/* Remove the CPU. */
- get_online_cpus();
arch_unregister_cpu(pr->id);
acpi_unmap_lsapic(pr->id);
- put_online_cpus();
+
+ cpu_hotplug_done();
+ cpu_maps_update_done();
+
+ try_offline_node(cpu_to_node(pr->id));
out:
free_cpumask_var(pr->throttling.shared_cpu_map);
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index b8d38117a20..90e846f985f 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -138,6 +138,12 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_auto_repair, FALSE);
*/
u8 ACPI_INIT_GLOBAL(acpi_gbl_disable_ssdt_table_load, FALSE);
+/*
+ * We keep track of the latest version of Windows that has been requested by
+ * the BIOS.
+ */
+u8 ACPI_INIT_GLOBAL(acpi_gbl_osi_data, 0);
+
/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
struct acpi_table_fadt acpi_gbl_FADT;
@@ -285,7 +291,6 @@ ACPI_EXTERN u8 acpi_gbl_debugger_configuration;
ACPI_EXTERN u8 acpi_gbl_step_to_next_call;
ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
ACPI_EXTERN u8 acpi_gbl_events_initialized;
-ACPI_EXTERN u8 acpi_gbl_osi_data;
ACPI_EXTERN struct acpi_interface_info *acpi_gbl_supported_interfaces;
ACPI_EXTERN struct acpi_address_range
*acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX];
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index d4a4901637c..0ed00669cd2 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -942,6 +942,9 @@ struct acpi_interface_info {
#define ACPI_OSI_INVALID 0x01
#define ACPI_OSI_DYNAMIC 0x02
+#define ACPI_OSI_FEATURE 0x04
+#define ACPI_OSI_DEFAULT_INVALID 0x08
+#define ACPI_OSI_OPTIONAL_FEATURE (ACPI_OSI_FEATURE | ACPI_OSI_DEFAULT_INVALID | ACPI_OSI_INVALID)
struct acpi_port_info {
char *name;
@@ -1030,6 +1033,7 @@ struct acpi_external_list {
u8 type;
u8 flags;
u8 resolved;
+ u8 emitted;
};
/* Values for Flags field above */
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index b83dc32a5ae..40b04bd5579 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -104,8 +104,8 @@ acpi_ns_walk_namespace(acpi_object_type type,
acpi_handle start_object,
u32 max_depth,
u32 flags,
- acpi_walk_callback pre_order_visit,
- acpi_walk_callback post_order_visit,
+ acpi_walk_callback descending_callback,
+ acpi_walk_callback ascending_callback,
void *context, void **return_value);
struct acpi_namespace_node *acpi_ns_get_next_node(struct acpi_namespace_node
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 7755e915a00..c54f42c64fe 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -47,6 +47,13 @@
acpi_status acpi_allocate_root_table(u32 initial_table_count);
/*
+ * tbxfroot - Root pointer utilities
+ */
+acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
+
+u8 *acpi_tb_scan_memory_for_rsdp(u8 *start_address, u32 length);
+
+/*
* tbfadt - FADT parse/convert/validate
*/
void acpi_tb_parse_fadt(u32 table_index);
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 3c76edea680..d5a62a6182b 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -470,6 +470,8 @@ acpi_status acpi_ut_install_interface(acpi_string interface_name);
acpi_status acpi_ut_remove_interface(acpi_string interface_name);
+acpi_status acpi_ut_update_interfaces(u8 action);
+
struct acpi_interface_info *acpi_ut_get_interface(acpi_string interface_name);
acpi_status acpi_ut_osi_implementation(struct acpi_walk_state *walk_state);
@@ -616,7 +618,7 @@ int acpi_ut_stricmp(char *string1, char *string2);
acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer);
-void acpi_ut_print_string(char *string, u8 max_length);
+void acpi_ut_print_string(char *string, u16 max_length);
void ut_convert_backslashes(char *pathname);
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 9037f17c960..7842700346a 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -125,7 +125,6 @@ acpi_status acpi_ev_gpe_initialize(void)
/* GPE block 0 exists (has both length and address > 0) */
register_count0 = (u16)(acpi_gbl_FADT.gpe0_block_length / 2);
-
gpe_number_max =
(register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
@@ -204,16 +203,6 @@ acpi_status acpi_ev_gpe_initialize(void)
goto cleanup;
}
- /* Check for Max GPE number out-of-range */
-
- if (gpe_number_max > ACPI_GPE_MAX) {
- ACPI_ERROR((AE_INFO,
- "Maximum GPE number from FADT is too large: 0x%X",
- gpe_number_max));
- status = AE_BAD_VALUE;
- goto cleanup;
- }
-
cleanup:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index c740f24e310..4d046faac48 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -338,6 +338,7 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
{
u8 *target;
char *name;
+ const char *reference_name;
u8 count;
if (!info) {
@@ -426,10 +427,9 @@ acpi_ex_dump_object(union acpi_operand_object *obj_desc,
case ACPI_EXD_REFERENCE:
+ reference_name = acpi_ut_get_reference_name(obj_desc);
acpi_ex_out_string("Class Name",
- ACPI_CAST_PTR(char,
- acpi_ut_get_reference_name
- (obj_desc)));
+ ACPI_CAST_PTR(char, reference_name));
acpi_ex_dump_reference_obj(obj_desc);
break;
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 814b4a3d656..2cdd41d8ade 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -962,10 +962,17 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
*/
return_desc =
*(operand[0]->reference.where);
- if (return_desc) {
- acpi_ut_add_reference
- (return_desc);
+ if (!return_desc) {
+ /*
+ * Element is NULL, do not allow the dereference.
+ * This provides compatibility with other ACPI
+ * implementations.
+ */
+ return_ACPI_STATUS
+ (AE_AML_UNINITIALIZED_ELEMENT);
}
+
+ acpi_ut_add_reference(return_desc);
break;
default:
@@ -990,11 +997,40 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
acpi_namespace_node
*)
return_desc);
- }
+ if (!return_desc) {
+ break;
+ }
- /* Add another reference to the object! */
+ /*
+ * June 2013:
+ * buffer_fields/field_units require additional resolution
+ */
+ switch (return_desc->common.type) {
+ case ACPI_TYPE_BUFFER_FIELD:
+ case ACPI_TYPE_LOCAL_REGION_FIELD:
+ case ACPI_TYPE_LOCAL_BANK_FIELD:
+ case ACPI_TYPE_LOCAL_INDEX_FIELD:
- acpi_ut_add_reference(return_desc);
+ status =
+ acpi_ex_read_data_from_field
+ (walk_state, return_desc,
+ &temp_desc);
+ if (ACPI_FAILURE(status)) {
+ goto cleanup;
+ }
+
+ return_desc = temp_desc;
+ break;
+
+ default:
+
+ /* Add another reference to the object */
+
+ acpi_ut_add_reference
+ (return_desc);
+ break;
+ }
+ }
break;
default:
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 5e5f76230f5..414076818d4 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -43,6 +43,7 @@
*/
#include <acpi/acpi.h>
+#include <linux/acpi.h>
#include "accommon.h"
#define _COMPONENT ACPI_HARDWARE
@@ -128,6 +129,14 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state)
ACPI_FLUSH_CPU_CACHE();
+ status = acpi_os_prepare_extended_sleep(sleep_state,
+ acpi_gbl_sleep_type_a,
+ acpi_gbl_sleep_type_b);
+ if (ACPI_SKIP(status))
+ return_ACPI_STATUS(AE_OK);
+ if (ACPI_FAILURE(status))
+ return_ACPI_STATUS(status);
+
/*
* Set the SLP_TYP and SLP_EN bits.
*
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 0c1a8bbd05d..2d7d22ebc78 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -100,8 +100,13 @@ acpi_status acpi_get_timer(u32 * ticks)
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- status = acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block);
+ /* ACPI 5.0A: PM Timer is optional */
+
+ if (!acpi_gbl_FADT.xpm_timer_block.address) {
+ return_ACPI_STATUS(AE_SUPPORT);
+ }
+ status = acpi_hw_read(ticks, &acpi_gbl_FADT.xpm_timer_block);
return_ACPI_STATUS(status);
}
@@ -148,6 +153,12 @@ acpi_get_timer_duration(u32 start_ticks, u32 end_ticks, u32 * time_elapsed)
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
+ /* ACPI 5.0A: PM Timer is optional */
+
+ if (!acpi_gbl_FADT.xpm_timer_block.address) {
+ return_ACPI_STATUS(AE_SUPPORT);
+ }
+
/*
* Compute Tick Delta:
* Handle (max one) timer rollovers on 24-bit versus 32-bit timers.
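
With the two hunks above, acpi_get_timer() and acpi_get_timer_duration() return AE_SUPPORT when the FADT declares no PM Timer block (the timer is optional as of ACPI 5.0A). A minimal caller-side sketch of handling that case follows; the wrapper name and error mapping are illustrative only, not part of this patch.

#include <linux/acpi.h>		/* acpi_get_timer(), AE_SUPPORT */
#include <linux/errno.h>

/* Illustrative wrapper: treat a missing PM Timer as "not available"
 * instead of a hard error, mirroring the AE_SUPPORT return added above. */
static int example_read_pm_timer(u32 *ticks)
{
	acpi_status status = acpi_get_timer(ticks);

	if (status == AE_SUPPORT)
		return -ENODEV;		/* platform declares no PM Timer block */
	if (ACPI_FAILURE(status))
		return -EIO;		/* register read failed */

	return 0;			/* *ticks now holds the timer value */
}
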
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index 24b71a01bf9..098e7666cbc 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -151,6 +151,15 @@ acpi_ns_check_return_value(struct acpi_namespace_node *node,
}
/*
+ *
+ * 4) If there is no return value and it is optional, just return
+ * AE_OK (_WAK).
+ */
+ if (!(*return_object_ptr)) {
+ goto exit;
+ }
+
+ /*
* For returned Package objects, check the type of all sub-objects.
* Note: Package may have been newly created by call above.
*/
@@ -268,7 +277,12 @@ acpi_ns_check_object_type(struct acpi_evaluate_info *info,
acpi_ut_get_expected_return_types(type_buffer, expected_btypes);
- if (package_index == ACPI_NOT_PACKAGE_ELEMENT) {
+ if (!return_object) {
+ ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
+ info->node_flags,
+ "Expected return object of type %s",
+ type_buffer));
+ } else if (package_index == ACPI_NOT_PACKAGE_ELEMENT) {
ACPI_WARN_PREDEFINED((AE_INFO, info->full_pathname,
info->node_flags,
"Return type mismatch - found %s, expected %s",
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index e70911a9e44..e81f15ef659 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -156,9 +156,9 @@ struct acpi_namespace_node *acpi_ns_get_next_node_typed(acpi_object_type type,
* max_depth - Depth to which search is to reach
* flags - Whether to unlock the NS before invoking
* the callback routine
- * pre_order_visit - Called during tree pre-order visit
+ * descending_callback - Called during tree descent
* when an object of "Type" is found
- * post_order_visit - Called during tree post-order visit
+ * ascending_callback - Called during tree ascent
* when an object of "Type" is found
* context - Passed to user function(s) above
* return_value - from the user_function if terminated
@@ -185,8 +185,8 @@ acpi_ns_walk_namespace(acpi_object_type type,
acpi_handle start_node,
u32 max_depth,
u32 flags,
- acpi_walk_callback pre_order_visit,
- acpi_walk_callback post_order_visit,
+ acpi_walk_callback descending_callback,
+ acpi_walk_callback ascending_callback,
void *context, void **return_value)
{
acpi_status status;
@@ -255,22 +255,22 @@ acpi_ns_walk_namespace(acpi_object_type type,
}
/*
- * Invoke the user function, either pre-order or post-order
+ * Invoke the user function, either descending, ascending,
* or both.
*/
if (!node_previously_visited) {
- if (pre_order_visit) {
+ if (descending_callback) {
status =
- pre_order_visit(child_node, level,
- context,
- return_value);
+ descending_callback(child_node,
+ level, context,
+ return_value);
}
} else {
- if (post_order_visit) {
+ if (ascending_callback) {
status =
- post_order_visit(child_node, level,
- context,
- return_value);
+ ascending_callback(child_node,
+ level, context,
+ return_value);
}
}
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index f553cfdb71d..b38b4b07f86 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -533,9 +533,9 @@ static void acpi_ns_resolve_references(struct acpi_evaluate_info *info)
* PARAMETERS: type - acpi_object_type to search for
* start_object - Handle in namespace where search begins
* max_depth - Depth to which search is to reach
- * pre_order_visit - Called during tree pre-order visit
+ * descending_callback - Called during tree descent
* when an object of "Type" is found
- * post_order_visit - Called during tree post-order visit
+ * ascending_callback - Called during tree ascent
* when an object of "Type" is found
* context - Passed to user function(s) above
* return_value - Location where return value of
@@ -563,8 +563,8 @@ acpi_status
acpi_walk_namespace(acpi_object_type type,
acpi_handle start_object,
u32 max_depth,
- acpi_walk_callback pre_order_visit,
- acpi_walk_callback post_order_visit,
+ acpi_walk_callback descending_callback,
+ acpi_walk_callback ascending_callback,
void *context, void **return_value)
{
acpi_status status;
@@ -574,7 +574,7 @@ acpi_walk_namespace(acpi_object_type type,
/* Parameter validation */
if ((type > ACPI_TYPE_LOCAL_MAX) ||
- (!max_depth) || (!pre_order_visit && !post_order_visit)) {
+ (!max_depth) || (!descending_callback && !ascending_callback)) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
@@ -606,9 +606,9 @@ acpi_walk_namespace(acpi_object_type type,
}
status = acpi_ns_walk_namespace(type, start_object, max_depth,
- ACPI_NS_WALK_UNLOCK, pre_order_visit,
- post_order_visit, context,
- return_value);
+ ACPI_NS_WALK_UNLOCK,
+ descending_callback, ascending_callback,
+ context, return_value);
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
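
The walk interface keeps its shape; only the callback parameters are renamed to descending/ascending. As a reminder of how the public entry point is used, here is a small sketch that counts Device objects with just a descending callback; the example_* names are hypothetical.

#include <linux/acpi.h>

/* Called on the way down the tree for every Device object found. */
static acpi_status example_descend(acpi_handle handle, u32 nesting_level,
				   void *context, void **return_value)
{
	unsigned int *count = context;

	(*count)++;			/* count every Device object visited */
	return AE_OK;			/* AE_CTRL_TERMINATE would stop the walk */
}

static unsigned int example_count_devices(void)
{
	unsigned int count = 0;

	/* No ascending (post-order) callback is needed, so pass NULL. */
	acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
			    ACPI_UINT32_MAX, example_descend, NULL,
			    &count, NULL);
	return count;
}
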
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index f3a4d95899f..83c16443458 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -158,6 +158,7 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
{
acpi_status status;
struct acpi_namespace_node *node;
+ char *node_name;
/* Parameter validation */
@@ -202,7 +203,8 @@ acpi_get_name(acpi_handle handle, u32 name_type, struct acpi_buffer * buffer)
/* Just copy the ACPI name from the Node and zero terminate it */
- ACPI_MOVE_NAME(buffer->pointer, acpi_ut_get_node_name(node));
+ node_name = acpi_ut_get_node_name(node);
+ ACPI_MOVE_NAME(buffer->pointer, node_name);
((char *)buffer->pointer)[ACPI_NAME_SIZE] = 0;
status = AE_OK;
@@ -379,9 +381,14 @@ acpi_get_object_info(acpi_handle handle,
* Get extra info for ACPI Device/Processor objects only:
* Run the _STA, _ADR and, sx_w, and _sx_d methods.
*
- * Note: none of these methods are required, so they may or may
+ * Notes: none of these methods are required, so they may or may
* not be present for this device. The Info->Valid bitfield is used
* to indicate which methods were found and run successfully.
+ *
+ * For _STA, if the method does not exist, then (as per the ACPI
+ * specification), the returned current_status flags will indicate
+ * that the device is present/functional/enabled. Otherwise, the
+ * current_status flags reflect the value returned from _STA.
*/
/* Execute the Device._STA method */
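
Given the note above on _STA defaulting, a caller of acpi_get_object_info() can rely on current_status even for devices that provide no _STA method. A hedged sketch (helper name hypothetical):

#include <linux/acpi.h>
#include <linux/slab.h>		/* kfree() for the returned info buffer */

/* Illustrative helper: "present" according to _STA, or by default when
 * the method is missing (the current_status defaulting described in the
 * note above). */
static bool example_device_present(acpi_handle handle)
{
	struct acpi_device_info *info;
	bool present = false;

	if (ACPI_FAILURE(acpi_get_object_info(handle, &info)))
		return false;

	if (info->valid & ACPI_VALID_STA)
		present = !!(info->current_status & ACPI_STA_DEVICE_PRESENT);

	kfree(info);
	return present;
}
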
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 33b00d22300..9d99f218969 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -117,7 +117,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
ACPI_FADT_OFFSET(pm_timer_block),
ACPI_FADT_OFFSET(pm_timer_length),
ACPI_PM_TIMER_WIDTH,
- ACPI_FADT_REQUIRED},
+ ACPI_FADT_SEPARATE_LENGTH}, /* ACPI 5.0A: Timer is optional */
{"Gpe0Block",
ACPI_FADT_OFFSET(xgpe0_block),
@@ -574,7 +574,7 @@ static void acpi_tb_validate_fadt(void)
if (fadt_info_table[i].type & ACPI_FADT_REQUIRED) {
/*
- * Field is required (Pm1a_event, Pm1a_control, pm_timer).
+ * Field is required (Pm1a_event, Pm1a_control).
* Both the address and length must be non-zero.
*/
if (!address64->address || !length) {
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 7c2ecfb7c2c..948c95e80d4 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -48,11 +48,6 @@
#define _COMPONENT ACPI_TABLES
ACPI_MODULE_NAME("tbxfroot")
-/* Local prototypes */
-static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length);
-
-static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
-
/*******************************************************************************
*
* FUNCTION: acpi_tb_validate_rsdp
@@ -64,8 +59,7 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
* DESCRIPTION: Validate the RSDP (ptr)
*
******************************************************************************/
-
-static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
+acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
{
/*
@@ -74,7 +68,7 @@ static acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp)
* Note: Sometimes there exists more than one RSDP in memory; the valid
* RSDP has a valid checksum, all others have an invalid checksum.
*/
- if (ACPI_STRNCMP((char *)rsdp, ACPI_SIG_RSDP,
+ if (ACPI_STRNCMP((char *)rsdp->signature, ACPI_SIG_RSDP,
sizeof(ACPI_SIG_RSDP) - 1) != 0) {
/* Nope, BAD Signature */
@@ -231,7 +225,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
* DESCRIPTION: Search a block of memory for the RSDP signature
*
******************************************************************************/
-static u8 *acpi_tb_scan_memory_for_rsdp(u8 * start_address, u32 length)
+u8 *acpi_tb_scan_memory_for_rsdp(u8 *start_address, u32 length)
{
acpi_status status;
u8 *mem_rover;
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index ee83adb97b1..4fd68971019 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -239,7 +239,8 @@ acpi_ut_evaluate_numeric_object(char *object_name,
* RETURN: Status
*
* DESCRIPTION: Executes _STA for selected device and stores results in
- * *Flags.
+ * *Flags. If _STA does not exist, then the device is assumed
+ * to be present/functional/enabled (as per the ACPI spec).
*
* NOTE: Internal function, no parameter validation
*
@@ -257,6 +258,11 @@ acpi_ut_execute_STA(struct acpi_namespace_node *device_node, u32 * flags)
ACPI_BTYPE_INTEGER, &obj_desc);
if (ACPI_FAILURE(status)) {
if (AE_NOT_FOUND == status) {
+ /*

+ * If _STA does not exist, then (as per the ACPI specification),
+ * the returned flags will indicate that the device is present,
+ * functional, and enabled.
+ */
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"_STA on %4.4s was not found, assuming device is present\n",
acpi_ut_get_node_name(device_node)));
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index f736448a860..d6f26bf8a06 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -336,7 +336,6 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_trace_dbg_layer = 0;
acpi_gbl_debugger_configuration = DEBUGGER_THREADING;
acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT;
- acpi_gbl_osi_data = 0;
acpi_gbl_osi_mutex = NULL;
acpi_gbl_reg_methods_executed = FALSE;
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 7e807725c63..8856bd37bc7 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -77,21 +77,20 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
/* Feature Group Strings */
- {"Extended Address Space Descriptor", NULL, 0, 0}
+ {"Extended Address Space Descriptor", NULL, ACPI_OSI_FEATURE, 0},
/*
* All "optional" feature group strings (features that are implemented
- * by the host) should be dynamically added by the host via
- * acpi_install_interface and should not be manually added here.
- *
- * Examples of optional feature group strings:
- *
- * "Module Device"
- * "Processor Device"
- * "3.0 Thermal Model"
- * "3.0 _SCP Extensions"
- * "Processor Aggregator Device"
+ * by the host) should be dynamically modified to VALID by the host via
+ * acpi_install_interface or acpi_update_interfaces. Such optional feature
+ * group strings are set as INVALID by default here.
*/
+
+ {"Module Device", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0},
+ {"Processor Device", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0},
+ {"3.0 Thermal Model", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0},
+ {"3.0 _SCP Extensions", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0},
+ {"Processor Aggregator Device", NULL, ACPI_OSI_OPTIONAL_FEATURE, 0}
};
/*******************************************************************************
@@ -158,11 +157,20 @@ acpi_status acpi_ut_interface_terminate(void)
while (next_interface) {
acpi_gbl_supported_interfaces = next_interface->next;
- /* Only interfaces added at runtime can be freed */
-
if (next_interface->flags & ACPI_OSI_DYNAMIC) {
+
+ /* Only interfaces added at runtime can be freed */
+
ACPI_FREE(next_interface->name);
ACPI_FREE(next_interface);
+ } else {
+ /* Interface is in static list. Reset it to invalid or valid. */
+
+ if (next_interface->flags & ACPI_OSI_DEFAULT_INVALID) {
+ next_interface->flags |= ACPI_OSI_INVALID;
+ } else {
+ next_interface->flags &= ~ACPI_OSI_INVALID;
+ }
}
next_interface = acpi_gbl_supported_interfaces;
@@ -278,6 +286,49 @@ acpi_status acpi_ut_remove_interface(acpi_string interface_name)
/*******************************************************************************
*
+ * FUNCTION: acpi_ut_update_interfaces
+ *
+ * PARAMETERS: action - Actions to be performed during the
+ * update
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Update _OSI interface strings, disabling or enabling OS vendor
+ * strings and/or feature group strings.
+ * Caller MUST hold acpi_gbl_osi_mutex
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ut_update_interfaces(u8 action)
+{
+ struct acpi_interface_info *next_interface;
+
+ next_interface = acpi_gbl_supported_interfaces;
+ while (next_interface) {
+ if (((next_interface->flags & ACPI_OSI_FEATURE) &&
+ (action & ACPI_FEATURE_STRINGS)) ||
+ (!(next_interface->flags & ACPI_OSI_FEATURE) &&
+ (action & ACPI_VENDOR_STRINGS))) {
+ if (action & ACPI_DISABLE_INTERFACES) {
+
+ /* Mark the interfaces as invalid */
+
+ next_interface->flags |= ACPI_OSI_INVALID;
+ } else {
+ /* Mark the interfaces as valid */
+
+ next_interface->flags &= ~ACPI_OSI_INVALID;
+ }
+ }
+
+ next_interface = next_interface->next;
+ }
+
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_ut_get_interface
*
* PARAMETERS: interface_name - The interface to find
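
Per the comment above, optional feature group strings now sit in the static table marked default-invalid, and a host that actually implements one is expected to enable it through acpi_install_interface() or acpi_update_interfaces(). A minimal sketch of the former, under the assumption that installing an already-known string simply clears its invalid flag:

#include <linux/acpi.h>
#include <linux/printk.h>

/* Illustrative host init step: advertise the "Processor Aggregator
 * Device" feature so firmware _OSI queries for it return TRUE. */
static void example_enable_pad_feature(void)
{
	acpi_status status;

	status = acpi_install_interface("Processor Aggregator Device");
	if (ACPI_FAILURE(status))
		pr_warn("could not enable _OSI feature string: %s\n",
			acpi_format_exception(status));
}
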
diff --git a/drivers/acpi/acpica/utstring.c b/drivers/acpi/acpica/utstring.c
index c53759b76a3..cb1e9cc32d5 100644
--- a/drivers/acpi/acpica/utstring.c
+++ b/drivers/acpi/acpica/utstring.c
@@ -333,7 +333,8 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
* FUNCTION: acpi_ut_print_string
*
* PARAMETERS: string - Null terminated ASCII string
- * max_length - Maximum output length
+ * max_length - Maximum output length. Used to constrain the
+ * length of strings during debug output only.
*
* RETURN: None
*
@@ -342,7 +343,7 @@ acpi_status acpi_ut_strtoul64(char *string, u32 base, u64 *ret_integer)
*
******************************************************************************/
-void acpi_ut_print_string(char *string, u8 max_length)
+void acpi_ut_print_string(char *string, u16 max_length)
{
u32 i;
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 6505774f223..03a211e6e26 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -389,6 +389,34 @@ ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
/*****************************************************************************
*
+ * FUNCTION: acpi_update_interfaces
+ *
+ * PARAMETERS: action - Actions to be performed during the
+ * update
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Update _OSI interface strings, disabling or enabling OS vendor
+ * strings and/or feature group strings.
+ *
+ ****************************************************************************/
+acpi_status acpi_update_interfaces(u8 action)
+{
+ acpi_status status;
+
+ status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ status = acpi_ut_update_interfaces(action);
+
+ acpi_os_release_mutex(acpi_gbl_osi_mutex);
+ return (status);
+}
+
+/*****************************************************************************
+ *
* FUNCTION: acpi_check_address_range
*
* PARAMETERS: space_id - Address space ID
@@ -402,6 +430,7 @@ ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
* ASL operation region address ranges.
*
****************************************************************************/
+
u32
acpi_check_address_range(acpi_adr_space_type space_id,
acpi_physical_address address,
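
A caller-side sketch of the new public entry point; the ACPI_DISABLE_ALL_VENDOR_STRINGS action mask is an assumption about the flags exported together with acpi_update_interfaces() (the internal code above works on the equivalent ACPI_VENDOR_STRINGS/ACPI_DISABLE_INTERFACES bits):

#include <linux/acpi.h>
#include <linux/printk.h>

/* Illustrative quirk path: stop answering _OSI("Windows ...") vendor
 * strings while leaving the feature group strings untouched. */
static void example_hide_vendor_strings(void)
{
	acpi_status status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);

	if (ACPI_FAILURE(status))
		pr_warn("acpi_update_interfaces() failed: %s\n",
			acpi_format_exception(status));
}
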
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 88d0b0f9f92..26311f23c82 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -39,7 +39,8 @@
#include "apei-internal.h"
-#define ERST_PFX "ERST: "
+#undef pr_fmt
+#define pr_fmt(fmt) "ERST: " fmt
/* ERST command status */
#define ERST_STATUS_SUCCESS 0x0
@@ -109,8 +110,7 @@ static inline int erst_errno(int command_status)
static int erst_timedout(u64 *t, u64 spin_unit)
{
if ((s64)*t < spin_unit) {
- pr_warning(FW_WARN ERST_PFX
- "Firmware does not respond in time\n");
+ pr_warn(FW_WARN "Firmware does not respond in time.\n");
return 1;
}
*t -= spin_unit;
@@ -186,8 +186,8 @@ static int erst_exec_stall(struct apei_exec_context *ctx,
if (ctx->value > FIRMWARE_MAX_STALL) {
if (!in_nmi())
- pr_warning(FW_WARN ERST_PFX
- "Too long stall time for stall instruction: %llx.\n",
+ pr_warn(FW_WARN
+ "Too long stall time for stall instruction: 0x%llx.\n",
ctx->value);
stall_time = FIRMWARE_MAX_STALL;
} else
@@ -206,8 +206,8 @@ static int erst_exec_stall_while_true(struct apei_exec_context *ctx,
if (ctx->var1 > FIRMWARE_MAX_STALL) {
if (!in_nmi())
- pr_warning(FW_WARN ERST_PFX
- "Too long stall time for stall while true instruction: %llx.\n",
+ pr_warn(FW_WARN
+ "Too long stall time for stall while true instruction: 0x%llx.\n",
ctx->var1);
stall_time = FIRMWARE_MAX_STALL;
} else
@@ -271,8 +271,7 @@ static int erst_exec_move_data(struct apei_exec_context *ctx,
/* ioremap does not work in interrupt context */
if (in_interrupt()) {
- pr_warning(ERST_PFX
- "MOVE_DATA can not be used in interrupt context");
+ pr_warn("MOVE_DATA can not be used in interrupt context.\n");
return -EBUSY;
}
@@ -284,8 +283,10 @@ static int erst_exec_move_data(struct apei_exec_context *ctx,
if (!src)
return -ENOMEM;
dst = ioremap(ctx->dst_base + offset, ctx->var2);
- if (!dst)
+ if (!dst) {
+ iounmap(src);
return -ENOMEM;
+ }
memmove(dst, src, ctx->var2);
@@ -522,8 +523,7 @@ retry:
ERST_RECORD_ID_CACHE_SIZE_MAX);
if (new_size <= erst_record_id_cache.size) {
if (printk_ratelimit())
- pr_warning(FW_WARN ERST_PFX
- "too many record ID!\n");
+ pr_warn(FW_WARN "too many record IDs!\n");
return 0;
}
alloc_size = new_size * sizeof(entries[0]);
@@ -759,8 +759,7 @@ static int __erst_clear_from_storage(u64 record_id)
static void pr_unimpl_nvram(void)
{
if (printk_ratelimit())
- pr_warning(ERST_PFX
- "NVRAM ERST Log Address Range is not implemented yet\n");
+ pr_warn("NVRAM ERST Log Address Range not implemented yet.\n");
}
static int __erst_write_to_nvram(const struct cper_record_header *record)
@@ -933,9 +932,9 @@ static int erst_open_pstore(struct pstore_info *psi);
static int erst_close_pstore(struct pstore_info *psi);
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, int *count,
struct timespec *time, char **buf,
- struct pstore_info *psi);
+ bool *compressed, struct pstore_info *psi);
static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
- u64 *id, unsigned int part, int count, size_t hsize,
+ u64 *id, unsigned int part, int count, bool compressed,
size_t size, struct pstore_info *psi);
static int erst_clearer(enum pstore_type_id type, u64 id, int count,
struct timespec time, struct pstore_info *psi);
@@ -956,6 +955,9 @@ static struct pstore_info erst_info = {
#define CPER_SECTION_TYPE_DMESG \
UUID_LE(0xc197e04e, 0xd545, 0x4a70, 0x9c, 0x17, 0xa5, 0x54, \
0x94, 0x19, 0xeb, 0x12)
+#define CPER_SECTION_TYPE_DMESG_Z \
+ UUID_LE(0x4f118707, 0x04dd, 0x4055, 0xb5, 0xdd, 0x95, 0x6d, \
+ 0x34, 0xdd, 0xfa, 0xc6)
#define CPER_SECTION_TYPE_MCE \
UUID_LE(0xfe08ffbe, 0x95e4, 0x4be7, 0xbc, 0x73, 0x40, 0x96, \
0x04, 0x4a, 0x38, 0xfc)
@@ -989,7 +991,7 @@ static int erst_close_pstore(struct pstore_info *psi)
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type, int *count,
struct timespec *time, char **buf,
- struct pstore_info *psi)
+ bool *compressed, struct pstore_info *psi)
{
int rc;
ssize_t len = 0;
@@ -1034,7 +1036,12 @@ skip:
}
memcpy(*buf, rcd->data, len - sizeof(*rcd));
*id = record_id;
+ *compressed = false;
if (uuid_le_cmp(rcd->sec_hdr.section_type,
+ CPER_SECTION_TYPE_DMESG_Z) == 0) {
+ *type = PSTORE_TYPE_DMESG;
+ *compressed = true;
+ } else if (uuid_le_cmp(rcd->sec_hdr.section_type,
CPER_SECTION_TYPE_DMESG) == 0)
*type = PSTORE_TYPE_DMESG;
else if (uuid_le_cmp(rcd->sec_hdr.section_type,
@@ -1055,7 +1062,7 @@ out:
}
static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
- u64 *id, unsigned int part, int count, size_t hsize,
+ u64 *id, unsigned int part, int count, bool compressed,
size_t size, struct pstore_info *psi)
{
struct cper_pstore_record *rcd = (struct cper_pstore_record *)
@@ -1085,7 +1092,10 @@ static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
rcd->sec_hdr.flags = CPER_SEC_PRIMARY;
switch (type) {
case PSTORE_TYPE_DMESG:
- rcd->sec_hdr.section_type = CPER_SECTION_TYPE_DMESG;
+ if (compressed)
+ rcd->sec_hdr.section_type = CPER_SECTION_TYPE_DMESG_Z;
+ else
+ rcd->sec_hdr.section_type = CPER_SECTION_TYPE_DMESG;
break;
case PSTORE_TYPE_MCE:
rcd->sec_hdr.section_type = CPER_SECTION_TYPE_MCE;
@@ -1120,7 +1130,7 @@ static int __init erst_init(void)
goto err;
if (erst_disable) {
- pr_info(ERST_PFX
+ pr_info(
"Error Record Serialization Table (ERST) support is disabled.\n");
goto err;
}
@@ -1131,14 +1141,14 @@ static int __init erst_init(void)
goto err;
else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
- pr_err(ERST_PFX "Failed to get table, %s\n", msg);
+ pr_err("Failed to get table, %s\n", msg);
rc = -EINVAL;
goto err;
}
rc = erst_check_table(erst_tab);
if (rc) {
- pr_err(FW_BUG ERST_PFX "ERST table is invalid\n");
+ pr_err(FW_BUG "ERST table is invalid.\n");
goto err;
}
@@ -1156,21 +1166,19 @@ static int __init erst_init(void)
rc = erst_get_erange(&erst_erange);
if (rc) {
if (rc == -ENODEV)
- pr_info(ERST_PFX
+ pr_info(
"The corresponding hardware device or firmware implementation "
"is not available.\n");
else
- pr_err(ERST_PFX
- "Failed to get Error Log Address Range.\n");
+ pr_err("Failed to get Error Log Address Range.\n");
goto err_unmap_reg;
}
r = request_mem_region(erst_erange.base, erst_erange.size, "APEI ERST");
if (!r) {
- pr_err(ERST_PFX
- "Can not request iomem region <0x%16llx-0x%16llx> for ERST.\n",
- (unsigned long long)erst_erange.base,
- (unsigned long long)erst_erange.base + erst_erange.size);
+ pr_err("Can not request [mem %#010llx-%#010llx] for ERST.\n",
+ (unsigned long long)erst_erange.base,
+ (unsigned long long)erst_erange.base + erst_erange.size - 1);
rc = -EIO;
goto err_unmap_reg;
}
@@ -1180,7 +1188,7 @@ static int __init erst_init(void)
if (!erst_erange.vaddr)
goto err_release_erange;
- pr_info(ERST_PFX
+ pr_info(
"Error Record Serialization Table (ERST) support is initialized.\n");
buf = kmalloc(erst_erange.size, GFP_KERNEL);
@@ -1192,15 +1200,15 @@ static int __init erst_init(void)
rc = pstore_register(&erst_info);
if (rc) {
if (rc != -EPERM)
- pr_info(ERST_PFX
- "Could not register with persistent store\n");
+ pr_info(
+ "Could not register with persistent store.\n");
erst_info.buf = NULL;
erst_info.bufsize = 0;
kfree(buf);
}
} else
- pr_err(ERST_PFX
- "Failed to allocate %lld bytes for persistent store error log\n",
+ pr_err(
+ "Failed to allocate %lld bytes for persistent store error log.\n",
erst_erange.size);
return 0;
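
The ERST conversion above swaps the hand-rolled ERST_PFX prefix for the generic pr_fmt() mechanism. A standalone sketch of the pattern, outside any real file:

/* pr_fmt() is expanded at each pr_*() call site, so defining it before
 * <linux/printk.h> (or #undef/redefining it afterwards, as the ERST
 * change does) prefixes every message emitted from this file. */
#define pr_fmt(fmt) "ERST: " fmt

#include <linux/printk.h>

static void example_report(void)
{
	/* Prints "ERST: [Firmware Warn]: Firmware does not respond in time." */
	pr_warn(FW_WARN "Firmware does not respond in time.\n");
}
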
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index ec9b57d428a..8ec37bbdd69 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -409,6 +409,34 @@ static void ghes_clear_estatus(struct ghes *ghes)
ghes->flags &= ~GHES_TO_CLEAR;
}
+static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int sev)
+{
+#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
+ unsigned long pfn;
+ int sec_sev = ghes_severity(gdata->error_severity);
+ struct cper_sec_mem_err *mem_err;
+ mem_err = (struct cper_sec_mem_err *)(gdata + 1);
+
+ if (sec_sev == GHES_SEV_CORRECTED &&
+ (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED) &&
+ (mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS)) {
+ pfn = mem_err->physical_addr >> PAGE_SHIFT;
+ if (pfn_valid(pfn))
+ memory_failure_queue(pfn, 0, MF_SOFT_OFFLINE);
+ else if (printk_ratelimit())
+ pr_warn(FW_WARN GHES_PFX
+ "Invalid address in generic error data: %#llx\n",
+ mem_err->physical_addr);
+ }
+ if (sev == GHES_SEV_RECOVERABLE &&
+ sec_sev == GHES_SEV_RECOVERABLE &&
+ mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
+ pfn = mem_err->physical_addr >> PAGE_SHIFT;
+ memory_failure_queue(pfn, 0, 0);
+ }
+#endif
+}
+
static void ghes_do_proc(struct ghes *ghes,
const struct acpi_hest_generic_status *estatus)
{
@@ -428,15 +456,7 @@ static void ghes_do_proc(struct ghes *ghes,
apei_mce_report_mem_error(sev == GHES_SEV_CORRECTED,
mem_err);
#endif
-#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
- if (sev == GHES_SEV_RECOVERABLE &&
- sec_sev == GHES_SEV_RECOVERABLE &&
- mem_err->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) {
- unsigned long pfn;
- pfn = mem_err->physical_addr >> PAGE_SHIFT;
- memory_failure_queue(pfn, 0, 0);
- }
-#endif
+ ghes_handle_memory_failure(gdata, sev);
}
#ifdef CONFIG_ACPI_APEI_PCIEAER
else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index f5ef5d54e4a..f5e37f32c71 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -36,6 +36,7 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <acpi/apei.h>
+#include <asm/mce.h>
#include "apei-internal.h"
@@ -121,6 +122,41 @@ int apei_hest_parse(apei_hest_func_t func, void *data)
}
EXPORT_SYMBOL_GPL(apei_hest_parse);
+/*
+ * Check if firmware advertises firmware first mode. We need the FF bit to be set
+ * along with a set of MC banks which work in FF mode.
+ */
+static int __init hest_parse_cmc(struct acpi_hest_header *hest_hdr, void *data)
+{
+#ifdef CONFIG_X86_MCE
+ int i;
+ struct acpi_hest_ia_corrected *cmc;
+ struct acpi_hest_ia_error_bank *mc_bank;
+
+ if (hest_hdr->type != ACPI_HEST_TYPE_IA32_CORRECTED_CHECK)
+ return 0;
+
+ cmc = (struct acpi_hest_ia_corrected *)hest_hdr;
+ if (!cmc->enabled)
+ return 0;
+
+ /*
+ * We expect HEST to provide a list of MC banks that report errors
+ * in firmware first mode. Otherwise, return non-zero value to
+ * indicate that we are done parsing HEST.
+ */
+ if (!(cmc->flags & ACPI_HEST_FIRMWARE_FIRST) || !cmc->num_hardware_banks)
+ return 1;
+
+ pr_info(HEST_PFX "Enabling Firmware First mode for corrected errors.\n");
+
+ mc_bank = (struct acpi_hest_ia_error_bank *)(cmc + 1);
+ for (i = 0; i < cmc->num_hardware_banks; i++, mc_bank++)
+ mce_disable_bank(mc_bank->bank_number);
+#endif
+ return 1;
+}
+
struct ghes_arr {
struct platform_device **ghes_devs;
unsigned int count;
@@ -227,6 +263,9 @@ void __init acpi_hest_init(void)
goto err;
}
+ if (!acpi_disable_cmcff)
+ apei_hest_parse(hest_parse_cmc, NULL);
+
if (!ghes_disable) {
rc = apei_hest_parse(hest_parse_ghes_count, &ghes_count);
if (rc)
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 082b4dd252a..2c9958cd7a4 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -117,6 +117,7 @@ struct acpi_battery {
struct acpi_device *device;
struct notifier_block pm_nb;
unsigned long update_time;
+ int revision;
int rate_now;
int capacity_now;
int voltage_now;
@@ -359,6 +360,7 @@ static struct acpi_offsets info_offsets[] = {
};
static struct acpi_offsets extended_info_offsets[] = {
+ {offsetof(struct acpi_battery, revision), 0},
{offsetof(struct acpi_battery, power_unit), 0},
{offsetof(struct acpi_battery, design_capacity), 0},
{offsetof(struct acpi_battery, full_charge_capacity), 0},
@@ -525,18 +527,14 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
static int acpi_battery_set_alarm(struct acpi_battery *battery)
{
acpi_status status = 0;
- union acpi_object arg0 = { .type = ACPI_TYPE_INTEGER };
- struct acpi_object_list arg_list = { 1, &arg0 };
if (!acpi_battery_present(battery) ||
!test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags))
return -ENODEV;
- arg0.integer.value = battery->alarm;
-
mutex_lock(&battery->lock);
- status = acpi_evaluate_object(battery->device->handle, "_BTP",
- &arg_list, NULL);
+ status = acpi_execute_simple_method(battery->device->handle, "_BTP",
+ battery->alarm);
mutex_unlock(&battery->lock);
if (ACPI_FAILURE(status))
@@ -548,12 +546,8 @@ static int acpi_battery_set_alarm(struct acpi_battery *battery)
static int acpi_battery_init_alarm(struct acpi_battery *battery)
{
- acpi_status status = AE_OK;
- acpi_handle handle = NULL;
-
/* See if alarms are supported, and if so, set default */
- status = acpi_get_handle(battery->device->handle, "_BTP", &handle);
- if (ACPI_FAILURE(status)) {
+ if (!acpi_has_method(battery->device->handle, "_BTP")) {
clear_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags);
return 0;
}
@@ -1034,8 +1028,6 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
if (event == ACPI_BATTERY_NOTIFY_INFO)
acpi_battery_refresh(battery);
acpi_battery_update(battery);
- acpi_bus_generate_proc_event(device, event,
- acpi_battery_present(battery));
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event,
acpi_battery_present(battery));
@@ -1066,7 +1058,7 @@ static int acpi_battery_add(struct acpi_device *device)
{
int result = 0;
struct acpi_battery *battery = NULL;
- acpi_handle handle;
+
if (!device)
return -EINVAL;
battery = kzalloc(sizeof(struct acpi_battery), GFP_KERNEL);
@@ -1078,8 +1070,7 @@ static int acpi_battery_add(struct acpi_device *device)
device->driver_data = battery;
mutex_init(&battery->lock);
mutex_init(&battery->sysfs_lock);
- if (ACPI_SUCCESS(acpi_get_handle(battery->device->handle,
- "_BIX", &handle)))
+ if (acpi_has_method(battery->device->handle, "_BIX"))
set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
result = acpi_battery_update(battery);
if (result)
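
The _BTP change above is one of several conversions to acpi_execute_simple_method(), which wraps the one-integer-argument acpi_evaluate_object() boilerplate. A sketch, assuming the helper is declared via <linux/acpi.h> with the (handle, method, u64) signature used in the hunk:

#include <linux/acpi.h>

/* Illustrative wrapper for control methods taking one integer argument;
 * equivalent to building a one-element acpi_object_list around an
 * ACPI_TYPE_INTEGER and calling acpi_evaluate_object(), as the removed
 * lines above did. */
static acpi_status example_set_trip_point(acpi_handle battery, u64 capacity)
{
	return acpi_execute_simple_method(battery, "_BTP", capacity);
}
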
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
index be603995854..a83e3c62c5a 100644
--- a/drivers/acpi/bgrt.c
+++ b/drivers/acpi/bgrt.c
@@ -51,20 +51,14 @@ static ssize_t show_yoffset(struct device *dev,
}
static DEVICE_ATTR(yoffset, S_IRUGO, show_yoffset, NULL);
-static ssize_t show_image(struct file *file, struct kobject *kobj,
+static ssize_t image_read(struct file *file, struct kobject *kobj,
struct bin_attribute *attr, char *buf, loff_t off, size_t count)
{
memcpy(buf, attr->private + off, count);
return count;
}
-static struct bin_attribute image_attr = {
- .attr = {
- .name = "image",
- .mode = S_IRUGO,
- },
- .read = show_image,
-};
+static BIN_ATTR_RO(image, 0); /* size gets filled in later */
static struct attribute *bgrt_attributes[] = {
&dev_attr_version.attr,
@@ -75,8 +69,14 @@ static struct attribute *bgrt_attributes[] = {
NULL,
};
+static struct bin_attribute *bgrt_bin_attributes[] = {
+ &bin_attr_image,
+ NULL,
+};
+
static struct attribute_group bgrt_attribute_group = {
.attrs = bgrt_attributes,
+ .bin_attrs = bgrt_bin_attributes,
};
static int __init bgrt_init(void)
@@ -86,9 +86,8 @@ static int __init bgrt_init(void)
if (!bgrt_image)
return -ENODEV;
- sysfs_bin_attr_init(&image_attr);
- image_attr.private = bgrt_image;
- image_attr.size = bgrt_image_size;
+ bin_attr_image.private = bgrt_image;
+ bin_attr_image.size = bgrt_image_size;
bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
if (!bgrt_kobj)
@@ -98,14 +97,8 @@ static int __init bgrt_init(void)
if (ret)
goto out_kobject;
- ret = sysfs_create_bin_file(bgrt_kobj, &image_attr);
- if (ret)
- goto out_group;
-
return 0;
-out_group:
- sysfs_remove_group(bgrt_kobj, &bgrt_attribute_group);
out_kobject:
kobject_put(bgrt_kobj);
return ret;
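
The bgrt conversion leans on the BIN_ATTR_RO() naming convention: the macro emits bin_attr_<name> and wires .read to <name>_read(), which is why show_image() becomes image_read(), and the group's .bin_attrs member replaces the separate sysfs_create_bin_file() call. A minimal sketch of the same pattern with hypothetical names:

#include <linux/sysfs.h>
#include <linux/string.h>

/* BIN_ATTR_RO(blob, 0) below expects a reader named blob_read(). */
static ssize_t blob_read(struct file *file, struct kobject *kobj,
			 struct bin_attribute *attr, char *buf,
			 loff_t off, size_t count)
{
	memcpy(buf, attr->private + off, count);
	return count;
}

/* Declares "struct bin_attribute bin_attr_blob" with read-only mode; the
 * size (0 here) and .private can be filled in at init time, as
 * bgrt_init() does for the image attribute. */
static BIN_ATTR_RO(blob, 0);

static struct bin_attribute *example_bin_attrs[] = {
	&bin_attr_blob,
	NULL,
};

static struct attribute_group example_group = {
	.bin_attrs = example_bin_attrs,	/* created/removed with the group */
};
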
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index cb9629638de..9515f18898b 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -192,6 +192,12 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
acpi_osi_setup("!Windows 2009");
return 0;
}
+static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
+{
+ printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
+ acpi_osi_setup("!Windows 2012");
+ return 0;
+}
static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
{
@@ -267,6 +273,30 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P305D"),
},
},
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "ASUS Zenbook Prime UX31A",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "UX31A"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Dell Inspiron 15R SE",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
+ },
+ },
+ {
+ .callback = dmi_disable_osi_win8,
+ .ident = "Lenovo ThinkPad Edge E530",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "3259A2G"),
+ },
+ },
/*
* BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index a5bb33bab44..b587ec8257b 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -89,27 +89,6 @@ static struct dmi_system_id dsdt_dmi_table[] __initdata = {
Device Management
-------------------------------------------------------------------------- */
-int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
-{
- acpi_status status;
-
- if (!device)
- return -EINVAL;
-
- /* TBD: Support fixed-feature devices */
-
- status = acpi_get_data(handle, acpi_bus_data_handler, (void **)device);
- if (ACPI_FAILURE(status) || !*device) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
- handle));
- return -ENODEV;
- }
-
- return 0;
-}
-
-EXPORT_SYMBOL(acpi_bus_get_device);
-
acpi_status acpi_bus_get_status_handle(acpi_handle handle,
unsigned long long *sta)
{
@@ -346,104 +325,6 @@ static void acpi_bus_osc_support(void)
}
/* --------------------------------------------------------------------------
- Event Management
- -------------------------------------------------------------------------- */
-
-#ifdef CONFIG_ACPI_PROC_EVENT
-static DEFINE_SPINLOCK(acpi_bus_event_lock);
-
-LIST_HEAD(acpi_bus_event_list);
-DECLARE_WAIT_QUEUE_HEAD(acpi_bus_event_queue);
-
-extern int event_is_open;
-
-int acpi_bus_generate_proc_event4(const char *device_class, const char *bus_id, u8 type, int data)
-{
- struct acpi_bus_event *event;
- unsigned long flags;
-
- /* drop event on the floor if no one's listening */
- if (!event_is_open)
- return 0;
-
- event = kzalloc(sizeof(struct acpi_bus_event), GFP_ATOMIC);
- if (!event)
- return -ENOMEM;
-
- strcpy(event->device_class, device_class);
- strcpy(event->bus_id, bus_id);
- event->type = type;
- event->data = data;
-
- spin_lock_irqsave(&acpi_bus_event_lock, flags);
- list_add_tail(&event->node, &acpi_bus_event_list);
- spin_unlock_irqrestore(&acpi_bus_event_lock, flags);
-
- wake_up_interruptible(&acpi_bus_event_queue);
-
- return 0;
-
-}
-
-EXPORT_SYMBOL_GPL(acpi_bus_generate_proc_event4);
-
-int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data)
-{
- if (!device)
- return -EINVAL;
- return acpi_bus_generate_proc_event4(device->pnp.device_class,
- device->pnp.bus_id, type, data);
-}
-
-EXPORT_SYMBOL(acpi_bus_generate_proc_event);
-
-int acpi_bus_receive_event(struct acpi_bus_event *event)
-{
- unsigned long flags;
- struct acpi_bus_event *entry = NULL;
-
- DECLARE_WAITQUEUE(wait, current);
-
-
- if (!event)
- return -EINVAL;
-
- if (list_empty(&acpi_bus_event_list)) {
-
- set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue(&acpi_bus_event_queue, &wait);
-
- if (list_empty(&acpi_bus_event_list))
- schedule();
-
- remove_wait_queue(&acpi_bus_event_queue, &wait);
- set_current_state(TASK_RUNNING);
-
- if (signal_pending(current))
- return -ERESTARTSYS;
- }
-
- spin_lock_irqsave(&acpi_bus_event_lock, flags);
- if (!list_empty(&acpi_bus_event_list)) {
- entry = list_entry(acpi_bus_event_list.next,
- struct acpi_bus_event, node);
- list_del(&entry->node);
- }
- spin_unlock_irqrestore(&acpi_bus_event_lock, flags);
-
- if (!entry)
- return -ENODEV;
-
- memcpy(event, entry, sizeof(struct acpi_bus_event));
-
- kfree(entry);
-
- return 0;
-}
-
-#endif /* CONFIG_ACPI_PROC_EVENT */
-
-/* --------------------------------------------------------------------------
Notification Handling
-------------------------------------------------------------------------- */
@@ -499,19 +380,6 @@ static void acpi_bus_check_scope(acpi_handle handle)
*/
}
-static BLOCKING_NOTIFIER_HEAD(acpi_bus_notify_list);
-int register_acpi_bus_notifier(struct notifier_block *nb)
-{
- return blocking_notifier_chain_register(&acpi_bus_notify_list, nb);
-}
-EXPORT_SYMBOL_GPL(register_acpi_bus_notifier);
-
-void unregister_acpi_bus_notifier(struct notifier_block *nb)
-{
- blocking_notifier_chain_unregister(&acpi_bus_notify_list, nb);
-}
-EXPORT_SYMBOL_GPL(unregister_acpi_bus_notifier);
-
/**
* acpi_bus_notify
* ---------------
@@ -525,9 +393,6 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Notification %#02x to handle %p\n",
type, handle));
- blocking_notifier_call_chain(&acpi_bus_notify_list,
- type, (void *)handle);
-
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
@@ -593,8 +458,6 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data)
static int __init acpi_bus_init_irq(void)
{
acpi_status status;
- union acpi_object arg = { ACPI_TYPE_INTEGER };
- struct acpi_object_list arg_list = { 1, &arg };
char *message = NULL;
@@ -623,9 +486,7 @@ static int __init acpi_bus_init_irq(void)
printk(KERN_INFO PREFIX "Using %s for interrupt routing\n", message);
- arg.integer.value = acpi_irq_model;
-
- status = acpi_evaluate_object(NULL, "\\_PIC", &arg_list, NULL);
+ status = acpi_execute_simple_method(NULL, "\\_PIC", acpi_irq_model);
if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PIC"));
return -ENODEV;
@@ -715,7 +576,6 @@ static int __init acpi_bus_init(void)
{
int result;
acpi_status status;
- extern acpi_status acpi_os_initialize1(void);
acpi_os_initialize1();
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index d2e617b5b3f..a55773801c5 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -303,8 +303,6 @@ static void acpi_button_notify(struct acpi_device *device, u32 event)
pm_wakeup_event(&device->dev, 0);
}
-
- acpi_bus_generate_proc_event(device, event, ++button->pushed);
break;
default:
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 4ab807dc851..59d3202f6b3 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -159,26 +159,29 @@ int acpi_device_set_power(struct acpi_device *device, int state)
int result = 0;
bool cut_power = false;
- if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
+ if (!device || !device->flags.power_manageable
+ || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3_COLD))
return -EINVAL;
/* Make sure this is a valid target state */
if (state == device->power.state) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n",
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] already in %s\n",
+ device->pnp.bus_id,
acpi_power_state_string(state)));
return 0;
}
if (!device->power.states[state].flags.valid) {
- printk(KERN_WARNING PREFIX "Device does not support %s\n",
- acpi_power_state_string(state));
+ dev_warn(&device->dev, "Power state %s not supported\n",
+ acpi_power_state_string(state));
return -ENODEV;
}
if (device->parent && (state < device->parent->power.state)) {
- printk(KERN_WARNING PREFIX
- "Cannot set device to a higher-powered"
- " state than parent\n");
+ dev_warn(&device->dev,
+ "Cannot transition to power state %s for parent in %s\n",
+ acpi_power_state_string(state),
+ acpi_power_state_string(device->parent->power.state));
return -ENODEV;
}
@@ -191,8 +194,8 @@ int acpi_device_set_power(struct acpi_device *device, int state)
if (state < device->power.state && state != ACPI_STATE_D0
&& device->power.state >= ACPI_STATE_D3_HOT) {
- printk(KERN_WARNING PREFIX
- "Cannot transition to non-D0 state from D3\n");
+ dev_warn(&device->dev,
+ "Cannot transition to non-D0 state from D3\n");
return -ENODEV;
}
@@ -219,10 +222,8 @@ int acpi_device_set_power(struct acpi_device *device, int state)
end:
if (result) {
- printk(KERN_WARNING PREFIX
- "Device [%s] failed to transition to %s\n",
- device->pnp.bus_id,
- acpi_power_state_string(state));
+ dev_warn(&device->dev, "Failed to change power state to %s\n",
+ acpi_power_state_string(state));
} else {
device->power.state = state;
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
@@ -244,13 +245,6 @@ int acpi_bus_set_power(acpi_handle handle, int state)
if (result)
return result;
- if (!device->flags.power_manageable) {
- ACPI_DEBUG_PRINT((ACPI_DB_INFO,
- "Device [%s] is not power manageable\n",
- dev_name(&device->dev)));
- return -ENODEV;
- }
-
return acpi_device_set_power(device, state);
}
EXPORT_SYMBOL(acpi_bus_set_power);
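
With the power_manageable test folded into acpi_device_set_power(), acpi_bus_set_power() stays a thin handle-based wrapper that now returns -EINVAL for devices that cannot be power-managed. A caller sketch (function name hypothetical):

#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/printk.h>

/* Illustrative suspend path: put a companion ACPI device into D3cold and
 * handle the firmware-cannot-power-manage case explicitly. */
static void example_power_down(acpi_handle handle)
{
	int error = acpi_bus_set_power(handle, ACPI_STATE_D3_COLD);

	if (error == -EINVAL)
		pr_debug("device is not power manageable\n");
	else if (error)
		pr_debug("failed to enter D3cold: %d\n", error);
}
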
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 82656075338..05ea4be01a8 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -51,8 +51,6 @@ MODULE_PARM_DESC(immediate_undock, "1 (default) will cause the driver to "
" the driver to wait for userspace to write the undock sysfs file "
" before undocking");
-static struct atomic_notifier_head dock_notifier_list;
-
static const struct acpi_device_id dock_device_ids[] = {
{"LNXDOCK", 0},
{"", 0},
@@ -63,8 +61,6 @@ struct dock_station {
acpi_handle handle;
unsigned long last_dock_time;
u32 flags;
- spinlock_t dd_lock;
- struct mutex hp_lock;
struct list_head dependent_devices;
struct list_head sibling;
@@ -91,6 +87,12 @@ struct dock_dependent_device {
#define DOCK_EVENT 3
#define UNDOCK_EVENT 2
+enum dock_callback_type {
+ DOCK_CALL_HANDLER,
+ DOCK_CALL_FIXUP,
+ DOCK_CALL_UEVENT,
+};
+
/*****************************************************************************
* Dock Dependent device functions *
*****************************************************************************/
@@ -101,7 +103,7 @@ struct dock_dependent_device {
*
* Add the dependent device to the dock's dependent device list.
*/
-static int
+static int __init
add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
{
struct dock_dependent_device *dd;
@@ -112,14 +114,21 @@ add_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
dd->handle = handle;
INIT_LIST_HEAD(&dd->list);
-
- spin_lock(&ds->dd_lock);
list_add_tail(&dd->list, &ds->dependent_devices);
- spin_unlock(&ds->dd_lock);
return 0;
}
+static void remove_dock_dependent_devices(struct dock_station *ds)
+{
+ struct dock_dependent_device *dd, *aux;
+
+ list_for_each_entry_safe(dd, aux, &ds->dependent_devices, list) {
+ list_del(&dd->list);
+ kfree(dd);
+ }
+}
+
/**
* dock_init_hotplug - Initialize a hotplug device on a docking station.
* @dd: Dock-dependent device.
@@ -135,19 +144,16 @@ static int dock_init_hotplug(struct dock_dependent_device *dd,
int ret = 0;
mutex_lock(&hotplug_lock);
-
- if (dd->hp_context) {
+ if (WARN_ON(dd->hp_context)) {
ret = -EEXIST;
} else {
dd->hp_refcount = 1;
dd->hp_ops = ops;
dd->hp_context = context;
dd->hp_release = release;
+ if (init)
+ init(context);
}
-
- if (!WARN_ON(ret) && init)
- init(context);
-
mutex_unlock(&hotplug_lock);
return ret;
}
@@ -162,27 +168,22 @@ static int dock_init_hotplug(struct dock_dependent_device *dd,
*/
static void dock_release_hotplug(struct dock_dependent_device *dd)
{
- void (*release)(void *) = NULL;
- void *context = NULL;
-
mutex_lock(&hotplug_lock);
-
if (dd->hp_context && !--dd->hp_refcount) {
+ void (*release)(void *) = dd->hp_release;
+ void *context = dd->hp_context;
+
dd->hp_ops = NULL;
- context = dd->hp_context;
dd->hp_context = NULL;
- release = dd->hp_release;
dd->hp_release = NULL;
+ if (release)
+ release(context);
}
-
- if (release && context)
- release(context);
-
mutex_unlock(&hotplug_lock);
}
static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
- bool uevent)
+ enum dock_callback_type cb_type)
{
acpi_notify_handler cb = NULL;
bool run = false;
@@ -192,8 +193,18 @@ static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
if (dd->hp_context) {
run = true;
dd->hp_refcount++;
- if (dd->hp_ops)
- cb = uevent ? dd->hp_ops->uevent : dd->hp_ops->handler;
+ if (dd->hp_ops) {
+ switch (cb_type) {
+ case DOCK_CALL_FIXUP:
+ cb = dd->hp_ops->fixup;
+ break;
+ case DOCK_CALL_UEVENT:
+ cb = dd->hp_ops->uevent;
+ break;
+ default:
+ cb = dd->hp_ops->handler;
+ }
+ }
}
mutex_unlock(&hotplug_lock);
@@ -220,63 +231,17 @@ find_dock_dependent_device(struct dock_station *ds, acpi_handle handle)
{
struct dock_dependent_device *dd;
- spin_lock(&ds->dd_lock);
- list_for_each_entry(dd, &ds->dependent_devices, list) {
- if (handle == dd->handle) {
- spin_unlock(&ds->dd_lock);
+ list_for_each_entry(dd, &ds->dependent_devices, list)
+ if (handle == dd->handle)
return dd;
- }
- }
- spin_unlock(&ds->dd_lock);
+
return NULL;
}
/*****************************************************************************
* Dock functions *
*****************************************************************************/
-/**
- * is_dock - see if a device is a dock station
- * @handle: acpi handle of the device
- *
- * If an acpi object has a _DCK method, then it is by definition a dock
- * station, so return true.
- */
-static int is_dock(acpi_handle handle)
-{
- acpi_status status;
- acpi_handle tmp;
-
- status = acpi_get_handle(handle, "_DCK", &tmp);
- if (ACPI_FAILURE(status))
- return 0;
- return 1;
-}
-
-static int is_ejectable(acpi_handle handle)
-{
- acpi_status status;
- acpi_handle tmp;
-
- status = acpi_get_handle(handle, "_EJ0", &tmp);
- if (ACPI_FAILURE(status))
- return 0;
- return 1;
-}
-
-static int is_ata(acpi_handle handle)
-{
- acpi_handle tmp;
-
- if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
- (ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
- (ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
- (ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
- return 1;
-
- return 0;
-}
-
-static int is_battery(acpi_handle handle)
+static int __init is_battery(acpi_handle handle)
{
struct acpi_device_info *info;
int ret = 1;
@@ -292,17 +257,13 @@ static int is_battery(acpi_handle handle)
return ret;
}
-static int is_ejectable_bay(acpi_handle handle)
+/* Check whether ACPI object is an ejectable battery or disk bay */
+static bool __init is_ejectable_bay(acpi_handle handle)
{
- acpi_handle phandle;
+ if (acpi_has_method(handle, "_EJ0") && is_battery(handle))
+ return true;
- if (!is_ejectable(handle))
- return 0;
- if (is_battery(handle) || is_ata(handle))
- return 1;
- if (!acpi_get_parent(handle, &phandle) && is_ata(phandle))
- return 1;
- return 0;
+ return acpi_bay_match(handle);
}
/**
@@ -320,7 +281,7 @@ int is_dock_device(acpi_handle handle)
if (!dock_station_count)
return 0;
- if (is_dock(handle))
+ if (acpi_dock_match(handle))
return 1;
list_for_each_entry(dock_station, &dock_stations, sibling)
@@ -359,10 +320,8 @@ static int dock_present(struct dock_station *ds)
* handle if one does not exist already. This should cause
* acpi to scan for drivers for the given devices, and call
* matching driver's add routine.
- *
- * Returns a pointer to the acpi_device corresponding to the handle.
*/
-static struct acpi_device * dock_create_acpi_device(acpi_handle handle)
+static void dock_create_acpi_device(acpi_handle handle)
{
struct acpi_device *device;
int ret;
@@ -375,10 +334,7 @@ static struct acpi_device * dock_create_acpi_device(acpi_handle handle)
ret = acpi_bus_scan(handle);
if (ret)
pr_debug("error adding bus, %x\n", -ret);
-
- acpi_bus_get_device(handle, &device);
}
- return device;
}
/**
@@ -397,9 +353,29 @@ static void dock_remove_acpi_device(acpi_handle handle)
}
/**
- * hotplug_dock_devices - insert or remove devices on the dock station
+ * hot_remove_dock_devices - Remove dock station devices.
+ * @ds: Dock station.
+ */
+static void hot_remove_dock_devices(struct dock_station *ds)
+{
+ struct dock_dependent_device *dd;
+
+ /*
+ * Walk the list in reverse order so that devices that have been added
+ * last are removed first (in case there are some indirect dependencies
+ * between them).
+ */
+ list_for_each_entry_reverse(dd, &ds->dependent_devices, list)
+ dock_hotplug_event(dd, ACPI_NOTIFY_EJECT_REQUEST, false);
+
+ list_for_each_entry_reverse(dd, &ds->dependent_devices, list)
+ dock_remove_acpi_device(dd->handle);
+}
+
+/**
+ * hotplug_dock_devices - Insert devices on a dock station.
* @ds: the dock station
- * @event: either bus check or eject request
+ * @event: either bus check or device check request
*
* Some devices on the dock station need to have drivers called
* to perform hotplug operations after a dock event has occurred.
@@ -410,27 +386,21 @@ static void hotplug_dock_devices(struct dock_station *ds, u32 event)
{
struct dock_dependent_device *dd;
- mutex_lock(&ds->hp_lock);
+ /* Call driver specific post-dock fixups. */
+ list_for_each_entry(dd, &ds->dependent_devices, list)
+ dock_hotplug_event(dd, event, DOCK_CALL_FIXUP);
- /*
- * First call driver specific hotplug functions
- */
+ /* Call driver specific hotplug functions. */
list_for_each_entry(dd, &ds->dependent_devices, list)
- dock_hotplug_event(dd, event, false);
+ dock_hotplug_event(dd, event, DOCK_CALL_HANDLER);
/*
- * Now make sure that an acpi_device is created for each
- * dependent device, or removed if this is an eject request.
- * This will cause acpi_drivers to be stopped/started if they
- * exist
+ * Now make sure that an acpi_device is created for each dependent
+ * device. That will cause scan handlers to be attached to device
+ * objects or acpi_drivers to be stopped/started if they are present.
*/
- list_for_each_entry(dd, &ds->dependent_devices, list) {
- if (event == ACPI_NOTIFY_EJECT_REQUEST)
- dock_remove_acpi_device(dd->handle);
- else
- dock_create_acpi_device(dd->handle);
- }
- mutex_unlock(&ds->hp_lock);
+ list_for_each_entry(dd, &ds->dependent_devices, list)
+ dock_create_acpi_device(dd->handle);
}
static void dock_event(struct dock_station *ds, u32 event, int num)
@@ -453,44 +423,13 @@ static void dock_event(struct dock_station *ds, u32 event, int num)
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
list_for_each_entry(dd, &ds->dependent_devices, list)
- dock_hotplug_event(dd, event, true);
+ dock_hotplug_event(dd, event, DOCK_CALL_UEVENT);
if (num != DOCK_EVENT)
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
}
/**
- * eject_dock - respond to a dock eject request
- * @ds: the dock station
- *
- * This is called after _DCK is called, to execute the dock station's
- * _EJ0 method.
- */
-static void eject_dock(struct dock_station *ds)
-{
- struct acpi_object_list arg_list;
- union acpi_object arg;
- acpi_status status;
- acpi_handle tmp;
-
- /* all dock devices should have _EJ0, but check anyway */
- status = acpi_get_handle(ds->handle, "_EJ0", &tmp);
- if (ACPI_FAILURE(status)) {
- pr_debug("No _EJ0 support for dock device\n");
- return;
- }
-
- arg_list.count = 1;
- arg_list.pointer = &arg;
- arg.type = ACPI_TYPE_INTEGER;
- arg.integer.value = 1;
-
- status = acpi_evaluate_object(ds->handle, "_EJ0", &arg_list, NULL);
- if (ACPI_FAILURE(status))
- pr_debug("Failed to evaluate _EJ0!\n");
-}
-
-/**
* handle_dock - handle a dock event
* @ds: the dock station
* @dock: to dock, or undock - that is the question
@@ -550,27 +489,6 @@ static inline void complete_undock(struct dock_station *ds)
ds->flags &= ~(DOCK_UNDOCKING);
}
-static void dock_lock(struct dock_station *ds, int lock)
-{
- struct acpi_object_list arg_list;
- union acpi_object arg;
- acpi_status status;
-
- arg_list.count = 1;
- arg_list.pointer = &arg;
- arg.type = ACPI_TYPE_INTEGER;
- arg.integer.value = !!lock;
- status = acpi_evaluate_object(ds->handle, "_LCK", &arg_list, NULL);
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- if (lock)
- acpi_handle_warn(ds->handle,
- "Locking device failed (0x%x)\n", status);
- else
- acpi_handle_warn(ds->handle,
- "Unlocking device failed (0x%x)\n", status);
- }
-}
-
/**
* dock_in_progress - see if we are in the middle of handling a dock event
* @ds: the dock station
@@ -588,37 +506,6 @@ static int dock_in_progress(struct dock_station *ds)
}
/**
- * register_dock_notifier - add yourself to the dock notifier list
- * @nb: the callers notifier block
- *
- * If a driver wishes to be notified about dock events, they can
- * use this function to put a notifier block on the dock notifier list.
- * this notifier call chain will be called after a dock event, but
- * before hotplugging any new devices.
- */
-int register_dock_notifier(struct notifier_block *nb)
-{
- if (!dock_station_count)
- return -ENODEV;
-
- return atomic_notifier_chain_register(&dock_notifier_list, nb);
-}
-EXPORT_SYMBOL_GPL(register_dock_notifier);
-
-/**
- * unregister_dock_notifier - remove yourself from the dock notifier list
- * @nb: the callers notifier block
- */
-void unregister_dock_notifier(struct notifier_block *nb)
-{
- if (!dock_station_count)
- return;
-
- atomic_notifier_chain_unregister(&dock_notifier_list, nb);
-}
-EXPORT_SYMBOL_GPL(unregister_dock_notifier);
-
-/**
* register_hotplug_dock_device - register a hotplug function
* @handle: the handle of the device
* @ops: handlers to call after docking
@@ -703,10 +590,10 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
*/
dock_event(ds, event, UNDOCK_EVENT);
- hotplug_dock_devices(ds, ACPI_NOTIFY_EJECT_REQUEST);
+ hot_remove_dock_devices(ds);
undock(ds);
- dock_lock(ds, 0);
- eject_dock(ds);
+ acpi_evaluate_lck(ds->handle, 0);
+ acpi_evaluate_ej0(ds->handle);
if (dock_present(ds)) {
acpi_handle_err(ds->handle, "Unable to undock!\n");
return -EBUSY;
@@ -717,18 +604,17 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
/**
* dock_notify - act upon an acpi dock notification
- * @handle: the dock station handle
+ * @ds: dock station
* @event: the acpi event
- * @data: our driver data struct
*
* If we are notified to dock, then check to see if the dock is
* present and then dock. Notify all drivers of the dock event,
* and then hotplug any devices that may need hotplugging.
*/
-static void dock_notify(acpi_handle handle, u32 event, void *data)
+static void dock_notify(struct dock_station *ds, u32 event)
{
- struct dock_station *ds = data;
- struct acpi_device *tmp;
+ acpi_handle handle = ds->handle;
+ struct acpi_device *ad;
int surprise_removal = 0;
/*
@@ -751,8 +637,7 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
- if (!dock_in_progress(ds) && acpi_bus_get_device(ds->handle,
- &tmp)) {
+ if (!dock_in_progress(ds) && acpi_bus_get_device(handle, &ad)) {
begin_dock(ds);
dock(ds);
if (!dock_present(ds)) {
@@ -760,12 +645,10 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
complete_dock(ds);
break;
}
- atomic_notifier_call_chain(&dock_notifier_list,
- event, NULL);
hotplug_dock_devices(ds, event);
complete_dock(ds);
dock_event(ds, event, DOCK_EVENT);
- dock_lock(ds, 1);
+ acpi_evaluate_lck(ds->handle, 1);
acpi_update_all_gpes();
break;
}
@@ -789,9 +672,8 @@ static void dock_notify(acpi_handle handle, u32 event, void *data)
}
struct dock_data {
- acpi_handle handle;
- unsigned long event;
struct dock_station *ds;
+ u32 event;
};
static void acpi_dock_deferred_cb(void *context)
@@ -799,52 +681,31 @@ static void acpi_dock_deferred_cb(void *context)
struct dock_data *data = context;
acpi_scan_lock_acquire();
- dock_notify(data->handle, data->event, data->ds);
+ dock_notify(data->ds, data->event);
acpi_scan_lock_release();
kfree(data);
}
-static int acpi_dock_notifier_call(struct notifier_block *this,
- unsigned long event, void *data)
+static void dock_notify_handler(acpi_handle handle, u32 event, void *data)
{
- struct dock_station *dock_station;
- acpi_handle handle = data;
+ struct dock_data *dd;
if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
&& event != ACPI_NOTIFY_EJECT_REQUEST)
- return 0;
-
- acpi_scan_lock_acquire();
-
- list_for_each_entry(dock_station, &dock_stations, sibling) {
- if (dock_station->handle == handle) {
- struct dock_data *dd;
- acpi_status status;
-
- dd = kmalloc(sizeof(*dd), GFP_KERNEL);
- if (!dd)
- break;
+ return;
- dd->handle = handle;
- dd->event = event;
- dd->ds = dock_station;
- status = acpi_os_hotplug_execute(acpi_dock_deferred_cb,
- dd);
- if (ACPI_FAILURE(status))
- kfree(dd);
+ dd = kmalloc(sizeof(*dd), GFP_KERNEL);
+ if (dd) {
+ acpi_status status;
- break;
- }
+ dd->ds = data;
+ dd->event = event;
+ status = acpi_os_hotplug_execute(acpi_dock_deferred_cb, dd);
+ if (ACPI_FAILURE(status))
+ kfree(dd);
}
-
- acpi_scan_lock_release();
- return 0;
}
-static struct notifier_block dock_acpi_notifier = {
- .notifier_call = acpi_dock_notifier_call,
-};
-
/**
* find_dock_devices - find devices on the dock station
* @handle: the handle of the device we are examining
@@ -856,29 +717,16 @@ static struct notifier_block dock_acpi_notifier = {
* check to see if an object has an _EJD method. If it does, then it
* will see if it is dependent on the dock station.
*/
-static acpi_status
-find_dock_devices(acpi_handle handle, u32 lvl, void *context, void **rv)
+static acpi_status __init find_dock_devices(acpi_handle handle, u32 lvl,
+ void *context, void **rv)
{
- acpi_status status;
- acpi_handle tmp, parent;
struct dock_station *ds = context;
+ acpi_handle ejd = NULL;
- status = acpi_bus_get_ejd(handle, &tmp);
- if (ACPI_FAILURE(status)) {
- /* try the parent device as well */
- status = acpi_get_parent(handle, &parent);
- if (ACPI_FAILURE(status))
- goto fdd_out;
- /* see if parent is dependent on dock */
- status = acpi_bus_get_ejd(parent, &tmp);
- if (ACPI_FAILURE(status))
- goto fdd_out;
- }
-
- if (tmp == ds->handle)
+ acpi_bus_get_ejd(handle, &ejd);
+ if (ejd == ds->handle)
add_dock_dependent_device(ds, handle);
-fdd_out:
return AE_OK;
}
@@ -988,13 +836,13 @@ static struct attribute_group dock_attribute_group = {
*/
static int __init dock_add(acpi_handle handle)
{
- int ret, id;
- struct dock_station ds, *dock_station;
+ struct dock_station *dock_station, ds = { NULL, };
struct platform_device *dd;
+ acpi_status status;
+ int ret;
- id = dock_station_count;
- memset(&ds, 0, sizeof(ds));
- dd = platform_device_register_data(NULL, "dock", id, &ds, sizeof(ds));
+ dd = platform_device_register_data(NULL, "dock", dock_station_count,
+ &ds, sizeof(ds));
if (IS_ERR(dd))
return PTR_ERR(dd);
@@ -1004,18 +852,15 @@ static int __init dock_add(acpi_handle handle)
dock_station->dock_device = dd;
dock_station->last_dock_time = jiffies - HZ;
- mutex_init(&dock_station->hp_lock);
- spin_lock_init(&dock_station->dd_lock);
INIT_LIST_HEAD(&dock_station->sibling);
- ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
INIT_LIST_HEAD(&dock_station->dependent_devices);
/* we want the dock device to send uevents */
dev_set_uevent_suppress(&dd->dev, 0);
- if (is_dock(handle))
+ if (acpi_dock_match(handle))
dock_station->flags |= DOCK_IS_DOCK;
- if (is_ata(handle))
+ if (acpi_ata_match(handle))
dock_station->flags |= DOCK_IS_ATA;
if (is_battery(handle))
dock_station->flags |= DOCK_IS_BAT;
@@ -1034,11 +879,19 @@ static int __init dock_add(acpi_handle handle)
if (ret)
goto err_rmgroup;
+ status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ dock_notify_handler, dock_station);
+ if (ACPI_FAILURE(status)) {
+ ret = -ENODEV;
+ goto err_rmgroup;
+ }
+
dock_station_count++;
list_add(&dock_station->sibling, &dock_stations);
return 0;
err_rmgroup:
+ remove_dock_dependent_devices(dock_station);
sysfs_remove_group(&dd->dev.kobj, &dock_attribute_group);
err_unregister:
platform_device_unregister(dd);
@@ -1055,10 +908,10 @@ err_unregister:
*
* This is called by acpi_walk_namespace to look for dock stations and bays.
*/
-static __init acpi_status
+static acpi_status __init
find_dock_and_bay(acpi_handle handle, u32 lvl, void *context, void **rv)
{
- if (is_dock(handle) || is_ejectable_bay(handle))
+ if (acpi_dock_match(handle) || is_ejectable_bay(handle))
dock_add(handle);
return AE_OK;
@@ -1078,7 +931,6 @@ void __init acpi_dock_init(void)
return;
}
- register_acpi_bus_notifier(&dock_acpi_notifier);
pr_info(PREFIX "%s: %d docks/bays found\n",
ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count);
}
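Sketch (illustrative, not part of the patch): the dock.c hunks above swap the open-coded dock_lock()/eject_dock() pair for acpi_evaluate_lck() and acpi_evaluate_ej0(), whose definitions land in utils.c outside the hunks shown here. Based on the removed dock_lock() body, the _LCK wrapper presumably amounts to something like the following; the name sketch_evaluate_lck() is mine, and acpi_evaluate_ej0() presumably evaluates _EJ0 with an argument of 1, as the code removed from scan.c further down suggests.

static acpi_status sketch_evaluate_lck(acpi_handle handle, int lock)
{
        struct acpi_object_list arg_list;
        union acpi_object arg;
        acpi_status status;

        /* _LCK takes a single integer: 1 to lock, 0 to unlock. */
        arg_list.count = 1;
        arg_list.pointer = &arg;
        arg.type = ACPI_TYPE_INTEGER;
        arg.integer.value = !!lock;
        status = acpi_evaluate_object(handle, "_LCK", &arg_list, NULL);
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
                acpi_handle_warn(handle, "%s device failed (0x%x)\n",
                                 lock ? "Locking" : "Unlocking", status);
        return status;
}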
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 80403c1a89f..a06d9837470 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -948,7 +948,7 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
return 0;
}
-static struct dmi_system_id __initdata ec_dmi_table[] = {
+static struct dmi_system_id ec_dmi_table[] __initdata = {
{
ec_skip_dsdt_scan, "Compal JFL92", {
DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
@@ -987,6 +987,10 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
ec_skip_dsdt_scan, "HP Folio 13", {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL},
+ {
+ ec_validate_ecdt, "ASUS hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
{},
};
@@ -1049,10 +1053,8 @@ int __init acpi_ec_ecdt_probe(void)
* which needs it, has a fake EC._INI method, so use it as a flag.
* Keep boot_ec struct as it will be needed soon.
*/
- acpi_handle dummy;
if (!dmi_name_in_vendors("ASUS") ||
- ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI",
- &dummy)))
+ !acpi_has_method(boot_ec->handle, "_INI"))
return -ENODEV;
}
install:
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c
index 1442737cede..8247fcdde07 100644
--- a/drivers/acpi/event.c
+++ b/drivers/acpi/event.c
@@ -21,100 +21,6 @@
#define _COMPONENT ACPI_SYSTEM_COMPONENT
ACPI_MODULE_NAME("event");
-#ifdef CONFIG_ACPI_PROC_EVENT
-/* Global vars for handling event proc entry */
-static DEFINE_SPINLOCK(acpi_system_event_lock);
-int event_is_open = 0;
-extern struct list_head acpi_bus_event_list;
-extern wait_queue_head_t acpi_bus_event_queue;
-
-static int acpi_system_open_event(struct inode *inode, struct file *file)
-{
- spin_lock_irq(&acpi_system_event_lock);
-
- if (event_is_open)
- goto out_busy;
-
- event_is_open = 1;
-
- spin_unlock_irq(&acpi_system_event_lock);
- return 0;
-
- out_busy:
- spin_unlock_irq(&acpi_system_event_lock);
- return -EBUSY;
-}
-
-static ssize_t
-acpi_system_read_event(struct file *file, char __user * buffer, size_t count,
- loff_t * ppos)
-{
- int result = 0;
- struct acpi_bus_event event;
- static char str[ACPI_MAX_STRING];
- static int chars_remaining = 0;
- static char *ptr;
-
- if (!chars_remaining) {
- memset(&event, 0, sizeof(struct acpi_bus_event));
-
- if ((file->f_flags & O_NONBLOCK)
- && (list_empty(&acpi_bus_event_list)))
- return -EAGAIN;
-
- result = acpi_bus_receive_event(&event);
- if (result)
- return result;
-
- chars_remaining = sprintf(str, "%s %s %08x %08x\n",
- event.device_class ? event.
- device_class : "<unknown>",
- event.bus_id ? event.
- bus_id : "<unknown>", event.type,
- event.data);
- ptr = str;
- }
-
- if (chars_remaining < count) {
- count = chars_remaining;
- }
-
- if (copy_to_user(buffer, ptr, count))
- return -EFAULT;
-
- *ppos += count;
- chars_remaining -= count;
- ptr += count;
-
- return count;
-}
-
-static int acpi_system_close_event(struct inode *inode, struct file *file)
-{
- spin_lock_irq(&acpi_system_event_lock);
- event_is_open = 0;
- spin_unlock_irq(&acpi_system_event_lock);
- return 0;
-}
-
-static unsigned int acpi_system_poll_event(struct file *file, poll_table * wait)
-{
- poll_wait(file, &acpi_bus_event_queue, wait);
- if (!list_empty(&acpi_bus_event_list))
- return POLLIN | POLLRDNORM;
- return 0;
-}
-
-static const struct file_operations acpi_system_event_ops = {
- .owner = THIS_MODULE,
- .open = acpi_system_open_event,
- .read = acpi_system_read_event,
- .release = acpi_system_close_event,
- .poll = acpi_system_poll_event,
- .llseek = default_llseek,
-};
-#endif /* CONFIG_ACPI_PROC_EVENT */
-
/* ACPI notifier chain */
static BLOCKING_NOTIFIER_HEAD(acpi_chain_head);
@@ -280,9 +186,6 @@ static int acpi_event_genetlink_init(void)
static int __init acpi_event_init(void)
{
-#ifdef CONFIG_ACPI_PROC_EVENT
- struct proc_dir_entry *entry;
-#endif
int error = 0;
if (acpi_disabled)
@@ -293,15 +196,6 @@ static int __init acpi_event_init(void)
if (error)
printk(KERN_WARNING PREFIX
"Failed to create genetlink family for ACPI event\n");
-
-#ifdef CONFIG_ACPI_PROC_EVENT
- /* 'event' [R] */
- entry = proc_create("event", S_IRUSR, acpi_root_dir,
- &acpi_system_event_ops);
- if (!entry)
- return -ENODEV;
-#endif
-
return 0;
}
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 5b02a0aa540..41ade6570bc 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -93,7 +93,7 @@ static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long
if (result)
return result;
- *state = (acpi_state == ACPI_STATE_D3 ? 0 :
+ *state = (acpi_state == ACPI_STATE_D3_COLD ? 0 :
(acpi_state == ACPI_STATE_D0 ? 1 : -1));
return 0;
}
@@ -108,7 +108,7 @@ fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
return -EINVAL;
result = acpi_bus_set_power(device->handle,
- state ? ACPI_STATE_D0 : ACPI_STATE_D3);
+ state ? ACPI_STATE_D0 : ACPI_STATE_D3_COLD);
return result;
}
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index f68095756fb..94672297e1b 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -31,6 +31,7 @@ static LIST_HEAD(bus_type_list);
static DECLARE_RWSEM(bus_type_sem);
#define PHYSICAL_NODE_STRING "physical_node"
+#define PHYSICAL_NODE_NAME_SIZE (sizeof(PHYSICAL_NODE_STRING) + 10)
int register_acpi_bus_type(struct acpi_bus_type *type)
{
@@ -78,41 +79,117 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev)
return ret;
}
-static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used,
- void *addr_p, void **ret_p)
+static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used,
+ void *not_used, void **ret_p)
{
- unsigned long long addr, sta;
+ struct acpi_device *adev = NULL;
+
+ acpi_bus_get_device(handle, &adev);
+ if (adev) {
+ *ret_p = handle;
+ return AE_CTRL_TERMINATE;
+ }
+ return AE_OK;
+}
+
+static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge)
+{
+ unsigned long long sta;
+ acpi_status status;
+
+ status = acpi_bus_get_status_handle(handle, &sta);
+ if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED))
+ return false;
+
+ if (is_bridge) {
+ void *test = NULL;
+
+ /* Check if this object has at least one child device. */
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ acpi_dev_present, NULL, NULL, &test);
+ return !!test;
+ }
+ return true;
+}
+
+struct find_child_context {
+ u64 addr;
+ bool is_bridge;
+ acpi_handle ret;
+ bool ret_checked;
+};
+
+static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used,
+ void *data, void **not_used)
+{
+ struct find_child_context *context = data;
+ unsigned long long addr;
acpi_status status;
status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr);
- if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) {
- *ret_p = handle;
- status = acpi_bus_get_status_handle(handle, &sta);
- if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_ENABLED))
+ if (ACPI_FAILURE(status) || addr != context->addr)
+ return AE_OK;
+
+ if (!context->ret) {
+ /* This is the first matching object. Save its handle. */
+ context->ret = handle;
+ return AE_OK;
+ }
+ /*
+ * There is more than one matching object with the same _ADR value.
+ * That really is unexpected, so we are kind of beyond the scope of the
+ * spec here. We have to choose which one to return, though.
+ *
+ * First, check if the previously found object is good enough and return
+ * its handle if so. Second, check the same for the object that we've
+ * just found.
+ */
+ if (!context->ret_checked) {
+ if (acpi_extra_checks_passed(context->ret, context->is_bridge))
return AE_CTRL_TERMINATE;
+ else
+ context->ret_checked = true;
+ }
+ if (acpi_extra_checks_passed(handle, context->is_bridge)) {
+ context->ret = handle;
+ return AE_CTRL_TERMINATE;
}
return AE_OK;
}
-acpi_handle acpi_get_child(acpi_handle parent, u64 address)
+acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge)
{
- void *ret = NULL;
-
- if (!parent)
- return NULL;
+ if (parent) {
+ struct find_child_context context = {
+ .addr = addr,
+ .is_bridge = is_bridge,
+ };
+
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, do_find_child,
+ NULL, &context, NULL);
+ return context.ret;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(acpi_find_child);
- acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL,
- do_acpi_find_child, &address, &ret);
- return (acpi_handle)ret;
+static void acpi_physnode_link_name(char *buf, unsigned int node_id)
+{
+ if (node_id > 0)
+ snprintf(buf, PHYSICAL_NODE_NAME_SIZE,
+ PHYSICAL_NODE_STRING "%u", node_id);
+ else
+ strcpy(buf, PHYSICAL_NODE_STRING);
}
-EXPORT_SYMBOL(acpi_get_child);
int acpi_bind_one(struct device *dev, acpi_handle handle)
{
struct acpi_device *acpi_dev;
acpi_status status;
struct acpi_device_physical_node *physical_node, *pn;
- char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
+ char physical_node_name[PHYSICAL_NODE_NAME_SIZE];
+ struct list_head *physnode_list;
+ unsigned int node_id;
int retval = -EINVAL;
if (ACPI_HANDLE(dev)) {
@@ -139,41 +216,53 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
mutex_lock(&acpi_dev->physical_node_lock);
- /* Sanity check. */
- list_for_each_entry(pn, &acpi_dev->physical_node_list, node)
+ /*
+ * Keep the list sorted by node_id so that the IDs of removed nodes can
+ * be recycled easily.
+ */
+ physnode_list = &acpi_dev->physical_node_list;
+ node_id = 0;
+ list_for_each_entry(pn, &acpi_dev->physical_node_list, node) {
+ /* Sanity check. */
if (pn->dev == dev) {
+ mutex_unlock(&acpi_dev->physical_node_lock);
+
dev_warn(dev, "Already associated with ACPI node\n");
- goto err_free;
- }
+ kfree(physical_node);
+ if (ACPI_HANDLE(dev) != handle)
+ goto err;
- /* allocate physical node id according to physical_node_id_bitmap */
- physical_node->node_id =
- find_first_zero_bit(acpi_dev->physical_node_id_bitmap,
- ACPI_MAX_PHYSICAL_NODE);
- if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) {
- retval = -ENOSPC;
- goto err_free;
+ put_device(dev);
+ return 0;
+ }
+ if (pn->node_id == node_id) {
+ physnode_list = &pn->node;
+ node_id++;
+ }
}
- set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap);
+ physical_node->node_id = node_id;
physical_node->dev = dev;
- list_add_tail(&physical_node->node, &acpi_dev->physical_node_list);
+ list_add(&physical_node->node, physnode_list);
acpi_dev->physical_node_count++;
- mutex_unlock(&acpi_dev->physical_node_lock);
-
if (!ACPI_HANDLE(dev))
ACPI_HANDLE_SET(dev, acpi_dev->handle);
- if (!physical_node->node_id)
- strcpy(physical_node_name, PHYSICAL_NODE_STRING);
- else
- sprintf(physical_node_name,
- "physical_node%d", physical_node->node_id);
+ acpi_physnode_link_name(physical_node_name, node_id);
retval = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
- physical_node_name);
+ physical_node_name);
+ if (retval)
+ dev_err(&acpi_dev->dev, "Failed to create link %s (%d)\n",
+ physical_node_name, retval);
+
retval = sysfs_create_link(&dev->kobj, &acpi_dev->dev.kobj,
- "firmware_node");
+ "firmware_node");
+ if (retval)
+ dev_err(dev, "Failed to create link firmware_node (%d)\n",
+ retval);
+
+ mutex_unlock(&acpi_dev->physical_node_lock);
if (acpi_dev->wakeup.flags.valid)
device_set_wakeup_capable(dev, true);
@@ -184,11 +273,6 @@ int acpi_bind_one(struct device *dev, acpi_handle handle)
ACPI_HANDLE_SET(dev, NULL);
put_device(dev);
return retval;
-
- err_free:
- mutex_unlock(&acpi_dev->physical_node_lock);
- kfree(physical_node);
- goto err;
}
EXPORT_SYMBOL_GPL(acpi_bind_one);
@@ -197,49 +281,37 @@ int acpi_unbind_one(struct device *dev)
struct acpi_device_physical_node *entry;
struct acpi_device *acpi_dev;
acpi_status status;
- struct list_head *node, *next;
if (!ACPI_HANDLE(dev))
return 0;
status = acpi_bus_get_device(ACPI_HANDLE(dev), &acpi_dev);
- if (ACPI_FAILURE(status))
- goto err;
+ if (ACPI_FAILURE(status)) {
+ dev_err(dev, "Oops, ACPI handle corrupt in %s()\n", __func__);
+ return -EINVAL;
+ }
mutex_lock(&acpi_dev->physical_node_lock);
- list_for_each_safe(node, next, &acpi_dev->physical_node_list) {
- char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2];
- entry = list_entry(node, struct acpi_device_physical_node,
- node);
- if (entry->dev != dev)
- continue;
+ list_for_each_entry(entry, &acpi_dev->physical_node_list, node)
+ if (entry->dev == dev) {
+ char physnode_name[PHYSICAL_NODE_NAME_SIZE];
- list_del(node);
- clear_bit(entry->node_id, acpi_dev->physical_node_id_bitmap);
+ list_del(&entry->node);
+ acpi_dev->physical_node_count--;
- acpi_dev->physical_node_count--;
+ acpi_physnode_link_name(physnode_name, entry->node_id);
+ sysfs_remove_link(&acpi_dev->dev.kobj, physnode_name);
+ sysfs_remove_link(&dev->kobj, "firmware_node");
+ ACPI_HANDLE_SET(dev, NULL);
+ /* acpi_bind_one() increases refcnt by one. */
+ put_device(dev);
+ kfree(entry);
+ break;
+ }
- if (!entry->node_id)
- strcpy(physical_node_name, PHYSICAL_NODE_STRING);
- else
- sprintf(physical_node_name,
- "physical_node%d", entry->node_id);
-
- sysfs_remove_link(&acpi_dev->dev.kobj, physical_node_name);
- sysfs_remove_link(&dev->kobj, "firmware_node");
- ACPI_HANDLE_SET(dev, NULL);
- /* acpi_bind_one increase refcnt by one */
- put_device(dev);
- kfree(entry);
- }
mutex_unlock(&acpi_dev->physical_node_lock);
-
return 0;
-
-err:
- dev_err(dev, "Oops, 'acpi_handle' corrupt\n");
- return -EINVAL;
}
EXPORT_SYMBOL_GPL(acpi_unbind_one);
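Sketch (hypothetical caller, not part of the patch): acpi_find_child() replaces acpi_get_child() and, when several namespace objects share the same _ADR, prefers one that is enabled per _STA and, for bridges, one that actually has child devices; acpi_physnode_link_name() meanwhile makes the sysfs links come out as "physical_node", "physical_node1", "physical_node2", and so on. A PCI glue caller could look up the companion of a function under its parent bridge roughly like this:

static acpi_handle sketch_pci_companion(acpi_handle parent,
                                        unsigned int devfn, bool is_bridge)
{
        /* PCI encodes _ADR as (device << 16) | function. */
        u64 addr = ((u64)PCI_SLOT(devfn) << 16) | PCI_FUNC(devfn);

        return acpi_find_child(parent, addr, is_bridge);
}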
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 227aca77ee1..20f423337e1 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -23,6 +23,7 @@
#define PREFIX "ACPI: "
+acpi_status acpi_os_initialize1(void);
int init_acpi_device_notify(void);
int acpi_scan_init(void);
#ifdef CONFIG_ACPI_PCI_SLOT
@@ -169,10 +170,8 @@ int acpi_create_platform_device(struct acpi_device *adev,
-------------------------------------------------------------------------- */
#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
bool acpi_video_backlight_quirks(void);
-bool acpi_video_verify_backlight_support(void);
#else
static inline bool acpi_video_backlight_quirks(void) { return false; }
-static inline bool acpi_video_verify_backlight_support(void) { return false; }
#endif
#endif /* _ACPI_INTERNAL_H_ */
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 33e609f6358..2e82e5d7693 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -159,7 +159,7 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
* distance than the others.
* Do some quick checks here and only use the SLIT if it passes.
*/
-static __init int slit_valid(struct acpi_table_slit *slit)
+static int __init slit_valid(struct acpi_table_slit *slit)
{
int i, j;
int d = slit->locality_count;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 6ab2c350552..e5f416c7f66 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -52,6 +52,7 @@
#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
+#include "internal.h"
#define _COMPONENT ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");
@@ -79,6 +80,8 @@ extern char line_buf[80];
static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
u32 pm1b_ctrl);
+static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
+ u32 val_b);
static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
@@ -140,7 +143,8 @@ static struct osi_linux {
unsigned int enable:1;
unsigned int dmi:1;
unsigned int cmdline:1;
-} osi_linux = {0, 0, 0};
+ unsigned int default_disabling:1;
+} osi_linux = {0, 0, 0, 0};
static u32 acpi_osi_handler(acpi_string interface, u32 supported)
{
@@ -563,10 +567,6 @@ static const char * const table_sigs[] = {
ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };
-/* Non-fatal errors: Affected tables/files are ignored */
-#define INVALID_TABLE(x, path, name) \
- { pr_err("ACPI OVERRIDE: " x " [%s%s]\n", path, name); continue; }
-
#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
/* Must not exceed 10 without modifying the code below */
@@ -593,9 +593,11 @@ void __init acpi_initrd_override(void *data, size_t size)
data += offset;
size -= offset;
- if (file.size < sizeof(struct acpi_table_header))
- INVALID_TABLE("Table smaller than ACPI header",
- cpio_path, file.name);
+ if (file.size < sizeof(struct acpi_table_header)) {
+ pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
+ cpio_path, file.name);
+ continue;
+ }
table = file.data;
@@ -603,15 +605,21 @@ void __init acpi_initrd_override(void *data, size_t size)
if (!memcmp(table->signature, table_sigs[sig], 4))
break;
- if (!table_sigs[sig])
- INVALID_TABLE("Unknown signature",
- cpio_path, file.name);
- if (file.size != table->length)
- INVALID_TABLE("File length does not match table length",
- cpio_path, file.name);
- if (acpi_table_checksum(file.data, table->length))
- INVALID_TABLE("Bad table checksum",
- cpio_path, file.name);
+ if (!table_sigs[sig]) {
+ pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
+ cpio_path, file.name);
+ continue;
+ }
+ if (file.size != table->length) {
+ pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
+ cpio_path, file.name);
+ continue;
+ }
+ if (acpi_table_checksum(file.data, table->length)) {
+ pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
+ cpio_path, file.name);
+ continue;
+ }
pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
table->signature, cpio_path, file.name, table->length);
@@ -1351,8 +1359,8 @@ struct osi_setup_entry {
bool enable;
};
-static struct osi_setup_entry __initdata
- osi_setup_entries[OSI_STRING_ENTRIES_MAX] = {
+static struct osi_setup_entry
+ osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
{"Module Device", true},
{"Processor Device", true},
{"3.0 _SCP Extensions", true},
@@ -1376,6 +1384,17 @@ void __init acpi_osi_setup(char *str)
if (*str == '!') {
str++;
+ if (*str == '\0') {
+ osi_linux.default_disabling = 1;
+ return;
+ } else if (*str == '*') {
+ acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
+ for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
+ osi = &osi_setup_entries[i];
+ osi->enable = false;
+ }
+ return;
+ }
enable = false;
}
@@ -1441,6 +1460,13 @@ static void __init acpi_osi_setup_late(void)
int i;
acpi_status status;
+ if (osi_linux.default_disabling) {
+ status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
+
+ if (ACPI_SUCCESS(status))
+ printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
+ }
+
for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
osi = &osi_setup_entries[i];
str = osi->string;
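Usage note (semantics inferred from the parsing added above, not spelled out in the patch): a bare "acpi_osi=!" only sets default_disabling here, and the actual acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS) call is deferred to acpi_osi_setup_late(), so a later acpi_osi="string" can still re-enable a single vendor string; "acpi_osi=!*" instead disables every _OSI string, including the Linux feature strings in osi_setup_entries, right away. Illustrative command lines:

        acpi_osi=! acpi_osi="Windows 2009"      disable all vendor strings, then allow one back
        acpi_osi=!*                             disable every _OSI string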
@@ -1779,6 +1805,28 @@ void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
__acpi_os_prepare_sleep = func;
}
+acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
+ u32 val_b)
+{
+ int rc = 0;
+ if (__acpi_os_prepare_extended_sleep)
+ rc = __acpi_os_prepare_extended_sleep(sleep_state,
+ val_a, val_b);
+ if (rc < 0)
+ return AE_ERROR;
+ else if (rc > 0)
+ return AE_CTRL_SKIP;
+
+ return AE_OK;
+}
+
+void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
+ u32 val_a, u32 val_b))
+{
+ __acpi_os_prepare_extended_sleep = func;
+}
+
+
void alloc_acpi_hp_work(acpi_handle handle, u32 type, void *context,
void (*func)(struct work_struct *work))
{
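Sketch (hypothetical hook, not part of the patch): acpi_os_prepare_extended_sleep() mirrors the existing __acpi_os_prepare_sleep hook for the extended sleep path, mapping a callback's return value of 0 to AE_OK, a positive value to AE_CTRL_SKIP and a negative value to AE_ERROR. A platform layer would register such a hook roughly as follows; the names and the pr_info() are mine.

static int sketch_prepare_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
{
        /* val_a/val_b are whatever ACPICA hands over for the target state. */
        pr_info("preparing extended sleep S%u (0x%x/0x%x)\n",
                sleep_state, val_a, val_b);
        return 0;       /* 0: continue, > 0: skip, < 0: fail */
}

static void sketch_register_sleep_hook(void)
{
        acpi_os_set_prepare_extended_sleep(sketch_prepare_extended_sleep);
}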
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 5917839321b..d3874f42565 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -378,6 +378,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
struct acpi_pci_root *root;
u32 flags, base_flags;
acpi_handle handle = device->handle;
+ bool no_aspm = false, clear_aspm = false;
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
@@ -437,27 +438,6 @@ static int acpi_pci_root_add(struct acpi_device *device,
flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
acpi_pci_osc_support(root, flags);
- /*
- * TBD: Need PCI interface for enumeration/configuration of roots.
- */
-
- /*
- * Scan the Root Bridge
- * --------------------
- * Must do this prior to any attempt to bind the root device, as the
- * PCI namespace does not get created until this call is made (and
- * thus the root bridge's pci_dev does not exist).
- */
- root->bus = pci_acpi_scan_root(root);
- if (!root->bus) {
- dev_err(&device->dev,
- "Bus %04x:%02x not present in PCI namespace\n",
- root->segment, (unsigned int)root->secondary.start);
- result = -ENODEV;
- goto end;
- }
-
- /* Indicate support for various _OSC capabilities. */
if (pci_ext_cfg_avail())
flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
if (pcie_aspm_support_enabled()) {
@@ -471,7 +451,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
if (ACPI_FAILURE(status)) {
dev_info(&device->dev, "ACPI _OSC support "
"notification failed, disabling PCIe ASPM\n");
- pcie_no_aspm();
+ no_aspm = true;
flags = base_flags;
}
}
@@ -503,7 +483,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
* We have ASPM control, but the FADT indicates
* that it's unsupported. Clear it.
*/
- pcie_clear_aspm(root->bus);
+ clear_aspm = true;
}
} else {
dev_info(&device->dev,
@@ -512,7 +492,14 @@ static int acpi_pci_root_add(struct acpi_device *device,
acpi_format_exception(status), flags);
dev_info(&device->dev,
"ACPI _OSC control for PCIe not granted, disabling ASPM\n");
- pcie_no_aspm();
+ /*
+ * We want to disable ASPM here, but aspm_disabled
+ * needs to remain in its state from boot so that we
+ * properly handle PCIe 1.1 devices. So we set this
+ * flag here, to defer the action until after the ACPI
+ * root scan.
+ */
+ no_aspm = true;
}
} else {
dev_info(&device->dev,
@@ -520,16 +507,40 @@ static int acpi_pci_root_add(struct acpi_device *device,
"(_OSC support mask: 0x%02x)\n", flags);
}
+ /*
+ * TBD: Need PCI interface for enumeration/configuration of roots.
+ */
+
+ /*
+ * Scan the Root Bridge
+ * --------------------
+ * Must do this prior to any attempt to bind the root device, as the
+ * PCI namespace does not get created until this call is made (and
+ * thus the root bridge's pci_dev does not exist).
+ */
+ root->bus = pci_acpi_scan_root(root);
+ if (!root->bus) {
+ dev_err(&device->dev,
+ "Bus %04x:%02x not present in PCI namespace\n",
+ root->segment, (unsigned int)root->secondary.start);
+ result = -ENODEV;
+ goto end;
+ }
+
+ if (clear_aspm) {
+ dev_info(&device->dev, "Disabling ASPM (FADT indicates it is unsupported)\n");
+ pcie_clear_aspm(root->bus);
+ }
+ if (no_aspm)
+ pcie_no_aspm();
+
pci_acpi_add_bus_pm_notifier(device, root->bus);
if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true);
if (system_state != SYSTEM_BOOTING) {
pcibios_resource_survey_bus(root->bus);
- pci_assign_unassigned_bus_resources(root->bus);
-
- /* need to after hot-added ioapic is registered */
- pci_enable_bridges(root->bus);
+ pci_assign_unassigned_root_bus_resources(root->bus);
}
pci_bus_add_devices(root->bus);
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index 033d1179bdb..d678a180ca2 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -159,12 +159,16 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
return AE_OK;
}
-void acpi_pci_slot_enumerate(struct pci_bus *bus, acpi_handle handle)
+void acpi_pci_slot_enumerate(struct pci_bus *bus)
{
- mutex_lock(&slot_list_lock);
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
- register_slot, NULL, bus, NULL);
- mutex_unlock(&slot_list_lock);
+ acpi_handle handle = ACPI_HANDLE(bus->bridge);
+
+ if (handle) {
+ mutex_lock(&slot_list_lock);
+ acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ register_slot, NULL, bus, NULL);
+ mutex_unlock(&slot_list_lock);
+ }
}
void acpi_pci_slot_remove(struct pci_bus *bus)
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 5c28c894c0f..0dbe5cdf339 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -637,9 +637,7 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
}
/* Execute _PSW */
- arg_list.count = 1;
- in_arg[0].integer.value = enable;
- status = acpi_evaluate_object(dev->handle, "_PSW", &arg_list, NULL);
+ status = acpi_execute_simple_method(dev->handle, "_PSW", enable);
if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
printk(KERN_ERR PREFIX "_PSW execution failed\n");
dev->wakeup.flags.valid = 0;
@@ -786,7 +784,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
}
}
- *state = ACPI_STATE_D3;
+ *state = ACPI_STATE_D3_COLD;
return 0;
}
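Sketch (illustrative reconstruction, not part of the patch): acpi_execute_simple_method(), used here for _PSW and further down for \_TTS and _SCP, is added to utils.c outside the hunks shown. Judging by the call sites it replaces, it evaluates a control method that takes one integer argument and ignores any result, roughly:

static acpi_status sketch_execute_simple_method(acpi_handle handle,
                                                char *method, u64 arg)
{
        union acpi_object obj = { ACPI_TYPE_INTEGER };
        struct acpi_object_list arg_list = { 1, &obj };

        obj.integer.value = arg;
        return acpi_evaluate_object(handle, method, &arg_list, NULL);
}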
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index aa1227a7e3f..04a13784dd2 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -311,6 +311,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
dev->pnp.bus_id,
(u32) dev->wakeup.sleep_state);
+ mutex_lock(&dev->physical_node_lock);
+
if (!dev->physical_node_count) {
seq_printf(seq, "%c%-8s\n",
dev->wakeup.flags.run_wake ? '*' : ' ',
@@ -338,6 +340,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
put_device(ldev);
}
}
+
+ mutex_unlock(&dev->physical_node_lock);
}
mutex_unlock(&acpi_device_lock);
return 0;
@@ -347,12 +351,16 @@ static void physical_device_enable_wakeup(struct acpi_device *adev)
{
struct acpi_device_physical_node *entry;
+ mutex_lock(&adev->physical_node_lock);
+
list_for_each_entry(entry,
&adev->physical_node_list, node)
if (entry->dev && device_can_wakeup(entry->dev)) {
bool enable = !device_may_wakeup(entry->dev);
device_set_wakeup_enable(entry->dev, enable);
}
+
+ mutex_unlock(&adev->physical_node_lock);
}
static ssize_t
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index a5e9f4a5b28..cf34d903f4f 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -28,7 +28,7 @@ static int __init set_no_mwait(const struct dmi_system_id *id)
return 0;
}
-static struct dmi_system_id __initdata processor_idle_dmi_table[] = {
+static struct dmi_system_id processor_idle_dmi_table[] __initdata = {
{
set_no_mwait, "Extensa 5220", {
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 870eaf5fa54..e534ba66d5b 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -91,21 +91,17 @@ static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
acpi_processor_ppc_has_changed(pr, 1);
if (saved == pr->performance_platform_limit)
break;
- acpi_bus_generate_proc_event(device, event,
- pr->performance_platform_limit);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event,
pr->performance_platform_limit);
break;
case ACPI_PROCESSOR_NOTIFY_POWER:
acpi_processor_cst_has_changed(pr);
- acpi_bus_generate_proc_event(device, event, 0);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
case ACPI_PROCESSOR_NOTIFY_THROTTLING:
acpi_processor_tstate_has_changed(pr);
- acpi_bus_generate_proc_event(device, event, 0);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
@@ -179,7 +175,9 @@ static int __acpi_processor_start(struct acpi_device *device)
acpi_processor_load_module(pr);
#endif
acpi_processor_get_throttling_info(pr);
- acpi_processor_get_limit_info(pr);
+
+ if (pr->flags.throttling)
+ pr->flags.limit = 1;
if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
acpi_processor_power_init(pr);
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 1e9732d809b..51d7948611d 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -164,17 +164,12 @@ static void acpi_processor_ppc_ost(acpi_handle handle, int status)
{.type = ACPI_TYPE_INTEGER,},
};
struct acpi_object_list arg_list = {2, params};
- acpi_handle temp;
- params[0].integer.value = ACPI_PROCESSOR_NOTIFY_PERFORMANCE;
- params[1].integer.value = status;
-
- /* when there is no _OST , skip it */
- if (ACPI_FAILURE(acpi_get_handle(handle, "_OST", &temp)))
- return;
-
- acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
- return;
+ if (acpi_has_method(handle, "_OST")) {
+ params[0].integer.value = ACPI_PROCESSOR_NOTIFY_PERFORMANCE;
+ params[1].integer.value = status;
+ acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
+ }
}
int acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
@@ -468,14 +463,11 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
int result = 0;
- acpi_status status = AE_OK;
- acpi_handle handle = NULL;
if (!pr || !pr->performance || !pr->handle)
return -EINVAL;
- status = acpi_get_handle(pr->handle, "_PCT", &handle);
- if (ACPI_FAILURE(status)) {
+ if (!acpi_has_method(pr->handle, "_PCT")) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"ACPI-based processor performance control unavailable\n"));
return -ENODEV;
@@ -501,7 +493,7 @@ int acpi_processor_get_performance_info(struct acpi_processor *pr)
*/
update_bios:
#ifdef CONFIG_X86
- if (ACPI_SUCCESS(acpi_get_handle(pr->handle, "_PPC", &handle))){
+ if (acpi_has_method(pr->handle, "_PPC")) {
if(boot_cpu_has(X86_FEATURE_EST))
printk(KERN_WARNING FW_BUG "BIOS needs update for CPU "
"frequency support\n");
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index e8e652710e6..d1d2e7fb5b3 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -186,18 +186,6 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
#endif
-int acpi_processor_get_limit_info(struct acpi_processor *pr)
-{
-
- if (!pr)
- return -EINVAL;
-
- if (pr->flags.throttling)
- pr->flags.limit = 1;
-
- return 0;
-}
-
/* thermal cooling device callbacks */
static int acpi_processor_max_state(struct acpi_processor *pr)
{
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 3322b47ab7c..b7201fc6f1e 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -505,14 +505,12 @@ int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list,
void *preproc_data)
{
struct res_proc_context c;
- acpi_handle not_used;
acpi_status status;
if (!adev || !adev->handle || !list_empty(list))
return -EINVAL;
- status = acpi_get_handle(adev->handle, METHOD_NAME__CRS, &not_used);
- if (ACPI_FAILURE(status))
+ if (!acpi_has_method(adev->handle, METHOD_NAME__CRS))
return 0;
c.list = list;
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index b6241eeb113..aef7e1cd1e5 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -873,14 +873,9 @@ static void acpi_sbs_callback(void *context)
u8 saved_charger_state = sbs->charger_present;
u8 saved_battery_state;
acpi_ac_get_present(sbs);
- if (sbs->charger_present != saved_charger_state) {
-#ifdef CONFIG_ACPI_PROC_EVENT
- acpi_bus_generate_proc_event4(ACPI_AC_CLASS, ACPI_AC_DIR_NAME,
- ACPI_SBS_NOTIFY_STATUS,
- sbs->charger_present);
-#endif
+ if (sbs->charger_present != saved_charger_state)
kobject_uevent(&sbs->charger.dev->kobj, KOBJ_CHANGE);
- }
+
if (sbs->manager_present) {
for (id = 0; id < MAX_SBS_BAT; ++id) {
if (!(sbs->batteries_supported & (1 << id)))
@@ -890,12 +885,6 @@ static void acpi_sbs_callback(void *context)
acpi_battery_read(bat);
if (saved_battery_state == bat->present)
continue;
-#ifdef CONFIG_ACPI_PROC_EVENT
- acpi_bus_generate_proc_event4(ACPI_BATTERY_CLASS,
- bat->name,
- ACPI_SBS_NOTIFY_STATUS,
- bat->present);
-#endif
kobject_uevent(&bat->bat.dev->kobj, KOBJ_CHANGE);
}
}
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 8a46c924eff..61d090b6ce2 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -193,9 +193,6 @@ static acpi_status acpi_bus_online_companions(acpi_handle handle, u32 lvl,
static int acpi_scan_hot_remove(struct acpi_device *device)
{
acpi_handle handle = device->handle;
- acpi_handle not_used;
- struct acpi_object_list arg_list;
- union acpi_object arg;
struct device *errdev;
acpi_status status;
unsigned long long sta;
@@ -258,32 +255,15 @@ static int acpi_scan_hot_remove(struct acpi_device *device)
put_device(&device->dev);
device = NULL;
- if (ACPI_SUCCESS(acpi_get_handle(handle, "_LCK", &not_used))) {
- arg_list.count = 1;
- arg_list.pointer = &arg;
- arg.type = ACPI_TYPE_INTEGER;
- arg.integer.value = 0;
- acpi_evaluate_object(handle, "_LCK", &arg_list, NULL);
- }
-
- arg_list.count = 1;
- arg_list.pointer = &arg;
- arg.type = ACPI_TYPE_INTEGER;
- arg.integer.value = 1;
-
+ acpi_evaluate_lck(handle, 0);
/*
* TBD: _EJD support.
*/
- status = acpi_evaluate_object(handle, "_EJ0", &arg_list, NULL);
- if (ACPI_FAILURE(status)) {
- if (status == AE_NOT_FOUND) {
- return -ENODEV;
- } else {
- acpi_handle_warn(handle, "Eject failed (0x%x)\n",
- status);
- return -EIO;
- }
- }
+ status = acpi_evaluate_ej0(handle);
+ if (status == AE_NOT_FOUND)
+ return -ENODEV;
+ else if (ACPI_FAILURE(status))
+ return -EIO;
/*
* Verify if eject was indeed successful. If not, log an error
@@ -307,6 +287,7 @@ static void acpi_bus_device_eject(void *context)
struct acpi_device *device = NULL;
struct acpi_scan_handler *handler;
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
+ int error;
mutex_lock(&acpi_scan_lock);
@@ -321,17 +302,13 @@ static void acpi_bus_device_eject(void *context)
}
acpi_evaluate_hotplug_ost(handle, ACPI_NOTIFY_EJECT_REQUEST,
ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
- if (handler->hotplug.mode == AHM_CONTAINER) {
- device->flags.eject_pending = true;
+ if (handler->hotplug.mode == AHM_CONTAINER)
kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
- } else {
- int error;
- get_device(&device->dev);
- error = acpi_scan_hot_remove(device);
- if (error)
- goto err_out;
- }
+ get_device(&device->dev);
+ error = acpi_scan_hot_remove(device);
+ if (error)
+ goto err_out;
out:
mutex_unlock(&acpi_scan_lock);
@@ -516,7 +493,6 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
struct acpi_eject_event *ej_event;
acpi_object_type not_used;
acpi_status status;
- u32 ost_source;
int ret;
if (!count || buf[0] != '1')
@@ -530,43 +506,28 @@ acpi_eject_store(struct device *d, struct device_attribute *attr,
if (ACPI_FAILURE(status) || !acpi_device->flags.ejectable)
return -ENODEV;
- mutex_lock(&acpi_scan_lock);
-
- if (acpi_device->flags.eject_pending) {
- /* ACPI eject notification event. */
- ost_source = ACPI_NOTIFY_EJECT_REQUEST;
- acpi_device->flags.eject_pending = 0;
- } else {
- /* Eject initiated by user space. */
- ost_source = ACPI_OST_EC_OSPM_EJECT;
- }
ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
if (!ej_event) {
ret = -ENOMEM;
goto err_out;
}
- acpi_evaluate_hotplug_ost(acpi_device->handle, ost_source,
+ acpi_evaluate_hotplug_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
ej_event->device = acpi_device;
- ej_event->event = ost_source;
+ ej_event->event = ACPI_OST_EC_OSPM_EJECT;
get_device(&acpi_device->dev);
status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
- if (ACPI_FAILURE(status)) {
- put_device(&acpi_device->dev);
- kfree(ej_event);
- ret = status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
- goto err_out;
- }
- ret = count;
+ if (ACPI_SUCCESS(status))
+ return count;
- out:
- mutex_unlock(&acpi_scan_lock);
- return ret;
+ put_device(&acpi_device->dev);
+ kfree(ej_event);
+ ret = status == AE_NO_MEMORY ? -ENOMEM : -EAGAIN;
err_out:
- acpi_evaluate_hotplug_ost(acpi_device->handle, ost_source,
+ acpi_evaluate_hotplug_ost(acpi_device->handle, ACPI_OST_EC_OSPM_EJECT,
ACPI_OST_SC_NON_SPECIFIC_FAILURE, NULL);
- goto out;
+ return ret;
}
static DEVICE_ATTR(eject, 0200, NULL, acpi_eject_store);
@@ -654,7 +615,6 @@ static int acpi_device_setup_files(struct acpi_device *dev)
{
struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
acpi_status status;
- acpi_handle temp;
unsigned long long sun;
int result = 0;
@@ -680,8 +640,7 @@ static int acpi_device_setup_files(struct acpi_device *dev)
/*
* If device has _STR, 'description' file is created
*/
- status = acpi_get_handle(dev->handle, "_STR", &temp);
- if (ACPI_SUCCESS(status)) {
+ if (acpi_has_method(dev->handle, "_STR")) {
status = acpi_evaluate_object(dev->handle, "_STR",
NULL, &buffer);
if (ACPI_FAILURE(status))
@@ -711,8 +670,7 @@ static int acpi_device_setup_files(struct acpi_device *dev)
* If device has _EJ0, 'eject' file is created that is used to trigger
* hot-removal function from userland.
*/
- status = acpi_get_handle(dev->handle, "_EJ0", &temp);
- if (ACPI_SUCCESS(status)) {
+ if (acpi_has_method(dev->handle, "_EJ0")) {
result = device_create_file(&dev->dev, &dev_attr_eject);
if (result)
return result;
@@ -734,9 +692,6 @@ end:
static void acpi_device_remove_files(struct acpi_device *dev)
{
- acpi_status status;
- acpi_handle temp;
-
if (dev->flags.power_manageable) {
device_remove_file(&dev->dev, &dev_attr_power_state);
if (dev->power.flags.power_resources)
@@ -747,20 +702,17 @@ static void acpi_device_remove_files(struct acpi_device *dev)
/*
* If device has _STR, remove 'description' file
*/
- status = acpi_get_handle(dev->handle, "_STR", &temp);
- if (ACPI_SUCCESS(status)) {
+ if (acpi_has_method(dev->handle, "_STR")) {
kfree(dev->pnp.str_obj);
device_remove_file(&dev->dev, &dev_attr_description);
}
/*
* If device has _EJ0, remove 'eject' file.
*/
- status = acpi_get_handle(dev->handle, "_EJ0", &temp);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(dev->handle, "_EJ0"))
device_remove_file(&dev->dev, &dev_attr_eject);
- status = acpi_get_handle(dev->handle, "_SUN", &temp);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(dev->handle, "_SUN"))
device_remove_file(&dev->dev, &dev_attr_sun);
if (dev->pnp.unique_id)
@@ -999,6 +951,28 @@ struct bus_type acpi_bus_type = {
.uevent = acpi_device_uevent,
};
+static void acpi_bus_data_handler(acpi_handle handle, void *context)
+{
+ /* Intentionally empty. */
+}
+
+int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
+{
+ acpi_status status;
+
+ if (!device)
+ return -EINVAL;
+
+ status = acpi_get_data(handle, acpi_bus_data_handler, (void **)device);
+ if (ACPI_FAILURE(status) || !*device) {
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
+ handle));
+ return -ENODEV;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_bus_get_device);
+
int acpi_device_add(struct acpi_device *device,
void (*release)(struct device *))
{
@@ -1210,14 +1184,6 @@ acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
}
EXPORT_SYMBOL_GPL(acpi_bus_get_ejd);
-void acpi_bus_data_handler(acpi_handle handle, void *context)
-{
-
- /* TBD */
-
- return;
-}
-
static int acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
struct acpi_device_wakeup *wakeup)
{
@@ -1336,13 +1302,10 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
{
- acpi_handle temp;
- acpi_status status = 0;
int err;
/* Presence of _PRW indicates wake capable */
- status = acpi_get_handle(device->handle, "_PRW", &temp);
- if (ACPI_FAILURE(status))
+ if (!acpi_has_method(device->handle, "_PRW"))
return;
err = acpi_bus_extract_wakeup_device_power_package(device->handle,
@@ -1372,7 +1335,6 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state)
struct acpi_device_power_state *ps = &device->power.states[state];
char pathname[5] = { '_', 'P', 'R', '0' + state, '\0' };
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
- acpi_handle handle;
acpi_status status;
INIT_LIST_HEAD(&ps->resources);
@@ -1395,8 +1357,7 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state)
/* Evaluate "_PSx" to see if we can do explicit sets */
pathname[2] = 'S';
- status = acpi_get_handle(device->handle, pathname, &handle);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(device->handle, pathname))
ps->flags.explicit_set = 1;
/*
@@ -1415,28 +1376,21 @@ static void acpi_bus_init_power_state(struct acpi_device *device, int state)
static void acpi_bus_get_power_flags(struct acpi_device *device)
{
- acpi_status status;
- acpi_handle handle;
u32 i;
/* Presence of _PS0|_PR0 indicates 'power manageable' */
- status = acpi_get_handle(device->handle, "_PS0", &handle);
- if (ACPI_FAILURE(status)) {
- status = acpi_get_handle(device->handle, "_PR0", &handle);
- if (ACPI_FAILURE(status))
- return;
- }
+ if (!acpi_has_method(device->handle, "_PS0") &&
+ !acpi_has_method(device->handle, "_PR0"))
+ return;
device->flags.power_manageable = 1;
/*
* Power Management Flags
*/
- status = acpi_get_handle(device->handle, "_PSC", &handle);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(device->handle, "_PSC"))
device->power.flags.explicit_get = 1;
- status = acpi_get_handle(device->handle, "_IRC", &handle);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(device->handle, "_IRC"))
device->power.flags.inrush_current = 1;
/*
@@ -1450,8 +1404,8 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
/* Set defaults for D0 and D3 states (always valid) */
device->power.states[ACPI_STATE_D0].flags.valid = 1;
device->power.states[ACPI_STATE_D0].power = 100;
- device->power.states[ACPI_STATE_D3].flags.valid = 1;
- device->power.states[ACPI_STATE_D3].power = 0;
+ device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
+ device->power.states[ACPI_STATE_D3_COLD].power = 0;
/* Set D3cold's explicit_set flag if _PS3 exists. */
if (device->power.states[ACPI_STATE_D3_HOT].flags.explicit_set)
@@ -1470,28 +1424,18 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
static void acpi_bus_get_flags(struct acpi_device *device)
{
- acpi_status status = AE_OK;
- acpi_handle temp = NULL;
-
/* Presence of _STA indicates 'dynamic_status' */
- status = acpi_get_handle(device->handle, "_STA", &temp);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(device->handle, "_STA"))
device->flags.dynamic_status = 1;
/* Presence of _RMV indicates 'removable' */
- status = acpi_get_handle(device->handle, "_RMV", &temp);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(device->handle, "_RMV"))
device->flags.removable = 1;
/* Presence of _EJD|_EJ0 indicates 'ejectable' */
- status = acpi_get_handle(device->handle, "_EJD", &temp);
- if (ACPI_SUCCESS(status))
+ if (acpi_has_method(device->handle, "_EJD") ||
+ acpi_has_method(device->handle, "_EJ0"))
device->flags.ejectable = 1;
- else {
- status = acpi_get_handle(device->handle, "_EJ0", &temp);
- if (ACPI_SUCCESS(status))
- device->flags.ejectable = 1;
- }
}
static void acpi_device_get_busid(struct acpi_device *device)
@@ -1533,46 +1477,45 @@ static void acpi_device_get_busid(struct acpi_device *device)
}
/*
+ * acpi_ata_match - see if an acpi object is an ATA device
+ *
+ * If an acpi object has one of the ACPI ATA methods defined,
+ * then we can safely call it an ATA device.
+ */
+bool acpi_ata_match(acpi_handle handle)
+{
+ return acpi_has_method(handle, "_GTF") ||
+ acpi_has_method(handle, "_GTM") ||
+ acpi_has_method(handle, "_STM") ||
+ acpi_has_method(handle, "_SDD");
+}
+
+/*
* acpi_bay_match - see if an acpi object is an ejectable driver bay
*
* If an acpi object is ejectable and has one of the ACPI ATA methods defined,
* then we can safely call it an ejectable drive bay
*/
-static int acpi_bay_match(acpi_handle handle)
+bool acpi_bay_match(acpi_handle handle)
{
- acpi_status status;
- acpi_handle tmp;
acpi_handle phandle;
- status = acpi_get_handle(handle, "_EJ0", &tmp);
- if (ACPI_FAILURE(status))
- return -ENODEV;
-
- if ((ACPI_SUCCESS(acpi_get_handle(handle, "_GTF", &tmp))) ||
- (ACPI_SUCCESS(acpi_get_handle(handle, "_GTM", &tmp))) ||
- (ACPI_SUCCESS(acpi_get_handle(handle, "_STM", &tmp))) ||
- (ACPI_SUCCESS(acpi_get_handle(handle, "_SDD", &tmp))))
- return 0;
+ if (!acpi_has_method(handle, "_EJ0"))
+ return false;
+ if (acpi_ata_match(handle))
+ return true;
+ if (ACPI_FAILURE(acpi_get_parent(handle, &phandle)))
+ return false;
- if (acpi_get_parent(handle, &phandle))
- return -ENODEV;
-
- if ((ACPI_SUCCESS(acpi_get_handle(phandle, "_GTF", &tmp))) ||
- (ACPI_SUCCESS(acpi_get_handle(phandle, "_GTM", &tmp))) ||
- (ACPI_SUCCESS(acpi_get_handle(phandle, "_STM", &tmp))) ||
- (ACPI_SUCCESS(acpi_get_handle(phandle, "_SDD", &tmp))))
- return 0;
-
- return -ENODEV;
+ return acpi_ata_match(phandle);
}
/*
* acpi_dock_match - see if an acpi object has a _DCK method
*/
-static int acpi_dock_match(acpi_handle handle)
+bool acpi_dock_match(acpi_handle handle)
{
- acpi_handle tmp;
- return acpi_get_handle(handle, "_DCK", &tmp);
+ return acpi_has_method(handle, "_DCK");
}
const char *acpi_device_hid(struct acpi_device *device)
@@ -1610,34 +1553,26 @@ static void acpi_add_id(struct acpi_device_pnp *pnp, const char *dev_id)
* lacks the SMBUS01 HID and the methods do not have the necessary "_"
* prefix. Work around this.
*/
-static int acpi_ibm_smbus_match(acpi_handle handle)
+static bool acpi_ibm_smbus_match(acpi_handle handle)
{
- acpi_handle h_dummy;
- struct acpi_buffer path = {ACPI_ALLOCATE_BUFFER, NULL};
- int result;
+ char node_name[ACPI_PATH_SEGMENT_LENGTH];
+ struct acpi_buffer path = { sizeof(node_name), node_name };
if (!dmi_name_in_vendors("IBM"))
- return -ENODEV;
+ return false;
/* Look for SMBS object */
- result = acpi_get_name(handle, ACPI_SINGLE_NAME, &path);
- if (result)
- return result;
-
- if (strcmp("SMBS", path.pointer)) {
- result = -ENODEV;
- goto out;
- }
+ if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &path)) ||
+ strcmp("SMBS", path.pointer))
+ return false;
/* Does it have the necessary (but misnamed) methods? */
- result = -ENODEV;
- if (ACPI_SUCCESS(acpi_get_handle(handle, "SBI", &h_dummy)) &&
- ACPI_SUCCESS(acpi_get_handle(handle, "SBR", &h_dummy)) &&
- ACPI_SUCCESS(acpi_get_handle(handle, "SBW", &h_dummy)))
- result = 0;
-out:
- kfree(path.pointer);
- return result;
+ if (acpi_has_method(handle, "SBI") &&
+ acpi_has_method(handle, "SBR") &&
+ acpi_has_method(handle, "SBW"))
+ return true;
+
+ return false;
}
static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
@@ -1685,11 +1620,11 @@ static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
*/
if (acpi_is_video_device(handle))
acpi_add_id(pnp, ACPI_VIDEO_HID);
- else if (ACPI_SUCCESS(acpi_bay_match(handle)))
+ else if (acpi_bay_match(handle))
acpi_add_id(pnp, ACPI_BAY_HID);
- else if (ACPI_SUCCESS(acpi_dock_match(handle)))
+ else if (acpi_dock_match(handle))
acpi_add_id(pnp, ACPI_DOCK_HID);
- else if (!acpi_ibm_smbus_match(handle))
+ else if (acpi_ibm_smbus_match(handle))
acpi_add_id(pnp, ACPI_SMBUS_IBM_HID);
else if (list_empty(&pnp->ids) && handle == ACPI_ROOT_OBJECT) {
acpi_add_id(pnp, ACPI_BUS_HID); /* \_SB, LNXSYBUS */
@@ -1900,7 +1835,6 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
struct acpi_device *device = NULL;
int type;
unsigned long long sta;
- acpi_status status;
int result;
acpi_bus_get_device(handle, &device);
@@ -1921,10 +1855,8 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
!(sta & ACPI_STA_DEVICE_FUNCTIONING)) {
struct acpi_device_wakeup wakeup;
- acpi_handle temp;
- status = acpi_get_handle(handle, "_PRW", &temp);
- if (ACPI_SUCCESS(status)) {
+ if (acpi_has_method(handle, "_PRW")) {
acpi_bus_extract_wakeup_device_power_package(handle,
&wakeup);
acpi_power_resources_list_free(&wakeup.resources);
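Sketch (hypothetical caller, not part of the patch): acpi_bus_get_device() is now defined in scan.c on top of acpi_get_data() and the intentionally empty acpi_bus_data_handler(); it returns 0 and a non-NULL *device on success, and -ENODEV when no struct acpi_device is attached to the handle.

static bool sketch_handle_has_acpi_device(acpi_handle handle)
{
        struct acpi_device *adev = NULL;

        return acpi_bus_get_device(handle, &adev) == 0 && adev;
}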
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 187ab61889e..14df30580e1 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -31,12 +31,9 @@ static u8 sleep_states[ACPI_S_STATE_COUNT];
static void acpi_sleep_tts_switch(u32 acpi_state)
{
- union acpi_object in_arg = { ACPI_TYPE_INTEGER };
- struct acpi_object_list arg_list = { 1, &in_arg };
- acpi_status status = AE_OK;
+ acpi_status status;
- in_arg.integer.value = acpi_state;
- status = acpi_evaluate_object(NULL, "\\_TTS", &arg_list, NULL);
+ status = acpi_execute_simple_method(NULL, "\\_TTS", acpi_state);
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
/*
* OS can't evaluate the _TTS object correctly. Some warning
@@ -141,7 +138,7 @@ static int __init init_nvs_nosave(const struct dmi_system_id *d)
return 0;
}
-static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
+static struct dmi_system_id acpisleep_dmi_table[] __initdata = {
{
.callback = init_old_suspend_ordering,
.ident = "Abit KN9 (nForce4 variant)",
@@ -423,10 +420,21 @@ static void acpi_pm_finish(void)
}
/**
- * acpi_pm_end - Finish up suspend sequence.
+ * acpi_pm_start - Start system PM transition.
+ */
+static void acpi_pm_start(u32 acpi_state)
+{
+ acpi_target_sleep_state = acpi_state;
+ acpi_sleep_tts_switch(acpi_target_sleep_state);
+ acpi_scan_lock_acquire();
+}
+
+/**
+ * acpi_pm_end - Finish up system PM transition.
*/
static void acpi_pm_end(void)
{
+ acpi_scan_lock_release();
/*
* This is necessary in case acpi_pm_finish() is not called during a
* failing transition to a sleep state.
@@ -454,21 +462,19 @@ static u32 acpi_suspend_states[] = {
static int acpi_suspend_begin(suspend_state_t pm_state)
{
u32 acpi_state = acpi_suspend_states[pm_state];
- int error = 0;
+ int error;
error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc();
if (error)
return error;
- if (sleep_states[acpi_state]) {
- acpi_target_sleep_state = acpi_state;
- acpi_sleep_tts_switch(acpi_target_sleep_state);
- } else {
- printk(KERN_ERR "ACPI does not support this state: %d\n",
- pm_state);
- error = -ENOSYS;
+ if (!sleep_states[acpi_state]) {
+ pr_err("ACPI does not support sleep state S%u\n", acpi_state);
+ return -ENOSYS;
}
- return error;
+
+ acpi_pm_start(acpi_state);
+ return 0;
}
/**
@@ -634,10 +640,8 @@ static int acpi_hibernation_begin(void)
int error;
error = nvs_nosave ? 0 : suspend_nvs_alloc();
- if (!error) {
- acpi_target_sleep_state = ACPI_STATE_S4;
- acpi_sleep_tts_switch(acpi_target_sleep_state);
- }
+ if (!error)
+ acpi_pm_start(ACPI_STATE_S4);
return error;
}
@@ -716,8 +720,10 @@ static int acpi_hibernation_begin_old(void)
if (!error) {
if (!nvs_nosave)
error = suspend_nvs_alloc();
- if (!error)
+ if (!error) {
acpi_target_sleep_state = ACPI_STATE_S4;
+ acpi_scan_lock_acquire();
+ }
}
return error;
}
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index a33821ca389..6a0329340b4 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -50,11 +50,6 @@
#define ACPI_THERMAL_CLASS "thermal_zone"
#define ACPI_THERMAL_DEVICE_NAME "Thermal Zone"
-#define ACPI_THERMAL_FILE_STATE "state"
-#define ACPI_THERMAL_FILE_TEMPERATURE "temperature"
-#define ACPI_THERMAL_FILE_TRIP_POINTS "trip_points"
-#define ACPI_THERMAL_FILE_COOLING_MODE "cooling_mode"
-#define ACPI_THERMAL_FILE_POLLING_FREQ "polling_frequency"
#define ACPI_THERMAL_NOTIFY_TEMPERATURE 0x80
#define ACPI_THERMAL_NOTIFY_THRESHOLDS 0x81
#define ACPI_THERMAL_NOTIFY_DEVICES 0x82
@@ -190,7 +185,6 @@ struct acpi_thermal {
struct thermal_zone_device *thermal_zone;
int tz_enabled;
int kelvin_offset;
- struct mutex lock;
};
/* --------------------------------------------------------------------------
@@ -239,26 +233,16 @@ static int acpi_thermal_get_polling_frequency(struct acpi_thermal *tz)
static int acpi_thermal_set_cooling_mode(struct acpi_thermal *tz, int mode)
{
- acpi_status status = AE_OK;
- union acpi_object arg0 = { ACPI_TYPE_INTEGER };
- struct acpi_object_list arg_list = { 1, &arg0 };
- acpi_handle handle = NULL;
-
-
if (!tz)
return -EINVAL;
- status = acpi_get_handle(tz->device->handle, "_SCP", &handle);
- if (ACPI_FAILURE(status)) {
+ if (!acpi_has_method(tz->device->handle, "_SCP")) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "_SCP not present\n"));
return -ENODEV;
- }
-
- arg0.integer.value = mode;
-
- status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
- if (ACPI_FAILURE(status))
+ } else if (ACPI_FAILURE(acpi_execute_simple_method(tz->device->handle,
+ "_SCP", mode))) {
return -ENODEV;
+ }
return 0;
}
@@ -491,14 +475,14 @@ static int acpi_thermal_trips_update(struct acpi_thermal *tz, int flag)
break;
}
- if (flag & ACPI_TRIPS_DEVICES) {
- memset(&devices, 0, sizeof(struct acpi_handle_list));
+ if ((flag & ACPI_TRIPS_DEVICES)
+ && acpi_has_method(tz->device->handle, "_TZD")) {
+ memset(&devices, 0, sizeof(devices));
status = acpi_evaluate_reference(tz->device->handle, "_TZD",
NULL, &devices);
- if (memcmp(&tz->devices, &devices,
- sizeof(struct acpi_handle_list))) {
- memcpy(&tz->devices, &devices,
- sizeof(struct acpi_handle_list));
+ if (ACPI_SUCCESS(status)
+ && memcmp(&tz->devices, &devices, sizeof(devices))) {
+ tz->devices = devices;
ACPI_THERMAL_TRIPS_EXCEPTION(flag, "device");
}
}
@@ -769,7 +753,6 @@ static int thermal_notify(struct thermal_zone_device *thermal, int trip,
else
return 0;
- acpi_bus_generate_proc_event(tz->device, type, 1);
acpi_bus_generate_netlink_event(tz->device->pnp.device_class,
dev_name(&tz->device->dev), type, 1);
@@ -850,12 +833,13 @@ static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
if (ACPI_SUCCESS(status) && (dev == device)) {
if (bind)
result = thermal_zone_bind_cooling_device
- (thermal, -1, cdev,
- THERMAL_NO_LIMIT,
+ (thermal, THERMAL_TRIPS_NONE,
+ cdev, THERMAL_NO_LIMIT,
THERMAL_NO_LIMIT);
else
result = thermal_zone_unbind_cooling_device
- (thermal, -1, cdev);
+ (thermal, THERMAL_TRIPS_NONE,
+ cdev);
if (result)
goto failed;
}
@@ -980,14 +964,12 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)
case ACPI_THERMAL_NOTIFY_THRESHOLDS:
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
acpi_thermal_check(tz);
- acpi_bus_generate_proc_event(device, event, 0);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
case ACPI_THERMAL_NOTIFY_DEVICES:
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
acpi_thermal_check(tz);
- acpi_bus_generate_proc_event(device, event, 0);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
@@ -1101,8 +1083,6 @@ static int acpi_thermal_add(struct acpi_device *device)
strcpy(acpi_device_name(device), ACPI_THERMAL_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_THERMAL_CLASS);
device->driver_data = tz;
- mutex_init(&tz->lock);
-
result = acpi_thermal_get_info(tz);
if (result)
@@ -1135,7 +1115,6 @@ static int acpi_thermal_remove(struct acpi_device *device)
tz = acpi_driver_data(device);
acpi_thermal_unregister_thermal_zone(tz);
- mutex_destroy(&tz->lock);
kfree(tz);
return 0;
}
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 74437130431..552248b0005 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -495,3 +495,73 @@ acpi_handle_printk(const char *level, acpi_handle handle, const char *fmt, ...)
kfree(buffer.pointer);
}
EXPORT_SYMBOL(acpi_handle_printk);
+
+/**
+ * acpi_has_method: Check whether @handle has a method named @name
+ * @handle: ACPI device handle
+ * @name: name of object or method
+ *
+ * Check whether @handle has a method named @name.
+ */
+bool acpi_has_method(acpi_handle handle, char *name)
+{
+ acpi_handle tmp;
+
+ return ACPI_SUCCESS(acpi_get_handle(handle, name, &tmp));
+}
+EXPORT_SYMBOL(acpi_has_method);
+
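+/**
+ * acpi_execute_simple_method: Evaluate a method with a single integer argument
+ * @handle: ACPI device handle
+ * @method: name of the method to evaluate
+ * @arg: integer value passed as the method's only argument
+ *
+ * Evaluate the object @method under @handle, passing @arg as its single
+ * integer argument, and return the resulting acpi_status.
+ */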
+acpi_status acpi_execute_simple_method(acpi_handle handle, char *method,
+ u64 arg)
+{
+ union acpi_object obj = { .type = ACPI_TYPE_INTEGER };
+ struct acpi_object_list arg_list = { .count = 1, .pointer = &obj, };
+
+ obj.integer.value = arg;
+
+ return acpi_evaluate_object(handle, method, &arg_list, NULL);
+}
+EXPORT_SYMBOL(acpi_execute_simple_method);
+
+/**
+ * acpi_evaluate_ej0: Evaluate _EJ0 method for hotplug operations
+ * @handle: ACPI device handle
+ *
+ * Evaluate device's _EJ0 method for hotplug operations.
+ */
+acpi_status acpi_evaluate_ej0(acpi_handle handle)
+{
+ acpi_status status;
+
+ status = acpi_execute_simple_method(handle, "_EJ0", 1);
+ if (status == AE_NOT_FOUND)
+ acpi_handle_warn(handle, "No _EJ0 support for device\n");
+ else if (ACPI_FAILURE(status))
+ acpi_handle_warn(handle, "Eject failed (0x%x)\n", status);
+
+ return status;
+}
+
+/**
+ * acpi_evaluate_lck: Evaluate _LCK method to lock/unlock device
+ * @handle: ACPI device handle
+ * @lock: lock device if non-zero, otherwise unlock device
+ *
+ * Evaluate device's _LCK method if present to lock/unlock device
+ */
+acpi_status acpi_evaluate_lck(acpi_handle handle, int lock)
+{
+ acpi_status status;
+
+ status = acpi_execute_simple_method(handle, "_LCK", !!lock);
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ if (lock)
+ acpi_handle_warn(handle,
+ "Locking device failed (0x%x)\n", status);
+ else
+ acpi_handle_warn(handle,
+ "Unlocking device failed (0x%x)\n", status);
+ }
+
+ return status;
+}
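A minimal usage sketch for the helpers added above, mirroring the _SCP conversion in drivers/acpi/thermal.c earlier in this diff (the wrapper name and the -EIO return value are illustrative only, not part of the patch):

static int example_set_cooling_mode(acpi_handle handle, u64 mode)
{
	/* Probe for the optional _SCP control method before evaluating it. */
	if (!acpi_has_method(handle, "_SCP"))
		return -ENODEV;

	/* Evaluate _SCP with a single integer argument. */
	if (ACPI_FAILURE(acpi_execute_simple_method(handle, "_SCP", mode)))
		return -EIO;

	return 0;
}

acpi_evaluate_ej0() and acpi_evaluate_lck() wrap the same primitive for the _EJ0 and _LCK methods, adding the warning messages shown above.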
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 6dd237e79b4..aebcf6355df 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -1,5 +1,5 @@
/*
- * video.c - ACPI Video Driver ($Revision:$)
+ * video.c - ACPI Video Driver
*
* Copyright (C) 2004 Luming Yu <luming.yu@intel.com>
* Copyright (C) 2004 Bruno Ducrot <ducrot@poupinou.org>
@@ -88,7 +88,7 @@ module_param(allow_duplicates, bool, 0644);
static bool use_bios_initial_backlight = 1;
module_param(use_bios_initial_backlight, bool, 0644);
-static int register_count = 0;
+static int register_count;
static int acpi_video_bus_add(struct acpi_device *device);
static int acpi_video_bus_remove(struct acpi_device *device);
static void acpi_video_bus_notify(struct acpi_device *device, u32 event);
@@ -118,26 +118,26 @@ struct acpi_video_bus_flags {
};
struct acpi_video_bus_cap {
- u8 _DOS:1; /*Enable/Disable output switching */
- u8 _DOD:1; /*Enumerate all devices attached to display adapter */
- u8 _ROM:1; /*Get ROM Data */
- u8 _GPD:1; /*Get POST Device */
- u8 _SPD:1; /*Set POST Device */
- u8 _VPO:1; /*Video POST Options */
+ u8 _DOS:1; /* Enable/Disable output switching */
+ u8 _DOD:1; /* Enumerate all devices attached to display adapter */
+ u8 _ROM:1; /* Get ROM Data */
+ u8 _GPD:1; /* Get POST Device */
+ u8 _SPD:1; /* Set POST Device */
+ u8 _VPO:1; /* Video POST Options */
u8 reserved:2;
};
struct acpi_video_device_attrib {
u32 display_index:4; /* A zero-based instance of the Display */
- u32 display_port_attachment:4; /*This field differentiates the display type */
- u32 display_type:4; /*Describe the specific type in use */
- u32 vendor_specific:4; /*Chipset Vendor Specific */
- u32 bios_can_detect:1; /*BIOS can detect the device */
- u32 depend_on_vga:1; /*Non-VGA output device whose power is related to
+ u32 display_port_attachment:4; /* This field differentiates the display type */
+ u32 display_type:4; /* Describe the specific type in use */
+ u32 vendor_specific:4; /* Chipset Vendor Specific */
+ u32 bios_can_detect:1; /* BIOS can detect the device */
+ u32 depend_on_vga:1; /* Non-VGA output device whose power is related to
the VGA device. */
- u32 pipe_id:3; /*For VGA multiple-head devices. */
- u32 reserved:10; /*Must be 0 */
- u32 device_id_scheme:1; /*Device ID Scheme */
+ u32 pipe_id:3; /* For VGA multiple-head devices. */
+ u32 reserved:10; /* Must be 0 */
+ u32 device_id_scheme:1; /* Device ID Scheme */
};
struct acpi_video_enumerated_device {
@@ -174,19 +174,17 @@ struct acpi_video_device_flags {
};
struct acpi_video_device_cap {
- u8 _ADR:1; /*Return the unique ID */
- u8 _BCL:1; /*Query list of brightness control levels supported */
- u8 _BCM:1; /*Set the brightness level */
+ u8 _ADR:1; /* Return the unique ID */
+ u8 _BCL:1; /* Query list of brightness control levels supported */
+ u8 _BCM:1; /* Set the brightness level */
u8 _BQC:1; /* Get current brightness level */
u8 _BCQ:1; /* Some buggy BIOS uses _BCQ instead of _BQC */
- u8 _DDC:1; /*Return the EDID for this device */
+ u8 _DDC:1; /* Return the EDID for this device */
};
struct acpi_video_brightness_flags {
u8 _BCL_no_ac_battery_levels:1; /* no AC/Battery levels in _BCL */
- u8 _BCL_reversed:1; /* _BCL package is in a reversed order*/
- u8 _BCL_use_index:1; /* levels in _BCL are index values */
- u8 _BCM_use_index:1; /* input of _BCM is an index value */
+ u8 _BCL_reversed:1; /* _BCL package is in a reversed order */
u8 _BQC_use_index:1; /* _BQC returns an index value */
};
@@ -231,21 +229,22 @@ static int acpi_video_get_next_level(struct acpi_video_device *device,
static int acpi_video_switch_brightness(struct acpi_video_device *device,
int event);
-/*backlight device sysfs support*/
+/* backlight device sysfs support */
static int acpi_video_get_brightness(struct backlight_device *bd)
{
unsigned long long cur_level;
int i;
- struct acpi_video_device *vd =
- (struct acpi_video_device *)bl_get_data(bd);
+ struct acpi_video_device *vd = bl_get_data(bd);
if (acpi_video_device_lcd_get_level_current(vd, &cur_level, false))
return -EINVAL;
for (i = 2; i < vd->brightness->count; i++) {
if (vd->brightness->levels[i] == cur_level)
- /* The first two entries are special - see page 575
- of the ACPI spec 3.0 */
- return i-2;
+ /*
+ * The first two entries are special - see page 575
+ * of the ACPI spec 3.0
+ */
+ return i - 2;
}
return 0;
}
@@ -253,8 +252,7 @@ static int acpi_video_get_brightness(struct backlight_device *bd)
static int acpi_video_set_brightness(struct backlight_device *bd)
{
int request_level = bd->props.brightness + 2;
- struct acpi_video_device *vd =
- (struct acpi_video_device *)bl_get_data(bd);
+ struct acpi_video_device *vd = bl_get_data(bd);
return acpi_video_device_lcd_set_level(vd,
vd->brightness->levels[request_level]);
@@ -302,11 +300,11 @@ video_set_cur_state(struct thermal_cooling_device *cooling_dev, unsigned long st
struct acpi_video_device *video = acpi_driver_data(device);
int level;
- if ( state >= video->brightness->count - 2)
+ if (state >= video->brightness->count - 2)
return -EINVAL;
state = video->brightness->count - state;
- level = video->brightness->levels[state -1];
+ level = video->brightness->levels[state - 1];
return acpi_video_device_lcd_set_level(video, level);
}
@@ -316,9 +314,11 @@ static const struct thermal_cooling_device_ops video_cooling_ops = {
.set_cur_state = video_set_cur_state,
};
-/* --------------------------------------------------------------------------
- Video Management
- -------------------------------------------------------------------------- */
+/*
+ * --------------------------------------------------------------------------
+ * Video Management
+ * --------------------------------------------------------------------------
+ */
static int
acpi_video_device_lcd_query_levels(struct acpi_video_device *device,
@@ -345,7 +345,7 @@ acpi_video_device_lcd_query_levels(struct acpi_video_device *device,
return 0;
- err:
+err:
kfree(buffer.pointer);
return status;
@@ -355,14 +355,10 @@ static int
acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
{
int status;
- union acpi_object arg0 = { ACPI_TYPE_INTEGER };
- struct acpi_object_list args = { 1, &arg0 };
int state;
- arg0.integer.value = level;
-
- status = acpi_evaluate_object(device->dev->handle, "_BCM",
- &args, NULL);
+ status = acpi_execute_simple_method(device->dev->handle,
+ "_BCM", level);
if (ACPI_FAILURE(status)) {
ACPI_ERROR((AE_INFO, "Evaluating _BCM failed"));
return -EIO;
@@ -546,7 +542,7 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
if (device->brightness->levels[i] == *level) {
device->brightness->curr = *level;
return 0;
- }
+ }
/*
* BQC returned an invalid level.
* Stop using it.
@@ -556,7 +552,8 @@ acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
buf));
device->cap._BQC = device->cap._BCQ = 0;
} else {
- /* Fixme:
+ /*
+ * Fixme:
* should we return an error or ignore this failure?
* dev->brightness->curr is a cached value which stores
* the correct current backlight level in most cases.
@@ -615,8 +612,8 @@ acpi_video_device_EDID(struct acpi_video_device *device,
/*
* Arg:
- * video : video bus device pointer
- * bios_flag :
+ * video : video bus device pointer
+ * bios_flag :
* 0. The system BIOS should NOT automatically switch (toggle)
* the active display output.
* 1. The system BIOS should automatically switch (toggle) the
@@ -628,9 +625,9 @@ acpi_video_device_EDID(struct acpi_video_device *device,
* lcd_flag :
* 0. The system BIOS should automatically control the brightness level
* of the LCD when the power changes from AC to DC
- * 1. The system BIOS should NOT automatically control the brightness
+ * 1. The system BIOS should NOT automatically control the brightness
* level of the LCD when the power changes from AC to DC.
- * Return Value:
+ * Return Value:
* -EINVAL wrong arg.
*/
@@ -638,18 +635,15 @@ static int
acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag)
{
acpi_status status;
- union acpi_object arg0 = { ACPI_TYPE_INTEGER };
- struct acpi_object_list args = { 1, &arg0 };
if (!video->cap._DOS)
return 0;
if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1)
return -EINVAL;
- arg0.integer.value = (lcd_flag << 2) | bios_flag;
- video->dos_setting = arg0.integer.value;
- status = acpi_evaluate_object(video->device->handle, "_DOS",
- &args, NULL);
+ video->dos_setting = (lcd_flag << 2) | bios_flag;
+ status = acpi_execute_simple_method(video->device->handle, "_DOS",
+ (lcd_flag << 2) | bios_flag);
if (ACPI_FAILURE(status))
return -EIO;
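As the conversion above makes explicit, the _DOS argument is packed as (lcd_flag << 2) | bios_flag. A worked illustration of the two documented extremes (editorial note, not patch content):

/*
 *   bios_flag = 1, lcd_flag = 0  ->  _DOS argument 0x1
 *     (BIOS auto-switches the display; BIOS controls brightness on AC/DC change)
 *   bios_flag = 0, lcd_flag = 1  ->  _DOS argument 0x4
 *     (no automatic display switching; the OS controls brightness on AC/DC change)
 */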
@@ -689,7 +683,7 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device,
* Some systems always report current brightness level as maximum
* through _BQC, we need to test another value for them.
*/
- test_level = current_level == max_level ? br->levels[2] : max_level;
+ test_level = current_level == max_level ? br->levels[3] : max_level;
result = acpi_video_device_lcd_set_level(device, test_level);
if (result)
@@ -717,8 +711,8 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device,
/*
- * Arg:
- * device : video output device (LCD, CRT, ..)
+ * Arg:
+ * device : video output device (LCD, CRT, ..)
*
* Return Value:
* Maximum brightness level
@@ -806,16 +800,6 @@ acpi_video_init_brightness(struct acpi_video_device *device)
br->count = count;
device->brightness = br;
- /* Check the input/output of _BQC/_BCL/_BCM */
- if ((max_level < 100) && (max_level <= (count - 2)))
- br->flags._BCL_use_index = 1;
-
- /*
- * _BCM is always consistent with _BCL,
- * at least for all the laptops we have ever seen.
- */
- br->flags._BCM_use_index = br->flags._BCL_use_index;
-
/* _BQC uses INDEX while _BCL uses VALUE in some laptops */
br->curr = level = max_level;
@@ -877,7 +861,7 @@ out:
* device : video output device (LCD, CRT, ..)
*
* Return Value:
- * None
+ * None
*
* Find out all required AML methods defined under the output
* device.
@@ -885,41 +869,34 @@ out:
static void acpi_video_device_find_cap(struct acpi_video_device *device)
{
- acpi_handle h_dummy1;
-
- if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_ADR", &h_dummy1))) {
+ if (acpi_has_method(device->dev->handle, "_ADR"))
device->cap._ADR = 1;
- }
- if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCL", &h_dummy1))) {
+ if (acpi_has_method(device->dev->handle, "_BCL"))
device->cap._BCL = 1;
- }
- if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCM", &h_dummy1))) {
+ if (acpi_has_method(device->dev->handle, "_BCM"))
device->cap._BCM = 1;
- }
- if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle,"_BQC",&h_dummy1)))
+ if (acpi_has_method(device->dev->handle, "_BQC")) {
device->cap._BQC = 1;
- else if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_BCQ",
- &h_dummy1))) {
+ } else if (acpi_has_method(device->dev->handle, "_BCQ")) {
printk(KERN_WARNING FW_BUG "_BCQ is used instead of _BQC\n");
device->cap._BCQ = 1;
}
- if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) {
+ if (acpi_has_method(device->dev->handle, "_DDC"))
device->cap._DDC = 1;
- }
- if (acpi_video_init_brightness(device))
- return;
-
- if (acpi_video_verify_backlight_support()) {
+ if (acpi_video_backlight_support()) {
struct backlight_properties props;
struct pci_dev *pdev;
acpi_handle acpi_parent;
struct device *parent = NULL;
int result;
- static int count = 0;
+ static int count;
char *name;
+ result = acpi_video_init_brightness(device);
+ if (result)
+ return;
name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
if (!name)
return;
@@ -979,46 +956,33 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
if (result)
printk(KERN_ERR PREFIX "Create sysfs link\n");
- } else {
- /* Remove the brightness object. */
- kfree(device->brightness->levels);
- kfree(device->brightness);
- device->brightness = NULL;
}
}
/*
- * Arg:
- * device : video output device (VGA)
+ * Arg:
+ * device : video output device (VGA)
*
* Return Value:
- * None
+ * None
*
* Find out all required AML methods defined under the video bus device.
*/
static void acpi_video_bus_find_cap(struct acpi_video_bus *video)
{
- acpi_handle h_dummy1;
-
- if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_DOS", &h_dummy1))) {
+ if (acpi_has_method(video->device->handle, "_DOS"))
video->cap._DOS = 1;
- }
- if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_DOD", &h_dummy1))) {
+ if (acpi_has_method(video->device->handle, "_DOD"))
video->cap._DOD = 1;
- }
- if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_ROM", &h_dummy1))) {
+ if (acpi_has_method(video->device->handle, "_ROM"))
video->cap._ROM = 1;
- }
- if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_GPD", &h_dummy1))) {
+ if (acpi_has_method(video->device->handle, "_GPD"))
video->cap._GPD = 1;
- }
- if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_SPD", &h_dummy1))) {
+ if (acpi_has_method(video->device->handle, "_SPD"))
video->cap._SPD = 1;
- }
- if (ACPI_SUCCESS(acpi_get_handle(video->device->handle, "_VPO", &h_dummy1))) {
+ if (acpi_has_method(video->device->handle, "_VPO"))
video->cap._VPO = 1;
- }
}
/*
@@ -1039,7 +1003,8 @@ static int acpi_video_bus_check(struct acpi_video_bus *video)
return -ENODEV;
pci_dev_put(dev);
- /* Since there is no HID, CID and so on for VGA driver, we have
+ /*
+ * Since there is no HID, CID and so on for VGA driver, we have
* to check well known required nodes.
*/
@@ -1069,12 +1034,14 @@ static int acpi_video_bus_check(struct acpi_video_bus *video)
return status;
}
-/* --------------------------------------------------------------------------
- Driver Interface
- -------------------------------------------------------------------------- */
+/*
+ * --------------------------------------------------------------------------
+ * Driver Interface
+ * --------------------------------------------------------------------------
+ */
/* device interface */
-static struct acpi_video_device_attrib*
+static struct acpi_video_device_attrib *
acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id)
{
struct acpi_video_enumerated_device *ids;
@@ -1112,7 +1079,7 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
unsigned long long device_id;
int status, device_type;
struct acpi_video_device *data;
- struct acpi_video_device_attrib* attribute;
+ struct acpi_video_device_attrib *attribute;
status =
acpi_evaluate_integer(device->handle, "_ADR", NULL, &device_id);
@@ -1134,7 +1101,7 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
attribute = acpi_video_get_device_attr(video, device_id);
- if((attribute != NULL) && attribute->device_id_scheme) {
+ if (attribute && attribute->device_id_scheme) {
switch (attribute->display_type) {
case ACPI_VIDEO_DISPLAY_CRT:
data->flags.crt = 1;
@@ -1152,24 +1119,24 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
data->flags.unknown = 1;
break;
}
- if(attribute->bios_can_detect)
+ if (attribute->bios_can_detect)
data->flags.bios = 1;
} else {
/* Check for legacy IDs */
device_type = acpi_video_get_device_type(video, device_id);
/* Ignore bits 16 and 18-20 */
switch (device_type & 0xffe2ffff) {
- case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR:
- data->flags.crt = 1;
- break;
- case ACPI_VIDEO_DISPLAY_LEGACY_PANEL:
- data->flags.lcd = 1;
- break;
- case ACPI_VIDEO_DISPLAY_LEGACY_TV:
- data->flags.tvout = 1;
- break;
- default:
- data->flags.unknown = 1;
+ case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR:
+ data->flags.crt = 1;
+ break;
+ case ACPI_VIDEO_DISPLAY_LEGACY_PANEL:
+ data->flags.lcd = 1;
+ break;
+ case ACPI_VIDEO_DISPLAY_LEGACY_TV:
+ data->flags.tvout = 1;
+ break;
+ default:
+ data->flags.unknown = 1;
}
}
@@ -1192,12 +1159,12 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
/*
* Arg:
- * video : video bus device
+ * video : video bus device
*
* Return:
- * none
- *
- * Enumerate the video device list of the video bus,
+ * none
+ *
+ * Enumerate the video device list of the video bus,
* bind the ids with the corresponding video devices
* under the video bus.
*/
@@ -1216,13 +1183,13 @@ static void acpi_video_device_rebind(struct acpi_video_bus *video)
/*
* Arg:
- * video : video bus device
- * device : video output device under the video
- * bus
+ * video : video bus device
+ * device : video output device under the video
+ * bus
*
* Return:
- * none
- *
+ * none
+ *
* Bind the ids with the corresponding video devices
* under the video bus.
*/
@@ -1245,11 +1212,11 @@ acpi_video_device_bind(struct acpi_video_bus *video,
/*
* Arg:
- * video : video bus device
+ * video : video bus device
*
* Return:
- * < 0 : error
- *
+ * < 0 : error
+ *
* Call _DOD to enumerate all devices attached to display adapter
*
*/
@@ -1310,7 +1277,7 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
video->attached_array = active_list;
video->attached_count = count;
- out:
+out:
kfree(buffer.pointer);
return status;
}
@@ -1366,8 +1333,8 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
unsigned long long level_current, level_next;
int result = -EINVAL;
- /* no warning message if acpi_backlight=vendor or a quirk is used */
- if (!acpi_video_verify_backlight_support())
+ /* no warning message if acpi_backlight=vendor is used */
+ if (!acpi_video_backlight_support())
return 0;
if (!device->brightness)
@@ -1577,7 +1544,6 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
switch (event) {
case ACPI_VIDEO_NOTIFY_SWITCH: /* User requested a switch,
* most likely via hotkey. */
- acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_SWITCHVIDEOMODE;
break;
@@ -1585,20 +1551,16 @@ static void acpi_video_bus_notify(struct acpi_device *device, u32 event)
* connector. */
acpi_video_device_enumerate(video);
acpi_video_device_rebind(video);
- acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_SWITCHVIDEOMODE;
break;
case ACPI_VIDEO_NOTIFY_CYCLE: /* Cycle Display output hotkey pressed. */
- acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_SWITCHVIDEOMODE;
break;
case ACPI_VIDEO_NOTIFY_NEXT_OUTPUT: /* Next Display output hotkey pressed. */
- acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_VIDEO_NEXT;
break;
case ACPI_VIDEO_NOTIFY_PREV_OUTPUT: /* previous Display output hotkey pressed. */
- acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_VIDEO_PREV;
break;
@@ -1641,31 +1603,26 @@ static void acpi_video_device_notify(acpi_handle handle, u32 event, void *data)
case ACPI_VIDEO_NOTIFY_CYCLE_BRIGHTNESS: /* Cycle brightness */
if (brightness_switch_enabled)
acpi_video_switch_brightness(video_device, event);
- acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_BRIGHTNESS_CYCLE;
break;
case ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS: /* Increase brightness */
if (brightness_switch_enabled)
acpi_video_switch_brightness(video_device, event);
- acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_BRIGHTNESSUP;
break;
case ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS: /* Decrease brightness */
if (brightness_switch_enabled)
acpi_video_switch_brightness(video_device, event);
- acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_BRIGHTNESSDOWN;
break;
case ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS: /* zero brightness */
if (brightness_switch_enabled)
acpi_video_switch_brightness(video_device, event);
- acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_BRIGHTNESS_ZERO;
break;
case ACPI_VIDEO_NOTIFY_DISPLAY_OFF: /* display device off */
if (brightness_switch_enabled)
acpi_video_switch_brightness(video_device, event);
- acpi_bus_generate_proc_event(device, event, 0);
keycode = KEY_DISPLAY_OFF;
break;
default:
@@ -1765,7 +1722,7 @@ static int acpi_video_bus_add(struct acpi_device *device)
if (!strcmp(device->pnp.bus_id, "VID")) {
if (instance)
device->pnp.bus_id[3] = '0' + instance;
- instance ++;
+ instance++;
}
/* a hack to fix the duplicate name "VGA" problem on Pa 3553 */
if (!strcmp(device->pnp.bus_id, "VGA")) {
@@ -1875,46 +1832,6 @@ static int acpi_video_bus_remove(struct acpi_device *device)
return 0;
}
-static acpi_status video_unregister_backlight(acpi_handle handle, u32 lvl,
- void *context, void **rv)
-{
- struct acpi_device *acpi_dev;
- struct acpi_video_bus *video;
- struct acpi_video_device *dev, *next;
-
- if (acpi_bus_get_device(handle, &acpi_dev))
- return AE_OK;
-
- if (acpi_match_device_ids(acpi_dev, video_device_ids))
- return AE_OK;
-
- video = acpi_driver_data(acpi_dev);
- if (!video)
- return AE_OK;
-
- acpi_video_bus_stop_devices(video);
- mutex_lock(&video->device_list_lock);
- list_for_each_entry_safe(dev, next, &video->video_device_list, entry) {
- if (dev->backlight) {
- backlight_device_unregister(dev->backlight);
- dev->backlight = NULL;
- kfree(dev->brightness->levels);
- kfree(dev->brightness);
- }
- if (dev->cooling_dev) {
- sysfs_remove_link(&dev->dev->dev.kobj,
- "thermal_cooling");
- sysfs_remove_link(&dev->cooling_dev->device.kobj,
- "device");
- thermal_cooling_device_unregister(dev->cooling_dev);
- dev->cooling_dev = NULL;
- }
- }
- mutex_unlock(&video->device_list_lock);
- acpi_video_bus_start_devices(video);
- return AE_OK;
-}
-
static int __init is_i740(struct pci_dev *dev)
{
if (dev->device == 0x00D1)
@@ -1946,25 +1863,14 @@ static int __init intel_opregion_present(void)
return opregion;
}
-int __acpi_video_register(bool backlight_quirks)
+int acpi_video_register(void)
{
- bool no_backlight;
- int result;
-
- no_backlight = backlight_quirks ? acpi_video_backlight_quirks() : false;
-
+ int result = 0;
if (register_count) {
/*
- * If acpi_video_register() has been called already, don't try
- * to register acpi_video_bus, but unregister backlight devices
- * if no backlight support is requested.
+ * If acpi_video_register() has already been called, don't register
+ * the acpi_video_bus again and return no error.
*/
- if (no_backlight)
- acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
- ACPI_UINT32_MAX,
- video_unregister_backlight,
- NULL, NULL, NULL);
-
return 0;
}
@@ -1980,7 +1886,7 @@ int __acpi_video_register(bool backlight_quirks)
return 0;
}
-EXPORT_SYMBOL(__acpi_video_register);
+EXPORT_SYMBOL(acpi_video_register);
void acpi_video_unregister(void)
{
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 826e52def08..940edbf2fe8 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -53,14 +53,13 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
void **return_value)
{
long *cap = context;
- acpi_handle h_dummy;
- if (ACPI_SUCCESS(acpi_get_handle(handle, "_BCM", &h_dummy)) &&
- ACPI_SUCCESS(acpi_get_handle(handle, "_BCL", &h_dummy))) {
+ if (acpi_has_method(handle, "_BCM") &&
+ acpi_has_method(handle, "_BCL")) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found generic backlight "
"support\n"));
*cap |= ACPI_VIDEO_BACKLIGHT;
- if (ACPI_FAILURE(acpi_get_handle(handle, "_BQC", &h_dummy)))
+ if (!acpi_has_method(handle, "_BQC"))
printk(KERN_WARNING FW_BUG PREFIX "No _BQC method, "
"cannot determine initial brightness\n");
/* We have backlight support, no need to scan further */
@@ -79,22 +78,20 @@ acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
*/
long acpi_is_video_device(acpi_handle handle)
{
- acpi_handle h_dummy;
long video_caps = 0;
/* Is this device able to support video switching ? */
- if (ACPI_SUCCESS(acpi_get_handle(handle, "_DOD", &h_dummy)) ||
- ACPI_SUCCESS(acpi_get_handle(handle, "_DOS", &h_dummy)))
+ if (acpi_has_method(handle, "_DOD") || acpi_has_method(handle, "_DOS"))
video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
/* Is this device able to retrieve a video ROM ? */
- if (ACPI_SUCCESS(acpi_get_handle(handle, "_ROM", &h_dummy)))
+ if (acpi_has_method(handle, "_ROM"))
video_caps |= ACPI_VIDEO_ROM_AVAILABLE;
/* Is this device able to configure which video head to be POSTed ? */
- if (ACPI_SUCCESS(acpi_get_handle(handle, "_VPO", &h_dummy)) &&
- ACPI_SUCCESS(acpi_get_handle(handle, "_GPD", &h_dummy)) &&
- ACPI_SUCCESS(acpi_get_handle(handle, "_SPD", &h_dummy)))
+ if (acpi_has_method(handle, "_VPO") &&
+ acpi_has_method(handle, "_GPD") &&
+ acpi_has_method(handle, "_SPD"))
video_caps |= ACPI_VIDEO_DEVICE_POSTING;
/* Only check for backlight functionality if one of the above hit. */
@@ -238,12 +235,7 @@ static void acpi_video_caps_check(void)
bool acpi_video_backlight_quirks(void)
{
- if (acpi_gbl_osi_data >= ACPI_OSI_WIN_8) {
- acpi_video_caps_check();
- acpi_video_support |= ACPI_VIDEO_SKIP_BACKLIGHT;
- return true;
- }
- return false;
+ return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
}
EXPORT_SYMBOL(acpi_video_backlight_quirks);
@@ -291,14 +283,6 @@ int acpi_video_backlight_support(void)
}
EXPORT_SYMBOL(acpi_video_backlight_support);
-/* For the ACPI video driver use only. */
-bool acpi_video_verify_backlight_support(void)
-{
- return (acpi_video_support & ACPI_VIDEO_SKIP_BACKLIGHT) ?
- false : acpi_video_backlight_support();
-}
-EXPORT_SYMBOL(acpi_video_verify_backlight_support);
-
/*
* Use acpi_backlight=vendor/video to force that backlight switching
* is processed by vendor specific acpi drivers or video.ko driver.
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 80dc988f01e..4e737728aee 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -97,6 +97,15 @@ config SATA_AHCI_PLATFORM
If unsure, say N.
+config AHCI_IMX
+ tristate "Freescale i.MX AHCI SATA support"
+ depends on SATA_AHCI_PLATFORM && MFD_SYSCON
+ help
+ This option enables support for the Freescale i.MX SoC's
+ onboard AHCI SATA.
+
+ If unsure, say N.
+
config SATA_FSL
tristate "Freescale 3.0Gbps SATA support"
depends on FSL_SOC
@@ -107,7 +116,7 @@ config SATA_FSL
If unsure, say N.
config SATA_INIC162X
- tristate "Initio 162x SATA support"
+ tristate "Initio 162x SATA support (Very Experimental)"
depends on PCI
help
This option enables support for Initio 162x Serial ATA.
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index c04d0fd038a..46518c62246 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_SATA_INIC162X) += sata_inic162x.o
obj-$(CONFIG_SATA_SIL24) += sata_sil24.o
obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o
obj-$(CONFIG_SATA_HIGHBANK) += sata_highbank.o libahci.o
+obj-$(CONFIG_AHCI_IMX) += ahci_imx.o
# SFF w/ custom DMA
obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5064f3ea20f..9d715ae5ff6 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -1146,11 +1146,18 @@ int ahci_host_activate(struct ata_host *host, int irq, unsigned int n_msis)
return rc;
for (i = 0; i < host->n_ports; i++) {
+ const char* desc;
struct ahci_port_priv *pp = host->ports[i]->private_data;
+ /* pp is NULL for dummy ports */
+ if (pp)
+ desc = pp->irq_desc;
+ else
+ desc = dev_driver_string(host->dev);
+
rc = devm_request_threaded_irq(host->dev,
irq + i, ahci_hw_interrupt, ahci_thread_fn, IRQF_SHARED,
- pp->irq_desc, host->ports[i]);
+ desc, host->ports[i]);
if (rc)
goto out_free_irqs;
}
@@ -1288,6 +1295,14 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
if (!(hpriv->flags & AHCI_HFLAG_NO_FPDMA_AA))
pi.flags |= ATA_FLAG_FPDMA_AA;
+
+ /*
+ * All AHCI controllers should be forward-compatible
+ * with the new auxiliary field. This code should be
+ * conditionalized if any buggy AHCI controllers are
+ * encountered.
+ */
+ pi.flags |= ATA_FLAG_FPDMA_AUX;
}
if (hpriv->cap & HOST_CAP_PMP)
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
new file mode 100644
index 00000000000..58debb0acc3
--- /dev/null
+++ b/drivers/ata/ahci_imx.c
@@ -0,0 +1,236 @@
+/*
+ * Freescale IMX AHCI SATA platform driver
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * based on the AHCI SATA platform driver by Jeff Garzik and Anton Vorontsov
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/ahci_platform.h>
+#include <linux/of_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
+#include "ahci.h"
+
+enum {
+ HOST_TIMER1MS = 0xe0, /* Timer 1-ms */
+};
+
+struct imx_ahci_priv {
+ struct platform_device *ahci_pdev;
+ struct clk *sata_ref_clk;
+ struct clk *ahb_clk;
+ struct regmap *gpr;
+};
+
+static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
+{
+ int ret = 0;
+ unsigned int reg_val;
+ struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+
+ imxpriv->gpr =
+ syscon_regmap_lookup_by_compatible("fsl,imx6q-iomuxc-gpr");
+ if (IS_ERR(imxpriv->gpr)) {
+ dev_err(dev, "failed to find fsl,imx6q-iomux-gpr regmap\n");
+ return PTR_ERR(imxpriv->gpr);
+ }
+
+ ret = clk_prepare_enable(imxpriv->sata_ref_clk);
+ if (ret < 0) {
+ dev_err(dev, "prepare-enable sata_ref clock err:%d\n", ret);
+ return ret;
+ }
+
+ /*
+ * set PHY Paremeters, two steps to configure the GPR13,
+ * one write for rest of parameters, mask of first write
+ * is 0x07fffffd, and the other one write for setting
+ * the mpll_clk_en.
+ */
+ regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK
+ | IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK
+ | IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK
+ | IMX6Q_GPR13_SATA_SPD_MODE_MASK
+ | IMX6Q_GPR13_SATA_MPLL_SS_EN
+ | IMX6Q_GPR13_SATA_TX_ATTEN_MASK
+ | IMX6Q_GPR13_SATA_TX_BOOST_MASK
+ | IMX6Q_GPR13_SATA_TX_LVL_MASK
+ | IMX6Q_GPR13_SATA_TX_EDGE_RATE
+ , IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB
+ | IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M
+ | IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F
+ | IMX6Q_GPR13_SATA_SPD_MODE_3P0G
+ | IMX6Q_GPR13_SATA_MPLL_SS_EN
+ | IMX6Q_GPR13_SATA_TX_ATTEN_9_16
+ | IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB
+ | IMX6Q_GPR13_SATA_TX_LVL_1_025_V);
+ regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+ IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+ usleep_range(100, 200);
+
+ /*
+ * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
+ * and IP vendor specific register HOST_TIMER1MS.
+ * Configure CAP_SSS (support staggered spin-up).
+ * Mark port 0 as implemented.
+ * Get the ahb clock rate, and configure the TIMER1MS register.
+ */
+ reg_val = readl(mmio + HOST_CAP);
+ if (!(reg_val & HOST_CAP_SSS)) {
+ reg_val |= HOST_CAP_SSS;
+ writel(reg_val, mmio + HOST_CAP);
+ }
+ reg_val = readl(mmio + HOST_PORTS_IMPL);
+ if (!(reg_val & 0x1)) {
+ reg_val |= 0x1;
+ writel(reg_val, mmio + HOST_PORTS_IMPL);
+ }
+
+ reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
+ writel(reg_val, mmio + HOST_TIMER1MS);
+
+ return 0;
+}
+
+static void imx6q_sata_exit(struct device *dev)
+{
+ struct imx_ahci_priv *imxpriv = dev_get_drvdata(dev->parent);
+
+ regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_MPLL_CLK_EN,
+ !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
+ clk_disable_unprepare(imxpriv->sata_ref_clk);
+}
+
+static struct ahci_platform_data imx6q_sata_pdata = {
+ .init = imx6q_sata_init,
+ .exit = imx6q_sata_exit,
+};
+
+static const struct of_device_id imx_ahci_of_match[] = {
+ { .compatible = "fsl,imx6q-ahci", .data = &imx6q_sata_pdata},
+ {},
+};
+MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
+
+static int imx_ahci_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *mem, *irq, res[2];
+ const struct of_device_id *of_id;
+ const struct ahci_platform_data *pdata = NULL;
+ struct imx_ahci_priv *imxpriv;
+ struct device *ahci_dev;
+ struct platform_device *ahci_pdev;
+ int ret;
+
+ imxpriv = devm_kzalloc(dev, sizeof(*imxpriv), GFP_KERNEL);
+ if (!imxpriv) {
+ dev_err(dev, "can't alloc ahci_host_priv\n");
+ return -ENOMEM;
+ }
+
+ ahci_pdev = platform_device_alloc("ahci", -1);
+ if (!ahci_pdev)
+ return -ENODEV;
+
+ ahci_dev = &ahci_pdev->dev;
+ ahci_dev->parent = dev;
+
+ imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
+ if (IS_ERR(imxpriv->ahb_clk)) {
+ dev_err(dev, "can't get ahb clock.\n");
+ ret = PTR_ERR(imxpriv->ahb_clk);
+ goto err_out;
+ }
+
+ imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref");
+ if (IS_ERR(imxpriv->sata_ref_clk)) {
+ dev_err(dev, "can't get sata_ref clock.\n");
+ ret = PTR_ERR(imxpriv->sata_ref_clk);
+ goto err_out;
+ }
+
+ imxpriv->ahci_pdev = ahci_pdev;
+ platform_set_drvdata(pdev, imxpriv);
+
+ of_id = of_match_device(imx_ahci_of_match, dev);
+ if (of_id) {
+ pdata = of_id->data;
+ } else {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!mem || !irq) {
+ dev_err(dev, "no mmio/irq resource\n");
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ res[0] = *mem;
+ res[1] = *irq;
+
+ ahci_dev->coherent_dma_mask = DMA_BIT_MASK(32);
+ ahci_dev->dma_mask = &ahci_dev->coherent_dma_mask;
+ ahci_dev->of_node = dev->of_node;
+
+ ret = platform_device_add_resources(ahci_pdev, res, 2);
+ if (ret)
+ goto err_out;
+
+ ret = platform_device_add_data(ahci_pdev, pdata, sizeof(*pdata));
+ if (ret)
+ goto err_out;
+
+ ret = platform_device_add(ahci_pdev);
+ if (ret) {
+err_out:
+ platform_device_put(ahci_pdev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int imx_ahci_remove(struct platform_device *pdev)
+{
+ struct imx_ahci_priv *imxpriv = platform_get_drvdata(pdev);
+ struct platform_device *ahci_pdev = imxpriv->ahci_pdev;
+
+ platform_device_unregister(ahci_pdev);
+ return 0;
+}
+
+static struct platform_driver imx_ahci_driver = {
+ .probe = imx_ahci_probe,
+ .remove = imx_ahci_remove,
+ .driver = {
+ .name = "ahci-imx",
+ .owner = THIS_MODULE,
+ .of_match_table = imx_ahci_of_match,
+ },
+};
+module_platform_driver(imx_ahci_driver);
+
+MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
+MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ahci:imx");
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index b52a10c8eeb..513ad7ed0c9 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -330,7 +330,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
/* SATA Controller IDE (Wellsburg) */
{ 0x8086, 0x8d00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Wellsburg) */
- { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
+ { 0x8086, 0x8d08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
/* SATA Controller IDE (Wellsburg) */
{ 0x8086, 0x8d60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Wellsburg) */
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index cf4e7020ada..4ba8b040557 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -34,14 +34,6 @@ struct ata_acpi_gtf {
u8 tf[REGS_PER_GTF]; /* regs. 0x1f1 - 0x1f7 */
} __packed;
-/*
- * Helper - belongs in the PCI layer somewhere eventually
- */
-static int is_pci_dev(struct device *dev)
-{
- return (dev->bus == &pci_bus_type);
-}
-
static void ata_acpi_clear_gtf(struct ata_device *dev)
{
kfree(dev->gtf_cache);
@@ -49,47 +41,18 @@ static void ata_acpi_clear_gtf(struct ata_device *dev)
}
/**
- * ata_ap_acpi_handle - provide the acpi_handle for an ata_port
- * @ap: the acpi_handle returned will correspond to this port
- *
- * Returns the acpi_handle for the ACPI namespace object corresponding to
- * the ata_port passed into the function, or NULL if no such object exists
- */
-acpi_handle ata_ap_acpi_handle(struct ata_port *ap)
-{
- if (ap->flags & ATA_FLAG_ACPI_SATA)
- return NULL;
-
- return ap->scsi_host ?
- DEVICE_ACPI_HANDLE(&ap->scsi_host->shost_gendev) : NULL;
-}
-EXPORT_SYMBOL(ata_ap_acpi_handle);
-
-/**
* ata_dev_acpi_handle - provide the acpi_handle for an ata_device
- * @dev: the acpi_device returned will correspond to this port
+ * @dev: the acpi_handle returned will correspond to this device
*
* Returns the acpi_handle for the ACPI namespace object corresponding to
* the ata_device passed into the function, or NULL if no such object exists
+ * or ACPI is disabled for this device due to consecutive errors.
*/
acpi_handle ata_dev_acpi_handle(struct ata_device *dev)
{
- acpi_integer adr;
- struct ata_port *ap = dev->link->ap;
-
- if (libata_noacpi || dev->flags & ATA_DFLAG_ACPI_DISABLED)
- return NULL;
-
- if (ap->flags & ATA_FLAG_ACPI_SATA) {
- if (!sata_pmp_attached(ap))
- adr = SATA_ADR(ap->port_no, NO_PORT_MULT);
- else
- adr = SATA_ADR(ap->port_no, dev->link->pmp);
- return acpi_get_child(DEVICE_ACPI_HANDLE(ap->host->dev), adr);
- } else
- return acpi_get_child(ata_ap_acpi_handle(ap), dev->devno);
+ return dev->flags & ATA_DFLAG_ACPI_DISABLED ?
+ NULL : ACPI_HANDLE(&dev->tdev);
}
-EXPORT_SYMBOL(ata_dev_acpi_handle);
/* @ap and @dev are the same as ata_acpi_handle_hotplug() */
static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev)
@@ -156,10 +119,8 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
spin_unlock_irqrestore(ap->lock, flags);
- if (wait) {
+ if (wait)
ata_port_wait_eh(ap);
- flush_work(&ap->hotplug_task.work);
- }
}
static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data)
@@ -216,37 +177,55 @@ static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
.uevent = ata_acpi_ap_uevent,
};
-void ata_acpi_hotplug_init(struct ata_host *host)
+/* bind acpi handle to pata port */
+void ata_acpi_bind_port(struct ata_port *ap)
{
- int i;
+ acpi_handle host_handle = ACPI_HANDLE(ap->host->dev);
- for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap = host->ports[i];
- acpi_handle handle;
- struct ata_device *dev;
+ if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA || !host_handle)
+ return;
- if (!ap)
- continue;
+ ACPI_HANDLE_SET(&ap->tdev, acpi_get_child(host_handle, ap->port_no));
- handle = ata_ap_acpi_handle(ap);
- if (handle) {
- /* we might be on a docking station */
- register_hotplug_dock_device(handle,
- &ata_acpi_ap_dock_ops, ap,
- NULL, NULL);
- }
+ if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0)
+ ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
- ata_for_each_dev(dev, &ap->link, ALL) {
- handle = ata_dev_acpi_handle(dev);
- if (!handle)
- continue;
+ /* we might be on a docking station */
+ register_hotplug_dock_device(ACPI_HANDLE(&ap->tdev),
+ &ata_acpi_ap_dock_ops, ap, NULL, NULL);
+}
- /* we might be on a docking station */
- register_hotplug_dock_device(handle,
- &ata_acpi_dev_dock_ops,
- dev, NULL, NULL);
- }
+void ata_acpi_bind_dev(struct ata_device *dev)
+{
+ struct ata_port *ap = dev->link->ap;
+ acpi_handle port_handle = ACPI_HANDLE(&ap->tdev);
+ acpi_handle host_handle = ACPI_HANDLE(ap->host->dev);
+ acpi_handle parent_handle;
+ u64 adr;
+
+ /*
+ * For both sata/pata devices, host handle is required.
+ * For pata device, port handle is also required.
+ */
+ if (libata_noacpi || !host_handle ||
+ (!(ap->flags & ATA_FLAG_ACPI_SATA) && !port_handle))
+ return;
+
+ if (ap->flags & ATA_FLAG_ACPI_SATA) {
+ if (!sata_pmp_attached(ap))
+ adr = SATA_ADR(ap->port_no, NO_PORT_MULT);
+ else
+ adr = SATA_ADR(ap->port_no, dev->link->pmp);
+ parent_handle = host_handle;
+ } else {
+ adr = dev->devno;
+ parent_handle = port_handle;
}
+
+ ACPI_HANDLE_SET(&dev->tdev, acpi_get_child(parent_handle, adr));
+
+ register_hotplug_dock_device(ata_dev_acpi_handle(dev),
+ &ata_acpi_dev_dock_ops, dev, NULL, NULL);
}
/**
@@ -270,18 +249,34 @@ void ata_acpi_dissociate(struct ata_host *host)
struct ata_port *ap = host->ports[i];
const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
- if (ata_ap_acpi_handle(ap) && gtm)
+ if (ACPI_HANDLE(&ap->tdev) && gtm)
ata_acpi_stm(ap, gtm);
}
}
-static int __ata_acpi_gtm(struct ata_port *ap, acpi_handle handle,
- struct ata_acpi_gtm *gtm)
+/**
+ * ata_acpi_gtm - execute _GTM
+ * @ap: target ATA port
+ * @gtm: out parameter for _GTM result
+ *
+ * Evaluate _GTM and store the result in @gtm.
+ *
+ * LOCKING:
+ * EH context.
+ *
+ * RETURNS:
+ * 0 on success, -ENOENT if _GTM doesn't exist, -errno on failure.
+ */
+int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *gtm)
{
struct acpi_buffer output = { .length = ACPI_ALLOCATE_BUFFER };
union acpi_object *out_obj;
acpi_status status;
int rc = 0;
+ acpi_handle handle = ACPI_HANDLE(&ap->tdev);
+
+ if (!handle)
+ return -EINVAL;
status = acpi_evaluate_object(handle, "_GTM", NULL, &output);
@@ -317,27 +312,6 @@ static int __ata_acpi_gtm(struct ata_port *ap, acpi_handle handle,
return rc;
}
-/**
- * ata_acpi_gtm - execute _GTM
- * @ap: target ATA port
- * @gtm: out parameter for _GTM result
- *
- * Evaluate _GTM and store the result in @gtm.
- *
- * LOCKING:
- * EH context.
- *
- * RETURNS:
- * 0 on success, -ENOENT if _GTM doesn't exist, -errno on failure.
- */
-int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *gtm)
-{
- if (ata_ap_acpi_handle(ap))
- return __ata_acpi_gtm(ap, ata_ap_acpi_handle(ap), gtm);
- else
- return -EINVAL;
-}
-
EXPORT_SYMBOL_GPL(ata_acpi_gtm);
/**
@@ -374,8 +348,8 @@ int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm)
input.count = 3;
input.pointer = in_params;
- status = acpi_evaluate_object(ata_ap_acpi_handle(ap), "_STM", &input,
- NULL);
+ status = acpi_evaluate_object(ACPI_HANDLE(&ap->tdev), "_STM",
+ &input, NULL);
if (status == AE_NOT_FOUND)
return -ENOENT;
@@ -850,7 +824,7 @@ void ata_acpi_on_resume(struct ata_port *ap)
const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap);
struct ata_device *dev;
- if (ata_ap_acpi_handle(ap) && gtm) {
+ if (ACPI_HANDLE(&ap->tdev) && gtm) {
/* _GTM valid */
/* restore timing parameters */
@@ -894,8 +868,7 @@ static int ata_acpi_choose_suspend_state(struct ata_device *dev, bool runtime)
d_max_in = ACPI_STATE_D3_HOT;
out:
- return acpi_pm_device_sleep_state(&dev->sdev->sdev_gendev,
- NULL, d_max_in);
+ return acpi_pm_device_sleep_state(&dev->tdev, NULL, d_max_in);
}
static void sata_acpi_set_state(struct ata_port *ap, pm_message_t state)
@@ -932,7 +905,7 @@ static void pata_acpi_set_state(struct ata_port *ap, pm_message_t state)
struct ata_device *dev;
acpi_handle port_handle;
- port_handle = ata_ap_acpi_handle(ap);
+ port_handle = ACPI_HANDLE(&ap->tdev);
if (!port_handle)
return;
@@ -947,11 +920,11 @@ static void pata_acpi_set_state(struct ata_port *ap, pm_message_t state)
continue;
acpi_bus_set_power(dev_handle, state.event & PM_EVENT_RESUME ?
- ACPI_STATE_D0 : ACPI_STATE_D3);
+ ACPI_STATE_D0 : ACPI_STATE_D3_COLD);
}
if (!(state.event & PM_EVENT_RESUME))
- acpi_bus_set_power(port_handle, ACPI_STATE_D3);
+ acpi_bus_set_power(port_handle, ACPI_STATE_D3_COLD);
}
/**
@@ -1063,109 +1036,16 @@ void ata_acpi_on_disable(struct ata_device *dev)
ata_acpi_clear_gtf(dev);
}
-static int compat_pci_ata(struct ata_port *ap)
-{
- struct device *dev = ap->tdev.parent;
- struct pci_dev *pdev;
-
- if (!is_pci_dev(dev))
- return 0;
-
- pdev = to_pci_dev(dev);
-
- if ((pdev->class >> 8) != PCI_CLASS_STORAGE_SATA &&
- (pdev->class >> 8) != PCI_CLASS_STORAGE_IDE)
- return 0;
-
- return 1;
-}
-
-static int ata_acpi_bind_host(struct ata_port *ap, acpi_handle *handle)
-{
- if (libata_noacpi || ap->flags & ATA_FLAG_ACPI_SATA)
- return -ENODEV;
-
- *handle = acpi_get_child(DEVICE_ACPI_HANDLE(ap->tdev.parent),
- ap->port_no);
-
- if (!*handle)
- return -ENODEV;
-
- if (__ata_acpi_gtm(ap, *handle, &ap->__acpi_init_gtm) == 0)
- ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
-
- return 0;
-}
-
-static int ata_acpi_bind_device(struct ata_port *ap, struct scsi_device *sdev,
- acpi_handle *handle)
-{
- struct ata_device *ata_dev;
-
- if (ap->flags & ATA_FLAG_ACPI_SATA) {
- if (!sata_pmp_attached(ap))
- ata_dev = &ap->link.device[sdev->id];
- else
- ata_dev = &ap->pmp_link[sdev->channel].device[sdev->id];
- }
- else {
- ata_dev = &ap->link.device[sdev->id];
- }
-
- *handle = ata_dev_acpi_handle(ata_dev);
-
- if (!*handle)
- return -ENODEV;
-
- return 0;
-}
-
-static int is_ata_port(const struct device *dev)
-{
- return dev->type == &ata_port_type;
-}
-
-static struct ata_port *dev_to_ata_port(struct device *dev)
-{
- while (!is_ata_port(dev)) {
- if (!dev->parent)
- return NULL;
- dev = dev->parent;
- }
- return to_ata_port(dev);
-}
-
-static int ata_acpi_find_device(struct device *dev, acpi_handle *handle)
-{
- struct ata_port *ap = dev_to_ata_port(dev);
-
- if (!ap)
- return -ENODEV;
-
- if (!compat_pci_ata(ap))
- return -ENODEV;
-
- if (scsi_is_host_device(dev))
- return ata_acpi_bind_host(ap, handle);
- else if (scsi_is_sdev_device(dev)) {
- struct scsi_device *sdev = to_scsi_device(dev);
-
- return ata_acpi_bind_device(ap, sdev, handle);
- } else
- return -ENODEV;
-}
-
-static struct acpi_bus_type ata_acpi_bus = {
- .name = "ATA",
- .find_device = ata_acpi_find_device,
-};
-
-int ata_acpi_register(void)
+void ata_scsi_acpi_bind(struct ata_device *dev)
{
- return scsi_register_acpi_bus_type(&ata_acpi_bus);
+ acpi_handle handle = ata_dev_acpi_handle(dev);
+ if (handle)
+ acpi_dev_pm_add_dependent(handle, &dev->sdev->sdev_gendev);
}
-void ata_acpi_unregister(void)
+void ata_scsi_acpi_unbind(struct ata_device *dev)
{
- scsi_unregister_acpi_bus_type(&ata_acpi_bus);
+ acpi_handle handle = ata_dev_acpi_handle(dev);
+ if (handle)
+ acpi_dev_pm_remove_dependent(handle, &dev->sdev->sdev_gendev);
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c24354d44f3..83b1a9fb2d4 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -569,10 +569,10 @@ void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
fis[14] = 0;
fis[15] = tf->ctl;
- fis[16] = 0;
- fis[17] = 0;
- fis[18] = 0;
- fis[19] = 0;
+ fis[16] = tf->auxiliary & 0xff;
+ fis[17] = (tf->auxiliary >> 8) & 0xff;
+ fis[18] = (tf->auxiliary >> 16) & 0xff;
+ fis[19] = (tf->auxiliary >> 24) & 0xff;
}
/**
@@ -2139,6 +2139,22 @@ static int ata_dev_config_ncq(struct ata_device *dev,
else
snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth,
ddepth, aa_desc);
+
+ if ((ap->flags & ATA_FLAG_FPDMA_AUX) &&
+ ata_id_has_ncq_send_and_recv(dev->id)) {
+ err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV,
+ 0, ap->sector_buf, 1);
+ if (err_mask) {
+ ata_dev_dbg(dev,
+ "failed to get NCQ Send/Recv Log Emask 0x%x\n",
+ err_mask);
+ } else {
+ dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
+ memcpy(dev->ncq_send_recv_cmds, ap->sector_buf,
+ ATA_LOG_NCQ_SEND_RECV_SIZE);
+ }
+ }
+
return 0;
}
@@ -6150,8 +6166,6 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
if (rc)
goto err_tadd;
- ata_acpi_hotplug_init(host);
-
/* set cable, sata_spd_limit and report */
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
@@ -6632,8 +6646,6 @@ static int __init ata_init(void)
ata_parse_force_param();
- ata_acpi_register();
-
rc = ata_sff_init();
if (rc) {
kfree(ata_force_tbl);
@@ -6660,7 +6672,6 @@ static void __exit ata_exit(void)
ata_release_transport(ata_scsi_transport_template);
libata_transport_exit();
ata_sff_exit();
- ata_acpi_unregister();
kfree(ata_force_tbl);
}
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 1c41722bb7e..20fd337a573 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -289,24 +289,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info)
/* Disable sending Early R_OK.
* With "cached read" HDD testing and multiple ports busy on a SATA
- * host controller, 3726 PMP will very rarely drop a deferred
+ * host controller, 3x26 PMP will very rarely drop a deferred
* R_OK that was intended for the host. Symptom will be all
* 5 drives under test will timeout, get reset, and recover.
*/
- if (vendor == 0x1095 && devid == 0x3726) {
+ if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
u32 reg;
err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, &reg);
if (err_mask) {
rc = -EIO;
- reason = "failed to read Sil3726 Private Register";
+ reason = "failed to read Sil3x26 Private Register";
goto fail;
}
reg &= ~0x1;
err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg);
if (err_mask) {
rc = -EIO;
- reason = "failed to write Sil3726 Private Register";
+ reason = "failed to write Sil3x26 Private Register";
goto fail;
}
}
@@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap)
u16 devid = sata_pmp_gscr_devid(gscr);
struct ata_link *link;
- if (vendor == 0x1095 && devid == 0x3726) {
- /* sil3726 quirks */
+ if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) {
+ /* sil3x26 quirks */
ata_for_each_link(link, ap, EDGE) {
/* link reports offline after LPM */
link->flags |= ATA_LFLAG_NO_LPM;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 83c08907e04..97a0cef1295 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -49,7 +49,6 @@
#include <linux/hdreg.h>
#include <linux/uaccess.h>
#include <linux/suspend.h>
-#include <linux/pm_qos.h>
#include <asm/unaligned.h>
#include "libata.h"
@@ -206,8 +205,10 @@ static ssize_t ata_scsi_park_store(struct device *device,
unsigned long flags;
int rc;
- rc = strict_strtol(buf, 10, &input);
- if (rc || input < -2)
+ rc = kstrtol(buf, 10, &input);
+ if (rc)
+ return rc;
+ if (input < -2)
return -EINVAL;
if (input > ATA_TMOUT_MAX_PARK) {
rc = -EOVERFLOW;
@@ -3098,12 +3099,25 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
buf = page_address(sg_page(scsi_sglist(scmd)));
size = ata_set_lba_range_entries(buf, 512, block, n_block);
- tf->protocol = ATA_PROT_DMA;
- tf->hob_feature = 0;
- tf->feature = ATA_DSM_TRIM;
- tf->hob_nsect = (size / 512) >> 8;
- tf->nsect = size / 512;
- tf->command = ATA_CMD_DSM;
+ if (ata_ncq_enabled(dev) && ata_fpdma_dsm_supported(dev)) {
+ /* Newer devices support queued TRIM commands */
+ tf->protocol = ATA_PROT_NCQ;
+ tf->command = ATA_CMD_FPDMA_SEND;
+ tf->hob_nsect = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f;
+ tf->nsect = qc->tag << 3;
+ tf->hob_feature = (size / 512) >> 8;
+ tf->feature = size / 512;
+
+ tf->auxiliary = 1;
+ } else {
+ tf->protocol = ATA_PROT_DMA;
+ tf->hob_feature = 0;
+ tf->feature = ATA_DSM_TRIM;
+ tf->hob_nsect = (size / 512) >> 8;
+ tf->nsect = size / 512;
+ tf->command = ATA_CMD_DSM;
+ }
+
tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 |
ATA_TFLAG_WRITE;
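A worked illustration of the new NCQ branch above (hypothetical values, editorial note only): a TRIM whose range list fits in a single 512-byte payload block, queued on NCQ tag 5, produces:

/*
 *   tf->command     = ATA_CMD_FPDMA_SEND
 *   tf->hob_nsect   = ATA_SUBCMD_FPDMA_SEND_DSM & 0x1f   (subcommand field)
 *   tf->nsect       = 5 << 3 = 0x28                      (tag in bits 7:3)
 *   tf->feature     = 512 / 512 = 1                      (payload length in 512-byte units, low byte)
 *   tf->hob_feature = (512 / 512) >> 8 = 0               (payload length, high byte)
 *   tf->auxiliary   = 1
 */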
@@ -3665,9 +3679,7 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
if (!IS_ERR(sdev)) {
dev->sdev = sdev;
scsi_device_put(sdev);
- if (zpodd_dev_enabled(dev))
- dev_pm_qos_expose_flags(
- &sdev->sdev_gendev, 0);
+ ata_scsi_acpi_bind(dev);
} else {
dev->sdev = NULL;
}
@@ -3755,6 +3767,8 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
struct scsi_device *sdev;
unsigned long flags;
+ ata_scsi_acpi_unbind(dev);
+
/* Alas, we need to grab scan_mutex to ensure SCSI device
* state doesn't change underneath us and thus
* scsi_device_get() always succeeds. The mutex locking can
@@ -3764,9 +3778,6 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
mutex_lock(&ap->scsi_host->scan_mutex);
spin_lock_irqsave(ap->lock, flags);
- if (zpodd_dev_enabled(dev))
- zpodd_exit(dev);
-
/* clearing dev->sdev is protected by host lock */
sdev = dev->sdev;
dev->sdev = NULL;
@@ -3816,6 +3827,9 @@ static void ata_scsi_handle_link_detach(struct ata_link *link)
dev->flags &= ~ATA_DFLAG_DETACHED;
spin_unlock_irqrestore(ap->lock, flags);
+ if (zpodd_dev_enabled(dev))
+ zpodd_exit(dev);
+
ata_scsi_remove_dev(dev);
}
}
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index 077a856f5fd..150a917f0c3 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -287,6 +287,7 @@ int ata_tport_add(struct device *parent,
dev->release = ata_tport_release;
dev_set_name(dev, "ata%d", ap->print_id);
transport_setup_device(dev);
+ ata_acpi_bind_port(ap);
error = device_add(dev);
if (error) {
goto tport_err;
@@ -644,6 +645,7 @@ static int ata_tdev_add(struct ata_device *ata_dev)
dev_set_name(dev, "dev%d.%d.0", ap->print_id, link->pmp);
transport_setup_device(dev);
+ ata_acpi_bind_dev(ata_dev);
error = device_add(dev);
if (error) {
ata_tdev_free(ata_dev);
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index cd8daf47188..68f9e3293e9 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -2,6 +2,7 @@
#include <linux/cdrom.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
+#include <linux/pm_qos.h>
#include <scsi/scsi_device.h>
#include "libata.h"
@@ -190,8 +191,8 @@ void zpodd_enable_run_wake(struct ata_device *dev)
sdev_disable_disk_events(dev->sdev);
zpodd->powered_off = true;
- device_set_run_wake(&dev->sdev->sdev_gendev, true);
- acpi_pm_device_run_wake(&dev->sdev->sdev_gendev, true);
+ device_set_run_wake(&dev->tdev, true);
+ acpi_pm_device_run_wake(&dev->tdev, true);
}
/* Disable runtime wake capability if it is enabled */
@@ -200,8 +201,8 @@ void zpodd_disable_run_wake(struct ata_device *dev)
struct zpodd *zpodd = dev->zpodd;
if (zpodd->powered_off) {
- acpi_pm_device_run_wake(&dev->sdev->sdev_gendev, false);
- device_set_run_wake(&dev->sdev->sdev_gendev, false);
+ acpi_pm_device_run_wake(&dev->tdev, false);
+ device_set_run_wake(&dev->tdev, false);
}
}
@@ -262,7 +263,7 @@ static void ata_acpi_add_pm_notifier(struct ata_device *dev)
static void ata_acpi_remove_pm_notifier(struct ata_device *dev)
{
- acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->sdev->sdev_gendev);
+ acpi_handle handle = ata_dev_acpi_handle(dev);
acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY, zpodd_wake_dev);
}
@@ -290,6 +291,7 @@ void zpodd_init(struct ata_device *dev)
ata_acpi_add_pm_notifier(dev);
zpodd->dev = dev;
dev->zpodd = zpodd;
+ dev_pm_qos_expose_flags(&dev->tdev, 0);
}
void zpodd_exit(struct ata_device *dev)
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 577d902bc4d..eeeb77845d4 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -118,11 +118,11 @@ extern void ata_acpi_on_resume(struct ata_port *ap);
extern int ata_acpi_on_devcfg(struct ata_device *dev);
extern void ata_acpi_on_disable(struct ata_device *dev);
extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
-extern int ata_acpi_register(void);
-extern void ata_acpi_unregister(void);
-extern void ata_acpi_bind(struct ata_device *dev);
-extern void ata_acpi_unbind(struct ata_device *dev);
-extern void ata_acpi_hotplug_init(struct ata_host *host);
+extern void ata_acpi_bind_port(struct ata_port *ap);
+extern void ata_acpi_bind_dev(struct ata_device *dev);
+extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev);
+extern void ata_scsi_acpi_bind(struct ata_device *dev);
+extern void ata_scsi_acpi_unbind(struct ata_device *dev);
#else
static inline void ata_acpi_dissociate(struct ata_host *host) { }
static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
@@ -131,11 +131,10 @@ static inline int ata_acpi_on_devcfg(struct ata_device *dev) { return 0; }
static inline void ata_acpi_on_disable(struct ata_device *dev) { }
static inline void ata_acpi_set_state(struct ata_port *ap,
pm_message_t state) { }
-static inline int ata_acpi_register(void) { return 0; }
-static inline void ata_acpi_unregister(void) { }
-static inline void ata_acpi_bind(struct ata_device *dev) { }
-static inline void ata_acpi_unbind(struct ata_device *dev) { }
-static inline void ata_acpi_hotplug_init(struct ata_host *host) {}
+static inline void ata_acpi_bind_port(struct ata_port *ap) {}
+static inline void ata_acpi_bind_dev(struct ata_device *dev) {}
+static inline void ata_scsi_acpi_bind(struct ata_device *dev) {}
+static inline void ata_scsi_acpi_unbind(struct ata_device *dev) {}
#endif
/* libata-scsi.c */
diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
index 09723b76bea..73212c9c6d5 100644
--- a/drivers/ata/pata_acpi.c
+++ b/drivers/ata/pata_acpi.c
@@ -39,7 +39,7 @@ static int pacpi_pre_reset(struct ata_link *link, unsigned long deadline)
{
struct ata_port *ap = link->ap;
struct pata_acpi *acpi = ap->private_data;
- if (ata_ap_acpi_handle(ap) == NULL || ata_acpi_gtm(ap, &acpi->gtm) < 0)
+ if (ACPI_HANDLE(&ap->tdev) == NULL || ata_acpi_gtm(ap, &acpi->gtm) < 0)
return -ENODEV;
return ata_sff_prereset(link, deadline);
@@ -195,7 +195,7 @@ static int pacpi_port_start(struct ata_port *ap)
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
struct pata_acpi *acpi;
- if (ata_ap_acpi_handle(ap) == NULL)
+ if (ACPI_HANDLE(&ap->tdev) == NULL)
return -ENODEV;
acpi = ap->private_data = devm_kzalloc(&pdev->dev, sizeof(struct pata_acpi), GFP_KERNEL);
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 848ed3254dd..853f610af28 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -654,7 +654,7 @@ static void arasan_cf_freeze(struct ata_port *ap)
ata_sff_freeze(ap);
}
-void arasan_cf_error_handler(struct ata_port *ap)
+static void arasan_cf_error_handler(struct ata_port *ap)
{
struct arasan_cf_dev *acdev = ap->host->private_data;
@@ -683,7 +683,7 @@ static void arasan_cf_dma_start(struct arasan_cf_dev *acdev)
ata_sff_queue_work(&acdev->work);
}
-unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc)
+static unsigned int arasan_cf_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct arasan_cf_dev *acdev = ap->host->private_data;
diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
index 8d493b4a096..d59d5239405 100644
--- a/drivers/ata/pata_at32.c
+++ b/drivers/ata/pata_at32.c
@@ -271,7 +271,7 @@ static int __init pata_at32_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct at32_ide_info *info;
- struct ide_platform_data *board = pdev->dev.platform_data;
+ struct ide_platform_data *board = dev_get_platdata(&pdev->dev);
struct resource *res;
int irq;
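
[note] This and the following platform-driver hunks all make the same mechanical switch to the dev_get_platdata() accessor, which simply returns dev->platform_data without reaching into struct device directly. A sketch with made-up names:

#include <linux/device.h>
#include <linux/platform_device.h>

struct example_pdata {
        unsigned int ioport_shift;
};

static int example_probe(struct platform_device *pdev)
{
        struct example_pdata *pdata = dev_get_platdata(&pdev->dev);

        if (!pdata)
                return -EINVAL;         /* no board-supplied platform data */
        /* ... use pdata->ioport_shift ... */
        return 0;
}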
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index 5364f97b42c..d63ee8f41a4 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -315,7 +315,7 @@ static struct ata_port_operations pata_at91_port_ops = {
static int pata_at91_probe(struct platform_device *pdev)
{
- struct at91_cf_data *board = pdev->dev.platform_data;
+ struct at91_cf_data *board = dev_get_platdata(&pdev->dev);
struct device *dev = &pdev->dev;
struct at91_ide_info *info;
struct resource *mem_res;
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 4ec7c04b3f8..26386f0b89a 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -237,6 +237,7 @@ static const struct of_device_id imx_pata_dt_ids[] = {
/* sentinel */
}
};
+MODULE_DEVICE_TABLE(of, imx_pata_dt_ids);
static struct platform_driver pata_imx_driver = {
.probe = pata_imx_probe,
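
[note] The one-line MODULE_DEVICE_TABLE() addition above is what lets udev autoload the module from the device-tree "compatible" string. A sketch of the pattern it completes; the compatible string and all names here are illustrative, not taken from the driver:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id example_dt_ids[] = {
        { .compatible = "example,pata" },       /* hypothetical compatible */
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_dt_ids);        /* emits the modalias table */
/* the platform_driver then points at it via .driver.of_match_table = example_dt_ids */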
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index dcc6b243e52..1ec53f8ca96 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -48,7 +48,7 @@ static unsigned int ixp4xx_mmio_data_xfer(struct ata_device *dev,
u16 *buf16 = (u16 *) buf;
struct ata_port *ap = dev->link->ap;
void __iomem *mmio = ap->ioaddr.data_addr;
- struct ixp4xx_pata_data *data = ap->host->dev->platform_data;
+ struct ixp4xx_pata_data *data = dev_get_platdata(ap->host->dev);
/* set the expansion bus in 16bit mode and restore
* 8 bit mode after the transaction.
@@ -143,7 +143,7 @@ static int ixp4xx_pata_probe(struct platform_device *pdev)
struct resource *cs0, *cs1;
struct ata_host *host;
struct ata_port *ap;
- struct ixp4xx_pata_data *data = pdev->dev.platform_data;
+ struct ixp4xx_pata_data *data = dev_get_platdata(&pdev->dev);
cs0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
cs1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index e73bef3093d..c51bbb9ea8e 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -1037,7 +1037,7 @@ static void octeon_cf_shutdown(struct device *dev)
union cvmx_mio_boot_dma_cfgx dma_cfg;
union cvmx_mio_boot_dma_intx dma_int;
- struct octeon_cf_port *cf_port = dev->platform_data;
+ struct octeon_cf_port *cf_port = dev_get_platdata(dev);
if (cf_port->dma_base) {
/* Stop and clear the dma engine. */
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 71e093767f4..02794885de1 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -180,7 +180,7 @@ static int pata_platform_probe(struct platform_device *pdev)
struct resource *io_res;
struct resource *ctl_res;
struct resource *irq_res;
- struct pata_platform_info *pp_info = pdev->dev.platform_data;
+ struct pata_platform_info *pp_info = dev_get_platdata(&pdev->dev);
/*
* Simple resource validation ..
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index 942ef94b29e..a6f05acad61 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -238,7 +238,7 @@ static int pxa_ata_probe(struct platform_device *pdev)
struct resource *ctl_res;
struct resource *dma_res;
struct resource *irq_res;
- struct pata_pxa_pdata *pdata = pdev->dev.platform_data;
+ struct pata_pxa_pdata *pdata = dev_get_platdata(&pdev->dev);
int ret = 0;
/*
diff --git a/drivers/ata/pata_samsung_cf.c b/drivers/ata/pata_samsung_cf.c
index 6ef27e98c50..898e544a7ae 100644
--- a/drivers/ata/pata_samsung_cf.c
+++ b/drivers/ata/pata_samsung_cf.c
@@ -241,8 +241,8 @@ static u8 pata_s3c_check_altstatus(struct ata_port *ap)
/*
* pata_s3c_data_xfer - Transfer data by PIO
*/
-unsigned int pata_s3c_data_xfer(struct ata_device *dev, unsigned char *buf,
- unsigned int buflen, int rw)
+static unsigned int pata_s3c_data_xfer(struct ata_device *dev,
+ unsigned char *buf, unsigned int buflen, int rw)
{
struct ata_port *ap = dev->link->ap;
struct s3c_ide_info *info = ap->host->private_data;
@@ -418,7 +418,7 @@ static struct ata_port_operations pata_s5p_port_ops = {
.set_piomode = pata_s3c_set_piomode,
};
-static void pata_s3c_enable(void *s3c_ide_regbase, bool state)
+static void pata_s3c_enable(void __iomem *s3c_ide_regbase, bool state)
{
u32 temp = readl(s3c_ide_regbase + S3C_ATA_CTRL);
temp = state ? (temp | 1) : (temp & ~1);
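
[note] The hunk above only adds the __iomem annotation: MMIO pointers carry it so sparse can flag plain dereferences, and the registers are only touched through readl()/writel(). A standalone sketch with a made-up register offset:

#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_CTRL    0x00            /* hypothetical register offset */

static void example_enable(void __iomem *base, bool on)
{
        u32 val = readl(base + EXAMPLE_CTRL);

        val = on ? (val | 1) : (val & ~1);
        writel(val, base + EXAMPLE_CTRL);
}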
@@ -475,7 +475,7 @@ static void pata_s3c_hwinit(struct s3c_ide_info *info,
static int __init pata_s3c_probe(struct platform_device *pdev)
{
- struct s3c_ide_platdata *pdata = pdev->dev.platform_data;
+ struct s3c_ide_platdata *pdata = dev_get_platdata(&pdev->dev);
struct device *dev = &pdev->dev;
struct s3c_ide_info *info;
struct resource *res;
@@ -617,7 +617,7 @@ static int pata_s3c_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct ata_host *host = platform_get_drvdata(pdev);
- struct s3c_ide_platdata *pdata = pdev->dev.platform_data;
+ struct s3c_ide_platdata *pdata = dev_get_platdata(&pdev->dev);
struct s3c_ide_info *info = host->private_data;
pata_s3c_hwinit(info, pdata);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 19720a0a4a6..851bd3f43ac 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -293,6 +293,7 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
{
struct sata_fsl_host_priv *host_priv = host->private_data;
void __iomem *hcr_base = host_priv->hcr_base;
+ unsigned long flags;
if (count > ICC_MAX_INT_COUNT_THRESHOLD)
count = ICC_MAX_INT_COUNT_THRESHOLD;
@@ -305,12 +306,12 @@ static void fsl_sata_set_irq_coalescing(struct ata_host *host,
(count > ICC_MIN_INT_COUNT_THRESHOLD))
ticks = ICC_SAFE_INT_TICKS;
- spin_lock(&host->lock);
+ spin_lock_irqsave(&host->lock, flags);
iowrite32((count << 24 | ticks), hcr_base + ICC);
intr_coalescing_count = count;
intr_coalescing_ticks = ticks;
- spin_unlock(&host->lock);
+ spin_unlock_irqrestore(&host->lock, flags);
DPRINTK("interrupt coalescing, count = 0x%x, ticks = %x\n",
intr_coalescing_count, intr_coalescing_ticks);
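
[note] The locking change above matters because the same host lock can also be taken from interrupt context; the process-context path must disable local interrupts while holding it, or it can deadlock against its own IRQ handler. A minimal sketch, names hypothetical:

#include <linux/io.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_write_reg(u32 value, void __iomem *reg)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);        /* lock + local IRQs off */
        iowrite32(value, reg);
        spin_unlock_irqrestore(&example_lock, flags);   /* restore prior IRQ state */
}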
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index d047d92a456..7f5e5d96327 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -46,14 +46,19 @@
#define CR_BUSY 0x0001
#define CR_START 0x0001
#define CR_WR_RDN 0x0002
+#define CPHY_TX_INPUT_STS 0x2001
#define CPHY_RX_INPUT_STS 0x2002
-#define CPHY_SATA_OVERRIDE 0x4000
-#define CPHY_OVERRIDE 0x2005
+#define CPHY_SATA_TX_OVERRIDE 0x8000
+#define CPHY_SATA_RX_OVERRIDE 0x4000
+#define CPHY_TX_OVERRIDE 0x2004
+#define CPHY_RX_OVERRIDE 0x2005
#define SPHY_LANE 0x100
#define SPHY_HALF_RATE 0x0001
#define CPHY_SATA_DPLL_MODE 0x0700
#define CPHY_SATA_DPLL_SHIFT 8
#define CPHY_SATA_DPLL_RESET (1 << 11)
+#define CPHY_SATA_TX_ATTEN 0x1c00
+#define CPHY_SATA_TX_ATTEN_SHIFT 10
#define CPHY_PHY_COUNT 6
#define CPHY_LANE_COUNT 4
#define CPHY_PORT_COUNT (CPHY_PHY_COUNT * CPHY_LANE_COUNT)
@@ -66,6 +71,7 @@ struct phy_lane_info {
void __iomem *phy_base;
u8 lane_mapping;
u8 phy_devs;
+ u8 tx_atten;
};
static struct phy_lane_info port_data[CPHY_PORT_COUNT];
@@ -76,9 +82,11 @@ static DEFINE_SPINLOCK(sgpio_lock);
#define SGPIO_PINS 3
#define SGPIO_PORTS 8
-/* can be cast as an ahci_host_priv for compatibility with most functions */
struct ecx_plat_data {
u32 n_ports;
+ /* number of extra clocks that the SGPIO PIC controller expects */
+ u32 pre_clocks;
+ u32 post_clocks;
unsigned sgpio_gpio[SGPIO_PINS];
u32 sgpio_pattern;
u32 port_to_sgpio[SGPIO_PORTS];
@@ -86,11 +94,11 @@ struct ecx_plat_data {
#define SGPIO_SIGNALS 3
#define ECX_ACTIVITY_BITS 0x300000
-#define ECX_ACTIVITY_SHIFT 2
+#define ECX_ACTIVITY_SHIFT 0
#define ECX_LOCATE_BITS 0x80000
#define ECX_LOCATE_SHIFT 1
#define ECX_FAULT_BITS 0x400000
-#define ECX_FAULT_SHIFT 0
+#define ECX_FAULT_SHIFT 2
static inline int sgpio_bit_shift(struct ecx_plat_data *pdata, u32 port,
u32 shift)
{
@@ -155,6 +163,9 @@ static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
spin_lock_irqsave(&sgpio_lock, flags);
ecx_parse_sgpio(pdata, ap->port_no, state);
sgpio_out = pdata->sgpio_pattern;
+ for (i = 0; i < pdata->pre_clocks; i++)
+ ecx_led_cycle_clock(pdata);
+
gpio_set_value(pdata->sgpio_gpio[SLOAD], 1);
ecx_led_cycle_clock(pdata);
gpio_set_value(pdata->sgpio_gpio[SLOAD], 0);
@@ -167,6 +178,8 @@ static ssize_t ecx_transmit_led_message(struct ata_port *ap, u32 state,
sgpio_out >>= 1;
ecx_led_cycle_clock(pdata);
}
+ for (i = 0; i < pdata->post_clocks; i++)
+ ecx_led_cycle_clock(pdata);
/* save off new led state for port/slot */
emp->led_state = state;
@@ -201,6 +214,11 @@ static void highbank_set_em_messages(struct device *dev,
of_property_read_u32_array(np, "calxeda,led-order",
pdata->port_to_sgpio,
pdata->n_ports);
+ if (of_property_read_u32(np, "calxeda,pre-clocks", &pdata->pre_clocks))
+ pdata->pre_clocks = 0;
+ if (of_property_read_u32(np, "calxeda,post-clocks",
+ &pdata->post_clocks))
+ pdata->post_clocks = 0;
/* store em_loc */
hpriv->em_loc = 0;
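
[note] The two properties above are optional: of_property_read_u32() returns non-zero and leaves the output untouched when the property is absent, so the default has to be assigned explicitly. A sketch reusing the property name added by the patch:

#include <linux/of.h>

static u32 example_pre_clocks(struct device_node *np)
{
        u32 pre_clocks;

        if (of_property_read_u32(np, "calxeda,pre-clocks", &pre_clocks))
                pre_clocks = 0;         /* property missing: fall back to 0 */

        return pre_clocks;
}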
@@ -259,8 +277,27 @@ static void highbank_cphy_disable_overrides(u8 sata_port)
if (unlikely(port_data[sata_port].phy_base == NULL))
return;
tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
- tmp &= ~CPHY_SATA_OVERRIDE;
- combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp);
+ tmp &= ~CPHY_SATA_RX_OVERRIDE;
+ combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
+}
+
+static void cphy_override_tx_attenuation(u8 sata_port, u32 val)
+{
+ u8 lane = port_data[sata_port].lane_mapping;
+ u32 tmp;
+
+ if (val & 0x8)
+ return;
+
+ tmp = combo_phy_read(sata_port, CPHY_TX_INPUT_STS + lane * SPHY_LANE);
+ tmp &= ~CPHY_SATA_TX_OVERRIDE;
+ combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
+
+ tmp |= CPHY_SATA_TX_OVERRIDE;
+ combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
+
+ tmp |= (val << CPHY_SATA_TX_ATTEN_SHIFT) & CPHY_SATA_TX_ATTEN;
+ combo_phy_write(sata_port, CPHY_TX_OVERRIDE + lane * SPHY_LANE, tmp);
}
static void cphy_override_rx_mode(u8 sata_port, u32 val)
@@ -268,21 +305,21 @@ static void cphy_override_rx_mode(u8 sata_port, u32 val)
u8 lane = port_data[sata_port].lane_mapping;
u32 tmp;
tmp = combo_phy_read(sata_port, CPHY_RX_INPUT_STS + lane * SPHY_LANE);
- tmp &= ~CPHY_SATA_OVERRIDE;
- combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp);
+ tmp &= ~CPHY_SATA_RX_OVERRIDE;
+ combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
- tmp |= CPHY_SATA_OVERRIDE;
- combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp);
+ tmp |= CPHY_SATA_RX_OVERRIDE;
+ combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
tmp &= ~CPHY_SATA_DPLL_MODE;
tmp |= val << CPHY_SATA_DPLL_SHIFT;
- combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp);
+ combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
tmp |= CPHY_SATA_DPLL_RESET;
- combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp);
+ combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
tmp &= ~CPHY_SATA_DPLL_RESET;
- combo_phy_write(sata_port, CPHY_OVERRIDE + lane * SPHY_LANE, tmp);
+ combo_phy_write(sata_port, CPHY_RX_OVERRIDE + lane * SPHY_LANE, tmp);
msleep(15);
}
@@ -299,16 +336,20 @@ static void highbank_cphy_override_lane(u8 sata_port)
lane * SPHY_LANE);
} while ((tmp & SPHY_HALF_RATE) && (k++ < 1000));
cphy_override_rx_mode(sata_port, 3);
+ cphy_override_tx_attenuation(sata_port, port_data[sata_port].tx_atten);
}
static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
{
struct device_node *sata_node = dev->of_node;
- int phy_count = 0, phy, port = 0;
+ int phy_count = 0, phy, port = 0, i;
void __iomem *cphy_base[CPHY_PHY_COUNT];
struct device_node *phy_nodes[CPHY_PHY_COUNT];
+ u32 tx_atten[CPHY_PORT_COUNT];
+
memset(port_data, 0, sizeof(struct phy_lane_info) * CPHY_PORT_COUNT);
memset(phy_nodes, 0, sizeof(struct device_node*) * CPHY_PHY_COUNT);
+ memset(tx_atten, 0xff, CPHY_PORT_COUNT);
do {
u32 tmp;
@@ -336,6 +377,10 @@ static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
of_node_put(phy_data.np);
port += 1;
} while (port < CPHY_PORT_COUNT);
+ of_property_read_u32_array(sata_node, "calxeda,tx-atten",
+ tx_atten, port);
+ for (i = 0; i < port; i++)
+ port_data[i].tx_atten = (u8) tx_atten[i];
return 0;
}
@@ -479,6 +524,9 @@ static int ahci_highbank_probe(struct platform_device *pdev)
if (hpriv->cap & HOST_CAP_PMP)
pi.flags |= ATA_FLAG_PMP;
+ if (hpriv->cap & HOST_CAP_64)
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
+
/* CAP.NP sometimes indicate the index of the last enabled
* port, at other times, that of the last possible port, so
* determining the maximum port number requires looking at
diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
index e4513174824..5c54d957370 100644
--- a/drivers/ata/sata_inic162x.c
+++ b/drivers/ata/sata_inic162x.c
@@ -6,6 +6,18 @@
*
* This file is released under GPL v2.
*
+ * **** WARNING ****
+ *
+ * This driver never worked properly and unfortunately data corruption is
+ * relatively common. There isn't anyone working on the driver and there's
+ * no support from the vendor. Do not use this driver in any production
+ * environment.
+ *
+ * http://thread.gmane.org/gmane.linux.debian.devel.bugs.rc/378525/focus=54491
+ * https://bugzilla.kernel.org/show_bug.cgi?id=60565
+ *
+ * *****************
+ *
* This controller is eccentric and easily locks up if something isn't
* right. Documentation is available at initio's website but it only
* documents registers (not programming model).
@@ -807,6 +819,8 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ata_print_version_once(&pdev->dev, DRV_VERSION);
+ dev_alert(&pdev->dev, "inic162x support is broken with common data corruption issues and will be disabled by default, contact linux-ide@vger.kernel.org if in production use\n");
+
/* alloc host */
host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS);
hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 35c6b6d09c2..56be3181989 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -553,10 +553,15 @@ struct mv_host_priv {
u32 irq_mask_offset;
u32 unmask_all_irqs;
-#if defined(CONFIG_HAVE_CLK)
+ /*
+ * Needed on some devices that require their clocks to be enabled.
+ * These are optional: if the platform device does not have any
+ * clocks, they won't be used. Also, if the underlying hardware
+ * does not support the common clock framework (CONFIG_HAVE_CLK=n),
+ * all the clock operations become no-ops (see clk.h).
+ */
struct clk *clk;
struct clk **port_clks;
-#endif
/*
* These consistent DMA memory pools give us guaranteed
* alignment for hardware-accessed data structures,
@@ -4032,9 +4037,7 @@ static int mv_platform_probe(struct platform_device *pdev)
struct resource *res;
int n_ports = 0, irq = 0;
int rc;
-#if defined(CONFIG_HAVE_CLK)
int port;
-#endif
ata_print_version_once(&pdev->dev, DRV_VERSION);
@@ -4058,7 +4061,7 @@ static int mv_platform_probe(struct platform_device *pdev)
of_property_read_u32(pdev->dev.of_node, "nr-ports", &n_ports);
irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
} else {
- mv_platform_data = pdev->dev.platform_data;
+ mv_platform_data = dev_get_platdata(&pdev->dev);
n_ports = mv_platform_data->n_ports;
irq = platform_get_irq(pdev, 0);
}
@@ -4068,13 +4071,11 @@ static int mv_platform_probe(struct platform_device *pdev)
if (!host || !hpriv)
return -ENOMEM;
-#if defined(CONFIG_HAVE_CLK)
hpriv->port_clks = devm_kzalloc(&pdev->dev,
sizeof(struct clk *) * n_ports,
GFP_KERNEL);
if (!hpriv->port_clks)
return -ENOMEM;
-#endif
host->private_data = hpriv;
hpriv->n_ports = n_ports;
hpriv->board_idx = chip_soc;
@@ -4084,7 +4085,6 @@ static int mv_platform_probe(struct platform_device *pdev)
resource_size(res));
hpriv->base -= SATAHC0_REG_BASE;
-#if defined(CONFIG_HAVE_CLK)
hpriv->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(hpriv->clk))
dev_notice(&pdev->dev, "cannot get optional clkdev\n");
@@ -4098,7 +4098,6 @@ static int mv_platform_probe(struct platform_device *pdev)
if (!IS_ERR(hpriv->port_clks[port]))
clk_prepare_enable(hpriv->port_clks[port]);
}
-#endif
/*
* (Re-)program MBUS remapping windows if we are asked to.
@@ -4124,7 +4123,6 @@ static int mv_platform_probe(struct platform_device *pdev)
return 0;
err:
-#if defined(CONFIG_HAVE_CLK)
if (!IS_ERR(hpriv->clk)) {
clk_disable_unprepare(hpriv->clk);
clk_put(hpriv->clk);
@@ -4135,7 +4133,6 @@ err:
clk_put(hpriv->port_clks[port]);
}
}
-#endif
return rc;
}
@@ -4151,13 +4148,10 @@ err:
static int mv_platform_remove(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
-#if defined(CONFIG_HAVE_CLK)
struct mv_host_priv *hpriv = host->private_data;
int port;
-#endif
ata_host_detach(host);
-#if defined(CONFIG_HAVE_CLK)
if (!IS_ERR(hpriv->clk)) {
clk_disable_unprepare(hpriv->clk);
clk_put(hpriv->clk);
@@ -4168,7 +4162,6 @@ static int mv_platform_remove(struct platform_device *pdev)
clk_put(hpriv->port_clks[port]);
}
}
-#endif
return 0;
}
@@ -4428,9 +4421,6 @@ static int mv_pci_device_resume(struct pci_dev *pdev)
#endif
#endif
-static int mv_platform_probe(struct platform_device *pdev);
-static int mv_platform_remove(struct platform_device *pdev);
-
static int __init mv_init(void)
{
int rc = -ENODEV;
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 8108eb06544..c2d95e9fb97 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -778,10 +778,6 @@ static int sata_rcar_probe(struct platform_device *pdev)
int irq;
int ret = 0;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem == NULL)
- return -EINVAL;
-
irq = platform_get_irq(pdev, 0);
if (irq <= 0)
return -EINVAL;
@@ -807,6 +803,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
host->private_data = priv;
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
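
[note] The explicit NULL check on the memory resource could be dropped above because devm_ioremap_resource() validates the resource itself and returns an ERR_PTR on any failure, so a single IS_ERR() test covers both cases. Sketch with hypothetical names:

#include <linux/device.h>
#include <linux/platform_device.h>

static void __iomem *example_map(struct platform_device *pdev)
{
        struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        /* handles mem == NULL, request_mem_region() and ioremap() failures */
        return devm_ioremap_resource(&pdev->dev, mem);
}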
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 507362a76a7..449f6298dc8 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1088,15 +1088,8 @@ static int he_start(struct atm_dev *dev)
for (i = 0; i < 6; ++i)
dev->esi[i] = read_prom_byte(he_dev, MAC_ADDR + i);
- hprintk("%s%s, %x:%x:%x:%x:%x:%x\n",
- he_dev->prod_id,
- he_dev->media & 0x40 ? "SM" : "MM",
- dev->esi[0],
- dev->esi[1],
- dev->esi[2],
- dev->esi[3],
- dev->esi[4],
- dev->esi[5]);
+ hprintk("%s%s, %pM\n", he_dev->prod_id,
+ he_dev->media & 0x40 ? "SM" : "MM", dev->esi);
he_dev->atm_dev->link_rate = he_is622(he_dev) ?
ATM_OC12_PCR : ATM_OC3_PCR;
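
[note] The hunk above relies on the %pM printk extension, which prints a 6-byte address as colon-separated hex in one specifier instead of six %x arguments. Sketch:

#include <linux/printk.h>

static void example_print_esi(const unsigned char esi[6])
{
        pr_info("ESI %pM\n", esi);      /* e.g. "ESI 00:11:22:33:44:55" */
}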
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 6587dc295eb..409502a78e7 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -153,7 +153,6 @@ static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user * arg);
static void which_list(ns_dev * card, struct sk_buff *skb);
#endif
static void ns_poll(unsigned long arg);
-static int ns_parse_mac(char *mac, unsigned char *esi);
static void ns_phy_put(struct atm_dev *dev, unsigned char value,
unsigned long addr);
static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
@@ -779,7 +778,7 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
return error;
}
- if (ns_parse_mac(mac[i], card->atmdev->esi)) {
+ if (mac[i] == NULL || !mac_pton(mac[i], card->atmdev->esi)) {
nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET,
card->atmdev->esi, 6);
if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) ==
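
[note] mac_pton() is the library helper that the open-coded parser removed below is dropped in favour of: it returns true only when the whole "xx:xx:xx:xx:xx:xx" string parses, filling the 6-byte buffer, and it does not accept a NULL string, hence the extra check in the hunk above. Sketch (header location for the prototype varies by kernel version):

#include <linux/kernel.h>

static bool example_parse_esi(const char *str, u8 esi[6])
{
        return str && mac_pton(str, esi);       /* true only on a full parse */
}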
@@ -2802,29 +2801,6 @@ static void ns_poll(unsigned long arg)
PRINTK("nicstar: Leaving ns_poll().\n");
}
-static int ns_parse_mac(char *mac, unsigned char *esi)
-{
- int i, j;
- short byte1, byte0;
-
- if (mac == NULL || esi == NULL)
- return -1;
- j = 0;
- for (i = 0; i < 6; i++) {
- if ((byte1 = hex_to_bin(mac[j++])) < 0)
- return -1;
- if ((byte0 = hex_to_bin(mac[j++])) < 0)
- return -1;
- esi[i] = (unsigned char)(byte1 * 16 + byte0);
- if (i < 5) {
- if (mac[j++] != ':')
- return -1;
- }
- }
- return 0;
-}
-
-
static void ns_phy_put(struct atm_dev *dev, unsigned char value,
unsigned long addr)
{
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 5daa2599ed4..e373671652b 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -200,11 +200,9 @@ config DMA_SHARED_BUFFER
APIs extension; the file's descriptor can then be passed on to other
driver.
-config CMA
- bool "Contiguous Memory Allocator"
- depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK
- select MIGRATION
- select MEMORY_ISOLATION
+config DMA_CMA
+ bool "DMA Contiguous Memory Allocator"
+ depends on HAVE_DMA_CONTIGUOUS && CMA
help
This enables the Contiguous Memory Allocator which allows drivers
to allocate big physically-contiguous blocks of memory for use with
@@ -213,17 +211,7 @@ config CMA
For more information see <include/linux/dma-contiguous.h>.
If unsure, say "n".
-if CMA
-
-config CMA_DEBUG
- bool "CMA debug messages (DEVELOPMENT)"
- depends on DEBUG_KERNEL
- help
- Turns on debug messages in CMA. This produces KERN_DEBUG
- messages for every CMA call as well as various messages while
- processing calls such as dma_alloc_from_contiguous().
- This option does not affect warning and error messages.
-
+if DMA_CMA
comment "Default contiguous memory area size:"
config CMA_SIZE_MBYTES
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 48029aa477d..94e8a80e87f 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,7 +6,7 @@ obj-y := core.o bus.o dd.o syscore.o \
attribute_container.o transport_class.o \
topology.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
-obj-$(CONFIG_CMA) += dma-contiguous.o
+obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
obj-y += power/
obj-$(CONFIG_HAS_DMA) += dma-mapping.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
diff --git a/drivers/base/base.h b/drivers/base/base.h
index b8bdfe61daa..2cbc6774f4c 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -119,6 +119,16 @@ static inline int driver_match_device(struct device_driver *drv,
return drv->bus->match ? drv->bus->match(dev, drv) : 1;
}
+extern int driver_add_groups(struct device_driver *drv,
+ const struct attribute_group **groups);
+extern void driver_remove_groups(struct device_driver *drv,
+ const struct attribute_group **groups);
+
+extern int device_add_groups(struct device *dev,
+ const struct attribute_group **groups);
+extern void device_remove_groups(struct device *dev,
+ const struct attribute_group **groups);
+
extern char *make_class_name(const char *name, struct kobject *kobj);
extern int devres_release_all(struct device *dev);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index d414331b480..4c289ab9135 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/mutex.h>
+#include <linux/sysfs.h>
#include "base.h"
#include "power/power.h"
@@ -165,8 +166,8 @@ static const struct kset_uevent_ops bus_uevent_ops = {
static struct kset *bus_kset;
/* Manually detach a device from its associated driver. */
-static ssize_t driver_unbind(struct device_driver *drv,
- const char *buf, size_t count)
+static ssize_t unbind_store(struct device_driver *drv, const char *buf,
+ size_t count)
{
struct bus_type *bus = bus_get(drv->bus);
struct device *dev;
@@ -185,15 +186,15 @@ static ssize_t driver_unbind(struct device_driver *drv,
bus_put(bus);
return err;
}
-static DRIVER_ATTR(unbind, S_IWUSR, NULL, driver_unbind);
+static DRIVER_ATTR_WO(unbind);
/*
* Manually attach a device to a driver.
* Note: the driver must want to bind to the device,
* it is not possible to override the driver's id table.
*/
-static ssize_t driver_bind(struct device_driver *drv,
- const char *buf, size_t count)
+static ssize_t bind_store(struct device_driver *drv, const char *buf,
+ size_t count)
{
struct bus_type *bus = bus_get(drv->bus);
struct device *dev;
@@ -221,7 +222,7 @@ static ssize_t driver_bind(struct device_driver *drv,
bus_put(bus);
return err;
}
-static DRIVER_ATTR(bind, S_IWUSR, NULL, driver_bind);
+static DRIVER_ATTR_WO(bind);
static ssize_t show_drivers_autoprobe(struct bus_type *bus, char *buf)
{
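
[note] The DRIVER_ATTR_WO() conversions above depend on a naming convention: the store callback must be called <name>_store, and the macro emits a write-only (mode 0200) struct driver_attribute named driver_attr_<name>. Sketch with a hypothetical attribute:

#include <linux/device.h>

static ssize_t example_store(struct device_driver *drv, const char *buf,
                             size_t count)
{
        return count;                   /* accept and ignore the write */
}
static DRIVER_ATTR_WO(example);         /* => driver_attr_example, mode 0200 */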
@@ -460,7 +461,7 @@ static int device_add_attrs(struct bus_type *bus, struct device *dev)
if (!bus->dev_attrs)
return 0;
- for (i = 0; attr_name(bus->dev_attrs[i]); i++) {
+ for (i = 0; bus->dev_attrs[i].attr.name; i++) {
error = device_create_file(dev, &bus->dev_attrs[i]);
if (error) {
while (--i >= 0)
@@ -476,7 +477,7 @@ static void device_remove_attrs(struct bus_type *bus, struct device *dev)
int i;
if (bus->dev_attrs) {
- for (i = 0; attr_name(bus->dev_attrs[i]); i++)
+ for (i = 0; bus->dev_attrs[i].attr.name; i++)
device_remove_file(dev, &bus->dev_attrs[i]);
}
}
@@ -499,6 +500,9 @@ int bus_add_device(struct device *dev)
error = device_add_attrs(bus, dev);
if (error)
goto out_put;
+ error = device_add_groups(dev, bus->dev_groups);
+ if (error)
+ goto out_groups;
error = sysfs_create_link(&bus->p->devices_kset->kobj,
&dev->kobj, dev_name(dev));
if (error)
@@ -513,6 +517,8 @@ int bus_add_device(struct device *dev)
out_subsys:
sysfs_remove_link(&bus->p->devices_kset->kobj, dev_name(dev));
+out_groups:
+ device_remove_groups(dev, bus->dev_groups);
out_id:
device_remove_attrs(bus, dev);
out_put:
@@ -575,6 +581,7 @@ void bus_remove_device(struct device *dev)
sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
dev_name(dev));
device_remove_attrs(dev->bus, dev);
+ device_remove_groups(dev, dev->bus->dev_groups);
if (klist_node_attached(&dev->p->knode_bus))
klist_del(&dev->p->knode_bus);
@@ -590,7 +597,7 @@ static int driver_add_attrs(struct bus_type *bus, struct device_driver *drv)
int i;
if (bus->drv_attrs) {
- for (i = 0; attr_name(bus->drv_attrs[i]); i++) {
+ for (i = 0; bus->drv_attrs[i].attr.name; i++) {
error = driver_create_file(drv, &bus->drv_attrs[i]);
if (error)
goto err;
@@ -610,7 +617,7 @@ static void driver_remove_attrs(struct bus_type *bus,
int i;
if (bus->drv_attrs) {
- for (i = 0; attr_name(bus->drv_attrs[i]); i++)
+ for (i = 0; bus->drv_attrs[i].attr.name; i++)
driver_remove_file(drv, &bus->drv_attrs[i]);
}
}
@@ -659,8 +666,8 @@ static void remove_probe_files(struct bus_type *bus)
bus_remove_file(bus, &bus_attr_drivers_probe);
}
-static ssize_t driver_uevent_store(struct device_driver *drv,
- const char *buf, size_t count)
+static ssize_t uevent_store(struct device_driver *drv, const char *buf,
+ size_t count)
{
enum kobject_action action;
@@ -668,7 +675,7 @@ static ssize_t driver_uevent_store(struct device_driver *drv,
kobject_uevent(&drv->p->kobj, action);
return count;
}
-static DRIVER_ATTR(uevent, S_IWUSR, NULL, driver_uevent_store);
+static DRIVER_ATTR_WO(uevent);
/**
* bus_add_driver - Add a driver to the bus.
@@ -719,6 +726,10 @@ int bus_add_driver(struct device_driver *drv)
printk(KERN_ERR "%s: driver_add_attrs(%s) failed\n",
__func__, drv->name);
}
+ error = driver_add_groups(drv, bus->drv_groups);
+ if (error)
+ printk(KERN_ERR "%s: driver_create_groups(%s) failed\n",
+ __func__, drv->name);
if (!drv->suppress_bind_attrs) {
error = add_bind_files(drv);
@@ -756,6 +767,7 @@ void bus_remove_driver(struct device_driver *drv)
if (!drv->suppress_bind_attrs)
remove_bind_files(drv);
driver_remove_attrs(drv->bus, drv);
+ driver_remove_groups(drv, drv->bus->drv_groups);
driver_remove_file(drv, &driver_attr_uevent);
klist_remove(&drv->p->knode_bus);
pr_debug("bus: '%s': remove driver %s\n", drv->bus->name, drv->name);
@@ -846,7 +858,7 @@ static int bus_add_attrs(struct bus_type *bus)
int i;
if (bus->bus_attrs) {
- for (i = 0; attr_name(bus->bus_attrs[i]); i++) {
+ for (i = 0; bus->bus_attrs[i].attr.name; i++) {
error = bus_create_file(bus, &bus->bus_attrs[i]);
if (error)
goto err;
@@ -865,11 +877,23 @@ static void bus_remove_attrs(struct bus_type *bus)
int i;
if (bus->bus_attrs) {
- for (i = 0; attr_name(bus->bus_attrs[i]); i++)
+ for (i = 0; bus->bus_attrs[i].attr.name; i++)
bus_remove_file(bus, &bus->bus_attrs[i]);
}
}
+static int bus_add_groups(struct bus_type *bus,
+ const struct attribute_group **groups)
+{
+ return sysfs_create_groups(&bus->p->subsys.kobj, groups);
+}
+
+static void bus_remove_groups(struct bus_type *bus,
+ const struct attribute_group **groups)
+{
+ sysfs_remove_groups(&bus->p->subsys.kobj, groups);
+}
+
static void klist_devices_get(struct klist_node *n)
{
struct device_private *dev_prv = to_device_private_bus(n);
@@ -962,10 +986,15 @@ int bus_register(struct bus_type *bus)
retval = bus_add_attrs(bus);
if (retval)
goto bus_attrs_fail;
+ retval = bus_add_groups(bus, bus->bus_groups);
+ if (retval)
+ goto bus_groups_fail;
pr_debug("bus: '%s': registered\n", bus->name);
return 0;
+bus_groups_fail:
+ bus_remove_attrs(bus);
bus_attrs_fail:
remove_probe_files(bus);
bus_probe_files_fail:
@@ -996,6 +1025,7 @@ void bus_unregister(struct bus_type *bus)
if (bus->dev_root)
device_unregister(bus->dev_root);
bus_remove_attrs(bus);
+ bus_remove_groups(bus, bus->bus_groups);
remove_probe_files(bus);
kset_unregister(bus->p->drivers_kset);
kset_unregister(bus->p->devices_kset);
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 3ce84547132..8b7818b8005 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -135,7 +135,7 @@ static int add_class_attrs(struct class *cls)
int error = 0;
if (cls->class_attrs) {
- for (i = 0; attr_name(cls->class_attrs[i]); i++) {
+ for (i = 0; cls->class_attrs[i].attr.name; i++) {
error = class_create_file(cls, &cls->class_attrs[i]);
if (error)
goto error;
@@ -154,7 +154,7 @@ static void remove_class_attrs(struct class *cls)
int i;
if (cls->class_attrs) {
- for (i = 0; attr_name(cls->class_attrs[i]); i++)
+ for (i = 0; cls->class_attrs[i].attr.name; i++)
class_remove_file(cls, &cls->class_attrs[i]);
}
}
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 8856d74545d..c7cfadcf675 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -26,6 +26,7 @@
#include <linux/async.h>
#include <linux/pm_runtime.h>
#include <linux/netdevice.h>
+#include <linux/sysfs.h>
#include "base.h"
#include "power/power.h"
@@ -36,9 +37,9 @@ long sysfs_deprecated = 1;
#else
long sysfs_deprecated = 0;
#endif
-static __init int sysfs_deprecated_setup(char *arg)
+static int __init sysfs_deprecated_setup(char *arg)
{
- return strict_strtol(arg, 10, &sysfs_deprecated);
+ return kstrtol(arg, 10, &sysfs_deprecated);
}
early_param("sysfs.deprecated", sysfs_deprecated_setup);
#endif
@@ -49,6 +50,28 @@ static struct kobject *dev_kobj;
struct kobject *sysfs_dev_char_kobj;
struct kobject *sysfs_dev_block_kobj;
+static DEFINE_MUTEX(device_hotplug_lock);
+
+void lock_device_hotplug(void)
+{
+ mutex_lock(&device_hotplug_lock);
+}
+
+void unlock_device_hotplug(void)
+{
+ mutex_unlock(&device_hotplug_lock);
+}
+
+int lock_device_hotplug_sysfs(void)
+{
+ if (mutex_trylock(&device_hotplug_lock))
+ return 0;
+
+ /* Avoid busy looping (5 ms of sleep should do). */
+ msleep(5);
+ return restart_syscall();
+}
+
#ifdef CONFIG_BLOCK
static inline int device_is_not_partition(struct device *dev)
{
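
[note] lock_device_hotplug_sysfs(), added above, is meant for sysfs callbacks that cannot simply block on the hotplug lock while holding the active sysfs reference; on contention it sleeps briefly and returns restart_syscall(), so the write(2) is retried transparently. A sketch of the expected call pattern (the online_store() hunk later in this patch does the same):

#include <linux/device.h>

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t count)
{
        int ret = lock_device_hotplug_sysfs();

        if (ret)
                return ret;             /* -ERESTARTNOINTR: the syscall is restarted */

        /* ... perform the hotplug operation under the lock ... */

        unlock_device_hotplug();
        return count;
}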
@@ -345,7 +368,7 @@ static const struct kset_uevent_ops device_uevent_ops = {
.uevent = dev_uevent,
};
-static ssize_t show_uevent(struct device *dev, struct device_attribute *attr,
+static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct kobject *top_kobj;
@@ -388,7 +411,7 @@ out:
return count;
}
-static ssize_t store_uevent(struct device *dev, struct device_attribute *attr,
+static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
enum kobject_action action;
@@ -399,22 +422,20 @@ static ssize_t store_uevent(struct device *dev, struct device_attribute *attr,
dev_err(dev, "uevent: unknown action-string\n");
return count;
}
+static DEVICE_ATTR_RW(uevent);
-static struct device_attribute uevent_attr =
- __ATTR(uevent, S_IRUGO | S_IWUSR, show_uevent, store_uevent);
-
-static ssize_t show_online(struct device *dev, struct device_attribute *attr,
+static ssize_t online_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
bool val;
- lock_device_hotplug();
+ device_lock(dev);
val = !dev->offline;
- unlock_device_hotplug();
+ device_unlock(dev);
return sprintf(buf, "%u\n", val);
}
-static ssize_t store_online(struct device *dev, struct device_attribute *attr,
+static ssize_t online_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
bool val;
@@ -424,14 +445,15 @@ static ssize_t store_online(struct device *dev, struct device_attribute *attr,
if (ret < 0)
return ret;
- lock_device_hotplug();
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ return ret;
+
ret = val ? device_online(dev) : device_offline(dev);
unlock_device_hotplug();
return ret < 0 ? ret : count;
}
-
-static struct device_attribute online_attr =
- __ATTR(online, S_IRUGO | S_IWUSR, show_online, store_online);
+static DEVICE_ATTR_RW(online);
static int device_add_attributes(struct device *dev,
struct device_attribute *attrs)
@@ -440,7 +462,7 @@ static int device_add_attributes(struct device *dev,
int i;
if (attrs) {
- for (i = 0; attr_name(attrs[i]); i++) {
+ for (i = 0; attrs[i].attr.name; i++) {
error = device_create_file(dev, &attrs[i]);
if (error)
break;
@@ -458,7 +480,7 @@ static void device_remove_attributes(struct device *dev,
int i;
if (attrs)
- for (i = 0; attr_name(attrs[i]); i++)
+ for (i = 0; attrs[i].attr.name; i++)
device_remove_file(dev, &attrs[i]);
}
@@ -469,7 +491,7 @@ static int device_add_bin_attributes(struct device *dev,
int i;
if (attrs) {
- for (i = 0; attr_name(attrs[i]); i++) {
+ for (i = 0; attrs[i].attr.name; i++) {
error = device_create_bin_file(dev, &attrs[i]);
if (error)
break;
@@ -487,38 +509,19 @@ static void device_remove_bin_attributes(struct device *dev,
int i;
if (attrs)
- for (i = 0; attr_name(attrs[i]); i++)
+ for (i = 0; attrs[i].attr.name; i++)
device_remove_bin_file(dev, &attrs[i]);
}
-static int device_add_groups(struct device *dev,
- const struct attribute_group **groups)
+int device_add_groups(struct device *dev, const struct attribute_group **groups)
{
- int error = 0;
- int i;
-
- if (groups) {
- for (i = 0; groups[i]; i++) {
- error = sysfs_create_group(&dev->kobj, groups[i]);
- if (error) {
- while (--i >= 0)
- sysfs_remove_group(&dev->kobj,
- groups[i]);
- break;
- }
- }
- }
- return error;
+ return sysfs_create_groups(&dev->kobj, groups);
}
-static void device_remove_groups(struct device *dev,
- const struct attribute_group **groups)
+void device_remove_groups(struct device *dev,
+ const struct attribute_group **groups)
{
- int i;
-
- if (groups)
- for (i = 0; groups[i]; i++)
- sysfs_remove_group(&dev->kobj, groups[i]);
+ sysfs_remove_groups(&dev->kobj, groups);
}
static int device_add_attrs(struct device *dev)
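
[note] device_add_groups()/device_remove_groups() above become thin wrappers around sysfs_create_groups()/sysfs_remove_groups(), which take a NULL-terminated array of attribute groups and roll back on partial failure. A sketch of building such an array; every name here is hypothetical:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return sprintf(buf, "ok\n");
}
static DEVICE_ATTR_RO(example);

static struct attribute *example_attrs[] = {
        &dev_attr_example.attr,
        NULL,                           /* array must be NULL-terminated */
};

static const struct attribute_group example_group = {
        .attrs = example_attrs,
};

static const struct attribute_group *example_groups[] = {
        &example_group,
        NULL,
};
/* then: sysfs_create_groups(&dev->kobj, example_groups); */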
@@ -550,7 +553,7 @@ static int device_add_attrs(struct device *dev)
goto err_remove_type_groups;
if (device_supports_offline(dev) && !dev->offline_disabled) {
- error = device_create_file(dev, &online_attr);
+ error = device_create_file(dev, &dev_attr_online);
if (error)
goto err_remove_type_groups;
}
@@ -578,7 +581,7 @@ static void device_remove_attrs(struct device *dev)
struct class *class = dev->class;
const struct device_type *type = dev->type;
- device_remove_file(dev, &online_attr);
+ device_remove_file(dev, &dev_attr_online);
device_remove_groups(dev, dev->groups);
if (type)
@@ -591,15 +594,12 @@ static void device_remove_attrs(struct device *dev)
}
}
-
-static ssize_t show_dev(struct device *dev, struct device_attribute *attr,
+static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return print_dev_t(buf, dev->devt);
}
-
-static struct device_attribute devt_attr =
- __ATTR(dev, S_IRUGO, show_dev, NULL);
+static DEVICE_ATTR_RO(dev);
/* /sys/devices/ */
struct kset *devices_kset;
@@ -626,6 +626,7 @@ int device_create_file(struct device *dev,
return error;
}
+EXPORT_SYMBOL_GPL(device_create_file);
/**
* device_remove_file - remove sysfs attribute file.
@@ -638,6 +639,7 @@ void device_remove_file(struct device *dev,
if (dev)
sysfs_remove_file(&dev->kobj, &attr->attr);
}
+EXPORT_SYMBOL_GPL(device_remove_file);
/**
* device_create_bin_file - create sysfs binary attribute file for device.
@@ -748,6 +750,7 @@ void device_initialize(struct device *dev)
device_pm_init(dev);
set_dev_node(dev, -1);
}
+EXPORT_SYMBOL_GPL(device_initialize);
struct kobject *virtual_device_parent(struct device *dev)
{
@@ -1100,12 +1103,12 @@ int device_add(struct device *dev)
if (platform_notify)
platform_notify(dev);
- error = device_create_file(dev, &uevent_attr);
+ error = device_create_file(dev, &dev_attr_uevent);
if (error)
goto attrError;
if (MAJOR(dev->devt)) {
- error = device_create_file(dev, &devt_attr);
+ error = device_create_file(dev, &dev_attr_dev);
if (error)
goto ueventattrError;
@@ -1172,9 +1175,9 @@ done:
device_remove_sys_dev_entry(dev);
devtattrError:
if (MAJOR(dev->devt))
- device_remove_file(dev, &devt_attr);
+ device_remove_file(dev, &dev_attr_dev);
ueventattrError:
- device_remove_file(dev, &uevent_attr);
+ device_remove_file(dev, &dev_attr_uevent);
attrError:
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
kobject_del(&dev->kobj);
@@ -1187,6 +1190,7 @@ name_error:
dev->p = NULL;
goto done;
}
+EXPORT_SYMBOL_GPL(device_add);
/**
* device_register - register a device with the system.
@@ -1211,6 +1215,7 @@ int device_register(struct device *dev)
device_initialize(dev);
return device_add(dev);
}
+EXPORT_SYMBOL_GPL(device_register);
/**
* get_device - increment reference count for device.
@@ -1224,6 +1229,7 @@ struct device *get_device(struct device *dev)
{
return dev ? kobj_to_dev(kobject_get(&dev->kobj)) : NULL;
}
+EXPORT_SYMBOL_GPL(get_device);
/**
* put_device - decrement reference count.
@@ -1235,6 +1241,7 @@ void put_device(struct device *dev)
if (dev)
kobject_put(&dev->kobj);
}
+EXPORT_SYMBOL_GPL(put_device);
/**
* device_del - delete device from system.
@@ -1266,7 +1273,7 @@ void device_del(struct device *dev)
if (MAJOR(dev->devt)) {
devtmpfs_delete_node(dev);
device_remove_sys_dev_entry(dev);
- device_remove_file(dev, &devt_attr);
+ device_remove_file(dev, &dev_attr_dev);
}
if (dev->class) {
device_remove_class_symlinks(dev);
@@ -1281,7 +1288,7 @@ void device_del(struct device *dev)
klist_del(&dev->knode_class);
mutex_unlock(&dev->class->p->mutex);
}
- device_remove_file(dev, &uevent_attr);
+ device_remove_file(dev, &dev_attr_uevent);
device_remove_attrs(dev);
bus_remove_device(dev);
device_pm_remove(dev);
@@ -1297,6 +1304,7 @@ void device_del(struct device *dev)
kobject_del(&dev->kobj);
put_device(parent);
}
+EXPORT_SYMBOL_GPL(device_del);
/**
* device_unregister - unregister device from system.
@@ -1315,6 +1323,7 @@ void device_unregister(struct device *dev)
device_del(dev);
put_device(dev);
}
+EXPORT_SYMBOL_GPL(device_unregister);
static struct device *next_device(struct klist_iter *i)
{
@@ -1403,6 +1412,7 @@ int device_for_each_child(struct device *parent, void *data,
klist_iter_exit(&i);
return error;
}
+EXPORT_SYMBOL_GPL(device_for_each_child);
/**
* device_find_child - device iterator for locating a particular device.
@@ -1437,6 +1447,7 @@ struct device *device_find_child(struct device *parent, void *data,
klist_iter_exit(&i);
return child;
}
+EXPORT_SYMBOL_GPL(device_find_child);
int __init devices_init(void)
{
@@ -1464,33 +1475,6 @@ int __init devices_init(void)
return -ENOMEM;
}
-EXPORT_SYMBOL_GPL(device_for_each_child);
-EXPORT_SYMBOL_GPL(device_find_child);
-
-EXPORT_SYMBOL_GPL(device_initialize);
-EXPORT_SYMBOL_GPL(device_add);
-EXPORT_SYMBOL_GPL(device_register);
-
-EXPORT_SYMBOL_GPL(device_del);
-EXPORT_SYMBOL_GPL(device_unregister);
-EXPORT_SYMBOL_GPL(get_device);
-EXPORT_SYMBOL_GPL(put_device);
-
-EXPORT_SYMBOL_GPL(device_create_file);
-EXPORT_SYMBOL_GPL(device_remove_file);
-
-static DEFINE_MUTEX(device_hotplug_lock);
-
-void lock_device_hotplug(void)
-{
- mutex_lock(&device_hotplug_lock);
-}
-
-void unlock_device_hotplug(void)
-{
- mutex_unlock(&device_hotplug_lock);
-}
-
static int device_check_offline(struct device *dev, void *not_used)
{
int ret;
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 4c358bc44c7..848ebbd2571 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/acpi.h>
+#include <linux/of.h>
#include "base.h"
@@ -43,11 +44,14 @@ static int __ref cpu_subsys_online(struct device *dev)
struct cpu *cpu = container_of(dev, struct cpu, dev);
int cpuid = dev->id;
int from_nid, to_nid;
- int ret;
+ int ret = -ENODEV;
cpu_hotplug_driver_lock();
from_nid = cpu_to_node(cpuid);
+ if (from_nid == NUMA_NO_NODE)
+ goto out;
+
ret = cpu_up(cpuid);
/*
* When hot adding memory to memoryless node and enabling a cpu
@@ -57,6 +61,7 @@ static int __ref cpu_subsys_online(struct device *dev)
if (from_nid != to_nid)
change_cpu_under_node(cpu, from_nid, to_nid);
+ out:
cpu_hotplug_driver_unlock();
return ret;
}
@@ -289,6 +294,7 @@ int register_cpu(struct cpu *cpu, int num)
cpu->dev.release = cpu_device_release;
cpu->dev.offline_disabled = !cpu->hotpluggable;
cpu->dev.offline = !cpu_online(num);
+ cpu->dev.of_node = of_get_cpu_node(num, NULL);
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
cpu->dev.bus->uevent = arch_cpu_uevent;
#endif
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 6687ba74187..1219ab7c310 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -680,7 +680,7 @@ int dma_buf_debugfs_create_file(const char *name,
d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir,
write, &dma_buf_debug_fops);
- return PTR_RET(d);
+ return PTR_ERR_OR_ZERO(d);
}
#else
static inline int dma_buf_init_debugfs(void)
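
[note] PTR_ERR_OR_ZERO() above is the renamed form of PTR_RET(): it maps an ERR_PTR-encoded pointer to its -errno and anything else (including NULL) to 0. Sketch:

#include <linux/err.h>

static int example_status(const void *p)
{
        /* equivalent to: IS_ERR(p) ? PTR_ERR(p) : 0 */
        return PTR_ERR_OR_ZERO(p);
}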
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 0ca54421ce9..6c9cdaa9200 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -134,7 +134,7 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
static DEFINE_MUTEX(cma_mutex);
-static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
+static int __init cma_activate_area(unsigned long base_pfn, unsigned long count)
{
unsigned long pfn = base_pfn;
unsigned i = count >> pageblock_order;
@@ -156,7 +156,7 @@ static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
return 0;
}
-static __init struct cma *cma_create_area(unsigned long base_pfn,
+static struct cma * __init cma_create_area(unsigned long base_pfn,
unsigned long count)
{
int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index 974e301a1ef..9e29943e56c 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -15,6 +15,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/sysfs.h>
#include "base.h"
static struct device *next_device(struct klist_iter *i)
@@ -123,34 +124,16 @@ void driver_remove_file(struct device_driver *drv,
}
EXPORT_SYMBOL_GPL(driver_remove_file);
-static int driver_add_groups(struct device_driver *drv,
- const struct attribute_group **groups)
+int driver_add_groups(struct device_driver *drv,
+ const struct attribute_group **groups)
{
- int error = 0;
- int i;
-
- if (groups) {
- for (i = 0; groups[i]; i++) {
- error = sysfs_create_group(&drv->p->kobj, groups[i]);
- if (error) {
- while (--i >= 0)
- sysfs_remove_group(&drv->p->kobj,
- groups[i]);
- break;
- }
- }
- }
- return error;
+ return sysfs_create_groups(&drv->p->kobj, groups);
}
-static void driver_remove_groups(struct device_driver *drv,
- const struct attribute_group **groups)
+void driver_remove_groups(struct device_driver *drv,
+ const struct attribute_group **groups)
{
- int i;
-
- if (groups)
- for (i = 0; groups[i]; i++)
- sysfs_remove_group(&drv->p->kobj, groups[i]);
+ sysfs_remove_groups(&drv->p->kobj, groups);
}
/**
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index a439602ea91..10a4467c63f 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -486,9 +486,8 @@ static struct notifier_block fw_shutdown_nb = {
.notifier_call = fw_shutdown_notify,
};
-static ssize_t firmware_timeout_show(struct class *class,
- struct class_attribute *attr,
- char *buf)
+static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%d\n", loading_timeout);
}
@@ -506,9 +505,8 @@ static ssize_t firmware_timeout_show(struct class *class,
*
* Note: zero means 'wait forever'.
**/
-static ssize_t firmware_timeout_store(struct class *class,
- struct class_attribute *attr,
- const char *buf, size_t count)
+static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
+ const char *buf, size_t count)
{
loading_timeout = simple_strtol(buf, NULL, 10);
if (loading_timeout < 0)
@@ -518,8 +516,7 @@ static ssize_t firmware_timeout_store(struct class *class,
}
static struct class_attribute firmware_class_attrs[] = {
- __ATTR(timeout, S_IWUSR | S_IRUGO,
- firmware_timeout_show, firmware_timeout_store),
+ __ATTR_RW(timeout),
__ATTR_NULL
};
@@ -868,8 +865,15 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
goto err_del_dev;
}
+ mutex_lock(&fw_lock);
+ list_add(&buf->pending_list, &pending_fw_head);
+ mutex_unlock(&fw_lock);
+
retval = device_create_file(f_dev, &dev_attr_loading);
if (retval) {
+ mutex_lock(&fw_lock);
+ list_del_init(&buf->pending_list);
+ mutex_unlock(&fw_lock);
dev_err(f_dev, "%s: device_create_file failed\n", __func__);
goto err_del_bin_attr;
}
@@ -884,10 +888,6 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
}
- mutex_lock(&fw_lock);
- list_add(&buf->pending_list, &pending_fw_head);
- mutex_unlock(&fw_lock);
-
wait_for_completion(&buf->completion);
cancel_delayed_work_sync(&fw_priv->timeout_work);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 2b7813ec6d0..9e59f6535c4 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -16,7 +16,6 @@
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
-#include <linux/kobject.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/mutex.h>
@@ -30,6 +29,8 @@ static DEFINE_MUTEX(mem_sysfs_mutex);
#define MEMORY_CLASS_NAME "memory"
+#define to_memory_block(dev) container_of(dev, struct memory_block, dev)
+
static int sections_per_block;
static inline int base_memory_block_id(int section_nr)
@@ -77,7 +78,7 @@ EXPORT_SYMBOL(unregister_memory_isolate_notifier);
static void memory_block_release(struct device *dev)
{
- struct memory_block *mem = container_of(dev, struct memory_block, dev);
+ struct memory_block *mem = to_memory_block(dev);
kfree(mem);
}
@@ -110,8 +111,7 @@ static unsigned long get_memory_block_size(void)
static ssize_t show_mem_start_phys_index(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct memory_block *mem =
- container_of(dev, struct memory_block, dev);
+ struct memory_block *mem = to_memory_block(dev);
unsigned long phys_index;
phys_index = mem->start_section_nr / sections_per_block;
@@ -121,8 +121,7 @@ static ssize_t show_mem_start_phys_index(struct device *dev,
static ssize_t show_mem_end_phys_index(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct memory_block *mem =
- container_of(dev, struct memory_block, dev);
+ struct memory_block *mem = to_memory_block(dev);
unsigned long phys_index;
phys_index = mem->end_section_nr / sections_per_block;
@@ -137,10 +136,11 @@ static ssize_t show_mem_removable(struct device *dev,
{
unsigned long i, pfn;
int ret = 1;
- struct memory_block *mem =
- container_of(dev, struct memory_block, dev);
+ struct memory_block *mem = to_memory_block(dev);
for (i = 0; i < sections_per_block; i++) {
+ if (!present_section_nr(mem->start_section_nr + i))
+ continue;
pfn = section_nr_to_pfn(mem->start_section_nr + i);
ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
}
@@ -154,8 +154,7 @@ static ssize_t show_mem_removable(struct device *dev,
static ssize_t show_mem_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct memory_block *mem =
- container_of(dev, struct memory_block, dev);
+ struct memory_block *mem = to_memory_block(dev);
ssize_t len = 0;
/*
@@ -261,9 +260,8 @@ memory_block_action(unsigned long phys_index, unsigned long action, int online_t
return ret;
}
-static int __memory_block_change_state(struct memory_block *mem,
- unsigned long to_state, unsigned long from_state_req,
- int online_type)
+static int memory_block_change_state(struct memory_block *mem,
+ unsigned long to_state, unsigned long from_state_req)
{
int ret = 0;
@@ -273,105 +271,91 @@ static int __memory_block_change_state(struct memory_block *mem,
if (to_state == MEM_OFFLINE)
mem->state = MEM_GOING_OFFLINE;
- ret = memory_block_action(mem->start_section_nr, to_state, online_type);
+ ret = memory_block_action(mem->start_section_nr, to_state,
+ mem->online_type);
+
mem->state = ret ? from_state_req : to_state;
+
return ret;
}
+/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
- struct memory_block *mem = container_of(dev, struct memory_block, dev);
+ struct memory_block *mem = to_memory_block(dev);
int ret;
- mutex_lock(&mem->state_mutex);
+ if (mem->state == MEM_ONLINE)
+ return 0;
- ret = mem->state == MEM_ONLINE ? 0 :
- __memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE,
- ONLINE_KEEP);
-
- mutex_unlock(&mem->state_mutex);
- return ret;
-}
-
-static int memory_subsys_offline(struct device *dev)
-{
- struct memory_block *mem = container_of(dev, struct memory_block, dev);
- int ret;
+ /*
+ * If we are called from store_mem_state(), online_type will be
+ * set >= 0. Otherwise we were called from the device online
+ * attribute and need to set the online_type.
+ */
+ if (mem->online_type < 0)
+ mem->online_type = ONLINE_KEEP;
- mutex_lock(&mem->state_mutex);
+ ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
- ret = mem->state == MEM_OFFLINE ? 0 :
- __memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE, -1);
+ /* clear online_type */
+ mem->online_type = -1;
- mutex_unlock(&mem->state_mutex);
return ret;
}
-static int __memory_block_change_state_uevent(struct memory_block *mem,
- unsigned long to_state, unsigned long from_state_req,
- int online_type)
-{
- int ret = __memory_block_change_state(mem, to_state, from_state_req,
- online_type);
- if (!ret) {
- switch (mem->state) {
- case MEM_OFFLINE:
- kobject_uevent(&mem->dev.kobj, KOBJ_OFFLINE);
- break;
- case MEM_ONLINE:
- kobject_uevent(&mem->dev.kobj, KOBJ_ONLINE);
- break;
- default:
- break;
- }
- }
- return ret;
-}
-
-static int memory_block_change_state(struct memory_block *mem,
- unsigned long to_state, unsigned long from_state_req,
- int online_type)
+static int memory_subsys_offline(struct device *dev)
{
- int ret;
+ struct memory_block *mem = to_memory_block(dev);
- mutex_lock(&mem->state_mutex);
- ret = __memory_block_change_state_uevent(mem, to_state, from_state_req,
- online_type);
- mutex_unlock(&mem->state_mutex);
+ if (mem->state == MEM_OFFLINE)
+ return 0;
- return ret;
+ return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}
+
static ssize_t
store_mem_state(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct memory_block *mem;
- bool offline;
- int ret = -EINVAL;
-
- mem = container_of(dev, struct memory_block, dev);
-
- lock_device_hotplug();
-
- if (!strncmp(buf, "online_kernel", min_t(int, count, 13))) {
- offline = false;
- ret = memory_block_change_state(mem, MEM_ONLINE,
- MEM_OFFLINE, ONLINE_KERNEL);
- } else if (!strncmp(buf, "online_movable", min_t(int, count, 14))) {
- offline = false;
- ret = memory_block_change_state(mem, MEM_ONLINE,
- MEM_OFFLINE, ONLINE_MOVABLE);
- } else if (!strncmp(buf, "online", min_t(int, count, 6))) {
- offline = false;
- ret = memory_block_change_state(mem, MEM_ONLINE,
- MEM_OFFLINE, ONLINE_KEEP);
- } else if(!strncmp(buf, "offline", min_t(int, count, 7))) {
- offline = true;
- ret = memory_block_change_state(mem, MEM_OFFLINE,
- MEM_ONLINE, -1);
+ struct memory_block *mem = to_memory_block(dev);
+ int ret, online_type;
+
+ ret = lock_device_hotplug_sysfs();
+ if (ret)
+ return ret;
+
+ if (!strncmp(buf, "online_kernel", min_t(int, count, 13)))
+ online_type = ONLINE_KERNEL;
+ else if (!strncmp(buf, "online_movable", min_t(int, count, 14)))
+ online_type = ONLINE_MOVABLE;
+ else if (!strncmp(buf, "online", min_t(int, count, 6)))
+ online_type = ONLINE_KEEP;
+ else if (!strncmp(buf, "offline", min_t(int, count, 7)))
+ online_type = -1;
+ else
+ return -EINVAL;
+
+ switch (online_type) {
+ case ONLINE_KERNEL:
+ case ONLINE_MOVABLE:
+ case ONLINE_KEEP:
+ /*
+ * mem->online_type is not protected so there can be a
+ * race here. However, when racing online, the first
+ * will succeed and the second will just return as the
+ * block will already be online. The online type
+ * could be either one, but that is expected.
+ */
+ mem->online_type = online_type;
+ ret = device_online(&mem->dev);
+ break;
+ case -1:
+ ret = device_offline(&mem->dev);
+ break;
+ default:
+ ret = -EINVAL; /* should never happen */
}
- if (!ret)
- dev->offline = offline;
unlock_device_hotplug();
@@ -392,8 +376,7 @@ store_mem_state(struct device *dev,
static ssize_t show_phys_device(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct memory_block *mem =
- container_of(dev, struct memory_block, dev);
+ struct memory_block *mem = to_memory_block(dev);
return sprintf(buf, "%d\n", mem->phys_device);
}
@@ -469,7 +452,7 @@ store_soft_offline_page(struct device *dev,
u64 pfn;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (strict_strtoull(buf, 0, &pfn) < 0)
+ if (kstrtoull(buf, 0, &pfn) < 0)
return -EINVAL;
pfn >>= PAGE_SHIFT;
if (!pfn_valid(pfn))
@@ -488,7 +471,7 @@ store_hard_offline_page(struct device *dev,
u64 pfn;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (strict_strtoull(buf, 0, &pfn) < 0)
+ if (kstrtoull(buf, 0, &pfn) < 0)
return -EINVAL;
pfn >>= PAGE_SHIFT;
ret = memory_failure(pfn, 0, 0);
@@ -525,7 +508,7 @@ struct memory_block *find_memory_block_hinted(struct mem_section *section,
put_device(&hint->dev);
if (!dev)
return NULL;
- return container_of(dev, struct memory_block, dev);
+ return to_memory_block(dev);
}
/*
@@ -565,16 +548,13 @@ static const struct attribute_group *memory_memblk_attr_groups[] = {
static
int register_memory(struct memory_block *memory)
{
- int error;
-
memory->dev.bus = &memory_subsys;
memory->dev.id = memory->start_section_nr / sections_per_block;
memory->dev.release = memory_block_release;
memory->dev.groups = memory_memblk_attr_groups;
memory->dev.offline = memory->state == MEM_OFFLINE;
- error = device_register(&memory->dev);
- return error;
+ return device_register(&memory->dev);
}
static int init_memory_block(struct memory_block **memory,
@@ -595,7 +575,6 @@ static int init_memory_block(struct memory_block **memory,
mem->end_section_nr = mem->start_section_nr + sections_per_block - 1;
mem->state = state;
mem->section_count++;
- mutex_init(&mem->state_mutex);
start_pfn = section_nr_to_pfn(mem->start_section_nr);
mem->phys_device = arch_get_memory_phys_device(start_pfn);
@@ -605,55 +584,57 @@ static int init_memory_block(struct memory_block **memory,
return ret;
}
-static int add_memory_section(int nid, struct mem_section *section,
- struct memory_block **mem_p,
- unsigned long state, enum mem_add_context context)
+static int add_memory_block(int base_section_nr)
{
- struct memory_block *mem = NULL;
- int scn_nr = __section_nr(section);
- int ret = 0;
-
- mutex_lock(&mem_sysfs_mutex);
-
- if (context == BOOT) {
- /* same memory block ? */
- if (mem_p && *mem_p)
- if (scn_nr >= (*mem_p)->start_section_nr &&
- scn_nr <= (*mem_p)->end_section_nr) {
- mem = *mem_p;
- kobject_get(&mem->dev.kobj);
- }
- } else
- mem = find_memory_block(section);
-
- if (mem) {
- mem->section_count++;
- kobject_put(&mem->dev.kobj);
- } else {
- ret = init_memory_block(&mem, section, state);
- /* store memory_block pointer for next loop */
- if (!ret && context == BOOT)
- if (mem_p)
- *mem_p = mem;
- }
+ struct memory_block *mem;
+ int i, ret, section_count = 0, section_nr;
- if (!ret) {
- if (context == HOTPLUG &&
- mem->section_count == sections_per_block)
- ret = register_mem_sect_under_node(mem, nid);
+ for (i = base_section_nr;
+ (i < base_section_nr + sections_per_block) && i < NR_MEM_SECTIONS;
+ i++) {
+ if (!present_section_nr(i))
+ continue;
+ if (section_count == 0)
+ section_nr = i;
+ section_count++;
}
- mutex_unlock(&mem_sysfs_mutex);
- return ret;
+ if (section_count == 0)
+ return 0;
+ ret = init_memory_block(&mem, __nr_to_section(section_nr), MEM_ONLINE);
+ if (ret)
+ return ret;
+ mem->section_count = section_count;
+ return 0;
}
+
/*
* need an interface for the VM to add new memory regions,
* but without onlining it.
*/
int register_new_memory(int nid, struct mem_section *section)
{
- return add_memory_section(nid, section, NULL, MEM_OFFLINE, HOTPLUG);
+ int ret = 0;
+ struct memory_block *mem;
+
+ mutex_lock(&mem_sysfs_mutex);
+
+ mem = find_memory_block(section);
+ if (mem) {
+ mem->section_count++;
+ put_device(&mem->dev);
+ } else {
+ ret = init_memory_block(&mem, section, MEM_OFFLINE);
+ if (ret)
+ goto out;
+ }
+
+ if (mem->section_count == sections_per_block)
+ ret = register_mem_sect_under_node(mem, nid);
+out:
+ mutex_unlock(&mem_sysfs_mutex);
+ return ret;
}
#ifdef CONFIG_MEMORY_HOTREMOVE
@@ -663,7 +644,7 @@ unregister_memory(struct memory_block *memory)
BUG_ON(memory->dev.bus != &memory_subsys);
/* drop the ref. we got in remove_memory_block() */
- kobject_put(&memory->dev.kobj);
+ put_device(&memory->dev);
device_unregister(&memory->dev);
}
@@ -680,7 +661,7 @@ static int remove_memory_block(unsigned long node_id,
if (mem->section_count == 0)
unregister_memory(mem);
else
- kobject_put(&mem->dev.kobj);
+ put_device(&mem->dev);
mutex_unlock(&mem_sysfs_mutex);
return 0;
@@ -733,7 +714,6 @@ int __init memory_dev_init(void)
int ret;
int err;
unsigned long block_sz;
- struct memory_block *mem = NULL;
ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
if (ret)
@@ -746,17 +726,13 @@ int __init memory_dev_init(void)
* Create entries for memory sections that were found
* during boot and have been initialized
*/
- for (i = 0; i < NR_MEM_SECTIONS; i++) {
- if (!present_section_nr(i))
- continue;
- /* don't need to reuse memory_block if only one per block */
- err = add_memory_section(0, __nr_to_section(i),
- (sections_per_block == 1) ? NULL : &mem,
- MEM_ONLINE,
- BOOT);
+ mutex_lock(&mem_sysfs_mutex);
+ for (i = 0; i < NR_MEM_SECTIONS; i += sections_per_block) {
+ err = add_memory_block(i);
if (!ret)
ret = err;
}
+ mutex_unlock(&mem_sysfs_mutex);
out:
if (ret)
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 3c3197a8de4..4f8bef3eb5a 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -672,11 +672,13 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
+static DEVICE_ATTR_RO(modalias);
-static struct device_attribute platform_dev_attrs[] = {
- __ATTR_RO(modalias),
- __ATTR_NULL,
+static struct attribute *platform_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(platform_dev);
static int platform_uevent(struct device *dev, struct kobj_uevent_env *env)
{
@@ -893,7 +895,7 @@ static const struct dev_pm_ops platform_dev_pm_ops = {
struct bus_type platform_bus_type = {
.name = "platform",
- .dev_attrs = platform_dev_attrs,
+ .dev_groups = platform_dev_groups,
.match = platform_match,
.uevent = platform_uevent,
.pm = &platform_dev_pm_ops,
@@ -1054,7 +1056,7 @@ void __init early_platform_driver_register_all(char *class_str)
* @epdrv: early platform driver structure
* @id: id to match against
*/
-static __init struct platform_device *
+static struct platform_device * __init
early_platform_match(struct early_platform_driver *epdrv, int id)
{
struct platform_device *pd;
@@ -1072,7 +1074,7 @@ early_platform_match(struct early_platform_driver *epdrv, int id)
* @epdrv: early platform driver structure
* @id: return true if id or above exists
*/
-static __init int early_platform_left(struct early_platform_driver *epdrv,
+static int __init early_platform_left(struct early_platform_driver *epdrv,
int id)
{
struct platform_device *pd;
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 5a9b6569dd7..9f098a82cf0 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -28,6 +28,7 @@
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
+#include <trace/events/power.h>
#include <linux/cpuidle.h>
#include "../base.h"
#include "power.h"
@@ -56,6 +57,30 @@ static pm_message_t pm_transition;
static int async_error;
+static char *pm_verb(int event)
+{
+ switch (event) {
+ case PM_EVENT_SUSPEND:
+ return "suspend";
+ case PM_EVENT_RESUME:
+ return "resume";
+ case PM_EVENT_FREEZE:
+ return "freeze";
+ case PM_EVENT_QUIESCE:
+ return "quiesce";
+ case PM_EVENT_HIBERNATE:
+ return "hibernate";
+ case PM_EVENT_THAW:
+ return "thaw";
+ case PM_EVENT_RESTORE:
+ return "restore";
+ case PM_EVENT_RECOVER:
+ return "recover";
+ default:
+ return "(unknown PM event)";
+ }
+}
+
/**
* device_pm_sleep_init - Initialize system suspend-related device fields.
* @dev: Device object being initialized.
@@ -172,16 +197,21 @@ static ktime_t initcall_debug_start(struct device *dev)
}
static void initcall_debug_report(struct device *dev, ktime_t calltime,
- int error)
+ int error, pm_message_t state, char *info)
{
- ktime_t delta, rettime;
+ ktime_t rettime;
+ s64 nsecs;
+
+ rettime = ktime_get();
+ nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
if (pm_print_times_enabled) {
- rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
- error, (unsigned long long)ktime_to_ns(delta) >> 10);
+ error, (unsigned long long)nsecs >> 10);
}
+
+ trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
+ error);
}
/**
@@ -309,30 +339,6 @@ static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t stat
return NULL;
}
-static char *pm_verb(int event)
-{
- switch (event) {
- case PM_EVENT_SUSPEND:
- return "suspend";
- case PM_EVENT_RESUME:
- return "resume";
- case PM_EVENT_FREEZE:
- return "freeze";
- case PM_EVENT_QUIESCE:
- return "quiesce";
- case PM_EVENT_HIBERNATE:
- return "hibernate";
- case PM_EVENT_THAW:
- return "thaw";
- case PM_EVENT_RESTORE:
- return "restore";
- case PM_EVENT_RECOVER:
- return "recover";
- default:
- return "(unknown PM event)";
- }
-}
-
static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
@@ -379,7 +385,7 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
error = cb(dev);
suspend_report_result(cb, error);
- initcall_debug_report(dev, calltime, error);
+ initcall_debug_report(dev, calltime, error, state, info);
return error;
}
@@ -1027,7 +1033,8 @@ EXPORT_SYMBOL_GPL(dpm_suspend_end);
* @cb: Suspend callback to execute.
*/
static int legacy_suspend(struct device *dev, pm_message_t state,
- int (*cb)(struct device *dev, pm_message_t state))
+ int (*cb)(struct device *dev, pm_message_t state),
+ char *info)
{
int error;
ktime_t calltime;
@@ -1037,7 +1044,7 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
error = cb(dev, state);
suspend_report_result(cb, error);
- initcall_debug_report(dev, calltime, error);
+ initcall_debug_report(dev, calltime, error, state, info);
return error;
}
@@ -1097,7 +1104,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
goto Run;
} else if (dev->class->suspend) {
pm_dev_dbg(dev, state, "legacy class ");
- error = legacy_suspend(dev, state, dev->class->suspend);
+ error = legacy_suspend(dev, state, dev->class->suspend,
+ "legacy class ");
goto End;
}
}
@@ -1108,7 +1116,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
callback = pm_op(dev->bus->pm, state);
} else if (dev->bus->suspend) {
pm_dev_dbg(dev, state, "legacy bus ");
- error = legacy_suspend(dev, state, dev->bus->suspend);
+ error = legacy_suspend(dev, state, dev->bus->suspend,
+ "legacy bus ");
goto End;
}
}
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index c8ec186303d..ef89897c604 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -460,6 +460,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
return 0;
}
+EXPORT_SYMBOL_GPL(opp_add);
/**
* opp_set_availability() - helper to set the availability of an opp
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a53ebd26570..03e089ade5c 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -206,7 +206,7 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev,
if (!dev->power.use_autosuspend)
return -EIO;
- if (strict_strtol(buf, 10, &delay) != 0 || delay != (int) delay)
+ if (kstrtol(buf, 10, &delay) != 0 || delay != (int) delay)
return -EINVAL;
device_lock(dev);
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 29c83160ca2..57f777835d9 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -128,9 +128,6 @@ struct regmap {
void *cache;
u32 cache_dirty;
- unsigned long *cache_present;
- unsigned int cache_present_nbits;
-
struct reg_default *patch;
int patch_regs;
@@ -203,6 +200,7 @@ int regcache_write(struct regmap *map,
unsigned int reg, unsigned int value);
int regcache_sync(struct regmap *map);
int regcache_sync_block(struct regmap *map, void *block,
+ unsigned long *cache_present,
unsigned int block_base, unsigned int start,
unsigned int end);
@@ -218,16 +216,6 @@ unsigned int regcache_get_val(struct regmap *map, const void *base,
bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
unsigned int val);
int regcache_lookup_reg(struct regmap *map, unsigned int reg);
-int regcache_set_reg_present(struct regmap *map, unsigned int reg);
-
-static inline bool regcache_reg_present(struct regmap *map, unsigned int reg)
-{
- if (!map->cache_present)
- return true;
- if (reg > map->cache_present_nbits)
- return false;
- return map->cache_present[BIT_WORD(reg)] & BIT_MASK(reg);
-}
int _regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len, bool async);
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 5c1435c4e21..930cad4e5df 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -29,6 +29,8 @@ struct regcache_rbtree_node {
unsigned int base_reg;
/* block of adjacent registers */
void *block;
+ /* Which registers are present */
+ long *cache_present;
/* number of registers available in the block */
unsigned int blklen;
} __attribute__ ((packed));
@@ -57,6 +59,7 @@ static void regcache_rbtree_set_register(struct regmap *map,
struct regcache_rbtree_node *rbnode,
unsigned int idx, unsigned int val)
{
+ set_bit(idx, rbnode->cache_present);
regcache_set_val(map, rbnode->block, idx, val);
}
@@ -146,13 +149,13 @@ static int rbtree_show(struct seq_file *s, void *ignored)
map->lock(map->lock_arg);
mem_size = sizeof(*rbtree_ctx);
- mem_size += BITS_TO_LONGS(map->cache_present_nbits) * sizeof(long);
for (node = rb_first(&rbtree_ctx->root); node != NULL;
node = rb_next(node)) {
n = container_of(node, struct regcache_rbtree_node, node);
mem_size += sizeof(*n);
mem_size += (n->blklen * map->cache_word_size);
+ mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);
regcache_rbtree_get_base_top_reg(map, n, &base, &top);
this_registers = ((top - base) / map->reg_stride) + 1;
@@ -245,6 +248,7 @@ static int regcache_rbtree_exit(struct regmap *map)
rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
next = rb_next(&rbtree_node->node);
rb_erase(&rbtree_node->node, &rbtree_ctx->root);
+ kfree(rbtree_node->cache_present);
kfree(rbtree_node->block);
kfree(rbtree_node);
}
@@ -265,7 +269,7 @@ static int regcache_rbtree_read(struct regmap *map,
rbnode = regcache_rbtree_lookup(map, reg);
if (rbnode) {
reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
- if (!regcache_reg_present(map, reg))
+ if (!test_bit(reg_tmp, rbnode->cache_present))
return -ENOENT;
*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
} else {
@@ -278,27 +282,45 @@ static int regcache_rbtree_read(struct regmap *map,
static int regcache_rbtree_insert_to_block(struct regmap *map,
struct regcache_rbtree_node *rbnode,
- unsigned int pos, unsigned int reg,
+ unsigned int base_reg,
+ unsigned int top_reg,
+ unsigned int reg,
unsigned int value)
{
+ unsigned int blklen;
+ unsigned int pos, offset;
+ unsigned long *present;
u8 *blk;
+ blklen = (top_reg - base_reg) / map->reg_stride + 1;
+ pos = (reg - base_reg) / map->reg_stride;
+ offset = (rbnode->base_reg - base_reg) / map->reg_stride;
+
blk = krealloc(rbnode->block,
- (rbnode->blklen + 1) * map->cache_word_size,
+ blklen * map->cache_word_size,
GFP_KERNEL);
if (!blk)
return -ENOMEM;
+ present = krealloc(rbnode->cache_present,
+ BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL);
+ if (!present) {
+ kfree(blk);
+ return -ENOMEM;
+ }
+
/* insert the register value in the correct place in the rbnode block */
- memmove(blk + (pos + 1) * map->cache_word_size,
- blk + pos * map->cache_word_size,
- (rbnode->blklen - pos) * map->cache_word_size);
+ if (pos == 0) {
+ memmove(blk + offset * map->cache_word_size,
+ blk, rbnode->blklen * map->cache_word_size);
+ bitmap_shift_right(present, present, offset, blklen);
+ }
/* update the rbnode block, its size and the base register */
rbnode->block = blk;
- rbnode->blklen++;
- if (!pos)
- rbnode->base_reg = reg;
+ rbnode->blklen = blklen;
+ rbnode->base_reg = base_reg;
+ rbnode->cache_present = present;
regcache_rbtree_set_register(map, rbnode, pos, value);
return 0;
@@ -325,25 +347,34 @@ regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
if (i != map->rd_table->n_yes_ranges) {
range = &map->rd_table->yes_ranges[i];
- rbnode->blklen = range->range_max - range->range_min
- + 1;
+ rbnode->blklen = (range->range_max - range->range_min) /
+ map->reg_stride + 1;
rbnode->base_reg = range->range_min;
}
}
if (!rbnode->blklen) {
- rbnode->blklen = sizeof(*rbnode);
+ rbnode->blklen = 1;
rbnode->base_reg = reg;
}
rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
GFP_KERNEL);
- if (!rbnode->block) {
- kfree(rbnode);
- return NULL;
- }
+ if (!rbnode->block)
+ goto err_free;
+
+ rbnode->cache_present = kzalloc(BITS_TO_LONGS(rbnode->blklen) *
+ sizeof(*rbnode->cache_present), GFP_KERNEL);
+ if (!rbnode->cache_present)
+ goto err_free_block;
return rbnode;
+
+err_free_block:
+ kfree(rbnode->block);
+err_free:
+ kfree(rbnode);
+ return NULL;
}
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
@@ -353,15 +384,9 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
struct regcache_rbtree_node *rbnode, *rbnode_tmp;
struct rb_node *node;
unsigned int reg_tmp;
- unsigned int pos;
- int i;
int ret;
rbtree_ctx = map->cache;
- /* update the reg_present bitmap, make space if necessary */
- ret = regcache_set_reg_present(map, reg);
- if (ret < 0)
- return ret;
/* if we can't locate it in the cached rbnode we'll have
* to traverse the rbtree looking for it.
@@ -371,30 +396,43 @@ static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
} else {
+ unsigned int base_reg, top_reg;
+ unsigned int new_base_reg, new_top_reg;
+ unsigned int min, max;
+ unsigned int max_dist;
+
+ max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
+ map->cache_word_size;
+ if (reg < max_dist)
+ min = 0;
+ else
+ min = reg - max_dist;
+ max = reg + max_dist;
+
/* look for an adjacent register to the one we are about to add */
for (node = rb_first(&rbtree_ctx->root); node;
node = rb_next(node)) {
rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
node);
- for (i = 0; i < rbnode_tmp->blklen; i++) {
- reg_tmp = rbnode_tmp->base_reg +
- (i * map->reg_stride);
- if (abs(reg_tmp - reg) != map->reg_stride)
- continue;
- /* decide where in the block to place our register */
- if (reg_tmp + map->reg_stride == reg)
- pos = i + 1;
- else
- pos = i;
- ret = regcache_rbtree_insert_to_block(map,
- rbnode_tmp,
- pos, reg,
- value);
- if (ret)
- return ret;
- rbtree_ctx->cached_rbnode = rbnode_tmp;
- return 0;
+
+ regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
+ &base_reg, &top_reg);
+
+ if (base_reg <= max && top_reg >= min) {
+ new_base_reg = min(reg, base_reg);
+ new_top_reg = max(reg, top_reg);
+ } else {
+ continue;
}
+
+ ret = regcache_rbtree_insert_to_block(map, rbnode_tmp,
+ new_base_reg,
+ new_top_reg, reg,
+ value);
+ if (ret)
+ return ret;
+ rbtree_ctx->cached_rbnode = rbnode_tmp;
+ return 0;
}
/* We did not manage to find a place to insert it in
@@ -418,30 +456,34 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
struct regcache_rbtree_ctx *rbtree_ctx;
struct rb_node *node;
struct regcache_rbtree_node *rbnode;
+ unsigned int base_reg, top_reg;
+ unsigned int start, end;
int ret;
- int base, end;
rbtree_ctx = map->cache;
for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
rbnode = rb_entry(node, struct regcache_rbtree_node, node);
- if (rbnode->base_reg > max)
+ regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
+ &top_reg);
+ if (base_reg > max)
break;
- if (rbnode->base_reg + rbnode->blklen < min)
+ if (top_reg < min)
continue;
- if (min > rbnode->base_reg)
- base = min - rbnode->base_reg;
+ if (min > base_reg)
+ start = (min - base_reg) / map->reg_stride;
else
- base = 0;
+ start = 0;
- if (max < rbnode->base_reg + rbnode->blklen)
- end = max - rbnode->base_reg + 1;
+ if (max < top_reg)
+ end = (max - base_reg) / map->reg_stride + 1;
else
end = rbnode->blklen;
- ret = regcache_sync_block(map, rbnode->block, rbnode->base_reg,
- base, end);
+ ret = regcache_sync_block(map, rbnode->block,
+ rbnode->cache_present,
+ rbnode->base_reg, start, end);
if (ret != 0)
return ret;
}
@@ -449,6 +491,42 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
return regmap_async_complete(map);
}
+static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
+ unsigned int max)
+{
+ struct regcache_rbtree_ctx *rbtree_ctx;
+ struct regcache_rbtree_node *rbnode;
+ struct rb_node *node;
+ unsigned int base_reg, top_reg;
+ unsigned int start, end;
+
+ rbtree_ctx = map->cache;
+ for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
+ rbnode = rb_entry(node, struct regcache_rbtree_node, node);
+
+ regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
+ &top_reg);
+ if (base_reg > max)
+ break;
+ if (top_reg < min)
+ continue;
+
+ if (min > base_reg)
+ start = (min - base_reg) / map->reg_stride;
+ else
+ start = 0;
+
+ if (max < top_reg)
+ end = (max - base_reg) / map->reg_stride + 1;
+ else
+ end = rbnode->blklen;
+
+ bitmap_clear(rbnode->cache_present, start, end - start);
+ }
+
+ return 0;
+}
+
struct regcache_ops regcache_rbtree_ops = {
.type = REGCACHE_RBTREE,
.name = "rbtree",
@@ -456,5 +534,6 @@ struct regcache_ops regcache_rbtree_ops = {
.exit = regcache_rbtree_exit,
.read = regcache_rbtree_read,
.write = regcache_rbtree_write,
- .sync = regcache_rbtree_sync
+ .sync = regcache_rbtree_sync,
+ .drop = regcache_rbtree_drop,
};
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index e6910269653..d6c2d691b6e 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -121,8 +121,6 @@ int regcache_init(struct regmap *map, const struct regmap_config *config)
map->reg_defaults_raw = config->reg_defaults_raw;
map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
- map->cache_present = NULL;
- map->cache_present_nbits = 0;
map->cache = NULL;
map->cache_ops = cache_types[i];
@@ -181,7 +179,6 @@ void regcache_exit(struct regmap *map)
BUG_ON(!map->cache_ops);
- kfree(map->cache_present);
kfree(map->reg_defaults);
if (map->cache_free)
kfree(map->reg_defaults_raw);
@@ -241,9 +238,6 @@ int regcache_write(struct regmap *map,
BUG_ON(!map->cache_ops);
- if (!regmap_writeable(map, reg))
- return -EIO;
-
if (!regmap_volatile(map, reg))
return map->cache_ops->write(map, reg, value);
@@ -410,22 +404,16 @@ EXPORT_SYMBOL_GPL(regcache_sync_region);
int regcache_drop_region(struct regmap *map, unsigned int min,
unsigned int max)
{
- unsigned int reg;
int ret = 0;
- if (!map->cache_present && !(map->cache_ops && map->cache_ops->drop))
+ if (!map->cache_ops || !map->cache_ops->drop)
return -EINVAL;
map->lock(map->lock_arg);
trace_regcache_drop_region(map->dev, min, max);
- if (map->cache_present)
- for (reg = min; reg < max + 1; reg++)
- clear_bit(reg, map->cache_present);
-
- if (map->cache_ops && map->cache_ops->drop)
- ret = map->cache_ops->drop(map, min, max);
+ ret = map->cache_ops->drop(map, min, max);
map->unlock(map->lock_arg);
@@ -493,42 +481,6 @@ void regcache_cache_bypass(struct regmap *map, bool enable)
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
-int regcache_set_reg_present(struct regmap *map, unsigned int reg)
-{
- unsigned long *cache_present;
- unsigned int cache_present_size;
- unsigned int nregs;
- int i;
-
- nregs = reg + 1;
- cache_present_size = BITS_TO_LONGS(nregs);
- cache_present_size *= sizeof(long);
-
- if (!map->cache_present) {
- cache_present = kmalloc(cache_present_size, GFP_KERNEL);
- if (!cache_present)
- return -ENOMEM;
- bitmap_zero(cache_present, nregs);
- map->cache_present = cache_present;
- map->cache_present_nbits = nregs;
- }
-
- if (nregs > map->cache_present_nbits) {
- cache_present = krealloc(map->cache_present,
- cache_present_size, GFP_KERNEL);
- if (!cache_present)
- return -ENOMEM;
- for (i = 0; i < nregs; i++)
- if (i >= map->cache_present_nbits)
- clear_bit(i, cache_present);
- map->cache_present = cache_present;
- map->cache_present_nbits = nregs;
- }
-
- set_bit(reg, map->cache_present);
- return 0;
-}
-
bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
unsigned int val)
{
@@ -620,7 +572,16 @@ int regcache_lookup_reg(struct regmap *map, unsigned int reg)
return -ENOENT;
}
+static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
+{
+ if (!cache_present)
+ return true;
+
+ return test_bit(idx, cache_present);
+}
+
static int regcache_sync_block_single(struct regmap *map, void *block,
+ unsigned long *cache_present,
unsigned int block_base,
unsigned int start, unsigned int end)
{
@@ -630,7 +591,7 @@ static int regcache_sync_block_single(struct regmap *map, void *block,
for (i = start; i < end; i++) {
regtmp = block_base + (i * map->reg_stride);
- if (!regcache_reg_present(map, regtmp))
+ if (!regcache_reg_present(cache_present, i))
continue;
val = regcache_get_val(map, block, i);
@@ -681,6 +642,7 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
}
static int regcache_sync_block_raw(struct regmap *map, void *block,
+ unsigned long *cache_present,
unsigned int block_base, unsigned int start,
unsigned int end)
{
@@ -693,7 +655,7 @@ static int regcache_sync_block_raw(struct regmap *map, void *block,
for (i = start; i < end; i++) {
regtmp = block_base + (i * map->reg_stride);
- if (!regcache_reg_present(map, regtmp)) {
+ if (!regcache_reg_present(cache_present, i)) {
ret = regcache_sync_block_raw_flush(map, &data,
base, regtmp);
if (ret != 0)
@@ -719,17 +681,19 @@ static int regcache_sync_block_raw(struct regmap *map, void *block,
}
}
- return regcache_sync_block_raw_flush(map, &data, base, regtmp);
+ return regcache_sync_block_raw_flush(map, &data, base, regtmp +
+ map->reg_stride);
}
int regcache_sync_block(struct regmap *map, void *block,
+ unsigned long *cache_present,
unsigned int block_base, unsigned int start,
unsigned int end)
{
if (regmap_can_raw_write(map))
- return regcache_sync_block_raw(map, block, block_base,
- start, end);
+ return regcache_sync_block_raw(map, block, cache_present,
+ block_base, start, end);
else
- return regcache_sync_block_single(map, block, block_base,
- start, end);
+ return regcache_sync_block_single(map, block, cache_present,
+ block_base, start, end);
}
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
index 53495753fbd..de11ecaf383 100644
--- a/drivers/base/regmap/regmap-debugfs.c
+++ b/drivers/base/regmap/regmap-debugfs.c
@@ -85,8 +85,8 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
unsigned int reg_offset;
/* Suppress the cache if we're using a subrange */
- if (from)
- return from;
+ if (base)
+ return base;
/*
* If we don't have a cache build one so we don't have to do a
@@ -281,7 +281,7 @@ static ssize_t regmap_map_write_file(struct file *file,
reg = simple_strtoul(start, &start, 16);
while (*start == ' ')
start++;
- if (strict_strtoul(start, 16, &value))
+ if (kstrtoul(start, 16, &value))
return -EINVAL;
/* Userspace has been fiddling around behind the kernel's back */
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
index 1643e889baf..d10456ffd81 100644
--- a/drivers/base/regmap/regmap-irq.c
+++ b/drivers/base/regmap/regmap-irq.c
@@ -418,6 +418,31 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
reg, ret);
goto err_alloc;
}
+
+ if (!chip->init_ack_masked)
+ continue;
+
+ /* Ack masked but set interrupts */
+ reg = chip->status_base +
+ (i * map->reg_stride * d->irq_reg_stride);
+ ret = regmap_read(map, reg, &d->status_buf[i]);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to read IRQ status: %d\n",
+ ret);
+ goto err_alloc;
+ }
+
+ if (d->status_buf[i] && chip->ack_base) {
+ reg = chip->ack_base +
+ (i * map->reg_stride * d->irq_reg_stride);
+ ret = regmap_write(map, reg,
+ d->status_buf[i] & d->mask_buf[i]);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to ack 0x%x: %d\n",
+ reg, ret);
+ goto err_alloc;
+ }
+ }
}
/* Wake is disabled by default */
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index e0d0c7d8a5c..7d689a15c50 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -303,6 +303,7 @@ static void regmap_unlock_mutex(void *__map)
}
static void regmap_lock_spinlock(void *__map)
+__acquires(&map->spinlock)
{
struct regmap *map = __map;
unsigned long flags;
@@ -312,6 +313,7 @@ static void regmap_lock_spinlock(void *__map)
}
static void regmap_unlock_spinlock(void *__map)
+__releases(&map->spinlock)
{
struct regmap *map = __map;
spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
@@ -687,6 +689,10 @@ skip_format_initialization:
unsigned win_max = win_min +
config->ranges[j].window_len - 1;
+ /* Allow data window inside its own virtual range */
+ if (j == i)
+ continue;
+
if (range_cfg->range_min <= sel_reg &&
sel_reg <= range_cfg->range_max) {
dev_err(map->dev,
@@ -1261,6 +1267,9 @@ int _regmap_write(struct regmap *map, unsigned int reg,
int ret;
void *context = _regmap_map_get_context(map);
+ if (!regmap_writeable(map, reg))
+ return -EIO;
+
if (!map->cache_bypass && !map->defer_caching) {
ret = regcache_write(map, reg, val);
if (ret != 0)
@@ -1888,13 +1897,10 @@ EXPORT_SYMBOL_GPL(regmap_async_complete);
int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
int num_regs)
{
+ struct reg_default *p;
int i, ret;
bool bypass;
- /* If needed the implementation can be extended to support this */
- if (map->patch)
- return -EBUSY;
-
map->lock(map->lock_arg);
bypass = map->cache_bypass;
@@ -1911,11 +1917,13 @@ int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
}
}
- map->patch = kcalloc(num_regs, sizeof(struct reg_default), GFP_KERNEL);
- if (map->patch != NULL) {
- memcpy(map->patch, regs,
- num_regs * sizeof(struct reg_default));
- map->patch_regs = num_regs;
+ p = krealloc(map->patch,
+ sizeof(struct reg_default) * (map->patch_regs + num_regs),
+ GFP_KERNEL);
+ if (p) {
+ memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
+ map->patch = p;
+ map->patch_regs += num_regs;
} else {
ret = -ENOMEM;
}
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 2f5919ed91a..94ffee378f1 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -62,25 +62,6 @@ static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf)
}
#endif
-#ifdef arch_provides_topology_pointers
-#define define_siblings_show_map(name) \
-static ssize_t show_##name(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- unsigned int cpu = dev->id; \
- return show_cpumap(0, topology_##name(cpu), buf); \
-}
-
-#define define_siblings_show_list(name) \
-static ssize_t show_##name##_list(struct device *dev, \
- struct device_attribute *attr, \
- char *buf) \
-{ \
- unsigned int cpu = dev->id; \
- return show_cpumap(1, topology_##name(cpu), buf); \
-}
-
-#else
#define define_siblings_show_map(name) \
static ssize_t show_##name(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -95,7 +76,6 @@ static ssize_t show_##name##_list(struct device *dev, \
{ \
return show_cpumap(1, topology_##name(dev->id), buf); \
}
-#endif
#define define_siblings_show_func(name) \
define_siblings_show_map(name); define_siblings_show_list(name)
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index 380a2003231..7c081b38ef3 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -35,8 +35,14 @@ config BCMA_DRIVER_PCI_HOSTMODE
PCI core hostmode operation (external PCI bus).
config BCMA_HOST_SOC
- bool
- depends on BCMA_DRIVER_MIPS
+ bool "Support for BCMA in a SoC"
+ depends on BCMA
+ help
+ Host interface for a Broadcom AXI bus directly mapped into
+ the memory. This only works with the Broadcom SoCs from the
+ BCM47XX line.
+
+ If unsure, say N.
config BCMA_DRIVER_MIPS
bool "BCMA Broadcom MIPS core driver"
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index cf7a476a519..c9fd6943ce4 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -31,7 +31,7 @@ static void bcma_pcie_write(struct bcma_drv_pci *pc, u32 address, u32 data)
pcicore_write32(pc, BCMA_CORE_PCI_PCIEIND_DATA, data);
}
-static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
+static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u16 phy)
{
u32 v;
int i;
@@ -55,7 +55,7 @@ static void bcma_pcie_mdio_set_phy(struct bcma_drv_pci *pc, u8 phy)
}
}
-static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
+static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u16 device, u8 address)
{
int max_retries = 10;
u16 ret = 0;
@@ -98,7 +98,7 @@ static u16 bcma_pcie_mdio_read(struct bcma_drv_pci *pc, u8 device, u8 address)
return ret;
}
-static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device,
+static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u16 device,
u8 address, u16 data)
{
int max_retries = 10;
@@ -137,6 +137,13 @@ static void bcma_pcie_mdio_write(struct bcma_drv_pci *pc, u8 device,
pcicore_write32(pc, BCMA_CORE_PCI_MDIO_CONTROL, 0);
}
+static u16 bcma_pcie_mdio_writeread(struct bcma_drv_pci *pc, u16 device,
+ u8 address, u16 data)
+{
+ bcma_pcie_mdio_write(pc, device, address, data);
+ return bcma_pcie_mdio_read(pc, device, address);
+}
+
/**************************************************
* Workarounds.
**************************************************/
@@ -203,6 +210,25 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
}
}
+static void bcma_core_pci_power_save(struct bcma_drv_pci *pc, bool up)
+{
+ u16 data;
+
+ if (pc->core->id.rev >= 15 && pc->core->id.rev <= 20) {
+ data = up ? 0x74 : 0x7C;
+ bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+ BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7F64);
+ bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+ BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
+ } else if (pc->core->id.rev >= 21 && pc->core->id.rev <= 22) {
+ data = up ? 0x75 : 0x7D;
+ bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+ BCMA_CORE_PCI_MDIO_BLK1_MGMT1, 0x7E65);
+ bcma_pcie_mdio_writeread(pc, BCMA_CORE_PCI_MDIO_BLK1,
+ BCMA_CORE_PCI_MDIO_BLK1_MGMT3, data);
+ }
+}
+
/**************************************************
* Init.
**************************************************/
@@ -262,7 +288,7 @@ out:
}
EXPORT_SYMBOL_GPL(bcma_core_pci_irq_ctl);
-void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
+static void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
{
u32 w;
@@ -274,4 +300,33 @@ void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend)
bcma_pcie_write(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG, w);
bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_PMTHRESHREG);
}
-EXPORT_SYMBOL_GPL(bcma_core_pci_extend_L1timer);
+
+void bcma_core_pci_up(struct bcma_bus *bus)
+{
+ struct bcma_drv_pci *pc;
+
+ if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+ return;
+
+ pc = &bus->drv_pci[0];
+
+ bcma_core_pci_power_save(pc, true);
+
+ bcma_core_pci_extend_L1timer(pc, true);
+}
+EXPORT_SYMBOL_GPL(bcma_core_pci_up);
+
+void bcma_core_pci_down(struct bcma_bus *bus)
+{
+ struct bcma_drv_pci *pc;
+
+ if (bus->hosttype != BCMA_HOSTTYPE_PCI)
+ return;
+
+ pc = &bus->drv_pci[0];
+
+ bcma_core_pci_extend_L1timer(pc, false);
+
+ bcma_core_pci_power_save(pc, false);
+}
+EXPORT_SYMBOL_GPL(bcma_core_pci_down);
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index 30629a3d44c..c3d7b03c2fd 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -581,6 +581,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_addresses);
int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
{
struct bcma_drv_pci_host *pc_host;
+ int readrq;
if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
/* This is not a device on the PCI-core bridge. */
@@ -595,6 +596,11 @@ int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
dev->irq = bcma_core_irq(pc_host->pdev->core);
pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
+ readrq = pcie_get_readrq(dev);
+ if (readrq > 128) {
+ pr_info("change PCIe max read request size from %i to 128\n", readrq);
+ pcie_set_readrq(dev, 128);
+ }
return 0;
}
EXPORT_SYMBOL(bcma_core_pci_plat_dev_init);
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 0067422ec17..90ee350442a 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -237,7 +237,7 @@ int bcma_bus_register(struct bcma_bus *bus)
err = bcma_bus_scan(bus);
if (err) {
bcma_err(bus, "Failed to scan: %d\n", err);
- return -1;
+ return err;
}
/* Early init CC core */
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index 8bffa5c9818..cd6b20fce68 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -32,6 +32,18 @@ static const struct bcma_device_id_name bcma_bcm_device_names[] = {
{ BCMA_CORE_4706_CHIPCOMMON, "BCM4706 ChipCommon" },
{ BCMA_CORE_4706_SOC_RAM, "BCM4706 SOC RAM" },
{ BCMA_CORE_4706_MAC_GBIT, "BCM4706 GBit MAC" },
+ { BCMA_CORE_PCIEG2, "PCIe Gen 2" },
+ { BCMA_CORE_DMA, "DMA" },
+ { BCMA_CORE_SDIO3, "SDIO3" },
+ { BCMA_CORE_USB20, "USB 2.0" },
+ { BCMA_CORE_USB30, "USB 3.0" },
+ { BCMA_CORE_A9JTAG, "ARM Cortex A9 JTAG" },
+ { BCMA_CORE_DDR23, "Denali DDR2/DDR3 memory controller" },
+ { BCMA_CORE_ROM, "ROM" },
+ { BCMA_CORE_NAND, "NAND flash controller" },
+ { BCMA_CORE_QSPI, "SPI flash controller" },
+ { BCMA_CORE_CHIPCOMMON_B, "Chipcommon B" },
+ { BCMA_CORE_ARMCA9, "ARM Cortex A9 core (ihost)" },
{ BCMA_CORE_AMEMC, "AMEMC (DDR)" },
{ BCMA_CORE_ALTA, "ALTA (I2S)" },
{ BCMA_CORE_INVALID, "Invalid" },
@@ -201,7 +213,7 @@ static s32 bcma_erom_get_mst_port(struct bcma_bus *bus, u32 __iomem **eromptr)
return ent;
}
-static s32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
+static u32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
u32 type, u8 port)
{
u32 addrl, addrh, sizel, sizeh = 0;
@@ -213,7 +225,7 @@ static s32 bcma_erom_get_addr_desc(struct bcma_bus *bus, u32 __iomem **eromptr,
((ent & SCAN_ADDR_TYPE) != type) ||
(((ent & SCAN_ADDR_PORT) >> SCAN_ADDR_PORT_SHIFT) != port)) {
bcma_erom_push_ent(eromptr);
- return -EINVAL;
+ return (u32)-EINVAL;
}
addrl = ent & SCAN_ADDR_ADDR;
@@ -261,7 +273,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
struct bcma_device_id *match, int core_num,
struct bcma_device *core)
{
- s32 tmp;
+ u32 tmp;
u8 i, j;
s32 cia, cib;
u8 ports[2], wrappers[2];
@@ -339,11 +351,11 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
* the main register space for the core
*/
tmp = bcma_erom_get_addr_desc(bus, eromptr, SCAN_ADDR_TYPE_SLAVE, 0);
- if (tmp <= 0) {
+ if (tmp == 0 || IS_ERR_VALUE(tmp)) {
/* Try again to see if it is a bridge */
tmp = bcma_erom_get_addr_desc(bus, eromptr,
SCAN_ADDR_TYPE_BRIDGE, 0);
- if (tmp <= 0) {
+ if (tmp == 0 || IS_ERR_VALUE(tmp)) {
return -EILSEQ;
} else {
bcma_info(bus, "Bridge found\n");
@@ -357,7 +369,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
for (j = 0; ; j++) {
tmp = bcma_erom_get_addr_desc(bus, eromptr,
SCAN_ADDR_TYPE_SLAVE, i);
- if (tmp < 0) {
+ if (IS_ERR_VALUE(tmp)) {
/* no more entries for port _i_ */
/* pr_debug("erom: slave port %d "
* "has %d descriptors\n", i, j); */
@@ -374,7 +386,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
for (j = 0; ; j++) {
tmp = bcma_erom_get_addr_desc(bus, eromptr,
SCAN_ADDR_TYPE_MWRAP, i);
- if (tmp < 0) {
+ if (IS_ERR_VALUE(tmp)) {
/* no more entries for port _i_ */
/* pr_debug("erom: master wrapper %d "
* "has %d descriptors\n", i, j); */
@@ -392,7 +404,7 @@ static int bcma_get_next_core(struct bcma_bus *bus, u32 __iomem **eromptr,
for (j = 0; ; j++) {
tmp = bcma_erom_get_addr_desc(bus, eromptr,
SCAN_ADDR_TYPE_SWRAP, i + hack);
- if (tmp < 0) {
+ if (IS_ERR_VALUE(tmp)) {
/* no more entries for port _i_ */
/* pr_debug("erom: master wrapper %d "
* has %d descriptors\n", i, j); */
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 99cb944a002..4d45dba7fb8 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -906,16 +906,10 @@ bio_pageinc(struct bio *bio)
int i;
bio_for_each_segment(bv, bio, i) {
- page = bv->bv_page;
/* Non-zero page count for non-head members of
- * compound pages is no longer allowed by the kernel,
- * but this has never been seen here.
+ * compound pages is no longer allowed by the kernel.
*/
- if (unlikely(PageCompound(page)))
- if (compound_trans_head(page) != page) {
- pr_crit("page tail used for block I/O\n");
- BUG();
- }
+ page = compound_trans_head(bv->bv_page);
atomic_inc(&page->_count);
}
}
@@ -924,10 +918,13 @@ static void
bio_pagedec(struct bio *bio)
{
struct bio_vec *bv;
+ struct page *page;
int i;
- bio_for_each_segment(bv, bio, i)
- atomic_dec(&bv->bv_page->_count);
+ bio_for_each_segment(bv, bio, i) {
+ page = compound_trans_head(bv->bv_page);
+ atomic_dec(&page->_count);
+ }
}
static void
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 4ad2ad9a5bb..191cd177fef 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -397,15 +397,19 @@ static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);
-static struct bus_attribute rbd_bus_attrs[] = {
- __ATTR(add, S_IWUSR, NULL, rbd_add),
- __ATTR(remove, S_IWUSR, NULL, rbd_remove),
- __ATTR_NULL
+static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
+static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
+
+static struct attribute *rbd_bus_attrs[] = {
+ &bus_attr_add.attr,
+ &bus_attr_remove.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(rbd_bus);
static struct bus_type rbd_bus_type = {
.name = "rbd",
- .bus_attrs = rbd_bus_attrs,
+ .bus_groups = rbd_bus_groups,
};
static void rbd_root_dev_release(struct device *dev)
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 11f467c00d0..a12b923bbac 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -91,6 +91,10 @@ static struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0489, 0xe04e) },
{ USB_DEVICE(0x0489, 0xe056) },
{ USB_DEVICE(0x0489, 0xe04d) },
+ { USB_DEVICE(0x04c5, 0x1330) },
+ { USB_DEVICE(0x13d3, 0x3402) },
+ { USB_DEVICE(0x0cf3, 0x3121) },
+ { USB_DEVICE(0x0cf3, 0xe003) },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE02C) },
@@ -128,6 +132,10 @@ static struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU22 with sflash firmware */
{ USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 },
@@ -193,24 +201,44 @@ error:
static int ath3k_get_state(struct usb_device *udev, unsigned char *state)
{
- int pipe = 0;
+ int ret, pipe = 0;
+ char *buf;
+
+ buf = kmalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
pipe = usb_rcvctrlpipe(udev, 0);
- return usb_control_msg(udev, pipe, ATH3K_GETSTATE,
- USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
- state, 0x01, USB_CTRL_SET_TIMEOUT);
+ ret = usb_control_msg(udev, pipe, ATH3K_GETSTATE,
+ USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
+ buf, sizeof(*buf), USB_CTRL_SET_TIMEOUT);
+
+ *state = *buf;
+ kfree(buf);
+
+ return ret;
}
static int ath3k_get_version(struct usb_device *udev,
struct ath3k_version *version)
{
- int pipe = 0;
+ int ret, pipe = 0;
+ struct ath3k_version *buf;
+ const int size = sizeof(*buf);
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
pipe = usb_rcvctrlpipe(udev, 0);
- return usb_control_msg(udev, pipe, ATH3K_GETVERSION,
- USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version,
- sizeof(struct ath3k_version),
- USB_CTRL_SET_TIMEOUT);
+ ret = usb_control_msg(udev, pipe, ATH3K_GETVERSION,
+ USB_TYPE_VENDOR | USB_DIR_IN, 0, 0,
+ buf, size, USB_CTRL_SET_TIMEOUT);
+
+ memcpy(version, buf, size);
+ kfree(buf);
+
+ return ret;
}
static int ath3k_load_fwfile(struct usb_device *udev,
diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c
index db2c3c305df..023d35e3c7a 100644
--- a/drivers/bluetooth/btmrvl_debugfs.c
+++ b/drivers/bluetooth/btmrvl_debugfs.c
@@ -43,7 +43,7 @@ static ssize_t btmrvl_hscfgcmd_write(struct file *file,
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
- ret = strict_strtol(buf, 10, &result);
+ ret = kstrtol(buf, 10, &result);
if (ret)
return ret;
@@ -89,7 +89,7 @@ static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf,
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
- ret = strict_strtol(buf, 10, &result);
+ ret = kstrtol(buf, 10, &result);
if (ret)
return ret;
@@ -135,7 +135,7 @@ static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf,
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
- ret = strict_strtol(buf, 10, &result);
+ ret = kstrtol(buf, 10, &result);
if (ret)
return ret;
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 75c26269463..00da6df9f71 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -486,7 +486,7 @@ static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card)
if (firmwarelen - offset < txlen)
txlen = firmwarelen - offset;
- tx_blocks = (txlen + blksz_dl - 1) / blksz_dl;
+ tx_blocks = DIV_ROUND_UP(txlen, blksz_dl);
memcpy(fwbuf, &firmware[offset], txlen);
}
@@ -873,7 +873,7 @@ static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv,
}
blksz = SDIO_BLOCK_SIZE;
- buf_block_len = (nb + blksz - 1) / blksz;
+ buf_block_len = DIV_ROUND_UP(nb, blksz);
sdio_claim_host(card->func);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index de4cf4daa2f..8e16f0af635 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -154,6 +154,10 @@ static struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
/* Atheros AR5BBU12 with sflash firmware */
{ USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
@@ -1095,7 +1099,7 @@ static int btusb_setup_intel_patching(struct hci_dev *hdev,
if (IS_ERR(skb)) {
BT_ERR("%s sending Intel patch command (0x%4.4x) failed (%ld)",
hdev->name, cmd->opcode, PTR_ERR(skb));
- return -PTR_ERR(skb);
+ return PTR_ERR(skb);
}
/* It ensures that the returned event matches the event data read from
@@ -1147,7 +1151,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
if (IS_ERR(skb)) {
BT_ERR("%s sending initial HCI reset command failed (%ld)",
hdev->name, PTR_ERR(skb));
- return -PTR_ERR(skb);
+ return PTR_ERR(skb);
}
kfree_skb(skb);
@@ -1161,7 +1165,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
if (IS_ERR(skb)) {
BT_ERR("%s reading Intel fw version command failed (%ld)",
hdev->name, PTR_ERR(skb));
- return -PTR_ERR(skb);
+ return PTR_ERR(skb);
}
if (skb->len != sizeof(*ver)) {
@@ -1219,7 +1223,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
BT_ERR("%s entering Intel manufacturer mode failed (%ld)",
hdev->name, PTR_ERR(skb));
release_firmware(fw);
- return -PTR_ERR(skb);
+ return PTR_ERR(skb);
}
if (skb->data[0]) {
@@ -1276,7 +1280,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
if (IS_ERR(skb)) {
BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
hdev->name, PTR_ERR(skb));
- return -PTR_ERR(skb);
+ return PTR_ERR(skb);
}
kfree_skb(skb);
@@ -1292,7 +1296,7 @@ exit_mfg_disable:
if (IS_ERR(skb)) {
BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
hdev->name, PTR_ERR(skb));
- return -PTR_ERR(skb);
+ return PTR_ERR(skb);
}
kfree_skb(skb);
@@ -1310,7 +1314,7 @@ exit_mfg_deactivate:
if (IS_ERR(skb)) {
BT_ERR("%s exiting Intel manufacturer mode failed (%ld)",
hdev->name, PTR_ERR(skb));
- return -PTR_ERR(skb);
+ return PTR_ERR(skb);
}
kfree_skb(skb);
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 733288967d4..20092669977 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -122,17 +122,8 @@ EXPORT_SYMBOL_GPL(cci_ace_get_port);
static void __init cci_ace_init_ports(void)
{
- int port, ac, cpu;
- u64 hwid;
- const u32 *cell;
- struct device_node *cpun, *cpus;
-
- cpus = of_find_node_by_path("/cpus");
- if (WARN(!cpus, "Missing cpus node, bailing out\n"))
- return;
-
- if (WARN_ON(of_property_read_u32(cpus, "#address-cells", &ac)))
- ac = of_n_addr_cells(cpus);
+ int port, cpu;
+ struct device_node *cpun;
/*
* Port index look-up speeds up the function disabling ports by CPU,
@@ -141,18 +132,13 @@ static void __init cci_ace_init_ports(void)
* The stashed index array is initialized for all possible CPUs
* at probe time.
*/
- for_each_child_of_node(cpus, cpun) {
- if (of_node_cmp(cpun->type, "cpu"))
- continue;
- cell = of_get_property(cpun, "reg", NULL);
- if (WARN(!cell, "%s: missing reg property\n", cpun->full_name))
- continue;
-
- hwid = of_read_number(cell, ac);
- cpu = get_logical_index(hwid & MPIDR_HWID_BITMASK);
+ for_each_possible_cpu(cpu) {
+ /* too early to use cpu->of_node */
+ cpun = of_get_cpu_node(cpu, NULL);
- if (cpu < 0 || !cpu_possible(cpu))
+ if (WARN(!cpun, "Missing cpu device node\n"))
continue;
+
port = __cci_ace_get_port(cpun, ACE_PORT);
if (port < 0)
continue;
diff --git a/drivers/char/agp/parisc-agp.c b/drivers/char/agp/parisc-agp.c
index bf5d2477cb7..15f2e7025b7 100644
--- a/drivers/char/agp/parisc-agp.c
+++ b/drivers/char/agp/parisc-agp.c
@@ -129,7 +129,8 @@ parisc_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
off_t j, io_pg_start;
int io_pg_count;
- if (type != 0 || mem->type != 0) {
+ if (type != mem->type ||
+ agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
return -EINVAL;
}
@@ -175,7 +176,8 @@ parisc_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
struct _parisc_agp_info *info = &parisc_agp_info;
int i, io_pg_start, io_pg_count;
- if (type != 0 || mem->type != 0) {
+ if (type != mem->type ||
+ agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
return -EINVAL;
}
diff --git a/drivers/char/bsr.c b/drivers/char/bsr.c
index 97467053a01..0671e45daa5 100644
--- a/drivers/char/bsr.c
+++ b/drivers/char/bsr.c
@@ -95,6 +95,7 @@ bsr_size_show(struct device *dev, struct device_attribute *attr, char *buf)
struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", bsr_dev->bsr_bytes);
}
+static DEVICE_ATTR_RO(bsr_size);
static ssize_t
bsr_stride_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -102,20 +103,23 @@ bsr_stride_show(struct device *dev, struct device_attribute *attr, char *buf)
struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", bsr_dev->bsr_stride);
}
+static DEVICE_ATTR_RO(bsr_stride);
static ssize_t
-bsr_len_show(struct device *dev, struct device_attribute *attr, char *buf)
+bsr_length_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct bsr_dev *bsr_dev = dev_get_drvdata(dev);
return sprintf(buf, "%llu\n", bsr_dev->bsr_len);
}
+static DEVICE_ATTR_RO(bsr_length);
-static struct device_attribute bsr_dev_attrs[] = {
- __ATTR(bsr_size, S_IRUGO, bsr_size_show, NULL),
- __ATTR(bsr_stride, S_IRUGO, bsr_stride_show, NULL),
- __ATTR(bsr_length, S_IRUGO, bsr_len_show, NULL),
- __ATTR_NULL
+static struct attribute *bsr_dev_attrs[] = {
+ &dev_attr_bsr_size.attr,
+ &dev_attr_bsr_stride.attr,
+ &dev_attr_bsr_length.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(bsr_dev);
static int bsr_mmap(struct file *filp, struct vm_area_struct *vma)
{
@@ -308,7 +312,7 @@ static int __init bsr_init(void)
ret = PTR_ERR(bsr_class);
goto out_err_1;
}
- bsr_class->dev_attrs = bsr_dev_attrs;
+ bsr_class->dev_groups = bsr_dev_groups;
ret = alloc_chrdev_region(&bsr_dev, 0, BSR_MAX_DEVS, "bsr");
bsr_major = MAJOR(bsr_dev);
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index d5a5f020810..ec318bf434a 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -810,6 +810,7 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
struct ipmi_recv __user *precv64;
struct ipmi_recv recv64;
+ memset(&recv64, 0, sizeof(recv64));
if (get_compat_ipmi_recv(&recv64, compat_ptr(arg)))
return -EFAULT;
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 4445fa164a2..ec4e10fcf1a 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -1848,7 +1848,7 @@ int ipmi_request_settime(ipmi_user_t user,
int retries,
unsigned int retry_time_ms)
{
- unsigned char saddr, lun;
+ unsigned char saddr = 0, lun = 0;
int rv;
if (!user)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index af4b23ffc5a..15e4a603193 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -71,6 +71,11 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#ifdef CONFIG_PARISC
+#include <asm/hardware.h> /* for register_parisc_driver() stuff */
+#include <asm/parisc-device.h>
+#endif
+
#define PFX "ipmi_si: "
/* Measure times between events in the driver. */
@@ -298,6 +303,9 @@ static int pci_registered;
#ifdef CONFIG_ACPI
static int pnp_registered;
#endif
+#ifdef CONFIG_PARISC
+static int parisc_registered;
+#endif
static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
static int num_max_busy_us;
@@ -2279,6 +2287,8 @@ static struct pnp_driver ipmi_pnp_driver = {
.remove = ipmi_pnp_remove,
.id_table = pnp_dev_table,
};
+
+MODULE_DEVICE_TABLE(pnp, pnp_dev_table);
#endif
#ifdef CONFIG_DMI
@@ -2697,6 +2707,62 @@ static struct platform_driver ipmi_driver = {
.remove = ipmi_remove,
};
+#ifdef CONFIG_PARISC
+static int ipmi_parisc_probe(struct parisc_device *dev)
+{
+ struct smi_info *info;
+
+ info = smi_info_alloc();
+
+ if (!info) {
+ dev_err(&dev->dev,
+ "could not allocate memory for PARISC probe\n");
+ return -ENOMEM;
+ }
+
+ info->si_type = SI_KCS;
+ info->addr_source = SI_DEVICETREE;
+ info->io_setup = mem_setup;
+ info->io.addr_type = IPMI_MEM_ADDR_SPACE;
+ info->io.addr_data = dev->hpa.start;
+ info->io.regsize = 1;
+ info->io.regspacing = 1;
+ info->io.regshift = 0;
+ info->irq = 0; /* no interrupt */
+ info->irq_setup = NULL;
+ info->dev = &dev->dev;
+
+ dev_dbg(&dev->dev, "addr 0x%lx\n", info->io.addr_data);
+
+ dev_set_drvdata(&dev->dev, info);
+
+ if (add_smi(info)) {
+ kfree(info);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int ipmi_parisc_remove(struct parisc_device *dev)
+{
+ cleanup_one_si(dev_get_drvdata(&dev->dev));
+ return 0;
+}
+
+static struct parisc_device_id ipmi_parisc_tbl[] = {
+ { HPHW_MC, HVERSION_REV_ANY_ID, 0x004, 0xC0 },
+ { 0, }
+};
+
+static struct parisc_driver ipmi_parisc_driver = {
+ .name = "ipmi",
+ .id_table = ipmi_parisc_tbl,
+ .probe = ipmi_parisc_probe,
+ .remove = ipmi_parisc_remove,
+};
+#endif /* CONFIG_PARISC */
+
static int wait_for_msg_done(struct smi_info *smi_info)
{
enum si_sm_result smi_result;
@@ -3462,6 +3528,13 @@ static int init_ipmi_si(void)
spmi_find_bmc();
#endif
+#ifdef CONFIG_PARISC
+ register_parisc_driver(&ipmi_parisc_driver);
+ parisc_registered = 1;
+ /* poking PC I/O addresses will crash the machine, don't do it */
+ si_trydefaults = 0;
+#endif
+
/* We prefer devices with interrupts, but in the case of a machine
with multiple BMCs we assume that there will be several instances
of a given type so if we succeed in registering a type then also
@@ -3608,6 +3681,10 @@ static void cleanup_ipmi_si(void)
if (pnp_registered)
pnp_unregister_driver(&ipmi_pnp_driver);
#endif
+#ifdef CONFIG_PARISC
+ if (parisc_registered)
+ unregister_parisc_driver(&ipmi_parisc_driver);
+#endif
platform_driver_unregister(&ipmi_driver);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 5c5cc00ebb0..d39cca659a3 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -1182,14 +1182,14 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id)
}
count++;
- if (gis & (BIT1 + BIT0)) {
+ if (gis & (BIT1 | BIT0)) {
isr = read_reg16(info, CHB + ISR);
if (isr & IRQ_DCD)
dcd_change(info, tty);
if (isr & IRQ_CTS)
cts_change(info, tty);
}
- if (gis & (BIT3 + BIT2))
+ if (gis & (BIT3 | BIT2))
{
isr = read_reg16(info, CHA + ISR);
if (isr & IRQ_TIMER) {
@@ -1210,7 +1210,7 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id)
if (isr & IRQ_RXTIME) {
issue_command(info, CHA, CMD_RXFIFO_READ);
}
- if (isr & (IRQ_RXEOM + IRQ_RXFIFO)) {
+ if (isr & (IRQ_RXEOM | IRQ_RXFIFO)) {
if (info->params.mode == MGSL_MODE_HDLC)
rx_ready_hdlc(info, isr & IRQ_RXEOM);
else
@@ -3031,11 +3031,11 @@ static void loopback_enable(MGSLPC_INFO *info)
unsigned char val;
/* CCR1:02..00 CM[2..0] Clock Mode = 111 (clock mode 7) */
- val = read_reg(info, CHA + CCR1) | (BIT2 + BIT1 + BIT0);
+ val = read_reg(info, CHA + CCR1) | (BIT2 | BIT1 | BIT0);
write_reg(info, CHA + CCR1, val);
/* CCR2:04 SSEL Clock source select, 1=submode b */
- val = read_reg(info, CHA + CCR2) | (BIT4 + BIT5);
+ val = read_reg(info, CHA + CCR2) | (BIT4 | BIT5);
write_reg(info, CHA + CCR2, val);
/* set LinkSpeed if available, otherwise default to 2Mbps */
@@ -3125,10 +3125,10 @@ static void hdlc_mode(MGSLPC_INFO *info)
val |= BIT4;
break; // FM0
case HDLC_ENCODING_BIPHASE_MARK:
- val |= BIT4 + BIT2;
+ val |= BIT4 | BIT2;
break; // FM1
case HDLC_ENCODING_BIPHASE_LEVEL:
- val |= BIT4 + BIT3;
+ val |= BIT4 | BIT3;
break; // Manchester
}
write_reg(info, CHA + CCR0, val);
@@ -3185,7 +3185,7 @@ static void hdlc_mode(MGSLPC_INFO *info)
*/
val = 0x00;
if (info->params.crc_type == HDLC_CRC_NONE)
- val |= BIT2 + BIT1;
+ val |= BIT2 | BIT1;
if (info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE)
val |= BIT5;
switch (info->params.preamble_length)
@@ -3197,7 +3197,7 @@ static void hdlc_mode(MGSLPC_INFO *info)
val |= BIT6;
break;
case HDLC_PREAMBLE_LENGTH_64BITS:
- val |= BIT7 + BIT6;
+ val |= BIT7 | BIT6;
break;
}
write_reg(info, CHA + CCR3, val);
@@ -3264,8 +3264,8 @@ static void hdlc_mode(MGSLPC_INFO *info)
clear_reg_bits(info, CHA + PVR, BIT3);
irq_enable(info, CHA,
- IRQ_RXEOM + IRQ_RXFIFO + IRQ_ALLSENT +
- IRQ_UNDERRUN + IRQ_TXFIFO);
+ IRQ_RXEOM | IRQ_RXFIFO | IRQ_ALLSENT |
+ IRQ_UNDERRUN | IRQ_TXFIFO);
issue_command(info, CHA, CMD_TXRESET + CMD_RXRESET);
wait_command_complete(info, CHA);
read_reg16(info, CHA + ISR); /* clear pending IRQs */
@@ -3582,8 +3582,8 @@ static void async_mode(MGSLPC_INFO *info)
} else
clear_reg_bits(info, CHA + PVR, BIT3);
irq_enable(info, CHA,
- IRQ_RXEOM + IRQ_RXFIFO + IRQ_BREAK_ON + IRQ_RXTIME +
- IRQ_ALLSENT + IRQ_TXFIFO);
+ IRQ_RXEOM | IRQ_RXFIFO | IRQ_BREAK_ON | IRQ_RXTIME |
+ IRQ_ALLSENT | IRQ_TXFIFO);
issue_command(info, CHA, CMD_TXRESET + CMD_RXRESET);
wait_command_complete(info, CHA);
read_reg16(info, CHA + ISR); /* clear pending IRQs */
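The synclink_cs.c hunks above replace '+' with '|' when combining BIT*() masks and IRQ_* flags. For distinct single-bit constants the numeric result is identical, but OR states the intent and stays correct even if a bit appears twice, whereas addition carries into the next bit. A standalone illustration (plain C, not driver code):

#include <stdio.h>

#define BIT0 0x01u
#define BIT1 0x02u

int main(void)
{
	unsigned int val = BIT0;			/* BIT0 already set */

	/* OR is idempotent: still just BIT1|BIT0 -> 0x3 */
	printf("or : 0x%x\n", val | (BIT1 | BIT0));

	/* '+' carries when a bit repeats: 0x1 + 0x3 -> 0x4 (a different bit) */
	printf("add: 0x%x\n", val + (BIT1 + BIT0));

	return 0;
}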
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
index bf2349dbbf7..7cc1fe2241f 100644
--- a/drivers/char/sonypi.c
+++ b/drivers/char/sonypi.c
@@ -876,11 +876,6 @@ found:
if (useinput)
sonypi_report_input_event(event);
-#ifdef CONFIG_ACPI
- if (sonypi_acpi_device)
- acpi_bus_generate_proc_event(sonypi_acpi_device, 1, event);
-#endif
-
kfifo_in_locked(&sonypi_device.fifo, (unsigned char *)&event,
sizeof(event), &sonypi_device.fifo_lock);
kill_fasync(&sonypi_device.fifo_async, SIGIO, POLL_IN);
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
index 7faeb1cde97..0e506bad198 100644
--- a/drivers/char/tile-srom.c
+++ b/drivers/char/tile-srom.c
@@ -279,33 +279,37 @@ loff_t srom_llseek(struct file *file, loff_t offset, int origin)
return fixed_size_llseek(file, offset, origin, srom->total_size);
}
-static ssize_t total_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t total_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct srom_dev *srom = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", srom->total_size);
}
+static DEVICE_ATTR_RO(total_size);
-static ssize_t sector_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t sector_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct srom_dev *srom = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", srom->sector_size);
}
+static DEVICE_ATTR_RO(sector_size);
-static ssize_t page_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t page_size_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct srom_dev *srom = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", srom->page_size);
}
+static DEVICE_ATTR_RO(page_size);
-static struct device_attribute srom_dev_attrs[] = {
- __ATTR(total_size, S_IRUGO, total_show, NULL),
- __ATTR(sector_size, S_IRUGO, sector_show, NULL),
- __ATTR(page_size, S_IRUGO, page_show, NULL),
- __ATTR_NULL
+static struct attribute *srom_dev_attrs[] = {
+ &dev_attr_total_size.attr,
+ &dev_attr_sector_size.attr,
+ &dev_attr_page_size.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(srom_dev);
static char *srom_devnode(struct device *dev, umode_t *mode)
{
@@ -349,7 +353,7 @@ static int srom_setup_minor(struct srom_dev *srom, int index)
dev = device_create(srom_class, &platform_bus,
MKDEV(srom_major, index), srom, "%d", index);
- return PTR_RET(dev);
+ return PTR_ERR_OR_ZERO(dev);
}
/** srom_init() - Initialize the driver's module. */
@@ -418,7 +422,7 @@ static int srom_init(void)
result = PTR_ERR(srom_class);
goto fail_cdev;
}
- srom_class->dev_attrs = srom_dev_attrs;
+ srom_class->dev_groups = srom_dev_groups;
srom_class->devnode = srom_devnode;
/* Do per-partition initialization */
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index dbfd56446c3..94c0c74434e 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -91,4 +91,16 @@ config TCG_ST33_I2C
To compile this driver as a module, choose M here; the module will be
called tpm_stm_st33_i2c.
+config TCG_XEN
+ tristate "XEN TPM Interface"
+ depends on TCG_TPM && XEN
+ select XEN_XENBUS_FRONTEND
+ ---help---
+ If you want to make TPM support available to a Xen user domain,
+ say Yes and it will be accessible from within Linux. See
+ the manpages for xl, xl.conf, and docs/misc/vtpm.txt in
+ the Xen source repository for more details.
+ To compile this driver as a module, choose M here; the module
+ will be called xen-tpmfront.
+
endif # TCG_TPM
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index a3736c97c65..eb41ff97d0a 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -18,3 +18,4 @@ obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
obj-$(CONFIG_TCG_IBMVTPM) += tpm_ibmvtpm.o
obj-$(CONFIG_TCG_ST33_I2C) += tpm_i2c_stm_st33.o
+obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o
diff --git a/drivers/char/tpm/xen-tpmfront.c b/drivers/char/tpm/xen-tpmfront.c
new file mode 100644
index 00000000000..7a7929ba265
--- /dev/null
+++ b/drivers/char/tpm/xen-tpmfront.c
@@ -0,0 +1,473 @@
+/*
+ * Implementation of the Xen vTPM device frontend
+ *
+ * Author: Daniel De Graaf <dgdegra@tycho.nsa.gov>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <xen/events.h>
+#include <xen/interface/io/tpmif.h>
+#include <xen/grant_table.h>
+#include <xen/xenbus.h>
+#include <xen/page.h>
+#include "tpm.h"
+
+struct tpm_private {
+ struct tpm_chip *chip;
+ struct xenbus_device *dev;
+
+ struct vtpm_shared_page *shr;
+
+ unsigned int evtchn;
+ int ring_ref;
+ domid_t backend_id;
+};
+
+enum status_bits {
+ VTPM_STATUS_RUNNING = 0x1,
+ VTPM_STATUS_IDLE = 0x2,
+ VTPM_STATUS_RESULT = 0x4,
+ VTPM_STATUS_CANCELED = 0x8,
+};
+
+static u8 vtpm_status(struct tpm_chip *chip)
+{
+ struct tpm_private *priv = TPM_VPRIV(chip);
+ switch (priv->shr->state) {
+ case VTPM_STATE_IDLE:
+ return VTPM_STATUS_IDLE | VTPM_STATUS_CANCELED;
+ case VTPM_STATE_FINISH:
+ return VTPM_STATUS_IDLE | VTPM_STATUS_RESULT;
+ case VTPM_STATE_SUBMIT:
+ case VTPM_STATE_CANCEL: /* cancel requested, not yet canceled */
+ return VTPM_STATUS_RUNNING;
+ default:
+ return 0;
+ }
+}
+
+static bool vtpm_req_canceled(struct tpm_chip *chip, u8 status)
+{
+ return status & VTPM_STATUS_CANCELED;
+}
+
+static void vtpm_cancel(struct tpm_chip *chip)
+{
+ struct tpm_private *priv = TPM_VPRIV(chip);
+ priv->shr->state = VTPM_STATE_CANCEL;
+ wmb();
+ notify_remote_via_evtchn(priv->evtchn);
+}
+
+static unsigned int shr_data_offset(struct vtpm_shared_page *shr)
+{
+ return sizeof(*shr) + sizeof(u32) * shr->nr_extra_pages;
+}
+
+static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct tpm_private *priv = TPM_VPRIV(chip);
+ struct vtpm_shared_page *shr = priv->shr;
+ unsigned int offset = shr_data_offset(shr);
+
+ u32 ordinal;
+ unsigned long duration;
+
+ if (offset > PAGE_SIZE)
+ return -EINVAL;
+
+ if (offset + count > PAGE_SIZE)
+ return -EINVAL;
+
+ /* Wait for completion of any existing command or cancellation */
+ if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, chip->vendor.timeout_c,
+ &chip->vendor.read_queue, true) < 0) {
+ vtpm_cancel(chip);
+ return -ETIME;
+ }
+
+ memcpy(offset + (u8 *)shr, buf, count);
+ shr->length = count;
+ barrier();
+ shr->state = VTPM_STATE_SUBMIT;
+ wmb();
+ notify_remote_via_evtchn(priv->evtchn);
+
+ ordinal = be32_to_cpu(((struct tpm_input_header*)buf)->ordinal);
+ duration = tpm_calc_ordinal_duration(chip, ordinal);
+
+ if (wait_for_tpm_stat(chip, VTPM_STATUS_IDLE, duration,
+ &chip->vendor.read_queue, true) < 0) {
+ /* got a signal or timeout, try to cancel */
+ vtpm_cancel(chip);
+ return -ETIME;
+ }
+
+ return count;
+}
+
+static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
+{
+ struct tpm_private *priv = TPM_VPRIV(chip);
+ struct vtpm_shared_page *shr = priv->shr;
+ unsigned int offset = shr_data_offset(shr);
+ size_t length = shr->length;
+
+ if (shr->state == VTPM_STATE_IDLE)
+ return -ECANCELED;
+
+ /* In theory the wait at the end of _send makes this one unnecessary */
+ if (wait_for_tpm_stat(chip, VTPM_STATUS_RESULT, chip->vendor.timeout_c,
+ &chip->vendor.read_queue, true) < 0) {
+ vtpm_cancel(chip);
+ return -ETIME;
+ }
+
+ if (offset > PAGE_SIZE)
+ return -EIO;
+
+ if (offset + length > PAGE_SIZE)
+ length = PAGE_SIZE - offset;
+
+ if (length > count)
+ length = count;
+
+ memcpy(buf, offset + (u8 *)shr, length);
+
+ return length;
+}
+
+ssize_t tpm_show_locality(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_private *priv = TPM_VPRIV(chip);
+ u8 locality = priv->shr->locality;
+
+ return sprintf(buf, "%d\n", locality);
+}
+
+ssize_t tpm_store_locality(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct tpm_chip *chip = dev_get_drvdata(dev);
+ struct tpm_private *priv = TPM_VPRIV(chip);
+ u8 val;
+
+ int rv = kstrtou8(buf, 0, &val);
+ if (rv)
+ return rv;
+
+ priv->shr->locality = val;
+
+ return len;
+}
+
+static const struct file_operations vtpm_ops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = tpm_open,
+ .read = tpm_read,
+ .write = tpm_write,
+ .release = tpm_release,
+};
+
+static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
+static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
+static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
+static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
+static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
+static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
+ NULL);
+static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
+static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
+static DEVICE_ATTR(durations, S_IRUGO, tpm_show_durations, NULL);
+static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
+static DEVICE_ATTR(locality, S_IRUGO | S_IWUSR, tpm_show_locality,
+ tpm_store_locality);
+
+static struct attribute *vtpm_attrs[] = {
+ &dev_attr_pubek.attr,
+ &dev_attr_pcrs.attr,
+ &dev_attr_enabled.attr,
+ &dev_attr_active.attr,
+ &dev_attr_owned.attr,
+ &dev_attr_temp_deactivated.attr,
+ &dev_attr_caps.attr,
+ &dev_attr_cancel.attr,
+ &dev_attr_durations.attr,
+ &dev_attr_timeouts.attr,
+ &dev_attr_locality.attr,
+ NULL,
+};
+
+static struct attribute_group vtpm_attr_grp = {
+ .attrs = vtpm_attrs,
+};
+
+#define TPM_LONG_TIMEOUT (10 * 60 * HZ)
+
+static const struct tpm_vendor_specific tpm_vtpm = {
+ .status = vtpm_status,
+ .recv = vtpm_recv,
+ .send = vtpm_send,
+ .cancel = vtpm_cancel,
+ .req_complete_mask = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
+ .req_complete_val = VTPM_STATUS_IDLE | VTPM_STATUS_RESULT,
+ .req_canceled = vtpm_req_canceled,
+ .attr_group = &vtpm_attr_grp,
+ .miscdev = {
+ .fops = &vtpm_ops,
+ },
+ .duration = {
+ TPM_LONG_TIMEOUT,
+ TPM_LONG_TIMEOUT,
+ TPM_LONG_TIMEOUT,
+ },
+};
+
+static irqreturn_t tpmif_interrupt(int dummy, void *dev_id)
+{
+ struct tpm_private *priv = dev_id;
+
+ switch (priv->shr->state) {
+ case VTPM_STATE_IDLE:
+ case VTPM_STATE_FINISH:
+ wake_up_interruptible(&priv->chip->vendor.read_queue);
+ break;
+ case VTPM_STATE_SUBMIT:
+ case VTPM_STATE_CANCEL:
+ default:
+ break;
+ }
+ return IRQ_HANDLED;
+}
+
+static int setup_chip(struct device *dev, struct tpm_private *priv)
+{
+ struct tpm_chip *chip;
+
+ chip = tpm_register_hardware(dev, &tpm_vtpm);
+ if (!chip)
+ return -ENODEV;
+
+ init_waitqueue_head(&chip->vendor.read_queue);
+
+ priv->chip = chip;
+ TPM_VPRIV(chip) = priv;
+
+ return 0;
+}
+
+/* caller must clean up in case of errors */
+static int setup_ring(struct xenbus_device *dev, struct tpm_private *priv)
+{
+ struct xenbus_transaction xbt;
+ const char *message = NULL;
+ int rv;
+
+ priv->shr = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
+ if (!priv->shr) {
+ xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
+ return -ENOMEM;
+ }
+
+ rv = xenbus_grant_ring(dev, virt_to_mfn(priv->shr));
+ if (rv < 0)
+ return rv;
+
+ priv->ring_ref = rv;
+
+ rv = xenbus_alloc_evtchn(dev, &priv->evtchn);
+ if (rv)
+ return rv;
+
+ rv = bind_evtchn_to_irqhandler(priv->evtchn, tpmif_interrupt, 0,
+ "tpmif", priv);
+ if (rv <= 0) {
+ xenbus_dev_fatal(dev, rv, "allocating TPM irq");
+ return rv;
+ }
+ priv->chip->vendor.irq = rv;
+
+ again:
+ rv = xenbus_transaction_start(&xbt);
+ if (rv) {
+ xenbus_dev_fatal(dev, rv, "starting transaction");
+ return rv;
+ }
+
+ rv = xenbus_printf(xbt, dev->nodename,
+ "ring-ref", "%u", priv->ring_ref);
+ if (rv) {
+ message = "writing ring-ref";
+ goto abort_transaction;
+ }
+
+ rv = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
+ priv->evtchn);
+ if (rv) {
+ message = "writing event-channel";
+ goto abort_transaction;
+ }
+
+ rv = xenbus_printf(xbt, dev->nodename, "feature-protocol-v2", "1");
+ if (rv) {
+ message = "writing feature-protocol-v2";
+ goto abort_transaction;
+ }
+
+ rv = xenbus_transaction_end(xbt, 0);
+ if (rv == -EAGAIN)
+ goto again;
+ if (rv) {
+ xenbus_dev_fatal(dev, rv, "completing transaction");
+ return rv;
+ }
+
+ xenbus_switch_state(dev, XenbusStateInitialised);
+
+ return 0;
+
+ abort_transaction:
+ xenbus_transaction_end(xbt, 1);
+ if (message)
+ xenbus_dev_error(dev, rv, "%s", message);
+
+ return rv;
+}
+
+static void ring_free(struct tpm_private *priv)
+{
+ if (!priv)
+ return;
+
+ if (priv->ring_ref)
+ gnttab_end_foreign_access(priv->ring_ref, 0,
+ (unsigned long)priv->shr);
+ else
+ free_page((unsigned long)priv->shr);
+
+ if (priv->chip && priv->chip->vendor.irq)
+ unbind_from_irqhandler(priv->chip->vendor.irq, priv);
+
+ kfree(priv);
+}
+
+static int tpmfront_probe(struct xenbus_device *dev,
+ const struct xenbus_device_id *id)
+{
+ struct tpm_private *priv;
+ int rv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ xenbus_dev_fatal(dev, -ENOMEM, "allocating priv structure");
+ return -ENOMEM;
+ }
+
+ rv = setup_chip(&dev->dev, priv);
+ if (rv) {
+ kfree(priv);
+ return rv;
+ }
+
+ rv = setup_ring(dev, priv);
+ if (rv) {
+ tpm_remove_hardware(&dev->dev);
+ ring_free(priv);
+ return rv;
+ }
+
+ tpm_get_timeouts(priv->chip);
+
+ dev_set_drvdata(&dev->dev, priv->chip);
+
+ return rv;
+}
+
+static int tpmfront_remove(struct xenbus_device *dev)
+{
+ struct tpm_chip *chip = dev_get_drvdata(&dev->dev);
+ struct tpm_private *priv = TPM_VPRIV(chip);
+ tpm_remove_hardware(&dev->dev);
+ ring_free(priv);
+ TPM_VPRIV(chip) = NULL;
+ return 0;
+}
+
+static int tpmfront_resume(struct xenbus_device *dev)
+{
+ /* A suspend/resume/migrate will interrupt a vTPM anyway */
+ tpmfront_remove(dev);
+ return tpmfront_probe(dev, NULL);
+}
+
+static void backend_changed(struct xenbus_device *dev,
+ enum xenbus_state backend_state)
+{
+ int val;
+
+ switch (backend_state) {
+ case XenbusStateInitialised:
+ case XenbusStateConnected:
+ if (dev->state == XenbusStateConnected)
+ break;
+
+ if (xenbus_scanf(XBT_NIL, dev->otherend,
+ "feature-protocol-v2", "%d", &val) < 0)
+ val = 0;
+ if (!val) {
+ xenbus_dev_fatal(dev, -EINVAL,
+ "vTPM protocol 2 required");
+ return;
+ }
+ xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+
+ case XenbusStateClosing:
+ case XenbusStateClosed:
+ device_unregister(&dev->dev);
+ xenbus_frontend_closed(dev);
+ break;
+ default:
+ break;
+ }
+}
+
+static const struct xenbus_device_id tpmfront_ids[] = {
+ { "vtpm" },
+ { "" }
+};
+MODULE_ALIAS("xen:vtpm");
+
+static DEFINE_XENBUS_DRIVER(tpmfront, ,
+ .probe = tpmfront_probe,
+ .remove = tpmfront_remove,
+ .resume = tpmfront_resume,
+ .otherend_changed = backend_changed,
+ );
+
+static int __init xen_tpmfront_init(void)
+{
+ if (!xen_domain())
+ return -ENODEV;
+
+ return xenbus_register_frontend(&tpmfront_driver);
+}
+module_init(xen_tpmfront_init);
+
+static void __exit xen_tpmfront_exit(void)
+{
+ xenbus_unregister_driver(&tpmfront_driver);
+}
+module_exit(xen_tpmfront_exit);
+
+MODULE_AUTHOR("Daniel De Graaf <dgdegra@tycho.nsa.gov>");
+MODULE_DESCRIPTION("Xen vTPM Driver");
+MODULE_LICENSE("GPL");
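In the new xen-tpmfront.c above, vtpm_send() and vtpm_recv() copy command data into the shared page after a small header whose size shr_data_offset() computes from nr_extra_pages, and both bail out if the offset would run past PAGE_SIZE. A hedged sketch of the resulting payload capacity; this helper is illustrative, not part of the driver, and assumes the driver's own includes:

/* How much command/response data fits after the header; mirrors the
 * offset and PAGE_SIZE checks done in vtpm_send()/vtpm_recv() above. */
static inline size_t vtpm_payload_capacity(const struct vtpm_shared_page *shr)
{
	size_t off = sizeof(*shr) + sizeof(u32) * shr->nr_extra_pages;

	return off < PAGE_SIZE ? PAGE_SIZE - off : 0;
}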
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 1b456fe9b87..fc45567ad3a 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -272,9 +272,12 @@ static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev,
unsigned long flags;
spin_lock_irqsave(&portdev->ports_lock, flags);
- list_for_each_entry(port, &portdev->ports, list)
- if (port->cdev->dev == dev)
+ list_for_each_entry(port, &portdev->ports, list) {
+ if (port->cdev->dev == dev) {
+ kref_get(&port->kref);
goto out;
+ }
+ }
port = NULL;
out:
spin_unlock_irqrestore(&portdev->ports_lock, flags);
@@ -746,6 +749,10 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
port = filp->private_data;
+ /* Port was hot-unplugged before the read started. */
+ if (!port->guest_connected)
+ return -ENODEV;
+
if (!port_has_data(port)) {
/*
* If nothing's connected on the host just return 0 in
@@ -762,7 +769,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
if (ret < 0)
return ret;
}
- /* Port got hot-unplugged. */
+ /* Port got hot-unplugged while we were waiting above. */
if (!port->guest_connected)
return -ENODEV;
/*
@@ -932,13 +939,25 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
if (is_rproc_serial(port->out_vq->vdev))
return -EINVAL;
+ /*
+ * pipe->nrbufs == 0 means there is no data to transfer,
+ * so just return 0 in that case.
+ */
+ pipe_lock(pipe);
+ if (!pipe->nrbufs) {
+ ret = 0;
+ goto error_out;
+ }
+
ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK);
if (ret < 0)
- return ret;
+ goto error_out;
buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
- if (!buf)
- return -ENOMEM;
+ if (!buf) {
+ ret = -ENOMEM;
+ goto error_out;
+ }
sgl.n = 0;
sgl.len = 0;
@@ -946,12 +965,17 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
sgl.sg = buf->sg;
sg_init_table(sgl.sg, sgl.size);
ret = __splice_from_pipe(pipe, &sd, pipe_to_sg);
+ pipe_unlock(pipe);
if (likely(ret > 0))
ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true);
if (unlikely(ret <= 0))
free_buf(buf, true);
return ret;
+
+error_out:
+ pipe_unlock(pipe);
+ return ret;
}
static unsigned int port_fops_poll(struct file *filp, poll_table *wait)
@@ -1019,14 +1043,14 @@ static int port_fops_open(struct inode *inode, struct file *filp)
struct port *port;
int ret;
+ /* We get the port with a kref here */
port = find_port_by_devt(cdev->dev);
+ if (!port) {
+ /* Port was unplugged before we could proceed */
+ return -ENXIO;
+ }
filp->private_data = port;
- /* Prevent against a port getting hot-unplugged at the same time */
- spin_lock_irq(&port->portdev->ports_lock);
- kref_get(&port->kref);
- spin_unlock_irq(&port->portdev->ports_lock);
-
/*
* Don't allow opening of console port devices -- that's done
* via /dev/hvc
@@ -1498,14 +1522,6 @@ static void remove_port(struct kref *kref)
port = container_of(kref, struct port, kref);
- sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
- device_destroy(pdrvdata.class, port->dev->devt);
- cdev_del(port->cdev);
-
- kfree(port->name);
-
- debugfs_remove(port->debugfs_file);
-
kfree(port);
}
@@ -1539,12 +1555,14 @@ static void unplug_port(struct port *port)
spin_unlock_irq(&port->portdev->ports_lock);
if (port->guest_connected) {
+ /* Let the app know the port is going down. */
+ send_sigio_to_port(port);
+
+ /* Do this after sigio is actually sent */
port->guest_connected = false;
port->host_connected = false;
- wake_up_interruptible(&port->waitqueue);
- /* Let the app know the port is going down. */
- send_sigio_to_port(port);
+ wake_up_interruptible(&port->waitqueue);
}
if (is_console_port(port)) {
@@ -1563,6 +1581,14 @@ static void unplug_port(struct port *port)
*/
port->portdev = NULL;
+ sysfs_remove_group(&port->dev->kobj, &port_attribute_group);
+ device_destroy(pdrvdata.class, port->dev->devt);
+ cdev_del(port->cdev);
+
+ kfree(port->name);
+
+ debugfs_remove(port->debugfs_file);
+
/*
* Locks around here are not necessary - a port can't be
* opened after we removed the port struct from ports_list
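The virtio_console changes above move the kref_get() into find_port_by_devt_in_portdev(), so the reference is taken while ports_lock is still held and port_fops_open() can simply fail with -ENXIO when no port is found. The general lookup-takes-reference idiom, sketched with hypothetical names:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct obj {
	struct list_head list;
	struct kref kref;
	dev_t devt;
};

/* Pin the object before dropping the lookup lock, otherwise it may be
 * freed between the lookup and a later kref_get() by the caller. */
static struct obj *obj_find(struct list_head *head, spinlock_t *lock,
			    dev_t devt)
{
	struct obj *o, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	list_for_each_entry(o, head, list) {
		if (o->devt == devt) {
			kref_get(&o->kref);
			found = o;
			break;
		}
	}
	spin_unlock_irqrestore(lock, flags);

	return found;	/* caller must kref_put() when done */
}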
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 1bdb882c845..4e5739773c3 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -581,11 +581,15 @@ struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4),
DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8),
DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4),
- DIV(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3),
- DIV(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3),
+ DIV_F(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3,
+ CLK_GET_RATE_NOCACHE, 0),
+ DIV_F(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3,
+ CLK_GET_RATE_NOCACHE, 0),
DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3),
- DIV(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, 4, 3),
- DIV(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, 8, 3),
+ DIV_F(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1,
+ 4, 3, CLK_GET_RATE_NOCACHE, 0),
+ DIV_F(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
+ 8, 3, CLK_GET_RATE_NOCACHE, 0),
DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
};
@@ -863,57 +867,57 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100",
E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"),
GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13,
- CLK_IGNORE_UNUSED, 0),
+ CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0),
GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0),
};
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c
index 5c205b60a82..089d3e30e22 100644
--- a/drivers/clk/zynq/clkc.c
+++ b/drivers/clk/zynq/clkc.c
@@ -71,6 +71,7 @@ static DEFINE_SPINLOCK(armpll_lock);
static DEFINE_SPINLOCK(ddrpll_lock);
static DEFINE_SPINLOCK(iopll_lock);
static DEFINE_SPINLOCK(armclk_lock);
+static DEFINE_SPINLOCK(swdtclk_lock);
static DEFINE_SPINLOCK(ddrclk_lock);
static DEFINE_SPINLOCK(dciclk_lock);
static DEFINE_SPINLOCK(gem0clk_lock);
@@ -293,7 +294,7 @@ static void __init zynq_clk_setup(struct device_node *np)
}
clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt],
swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT,
- SLCR_SWDT_CLK_SEL, 0, 1, 0, &gem0clk_lock);
+ SLCR_SWDT_CLK_SEL, 0, 1, 0, &swdtclk_lock);
/* DDR clocks */
clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0,
@@ -364,8 +365,9 @@ static void __init zynq_clk_setup(struct device_node *np)
CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&gem0clk_lock);
- clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2, 0,
- SLCR_GEM0_CLK_CTRL, 6, 1, 0, &gem0clk_lock);
+ clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2,
+ CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 6, 1, 0,
+ &gem0clk_lock);
clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0],
"gem0_emio_mux", CLK_SET_RATE_PARENT,
SLCR_GEM0_CLK_CTRL, 0, 0, &gem0clk_lock);
@@ -386,8 +388,9 @@ static void __init zynq_clk_setup(struct device_node *np)
CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6,
CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO,
&gem1clk_lock);
- clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2, 0,
- SLCR_GEM1_CLK_CTRL, 6, 1, 0, &gem1clk_lock);
+ clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2,
+ CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 6, 1, 0,
+ &gem1clk_lock);
clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1],
"gem1_emio_mux", CLK_SET_RATE_PARENT,
SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock);
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 8b00c5cebfa..704d6d342ad 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o
obj-$(CONFIG_ORION_TIMER) += time-orion.o
obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o
obj-$(CONFIG_ARCH_MARCO) += timer-marco.o
+obj-$(CONFIG_ARCH_MOXART) += moxart_timer.o
obj-$(CONFIG_ARCH_MXS) += mxs_timer.o
obj-$(CONFIG_ARCH_PRIMA2) += timer-prima2.o
obj-$(CONFIG_SUN4I_TIMER) += sun4i_timer.o
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index ffadd836e0b..fbd9ccd5e11 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -16,13 +16,39 @@
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
+#include <linux/of_address.h>
#include <linux/io.h>
+#include <linux/slab.h>
#include <asm/arch_timer.h>
#include <asm/virt.h>
#include <clocksource/arm_arch_timer.h>
+#define CNTTIDR 0x08
+#define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4))
+
+#define CNTVCT_LO 0x08
+#define CNTVCT_HI 0x0c
+#define CNTFRQ 0x10
+#define CNTP_TVAL 0x28
+#define CNTP_CTL 0x2c
+#define CNTV_TVAL 0x38
+#define CNTV_CTL 0x3c
+
+#define ARCH_CP15_TIMER BIT(0)
+#define ARCH_MEM_TIMER BIT(1)
+static unsigned arch_timers_present __initdata;
+
+static void __iomem *arch_counter_base;
+
+struct arch_timer {
+ void __iomem *base;
+ struct clock_event_device evt;
+};
+
+#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
+
static u32 arch_timer_rate;
enum ppi_nr {
@@ -38,19 +64,83 @@ static int arch_timer_ppi[MAX_TIMER_PPI];
static struct clock_event_device __percpu *arch_timer_evt;
static bool arch_timer_use_virtual = true;
+static bool arch_timer_mem_use_virtual;
/*
* Architected system timer support.
*/
-static inline irqreturn_t timer_handler(const int access,
+static __always_inline
+void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
+ struct clock_event_device *clk)
+{
+ if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
+ struct arch_timer *timer = to_arch_timer(clk);
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ writel_relaxed(val, timer->base + CNTP_CTL);
+ break;
+ case ARCH_TIMER_REG_TVAL:
+ writel_relaxed(val, timer->base + CNTP_TVAL);
+ break;
+ }
+ } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
+ struct arch_timer *timer = to_arch_timer(clk);
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ writel_relaxed(val, timer->base + CNTV_CTL);
+ break;
+ case ARCH_TIMER_REG_TVAL:
+ writel_relaxed(val, timer->base + CNTV_TVAL);
+ break;
+ }
+ } else {
+ arch_timer_reg_write_cp15(access, reg, val);
+ }
+}
+
+static __always_inline
+u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
+ struct clock_event_device *clk)
+{
+ u32 val;
+
+ if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
+ struct arch_timer *timer = to_arch_timer(clk);
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ val = readl_relaxed(timer->base + CNTP_CTL);
+ break;
+ case ARCH_TIMER_REG_TVAL:
+ val = readl_relaxed(timer->base + CNTP_TVAL);
+ break;
+ }
+ } else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
+ struct arch_timer *timer = to_arch_timer(clk);
+ switch (reg) {
+ case ARCH_TIMER_REG_CTRL:
+ val = readl_relaxed(timer->base + CNTV_CTL);
+ break;
+ case ARCH_TIMER_REG_TVAL:
+ val = readl_relaxed(timer->base + CNTV_TVAL);
+ break;
+ }
+ } else {
+ val = arch_timer_reg_read_cp15(access, reg);
+ }
+
+ return val;
+}
+
+static __always_inline irqreturn_t timer_handler(const int access,
struct clock_event_device *evt)
{
unsigned long ctrl;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
+
+ ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, evt);
if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
ctrl |= ARCH_TIMER_CTRL_IT_MASK;
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
evt->event_handler(evt);
return IRQ_HANDLED;
}
@@ -72,15 +162,30 @@ static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}
-static inline void timer_set_mode(const int access, int mode)
+static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
+}
+
+static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+
+ return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
+}
+
+static __always_inline void timer_set_mode(const int access, int mode,
+ struct clock_event_device *clk)
{
unsigned long ctrl;
switch (mode) {
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
+ ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
break;
default:
break;
@@ -90,60 +195,108 @@ static inline void timer_set_mode(const int access, int mode)
static void arch_timer_set_mode_virt(enum clock_event_mode mode,
struct clock_event_device *clk)
{
- timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
+ timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode, clk);
}
static void arch_timer_set_mode_phys(enum clock_event_mode mode,
struct clock_event_device *clk)
{
- timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
+ timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode, clk);
+}
+
+static void arch_timer_set_mode_virt_mem(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+ timer_set_mode(ARCH_TIMER_MEM_VIRT_ACCESS, mode, clk);
}
-static inline void set_next_event(const int access, unsigned long evt)
+static void arch_timer_set_mode_phys_mem(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+ timer_set_mode(ARCH_TIMER_MEM_PHYS_ACCESS, mode, clk);
+}
+
+static __always_inline void set_next_event(const int access, unsigned long evt,
+ struct clock_event_device *clk)
{
unsigned long ctrl;
- ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
+ ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
- arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
- arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
+ arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
static int arch_timer_set_next_event_virt(unsigned long evt,
- struct clock_event_device *unused)
+ struct clock_event_device *clk)
{
- set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
+ set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
return 0;
}
static int arch_timer_set_next_event_phys(unsigned long evt,
- struct clock_event_device *unused)
+ struct clock_event_device *clk)
{
- set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
+ set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
return 0;
}
-static int arch_timer_setup(struct clock_event_device *clk)
+static int arch_timer_set_next_event_virt_mem(unsigned long evt,
+ struct clock_event_device *clk)
{
- clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
- clk->name = "arch_sys_timer";
- clk->rating = 450;
- if (arch_timer_use_virtual) {
- clk->irq = arch_timer_ppi[VIRT_PPI];
- clk->set_mode = arch_timer_set_mode_virt;
- clk->set_next_event = arch_timer_set_next_event_virt;
+ set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
+ return 0;
+}
+
+static int arch_timer_set_next_event_phys_mem(unsigned long evt,
+ struct clock_event_device *clk)
+{
+ set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
+ return 0;
+}
+
+static void __arch_timer_setup(unsigned type,
+ struct clock_event_device *clk)
+{
+ clk->features = CLOCK_EVT_FEAT_ONESHOT;
+
+ if (type == ARCH_CP15_TIMER) {
+ clk->features |= CLOCK_EVT_FEAT_C3STOP;
+ clk->name = "arch_sys_timer";
+ clk->rating = 450;
+ clk->cpumask = cpumask_of(smp_processor_id());
+ if (arch_timer_use_virtual) {
+ clk->irq = arch_timer_ppi[VIRT_PPI];
+ clk->set_mode = arch_timer_set_mode_virt;
+ clk->set_next_event = arch_timer_set_next_event_virt;
+ } else {
+ clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
+ clk->set_mode = arch_timer_set_mode_phys;
+ clk->set_next_event = arch_timer_set_next_event_phys;
+ }
} else {
- clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
- clk->set_mode = arch_timer_set_mode_phys;
- clk->set_next_event = arch_timer_set_next_event_phys;
+ clk->name = "arch_mem_timer";
+ clk->rating = 400;
+ clk->cpumask = cpu_all_mask;
+ if (arch_timer_mem_use_virtual) {
+ clk->set_mode = arch_timer_set_mode_virt_mem;
+ clk->set_next_event =
+ arch_timer_set_next_event_virt_mem;
+ } else {
+ clk->set_mode = arch_timer_set_mode_phys_mem;
+ clk->set_next_event =
+ arch_timer_set_next_event_phys_mem;
+ }
}
- clk->cpumask = cpumask_of(smp_processor_id());
+ clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, clk);
- clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);
+ clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
+}
- clockevents_config_and_register(clk, arch_timer_rate,
- 0xf, 0x7fffffff);
+static int arch_timer_setup(struct clock_event_device *clk)
+{
+ __arch_timer_setup(ARCH_CP15_TIMER, clk);
if (arch_timer_use_virtual)
enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
@@ -158,27 +311,41 @@ static int arch_timer_setup(struct clock_event_device *clk)
return 0;
}
-static int arch_timer_available(void)
+static void
+arch_timer_detect_rate(void __iomem *cntbase, struct device_node *np)
{
- u32 freq;
-
- if (arch_timer_rate == 0) {
- freq = arch_timer_get_cntfrq();
-
- /* Check the timer frequency. */
- if (freq == 0) {
- pr_warn("Architected timer frequency not available\n");
- return -EINVAL;
- }
+ /* Who has more than one independent system counter? */
+ if (arch_timer_rate)
+ return;
- arch_timer_rate = freq;
+ /* Try to determine the frequency from the device tree or CNTFRQ */
+ if (of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
+ if (cntbase)
+ arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
+ else
+ arch_timer_rate = arch_timer_get_cntfrq();
}
- pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
+ /* Check the timer frequency. */
+ if (arch_timer_rate == 0)
+ pr_warn("Architected timer frequency not available\n");
+}
+
+static void arch_timer_banner(unsigned type)
+{
+ pr_info("Architected %s%s%s timer(s) running at %lu.%02luMHz (%s%s%s).\n",
+ type & ARCH_CP15_TIMER ? "cp15" : "",
+ type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? " and " : "",
+ type & ARCH_MEM_TIMER ? "mmio" : "",
(unsigned long)arch_timer_rate / 1000000,
(unsigned long)(arch_timer_rate / 10000) % 100,
- arch_timer_use_virtual ? "virt" : "phys");
- return 0;
+ type & ARCH_CP15_TIMER ?
+ arch_timer_use_virtual ? "virt" : "phys" :
+ "",
+ type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "",
+ type & ARCH_MEM_TIMER ?
+ arch_timer_mem_use_virtual ? "virt" : "phys" :
+ "");
}
u32 arch_timer_get_rate(void)
@@ -186,19 +353,35 @@ u32 arch_timer_get_rate(void)
return arch_timer_rate;
}
-u64 arch_timer_read_counter(void)
+static u64 arch_counter_get_cntvct_mem(void)
{
- return arch_counter_get_cntvct();
+ u32 vct_lo, vct_hi, tmp_hi;
+
+ do {
+ vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
+ vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
+ tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
+ } while (vct_hi != tmp_hi);
+
+ return ((u64) vct_hi << 32) | vct_lo;
}
+/*
+ * Default to cp15 based access because arm64 uses this function for
+ * sched_clock() before DT is probed and the cp15 method is guaranteed
+ * to exist on arm64. arm doesn't use this before DT is probed so even
+ * if we don't have the cp15 accessors we won't have a problem.
+ */
+u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct;
+
static cycle_t arch_counter_read(struct clocksource *cs)
{
- return arch_counter_get_cntvct();
+ return arch_timer_read_counter();
}
static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
- return arch_counter_get_cntvct();
+ return arch_timer_read_counter();
}
static struct clocksource clocksource_counter = {
@@ -221,6 +404,23 @@ struct timecounter *arch_timer_get_timecounter(void)
return &timecounter;
}
+static void __init arch_counter_register(unsigned type)
+{
+ u64 start_count;
+
+ /* Register the CP15 based counter if we have one */
+ if (type & ARCH_CP15_TIMER)
+ arch_timer_read_counter = arch_counter_get_cntvct;
+ else
+ arch_timer_read_counter = arch_counter_get_cntvct_mem;
+
+ start_count = arch_timer_read_counter();
+ clocksource_register_hz(&clocksource_counter, arch_timer_rate);
+ cyclecounter.mult = clocksource_counter.mult;
+ cyclecounter.shift = clocksource_counter.shift;
+ timecounter_init(&timecounter, &cyclecounter, start_count);
+}
+
static void arch_timer_stop(struct clock_event_device *clk)
{
pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
@@ -265,22 +465,12 @@ static int __init arch_timer_register(void)
int err;
int ppi;
- err = arch_timer_available();
- if (err)
- goto out;
-
arch_timer_evt = alloc_percpu(struct clock_event_device);
if (!arch_timer_evt) {
err = -ENOMEM;
goto out;
}
- clocksource_register_hz(&clocksource_counter, arch_timer_rate);
- cyclecounter.mult = clocksource_counter.mult;
- cyclecounter.shift = clocksource_counter.shift;
- timecounter_init(&timecounter, &cyclecounter,
- arch_counter_get_cntvct());
-
if (arch_timer_use_virtual) {
ppi = arch_timer_ppi[VIRT_PPI];
err = request_percpu_irq(ppi, arch_timer_handler_virt,
@@ -331,24 +521,77 @@ out:
return err;
}
+static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
+{
+ int ret;
+ irq_handler_t func;
+ struct arch_timer *t;
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ return -ENOMEM;
+
+ t->base = base;
+ t->evt.irq = irq;
+ __arch_timer_setup(ARCH_MEM_TIMER, &t->evt);
+
+ if (arch_timer_mem_use_virtual)
+ func = arch_timer_handler_virt_mem;
+ else
+ func = arch_timer_handler_phys_mem;
+
+ ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
+ if (ret) {
+ pr_err("arch_timer: Failed to request mem timer irq\n");
+ kfree(t);
+ }
+
+ return ret;
+}
+
+static const struct of_device_id arch_timer_of_match[] __initconst = {
+ { .compatible = "arm,armv7-timer", },
+ { .compatible = "arm,armv8-timer", },
+ {},
+};
+
+static const struct of_device_id arch_timer_mem_of_match[] __initconst = {
+ { .compatible = "arm,armv7-timer-mem", },
+ {},
+};
+
+static void __init arch_timer_common_init(void)
+{
+ unsigned mask = ARCH_CP15_TIMER | ARCH_MEM_TIMER;
+
+ /* Wait until both nodes are probed if we have two timers */
+ if ((arch_timers_present & mask) != mask) {
+ if (of_find_matching_node(NULL, arch_timer_mem_of_match) &&
+ !(arch_timers_present & ARCH_MEM_TIMER))
+ return;
+ if (of_find_matching_node(NULL, arch_timer_of_match) &&
+ !(arch_timers_present & ARCH_CP15_TIMER))
+ return;
+ }
+
+ arch_timer_banner(arch_timers_present);
+ arch_counter_register(arch_timers_present);
+ arch_timer_arch_init();
+}
+
static void __init arch_timer_init(struct device_node *np)
{
- u32 freq;
int i;
- if (arch_timer_get_rate()) {
+ if (arch_timers_present & ARCH_CP15_TIMER) {
pr_warn("arch_timer: multiple nodes in dt, skipping\n");
return;
}
- /* Try to determine the frequency from the device tree or CNTFRQ */
- if (!of_property_read_u32(np, "clock-frequency", &freq))
- arch_timer_rate = freq;
-
+ arch_timers_present |= ARCH_CP15_TIMER;
for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
arch_timer_ppi[i] = irq_of_parse_and_map(np, i);
-
- of_node_put(np);
+ arch_timer_detect_rate(NULL, np);
/*
* If HYP mode is available, we know that the physical timer
@@ -369,7 +612,73 @@ static void __init arch_timer_init(struct device_node *np)
}
arch_timer_register();
- arch_timer_arch_init();
+ arch_timer_common_init();
}
CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_init);
CLOCKSOURCE_OF_DECLARE(armv8_arch_timer, "arm,armv8-timer", arch_timer_init);
+
+static void __init arch_timer_mem_init(struct device_node *np)
+{
+ struct device_node *frame, *best_frame = NULL;
+ void __iomem *cntctlbase, *base;
+ unsigned int irq;
+ u32 cnttidr;
+
+ arch_timers_present |= ARCH_MEM_TIMER;
+ cntctlbase = of_iomap(np, 0);
+ if (!cntctlbase) {
+ pr_err("arch_timer: Can't find CNTCTLBase\n");
+ return;
+ }
+
+ cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
+ iounmap(cntctlbase);
+
+ /*
+ * Try to find a virtual capable frame. Otherwise fall back to a
+ * physical capable frame.
+ */
+ for_each_available_child_of_node(np, frame) {
+ int n;
+
+ if (of_property_read_u32(frame, "frame-number", &n)) {
+ pr_err("arch_timer: Missing frame-number\n");
+ of_node_put(best_frame);
+ of_node_put(frame);
+ return;
+ }
+
+ if (cnttidr & CNTTIDR_VIRT(n)) {
+ of_node_put(best_frame);
+ best_frame = frame;
+ arch_timer_mem_use_virtual = true;
+ break;
+ }
+ of_node_put(best_frame);
+ best_frame = of_node_get(frame);
+ }
+
+ base = arch_counter_base = of_iomap(best_frame, 0);
+ if (!base) {
+ pr_err("arch_timer: Can't map frame's registers\n");
+ of_node_put(best_frame);
+ return;
+ }
+
+ if (arch_timer_mem_use_virtual)
+ irq = irq_of_parse_and_map(best_frame, 1);
+ else
+ irq = irq_of_parse_and_map(best_frame, 0);
+ of_node_put(best_frame);
+ if (!irq) {
+ pr_err("arch_timer: Frame missing %s irq",
+ arch_timer_mem_use_virtual ? "virt" : "phys");
+ return;
+ }
+
+ arch_timer_detect_rate(base, np);
+ arch_timer_mem_register(base, irq);
+ arch_timer_common_init();
+}
+CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem",
+ arch_timer_mem_init);
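arch_counter_get_cntvct_mem() above uses the usual hi/lo/hi sequence to read a 64-bit free-running counter that is only exposed as two 32-bit MMIO registers (CNTVCT_HI/CNTVCT_LO). The technique in isolation; read_hi()/read_lo() and the register pointers are hypothetical stand-ins for the driver's readl_relaxed() accesses:

#include <linux/types.h>

/* Hypothetical MMIO locations of the two 32-bit counter halves. */
static volatile u32 *cnt_lo_reg;	/* e.g. an ioremap'd CNTVCT_LO */
static volatile u32 *cnt_hi_reg;	/* e.g. an ioremap'd CNTVCT_HI */

static u32 read_lo(void) { return *cnt_lo_reg; }
static u32 read_hi(void) { return *cnt_hi_reg; }

/* Re-reading the high word detects a low-word wrap between the reads;
 * the loop retries until both high reads agree. */
static u64 read_split_counter64(void)
{
	u32 hi, lo, tmp;

	do {
		hi  = read_hi();
		lo  = read_lo();
		tmp = read_hi();
	} while (hi != tmp);

	return ((u64)hi << 32) | lo;
}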
diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c
index 4cbe28c7463..b2bb3a4bc20 100644
--- a/drivers/clocksource/cadence_ttc_timer.c
+++ b/drivers/clocksource/cadence_ttc_timer.c
@@ -21,7 +21,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
-#include <linux/clk-provider.h>
+#include <linux/sched_clock.h>
/*
* This driver configures the 2 16-bit count-up timers as follows:
@@ -95,6 +95,8 @@ struct ttc_timer_clockevent {
#define to_ttc_timer_clkevent(x) \
container_of(x, struct ttc_timer_clockevent, ce)
+static void __iomem *ttc_sched_clock_val_reg;
+
/**
* ttc_set_interval - Set the timer interval value
*
@@ -156,6 +158,11 @@ static cycle_t __ttc_clocksource_read(struct clocksource *cs)
TTC_COUNT_VAL_OFFSET);
}
+static u32 notrace ttc_sched_clock_read(void)
+{
+ return __raw_readl(ttc_sched_clock_val_reg);
+}
+
/**
* ttc_set_next_event - Sets the time interval for next event
*
@@ -297,6 +304,10 @@ static void __init ttc_setup_clocksource(struct clk *clk, void __iomem *base)
kfree(ttccs);
return;
}
+
+ ttc_sched_clock_val_reg = base + TTC_COUNT_VAL_OFFSET;
+ setup_sched_clock(ttc_sched_clock_read, 16,
+ clk_get_rate(ttccs->ttc.clk) / PRESCALE);
}
static int ttc_rate_change_clockevent_cb(struct notifier_block *nb,
diff --git a/drivers/clocksource/moxart_timer.c b/drivers/clocksource/moxart_timer.c
new file mode 100644
index 00000000000..5eb2c35932b
--- /dev/null
+++ b/drivers/clocksource/moxart_timer.c
@@ -0,0 +1,165 @@
+/*
+ * MOXA ART SoCs timer handling.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqreturn.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/clocksource.h>
+#include <linux/bitops.h>
+
+#define TIMER1_BASE 0x00
+#define TIMER2_BASE 0x10
+#define TIMER3_BASE 0x20
+
+#define REG_COUNT 0x0 /* writable */
+#define REG_LOAD 0x4
+#define REG_MATCH1 0x8
+#define REG_MATCH2 0xC
+
+#define TIMER_CR 0x30
+#define TIMER_INTR_STATE 0x34
+#define TIMER_INTR_MASK 0x38
+
+/*
+ * TIMER_CR flags:
+ *
+ * TIMEREG_CR_*_CLOCK 0: PCLK, 1: EXT1CLK
+ * TIMEREG_CR_*_INT overflow interrupt enable bit
+ */
+#define TIMEREG_CR_1_ENABLE BIT(0)
+#define TIMEREG_CR_1_CLOCK BIT(1)
+#define TIMEREG_CR_1_INT BIT(2)
+#define TIMEREG_CR_2_ENABLE BIT(3)
+#define TIMEREG_CR_2_CLOCK BIT(4)
+#define TIMEREG_CR_2_INT BIT(5)
+#define TIMEREG_CR_3_ENABLE BIT(6)
+#define TIMEREG_CR_3_CLOCK BIT(7)
+#define TIMEREG_CR_3_INT BIT(8)
+#define TIMEREG_CR_COUNT_UP BIT(9)
+
+#define TIMER1_ENABLE (TIMEREG_CR_2_ENABLE | TIMEREG_CR_1_ENABLE)
+#define TIMER1_DISABLE (TIMEREG_CR_2_ENABLE)
+
+static void __iomem *base;
+static unsigned int clock_count_per_tick;
+
+static void moxart_clkevt_mode(enum clock_event_mode mode,
+ struct clock_event_device *clk)
+{
+ switch (mode) {
+ case CLOCK_EVT_MODE_RESUME:
+ case CLOCK_EVT_MODE_ONESHOT:
+ writel(TIMER1_DISABLE, base + TIMER_CR);
+ writel(~0, base + TIMER1_BASE + REG_LOAD);
+ break;
+ case CLOCK_EVT_MODE_PERIODIC:
+ writel(clock_count_per_tick, base + TIMER1_BASE + REG_LOAD);
+ writel(TIMER1_ENABLE, base + TIMER_CR);
+ break;
+ case CLOCK_EVT_MODE_UNUSED:
+ case CLOCK_EVT_MODE_SHUTDOWN:
+ default:
+ writel(TIMER1_DISABLE, base + TIMER_CR);
+ break;
+ }
+}
+
+static int moxart_clkevt_next_event(unsigned long cycles,
+ struct clock_event_device *unused)
+{
+ u32 u;
+
+ writel(TIMER1_DISABLE, base + TIMER_CR);
+
+ u = readl(base + TIMER1_BASE + REG_COUNT) - cycles;
+ writel(u, base + TIMER1_BASE + REG_MATCH1);
+
+ writel(TIMER1_ENABLE, base + TIMER_CR);
+
+ return 0;
+}
+
+static struct clock_event_device moxart_clockevent = {
+ .name = "moxart_timer",
+ .rating = 200,
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .set_mode = moxart_clkevt_mode,
+ .set_next_event = moxart_clkevt_next_event,
+};
+
+static irqreturn_t moxart_timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = dev_id;
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+}
+
+static struct irqaction moxart_timer_irq = {
+ .name = "moxart-timer",
+ .flags = IRQF_TIMER,
+ .handler = moxart_timer_interrupt,
+ .dev_id = &moxart_clockevent,
+};
+
+static void __init moxart_timer_init(struct device_node *node)
+{
+ int ret, irq;
+ unsigned long pclk;
+ struct clk *clk;
+
+ base = of_iomap(node, 0);
+ if (!base)
+ panic("%s: of_iomap failed\n", node->full_name);
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0)
+ panic("%s: irq_of_parse_and_map failed\n", node->full_name);
+
+ ret = setup_irq(irq, &moxart_timer_irq);
+ if (ret)
+ panic("%s: setup_irq failed\n", node->full_name);
+
+ clk = of_clk_get(node, 0);
+ if (IS_ERR(clk))
+ panic("%s: of_clk_get failed\n", node->full_name);
+
+ pclk = clk_get_rate(clk);
+
+ if (clocksource_mmio_init(base + TIMER2_BASE + REG_COUNT,
+ "moxart_timer", pclk, 200, 32,
+ clocksource_mmio_readl_down))
+ panic("%s: clocksource_mmio_init failed\n", node->full_name);
+
+ clock_count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ);
+
+ writel(~0, base + TIMER2_BASE + REG_LOAD);
+ writel(TIMEREG_CR_2_ENABLE, base + TIMER_CR);
+
+ moxart_clockevent.cpumask = cpumask_of(0);
+ moxart_clockevent.irq = irq;
+
+ /*
+ * Hardware documentation is not publicly available;
+ * min_delta / max_delta were obtained by trial and error.
+ * A max_delta of 0xfffffffe should be safe because the count
+ * register is 32 bits wide.
+ */
+ clockevents_config_and_register(&moxart_clockevent, pclk,
+ 0x4, 0xfffffffe);
+}
+CLOCKSOURCE_OF_DECLARE(moxart, "moxa,moxart-timer", moxart_timer_init);
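moxart_timer_init() above programs periodic mode by loading DIV_ROUND_CLOSEST(pclk, HZ) counts per tick and registers the clockevent with min_delta 0x4 and max_delta 0xfffffffe. A small worked example of that reload arithmetic; the 48 MHz PCLK and HZ=100 values are assumptions for illustration, not taken from the driver:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, divisor)	(((x) + ((divisor) / 2)) / (divisor))

int main(void)
{
	unsigned long pclk = 48000000UL;	/* assumed APB clock rate */
	unsigned long hz = 100;			/* assumed scheduler tick rate */

	/* value written to REG_LOAD in periodic mode: 480000 counts/tick */
	printf("counts per tick: %lu\n", DIV_ROUND_CLOSEST(pclk, hz));

	return 0;
}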
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index d4674e78ef3..8ead0258740 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -19,42 +19,83 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqreturn.h>
+#include <linux/sched_clock.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#define TIMER_IRQ_EN_REG 0x00
-#define TIMER_IRQ_EN(val) (1 << val)
+#define TIMER_IRQ_EN(val) BIT(val)
#define TIMER_IRQ_ST_REG 0x04
#define TIMER_CTL_REG(val) (0x10 * val + 0x10)
-#define TIMER_CTL_ENABLE (1 << 0)
-#define TIMER_CTL_AUTORELOAD (1 << 1)
-#define TIMER_CTL_ONESHOT (1 << 7)
-#define TIMER_INTVAL_REG(val) (0x10 * val + 0x14)
-#define TIMER_CNTVAL_REG(val) (0x10 * val + 0x18)
-
-#define TIMER_SCAL 16
+#define TIMER_CTL_ENABLE BIT(0)
+#define TIMER_CTL_RELOAD BIT(1)
+#define TIMER_CTL_CLK_SRC(val) (((val) & 0x3) << 2)
+#define TIMER_CTL_CLK_SRC_OSC24M (1)
+#define TIMER_CTL_CLK_PRES(val) (((val) & 0x7) << 4)
+#define TIMER_CTL_ONESHOT BIT(7)
+#define TIMER_INTVAL_REG(val) (0x10 * (val) + 0x14)
+#define TIMER_CNTVAL_REG(val) (0x10 * (val) + 0x18)
static void __iomem *timer_base;
+static u32 ticks_per_jiffy;
+
+/*
+ * When we disable a timer, we need to wait for at least 2 cycles of
+ * the timer source clock. For that we use the clocksource timer: it is
+ * already set up, runs at the same frequency as the other timers, and
+ * is never disabled.
+ */
+static void sun4i_clkevt_sync(void)
+{
+ u32 old = readl(timer_base + TIMER_CNTVAL_REG(1));
+
+ while ((old - readl(timer_base + TIMER_CNTVAL_REG(1))) < 3)
+ cpu_relax();
+}
+
+static void sun4i_clkevt_time_stop(u8 timer)
+{
+ u32 val = readl(timer_base + TIMER_CTL_REG(timer));
+ writel(val & ~TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG(timer));
+ sun4i_clkevt_sync();
+}
+
+static void sun4i_clkevt_time_setup(u8 timer, unsigned long delay)
+{
+ writel(delay, timer_base + TIMER_INTVAL_REG(timer));
+}
+
+static void sun4i_clkevt_time_start(u8 timer, bool periodic)
+{
+ u32 val = readl(timer_base + TIMER_CTL_REG(timer));
+
+ if (periodic)
+ val &= ~TIMER_CTL_ONESHOT;
+ else
+ val |= TIMER_CTL_ONESHOT;
+
+ writel(val | TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
+ timer_base + TIMER_CTL_REG(timer));
+}
static void sun4i_clkevt_mode(enum clock_event_mode mode,
struct clock_event_device *clk)
{
- u32 u = readl(timer_base + TIMER_CTL_REG(0));
-
switch (mode) {
case CLOCK_EVT_MODE_PERIODIC:
- u &= ~(TIMER_CTL_ONESHOT);
- writel(u | TIMER_CTL_ENABLE, timer_base + TIMER_CTL_REG(0));
+ sun4i_clkevt_time_stop(0);
+ sun4i_clkevt_time_setup(0, ticks_per_jiffy);
+ sun4i_clkevt_time_start(0, true);
break;
-
case CLOCK_EVT_MODE_ONESHOT:
- writel(u | TIMER_CTL_ONESHOT, timer_base + TIMER_CTL_REG(0));
+ sun4i_clkevt_time_stop(0);
+ sun4i_clkevt_time_start(0, false);
break;
case CLOCK_EVT_MODE_UNUSED:
case CLOCK_EVT_MODE_SHUTDOWN:
default:
- writel(u & ~(TIMER_CTL_ENABLE), timer_base + TIMER_CTL_REG(0));
+ sun4i_clkevt_time_stop(0);
break;
}
}
@@ -62,10 +103,9 @@ static void sun4i_clkevt_mode(enum clock_event_mode mode,
static int sun4i_clkevt_next_event(unsigned long evt,
struct clock_event_device *unused)
{
- u32 u = readl(timer_base + TIMER_CTL_REG(0));
- writel(evt, timer_base + TIMER_CNTVAL_REG(0));
- writel(u | TIMER_CTL_ENABLE | TIMER_CTL_AUTORELOAD,
- timer_base + TIMER_CTL_REG(0));
+ sun4i_clkevt_time_stop(0);
+ sun4i_clkevt_time_setup(0, evt);
+ sun4i_clkevt_time_start(0, false);
return 0;
}
@@ -96,6 +136,11 @@ static struct irqaction sun4i_timer_irq = {
.dev_id = &sun4i_clockevent,
};
+static u32 sun4i_timer_sched_read(void)
+{
+ return ~readl(timer_base + TIMER_CNTVAL_REG(1));
+}
+
static void __init sun4i_timer_init(struct device_node *node)
{
unsigned long rate = 0;
@@ -114,22 +159,23 @@ static void __init sun4i_timer_init(struct device_node *node)
clk = of_clk_get(node, 0);
if (IS_ERR(clk))
panic("Can't get timer clock");
+ clk_prepare_enable(clk);
rate = clk_get_rate(clk);
- writel(rate / (TIMER_SCAL * HZ),
- timer_base + TIMER_INTVAL_REG(0));
+ writel(~0, timer_base + TIMER_INTVAL_REG(1));
+ writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD |
+ TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
+ timer_base + TIMER_CTL_REG(1));
+
+ setup_sched_clock(sun4i_timer_sched_read, 32, rate);
+ clocksource_mmio_init(timer_base + TIMER_CNTVAL_REG(1), node->name,
+ rate, 300, 32, clocksource_mmio_readl_down);
- /* set clock source to HOSC, 16 pre-division */
- val = readl(timer_base + TIMER_CTL_REG(0));
- val &= ~(0x07 << 4);
- val &= ~(0x03 << 2);
- val |= (4 << 4) | (1 << 2);
- writel(val, timer_base + TIMER_CTL_REG(0));
+ ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);
- /* set mode to auto reload */
- val = readl(timer_base + TIMER_CTL_REG(0));
- writel(val | TIMER_CTL_AUTORELOAD, timer_base + TIMER_CTL_REG(0));
+ writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
+ timer_base + TIMER_CTL_REG(0));
ret = setup_irq(irq, &sun4i_timer_irq);
if (ret)
@@ -141,8 +187,8 @@ static void __init sun4i_timer_init(struct device_node *node)
sun4i_clockevent.cpumask = cpumask_of(0);
- clockevents_config_and_register(&sun4i_clockevent, rate / TIMER_SCAL,
- 0x1, 0xff);
+ clockevents_config_and_register(&sun4i_clockevent, rate, 0x1,
+ 0xffffffff);
}
CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-timer",
sun4i_timer_init);
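
Timer 1 above is loaded with ~0 and counts down, which is why sun4i_timer_sched_read() returns the bitwise complement of the count register and why clocksource_mmio_readl_down() is used for the clocksource. A minimal standalone check of that identity (illustrative only, not part of the patch):

#include <assert.h>
#include <stdint.h>

/* For a 32-bit down-counter started at 0xffffffff, ~value == elapsed cycles. */
static uint32_t up_count_from_down(uint32_t down)
{
	return ~down;				/* same as 0xffffffffu - down */
}

int main(void)
{
	assert(up_count_from_down(0xffffffffu) == 0);	/* just (re)loaded */
	assert(up_count_from_down(0xfffffff0u) == 0xf);	/* 15 cycles later */
	return 0;
}
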
diff --git a/drivers/clocksource/time-orion.c b/drivers/clocksource/time-orion.c
index ecbeb681021..9c7f018a67c 100644
--- a/drivers/clocksource/time-orion.c
+++ b/drivers/clocksource/time-orion.c
@@ -19,7 +19,7 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/spinlock.h>
-#include <asm/sched_clock.h>
+#include <linux/sched_clock.h>
#define TIMER_CTRL 0x00
#define TIMER0_EN BIT(0)
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index de4d5d93c3f..0fa204b244b 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -17,37 +17,47 @@ config ARM_DT_BL_CPUFREQ
big.LITTLE platform. This gets frequency tables from DT.
config ARM_EXYNOS_CPUFREQ
- bool "SAMSUNG EXYNOS SoCs"
- depends on ARCH_EXYNOS
+ bool
select CPU_FREQ_TABLE
- default y
- help
- This adds the CPUFreq driver common part for Samsung
- EXYNOS SoCs.
-
- If in doubt, say N.
config ARM_EXYNOS4210_CPUFREQ
- def_bool CPU_EXYNOS4210
+ bool "SAMSUNG EXYNOS4210"
+ depends on CPU_EXYNOS4210
+ default y
+ select ARM_EXYNOS_CPUFREQ
help
This adds the CPUFreq driver for Samsung EXYNOS4210
SoC (S5PV310 or S5PC210).
+ If in doubt, say N.
+
config ARM_EXYNOS4X12_CPUFREQ
- def_bool (SOC_EXYNOS4212 || SOC_EXYNOS4412)
+ bool "SAMSUNG EXYNOS4x12"
+ depends on (SOC_EXYNOS4212 || SOC_EXYNOS4412)
+ default y
+ select ARM_EXYNOS_CPUFREQ
help
This adds the CPUFreq driver for Samsung EXYNOS4X12
SoC (EXYNOS4212 or EXYNOS4412).
+ If in doubt, say N.
+
config ARM_EXYNOS5250_CPUFREQ
- def_bool SOC_EXYNOS5250
+ bool "SAMSUNG EXYNOS5250"
+ depends on SOC_EXYNOS5250
+ default y
+ select ARM_EXYNOS_CPUFREQ
help
This adds the CPUFreq driver for Samsung EXYNOS5250
SoC.
+ If in doubt, say N.
+
config ARM_EXYNOS5440_CPUFREQ
- def_bool SOC_EXYNOS5440
+ bool "SAMSUNG EXYNOS5440"
+ depends on SOC_EXYNOS5440
depends on HAVE_CLK && PM_OPP && OF
+ default y
select CPU_FREQ_TABLE
help
This adds the CPUFreq driver for Samsung EXYNOS5440
@@ -55,6 +65,8 @@ config ARM_EXYNOS5440_CPUFREQ
different than previous exynos controllers so not using
the common exynos framework.
+ If in doubt, say N.
+
config ARM_HIGHBANK_CPUFREQ
tristate "Calxeda Highbank-based"
depends on ARCH_HIGHBANK
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index d345b5a7aa7..ad5866c2ada 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -23,7 +23,7 @@ obj-$(CONFIG_GENERIC_CPUFREQ_CPU0) += cpufreq-cpu0.o
# powernow-k8 can load then. ACPI is preferred to all other hardware-specific drivers.
# speedstep-* is preferred over p4-clockmod.
-obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o mperf.o
+obj-$(CONFIG_X86_ACPI_CPUFREQ) += acpi-cpufreq.o
obj-$(CONFIG_X86_POWERNOW_K8) += powernow-k8.o
obj-$(CONFIG_X86_PCC_CPUFREQ) += pcc-cpufreq.o
obj-$(CONFIG_X86_POWERNOW_K6) += powernow-k6.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 39264020b88..a1260b4549d 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -45,7 +45,6 @@
#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
-#include "mperf.h"
MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
@@ -198,7 +197,7 @@ static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
return sprintf(buf, "%u\n", boost_enabled);
}
-static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb);
+cpufreq_freq_attr_rw(cpb);
#endif
static int check_est_cpu(unsigned int cpuid)
@@ -710,7 +709,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
return blacklisted;
#endif
- data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -800,7 +799,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
goto err_unreg;
}
- data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
+ data->freq_table = kmalloc(sizeof(*data->freq_table) *
(perf->state_count+1), GFP_KERNEL);
if (!data->freq_table) {
result = -ENOMEM;
@@ -861,10 +860,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
/* notify BIOS that we exist */
acpi_processor_notify_smm(THIS_MODULE);
- /* Check for APERF/MPERF support in hardware */
- if (boot_cpu_has(X86_FEATURE_APERFMPERF))
- acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
-
pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
for (i = 0; i < perf->state_count; i++)
pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n",
@@ -941,7 +936,6 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
.exit = acpi_cpufreq_cpu_exit,
.resume = acpi_cpufreq_resume,
.name = "acpi-cpufreq",
- .owner = THIS_MODULE,
.attr = acpi_cpufreq_attr,
};
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
index fd9e3ea6a48..480c0bd0468 100644
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -19,12 +19,11 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/opp.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -34,27 +33,13 @@
/* get cpu node with valid operating-points */
static struct device_node *get_cpu_node_with_valid_op(int cpu)
{
- struct device_node *np = NULL, *parent;
- int count = 0;
+ struct device_node *np = of_cpu_device_node_get(cpu);
- parent = of_find_node_by_path("/cpus");
- if (!parent) {
- pr_err("failed to find OF /cpus\n");
- return NULL;
+ if (!of_get_property(np, "operating-points", NULL)) {
+ of_node_put(np);
+ np = NULL;
}
- for_each_child_of_node(parent, np) {
- if (count++ != cpu)
- continue;
- if (!of_get_property(np, "operating-points", NULL)) {
- of_node_put(np);
- np = NULL;
- }
-
- break;
- }
-
- of_node_put(parent);
return np;
}
@@ -63,11 +48,12 @@ static int dt_init_opp_table(struct device *cpu_dev)
struct device_node *np;
int ret;
- np = get_cpu_node_with_valid_op(cpu_dev->id);
- if (!np)
- return -ENODATA;
+ np = of_node_get(cpu_dev->of_node);
+ if (!np) {
+ pr_err("failed to find cpu%d node\n", cpu_dev->id);
+ return -ENOENT;
+ }
- cpu_dev->of_node = np;
ret = of_init_opp_table(cpu_dev);
of_node_put(np);
@@ -79,9 +65,11 @@ static int dt_get_transition_latency(struct device *cpu_dev)
struct device_node *np;
u32 transition_latency = CPUFREQ_ETERNAL;
- np = get_cpu_node_with_valid_op(cpu_dev->id);
- if (!np)
+ np = of_node_get(cpu_dev->of_node);
+ if (!np) {
+ pr_info("Failed to find cpu node. Use CPUFREQ_ETERNAL transition latency\n");
return CPUFREQ_ETERNAL;
+ }
of_property_read_u32(np, "clock-latency", &transition_latency);
of_node_put(np);
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c
index 654488723cb..e0c38d93899 100644
--- a/drivers/cpufreq/at32ap-cpufreq.c
+++ b/drivers/cpufreq/at32ap-cpufreq.c
@@ -108,7 +108,6 @@ static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy)
static struct cpufreq_driver at32_driver = {
.name = "at32ap",
- .owner = THIS_MODULE,
.init = at32_cpufreq_driver_init,
.verify = at32_verify_speed,
.target = at32_set_target,
diff --git a/drivers/cpufreq/blackfin-cpufreq.c b/drivers/cpufreq/blackfin-cpufreq.c
index 9cdbbd278a8..ef05978a723 100644
--- a/drivers/cpufreq/blackfin-cpufreq.c
+++ b/drivers/cpufreq/blackfin-cpufreq.c
@@ -225,7 +225,6 @@ static struct cpufreq_driver bfin_driver = {
.get = bfin_getfreq_khz,
.init = __bfin_cpu_init,
.name = "bfin cpufreq",
- .owner = THIS_MODULE,
.attr = bfin_freq_attr,
};
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index ad1fde27766..cbfffa91ebd 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -69,7 +69,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
- if (cpu_reg) {
+ if (!IS_ERR(cpu_reg)) {
rcu_read_lock();
opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
if (IS_ERR(opp)) {
@@ -90,7 +90,7 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
freqs.new / 1000, volt ? volt / 1000 : -1);
/* scaling up? scale voltage before frequency */
- if (cpu_reg && freqs.new > freqs.old) {
+ if (!IS_ERR(cpu_reg) && freqs.new > freqs.old) {
ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
if (ret) {
pr_err("failed to scale voltage up: %d\n", ret);
@@ -102,14 +102,14 @@ static int cpu0_set_target(struct cpufreq_policy *policy,
ret = clk_set_rate(cpu_clk, freq_exact);
if (ret) {
pr_err("failed to set clock rate: %d\n", ret);
- if (cpu_reg)
+ if (!IS_ERR(cpu_reg))
regulator_set_voltage_tol(cpu_reg, volt_old, tol);
freqs.new = freqs.old;
goto post_notify;
}
/* scaling down? scale voltage after frequency */
- if (cpu_reg && freqs.new < freqs.old) {
+ if (!IS_ERR(cpu_reg) && freqs.new < freqs.old) {
ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
if (ret) {
pr_err("failed to scale voltage down: %d\n", ret);
@@ -174,30 +174,18 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
static int cpu0_cpufreq_probe(struct platform_device *pdev)
{
- struct device_node *np, *parent;
+ struct device_node *np;
int ret;
- parent = of_find_node_by_path("/cpus");
- if (!parent) {
- pr_err("failed to find OF /cpus\n");
- return -ENOENT;
- }
-
- for_each_child_of_node(parent, np) {
- if (of_get_property(np, "operating-points", NULL))
- break;
- }
+ cpu_dev = &pdev->dev;
+ np = of_node_get(cpu_dev->of_node);
if (!np) {
pr_err("failed to find cpu0 node\n");
- ret = -ENOENT;
- goto out_put_parent;
+ return -ENOENT;
}
- cpu_dev = &pdev->dev;
- cpu_dev->of_node = np;
-
- cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
+ cpu_reg = devm_regulator_get_optional(cpu_dev, "cpu0");
if (IS_ERR(cpu_reg)) {
/*
* If cpu0 regulator supply node is present, but regulator is
@@ -210,7 +198,6 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
}
pr_warn("failed to get cpu0 regulator: %ld\n",
PTR_ERR(cpu_reg));
- cpu_reg = NULL;
}
cpu_clk = devm_clk_get(cpu_dev, NULL);
@@ -269,15 +256,12 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
}
of_node_put(np);
- of_node_put(parent);
return 0;
out_free_table:
opp_free_cpufreq_table(cpu_dev, &freq_table);
out_put_node:
of_node_put(np);
-out_put_parent:
- of_node_put(parent);
return ret;
}
diff --git a/drivers/cpufreq/cpufreq-nforce2.c b/drivers/cpufreq/cpufreq-nforce2.c
index af1542d4144..b83d45f6857 100644
--- a/drivers/cpufreq/cpufreq-nforce2.c
+++ b/drivers/cpufreq/cpufreq-nforce2.c
@@ -379,7 +379,6 @@ static struct cpufreq_driver nforce2_driver = {
.get = nforce2_get,
.init = nforce2_cpu_init,
.exit = nforce2_cpu_exit,
- .owner = THIS_MODULE,
};
#ifdef MODULE
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a4ad7339588..5c75e3147a6 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -17,24 +17,17 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <asm/cputime.h>
-#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/notifier.h>
+#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/tick.h>
#include <linux/device.h>
-#include <linux/slab.h>
-#include <linux/cpu.h>
-#include <linux/completion.h>
+#include <linux/init.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/slab.h>
#include <linux/syscore_ops.h>
-
+#include <linux/tick.h>
#include <trace/events/power.h>
/**
@@ -44,8 +37,10 @@
*/
static struct cpufreq_driver *cpufreq_driver;
static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
+static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data_fallback);
static DEFINE_RWLOCK(cpufreq_driver_lock);
static DEFINE_MUTEX(cpufreq_governor_lock);
+static LIST_HEAD(cpufreq_policy_list);
#ifdef CONFIG_HOTPLUG_CPU
/* This one keeps track of the previously set governor of a removed CPU */
@@ -69,15 +64,14 @@ static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
* - Lock should not be held across
* __cpufreq_governor(data, CPUFREQ_GOV_STOP);
*/
-static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
#define lock_policy_rwsem(mode, cpu) \
static int lock_policy_rwsem_##mode(int cpu) \
{ \
- int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
- BUG_ON(policy_cpu == -1); \
- down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
+ BUG_ON(!policy); \
+ down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
\
return 0; \
}
@@ -88,14 +82,20 @@ lock_policy_rwsem(write, cpu);
#define unlock_policy_rwsem(mode, cpu) \
static void unlock_policy_rwsem_##mode(int cpu) \
{ \
- int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
- BUG_ON(policy_cpu == -1); \
- up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
+ struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
+ BUG_ON(!policy); \
+ up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
}
unlock_policy_rwsem(read, cpu);
unlock_policy_rwsem(write, cpu);
+/*
+ * rwsem to guarantee that cpufreq driver module doesn't unload during critical
+ * sections
+ */
+static DECLARE_RWSEM(cpufreq_rwsem);
+
/* internal prototypes */
static int __cpufreq_governor(struct cpufreq_policy *policy,
unsigned int event);
@@ -183,78 +183,46 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
}
EXPORT_SYMBOL_GPL(get_cpu_idle_time);
-static struct cpufreq_policy *__cpufreq_cpu_get(unsigned int cpu, bool sysfs)
+struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
{
- struct cpufreq_policy *data;
+ struct cpufreq_policy *policy = NULL;
unsigned long flags;
- if (cpu >= nr_cpu_ids)
- goto err_out;
+ if (cpufreq_disabled() || (cpu >= nr_cpu_ids))
+ return NULL;
+
+ if (!down_read_trylock(&cpufreq_rwsem))
+ return NULL;
/* get the cpufreq driver */
read_lock_irqsave(&cpufreq_driver_lock, flags);
- if (!cpufreq_driver)
- goto err_out_unlock;
-
- if (!try_module_get(cpufreq_driver->owner))
- goto err_out_unlock;
-
- /* get the CPU */
- data = per_cpu(cpufreq_cpu_data, cpu);
-
- if (!data)
- goto err_out_put_module;
-
- if (!sysfs && !kobject_get(&data->kobj))
- goto err_out_put_module;
+ if (cpufreq_driver) {
+ /* get the CPU */
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+ if (policy)
+ kobject_get(&policy->kobj);
+ }
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- return data;
-err_out_put_module:
- module_put(cpufreq_driver->owner);
-err_out_unlock:
- read_unlock_irqrestore(&cpufreq_driver_lock, flags);
-err_out:
- return NULL;
-}
-
-struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
-{
- if (cpufreq_disabled())
- return NULL;
+ if (!policy)
+ up_read(&cpufreq_rwsem);
- return __cpufreq_cpu_get(cpu, false);
+ return policy;
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
-static struct cpufreq_policy *cpufreq_cpu_get_sysfs(unsigned int cpu)
-{
- return __cpufreq_cpu_get(cpu, true);
-}
-
-static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
-{
- if (!sysfs)
- kobject_put(&data->kobj);
- module_put(cpufreq_driver->owner);
-}
-
-void cpufreq_cpu_put(struct cpufreq_policy *data)
+void cpufreq_cpu_put(struct cpufreq_policy *policy)
{
if (cpufreq_disabled())
return;
- __cpufreq_cpu_put(data, false);
+ kobject_put(&policy->kobj);
+ up_read(&cpufreq_rwsem);
}
EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
-static void cpufreq_cpu_put_sysfs(struct cpufreq_policy *data)
-{
- __cpufreq_cpu_put(data, true);
-}
-
/*********************************************************************
* EXTERNALLY AFFECTING FREQUENCY CHANGES *
*********************************************************************/
@@ -459,8 +427,8 @@ show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
show_one(scaling_cur_freq, cur);
-static int __cpufreq_set_policy(struct cpufreq_policy *data,
- struct cpufreq_policy *policy);
+static int __cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_policy *new_policy);
/**
* cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
@@ -699,12 +667,12 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
- policy = cpufreq_cpu_get_sysfs(policy->cpu);
- if (!policy)
- goto no_policy;
+
+ if (!down_read_trylock(&cpufreq_rwsem))
+ goto exit;
if (lock_policy_rwsem_read(policy->cpu) < 0)
- goto fail;
+ goto up_read;
if (fattr->show)
ret = fattr->show(policy, buf);
@@ -712,9 +680,10 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
ret = -EIO;
unlock_policy_rwsem_read(policy->cpu);
-fail:
- cpufreq_cpu_put_sysfs(policy);
-no_policy:
+
+up_read:
+ up_read(&cpufreq_rwsem);
+exit:
return ret;
}
@@ -724,12 +693,12 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
struct cpufreq_policy *policy = to_policy(kobj);
struct freq_attr *fattr = to_attr(attr);
ssize_t ret = -EINVAL;
- policy = cpufreq_cpu_get_sysfs(policy->cpu);
- if (!policy)
- goto no_policy;
+
+ if (!down_read_trylock(&cpufreq_rwsem))
+ goto exit;
if (lock_policy_rwsem_write(policy->cpu) < 0)
- goto fail;
+ goto up_read;
if (fattr->store)
ret = fattr->store(policy, buf, count);
@@ -737,9 +706,10 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
ret = -EIO;
unlock_policy_rwsem_write(policy->cpu);
-fail:
- cpufreq_cpu_put_sysfs(policy);
-no_policy:
+
+up_read:
+ up_read(&cpufreq_rwsem);
+exit:
return ret;
}
@@ -805,41 +775,32 @@ void cpufreq_sysfs_remove_file(const struct attribute *attr)
EXPORT_SYMBOL(cpufreq_sysfs_remove_file);
/* symlink affected CPUs */
-static int cpufreq_add_dev_symlink(unsigned int cpu,
- struct cpufreq_policy *policy)
+static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
{
unsigned int j;
int ret = 0;
for_each_cpu(j, policy->cpus) {
- struct cpufreq_policy *managed_policy;
struct device *cpu_dev;
- if (j == cpu)
+ if (j == policy->cpu)
continue;
- pr_debug("CPU %u already managed, adding link\n", j);
- managed_policy = cpufreq_cpu_get(cpu);
+ pr_debug("Adding link for CPU: %u\n", j);
cpu_dev = get_cpu_device(j);
ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
"cpufreq");
- if (ret) {
- cpufreq_cpu_put(managed_policy);
- return ret;
- }
+ if (ret)
+ break;
}
return ret;
}
-static int cpufreq_add_dev_interface(unsigned int cpu,
- struct cpufreq_policy *policy,
+static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
struct device *dev)
{
- struct cpufreq_policy new_policy;
struct freq_attr **drv_attr;
- unsigned long flags;
int ret = 0;
- unsigned int j;
/* prepare interface data */
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
@@ -871,18 +832,24 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
goto err_out_kobj_put;
}
- write_lock_irqsave(&cpufreq_driver_lock, flags);
- for_each_cpu(j, policy->cpus) {
- per_cpu(cpufreq_cpu_data, j) = policy;
- per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
- }
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
- ret = cpufreq_add_dev_symlink(cpu, policy);
+ ret = cpufreq_add_dev_symlink(policy);
if (ret)
goto err_out_kobj_put;
- memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
+ return ret;
+
+err_out_kobj_put:
+ kobject_put(&policy->kobj);
+ wait_for_completion(&policy->kobj_unregister);
+ return ret;
+}
+
+static void cpufreq_init_policy(struct cpufreq_policy *policy)
+{
+ struct cpufreq_policy new_policy;
+ int ret = 0;
+
+ memcpy(&new_policy, policy, sizeof(*policy));
/* assure that the starting sequence is run in __cpufreq_set_policy */
policy->governor = NULL;
@@ -896,72 +863,106 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
}
- return ret;
-
-err_out_kobj_put:
- kobject_put(&policy->kobj);
- wait_for_completion(&policy->kobj_unregister);
- return ret;
}
#ifdef CONFIG_HOTPLUG_CPU
-static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
- struct device *dev)
+static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
+ unsigned int cpu, struct device *dev,
+ bool frozen)
{
- struct cpufreq_policy *policy;
int ret = 0, has_target = !!cpufreq_driver->target;
unsigned long flags;
- policy = cpufreq_cpu_get(sibling);
- WARN_ON(!policy);
-
- if (has_target)
- __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ if (has_target) {
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ if (ret) {
+ pr_err("%s: Failed to stop governor\n", __func__);
+ return ret;
+ }
+ }
- lock_policy_rwsem_write(sibling);
+ lock_policy_rwsem_write(policy->cpu);
write_lock_irqsave(&cpufreq_driver_lock, flags);
cpumask_set_cpu(cpu, policy->cpus);
- per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
per_cpu(cpufreq_cpu_data, cpu) = policy;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- unlock_policy_rwsem_write(sibling);
+ unlock_policy_rwsem_write(policy->cpu);
if (has_target) {
- __cpufreq_governor(policy, CPUFREQ_GOV_START);
- __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+ if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
+ (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
+ pr_err("%s: Failed to start governor\n", __func__);
+ return ret;
+ }
}
- ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
- if (ret) {
- cpufreq_cpu_put(policy);
- return ret;
- }
+ /* Don't touch sysfs links during light-weight init */
+ if (!frozen)
+ ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
- return 0;
+ return ret;
}
#endif
-/**
- * cpufreq_add_dev - add a CPU device
- *
- * Adds the cpufreq interface for a CPU device.
- *
- * The Oracle says: try running cpufreq registration/unregistration concurrently
- * with with cpu hotplugging and all hell will break loose. Tried to clean this
- * mess up, but more thorough testing is needed. - Mathieu
- */
-static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
+static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
+{
+ struct cpufreq_policy *policy;
+ unsigned long flags;
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+
+ policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
+
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ return policy;
+}
+
+static struct cpufreq_policy *cpufreq_policy_alloc(void)
+{
+ struct cpufreq_policy *policy;
+
+ policy = kzalloc(sizeof(*policy), GFP_KERNEL);
+ if (!policy)
+ return NULL;
+
+ if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
+ goto err_free_policy;
+
+ if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
+ goto err_free_cpumask;
+
+ INIT_LIST_HEAD(&policy->policy_list);
+ return policy;
+
+err_free_cpumask:
+ free_cpumask_var(policy->cpus);
+err_free_policy:
+ kfree(policy);
+
+ return NULL;
+}
+
+static void cpufreq_policy_free(struct cpufreq_policy *policy)
+{
+ free_cpumask_var(policy->related_cpus);
+ free_cpumask_var(policy->cpus);
+ kfree(policy);
+}
+
+static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
+ bool frozen)
{
unsigned int j, cpu = dev->id;
int ret = -ENOMEM;
struct cpufreq_policy *policy;
unsigned long flags;
#ifdef CONFIG_HOTPLUG_CPU
+ struct cpufreq_policy *tpolicy;
struct cpufreq_governor *gov;
- int sibling;
#endif
if (cpu_is_offline(cpu))
@@ -977,43 +978,38 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
cpufreq_cpu_put(policy);
return 0;
}
+#endif
+
+ if (!down_read_trylock(&cpufreq_rwsem))
+ return 0;
#ifdef CONFIG_HOTPLUG_CPU
/* Check if this cpu was hot-unplugged earlier and has siblings */
read_lock_irqsave(&cpufreq_driver_lock, flags);
- for_each_online_cpu(sibling) {
- struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
- if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
+ list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
+ if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- return cpufreq_add_policy_cpu(cpu, sibling, dev);
+ ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
+ up_read(&cpufreq_rwsem);
+ return ret;
}
}
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
-#endif
- if (!try_module_get(cpufreq_driver->owner)) {
- ret = -EINVAL;
- goto module_out;
- }
+ if (frozen)
+ /* Restore the saved policy when doing light-weight init */
+ policy = cpufreq_policy_restore(cpu);
+ else
+ policy = cpufreq_policy_alloc();
- policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
if (!policy)
goto nomem_out;
- if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
- goto err_free_policy;
-
- if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
- goto err_free_cpumask;
-
policy->cpu = cpu;
policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
cpumask_copy(policy->cpus, cpumask_of(cpu));
- /* Initially set CPU itself as the policy_cpu */
- per_cpu(cpufreq_policy_cpu, cpu) = cpu;
-
init_completion(&policy->kobj_unregister);
INIT_WORK(&policy->update, handle_update);
@@ -1050,12 +1046,26 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
}
#endif
- ret = cpufreq_add_dev_interface(cpu, policy, dev);
- if (ret)
- goto err_out_unregister;
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ for_each_cpu(j, policy->cpus)
+ per_cpu(cpufreq_cpu_data, j) = policy;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ if (!frozen) {
+ ret = cpufreq_add_dev_interface(policy, dev);
+ if (ret)
+ goto err_out_unregister;
+ }
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ list_add(&policy->policy_list, &cpufreq_policy_list);
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ cpufreq_init_policy(policy);
kobject_uevent(&policy->kobj, KOBJ_ADD);
- module_put(cpufreq_driver->owner);
+ up_read(&cpufreq_rwsem);
+
pr_debug("initialization complete\n");
return 0;
@@ -1066,32 +1076,33 @@ err_out_unregister:
per_cpu(cpufreq_cpu_data, j) = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- kobject_put(&policy->kobj);
- wait_for_completion(&policy->kobj_unregister);
-
err_set_policy_cpu:
- per_cpu(cpufreq_policy_cpu, cpu) = -1;
- free_cpumask_var(policy->related_cpus);
-err_free_cpumask:
- free_cpumask_var(policy->cpus);
-err_free_policy:
- kfree(policy);
+ cpufreq_policy_free(policy);
nomem_out:
- module_put(cpufreq_driver->owner);
-module_out:
+ up_read(&cpufreq_rwsem);
+
return ret;
}
-static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+/**
+ * cpufreq_add_dev - add a CPU device
+ *
+ * Adds the cpufreq interface for a CPU device.
+ *
+ * The Oracle says: try running cpufreq registration/unregistration concurrently
+ * with cpu hotplugging and all hell will break loose. Tried to clean this
+ * mess up, but more thorough testing is needed. - Mathieu
+ */
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
- int j;
+ return __cpufreq_add_dev(dev, sif, false);
+}
+static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+{
policy->last_cpu = policy->cpu;
policy->cpu = cpu;
- for_each_cpu(j, policy->cpus)
- per_cpu(cpufreq_policy_cpu, j) = cpu;
-
#ifdef CONFIG_CPU_FREQ_TABLE
cpufreq_frequency_table_update_policy_cpu(policy);
#endif
@@ -1099,6 +1110,37 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
CPUFREQ_UPDATE_POLICY_CPU, policy);
}
+static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
+ unsigned int old_cpu, bool frozen)
+{
+ struct device *cpu_dev;
+ int ret;
+
+ /* first sibling now owns the new sysfs dir */
+ cpu_dev = get_cpu_device(cpumask_first(policy->cpus));
+
+ /* Don't touch sysfs files during light-weight tear-down */
+ if (frozen)
+ return cpu_dev->id;
+
+ sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+ ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
+ if (ret) {
+ pr_err("%s: Failed to move kobj: %d", __func__, ret);
+
+ WARN_ON(lock_policy_rwsem_write(old_cpu));
+ cpumask_set_cpu(old_cpu, policy->cpus);
+ unlock_policy_rwsem_write(old_cpu);
+
+ ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
+ "cpufreq");
+
+ return -EINVAL;
+ }
+
+ return cpu_dev->id;
+}
+
/**
* __cpufreq_remove_dev - remove a CPU device
*
@@ -1107,110 +1149,126 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
* This routine frees the rwsem before returning.
*/
static int __cpufreq_remove_dev(struct device *dev,
- struct subsys_interface *sif)
+ struct subsys_interface *sif, bool frozen)
{
- unsigned int cpu = dev->id, ret, cpus;
+ unsigned int cpu = dev->id, cpus;
+ int new_cpu, ret;
unsigned long flags;
- struct cpufreq_policy *data;
+ struct cpufreq_policy *policy;
struct kobject *kobj;
struct completion *cmp;
- struct device *cpu_dev;
pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
write_lock_irqsave(&cpufreq_driver_lock, flags);
- data = per_cpu(cpufreq_cpu_data, cpu);
- per_cpu(cpufreq_cpu_data, cpu) = NULL;
+ policy = per_cpu(cpufreq_cpu_data, cpu);
+
+ /* Save the policy somewhere when doing a light-weight tear-down */
+ if (frozen)
+ per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- if (!data) {
+ if (!policy) {
pr_debug("%s: No cpu_data found\n", __func__);
return -EINVAL;
}
- if (cpufreq_driver->target)
- __cpufreq_governor(data, CPUFREQ_GOV_STOP);
+ if (cpufreq_driver->target) {
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ if (ret) {
+ pr_err("%s: Failed to stop governor\n", __func__);
+ return ret;
+ }
+ }
#ifdef CONFIG_HOTPLUG_CPU
if (!cpufreq_driver->setpolicy)
strncpy(per_cpu(cpufreq_cpu_governor, cpu),
- data->governor->name, CPUFREQ_NAME_LEN);
+ policy->governor->name, CPUFREQ_NAME_LEN);
#endif
WARN_ON(lock_policy_rwsem_write(cpu));
- cpus = cpumask_weight(data->cpus);
+ cpus = cpumask_weight(policy->cpus);
if (cpus > 1)
- cpumask_clear_cpu(cpu, data->cpus);
+ cpumask_clear_cpu(cpu, policy->cpus);
unlock_policy_rwsem_write(cpu);
- if (cpu != data->cpu) {
+ if (cpu != policy->cpu && !frozen) {
sysfs_remove_link(&dev->kobj, "cpufreq");
} else if (cpus > 1) {
- /* first sibling now owns the new sysfs dir */
- cpu_dev = get_cpu_device(cpumask_first(data->cpus));
- sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
- ret = kobject_move(&data->kobj, &cpu_dev->kobj);
- if (ret) {
- pr_err("%s: Failed to move kobj: %d", __func__, ret);
+ new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
+ if (new_cpu >= 0) {
WARN_ON(lock_policy_rwsem_write(cpu));
- cpumask_set_cpu(cpu, data->cpus);
-
- write_lock_irqsave(&cpufreq_driver_lock, flags);
- per_cpu(cpufreq_cpu_data, cpu) = data;
- write_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
+ update_policy_cpu(policy, new_cpu);
unlock_policy_rwsem_write(cpu);
- ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
- "cpufreq");
- return -EINVAL;
+ if (!frozen) {
+ pr_debug("%s: policy Kobject moved to cpu: %d "
+ "from: %d\n",__func__, new_cpu, cpu);
+ }
}
-
- WARN_ON(lock_policy_rwsem_write(cpu));
- update_policy_cpu(data, cpu_dev->id);
- unlock_policy_rwsem_write(cpu);
- pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
- __func__, cpu_dev->id, cpu);
}
- if ((cpus == 1) && (cpufreq_driver->target))
- __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
-
- pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
- cpufreq_cpu_put(data);
-
/* If cpu is last user of policy, free policy */
if (cpus == 1) {
- lock_policy_rwsem_read(cpu);
- kobj = &data->kobj;
- cmp = &data->kobj_unregister;
- unlock_policy_rwsem_read(cpu);
- kobject_put(kobj);
-
- /* we need to make sure that the underlying kobj is actually
- * not referenced anymore by anybody before we proceed with
- * unloading.
- */
- pr_debug("waiting for dropping of refcount\n");
- wait_for_completion(cmp);
- pr_debug("wait complete\n");
+ if (cpufreq_driver->target) {
+ ret = __cpufreq_governor(policy,
+ CPUFREQ_GOV_POLICY_EXIT);
+ if (ret) {
+ pr_err("%s: Failed to exit governor\n",
+ __func__);
+ return ret;
+ }
+ }
+
+ if (!frozen) {
+ lock_policy_rwsem_read(cpu);
+ kobj = &policy->kobj;
+ cmp = &policy->kobj_unregister;
+ unlock_policy_rwsem_read(cpu);
+ kobject_put(kobj);
+
+ /*
+ * We need to make sure that the underlying kobj is
+ * actually not referenced anymore by anybody before we
+ * proceed with unloading.
+ */
+ pr_debug("waiting for dropping of refcount\n");
+ wait_for_completion(cmp);
+ pr_debug("wait complete\n");
+ }
+ /*
+ * Perform the ->exit() even during light-weight tear-down,
+ * since this is a core component, and is essential for the
+ * subsequent light-weight ->init() to succeed.
+ */
if (cpufreq_driver->exit)
- cpufreq_driver->exit(data);
+ cpufreq_driver->exit(policy);
- free_cpumask_var(data->related_cpus);
- free_cpumask_var(data->cpus);
- kfree(data);
- } else if (cpufreq_driver->target) {
- __cpufreq_governor(data, CPUFREQ_GOV_START);
- __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
+ /* Remove policy from list of active policies */
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ list_del(&policy->policy_list);
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+ if (!frozen)
+ cpufreq_policy_free(policy);
+ } else {
+ if (cpufreq_driver->target) {
+ if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
+ (ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))) {
+ pr_err("%s: Failed to start governor\n",
+ __func__);
+ return ret;
+ }
+ }
}
- per_cpu(cpufreq_policy_cpu, cpu) = -1;
+ per_cpu(cpufreq_cpu_data, cpu) = NULL;
return 0;
}
@@ -1222,7 +1280,7 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
if (cpu_is_offline(cpu))
return 0;
- retval = __cpufreq_remove_dev(dev, sif);
+ retval = __cpufreq_remove_dev(dev, sif, false);
return retval;
}
@@ -1343,10 +1401,9 @@ static unsigned int __cpufreq_get(unsigned int cpu)
unsigned int cpufreq_get(unsigned int cpu)
{
unsigned int ret_freq = 0;
- struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
- if (!policy)
- goto out;
+ if (!down_read_trylock(&cpufreq_rwsem))
+ return 0;
if (unlikely(lock_policy_rwsem_read(cpu)))
goto out_policy;
@@ -1356,8 +1413,8 @@ unsigned int cpufreq_get(unsigned int cpu)
unlock_policy_rwsem_read(cpu);
out_policy:
- cpufreq_cpu_put(policy);
-out:
+ up_read(&cpufreq_rwsem);
+
return ret_freq;
}
EXPORT_SYMBOL(cpufreq_get);
@@ -1380,23 +1437,23 @@ static int cpufreq_bp_suspend(void)
int ret = 0;
int cpu = smp_processor_id();
- struct cpufreq_policy *cpu_policy;
+ struct cpufreq_policy *policy;
pr_debug("suspending cpu %u\n", cpu);
/* If there's no policy for the boot CPU, we have nothing to do. */
- cpu_policy = cpufreq_cpu_get(cpu);
- if (!cpu_policy)
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
return 0;
if (cpufreq_driver->suspend) {
- ret = cpufreq_driver->suspend(cpu_policy);
+ ret = cpufreq_driver->suspend(policy);
if (ret)
printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
- "step on CPU %u\n", cpu_policy->cpu);
+ "step on CPU %u\n", policy->cpu);
}
- cpufreq_cpu_put(cpu_policy);
+ cpufreq_cpu_put(policy);
return ret;
}
@@ -1418,28 +1475,28 @@ static void cpufreq_bp_resume(void)
int ret = 0;
int cpu = smp_processor_id();
- struct cpufreq_policy *cpu_policy;
+ struct cpufreq_policy *policy;
pr_debug("resuming cpu %u\n", cpu);
/* If there's no policy for the boot CPU, we have nothing to do. */
- cpu_policy = cpufreq_cpu_get(cpu);
- if (!cpu_policy)
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
return;
if (cpufreq_driver->resume) {
- ret = cpufreq_driver->resume(cpu_policy);
+ ret = cpufreq_driver->resume(policy);
if (ret) {
printk(KERN_ERR "cpufreq: resume failed in ->resume "
- "step on CPU %u\n", cpu_policy->cpu);
+ "step on CPU %u\n", policy->cpu);
goto fail;
}
}
- schedule_work(&cpu_policy->update);
+ schedule_work(&policy->update);
fail:
- cpufreq_cpu_put(cpu_policy);
+ cpufreq_cpu_put(policy);
}
static struct syscore_ops cpufreq_syscore_ops = {
@@ -1593,18 +1650,6 @@ fail:
}
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
-int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
-{
- if (cpufreq_disabled())
- return 0;
-
- if (!cpufreq_driver->getavg)
- return 0;
-
- return cpufreq_driver->getavg(policy, cpu);
-}
-EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
-
/*
* when "event" is CPUFREQ_GOV_LIMITS
*/
@@ -1639,8 +1684,9 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
}
}
- if (!try_module_get(policy->governor->owner))
- return -EINVAL;
+ if (event == CPUFREQ_GOV_POLICY_INIT)
+ if (!try_module_get(policy->governor->owner))
+ return -EINVAL;
pr_debug("__cpufreq_governor for CPU %u, event %u\n",
policy->cpu, event);
@@ -1676,11 +1722,8 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
mutex_unlock(&cpufreq_governor_lock);
}
- /* we keep one module reference alive for
- each CPU governed by this CPU */
- if ((event != CPUFREQ_GOV_START) || ret)
- module_put(policy->governor->owner);
- if ((event == CPUFREQ_GOV_STOP) && !ret)
+ if (((event == CPUFREQ_GOV_POLICY_INIT) && ret) ||
+ ((event == CPUFREQ_GOV_POLICY_EXIT) && !ret))
module_put(policy->governor->owner);
return ret;
@@ -1760,7 +1803,7 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
if (!cpu_policy)
return -EINVAL;
- memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
+ memcpy(policy, cpu_policy, sizeof(*policy));
cpufreq_cpu_put(cpu_policy);
return 0;
@@ -1771,95 +1814,94 @@ EXPORT_SYMBOL(cpufreq_get_policy);
* data : current policy.
* policy : policy to be set.
*/
-static int __cpufreq_set_policy(struct cpufreq_policy *data,
- struct cpufreq_policy *policy)
+static int __cpufreq_set_policy(struct cpufreq_policy *policy,
+ struct cpufreq_policy *new_policy)
{
int ret = 0, failed = 1;
- pr_debug("setting new policy for CPU %u: %u - %u kHz\n", policy->cpu,
- policy->min, policy->max);
+ pr_debug("setting new policy for CPU %u: %u - %u kHz\n", new_policy->cpu,
+ new_policy->min, new_policy->max);
- memcpy(&policy->cpuinfo, &data->cpuinfo,
- sizeof(struct cpufreq_cpuinfo));
+ memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
- if (policy->min > data->max || policy->max < data->min) {
+ if (new_policy->min > policy->max || new_policy->max < policy->min) {
ret = -EINVAL;
goto error_out;
}
/* verify the cpu speed can be set within this limit */
- ret = cpufreq_driver->verify(policy);
+ ret = cpufreq_driver->verify(new_policy);
if (ret)
goto error_out;
/* adjust if necessary - all reasons */
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
- CPUFREQ_ADJUST, policy);
+ CPUFREQ_ADJUST, new_policy);
/* adjust if necessary - hardware incompatibility*/
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
- CPUFREQ_INCOMPATIBLE, policy);
+ CPUFREQ_INCOMPATIBLE, new_policy);
/*
* verify the cpu speed can be set within this limit, which might be
* different to the first one
*/
- ret = cpufreq_driver->verify(policy);
+ ret = cpufreq_driver->verify(new_policy);
if (ret)
goto error_out;
/* notification of the new policy */
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
- CPUFREQ_NOTIFY, policy);
+ CPUFREQ_NOTIFY, new_policy);
- data->min = policy->min;
- data->max = policy->max;
+ policy->min = new_policy->min;
+ policy->max = new_policy->max;
pr_debug("new min and max freqs are %u - %u kHz\n",
- data->min, data->max);
+ policy->min, policy->max);
if (cpufreq_driver->setpolicy) {
- data->policy = policy->policy;
+ policy->policy = new_policy->policy;
pr_debug("setting range\n");
- ret = cpufreq_driver->setpolicy(policy);
+ ret = cpufreq_driver->setpolicy(new_policy);
} else {
- if (policy->governor != data->governor) {
+ if (new_policy->governor != policy->governor) {
/* save old, working values */
- struct cpufreq_governor *old_gov = data->governor;
+ struct cpufreq_governor *old_gov = policy->governor;
pr_debug("governor switch\n");
/* end old governor */
- if (data->governor) {
- __cpufreq_governor(data, CPUFREQ_GOV_STOP);
- unlock_policy_rwsem_write(policy->cpu);
- __cpufreq_governor(data,
+ if (policy->governor) {
+ __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+ unlock_policy_rwsem_write(new_policy->cpu);
+ __cpufreq_governor(policy,
CPUFREQ_GOV_POLICY_EXIT);
- lock_policy_rwsem_write(policy->cpu);
+ lock_policy_rwsem_write(new_policy->cpu);
}
/* start new governor */
- data->governor = policy->governor;
- if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
- if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
+ policy->governor = new_policy->governor;
+ if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
+ if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
failed = 0;
} else {
- unlock_policy_rwsem_write(policy->cpu);
- __cpufreq_governor(data,
+ unlock_policy_rwsem_write(new_policy->cpu);
+ __cpufreq_governor(policy,
CPUFREQ_GOV_POLICY_EXIT);
- lock_policy_rwsem_write(policy->cpu);
+ lock_policy_rwsem_write(new_policy->cpu);
}
}
if (failed) {
/* new governor failed, so re-start old one */
pr_debug("starting governor %s failed\n",
- data->governor->name);
+ policy->governor->name);
if (old_gov) {
- data->governor = old_gov;
- __cpufreq_governor(data,
+ policy->governor = old_gov;
+ __cpufreq_governor(policy,
CPUFREQ_GOV_POLICY_INIT);
- __cpufreq_governor(data,
+ __cpufreq_governor(policy,
CPUFREQ_GOV_START);
}
ret = -EINVAL;
@@ -1868,7 +1910,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
/* might be a policy change, too, so fall through */
}
pr_debug("governor: change or update limits\n");
- __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
+ ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
}
error_out:
@@ -1884,11 +1926,11 @@ error_out:
*/
int cpufreq_update_policy(unsigned int cpu)
{
- struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
- struct cpufreq_policy policy;
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+ struct cpufreq_policy new_policy;
int ret;
- if (!data) {
+ if (!policy) {
ret = -ENODEV;
goto no_policy;
}
@@ -1899,34 +1941,34 @@ int cpufreq_update_policy(unsigned int cpu)
}
pr_debug("updating policy for CPU %u\n", cpu);
- memcpy(&policy, data, sizeof(struct cpufreq_policy));
- policy.min = data->user_policy.min;
- policy.max = data->user_policy.max;
- policy.policy = data->user_policy.policy;
- policy.governor = data->user_policy.governor;
+ memcpy(&new_policy, policy, sizeof(*policy));
+ new_policy.min = policy->user_policy.min;
+ new_policy.max = policy->user_policy.max;
+ new_policy.policy = policy->user_policy.policy;
+ new_policy.governor = policy->user_policy.governor;
/*
* BIOS might change freq behind our back
* -> ask driver for current freq and notify governors about a change
*/
if (cpufreq_driver->get) {
- policy.cur = cpufreq_driver->get(cpu);
- if (!data->cur) {
+ new_policy.cur = cpufreq_driver->get(cpu);
+ if (!policy->cur) {
pr_debug("Driver did not initialize current freq");
- data->cur = policy.cur;
+ policy->cur = new_policy.cur;
} else {
- if (data->cur != policy.cur && cpufreq_driver->target)
- cpufreq_out_of_sync(cpu, data->cur,
- policy.cur);
+ if (policy->cur != new_policy.cur && cpufreq_driver->target)
+ cpufreq_out_of_sync(cpu, policy->cur,
+ new_policy.cur);
}
}
- ret = __cpufreq_set_policy(data, &policy);
+ ret = __cpufreq_set_policy(policy, &new_policy);
unlock_policy_rwsem_write(cpu);
fail:
- cpufreq_cpu_put(data);
+ cpufreq_cpu_put(policy);
no_policy:
return ret;
}
@@ -1937,21 +1979,26 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
{
unsigned int cpu = (unsigned long)hcpu;
struct device *dev;
+ bool frozen = false;
dev = get_cpu_device(cpu);
if (dev) {
- switch (action) {
+
+ if (action & CPU_TASKS_FROZEN)
+ frozen = true;
+
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- cpufreq_add_dev(dev, NULL);
+ __cpufreq_add_dev(dev, NULL, frozen);
+ cpufreq_update_policy(cpu);
break;
+
case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
- __cpufreq_remove_dev(dev, NULL);
+ __cpufreq_remove_dev(dev, NULL, frozen);
break;
+
case CPU_DOWN_FAILED:
- case CPU_DOWN_FAILED_FROZEN:
- cpufreq_add_dev(dev, NULL);
+ __cpufreq_add_dev(dev, NULL, frozen);
break;
}
}
@@ -2058,9 +2105,13 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
subsys_interface_unregister(&cpufreq_interface);
unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
+ down_write(&cpufreq_rwsem);
write_lock_irqsave(&cpufreq_driver_lock, flags);
+
cpufreq_driver = NULL;
+
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ up_write(&cpufreq_rwsem);
return 0;
}
@@ -2073,10 +2124,8 @@ static int __init cpufreq_core_init(void)
if (cpufreq_disabled())
return -ENODEV;
- for_each_possible_cpu(cpu) {
- per_cpu(cpufreq_policy_cpu, cpu) = -1;
+ for_each_possible_cpu(cpu)
init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
- }
cpufreq_global_kobject = kobject_create();
BUG_ON(!cpufreq_global_kobject);
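
For callers of the reworked helpers above, cpufreq_cpu_get() now pins the policy by taking a reference on its kobject and holding cpufreq_rwsem for reading (rather than taking a module reference on the driver), and cpufreq_cpu_put() drops both. A hedged sketch of the expected caller-side pairing; the helper name is made up and only illustrates usage:

#include <linux/cpufreq.h>

/* Illustrative only -- not part of the patch; helper name is hypothetical. */
static unsigned int example_read_cur_freq(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int cur;

	if (!policy)		/* no driver registered, or no policy for this CPU */
		return 0;

	cur = policy->cur;	/* policy cannot be freed while the reference is held */
	cpufreq_cpu_put(policy);

	return cur;
}
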
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 0ceb2eff5a7..f62d822048e 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -11,19 +11,7 @@
* published by the Free Software Foundation.
*/
-#include <linux/cpufreq.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/kobject.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/notifier.h>
-#include <linux/percpu-defs.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/types.h>
-
#include "cpufreq_governor.h"
/* Conservative governor macros */
@@ -79,8 +67,6 @@ static void cs_check_cpu(int cpu, unsigned int load)
return;
dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
- if (dbs_info->requested_freq > policy->max)
- dbs_info->requested_freq = policy->max;
__cpufreq_driver_target(policy, dbs_info->requested_freq,
CPUFREQ_RELATION_H);
@@ -101,8 +87,6 @@ static void cs_check_cpu(int cpu, unsigned int load)
return;
dbs_info->requested_freq -= get_freq_target(cs_tuners, policy);
- if (dbs_info->requested_freq < policy->min)
- dbs_info->requested_freq = policy->min;
__cpufreq_driver_target(policy, dbs_info->requested_freq,
CPUFREQ_RELATION_L);
@@ -221,8 +205,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
return count;
}
-static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
- size_t count)
+static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
+ const char *buf, size_t count)
{
struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
unsigned int input, j;
@@ -235,10 +219,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
if (input > 1)
input = 1;
- if (input == cs_tuners->ignore_nice) /* nothing to do */
+ if (input == cs_tuners->ignore_nice_load) /* nothing to do */
return count;
- cs_tuners->ignore_nice = input;
+ cs_tuners->ignore_nice_load = input;
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
@@ -246,7 +230,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
dbs_info = &per_cpu(cs_cpu_dbs_info, j);
dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->cdbs.prev_cpu_wall, 0);
- if (cs_tuners->ignore_nice)
+ if (cs_tuners->ignore_nice_load)
dbs_info->cdbs.prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
@@ -279,7 +263,7 @@ show_store_one(cs, sampling_rate);
show_store_one(cs, sampling_down_factor);
show_store_one(cs, up_threshold);
show_store_one(cs, down_threshold);
-show_store_one(cs, ignore_nice);
+show_store_one(cs, ignore_nice_load);
show_store_one(cs, freq_step);
declare_show_sampling_rate_min(cs);
@@ -287,7 +271,7 @@ gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(down_threshold);
-gov_sys_pol_attr_rw(ignore_nice);
+gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(freq_step);
gov_sys_pol_attr_ro(sampling_rate_min);
@@ -297,7 +281,7 @@ static struct attribute *dbs_attributes_gov_sys[] = {
&sampling_down_factor_gov_sys.attr,
&up_threshold_gov_sys.attr,
&down_threshold_gov_sys.attr,
- &ignore_nice_gov_sys.attr,
+ &ignore_nice_load_gov_sys.attr,
&freq_step_gov_sys.attr,
NULL
};
@@ -313,7 +297,7 @@ static struct attribute *dbs_attributes_gov_pol[] = {
&sampling_down_factor_gov_pol.attr,
&up_threshold_gov_pol.attr,
&down_threshold_gov_pol.attr,
- &ignore_nice_gov_pol.attr,
+ &ignore_nice_load_gov_pol.attr,
&freq_step_gov_pol.attr,
NULL
};
@@ -329,7 +313,7 @@ static int cs_init(struct dbs_data *dbs_data)
{
struct cs_dbs_tuners *tuners;
- tuners = kzalloc(sizeof(struct cs_dbs_tuners), GFP_KERNEL);
+ tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
if (!tuners) {
pr_err("%s: kzalloc failed\n", __func__);
return -ENOMEM;
@@ -338,7 +322,7 @@ static int cs_init(struct dbs_data *dbs_data)
tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
- tuners->ignore_nice = 0;
+ tuners->ignore_nice_load = 0;
tuners->freq_step = DEF_FREQUENCY_STEP;
dbs_data->tuners = tuners;
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 7b839a8db2a..0806c31e576 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -16,15 +16,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <asm/cputime.h>
-#include <linux/cpufreq.h>
-#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/kernel_stat.h>
-#include <linux/mutex.h>
#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/workqueue.h>
#include "cpufreq_governor.h"
@@ -47,13 +41,13 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
unsigned int j;
if (dbs_data->cdata->governor == GOV_ONDEMAND)
- ignore_nice = od_tuners->ignore_nice;
+ ignore_nice = od_tuners->ignore_nice_load;
else
- ignore_nice = cs_tuners->ignore_nice;
+ ignore_nice = cs_tuners->ignore_nice_load;
policy = cdbs->cur_policy;
- /* Get Absolute Load (in terms of freq for ondemand gov) */
+ /* Get Absolute Load */
for_each_cpu(j, policy->cpus) {
struct cpu_dbs_common_info *j_cdbs;
u64 cur_wall_time, cur_idle_time;
@@ -104,14 +98,6 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
load = 100 * (wall_time - idle_time) / wall_time;
- if (dbs_data->cdata->governor == GOV_ONDEMAND) {
- int freq_avg = __cpufreq_driver_getavg(policy, j);
- if (freq_avg <= 0)
- freq_avg = policy->cur;
-
- load *= freq_avg;
- }
-
if (load > max_load)
max_load = load;
}
@@ -133,8 +119,18 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
{
int i;
+ if (!policy->governor_enabled)
+ return;
+
if (!all_cpus) {
- __gov_queue_work(smp_processor_id(), dbs_data, delay);
+ /*
+ * Use raw_smp_processor_id() to avoid preemptible warnings.
+ * We know that this is only called with all_cpus == false from
+ * works that have been queued with *_work_on() functions and
+ * those works are canceled during CPU_DOWN_PREPARE so they
+ * can't possibly run on any other CPU.
+ */
+ __gov_queue_work(raw_smp_processor_id(), dbs_data, delay);
} else {
for_each_cpu(i, policy->cpus)
__gov_queue_work(i, dbs_data, delay);
@@ -244,7 +240,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
policy->governor_data = dbs_data;
- /* policy latency is in nS. Convert it to uS first */
+ /* policy latency is in ns. Convert it to us first */
latency = policy->cpuinfo.transition_latency / 1000;
if (latency == 0)
latency = 1;
@@ -298,12 +294,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
cs_tuners = dbs_data->tuners;
cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
sampling_rate = cs_tuners->sampling_rate;
- ignore_nice = cs_tuners->ignore_nice;
+ ignore_nice = cs_tuners->ignore_nice_load;
} else {
od_tuners = dbs_data->tuners;
od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
sampling_rate = od_tuners->sampling_rate;
- ignore_nice = od_tuners->ignore_nice;
+ ignore_nice = od_tuners->ignore_nice_load;
od_ops = dbs_data->cdata->gov_ops;
io_busy = od_tuners->io_is_busy;
}
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 6663ec3b305..88cd39f7b0e 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -18,19 +18,18 @@
#define _CPUFREQ_GOVERNOR_H
#include <linux/cpufreq.h>
-#include <linux/kobject.h>
+#include <linux/kernel_stat.h>
+#include <linux/module.h>
#include <linux/mutex.h>
-#include <linux/workqueue.h>
-#include <linux/sysfs.h>
/*
* The polling frequency depends on the capability of the processor. Default
* polling frequency is 1000 times the transition latency of the processor. The
- * governor will work on any processor with transition latency <= 10mS, using
+ * governor will work on any processor with transition latency <= 10ms, using
* appropriate sampling rate.
*
- * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
- * this governor will not work. All times here are in uS.
+ * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
+ * this governor will not work. All times here are in us (microseconds).
*/
#define MIN_SAMPLING_RATE_RATIO (2)
#define LATENCY_MULTIPLIER (1000)
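
A minimal sketch of how the latency conversion above and LATENCY_MULTIPLIER combine into a default sampling period, assuming the governor's usual rule of taking the larger of the minimum rate and latency * LATENCY_MULTIPLIER (plain userspace C, not the exact kernel code):

#include <stdio.h>

#define LATENCY_MULTIPLIER	1000

/* transition latency comes from the driver in ns; sampling rate is in us */
static unsigned int default_sampling_rate_us(unsigned int latency_ns,
					     unsigned int min_rate_us)
{
	unsigned int latency_us = latency_ns / 1000;

	if (latency_us == 0)
		latency_us = 1;
	if (min_rate_us > latency_us * LATENCY_MULTIPLIER)
		return min_rate_us;
	return latency_us * LATENCY_MULTIPLIER;
}

int main(void)
{
	/* e.g. a 10 us transition latency -> a 10 ms sampling period */
	printf("%u us\n", default_sampling_rate_us(10000, 10000));
	return 0;
}
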
@@ -163,19 +162,18 @@ struct cs_cpu_dbs_info_s {
unsigned int enable:1;
};
-/* Per policy Governers sysfs tunables */
+/* Per policy Governors sysfs tunables */
struct od_dbs_tuners {
- unsigned int ignore_nice;
+ unsigned int ignore_nice_load;
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
- unsigned int adj_up_threshold;
unsigned int powersave_bias;
unsigned int io_is_busy;
};
struct cs_dbs_tuners {
- unsigned int ignore_nice;
+ unsigned int ignore_nice_load;
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
@@ -183,7 +181,7 @@ struct cs_dbs_tuners {
unsigned int freq_step;
};
-/* Common Governer data across policies */
+/* Common Governor data across policies */
struct dbs_data;
struct common_dbs_data {
/* Common across governors */
@@ -207,7 +205,7 @@ struct common_dbs_data {
void *gov_ops;
};
-/* Governer Per policy data */
+/* Governor Per policy data */
struct dbs_data {
struct common_dbs_data *cdata;
unsigned int min_sampling_rate;
@@ -223,7 +221,7 @@ struct od_ops {
void (*powersave_bias_init_cpu)(int cpu);
unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
unsigned int freq_next, unsigned int relation);
- void (*freq_increase)(struct cpufreq_policy *p, unsigned int freq);
+ void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
};
struct cs_ops {
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 93eb5cbcc1f..32f26f6e17c 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -12,28 +12,16 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/cpufreq.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/kernel_stat.h>
-#include <linux/kobject.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
+#include <linux/cpu.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
-#include <linux/sysfs.h>
#include <linux/tick.h>
-#include <linux/types.h>
-#include <linux/cpu.h>
-
#include "cpufreq_governor.h"
/* On-demand governor macros */
-#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (100000)
-#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3)
#define MICRO_FREQUENCY_UP_THRESHOLD (95)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
#define MIN_FREQUENCY_UP_THRESHOLD (11)
@@ -144,31 +132,27 @@ static void ondemand_powersave_bias_init(void)
}
}
-static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
+static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
{
- struct dbs_data *dbs_data = p->governor_data;
+ struct dbs_data *dbs_data = policy->governor_data;
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
if (od_tuners->powersave_bias)
- freq = od_ops.powersave_bias_target(p, freq,
+ freq = od_ops.powersave_bias_target(policy, freq,
CPUFREQ_RELATION_H);
- else if (p->cur == p->max)
+ else if (policy->cur == policy->max)
return;
- __cpufreq_driver_target(p, freq, od_tuners->powersave_bias ?
+ __cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
}
/*
* Every sampling_rate, we check, if current idle time is less than 20%
- * (default), then we try to increase frequency. Every sampling_rate, we look
- * for the lowest frequency which can sustain the load while keeping idle time
- * over 30%. If such a frequency exist, we try to decrease to this frequency.
- *
- * Any frequency increase takes it to the maximum frequency. Frequency reduction
- * happens at minimum steps of 5% (default) of current frequency
+ * (default), then we try to increase frequency. Else, we adjust the frequency
+ * proportional to load.
*/
-static void od_check_cpu(int cpu, unsigned int load_freq)
+static void od_check_cpu(int cpu, unsigned int load)
{
struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
@@ -178,36 +162,21 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
dbs_info->freq_lo = 0;
/* Check for frequency increase */
- if (load_freq > od_tuners->up_threshold * policy->cur) {
+ if (load > od_tuners->up_threshold) {
/* If switching to max speed, apply sampling_down_factor */
if (policy->cur < policy->max)
dbs_info->rate_mult =
od_tuners->sampling_down_factor;
dbs_freq_increase(policy, policy->max);
return;
- }
-
- /* Check for frequency decrease */
- /* if we cannot reduce the frequency anymore, break out early */
- if (policy->cur == policy->min)
- return;
-
- /*
- * The optimal frequency is the frequency that is the lowest that can
- * support the current CPU usage without triggering the up policy. To be
- * safe, we focus 10 points under the threshold.
- */
- if (load_freq < od_tuners->adj_up_threshold
- * policy->cur) {
+ } else {
+ /* Calculate the next frequency proportional to load */
unsigned int freq_next;
- freq_next = load_freq / od_tuners->adj_up_threshold;
+ freq_next = load * policy->cpuinfo.max_freq / 100;
/* No longer fully busy, reset rate_mult */
dbs_info->rate_mult = 1;
- if (freq_next < policy->min)
- freq_next = policy->min;
-
if (!od_tuners->powersave_bias) {
__cpufreq_driver_target(policy, freq_next,
CPUFREQ_RELATION_L);
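
A small worked sketch of the new selection rule above: loads over up_threshold jump straight to the maximum frequency, while lower loads request a frequency simply proportional to load against cpuinfo.max_freq (plain integers; illustrative only, the real request is then rounded to a table entry):

#include <stdio.h>

static unsigned int od_next_freq(unsigned int load, unsigned int up_threshold,
				 unsigned int max_freq_khz)
{
	if (load > up_threshold)
		return max_freq_khz;
	return load * max_freq_khz / 100;
}

int main(void)
{
	/* e.g. 60% load on a 2.4 GHz CPU -> ask for roughly 1.44 GHz */
	printf("%u kHz\n", od_next_freq(60, 95, 2400000));
	return 0;
}
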
@@ -374,9 +343,6 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
input < MIN_FREQUENCY_UP_THRESHOLD) {
return -EINVAL;
}
- /* Calculate the new adj_up_threshold */
- od_tuners->adj_up_threshold += input;
- od_tuners->adj_up_threshold -= od_tuners->up_threshold;
od_tuners->up_threshold = input;
return count;
@@ -403,8 +369,8 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
return count;
}
-static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
- size_t count)
+static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
+ const char *buf, size_t count)
{
struct od_dbs_tuners *od_tuners = dbs_data->tuners;
unsigned int input;
@@ -419,10 +385,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
if (input > 1)
input = 1;
- if (input == od_tuners->ignore_nice) { /* nothing to do */
+ if (input == od_tuners->ignore_nice_load) { /* nothing to do */
return count;
}
- od_tuners->ignore_nice = input;
+ od_tuners->ignore_nice_load = input;
/* we need to re-evaluate prev_cpu_idle */
for_each_online_cpu(j) {
@@ -430,7 +396,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
dbs_info = &per_cpu(od_cpu_dbs_info, j);
dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
- if (od_tuners->ignore_nice)
+ if (od_tuners->ignore_nice_load)
dbs_info->cdbs.prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
@@ -461,7 +427,7 @@ show_store_one(od, sampling_rate);
show_store_one(od, io_is_busy);
show_store_one(od, up_threshold);
show_store_one(od, sampling_down_factor);
-show_store_one(od, ignore_nice);
+show_store_one(od, ignore_nice_load);
show_store_one(od, powersave_bias);
declare_show_sampling_rate_min(od);
@@ -469,7 +435,7 @@ gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(io_is_busy);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(sampling_down_factor);
-gov_sys_pol_attr_rw(ignore_nice);
+gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(powersave_bias);
gov_sys_pol_attr_ro(sampling_rate_min);
@@ -478,7 +444,7 @@ static struct attribute *dbs_attributes_gov_sys[] = {
&sampling_rate_gov_sys.attr,
&up_threshold_gov_sys.attr,
&sampling_down_factor_gov_sys.attr,
- &ignore_nice_gov_sys.attr,
+ &ignore_nice_load_gov_sys.attr,
&powersave_bias_gov_sys.attr,
&io_is_busy_gov_sys.attr,
NULL
@@ -494,7 +460,7 @@ static struct attribute *dbs_attributes_gov_pol[] = {
&sampling_rate_gov_pol.attr,
&up_threshold_gov_pol.attr,
&sampling_down_factor_gov_pol.attr,
- &ignore_nice_gov_pol.attr,
+ &ignore_nice_load_gov_pol.attr,
&powersave_bias_gov_pol.attr,
&io_is_busy_gov_pol.attr,
NULL
@@ -513,7 +479,7 @@ static int od_init(struct dbs_data *dbs_data)
u64 idle_time;
int cpu;
- tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL);
+ tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
if (!tuners) {
pr_err("%s: kzalloc failed\n", __func__);
return -ENOMEM;
@@ -525,8 +491,6 @@ static int od_init(struct dbs_data *dbs_data)
if (idle_time != -1ULL) {
/* Idle micro accounting is supported. Use finer thresholds */
tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
- tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
- MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
/*
* In nohz/micro accounting case we set the minimum frequency
* not depending on HZ, but fixed (very low). The deferred
@@ -535,8 +499,6 @@ static int od_init(struct dbs_data *dbs_data)
dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
} else {
tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
- tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
- DEF_FREQUENCY_DOWN_DIFFERENTIAL;
/* For correct statistics, we need 10 ticks for each measure */
dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
@@ -544,7 +506,7 @@ static int od_init(struct dbs_data *dbs_data)
}
tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
- tuners->ignore_nice = 0;
+ tuners->ignore_nice_load = 0;
tuners->powersave_bias = default_powersave_bias;
tuners->io_is_busy = should_io_be_busy();
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index 9fef7d6e4e6..cf117deb39b 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -12,10 +12,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
+#include <linux/module.h>
static int cpufreq_governor_performance(struct cpufreq_policy *policy,
unsigned int event)
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index 32109a14f5d..e3b874c235e 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -12,10 +12,9 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/init.h>
+#include <linux/module.h>
static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
unsigned int event)
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index d37568c5ca9..04452f026ed 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -9,17 +9,10 @@
* published by the Free Software Foundation.
*/
-#include <linux/kernel.h>
-#include <linux/slab.h>
#include <linux/cpu.h>
-#include <linux/sysfs.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
-#include <linux/jiffies.h>
-#include <linux/percpu.h>
-#include <linux/kobject.h>
-#include <linux/spinlock.h>
-#include <linux/notifier.h>
+#include <linux/slab.h>
#include <asm/cputime.h>
static spinlock_t cpufreq_stats_lock;
@@ -200,22 +193,22 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
{
unsigned int i, j, count = 0, ret = 0;
struct cpufreq_stats *stat;
- struct cpufreq_policy *data;
+ struct cpufreq_policy *current_policy;
unsigned int alloc_size;
unsigned int cpu = policy->cpu;
if (per_cpu(cpufreq_stats_table, cpu))
return -EBUSY;
- stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
+ stat = kzalloc(sizeof(*stat), GFP_KERNEL);
if ((stat) == NULL)
return -ENOMEM;
- data = cpufreq_cpu_get(cpu);
- if (data == NULL) {
+ current_policy = cpufreq_cpu_get(cpu);
+ if (current_policy == NULL) {
ret = -EINVAL;
goto error_get_fail;
}
- ret = sysfs_create_group(&data->kobj, &stats_attr_group);
+ ret = sysfs_create_group(&current_policy->kobj, &stats_attr_group);
if (ret)
goto error_out;
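
The kzalloc(sizeof(*stat), ...) form used here, and throughout this series, sizes the allocation from the pointer's own type, so the call stays correct if the structure type is ever renamed. A minimal hedged illustration (struct foo and alloc_foo are hypothetical, not from this driver):

#include <linux/slab.h>

struct foo {
	int a;
	int b;
};

static struct foo *alloc_foo(void)
{
	/* size follows the pointee type automatically */
	return kzalloc(sizeof(struct foo *){0} ? 0 : sizeof(*(struct foo *)0), GFP_KERNEL) ? : NULL;
}

A simpler equivalent of the same idiom, as it appears in the patch:

	struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
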
@@ -258,10 +251,10 @@ static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
stat->last_time = get_jiffies_64();
stat->last_index = freq_table_get_index(stat, policy->cur);
spin_unlock(&cpufreq_stats_lock);
- cpufreq_cpu_put(data);
+ cpufreq_cpu_put(current_policy);
return 0;
error_out:
- cpufreq_cpu_put(data);
+ cpufreq_cpu_put(current_policy);
error_get_fail:
kfree(stat);
per_cpu(cpufreq_stats_table, cpu) = NULL;
@@ -348,16 +341,10 @@ static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
unsigned int cpu = (unsigned long)hcpu;
switch (action) {
- case CPU_ONLINE:
- case CPU_ONLINE_FROZEN:
- cpufreq_update_policy(cpu);
- break;
case CPU_DOWN_PREPARE:
- case CPU_DOWN_PREPARE_FROZEN:
cpufreq_stats_free_sysfs(cpu);
break;
case CPU_DEAD:
- case CPU_DEAD_FROZEN:
cpufreq_stats_free_table(cpu);
break;
}
@@ -390,8 +377,6 @@ static int __init cpufreq_stats_init(void)
return ret;
register_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
- for_each_online_cpu(cpu)
- cpufreq_update_policy(cpu);
ret = cpufreq_register_notifier(&notifier_trans_block,
CPUFREQ_TRANSITION_NOTIFIER);
diff --git a/drivers/cpufreq/cris-artpec3-cpufreq.c b/drivers/cpufreq/cris-artpec3-cpufreq.c
index ee142c49057..cb8276dd19c 100644
--- a/drivers/cpufreq/cris-artpec3-cpufreq.c
+++ b/drivers/cpufreq/cris-artpec3-cpufreq.c
@@ -111,7 +111,6 @@ static struct cpufreq_driver cris_freq_driver = {
.init = cris_freq_cpu_init,
.exit = cris_freq_cpu_exit,
.name = "cris_freq",
- .owner = THIS_MODULE,
.attr = cris_freq_attr,
};
diff --git a/drivers/cpufreq/cris-etraxfs-cpufreq.c b/drivers/cpufreq/cris-etraxfs-cpufreq.c
index 12952235d5d..72328f77dc5 100644
--- a/drivers/cpufreq/cris-etraxfs-cpufreq.c
+++ b/drivers/cpufreq/cris-etraxfs-cpufreq.c
@@ -108,7 +108,6 @@ static struct cpufreq_driver cris_freq_driver = {
.init = cris_freq_cpu_init,
.exit = cris_freq_cpu_exit,
.name = "cris_freq",
- .owner = THIS_MODULE,
.attr = cris_freq_attr,
};
diff --git a/drivers/cpufreq/e_powersaver.c b/drivers/cpufreq/e_powersaver.c
index a60efaeb4cf..09f64cc8301 100644
--- a/drivers/cpufreq/e_powersaver.c
+++ b/drivers/cpufreq/e_powersaver.c
@@ -54,7 +54,7 @@ static struct acpi_processor_performance *eps_acpi_cpu_perf;
/* Minimum necessary to get acpi_processor_get_bios_limit() working */
static int eps_acpi_init(void)
{
- eps_acpi_cpu_perf = kzalloc(sizeof(struct acpi_processor_performance),
+ eps_acpi_cpu_perf = kzalloc(sizeof(*eps_acpi_cpu_perf),
GFP_KERNEL);
if (!eps_acpi_cpu_perf)
return -ENOMEM;
@@ -366,7 +366,7 @@ static int eps_cpu_init(struct cpufreq_policy *policy)
states = 2;
/* Allocate private data and frequency table for current cpu */
- centaur = kzalloc(sizeof(struct eps_cpu_data)
+ centaur = kzalloc(sizeof(*centaur)
+ (states + 1) * sizeof(struct cpufreq_frequency_table),
GFP_KERNEL);
if (!centaur)
@@ -436,7 +436,6 @@ static struct cpufreq_driver eps_driver = {
.exit = eps_cpu_exit,
.get = eps_get,
.name = "e_powersaver",
- .owner = THIS_MODULE,
.attr = eps_attr,
};
diff --git a/drivers/cpufreq/elanfreq.c b/drivers/cpufreq/elanfreq.c
index 658d860344b..823a400d98f 100644
--- a/drivers/cpufreq/elanfreq.c
+++ b/drivers/cpufreq/elanfreq.c
@@ -274,7 +274,6 @@ static struct cpufreq_driver elanfreq_driver = {
.init = elanfreq_cpu_init,
.exit = elanfreq_cpu_exit,
.name = "elanfreq",
- .owner = THIS_MODULE,
.attr = elanfreq_attr,
};
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
index 0d32f02ef4d..0fac34439e3 100644
--- a/drivers/cpufreq/exynos-cpufreq.c
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -289,7 +289,7 @@ static int __init exynos_cpufreq_init(void)
{
int ret = -EINVAL;
- exynos_info = kzalloc(sizeof(struct exynos_dvfs_info), GFP_KERNEL);
+ exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
if (!exynos_info)
return -ENOMEM;
@@ -332,7 +332,6 @@ err_cpufreq:
regulator_put(arm_regulator);
err_vdd_arm:
kfree(exynos_info);
- pr_debug("%s: failed initialization\n", __func__);
return -EINVAL;
}
late_initcall(exynos_cpufreq_init);
diff --git a/drivers/cpufreq/exynos-cpufreq.h b/drivers/cpufreq/exynos-cpufreq.h
index 92b852ee5dd..7f25cee8cec 100644
--- a/drivers/cpufreq/exynos-cpufreq.h
+++ b/drivers/cpufreq/exynos-cpufreq.h
@@ -43,6 +43,27 @@ struct exynos_dvfs_info {
bool (*need_apll_change)(unsigned int, unsigned int);
};
+#ifdef CONFIG_ARM_EXYNOS4210_CPUFREQ
extern int exynos4210_cpufreq_init(struct exynos_dvfs_info *);
+#else
+static inline int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+#ifdef CONFIG_ARM_EXYNOS4X12_CPUFREQ
extern int exynos4x12_cpufreq_init(struct exynos_dvfs_info *);
+#else
+static inline int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+#ifdef CONFIG_ARM_EXYNOS5250_CPUFREQ
extern int exynos5250_cpufreq_init(struct exynos_dvfs_info *);
+#else
+static inline int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
+{
+ return -EOPNOTSUPP;
+}
+#endif
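
These inline stubs return -EOPNOTSUPP when a SoC variant is not built in, which keeps callers free of #ifdef blocks. A hedged caller fragment showing the effect (error handling only; not the actual probe code):

	int ret = exynos4210_cpufreq_init(exynos_info);

	if (ret == -EOPNOTSUPP)
		pr_err("exynos4210 cpufreq support is not compiled in\n");
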
diff --git a/drivers/cpufreq/exynos5440-cpufreq.c b/drivers/cpufreq/exynos5440-cpufreq.c
index 0c74018eda4..d514c152fd1 100644
--- a/drivers/cpufreq/exynos5440-cpufreq.c
+++ b/drivers/cpufreq/exynos5440-cpufreq.c
@@ -238,6 +238,9 @@ static int exynos_target(struct cpufreq_policy *policy,
freqs.old = dvfs_info->cur_frequency;
freqs.new = freq_table[index].frequency;
+ if (freqs.old == freqs.new)
+ goto out;
+
cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
/* Set the target frequency in all C0_3_PSTATE register */
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index f0d87412cc9..f111454a7ae 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -11,10 +11,8 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
#include <linux/cpufreq.h>
+#include <linux/module.h>
/*********************************************************************
* FREQUENCY TABLE HELPERS *
diff --git a/drivers/cpufreq/gx-suspmod.c b/drivers/cpufreq/gx-suspmod.c
index 3dfc99b9ca8..70442c7b5e7 100644
--- a/drivers/cpufreq/gx-suspmod.c
+++ b/drivers/cpufreq/gx-suspmod.c
@@ -183,7 +183,7 @@ static void gx_write_byte(int reg, int value)
* gx_detect_chipset:
*
**/
-static __init struct pci_dev *gx_detect_chipset(void)
+static struct pci_dev * __init gx_detect_chipset(void)
{
struct pci_dev *gx_pci = NULL;
@@ -446,7 +446,6 @@ static struct cpufreq_driver gx_suspmod_driver = {
.target = cpufreq_gx_target,
.init = cpufreq_gx_cpu_init,
.name = "gx-suspmod",
- .owner = THIS_MODULE,
};
static int __init cpufreq_gx_init(void)
@@ -466,7 +465,7 @@ static int __init cpufreq_gx_init(void)
pr_debug("geode suspend modulation available.\n");
- params = kzalloc(sizeof(struct gxfreq_params), GFP_KERNEL);
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
if (params == NULL)
return -ENOMEM;
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
index b61b5a3fad6..794123fcf3e 100644
--- a/drivers/cpufreq/highbank-cpufreq.c
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -69,23 +69,17 @@ static int hb_cpufreq_driver_init(void)
if (!of_machine_is_compatible("calxeda,highbank"))
return -ENODEV;
- for_each_child_of_node(of_find_node_by_path("/cpus"), np)
- if (of_get_property(np, "operating-points", NULL))
- break;
-
- if (!np) {
- pr_err("failed to find highbank cpufreq node\n");
- return -ENOENT;
- }
-
cpu_dev = get_cpu_device(0);
if (!cpu_dev) {
pr_err("failed to get highbank cpufreq device\n");
- ret = -ENODEV;
- goto out_put_node;
+ return -ENODEV;
}
- cpu_dev->of_node = np;
+ np = of_node_get(cpu_dev->of_node);
+ if (!np) {
+ pr_err("failed to find highbank cpufreq node\n");
+ return -ENOENT;
+ }
cpu_clk = clk_get(cpu_dev, NULL);
if (IS_ERR(cpu_clk)) {
diff --git a/drivers/cpufreq/ia64-acpi-cpufreq.c b/drivers/cpufreq/ia64-acpi-cpufreq.c
index 573c14ea802..3e14f031717 100644
--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
+++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
@@ -274,7 +274,7 @@ acpi_cpufreq_cpu_init (
pr_debug("acpi_cpufreq_cpu_init\n");
- data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return (-ENOMEM);
@@ -304,7 +304,7 @@ acpi_cpufreq_cpu_init (
}
/* alloc freq_table */
- data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
+ data->freq_table = kmalloc(sizeof(*data->freq_table) *
(data->acpi_data.state_count + 1),
GFP_KERNEL);
if (!data->freq_table) {
@@ -409,7 +409,6 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
.init = acpi_cpufreq_cpu_init,
.exit = acpi_cpufreq_cpu_exit,
.name = "acpi-cpufreq",
- .owner = THIS_MODULE,
.attr = acpi_cpufreq_attr,
};
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index e37cdaedbb5..3e396543aea 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -117,28 +117,11 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
* - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
* - Disable pll2_pfd2_396m_clk
*/
- clk_prepare_enable(pll2_pfd2_396m_clk);
clk_set_parent(step_clk, pll2_pfd2_396m_clk);
clk_set_parent(pll1_sw_clk, step_clk);
if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
clk_set_rate(pll1_sys_clk, freqs.new * 1000);
- /*
- * If we are leaving 396 MHz set-point, we need to enable
- * pll1_sys_clk and disable pll2_pfd2_396m_clk to keep
- * their use count correct.
- */
- if (freqs.old * 1000 <= clk_get_rate(pll2_pfd2_396m_clk)) {
- clk_prepare_enable(pll1_sys_clk);
- clk_disable_unprepare(pll2_pfd2_396m_clk);
- }
clk_set_parent(pll1_sw_clk, pll1_sys_clk);
- clk_disable_unprepare(pll2_pfd2_396m_clk);
- } else {
- /*
- * Disable pll1_sys_clk if pll2_pfd2_396m_clk is sufficient
- * to provide the frequency.
- */
- clk_disable_unprepare(pll1_sys_clk);
}
/* Ensure the arm clock divider is what we expect */
@@ -221,14 +204,12 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
cpu_dev = &pdev->dev;
- np = of_find_node_by_path("/cpus/cpu@0");
+ np = of_node_get(cpu_dev->of_node);
if (!np) {
dev_err(cpu_dev, "failed to find cpu0 node\n");
return -ENOENT;
}
- cpu_dev->of_node = np;
-
arm_clk = devm_clk_get(cpu_dev, "arm");
pll1_sys_clk = devm_clk_get(cpu_dev, "pll1_sys");
pll1_sw_clk = devm_clk_get(cpu_dev, "pll1_sw");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b012d7600e1..6efd96c196b 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -103,10 +103,10 @@ struct pstate_adjust_policy {
static struct pstate_adjust_policy default_policy = {
.sample_rate_ms = 10,
.deadband = 0,
- .setpoint = 109,
- .p_gain_pct = 17,
+ .setpoint = 97,
+ .p_gain_pct = 20,
.d_gain_pct = 0,
- .i_gain_pct = 4,
+ .i_gain_pct = 0,
};
struct perf_limits {
@@ -468,12 +468,12 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
int32_t busy_scaled;
- int32_t core_busy, turbo_pstate, current_pstate;
+ int32_t core_busy, max_pstate, current_pstate;
core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
- turbo_pstate = int_tofp(cpu->pstate.turbo_pstate);
+ max_pstate = int_tofp(cpu->pstate.max_pstate);
current_pstate = int_tofp(cpu->pstate.current_pstate);
- busy_scaled = mul_fp(core_busy, div_fp(turbo_pstate, current_pstate));
+ busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
return fp_toint(busy_scaled);
}
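
Scaling core busyness by max_pstate (rather than turbo_pstate) and comparing the result against the new setpoint of 97 makes the controller behave proportionally. A worked sketch with plain integers (the kernel uses 8-bit fixed point via int_tofp()/mul_fp()/div_fp()):

#include <stdio.h>

static int scaled_busy(int core_pct_busy, int max_pstate, int current_pstate)
{
	return core_pct_busy * max_pstate / current_pstate;
}

int main(void)
{
	/* e.g. 80% busy at P-state 16 with a max P-state of 24 -> 120,
	 * which is above the setpoint of 97, so the next P-state is
	 * pushed upward */
	printf("%d\n", scaled_busy(80, 24, 16));
	return 0;
}
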
@@ -665,7 +665,6 @@ static struct cpufreq_driver intel_pstate_driver = {
.init = intel_pstate_cpu_init,
.exit = intel_pstate_cpu_exit,
.name = "intel_pstate",
- .owner = THIS_MODULE,
};
static int __initdata no_load;
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index c233ea61736..ba10658a939 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -14,7 +14,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/cpufreq.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <asm/proc-fns.h>
@@ -158,7 +158,6 @@ static struct cpufreq_driver kirkwood_cpufreq_driver = {
.init = kirkwood_cpufreq_cpu_init,
.exit = kirkwood_cpufreq_cpu_exit,
.name = "kirkwood-cpufreq",
- .owner = THIS_MODULE,
.attr = kirkwood_cpufreq_attr,
};
@@ -175,9 +174,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
if (IS_ERR(priv.base))
return PTR_ERR(priv.base);
- np = of_find_node_by_path("/cpus/cpu@0");
- if (!np)
+ np = of_cpu_device_node_get(0);
+ if (!np) {
+ dev_err(&pdev->dev, "failed to get cpu device node\n");
return -ENODEV;
+ }
priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
if (IS_ERR(priv.cpu_clk)) {
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 8c49261df57..4ada1cccb05 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -948,7 +948,6 @@ static struct cpufreq_driver longhaul_driver = {
.init = longhaul_cpu_init,
.exit = longhaul_cpu_exit,
.name = "longhaul",
- .owner = THIS_MODULE,
.attr = longhaul_attr,
};
diff --git a/drivers/cpufreq/longrun.c b/drivers/cpufreq/longrun.c
index 0fe041d1f77..5aa031612d5 100644
--- a/drivers/cpufreq/longrun.c
+++ b/drivers/cpufreq/longrun.c
@@ -286,7 +286,6 @@ static struct cpufreq_driver longrun_driver = {
.get = longrun_get,
.init = longrun_cpu_init,
.name = "longrun",
- .owner = THIS_MODULE,
};
static const struct x86_cpu_id longrun_ids[] = {
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index bb838b98507..7bc3c44d34e 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -118,11 +118,6 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
clk_put(cpuclk);
return -EINVAL;
}
- ret = clk_set_rate(cpuclk, rate);
- if (ret) {
- clk_put(cpuclk);
- return ret;
- }
/* clock table init */
for (i = 2;
@@ -130,6 +125,12 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy)
i++)
loongson2_clockmod_table[i].frequency = (rate * i) / 8;
+ ret = clk_set_rate(cpuclk, rate);
+ if (ret) {
+ clk_put(cpuclk);
+ return ret;
+ }
+
policy->cur = loongson2_cpufreq_get(policy->cpu);
cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
@@ -157,7 +158,6 @@ static struct freq_attr *loongson2_table_attr[] = {
};
static struct cpufreq_driver loongson2_cpufreq_driver = {
- .owner = THIS_MODULE,
.name = "loongson2",
.init = loongson2_cpufreq_cpu_init,
.verify = loongson2_cpufreq_verify,
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c
index cdd62915efa..6168d77b296 100644
--- a/drivers/cpufreq/maple-cpufreq.c
+++ b/drivers/cpufreq/maple-cpufreq.c
@@ -24,7 +24,7 @@
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/time.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
#define DBG(fmt...) pr_debug(fmt)
@@ -190,7 +190,6 @@ static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy)
static struct cpufreq_driver maple_cpufreq_driver = {
.name = "maple",
- .owner = THIS_MODULE,
.flags = CPUFREQ_CONST_LOOPS,
.init = maple_cpufreq_cpu_init,
.verify = maple_cpufreq_verify,
@@ -201,7 +200,6 @@ static struct cpufreq_driver maple_cpufreq_driver = {
static int __init maple_cpufreq_init(void)
{
- struct device_node *cpus;
struct device_node *cpunode;
unsigned int psize;
unsigned long max_freq;
@@ -217,24 +215,11 @@ static int __init maple_cpufreq_init(void)
!of_machine_is_compatible("Momentum,Apache"))
return 0;
- cpus = of_find_node_by_path("/cpus");
- if (cpus == NULL) {
- DBG("No /cpus node !\n");
- return -ENODEV;
- }
-
/* Get first CPU node */
- for (cpunode = NULL;
- (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
- const u32 *reg = of_get_property(cpunode, "reg", NULL);
- if (reg == NULL || (*reg) != 0)
- continue;
- if (!strcmp(cpunode->type, "cpu"))
- break;
- }
+ cpunode = of_cpu_device_node_get(0);
if (cpunode == NULL) {
printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
- goto bail_cpus;
+ goto bail_noprops;
}
/* Check 970FX for now */
@@ -290,14 +275,11 @@ static int __init maple_cpufreq_init(void)
rc = cpufreq_register_driver(&maple_cpufreq_driver);
of_node_put(cpunode);
- of_node_put(cpus);
return rc;
bail_noprops:
of_node_put(cpunode);
-bail_cpus:
- of_node_put(cpus);
return rc;
}
diff --git a/drivers/cpufreq/mperf.c b/drivers/cpufreq/mperf.c
deleted file mode 100644
index 911e193018a..00000000000
--- a/drivers/cpufreq/mperf.c
+++ /dev/null
@@ -1,51 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/smp.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/cpufreq.h>
-#include <linux/slab.h>
-
-#include "mperf.h"
-
-static DEFINE_PER_CPU(struct aperfmperf, acfreq_old_perf);
-
-/* Called via smp_call_function_single(), on the target CPU */
-static void read_measured_perf_ctrs(void *_cur)
-{
- struct aperfmperf *am = _cur;
-
- get_aperfmperf(am);
-}
-
-/*
- * Return the measured active (C0) frequency on this CPU since last call
- * to this function.
- * Input: cpu number
- * Return: Average CPU frequency in terms of max frequency (zero on error)
- *
- * We use IA32_MPERF and IA32_APERF MSRs to get the measured performance
- * over a period of time, while CPU is in C0 state.
- * IA32_MPERF counts at the rate of max advertised frequency
- * IA32_APERF counts at the rate of actual CPU frequency
- * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
- * no meaning should be associated with absolute values of these MSRs.
- */
-unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
- unsigned int cpu)
-{
- struct aperfmperf perf;
- unsigned long ratio;
- unsigned int retval;
-
- if (smp_call_function_single(cpu, read_measured_perf_ctrs, &perf, 1))
- return 0;
-
- ratio = calc_aperfmperf_ratio(&per_cpu(acfreq_old_perf, cpu), &perf);
- per_cpu(acfreq_old_perf, cpu) = perf;
-
- retval = (policy->cpuinfo.max_freq * ratio) >> APERFMPERF_SHIFT;
-
- return retval;
-}
-EXPORT_SYMBOL_GPL(cpufreq_get_measured_perf);
-MODULE_LICENSE("GPL");
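
For reference, the removed helper estimated the average running frequency from the ratio of the APERF and MPERF deltas over the sampling window, scaled by the maximum advertised frequency. A minimal sketch of that arithmetic (plain C, no MSR access):

/* avg_freq is roughly max_freq * delta_APERF / delta_MPERF while in C0 */
static unsigned int measured_freq_khz(unsigned long long d_aperf,
				      unsigned long long d_mperf,
				      unsigned int max_freq_khz)
{
	if (!d_mperf)
		return 0;
	return (unsigned int)(max_freq_khz * d_aperf / d_mperf);
}
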
diff --git a/drivers/cpufreq/mperf.h b/drivers/cpufreq/mperf.h
deleted file mode 100644
index 5dbf2950dc2..00000000000
--- a/drivers/cpufreq/mperf.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * (c) 2010 Advanced Micro Devices, Inc.
- * Your use of this code is subject to the terms and conditions of the
- * GNU general public license version 2. See "COPYING" or
- * http://www.gnu.org/licenses/gpl.html
- */
-
-unsigned int cpufreq_get_measured_perf(struct cpufreq_policy *policy,
- unsigned int cpu);
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index 9ee78170ff8..2f0a2a65c37 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -279,7 +279,6 @@ static struct cpufreq_driver p4clockmod_driver = {
.exit = cpufreq_p4_cpu_exit,
.get = cpufreq_p4_get,
.name = "p4-clockmod",
- .owner = THIS_MODULE,
.attr = p4clockmod_attr,
};
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index b704da40406..534e43a60d1 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -297,7 +297,6 @@ static int pas_cpufreq_target(struct cpufreq_policy *policy,
static struct cpufreq_driver pas_cpufreq_driver = {
.name = "pas-cpufreq",
- .owner = THIS_MODULE,
.flags = CPUFREQ_CONST_LOOPS,
.init = pas_cpufreq_cpu_init,
.exit = pas_cpufreq_cpu_exit,
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 1581fcc4cf4..d81c4e5ea0a 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -587,7 +587,6 @@ static struct cpufreq_driver pcc_cpufreq_driver = {
.init = pcc_cpufreq_cpu_init,
.exit = pcc_cpufreq_cpu_exit,
.name = "pcc-cpufreq",
- .owner = THIS_MODULE,
};
static int __init pcc_cpufreq_init(void)
diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
index 3104fad8248..a096cd3fa23 100644
--- a/drivers/cpufreq/pmac32-cpufreq.c
+++ b/drivers/cpufreq/pmac32-cpufreq.c
@@ -25,6 +25,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/hardirq.h>
+#include <linux/of_device.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/irq.h>
@@ -477,7 +478,6 @@ static struct cpufreq_driver pmac_cpufreq_driver = {
.flags = CPUFREQ_PM_NO_WARN,
.attr = pmac_cpu_freqs_attr,
.name = "powermac",
- .owner = THIS_MODULE,
};
@@ -649,8 +649,8 @@ static int __init pmac_cpufreq_setup(void)
if (strstr(cmd_line, "nocpufreq"))
return 0;
- /* Assume only one CPU */
- cpunode = of_find_node_by_type(NULL, "cpu");
+ /* Get first CPU node */
+ cpunode = of_cpu_device_node_get(0);
if (!cpunode)
goto out;
diff --git a/drivers/cpufreq/pmac64-cpufreq.c b/drivers/cpufreq/pmac64-cpufreq.c
index 7ba423431cf..3a51ad7e47c 100644
--- a/drivers/cpufreq/pmac64-cpufreq.c
+++ b/drivers/cpufreq/pmac64-cpufreq.c
@@ -22,6 +22,7 @@
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/mutex.h>
+#include <linux/of_device.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/irq.h>
@@ -371,7 +372,6 @@ static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
static struct cpufreq_driver g5_cpufreq_driver = {
.name = "powermac",
- .owner = THIS_MODULE,
.flags = CPUFREQ_CONST_LOOPS,
.init = g5_cpufreq_cpu_init,
.verify = g5_cpufreq_verify,
@@ -383,9 +383,8 @@ static struct cpufreq_driver g5_cpufreq_driver = {
#ifdef CONFIG_PMAC_SMU
-static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
+static int __init g5_neo2_cpufreq_init(struct device_node *cpunode)
{
- struct device_node *cpunode;
unsigned int psize, ssize;
unsigned long max_freq;
char *freq_method, *volt_method;
@@ -405,20 +404,6 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
else
return -ENODEV;
- /* Get first CPU node */
- for (cpunode = NULL;
- (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
- const u32 *reg = of_get_property(cpunode, "reg", NULL);
- if (reg == NULL || (*reg) != 0)
- continue;
- if (!strcmp(cpunode->type, "cpu"))
- break;
- }
- if (cpunode == NULL) {
- printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n");
- return -ENODEV;
- }
-
/* Check 970FX for now */
valp = of_get_property(cpunode, "cpu-version", NULL);
if (!valp) {
@@ -447,9 +432,8 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
if (!shdr)
goto bail_noprops;
g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1];
- ssize = (shdr->len * sizeof(u32)) -
- sizeof(struct smu_sdbp_header);
- g5_fvt_count = ssize / sizeof(struct smu_sdbp_fvt);
+ ssize = (shdr->len * sizeof(u32)) - sizeof(*shdr);
+ g5_fvt_count = ssize / sizeof(*g5_fvt_table);
g5_fvt_cur = 0;
/* Sanity checking */
@@ -537,9 +521,9 @@ static int __init g5_neo2_cpufreq_init(struct device_node *cpus)
#endif /* CONFIG_PMAC_SMU */
-static int __init g5_pm72_cpufreq_init(struct device_node *cpus)
+static int __init g5_pm72_cpufreq_init(struct device_node *cpunode)
{
- struct device_node *cpuid = NULL, *hwclock = NULL, *cpunode = NULL;
+ struct device_node *cpuid = NULL, *hwclock = NULL;
const u8 *eeprom = NULL;
const u32 *valp;
u64 max_freq, min_freq, ih, il;
@@ -548,17 +532,6 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpus)
DBG("cpufreq: Initializing for PowerMac7,2, PowerMac7,3 and"
" RackMac3,1...\n");
- /* Get first CPU node */
- for (cpunode = NULL;
- (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) {
- if (!strcmp(cpunode->type, "cpu"))
- break;
- }
- if (cpunode == NULL) {
- printk(KERN_ERR "cpufreq: Can't find any CPU node\n");
- return -ENODEV;
- }
-
/* Lookup the cpuid eeprom node */
cpuid = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/cpuid@a0");
if (cpuid != NULL)
@@ -718,25 +691,25 @@ static int __init g5_pm72_cpufreq_init(struct device_node *cpus)
static int __init g5_cpufreq_init(void)
{
- struct device_node *cpus;
+ struct device_node *cpunode;
int rc = 0;
- cpus = of_find_node_by_path("/cpus");
- if (cpus == NULL) {
- DBG("No /cpus node !\n");
+ /* Get first CPU node */
+ cpunode = of_cpu_device_node_get(0);
+ if (cpunode == NULL) {
+ pr_err("cpufreq: Can't find any CPU node\n");
return -ENODEV;
}
if (of_machine_is_compatible("PowerMac7,2") ||
of_machine_is_compatible("PowerMac7,3") ||
of_machine_is_compatible("RackMac3,1"))
- rc = g5_pm72_cpufreq_init(cpus);
+ rc = g5_pm72_cpufreq_init(cpunode);
#ifdef CONFIG_PMAC_SMU
else
- rc = g5_neo2_cpufreq_init(cpus);
+ rc = g5_neo2_cpufreq_init(cpunode);
#endif /* CONFIG_PMAC_SMU */
- of_node_put(cpus);
return rc;
}
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index ea8e10382ec..85f1c8c25dd 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -207,7 +207,6 @@ static struct cpufreq_driver powernow_k6_driver = {
.exit = powernow_k6_cpu_exit,
.get = powernow_k6_get,
.name = "powernow-k6",
- .owner = THIS_MODULE,
.attr = powernow_k6_attr,
};
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 95587087793..14ce480be8a 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -177,7 +177,7 @@ static int get_ranges(unsigned char *pst)
unsigned int speed;
u8 fid, vid;
- powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) *
+ powernow_table = kzalloc((sizeof(*powernow_table) *
(number_scales + 1)), GFP_KERNEL);
if (!powernow_table)
return -ENOMEM;
@@ -309,8 +309,7 @@ static int powernow_acpi_init(void)
goto err0;
}
- acpi_processor_perf = kzalloc(sizeof(struct acpi_processor_performance),
- GFP_KERNEL);
+ acpi_processor_perf = kzalloc(sizeof(*acpi_processor_perf), GFP_KERNEL);
if (!acpi_processor_perf) {
retval = -ENOMEM;
goto err0;
@@ -346,7 +345,7 @@ static int powernow_acpi_init(void)
goto err2;
}
- powernow_table = kzalloc((sizeof(struct cpufreq_frequency_table) *
+ powernow_table = kzalloc((sizeof(*powernow_table) *
(number_scales + 1)), GFP_KERNEL);
if (!powernow_table) {
retval = -ENOMEM;
@@ -497,7 +496,7 @@ static int powernow_decode_bios(int maxfid, int startvid)
"relevant to this CPU).\n",
psb->numpst);
- p += sizeof(struct psb_s);
+ p += sizeof(*psb);
pst = (struct pst_s *) p;
@@ -510,12 +509,12 @@ static int powernow_decode_bios(int maxfid, int startvid)
(maxfid == pst->maxfid) &&
(startvid == pst->startvid)) {
print_pst_entry(pst, j);
- p = (char *)pst + sizeof(struct pst_s);
+ p = (char *)pst + sizeof(*pst);
ret = get_ranges(p);
return ret;
} else {
unsigned int k;
- p = (char *)pst + sizeof(struct pst_s);
+ p = (char *)pst + sizeof(*pst);
for (k = 0; k < number_scales; k++)
p += 2;
}
@@ -717,7 +716,6 @@ static struct cpufreq_driver powernow_driver = {
.init = powernow_cpu_init,
.exit = powernow_cpu_exit,
.name = "powernow-k7",
- .owner = THIS_MODULE,
.attr = powernow_table_attr,
};
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index c39d189217c..2344a9ed17f 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -623,7 +623,7 @@ static int fill_powernow_table(struct powernow_k8_data *data,
if (check_pst_table(data, pst, maxvid))
return -EINVAL;
- powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
+ powernow_table = kmalloc((sizeof(*powernow_table)
* (data->numps + 1)), GFP_KERNEL);
if (!powernow_table) {
printk(KERN_ERR PFX "powernow_table memory alloc failure\n");
@@ -793,7 +793,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
}
/* fill in data->powernow_table */
- powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table)
+ powernow_table = kmalloc((sizeof(*powernow_table)
* (data->acpi_data.state_count + 1)), GFP_KERNEL);
if (!powernow_table) {
pr_debug("powernow_table memory alloc failure\n");
@@ -1106,7 +1106,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
if (rc)
return -ENODEV;
- data = kzalloc(sizeof(struct powernow_k8_data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data) {
printk(KERN_ERR PFX "unable to alloc powernow_k8_data");
return -ENOMEM;
@@ -1240,7 +1240,6 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
.exit = powernowk8_cpu_exit,
.get = powernowk8_get,
.name = "powernow-k8",
- .owner = THIS_MODULE,
.attr = powernow_k8_attr,
};
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index 3cae4529f95..60e81d524ea 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -300,7 +300,6 @@ static struct freq_attr *corenet_cpufreq_attr[] = {
static struct cpufreq_driver ppc_corenet_cpufreq_driver = {
.name = "ppc_cpufreq",
- .owner = THIS_MODULE,
.flags = CPUFREQ_CONST_LOOPS,
.init = corenet_cpufreq_cpu_init,
.exit = __exit_p(corenet_cpufreq_cpu_exit),
diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
index 5936f8d6f2c..2e448f0bbdc 100644
--- a/drivers/cpufreq/ppc_cbe_cpufreq.c
+++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
@@ -181,7 +181,6 @@ static struct cpufreq_driver cbe_cpufreq_driver = {
.init = cbe_cpufreq_cpu_init,
.exit = cbe_cpufreq_cpu_exit,
.name = "cbe-cpufreq",
- .owner = THIS_MODULE,
.flags = CPUFREQ_CONST_LOOPS,
};
diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
index fb3981ac829..8749eaf1879 100644
--- a/drivers/cpufreq/pxa2xx-cpufreq.c
+++ b/drivers/cpufreq/pxa2xx-cpufreq.c
@@ -191,7 +191,7 @@ static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
return ret;
}
-static __init void pxa_cpufreq_init_voltages(void)
+static void __init pxa_cpufreq_init_voltages(void)
{
vcc_core = regulator_get(NULL, "vcc_core");
if (IS_ERR(vcc_core)) {
@@ -207,7 +207,7 @@ static int pxa_cpufreq_change_voltage(pxa_freqs_t *pxa_freq)
return 0;
}
-static __init void pxa_cpufreq_init_voltages(void) { }
+static void __init pxa_cpufreq_init_voltages(void) { }
#endif
static void find_freq_tables(struct cpufreq_frequency_table **freq_table,
diff --git a/drivers/cpufreq/pxa3xx-cpufreq.c b/drivers/cpufreq/pxa3xx-cpufreq.c
index 9c92ef032a9..d26306fb00d 100644
--- a/drivers/cpufreq/pxa3xx-cpufreq.c
+++ b/drivers/cpufreq/pxa3xx-cpufreq.c
@@ -213,10 +213,12 @@ static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy)
policy->cur = policy->min = policy->max;
if (cpu_is_pxa300() || cpu_is_pxa310())
- ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa300_freqs));
+ ret = setup_freqs_table(policy, pxa300_freqs,
+ ARRAY_SIZE(pxa300_freqs));
if (cpu_is_pxa320())
- ret = setup_freqs_table(policy, ARRAY_AND_SIZE(pxa320_freqs));
+ ret = setup_freqs_table(policy, pxa320_freqs,
+ ARRAY_SIZE(pxa320_freqs));
if (ret) {
pr_err("failed to setup frequency table\n");
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index ce5b9fca9c1..22dcb81ef9d 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -524,7 +524,6 @@ static struct freq_attr *s3c2416_cpufreq_attr[] = {
};
static struct cpufreq_driver s3c2416_cpufreq_driver = {
- .owner = THIS_MODULE,
.flags = 0,
.verify = s3c2416_cpufreq_verify_speed,
.target = s3c2416_cpufreq_set_target,
diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c
index 87781eb20d6..b0f343fcb7e 100644
--- a/drivers/cpufreq/s3c24xx-cpufreq.c
+++ b/drivers/cpufreq/s3c24xx-cpufreq.c
@@ -392,7 +392,7 @@ static int s3c_cpufreq_init(struct cpufreq_policy *policy)
return 0;
}
-static __init int s3c_cpufreq_initclks(void)
+static int __init s3c_cpufreq_initclks(void)
{
_clk_mpll = s3c_cpufreq_clk_get(NULL, "mpll");
_clk_xtal = s3c_cpufreq_clk_get(NULL, "xtal");
@@ -522,7 +522,7 @@ int __init s3c_cpufreq_setboard(struct s3c_cpufreq_board *board)
/* Copy the board information so that each board can make this
* initdata. */
- ours = kzalloc(sizeof(struct s3c_cpufreq_board), GFP_KERNEL);
+ ours = kzalloc(sizeof(*ours), GFP_KERNEL);
if (ours == NULL) {
printk(KERN_ERR "%s: no memory\n", __func__);
return -ENOMEM;
@@ -615,7 +615,7 @@ static int s3c_cpufreq_build_freq(void)
size = cpu_cur.info->calc_freqtable(&cpu_cur, NULL, 0);
size++;
- ftab = kmalloc(sizeof(struct cpufreq_frequency_table) * size, GFP_KERNEL);
+ ftab = kmalloc(sizeof(*ftab) * size, GFP_KERNEL);
if (!ftab) {
printk(KERN_ERR "%s: no memory for tables\n", __func__);
return -ENOMEM;
@@ -691,7 +691,7 @@ int __init s3c_plltab_register(struct cpufreq_frequency_table *plls,
struct cpufreq_frequency_table *vals;
unsigned int size;
- size = sizeof(struct cpufreq_frequency_table) * (plls_no + 1);
+ size = sizeof(*vals) * (plls_no + 1);
vals = kmalloc(size, GFP_KERNEL);
if (vals) {
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 13bb4bae64e..8a72b0c555f 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -263,7 +263,6 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
}
static struct cpufreq_driver s3c64xx_cpufreq_driver = {
- .owner = THIS_MODULE,
.flags = 0,
.verify = s3c64xx_cpufreq_verify_speed,
.target = s3c64xx_cpufreq_set_target,
diff --git a/drivers/cpufreq/sc520_freq.c b/drivers/cpufreq/sc520_freq.c
index 77a210975fc..d6f6c6f4efa 100644
--- a/drivers/cpufreq/sc520_freq.c
+++ b/drivers/cpufreq/sc520_freq.c
@@ -147,7 +147,6 @@ static struct cpufreq_driver sc520_freq_driver = {
.init = sc520_freq_cpu_init,
.exit = sc520_freq_cpu_exit,
.name = "sc520_freq",
- .owner = THIS_MODULE,
.attr = sc520_freq_attr,
};
diff --git a/drivers/cpufreq/sh-cpufreq.c b/drivers/cpufreq/sh-cpufreq.c
index 73adb64651e..ffc6d24b0cf 100644
--- a/drivers/cpufreq/sh-cpufreq.c
+++ b/drivers/cpufreq/sh-cpufreq.c
@@ -160,7 +160,6 @@ static struct freq_attr *sh_freq_attr[] = {
};
static struct cpufreq_driver sh_cpufreq_driver = {
- .owner = THIS_MODULE,
.name = "sh",
.get = sh_cpufreq_get,
.target = sh_cpufreq_target,
diff --git a/drivers/cpufreq/sparc-us2e-cpufreq.c b/drivers/cpufreq/sparc-us2e-cpufreq.c
index 93061a40877..cf5bc2ca16f 100644
--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
+++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
@@ -351,12 +351,11 @@ static int __init us2e_freq_init(void)
struct cpufreq_driver *driver;
ret = -ENOMEM;
- driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
if (!driver)
goto err_out;
- us2e_freq_table = kzalloc(
- (NR_CPUS * sizeof(struct us2e_freq_percpu_info)),
+ us2e_freq_table = kzalloc((NR_CPUS * sizeof(*us2e_freq_table)),
GFP_KERNEL);
if (!us2e_freq_table)
goto err_out;
@@ -366,7 +365,6 @@ static int __init us2e_freq_init(void)
driver->target = us2e_freq_target;
driver->get = us2e_freq_get;
driver->exit = us2e_freq_cpu_exit;
- driver->owner = THIS_MODULE,
strcpy(driver->name, "UltraSPARC-IIe");
cpufreq_us2e_driver = driver;
diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
index 880ee293d61..ac76b489979 100644
--- a/drivers/cpufreq/sparc-us3-cpufreq.c
+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
@@ -212,12 +212,11 @@ static int __init us3_freq_init(void)
struct cpufreq_driver *driver;
ret = -ENOMEM;
- driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
if (!driver)
goto err_out;
- us3_freq_table = kzalloc(
- (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
+ us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
GFP_KERNEL);
if (!us3_freq_table)
goto err_out;
@@ -227,7 +226,6 @@ static int __init us3_freq_init(void)
driver->target = us3_freq_target;
driver->get = us3_freq_get;
driver->exit = us3_freq_cpu_exit;
- driver->owner = THIS_MODULE,
strcpy(driver->name, "UltraSPARC-III");
cpufreq_us3_driver = driver;
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index c3efa7f2a90..19e364fa595 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -18,7 +18,7 @@
#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -223,7 +223,7 @@ static int spear_cpufreq_driver_init(void)
const __be32 *val;
int cnt, i, ret;
- np = of_find_node_by_path("/cpus/cpu@0");
+ np = of_cpu_device_node_get(0);
if (!np) {
pr_err("No cpu node found");
return -ENODEV;
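
Several drivers in this series switch from hard-coded "/cpus/cpu@0" paths to of_cpu_device_node_get(), which returns a reference-counted node for a logical CPU. A hedged sketch of the lookup/put pattern (fragment, assuming <linux/of_device.h>):

	struct device_node *np;

	np = of_cpu_device_node_get(0);		/* takes a reference */
	if (!np)
		return -ENODEV;
	/* ... read properties such as "operating-points" ... */
	of_node_put(np);
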
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 0915e712fbd..f897d510584 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -575,7 +575,6 @@ static struct cpufreq_driver centrino_driver = {
.target = centrino_target,
.get = get_cur_freq,
.attr = centrino_attr,
- .owner = THIS_MODULE,
};
/*
diff --git a/drivers/cpufreq/speedstep-ich.c b/drivers/cpufreq/speedstep-ich.c
index e2e5aa97145..5355abb69af 100644
--- a/drivers/cpufreq/speedstep-ich.c
+++ b/drivers/cpufreq/speedstep-ich.c
@@ -378,7 +378,6 @@ static struct cpufreq_driver speedstep_driver = {
.init = speedstep_cpu_init,
.exit = speedstep_cpu_exit,
.get = speedstep_get,
- .owner = THIS_MODULE,
.attr = speedstep_attr,
};
diff --git a/drivers/cpufreq/speedstep-smi.c b/drivers/cpufreq/speedstep-smi.c
index f5a6b70ee6c..abfba4f731e 100644
--- a/drivers/cpufreq/speedstep-smi.c
+++ b/drivers/cpufreq/speedstep-smi.c
@@ -375,7 +375,6 @@ static struct cpufreq_driver speedstep_driver = {
.exit = speedstep_cpu_exit,
.get = speedstep_get,
.resume = speedstep_resume,
- .owner = THIS_MODULE,
.attr = speedstep_attr,
};
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index cd66b85d927..a7b876fdc1d 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -255,7 +255,7 @@ static struct cpufreq_driver tegra_cpufreq_driver = {
static int __init tegra_cpufreq_init(void)
{
- cpu_clk = clk_get_sys(NULL, "cpu");
+ cpu_clk = clk_get_sys(NULL, "cclk");
if (IS_ERR(cpu_clk))
return PTR_ERR(cpu_clk);
@@ -263,7 +263,7 @@ static int __init tegra_cpufreq_init(void)
if (IS_ERR(pll_x_clk))
return PTR_ERR(pll_x_clk);
- pll_p_clk = clk_get_sys(NULL, "pll_p_cclk");
+ pll_p_clk = clk_get_sys(NULL, "pll_p");
if (IS_ERR(pll_p_clk))
return PTR_ERR(pll_p_clk);
diff --git a/drivers/cpufreq/unicore2-cpufreq.c b/drivers/cpufreq/unicore2-cpufreq.c
index 12fc904d7da..b225f04d8ae 100644
--- a/drivers/cpufreq/unicore2-cpufreq.c
+++ b/drivers/cpufreq/unicore2-cpufreq.c
@@ -24,7 +24,7 @@ static struct cpufreq_driver ucv2_driver;
/* make sure that only the "userspace" governor is run
* -- anything else wouldn't make sense on this platform, anyway.
*/
-int ucv2_verify_speed(struct cpufreq_policy *policy)
+static int ucv2_verify_speed(struct cpufreq_policy *policy)
{
if (policy->cpu)
return -EINVAL;
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 0e2cd5cab4d..b3fb81d7cf0 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -1,5 +1,6 @@
+menu "CPU Idle"
-menuconfig CPU_IDLE
+config CPU_IDLE
bool "CPU idle PM support"
default y if ACPI || PPC_PSERIES
select CPU_IDLE_GOV_LADDER if (!NO_HZ && !NO_HZ_IDLE)
@@ -29,20 +30,13 @@ config CPU_IDLE_GOV_MENU
bool "Menu governor (for tickless system)"
default y
-config CPU_IDLE_CALXEDA
- bool "CPU Idle Driver for Calxeda processors"
- depends on ARCH_HIGHBANK
- select ARM_CPU_SUSPEND
- help
- Select this to enable cpuidle on Calxeda processors.
-
-config CPU_IDLE_ZYNQ
- bool "CPU Idle Driver for Xilinx Zynq processors"
- depends on ARCH_ZYNQ
- help
- Select this to enable cpuidle on Xilinx Zynq processors.
+menu "ARM CPU Idle Drivers"
+depends on ARM
+source "drivers/cpuidle/Kconfig.arm"
+endmenu
endif
config ARCH_NEEDS_CPU_IDLE_COUPLED
def_bool n
+endmenu
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
new file mode 100644
index 00000000000..b3302193c15
--- /dev/null
+++ b/drivers/cpuidle/Kconfig.arm
@@ -0,0 +1,29 @@
+#
+# ARM CPU Idle drivers
+#
+
+config ARM_HIGHBANK_CPUIDLE
+ bool "CPU Idle Driver for Calxeda processors"
+ depends on ARCH_HIGHBANK
+ select ARM_CPU_SUSPEND
+ help
+ Select this to enable cpuidle on Calxeda processors.
+
+config ARM_KIRKWOOD_CPUIDLE
+ bool "CPU Idle Driver for Marvell Kirkwood SoCs"
+ depends on ARCH_KIRKWOOD
+ help
+ This adds the CPU Idle driver for Marvell Kirkwood SoCs.
+
+config ARM_ZYNQ_CPUIDLE
+ bool "CPU Idle Driver for Xilinx Zynq processors"
+ depends on ARCH_ZYNQ
+ help
+ Select this to enable cpuidle on Xilinx Zynq processors.
+
+config ARM_U8500_CPUIDLE
+ bool "Cpu Idle Driver for the ST-E u8500 processors"
+ depends on ARCH_U8500
+ help
+ Select this to enable cpuidle for ST-E u8500 processors.
+
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile
index 8767a7b3eb9..0b9d200c7e4 100644
--- a/drivers/cpuidle/Makefile
+++ b/drivers/cpuidle/Makefile
@@ -5,6 +5,9 @@
obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
-obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o
-obj-$(CONFIG_ARCH_KIRKWOOD) += cpuidle-kirkwood.o
-obj-$(CONFIG_CPU_IDLE_ZYNQ) += cpuidle-zynq.o
+##################################################################################
+# ARM SoC drivers
+obj-$(CONFIG_ARM_HIGHBANK_CPUIDLE) += cpuidle-calxeda.o
+obj-$(CONFIG_ARM_KIRKWOOD_CPUIDLE) += cpuidle-kirkwood.o
+obj-$(CONFIG_ARM_ZYNQ_CPUIDLE) += cpuidle-zynq.o
+obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 2a297f86dba..f8a86364c6b 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -106,6 +106,7 @@ struct cpuidle_coupled {
cpumask_t coupled_cpus;
int requested_state[NR_CPUS];
atomic_t ready_waiting_counts;
+ atomic_t abort_barrier;
int online_count;
int refcnt;
int prevent;
@@ -122,12 +123,19 @@ static DEFINE_MUTEX(cpuidle_coupled_lock);
static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
/*
- * The cpuidle_coupled_poked_mask mask is used to avoid calling
+ * The cpuidle_coupled_poke_pending mask is used to avoid calling
* __smp_call_function_single with the per cpu call_single_data struct already
* in use. This prevents a deadlock where two cpus are waiting for each other's
* call_single_data struct to be available.
*/
-static cpumask_t cpuidle_coupled_poked_mask;
+static cpumask_t cpuidle_coupled_poke_pending;
+
+/*
+ * The cpuidle_coupled_poked mask is used to ensure that each cpu has been poked
+ * once to minimize entering the ready loop with a poke pending, which would
+ * require aborting and retrying.
+ */
+static cpumask_t cpuidle_coupled_poked;
/**
* cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
@@ -291,10 +299,11 @@ static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
return state;
}
-static void cpuidle_coupled_poked(void *info)
+static void cpuidle_coupled_handle_poke(void *info)
{
int cpu = (unsigned long)info;
- cpumask_clear_cpu(cpu, &cpuidle_coupled_poked_mask);
+ cpumask_set_cpu(cpu, &cpuidle_coupled_poked);
+ cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending);
}
/**
@@ -313,7 +322,7 @@ static void cpuidle_coupled_poke(int cpu)
{
struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
- if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poked_mask))
+ if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
__smp_call_function_single(cpu, csd, 0);
}
@@ -340,30 +349,19 @@ static void cpuidle_coupled_poke_others(int this_cpu,
* @coupled: the struct coupled that contains the current cpu
* @next_state: the index in drv->states of the requested state for this cpu
*
- * Updates the requested idle state for the specified cpuidle device,
- * poking all coupled cpus out of idle if necessary to let them see the new
- * state.
+ * Updates the requested idle state for the specified cpuidle device.
+ * Returns the number of waiting cpus.
*/
-static void cpuidle_coupled_set_waiting(int cpu,
+static int cpuidle_coupled_set_waiting(int cpu,
struct cpuidle_coupled *coupled, int next_state)
{
- int w;
-
coupled->requested_state[cpu] = next_state;
/*
- * If this is the last cpu to enter the waiting state, poke
- * all the other cpus out of their waiting state so they can
- * enter a deeper state. This can race with one of the cpus
- * exiting the waiting state due to an interrupt and
- * decrementing waiting_count, see comment below.
- *
* The atomic_inc_return provides a write barrier to order the write
* to requested_state with the later write that increments ready_count.
*/
- w = atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
- if (w == coupled->online_count)
- cpuidle_coupled_poke_others(cpu, coupled);
+ return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
}
/**
@@ -410,19 +408,33 @@ static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
* been processed and the poke bit has been cleared.
*
* Other interrupts may also be processed while interrupts are enabled, so
- * need_resched() must be tested after turning interrupts off again to make sure
+ * need_resched() must be tested after this function returns to make sure
* the interrupt didn't schedule work that should take the cpu out of idle.
*
- * Returns 0 if need_resched was false, -EINTR if need_resched was true.
+ * Returns 0 if no poke was pending, 1 if a poke was cleared.
*/
static int cpuidle_coupled_clear_pokes(int cpu)
{
+ if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
+ return 0;
+
local_irq_enable();
- while (cpumask_test_cpu(cpu, &cpuidle_coupled_poked_mask))
+ while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
cpu_relax();
local_irq_disable();
- return need_resched() ? -EINTR : 0;
+ return 1;
+}
+
+static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
+{
+ cpumask_t cpus;
+ int ret;
+
+ cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
+ ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus);
+
+ return ret;
}
/**
@@ -449,31 +461,56 @@ int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
{
int entered_state = -1;
struct cpuidle_coupled *coupled = dev->coupled;
+ int w;
if (!coupled)
return -EINVAL;
while (coupled->prevent) {
- if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+ cpuidle_coupled_clear_pokes(dev->cpu);
+ if (need_resched()) {
local_irq_enable();
return entered_state;
}
entered_state = cpuidle_enter_state(dev, drv,
dev->safe_state_index);
+ local_irq_disable();
}
/* Read barrier ensures online_count is read after prevent is cleared */
smp_rmb();
- cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
+reset:
+ cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked);
+
+ w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
+ /*
+ * If this is the last cpu to enter the waiting state, poke
+ * all the other cpus out of their waiting state so they can
+ * enter a deeper state. This can race with one of the cpus
+ * exiting the waiting state due to an interrupt and
+ * decrementing waiting_count, see comment below.
+ */
+ if (w == coupled->online_count) {
+ cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked);
+ cpuidle_coupled_poke_others(dev->cpu, coupled);
+ }
retry:
/*
* Wait for all coupled cpus to be idle, using the deepest state
- * allowed for a single cpu.
+ * allowed for a single cpu. If this was not the poking cpu, wait
+ * for at least one poke before leaving to avoid a race where
+ * two cpus could arrive at the waiting loop at the same time,
+ * but the first of the two to arrive could skip the loop without
+ * processing the pokes from the last to arrive.
*/
- while (!cpuidle_coupled_cpus_waiting(coupled)) {
- if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+ while (!cpuidle_coupled_cpus_waiting(coupled) ||
+ !cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) {
+ if (cpuidle_coupled_clear_pokes(dev->cpu))
+ continue;
+
+ if (need_resched()) {
cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
goto out;
}
@@ -485,14 +522,22 @@ retry:
entered_state = cpuidle_enter_state(dev, drv,
dev->safe_state_index);
+ local_irq_disable();
}
- if (cpuidle_coupled_clear_pokes(dev->cpu)) {
+ cpuidle_coupled_clear_pokes(dev->cpu);
+ if (need_resched()) {
cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
goto out;
}
/*
+ * Make sure final poke status for this cpu is visible before setting
+ * cpu as ready.
+ */
+ smp_wmb();
+
+ /*
* All coupled cpus are probably idle. There is a small chance that
* one of the other cpus just became active. Increment the ready count,
* and spin until all coupled cpus have incremented the counter. Once a
@@ -511,6 +556,28 @@ retry:
cpu_relax();
}
+ /*
+ * Make sure read of all cpus ready is done before reading pending pokes
+ */
+ smp_rmb();
+
+ /*
+ * There is a small chance that a cpu left and reentered idle after this
+ * cpu saw that all cpus were waiting. The cpu that reentered idle will
+ * have sent this cpu a poke, which will still be pending after the
+ * ready loop. The pending interrupt may be lost by the interrupt
+ * controller when entering the deep idle state. It's not possible to
+ * clear a pending interrupt without turning interrupts on and handling
+ * it, and it's too late to turn on interrupts here, so reset the
+ * coupled idle state of all cpus and retry.
+ */
+ if (cpuidle_coupled_any_pokes_pending(coupled)) {
+ cpuidle_coupled_set_done(dev->cpu, coupled);
+ /* Wait for all cpus to see the pending pokes */
+ cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier);
+ goto reset;
+ }
+
/* all cpus have acked the coupled state */
next_state = cpuidle_coupled_get_state(dev, coupled);
@@ -596,7 +663,7 @@ have_coupled:
coupled->refcnt++;
csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
- csd->func = cpuidle_coupled_poked;
+ csd->func = cpuidle_coupled_handle_poke;
csd->info = (void *)(unsigned long)dev->cpu;
return 0;
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index 0e6e408c0a6..34605847957 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -35,7 +35,7 @@
#include <asm/cp15.h>
extern void highbank_set_cpu_jump(int cpu, void *jump_addr);
-extern void *scu_base_addr;
+extern void __iomem *scu_base_addr;
static noinline void calxeda_idle_restore(void)
{
diff --git a/drivers/cpuidle/cpuidle-kirkwood.c b/drivers/cpuidle/cpuidle-kirkwood.c
index 521b0a7fdd8..41ba843251b 100644
--- a/drivers/cpuidle/cpuidle-kirkwood.c
+++ b/drivers/cpuidle/cpuidle-kirkwood.c
@@ -60,9 +60,6 @@ static int kirkwood_cpuidle_probe(struct platform_device *pdev)
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL)
- return -EINVAL;
-
ddr_operation_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ddr_operation_base))
return PTR_ERR(ddr_operation_base);
@@ -70,7 +67,7 @@ static int kirkwood_cpuidle_probe(struct platform_device *pdev)
return cpuidle_register(&kirkwood_idle_driver, NULL);
}
-int kirkwood_cpuidle_remove(struct platform_device *pdev)
+static int kirkwood_cpuidle_remove(struct platform_device *pdev)
{
cpuidle_unregister(&kirkwood_idle_driver);
return 0;
diff --git a/arch/arm/mach-ux500/cpuidle.c b/drivers/cpuidle/cpuidle-ux500.c
index a45dd09daed..e0564652af3 100644
--- a/arch/arm/mach-ux500/cpuidle.c
+++ b/drivers/cpuidle/cpuidle-ux500.c
@@ -16,13 +16,11 @@
#include <linux/smp.h>
#include <linux/mfd/dbx500-prcmu.h>
#include <linux/platform_data/arm-ux500-pm.h>
+#include <linux/platform_device.h>
#include <asm/cpuidle.h>
#include <asm/proc-fns.h>
-#include "db8500-regs.h"
-#include "id.h"
-
static atomic_t master = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(master_lock);
@@ -113,11 +111,8 @@ static struct cpuidle_driver ux500_idle_driver = {
.state_count = 2,
};
-int __init ux500_idle_init(void)
+static int __init dbx500_cpuidle_probe(struct platform_device *pdev)
{
- if (!(cpu_is_u8500_family() || cpu_is_ux540_family()))
- return -ENODEV;
-
/* Configure wake up reasons */
prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
PRCMU_WAKEUP(ABB));
@@ -125,4 +120,12 @@ int __init ux500_idle_init(void)
return cpuidle_register(&ux500_idle_driver, NULL);
}
-device_initcall(ux500_idle_init);
+static struct platform_driver dbx500_cpuidle_plat_driver = {
+ .driver = {
+ .name = "cpuidle-dbx500",
+ .owner = THIS_MODULE,
+ },
+ .probe = dbx500_cpuidle_probe,
+};
+
+module_platform_driver(dbx500_cpuidle_plat_driver);
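Because the driver is now probed like any other platform driver, some platform code must register a matching "cpuidle-dbx500" platform device for dbx500_cpuidle_probe() to run. A hedged sketch of what that registration could look like; the actual call site is not part of this diff:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/platform_device.h>

static int __init dbx500_add_cpuidle_device(void)
{
	struct platform_device *pdev;

	/* Name must match dbx500_cpuidle_plat_driver.driver.name above. */
	pdev = platform_device_register_simple("cpuidle-dbx500", -1, NULL, 0);
	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}
device_initcall(dbx500_add_cpuidle_device);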
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index fdc432f1802..d75040ddd2b 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -42,8 +42,6 @@ void disable_cpuidle(void)
off = 1;
}
-static int __cpuidle_register_device(struct cpuidle_device *dev);
-
/**
* cpuidle_play_dead - cpu off-lining
*
@@ -278,7 +276,7 @@ static void poll_idle_init(struct cpuidle_driver *drv) {}
*/
int cpuidle_enable_device(struct cpuidle_device *dev)
{
- int ret, i;
+ int ret;
struct cpuidle_driver *drv;
if (!dev)
@@ -292,15 +290,12 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
if (!drv || !cpuidle_curr_governor)
return -EIO;
+ if (!dev->registered)
+ return -EINVAL;
+
if (!dev->state_count)
dev->state_count = drv->state_count;
- if (dev->registered == 0) {
- ret = __cpuidle_register_device(dev);
- if (ret)
- return ret;
- }
-
poll_idle_init(drv);
ret = cpuidle_add_device_sysfs(dev);
@@ -311,12 +306,6 @@ int cpuidle_enable_device(struct cpuidle_device *dev)
(ret = cpuidle_curr_governor->enable(drv, dev)))
goto fail_sysfs;
- for (i = 0; i < dev->state_count; i++) {
- dev->states_usage[i].usage = 0;
- dev->states_usage[i].time = 0;
- }
- dev->last_residency = 0;
-
smp_wmb();
dev->enabled = 1;
@@ -360,6 +349,23 @@ void cpuidle_disable_device(struct cpuidle_device *dev)
EXPORT_SYMBOL_GPL(cpuidle_disable_device);
+static void __cpuidle_unregister_device(struct cpuidle_device *dev)
+{
+ struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+
+ list_del(&dev->device_list);
+ per_cpu(cpuidle_devices, dev->cpu) = NULL;
+ module_put(drv->owner);
+}
+
+static int __cpuidle_device_init(struct cpuidle_device *dev)
+{
+ memset(dev->states_usage, 0, sizeof(dev->states_usage));
+ dev->last_residency = 0;
+
+ return 0;
+}
+
/**
* __cpuidle_register_device - internal register function called before register
* and enable routines
@@ -377,24 +383,15 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
- ret = cpuidle_add_sysfs(dev);
- if (ret)
- goto err_sysfs;
ret = cpuidle_coupled_register_device(dev);
- if (ret)
- goto err_coupled;
+ if (ret) {
+ __cpuidle_unregister_device(dev);
+ return ret;
+ }
dev->registered = 1;
return 0;
-
-err_coupled:
- cpuidle_remove_sysfs(dev);
-err_sysfs:
- list_del(&dev->device_list);
- per_cpu(cpuidle_devices, dev->cpu) = NULL;
- module_put(drv->owner);
- return ret;
}
/**
@@ -403,25 +400,44 @@ err_sysfs:
*/
int cpuidle_register_device(struct cpuidle_device *dev)
{
- int ret;
+ int ret = -EBUSY;
if (!dev)
return -EINVAL;
mutex_lock(&cpuidle_lock);
- if ((ret = __cpuidle_register_device(dev))) {
- mutex_unlock(&cpuidle_lock);
- return ret;
- }
+ if (dev->registered)
+ goto out_unlock;
+
+ ret = __cpuidle_device_init(dev);
+ if (ret)
+ goto out_unlock;
+
+ ret = __cpuidle_register_device(dev);
+ if (ret)
+ goto out_unlock;
+
+ ret = cpuidle_add_sysfs(dev);
+ if (ret)
+ goto out_unregister;
+
+ ret = cpuidle_enable_device(dev);
+ if (ret)
+ goto out_sysfs;
- cpuidle_enable_device(dev);
cpuidle_install_idle_handler();
+out_unlock:
mutex_unlock(&cpuidle_lock);
- return 0;
+ return ret;
+out_sysfs:
+ cpuidle_remove_sysfs(dev);
+out_unregister:
+ __cpuidle_unregister_device(dev);
+ goto out_unlock;
}
EXPORT_SYMBOL_GPL(cpuidle_register_device);
@@ -432,8 +448,6 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device);
*/
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
- struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
-
if (dev->registered == 0)
return;
@@ -442,14 +456,12 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
cpuidle_disable_device(dev);
cpuidle_remove_sysfs(dev);
- list_del(&dev->device_list);
- per_cpu(cpuidle_devices, dev->cpu) = NULL;
+
+ __cpuidle_unregister_device(dev);
cpuidle_coupled_unregister_device(dev);
cpuidle_resume_and_unlock();
-
- module_put(drv->owner);
}
EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 9b784051ec1..9f08e8cce1a 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -192,14 +192,4 @@ static int __init init_ladder(void)
return cpuidle_register_governor(&ladder_governor);
}
-/**
- * exit_ladder - exits the governor
- */
-static void __exit exit_ladder(void)
-{
- cpuidle_unregister_governor(&ladder_governor);
-}
-
-MODULE_LICENSE("GPL");
-module_init(init_ladder);
-module_exit(exit_ladder);
+postcore_initcall(init_ladder);
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index fe343a06b7d..cf7f2f0e4ef 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -21,6 +21,15 @@
#include <linux/math64.h>
#include <linux/module.h>
+/*
+ * Please note when changing the tuning values:
+ * If (MAX_INTERESTING-1) * RESOLUTION > UINT_MAX, the result of
+ * a scaling operation multiplication may overflow on 32 bit platforms.
+ * In that case, #define RESOLUTION as ULL to get 64 bit result:
+ * #define RESOLUTION 1024ULL
+ *
+ * The default values do not overflow.
+ */
#define BUCKETS 12
#define INTERVALS 8
#define RESOLUTION 1024
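With the defaults shown here the product is nowhere near the limit: (MAX_INTERESTING - 1) * RESOLUTION = 49999 * 1024 = 51,198,976, versus UINT_MAX = 4,294,967,295. A standalone compile-time check of that bound could look like this (illustrative only, not part of the patch):

/* C11 static assertion over the default tuning values. */
_Static_assert((50000ULL - 1) * 1024ULL <= 0xFFFFFFFFULL,
	       "menu governor scaling must fit in 32 bits");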
@@ -28,13 +37,6 @@
#define MAX_INTERESTING 50000
#define STDDEV_THRESH 400
-/* 60 * 60 > STDDEV_THRESH * INTERVALS = 400 * 8 */
-#define MAX_DEVIATION 60
-
-static DEFINE_PER_CPU(struct hrtimer, menu_hrtimer);
-static DEFINE_PER_CPU(int, hrtimer_status);
-/* menu hrtimer mode */
-enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};
/*
* Concepts and ideas behind the menu governor
@@ -116,23 +118,16 @@ enum {MENU_HRTIMER_STOP, MENU_HRTIMER_REPEAT, MENU_HRTIMER_GENERAL};
*
*/
-/*
- * The C-state residency is so long that is is worthwhile to exit
- * from the shallow C-state and re-enter into a deeper C-state.
- */
-static unsigned int perfect_cstate_ms __read_mostly = 30;
-module_param(perfect_cstate_ms, uint, 0000);
-
struct menu_device {
int last_state_idx;
int needs_update;
unsigned int expected_us;
- u64 predicted_us;
+ unsigned int predicted_us;
unsigned int exit_us;
unsigned int bucket;
- u64 correction_factor[BUCKETS];
- u32 intervals[INTERVALS];
+ unsigned int correction_factor[BUCKETS];
+ unsigned int intervals[INTERVALS];
int interval_ptr;
};
@@ -205,59 +200,28 @@ static u64 div_round64(u64 dividend, u32 divisor)
return div_u64(dividend + (divisor / 2), divisor);
}
-/* Cancel the hrtimer if it is not triggered yet */
-void menu_hrtimer_cancel(void)
-{
- int cpu = smp_processor_id();
- struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
-
- /* The timer is still not time out*/
- if (per_cpu(hrtimer_status, cpu)) {
- hrtimer_cancel(hrtmr);
- per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
- }
-}
-EXPORT_SYMBOL_GPL(menu_hrtimer_cancel);
-
-/* Call back for hrtimer is triggered */
-static enum hrtimer_restart menu_hrtimer_notify(struct hrtimer *hrtimer)
-{
- int cpu = smp_processor_id();
- struct menu_device *data = &per_cpu(menu_devices, cpu);
-
- /* In general case, the expected residency is much larger than
- * deepest C-state target residency, but prediction logic still
- * predicts a small predicted residency, so the prediction
- * history is totally broken if the timer is triggered.
- * So reset the correction factor.
- */
- if (per_cpu(hrtimer_status, cpu) == MENU_HRTIMER_GENERAL)
- data->correction_factor[data->bucket] = RESOLUTION * DECAY;
-
- per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_STOP;
-
- return HRTIMER_NORESTART;
-}
-
/*
* Try detecting repeating patterns by keeping track of the last 8
* intervals, and checking if the standard deviation of that set
* of points is below a threshold. If it is... then use the
* average of these 8 points as the estimated value.
*/
-static u32 get_typical_interval(struct menu_device *data)
+static void get_typical_interval(struct menu_device *data)
{
- int i = 0, divisor = 0;
- uint64_t max = 0, avg = 0, stddev = 0;
- int64_t thresh = LLONG_MAX; /* Discard outliers above this value. */
- unsigned int ret = 0;
+ int i, divisor;
+ unsigned int max, thresh;
+ uint64_t avg, stddev;
+
+ thresh = UINT_MAX; /* Discard outliers above this value */
again:
- /* first calculate average and standard deviation of the past */
- max = avg = divisor = stddev = 0;
+ /* First calculate the average of past intervals */
+ max = 0;
+ avg = 0;
+ divisor = 0;
for (i = 0; i < INTERVALS; i++) {
- int64_t value = data->intervals[i];
+ unsigned int value = data->intervals[i];
if (value <= thresh) {
avg += value;
divisor++;
@@ -267,15 +231,38 @@ again:
}
do_div(avg, divisor);
+ /* Then try to determine standard deviation */
+ stddev = 0;
for (i = 0; i < INTERVALS; i++) {
- int64_t value = data->intervals[i];
+ unsigned int value = data->intervals[i];
if (value <= thresh) {
int64_t diff = value - avg;
stddev += diff * diff;
}
}
do_div(stddev, divisor);
- stddev = int_sqrt(stddev);
+ /*
+ * The typical interval is obtained when standard deviation is small
+ * or standard deviation is small compared to the average interval.
+ *
+ * int_sqrt() formal parameter type is unsigned long. When the
+ * greatest difference to an outlier exceeds ~65 ms * sqrt(divisor)
+ * the resulting squared standard deviation exceeds the input domain
+ * of int_sqrt on platforms where unsigned long is 32 bits in size.
+ * In such a case, reject the candidate average.
+ *
+ * Use this result only if there is no timer to wake us up sooner.
+ */
+ if (likely(stddev <= ULONG_MAX)) {
+ stddev = int_sqrt(stddev);
+ if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
+ || stddev <= 20) {
+ if (data->expected_us > avg)
+ data->predicted_us = avg;
+ return;
+ }
+ }
+
/*
* If we have outliers to the upside in our distribution, discard
* those by setting the threshold to exclude these outliers, then
@@ -284,23 +271,12 @@ again:
*
* This can deal with workloads that have long pauses interspersed
* with sporadic activity with a bunch of short pauses.
- *
- * The typical interval is obtained when standard deviation is small
- * or standard deviation is small compared to the average interval.
*/
- if (((avg > stddev * 6) && (divisor * 4 >= INTERVALS * 3))
- || stddev <= 20) {
- data->predicted_us = avg;
- ret = 1;
- return ret;
-
- } else if ((divisor * 4) > INTERVALS * 3) {
- /* Exclude the max interval */
- thresh = max - 1;
- goto again;
- }
+ if ((divisor * 4) <= INTERVALS * 3)
+ return;
- return ret;
+ thresh = max - 1;
+ goto again;
}
/**
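The "~65 ms * sqrt(divisor)" figure in the comment above is straightforward arithmetic: before int_sqrt() is applied, stddev holds sum(diff^2) / divisor. If a single outlier dominates with difference D, that is roughly D^2 / divisor, which exceeds a 32-bit unsigned long once D^2 / divisor > 2^32 - 1, i.e. once D > 2^16 * sqrt(divisor) microseconds, about 65.5 ms * sqrt(divisor).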
@@ -315,9 +291,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
int i;
int multiplier;
struct timespec t;
- int repeat = 0, low_predicted = 0;
- int cpu = smp_processor_id();
- struct hrtimer *hrtmr = &per_cpu(menu_hrtimer, cpu);
if (data->needs_update) {
menu_update(drv, dev);
@@ -348,11 +321,16 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
if (data->correction_factor[data->bucket] == 0)
data->correction_factor[data->bucket] = RESOLUTION * DECAY;
- /* Make sure to round up for half microseconds */
- data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
+ /*
+ * Force the result of multiplication to be 64 bits even if both
+ * operands are 32 bits.
+ * Make sure to round up for half microseconds.
+ */
+ data->predicted_us = div_round64((uint64_t)data->expected_us *
+ data->correction_factor[data->bucket],
RESOLUTION * DECAY);
- repeat = get_typical_interval(data);
+ get_typical_interval(data);
/*
* We want to default to C1 (hlt), not to busy polling
@@ -373,10 +351,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
if (s->disabled || su->disable)
continue;
- if (s->target_residency > data->predicted_us) {
- low_predicted = 1;
+ if (s->target_residency > data->predicted_us)
continue;
- }
if (s->exit_latency > latency_req)
continue;
if (s->exit_latency * multiplier > data->predicted_us)
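The (uint64_t) cast above matters because C performs the multiplication in the operands' type first: two 32-bit operands produce a 32-bit, possibly wrapped, product even when the result is assigned to a 64-bit variable. A standalone illustration with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t expected = 500000, factor = 10000;	/* product 5e9 > UINT32_MAX */
	uint64_t wrapped = expected * factor;		/* 32-bit multiply wraps first */
	uint64_t widened = (uint64_t)expected * factor;	/* full 64-bit product */

	printf("%llu vs %llu\n",
	       (unsigned long long)wrapped, (unsigned long long)widened);
	return 0;
}

This prints 705032704 vs 5000000000, the first being 5,000,000,000 modulo 2^32.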
@@ -386,44 +362,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
data->exit_us = s->exit_latency;
}
- /* not deepest C-state chosen for low predicted residency */
- if (low_predicted) {
- unsigned int timer_us = 0;
- unsigned int perfect_us = 0;
-
- /*
- * Set a timer to detect whether this sleep is much
- * longer than repeat mode predicted. If the timer
- * triggers, the code will evaluate whether to put
- * the CPU into a deeper C-state.
- * The timer is cancelled on CPU wakeup.
- */
- timer_us = 2 * (data->predicted_us + MAX_DEVIATION);
-
- perfect_us = perfect_cstate_ms * 1000;
-
- if (repeat && (4 * timer_us < data->expected_us)) {
- RCU_NONIDLE(hrtimer_start(hrtmr,
- ns_to_ktime(1000 * timer_us),
- HRTIMER_MODE_REL_PINNED));
- /* In repeat case, menu hrtimer is started */
- per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_REPEAT;
- } else if (perfect_us < data->expected_us) {
- /*
- * The next timer is long. This could be because
- * we did not make a useful prediction.
- * In that case, it makes sense to re-enter
- * into a deeper C-state after some time.
- */
- RCU_NONIDLE(hrtimer_start(hrtmr,
- ns_to_ktime(1000 * timer_us),
- HRTIMER_MODE_REL_PINNED));
- /* In general case, menu hrtimer is started */
- per_cpu(hrtimer_status, cpu) = MENU_HRTIMER_GENERAL;
- }
-
- }
-
return data->last_state_idx;
}
@@ -455,7 +393,7 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
unsigned int last_idle_us = cpuidle_get_last_residency(dev);
struct cpuidle_state *target = &drv->states[last_idx];
unsigned int measured_us;
- u64 new_factor;
+ unsigned int new_factor;
/*
* Ugh, this idle state doesn't support residency measurements, so we
@@ -476,10 +414,9 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
measured_us -= data->exit_us;
- /* update our correction ratio */
-
- new_factor = data->correction_factor[data->bucket]
- * (DECAY - 1) / DECAY;
+ /* Update our correction ratio */
+ new_factor = data->correction_factor[data->bucket];
+ new_factor -= new_factor / DECAY;
if (data->expected_us > 0 && measured_us < MAX_INTERESTING)
new_factor += RESOLUTION * measured_us / data->expected_us;
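The two lines above implement an exponentially decaying average in fixed point. Assuming the in-tree defaults (RESOLUTION = 1024 and DECAY = 8; DECAY is not visible in this hunk), a run of perfect predictions (measured_us == expected_us) removes one eighth of the factor and adds 1024 each pass, so it converges on the fixed point x = x - x/8 + 1024, i.e. x = RESOLUTION * DECAY = 8192. Consistently waking after only half the predicted time converges on 4096 instead, which halves future predictions.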
@@ -492,9 +429,11 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
/*
* We don't want 0 as factor; we always want at least
- * a tiny bit of estimated time.
+ * a tiny bit of estimated time. Fortunately, due to rounding,
+ * new_factor will stay nonzero regardless of measured_us values
+ * and the compiler can eliminate this test as long as DECAY > 1.
*/
- if (new_factor == 0)
+ if (DECAY == 1 && unlikely(new_factor == 0))
new_factor = 1;
data->correction_factor[data->bucket] = new_factor;
@@ -514,9 +453,6 @@ static int menu_enable_device(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
- struct hrtimer *t = &per_cpu(menu_hrtimer, dev->cpu);
- hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
- t->function = menu_hrtimer_notify;
memset(data, 0, sizeof(struct menu_device));
@@ -540,14 +476,4 @@ static int __init init_menu(void)
return cpuidle_register_governor(&menu_governor);
}
-/**
- * exit_menu - exits the governor
- */
-static void __exit exit_menu(void)
-{
- cpuidle_unregister_governor(&menu_governor);
-}
-
-MODULE_LICENSE("GPL");
-module_init(init_menu);
-module_exit(exit_menu);
+postcore_initcall(init_menu);
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 428754af623..8739cc05228 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -11,8 +11,10 @@
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/cpu.h>
+#include <linux/completion.h>
#include <linux/capability.h>
#include <linux/device.h>
+#include <linux/kobject.h>
#include "cpuidle.h"
@@ -33,7 +35,8 @@ static ssize_t show_available_governors(struct device *dev,
mutex_lock(&cpuidle_lock);
list_for_each_entry(tmp, &cpuidle_governors, governor_list) {
- if (i >= (ssize_t) ((PAGE_SIZE/sizeof(char)) - CPUIDLE_NAME_LEN - 2))
+ if (i >= (ssize_t) ((PAGE_SIZE/sizeof(char)) -
+ CPUIDLE_NAME_LEN - 2))
goto out;
i += scnprintf(&buf[i], CPUIDLE_NAME_LEN, "%s ", tmp->name);
}
@@ -166,13 +169,28 @@ struct cpuidle_attr {
#define define_one_rw(_name, show, store) \
static struct cpuidle_attr attr_##_name = __ATTR(_name, 0644, show, store)
-#define kobj_to_cpuidledev(k) container_of(k, struct cpuidle_device, kobj)
#define attr_to_cpuidleattr(a) container_of(a, struct cpuidle_attr, attr)
-static ssize_t cpuidle_show(struct kobject * kobj, struct attribute * attr ,char * buf)
+
+struct cpuidle_device_kobj {
+ struct cpuidle_device *dev;
+ struct completion kobj_unregister;
+ struct kobject kobj;
+};
+
+static inline struct cpuidle_device *to_cpuidle_device(struct kobject *kobj)
+{
+ struct cpuidle_device_kobj *kdev =
+ container_of(kobj, struct cpuidle_device_kobj, kobj);
+
+ return kdev->dev;
+}
+
+static ssize_t cpuidle_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
{
int ret = -EIO;
- struct cpuidle_device *dev = kobj_to_cpuidledev(kobj);
- struct cpuidle_attr * cattr = attr_to_cpuidleattr(attr);
+ struct cpuidle_device *dev = to_cpuidle_device(kobj);
+ struct cpuidle_attr *cattr = attr_to_cpuidleattr(attr);
if (cattr->show) {
mutex_lock(&cpuidle_lock);
@@ -182,12 +200,12 @@ static ssize_t cpuidle_show(struct kobject * kobj, struct attribute * attr ,char
return ret;
}
-static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
- const char * buf, size_t count)
+static ssize_t cpuidle_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
{
int ret = -EIO;
- struct cpuidle_device *dev = kobj_to_cpuidledev(kobj);
- struct cpuidle_attr * cattr = attr_to_cpuidleattr(attr);
+ struct cpuidle_device *dev = to_cpuidle_device(kobj);
+ struct cpuidle_attr *cattr = attr_to_cpuidleattr(attr);
if (cattr->store) {
mutex_lock(&cpuidle_lock);
@@ -204,9 +222,10 @@ static const struct sysfs_ops cpuidle_sysfs_ops = {
static void cpuidle_sysfs_release(struct kobject *kobj)
{
- struct cpuidle_device *dev = kobj_to_cpuidledev(kobj);
+ struct cpuidle_device_kobj *kdev =
+ container_of(kobj, struct cpuidle_device_kobj, kobj);
- complete(&dev->kobj_unregister);
+ complete(&kdev->kobj_unregister);
}
static struct kobj_type ktype_cpuidle = {
@@ -237,8 +256,8 @@ static ssize_t show_state_##_name(struct cpuidle_state *state, \
#define define_store_state_ull_function(_name) \
static ssize_t store_state_##_name(struct cpuidle_state *state, \
- struct cpuidle_state_usage *state_usage, \
- const char *buf, size_t size) \
+ struct cpuidle_state_usage *state_usage, \
+ const char *buf, size_t size) \
{ \
unsigned long long value; \
int err; \
@@ -256,14 +275,16 @@ static ssize_t store_state_##_name(struct cpuidle_state *state, \
#define define_show_state_ull_function(_name) \
static ssize_t show_state_##_name(struct cpuidle_state *state, \
- struct cpuidle_state_usage *state_usage, char *buf) \
+ struct cpuidle_state_usage *state_usage, \
+ char *buf) \
{ \
return sprintf(buf, "%llu\n", state_usage->_name);\
}
#define define_show_state_str_function(_name) \
static ssize_t show_state_##_name(struct cpuidle_state *state, \
- struct cpuidle_state_usage *state_usage, char *buf) \
+ struct cpuidle_state_usage *state_usage, \
+ char *buf) \
{ \
if (state->_name[0] == '\0')\
return sprintf(buf, "<null>\n");\
@@ -309,8 +330,9 @@ struct cpuidle_state_kobj {
#define kobj_to_state(k) (kobj_to_state_obj(k)->state)
#define kobj_to_state_usage(k) (kobj_to_state_obj(k)->state_usage)
#define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr)
-static ssize_t cpuidle_state_show(struct kobject * kobj,
- struct attribute * attr ,char * buf)
+
+static ssize_t cpuidle_state_show(struct kobject *kobj, struct attribute *attr,
+ char * buf)
{
int ret = -EIO;
struct cpuidle_state *state = kobj_to_state(kobj);
@@ -323,8 +345,8 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
return ret;
}
-static ssize_t cpuidle_state_store(struct kobject *kobj,
- struct attribute *attr, const char *buf, size_t size)
+static ssize_t cpuidle_state_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t size)
{
int ret = -EIO;
struct cpuidle_state *state = kobj_to_state(kobj);
@@ -371,6 +393,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
{
int i, ret = -ENOMEM;
struct cpuidle_state_kobj *kobj;
+ struct cpuidle_device_kobj *kdev = device->kobj_dev;
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(device);
/* state statistics */
@@ -383,7 +406,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
init_completion(&kobj->kobj_unregister);
ret = kobject_init_and_add(&kobj->kobj, &ktype_state_cpuidle,
- &device->kobj, "state%d", i);
+ &kdev->kobj, "state%d", i);
if (ret) {
kfree(kobj);
goto error_state;
@@ -449,8 +472,8 @@ static void cpuidle_driver_sysfs_release(struct kobject *kobj)
complete(&driver_kobj->kobj_unregister);
}
-static ssize_t cpuidle_driver_show(struct kobject *kobj, struct attribute * attr,
- char * buf)
+static ssize_t cpuidle_driver_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
{
int ret = -EIO;
struct cpuidle_driver_kobj *driver_kobj = kobj_to_driver_kobj(kobj);
@@ -500,6 +523,7 @@ static struct kobj_type ktype_driver_cpuidle = {
static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
{
struct cpuidle_driver_kobj *kdrv;
+ struct cpuidle_device_kobj *kdev = dev->kobj_dev;
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
int ret;
@@ -511,7 +535,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
init_completion(&kdrv->kobj_unregister);
ret = kobject_init_and_add(&kdrv->kobj, &ktype_driver_cpuidle,
- &dev->kobj, "driver");
+ &kdev->kobj, "driver");
if (ret) {
kfree(kdrv);
return ret;
@@ -580,16 +604,28 @@ void cpuidle_remove_device_sysfs(struct cpuidle_device *device)
*/
int cpuidle_add_sysfs(struct cpuidle_device *dev)
{
+ struct cpuidle_device_kobj *kdev;
struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
int error;
- init_completion(&dev->kobj_unregister);
+ kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
+ if (!kdev)
+ return -ENOMEM;
+ kdev->dev = dev;
+ dev->kobj_dev = kdev;
- error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &cpu_dev->kobj,
- "cpuidle");
- if (!error)
- kobject_uevent(&dev->kobj, KOBJ_ADD);
- return error;
+ init_completion(&kdev->kobj_unregister);
+
+ error = kobject_init_and_add(&kdev->kobj, &ktype_cpuidle, &cpu_dev->kobj,
+ "cpuidle");
+ if (error) {
+ kfree(kdev);
+ return error;
+ }
+
+ kobject_uevent(&kdev->kobj, KOBJ_ADD);
+
+ return 0;
}
/**
@@ -598,6 +634,9 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
*/
void cpuidle_remove_sysfs(struct cpuidle_device *dev)
{
- kobject_put(&dev->kobj);
- wait_for_completion(&dev->kobj_unregister);
+ struct cpuidle_device_kobj *kdev = dev->kobj_dev;
+
+ kobject_put(&kdev->kobj);
+ wait_for_completion(&kdev->kobj_unregister);
+ kfree(kdev);
}
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index e94e619fe05..c99c00d35d3 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -703,7 +703,7 @@ err_out:
}
EXPORT_SYMBOL(devfreq_remove_governor);
-static ssize_t show_governor(struct device *dev,
+static ssize_t governor_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
if (!to_devfreq(dev)->governor)
@@ -712,7 +712,7 @@ static ssize_t show_governor(struct device *dev,
return sprintf(buf, "%s\n", to_devfreq(dev)->governor->name);
}
-static ssize_t store_governor(struct device *dev, struct device_attribute *attr,
+static ssize_t governor_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct devfreq *df = to_devfreq(dev);
@@ -754,9 +754,11 @@ out:
ret = count;
return ret;
}
-static ssize_t show_available_governors(struct device *d,
- struct device_attribute *attr,
- char *buf)
+static DEVICE_ATTR_RW(governor);
+
+static ssize_t available_governors_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
{
struct devfreq_governor *tmp_governor;
ssize_t count = 0;
@@ -775,9 +777,10 @@ static ssize_t show_available_governors(struct device *d,
return count;
}
+static DEVICE_ATTR_RO(available_governors);
-static ssize_t show_freq(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t cur_freq_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
unsigned long freq;
struct devfreq *devfreq = to_devfreq(dev);
@@ -788,20 +791,22 @@ static ssize_t show_freq(struct device *dev,
return sprintf(buf, "%lu\n", devfreq->previous_freq);
}
+static DEVICE_ATTR_RO(cur_freq);
-static ssize_t show_target_freq(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t target_freq_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%lu\n", to_devfreq(dev)->previous_freq);
}
+static DEVICE_ATTR_RO(target_freq);
-static ssize_t show_polling_interval(struct device *dev,
+static ssize_t polling_interval_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", to_devfreq(dev)->profile->polling_ms);
}
-static ssize_t store_polling_interval(struct device *dev,
+static ssize_t polling_interval_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -821,8 +826,9 @@ static ssize_t store_polling_interval(struct device *dev,
return ret;
}
+static DEVICE_ATTR_RW(polling_interval);
-static ssize_t store_min_freq(struct device *dev, struct device_attribute *attr,
+static ssize_t min_freq_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct devfreq *df = to_devfreq(dev);
@@ -849,13 +855,13 @@ unlock:
return ret;
}
-static ssize_t show_min_freq(struct device *dev, struct device_attribute *attr,
+static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%lu\n", to_devfreq(dev)->min_freq);
}
-static ssize_t store_max_freq(struct device *dev, struct device_attribute *attr,
+static ssize_t max_freq_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct devfreq *df = to_devfreq(dev);
@@ -881,16 +887,18 @@ unlock:
mutex_unlock(&df->lock);
return ret;
}
+static DEVICE_ATTR_RW(min_freq);
-static ssize_t show_max_freq(struct device *dev, struct device_attribute *attr,
+static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%lu\n", to_devfreq(dev)->max_freq);
}
+static DEVICE_ATTR_RW(max_freq);
-static ssize_t show_available_freqs(struct device *d,
- struct device_attribute *attr,
- char *buf)
+static ssize_t available_frequencies_show(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
{
struct devfreq *df = to_devfreq(d);
struct device *dev = df->dev.parent;
@@ -918,9 +926,10 @@ static ssize_t show_available_freqs(struct device *d,
return count;
}
+static DEVICE_ATTR_RO(available_frequencies);
-static ssize_t show_trans_table(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t trans_stat_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct devfreq *devfreq = to_devfreq(dev);
ssize_t len;
@@ -959,20 +968,21 @@ static ssize_t show_trans_table(struct device *dev, struct device_attribute *att
devfreq->total_trans);
return len;
}
-
-static struct device_attribute devfreq_attrs[] = {
- __ATTR(governor, S_IRUGO | S_IWUSR, show_governor, store_governor),
- __ATTR(available_governors, S_IRUGO, show_available_governors, NULL),
- __ATTR(cur_freq, S_IRUGO, show_freq, NULL),
- __ATTR(available_frequencies, S_IRUGO, show_available_freqs, NULL),
- __ATTR(target_freq, S_IRUGO, show_target_freq, NULL),
- __ATTR(polling_interval, S_IRUGO | S_IWUSR, show_polling_interval,
- store_polling_interval),
- __ATTR(min_freq, S_IRUGO | S_IWUSR, show_min_freq, store_min_freq),
- __ATTR(max_freq, S_IRUGO | S_IWUSR, show_max_freq, store_max_freq),
- __ATTR(trans_stat, S_IRUGO, show_trans_table, NULL),
- { },
+static DEVICE_ATTR_RO(trans_stat);
+
+static struct attribute *devfreq_attrs[] = {
+ &dev_attr_governor.attr,
+ &dev_attr_available_governors.attr,
+ &dev_attr_cur_freq.attr,
+ &dev_attr_available_frequencies.attr,
+ &dev_attr_target_freq.attr,
+ &dev_attr_polling_interval.attr,
+ &dev_attr_min_freq.attr,
+ &dev_attr_max_freq.attr,
+ &dev_attr_trans_stat.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(devfreq);
static int __init devfreq_init(void)
{
@@ -988,7 +998,7 @@ static int __init devfreq_init(void)
pr_err("%s: couldn't create workqueue\n", __FILE__);
return PTR_ERR(devfreq_wq);
}
- devfreq_class->dev_attrs = devfreq_attrs;
+ devfreq_class->dev_groups = devfreq_groups;
return 0;
}
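The renames above exist to satisfy the naming convention behind DEVICE_ATTR_RW()/DEVICE_ATTR_RO(): the macro builds dev_attr_<name> and wires it to <name>_show and <name>_store. A minimal sketch of the pattern with a made-up attribute (not from this driver):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

/* Hypothetical read-write attribute called "example". */
static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);
}

static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	return count;			/* accept and discard the input */
}
static DEVICE_ATTR_RW(example);		/* creates dev_attr_example */

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);		/* creates example_groups for dev_groups */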
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6825957c97f..daa4da281e5 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -194,7 +194,7 @@ config SIRF_DMA
Enable support for the CSR SiRFprimaII DMA engine.
config TI_EDMA
- tristate "TI EDMA support"
+ bool "TI EDMA support"
depends on ARCH_DAVINCI || ARCH_OMAP
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
@@ -287,6 +287,14 @@ config DMA_OMAP
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
+config TI_CPPI41
+ tristate "AM33xx CPPI41 DMA support"
+ depends on ARCH_OMAP
+ select DMA_ENGINE
+ help
+ The Communications Port Programming Interface (CPPI) 4.1 DMA engine
+ is currently used by the USB driver on AM335x platforms.
+
config MMP_PDMA
bool "MMP PDMA support"
depends on (ARCH_MMP || ARCH_PXA)
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 5e0f2ef8561..6d62ec30c4b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -39,3 +39,4 @@ obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
+obj-$(CONFIG_TI_CPPI41) += cppi41.o
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 9bfaddd57ef..31011d2a26f 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1339,15 +1339,14 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
{
u64 started_channels = debugfs_dma_base->pm.started_channels;
int pool_count = debugfs_dma_base->pool.debugfs_pool_counter;
- int i;
- int ret = 0;
char *dev_buf;
char *tmp;
- int dev_size;
+ int ret;
+ int i;
dev_buf = kmalloc(4*1024, GFP_KERNEL);
if (dev_buf == NULL)
- goto err_kmalloc;
+ return -ENOMEM;
tmp = dev_buf;
tmp += sprintf(tmp, "DMA -- enabled dma channels\n");
@@ -1357,26 +1356,11 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
tmp += sprintf(tmp, "channel %d\n", i);
tmp += sprintf(tmp, "Pool alloc nbr %d\n", pool_count);
- dev_size = tmp - dev_buf;
-
- /* No more to read if offset != 0 */
- if (*f_pos > dev_size)
- goto out;
- if (count > dev_size - *f_pos)
- count = dev_size - *f_pos;
-
- if (copy_to_user(buf, dev_buf + *f_pos, count))
- ret = -EINVAL;
- ret = count;
- *f_pos += count;
-
- out:
+ ret = simple_read_from_buffer(buf, count, f_pos, dev_buf,
+ tmp - dev_buf);
kfree(dev_buf);
return ret;
-
- err_kmalloc:
- return 0;
}
static const struct file_operations coh901318_debugfs_status_operations = {
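simple_read_from_buffer() replaces the hand-rolled offset and count handling: it clamps count against the bytes available, copies to user space, advances *ppos and returns the number of bytes copied or a negative errno. A minimal debugfs-style read using it (hypothetical file, for illustration):

#include <linux/fs.h>
#include <linux/types.h>

static ssize_t hello_read(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos)
{
	static const char msg[] = "hello\n";

	/* Partial reads and EOF (*ppos past the end) are handled for us. */
	return simple_read_from_buffer(buf, count, ppos, msg, sizeof(msg) - 1);
}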
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
new file mode 100644
index 00000000000..7c82b92f9b1
--- /dev/null
+++ b/drivers/dma/cppi41.c
@@ -0,0 +1,1059 @@
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/of_dma.h>
+#include <linux/of_irq.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/pm_runtime.h>
+#include "dmaengine.h"
+
+#define DESC_TYPE 27
+#define DESC_TYPE_HOST 0x10
+#define DESC_TYPE_TEARD 0x13
+
+#define TD_DESC_IS_RX (1 << 16)
+#define TD_DESC_DMA_NUM 10
+
+#define DESC_LENGTH_BITS_NUM 21
+
+#define DESC_TYPE_USB (5 << 26)
+#define DESC_PD_COMPLETE (1 << 31)
+
+/* DMA engine */
+#define DMA_TDFDQ 4
+#define DMA_TXGCR(x) (0x800 + (x) * 0x20)
+#define DMA_RXGCR(x) (0x808 + (x) * 0x20)
+#define RXHPCRA0 4
+
+#define GCR_CHAN_ENABLE (1 << 31)
+#define GCR_TEARDOWN (1 << 30)
+#define GCR_STARV_RETRY (1 << 24)
+#define GCR_DESC_TYPE_HOST (1 << 14)
+
+/* DMA scheduler */
+#define DMA_SCHED_CTRL 0
+#define DMA_SCHED_CTRL_EN (1 << 31)
+#define DMA_SCHED_WORD(x) ((x) * 4 + 0x800)
+
+#define SCHED_ENTRY0_CHAN(x) ((x) << 0)
+#define SCHED_ENTRY0_IS_RX (1 << 7)
+
+#define SCHED_ENTRY1_CHAN(x) ((x) << 8)
+#define SCHED_ENTRY1_IS_RX (1 << 15)
+
+#define SCHED_ENTRY2_CHAN(x) ((x) << 16)
+#define SCHED_ENTRY2_IS_RX (1 << 23)
+
+#define SCHED_ENTRY3_CHAN(x) ((x) << 24)
+#define SCHED_ENTRY3_IS_RX (1 << 31)
+
+/* Queue manager */
+/* 4 KiB of memory for descriptors, 2 for each endpoint */
+#define ALLOC_DECS_NUM 128
+#define DESCS_AREAS 1
+#define TOTAL_DESCS_NUM (ALLOC_DECS_NUM * DESCS_AREAS)
+#define QMGR_SCRATCH_SIZE (TOTAL_DESCS_NUM * 4)
+
+#define QMGR_LRAM0_BASE 0x80
+#define QMGR_LRAM_SIZE 0x84
+#define QMGR_LRAM1_BASE 0x88
+#define QMGR_MEMBASE(x) (0x1000 + (x) * 0x10)
+#define QMGR_MEMCTRL(x) (0x1004 + (x) * 0x10)
+#define QMGR_MEMCTRL_IDX_SH 16
+#define QMGR_MEMCTRL_DESC_SH 8
+
+#define QMGR_NUM_PEND 5
+#define QMGR_PEND(x) (0x90 + (x) * 4)
+
+#define QMGR_PENDING_SLOT_Q(x) (x / 32)
+#define QMGR_PENDING_BIT_Q(x) (x % 32)
+
+#define QMGR_QUEUE_A(n) (0x2000 + (n) * 0x10)
+#define QMGR_QUEUE_B(n) (0x2004 + (n) * 0x10)
+#define QMGR_QUEUE_C(n) (0x2008 + (n) * 0x10)
+#define QMGR_QUEUE_D(n) (0x200c + (n) * 0x10)
+
+/* Glue layer specific */
+/* USBSS / USB AM335x */
+#define USBSS_IRQ_STATUS 0x28
+#define USBSS_IRQ_ENABLER 0x2c
+#define USBSS_IRQ_CLEARR 0x30
+
+#define USBSS_IRQ_PD_COMP (1 << 2)
+
+struct cppi41_channel {
+ struct dma_chan chan;
+ struct dma_async_tx_descriptor txd;
+ struct cppi41_dd *cdd;
+ struct cppi41_desc *desc;
+ dma_addr_t desc_phys;
+ void __iomem *gcr_reg;
+ int is_tx;
+ u32 residue;
+
+ unsigned int q_num;
+ unsigned int q_comp_num;
+ unsigned int port_num;
+
+ unsigned td_retry;
+ unsigned td_queued:1;
+ unsigned td_seen:1;
+ unsigned td_desc_seen:1;
+};
+
+struct cppi41_desc {
+ u32 pd0;
+ u32 pd1;
+ u32 pd2;
+ u32 pd3;
+ u32 pd4;
+ u32 pd5;
+ u32 pd6;
+ u32 pd7;
+} __aligned(32);
+
+struct chan_queues {
+ u16 submit;
+ u16 complete;
+};
+
+struct cppi41_dd {
+ struct dma_device ddev;
+
+ void *qmgr_scratch;
+ dma_addr_t scratch_phys;
+
+ struct cppi41_desc *cd;
+ dma_addr_t descs_phys;
+ u32 first_td_desc;
+ struct cppi41_channel *chan_busy[ALLOC_DECS_NUM];
+
+ void __iomem *usbss_mem;
+ void __iomem *ctrl_mem;
+ void __iomem *sched_mem;
+ void __iomem *qmgr_mem;
+ unsigned int irq;
+ const struct chan_queues *queues_rx;
+ const struct chan_queues *queues_tx;
+ struct chan_queues td_queue;
+};
+
+#define FIST_COMPLETION_QUEUE 93
+static struct chan_queues usb_queues_tx[] = {
+ /* USB0 ENDP 1 */
+ [ 0] = { .submit = 32, .complete = 93},
+ [ 1] = { .submit = 34, .complete = 94},
+ [ 2] = { .submit = 36, .complete = 95},
+ [ 3] = { .submit = 38, .complete = 96},
+ [ 4] = { .submit = 40, .complete = 97},
+ [ 5] = { .submit = 42, .complete = 98},
+ [ 6] = { .submit = 44, .complete = 99},
+ [ 7] = { .submit = 46, .complete = 100},
+ [ 8] = { .submit = 48, .complete = 101},
+ [ 9] = { .submit = 50, .complete = 102},
+ [10] = { .submit = 52, .complete = 103},
+ [11] = { .submit = 54, .complete = 104},
+ [12] = { .submit = 56, .complete = 105},
+ [13] = { .submit = 58, .complete = 106},
+ [14] = { .submit = 60, .complete = 107},
+
+ /* USB1 ENDP1 */
+ [15] = { .submit = 62, .complete = 125},
+ [16] = { .submit = 64, .complete = 126},
+ [17] = { .submit = 66, .complete = 127},
+ [18] = { .submit = 68, .complete = 128},
+ [19] = { .submit = 70, .complete = 129},
+ [20] = { .submit = 72, .complete = 130},
+ [21] = { .submit = 74, .complete = 131},
+ [22] = { .submit = 76, .complete = 132},
+ [23] = { .submit = 78, .complete = 133},
+ [24] = { .submit = 80, .complete = 134},
+ [25] = { .submit = 82, .complete = 135},
+ [26] = { .submit = 84, .complete = 136},
+ [27] = { .submit = 86, .complete = 137},
+ [28] = { .submit = 88, .complete = 138},
+ [29] = { .submit = 90, .complete = 139},
+};
+
+static const struct chan_queues usb_queues_rx[] = {
+ /* USB0 ENDP 1 */
+ [ 0] = { .submit = 1, .complete = 109},
+ [ 1] = { .submit = 2, .complete = 110},
+ [ 2] = { .submit = 3, .complete = 111},
+ [ 3] = { .submit = 4, .complete = 112},
+ [ 4] = { .submit = 5, .complete = 113},
+ [ 5] = { .submit = 6, .complete = 114},
+ [ 6] = { .submit = 7, .complete = 115},
+ [ 7] = { .submit = 8, .complete = 116},
+ [ 8] = { .submit = 9, .complete = 117},
+ [ 9] = { .submit = 10, .complete = 118},
+ [10] = { .submit = 11, .complete = 119},
+ [11] = { .submit = 12, .complete = 120},
+ [12] = { .submit = 13, .complete = 121},
+ [13] = { .submit = 14, .complete = 122},
+ [14] = { .submit = 15, .complete = 123},
+
+ /* USB1 ENDP 1 */
+ [15] = { .submit = 16, .complete = 141},
+ [16] = { .submit = 17, .complete = 142},
+ [17] = { .submit = 18, .complete = 143},
+ [18] = { .submit = 19, .complete = 144},
+ [19] = { .submit = 20, .complete = 145},
+ [20] = { .submit = 21, .complete = 146},
+ [21] = { .submit = 22, .complete = 147},
+ [22] = { .submit = 23, .complete = 148},
+ [23] = { .submit = 24, .complete = 149},
+ [24] = { .submit = 25, .complete = 150},
+ [25] = { .submit = 26, .complete = 151},
+ [26] = { .submit = 27, .complete = 152},
+ [27] = { .submit = 28, .complete = 153},
+ [28] = { .submit = 29, .complete = 154},
+ [29] = { .submit = 30, .complete = 155},
+};
+
+struct cppi_glue_infos {
+ irqreturn_t (*isr)(int irq, void *data);
+ const struct chan_queues *queues_rx;
+ const struct chan_queues *queues_tx;
+ struct chan_queues td_queue;
+};
+
+static struct cppi41_channel *to_cpp41_chan(struct dma_chan *c)
+{
+ return container_of(c, struct cppi41_channel, chan);
+}
+
+static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
+{
+ struct cppi41_channel *c;
+ u32 descs_size;
+ u32 desc_num;
+
+ descs_size = sizeof(struct cppi41_desc) * ALLOC_DECS_NUM;
+
+ if (!((desc >= cdd->descs_phys) &&
+ (desc < (cdd->descs_phys + descs_size)))) {
+ return NULL;
+ }
+
+ desc_num = (desc - cdd->descs_phys) / sizeof(struct cppi41_desc);
+ BUG_ON(desc_num >= ALLOC_DECS_NUM);
+ c = cdd->chan_busy[desc_num];
+ cdd->chan_busy[desc_num] = NULL;
+ return c;
+}
+
+static void cppi_writel(u32 val, void *__iomem *mem)
+{
+ __raw_writel(val, mem);
+}
+
+static u32 cppi_readl(void *__iomem *mem)
+{
+ return __raw_readl(mem);
+}
+
+static u32 pd_trans_len(u32 val)
+{
+ return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1);
+}
+
+static irqreturn_t cppi41_irq(int irq, void *data)
+{
+ struct cppi41_dd *cdd = data;
+ struct cppi41_channel *c;
+ u32 status;
+ int i;
+
+ status = cppi_readl(cdd->usbss_mem + USBSS_IRQ_STATUS);
+ if (!(status & USBSS_IRQ_PD_COMP))
+ return IRQ_NONE;
+ cppi_writel(status, cdd->usbss_mem + USBSS_IRQ_STATUS);
+
+ for (i = QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE); i < QMGR_NUM_PEND;
+ i++) {
+ u32 val;
+ u32 q_num;
+
+ val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i));
+ if (i == QMGR_PENDING_SLOT_Q(FIST_COMPLETION_QUEUE) && val) {
+ u32 mask;
+ /* set the bit corresponding to completion queue 93 */
+ mask = 1 << QMGR_PENDING_BIT_Q(FIST_COMPLETION_QUEUE);
+ /* now set all bits for queues less than Q 93 */
+ mask--;
+ /* now invert and keep only Q 93+ set */
+ val &= ~mask;
+ }
+
+ if (val)
+ __iormb();
+
+ while (val) {
+ u32 desc;
+
+ q_num = __fls(val);
+ val &= ~(1 << q_num);
+ q_num += 32 * i;
+ desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(q_num));
+ desc &= ~0x1f;
+ c = desc_to_chan(cdd, desc);
+ if (WARN_ON(!c)) {
+ pr_err("%s() q %d desc %08x\n", __func__,
+ q_num, desc);
+ continue;
+ }
+ c->residue = pd_trans_len(c->desc->pd6) -
+ pd_trans_len(c->desc->pd0);
+
+ dma_cookie_complete(&c->txd);
+ c->txd.callback(c->txd.callback_param);
+ }
+ }
+ return IRQ_HANDLED;
+}
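A worked example of the masking in the interrupt handler above: with FIST_COMPLETION_QUEUE = 93, QMGR_PENDING_SLOT_Q(93) = 93 / 32 = 2 and QMGR_PENDING_BIT_Q(93) = 93 % 32 = 29, so mask starts out as 1 << 29 and the mask-- turns it into bits 0..28, i.e. queues 64..92 of pending register 2; val &= ~mask then leaves only completion queues 93 and above to be scanned.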
+
+static dma_cookie_t cppi41_tx_submit(struct dma_async_tx_descriptor *tx)
+{
+ dma_cookie_t cookie;
+
+ cookie = dma_cookie_assign(tx);
+
+ return cookie;
+}
+
+static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct cppi41_channel *c = to_cpp41_chan(chan);
+
+ dma_cookie_init(chan);
+ dma_async_tx_descriptor_init(&c->txd, chan);
+ c->txd.tx_submit = cppi41_tx_submit;
+
+ if (!c->is_tx)
+ cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);
+
+ return 0;
+}
+
+static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
+{
+}
+
+static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
+ dma_cookie_t cookie, struct dma_tx_state *txstate)
+{
+ struct cppi41_channel *c = to_cpp41_chan(chan);
+ enum dma_status ret;
+
+ /* lock */
+ ret = dma_cookie_status(chan, cookie, txstate);
+ if (txstate && ret == DMA_SUCCESS)
+ txstate->residue = c->residue;
+ /* unlock */
+
+ return ret;
+}
+
+static void push_desc_queue(struct cppi41_channel *c)
+{
+ struct cppi41_dd *cdd = c->cdd;
+ u32 desc_num;
+ u32 desc_phys;
+ u32 reg;
+
+ desc_phys = lower_32_bits(c->desc_phys);
+ desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
+ WARN_ON(cdd->chan_busy[desc_num]);
+ cdd->chan_busy[desc_num] = c;
+
+ reg = (sizeof(struct cppi41_desc) - 24) / 4;
+ reg |= desc_phys;
+ cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
+}
+
+static void cppi41_dma_issue_pending(struct dma_chan *chan)
+{
+ struct cppi41_channel *c = to_cpp41_chan(chan);
+ u32 reg;
+
+ c->residue = 0;
+
+ reg = GCR_CHAN_ENABLE;
+ if (!c->is_tx) {
+ reg |= GCR_STARV_RETRY;
+ reg |= GCR_DESC_TYPE_HOST;
+ reg |= c->q_comp_num;
+ }
+
+ cppi_writel(reg, c->gcr_reg);
+
+ /*
+ * We use __raw_writel() instead of writel(), so we have to make sure
+ * that the DMA descriptor in coherent memory has made it to main
+ * memory before starting the DMA engine.
+ */
+ __iowmb();
+ push_desc_queue(c);
+}
+
+static u32 get_host_pd0(u32 length)
+{
+ u32 reg;
+
+ reg = DESC_TYPE_HOST << DESC_TYPE;
+ reg |= length;
+
+ return reg;
+}
+
+static u32 get_host_pd1(struct cppi41_channel *c)
+{
+ u32 reg;
+
+ reg = 0;
+
+ return reg;
+}
+
+static u32 get_host_pd2(struct cppi41_channel *c)
+{
+ u32 reg;
+
+ reg = DESC_TYPE_USB;
+ reg |= c->q_comp_num;
+
+ return reg;
+}
+
+static u32 get_host_pd3(u32 length)
+{
+ u32 reg;
+
+ /* PD3 = packet size */
+ reg = length;
+
+ return reg;
+}
+
+static u32 get_host_pd6(u32 length)
+{
+ u32 reg;
+
+ /* PD6 buffer size */
+ reg = DESC_PD_COMPLETE;
+ reg |= length;
+
+ return reg;
+}
+
+static u32 get_host_pd4_or_7(u32 addr)
+{
+ u32 reg;
+
+ reg = addr;
+
+ return reg;
+}
+
+static u32 get_host_pd5(void)
+{
+ u32 reg;
+
+ reg = 0;
+
+ return reg;
+}
+
+static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
+ struct dma_chan *chan, struct scatterlist *sgl, unsigned sg_len,
+ enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
+{
+ struct cppi41_channel *c = to_cpp41_chan(chan);
+ struct cppi41_desc *d;
+ struct scatterlist *sg;
+ unsigned int i;
+ unsigned int num;
+
+ num = 0;
+ d = c->desc;
+ for_each_sg(sgl, sg, sg_len, i) {
+ u32 addr;
+ u32 len;
+
+ /* We need to use more than one desc once musb supports sg */
+ BUG_ON(num > 0);
+ addr = lower_32_bits(sg_dma_address(sg));
+ len = sg_dma_len(sg);
+
+ d->pd0 = get_host_pd0(len);
+ d->pd1 = get_host_pd1(c);
+ d->pd2 = get_host_pd2(c);
+ d->pd3 = get_host_pd3(len);
+ d->pd4 = get_host_pd4_or_7(addr);
+ d->pd5 = get_host_pd5();
+ d->pd6 = get_host_pd6(len);
+ d->pd7 = get_host_pd4_or_7(addr);
+
+ d++;
+ }
+
+ return &c->txd;
+}
+
+static int cpp41_cfg_chan(struct cppi41_channel *c,
+ struct dma_slave_config *cfg)
+{
+ return 0;
+}
+
+static void cppi41_compute_td_desc(struct cppi41_desc *d)
+{
+ d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
+}
+
+static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
+{
+ u32 desc;
+
+ desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
+ desc &= ~0x1f;
+ return desc;
+}
+
+static int cppi41_tear_down_chan(struct cppi41_channel *c)
+{
+ struct cppi41_dd *cdd = c->cdd;
+ struct cppi41_desc *td;
+ u32 reg;
+ u32 desc_phys;
+ u32 td_desc_phys;
+
+ td = cdd->cd;
+ td += cdd->first_td_desc;
+
+ td_desc_phys = cdd->descs_phys;
+ td_desc_phys += cdd->first_td_desc * sizeof(struct cppi41_desc);
+
+ if (!c->td_queued) {
+ cppi41_compute_td_desc(td);
+ __iowmb();
+
+ reg = (sizeof(struct cppi41_desc) - 24) / 4;
+ reg |= td_desc_phys;
+ cppi_writel(reg, cdd->qmgr_mem +
+ QMGR_QUEUE_D(cdd->td_queue.submit));
+
+ reg = GCR_CHAN_ENABLE;
+ if (!c->is_tx) {
+ reg |= GCR_STARV_RETRY;
+ reg |= GCR_DESC_TYPE_HOST;
+ reg |= c->q_comp_num;
+ }
+ reg |= GCR_TEARDOWN;
+ cppi_writel(reg, c->gcr_reg);
+ c->td_queued = 1;
+ c->td_retry = 100;
+ }
+
+ if (!c->td_seen) {
+ unsigned td_comp_queue;
+
+ if (c->is_tx)
+ td_comp_queue = cdd->td_queue.complete;
+ else
+ td_comp_queue = c->q_comp_num;
+
+ desc_phys = cppi41_pop_desc(cdd, td_comp_queue);
+ if (desc_phys) {
+ __iormb();
+
+ if (desc_phys == td_desc_phys) {
+ u32 pd0;
+ pd0 = td->pd0;
+ WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD);
+ WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX));
+ WARN_ON((pd0 & 0x1f) != c->port_num);
+ } else {
+ WARN_ON_ONCE(1);
+ }
+ c->td_seen = 1;
+ }
+ }
+ if (!c->td_desc_seen) {
+ desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
+ if (desc_phys) {
+ __iormb();
+ WARN_ON(c->desc_phys != desc_phys);
+ c->td_desc_seen = 1;
+ }
+ }
+ c->td_retry--;
+ /*
+ * If the TX descriptor / channel is in use, the caller needs to poke
+ * the TD bit multiple times. After that the hardware releases the
+ * transfer descriptor followed by the TD descriptor. Waiting does not
+ * seem to make any difference.
+ * RX seems to be thrown out right away. However, once the TearDown
+ * descriptor gets through we are done. If we have seen the transfer
+ * descriptor before the TD, we fetch it from the submit queue; it has
+ * to be there waiting for us.
+ */
+ if (!c->td_seen && c->td_retry)
+ return -EAGAIN;
+
+ WARN_ON(!c->td_retry);
+ if (!c->td_desc_seen) {
+ desc_phys = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
+ WARN_ON(!desc_phys);
+ }
+
+ c->td_queued = 0;
+ c->td_seen = 0;
+ c->td_desc_seen = 0;
+ cppi_writel(0, c->gcr_reg);
+ return 0;
+}
+
+static int cppi41_stop_chan(struct dma_chan *chan)
+{
+ struct cppi41_channel *c = to_cpp41_chan(chan);
+ struct cppi41_dd *cdd = c->cdd;
+ u32 desc_num;
+ u32 desc_phys;
+ int ret;
+
+ ret = cppi41_tear_down_chan(c);
+ if (ret)
+ return ret;
+
+ desc_phys = lower_32_bits(c->desc_phys);
+ desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
+ WARN_ON(!cdd->chan_busy[desc_num]);
+ cdd->chan_busy[desc_num] = NULL;
+
+ return 0;
+}
+
+static int cppi41_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct cppi41_channel *c = to_cpp41_chan(chan);
+ int ret;
+
+ switch (cmd) {
+ case DMA_SLAVE_CONFIG:
+ ret = cpp41_cfg_chan(c, (struct dma_slave_config *) arg);
+ break;
+
+ case DMA_TERMINATE_ALL:
+ ret = cppi41_stop_chan(chan);
+ break;
+
+ default:
+ ret = -ENXIO;
+ break;
+ }
+ return ret;
+}
+
+static void cleanup_chans(struct cppi41_dd *cdd)
+{
+ while (!list_empty(&cdd->ddev.channels)) {
+ struct cppi41_channel *cchan;
+
+ cchan = list_first_entry(&cdd->ddev.channels,
+ struct cppi41_channel, chan.device_node);
+ list_del(&cchan->chan.device_node);
+ kfree(cchan);
+ }
+}
+
+static int cppi41_add_chans(struct platform_device *pdev, struct cppi41_dd *cdd)
+{
+ struct cppi41_channel *cchan;
+ int i;
+ int ret;
+ u32 n_chans;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "#dma-channels",
+ &n_chans);
+ if (ret)
+ return ret;
+ /*
+ * A channel can only be used for TX or for RX, so we add twice as
+ * many dma channels: USB needs a separate channel for each direction.
+ */
+ n_chans *= 2;
+
+ for (i = 0; i < n_chans; i++) {
+ cchan = kzalloc(sizeof(*cchan), GFP_KERNEL);
+ if (!cchan)
+ goto err;
+
+ cchan->cdd = cdd;
+ if (i & 1) {
+ cchan->gcr_reg = cdd->ctrl_mem + DMA_TXGCR(i >> 1);
+ cchan->is_tx = 1;
+ } else {
+ cchan->gcr_reg = cdd->ctrl_mem + DMA_RXGCR(i >> 1);
+ cchan->is_tx = 0;
+ }
+ cchan->port_num = i >> 1;
+ cchan->desc = &cdd->cd[i];
+ cchan->desc_phys = cdd->descs_phys;
+ cchan->desc_phys += i * sizeof(struct cppi41_desc);
+ cchan->chan.device = &cdd->ddev;
+ list_add_tail(&cchan->chan.device_node, &cdd->ddev.channels);
+ }
+ cdd->first_td_desc = n_chans;
+
+ return 0;
+err:
+ cleanup_chans(cdd);
+ return -ENOMEM;
+}
+
+static void purge_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
+{
+ unsigned int mem_decs;
+ int i;
+
+ mem_decs = ALLOC_DECS_NUM * sizeof(struct cppi41_desc);
+
+ for (i = 0; i < DESCS_AREAS; i++) {
+
+ cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
+ cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));
+
+ dma_free_coherent(&pdev->dev, mem_decs, cdd->cd,
+ cdd->descs_phys);
+ }
+}
+
+static void disable_sched(struct cppi41_dd *cdd)
+{
+ cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
+}
+
+static void deinit_cpii41(struct platform_device *pdev, struct cppi41_dd *cdd)
+{
+ disable_sched(cdd);
+
+ purge_descs(pdev, cdd);
+
+ cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
+	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
+ dma_free_coherent(&pdev->dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
+ cdd->scratch_phys);
+}
+
+static int init_descs(struct platform_device *pdev, struct cppi41_dd *cdd)
+{
+ unsigned int desc_size;
+ unsigned int mem_decs;
+ int i;
+ u32 reg;
+ u32 idx;
+
+ BUILD_BUG_ON(sizeof(struct cppi41_desc) &
+ (sizeof(struct cppi41_desc) - 1));
+ BUILD_BUG_ON(sizeof(struct cppi41_desc) < 32);
+ BUILD_BUG_ON(ALLOC_DECS_NUM < 32);
+
+ desc_size = sizeof(struct cppi41_desc);
+ mem_decs = ALLOC_DECS_NUM * desc_size;
+
+ idx = 0;
+ for (i = 0; i < DESCS_AREAS; i++) {
+
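+		/*
+		 * Assemble the region control word: starting descriptor
+		 * index, descriptor size encoded as log2(size) - 5 and
+		 * descriptor count encoded as log2(count) - 5.
+		 */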
+ reg = idx << QMGR_MEMCTRL_IDX_SH;
+ reg |= (ilog2(desc_size) - 5) << QMGR_MEMCTRL_DESC_SH;
+ reg |= ilog2(ALLOC_DECS_NUM) - 5;
+
+ BUILD_BUG_ON(DESCS_AREAS != 1);
+ cdd->cd = dma_alloc_coherent(&pdev->dev, mem_decs,
+ &cdd->descs_phys, GFP_KERNEL);
+ if (!cdd->cd)
+ return -ENOMEM;
+
+ cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
+ cppi_writel(reg, cdd->qmgr_mem + QMGR_MEMCTRL(i));
+
+ idx += ALLOC_DECS_NUM;
+ }
+ return 0;
+}
+
+static void init_sched(struct cppi41_dd *cdd)
+{
+ unsigned ch;
+ unsigned word;
+ u32 reg;
+
+ word = 0;
+ cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
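+	/*
+	 * Each scheduler word packs four entries: channel ch as TX and
+	 * as RX, followed by channel ch + 1 as TX and as RX.
+	 */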
+ for (ch = 0; ch < 15 * 2; ch += 2) {
+
+ reg = SCHED_ENTRY0_CHAN(ch);
+ reg |= SCHED_ENTRY1_CHAN(ch) | SCHED_ENTRY1_IS_RX;
+
+ reg |= SCHED_ENTRY2_CHAN(ch + 1);
+ reg |= SCHED_ENTRY3_CHAN(ch + 1) | SCHED_ENTRY3_IS_RX;
+ cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word));
+ word++;
+ }
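+	/*
+	 * Presumably the index of the last scheduler entry: 15 channel
+	 * pairs, each channel scheduled twice (TX and RX), minus one.
+	 */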
+ reg = 15 * 2 * 2 - 1;
+ reg |= DMA_SCHED_CTRL_EN;
+ cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
+}
+
+static int init_cppi41(struct platform_device *pdev, struct cppi41_dd *cdd)
+{
+ int ret;
+
+ BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
+ cdd->qmgr_scratch = dma_alloc_coherent(&pdev->dev, QMGR_SCRATCH_SIZE,
+ &cdd->scratch_phys, GFP_KERNEL);
+ if (!cdd->qmgr_scratch)
+ return -ENOMEM;
+
+ cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
+ cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
+ cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
+
+ ret = init_descs(pdev, cdd);
+ if (ret)
+ goto err_td;
+
+ cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ);
+ init_sched(cdd);
+ return 0;
+err_td:
+ deinit_cpii41(pdev, cdd);
+ return ret;
+}
+
+static struct platform_driver cpp41_dma_driver;
+/*
+ * The param format is:
+ * X Y
+ * X: Port
+ * Y: 0 = RX else TX
+ */
+#define INFO_PORT 0
+#define INFO_IS_TX 1
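+
+/*
+ * Illustrative device tree usage (the label and values below are made
+ * up, not taken from a binding document):
+ *
+ *	dmas = <&cppi41dma 0 1>;	first cell: port, second: 1 = TX
+ */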
+
+static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param)
+{
+ struct cppi41_channel *cchan;
+ struct cppi41_dd *cdd;
+ const struct chan_queues *queues;
+ u32 *num = param;
+
+ if (chan->device->dev->driver != &cpp41_dma_driver.driver)
+ return false;
+
+ cchan = to_cpp41_chan(chan);
+
+ if (cchan->port_num != num[INFO_PORT])
+ return false;
+
+ if (cchan->is_tx && !num[INFO_IS_TX])
+ return false;
+ cdd = cchan->cdd;
+ if (cchan->is_tx)
+ queues = cdd->queues_tx;
+ else
+ queues = cdd->queues_rx;
+
+ BUILD_BUG_ON(ARRAY_SIZE(usb_queues_rx) != ARRAY_SIZE(usb_queues_tx));
+	if (WARN_ON(cchan->port_num >= ARRAY_SIZE(usb_queues_rx)))
+ return false;
+
+ cchan->q_num = queues[cchan->port_num].submit;
+ cchan->q_comp_num = queues[cchan->port_num].complete;
+ return true;
+}
+
+static struct of_dma_filter_info cpp41_dma_info = {
+ .filter_fn = cpp41_dma_filter_fn,
+};
+
+static struct dma_chan *cppi41_dma_xlate(struct of_phandle_args *dma_spec,
+ struct of_dma *ofdma)
+{
+ int count = dma_spec->args_count;
+ struct of_dma_filter_info *info = ofdma->of_dma_data;
+
+ if (!info || !info->filter_fn)
+ return NULL;
+
+ if (count != 2)
+ return NULL;
+
+ return dma_request_channel(info->dma_cap, info->filter_fn,
+ &dma_spec->args[0]);
+}
+
+static const struct cppi_glue_infos usb_infos = {
+ .isr = cppi41_irq,
+ .queues_rx = usb_queues_rx,
+ .queues_tx = usb_queues_tx,
+ .td_queue = { .submit = 31, .complete = 0 },
+};
+
+static const struct of_device_id cppi41_dma_ids[] = {
+ { .compatible = "ti,am3359-cppi41", .data = &usb_infos},
+ {},
+};
+MODULE_DEVICE_TABLE(of, cppi41_dma_ids);
+
+static const struct cppi_glue_infos *get_glue_info(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id;
+
+ of_id = of_match_node(cppi41_dma_ids, pdev->dev.of_node);
+ if (!of_id)
+ return NULL;
+ return of_id->data;
+}
+
+static int cppi41_dma_probe(struct platform_device *pdev)
+{
+ struct cppi41_dd *cdd;
+ const struct cppi_glue_infos *glue_info;
+ int irq;
+ int ret;
+
+ glue_info = get_glue_info(pdev);
+ if (!glue_info)
+ return -EINVAL;
+
+ cdd = kzalloc(sizeof(*cdd), GFP_KERNEL);
+ if (!cdd)
+ return -ENOMEM;
+
+ dma_cap_set(DMA_SLAVE, cdd->ddev.cap_mask);
+ cdd->ddev.device_alloc_chan_resources = cppi41_dma_alloc_chan_resources;
+ cdd->ddev.device_free_chan_resources = cppi41_dma_free_chan_resources;
+ cdd->ddev.device_tx_status = cppi41_dma_tx_status;
+ cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
+ cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
+ cdd->ddev.device_control = cppi41_dma_control;
+ cdd->ddev.dev = &pdev->dev;
+ INIT_LIST_HEAD(&cdd->ddev.channels);
+ cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;
+
+ cdd->usbss_mem = of_iomap(pdev->dev.of_node, 0);
+ cdd->ctrl_mem = of_iomap(pdev->dev.of_node, 1);
+ cdd->sched_mem = of_iomap(pdev->dev.of_node, 2);
+ cdd->qmgr_mem = of_iomap(pdev->dev.of_node, 3);
+
+ if (!cdd->usbss_mem || !cdd->ctrl_mem || !cdd->sched_mem ||
+ !cdd->qmgr_mem) {
+ ret = -ENXIO;
+ goto err_remap;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret)
+ goto err_get_sync;
+
+ cdd->queues_rx = glue_info->queues_rx;
+ cdd->queues_tx = glue_info->queues_tx;
+ cdd->td_queue = glue_info->td_queue;
+
+ ret = init_cppi41(pdev, cdd);
+ if (ret)
+ goto err_init_cppi;
+
+ ret = cppi41_add_chans(pdev, cdd);
+ if (ret)
+ goto err_chans;
+
+	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (!irq) {
+		ret = -EINVAL;
+		goto err_irq;
+	}
+
+ cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
+
+ ret = request_irq(irq, glue_info->isr, IRQF_SHARED,
+ dev_name(&pdev->dev), cdd);
+ if (ret)
+ goto err_irq;
+ cdd->irq = irq;
+
+ ret = dma_async_device_register(&cdd->ddev);
+ if (ret)
+ goto err_dma_reg;
+
+ ret = of_dma_controller_register(pdev->dev.of_node,
+ cppi41_dma_xlate, &cpp41_dma_info);
+ if (ret)
+ goto err_of;
+
+ platform_set_drvdata(pdev, cdd);
+ return 0;
+err_of:
+ dma_async_device_unregister(&cdd->ddev);
+err_dma_reg:
+ free_irq(irq, cdd);
+err_irq:
+ cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
+ cleanup_chans(cdd);
+err_chans:
+ deinit_cpii41(pdev, cdd);
+err_init_cppi:
+ pm_runtime_put(&pdev->dev);
+err_get_sync:
+ pm_runtime_disable(&pdev->dev);
+ iounmap(cdd->usbss_mem);
+ iounmap(cdd->ctrl_mem);
+ iounmap(cdd->sched_mem);
+ iounmap(cdd->qmgr_mem);
+err_remap:
+ kfree(cdd);
+ return ret;
+}
+
+static int cppi41_dma_remove(struct platform_device *pdev)
+{
+ struct cppi41_dd *cdd = platform_get_drvdata(pdev);
+
+ of_dma_controller_free(pdev->dev.of_node);
+ dma_async_device_unregister(&cdd->ddev);
+
+ cppi_writel(0, cdd->usbss_mem + USBSS_IRQ_CLEARR);
+ free_irq(cdd->irq, cdd);
+ cleanup_chans(cdd);
+ deinit_cpii41(pdev, cdd);
+ iounmap(cdd->usbss_mem);
+ iounmap(cdd->ctrl_mem);
+ iounmap(cdd->sched_mem);
+ iounmap(cdd->qmgr_mem);
+ pm_runtime_put(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ kfree(cdd);
+ return 0;
+}
+
+static struct platform_driver cpp41_dma_driver = {
+ .probe = cppi41_dma_probe,
+ .remove = cppi41_dma_remove,
+ .driver = {
+ .name = "cppi41-dma-engine",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(cppi41_dma_ids),
+ },
+};
+
+module_platform_driver(cpp41_dma_driver);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 9e56745f87b..99af4db5948 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -87,7 +87,8 @@ static struct dma_chan *dev_to_dma_chan(struct device *dev)
return chan_dev->chan;
}
-static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t memcpy_count_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct dma_chan *chan;
unsigned long count = 0;
@@ -106,9 +107,10 @@ static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *at
return err;
}
+static DEVICE_ATTR_RO(memcpy_count);
-static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t bytes_transferred_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct dma_chan *chan;
unsigned long count = 0;
@@ -127,8 +129,10 @@ static ssize_t show_bytes_transferred(struct device *dev, struct device_attribut
return err;
}
+static DEVICE_ATTR_RO(bytes_transferred);
-static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct dma_chan *chan;
int err;
@@ -143,13 +147,15 @@ static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, ch
return err;
}
+static DEVICE_ATTR_RO(in_use);
-static struct device_attribute dma_attrs[] = {
- __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
- __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
- __ATTR(in_use, S_IRUGO, show_in_use, NULL),
- __ATTR_NULL
+static struct attribute *dma_dev_attrs[] = {
+ &dev_attr_memcpy_count.attr,
+ &dev_attr_bytes_transferred.attr,
+ &dev_attr_in_use.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(dma_dev);
static void chan_dev_release(struct device *dev)
{
@@ -167,7 +173,7 @@ static void chan_dev_release(struct device *dev)
static struct class dma_devclass = {
.name = "dma",
- .dev_attrs = dma_attrs,
+ .dev_groups = dma_dev_groups,
.dev_release = chan_dev_release,
};
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index ce3dc3e9688..0bbdea5059f 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -867,6 +867,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "Cannot find proper base address\n");
+ err = -ENODEV;
goto err_disable_pdev;
}
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 593827b3fdd..fa645d82500 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2505,6 +2505,10 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
/* Assign cookies to all nodes */
while (!list_empty(&last->node)) {
desc = list_entry(last->node.next, struct dma_pl330_desc, node);
+ if (pch->cyclic) {
+ desc->txd.callback = last->txd.callback;
+ desc->txd.callback_param = last->txd.callback_param;
+ }
dma_cookie_assign(&desc->txd);
@@ -2688,45 +2692,82 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
size_t period_len, enum dma_transfer_direction direction,
unsigned long flags, void *context)
{
- struct dma_pl330_desc *desc;
+ struct dma_pl330_desc *desc = NULL, *first = NULL;
struct dma_pl330_chan *pch = to_pchan(chan);
+ struct dma_pl330_dmac *pdmac = pch->dmac;
+ unsigned int i;
dma_addr_t dst;
dma_addr_t src;
- desc = pl330_get_desc(pch);
- if (!desc) {
- dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
- __func__, __LINE__);
+ if (len % period_len != 0)
return NULL;
- }
- switch (direction) {
- case DMA_MEM_TO_DEV:
- desc->rqcfg.src_inc = 1;
- desc->rqcfg.dst_inc = 0;
- desc->req.rqtype = MEMTODEV;
- src = dma_addr;
- dst = pch->fifo_addr;
- break;
- case DMA_DEV_TO_MEM:
- desc->rqcfg.src_inc = 0;
- desc->rqcfg.dst_inc = 1;
- desc->req.rqtype = DEVTOMEM;
- src = pch->fifo_addr;
- dst = dma_addr;
- break;
- default:
+ if (!is_slave_direction(direction)) {
dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n",
__func__, __LINE__);
return NULL;
}
- desc->rqcfg.brst_size = pch->burst_sz;
- desc->rqcfg.brst_len = 1;
+ for (i = 0; i < len / period_len; i++) {
+ desc = pl330_get_desc(pch);
+ if (!desc) {
+ dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n",
+ __func__, __LINE__);
- pch->cyclic = true;
+ if (!first)
+ return NULL;
+
+ spin_lock_irqsave(&pdmac->pool_lock, flags);
+
+ while (!list_empty(&first->node)) {
+ desc = list_entry(first->node.next,
+ struct dma_pl330_desc, node);
+ list_move_tail(&desc->node, &pdmac->desc_pool);
+ }
+
+ list_move_tail(&first->node, &pdmac->desc_pool);
- fill_px(&desc->px, dst, src, period_len);
+ spin_unlock_irqrestore(&pdmac->pool_lock, flags);
+
+ return NULL;
+ }
+
+ switch (direction) {
+ case DMA_MEM_TO_DEV:
+ desc->rqcfg.src_inc = 1;
+ desc->rqcfg.dst_inc = 0;
+ desc->req.rqtype = MEMTODEV;
+ src = dma_addr;
+ dst = pch->fifo_addr;
+ break;
+ case DMA_DEV_TO_MEM:
+ desc->rqcfg.src_inc = 0;
+ desc->rqcfg.dst_inc = 1;
+ desc->req.rqtype = DEVTOMEM;
+ src = pch->fifo_addr;
+ dst = dma_addr;
+ break;
+ default:
+ break;
+ }
+
+ desc->rqcfg.brst_size = pch->burst_sz;
+ desc->rqcfg.brst_len = 1;
+ fill_px(&desc->px, dst, src, period_len);
+
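+		/* Chain this period's descriptor onto the first one's list. */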
+ if (!first)
+ first = desc;
+ else
+ list_add_tail(&desc->node, &first->node);
+
+ dma_addr += period_len;
+ }
+
+ if (!desc)
+ return NULL;
+
+ pch->cyclic = true;
+ desc->txd.flags = flags;
return &desc->txd;
}
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
index b67f45f5c27..5039fbc8825 100644
--- a/drivers/dma/sh/shdma.c
+++ b/drivers/dma/sh/shdma.c
@@ -400,8 +400,8 @@ static size_t sh_dmae_get_partial(struct shdma_chan *schan,
shdma_chan);
struct sh_dmae_desc *sh_desc = container_of(sdesc,
struct sh_dmae_desc, shdma_desc);
- return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
- sh_chan->xmit_shift;
+ return sh_desc->hw.tcr -
+ (sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift);
}
/* Called from error IRQ or NMI */
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 8b6a0343c22..3c9e4e98c65 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -123,7 +123,7 @@ static void f15h_select_dct(struct amd64_pvt *pvt, u8 dct)
u32 reg = 0;
amd64_read_pci_cfg(pvt->F1, DCT_CFG_SEL, &reg);
- reg &= 0xfffffffe;
+ reg &= (pvt->model >= 0x30) ? ~3 : ~1;
reg |= dct;
amd64_write_pci_cfg(pvt->F1, DCT_CFG_SEL, reg);
}
@@ -133,8 +133,9 @@ static int f15_read_dct_pci_cfg(struct amd64_pvt *pvt, int addr, u32 *val,
{
u8 dct = 0;
+ /* For F15 M30h, the second dct is DCT 3, refer to BKDG Section 2.10 */
if (addr >= 0x140 && addr <= 0x1a0) {
- dct = 1;
+ dct = (pvt->model >= 0x30) ? 3 : 1;
addr -= 0x100;
}
@@ -202,11 +203,11 @@ static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 bw)
struct amd64_pvt *pvt = mci->pvt_info;
u32 min_scrubrate = 0x5;
- if (boot_cpu_data.x86 == 0xf)
+ if (pvt->fam == 0xf)
min_scrubrate = 0x0;
- /* F15h Erratum #505 */
- if (boot_cpu_data.x86 == 0x15)
+ /* Erratum #505 */
+ if (pvt->fam == 0x15 && pvt->model < 0x10)
f15h_select_dct(pvt, 0);
return __amd64_set_scrub_rate(pvt->F3, bw, min_scrubrate);
@@ -218,8 +219,8 @@ static int amd64_get_scrub_rate(struct mem_ctl_info *mci)
u32 scrubval = 0;
int i, retval = -EINVAL;
- /* F15h Erratum #505 */
- if (boot_cpu_data.x86 == 0x15)
+ /* Erratum #505 */
+ if (pvt->fam == 0x15 && pvt->model < 0x10)
f15h_select_dct(pvt, 0);
amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval);
@@ -335,7 +336,7 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
u64 csbase, csmask, base_bits, mask_bits;
u8 addr_shift;
- if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
+ if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
csbase = pvt->csels[dct].csbases[csrow];
csmask = pvt->csels[dct].csmasks[csrow];
base_bits = GENMASK(21, 31) | GENMASK(9, 15);
@@ -343,10 +344,11 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
addr_shift = 4;
/*
- * F16h needs two addr_shift values: 8 for high and 6 for low
- * (cf. F16h BKDG).
- */
- } else if (boot_cpu_data.x86 == 0x16) {
+ * F16h and F15h, models 30h and later need two addr_shift values:
+ * 8 for high and 6 for low (cf. F16h BKDG).
+ */
+ } else if (pvt->fam == 0x16 ||
+ (pvt->fam == 0x15 && pvt->model >= 0x30)) {
csbase = pvt->csels[dct].csbases[csrow];
csmask = pvt->csels[dct].csmasks[csrow >> 1];
@@ -367,7 +369,7 @@ static void get_cs_base_and_mask(struct amd64_pvt *pvt, int csrow, u8 dct,
csmask = pvt->csels[dct].csmasks[csrow >> 1];
addr_shift = 8;
- if (boot_cpu_data.x86 == 0x15)
+ if (pvt->fam == 0x15)
base_bits = mask_bits = GENMASK(19,30) | GENMASK(5,13);
else
base_bits = mask_bits = GENMASK(19,28) | GENMASK(5,13);
@@ -447,14 +449,14 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
struct amd64_pvt *pvt = mci->pvt_info;
/* only revE and later have the DRAM Hole Address Register */
- if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_E) {
+ if (pvt->fam == 0xf && pvt->ext_model < K8_REV_E) {
edac_dbg(1, " revision %d for node %d does not support DHAR\n",
pvt->ext_model, pvt->mc_node_id);
return 1;
}
/* valid for Fam10h and above */
- if (boot_cpu_data.x86 >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
+ if (pvt->fam >= 0x10 && !dhar_mem_hoist_valid(pvt)) {
edac_dbg(1, " Dram Memory Hoisting is DISABLED on this system\n");
return 1;
}
@@ -486,10 +488,8 @@ int amd64_get_dram_hole_info(struct mem_ctl_info *mci, u64 *hole_base,
*hole_base = dhar_base(pvt);
*hole_size = (1ULL << 32) - *hole_base;
- if (boot_cpu_data.x86 > 0xf)
- *hole_offset = f10_dhar_offset(pvt);
- else
- *hole_offset = k8_dhar_offset(pvt);
+ *hole_offset = (pvt->fam > 0xf) ? f10_dhar_offset(pvt)
+ : k8_dhar_offset(pvt);
edac_dbg(1, " DHAR info for node %d base 0x%lx offset 0x%lx size 0x%lx\n",
pvt->mc_node_id, (unsigned long)*hole_base,
@@ -663,7 +663,7 @@ static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
u8 bit;
unsigned long edac_cap = EDAC_FLAG_NONE;
- bit = (boot_cpu_data.x86 > 0xf || pvt->ext_model >= K8_REV_F)
+ bit = (pvt->fam > 0xf || pvt->ext_model >= K8_REV_F)
? 19
: 17;
@@ -675,7 +675,7 @@ static unsigned long amd64_determine_edac_cap(struct amd64_pvt *pvt)
static void amd64_debug_display_dimm_sizes(struct amd64_pvt *, u8);
-static void amd64_dump_dramcfg_low(u32 dclr, int chan)
+static void amd64_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
{
edac_dbg(1, "F2x%d90 (DRAM Cfg Low): 0x%08x\n", chan, dclr);
@@ -686,7 +686,7 @@ static void amd64_dump_dramcfg_low(u32 dclr, int chan)
edac_dbg(1, " PAR/ERR parity: %s\n",
(dclr & BIT(8)) ? "enabled" : "disabled");
- if (boot_cpu_data.x86 == 0x10)
+ if (pvt->fam == 0x10)
edac_dbg(1, " DCT 128bit mode width: %s\n",
(dclr & BIT(11)) ? "128b" : "64b");
@@ -709,21 +709,21 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
(pvt->nbcap & NBCAP_SECDED) ? "yes" : "no",
(pvt->nbcap & NBCAP_CHIPKILL) ? "yes" : "no");
- amd64_dump_dramcfg_low(pvt->dclr0, 0);
+ amd64_dump_dramcfg_low(pvt, pvt->dclr0, 0);
edac_dbg(1, "F3xB0 (Online Spare): 0x%08x\n", pvt->online_spare);
edac_dbg(1, "F1xF0 (DRAM Hole Address): 0x%08x, base: 0x%08x, offset: 0x%08x\n",
pvt->dhar, dhar_base(pvt),
- (boot_cpu_data.x86 == 0xf) ? k8_dhar_offset(pvt)
- : f10_dhar_offset(pvt));
+ (pvt->fam == 0xf) ? k8_dhar_offset(pvt)
+ : f10_dhar_offset(pvt));
edac_dbg(1, " DramHoleValid: %s\n", dhar_valid(pvt) ? "yes" : "no");
amd64_debug_display_dimm_sizes(pvt, 0);
/* everything below this point is Fam10h and above */
- if (boot_cpu_data.x86 == 0xf)
+ if (pvt->fam == 0xf)
return;
amd64_debug_display_dimm_sizes(pvt, 1);
@@ -732,17 +732,20 @@ static void dump_misc_regs(struct amd64_pvt *pvt)
/* Only if NOT ganged does dclr1 have valid info */
if (!dct_ganging_enabled(pvt))
- amd64_dump_dramcfg_low(pvt->dclr1, 1);
+ amd64_dump_dramcfg_low(pvt, pvt->dclr1, 1);
}
/*
- * see BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
+ * See BKDG, F2x[1,0][5C:40], F2[1,0][6C:60]
*/
static void prep_chip_selects(struct amd64_pvt *pvt)
{
- if (boot_cpu_data.x86 == 0xf && pvt->ext_model < K8_REV_F) {
+ if (pvt->fam == 0xf && pvt->ext_model < K8_REV_F) {
pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 8;
+ } else if (pvt->fam == 0x15 && pvt->model >= 0x30) {
+ pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 4;
+ pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 2;
} else {
pvt->csels[0].b_cnt = pvt->csels[1].b_cnt = 8;
pvt->csels[0].m_cnt = pvt->csels[1].m_cnt = 4;
@@ -768,7 +771,7 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
edac_dbg(0, " DCSB0[%d]=0x%08x reg: F2x%x\n",
cs, *base0, reg0);
- if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+ if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
continue;
if (!amd64_read_dct_pci_cfg(pvt, reg1, base1))
@@ -786,7 +789,7 @@ static void read_dct_base_mask(struct amd64_pvt *pvt)
edac_dbg(0, " DCSM0[%d]=0x%08x reg: F2x%x\n",
cs, *mask0, reg0);
- if (boot_cpu_data.x86 == 0xf || dct_ganging_enabled(pvt))
+ if (pvt->fam == 0xf || dct_ganging_enabled(pvt))
continue;
if (!amd64_read_dct_pci_cfg(pvt, reg1, mask1))
@@ -800,9 +803,9 @@ static enum mem_type amd64_determine_memory_type(struct amd64_pvt *pvt, int cs)
enum mem_type type;
/* F15h supports only DDR3 */
- if (boot_cpu_data.x86 >= 0x15)
+ if (pvt->fam >= 0x15)
type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
- else if (boot_cpu_data.x86 == 0x10 || pvt->ext_model >= K8_REV_F) {
+ else if (pvt->fam == 0x10 || pvt->ext_model >= K8_REV_F) {
if (pvt->dchr0 & DDR3_MODE)
type = (pvt->dclr0 & BIT(16)) ? MEM_DDR3 : MEM_RDDR3;
else
@@ -835,14 +838,13 @@ static int k8_early_channel_count(struct amd64_pvt *pvt)
}
/* On F10h and later ErrAddr is MC4_ADDR[47:1] */
-static u64 get_error_address(struct mce *m)
+static u64 get_error_address(struct amd64_pvt *pvt, struct mce *m)
{
- struct cpuinfo_x86 *c = &boot_cpu_data;
u64 addr;
u8 start_bit = 1;
u8 end_bit = 47;
- if (c->x86 == 0xf) {
+ if (pvt->fam == 0xf) {
start_bit = 3;
end_bit = 39;
}
@@ -852,7 +854,7 @@ static u64 get_error_address(struct mce *m)
/*
* Erratum 637 workaround
*/
- if (c->x86 == 0x15) {
+ if (pvt->fam == 0x15) {
struct amd64_pvt *pvt;
u64 cc6_base, tmp_addr;
u32 tmp;
@@ -916,15 +918,15 @@ static struct pci_dev *pci_get_related_function(unsigned int vendor,
static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
{
struct amd_northbridge *nb;
- struct pci_dev *misc, *f1 = NULL;
- struct cpuinfo_x86 *c = &boot_cpu_data;
+ struct pci_dev *f1 = NULL;
+ unsigned int pci_func;
int off = range << 3;
u32 llim;
amd64_read_pci_cfg(pvt->F1, DRAM_BASE_LO + off, &pvt->ranges[range].base.lo);
amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_LO + off, &pvt->ranges[range].lim.lo);
- if (c->x86 == 0xf)
+ if (pvt->fam == 0xf)
return;
if (!dram_rw(pvt, range))
@@ -934,15 +936,17 @@ static void read_dram_base_limit_regs(struct amd64_pvt *pvt, unsigned range)
amd64_read_pci_cfg(pvt->F1, DRAM_LIMIT_HI + off, &pvt->ranges[range].lim.hi);
/* F15h: factor in CC6 save area by reading dst node's limit reg */
- if (c->x86 != 0x15)
+ if (pvt->fam != 0x15)
return;
nb = node_to_amd_nb(dram_dst_node(pvt, range));
if (WARN_ON(!nb))
return;
- misc = nb->misc;
- f1 = pci_get_related_function(misc->vendor, PCI_DEVICE_ID_AMD_15H_NB_F1, misc);
+ pci_func = (pvt->model == 0x30) ? PCI_DEVICE_ID_AMD_15H_M30H_NB_F1
+ : PCI_DEVICE_ID_AMD_15H_NB_F1;
+
+ f1 = pci_get_related_function(nb->misc->vendor, pci_func, nb->misc);
if (WARN_ON(!f1))
return;
@@ -1089,7 +1093,7 @@ static int f1x_early_channel_count(struct amd64_pvt *pvt)
int i, j, channels = 0;
/* On F10h, if we are in 128 bit mode, then we are using 2 channels */
- if (boot_cpu_data.x86 == 0x10 && (pvt->dclr0 & WIDTH_128))
+ if (pvt->fam == 0x10 && (pvt->dclr0 & WIDTH_128))
return 2;
/*
@@ -1173,7 +1177,7 @@ static int f15_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
}
/*
- * F16h has only limited cs_modes
+ * F16h and F15h model 30h have only limited cs_modes.
*/
static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
unsigned cs_mode)
@@ -1190,7 +1194,7 @@ static int f16_dbam_to_chip_select(struct amd64_pvt *pvt, u8 dct,
static void read_dram_ctl_register(struct amd64_pvt *pvt)
{
- if (boot_cpu_data.x86 == 0xf)
+ if (pvt->fam == 0xf)
return;
if (!amd64_read_dct_pci_cfg(pvt, DCT_SEL_LO, &pvt->dct_sel_lo)) {
@@ -1218,6 +1222,29 @@ static void read_dram_ctl_register(struct amd64_pvt *pvt)
}
/*
+ * Determine channel (DCT) based on the interleaving mode (see F15h M30h BKDG,
+ * 2.10.12 Memory Interleaving Modes).
+ */
+static u8 f15_m30h_determine_channel(struct amd64_pvt *pvt, u64 sys_addr,
+ u8 intlv_en, int num_dcts_intlv,
+ u32 dct_sel)
+{
+ u8 channel = 0;
+ u8 select;
+
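+	/*
+	 * With two DCTs interleaved only DCT0 and DCT3 take part, so any
+	 * nonzero select maps to channel 3; with four DCTs the channel is
+	 * taken from sys_addr[10:8] (see the BKDG section noted above).
+	 */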
+ if (!(intlv_en))
+ return (u8)(dct_sel);
+
+ if (num_dcts_intlv == 2) {
+ select = (sys_addr >> 8) & 0x3;
+ channel = select ? 0x3 : 0;
+ } else if (num_dcts_intlv == 4)
+ channel = (sys_addr >> 8) & 0x7;
+
+ return channel;
+}
+
+/*
* Determine channel (DCT) based on the interleaving mode: F10h BKDG, 2.8.9 Memory
* Interleaving Modes.
*/
@@ -1366,6 +1393,10 @@ static int f1x_lookup_addr_in_dct(u64 in_addr, u8 nid, u8 dct)
(in_addr & cs_mask), (cs_base & cs_mask));
if ((in_addr & cs_mask) == (cs_base & cs_mask)) {
+ if (pvt->fam == 0x15 && pvt->model >= 0x30) {
+ cs_found = csrow;
+ break;
+ }
cs_found = f10_process_possible_spare(pvt, dct, csrow);
edac_dbg(1, " MATCH csrow=%d\n", cs_found);
@@ -1384,11 +1415,9 @@ static u64 f1x_swap_interleaved_region(struct amd64_pvt *pvt, u64 sys_addr)
{
u32 swap_reg, swap_base, swap_limit, rgn_size, tmp_addr;
- if (boot_cpu_data.x86 == 0x10) {
+ if (pvt->fam == 0x10) {
/* only revC3 and revE have that feature */
- if (boot_cpu_data.x86_model < 4 ||
- (boot_cpu_data.x86_model < 0xa &&
- boot_cpu_data.x86_mask < 3))
+ if (pvt->model < 4 || (pvt->model < 0xa && pvt->stepping < 3))
return sys_addr;
}
@@ -1492,20 +1521,143 @@ static int f1x_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
return cs_found;
}
-static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt, u64 sys_addr,
- int *chan_sel)
+static int f15_m30h_match_to_this_node(struct amd64_pvt *pvt, unsigned range,
+ u64 sys_addr, int *chan_sel)
+{
+ int cs_found = -EINVAL;
+ int num_dcts_intlv = 0;
+ u64 chan_addr, chan_offset;
+ u64 dct_base, dct_limit;
+ u32 dct_cont_base_reg, dct_cont_limit_reg, tmp;
+ u8 channel, alias_channel, leg_mmio_hole, dct_sel, dct_offset_en;
+
+ u64 dhar_offset = f10_dhar_offset(pvt);
+ u8 intlv_addr = dct_sel_interleave_addr(pvt);
+ u8 node_id = dram_dst_node(pvt, range);
+ u8 intlv_en = dram_intlv_en(pvt, range);
+
+ amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &dct_cont_base_reg);
+ amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &dct_cont_limit_reg);
+
+ dct_offset_en = (u8) ((dct_cont_base_reg >> 3) & BIT(0));
+ dct_sel = (u8) ((dct_cont_base_reg >> 4) & 0x7);
+
+ edac_dbg(1, "(range %d) SystemAddr= 0x%llx Limit=0x%llx\n",
+ range, sys_addr, get_dram_limit(pvt, range));
+
+	if (!(get_dram_base(pvt, range) <= sys_addr) ||
+	    !(get_dram_limit(pvt, range) >= sys_addr))
+		return -EINVAL;
+
+ if (dhar_valid(pvt) &&
+ dhar_base(pvt) <= sys_addr &&
+ sys_addr < BIT_64(32)) {
+ amd64_warn("Huh? Address is in the MMIO hole: 0x%016llx\n",
+ sys_addr);
+ return -EINVAL;
+ }
+
+ /* Verify sys_addr is within DCT Range. */
+ dct_base = (u64) dct_sel_baseaddr(pvt);
+ dct_limit = (dct_cont_limit_reg >> 11) & 0x1FFF;
+
+ if (!(dct_cont_base_reg & BIT(0)) &&
+ !(dct_base <= (sys_addr >> 27) &&
+ dct_limit >= (sys_addr >> 27)))
+ return -EINVAL;
+
+	/* Verify the number of DCTs that participate in channel interleaving. */
+ num_dcts_intlv = (int) hweight8(intlv_en);
+
+ if (!(num_dcts_intlv % 2 == 0) || (num_dcts_intlv > 4))
+ return -EINVAL;
+
+ channel = f15_m30h_determine_channel(pvt, sys_addr, intlv_en,
+ num_dcts_intlv, dct_sel);
+
+ /* Verify we stay within the MAX number of channels allowed */
+ if (channel > 4 || channel < 0)
+ return -EINVAL;
+
+ leg_mmio_hole = (u8) (dct_cont_base_reg >> 1 & BIT(0));
+
+ /* Get normalized DCT addr */
+ if (leg_mmio_hole && (sys_addr >= BIT_64(32)))
+ chan_offset = dhar_offset;
+ else
+ chan_offset = dct_base << 27;
+
+ chan_addr = sys_addr - chan_offset;
+
+ /* remove channel interleave */
+ if (num_dcts_intlv == 2) {
+ if (intlv_addr == 0x4)
+ chan_addr = ((chan_addr >> 9) << 8) |
+ (chan_addr & 0xff);
+ else if (intlv_addr == 0x5)
+ chan_addr = ((chan_addr >> 10) << 9) |
+ (chan_addr & 0x1ff);
+ else
+ return -EINVAL;
+
+ } else if (num_dcts_intlv == 4) {
+ if (intlv_addr == 0x4)
+ chan_addr = ((chan_addr >> 10) << 8) |
+ (chan_addr & 0xff);
+ else if (intlv_addr == 0x5)
+ chan_addr = ((chan_addr >> 11) << 9) |
+ (chan_addr & 0x1ff);
+ else
+ return -EINVAL;
+ }
+
+ if (dct_offset_en) {
+ amd64_read_pci_cfg(pvt->F1,
+ DRAM_CONT_HIGH_OFF + (int) channel * 4,
+ &tmp);
+ chan_addr += (u64) ((tmp >> 11) & 0xfff) << 27;
+ }
+
+ f15h_select_dct(pvt, channel);
+
+ edac_dbg(1, " Normalized DCT addr: 0x%llx\n", chan_addr);
+
+ /*
+	 * Find the chip select:
+	 * if channel = 3, then alias it to 1. This is because, in F15h M30h,
+	 * there is support for 4 DCTs, but only 2 are currently functional.
+	 * They are DCT0 and DCT3. But we have read all registers of DCT3 into
+	 * pvt->csels[1]. So we need to use '1' here to get correct info.
+	 * Refer to the F15h M30h BKDG, Sections 2.10 and 2.10.3, for details.
+ */
+ alias_channel = (channel == 3) ? 1 : channel;
+
+ cs_found = f1x_lookup_addr_in_dct(chan_addr, node_id, alias_channel);
+
+ if (cs_found >= 0)
+ *chan_sel = alias_channel;
+
+ return cs_found;
+}
+
+static int f1x_translate_sysaddr_to_cs(struct amd64_pvt *pvt,
+ u64 sys_addr,
+ int *chan_sel)
{
int cs_found = -EINVAL;
unsigned range;
for (range = 0; range < DRAM_RANGES; range++) {
-
if (!dram_rw(pvt, range))
continue;
- if ((get_dram_base(pvt, range) <= sys_addr) &&
- (get_dram_limit(pvt, range) >= sys_addr)) {
+ if (pvt->fam == 0x15 && pvt->model >= 0x30)
+ cs_found = f15_m30h_match_to_this_node(pvt, range,
+ sys_addr,
+ chan_sel);
+ else if ((get_dram_base(pvt, range) <= sys_addr) &&
+ (get_dram_limit(pvt, range) >= sys_addr)) {
cs_found = f1x_match_to_this_node(pvt, range,
sys_addr, chan_sel);
if (cs_found >= 0)
@@ -1554,7 +1706,7 @@ static void amd64_debug_display_dimm_sizes(struct amd64_pvt *pvt, u8 ctrl)
u32 *dcsb = ctrl ? pvt->csels[1].csbases : pvt->csels[0].csbases;
u32 dbam = ctrl ? pvt->dbam1 : pvt->dbam0;
- if (boot_cpu_data.x86 == 0xf) {
+ if (pvt->fam == 0xf) {
/* K8 families < revF not supported yet */
if (pvt->ext_model < K8_REV_F)
return;
@@ -1624,6 +1776,17 @@ static struct amd64_family_type amd64_family_types[] = {
.read_dct_pci_cfg = f15_read_dct_pci_cfg,
}
},
+ [F15_M30H_CPUS] = {
+ .ctl_name = "F15h_M30h",
+ .f1_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F1,
+ .f3_id = PCI_DEVICE_ID_AMD_15H_M30H_NB_F3,
+ .ops = {
+ .early_channel_count = f1x_early_channel_count,
+ .map_sysaddr_to_csrow = f1x_map_sysaddr_to_csrow,
+ .dbam_to_cs = f16_dbam_to_chip_select,
+ .read_dct_pci_cfg = f15_read_dct_pci_cfg,
+ }
+ },
[F16_CPUS] = {
.ctl_name = "F16h",
.f1_id = PCI_DEVICE_ID_AMD_16H_NB_F1,
@@ -1860,7 +2023,7 @@ static inline void __amd64_decode_bus_error(struct mem_ctl_info *mci,
memset(&err, 0, sizeof(err));
- sys_addr = get_error_address(m);
+ sys_addr = get_error_address(pvt, m);
if (ecc_type == 2)
err.syndrome = extract_syndrome(m->status);
@@ -1921,10 +2084,9 @@ static void free_mc_sibling_devs(struct amd64_pvt *pvt)
*/
static void read_mc_regs(struct amd64_pvt *pvt)
{
- struct cpuinfo_x86 *c = &boot_cpu_data;
+ unsigned range;
u64 msr_val;
u32 tmp;
- unsigned range;
/*
* Retrieve TOP_MEM and TOP_MEM2; no masking off of reserved bits since
@@ -1985,14 +2147,14 @@ static void read_mc_regs(struct amd64_pvt *pvt)
pvt->ecc_sym_sz = 4;
- if (c->x86 >= 0x10) {
+ if (pvt->fam >= 0x10) {
amd64_read_pci_cfg(pvt->F3, EXT_NB_MCA_CFG, &tmp);
- if (c->x86 != 0x16)
+ if (pvt->fam != 0x16)
/* F16h has only DCT0 */
amd64_read_dct_pci_cfg(pvt, DBAM1, &pvt->dbam1);
/* F10h, revD and later can do x8 ECC too */
- if ((c->x86 > 0x10 || c->x86_model > 7) && tmp & BIT(25))
+ if ((pvt->fam > 0x10 || pvt->model > 7) && tmp & BIT(25))
pvt->ecc_sym_sz = 8;
}
dump_misc_regs(pvt);
@@ -2086,7 +2248,7 @@ static int init_csrows(struct mem_ctl_info *mci)
bool row_dct0 = !!csrow_enabled(i, 0, pvt);
bool row_dct1 = false;
- if (boot_cpu_data.x86 != 0xf)
+ if (pvt->fam != 0xf)
row_dct1 = !!csrow_enabled(i, 1, pvt);
if (!row_dct0 && !row_dct1)
@@ -2104,7 +2266,7 @@ static int init_csrows(struct mem_ctl_info *mci)
}
/* K8 has only one DCT */
- if (boot_cpu_data.x86 != 0xf && row_dct1) {
+ if (pvt->fam != 0xf && row_dct1) {
int row_dct1_pages = amd64_csrow_nr_pages(pvt, 1, i);
csrow->channels[1]->dimm->nr_pages = row_dct1_pages;
@@ -2333,13 +2495,14 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
{
+ struct amd64_pvt *pvt = mci->pvt_info;
int rc;
rc = amd64_create_sysfs_dbg_files(mci);
if (rc < 0)
return rc;
- if (boot_cpu_data.x86 >= 0x10) {
+ if (pvt->fam >= 0x10) {
rc = amd64_create_sysfs_inject_files(mci);
if (rc < 0)
return rc;
@@ -2350,9 +2513,11 @@ static int set_mc_sysfs_attrs(struct mem_ctl_info *mci)
static void del_mc_sysfs_attrs(struct mem_ctl_info *mci)
{
+ struct amd64_pvt *pvt = mci->pvt_info;
+
amd64_remove_sysfs_dbg_files(mci);
- if (boot_cpu_data.x86 >= 0x10)
+ if (pvt->fam >= 0x10)
amd64_remove_sysfs_inject_files(mci);
}
@@ -2387,10 +2552,14 @@ static void setup_mci_misc_attrs(struct mem_ctl_info *mci,
*/
static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
{
- u8 fam = boot_cpu_data.x86;
struct amd64_family_type *fam_type = NULL;
- switch (fam) {
+ pvt->ext_model = boot_cpu_data.x86_model >> 4;
+ pvt->stepping = boot_cpu_data.x86_mask;
+ pvt->model = boot_cpu_data.x86_model;
+ pvt->fam = boot_cpu_data.x86;
+
+ switch (pvt->fam) {
case 0xf:
fam_type = &amd64_family_types[K8_CPUS];
pvt->ops = &amd64_family_types[K8_CPUS].ops;
@@ -2402,6 +2571,12 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
break;
case 0x15:
+ if (pvt->model == 0x30) {
+ fam_type = &amd64_family_types[F15_M30H_CPUS];
+ pvt->ops = &amd64_family_types[F15_M30H_CPUS].ops;
+ break;
+ }
+
fam_type = &amd64_family_types[F15_CPUS];
pvt->ops = &amd64_family_types[F15_CPUS].ops;
break;
@@ -2416,10 +2591,8 @@ static struct amd64_family_type *amd64_per_family_init(struct amd64_pvt *pvt)
return NULL;
}
- pvt->ext_model = boot_cpu_data.x86_model >> 4;
-
amd64_info("%s %sdetected (node %d).\n", fam_type->ctl_name,
- (fam == 0xf ?
+ (pvt->fam == 0xf ?
(pvt->ext_model >= K8_REV_F ? "revF or later "
: "revE or earlier ")
: ""), pvt->mc_node_id);
@@ -2470,8 +2643,15 @@ static int amd64_init_one_instance(struct pci_dev *F2)
layers[0].size = pvt->csels[0].b_cnt;
layers[0].is_virt_csrow = true;
layers[1].type = EDAC_MC_LAYER_CHANNEL;
- layers[1].size = pvt->channel_count;
+
+ /*
+ * Always allocate two channels since we can have setups with DIMMs on
+ * only one channel. Also, this simplifies handling later for the price
+ * of a couple of KBs tops.
+ */
+ layers[1].size = 2;
layers[1].is_virt_csrow = false;
+
mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
if (!mci)
goto err_siblings;
@@ -2579,6 +2759,8 @@ static void amd64_remove_one_instance(struct pci_dev *pdev)
struct ecc_settings *s = ecc_stngs[nid];
mci = find_mci_by_dev(&pdev->dev);
+ WARN_ON(!mci);
+
del_mc_sysfs_attrs(mci);
/* Remove from EDAC CORE tracking list */
mci = edac_mc_del_mc(&pdev->dev);
@@ -2638,6 +2820,14 @@ static DEFINE_PCI_DEVICE_TABLE(amd64_pci_table) = {
},
{
.vendor = PCI_VENDOR_ID_AMD,
+ .device = PCI_DEVICE_ID_AMD_15H_M30H_NB_F2,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = 0,
+ .class_mask = 0,
+ },
+ {
+ .vendor = PCI_VENDOR_ID_AMD,
.device = PCI_DEVICE_ID_AMD_16H_NB_F2,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 2c6f113bae2..d2443cfa069 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -170,6 +170,8 @@
/*
* PCI-defined configuration space registers
*/
+#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F1 0x141b
+#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F2 0x141c
#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
#define PCI_DEVICE_ID_AMD_16H_NB_F1 0x1531
@@ -181,13 +183,22 @@
#define DRAM_BASE_LO 0x40
#define DRAM_LIMIT_LO 0x44
-#define dram_intlv_en(pvt, i) ((u8)((pvt->ranges[i].base.lo >> 8) & 0x7))
+/*
+ * F15 M30h D18F1x2[1C:00]
+ */
+#define DRAM_CONT_BASE 0x200
+#define DRAM_CONT_LIMIT 0x204
+
+/*
+ * F15 M30h D18F1x2[4C:40]
+ */
+#define DRAM_CONT_HIGH_OFF 0x240
+
#define dram_rw(pvt, i) ((u8)(pvt->ranges[i].base.lo & 0x3))
#define dram_intlv_sel(pvt, i) ((u8)((pvt->ranges[i].lim.lo >> 8) & 0x7))
#define dram_dst_node(pvt, i) ((u8)(pvt->ranges[i].lim.lo & 0x7))
#define DHAR 0xf0
-#define dhar_valid(pvt) ((pvt)->dhar & BIT(0))
#define dhar_mem_hoist_valid(pvt) ((pvt)->dhar & BIT(1))
#define dhar_base(pvt) ((pvt)->dhar & 0xff000000)
#define k8_dhar_offset(pvt) (((pvt)->dhar & 0x0000ff00) << 16)
@@ -234,8 +245,6 @@
#define DDR3_MODE BIT(8)
#define DCT_SEL_LO 0x110
-#define dct_sel_baseaddr(pvt) ((pvt)->dct_sel_lo & 0xFFFFF800)
-#define dct_sel_interleave_addr(pvt) (((pvt)->dct_sel_lo >> 6) & 0x3)
#define dct_high_range_enabled(pvt) ((pvt)->dct_sel_lo & BIT(0))
#define dct_interleave_enabled(pvt) ((pvt)->dct_sel_lo & BIT(2))
@@ -297,6 +306,7 @@ enum amd_families {
K8_CPUS = 0,
F10_CPUS,
F15_CPUS,
+ F15_M30H_CPUS,
F16_CPUS,
NUM_FAMILIES,
};
@@ -337,6 +347,10 @@ struct amd64_pvt {
struct pci_dev *F1, *F2, *F3;
u16 mc_node_id; /* MC index of this MC node */
+ u8 fam; /* CPU family */
+ u8 model; /* ... model */
+ u8 stepping; /* ... stepping */
+
int ext_model; /* extended model value of this node */
int channel_count;
@@ -414,6 +428,14 @@ static inline u16 extract_syndrome(u64 status)
return ((status >> 47) & 0xff) | ((status >> 16) & 0xff00);
}
+static inline u8 dct_sel_interleave_addr(struct amd64_pvt *pvt)
+{
+ if (pvt->fam == 0x15 && pvt->model >= 0x30)
+ return (((pvt->dct_sel_hi >> 9) & 0x1) << 2) |
+ ((pvt->dct_sel_lo >> 6) & 0x3);
+
+ return ((pvt)->dct_sel_lo >> 6) & 0x3;
+}
/*
* per-node ECC settings descriptor
*/
@@ -504,3 +526,33 @@ static inline void enable_caches(void *dummy)
{
write_cr0(read_cr0() & ~X86_CR0_CD);
}
+
+static inline u8 dram_intlv_en(struct amd64_pvt *pvt, unsigned int i)
+{
+ if (pvt->fam == 0x15 && pvt->model >= 0x30) {
+ u32 tmp;
+ amd64_read_pci_cfg(pvt->F1, DRAM_CONT_LIMIT, &tmp);
+ return (u8) tmp & 0xF;
+ }
+ return (u8) (pvt->ranges[i].base.lo >> 8) & 0x7;
+}
+
+static inline u8 dhar_valid(struct amd64_pvt *pvt)
+{
+ if (pvt->fam == 0x15 && pvt->model >= 0x30) {
+ u32 tmp;
+ amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &tmp);
+ return (tmp >> 1) & BIT(0);
+ }
+ return (pvt)->dhar & BIT(0);
+}
+
+static inline u32 dct_sel_baseaddr(struct amd64_pvt *pvt)
+{
+ if (pvt->fam == 0x15 && pvt->model >= 0x30) {
+ u32 tmp;
+ amd64_read_pci_cfg(pvt->F1, DRAM_CONT_BASE, &tmp);
+ return (tmp >> 11) & 0x1FFF;
+ }
+ return (pvt)->dct_sel_lo & 0xFFFFF800;
+}
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index 7f3c57113ba..df6575f1430 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -789,7 +789,7 @@ static struct cpc925_dev_info cpc925_devs[] = {
.exit = cpc925_htlink_exit,
.check = cpc925_htlink_check,
},
- {0}, /* Terminated by NULL */
+ { }
};
/*
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index e7c32c4f783..9f7e0e60951 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -58,8 +58,10 @@ static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
if (!val)
return -EINVAL;
- ret = strict_strtol(val, 0, &l);
- if (ret == -EINVAL || ((int)l != l))
+ ret = kstrtol(val, 0, &l);
+ if (ret)
+ return ret;
+ if ((int)l != l)
return -EINVAL;
*((int *)kp->arg) = l;
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index aa44c1718f5..be10a74b16e 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -260,8 +260,7 @@ static void i3200_check(struct mem_ctl_info *mci)
i3200_process_error_info(mci, &info);
}
-
-void __iomem *i3200_map_mchbar(struct pci_dev *pdev)
+static void __iomem *i3200_map_mchbar(struct pci_dev *pdev)
{
union {
u64 mchbar;
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index c9db24d95ca..1a4df82376b 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -248,8 +248,7 @@ static void x38_check(struct mem_ctl_info *mci)
x38_process_error_info(mci, &info);
}
-
-void __iomem *x38_map_mchbar(struct pci_dev *pdev)
+static void __iomem *x38_map_mchbar(struct pci_dev *pdev)
{
union {
u64 mchbar;
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index 63f454e2057..f1d54a3985b 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -14,6 +14,10 @@ if EXTCON
comment "Extcon Device Drivers"
+config OF_EXTCON
+ def_tristate y
+ depends on OF
+
config EXTCON_GPIO
tristate "GPIO extcon support"
depends on GPIOLIB
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 540e2c3a443..759fdae46f9 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -2,6 +2,8 @@
# Makefile for external connector class (extcon) devices
#
+obj-$(CONFIG_OF_EXTCON) += of_extcon.o
+
obj-$(CONFIG_EXTCON) += extcon-class.o
obj-$(CONFIG_EXTCON_GPIO) += extcon-gpio.o
obj-$(CONFIG_EXTCON_ADC_JACK) += extcon-adc-jack.o
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index d0233cd18ff..5985807e52c 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -87,7 +87,8 @@ static irqreturn_t adc_jack_irq_thread(int irq, void *_data)
{
struct adc_jack_data *data = _data;
- schedule_delayed_work(&data->handler, data->handling_delay);
+ queue_delayed_work(system_power_efficient_wq,
+ &data->handler, data->handling_delay);
return IRQ_HANDLED;
}
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index 7a1b4a7791b..e55713083c7 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -890,8 +890,9 @@ static void arizona_micd_detect(struct work_struct *work)
handled:
if (info->detecting)
- schedule_delayed_work(&info->micd_timeout_work,
- msecs_to_jiffies(info->micd_timeout));
+ queue_delayed_work(system_power_efficient_wq,
+ &info->micd_timeout_work,
+ msecs_to_jiffies(info->micd_timeout));
pm_runtime_mark_last_busy(info->dev);
mutex_unlock(&info->lock);
@@ -912,8 +913,9 @@ static irqreturn_t arizona_micdet(int irq, void *data)
mutex_unlock(&info->lock);
if (debounce)
- schedule_delayed_work(&info->micd_detect_work,
- msecs_to_jiffies(debounce));
+ queue_delayed_work(system_power_efficient_wq,
+ &info->micd_detect_work,
+ msecs_to_jiffies(debounce));
else
arizona_micd_detect(&info->micd_detect_work.work);
@@ -967,12 +969,14 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
if (val == info->last_jackdet) {
dev_dbg(arizona->dev, "Suppressing duplicate JACKDET\n");
if (cancelled_hp)
- schedule_delayed_work(&info->hpdet_work,
- msecs_to_jiffies(HPDET_DEBOUNCE));
+ queue_delayed_work(system_power_efficient_wq,
+ &info->hpdet_work,
+ msecs_to_jiffies(HPDET_DEBOUNCE));
if (cancelled_mic)
- schedule_delayed_work(&info->micd_timeout_work,
- msecs_to_jiffies(info->micd_timeout));
+ queue_delayed_work(system_power_efficient_wq,
+ &info->micd_timeout_work,
+ msecs_to_jiffies(info->micd_timeout));
goto out;
}
@@ -994,8 +998,9 @@ static irqreturn_t arizona_jackdet(int irq, void *data)
arizona_start_mic(info);
} else {
- schedule_delayed_work(&info->hpdet_work,
- msecs_to_jiffies(HPDET_DEBOUNCE));
+ queue_delayed_work(system_power_efficient_wq,
+ &info->hpdet_work,
+ msecs_to_jiffies(HPDET_DEBOUNCE));
}
regmap_update_bits(arizona->regmap,
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c
index 18ccadef43f..148382faded 100644
--- a/drivers/extcon/extcon-class.c
+++ b/drivers/extcon/extcon-class.c
@@ -148,6 +148,7 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
return count;
}
+static DEVICE_ATTR_RW(state);
static ssize_t name_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -163,6 +164,7 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%s\n", dev_name(edev->dev));
}
+static DEVICE_ATTR_RO(name);
static ssize_t cable_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -527,11 +529,12 @@ int extcon_unregister_notifier(struct extcon_dev *edev,
}
EXPORT_SYMBOL_GPL(extcon_unregister_notifier);
-static struct device_attribute extcon_attrs[] = {
- __ATTR(state, S_IRUGO | S_IWUSR, state_show, state_store),
- __ATTR_RO(name),
- __ATTR_NULL,
+static struct attribute *extcon_attrs[] = {
+ &dev_attr_state.attr,
+ &dev_attr_name.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(extcon);
static int create_extcon_class(void)
{
@@ -539,7 +542,7 @@ static int create_extcon_class(void)
extcon_class = class_create(THIS_MODULE, "extcon");
if (IS_ERR(extcon_class))
return PTR_ERR(extcon_class);
- extcon_class->dev_attrs = extcon_attrs;
+ extcon_class->dev_groups = extcon_groups;
#if defined(CONFIG_ANDROID)
switch_class = class_compat_register("switch");
@@ -602,7 +605,8 @@ int extcon_dev_register(struct extcon_dev *edev, struct device *dev)
edev->dev->class = extcon_class;
edev->dev->release = extcon_dev_release;
- dev_set_name(edev->dev, "%s", edev->name ? edev->name : dev_name(dev));
+ edev->name = edev->name ? edev->name : dev_name(dev);
+ dev_set_name(edev->dev, "%s", edev->name);
if (edev->max_supported) {
char buf[10];
diff --git a/drivers/extcon/extcon-gpio.c b/drivers/extcon/extcon-gpio.c
index 02bec32adde..f874c30ddbf 100644
--- a/drivers/extcon/extcon-gpio.c
+++ b/drivers/extcon/extcon-gpio.c
@@ -56,7 +56,7 @@ static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
{
struct gpio_extcon_data *extcon_data = dev_id;
- schedule_delayed_work(&extcon_data->work,
+ queue_delayed_work(system_power_efficient_wq, &extcon_data->work,
extcon_data->debounce_jiffies);
return IRQ_HANDLED;
}
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index b752a0ad7b6..89fdd05c5fd 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -57,6 +57,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
if (palmas_usb->linkstat != PALMAS_USB_STATE_VBUS) {
palmas_usb->linkstat = PALMAS_USB_STATE_VBUS;
extcon_set_cable_state(&palmas_usb->edev, "USB", true);
+ dev_info(palmas_usb->dev, "USB cable is attached\n");
} else {
dev_dbg(palmas_usb->dev,
"Spurious connect event detected\n");
@@ -65,6 +66,7 @@ static irqreturn_t palmas_vbus_irq_handler(int irq, void *_palmas_usb)
if (palmas_usb->linkstat == PALMAS_USB_STATE_VBUS) {
palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
extcon_set_cable_state(&palmas_usb->edev, "USB", false);
+ dev_info(palmas_usb->dev, "USB cable is detached\n");
} else {
dev_dbg(palmas_usb->dev,
"Spurious disconnect event detected\n");
@@ -84,28 +86,23 @@ static irqreturn_t palmas_id_irq_handler(int irq, void *_palmas_usb)
if (set & PALMAS_USB_ID_INT_SRC_ID_GND) {
palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
- PALMAS_USB_ID_INT_EN_HI_SET,
- PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT);
- palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
- PALMAS_USB_ID_INT_EN_HI_CLR,
- PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND);
- palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
PALMAS_USB_ID_INT_LATCH_CLR,
PALMAS_USB_ID_INT_EN_HI_CLR_ID_GND);
palmas_usb->linkstat = PALMAS_USB_STATE_ID;
extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", true);
+ dev_info(palmas_usb->dev, "USB-HOST cable is attached\n");
} else if (set & PALMAS_USB_ID_INT_SRC_ID_FLOAT) {
palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
- PALMAS_USB_ID_INT_EN_HI_SET,
- PALMAS_USB_ID_INT_EN_HI_SET_ID_GND);
- palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
- PALMAS_USB_ID_INT_EN_HI_CLR,
- PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT);
- palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
PALMAS_USB_ID_INT_LATCH_CLR,
PALMAS_USB_ID_INT_EN_HI_CLR_ID_FLOAT);
palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", false);
+ dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
+ } else if ((palmas_usb->linkstat == PALMAS_USB_STATE_ID) &&
+ (!(set & PALMAS_USB_ID_INT_SRC_ID_GND))) {
+ palmas_usb->linkstat = PALMAS_USB_STATE_DISCONNECT;
+ extcon_set_cable_state(&palmas_usb->edev, "USB-HOST", false);
+ dev_info(palmas_usb->dev, "USB-HOST cable is detached\n");
}
return IRQ_HANDLED;
@@ -122,13 +119,17 @@ static void palmas_enable_irq(struct palmas_usb *palmas_usb)
palmas_write(palmas_usb->palmas, PALMAS_USB_OTG_BASE,
PALMAS_USB_ID_INT_EN_HI_SET,
- PALMAS_USB_ID_INT_EN_HI_SET_ID_GND);
+ PALMAS_USB_ID_INT_EN_HI_SET_ID_GND |
+ PALMAS_USB_ID_INT_EN_HI_SET_ID_FLOAT);
- palmas_vbus_irq_handler(palmas_usb->vbus_irq, palmas_usb);
+ if (palmas_usb->enable_vbus_detection)
+ palmas_vbus_irq_handler(palmas_usb->vbus_irq, palmas_usb);
/* cold plug for host mode needs this delay */
- msleep(30);
- palmas_id_irq_handler(palmas_usb->id_irq, palmas_usb);
+ if (palmas_usb->enable_id_detection) {
+ msleep(30);
+ palmas_id_irq_handler(palmas_usb->id_irq, palmas_usb);
+ }
}
static int palmas_usb_probe(struct platform_device *pdev)
@@ -139,21 +140,25 @@ static int palmas_usb_probe(struct platform_device *pdev)
struct palmas_usb *palmas_usb;
int status;
- if (node && !pdata) {
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
-
- if (!pdata)
- return -ENOMEM;
-
- pdata->wakeup = of_property_read_bool(node, "ti,wakeup");
- } else if (!pdata) {
- return -EINVAL;
- }
-
palmas_usb = devm_kzalloc(&pdev->dev, sizeof(*palmas_usb), GFP_KERNEL);
if (!palmas_usb)
return -ENOMEM;
+ if (node && !pdata) {
+ palmas_usb->wakeup = of_property_read_bool(node, "ti,wakeup");
+ palmas_usb->enable_id_detection = of_property_read_bool(node,
+ "ti,enable-id-detection");
+ palmas_usb->enable_vbus_detection = of_property_read_bool(node,
+ "ti,enable-vbus-detection");
+ } else {
+ palmas_usb->wakeup = true;
+ palmas_usb->enable_id_detection = true;
+ palmas_usb->enable_vbus_detection = true;
+
+ if (pdata)
+ palmas_usb->wakeup = pdata->wakeup;
+ }
+
palmas->usb = palmas_usb;
palmas_usb->palmas = palmas;
@@ -168,11 +173,10 @@ static int palmas_usb_probe(struct platform_device *pdev)
palmas_usb->vbus_irq = regmap_irq_get_virq(palmas->irq_data,
PALMAS_VBUS_IRQ);
- palmas_usb_wakeup(palmas, pdata->wakeup);
+ palmas_usb_wakeup(palmas, palmas_usb->wakeup);
platform_set_drvdata(pdev, palmas_usb);
- palmas_usb->edev.name = "palmas-usb";
palmas_usb->edev.supported_cable = palmas_extcon_cable;
palmas_usb->edev.mutually_exclusive = mutually_exclusive;
@@ -182,28 +186,36 @@ static int palmas_usb_probe(struct platform_device *pdev)
return status;
}
- status = devm_request_threaded_irq(palmas_usb->dev, palmas_usb->id_irq,
- NULL, palmas_id_irq_handler,
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- "palmas_usb_id", palmas_usb);
- if (status < 0) {
- dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
+ if (palmas_usb->enable_id_detection) {
+ status = devm_request_threaded_irq(palmas_usb->dev,
+ palmas_usb->id_irq,
+ NULL, palmas_id_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
+ IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ "palmas_usb_id", palmas_usb);
+ if (status < 0) {
+ dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
palmas_usb->id_irq, status);
- goto fail_extcon;
+ goto fail_extcon;
+ }
}
- status = devm_request_threaded_irq(palmas_usb->dev,
- palmas_usb->vbus_irq, NULL, palmas_vbus_irq_handler,
- IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
- "palmas_usb_vbus", palmas_usb);
- if (status < 0) {
- dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
+ if (palmas_usb->enable_vbus_detection) {
+ status = devm_request_threaded_irq(palmas_usb->dev,
+ palmas_usb->vbus_irq, NULL,
+ palmas_vbus_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING |
+ IRQF_ONESHOT | IRQF_EARLY_RESUME,
+ "palmas_usb_vbus", palmas_usb);
+ if (status < 0) {
+ dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
palmas_usb->vbus_irq, status);
- goto fail_extcon;
+ goto fail_extcon;
+ }
}
palmas_enable_irq(palmas_usb);
-
+ device_set_wakeup_capable(&pdev->dev, true);
return 0;
fail_extcon:
@@ -221,6 +233,39 @@ static int palmas_usb_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int palmas_usb_suspend(struct device *dev)
+{
+ struct palmas_usb *palmas_usb = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev)) {
+ if (palmas_usb->enable_vbus_detection)
+ enable_irq_wake(palmas_usb->vbus_irq);
+ if (palmas_usb->enable_id_detection)
+ enable_irq_wake(palmas_usb->id_irq);
+ }
+ return 0;
+}
+
+static int palmas_usb_resume(struct device *dev)
+{
+ struct palmas_usb *palmas_usb = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev)) {
+ if (palmas_usb->enable_vbus_detection)
+ disable_irq_wake(palmas_usb->vbus_irq);
+ if (palmas_usb->enable_id_detection)
+ disable_irq_wake(palmas_usb->id_irq);
+ }
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops palmas_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(palmas_usb_suspend,
+ palmas_usb_resume)
+};
+
static struct of_device_id of_palmas_match_tbl[] = {
{ .compatible = "ti,palmas-usb", },
{ .compatible = "ti,twl6035-usb", },
@@ -234,6 +279,7 @@ static struct platform_driver palmas_usb_driver = {
.name = "palmas-usb",
.of_match_table = of_palmas_match_tbl,
.owner = THIS_MODULE,
+ .pm = &palmas_pm_ops,
},
};
diff --git a/drivers/extcon/of_extcon.c b/drivers/extcon/of_extcon.c
new file mode 100644
index 00000000000..72173ecbb31
--- /dev/null
+++ b/drivers/extcon/of_extcon.c
@@ -0,0 +1,64 @@
+/*
+ * OF helpers for External connector (extcon) framework
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ * Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * Copyright (C) 2013 Samsung Electronics
+ * Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/extcon.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/extcon/of_extcon.h>
+
+/*
+ * of_extcon_get_extcon_dev - Get the extcon device referenced by a device node
+ * @dev - device whose device tree node carries the "extcon" phandle
+ * @index - index into the list of extcon phandles
+ *
+ * Returns the extcon device instance on success or an ERR_PTR() on failure.
+ */
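+/*
+ * Illustrative consumer node (the names below are made up for the
+ * example, not taken from a binding document):
+ *
+ *	usb@48890000 {
+ *		extcon = <&palmas_usb>;
+ *	};
+ */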
+struct extcon_dev *of_extcon_get_extcon_dev(struct device *dev, int index)
+{
+ struct device_node *node;
+ struct extcon_dev *edev;
+ struct platform_device *extcon_parent_dev;
+
+ if (!dev->of_node) {
+ dev_dbg(dev, "device does not have a device node entry\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ node = of_parse_phandle(dev->of_node, "extcon", index);
+ if (!node) {
+ dev_dbg(dev, "failed to get phandle in %s node\n",
+ dev->of_node->full_name);
+ return ERR_PTR(-ENODEV);
+ }
+
+ extcon_parent_dev = of_find_device_by_node(node);
+ if (!extcon_parent_dev) {
+ dev_dbg(dev, "unable to find device by node\n");
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ edev = extcon_get_extcon_dev(dev_name(&extcon_parent_dev->dev));
+ if (!edev) {
+ dev_dbg(dev, "unable to get extcon device : %s\n",
+ dev_name(&extcon_parent_dev->dev));
+ return ERR_PTR(-ENODEV);
+ }
+
+ return edev;
+}
+EXPORT_SYMBOL_GPL(of_extcon_get_extcon_dev);
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 7ef316fdc4d..ac1b43a0428 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -54,6 +54,7 @@
#define FW_CDEV_KERNEL_VERSION 5
#define FW_CDEV_VERSION_EVENT_REQUEST2 4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END 4
+#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW 5
struct client {
u32 version;
@@ -1005,6 +1006,8 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
a->channel, a->speed, a->header_size, cb, client);
if (IS_ERR(context))
return PTR_ERR(context);
+ if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
+ context->drop_overflow_headers = true;
/* We only support one context at this time. */
spin_lock_irq(&client->lock);
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 9e1db6490b9..afb701ec90c 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -2749,8 +2749,11 @@ static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
{
u32 *ctx_hdr;
- if (ctx->header_length + ctx->base.header_size > PAGE_SIZE)
+ if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) {
+ if (ctx->base.drop_overflow_headers)
+ return;
flush_iso_completions(ctx);
+ }
ctx_hdr = ctx->header + ctx->header_length;
ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);
@@ -2910,8 +2913,11 @@ static int handle_it_packet(struct context *context,
sync_it_packet_for_cpu(context, d);
- if (ctx->header_length + 4 > PAGE_SIZE)
+ if (ctx->header_length + 4 > PAGE_SIZE) {
+ if (ctx->base.drop_overflow_headers)
+ return 1;
flush_iso_completions(ctx);
+ }
ctx_hdr = ctx->header + ctx->header_length;
ctx->last_timestamp = le16_to_cpu(last->res_count);
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c
index 8e77c02edb2..ff080ee2019 100644
--- a/drivers/firmware/dcdbas.c
+++ b/drivers/firmware/dcdbas.c
@@ -535,11 +535,12 @@ static struct attribute *dcdbas_dev_attrs[] = {
static struct attribute_group dcdbas_attr_group = {
.attrs = dcdbas_dev_attrs,
+ .bin_attrs = dcdbas_bin_attrs,
};
static int dcdbas_probe(struct platform_device *dev)
{
- int i, error;
+ int error;
host_control_action = HC_ACTION_NONE;
host_control_smi_type = HC_SMITYPE_NONE;
@@ -555,18 +556,6 @@ static int dcdbas_probe(struct platform_device *dev)
if (error)
return error;
- for (i = 0; dcdbas_bin_attrs[i]; i++) {
- error = sysfs_create_bin_file(&dev->dev.kobj,
- dcdbas_bin_attrs[i]);
- if (error) {
- while (--i >= 0)
- sysfs_remove_bin_file(&dev->dev.kobj,
- dcdbas_bin_attrs[i]);
- sysfs_remove_group(&dev->dev.kobj, &dcdbas_attr_group);
- return error;
- }
- }
-
register_reboot_notifier(&dcdbas_reboot_nb);
dev_info(&dev->dev, "%s (version %s)\n",
@@ -577,11 +566,7 @@ static int dcdbas_probe(struct platform_device *dev)
static int dcdbas_remove(struct platform_device *dev)
{
- int i;
-
unregister_reboot_notifier(&dcdbas_reboot_nb);
- for (i = 0; dcdbas_bin_attrs[i]; i++)
- sysfs_remove_bin_file(&dev->dev.kobj, dcdbas_bin_attrs[i]);
sysfs_remove_group(&dev->dev.kobj, &dcdbas_attr_group);
return 0;
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index eb760a218da..232fa8fce26 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -419,6 +419,13 @@ static void __init dmi_format_ids(char *buf, size_t len)
dmi_get_system_info(DMI_BIOS_DATE));
}
+/*
+ * Check for DMI/SMBIOS headers in the system firmware image. Any
+ * SMBIOS header must start 16 bytes before the DMI header, so take a
+ * 32 byte buffer and check for DMI at offset 16 and SMBIOS at offset
+ * 0. If the DMI header is present, set dmi_ver accordingly (SMBIOS
+ * takes precedence) and return 0. Otherwise return 1.
+ */
static int __init dmi_present(const u8 *buf)
{
int smbios_ver;
@@ -506,6 +513,13 @@ void __init dmi_scan_machine(void)
if (p == NULL)
goto error;
+ /*
+ * Iterate over all possible DMI header addresses q.
+ * Maintain the 32 bytes around q in buf. On the
+ * first iteration, substitute zero for the
+ * out-of-range bytes so there is no chance of falsely
+ * detecting an SMBIOS header.
+ */
memset(buf, 0, 16);
for (q = p; q < p + 0x10000; q += 16) {
memcpy_fromio(buf + 16, q, 16);
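
One step of the window scan that the two comments above describe, as a
self-contained sketch; "_SM_" and "_DMI_" are the standard SMBIOS/DMI anchor
strings, while checksum verification and version parsing are omitted here:

    #include <linux/string.h>
    #include <linux/types.h>

    /*
     * buf[0..15] holds the 16 bytes before q (zeroed on the first pass),
     * buf[16..31] the 16 bytes just read from q. Returns true when a DMI
     * header sits at q.
     */
    static bool dmi_window_has_table(u8 *buf)
    {
            if (memcmp(buf + 16, "_DMI_", 5) != 0) {
                    /* no hit: slide the window before reading the next q */
                    memcpy(buf, buf + 16, 16);
                    return false;
            }
            /*
             * "_SM_" at offset 0 means an SMBIOS entry point precedes the
             * DMI header; its version number takes precedence for dmi_ver.
             */
            return true;
    }
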
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 73de5a9c224..5002d50e378 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -35,6 +35,7 @@ struct pstore_read_data {
enum pstore_type_id *type;
int *count;
struct timespec *timespec;
+ bool *compressed;
char **buf;
};
@@ -42,7 +43,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
{
efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
struct pstore_read_data *cb_data = data;
- char name[DUMP_NAME_LEN];
+ char name[DUMP_NAME_LEN], data_type;
int i;
int cnt;
unsigned int part;
@@ -54,12 +55,23 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
for (i = 0; i < DUMP_NAME_LEN; i++)
name[i] = entry->var.VariableName[i];
- if (sscanf(name, "dump-type%u-%u-%d-%lu",
+ if (sscanf(name, "dump-type%u-%u-%d-%lu-%c",
+ cb_data->type, &part, &cnt, &time, &data_type) == 5) {
+ *cb_data->id = part;
+ *cb_data->count = cnt;
+ cb_data->timespec->tv_sec = time;
+ cb_data->timespec->tv_nsec = 0;
+ if (data_type == 'C')
+ *cb_data->compressed = true;
+ else
+ *cb_data->compressed = false;
+ } else if (sscanf(name, "dump-type%u-%u-%d-%lu",
cb_data->type, &part, &cnt, &time) == 4) {
*cb_data->id = part;
*cb_data->count = cnt;
cb_data->timespec->tv_sec = time;
cb_data->timespec->tv_nsec = 0;
+ *cb_data->compressed = false;
} else if (sscanf(name, "dump-type%u-%u-%lu",
cb_data->type, &part, &time) == 3) {
/*
@@ -71,6 +83,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
*cb_data->count = 0;
cb_data->timespec->tv_sec = time;
cb_data->timespec->tv_nsec = 0;
+ *cb_data->compressed = false;
} else
return 0;
@@ -87,7 +100,8 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
int *count, struct timespec *timespec,
- char **buf, struct pstore_info *psi)
+ char **buf, bool *compressed,
+ struct pstore_info *psi)
{
struct pstore_read_data data;
@@ -95,6 +109,7 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
data.type = type;
data.count = count;
data.timespec = timespec;
+ data.compressed = compressed;
data.buf = buf;
return __efivar_entry_iter(efi_pstore_read_func, &efivar_sysfs_list, &data,
@@ -103,7 +118,7 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
static int efi_pstore_write(enum pstore_type_id type,
enum kmsg_dump_reason reason, u64 *id,
- unsigned int part, int count, size_t hsize, size_t size,
+ unsigned int part, int count, bool compressed, size_t size,
struct pstore_info *psi)
{
char name[DUMP_NAME_LEN];
@@ -111,8 +126,8 @@ static int efi_pstore_write(enum pstore_type_id type,
efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
int i, ret = 0;
- sprintf(name, "dump-type%u-%u-%d-%lu", type, part, count,
- get_seconds());
+ sprintf(name, "dump-type%u-%u-%d-%lu-%c", type, part, count,
+ get_seconds(), compressed ? 'C' : 'D');
for (i = 0; i < DUMP_NAME_LEN; i++)
efi_name[i] = name[i];
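
For reference, the three variable-name layouts that the read path above now
accepts, shown with purely illustrative field values (type, part, count,
seconds since the epoch):

    dump-type2-1-3-1379200123-C    new layout; trailing 'C' = compressed,
                                   'D' = uncompressed
    dump-type2-1-3-1379200123      previous layout, treated as uncompressed
    dump-type2-1-1379200123        oldest layout, count is reported as 0
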
diff --git a/drivers/fmc/fmc-chardev.c b/drivers/fmc/fmc-chardev.c
index cc031db2d2a..ace6ef24d15 100644
--- a/drivers/fmc/fmc-chardev.c
+++ b/drivers/fmc/fmc-chardev.c
@@ -143,18 +143,17 @@ static int fc_probe(struct fmc_device *fmc)
fc->misc.fops = &fc_fops;
fc->misc.name = kstrdup(dev_name(&fmc->dev), GFP_KERNEL);
- spin_lock(&fc_lock);
ret = misc_register(&fc->misc);
if (ret < 0)
- goto err_unlock;
+ goto out;
+ spin_lock(&fc_lock);
list_add(&fc->list, &fc_devices);
spin_unlock(&fc_lock);
dev_info(&fc->fmc->dev, "Created misc device \"%s\"\n",
fc->misc.name);
return 0;
-err_unlock:
- spin_unlock(&fc_lock);
+out:
kfree(fc->misc.name);
kfree(fc);
return ret;
@@ -174,10 +173,10 @@ static int fc_remove(struct fmc_device *fmc)
spin_lock(&fc_lock);
list_del(&fc->list);
+ spin_unlock(&fc_lock);
misc_deregister(&fc->misc);
kfree(fc->misc.name);
kfree(fc);
- spin_unlock(&fc_lock);
return 0;
}
diff --git a/drivers/fmc/fmc-write-eeprom.c b/drivers/fmc/fmc-write-eeprom.c
index 2cc680dd604..ee5b4790413 100644
--- a/drivers/fmc/fmc-write-eeprom.c
+++ b/drivers/fmc/fmc-write-eeprom.c
@@ -103,7 +103,7 @@ static int fwe_run(struct fmc_device *fmc, const struct firmware *fw, char *s)
* difficult to know in advance when probing the first card if others
* are there.
*/
-int fwe_probe(struct fmc_device *fmc)
+static int fwe_probe(struct fmc_device *fmc)
{
int err, index = 0;
const struct firmware *fw;
@@ -144,7 +144,7 @@ int fwe_probe(struct fmc_device *fmc)
return 0;
}
-int fwe_remove(struct fmc_device *fmc)
+static int fwe_remove(struct fmc_device *fmc)
{
return 0;
}
diff --git a/drivers/gpio/gpio-msm-v1.c b/drivers/gpio/gpio-msm-v1.c
index e3ceaacde45..73b73969d36 100644
--- a/drivers/gpio/gpio-msm-v1.c
+++ b/drivers/gpio/gpio-msm-v1.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/platform_device.h>
+#include <linux/err.h>
#include <mach/msm_gpiomux.h>
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index c57244ef428..dfeb3a3a8f2 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1037,18 +1037,6 @@ omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}
-#if defined(CONFIG_OF_GPIO)
-static inline bool omap_gpio_chip_boot_dt(struct gpio_chip *chip)
-{
- return chip->of_node != NULL;
-}
-#else
-static inline bool omap_gpio_chip_boot_dt(struct gpio_chip *chip)
-{
- return false;
-}
-#endif
-
static void omap_gpio_chip_init(struct gpio_bank *bank)
{
int j;
@@ -1080,68 +1068,24 @@ static void omap_gpio_chip_init(struct gpio_bank *bank)
gpiochip_add(&bank->chip);
- /*
- * REVISIT these explicit calls to irq_create_mapping()
- * to do the GPIO to IRQ domain mapping for each GPIO in
- * the bank can be removed once all OMAP platforms have
- * been migrated to Device Tree boot only.
- * Since in DT boot irq_create_mapping() is called from
- * irq_create_of_mapping() only for the GPIO lines that
- * are used as interrupts.
- */
- if (!omap_gpio_chip_boot_dt(&bank->chip))
- for (j = 0; j < bank->width; j++)
- irq_create_mapping(bank->domain, j);
+ for (j = 0; j < bank->width; j++) {
+ int irq = irq_create_mapping(bank->domain, j);
+ irq_set_lockdep_class(irq, &gpio_lock_class);
+ irq_set_chip_data(irq, bank);
+ if (bank->is_mpuio) {
+ omap_mpuio_alloc_gc(bank, irq, bank->width);
+ } else {
+ irq_set_chip_and_handler(irq, &gpio_irq_chip,
+ handle_simple_irq);
+ set_irq_flags(irq, IRQF_VALID);
+ }
+ }
irq_set_chained_handler(bank->irq, gpio_irq_handler);
irq_set_handler_data(bank->irq, bank);
}
static const struct of_device_id omap_gpio_match[];
-static int omap_gpio_irq_map(struct irq_domain *d, unsigned int virq,
- irq_hw_number_t hwirq)
-{
- struct gpio_bank *bank = d->host_data;
- int gpio;
- int ret;
-
- if (!bank)
- return -EINVAL;
-
- irq_set_lockdep_class(virq, &gpio_lock_class);
- irq_set_chip_data(virq, bank);
- if (bank->is_mpuio) {
- omap_mpuio_alloc_gc(bank, virq, bank->width);
- } else {
- irq_set_chip_and_handler(virq, &gpio_irq_chip,
- handle_simple_irq);
- set_irq_flags(virq, IRQF_VALID);
- }
-
- /*
- * REVISIT most GPIO IRQ chip drivers need to call
- * gpio_request() before a GPIO line can be used as an
- * IRQ. Ideally this should be handled by the IRQ core
- * but until then this has to be done on a per driver
- * basis. Remove this once this is managed by the core.
- */
- if (omap_gpio_chip_boot_dt(&bank->chip)) {
- gpio = irq_to_gpio(bank, hwirq);
- ret = gpio_request_one(gpio, GPIOF_IN, NULL);
- if (ret) {
- dev_err(bank->dev, "Could not request GPIO%d\n", gpio);
- return ret;
- }
- }
-
- return 0;
-}
-
-static struct irq_domain_ops omap_gpio_irq_ops = {
- .xlate = irq_domain_xlate_onetwocell,
- .map = omap_gpio_irq_map,
-};
-
static int omap_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1207,10 +1151,10 @@ static int omap_gpio_probe(struct platform_device *pdev)
}
bank->domain = irq_domain_add_legacy(node, bank->width, irq_base,
- 0, &omap_gpio_irq_ops, bank);
+ 0, &irq_domain_simple_ops, NULL);
#else
bank->domain = irq_domain_add_linear(node, bank->width,
- &omap_gpio_irq_ops, bank);
+ &irq_domain_simple_ops, NULL);
#endif
if (!bank->domain) {
dev_err(dev, "Couldn't register an IRQ domain\n");
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index a7c54c84329..955555d6ec8 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -6,7 +6,7 @@
#
menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
- depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
+ depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU && HAS_DMA
select HDMI
select I2C
select I2C_ALGOBIT
@@ -168,6 +168,17 @@ config DRM_I915_KMS
the driver to bind to PCI devices, which precludes loading things
like intelfb.
+config DRM_I915_PRELIMINARY_HW_SUPPORT
+ bool "Enable preliminary support for prerelease Intel hardware by default"
+ depends on DRM_I915
+ help
+ Choose this option if you have prerelease Intel hardware and want the
+ i915 driver to support it by default. You can enable such support at
+ runtime with the module option i915.preliminary_hw_support=1; this config
+ option only changes the default for that module option.
+
+ If in doubt, say "N".
+
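
For illustration, the two usual ways to flip that module option at runtime
rather than rebuilding with this Kconfig entry (the parameter name is the one
quoted in the help text above): on the kernel command line

    i915.preliminary_hw_support=1

or persistently through a modprobe configuration file such as
/etc/modprobe.d/i915.conf:

    options i915 preliminary_hw_support=1
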
config DRM_MGA
tristate "Matrox g200/g400"
depends on DRM && PCI
@@ -223,3 +234,5 @@ source "drivers/gpu/drm/omapdrm/Kconfig"
source "drivers/gpu/drm/tilcdc/Kconfig"
source "drivers/gpu/drm/qxl/Kconfig"
+
+source "drivers/gpu/drm/msm/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 801bcafa302..f089adfe70e 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -7,13 +7,13 @@ ccflags-y := -Iinclude/drm
drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
- drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
+ drm_lock.o drm_memory.o drm_stub.o drm_vm.o \
drm_agpsupport.o drm_scatter.o drm_pci.o \
drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_prime.o \
- drm_rect.o
+ drm_rect.o drm_vma_manager.o drm_flip_work.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -54,4 +54,5 @@ obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
obj-$(CONFIG_DRM_OMAP) += omapdrm/
obj-$(CONFIG_DRM_TILCDC) += tilcdc/
obj-$(CONFIG_DRM_QXL) += qxl/
+obj-$(CONFIG_DRM_MSM) += msm/
obj-y += i2c/
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index df0d0a08097..32e270dc714 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -190,7 +190,6 @@ static const struct file_operations ast_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = ast_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
@@ -198,7 +197,7 @@ static const struct file_operations ast_fops = {
};
static struct drm_driver driver = {
- .driver_features = DRIVER_USE_MTRR | DRIVER_MODESET | DRIVER_GEM,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM,
.dev_priv_size = 0,
.load = ast_driver_load,
@@ -216,7 +215,7 @@ static struct drm_driver driver = {
.gem_free_object = ast_gem_free_object,
.dumb_create = ast_dumb_create,
.dumb_map_offset = ast_dumb_mmap_offset,
- .dumb_destroy = ast_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
};
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 622d4ae7eb9..796dbb212a4 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -322,9 +322,6 @@ ast_bo(struct ttm_buffer_object *bo)
extern int ast_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-extern int ast_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle);
extern int ast_gem_init_object(struct drm_gem_object *obj);
extern void ast_gem_free_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f60fd7bd118..7f6152d374c 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -449,13 +449,6 @@ int ast_dumb_create(struct drm_file *file,
return 0;
}
-int ast_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
-
int ast_gem_init_object(struct drm_gem_object *obj)
{
BUG();
@@ -487,7 +480,7 @@ void ast_gem_free_object(struct drm_gem_object *obj)
static inline u64 ast_bo_mmap_offset(struct ast_bo *bo)
{
- return bo->bo.addr_space_offset;
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
}
int
ast_dumb_mmap_offset(struct drm_file *file,
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 98d670825a1..32aecb34dbc 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -148,7 +148,9 @@ ast_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
static int ast_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- return 0;
+ struct ast_bo *astbo = ast_bo(bo);
+
+ return drm_vma_node_verify_access(&astbo->gem.vma_node, filp);
}
static int ast_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -321,8 +323,8 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
return ret;
}
- astbo->gem.driver_private = NULL;
astbo->bo.bdev = &ast->ttm.bdev;
+ astbo->bo.bdev->dev_mapping = dev->dev_mapping;
ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 8ecb601152e..138364d9178 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -85,10 +85,9 @@ static const struct file_operations cirrus_driver_fops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
- .fasync = drm_fasync,
};
static struct drm_driver driver = {
- .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_USE_MTRR,
+ .driver_features = DRIVER_MODESET | DRIVER_GEM,
.load = cirrus_driver_load,
.unload = cirrus_driver_unload,
.fops = &cirrus_driver_fops,
@@ -102,7 +101,7 @@ static struct drm_driver driver = {
.gem_free_object = cirrus_gem_free_object,
.dumb_create = cirrus_dumb_create,
.dumb_map_offset = cirrus_dumb_mmap_offset,
- .dumb_destroy = cirrus_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
};
static struct pci_driver cirrus_pci_driver = {
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index bae55609e6c..9b0bb9184af 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -203,9 +203,6 @@ int cirrus_gem_create(struct drm_device *dev,
int cirrus_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int cirrus_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle);
int cirrus_framebuffer_init(struct drm_device *dev,
struct cirrus_framebuffer *gfb,
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 35cbae82777..f130a533a51 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -255,13 +255,6 @@ int cirrus_dumb_create(struct drm_file *file,
return 0;
}
-int cirrus_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
-
int cirrus_gem_init_object(struct drm_gem_object *obj)
{
BUG();
@@ -294,7 +287,7 @@ void cirrus_gem_free_object(struct drm_gem_object *obj)
static inline u64 cirrus_bo_mmap_offset(struct cirrus_bo *bo)
{
- return bo->bo.addr_space_offset;
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
}
int
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 0047012045c..75becdeac07 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -148,7 +148,9 @@ cirrus_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
static int cirrus_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- return 0;
+ struct cirrus_bo *cirrusbo = cirrus_bo(bo);
+
+ return drm_vma_node_verify_access(&cirrusbo->gem.vma_node, filp);
}
static int cirrus_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -326,8 +328,8 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
return ret;
}
- cirrusbo->gem.driver_private = NULL;
cirrusbo->bo.bdev = &cirrus->ttm.bdev;
+ cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;
cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 3d8fed17979..e301d653d97 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -424,6 +424,57 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
}
/**
+ * drm_agp_clear - Clear AGP resource list
+ * @dev: DRM device
+ *
+ * Iterate over all AGP resources and remove them. But keep the AGP head
+ * intact so it can still be used. It is safe to call this if AGP is disabled or
+ * was already removed.
+ *
+ * If DRIVER_MODESET is active, nothing is done to protect the modesetting
+ * resources from getting destroyed. Drivers are responsible for cleaning them up
+ * during device shutdown.
+ */
+void drm_agp_clear(struct drm_device *dev)
+{
+ struct drm_agp_mem *entry, *tempe;
+
+ if (!drm_core_has_AGP(dev) || !dev->agp)
+ return;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
+ if (entry->bound)
+ drm_unbind_agp(entry->memory);
+ drm_free_agp(entry->memory, entry->pages);
+ kfree(entry);
+ }
+ INIT_LIST_HEAD(&dev->agp->memory);
+
+ if (dev->agp->acquired)
+ drm_agp_release(dev);
+
+ dev->agp->acquired = 0;
+ dev->agp->enabled = 0;
+}
+
+/**
+ * drm_agp_destroy - Destroy AGP head
+ * @agp: AGP head to destroy
+ *
+ * Destroy resources that were previously allocated via drm_agp_init(). The
+ * caller must clean up all AGP resources before calling this. See
+ * drm_agp_clear().
+ *
+ * Call this to destroy AGP heads allocated via drm_agp_init().
+ */
+void drm_agp_destroy(struct drm_agp_head *agp)
+{
+ kfree(agp);
+}
+
+/**
* Binds a collection of pages into AGP memory at the given offset, returning
* the AGP memory structure containing them.
*
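
The intended call order for the two new helpers, as a hedged sketch of a
legacy driver's teardown path (the wrapper name is illustrative; only
drm_agp_clear() and drm_agp_destroy() come from this patch):

    static void foo_agp_teardown(struct drm_device *dev)
    {
            /* drop every AGP allocation, but keep the head usable */
            drm_agp_clear(dev);

            /* then free the head itself and forget the pointer */
            drm_agp_destroy(dev->agp);
            dev->agp = NULL;
    }
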
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 5a4dbb410b7..471e051d295 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -207,12 +207,10 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
return 0;
}
- if (drm_core_has_MTRR(dev)) {
- if (map->type == _DRM_FRAME_BUFFER ||
- (map->flags & _DRM_WRITE_COMBINING)) {
- map->mtrr =
- arch_phys_wc_add(map->offset, map->size);
- }
+ if (map->type == _DRM_FRAME_BUFFER ||
+ (map->flags & _DRM_WRITE_COMBINING)) {
+ map->mtrr =
+ arch_phys_wc_add(map->offset, map->size);
}
if (map->type == _DRM_REGISTERS) {
if (map->flags & _DRM_WRITE_COMBINING)
@@ -243,7 +241,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
}
map->handle = vmalloc_user(map->size);
DRM_DEBUG("%lu %d %p\n",
- map->size, drm_order(map->size), map->handle);
+ map->size, order_base_2(map->size), map->handle);
if (!map->handle) {
kfree(map);
return -ENOMEM;
@@ -464,8 +462,7 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
iounmap(map->handle);
/* FALLTHROUGH */
case _DRM_FRAME_BUFFER:
- if (drm_core_has_MTRR(dev))
- arch_phys_wc_del(map->mtrr);
+ arch_phys_wc_del(map->mtrr);
break;
case _DRM_SHM:
vfree(map->handle);
@@ -630,7 +627,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
return -EINVAL;
count = request->count;
- order = drm_order(request->size);
+ order = order_base_2(request->size);
size = 1 << order;
alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -800,7 +797,7 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
return -EPERM;
count = request->count;
- order = drm_order(request->size);
+ order = order_base_2(request->size);
size = 1 << order;
DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
@@ -1002,7 +999,7 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
return -EPERM;
count = request->count;
- order = drm_order(request->size);
+ order = order_base_2(request->size);
size = 1 << order;
alignment = (request->flags & _DRM_PAGE_ALIGN)
@@ -1130,161 +1127,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
return 0;
}
-static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
-{
- struct drm_device_dma *dma = dev->dma;
- struct drm_buf_entry *entry;
- struct drm_buf *buf;
- unsigned long offset;
- unsigned long agp_offset;
- int count;
- int order;
- int size;
- int alignment;
- int page_order;
- int total;
- int byte_count;
- int i;
- struct drm_buf **temp_buflist;
-
- if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
- return -EINVAL;
-
- if (!dma)
- return -EINVAL;
-
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- count = request->count;
- order = drm_order(request->size);
- size = 1 << order;
-
- alignment = (request->flags & _DRM_PAGE_ALIGN)
- ? PAGE_ALIGN(size) : size;
- page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
- total = PAGE_SIZE << page_order;
-
- byte_count = 0;
- agp_offset = request->agp_start;
-
- DRM_DEBUG("count: %d\n", count);
- DRM_DEBUG("order: %d\n", order);
- DRM_DEBUG("size: %d\n", size);
- DRM_DEBUG("agp_offset: %lu\n", agp_offset);
- DRM_DEBUG("alignment: %d\n", alignment);
- DRM_DEBUG("page_order: %d\n", page_order);
- DRM_DEBUG("total: %d\n", total);
-
- if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return -EINVAL;
-
- spin_lock(&dev->count_lock);
- if (dev->buf_use) {
- spin_unlock(&dev->count_lock);
- return -EBUSY;
- }
- atomic_inc(&dev->buf_alloc);
- spin_unlock(&dev->count_lock);
-
- mutex_lock(&dev->struct_mutex);
- entry = &dma->bufs[order];
- if (entry->buf_count) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM; /* May only call once for each order */
- }
-
- if (count < 0 || count > 4096) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -EINVAL;
- }
-
- entry->buflist = kzalloc(count * sizeof(*entry->buflist),
- GFP_KERNEL);
- if (!entry->buflist) {
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- entry->buf_size = size;
- entry->page_order = page_order;
-
- offset = 0;
-
- while (entry->buf_count < count) {
- buf = &entry->buflist[entry->buf_count];
- buf->idx = dma->buf_count + entry->buf_count;
- buf->total = alignment;
- buf->order = order;
- buf->used = 0;
-
- buf->offset = (dma->byte_count + offset);
- buf->bus_address = agp_offset + offset;
- buf->address = (void *)(agp_offset + offset);
- buf->next = NULL;
- buf->waiting = 0;
- buf->pending = 0;
- buf->file_priv = NULL;
-
- buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
- if (!buf->dev_private) {
- /* Set count correctly so we free the proper amount. */
- entry->buf_count = count;
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
-
- DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
-
- offset += alignment;
- entry->buf_count++;
- byte_count += PAGE_SIZE << page_order;
- }
-
- DRM_DEBUG("byte_count: %d\n", byte_count);
-
- temp_buflist = krealloc(dma->buflist,
- (dma->buf_count + entry->buf_count) *
- sizeof(*dma->buflist), GFP_KERNEL);
- if (!temp_buflist) {
- /* Free the entry because it isn't valid */
- drm_cleanup_buf_error(dev, entry);
- mutex_unlock(&dev->struct_mutex);
- atomic_dec(&dev->buf_alloc);
- return -ENOMEM;
- }
- dma->buflist = temp_buflist;
-
- for (i = 0; i < entry->buf_count; i++) {
- dma->buflist[i + dma->buf_count] = &entry->buflist[i];
- }
-
- dma->buf_count += entry->buf_count;
- dma->seg_count += entry->seg_count;
- dma->page_count += byte_count >> PAGE_SHIFT;
- dma->byte_count += byte_count;
-
- DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
- DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
-
- mutex_unlock(&dev->struct_mutex);
-
- request->count = entry->buf_count;
- request->size = size;
-
- dma->flags = _DRM_DMA_USE_FB;
-
- atomic_dec(&dev->buf_alloc);
- return 0;
-}
-
-
/**
* Add buffers for DMA transfers (ioctl).
*
@@ -1305,6 +1147,9 @@ int drm_addbufs(struct drm_device *dev, void *data,
struct drm_buf_desc *request = data;
int ret;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EINVAL;
@@ -1316,7 +1161,7 @@ int drm_addbufs(struct drm_device *dev, void *data,
if (request->flags & _DRM_SG_BUFFER)
ret = drm_addbufs_sg(dev, request);
else if (request->flags & _DRM_FB_BUFFER)
- ret = drm_addbufs_fb(dev, request);
+ ret = -EINVAL;
else
ret = drm_addbufs_pci(dev, request);
@@ -1348,6 +1193,9 @@ int drm_infobufs(struct drm_device *dev, void *data,
int i;
int count;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EINVAL;
@@ -1427,6 +1275,9 @@ int drm_markbufs(struct drm_device *dev, void *data,
int order;
struct drm_buf_entry *entry;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EINVAL;
@@ -1435,7 +1286,7 @@ int drm_markbufs(struct drm_device *dev, void *data,
DRM_DEBUG("%d, %d, %d\n",
request->size, request->low_mark, request->high_mark);
- order = drm_order(request->size);
+ order = order_base_2(request->size);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
entry = &dma->bufs[order];
@@ -1472,6 +1323,9 @@ int drm_freebufs(struct drm_device *dev, void *data,
int idx;
struct drm_buf *buf;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EINVAL;
@@ -1524,6 +1378,9 @@ int drm_mapbufs(struct drm_device *dev, void *data,
struct drm_buf_map *request = data;
int i;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EINVAL;
@@ -1541,9 +1398,7 @@ int drm_mapbufs(struct drm_device *dev, void *data,
if (request->count >= dma->buf_count) {
if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
|| (drm_core_check_feature(dev, DRIVER_SG)
- && (dma->flags & _DRM_DMA_USE_SG))
- || (drm_core_check_feature(dev, DRIVER_FB_DMA)
- && (dma->flags & _DRM_DMA_USE_FB))) {
+ && (dma->flags & _DRM_DMA_USE_SG))) {
struct drm_local_map *map = dev->agp_buffer_map;
unsigned long token = dev->agp_buffer_token;
@@ -1600,25 +1455,28 @@ int drm_mapbufs(struct drm_device *dev, void *data,
return retcode;
}
-/**
- * Compute size order. Returns the exponent of the smaller power of two which
- * is greater or equal to given number.
- *
- * \param size size.
- * \return order.
- *
- * \todo Can be made faster.
- */
-int drm_order(unsigned long size)
+int drm_dma_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- int order;
- unsigned long tmp;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
- for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
+ if (dev->driver->dma_ioctl)
+ return dev->driver->dma_ioctl(dev, data, file_priv);
+ else
+ return -EINVAL;
+}
- if (size & (size - 1))
- ++order;
+struct drm_local_map *drm_getsarea(struct drm_device *dev)
+{
+ struct drm_map_list *entry;
- return order;
+ list_for_each_entry(entry, &dev->maplist, head) {
+ if (entry->map && entry->map->type == _DRM_SHM &&
+ (entry->map->flags & _DRM_CONTAINS_LOCK)) {
+ return entry->map;
+ }
+ }
+ return NULL;
}
-EXPORT_SYMBOL(drm_order);
+EXPORT_SYMBOL(drm_getsarea);
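
The removed drm_order() is equivalent to the kernel's order_base_2(), i.e.
the exponent of the smallest power of two greater than or equal to its
argument, which is what the call sites above now use; a small illustration
with hypothetical request sizes:

    #include <linux/log2.h>

    static unsigned int example_buffer_orders(void)
    {
            unsigned int a = order_base_2(4096); /* 12: 1 << 12 == 4096     */
            unsigned int b = order_base_2(5000); /* 13: 4096 < 5000 <= 8192 */
            unsigned int c = order_base_2(1);    /*  0: 1 << 0  == 1        */

            /* the buffer size actually allocated is then 1 << order */
            return a + b + c;
    }
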
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 725968d3897..b4fb86d8985 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -42,10 +42,6 @@
#include <drm/drmP.h>
-/******************************************************************/
-/** \name Context bitmap support */
-/*@{*/
-
/**
* Free a handle from the context bitmap.
*
@@ -56,13 +52,48 @@
* in drm_device::ctx_idr, while holding the drm_device::struct_mutex
* lock.
*/
-void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
+static void drm_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
{
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
mutex_lock(&dev->struct_mutex);
idr_remove(&dev->ctx_idr, ctx_handle);
mutex_unlock(&dev->struct_mutex);
}
+/******************************************************************/
+/** \name Context bitmap support */
+/*@{*/
+
+void drm_legacy_ctxbitmap_release(struct drm_device *dev,
+ struct drm_file *file_priv)
+{
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ mutex_lock(&dev->ctxlist_mutex);
+ if (!list_empty(&dev->ctxlist)) {
+ struct drm_ctx_list *pos, *n;
+
+ list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
+ if (pos->tag == file_priv &&
+ pos->handle != DRM_KERNEL_CONTEXT) {
+ if (dev->driver->context_dtor)
+ dev->driver->context_dtor(dev,
+ pos->handle);
+
+ drm_ctxbitmap_free(dev, pos->handle);
+
+ list_del(&pos->head);
+ kfree(pos);
+ --dev->ctx_count;
+ }
+ }
+ }
+ mutex_unlock(&dev->ctxlist_mutex);
+}
+
/**
* Context bitmap allocation.
*
@@ -90,10 +121,12 @@ static int drm_ctxbitmap_next(struct drm_device * dev)
*
* Initialise the drm_device::ctx_idr
*/
-int drm_ctxbitmap_init(struct drm_device * dev)
+void drm_legacy_ctxbitmap_init(struct drm_device * dev)
{
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
idr_init(&dev->ctx_idr);
- return 0;
}
/**
@@ -104,7 +137,7 @@ int drm_ctxbitmap_init(struct drm_device * dev)
* Free all idr members using drm_ctx_sarea_free helper function
* while holding the drm_device::struct_mutex lock.
*/
-void drm_ctxbitmap_cleanup(struct drm_device * dev)
+void drm_legacy_ctxbitmap_cleanup(struct drm_device * dev)
{
mutex_lock(&dev->struct_mutex);
idr_destroy(&dev->ctx_idr);
@@ -136,6 +169,9 @@ int drm_getsareactx(struct drm_device *dev, void *data,
struct drm_local_map *map;
struct drm_map_list *_entry;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->struct_mutex);
map = idr_find(&dev->ctx_idr, request->ctx_id);
@@ -180,6 +216,9 @@ int drm_setsareactx(struct drm_device *dev, void *data,
struct drm_local_map *map = NULL;
struct drm_map_list *r_list = NULL;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
mutex_lock(&dev->struct_mutex);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map
@@ -251,7 +290,6 @@ static int drm_context_switch_complete(struct drm_device *dev,
struct drm_file *file_priv, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
- dev->last_switch = jiffies;
if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock)) {
DRM_ERROR("Lock isn't held after context switch\n");
@@ -261,7 +299,6 @@ static int drm_context_switch_complete(struct drm_device *dev,
when the kernel holds the lock, release
that lock here. */
clear_bit(0, &dev->context_flag);
- wake_up(&dev->context_wait);
return 0;
}
@@ -282,6 +319,9 @@ int drm_resctx(struct drm_device *dev, void *data,
struct drm_ctx ctx;
int i;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (res->count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
@@ -312,6 +352,9 @@ int drm_addctx(struct drm_device *dev, void *data,
struct drm_ctx_list *ctx_entry;
struct drm_ctx *ctx = data;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
ctx->handle = drm_ctxbitmap_next(dev);
if (ctx->handle == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
@@ -342,12 +385,6 @@ int drm_addctx(struct drm_device *dev, void *data,
return 0;
}
-int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
- /* This does nothing */
- return 0;
-}
-
/**
* Get context.
*
@@ -361,6 +398,9 @@ int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
struct drm_ctx *ctx = data;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
/* This is 0, because we don't handle any context flags */
ctx->flags = 0;
@@ -383,6 +423,9 @@ int drm_switchctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
DRM_DEBUG("%d\n", ctx->handle);
return drm_context_switch(dev, dev->last_context, ctx->handle);
}
@@ -403,6 +446,9 @@ int drm_newctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
DRM_DEBUG("%d\n", ctx->handle);
drm_context_switch_complete(dev, file_priv, ctx->handle);
@@ -425,6 +471,9 @@ int drm_rmctx(struct drm_device *dev, void *data,
{
struct drm_ctx *ctx = data;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
DRM_DEBUG("%d\n", ctx->handle);
if (ctx->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index fc83bb9eb51..bff2fa941f6 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -125,13 +125,6 @@ static const struct drm_prop_enum_list drm_scaling_mode_enum_list[] =
{ DRM_MODE_SCALE_ASPECT, "Full aspect" },
};
-static const struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
-{
- { DRM_MODE_DITHERING_OFF, "Off" },
- { DRM_MODE_DITHERING_ON, "On" },
- { DRM_MODE_DITHERING_AUTO, "Automatic" },
-};
-
/*
* Non-global properties, but "required" for certain connectors.
*/
@@ -186,29 +179,29 @@ static const struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
struct drm_conn_prop_enum_list {
int type;
const char *name;
- int count;
+ struct ida ida;
};
/*
* Connector and encoder types.
*/
static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
-{ { DRM_MODE_CONNECTOR_Unknown, "Unknown", 0 },
- { DRM_MODE_CONNECTOR_VGA, "VGA", 0 },
- { DRM_MODE_CONNECTOR_DVII, "DVI-I", 0 },
- { DRM_MODE_CONNECTOR_DVID, "DVI-D", 0 },
- { DRM_MODE_CONNECTOR_DVIA, "DVI-A", 0 },
- { DRM_MODE_CONNECTOR_Composite, "Composite", 0 },
- { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO", 0 },
- { DRM_MODE_CONNECTOR_LVDS, "LVDS", 0 },
- { DRM_MODE_CONNECTOR_Component, "Component", 0 },
- { DRM_MODE_CONNECTOR_9PinDIN, "DIN", 0 },
- { DRM_MODE_CONNECTOR_DisplayPort, "DP", 0 },
- { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A", 0 },
- { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B", 0 },
- { DRM_MODE_CONNECTOR_TV, "TV", 0 },
- { DRM_MODE_CONNECTOR_eDP, "eDP", 0 },
- { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual", 0},
+{ { DRM_MODE_CONNECTOR_Unknown, "Unknown" },
+ { DRM_MODE_CONNECTOR_VGA, "VGA" },
+ { DRM_MODE_CONNECTOR_DVII, "DVI-I" },
+ { DRM_MODE_CONNECTOR_DVID, "DVI-D" },
+ { DRM_MODE_CONNECTOR_DVIA, "DVI-A" },
+ { DRM_MODE_CONNECTOR_Composite, "Composite" },
+ { DRM_MODE_CONNECTOR_SVIDEO, "SVIDEO" },
+ { DRM_MODE_CONNECTOR_LVDS, "LVDS" },
+ { DRM_MODE_CONNECTOR_Component, "Component" },
+ { DRM_MODE_CONNECTOR_9PinDIN, "DIN" },
+ { DRM_MODE_CONNECTOR_DisplayPort, "DP" },
+ { DRM_MODE_CONNECTOR_HDMIA, "HDMI-A" },
+ { DRM_MODE_CONNECTOR_HDMIB, "HDMI-B" },
+ { DRM_MODE_CONNECTOR_TV, "TV" },
+ { DRM_MODE_CONNECTOR_eDP, "eDP" },
+ { DRM_MODE_CONNECTOR_VIRTUAL, "Virtual" },
};
static const struct drm_prop_enum_list drm_encoder_enum_list[] =
@@ -220,6 +213,22 @@ static const struct drm_prop_enum_list drm_encoder_enum_list[] =
{ DRM_MODE_ENCODER_VIRTUAL, "Virtual" },
};
+void drm_connector_ida_init(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
+ ida_init(&drm_connector_enum_list[i].ida);
+}
+
+void drm_connector_ida_destroy(void)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(drm_connector_enum_list); i++)
+ ida_destroy(&drm_connector_enum_list[i].ida);
+}
+
const char *drm_get_encoder_name(const struct drm_encoder *encoder)
{
static char buf[32];
@@ -677,20 +686,19 @@ void drm_mode_probed_add(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_mode_probed_add);
-/**
+/*
* drm_mode_remove - remove and free a mode
* @connector: connector list to modify
* @mode: mode to remove
*
* Remove @mode from @connector's mode list, then free it.
*/
-void drm_mode_remove(struct drm_connector *connector,
- struct drm_display_mode *mode)
+static void drm_mode_remove(struct drm_connector *connector,
+ struct drm_display_mode *mode)
{
list_del(&mode->head);
drm_mode_destroy(connector->dev, mode);
}
-EXPORT_SYMBOL(drm_mode_remove);
/**
* drm_connector_init - Init a preallocated connector
@@ -711,6 +719,8 @@ int drm_connector_init(struct drm_device *dev,
int connector_type)
{
int ret;
+ struct ida *connector_ida =
+ &drm_connector_enum_list[connector_type].ida;
drm_modeset_lock_all(dev);
@@ -723,7 +733,12 @@ int drm_connector_init(struct drm_device *dev,
connector->funcs = funcs;
connector->connector_type = connector_type;
connector->connector_type_id =
- ++drm_connector_enum_list[connector_type].count; /* TODO */
+ ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
+ if (connector->connector_type_id < 0) {
+ ret = connector->connector_type_id;
+ drm_mode_object_put(dev, &connector->base);
+ goto out;
+ }
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
connector->edid_blob_ptr = NULL;
@@ -764,6 +779,9 @@ void drm_connector_cleanup(struct drm_connector *connector)
list_for_each_entry_safe(mode, t, &connector->modes, head)
drm_mode_remove(connector, mode);
+ ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
+ connector->connector_type_id);
+
drm_mode_object_put(dev, &connector->base);
list_del(&connector->head);
dev->mode_config.num_connector--;
@@ -781,6 +799,41 @@ void drm_connector_unplug_all(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_connector_unplug_all);
+int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
+ const struct drm_bridge_funcs *funcs)
+{
+ int ret;
+
+ drm_modeset_lock_all(dev);
+
+ ret = drm_mode_object_get(dev, &bridge->base, DRM_MODE_OBJECT_BRIDGE);
+ if (ret)
+ goto out;
+
+ bridge->dev = dev;
+ bridge->funcs = funcs;
+
+ list_add_tail(&bridge->head, &dev->mode_config.bridge_list);
+ dev->mode_config.num_bridge++;
+
+ out:
+ drm_modeset_unlock_all(dev);
+ return ret;
+}
+EXPORT_SYMBOL(drm_bridge_init);
+
+void drm_bridge_cleanup(struct drm_bridge *bridge)
+{
+ struct drm_device *dev = bridge->dev;
+
+ drm_modeset_lock_all(dev);
+ drm_mode_object_put(dev, &bridge->base);
+ list_del(&bridge->head);
+ dev->mode_config.num_bridge--;
+ drm_modeset_unlock_all(dev);
+}
+EXPORT_SYMBOL(drm_bridge_cleanup);
+
int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
@@ -1135,30 +1188,6 @@ int drm_mode_create_scaling_mode_property(struct drm_device *dev)
EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
/**
- * drm_mode_create_dithering_property - create dithering property
- * @dev: DRM device
- *
- * Called by a driver the first time it's needed, must be attached to desired
- * connectors.
- */
-int drm_mode_create_dithering_property(struct drm_device *dev)
-{
- struct drm_property *dithering_mode;
-
- if (dev->mode_config.dithering_mode_property)
- return 0;
-
- dithering_mode =
- drm_property_create_enum(dev, 0, "dithering",
- drm_dithering_mode_enum_list,
- ARRAY_SIZE(drm_dithering_mode_enum_list));
- dev->mode_config.dithering_mode_property = dithering_mode;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_mode_create_dithering_property);
-
-/**
* drm_mode_create_dirty_property - create dirty property
* @dev: DRM device
*
@@ -1190,6 +1219,7 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr
total_objects += dev->mode_config.num_crtc;
total_objects += dev->mode_config.num_connector;
total_objects += dev->mode_config.num_encoder;
+ total_objects += dev->mode_config.num_bridge;
group->id_list = kzalloc(total_objects * sizeof(uint32_t), GFP_KERNEL);
if (!group->id_list)
@@ -1198,6 +1228,7 @@ static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *gr
group->num_crtcs = 0;
group->num_connectors = 0;
group->num_encoders = 0;
+ group->num_bridges = 0;
return 0;
}
@@ -1207,6 +1238,7 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
+ struct drm_bridge *bridge;
int ret;
if ((ret = drm_mode_group_init(dev, group)))
@@ -1223,6 +1255,11 @@ int drm_mode_group_init_legacy_group(struct drm_device *dev,
group->id_list[group->num_crtcs + group->num_encoders +
group->num_connectors++] = connector->base.id;
+ list_for_each_entry(bridge, &dev->mode_config.bridge_list, head)
+ group->id_list[group->num_crtcs + group->num_encoders +
+ group->num_connectors + group->num_bridges++] =
+ bridge->base.id;
+
return 0;
}
EXPORT_SYMBOL(drm_mode_group_init_legacy_group);
@@ -2604,10 +2641,22 @@ int drm_mode_getfb(struct drm_device *dev,
r->depth = fb->depth;
r->bpp = fb->bits_per_pixel;
r->pitch = fb->pitches[0];
- if (fb->funcs->create_handle)
- ret = fb->funcs->create_handle(fb, file_priv, &r->handle);
- else
+ if (fb->funcs->create_handle) {
+ if (file_priv->is_master || capable(CAP_SYS_ADMIN)) {
+ ret = fb->funcs->create_handle(fb, file_priv,
+ &r->handle);
+ } else {
+ /* GET_FB() is an unprivileged ioctl so we must not
+ * return a buffer-handle to non-master processes! For
+ * backwards-compatibility reasons, we cannot make
+ * GET_FB() privileged, so just return an invalid handle
+ * for non-masters. */
+ r->handle = 0;
+ ret = 0;
+ }
+ } else {
ret = -ENODEV;
+ }
drm_framebuffer_unreference(fb);
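
Seen from userspace, a hedged sketch of what the GET_FB change above means
for a non-master client (fd and fb_id are assumed to come from elsewhere;
drmIoctl() is libdrm's ioctl wrapper):

    #include <stdint.h>
    #include <xf86drm.h>

    /* returns the GEM handle for fb_id; after this patch it is 0 for
     * callers that are neither DRM master nor CAP_SYS_ADMIN capable */
    static uint32_t get_fb_handle(int fd, uint32_t fb_id)
    {
            struct drm_mode_fb_cmd info = { .fb_id = fb_id };

            if (drmIoctl(fd, DRM_IOCTL_MODE_GETFB, &info) != 0)
                    return 0;

            /* width/height/pitch/bpp are still filled in either way */
            return info.handle;
    }
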
@@ -3514,6 +3563,9 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
page_flip->reserved != 0)
return -EINVAL;
+ if ((page_flip->flags & DRM_MODE_PAGE_FLIP_ASYNC) && !dev->mode_config.async_page_flip)
+ return -EINVAL;
+
obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj)
return -EINVAL;
@@ -3587,7 +3639,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
}
old_fb = crtc->fb;
- ret = crtc->funcs->page_flip(crtc, fb, e);
+ ret = crtc->funcs->page_flip(crtc, fb, e, page_flip->flags);
if (ret) {
if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
spin_lock_irqsave(&dev->event_lock, flags);
@@ -3905,6 +3957,7 @@ void drm_mode_config_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev->mode_config.fb_list);
INIT_LIST_HEAD(&dev->mode_config.crtc_list);
INIT_LIST_HEAD(&dev->mode_config.connector_list);
+ INIT_LIST_HEAD(&dev->mode_config.bridge_list);
INIT_LIST_HEAD(&dev->mode_config.encoder_list);
INIT_LIST_HEAD(&dev->mode_config.property_list);
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
@@ -3941,6 +3994,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
struct drm_connector *connector, *ot;
struct drm_crtc *crtc, *ct;
struct drm_encoder *encoder, *enct;
+ struct drm_bridge *bridge, *brt;
struct drm_framebuffer *fb, *fbt;
struct drm_property *property, *pt;
struct drm_property_blob *blob, *bt;
@@ -3951,6 +4005,11 @@ void drm_mode_config_cleanup(struct drm_device *dev)
encoder->funcs->destroy(encoder);
}
+ list_for_each_entry_safe(bridge, brt,
+ &dev->mode_config.bridge_list, head) {
+ bridge->funcs->destroy(bridge);
+ }
+
list_for_each_entry_safe(connector, ot,
&dev->mode_config.connector_list, head) {
connector->funcs->destroy(connector);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 6a647493ca7..c722c3b5404 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -257,10 +257,16 @@ drm_encoder_disable(struct drm_encoder *encoder)
{
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+ if (encoder->bridge)
+ encoder->bridge->funcs->disable(encoder->bridge);
+
if (encoder_funcs->disable)
(*encoder_funcs->disable)(encoder);
else
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+
+ if (encoder->bridge)
+ encoder->bridge->funcs->post_disable(encoder->bridge);
}
/**
@@ -424,6 +430,16 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
+
+ if (encoder->bridge && encoder->bridge->funcs->mode_fixup) {
+ ret = encoder->bridge->funcs->mode_fixup(
+ encoder->bridge, mode, adjusted_mode);
+ if (!ret) {
+ DRM_DEBUG_KMS("Bridge fixup failed\n");
+ goto done;
+ }
+ }
+
encoder_funcs = encoder->helper_private;
if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
adjusted_mode))) {
@@ -443,9 +459,16 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
+
+ if (encoder->bridge)
+ encoder->bridge->funcs->disable(encoder->bridge);
+
encoder_funcs = encoder->helper_private;
/* Disable the encoders as the first thing we do. */
encoder_funcs->prepare(encoder);
+
+ if (encoder->bridge)
+ encoder->bridge->funcs->post_disable(encoder->bridge);
}
drm_crtc_prepare_encoders(dev);
@@ -469,6 +492,10 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
mode->base.id, mode->name);
encoder_funcs = encoder->helper_private;
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
+
+ if (encoder->bridge && encoder->bridge->funcs->mode_set)
+ encoder->bridge->funcs->mode_set(encoder->bridge, mode,
+ adjusted_mode);
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -479,9 +506,14 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
+ if (encoder->bridge)
+ encoder->bridge->funcs->pre_enable(encoder->bridge);
+
encoder_funcs = encoder->helper_private;
encoder_funcs->commit(encoder);
+ if (encoder->bridge)
+ encoder->bridge->funcs->enable(encoder->bridge);
}
/* Store real post-adjustment hardware mode. */
@@ -830,6 +862,31 @@ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
return dpms;
}
+/* Helper which handles bridge ordering around encoder dpms */
+static void drm_helper_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_bridge *bridge = encoder->bridge;
+ struct drm_encoder_helper_funcs *encoder_funcs;
+
+ if (bridge) {
+ if (mode == DRM_MODE_DPMS_ON)
+ bridge->funcs->pre_enable(bridge);
+ else
+ bridge->funcs->disable(bridge);
+ }
+
+ encoder_funcs = encoder->helper_private;
+ if (encoder_funcs->dpms)
+ encoder_funcs->dpms(encoder, mode);
+
+ if (bridge) {
+ if (mode == DRM_MODE_DPMS_ON)
+ bridge->funcs->enable(bridge);
+ else
+ bridge->funcs->post_disable(bridge);
+ }
+}
+
static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
{
int dpms = DRM_MODE_DPMS_OFF;
@@ -857,7 +914,7 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
{
struct drm_encoder *encoder = connector->encoder;
struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
- int old_dpms;
+ int old_dpms, encoder_dpms = DRM_MODE_DPMS_OFF;
if (mode == connector->dpms)
return;
@@ -865,6 +922,9 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
old_dpms = connector->dpms;
connector->dpms = mode;
+ if (encoder)
+ encoder_dpms = drm_helper_choose_encoder_dpms(encoder);
+
/* from off to on, do crtc then encoder */
if (mode < old_dpms) {
if (crtc) {
@@ -873,22 +933,14 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
(*crtc_funcs->dpms) (crtc,
drm_helper_choose_crtc_dpms(crtc));
}
- if (encoder) {
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- if (encoder_funcs->dpms)
- (*encoder_funcs->dpms) (encoder,
- drm_helper_choose_encoder_dpms(encoder));
- }
+ if (encoder)
+ drm_helper_encoder_dpms(encoder, encoder_dpms);
}
/* from on to off, do encoder then crtc */
if (mode > old_dpms) {
- if (encoder) {
- struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
- if (encoder_funcs->dpms)
- (*encoder_funcs->dpms) (encoder,
- drm_helper_choose_encoder_dpms(encoder));
- }
+ if (encoder)
+ drm_helper_encoder_dpms(encoder, encoder_dpms);
if (crtc) {
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
if (crtc_funcs->dpms)
@@ -924,9 +976,8 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
{
struct drm_crtc *crtc;
struct drm_encoder *encoder;
- struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_crtc_helper_funcs *crtc_funcs;
- int ret;
+ int ret, encoder_dpms;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -946,10 +997,10 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
if(encoder->crtc != crtc)
continue;
- encoder_funcs = encoder->helper_private;
- if (encoder_funcs->dpms)
- (*encoder_funcs->dpms) (encoder,
- drm_helper_choose_encoder_dpms(encoder));
+ encoder_dpms = drm_helper_choose_encoder_dpms(
+ encoder);
+
+ drm_helper_encoder_dpms(encoder, encoder_dpms);
}
crtc_funcs = crtc->helper_private;
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index 495b5fd2787..8a140a95375 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -44,10 +44,18 @@
*
* Allocate and initialize a drm_device_dma structure.
*/
-int drm_dma_setup(struct drm_device *dev)
+int drm_legacy_dma_setup(struct drm_device *dev)
{
int i;
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
+ drm_core_check_feature(dev, DRIVER_MODESET)) {
+ return 0;
+ }
+
+ dev->buf_use = 0;
+ atomic_set(&dev->buf_alloc, 0);
+
dev->dma = kzalloc(sizeof(*dev->dma), GFP_KERNEL);
if (!dev->dma)
return -ENOMEM;
@@ -66,11 +74,16 @@ int drm_dma_setup(struct drm_device *dev)
* Free all pages associated with DMA buffers, the buffers and pages lists, and
* finally the drm_device::dma structure itself.
*/
-void drm_dma_takedown(struct drm_device *dev)
+void drm_legacy_dma_takedown(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
int i, j;
+ if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
+ drm_core_check_feature(dev, DRIVER_MODESET)) {
+ return;
+ }
+
if (!dma)
return;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 99fcd7c32ea..e572dd20bde 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -68,7 +68,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -87,7 +87,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -106,8 +106,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
- /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
- DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -122,7 +121,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif
- DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
@@ -131,14 +130,14 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_HANDLE_TO_FD, drm_prime_handle_to_fd_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_PRIME_FD_TO_HANDLE, drm_prime_fd_to_handle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_CONTROL_ALLOW|DRM_UNLOCKED),
@@ -172,6 +171,31 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
/**
+ * drm_legacy_dev_reinit
+ *
+ * Reinitializes a legacy/ums drm device in it's lastclose function.
+ */
+static void drm_legacy_dev_reinit(struct drm_device *dev)
+{
+ int i;
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
+ atomic_set(&dev->ioctl_count, 0);
+ atomic_set(&dev->vma_count, 0);
+
+ for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
+ atomic_set(&dev->counts[i], 0);
+
+ dev->sigdata.lock = NULL;
+
+ dev->context_flag = 0;
+ dev->last_context = 0;
+ dev->if_version = 0;
+}
+
+/**
* Take down the DRM device.
*
* \param dev DRM device structure.
@@ -195,32 +219,9 @@ int drm_lastclose(struct drm_device * dev)
mutex_lock(&dev->struct_mutex);
- /* Clear AGP information */
- if (drm_core_has_AGP(dev) && dev->agp &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
- struct drm_agp_mem *entry, *tempe;
-
- /* Remove AGP resources, but leave dev->agp
- intact until drv_cleanup is called. */
- list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
- if (entry->bound)
- drm_unbind_agp(entry->memory);
- drm_free_agp(entry->memory, entry->pages);
- kfree(entry);
- }
- INIT_LIST_HEAD(&dev->agp->memory);
+ drm_agp_clear(dev);
- if (dev->agp->acquired)
- drm_agp_release(dev);
-
- dev->agp->acquired = 0;
- dev->agp->enabled = 0;
- }
- if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
- drm_sg_cleanup(dev->sg);
- dev->sg = NULL;
- }
+ drm_legacy_sg_cleanup(dev);
/* Clear vma list (only built for debugging) */
list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
@@ -228,13 +229,13 @@ int drm_lastclose(struct drm_device * dev)
kfree(vma);
}
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
- !drm_core_check_feature(dev, DRIVER_MODESET))
- drm_dma_takedown(dev);
+ drm_legacy_dma_takedown(dev);
dev->dev_mapping = NULL;
mutex_unlock(&dev->struct_mutex);
+ drm_legacy_dev_reinit(dev);
+
DRM_DEBUG("lastclose completed\n");
return 0;
}
@@ -251,6 +252,7 @@ static int __init drm_core_init(void)
int ret = -ENOMEM;
drm_global_init();
+ drm_connector_ida_init();
idr_init(&drm_minors_idr);
if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
@@ -263,13 +265,6 @@ static int __init drm_core_init(void)
goto err_p2;
}
- drm_proc_root = proc_mkdir("dri", NULL);
- if (!drm_proc_root) {
- DRM_ERROR("Cannot create /proc/dri\n");
- ret = -1;
- goto err_p3;
- }
-
drm_debugfs_root = debugfs_create_dir("dri", NULL);
if (!drm_debugfs_root) {
DRM_ERROR("Cannot create /sys/kernel/debug/dri\n");
@@ -292,12 +287,12 @@ err_p1:
static void __exit drm_core_exit(void)
{
- remove_proc_entry("dri", NULL);
debugfs_remove(drm_debugfs_root);
drm_sysfs_destroy();
unregister_chrdev(DRM_MAJOR, "drm");
+ drm_connector_ida_destroy();
idr_destroy(&drm_minors_idr);
}
@@ -420,17 +415,15 @@ long drm_ioctl(struct file *filp,
/* Do not trust userspace, use our own definition */
func = ioctl->func;
- /* is there a local override? */
- if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
- func = dev->driver->dma_ioctl;
if (!func) {
DRM_DEBUG("no function\n");
retcode = -EINVAL;
} else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
- ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
+ ((ioctl->flags & DRM_AUTH) && !drm_is_render_client(file_priv) && !file_priv->authenticated) ||
((ioctl->flags & DRM_MASTER) && !file_priv->is_master) ||
- (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL))) {
+ (!(ioctl->flags & DRM_CONTROL_ALLOW) && (file_priv->minor->type == DRM_MINOR_CONTROL)) ||
+ (!(ioctl->flags & DRM_RENDER_ALLOW) && drm_is_render_client(file_priv))) {
retcode = -EACCES;
} else {
if (cmd & (IOC_IN | IOC_OUT)) {
@@ -485,19 +478,4 @@ long drm_ioctl(struct file *filp,
DRM_DEBUG("ret = %d\n", retcode);
return retcode;
}
-
EXPORT_SYMBOL(drm_ioctl);
-
-struct drm_local_map *drm_getsarea(struct drm_device *dev)
-{
- struct drm_map_list *entry;
-
- list_for_each_entry(entry, &dev->maplist, head) {
- if (entry->map && entry->map->type == _DRM_SHM &&
- (entry->map->flags & _DRM_CONTAINS_LOCK)) {
- return entry->map;
- }
- }
- return NULL;
-}
-EXPORT_SYMBOL(drm_getsarea);
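
As context for the DRM_RENDER_ALLOW and drm_is_render_client() changes above: drivers are expected to whitelist individual ioctls for render nodes, everything else is rejected for unauthenticated render clients. A minimal sketch of a driver-private table entry; the FOO_* ioctl names and handlers are hypothetical, only DRM_IOCTL_DEF_DRV and the flag bits come from the DRM core.

/* Hypothetical driver ioctl table: only explicitly whitelisted entries
 * are reachable through a render node. */
static const struct drm_ioctl_desc foo_ioctls[] = {
	DRM_IOCTL_DEF_DRV(FOO_GEM_CREATE, foo_gem_create_ioctl,
			  DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(FOO_SET_SCANOUT, foo_set_scanout_ioctl,
			  DRM_MASTER | DRM_UNLOCKED),
};
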
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 95d6f4b6967..1688ff50051 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -125,6 +125,9 @@ static struct edid_quirk {
/* ViewSonic VA2026w */
{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+
+ /* Medion MD 30217 PG */
+ { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
};
/*
@@ -931,6 +934,36 @@ static const struct drm_display_mode edid_cea_modes[] = {
.vrefresh = 100, },
};
+/*
+ * HDMI 1.4 4k modes.
+ */
+static const struct drm_display_mode edid_4k_modes[] = {
+ /* 1 - 3840x2160@30Hz */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
+ 3840, 4016, 4104, 4400, 0,
+ 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 30, },
+ /* 2 - 3840x2160@25Hz */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
+ 3840, 4896, 4984, 5280, 0,
+ 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 25, },
+ /* 3 - 3840x2160@24Hz */
+ { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 297000,
+ 3840, 5116, 5204, 5500, 0,
+ 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, },
+ /* 4 - 4096x2160@24Hz (SMPTE) */
+ { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 297000,
+ 4096, 5116, 5204, 5500, 0,
+ 2160, 2168, 2178, 2250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC),
+ .vrefresh = 24, },
+};
+
/*** DDC fetch and block validation ***/
static const u8 edid_header[] = {
@@ -2287,7 +2320,6 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
return closure.modes;
}
-#define HDMI_IDENTIFIER 0x000C03
#define AUDIO_BLOCK 0x01
#define VIDEO_BLOCK 0x02
#define VENDOR_BLOCK 0x03
@@ -2298,10 +2330,10 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
#define EDID_CEA_YCRCB422 (1 << 4)
#define EDID_CEA_VCDB_QS (1 << 6)
-/**
+/*
* Search EDID for CEA extension block.
*/
-u8 *drm_find_cea_extension(struct edid *edid)
+static u8 *drm_find_cea_extension(struct edid *edid)
{
u8 *edid_ext = NULL;
int i;
@@ -2322,7 +2354,6 @@ u8 *drm_find_cea_extension(struct edid *edid)
return edid_ext;
}
-EXPORT_SYMBOL(drm_find_cea_extension);
/*
* Calculate the alternate clock for the CEA mode
@@ -2380,6 +2411,54 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
}
EXPORT_SYMBOL(drm_match_cea_mode);
+/*
+ * Calculate the alternate clock for HDMI modes (those from the HDMI vendor
+ * specific block).
+ *
+ * It's almost like cea_mode_alternate_clock(); we just need to add an
+ * exception for the VIC 4 mode (4096x2160@24Hz): there is no alternate
+ * clock for this one.
+ */
+static unsigned int
+hdmi_mode_alternate_clock(const struct drm_display_mode *hdmi_mode)
+{
+ if (hdmi_mode->hdisplay == 4096 && hdmi_mode->vdisplay == 2160)
+ return hdmi_mode->clock;
+
+ return cea_mode_alternate_clock(hdmi_mode);
+}
+
+/*
+ * drm_match_hdmi_mode - look for a HDMI mode matching given mode
+ * @to_match: display mode
+ *
+ * An HDMI mode is one defined in the HDMI vendor specific block.
+ *
+ * Returns the HDMI Video Identification Code (VIC) of the mode, or 0 if it isn't one.
+ */
+static u8 drm_match_hdmi_mode(const struct drm_display_mode *to_match)
+{
+ u8 mode;
+
+ if (!to_match->clock)
+ return 0;
+
+ for (mode = 0; mode < ARRAY_SIZE(edid_4k_modes); mode++) {
+ const struct drm_display_mode *hdmi_mode = &edid_4k_modes[mode];
+ unsigned int clock1, clock2;
+
+ /* Make sure to also match alternate clocks */
+ clock1 = hdmi_mode->clock;
+ clock2 = hdmi_mode_alternate_clock(hdmi_mode);
+
+ if ((KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock1) ||
+ KHZ2PICOS(to_match->clock) == KHZ2PICOS(clock2)) &&
+ drm_mode_equal_no_clocks(to_match, hdmi_mode))
+ return mode + 1;
+ }
+ return 0;
+}
+
static int
add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
{
@@ -2397,18 +2476,26 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
* with the alternate clock for certain CEA modes.
*/
list_for_each_entry(mode, &connector->probed_modes, head) {
- const struct drm_display_mode *cea_mode;
+ const struct drm_display_mode *cea_mode = NULL;
struct drm_display_mode *newmode;
- u8 cea_mode_idx = drm_match_cea_mode(mode) - 1;
+ u8 mode_idx = drm_match_cea_mode(mode) - 1;
unsigned int clock1, clock2;
- if (cea_mode_idx >= ARRAY_SIZE(edid_cea_modes))
- continue;
+ if (mode_idx < ARRAY_SIZE(edid_cea_modes)) {
+ cea_mode = &edid_cea_modes[mode_idx];
+ clock2 = cea_mode_alternate_clock(cea_mode);
+ } else {
+ mode_idx = drm_match_hdmi_mode(mode) - 1;
+ if (mode_idx < ARRAY_SIZE(edid_4k_modes)) {
+ cea_mode = &edid_4k_modes[mode_idx];
+ clock2 = hdmi_mode_alternate_clock(cea_mode);
+ }
+ }
- cea_mode = &edid_cea_modes[cea_mode_idx];
+ if (!cea_mode)
+ continue;
clock1 = cea_mode->clock;
- clock2 = cea_mode_alternate_clock(cea_mode);
if (clock1 == clock2)
continue;
@@ -2442,10 +2529,11 @@ add_alternate_cea_modes(struct drm_connector *connector, struct edid *edid)
}
static int
-do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
+do_cea_modes(struct drm_connector *connector, const u8 *db, u8 len)
{
struct drm_device *dev = connector->dev;
- u8 * mode, cea_mode;
+ const u8 *mode;
+ u8 cea_mode;
int modes = 0;
for (mode = db; mode < db + len; mode++) {
@@ -2465,6 +2553,68 @@ do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
return modes;
}
+/*
+ * do_hdmi_vsdb_modes - Parse the HDMI Vendor Specific data block
+ * @connector: connector corresponding to the HDMI sink
+ * @db: start of the CEA vendor specific block
+ * @len: length of the CEA block payload, i.e. one can access up to db[len]
+ *
+ * Parses the HDMI VSDB looking for modes to add to @connector.
+ */
+static int
+do_hdmi_vsdb_modes(struct drm_connector *connector, const u8 *db, u8 len)
+{
+ struct drm_device *dev = connector->dev;
+ int modes = 0, offset = 0, i;
+ u8 vic_len;
+
+ if (len < 8)
+ goto out;
+
+ /* no HDMI_Video_Present */
+ if (!(db[8] & (1 << 5)))
+ goto out;
+
+ /* Latency_Fields_Present */
+ if (db[8] & (1 << 7))
+ offset += 2;
+
+ /* I_Latency_Fields_Present */
+ if (db[8] & (1 << 6))
+ offset += 2;
+
+ /* account for the first 2 bytes of the additional video format
+ * capabilities and bail out if the declared length is too short */
+ offset += 2;
+ if (len < (8 + offset))
+ goto out;
+
+ vic_len = db[8 + offset] >> 5;
+
+ for (i = 0; i < vic_len && len >= (9 + offset + i); i++) {
+ struct drm_display_mode *newmode;
+ u8 vic;
+
+ vic = db[9 + offset + i];
+
+ vic--; /* VICs start at 1 */
+ if (vic >= ARRAY_SIZE(edid_4k_modes)) {
+ DRM_ERROR("Unknown HDMI VIC: %d\n", vic);
+ continue;
+ }
+
+ newmode = drm_mode_duplicate(dev, &edid_4k_modes[vic]);
+ if (!newmode)
+ continue;
+
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+
+out:
+ return modes;
+}
+
static int
cea_db_payload_len(const u8 *db)
{
@@ -2496,14 +2646,30 @@ cea_db_offsets(const u8 *cea, int *start, int *end)
return 0;
}
+static bool cea_db_is_hdmi_vsdb(const u8 *db)
+{
+ int hdmi_id;
+
+ if (cea_db_tag(db) != VENDOR_BLOCK)
+ return false;
+
+ if (cea_db_payload_len(db) < 5)
+ return false;
+
+ hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
+
+ return hdmi_id == HDMI_IEEE_OUI;
+}
+
#define for_each_cea_db(cea, i, start, end) \
for ((i) = (start); (i) < (end) && (i) + cea_db_payload_len(&(cea)[(i)]) < (end); (i) += cea_db_payload_len(&(cea)[(i)]) + 1)
static int
add_cea_modes(struct drm_connector *connector, struct edid *edid)
{
- u8 * cea = drm_find_cea_extension(edid);
- u8 * db, dbl;
+ const u8 *cea = drm_find_cea_extension(edid);
+ const u8 *db;
+ u8 dbl;
int modes = 0;
if (cea && cea_revision(cea) >= 3) {
@@ -2517,7 +2683,9 @@ add_cea_modes(struct drm_connector *connector, struct edid *edid)
dbl = cea_db_payload_len(db);
if (cea_db_tag(db) == VIDEO_BLOCK)
- modes += do_cea_modes (connector, db+1, dbl);
+ modes += do_cea_modes(connector, db + 1, dbl);
+ else if (cea_db_is_hdmi_vsdb(db))
+ modes += do_hdmi_vsdb_modes(connector, db, dbl);
}
}
@@ -2570,21 +2738,6 @@ monitor_name(struct detailed_timing *t, void *data)
*(u8 **)data = t->data.other_data.data.str.str;
}
-static bool cea_db_is_hdmi_vsdb(const u8 *db)
-{
- int hdmi_id;
-
- if (cea_db_tag(db) != VENDOR_BLOCK)
- return false;
-
- if (cea_db_payload_len(db) < 5)
- return false;
-
- hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
-
- return hdmi_id == HDMI_IDENTIFIER;
-}
-
/**
* drm_edid_to_eld - build ELD from EDID
* @connector: connector corresponding to the HDMI/DP sink
@@ -2732,6 +2885,58 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads)
EXPORT_SYMBOL(drm_edid_to_sad);
/**
+ * drm_edid_to_speaker_allocation - extracts Speaker Allocation Data Blocks from EDID
+ * @edid: EDID to parse
+ * @sadb: pointer to the speaker block
+ *
+ * Looks for a CEA EDID block and extracts the Speaker Allocation Data Block from it.
+ * Note: the returned pointer needs to be kfree()d.
+ *
+ * Returns the number of found Speaker Allocation Blocks or a negative error code.
+ */
+int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
+{
+ int count = 0;
+ int i, start, end, dbl;
+ const u8 *cea;
+
+ cea = drm_find_cea_extension(edid);
+ if (!cea) {
+ DRM_DEBUG_KMS("SAD: no CEA Extension found\n");
+ return -ENOENT;
+ }
+
+ if (cea_revision(cea) < 3) {
+ DRM_DEBUG_KMS("SAD: wrong CEA revision\n");
+ return -ENOTSUPP;
+ }
+
+ if (cea_db_offsets(cea, &start, &end)) {
+ DRM_DEBUG_KMS("SAD: invalid data block offsets\n");
+ return -EPROTO;
+ }
+
+ for_each_cea_db(cea, i, start, end) {
+ const u8 *db = &cea[i];
+
+ if (cea_db_tag(db) == SPEAKER_BLOCK) {
+ dbl = cea_db_payload_len(db);
+
+ /* Speaker Allocation Data Block */
+ if (dbl == 3) {
+ *sadb = kmemdup(&db[1], dbl, GFP_KERNEL);
+ if (!*sadb)
+ return -ENOMEM;
+ count = dbl;
+ break;
+ }
+ }
+ }
+
+ return count;
+}
+EXPORT_SYMBOL(drm_edid_to_speaker_allocation);
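
A minimal usage sketch for the new helper; foo_parse_speakers() and its caller are hypothetical, only the positive return count and the kfree() requirement come from the documentation above.

static void foo_parse_speakers(struct edid *edid)
{
	u8 *sadb = NULL;
	int count;

	count = drm_edid_to_speaker_allocation(edid, &sadb);
	if (count > 0) {
		/* sadb[0] holds the CEA-861 speaker presence bits */
		/* ... program the audio hardware here ... */
		kfree(sadb);
	}
}
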
+
+/**
* drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond
* @connector: connector associated with the HDMI/DP sink
* @mode: the display mode
@@ -3102,9 +3307,10 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
if (err < 0)
return err;
+ if (mode->flags & DRM_MODE_FLAG_DBLCLK)
+ frame->pixel_repeat = 1;
+
frame->video_code = drm_match_cea_mode(mode);
- if (!frame->video_code)
- return 0;
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
@@ -3112,3 +3318,39 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
return 0;
}
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
+
+/**
+ * drm_hdmi_vendor_infoframe_from_display_mode() - fill an HDMI infoframe with
+ * data from a DRM display mode
+ * @frame: HDMI vendor infoframe
+ * @mode: DRM display mode
+ *
+ * Note that an HDMI vendor infoframe only needs to be sent when using a 4k or
+ * stereoscopic 3D mode. For any other mode this function returns -EINVAL,
+ * an error that can be safely ignored.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int
+drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
+ const struct drm_display_mode *mode)
+{
+ int err;
+ u8 vic;
+
+ if (!frame || !mode)
+ return -EINVAL;
+
+ vic = drm_match_hdmi_mode(mode);
+ if (!vic)
+ return -EINVAL;
+
+ err = hdmi_vendor_infoframe_init(frame);
+ if (err < 0)
+ return err;
+
+ frame->vic = vic;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
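
A rough usage sketch (foo_set_hdmi_infoframes() and the encoder plumbing are hypothetical): the documented -EINVAL return is simply treated as "no vendor infoframe needed for this mode".

static void foo_set_hdmi_infoframes(struct drm_encoder *encoder,
				    const struct drm_display_mode *mode)
{
	struct hdmi_vendor_infoframe frame;

	/* -EINVAL just means this mode needs no HDMI vendor infoframe */
	if (drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode) < 0)
		return;

	/* pack the frame and write it to the encoder hardware here */
}
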
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index c385cc5e730..61b5a47ad23 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -181,11 +181,11 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
#ifdef CONFIG_DEBUG_FS
-/**
+/*
* drm_fb_cma_describe() - Helper to dump information about a single
* CMA framebuffer object
*/
-void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
+static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
struct drm_fb_cma *fb_cma = to_fb_cma(fb);
int i, n = drm_format_num_planes(fb->pixel_format);
@@ -199,7 +199,6 @@ void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
drm_gem_cma_describe(fb_cma->obj[i], m);
}
}
-EXPORT_SYMBOL_GPL(drm_fb_cma_describe);
/**
* drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
new file mode 100644
index 00000000000..e788882d902
--- /dev/null
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_flip_work.h"
+
+/**
+ * drm_flip_work_queue - queue work
+ * @work: the flip-work
+ * @val: the value to queue
+ *
+ * Queues work that will later be run (passed back to the drm_flip_func_t
+ * func) on a workqueue after drm_flip_work_commit() is called.
+ */
+void drm_flip_work_queue(struct drm_flip_work *work, void *val)
+{
+ if (kfifo_put(&work->fifo, (const void **)&val)) {
+ atomic_inc(&work->pending);
+ } else {
+ DRM_ERROR("%s fifo full!\n", work->name);
+ work->func(work, val);
+ }
+}
+EXPORT_SYMBOL(drm_flip_work_queue);
+
+/**
+ * drm_flip_work_commit - commit queued work
+ * @work: the flip-work
+ * @wq: the work-queue to run the queued work on
+ *
+ * Trigger work previously queued by drm_flip_work_queue() to run
+ * on a workqueue. The typical usage would be to queue work (via
+ * drm_flip_work_queue()) at any point (from vblank irq and/or
+ * prior), and then from vblank irq commit the queued work.
+ */
+void drm_flip_work_commit(struct drm_flip_work *work,
+ struct workqueue_struct *wq)
+{
+ uint32_t pending = atomic_read(&work->pending);
+ atomic_add(pending, &work->count);
+ atomic_sub(pending, &work->pending);
+ queue_work(wq, &work->worker);
+}
+EXPORT_SYMBOL(drm_flip_work_commit);
+
+static void flip_worker(struct work_struct *w)
+{
+ struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
+ uint32_t count = atomic_read(&work->count);
+ void *val = NULL;
+
+ atomic_sub(count, &work->count);
+
+ while (count--)
+ if (!WARN_ON(!kfifo_get(&work->fifo, &val)))
+ work->func(work, val);
+}
+
+/**
+ * drm_flip_work_init - initialize flip-work
+ * @work: the flip-work to initialize
+ * @size: the max queue depth
+ * @name: debug name
+ * @func: the callback work function
+ *
+ * Initializes/allocates resources for the flip-work
+ *
+ * RETURNS:
+ * Zero on success, error code on failure.
+ */
+int drm_flip_work_init(struct drm_flip_work *work, int size,
+ const char *name, drm_flip_func_t func)
+{
+ int ret;
+
+ work->name = name;
+ atomic_set(&work->count, 0);
+ atomic_set(&work->pending, 0);
+ work->func = func;
+
+ ret = kfifo_alloc(&work->fifo, size, GFP_KERNEL);
+ if (ret) {
+ DRM_ERROR("could not allocate %s fifo\n", name);
+ return ret;
+ }
+
+ INIT_WORK(&work->worker, flip_worker);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_flip_work_init);
+
+/**
+ * drm_flip_work_cleanup - cleans up flip-work
+ * @work: the flip-work to cleanup
+ *
+ * Destroy resources allocated for the flip-work
+ */
+void drm_flip_work_cleanup(struct drm_flip_work *work)
+{
+ WARN_ON(!kfifo_is_empty(&work->fifo));
+ kfifo_free(&work->fifo);
+}
+EXPORT_SYMBOL(drm_flip_work_cleanup);
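
A sketch of the intended usage pattern, built around a hypothetical foo_crtc driver structure: outgoing framebuffers are queued from the flip path, and the actual drm_framebuffer_unreference() calls run later on a workqueue.

struct foo_crtc {
	struct drm_flip_work unref_work;
	/* ... */
};

static void foo_unref_worker(struct drm_flip_work *work, void *val)
{
	drm_framebuffer_unreference(val);
}

static int foo_crtc_init(struct foo_crtc *foo)
{
	/* allow up to 16 framebuffers to be in flight at once */
	return drm_flip_work_init(&foo->unref_work, 16,
				  "fb unref", foo_unref_worker);
}

static void foo_flip_done(struct foo_crtc *foo, struct drm_framebuffer *old_fb)
{
	/* called once the flip has completed, e.g. from the vblank irq */
	drm_flip_work_queue(&foo->unref_work, old_fb);
	drm_flip_work_commit(&foo->unref_work, system_wq);
}
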
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 3a24385e036..4be8e09a32e 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -48,59 +48,21 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
static int drm_setup(struct drm_device * dev)
{
- int i;
int ret;
- if (dev->driver->firstopen) {
+ if (dev->driver->firstopen &&
+ !drm_core_check_feature(dev, DRIVER_MODESET)) {
ret = dev->driver->firstopen(dev);
if (ret != 0)
return ret;
}
- atomic_set(&dev->ioctl_count, 0);
- atomic_set(&dev->vma_count, 0);
-
- if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
- !drm_core_check_feature(dev, DRIVER_MODESET)) {
- dev->buf_use = 0;
- atomic_set(&dev->buf_alloc, 0);
-
- i = drm_dma_setup(dev);
- if (i < 0)
- return i;
- }
-
- for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
- atomic_set(&dev->counts[i], 0);
-
- dev->sigdata.lock = NULL;
-
- dev->context_flag = 0;
- dev->interrupt_flag = 0;
- dev->dma_flag = 0;
- dev->last_context = 0;
- dev->last_switch = 0;
- dev->last_checked = 0;
- init_waitqueue_head(&dev->context_wait);
- dev->if_version = 0;
-
- dev->ctx_start = 0;
- dev->lck_start = 0;
+ ret = drm_legacy_dma_setup(dev);
+ if (ret < 0)
+ return ret;
- dev->buf_async = NULL;
- init_waitqueue_head(&dev->buf_readers);
- init_waitqueue_head(&dev->buf_writers);
DRM_DEBUG("\n");
-
- /*
- * The kernel's context could be created here, but is now created
- * in drm_dma_enqueue. This is more resource-efficient for
- * hardware that does not do DMA, but may mean that
- * drm_select_queue fails between the time the interrupt is
- * initialized and the time the queues are initialized.
- */
-
return 0;
}
@@ -257,7 +219,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
return -EBUSY; /* No exclusive opens */
if (!drm_cpu_valid())
return -EINVAL;
- if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
+ if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
return -EINVAL;
DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor_id);
@@ -300,10 +262,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
goto out_prime_destroy;
}
-
- /* if there is no current master make this fd it */
+ /* if there is no current master make this fd the master, but do not
+ * create any master object for render clients */
mutex_lock(&dev->struct_mutex);
- if (!priv->minor->master) {
+ if (!priv->minor->master && !drm_is_render_client(priv)) {
/* create a new master */
priv->minor->master = drm_master_create(priv->minor);
if (!priv->minor->master) {
@@ -341,12 +303,11 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
goto out_close;
}
}
- mutex_unlock(&dev->struct_mutex);
- } else {
+ } else if (!drm_is_render_client(priv)) {
/* get a reference to the master */
priv->master = drm_master_get(priv->minor->master);
- mutex_unlock(&dev->struct_mutex);
}
+ mutex_unlock(&dev->struct_mutex);
mutex_lock(&dev->struct_mutex);
list_add(&priv->lhead, &dev->filelist);
@@ -388,18 +349,6 @@ out_put_pid:
return ret;
}
-/** No-op. */
-int drm_fasync(int fd, struct file *filp, int on)
-{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->minor->dev;
-
- DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
- (long)old_encode_dev(priv->minor->device));
- return fasync_helper(fd, filp, on, &dev->buf_async);
-}
-EXPORT_SYMBOL(drm_fasync);
-
static void drm_master_release(struct drm_device *dev, struct file *filp)
{
struct drm_file *file_priv = filp->private_data;
@@ -490,26 +439,7 @@ int drm_release(struct inode *inode, struct file *filp)
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_release(dev, file_priv);
- mutex_lock(&dev->ctxlist_mutex);
- if (!list_empty(&dev->ctxlist)) {
- struct drm_ctx_list *pos, *n;
-
- list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
- if (pos->tag == file_priv &&
- pos->handle != DRM_KERNEL_CONTEXT) {
- if (dev->driver->context_dtor)
- dev->driver->context_dtor(dev,
- pos->handle);
-
- drm_ctxbitmap_free(dev, pos->handle);
-
- list_del(&pos->head);
- kfree(pos);
- --dev->ctx_count;
- }
- }
- }
- mutex_unlock(&dev->ctxlist_mutex);
+ drm_legacy_ctxbitmap_release(dev, file_priv);
mutex_lock(&dev->struct_mutex);
@@ -547,7 +477,8 @@ int drm_release(struct inode *inode, struct file *filp)
iput(container_of(dev->dev_mapping, struct inode, i_data));
/* drop the reference held my the file priv */
- drm_master_put(&file_priv->master);
+ if (file_priv->master)
+ drm_master_put(&file_priv->master);
file_priv->is_master = 0;
list_del(&file_priv->lhead);
mutex_unlock(&dev->struct_mutex);
@@ -555,6 +486,7 @@ int drm_release(struct inode *inode, struct file *filp)
if (dev->driver->postclose)
dev->driver->postclose(dev, file_priv);
+
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_prime_destroy_file_private(&file_priv->prime);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 603f256152e..49293bdc972 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -37,6 +37,7 @@
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
/** @file drm_gem.c
*
@@ -92,7 +93,7 @@ drm_gem_init(struct drm_device *dev)
{
struct drm_gem_mm *mm;
- spin_lock_init(&dev->object_name_lock);
+ mutex_init(&dev->object_name_lock);
idr_init(&dev->object_name_idr);
mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
@@ -102,14 +103,9 @@ drm_gem_init(struct drm_device *dev)
}
dev->mm_private = mm;
-
- if (drm_ht_create(&mm->offset_hash, 12)) {
- kfree(mm);
- return -ENOMEM;
- }
-
- drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
- DRM_FILE_PAGE_OFFSET_SIZE);
+ drm_vma_offset_manager_init(&mm->vma_manager,
+ DRM_FILE_PAGE_OFFSET_START,
+ DRM_FILE_PAGE_OFFSET_SIZE);
return 0;
}
@@ -119,8 +115,7 @@ drm_gem_destroy(struct drm_device *dev)
{
struct drm_gem_mm *mm = dev->mm_private;
- drm_mm_takedown(&mm->offset_manager);
- drm_ht_remove(&mm->offset_hash);
+ drm_vma_offset_manager_destroy(&mm->vma_manager);
kfree(mm);
dev->mm_private = NULL;
}
@@ -132,16 +127,14 @@ drm_gem_destroy(struct drm_device *dev)
int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size)
{
- BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+ struct file *filp;
- obj->dev = dev;
- obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
- if (IS_ERR(obj->filp))
- return PTR_ERR(obj->filp);
+ filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+ if (IS_ERR(filp))
+ return PTR_ERR(filp);
- kref_init(&obj->refcount);
- atomic_set(&obj->handle_count, 0);
- obj->size = size;
+ drm_gem_private_object_init(dev, obj, size);
+ obj->filp = filp;
return 0;
}
@@ -152,8 +145,8 @@ EXPORT_SYMBOL(drm_gem_object_init);
* no GEM provided backing store. Instead the caller is responsible for
* backing the object and handling it.
*/
-int drm_gem_private_object_init(struct drm_device *dev,
- struct drm_gem_object *obj, size_t size)
+void drm_gem_private_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size)
{
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
@@ -161,10 +154,9 @@ int drm_gem_private_object_init(struct drm_device *dev,
obj->filp = NULL;
kref_init(&obj->refcount);
- atomic_set(&obj->handle_count, 0);
+ obj->handle_count = 0;
obj->size = size;
-
- return 0;
+ drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
@@ -200,16 +192,79 @@ EXPORT_SYMBOL(drm_gem_object_alloc);
static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
- if (obj->import_attach) {
- drm_prime_remove_buf_handle(&filp->prime,
- obj->import_attach->dmabuf);
+ /*
+ * Note: obj->dma_buf can't disappear as long as we still hold a
+ * handle reference in obj->handle_count.
+ */
+ mutex_lock(&filp->prime.lock);
+ if (obj->dma_buf) {
+ drm_prime_remove_buf_handle_locked(&filp->prime,
+ obj->dma_buf);
}
- if (obj->export_dma_buf) {
- drm_prime_remove_buf_handle(&filp->prime,
- obj->export_dma_buf);
+ mutex_unlock(&filp->prime.lock);
+}
+
+static void drm_gem_object_ref_bug(struct kref *list_kref)
+{
+ BUG();
+}
+
+/**
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
+ */
+static void drm_gem_object_handle_free(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+
+ /* Remove any name for this object */
+ if (obj->name) {
+ idr_remove(&dev->object_name_idr, obj->name);
+ obj->name = 0;
+ /*
+ * The object name held a reference to this object, drop
+ * that now.
+ *
+ * This cannot be the last reference, since the handle holds one too.
+ */
+ kref_put(&obj->refcount, drm_gem_object_ref_bug);
}
}
+static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
+{
+ /* Unbreak the reference cycle if we have an exported dma_buf. */
+ if (obj->dma_buf) {
+ dma_buf_put(obj->dma_buf);
+ obj->dma_buf = NULL;
+ }
+}
+
+static void
+drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
+{
+ if (WARN_ON(obj->handle_count == 0))
+ return;
+
+ /*
+ * Must bump handle count first as this may be the last
+ * ref, in which case the object would disappear before we
+ * checked for a name
+ */
+
+ mutex_lock(&obj->dev->object_name_lock);
+ if (--obj->handle_count == 0) {
+ drm_gem_object_handle_free(obj);
+ drm_gem_object_exported_dma_buf_free(obj);
+ }
+ mutex_unlock(&obj->dev->object_name_lock);
+
+ drm_gem_object_unreference_unlocked(obj);
+}
+
/**
* Removes the mapping from handle to filp for this object.
*/
@@ -242,7 +297,9 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
- drm_gem_remove_prime_handles(obj, filp);
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_gem_remove_prime_handles(obj, filp);
+ drm_vma_node_revoke(&obj->vma_node, filp->filp);
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, filp);
@@ -253,18 +310,36 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
EXPORT_SYMBOL(drm_gem_handle_delete);
/**
- * Create a handle for this object. This adds a handle reference
- * to the object, which includes a regular reference count. Callers
- * will likely want to dereference the object afterwards.
+ * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
+ *
+ * This implements the ->dumb_destroy kms driver callback for drivers which use
+ * gem to manage their backing storage.
+ */
+int drm_gem_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file, handle);
+}
+EXPORT_SYMBOL(drm_gem_dumb_destroy);
+
+/**
+ * drm_gem_handle_create_tail - internal function to create a handle
+ *
+ * This expects the dev->object_name_lock to be held already and will drop it
+ * before returning. Used to avoid races in establishing new handles when
+ * importing an object from either a flink name or a dma-buf.
*/
int
-drm_gem_handle_create(struct drm_file *file_priv,
- struct drm_gem_object *obj,
- u32 *handlep)
+drm_gem_handle_create_tail(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ u32 *handlep)
{
struct drm_device *dev = obj->dev;
int ret;
+ WARN_ON(!mutex_is_locked(&dev->object_name_lock));
+
/*
* Get the user-visible handle using idr. Preload and perform
* allocation under our spinlock.
@@ -273,14 +348,22 @@ drm_gem_handle_create(struct drm_file *file_priv,
spin_lock(&file_priv->table_lock);
ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
-
+ drm_gem_object_reference(obj);
+ obj->handle_count++;
spin_unlock(&file_priv->table_lock);
idr_preload_end();
- if (ret < 0)
+ mutex_unlock(&dev->object_name_lock);
+ if (ret < 0) {
+ drm_gem_object_handle_unreference_unlocked(obj);
return ret;
+ }
*handlep = ret;
- drm_gem_object_handle_reference(obj);
+ ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
+ if (ret) {
+ drm_gem_handle_delete(file_priv, *handlep);
+ return ret;
+ }
if (dev->driver->gem_open_object) {
ret = dev->driver->gem_open_object(obj, file_priv);
@@ -292,6 +375,21 @@ drm_gem_handle_create(struct drm_file *file_priv,
return 0;
}
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ u32 *handlep)
+{
+ mutex_lock(&obj->dev->object_name_lock);
+
+ return drm_gem_handle_create_tail(file_priv, obj, handlep);
+}
EXPORT_SYMBOL(drm_gem_handle_create);
@@ -306,81 +404,155 @@ drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_map_list *list = &obj->map_list;
- drm_ht_remove_item(&mm->offset_hash, &list->hash);
- drm_mm_put_block(list->file_offset_node);
- kfree(list->map);
- list->map = NULL;
+ drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);
/**
- * drm_gem_create_mmap_offset - create a fake mmap offset for an object
+ * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
* @obj: obj in question
+ * @size: the virtual size
*
* GEM memory mapping works by handing back to userspace a fake mmap offset
* it can use in a subsequent mmap(2) call. The DRM core code then looks
* up the object based on the offset and sets up the various memory mapping
* structures.
*
- * This routine allocates and attaches a fake offset for @obj.
+ * This routine allocates and attaches a fake offset for @obj, in cases where
+ * the virtual size differs from the physical size (i.e. obj->size). Otherwise
+ * just use drm_gem_create_mmap_offset().
*/
int
-drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
struct drm_device *dev = obj->dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_map_list *list;
- struct drm_local_map *map;
- int ret;
- /* Set the object up for mmap'ing */
- list = &obj->map_list;
- list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
- if (!list->map)
- return -ENOMEM;
-
- map = list->map;
- map->type = _DRM_GEM;
- map->size = obj->size;
- map->handle = obj;
+ return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
+ size / PAGE_SIZE);
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
- /* Get a DRM GEM mmap offset allocated... */
- list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
- obj->size / PAGE_SIZE, 0, false);
+/**
+ * drm_gem_create_mmap_offset - create a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call. The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj.
+ */
+int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+ return drm_gem_create_mmap_offset_size(obj, obj->size);
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset);
- if (!list->file_offset_node) {
- DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
- ret = -ENOSPC;
- goto out_free_list;
+/**
+ * drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * from shmem
+ * @obj: obj in question
+ * @gfpmask: gfp mask of requested pages
+ */
+struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+{
+ struct inode *inode;
+ struct address_space *mapping;
+ struct page *p, **pages;
+ int i, npages;
+
+ /* This is the shared memory object that backs the GEM resource */
+ inode = file_inode(obj->filp);
+ mapping = inode->i_mapping;
+
+ /* We already BUG_ON() for non-page-aligned sizes in
+ * drm_gem_object_init(), so we should never hit this unless
+ * driver author is doing something really wrong:
+ */
+ WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (pages == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ gfpmask |= mapping_gfp_mask(mapping);
+
+ for (i = 0; i < npages; i++) {
+ p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+ if (IS_ERR(p))
+ goto fail;
+ pages[i] = p;
+
+ /* There is a hypothetical issue w/ drivers that require
+ * buffer memory in the low 4GB.. if the pages are un-
+ * pinned, and swapped out, they can end up swapped back
+ * in above 4GB. If pages are already in memory, then
+ * shmem_read_mapping_page_gfp will ignore the gfpmask,
+ * even if the already in-memory page disobeys the mask.
+ *
+ * It is only a theoretical issue today, because none of
+ * the devices with this limitation can be populated with
+ * enough memory to trigger the issue. But this BUG_ON()
+ * is here as a reminder in case the problem with
+ * shmem_read_mapping_page_gfp() isn't solved by the time
+ * it does become a real issue.
+ *
+ * See this thread: http://lkml.org/lkml/2011/7/11/238
+ */
+ BUG_ON((gfpmask & __GFP_DMA32) &&
+ (page_to_pfn(p) >= 0x00100000UL));
}
- list->file_offset_node = drm_mm_get_block(list->file_offset_node,
- obj->size / PAGE_SIZE, 0);
- if (!list->file_offset_node) {
- ret = -ENOMEM;
- goto out_free_list;
- }
+ return pages;
- list->hash.key = list->file_offset_node->start;
- ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
- if (ret) {
- DRM_ERROR("failed to add to map hash\n");
- goto out_free_mm;
- }
+fail:
+ while (i--)
+ page_cache_release(pages[i]);
- return 0;
+ drm_free_large(pages);
+ return ERR_CAST(p);
+}
+EXPORT_SYMBOL(drm_gem_get_pages);
-out_free_mm:
- drm_mm_put_block(list->file_offset_node);
-out_free_list:
- kfree(list->map);
- list->map = NULL;
+/**
+ * drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ * @dirty: if true, pages will be marked as dirty
+ * @accessed: if true, the pages will be marked as accessed
+ */
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+ bool dirty, bool accessed)
+{
+ int i, npages;
- return ret;
+ /* We already BUG_ON() for non-page-aligned sizes in
+ * drm_gem_object_init(), so we should never hit this unless
+ * driver author is doing something really wrong:
+ */
+ WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ for (i = 0; i < npages; i++) {
+ if (dirty)
+ set_page_dirty(pages[i]);
+
+ if (accessed)
+ mark_page_accessed(pages[i]);
+
+ /* Undo the reference we took when populating the table */
+ page_cache_release(pages[i]);
+ }
+
+ drm_free_large(pages);
}
-EXPORT_SYMBOL(drm_gem_create_mmap_offset);
+EXPORT_SYMBOL(drm_gem_put_pages);
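
A short sketch of how a shmem-backed driver might use the two new helpers, assuming a hypothetical foo_gem_object that embeds the GEM object as ->base and caches the page array in ->pages.

static int foo_gem_pin_pages(struct foo_gem_object *foo)
{
	struct page **pages;

	pages = drm_gem_get_pages(&foo->base, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	foo->pages = pages;
	return 0;
}

static void foo_gem_unpin_pages(struct foo_gem_object *foo)
{
	/* mark the pages dirty and accessed so their contents survive swap */
	drm_gem_put_pages(&foo->base, foo->pages, true, true);
	foo->pages = NULL;
}
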
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
@@ -445,8 +617,14 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
if (obj == NULL)
return -ENOENT;
+ mutex_lock(&dev->object_name_lock);
idr_preload(GFP_KERNEL);
- spin_lock(&dev->object_name_lock);
+ /* prevent races with concurrent gem_close. */
+ if (obj->handle_count == 0) {
+ ret = -ENOENT;
+ goto err;
+ }
+
if (!obj->name) {
ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
if (ret < 0)
@@ -462,8 +640,8 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
ret = 0;
err:
- spin_unlock(&dev->object_name_lock);
idr_preload_end();
+ mutex_unlock(&dev->object_name_lock);
drm_gem_object_unreference_unlocked(obj);
return ret;
}
@@ -486,15 +664,17 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
- spin_lock(&dev->object_name_lock);
+ mutex_lock(&dev->object_name_lock);
obj = idr_find(&dev->object_name_idr, (int) args->name);
- if (obj)
+ if (obj) {
drm_gem_object_reference(obj);
- spin_unlock(&dev->object_name_lock);
- if (!obj)
+ } else {
+ mutex_unlock(&dev->object_name_lock);
return -ENOENT;
+ }
- ret = drm_gem_handle_create(file_priv, obj, &handle);
+ /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+ ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
drm_gem_object_unreference_unlocked(obj);
if (ret)
return ret;
@@ -527,7 +707,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
- drm_gem_remove_prime_handles(obj, file_priv);
+ if (drm_core_check_feature(dev, DRIVER_PRIME))
+ drm_gem_remove_prime_handles(obj, file_priv);
+ drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
if (dev->driver->gem_close_object)
dev->driver->gem_close_object(obj, file_priv);
@@ -553,6 +735,8 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
void
drm_gem_object_release(struct drm_gem_object *obj)
{
+ WARN_ON(obj->dma_buf);
+
if (obj->filp)
fput(obj->filp);
}
@@ -577,41 +761,6 @@ drm_gem_object_free(struct kref *kref)
}
EXPORT_SYMBOL(drm_gem_object_free);
-static void drm_gem_object_ref_bug(struct kref *list_kref)
-{
- BUG();
-}
-
-/**
- * Called after the last handle to the object has been closed
- *
- * Removes any name for the object. Note that this must be
- * called before drm_gem_object_free or we'll be touching
- * freed memory
- */
-void drm_gem_object_handle_free(struct drm_gem_object *obj)
-{
- struct drm_device *dev = obj->dev;
-
- /* Remove any name for this object */
- spin_lock(&dev->object_name_lock);
- if (obj->name) {
- idr_remove(&dev->object_name_idr, obj->name);
- obj->name = 0;
- spin_unlock(&dev->object_name_lock);
- /*
- * The object name held a reference to this object, drop
- * that now.
- *
- * This cannot be the last reference, since the handle holds one too.
- */
- kref_put(&obj->refcount, drm_gem_object_ref_bug);
- } else
- spin_unlock(&dev->object_name_lock);
-
-}
-EXPORT_SYMBOL(drm_gem_object_handle_free);
-
void drm_gem_vm_open(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
@@ -653,6 +802,10 @@ EXPORT_SYMBOL(drm_gem_vm_close);
* the GEM object is not looked up based on its fake offset. To implement the
* DRM mmap operation, drivers should use the drm_gem_mmap() function.
*
+ * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
+ * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
+ * callers must verify access restrictions before calling this helper.
+ *
* NOTE: This function has to be protected with dev->struct_mutex
*
* Return 0 or success or -EINVAL if the object size is smaller than the VMA
@@ -701,14 +854,17 @@ EXPORT_SYMBOL(drm_gem_mmap_obj);
* Look up the GEM object based on the offset passed in (vma->vm_pgoff will
* contain the fake offset we created when the GTT map ioctl was called on
* the object) and map it with a call to drm_gem_mmap_obj().
+ *
+ * If the caller is not granted access to the buffer object, the mmap will fail
+ * with EACCES. Please see the vma manager for more information.
*/
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_local_map *map = NULL;
- struct drm_hash_item *hash;
+ struct drm_gem_object *obj;
+ struct drm_vma_offset_node *node;
int ret = 0;
if (drm_device_is_unplugged(dev))
@@ -716,21 +872,19 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
mutex_lock(&dev->struct_mutex);
- if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+ node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
+ vma_pages(vma));
+ if (!node) {
mutex_unlock(&dev->struct_mutex);
return drm_mmap(filp, vma);
+ } else if (!drm_vma_node_is_allowed(node, filp)) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EACCES;
}
- map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
- if (!map ||
- ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
- ret = -EPERM;
- goto out_unlock;
- }
-
- ret = drm_gem_mmap_obj(map->handle, map->size, vma);
+ obj = container_of(node, struct drm_gem_object, vma_node);
+ ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
-out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
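
For completeness, the usual way a driver exposes this path is by pointing its file_operations at the DRM core helpers; drm_gem_mmap() then resolves the fake offset and enforces the drm_vma_node_allow()-based access check shown above. foo_driver_fops is hypothetical.

static const struct file_operations foo_driver_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.mmap		= drm_gem_mmap,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
};
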
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index ece72a8ac24..6b51bf90df0 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -27,11 +27,7 @@
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_gem_cma_helper.h>
-
-static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
-{
- return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
-}
+#include <drm/drm_vma_manager.h>
/*
* __drm_gem_cma_create - Create a GEM CMA object without allocating memory
@@ -172,8 +168,7 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
{
struct drm_gem_cma_object *cma_obj;
- if (gem_obj->map_list.map)
- drm_gem_free_mmap_offset(gem_obj);
+ drm_gem_free_mmap_offset(gem_obj);
cma_obj = to_drm_gem_cma_obj(gem_obj);
@@ -215,7 +210,7 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv,
cma_obj = drm_gem_cma_create_with_handle(file_priv, dev,
args->size, &args->handle);
- return PTR_RET(cma_obj);
+ return PTR_ERR_OR_ZERO(cma_obj);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
@@ -237,7 +232,7 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
return -EINVAL;
}
- *offset = get_gem_mmap_offset(gem_obj);
+ *offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
drm_gem_object_unreference(gem_obj);
@@ -286,27 +281,16 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
}
EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
-/*
- * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function
- */
-int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *drm, unsigned int handle)
-{
- return drm_gem_handle_delete(file_priv, handle);
-}
-EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_destroy);
-
#ifdef CONFIG_DEBUG_FS
void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m)
{
struct drm_gem_object *obj = &cma_obj->base;
struct drm_device *dev = obj->dev;
- uint64_t off = 0;
+ uint64_t off;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- if (obj->map_list.map)
- off = (uint64_t)obj->map_list.hash.key;
+ off = drm_vma_node_start(&obj->vma_node);
seq_printf(m, "%2d (%2d) %08llx %08Zx %p %d",
obj->name, obj->refcount.refcount.counter,
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index d4b20ceda3f..53298320080 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -207,7 +207,7 @@ static int drm_gem_one_name_info(int id, void *ptr, void *data)
seq_printf(m, "%6d %8zd %7d %8d\n",
obj->name, obj->size,
- atomic_read(&obj->handle_count),
+ obj->handle_count,
atomic_read(&obj->refcount.refcount));
return 0;
}
@@ -218,7 +218,11 @@ int drm_gem_name_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
seq_printf(m, " name size handles refcount\n");
+
+ mutex_lock(&dev->object_name_lock);
idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
+ mutex_unlock(&dev->object_name_lock);
+
return 0;
}
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index ffd7a7ba70d..07247e2855a 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -217,29 +217,30 @@ int drm_getclient(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_client *client = data;
- struct drm_file *pt;
- int idx;
- int i;
- idx = client->idx;
- i = 0;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(pt, &dev->filelist, lhead) {
- if (i++ >= idx) {
- client->auth = pt->authenticated;
- client->pid = pid_vnr(pt->pid);
- client->uid = from_kuid_munged(current_user_ns(), pt->uid);
- client->magic = pt->magic;
- client->iocs = pt->ioctl_count;
- mutex_unlock(&dev->struct_mutex);
-
- return 0;
- }
+ /*
+ * Hollowed-out getclient ioctl to keep some dead old drm tests/tools
+ * from breaking completely. Userspace tools stop enumerating once they
+ * get -EINVAL, hence this is the return value we need to hand back when
+ * no clients are tracked.
+ *
+ * Unfortunately some clients (*cough* libva *cough*) use this in a fun
+ * attempt to figure out whether they're authenticated or not. Since
+ * that's the only thing they care about, give it to them directly
+ * instead of walking one giant list.
+ */
+ if (client->idx == 0) {
+ client->auth = file_priv->authenticated;
+ client->pid = pid_vnr(file_priv->pid);
+ client->uid = from_kuid_munged(current_user_ns(),
+ file_priv->uid);
+ client->magic = 0;
+ client->iocs = 0;
+
+ return 0;
+ } else {
+ return -EINVAL;
}
- mutex_unlock(&dev->struct_mutex);
-
- return -EINVAL;
}
/**
@@ -256,21 +257,10 @@ int drm_getstats(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_stats *stats = data;
- int i;
+ /* Clear stats to prevent userspace from eating its stack garbage. */
memset(stats, 0, sizeof(*stats));
- for (i = 0; i < dev->counters; i++) {
- if (dev->types[i] == _DRM_STAT_LOCK)
- stats->data[i].value =
- (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
- else
- stats->data[i].value = atomic_read(&dev->counts[i]);
- stats->data[i].type = dev->types[i];
- }
-
- stats->count = dev->counters;
-
return 0;
}
@@ -303,6 +293,9 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
case DRM_CAP_TIMESTAMP_MONOTONIC:
req->value = drm_timestamp_monotonic;
break;
+ case DRM_CAP_ASYNC_PAGE_FLIP:
+ req->value = dev->mode_config.async_page_flip;
+ break;
default:
return -EINVAL;
}
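
Userspace can probe the new capability before attempting asynchronous flips; a sketch assuming libdrm's drmGetCap() is available, with foo_has_async_flip() being hypothetical.

static bool foo_has_async_flip(int fd)
{
	uint64_t value = 0;

	if (drmGetCap(fd, DRM_CAP_ASYNC_PAGE_FLIP, &value))
		return false;

	return value != 0;
}
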
@@ -352,9 +345,6 @@ int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_pri
retcode = -EINVAL;
goto done;
}
-
- if (dev->driver->set_version)
- dev->driver->set_version(dev, sv);
}
done:
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 8bcce7866d3..f92da0a32f0 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -708,7 +708,10 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
/* Subtract time delta from raw timestamp to get final
* vblank_time timestamp for end of vblank.
*/
- etime = ktime_sub_ns(etime, delta_ns);
+ if (delta_ns < 0)
+ etime = ktime_add_ns(etime, -delta_ns);
+ else
+ etime = ktime_sub_ns(etime, delta_ns);
*vblank_time = ktime_to_timeval(etime);
DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 126d50ea181..64e44fad8ae 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -86,7 +86,6 @@ void drm_free_agp(DRM_AGP_MEM * handle, int pages)
{
agp_free_memory(handle);
}
-EXPORT_SYMBOL(drm_free_agp);
/** Wrapper around agp_bind_memory() */
int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
@@ -99,7 +98,6 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
{
return agp_unbind_memory(handle);
}
-EXPORT_SYMBOL(drm_unbind_agp);
#else /* __OS_HAS_AGP */
static inline void *agp_remap(unsigned long offset, unsigned long size,
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 543b9b3171d..af93cc55259 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -49,58 +49,18 @@
#define MM_UNUSED_TARGET 4
-static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
-{
- struct drm_mm_node *child;
-
- if (atomic)
- child = kzalloc(sizeof(*child), GFP_ATOMIC);
- else
- child = kzalloc(sizeof(*child), GFP_KERNEL);
-
- if (unlikely(child == NULL)) {
- spin_lock(&mm->unused_lock);
- if (list_empty(&mm->unused_nodes))
- child = NULL;
- else {
- child =
- list_entry(mm->unused_nodes.next,
- struct drm_mm_node, node_list);
- list_del(&child->node_list);
- --mm->num_unused;
- }
- spin_unlock(&mm->unused_lock);
- }
- return child;
-}
-
-/* drm_mm_pre_get() - pre allocate drm_mm_node structure
- * drm_mm: memory manager struct we are pre-allocating for
- *
- * Returns 0 on success or -ENOMEM if allocation fails.
- */
-int drm_mm_pre_get(struct drm_mm *mm)
-{
- struct drm_mm_node *node;
-
- spin_lock(&mm->unused_lock);
- while (mm->num_unused < MM_UNUSED_TARGET) {
- spin_unlock(&mm->unused_lock);
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- spin_lock(&mm->unused_lock);
-
- if (unlikely(node == NULL)) {
- int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
- spin_unlock(&mm->unused_lock);
- return ret;
- }
- ++mm->num_unused;
- list_add_tail(&node->node_list, &mm->unused_nodes);
- }
- spin_unlock(&mm->unused_lock);
- return 0;
-}
-EXPORT_SYMBOL(drm_mm_pre_get);
+static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ enum drm_mm_search_flags flags);
+static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ unsigned long start,
+ unsigned long end,
+ enum drm_mm_search_flags flags);
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
@@ -147,33 +107,27 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
}
}
-struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
- unsigned long start,
- unsigned long size,
- bool atomic)
+int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
- struct drm_mm_node *hole, *node;
- unsigned long end = start + size;
+ struct drm_mm_node *hole;
+ unsigned long end = node->start + node->size;
unsigned long hole_start;
unsigned long hole_end;
+ BUG_ON(node == NULL);
+
+ /* Find the relevant hole to add our node to */
drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
- if (hole_start > start || hole_end < end)
+ if (hole_start > node->start || hole_end < end)
continue;
- node = drm_mm_kmalloc(mm, atomic);
- if (unlikely(node == NULL))
- return NULL;
-
- node->start = start;
- node->size = size;
node->mm = mm;
node->allocated = 1;
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole->node_list);
- if (start == hole_start) {
+ if (node->start == hole_start) {
hole->hole_follows = 0;
list_del_init(&hole->hole_stack);
}
@@ -184,31 +138,14 @@ struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
node->hole_follows = 1;
}
- return node;
+ return 0;
}
- WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
- return NULL;
-}
-EXPORT_SYMBOL(drm_mm_create_block);
-
-struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
- unsigned long size,
- unsigned alignment,
- unsigned long color,
- int atomic)
-{
- struct drm_mm_node *node;
-
- node = drm_mm_kmalloc(hole_node->mm, atomic);
- if (unlikely(node == NULL))
- return NULL;
-
- drm_mm_insert_helper(hole_node, node, size, alignment, color);
-
- return node;
+ WARN(1, "no hole found for node 0x%lx + 0x%lx\n",
+ node->start, node->size);
+ return -ENOSPC;
}
-EXPORT_SYMBOL(drm_mm_get_block_generic);
+EXPORT_SYMBOL(drm_mm_reserve_node);
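
A sketch of the intended use of drm_mm_reserve_node(): carving a fixed, pre-decided range (for example a firmware-initialized scanout buffer) out of a drm_mm address space. foo_reserve_firmware_fb() and its callers are hypothetical; only the node->start/node->size contract comes from the function above.

static int foo_reserve_firmware_fb(struct drm_mm *mm,
				   struct drm_mm_node *node,
				   unsigned long start, unsigned long size)
{
	memset(node, 0, sizeof(*node));
	node->start = start;
	node->size = size;

	/* returns -ENOSPC if no hole covers [start, start + size) */
	return drm_mm_reserve_node(mm, node);
}
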
/**
* Search for free space and insert a preallocated memory node. Returns
@@ -217,12 +154,13 @@ EXPORT_SYMBOL(drm_mm_get_block_generic);
*/
int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment,
- unsigned long color)
+ unsigned long color,
+ enum drm_mm_search_flags flags)
{
struct drm_mm_node *hole_node;
hole_node = drm_mm_search_free_generic(mm, size, alignment,
- color, 0);
+ color, flags);
if (!hole_node)
return -ENOSPC;
@@ -231,13 +169,6 @@ int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node,
}
EXPORT_SYMBOL(drm_mm_insert_node_generic);
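
For contrast with the reservation helper above, a normal allocation now passes the new search flags explicitly; foo_alloc_range() is hypothetical.

static int foo_alloc_range(struct drm_mm *mm, struct drm_mm_node *node,
			   unsigned long size, unsigned alignment)
{
	/* DRM_MM_SEARCH_BEST picks the smallest hole that fits;
	 * DRM_MM_SEARCH_DEFAULT would take the first suitable hole. */
	return drm_mm_insert_node_generic(mm, node, size, alignment, 0,
					  DRM_MM_SEARCH_BEST);
}
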
-int drm_mm_insert_node(struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long size, unsigned alignment)
-{
- return drm_mm_insert_node_generic(mm, node, size, alignment, 0);
-}
-EXPORT_SYMBOL(drm_mm_insert_node);
-
static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
@@ -290,27 +221,6 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
}
}
-struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *hole_node,
- unsigned long size,
- unsigned alignment,
- unsigned long color,
- unsigned long start,
- unsigned long end,
- int atomic)
-{
- struct drm_mm_node *node;
-
- node = drm_mm_kmalloc(hole_node->mm, atomic);
- if (unlikely(node == NULL))
- return NULL;
-
- drm_mm_insert_helper_range(hole_node, node, size, alignment, color,
- start, end);
-
- return node;
-}
-EXPORT_SYMBOL(drm_mm_get_block_range_generic);
-
/**
* Search for free space and insert a preallocated memory node. Returns
* -ENOSPC if no suitable free area is available. This is for range
@@ -318,13 +228,14 @@ EXPORT_SYMBOL(drm_mm_get_block_range_generic);
*/
int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node,
unsigned long size, unsigned alignment, unsigned long color,
- unsigned long start, unsigned long end)
+ unsigned long start, unsigned long end,
+ enum drm_mm_search_flags flags)
{
struct drm_mm_node *hole_node;
hole_node = drm_mm_search_free_in_range_generic(mm,
size, alignment, color,
- start, end, 0);
+ start, end, flags);
if (!hole_node)
return -ENOSPC;
@@ -335,14 +246,6 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic);
-int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node,
- unsigned long size, unsigned alignment,
- unsigned long start, unsigned long end)
-{
- return drm_mm_insert_node_in_range_generic(mm, node, size, alignment, 0, start, end);
-}
-EXPORT_SYMBOL(drm_mm_insert_node_in_range);
-
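The range-restricted wrapper is dropped the same way; callers pass the window and flags to the generic variant directly. Illustrative only, with mappable_end standing in for a driver-specific bound such as the CPU-visible part of an aperture:

static int hypothetical_alloc_mappable(struct drm_mm *mm, struct drm_mm_node *node,
				       unsigned long size, unsigned long mappable_end)
{
	/* Only consider holes inside [0, mappable_end). */
	return drm_mm_insert_node_in_range_generic(mm, node, size, 0, 0,
						   0, mappable_end,
						   DRM_MM_SEARCH_DEFAULT);
}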
/**
* Remove a memory node from the allocator.
*/
@@ -351,6 +254,9 @@ void drm_mm_remove_node(struct drm_mm_node *node)
struct drm_mm *mm = node->mm;
struct drm_mm_node *prev_node;
+ if (WARN_ON(!node->allocated))
+ return;
+
BUG_ON(node->scanned_block || node->scanned_prev_free
|| node->scanned_next_free);
@@ -377,28 +283,6 @@ void drm_mm_remove_node(struct drm_mm_node *node)
}
EXPORT_SYMBOL(drm_mm_remove_node);
-/*
- * Remove a memory node from the allocator and free the allocated struct
- * drm_mm_node. Only to be used on a struct drm_mm_node obtained by one of the
- * drm_mm_get_block functions.
- */
-void drm_mm_put_block(struct drm_mm_node *node)
-{
-
- struct drm_mm *mm = node->mm;
-
- drm_mm_remove_node(node);
-
- spin_lock(&mm->unused_lock);
- if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&node->node_list, &mm->unused_nodes);
- ++mm->num_unused;
- } else
- kfree(node);
- spin_unlock(&mm->unused_lock);
-}
-EXPORT_SYMBOL(drm_mm_put_block);
-
static int check_free_hole(unsigned long start, unsigned long end,
unsigned long size, unsigned alignment)
{
@@ -414,11 +298,11 @@ static int check_free_hole(unsigned long start, unsigned long end,
return end >= start + size;
}
-struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment,
- unsigned long color,
- bool best_match)
+static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long color,
+ enum drm_mm_search_flags flags)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
@@ -441,7 +325,7 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
- if (!best_match)
+ if (!(flags & DRM_MM_SEARCH_BEST))
return entry;
if (entry->size < best_size) {
@@ -452,15 +336,14 @@ struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
return best;
}
-EXPORT_SYMBOL(drm_mm_search_free_generic);
-struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
+static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
unsigned long end,
- bool best_match)
+ enum drm_mm_search_flags flags)
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
@@ -488,7 +371,7 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
- if (!best_match)
+ if (!(flags & DRM_MM_SEARCH_BEST))
return entry;
if (entry->size < best_size) {
@@ -499,7 +382,6 @@ struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm,
return best;
}
-EXPORT_SYMBOL(drm_mm_search_free_in_range_generic);
/**
* Moves an allocation. To be used with embedded struct drm_mm_node.
@@ -634,8 +516,8 @@ EXPORT_SYMBOL(drm_mm_scan_add_block);
* corrupted.
*
* When the scan list is empty, the selected memory nodes can be freed. An
- * immediately following drm_mm_search_free with best_match = 0 will then return
- * the just freed block (because its at the top of the free_stack list).
+ * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then
+ * return the just freed block (because it's at the top of the free_stack list).
*
* Returns one if this block should be evicted, zero otherwise. Will always
* return zero when no hole has been found.
@@ -672,10 +554,7 @@ EXPORT_SYMBOL(drm_mm_clean);
void drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
INIT_LIST_HEAD(&mm->hole_stack);
- INIT_LIST_HEAD(&mm->unused_nodes);
- mm->num_unused = 0;
mm->scanned_blocks = 0;
- spin_lock_init(&mm->unused_lock);
/* Clever trick to avoid a special case in the free hole tracking. */
INIT_LIST_HEAD(&mm->head_node.node_list);
@@ -695,22 +574,8 @@ EXPORT_SYMBOL(drm_mm_init);
void drm_mm_takedown(struct drm_mm * mm)
{
- struct drm_mm_node *entry, *next;
-
- if (WARN(!list_empty(&mm->head_node.node_list),
- "Memory manager not clean. Delaying takedown\n")) {
- return;
- }
-
- spin_lock(&mm->unused_lock);
- list_for_each_entry_safe(entry, next, &mm->unused_nodes, node_list) {
- list_del(&entry->node_list);
- kfree(entry);
- --mm->num_unused;
- }
- spin_unlock(&mm->unused_lock);
-
- BUG_ON(mm->num_unused != 0);
+ WARN(!list_empty(&mm->head_node.node_list),
+ "Memory manager not clean during takedown.\n");
}
EXPORT_SYMBOL(drm_mm_takedown);
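With the per-mm node cache removed, drm_mm_takedown() no longer frees anything; it only warns if the allocator still holds nodes. A lifecycle sketch, with managed_size as an assumed extent in whatever units the driver allocates:

static void hypothetical_mm_lifecycle(unsigned long managed_size)
{
	struct drm_mm mm;

	drm_mm_init(&mm, 0, managed_size);	/* manage [0, managed_size) */

	/*
	 * ... every drm_mm_insert_node_generic()/drm_mm_reserve_node() must be
	 * paired with a drm_mm_remove_node() before takedown ...
	 */

	drm_mm_takedown(&mm);	/* now only WARNs if nodes are still allocated */
}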
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index a6729bfe686..fc2adb62b75 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -596,27 +596,6 @@ void drm_mode_set_name(struct drm_display_mode *mode)
EXPORT_SYMBOL(drm_mode_set_name);
/**
- * drm_mode_list_concat - move modes from one list to another
- * @head: source list
- * @new: dst list
- *
- * LOCKING:
- * Caller must ensure both lists are locked.
- *
- * Move all the modes from @head to @new.
- */
-void drm_mode_list_concat(struct list_head *head, struct list_head *new)
-{
-
- struct list_head *entry, *tmp;
-
- list_for_each_safe(entry, tmp, head) {
- list_move_tail(entry, new);
- }
-}
-EXPORT_SYMBOL(drm_mode_list_concat);
-
-/**
* drm_mode_width - get the width of a mode
* @mode: mode
*
@@ -923,43 +902,6 @@ void drm_mode_validate_size(struct drm_device *dev,
EXPORT_SYMBOL(drm_mode_validate_size);
/**
- * drm_mode_validate_clocks - validate modes against clock limits
- * @dev: DRM device
- * @mode_list: list of modes to check
- * @min: minimum clock rate array
- * @max: maximum clock rate array
- * @n_ranges: number of clock ranges (size of arrays)
- *
- * LOCKING:
- * Caller must hold a lock protecting @mode_list.
- *
- * Some code may need to check a mode list against the clock limits of the
- * device in question. This function walks the mode list, testing to make
- * sure each mode falls within a given range (defined by @min and @max
- * arrays) and sets @mode->status as needed.
- */
-void drm_mode_validate_clocks(struct drm_device *dev,
- struct list_head *mode_list,
- int *min, int *max, int n_ranges)
-{
- struct drm_display_mode *mode;
- int i;
-
- list_for_each_entry(mode, mode_list, head) {
- bool good = false;
- for (i = 0; i < n_ranges; i++) {
- if (mode->clock >= min[i] && mode->clock <= max[i]) {
- good = true;
- break;
- }
- }
- if (!good)
- mode->status = MODE_CLOCK_RANGE;
- }
-}
-EXPORT_SYMBOL(drm_mode_validate_clocks);
-
-/**
* drm_mode_prune_invalid - remove invalid modes from mode list
* @dev: DRM device
* @mode_list: list of modes to check
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 80c0b2b2980..1f96cee6eee 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -52,10 +52,8 @@
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
drm_dma_handle_t *dmah;
-#if 1
unsigned long addr;
size_t sz;
-#endif
/* pci_alloc_consistent only guarantees alignment to the smallest
* PAGE_SIZE order which is greater than or equal to the requested size.
@@ -97,10 +95,8 @@ EXPORT_SYMBOL(drm_pci_alloc);
*/
void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
-#if 1
unsigned long addr;
size_t sz;
-#endif
if (dmah->vaddr) {
/* XXX - Is virt_to_page() legal for consistent mem? */
@@ -276,17 +272,26 @@ static int drm_pci_agp_init(struct drm_device *dev)
DRM_ERROR("Cannot initialize the agpgart module.\n");
return -EINVAL;
}
- if (drm_core_has_MTRR(dev)) {
- if (dev->agp)
- dev->agp->agp_mtrr = arch_phys_wc_add(
- dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size *
- 1024 * 1024);
+ if (dev->agp) {
+ dev->agp->agp_mtrr = arch_phys_wc_add(
+ dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size *
+ 1024 * 1024);
}
}
return 0;
}
+static void drm_pci_agp_destroy(struct drm_device *dev)
+{
+ if (drm_core_has_AGP(dev) && dev->agp) {
+ arch_phys_wc_del(dev->agp->agp_mtrr);
+ drm_agp_clear(dev);
+ drm_agp_destroy(dev->agp);
+ dev->agp = NULL;
+ }
+}
+
static struct drm_bus drm_pci_bus = {
.bus_type = DRIVER_BUS_PCI,
.get_irq = drm_pci_get_irq,
@@ -295,6 +300,7 @@ static struct drm_bus drm_pci_bus = {
.set_unique = drm_pci_set_unique,
.irq_by_busid = drm_pci_irq_by_busid,
.agp_init = drm_pci_agp_init,
+ .agp_destroy = drm_pci_agp_destroy,
};
/**
@@ -348,6 +354,12 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
goto err_g2;
}
+ if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
+ ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
+ if (ret)
+ goto err_g21;
+ }
+
if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
goto err_g3;
@@ -377,6 +389,9 @@ int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
err_g4:
drm_put_minor(&dev->primary);
err_g3:
+ if (dev->render)
+ drm_put_minor(&dev->render);
+err_g21:
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_put_minor(&dev->control);
err_g2:
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index b8a282ea875..f7a18c6ba4c 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -28,7 +28,7 @@
#include <linux/export.h>
#include <drm/drmP.h>
-/**
+/*
* Register.
*
* \param platdev - Platform device structure
@@ -39,8 +39,8 @@
* Try and register, if we fail to register, backout previous work.
*/
-int drm_get_platform_dev(struct platform_device *platdev,
- struct drm_driver *driver)
+static int drm_get_platform_dev(struct platform_device *platdev,
+ struct drm_driver *driver)
{
struct drm_device *dev;
int ret;
@@ -69,6 +69,12 @@ int drm_get_platform_dev(struct platform_device *platdev,
goto err_g1;
}
+ if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
+ ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
+ if (ret)
+ goto err_g11;
+ }
+
ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
if (ret)
goto err_g2;
@@ -100,6 +106,9 @@ int drm_get_platform_dev(struct platform_device *platdev,
err_g3:
drm_put_minor(&dev->primary);
err_g2:
+ if (dev->render)
+ drm_put_minor(&dev->render);
+err_g11:
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_put_minor(&dev->control);
err_g1:
@@ -107,7 +116,6 @@ err_g1:
mutex_unlock(&drm_global_mutex);
return ret;
}
-EXPORT_SYMBOL(drm_get_platform_dev);
static int drm_platform_get_irq(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 85e450e3241..276d470f7b3 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -83,6 +83,34 @@ static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
return 0;
}
+static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
+ uint32_t handle)
+{
+ struct drm_prime_member *member;
+
+ list_for_each_entry(member, &prime_fpriv->head, entry) {
+ if (member->handle == handle)
+ return member->dma_buf;
+ }
+
+ return NULL;
+}
+
+static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
+ struct dma_buf *dma_buf,
+ uint32_t *handle)
+{
+ struct drm_prime_member *member;
+
+ list_for_each_entry(member, &prime_fpriv->head, entry) {
+ if (member->dma_buf == dma_buf) {
+ *handle = member->handle;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
static int drm_gem_map_attach(struct dma_buf *dma_buf,
struct device *target_dev,
struct dma_buf_attachment *attach)
@@ -131,9 +159,8 @@ static void drm_gem_map_detach(struct dma_buf *dma_buf,
attach->priv = NULL;
}
-static void drm_prime_remove_buf_handle_locked(
- struct drm_prime_file_private *prime_fpriv,
- struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
+ struct dma_buf *dma_buf)
{
struct drm_prime_member *member, *safe;
@@ -167,8 +194,6 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
if (WARN_ON(prime_attach->dir != DMA_NONE))
return ERR_PTR(-EBUSY);
- mutex_lock(&obj->dev->struct_mutex);
-
sgt = obj->dev->driver->gem_prime_get_sg_table(obj);
if (!IS_ERR(sgt)) {
@@ -182,7 +207,6 @@ static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
}
}
- mutex_unlock(&obj->dev->struct_mutex);
return sgt;
}
@@ -192,16 +216,14 @@ static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
/* nothing to be done here */
}
-static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
+void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
struct drm_gem_object *obj = dma_buf->priv;
- if (obj->export_dma_buf == dma_buf) {
- /* drop the reference on the export fd holds */
- obj->export_dma_buf = NULL;
- drm_gem_object_unreference_unlocked(obj);
- }
+ /* drop the reference on the export fd holds */
+ drm_gem_object_unreference_unlocked(obj);
}
+EXPORT_SYMBOL(drm_gem_dmabuf_release);
static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
@@ -300,62 +322,107 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_gem_prime_export);
+static struct dma_buf *export_and_register_object(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ uint32_t flags)
+{
+ struct dma_buf *dmabuf;
+
+ /* prevent races with concurrent gem_close. */
+ if (obj->handle_count == 0) {
+ dmabuf = ERR_PTR(-ENOENT);
+ return dmabuf;
+ }
+
+ dmabuf = dev->driver->gem_prime_export(dev, obj, flags);
+ if (IS_ERR(dmabuf)) {
+ /* normally the created dma-buf takes ownership of the ref,
+ * but if that fails then drop the ref
+ */
+ return dmabuf;
+ }
+
+ /*
+ * Note that callers do not need to clean up the export cache
+ * since the check for obj->handle_count guarantees that someone
+ * will clean it up.
+ */
+ obj->dma_buf = dmabuf;
+ get_dma_buf(obj->dma_buf);
+ /* Grab a new ref since the caller's reference is now used by the dma-buf */
+ drm_gem_object_reference(obj);
+
+ return dmabuf;
+}
+
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
struct drm_file *file_priv, uint32_t handle, uint32_t flags,
int *prime_fd)
{
struct drm_gem_object *obj;
- void *buf;
int ret = 0;
struct dma_buf *dmabuf;
+ mutex_lock(&file_priv->prime.lock);
obj = drm_gem_object_lookup(dev, file_priv, handle);
- if (!obj)
- return -ENOENT;
+ if (!obj) {
+ ret = -ENOENT;
+ goto out_unlock;
+ }
- mutex_lock(&file_priv->prime.lock);
+ dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
+ if (dmabuf) {
+ get_dma_buf(dmabuf);
+ goto out_have_handle;
+ }
+
+ mutex_lock(&dev->object_name_lock);
/* re-export the original imported object */
if (obj->import_attach) {
dmabuf = obj->import_attach->dmabuf;
+ get_dma_buf(dmabuf);
goto out_have_obj;
}
- if (obj->export_dma_buf) {
- dmabuf = obj->export_dma_buf;
+ if (obj->dma_buf) {
+ get_dma_buf(obj->dma_buf);
+ dmabuf = obj->dma_buf;
goto out_have_obj;
}
- buf = dev->driver->gem_prime_export(dev, obj, flags);
- if (IS_ERR(buf)) {
+ dmabuf = export_and_register_object(dev, obj, flags);
+ if (IS_ERR(dmabuf)) {
/* normally the created dma-buf takes ownership of the ref,
* but if that fails then drop the ref
*/
- ret = PTR_ERR(buf);
+ ret = PTR_ERR(dmabuf);
+ mutex_unlock(&dev->object_name_lock);
goto out;
}
- obj->export_dma_buf = buf;
- /* if we've exported this buffer the cheat and add it to the import list
- * so we get the correct handle back
+out_have_obj:
+ /*
+ * If we've exported this buffer then cheat and add it to the import list
+ * so we get the correct handle back. We must do this under the
+ * protection of dev->object_name_lock to ensure that a racing gem close
+ * ioctl doesn't fail to remove this buffer handle from the cache.
*/
ret = drm_prime_add_buf_handle(&file_priv->prime,
- obj->export_dma_buf, handle);
+ dmabuf, handle);
+ mutex_unlock(&dev->object_name_lock);
if (ret)
goto fail_put_dmabuf;
- ret = dma_buf_fd(buf, flags);
- if (ret < 0)
- goto fail_rm_handle;
-
- *prime_fd = ret;
- mutex_unlock(&file_priv->prime.lock);
- return 0;
-
-out_have_obj:
- get_dma_buf(dmabuf);
+out_have_handle:
ret = dma_buf_fd(dmabuf, flags);
+ /*
+ * We must _not_ remove the buffer from the handle cache since the newly
+ * created dma buf is already linked in the global obj->dma_buf pointer,
+ * and that is invariant as long as a userspace gem handle exists.
+ * Closing the handle will clean out the cache anyway, so we don't leak.
+ */
if (ret < 0) {
- dma_buf_put(dmabuf);
+ goto fail_put_dmabuf;
} else {
*prime_fd = ret;
ret = 0;
@@ -363,15 +430,13 @@ out_have_obj:
goto out;
-fail_rm_handle:
- drm_prime_remove_buf_handle_locked(&file_priv->prime, buf);
fail_put_dmabuf:
- /* clear NOT to be checked when releasing dma_buf */
- obj->export_dma_buf = NULL;
- dma_buf_put(buf);
+ dma_buf_put(dmabuf);
out:
drm_gem_object_unreference_unlocked(obj);
+out_unlock:
mutex_unlock(&file_priv->prime.lock);
+
return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
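The reworked export path caches the dma-buf both per object (obj->dma_buf) and per file (the prime handle table), so repeated exports of one handle hand back the same buffer. The userspace entry point is unchanged; a hedged sketch using libdrm's wrapper around DRM_IOCTL_PRIME_HANDLE_TO_FD, assuming an open DRM fd and a valid GEM handle:

#include <stdint.h>
#include <xf86drm.h>	/* drmPrimeHandleToFD() */

static int export_bo(int drm_fd, uint32_t handle, int *dmabuf_fd)
{
	/* Calling this twice with the same handle now yields the same dma-buf. */
	return drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC, dmabuf_fd);
}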
@@ -446,19 +511,26 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
ret = drm_prime_lookup_buf_handle(&file_priv->prime,
dma_buf, handle);
- if (!ret) {
- ret = 0;
+ if (ret == 0)
goto out_put;
- }
/* never seen this one, need to import */
+ mutex_lock(&dev->object_name_lock);
obj = dev->driver->gem_prime_import(dev, dma_buf);
if (IS_ERR(obj)) {
ret = PTR_ERR(obj);
- goto out_put;
+ goto out_unlock;
}
- ret = drm_gem_handle_create(file_priv, obj, handle);
+ if (obj->dma_buf) {
+ WARN_ON(obj->dma_buf != dma_buf);
+ } else {
+ obj->dma_buf = dma_buf;
+ get_dma_buf(dma_buf);
+ }
+
+ /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+ ret = drm_gem_handle_create_tail(file_priv, obj, handle);
drm_gem_object_unreference_unlocked(obj);
if (ret)
goto out_put;
@@ -478,7 +550,9 @@ fail:
/* hmm, if driver attached, we are relying on the free-object path
* to detach.. which seems ok..
*/
- drm_gem_object_handle_unreference_unlocked(obj);
+ drm_gem_handle_delete(file_priv, *handle);
+out_unlock:
+ mutex_unlock(&dev->object_name_lock);
out_put:
dma_buf_put(dma_buf);
mutex_unlock(&file_priv->prime.lock);
@@ -618,25 +692,3 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
WARN_ON(!list_empty(&prime_fpriv->head));
}
EXPORT_SYMBOL(drm_prime_destroy_file_private);
-
-int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
-{
- struct drm_prime_member *member;
-
- list_for_each_entry(member, &prime_fpriv->head, entry) {
- if (member->dma_buf == dma_buf) {
- *handle = member->handle;
- return 0;
- }
- }
- return -ENOENT;
-}
-EXPORT_SYMBOL(drm_prime_lookup_buf_handle);
-
-void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
-{
- mutex_lock(&prime_fpriv->lock);
- drm_prime_remove_buf_handle_locked(prime_fpriv, dma_buf);
- mutex_unlock(&prime_fpriv->lock);
-}
-EXPORT_SYMBOL(drm_prime_remove_buf_handle);
diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c
deleted file mode 100644
index d7f2324b4fb..00000000000
--- a/drivers/gpu/drm/drm_proc.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/**
- * \file drm_proc.c
- * /proc support for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- *
- * \par Acknowledgements:
- * Matthew J Sottek <matthew.j.sottek@intel.com> sent in a patch to fix
- * the problem with the proc files not outputting all their information.
- */
-
-/*
- * Created: Mon Jan 11 09:48:47 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include <linux/seq_file.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <drm/drmP.h>
-
-/***************************************************
- * Initialization, etc.
- **************************************************/
-
-/**
- * Proc file list.
- */
-static const struct drm_info_list drm_proc_list[] = {
- {"name", drm_name_info, 0},
- {"vm", drm_vm_info, 0},
- {"clients", drm_clients_info, 0},
- {"bufs", drm_bufs_info, 0},
- {"gem_names", drm_gem_name_info, DRIVER_GEM},
-#if DRM_DEBUG_CODE
- {"vma", drm_vma_info, 0},
-#endif
-};
-#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
-
-static int drm_proc_open(struct inode *inode, struct file *file)
-{
- struct drm_info_node* node = PDE_DATA(inode);
-
- return single_open(file, node->info_ent->show, node);
-}
-
-static const struct file_operations drm_proc_fops = {
- .owner = THIS_MODULE,
- .open = drm_proc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-
-/**
- * Initialize a given set of proc files for a device
- *
- * \param files The array of files to create
- * \param count The number of files given
- * \param root DRI proc dir entry.
- * \param minor device minor number
- * \return Zero on success, non-zero on failure
- *
- * Create a given set of proc files represented by an array of
- * gdm_proc_lists in the given root directory.
- */
-static int drm_proc_create_files(const struct drm_info_list *files, int count,
- struct proc_dir_entry *root, struct drm_minor *minor)
-{
- struct drm_device *dev = minor->dev;
- struct proc_dir_entry *ent;
- struct drm_info_node *tmp;
- int i;
-
- for (i = 0; i < count; i++) {
- u32 features = files[i].driver_features;
-
- if (features != 0 &&
- (dev->driver->driver_features & features) != features)
- continue;
-
- tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
- if (!tmp)
- return -1;
-
- tmp->minor = minor;
- tmp->info_ent = &files[i];
- list_add(&tmp->list, &minor->proc_nodes.list);
-
- ent = proc_create_data(files[i].name, S_IRUGO, root,
- &drm_proc_fops, tmp);
- if (!ent) {
- DRM_ERROR("Cannot create /proc/dri/%u/%s\n",
- minor->index, files[i].name);
- list_del(&tmp->list);
- kfree(tmp);
- return -1;
- }
- }
- return 0;
-}
-
-/**
- * Initialize the DRI proc filesystem for a device
- *
- * \param dev DRM device
- * \param root DRI proc dir entry.
- * \param dev_root resulting DRI device proc dir entry.
- * \return root entry pointer on success, or NULL on failure.
- *
- * Create the DRI proc root entry "/proc/dri", the device proc root entry
- * "/proc/dri/%minor%/", and each entry in proc_list as
- * "/proc/dri/%minor%/%name%".
- */
-int drm_proc_init(struct drm_minor *minor, struct proc_dir_entry *root)
-{
- char name[12];
- int ret;
-
- INIT_LIST_HEAD(&minor->proc_nodes.list);
- sprintf(name, "%u", minor->index);
- minor->proc_root = proc_mkdir(name, root);
- if (!minor->proc_root) {
- DRM_ERROR("Cannot create /proc/dri/%s\n", name);
- return -1;
- }
-
- ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
- minor->proc_root, minor);
- if (ret) {
- remove_proc_subtree(name, root);
- minor->proc_root = NULL;
- DRM_ERROR("Failed to create core drm proc files\n");
- return ret;
- }
-
- return 0;
-}
-
-static int drm_proc_remove_files(const struct drm_info_list *files, int count,
- struct drm_minor *minor)
-{
- struct list_head *pos, *q;
- struct drm_info_node *tmp;
- int i;
-
- for (i = 0; i < count; i++) {
- list_for_each_safe(pos, q, &minor->proc_nodes.list) {
- tmp = list_entry(pos, struct drm_info_node, list);
- if (tmp->info_ent == &files[i]) {
- remove_proc_entry(files[i].name,
- minor->proc_root);
- list_del(pos);
- kfree(tmp);
- }
- }
- }
- return 0;
-}
-
-/**
- * Cleanup the proc filesystem resources.
- *
- * \param minor device minor number.
- * \param root DRI proc dir entry.
- * \param dev_root DRI device proc dir entry.
- * \return always zero.
- *
- * Remove all proc entries created by proc_init().
- */
-int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
-{
- char name[64];
-
- if (!root || !minor->proc_root)
- return 0;
-
- drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
-
- sprintf(name, "%d", minor->index);
- remove_proc_subtree(name, root);
- return 0;
-}
-
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index d87f60bbc33..1c78406f6e7 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -46,7 +46,7 @@ static inline void *drm_vmalloc_dma(unsigned long size)
#endif
}
-void drm_sg_cleanup(struct drm_sg_mem * entry)
+static void drm_sg_cleanup(struct drm_sg_mem * entry)
{
struct page *page;
int i;
@@ -64,19 +64,32 @@ void drm_sg_cleanup(struct drm_sg_mem * entry)
kfree(entry);
}
+void drm_legacy_sg_cleanup(struct drm_device *dev)
+{
+ if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg &&
+ !drm_core_check_feature(dev, DRIVER_MODESET)) {
+ drm_sg_cleanup(dev->sg);
+ dev->sg = NULL;
+ }
+}
#ifdef _LP64
# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
#else
# define ScatterHandle(x) (unsigned int)(x)
#endif
-int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
+int drm_sg_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
+ struct drm_scatter_gather *request = data;
struct drm_sg_mem *entry;
unsigned long pages, i, j;
DRM_DEBUG("\n");
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EINVAL;
@@ -181,21 +194,15 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
return -ENOMEM;
}
-int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_scatter_gather *request = data;
-
- return drm_sg_alloc(dev, request);
-
-}
-
int drm_sg_free(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_scatter_gather *request = data;
struct drm_sg_mem *entry;
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index 327ca19cda8..e7eb0276f7f 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -40,6 +40,9 @@
unsigned int drm_debug = 0; /* 1 to enable debug output */
EXPORT_SYMBOL(drm_debug);
+unsigned int drm_rnodes = 0; /* 1 to enable experimental render nodes API */
+EXPORT_SYMBOL(drm_rnodes);
+
unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
EXPORT_SYMBOL(drm_vblank_offdelay);
@@ -56,11 +59,13 @@ MODULE_AUTHOR(CORE_AUTHOR);
MODULE_DESCRIPTION(CORE_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM_DESC(debug, "Enable debug output");
+MODULE_PARM_DESC(rnodes, "Enable experimental render nodes API");
MODULE_PARM_DESC(vblankoffdelay, "Delay until vblank irq auto-disable [msecs]");
MODULE_PARM_DESC(timestamp_precision_usec, "Max. error on timestamps [usecs]");
MODULE_PARM_DESC(timestamp_monotonic, "Use monotonic timestamps");
module_param_named(debug, drm_debug, int, 0600);
+module_param_named(rnodes, drm_rnodes, int, 0600);
module_param_named(vblankoffdelay, drm_vblank_offdelay, int, 0600);
module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600);
module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
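Render nodes stay opt-in: a render minor is only registered when the driver sets DRIVER_RENDER in its feature mask and the new drm.rnodes parameter is enabled (for example modprobe drm rnodes=1, or writing 1 to /sys/module/drm/parameters/rnodes at runtime). A hedged driver-side sketch of the feature bit; the driver name and the other feature flags are placeholders:

#include <drm/drmP.h>

static struct drm_driver hypothetical_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_RENDER,
	/* ... fops, ioctls and GEM hooks as the driver already defines them ... */
};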
@@ -68,7 +73,6 @@ module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
struct idr drm_minors_idr;
struct class *drm_class;
-struct proc_dir_entry *drm_proc_root;
struct dentry *drm_debugfs_root;
int drm_err(const char *func, const char *format, ...)
@@ -113,12 +117,12 @@ static int drm_minor_get_id(struct drm_device *dev, int type)
int base = 0, limit = 63;
if (type == DRM_MINOR_CONTROL) {
- base += 64;
- limit = base + 127;
- } else if (type == DRM_MINOR_RENDER) {
- base += 128;
- limit = base + 255;
- }
+ base += 64;
+ limit = base + 63;
+ } else if (type == DRM_MINOR_RENDER) {
+ base += 128;
+ limit = base + 63;
+ }
mutex_lock(&dev->struct_mutex);
ret = idr_alloc(&drm_minors_idr, NULL, base, limit, GFP_KERNEL);
@@ -288,13 +292,7 @@ int drm_fill_in_dev(struct drm_device *dev,
goto error_out_unreg;
}
-
-
- retcode = drm_ctxbitmap_init(dev);
- if (retcode) {
- DRM_ERROR("Cannot allocate memory for context bitmap.\n");
- goto error_out_unreg;
- }
+ drm_legacy_ctxbitmap_init(dev);
if (driver->driver_features & DRIVER_GEM) {
retcode = drm_gem_init(dev);
@@ -321,9 +319,8 @@ EXPORT_SYMBOL(drm_fill_in_dev);
* \param sec-minor structure to hold the assigned minor
* \return negative number on failure.
*
- * Search an empty entry and initialize it to the given parameters, and
- * create the proc init entry via proc_init(). This routines assigns
- * minor numbers to secondary heads of multi-headed cards
+ * Search an empty entry and initialize it to the given parameters. This
+ * routine assigns minor numbers to secondary heads of multi-headed cards
*/
int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
{
@@ -351,20 +348,11 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
idr_replace(&drm_minors_idr, new_minor, minor_id);
- if (type == DRM_MINOR_LEGACY) {
- ret = drm_proc_init(new_minor, drm_proc_root);
- if (ret) {
- DRM_ERROR("DRM: Failed to initialize /proc/dri.\n");
- goto err_mem;
- }
- } else
- new_minor->proc_root = NULL;
-
#if defined(CONFIG_DEBUG_FS)
ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
if (ret) {
DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
- goto err_g2;
+ goto err_mem;
}
#endif
@@ -372,7 +360,7 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
if (ret) {
printk(KERN_ERR
"DRM: Error sysfs_device_add.\n");
- goto err_g2;
+ goto err_debugfs;
}
*minor = new_minor;
@@ -380,10 +368,11 @@ int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
return 0;
-err_g2:
- if (new_minor->type == DRM_MINOR_LEGACY)
- drm_proc_cleanup(new_minor, drm_proc_root);
+err_debugfs:
+#if defined(CONFIG_DEBUG_FS)
+ drm_debugfs_cleanup(new_minor);
err_mem:
+#endif
kfree(new_minor);
err_idr:
idr_remove(&drm_minors_idr, minor_id);
@@ -397,10 +386,6 @@ EXPORT_SYMBOL(drm_get_minor);
*
* \param sec_minor - structure to be released
* \return always zero
- *
- * Cleans up the proc resources. Not legal for this to be the
- * last minor released.
- *
*/
int drm_put_minor(struct drm_minor **minor_p)
{
@@ -408,8 +393,6 @@ int drm_put_minor(struct drm_minor **minor_p)
DRM_DEBUG("release secondary minor %d\n", minor->index);
- if (minor->type == DRM_MINOR_LEGACY)
- drm_proc_cleanup(minor, drm_proc_root);
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_cleanup(minor);
#endif
@@ -451,16 +434,11 @@ void drm_put_dev(struct drm_device *dev)
drm_lastclose(dev);
- if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp)
- arch_phys_wc_del(dev->agp->agp_mtrr);
-
if (dev->driver->unload)
dev->driver->unload(dev);
- if (drm_core_has_AGP(dev) && dev->agp) {
- kfree(dev->agp);
- dev->agp = NULL;
- }
+ if (dev->driver->bus->agp_destroy)
+ dev->driver->bus->agp_destroy(dev);
drm_vblank_cleanup(dev);
@@ -468,11 +446,14 @@ void drm_put_dev(struct drm_device *dev)
drm_rmmap(dev, r_list->map);
drm_ht_remove(&dev->map_hash);
- drm_ctxbitmap_cleanup(dev);
+ drm_legacy_ctxbitmap_cleanup(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_put_minor(&dev->control);
+ if (dev->render)
+ drm_put_minor(&dev->render);
+
if (driver->driver_features & DRIVER_GEM)
drm_gem_destroy(dev);
@@ -489,6 +470,8 @@ void drm_unplug_dev(struct drm_device *dev)
/* for a USB device */
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_unplug_minor(dev->control);
+ if (dev->render)
+ drm_unplug_minor(dev->render);
drm_unplug_minor(dev->primary);
mutex_lock(&drm_global_mutex);
diff --git a/drivers/gpu/drm/drm_usb.c b/drivers/gpu/drm/drm_usb.c
index 34a156f0c33..87664723b9c 100644
--- a/drivers/gpu/drm/drm_usb.c
+++ b/drivers/gpu/drm/drm_usb.c
@@ -33,6 +33,12 @@ int drm_get_usb_dev(struct usb_interface *interface,
if (ret)
goto err_g1;
+ if (drm_core_check_feature(dev, DRIVER_RENDER) && drm_rnodes) {
+ ret = drm_get_minor(dev, &dev->render, DRM_MINOR_RENDER);
+ if (ret)
+ goto err_g11;
+ }
+
ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
if (ret)
goto err_g2;
@@ -62,6 +68,9 @@ int drm_get_usb_dev(struct usb_interface *interface,
err_g3:
drm_put_minor(&dev->primary);
err_g2:
+ if (dev->render)
+ drm_put_minor(&dev->render);
+err_g11:
drm_put_minor(&dev->control);
err_g1:
kfree(dev);
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index feb20035b2c..b5c5af7328d 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -251,8 +251,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
- if (drm_core_has_MTRR(dev))
- arch_phys_wc_del(map->mtrr);
+ arch_phys_wc_del(map->mtrr);
iounmap(map->handle);
break;
case _DRM_SHM:
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
new file mode 100644
index 00000000000..63b47120507
--- /dev/null
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2012 David Airlie <airlied@linux.ie>
+ * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_mm.h>
+#include <drm/drm_vma_manager.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+/**
+ * DOC: vma offset manager
+ *
+ * The vma-manager is responsible for mapping arbitrary driver-dependent memory
+ * regions into the linear user address-space. It provides offsets to the
+ * caller which can then be used on the address_space of the drm-device. It
+ * takes care to not overlap regions, size them appropriately and to not
+ * confuse mm-core by inconsistent fake vm_pgoff fields.
+ * Drivers shouldn't use this for object placement in VMEM. This manager should
+ * only be used to manage mappings into linear user-space VMs.
+ *
+ * We use drm_mm as backend to manage object allocations. But it is highly
+ * optimized for alloc/free calls, not lookups. Hence, we use an rb-tree to
+ * speed up offset lookups.
+ *
+ * You must not use multiple offset managers on a single address_space.
+ * Otherwise, mm-core will be unable to tear down memory mappings as the VM will
+ * no longer be linear. Please use VM_NONLINEAR in that case and implement your
+ * own offset managers.
+ *
+ * This offset manager works on page-based addresses. That is, every argument
+ * and return code (with the exception of drm_vma_node_offset_addr()) is given
+ * in number of pages, not number of bytes. That means, object sizes and offsets
+ * must always be page-aligned (as usual).
+ * If you want to get a valid byte-based user-space address for a given offset,
+ * please see drm_vma_node_offset_addr().
+ *
+ * In addition to offset management, the vma offset manager also handles access
+ * management. For every open-file context that is allowed to access a given
+ * node, you must call drm_vma_node_allow(). Otherwise, an mmap() call on this
+ * open-file with the offset of the node will fail with -EACCES. To revoke
+ * access again, use drm_vma_node_revoke(). However, the caller is responsible
+ * for destroying already existing mappings, if required.
+ */
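As a purely illustrative picture of the flow described above: a driver embeds a drm_vma_offset_node in each buffer object, registers it with the manager, and reports the byte offset to user-space for mmap(). Only the drm_vma_* calls are real; the structure and function names are hypothetical, and drm_vma_node_offset_addr() is the byte-conversion helper the comment above refers to:

#include <drm/drm_vma_manager.h>
#include <linux/types.h>

struct hypothetical_bo {
	struct drm_vma_offset_node vma_node;
	unsigned long num_pages;
	/* ... backing storage ... */
};

static int hypothetical_bo_mmap_offset(struct drm_vma_offset_manager *mgr,
				       struct hypothetical_bo *bo,
				       u64 *offset)
{
	int ret;

	ret = drm_vma_offset_add(mgr, &bo->vma_node, bo->num_pages);
	if (ret)
		return ret;

	/* Convert the page-based start into the byte offset handed to user-space. */
	*offset = drm_vma_node_offset_addr(&bo->vma_node);
	return 0;
}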
+
+/**
+ * drm_vma_offset_manager_init - Initialize new offset-manager
+ * @mgr: Manager object
+ * @page_offset: Offset of available memory area (page-based)
+ * @size: Size of available address space range (page-based)
+ *
+ * Initialize a new offset-manager. The offset and area size available for the
+ * manager are given as @page_offset and @size. Both are interpreted as
+ * page-numbers, not bytes.
+ *
+ * Adding/removing nodes from the manager is locked internally and protected
+ * against concurrent access. However, node allocation and destruction is left
+ * for the caller. While calling into the vma-manager, a given node must
+ * always be guaranteed to be referenced.
+ */
+void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
+ unsigned long page_offset, unsigned long size)
+{
+ rwlock_init(&mgr->vm_lock);
+ mgr->vm_addr_space_rb = RB_ROOT;
+ drm_mm_init(&mgr->vm_addr_space_mm, page_offset, size);
+}
+EXPORT_SYMBOL(drm_vma_offset_manager_init);
+
+/**
+ * drm_vma_offset_manager_destroy() - Destroy offset manager
+ * @mgr: Manager object
+ *
+ * Destroy an object manager which was previously created via
+ * drm_vma_offset_manager_init(). The caller must remove all allocated nodes
+ * before destroying the manager. Otherwise, drm_mm will refuse to free the
+ * requested resources.
+ *
+ * The manager must not be accessed after this function is called.
+ */
+void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
+{
+ /* take the lock to protect against buggy drivers */
+ write_lock(&mgr->vm_lock);
+ drm_mm_takedown(&mgr->vm_addr_space_mm);
+ write_unlock(&mgr->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
+
+/**
+ * drm_vma_offset_lookup() - Find node in offset space
+ * @mgr: Manager object
+ * @start: Start address for object (page-based)
+ * @pages: Size of object (page-based)
+ *
+ * Find a node given a start address and object size. This returns the _best_
+ * match for the given node. That is, @start may point somewhere into a valid
+ * region and the given node will be returned, as long as the node spans the
+ * whole requested area (given the size in number of pages as @pages).
+ *
+ * RETURNS:
+ * Returns NULL if no suitable node can be found. Otherwise, the best match
+ * is returned. It's the caller's responsibility to make sure the node doesn't
+ * get destroyed before the caller can access it.
+ */
+struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
+ unsigned long start,
+ unsigned long pages)
+{
+ struct drm_vma_offset_node *node;
+
+ read_lock(&mgr->vm_lock);
+ node = drm_vma_offset_lookup_locked(mgr, start, pages);
+ read_unlock(&mgr->vm_lock);
+
+ return node;
+}
+EXPORT_SYMBOL(drm_vma_offset_lookup);
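In a driver's mmap() handler the reverse direction looks roughly like this sketch; the wrapper name is hypothetical, vma is the vm_area_struct passed to mmap, and error handling is elided:

#include <drm/drm_vma_manager.h>
#include <linux/mm.h>

static struct drm_vma_offset_node *
hypothetical_lookup_for_mmap(struct drm_vma_offset_manager *mgr,
			     struct vm_area_struct *vma)
{
	/* vm_pgoff is the page-based offset user-space passed to mmap(). */
	return drm_vma_offset_lookup(mgr, vma->vm_pgoff, vma_pages(vma));
}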
+
+/**
+ * drm_vma_offset_lookup_locked() - Find node in offset space
+ * @mgr: Manager object
+ * @start: Start address for object (page-based)
+ * @pages: Size of object (page-based)
+ *
+ * Same as drm_vma_offset_lookup() but requires the caller to lock offset lookup
+ * manually. See drm_vma_offset_lock_lookup() for an example.
+ *
+ * RETURNS:
+ * Returns NULL if no suitable node can be found. Otherwise, the best match
+ * is returned.
+ */
+struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
+ unsigned long start,
+ unsigned long pages)
+{
+ struct drm_vma_offset_node *node, *best;
+ struct rb_node *iter;
+ unsigned long offset;
+
+ iter = mgr->vm_addr_space_rb.rb_node;
+ best = NULL;
+
+ while (likely(iter)) {
+ node = rb_entry(iter, struct drm_vma_offset_node, vm_rb);
+ offset = node->vm_node.start;
+ if (start >= offset) {
+ iter = iter->rb_right;
+ best = node;
+ if (start == offset)
+ break;
+ } else {
+ iter = iter->rb_left;
+ }
+ }
+
+ /* verify that the node spans the requested area */
+ if (best) {
+ offset = best->vm_node.start + best->vm_node.size;
+ if (offset < start + pages)
+ best = NULL;
+ }
+
+ return best;
+}
+EXPORT_SYMBOL(drm_vma_offset_lookup_locked);
+
+/* internal helper to link @node into the rb-tree */
+static void _drm_vma_offset_add_rb(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node)
+{
+ struct rb_node **iter = &mgr->vm_addr_space_rb.rb_node;
+ struct rb_node *parent = NULL;
+ struct drm_vma_offset_node *iter_node;
+
+ while (likely(*iter)) {
+ parent = *iter;
+ iter_node = rb_entry(*iter, struct drm_vma_offset_node, vm_rb);
+
+ if (node->vm_node.start < iter_node->vm_node.start)
+ iter = &(*iter)->rb_left;
+ else if (node->vm_node.start > iter_node->vm_node.start)
+ iter = &(*iter)->rb_right;
+ else
+ BUG();
+ }
+
+ rb_link_node(&node->vm_rb, parent, iter);
+ rb_insert_color(&node->vm_rb, &mgr->vm_addr_space_rb);
+}
+
+/**
+ * drm_vma_offset_add() - Add offset node to manager
+ * @mgr: Manager object
+ * @node: Node to be added
+ * @pages: Allocation size visible to user-space (in number of pages)
+ *
+ * Add a node to the offset-manager. If the node was already added, this does
+ * nothing and returns 0. @pages is the size of the object given in number of
+ * pages.
+ * After this call succeeds, you can access the offset of the node until it
+ * is removed again.
+ *
+ * If this call fails, it is safe to retry the operation or call
+ * drm_vma_offset_remove(), anyway. However, no cleanup is required in that
+ * case.
+ *
+ * @pages is not required to be the same size as the underlying memory object
+ * that you want to map. It only limits the size that user-space can map into
+ * their address space.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on failure.
+ */
+int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node, unsigned long pages)
+{
+ int ret;
+
+ write_lock(&mgr->vm_lock);
+
+ if (drm_mm_node_allocated(&node->vm_node)) {
+ ret = 0;
+ goto out_unlock;
+ }
+
+ ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node,
+ pages, 0, DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ goto out_unlock;
+
+ _drm_vma_offset_add_rb(mgr, node);
+
+out_unlock:
+ write_unlock(&mgr->vm_lock);
+ return ret;
+}
+EXPORT_SYMBOL(drm_vma_offset_add);
+
+/**
+ * drm_vma_offset_remove() - Remove offset node from manager
+ * @mgr: Manager object
+ * @node: Node to be removed
+ *
+ * Remove a node from the offset manager. If the node wasn't added before, this
+ * does nothing. After this call returns, the offset and size will be 0 until a
+ * new offset is allocated via drm_vma_offset_add() again. Helper functions like
+ * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
+ * offset is allocated.
+ */
+void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node)
+{
+ write_lock(&mgr->vm_lock);
+
+ if (drm_mm_node_allocated(&node->vm_node)) {
+ rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
+ drm_mm_remove_node(&node->vm_node);
+ memset(&node->vm_node, 0, sizeof(node->vm_node));
+ }
+
+ write_unlock(&mgr->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_offset_remove);
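The matching teardown typically sits in the object's free path; a minimal sketch, reusing the hypothetical_bo from the earlier example:

static void hypothetical_bo_release_mmap(struct drm_vma_offset_manager *mgr,
					 struct hypothetical_bo *bo)
{
	/* Safe even if drm_vma_offset_add() was never called or was already undone. */
	drm_vma_offset_remove(mgr, &bo->vma_node);
}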
+
+/**
+ * drm_vma_node_allow - Add open-file to list of allowed users
+ * @node: Node to modify
+ * @filp: Open file to add
+ *
+ * Add @filp to the list of allowed open-files for this node. If @filp is
+ * already on this list, the ref-count is incremented.
+ *
+ * The list of allowed-users is preserved across drm_vma_offset_add() and
+ * drm_vma_offset_remove() calls. You may even call it if the node is currently
+ * not added to any offset-manager.
+ *
+ * You must remove all open-files the same number of times as you added them
+ * before destroying the node. Otherwise, you will leak memory.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * 0 on success, negative error code on internal failure (out-of-mem)
+ */
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp)
+{
+ struct rb_node **iter;
+ struct rb_node *parent = NULL;
+ struct drm_vma_offset_file *new, *entry;
+ int ret = 0;
+
+ /* Preallocate entry to avoid atomic allocations below. It is quite
+ * unlikely that an open-file is added twice to a single node so we
+ * don't optimize for this case. OOM is checked below only if the entry
+ * is actually used. */
+ new = kmalloc(sizeof(*entry), GFP_KERNEL);
+
+ write_lock(&node->vm_lock);
+
+ iter = &node->vm_files.rb_node;
+
+ while (likely(*iter)) {
+ parent = *iter;
+ entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb);
+
+ if (filp == entry->vm_filp) {
+ entry->vm_count++;
+ goto unlock;
+ } else if (filp > entry->vm_filp) {
+ iter = &(*iter)->rb_right;
+ } else {
+ iter = &(*iter)->rb_left;
+ }
+ }
+
+ if (!new) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ new->vm_filp = filp;
+ new->vm_count = 1;
+ rb_link_node(&new->vm_rb, parent, iter);
+ rb_insert_color(&new->vm_rb, &node->vm_files);
+ new = NULL;
+
+unlock:
+ write_unlock(&node->vm_lock);
+ kfree(new);
+ return ret;
+}
+EXPORT_SYMBOL(drm_vma_node_allow);
+
+/**
+ * drm_vma_node_revoke - Remove open-file from list of allowed users
+ * @node: Node to modify
+ * @filp: Open file to remove
+ *
+ * Decrement the ref-count of @filp in the list of allowed open-files on @node.
+ * If the ref-count drops to zero, remove @filp from the list. You must call
+ * this once for every drm_vma_node_allow() on @filp.
+ *
+ * This is locked against concurrent access internally.
+ *
+ * If @filp is not on the list, nothing is done.
+ */
+void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp)
+{
+ struct drm_vma_offset_file *entry;
+ struct rb_node *iter;
+
+ write_lock(&node->vm_lock);
+
+ iter = node->vm_files.rb_node;
+ while (likely(iter)) {
+ entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
+ if (filp == entry->vm_filp) {
+ if (!--entry->vm_count) {
+ rb_erase(&entry->vm_rb, &node->vm_files);
+ kfree(entry);
+ }
+ break;
+ } else if (filp > entry->vm_filp) {
+ iter = iter->rb_right;
+ } else {
+ iter = iter->rb_left;
+ }
+ }
+
+ write_unlock(&node->vm_lock);
+}
+EXPORT_SYMBOL(drm_vma_node_revoke);
+
+/**
+ * drm_vma_node_is_allowed - Check whether an open-file is granted access
+ * @node: Node to check
+ * @filp: Open-file to check for
+ *
+ * Search the list in @node whether @filp is currently on the list of allowed
+ * open-files (see drm_vma_node_allow()).
+ *
+ * This is locked against concurrent access internally.
+ *
+ * RETURNS:
+ * true iff @filp is on the list
+ */
+bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
+ struct file *filp)
+{
+ struct drm_vma_offset_file *entry;
+ struct rb_node *iter;
+
+ read_lock(&node->vm_lock);
+
+ iter = node->vm_files.rb_node;
+ while (likely(iter)) {
+ entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb);
+ if (filp == entry->vm_filp)
+ break;
+ else if (filp > entry->vm_filp)
+ iter = iter->rb_right;
+ else
+ iter = iter->rb_left;
+ }
+
+ read_unlock(&node->vm_lock);
+
+ return iter;
+}
+EXPORT_SYMBOL(drm_vma_node_is_allowed);
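Taken together, the access-management helpers are meant to be wired into the object's open/close and mmap paths roughly as follows; the function names are hypothetical, only the drm_vma_node_* calls come from this file:

#include <drm/drm_vma_manager.h>

static int hypothetical_obj_open(struct drm_vma_offset_node *node, struct file *filp)
{
	/* Called once per open-file that is allowed to mmap the object. */
	return drm_vma_node_allow(node, filp);
}

static void hypothetical_obj_close(struct drm_vma_offset_node *node, struct file *filp)
{
	/* Must balance every successful drm_vma_node_allow(). */
	drm_vma_node_revoke(node, filp);
}

static int hypothetical_mmap_access_check(struct drm_vma_offset_node *node,
					  struct file *filp)
{
	return drm_vma_node_is_allowed(node, filp) ? 0 : -EACCES;
}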
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 772c62a6e2a..4752f223e5b 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,11 +1,12 @@
config DRM_EXYNOS
tristate "DRM Support for Samsung SoC EXYNOS Series"
- depends on DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
+ depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
select DRM_KMS_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
+ select VIDEOMODE_HELPERS
help
Choose this option if you have a Samsung SoC EXYNOS chipset.
If M is selected the module will be called exynosdrm.
@@ -24,9 +25,8 @@ config DRM_EXYNOS_DMABUF
config DRM_EXYNOS_FIMD
bool "Exynos DRM FIMD"
- depends on OF && DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
+ depends on DRM_EXYNOS && !FB_S3C && !ARCH_MULTIPLATFORM
select FB_MODE_HELPERS
- select VIDEOMODE_HELPERS
help
Choose this option if you want to use Exynos FIMD for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 95c75edef01..6a8c84e7c83 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -15,8 +15,7 @@
#include <linux/kernel.h>
#include <linux/i2c.h>
-#include <linux/module.h>
-
+#include <linux/of.h>
#include "exynos_drm_drv.h"
#include "exynos_hdmi.h"
@@ -42,13 +41,6 @@ static int s5p_ddc_remove(struct i2c_client *client)
return 0;
}
-static struct i2c_device_id ddc_idtable[] = {
- {"s5p_ddc", 0},
- {"exynos5-hdmiddc", 0},
- { },
-};
-
-#ifdef CONFIG_OF
static struct of_device_id hdmiddc_match_types[] = {
{
.compatible = "samsung,exynos5-hdmiddc",
@@ -58,15 +50,13 @@ static struct of_device_id hdmiddc_match_types[] = {
/* end node */
}
};
-#endif
struct i2c_driver ddc_driver = {
.driver = {
.name = "exynos-hdmiddc",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(hdmiddc_match_types),
+ .of_match_table = hdmiddc_match_types,
},
- .id_table = ddc_idtable,
.probe = s5p_ddc_probe,
.remove = s5p_ddc_remove,
.command = NULL,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index b8ac06d92fb..3445a0f3a6b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -149,10 +149,8 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
DRM_DEBUG_KMS("desired size = 0x%x\n", size);
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
- if (!buffer) {
- DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
+ if (!buffer)
return NULL;
- }
buffer->size = size;
return buffer;
@@ -161,11 +159,6 @@ struct exynos_drm_gem_buf *exynos_drm_init_buf(struct drm_device *dev,
void exynos_drm_fini_buf(struct drm_device *dev,
struct exynos_drm_gem_buf *buffer)
{
- if (!buffer) {
- DRM_DEBUG_KMS("buffer is null.\n");
- return;
- }
-
kfree(buffer);
buffer = NULL;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 02a8bc5226c..e082efb2fec 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -17,6 +17,7 @@
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_encoder.h"
+#include "exynos_drm_connector.h"
#define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\
drm_connector)
@@ -28,35 +29,6 @@ struct exynos_drm_connector {
uint32_t dpms;
};
-/* convert exynos_video_timings to drm_display_mode */
-static inline void
-convert_to_display_mode(struct drm_display_mode *mode,
- struct exynos_drm_panel_info *panel)
-{
- struct fb_videomode *timing = &panel->timing;
-
- mode->clock = timing->pixclock / 1000;
- mode->vrefresh = timing->refresh;
-
- mode->hdisplay = timing->xres;
- mode->hsync_start = mode->hdisplay + timing->right_margin;
- mode->hsync_end = mode->hsync_start + timing->hsync_len;
- mode->htotal = mode->hsync_end + timing->left_margin;
-
- mode->vdisplay = timing->yres;
- mode->vsync_start = mode->vdisplay + timing->lower_margin;
- mode->vsync_end = mode->vsync_start + timing->vsync_len;
- mode->vtotal = mode->vsync_end + timing->upper_margin;
- mode->width_mm = panel->width_mm;
- mode->height_mm = panel->height_mm;
-
- if (timing->vmode & FB_VMODE_INTERLACED)
- mode->flags |= DRM_MODE_FLAG_INTERLACE;
-
- if (timing->vmode & FB_VMODE_DOUBLE)
- mode->flags |= DRM_MODE_FLAG_DBLSCAN;
-}
-
static int exynos_drm_connector_get_modes(struct drm_connector *connector)
{
struct exynos_drm_connector *exynos_connector =
@@ -111,7 +83,9 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
return 0;
}
- convert_to_display_mode(mode, panel);
+ drm_display_mode_from_videomode(&panel->vm, mode);
+ mode->width_mm = panel->width_mm;
+ mode->height_mm = panel->height_mm;
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
@@ -278,10 +252,8 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
int err;
exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL);
- if (!exynos_connector) {
- DRM_ERROR("failed to allocate connector\n");
+ if (!exynos_connector)
return NULL;
- }
connector = &exynos_connector->drm_connector;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 9a35d171a6d..ebc01503d50 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -15,6 +15,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include "exynos_drm_crtc.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_encoder.h"
#include "exynos_drm_plane.h"
@@ -184,8 +185,9 @@ static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
};
static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct drm_device *dev = crtc->dev;
struct exynos_drm_private *dev_priv = dev->dev_private;
@@ -323,10 +325,8 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
struct drm_crtc *crtc;
exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL);
- if (!exynos_crtc) {
- DRM_ERROR("failed to allocate exynos crtc\n");
+ if (!exynos_crtc)
return -ENOMEM;
- }
exynos_crtc->pipe = nr;
exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index a0f997e0cbd..59827cc5e77 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -11,6 +11,7 @@
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
+#include "exynos_drm_dmabuf.h"
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
@@ -22,6 +23,11 @@ struct exynos_drm_dmabuf_attachment {
bool is_mapped;
};
+static struct exynos_drm_gem_obj *dma_buf_to_obj(struct dma_buf *buf)
+{
+ return to_exynos_gem_obj(buf->priv);
+}
+
static int exynos_gem_attach_dma_buf(struct dma_buf *dmabuf,
struct device *dev,
struct dma_buf_attachment *attach)
@@ -63,7 +69,7 @@ static struct sg_table *
enum dma_data_direction dir)
{
struct exynos_drm_dmabuf_attachment *exynos_attach = attach->priv;
- struct exynos_drm_gem_obj *gem_obj = attach->dmabuf->priv;
+ struct exynos_drm_gem_obj *gem_obj = dma_buf_to_obj(attach->dmabuf);
struct drm_device *dev = gem_obj->base.dev;
struct exynos_drm_gem_buf *buf;
struct scatterlist *rd, *wr;
@@ -127,27 +133,6 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
/* Nothing to do. */
}
-static void exynos_dmabuf_release(struct dma_buf *dmabuf)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj = dmabuf->priv;
-
- /*
- * exynos_dmabuf_release() call means that file object's
- * f_count is 0 and it calls drm_gem_object_handle_unreference()
- * to drop the references that these values had been increased
- * at drm_prime_handle_to_fd()
- */
- if (exynos_gem_obj->base.export_dma_buf == dmabuf) {
- exynos_gem_obj->base.export_dma_buf = NULL;
-
- /*
- * drop this gem object refcount to release allocated buffer
- * and resources.
- */
- drm_gem_object_unreference_unlocked(&exynos_gem_obj->base);
- }
-}
-
static void *exynos_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
unsigned long page_num)
{
@@ -193,7 +178,7 @@ static struct dma_buf_ops exynos_dmabuf_ops = {
.kunmap = exynos_gem_dmabuf_kunmap,
.kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic,
.mmap = exynos_gem_dmabuf_mmap,
- .release = exynos_dmabuf_release,
+ .release = drm_gem_dmabuf_release,
};
struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
@@ -201,7 +186,7 @@ struct dma_buf *exynos_dmabuf_prime_export(struct drm_device *drm_dev,
{
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- return dma_buf_export(exynos_gem_obj, &exynos_dmabuf_ops,
+ return dma_buf_export(obj, &exynos_dmabuf_ops,
exynos_gem_obj->base.size, flags);
}
@@ -219,8 +204,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
if (dma_buf->ops == &exynos_dmabuf_ops) {
struct drm_gem_object *obj;
- exynos_gem_obj = dma_buf->priv;
- obj = &exynos_gem_obj->base;
+ obj = dma_buf->priv;
/* is it from our device? */
if (obj->dev == drm_dev) {
@@ -247,7 +231,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer) {
- DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
ret = -ENOMEM;
goto err_unmap_attach;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ca2729a8512..bb82ef78ca8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -47,10 +47,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
int nr;
private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
- if (!private) {
- DRM_ERROR("failed to allocate private\n");
+ if (!private)
return -ENOMEM;
- }
INIT_LIST_HEAD(&private->pageflip_event_list);
dev->dev_private = (void *)private;
@@ -213,7 +211,7 @@ static const struct vm_operations_struct exynos_drm_gem_vm_ops = {
.close = drm_gem_vm_close,
};
-static struct drm_ioctl_desc exynos_ioctls[] = {
+static const struct drm_ioctl_desc exynos_ioctls[] = {
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET,
@@ -271,12 +269,13 @@ static struct drm_driver exynos_drm_driver = {
.gem_vm_ops = &exynos_drm_gem_vm_ops,
.dumb_create = exynos_drm_gem_dumb_create,
.dumb_map_offset = exynos_drm_gem_dumb_map_offset,
- .dumb_destroy = exynos_drm_gem_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = exynos_dmabuf_prime_export,
.gem_prime_import = exynos_dmabuf_prime_import,
.ioctls = exynos_ioctls,
+ .num_ioctls = ARRAY_SIZE(exynos_ioctls),
.fops = &exynos_drm_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -288,7 +287,6 @@ static struct drm_driver exynos_drm_driver = {
static int exynos_drm_platform_probe(struct platform_device *pdev)
{
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
return drm_platform_init(&exynos_drm_driver, pdev);
}
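Editor's note: the hunk above drops the runtime num_ioctls assignment in probe; once the ioctl table is const, its length can be taken at build time with ARRAY_SIZE. A small standalone sketch of the pattern; the ioctl_desc and example_driver structs and their contents are hypothetical, not the DRM definitions.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical stand-in for a driver ioctl descriptor. */
struct ioctl_desc {
        unsigned int cmd;
        const char *name;
};

static const struct ioctl_desc example_ioctls[] = {
        { 0x00, "GEM_CREATE" },
        { 0x01, "GEM_MAP_OFFSET" },
        { 0x02, "G2D_SET_CMDLIST" },
};

/* Hypothetical driver struct: the count lives next to the table. */
struct example_driver {
        const struct ioctl_desc *ioctls;
        int num_ioctls;
};

static const struct example_driver drv = {
        .ioctls     = example_ioctls,
        .num_ioctls = ARRAY_SIZE(example_ioctls),  /* computed at build time */
};

int main(void)
{
        printf("%d ioctls registered\n", drv.num_ioctls);
        return 0;
}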
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index a99a033793b..06f1b2a09da 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -324,10 +324,8 @@ exynos_drm_encoder_create(struct drm_device *dev,
return NULL;
exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL);
- if (!exynos_encoder) {
- DRM_ERROR("failed to allocate encoder\n");
+ if (!exynos_encoder)
return NULL;
- }
exynos_encoder->dpms = DRM_MODE_DPMS_OFF;
exynos_encoder->manager = manager;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index c2d149f0408..ea39e0ef2ae 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -156,10 +156,8 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
}
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
- if (!exynos_fb) {
- DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+ if (!exynos_fb)
return ERR_PTR(-ENOMEM);
- }
drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
@@ -220,10 +218,8 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
int i, ret;
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
- if (!exynos_fb) {
- DRM_ERROR("failed to allocate exynos drm framebuffer\n");
+ if (!exynos_fb)
return ERR_PTR(-ENOMEM);
- }
obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
if (!obj) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 8e60bd61137..78e868bcf1e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -16,9 +16,11 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
+#include "exynos_drm_fbdev.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"
@@ -165,8 +167,18 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
size = mode_cmd.pitches[0] * mode_cmd.height;
- /* 0 means to allocate physically continuous memory */
- exynos_gem_obj = exynos_drm_gem_create(dev, 0, size);
+ exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG, size);
+ /*
+ * If the physically contiguous allocation fails and IOMMU is
+ * supported, retry the allocation from a non-physically-contiguous
+ * memory area.
+ */
+ if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
+ dev_warn(&pdev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
+ exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_NONCONTIG,
+ size);
+ }
+
if (IS_ERR(exynos_gem_obj)) {
ret = PTR_ERR(exynos_gem_obj);
goto err_release_framebuffer;
@@ -236,10 +248,8 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
return 0;
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
- if (!fbdev) {
- DRM_ERROR("failed to allocate drm fbdev.\n");
+ if (!fbdev)
return -ENOMEM;
- }
private->fb_helper = helper = &fbdev->drm_fb_helper;
helper->funcs = &exynos_drm_fb_helper_funcs;
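Editor's note: the fbdev path above now requests a contiguous buffer first and, when that fails on an IOMMU-capable device, retries with a non-contiguous allocation. A standalone sketch of that fallback pattern; gem_create(), fb_alloc(), the iommu_supported flag and the size threshold are hypothetical stand-ins for the real GEM allocation and is_drm_iommu_supported() check.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical allocation flags mirroring the CONTIG/NONCONTIG split. */
enum buf_flags { BUF_CONTIG, BUF_NONCONTIG };

/* Hypothetical back end: pretend large contiguous requests fail. */
static void *gem_create(enum buf_flags flags, size_t size)
{
        if (flags == BUF_CONTIG && size > (4 << 20))
                return NULL;            /* CMA exhausted / fragmentation */
        return malloc(size);
}

static int iommu_supported = 1;         /* would be is_drm_iommu_supported(dev) */

static void *fb_alloc(size_t size)
{
        void *buf = gem_create(BUF_CONTIG, size);

        /*
         * Contiguous memory failed; if an IOMMU can remap scattered pages
         * into a linear device address range, retry non-contiguously.
         */
        if (!buf && iommu_supported) {
                fprintf(stderr, "contiguous alloc failed, falling back\n");
                buf = gem_create(BUF_NONCONTIG, size);
        }
        return buf;
}

int main(void)
{
        void *fb = fb_alloc(8 << 20);   /* 8 MiB scanout buffer */

        printf("allocation %s\n", fb ? "succeeded" : "failed");
        free(fb);
        return 0;
}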
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 61b094f689a..8adfc8f1e08 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -12,16 +12,17 @@
*
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
+#include <linux/of.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "regs-fimc.h"
+#include "exynos_drm_drv.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_fimc.h"
@@ -1344,10 +1345,8 @@ static int fimc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
struct drm_exynos_ipp_prop_list *prop_list;
prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
- if (!prop_list) {
- DRM_ERROR("failed to alloc property list.\n");
+ if (!prop_list)
return -ENOMEM;
- }
prop_list->version = 1;
prop_list->writeback = 1;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 3e106beca5b..868a14d5299 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -14,13 +14,14 @@
#include <drm/drmP.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <video/of_display_timing.h>
+#include <video/of_videomode.h>
#include <video/samsung_fimd.h>
#include <drm/exynos_drm.h>
@@ -36,6 +37,8 @@
* CPU Interface.
*/
+#define FIMD_DEFAULT_FRAMERATE 60
+
/* position control register for hardware window 0, 2 ~ 4.*/
#define VIDOSD_A(win) (VIDOSD_BASE + 0x00 + (win) * 16)
#define VIDOSD_B(win) (VIDOSD_BASE + 0x04 + (win) * 16)
@@ -66,11 +69,13 @@ struct fimd_driver_data {
unsigned int has_shadowcon:1;
unsigned int has_clksel:1;
+ unsigned int has_limited_fmt:1;
};
static struct fimd_driver_data s3c64xx_fimd_driver_data = {
.timing_base = 0x0,
.has_clksel = 1,
+ .has_limited_fmt = 1,
};
static struct fimd_driver_data exynos4_fimd_driver_data = {
@@ -91,6 +96,7 @@ struct fimd_win_data {
unsigned int fb_width;
unsigned int fb_height;
unsigned int bpp;
+ unsigned int pixel_format;
dma_addr_t dma_addr;
unsigned int buf_offsize;
unsigned int line_size; /* bytes */
@@ -116,11 +122,10 @@ struct fimd_context {
wait_queue_head_t wait_vsync_queue;
atomic_t wait_vsync_event;
- struct exynos_drm_panel_info *panel;
+ struct exynos_drm_panel_info panel;
struct fimd_driver_data *driver_data;
};
-#ifdef CONFIG_OF
static const struct of_device_id fimd_driver_dt_match[] = {
{ .compatible = "samsung,s3c6400-fimd",
.data = &s3c64xx_fimd_driver_data },
@@ -130,22 +135,14 @@ static const struct of_device_id fimd_driver_dt_match[] = {
.data = &exynos5_fimd_driver_data },
{},
};
-MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
-#endif
static inline struct fimd_driver_data *drm_fimd_get_driver_data(
struct platform_device *pdev)
{
-#ifdef CONFIG_OF
const struct of_device_id *of_id =
of_match_device(fimd_driver_dt_match, &pdev->dev);
- if (of_id)
- return (struct fimd_driver_data *)of_id->data;
-#endif
-
- return (struct fimd_driver_data *)
- platform_get_device_id(pdev)->driver_data;
+ return (struct fimd_driver_data *)of_id->data;
}
static bool fimd_display_is_connected(struct device *dev)
@@ -159,7 +156,7 @@ static void *fimd_get_panel(struct device *dev)
{
struct fimd_context *ctx = get_fimd_context(dev);
- return ctx->panel;
+ return &ctx->panel;
}
static int fimd_check_mode(struct device *dev, struct drm_display_mode *mode)
@@ -239,8 +236,8 @@ static void fimd_apply(struct device *subdrv_dev)
static void fimd_commit(struct device *dev)
{
struct fimd_context *ctx = get_fimd_context(dev);
- struct exynos_drm_panel_info *panel = ctx->panel;
- struct fb_videomode *timing = &panel->timing;
+ struct exynos_drm_panel_info *panel = &ctx->panel;
+ struct videomode *vm = &panel->vm;
struct fimd_driver_data *driver_data;
u32 val;
@@ -252,22 +249,22 @@ static void fimd_commit(struct device *dev)
writel(ctx->vidcon1, ctx->regs + driver_data->timing_base + VIDCON1);
/* setup vertical timing values. */
- val = VIDTCON0_VBPD(timing->upper_margin - 1) |
- VIDTCON0_VFPD(timing->lower_margin - 1) |
- VIDTCON0_VSPW(timing->vsync_len - 1);
+ val = VIDTCON0_VBPD(vm->vback_porch - 1) |
+ VIDTCON0_VFPD(vm->vfront_porch - 1) |
+ VIDTCON0_VSPW(vm->vsync_len - 1);
writel(val, ctx->regs + driver_data->timing_base + VIDTCON0);
/* setup horizontal timing values. */
- val = VIDTCON1_HBPD(timing->left_margin - 1) |
- VIDTCON1_HFPD(timing->right_margin - 1) |
- VIDTCON1_HSPW(timing->hsync_len - 1);
+ val = VIDTCON1_HBPD(vm->hback_porch - 1) |
+ VIDTCON1_HFPD(vm->hfront_porch - 1) |
+ VIDTCON1_HSPW(vm->hsync_len - 1);
writel(val, ctx->regs + driver_data->timing_base + VIDTCON1);
/* setup horizontal and vertical display size. */
- val = VIDTCON2_LINEVAL(timing->yres - 1) |
- VIDTCON2_HOZVAL(timing->xres - 1) |
- VIDTCON2_LINEVAL_E(timing->yres - 1) |
- VIDTCON2_HOZVAL_E(timing->xres - 1);
+ val = VIDTCON2_LINEVAL(vm->vactive - 1) |
+ VIDTCON2_HOZVAL(vm->hactive - 1) |
+ VIDTCON2_LINEVAL_E(vm->vactive - 1) |
+ VIDTCON2_HOZVAL_E(vm->hactive - 1);
writel(val, ctx->regs + driver_data->timing_base + VIDTCON2);
/* setup clock source, clock divider, enable dma. */
@@ -398,6 +395,7 @@ static void fimd_win_mode_set(struct device *dev,
win_data->fb_height = overlay->fb_height;
win_data->dma_addr = overlay->dma_addr[0] + offset;
win_data->bpp = overlay->bpp;
+ win_data->pixel_format = overlay->pixel_format;
win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
(overlay->bpp >> 3);
win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3);
@@ -419,39 +417,38 @@ static void fimd_win_set_pixfmt(struct device *dev, unsigned int win)
val = WINCONx_ENWIN;
- switch (win_data->bpp) {
- case 1:
- val |= WINCON0_BPPMODE_1BPP;
- val |= WINCONx_BITSWP;
- val |= WINCONx_BURSTLEN_4WORD;
- break;
- case 2:
- val |= WINCON0_BPPMODE_2BPP;
- val |= WINCONx_BITSWP;
- val |= WINCONx_BURSTLEN_8WORD;
- break;
- case 4:
- val |= WINCON0_BPPMODE_4BPP;
- val |= WINCONx_BITSWP;
- val |= WINCONx_BURSTLEN_8WORD;
- break;
- case 8:
+ /*
+ * On s3c64xx, window 0 does not support an alpha channel.
+ * So if the requested format is ARGB8888, change it to XRGB8888.
+ */
+ if (ctx->driver_data->has_limited_fmt && !win) {
+ if (win_data->pixel_format == DRM_FORMAT_ARGB8888)
+ win_data->pixel_format = DRM_FORMAT_XRGB8888;
+ }
+
+ switch (win_data->pixel_format) {
+ case DRM_FORMAT_C8:
val |= WINCON0_BPPMODE_8BPP_PALETTE;
val |= WINCONx_BURSTLEN_8WORD;
val |= WINCONx_BYTSWP;
break;
- case 16:
+ case DRM_FORMAT_XRGB1555:
+ val |= WINCON0_BPPMODE_16BPP_1555;
+ val |= WINCONx_HAWSWP;
+ val |= WINCONx_BURSTLEN_16WORD;
+ break;
+ case DRM_FORMAT_RGB565:
val |= WINCON0_BPPMODE_16BPP_565;
val |= WINCONx_HAWSWP;
val |= WINCONx_BURSTLEN_16WORD;
break;
- case 24:
+ case DRM_FORMAT_XRGB8888:
val |= WINCON0_BPPMODE_24BPP_888;
val |= WINCONx_WSWP;
val |= WINCONx_BURSTLEN_16WORD;
break;
- case 32:
- val |= WINCON1_BPPMODE_28BPP_A4888
+ case DRM_FORMAT_ARGB8888:
+ val |= WINCON1_BPPMODE_25BPP_A1888
| WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
val |= WINCONx_WSWP;
val |= WINCONx_BURSTLEN_16WORD;
@@ -748,45 +745,54 @@ static void fimd_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
drm_iommu_detach_device(drm_dev, dev);
}
-static int fimd_calc_clkdiv(struct fimd_context *ctx,
- struct fb_videomode *timing)
+static int fimd_configure_clocks(struct fimd_context *ctx, struct device *dev)
{
- unsigned long clk = clk_get_rate(ctx->lcd_clk);
- u32 retrace;
- u32 clkdiv;
- u32 best_framerate = 0;
- u32 framerate;
-
- retrace = timing->left_margin + timing->hsync_len +
- timing->right_margin + timing->xres;
- retrace *= timing->upper_margin + timing->vsync_len +
- timing->lower_margin + timing->yres;
-
- /* default framerate is 60Hz */
- if (!timing->refresh)
- timing->refresh = 60;
-
- clk /= retrace;
-
- for (clkdiv = 1; clkdiv < 0x100; clkdiv++) {
- int tmp;
-
- /* get best framerate */
- framerate = clk / clkdiv;
- tmp = timing->refresh - framerate;
- if (tmp < 0) {
- best_framerate = framerate;
- continue;
- } else {
- if (!best_framerate)
- best_framerate = framerate;
- else if (tmp < (best_framerate - framerate))
- best_framerate = framerate;
- break;
+ struct videomode *vm = &ctx->panel.vm;
+ unsigned long clk;
+
+ ctx->bus_clk = devm_clk_get(dev, "fimd");
+ if (IS_ERR(ctx->bus_clk)) {
+ dev_err(dev, "failed to get bus clock\n");
+ return PTR_ERR(ctx->bus_clk);
+ }
+
+ ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
+ if (IS_ERR(ctx->lcd_clk)) {
+ dev_err(dev, "failed to get lcd clock\n");
+ return PTR_ERR(ctx->lcd_clk);
+ }
+
+ clk = clk_get_rate(ctx->lcd_clk);
+ if (clk == 0) {
+ dev_err(dev, "error getting sclk_fimd clock rate\n");
+ return -EINVAL;
+ }
+
+ if (vm->pixelclock == 0) {
+ unsigned long c;
+ c = vm->hactive + vm->hback_porch + vm->hfront_porch +
+ vm->hsync_len;
+ c *= vm->vactive + vm->vback_porch + vm->vfront_porch +
+ vm->vsync_len;
+ vm->pixelclock = c * FIMD_DEFAULT_FRAMERATE;
+ if (vm->pixelclock == 0) {
+ dev_err(dev, "incorrect display timings\n");
+ return -EINVAL;
}
+ dev_warn(dev, "pixel clock recalculated to %luHz (%dHz frame rate)\n",
+ vm->pixelclock, FIMD_DEFAULT_FRAMERATE);
}
+ ctx->clkdiv = DIV_ROUND_UP(clk, vm->pixelclock);
+ if (ctx->clkdiv > 256) {
+ dev_warn(dev, "calculated pixel clock divider too high (%u), lowered to 256\n",
+ ctx->clkdiv);
+ ctx->clkdiv = 256;
+ }
+ vm->pixelclock = clk / ctx->clkdiv;
+ DRM_DEBUG_KMS("pixel clock = %lu, clkdiv = %d\n", vm->pixelclock,
+ ctx->clkdiv);
- return clkdiv;
+ return 0;
}
static void fimd_clear_win(struct fimd_context *ctx, int win)
@@ -878,59 +884,53 @@ static int fimd_activate(struct fimd_context *ctx, bool enable)
return 0;
}
+static int fimd_get_platform_data(struct fimd_context *ctx, struct device *dev)
+{
+ struct videomode *vm;
+ int ret;
+
+ vm = &ctx->panel.vm;
+ ret = of_get_videomode(dev->of_node, vm, OF_USE_NATIVE_MODE);
+ if (ret) {
+ DRM_ERROR("failed: of_get_videomode() : %d\n", ret);
+ return ret;
+ }
+
+ if (vm->flags & DISPLAY_FLAGS_VSYNC_LOW)
+ ctx->vidcon1 |= VIDCON1_INV_VSYNC;
+ if (vm->flags & DISPLAY_FLAGS_HSYNC_LOW)
+ ctx->vidcon1 |= VIDCON1_INV_HSYNC;
+ if (vm->flags & DISPLAY_FLAGS_DE_LOW)
+ ctx->vidcon1 |= VIDCON1_INV_VDEN;
+ if (vm->flags & DISPLAY_FLAGS_PIXDATA_NEGEDGE)
+ ctx->vidcon1 |= VIDCON1_INV_VCLK;
+
+ return 0;
+}
+
static int fimd_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimd_context *ctx;
struct exynos_drm_subdrv *subdrv;
- struct exynos_drm_fimd_pdata *pdata;
- struct exynos_drm_panel_info *panel;
struct resource *res;
int win;
int ret = -EINVAL;
- if (dev->of_node) {
- pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- DRM_ERROR("memory allocation for pdata failed\n");
- return -ENOMEM;
- }
-
- ret = of_get_fb_videomode(dev->of_node, &pdata->panel.timing,
- OF_USE_NATIVE_MODE);
- if (ret) {
- DRM_ERROR("failed: of_get_fb_videomode() : %d\n", ret);
- return ret;
- }
- } else {
- pdata = dev->platform_data;
- if (!pdata) {
- DRM_ERROR("no platform data specified\n");
- return -EINVAL;
- }
- }
-
- panel = &pdata->panel;
- if (!panel) {
- dev_err(dev, "panel is null.\n");
- return -EINVAL;
- }
+ if (!dev->of_node)
+ return -ENODEV;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- ctx->bus_clk = devm_clk_get(dev, "fimd");
- if (IS_ERR(ctx->bus_clk)) {
- dev_err(dev, "failed to get bus clock\n");
- return PTR_ERR(ctx->bus_clk);
- }
+ ret = fimd_get_platform_data(ctx, dev);
+ if (ret)
+ return ret;
- ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd");
- if (IS_ERR(ctx->lcd_clk)) {
- dev_err(dev, "failed to get lcd clock\n");
- return PTR_ERR(ctx->lcd_clk);
- }
+ ret = fimd_configure_clocks(ctx, dev);
+ if (ret)
+ return ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -954,10 +954,6 @@ static int fimd_probe(struct platform_device *pdev)
}
ctx->driver_data = drm_fimd_get_driver_data(pdev);
- ctx->vidcon0 = pdata->vidcon0;
- ctx->vidcon1 = pdata->vidcon1;
- ctx->default_win = pdata->default_win;
- ctx->panel = panel;
DRM_INIT_WAITQUEUE(&ctx->wait_vsync_queue);
atomic_set(&ctx->wait_vsync_event, 0);
@@ -975,12 +971,6 @@ static int fimd_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
- ctx->clkdiv = fimd_calc_clkdiv(ctx, &panel->timing);
- panel->timing.pixclock = clk_get_rate(ctx->lcd_clk) / ctx->clkdiv;
-
- DRM_DEBUG_KMS("pixel clock = %d, clkdiv = %d\n",
- panel->timing.pixclock, ctx->clkdiv);
-
for (win = 0; win < WINDOWS_NR; win++)
fimd_clear_win(ctx, win);
@@ -1069,21 +1059,6 @@ static int fimd_runtime_resume(struct device *dev)
}
#endif
-static struct platform_device_id fimd_driver_ids[] = {
- {
- .name = "s3c64xx-fb",
- .driver_data = (unsigned long)&s3c64xx_fimd_driver_data,
- }, {
- .name = "exynos4-fb",
- .driver_data = (unsigned long)&exynos4_fimd_driver_data,
- }, {
- .name = "exynos5-fb",
- .driver_data = (unsigned long)&exynos5_fimd_driver_data,
- },
- {},
-};
-MODULE_DEVICE_TABLE(platform, fimd_driver_ids);
-
static const struct dev_pm_ops fimd_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL)
@@ -1092,11 +1067,10 @@ static const struct dev_pm_ops fimd_pm_ops = {
struct platform_driver fimd_driver = {
.probe = fimd_probe,
.remove = fimd_remove,
- .id_table = fimd_driver_ids,
.driver = {
.name = "exynos4-fb",
.owner = THIS_MODULE,
.pm = &fimd_pm_ops,
- .of_match_table = of_match_ptr(fimd_driver_dt_match),
+ .of_match_table = fimd_driver_dt_match,
},
};
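Editor's note: fimd_configure_clocks() above derives a pixel clock from the panel timings when the device tree omits one, then picks the smallest integer divider of sclk_fimd that does not exceed that target rate. The arithmetic as a standalone sketch; the vm struct and pick_clkdiv() are simplified stand-ins, and the 1280x720 timings and 800 MHz source clock are example numbers, not taken from the patch.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define DEFAULT_FRAMERATE       60

struct vm {
        unsigned long hactive, hfront_porch, hsync_len, hback_porch;
        unsigned long vactive, vfront_porch, vsync_len, vback_porch;
        unsigned long pixelclock;       /* Hz, 0 = not specified */
};

static unsigned long pick_clkdiv(struct vm *vm, unsigned long sclk_rate)
{
        unsigned long clkdiv;

        if (vm->pixelclock == 0) {
                /* total pixels per frame times the default refresh rate */
                unsigned long total =
                        (vm->hactive + vm->hfront_porch + vm->hsync_len +
                         vm->hback_porch) *
                        (vm->vactive + vm->vfront_porch + vm->vsync_len +
                         vm->vback_porch);
                vm->pixelclock = total * DEFAULT_FRAMERATE;
        }

        /* smallest divider keeping sclk/clkdiv at or below the target */
        clkdiv = DIV_ROUND_UP(sclk_rate, vm->pixelclock);
        if (clkdiv > 256)               /* hardware divider limit */
                clkdiv = 256;

        vm->pixelclock = sclk_rate / clkdiv;    /* effective rate */
        return clkdiv;
}

int main(void)
{
        /* example 1280x720 timings with no pixel clock given */
        struct vm vm = { 1280, 110, 40, 220, 720, 5, 5, 20, 0 };
        unsigned long clkdiv = pick_clkdiv(&vm, 800000000UL);

        printf("clkdiv=%lu, pixelclock=%luHz\n", clkdiv, vm.pixelclock);
        return 0;
}

With these example numbers the derived target is 74.25 MHz (1650 x 750 x 60), so the sketch prints clkdiv=11 and an effective pixel clock of 72727272 Hz.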
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 42a5a546607..3271fd4b172 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -8,7 +8,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/interrupt.h>
@@ -24,6 +23,7 @@
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
+#include "exynos_drm_g2d.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_iommu.h"
@@ -447,10 +447,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
}
g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
- if (!g2d_userptr) {
- DRM_ERROR("failed to allocate g2d_userptr.\n");
+ if (!g2d_userptr)
return ERR_PTR(-ENOMEM);
- }
atomic_set(&g2d_userptr->refcount, 1);
@@ -500,7 +498,6 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
- DRM_ERROR("failed to allocate sg table.\n");
ret = -ENOMEM;
goto err_free_userptr;
}
@@ -806,9 +803,11 @@ static void g2d_dma_start(struct g2d_data *g2d,
struct g2d_cmdlist_node *node =
list_first_entry(&runqueue_node->run_cmdlist,
struct g2d_cmdlist_node, list);
+ int ret;
- pm_runtime_get_sync(g2d->dev);
- clk_enable(g2d->gate_clk);
+ ret = pm_runtime_get_sync(g2d->dev);
+ if (ret < 0)
+ return;
writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
@@ -861,7 +860,6 @@ static void g2d_runqueue_worker(struct work_struct *work)
runqueue_work);
mutex_lock(&g2d->runqueue_mutex);
- clk_disable(g2d->gate_clk);
pm_runtime_put_sync(g2d->dev);
complete(&g2d->runqueue_node->complete);
@@ -1086,8 +1084,6 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
e = kzalloc(sizeof(*node->event), GFP_KERNEL);
if (!e) {
- dev_err(dev, "failed to allocate event\n");
-
spin_lock_irqsave(&drm_dev->event_lock, flags);
file->event_space += sizeof(e->event);
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -1317,10 +1313,8 @@ static int g2d_open(struct drm_device *drm_dev, struct device *dev,
struct exynos_drm_g2d_private *g2d_priv;
g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
- if (!g2d_priv) {
- dev_err(dev, "failed to allocate g2d private data\n");
+ if (!g2d_priv)
return -ENOMEM;
- }
g2d_priv->dev = dev;
file_priv->g2d_priv = g2d_priv;
@@ -1376,10 +1370,8 @@ static int g2d_probe(struct platform_device *pdev)
int ret;
g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
- if (!g2d) {
- dev_err(dev, "failed to allocate driver data\n");
+ if (!g2d)
return -ENOMEM;
- }
g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
sizeof(struct g2d_runqueue_node), 0, 0, NULL);
@@ -1514,15 +1506,38 @@ static int g2d_resume(struct device *dev)
}
#endif
-static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
+#ifdef CONFIG_PM_RUNTIME
+static int g2d_runtime_suspend(struct device *dev)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(g2d->gate_clk);
+
+ return 0;
+}
+
+static int g2d_runtime_resume(struct device *dev)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(g2d->gate_clk);
+ if (ret < 0)
+ dev_warn(dev, "failed to enable clock.\n");
+
+ return ret;
+}
+#endif
+
+static const struct dev_pm_ops g2d_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
+ SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
+};
-#ifdef CONFIG_OF
static const struct of_device_id exynos_g2d_match[] = {
{ .compatible = "samsung,exynos5250-g2d" },
{},
};
-MODULE_DEVICE_TABLE(of, exynos_g2d_match);
-#endif
struct platform_driver g2d_driver = {
.probe = g2d_probe,
@@ -1531,6 +1546,6 @@ struct platform_driver g2d_driver = {
.name = "s5p-g2d",
.owner = THIS_MODULE,
.pm = &g2d_pm_ops,
- .of_match_table = of_match_ptr(exynos_g2d_match),
+ .of_match_table = exynos_g2d_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 24c22a8c336..49f9cd23275 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -10,6 +10,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>
@@ -17,6 +18,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
+#include "exynos_drm_iommu.h"
static unsigned int convert_to_vm_err_msg(int msg)
{
@@ -135,7 +137,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
obj = &exynos_gem_obj->base;
buf = exynos_gem_obj->buffer;
- DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
+ DRM_DEBUG_KMS("handle count = %d\n", obj->handle_count);
/*
* do not release memory region from exporter.
@@ -152,8 +154,7 @@ out:
exynos_drm_fini_buf(obj->dev, buf);
exynos_gem_obj->buffer = NULL;
- if (obj->map_list.map)
- drm_gem_free_mmap_offset(obj);
+ drm_gem_free_mmap_offset(obj);
/* release file pointer to gem object. */
drm_gem_object_release(obj);
@@ -191,10 +192,8 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
int ret;
exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
- if (!exynos_gem_obj) {
- DRM_ERROR("failed to allocate exynos gem object\n");
+ if (!exynos_gem_obj)
return NULL;
- }
exynos_gem_obj->size = size;
obj = &exynos_gem_obj->base;
@@ -668,6 +667,18 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG |
EXYNOS_BO_WC, args->size);
+ /*
+ * If the physically contiguous allocation fails and IOMMU is
+ * supported, retry the allocation from a non-physically-contiguous
+ * memory area.
+ */
+ if (IS_ERR(exynos_gem_obj) && is_drm_iommu_supported(dev)) {
+ dev_warn(dev->dev, "contiguous FB allocation failed, falling back to non-contiguous\n");
+ exynos_gem_obj = exynos_drm_gem_create(dev,
+ EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
+ args->size);
+ }
+
if (IS_ERR(exynos_gem_obj))
return PTR_ERR(exynos_gem_obj);
@@ -703,13 +714,11 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
goto unlock;
}
- if (!obj->map_list.map) {
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto out;
- }
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
- *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
out:
@@ -719,26 +728,6 @@ unlock:
return ret;
}
-int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- unsigned int handle)
-{
- int ret;
-
- /*
- * obj->refcount and obj->handle_count are decreased and
- * if both them are 0 then exynos_drm_gem_free_object()
- * would be called by callback to release resources.
- */
- ret = drm_gem_handle_delete(file_priv, handle);
- if (ret < 0) {
- DRM_ERROR("failed to delete drm_gem_handle.\n");
- return ret;
- }
-
- return 0;
-}
-
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 468766bee45..09555afdfe9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -151,15 +151,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev, uint32_t handle,
uint64_t *offset);
-/*
- * destroy memory region allocated.
- * - a gem handle and physical memory region pointed by a gem object
- * would be released by drm_gem_handle_delete().
- */
-int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- unsigned int handle);
-
/* page fault handler and mmap fault address(virtual) to physical memory. */
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 472e3b25e7f..cd6aebd53bd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -12,7 +12,6 @@
*
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
@@ -21,6 +20,7 @@
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
#include "regs-gsc.h"
+#include "exynos_drm_drv.h"
#include "exynos_drm_ipp.h"
#include "exynos_drm_gsc.h"
@@ -1338,10 +1338,8 @@ static int gsc_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
struct drm_exynos_ipp_prop_list *prop_list;
prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
- if (!prop_list) {
- DRM_ERROR("failed to alloc property list.\n");
+ if (!prop_list)
return -ENOMEM;
- }
prop_list->version = 1;
prop_list->writeback = 1;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index aaa550d622f..8548b974bd5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -15,7 +15,6 @@
#include <linux/kernel.h>
#include <linux/wait.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -404,10 +403,8 @@ static int exynos_drm_hdmi_probe(struct platform_device *pdev)
struct drm_hdmi_context *ctx;
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- DRM_LOG_KMS("failed to alloc common hdmi context.\n");
+ if (!ctx)
return -ENOMEM;
- }
subdrv = &ctx->subdrv;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index 3799d5c2b5d..fb8db037827 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -47,10 +47,16 @@ int drm_create_iommu_mapping(struct drm_device *drm_dev)
dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
GFP_KERNEL);
+ if (!dev->dma_parms)
+ goto error;
+
dma_set_max_seg_size(dev, 0xffffffffu);
dev->archdata.mapping = mapping;
return 0;
+error:
+ arm_iommu_release_mapping(mapping);
+ return -ENOMEM;
}
/*
@@ -91,6 +97,9 @@ int drm_iommu_attach_device(struct drm_device *drm_dev,
subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
sizeof(*subdrv_dev->dma_parms),
GFP_KERNEL);
+ if (!subdrv_dev->dma_parms)
+ return -ENOMEM;
+
dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index b1ef8e7ff9c..824e0705c8d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -12,7 +12,6 @@
*
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/clk.h>
@@ -342,10 +341,10 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
*/
ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
prop_list->ipp_id);
- if (!ippdrv) {
+ if (IS_ERR(ippdrv)) {
DRM_ERROR("not found ipp%d driver.\n",
prop_list->ipp_id);
- return -EINVAL;
+ return PTR_ERR(ippdrv);
}
prop_list = ippdrv->prop_list;
@@ -409,10 +408,8 @@ static struct drm_exynos_ipp_cmd_work *ipp_create_cmd_work(void)
struct drm_exynos_ipp_cmd_work *cmd_work;
cmd_work = kzalloc(sizeof(*cmd_work), GFP_KERNEL);
- if (!cmd_work) {
- DRM_ERROR("failed to alloc cmd_work.\n");
+ if (!cmd_work)
return ERR_PTR(-ENOMEM);
- }
INIT_WORK((struct work_struct *)cmd_work, ipp_sched_cmd);
@@ -424,10 +421,8 @@ static struct drm_exynos_ipp_event_work *ipp_create_event_work(void)
struct drm_exynos_ipp_event_work *event_work;
event_work = kzalloc(sizeof(*event_work), GFP_KERNEL);
- if (!event_work) {
- DRM_ERROR("failed to alloc event_work.\n");
+ if (!event_work)
return ERR_PTR(-ENOMEM);
- }
INIT_WORK((struct work_struct *)event_work, ipp_sched_event);
@@ -483,10 +478,8 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
/* allocate command node */
c_node = kzalloc(sizeof(*c_node), GFP_KERNEL);
- if (!c_node) {
- DRM_ERROR("failed to allocate map node.\n");
+ if (!c_node)
return -ENOMEM;
- }
/* create property id */
ret = ipp_create_id(&ctx->prop_idr, &ctx->prop_lock, c_node,
@@ -695,10 +688,8 @@ static struct drm_exynos_ipp_mem_node
mutex_lock(&c_node->mem_lock);
m_node = kzalloc(sizeof(*m_node), GFP_KERNEL);
- if (!m_node) {
- DRM_ERROR("failed to allocate queue node.\n");
+ if (!m_node)
goto err_unlock;
- }
/* clear base address for error handling */
memset(&buf_info, 0x0, sizeof(buf_info));
@@ -799,9 +790,7 @@ static int ipp_get_event(struct drm_device *drm_dev,
DRM_DEBUG_KMS("ops_id[%d]buf_id[%d]\n", qbuf->ops_id, qbuf->buf_id);
e = kzalloc(sizeof(*e), GFP_KERNEL);
-
if (!e) {
- DRM_ERROR("failed to allocate event.\n");
spin_lock_irqsave(&drm_dev->event_lock, flags);
file->event_space += sizeof(e->event);
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
@@ -970,9 +959,9 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
/* find command node */
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
qbuf->prop_id);
- if (!c_node) {
+ if (IS_ERR(c_node)) {
DRM_ERROR("failed to get command node.\n");
- return -EFAULT;
+ return PTR_ERR(c_node);
}
/* buffer control */
@@ -1106,9 +1095,9 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,
c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
cmd_ctrl->prop_id);
- if (!c_node) {
+ if (IS_ERR(c_node)) {
DRM_ERROR("invalid command node list.\n");
- return -EINVAL;
+ return PTR_ERR(c_node);
}
if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,
@@ -1781,10 +1770,8 @@ static int ipp_subdrv_open(struct drm_device *drm_dev, struct device *dev,
struct exynos_drm_ipp_private *priv;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- DRM_ERROR("failed to allocate priv.\n");
+ if (!priv)
return -ENOMEM;
- }
priv->dev = dev;
file_priv->ipp_priv = priv;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 6ee55e68e0a..fcb0652e77d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -16,6 +16,7 @@
#include "exynos_drm_encoder.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
+#include "exynos_drm_plane.h"
#define to_exynos_plane(x) container_of(x, struct exynos_plane, base)
@@ -264,10 +265,8 @@ struct drm_plane *exynos_plane_init(struct drm_device *dev,
int err;
exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
- if (!exynos_plane) {
- DRM_ERROR("failed to allocate plane\n");
+ if (!exynos_plane)
return NULL;
- }
err = drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
&exynos_plane_funcs, formats, ARRAY_SIZE(formats),
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 427640aa514..7b901688def 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -10,7 +10,6 @@
*/
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -22,6 +21,7 @@
#include <drm/exynos_drm.h>
#include "regs-rotator.h"
#include "exynos_drm.h"
+#include "exynos_drm_drv.h"
#include "exynos_drm_ipp.h"
/*
@@ -472,10 +472,8 @@ static int rotator_init_prop_list(struct exynos_drm_ippdrv *ippdrv)
struct drm_exynos_ipp_prop_list *prop_list;
prop_list = devm_kzalloc(ippdrv->dev, sizeof(*prop_list), GFP_KERNEL);
- if (!prop_list) {
- DRM_ERROR("failed to alloc property list.\n");
+ if (!prop_list)
return -ENOMEM;
- }
prop_list->version = 1;
prop_list->flip = (1 << EXYNOS_DRM_FLIP_VERTICAL) |
@@ -632,21 +630,96 @@ static int rotator_ippdrv_start(struct device *dev, enum drm_exynos_ipp_cmd cmd)
return 0;
}
+static struct rot_limit_table rot_limit_tbl_4210 = {
+ .ycbcr420_2p = {
+ .min_w = 32,
+ .min_h = 32,
+ .max_w = SZ_64K,
+ .max_h = SZ_64K,
+ .align = 3,
+ },
+ .rgb888 = {
+ .min_w = 8,
+ .min_h = 8,
+ .max_w = SZ_16K,
+ .max_h = SZ_16K,
+ .align = 2,
+ },
+};
+
+static struct rot_limit_table rot_limit_tbl_4x12 = {
+ .ycbcr420_2p = {
+ .min_w = 32,
+ .min_h = 32,
+ .max_w = SZ_32K,
+ .max_h = SZ_32K,
+ .align = 3,
+ },
+ .rgb888 = {
+ .min_w = 8,
+ .min_h = 8,
+ .max_w = SZ_8K,
+ .max_h = SZ_8K,
+ .align = 2,
+ },
+};
+
+static struct rot_limit_table rot_limit_tbl_5250 = {
+ .ycbcr420_2p = {
+ .min_w = 32,
+ .min_h = 32,
+ .max_w = SZ_32K,
+ .max_h = SZ_32K,
+ .align = 3,
+ },
+ .rgb888 = {
+ .min_w = 8,
+ .min_h = 8,
+ .max_w = SZ_8K,
+ .max_h = SZ_8K,
+ .align = 1,
+ },
+};
+
+static const struct of_device_id exynos_rotator_match[] = {
+ {
+ .compatible = "samsung,exynos4210-rotator",
+ .data = &rot_limit_tbl_4210,
+ },
+ {
+ .compatible = "samsung,exynos4212-rotator",
+ .data = &rot_limit_tbl_4x12,
+ },
+ {
+ .compatible = "samsung,exynos5250-rotator",
+ .data = &rot_limit_tbl_5250,
+ },
+ {},
+};
+
static int rotator_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct rot_context *rot;
struct exynos_drm_ippdrv *ippdrv;
+ const struct of_device_id *match;
int ret;
+ if (!dev->of_node) {
+ dev_err(dev, "cannot find of_node.\n");
+ return -ENODEV;
+ }
+
rot = devm_kzalloc(dev, sizeof(*rot), GFP_KERNEL);
- if (!rot) {
- dev_err(dev, "failed to allocate rot\n");
+ if (!rot)
return -ENOMEM;
- }
- rot->limit_tbl = (struct rot_limit_table *)
- platform_get_device_id(pdev)->driver_data;
+ match = of_match_node(exynos_rotator_match, dev->of_node);
+ if (!match) {
+ dev_err(dev, "failed to match node\n");
+ return -ENODEV;
+ }
+ rot->limit_tbl = (struct rot_limit_table *)match->data;
rot->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rot->regs = devm_ioremap_resource(dev, rot->regs_res);
@@ -718,31 +791,6 @@ static int rotator_remove(struct platform_device *pdev)
return 0;
}
-static struct rot_limit_table rot_limit_tbl = {
- .ycbcr420_2p = {
- .min_w = 32,
- .min_h = 32,
- .max_w = SZ_32K,
- .max_h = SZ_32K,
- .align = 3,
- },
- .rgb888 = {
- .min_w = 8,
- .min_h = 8,
- .max_w = SZ_8K,
- .max_h = SZ_8K,
- .align = 2,
- },
-};
-
-static struct platform_device_id rotator_driver_ids[] = {
- {
- .name = "exynos-rot",
- .driver_data = (unsigned long)&rot_limit_tbl,
- },
- {},
-};
-
static int rotator_clk_crtl(struct rot_context *rot, bool enable)
{
if (enable) {
@@ -804,10 +852,10 @@ static const struct dev_pm_ops rotator_pm_ops = {
struct platform_driver rotator_driver = {
.probe = rotator_probe,
.remove = rotator_remove,
- .id_table = rotator_driver_ids,
.driver = {
.name = "exynos-rot",
.owner = THIS_MODULE,
.pm = &rotator_pm_ops,
+ .of_match_table = exynos_rotator_match,
},
};
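Editor's note: the rotator changes above move the per-SoC limit tables from platform_device_id driver_data onto the OF match table, so probe() selects them with of_match_node(). A standalone model of picking variant data by compatible string; the of_match struct and match_node() are simplified stand-ins for of_device_id/of_match_node, and the limit values are illustrative rather than copied from the patch.

#include <stdio.h>
#include <string.h>

struct limit { unsigned int min_w, min_h, max_w, max_h; };

/* Match entries pair a compatible string with per-variant data. */
struct of_match {
        const char *compatible;
        const struct limit *data;
};

static const struct limit limits_4210 = { 8, 8, 16 << 10, 16 << 10 };
static const struct limit limits_4x12 = { 8, 8,  8 << 10,  8 << 10 };

static const struct of_match rotator_match[] = {
        { "samsung,exynos4210-rotator", &limits_4210 },
        { "samsung,exynos4212-rotator", &limits_4x12 },
        { NULL, NULL },
};

/* Simplified stand-in for of_match_node(): match on compatible string. */
static const struct of_match *match_node(const struct of_match *tbl,
                                         const char *compatible)
{
        for (; tbl->compatible; tbl++)
                if (!strcmp(tbl->compatible, compatible))
                        return tbl;
        return NULL;
}

int main(void)
{
        const struct of_match *m =
                match_node(rotator_match, "samsung,exynos4212-rotator");

        if (!m)
                return 1;
        printf("max_w=%u max_h=%u\n", m->data->max_w, m->data->max_h);
        return 0;
}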
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 41cc74d83e4..4400330e444 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -13,7 +13,6 @@
#include <drm/drmP.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <drm/exynos_drm.h>
@@ -24,6 +23,7 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
#include "exynos_drm_encoder.h"
+#include "exynos_drm_vidi.h"
/* vidi has totally three virtual windows. */
#define WINDOWS_NR 3
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 62ef5971ac3..a0e10aeb0e6 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -24,7 +24,6 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/i2c.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -33,6 +32,7 @@
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/io.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
#include <drm/exynos_drm.h>
@@ -1825,10 +1825,8 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
res->regul_bulk = devm_kzalloc(dev, ARRAY_SIZE(supply) *
sizeof(res->regul_bulk[0]), GFP_KERNEL);
- if (!res->regul_bulk) {
- DRM_ERROR("failed to get memory for regulators\n");
+ if (!res->regul_bulk)
goto fail;
- }
for (i = 0; i < ARRAY_SIZE(supply); ++i) {
res->regul_bulk[i].supply = supply[i];
res->regul_bulk[i].consumer = NULL;
@@ -1860,7 +1858,6 @@ void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy)
hdmi_hdmiphy = hdmiphy;
}
-#ifdef CONFIG_OF
static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
(struct device *dev)
{
@@ -1869,10 +1866,8 @@ static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
u32 value;
pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- DRM_ERROR("memory allocation for pdata failed\n");
+ if (!pd)
goto err_data;
- }
if (!of_find_property(np, "hpd-gpio", &value)) {
DRM_ERROR("no hpd gpio property found\n");
@@ -1886,33 +1881,7 @@ static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
err_data:
return NULL;
}
-#else
-static struct s5p_hdmi_platform_data *drm_hdmi_dt_parse_pdata
- (struct device *dev)
-{
- return NULL;
-}
-#endif
-
-static struct platform_device_id hdmi_driver_types[] = {
- {
- .name = "s5pv210-hdmi",
- .driver_data = HDMI_TYPE13,
- }, {
- .name = "exynos4-hdmi",
- .driver_data = HDMI_TYPE13,
- }, {
- .name = "exynos4-hdmi14",
- .driver_data = HDMI_TYPE14,
- }, {
- .name = "exynos5-hdmi",
- .driver_data = HDMI_TYPE14,
- }, {
- /* end node */
- }
-};
-#ifdef CONFIG_OF
static struct of_device_id hdmi_match_types[] = {
{
.compatible = "samsung,exynos5-hdmi",
@@ -1924,7 +1893,6 @@ static struct of_device_id hdmi_match_types[] = {
/* end node */
}
};
-#endif
static int hdmi_probe(struct platform_device *pdev)
{
@@ -1933,36 +1901,23 @@ static int hdmi_probe(struct platform_device *pdev)
struct hdmi_context *hdata;
struct s5p_hdmi_platform_data *pdata;
struct resource *res;
+ const struct of_device_id *match;
int ret;
- if (dev->of_node) {
- pdata = drm_hdmi_dt_parse_pdata(dev);
- if (IS_ERR(pdata)) {
- DRM_ERROR("failed to parse dt\n");
- return PTR_ERR(pdata);
- }
- } else {
- pdata = dev->platform_data;
- }
+ if (!dev->of_node)
+ return -ENODEV;
- if (!pdata) {
- DRM_ERROR("no platform data specified\n");
+ pdata = drm_hdmi_dt_parse_pdata(dev);
+ if (!pdata)
return -EINVAL;
- }
- drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx),
- GFP_KERNEL);
- if (!drm_hdmi_ctx) {
- DRM_ERROR("failed to allocate common hdmi context.\n");
+ drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx), GFP_KERNEL);
+ if (!drm_hdmi_ctx)
return -ENOMEM;
- }
- hdata = devm_kzalloc(dev, sizeof(struct hdmi_context),
- GFP_KERNEL);
- if (!hdata) {
- DRM_ERROR("out of memory\n");
+ hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
+ if (!hdata)
return -ENOMEM;
- }
mutex_init(&hdata->hdmi_mutex);
@@ -1971,23 +1926,15 @@ static int hdmi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, drm_hdmi_ctx);
- if (dev->of_node) {
- const struct of_device_id *match;
- match = of_match_node(of_match_ptr(hdmi_match_types),
- dev->of_node);
- if (match == NULL)
- return -ENODEV;
- hdata->type = (enum hdmi_type)match->data;
- } else {
- hdata->type = (enum hdmi_type)platform_get_device_id
- (pdev)->driver_data;
- }
+ match = of_match_node(hdmi_match_types, dev->of_node);
+ if (!match)
+ return -ENODEV;
+ hdata->type = (enum hdmi_type)match->data;
hdata->hpd_gpio = pdata->hpd_gpio;
hdata->dev = dev;
ret = hdmi_resources_init(hdata);
-
if (ret) {
DRM_ERROR("hdmi_resources_init failed\n");
return -EINVAL;
@@ -2142,11 +2089,10 @@ static const struct dev_pm_ops hdmi_pm_ops = {
struct platform_driver hdmi_driver = {
.probe = hdmi_probe,
.remove = hdmi_remove,
- .id_table = hdmi_driver_types,
.driver = {
.name = "exynos-hdmi",
.owner = THIS_MODULE,
.pm = &hdmi_pm_ops,
- .of_match_table = of_match_ptr(hdmi_match_types),
+ .of_match_table = hdmi_match_types,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index ef04255076c..59abb1494ce 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/i2c.h>
-#include <linux/module.h>
+#include <linux/of.h>
#include "exynos_drm_drv.h"
#include "exynos_hdmi.h"
@@ -40,13 +40,6 @@ static int hdmiphy_remove(struct i2c_client *client)
return 0;
}
-static const struct i2c_device_id hdmiphy_id[] = {
- { "s5p_hdmiphy", 0 },
- { "exynos5-hdmiphy", 0 },
- { },
-};
-
-#ifdef CONFIG_OF
static struct of_device_id hdmiphy_match_types[] = {
{
.compatible = "samsung,exynos5-hdmiphy",
@@ -58,15 +51,13 @@ static struct of_device_id hdmiphy_match_types[] = {
/* end node */
}
};
-#endif
struct i2c_driver hdmiphy_driver = {
.driver = {
.name = "exynos-hdmiphy",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(hdmiphy_match_types),
+ .of_match_table = hdmiphy_match_types,
},
- .id_table = hdmiphy_id,
.probe = hdmiphy_probe,
.remove = hdmiphy_remove,
.command = NULL,
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 42ffb71c63b..63bc5f92fbb 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -23,7 +23,6 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/i2c.h>
-#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -31,6 +30,7 @@
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
+#include <linux/of.h>
#include <drm/exynos_drm.h>
@@ -1186,16 +1186,12 @@ static int mixer_probe(struct platform_device *pdev)
drm_hdmi_ctx = devm_kzalloc(dev, sizeof(*drm_hdmi_ctx),
GFP_KERNEL);
- if (!drm_hdmi_ctx) {
- DRM_ERROR("failed to allocate common hdmi context.\n");
+ if (!drm_hdmi_ctx)
return -ENOMEM;
- }
ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
- if (!ctx) {
- DRM_ERROR("failed to alloc mixer context.\n");
+ if (!ctx)
return -ENOMEM;
- }
mutex_init(&ctx->mixer_mutex);
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index 7a2d40a5c1e..e9064dd9045 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -15,6 +15,7 @@ gma500_gfx-y += \
mmu.o \
power.o \
psb_drv.o \
+ gma_display.o \
psb_intel_display.o \
psb_intel_lvds.o \
psb_intel_modes.o \
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 23e14e93991..162f686c532 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -641,6 +641,7 @@ const struct psb_ops cdv_chip_ops = {
.crtc_helper = &cdv_intel_helper_funcs,
.crtc_funcs = &cdv_intel_crtc_funcs,
+ .clock_funcs = &cdv_clock_funcs,
.output_init = cdv_output_init,
.hotplug = cdv_hotplug_event,
@@ -655,4 +656,6 @@ const struct psb_ops cdv_chip_ops = {
.restore_regs = cdv_restore_display_registers,
.power_down = cdv_power_down,
.power_up = cdv_power_up,
+ .update_wm = cdv_update_wm,
+ .disable_sr = cdv_disable_sr,
};
diff --git a/drivers/gpu/drm/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h
index 9561e17621b..705c11d47d4 100644
--- a/drivers/gpu/drm/gma500/cdv_device.h
+++ b/drivers/gpu/drm/gma500/cdv_device.h
@@ -17,6 +17,7 @@
extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs;
extern const struct drm_crtc_funcs cdv_intel_crtc_funcs;
+extern const struct gma_clock_funcs cdv_clock_funcs;
extern void cdv_intel_crt_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev);
extern void cdv_intel_lvds_init(struct drm_device *dev,
@@ -25,12 +26,5 @@ extern void cdv_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *
int reg);
extern struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
-
-static inline void cdv_intel_wait_for_vblank(struct drm_device *dev)
-{
- /* Wait for 20ms, i.e. one cycle at 50hz. */
- /* FIXME: msleep ?? */
- mdelay(20);
-}
-
-
+extern void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc);
+extern void cdv_disable_sr(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index 7b8386fc302..661af492173 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -95,13 +95,12 @@ static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
- struct psb_intel_crtc *psb_intel_crtc =
- to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int dpll_md_reg;
u32 adpa, dpll_md;
u32 adpa_reg;
- if (psb_intel_crtc->pipe == 0)
+ if (gma_crtc->pipe == 0)
dpll_md_reg = DPLL_A_MD;
else
dpll_md_reg = DPLL_B_MD;
@@ -124,7 +123,7 @@ static void cdv_intel_crt_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
- if (psb_intel_crtc->pipe == 0)
+ if (gma_crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
else
adpa |= ADPA_PIPE_B_SELECT;
@@ -197,10 +196,9 @@ static enum drm_connector_status cdv_intel_crt_detect(
static void cdv_intel_crt_destroy(struct drm_connector *connector)
{
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
- psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+ psb_intel_i2c_destroy(gma_encoder->ddc_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -208,9 +206,9 @@ static void cdv_intel_crt_destroy(struct drm_connector *connector)
static int cdv_intel_crt_get_modes(struct drm_connector *connector)
{
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
- return psb_intel_ddc_get_modes(connector, &psb_intel_encoder->ddc_bus->adapter);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ return psb_intel_ddc_get_modes(connector,
+ &gma_encoder->ddc_bus->adapter);
}
static int cdv_intel_crt_set_property(struct drm_connector *connector,
@@ -227,8 +225,8 @@ static int cdv_intel_crt_set_property(struct drm_connector *connector,
static const struct drm_encoder_helper_funcs cdv_intel_crt_helper_funcs = {
.dpms = cdv_intel_crt_dpms,
.mode_fixup = cdv_intel_crt_mode_fixup,
- .prepare = psb_intel_encoder_prepare,
- .commit = psb_intel_encoder_commit,
+ .prepare = gma_encoder_prepare,
+ .commit = gma_encoder_commit,
.mode_set = cdv_intel_crt_mode_set,
};
@@ -244,7 +242,7 @@ static const struct drm_connector_helper_funcs
cdv_intel_crt_connector_helper_funcs = {
.mode_valid = cdv_intel_crt_mode_valid,
.get_modes = cdv_intel_crt_get_modes,
- .best_encoder = psb_intel_best_encoder,
+ .best_encoder = gma_best_encoder,
};
static void cdv_intel_crt_enc_destroy(struct drm_encoder *encoder)
@@ -260,32 +258,31 @@ void cdv_intel_crt_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
- struct psb_intel_connector *psb_intel_connector;
- struct psb_intel_encoder *psb_intel_encoder;
+ struct gma_connector *gma_connector;
+ struct gma_encoder *gma_encoder;
struct drm_connector *connector;
struct drm_encoder *encoder;
u32 i2c_reg;
- psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
- if (!psb_intel_encoder)
+ gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
+ if (!gma_encoder)
return;
- psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
- if (!psb_intel_connector)
+ gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
+ if (!gma_connector)
goto failed_connector;
- connector = &psb_intel_connector->base;
+ connector = &gma_connector->base;
connector->polled = DRM_CONNECTOR_POLL_HPD;
drm_connector_init(dev, connector,
&cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
- encoder = &psb_intel_encoder->base;
+ encoder = &gma_encoder->base;
drm_encoder_init(dev, encoder,
&cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC);
- psb_intel_connector_attach_encoder(psb_intel_connector,
- psb_intel_encoder);
+ gma_connector_attach_encoder(gma_connector, gma_encoder);
/* Set up the DDC bus. */
i2c_reg = GPIOA;
@@ -294,15 +291,15 @@ void cdv_intel_crt_init(struct drm_device *dev,
if (dev_priv->crt_ddc_bus != 0)
i2c_reg = dev_priv->crt_ddc_bus;
}*/
- psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
+ gma_encoder->ddc_bus = psb_intel_i2c_create(dev,
i2c_reg, "CRTDDC_A");
- if (!psb_intel_encoder->ddc_bus) {
+ if (!gma_encoder->ddc_bus) {
dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
"failed.\n");
goto failed_ddc;
}
- psb_intel_encoder->type = INTEL_OUTPUT_ANALOG;
+ gma_encoder->type = INTEL_OUTPUT_ANALOG;
/*
psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT);
psb_intel_output->crtc_mask = (1 << 0) | (1 << 1);
@@ -318,10 +315,10 @@ void cdv_intel_crt_init(struct drm_device *dev,
return;
failed_ddc:
- drm_encoder_cleanup(&psb_intel_encoder->base);
- drm_connector_cleanup(&psb_intel_connector->base);
- kfree(psb_intel_connector);
+ drm_encoder_cleanup(&gma_encoder->base);
+ drm_connector_cleanup(&gma_connector->base);
+ kfree(gma_connector);
failed_connector:
- kfree(psb_intel_encoder);
+ kfree(gma_encoder);
return;
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 82430ad8ba6..8fbfa06da62 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -19,54 +19,20 @@
*/
#include <linux/i2c.h>
-#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include "framebuffer.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "psb_intel_display.h"
+#include "gma_display.h"
#include "power.h"
#include "cdv_device.h"
+static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
+ struct drm_crtc *crtc, int target,
+ int refclk, struct gma_clock_t *best_clock);
-struct cdv_intel_range_t {
- int min, max;
-};
-
-struct cdv_intel_p2_t {
- int dot_limit;
- int p2_slow, p2_fast;
-};
-
-struct cdv_intel_clock_t {
- /* given values */
- int n;
- int m1, m2;
- int p1, p2;
- /* derived values */
- int dot;
- int vco;
- int m;
- int p;
-};
-
-#define INTEL_P2_NUM 2
-
-struct cdv_intel_limit_t {
- struct cdv_intel_range_t dot, vco, n, m, m1, m2, p, p1;
- struct cdv_intel_p2_t p2;
- bool (*find_pll)(const struct cdv_intel_limit_t *, struct drm_crtc *,
- int, int, struct cdv_intel_clock_t *);
-};
-
-static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
- struct drm_crtc *crtc, int target, int refclk,
- struct cdv_intel_clock_t *best_clock);
-static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
- int refclk,
- struct cdv_intel_clock_t *best_clock);
#define CDV_LIMIT_SINGLE_LVDS_96 0
#define CDV_LIMIT_SINGLE_LVDS_100 1
@@ -75,7 +41,7 @@ static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct
#define CDV_LIMIT_DP_27 4
#define CDV_LIMIT_DP_100 5
-static const struct cdv_intel_limit_t cdv_intel_limits[] = {
+static const struct gma_limit_t cdv_intel_limits[] = {
{ /* CDV_SINGLE_LVDS_96MHz */
.dot = {.min = 20000, .max = 115500},
.vco = {.min = 1800000, .max = 3600000},
@@ -85,9 +51,8 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
.m2 = {.min = 58, .max = 158},
.p = {.min = 28, .max = 140},
.p1 = {.min = 2, .max = 10},
- .p2 = {.dot_limit = 200000,
- .p2_slow = 14, .p2_fast = 14},
- .find_pll = cdv_intel_find_best_PLL,
+ .p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
+ .find_pll = gma_find_best_pll,
},
{ /* CDV_SINGLE_LVDS_100MHz */
.dot = {.min = 20000, .max = 115500},
@@ -102,7 +67,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
* is 80-224Mhz. Prefer single channel as much as possible.
*/
.p2 = {.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14},
- .find_pll = cdv_intel_find_best_PLL,
+ .find_pll = gma_find_best_pll,
},
{ /* CDV_DAC_HDMI_27MHz */
.dot = {.min = 20000, .max = 400000},
@@ -114,7 +79,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
.p = {.min = 5, .max = 90},
.p1 = {.min = 1, .max = 9},
.p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
- .find_pll = cdv_intel_find_best_PLL,
+ .find_pll = gma_find_best_pll,
},
{ /* CDV_DAC_HDMI_96MHz */
.dot = {.min = 20000, .max = 400000},
@@ -126,7 +91,7 @@ static const struct cdv_intel_limit_t cdv_intel_limits[] = {
.p = {.min = 5, .max = 100},
.p1 = {.min = 1, .max = 10},
.p2 = {.dot_limit = 225000, .p2_slow = 10, .p2_fast = 5},
- .find_pll = cdv_intel_find_best_PLL,
+ .find_pll = gma_find_best_pll,
},
{ /* CDV_DP_27MHz */
.dot = {.min = 160000, .max = 272000},
@@ -255,10 +220,10 @@ void cdv_sb_reset(struct drm_device *dev)
*/
static int
cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
- struct cdv_intel_clock_t *clock, bool is_lvds, u32 ddi_select)
+ struct gma_clock_t *clock, bool is_lvds, u32 ddi_select)
{
- struct psb_intel_crtc *psb_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_crtc->pipe;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
u32 m, n_vco, p;
int ret = 0;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
@@ -405,31 +370,11 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
return 0;
}
-/*
- * Returns whether any encoder on the specified pipe is of the specified type
- */
-static bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *l_entry;
-
- list_for_each_entry(l_entry, &mode_config->connector_list, head) {
- if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(l_entry);
- if (psb_intel_encoder->type == type)
- return true;
- }
- }
- return false;
-}
-
-static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
- int refclk)
+static const struct gma_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
+ int refclk)
{
- const struct cdv_intel_limit_t *limit;
- if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ const struct gma_limit_t *limit;
+ if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
/*
* Only single-channel LVDS is supported on CDV for now. If that
* turns out to be wrong, add the dual-channel LVDS limits.
@@ -438,8 +383,8 @@ static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_96];
else
limit = &cdv_intel_limits[CDV_LIMIT_SINGLE_LVDS_100];
- } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
- psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
+ } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+ gma_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
if (refclk == 27000)
limit = &cdv_intel_limits[CDV_LIMIT_DP_27];
else
@@ -454,8 +399,7 @@ static const struct cdv_intel_limit_t *cdv_intel_limit(struct drm_crtc *crtc,
}
/* m1 is reserved as 0 in CDV, n is a ring counter */
-static void cdv_intel_clock(struct drm_device *dev,
- int refclk, struct cdv_intel_clock_t *clock)
+static void cdv_intel_clock(int refclk, struct gma_clock_t *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
@@ -463,93 +407,12 @@ static void cdv_intel_clock(struct drm_device *dev,
clock->dot = clock->vco / clock->p;
}
-
-#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
-static bool cdv_intel_PLL_is_valid(struct drm_crtc *crtc,
- const struct cdv_intel_limit_t *limit,
- struct cdv_intel_clock_t *clock)
-{
- if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
- INTELPllInvalid("p1 out of range\n");
- if (clock->p < limit->p.min || limit->p.max < clock->p)
- INTELPllInvalid("p out of range\n");
- /* unnecessary to check the range of m(m1/M2)/n again */
- if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
- INTELPllInvalid("vco out of range\n");
- /* XXX: We may need to be checking "Dot clock"
- * depending on the multiplier, connector, etc.,
- * rather than just a single range.
- */
- if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
- INTELPllInvalid("dot out of range\n");
-
- return true;
-}
-
-static bool cdv_intel_find_best_PLL(const struct cdv_intel_limit_t *limit,
- struct drm_crtc *crtc, int target, int refclk,
- struct cdv_intel_clock_t *best_clock)
+static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
+ struct drm_crtc *crtc, int target,
+ int refclk,
+ struct gma_clock_t *best_clock)
{
- struct drm_device *dev = crtc->dev;
- struct cdv_intel_clock_t clock;
- int err = target;
-
-
- if (cdv_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
- (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
- /*
- * For LVDS, if the panel is on, just rely on its current
- * settings for dual-channel. We haven't figured out how to
- * reliably set up different single/dual channel state, if we
- * even can.
- */
- if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
- clock.p2 = limit->p2.p2_fast;
- else
- clock.p2 = limit->p2.p2_slow;
- } else {
- if (target < limit->p2.dot_limit)
- clock.p2 = limit->p2.p2_slow;
- else
- clock.p2 = limit->p2.p2_fast;
- }
-
- memset(best_clock, 0, sizeof(*best_clock));
- clock.m1 = 0;
- /* m1 is reserved as 0 in CDV, n is a ring counter.
- So skip the m1 loop */
- for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
- for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max;
- clock.m2++) {
- for (clock.p1 = limit->p1.min;
- clock.p1 <= limit->p1.max;
- clock.p1++) {
- int this_err;
-
- cdv_intel_clock(dev, refclk, &clock);
-
- if (!cdv_intel_PLL_is_valid(crtc,
- limit, &clock))
- continue;
-
- this_err = abs(clock.dot - target);
- if (this_err < err) {
- *best_clock = clock;
- err = this_err;
- }
- }
- }
- }
-
- return err != target;
-}
-
-static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct drm_crtc *crtc, int target,
- int refclk,
- struct cdv_intel_clock_t *best_clock)
-{
- struct cdv_intel_clock_t clock;
+ struct gma_clock_t clock;
if (refclk == 27000) {
if (target < 200000) {
clock.p1 = 2;
@@ -584,85 +447,10 @@ static bool cdv_intel_find_dp_pll(const struct cdv_intel_limit_t *limit, struct
clock.p = clock.p1 * clock.p2;
clock.vco = (refclk * clock.m) / clock.n;
clock.dot = clock.vco / clock.p;
- memcpy(best_clock, &clock, sizeof(struct cdv_intel_clock_t));
+ memcpy(best_clock, &clock, sizeof(struct gma_clock_t));
return true;
}
-static int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
- int x, int y, struct drm_framebuffer *old_fb)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
- int pipe = psb_intel_crtc->pipe;
- const struct psb_offset *map = &dev_priv->regmap[pipe];
- unsigned long start, offset;
- u32 dspcntr;
- int ret = 0;
-
- if (!gma_power_begin(dev, true))
- return 0;
-
- /* no fb bound */
- if (!crtc->fb) {
- dev_err(dev->dev, "No FB bound\n");
- goto psb_intel_pipe_cleaner;
- }
-
-
- /* We are displaying this buffer, make sure it is actually loaded
- into the GTT */
- ret = psb_gtt_pin(psbfb->gtt);
- if (ret < 0)
- goto psb_intel_pipe_set_base_exit;
- start = psbfb->gtt->offset;
- offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
-
- REG_WRITE(map->stride, crtc->fb->pitches[0]);
-
- dspcntr = REG_READ(map->cntr);
- dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
-
- switch (crtc->fb->bits_per_pixel) {
- case 8:
- dspcntr |= DISPPLANE_8BPP;
- break;
- case 16:
- if (crtc->fb->depth == 15)
- dspcntr |= DISPPLANE_15_16BPP;
- else
- dspcntr |= DISPPLANE_16BPP;
- break;
- case 24:
- case 32:
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
- break;
- default:
- dev_err(dev->dev, "Unknown color depth\n");
- ret = -EINVAL;
- goto psb_intel_pipe_set_base_exit;
- }
- REG_WRITE(map->cntr, dspcntr);
-
- dev_dbg(dev->dev,
- "Writing base %08lX %08lX %d %d\n", start, offset, x, y);
-
- REG_WRITE(map->base, offset);
- REG_READ(map->base);
- REG_WRITE(map->surf, start);
- REG_READ(map->surf);
-
-psb_intel_pipe_cleaner:
- /* If there was a previous display we can now unpin it */
- if (old_fb)
- psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
-
-psb_intel_pipe_set_base_exit:
- gma_power_end(dev);
- return ret;
-}
-
#define FIFO_PIPEA (1 << 0)
#define FIFO_PIPEB (1 << 1)
@@ -670,12 +458,12 @@ static bool cdv_intel_pipe_enabled(struct drm_device *dev, int pipe)
{
struct drm_crtc *crtc;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = NULL;
+ struct gma_crtc *gma_crtc = NULL;
crtc = dev_priv->pipe_to_crtc_mapping[pipe];
- psb_intel_crtc = to_psb_intel_crtc(crtc);
+ gma_crtc = to_gma_crtc(crtc);
- if (crtc->fb == NULL || !psb_intel_crtc->active)
+ if (crtc->fb == NULL || !gma_crtc->active)
return false;
return true;
}
@@ -701,29 +489,29 @@ static bool cdv_intel_single_pipe_active (struct drm_device *dev)
static bool is_pipeb_lvds(struct drm_device *dev, struct drm_crtc *crtc)
{
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
- if (psb_intel_crtc->pipe != 1)
+ if (gma_crtc->pipe != 1)
return false;
list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder =
+ gma_attached_encoder(connector);
if (!connector->encoder
|| connector->encoder->crtc != crtc)
continue;
- if (psb_intel_encoder->type == INTEL_OUTPUT_LVDS)
+ if (gma_encoder->type == INTEL_OUTPUT_LVDS)
return true;
}
return false;
}
-static void cdv_intel_disable_self_refresh (struct drm_device *dev)
+void cdv_disable_sr(struct drm_device *dev)
{
if (REG_READ(FW_BLC_SELF) & FW_BLC_SELF_EN) {
@@ -731,7 +519,7 @@ static void cdv_intel_disable_self_refresh (struct drm_device *dev)
REG_WRITE(FW_BLC_SELF, (REG_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN));
REG_READ(FW_BLC_SELF);
- cdv_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
/* Cedarview workaround to write overlay plane, which forces it to leave
* MAX_FIFO state.
@@ -739,13 +527,14 @@ static void cdv_intel_disable_self_refresh (struct drm_device *dev)
REG_WRITE(OV_OVADD, 0/*dev_priv->ovl_offset*/);
REG_READ(OV_OVADD);
- cdv_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
}
}
-static void cdv_intel_update_watermark (struct drm_device *dev, struct drm_crtc *crtc)
+void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
if (cdv_intel_single_pipe_active(dev)) {
u32 fw;
@@ -780,12 +569,12 @@ static void cdv_intel_update_watermark (struct drm_device *dev, struct drm_crtc
REG_WRITE(DSPFW6, 0x10);
- cdv_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
/* enable self-refresh for single pipe active */
REG_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
REG_READ(FW_BLC_SELF);
- cdv_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
} else {
@@ -797,216 +586,12 @@ static void cdv_intel_update_watermark (struct drm_device *dev, struct drm_crtc
REG_WRITE(DSPFW5, 0x01010101);
REG_WRITE(DSPFW6, 0x1d0);
- cdv_intel_wait_for_vblank(dev);
-
- cdv_intel_disable_self_refresh(dev);
-
- }
-}
-
-/** Loads the palette/gamma unit for the CRTC with the prepared values */
-static void cdv_intel_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int palreg = PALETTE_A;
- int i;
-
- /* The clocks have to be on to load the palette. */
- if (!crtc->enabled)
- return;
-
- switch (psb_intel_crtc->pipe) {
- case 0:
- break;
- case 1:
- palreg = PALETTE_B;
- break;
- case 2:
- palreg = PALETTE_C;
- break;
- default:
- dev_err(dev->dev, "Illegal Pipe Number.\n");
- return;
- }
-
- if (gma_power_begin(dev, false)) {
- for (i = 0; i < 256; i++) {
- REG_WRITE(palreg + 4 * i,
- ((psb_intel_crtc->lut_r[i] +
- psb_intel_crtc->lut_adj[i]) << 16) |
- ((psb_intel_crtc->lut_g[i] +
- psb_intel_crtc->lut_adj[i]) << 8) |
- (psb_intel_crtc->lut_b[i] +
- psb_intel_crtc->lut_adj[i]));
- }
- gma_power_end(dev);
- } else {
- for (i = 0; i < 256; i++) {
- dev_priv->regs.pipe[0].palette[i] =
- ((psb_intel_crtc->lut_r[i] +
- psb_intel_crtc->lut_adj[i]) << 16) |
- ((psb_intel_crtc->lut_g[i] +
- psb_intel_crtc->lut_adj[i]) << 8) |
- (psb_intel_crtc->lut_b[i] +
- psb_intel_crtc->lut_adj[i]);
- }
-
- }
-}
-
-/**
- * Sets the power management mode of the pipe and plane.
- *
- * This code should probably grow support for turning the cursor off and back
- * on appropriately at the same time as we're turning the pipe off/on.
- */
-static void cdv_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
- const struct psb_offset *map = &dev_priv->regmap[pipe];
- u32 temp;
-
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- cdv_intel_disable_self_refresh(dev);
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- if (psb_intel_crtc->active)
- break;
-
- psb_intel_crtc->active = true;
-
- /* Enable the DPLL */
- temp = REG_READ(map->dpll);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- REG_WRITE(map->dpll, temp);
- REG_READ(map->dpll);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- }
-
- /* Jim Bish - switch plan and pipe per scott */
- /* Enable the plane */
- temp = REG_READ(map->cntr);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(map->cntr,
- temp | DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- REG_WRITE(map->base, REG_READ(map->base));
- }
-
- udelay(150);
-
- /* Enable the pipe */
- temp = REG_READ(map->conf);
- if ((temp & PIPEACONF_ENABLE) == 0)
- REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
-
- temp = REG_READ(map->status);
- temp &= ~(0xFFFF);
- temp |= PIPE_FIFO_UNDERRUN;
- REG_WRITE(map->status, temp);
- REG_READ(map->status);
-
- cdv_intel_crtc_load_lut(crtc);
-
- /* Give the overlay scaler a chance to enable
- * if it's on this pipe */
- /* psb_intel_crtc_dpms_video(crtc, true); TODO */
- break;
- case DRM_MODE_DPMS_OFF:
- if (!psb_intel_crtc->active)
- break;
-
- psb_intel_crtc->active = false;
-
- /* Give the overlay scaler a chance to disable
- * if it's on this pipe */
- /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
-
- /* Disable the VGA plane that we never use */
- REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
-
- /* Jim Bish - changed pipe/plane here as well. */
-
- drm_vblank_off(dev, pipe);
- /* Wait for vblank for the disable to take effect */
- cdv_intel_wait_for_vblank(dev);
-
- /* Next, disable display pipes */
- temp = REG_READ(map->conf);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
- REG_READ(map->conf);
- }
-
- /* Wait for vblank for the disable to take effect. */
- cdv_intel_wait_for_vblank(dev);
-
- udelay(150);
-
- /* Disable display plane */
- temp = REG_READ(map->cntr);
- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(map->cntr,
- temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- REG_WRITE(map->base, REG_READ(map->base));
- REG_READ(map->base);
- }
-
- temp = REG_READ(map->dpll);
- if ((temp & DPLL_VCO_ENABLE) != 0) {
- REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
- }
+ gma_wait_for_vblank(dev);
- /* Wait for the clocks to turn off. */
- udelay(150);
- break;
+ dev_priv->ops->disable_sr(dev);
}
- cdv_intel_update_watermark(dev, crtc);
- /*Set FIFO Watermarks*/
- REG_WRITE(DSPARB, 0x3F3E);
-}
-
-static void cdv_intel_crtc_prepare(struct drm_crtc *crtc)
-{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
-static void cdv_intel_crtc_commit(struct drm_crtc *crtc)
-{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-}
-
-static bool cdv_intel_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
}
-
/**
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
@@ -1031,31 +616,31 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk;
- struct cdv_intel_clock_t clock;
+ struct gma_clock_t clock;
u32 dpll = 0, dspcntr, pipeconf;
bool ok;
bool is_crt = false, is_lvds = false, is_tv = false;
bool is_hdmi = false, is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
- const struct cdv_intel_limit_t *limit;
+ const struct gma_limit_t *limit;
u32 ddi_select = 0;
bool is_edp = false;
list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder =
+ gma_attached_encoder(connector);
if (!connector->encoder
|| connector->encoder->crtc != crtc)
continue;
- ddi_select = psb_intel_encoder->ddi_select;
- switch (psb_intel_encoder->type) {
+ ddi_select = gma_encoder->ddi_select;
+ switch (gma_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
@@ -1108,12 +693,13 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
drm_mode_debug_printmodeline(adjusted_mode);
- limit = cdv_intel_limit(crtc, refclk);
+ limit = gma_crtc->clock_funcs->limit(crtc, refclk);
ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
&clock);
if (!ok) {
- dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
+ DRM_ERROR("Couldn't find PLL settings for mode! target: %d, actual: %d",
+ adjusted_mode->clock, clock.dot);
return 0;
}
@@ -1264,7 +850,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
REG_WRITE(map->conf, pipeconf);
REG_READ(map->conf);
- cdv_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
REG_WRITE(map->cntr, dspcntr);
@@ -1275,344 +861,16 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
crtc_funcs->mode_set_base(crtc, x, y, old_fb);
}
- cdv_intel_wait_for_vblank(dev);
-
- return 0;
-}
-
-
-/**
- * Save HW states of giving crtc
- */
-static void cdv_intel_crtc_save(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
- uint32_t paletteReg;
- int i;
-
- if (!crtc_state) {
- dev_dbg(dev->dev, "No CRTC state found\n");
- return;
- }
-
- crtc_state->saveDSPCNTR = REG_READ(map->cntr);
- crtc_state->savePIPECONF = REG_READ(map->conf);
- crtc_state->savePIPESRC = REG_READ(map->src);
- crtc_state->saveFP0 = REG_READ(map->fp0);
- crtc_state->saveFP1 = REG_READ(map->fp1);
- crtc_state->saveDPLL = REG_READ(map->dpll);
- crtc_state->saveHTOTAL = REG_READ(map->htotal);
- crtc_state->saveHBLANK = REG_READ(map->hblank);
- crtc_state->saveHSYNC = REG_READ(map->hsync);
- crtc_state->saveVTOTAL = REG_READ(map->vtotal);
- crtc_state->saveVBLANK = REG_READ(map->vblank);
- crtc_state->saveVSYNC = REG_READ(map->vsync);
- crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
-
- /*NOTE: DSPSIZE DSPPOS only for psb*/
- crtc_state->saveDSPSIZE = REG_READ(map->size);
- crtc_state->saveDSPPOS = REG_READ(map->pos);
-
- crtc_state->saveDSPBASE = REG_READ(map->base);
-
- DRM_DEBUG("(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
- crtc_state->saveDSPCNTR,
- crtc_state->savePIPECONF,
- crtc_state->savePIPESRC,
- crtc_state->saveFP0,
- crtc_state->saveFP1,
- crtc_state->saveDPLL,
- crtc_state->saveHTOTAL,
- crtc_state->saveHBLANK,
- crtc_state->saveHSYNC,
- crtc_state->saveVTOTAL,
- crtc_state->saveVBLANK,
- crtc_state->saveVSYNC,
- crtc_state->saveDSPSTRIDE,
- crtc_state->saveDSPSIZE,
- crtc_state->saveDSPPOS,
- crtc_state->saveDSPBASE
- );
-
- paletteReg = map->palette;
- for (i = 0; i < 256; ++i)
- crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
-}
-
-/**
- * Restore HW states of giving crtc
- */
-static void cdv_intel_crtc_restore(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
- uint32_t paletteReg;
- int i;
-
- if (!crtc_state) {
- dev_dbg(dev->dev, "No crtc state\n");
- return;
- }
-
- DRM_DEBUG(
- "current:(%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
- REG_READ(map->cntr),
- REG_READ(map->conf),
- REG_READ(map->src),
- REG_READ(map->fp0),
- REG_READ(map->fp1),
- REG_READ(map->dpll),
- REG_READ(map->htotal),
- REG_READ(map->hblank),
- REG_READ(map->hsync),
- REG_READ(map->vtotal),
- REG_READ(map->vblank),
- REG_READ(map->vsync),
- REG_READ(map->stride),
- REG_READ(map->size),
- REG_READ(map->pos),
- REG_READ(map->base)
- );
-
- DRM_DEBUG(
- "saved: (%x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x)\n",
- crtc_state->saveDSPCNTR,
- crtc_state->savePIPECONF,
- crtc_state->savePIPESRC,
- crtc_state->saveFP0,
- crtc_state->saveFP1,
- crtc_state->saveDPLL,
- crtc_state->saveHTOTAL,
- crtc_state->saveHBLANK,
- crtc_state->saveHSYNC,
- crtc_state->saveVTOTAL,
- crtc_state->saveVBLANK,
- crtc_state->saveVSYNC,
- crtc_state->saveDSPSTRIDE,
- crtc_state->saveDSPSIZE,
- crtc_state->saveDSPPOS,
- crtc_state->saveDSPBASE
- );
-
-
- if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
- REG_WRITE(map->dpll,
- crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
- DRM_DEBUG("write dpll: %x\n",
- REG_READ(map->dpll));
- udelay(150);
- }
-
- REG_WRITE(map->fp0, crtc_state->saveFP0);
- REG_READ(map->fp0);
-
- REG_WRITE(map->fp1, crtc_state->saveFP1);
- REG_READ(map->fp1);
-
- REG_WRITE(map->dpll, crtc_state->saveDPLL);
- REG_READ(map->dpll);
- udelay(150);
-
- REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
- REG_WRITE(map->hblank, crtc_state->saveHBLANK);
- REG_WRITE(map->hsync, crtc_state->saveHSYNC);
- REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
- REG_WRITE(map->vblank, crtc_state->saveVBLANK);
- REG_WRITE(map->vsync, crtc_state->saveVSYNC);
- REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
-
- REG_WRITE(map->size, crtc_state->saveDSPSIZE);
- REG_WRITE(map->pos, crtc_state->saveDSPPOS);
-
- REG_WRITE(map->src, crtc_state->savePIPESRC);
- REG_WRITE(map->base, crtc_state->saveDSPBASE);
- REG_WRITE(map->conf, crtc_state->savePIPECONF);
-
- cdv_intel_wait_for_vblank(dev);
-
- REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
- REG_WRITE(map->base, crtc_state->saveDSPBASE);
-
- cdv_intel_wait_for_vblank(dev);
-
- paletteReg = map->palette;
- for (i = 0; i < 256; ++i)
- REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
-}
-
-static int cdv_intel_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width, uint32_t height)
-{
- struct drm_device *dev = crtc->dev;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
- uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
- uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
- uint32_t temp;
- size_t addr = 0;
- struct gtt_range *gt;
- struct drm_gem_object *obj;
- int ret = 0;
-
- /* if we want to turn of the cursor ignore width and height */
- if (!handle) {
- /* turn off the cursor */
- temp = CURSOR_MODE_DISABLE;
-
- if (gma_power_begin(dev, false)) {
- REG_WRITE(control, temp);
- REG_WRITE(base, 0);
- gma_power_end(dev);
- }
-
- /* unpin the old GEM object */
- if (psb_intel_crtc->cursor_obj) {
- gt = container_of(psb_intel_crtc->cursor_obj,
- struct gtt_range, gem);
- psb_gtt_unpin(gt);
- drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
- psb_intel_crtc->cursor_obj = NULL;
- }
-
- return 0;
- }
-
- /* Currently we only support 64x64 cursors */
- if (width != 64 || height != 64) {
- dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
- return -EINVAL;
- }
-
- obj = drm_gem_object_lookup(dev, file_priv, handle);
- if (!obj)
- return -ENOENT;
-
- if (obj->size < width * height * 4) {
- dev_dbg(dev->dev, "buffer is to small\n");
- ret = -ENOMEM;
- goto unref_cursor;
- }
-
- gt = container_of(obj, struct gtt_range, gem);
-
- /* Pin the memory into the GTT */
- ret = psb_gtt_pin(gt);
- if (ret) {
- dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
- goto unref_cursor;
- }
-
- addr = gt->offset; /* Or resource.start ??? */
-
- psb_intel_crtc->cursor_addr = addr;
-
- temp = 0;
- /* set the pipe for the cursor */
- temp |= (pipe << 28);
- temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
-
- if (gma_power_begin(dev, false)) {
- REG_WRITE(control, temp);
- REG_WRITE(base, addr);
- gma_power_end(dev);
- }
-
- /* unpin the old GEM object */
- if (psb_intel_crtc->cursor_obj) {
- gt = container_of(psb_intel_crtc->cursor_obj,
- struct gtt_range, gem);
- psb_gtt_unpin(gt);
- drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
- }
-
- psb_intel_crtc->cursor_obj = obj;
- return ret;
-
-unref_cursor:
- drm_gem_object_unreference(obj);
- return ret;
-}
-
-static int cdv_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct drm_device *dev = crtc->dev;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
- uint32_t temp = 0;
- uint32_t adder;
-
-
- if (x < 0) {
- temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
- x = -x;
- }
- if (y < 0) {
- temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
- y = -y;
- }
-
- temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
- temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+ gma_wait_for_vblank(dev);
- adder = psb_intel_crtc->cursor_addr;
-
- if (gma_power_begin(dev, false)) {
- REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
- REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
- gma_power_end(dev);
- }
return 0;
}
-static void cdv_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
- u16 *green, u16 *blue, uint32_t start, uint32_t size)
-{
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int i;
- int end = (start + size > 256) ? 256 : start + size;
-
- for (i = start; i < end; i++) {
- psb_intel_crtc->lut_r[i] = red[i] >> 8;
- psb_intel_crtc->lut_g[i] = green[i] >> 8;
- psb_intel_crtc->lut_b[i] = blue[i] >> 8;
- }
-
- cdv_intel_crtc_load_lut(crtc);
-}
-
-static int cdv_crtc_set_config(struct drm_mode_set *set)
-{
- int ret = 0;
- struct drm_device *dev = set->crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->rpm_enabled)
- return drm_crtc_helper_set_config(set);
-
- pm_runtime_forbid(&dev->pdev->dev);
-
- ret = drm_crtc_helper_set_config(set);
-
- pm_runtime_allow(&dev->pdev->dev);
-
- return ret;
-}
-
/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
/* FIXME: why are we using this, should it be cdv_ in this tree ? */
-static void i8xx_clock(int refclk, struct cdv_intel_clock_t *clock)
+static void i8xx_clock(int refclk, struct gma_clock_t *clock)
{
clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
clock->p = clock->p1 * clock->p2;
@@ -1625,12 +883,12 @@ static int cdv_intel_crtc_clock_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 dpll;
u32 fp;
- struct cdv_intel_clock_t clock;
+ struct gma_clock_t clock;
bool is_lvds;
struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
@@ -1703,8 +961,8 @@ static int cdv_intel_crtc_clock_get(struct drm_device *dev,
struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
const struct psb_offset *map = &dev_priv->regmap[pipe];
@@ -1747,44 +1005,28 @@ struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-static void cdv_intel_crtc_destroy(struct drm_crtc *crtc)
-{
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
-
- kfree(psb_intel_crtc->crtc_state);
- drm_crtc_cleanup(crtc);
- kfree(psb_intel_crtc);
-}
-
-static void cdv_intel_crtc_disable(struct drm_crtc *crtc)
-{
- struct gtt_range *gt;
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-
- if (crtc->fb) {
- gt = to_psb_fb(crtc->fb)->gtt;
- psb_gtt_unpin(gt);
- }
-}
-
const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
- .dpms = cdv_intel_crtc_dpms,
- .mode_fixup = cdv_intel_crtc_mode_fixup,
+ .dpms = gma_crtc_dpms,
+ .mode_fixup = gma_crtc_mode_fixup,
.mode_set = cdv_intel_crtc_mode_set,
- .mode_set_base = cdv_intel_pipe_set_base,
- .prepare = cdv_intel_crtc_prepare,
- .commit = cdv_intel_crtc_commit,
- .disable = cdv_intel_crtc_disable,
+ .mode_set_base = gma_pipe_set_base,
+ .prepare = gma_crtc_prepare,
+ .commit = gma_crtc_commit,
+ .disable = gma_crtc_disable,
};
const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
- .save = cdv_intel_crtc_save,
- .restore = cdv_intel_crtc_restore,
- .cursor_set = cdv_intel_crtc_cursor_set,
- .cursor_move = cdv_intel_crtc_cursor_move,
- .gamma_set = cdv_intel_crtc_gamma_set,
- .set_config = cdv_crtc_set_config,
- .destroy = cdv_intel_crtc_destroy,
+ .save = gma_crtc_save,
+ .restore = gma_crtc_restore,
+ .cursor_set = gma_crtc_cursor_set,
+ .cursor_move = gma_crtc_cursor_move,
+ .gamma_set = gma_crtc_gamma_set,
+ .set_config = gma_crtc_set_config,
+ .destroy = gma_crtc_destroy,
+};
+
+const struct gma_clock_funcs cdv_clock_funcs = {
+ .clock = cdv_intel_clock,
+ .limit = cdv_intel_limit,
+ .pll_is_valid = gma_pll_is_valid,
};
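
Most of the deletions in this file fold the CDV PLL search into the shared gma_find_best_pll()/gma_pll_is_valid() helpers selected through the new cdv_clock_funcs table. The sketch below is a stand-alone rework of the deleted brute-force search, not the shared helper itself: struct range/clock/limit, cdv_clock(), in_range() and find_best_pll() are hypothetical stand-ins echoing gma_clock_t/gma_limit_t, and the n range in the example table is made up because the hunks above do not show it.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct range { int min, max; };

struct clock {				/* stand-in for gma_clock_t */
	int n, m1, m2, p1, p2;		/* given values */
	int dot, vco, m, p;		/* derived values */
};

struct limit {				/* stand-in for gma_limit_t */
	struct range dot, vco, n, m2, p, p1;
	int dot_limit, p2_slow, p2_fast;
};

/* m1 is reserved as 0 on CDV, so m is derived from m2 alone. */
static void cdv_clock(int refclk, struct clock *c)
{
	c->m = c->m2 + 2;
	c->p = c->p1 * c->p2;
	c->vco = (refclk * c->m) / c->n;
	c->dot = c->vco / c->p;
}

static bool in_range(const struct range *r, int v)
{
	return v >= r->min && v <= r->max;
}

static bool find_best_pll(const struct limit *l, int target, int refclk,
			  struct clock *best)
{
	struct clock c;
	int err = target;

	memset(best, 0, sizeof(*best));
	c.m1 = 0;
	c.p2 = (target < l->dot_limit) ? l->p2_slow : l->p2_fast;

	for (c.n = l->n.min; c.n <= l->n.max; c.n++) {
		for (c.m2 = l->m2.min; c.m2 <= l->m2.max; c.m2++) {
			for (c.p1 = l->p1.min; c.p1 <= l->p1.max; c.p1++) {
				cdv_clock(refclk, &c);
				if (!in_range(&l->p, c.p) ||
				    !in_range(&l->vco, c.vco) ||
				    !in_range(&l->dot, c.dot))
					continue;
				if (abs(c.dot - target) < err) {
					*best = c;
					err = abs(c.dot - target);
				}
			}
		}
	}
	return err != target;		/* true if an acceptable divider set was found */
}

int main(void)
{
	/* dot/vco/m2/p/p1/p2 numbers taken from the CDV_SINGLE_LVDS_96MHz
	 * entry above; the n range is illustrative only. */
	struct limit lvds_96 = {
		.dot = { 20000, 115500 }, .vco = { 1800000, 3600000 },
		.n = { 2, 6 }, .m2 = { 58, 158 },
		.p = { 28, 140 }, .p1 = { 2, 10 },
		.dot_limit = 200000, .p2_slow = 14, .p2_fast = 14,
	};
	struct clock best;

	if (find_best_pll(&lvds_96, 65000, 96000, &best))
		printf("dot %d (n=%d m2=%d p1=%d p2=%d)\n",
		       best.dot, best.n, best.m2, best.p1, best.p2);
	return 0;
}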
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 88d9ef6b5b4..f4eb43573ca 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -34,6 +34,7 @@
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
+#include "gma_display.h"
#include <drm/drm_dp_helper.h>
#define _wait_for(COND, MS, W) ({ \
@@ -68,7 +69,7 @@ struct cdv_intel_dp {
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[4];
- struct psb_intel_encoder *encoder;
+ struct gma_encoder *encoder;
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
uint8_t train_set[4];
@@ -114,18 +115,18 @@ static uint32_t dp_vswing_premph_table[] = {
* If a CPU or PCH DP output is attached to an eDP panel, this function
* will return true, and false otherwise.
*/
-static bool is_edp(struct psb_intel_encoder *encoder)
+static bool is_edp(struct gma_encoder *encoder)
{
return encoder->type == INTEL_OUTPUT_EDP;
}
-static void cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder);
-static void cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder);
-static void cdv_intel_dp_link_down(struct psb_intel_encoder *encoder);
+static void cdv_intel_dp_start_link_train(struct gma_encoder *encoder);
+static void cdv_intel_dp_complete_link_train(struct gma_encoder *encoder);
+static void cdv_intel_dp_link_down(struct gma_encoder *encoder);
static int
-cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder)
+cdv_intel_dp_max_lane_count(struct gma_encoder *encoder)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int max_lane_count = 4;
@@ -143,7 +144,7 @@ cdv_intel_dp_max_lane_count(struct psb_intel_encoder *encoder)
}
static int
-cdv_intel_dp_max_link_bw(struct psb_intel_encoder *encoder)
+cdv_intel_dp_max_link_bw(struct gma_encoder *encoder)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
@@ -180,7 +181,7 @@ cdv_intel_dp_max_data_rate(int max_link_clock, int max_lanes)
return (max_link_clock * max_lanes * 19) / 20;
}
-static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder)
+static void cdv_intel_edp_panel_vdd_on(struct gma_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
@@ -200,7 +201,7 @@ static void cdv_intel_edp_panel_vdd_on(struct psb_intel_encoder *intel_encoder)
msleep(intel_dp->panel_power_up_delay);
}
-static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder)
+static void cdv_intel_edp_panel_vdd_off(struct gma_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
u32 pp;
@@ -215,7 +216,7 @@ static void cdv_intel_edp_panel_vdd_off(struct psb_intel_encoder *intel_encoder)
}
/* Returns true if the panel was already on when called */
-static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder)
+static bool cdv_intel_edp_panel_on(struct gma_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
@@ -242,7 +243,7 @@ static bool cdv_intel_edp_panel_on(struct psb_intel_encoder *intel_encoder)
return false;
}
-static void cdv_intel_edp_panel_off (struct psb_intel_encoder *intel_encoder)
+static void cdv_intel_edp_panel_off (struct gma_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
u32 pp, idle_off_mask = PP_ON ;
@@ -274,7 +275,7 @@ static void cdv_intel_edp_panel_off (struct psb_intel_encoder *intel_encoder)
DRM_DEBUG_KMS("Over\n");
}
-static void cdv_intel_edp_backlight_on (struct psb_intel_encoder *intel_encoder)
+static void cdv_intel_edp_backlight_on (struct gma_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
u32 pp;
@@ -294,7 +295,7 @@ static void cdv_intel_edp_backlight_on (struct psb_intel_encoder *intel_encoder)
gma_backlight_enable(dev);
}
-static void cdv_intel_edp_backlight_off (struct psb_intel_encoder *intel_encoder)
+static void cdv_intel_edp_backlight_off (struct gma_encoder *intel_encoder)
{
struct drm_device *dev = intel_encoder->base.dev;
struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
@@ -314,7 +315,7 @@ static int
cdv_intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+ struct gma_encoder *encoder = gma_attached_encoder(connector);
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder));
int max_lanes = cdv_intel_dp_max_lane_count(encoder);
@@ -370,7 +371,7 @@ unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
}
static int
-cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder,
+cdv_intel_dp_aux_ch(struct gma_encoder *encoder,
uint8_t *send, int send_bytes,
uint8_t *recv, int recv_size)
{
@@ -472,7 +473,7 @@ cdv_intel_dp_aux_ch(struct psb_intel_encoder *encoder,
/* Write data to the aux channel in native mode */
static int
-cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder,
+cdv_intel_dp_aux_native_write(struct gma_encoder *encoder,
uint16_t address, uint8_t *send, int send_bytes)
{
int ret;
@@ -504,7 +505,7 @@ cdv_intel_dp_aux_native_write(struct psb_intel_encoder *encoder,
/* Write a single byte to the aux channel in native mode */
static int
-cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder,
+cdv_intel_dp_aux_native_write_1(struct gma_encoder *encoder,
uint16_t address, uint8_t byte)
{
return cdv_intel_dp_aux_native_write(encoder, address, &byte, 1);
@@ -512,7 +513,7 @@ cdv_intel_dp_aux_native_write_1(struct psb_intel_encoder *encoder,
/* read bytes from a native aux channel */
static int
-cdv_intel_dp_aux_native_read(struct psb_intel_encoder *encoder,
+cdv_intel_dp_aux_native_read(struct gma_encoder *encoder,
uint16_t address, uint8_t *recv, int recv_bytes)
{
uint8_t msg[4];
@@ -557,7 +558,7 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
struct cdv_intel_dp *intel_dp = container_of(adapter,
struct cdv_intel_dp,
adapter);
- struct psb_intel_encoder *encoder = intel_dp->encoder;
+ struct gma_encoder *encoder = intel_dp->encoder;
uint16_t address = algo_data->address;
uint8_t msg[5];
uint8_t reply[2];
@@ -647,7 +648,8 @@ cdv_intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
static int
-cdv_intel_dp_i2c_init(struct psb_intel_connector *connector, struct psb_intel_encoder *encoder, const char *name)
+cdv_intel_dp_i2c_init(struct gma_connector *connector,
+ struct gma_encoder *encoder, const char *name)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int ret;
@@ -698,7 +700,7 @@ cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mo
struct drm_display_mode *adjusted_mode)
{
struct drm_psb_private *dev_priv = encoder->dev->dev_private;
- struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+ struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
int lane_count, clock;
int max_lane_count = cdv_intel_dp_max_lane_count(intel_encoder);
@@ -792,22 +794,22 @@ cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_psb_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_encoder *encoder;
- struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int lane_count = 4, bpp = 24;
struct cdv_intel_dp_m_n m_n;
- int pipe = intel_crtc->pipe;
+ int pipe = gma_crtc->pipe;
/*
* Find the lane count in the intel_encoder private
*/
list_for_each_entry(encoder, &mode_config->encoder_list, head) {
- struct psb_intel_encoder *intel_encoder;
+ struct gma_encoder *intel_encoder;
struct cdv_intel_dp *intel_dp;
if (encoder->crtc != crtc)
continue;
- intel_encoder = to_psb_intel_encoder(encoder);
+ intel_encoder = to_gma_encoder(encoder);
intel_dp = intel_encoder->dev_priv;
if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
lane_count = intel_dp->lane_count;
@@ -841,9 +843,9 @@ static void
cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+ struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
struct drm_crtc *crtc = encoder->crtc;
- struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
struct drm_device *dev = encoder->dev;
@@ -885,7 +887,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode
}
/* CPT DP's pipe select is decided in TRANS_DP_CTL */
- if (intel_crtc->pipe == 1)
+ if (gma_crtc->pipe == 1)
intel_dp->DP |= DP_PIPEB_SELECT;
REG_WRITE(intel_dp->output_reg, (intel_dp->DP | DP_PORT_EN));
@@ -900,7 +902,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode
else
pfit_control = 0;
- pfit_control |= intel_crtc->pipe << PFIT_PIPE_SHIFT;
+ pfit_control |= gma_crtc->pipe << PFIT_PIPE_SHIFT;
REG_WRITE(PFIT_CONTROL, pfit_control);
}
@@ -908,7 +910,7 @@ cdv_intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode
/* If the sink supports it, try to set the power state appropriately */
-static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode)
+static void cdv_intel_dp_sink_dpms(struct gma_encoder *encoder, int mode)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int ret, i;
@@ -940,7 +942,7 @@ static void cdv_intel_dp_sink_dpms(struct psb_intel_encoder *encoder, int mode)
static void cdv_intel_dp_prepare(struct drm_encoder *encoder)
{
- struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+ struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
int edp = is_edp(intel_encoder);
if (edp) {
@@ -957,7 +959,7 @@ static void cdv_intel_dp_prepare(struct drm_encoder *encoder)
static void cdv_intel_dp_commit(struct drm_encoder *encoder)
{
- struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+ struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
int edp = is_edp(intel_encoder);
if (edp)
@@ -971,7 +973,7 @@ static void cdv_intel_dp_commit(struct drm_encoder *encoder)
static void
cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode)
{
- struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+ struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
struct drm_device *dev = encoder->dev;
uint32_t dp_reg = REG_READ(intel_dp->output_reg);
@@ -1006,7 +1008,7 @@ cdv_intel_dp_dpms(struct drm_encoder *encoder, int mode)
* cases where the sink may still be asleep.
*/
static bool
-cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t address,
+cdv_intel_dp_aux_native_read_retry(struct gma_encoder *encoder, uint16_t address,
uint8_t *recv, int recv_bytes)
{
int ret, i;
@@ -1031,7 +1033,7 @@ cdv_intel_dp_aux_native_read_retry(struct psb_intel_encoder *encoder, uint16_t a
* link status information
*/
static bool
-cdv_intel_dp_get_link_status(struct psb_intel_encoder *encoder)
+cdv_intel_dp_get_link_status(struct gma_encoder *encoder)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
return cdv_intel_dp_aux_native_read_retry(encoder,
@@ -1105,7 +1107,7 @@ cdv_intel_dp_pre_emphasis_max(uint8_t voltage_swing)
}
*/
static void
-cdv_intel_get_adjust_train(struct psb_intel_encoder *encoder)
+cdv_intel_get_adjust_train(struct gma_encoder *encoder)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
uint8_t v = 0;
@@ -1164,7 +1166,7 @@ cdv_intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_c
DP_LANE_CHANNEL_EQ_DONE|\
DP_LANE_SYMBOL_LOCKED)
static bool
-cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder)
+cdv_intel_channel_eq_ok(struct gma_encoder *encoder)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
uint8_t lane_align;
@@ -1184,7 +1186,7 @@ cdv_intel_channel_eq_ok(struct psb_intel_encoder *encoder)
}
static bool
-cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder,
+cdv_intel_dp_set_link_train(struct gma_encoder *encoder,
uint32_t dp_reg_value,
uint8_t dp_train_pat)
{
@@ -1211,7 +1213,7 @@ cdv_intel_dp_set_link_train(struct psb_intel_encoder *encoder,
static bool
-cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder,
+cdv_intel_dplink_set_level(struct gma_encoder *encoder,
uint8_t dp_train_pat)
{
@@ -1232,7 +1234,7 @@ cdv_intel_dplink_set_level(struct psb_intel_encoder *encoder,
}
static void
-cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal_level)
+cdv_intel_dp_set_vswing_premph(struct gma_encoder *encoder, uint8_t signal_level)
{
struct drm_device *dev = encoder->base.dev;
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1298,7 +1300,7 @@ cdv_intel_dp_set_vswing_premph(struct psb_intel_encoder *encoder, uint8_t signal
/* Enable corresponding port and start training pattern 1 */
static void
-cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
+cdv_intel_dp_start_link_train(struct gma_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1317,7 +1319,7 @@ cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
/* Enable output, wait for it to become active */
REG_WRITE(intel_dp->output_reg, reg);
REG_READ(intel_dp->output_reg);
- psb_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
DRM_DEBUG_KMS("Link config\n");
/* Write the link configuration data */
@@ -1392,7 +1394,7 @@ cdv_intel_dp_start_link_train(struct psb_intel_encoder *encoder)
}
static void
-cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder)
+cdv_intel_dp_complete_link_train(struct gma_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1478,7 +1480,7 @@ cdv_intel_dp_complete_link_train(struct psb_intel_encoder *encoder)
}
static void
-cdv_intel_dp_link_down(struct psb_intel_encoder *encoder)
+cdv_intel_dp_link_down(struct gma_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1502,8 +1504,7 @@ cdv_intel_dp_link_down(struct psb_intel_encoder *encoder)
REG_READ(intel_dp->output_reg);
}
-static enum drm_connector_status
-cdv_dp_detect(struct psb_intel_encoder *encoder)
+static enum drm_connector_status cdv_dp_detect(struct gma_encoder *encoder)
{
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
enum drm_connector_status status;
@@ -1531,7 +1532,7 @@ cdv_dp_detect(struct psb_intel_encoder *encoder)
static enum drm_connector_status
cdv_intel_dp_detect(struct drm_connector *connector, bool force)
{
- struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+ struct gma_encoder *encoder = gma_attached_encoder(connector);
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
enum drm_connector_status status;
struct edid *edid = NULL;
@@ -1565,7 +1566,7 @@ cdv_intel_dp_detect(struct drm_connector *connector, bool force)
static int cdv_intel_dp_get_modes(struct drm_connector *connector)
{
- struct psb_intel_encoder *intel_encoder = psb_intel_attached_encoder(connector);
+ struct gma_encoder *intel_encoder = gma_attached_encoder(connector);
struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
struct edid *edid = NULL;
int ret = 0;
@@ -1621,7 +1622,7 @@ static int cdv_intel_dp_get_modes(struct drm_connector *connector)
static bool
cdv_intel_dp_detect_audio(struct drm_connector *connector)
{
- struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+ struct gma_encoder *encoder = gma_attached_encoder(connector);
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
struct edid *edid;
bool has_audio = false;
@@ -1647,7 +1648,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector,
uint64_t val)
{
struct drm_psb_private *dev_priv = connector->dev->dev_private;
- struct psb_intel_encoder *encoder = psb_intel_attached_encoder(connector);
+ struct gma_encoder *encoder = gma_attached_encoder(connector);
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int ret;
@@ -1700,11 +1701,10 @@ done:
static void
cdv_intel_dp_destroy(struct drm_connector *connector)
{
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
- struct cdv_intel_dp *intel_dp = psb_intel_encoder->dev_priv;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct cdv_intel_dp *intel_dp = gma_encoder->dev_priv;
- if (is_edp(psb_intel_encoder)) {
+ if (is_edp(gma_encoder)) {
/* cdv_intel_panel_destroy_backlight(connector->dev); */
if (intel_dp->panel_fixed_mode) {
kfree(intel_dp->panel_fixed_mode);
@@ -1741,7 +1741,7 @@ static const struct drm_connector_funcs cdv_intel_dp_connector_funcs = {
static const struct drm_connector_helper_funcs cdv_intel_dp_connector_helper_funcs = {
.get_modes = cdv_intel_dp_get_modes,
.mode_valid = cdv_intel_dp_mode_valid,
- .best_encoder = psb_intel_best_encoder,
+ .best_encoder = gma_best_encoder,
};
static const struct drm_encoder_funcs cdv_intel_dp_enc_funcs = {
@@ -1800,19 +1800,19 @@ static void cdv_disable_intel_clock_gating(struct drm_device *dev)
void
cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev, int output_reg)
{
- struct psb_intel_encoder *psb_intel_encoder;
- struct psb_intel_connector *psb_intel_connector;
+ struct gma_encoder *gma_encoder;
+ struct gma_connector *gma_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct cdv_intel_dp *intel_dp;
const char *name = NULL;
int type = DRM_MODE_CONNECTOR_DisplayPort;
- psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
- if (!psb_intel_encoder)
+ gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
+ if (!gma_encoder)
return;
- psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
- if (!psb_intel_connector)
+ gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
+ if (!gma_connector)
goto err_connector;
intel_dp = kzalloc(sizeof(struct cdv_intel_dp), GFP_KERNEL);
if (!intel_dp)
@@ -1821,22 +1821,22 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
if ((output_reg == DP_C) && cdv_intel_dpc_is_edp(dev))
type = DRM_MODE_CONNECTOR_eDP;
- connector = &psb_intel_connector->base;
- encoder = &psb_intel_encoder->base;
+ connector = &gma_connector->base;
+ encoder = &gma_encoder->base;
drm_connector_init(dev, connector, &cdv_intel_dp_connector_funcs, type);
drm_encoder_init(dev, encoder, &cdv_intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS);
- psb_intel_connector_attach_encoder(psb_intel_connector, psb_intel_encoder);
+ gma_connector_attach_encoder(gma_connector, gma_encoder);
if (type == DRM_MODE_CONNECTOR_DisplayPort)
- psb_intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ gma_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
else
- psb_intel_encoder->type = INTEL_OUTPUT_EDP;
+ gma_encoder->type = INTEL_OUTPUT_EDP;
- psb_intel_encoder->dev_priv=intel_dp;
- intel_dp->encoder = psb_intel_encoder;
+ gma_encoder->dev_priv=intel_dp;
+ intel_dp->encoder = gma_encoder;
intel_dp->output_reg = output_reg;
drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs);
@@ -1852,21 +1852,21 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
switch (output_reg) {
case DP_B:
name = "DPDDC-B";
- psb_intel_encoder->ddi_select = (DP_MASK | DDI0_SELECT);
+ gma_encoder->ddi_select = (DP_MASK | DDI0_SELECT);
break;
case DP_C:
name = "DPDDC-C";
- psb_intel_encoder->ddi_select = (DP_MASK | DDI1_SELECT);
+ gma_encoder->ddi_select = (DP_MASK | DDI1_SELECT);
break;
}
cdv_disable_intel_clock_gating(dev);
- cdv_intel_dp_i2c_init(psb_intel_connector, psb_intel_encoder, name);
+ cdv_intel_dp_i2c_init(gma_connector, gma_encoder, name);
/* FIXME:fail check */
cdv_intel_dp_add_properties(connector);
- if (is_edp(psb_intel_encoder)) {
+ if (is_edp(gma_encoder)) {
int ret;
struct edp_power_seq cur;
u32 pp_on, pp_off, pp_div;
@@ -1920,11 +1920,11 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
- cdv_intel_edp_panel_vdd_on(psb_intel_encoder);
- ret = cdv_intel_dp_aux_native_read(psb_intel_encoder, DP_DPCD_REV,
+ cdv_intel_edp_panel_vdd_on(gma_encoder);
+ ret = cdv_intel_dp_aux_native_read(gma_encoder, DP_DPCD_REV,
intel_dp->dpcd,
sizeof(intel_dp->dpcd));
- cdv_intel_edp_panel_vdd_off(psb_intel_encoder);
+ cdv_intel_edp_panel_vdd_off(gma_encoder);
if (ret == 0) {
/* if this fails, presume the device is a ghost */
DRM_INFO("failed to retrieve link info, disabling eDP\n");
@@ -1945,7 +1945,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
return;
err_priv:
- kfree(psb_intel_connector);
+ kfree(gma_connector);
err_connector:
- kfree(psb_intel_encoder);
+ kfree(gma_encoder);
}
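
cdv_intel_dp_init() above also shows how per-output state is wired after the rename: the DP-private struct hangs off gma_encoder->dev_priv and keeps a back-pointer to the encoder for the AUX helpers, with allocations unwound through the err_priv/err_connector labels on failure. A minimal sketch of that wiring, assuming simplified stand-in structs and an attach_encoder() helper in place of gma_connector_attach_encoder():

#include <stdlib.h>

struct gma_encoder {			/* stand-in, not the driver's struct */
	int type;
	void *dev_priv;			/* per-output state, e.g. the DP struct */
};

struct gma_connector {			/* stand-in, not the driver's struct */
	struct gma_encoder *encoder;
};

struct cdv_intel_dp {			/* stand-in with only the wired fields */
	struct gma_encoder *encoder;	/* back-pointer used by the AUX helpers */
	int output_reg;
};

/* simplified stand-in for gma_connector_attach_encoder() */
static void attach_encoder(struct gma_connector *connector,
			   struct gma_encoder *encoder)
{
	connector->encoder = encoder;
}

int main(void)
{
	struct gma_encoder *enc = calloc(1, sizeof(*enc));
	struct gma_connector *con = calloc(1, sizeof(*con));
	struct cdv_intel_dp *dp = calloc(1, sizeof(*dp));

	if (!enc || !con || !dp) {
		free(dp);
		free(con);
		free(enc);
		return 1;
	}

	attach_encoder(con, enc);
	enc->dev_priv = dp;		/* the encoder carries the DP state ... */
	dp->encoder = enc;		/* ... and the DP state can find its encoder */
	dp->output_reg = 0;		/* register value is illustrative only */

	free(dp);
	free(con);
	free(enc);
	return 0;
}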
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 464153d9d2d..1c0d723b8d2 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -64,11 +64,11 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct psb_intel_encoder *psb_intel_encoder = to_psb_intel_encoder(encoder);
- struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+ struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
+ struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
u32 hdmib;
struct drm_crtc *crtc = encoder->crtc;
- struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
hdmib = (2 << 10);
@@ -77,7 +77,7 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
hdmib |= HDMI_HSYNC_ACTIVE_HIGH;
- if (intel_crtc->pipe == 1)
+ if (gma_crtc->pipe == 1)
hdmib |= HDMIB_PIPE_B_SELECT;
if (hdmi_priv->has_hdmi_audio) {
@@ -99,9 +99,8 @@ static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
- struct psb_intel_encoder *psb_intel_encoder =
- to_psb_intel_encoder(encoder);
- struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+ struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
+ struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
u32 hdmib;
hdmib = REG_READ(hdmi_priv->hdmi_reg);
@@ -116,9 +115,8 @@ static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
static void cdv_hdmi_save(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
- struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg);
}
@@ -126,9 +124,8 @@ static void cdv_hdmi_save(struct drm_connector *connector)
static void cdv_hdmi_restore(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
- struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB);
REG_READ(hdmi_priv->hdmi_reg);
@@ -137,13 +134,12 @@ static void cdv_hdmi_restore(struct drm_connector *connector)
static enum drm_connector_status cdv_hdmi_detect(
struct drm_connector *connector, bool force)
{
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
- struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct mid_intel_hdmi_priv *hdmi_priv = gma_encoder->dev_priv;
struct edid *edid = NULL;
enum drm_connector_status status = connector_status_disconnected;
- edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
+ edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter);
hdmi_priv->has_hdmi_sink = false;
hdmi_priv->has_hdmi_audio = false;
@@ -167,7 +163,7 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
struct drm_encoder *encoder = connector->encoder;
if (!strcmp(property->name, "scaling mode") && encoder) {
- struct psb_intel_crtc *crtc = to_psb_intel_crtc(encoder->crtc);
+ struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
bool centre;
uint64_t curValue;
@@ -221,12 +217,11 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
*/
static int cdv_hdmi_get_modes(struct drm_connector *connector)
{
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct edid *edid = NULL;
int ret = 0;
- edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
+ edid = drm_get_edid(connector, &gma_encoder->i2c_bus->adapter);
if (edid) {
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
@@ -256,11 +251,10 @@ static int cdv_hdmi_mode_valid(struct drm_connector *connector,
static void cdv_hdmi_destroy(struct drm_connector *connector)
{
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
- if (psb_intel_encoder->i2c_bus)
- psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
+ if (gma_encoder->i2c_bus)
+ psb_intel_i2c_destroy(gma_encoder->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -269,16 +263,16 @@ static void cdv_hdmi_destroy(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs cdv_hdmi_helper_funcs = {
.dpms = cdv_hdmi_dpms,
.mode_fixup = cdv_hdmi_mode_fixup,
- .prepare = psb_intel_encoder_prepare,
+ .prepare = gma_encoder_prepare,
.mode_set = cdv_hdmi_mode_set,
- .commit = psb_intel_encoder_commit,
+ .commit = gma_encoder_commit,
};
static const struct drm_connector_helper_funcs
cdv_hdmi_connector_helper_funcs = {
.get_modes = cdv_hdmi_get_modes,
.mode_valid = cdv_hdmi_mode_valid,
- .best_encoder = psb_intel_best_encoder,
+ .best_encoder = gma_best_encoder,
};
static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
@@ -294,23 +288,22 @@ static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
void cdv_hdmi_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev, int reg)
{
- struct psb_intel_encoder *psb_intel_encoder;
- struct psb_intel_connector *psb_intel_connector;
+ struct gma_encoder *gma_encoder;
+ struct gma_connector *gma_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct mid_intel_hdmi_priv *hdmi_priv;
int ddc_bus;
- psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
- GFP_KERNEL);
+ gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
- if (!psb_intel_encoder)
+ if (!gma_encoder)
return;
- psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
+ gma_connector = kzalloc(sizeof(struct gma_connector),
GFP_KERNEL);
- if (!psb_intel_connector)
+ if (!gma_connector)
goto err_connector;
hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
@@ -318,9 +311,9 @@ void cdv_hdmi_init(struct drm_device *dev,
if (!hdmi_priv)
goto err_priv;
- connector = &psb_intel_connector->base;
+ connector = &gma_connector->base;
connector->polled = DRM_CONNECTOR_POLL_HPD;
- encoder = &psb_intel_encoder->base;
+ encoder = &gma_encoder->base;
drm_connector_init(dev, connector,
&cdv_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_DVID);
@@ -328,12 +321,11 @@ void cdv_hdmi_init(struct drm_device *dev,
drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
DRM_MODE_ENCODER_TMDS);
- psb_intel_connector_attach_encoder(psb_intel_connector,
- psb_intel_encoder);
- psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
+ gma_connector_attach_encoder(gma_connector, gma_encoder);
+ gma_encoder->type = INTEL_OUTPUT_HDMI;
hdmi_priv->hdmi_reg = reg;
hdmi_priv->has_hdmi_sink = false;
- psb_intel_encoder->dev_priv = hdmi_priv;
+ gma_encoder->dev_priv = hdmi_priv;
drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs);
drm_connector_helper_add(connector,
@@ -349,11 +341,11 @@ void cdv_hdmi_init(struct drm_device *dev,
switch (reg) {
case SDVOB:
ddc_bus = GPIOE;
- psb_intel_encoder->ddi_select = DDI0_SELECT;
+ gma_encoder->ddi_select = DDI0_SELECT;
break;
case SDVOC:
ddc_bus = GPIOD;
- psb_intel_encoder->ddi_select = DDI1_SELECT;
+ gma_encoder->ddi_select = DDI1_SELECT;
break;
default:
DRM_ERROR("unknown reg 0x%x for HDMI\n", reg);
@@ -361,16 +353,15 @@ void cdv_hdmi_init(struct drm_device *dev,
break;
}
- psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
+ gma_encoder->i2c_bus = psb_intel_i2c_create(dev,
ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC");
- if (!psb_intel_encoder->i2c_bus) {
+ if (!gma_encoder->i2c_bus) {
dev_err(dev->dev, "No ddc adapter available!\n");
goto failed_ddc;
}
- hdmi_priv->hdmi_i2c_adapter =
- &(psb_intel_encoder->i2c_bus->adapter);
+ hdmi_priv->hdmi_i2c_adapter = &(gma_encoder->i2c_bus->adapter);
hdmi_priv->dev = dev;
drm_sysfs_connector_add(connector);
return;
@@ -379,7 +370,7 @@ failed_ddc:
drm_encoder_cleanup(encoder);
drm_connector_cleanup(connector);
err_priv:
- kfree(psb_intel_connector);
+ kfree(gma_connector);
err_connector:
- kfree(psb_intel_encoder);
+ kfree(gma_encoder);
}
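The hunks above are a mechanical rename from the psb_intel_* structures to the shared gma_* ones; the accessors they lean on (to_gma_encoder(), gma_attached_encoder()) live in psb_intel_drv.h and are not part of this excerpt. A minimal sketch of the pattern they are assumed to follow -- the field layout is simplified, so treat everything except the container_of() idiom as an assumption:

#include <drm/drmP.h>

/* Simplified stand-ins for the real definitions in psb_intel_drv.h */
struct gma_encoder {
	struct drm_encoder base;
	int type;			/* INTEL_OUTPUT_* */
	void *dev_priv;			/* e.g. mid_intel_hdmi_priv */
};

struct gma_connector {
	struct drm_connector base;
	struct gma_encoder *encoder;	/* set by gma_connector_attach_encoder() */
};

static inline struct gma_encoder *to_gma_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct gma_encoder, base);
}

static inline struct gma_encoder *
gma_attached_encoder(struct drm_connector *connector)
{
	return container_of(connector, struct gma_connector, base)->encoder;
}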
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index d81dbc3368f..20e08e65d46 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -356,8 +356,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(
- encoder->crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
u32 pfit_control;
/*
@@ -379,7 +378,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
else
pfit_control = 0;
- pfit_control |= psb_intel_crtc->pipe << PFIT_PIPE_SHIFT;
+ pfit_control |= gma_crtc->pipe << PFIT_PIPE_SHIFT;
if (dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
@@ -407,12 +406,11 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
int ret;
- ret = psb_intel_ddc_get_modes(connector, &psb_intel_encoder->i2c_bus->adapter);
+ ret = psb_intel_ddc_get_modes(connector, &gma_encoder->i2c_bus->adapter);
if (ret)
return ret;
@@ -444,11 +442,10 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
*/
static void cdv_intel_lvds_destroy(struct drm_connector *connector)
{
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
- if (psb_intel_encoder->i2c_bus)
- psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
+ if (gma_encoder->i2c_bus)
+ psb_intel_i2c_destroy(gma_encoder->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -461,8 +458,7 @@ static int cdv_intel_lvds_set_property(struct drm_connector *connector,
struct drm_encoder *encoder = connector->encoder;
if (!strcmp(property->name, "scaling mode") && encoder) {
- struct psb_intel_crtc *crtc =
- to_psb_intel_crtc(encoder->crtc);
+ struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
uint64_t curValue;
if (!crtc)
@@ -529,7 +525,7 @@ static const struct drm_connector_helper_funcs
cdv_intel_lvds_connector_helper_funcs = {
.get_modes = cdv_intel_lvds_get_modes,
.mode_valid = cdv_intel_lvds_mode_valid,
- .best_encoder = psb_intel_best_encoder,
+ .best_encoder = gma_best_encoder,
};
static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
@@ -612,8 +608,8 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev,
void cdv_intel_lvds_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
- struct psb_intel_encoder *psb_intel_encoder;
- struct psb_intel_connector *psb_intel_connector;
+ struct gma_encoder *gma_encoder;
+ struct gma_connector *gma_connector;
struct cdv_intel_lvds_priv *lvds_priv;
struct drm_connector *connector;
struct drm_encoder *encoder;
@@ -630,24 +626,24 @@ void cdv_intel_lvds_init(struct drm_device *dev,
return;
}
- psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
+ gma_encoder = kzalloc(sizeof(struct gma_encoder),
GFP_KERNEL);
- if (!psb_intel_encoder)
+ if (!gma_encoder)
return;
- psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
+ gma_connector = kzalloc(sizeof(struct gma_connector),
GFP_KERNEL);
- if (!psb_intel_connector)
+ if (!gma_connector)
goto failed_connector;
lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
if (!lvds_priv)
goto failed_lvds_priv;
- psb_intel_encoder->dev_priv = lvds_priv;
+ gma_encoder->dev_priv = lvds_priv;
- connector = &psb_intel_connector->base;
- encoder = &psb_intel_encoder->base;
+ connector = &gma_connector->base;
+ encoder = &gma_encoder->base;
drm_connector_init(dev, connector,
@@ -659,9 +655,8 @@ void cdv_intel_lvds_init(struct drm_device *dev,
DRM_MODE_ENCODER_LVDS);
- psb_intel_connector_attach_encoder(psb_intel_connector,
- psb_intel_encoder);
- psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+ gma_connector_attach_encoder(gma_connector, gma_encoder);
+ gma_encoder->type = INTEL_OUTPUT_LVDS;
drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs);
drm_connector_helper_add(connector,
@@ -682,16 +677,16 @@ void cdv_intel_lvds_init(struct drm_device *dev,
* Set up I2C bus
* FIXME: distroy i2c_bus when exit
*/
- psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
+ gma_encoder->i2c_bus = psb_intel_i2c_create(dev,
GPIOB,
"LVDSBLC_B");
- if (!psb_intel_encoder->i2c_bus) {
+ if (!gma_encoder->i2c_bus) {
dev_printk(KERN_ERR,
&dev->pdev->dev, "I2C bus registration failed.\n");
goto failed_blc_i2c;
}
- psb_intel_encoder->i2c_bus->slave_addr = 0x2C;
- dev_priv->lvds_i2c_bus = psb_intel_encoder->i2c_bus;
+ gma_encoder->i2c_bus->slave_addr = 0x2C;
+ dev_priv->lvds_i2c_bus = gma_encoder->i2c_bus;
/*
* LVDS discovery:
@@ -704,10 +699,10 @@ void cdv_intel_lvds_init(struct drm_device *dev,
*/
/* Set up the DDC bus. */
- psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
+ gma_encoder->ddc_bus = psb_intel_i2c_create(dev,
GPIOC,
"LVDSDDC_C");
- if (!psb_intel_encoder->ddc_bus) {
+ if (!gma_encoder->ddc_bus) {
dev_printk(KERN_ERR, &dev->pdev->dev,
"DDC bus registration " "failed.\n");
goto failed_ddc;
@@ -718,7 +713,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
* preferred mode is the right one.
*/
psb_intel_ddc_get_modes(connector,
- &psb_intel_encoder->ddc_bus->adapter);
+ &gma_encoder->ddc_bus->adapter);
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
mode_dev->panel_fixed_mode =
@@ -782,19 +777,19 @@ out:
failed_find:
printk(KERN_ERR "Failed find\n");
- if (psb_intel_encoder->ddc_bus)
- psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+ if (gma_encoder->ddc_bus)
+ psb_intel_i2c_destroy(gma_encoder->ddc_bus);
failed_ddc:
printk(KERN_ERR "Failed DDC\n");
- if (psb_intel_encoder->i2c_bus)
- psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
+ if (gma_encoder->i2c_bus)
+ psb_intel_i2c_destroy(gma_encoder->i2c_bus);
failed_blc_i2c:
printk(KERN_ERR "Failed BLC\n");
drm_encoder_cleanup(encoder);
drm_connector_cleanup(connector);
kfree(lvds_priv);
failed_lvds_priv:
- kfree(psb_intel_connector);
+ kfree(gma_connector);
failed_connector:
- kfree(psb_intel_encoder);
+ kfree(gma_encoder);
}
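cdv_intel_lvds_init() keeps its original error handling through the rename: each failure label releases only what was acquired before the failing step, in reverse order (DDC bus, then the backlight I2C bus, then the LVDS private data, then the connector, then the encoder). A generic sketch of that goto-unwind idiom, with illustrative names only:

#include <linux/errno.h>
#include <linux/slab.h>

static int example_init(void)
{
	void *a, *b, *c;

	a = kzalloc(16, GFP_KERNEL);
	if (!a)
		return -ENOMEM;

	b = kzalloc(16, GFP_KERNEL);
	if (!b)
		goto err_free_a;

	c = kzalloc(16, GFP_KERNEL);
	if (!c)
		goto err_free_b;

	return 0;		/* success path owns a, b and c */

err_free_b:
	kfree(b);
err_free_a:
	kfree(a);
	return -ENOMEM;
}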
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 8b1b6d923ab..01dd7d22576 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -321,10 +321,8 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
/* Begin by trying to use stolen memory backing */
backing = psb_gtt_alloc_range(dev, aligned_size, "fb", 1);
if (backing) {
- if (drm_gem_private_object_init(dev,
- &backing->gem, aligned_size) == 0)
- return backing;
- psb_gtt_free_range(dev, backing);
+ drm_gem_private_object_init(dev, &backing->gem, aligned_size);
+ return backing;
}
return NULL;
}
@@ -522,21 +520,21 @@ static struct drm_framebuffer *psb_user_framebuffer_create
static void psbfb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno)
{
- struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- intel_crtc->lut_r[regno] = red >> 8;
- intel_crtc->lut_g[regno] = green >> 8;
- intel_crtc->lut_b[regno] = blue >> 8;
+ gma_crtc->lut_r[regno] = red >> 8;
+ gma_crtc->lut_g[regno] = green >> 8;
+ gma_crtc->lut_b[regno] = blue >> 8;
}
static void psbfb_gamma_get(struct drm_crtc *crtc, u16 *red,
u16 *green, u16 *blue, int regno)
{
- struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- *red = intel_crtc->lut_r[regno] << 8;
- *green = intel_crtc->lut_g[regno] << 8;
- *blue = intel_crtc->lut_b[regno] << 8;
+ *red = gma_crtc->lut_r[regno] << 8;
+ *green = gma_crtc->lut_g[regno] << 8;
+ *blue = gma_crtc->lut_b[regno] << 8;
}
static int psbfb_probe(struct drm_fb_helper *helper,
@@ -705,13 +703,12 @@ static void psb_setup_outputs(struct drm_device *dev)
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
- struct drm_encoder *encoder = &psb_intel_encoder->base;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct drm_encoder *encoder = &gma_encoder->base;
int crtc_mask = 0, clone_mask = 0;
/* valid crtcs */
- switch (psb_intel_encoder->type) {
+ switch (gma_encoder->type) {
case INTEL_OUTPUT_ANALOG:
crtc_mask = (1 << 0);
clone_mask = (1 << INTEL_OUTPUT_ANALOG);
@@ -746,7 +743,7 @@ static void psb_setup_outputs(struct drm_device *dev)
}
encoder->possible_crtcs = crtc_mask;
encoder->possible_clones =
- psb_intel_connector_clones(dev, clone_mask);
+ gma_connector_clones(dev, clone_mask);
}
}
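psb_setup_outputs() builds encoder->possible_clones from a mask of INTEL_OUTPUT_* types via gma_connector_clones(); that helper's body is outside this excerpt, so the sketch below only shows the conventional translation from a type mask to the encoder-index mask the DRM core expects -- treat the exact body as an assumption:

#include <drm/drmP.h>
#include "psb_intel_drv.h"	/* gma_attached_encoder(), struct gma_encoder */

static int example_connector_clones(struct drm_device *dev, int type_mask)
{
	struct drm_connector *connector;
	int index_mask = 0;
	int entry = 0;

	list_for_each_entry(connector, &dev->mode_config.connector_list,
			    head) {
		struct gma_encoder *gma_encoder =
					gma_attached_encoder(connector);

		/* One bit per connector position, kept if its type may clone */
		if (type_mask & (1 << gma_encoder->type))
			index_mask |= (1 << entry);
		entry++;
	}

	return index_mask;
}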
diff --git a/drivers/gpu/drm/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
index 989558a9e6e..395f20b07aa 100644
--- a/drivers/gpu/drm/gma500/framebuffer.h
+++ b/drivers/gpu/drm/gma500/framebuffer.h
@@ -41,7 +41,7 @@ struct psb_fbdev {
#define to_psb_fb(x) container_of(x, struct psb_framebuffer, base)
-extern int psb_intel_connector_clones(struct drm_device *dev, int type_mask);
+extern int gma_connector_clones(struct drm_device *dev, int type_mask);
#endif
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index eefd6cc5b80..10ae8c52d06 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -26,6 +26,7 @@
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/gma_drm.h>
+#include <drm/drm_vma_manager.h>
#include "psb_drv.h"
int psb_gem_init_object(struct drm_gem_object *obj)
@@ -38,8 +39,7 @@ void psb_gem_free_object(struct drm_gem_object *obj)
struct gtt_range *gtt = container_of(obj, struct gtt_range, gem);
/* Remove the list map if one is present */
- if (obj->map_list.map)
- drm_gem_free_mmap_offset(obj);
+ drm_gem_free_mmap_offset(obj);
drm_gem_object_release(obj);
/* This must occur last as it frees up the memory of the GEM object */
@@ -81,13 +81,10 @@ int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
/* What validation is needed here ? */
/* Make it mmapable */
- if (!obj->map_list.map) {
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto out;
- }
- /* GEM should really work out the hash offsets for us */
- *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
+ *offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
drm_gem_object_unreference(obj);
unlock:
@@ -165,23 +162,6 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
}
/**
- * psb_gem_dumb_destroy - destroy a dumb buffer
- * @file: client file
- * @dev: our DRM device
- * @handle: the object handle
- *
- * Destroy a handle that was created via psb_gem_dumb_create, at least
- * we hope it was created that way. i915 seems to assume the caller
- * does the checking but that might be worth review ! FIXME
- */
-int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- uint32_t handle)
-{
- /* No special work needed, drop the reference and see what falls out */
- return drm_gem_handle_delete(file, handle);
-}
-
-/**
* psb_gem_fault - pagefault handler for GEM objects
* @vma: the VMA of the GEM object
* @vmf: fault detail
@@ -261,11 +241,12 @@ static int psb_gem_create_stolen(struct drm_file *file, struct drm_device *dev,
struct gtt_range *gtt = psb_gtt_alloc_range(dev, size, "gem", 1);
if (gtt == NULL)
return -ENOMEM;
- if (drm_gem_private_object_init(dev, &gtt->gem, size) != 0)
- goto free_gtt;
+
+ drm_gem_private_object_init(dev, &gtt->gem, size);
if (drm_gem_handle_create(file, &gtt->gem, handle) == 0)
return 0;
-free_gtt:
+
+ drm_gem_object_release(&gtt->gem);
psb_gtt_free_range(dev, gtt);
return -ENOMEM;
}
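The gem.c hunks switch the driver from the old obj->map_list bookkeeping to the drm_vma_manager: drm_gem_create_mmap_offset() can now be called unconditionally (it is a no-op when an offset already exists), drm_gem_free_mmap_offset() is safe even if no offset was ever created, and the fake offset handed to userspace comes from the vma node embedded in the GEM object. A minimal sketch of the new flow (only the drm_* calls are real; the wrapper is illustrative):

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

static int example_map_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	/* Idempotent: reuses the offset if one was already allocated */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		return ret;

	/* This is the value userspace passes to mmap() on the DRM fd */
	*offset = drm_vma_node_offset_addr(&obj->vma_node);
	return 0;
}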
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
new file mode 100644
index 00000000000..24e8af3d22b
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -0,0 +1,776 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+ */
+
+#include <drm/drmP.h>
+#include "gma_display.h"
+#include "psb_intel_drv.h"
+#include "psb_intel_reg.h"
+#include "psb_drv.h"
+#include "framebuffer.h"
+
+/**
+ * Returns whether any output on the specified pipe is of the specified type
+ */
+bool gma_pipe_has_type(struct drm_crtc *crtc, int type)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *l_entry;
+
+ list_for_each_entry(l_entry, &mode_config->connector_list, head) {
+ if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
+ struct gma_encoder *gma_encoder =
+ gma_attached_encoder(l_entry);
+ if (gma_encoder->type == type)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void gma_wait_for_vblank(struct drm_device *dev)
+{
+ /* Wait for 20ms, i.e. one cycle at 50hz. */
+ mdelay(20);
+}
+
+int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
+ int pipe = gma_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
+ unsigned long start, offset;
+ u32 dspcntr;
+ int ret = 0;
+
+ if (!gma_power_begin(dev, true))
+ return 0;
+
+ /* no fb bound */
+ if (!crtc->fb) {
+ dev_err(dev->dev, "No FB bound\n");
+ goto gma_pipe_cleaner;
+ }
+
+ /* We are displaying this buffer, make sure it is actually loaded
+ into the GTT */
+ ret = psb_gtt_pin(psbfb->gtt);
+ if (ret < 0)
+ goto gma_pipe_set_base_exit;
+ start = psbfb->gtt->offset;
+ offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
+
+ REG_WRITE(map->stride, crtc->fb->pitches[0]);
+
+ dspcntr = REG_READ(map->cntr);
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (crtc->fb->depth == 15)
+ dspcntr |= DISPPLANE_15_16BPP;
+ else
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ break;
+ default:
+ dev_err(dev->dev, "Unknown color depth\n");
+ ret = -EINVAL;
+ goto gma_pipe_set_base_exit;
+ }
+ REG_WRITE(map->cntr, dspcntr);
+
+ dev_dbg(dev->dev,
+ "Writing base %08lX %08lX %d %d\n", start, offset, x, y);
+
+ /* FIXME: Investigate whether this really is the base for psb and why
+ the linear offset is named base for the other chips. map->surf
+ should be the base and map->linoff the offset for all chips */
+ if (IS_PSB(dev)) {
+ REG_WRITE(map->base, offset + start);
+ REG_READ(map->base);
+ } else {
+ REG_WRITE(map->base, offset);
+ REG_READ(map->base);
+ REG_WRITE(map->surf, start);
+ REG_READ(map->surf);
+ }
+
+gma_pipe_cleaner:
+ /* If there was a previous display we can now unpin it */
+ if (old_fb)
+ psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
+
+gma_pipe_set_base_exit:
+ gma_power_end(dev);
+ return ret;
+}
+
+/* Loads the palette/gamma unit for the CRTC with the prepared values */
+void gma_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
+ int palreg = map->palette;
+ int i;
+
+ /* The clocks have to be on to load the palette. */
+ if (!crtc->enabled)
+ return;
+
+ if (gma_power_begin(dev, false)) {
+ for (i = 0; i < 256; i++) {
+ REG_WRITE(palreg + 4 * i,
+ ((gma_crtc->lut_r[i] +
+ gma_crtc->lut_adj[i]) << 16) |
+ ((gma_crtc->lut_g[i] +
+ gma_crtc->lut_adj[i]) << 8) |
+ (gma_crtc->lut_b[i] +
+ gma_crtc->lut_adj[i]));
+ }
+ gma_power_end(dev);
+ } else {
+ for (i = 0; i < 256; i++) {
+ /* FIXME: Why pipe[0] and not pipe[..._crtc->pipe]? */
+ dev_priv->regs.pipe[0].palette[i] =
+ ((gma_crtc->lut_r[i] +
+ gma_crtc->lut_adj[i]) << 16) |
+ ((gma_crtc->lut_g[i] +
+ gma_crtc->lut_adj[i]) << 8) |
+ (gma_crtc->lut_b[i] +
+ gma_crtc->lut_adj[i]);
+ }
+
+ }
+}
+
+void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
+ u32 start, u32 size)
+{
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int i;
+ int end = (start + size > 256) ? 256 : start + size;
+
+ for (i = start; i < end; i++) {
+ gma_crtc->lut_r[i] = red[i] >> 8;
+ gma_crtc->lut_g[i] = green[i] >> 8;
+ gma_crtc->lut_b[i] = blue[i] >> 8;
+ }
+
+ gma_crtc_load_lut(crtc);
+}
+
+/**
+ * Sets the power management mode of the pipe and plane.
+ *
+ * This code should probably grow support for turning the cursor off and back
+ * on appropriately at the same time as we're turning the pipe off/on.
+ */
+void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
+ const struct psb_offset *map = &dev_priv->regmap[pipe];
+ u32 temp;
+
+ /* XXX: When our outputs are all unaware of DPMS modes other than off
+ * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
+ */
+
+ if (IS_CDV(dev))
+ dev_priv->ops->disable_sr(dev);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ if (gma_crtc->active)
+ break;
+
+ gma_crtc->active = true;
+
+ /* Enable the DPLL */
+ temp = REG_READ(map->dpll);
+ if ((temp & DPLL_VCO_ENABLE) == 0) {
+ REG_WRITE(map->dpll, temp);
+ REG_READ(map->dpll);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
+ /* Wait for the clocks to stabilize. */
+ udelay(150);
+ }
+
+ /* Enable the plane */
+ temp = REG_READ(map->cntr);
+ if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
+ REG_WRITE(map->cntr,
+ temp | DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(map->base, REG_READ(map->base));
+ }
+
+ udelay(150);
+
+ /* Enable the pipe */
+ temp = REG_READ(map->conf);
+ if ((temp & PIPEACONF_ENABLE) == 0)
+ REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
+
+ temp = REG_READ(map->status);
+ temp &= ~(0xFFFF);
+ temp |= PIPE_FIFO_UNDERRUN;
+ REG_WRITE(map->status, temp);
+ REG_READ(map->status);
+
+ gma_crtc_load_lut(crtc);
+
+ /* Give the overlay scaler a chance to enable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, true); TODO */
+ break;
+ case DRM_MODE_DPMS_OFF:
+ if (!gma_crtc->active)
+ break;
+
+ gma_crtc->active = false;
+
+ /* Give the overlay scaler a chance to disable
+ * if it's on this pipe */
+ /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
+
+ /* Disable the VGA plane that we never use */
+ REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
+
+ /* Turn off vblank interrupts */
+ drm_vblank_off(dev, pipe);
+
+ /* Wait for vblank for the disable to take effect */
+ gma_wait_for_vblank(dev);
+
+ /* Disable plane */
+ temp = REG_READ(map->cntr);
+ if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
+ REG_WRITE(map->cntr,
+ temp & ~DISPLAY_PLANE_ENABLE);
+ /* Flush the plane changes */
+ REG_WRITE(map->base, REG_READ(map->base));
+ REG_READ(map->base);
+ }
+
+ /* Disable pipe */
+ temp = REG_READ(map->conf);
+ if ((temp & PIPEACONF_ENABLE) != 0) {
+ REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
+ REG_READ(map->conf);
+ }
+
+ /* Wait for vblank for the disable to take effect. */
+ gma_wait_for_vblank(dev);
+
+ udelay(150);
+
+ /* Disable DPLL */
+ temp = REG_READ(map->dpll);
+ if ((temp & DPLL_VCO_ENABLE) != 0) {
+ REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
+ }
+
+ /* Wait for the clocks to turn off. */
+ udelay(150);
+ break;
+ }
+
+ if (IS_CDV(dev))
+ dev_priv->ops->update_wm(dev, crtc);
+
+ /* Set FIFO watermarks */
+ REG_WRITE(DSPARB, 0x3F3E);
+}
+
+int gma_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
+ uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
+ uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
+ uint32_t temp;
+ size_t addr = 0;
+ struct gtt_range *gt;
+ struct gtt_range *cursor_gt = gma_crtc->cursor_gt;
+ struct drm_gem_object *obj;
+ void *tmp_dst, *tmp_src;
+ int ret = 0, i, cursor_pages;
+
+ /* If we didn't get a handle then turn the cursor off */
+ if (!handle) {
+ temp = CURSOR_MODE_DISABLE;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE(control, temp);
+ REG_WRITE(base, 0);
+ gma_power_end(dev);
+ }
+
+ /* Unpin the old GEM object */
+ if (gma_crtc->cursor_obj) {
+ gt = container_of(gma_crtc->cursor_obj,
+ struct gtt_range, gem);
+ psb_gtt_unpin(gt);
+ drm_gem_object_unreference(gma_crtc->cursor_obj);
+ gma_crtc->cursor_obj = NULL;
+ }
+
+ return 0;
+ }
+
+ /* Currently we only support 64x64 cursors */
+ if (width != 64 || height != 64) {
+ dev_dbg(dev->dev, "We currently only support 64x64 cursors\n");
+ return -EINVAL;
+ }
+
+ obj = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!obj)
+ return -ENOENT;
+
+ if (obj->size < width * height * 4) {
+ dev_dbg(dev->dev, "Buffer is too small\n");
+ ret = -ENOMEM;
+ goto unref_cursor;
+ }
+
+ gt = container_of(obj, struct gtt_range, gem);
+
+ /* Pin the memory into the GTT */
+ ret = psb_gtt_pin(gt);
+ if (ret) {
+ dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
+ goto unref_cursor;
+ }
+
+ if (dev_priv->ops->cursor_needs_phys) {
+ if (cursor_gt == NULL) {
+ dev_err(dev->dev, "No hardware cursor mem available");
+ ret = -ENOMEM;
+ goto unref_cursor;
+ }
+
+ /* Prevent overflow */
+ if (gt->npage > 4)
+ cursor_pages = 4;
+ else
+ cursor_pages = gt->npage;
+
+ /* Copy the cursor to cursor mem */
+ tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
+ for (i = 0; i < cursor_pages; i++) {
+ tmp_src = kmap(gt->pages[i]);
+ memcpy(tmp_dst, tmp_src, PAGE_SIZE);
+ kunmap(gt->pages[i]);
+ tmp_dst += PAGE_SIZE;
+ }
+
+ addr = gma_crtc->cursor_addr;
+ } else {
+ addr = gt->offset;
+ gma_crtc->cursor_addr = addr;
+ }
+
+ temp = 0;
+ /* set the pipe for the cursor */
+ temp |= (pipe << 28);
+ temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE(control, temp);
+ REG_WRITE(base, addr);
+ gma_power_end(dev);
+ }
+
+ /* unpin the old bo */
+ if (gma_crtc->cursor_obj) {
+ gt = container_of(gma_crtc->cursor_obj, struct gtt_range, gem);
+ psb_gtt_unpin(gt);
+ drm_gem_object_unreference(gma_crtc->cursor_obj);
+ }
+
+ gma_crtc->cursor_obj = obj;
+ return ret;
+
+unref_cursor:
+ drm_gem_object_unreference(obj);
+ return ret;
+}
+
+int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
+ uint32_t temp = 0;
+ uint32_t addr;
+
+ if (x < 0) {
+ temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
+ x = -x;
+ }
+ if (y < 0) {
+ temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
+ y = -y;
+ }
+
+ temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
+ temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
+
+ addr = gma_crtc->cursor_addr;
+
+ if (gma_power_begin(dev, false)) {
+ REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
+ REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
+ gma_power_end(dev);
+ }
+ return 0;
+}
+
+bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+void gma_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+void gma_crtc_commit(struct drm_crtc *crtc)
+{
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+void gma_crtc_disable(struct drm_crtc *crtc)
+{
+ struct gtt_range *gt;
+ struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+
+ crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ if (crtc->fb) {
+ gt = to_psb_fb(crtc->fb)->gtt;
+ psb_gtt_unpin(gt);
+ }
+}
+
+void gma_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+
+ kfree(gma_crtc->crtc_state);
+ drm_crtc_cleanup(crtc);
+ kfree(gma_crtc);
+}
+
+int gma_crtc_set_config(struct drm_mode_set *set)
+{
+ struct drm_device *dev = set->crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if (!dev_priv->rpm_enabled)
+ return drm_crtc_helper_set_config(set);
+
+ pm_runtime_forbid(&dev->pdev->dev);
+ ret = drm_crtc_helper_set_config(set);
+ pm_runtime_allow(&dev->pdev->dev);
+
+ return ret;
+}
+
+/**
+ * Save HW states of given crtc
+ */
+void gma_crtc_save(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
+ const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
+ uint32_t palette_reg;
+ int i;
+
+ if (!crtc_state) {
+ dev_err(dev->dev, "No CRTC state found\n");
+ return;
+ }
+
+ crtc_state->saveDSPCNTR = REG_READ(map->cntr);
+ crtc_state->savePIPECONF = REG_READ(map->conf);
+ crtc_state->savePIPESRC = REG_READ(map->src);
+ crtc_state->saveFP0 = REG_READ(map->fp0);
+ crtc_state->saveFP1 = REG_READ(map->fp1);
+ crtc_state->saveDPLL = REG_READ(map->dpll);
+ crtc_state->saveHTOTAL = REG_READ(map->htotal);
+ crtc_state->saveHBLANK = REG_READ(map->hblank);
+ crtc_state->saveHSYNC = REG_READ(map->hsync);
+ crtc_state->saveVTOTAL = REG_READ(map->vtotal);
+ crtc_state->saveVBLANK = REG_READ(map->vblank);
+ crtc_state->saveVSYNC = REG_READ(map->vsync);
+ crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
+
+ /* NOTE: DSPSIZE DSPPOS only for psb */
+ crtc_state->saveDSPSIZE = REG_READ(map->size);
+ crtc_state->saveDSPPOS = REG_READ(map->pos);
+
+ crtc_state->saveDSPBASE = REG_READ(map->base);
+
+ palette_reg = map->palette;
+ for (i = 0; i < 256; ++i)
+ crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2));
+}
+
+/**
+ * Restore HW states of given crtc
+ */
+void gma_crtc_restore(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
+ const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
+ uint32_t palette_reg;
+ int i;
+
+ if (!crtc_state) {
+ dev_err(dev->dev, "No crtc state\n");
+ return;
+ }
+
+ if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
+ REG_WRITE(map->dpll,
+ crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
+ REG_READ(map->dpll);
+ udelay(150);
+ }
+
+ REG_WRITE(map->fp0, crtc_state->saveFP0);
+ REG_READ(map->fp0);
+
+ REG_WRITE(map->fp1, crtc_state->saveFP1);
+ REG_READ(map->fp1);
+
+ REG_WRITE(map->dpll, crtc_state->saveDPLL);
+ REG_READ(map->dpll);
+ udelay(150);
+
+ REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
+ REG_WRITE(map->hblank, crtc_state->saveHBLANK);
+ REG_WRITE(map->hsync, crtc_state->saveHSYNC);
+ REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
+ REG_WRITE(map->vblank, crtc_state->saveVBLANK);
+ REG_WRITE(map->vsync, crtc_state->saveVSYNC);
+ REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
+
+ REG_WRITE(map->size, crtc_state->saveDSPSIZE);
+ REG_WRITE(map->pos, crtc_state->saveDSPPOS);
+
+ REG_WRITE(map->src, crtc_state->savePIPESRC);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
+ REG_WRITE(map->conf, crtc_state->savePIPECONF);
+
+ gma_wait_for_vblank(dev);
+
+ REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
+ REG_WRITE(map->base, crtc_state->saveDSPBASE);
+
+ gma_wait_for_vblank(dev);
+
+ palette_reg = map->palette;
+ for (i = 0; i < 256; ++i)
+ REG_WRITE(palette_reg + (i << 2), crtc_state->savePalette[i]);
+}
+
+void gma_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ /* lvds has its own version of prepare, see psb_intel_lvds_prepare */
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+void gma_encoder_commit(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ /* lvds has its own version of commit, see psb_intel_lvds_commit */
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+void gma_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
+}
+
+/* Currently there is only a 1:1 mapping of encoders and connectors */
+struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
+{
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+
+ return &gma_encoder->base;
+}
+
+void gma_connector_attach_encoder(struct gma_connector *connector,
+ struct gma_encoder *encoder)
+{
+ connector->encoder = encoder;
+ drm_mode_connector_attach_encoder(&connector->base,
+ &encoder->base);
+}
+
+#define GMA_PLL_INVALID(s) { /* DRM_ERROR(s); */ return false; }
+
+bool gma_pll_is_valid(struct drm_crtc *crtc,
+ const struct gma_limit_t *limit,
+ struct gma_clock_t *clock)
+{
+ if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
+ GMA_PLL_INVALID("p1 out of range");
+ if (clock->p < limit->p.min || limit->p.max < clock->p)
+ GMA_PLL_INVALID("p out of range");
+ if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
+ GMA_PLL_INVALID("m2 out of range");
+ if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
+ GMA_PLL_INVALID("m1 out of range");
+ /* On CDV m1 is always 0 */
+ if (clock->m1 <= clock->m2 && clock->m1 != 0)
+ GMA_PLL_INVALID("m1 <= m2 && m1 != 0");
+ if (clock->m < limit->m.min || limit->m.max < clock->m)
+ GMA_PLL_INVALID("m out of range");
+ if (clock->n < limit->n.min || limit->n.max < clock->n)
+ GMA_PLL_INVALID("n out of range");
+ if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
+ GMA_PLL_INVALID("vco out of range");
+ /* XXX: We may need to be checking "Dot clock"
+ * depending on the multiplier, connector, etc.,
+ * rather than just a single range.
+ */
+ if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
+ GMA_PLL_INVALID("dot out of range");
+
+ return true;
+}
+
+bool gma_find_best_pll(const struct gma_limit_t *limit,
+ struct drm_crtc *crtc, int target, int refclk,
+ struct gma_clock_t *best_clock)
+{
+ struct drm_device *dev = crtc->dev;
+ const struct gma_clock_funcs *clock_funcs =
+ to_gma_crtc(crtc)->clock_funcs;
+ struct gma_clock_t clock;
+ int err = target;
+
+ if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
+ (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
+ /*
+ * For LVDS, if the panel is on, just rely on its current
+ * settings for dual-channel. We haven't figured out how to
+ * reliably set up different single/dual channel state, if we
+ * even can.
+ */
+ if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
+ LVDS_CLKB_POWER_UP)
+ clock.p2 = limit->p2.p2_fast;
+ else
+ clock.p2 = limit->p2.p2_slow;
+ } else {
+ if (target < limit->p2.dot_limit)
+ clock.p2 = limit->p2.p2_slow;
+ else
+ clock.p2 = limit->p2.p2_fast;
+ }
+
+ memset(best_clock, 0, sizeof(*best_clock));
+
+ /* m1 is always 0 on CDV so the outermost loop will run just once */
+ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
+ for (clock.m2 = limit->m2.min;
+ (clock.m2 < clock.m1 || clock.m1 == 0) &&
+ clock.m2 <= limit->m2.max; clock.m2++) {
+ for (clock.n = limit->n.min;
+ clock.n <= limit->n.max; clock.n++) {
+ for (clock.p1 = limit->p1.min;
+ clock.p1 <= limit->p1.max;
+ clock.p1++) {
+ int this_err;
+
+ clock_funcs->clock(refclk, &clock);
+
+ if (!clock_funcs->pll_is_valid(crtc,
+ limit, &clock))
+ continue;
+
+ this_err = abs(clock.dot - target);
+ if (this_err < err) {
+ *best_clock = clock;
+ err = this_err;
+ }
+ }
+ }
+ }
+ }
+
+ return err != target;
+}
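gma_crtc_gamma_set() and gma_crtc_load_lut() above truncate the 16-bit per-channel gamma values DRM hands in to 8 bits and pack each palette entry as 0x00RRGGBB before it is written to the palette register (the per-entry lut_adj correction is added on top at load time). A worked example of that packing, with lut_adj omitted for clarity:

#include <linux/types.h>

static u32 example_pack_palette_entry(u16 red, u16 green, u16 blue)
{
	u8 r = red >> 8;		/* e.g. 0xffff -> 0xff */
	u8 g = green >> 8;
	u8 b = blue >> 8;

	return (r << 16) | (g << 8) | b;	/* 0x00RRGGBB */
}

/* example_pack_palette_entry(0xffff, 0x8000, 0x0000) == 0x00ff8000 */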
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
new file mode 100644
index 00000000000..78b9f986a6e
--- /dev/null
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright © 2006-2011 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
+ */
+
+#ifndef _GMA_DISPLAY_H_
+#define _GMA_DISPLAY_H_
+
+#include <linux/pm_runtime.h>
+
+struct gma_clock_t {
+ /* given values */
+ int n;
+ int m1, m2;
+ int p1, p2;
+ /* derived values */
+ int dot;
+ int vco;
+ int m;
+ int p;
+};
+
+struct gma_range_t {
+ int min, max;
+};
+
+struct gma_p2_t {
+ int dot_limit;
+ int p2_slow, p2_fast;
+};
+
+struct gma_limit_t {
+ struct gma_range_t dot, vco, n, m, m1, m2, p, p1;
+ struct gma_p2_t p2;
+ bool (*find_pll)(const struct gma_limit_t *, struct drm_crtc *,
+ int target, int refclk,
+ struct gma_clock_t *best_clock);
+};
+
+struct gma_clock_funcs {
+ void (*clock)(int refclk, struct gma_clock_t *clock);
+ const struct gma_limit_t *(*limit)(struct drm_crtc *crtc, int refclk);
+ bool (*pll_is_valid)(struct drm_crtc *crtc,
+ const struct gma_limit_t *limit,
+ struct gma_clock_t *clock);
+};
+
+/* Common pipe related functions */
+extern bool gma_pipe_has_type(struct drm_crtc *crtc, int type);
+extern void gma_wait_for_vblank(struct drm_device *dev);
+extern int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb);
+extern int gma_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t width, uint32_t height);
+extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
+extern void gma_crtc_load_lut(struct drm_crtc *crtc);
+extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, u32 start, u32 size);
+extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode);
+extern bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+extern void gma_crtc_prepare(struct drm_crtc *crtc);
+extern void gma_crtc_commit(struct drm_crtc *crtc);
+extern void gma_crtc_disable(struct drm_crtc *crtc);
+extern void gma_crtc_destroy(struct drm_crtc *crtc);
+extern int gma_crtc_set_config(struct drm_mode_set *set);
+
+extern void gma_crtc_save(struct drm_crtc *crtc);
+extern void gma_crtc_restore(struct drm_crtc *crtc);
+
+extern void gma_encoder_prepare(struct drm_encoder *encoder);
+extern void gma_encoder_commit(struct drm_encoder *encoder);
+extern void gma_encoder_destroy(struct drm_encoder *encoder);
+
+/* Common clock related functions */
+extern const struct gma_limit_t *gma_limit(struct drm_crtc *crtc, int refclk);
+extern void gma_clock(int refclk, struct gma_clock_t *clock);
+extern bool gma_pll_is_valid(struct drm_crtc *crtc,
+ const struct gma_limit_t *limit,
+ struct gma_clock_t *clock);
+extern bool gma_find_best_pll(const struct gma_limit_t *limit,
+ struct drm_crtc *crtc, int target, int refclk,
+ struct gma_clock_t *best_clock);
+#endif
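gma_limit_t and gma_clock_funcs are the hooks through which each chip variant feeds its divisor ranges and clock formula into the shared gma_find_best_pll()/gma_pll_is_valid() search above. The sketch below shows the expected wiring only; the divisor formula and every numeric range are placeholders, not real Poulsbo/Cedarview values:

#include <drm/drmP.h>
#include "gma_display.h"

/* Hypothetical divisor formula -- the real one is chip specific */
static void example_clock(int refclk, struct gma_clock_t *clock)
{
	clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
	clock->p = clock->p1 * clock->p2;
	clock->vco = refclk * clock->m / (clock->n + 2);
	clock->dot = clock->vco / clock->p;
}

/* Placeholder ranges -- real tables live in the per-chip display files */
static const struct gma_limit_t example_limit = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000, .p2_slow = 10, .p2_fast = 5 },
	.find_pll = gma_find_best_pll,
};

static const struct gma_clock_funcs example_clock_funcs = {
	.clock = example_clock,
	.limit = NULL,		/* per-chip limit(crtc, refclk) selector */
	.pll_is_valid = gma_pll_is_valid,
};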
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 1f82183536a..92babac362e 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -196,37 +196,17 @@ void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
*/
static int psb_gtt_attach_pages(struct gtt_range *gt)
{
- struct inode *inode;
- struct address_space *mapping;
- int i;
- struct page *p;
- int pages = gt->gem.size / PAGE_SIZE;
+ struct page **pages;
WARN_ON(gt->pages);
- /* This is the shared memory object that backs the GEM resource */
- inode = file_inode(gt->gem.filp);
- mapping = inode->i_mapping;
+ pages = drm_gem_get_pages(&gt->gem, 0);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
- gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
- if (gt->pages == NULL)
- return -ENOMEM;
- gt->npage = pages;
+ gt->pages = pages;
- for (i = 0; i < pages; i++) {
- p = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(p))
- goto err;
- gt->pages[i] = p;
- }
return 0;
-
-err:
- while (i--)
- page_cache_release(gt->pages[i]);
- kfree(gt->pages);
- gt->pages = NULL;
- return PTR_ERR(p);
}
/**
@@ -240,13 +220,7 @@ err:
*/
static void psb_gtt_detach_pages(struct gtt_range *gt)
{
- int i;
- for (i = 0; i < gt->npage; i++) {
- /* FIXME: do we need to force dirty */
- set_page_dirty(gt->pages[i]);
- page_cache_release(gt->pages[i]);
- }
- kfree(gt->pages);
+ drm_gem_put_pages(&gt->gem, gt->pages, true, false);
gt->pages = NULL;
}
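psb_gtt_attach_pages()/psb_gtt_detach_pages() now delegate to the common GEM shmem helpers: drm_gem_get_pages() returns the object's pinned page array (or an ERR_PTR), and drm_gem_put_pages() drops it again, with the dirty/accessed flags chosen as true/false to match the set_page_dirty() behaviour of the removed open-coded loop. A minimal sketch of the pair (wrapper names are illustrative):

#include <linux/err.h>
#include <drm/drmP.h>

static int example_attach_pages(struct drm_gem_object *obj,
				struct page ***pages_out)
{
	struct page **pages = drm_gem_get_pages(obj, 0);

	if (IS_ERR(pages))
		return PTR_ERR(pages);

	*pages_out = pages;
	return 0;
}

static void example_detach_pages(struct drm_gem_object *obj,
				 struct page **pages)
{
	/* dirty = true, accessed = false -- as in the conversion above */
	drm_gem_put_pages(obj, pages, true, false);
}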
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
index 3abf8315f57..860a4ee9baa 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c
@@ -249,12 +249,11 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
struct drm_encoder *encoder = connector->encoder;
if (!strcmp(property->name, "scaling mode") && encoder) {
- struct psb_intel_crtc *psb_crtc =
- to_psb_intel_crtc(encoder->crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
bool centerechange;
uint64_t val;
- if (!psb_crtc)
+ if (!gma_crtc)
goto set_prop_error;
switch (value) {
@@ -281,11 +280,11 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
centerechange = (val == DRM_MODE_SCALE_NO_SCALE) ||
(value == DRM_MODE_SCALE_NO_SCALE);
- if (psb_crtc->saved_mode.hdisplay != 0 &&
- psb_crtc->saved_mode.vdisplay != 0) {
+ if (gma_crtc->saved_mode.hdisplay != 0 &&
+ gma_crtc->saved_mode.vdisplay != 0) {
if (centerechange) {
if (!drm_crtc_helper_set_mode(encoder->crtc,
- &psb_crtc->saved_mode,
+ &gma_crtc->saved_mode,
encoder->crtc->x,
encoder->crtc->y,
encoder->crtc->fb))
@@ -294,8 +293,8 @@ static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
struct drm_encoder_helper_funcs *funcs =
encoder->helper_private;
funcs->mode_set(encoder,
- &psb_crtc->saved_mode,
- &psb_crtc->saved_adjusted_mode);
+ &gma_crtc->saved_mode,
+ &gma_crtc->saved_adjusted_mode);
}
}
} else if (!strcmp(property->name, "backlight") && encoder) {
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.h b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
index 36eb0744841..45d5af0546b 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_output.h
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.h
@@ -227,7 +227,7 @@ enum {
#define DSI_DPI_DISABLE_BTA BIT(3)
struct mdfld_dsi_connector {
- struct psb_intel_connector base;
+ struct gma_connector base;
int pipe;
void *private;
@@ -238,7 +238,7 @@ struct mdfld_dsi_connector {
};
struct mdfld_dsi_encoder {
- struct psb_intel_encoder base;
+ struct gma_encoder base;
void *private;
};
@@ -269,21 +269,21 @@ struct mdfld_dsi_config {
static inline struct mdfld_dsi_connector *mdfld_dsi_connector(
struct drm_connector *connector)
{
- struct psb_intel_connector *psb_connector;
+ struct gma_connector *gma_connector;
- psb_connector = to_psb_intel_connector(connector);
+ gma_connector = to_gma_connector(connector);
- return container_of(psb_connector, struct mdfld_dsi_connector, base);
+ return container_of(gma_connector, struct mdfld_dsi_connector, base);
}
static inline struct mdfld_dsi_encoder *mdfld_dsi_encoder(
struct drm_encoder *encoder)
{
- struct psb_intel_encoder *psb_encoder;
+ struct gma_encoder *gma_encoder;
- psb_encoder = to_psb_intel_encoder(encoder);
+ gma_encoder = to_gma_encoder(encoder);
- return container_of(psb_encoder, struct mdfld_dsi_encoder, base);
+ return container_of(gma_encoder, struct mdfld_dsi_encoder, base);
}
static inline struct mdfld_dsi_config *
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c
index 74485dc4394..321c00a944e 100644
--- a/drivers/gpu/drm/gma500/mdfld_intel_display.c
+++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c
@@ -23,7 +23,7 @@
#include <drm/drmP.h>
#include "psb_intel_reg.h"
-#include "psb_intel_display.h"
+#include "gma_display.h"
#include "framebuffer.h"
#include "mdfld_output.h"
#include "mdfld_dsi_output.h"
@@ -65,7 +65,7 @@ void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
}
/* FIXME JLIU7_PO */
- psb_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
return;
/* Wait for for the pipe disable to take effect. */
@@ -93,7 +93,7 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
}
/* FIXME JLIU7_PO */
- psb_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
return;
/* Wait for for the pipe enable to take effect. */
@@ -104,25 +104,6 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
}
}
-static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
-{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
-static void psb_intel_crtc_commit(struct drm_crtc *crtc)
-{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-}
-
-static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
/**
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
@@ -184,9 +165,9 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
{
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
- int pipe = psb_intel_crtc->pipe;
+ int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
u32 dspcntr;
@@ -324,8 +305,8 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 pipeconf = dev_priv->pipeconf[pipe];
u32 temp;
@@ -436,7 +417,7 @@ static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
}
}
- psb_intel_crtc_load_lut(crtc);
+ gma_crtc_load_lut(crtc);
/* Give the overlay scaler a chance to enable
if it's on this pipe */
@@ -611,8 +592,8 @@ static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
- || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
+ if (gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
+ || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19];
else if (ksel == KSEL_BYPASS_25)
@@ -624,7 +605,7 @@ static const struct mrst_limit_t *mdfld_limit(struct drm_crtc *crtc)
(dev_priv->core_freq == 100 ||
dev_priv->core_freq == 200))
limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100];
- } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
+ } else if (gma_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
limit = &mdfld_limits[MDFLD_LIMT_DPLL_19];
else if (ksel == KSEL_BYPASS_25)
@@ -688,9 +669,9 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_psb_private *dev_priv = dev->dev_private;
- int pipe = psb_intel_crtc->pipe;
+ int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk = 0;
int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0,
@@ -700,7 +681,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
u32 dpll = 0, fp = 0;
bool is_mipi = false, is_mipi2 = false, is_hdmi = false;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct psb_intel_encoder *psb_intel_encoder = NULL;
+ struct gma_encoder *gma_encoder = NULL;
uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
struct drm_encoder *encoder;
struct drm_connector *connector;
@@ -749,9 +730,9 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
if (!gma_power_begin(dev, true))
return 0;
- memcpy(&psb_intel_crtc->saved_mode, mode,
+ memcpy(&gma_crtc->saved_mode, mode,
sizeof(struct drm_display_mode));
- memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode,
+ memcpy(&gma_crtc->saved_adjusted_mode, adjusted_mode,
sizeof(struct drm_display_mode));
list_for_each_entry(connector, &mode_config->connector_list, head) {
@@ -766,9 +747,9 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
- psb_intel_encoder = psb_intel_attached_encoder(connector);
+ gma_encoder = gma_attached_encoder(connector);
- switch (psb_intel_encoder->type) {
+ switch (gma_encoder->type) {
case INTEL_OUTPUT_MIPI:
is_mipi = true;
break;
@@ -819,7 +800,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
REG_WRITE(map->pos, 0);
- if (psb_intel_encoder)
+ if (gma_encoder)
drm_object_property_get_value(&connector->base,
dev->mode_config.scaling_mode_property, &scalingType);
@@ -1034,7 +1015,7 @@ static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
/* Wait for for the pipe enable to take effect. */
REG_WRITE(map->cntr, dev_priv->dspcntr[pipe]);
- psb_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
mrst_crtc_mode_set_exit:
@@ -1045,10 +1026,10 @@ mrst_crtc_mode_set_exit:
const struct drm_crtc_helper_funcs mdfld_helper_funcs = {
.dpms = mdfld_crtc_dpms,
- .mode_fixup = psb_intel_crtc_mode_fixup,
+ .mode_fixup = gma_crtc_mode_fixup,
.mode_set = mdfld_crtc_mode_set,
.mode_set_base = mdfld__intel_pipe_set_base,
- .prepare = psb_intel_crtc_prepare,
- .commit = psb_intel_crtc_commit,
+ .prepare = gma_crtc_prepare,
+ .commit = gma_crtc_commit,
};
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 3071526bc3c..54c98962b73 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -23,7 +23,7 @@
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "psb_intel_display.h"
+#include "gma_display.h"
#include "power.h"
struct psb_intel_range_t {
@@ -88,8 +88,8 @@ static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
- || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
+ if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+ || gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
switch (dev_priv->core_freq) {
case 100:
limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L];
@@ -163,8 +163,8 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 temp;
@@ -212,7 +212,7 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
REG_WRITE(map->base, REG_READ(map->base));
}
- psb_intel_crtc_load_lut(crtc);
+ gma_crtc_load_lut(crtc);
/* Give the overlay scaler a chance to enable
if it's on this pipe */
@@ -242,7 +242,7 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
REG_READ(map->conf);
}
/* Wait for for the pipe disable to take effect. */
- psb_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
temp = REG_READ(map->dpll);
if ((temp & DPLL_VCO_ENABLE) != 0) {
@@ -292,9 +292,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_psb_private *dev_priv = dev->dev_private;
- int pipe = psb_intel_crtc->pipe;
+ int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk = 0;
struct oaktrail_clock_t clock;
@@ -303,7 +303,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
bool is_lvds = false;
bool is_mipi = false;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct psb_intel_encoder *psb_intel_encoder = NULL;
+ struct gma_encoder *gma_encoder = NULL;
uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
struct drm_connector *connector;
@@ -313,10 +313,10 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
if (!gma_power_begin(dev, true))
return 0;
- memcpy(&psb_intel_crtc->saved_mode,
+ memcpy(&gma_crtc->saved_mode,
mode,
sizeof(struct drm_display_mode));
- memcpy(&psb_intel_crtc->saved_adjusted_mode,
+ memcpy(&gma_crtc->saved_adjusted_mode,
adjusted_mode,
sizeof(struct drm_display_mode));
@@ -324,9 +324,9 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
if (!connector->encoder || connector->encoder->crtc != crtc)
continue;
- psb_intel_encoder = psb_intel_attached_encoder(connector);
+ gma_encoder = gma_attached_encoder(connector);
- switch (psb_intel_encoder->type) {
+ switch (gma_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
@@ -350,7 +350,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
((mode->crtc_hdisplay - 1) << 16) |
(mode->crtc_vdisplay - 1));
- if (psb_intel_encoder)
+ if (gma_encoder)
drm_object_property_get_value(&connector->base,
dev->mode_config.scaling_mode_property, &scalingType);
@@ -484,31 +484,24 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
REG_WRITE(map->conf, pipeconf);
REG_READ(map->conf);
- psb_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
REG_WRITE(map->cntr, dspcntr);
- psb_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
oaktrail_crtc_mode_set_exit:
gma_power_end(dev);
return 0;
}
-static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
- int pipe = psb_intel_crtc->pipe;
+ int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
unsigned long start, offset;
@@ -563,24 +556,12 @@ pipe_set_base_exit:
return ret;
}
-static void oaktrail_crtc_prepare(struct drm_crtc *crtc)
-{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
-static void oaktrail_crtc_commit(struct drm_crtc *crtc)
-{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-}
-
const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
.dpms = oaktrail_crtc_dpms,
- .mode_fixup = oaktrail_crtc_mode_fixup,
+ .mode_fixup = gma_crtc_mode_fixup,
.mode_set = oaktrail_crtc_mode_set,
.mode_set_base = oaktrail_pipe_set_base,
- .prepare = oaktrail_crtc_prepare,
- .commit = oaktrail_crtc_commit,
+ .prepare = gma_crtc_prepare,
+ .commit = gma_crtc_commit,
};
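The mode_fixup/prepare/commit passthroughs dropped above now come from the shared gma_display code, which this series adds but which is not part of this hunk. Assuming the shared copies keep the bodies of the chip-local versions they replace, a minimal sketch of what oaktrail_helper_funcs now points at:

/* Sketch only: shared helpers assumed to mirror the deleted oaktrail ones. */
bool gma_crtc_mode_fixup(struct drm_crtc *crtc,
			 const struct drm_display_mode *mode,
			 struct drm_display_mode *adjusted_mode)
{
	return true;				/* no per-chip mode adjustment */
}

void gma_crtc_prepare(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);	/* blank before mode set */
}

void gma_crtc_commit(struct drm_crtc *crtc)
{
	struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;

	crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);	/* unblank when done */
}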
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index f036f1fc161..38153143ed8 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -155,12 +155,6 @@ static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
HDMI_READ(HDMI_HCR);
}
-static void wait_for_vblank(struct drm_device *dev)
-{
- /* Wait for 20ms, i.e. one cycle at 50hz. */
- mdelay(20);
-}
-
static unsigned int htotal_calculate(struct drm_display_mode *mode)
{
u32 htotal, new_crtc_htotal;
@@ -372,10 +366,10 @@ int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
REG_WRITE(PCH_PIPEBCONF, pipeconf);
REG_READ(PCH_PIPEBCONF);
- wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
REG_WRITE(dspcntr_reg, dspcntr);
- wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
gma_power_end(dev);
@@ -459,7 +453,7 @@ void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
REG_READ(PCH_PIPEBCONF);
}
- wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
/* Enable plane */
temp = REG_READ(DSPBCNTR);
@@ -470,7 +464,7 @@ void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
REG_READ(DSPBSURF);
}
- psb_intel_crtc_load_lut(crtc);
+ gma_crtc_load_lut(crtc);
}
/* DSPARB */
@@ -615,16 +609,16 @@ static void oaktrail_hdmi_destroy(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
.dpms = oaktrail_hdmi_dpms,
.mode_fixup = oaktrail_hdmi_mode_fixup,
- .prepare = psb_intel_encoder_prepare,
+ .prepare = gma_encoder_prepare,
.mode_set = oaktrail_hdmi_mode_set,
- .commit = psb_intel_encoder_commit,
+ .commit = gma_encoder_commit,
};
static const struct drm_connector_helper_funcs
oaktrail_hdmi_connector_helper_funcs = {
.get_modes = oaktrail_hdmi_get_modes,
.mode_valid = oaktrail_hdmi_mode_valid,
- .best_encoder = psb_intel_best_encoder,
+ .best_encoder = gma_best_encoder,
};
static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = {
@@ -646,21 +640,21 @@ static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = {
void oaktrail_hdmi_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
- struct psb_intel_encoder *psb_intel_encoder;
- struct psb_intel_connector *psb_intel_connector;
+ struct gma_encoder *gma_encoder;
+ struct gma_connector *gma_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
- psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
- if (!psb_intel_encoder)
+ gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
+ if (!gma_encoder)
return;
- psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
- if (!psb_intel_connector)
+ gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
+ if (!gma_connector)
goto failed_connector;
- connector = &psb_intel_connector->base;
- encoder = &psb_intel_encoder->base;
+ connector = &gma_connector->base;
+ encoder = &gma_encoder->base;
drm_connector_init(dev, connector,
&oaktrail_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_DVID);
@@ -669,10 +663,9 @@ void oaktrail_hdmi_init(struct drm_device *dev,
&oaktrail_hdmi_enc_funcs,
DRM_MODE_ENCODER_TMDS);
- psb_intel_connector_attach_encoder(psb_intel_connector,
- psb_intel_encoder);
+ gma_connector_attach_encoder(gma_connector, gma_encoder);
- psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
+ gma_encoder->type = INTEL_OUTPUT_HDMI;
drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs);
drm_connector_helper_add(connector, &oaktrail_hdmi_connector_helper_funcs);
@@ -685,7 +678,7 @@ void oaktrail_hdmi_init(struct drm_device *dev,
return;
failed_connector:
- kfree(psb_intel_encoder);
+ kfree(gma_encoder);
}
static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = {
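The HDMI path above now leans on shared helpers that are not shown in this diff: gma_wait_for_vblank() replaces the local wait_for_vblank() wrapper deleted earlier, and gma_encoder_prepare()/gma_encoder_commit() replace the psb_intel_* encoder passthroughs in the helper vtable. A sketch, assuming the shared copies keep the bodies of the versions removed elsewhere in this patch:

void gma_wait_for_vblank(struct drm_device *dev)
{
	/* Wait for 20 ms, i.e. one cycle at 50 Hz. */
	mdelay(20);
}

void gma_encoder_prepare(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;

	/* LVDS has its own prepare; everything else just blanks via DPMS. */
	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}

void gma_encoder_commit(struct drm_encoder *encoder)
{
	struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;

	encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}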
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 325013a9c48..e77d7214fca 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -43,7 +43,7 @@
* Sets the power state for the panel.
*/
static void oaktrail_lvds_set_power(struct drm_device *dev,
- struct psb_intel_encoder *psb_intel_encoder,
+ struct gma_encoder *gma_encoder,
bool on)
{
u32 pp_status;
@@ -78,13 +78,12 @@ static void oaktrail_lvds_set_power(struct drm_device *dev,
static void oaktrail_lvds_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
- struct psb_intel_encoder *psb_intel_encoder =
- to_psb_intel_encoder(encoder);
+ struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
if (mode == DRM_MODE_DPMS_ON)
- oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
+ oaktrail_lvds_set_power(dev, gma_encoder, true);
else
- oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
+ oaktrail_lvds_set_power(dev, gma_encoder, false);
/* XXX: We never power down the LVDS pairs. */
}
@@ -166,8 +165,7 @@ static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_encoder *psb_intel_encoder =
- to_psb_intel_encoder(encoder);
+ struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (!gma_power_begin(dev, true))
@@ -176,7 +174,7 @@ static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
BACKLIGHT_DUTY_CYCLE_MASK);
- oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
+ oaktrail_lvds_set_power(dev, gma_encoder, false);
gma_power_end(dev);
}
@@ -203,14 +201,13 @@ static void oaktrail_lvds_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_encoder *psb_intel_encoder =
- to_psb_intel_encoder(encoder);
+ struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (mode_dev->backlight_duty_cycle == 0)
mode_dev->backlight_duty_cycle =
oaktrail_lvds_get_max_backlight(dev);
- oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
+ oaktrail_lvds_set_power(dev, gma_encoder, true);
}
static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
@@ -325,8 +322,8 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
void oaktrail_lvds_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
- struct psb_intel_encoder *psb_intel_encoder;
- struct psb_intel_connector *psb_intel_connector;
+ struct gma_encoder *gma_encoder;
+ struct gma_connector *gma_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -334,16 +331,16 @@ void oaktrail_lvds_init(struct drm_device *dev,
struct i2c_adapter *i2c_adap;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
- psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
- if (!psb_intel_encoder)
+ gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
+ if (!gma_encoder)
return;
- psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
- if (!psb_intel_connector)
+ gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
+ if (!gma_connector)
goto failed_connector;
- connector = &psb_intel_connector->base;
- encoder = &psb_intel_encoder->base;
+ connector = &gma_connector->base;
+ encoder = &gma_encoder->base;
dev_priv->is_lvds_on = true;
drm_connector_init(dev, connector,
&psb_intel_lvds_connector_funcs,
@@ -352,9 +349,8 @@ void oaktrail_lvds_init(struct drm_device *dev,
drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
- psb_intel_connector_attach_encoder(psb_intel_connector,
- psb_intel_encoder);
- psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+ gma_connector_attach_encoder(gma_connector, gma_encoder);
+ gma_encoder->type = INTEL_OUTPUT_LVDS;
drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs);
drm_connector_helper_add(connector,
@@ -434,15 +430,15 @@ out:
failed_find:
dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
- if (psb_intel_encoder->ddc_bus)
- psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
+ if (gma_encoder->ddc_bus)
+ psb_intel_i2c_destroy(gma_encoder->ddc_bus);
/* failed_ddc: */
drm_encoder_cleanup(encoder);
drm_connector_cleanup(connector);
- kfree(psb_intel_connector);
+ kfree(gma_connector);
failed_connector:
- kfree(psb_intel_encoder);
+ kfree(gma_encoder);
}
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index f6f534b4197..697678619bd 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -25,7 +25,7 @@
#include "psb_reg.h"
#include "psb_intel_reg.h"
#include "intel_bios.h"
-
+#include "psb_device.h"
static int psb_output_init(struct drm_device *dev)
{
@@ -380,6 +380,7 @@ const struct psb_ops psb_chip_ops = {
.crtc_helper = &psb_intel_helper_funcs,
.crtc_funcs = &psb_intel_crtc_funcs,
+ .clock_funcs = &psb_clock_funcs,
.output_init = psb_output_init,
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.h b/drivers/gpu/drm/gma500/psb_device.h
index 3724b971e91..35e304c7f85 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.h
+++ b/drivers/gpu/drm/gma500/psb_device.h
@@ -1,4 +1,6 @@
-/* copyright (c) 2008, Intel Corporation
+/*
+ * Copyright © 2013 Patrik Jakobsson
+ * Copyright © 2011 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,14 +14,11 @@
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
*/
-#ifndef _INTEL_DISPLAY_H_
-#define _INTEL_DISPLAY_H_
+#ifndef _PSB_DEVICE_H_
+#define _PSB_DEVICE_H_
-bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type);
+extern const struct gma_clock_funcs psb_clock_funcs;
#endif
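The renamed header now only exports the chip's clock vtable; struct gma_clock_funcs itself is declared in the new gma_display.h, which is not part of this diff. Its shape can be inferred from the callers below (gma_crtc->clock_funcs->limit(crtc, refclk), psb_intel_clock(), gma_pll_is_valid) -- a sketch under that assumption, not the authoritative definition:

struct gma_clock_funcs {
	void (*clock)(int refclk, struct gma_clock_t *clock);
	const struct gma_limit_t *(*limit)(struct drm_crtc *crtc, int refclk);
	bool (*pll_is_valid)(struct drm_crtc *crtc,
			     const struct gma_limit_t *limit,
			     struct gma_clock_t *clock);
};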
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index bddea580744..fcb4e9ff1f2 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -131,7 +131,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-static struct drm_ioctl_desc psb_ioctls[] = {
+static const struct drm_ioctl_desc psb_ioctls[] = {
DRM_IOCTL_DEF_DRV(GMA_ADB, psb_adb_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(GMA_MODE_OPERATION, psb_mode_operation_ioctl,
DRM_AUTH),
@@ -270,7 +270,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
unsigned long irqflags;
int ret = -ENOMEM;
struct drm_connector *connector;
- struct psb_intel_encoder *psb_intel_encoder;
+ struct gma_encoder *gma_encoder;
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
if (dev_priv == NULL)
@@ -372,9 +372,9 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
/* Only add backlight support if we have LVDS output */
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
- psb_intel_encoder = psb_intel_attached_encoder(connector);
+ gma_encoder = gma_attached_encoder(connector);
- switch (psb_intel_encoder->type) {
+ switch (gma_encoder->type) {
case INTEL_OUTPUT_LVDS:
case INTEL_OUTPUT_MIPI:
ret = gma_backlight_init(dev);
@@ -441,7 +441,7 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct drm_connector *connector;
- struct psb_intel_crtc *psb_intel_crtc;
+ struct gma_crtc *gma_crtc;
int i = 0;
int32_t obj_id;
@@ -454,12 +454,12 @@ static int psb_gamma_ioctl(struct drm_device *dev, void *data,
connector = obj_to_connector(obj);
crtc = connector->encoder->crtc;
- psb_intel_crtc = to_psb_intel_crtc(crtc);
+ gma_crtc = to_gma_crtc(crtc);
for (i = 0; i < 256; i++)
- psb_intel_crtc->lut_adj[i] = lut_arg->lut[i];
+ gma_crtc->lut_adj[i] = lut_arg->lut[i];
- psb_intel_crtc_load_lut(crtc);
+ gma_crtc_load_lut(crtc);
return 0;
}
@@ -622,13 +622,12 @@ static const struct file_operations psb_gem_fops = {
.unlocked_ioctl = psb_unlocked_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
.read = drm_read,
};
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
- DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM ,
+ DRIVER_MODESET | DRIVER_GEM ,
.load = psb_driver_load,
.unload = psb_driver_unload,
@@ -652,7 +651,7 @@ static struct drm_driver driver = {
.gem_vm_ops = &psb_gem_vm_ops,
.dumb_create = psb_gem_dumb_create,
.dumb_map_offset = psb_gem_dumb_map_gtt,
- .dumb_destroy = psb_gem_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.fops = &psb_gem_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 6053b8abcd1..4535ac7708f 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -27,6 +27,7 @@
#include <drm/gma_drm.h>
#include "psb_reg.h"
#include "psb_intel_drv.h"
+#include "gma_display.h"
#include "intel_bios.h"
#include "gtt.h"
#include "power.h"
@@ -46,6 +47,7 @@ enum {
#define IS_PSB(dev) (((dev)->pci_device & 0xfffe) == 0x8108)
#define IS_MRST(dev) (((dev)->pci_device & 0xfffc) == 0x4100)
#define IS_MFLD(dev) (((dev)->pci_device & 0xfff8) == 0x0130)
+#define IS_CDV(dev) (((dev)->pci_device & 0xfff0) == 0x0be0)
/*
* Driver definitions
@@ -675,6 +677,7 @@ struct psb_ops {
/* Sub functions */
struct drm_crtc_helper_funcs const *crtc_helper;
struct drm_crtc_funcs const *crtc_funcs;
+ const struct gma_clock_funcs *clock_funcs;
/* Setup hooks */
int (*chip_setup)(struct drm_device *dev);
@@ -692,6 +695,8 @@ struct psb_ops {
int (*restore_regs)(struct drm_device *dev);
int (*power_up)(struct drm_device *dev);
int (*power_down)(struct drm_device *dev);
+ void (*update_wm)(struct drm_device *dev, struct drm_crtc *crtc);
+ void (*disable_sr)(struct drm_device *dev);
void (*lvds_bl_power)(struct drm_device *dev, bool on);
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
@@ -838,8 +843,6 @@ extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
struct drm_file *file);
extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
-extern int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- uint32_t handle);
extern int psb_gem_dumb_map_gtt(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
extern int psb_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 6666493789d..97f8a03fee4 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -19,46 +19,19 @@
*/
#include <linux/i2c.h>
-#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include "framebuffer.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-#include "psb_intel_display.h"
+#include "gma_display.h"
#include "power.h"
-struct psb_intel_clock_t {
- /* given values */
- int n;
- int m1, m2;
- int p1, p2;
- /* derived values */
- int dot;
- int vco;
- int m;
- int p;
-};
-
-struct psb_intel_range_t {
- int min, max;
-};
-
-struct psb_intel_p2_t {
- int dot_limit;
- int p2_slow, p2_fast;
-};
-
-struct psb_intel_limit_t {
- struct psb_intel_range_t dot, vco, n, m, m1, m2, p, p1;
- struct psb_intel_p2_t p2;
-};
-
#define INTEL_LIMIT_I9XX_SDVO_DAC 0
#define INTEL_LIMIT_I9XX_LVDS 1
-static const struct psb_intel_limit_t psb_intel_limits[] = {
+static const struct gma_limit_t psb_intel_limits[] = {
{ /* INTEL_LIMIT_I9XX_SDVO_DAC */
.dot = {.min = 20000, .max = 400000},
.vco = {.min = 1400000, .max = 2800000},
@@ -68,8 +41,8 @@ static const struct psb_intel_limit_t psb_intel_limits[] = {
.m2 = {.min = 3, .max = 7},
.p = {.min = 5, .max = 80},
.p1 = {.min = 1, .max = 8},
- .p2 = {.dot_limit = 200000,
- .p2_slow = 10, .p2_fast = 5},
+ .p2 = {.dot_limit = 200000, .p2_slow = 10, .p2_fast = 5},
+ .find_pll = gma_find_best_pll,
},
{ /* INTEL_LIMIT_I9XX_LVDS */
.dot = {.min = 20000, .max = 400000},
@@ -83,23 +56,24 @@ static const struct psb_intel_limit_t psb_intel_limits[] = {
/* The single-channel range is 25-112MHz, and dual-channel
* is 80-224MHz. Prefer single channel as much as possible.
*/
- .p2 = {.dot_limit = 112000,
- .p2_slow = 14, .p2_fast = 7},
+ .p2 = {.dot_limit = 112000, .p2_slow = 14, .p2_fast = 7},
+ .find_pll = gma_find_best_pll,
},
};
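The psb_intel_clock_t/range_t/p2_t/limit_t definitions deleted at the top of this file move, renamed, into the shared gma_display.h; the only new field visible here is the per-limit find_pll callback initialized above. Inferred shape of the renamed structures (a sketch based on the removed definitions and the new initializers, not the header itself):

struct gma_clock_t {
	/* given values */
	int n;
	int m1, m2;
	int p1, p2;
	/* derived values */
	int dot;
	int vco;
	int m;
	int p;
};

struct gma_range_t {
	int min, max;
};

struct gma_p2_t {
	int dot_limit;
	int p2_slow, p2_fast;
};

struct gma_limit_t {
	struct gma_range_t dot, vco, n, m, m1, m2, p, p1;
	struct gma_p2_t p2;
	bool (*find_pll)(const struct gma_limit_t *, struct drm_crtc *,
			 int target, int refclk,
			 struct gma_clock_t *best_clock);
};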
-static const struct psb_intel_limit_t *psb_intel_limit(struct drm_crtc *crtc)
+static const struct gma_limit_t *psb_intel_limit(struct drm_crtc *crtc,
+ int refclk)
{
- const struct psb_intel_limit_t *limit;
+ const struct gma_limit_t *limit;
- if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
+ if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &psb_intel_limits[INTEL_LIMIT_I9XX_LVDS];
else
limit = &psb_intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
return limit;
}
-static void psb_intel_clock(int refclk, struct psb_intel_clock_t *clock)
+static void psb_intel_clock(int refclk, struct gma_clock_t *clock)
{
clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
clock->p = clock->p1 * clock->p2;
@@ -108,353 +82,6 @@ static void psb_intel_clock(int refclk, struct psb_intel_clock_t *clock)
}
/**
- * Returns whether any output on the specified pipe is of the specified type
- */
-bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct drm_connector *l_entry;
-
- list_for_each_entry(l_entry, &mode_config->connector_list, head) {
- if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(l_entry);
- if (psb_intel_encoder->type == type)
- return true;
- }
- }
- return false;
-}
-
-#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; }
-/**
- * Returns whether the given set of divisors are valid for a given refclk with
- * the given connectors.
- */
-
-static bool psb_intel_PLL_is_valid(struct drm_crtc *crtc,
- struct psb_intel_clock_t *clock)
-{
- const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
-
- if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
- INTELPllInvalid("p1 out of range\n");
- if (clock->p < limit->p.min || limit->p.max < clock->p)
- INTELPllInvalid("p out of range\n");
- if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
- INTELPllInvalid("m2 out of range\n");
- if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
- INTELPllInvalid("m1 out of range\n");
- if (clock->m1 <= clock->m2)
- INTELPllInvalid("m1 <= m2\n");
- if (clock->m < limit->m.min || limit->m.max < clock->m)
- INTELPllInvalid("m out of range\n");
- if (clock->n < limit->n.min || limit->n.max < clock->n)
- INTELPllInvalid("n out of range\n");
- if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
- INTELPllInvalid("vco out of range\n");
- /* XXX: We may need to be checking "Dot clock"
- * depending on the multiplier, connector, etc.,
- * rather than just a single range.
- */
- if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
- INTELPllInvalid("dot out of range\n");
-
- return true;
-}
-
-/**
- * Returns a set of divisors for the desired target clock with the given
- * refclk, or FALSE. The returned values represent the clock equation:
- * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
- */
-static bool psb_intel_find_best_PLL(struct drm_crtc *crtc, int target,
- int refclk,
- struct psb_intel_clock_t *best_clock)
-{
- struct drm_device *dev = crtc->dev;
- struct psb_intel_clock_t clock;
- const struct psb_intel_limit_t *limit = psb_intel_limit(crtc);
- int err = target;
-
- if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
- (REG_READ(LVDS) & LVDS_PORT_EN) != 0) {
- /*
- * For LVDS, if the panel is on, just rely on its current
- * settings for dual-channel. We haven't figured out how to
- * reliably set up different single/dual channel state, if we
- * even can.
- */
- if ((REG_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
- LVDS_CLKB_POWER_UP)
- clock.p2 = limit->p2.p2_fast;
- else
- clock.p2 = limit->p2.p2_slow;
- } else {
- if (target < limit->p2.dot_limit)
- clock.p2 = limit->p2.p2_slow;
- else
- clock.p2 = limit->p2.p2_fast;
- }
-
- memset(best_clock, 0, sizeof(*best_clock));
-
- for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
- clock.m1++) {
- for (clock.m2 = limit->m2.min;
- clock.m2 < clock.m1 && clock.m2 <= limit->m2.max;
- clock.m2++) {
- for (clock.n = limit->n.min;
- clock.n <= limit->n.max; clock.n++) {
- for (clock.p1 = limit->p1.min;
- clock.p1 <= limit->p1.max;
- clock.p1++) {
- int this_err;
-
- psb_intel_clock(refclk, &clock);
-
- if (!psb_intel_PLL_is_valid
- (crtc, &clock))
- continue;
-
- this_err = abs(clock.dot - target);
- if (this_err < err) {
- *best_clock = clock;
- err = this_err;
- }
- }
- }
- }
- }
-
- return err != target;
-}
-
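The brute-force divisor search removed here survives as gma_find_best_pll(), hooked up through the new .find_pll member above. The equation it evaluates (quoted with a "reflck" typo in the removed comment) is dot = refclk * (5*(m1 + 2) + (m2 + 2)) / (n + 2) / (p1 * p2). As a worked example with refclk = 96000 kHz and, say, m1 = 12, m2 = 5, n = 3, p1 = 4, p2 = 5: m = 5*14 + 7 = 77, vco = 96000 * 77 / 5 = 1,478,400 kHz, and dot = 1,478,400 / 20 = 73,920 kHz, i.e. roughly a 74 MHz pixel clock; the loops keep whichever in-range combination lands closest to the requested mode clock.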
-void psb_intel_wait_for_vblank(struct drm_device *dev)
-{
- /* Wait for 20ms, i.e. one cycle at 50hz. */
- mdelay(20);
-}
-
-static int psb_intel_pipe_set_base(struct drm_crtc *crtc,
- int x, int y, struct drm_framebuffer *old_fb)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
- int pipe = psb_intel_crtc->pipe;
- const struct psb_offset *map = &dev_priv->regmap[pipe];
- unsigned long start, offset;
- u32 dspcntr;
- int ret = 0;
-
- if (!gma_power_begin(dev, true))
- return 0;
-
- /* no fb bound */
- if (!crtc->fb) {
- dev_dbg(dev->dev, "No FB bound\n");
- goto psb_intel_pipe_cleaner;
- }
-
- /* We are displaying this buffer, make sure it is actually loaded
- into the GTT */
- ret = psb_gtt_pin(psbfb->gtt);
- if (ret < 0)
- goto psb_intel_pipe_set_base_exit;
- start = psbfb->gtt->offset;
-
- offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
-
- REG_WRITE(map->stride, crtc->fb->pitches[0]);
-
- dspcntr = REG_READ(map->cntr);
- dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
-
- switch (crtc->fb->bits_per_pixel) {
- case 8:
- dspcntr |= DISPPLANE_8BPP;
- break;
- case 16:
- if (crtc->fb->depth == 15)
- dspcntr |= DISPPLANE_15_16BPP;
- else
- dspcntr |= DISPPLANE_16BPP;
- break;
- case 24:
- case 32:
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
- break;
- default:
- dev_err(dev->dev, "Unknown color depth\n");
- ret = -EINVAL;
- psb_gtt_unpin(psbfb->gtt);
- goto psb_intel_pipe_set_base_exit;
- }
- REG_WRITE(map->cntr, dspcntr);
-
- REG_WRITE(map->base, start + offset);
- REG_READ(map->base);
-
-psb_intel_pipe_cleaner:
- /* If there was a previous display we can now unpin it */
- if (old_fb)
- psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
-
-psb_intel_pipe_set_base_exit:
- gma_power_end(dev);
- return ret;
-}
-
-/**
- * Sets the power management mode of the pipe and plane.
- *
- * This code should probably grow support for turning the cursor off and back
- * on appropriately at the same time as we're turning the pipe off/on.
- */
-static void psb_intel_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
- const struct psb_offset *map = &dev_priv->regmap[pipe];
- u32 temp;
-
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- /* Enable the DPLL */
- temp = REG_READ(map->dpll);
- if ((temp & DPLL_VCO_ENABLE) == 0) {
- REG_WRITE(map->dpll, temp);
- REG_READ(map->dpll);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- REG_WRITE(map->dpll, temp | DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
- /* Wait for the clocks to stabilize. */
- udelay(150);
- }
-
- /* Enable the pipe */
- temp = REG_READ(map->conf);
- if ((temp & PIPEACONF_ENABLE) == 0)
- REG_WRITE(map->conf, temp | PIPEACONF_ENABLE);
-
- /* Enable the plane */
- temp = REG_READ(map->cntr);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(map->cntr,
- temp | DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- REG_WRITE(map->base, REG_READ(map->base));
- }
-
- psb_intel_crtc_load_lut(crtc);
-
- /* Give the overlay scaler a chance to enable
- * if it's on this pipe */
- /* psb_intel_crtc_dpms_video(crtc, true); TODO */
- break;
- case DRM_MODE_DPMS_OFF:
- /* Give the overlay scaler a chance to disable
- * if it's on this pipe */
- /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
-
- /* Disable the VGA plane that we never use */
- REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
-
- /* Disable display plane */
- temp = REG_READ(map->cntr);
- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(map->cntr,
- temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- REG_WRITE(map->base, REG_READ(map->base));
- REG_READ(map->base);
- }
-
- /* Next, disable display pipes */
- temp = REG_READ(map->conf);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- REG_WRITE(map->conf, temp & ~PIPEACONF_ENABLE);
- REG_READ(map->conf);
- }
-
- /* Wait for vblank for the disable to take effect. */
- psb_intel_wait_for_vblank(dev);
-
- temp = REG_READ(map->dpll);
- if ((temp & DPLL_VCO_ENABLE) != 0) {
- REG_WRITE(map->dpll, temp & ~DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
- }
-
- /* Wait for the clocks to turn off. */
- udelay(150);
- break;
- }
-
- /*Set FIFO Watermarks*/
- REG_WRITE(DSPARB, 0x3F3E);
-}
-
-static void psb_intel_crtc_prepare(struct drm_crtc *crtc)
-{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
-static void psb_intel_crtc_commit(struct drm_crtc *crtc)
-{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-}
-
-void psb_intel_encoder_prepare(struct drm_encoder *encoder)
-{
- struct drm_encoder_helper_funcs *encoder_funcs =
- encoder->helper_private;
- /* lvds has its own version of prepare see psb_intel_lvds_prepare */
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-void psb_intel_encoder_commit(struct drm_encoder *encoder)
-{
- struct drm_encoder_helper_funcs *encoder_funcs =
- encoder->helper_private;
- /* lvds has its own version of commit see psb_intel_lvds_commit */
- encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
-}
-
-void psb_intel_encoder_destroy(struct drm_encoder *encoder)
-{
- struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
-
- drm_encoder_cleanup(encoder);
- kfree(intel_encoder);
-}
-
-static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-
-/**
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
@@ -479,17 +106,18 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- int pipe = psb_intel_crtc->pipe;
+ int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk;
- struct psb_intel_clock_t clock;
+ struct gma_clock_t clock;
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
bool ok, is_sdvo = false;
bool is_lvds = false, is_tv = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
+ const struct gma_limit_t *limit;
/* No scan out no play */
if (crtc->fb == NULL) {
@@ -498,14 +126,13 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
}
list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
if (!connector->encoder
|| connector->encoder->crtc != crtc)
continue;
- switch (psb_intel_encoder->type) {
+ switch (gma_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
@@ -520,10 +147,13 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
refclk = 96000;
- ok = psb_intel_find_best_PLL(crtc, adjusted_mode->clock, refclk,
+ limit = gma_crtc->clock_funcs->limit(crtc, refclk);
+
+ ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk,
&clock);
if (!ok) {
- dev_err(dev->dev, "Couldn't find PLL settings for mode!\n");
+ DRM_ERROR("Couldn't find PLL settings for mode! target: %d, actual: %d",
+ adjusted_mode->clock, clock.dot);
return 0;
}
@@ -661,368 +291,29 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
REG_WRITE(map->conf, pipeconf);
REG_READ(map->conf);
- psb_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
REG_WRITE(map->cntr, dspcntr);
/* Flush the plane changes */
crtc_funcs->mode_set_base(crtc, x, y, old_fb);
- psb_intel_wait_for_vblank(dev);
-
- return 0;
-}
-
-/** Loads the palette/gamma unit for the CRTC with the prepared values */
-void psb_intel_crtc_load_lut(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
- int palreg = map->palette;
- int i;
-
- /* The clocks have to be on to load the palette. */
- if (!crtc->enabled)
- return;
-
- switch (psb_intel_crtc->pipe) {
- case 0:
- case 1:
- break;
- default:
- dev_err(dev->dev, "Illegal Pipe Number.\n");
- return;
- }
-
- if (gma_power_begin(dev, false)) {
- for (i = 0; i < 256; i++) {
- REG_WRITE(palreg + 4 * i,
- ((psb_intel_crtc->lut_r[i] +
- psb_intel_crtc->lut_adj[i]) << 16) |
- ((psb_intel_crtc->lut_g[i] +
- psb_intel_crtc->lut_adj[i]) << 8) |
- (psb_intel_crtc->lut_b[i] +
- psb_intel_crtc->lut_adj[i]));
- }
- gma_power_end(dev);
- } else {
- for (i = 0; i < 256; i++) {
- dev_priv->regs.pipe[0].palette[i] =
- ((psb_intel_crtc->lut_r[i] +
- psb_intel_crtc->lut_adj[i]) << 16) |
- ((psb_intel_crtc->lut_g[i] +
- psb_intel_crtc->lut_adj[i]) << 8) |
- (psb_intel_crtc->lut_b[i] +
- psb_intel_crtc->lut_adj[i]);
- }
-
- }
-}
-
-/**
- * Save HW states of giving crtc
- */
-static void psb_intel_crtc_save(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
- uint32_t paletteReg;
- int i;
-
- if (!crtc_state) {
- dev_err(dev->dev, "No CRTC state found\n");
- return;
- }
-
- crtc_state->saveDSPCNTR = REG_READ(map->cntr);
- crtc_state->savePIPECONF = REG_READ(map->conf);
- crtc_state->savePIPESRC = REG_READ(map->src);
- crtc_state->saveFP0 = REG_READ(map->fp0);
- crtc_state->saveFP1 = REG_READ(map->fp1);
- crtc_state->saveDPLL = REG_READ(map->dpll);
- crtc_state->saveHTOTAL = REG_READ(map->htotal);
- crtc_state->saveHBLANK = REG_READ(map->hblank);
- crtc_state->saveHSYNC = REG_READ(map->hsync);
- crtc_state->saveVTOTAL = REG_READ(map->vtotal);
- crtc_state->saveVBLANK = REG_READ(map->vblank);
- crtc_state->saveVSYNC = REG_READ(map->vsync);
- crtc_state->saveDSPSTRIDE = REG_READ(map->stride);
-
- /*NOTE: DSPSIZE DSPPOS only for psb*/
- crtc_state->saveDSPSIZE = REG_READ(map->size);
- crtc_state->saveDSPPOS = REG_READ(map->pos);
-
- crtc_state->saveDSPBASE = REG_READ(map->base);
-
- paletteReg = map->palette;
- for (i = 0; i < 256; ++i)
- crtc_state->savePalette[i] = REG_READ(paletteReg + (i << 2));
-}
-
-/**
- * Restore HW states of giving crtc
- */
-static void psb_intel_crtc_restore(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- struct psb_intel_crtc_state *crtc_state = psb_intel_crtc->crtc_state;
- const struct psb_offset *map = &dev_priv->regmap[psb_intel_crtc->pipe];
- uint32_t paletteReg;
- int i;
-
- if (!crtc_state) {
- dev_err(dev->dev, "No crtc state\n");
- return;
- }
-
- if (crtc_state->saveDPLL & DPLL_VCO_ENABLE) {
- REG_WRITE(map->dpll,
- crtc_state->saveDPLL & ~DPLL_VCO_ENABLE);
- REG_READ(map->dpll);
- udelay(150);
- }
-
- REG_WRITE(map->fp0, crtc_state->saveFP0);
- REG_READ(map->fp0);
-
- REG_WRITE(map->fp1, crtc_state->saveFP1);
- REG_READ(map->fp1);
-
- REG_WRITE(map->dpll, crtc_state->saveDPLL);
- REG_READ(map->dpll);
- udelay(150);
-
- REG_WRITE(map->htotal, crtc_state->saveHTOTAL);
- REG_WRITE(map->hblank, crtc_state->saveHBLANK);
- REG_WRITE(map->hsync, crtc_state->saveHSYNC);
- REG_WRITE(map->vtotal, crtc_state->saveVTOTAL);
- REG_WRITE(map->vblank, crtc_state->saveVBLANK);
- REG_WRITE(map->vsync, crtc_state->saveVSYNC);
- REG_WRITE(map->stride, crtc_state->saveDSPSTRIDE);
-
- REG_WRITE(map->size, crtc_state->saveDSPSIZE);
- REG_WRITE(map->pos, crtc_state->saveDSPPOS);
-
- REG_WRITE(map->src, crtc_state->savePIPESRC);
- REG_WRITE(map->base, crtc_state->saveDSPBASE);
- REG_WRITE(map->conf, crtc_state->savePIPECONF);
-
- psb_intel_wait_for_vblank(dev);
-
- REG_WRITE(map->cntr, crtc_state->saveDSPCNTR);
- REG_WRITE(map->base, crtc_state->saveDSPBASE);
-
- psb_intel_wait_for_vblank(dev);
-
- paletteReg = map->palette;
- for (i = 0; i < 256; ++i)
- REG_WRITE(paletteReg + (i << 2), crtc_state->savePalette[i]);
-}
-
-static int psb_intel_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width, uint32_t height)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
- uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
- uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
- uint32_t temp;
- size_t addr = 0;
- struct gtt_range *gt;
- struct gtt_range *cursor_gt = psb_intel_crtc->cursor_gt;
- struct drm_gem_object *obj;
- void *tmp_dst, *tmp_src;
- int ret = 0, i, cursor_pages;
-
- /* if we want to turn of the cursor ignore width and height */
- if (!handle) {
- /* turn off the cursor */
- temp = CURSOR_MODE_DISABLE;
-
- if (gma_power_begin(dev, false)) {
- REG_WRITE(control, temp);
- REG_WRITE(base, 0);
- gma_power_end(dev);
- }
-
- /* Unpin the old GEM object */
- if (psb_intel_crtc->cursor_obj) {
- gt = container_of(psb_intel_crtc->cursor_obj,
- struct gtt_range, gem);
- psb_gtt_unpin(gt);
- drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
- psb_intel_crtc->cursor_obj = NULL;
- }
-
- return 0;
- }
-
- /* Currently we only support 64x64 cursors */
- if (width != 64 || height != 64) {
- dev_dbg(dev->dev, "we currently only support 64x64 cursors\n");
- return -EINVAL;
- }
-
- obj = drm_gem_object_lookup(dev, file_priv, handle);
- if (!obj)
- return -ENOENT;
-
- if (obj->size < width * height * 4) {
- dev_dbg(dev->dev, "buffer is to small\n");
- ret = -ENOMEM;
- goto unref_cursor;
- }
-
- gt = container_of(obj, struct gtt_range, gem);
-
- /* Pin the memory into the GTT */
- ret = psb_gtt_pin(gt);
- if (ret) {
- dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
- goto unref_cursor;
- }
-
- if (dev_priv->ops->cursor_needs_phys) {
- if (cursor_gt == NULL) {
- dev_err(dev->dev, "No hardware cursor mem available");
- ret = -ENOMEM;
- goto unref_cursor;
- }
-
- /* Prevent overflow */
- if (gt->npage > 4)
- cursor_pages = 4;
- else
- cursor_pages = gt->npage;
-
- /* Copy the cursor to cursor mem */
- tmp_dst = dev_priv->vram_addr + cursor_gt->offset;
- for (i = 0; i < cursor_pages; i++) {
- tmp_src = kmap(gt->pages[i]);
- memcpy(tmp_dst, tmp_src, PAGE_SIZE);
- kunmap(gt->pages[i]);
- tmp_dst += PAGE_SIZE;
- }
-
- addr = psb_intel_crtc->cursor_addr;
- } else {
- addr = gt->offset; /* Or resource.start ??? */
- psb_intel_crtc->cursor_addr = addr;
- }
-
- temp = 0;
- /* set the pipe for the cursor */
- temp |= (pipe << 28);
- temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
-
- if (gma_power_begin(dev, false)) {
- REG_WRITE(control, temp);
- REG_WRITE(base, addr);
- gma_power_end(dev);
- }
-
- /* unpin the old bo */
- if (psb_intel_crtc->cursor_obj) {
- gt = container_of(psb_intel_crtc->cursor_obj,
- struct gtt_range, gem);
- psb_gtt_unpin(gt);
- drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
- }
-
- psb_intel_crtc->cursor_obj = obj;
- return ret;
-
-unref_cursor:
- drm_gem_object_unreference(obj);
- return ret;
-}
-
-static int psb_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct drm_device *dev = crtc->dev;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
- uint32_t temp = 0;
- uint32_t addr;
-
-
- if (x < 0) {
- temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
- x = -x;
- }
- if (y < 0) {
- temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
- y = -y;
- }
-
- temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
- temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
-
- addr = psb_intel_crtc->cursor_addr;
+ gma_wait_for_vblank(dev);
- if (gma_power_begin(dev, false)) {
- REG_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
- REG_WRITE((pipe == 0) ? CURABASE : CURBBASE, addr);
- gma_power_end(dev);
- }
return 0;
}
-static void psb_intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
- u16 *green, u16 *blue, uint32_t type, uint32_t size)
-{
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int i;
-
- if (size != 256)
- return;
-
- for (i = 0; i < 256; i++) {
- psb_intel_crtc->lut_r[i] = red[i] >> 8;
- psb_intel_crtc->lut_g[i] = green[i] >> 8;
- psb_intel_crtc->lut_b[i] = blue[i] >> 8;
- }
-
- psb_intel_crtc_load_lut(crtc);
-}
-
-static int psb_crtc_set_config(struct drm_mode_set *set)
-{
- int ret;
- struct drm_device *dev = set->crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- if (!dev_priv->rpm_enabled)
- return drm_crtc_helper_set_config(set);
-
- pm_runtime_forbid(&dev->pdev->dev);
- ret = drm_crtc_helper_set_config(set);
- pm_runtime_allow(&dev->pdev->dev);
- return ret;
-}
-
/* Returns the clock of the currently programmed mode of the given pipe. */
static int psb_intel_crtc_clock_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_psb_private *dev_priv = dev->dev_private;
- int pipe = psb_intel_crtc->pipe;
+ int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 dpll;
u32 fp;
- struct psb_intel_clock_t clock;
+ struct gma_clock_t clock;
bool is_lvds;
struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
@@ -1092,8 +383,8 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ int pipe = gma_crtc->pipe;
struct drm_display_mode *mode;
int htot;
int hsync;
@@ -1136,58 +427,30 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
return mode;
}
-static void psb_intel_crtc_destroy(struct drm_crtc *crtc)
-{
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- struct gtt_range *gt;
-
- /* Unpin the old GEM object */
- if (psb_intel_crtc->cursor_obj) {
- gt = container_of(psb_intel_crtc->cursor_obj,
- struct gtt_range, gem);
- psb_gtt_unpin(gt);
- drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
- psb_intel_crtc->cursor_obj = NULL;
- }
-
- if (psb_intel_crtc->cursor_gt != NULL)
- psb_gtt_free_range(crtc->dev, psb_intel_crtc->cursor_gt);
- kfree(psb_intel_crtc->crtc_state);
- drm_crtc_cleanup(crtc);
- kfree(psb_intel_crtc);
-}
-
-static void psb_intel_crtc_disable(struct drm_crtc *crtc)
-{
- struct gtt_range *gt;
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-
- if (crtc->fb) {
- gt = to_psb_fb(crtc->fb)->gtt;
- psb_gtt_unpin(gt);
- }
-}
-
const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
- .dpms = psb_intel_crtc_dpms,
- .mode_fixup = psb_intel_crtc_mode_fixup,
+ .dpms = gma_crtc_dpms,
+ .mode_fixup = gma_crtc_mode_fixup,
.mode_set = psb_intel_crtc_mode_set,
- .mode_set_base = psb_intel_pipe_set_base,
- .prepare = psb_intel_crtc_prepare,
- .commit = psb_intel_crtc_commit,
- .disable = psb_intel_crtc_disable,
+ .mode_set_base = gma_pipe_set_base,
+ .prepare = gma_crtc_prepare,
+ .commit = gma_crtc_commit,
+ .disable = gma_crtc_disable,
};
const struct drm_crtc_funcs psb_intel_crtc_funcs = {
- .save = psb_intel_crtc_save,
- .restore = psb_intel_crtc_restore,
- .cursor_set = psb_intel_crtc_cursor_set,
- .cursor_move = psb_intel_crtc_cursor_move,
- .gamma_set = psb_intel_crtc_gamma_set,
- .set_config = psb_crtc_set_config,
- .destroy = psb_intel_crtc_destroy,
+ .save = gma_crtc_save,
+ .restore = gma_crtc_restore,
+ .cursor_set = gma_crtc_cursor_set,
+ .cursor_move = gma_crtc_cursor_move,
+ .gamma_set = gma_crtc_gamma_set,
+ .set_config = gma_crtc_set_config,
+ .destroy = gma_crtc_destroy,
+};
+
+const struct gma_clock_funcs psb_clock_funcs = {
+ .clock = psb_intel_clock,
+ .limit = psb_intel_limit,
+ .pll_is_valid = gma_pll_is_valid,
};
/*
@@ -1195,7 +458,7 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = {
* to zero. This is a workaround for a h/w defect on Oaktrail
*/
static void psb_intel_cursor_init(struct drm_device *dev,
- struct psb_intel_crtc *psb_intel_crtc)
+ struct gma_crtc *gma_crtc)
{
struct drm_psb_private *dev_priv = dev->dev_private;
u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
@@ -1208,88 +471,87 @@ static void psb_intel_cursor_init(struct drm_device *dev,
*/
cursor_gt = psb_gtt_alloc_range(dev, 4 * PAGE_SIZE, "cursor", 1);
if (!cursor_gt) {
- psb_intel_crtc->cursor_gt = NULL;
+ gma_crtc->cursor_gt = NULL;
goto out;
}
- psb_intel_crtc->cursor_gt = cursor_gt;
- psb_intel_crtc->cursor_addr = dev_priv->stolen_base +
+ gma_crtc->cursor_gt = cursor_gt;
+ gma_crtc->cursor_addr = dev_priv->stolen_base +
cursor_gt->offset;
} else {
- psb_intel_crtc->cursor_gt = NULL;
+ gma_crtc->cursor_gt = NULL;
}
out:
- REG_WRITE(control[psb_intel_crtc->pipe], 0);
- REG_WRITE(base[psb_intel_crtc->pipe], 0);
+ REG_WRITE(control[gma_crtc->pipe], 0);
+ REG_WRITE(base[gma_crtc->pipe], 0);
}
void psb_intel_crtc_init(struct drm_device *dev, int pipe,
struct psb_intel_mode_device *mode_dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc;
+ struct gma_crtc *gma_crtc;
int i;
uint16_t *r_base, *g_base, *b_base;
/* We allocate an extra array of drm_connector pointers
* for fbdev after the crtc */
- psb_intel_crtc =
- kzalloc(sizeof(struct psb_intel_crtc) +
- (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
- GFP_KERNEL);
- if (psb_intel_crtc == NULL)
+ gma_crtc = kzalloc(sizeof(struct gma_crtc) +
+ (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)),
+ GFP_KERNEL);
+ if (gma_crtc == NULL)
return;
- psb_intel_crtc->crtc_state =
+ gma_crtc->crtc_state =
kzalloc(sizeof(struct psb_intel_crtc_state), GFP_KERNEL);
- if (!psb_intel_crtc->crtc_state) {
+ if (!gma_crtc->crtc_state) {
dev_err(dev->dev, "Crtc state error: No memory\n");
- kfree(psb_intel_crtc);
+ kfree(gma_crtc);
return;
}
/* Set the CRTC operations from the chip specific data */
- drm_crtc_init(dev, &psb_intel_crtc->base, dev_priv->ops->crtc_funcs);
+ drm_crtc_init(dev, &gma_crtc->base, dev_priv->ops->crtc_funcs);
- drm_mode_crtc_set_gamma_size(&psb_intel_crtc->base, 256);
- psb_intel_crtc->pipe = pipe;
- psb_intel_crtc->plane = pipe;
+ /* Set the CRTC clock functions from chip specific data */
+ gma_crtc->clock_funcs = dev_priv->ops->clock_funcs;
- r_base = psb_intel_crtc->base.gamma_store;
+ drm_mode_crtc_set_gamma_size(&gma_crtc->base, 256);
+ gma_crtc->pipe = pipe;
+ gma_crtc->plane = pipe;
+
+ r_base = gma_crtc->base.gamma_store;
g_base = r_base + 256;
b_base = g_base + 256;
for (i = 0; i < 256; i++) {
- psb_intel_crtc->lut_r[i] = i;
- psb_intel_crtc->lut_g[i] = i;
- psb_intel_crtc->lut_b[i] = i;
+ gma_crtc->lut_r[i] = i;
+ gma_crtc->lut_g[i] = i;
+ gma_crtc->lut_b[i] = i;
r_base[i] = i << 8;
g_base[i] = i << 8;
b_base[i] = i << 8;
- psb_intel_crtc->lut_adj[i] = 0;
+ gma_crtc->lut_adj[i] = 0;
}
- psb_intel_crtc->mode_dev = mode_dev;
- psb_intel_crtc->cursor_addr = 0;
+ gma_crtc->mode_dev = mode_dev;
+ gma_crtc->cursor_addr = 0;
- drm_crtc_helper_add(&psb_intel_crtc->base,
+ drm_crtc_helper_add(&gma_crtc->base,
dev_priv->ops->crtc_helper);
/* Setup the array of drm_connector pointer array */
- psb_intel_crtc->mode_set.crtc = &psb_intel_crtc->base;
+ gma_crtc->mode_set.crtc = &gma_crtc->base;
BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
- dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] != NULL);
- dev_priv->plane_to_crtc_mapping[psb_intel_crtc->plane] =
- &psb_intel_crtc->base;
- dev_priv->pipe_to_crtc_mapping[psb_intel_crtc->pipe] =
- &psb_intel_crtc->base;
- psb_intel_crtc->mode_set.connectors =
- (struct drm_connector **) (psb_intel_crtc + 1);
- psb_intel_crtc->mode_set.num_connectors = 0;
- psb_intel_cursor_init(dev, psb_intel_crtc);
+ dev_priv->plane_to_crtc_mapping[gma_crtc->plane] != NULL);
+ dev_priv->plane_to_crtc_mapping[gma_crtc->plane] = &gma_crtc->base;
+ dev_priv->pipe_to_crtc_mapping[gma_crtc->pipe] = &gma_crtc->base;
+ gma_crtc->mode_set.connectors = (struct drm_connector **)(gma_crtc + 1);
+ gma_crtc->mode_set.num_connectors = 0;
+ psb_intel_cursor_init(dev, gma_crtc);
/* Set to true so that the pipe is forced off on initial config. */
- psb_intel_crtc->active = true;
+ gma_crtc->active = true;
}
int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -1298,7 +560,7 @@ int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_psb_private *dev_priv = dev->dev_private;
struct drm_psb_get_pipe_from_crtc_id_arg *pipe_from_crtc_id = data;
struct drm_mode_object *drmmode_obj;
- struct psb_intel_crtc *crtc;
+ struct gma_crtc *crtc;
if (!dev_priv) {
dev_err(dev->dev, "called with no initialization\n");
@@ -1313,7 +575,7 @@ int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
return -EINVAL;
}
- crtc = to_psb_intel_crtc(obj_to_crtc(drmmode_obj));
+ crtc = to_gma_crtc(obj_to_crtc(drmmode_obj));
pipe_from_crtc_id->pipe = crtc->pipe;
return 0;
@@ -1324,14 +586,14 @@ struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
struct drm_crtc *crtc = NULL;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- if (psb_intel_crtc->pipe == pipe)
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ if (gma_crtc->pipe == pipe)
break;
}
return crtc;
}
-int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
+int gma_connector_clones(struct drm_device *dev, int type_mask)
{
int index_mask = 0;
struct drm_connector *connector;
@@ -1339,30 +601,10 @@ int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
- if (type_mask & (1 << psb_intel_encoder->type))
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ if (type_mask & (1 << gma_encoder->type))
index_mask |= (1 << entry);
entry++;
}
return index_mask;
}
-
-/* current intel driver doesn't take advantage of encoders
- always give back the encoder for the connector
-*/
-struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
-{
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
-
- return &psb_intel_encoder->base;
-}
-
-void psb_intel_connector_attach_encoder(struct psb_intel_connector *connector,
- struct psb_intel_encoder *encoder)
-{
- connector->encoder = encoder;
- drm_mode_connector_attach_encoder(&connector->base,
- &encoder->base);
-}
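The two connector helpers dropped at the end of this file are what the rest of the patch now calls gma_best_encoder() and gma_connector_attach_encoder(). Assuming the shared copies in gma_display.c (not shown here) keep the same bodies, a sketch:

struct drm_encoder *gma_best_encoder(struct drm_connector *connector)
{
	/* This driver keeps a fixed 1:1 connector/encoder mapping, so the
	 * attached encoder is always the one to use. */
	struct gma_encoder *gma_encoder = gma_attached_encoder(connector);

	return &gma_encoder->base;
}

void gma_connector_attach_encoder(struct gma_connector *connector,
				  struct gma_encoder *encoder)
{
	connector->encoder = encoder;
	drm_mode_connector_attach_encoder(&connector->base, &encoder->base);
}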
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 4dcae421a58..bde27fdb41b 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -24,6 +24,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <linux/gpio.h>
+#include "gma_display.h"
/*
* Display related stuff
@@ -116,11 +117,11 @@ struct psb_intel_i2c_chan {
u8 slave_addr;
};
-struct psb_intel_encoder {
+struct gma_encoder {
struct drm_encoder base;
int type;
bool needs_tv_clock;
- void (*hot_plug)(struct psb_intel_encoder *);
+ void (*hot_plug)(struct gma_encoder *);
int crtc_mask;
int clone_mask;
u32 ddi_select; /* Channel info */
@@ -136,9 +137,9 @@ struct psb_intel_encoder {
struct psb_intel_i2c_chan *ddc_bus;
};
-struct psb_intel_connector {
+struct gma_connector {
struct drm_connector base;
- struct psb_intel_encoder *encoder;
+ struct gma_encoder *encoder;
};
struct psb_intel_crtc_state {
@@ -161,7 +162,7 @@ struct psb_intel_crtc_state {
uint32_t savePalette[256];
};
-struct psb_intel_crtc {
+struct gma_crtc {
struct drm_crtc base;
int pipe;
int plane;
@@ -188,14 +189,16 @@ struct psb_intel_crtc {
/* Saved Crtc HW states */
struct psb_intel_crtc_state *crtc_state;
+
+ const struct gma_clock_funcs *clock_funcs;
};
-#define to_psb_intel_crtc(x) \
- container_of(x, struct psb_intel_crtc, base)
-#define to_psb_intel_connector(x) \
- container_of(x, struct psb_intel_connector, base)
-#define to_psb_intel_encoder(x) \
- container_of(x, struct psb_intel_encoder, base)
+#define to_gma_crtc(x) \
+ container_of(x, struct gma_crtc, base)
+#define to_gma_connector(x) \
+ container_of(x, struct gma_connector, base)
+#define to_gma_encoder(x) \
+ container_of(x, struct gma_encoder, base)
#define to_psb_intel_framebuffer(x) \
container_of(x, struct psb_intel_framebuffer, base)
@@ -223,27 +226,18 @@ extern void oaktrail_dsi_init(struct drm_device *dev,
extern void mid_dsi_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev, int dsi_num);
-extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
-extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
-extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
-extern void psb_intel_encoder_destroy(struct drm_encoder *encoder);
+extern struct drm_encoder *gma_best_encoder(struct drm_connector *connector);
+extern void gma_connector_attach_encoder(struct gma_connector *connector,
+ struct gma_encoder *encoder);
-static inline struct psb_intel_encoder *psb_intel_attached_encoder(
+static inline struct gma_encoder *gma_attached_encoder(
struct drm_connector *connector)
{
- return to_psb_intel_connector(connector)->encoder;
+ return to_gma_connector(connector)->encoder;
}
-extern void psb_intel_connector_attach_encoder(
- struct psb_intel_connector *connector,
- struct psb_intel_encoder *encoder);
-
-extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
- *connector);
-
extern struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
-extern void psb_intel_wait_for_vblank(struct drm_device *dev);
extern int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern struct drm_crtc *psb_intel_get_crtc_from_pipe(struct drm_device *dev,
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 9fa5fa2e619..32342f6990d 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -267,10 +267,9 @@ static void psb_intel_lvds_save(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_psb_private *dev_priv =
(struct drm_psb_private *)dev->dev_private;
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct psb_intel_lvds_priv *lvds_priv =
- (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
+ (struct psb_intel_lvds_priv *)gma_encoder->dev_priv;
lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
@@ -307,10 +306,9 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
u32 pp_status;
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct psb_intel_lvds_priv *lvds_priv =
- (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
+ (struct psb_intel_lvds_priv *)gma_encoder->dev_priv;
dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
lvds_priv->savePP_ON,
@@ -349,12 +347,11 @@ int psb_intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_psb_private *dev_priv = connector->dev->dev_private;
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct drm_display_mode *fixed_mode =
dev_priv->mode_dev.panel_fixed_mode;
- if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
+ if (gma_encoder->type == INTEL_OUTPUT_MIPI2)
fixed_mode = dev_priv->mode_dev.panel_fixed_mode2;
/* just in case */
@@ -381,22 +378,20 @@ bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
- struct psb_intel_crtc *psb_intel_crtc =
- to_psb_intel_crtc(encoder->crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
struct drm_encoder *tmp_encoder;
struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
- struct psb_intel_encoder *psb_intel_encoder =
- to_psb_intel_encoder(encoder);
+ struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
- if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
+ if (gma_encoder->type == INTEL_OUTPUT_MIPI2)
panel_fixed_mode = mode_dev->panel_fixed_mode2;
/* PSB requires the LVDS is on pipe B, MRST has only one pipe anyway */
- if (!IS_MRST(dev) && psb_intel_crtc->pipe == 0) {
+ if (!IS_MRST(dev) && gma_crtc->pipe == 0) {
printk(KERN_ERR "Can't support LVDS on pipe A\n");
return false;
}
- if (IS_MRST(dev) && psb_intel_crtc->pipe != 0) {
+ if (IS_MRST(dev) && gma_crtc->pipe != 0) {
printk(KERN_ERR "Must use PIPE A\n");
return false;
}
@@ -525,9 +520,8 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
- struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
int ret = 0;
if (!IS_MRST(dev))
@@ -564,9 +558,8 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
*/
void psb_intel_lvds_destroy(struct drm_connector *connector)
{
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
- struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
if (lvds_priv->ddc_bus)
psb_intel_i2c_destroy(lvds_priv->ddc_bus);
@@ -585,8 +578,7 @@ int psb_intel_lvds_set_property(struct drm_connector *connector,
return -1;
if (!strcmp(property->name, "scaling mode")) {
- struct psb_intel_crtc *crtc =
- to_psb_intel_crtc(encoder->crtc);
+ struct gma_crtc *crtc = to_gma_crtc(encoder->crtc);
uint64_t curval;
if (!crtc)
@@ -656,7 +648,7 @@ const struct drm_connector_helper_funcs
psb_intel_lvds_connector_helper_funcs = {
.get_modes = psb_intel_lvds_get_modes,
.mode_valid = psb_intel_lvds_mode_valid,
- .best_encoder = psb_intel_best_encoder,
+ .best_encoder = gma_best_encoder,
};
const struct drm_connector_funcs psb_intel_lvds_connector_funcs = {
@@ -691,8 +683,8 @@ const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
void psb_intel_lvds_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
- struct psb_intel_encoder *psb_intel_encoder;
- struct psb_intel_connector *psb_intel_connector;
+ struct gma_encoder *gma_encoder;
+ struct gma_connector *gma_connector;
struct psb_intel_lvds_priv *lvds_priv;
struct drm_connector *connector;
struct drm_encoder *encoder;
@@ -702,17 +694,15 @@ void psb_intel_lvds_init(struct drm_device *dev,
u32 lvds;
int pipe;
- psb_intel_encoder =
- kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
- if (!psb_intel_encoder) {
- dev_err(dev->dev, "psb_intel_encoder allocation error\n");
+ gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
+ if (!gma_encoder) {
+ dev_err(dev->dev, "gma_encoder allocation error\n");
return;
}
- psb_intel_connector =
- kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
- if (!psb_intel_connector) {
- dev_err(dev->dev, "psb_intel_connector allocation error\n");
+ gma_connector = kzalloc(sizeof(struct gma_connector), GFP_KERNEL);
+ if (!gma_connector) {
+ dev_err(dev->dev, "gma_connector allocation error\n");
goto failed_encoder;
}
@@ -722,10 +712,10 @@ void psb_intel_lvds_init(struct drm_device *dev,
goto failed_connector;
}
- psb_intel_encoder->dev_priv = lvds_priv;
+ gma_encoder->dev_priv = lvds_priv;
- connector = &psb_intel_connector->base;
- encoder = &psb_intel_encoder->base;
+ connector = &gma_connector->base;
+ encoder = &gma_encoder->base;
drm_connector_init(dev, connector,
&psb_intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
@@ -734,9 +724,8 @@ void psb_intel_lvds_init(struct drm_device *dev,
&psb_intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
- psb_intel_connector_attach_encoder(psb_intel_connector,
- psb_intel_encoder);
- psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
+ gma_connector_attach_encoder(gma_connector, gma_encoder);
+ gma_encoder->type = INTEL_OUTPUT_LVDS;
drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
drm_connector_helper_add(connector,
@@ -851,8 +840,8 @@ failed_blc_i2c:
drm_encoder_cleanup(encoder);
drm_connector_cleanup(connector);
failed_connector:
- kfree(psb_intel_connector);
+ kfree(gma_connector);
failed_encoder:
- kfree(psb_intel_encoder);
+ kfree(gma_encoder);
}
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 19e36603b23..6f01cdf5e12 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -65,7 +65,7 @@ static const char *tv_format_names[] = {
#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
struct psb_intel_sdvo {
- struct psb_intel_encoder base;
+ struct gma_encoder base;
struct i2c_adapter *i2c;
u8 slave_addr;
@@ -140,7 +140,7 @@ struct psb_intel_sdvo {
};
struct psb_intel_sdvo_connector {
- struct psb_intel_connector base;
+ struct gma_connector base;
/* Mark the type of connector */
uint16_t output_flag;
@@ -200,13 +200,13 @@ static struct psb_intel_sdvo *to_psb_intel_sdvo(struct drm_encoder *encoder)
static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
{
- return container_of(psb_intel_attached_encoder(connector),
+ return container_of(gma_attached_encoder(connector),
struct psb_intel_sdvo, base);
}
static struct psb_intel_sdvo_connector *to_psb_intel_sdvo_connector(struct drm_connector *connector)
{
- return container_of(to_psb_intel_connector(connector), struct psb_intel_sdvo_connector, base);
+ return container_of(to_gma_connector(connector), struct psb_intel_sdvo_connector, base);
}
static bool
@@ -500,7 +500,8 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
&status))
goto log_fail;
- while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+ while ((status == SDVO_CMD_STATUS_PENDING ||
+ status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && retry--) {
udelay(15);
if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
SDVO_I2C_CMD_STATUS,
@@ -987,7 +988,7 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
{
struct drm_device *dev = encoder->dev;
struct drm_crtc *crtc = encoder->crtc;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
u32 sdvox;
struct psb_intel_sdvo_in_out_map in_out;
@@ -1070,7 +1071,7 @@ static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
}
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
- if (psb_intel_crtc->pipe == 1)
+ if (gma_crtc->pipe == 1)
sdvox |= SDVO_PIPE_B_SELECT;
if (psb_intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
@@ -1121,7 +1122,7 @@ static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
if ((temp & SDVO_ENABLE) == 0)
psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
for (i = 0; i < 2; i++)
- psb_intel_wait_for_vblank(dev);
+ gma_wait_for_vblank(dev);
status = psb_intel_sdvo_get_trained_inputs(psb_intel_sdvo, &input1, &input2);
/* Warn if the device reported failure to sync.
@@ -1836,10 +1837,8 @@ done:
static void psb_intel_sdvo_save(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct psb_intel_encoder *psb_intel_encoder =
- psb_intel_attached_encoder(connector);
- struct psb_intel_sdvo *sdvo =
- to_psb_intel_sdvo(&psb_intel_encoder->base);
+ struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
+ struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(&gma_encoder->base);
sdvo->saveSDVO = REG_READ(sdvo->sdvo_reg);
}
@@ -1847,8 +1846,7 @@ static void psb_intel_sdvo_save(struct drm_connector *connector)
static void psb_intel_sdvo_restore(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_encoder *encoder =
- &psb_intel_attached_encoder(connector)->base;
+ struct drm_encoder *encoder = &gma_attached_encoder(connector)->base;
struct psb_intel_sdvo *sdvo = to_psb_intel_sdvo(encoder);
struct drm_crtc *crtc = encoder->crtc;
@@ -1864,9 +1862,9 @@ static void psb_intel_sdvo_restore(struct drm_connector *connector)
static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
.dpms = psb_intel_sdvo_dpms,
.mode_fixup = psb_intel_sdvo_mode_fixup,
- .prepare = psb_intel_encoder_prepare,
+ .prepare = gma_encoder_prepare,
.mode_set = psb_intel_sdvo_mode_set,
- .commit = psb_intel_encoder_commit,
+ .commit = gma_encoder_commit,
};
static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
@@ -1882,7 +1880,7 @@ static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = {
.get_modes = psb_intel_sdvo_get_modes,
.mode_valid = psb_intel_sdvo_mode_valid,
- .best_encoder = psb_intel_best_encoder,
+ .best_encoder = gma_best_encoder,
};
static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
@@ -1894,7 +1892,7 @@ static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
psb_intel_sdvo->sdvo_lvds_fixed_mode);
i2c_del_adapter(&psb_intel_sdvo->ddc);
- psb_intel_encoder_destroy(encoder);
+ gma_encoder_destroy(encoder);
}
static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
@@ -2055,7 +2053,7 @@ psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
connector->base.base.doublescan_allowed = 0;
connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
- psb_intel_connector_attach_encoder(&connector->base, &encoder->base);
+ gma_connector_attach_encoder(&connector->base, &encoder->base);
drm_sysfs_connector_add(&connector->base.base);
}
@@ -2075,7 +2073,7 @@ psb_intel_sdvo_dvi_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
{
struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
struct drm_connector *connector;
- struct psb_intel_connector *intel_connector;
+ struct gma_connector *intel_connector;
struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2115,7 +2113,7 @@ psb_intel_sdvo_tv_init(struct psb_intel_sdvo *psb_intel_sdvo, int type)
{
struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
struct drm_connector *connector;
- struct psb_intel_connector *intel_connector;
+ struct gma_connector *intel_connector;
struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2154,7 +2152,7 @@ psb_intel_sdvo_analog_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
{
struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
struct drm_connector *connector;
- struct psb_intel_connector *intel_connector;
+ struct gma_connector *intel_connector;
struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2188,7 +2186,7 @@ psb_intel_sdvo_lvds_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
{
struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
struct drm_connector *connector;
- struct psb_intel_connector *intel_connector;
+ struct gma_connector *intel_connector;
struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
@@ -2540,7 +2538,7 @@ psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_encoder *psb_intel_encoder;
+ struct gma_encoder *gma_encoder;
struct psb_intel_sdvo *psb_intel_sdvo;
int i;
@@ -2557,9 +2555,9 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
}
/* encoder type will be decided later */
- psb_intel_encoder = &psb_intel_sdvo->base;
- psb_intel_encoder->type = INTEL_OUTPUT_SDVO;
- drm_encoder_init(dev, &psb_intel_encoder->base, &psb_intel_sdvo_enc_funcs, 0);
+ gma_encoder = &psb_intel_sdvo->base;
+ gma_encoder->type = INTEL_OUTPUT_SDVO;
+ drm_encoder_init(dev, &gma_encoder->base, &psb_intel_sdvo_enc_funcs, 0);
/* Read the regs to test if we can talk to the device */
for (i = 0; i < 0x40; i++) {
@@ -2577,7 +2575,7 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
else
dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
- drm_encoder_helper_add(&psb_intel_encoder->base, &psb_intel_sdvo_helper_funcs);
+ drm_encoder_helper_add(&gma_encoder->base, &psb_intel_sdvo_helper_funcs);
/* In default case sdvo lvds is false */
if (!psb_intel_sdvo_get_capabilities(psb_intel_sdvo, &psb_intel_sdvo->caps))
@@ -2620,7 +2618,7 @@ bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
return true;
err:
- drm_encoder_cleanup(&psb_intel_encoder->base);
+ drm_encoder_cleanup(&gma_encoder->base);
i2c_del_adapter(&psb_intel_sdvo->ddc);
kfree(psb_intel_sdvo);
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index e68b58a1aaf..b1f8fc69023 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -23,7 +23,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_encoder_slave.h>
#include <drm/drm_edid.h>
-
+#include <drm/i2c/tda998x.h>
#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
@@ -32,6 +32,11 @@ struct tda998x_priv {
uint16_t rev;
uint8_t current_page;
int dpms;
+ bool is_hdmi_sink;
+ u8 vip_cntrl_0;
+ u8 vip_cntrl_1;
+ u8 vip_cntrl_2;
+ struct tda998x_encoder_params params;
};
#define to_tda998x_priv(x) ((struct tda998x_priv *)to_encoder_slave(x)->slave_priv)
@@ -68,10 +73,13 @@ struct tda998x_priv {
# define I2C_MASTER_DIS_MM (1 << 0)
# define I2C_MASTER_DIS_FILT (1 << 1)
# define I2C_MASTER_APP_STRT_LAT (1 << 2)
+#define REG_FEAT_POWERDOWN REG(0x00, 0x0e) /* read/write */
+# define FEAT_POWERDOWN_SPDIF (1 << 3)
#define REG_INT_FLAGS_0 REG(0x00, 0x0f) /* read/write */
#define REG_INT_FLAGS_1 REG(0x00, 0x10) /* read/write */
#define REG_INT_FLAGS_2 REG(0x00, 0x11) /* read/write */
# define INT_FLAGS_2_EDID_BLK_RD (1 << 1)
+#define REG_ENA_ACLK REG(0x00, 0x16) /* read/write */
#define REG_ENA_VP_0 REG(0x00, 0x18) /* read/write */
#define REG_ENA_VP_1 REG(0x00, 0x19) /* read/write */
#define REG_ENA_VP_2 REG(0x00, 0x1a) /* read/write */
@@ -110,6 +118,8 @@ struct tda998x_priv {
#define REG_VIP_CNTRL_5 REG(0x00, 0x25) /* write */
# define VIP_CNTRL_5_CKCASE (1 << 0)
# define VIP_CNTRL_5_SP_CNT(x) (((x) & 3) << 1)
+#define REG_MUX_AP REG(0x00, 0x26) /* read/write */
+#define REG_MUX_VP_VIP_OUT REG(0x00, 0x27) /* read/write */
#define REG_MAT_CONTRL REG(0x00, 0x80) /* write */
# define MAT_CONTRL_MAT_SC(x) (((x) & 3) << 0)
# define MAT_CONTRL_MAT_BP (1 << 2)
@@ -130,8 +140,12 @@ struct tda998x_priv {
#define REG_VS_LINE_END_1_LSB REG(0x00, 0xae) /* write */
#define REG_VS_PIX_END_1_MSB REG(0x00, 0xaf) /* write */
#define REG_VS_PIX_END_1_LSB REG(0x00, 0xb0) /* write */
+#define REG_VS_LINE_STRT_2_MSB REG(0x00, 0xb1) /* write */
+#define REG_VS_LINE_STRT_2_LSB REG(0x00, 0xb2) /* write */
#define REG_VS_PIX_STRT_2_MSB REG(0x00, 0xb3) /* write */
#define REG_VS_PIX_STRT_2_LSB REG(0x00, 0xb4) /* write */
+#define REG_VS_LINE_END_2_MSB REG(0x00, 0xb5) /* write */
+#define REG_VS_LINE_END_2_LSB REG(0x00, 0xb6) /* write */
#define REG_VS_PIX_END_2_MSB REG(0x00, 0xb7) /* write */
#define REG_VS_PIX_END_2_LSB REG(0x00, 0xb8) /* write */
#define REG_HS_PIX_START_MSB REG(0x00, 0xb9) /* write */
@@ -142,21 +156,29 @@ struct tda998x_priv {
#define REG_VWIN_START_1_LSB REG(0x00, 0xbe) /* write */
#define REG_VWIN_END_1_MSB REG(0x00, 0xbf) /* write */
#define REG_VWIN_END_1_LSB REG(0x00, 0xc0) /* write */
+#define REG_VWIN_START_2_MSB REG(0x00, 0xc1) /* write */
+#define REG_VWIN_START_2_LSB REG(0x00, 0xc2) /* write */
+#define REG_VWIN_END_2_MSB REG(0x00, 0xc3) /* write */
+#define REG_VWIN_END_2_LSB REG(0x00, 0xc4) /* write */
#define REG_DE_START_MSB REG(0x00, 0xc5) /* write */
#define REG_DE_START_LSB REG(0x00, 0xc6) /* write */
#define REG_DE_STOP_MSB REG(0x00, 0xc7) /* write */
#define REG_DE_STOP_LSB REG(0x00, 0xc8) /* write */
#define REG_TBG_CNTRL_0 REG(0x00, 0xca) /* write */
+# define TBG_CNTRL_0_TOP_TGL (1 << 0)
+# define TBG_CNTRL_0_TOP_SEL (1 << 1)
+# define TBG_CNTRL_0_DE_EXT (1 << 2)
+# define TBG_CNTRL_0_TOP_EXT (1 << 3)
# define TBG_CNTRL_0_FRAME_DIS (1 << 5)
# define TBG_CNTRL_0_SYNC_MTHD (1 << 6)
# define TBG_CNTRL_0_SYNC_ONCE (1 << 7)
#define REG_TBG_CNTRL_1 REG(0x00, 0xcb) /* write */
-# define TBG_CNTRL_1_VH_TGL_0 (1 << 0)
-# define TBG_CNTRL_1_VH_TGL_1 (1 << 1)
-# define TBG_CNTRL_1_VH_TGL_2 (1 << 2)
-# define TBG_CNTRL_1_VHX_EXT_DE (1 << 3)
-# define TBG_CNTRL_1_VHX_EXT_HS (1 << 4)
-# define TBG_CNTRL_1_VHX_EXT_VS (1 << 5)
+# define TBG_CNTRL_1_H_TGL (1 << 0)
+# define TBG_CNTRL_1_V_TGL (1 << 1)
+# define TBG_CNTRL_1_TGL_EN (1 << 2)
+# define TBG_CNTRL_1_X_EXT (1 << 3)
+# define TBG_CNTRL_1_H_EXT (1 << 4)
+# define TBG_CNTRL_1_V_EXT (1 << 5)
# define TBG_CNTRL_1_DWIN_DIS (1 << 6)
#define REG_ENABLE_SPACE REG(0x00, 0xd6) /* write */
#define REG_HVF_CNTRL_0 REG(0x00, 0xe4) /* write */
@@ -171,6 +193,12 @@ struct tda998x_priv {
# define HVF_CNTRL_1_PAD(x) (((x) & 3) << 4)
# define HVF_CNTRL_1_SEMI_PLANAR (1 << 6)
#define REG_RPT_CNTRL REG(0x00, 0xf0) /* write */
+#define REG_I2S_FORMAT REG(0x00, 0xfc) /* read/write */
+# define I2S_FORMAT(x) (((x) & 3) << 0)
+#define REG_AIP_CLKSEL REG(0x00, 0xfd) /* write */
+# define AIP_CLKSEL_FS(x) (((x) & 3) << 0)
+# define AIP_CLKSEL_CLK_POL(x) (((x) & 1) << 2)
+# define AIP_CLKSEL_AIP(x) (((x) & 7) << 3)
/* Page 02h: PLL settings */
@@ -194,6 +222,12 @@ struct tda998x_priv {
#define REG_PLL_SCGR1 REG(0x02, 0x09) /* read/write */
#define REG_PLL_SCGR2 REG(0x02, 0x0a) /* read/write */
#define REG_AUDIO_DIV REG(0x02, 0x0e) /* read/write */
+# define AUDIO_DIV_SERCLK_1 0
+# define AUDIO_DIV_SERCLK_2 1
+# define AUDIO_DIV_SERCLK_4 2
+# define AUDIO_DIV_SERCLK_8 3
+# define AUDIO_DIV_SERCLK_16 4
+# define AUDIO_DIV_SERCLK_32 5
#define REG_SEL_CLK REG(0x02, 0x11) /* read/write */
# define SEL_CLK_SEL_CLK1 (1 << 0)
# define SEL_CLK_SEL_VRF_CLK(x) (((x) & 3) << 1)
@@ -212,6 +246,11 @@ struct tda998x_priv {
/* Page 10h: information frames and packets */
+#define REG_IF1_HB0 REG(0x10, 0x20) /* read/write */
+#define REG_IF2_HB0 REG(0x10, 0x40) /* read/write */
+#define REG_IF3_HB0 REG(0x10, 0x60) /* read/write */
+#define REG_IF4_HB0 REG(0x10, 0x80) /* read/write */
+#define REG_IF5_HB0 REG(0x10, 0xa0) /* read/write */
/* Page 11h: audio settings and content info packets */
@@ -221,14 +260,39 @@ struct tda998x_priv {
# define AIP_CNTRL_0_LAYOUT (1 << 2)
# define AIP_CNTRL_0_ACR_MAN (1 << 5)
# define AIP_CNTRL_0_RST_CTS (1 << 6)
+#define REG_CA_I2S REG(0x11, 0x01) /* read/write */
+# define CA_I2S_CA_I2S(x) (((x) & 31) << 0)
+# define CA_I2S_HBR_CHSTAT (1 << 6)
+#define REG_LATENCY_RD REG(0x11, 0x04) /* read/write */
+#define REG_ACR_CTS_0 REG(0x11, 0x05) /* read/write */
+#define REG_ACR_CTS_1 REG(0x11, 0x06) /* read/write */
+#define REG_ACR_CTS_2 REG(0x11, 0x07) /* read/write */
+#define REG_ACR_N_0 REG(0x11, 0x08) /* read/write */
+#define REG_ACR_N_1 REG(0x11, 0x09) /* read/write */
+#define REG_ACR_N_2 REG(0x11, 0x0a) /* read/write */
+#define REG_CTS_N REG(0x11, 0x0c) /* read/write */
+# define CTS_N_K(x) (((x) & 7) << 0)
+# define CTS_N_M(x) (((x) & 3) << 4)
#define REG_ENC_CNTRL REG(0x11, 0x0d) /* read/write */
# define ENC_CNTRL_RST_ENC (1 << 0)
# define ENC_CNTRL_RST_SEL (1 << 1)
# define ENC_CNTRL_CTL_CODE(x) (((x) & 3) << 2)
+#define REG_DIP_FLAGS REG(0x11, 0x0e) /* read/write */
+# define DIP_FLAGS_ACR (1 << 0)
+# define DIP_FLAGS_GC (1 << 1)
+#define REG_DIP_IF_FLAGS REG(0x11, 0x0f) /* read/write */
+# define DIP_IF_FLAGS_IF1 (1 << 1)
+# define DIP_IF_FLAGS_IF2 (1 << 2)
+# define DIP_IF_FLAGS_IF3 (1 << 3)
+# define DIP_IF_FLAGS_IF4 (1 << 4)
+# define DIP_IF_FLAGS_IF5 (1 << 5)
+#define REG_CH_STAT_B(x) REG(0x11, 0x14 + (x)) /* read/write */
/* Page 12h: HDCP and OTP */
#define REG_TX3 REG(0x12, 0x9a) /* read/write */
+#define REG_TX4 REG(0x12, 0x9b) /* read/write */
+# define TX4_PD_RAM (1 << 1)
#define REG_TX33 REG(0x12, 0xb8) /* read/write */
# define TX33_HDMI (1 << 1)
@@ -338,6 +402,23 @@ fail:
return ret;
}
+static void
+reg_write_range(struct drm_encoder *encoder, uint16_t reg, uint8_t *p, int cnt)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+ uint8_t buf[cnt+1];
+ int ret;
+
+ buf[0] = REG2ADDR(reg);
+ memcpy(&buf[1], p, cnt);
+
+ set_page(encoder, reg);
+
+ ret = i2c_master_send(client, buf, cnt + 1);
+ if (ret < 0)
+ dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+}
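A brief aside on reg_write_range(): it prepends the register address to the payload and pushes everything out in one i2c_master_send() call, so a run of consecutive registers is updated in a single transfer. A minimal usage sketch, assuming the consecutive REG_ACR_N_0..REG_ACR_N_2 registers defined above (the helper name and the idea of writing N this way are illustrative; the driver itself writes N as part of a 6-byte REG_ACR_CTS_0 transfer further down):

static void example_write_acr_n(struct drm_encoder *encoder, uint32_t n)
{
	/* split the 24-bit N value into three little-endian register bytes */
	uint8_t buf[3] = { n, n >> 8, n >> 16 };

	reg_write_range(encoder, REG_ACR_N_0, buf, 3);
}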
+
static uint8_t
reg_read(struct drm_encoder *encoder, uint16_t reg)
{
@@ -406,13 +487,176 @@ tda998x_reset(struct drm_encoder *encoder)
reg_write(encoder, REG_SERIALIZER, 0x00);
reg_write(encoder, REG_BUFFER_OUT, 0x00);
reg_write(encoder, REG_PLL_SCG1, 0x00);
- reg_write(encoder, REG_AUDIO_DIV, 0x03);
+ reg_write(encoder, REG_AUDIO_DIV, AUDIO_DIV_SERCLK_8);
reg_write(encoder, REG_SEL_CLK, SEL_CLK_SEL_CLK1 | SEL_CLK_ENA_SC_CLK);
reg_write(encoder, REG_PLL_SCGN1, 0xfa);
reg_write(encoder, REG_PLL_SCGN2, 0x00);
reg_write(encoder, REG_PLL_SCGR1, 0x5b);
reg_write(encoder, REG_PLL_SCGR2, 0x00);
reg_write(encoder, REG_PLL_SCG2, 0x10);
+
+ /* Write the default value to the MUX register */
+ reg_write(encoder, REG_MUX_VP_VIP_OUT, 0x24);
+}
+
+static uint8_t tda998x_cksum(uint8_t *buf, size_t bytes)
+{
+ uint8_t sum = 0;
+
+ while (bytes--)
+ sum += *buf++;
+ return (255 - sum) + 1;
+}
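The helper above computes the standard CEA-861 InfoFrame checksum: with the checksum slot zeroed beforehand, storing 256 - sum makes every header and payload byte sum to 0 modulo 256. A small sketch of that invariant (the verification helper is illustrative and not part of the patch):

static bool infoframe_sums_to_zero(const uint8_t *buf, size_t size)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < size; i++)
		sum += buf[i];		/* wraps modulo 256 */

	return sum == 0;		/* holds once PB(0) carries the checksum */
}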
+
+#define HB(x) (x)
+#define PB(x) (HB(2) + 1 + (x))
+
+static void
+tda998x_write_if(struct drm_encoder *encoder, uint8_t bit, uint16_t addr,
+ uint8_t *buf, size_t size)
+{
+ buf[PB(0)] = tda998x_cksum(buf, size);
+
+ reg_clear(encoder, REG_DIP_IF_FLAGS, bit);
+ reg_write_range(encoder, addr, buf, size);
+ reg_set(encoder, REG_DIP_IF_FLAGS, bit);
+}
+
+static void
+tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p)
+{
+ uint8_t buf[PB(5) + 1];
+
+ buf[HB(0)] = 0x84;
+ buf[HB(1)] = 0x01;
+ buf[HB(2)] = 10;
+ buf[PB(0)] = 0;
+ buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
+ buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
+ buf[PB(4)] = p->audio_frame[4];
+ buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
+
+ tda998x_write_if(encoder, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
+ sizeof(buf));
+}
+
+static void
+tda998x_write_avi(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ uint8_t buf[PB(13) + 1];
+
+ memset(buf, 0, sizeof(buf));
+ buf[HB(0)] = 0x82;
+ buf[HB(1)] = 0x02;
+ buf[HB(2)] = 13;
+ buf[PB(4)] = drm_match_cea_mode(mode);
+
+ tda998x_write_if(encoder, DIP_IF_FLAGS_IF2, REG_IF2_HB0, buf,
+ sizeof(buf));
+}
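A note on the frame built above: 0x82, 0x02 and 13 are the CEA-861 AVI InfoFrame type, version and payload length, and only data byte 4 (PB(4), the video identification code) is filled in from drm_match_cea_mode(); the rest of the payload is left zero, i.e. plain RGB with no extra colorimetry signalling.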
+
+static void tda998x_audio_mute(struct drm_encoder *encoder, bool on)
+{
+ if (on) {
+ reg_set(encoder, REG_SOFTRESET, SOFTRESET_AUDIO);
+ reg_clear(encoder, REG_SOFTRESET, SOFTRESET_AUDIO);
+ reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+ } else {
+ reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
+ }
+}
+
+static void
+tda998x_configure_audio(struct drm_encoder *encoder,
+ struct drm_display_mode *mode, struct tda998x_encoder_params *p)
+{
+ uint8_t buf[6], clksel_aip, clksel_fs, ca_i2s, cts_n, adiv;
+ uint32_t n;
+
+ /* Enable audio ports */
+ reg_write(encoder, REG_ENA_AP, p->audio_cfg);
+ reg_write(encoder, REG_ENA_ACLK, p->audio_clk_cfg);
+
+ /* Set audio input source */
+ switch (p->audio_format) {
+ case AFMT_SPDIF:
+ reg_write(encoder, REG_MUX_AP, 0x40);
+ clksel_aip = AIP_CLKSEL_AIP(0);
+ /* FS64SPDIF */
+ clksel_fs = AIP_CLKSEL_FS(2);
+ cts_n = CTS_N_M(3) | CTS_N_K(3);
+ ca_i2s = 0;
+ break;
+
+ case AFMT_I2S:
+ reg_write(encoder, REG_MUX_AP, 0x64);
+ clksel_aip = AIP_CLKSEL_AIP(1);
+ /* ACLK */
+ clksel_fs = AIP_CLKSEL_FS(0);
+ cts_n = CTS_N_M(3) | CTS_N_K(3);
+ ca_i2s = CA_I2S_CA_I2S(0);
+ break;
+
+ default:
+ BUG();
+ return;
+ }
+
+ reg_write(encoder, REG_AIP_CLKSEL, clksel_aip);
+ reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_LAYOUT);
+
+ /* Enable automatic CTS generation */
+ reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_ACR_MAN);
+ reg_write(encoder, REG_CTS_N, cts_n);
+
+ /*
+ * Audio input somehow depends on HDMI line rate which is
+ * related to pixclk. Testing showed that modes with pixclk
+ * >100MHz need a larger divider while <40MHz need the default.
+ * There is no detailed info in the datasheet, so we just
+ * assume that anything above 100MHz requires the larger divider.
+ */
+ if (mode->clock > 100000)
+ adiv = AUDIO_DIV_SERCLK_16;
+ else
+ adiv = AUDIO_DIV_SERCLK_8;
+ reg_write(encoder, REG_AUDIO_DIV, adiv);
+
+ /*
+ * This is the approximate value of N, which happens to be
+ * the recommended value for non-coherent clocks.
+ */
+ n = 128 * p->audio_sample_rate / 1000;
+
+ /* Write the CTS and N values */
+ buf[0] = 0x44;
+ buf[1] = 0x42;
+ buf[2] = 0x01;
+ buf[3] = n;
+ buf[4] = n >> 8;
+ buf[5] = n >> 16;
+ reg_write_range(encoder, REG_ACR_CTS_0, buf, 6);
+
+ /* Set CTS clock reference */
+ reg_write(encoder, REG_AIP_CLKSEL, clksel_aip | clksel_fs);
+
+ /* Reset CTS generator */
+ reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
+ reg_clear(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_CTS);
+
+ /* Write the channel status */
+ buf[0] = 0x04;
+ buf[1] = 0x00;
+ buf[2] = 0x00;
+ buf[3] = 0xf1;
+ reg_write_range(encoder, REG_CH_STAT_B(0), buf, 4);
+
+ tda998x_audio_mute(encoder, true);
+ mdelay(20);
+ tda998x_audio_mute(encoder, false);
+
+ /* Write the audio information packet */
+ tda998x_write_aif(encoder, p);
}
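Two quick worked examples for the audio path above (illustrative numbers, not taken from the patch):

	mode->clock is in kHz, so a 720p60 mode (74250) keeps
	AUDIO_DIV_SERCLK_8 while 1080p60 (148500) selects AUDIO_DIV_SERCLK_16.

	At a 48 kHz sample rate, n = 128 * 48000 / 1000 = 6144 = 0x1800, so
	bytes 3..5 of the REG_ACR_CTS_0 write are 0x00, 0x18, 0x00, which is
	the HDMI-recommended N for 48 kHz audio.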
/* DRM encoder functions */
@@ -420,6 +664,23 @@ tda998x_reset(struct drm_encoder *encoder)
static void
tda998x_encoder_set_config(struct drm_encoder *encoder, void *params)
{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
+ struct tda998x_encoder_params *p = params;
+
+ priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
+ (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
+ VIP_CNTRL_0_SWAP_B(p->swap_b) |
+ (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
+ priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
+ (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
+ VIP_CNTRL_1_SWAP_D(p->swap_d) |
+ (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
+ priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
+ (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
+ VIP_CNTRL_2_SWAP_F(p->swap_f) |
+ (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
+
+ priv->params = *p;
}
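For context, a hedged sketch of the tda998x_encoder_params a board file might hand to this hook; only fields the driver reads above are shown, the swap values mirror the driver's own defaults, and the audio_cfg value is an assumption:

static const struct tda998x_encoder_params example_params = {
	.swap_a = 2, .swap_b = 3,	/* same muxing the driver defaults to */
	.swap_c = 0, .swap_d = 1,
	.swap_e = 4, .swap_f = 5,
	.audio_format = AFMT_I2S,
	.audio_sample_rate = 48000,
	.audio_cfg = 0x03,		/* assumed: enable two audio ports */
};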
static void
@@ -436,18 +697,14 @@ tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
- /* enable audio and video ports */
- reg_write(encoder, REG_ENA_AP, 0xff);
+ /* enable video ports; audio will be enabled later */
reg_write(encoder, REG_ENA_VP_0, 0xff);
reg_write(encoder, REG_ENA_VP_1, 0xff);
reg_write(encoder, REG_ENA_VP_2, 0xff);
/* set muxing after enabling ports: */
- reg_write(encoder, REG_VIP_CNTRL_0,
- VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3));
- reg_write(encoder, REG_VIP_CNTRL_1,
- VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1));
- reg_write(encoder, REG_VIP_CNTRL_2,
- VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5));
+ reg_write(encoder, REG_VIP_CNTRL_0, priv->vip_cntrl_0);
+ reg_write(encoder, REG_VIP_CNTRL_1, priv->vip_cntrl_1);
+ reg_write(encoder, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
break;
case DRM_MODE_DPMS_OFF:
/* disable audio and video ports */
@@ -494,43 +751,78 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct tda998x_priv *priv = to_tda998x_priv(encoder);
- uint16_t hs_start, hs_end, line_start, line_end;
- uint16_t vwin_start, vwin_end, de_start, de_end;
- uint16_t ref_pix, ref_line, pix_start2;
+ uint16_t ref_pix, ref_line, n_pix, n_line;
+ uint16_t hs_pix_s, hs_pix_e;
+ uint16_t vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
+ uint16_t vs2_pix_s, vs2_pix_e, vs2_line_s, vs2_line_e;
+ uint16_t vwin1_line_s, vwin1_line_e;
+ uint16_t vwin2_line_s, vwin2_line_e;
+ uint16_t de_pix_s, de_pix_e;
uint8_t reg, div, rep;
- hs_start = mode->hsync_start - mode->hdisplay;
- hs_end = mode->hsync_end - mode->hdisplay;
- line_start = 1;
- line_end = 1 + mode->vsync_end - mode->vsync_start;
- vwin_start = mode->vtotal - mode->vsync_start;
- vwin_end = vwin_start + mode->vdisplay;
- de_start = mode->htotal - mode->hdisplay;
- de_end = mode->htotal;
-
- pix_start2 = 0;
- if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- pix_start2 = (mode->htotal / 2) + hs_start;
-
- /* TODO how is this value calculated? It is 2 for all common
- * formats in the tables in out of tree nxp driver (assuming
- * I've properly deciphered their byzantine table system)
+ /*
+ * Internally TDA998x is using ITU-R BT.656 style sync but
+ * we get VESA style sync. TDA998x is using a reference pixel
+ * relative to ITU to sync to the input frame and for output
+ * sync generation. Currently, we are using reference detection
+ * from HS/VS, i.e. REFPIX/REFLINE denote the frame start sync point,
+ * which is the position of the rising VS edge coincident with the
+ * rising HS edge.
+ *
+ * Now there are some issues to take care of:
+ * - HDMI data islands require sync-before-active
+ * - TDA998x register values must be > 0 to be enabled
+ * - REFLINE needs an additional offset of +1
+ * - REFPIX needs an additional offset of +1 for UYVY and +3 for RGB
+ *
+ * So we add +1 to all horizontal and vertical register values,
+ * plus an additional +3 for REFPIX as we are using RGB input only.
*/
- ref_line = 2;
-
- /* this might changes for other color formats from the CRTC: */
- ref_pix = 3 + hs_start;
+ n_pix = mode->htotal;
+ n_line = mode->vtotal;
+
+ hs_pix_e = mode->hsync_end - mode->hdisplay;
+ hs_pix_s = mode->hsync_start - mode->hdisplay;
+ de_pix_e = mode->htotal;
+ de_pix_s = mode->htotal - mode->hdisplay;
+ ref_pix = 3 + hs_pix_s;
+
+ /*
+ * Attached LCD controllers may generate broken sync. Allow
+ * those to adjust the position of the rising VS edge by adding
+ * HSKEW to ref_pix.
+ */
+ if (adjusted_mode->flags & DRM_MODE_FLAG_HSKEW)
+ ref_pix += adjusted_mode->hskew;
+
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) == 0) {
+ ref_line = 1 + mode->vsync_start - mode->vdisplay;
+ vwin1_line_s = mode->vtotal - mode->vdisplay - 1;
+ vwin1_line_e = vwin1_line_s + mode->vdisplay;
+ vs1_pix_s = vs1_pix_e = hs_pix_s;
+ vs1_line_s = mode->vsync_start - mode->vdisplay;
+ vs1_line_e = vs1_line_s +
+ mode->vsync_end - mode->vsync_start;
+ vwin2_line_s = vwin2_line_e = 0;
+ vs2_pix_s = vs2_pix_e = 0;
+ vs2_line_s = vs2_line_e = 0;
+ } else {
+ ref_line = 1 + (mode->vsync_start - mode->vdisplay)/2;
+ vwin1_line_s = (mode->vtotal - mode->vdisplay)/2;
+ vwin1_line_e = vwin1_line_s + mode->vdisplay/2;
+ vs1_pix_s = vs1_pix_e = hs_pix_s;
+ vs1_line_s = (mode->vsync_start - mode->vdisplay)/2;
+ vs1_line_e = vs1_line_s +
+ (mode->vsync_end - mode->vsync_start)/2;
+ vwin2_line_s = vwin1_line_s + mode->vtotal/2;
+ vwin2_line_e = vwin2_line_s + mode->vdisplay/2;
+ vs2_pix_s = vs2_pix_e = hs_pix_s + mode->htotal/2;
+ vs2_line_s = vs1_line_s + mode->vtotal/2;
+ vs2_line_e = vs2_line_s +
+ (mode->vsync_end - mode->vsync_start)/2;
+ }
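	/*
	 * Worked example for the branch above (illustrative, not part of the
	 * patch): with standard CEA 720p60 timings -- hdisplay 1280,
	 * hsync_start 1390, hsync_end 1430, htotal 1650, vdisplay 720,
	 * vsync_start 725, vsync_end 730, vtotal 750 -- the progressive
	 * branch yields:
	 *   hs_pix_s = 110,  hs_pix_e = 150
	 *   de_pix_s = 370,  de_pix_e = 1650
	 *   ref_pix = 3 + 110 = 113,  ref_line = 1 + 725 - 720 = 6
	 *   vs1_line_s = 5,  vs1_line_e = 10,  vs1_pix_s = vs1_pix_e = 110
	 *   vwin1_line_s = 29,  vwin1_line_e = 749
	 */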
div = 148500 / mode->clock;
- DBG("clock=%d, div=%u", mode->clock, div);
- DBG("hs_start=%u, hs_end=%u, line_start=%u, line_end=%u",
- hs_start, hs_end, line_start, line_end);
- DBG("vwin_start=%u, vwin_end=%u, de_start=%u, de_end=%u",
- vwin_start, vwin_end, de_start, de_end);
- DBG("ref_line=%u, ref_pix=%u, pix_start2=%u",
- ref_line, ref_pix, pix_start2);
-
/* mute the audio FIFO: */
reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
@@ -561,9 +853,6 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
reg_write(encoder, REG_PLL_SERIAL_2, PLL_SERIAL_2_SRL_NOSC(div) |
PLL_SERIAL_2_SRL_PR(rep));
- reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, pix_start2);
- reg_write16(encoder, REG_VS_PIX_END_2_MSB, pix_start2);
-
/* set color matrix bypass flag: */
reg_set(encoder, REG_MAT_CONTRL, MAT_CONTRL_MAT_BP);
@@ -572,47 +861,75 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_MTHD);
+ /*
+ * Sync on rising HSYNC/VSYNC
+ */
reg_write(encoder, REG_VIP_CNTRL_3, 0);
reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_SYNC_HS);
+
+ /*
+ * TDA19988 requires high-active sync at input stage,
+ * so invert low-active sync provided by master encoder here
+ */
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL);
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_V_TGL);
+ /*
+ * Always generate sync polarity relative to the input sync and
+ * undo any input-stage sync toggling at the output stage.
+ */
+ reg = TBG_CNTRL_1_TGL_EN;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- reg_set(encoder, REG_VIP_CNTRL_3, VIP_CNTRL_3_H_TGL);
+ reg |= TBG_CNTRL_1_H_TGL;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ reg |= TBG_CNTRL_1_V_TGL;
+ reg_write(encoder, REG_TBG_CNTRL_1, reg);
reg_write(encoder, REG_VIDFORMAT, 0x00);
- reg_write16(encoder, REG_NPIX_MSB, mode->hdisplay - 1);
- reg_write16(encoder, REG_NLINE_MSB, mode->vdisplay - 1);
- reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, line_start);
- reg_write16(encoder, REG_VS_LINE_END_1_MSB, line_end);
- reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, hs_start);
- reg_write16(encoder, REG_VS_PIX_END_1_MSB, hs_start);
- reg_write16(encoder, REG_HS_PIX_START_MSB, hs_start);
- reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_end);
- reg_write16(encoder, REG_VWIN_START_1_MSB, vwin_start);
- reg_write16(encoder, REG_VWIN_END_1_MSB, vwin_end);
- reg_write16(encoder, REG_DE_START_MSB, de_start);
- reg_write16(encoder, REG_DE_STOP_MSB, de_end);
+ reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
+ reg_write16(encoder, REG_REFLINE_MSB, ref_line);
+ reg_write16(encoder, REG_NPIX_MSB, n_pix);
+ reg_write16(encoder, REG_NLINE_MSB, n_line);
+ reg_write16(encoder, REG_VS_LINE_STRT_1_MSB, vs1_line_s);
+ reg_write16(encoder, REG_VS_PIX_STRT_1_MSB, vs1_pix_s);
+ reg_write16(encoder, REG_VS_LINE_END_1_MSB, vs1_line_e);
+ reg_write16(encoder, REG_VS_PIX_END_1_MSB, vs1_pix_e);
+ reg_write16(encoder, REG_VS_LINE_STRT_2_MSB, vs2_line_s);
+ reg_write16(encoder, REG_VS_PIX_STRT_2_MSB, vs2_pix_s);
+ reg_write16(encoder, REG_VS_LINE_END_2_MSB, vs2_line_e);
+ reg_write16(encoder, REG_VS_PIX_END_2_MSB, vs2_pix_e);
+ reg_write16(encoder, REG_HS_PIX_START_MSB, hs_pix_s);
+ reg_write16(encoder, REG_HS_PIX_STOP_MSB, hs_pix_e);
+ reg_write16(encoder, REG_VWIN_START_1_MSB, vwin1_line_s);
+ reg_write16(encoder, REG_VWIN_END_1_MSB, vwin1_line_e);
+ reg_write16(encoder, REG_VWIN_START_2_MSB, vwin2_line_s);
+ reg_write16(encoder, REG_VWIN_END_2_MSB, vwin2_line_e);
+ reg_write16(encoder, REG_DE_START_MSB, de_pix_s);
+ reg_write16(encoder, REG_DE_STOP_MSB, de_pix_e);
if (priv->rev == TDA19988) {
/* let incoming pixels fill the active space (if any) */
reg_write(encoder, REG_ENABLE_SPACE, 0x01);
}
- reg_write16(encoder, REG_REFPIX_MSB, ref_pix);
- reg_write16(encoder, REG_REFLINE_MSB, ref_line);
-
- reg = TBG_CNTRL_1_VHX_EXT_DE |
- TBG_CNTRL_1_VHX_EXT_HS |
- TBG_CNTRL_1_VHX_EXT_VS |
- TBG_CNTRL_1_DWIN_DIS | /* HDCP off */
- TBG_CNTRL_1_VH_TGL_2;
- if (mode->flags & (DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC))
- reg |= TBG_CNTRL_1_VH_TGL_0;
- reg_set(encoder, REG_TBG_CNTRL_1, reg);
-
/* must be last register set: */
reg_clear(encoder, REG_TBG_CNTRL_0, TBG_CNTRL_0_SYNC_ONCE);
+
+ /* Only setup the info frames if the sink is HDMI */
+ if (priv->is_hdmi_sink) {
+ /* We need to turn HDMI HDCP stuff on to get audio through */
+ reg_clear(encoder, REG_TBG_CNTRL_1, TBG_CNTRL_1_DWIN_DIS);
+ reg_write(encoder, REG_ENC_CNTRL, ENC_CNTRL_CTL_CODE(1));
+ reg_set(encoder, REG_TX33, TX33_HDMI);
+
+ tda998x_write_avi(encoder, adjusted_mode);
+
+ if (priv->params.audio_cfg)
+ tda998x_configure_audio(encoder, adjusted_mode,
+ &priv->params);
+ }
}
static enum drm_connector_status
@@ -673,6 +990,7 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
static uint8_t *
do_get_edid(struct drm_encoder *encoder)
{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
int j = 0, valid_extensions = 0;
uint8_t *block, *new;
bool print_bad_edid = drm_debug & DRM_UT_KMS;
@@ -680,6 +998,9 @@ do_get_edid(struct drm_encoder *encoder)
if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
return NULL;
+ if (priv->rev == TDA19988)
+ reg_clear(encoder, REG_TX4, TX4_PD_RAM);
+
/* base block fetch */
if (read_edid_block(encoder, block, 0))
goto fail;
@@ -689,7 +1010,7 @@ do_get_edid(struct drm_encoder *encoder)
/* if there's no extensions, we're done */
if (block[0x7e] == 0)
- return block;
+ goto done;
new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
@@ -716,9 +1037,15 @@ do_get_edid(struct drm_encoder *encoder)
block = new;
}
+done:
+ if (priv->rev == TDA19988)
+ reg_set(encoder, REG_TX4, TX4_PD_RAM);
+
return block;
fail:
+ if (priv->rev == TDA19988)
+ reg_set(encoder, REG_TX4, TX4_PD_RAM);
dev_warn(encoder->dev->dev, "failed to read EDID\n");
kfree(block);
return NULL;
@@ -728,12 +1055,14 @@ static int
tda998x_encoder_get_modes(struct drm_encoder *encoder,
struct drm_connector *connector)
{
+ struct tda998x_priv *priv = to_tda998x_priv(encoder);
struct edid *edid = (struct edid *)do_get_edid(encoder);
int n = 0;
if (edid) {
drm_mode_connector_update_edid_property(connector, edid);
n = drm_add_edid_modes(connector, edid);
+ priv->is_hdmi_sink = drm_detect_hdmi_monitor(edid);
kfree(edid);
}
@@ -807,6 +1136,10 @@ tda998x_encoder_init(struct i2c_client *client,
if (!priv)
return -ENOMEM;
+ priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
+ priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
+ priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
+
priv->current_page = 0;
priv->cec = i2c_new_dummy(client->adapter, 0x34);
priv->dpms = DRM_MODE_DPMS_OFF;
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index ada49eda489..ab1892eb107 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -113,7 +113,6 @@ static const struct file_operations i810_buffer_fops = {
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.mmap = i810_mmap_buffers,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
@@ -1241,7 +1240,7 @@ int i810_driver_dma_quiescent(struct drm_device *dev)
return 0;
}
-struct drm_ioctl_desc i810_ioctls[] = {
+const struct drm_ioctl_desc i810_ioctls[] = {
DRM_IOCTL_DEF_DRV(I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index 2e91fc3580b..d8180d22ced 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -49,7 +49,6 @@ static const struct file_operations i810_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
@@ -58,7 +57,7 @@ static const struct file_operations i810_driver_fops = {
static struct drm_driver driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
+ DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
DRIVER_HAVE_DMA,
.dev_priv_size = sizeof(drm_i810_buf_priv_t),
.load = i810_driver_load,
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index 6e0acad9e0f..d4d16eddd65 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -125,7 +125,7 @@ extern void i810_driver_preclose(struct drm_device *dev,
extern int i810_driver_device_is_agp(struct drm_device *dev);
extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
-extern struct drm_ioctl_desc i810_ioctls[];
+extern const struct drm_ioctl_desc i810_ioctls[];
extern int i810_max_ioctl;
#define I810_BASE(reg) ((unsigned long) \
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 40034ecefd3..b8449a84a0d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -5,6 +5,7 @@
ccflags-y := -Iinclude/drm
i915-y := i915_drv.o i915_dma.o i915_irq.o \
i915_debugfs.o \
+ i915_gpu_error.o \
i915_suspend.o \
i915_gem.o \
i915_gem_context.o \
@@ -37,6 +38,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
intel_sprite.o \
intel_opregion.o \
intel_sideband.o \
+ intel_uncore.o \
dvo_ch7xxx.o \
dvo_ch7017.o \
dvo_ivch.o \
diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
index 757e0fa1104..af42e94f684 100644
--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
@@ -307,7 +307,7 @@ static void ch7xxx_mode_set(struct intel_dvo_device *dvo,
idf |= CH7xxx_IDF_HSP;
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- idf |= CH7xxx_IDF_HSP;
+ idf |= CH7xxx_IDF_VSP;
ch7xxx_writeb(dvo, CH7xxx_IDF, idf);
}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 47d6c748057..55ab9246e1b 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,7 +30,8 @@
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/export.h>
-#include <generated/utsrelease.h>
+#include <linux/list_sort.h>
+#include <asm/msr-index.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include "intel_ringbuffer.h"
@@ -90,41 +91,45 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
}
}
-static const char *cache_level_str(int type)
+static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
{
- switch (type) {
- case I915_CACHE_NONE: return " uncached";
- case I915_CACHE_LLC: return " snooped (LLC)";
- case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
- default: return "";
- }
+ return obj->has_global_gtt_mapping ? "g" : " ";
}
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
- seq_printf(m, "%pK: %s%s %8zdKiB %02x %02x %d %d %d%s%s%s",
+ struct i915_vma *vma;
+ seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
+ get_global_flag(obj),
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain,
obj->last_read_seqno,
obj->last_write_seqno,
obj->last_fenced_seqno,
- cache_level_str(obj->cache_level),
+ i915_cache_level_str(obj->cache_level),
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
seq_printf(m, " (name: %d)", obj->base.name);
if (obj->pin_count)
seq_printf(m, " (pinned x %d)", obj->pin_count);
+ if (obj->pin_display)
+ seq_printf(m, " (display)");
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
- if (obj->gtt_space != NULL)
- seq_printf(m, " (gtt offset: %08x, size: %08x)",
- obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (!i915_is_ggtt(vma->vm))
+ seq_puts(m, " (pp");
+ else
+ seq_puts(m, " (g");
+ seq_printf(m, "gtt offset: %08lx, size: %08lx)",
+ vma->node.start, vma->node.size);
+ }
if (obj->stolen)
seq_printf(m, " (stolen: %08lx)", obj->stolen->start);
if (obj->pin_mappable || obj->fault_mappable) {
@@ -146,8 +151,9 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
uintptr_t list = (uintptr_t) node->info_ent->data;
struct list_head *head;
struct drm_device *dev = node->minor->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
+ struct i915_vma *vma;
size_t total_obj_size, total_gtt_size;
int count, ret;
@@ -155,14 +161,15 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
if (ret)
return ret;
+ /* FIXME: the user of this interface might want more than just GGTT */
switch (list) {
case ACTIVE_LIST:
- seq_printf(m, "Active:\n");
- head = &dev_priv->mm.active_list;
+ seq_puts(m, "Active:\n");
+ head = &vm->active_list;
break;
case INACTIVE_LIST:
- seq_printf(m, "Inactive:\n");
- head = &dev_priv->mm.inactive_list;
+ seq_puts(m, "Inactive:\n");
+ head = &vm->inactive_list;
break;
default:
mutex_unlock(&dev->struct_mutex);
@@ -170,14 +177,75 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
}
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj, head, mm_list) {
+ list_for_each_entry(vma, head, mm_list) {
seq_printf(m, " ");
- describe_obj(m, obj);
+ describe_obj(m, vma->obj);
seq_printf(m, "\n");
+ total_obj_size += vma->obj->base.size;
+ total_gtt_size += vma->node.size;
+ count++;
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+ count, total_obj_size, total_gtt_size);
+ return 0;
+}
+
+static int obj_rank_by_stolen(void *priv,
+ struct list_head *A, struct list_head *B)
+{
+ struct drm_i915_gem_object *a =
+ container_of(A, struct drm_i915_gem_object, obj_exec_link);
+ struct drm_i915_gem_object *b =
+ container_of(B, struct drm_i915_gem_object, obj_exec_link);
+
+ return a->stolen->start - b->stolen->start;
+}
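A short note on the comparator above: list_sort() expects a three-way comparison (negative, zero or positive), and this one orders objects by their start offset inside the stolen region, so the debugfs dump assembled below comes out in address order.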
+
+static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+ size_t total_obj_size, total_gtt_size;
+ LIST_HEAD(stolen);
+ int count, ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ total_obj_size = total_gtt_size = count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (obj->stolen == NULL)
+ continue;
+
+ list_add(&obj->obj_exec_link, &stolen);
+
+ total_obj_size += obj->base.size;
+ total_gtt_size += i915_gem_obj_ggtt_size(obj);
+ count++;
+ }
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ if (obj->stolen == NULL)
+ continue;
+
+ list_add(&obj->obj_exec_link, &stolen);
+
total_obj_size += obj->base.size;
- total_gtt_size += obj->gtt_space->size;
count++;
}
+ list_sort(NULL, &stolen, obj_rank_by_stolen);
+ seq_puts(m, "Stolen:\n");
+ while (!list_empty(&stolen)) {
+ obj = list_first_entry(&stolen, typeof(*obj), obj_exec_link);
+ seq_puts(m, " ");
+ describe_obj(m, obj);
+ seq_putc(m, '\n');
+ list_del_init(&obj->obj_exec_link);
+ }
mutex_unlock(&dev->struct_mutex);
seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
@@ -187,10 +255,10 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
#define count_objects(list, member) do { \
list_for_each_entry(obj, list, member) { \
- size += obj->gtt_space->size; \
+ size += i915_gem_obj_ggtt_size(obj); \
++count; \
if (obj->map_and_fenceable) { \
- mappable_size += obj->gtt_space->size; \
+ mappable_size += i915_gem_obj_ggtt_size(obj); \
++mappable_count; \
} \
} \
@@ -209,7 +277,7 @@ static int per_file_stats(int id, void *ptr, void *data)
stats->count++;
stats->total += obj->base.size;
- if (obj->gtt_space) {
+ if (i915_gem_obj_ggtt_bound(obj)) {
if (!list_empty(&obj->ring_list))
stats->active += obj->base.size;
else
@@ -222,6 +290,17 @@ static int per_file_stats(int id, void *ptr, void *data)
return 0;
}
+#define count_vmas(list, member) do { \
+ list_for_each_entry(vma, list, member) { \
+ size += i915_gem_obj_ggtt_size(vma->obj); \
+ ++count; \
+ if (vma->obj->map_and_fenceable) { \
+ mappable_size += i915_gem_obj_ggtt_size(vma->obj); \
+ ++mappable_count; \
+ } \
+ } \
+} while (0)
+
static int i915_gem_object_info(struct seq_file *m, void* data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -230,7 +309,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
u32 count, mappable_count, purgeable_count;
size_t size, mappable_size, purgeable_size;
struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm = &dev_priv->gtt.base;
struct drm_file *file;
+ struct i915_vma *vma;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -247,12 +328,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.active_list, mm_list);
+ count_vmas(&vm->active_list, mm_list);
seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
size = count = mappable_size = mappable_count = 0;
- count_objects(&dev_priv->mm.inactive_list, mm_list);
+ count_vmas(&vm->inactive_list, mm_list);
seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
count, mappable_count, size, mappable_size);
@@ -267,11 +348,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
size = count = mappable_size = mappable_count = 0;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (obj->fault_mappable) {
- size += obj->gtt_space->size;
+ size += i915_gem_obj_ggtt_size(obj);
++count;
}
if (obj->pin_mappable) {
- mappable_size += obj->gtt_space->size;
+ mappable_size += i915_gem_obj_ggtt_size(obj);
++mappable_count;
}
if (obj->madv == I915_MADV_DONTNEED) {
@@ -287,10 +368,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, size);
seq_printf(m, "%zu [%lu] gtt total\n",
- dev_priv->gtt.total,
- dev_priv->gtt.mappable_end - dev_priv->gtt.start);
+ dev_priv->gtt.base.total,
+ dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
list_for_each_entry_reverse(file, &dev->filelist, lhead) {
struct file_stats stats;
@@ -310,7 +391,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
return 0;
}
-static int i915_gem_gtt_info(struct seq_file *m, void* data)
+static int i915_gem_gtt_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
@@ -329,11 +410,11 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
if (list == PINNED_LIST && obj->pin_count == 0)
continue;
- seq_printf(m, " ");
+ seq_puts(m, " ");
describe_obj(m, obj);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
total_obj_size += obj->base.size;
- total_gtt_size += obj->gtt_space->size;
+ total_gtt_size += i915_gem_obj_ggtt_size(obj);
count++;
}
@@ -371,20 +452,22 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
pipe, plane);
}
if (work->enable_stall_check)
- seq_printf(m, "Stall check enabled, ");
+ seq_puts(m, "Stall check enabled, ");
else
- seq_printf(m, "Stall check waiting for page flip ioctl, ");
+ seq_puts(m, "Stall check waiting for page flip ioctl, ");
seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
if (work->old_fb_obj) {
struct drm_i915_gem_object *obj = work->old_fb_obj;
if (obj)
- seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+ seq_printf(m, "Old framebuffer gtt_offset 0x%08lx\n",
+ i915_gem_obj_ggtt_offset(obj));
}
if (work->pending_flip_obj) {
struct drm_i915_gem_object *obj = work->pending_flip_obj;
if (obj)
- seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
+ seq_printf(m, "New framebuffer gtt_offset 0x%08lx\n",
+ i915_gem_obj_ggtt_offset(obj));
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -424,7 +507,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
mutex_unlock(&dev->struct_mutex);
if (count == 0)
- seq_printf(m, "No requests\n");
+ seq_puts(m, "No requests\n");
return 0;
}
@@ -574,10 +657,10 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
seq_printf(m, "Fence %d, pin count = %d, object = ",
i, dev_priv->fence_regs[i].pin_count);
if (obj == NULL)
- seq_printf(m, "unused");
+ seq_puts(m, "unused");
else
describe_obj(m, obj);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
mutex_unlock(&dev->struct_mutex);
@@ -606,361 +689,6 @@ static int i915_hws_info(struct seq_file *m, void *data)
return 0;
}
-static const char *ring_str(int ring)
-{
- switch (ring) {
- case RCS: return "render";
- case VCS: return "bsd";
- case BCS: return "blt";
- case VECS: return "vebox";
- default: return "";
- }
-}
-
-static const char *pin_flag(int pinned)
-{
- if (pinned > 0)
- return " P";
- else if (pinned < 0)
- return " p";
- else
- return "";
-}
-
-static const char *tiling_flag(int tiling)
-{
- switch (tiling) {
- default:
- case I915_TILING_NONE: return "";
- case I915_TILING_X: return " X";
- case I915_TILING_Y: return " Y";
- }
-}
-
-static const char *dirty_flag(int dirty)
-{
- return dirty ? " dirty" : "";
-}
-
-static const char *purgeable_flag(int purgeable)
-{
- return purgeable ? " purgeable" : "";
-}
-
-static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
-{
-
- if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
- e->err = -ENOSPC;
- return false;
- }
-
- if (e->bytes == e->size - 1 || e->err)
- return false;
-
- return true;
-}
-
-static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
- unsigned len)
-{
- if (e->pos + len <= e->start) {
- e->pos += len;
- return false;
- }
-
- /* First vsnprintf needs to fit in its entirety for memmove */
- if (len >= e->size) {
- e->err = -EIO;
- return false;
- }
-
- return true;
-}
-
-static void __i915_error_advance(struct drm_i915_error_state_buf *e,
- unsigned len)
-{
- /* If this is first printf in this window, adjust it so that
- * start position matches start of the buffer
- */
-
- if (e->pos < e->start) {
- const size_t off = e->start - e->pos;
-
- /* Should not happen but be paranoid */
- if (off > len || e->bytes) {
- e->err = -EIO;
- return;
- }
-
- memmove(e->buf, e->buf + off, len - off);
- e->bytes = len - off;
- e->pos = e->start;
- return;
- }
-
- e->bytes += len;
- e->pos += len;
-}
-
-static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
- const char *f, va_list args)
-{
- unsigned len;
-
- if (!__i915_error_ok(e))
- return;
-
- /* Seek the first printf which is hits start position */
- if (e->pos < e->start) {
- len = vsnprintf(NULL, 0, f, args);
- if (!__i915_error_seek(e, len))
- return;
- }
-
- len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
- if (len >= e->size - e->bytes)
- len = e->size - e->bytes - 1;
-
- __i915_error_advance(e, len);
-}
-
-static void i915_error_puts(struct drm_i915_error_state_buf *e,
- const char *str)
-{
- unsigned len;
-
- if (!__i915_error_ok(e))
- return;
-
- len = strlen(str);
-
- /* Seek the first printf which is hits start position */
- if (e->pos < e->start) {
- if (!__i915_error_seek(e, len))
- return;
- }
-
- if (len >= e->size - e->bytes)
- len = e->size - e->bytes - 1;
- memcpy(e->buf + e->bytes, str, len);
-
- __i915_error_advance(e, len);
-}
-
-void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
-{
- va_list args;
-
- va_start(args, f);
- i915_error_vprintf(e, f, args);
- va_end(args);
-}
-
-#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
-#define err_puts(e, s) i915_error_puts(e, s)
-
-static void print_error_buffers(struct drm_i915_error_state_buf *m,
- const char *name,
- struct drm_i915_error_buffer *err,
- int count)
-{
- err_printf(m, "%s [%d]:\n", name, count);
-
- while (count--) {
- err_printf(m, " %08x %8u %02x %02x %x %x",
- err->gtt_offset,
- err->size,
- err->read_domains,
- err->write_domain,
- err->rseqno, err->wseqno);
- err_puts(m, pin_flag(err->pinned));
- err_puts(m, tiling_flag(err->tiling));
- err_puts(m, dirty_flag(err->dirty));
- err_puts(m, purgeable_flag(err->purgeable));
- err_puts(m, err->ring != -1 ? " " : "");
- err_puts(m, ring_str(err->ring));
- err_puts(m, cache_level_str(err->cache_level));
-
- if (err->name)
- err_printf(m, " (name: %d)", err->name);
- if (err->fence_reg != I915_FENCE_REG_NONE)
- err_printf(m, " (fence: %d)", err->fence_reg);
-
- err_puts(m, "\n");
- err++;
- }
-}
-
-static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
- struct drm_device *dev,
- struct drm_i915_error_state *error,
- unsigned ring)
-{
- BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
- err_printf(m, "%s command stream:\n", ring_str(ring));
- err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
- err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
- err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
- err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
- err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
- err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
- err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
- if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
- err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
-
- if (INTEL_INFO(dev)->gen >= 4)
- err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
- err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
- err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
- if (INTEL_INFO(dev)->gen >= 6) {
- err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
- err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
- err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
- error->semaphore_mboxes[ring][0],
- error->semaphore_seqno[ring][0]);
- err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
- error->semaphore_mboxes[ring][1],
- error->semaphore_seqno[ring][1]);
- }
- err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
- err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
- err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
- err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
-}
-
-struct i915_error_state_file_priv {
- struct drm_device *dev;
- struct drm_i915_error_state *error;
-};
-
-
-static int i915_error_state(struct i915_error_state_file_priv *error_priv,
- struct drm_i915_error_state_buf *m)
-
-{
- struct drm_device *dev = error_priv->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_error_state *error = error_priv->error;
- struct intel_ring_buffer *ring;
- int i, j, page, offset, elt;
-
- if (!error) {
- err_printf(m, "no error state collected\n");
- return 0;
- }
-
- err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
- error->time.tv_usec);
- err_printf(m, "Kernel: " UTS_RELEASE "\n");
- err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
- err_printf(m, "EIR: 0x%08x\n", error->eir);
- err_printf(m, "IER: 0x%08x\n", error->ier);
- err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
- err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
- err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
- err_printf(m, "CCID: 0x%08x\n", error->ccid);
-
- for (i = 0; i < dev_priv->num_fence_regs; i++)
- err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
-
- for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
- err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
- error->extra_instdone[i]);
-
- if (INTEL_INFO(dev)->gen >= 6) {
- err_printf(m, "ERROR: 0x%08x\n", error->error);
- err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
- }
-
- if (INTEL_INFO(dev)->gen == 7)
- err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
-
- for_each_ring(ring, dev_priv, i)
- i915_ring_error_state(m, dev, error, i);
-
- if (error->active_bo)
- print_error_buffers(m, "Active",
- error->active_bo,
- error->active_bo_count);
-
- if (error->pinned_bo)
- print_error_buffers(m, "Pinned",
- error->pinned_bo,
- error->pinned_bo_count);
-
- for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
- struct drm_i915_error_object *obj;
-
- if ((obj = error->ring[i].batchbuffer)) {
- err_printf(m, "%s --- gtt_offset = 0x%08x\n",
- dev_priv->ring[i].name,
- obj->gtt_offset);
- offset = 0;
- for (page = 0; page < obj->page_count; page++) {
- for (elt = 0; elt < PAGE_SIZE/4; elt++) {
- err_printf(m, "%08x : %08x\n", offset,
- obj->pages[page][elt]);
- offset += 4;
- }
- }
- }
-
- if (error->ring[i].num_requests) {
- err_printf(m, "%s --- %d requests\n",
- dev_priv->ring[i].name,
- error->ring[i].num_requests);
- for (j = 0; j < error->ring[i].num_requests; j++) {
- err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
- error->ring[i].requests[j].seqno,
- error->ring[i].requests[j].jiffies,
- error->ring[i].requests[j].tail);
- }
- }
-
- if ((obj = error->ring[i].ringbuffer)) {
- err_printf(m, "%s --- ringbuffer = 0x%08x\n",
- dev_priv->ring[i].name,
- obj->gtt_offset);
- offset = 0;
- for (page = 0; page < obj->page_count; page++) {
- for (elt = 0; elt < PAGE_SIZE/4; elt++) {
- err_printf(m, "%08x : %08x\n",
- offset,
- obj->pages[page][elt]);
- offset += 4;
- }
- }
- }
-
- obj = error->ring[i].ctx;
- if (obj) {
- err_printf(m, "%s --- HW Context = 0x%08x\n",
- dev_priv->ring[i].name,
- obj->gtt_offset);
- offset = 0;
- for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
- err_printf(m, "[%04x] %08x %08x %08x %08x\n",
- offset,
- obj->pages[0][elt],
- obj->pages[0][elt+1],
- obj->pages[0][elt+2],
- obj->pages[0][elt+3]);
- offset += 16;
- }
- }
- }
-
- if (error->overlay)
- intel_overlay_print_error_state(m, error->overlay);
-
- if (error->display)
- intel_display_print_error_state(m, dev, error->display);
-
- return 0;
-}
-
static ssize_t
i915_error_state_write(struct file *filp,
const char __user *ubuf,
@@ -986,9 +714,7 @@ i915_error_state_write(struct file *filp,
static int i915_error_state_open(struct inode *inode, struct file *file)
{
struct drm_device *dev = inode->i_private;
- drm_i915_private_t *dev_priv = dev->dev_private;
struct i915_error_state_file_priv *error_priv;
- unsigned long flags;
error_priv = kzalloc(sizeof(*error_priv), GFP_KERNEL);
if (!error_priv)
@@ -996,11 +722,7 @@ static int i915_error_state_open(struct inode *inode, struct file *file)
error_priv->dev = dev;
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
- error_priv->error = dev_priv->gpu_error.first_error;
- if (error_priv->error)
- kref_get(&error_priv->error->ref);
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+ i915_error_state_get(dev, error_priv);
file->private_data = error_priv;
@@ -1011,8 +733,7 @@ static int i915_error_state_release(struct inode *inode, struct file *file)
{
struct i915_error_state_file_priv *error_priv = file->private_data;
- if (error_priv->error)
- kref_put(&error_priv->error->ref, i915_error_state_free);
+ i915_error_state_put(error_priv);
kfree(error_priv);
return 0;
@@ -1025,40 +746,15 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
struct drm_i915_error_state_buf error_str;
loff_t tmp_pos = 0;
ssize_t ret_count = 0;
- int ret = 0;
-
- memset(&error_str, 0, sizeof(error_str));
-
- /* We need to have enough room to store any i915_error_state printf
- * so that we can move it to start position.
- */
- error_str.size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
- error_str.buf = kmalloc(error_str.size,
- GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
-
- if (error_str.buf == NULL) {
- error_str.size = PAGE_SIZE;
- error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
- }
-
- if (error_str.buf == NULL) {
- error_str.size = 128;
- error_str.buf = kmalloc(error_str.size, GFP_TEMPORARY);
- }
-
- if (error_str.buf == NULL)
- return -ENOMEM;
-
- error_str.start = *pos;
+ int ret;
- ret = i915_error_state(error_priv, &error_str);
+ ret = i915_error_state_buf_init(&error_str, count, *pos);
if (ret)
- goto out;
+ return ret;
- if (error_str.bytes == 0 && error_str.err) {
- ret = error_str.err;
+ ret = i915_error_state_to_str(&error_str, error_priv);
+ if (ret)
goto out;
- }
ret_count = simple_read_from_buffer(userbuf, count, &tmp_pos,
error_str.buf,
@@ -1069,7 +765,7 @@ static ssize_t i915_error_state_read(struct file *file, char __user *userbuf,
else
*pos = error_str.start + ret_count;
out:
- kfree(error_str.buf);
+ i915_error_state_buf_release(&error_str);
return ret ?: ret_count;
}
@@ -1246,7 +942,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
(freq_sts >> 8) & 0xff));
mutex_unlock(&dev_priv->rps.hw_lock);
} else {
- seq_printf(m, "no P-state info available\n");
+ seq_puts(m, "no P-state info available\n");
}
return 0;
@@ -1341,28 +1037,28 @@ static int ironlake_drpc_info(struct seq_file *m)
seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
seq_printf(m, "Render standby enabled: %s\n",
(rstdbyctl & RCX_SW_EXIT) ? "no" : "yes");
- seq_printf(m, "Current RS state: ");
+ seq_puts(m, "Current RS state: ");
switch (rstdbyctl & RSX_STATUS_MASK) {
case RSX_STATUS_ON:
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
break;
case RSX_STATUS_RC1:
- seq_printf(m, "RC1\n");
+ seq_puts(m, "RC1\n");
break;
case RSX_STATUS_RC1E:
- seq_printf(m, "RC1E\n");
+ seq_puts(m, "RC1E\n");
break;
case RSX_STATUS_RS1:
- seq_printf(m, "RS1\n");
+ seq_puts(m, "RS1\n");
break;
case RSX_STATUS_RS2:
- seq_printf(m, "RS2 (RC6)\n");
+ seq_puts(m, "RS2 (RC6)\n");
break;
case RSX_STATUS_RS3:
- seq_printf(m, "RC3 (RC6+)\n");
+ seq_puts(m, "RC3 (RC6+)\n");
break;
default:
- seq_printf(m, "unknown\n");
+ seq_puts(m, "unknown\n");
break;
}
@@ -1377,20 +1073,19 @@ static int gen6_drpc_info(struct seq_file *m)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
unsigned forcewake_count;
- int count=0, ret;
-
+ int count = 0, ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- spin_lock_irq(&dev_priv->gt_lock);
- forcewake_count = dev_priv->forcewake_count;
- spin_unlock_irq(&dev_priv->gt_lock);
+ spin_lock_irq(&dev_priv->uncore.lock);
+ forcewake_count = dev_priv->uncore.forcewake_count;
+ spin_unlock_irq(&dev_priv->uncore.lock);
if (forcewake_count) {
- seq_printf(m, "RC information inaccurate because somebody "
- "holds a forcewake reference \n");
+ seq_puts(m, "RC information inaccurate because somebody "
+ "holds a forcewake reference \n");
} else {
/* NB: we cannot use forcewake, else we read the wrong values */
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
@@ -1399,7 +1094,7 @@ static int gen6_drpc_info(struct seq_file *m)
}
gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
- trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);
+ trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
rcctl1 = I915_READ(GEN6_RC_CONTROL);
@@ -1423,25 +1118,25 @@ static int gen6_drpc_info(struct seq_file *m)
yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
seq_printf(m, "Deepest RC6 Enabled: %s\n",
yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
- seq_printf(m, "Current RC state: ");
+ seq_puts(m, "Current RC state: ");
switch (gt_core_status & GEN6_RCn_MASK) {
case GEN6_RC0:
if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
- seq_printf(m, "Core Power Down\n");
+ seq_puts(m, "Core Power Down\n");
else
- seq_printf(m, "on\n");
+ seq_puts(m, "on\n");
break;
case GEN6_RC3:
- seq_printf(m, "RC3\n");
+ seq_puts(m, "RC3\n");
break;
case GEN6_RC6:
- seq_printf(m, "RC6\n");
+ seq_puts(m, "RC6\n");
break;
case GEN6_RC7:
- seq_printf(m, "RC7\n");
+ seq_puts(m, "RC7\n");
break;
default:
- seq_printf(m, "Unknown\n");
+ seq_puts(m, "Unknown\n");
break;
}
@@ -1485,43 +1180,52 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
if (!I915_HAS_FBC(dev)) {
- seq_printf(m, "FBC unsupported on this chipset\n");
+ seq_puts(m, "FBC unsupported on this chipset\n");
return 0;
}
if (intel_fbc_enabled(dev)) {
- seq_printf(m, "FBC enabled\n");
+ seq_puts(m, "FBC enabled\n");
} else {
- seq_printf(m, "FBC disabled: ");
- switch (dev_priv->no_fbc_reason) {
+ seq_puts(m, "FBC disabled: ");
+ switch (dev_priv->fbc.no_fbc_reason) {
+ case FBC_OK:
+ seq_puts(m, "FBC actived, but currently disabled in hardware");
+ break;
+ case FBC_UNSUPPORTED:
+ seq_puts(m, "unsupported by this chipset");
+ break;
case FBC_NO_OUTPUT:
- seq_printf(m, "no outputs");
+ seq_puts(m, "no outputs");
break;
case FBC_STOLEN_TOO_SMALL:
- seq_printf(m, "not enough stolen memory");
+ seq_puts(m, "not enough stolen memory");
break;
case FBC_UNSUPPORTED_MODE:
- seq_printf(m, "mode not supported");
+ seq_puts(m, "mode not supported");
break;
case FBC_MODE_TOO_LARGE:
- seq_printf(m, "mode too large");
+ seq_puts(m, "mode too large");
break;
case FBC_BAD_PLANE:
- seq_printf(m, "FBC unsupported on plane");
+ seq_puts(m, "FBC unsupported on plane");
break;
case FBC_NOT_TILED:
- seq_printf(m, "scanout buffer not tiled");
+ seq_puts(m, "scanout buffer not tiled");
break;
case FBC_MULTIPLE_PIPES:
- seq_printf(m, "multiple pipes are enabled");
+ seq_puts(m, "multiple pipes are enabled");
break;
case FBC_MODULE_PARAM:
- seq_printf(m, "disabled per module param (default off)");
+ seq_puts(m, "disabled per module param (default off)");
+ break;
+ case FBC_CHIP_DEFAULT:
+ seq_puts(m, "disabled per chip default");
break;
default:
- seq_printf(m, "unknown reason");
+ seq_puts(m, "unknown reason");
}
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
return 0;
}
@@ -1604,7 +1308,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
int gpu_freq, ia_freq;
if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
- seq_printf(m, "unsupported on this chipset\n");
+ seq_puts(m, "unsupported on this chipset\n");
return 0;
}
@@ -1612,7 +1316,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
if (ret)
return ret;
- seq_printf(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
+ seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n");
for (gpu_freq = dev_priv->rps.min_delay;
gpu_freq <= dev_priv->rps.max_delay;
@@ -1701,7 +1405,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.bits_per_pixel,
atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
mutex_unlock(&dev->mode_config.mutex);
mutex_lock(&dev->mode_config.fb_lock);
@@ -1716,7 +1420,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.bits_per_pixel,
atomic_read(&fb->base.refcount.refcount));
describe_obj(m, fb->obj);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
mutex_unlock(&dev->mode_config.fb_lock);
@@ -1736,22 +1440,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
return ret;
if (dev_priv->ips.pwrctx) {
- seq_printf(m, "power context ");
+ seq_puts(m, "power context ");
describe_obj(m, dev_priv->ips.pwrctx);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
if (dev_priv->ips.renderctx) {
- seq_printf(m, "render context ");
+ seq_puts(m, "render context ");
describe_obj(m, dev_priv->ips.renderctx);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
for_each_ring(ring, dev_priv, i) {
if (ring->default_context) {
seq_printf(m, "HW default context %s ring ", ring->name);
describe_obj(m, ring->default_context->obj);
- seq_printf(m, "\n");
+ seq_putc(m, '\n');
}
}
@@ -1767,9 +1471,9 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned forcewake_count;
- spin_lock_irq(&dev_priv->gt_lock);
- forcewake_count = dev_priv->forcewake_count;
- spin_unlock_irq(&dev_priv->gt_lock);
+ spin_lock_irq(&dev_priv->uncore.lock);
+ forcewake_count = dev_priv->uncore.forcewake_count;
+ spin_unlock_irq(&dev_priv->uncore.lock);
seq_printf(m, "forcewake count = %u\n", forcewake_count);
@@ -1778,7 +1482,7 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
static const char *swizzle_string(unsigned swizzle)
{
- switch(swizzle) {
+ switch (swizzle) {
case I915_BIT_6_SWIZZLE_NONE:
return "none";
case I915_BIT_6_SWIZZLE_9:
@@ -1868,7 +1572,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
if (dev_priv->mm.aliasing_ppgtt) {
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
- seq_printf(m, "aliasing PPGTT:\n");
+ seq_puts(m, "aliasing PPGTT:\n");
seq_printf(m, "pd gtt offset: 0x%08x\n", ppgtt->pd_offset);
}
seq_printf(m, "ECOCHK: 0x%08x\n", I915_READ(GAM_ECOCHK));
@@ -1886,7 +1590,7 @@ static int i915_dpio_info(struct seq_file *m, void *data)
if (!IS_VALLEYVIEW(dev)) {
- seq_printf(m, "unsupported\n");
+ seq_puts(m, "unsupported\n");
return 0;
}
@@ -1924,6 +1628,194 @@ static int i915_dpio_info(struct seq_file *m, void *data)
return 0;
}
+static int i915_llc(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* Size calculation for LLC is a bit of a pain. Ignore for now. */
+ seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
+ seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
+
+ return 0;
+}
+
+static int i915_edp_psr_status(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 psrstat, psrperf;
+
+ if (!IS_HASWELL(dev)) {
+ seq_puts(m, "PSR not supported on this platform\n");
+ } else if (IS_HASWELL(dev) && I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE) {
+ seq_puts(m, "PSR enabled\n");
+ } else {
+ seq_puts(m, "PSR disabled: ");
+ switch (dev_priv->no_psr_reason) {
+ case PSR_NO_SOURCE:
+ seq_puts(m, "not supported on this platform");
+ break;
+ case PSR_NO_SINK:
+ seq_puts(m, "not supported by panel");
+ break;
+ case PSR_MODULE_PARAM:
+ seq_puts(m, "disabled by flag");
+ break;
+ case PSR_CRTC_NOT_ACTIVE:
+ seq_puts(m, "crtc not active");
+ break;
+ case PSR_PWR_WELL_ENABLED:
+ seq_puts(m, "power well enabled");
+ break;
+ case PSR_NOT_TILED:
+ seq_puts(m, "not tiled");
+ break;
+ case PSR_SPRITE_ENABLED:
+ seq_puts(m, "sprite enabled");
+ break;
+ case PSR_S3D_ENABLED:
+ seq_puts(m, "stereo 3d enabled");
+ break;
+ case PSR_INTERLACED_ENABLED:
+ seq_puts(m, "interlaced enabled");
+ break;
+ case PSR_HSW_NOT_DDIA:
+ seq_puts(m, "HSW ties PSR to DDI A (eDP)");
+ break;
+ default:
+ seq_puts(m, "unknown reason");
+ }
+ seq_puts(m, "\n");
+ return 0;
+ }
+
+ psrstat = I915_READ(EDP_PSR_STATUS_CTL);
+
+ seq_puts(m, "PSR Current State: ");
+ switch (psrstat & EDP_PSR_STATUS_STATE_MASK) {
+ case EDP_PSR_STATUS_STATE_IDLE:
+ seq_puts(m, "Reset state\n");
+ break;
+ case EDP_PSR_STATUS_STATE_SRDONACK:
+ seq_puts(m, "Wait for TG/Stream to send on frame of data after SRD conditions are met\n");
+ break;
+ case EDP_PSR_STATUS_STATE_SRDENT:
+ seq_puts(m, "SRD entry\n");
+ break;
+ case EDP_PSR_STATUS_STATE_BUFOFF:
+ seq_puts(m, "Wait for buffer turn off\n");
+ break;
+ case EDP_PSR_STATUS_STATE_BUFON:
+ seq_puts(m, "Wait for buffer turn on\n");
+ break;
+ case EDP_PSR_STATUS_STATE_AUXACK:
+ seq_puts(m, "Wait for AUX to acknowledge on SRD exit\n");
+ break;
+ case EDP_PSR_STATUS_STATE_SRDOFFACK:
+ seq_puts(m, "Wait for TG/Stream to acknowledge the SRD VDM exit\n");
+ break;
+ default:
+ seq_puts(m, "Unknown\n");
+ break;
+ }
+
+ seq_puts(m, "Link Status: ");
+ switch (psrstat & EDP_PSR_STATUS_LINK_MASK) {
+ case EDP_PSR_STATUS_LINK_FULL_OFF:
+ seq_puts(m, "Link is fully off\n");
+ break;
+ case EDP_PSR_STATUS_LINK_FULL_ON:
+ seq_puts(m, "Link is fully on\n");
+ break;
+ case EDP_PSR_STATUS_LINK_STANDBY:
+ seq_puts(m, "Link is in standby\n");
+ break;
+ default:
+ seq_puts(m, "Unknown\n");
+ break;
+ }
+
+ seq_printf(m, "PSR Entry Count: %u\n",
+ psrstat >> EDP_PSR_STATUS_COUNT_SHIFT &
+ EDP_PSR_STATUS_COUNT_MASK);
+
+ seq_printf(m, "Max Sleep Timer Counter: %u\n",
+ psrstat >> EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT &
+ EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK);
+
+ seq_printf(m, "Had AUX error: %s\n",
+ yesno(psrstat & EDP_PSR_STATUS_AUX_ERROR));
+
+ seq_printf(m, "Sending AUX: %s\n",
+ yesno(psrstat & EDP_PSR_STATUS_AUX_SENDING));
+
+ seq_printf(m, "Sending Idle: %s\n",
+ yesno(psrstat & EDP_PSR_STATUS_SENDING_IDLE));
+
+ seq_printf(m, "Sending TP2 TP3: %s\n",
+ yesno(psrstat & EDP_PSR_STATUS_SENDING_TP2_TP3));
+
+ seq_printf(m, "Sending TP1: %s\n",
+ yesno(psrstat & EDP_PSR_STATUS_SENDING_TP1));
+
+ seq_printf(m, "Idle Count: %u\n",
+ psrstat & EDP_PSR_STATUS_IDLE_MASK);
+
+ psrperf = (I915_READ(EDP_PSR_PERF_CNT)) & EDP_PSR_PERF_CNT_MASK;
+ seq_printf(m, "Performance Counter: %u\n", psrperf);
+
+ return 0;
+}
+
+static int i915_energy_uJ(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u64 power;
+ u32 units;
+
+ if (INTEL_INFO(dev)->gen < 6)
+ return -ENODEV;
+
+ rdmsrl(MSR_RAPL_POWER_UNIT, power);
+ power = (power & 0x1f00) >> 8;
+ units = 1000000 / (1 << power); /* convert to uJ */
+ power = I915_READ(MCH_SECP_NRG_STTS);
+ power *= units;
+
+ seq_printf(m, "%llu", (long long unsigned)power);
+
+ return 0;
+}
+
+static int i915_pc8_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_HASWELL(dev)) {
+ seq_puts(m, "not supported\n");
+ return 0;
+ }
+
+ mutex_lock(&dev_priv->pc8.lock);
+ seq_printf(m, "Requirements met: %s\n",
+ yesno(dev_priv->pc8.requirements_met));
+ seq_printf(m, "GPU idle: %s\n", yesno(dev_priv->pc8.gpu_idle));
+ seq_printf(m, "Disable count: %d\n", dev_priv->pc8.disable_count);
+ seq_printf(m, "IRQs disabled: %s\n",
+ yesno(dev_priv->pc8.irqs_disabled));
+ seq_printf(m, "Enabled: %s\n", yesno(dev_priv->pc8.enabled));
+ mutex_unlock(&dev_priv->pc8.lock);
+
+ return 0;
+}
+
static int
i915_wedged_get(void *data, u64 *val)
{
@@ -2006,6 +1898,8 @@ i915_drop_caches_set(void *data, u64 val)
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj, *next;
+ struct i915_address_space *vm;
+ struct i915_vma *vma, *x;
int ret;
DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
@@ -2026,12 +1920,17 @@ i915_drop_caches_set(void *data, u64 val)
i915_gem_retire_requests(dev);
if (val & DROP_BOUND) {
- list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
- if (obj->pin_count == 0) {
- ret = i915_gem_object_unbind(obj);
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ list_for_each_entry_safe(vma, x, &vm->inactive_list,
+ mm_list) {
+ if (vma->obj->pin_count)
+ continue;
+
+ ret = i915_vma_unbind(vma);
if (ret)
goto unlock;
}
+ }
}
if (val & DROP_UNBOUND) {
@@ -2326,6 +2225,7 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+ {"i915_gem_stolen", i915_gem_stolen_list_info },
{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -2353,64 +2253,42 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_ppgtt_info", i915_ppgtt_info, 0},
{"i915_dpio", i915_dpio_info, 0},
+ {"i915_llc", i915_llc, 0},
+ {"i915_edp_psr_status", i915_edp_psr_status, 0},
+ {"i915_energy_uJ", i915_energy_uJ, 0},
+ {"i915_pc8_status", i915_pc8_status, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
+static struct i915_debugfs_files {
+ const char *name;
+ const struct file_operations *fops;
+} i915_debugfs_files[] = {
+ {"i915_wedged", &i915_wedged_fops},
+ {"i915_max_freq", &i915_max_freq_fops},
+ {"i915_min_freq", &i915_min_freq_fops},
+ {"i915_cache_sharing", &i915_cache_sharing_fops},
+ {"i915_ring_stop", &i915_ring_stop_fops},
+ {"i915_gem_drop_caches", &i915_drop_caches_fops},
+ {"i915_error_state", &i915_error_state_fops},
+ {"i915_next_seqno", &i915_next_seqno_fops},
+};
+
int i915_debugfs_init(struct drm_minor *minor)
{
- int ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_wedged",
- &i915_wedged_fops);
- if (ret)
- return ret;
+ int ret, i;
ret = i915_forcewake_create(minor->debugfs_root, minor);
if (ret)
return ret;
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_max_freq",
- &i915_max_freq_fops);
- if (ret)
- return ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_min_freq",
- &i915_min_freq_fops);
- if (ret)
- return ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_cache_sharing",
- &i915_cache_sharing_fops);
- if (ret)
- return ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_ring_stop",
- &i915_ring_stop_fops);
- if (ret)
- return ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_gem_drop_caches",
- &i915_drop_caches_fops);
- if (ret)
- return ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_error_state",
- &i915_error_state_fops);
- if (ret)
- return ret;
-
- ret = i915_debugfs_create(minor->debugfs_root, minor,
- "i915_next_seqno",
- &i915_next_seqno_fops);
- if (ret)
- return ret;
+ for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
+ ret = i915_debugfs_create(minor->debugfs_root, minor,
+ i915_debugfs_files[i].name,
+ i915_debugfs_files[i].fops);
+ if (ret)
+ return ret;
+ }
return drm_debugfs_create_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES,
@@ -2419,26 +2297,18 @@ int i915_debugfs_init(struct drm_minor *minor)
void i915_debugfs_cleanup(struct drm_minor *minor)
{
+ int i;
+
drm_debugfs_remove_files(i915_debugfs_list,
I915_DEBUGFS_ENTRIES, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_forcewake_fops,
1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_max_freq_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_min_freq_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,
- 1, minor);
- drm_debugfs_remove_files((struct drm_info_list *) &i915_next_seqno_fops,
- 1, minor);
+ for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
+ struct drm_info_list *info_list =
+ (struct drm_info_list *) i915_debugfs_files[i].fops;
+
+ drm_debugfs_remove_files(info_list, 1, minor);
+ }
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 67ec54f67af..fdaa0915ce5 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -976,6 +976,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_LLC:
value = HAS_LLC(dev);
break;
+ case I915_PARAM_HAS_WT:
+ value = HAS_WT(dev);
+ break;
case I915_PARAM_HAS_ALIASING_PPGTT:
value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
break;
@@ -1293,7 +1296,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
intel_register_dsm_handler();
- ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops);
+ ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
if (ret)
goto cleanup_vga_client;
@@ -1323,10 +1326,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = 1;
- if (INTEL_INFO(dev)->num_pipes == 0) {
- dev_priv->mm.suspended = 0;
+ if (INTEL_INFO(dev)->num_pipes == 0)
return 0;
- }
ret = intel_fbdev_init(dev);
if (ret)
@@ -1352,9 +1353,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
drm_kms_helper_poll_init(dev);
- /* We're off and running w/KMS */
- dev_priv->mm.suspended = 0;
-
return 0;
cleanup_gem:
@@ -1363,7 +1361,7 @@ cleanup_gem:
i915_gem_context_fini(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
- drm_mm_takedown(&dev_priv->mm.gtt_space);
+ drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_irq:
drm_irq_uninstall(dev);
cleanup_gem_stolen:
@@ -1441,22 +1439,6 @@ static void i915_dump_device_info(struct drm_i915_private *dev_priv)
}
/**
- * intel_early_sanitize_regs - clean up BIOS state
- * @dev: DRM device
- *
- * This function must be called before we do any I915_READ or I915_WRITE. Its
- * purpose is to clean up any state left by the BIOS that may affect us when
- * reading and/or writing registers.
- */
-static void intel_early_sanitize_regs(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (HAS_FPGA_DBG_UNCLAIMED(dev))
- I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
-}
-
-/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
* @flags: startup flags
@@ -1497,14 +1479,31 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
- spin_lock_init(&dev_priv->rps.lock);
spin_lock_init(&dev_priv->backlight.lock);
+ spin_lock_init(&dev_priv->uncore.lock);
+ spin_lock_init(&dev_priv->mm.object_stat_lock);
mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->rps.hw_lock);
mutex_init(&dev_priv->modeset_restore_lock);
+ mutex_init(&dev_priv->pc8.lock);
+ dev_priv->pc8.requirements_met = false;
+ dev_priv->pc8.gpu_idle = false;
+ dev_priv->pc8.irqs_disabled = false;
+ dev_priv->pc8.enabled = false;
+ dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
+ INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
+
i915_dump_device_info(dev_priv);
+ /* Not all pre-production machines fall into this category, only the
+ * very first ones. Almost everything should work, except for maybe
+ * suspend/resume. And we don't implement workarounds that affect only
+ * pre-production machines. */
+ if (IS_HSW_EARLY_SDV(dev))
+ DRM_INFO("This is an early pre-production Haswell machine. "
+ "It may not be fully functional.\n");
+
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
@@ -1530,7 +1529,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto put_bridge;
}
- intel_early_sanitize_regs(dev);
+ intel_uncore_early_sanitize(dev);
+
+ if (IS_HASWELL(dev) && (I915_READ(HSW_EDRAM_PRESENT) == 1)) {
+ /* The docs do not explain exactly how the calculation can be
+ * made. It is somewhat guessable, but for now, it's always
+ * 128MB.
+ * NB: We can't write IDICR yet because we do not have gt funcs
+ * set up */
+ dev_priv->ellc_size = 128;
+ DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
+ }
ret = i915_gem_gtt_init(dev);
if (ret)
@@ -1566,8 +1575,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_rmmap;
}
- dev_priv->mm.gtt_mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
- aperture_size);
+ dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
+ aperture_size);
/* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
@@ -1593,8 +1602,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_detect_pch(dev);
intel_irq_init(dev);
- intel_gt_sanitize(dev);
- intel_gt_init(dev);
+ intel_pm_init(dev);
+ intel_uncore_sanitize(dev);
+ intel_uncore_init(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
intel_setup_mchbar(dev);
@@ -1629,9 +1639,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto out_gem_unload;
}
- /* Start out suspended */
- dev_priv->mm.suspended = 1;
-
if (HAS_POWER_WELL(dev))
i915_init_power_well(dev);
@@ -1641,6 +1648,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
DRM_ERROR("failed to init modeset\n");
goto out_gem_unload;
}
+ } else {
+ /* Start out suspended in ums mode. */
+ dev_priv->ums.mm_suspended = 1;
}
i915_setup_sysfs(dev);
@@ -1648,7 +1658,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (INTEL_INFO(dev)->num_pipes) {
/* Must be done after probing outputs */
intel_opregion_init(dev);
- acpi_video_register_with_quirks();
+ acpi_video_register();
}
if (IS_GEN5(dev))
@@ -1667,9 +1677,9 @@ out_gem_unload:
intel_teardown_mchbar(dev);
destroy_workqueue(dev_priv->wq);
out_mtrrfree:
- arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
+ arch_phys_wc_del(dev_priv->gtt.mtrr);
io_mapping_free(dev_priv->gtt.mappable);
- dev_priv->gtt.gtt_remove(dev);
+ dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
@@ -1686,8 +1696,13 @@ int i915_driver_unload(struct drm_device *dev)
intel_gpu_ips_teardown();
- if (HAS_POWER_WELL(dev))
+ if (HAS_POWER_WELL(dev)) {
+ /* The i915.ko module is still not prepared to be loaded when
+ * the power well is not enabled, so just enable it in case
+ * we're going to unload/reload. */
+ intel_set_power_well(dev, true);
i915_remove_power_well(dev);
+ }
i915_teardown_sysfs(dev);
@@ -1705,7 +1720,7 @@ int i915_driver_unload(struct drm_device *dev)
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
io_mapping_free(dev_priv->gtt.mappable);
- arch_phys_wc_del(dev_priv->mm.gtt_mtrr);
+ arch_phys_wc_del(dev_priv->gtt.mtrr);
acpi_video_unregister();
@@ -1733,6 +1748,8 @@ int i915_driver_unload(struct drm_device *dev)
cancel_work_sync(&dev_priv->gpu_error.work);
i915_destroy_error_state(dev);
+ cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
+
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
@@ -1754,7 +1771,9 @@ int i915_driver_unload(struct drm_device *dev)
i915_free_hws(dev);
}
- drm_mm_takedown(&dev_priv->mm.gtt_space);
+ list_del(&dev_priv->gtt.base.global_link);
+ WARN_ON(!list_empty(&dev_priv->vm_list));
+ drm_mm_takedown(&dev_priv->gtt.base.mm);
if (dev_priv->regs != NULL)
pci_iounmap(dev->pdev, dev_priv->regs);
@@ -1764,7 +1783,7 @@ int i915_driver_unload(struct drm_device *dev)
destroy_workqueue(dev_priv->wq);
pm_qos_remove_request(&dev_priv->pm_qos);
- dev_priv->gtt.gtt_remove(dev);
+ dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
if (dev_priv->slab)
kmem_cache_destroy(dev_priv->slab);
@@ -1840,14 +1859,14 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
kfree(file_priv);
}
-struct drm_ioctl_desc i915_ioctls[] = {
+const struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
- DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
@@ -1860,35 +1879,35 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 45b3c030f48..ccb28ead350 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -118,10 +118,14 @@ module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
MODULE_PARM_DESC(i915_enable_ppgtt,
"Enable PPGTT (default: true)");
-unsigned int i915_preliminary_hw_support __read_mostly = 0;
+int i915_enable_psr __read_mostly = 0;
+module_param_named(enable_psr, i915_enable_psr, int, 0600);
+MODULE_PARM_DESC(enable_psr, "Enable PSR (default: false)");
+
+unsigned int i915_preliminary_hw_support __read_mostly = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT);
module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
- "Enable preliminary hardware support. (default: false)");
+ "Enable preliminary hardware support.");
int i915_disable_power_well __read_mostly = 1;
module_param_named(disable_power_well, i915_disable_power_well, int, 0600);
@@ -132,6 +136,24 @@ int i915_enable_ips __read_mostly = 1;
module_param_named(enable_ips, i915_enable_ips, int, 0600);
MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
+bool i915_fastboot __read_mostly = 0;
+module_param_named(fastboot, i915_fastboot, bool, 0600);
+MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time "
+ "(default: false)");
+
+int i915_enable_pc8 __read_mostly = 1;
+module_param_named(enable_pc8, i915_enable_pc8, int, 0600);
+MODULE_PARM_DESC(enable_pc8, "Enable support for low power package C states (PC8+) (default: true)");
+
+int i915_pc8_timeout __read_mostly = 5000;
+module_param_named(pc8_timeout, i915_pc8_timeout, int, 0600);
+MODULE_PARM_DESC(pc8_timeout, "Number of msecs of idleness required to enter PC8+ (default: 5000)");
+
+bool i915_prefault_disable __read_mostly;
+module_param_named(prefault_disable, i915_prefault_disable, bool, 0600);
+MODULE_PARM_DESC(prefault_disable,
+ "Disable page prefaulting for pread/pwrite/reloc (default:false). For developers only.");
+
static struct drm_driver driver;
extern int intel_agp_enabled;
@@ -543,6 +565,9 @@ static int i915_drm_freeze(struct drm_device *dev)
dev_priv->modeset_restore = MODESET_SUSPENDED;
mutex_unlock(&dev_priv->modeset_restore_lock);
+ /* We do a lot of poking in a lot of registers, make sure they work
+ * properly. */
+ hsw_disable_package_c8(dev_priv);
intel_set_power_well(dev, true);
drm_kms_helper_poll_disable(dev);
@@ -551,7 +576,11 @@ static int i915_drm_freeze(struct drm_device *dev)
/* If KMS is active, we do the leavevt stuff here */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- int error = i915_gem_idle(dev);
+ int error;
+
+ mutex_lock(&dev->struct_mutex);
+ error = i915_gem_idle(dev);
+ mutex_unlock(&dev->struct_mutex);
if (error) {
dev_err(&dev->pdev->dev,
"GEM idle failed, resume might fail\n");
@@ -656,7 +685,6 @@ static int __i915_drm_thaw(struct drm_device *dev)
intel_init_pch_refclk(dev);
mutex_lock(&dev->struct_mutex);
- dev_priv->mm.suspended = 0;
error = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
@@ -696,6 +724,10 @@ static int __i915_drm_thaw(struct drm_device *dev)
schedule_work(&dev_priv->console_resume_work);
}
+ /* Undo what we did at i915_drm_freeze so the refcount goes back to the
+ * expected level. */
+ hsw_enable_package_c8(dev_priv);
+
mutex_lock(&dev_priv->modeset_restore_lock);
dev_priv->modeset_restore = MODESET_DONE;
mutex_unlock(&dev_priv->modeset_restore_lock);
@@ -706,7 +738,7 @@ static int i915_drm_thaw(struct drm_device *dev)
{
int error = 0;
- intel_gt_sanitize(dev);
+ intel_uncore_sanitize(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
mutex_lock(&dev->struct_mutex);
@@ -732,7 +764,7 @@ int i915_resume(struct drm_device *dev)
pci_set_master(dev->pdev);
- intel_gt_sanitize(dev);
+ intel_uncore_sanitize(dev);
/*
* Platforms with opregion should have sane BIOS, older ones (gen3 and
@@ -753,139 +785,6 @@ int i915_resume(struct drm_device *dev)
return 0;
}
-static int i8xx_do_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_I85X(dev))
- return -ENODEV;
-
- I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
- POSTING_READ(D_STATE);
-
- if (IS_I830(dev) || IS_845G(dev)) {
- I915_WRITE(DEBUG_RESET_I830,
- DEBUG_RESET_DISPLAY |
- DEBUG_RESET_RENDER |
- DEBUG_RESET_FULL);
- POSTING_READ(DEBUG_RESET_I830);
- msleep(1);
-
- I915_WRITE(DEBUG_RESET_I830, 0);
- POSTING_READ(DEBUG_RESET_I830);
- }
-
- msleep(1);
-
- I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
- POSTING_READ(D_STATE);
-
- return 0;
-}
-
-static int i965_reset_complete(struct drm_device *dev)
-{
- u8 gdrst;
- pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
- return (gdrst & GRDOM_RESET_ENABLE) == 0;
-}
-
-static int i965_do_reset(struct drm_device *dev)
-{
- int ret;
- u8 gdrst;
-
- /*
- * Set the domains we want to reset (GRDOM/bits 2 and 3) as
- * well as the reset bit (GR/bit 0). Setting the GR bit
- * triggers the reset; when done, the hardware will clear it.
- */
- pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
- pci_write_config_byte(dev->pdev, I965_GDRST,
- gdrst | GRDOM_RENDER |
- GRDOM_RESET_ENABLE);
- ret = wait_for(i965_reset_complete(dev), 500);
- if (ret)
- return ret;
-
- /* We can't reset render&media without also resetting display ... */
- pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
- pci_write_config_byte(dev->pdev, I965_GDRST,
- gdrst | GRDOM_MEDIA |
- GRDOM_RESET_ENABLE);
-
- return wait_for(i965_reset_complete(dev), 500);
-}
-
-static int ironlake_do_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 gdrst;
- int ret;
-
- gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
- gdrst &= ~GRDOM_MASK;
- I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
- gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
- ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
- if (ret)
- return ret;
-
- /* We can't reset render&media without also resetting display ... */
- gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
- gdrst &= ~GRDOM_MASK;
- I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
- gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
- return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
-}
-
-static int gen6_do_reset(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret;
- unsigned long irqflags;
-
- /* Hold gt_lock across reset to prevent any register access
- * with forcewake not set correctly
- */
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
-
- /* Reset the chip */
-
- /* GEN6_GDRST is not in the gt power well, no need to check
- * for fifo space for the write or forcewake the chip for
- * the read
- */
- I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
-
- /* Spin waiting for the device to ack the reset request */
- ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
-
- /* If reset with a user forcewake, try to restore, otherwise turn it off */
- if (dev_priv->forcewake_count)
- dev_priv->gt.force_wake_get(dev_priv);
- else
- dev_priv->gt.force_wake_put(dev_priv);
-
- /* Restore fifo count */
- dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
-
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
- return ret;
-}
-
-int intel_gpu_reset(struct drm_device *dev)
-{
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6: return gen6_do_reset(dev);
- case 5: return ironlake_do_reset(dev);
- case 4: return i965_do_reset(dev);
- case 2: return i8xx_do_reset(dev);
- default: return -ENODEV;
- }
-}
-
/**
* i915_reset - reset chip after a hang
* @dev: drm device to reset
@@ -955,11 +854,11 @@ int i915_reset(struct drm_device *dev)
* switched away).
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
- !dev_priv->mm.suspended) {
+ !dev_priv->ums.mm_suspended) {
struct intel_ring_buffer *ring;
int i;
- dev_priv->mm.suspended = 0;
+ dev_priv->ums.mm_suspended = 0;
i915_gem_init_swizzling(dev);
@@ -1110,7 +1009,6 @@ static const struct file_operations i915_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_gem_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
.read = drm_read,
#ifdef CONFIG_COMPAT
.compat_ioctl = i915_compat_ioctl,
@@ -1123,8 +1021,9 @@ static struct drm_driver driver = {
* deal with them for Intel hardware.
*/
.driver_features =
- DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME,
+ DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
+ DRIVER_RENDER,
.load = i915_driver_load,
.unload = i915_driver_unload,
.open = i915_driver_open,
@@ -1154,7 +1053,7 @@ static struct drm_driver driver = {
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_mmap_gtt,
- .dumb_destroy = i915_gem_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.ioctls = i915_ioctls,
.fops = &i915_driver_fops,
.name = DRIVER_NAME,
@@ -1215,136 +1114,3 @@ module_exit(i915_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
-
-/* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(dev_priv, reg) \
- ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
-static void
-ilk_dummy_write(struct drm_i915_private *dev_priv)
-{
- /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
- * the chip from rc6 before touching it for real. MI_MODE is masked,
- * hence harmless to write 0 into. */
- I915_WRITE_NOTRACE(MI_MODE, 0);
-}
-
-static void
-hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
-{
- if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
- (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
- DRM_ERROR("Unknown unclaimed register before writing to %x\n",
- reg);
- I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
- }
-}
-
-static void
-hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
-{
- if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
- (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
- DRM_ERROR("Unclaimed write to %x\n", reg);
- I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
- }
-}
-
-#define __i915_read(x, y) \
-u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
- unsigned long irqflags; \
- u##x val = 0; \
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
- if (IS_GEN5(dev_priv->dev)) \
- ilk_dummy_write(dev_priv); \
- if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- if (dev_priv->forcewake_count == 0) \
- dev_priv->gt.force_wake_get(dev_priv); \
- val = read##y(dev_priv->regs + reg); \
- if (dev_priv->forcewake_count == 0) \
- dev_priv->gt.force_wake_put(dev_priv); \
- } else { \
- val = read##y(dev_priv->regs + reg); \
- } \
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
- trace_i915_reg_rw(false, reg, val, sizeof(val)); \
- return val; \
-}
-
-__i915_read(8, b)
-__i915_read(16, w)
-__i915_read(32, l)
-__i915_read(64, q)
-#undef __i915_read
-
-#define __i915_write(x, y) \
-void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
- unsigned long irqflags; \
- u32 __fifo_ret = 0; \
- trace_i915_reg_rw(true, reg, val, sizeof(val)); \
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
- if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
- } \
- if (IS_GEN5(dev_priv->dev)) \
- ilk_dummy_write(dev_priv); \
- hsw_unclaimed_reg_clear(dev_priv, reg); \
- write##y(val, dev_priv->regs + reg); \
- if (unlikely(__fifo_ret)) { \
- gen6_gt_check_fifodbg(dev_priv); \
- } \
- hsw_unclaimed_reg_check(dev_priv, reg); \
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
-}
-__i915_write(8, b)
-__i915_write(16, w)
-__i915_write(32, l)
-__i915_write(64, q)
-#undef __i915_write
-
-static const struct register_whitelist {
- uint64_t offset;
- uint32_t size;
- uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
-} whitelist[] = {
- { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
-};
-
-int i915_reg_read_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_reg_read *reg = data;
- struct register_whitelist const *entry = whitelist;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
- if (entry->offset == reg->offset &&
- (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
- break;
- }
-
- if (i == ARRAY_SIZE(whitelist))
- return -EINVAL;
-
- switch (entry->size) {
- case 8:
- reg->val = I915_READ64(reg->offset);
- break;
- case 4:
- reg->val = I915_READ(reg->offset);
- break;
- case 2:
- reg->val = I915_READ16(reg->offset);
- break;
- case 1:
- reg->val = I915_READ8(reg->offset);
- break;
- default:
- WARN_ON(1);
- return -EINVAL;
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d2ee3343c94..52a3785a3fd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -144,6 +144,7 @@ enum intel_dpll_id {
struct intel_dpll_hw_state {
uint32_t dpll;
+ uint32_t dpll_md;
uint32_t fp0;
uint32_t fp1;
};
@@ -156,6 +157,8 @@ struct intel_shared_dpll {
/* should match the index in the dev_priv->shared_dplls array */
enum intel_dpll_id id;
struct intel_dpll_hw_state hw_state;
+ void (*mode_set)(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll);
void (*enable)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll);
void (*disable)(struct drm_i915_private *dev_priv,
@@ -198,7 +201,6 @@ struct intel_ddi_plls {
#define DRIVER_MINOR 6
#define DRIVER_PATCHLEVEL 0
-#define WATCH_COHERENCY 0
#define WATCH_LISTS 0
#define WATCH_GTT 0
@@ -320,8 +322,8 @@ struct drm_i915_error_state {
u32 purgeable:1;
s32 ring:4;
u32 cache_level:2;
- } *active_bo, *pinned_bo;
- u32 active_bo_count, pinned_bo_count;
+ } **active_bo, **pinned_bo;
+ u32 *active_bo_count, *pinned_bo_count;
struct intel_overlay_error_state *overlay;
struct intel_display_error_state *display;
};
@@ -356,14 +358,16 @@ struct drm_i915_display_funcs {
struct dpll *match_clock,
struct dpll *best_clock);
void (*update_wm)(struct drm_device *dev);
- void (*update_sprite_wm)(struct drm_device *dev, int pipe,
+ void (*update_sprite_wm)(struct drm_plane *plane,
+ struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
- bool enable);
+ bool enable, bool scaled);
void (*modeset_global_resources)(struct drm_device *dev);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
bool (*get_pipe_config)(struct intel_crtc *,
struct intel_crtc_config *);
+ void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
int (*crtc_mode_set)(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *old_fb);
@@ -376,7 +380,8 @@ struct drm_i915_display_funcs {
void (*init_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj);
+ struct drm_i915_gem_object *obj,
+ uint32_t flags);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
void (*hpd_irq_setup)(struct drm_device *dev);
@@ -387,11 +392,20 @@ struct drm_i915_display_funcs {
/* pll clock increase/decrease */
};
-struct drm_i915_gt_funcs {
+struct intel_uncore_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv);
void (*force_wake_put)(struct drm_i915_private *dev_priv);
};
+struct intel_uncore {
+ spinlock_t lock; /** lock is also taken in irq contexts. */
+
+ struct intel_uncore_funcs funcs;
+
+ unsigned fifo_count;
+ unsigned forcewake_count;
+};
+
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
func(is_mobile) sep \
func(is_i85x) sep \
@@ -436,12 +450,64 @@ struct intel_device_info {
enum i915_cache_level {
I915_CACHE_NONE = 0,
- I915_CACHE_LLC,
- I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
+ I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
+ I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
+ caches, e.g. sampler/render caches, and the
+ large Last-Level-Cache. LLC is coherent with
+ the CPU, but L3 is only visible to the GPU. */
+ I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};
typedef uint32_t gen6_gtt_pte_t;
+struct i915_address_space {
+ struct drm_mm mm;
+ struct drm_device *dev;
+ struct list_head global_link;
+ unsigned long start; /* Start offset always 0 for dri2 */
+ size_t total; /* size addr space maps (ex. 2GB for ggtt) */
+
+ struct {
+ dma_addr_t addr;
+ struct page *page;
+ } scratch;
+
+ /**
+ * List of objects currently involved in rendering.
+ *
+ * Includes buffers having the contents of their GPU caches
+ * flushed, not necessarily primitives. last_rendering_seqno
+ * represents when the rendering involved will be completed.
+ *
+ * A reference is held on the buffer while on this list.
+ */
+ struct list_head active_list;
+
+ /**
+ * LRU list of objects which are not in the ringbuffer and
+ * are ready to unbind, but are still in the GTT.
+ *
+ * last_rendering_seqno is 0 while an object is in this list.
+ *
+ * A reference is not held on the buffer while on this list,
+ * as merely being GTT-bound shouldn't prevent its being
+ * freed, and we'll pull it off the list in the free path.
+ */
+ struct list_head inactive_list;
+
+ /* FIXME: Need a more generic return type */
+ gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
+ enum i915_cache_level level);
+ void (*clear_range)(struct i915_address_space *vm,
+ unsigned int first_entry,
+ unsigned int num_entries);
+ void (*insert_entries)(struct i915_address_space *vm,
+ struct sg_table *st,
+ unsigned int first_entry,
+ enum i915_cache_level cache_level);
+ void (*cleanup)(struct i915_address_space *vm);
+};
+
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
@@ -450,8 +516,7 @@ typedef uint32_t gen6_gtt_pte_t;
* the spec.
*/
struct i915_gtt {
- unsigned long start; /* Start offset of used GTT */
- size_t total; /* Total size GTT can map */
+ struct i915_address_space base;
size_t stolen_size; /* Total size of stolen memory */
unsigned long mappable_end; /* End offset that we can CPU map */
@@ -462,50 +527,47 @@ struct i915_gtt {
void __iomem *gsm;
bool do_idle_maps;
- dma_addr_t scratch_page_dma;
- struct page *scratch_page;
+
+ int mtrr;
/* global gtt ops */
int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
size_t *stolen, phys_addr_t *mappable_base,
unsigned long *mappable_end);
- void (*gtt_remove)(struct drm_device *dev);
- void (*gtt_clear_range)(struct drm_device *dev,
- unsigned int first_entry,
- unsigned int num_entries);
- void (*gtt_insert_entries)(struct drm_device *dev,
- struct sg_table *st,
- unsigned int pg_start,
- enum i915_cache_level cache_level);
- gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
- dma_addr_t addr,
- enum i915_cache_level level);
};
-#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
+#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
-#define I915_PPGTT_PD_ENTRIES 512
-#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
- struct drm_device *dev;
+ struct i915_address_space base;
unsigned num_pd_entries;
struct page **pt_pages;
uint32_t pd_offset;
dma_addr_t *pt_dma_addr;
- dma_addr_t scratch_page_dma_addr;
- /* pte functions, mirroring the interface of the global gtt. */
- void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
- unsigned int first_entry,
- unsigned int num_entries);
- void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
- struct sg_table *st,
- unsigned int pg_start,
- enum i915_cache_level cache_level);
- gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
- dma_addr_t addr,
- enum i915_cache_level level);
int (*enable)(struct drm_device *dev);
- void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
+};
+
+/**
+ * A VMA represents a GEM BO that is bound into an address space. Therefore, a
+ * VMA's presence cannot be guaranteed before the object is bound into, or
+ * after it is unbound from, the address space.
+ *
+ * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
+ * will always be <= an object's lifetime. So object refcounting should cover us.
+ */
+struct i915_vma {
+ struct drm_mm_node node;
+ struct drm_i915_gem_object *obj;
+ struct i915_address_space *vm;
+
+ /** This object's place on the active/inactive lists */
+ struct list_head mm_list;
+
+ struct list_head vma_link; /* Link in the object's VMA list */
+
+ /** This vma's place in the batchbuffer or on the eviction list */
+ struct list_head exec_list;
+
};
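With VMAs in place an object can, in principle, be bound into more than one address space, so code that used to read obj->gtt_space now has to locate the VMA for the space it cares about first. A standalone sketch of that lookup (simplified types and hypothetical names; the real helper is the i915_gem_obj_to_vma() declared later in this patch, built on struct list_head rather than a hand-rolled list):

#include <stdio.h>

/* Simplified stand-ins; the kernel links VMAs through obj->vma_list. */
struct address_space { const char *name; };

struct vma {
	struct address_space *vm;	/* space this binding lives in */
	unsigned long start;		/* offset of the binding in that space */
	struct vma *next;		/* stands in for the vma_link list */
};

struct gem_object {
	struct vma *vma_list;		/* all bindings of this object */
};

/* Return the binding of @obj in @vm, or NULL if it is not bound there. */
static struct vma *obj_to_vma(struct gem_object *obj, struct address_space *vm)
{
	struct vma *v;

	for (v = obj->vma_list; v; v = v->next)
		if (v->vm == vm)
			return v;
	return NULL;
}

int main(void)
{
	struct address_space ggtt = { "ggtt" }, ppgtt = { "ppgtt" };
	struct vma bind_ppgtt = { &ppgtt, 0x20000, NULL };
	struct vma bind_ggtt = { &ggtt, 0x10000, &bind_ppgtt };
	struct gem_object obj = { &bind_ggtt };
	struct vma *v = obj_to_vma(&obj, &ppgtt);

	printf("bound in %s at %#lx\n", v ? v->vm->name : "nowhere",
	       v ? v->start : 0UL);
	return 0;
}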
struct i915_ctx_hang_stats {
@@ -528,15 +590,48 @@ struct i915_hw_context {
struct i915_ctx_hang_stats hang_stats;
};
-enum no_fbc_reason {
- FBC_NO_OUTPUT, /* no outputs enabled to compress */
- FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
- FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
- FBC_MODE_TOO_LARGE, /* mode too large for compression */
- FBC_BAD_PLANE, /* fbc not supported on plane */
- FBC_NOT_TILED, /* buffer not tiled */
- FBC_MULTIPLE_PIPES, /* more than one pipe active */
- FBC_MODULE_PARAM,
+struct i915_fbc {
+ unsigned long size;
+ unsigned int fb_id;
+ enum plane plane;
+ int y;
+
+ struct drm_mm_node *compressed_fb;
+ struct drm_mm_node *compressed_llb;
+
+ struct intel_fbc_work {
+ struct delayed_work work;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ int interval;
+ } *fbc_work;
+
+ enum no_fbc_reason {
+ FBC_OK, /* FBC is enabled */
+ FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
+ FBC_NO_OUTPUT, /* no outputs enabled to compress */
+ FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
+ FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+ FBC_MODE_TOO_LARGE, /* mode too large for compression */
+ FBC_BAD_PLANE, /* fbc not supported on plane */
+ FBC_NOT_TILED, /* buffer not tiled */
+ FBC_MULTIPLE_PIPES, /* more than one pipe active */
+ FBC_MODULE_PARAM,
+ FBC_CHIP_DEFAULT, /* disabled by default on this chip */
+ } no_fbc_reason;
+};
+
+enum no_psr_reason {
+ PSR_NO_SOURCE, /* Not supported on platform */
+ PSR_NO_SINK, /* Not supported by panel */
+ PSR_MODULE_PARAM,
+ PSR_CRTC_NOT_ACTIVE,
+ PSR_PWR_WELL_ENABLED,
+ PSR_NOT_TILED,
+ PSR_SPRITE_ENABLED,
+ PSR_S3D_ENABLED,
+ PSR_INTERLACED_ENABLED,
+ PSR_HSW_NOT_DDIA,
};
enum intel_pch {
@@ -722,12 +817,12 @@ struct i915_suspend_saved_registers {
};
struct intel_gen6_power_mgmt {
+ /* work and pm_iir are protected by dev_priv->irq_lock */
struct work_struct work;
- struct delayed_work vlv_work;
u32 pm_iir;
- /* lock - irqsave spinlock that protectects the work_struct and
- * pm_iir. */
- spinlock_t lock;
+
+ /* On vlv we need to manually drop to Vmin with a delayed work. */
+ struct delayed_work vlv_work;
/* The below variables and all the rps hw state are protected by
 * dev->struct_mutex. */
@@ -793,6 +888,18 @@ struct i915_dri1_state {
uint32_t counter;
};
+struct i915_ums_state {
+ /**
+ * Flag if the X Server, and thus DRM, is not currently in
+ * control of the device.
+ *
+ * This is set between LeaveVT and EnterVT. It needs to be
+ * replaced with a semaphore. It also needs to be
+ * transitioned away from for kernel modesetting.
+ */
+ int mm_suspended;
+};
+
struct intel_l3_parity {
u32 *remap_info;
struct work_struct error_work;
@@ -801,8 +908,6 @@ struct intel_l3_parity {
struct i915_gem_mm {
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
- /** Memory allocator for GTT */
- struct drm_mm gtt_space;
/** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */
struct list_head bound_list;
@@ -816,37 +921,12 @@ struct i915_gem_mm {
/** Usable portion of the GTT for GEM */
unsigned long stolen_base; /* limited to low memory (32-bit) */
- int gtt_mtrr;
-
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
struct shrinker inactive_shrinker;
bool shrinker_no_lock_stealing;
- /**
- * List of objects currently involved in rendering.
- *
- * Includes buffers having the contents of their GPU caches
- * flushed, not necessarily primitives. last_rendering_seqno
- * represents when the rendering involved will be completed.
- *
- * A reference is held on the buffer while on this list.
- */
- struct list_head active_list;
-
- /**
- * LRU list of objects which are not in the ringbuffer and
- * are ready to unbind, but are still in the GTT.
- *
- * last_rendering_seqno is 0 while an object is in this list.
- *
- * A reference is not held on the buffer while on this list,
- * as merely being GTT-bound shouldn't prevent its being
- * freed, and we'll pull it off the list in the free path.
- */
- struct list_head inactive_list;
-
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
@@ -865,16 +945,6 @@ struct i915_gem_mm {
*/
bool interruptible;
- /**
- * Flag if the X Server, and thus DRM, is not currently in
- * control of the device.
- *
- * This is set between LeaveVT and EnterVT. It needs to be
- * replaced with a semaphore. It also needs to be
- * transitioned away from for kernel modesetting.
- */
- int suspended;
-
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
@@ -884,6 +954,7 @@ struct i915_gem_mm {
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
/* accounting, useful for userland debugging */
+ spinlock_t object_stat_lock;
size_t object_memory;
u32 object_count;
};
@@ -897,6 +968,11 @@ struct drm_i915_error_state_buf {
loff_t pos;
};
+struct i915_error_state_file_priv {
+ struct drm_device *dev;
+ struct drm_i915_error_state *error;
+};
+
struct i915_gpu_error {
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
@@ -988,6 +1064,88 @@ struct intel_vbt_data {
struct child_device_config *child_dev;
};
+enum intel_ddb_partitioning {
+ INTEL_DDB_PART_1_2,
+ INTEL_DDB_PART_5_6, /* IVB+ */
+};
+
+struct intel_wm_level {
+ bool enable;
+ uint32_t pri_val;
+ uint32_t spr_val;
+ uint32_t cur_val;
+ uint32_t fbc_val;
+};
+
+/*
+ * This struct tracks the state needed for the Package C8+ feature.
+ *
+ * Package states C8 and deeper are really deep PC states that can only be
+ * reached when all the devices on the system allow it, so even if the graphics
+ * device allows PC8+, it doesn't mean the system will actually get to these
+ * states.
+ *
+ * Our driver only allows PC8+ when all the outputs are disabled, the power well
+ * is disabled and the GPU is idle. When these conditions are met, we manually
+ * take care of the remaining steps: we disable the interrupts and clocks and
+ * switch the LCPLL refclk to Fclk.
+ *
+ * When we really reach PC8 or deeper states (not just when we allow it) we lose
+ * the state of some registers, so when we come back from PC8+ we need to
+ * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
+ * need to take care of the registers kept by RC6.
+ *
+ * The interrupt disabling is part of the requirements. We can only leave the
+ * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we
+ * can lock the machine.
+ *
+ * Ideally every piece of our code that needs PC8+ disabled would call
+ * hsw_disable_package_c8, which would increment disable_count and prevent the
+ * system from reaching PC8+. But we don't have a symmetric way to do this for
+ * everything, so we have the requirements_met and gpu_idle variables. When we
+ * switch requirements_met or gpu_idle to true we decrease disable_count, and
+ * increase it in the opposite case. The requirements_met variable is true when
+ * all the CRTCs, encoders and the power well are disabled. The gpu_idle
+ * variable is true when the GPU is idle.
+ *
+ * On top of all this, we only actually enable PC8+ if disable_count stays at
+ * zero for a few seconds. This is implemented with the enable_work delayed
+ * work. We do this so we don't enable/disable PC8 dozens of consecutive times
+ * when all screens are disabled and some background app queries the state of
+ * our connectors, or we have some application constantly waking up to use the
+ * GPU. Only after the enable_work function actually enables PC8+ does the
+ * "enabled" variable become true, which means that it can be false even if
+ * disable_count is 0.
+ *
+ * The irqs_disabled variable becomes true exactly after we disable the IRQs and
+ * goes back to false exactly before we reenable the IRQs. We use this variable
+ * to check if someone is trying to enable/disable IRQs while they're supposed
+ * to be disabled. This shouldn't happen and we'll print some error messages in
+ * case it happens, but if it actually happens we'll also update the variables
+ * inside struct regsave so when we restore the IRQs they will contain the
+ * latest expected values.
+ *
+ * For more, read "Display Sequences for Package C8" in our documentation.
+ */
+struct i915_package_c8 {
+ bool requirements_met;
+ bool gpu_idle;
+ bool irqs_disabled;
+ /* Only true after the delayed work task actually enables it. */
+ bool enabled;
+ int disable_count;
+ struct mutex lock;
+ struct delayed_work enable_work;
+
+ struct {
+ uint32_t deimr;
+ uint32_t sdeimr;
+ uint32_t gtimr;
+ uint32_t gtier;
+ uint32_t gen6_pmimr;
+ } regsave;
+};
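The disable_count described in the comment above is effectively a reference count of reasons PC8+ must stay off: hsw_disable_package_c8-style callers bump it, and the requirements_met/gpu_idle transitions drop it. A standalone sketch of just that counting scheme (hypothetical names; the real code holds pc8.lock around these updates and only flips "enabled" from the enable_work delayed work):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: count the reasons PC8+ must stay disabled. */
struct pc8_state {
	int disable_count;	/* > 0 means something forbids PC8+ */
	bool enabled;		/* true only once PC8+ is actually allowed */
};

/* Called by anything that needs PC8+ off (busy GPU, active outputs, ...). */
static void pc8_disable(struct pc8_state *pc8)
{
	if (pc8->disable_count++ == 0 && pc8->enabled) {
		pc8->enabled = false;
		printf("leave PC8+: re-enable IRQs/clocks, restore LCPLL refclk\n");
	}
}

/* Called when one of those reasons goes away. */
static void pc8_allow(struct pc8_state *pc8)
{
	if (--pc8->disable_count == 0) {
		/* The driver only flips this after enable_work has run. */
		pc8->enabled = true;
		printf("no blockers left: PC8+ may be entered\n");
	}
}

int main(void)
{
	/* Start with one blocker: the display/power-well requirements. */
	struct pc8_state pc8 = { .disable_count = 1, .enabled = false };

	pc8_disable(&pc8);	/* GPU becomes busy */
	pc8_allow(&pc8);	/* GPU goes idle again */
	pc8_allow(&pc8);	/* outputs and power well now disabled too */
	return 0;
}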
+
typedef struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
@@ -998,14 +1156,7 @@ typedef struct drm_i915_private {
void __iomem *regs;
- struct drm_i915_gt_funcs gt;
- /** gt_fifo_count and the subsequent register write are synchronized
- * with dev->struct_mutex. */
- unsigned gt_fifo_count;
- /** forcewake_count is protected by gt_lock */
- unsigned forcewake_count;
- /** gt_lock is also taken in irq contexts. */
- spinlock_t gt_lock;
+ struct intel_uncore uncore;
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
@@ -1042,6 +1193,7 @@ typedef struct drm_i915_private {
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 irq_mask;
u32 gt_irq_mask;
+ u32 pm_irq_mask;
struct work_struct hotplug_work;
bool enable_hotplug_processing;
@@ -1059,12 +1211,7 @@ typedef struct drm_i915_private {
int num_plane;
- unsigned long cfb_size;
- unsigned int cfb_fb;
- enum plane cfb_plane;
- int cfb_y;
- struct intel_fbc_work *fbc_work;
-
+ struct i915_fbc fbc;
struct intel_opregion opregion;
struct intel_vbt_data vbt;
@@ -1081,8 +1228,6 @@ typedef struct drm_i915_private {
} backlight;
/* LVDS info */
- struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
- struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
bool no_aux_handshake;
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
@@ -1105,7 +1250,8 @@ typedef struct drm_i915_private {
enum modeset_restore modeset_restore;
struct mutex modeset_restore_lock;
- struct i915_gtt gtt;
+ struct list_head vm_list; /* Global list of all address spaces */
+ struct i915_gtt gtt; /* VMA representing the global address space */
struct i915_gem_mm mm;
@@ -1132,6 +1278,9 @@ typedef struct drm_i915_private {
struct intel_l3_parity l3_parity;
+ /* Cannot be determined by PCIID. You must always read a register. */
+ size_t ellc_size;
+
/* gen6+ rps state */
struct intel_gen6_power_mgmt rps;
@@ -1142,10 +1291,7 @@ typedef struct drm_i915_private {
/* Haswell power well */
struct i915_power_well power_well;
- enum no_fbc_reason no_fbc_reason;
-
- struct drm_mm_node *compressed_fb;
- struct drm_mm_node *compressed_llb;
+ enum no_psr_reason no_psr_reason;
struct i915_gpu_error gpu_error;
@@ -1170,11 +1316,34 @@ typedef struct drm_i915_private {
struct i915_suspend_saved_registers regfile;
+ struct {
+ /*
+ * Raw watermark latency values:
+ * in 0.1us units for WM0,
+ * in 0.5us units for WM1+.
+ */
+ /* primary */
+ uint16_t pri_latency[5];
+ /* sprite */
+ uint16_t spr_latency[5];
+ /* cursor */
+ uint16_t cur_latency[5];
+ } wm;
+
+ struct i915_package_c8 pc8;
+
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct i915_dri1_state dri1;
+ /* Old ums support infrastructure, same warning applies. */
+ struct i915_ums_state ums;
} drm_i915_private_t;
+static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
+{
+ return dev->dev_private;
+}
+
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
@@ -1187,7 +1356,7 @@ enum hdmi_force_audio {
HDMI_AUDIO_ON, /* force turn on HDMI audio */
};
-#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
+#define I915_GTT_OFFSET_NONE ((u32)-1)
struct drm_i915_gem_object_ops {
/* Interface between the GEM object and its backing storage.
@@ -1212,15 +1381,16 @@ struct drm_i915_gem_object {
const struct drm_i915_gem_object_ops *ops;
- /** Current space allocated to this object in the GTT, if any. */
- struct drm_mm_node *gtt_space;
+ /** List of VMAs backed by this object */
+ struct list_head vma_list;
+
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
struct list_head global_list;
- /** This object's place on the active/inactive lists */
struct list_head ring_list;
- struct list_head mm_list;
+ /** Used in execbuf to temporarily hold a ref */
+ struct list_head obj_exec_link;
/** This object's place in the batchbuffer or on the eviction list */
struct list_head exec_list;
@@ -1287,6 +1457,7 @@ struct drm_i915_gem_object {
*/
unsigned int fault_mappable:1;
unsigned int pin_mappable:1;
+ unsigned int pin_display:1;
/*
* Is the GPU currently using a fence to access this buffer,
@@ -1294,7 +1465,7 @@ struct drm_i915_gem_object {
unsigned int pending_fenced_gpu_access:1;
unsigned int fenced_gpu_access:1;
- unsigned int cache_level:2;
+ unsigned int cache_level:3;
unsigned int has_aliasing_ppgtt_mapping:1;
unsigned int has_global_gtt_mapping:1;
@@ -1314,13 +1485,6 @@ struct drm_i915_gem_object {
unsigned long exec_handle;
struct drm_i915_gem_exec_object2 *exec_entry;
- /**
- * Current offset of the object in GTT space.
- *
- * This is the same as gtt_space->start
- */
- uint32_t gtt_offset;
-
struct intel_ring_buffer *ring;
/** Breadcrumb of last rendering to the buffer. */
@@ -1396,7 +1560,7 @@ struct drm_i915_file_private {
struct i915_ctx_hang_stats hang_stats;
};
-#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
+#define INTEL_INFO(dev) (to_i915(dev)->info)
#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
@@ -1414,7 +1578,6 @@ struct drm_i915_file_private {
#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
@@ -1426,6 +1589,8 @@ struct drm_i915_file_private {
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
+ ((dev)->pci_device & 0xFF00) == 0x0C00)
#define IS_ULT(dev) (IS_HASWELL(dev) && \
((dev)->pci_device & 0xFF00) == 0x0A00)
@@ -1446,6 +1611,7 @@ struct drm_i915_file_private {
#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
+#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
@@ -1468,8 +1634,6 @@ struct drm_i915_file_private {
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
-/* dsparb controlled by hw only */
-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
@@ -1477,8 +1641,6 @@ struct drm_i915_file_private {
#define HAS_IPS(dev) (IS_ULT(dev))
-#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
-
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
@@ -1490,7 +1652,7 @@ struct drm_i915_file_private {
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
-#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
@@ -1526,7 +1688,7 @@ struct drm_i915_file_private {
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
-extern struct drm_ioctl_desc i915_ioctls[];
+extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
@@ -1540,9 +1702,14 @@ extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
+extern int i915_enable_psr __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;
extern int i915_disable_power_well __read_mostly;
extern int i915_enable_ips __read_mostly;
+extern bool i915_fastboot __read_mostly;
+extern int i915_enable_pc8 __read_mostly;
+extern int i915_pc8_timeout __read_mostly;
+extern bool i915_prefault_disable __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
@@ -1578,15 +1745,19 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
extern void intel_console_resume(struct work_struct *work);
/* i915_irq.c */
-void i915_hangcheck_elapsed(unsigned long data);
+void i915_queue_hangcheck(struct drm_device *dev);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
+extern void intel_pm_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
-extern void intel_gt_init(struct drm_device *dev);
-extern void intel_gt_sanitize(struct drm_device *dev);
-void i915_error_state_free(struct kref *error_ref);
+extern void intel_uncore_sanitize(struct drm_device *dev);
+extern void intel_uncore_early_sanitize(struct drm_device *dev);
+extern void intel_uncore_init(struct drm_device *dev);
+extern void intel_uncore_clear_errors(struct drm_device *dev);
+extern void intel_uncore_check_errors(struct drm_device *dev);
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
@@ -1594,13 +1765,6 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
-#ifdef CONFIG_DEBUG_FS
-extern void i915_destroy_error_state(struct drm_device *dev);
-#else
-#define i915_destroy_error_state(x)
-#endif
-
-
/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1657,13 +1821,18 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm);
+void i915_gem_vma_destroy(struct i915_vma *vma);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+int __must_check i915_vma_unbind(struct i915_vma *vma);
+int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
@@ -1700,8 +1869,6 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
-int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle);
/**
* Returns true if seq1 is later than seq2.
*/
@@ -1753,10 +1920,7 @@ static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
}
void i915_gem_reset(struct drm_device *dev);
-void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
- uint32_t read_domains,
- uint32_t write_domain);
+bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
@@ -1783,6 +1947,7 @@ int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
u32 alignment,
struct intel_ring_buffer *pipelined);
+void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_i915_gem_object *obj,
int id,
@@ -1809,6 +1974,56 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
void i915_gem_restore_fences(struct drm_device *dev);
+unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm);
+bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm);
+unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm);
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm);
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm);
+/* Some GGTT VM helpers */
+#define obj_to_ggtt(obj) \
+ (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
+static inline bool i915_is_ggtt(struct i915_address_space *vm)
+{
+ struct i915_address_space *ggtt =
+ &((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
+ return vm == ggtt;
+}
+
+static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
+}
+
+static inline unsigned long
+i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
+}
+
+static inline unsigned long
+i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_obj_size(obj, obj_to_ggtt(obj));
+}
+
+static inline int __must_check
+i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
+ uint32_t alignment,
+ bool map_and_fenceable,
+ bool nonblocking)
+{
+ return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
+ map_and_fenceable, nonblocking);
+}
+#undef obj_to_ggtt
+
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
@@ -1827,7 +2042,7 @@ static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
}
struct i915_ctx_hang_stats * __must_check
-i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
+i915_gem_context_get_hang_stats(struct drm_device *dev,
struct drm_file *file,
u32 id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
@@ -1861,7 +2076,9 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
/* i915_gem_evict.c */
-int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
+int __must_check i915_gem_evict_something(struct drm_device *dev,
+ struct i915_address_space *vm,
+ int min_size,
unsigned alignment,
unsigned cache_level,
bool mappable,
@@ -1883,7 +2100,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
/* i915_gem_tiling.c */
-inline static bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
+static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
@@ -1896,23 +2113,36 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
/* i915_gem_debug.c */
-void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
- const char *where, uint32_t mark);
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
-void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
- int handle);
-void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
- const char *where, uint32_t mark);
/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
+
+/* i915_gpu_error.c */
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
+int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
+ const struct i915_error_state_file_priv *error);
+int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
+ size_t count, loff_t pos);
+static inline void i915_error_state_buf_release(
+ struct drm_i915_error_state_buf *eb)
+{
+ kfree(eb->buf);
+}
+void i915_capture_error_state(struct drm_device *dev);
+void i915_error_state_get(struct drm_device *dev,
+ struct i915_error_state_file_priv *error_priv);
+void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
+void i915_destroy_error_state(struct drm_device *dev);
+
+void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
+const char *i915_cache_level_str(int type);
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
@@ -1992,7 +2222,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
/* overlay */
-#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
struct intel_overlay_error_state *error);
@@ -2001,7 +2230,6 @@ extern struct intel_display_error_state *intel_display_capture_error_state(struc
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
struct drm_device *dev,
struct intel_display_error_state *error);
-#endif
/* On SNB platform, before reading ring registers forcewake bit
* must be set to prevent GT core from power down and stale values being
@@ -2009,7 +2237,6 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
-int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
@@ -2028,39 +2255,37 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
int vlv_gpu_freq(int ddr_freq, int val);
int vlv_freq_opcode(int ddr_freq, int val);
-#define __i915_read(x, y) \
- u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
-
-__i915_read(8, b)
-__i915_read(16, w)
-__i915_read(32, l)
-__i915_read(64, q)
+#define __i915_read(x) \
+ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
+__i915_read(8)
+__i915_read(16)
+__i915_read(32)
+__i915_read(64)
#undef __i915_read
-#define __i915_write(x, y) \
- void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);
-
-__i915_write(8, b)
-__i915_write(16, w)
-__i915_write(32, l)
-__i915_write(64, q)
+#define __i915_write(x) \
+ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
+__i915_write(8)
+__i915_write(16)
+__i915_write(32)
+__i915_write(64)
#undef __i915_write
-#define I915_READ8(reg) i915_read8(dev_priv, (reg))
-#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val))
+#define I915_READ8(reg) i915_read8(dev_priv, (reg), true)
+#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val), true)
-#define I915_READ16(reg) i915_read16(dev_priv, (reg))
-#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val))
-#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg))
-#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg))
+#define I915_READ16(reg) i915_read16(dev_priv, (reg), true)
+#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val), true)
+#define I915_READ16_NOTRACE(reg) i915_read16(dev_priv, (reg), false)
+#define I915_WRITE16_NOTRACE(reg, val) i915_write16(dev_priv, (reg), (val), false)
-#define I915_READ(reg) i915_read32(dev_priv, (reg))
-#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val))
-#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg))
-#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg))
+#define I915_READ(reg) i915_read32(dev_priv, (reg), true)
+#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val), true)
+#define I915_READ_NOTRACE(reg) i915_read32(dev_priv, (reg), false)
+#define I915_WRITE_NOTRACE(reg, val) i915_write32(dev_priv, (reg), (val), false)
-#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val))
-#define I915_READ64(reg) i915_read64(dev_priv, (reg))
+#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val), true)
+#define I915_READ64(reg) i915_read64(dev_priv, (reg), true)
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
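One behavioural consequence of the macro rework above: the _NOTRACE variants no longer bypass the accessors with raw readl()/writel(); every access now funnels through i915_read32()/i915_write32() and only the boolean trace argument differs, so any bookkeeping done inside those accessors is applied consistently. A usage sketch follows (SOME_REG is a placeholder, not a real register define):

/* Sketch only; SOME_REG is a placeholder define, not a real register.
 * Both flavours expand to i915_read32()/i915_write32(); they differ only
 * in the trace argument. */
static u32 example_reg_rmw(struct drm_i915_private *dev_priv)
{
	u32 val = I915_READ(SOME_REG);		/* traced read */

	I915_WRITE_NOTRACE(SOME_REG, val | 1);	/* same accessor, tracepoint skipped */
	return I915_READ_NOTRACE(SOME_REG);
}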
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d9e2208cfe9..2d1cb10d846 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -26,6 +26,7 @@
*/
#include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
@@ -37,11 +38,14 @@
#include <linux/dma-buf.h>
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
-static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
- unsigned alignment,
- bool map_and_fenceable,
- bool nonblocking);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
+ bool force);
+static __must_check int
+i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ unsigned alignment,
+ bool map_and_fenceable,
+ bool nonblocking);
static int i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
@@ -59,6 +63,20 @@ static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
+static bool cpu_cache_is_coherent(struct drm_device *dev,
+ enum i915_cache_level level)
+{
+ return HAS_LLC(dev) || level != I915_CACHE_NONE;
+}
+
+static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
+{
+ if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ return true;
+
+ return obj->pin_display;
+}
+
static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
if (obj->tiling_mode)
@@ -75,15 +93,19 @@ static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
size_t size)
{
+ spin_lock(&dev_priv->mm.object_stat_lock);
dev_priv->mm.object_count++;
dev_priv->mm.object_memory += size;
+ spin_unlock(&dev_priv->mm.object_stat_lock);
}
static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
size_t size)
{
+ spin_lock(&dev_priv->mm.object_stat_lock);
dev_priv->mm.object_count--;
dev_priv->mm.object_memory -= size;
+ spin_unlock(&dev_priv->mm.object_stat_lock);
}
static int
@@ -135,7 +157,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
- return obj->gtt_space && !obj->active;
+ return i915_gem_obj_bound_any(obj) && !obj->active;
}
int
@@ -178,10 +200,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
if (obj->pin_count)
- pinned += obj->gtt_space->size;
+ pinned += i915_gem_obj_ggtt_size(obj);
mutex_unlock(&dev->struct_mutex);
- args->aper_size = dev_priv->gtt.total;
+ args->aper_size = dev_priv->gtt.base.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
@@ -219,16 +241,10 @@ i915_gem_create(struct drm_file *file,
return -ENOMEM;
ret = drm_gem_handle_create(file, &obj->base, &handle);
- if (ret) {
- drm_gem_object_release(&obj->base);
- i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
- i915_gem_object_free(obj);
- return ret;
- }
-
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference(&obj->base);
- trace_i915_gem_object_create(obj);
+ drm_gem_object_unreference_unlocked(&obj->base);
+ if (ret)
+ return ret;
*handle_p = handle;
return 0;
@@ -246,13 +262,6 @@ i915_gem_dumb_create(struct drm_file *file,
args->size, &args->handle);
}
-int i915_gem_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
-
/**
* Creates a new mm object and returns a handle to it.
*/
@@ -420,9 +429,8 @@ i915_gem_shmem_pread(struct drm_device *dev,
* read domain and manually flush cachelines (if required). This
* optimizes for the case when the gpu will dirty the data
* anyway again before the next pread happens. */
- if (obj->cache_level == I915_CACHE_NONE)
- needs_clflush = 1;
- if (obj->gtt_space) {
+ needs_clflush = !cpu_cache_is_coherent(dev, obj->cache_level);
+ if (i915_gem_obj_bound_any(obj)) {
ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret)
return ret;
@@ -465,7 +473,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
mutex_unlock(&dev->struct_mutex);
- if (!prefaulted) {
+ if (likely(!i915_prefault_disable) && !prefaulted) {
ret = fault_in_multipages_writeable(user_data, remain);
/* Userspace is tricking us, but we've already clobbered
* its pages with the prefault and promised to write the
@@ -594,7 +602,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
char __user *user_data;
int page_offset, page_length, ret;
- ret = i915_gem_object_pin(obj, 0, true, true);
+ ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
if (ret)
goto out;
@@ -609,7 +617,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
user_data = to_user_ptr(args->data_ptr);
remain = args->size;
- offset = obj->gtt_offset + args->offset;
+ offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
while (remain > 0) {
/* Operation in this page
@@ -737,19 +745,18 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
* write domain and manually flush cachelines (if required). This
* optimizes for the case when the gpu will use the data
* right away and we therefore have to clflush anyway. */
- if (obj->cache_level == I915_CACHE_NONE)
- needs_clflush_after = 1;
- if (obj->gtt_space) {
+ needs_clflush_after = cpu_write_needs_clflush(obj);
+ if (i915_gem_obj_bound_any(obj)) {
ret = i915_gem_object_set_to_gtt_domain(obj, true);
if (ret)
return ret;
}
}
- /* Same trick applies for invalidate partially written cachelines before
- * writing. */
- if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)
- && obj->cache_level == I915_CACHE_NONE)
- needs_clflush_before = 1;
+ /* The same trick applies to invalidating partially written cachelines
+ * that are read before writing. */
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+ needs_clflush_before =
+ !cpu_cache_is_coherent(dev, obj->cache_level);
ret = i915_gem_object_get_pages(obj);
if (ret)
@@ -828,8 +835,8 @@ out:
*/
if (!needs_clflush_after &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
- i915_gem_clflush_object(obj);
- i915_gem_chipset_flush(dev);
+ if (i915_gem_clflush_object(obj, obj->pin_display))
+ i915_gem_chipset_flush(dev);
}
}
@@ -860,10 +867,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
args->size))
return -EFAULT;
- ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
- args->size);
- if (ret)
- return -EFAULT;
+ if (likely(!i915_prefault_disable)) {
+ ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
+ args->size);
+ if (ret)
+ return -EFAULT;
+ }
ret = i915_mutex_lock_interruptible(dev);
if (ret)
@@ -904,9 +913,9 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
goto out;
}
- if (obj->cache_level == I915_CACHE_NONE &&
- obj->tiling_mode == I915_TILING_NONE &&
- obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ if (obj->tiling_mode == I915_TILING_NONE &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
+ cpu_write_needs_clflush(obj)) {
ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
/* Note that the gtt paths might fail with non-page-backed user
* pointers (e.g. gtt mappings when moving data between
@@ -990,6 +999,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
bool wait_forever = true;
int ret;
+ WARN(dev_priv->pc8.irqs_disabled, "IRQs disabled\n");
+
if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
return 0;
@@ -1255,8 +1266,8 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
}
/* Pinned buffers may be scanout, so flush the cache */
- if (obj->pin_count)
- i915_gem_object_flush_cpu_write_domain(obj);
+ if (obj->pin_display)
+ i915_gem_object_flush_cpu_write_domain(obj, true);
drm_gem_object_unreference(&obj->base);
unlock:
@@ -1346,7 +1357,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
/* Now bind it into the GTT if needed */
- ret = i915_gem_object_pin(obj, 0, true, false);
+ ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
if (ret)
goto unlock;
@@ -1360,8 +1371,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
obj->fault_mappable = true;
- pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
- page_offset;
+ pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
+ pfn >>= PAGE_SHIFT;
+ pfn += page_offset;
/* Finally, remap it using the new GTT offset */
ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
@@ -1425,11 +1437,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
if (!obj->fault_mappable)
return;
- if (obj->base.dev->dev_mapping)
- unmap_mapping_range(obj->base.dev->dev_mapping,
- (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
- obj->base.size, 1);
-
+ drm_vma_node_unmap(&obj->base.vma_node, obj->base.dev->dev_mapping);
obj->fault_mappable = false;
}
@@ -1485,7 +1493,7 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
- if (obj->base.map_list.map)
+ if (drm_vma_node_has_offset(&obj->base.vma_node))
return 0;
dev_priv->mm.shrinker_no_lock_stealing = true;
@@ -1516,9 +1524,6 @@ out:
static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
- if (!obj->base.map_list.map)
- return;
-
drm_gem_free_mmap_offset(&obj->base);
}
@@ -1557,7 +1562,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
if (ret)
goto out;
- *offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
+ *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
out:
drm_gem_object_unreference(&obj->base);
@@ -1632,7 +1637,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
* hope for the best.
*/
WARN_ON(ret != -EIO);
- i915_gem_clflush_object(obj);
+ i915_gem_clflush_object(obj, true);
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
@@ -1667,11 +1672,11 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
if (obj->pages == NULL)
return 0;
- BUG_ON(obj->gtt_space);
-
if (obj->pages_pin_count)
return -EBUSY;
+ BUG_ON(i915_gem_obj_bound_any(obj));
+
/* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
* lists early. */
@@ -1704,12 +1709,18 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
}
}
- list_for_each_entry_safe(obj, next,
- &dev_priv->mm.inactive_list,
- mm_list) {
- if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
- i915_gem_object_unbind(obj) == 0 &&
- i915_gem_object_put_pages(obj) == 0) {
+ list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list,
+ global_list) {
+ struct i915_vma *vma, *v;
+
+ if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+ continue;
+
+ list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+ if (i915_vma_unbind(vma))
+ break;
+
+ if (!i915_gem_object_put_pages(obj)) {
count += obj->base.size >> PAGE_SHIFT;
if (count >= target)
return count;
@@ -1892,8 +1903,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
obj->active = 1;
}
- /* Move from whatever list we were on to the tail of execution. */
- list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
list_move_tail(&obj->ring_list, &ring->active_list);
obj->last_read_seqno = seqno;
@@ -1915,13 +1924,14 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
static void
i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+ struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
- list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list);
list_del_init(&obj->ring_list);
obj->ring = NULL;
@@ -2085,11 +2095,9 @@ int __i915_add_request(struct intel_ring_buffer *ring,
trace_i915_gem_request_add(ring, request->seqno);
ring->outstanding_lazy_request = 0;
- if (!dev_priv->mm.suspended) {
- if (i915_enable_hangcheck) {
- mod_timer(&dev_priv->gpu_error.hangcheck_timer,
- round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
- }
+ if (!dev_priv->ums.mm_suspended) {
+ i915_queue_hangcheck(ring->dev);
+
if (was_empty) {
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
@@ -2119,10 +2127,11 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
spin_unlock(&file_priv->mm.lock);
}
-static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj)
+static bool i915_head_inside_object(u32 acthd, struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
{
- if (acthd >= obj->gtt_offset &&
- acthd < obj->gtt_offset + obj->base.size)
+ if (acthd >= i915_gem_obj_offset(obj, vm) &&
+ acthd < i915_gem_obj_offset(obj, vm) + obj->base.size)
return true;
return false;
@@ -2145,6 +2154,17 @@ static bool i915_head_inside_request(const u32 acthd_unmasked,
return false;
}
+static struct i915_address_space *
+request_to_vm(struct drm_i915_gem_request *request)
+{
+ struct drm_i915_private *dev_priv = request->ring->dev->dev_private;
+ struct i915_address_space *vm;
+
+ vm = &dev_priv->gtt.base;
+
+ return vm;
+}
+
static bool i915_request_guilty(struct drm_i915_gem_request *request,
const u32 acthd, bool *inside)
{
@@ -2152,9 +2172,9 @@ static bool i915_request_guilty(struct drm_i915_gem_request *request,
* pointing inside the ring, matches the batch_obj address range.
* However this is extremely unlikely.
*/
-
if (request->batch_obj) {
- if (i915_head_inside_object(acthd, request->batch_obj)) {
+ if (i915_head_inside_object(acthd, request->batch_obj,
+ request_to_vm(request))) {
*inside = true;
return true;
}
@@ -2174,17 +2194,21 @@ static void i915_set_reset_status(struct intel_ring_buffer *ring,
{
struct i915_ctx_hang_stats *hs = NULL;
bool inside, guilty;
+ unsigned long offset = 0;
/* Innocent until proven guilty */
guilty = false;
- if (ring->hangcheck.action != wait &&
+ if (request->batch_obj)
+ offset = i915_gem_obj_offset(request->batch_obj,
+ request_to_vm(request));
+
+ if (ring->hangcheck.action != HANGCHECK_WAIT &&
i915_request_guilty(request, acthd, &inside)) {
- DRM_ERROR("%s hung %s bo (0x%x ctx %d) at 0x%x\n",
+ DRM_ERROR("%s hung %s bo (0x%lx ctx %d) at 0x%x\n",
ring->name,
inside ? "inside" : "flushing",
- request->batch_obj ?
- request->batch_obj->gtt_offset : 0,
+ offset,
request->ctx ? request->ctx->id : 0,
acthd);
@@ -2275,23 +2299,12 @@ void i915_gem_restore_fences(struct drm_device *dev)
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj;
struct intel_ring_buffer *ring;
int i;
for_each_ring(ring, dev_priv, i)
i915_gem_reset_ring_lists(dev_priv, ring);
- /* Move everything out of the GPU domains to ensure we do any
- * necessary invalidation upon reuse.
- */
- list_for_each_entry(obj,
- &dev_priv->mm.inactive_list,
- mm_list)
- {
- obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
- }
-
i915_gem_restore_fences(dev);
}
@@ -2400,7 +2413,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
idle &= list_empty(&ring->request_list);
}
- if (!dev_priv->mm.suspended && !idle)
+ if (!dev_priv->ums.mm_suspended && !idle)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
if (idle)
@@ -2586,18 +2599,18 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
old_write_domain);
}
-/**
- * Unbinds an object from the GTT aperture.
- */
-int
-i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+int i915_vma_unbind(struct i915_vma *vma)
{
+ struct drm_i915_gem_object *obj = vma->obj;
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
int ret;
- if (obj->gtt_space == NULL)
+ if (list_empty(&vma->vma_link))
return 0;
+ if (!drm_mm_node_allocated(&vma->node))
+ goto destroy;
+
if (obj->pin_count)
return -EBUSY;
@@ -2618,7 +2631,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
if (ret)
return ret;
- trace_i915_gem_object_unbind(obj);
+ trace_i915_vma_unbind(vma);
if (obj->has_global_gtt_mapping)
i915_gem_gtt_unbind_object(obj);
@@ -2629,18 +2642,46 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
i915_gem_gtt_finish_object(obj);
i915_gem_object_unpin_pages(obj);
- list_del(&obj->mm_list);
- list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+ list_del(&vma->mm_list);
/* Avoid an unnecessary call to unbind on rebind. */
- obj->map_and_fenceable = true;
+ if (i915_is_ggtt(vma->vm))
+ obj->map_and_fenceable = true;
- drm_mm_put_block(obj->gtt_space);
- obj->gtt_space = NULL;
- obj->gtt_offset = 0;
+ drm_mm_remove_node(&vma->node);
+
+destroy:
+ i915_gem_vma_destroy(vma);
+
+ /* Since the unbound list is global, only move to that list if
+ * no more VMAs exist.
+ * NB: Until we have real VMAs there will only ever be one */
+ WARN_ON(!list_empty(&obj->vma_list));
+ if (list_empty(&obj->vma_list))
+ list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
return 0;
}
+/**
+ * Unbinds an object from the global GTT aperture.
+ */
+int
+i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ struct i915_address_space *ggtt = &dev_priv->gtt.base;
+
+ if (!i915_gem_obj_ggtt_bound(obj))
+ return 0;
+
+ if (obj->pin_count)
+ return -EBUSY;
+
+ BUG_ON(obj->pages == NULL);
+
+ return i915_vma_unbind(i915_gem_obj_to_vma(obj, ggtt));
+}
+
int i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2691,12 +2732,12 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
POSTING_READ(fence_reg);
if (obj) {
- u32 size = obj->gtt_space->size;
+ u32 size = i915_gem_obj_ggtt_size(obj);
uint64_t val;
- val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
0xfffff000) << 32;
- val |= obj->gtt_offset & 0xfffff000;
+ val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
@@ -2720,15 +2761,15 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
u32 val;
if (obj) {
- u32 size = obj->gtt_space->size;
+ u32 size = i915_gem_obj_ggtt_size(obj);
int pitch_val;
int tile_width;
- WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+ WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
(size & -size) != size ||
- (obj->gtt_offset & (size - 1)),
- "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
- obj->gtt_offset, obj->map_and_fenceable, size);
+ (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+ "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+ i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
tile_width = 128;
@@ -2739,7 +2780,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
pitch_val = obj->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
- val = obj->gtt_offset;
+ val = i915_gem_obj_ggtt_offset(obj);
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I915_FENCE_SIZE_BITS(size);
@@ -2764,19 +2805,19 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
uint32_t val;
if (obj) {
- u32 size = obj->gtt_space->size;
+ u32 size = i915_gem_obj_ggtt_size(obj);
uint32_t pitch_val;
- WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+ WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
(size & -size) != size ||
- (obj->gtt_offset & (size - 1)),
- "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
- obj->gtt_offset, size);
+ (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
+ "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
+ i915_gem_obj_ggtt_offset(obj), size);
pitch_val = obj->stride / 128;
pitch_val = ffs(pitch_val) - 1;
- val = obj->gtt_offset;
+ val = i915_gem_obj_ggtt_offset(obj);
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I830_FENCE_SIZE_BITS(size);
@@ -2997,7 +3038,7 @@ static bool i915_gem_valid_gtt_space(struct drm_device *dev,
if (HAS_LLC(dev))
return true;
- if (gtt_space == NULL)
+ if (!drm_mm_node_allocated(gtt_space))
return true;
if (list_empty(&gtt_space->node_list))
@@ -3030,8 +3071,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
if (obj->cache_level != obj->gtt_space->color) {
printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
- obj->gtt_space->start,
- obj->gtt_space->start + obj->gtt_space->size,
+ i915_gem_obj_ggtt_offset(obj),
+ i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
obj->cache_level,
obj->gtt_space->color);
err++;
@@ -3042,8 +3083,8 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
obj->gtt_space,
obj->cache_level)) {
printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
- obj->gtt_space->start,
- obj->gtt_space->start + obj->gtt_space->size,
+ i915_gem_obj_ggtt_offset(obj),
+ i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
obj->cache_level);
err++;
continue;
@@ -3058,18 +3099,18 @@ static void i915_gem_verify_gtt(struct drm_device *dev)
* Finds free space in the GTT aperture and binds the object there.
*/
static int
-i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
- unsigned alignment,
- bool map_and_fenceable,
- bool nonblocking)
+i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
+ unsigned alignment,
+ bool map_and_fenceable,
+ bool nonblocking)
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_mm_node *node;
u32 size, fence_size, fence_alignment, unfenced_alignment;
- bool mappable, fenceable;
- size_t gtt_max = map_and_fenceable ?
- dev_priv->gtt.mappable_end : dev_priv->gtt.total;
+ size_t gtt_max =
+ map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
+ struct i915_vma *vma;
int ret;
fence_size = i915_gem_get_gtt_size(dev,
@@ -3110,77 +3151,89 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
i915_gem_object_pin_pages(obj);
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (node == NULL) {
- i915_gem_object_unpin_pages(obj);
- return -ENOMEM;
+ BUG_ON(!i915_is_ggtt(vm));
+
+ vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unpin;
}
+ /* For now we only ever use 1 vma per object */
+ WARN_ON(!list_is_singular(&obj->vma_list));
+
search_free:
- ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
+ ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
size, alignment,
- obj->cache_level, 0, gtt_max);
+ obj->cache_level, 0, gtt_max,
+ DRM_MM_SEARCH_DEFAULT);
if (ret) {
- ret = i915_gem_evict_something(dev, size, alignment,
+ ret = i915_gem_evict_something(dev, vm, size, alignment,
obj->cache_level,
map_and_fenceable,
nonblocking);
if (ret == 0)
goto search_free;
- i915_gem_object_unpin_pages(obj);
- kfree(node);
- return ret;
+ goto err_free_vma;
}
- if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
- i915_gem_object_unpin_pages(obj);
- drm_mm_put_block(node);
- return -EINVAL;
+ if (WARN_ON(!i915_gem_valid_gtt_space(dev, &vma->node,
+ obj->cache_level))) {
+ ret = -EINVAL;
+ goto err_remove_node;
}
ret = i915_gem_gtt_prepare_object(obj);
- if (ret) {
- i915_gem_object_unpin_pages(obj);
- drm_mm_put_block(node);
- return ret;
- }
+ if (ret)
+ goto err_remove_node;
list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
- list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ list_add_tail(&vma->mm_list, &vm->inactive_list);
- obj->gtt_space = node;
- obj->gtt_offset = node->start;
+ if (i915_is_ggtt(vm)) {
+ bool mappable, fenceable;
- fenceable =
- node->size == fence_size &&
- (node->start & (fence_alignment - 1)) == 0;
+ fenceable = (vma->node.size == fence_size &&
+ (vma->node.start & (fence_alignment - 1)) == 0);
- mappable =
- obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
+ mappable = (vma->node.start + obj->base.size <=
+ dev_priv->gtt.mappable_end);
+
+ obj->map_and_fenceable = mappable && fenceable;
+ }
- obj->map_and_fenceable = mappable && fenceable;
+ WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
- trace_i915_gem_object_bind(obj, map_and_fenceable);
+ trace_i915_vma_bind(vma, map_and_fenceable);
i915_gem_verify_gtt(dev);
return 0;
+
+err_remove_node:
+ drm_mm_remove_node(&vma->node);
+err_free_vma:
+ i915_gem_vma_destroy(vma);
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+ return ret;
}
-void
-i915_gem_clflush_object(struct drm_i915_gem_object *obj)
+bool
+i915_gem_clflush_object(struct drm_i915_gem_object *obj,
+ bool force)
{
/* If we don't have a page list set up, then we're not pinned
* to GPU, and we can ignore the cache flush because it'll happen
* again at bind time.
*/
if (obj->pages == NULL)
- return;
+ return false;
/*
* Stolen memory is always coherent with the GPU as it is explicitly
* marked as wc by the system, or the system is cache-coherent.
*/
if (obj->stolen)
- return;
+ return false;
/* If the GPU is snooping the contents of the CPU cache,
* we do not need to manually clear the CPU cache lines. However,
@@ -3190,12 +3243,13 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
* snooping behaviour occurs naturally as the result of our domain
* tracking.
*/
- if (obj->cache_level != I915_CACHE_NONE)
- return;
+ if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
+ return false;
trace_i915_gem_object_clflush(obj);
-
drm_clflush_sg(obj->pages);
+
+ return true;
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -3227,15 +3281,17 @@ i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
/** Flushes the CPU write domain for the object if it's dirty. */
static void
-i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
+ bool force)
{
uint32_t old_write_domain;
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return;
- i915_gem_clflush_object(obj);
- i915_gem_chipset_flush(obj->base.dev);
+ if (i915_gem_clflush_object(obj, force))
+ i915_gem_chipset_flush(obj->base.dev);
+
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
@@ -3258,7 +3314,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
int ret;
/* Not valid to be called on unbound objects. */
- if (obj->gtt_space == NULL)
+ if (!i915_gem_obj_bound_any(obj))
return -EINVAL;
if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
@@ -3268,7 +3324,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
if (ret)
return ret;
- i915_gem_object_flush_cpu_write_domain(obj);
+ i915_gem_object_flush_cpu_write_domain(obj, false);
/* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the
@@ -3296,8 +3352,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
old_write_domain);
/* And bump the LRU for this access */
- if (i915_gem_object_is_inactive(obj))
- list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ if (i915_gem_object_is_inactive(obj)) {
+ struct i915_vma *vma = i915_gem_obj_to_vma(obj,
+ &dev_priv->gtt.base);
+ if (vma)
+ list_move_tail(&vma->mm_list,
+ &dev_priv->gtt.base.inactive_list);
+
+ }
return 0;
}
@@ -3307,6 +3369,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct i915_vma *vma;
int ret;
if (obj->cache_level == cache_level)
@@ -3317,13 +3380,17 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
return -EBUSY;
}
- if (!i915_gem_valid_gtt_space(dev, obj->gtt_space, cache_level)) {
- ret = i915_gem_object_unbind(obj);
- if (ret)
- return ret;
+ list_for_each_entry(vma, &obj->vma_list, vma_link) {
+ if (!i915_gem_valid_gtt_space(dev, &vma->node, cache_level)) {
+ ret = i915_vma_unbind(vma);
+ if (ret)
+ return ret;
+
+ break;
+ }
}
- if (obj->gtt_space) {
+ if (i915_gem_obj_bound_any(obj)) {
ret = i915_gem_object_finish_gpu(obj);
if (ret)
return ret;
@@ -3345,11 +3412,13 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
if (obj->has_aliasing_ppgtt_mapping)
i915_ppgtt_bind_object(dev_priv->mm.aliasing_ppgtt,
obj, cache_level);
-
- obj->gtt_space->color = cache_level;
}
- if (cache_level == I915_CACHE_NONE) {
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ vma->node.color = cache_level;
+ obj->cache_level = cache_level;
+
+ if (cpu_write_needs_clflush(obj)) {
u32 old_read_domains, old_write_domain;
/* If we're coming from LLC cached, then we haven't
@@ -3359,7 +3428,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
* Just set it to the CPU cache for now.
*/
WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
- WARN_ON(obj->base.read_domains & ~I915_GEM_DOMAIN_CPU);
old_read_domains = obj->base.read_domains;
old_write_domain = obj->base.write_domain;
@@ -3372,7 +3440,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
old_write_domain);
}
- obj->cache_level = cache_level;
i915_gem_verify_gtt(dev);
return 0;
}
@@ -3394,7 +3461,20 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
goto unlock;
}
- args->caching = obj->cache_level != I915_CACHE_NONE;
+ switch (obj->cache_level) {
+ case I915_CACHE_LLC:
+ case I915_CACHE_L3_LLC:
+ args->caching = I915_CACHING_CACHED;
+ break;
+
+ case I915_CACHE_WT:
+ args->caching = I915_CACHING_DISPLAY;
+ break;
+
+ default:
+ args->caching = I915_CACHING_NONE;
+ break;
+ }
drm_gem_object_unreference(&obj->base);
unlock:
@@ -3417,6 +3497,9 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
case I915_CACHING_CACHED:
level = I915_CACHE_LLC;
break;
+ case I915_CACHING_DISPLAY:
+ level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
+ break;
default:
return -EINVAL;
}
@@ -3439,6 +3522,22 @@ unlock:
return ret;
}
+static bool is_pin_display(struct drm_i915_gem_object *obj)
+{
+ /* There are 3 sources that pin objects:
+ * 1. The display engine (scanouts, sprites, cursors);
+ * 2. Reservations for execbuffer;
+ * 3. The user.
+ *
+ * We can ignore reservations as we hold the struct_mutex and
+ * are only called outside of the reservation path. The user
+ * can only increment pin_count once, and so if after
+ * subtracting the potential reference by the user, any pin_count
+ * remains, it must be due to another use by the display engine.
+ */
+ return obj->pin_count - !!obj->user_pin_count;
+}
+
/*
* Prepare buffer for display plane (scanout, cursors, etc).
* Can be called from an uninterruptible phase (modesetting) and allows
@@ -3458,6 +3557,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
return ret;
}
+ /* Mark the pin_display early so that we account for the
+ * display coherency whilst setting up the cache domains.
+ */
+ obj->pin_display = true;
+
/* The display engine is not coherent with the LLC cache on gen6. As
* a result, we make sure that the pinning that is about to occur is
* done with uncached PTEs. This is lowest common denominator for all
@@ -3467,19 +3571,20 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
* of uncaching, which would allow us to flush all the LLC-cached data
* with that bit in the PTE to main memory with just one PIPE_CONTROL.
*/
- ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+ ret = i915_gem_object_set_cache_level(obj,
+ HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
if (ret)
- return ret;
+ goto err_unpin_display;
/* As the user may map the buffer once pinned in the display plane
* (e.g. libkms for the bootup splash), we have to ensure that we
* always use map_and_fenceable for all scanout buffers.
*/
- ret = i915_gem_object_pin(obj, alignment, true, false);
+ ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
if (ret)
- return ret;
+ goto err_unpin_display;
- i915_gem_object_flush_cpu_write_domain(obj);
+ i915_gem_object_flush_cpu_write_domain(obj, true);
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
@@ -3495,6 +3600,17 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
old_write_domain);
return 0;
+
+err_unpin_display:
+ obj->pin_display = is_pin_display(obj);
+ return ret;
+}
+
+void
+i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
+{
+ i915_gem_object_unpin(obj);
+ obj->pin_display = is_pin_display(obj);
}
int
@@ -3540,7 +3656,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
/* Flush the CPU cache if it's still invalid. */
if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
- i915_gem_clflush_object(obj);
+ i915_gem_clflush_object(obj, false);
obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
}
@@ -3618,37 +3734,44 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
int
i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm,
uint32_t alignment,
bool map_and_fenceable,
bool nonblocking)
{
+ struct i915_vma *vma;
int ret;
if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
return -EBUSY;
- if (obj->gtt_space != NULL) {
- if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+ WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
+
+ vma = i915_gem_obj_to_vma(obj, vm);
+
+ if (vma) {
+ if ((alignment &&
+ vma->node.start & (alignment - 1)) ||
(map_and_fenceable && !obj->map_and_fenceable)) {
WARN(obj->pin_count,
"bo is already pinned with incorrect alignment:"
- " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+ " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n",
- obj->gtt_offset, alignment,
+ i915_gem_obj_offset(obj, vm), alignment,
map_and_fenceable,
obj->map_and_fenceable);
- ret = i915_gem_object_unbind(obj);
+ ret = i915_vma_unbind(vma);
if (ret)
return ret;
}
}
- if (obj->gtt_space == NULL) {
+ if (!i915_gem_obj_bound(obj, vm)) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- ret = i915_gem_object_bind_to_gtt(obj, alignment,
- map_and_fenceable,
- nonblocking);
+ ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
+ map_and_fenceable,
+ nonblocking);
if (ret)
return ret;
@@ -3669,7 +3792,7 @@ void
i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pin_count == 0);
- BUG_ON(obj->gtt_space == NULL);
+ BUG_ON(!i915_gem_obj_bound_any(obj));
if (--obj->pin_count == 0)
obj->pin_mappable = false;
@@ -3707,7 +3830,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
}
if (obj->user_pin_count == 0) {
- ret = i915_gem_object_pin(obj, args->alignment, true, false);
+ ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
if (ret)
goto out;
}
@@ -3715,11 +3838,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
obj->user_pin_count++;
obj->pin_filp = file;
- /* XXX - flush the CPU caches for pinned objects
- * as the X server doesn't manage domains yet
- */
- i915_gem_object_flush_cpu_write_domain(obj);
- args->offset = obj->gtt_offset;
+ args->offset = i915_gem_obj_ggtt_offset(obj);
out:
drm_gem_object_unreference(&obj->base);
unlock:
@@ -3858,10 +3977,11 @@ unlock:
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops)
{
- INIT_LIST_HEAD(&obj->mm_list);
INIT_LIST_HEAD(&obj->global_list);
INIT_LIST_HEAD(&obj->ring_list);
INIT_LIST_HEAD(&obj->exec_list);
+ INIT_LIST_HEAD(&obj->obj_exec_link);
+ INIT_LIST_HEAD(&obj->vma_list);
obj->ops = ops;
@@ -3926,6 +4046,8 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
} else
obj->cache_level = I915_CACHE_NONE;
+ trace_i915_gem_object_create(obj);
+
return obj;
}
@@ -3941,6 +4063,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct i915_vma *vma, *next;
trace_i915_gem_object_destroy(obj);
@@ -3948,15 +4071,21 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
i915_gem_detach_phys_object(dev, obj);
obj->pin_count = 0;
- if (WARN_ON(i915_gem_object_unbind(obj) == -ERESTARTSYS)) {
- bool was_interruptible;
+ /* NB: 0 or 1 elements */
+ WARN_ON(!list_empty(&obj->vma_list) &&
+ !list_is_singular(&obj->vma_list));
+ list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
+ int ret = i915_vma_unbind(vma);
+ if (WARN_ON(ret == -ERESTARTSYS)) {
+ bool was_interruptible;
- was_interruptible = dev_priv->mm.interruptible;
- dev_priv->mm.interruptible = false;
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
- WARN_ON(i915_gem_object_unbind(obj));
+ WARN_ON(i915_vma_unbind(vma));
- dev_priv->mm.interruptible = was_interruptible;
+ dev_priv->mm.interruptible = was_interruptible;
+ }
}
/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
@@ -3982,15 +4111,42 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
i915_gem_object_free(obj);
}
+struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+ if (vma == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&vma->vma_link);
+ INIT_LIST_HEAD(&vma->mm_list);
+ INIT_LIST_HEAD(&vma->exec_list);
+ vma->vm = vm;
+ vma->obj = obj;
+
+ /* Keep GGTT vmas first to make debug easier */
+ if (i915_is_ggtt(vm))
+ list_add(&vma->vma_link, &obj->vma_list);
+ else
+ list_add_tail(&vma->vma_link, &obj->vma_list);
+
+ return vma;
+}
+
+void i915_gem_vma_destroy(struct i915_vma *vma)
+{
+ WARN_ON(vma->node.allocated);
+ list_del(&vma->vma_link);
+ kfree(vma);
+}
+
int
i915_gem_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- mutex_lock(&dev->struct_mutex);
-
- if (dev_priv->mm.suspended) {
+ if (dev_priv->ums.mm_suspended) {
mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -4006,18 +4162,11 @@ i915_gem_idle(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_gem_evict_everything(dev);
- /* Hack! Don't let anybody do execbuf while we don't control the chip.
- * We need to replace this with a semaphore, or something.
- * And not confound mm.suspended!
- */
- dev_priv->mm.suspended = 1;
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
- mutex_unlock(&dev->struct_mutex);
-
/* Cancel the retire work handler, which should be idle now. */
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
@@ -4150,8 +4299,8 @@ i915_gem_init_hw(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
return -EIO;
- if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
- I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
+ if (dev_priv->ellc_size)
+ I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
if (HAS_PCH_NOP(dev)) {
u32 temp = I915_READ(GEN7_MSG_CTL);
@@ -4227,7 +4376,7 @@ int
i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -4239,7 +4388,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
}
mutex_lock(&dev->struct_mutex);
- dev_priv->mm.suspended = 0;
+ dev_priv->ums.mm_suspended = 0;
ret = i915_gem_init_hw(dev);
if (ret != 0) {
@@ -4247,7 +4396,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
return ret;
}
- BUG_ON(!list_empty(&dev_priv->mm.active_list));
+ BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
mutex_unlock(&dev->struct_mutex);
ret = drm_irq_install(dev);
@@ -4259,7 +4408,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
cleanup_ringbuffer:
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
- dev_priv->mm.suspended = 1;
+ dev_priv->ums.mm_suspended = 1;
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -4269,11 +4418,26 @@ int
i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
drm_irq_uninstall(dev);
- return i915_gem_idle(dev);
+
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_gem_idle(dev);
+
+ /* Hack! Don't let anybody do execbuf while we don't control the chip.
+ * We need to replace this with a semaphore, or something.
+ * And not confound ums.mm_suspended!
+ */
+ if (ret != 0)
+ dev_priv->ums.mm_suspended = 1;
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
}
void
@@ -4284,9 +4448,11 @@ i915_gem_lastclose(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
+ mutex_lock(&dev->struct_mutex);
ret = i915_gem_idle(dev);
if (ret)
DRM_ERROR("failed to idle hardware: %d\n", ret);
+ mutex_unlock(&dev->struct_mutex);
}
static void
@@ -4296,6 +4462,16 @@ init_ring_lists(struct intel_ring_buffer *ring)
INIT_LIST_HEAD(&ring->request_list);
}
+static void i915_init_vm(struct drm_i915_private *dev_priv,
+ struct i915_address_space *vm)
+{
+ vm->dev = dev_priv->dev;
+ INIT_LIST_HEAD(&vm->active_list);
+ INIT_LIST_HEAD(&vm->inactive_list);
+ INIT_LIST_HEAD(&vm->global_link);
+ list_add(&vm->global_link, &dev_priv->vm_list);
+}
+
void
i915_gem_load(struct drm_device *dev)
{
@@ -4308,8 +4484,9 @@ i915_gem_load(struct drm_device *dev)
SLAB_HWCACHE_ALIGN,
NULL);
- INIT_LIST_HEAD(&dev_priv->mm.active_list);
- INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+ INIT_LIST_HEAD(&dev_priv->vm_list);
+ i915_init_vm(dev_priv, &dev_priv->gtt.base);
+
INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
INIT_LIST_HEAD(&dev_priv->mm.bound_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4608,11 +4785,101 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
- list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list)
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if (obj->active)
+ continue;
+
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
cnt += obj->base.size >> PAGE_SHIFT;
+ }
if (unlock)
mutex_unlock(&dev->struct_mutex);
return cnt;
}
+
+/* All the new VM stuff */
+unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
+{
+ struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+ struct i915_vma *vma;
+
+ if (vm == &dev_priv->mm.aliasing_ppgtt->base)
+ vm = &dev_priv->gtt.base;
+
+ BUG_ON(list_empty(&o->vma_list));
+ list_for_each_entry(vma, &o->vma_list, vma_link) {
+ if (vma->vm == vm)
+ return vma->node.start;
+
+ }
+ return -1;
+}
+
+bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+
+ list_for_each_entry(vma, &o->vma_list, vma_link)
+ if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
+ return true;
+
+ return false;
+}
+
+bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
+{
+ struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+ struct i915_address_space *vm;
+
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ if (i915_gem_obj_bound(o, vm))
+ return true;
+
+ return false;
+}
+
+unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
+ struct i915_address_space *vm)
+{
+ struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+ struct i915_vma *vma;
+
+ if (vm == &dev_priv->mm.aliasing_ppgtt->base)
+ vm = &dev_priv->gtt.base;
+
+ BUG_ON(list_empty(&o->vma_list));
+
+ list_for_each_entry(vma, &o->vma_list, vma_link)
+ if (vma->vm == vm)
+ return vma->node.size;
+
+ return 0;
+}
+
+struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
+ if (vma->vm == vm)
+ return vma;
+
+ return NULL;
+}
+
+struct i915_vma *
+i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
+ struct i915_address_space *vm)
+{
+ struct i915_vma *vma;
+
+ vma = i915_gem_obj_to_vma(obj, vm);
+ if (!vma)
+ vma = i915_gem_vma_create(obj, vm);
+
+ return vma;
+}
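The helpers added at the end of i915_gem.c above key every query on an (object, address space) pair: i915_gem_obj_to_vma() walks obj->vma_list for a VMA whose vm pointer matches, and i915_gem_obj_lookup_or_create_vma() falls back to i915_gem_vma_create() when none exists. A rough standalone sketch of that lookup-or-create shape, with made-up stand-in types (struct addr_space, struct object and struct vma here are illustrative, not the driver's structures, and a plain next pointer replaces the kernel list_head):

/* Illustrative only: one mapping node per (object, address space) pair,
 * found by pointer comparison on the address space, created on demand. */
#include <stdio.h>
#include <stdlib.h>

struct addr_space { const char *name; };

struct vma {
	struct addr_space *vm;	/* which address space this mapping lives in */
	unsigned long start;	/* offset of the node within that space */
	struct vma *next;	/* per-object list of mappings */
};

struct object { struct vma *vma_list; };

static struct vma *obj_to_vma(struct object *obj, struct addr_space *vm)
{
	struct vma *vma;

	for (vma = obj->vma_list; vma; vma = vma->next)
		if (vma->vm == vm)
			return vma;
	return NULL;
}

static struct vma *obj_lookup_or_create_vma(struct object *obj,
					    struct addr_space *vm)
{
	struct vma *vma = obj_to_vma(obj, vm);

	if (vma)
		return vma;

	vma = calloc(1, sizeof(*vma));
	if (!vma)
		return NULL;
	vma->vm = vm;
	vma->next = obj->vma_list;	/* prepend: newest mapping goes first */
	obj->vma_list = vma;
	return vma;
}

int main(void)
{
	struct addr_space ggtt = { "ggtt" }, ppgtt = { "ppgtt" };
	struct object obj = { NULL };
	struct vma *g = obj_lookup_or_create_vma(&obj, &ggtt);
	struct vma *p = obj_lookup_or_create_vma(&obj, &ppgtt);

	if (!g || !p)
		return 1;

	g->start = 0x1000;	/* same object, two address spaces ... */
	p->start = 0x2000;	/* ... each with its own offset */

	printf("ggtt: 0x%lx  ppgtt: 0x%lx\n",
	       obj_to_vma(&obj, &ggtt)->start,
	       obj_to_vma(&obj, &ppgtt)->start);
	free(g);
	free(p);
	return 0;
}

In the patch itself the list is a kernel list_head, GGTT VMAs are deliberately kept at the head of obj->vma_list, and creation failure is reported as ERR_PTR(-ENOMEM) rather than NULL.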
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 51b7a2171ca..403309c2a7d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -155,7 +155,7 @@ create_hw_context(struct drm_device *dev,
if (INTEL_INFO(dev)->gen >= 7) {
ret = i915_gem_object_set_cache_level(ctx->obj,
- I915_CACHE_LLC_MLC);
+ I915_CACHE_L3_LLC);
/* Failure shouldn't ever happen this early */
if (WARN_ON(ret))
goto err_out;
@@ -214,7 +214,7 @@ static int create_default_context(struct drm_i915_private *dev_priv)
* default context.
*/
dev_priv->ring[RCS].default_context = ctx;
- ret = i915_gem_object_pin(ctx->obj, CONTEXT_ALIGN, false, false);
+ ret = i915_gem_obj_ggtt_pin(ctx->obj, CONTEXT_ALIGN, false, false);
if (ret) {
DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
goto err_destroy;
@@ -304,31 +304,24 @@ static int context_idr_cleanup(int id, void *p, void *data)
}
struct i915_ctx_hang_stats *
-i915_gem_context_get_hang_stats(struct intel_ring_buffer *ring,
+i915_gem_context_get_hang_stats(struct drm_device *dev,
struct drm_file *file,
u32 id)
{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_file_private *file_priv = file->driver_priv;
- struct i915_hw_context *to;
-
- if (dev_priv->hw_contexts_disabled)
- return ERR_PTR(-ENOENT);
-
- if (ring->id != RCS)
- return ERR_PTR(-EINVAL);
-
- if (file == NULL)
- return ERR_PTR(-EINVAL);
+ struct i915_hw_context *ctx;
if (id == DEFAULT_CONTEXT_ID)
return &file_priv->hang_stats;
- to = i915_gem_context_get(file->driver_priv, id);
- if (to == NULL)
+ ctx = NULL;
+ if (!dev_priv->hw_contexts_disabled)
+ ctx = i915_gem_context_get(file->driver_priv, id);
+ if (ctx == NULL)
return ERR_PTR(-ENOENT);
- return &to->hang_stats;
+ return &ctx->hang_stats;
}
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
@@ -377,7 +370,7 @@ mi_set_context(struct intel_ring_buffer *ring,
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, new_context->obj->gtt_offset |
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(new_context->obj) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@@ -407,7 +400,7 @@ static int do_switch(struct i915_hw_context *to)
if (from == to)
return 0;
- ret = i915_gem_object_pin(to->obj, CONTEXT_ALIGN, false, false);
+ ret = i915_gem_obj_ggtt_pin(to->obj, CONTEXT_ALIGN, false, false);
if (ret)
return ret;
@@ -443,7 +436,10 @@ static int do_switch(struct i915_hw_context *to)
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from != NULL) {
+ struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private;
+ struct i915_address_space *ggtt = &dev_priv->gtt.base;
from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+ list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list);
i915_gem_object_move_to_active(from->obj, ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 582e6a5f3da..775d506b320 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -97,7 +97,7 @@ i915_verify_lists(struct drm_device *dev)
}
}
- list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
+ list_for_each_entry(obj, &i915_gtt_vm->inactive_list, list) {
if (obj->base.dev != dev ||
!atomic_read(&obj->base.refcount.refcount)) {
DRM_ERROR("freed inactive %p\n", obj);
@@ -115,73 +115,4 @@ i915_verify_lists(struct drm_device *dev)
return warned = err;
}
-#endif /* WATCH_INACTIVE */
-
-#if WATCH_COHERENCY
-void
-i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
-{
- struct drm_device *dev = obj->base.dev;
- int page;
- uint32_t *gtt_mapping;
- uint32_t *backing_map = NULL;
- int bad_count = 0;
-
- DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
- __func__, obj, obj->gtt_offset, handle,
- obj->size / 1024);
-
- gtt_mapping = ioremap(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
- obj->base.size);
- if (gtt_mapping == NULL) {
- DRM_ERROR("failed to map GTT space\n");
- return;
- }
-
- for (page = 0; page < obj->size / PAGE_SIZE; page++) {
- int i;
-
- backing_map = kmap_atomic(obj->pages[page]);
-
- if (backing_map == NULL) {
- DRM_ERROR("failed to map backing page\n");
- goto out;
- }
-
- for (i = 0; i < PAGE_SIZE / 4; i++) {
- uint32_t cpuval = backing_map[i];
- uint32_t gttval = readl(gtt_mapping +
- page * 1024 + i);
-
- if (cpuval != gttval) {
- DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
- "0x%08x vs 0x%08x\n",
- (int)(obj->gtt_offset +
- page * PAGE_SIZE + i * 4),
- cpuval, gttval);
- if (bad_count++ >= 8) {
- DRM_INFO("...\n");
- goto out;
- }
- }
- }
- kunmap_atomic(backing_map);
- backing_map = NULL;
- }
-
- out:
- if (backing_map != NULL)
- kunmap_atomic(backing_map);
- iounmap(gtt_mapping);
-
- /* give syslog time to catch up */
- msleep(1);
-
- /* Directly flush the object, since we just loaded values with the CPU
- * from the backing pages and we don't want to disturb the cache
- * management that we're trying to observe.
- */
-
- i915_gem_clflush_object(obj);
-}
-#endif
+#endif /* WATCH_LIST */
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index dc53a527126..e918b05fcbd 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -27,10 +27,15 @@
#include "i915_drv.h"
#include <linux/dma-buf.h>
+static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
+{
+ return to_intel_bo(buf->priv);
+}
+
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction dir)
{
- struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
struct sg_table *st;
struct scatterlist *src, *dst;
int ret, i;
@@ -85,25 +90,22 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
struct sg_table *sg,
enum dma_data_direction dir)
{
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
+
+ mutex_lock(&obj->base.dev->struct_mutex);
+
dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
sg_free_table(sg);
kfree(sg);
-}
-static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)
-{
- struct drm_i915_gem_object *obj = dma_buf->priv;
+ i915_gem_object_unpin_pages(obj);
- if (obj->base.export_dma_buf == dma_buf) {
- /* drop the reference on the export fd holds */
- obj->base.export_dma_buf = NULL;
- drm_gem_object_unreference_unlocked(&obj->base);
- }
+ mutex_unlock(&obj->base.dev->struct_mutex);
}
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
- struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
struct sg_page_iter sg_iter;
struct page **pages;
@@ -151,7 +153,7 @@ error:
static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
{
- struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
int ret;
@@ -194,7 +196,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
{
- struct drm_i915_gem_object *obj = dma_buf->priv;
+ struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
struct drm_device *dev = obj->base.dev;
int ret;
bool write = (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE);
@@ -211,7 +213,7 @@ static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size
static const struct dma_buf_ops i915_dmabuf_ops = {
.map_dma_buf = i915_gem_map_dma_buf,
.unmap_dma_buf = i915_gem_unmap_dma_buf,
- .release = i915_gem_dmabuf_release,
+ .release = drm_gem_dmabuf_release,
.kmap = i915_gem_dmabuf_kmap,
.kmap_atomic = i915_gem_dmabuf_kmap_atomic,
.kunmap = i915_gem_dmabuf_kunmap,
@@ -225,9 +227,7 @@ static const struct dma_buf_ops i915_dmabuf_ops = {
struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gem_obj, int flags)
{
- struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
-
- return dma_buf_export(obj, &i915_dmabuf_ops, obj->base.size, flags);
+ return dma_buf_export(gem_obj, &i915_dmabuf_ops, gem_obj->size, flags);
}
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
@@ -264,7 +264,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
/* is this one of own objects? */
if (dma_buf->ops == &i915_dmabuf_ops) {
- obj = dma_buf->priv;
+ obj = dma_buf_to_obj(dma_buf);
/* is it from our device? */
if (obj->base.dev == dev) {
/*
@@ -289,12 +289,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
goto fail_detach;
}
- ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
- if (ret) {
- i915_gem_object_free(obj);
- goto fail_detach;
- }
-
+ drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
obj->base.import_attach = attach;
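The dma-buf hunks above replace the open-coded dma_buf->priv casts with a single dma_buf_to_obj() accessor, so every callback recovers the GEM object the same way. A tiny sketch of the same typed-accessor pattern, with illustrative stand-in types rather than the real dma_buf/GEM structures:

/* Illustrative only: keep the priv-to-object cast in one helper instead
 * of repeating it at every call site. */
#include <stdio.h>

struct my_obj { int id; };
struct fake_dma_buf { void *priv; };

static struct my_obj *buf_to_obj(struct fake_dma_buf *buf)
{
	return (struct my_obj *)buf->priv;
}

int main(void)
{
	struct my_obj obj = { 42 };
	struct fake_dma_buf buf = { &obj };

	printf("object id: %d\n", buf_to_obj(&buf)->id);
	return 0;
}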
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index c86d5d9356f..91b70015585 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -32,23 +32,23 @@
#include "i915_trace.h"
static bool
-mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
+mark_free(struct i915_vma *vma, struct list_head *unwind)
{
- if (obj->pin_count)
+ if (vma->obj->pin_count)
return false;
- list_add(&obj->exec_list, unwind);
- return drm_mm_scan_add_block(obj->gtt_space);
+ list_add(&vma->exec_list, unwind);
+ return drm_mm_scan_add_block(&vma->node);
}
int
-i915_gem_evict_something(struct drm_device *dev, int min_size,
- unsigned alignment, unsigned cache_level,
+i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
+ int min_size, unsigned alignment, unsigned cache_level,
bool mappable, bool nonblocking)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
- struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
int ret = 0;
trace_i915_gem_evict(dev, min_size, alignment, mappable);
@@ -77,17 +77,17 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
*/
INIT_LIST_HEAD(&unwind_list);
- if (mappable)
- drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
- min_size, alignment, cache_level,
- 0, dev_priv->gtt.mappable_end);
- else
- drm_mm_init_scan(&dev_priv->mm.gtt_space,
- min_size, alignment, cache_level);
+ if (mappable) {
+ BUG_ON(!i915_is_ggtt(vm));
+ drm_mm_init_scan_with_range(&vm->mm, min_size,
+ alignment, cache_level, 0,
+ dev_priv->gtt.mappable_end);
+ } else
+ drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
/* First see if there is a large enough contiguous idle region... */
- list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
- if (mark_free(obj, &unwind_list))
+ list_for_each_entry(vma, &vm->inactive_list, mm_list) {
+ if (mark_free(vma, &unwind_list))
goto found;
}
@@ -95,22 +95,21 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
goto none;
/* Now merge in the soon-to-be-expired objects... */
- list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- if (mark_free(obj, &unwind_list))
+ list_for_each_entry(vma, &vm->active_list, mm_list) {
+ if (mark_free(vma, &unwind_list))
goto found;
}
none:
/* Nothing found, clean up and bail out! */
while (!list_empty(&unwind_list)) {
- obj = list_first_entry(&unwind_list,
- struct drm_i915_gem_object,
+ vma = list_first_entry(&unwind_list,
+ struct i915_vma,
exec_list);
-
- ret = drm_mm_scan_remove_block(obj->gtt_space);
+ ret = drm_mm_scan_remove_block(&vma->node);
BUG_ON(ret);
- list_del_init(&obj->exec_list);
+ list_del_init(&vma->exec_list);
}
/* We expect the caller to unpin, evict all and try again, or give up.
@@ -124,27 +123,30 @@ found:
* temporary list. */
INIT_LIST_HEAD(&eviction_list);
while (!list_empty(&unwind_list)) {
- obj = list_first_entry(&unwind_list,
- struct drm_i915_gem_object,
+ vma = list_first_entry(&unwind_list,
+ struct i915_vma,
exec_list);
- if (drm_mm_scan_remove_block(obj->gtt_space)) {
- list_move(&obj->exec_list, &eviction_list);
- drm_gem_object_reference(&obj->base);
+ if (drm_mm_scan_remove_block(&vma->node)) {
+ list_move(&vma->exec_list, &eviction_list);
+ drm_gem_object_reference(&vma->obj->base);
continue;
}
- list_del_init(&obj->exec_list);
+ list_del_init(&vma->exec_list);
}
/* Unbinding will emit any required flushes */
while (!list_empty(&eviction_list)) {
- obj = list_first_entry(&eviction_list,
- struct drm_i915_gem_object,
+ struct drm_gem_object *obj;
+ vma = list_first_entry(&eviction_list,
+ struct i915_vma,
exec_list);
+
+ obj = &vma->obj->base;
+ list_del_init(&vma->exec_list);
if (ret == 0)
- ret = i915_gem_object_unbind(obj);
+ ret = i915_vma_unbind(vma);
- list_del_init(&obj->exec_list);
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference(obj);
}
return ret;
@@ -154,12 +156,18 @@ int
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj, *next;
- bool lists_empty;
+ struct i915_address_space *vm;
+ struct i915_vma *vma, *next;
+ bool lists_empty = true;
int ret;
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.active_list));
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ lists_empty = (list_empty(&vm->inactive_list) &&
+ list_empty(&vm->active_list));
+ if (!lists_empty)
+ lists_empty = false;
+ }
+
if (lists_empty)
return -ENOSPC;
@@ -176,10 +184,11 @@ i915_gem_evict_everything(struct drm_device *dev)
i915_gem_retire_requests(dev);
/* Having flushed everything, unbind() should never raise an error */
- list_for_each_entry_safe(obj, next,
- &dev_priv->mm.inactive_list, mm_list)
- if (obj->pin_count == 0)
- WARN_ON(i915_gem_object_unbind(obj));
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
+ if (vma->obj->pin_count == 0)
+ WARN_ON(i915_vma_unbind(vma));
+ }
return 0;
}
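The eviction rework above now walks a single address space's inactive list (then its active list), tentatively marking unpinned VMAs via mark_free() until drm_mm's scan reports room for the request, and afterwards unwinds whatever was only speculatively marked. A self-contained approximation of that mark-until-satisfied, unwind-on-failure flow, using a plain array and byte sizes instead of drm_mm's scan state (the names and sizes below are made up):

/* Illustrative only: gather unpinned candidates until the request fits,
 * otherwise clear all tentative marks and report failure. */
#include <stdbool.h>
#include <stdio.h>

struct candidate {
	const char *name;
	unsigned long size;
	bool pinned;	/* pinned mappings are never eviction candidates */
	bool marked;	/* tentatively selected by the scan */
};

static bool mark_free(struct candidate *c, unsigned long *found,
		      unsigned long min_size)
{
	if (c->pinned)
		return false;
	c->marked = true;
	*found += c->size;
	return *found >= min_size;	/* true once enough space is gathered */
}

int main(void)
{
	struct candidate list[] = {
		{ "scanout",  4096, true,  false },
		{ "batch",    8192, false, false },
		{ "texture", 65536, false, false },
	};
	unsigned long found = 0, min_size = 16384;
	unsigned int i, n = sizeof(list) / sizeof(list[0]);

	for (i = 0; i < n; i++)
		if (mark_free(&list[i], &found, min_size))
			break;

	if (found < min_size) {
		for (i = 0; i < n; i++)	/* nothing found: unwind the marks */
			list[i].marked = false;
		printf("no space\n");
		return 1;
	}

	for (i = 0; i < n; i++)
		printf("%-8s %s\n", list[i].name,
		       list[i].marked ? "evict" : "keep");
	return 0;
}

In the driver the bookkeeping is done by drm_mm_scan_add_block()/drm_mm_scan_remove_block(), and the chosen VMAs are then actually unbound with i915_vma_unbind().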
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 87a3227e517..792c52a235e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -172,9 +172,60 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
}
static int
+relocate_entry_cpu(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_relocation_entry *reloc)
+{
+ uint32_t page_offset = offset_in_page(reloc->offset);
+ char *vaddr;
+ int ret = -EINVAL;
+
+ ret = i915_gem_object_set_to_cpu_domain(obj, 1);
+ if (ret)
+ return ret;
+
+ vaddr = kmap_atomic(i915_gem_object_get_page(obj,
+ reloc->offset >> PAGE_SHIFT));
+ *(uint32_t *)(vaddr + page_offset) = reloc->delta;
+ kunmap_atomic(vaddr);
+
+ return 0;
+}
+
+static int
+relocate_entry_gtt(struct drm_i915_gem_object *obj,
+ struct drm_i915_gem_relocation_entry *reloc)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t __iomem *reloc_entry;
+ void __iomem *reloc_page;
+ int ret = -EINVAL;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ return ret;
+
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ return ret;
+
+ /* Map the page containing the relocation we're going to perform. */
+ reloc->offset += i915_gem_obj_ggtt_offset(obj);
+ reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ reloc->offset & PAGE_MASK);
+ reloc_entry = (uint32_t __iomem *)
+ (reloc_page + offset_in_page(reloc->offset));
+ iowrite32(reloc->delta, reloc_entry);
+ io_mapping_unmap_atomic(reloc_page);
+
+ return 0;
+}
+
+static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
- struct drm_i915_gem_relocation_entry *reloc)
+ struct drm_i915_gem_relocation_entry *reloc,
+ struct i915_address_space *vm)
{
struct drm_device *dev = obj->base.dev;
struct drm_gem_object *target_obj;
@@ -188,7 +239,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return -ENOENT;
target_i915_obj = to_intel_bo(target_obj);
- target_offset = target_i915_obj->gtt_offset;
+ target_offset = i915_gem_obj_ggtt_offset(target_i915_obj);
/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
* pipe_control writes because the gpu doesn't properly redirect them
@@ -254,40 +305,10 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
return -EFAULT;
reloc->delta += target_offset;
- if (use_cpu_reloc(obj)) {
- uint32_t page_offset = reloc->offset & ~PAGE_MASK;
- char *vaddr;
-
- ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret)
- return ret;
-
- vaddr = kmap_atomic(i915_gem_object_get_page(obj,
- reloc->offset >> PAGE_SHIFT));
- *(uint32_t *)(vaddr + page_offset) = reloc->delta;
- kunmap_atomic(vaddr);
- } else {
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t __iomem *reloc_entry;
- void __iomem *reloc_page;
-
- ret = i915_gem_object_set_to_gtt_domain(obj, true);
- if (ret)
- return ret;
-
- ret = i915_gem_object_put_fence(obj);
- if (ret)
- return ret;
-
- /* Map the page containing the relocation we're going to perform. */
- reloc->offset += obj->gtt_offset;
- reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
- reloc->offset & PAGE_MASK);
- reloc_entry = (uint32_t __iomem *)
- (reloc_page + (reloc->offset & ~PAGE_MASK));
- iowrite32(reloc->delta, reloc_entry);
- io_mapping_unmap_atomic(reloc_page);
- }
+ if (use_cpu_reloc(obj))
+ ret = relocate_entry_cpu(obj, reloc);
+ else
+ ret = relocate_entry_gtt(obj, reloc);
/* and update the user's relocation entry */
reloc->presumed_offset = target_offset;
@@ -297,7 +318,8 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
- struct eb_objects *eb)
+ struct eb_objects *eb,
+ struct i915_address_space *vm)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
@@ -321,7 +343,8 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
do {
u64 offset = r->presumed_offset;
- ret = i915_gem_execbuffer_relocate_entry(obj, eb, r);
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, r,
+ vm);
if (ret)
return ret;
@@ -344,13 +367,15 @@ i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
struct eb_objects *eb,
- struct drm_i915_gem_relocation_entry *relocs)
+ struct drm_i915_gem_relocation_entry *relocs,
+ struct i915_address_space *vm)
{
const struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
int i, ret;
for (i = 0; i < entry->relocation_count; i++) {
- ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i]);
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, &relocs[i],
+ vm);
if (ret)
return ret;
}
@@ -359,7 +384,8 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
}
static int
-i915_gem_execbuffer_relocate(struct eb_objects *eb)
+i915_gem_execbuffer_relocate(struct eb_objects *eb,
+ struct i915_address_space *vm)
{
struct drm_i915_gem_object *obj;
int ret = 0;
@@ -373,7 +399,7 @@ i915_gem_execbuffer_relocate(struct eb_objects *eb)
*/
pagefault_disable();
list_for_each_entry(obj, &eb->objects, exec_list) {
- ret = i915_gem_execbuffer_relocate_object(obj, eb);
+ ret = i915_gem_execbuffer_relocate_object(obj, eb, vm);
if (ret)
break;
}
@@ -395,6 +421,7 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
+ struct i915_address_space *vm,
bool *need_reloc)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
@@ -409,7 +436,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
- ret = i915_gem_object_pin(obj, entry->alignment, need_mappable, false);
+ ret = i915_gem_object_pin(obj, vm, entry->alignment, need_mappable,
+ false);
if (ret)
return ret;
@@ -436,8 +464,8 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->has_aliasing_ppgtt_mapping = 1;
}
- if (entry->offset != obj->gtt_offset) {
- entry->offset = obj->gtt_offset;
+ if (entry->offset != i915_gem_obj_offset(obj, vm)) {
+ entry->offset = i915_gem_obj_offset(obj, vm);
*need_reloc = true;
}
@@ -458,7 +486,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
{
struct drm_i915_gem_exec_object2 *entry;
- if (!obj->gtt_space)
+ if (!i915_gem_obj_bound_any(obj))
return;
entry = obj->exec_entry;
@@ -475,6 +503,7 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct list_head *objects,
+ struct i915_address_space *vm,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
@@ -529,31 +558,37 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
list_for_each_entry(obj, objects, exec_list) {
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
bool need_fence, need_mappable;
+ u32 obj_offset;
- if (!obj->gtt_space)
+ if (!i915_gem_obj_bound(obj, vm))
continue;
+ obj_offset = i915_gem_obj_offset(obj, vm);
need_fence =
has_fenced_gpu_access &&
entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE;
need_mappable = need_fence || need_reloc_mappable(obj);
- if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
+ WARN_ON((need_mappable || need_fence) &&
+ !i915_is_ggtt(vm));
+
+ if ((entry->alignment &&
+ obj_offset & (entry->alignment - 1)) ||
(need_mappable && !obj->map_and_fenceable))
- ret = i915_gem_object_unbind(obj);
+ ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
else
- ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
+ ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
if (ret)
goto err;
}
/* Bind fresh objects */
list_for_each_entry(obj, objects, exec_list) {
- if (obj->gtt_space)
+ if (i915_gem_obj_bound(obj, vm))
continue;
- ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
+ ret = i915_gem_execbuffer_reserve_object(obj, ring, vm, need_relocs);
if (ret)
goto err;
}
@@ -577,7 +612,8 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_file *file,
struct intel_ring_buffer *ring,
struct eb_objects *eb,
- struct drm_i915_gem_exec_object2 *exec)
+ struct drm_i915_gem_exec_object2 *exec,
+ struct i915_address_space *vm)
{
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj;
@@ -661,14 +697,15 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
goto err;
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
if (ret)
goto err;
list_for_each_entry(obj, &eb->objects, exec_list) {
int offset = obj->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
- reloc + reloc_offset[offset]);
+ reloc + reloc_offset[offset],
+ vm);
if (ret)
goto err;
}
@@ -691,6 +728,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
{
struct drm_i915_gem_object *obj;
uint32_t flush_domains = 0;
+ bool flush_chipset = false;
int ret;
list_for_each_entry(obj, objects, exec_list) {
@@ -699,12 +737,12 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
return ret;
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
- i915_gem_clflush_object(obj);
+ flush_chipset |= i915_gem_clflush_object(obj, false);
flush_domains |= obj->base.write_domain;
}
- if (flush_domains & I915_GEM_DOMAIN_CPU)
+ if (flush_chipset)
i915_gem_chipset_flush(ring->dev);
if (flush_domains & I915_GEM_DOMAIN_GTT)
@@ -758,8 +796,10 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
if (!access_ok(VERIFY_WRITE, ptr, length))
return -EFAULT;
- if (fault_in_multipages_readable(ptr, length))
- return -EFAULT;
+ if (likely(!i915_prefault_disable)) {
+ if (fault_in_multipages_readable(ptr, length))
+ return -EFAULT;
+ }
}
return 0;
@@ -767,6 +807,7 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
static void
i915_gem_execbuffer_move_to_active(struct list_head *objects,
+ struct i915_address_space *vm,
struct intel_ring_buffer *ring)
{
struct drm_i915_gem_object *obj;
@@ -781,6 +822,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
obj->base.read_domains = obj->base.pending_read_domains;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
+ /* FIXME: This lookup gets fixed later <-- danvet */
+ list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list);
i915_gem_object_move_to_active(obj, ring);
if (obj->base.write_domain) {
obj->dirty = 1;
@@ -835,7 +878,8 @@ static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
- struct drm_i915_gem_exec_object2 *exec)
+ struct drm_i915_gem_exec_object2 *exec,
+ struct i915_address_space *vm)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct eb_objects *eb;
@@ -872,7 +916,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
break;
case I915_EXEC_BSD:
ring = &dev_priv->ring[VCS];
- if (ctx_id != 0) {
+ if (ctx_id != DEFAULT_CONTEXT_ID) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
return -EPERM;
@@ -880,7 +924,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
break;
case I915_EXEC_BLT:
ring = &dev_priv->ring[BCS];
- if (ctx_id != 0) {
+ if (ctx_id != DEFAULT_CONTEXT_ID) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
return -EPERM;
@@ -888,7 +932,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
break;
case I915_EXEC_VEBOX:
ring = &dev_priv->ring[VECS];
- if (ctx_id != 0) {
+ if (ctx_id != DEFAULT_CONTEXT_ID) {
DRM_DEBUG("Ring %s doesn't support contexts\n",
ring->name);
return -EPERM;
@@ -972,7 +1016,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ret)
goto pre_mutex_err;
- if (dev_priv->mm.suspended) {
+ if (dev_priv->ums.mm_suspended) {
mutex_unlock(&dev->struct_mutex);
ret = -EBUSY;
goto pre_mutex_err;
@@ -997,17 +1041,17 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->objects, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, &eb->objects, vm, &need_relocs);
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
if (need_relocs)
- ret = i915_gem_execbuffer_relocate(eb);
+ ret = i915_gem_execbuffer_relocate(eb, vm);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
- eb, exec);
+ eb, exec, vm);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
@@ -1058,7 +1102,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- exec_start = batch_obj->gtt_offset + args->batch_start_offset;
+ exec_start = i915_gem_obj_offset(batch_obj, vm) +
+ args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
for (i = 0; i < args->num_cliprects; i++) {
@@ -1083,7 +1128,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
- i915_gem_execbuffer_move_to_active(&eb->objects, ring);
+ i915_gem_execbuffer_move_to_active(&eb->objects, vm, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
err:
@@ -1104,6 +1149,7 @@ int
i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
@@ -1159,7 +1205,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2.flags = I915_EXEC_RENDER;
i915_execbuffer2_set_context_id(exec2, 0);
- ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
+ ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list,
+ &dev_priv->gtt.base);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++)
@@ -1185,6 +1232,7 @@ int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list = NULL;
int ret;
@@ -1215,7 +1263,8 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EFAULT;
}
- ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
+ ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list,
+ &dev_priv->gtt.base);
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user(to_user_ptr(args->buffers_ptr),
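relocate_entry_cpu(), factored out above, patches a 32-bit delta at a byte offset inside the object's backing pages: it splits the offset into a page index (offset >> PAGE_SHIFT) and an in-page offset, maps that page, and stores the value. A minimal userspace sketch of the same arithmetic over a plain buffer (PAGE_SIZE, PAGE_SHIFT and the page array are local stand-ins here, and memcpy stands in for the kmap_atomic store):

/* Illustrative only: locate the page holding `offset' and write a
 * 32-bit value at the offset within that page. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096u
#define PAGE_SHIFT 12

static void relocate_entry_cpu(uint8_t *pages[], uint64_t offset,
			       uint32_t delta)
{
	uint8_t *vaddr = pages[offset >> PAGE_SHIFT];	 /* backing page */
	uint32_t page_offset = offset & (PAGE_SIZE - 1); /* offset_in_page() */

	memcpy(vaddr + page_offset, &delta, sizeof(delta));
}

int main(void)
{
	static uint8_t page0[PAGE_SIZE], page1[PAGE_SIZE];
	uint8_t *pages[] = { page0, page1 };
	uint32_t readback;

	relocate_entry_cpu(pages, PAGE_SIZE + 0x20, 0xdeadbeef);

	memcpy(&readback, page1 + 0x20, sizeof(readback));
	printf("0x%08x\n", (unsigned int)readback);
	return 0;
}

The GTT variant in the same hunk performs the equivalent write through an io mapping with iowrite32(), after moving the object to the GTT domain and dropping its fence.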
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 5101ab6869b..212f6d8c35e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -28,8 +28,12 @@
#include "i915_trace.h"
#include "intel_drv.h"
+#define GEN6_PPGTT_PD_ENTRIES 512
+#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))
+
/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
+#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
#define GEN6_PDE_VALID (1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
@@ -39,19 +43,50 @@
#define GEN6_PTE_UNCACHED (1 << 1)
#define HSW_PTE_UNCACHED (0)
#define GEN6_PTE_CACHE_LLC (2 << 1)
-#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
+#define GEN7_PTE_CACHE_L3_LLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
+#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)
-static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
- dma_addr_t addr,
- enum i915_cache_level level)
+/* Cacheability Control is a 4-bit value. The low three bits are stored in *
+ * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
+ */
+#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
+ (((bits) & 0x8) << (11 - 3)))
+#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
+#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
+#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
+#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
+
+static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
switch (level) {
- case I915_CACHE_LLC_MLC:
- pte |= GEN6_PTE_CACHE_LLC_MLC;
+ case I915_CACHE_L3_LLC:
+ case I915_CACHE_LLC:
+ pte |= GEN6_PTE_CACHE_LLC;
+ break;
+ case I915_CACHE_NONE:
+ pte |= GEN6_PTE_UNCACHED;
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ return pte;
+}
+
+static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level)
+{
+ gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+ pte |= GEN6_PTE_ADDR_ENCODE(addr);
+
+ switch (level) {
+ case I915_CACHE_L3_LLC:
+ pte |= GEN7_PTE_CACHE_L3_LLC;
break;
case I915_CACHE_LLC:
pte |= GEN6_PTE_CACHE_LLC;
@@ -60,7 +95,7 @@ static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
pte |= GEN6_PTE_UNCACHED;
break;
default:
- BUG();
+ WARN_ON(1);
}
return pte;
@@ -69,8 +104,7 @@ static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
#define BYT_PTE_WRITEABLE (1 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
-static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
- dma_addr_t addr,
+static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
@@ -87,22 +121,41 @@ static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
return pte;
}
-static gen6_gtt_pte_t hsw_pte_encode(struct drm_device *dev,
- dma_addr_t addr,
+static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
enum i915_cache_level level)
{
gen6_gtt_pte_t pte = GEN6_PTE_VALID;
- pte |= GEN6_PTE_ADDR_ENCODE(addr);
+ pte |= HSW_PTE_ADDR_ENCODE(addr);
if (level != I915_CACHE_NONE)
- pte |= GEN6_PTE_CACHE_LLC;
+ pte |= HSW_WB_LLC_AGE3;
+
+ return pte;
+}
+
+static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level)
+{
+ gen6_gtt_pte_t pte = GEN6_PTE_VALID;
+ pte |= HSW_PTE_ADDR_ENCODE(addr);
+
+ switch (level) {
+ case I915_CACHE_NONE:
+ break;
+ case I915_CACHE_WT:
+ pte |= HSW_WT_ELLC_LLC_AGE0;
+ break;
+ default:
+ pte |= HSW_WB_ELLC_LLC_AGE0;
+ break;
+ }
return pte;
}
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
- struct drm_i915_private *dev_priv = ppgtt->dev->dev_private;
+ struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
gen6_gtt_pte_t __iomem *pd_addr;
uint32_t pd_entry;
int i;
@@ -181,18 +234,18 @@ static int gen6_ppgtt_enable(struct drm_device *dev)
}
/* PPGTT support for Sandybdrige/Gen6 and later */
-static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
+static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
unsigned first_entry,
unsigned num_entries)
{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
gen6_gtt_pte_t *pt_vaddr, scratch_pte;
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
- scratch_pte = ppgtt->pte_encode(ppgtt->dev,
- ppgtt->scratch_page_dma_addr,
- I915_CACHE_LLC);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
while (num_entries) {
last_pte = first_pte + num_entries;
@@ -212,11 +265,13 @@ static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
}
}
-static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
+static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
struct sg_table *pages,
unsigned first_entry,
enum i915_cache_level cache_level)
{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
gen6_gtt_pte_t *pt_vaddr;
unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
@@ -227,8 +282,7 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
dma_addr_t page_addr;
page_addr = sg_page_iter_dma_address(&sg_iter);
- pt_vaddr[act_pte] = ppgtt->pte_encode(ppgtt->dev, page_addr,
- cache_level);
+ pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level);
if (++act_pte == I915_PPGTT_PT_ENTRIES) {
kunmap_atomic(pt_vaddr);
act_pt++;
@@ -240,13 +294,17 @@ static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
kunmap_atomic(pt_vaddr);
}
-static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
+static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
+ struct i915_hw_ppgtt *ppgtt =
+ container_of(vm, struct i915_hw_ppgtt, base);
int i;
+ drm_mm_takedown(&ppgtt->base.mm);
+
if (ppgtt->pt_dma_addr) {
for (i = 0; i < ppgtt->num_pd_entries; i++)
- pci_unmap_page(ppgtt->dev->pdev,
+ pci_unmap_page(ppgtt->base.dev->pdev,
ppgtt->pt_dma_addr[i],
4096, PCI_DMA_BIDIRECTIONAL);
}
@@ -260,7 +318,7 @@ static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
- struct drm_device *dev = ppgtt->dev;
+ struct drm_device *dev = ppgtt->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned first_pd_entry_in_global_pt;
int i;
@@ -271,18 +329,13 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
* now. */
first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);
- if (IS_HASWELL(dev)) {
- ppgtt->pte_encode = hsw_pte_encode;
- } else if (IS_VALLEYVIEW(dev)) {
- ppgtt->pte_encode = byt_pte_encode;
- } else {
- ppgtt->pte_encode = gen6_pte_encode;
- }
- ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
+ ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
+ ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
ppgtt->enable = gen6_ppgtt_enable;
- ppgtt->clear_range = gen6_ppgtt_clear_range;
- ppgtt->insert_entries = gen6_ppgtt_insert_entries;
- ppgtt->cleanup = gen6_ppgtt_cleanup;
+ ppgtt->base.clear_range = gen6_ppgtt_clear_range;
+ ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
+ ppgtt->base.cleanup = gen6_ppgtt_cleanup;
+ ppgtt->base.scratch = dev_priv->gtt.base.scratch;
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
GFP_KERNEL);
if (!ppgtt->pt_pages)
@@ -313,8 +366,8 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->pt_dma_addr[i] = pt_addr;
}
- ppgtt->clear_range(ppgtt, 0,
- ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
+ ppgtt->base.clear_range(&ppgtt->base, 0,
+ ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES);
ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);
@@ -347,8 +400,7 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
if (!ppgtt)
return -ENOMEM;
- ppgtt->dev = dev;
- ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
+ ppgtt->base.dev = dev;
if (INTEL_INFO(dev)->gen < 8)
ret = gen6_ppgtt_init(ppgtt);
@@ -357,8 +409,11 @@ static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
if (ret)
kfree(ppgtt);
- else
+ else {
dev_priv->mm.aliasing_ppgtt = ppgtt;
+ drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
+ ppgtt->base.total);
+ }
return ret;
}
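The hunks above convert the PPGTT callbacks to take a struct i915_address_space and recover the enclosing i915_hw_ppgtt with container_of(). A minimal stand-alone sketch of that embedding pattern (names here are illustrative, not the driver's actual headers):

#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Generic base: what the callbacks receive. */
struct address_space_base {
	void (*cleanup)(struct address_space_base *vm);
};

/* Derived object embedding the base as member "base". */
struct hw_ppgtt_example {
	struct address_space_base base;
	int num_pd_entries;
};

/* Callback recovers the derived object from the base pointer it was handed. */
static void ppgtt_cleanup_example(struct address_space_base *vm)
{
	struct hw_ppgtt_example *ppgtt =
		container_of(vm, struct hw_ppgtt_example, base);

	ppgtt->num_pd_entries = 0;	/* stand-in for real teardown */
}

Calls then go through the base, e.g. ppgtt->base.cleanup(&ppgtt->base), exactly as the aliasing-PPGTT teardown hunk below does.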
@@ -371,7 +426,7 @@ void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
if (!ppgtt)
return;
- ppgtt->cleanup(ppgtt);
+ ppgtt->base.cleanup(&ppgtt->base);
dev_priv->mm.aliasing_ppgtt = NULL;
}
@@ -379,17 +434,17 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
- ppgtt->insert_entries(ppgtt, obj->pages,
- obj->gtt_space->start >> PAGE_SHIFT,
- cache_level);
+ ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
+ i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
+ cache_level);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj)
{
- ppgtt->clear_range(ppgtt,
- obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
+ ppgtt->base.clear_range(&ppgtt->base,
+ i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
}
extern int intel_iommu_gfx_mapped;
@@ -436,11 +491,12 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
struct drm_i915_gem_object *obj;
/* First fill our portion of the GTT with scratch pages */
- dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
- dev_priv->gtt.total / PAGE_SIZE);
+ dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+ dev_priv->gtt.base.start / PAGE_SIZE,
+ dev_priv->gtt.base.total / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- i915_gem_clflush_object(obj);
+ i915_gem_clflush_object(obj, obj->pin_display);
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
@@ -466,12 +522,12 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
* within the global GTT as well as accessible by the GPU through the GMADR
* mapped BAR (dev_priv->mm.gtt->gtt).
*/
-static void gen6_ggtt_insert_entries(struct drm_device *dev,
+static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
unsigned int first_entry,
enum i915_cache_level level)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = vm->dev->dev_private;
gen6_gtt_pte_t __iomem *gtt_entries =
(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int i = 0;
@@ -480,8 +536,7 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
addr = sg_page_iter_dma_address(&sg_iter);
- iowrite32(dev_priv->gtt.pte_encode(dev, addr, level),
- &gtt_entries[i]);
+ iowrite32(vm->pte_encode(addr, level), &gtt_entries[i]);
i++;
}
@@ -492,8 +547,8 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
* hardware should work, we must keep this posting read for paranoia.
*/
if (i != 0)
- WARN_ON(readl(&gtt_entries[i-1])
- != dev_priv->gtt.pte_encode(dev, addr, level));
+ WARN_ON(readl(&gtt_entries[i-1]) !=
+ vm->pte_encode(addr, level));
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@@ -503,11 +558,11 @@ static void gen6_ggtt_insert_entries(struct drm_device *dev,
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
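A small sketch of the write-then-verify idiom used by gen6_ggtt_insert_entries(): stream the PTEs through the write-combined GTT mapping, then read the last slot back, which doubles as a posting read and a cheap sanity check of the encoding. Illustrative only, assuming the usual kernel MMIO helpers (iowrite32/readl/WARN_ON):

static void example_write_ptes(u32 __iomem *gtt_base,
			       const u32 *ptes, int n)
{
	int i;

	/* Stream the entries out through the ioremapped GTT window. */
	for (i = 0; i < n; i++)
		iowrite32(ptes[i], &gtt_base[i]);

	/* Read the last entry back: posting read plus encoding sanity check. */
	if (n != 0)
		WARN_ON(readl(&gtt_base[n - 1]) != ptes[n - 1]);
}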
-static void gen6_ggtt_clear_range(struct drm_device *dev,
+static void gen6_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = vm->dev->dev_private;
gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
@@ -518,16 +573,14 @@ static void gen6_ggtt_clear_range(struct drm_device *dev,
first_entry, num_entries, max_entries))
num_entries = max_entries;
- scratch_pte = dev_priv->gtt.pte_encode(dev,
- dev_priv->gtt.scratch_page_dma,
- I915_CACHE_LLC);
+ scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
}
-static void i915_ggtt_insert_entries(struct drm_device *dev,
+static void i915_ggtt_insert_entries(struct i915_address_space *vm,
struct sg_table *st,
unsigned int pg_start,
enum i915_cache_level cache_level)
@@ -539,7 +592,7 @@ static void i915_ggtt_insert_entries(struct drm_device *dev,
}
-static void i915_ggtt_clear_range(struct drm_device *dev,
+static void i915_ggtt_clear_range(struct i915_address_space *vm,
unsigned int first_entry,
unsigned int num_entries)
{
@@ -552,10 +605,11 @@ void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
- dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
- obj->gtt_space->start >> PAGE_SHIFT,
- cache_level);
+ dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
+ entry,
+ cache_level);
obj->has_global_gtt_mapping = 1;
}
@@ -564,10 +618,11 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;
- dev_priv->gtt.gtt_clear_range(obj->base.dev,
- obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
+ dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
+ entry,
+ obj->base.size >> PAGE_SHIFT);
obj->has_global_gtt_mapping = 0;
}
@@ -618,7 +673,8 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
* aperture. One page should be enough to keep any prefetching inside
* of the aperture.
*/
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
struct drm_mm_node *entry;
struct drm_i915_gem_object *obj;
unsigned long hole_start, hole_end;
@@ -626,37 +682,38 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
BUG_ON(mappable_end > end);
/* Subtract the guard page ... */
- drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+ drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
if (!HAS_LLC(dev))
- dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
+ dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
/* Mark any preallocated objects as occupied */
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
- obj->gtt_offset, obj->base.size);
-
- BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
- obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
- obj->gtt_offset,
- obj->base.size,
- false);
+ struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
+ int ret;
+ DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
+ i915_gem_obj_ggtt_offset(obj), obj->base.size);
+
+ WARN_ON(i915_gem_obj_ggtt_bound(obj));
+ ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
+ if (ret)
+ DRM_DEBUG_KMS("Reservation failed\n");
obj->has_global_gtt_mapping = 1;
+ list_add(&vma->vma_link, &obj->vma_list);
}
- dev_priv->gtt.start = start;
- dev_priv->gtt.total = end - start;
+ dev_priv->gtt.base.start = start;
+ dev_priv->gtt.base.total = end - start;
/* Clear any non-preallocated blocks */
- drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
- hole_start, hole_end) {
+ drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
+ const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
- dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
- (hole_end-hole_start) / PAGE_SIZE);
+ ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count);
}
/* And finally clear the reserved guard page */
- dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
+ ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1);
}
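The preallocated-object handling above relies on drm_mm_reserve_node(): the caller fills in node.start and node.size and the allocator pins the node at exactly that range, failing if it overlaps an existing block. A hedged sketch of that calling convention (hypothetical helper, not from the patch):

static int example_reserve_fixed(struct drm_mm *mm, struct drm_mm_node *node,
				 unsigned long start, unsigned long size)
{
	node->start = start;
	node->size = size;

	/* Fails (e.g. -ENOSPC) if [start, start + size) collides with another node. */
	return drm_mm_reserve_node(mm, node);
}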
static bool
@@ -679,7 +736,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long gtt_size, mappable_size;
- gtt_size = dev_priv->gtt.total;
+ gtt_size = dev_priv->gtt.base.total;
mappable_size = dev_priv->gtt.mappable_end;
if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
@@ -688,7 +745,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
if (INTEL_INFO(dev)->gen <= 7) {
/* PPGTT pdes are stolen from global gtt ptes, so shrink the
* aperture accordingly when using aliasing ppgtt. */
- gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+ gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
}
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
@@ -698,8 +755,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
return;
DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
- drm_mm_takedown(&dev_priv->mm.gtt_space);
- gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
+ drm_mm_takedown(&dev_priv->gtt.base.mm);
+ gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
}
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}
@@ -724,8 +781,8 @@ static int setup_scratch_page(struct drm_device *dev)
#else
dma_addr = page_to_phys(page);
#endif
- dev_priv->gtt.scratch_page = page;
- dev_priv->gtt.scratch_page_dma = dma_addr;
+ dev_priv->gtt.base.scratch.page = page;
+ dev_priv->gtt.base.scratch.addr = dma_addr;
return 0;
}
@@ -733,11 +790,13 @@ static int setup_scratch_page(struct drm_device *dev)
static void teardown_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- set_pages_wb(dev_priv->gtt.scratch_page, 1);
- pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
+ struct page *page = dev_priv->gtt.base.scratch.page;
+
+ set_pages_wb(page, 1);
+ pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- put_page(dev_priv->gtt.scratch_page);
- __free_page(dev_priv->gtt.scratch_page);
+ put_page(page);
+ __free_page(page);
}
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -800,17 +859,18 @@ static int gen6_gmch_probe(struct drm_device *dev,
if (ret)
DRM_ERROR("Scratch setup failed\n");
- dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
- dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;
+ dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
+ dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
return ret;
}
-static void gen6_gmch_remove(struct drm_device *dev)
+static void gen6_gmch_remove(struct i915_address_space *vm)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- iounmap(dev_priv->gtt.gsm);
- teardown_scratch_page(dev_priv->dev);
+
+ struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
+ iounmap(gtt->gsm);
+ teardown_scratch_page(vm->dev);
}
static int i915_gmch_probe(struct drm_device *dev,
@@ -831,13 +891,13 @@ static int i915_gmch_probe(struct drm_device *dev,
intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
- dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
- dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;
+ dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
+ dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
return 0;
}
-static void i915_gmch_remove(struct drm_device *dev)
+static void i915_gmch_remove(struct i915_address_space *vm)
{
intel_gmch_remove();
}
@@ -849,34 +909,35 @@ int i915_gem_gtt_init(struct drm_device *dev)
int ret;
if (INTEL_INFO(dev)->gen <= 5) {
- dev_priv->gtt.gtt_probe = i915_gmch_probe;
- dev_priv->gtt.gtt_remove = i915_gmch_remove;
+ gtt->gtt_probe = i915_gmch_probe;
+ gtt->base.cleanup = i915_gmch_remove;
} else {
- dev_priv->gtt.gtt_probe = gen6_gmch_probe;
- dev_priv->gtt.gtt_remove = gen6_gmch_remove;
- if (IS_HASWELL(dev)) {
- dev_priv->gtt.pte_encode = hsw_pte_encode;
- } else if (IS_VALLEYVIEW(dev)) {
- dev_priv->gtt.pte_encode = byt_pte_encode;
- } else {
- dev_priv->gtt.pte_encode = gen6_pte_encode;
- }
+ gtt->gtt_probe = gen6_gmch_probe;
+ gtt->base.cleanup = gen6_gmch_remove;
+ if (IS_HASWELL(dev) && dev_priv->ellc_size)
+ gtt->base.pte_encode = iris_pte_encode;
+ else if (IS_HASWELL(dev))
+ gtt->base.pte_encode = hsw_pte_encode;
+ else if (IS_VALLEYVIEW(dev))
+ gtt->base.pte_encode = byt_pte_encode;
+ else if (INTEL_INFO(dev)->gen >= 7)
+ gtt->base.pte_encode = ivb_pte_encode;
+ else
+ gtt->base.pte_encode = snb_pte_encode;
}
- ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
- &dev_priv->gtt.stolen_size,
- &gtt->mappable_base,
- &gtt->mappable_end);
+ ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
+ &gtt->mappable_base, &gtt->mappable_end);
if (ret)
return ret;
+ gtt->base.dev = dev;
+
/* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %zdM\n",
- dev_priv->gtt.total >> 20);
- DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
- dev_priv->gtt.mappable_end >> 20);
- DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
- dev_priv->gtt.stolen_size >> 20);
+ gtt->base.total >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
+ DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
return 0;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 982d4732cec..9969d10b80f 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -45,49 +45,48 @@
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct pci_dev *pdev = dev_priv->bridge_dev;
+ struct resource *r;
u32 base;
- /* On the machines I have tested the Graphics Base of Stolen Memory
- * is unreliable, so on those compute the base by subtracting the
- * stolen memory from the Top of Low Usable DRAM which is where the
- * BIOS places the graphics stolen memory.
+ /* Almost universally we can find the Graphics Base of Stolen Memory
+ * at offset 0x5c in the igfx configuration space. On a few (desktop)
+ * machines this is also mirrored in the bridge device at different
+ * locations, or in the MCHBAR. On gen2, the layout is again slightly
+ * different with the Graphics Segment immediately following Top of
+ * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
+ * reported by 865g, so we just use the top of memory as determined
+ * by the e820 probe.
*
- * On gen2, the layout is slightly different with the Graphics Segment
- * immediately following Top of Memory (or Top of Usable DRAM). Note
- * it appears that TOUD is only reported by 865g, so we just use the
- * top of memory as determined by the e820 probe.
- *
- * XXX gen2 requires an unavailable symbol and 945gm fails with
- * its value of TOLUD.
+ * XXX However gen2 requires an unavailable symbol.
*/
base = 0;
- if (IS_VALLEYVIEW(dev)) {
+ if (INTEL_INFO(dev)->gen >= 3) {
+ /* Read Graphics Base of Stolen Memory directly */
pci_read_config_dword(dev->pdev, 0x5c, &base);
base &= ~((1<<20) - 1);
- } else if (INTEL_INFO(dev)->gen >= 6) {
- /* Read Base Data of Stolen Memory Register (BDSM) directly.
- * Note that there is also a MCHBAR miror at 0x1080c0 or
- * we could use device 2:0x5c instead.
- */
- pci_read_config_dword(pdev, 0xB0, &base);
- base &= ~4095; /* lower bits used for locking register */
- } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
- /* Read Graphics Base of Stolen Memory directly */
- pci_read_config_dword(pdev, 0xA4, &base);
+ } else { /* GEN2 */
#if 0
- } else if (IS_GEN3(dev)) {
- u8 val;
- /* Stolen is immediately below Top of Low Usable DRAM */
- pci_read_config_byte(pdev, 0x9c, &val);
- base = val >> 3 << 27;
- base -= dev_priv->mm.gtt->stolen_size;
- } else {
/* Stolen is immediately above Top of Memory */
base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
}
+ if (base == 0)
+ return 0;
+
+ /* Verify that nothing else uses this physical address. Stolen
+ * memory should be reserved by the BIOS and hidden from the
+ * kernel. So if the region is already marked as busy, something
+ * is seriously wrong.
+ */
+ r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
+ "Graphics Stolen Memory");
+ if (r == NULL) {
+ DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+ base, base + (uint32_t)dev_priv->gtt.stolen_size);
+ base = 0;
+ }
+
return base;
}
@@ -95,32 +94,37 @@ static int i915_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
+ int ret;
- /* Try to over-allocate to reduce reallocations and fragmentation */
- compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
- size <<= 1, 4096, 0);
+ compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
if (!compressed_fb)
- compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
- size >>= 1, 4096, 0);
- if (compressed_fb)
- compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
- if (!compressed_fb)
- goto err;
+ goto err_llb;
+
+ /* Try to over-allocate to reduce reallocations and fragmentation */
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
+ size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
+ size >>= 1, 4096,
+ DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ goto err_llb;
if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
} else {
- compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
- 4096, 4096, 0);
- if (compressed_llb)
- compressed_llb = drm_mm_get_block(compressed_llb,
- 4096, 4096);
+ compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
goto err_fb;
- dev_priv->compressed_llb = compressed_llb;
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
+ 4096, 4096, DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ goto err_fb;
+
+ dev_priv->fbc.compressed_llb = compressed_llb;
I915_WRITE(FBC_CFB_BASE,
dev_priv->mm.stolen_base + compressed_fb->start);
@@ -128,8 +132,8 @@ static int i915_setup_compression(struct drm_device *dev, int size)
dev_priv->mm.stolen_base + compressed_llb->start);
}
- dev_priv->compressed_fb = compressed_fb;
- dev_priv->cfb_size = size;
+ dev_priv->fbc.compressed_fb = compressed_fb;
+ dev_priv->fbc.size = size;
DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
size);
@@ -137,8 +141,10 @@ static int i915_setup_compression(struct drm_device *dev, int size)
return 0;
err_fb:
- drm_mm_put_block(compressed_fb);
-err:
+ kfree(compressed_llb);
+ drm_mm_remove_node(compressed_fb);
+err_llb:
+ kfree(compressed_fb);
pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
return -ENOSPC;
}
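The conversion above (and in the stolen-object allocator further down) replaces drm_mm_search_free()/drm_mm_get_block() with caller-owned nodes: allocate the node, let drm_mm_insert_node() place it, and on any later failure remove it and kfree() it. A minimal sketch of that ownership pattern, assuming the same drm_mm API as used in the hunk (hypothetical helpers, not from the patch):

static struct drm_mm_node *example_stolen_alloc(struct drm_mm *mm,
						unsigned long size)
{
	struct drm_mm_node *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return NULL;

	ret = drm_mm_insert_node(mm, node, size, 4096, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		kfree(node);
		return NULL;
	}
	return node;
}

static void example_stolen_free(struct drm_mm_node *node)
{
	/* The node is caller-owned now: unlink it, then free the memory. */
	drm_mm_remove_node(node);
	kfree(node);
}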
@@ -150,7 +156,7 @@ int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return -ENODEV;
- if (size < dev_priv->cfb_size)
+ if (size < dev_priv->fbc.size)
return 0;
/* Release any current block */
@@ -163,16 +169,20 @@ void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->cfb_size == 0)
+ if (dev_priv->fbc.size == 0)
return;
- if (dev_priv->compressed_fb)
- drm_mm_put_block(dev_priv->compressed_fb);
+ if (dev_priv->fbc.compressed_fb) {
+ drm_mm_remove_node(dev_priv->fbc.compressed_fb);
+ kfree(dev_priv->fbc.compressed_fb);
+ }
- if (dev_priv->compressed_llb)
- drm_mm_put_block(dev_priv->compressed_llb);
+ if (dev_priv->fbc.compressed_llb) {
+ drm_mm_remove_node(dev_priv->fbc.compressed_llb);
+ kfree(dev_priv->fbc.compressed_llb);
+ }
- dev_priv->cfb_size = 0;
+ dev_priv->fbc.size = 0;
}
void i915_gem_cleanup_stolen(struct drm_device *dev)
@@ -201,6 +211,9 @@ int i915_gem_init_stolen(struct drm_device *dev)
if (IS_VALLEYVIEW(dev))
bios_reserved = 1024*1024; /* top 1M on VLV/BYT */
+ if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
+ return 0;
+
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
bios_reserved);
@@ -271,9 +284,7 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
if (obj == NULL)
return NULL;
- if (drm_gem_private_object_init(dev, &obj->base, stolen->size))
- goto cleanup;
-
+ drm_gem_private_object_init(dev, &obj->base, stolen->size);
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
obj->pages = i915_pages_create_for_stolen(dev,
@@ -285,9 +296,8 @@ _i915_gem_object_create_stolen(struct drm_device *dev,
i915_gem_object_pin_pages(obj);
obj->stolen = stolen;
- obj->base.write_domain = I915_GEM_DOMAIN_GTT;
- obj->base.read_domains = I915_GEM_DOMAIN_GTT;
- obj->cache_level = I915_CACHE_NONE;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
+ obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;
return obj;
@@ -302,6 +312,7 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
+ int ret;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
@@ -310,17 +321,23 @@ i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
if (size == 0)
return NULL;
- stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
- if (stolen)
- stolen = drm_mm_get_block(stolen, size, 4096);
- if (stolen == NULL)
+ stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+ if (!stolen)
+ return NULL;
+
+ ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
+ 4096, DRM_MM_SEARCH_DEFAULT);
+ if (ret) {
+ kfree(stolen);
return NULL;
+ }
obj = _i915_gem_object_create_stolen(dev, stolen);
if (obj)
return obj;
- drm_mm_put_block(stolen);
+ drm_mm_remove_node(stolen);
+ kfree(stolen);
return NULL;
}
@@ -331,8 +348,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_address_space *ggtt = &dev_priv->gtt.base;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
+ struct i915_vma *vma;
+ int ret;
if (!drm_mm_initialized(&dev_priv->mm.stolen))
return NULL;
@@ -347,56 +367,74 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
if (WARN_ON(size == 0))
return NULL;
- stolen = drm_mm_create_block(&dev_priv->mm.stolen,
- stolen_offset, size,
- false);
- if (stolen == NULL) {
+ stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
+ if (!stolen)
+ return NULL;
+
+ stolen->start = stolen_offset;
+ stolen->size = size;
+ ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
+ if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen space\n");
+ kfree(stolen);
return NULL;
}
obj = _i915_gem_object_create_stolen(dev, stolen);
if (obj == NULL) {
DRM_DEBUG_KMS("failed to allocate stolen object\n");
- drm_mm_put_block(stolen);
+ drm_mm_remove_node(stolen);
+ kfree(stolen);
return NULL;
}
/* Some objects just need physical mem from stolen space */
- if (gtt_offset == -1)
+ if (gtt_offset == I915_GTT_OFFSET_NONE)
return obj;
+ vma = i915_gem_vma_create(obj, ggtt);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_out;
+ }
+
/* To simplify the initialisation sequence between KMS and GTT,
* we allow construction of the stolen object prior to
* setting up the GTT space. The actual reservation will occur
* later.
*/
- if (drm_mm_initialized(&dev_priv->mm.gtt_space)) {
- obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
- gtt_offset, size,
- false);
- if (obj->gtt_space == NULL) {
+ vma->node.start = gtt_offset;
+ vma->node.size = size;
+ if (drm_mm_initialized(&ggtt->mm)) {
+ ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
+ if (ret) {
DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
- drm_gem_object_unreference(&obj->base);
- return NULL;
+ goto err_vma;
}
- } else
- obj->gtt_space = I915_GTT_RESERVED;
+ }
- obj->gtt_offset = gtt_offset;
obj->has_global_gtt_mapping = 1;
list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
- list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+ list_add_tail(&vma->mm_list, &ggtt->inactive_list);
return obj;
+
+err_vma:
+ i915_gem_vma_destroy(vma);
+err_out:
+ drm_mm_remove_node(stolen);
+ kfree(stolen);
+ drm_gem_object_unreference(&obj->base);
+ return NULL;
}
void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
if (obj->stolen) {
- drm_mm_put_block(obj->stolen);
+ drm_mm_remove_node(obj->stolen);
+ kfree(obj->stolen);
obj->stolen = NULL;
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 537545be69d..032e9ef9c89 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -268,18 +268,18 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
return true;
if (INTEL_INFO(obj->base.dev)->gen == 3) {
- if (obj->gtt_offset & ~I915_FENCE_START_MASK)
+ if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
return false;
} else {
- if (obj->gtt_offset & ~I830_FENCE_START_MASK)
+ if (i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK)
return false;
}
size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
- if (obj->gtt_space->size != size)
+ if (i915_gem_obj_ggtt_size(obj) != size)
return false;
- if (obj->gtt_offset & (size - 1))
+ if (i915_gem_obj_ggtt_offset(obj) & (size - 1))
return false;
return true;
@@ -359,18 +359,19 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
*/
obj->map_and_fenceable =
- obj->gtt_space == NULL ||
- (obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
+ !i915_gem_obj_ggtt_bound(obj) ||
+ (i915_gem_obj_ggtt_offset(obj) +
+ obj->base.size <= dev_priv->gtt.mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
/* Rebind if we need a change of alignment */
if (!obj->map_and_fenceable) {
- u32 unfenced_alignment =
+ u32 unfenced_align =
i915_gem_get_gtt_alignment(dev, obj->base.size,
args->tiling_mode,
false);
- if (obj->gtt_offset & (unfenced_alignment - 1))
- ret = i915_gem_object_unbind(obj);
+ if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
+ ret = i915_gem_object_ggtt_unbind(obj);
}
if (ret == 0) {
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
new file mode 100644
index 00000000000..558e568d5b4
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -0,0 +1,1019 @@
+/*
+ * Copyright (c) 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Keith Packard <keithp@keithp.com>
+ * Mika Kuoppala <mika.kuoppala@intel.com>
+ *
+ */
+
+#include <generated/utsrelease.h>
+#include "i915_drv.h"
+
+static const char *yesno(int v)
+{
+ return v ? "yes" : "no";
+}
+
+static const char *ring_str(int ring)
+{
+ switch (ring) {
+ case RCS: return "render";
+ case VCS: return "bsd";
+ case BCS: return "blt";
+ case VECS: return "vebox";
+ default: return "";
+ }
+}
+
+static const char *pin_flag(int pinned)
+{
+ if (pinned > 0)
+ return " P";
+ else if (pinned < 0)
+ return " p";
+ else
+ return "";
+}
+
+static const char *tiling_flag(int tiling)
+{
+ switch (tiling) {
+ default:
+ case I915_TILING_NONE: return "";
+ case I915_TILING_X: return " X";
+ case I915_TILING_Y: return " Y";
+ }
+}
+
+static const char *dirty_flag(int dirty)
+{
+ return dirty ? " dirty" : "";
+}
+
+static const char *purgeable_flag(int purgeable)
+{
+ return purgeable ? " purgeable" : "";
+}
+
+static bool __i915_error_ok(struct drm_i915_error_state_buf *e)
+{
+
+ if (!e->err && WARN(e->bytes > (e->size - 1), "overflow")) {
+ e->err = -ENOSPC;
+ return false;
+ }
+
+ if (e->bytes == e->size - 1 || e->err)
+ return false;
+
+ return true;
+}
+
+static bool __i915_error_seek(struct drm_i915_error_state_buf *e,
+ unsigned len)
+{
+ if (e->pos + len <= e->start) {
+ e->pos += len;
+ return false;
+ }
+
+ /* First vsnprintf needs to fit in its entirety for memmove */
+ if (len >= e->size) {
+ e->err = -EIO;
+ return false;
+ }
+
+ return true;
+}
+
+static void __i915_error_advance(struct drm_i915_error_state_buf *e,
+ unsigned len)
+{
+ /* If this is the first printf in this window, adjust it so that
+ * start position matches start of the buffer
+ */
+
+ if (e->pos < e->start) {
+ const size_t off = e->start - e->pos;
+
+ /* Should not happen but be paranoid */
+ if (off > len || e->bytes) {
+ e->err = -EIO;
+ return;
+ }
+
+ memmove(e->buf, e->buf + off, len - off);
+ e->bytes = len - off;
+ e->pos = e->start;
+ return;
+ }
+
+ e->bytes += len;
+ e->pos += len;
+}
+
+static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
+ const char *f, va_list args)
+{
+ unsigned len;
+
+ if (!__i915_error_ok(e))
+ return;
+
+ /* Seek the first printf which hits the start position */
+ if (e->pos < e->start) {
+ len = vsnprintf(NULL, 0, f, args);
+ if (!__i915_error_seek(e, len))
+ return;
+ }
+
+ len = vsnprintf(e->buf + e->bytes, e->size - e->bytes, f, args);
+ if (len >= e->size - e->bytes)
+ len = e->size - e->bytes - 1;
+
+ __i915_error_advance(e, len);
+}
+
+static void i915_error_puts(struct drm_i915_error_state_buf *e,
+ const char *str)
+{
+ unsigned len;
+
+ if (!__i915_error_ok(e))
+ return;
+
+ len = strlen(str);
+
+ /* Seek the first printf which hits the start position */
+ if (e->pos < e->start) {
+ if (!__i915_error_seek(e, len))
+ return;
+ }
+
+ if (len >= e->size - e->bytes)
+ len = e->size - e->bytes - 1;
+ memcpy(e->buf + e->bytes, str, len);
+
+ __i915_error_advance(e, len);
+}
+
+#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
+#define err_puts(e, s) i915_error_puts(e, s)
+
+static void print_error_buffers(struct drm_i915_error_state_buf *m,
+ const char *name,
+ struct drm_i915_error_buffer *err,
+ int count)
+{
+ err_printf(m, "%s [%d]:\n", name, count);
+
+ while (count--) {
+ err_printf(m, " %08x %8u %02x %02x %x %x",
+ err->gtt_offset,
+ err->size,
+ err->read_domains,
+ err->write_domain,
+ err->rseqno, err->wseqno);
+ err_puts(m, pin_flag(err->pinned));
+ err_puts(m, tiling_flag(err->tiling));
+ err_puts(m, dirty_flag(err->dirty));
+ err_puts(m, purgeable_flag(err->purgeable));
+ err_puts(m, err->ring != -1 ? " " : "");
+ err_puts(m, ring_str(err->ring));
+ err_puts(m, i915_cache_level_str(err->cache_level));
+
+ if (err->name)
+ err_printf(m, " (name: %d)", err->name);
+ if (err->fence_reg != I915_FENCE_REG_NONE)
+ err_printf(m, " (fence: %d)", err->fence_reg);
+
+ err_puts(m, "\n");
+ err++;
+ }
+}
+
+static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
+ struct drm_device *dev,
+ struct drm_i915_error_state *error,
+ unsigned ring)
+{
+ BUG_ON(ring >= I915_NUM_RINGS); /* shut up confused gcc */
+ err_printf(m, "%s command stream:\n", ring_str(ring));
+ err_printf(m, " HEAD: 0x%08x\n", error->head[ring]);
+ err_printf(m, " TAIL: 0x%08x\n", error->tail[ring]);
+ err_printf(m, " CTL: 0x%08x\n", error->ctl[ring]);
+ err_printf(m, " ACTHD: 0x%08x\n", error->acthd[ring]);
+ err_printf(m, " IPEIR: 0x%08x\n", error->ipeir[ring]);
+ err_printf(m, " IPEHR: 0x%08x\n", error->ipehr[ring]);
+ err_printf(m, " INSTDONE: 0x%08x\n", error->instdone[ring]);
+ if (ring == RCS && INTEL_INFO(dev)->gen >= 4)
+ err_printf(m, " BBADDR: 0x%08llx\n", error->bbaddr);
+
+ if (INTEL_INFO(dev)->gen >= 4)
+ err_printf(m, " INSTPS: 0x%08x\n", error->instps[ring]);
+ err_printf(m, " INSTPM: 0x%08x\n", error->instpm[ring]);
+ err_printf(m, " FADDR: 0x%08x\n", error->faddr[ring]);
+ if (INTEL_INFO(dev)->gen >= 6) {
+ err_printf(m, " RC PSMI: 0x%08x\n", error->rc_psmi[ring]);
+ err_printf(m, " FAULT_REG: 0x%08x\n", error->fault_reg[ring]);
+ err_printf(m, " SYNC_0: 0x%08x [last synced 0x%08x]\n",
+ error->semaphore_mboxes[ring][0],
+ error->semaphore_seqno[ring][0]);
+ err_printf(m, " SYNC_1: 0x%08x [last synced 0x%08x]\n",
+ error->semaphore_mboxes[ring][1],
+ error->semaphore_seqno[ring][1]);
+ if (HAS_VEBOX(dev)) {
+ err_printf(m, " SYNC_2: 0x%08x [last synced 0x%08x]\n",
+ error->semaphore_mboxes[ring][2],
+ error->semaphore_seqno[ring][2]);
+ }
+ }
+ err_printf(m, " seqno: 0x%08x\n", error->seqno[ring]);
+ err_printf(m, " waiting: %s\n", yesno(error->waiting[ring]));
+ err_printf(m, " ring->head: 0x%08x\n", error->cpu_ring_head[ring]);
+ err_printf(m, " ring->tail: 0x%08x\n", error->cpu_ring_tail[ring]);
+}
+
+void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
+{
+ va_list args;
+
+ va_start(args, f);
+ i915_error_vprintf(e, f, args);
+ va_end(args);
+}
+
+int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
+ const struct i915_error_state_file_priv *error_priv)
+{
+ struct drm_device *dev = error_priv->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error = error_priv->error;
+ struct intel_ring_buffer *ring;
+ int i, j, page, offset, elt;
+
+ if (!error) {
+ err_printf(m, "no error state collected\n");
+ goto out;
+ }
+
+ err_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
+ error->time.tv_usec);
+ err_printf(m, "Kernel: " UTS_RELEASE "\n");
+ err_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
+ err_printf(m, "EIR: 0x%08x\n", error->eir);
+ err_printf(m, "IER: 0x%08x\n", error->ier);
+ err_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+ err_printf(m, "FORCEWAKE: 0x%08x\n", error->forcewake);
+ err_printf(m, "DERRMR: 0x%08x\n", error->derrmr);
+ err_printf(m, "CCID: 0x%08x\n", error->ccid);
+
+ for (i = 0; i < dev_priv->num_fence_regs; i++)
+ err_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
+
+ for (i = 0; i < ARRAY_SIZE(error->extra_instdone); i++)
+ err_printf(m, " INSTDONE_%d: 0x%08x\n", i,
+ error->extra_instdone[i]);
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ err_printf(m, "ERROR: 0x%08x\n", error->error);
+ err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
+ }
+
+ if (INTEL_INFO(dev)->gen == 7)
+ err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
+
+ for_each_ring(ring, dev_priv, i)
+ i915_ring_error_state(m, dev, error, i);
+
+ if (error->active_bo)
+ print_error_buffers(m, "Active",
+ error->active_bo[0],
+ error->active_bo_count[0]);
+
+ if (error->pinned_bo)
+ print_error_buffers(m, "Pinned",
+ error->pinned_bo[0],
+ error->pinned_bo_count[0]);
+
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ struct drm_i915_error_object *obj;
+
+ if ((obj = error->ring[i].batchbuffer)) {
+ err_printf(m, "%s --- gtt_offset = 0x%08x\n",
+ dev_priv->ring[i].name,
+ obj->gtt_offset);
+ offset = 0;
+ for (page = 0; page < obj->page_count; page++) {
+ for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+ err_printf(m, "%08x : %08x\n", offset,
+ obj->pages[page][elt]);
+ offset += 4;
+ }
+ }
+ }
+
+ if (error->ring[i].num_requests) {
+ err_printf(m, "%s --- %d requests\n",
+ dev_priv->ring[i].name,
+ error->ring[i].num_requests);
+ for (j = 0; j < error->ring[i].num_requests; j++) {
+ err_printf(m, " seqno 0x%08x, emitted %ld, tail 0x%08x\n",
+ error->ring[i].requests[j].seqno,
+ error->ring[i].requests[j].jiffies,
+ error->ring[i].requests[j].tail);
+ }
+ }
+
+ if ((obj = error->ring[i].ringbuffer)) {
+ err_printf(m, "%s --- ringbuffer = 0x%08x\n",
+ dev_priv->ring[i].name,
+ obj->gtt_offset);
+ offset = 0;
+ for (page = 0; page < obj->page_count; page++) {
+ for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+ err_printf(m, "%08x : %08x\n",
+ offset,
+ obj->pages[page][elt]);
+ offset += 4;
+ }
+ }
+ }
+
+ obj = error->ring[i].ctx;
+ if (obj) {
+ err_printf(m, "%s --- HW Context = 0x%08x\n",
+ dev_priv->ring[i].name,
+ obj->gtt_offset);
+ offset = 0;
+ for (elt = 0; elt < PAGE_SIZE/16; elt += 4) {
+ err_printf(m, "[%04x] %08x %08x %08x %08x\n",
+ offset,
+ obj->pages[0][elt],
+ obj->pages[0][elt+1],
+ obj->pages[0][elt+2],
+ obj->pages[0][elt+3]);
+ offset += 16;
+ }
+ }
+ }
+
+ if (error->overlay)
+ intel_overlay_print_error_state(m, error->overlay);
+
+ if (error->display)
+ intel_display_print_error_state(m, dev, error->display);
+
+out:
+ if (m->bytes == 0 && m->err)
+ return m->err;
+
+ return 0;
+}
+
+int i915_error_state_buf_init(struct drm_i915_error_state_buf *ebuf,
+ size_t count, loff_t pos)
+{
+ memset(ebuf, 0, sizeof(*ebuf));
+
+ /* We need to have enough room to store any i915_error_state printf
+ * so that we can move it to start position.
+ */
+ ebuf->size = count + 1 > PAGE_SIZE ? count + 1 : PAGE_SIZE;
+ ebuf->buf = kmalloc(ebuf->size,
+ GFP_TEMPORARY | __GFP_NORETRY | __GFP_NOWARN);
+
+ if (ebuf->buf == NULL) {
+ ebuf->size = PAGE_SIZE;
+ ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
+ }
+
+ if (ebuf->buf == NULL) {
+ ebuf->size = 128;
+ ebuf->buf = kmalloc(ebuf->size, GFP_TEMPORARY);
+ }
+
+ if (ebuf->buf == NULL)
+ return -ENOMEM;
+
+ ebuf->start = pos;
+
+ return 0;
+}
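i915_error_state_buf_init() and i915_error_state_to_str() are meant to be driven from a seekable read: size the buffer from the requested count, render from the requested offset, then copy out and free. A hedged sketch of such a caller (hypothetical and simplified, assuming the open hook stashed the error_priv in file->private_data; the real debugfs hook lives elsewhere):

static ssize_t example_error_state_read(struct file *file, char __user *ubuf,
					size_t count, loff_t *pos)
{
	struct i915_error_state_file_priv *error_priv = file->private_data;
	struct drm_i915_error_state_buf error_str;
	ssize_t ret;

	ret = i915_error_state_buf_init(&error_str, count, *pos);
	if (ret)
		return ret;

	ret = i915_error_state_to_str(&error_str, error_priv);
	if (ret)
		goto out;

	/* Hand back at most what was rendered, and advance the file offset. */
	ret = min_t(size_t, count, error_str.bytes);
	if (copy_to_user(ubuf, error_str.buf, ret))
		ret = -EFAULT;
	else
		*pos = error_str.start + ret;
out:
	kfree(error_str.buf);
	return ret;
}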
+
+static void i915_error_object_free(struct drm_i915_error_object *obj)
+{
+ int page;
+
+ if (obj == NULL)
+ return;
+
+ for (page = 0; page < obj->page_count; page++)
+ kfree(obj->pages[page]);
+
+ kfree(obj);
+}
+
+static void i915_error_state_free(struct kref *error_ref)
+{
+ struct drm_i915_error_state *error = container_of(error_ref,
+ typeof(*error), ref);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
+ i915_error_object_free(error->ring[i].batchbuffer);
+ i915_error_object_free(error->ring[i].ringbuffer);
+ i915_error_object_free(error->ring[i].ctx);
+ kfree(error->ring[i].requests);
+ }
+
+ kfree(error->active_bo);
+ kfree(error->overlay);
+ kfree(error->display);
+ kfree(error);
+}
+
+static struct drm_i915_error_object *
+i915_error_object_create_sized(struct drm_i915_private *dev_priv,
+ struct drm_i915_gem_object *src,
+ const int num_pages)
+{
+ struct drm_i915_error_object *dst;
+ int i;
+ u32 reloc_offset;
+
+ if (src == NULL || src->pages == NULL)
+ return NULL;
+
+ dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
+ if (dst == NULL)
+ return NULL;
+
+ reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src);
+ for (i = 0; i < num_pages; i++) {
+ unsigned long flags;
+ void *d;
+
+ d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (d == NULL)
+ goto unwind;
+
+ local_irq_save(flags);
+ if (reloc_offset < dev_priv->gtt.mappable_end &&
+ src->has_global_gtt_mapping) {
+ void __iomem *s;
+
+ /* Simply ignore tiling or any overlapping fence.
+ * It's part of the error state, and this hopefully
+ * captures what the GPU read.
+ */
+
+ s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ reloc_offset);
+ memcpy_fromio(d, s, PAGE_SIZE);
+ io_mapping_unmap_atomic(s);
+ } else if (src->stolen) {
+ unsigned long offset;
+
+ offset = dev_priv->mm.stolen_base;
+ offset += src->stolen->start;
+ offset += i << PAGE_SHIFT;
+
+ memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
+ } else {
+ struct page *page;
+ void *s;
+
+ page = i915_gem_object_get_page(src, i);
+
+ drm_clflush_pages(&page, 1);
+
+ s = kmap_atomic(page);
+ memcpy(d, s, PAGE_SIZE);
+ kunmap_atomic(s);
+
+ drm_clflush_pages(&page, 1);
+ }
+ local_irq_restore(flags);
+
+ dst->pages[i] = d;
+
+ reloc_offset += PAGE_SIZE;
+ }
+ dst->page_count = num_pages;
+
+ return dst;
+
+unwind:
+ while (i--)
+ kfree(dst->pages[i]);
+ kfree(dst);
+ return NULL;
+}
+#define i915_error_object_create(dev_priv, src) \
+ i915_error_object_create_sized((dev_priv), (src), \
+ (src)->base.size>>PAGE_SHIFT)
+
+static void capture_bo(struct drm_i915_error_buffer *err,
+ struct drm_i915_gem_object *obj)
+{
+ err->size = obj->base.size;
+ err->name = obj->base.name;
+ err->rseqno = obj->last_read_seqno;
+ err->wseqno = obj->last_write_seqno;
+ err->gtt_offset = i915_gem_obj_ggtt_offset(obj);
+ err->read_domains = obj->base.read_domains;
+ err->write_domain = obj->base.write_domain;
+ err->fence_reg = obj->fence_reg;
+ err->pinned = 0;
+ if (obj->pin_count > 0)
+ err->pinned = 1;
+ if (obj->user_pin_count > 0)
+ err->pinned = -1;
+ err->tiling = obj->tiling_mode;
+ err->dirty = obj->dirty;
+ err->purgeable = obj->madv != I915_MADV_WILLNEED;
+ err->ring = obj->ring ? obj->ring->id : -1;
+ err->cache_level = obj->cache_level;
+}
+
+static u32 capture_active_bo(struct drm_i915_error_buffer *err,
+ int count, struct list_head *head)
+{
+ struct i915_vma *vma;
+ int i = 0;
+
+ list_for_each_entry(vma, head, mm_list) {
+ capture_bo(err++, vma->obj);
+ if (++i == count)
+ break;
+ }
+
+ return i;
+}
+
+static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
+ int count, struct list_head *head)
+{
+ struct drm_i915_gem_object *obj;
+ int i = 0;
+
+ list_for_each_entry(obj, head, global_list) {
+ if (obj->pin_count == 0)
+ continue;
+
+ capture_bo(err++, obj);
+ if (++i == count)
+ break;
+ }
+
+ return i;
+}
+
+static void i915_gem_record_fences(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6:
+ for (i = 0; i < dev_priv->num_fence_regs; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+ case 2:
+ for (i = 0; i < 8; i++)
+ error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+static struct drm_i915_error_object *
+i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
+{
+ struct i915_address_space *vm;
+ struct i915_vma *vma;
+ struct drm_i915_gem_object *obj;
+ u32 seqno;
+
+ if (!ring->get_seqno)
+ return NULL;
+
+ if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
+ u32 acthd = I915_READ(ACTHD);
+
+ if (WARN_ON(ring->id != RCS))
+ return NULL;
+
+ obj = ring->private;
+ if (acthd >= i915_gem_obj_ggtt_offset(obj) &&
+ acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size)
+ return i915_error_object_create(dev_priv, obj);
+ }
+
+ seqno = ring->get_seqno(ring, false);
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
+ list_for_each_entry(vma, &vm->active_list, mm_list) {
+ obj = vma->obj;
+ if (obj->ring != ring)
+ continue;
+
+ if (i915_seqno_passed(seqno, obj->last_read_seqno))
+ continue;
+
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
+ continue;
+
+ /* We need to copy these to an anonymous buffer as the simplest
+ * method to avoid being overwritten by userspace.
+ */
+ return i915_error_object_create(dev_priv, obj);
+ }
+ }
+
+ return NULL;
+}
+
+static void i915_record_ring_state(struct drm_device *dev,
+ struct drm_i915_error_state *error,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
+ error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
+ error->semaphore_mboxes[ring->id][0]
+ = I915_READ(RING_SYNC_0(ring->mmio_base));
+ error->semaphore_mboxes[ring->id][1]
+ = I915_READ(RING_SYNC_1(ring->mmio_base));
+ error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
+ error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
+ }
+
+ if (HAS_VEBOX(dev)) {
+ error->semaphore_mboxes[ring->id][2] =
+ I915_READ(RING_SYNC_2(ring->mmio_base));
+ error->semaphore_seqno[ring->id][2] = ring->sync_seqno[2];
+ }
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
+ error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
+ error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
+ error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
+ error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
+ if (ring->id == RCS)
+ error->bbaddr = I915_READ64(BB_ADDR);
+ } else {
+ error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
+ error->ipeir[ring->id] = I915_READ(IPEIR);
+ error->ipehr[ring->id] = I915_READ(IPEHR);
+ error->instdone[ring->id] = I915_READ(INSTDONE);
+ }
+
+ error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
+ error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
+ error->seqno[ring->id] = ring->get_seqno(ring, false);
+ error->acthd[ring->id] = intel_ring_get_active_head(ring);
+ error->head[ring->id] = I915_READ_HEAD(ring);
+ error->tail[ring->id] = I915_READ_TAIL(ring);
+ error->ctl[ring->id] = I915_READ_CTL(ring);
+
+ error->cpu_ring_head[ring->id] = ring->head;
+ error->cpu_ring_tail[ring->id] = ring->tail;
+}
+
+
+static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
+ struct drm_i915_error_state *error,
+ struct drm_i915_error_ring *ering)
+{
+ struct drm_i915_private *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_object *obj;
+
+ /* Currently render ring is the only HW context user */
+ if (ring->id != RCS || !error->ccid)
+ return;
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) {
+ ering->ctx = i915_error_object_create_sized(dev_priv,
+ obj, 1);
+ break;
+ }
+ }
+}
+
+static void i915_gem_record_rings(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
+ struct drm_i915_gem_request *request;
+ int i, count;
+
+ for_each_ring(ring, dev_priv, i) {
+ i915_record_ring_state(dev, error, ring);
+
+ error->ring[i].batchbuffer =
+ i915_error_first_batchbuffer(dev_priv, ring);
+
+ error->ring[i].ringbuffer =
+ i915_error_object_create(dev_priv, ring->obj);
+
+
+ i915_gem_record_active_context(ring, error, &error->ring[i]);
+
+ count = 0;
+ list_for_each_entry(request, &ring->request_list, list)
+ count++;
+
+ error->ring[i].num_requests = count;
+ error->ring[i].requests =
+ kmalloc(count*sizeof(struct drm_i915_error_request),
+ GFP_ATOMIC);
+ if (error->ring[i].requests == NULL) {
+ error->ring[i].num_requests = 0;
+ continue;
+ }
+
+ count = 0;
+ list_for_each_entry(request, &ring->request_list, list) {
+ struct drm_i915_error_request *erq;
+
+ erq = &error->ring[i].requests[count++];
+ erq->seqno = request->seqno;
+ erq->jiffies = request->emitted_jiffies;
+ erq->tail = request->tail;
+ }
+ }
+}
+
+/* FIXME: Since pin count/bound list is global, we duplicate what we capture per
+ * VM.
+ */
+static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error,
+ struct i915_address_space *vm,
+ const int ndx)
+{
+ struct drm_i915_error_buffer *active_bo = NULL, *pinned_bo = NULL;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int i;
+
+ i = 0;
+ list_for_each_entry(vma, &vm->active_list, mm_list)
+ i++;
+ error->active_bo_count[ndx] = i;
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+ if (obj->pin_count)
+ i++;
+ error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
+
+ if (i) {
+ active_bo = kmalloc(sizeof(*active_bo)*i, GFP_ATOMIC);
+ if (active_bo)
+ pinned_bo = active_bo + error->active_bo_count[ndx];
+ }
+
+ if (active_bo)
+ error->active_bo_count[ndx] =
+ capture_active_bo(active_bo,
+ error->active_bo_count[ndx],
+ &vm->active_list);
+
+ if (pinned_bo)
+ error->pinned_bo_count[ndx] =
+ capture_pinned_bo(pinned_bo,
+ error->pinned_bo_count[ndx],
+ &dev_priv->mm.bound_list);
+ error->active_bo[ndx] = active_bo;
+ error->pinned_bo[ndx] = pinned_bo;
+}
+
+static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
+ struct drm_i915_error_state *error)
+{
+ struct i915_address_space *vm;
+ int cnt = 0, i = 0;
+
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ cnt++;
+
+ if (WARN(cnt > 1, "Multiple VMs not yet supported\n"))
+ cnt = 1;
+
+ vm = &dev_priv->gtt.base;
+
+ error->active_bo = kcalloc(cnt, sizeof(*error->active_bo), GFP_ATOMIC);
+ error->pinned_bo = kcalloc(cnt, sizeof(*error->pinned_bo), GFP_ATOMIC);
+ error->active_bo_count = kcalloc(cnt, sizeof(*error->active_bo_count),
+ GFP_ATOMIC);
+ error->pinned_bo_count = kcalloc(cnt, sizeof(*error->pinned_bo_count),
+ GFP_ATOMIC);
+
+ list_for_each_entry(vm, &dev_priv->vm_list, global_link)
+ i915_gem_capture_vm(dev_priv, error, vm, i++);
+}
+
+/**
+ * i915_capture_error_state - capture an error record for later analysis
+ * @dev: drm device
+ *
+ * Should be called when an error is detected (either a hang or an error
+ * interrupt) to capture error state from the time of the error. Fills
+ * out a structure which becomes available in debugfs for user level tools
+ * to pick up.
+ */
+void i915_capture_error_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error;
+ unsigned long flags;
+ int pipe;
+
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error = dev_priv->gpu_error.first_error;
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+ if (error)
+ return;
+
+ /* Account for pipe specific data like PIPE*STAT */
+ error = kzalloc(sizeof(*error), GFP_ATOMIC);
+ if (!error) {
+ DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+ return;
+ }
+
+ DRM_INFO("capturing error event; look for more information in "
+ "/sys/class/drm/card%d/error\n", dev->primary->index);
+
+ kref_init(&error->ref);
+ error->eir = I915_READ(EIR);
+ error->pgtbl_er = I915_READ(PGTBL_ER);
+ if (HAS_HW_CONTEXTS(dev))
+ error->ccid = I915_READ(CCID);
+
+ if (HAS_PCH_SPLIT(dev))
+ error->ier = I915_READ(DEIER) | I915_READ(GTIER);
+ else if (IS_VALLEYVIEW(dev))
+ error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
+ else if (IS_GEN2(dev))
+ error->ier = I915_READ16(IER);
+ else
+ error->ier = I915_READ(IER);
+
+ if (INTEL_INFO(dev)->gen >= 6)
+ error->derrmr = I915_READ(DERRMR);
+
+ if (IS_VALLEYVIEW(dev))
+ error->forcewake = I915_READ(FORCEWAKE_VLV);
+ else if (INTEL_INFO(dev)->gen >= 7)
+ error->forcewake = I915_READ(FORCEWAKE_MT);
+ else if (INTEL_INFO(dev)->gen == 6)
+ error->forcewake = I915_READ(FORCEWAKE);
+
+ if (!HAS_PCH_SPLIT(dev))
+ for_each_pipe(pipe)
+ error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->error = I915_READ(ERROR_GEN6);
+ error->done_reg = I915_READ(DONE_REG);
+ }
+
+ if (INTEL_INFO(dev)->gen == 7)
+ error->err_int = I915_READ(GEN7_ERR_INT);
+
+ i915_get_extra_instdone(dev, error->extra_instdone);
+
+ i915_gem_capture_buffers(dev_priv, error);
+ i915_gem_record_fences(dev, error);
+ i915_gem_record_rings(dev, error);
+
+ do_gettimeofday(&error->time);
+
+ error->overlay = intel_overlay_capture_error_state(dev);
+ error->display = intel_display_capture_error_state(dev);
+
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ if (dev_priv->gpu_error.first_error == NULL) {
+ dev_priv->gpu_error.first_error = error;
+ error = NULL;
+ }
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+ if (error)
+ i915_error_state_free(&error->ref);
+}
+
+void i915_error_state_get(struct drm_device *dev,
+ struct i915_error_state_file_priv *error_priv)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error_priv->error = dev_priv->gpu_error.first_error;
+ if (error_priv->error)
+ kref_get(&error_priv->error->ref);
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+}
+
+void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
+{
+ if (error_priv->error)
+ kref_put(&error_priv->error->ref, i915_error_state_free);
+}
+
+void i915_destroy_error_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
+ error = dev_priv->gpu_error.first_error;
+ dev_priv->gpu_error.first_error = NULL;
+ spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
+
+ if (error)
+ kref_put(&error->ref, i915_error_state_free);
+}
+
+const char *i915_cache_level_str(int type)
+{
+ switch (type) {
+ case I915_CACHE_NONE: return " uncached";
+ case I915_CACHE_LLC: return " snooped or LLC";
+ case I915_CACHE_L3_LLC: return " L3+LLC";
+ default: return "";
+ }
+}
+
+/* NB: please notice the memset */
+void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
+
+ switch (INTEL_INFO(dev)->gen) {
+ case 2:
+ case 3:
+ instdone[0] = I915_READ(INSTDONE);
+ break;
+ case 4:
+ case 5:
+ case 6:
+ instdone[0] = I915_READ(INSTDONE_I965);
+ instdone[1] = I915_READ(INSTDONE1);
+ break;
+ default:
+ WARN_ONCE(1, "Unsupported platform\n");
+ case 7:
+ instdone[0] = I915_READ(GEN7_INSTDONE_1);
+ instdone[1] = I915_READ(GEN7_SC_INSTDONE);
+ instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
+ instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
+ break;
+ }
+}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3d92a7cef15..a03b445ceb5 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -85,6 +85,12 @@ ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
+ if (dev_priv->pc8.irqs_disabled) {
+ WARN(1, "IRQs disabled\n");
+ dev_priv->pc8.regsave.deimr &= ~mask;
+ return;
+ }
+
if ((dev_priv->irq_mask & mask) != 0) {
dev_priv->irq_mask &= ~mask;
I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -97,6 +103,12 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
assert_spin_locked(&dev_priv->irq_lock);
+ if (dev_priv->pc8.irqs_disabled) {
+ WARN(1, "IRQs disabled\n");
+ dev_priv->pc8.regsave.deimr |= mask;
+ return;
+ }
+
if ((dev_priv->irq_mask & mask) != mask) {
dev_priv->irq_mask |= mask;
I915_WRITE(DEIMR, dev_priv->irq_mask);
@@ -104,6 +116,85 @@ ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
}
}
+/**
+ * ilk_update_gt_irq - update GTIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask)
+{
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (dev_priv->pc8.irqs_disabled) {
+ WARN(1, "IRQs disabled\n");
+ dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
+ dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
+ interrupt_mask);
+ return;
+ }
+
+ dev_priv->gt_irq_mask &= ~interrupt_mask;
+ dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
+}
+
+void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+ ilk_update_gt_irq(dev_priv, mask, mask);
+}
+
+void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+ ilk_update_gt_irq(dev_priv, mask, 0);
+}
+
+/**
+ * snb_update_pm_irq - update GEN6_PMIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask)
+{
+ uint32_t new_val;
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (dev_priv->pc8.irqs_disabled) {
+ WARN(1, "IRQs disabled\n");
+ dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
+ dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
+ interrupt_mask);
+ return;
+ }
+
+ new_val = dev_priv->pm_irq_mask;
+ new_val &= ~interrupt_mask;
+ new_val |= (~enabled_irq_mask & interrupt_mask);
+
+ if (new_val != dev_priv->pm_irq_mask) {
+ dev_priv->pm_irq_mask = new_val;
+ I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
+ POSTING_READ(GEN6_PMIMR);
+ }
+}
+
+void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+ snb_update_pm_irq(dev_priv, mask, mask);
+}
+
+void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+ snb_update_pm_irq(dev_priv, mask, 0);
+}
+
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -128,6 +219,8 @@ static bool cpt_can_enable_serr_int(struct drm_device *dev)
enum pipe pipe;
struct intel_crtc *crtc;
+ assert_spin_locked(&dev_priv->irq_lock);
+
for_each_pipe(pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
@@ -152,38 +245,75 @@ static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
}
static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
- bool enable)
+ enum pipe pipe, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
-
if (enable) {
+ I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+
if (!ivb_can_enable_err_int(dev))
return;
- I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
- ERR_INT_FIFO_UNDERRUN_B |
- ERR_INT_FIFO_UNDERRUN_C);
-
ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
} else {
+ bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);
+
+ /* Change the state _after_ we've read out the current one. */
ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+
+ if (!was_enabled &&
+ (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
+ DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
+ pipe_name(pipe));
+ }
}
}
-static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
+/**
+ * ibx_display_interrupt_update - update SDEIMR
+ * @dev_priv: driver private
+ * @interrupt_mask: mask of interrupt bits to update
+ * @enabled_irq_mask: mask of interrupt bits to enable
+ */
+static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+ uint32_t interrupt_mask,
+ uint32_t enabled_irq_mask)
+{
+ uint32_t sdeimr = I915_READ(SDEIMR);
+ sdeimr &= ~interrupt_mask;
+ sdeimr |= (~enabled_irq_mask & interrupt_mask);
+
+ assert_spin_locked(&dev_priv->irq_lock);
+
+ if (dev_priv->pc8.irqs_disabled &&
+ (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
+ WARN(1, "IRQs disabled\n");
+ dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
+ dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
+ interrupt_mask);
+ return;
+ }
+
+ I915_WRITE(SDEIMR, sdeimr);
+ POSTING_READ(SDEIMR);
+}
+#define ibx_enable_display_interrupt(dev_priv, bits) \
+ ibx_display_interrupt_update((dev_priv), (bits), (bits))
+#define ibx_disable_display_interrupt(dev_priv, bits) \
+ ibx_display_interrupt_update((dev_priv), (bits), 0)
+
+static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
+ enum transcoder pch_transcoder,
bool enable)
{
- struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
- SDE_TRANSB_FIFO_UNDER;
+ uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
+ SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
if (enable)
- I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
+ ibx_enable_display_interrupt(dev_priv, bit);
else
- I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);
-
- POSTING_READ(SDEIMR);
+ ibx_disable_display_interrupt(dev_priv, bit);
}
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -193,19 +323,26 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
struct drm_i915_private *dev_priv = dev->dev_private;
if (enable) {
+ I915_WRITE(SERR_INT,
+ SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+
if (!cpt_can_enable_serr_int(dev))
return;
- I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
- SERR_INT_TRANS_B_FIFO_UNDERRUN |
- SERR_INT_TRANS_C_FIFO_UNDERRUN);
-
- I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
+ ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
} else {
- I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
- }
+ uint32_t tmp = I915_READ(SERR_INT);
+ bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);
- POSTING_READ(SDEIMR);
+ /* Change the state _after_ we've read out the current one. */
+ ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
+
+ if (!was_enabled &&
+ (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
+ DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
+ transcoder_name(pch_transcoder));
+ }
+ }
}
/**
@@ -243,7 +380,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
if (IS_GEN5(dev) || IS_GEN6(dev))
ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
else if (IS_GEN7(dev))
- ivybridge_set_fifo_underrun_reporting(dev, enable);
+ ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);
done:
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -269,29 +406,19 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- enum pipe p;
- struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
+ struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool ret;
- if (HAS_PCH_LPT(dev)) {
- crtc = NULL;
- for_each_pipe(p) {
- struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
- if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
- crtc = c;
- break;
- }
- }
- if (!crtc) {
- DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
- return false;
- }
- } else {
- crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
- }
- intel_crtc = to_intel_crtc(crtc);
+ /*
+ * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
+ * has only one pch transcoder A that all pipes can use. To avoid racy
+ * pch transcoder -> pipe lookups from interrupt code, simply store the
+ * underrun statistics in crtc A. Since we never expose this anywhere
+ * nor use it outside of the fifo underrun code here, using the "wrong"
+ * crtc on LPT won't cause issues.
+ */
spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -303,7 +430,7 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
intel_crtc->pch_fifo_underrun_disabled = !enable;
if (HAS_PCH_IBX(dev))
- ibx_set_fifo_underrun_reporting(intel_crtc, enable);
+ ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
else
cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
@@ -319,6 +446,8 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0x7fff0000;
+ assert_spin_locked(&dev_priv->irq_lock);
+
if ((pipestat & mask) == mask)
return;
@@ -334,6 +463,8 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
u32 reg = PIPESTAT(pipe);
u32 pipestat = I915_READ(reg) & 0x7fff0000;
+ assert_spin_locked(&dev_priv->irq_lock);
+
if ((pipestat & mask) == 0)
return;
@@ -625,14 +756,13 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_kms_helper_hotplug_event(dev);
}
-static void ironlake_handle_rps_change(struct drm_device *dev)
+static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 busy_up, busy_down, max_avg, min_avg;
u8 new_delay;
- unsigned long flags;
- spin_lock_irqsave(&mchdev_lock, flags);
+ spin_lock(&mchdev_lock);
I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
@@ -660,7 +790,7 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
if (ironlake_set_drps(dev, new_delay))
dev_priv->ips.cur_delay = new_delay;
- spin_unlock_irqrestore(&mchdev_lock, flags);
+ spin_unlock(&mchdev_lock);
return;
}
@@ -668,34 +798,31 @@ static void ironlake_handle_rps_change(struct drm_device *dev)
static void notify_ring(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
if (ring->obj == NULL)
return;
trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));
wake_up_all(&ring->irq_queue);
- if (i915_enable_hangcheck) {
- mod_timer(&dev_priv->gpu_error.hangcheck_timer,
- round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
- }
+ i915_queue_hangcheck(dev);
}
static void gen6_pm_rps_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
rps.work);
- u32 pm_iir, pm_imr;
+ u32 pm_iir;
u8 new_delay;
- spin_lock_irq(&dev_priv->rps.lock);
+ spin_lock_irq(&dev_priv->irq_lock);
pm_iir = dev_priv->rps.pm_iir;
dev_priv->rps.pm_iir = 0;
- pm_imr = I915_READ(GEN6_PMIMR);
/* Make sure not to corrupt PMIMR state used by ringbuffer code */
- I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
- spin_unlock_irq(&dev_priv->rps.lock);
+ snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* Make sure we didn't queue anything we're not going to process. */
+ WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);
if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
return;
@@ -781,13 +908,12 @@ static void ivybridge_parity_work(struct work_struct *work)
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ ilk_enable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
mutex_unlock(&dev_priv->dev->struct_mutex);
- parity_event[0] = "L3_PARITY_ERROR=1";
+ parity_event[0] = I915_L3_PARITY_UEVENT "=1";
parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
@@ -804,22 +930,31 @@ static void ivybridge_parity_work(struct work_struct *work)
kfree(parity_event[1]);
}
-static void ivybridge_handle_parity_error(struct drm_device *dev)
+static void ivybridge_parity_error_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long flags;
if (!HAS_L3_GPU_CACHE(dev))
return;
- spin_lock_irqsave(&dev_priv->irq_lock, flags);
- dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
+ spin_lock(&dev_priv->irq_lock);
+ ilk_disable_gt_irq(dev_priv, GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
+ spin_unlock(&dev_priv->irq_lock);
queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
+static void ilk_gt_irq_handler(struct drm_device *dev,
+ struct drm_i915_private *dev_priv,
+ u32 gt_iir)
+{
+ if (gt_iir &
+ (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (gt_iir & ILK_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
+}
+
static void snb_gt_irq_handler(struct drm_device *dev,
struct drm_i915_private *dev_priv,
u32 gt_iir)
@@ -841,32 +976,7 @@ static void snb_gt_irq_handler(struct drm_device *dev,
}
if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
- ivybridge_handle_parity_error(dev);
-}
-
-/* Legacy way of handling PM interrupts */
-static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
- u32 pm_iir)
-{
- unsigned long flags;
-
- /*
- * IIR bits should never already be set because IMR should
- * prevent an interrupt from being shown in IIR. The warning
- * displays a case where we've unsafely cleared
- * dev_priv->rps.pm_iir. Although missing an interrupt of the same
- * type is not a problem, it displays a problem in the logic.
- *
- * The mask bit in IMR is cleared by dev_priv->rps.work.
- */
-
- spin_lock_irqsave(&dev_priv->rps.lock, flags);
- dev_priv->rps.pm_iir |= pm_iir;
- I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
- POSTING_READ(GEN6_PMIMR);
- spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
-
- queue_work(dev_priv->wq, &dev_priv->rps.work);
+ ivybridge_parity_error_irq_handler(dev);
}
#define HPD_STORM_DETECT_PERIOD 1000
@@ -886,6 +996,10 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
spin_lock(&dev_priv->irq_lock);
for (i = 1; i < HPD_NUM_PINS; i++) {
+ WARN(((hpd[i] & hotplug_trigger) &&
+ dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
+ "Received HPD interrupt although disabled\n");
+
if (!(hpd[i] & hotplug_trigger) ||
dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
continue;
@@ -896,6 +1010,7 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
+ msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
dev_priv->hpd_stats[i].hpd_cnt = 0;
+ DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
dev_priv->hpd_event_bits &= ~(1 << i);
@@ -903,6 +1018,8 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
storm_detected = true;
} else {
dev_priv->hpd_stats[i].hpd_cnt++;
+ DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
+ dev_priv->hpd_stats[i].hpd_cnt);
}
}
@@ -928,28 +1045,21 @@ static void dp_aux_irq_handler(struct drm_device *dev)
wake_up_all(&dev_priv->gmbus_wait_queue);
}
-/* Unlike gen6_queue_rps_work() from which this function is originally derived,
- * we must be able to deal with other PM interrupts. This is complicated because
- * of the way in which we use the masks to defer the RPS work (which for
- * posterity is necessary because of forcewake).
- */
-static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
- u32 pm_iir)
+/* The RPS events need forcewake, so we add them to a work queue and mask their
+ * IMR bits until the work is done. Other interrupts can be processed without
+ * the work queue. */
+static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
- unsigned long flags;
+ if (pm_iir & GEN6_PM_RPS_EVENTS) {
+ spin_lock(&dev_priv->irq_lock);
+ dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
+ snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
+ spin_unlock(&dev_priv->irq_lock);
- spin_lock_irqsave(&dev_priv->rps.lock, flags);
- dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
- if (dev_priv->rps.pm_iir) {
- I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
- /* never want to mask useful interrupts. (also posting read) */
- WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
- /* TODO: if queue_work is slow, move it out of the spinlock */
queue_work(dev_priv->wq, &dev_priv->rps.work);
}
- spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
- if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
+ if (HAS_VEBOX(dev_priv->dev)) {
if (pm_iir & PM_VEBOX_USER_INTERRUPT)
notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
@@ -1028,8 +1138,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
gmbus_irq_handler(dev);
- if (pm_iir & GEN6_PM_RPS_EVENTS)
- gen6_queue_rps_work(dev_priv, pm_iir);
+ if (pm_iir)
+ gen6_rps_irq_handler(dev_priv, pm_iir);
I915_WRITE(GTIIR, gt_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -1179,27 +1289,112 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
cpt_serr_int_handler(dev);
}
-static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
+static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (de_iir & DE_AUX_CHANNEL_A)
+ dp_aux_irq_handler(dev);
+
+ if (de_iir & DE_GSE)
+ intel_opregion_asle_intr(dev);
+
+ if (de_iir & DE_PIPEA_VBLANK)
+ drm_handle_vblank(dev, 0);
+
+ if (de_iir & DE_PIPEB_VBLANK)
+ drm_handle_vblank(dev, 1);
+
+ if (de_iir & DE_POISON)
+ DRM_ERROR("Poison interrupt\n");
+
+ if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
+ if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
+ DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
+
+ if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
+ if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
+ DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
+
+ if (de_iir & DE_PLANEA_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 0);
+ intel_finish_page_flip_plane(dev, 0);
+ }
+
+ if (de_iir & DE_PLANEB_FLIP_DONE) {
+ intel_prepare_page_flip(dev, 1);
+ intel_finish_page_flip_plane(dev, 1);
+ }
+
+ /* check event from PCH */
+ if (de_iir & DE_PCH_EVENT) {
+ u32 pch_iir = I915_READ(SDEIIR);
+
+ if (HAS_PCH_CPT(dev))
+ cpt_irq_handler(dev, pch_iir);
+ else
+ ibx_irq_handler(dev, pch_iir);
+
+ /* should clear PCH hotplug event before clearing CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ }
+
+ if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
+ ironlake_rps_change_irq_handler(dev);
+}
+
+static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ if (de_iir & DE_ERR_INT_IVB)
+ ivb_err_int_handler(dev);
+
+ if (de_iir & DE_AUX_CHANNEL_A_IVB)
+ dp_aux_irq_handler(dev);
+
+ if (de_iir & DE_GSE_IVB)
+ intel_opregion_asle_intr(dev);
+
+ for (i = 0; i < 3; i++) {
+ if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
+ drm_handle_vblank(dev, i);
+ if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
+ intel_prepare_page_flip(dev, i);
+ intel_finish_page_flip_plane(dev, i);
+ }
+ }
+
+ /* check event from PCH */
+ if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
+ u32 pch_iir = I915_READ(SDEIIR);
+
+ cpt_irq_handler(dev, pch_iir);
+
+ /* clear PCH hotplug event before clearing CPU irq */
+ I915_WRITE(SDEIIR, pch_iir);
+ }
+}
+
+static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
+ u32 de_iir, gt_iir, de_ier, sde_ier = 0;
irqreturn_t ret = IRQ_NONE;
- int i;
+ bool err_int_reenable = false;
atomic_inc(&dev_priv->irq_received);
/* We get interrupts on unclaimed registers, so check for this before we
* do any I915_{READ,WRITE}. */
- if (IS_HASWELL(dev) &&
- (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
- DRM_ERROR("Unclaimed register before interrupt\n");
- I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
- }
+ intel_uncore_check_errors(dev);
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+ POSTING_READ(DEIER);
/* Disable south interrupts. We'll only write to SDEIIR once, so further
* interrupts will will be stored on its back queue, and then we'll be
@@ -1217,62 +1412,42 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
* handler. */
if (IS_HASWELL(dev)) {
spin_lock(&dev_priv->irq_lock);
- ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
+ err_int_reenable = ~dev_priv->irq_mask & DE_ERR_INT_IVB;
+ if (err_int_reenable)
+ ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
spin_unlock(&dev_priv->irq_lock);
}
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
- snb_gt_irq_handler(dev, dev_priv, gt_iir);
+ if (INTEL_INFO(dev)->gen >= 6)
+ snb_gt_irq_handler(dev, dev_priv, gt_iir);
+ else
+ ilk_gt_irq_handler(dev, dev_priv, gt_iir);
I915_WRITE(GTIIR, gt_iir);
ret = IRQ_HANDLED;
}
de_iir = I915_READ(DEIIR);
if (de_iir) {
- if (de_iir & DE_ERR_INT_IVB)
- ivb_err_int_handler(dev);
-
- if (de_iir & DE_AUX_CHANNEL_A_IVB)
- dp_aux_irq_handler(dev);
-
- if (de_iir & DE_GSE_IVB)
- intel_opregion_asle_intr(dev);
-
- for (i = 0; i < 3; i++) {
- if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
- drm_handle_vblank(dev, i);
- if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
- intel_prepare_page_flip(dev, i);
- intel_finish_page_flip_plane(dev, i);
- }
- }
-
- /* check event from PCH */
- if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
- u32 pch_iir = I915_READ(SDEIIR);
-
- cpt_irq_handler(dev, pch_iir);
-
- /* clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
- }
-
+ if (INTEL_INFO(dev)->gen >= 7)
+ ivb_display_irq_handler(dev, de_iir);
+ else
+ ilk_display_irq_handler(dev, de_iir);
I915_WRITE(DEIIR, de_iir);
ret = IRQ_HANDLED;
}
- pm_iir = I915_READ(GEN6_PMIIR);
- if (pm_iir) {
- if (IS_HASWELL(dev))
- hsw_pm_irq_handler(dev_priv, pm_iir);
- else if (pm_iir & GEN6_PM_RPS_EVENTS)
- gen6_queue_rps_work(dev_priv, pm_iir);
- I915_WRITE(GEN6_PMIIR, pm_iir);
- ret = IRQ_HANDLED;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ u32 pm_iir = I915_READ(GEN6_PMIIR);
+ if (pm_iir) {
+ gen6_rps_irq_handler(dev_priv, pm_iir);
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+ ret = IRQ_HANDLED;
+ }
}
- if (IS_HASWELL(dev)) {
+ if (err_int_reenable) {
spin_lock(&dev_priv->irq_lock);
if (ivb_can_enable_err_int(dev))
ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
@@ -1289,119 +1464,6 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
return ret;
}
-static void ilk_gt_irq_handler(struct drm_device *dev,
- struct drm_i915_private *dev_priv,
- u32 gt_iir)
-{
- if (gt_iir &
- (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
- notify_ring(dev, &dev_priv->ring[RCS]);
- if (gt_iir & ILK_BSD_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->ring[VCS]);
-}
-
-static irqreturn_t ironlake_irq_handler(int irq, void *arg)
-{
- struct drm_device *dev = (struct drm_device *) arg;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- int ret = IRQ_NONE;
- u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
-
- atomic_inc(&dev_priv->irq_received);
-
- /* disable master interrupt before clearing iir */
- de_ier = I915_READ(DEIER);
- I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
- POSTING_READ(DEIER);
-
- /* Disable south interrupts. We'll only write to SDEIIR once, so further
- * interrupts will will be stored on its back queue, and then we'll be
- * able to process them after we restore SDEIER (as soon as we restore
- * it, we'll get an interrupt if SDEIIR still has something to process
- * due to its back queue). */
- sde_ier = I915_READ(SDEIER);
- I915_WRITE(SDEIER, 0);
- POSTING_READ(SDEIER);
-
- de_iir = I915_READ(DEIIR);
- gt_iir = I915_READ(GTIIR);
- pm_iir = I915_READ(GEN6_PMIIR);
-
- if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
- goto done;
-
- ret = IRQ_HANDLED;
-
- if (IS_GEN5(dev))
- ilk_gt_irq_handler(dev, dev_priv, gt_iir);
- else
- snb_gt_irq_handler(dev, dev_priv, gt_iir);
-
- if (de_iir & DE_AUX_CHANNEL_A)
- dp_aux_irq_handler(dev);
-
- if (de_iir & DE_GSE)
- intel_opregion_asle_intr(dev);
-
- if (de_iir & DE_PIPEA_VBLANK)
- drm_handle_vblank(dev, 0);
-
- if (de_iir & DE_PIPEB_VBLANK)
- drm_handle_vblank(dev, 1);
-
- if (de_iir & DE_POISON)
- DRM_ERROR("Poison interrupt\n");
-
- if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
- if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
- DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
-
- if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
- if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
- DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
-
- if (de_iir & DE_PLANEA_FLIP_DONE) {
- intel_prepare_page_flip(dev, 0);
- intel_finish_page_flip_plane(dev, 0);
- }
-
- if (de_iir & DE_PLANEB_FLIP_DONE) {
- intel_prepare_page_flip(dev, 1);
- intel_finish_page_flip_plane(dev, 1);
- }
-
- /* check event from PCH */
- if (de_iir & DE_PCH_EVENT) {
- u32 pch_iir = I915_READ(SDEIIR);
-
- if (HAS_PCH_CPT(dev))
- cpt_irq_handler(dev, pch_iir);
- else
- ibx_irq_handler(dev, pch_iir);
-
- /* should clear PCH hotplug event before clear CPU irq */
- I915_WRITE(SDEIIR, pch_iir);
- }
-
- if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
- ironlake_handle_rps_change(dev);
-
- if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
- gen6_queue_rps_work(dev_priv, pm_iir);
-
- I915_WRITE(GTIIR, gt_iir);
- I915_WRITE(DEIIR, de_iir);
- I915_WRITE(GEN6_PMIIR, pm_iir);
-
-done:
- I915_WRITE(DEIER, de_ier);
- POSTING_READ(DEIER);
- I915_WRITE(SDEIER, sde_ier);
- POSTING_READ(SDEIER);
-
- return ret;
-}
-
/**
* i915_error_work_func - do process context error handling work
* @work: work struct
@@ -1417,9 +1479,9 @@ static void i915_error_work_func(struct work_struct *work)
gpu_error);
struct drm_device *dev = dev_priv->dev;
struct intel_ring_buffer *ring;
- char *error_event[] = { "ERROR=1", NULL };
- char *reset_event[] = { "RESET=1", NULL };
- char *reset_done_event[] = { "ERROR=0", NULL };
+ char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
+ char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
+ char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
int i, ret;
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
@@ -1470,535 +1532,6 @@ static void i915_error_work_func(struct work_struct *work)
}
}
-/* NB: please notice the memset */
-static void i915_get_extra_instdone(struct drm_device *dev,
- uint32_t *instdone)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
-
- switch(INTEL_INFO(dev)->gen) {
- case 2:
- case 3:
- instdone[0] = I915_READ(INSTDONE);
- break;
- case 4:
- case 5:
- case 6:
- instdone[0] = I915_READ(INSTDONE_I965);
- instdone[1] = I915_READ(INSTDONE1);
- break;
- default:
- WARN_ONCE(1, "Unsupported platform\n");
- case 7:
- instdone[0] = I915_READ(GEN7_INSTDONE_1);
- instdone[1] = I915_READ(GEN7_SC_INSTDONE);
- instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
- instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
- break;
- }
-}
-
-#ifdef CONFIG_DEBUG_FS
-static struct drm_i915_error_object *
-i915_error_object_create_sized(struct drm_i915_private *dev_priv,
- struct drm_i915_gem_object *src,
- const int num_pages)
-{
- struct drm_i915_error_object *dst;
- int i;
- u32 reloc_offset;
-
- if (src == NULL || src->pages == NULL)
- return NULL;
-
- dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
- if (dst == NULL)
- return NULL;
-
- reloc_offset = src->gtt_offset;
- for (i = 0; i < num_pages; i++) {
- unsigned long flags;
- void *d;
-
- d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
- if (d == NULL)
- goto unwind;
-
- local_irq_save(flags);
- if (reloc_offset < dev_priv->gtt.mappable_end &&
- src->has_global_gtt_mapping) {
- void __iomem *s;
-
- /* Simply ignore tiling or any overlapping fence.
- * It's part of the error state, and this hopefully
- * captures what the GPU read.
- */
-
- s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
- reloc_offset);
- memcpy_fromio(d, s, PAGE_SIZE);
- io_mapping_unmap_atomic(s);
- } else if (src->stolen) {
- unsigned long offset;
-
- offset = dev_priv->mm.stolen_base;
- offset += src->stolen->start;
- offset += i << PAGE_SHIFT;
-
- memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
- } else {
- struct page *page;
- void *s;
-
- page = i915_gem_object_get_page(src, i);
-
- drm_clflush_pages(&page, 1);
-
- s = kmap_atomic(page);
- memcpy(d, s, PAGE_SIZE);
- kunmap_atomic(s);
-
- drm_clflush_pages(&page, 1);
- }
- local_irq_restore(flags);
-
- dst->pages[i] = d;
-
- reloc_offset += PAGE_SIZE;
- }
- dst->page_count = num_pages;
- dst->gtt_offset = src->gtt_offset;
-
- return dst;
-
-unwind:
- while (i--)
- kfree(dst->pages[i]);
- kfree(dst);
- return NULL;
-}
-#define i915_error_object_create(dev_priv, src) \
- i915_error_object_create_sized((dev_priv), (src), \
- (src)->base.size>>PAGE_SHIFT)
-
-static void
-i915_error_object_free(struct drm_i915_error_object *obj)
-{
- int page;
-
- if (obj == NULL)
- return;
-
- for (page = 0; page < obj->page_count; page++)
- kfree(obj->pages[page]);
-
- kfree(obj);
-}
-
-void
-i915_error_state_free(struct kref *error_ref)
-{
- struct drm_i915_error_state *error = container_of(error_ref,
- typeof(*error), ref);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
- i915_error_object_free(error->ring[i].batchbuffer);
- i915_error_object_free(error->ring[i].ringbuffer);
- i915_error_object_free(error->ring[i].ctx);
- kfree(error->ring[i].requests);
- }
-
- kfree(error->active_bo);
- kfree(error->overlay);
- kfree(error->display);
- kfree(error);
-}
-static void capture_bo(struct drm_i915_error_buffer *err,
- struct drm_i915_gem_object *obj)
-{
- err->size = obj->base.size;
- err->name = obj->base.name;
- err->rseqno = obj->last_read_seqno;
- err->wseqno = obj->last_write_seqno;
- err->gtt_offset = obj->gtt_offset;
- err->read_domains = obj->base.read_domains;
- err->write_domain = obj->base.write_domain;
- err->fence_reg = obj->fence_reg;
- err->pinned = 0;
- if (obj->pin_count > 0)
- err->pinned = 1;
- if (obj->user_pin_count > 0)
- err->pinned = -1;
- err->tiling = obj->tiling_mode;
- err->dirty = obj->dirty;
- err->purgeable = obj->madv != I915_MADV_WILLNEED;
- err->ring = obj->ring ? obj->ring->id : -1;
- err->cache_level = obj->cache_level;
-}
-
-static u32 capture_active_bo(struct drm_i915_error_buffer *err,
- int count, struct list_head *head)
-{
- struct drm_i915_gem_object *obj;
- int i = 0;
-
- list_for_each_entry(obj, head, mm_list) {
- capture_bo(err++, obj);
- if (++i == count)
- break;
- }
-
- return i;
-}
-
-static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
- int count, struct list_head *head)
-{
- struct drm_i915_gem_object *obj;
- int i = 0;
-
- list_for_each_entry(obj, head, global_list) {
- if (obj->pin_count == 0)
- continue;
-
- capture_bo(err++, obj);
- if (++i == count)
- break;
- }
-
- return i;
-}
-
-static void i915_gem_record_fences(struct drm_device *dev,
- struct drm_i915_error_state *error)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
-
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 7:
- case 6:
- for (i = 0; i < dev_priv->num_fence_regs; i++)
- error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- break;
- case 3:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
- case 2:
- for (i = 0; i < 8; i++)
- error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
- break;
-
- default:
- BUG();
- }
-}
-
-static struct drm_i915_error_object *
-i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
-{
- struct drm_i915_gem_object *obj;
- u32 seqno;
-
- if (!ring->get_seqno)
- return NULL;
-
- if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
- u32 acthd = I915_READ(ACTHD);
-
- if (WARN_ON(ring->id != RCS))
- return NULL;
-
- obj = ring->private;
- if (acthd >= obj->gtt_offset &&
- acthd < obj->gtt_offset + obj->base.size)
- return i915_error_object_create(dev_priv, obj);
- }
-
- seqno = ring->get_seqno(ring, false);
- list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
- if (obj->ring != ring)
- continue;
-
- if (i915_seqno_passed(seqno, obj->last_read_seqno))
- continue;
-
- if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
- continue;
-
- /* We need to copy these to an anonymous buffer as the simplest
- * method to avoid being overwritten by userspace.
- */
- return i915_error_object_create(dev_priv, obj);
- }
-
- return NULL;
-}
-
-static void i915_record_ring_state(struct drm_device *dev,
- struct drm_i915_error_state *error,
- struct intel_ring_buffer *ring)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (INTEL_INFO(dev)->gen >= 6) {
- error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
- error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
- error->semaphore_mboxes[ring->id][0]
- = I915_READ(RING_SYNC_0(ring->mmio_base));
- error->semaphore_mboxes[ring->id][1]
- = I915_READ(RING_SYNC_1(ring->mmio_base));
- error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
- error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
- }
-
- if (INTEL_INFO(dev)->gen >= 4) {
- error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
- error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
- error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
- error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
- error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
- if (ring->id == RCS)
- error->bbaddr = I915_READ64(BB_ADDR);
- } else {
- error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
- error->ipeir[ring->id] = I915_READ(IPEIR);
- error->ipehr[ring->id] = I915_READ(IPEHR);
- error->instdone[ring->id] = I915_READ(INSTDONE);
- }
-
- error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
- error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
- error->seqno[ring->id] = ring->get_seqno(ring, false);
- error->acthd[ring->id] = intel_ring_get_active_head(ring);
- error->head[ring->id] = I915_READ_HEAD(ring);
- error->tail[ring->id] = I915_READ_TAIL(ring);
- error->ctl[ring->id] = I915_READ_CTL(ring);
-
- error->cpu_ring_head[ring->id] = ring->head;
- error->cpu_ring_tail[ring->id] = ring->tail;
-}
-
-
-static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
- struct drm_i915_error_state *error,
- struct drm_i915_error_ring *ering)
-{
- struct drm_i915_private *dev_priv = ring->dev->dev_private;
- struct drm_i915_gem_object *obj;
-
- /* Currently render ring is the only HW context user */
- if (ring->id != RCS || !error->ccid)
- return;
-
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
- if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
- ering->ctx = i915_error_object_create_sized(dev_priv,
- obj, 1);
- }
- }
-}
-
-static void i915_gem_record_rings(struct drm_device *dev,
- struct drm_i915_error_state *error)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_ring_buffer *ring;
- struct drm_i915_gem_request *request;
- int i, count;
-
- for_each_ring(ring, dev_priv, i) {
- i915_record_ring_state(dev, error, ring);
-
- error->ring[i].batchbuffer =
- i915_error_first_batchbuffer(dev_priv, ring);
-
- error->ring[i].ringbuffer =
- i915_error_object_create(dev_priv, ring->obj);
-
-
- i915_gem_record_active_context(ring, error, &error->ring[i]);
-
- count = 0;
- list_for_each_entry(request, &ring->request_list, list)
- count++;
-
- error->ring[i].num_requests = count;
- error->ring[i].requests =
- kmalloc(count*sizeof(struct drm_i915_error_request),
- GFP_ATOMIC);
- if (error->ring[i].requests == NULL) {
- error->ring[i].num_requests = 0;
- continue;
- }
-
- count = 0;
- list_for_each_entry(request, &ring->request_list, list) {
- struct drm_i915_error_request *erq;
-
- erq = &error->ring[i].requests[count++];
- erq->seqno = request->seqno;
- erq->jiffies = request->emitted_jiffies;
- erq->tail = request->tail;
- }
- }
-}
-
-/**
- * i915_capture_error_state - capture an error record for later analysis
- * @dev: drm device
- *
- * Should be called when an error is detected (either a hang or an error
- * interrupt) to capture error state from the time of the error. Fills
- * out a structure which becomes available in debugfs for user level tools
- * to pick up.
- */
-static void i915_capture_error_state(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj;
- struct drm_i915_error_state *error;
- unsigned long flags;
- int i, pipe;
-
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
- error = dev_priv->gpu_error.first_error;
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
- if (error)
- return;
-
- /* Account for pipe specific data like PIPE*STAT */
- error = kzalloc(sizeof(*error), GFP_ATOMIC);
- if (!error) {
- DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
- return;
- }
-
- DRM_INFO("capturing error event; look for more information in "
- "/sys/kernel/debug/dri/%d/i915_error_state\n",
- dev->primary->index);
-
- kref_init(&error->ref);
- error->eir = I915_READ(EIR);
- error->pgtbl_er = I915_READ(PGTBL_ER);
- if (HAS_HW_CONTEXTS(dev))
- error->ccid = I915_READ(CCID);
-
- if (HAS_PCH_SPLIT(dev))
- error->ier = I915_READ(DEIER) | I915_READ(GTIER);
- else if (IS_VALLEYVIEW(dev))
- error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
- else if (IS_GEN2(dev))
- error->ier = I915_READ16(IER);
- else
- error->ier = I915_READ(IER);
-
- if (INTEL_INFO(dev)->gen >= 6)
- error->derrmr = I915_READ(DERRMR);
-
- if (IS_VALLEYVIEW(dev))
- error->forcewake = I915_READ(FORCEWAKE_VLV);
- else if (INTEL_INFO(dev)->gen >= 7)
- error->forcewake = I915_READ(FORCEWAKE_MT);
- else if (INTEL_INFO(dev)->gen == 6)
- error->forcewake = I915_READ(FORCEWAKE);
-
- if (!HAS_PCH_SPLIT(dev))
- for_each_pipe(pipe)
- error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
-
- if (INTEL_INFO(dev)->gen >= 6) {
- error->error = I915_READ(ERROR_GEN6);
- error->done_reg = I915_READ(DONE_REG);
- }
-
- if (INTEL_INFO(dev)->gen == 7)
- error->err_int = I915_READ(GEN7_ERR_INT);
-
- i915_get_extra_instdone(dev, error->extra_instdone);
-
- i915_gem_record_fences(dev, error);
- i915_gem_record_rings(dev, error);
-
- /* Record buffers on the active and pinned lists. */
- error->active_bo = NULL;
- error->pinned_bo = NULL;
-
- i = 0;
- list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
- i++;
- error->active_bo_count = i;
- list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
- if (obj->pin_count)
- i++;
- error->pinned_bo_count = i - error->active_bo_count;
-
- error->active_bo = NULL;
- error->pinned_bo = NULL;
- if (i) {
- error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
- GFP_ATOMIC);
- if (error->active_bo)
- error->pinned_bo =
- error->active_bo + error->active_bo_count;
- }
-
- if (error->active_bo)
- error->active_bo_count =
- capture_active_bo(error->active_bo,
- error->active_bo_count,
- &dev_priv->mm.active_list);
-
- if (error->pinned_bo)
- error->pinned_bo_count =
- capture_pinned_bo(error->pinned_bo,
- error->pinned_bo_count,
- &dev_priv->mm.bound_list);
-
- do_gettimeofday(&error->time);
-
- error->overlay = intel_overlay_capture_error_state(dev);
- error->display = intel_display_capture_error_state(dev);
-
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
- if (dev_priv->gpu_error.first_error == NULL) {
- dev_priv->gpu_error.first_error = error;
- error = NULL;
- }
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
-
- if (error)
- i915_error_state_free(&error->ref);
-}
-
-void i915_destroy_error_state(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_error_state *error;
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
- error = dev_priv->gpu_error.first_error;
- dev_priv->gpu_error.first_error = NULL;
- spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
-
- if (error)
- kref_put(&error->ref, i915_error_state_free);
-}
-#else
-#define i915_capture_error_state(x)
-#endif
-
static void i915_report_and_clear_eir(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2155,10 +1688,10 @@ static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, in
if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = DSPSURF(intel_crtc->plane);
stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
- obj->gtt_offset;
+ i915_gem_obj_ggtt_offset(obj);
} else {
int dspaddr = DSPADDR(intel_crtc->plane);
- stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
+ stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
crtc->y * crtc->fb->pitches[0] +
crtc->x * crtc->fb->bits_per_pixel/8);
}
@@ -2202,29 +1735,14 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
+ uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
+ DE_PIPE_VBLANK_ILK(pipe);
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
- DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-
- return 0;
-}
-
-static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
-
- if (!i915_pipe_enabled(dev, pipe))
- return -EINVAL;
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_enable_display_irq(dev_priv,
- DE_PIPEA_VBLANK_IVB << (5 * pipe));
+ ironlake_enable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
@@ -2275,21 +1793,11 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
+ uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
+ DE_PIPE_VBLANK_ILK(pipe);
spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
- DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-}
-
-static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
-{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_disable_display_irq(dev_priv,
- DE_PIPEA_VBLANK_IVB << (pipe * 5));
+ ironlake_disable_display_irq(dev_priv, bit);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
@@ -2392,10 +1900,10 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
u32 tmp;
if (ring->hangcheck.acthd != acthd)
- return active;
+ return HANGCHECK_ACTIVE;
if (IS_GEN2(dev))
- return hung;
+ return HANGCHECK_HUNG;
/* Is the chip hanging on a WAIT_FOR_EVENT?
* If so we can simply poke the RB_WAIT bit
@@ -2407,24 +1915,24 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
DRM_ERROR("Kicking stuck wait on %s\n",
ring->name);
I915_WRITE_CTL(ring, tmp);
- return kick;
+ return HANGCHECK_KICK;
}
if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
switch (semaphore_passed(ring)) {
default:
- return hung;
+ return HANGCHECK_HUNG;
case 1:
DRM_ERROR("Kicking stuck semaphore on %s\n",
ring->name);
I915_WRITE_CTL(ring, tmp);
- return kick;
+ return HANGCHECK_KICK;
case 0:
- return wait;
+ return HANGCHECK_WAIT;
}
}
- return hung;
+ return HANGCHECK_HUNG;
}
/**
@@ -2435,7 +1943,7 @@ ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
* we kick the ring. If we see no progress on three subsequent calls
* we assume chip is wedged and try to fix it by resetting the chip.
*/
-void i915_hangcheck_elapsed(unsigned long data)
+static void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2471,8 +1979,6 @@ void i915_hangcheck_elapsed(unsigned long data)
} else
busy = false;
} else {
- int score;
-
/* We always increment the hangcheck score
* if the ring is busy and still processing
* the same request, so that no single request
@@ -2492,21 +1998,19 @@ void i915_hangcheck_elapsed(unsigned long data)
acthd);
switch (ring->hangcheck.action) {
- case wait:
- score = 0;
+ case HANGCHECK_WAIT:
break;
- case active:
- score = BUSY;
+ case HANGCHECK_ACTIVE:
+ ring->hangcheck.score += BUSY;
break;
- case kick:
- score = KICK;
+ case HANGCHECK_KICK:
+ ring->hangcheck.score += KICK;
break;
- case hung:
- score = HUNG;
+ case HANGCHECK_HUNG:
+ ring->hangcheck.score += HUNG;
stuck[i] = true;
break;
}
- ring->hangcheck.score += score;
}
} else {
/* Gradually reduce the count so that we catch DoS
@@ -2536,9 +2040,17 @@ void i915_hangcheck_elapsed(unsigned long data)
if (busy_count)
/* Reset timer case chip hangs without another request
* being added */
- mod_timer(&dev_priv->gpu_error.hangcheck_timer,
- round_jiffies_up(jiffies +
- DRM_I915_HANGCHECK_JIFFIES));
+ i915_queue_hangcheck(dev);
+}
+
+void i915_queue_hangcheck(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ if (!i915_enable_hangcheck)
+ return;
+
+ mod_timer(&dev_priv->gpu_error.hangcheck_timer,
+ round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
static void ibx_irq_preinstall(struct drm_device *dev)
@@ -2560,31 +2072,26 @@ static void ibx_irq_preinstall(struct drm_device *dev)
POSTING_READ(SDEIER);
}
-/* drm_dma.h hooks
-*/
-static void ironlake_irq_preinstall(struct drm_device *dev)
+static void gen5_gt_irq_preinstall(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-
- atomic_set(&dev_priv->irq_received, 0);
-
- I915_WRITE(HWSTAM, 0xeffe);
-
- /* XXX hotplug from PCH */
-
- I915_WRITE(DEIMR, 0xffffffff);
- I915_WRITE(DEIER, 0x0);
- POSTING_READ(DEIER);
+ struct drm_i915_private *dev_priv = dev->dev_private;
/* and GT */
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
POSTING_READ(GTIER);
- ibx_irq_preinstall(dev);
+ if (INTEL_INFO(dev)->gen >= 6) {
+ /* and PM */
+ I915_WRITE(GEN6_PMIMR, 0xffffffff);
+ I915_WRITE(GEN6_PMIER, 0x0);
+ POSTING_READ(GEN6_PMIER);
+ }
}
-static void ivybridge_irq_preinstall(struct drm_device *dev)
+/* drm_dma.h hooks
+*/
+static void ironlake_irq_preinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -2592,21 +2099,11 @@ static void ivybridge_irq_preinstall(struct drm_device *dev)
I915_WRITE(HWSTAM, 0xeffe);
- /* XXX hotplug from PCH */
-
I915_WRITE(DEIMR, 0xffffffff);
I915_WRITE(DEIER, 0x0);
POSTING_READ(DEIER);
- /* and GT */
- I915_WRITE(GTIMR, 0xffffffff);
- I915_WRITE(GTIER, 0x0);
- POSTING_READ(GTIER);
-
- /* Power management */
- I915_WRITE(GEN6_PMIMR, 0xffffffff);
- I915_WRITE(GEN6_PMIER, 0x0);
- POSTING_READ(GEN6_PMIER);
+ gen5_gt_irq_preinstall(dev);
ibx_irq_preinstall(dev);
}
@@ -2627,9 +2124,8 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
/* and GT */
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, 0xffffffff);
- I915_WRITE(GTIER, 0x0);
- POSTING_READ(GTIER);
+
+ gen5_gt_irq_preinstall(dev);
I915_WRITE(DPINVGTT, 0xff);
@@ -2648,22 +2144,21 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *intel_encoder;
- u32 mask = ~I915_READ(SDEIMR);
- u32 hotplug;
+ u32 hotplug_irqs, hotplug, enabled_irqs = 0;
if (HAS_PCH_IBX(dev)) {
- mask &= ~SDE_HOTPLUG_MASK;
+ hotplug_irqs = SDE_HOTPLUG_MASK;
list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
- mask |= hpd_ibx[intel_encoder->hpd_pin];
+ enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
} else {
- mask &= ~SDE_HOTPLUG_MASK_CPT;
+ hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
- mask |= hpd_cpt[intel_encoder->hpd_pin];
+ enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
}
- I915_WRITE(SDEIMR, ~mask);
+ ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
/*
* Enable digital hotplug on the PCH, and configure the DP short pulse
@@ -2700,123 +2195,103 @@ static void ibx_irq_postinstall(struct drm_device *dev)
I915_WRITE(SDEIMR, ~mask);
}
-static int ironlake_irq_postinstall(struct drm_device *dev)
+static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
- unsigned long irqflags;
-
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- /* enable kind of interrupts always enabled */
- u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
- DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
- DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
- DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
- u32 gt_irqs;
-
- dev_priv->irq_mask = ~display_mask;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 pm_irqs, gt_irqs;
- /* should always can generate irq */
- I915_WRITE(DEIIR, I915_READ(DEIIR));
- I915_WRITE(DEIMR, dev_priv->irq_mask);
- I915_WRITE(DEIER, display_mask |
- DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT);
- POSTING_READ(DEIER);
+ pm_irqs = gt_irqs = 0;
dev_priv->gt_irq_mask = ~0;
+ if (HAS_L3_GPU_CACHE(dev)) {
+ /* L3 parity interrupt is always unmasked. */
+ dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+ gt_irqs |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
+ }
- I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
- gt_irqs = GT_RENDER_USER_INTERRUPT;
-
- if (IS_GEN6(dev))
- gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
- else
+ gt_irqs |= GT_RENDER_USER_INTERRUPT;
+ if (IS_GEN5(dev)) {
gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
ILK_BSD_USER_INTERRUPT;
+ } else {
+ gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
+ }
+ I915_WRITE(GTIIR, I915_READ(GTIIR));
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
I915_WRITE(GTIER, gt_irqs);
POSTING_READ(GTIER);
- ibx_irq_postinstall(dev);
+ if (INTEL_INFO(dev)->gen >= 6) {
+ pm_irqs |= GEN6_PM_RPS_EVENTS;
- if (IS_IRONLAKE_M(dev)) {
- /* Enable PCU event interrupts
- *
- * spinlocking not required here for correctness since interrupt
- * setup is guaranteed to run in single-threaded context. But we
- * need it to make the assert_spin_locked happy. */
- spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
- ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
- spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
- }
+ if (HAS_VEBOX(dev))
+ pm_irqs |= PM_VEBOX_USER_INTERRUPT;
- return 0;
+ dev_priv->pm_irq_mask = 0xffffffff;
+ I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+ I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
+ I915_WRITE(GEN6_PMIER, pm_irqs);
+ POSTING_READ(GEN6_PMIER);
+ }
}
-static int ivybridge_irq_postinstall(struct drm_device *dev)
+static int ironlake_irq_postinstall(struct drm_device *dev)
{
+ unsigned long irqflags;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- /* enable kind of interrupts always enabled */
- u32 display_mask =
- DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
- DE_PLANEC_FLIP_DONE_IVB |
- DE_PLANEB_FLIP_DONE_IVB |
- DE_PLANEA_FLIP_DONE_IVB |
- DE_AUX_CHANNEL_A_IVB |
- DE_ERR_INT_IVB;
- u32 pm_irqs = GEN6_PM_RPS_EVENTS;
- u32 gt_irqs;
+ u32 display_mask, extra_mask;
+
+ if (INTEL_INFO(dev)->gen >= 7) {
+ display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
+ DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
+ DE_PLANEB_FLIP_DONE_IVB |
+ DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
+ DE_ERR_INT_IVB);
+ extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
+ DE_PIPEA_VBLANK_IVB);
+
+ I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
+ } else {
+ display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
+ DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
+ DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
+ DE_PIPEA_FIFO_UNDERRUN | DE_POISON);
+ extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
+ }
dev_priv->irq_mask = ~display_mask;
/* should always can generate irq */
- I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
I915_WRITE(DEIIR, I915_READ(DEIIR));
I915_WRITE(DEIMR, dev_priv->irq_mask);
- I915_WRITE(DEIER,
- display_mask |
- DE_PIPEC_VBLANK_IVB |
- DE_PIPEB_VBLANK_IVB |
- DE_PIPEA_VBLANK_IVB);
+ I915_WRITE(DEIER, display_mask | extra_mask);
POSTING_READ(DEIER);
- dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
-
- I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
- gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
- GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
- I915_WRITE(GTIER, gt_irqs);
- POSTING_READ(GTIER);
-
- I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
- if (HAS_VEBOX(dev))
- pm_irqs |= PM_VEBOX_USER_INTERRUPT |
- PM_VEBOX_CS_ERROR_INTERRUPT;
-
- /* Our enable/disable rps functions may touch these registers so
- * make sure to set a known state for only the non-RPS bits.
- * The RMW is extra paranoia since this should be called after being set
- * to a known state in preinstall.
- * */
- I915_WRITE(GEN6_PMIMR,
- (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
- I915_WRITE(GEN6_PMIER,
- (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
- POSTING_READ(GEN6_PMIER);
+ gen5_gt_irq_postinstall(dev);
ibx_irq_postinstall(dev);
+ if (IS_IRONLAKE_M(dev)) {
+ /* Enable PCU event interrupts
+ *
+ * spinlocking not required here for correctness since interrupt
+ * setup is guaranteed to run in single-threaded context. But we
+ * need it to make the assert_spin_locked happy. */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
+
return 0;
}
static int valleyview_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- u32 gt_irqs;
u32 enable_mask;
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
+ unsigned long irqflags;
enable_mask = I915_DISPLAY_PORT_INTERRUPT;
enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
@@ -2842,20 +2317,18 @@ static int valleyview_irq_postinstall(struct drm_device *dev)
I915_WRITE(PIPESTAT(1), 0xffff);
POSTING_READ(VLV_IER);
+ /* Interrupt setup is already guaranteed to be single-threaded; this is
+ * just to make the assert_spin_locked check happy. */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, 0, pipestat_enable);
i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
i915_enable_pipestat(dev_priv, 1, pipestat_enable);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IIR, 0xffffffff);
- I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
-
- gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
- GT_BLT_USER_INTERRUPT;
- I915_WRITE(GTIER, gt_irqs);
- POSTING_READ(GTIER);
+ gen5_gt_irq_postinstall(dev);
/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
@@ -3001,7 +2474,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
u16 iir, new_iir;
u32 pipe_stats[2];
unsigned long irqflags;
- int irq_received;
int pipe;
u16 flip_mask =
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -3035,7 +2507,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
DRM_DEBUG_DRIVER("pipe %c underrun\n",
pipe_name(pipe));
I915_WRITE(reg, pipe_stats[pipe]);
- irq_received = 1;
}
}
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
@@ -3323,6 +2794,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
u32 error_mask;
+ unsigned long irqflags;
/* Unmask the interrupts that we always want on. */
dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
@@ -3341,7 +2813,11 @@ static int i965_irq_postinstall(struct drm_device *dev)
if (IS_G4X(dev))
enable_mask |= I915_BSD_USER_INTERRUPT;
+ /* Interrupt setup is already guaranteed to be single-threaded; this is
+ * just to make the assert_spin_locked check happy. */
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
/*
* Enable some error detection, note the instruction error mask
@@ -3616,15 +3092,6 @@ void intel_irq_init(struct drm_device *dev)
dev->driver->enable_vblank = valleyview_enable_vblank;
dev->driver->disable_vblank = valleyview_disable_vblank;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
- } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
- /* Share uninstall handlers with ILK/SNB */
- dev->driver->irq_handler = ivybridge_irq_handler;
- dev->driver->irq_preinstall = ivybridge_irq_preinstall;
- dev->driver->irq_postinstall = ivybridge_irq_postinstall;
- dev->driver->irq_uninstall = ironlake_irq_uninstall;
- dev->driver->enable_vblank = ivybridge_enable_vblank;
- dev->driver->disable_vblank = ivybridge_disable_vblank;
- dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
@@ -3683,3 +3150,67 @@ void intel_hpd_init(struct drm_device *dev)
dev_priv->display.hpd_irq_setup(dev);
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
+
+/* Disable interrupts so we can allow Package C8+. */
+void hsw_pc8_disable_interrupts(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+
+ dev_priv->pc8.regsave.deimr = I915_READ(DEIMR);
+ dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR);
+ dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR);
+ dev_priv->pc8.regsave.gtier = I915_READ(GTIER);
+ dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR);
+
+ ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB);
+ ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT);
+ ilk_disable_gt_irq(dev_priv, 0xffffffff);
+ snb_disable_pm_irq(dev_priv, 0xffffffff);
+
+ dev_priv->pc8.irqs_disabled = true;
+
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+/* Restore interrupts so we can recover from Package C8+. */
+void hsw_pc8_restore_interrupts(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+ uint32_t val, expected;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+
+ val = I915_READ(DEIMR);
+ expected = ~DE_PCH_EVENT_IVB;
+ WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected);
+
+ val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT;
+ expected = ~SDE_HOTPLUG_MASK_CPT;
+ WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n",
+ val, expected);
+
+ val = I915_READ(GTIMR);
+ expected = 0xffffffff;
+ WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected);
+
+ val = I915_READ(GEN6_PMIMR);
+ expected = 0xffffffff;
+ WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val,
+ expected);
+
+ dev_priv->pc8.irqs_disabled = false;
+
+ ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr);
+ ibx_enable_display_interrupt(dev_priv,
+ ~dev_priv->pc8.regsave.sdeimr &
+ ~SDE_HOTPLUG_MASK_CPT);
+ ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr);
+ snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr);
+ I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier);
+
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f2326fc60ac..b6a58f720f9 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -61,6 +61,12 @@
#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
#define GC_DISPLAY_CLOCK_333_MHZ (4 << 4)
+#define GC_DISPLAY_CLOCK_267_MHZ_PNV (0 << 4)
+#define GC_DISPLAY_CLOCK_333_MHZ_PNV (1 << 4)
+#define GC_DISPLAY_CLOCK_444_MHZ_PNV (2 << 4)
+#define GC_DISPLAY_CLOCK_200_MHZ_PNV (5 << 4)
+#define GC_DISPLAY_CLOCK_133_MHZ_PNV (6 << 4)
+#define GC_DISPLAY_CLOCK_167_MHZ_PNV (7 << 4)
#define GC_DISPLAY_CLOCK_MASK (7 << 4)
#define GM45_GC_RENDER_CLOCK_MASK (0xf << 0)
#define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0)
@@ -363,6 +369,7 @@
#define PUNIT_REG_GPU_LFM 0xd3
#define PUNIT_REG_GPU_FREQ_REQ 0xd4
#define PUNIT_REG_GPU_FREQ_STS 0xd8
+#define GENFREQSTATUS (1<<0)
#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */
@@ -680,6 +687,7 @@
#define ERR_INT_FIFO_UNDERRUN_C (1<<6)
#define ERR_INT_FIFO_UNDERRUN_B (1<<3)
#define ERR_INT_FIFO_UNDERRUN_A (1<<0)
+#define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
#define FPGA_DBG 0x42300
#define FPGA_DBG_RM_NOCLAIM (1<<31)
@@ -752,6 +760,8 @@
will not assert AGPBUSY# and will only
be delivered when out of C3. */
#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
+#define INSTPM_TLB_INVALIDATE (1<<9)
+#define INSTPM_SYNC_FLUSH (1<<5)
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
#define FW_BLC2 0x020dc
@@ -1125,7 +1135,8 @@
#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018)
#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
-#define DPLL_DVO_HIGH_SPEED (1 << 30)
+#define DPLL_SDVO_HIGH_SPEED (1 << 30)
+#define DPLL_DVO_2X_MODE (1 << 30)
#define DPLL_EXT_BUFFER_ENABLE_VLV (1 << 30)
#define DPLL_SYNCLOCK_ENABLE (1 << 29)
#define DPLL_REFA_CLK_ENABLE_VLV (1 << 29)
@@ -1438,6 +1449,8 @@
#define MCH_SSKPD_WM0_MASK 0x3f
#define MCH_SSKPD_WM0_VAL 0xc
+#define MCH_SECP_NRG_STTS (MCHBAR_MIRROR_BASE_SNB + 0x592c)
+
/* Clocking configuration register */
#define CLKCFG 0x10c00
#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
@@ -1694,15 +1707,26 @@
*/
#define CCID 0x2180
#define CCID_EN (1<<0)
+/*
+ * Notes on SNB/IVB/VLV context size:
+ * - Power context is saved elsewhere (LLC or stolen)
+ * - Ring/execlist context is saved on SNB, not on IVB
+ * - Extended context size already includes render context size
+ * - We always need to follow the extended context size.
+ * SNB BSpec has comments indicating that we should use the
+ * render context size instead if execlists are disabled, but
+ * based on empirical testing that's just nonsense.
+ * - Pipelined/VF state is saved on SNB/IVB respectively
+ * - GT1 size just indicates how much of render context
+ * doesn't need saving on GT1
+ */
#define CXT_SIZE 0x21a0
#define GEN6_CXT_POWER_SIZE(cxt_reg) ((cxt_reg >> 24) & 0x3f)
#define GEN6_CXT_RING_SIZE(cxt_reg) ((cxt_reg >> 18) & 0x3f)
#define GEN6_CXT_RENDER_SIZE(cxt_reg) ((cxt_reg >> 12) & 0x3f)
#define GEN6_CXT_EXTENDED_SIZE(cxt_reg) ((cxt_reg >> 6) & 0x3f)
#define GEN6_CXT_PIPELINE_SIZE(cxt_reg) ((cxt_reg >> 0) & 0x3f)
-#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_POWER_SIZE(cxt_reg) + \
- GEN6_CXT_RING_SIZE(cxt_reg) + \
- GEN6_CXT_RENDER_SIZE(cxt_reg) + \
+#define GEN6_CXT_TOTAL_SIZE(cxt_reg) (GEN6_CXT_RING_SIZE(cxt_reg) + \
GEN6_CXT_EXTENDED_SIZE(cxt_reg) + \
GEN6_CXT_PIPELINE_SIZE(cxt_reg))
#define GEN7_CXT_SIZE 0x21a8
@@ -1712,11 +1736,7 @@
#define GEN7_CXT_EXTENDED_SIZE(ctx_reg) ((ctx_reg >> 9) & 0x7f)
#define GEN7_CXT_GT1_SIZE(ctx_reg) ((ctx_reg >> 6) & 0x7)
#define GEN7_CXT_VFSTATE_SIZE(ctx_reg) ((ctx_reg >> 0) & 0x3f)
-#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_POWER_SIZE(ctx_reg) + \
- GEN7_CXT_RING_SIZE(ctx_reg) + \
- GEN7_CXT_RENDER_SIZE(ctx_reg) + \
- GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
- GEN7_CXT_GT1_SIZE(ctx_reg) + \
+#define GEN7_CXT_TOTAL_SIZE(ctx_reg) (GEN7_CXT_EXTENDED_SIZE(ctx_reg) + \
GEN7_CXT_VFSTATE_SIZE(ctx_reg))
/* Haswell does have the CXT_SIZE register however it does not appear to be
* valid. Now, docs explain in dwords what is in the context object. The full
@@ -1776,6 +1796,71 @@
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
+/* HSW eDP PSR registers */
+#define EDP_PSR_CTL 0x64800
+#define EDP_PSR_ENABLE (1<<31)
+#define EDP_PSR_LINK_DISABLE (0<<27)
+#define EDP_PSR_LINK_STANDBY (1<<27)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_MASK (3<<25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES (0<<25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_4_LINES (1<<25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_2_LINES (2<<25)
+#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES (3<<25)
+#define EDP_PSR_MAX_SLEEP_TIME_SHIFT 20
+#define EDP_PSR_SKIP_AUX_EXIT (1<<12)
+#define EDP_PSR_TP1_TP2_SEL (0<<11)
+#define EDP_PSR_TP1_TP3_SEL (1<<11)
+#define EDP_PSR_TP2_TP3_TIME_500us (0<<8)
+#define EDP_PSR_TP2_TP3_TIME_100us (1<<8)
+#define EDP_PSR_TP2_TP3_TIME_2500us (2<<8)
+#define EDP_PSR_TP2_TP3_TIME_0us (3<<8)
+#define EDP_PSR_TP1_TIME_500us (0<<4)
+#define EDP_PSR_TP1_TIME_100us (1<<4)
+#define EDP_PSR_TP1_TIME_2500us (2<<4)
+#define EDP_PSR_TP1_TIME_0us (3<<4)
+#define EDP_PSR_IDLE_FRAME_SHIFT 0
+
+#define EDP_PSR_AUX_CTL 0x64810
+#define EDP_PSR_AUX_DATA1 0x64814
+#define EDP_PSR_DPCD_COMMAND 0x80060000
+#define EDP_PSR_AUX_DATA2 0x64818
+#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
+#define EDP_PSR_AUX_DATA3 0x6481c
+#define EDP_PSR_AUX_DATA4 0x64820
+#define EDP_PSR_AUX_DATA5 0x64824
+
+#define EDP_PSR_STATUS_CTL 0x64840
+#define EDP_PSR_STATUS_STATE_MASK (7<<29)
+#define EDP_PSR_STATUS_STATE_IDLE (0<<29)
+#define EDP_PSR_STATUS_STATE_SRDONACK (1<<29)
+#define EDP_PSR_STATUS_STATE_SRDENT (2<<29)
+#define EDP_PSR_STATUS_STATE_BUFOFF (3<<29)
+#define EDP_PSR_STATUS_STATE_BUFON (4<<29)
+#define EDP_PSR_STATUS_STATE_AUXACK (5<<29)
+#define EDP_PSR_STATUS_STATE_SRDOFFACK (6<<29)
+#define EDP_PSR_STATUS_LINK_MASK (3<<26)
+#define EDP_PSR_STATUS_LINK_FULL_OFF (0<<26)
+#define EDP_PSR_STATUS_LINK_FULL_ON (1<<26)
+#define EDP_PSR_STATUS_LINK_STANDBY (2<<26)
+#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_SHIFT 20
+#define EDP_PSR_STATUS_MAX_SLEEP_TIMER_MASK 0x1f
+#define EDP_PSR_STATUS_COUNT_SHIFT 16
+#define EDP_PSR_STATUS_COUNT_MASK 0xf
+#define EDP_PSR_STATUS_AUX_ERROR (1<<15)
+#define EDP_PSR_STATUS_AUX_SENDING (1<<12)
+#define EDP_PSR_STATUS_SENDING_IDLE (1<<9)
+#define EDP_PSR_STATUS_SENDING_TP2_TP3 (1<<8)
+#define EDP_PSR_STATUS_SENDING_TP1 (1<<4)
+#define EDP_PSR_STATUS_IDLE_MASK 0xf
+
+#define EDP_PSR_PERF_CNT 0x64844
+#define EDP_PSR_PERF_CNT_MASK 0xffffff
+
+#define EDP_PSR_DEBUG_CTL 0x64860
+#define EDP_PSR_DEBUG_MASK_LPSP (1<<27)
+#define EDP_PSR_DEBUG_MASK_MEMUP (1<<26)
+#define EDP_PSR_DEBUG_MASK_HPD (1<<25)
+
/* VGA port control */
#define ADPA 0x61100
#define PCH_ADPA 0xe1100
@@ -1856,10 +1941,16 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114)
-/* HDMI/DP bits are gen4+ */
-#define PORTB_HOTPLUG_LIVE_STATUS (1 << 29)
+/*
+ * HDMI/DP bits are gen4+
+ *
+ * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
+ * Please check the detailed lore in the commit message for experimental
+ * evidence.
+ */
+#define PORTD_HOTPLUG_LIVE_STATUS (1 << 29)
#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
-#define PORTD_HOTPLUG_LIVE_STATUS (1 << 27)
+#define PORTB_HOTPLUG_LIVE_STATUS (1 << 27)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
@@ -2045,6 +2136,7 @@
* (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte
* of the infoframe structure specified by CEA-861. */
#define VIDEO_DIP_DATA_SIZE 32
+#define VIDEO_DIP_VSC_DATA_SIZE 36
#define VIDEO_DIP_CTL 0x61170
/* Pre HSW: */
#define VIDEO_DIP_ENABLE (1 << 31)
@@ -2192,6 +2284,8 @@
#define BLC_PWM_CPU_CTL2 0x48250
#define BLC_PWM_CPU_CTL 0x48254
+#define HSW_BLC_PWM2_CTL 0x48350
+
/* PCH CTL1 is totally different, all but the below bits are reserved. CTL2 is
* like the normal CTL from gen4 and earlier. Hooray for confusing naming. */
#define BLC_PWM_PCH_CTL1 0xc8250
@@ -2200,6 +2294,12 @@
#define BLM_PCH_POLARITY (1 << 29)
#define BLC_PWM_PCH_CTL2 0xc8254
+#define UTIL_PIN_CTL 0x48400
+#define UTIL_PIN_ENABLE (1 << 31)
+
+#define PCH_GTC_CTL 0xe7000
+#define PCH_GTC_ENABLE (1 << 31)
+
/* TV port control */
#define TV_CTL 0x68000
/** Enables the TV encoder */
@@ -3113,9 +3213,6 @@
#define MLTR_WM2_SHIFT 8
/* the unit of memory self-refresh latency time is 0.5us */
#define ILK_SRLT_MASK 0x3f
-#define ILK_LATENCY(shift) (I915_READ(MLTR_ILK) >> (shift) & ILK_SRLT_MASK)
-#define ILK_READ_WM1_LATENCY() ILK_LATENCY(MLTR_WM1_SHIFT)
-#define ILK_READ_WM2_LATENCY() ILK_LATENCY(MLTR_WM2_SHIFT)
/* define the fifo size on Ironlake */
#define ILK_DISPLAY_FIFO 128
@@ -3162,12 +3259,6 @@
#define SSKPD_WM2_SHIFT 16
#define SSKPD_WM3_SHIFT 24
-#define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK)
-#define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT)
-#define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT)
-#define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT)
-#define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT)
-
/*
* The two pipe frame counter registers are not synchronized, so
* reading a stable value is somewhat tricky. The following code
@@ -3718,6 +3809,9 @@
#define DE_PLANEA_FLIP_DONE_IVB (1<<3)
#define DE_PIPEA_VBLANK_IVB (1<<0)
+#define DE_PIPE_VBLANK_ILK(pipe) (1 << ((pipe * 8) + 7))
+#define DE_PIPE_VBLANK_IVB(pipe) (1 << (pipe * 5))
+
#define VLV_MASTER_IER 0x4400c /* Gunit master IER */
#define MASTER_INTERRUPT_ENABLE (1<<31)
@@ -3880,6 +3974,7 @@
#define SERR_INT_TRANS_C_FIFO_UNDERRUN (1<<6)
#define SERR_INT_TRANS_B_FIFO_UNDERRUN (1<<3)
#define SERR_INT_TRANS_A_FIFO_UNDERRUN (1<<0)
+#define SERR_INT_TRANS_FIFO_UNDERRUN(pipe) (1<<(pipe*3))
/* digital port hotplug */
#define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */
@@ -4073,6 +4168,8 @@
_TRANSCODER(trans, HSW_VIDEO_DIP_CTL_A, HSW_VIDEO_DIP_CTL_B)
#define HSW_TVIDEO_DIP_AVI_DATA(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_AVI_DATA_A, HSW_VIDEO_DIP_AVI_DATA_B)
+#define HSW_TVIDEO_DIP_VS_DATA(trans) \
+ _TRANSCODER(trans, HSW_VIDEO_DIP_VS_DATA_A, HSW_VIDEO_DIP_VS_DATA_B)
#define HSW_TVIDEO_DIP_SPD_DATA(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_SPD_DATA_A, HSW_VIDEO_DIP_SPD_DATA_B)
#define HSW_TVIDEO_DIP_GCP(trans) \
@@ -4080,6 +4177,13 @@
#define HSW_TVIDEO_DIP_VSC_DATA(trans) \
_TRANSCODER(trans, HSW_VIDEO_DIP_VSC_DATA_A, HSW_VIDEO_DIP_VSC_DATA_B)
+#define HSW_STEREO_3D_CTL_A 0x70020
+#define S3D_ENABLE (1<<31)
+#define HSW_STEREO_3D_CTL_B 0x71020
+
+#define HSW_STEREO_3D_CTL(trans) \
+ _TRANSCODER(trans, HSW_STEREO_3D_CTL_A, HSW_STEREO_3D_CTL_A)
+
#define _PCH_TRANS_HTOTAL_B 0xe1000
#define _PCH_TRANS_HBLANK_B 0xe1004
#define _PCH_TRANS_HSYNC_B 0xe1008
@@ -4432,7 +4536,7 @@
#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
-#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x3e <<22)
/* legacy values */
#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
@@ -4468,6 +4572,10 @@
#define GT_FIFO_FREE_ENTRIES 0x120008
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
+#define HSW_IDICR 0x9008
+#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
+#define HSW_EDRAM_PRESENT 0x120010
+
#define GEN6_UCGCTL1 0x9400
# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE (1 << 5)
# define GEN6_CSUNIT_CLOCK_GATE_DISABLE (1 << 7)
@@ -4736,8 +4844,8 @@
#define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */
#define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */
#define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */
-#define HSW_PWR_WELL_ENABLE (1<<31)
-#define HSW_PWR_WELL_STATE (1<<30)
+#define HSW_PWR_WELL_ENABLE_REQUEST (1<<31)
+#define HSW_PWR_WELL_STATE_ENABLED (1<<30)
#define HSW_PWR_WELL_CTL5 0x45410
#define HSW_PWR_WELL_ENABLE_SINGLE_STEP (1<<31)
#define HSW_PWR_WELL_PWR_GATE_OVERRIDE (1<<20)
@@ -4858,7 +4966,8 @@
#define SBI_SSCAUXDIV6 0x0610
#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
#define SBI_DBUFF0 0x2a00
-#define SBI_DBUFF0_ENABLE (1<<0)
+#define SBI_GEN0 0x1f00
+#define SBI_GEN0_CFG_BUFFENABLE_DISABLE (1<<0)
/* LPT PIXCLK_GATE */
#define PIXCLK_GATE 0xC6020
@@ -4924,7 +5033,14 @@
#define LCPLL_CLK_FREQ_450 (0<<26)
#define LCPLL_CD_CLOCK_DISABLE (1<<25)
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
+#define LCPLL_POWER_DOWN_ALLOW (1<<22)
#define LCPLL_CD_SOURCE_FCLK (1<<21)
+#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
+
+#define D_COMP (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
+#define D_COMP_RCOMP_IN_PROGRESS (1<<9)
+#define D_COMP_COMP_FORCE (1<<8)
+#define D_COMP_COMP_DISABLE (1<<0)
/* Pipe WM_LINETIME - watermark line time */
#define PIPE_WM_LINETIME_A 0x45270
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 6875b5654c6..a777e7f3b0d 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -409,6 +409,71 @@ static const struct attribute *gen6_attrs[] = {
NULL,
};
+static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+
+ struct device *kdev = container_of(kobj, struct device, kobj);
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ struct i915_error_state_file_priv error_priv;
+ struct drm_i915_error_state_buf error_str;
+ ssize_t ret_count = 0;
+ int ret;
+
+ memset(&error_priv, 0, sizeof(error_priv));
+
+ ret = i915_error_state_buf_init(&error_str, count, off);
+ if (ret)
+ return ret;
+
+ error_priv.dev = dev;
+ i915_error_state_get(dev, &error_priv);
+
+ ret = i915_error_state_to_str(&error_str, &error_priv);
+ if (ret)
+ goto out;
+
+ ret_count = count < error_str.bytes ? count : error_str.bytes;
+
+ memcpy(buf, error_str.buf, ret_count);
+out:
+ i915_error_state_put(&error_priv);
+ i915_error_state_buf_release(&error_str);
+
+ return ret ?: ret_count;
+}
+
+static ssize_t error_state_write(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *kdev = container_of(kobj, struct device, kobj);
+ struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev);
+ struct drm_device *dev = minor->dev;
+ int ret;
+
+ DRM_DEBUG_DRIVER("Resetting error state\n");
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ i915_destroy_error_state(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return count;
+}
+
+static struct bin_attribute error_state_attr = {
+ .attr.name = "error",
+ .attr.mode = S_IRUSR | S_IWUSR,
+ .size = 0,
+ .read = error_state_read,
+ .write = error_state_write,
+};
+
void i915_setup_sysfs(struct drm_device *dev)
{
int ret;
@@ -432,10 +497,16 @@ void i915_setup_sysfs(struct drm_device *dev)
if (ret)
DRM_ERROR("gen6 sysfs setup failed\n");
}
+
+ ret = sysfs_create_bin_file(&dev->primary->kdev.kobj,
+ &error_state_attr);
+ if (ret)
+ DRM_ERROR("error_state sysfs setup failed\n");
}
void i915_teardown_sysfs(struct drm_device *dev)
{
+ sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr);
sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs);
device_remove_bin_file(&dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 3db4a681771..e2c5ee6f619 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -33,47 +33,52 @@ TRACE_EVENT(i915_gem_object_create,
TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
);
-TRACE_EVENT(i915_gem_object_bind,
- TP_PROTO(struct drm_i915_gem_object *obj, bool mappable),
- TP_ARGS(obj, mappable),
+TRACE_EVENT(i915_vma_bind,
+ TP_PROTO(struct i915_vma *vma, bool mappable),
+ TP_ARGS(vma, mappable),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
+ __field(struct i915_address_space *, vm)
__field(u32, offset)
__field(u32, size)
__field(bool, mappable)
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->offset = obj->gtt_space->start;
- __entry->size = obj->gtt_space->size;
+ __entry->obj = vma->obj;
+ __entry->vm = vma->vm;
+ __entry->offset = vma->node.start;
+ __entry->size = vma->node.size;
__entry->mappable = mappable;
),
- TP_printk("obj=%p, offset=%08x size=%x%s",
+ TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
__entry->obj, __entry->offset, __entry->size,
- __entry->mappable ? ", mappable" : "")
+ __entry->mappable ? ", mappable" : "",
+ __entry->vm)
);
-TRACE_EVENT(i915_gem_object_unbind,
- TP_PROTO(struct drm_i915_gem_object *obj),
- TP_ARGS(obj),
+TRACE_EVENT(i915_vma_unbind,
+ TP_PROTO(struct i915_vma *vma),
+ TP_ARGS(vma),
TP_STRUCT__entry(
__field(struct drm_i915_gem_object *, obj)
+ __field(struct i915_address_space *, vm)
__field(u32, offset)
__field(u32, size)
),
TP_fast_assign(
- __entry->obj = obj;
- __entry->offset = obj->gtt_space->start;
- __entry->size = obj->gtt_space->size;
+ __entry->obj = vma->obj;
+ __entry->vm = vma->vm;
+ __entry->offset = vma->node.start;
+ __entry->size = vma->node.size;
),
- TP_printk("obj=%p, offset=%08x size=%x",
- __entry->obj, __entry->offset, __entry->size)
+ TP_printk("obj=%p, offset=%08x size=%x vm=%p",
+ __entry->obj, __entry->offset, __entry->size, __entry->vm)
);
TRACE_EVENT(i915_gem_object_change_domain,
@@ -406,10 +411,12 @@ TRACE_EVENT(i915_flip_complete,
TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);
-TRACE_EVENT(i915_reg_rw,
- TP_PROTO(bool write, u32 reg, u64 val, int len),
+TRACE_EVENT_CONDITION(i915_reg_rw,
+ TP_PROTO(bool write, u32 reg, u64 val, int len, bool trace),
+
+ TP_ARGS(write, reg, val, len, trace),
- TP_ARGS(write, reg, val, len),
+ TP_CONDITION(trace),
TP_STRUCT__entry(
__field(u64, val)
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index bcbbaea2a78..57fe1ae32a0 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -28,7 +28,7 @@ static const u8 intel_dsm_guid[] = {
0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
};
-static int intel_dsm(acpi_handle handle, int func, int arg)
+static int intel_dsm(acpi_handle handle, int func)
{
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_object_list input;
@@ -46,8 +46,9 @@ static int intel_dsm(acpi_handle handle, int func, int arg)
params[1].integer.value = INTEL_DSM_REVISION_ID;
params[2].type = ACPI_TYPE_INTEGER;
params[2].integer.value = func;
- params[3].type = ACPI_TYPE_INTEGER;
- params[3].integer.value = arg;
+ params[3].type = ACPI_TYPE_PACKAGE;
+ params[3].package.count = 0;
+ params[3].package.elements = NULL;
ret = acpi_evaluate_object(handle, "_DSM", &input, &output);
if (ret) {
@@ -151,8 +152,9 @@ static void intel_dsm_platform_mux_info(void)
params[1].integer.value = INTEL_DSM_REVISION_ID;
params[2].type = ACPI_TYPE_INTEGER;
params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO;
- params[3].type = ACPI_TYPE_INTEGER;
- params[3].integer.value = 0;
+ params[3].type = ACPI_TYPE_PACKAGE;
+ params[3].package.count = 0;
+ params[3].package.elements = NULL;
ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input,
&output);
@@ -205,7 +207,7 @@ static bool intel_dsm_pci_probe(struct pci_dev *pdev)
return false;
}
- ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
+ ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS);
if (ret < 0) {
DRM_DEBUG_KMS("failed to get supported _DSM functions\n");
return false;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 3acec8c4816..b5a3875f22c 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -52,15 +52,14 @@ struct intel_crt {
u32 adpa_reg;
};
-static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
+static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
{
- return container_of(intel_attached_encoder(connector),
- struct intel_crt, base);
+ return container_of(encoder, struct intel_crt, base);
}
-static struct intel_crt *intel_encoder_to_crt(struct intel_encoder *encoder)
+static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
{
- return container_of(encoder, struct intel_crt, base);
+ return intel_encoder_to_crt(intel_attached_encoder(connector));
}
static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
@@ -238,17 +237,14 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
return true;
}
-static void intel_crt_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_crt_mode_set(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crt *crt =
- intel_encoder_to_crt(to_intel_encoder(encoder));
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_device *dev = encoder->base.dev;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
u32 adpa;
if (HAS_PCH_SPLIT(dev))
@@ -265,14 +261,14 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (HAS_PCH_LPT(dev))
; /* Those bits don't exist here */
else if (HAS_PCH_CPT(dev))
- adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
- else if (intel_crtc->pipe == 0)
+ adpa |= PORT_TRANS_SEL_CPT(crtc->pipe);
+ else if (crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
else
adpa |= ADPA_PIPE_B_SELECT;
if (!HAS_PCH_SPLIT(dev))
- I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);
+ I915_WRITE(BCLRPAT(crtc->pipe), 0);
I915_WRITE(crt->adpa_reg, adpa);
}
@@ -613,6 +609,10 @@ intel_crt_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status;
struct intel_load_detect_pipe tmp;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
+ connector->base.id, drm_get_connector_name(connector),
+ force);
+
if (I915_HAS_HOTPLUG(dev)) {
/* We can not rely on the HPD pin always being correctly wired
* up, for example many KVM do not pass it through, and so
@@ -707,10 +707,6 @@ static void intel_crt_reset(struct drm_connector *connector)
* Routines for controlling stuff on the analog port
*/
-static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
- .mode_set = intel_crt_mode_set,
-};
-
static const struct drm_connector_funcs intel_crt_connector_funcs = {
.reset = intel_crt_reset,
.dpms = intel_crt_dpms,
@@ -800,6 +796,7 @@ void intel_crt_init(struct drm_device *dev)
crt->adpa_reg = ADPA;
crt->base.compute_config = intel_crt_compute_config;
+ crt->base.mode_set = intel_crt_mode_set;
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
crt->base.get_config = intel_crt_get_config;
@@ -811,7 +808,6 @@ void intel_crt_init(struct drm_device *dev)
crt->base.get_hw_state = intel_crt_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
- drm_encoder_helper_add(&crt->base.base, &crt_encoder_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b042ee5c407..63aca49d11a 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -84,25 +84,17 @@ static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
* in either FDI or DP modes only, as HDMI connections will work with both
* of those
*/
-static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
- bool use_fdi_mode)
+static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
int i;
- const u32 *ddi_translations = ((use_fdi_mode) ?
+ const u32 *ddi_translations = (port == PORT_E) ?
hsw_ddi_translations_fdi :
- hsw_ddi_translations_dp);
+ hsw_ddi_translations_dp;
- DRM_DEBUG_DRIVER("Initializing DDI buffers for port %c in %s mode\n",
- port_name(port),
- use_fdi_mode ? "FDI" : "DP");
-
- WARN((use_fdi_mode && (port != PORT_E)),
- "Programming port %c in FDI mode, this probably will not work.\n",
- port_name(port));
-
- for (i=0, reg=DDI_BUF_TRANS(port); i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
+ for (i = 0, reg = DDI_BUF_TRANS(port);
+ i < ARRAY_SIZE(hsw_ddi_translations_fdi); i++) {
I915_WRITE(reg, ddi_translations[i]);
reg += 4;
}
@@ -118,14 +110,8 @@ void intel_prepare_ddi(struct drm_device *dev)
if (!HAS_DDI(dev))
return;
- for (port = PORT_A; port < PORT_E; port++)
- intel_prepare_ddi_buffers(dev, port, false);
-
- /* DDI E is the suggested one to work in FDI mode, so program is as such
- * by default. It will have to be re-programmed in case a digital DP
- * output will be detected on it
- */
- intel_prepare_ddi_buffers(dev, PORT_E, true);
+ for (port = PORT_A; port <= PORT_E; port++)
+ intel_prepare_ddi_buffers(dev, port);
}
static const long hsw_ddi_buf_ctl_values[] = {
@@ -281,25 +267,22 @@ void hsw_fdi_link_train(struct drm_crtc *crtc)
DRM_ERROR("FDI link training failed!\n");
}
-static void intel_ddi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_ddi_mode_set(struct intel_encoder *encoder)
{
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
- int port = intel_ddi_get_encoder_port(intel_encoder);
- int pipe = intel_crtc->pipe;
- int type = intel_encoder->type;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ int port = intel_ddi_get_encoder_port(encoder);
+ int pipe = crtc->pipe;
+ int type = encoder->type;
+ struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
DRM_DEBUG_KMS("Preparing DDI mode on port %c, pipe %c\n",
port_name(port), pipe_name(pipe));
- intel_crtc->eld_vld = false;
+ crtc->eld_vld = false;
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *intel_dig_port =
- enc_to_dig_port(encoder);
+ enc_to_dig_port(&encoder->base);
intel_dp->DP = intel_dig_port->saved_port_bits |
DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
@@ -307,17 +290,17 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
if (intel_dp->has_audio) {
DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
- pipe_name(intel_crtc->pipe));
+ pipe_name(crtc->pipe));
/* write eld */
DRM_DEBUG_DRIVER("DP audio: write eld information\n");
- intel_write_eld(encoder, adjusted_mode);
+ intel_write_eld(&encoder->base, adjusted_mode);
}
intel_dp_init_link_config(intel_dp);
} else if (type == INTEL_OUTPUT_HDMI) {
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
if (intel_hdmi->has_audio) {
/* Proper support for digital audio needs a new logic
@@ -325,14 +308,14 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
* patch bombing.
*/
DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
- pipe_name(intel_crtc->pipe));
+ pipe_name(crtc->pipe));
/* write eld */
DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
- intel_write_eld(encoder, adjusted_mode);
+ intel_write_eld(&encoder->base, adjusted_mode);
}
- intel_hdmi->set_infoframes(encoder, adjusted_mode);
+ intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
}
}
@@ -1118,6 +1101,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
intel_dp_stop_link_train(intel_dp);
ironlake_edp_backlight_on(intel_dp);
+ intel_edp_psr_enable(intel_dp);
}
if (intel_crtc->eld_vld && type != INTEL_OUTPUT_EDP) {
@@ -1148,16 +1132,20 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ intel_edp_psr_disable(intel_dp);
ironlake_edp_backlight_off(intel_dp);
}
}
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
- if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ uint32_t lcpll = I915_READ(LCPLL_CTL);
+
+ if (lcpll & LCPLL_CD_SOURCE_FCLK)
+ return 800000;
+ else if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
return 450000;
- else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
- LCPLL_CLK_FREQ_450)
+ else if ((lcpll & LCPLL_CLK_FREQ_MASK) == LCPLL_CLK_FREQ_450)
return 450000;
else if (IS_ULT(dev_priv->dev))
return 337500;
@@ -1309,10 +1297,6 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
.destroy = intel_ddi_destroy,
};
-static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
- .mode_set = intel_ddi_mode_set,
-};
-
void intel_ddi_init(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1337,9 +1321,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
drm_encoder_init(dev, encoder, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
intel_encoder->compute_config = intel_ddi_compute_config;
+ intel_encoder->mode_set = intel_ddi_mode_set;
intel_encoder->enable = intel_enable_ddi;
intel_encoder->pre_enable = intel_ddi_pre_enable;
intel_encoder->disable = intel_disable_ddi;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5fb305840db..38452d82ac7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -45,6 +45,15 @@ bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
+static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config);
+static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config);
+
+static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *old_fb);
+
+
typedef struct {
int min, max;
} intel_range_t;
@@ -54,7 +63,6 @@ typedef struct {
int p2_slow, p2_fast;
} intel_p2_t;
-#define INTEL_P2_NUM 2
typedef struct intel_limit intel_limit_t;
struct intel_limit {
intel_range_t dot, vco, n, m, m1, m2, p, p1;
@@ -84,7 +92,7 @@ intel_fdi_link_freq(struct drm_device *dev)
return 27;
}
-static const intel_limit_t intel_limits_i8xx_dvo = {
+static const intel_limit_t intel_limits_i8xx_dac = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 930000, .max = 1400000 },
.n = { .min = 3, .max = 16 },
@@ -97,6 +105,19 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
.p2_slow = 4, .p2_fast = 2 },
};
+static const intel_limit_t intel_limits_i8xx_dvo = {
+ .dot = { .min = 25000, .max = 350000 },
+ .vco = { .min = 930000, .max = 1400000 },
+ .n = { .min = 3, .max = 16 },
+ .m = { .min = 96, .max = 140 },
+ .m1 = { .min = 18, .max = 26 },
+ .m2 = { .min = 6, .max = 16 },
+ .p = { .min = 4, .max = 128 },
+ .p1 = { .min = 2, .max = 33 },
+ .p2 = { .dot_limit = 165000,
+ .p2_slow = 4, .p2_fast = 4 },
+};
+
static const intel_limit_t intel_limits_i8xx_lvds = {
.dot = { .min = 25000, .max = 350000 },
.vco = { .min = 930000, .max = 1400000 },
@@ -405,8 +426,10 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
} else {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits_i8xx_lvds;
- else
+ else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO))
limit = &intel_limits_i8xx_dvo;
+ else
+ limit = &intel_limits_i8xx_dac;
}
return limit;
}
@@ -667,7 +690,7 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
{
u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
u32 m, n, fastclk;
- u32 updrate, minupdate, fracbits, p;
+ u32 updrate, minupdate, p;
unsigned long bestppm, ppm, absppm;
int dotclk, flag;
@@ -678,7 +701,6 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
fastclk = dotclk / (2*100);
updrate = 0;
minupdate = 19200;
- fracbits = 1;
n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
bestm1 = bestm2 = bestp1 = bestp2 = 0;
@@ -892,8 +914,8 @@ static const char *state_string(bool enabled)
}
/* Only for pre-ILK configs */
-static void assert_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
+void assert_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
{
int reg;
u32 val;
@@ -906,10 +928,8 @@ static void assert_pll(struct drm_i915_private *dev_priv,
"PLL state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
}
-#define assert_pll_enabled(d, p) assert_pll(d, p, true)
-#define assert_pll_disabled(d, p) assert_pll(d, p, false)
-static struct intel_shared_dpll *
+struct intel_shared_dpll *
intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
@@ -921,9 +941,9 @@ intel_crtc_to_shared_dpll(struct intel_crtc *crtc)
}
/* For ILK+ */
-static void assert_shared_dpll(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll,
- bool state)
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ bool state)
{
bool cur_state;
struct intel_dpll_hw_state hw_state;
@@ -942,8 +962,6 @@ static void assert_shared_dpll(struct drm_i915_private *dev_priv,
"%s assertion failure (expected %s, current %s)\n",
pll->name, state_string(state), state_string(cur_state));
}
-#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
-#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
enum pipe pipe, bool state)
@@ -1007,15 +1025,19 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
-static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
{
int reg;
u32 val;
+ bool cur_state;
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
- WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
+ cur_state = !!(val & FDI_RX_PLL_ENABLE);
+ WARN(cur_state != state,
+ "FDI RX PLL assertion failure (expected %s, current %s)\n",
+ state_string(state), state_string(cur_state));
}
static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
@@ -1111,7 +1133,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
}
/* Need to check both planes against the pipe */
- for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+ for_each_pipe(i) {
reg = DSPCNTR(i);
val = I915_READ(reg);
cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
@@ -1301,51 +1323,92 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}
-/**
- * intel_enable_pll - enable a PLL
- * @dev_priv: i915 private structure
- * @pipe: pipe PLL to enable
- *
- * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to
- * make sure the PLL reg is writable first though, since the panel write
- * protect mechanism may be enabled.
- *
- * Note! This is for pre-ILK only.
- *
- * Unfortunately needed by dvo_ns2501 since the dvo depends on it running.
- */
-static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+static void vlv_enable_pll(struct intel_crtc *crtc)
{
- int reg;
- u32 val;
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int reg = DPLL(crtc->pipe);
+ u32 dpll = crtc->config.dpll_hw_state.dpll;
- assert_pipe_disabled(dev_priv, pipe);
+ assert_pipe_disabled(dev_priv, crtc->pipe);
/* No really, not for ILK+ */
- BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);
+ BUG_ON(!IS_VALLEYVIEW(dev_priv->dev));
/* PLL is protected by panel, make sure we can write it */
if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
- assert_panel_unlocked(dev_priv, pipe);
+ assert_panel_unlocked(dev_priv, crtc->pipe);
- reg = DPLL(pipe);
- val = I915_READ(reg);
- val |= DPLL_VCO_ENABLE;
+ I915_WRITE(reg, dpll);
+ POSTING_READ(reg);
+ udelay(150);
+
+ if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
+ DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
+
+ I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md);
+ POSTING_READ(DPLL_MD(crtc->pipe));
/* We do this three times for luck */
- I915_WRITE(reg, val);
+ I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
- I915_WRITE(reg, val);
+ I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
- I915_WRITE(reg, val);
+ I915_WRITE(reg, dpll);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+}
+
+static void i9xx_enable_pll(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int reg = DPLL(crtc->pipe);
+ u32 dpll = crtc->config.dpll_hw_state.dpll;
+
+ assert_pipe_disabled(dev_priv, crtc->pipe);
+
+ /* No really, not for ILK+ */
+ BUG_ON(dev_priv->info->gen >= 5);
+
+ /* PLL is protected by panel, make sure we can write it */
+ if (IS_MOBILE(dev) && !IS_I830(dev))
+ assert_panel_unlocked(dev_priv, crtc->pipe);
+
+ I915_WRITE(reg, dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(reg);
+ udelay(150);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ I915_WRITE(DPLL_MD(crtc->pipe),
+ crtc->config.dpll_hw_state.dpll_md);
+ } else {
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(reg, dpll);
+ }
+
+ /* We do this three times for luck */
+ I915_WRITE(reg, dpll);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+ I915_WRITE(reg, dpll);
+ POSTING_READ(reg);
+ udelay(150); /* wait for warmup */
+ I915_WRITE(reg, dpll);
POSTING_READ(reg);
udelay(150); /* wait for warmup */
}
/**
- * intel_disable_pll - disable a PLL
+ * i9xx_disable_pll - disable a PLL
* @dev_priv: i915 private structure
* @pipe: pipe PLL to disable
*
@@ -1353,11 +1416,8 @@ static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
*
* Note! This is for pre-ILK only.
*/
-static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
+static void i9xx_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- int reg;
- u32 val;
-
/* Don't disable pipe A or pipe A PLLs if needed */
if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
return;
@@ -1365,11 +1425,8 @@ static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, pipe);
- reg = DPLL(pipe);
- val = I915_READ(reg);
- val &= ~DPLL_VCO_ENABLE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ I915_WRITE(DPLL(pipe), 0);
+ POSTING_READ(DPLL(pipe));
}
void vlv_wait_port_ready(struct drm_i915_private *dev_priv, int port)
@@ -1819,7 +1876,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
return 0;
err_unpin:
- i915_gem_object_unpin(obj);
+ i915_gem_object_unpin_from_display_plane(obj);
err_interruptible:
dev_priv->mm.interruptible = true;
return ret;
@@ -1828,7 +1885,7 @@ err_interruptible:
void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
{
i915_gem_object_unpin_fence(obj);
- i915_gem_object_unpin(obj);
+ i915_gem_object_unpin_from_display_plane(obj);
}
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
@@ -1942,16 +1999,17 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
intel_crtc->dspaddr_offset = linear_offset;
}
- DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
- obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
+ fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
I915_MODIFY_DISPBASE(DSPSURF(plane),
- obj->gtt_offset + intel_crtc->dspaddr_offset);
+ i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
} else
- I915_WRITE(DSPADDR(plane), obj->gtt_offset + linear_offset);
+ I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
POSTING_READ(reg);
return 0;
@@ -2031,11 +2089,12 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
- DRM_DEBUG_KMS("Writing base %08X %08lX %d %d %d\n",
- obj->gtt_offset, linear_offset, x, y, fb->pitches[0]);
+ DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
+ i915_gem_obj_ggtt_offset(obj), linear_offset, x, y,
+ fb->pitches[0]);
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_MODIFY_DISPBASE(DSPSURF(plane),
- obj->gtt_offset + intel_crtc->dspaddr_offset);
+ i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
if (IS_HASWELL(dev)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
} else {
@@ -2183,6 +2242,20 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
+ /* Update pipe size and adjust fitter if needed */
+ if (i915_fastboot) {
+ I915_WRITE(PIPESRC(intel_crtc->pipe),
+ ((crtc->mode.hdisplay - 1) << 16) |
+ (crtc->mode.vdisplay - 1));
+ if (!intel_crtc->config.pch_pfit.size &&
+ (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
+ intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
+ I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
+ I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
+ I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
+ }
+ }
+
ret = dev_priv->display.update_plane(crtc, fb, x, y);
if (ret) {
intel_unpin_fb_obj(to_intel_framebuffer(fb)->obj);
@@ -2203,6 +2276,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
}
intel_update_fbc(dev);
+ intel_edp_psr_update(dev);
mutex_unlock(&dev->struct_mutex);
intel_crtc_update_sarea_pos(crtc, x, y);
@@ -2523,7 +2597,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- u32 reg, temp, i;
+ u32 reg, temp, i, j;
/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
@@ -2539,97 +2613,99 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
I915_READ(FDI_RX_IIR(pipe)));
- /* enable CPU FDI TX and PCH FDI RX */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~FDI_DP_PORT_WIDTH_MASK;
- temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
- temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
- temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
- temp |= FDI_COMPOSITE_SYNC;
- I915_WRITE(reg, temp | FDI_TX_ENABLE);
-
- I915_WRITE(FDI_RX_MISC(pipe),
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
-
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~FDI_LINK_TRAIN_AUTO;
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
- temp |= FDI_COMPOSITE_SYNC;
- I915_WRITE(reg, temp | FDI_RX_ENABLE);
+ /* Try each vswing and preemphasis setting twice before moving on */
+ for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
+ /* disable first in case we need to retry */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
+ temp &= ~FDI_TX_ENABLE;
+ I915_WRITE(reg, temp);
- POSTING_READ(reg);
- udelay(150);
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_AUTO;
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp &= ~FDI_RX_ENABLE;
+ I915_WRITE(reg, temp);
- for (i = 0; i < 4; i++) {
+ /* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
+ temp &= ~FDI_DP_PORT_WIDTH_MASK;
+ temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+ temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[i];
- I915_WRITE(reg, temp);
+ temp |= snb_b_fdi_train_param[j/2];
+ temp |= FDI_COMPOSITE_SYNC;
+ I915_WRITE(reg, temp | FDI_TX_ENABLE);
- POSTING_READ(reg);
- udelay(500);
+ I915_WRITE(FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
- reg = FDI_RX_IIR(pipe);
+ reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
- if (temp & FDI_RX_BIT_LOCK ||
- (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
- I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
- break;
- }
- }
- if (i == 4)
- DRM_ERROR("FDI train 1 fail!\n");
+ temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+ temp |= FDI_COMPOSITE_SYNC;
+ I915_WRITE(reg, temp | FDI_RX_ENABLE);
- /* Train 2 */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~FDI_LINK_TRAIN_NONE_IVB;
- temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
- I915_WRITE(reg, temp);
+ POSTING_READ(reg);
+ udelay(1); /* should be 0.5us */
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
- I915_WRITE(reg, temp);
+ for (i = 0; i < 4; i++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
- POSTING_READ(reg);
- udelay(150);
+ if (temp & FDI_RX_BIT_LOCK ||
+ (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
+ I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
+ i);
+ break;
+ }
+ udelay(1); /* should be 0.5us */
+ }
+ if (i == 4) {
+ DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
+ continue;
+ }
- for (i = 0; i < 4; i++) {
+ /* Train 2 */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
- temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
- temp |= snb_b_fdi_train_param[i];
+ temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
I915_WRITE(reg, temp);
POSTING_READ(reg);
- udelay(500);
+ udelay(2); /* should be 1.5us */
- reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ for (i = 0; i < 4; i++) {
+ reg = FDI_RX_IIR(pipe);
+ temp = I915_READ(reg);
+ DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
- if (temp & FDI_RX_SYMBOL_LOCK) {
- I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
- break;
+ if (temp & FDI_RX_SYMBOL_LOCK ||
+ (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
+ I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+ DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
+ i);
+ goto train_done;
+ }
+ udelay(2); /* should be 1.5us */
}
+ if (i == 4)
+ DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
}
- if (i == 4)
- DRM_ERROR("FDI train 2 fail!\n");
+train_done:
DRM_DEBUG_KMS("FDI train done.\n");
}
@@ -2927,15 +3003,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
- /* XXX: pch pll's can be enabled any time before we enable the PCH
- * transcoder, and we actually should do this to not upset any PCH
- * transcoder that already use the clock when we share it.
- *
- * Note that enable_shared_dpll tries to do the right thing, but
- * get_shared_dpll unconditionally resets the pll - we need that to have
- * the right LVDS enable sequence. */
- ironlake_enable_shared_dpll(intel_crtc);
-
+ /* We need to program the right clock selection before writing the pixel
+ * multiplier into the DPLL. */
if (HAS_PCH_CPT(dev)) {
u32 sel;
@@ -2949,6 +3018,15 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
I915_WRITE(PCH_DPLL_SEL, temp);
}
+ /* XXX: pch pll's can be enabled any time before we enable the PCH
+ * transcoder, and we actually should do this to not upset any PCH
+ * transcoder that already uses the clock when we share it.
+ *
+ * Note that enable_shared_dpll tries to do the right thing, but
+ * get_shared_dpll unconditionally resets the pll - we need that to have
+ * the right LVDS enable sequence. */
+ ironlake_enable_shared_dpll(intel_crtc);
+
/* set transcoder timing, panel must allow it */
assert_panel_unlocked(dev_priv, pipe);
ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
@@ -3031,7 +3109,7 @@ static void intel_put_shared_dpll(struct intel_crtc *crtc)
crtc->config.shared_dpll = DPLL_ID_PRIVATE;
}
-static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc, u32 dpll, u32 fp)
+static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc);
@@ -3045,7 +3123,7 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
if (HAS_PCH_IBX(dev_priv->dev)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
- i = crtc->pipe;
+ i = (enum intel_dpll_id) crtc->pipe;
pll = &dev_priv->shared_dplls[i];
DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
@@ -3061,8 +3139,8 @@ static struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc,
if (pll->refcount == 0)
continue;
- if (dpll == (I915_READ(PCH_DPLL(pll->id)) & 0x7fffffff) &&
- fp == I915_READ(PCH_FP0(pll->id))) {
+ if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state,
+ sizeof(pll->hw_state)) == 0) {
DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, active %d)\n",
crtc->base.base.id,
pll->name, pll->refcount, pll->active);
@@ -3096,13 +3174,7 @@ found:
WARN_ON(pll->on);
assert_shared_dpll_disabled(dev_priv, pll);
- /* Wait for the clocks to stabilize before rewriting the regs */
- I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
- POSTING_READ(PCH_DPLL(pll->id));
- udelay(150);
-
- I915_WRITE(PCH_FP0(pll->id), fp);
- I915_WRITE(PCH_DPLL(pll->id), dpll & ~DPLL_VCO_ENABLE);
+ pll->mode_set(dev_priv, pll);
}
pll->refcount++;
@@ -3174,7 +3246,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
- u32 temp;
WARN_ON(!crtc->enabled);
@@ -3188,12 +3259,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_update_watermarks(dev);
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- temp = I915_READ(PCH_LVDS);
- if ((temp & LVDS_PORT_EN) == 0)
- I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
- }
-
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ if (encoder->pre_enable)
+ encoder->pre_enable(encoder);
if (intel_crtc->config.has_pch_encoder) {
/* Note: FDI PLL enabling _must_ be done before we enable the
@@ -3205,10 +3273,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
assert_fdi_rx_disabled(dev_priv, pipe);
}
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->pre_enable)
- encoder->pre_enable(encoder);
-
ironlake_pfit_enable(intel_crtc);
/*
@@ -3389,7 +3453,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
intel_crtc_wait_for_pending_flips(crtc);
drm_vblank_off(dev, pipe);
- if (dev_priv->cfb_plane == plane)
+ if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
intel_crtc_update_cursor(crtc, false);
@@ -3462,7 +3526,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
drm_vblank_off(dev, pipe);
/* FBC must be disabled before disabling the plane on HSW. */
- if (dev_priv->cfb_plane == plane)
+ if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
hsw_disable_ips(intel_crtc);
@@ -3593,22 +3657,16 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
intel_update_watermarks(dev);
- mutex_lock(&dev_priv->dpio_lock);
-
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
- intel_enable_pll(dev_priv, pipe);
+ vlv_enable_pll(intel_crtc);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
- /* VLV wants encoder enabling _before_ the pipe is up. */
- for_each_encoder_on_crtc(dev, crtc, encoder)
- encoder->enable(encoder);
-
i9xx_pfit_enable(intel_crtc);
intel_crtc_load_lut(crtc);
@@ -3620,7 +3678,8 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
intel_update_fbc(dev);
- mutex_unlock(&dev_priv->dpio_lock);
+ for_each_encoder_on_crtc(dev, crtc, encoder)
+ encoder->enable(encoder);
}
static void i9xx_crtc_enable(struct drm_crtc *crtc)
@@ -3640,12 +3699,12 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
intel_crtc->active = true;
intel_update_watermarks(dev);
- intel_enable_pll(dev_priv, pipe);
-
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
+ i9xx_enable_pll(intel_crtc);
+
i9xx_pfit_enable(intel_crtc);
intel_crtc_load_lut(crtc);
@@ -3701,7 +3760,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
intel_crtc_wait_for_pending_flips(crtc);
drm_vblank_off(dev, pipe);
- if (dev_priv->cfb_plane == plane)
+ if (dev_priv->fbc.plane == plane)
intel_disable_fbc(dev);
intel_crtc_dpms_overlay(intel_crtc, false);
@@ -3717,7 +3776,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
if (encoder->post_disable)
encoder->post_disable(encoder);
- intel_disable_pll(dev_priv, pipe);
+ i9xx_disable_pll(dev_priv, pipe);
intel_crtc->active = false;
intel_update_fbc(dev);
@@ -3817,16 +3876,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
}
}
-void intel_modeset_disable(struct drm_device *dev)
-{
- struct drm_crtc *crtc;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (crtc->enabled)
- intel_crtc_disable(crtc);
- }
-}
-
void intel_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
@@ -3835,10 +3884,10 @@ void intel_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_encoder);
}
-/* Simple dpms helper for encodres with just one connector, no cloning and only
+/* Simple dpms helper for encoders with just one connector, no cloning and only
* one kind of off state. It clamps all !ON modes to fully OFF and changes the
* state of the entire output pipe. */
-void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
+static void intel_encoder_dpms(struct intel_encoder *encoder, int mode)
{
if (mode == DRM_MODE_DPMS_ON) {
encoder->connectors_active = true;
@@ -4032,7 +4081,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
{
pipe_config->ips_enabled = i915_enable_ips &&
hsw_crtc_supports_ips(crtc) &&
- pipe_config->pipe_bpp == 24;
+ pipe_config->pipe_bpp <= 24;
}
static int intel_crtc_compute_config(struct intel_crtc *crtc,
@@ -4048,12 +4097,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
return -EINVAL;
}
- /* All interlaced capable intel hw wants timings in frames. Note though
- * that intel_lvds_mode_fixup does some funny tricks with the crtc
- * timings, so we need to be careful not to clobber these.*/
- if (!pipe_config->timings_set)
- drm_mode_set_crtcinfo(adjusted_mode, 0);
-
/* Cantiga+ cannot handle modes with a hsync front porch of 0.
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
*/
@@ -4103,6 +4146,30 @@ static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
return 200000;
}
+static int pnv_get_display_clock_speed(struct drm_device *dev)
+{
+ u16 gcfgc = 0;
+
+ pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
+
+ switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
+ case GC_DISPLAY_CLOCK_267_MHZ_PNV:
+ return 267000;
+ case GC_DISPLAY_CLOCK_333_MHZ_PNV:
+ return 333000;
+ case GC_DISPLAY_CLOCK_444_MHZ_PNV:
+ return 444000;
+ case GC_DISPLAY_CLOCK_200_MHZ_PNV:
+ return 200000;
+ default:
+ DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
+ case GC_DISPLAY_CLOCK_133_MHZ_PNV:
+ return 133000;
+ case GC_DISPLAY_CLOCK_167_MHZ_PNV:
+ return 167000;
+ }
+}
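/*
 * A note on pnv_get_display_clock_speed() above: the default: label is
 * placed before the 133 MHz case, so an unrecognized GCFGC strap logs a
 * DRM_ERROR and then falls through to return the conservative 133000 kHz
 * value rather than leaving the caller with no clock at all.
 */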
+
static int i915gm_get_display_clock_speed(struct drm_device *dev)
{
u16 gcfgc = 0;
@@ -4266,14 +4333,17 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
}
I915_WRITE(FP0(pipe), fp);
+ crtc->config.dpll_hw_state.fp0 = fp;
crtc->lowfreq_avail = false;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
reduced_clock && i915_powersave) {
I915_WRITE(FP1(pipe), fp2);
+ crtc->config.dpll_hw_state.fp1 = fp2;
crtc->lowfreq_avail = true;
} else {
I915_WRITE(FP1(pipe), fp);
+ crtc->config.dpll_hw_state.fp1 = fp;
}
}
@@ -4351,17 +4421,13 @@ static void vlv_update_pll(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *encoder;
int pipe = crtc->pipe;
u32 dpll, mdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
- bool is_hdmi;
u32 coreclk, reg_val, dpll_md;
mutex_lock(&dev_priv->dpio_lock);
- is_hdmi = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
-
bestn = crtc->config.dpll.n;
bestm1 = crtc->config.dpll.m1;
bestm2 = crtc->config.dpll.m2;
@@ -4407,7 +4473,7 @@ static void vlv_update_pll(struct intel_crtc *crtc)
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) ||
intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI))
vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
- 0x005f0021);
+ 0x009f0003);
else
vlv_dpio_write(dev_priv, DPIO_LPF_COEFF(pipe),
0x00d0000f);
@@ -4440,10 +4506,6 @@ static void vlv_update_pll(struct intel_crtc *crtc)
vlv_dpio_write(dev_priv, DPIO_PLL_CML(pipe), 0x87871000);
- for_each_encoder_on_crtc(dev, &crtc->base, encoder)
- if (encoder->pre_pll_enable)
- encoder->pre_pll_enable(encoder);
-
/* Enable DPIO clock input */
dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
@@ -4451,17 +4513,11 @@ static void vlv_update_pll(struct intel_crtc *crtc)
dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
dpll |= DPLL_VCO_ENABLE;
- I915_WRITE(DPLL(pipe), dpll);
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
- if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
- DRM_ERROR("DPLL %d failed to lock\n", pipe);
+ crtc->config.dpll_hw_state.dpll = dpll;
dpll_md = (crtc->config.pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
- I915_WRITE(DPLL_MD(pipe), dpll_md);
- POSTING_READ(DPLL_MD(pipe));
+ crtc->config.dpll_hw_state.dpll_md = dpll_md;
if (crtc->config.has_dp_encoder)
intel_dp_set_m_n(crtc);
@@ -4475,8 +4531,6 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *encoder;
- int pipe = crtc->pipe;
u32 dpll;
bool is_sdvo;
struct dpll *clock = &crtc->config.dpll;
@@ -4499,10 +4553,10 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
}
if (is_sdvo)
- dpll |= DPLL_DVO_HIGH_SPEED;
+ dpll |= DPLL_SDVO_HIGH_SPEED;
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT))
- dpll |= DPLL_DVO_HIGH_SPEED;
+ dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
if (IS_PINEVIEW(dev))
@@ -4538,35 +4592,16 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_REF_INPUT_DREFCLK;
dpll |= DPLL_VCO_ENABLE;
- I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
- for_each_encoder_on_crtc(dev, &crtc->base, encoder)
- if (encoder->pre_pll_enable)
- encoder->pre_pll_enable(encoder);
-
- if (crtc->config.has_dp_encoder)
- intel_dp_set_m_n(crtc);
-
- I915_WRITE(DPLL(pipe), dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(DPLL(pipe));
- udelay(150);
+ crtc->config.dpll_hw_state.dpll = dpll;
if (INTEL_INFO(dev)->gen >= 4) {
u32 dpll_md = (crtc->config.pixel_multiplier - 1)
<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
- I915_WRITE(DPLL_MD(pipe), dpll_md);
- } else {
- /* The pixel multiplier can only be updated once the
- * DPLL is enabled and the clocks are stable.
- *
- * So write it again.
- */
- I915_WRITE(DPLL(pipe), dpll);
+ crtc->config.dpll_hw_state.dpll_md = dpll_md;
}
+
+ if (crtc->config.has_dp_encoder)
+ intel_dp_set_m_n(crtc);
}
static void i8xx_update_pll(struct intel_crtc *crtc,
@@ -4575,8 +4610,6 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_encoder *encoder;
- int pipe = crtc->pipe;
u32 dpll;
struct dpll *clock = &crtc->config.dpll;
@@ -4595,6 +4628,9 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_P2_DIVIDE_BY_4;
}
+ if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO))
+ dpll |= DPLL_DVO_2X_MODE;
+
if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv) && num_connectors < 2)
dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
@@ -4602,26 +4638,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
dpll |= PLL_REF_INPUT_DREFCLK;
dpll |= DPLL_VCO_ENABLE;
- I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
- for_each_encoder_on_crtc(dev, &crtc->base, encoder)
- if (encoder->pre_pll_enable)
- encoder->pre_pll_enable(encoder);
-
- I915_WRITE(DPLL(pipe), dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(DPLL(pipe));
- udelay(150);
-
- /* The pixel multiplier can only be updated once the
- * DPLL is enabled and the clocks are stable.
- *
- * So write it again.
- */
- I915_WRITE(DPLL(pipe), dpll);
+ crtc->config.dpll_hw_state.dpll = dpll;
}
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
@@ -4727,6 +4744,27 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
pipe_config->requested_mode.hdisplay = ((tmp >> 16) & 0xffff) + 1;
}
+static void intel_crtc_mode_from_pipe_config(struct intel_crtc *intel_crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_crtc *crtc = &intel_crtc->base;
+
+ crtc->mode.hdisplay = pipe_config->adjusted_mode.crtc_hdisplay;
+ crtc->mode.htotal = pipe_config->adjusted_mode.crtc_htotal;
+ crtc->mode.hsync_start = pipe_config->adjusted_mode.crtc_hsync_start;
+ crtc->mode.hsync_end = pipe_config->adjusted_mode.crtc_hsync_end;
+
+ crtc->mode.vdisplay = pipe_config->adjusted_mode.crtc_vdisplay;
+ crtc->mode.vtotal = pipe_config->adjusted_mode.crtc_vtotal;
+ crtc->mode.vsync_start = pipe_config->adjusted_mode.crtc_vsync_start;
+ crtc->mode.vsync_end = pipe_config->adjusted_mode.crtc_vsync_end;
+
+ crtc->mode.flags = pipe_config->adjusted_mode.flags;
+
+ crtc->mode.clock = pipe_config->adjusted_mode.clock;
+ crtc->mode.flags |= pipe_config->adjusted_mode.flags;
+}
+
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
@@ -4939,7 +4977,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
- pipe_config->cpu_transcoder = crtc->pipe;
+ pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -4955,6 +4993,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
pipe_config->pixel_multiplier =
((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
+ pipe_config->dpll_hw_state.dpll_md = tmp;
} else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
tmp = I915_READ(DPLL(crtc->pipe));
pipe_config->pixel_multiplier =
@@ -4966,6 +5005,16 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
* function. */
pipe_config->pixel_multiplier = 1;
}
+ pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
+ if (!IS_VALLEYVIEW(dev)) {
+ pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
+ pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
+ } else {
+ /* Mask out read-only status bits. */
+ pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
+ DPLL_PORTC_READY_MASK |
+ DPLL_PORTB_READY_MASK);
+ }
return true;
}
@@ -5119,74 +5168,37 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
BUG_ON(val != final);
}
-/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
-static void lpt_init_pch_refclk(struct drm_device *dev)
+static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct intel_encoder *encoder;
- bool has_vga = false;
- bool is_sdv = false;
- u32 tmp;
-
- list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
- switch (encoder->type) {
- case INTEL_OUTPUT_ANALOG:
- has_vga = true;
- break;
- }
- }
-
- if (!has_vga)
- return;
-
- mutex_lock(&dev_priv->dpio_lock);
-
- /* XXX: Rip out SDV support once Haswell ships for real. */
- if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
- is_sdv = true;
-
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
- tmp &= ~SBI_SSCCTL_DISABLE;
- tmp |= SBI_SSCCTL_PATHALT;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
-
- udelay(24);
+ uint32_t tmp;
- tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
- tmp &= ~SBI_SSCCTL_PATHALT;
- intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+ I915_WRITE(SOUTH_CHICKEN2, tmp);
- if (!is_sdv) {
- tmp = I915_READ(SOUTH_CHICKEN2);
- tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
- I915_WRITE(SOUTH_CHICKEN2, tmp);
+ if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+ DRM_ERROR("FDI mPHY reset assert timeout\n");
- if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS, 100))
- DRM_ERROR("FDI mPHY reset assert timeout\n");
+ tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+ I915_WRITE(SOUTH_CHICKEN2, tmp);
- tmp = I915_READ(SOUTH_CHICKEN2);
- tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
- I915_WRITE(SOUTH_CHICKEN2, tmp);
+ if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+ DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+}
- if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
- 100))
- DRM_ERROR("FDI mPHY reset de-assert timeout\n");
- }
+/* WaMPhyProgramming:hsw */
+static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
+{
+ uint32_t tmp;
tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
tmp &= ~(0xFF << 24);
tmp |= (0x12 << 24);
intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
- if (is_sdv) {
- tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
- tmp |= 0x7FFF;
- intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
- }
-
tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
tmp |= (1 << 11);
intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
@@ -5195,24 +5207,6 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
tmp |= (1 << 11);
intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
- if (is_sdv) {
- tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
- tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
- intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
- tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
- intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
- tmp |= (0x3F << 8);
- intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
- tmp |= (0x3F << 8);
- intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
- }
-
tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
@@ -5221,17 +5215,15 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
- if (!is_sdv) {
- tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
- tmp &= ~(7 << 13);
- tmp |= (5 << 13);
- intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+ tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
- tmp &= ~(7 << 13);
- tmp |= (5 << 13);
- intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
- }
+ tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
tmp &= ~0xFF;
@@ -5253,34 +5245,120 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
tmp |= (0x1C << 16);
intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
- if (!is_sdv) {
- tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
- tmp |= (1 << 27);
- intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+ tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
- tmp |= (1 << 27);
- intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+ tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
- tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
- tmp &= ~(0xF << 28);
- tmp |= (4 << 28);
- intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+ tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+}
+
+/* Implements 3 different sequences from BSpec chapter "Display iCLK
+ * Programming" based on the parameters passed:
+ * - Sequence to enable CLKOUT_DP
+ * - Sequence to enable CLKOUT_DP without spread
+ * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
+ */
+static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
+ bool with_fdi)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t reg, tmp;
- tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
- tmp &= ~(0xF << 28);
- tmp |= (4 << 28);
- intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+ if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
+ with_spread = true;
+ if (WARN(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE &&
+ with_fdi, "LP PCH doesn't have FDI\n"))
+ with_fdi = false;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_DISABLE;
+ tmp |= SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+ udelay(24);
+
+ if (with_spread) {
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ tmp &= ~SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+
+ if (with_fdi) {
+ lpt_reset_fdi_mphy(dev_priv);
+ lpt_program_fdi_mphy(dev_priv);
+ }
}
- /* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
- tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
- tmp |= SBI_DBUFF0_ENABLE;
- intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
+ reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
+ SBI_GEN0 : SBI_DBUFF0;
+ tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+ tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
+ intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
mutex_unlock(&dev_priv->dpio_lock);
}
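/*
 * How the two parameters of lpt_enable_clkout_dp() map onto the three BSpec
 * sequences named in the comment above: (true, true) is the FDI variant used
 * by lpt_init_pch_refclk() below when a VGA/analog encoder is present,
 * (true, false) enables CLKOUT_DP with spread but skips the FDI mPHY
 * reset/programming, and (false, false) enables CLKOUT_DP without spread
 * (SSCCTL keeps PATHALT set). The WARNs enforce that FDI always implies
 * spread and that the LP PCH variant never requests FDI.
 */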
+/* Sequence to disable CLKOUT_DP */
+static void lpt_disable_clkout_dp(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t reg, tmp;
+
+ mutex_lock(&dev_priv->dpio_lock);
+
+ reg = (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) ?
+ SBI_GEN0 : SBI_DBUFF0;
+ tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
+ tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
+ intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
+
+ tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
+ if (!(tmp & SBI_SSCCTL_DISABLE)) {
+ if (!(tmp & SBI_SSCCTL_PATHALT)) {
+ tmp |= SBI_SSCCTL_PATHALT;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ udelay(32);
+ }
+ tmp |= SBI_SSCCTL_DISABLE;
+ intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
+ }
+
+ mutex_unlock(&dev_priv->dpio_lock);
+}
+
+static void lpt_init_pch_refclk(struct drm_device *dev)
+{
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct intel_encoder *encoder;
+ bool has_vga = false;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
+ switch (encoder->type) {
+ case INTEL_OUTPUT_ANALOG:
+ has_vga = true;
+ break;
+ }
+ }
+
+ if (has_vga)
+ lpt_enable_clkout_dp(dev, true, true);
+ else
+ lpt_disable_clkout_dp(dev);
+}
+
/*
* Initialize reference clocks when the driver loads
*/
@@ -5610,9 +5688,9 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
if (is_sdvo)
- dpll |= DPLL_DVO_HIGH_SPEED;
+ dpll |= DPLL_SDVO_HIGH_SPEED;
if (intel_crtc->config.has_dp_encoder)
- dpll |= DPLL_DVO_HIGH_SPEED;
+ dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
@@ -5708,7 +5786,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
else
intel_crtc->config.dpll_hw_state.fp1 = fp;
- pll = intel_get_shared_dpll(intel_crtc, dpll, fp);
+ pll = intel_get_shared_dpll(intel_crtc);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
pipe_name(pipe));
@@ -5720,10 +5798,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (intel_crtc->config.has_dp_encoder)
intel_dp_set_m_n(intel_crtc);
- for_each_encoder_on_crtc(dev, crtc, encoder)
- if (encoder->pre_pll_enable)
- encoder->pre_pll_enable(encoder);
-
if (is_lvds && has_reduced_clock && i915_powersave)
intel_crtc->lowfreq_avail = true;
else
@@ -5732,23 +5806,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (intel_crtc->config.has_pch_encoder) {
pll = intel_crtc_to_shared_dpll(intel_crtc);
- I915_WRITE(PCH_DPLL(pll->id), dpll);
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(PCH_DPLL(pll->id));
- udelay(150);
-
- /* The pixel multiplier can only be updated once the
- * DPLL is enabled and the clocks are stable.
- *
- * So write it again.
- */
- I915_WRITE(PCH_DPLL(pll->id), dpll);
-
- if (has_reduced_clock)
- I915_WRITE(PCH_FP1(pll->id), fp2);
- else
- I915_WRITE(PCH_FP1(pll->id), fp);
}
intel_set_pipe_timings(intel_crtc);
@@ -5820,7 +5877,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
- pipe_config->cpu_transcoder = crtc->pipe;
+ pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
tmp = I915_READ(PIPECONF(crtc->pipe));
@@ -5838,12 +5895,9 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
ironlake_get_fdi_m_n_config(crtc, pipe_config);
- /* XXX: Can't properly read out the pch dpll pixel multiplier
- * since we don't have state tracking for pch clocks yet. */
- pipe_config->pixel_multiplier = 1;
-
if (HAS_PCH_IBX(dev_priv->dev)) {
- pipe_config->shared_dpll = crtc->pipe;
+ pipe_config->shared_dpll =
+ (enum intel_dpll_id) crtc->pipe;
} else {
tmp = I915_READ(PCH_DPLL_SEL);
if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
@@ -5856,6 +5910,11 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
WARN_ON(!pll->get_hw_state(dev_priv, pll,
&pipe_config->dpll_hw_state));
+
+ tmp = pipe_config->dpll_hw_state.dpll;
+ pipe_config->pixel_multiplier =
+ ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
+ >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
} else {
pipe_config->pixel_multiplier = 1;
}
@@ -5867,6 +5926,305 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
return true;
}
+static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
+ struct intel_crtc *crtc;
+ unsigned long irqflags;
+ uint32_t val;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
+ WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
+ pipe_name(crtc->pipe));
+
+ WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
+ WARN(plls->spll_refcount, "SPLL enabled\n");
+ WARN(plls->wrpll1_refcount, "WRPLL1 enabled\n");
+ WARN(plls->wrpll2_refcount, "WRPLL2 enabled\n");
+ WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
+ WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
+ "CPU PWM1 enabled\n");
+ WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+ "CPU PWM2 enabled\n");
+ WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
+ "PCH PWM1 enabled\n");
+ WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ "Utility pin enabled\n");
+ WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ val = I915_READ(DEIMR);
+ WARN((val & ~DE_PCH_EVENT_IVB) != val,
+ "Unexpected DEIMR bits enabled: 0x%x\n", val);
+ val = I915_READ(SDEIMR);
+ WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
+ "Unexpected SDEIMR bits enabled: 0x%x\n", val);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+}
+
+/*
+ * This function implements pieces of two sequences from BSpec:
+ * - Sequence for display software to disable LCPLL
+ * - Sequence for display software to allow package C8+
+ * The steps implemented here are just the steps that actually touch the LCPLL
+ * register. Callers should take care of disabling all the display engine
+ * functions, doing the mode unset, fixing interrupts, etc.
+ */
+void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+ bool switch_to_fclk, bool allow_power_down)
+{
+ uint32_t val;
+
+ assert_can_disable_lcpll(dev_priv);
+
+ val = I915_READ(LCPLL_CTL);
+
+ if (switch_to_fclk) {
+ val |= LCPLL_CD_SOURCE_FCLK;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE, 1))
+ DRM_ERROR("Switching to FCLK failed\n");
+
+ val = I915_READ(LCPLL_CTL);
+ }
+
+ val |= LCPLL_PLL_DISABLE;
+ I915_WRITE(LCPLL_CTL, val);
+ POSTING_READ(LCPLL_CTL);
+
+ if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
+ DRM_ERROR("LCPLL still locked\n");
+
+ val = I915_READ(D_COMP);
+ val |= D_COMP_COMP_DISABLE;
+ I915_WRITE(D_COMP, val);
+ POSTING_READ(D_COMP);
+ ndelay(100);
+
+ if (wait_for((I915_READ(D_COMP) & D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
+ DRM_ERROR("D_COMP RCOMP still in progress\n");
+
+ if (allow_power_down) {
+ val = I915_READ(LCPLL_CTL);
+ val |= LCPLL_POWER_DOWN_ALLOW;
+ I915_WRITE(LCPLL_CTL, val);
+ POSTING_READ(LCPLL_CTL);
+ }
+}
+
+/*
+ * Fully restores LCPLL, disallowing power down and switching back to LCPLL
+ * source.
+ */
+void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
+{
+ uint32_t val;
+
+ val = I915_READ(LCPLL_CTL);
+
+ if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
+ LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
+ return;
+
+ /* Make sure we're not in PC8 state before disabling PC8, otherwise

+ * we'll hang the machine! */
+ dev_priv->uncore.funcs.force_wake_get(dev_priv);
+
+ if (val & LCPLL_POWER_DOWN_ALLOW) {
+ val &= ~LCPLL_POWER_DOWN_ALLOW;
+ I915_WRITE(LCPLL_CTL, val);
+ POSTING_READ(LCPLL_CTL);
+ }
+
+ val = I915_READ(D_COMP);
+ val |= D_COMP_COMP_FORCE;
+ val &= ~D_COMP_COMP_DISABLE;
+ I915_WRITE(D_COMP, val);
+ POSTING_READ(D_COMP);
+
+ val = I915_READ(LCPLL_CTL);
+ val &= ~LCPLL_PLL_DISABLE;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
+ DRM_ERROR("LCPLL not locked yet\n");
+
+ if (val & LCPLL_CD_SOURCE_FCLK) {
+ val = I915_READ(LCPLL_CTL);
+ val &= ~LCPLL_CD_SOURCE_FCLK;
+ I915_WRITE(LCPLL_CTL, val);
+
+ if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ DRM_ERROR("Switching back to LCPLL failed\n");
+ }
+
+ dev_priv->uncore.funcs.force_wake_put(dev_priv);
+}
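/*
 * hsw_disable_lcpll() and hsw_restore_lcpll() are intended as a pair: the
 * disable path optionally reparents the CD clock to FCLK, turns the PLL off,
 * disables D_COMP and, if requested, sets POWER_DOWN_ALLOW; the restore path
 * undoes those steps in reverse order, taking forcewake first so the hardware
 * is out of PC8 before any LCPLL register is written.
 */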
+
+void hsw_enable_pc8_work(struct work_struct *__work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(to_delayed_work(__work), struct drm_i915_private,
+ pc8.enable_work);
+ struct drm_device *dev = dev_priv->dev;
+ uint32_t val;
+
+ if (dev_priv->pc8.enabled)
+ return;
+
+ DRM_DEBUG_KMS("Enabling package C8+\n");
+
+ dev_priv->pc8.enabled = true;
+
+ if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+ val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ }
+
+ lpt_disable_clkout_dp(dev);
+ hsw_pc8_disable_interrupts(dev);
+ hsw_disable_lcpll(dev_priv, true, true);
+}
+
+static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
+{
+ WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
+ WARN(dev_priv->pc8.disable_count < 1,
+ "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
+
+ dev_priv->pc8.disable_count--;
+ if (dev_priv->pc8.disable_count != 0)
+ return;
+
+ schedule_delayed_work(&dev_priv->pc8.enable_work,
+ msecs_to_jiffies(i915_pc8_timeout));
+}
+
+static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ uint32_t val;
+
+ WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
+ WARN(dev_priv->pc8.disable_count < 0,
+ "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
+
+ dev_priv->pc8.disable_count++;
+ if (dev_priv->pc8.disable_count != 1)
+ return;
+
+ cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
+ if (!dev_priv->pc8.enabled)
+ return;
+
+ DRM_DEBUG_KMS("Disabling package C8+\n");
+
+ hsw_restore_lcpll(dev_priv);
+ hsw_pc8_restore_interrupts(dev);
+ lpt_init_pch_refclk(dev);
+
+ if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+ val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ val |= PCH_LP_PARTITION_LEVEL_DISABLE;
+ I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ }
+
+ intel_prepare_ddi(dev);
+ i915_gem_init_swizzling(dev);
+ mutex_lock(&dev_priv->rps.hw_lock);
+ gen6_update_ring_freq(dev);
+ mutex_unlock(&dev_priv->rps.hw_lock);
+ dev_priv->pc8.enabled = false;
+}
+
+void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
+{
+ mutex_lock(&dev_priv->pc8.lock);
+ __hsw_enable_package_c8(dev_priv);
+ mutex_unlock(&dev_priv->pc8.lock);
+}
+
+void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
+{
+ mutex_lock(&dev_priv->pc8.lock);
+ __hsw_disable_package_c8(dev_priv);
+ mutex_unlock(&dev_priv->pc8.lock);
+}
+
+static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_crtc *crtc;
+ uint32_t val;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
+ if (crtc->base.enabled)
+ return false;
+
+ /* This case is still possible since we have the i915.disable_power_well
+ * parameter, and the KVMr or something else might still be requesting
+ * the power well. */
+ val = I915_READ(HSW_PWR_WELL_DRIVER);
+ if (val != 0) {
+ DRM_DEBUG_KMS("Not enabling PC8: power well on\n");
+ return false;
+ }
+
+ return true;
+}
+
+/* Since we're called from modeset_global_resources there's no way to
+ * symmetrically increase and decrease the refcount, so we use
+ * dev_priv->pc8.requirements_met to track whether we already have the refcount
+ * or not.
+ */
+static void hsw_update_package_c8(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ bool allow;
+
+ if (!i915_enable_pc8)
+ return;
+
+ mutex_lock(&dev_priv->pc8.lock);
+
+ allow = hsw_can_enable_package_c8(dev_priv);
+
+ if (allow == dev_priv->pc8.requirements_met)
+ goto done;
+
+ dev_priv->pc8.requirements_met = allow;
+
+ if (allow)
+ __hsw_enable_package_c8(dev_priv);
+ else
+ __hsw_disable_package_c8(dev_priv);
+
+done:
+ mutex_unlock(&dev_priv->pc8.lock);
+}
+
+static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
+{
+ if (!dev_priv->pc8.gpu_idle) {
+ dev_priv->pc8.gpu_idle = true;
+ hsw_enable_package_c8(dev_priv);
+ }
+}
+
+static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
+{
+ if (dev_priv->pc8.gpu_idle) {
+ dev_priv->pc8.gpu_idle = false;
+ hsw_disable_package_c8(dev_priv);
+ }
+}
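/*
 * PC8 bookkeeping in the helpers above: pc8.disable_count is a refcount.
 * __hsw_disable_package_c8() increments it and acts on the 0->1 transition
 * (cancel the delayed enable work, restore LCPLL, interrupts and the PCH
 * refclk); __hsw_enable_package_c8() decrements it and schedules the delayed
 * enable work on the 1->0 transition. requirements_met and gpu_idle are
 * edge-triggered trackers, so each state flip contributes exactly one
 * increment or decrement to that refcount.
 */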
+
static void haswell_modeset_global_resources(struct drm_device *dev)
{
bool enable = false;
@@ -5882,6 +6240,8 @@ static void haswell_modeset_global_resources(struct drm_device *dev)
}
intel_set_power_well(dev, enable);
+
+ hsw_update_package_c8(dev);
}
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
@@ -5935,7 +6295,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
enum intel_display_power_domain pfit_domain;
uint32_t tmp;
- pipe_config->cpu_transcoder = crtc->pipe;
+ pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
@@ -6005,11 +6365,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_encoder_helper_funcs *encoder_funcs;
struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_display_mode *adjusted_mode =
- &intel_crtc->config.adjusted_mode;
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
int pipe = intel_crtc->pipe;
int ret;
@@ -6028,12 +6385,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
encoder->base.base.id,
drm_get_encoder_name(&encoder->base),
mode->base.id, mode->name);
- if (encoder->mode_set) {
- encoder->mode_set(encoder);
- } else {
- encoder_funcs = encoder->base.helper_private;
- encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
- }
+ encoder->mode_set(encoder);
}
return 0;
@@ -6548,7 +6900,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
goto fail_unpin;
}
- addr = obj->gtt_offset;
+ addr = i915_gem_obj_ggtt_offset(obj);
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_attach_phys_object(dev, obj,
@@ -6570,7 +6922,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
if (intel_crtc->cursor_bo != obj)
i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
} else
- i915_gem_object_unpin(intel_crtc->cursor_bo);
+ i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
}
@@ -6585,7 +6937,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
return 0;
fail_unpin:
- i915_gem_object_unpin(obj);
+ i915_gem_object_unpin_from_display_plane(obj);
fail_locked:
mutex_unlock(&dev->struct_mutex);
fail:
@@ -6875,11 +7227,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
}
/* Returns the clock of the currently programmed mode of the given pipe. */
-static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
+static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
{
+ struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int pipe = intel_crtc->pipe;
+ int pipe = pipe_config->cpu_transcoder;
u32 dpll = I915_READ(DPLL(pipe));
u32 fp;
intel_clock_t clock;
@@ -6918,7 +7271,8 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
default:
DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
"mode\n", (int)(dpll & DPLL_MODE_MASK));
- return 0;
+ pipe_config->adjusted_mode.clock = 0;
+ return;
}
if (IS_PINEVIEW(dev))
@@ -6955,12 +7309,55 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
}
}
- /* XXX: It would be nice to validate the clocks, but we can't reuse
- * i830PllIsValid() because it relies on the xf86_config connector
- * configuration being accurate, which it isn't necessarily.
+ pipe_config->adjusted_mode.clock = clock.dot *
+ pipe_config->pixel_multiplier;
+}
+
+static void ironlake_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_config *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
+ int link_freq, repeat;
+ u64 clock;
+ u32 link_m, link_n;
+
+ repeat = pipe_config->pixel_multiplier;
+
+ /*
+ * The calculation for the data clock is:
+ * pixel_clock = ((m/n)*(link_clock * nr_lanes * repeat))/bpp
+ * But we want to avoid losing precision if possible, so:
+ * pixel_clock = ((m * link_clock * nr_lanes * repeat)/(n*bpp))
+ *
+ * and the link clock is simpler:
+ * link_clock = (m * link_clock * repeat) / n
*/
- return clock.dot;
+ /*
+ * We need the FDI or DP link clock here to convert the M/N
+ * dividers back into a pixel clock.
+ *
+ * For FDI, we read it from the BIOS or use a fixed 2.7GHz.
+ * For DP, it's either 1.62GHz or 2.7GHz.
+ * We do our calculations in 10*MHz since we don't need much precision.
+ */
+ if (pipe_config->has_pch_encoder)
+ link_freq = intel_fdi_link_freq(dev) * 10000;
+ else
+ link_freq = pipe_config->port_clock;
+
+ link_m = I915_READ(PIPE_LINK_M1(cpu_transcoder));
+ link_n = I915_READ(PIPE_LINK_N1(cpu_transcoder));
+
+ if (!link_m || !link_n)
+ return;
+
+ clock = ((u64)link_m * (u64)link_freq * (u64)repeat);
+ do_div(clock, link_n);
+
+ pipe_config->adjusted_mode.clock = clock;
}
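/*
 * Worked example for the M/N readout above (illustrative values): with a
 * 2.7 GHz DP/FDI link the code ends up with link_freq ~= 270000 (kHz). If
 * the hardware had been programmed with, say, link_m = 55 and link_n = 100
 * (real registers typically hold larger values with the same ratio) and
 * pixel_multiplier = 1, the recovered pixel clock would be
 * 55 * 270000 * 1 / 100 = 148500 kHz, i.e. a 1080p60-class mode.
 */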
/** Returns the currently programmed mode of the given pipe. */
@@ -6971,6 +7368,7 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
struct drm_display_mode *mode;
+ struct intel_crtc_config pipe_config;
int htot = I915_READ(HTOTAL(cpu_transcoder));
int hsync = I915_READ(HSYNC(cpu_transcoder));
int vtot = I915_READ(VTOTAL(cpu_transcoder));
@@ -6980,7 +7378,18 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
if (!mode)
return NULL;
- mode->clock = intel_crtc_clock_get(dev, crtc);
+ /*
+ * Construct a pipe_config sufficient for getting the clock info
+ * back out of crtc_clock_get.
+ *
+ * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
+ * to use a real value here instead.
+ */
+ pipe_config.cpu_transcoder = (enum transcoder) intel_crtc->pipe;
+ pipe_config.pixel_multiplier = 1;
+ i9xx_crtc_clock_get(intel_crtc, &pipe_config);
+
+ mode->clock = pipe_config.adjusted_mode.clock;
mode->hdisplay = (htot & 0xffff) + 1;
mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
mode->hsync_start = (hsync & 0xffff) + 1;
@@ -7064,13 +7473,19 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
void intel_mark_busy(struct drm_device *dev)
{
- i915_update_gfx_val(dev->dev_private);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ hsw_package_c8_gpu_busy(dev_priv);
+ i915_update_gfx_val(dev_priv);
}
void intel_mark_idle(struct drm_device *dev)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
+ hsw_package_c8_gpu_idle(dev_priv);
+
if (!i915_powersave)
return;
@@ -7235,7 +7650,8 @@ inline static void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj)
+ struct drm_i915_gem_object *obj,
+ uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7263,7 +7679,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
intel_mark_page_flip_active(intel_crtc);
@@ -7279,7 +7695,8 @@ err:
static int intel_gen3_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj)
+ struct drm_i915_gem_object *obj,
+ uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7304,7 +7721,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
- intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, MI_NOOP);
intel_mark_page_flip_active(intel_crtc);
@@ -7320,7 +7737,8 @@ err:
static int intel_gen4_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj)
+ struct drm_i915_gem_object *obj,
+ uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7344,7 +7762,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring,
- (obj->gtt_offset + intel_crtc->dspaddr_offset) |
+ (i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset) |
obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
@@ -7368,7 +7786,8 @@ err:
static int intel_gen6_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj)
+ struct drm_i915_gem_object *obj,
+ uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7387,7 +7806,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
- intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
/* Contrary to the suggestions in the documentation,
* "Enable Panel Fitter" does not seem to be required when page
@@ -7418,7 +7837,8 @@ err:
static int intel_gen7_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj)
+ struct drm_i915_gem_object *obj,
+ uint32_t flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -7452,7 +7872,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
- intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, (MI_NOOP));
intel_mark_page_flip_active(intel_crtc);
@@ -7468,14 +7888,16 @@ err:
static int intel_default_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_i915_gem_object *obj)
+ struct drm_i915_gem_object *obj,
+ uint32_t flags)
{
return -ENODEV;
}
static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7545,7 +7967,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
atomic_inc(&intel_crtc->unpin_work_count);
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
- ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
+ ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, page_flip_flags);
if (ret)
goto cleanup_pending;
@@ -7789,7 +8211,6 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
- struct drm_encoder_helper_funcs *encoder_funcs;
struct intel_encoder *encoder;
struct intel_crtc_config *pipe_config;
int plane_bpp, ret = -EINVAL;
@@ -7806,9 +8227,23 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
drm_mode_copy(&pipe_config->adjusted_mode, mode);
drm_mode_copy(&pipe_config->requested_mode, mode);
- pipe_config->cpu_transcoder = to_intel_crtc(crtc)->pipe;
+ pipe_config->cpu_transcoder =
+ (enum transcoder) to_intel_crtc(crtc)->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+ /*
+ * Sanitize sync polarity flags based on requested ones. If neither
+ * positive nor negative polarity is requested, treat this as meaning
+ * negative polarity.
+ */
+ if (!(pipe_config->adjusted_mode.flags &
+ (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
+ pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+
+ if (!(pipe_config->adjusted_mode.flags &
+ (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
+ pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+
/* Compute a starting value for pipe_config->pipe_bpp taking the source
* plane pixel format and any sink constraints into account. Returns the
* source plane bpp so that dithering can be selected on mismatches
@@ -7823,6 +8258,9 @@ encoder_retry:
pipe_config->port_clock = 0;
pipe_config->pixel_multiplier = 1;
+ /* Fill in default crtc timings, allow encoders to overwrite them. */
+ drm_mode_set_crtcinfo(&pipe_config->adjusted_mode, 0);
+
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
@@ -7833,20 +8271,8 @@ encoder_retry:
if (&encoder->new_crtc->base != crtc)
continue;
- if (encoder->compute_config) {
- if (!(encoder->compute_config(encoder, pipe_config))) {
- DRM_DEBUG_KMS("Encoder config failure\n");
- goto fail;
- }
-
- continue;
- }
-
- encoder_funcs = encoder->base.helper_private;
- if (!(encoder_funcs->mode_fixup(&encoder->base,
- &pipe_config->requested_mode,
- &pipe_config->adjusted_mode))) {
- DRM_DEBUG_KMS("Encoder fixup failed\n");
+ if (!(encoder->compute_config(encoder, pipe_config))) {
+ DRM_DEBUG_KMS("Encoder config failure\n");
goto fail;
}
}
@@ -8041,6 +8467,28 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
}
+static bool intel_fuzzy_clock_check(struct intel_crtc_config *cur,
+ struct intel_crtc_config *new)
+{
+ int clock1, clock2, diff;
+
+ clock1 = cur->adjusted_mode.clock;
+ clock2 = new->adjusted_mode.clock;
+
+ if (clock1 == clock2)
+ return true;
+
+ if (!clock1 || !clock2)
+ return false;
+
+ diff = abs(clock1 - clock2);
+
+ if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
+ return true;
+
+ return false;
+}
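/*
 * The check above is, modulo integer truncation, "diff < 5% of
 * (clock1 + clock2)", i.e. roughly a 10% tolerance when the two clocks are
 * close. With made-up numbers: 148500 vs 154000 gives diff = 5500 and
 * (5500 + 302500) * 100 / 302500 = 101 < 105, so they are treated as equal;
 * 100000 vs 120000 gives (20000 + 220000) * 100 / 220000 = 109, which fails.
 */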
+
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
list_for_each_entry((intel_crtc), \
&(dev)->mode_config.crtc_list, \
@@ -8072,7 +8520,7 @@ intel_pipe_config_compare(struct drm_device *dev,
#define PIPE_CONF_CHECK_FLAGS(name, mask) \
if ((current_config->name ^ pipe_config->name) & (mask)) { \
- DRM_ERROR("mismatch in " #name " " \
+ DRM_ERROR("mismatch in " #name "(" #mask ") " \
"(expected %i, found %i)\n", \
current_config->name & (mask), \
pipe_config->name & (mask)); \
@@ -8106,8 +8554,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_start);
PIPE_CONF_CHECK_I(adjusted_mode.crtc_vsync_end);
- if (!HAS_PCH_SPLIT(dev))
- PIPE_CONF_CHECK_I(pixel_multiplier);
+ PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_FLAGS(adjusted_mode.flags,
DRM_MODE_FLAG_INTERLACE);
@@ -8138,6 +8585,7 @@ intel_pipe_config_compare(struct drm_device *dev,
PIPE_CONF_CHECK_I(shared_dpll);
PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
+ PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
@@ -8146,6 +8594,15 @@ intel_pipe_config_compare(struct drm_device *dev,
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_QUIRK
+ if (!IS_HASWELL(dev)) {
+ if (!intel_fuzzy_clock_check(current_config, pipe_config)) {
+ DRM_ERROR("mismatch in clock (expected %d, found %d)\n",
+ current_config->adjusted_mode.clock,
+ pipe_config->adjusted_mode.clock);
+ return false;
+ }
+ }
+
return true;
}
@@ -8269,12 +8726,17 @@ check_crtc_state(struct drm_device *dev)
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
+ enum pipe pipe;
if (encoder->base.crtc != &crtc->base)
continue;
- if (encoder->get_config)
+ if (encoder->get_config &&
+ encoder->get_hw_state(encoder, &pipe))
encoder->get_config(encoder, &pipe_config);
}
+ if (dev_priv->display.get_clock)
+ dev_priv->display.get_clock(crtc, &pipe_config);
+
WARN(crtc->active != active,
"crtc active state doesn't match with hw state "
"(expected %i, found %i)\n", crtc->active, active);
@@ -8452,9 +8914,9 @@ out:
return ret;
}
-int intel_set_mode(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb)
+static int intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
{
int ret;
@@ -8571,8 +9033,16 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
} else if (set->crtc->fb != set->fb) {
/* If we have no fb then treat it as a full mode set */
if (set->crtc->fb == NULL) {
- DRM_DEBUG_KMS("crtc has no fb, full mode set\n");
- config->mode_changed = true;
+ struct intel_crtc *intel_crtc =
+ to_intel_crtc(set->crtc);
+
+ if (intel_crtc->active && i915_fastboot) {
+ DRM_DEBUG_KMS("crtc has no fb, will flip\n");
+ config->fb_changed = true;
+ } else {
+ DRM_DEBUG_KMS("inactive crtc, full mode set\n");
+ config->mode_changed = true;
+ }
} else if (set->fb == NULL) {
config->mode_changed = true;
} else if (set->fb->pixel_format !=
@@ -8592,6 +9062,9 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
drm_mode_debug_printmodeline(set->mode);
config->mode_changed = true;
}
+
+ DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
+ set->crtc->base.id, config->mode_changed, config->fb_changed);
}
static int
@@ -8602,14 +9075,13 @@ intel_modeset_stage_output_state(struct drm_device *dev,
struct drm_crtc *new_crtc;
struct intel_connector *connector;
struct intel_encoder *encoder;
- int count, ro;
+ int ro;
/* The upper layers ensure that we either disable a crtc or have a list
* of connectors. For paranoia, double-check this. */
WARN_ON(!set->fb && (set->num_connectors != 0));
WARN_ON(set->fb && (set->num_connectors == 0));
- count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
/* Otherwise traverse passed in connector list and get encoders
@@ -8643,7 +9115,6 @@ intel_modeset_stage_output_state(struct drm_device *dev,
/* connector->new_encoder is now updated for all connectors. */
/* Update crtc of enabled connectors. */
- count = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
if (!connector->new_encoder)
@@ -8802,19 +9273,32 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
return val & DPLL_VCO_ENABLE;
}
+static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll)
+{
+ I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0);
+ I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1);
+}
+
static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- uint32_t reg, val;
-
/* PCH refclock must be enabled first */
assert_pch_refclk_enabled(dev_priv);
- reg = PCH_DPLL(pll->id);
- val = I915_READ(reg);
- val |= DPLL_VCO_ENABLE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+
+ /* Wait for the clocks to stabilize. */
+ POSTING_READ(PCH_DPLL(pll->id));
+ udelay(150);
+
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
+ I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll);
+ POSTING_READ(PCH_DPLL(pll->id));
udelay(200);
}
@@ -8823,7 +9307,6 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
{
struct drm_device *dev = dev_priv->dev;
struct intel_crtc *crtc;
- uint32_t reg, val;
/* Make sure no transcoder isn't still depending on us. */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
@@ -8831,11 +9314,8 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
assert_pch_transcoder_disabled(dev_priv, crtc->pipe);
}
- reg = PCH_DPLL(pll->id);
- val = I915_READ(reg);
- val &= ~DPLL_VCO_ENABLE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ I915_WRITE(PCH_DPLL(pll->id), 0);
+ POSTING_READ(PCH_DPLL(pll->id));
udelay(200);
}
@@ -8854,6 +9334,7 @@ static void ibx_pch_dpll_init(struct drm_device *dev)
for (i = 0; i < dev_priv->num_shared_dpll; i++) {
dev_priv->shared_dplls[i].id = i;
dev_priv->shared_dplls[i].name = ibx_pch_dpll_names[i];
+ dev_priv->shared_dplls[i].mode_set = ibx_pch_dpll_mode_set;
dev_priv->shared_dplls[i].enable = ibx_pch_dpll_enable;
dev_priv->shared_dplls[i].disable = ibx_pch_dpll_disable;
dev_priv->shared_dplls[i].get_hw_state =
@@ -9033,8 +9514,13 @@ static void intel_setup_outputs(struct drm_device *dev)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
- if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
- intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
+ if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) {
+ intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
+ PORT_C);
+ if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
+ intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C,
+ PORT_C);
+ }
if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) {
intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
@@ -9094,13 +9580,17 @@ static void intel_setup_outputs(struct drm_device *dev)
drm_helper_move_panel_connectors_to_head(dev);
}
+void intel_framebuffer_fini(struct intel_framebuffer *fb)
+{
+ drm_framebuffer_cleanup(&fb->base);
+ drm_gem_object_unreference_unlocked(&fb->obj->base);
+}
+
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- drm_framebuffer_cleanup(fb);
- drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
-
+ intel_framebuffer_fini(intel_fb);
kfree(intel_fb);
}
@@ -9270,6 +9760,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_plane = ironlake_update_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
+ dev_priv->display.get_clock = ironlake_crtc_clock_get;
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
@@ -9277,6 +9768,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_plane = ironlake_update_plane;
} else if (IS_VALLEYVIEW(dev)) {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+ dev_priv->display.get_clock = i9xx_crtc_clock_get;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = valleyview_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9284,6 +9776,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.update_plane = i9xx_update_plane;
} else {
dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
+ dev_priv->display.get_clock = i9xx_crtc_clock_get;
dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
dev_priv->display.crtc_enable = i9xx_crtc_enable;
dev_priv->display.crtc_disable = i9xx_crtc_disable;
@@ -9301,9 +9794,12 @@ static void intel_init_display(struct drm_device *dev)
else if (IS_I915G(dev))
dev_priv->display.get_display_clock_speed =
i915_get_display_clock_speed;
- else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
+ else if (IS_I945GM(dev) || IS_845G(dev))
dev_priv->display.get_display_clock_speed =
i9xx_misc_get_display_clock_speed;
+ else if (IS_PINEVIEW(dev))
+ dev_priv->display.get_display_clock_speed =
+ pnv_get_display_clock_speed;
else if (IS_I915GM(dev))
dev_priv->display.get_display_clock_speed =
i915gm_get_display_clock_speed;
@@ -9584,7 +10080,7 @@ void intel_modeset_init(struct drm_device *dev)
INTEL_INFO(dev)->num_pipes,
INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
- for (i = 0; i < INTEL_INFO(dev)->num_pipes; i++) {
+ for_each_pipe(i) {
intel_crtc_init(dev, i);
for (j = 0; j < dev_priv->num_plane; j++) {
ret = intel_plane_init(dev, i, j);
@@ -9790,6 +10286,17 @@ void i915_redisable_vga(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 vga_reg = i915_vgacntrl_reg(dev);
+ /* This function can be called both from intel_modeset_setup_hw_state or
+ * at a very early point in our resume sequence, where the power well
+ * structures are not yet restored. Since this function is at a very
+ * paranoid "someone might have enabled VGA while we were not looking"
+ * level, just check if the power well is enabled instead of trying to
+ * follow the "don't touch the power well if we don't need it" policy
+ * the rest of the driver uses. */
+ if (HAS_POWER_WELL(dev) &&
+ (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
+ return;
+
if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
i915_disable_vga(dev);
@@ -9860,6 +10367,15 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
pipe);
}
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ if (!crtc->active)
+ continue;
+ if (dev_priv->display.get_clock)
+ dev_priv->display.get_clock(crtc,
+ &crtc->config);
+ }
+
list_for_each_entry(connector, &dev->mode_config.connector_list,
base.head) {
if (connector->get_hw_state(connector)) {
@@ -9891,6 +10407,22 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
intel_modeset_readout_hw_state(dev);
+ /*
+ * Now that we have the config, copy it to each CRTC struct
+ * Note that this could go away if we move to using crtc_config
+ * checking everywhere.
+ */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list,
+ base.head) {
+ if (crtc->active && i915_fastboot) {
+ intel_crtc_mode_from_pipe_config(crtc, &crtc->config);
+
+ DRM_DEBUG_KMS("[CRTC:%d] found active mode: ",
+ crtc->base.base.id);
+ drm_mode_debug_printmodeline(&crtc->base.mode);
+ }
+ }
+
/* HW state is read out, now we need to sanitize this mess. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
@@ -9953,7 +10485,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
- struct intel_crtc *intel_crtc;
/*
* Interrupts and polling as the first thing to avoid creating havoc.
@@ -9977,7 +10508,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
if (!crtc->fb)
continue;
- intel_crtc = to_intel_crtc(crtc);
intel_increase_pllclock(crtc);
}
@@ -10033,13 +10563,12 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
return 0;
}
-#ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
-
struct intel_display_error_state {
u32 power_well_driver;
+ int num_transcoders;
+
struct intel_cursor_error_state {
u32 control;
u32 position;
@@ -10048,16 +10577,7 @@ struct intel_display_error_state {
} cursor[I915_MAX_PIPES];
struct intel_pipe_error_state {
- enum transcoder cpu_transcoder;
- u32 conf;
u32 source;
-
- u32 htotal;
- u32 hblank;
- u32 hsync;
- u32 vtotal;
- u32 vblank;
- u32 vsync;
} pipe[I915_MAX_PIPES];
struct intel_plane_error_state {
@@ -10069,6 +10589,19 @@ struct intel_display_error_state {
u32 surface;
u32 tile_offset;
} plane[I915_MAX_PIPES];
+
+ struct intel_transcoder_error_state {
+ enum transcoder cpu_transcoder;
+
+ u32 conf;
+
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ } transcoder[4];
};
struct intel_display_error_state *
@@ -10076,9 +10609,17 @@ intel_display_capture_error_state(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
- enum transcoder cpu_transcoder;
+ int transcoders[] = {
+ TRANSCODER_A,
+ TRANSCODER_B,
+ TRANSCODER_C,
+ TRANSCODER_EDP,
+ };
int i;
+ if (INTEL_INFO(dev)->num_pipes == 0)
+ return NULL;
+
error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (error == NULL)
return NULL;
@@ -10087,9 +10628,6 @@ intel_display_capture_error_state(struct drm_device *dev)
error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
for_each_pipe(i) {
- cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
- error->pipe[i].cpu_transcoder = cpu_transcoder;
-
if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
@@ -10113,22 +10651,32 @@ intel_display_capture_error_state(struct drm_device *dev)
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
}
- error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
error->pipe[i].source = I915_READ(PIPESRC(i));
- error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
- error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
- error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
- error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
- error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
- error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+ }
+
+ error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+ if (HAS_DDI(dev_priv->dev))
+ error->num_transcoders++; /* Account for eDP. */
+
+ for (i = 0; i < error->num_transcoders; i++) {
+ enum transcoder cpu_transcoder = transcoders[i];
+
+ error->transcoder[i].cpu_transcoder = cpu_transcoder;
+
+ error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
+ error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+ error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+ error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+ error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+ error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+ error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
}
/* In the code above we read the registers without checking if the power
* well was on, so here we have to clear the FPGA_DBG_RM_NOCLAIM bit to
* prevent the next I915_WRITE from detecting it and printing an error
* message. */
- if (HAS_POWER_WELL(dev))
- I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ intel_uncore_clear_errors(dev);
return error;
}
@@ -10142,22 +10690,16 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
{
int i;
+ if (!error)
+ return;
+
err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
if (HAS_POWER_WELL(dev))
err_printf(m, "PWR_WELL_CTL2: %08x\n",
error->power_well_driver);
for_each_pipe(i) {
err_printf(m, "Pipe [%d]:\n", i);
- err_printf(m, " CPU transcoder: %c\n",
- transcoder_name(error->pipe[i].cpu_transcoder));
- err_printf(m, " CONF: %08x\n", error->pipe[i].conf);
err_printf(m, " SRC: %08x\n", error->pipe[i].source);
- err_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
- err_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
- err_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
- err_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
- err_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
- err_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
err_printf(m, "Plane [%d]:\n", i);
err_printf(m, " CNTR: %08x\n", error->plane[i].control);
@@ -10178,5 +10720,16 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
err_printf(m, " POS: %08x\n", error->cursor[i].position);
err_printf(m, " BASE: %08x\n", error->cursor[i].base);
}
+
+ for (i = 0; i < error->num_transcoders; i++) {
+ err_printf(m, " CPU transcoder: %c\n",
+ transcoder_name(error->transcoder[i].cpu_transcoder));
+ err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
+ err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
+ err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
+ err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
+ err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
+ err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
+ err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
+ }
}
-#endif
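The intel_display.c hunks above move the pipe timing registers out of the per-pipe error state and into a per-transcoder array whose length is num_pipes, plus one extra slot for the eDP transcoder on DDI hardware. The stand-alone C sketch below mirrors the shape of that capture loop; the types and the read_reg() helper are invented stand-ins for the driver's real structures and I915_READ() calls.

#include <stdint.h>
#include <stdio.h>

enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_EDP };

struct transcoder_error_state {
	enum transcoder cpu_transcoder;
	uint32_t conf, htotal, vtotal;
};

/* Stand-in for an MMIO read; the driver uses I915_READ() on real registers. */
static uint32_t read_reg(enum transcoder t, int which)
{
	return (uint32_t)(0x60000 + t * 0x1000 + which * 4);
}

int main(void)
{
	static const enum transcoder transcoders[] = {
		TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_EDP,
	};
	struct transcoder_error_state error[4];
	int num_pipes = 3;	/* assumed platform value */
	int has_ddi = 1;	/* assumed: DDI parts expose the eDP transcoder */
	int num_transcoders = num_pipes + (has_ddi ? 1 : 0);

	for (int i = 0; i < num_transcoders; i++) {
		error[i].cpu_transcoder = transcoders[i];
		error[i].conf = read_reg(transcoders[i], 0);
		error[i].htotal = read_reg(transcoders[i], 1);
		error[i].vtotal = read_reg(transcoders[i], 2);
		printf("transcoder %d: conf=%08x\n", i, (unsigned)error[i].conf);
	}
	return 0;
}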
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 26e162bb3a5..2151d13772b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -276,29 +276,13 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
return status;
}
-static int
-intel_dp_aux_ch(struct intel_dp *intel_dp,
- uint8_t *send, int send_bytes,
- uint8_t *recv, int recv_size)
+static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp,
+ int index)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
- uint32_t ch_data = ch_ctl + 4;
- int i, ret, recv_bytes;
- uint32_t status;
- uint32_t aux_clock_divider;
- int try, precharge;
- bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
- /* dp aux is extremely sensitive to irq latency, hence request the
- * lowest possible wakeup latency and so prevent the cpu from going into
- * deep sleep states.
- */
- pm_qos_update_request(&dev_priv->pm_qos, 0);
-
- intel_dp_check_edp(intel_dp);
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
* hrawclk value and divide by 2 and use that
@@ -307,29 +291,61 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* clock divider.
*/
if (IS_VALLEYVIEW(dev)) {
- aux_clock_divider = 100;
+ return index ? 0 : 100;
} else if (intel_dig_port->port == PORT_A) {
+ if (index)
+ return 0;
if (HAS_DDI(dev))
- aux_clock_divider = DIV_ROUND_CLOSEST(
- intel_ddi_get_cdclk_freq(dev_priv), 2000);
+ return DIV_ROUND_CLOSEST(intel_ddi_get_cdclk_freq(dev_priv), 2000);
else if (IS_GEN6(dev) || IS_GEN7(dev))
- aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
+ return 200; /* SNB & IVB eDP input clock at 400MHz */
else
- aux_clock_divider = 225; /* eDP input clock at 450Mhz */
+ return 225; /* eDP input clock at 450MHz */
} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
/* Workaround for non-ULT HSW */
- aux_clock_divider = 74;
+ switch (index) {
+ case 0: return 63;
+ case 1: return 72;
+ default: return 0;
+ }
} else if (HAS_PCH_SPLIT(dev)) {
- aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
+ return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
} else {
- aux_clock_divider = intel_hrawclk(dev) / 2;
+ return index ? 0 : intel_hrawclk(dev) / 2;
}
+}
+
+static int
+intel_dp_aux_ch(struct intel_dp *intel_dp,
+ uint8_t *send, int send_bytes,
+ uint8_t *recv, int recv_size)
+{
+ struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = intel_dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
+ uint32_t ch_data = ch_ctl + 4;
+ uint32_t aux_clock_divider;
+ int i, ret, recv_bytes;
+ uint32_t status;
+ int try, precharge, clock = 0;
+ bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
+
+ /* dp aux is extremely sensitive to irq latency, hence request the
+ * lowest possible wakeup latency and so prevent the cpu from going into
+ * deep sleep states.
+ */
+ pm_qos_update_request(&dev_priv->pm_qos, 0);
+
+ intel_dp_check_edp(intel_dp);
if (IS_GEN6(dev))
precharge = 3;
else
precharge = 5;
+ intel_aux_display_runtime_get(dev_priv);
+
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
status = I915_READ_NOTRACE(ch_ctl);
@@ -345,37 +361,41 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
goto out;
}
- /* Must try at least 3 times according to DP spec */
- for (try = 0; try < 5; try++) {
- /* Load the send data into the aux channel data registers */
- for (i = 0; i < send_bytes; i += 4)
- I915_WRITE(ch_data + i,
- pack_aux(send + i, send_bytes - i));
-
- /* Send the command and wait for it to complete */
- I915_WRITE(ch_ctl,
- DP_AUX_CH_CTL_SEND_BUSY |
- (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
- DP_AUX_CH_CTL_TIME_OUT_400us |
- (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
- (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
- (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
-
- status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
-
- /* Clear done status and any errors */
- I915_WRITE(ch_ctl,
- status |
- DP_AUX_CH_CTL_DONE |
- DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR);
-
- if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
- DP_AUX_CH_CTL_RECEIVE_ERROR))
- continue;
+ while ((aux_clock_divider = get_aux_clock_divider(intel_dp, clock++))) {
+ /* Must try at least 3 times according to DP spec */
+ for (try = 0; try < 5; try++) {
+ /* Load the send data into the aux channel data registers */
+ for (i = 0; i < send_bytes; i += 4)
+ I915_WRITE(ch_data + i,
+ pack_aux(send + i, send_bytes - i));
+
+ /* Send the command and wait for it to complete */
+ I915_WRITE(ch_ctl,
+ DP_AUX_CH_CTL_SEND_BUSY |
+ (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+ status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
+
+ /* Clear done status and any errors */
+ I915_WRITE(ch_ctl,
+ status |
+ DP_AUX_CH_CTL_DONE |
+ DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR);
+
+ if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
+ DP_AUX_CH_CTL_RECEIVE_ERROR))
+ continue;
+ if (status & DP_AUX_CH_CTL_DONE)
+ break;
+ }
if (status & DP_AUX_CH_CTL_DONE)
break;
}
@@ -416,6 +436,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
ret = recv_bytes;
out:
pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
+ intel_aux_display_runtime_put(dev_priv);
return ret;
}
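The refactor above splits the AUX clock divider selection into get_aux_clock_divider(intel_dp, index), which hands back successive candidate dividers and returns 0 once the list is exhausted; intel_dp_aux_ch() then retries the whole transfer with each candidate. Below is a minimal, self-contained sketch of that zero-terminated retry pattern; the divider table and try_transfer() are invented for illustration and do not touch real hardware.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical divider list; 0 terminates, like the driver's helper does. */
static uint32_t get_divider(int index)
{
	static const uint32_t dividers[] = { 63, 72, 0 };
	return dividers[index];
}

/* Pretend AUX transfer that only succeeds with the second divider. */
static bool try_transfer(uint32_t divider)
{
	printf("trying divider %u\n", (unsigned)divider);
	return divider == 72;
}

int main(void)
{
	uint32_t divider;
	int clock = 0;
	bool done = false;

	while ((divider = get_divider(clock++))) {
		/* The DP spec asks for at least 3 retries per configuration. */
		for (int try = 0; try < 5; try++) {
			if (try_transfer(divider)) {
				done = true;
				break;
			}
		}
		if (done)
			break;
	}
	printf(done ? "aux transfer ok\n" : "aux transfer failed\n");
	return 0;
}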
@@ -710,8 +731,11 @@ intel_dp_compute_config(struct intel_encoder *encoder,
/* Walk through all bpp values. Luckily they're all nicely spaced with 2
* bpc in between. */
bpp = pipe_config->pipe_bpp;
- if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp)
+ if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp) {
+ DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
+ dev_priv->vbt.edp_bpp);
bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);
+ }
for (; bpp >= 6*3; bpp -= 2*3) {
mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
@@ -812,15 +836,14 @@ static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
udelay(500);
}
-static void
-intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_dp_mode_set(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = dp_to_dig_port(intel_dp)->port;
- struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
/*
* There are four kinds of DP registers:
@@ -852,7 +875,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
pipe_name(crtc->pipe));
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
- intel_write_eld(encoder, adjusted_mode);
+ intel_write_eld(&encoder->base, adjusted_mode);
}
intel_dp_init_link_config(intel_dp);
@@ -1360,6 +1383,275 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
}
pipe_config->adjusted_mode.flags |= flags;
+
+ if (dp_to_dig_port(intel_dp)->port == PORT_A) {
+ if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
+ pipe_config->port_clock = 162000;
+ else
+ pipe_config->port_clock = 270000;
+ }
+}
+
+static bool is_edp_psr(struct intel_dp *intel_dp)
+{
+ return is_edp(intel_dp) &&
+ intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
+}
+
+static bool intel_edp_is_psr_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!IS_HASWELL(dev))
+ return false;
+
+ return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
+}
+
+static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
+ struct edp_vsc_psr *vsc_psr)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
+ u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
+ u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
+ uint32_t *data = (uint32_t *) vsc_psr;
+ unsigned int i;
+
+ /* As per BSpec (Pipe Video Data Island Packet), we need to disable
+ the video DIP being updated before programming the video DIP data
+ buffer registers for the DIP being updated. */
+ I915_WRITE(ctl_reg, 0);
+ POSTING_READ(ctl_reg);
+
+ for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
+ if (i < sizeof(struct edp_vsc_psr))
+ I915_WRITE(data_reg + i, *data++);
+ else
+ I915_WRITE(data_reg + i, 0);
+ }
+
+ I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
+ POSTING_READ(ctl_reg);
+}
+
+static void intel_edp_psr_setup(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct edp_vsc_psr psr_vsc;
+
+ if (intel_dp->psr_setup_done)
+ return;
+
+ /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
+ memset(&psr_vsc, 0, sizeof(psr_vsc));
+ psr_vsc.sdp_header.HB0 = 0;
+ psr_vsc.sdp_header.HB1 = 0x7;
+ psr_vsc.sdp_header.HB2 = 0x2;
+ psr_vsc.sdp_header.HB3 = 0x8;
+ intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
+
+ /* Avoid continuous PSR exit by masking memup and hpd */
+ I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
+ EDP_PSR_DEBUG_MASK_HPD);
+
+ intel_dp->psr_setup_done = true;
+}
+
+static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp, 0);
+ int precharge = 0x3;
+ int msg_size = 5; /* Header(4) + Message(1) */
+
+ /* Enable PSR in sink */
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
+ intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE &
+ ~DP_PSR_MAIN_LINK_ACTIVE);
+ else
+ intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
+ DP_PSR_ENABLE |
+ DP_PSR_MAIN_LINK_ACTIVE);
+
+ /* Setup AUX registers */
+ I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND);
+ I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION);
+ I915_WRITE(EDP_PSR_AUX_CTL,
+ DP_AUX_CH_CTL_TIME_OUT_400us |
+ (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
+ (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
+ (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
+}
+
+static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t max_sleep_time = 0x1f;
+ uint32_t idle_frames = 1;
+ uint32_t val = 0x0;
+
+ if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
+ val |= EDP_PSR_LINK_STANDBY;
+ val |= EDP_PSR_TP2_TP3_TIME_0us;
+ val |= EDP_PSR_TP1_TIME_0us;
+ val |= EDP_PSR_SKIP_AUX_EXIT;
+ } else
+ val |= EDP_PSR_LINK_DISABLE;
+
+ I915_WRITE(EDP_PSR_CTL, val |
+ EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
+ max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
+ idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
+ EDP_PSR_ENABLE);
+}
+
+static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_device *dev = dig_port->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc = dig_port->base.base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
+ struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
+
+ if (!IS_HASWELL(dev)) {
+ DRM_DEBUG_KMS("PSR not supported on this platform\n");
+ dev_priv->no_psr_reason = PSR_NO_SOURCE;
+ return false;
+ }
+
+ if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
+ (dig_port->port != PORT_A)) {
+ DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
+ dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
+ return false;
+ }
+
+ if (!is_edp_psr(intel_dp)) {
+ DRM_DEBUG_KMS("PSR not supported by this panel\n");
+ dev_priv->no_psr_reason = PSR_NO_SINK;
+ return false;
+ }
+
+ if (!i915_enable_psr) {
+ DRM_DEBUG_KMS("PSR disable by flag\n");
+ dev_priv->no_psr_reason = PSR_MODULE_PARAM;
+ return false;
+ }
+
+ crtc = dig_port->base.base.crtc;
+ if (crtc == NULL) {
+ DRM_DEBUG_KMS("crtc not active for PSR\n");
+ dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
+ return false;
+ }
+
+ intel_crtc = to_intel_crtc(crtc);
+ if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) {
+ DRM_DEBUG_KMS("crtc not active for PSR\n");
+ dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
+ return false;
+ }
+
+ obj = to_intel_framebuffer(crtc->fb)->obj;
+ if (obj->tiling_mode != I915_TILING_X ||
+ obj->fence_reg == I915_FENCE_REG_NONE) {
+ DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
+ dev_priv->no_psr_reason = PSR_NOT_TILED;
+ return false;
+ }
+
+ if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
+ DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
+ dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
+ return false;
+ }
+
+ if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
+ S3D_ENABLE) {
+ DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
+ dev_priv->no_psr_reason = PSR_S3D_ENABLED;
+ return false;
+ }
+
+ if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
+ DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
+ dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
+ return false;
+ }
+
+ return true;
+}
+
+static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+ if (!intel_edp_psr_match_conditions(intel_dp) ||
+ intel_edp_is_psr_enabled(dev))
+ return;
+
+ /* Setup PSR once */
+ intel_edp_psr_setup(intel_dp);
+
+ /* Enable PSR on the panel */
+ intel_edp_psr_enable_sink(intel_dp);
+
+ /* Enable PSR on the host */
+ intel_edp_psr_enable_source(intel_dp);
+}
+
+void intel_edp_psr_enable(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+
+ if (intel_edp_psr_match_conditions(intel_dp) &&
+ !intel_edp_is_psr_enabled(dev))
+ intel_edp_psr_do_enable(intel_dp);
+}
+
+void intel_edp_psr_disable(struct intel_dp *intel_dp)
+{
+ struct drm_device *dev = intel_dp_to_dev(intel_dp);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!intel_edp_is_psr_enabled(dev))
+ return;
+
+ I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
+
+ /* Wait till PSR is idle */
+ if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
+ EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
+ DRM_ERROR("Timed out waiting for PSR Idle State\n");
+}
+
+void intel_edp_psr_update(struct drm_device *dev)
+{
+ struct intel_encoder *encoder;
+ struct intel_dp *intel_dp = NULL;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
+ if (encoder->type == INTEL_OUTPUT_EDP) {
+ intel_dp = enc_to_intel_dp(&encoder->base);
+
+ if (!is_edp_psr(intel_dp))
+ return;
+
+ if (!intel_edp_psr_match_conditions(intel_dp))
+ intel_edp_psr_disable(intel_dp);
+ else if (!intel_edp_is_psr_enabled(dev))
+ intel_edp_psr_do_enable(intel_dp);
+ }
}
static void intel_disable_dp(struct intel_encoder *encoder)
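The PSR support added above always runs in the same order: check that platform, panel, crtc and framebuffer meet the conditions, write the VSC SDP once, arm the sink via DPCD, and only then enable the source side through EDP_PSR_CTL. The compressed sketch below keeps just that ordering; every function body is a stub and the log strings are illustrative, not the driver's output.

#include <stdbool.h>
#include <stdio.h>

static bool match_conditions(void)  { return true; }   /* platform, panel, tiling, ... */
static bool psr_enabled(void)       { return false; }  /* would read EDP_PSR_CTL */
static void setup_vsc_once(void)    { puts("write VSC SDP header 00 07 02 08"); }
static void enable_sink(void)       { puts("DPCD: set DP_PSR_EN_CFG"); }
static void enable_source(void)     { puts("MMIO: set EDP_PSR_CTL enable bit"); }

static void psr_do_enable(void)
{
	if (!match_conditions() || psr_enabled())
		return;
	setup_vsc_once();	/* only needed the first time through */
	enable_sink();		/* panel side first */
	enable_source();	/* then the host */
}

int main(void)
{
	psr_do_enable();
	return 0;
}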
@@ -1411,47 +1703,50 @@ static void intel_enable_dp(struct intel_encoder *encoder)
intel_dp_complete_link_train(intel_dp);
intel_dp_stop_link_train(intel_dp);
ironlake_edp_backlight_on(intel_dp);
+}
- if (IS_VALLEYVIEW(dev)) {
- struct intel_digital_port *dport =
- enc_to_dig_port(&encoder->base);
- int channel = vlv_dport_to_channel(dport);
-
- vlv_wait_port_ready(dev_priv, channel);
- }
+static void vlv_enable_dp(struct intel_encoder *encoder)
+{
}
static void intel_pre_enable_dp(struct intel_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
+
+ if (dport->port == PORT_A)
+ ironlake_edp_pll_on(intel_dp);
+}
+
+static void vlv_pre_enable_dp(struct intel_encoder *encoder)
+{
+ struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
+ struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ int port = vlv_dport_to_channel(dport);
+ int pipe = intel_crtc->pipe;
+ u32 val;
- if (dport->port == PORT_A && !IS_VALLEYVIEW(dev))
- ironlake_edp_pll_on(intel_dp);
+ mutex_lock(&dev_priv->dpio_lock);
- if (IS_VALLEYVIEW(dev)) {
- struct intel_crtc *intel_crtc =
- to_intel_crtc(encoder->base.crtc);
- int port = vlv_dport_to_channel(dport);
- int pipe = intel_crtc->pipe;
- u32 val;
-
- val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
- val = 0;
- if (pipe)
- val |= (1<<21);
- else
- val &= ~(1<<21);
- val |= 0x001000c4;
- vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
+ val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
+ val = 0;
+ if (pipe)
+ val |= (1<<21);
+ else
+ val &= ~(1<<21);
+ val |= 0x001000c4;
+ vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
+ vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port), 0x00760018);
+ vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port), 0x00400888);
- vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
- 0x00760018);
- vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
- 0x00400888);
- }
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ intel_enable_dp(encoder);
+
+ vlv_wait_port_ready(dev_priv, port);
}
static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
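Several hunks in this file now take dev_priv->dpio_lock around the multi-register DPIO write sequences, so two callers cannot interleave writes on the sideband bus. The sketch below shows the same bracketing with a pthreads mutex and a dummy register writer; the register offsets are placeholders.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t dpio_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for vlv_dpio_write(); just logs the access. */
static void dpio_write(uint32_t reg, uint32_t val)
{
	printf("dpio[%#x] <- %#x\n", (unsigned)reg, (unsigned)val);
}

static void program_lane(int port)
{
	/* The whole sequence must appear atomic to other DPIO users. */
	pthread_mutex_lock(&dpio_lock);
	dpio_write(0x8200 + port, 0x00750f00);
	dpio_write(0x8238 + port, 0x00001500);
	dpio_write(0x8290 + port, 0x40400000);
	pthread_mutex_unlock(&dpio_lock);
}

int main(void)
{
	program_lane(0);
	return 0;
}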
@@ -1465,6 +1760,7 @@ static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
return;
/* Program Tx lane resets to default */
+ mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
@@ -1478,6 +1774,7 @@ static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
+ mutex_unlock(&dev_priv->dpio_lock);
}
/*
@@ -1689,6 +1986,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
return 0;
}
+ mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
@@ -1697,6 +1995,7 @@ static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
+ mutex_unlock(&dev_priv->dpio_lock);
return 0;
}
@@ -2030,7 +2329,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
struct drm_device *dev = encoder->dev;
int i;
uint8_t voltage;
- bool clock_recovery = false;
int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP;
@@ -2048,7 +2346,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
voltage = 0xff;
voltage_tries = 0;
loop_tries = 0;
- clock_recovery = false;
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint8_t link_status[DP_LINK_STATUS_SIZE];
@@ -2069,7 +2366,6 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("clock recovery OK\n");
- clock_recovery = true;
break;
}
@@ -2275,6 +2571,13 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
if (intel_dp->dpcd[DP_DPCD_REV] == 0)
return false; /* DPCD not present */
+ /* Check if the panel supports PSR */
+ memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
+ intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
+ intel_dp->psr_dpcd,
+ sizeof(intel_dp->psr_dpcd));
+ if (is_edp_psr(intel_dp))
+ DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
DP_DWN_STRM_PORT_PRESENT))
return true; /* native DP sink */
@@ -2542,6 +2845,9 @@ intel_dp_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status;
struct edid *edid = NULL;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector));
+
intel_dp->has_audio = false;
if (HAS_PCH_SPLIT(dev))
@@ -2735,10 +3041,6 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
kfree(intel_dig_port);
}
-static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
- .mode_set = intel_dp_mode_set,
-};
-
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_dp_detect,
@@ -3166,6 +3468,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
error, port_name(port));
+ intel_dp->psr_setup_done = false;
+
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
i2c_del_adapter(&intel_dp->adapter);
if (is_edp(intel_dp)) {
@@ -3216,17 +3520,21 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
intel_encoder->compute_config = intel_dp_compute_config;
- intel_encoder->enable = intel_enable_dp;
- intel_encoder->pre_enable = intel_pre_enable_dp;
+ intel_encoder->mode_set = intel_dp_mode_set;
intel_encoder->disable = intel_disable_dp;
intel_encoder->post_disable = intel_post_disable_dp;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
intel_encoder->get_config = intel_dp_get_config;
- if (IS_VALLEYVIEW(dev))
+ if (IS_VALLEYVIEW(dev)) {
intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
+ intel_encoder->pre_enable = vlv_pre_enable_dp;
+ intel_encoder->enable = vlv_enable_dp;
+ } else {
+ intel_encoder->pre_enable = intel_pre_enable_dp;
+ intel_encoder->enable = intel_enable_dp;
+ }
intel_dig_port->port = port;
intel_dig_port->dp.output_reg = output_reg;
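The init hunk above stops sharing one enable/pre_enable pair across platforms and instead installs Valleyview-specific callbacks when IS_VALLEYVIEW() is true, with the port-ready wait folded into the VLV pre-enable path. A generic sketch of that dispatch wiring follows; the struct and function names are made up for the example.

#include <stdbool.h>
#include <stdio.h>

struct encoder_ops {
	void (*pre_enable)(void);
	void (*enable)(void);
};

static void common_pre_enable(void) { puts("pch: pll on"); }
static void common_enable(void)     { puts("pch: link train + backlight"); }
static void vlv_pre_enable(void)    { puts("vlv: dpio setup, enable, wait port ready"); }
static void vlv_enable(void)        { puts("vlv: nothing left to do"); }

static void init_encoder(struct encoder_ops *ops, bool is_valleyview)
{
	if (is_valleyview) {
		ops->pre_enable = vlv_pre_enable;
		ops->enable = vlv_enable;
	} else {
		ops->pre_enable = common_pre_enable;
		ops->enable = common_enable;
	}
}

int main(void)
{
	struct encoder_ops ops;

	init_encoder(&ops, true);
	ops.pre_enable();
	ops.enable();
	return 0;
}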
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index b7d6e09456c..176080822a7 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -26,6 +26,7 @@
#define __INTEL_DRV_H__
#include <linux/i2c.h>
+#include <linux/hdmi.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include <drm/drm_crtc.h>
@@ -208,10 +209,6 @@ struct intel_crtc_config {
struct drm_display_mode requested_mode;
struct drm_display_mode adjusted_mode;
- /* This flag must be set by the encoder's compute_config callback if it
- * changes the crtc timings in the mode to prevent the crtc fixup from
- * overwriting them. Currently only lvds needs that. */
- bool timings_set;
/* Whether to set up the PCH/FDI. Note that we never allow sharing
* between pch encoders and cpu encoders. */
bool has_pch_encoder;
@@ -334,6 +331,13 @@ struct intel_crtc {
bool pch_fifo_underrun_disabled;
};
+struct intel_plane_wm_parameters {
+ uint32_t horiz_pixels;
+ uint8_t bytes_per_pixel;
+ bool enabled;
+ bool scaled;
+};
+
struct intel_plane {
struct drm_plane base;
int plane;
@@ -352,20 +356,18 @@ struct intel_plane {
* as the other pieces of the struct may not reflect the values we want
* for the watermark calculations. Currently only Haswell uses this.
*/
- struct {
- bool enable;
- uint8_t bytes_per_pixel;
- uint32_t horiz_pixels;
- } wm;
+ struct intel_plane_wm_parameters wm;
void (*update_plane)(struct drm_plane *plane,
+ struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h);
- void (*disable_plane)(struct drm_plane *plane);
+ void (*disable_plane)(struct drm_plane *plane,
+ struct drm_crtc *crtc);
int (*update_colorkey)(struct drm_plane *plane,
struct drm_intel_sprite_colorkey *key);
void (*get_colorkey)(struct drm_plane *plane,
@@ -397,66 +399,6 @@ struct cxsr_latency {
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
#define to_intel_plane(x) container_of(x, struct intel_plane, base)
-#define DIP_HEADER_SIZE 5
-
-#define DIP_TYPE_AVI 0x82
-#define DIP_VERSION_AVI 0x2
-#define DIP_LEN_AVI 13
-#define DIP_AVI_PR_1 0
-#define DIP_AVI_PR_2 1
-#define DIP_AVI_RGB_QUANT_RANGE_DEFAULT (0 << 2)
-#define DIP_AVI_RGB_QUANT_RANGE_LIMITED (1 << 2)
-#define DIP_AVI_RGB_QUANT_RANGE_FULL (2 << 2)
-
-#define DIP_TYPE_SPD 0x83
-#define DIP_VERSION_SPD 0x1
-#define DIP_LEN_SPD 25
-#define DIP_SPD_UNKNOWN 0
-#define DIP_SPD_DSTB 0x1
-#define DIP_SPD_DVDP 0x2
-#define DIP_SPD_DVHS 0x3
-#define DIP_SPD_HDDVR 0x4
-#define DIP_SPD_DVC 0x5
-#define DIP_SPD_DSC 0x6
-#define DIP_SPD_VCD 0x7
-#define DIP_SPD_GAME 0x8
-#define DIP_SPD_PC 0x9
-#define DIP_SPD_BD 0xa
-#define DIP_SPD_SCD 0xb
-
-struct dip_infoframe {
- uint8_t type; /* HB0 */
- uint8_t ver; /* HB1 */
- uint8_t len; /* HB2 - body len, not including checksum */
- uint8_t ecc; /* Header ECC */
- uint8_t checksum; /* PB0 */
- union {
- struct {
- /* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
- uint8_t Y_A_B_S;
- /* PB2 - C 7:6, M 5:4, R 3:0 */
- uint8_t C_M_R;
- /* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
- uint8_t ITC_EC_Q_SC;
- /* PB4 - VIC 6:0 */
- uint8_t VIC;
- /* PB5 - YQ 7:6, CN 5:4, PR 3:0 */
- uint8_t YQ_CN_PR;
- /* PB6 to PB13 */
- uint16_t top_bar_end;
- uint16_t bottom_bar_start;
- uint16_t left_bar_end;
- uint16_t right_bar_start;
- } __attribute__ ((packed)) avi;
- struct {
- uint8_t vn[8];
- uint8_t pd[16];
- uint8_t sdi;
- } __attribute__ ((packed)) spd;
- uint8_t payload[27];
- } __attribute__ ((packed)) body;
-} __attribute__((packed));
-
struct intel_hdmi {
u32 hdmi_reg;
int ddc_bus;
@@ -467,7 +409,8 @@ struct intel_hdmi {
enum hdmi_force_audio force_audio;
bool rgb_quant_range_selectable;
void (*write_infoframe)(struct drm_encoder *encoder,
- struct dip_infoframe *frame);
+ enum hdmi_infoframe_type type,
+ const uint8_t *frame, ssize_t len);
void (*set_infoframes)(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
};
@@ -487,6 +430,7 @@ struct intel_dp {
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
+ uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
@@ -498,6 +442,7 @@ struct intel_dp {
int backlight_off_delay;
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
+ bool psr_setup_done;
struct intel_connector *attached_connector;
};
@@ -549,13 +494,6 @@ struct intel_unpin_work {
bool enable_stall_check;
};
-struct intel_fbc_work {
- struct delayed_work work;
- struct drm_crtc *crtc;
- struct drm_framebuffer *fb;
- int interval;
-};
-
int intel_pch_rawclk(struct drm_device *dev);
int intel_connector_update_modes(struct drm_connector *connector,
@@ -574,7 +512,6 @@ extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
extern bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config);
-extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
extern void intel_dvo_init(struct drm_device *dev);
@@ -639,14 +576,10 @@ struct intel_set_config {
bool mode_changed;
};
-extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *old_fb);
-extern void intel_modeset_disable(struct drm_device *dev);
extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
-extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
extern void intel_connector_dpms(struct drm_connector *, int mode);
extern bool intel_connector_get_hw_state(struct intel_connector *connector);
extern void intel_modeset_check_state(struct drm_device *dev);
@@ -712,12 +645,10 @@ extern bool intel_get_load_detect_pipe(struct drm_connector *connector,
extern void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old);
-extern void intelfb_restore(void);
extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
-extern void intel_enable_clock_gating(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_i915_gem_object *obj,
@@ -728,6 +659,7 @@ extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
+extern void intel_framebuffer_fini(struct intel_framebuffer *fb);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
@@ -747,6 +679,22 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
extern void intel_fb_output_poll_changed(struct drm_device *dev);
extern void intel_fb_restore_mode(struct drm_device *dev);
+struct intel_shared_dpll *
+intel_crtc_to_shared_dpll(struct intel_crtc *crtc);
+
+void assert_shared_dpll(struct drm_i915_private *dev_priv,
+ struct intel_shared_dpll *pll,
+ bool state);
+#define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
+#define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
+void assert_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state);
+#define assert_pll_enabled(d, p) assert_pll(d, p, true)
+#define assert_pll_disabled(d, p) assert_pll(d, p, false)
+void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state);
+#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
+#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
bool state);
#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
@@ -762,9 +710,10 @@ extern void intel_ddi_init(struct drm_device *dev, enum port port);
/* For use by IVB LP watermark workaround in intel_sprite.c */
extern void intel_update_watermarks(struct drm_device *dev);
-extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
- uint32_t sprite_width,
- int pixel_size, bool enable);
+extern void intel_update_sprite_watermarks(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ uint32_t sprite_width, int pixel_size,
+ bool enabled, bool scaled);
extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
unsigned int tiling_mode,
@@ -780,7 +729,6 @@ extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
extern void intel_init_pm(struct drm_device *dev);
/* FBC */
extern bool intel_fbc_enabled(struct drm_device *dev);
-extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern void intel_update_fbc(struct drm_device *dev);
/* IPS */
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
@@ -796,8 +744,8 @@ extern void intel_init_power_well(struct drm_device *dev);
extern void intel_set_power_well(struct drm_device *dev, bool enable);
extern void intel_enable_gt_powersave(struct drm_device *dev);
extern void intel_disable_gt_powersave(struct drm_device *dev);
-extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
extern void ironlake_teardown_rc6(struct drm_device *dev);
+void gen6_update_ring_freq(struct drm_device *dev);
extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe);
@@ -825,4 +773,24 @@ extern bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
enum transcoder pch_transcoder,
bool enable);
+extern void intel_edp_psr_enable(struct intel_dp *intel_dp);
+extern void intel_edp_psr_disable(struct intel_dp *intel_dp);
+extern void intel_edp_psr_update(struct drm_device *dev);
+extern void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
+ bool switch_to_fclk, bool allow_power_down);
+extern void hsw_restore_lcpll(struct drm_i915_private *dev_priv);
+extern void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+extern void ilk_disable_gt_irq(struct drm_i915_private *dev_priv,
+ uint32_t mask);
+extern void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
+extern void snb_disable_pm_irq(struct drm_i915_private *dev_priv,
+ uint32_t mask);
+extern void hsw_enable_pc8_work(struct work_struct *__work);
+extern void hsw_enable_package_c8(struct drm_i915_private *dev_priv);
+extern void hsw_disable_package_c8(struct drm_i915_private *dev_priv);
+extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
+extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
+extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
+extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
+
#endif /* __INTEL_DRV_H__ */
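The new assert_shared_dpll/assert_pll/assert_fdi_rx_pll declarations above follow a recurring i915 idiom: one checker takes a bool state, and two thin macros name the enabled and disabled cases. Here is a tiny freestanding version of the idiom, with a fake hardware flag instead of a register read.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool pll_is_on = true;	/* pretend hardware state */

static void assert_pll(bool state)
{
	/* The driver WARNs instead of aborting; assert() keeps the sketch short. */
	assert(pll_is_on == state && "PLL state mismatch");
}

#define assert_pll_enabled()  assert_pll(true)
#define assert_pll_disabled() assert_pll(false)

int main(void)
{
	assert_pll_enabled();
	puts("PLL asserted enabled");
	return 0;
}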
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index eb2020eb2b7..406303b509c 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -100,15 +100,14 @@ struct intel_dvo {
bool panel_wants_dither;
};
-static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
+static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder)
{
- return container_of(encoder, struct intel_dvo, base.base);
+ return container_of(encoder, struct intel_dvo, base);
}
static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
{
- return container_of(intel_attached_encoder(connector),
- struct intel_dvo, base);
+ return enc_to_dvo(intel_attached_encoder(connector));
}
static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
@@ -123,7 +122,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp;
tmp = I915_READ(intel_dvo->dev.dvo_reg);
@@ -140,7 +139,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp, flags = 0;
tmp = I915_READ(intel_dvo->dev.dvo_reg);
@@ -159,7 +158,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
static void intel_disable_dvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
@@ -171,7 +170,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder)
static void intel_enable_dvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(&encoder->base);
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 dvo_reg = intel_dvo->dev.dvo_reg;
u32 temp = I915_READ(dvo_reg);
@@ -241,11 +240,11 @@ static int intel_dvo_mode_valid(struct drm_connector *connector,
return intel_dvo->dev.dev_ops->mode_valid(&intel_dvo->dev, mode);
}
-static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static bool intel_dvo_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_config *pipe_config)
{
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
/* If we have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
@@ -267,23 +266,23 @@ static bool intel_dvo_mode_fixup(struct drm_encoder *encoder,
}
if (intel_dvo->dev.dev_ops->mode_fixup)
- return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev, mode, adjusted_mode);
+ return intel_dvo->dev.dev_ops->mode_fixup(&intel_dvo->dev,
+ &pipe_config->requested_mode,
+ adjusted_mode);
return true;
}
-static void intel_dvo_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_dvo_mode_set(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
- int pipe = intel_crtc->pipe;
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
+ struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ int pipe = crtc->pipe;
u32 dvo_val;
u32 dvo_reg = intel_dvo->dev.dvo_reg, dvo_srcdim_reg;
- int dpll_reg = DPLL(pipe);
switch (dvo_reg) {
case DVOA:
@@ -298,7 +297,9 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
break;
}
- intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev, mode, adjusted_mode);
+ intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
+ &crtc->config.requested_mode,
+ adjusted_mode);
/* Save the data order, since I don't know what it should be set to. */
dvo_val = I915_READ(dvo_reg) &
@@ -314,8 +315,6 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
dvo_val |= DVO_VSYNC_ACTIVE_HIGH;
- I915_WRITE(dpll_reg, I915_READ(dpll_reg) | DPLL_DVO_HIGH_SPEED);
-
/*I915_WRITE(DVOB_SRCDIM,
(adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
(adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
@@ -335,6 +334,8 @@ static enum drm_connector_status
intel_dvo_detect(struct drm_connector *connector, bool force)
{
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector));
return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
}
@@ -372,11 +373,6 @@ static void intel_dvo_destroy(struct drm_connector *connector)
kfree(connector);
}
-static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
- .mode_fixup = intel_dvo_mode_fixup,
- .mode_set = intel_dvo_mode_set,
-};
-
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
.dpms = intel_dvo_dpms,
.detect = intel_dvo_detect,
@@ -392,7 +388,7 @@ static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs
static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
{
- struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+ struct intel_dvo *intel_dvo = enc_to_dvo(to_intel_encoder(encoder));
if (intel_dvo->dev.dev_ops->destroy)
intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev);
@@ -471,6 +467,8 @@ void intel_dvo_init(struct drm_device *dev)
intel_encoder->enable = intel_enable_dvo;
intel_encoder->get_hw_state = intel_dvo_get_hw_state;
intel_encoder->get_config = intel_dvo_get_config;
+ intel_encoder->compute_config = intel_dvo_compute_config;
+ intel_encoder->mode_set = intel_dvo_mode_set;
intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
/* Now, try to find a controller */
@@ -537,9 +535,6 @@ void intel_dvo_init(struct drm_device *dev)
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_encoder_helper_add(&intel_encoder->base,
- &intel_dvo_helper_funcs);
-
intel_connector_attach_encoder(intel_connector, intel_encoder);
if (dvo->type == INTEL_DVO_CHIP_LVDS) {
/* For our LVDS chipsets, we should hopefully be able
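The enc_to_dvo() rework above only changes which embedded member the container_of() starts from, now that every caller already holds an intel_encoder. A freestanding illustration of that accessor pattern follows; the nested struct layout here is invented and much smaller than the driver's.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_encoder { int id; };
struct intel_encoder { struct base_encoder base; };
struct intel_dvo { struct intel_encoder base; int dvo_reg; };

/* New-style accessor: resolve from the intel_encoder member in one step. */
static struct intel_dvo *enc_to_dvo(struct intel_encoder *encoder)
{
	return container_of(encoder, struct intel_dvo, base);
}

int main(void)
{
	struct intel_dvo dvo = { .base.base.id = 7, .dvo_reg = 0x61140 };
	struct intel_dvo *back = enc_to_dvo(&dvo.base);

	printf("round-trip ok: id=%d reg=%#x\n",
	       back->base.base.id, (unsigned)back->dvo_reg);
	return 0;
}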
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index dff669e2387..bc2100007b2 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -139,11 +139,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
- info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
+ info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
info->fix.smem_len = size;
info->screen_base =
- ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+ ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
size);
if (!info->screen_base) {
ret = -ENOSPC;
@@ -166,9 +166,9 @@ static int intelfb_create(struct drm_fb_helper *helper,
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
+ DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08lx, bo %p\n",
fb->width, fb->height,
- obj->gtt_offset, obj);
+ i915_gem_obj_ggtt_offset(obj), obj);
mutex_unlock(&dev->struct_mutex);
@@ -193,26 +193,21 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
static void intel_fbdev_destroy(struct drm_device *dev,
struct intel_fbdev *ifbdev)
{
- struct fb_info *info;
- struct intel_framebuffer *ifb = &ifbdev->ifb;
-
if (ifbdev->helper.fbdev) {
- info = ifbdev->helper.fbdev;
+ struct fb_info *info = ifbdev->helper.fbdev;
+
unregister_framebuffer(info);
iounmap(info->screen_base);
if (info->cmap.len)
fb_dealloc_cmap(&info->cmap);
+
framebuffer_release(info);
}
drm_fb_helper_fini(&ifbdev->helper);
- drm_framebuffer_unregister_private(&ifb->base);
- drm_framebuffer_cleanup(&ifb->base);
- if (ifb->obj) {
- drm_gem_object_unreference_unlocked(&ifb->obj->base);
- ifb->obj = NULL;
- }
+ drm_framebuffer_unregister_private(&ifbdev->ifb.base);
+ intel_framebuffer_fini(&ifbdev->ifb);
}
int intel_fbdev_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 98df2a0c85b..4148cc85bf7 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -29,6 +29,7 @@
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/delay.h>
+#include <linux/hdmi.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
@@ -66,89 +67,83 @@ static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
}
-void intel_dip_infoframe_csum(struct dip_infoframe *frame)
+static u32 g4x_infoframe_index(enum hdmi_infoframe_type type)
{
- uint8_t *data = (uint8_t *)frame;
- uint8_t sum = 0;
- unsigned i;
-
- frame->checksum = 0;
- frame->ecc = 0;
-
- for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++)
- sum += data[i];
-
- frame->checksum = 0x100 - sum;
-}
-
-static u32 g4x_infoframe_index(struct dip_infoframe *frame)
-{
- switch (frame->type) {
- case DIP_TYPE_AVI:
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_SELECT_AVI;
- case DIP_TYPE_SPD:
+ case HDMI_INFOFRAME_TYPE_SPD:
return VIDEO_DIP_SELECT_SPD;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return VIDEO_DIP_SELECT_VENDOR;
default:
- DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
}
}
-static u32 g4x_infoframe_enable(struct dip_infoframe *frame)
+static u32 g4x_infoframe_enable(enum hdmi_infoframe_type type)
{
- switch (frame->type) {
- case DIP_TYPE_AVI:
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI;
- case DIP_TYPE_SPD:
+ case HDMI_INFOFRAME_TYPE_SPD:
return VIDEO_DIP_ENABLE_SPD;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return VIDEO_DIP_ENABLE_VENDOR;
default:
- DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
}
}
-static u32 hsw_infoframe_enable(struct dip_infoframe *frame)
+static u32 hsw_infoframe_enable(enum hdmi_infoframe_type type)
{
- switch (frame->type) {
- case DIP_TYPE_AVI:
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
return VIDEO_DIP_ENABLE_AVI_HSW;
- case DIP_TYPE_SPD:
+ case HDMI_INFOFRAME_TYPE_SPD:
return VIDEO_DIP_ENABLE_SPD_HSW;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return VIDEO_DIP_ENABLE_VS_HSW;
default:
- DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
}
}
-static u32 hsw_infoframe_data_reg(struct dip_infoframe *frame,
+static u32 hsw_infoframe_data_reg(enum hdmi_infoframe_type type,
enum transcoder cpu_transcoder)
{
- switch (frame->type) {
- case DIP_TYPE_AVI:
+ switch (type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
return HSW_TVIDEO_DIP_AVI_DATA(cpu_transcoder);
- case DIP_TYPE_SPD:
+ case HDMI_INFOFRAME_TYPE_SPD:
return HSW_TVIDEO_DIP_SPD_DATA(cpu_transcoder);
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ return HSW_TVIDEO_DIP_VS_DATA(cpu_transcoder);
default:
- DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
+ DRM_DEBUG_DRIVER("unknown info frame type %d\n", type);
return 0;
}
}
static void g4x_write_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
+ enum hdmi_infoframe_type type,
+ const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val = I915_READ(VIDEO_DIP_CTL);
- unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ int i;
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(frame);
+ val |= g4x_infoframe_index(type);
- val &= ~g4x_infoframe_enable(frame);
+ val &= ~g4x_infoframe_enable(type);
I915_WRITE(VIDEO_DIP_CTL, val);
@@ -162,7 +157,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(VIDEO_DIP_DATA, 0);
mmiowb();
- val |= g4x_infoframe_enable(frame);
+ val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
@@ -171,22 +166,22 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
}
static void ibx_write_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
+ enum hdmi_infoframe_type type,
+ const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
- unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(frame);
+ val |= g4x_infoframe_index(type);
- val &= ~g4x_infoframe_enable(frame);
+ val &= ~g4x_infoframe_enable(type);
I915_WRITE(reg, val);
@@ -200,7 +195,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
- val |= g4x_infoframe_enable(frame);
+ val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
@@ -209,25 +204,25 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
}
static void cpt_write_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
+ enum hdmi_infoframe_type type,
+ const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
- unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ int i, reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(frame);
+ val |= g4x_infoframe_index(type);
/* The DIP control register spec says that we need to update the AVI
* infoframe without clearing its enable bit */
- if (frame->type != DIP_TYPE_AVI)
- val &= ~g4x_infoframe_enable(frame);
+ if (type != HDMI_INFOFRAME_TYPE_AVI)
+ val &= ~g4x_infoframe_enable(type);
I915_WRITE(reg, val);
@@ -241,7 +236,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
- val |= g4x_infoframe_enable(frame);
+ val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
@@ -250,22 +245,22 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
}
static void vlv_write_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
+ enum hdmi_infoframe_type type,
+ const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
- unsigned i, len = DIP_HEADER_SIZE + frame->len;
+ int i, reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
- val |= g4x_infoframe_index(frame);
+ val |= g4x_infoframe_index(type);
- val &= ~g4x_infoframe_enable(frame);
+ val &= ~g4x_infoframe_enable(type);
I915_WRITE(reg, val);
@@ -279,7 +274,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
mmiowb();
- val |= g4x_infoframe_enable(frame);
+ val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
@@ -288,21 +283,24 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
}
static void hsw_write_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
+ enum hdmi_infoframe_type type,
+ const uint8_t *frame, ssize_t len)
{
uint32_t *data = (uint32_t *)frame;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
- u32 data_reg = hsw_infoframe_data_reg(frame, intel_crtc->config.cpu_transcoder);
- unsigned int i, len = DIP_HEADER_SIZE + frame->len;
+ u32 data_reg;
+ int i;
u32 val = I915_READ(ctl_reg);
+ data_reg = hsw_infoframe_data_reg(type,
+ intel_crtc->config.cpu_transcoder);
if (data_reg == 0)
return;
- val &= ~hsw_infoframe_enable(frame);
+ val &= ~hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
mmiowb();
@@ -315,18 +313,48 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
I915_WRITE(data_reg + i, 0);
mmiowb();
- val |= hsw_infoframe_enable(frame);
+ val |= hsw_infoframe_enable(type);
I915_WRITE(ctl_reg, val);
POSTING_READ(ctl_reg);
}
-static void intel_set_infoframe(struct drm_encoder *encoder,
- struct dip_infoframe *frame)
+/*
+ * The data we write to the DIP data buffer registers is 1 byte bigger than the
+ * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
+ * at 0). It's also a byte used by DisplayPort so the same DIP registers can be
+ * used for both technologies.
+ *
+ * DW0: Reserved/ECC/DP | HB2 | HB1 | HB0
+ * DW1: DB3 | DB2 | DB1 | DB0
+ * DW2: DB7 | DB6 | DB5 | DB4
+ * DW3: ...
+ *
+ * (HB is Header Byte, DB is Data Byte)
+ *
+ * The hdmi pack() functions don't know about that hardware specific hole so we
+ * trick them by giving an offset into the buffer and moving back the header
+ * bytes by one.
+ */
+static void intel_write_infoframe(struct drm_encoder *encoder,
+ union hdmi_infoframe *frame)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ uint8_t buffer[VIDEO_DIP_DATA_SIZE];
+ ssize_t len;
- intel_dip_infoframe_csum(frame);
- intel_hdmi->write_infoframe(encoder, frame);
+ /* see comment above for the reason for this offset */
+ len = hdmi_infoframe_pack(frame, buffer + 1, sizeof(buffer) - 1);
+ if (len < 0)
+ return;
+
+ /* Insert the 'hole' (see big comment above) at position 3 */
+ buffer[0] = buffer[1];
+ buffer[1] = buffer[2];
+ buffer[2] = buffer[3];
+ buffer[3] = 0;
+ len++;
+
+ intel_hdmi->write_infoframe(encoder, frame->any.type, buffer, len);
}
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
@@ -334,40 +362,57 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct dip_infoframe avi_if = {
- .type = DIP_TYPE_AVI,
- .ver = DIP_VERSION_AVI,
- .len = DIP_LEN_AVI,
- };
+ union hdmi_infoframe frame;
+ int ret;
- if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
- avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ adjusted_mode);
+ if (ret < 0) {
+ DRM_ERROR("couldn't fill AVI infoframe\n");
+ return;
+ }
if (intel_hdmi->rgb_quant_range_selectable) {
if (intel_crtc->config.limited_color_range)
- avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
+ frame.avi.quantization_range =
+ HDMI_QUANTIZATION_RANGE_LIMITED;
else
- avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
+ frame.avi.quantization_range =
+ HDMI_QUANTIZATION_RANGE_FULL;
}
- avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
-
- intel_set_infoframe(encoder, &avi_if);
+ intel_write_infoframe(encoder, &frame);
}
static void intel_hdmi_set_spd_infoframe(struct drm_encoder *encoder)
{
- struct dip_infoframe spd_if;
+ union hdmi_infoframe frame;
+ int ret;
+
+ ret = hdmi_spd_infoframe_init(&frame.spd, "Intel", "Integrated gfx");
+ if (ret < 0) {
+ DRM_ERROR("couldn't fill SPD infoframe\n");
+ return;
+ }
- memset(&spd_if, 0, sizeof(spd_if));
- spd_if.type = DIP_TYPE_SPD;
- spd_if.ver = DIP_VERSION_SPD;
- spd_if.len = DIP_LEN_SPD;
- strcpy(spd_if.body.spd.vn, "Intel");
- strcpy(spd_if.body.spd.pd, "Integrated gfx");
- spd_if.body.spd.sdi = DIP_SPD_PC;
+ frame.spd.sdi = HDMI_SPD_SDI_PC;
- intel_set_infoframe(encoder, &spd_if);
+ intel_write_infoframe(encoder, &frame);
+}
+
+static void
+intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *adjusted_mode)
+{
+ union hdmi_infoframe frame;
+ int ret;
+
+ ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
+ adjusted_mode);
+ if (ret < 0)
+ return;
+
+ intel_write_infoframe(encoder, &frame);
}
static void g4x_set_infoframes(struct drm_encoder *encoder,
@@ -432,6 +477,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
+ intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
static void ibx_set_infoframes(struct drm_encoder *encoder,
@@ -493,6 +539,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
+ intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
static void cpt_set_infoframes(struct drm_encoder *encoder,
@@ -528,6 +575,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
+ intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
static void vlv_set_infoframes(struct drm_encoder *encoder,
@@ -562,6 +610,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
+ intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
static void hsw_set_infoframes(struct drm_encoder *encoder,
@@ -589,16 +638,16 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_avi_infoframe(encoder, adjusted_mode);
intel_hdmi_set_spd_infoframe(encoder);
+ intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
-static void intel_hdmi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_hdmi_mode_set(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
+ struct drm_display_mode *adjusted_mode = &crtc->config.adjusted_mode;
u32 hdmi_val;
hdmi_val = SDVO_ENCODING_HDMI;
@@ -609,7 +658,7 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
hdmi_val |= SDVO_HSYNC_ACTIVE_HIGH;
- if (intel_crtc->config.pipe_bpp > 24)
+ if (crtc->config.pipe_bpp > 24)
hdmi_val |= HDMI_COLOR_FORMAT_12bpc;
else
hdmi_val |= SDVO_COLOR_FORMAT_8bpc;
@@ -620,21 +669,21 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
if (intel_hdmi->has_audio) {
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
- pipe_name(intel_crtc->pipe));
+ pipe_name(crtc->pipe));
hdmi_val |= SDVO_AUDIO_ENABLE;
hdmi_val |= HDMI_MODE_SELECT_HDMI;
- intel_write_eld(encoder, adjusted_mode);
+ intel_write_eld(&encoder->base, adjusted_mode);
}
if (HAS_PCH_CPT(dev))
- hdmi_val |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe);
+ hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
else
- hdmi_val |= SDVO_PIPE_SEL(intel_crtc->pipe);
+ hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
POSTING_READ(intel_hdmi->hdmi_reg);
- intel_hdmi->set_infoframes(encoder, adjusted_mode);
+ intel_hdmi->set_infoframes(&encoder->base, adjusted_mode);
}
static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
@@ -719,14 +768,10 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
I915_WRITE(intel_hdmi->hdmi_reg, temp);
POSTING_READ(intel_hdmi->hdmi_reg);
}
+}
- if (IS_VALLEYVIEW(dev)) {
- struct intel_digital_port *dport =
- enc_to_dig_port(&encoder->base);
- int channel = vlv_dport_to_channel(dport);
-
- vlv_wait_port_ready(dev_priv, channel);
- }
+static void vlv_enable_hdmi(struct intel_encoder *encoder)
+{
}
static void intel_disable_hdmi(struct intel_encoder *encoder)
@@ -785,10 +830,22 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
}
}
+static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+{
+ struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+
+ if (IS_G4X(dev))
+ return 165000;
+ else if (IS_HASWELL(dev))
+ return 300000;
+ else
+ return 225000;
+}
+
static int intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- if (mode->clock > 165000)
+ if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
return MODE_CLOCK_LOW;
@@ -806,6 +863,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
+ int portclock_limit = hdmi_portclock_limit(intel_hdmi);
int desired_bpp;
if (intel_hdmi->color_range_auto) {
@@ -829,7 +887,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
* outputs. We also need to check that the higher clock still fits
* within limits.
*/
- if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= 225000
+ if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
&& HAS_PCH_SPLIT(dev)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
@@ -846,7 +904,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
pipe_config->pipe_bpp = desired_bpp;
}
- if (adjusted_mode->clock > 225000) {
+ if (adjusted_mode->clock > portclock_limit) {
DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
return false;
}
@@ -866,6 +924,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector));
+
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
intel_hdmi->rgb_quant_range_selectable = false;
@@ -1017,6 +1078,7 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
return;
/* Enable clock channels for this port */
+ mutex_lock(&dev_priv->dpio_lock);
val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
val = 0;
if (pipe)
@@ -1047,6 +1109,11 @@ static void intel_hdmi_pre_enable(struct intel_encoder *encoder)
0x00760018);
vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
0x00400888);
+ mutex_unlock(&dev_priv->dpio_lock);
+
+ intel_enable_hdmi(encoder);
+
+ vlv_wait_port_ready(dev_priv, port);
}
static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
@@ -1060,6 +1127,7 @@ static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
return;
/* Program Tx lane resets to default */
+ mutex_lock(&dev_priv->dpio_lock);
vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
@@ -1078,6 +1146,7 @@ static void intel_hdmi_pre_pll_enable(struct intel_encoder *encoder)
0x00002000);
vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port),
DPIO_TX_OCALINIT_EN);
+ mutex_unlock(&dev_priv->dpio_lock);
}
static void intel_hdmi_post_disable(struct intel_encoder *encoder)
@@ -1100,10 +1169,6 @@ static void intel_hdmi_destroy(struct drm_connector *connector)
kfree(connector);
}
-static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
- .mode_set = intel_hdmi_mode_set,
-};
-
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_hdmi_detect,
@@ -1208,7 +1273,6 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
{
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
- struct drm_encoder *encoder;
struct intel_connector *intel_connector;
intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
@@ -1222,21 +1286,22 @@ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port)
}
intel_encoder = &intel_dig_port->base;
- encoder = &intel_encoder->base;
drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
intel_encoder->compute_config = intel_hdmi_compute_config;
- intel_encoder->enable = intel_enable_hdmi;
+ intel_encoder->mode_set = intel_hdmi_mode_set;
intel_encoder->disable = intel_disable_hdmi;
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
intel_encoder->get_config = intel_hdmi_get_config;
if (IS_VALLEYVIEW(dev)) {
- intel_encoder->pre_enable = intel_hdmi_pre_enable;
intel_encoder->pre_pll_enable = intel_hdmi_pre_pll_enable;
+ intel_encoder->pre_enable = intel_hdmi_pre_enable;
+ intel_encoder->enable = vlv_enable_hdmi;
intel_encoder->post_disable = intel_hdmi_post_disable;
+ } else {
+ intel_encoder->enable = intel_enable_hdmi;
}
intel_encoder->type = INTEL_OUTPUT_HDMI;
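
A minimal standalone sketch (illustrative bytes only, not driver code) of the byte shuffle described in the DIP data buffer comment in intel_write_infoframe() above: the packed infoframe is written one byte in, the three header bytes are pulled back by one, and the zeroed reserved/ECC byte ends up at offset 3, i.e. bits 31:24 of DW0 once the buffer is consumed as little-endian 32-bit words. The header 0x82/0x02/0x0d is a real AVI infoframe header; the checksum and payload bytes here are made up.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        /* A packed HDMI infoframe starts HB0 HB1 HB2 CS DB1 DB2 ... */
        const uint8_t packed[] = { 0x82, 0x02, 0x0d, 0x5f, 0x10, 0x28 };
        uint8_t dip[32] = { 0 };

        /* "Pack" one byte in, then move the header bytes back by one,
         * leaving a zeroed reserved/ECC byte at offset 3. */
        memcpy(dip + 1, packed, sizeof(packed));
        dip[0] = dip[1];
        dip[1] = dip[2];
        dip[2] = dip[3];
        dip[3] = 0;

        /* Print the first two data words, most significant byte first, to
         * match the "DW0: Reserved/ECC/DP | HB2 | HB1 | HB0" layout. */
        for (int i = 0; i < 8; i += 4)
                printf("DW%d: %02x %02x %02x %02x\n", i / 4,
                       dip[i + 3], dip[i + 2], dip[i + 1], dip[i]);
        return 0;
}
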
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 639fe192997..d1c1e0f7f26 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -398,6 +398,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
int i, reg_offset;
int ret = 0;
+ intel_aux_display_runtime_get(dev_priv);
mutex_lock(&dev_priv->gmbus_mutex);
if (bus->force_bit) {
@@ -497,6 +498,7 @@ timeout:
out:
mutex_unlock(&dev_priv->gmbus_mutex);
+ intel_aux_display_runtime_put(dev_priv);
return ret;
}
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 61348eae2f0..4d33278e31f 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -122,17 +122,25 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
* This is an exception to the general rule that mode_set doesn't turn
* things on.
*/
-static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
+static void intel_pre_enable_lvds(struct intel_encoder *encoder)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *fixed_mode =
lvds_encoder->attached_connector->base.panel.fixed_mode;
- int pipe = intel_crtc->pipe;
+ int pipe = crtc->pipe;
u32 temp;
+ if (HAS_PCH_SPLIT(dev)) {
+ assert_fdi_rx_pll_disabled(dev_priv, pipe);
+ assert_shared_dpll_disabled(dev_priv,
+ intel_crtc_to_shared_dpll(crtc));
+ } else {
+ assert_pll_disabled(dev_priv, pipe);
+ }
+
temp = I915_READ(lvds_encoder->reg);
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
@@ -149,7 +157,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
/* set the corresponding LVDS_BORDER bit */
temp &= ~LVDS_BORDER_ENABLE;
- temp |= intel_crtc->config.gmch_pfit.lvds_border_bits;
+ temp |= crtc->config.gmch_pfit.lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
@@ -169,8 +177,7 @@ static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
if (INTEL_INFO(dev)->gen == 4) {
/* Bspec wording suggests that LVDS port dithering only exists
* for 18bpp panels. */
- if (intel_crtc->config.dither &&
- intel_crtc->config.pipe_bpp == 18)
+ if (crtc->config.dither && crtc->config.pipe_bpp == 18)
temp |= LVDS_ENABLE_DITHER;
else
temp &= ~LVDS_ENABLE_DITHER;
@@ -312,14 +319,12 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
return true;
}
-static void intel_lvds_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_lvds_mode_set(struct intel_encoder *encoder)
{
/*
- * The LVDS pin pair will already have been turned on in the
- * intel_crtc_mode_set since it has a large impact on the DPLL
- * settings.
+ * We don't do anything here; the LVDS port is fully set up in the
+ * pre-enable hook - the ordering constraints for enabling the LVDS port
+ * vs. enabling the display PLL are too strict.
*/
}
@@ -336,6 +341,9 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
struct drm_device *dev = connector->dev;
enum drm_connector_status status;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector));
+
status = intel_panel_detect(dev);
if (status != connector_status_unknown)
return status;
@@ -497,10 +505,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
return 0;
}
-static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
- .mode_set = intel_lvds_mode_set,
-};
-
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
.get_modes = intel_lvds_get_modes,
.mode_valid = intel_lvds_mode_valid,
@@ -959,8 +963,9 @@ void intel_lvds_init(struct drm_device *dev)
DRM_MODE_ENCODER_LVDS);
intel_encoder->enable = intel_enable_lvds;
- intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
+ intel_encoder->pre_enable = intel_pre_enable_lvds;
intel_encoder->compute_config = intel_lvds_compute_config;
+ intel_encoder->mode_set = intel_lvds_mode_set;
intel_encoder->disable = intel_disable_lvds;
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_encoder->get_config = intel_lvds_get_config;
@@ -977,7 +982,6 @@ void intel_lvds_init(struct drm_device *dev)
else
intel_encoder->crtc_mask = (1 << 1);
- drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a3698812e9c..ddfd0aefe0c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_wc(dev_priv->gtt.mappable,
- overlay->reg_bo->gtt_offset);
+ i915_gem_obj_ggtt_offset(overlay->reg_bo));
return regs;
}
@@ -740,7 +740,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
swidth = params->src_w;
swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width);
sheight = params->src_h;
- iowrite32(new_bo->gtt_offset + params->offset_Y, &regs->OBUF_0Y);
+ iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
ostride = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -754,8 +754,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
params->src_w/uv_hscale);
swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
sheight |= (params->src_h/uv_vscale) << 16;
- iowrite32(new_bo->gtt_offset + params->offset_U, &regs->OBUF_0U);
- iowrite32(new_bo->gtt_offset + params->offset_V, &regs->OBUF_0V);
+ iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_U, &regs->OBUF_0U);
+ iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_V, &regs->OBUF_0V);
ostride |= params->stride_UV << 16;
}
@@ -1333,7 +1333,9 @@ void intel_setup_overlay(struct drm_device *dev)
overlay->dev = dev;
- reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
+ reg_bo = NULL;
+ if (!OVERLAY_NEEDS_PHYSICAL(dev))
+ reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE);
if (reg_bo == NULL)
reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
if (reg_bo == NULL)
@@ -1350,12 +1352,12 @@ void intel_setup_overlay(struct drm_device *dev)
}
overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
} else {
- ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true, false);
+ ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, true, false);
if (ret) {
DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo;
}
- overlay->flip_addr = reg_bo->gtt_offset;
+ overlay->flip_addr = i915_gem_obj_ggtt_offset(reg_bo);
ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
if (ret) {
@@ -1412,9 +1414,6 @@ void intel_cleanup_overlay(struct drm_device *dev)
kfree(dev_priv->overlay);
}
-#ifdef CONFIG_DEBUG_FS
-#include <linux/seq_file.h>
-
struct intel_overlay_error_state {
struct overlay_registers regs;
unsigned long base;
@@ -1435,7 +1434,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
- overlay->reg_bo->gtt_offset);
+ i915_gem_obj_ggtt_offset(overlay->reg_bo));
return regs;
}
@@ -1468,7 +1467,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
error->base = (__force long)overlay->reg_bo->phys_obj->handle->vaddr;
else
- error->base = overlay->reg_bo->gtt_offset;
+ error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
regs = intel_overlay_map_regs_atomic(overlay);
if (!regs)
@@ -1537,4 +1536,3 @@ intel_overlay_print_error_state(struct drm_i915_error_state_buf *m,
P(UVSCALEV);
#undef P
}
-#endif
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 67e2c1f1c9a..a43c33bc4a3 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -194,9 +194,6 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
adjusted_mode->vdisplay == mode->vdisplay)
goto out;
- drm_mode_set_crtcinfo(adjusted_mode, 0);
- pipe_config->timings_set = true;
-
switch (fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
@@ -497,8 +494,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max)
goto out;
}
- /* scale to hardware */
- level = level * freq / max;
+ /* scale to hardware, but be careful not to overflow */
+ if (freq < max)
+ level = level * freq / max;
+ else
+ level = freq / max * level;
dev_priv->backlight.level = level;
if (dev_priv->backlight.device)
@@ -515,6 +515,17 @@ void intel_panel_disable_backlight(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long flags;
+ /*
+ * Do not disable backlight on the vgaswitcheroo path. When switching
+ * away from i915, the other client may depend on i915 to handle the
+ * backlight. This may leave the backlight on unnecessarily if the other
+ * client is never activated.
+ */
+ if (dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+ DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
+ return;
+ }
+
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
dev_priv->backlight.enabled = false;
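
A standalone illustration (made-up numbers, not taken from any real panel) of the 32-bit overflow that the backlight scaling change above guards against: multiplying level * freq first wraps once the product exceeds UINT32_MAX, while the divide-first ordering the patch falls back to when freq >= max stays in range at the cost of a little precision.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Made-up values, chosen only so that level * freq exceeds 32 bits. */
        uint32_t level = 200;           /* requested brightness */
        uint32_t max   = 255;           /* user-visible scale */
        uint32_t freq  = 30000000u;     /* hardware PWM period, freq >= max */

        uint32_t wrapped      = level * freq / max;  /* product wraps first */
        uint32_t divide_first = freq / max * level;  /* the patch's freq >= max branch */
        uint32_t exact        = (uint32_t)((uint64_t)level * freq / max);

        /* wrapped lands nowhere near exact; divide_first is only slightly low */
        printf("wrapped=%u divide-first=%u exact=%u\n",
               wrapped, divide_first, exact);
        return 0;
}
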
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 6a347f54d39..46056820d1d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -30,8 +30,7 @@
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
-
-#define FORCEWAKE_ACK_TIMEOUT_MS 2
+#include <drm/i915_powerwell.h>
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
* framebuffer contents in-memory, aiming at reducing the required bandwidth
@@ -86,7 +85,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
int plane, i;
u32 fbc_ctl, fbc_ctl2;
- cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
+ cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
if (fb->pitches[0] < cfb_pitch)
cfb_pitch = fb->pitches[0];
@@ -217,7 +216,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
- I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
+ I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -274,7 +273,7 @@ static void gen7_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- I915_WRITE(IVB_FBC_RT_BASE, obj->gtt_offset);
+ I915_WRITE(IVB_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj));
I915_WRITE(ILK_DPFC_CONTROL, DPFC_CTL_EN | DPFC_CTL_LIMIT_1X |
IVB_DPFC_CTL_FENCE_EN |
@@ -325,7 +324,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
struct drm_i915_private *dev_priv = dev->dev_private;
mutex_lock(&dev->struct_mutex);
- if (work == dev_priv->fbc_work) {
+ if (work == dev_priv->fbc.fbc_work) {
/* Double check that we haven't switched fb without cancelling
* the prior work.
*/
@@ -333,12 +332,12 @@ static void intel_fbc_work_fn(struct work_struct *__work)
dev_priv->display.enable_fbc(work->crtc,
work->interval);
- dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
- dev_priv->cfb_fb = work->crtc->fb->base.id;
- dev_priv->cfb_y = work->crtc->y;
+ dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
+ dev_priv->fbc.fb_id = work->crtc->fb->base.id;
+ dev_priv->fbc.y = work->crtc->y;
}
- dev_priv->fbc_work = NULL;
+ dev_priv->fbc.fbc_work = NULL;
}
mutex_unlock(&dev->struct_mutex);
@@ -347,28 +346,28 @@ static void intel_fbc_work_fn(struct work_struct *__work)
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
- if (dev_priv->fbc_work == NULL)
+ if (dev_priv->fbc.fbc_work == NULL)
return;
DRM_DEBUG_KMS("cancelling pending FBC enable\n");
/* Synchronisation is provided by struct_mutex and checking of
- * dev_priv->fbc_work, so we can perform the cancellation
+ * dev_priv->fbc.fbc_work, so we can perform the cancellation
* entirely asynchronously.
*/
- if (cancel_delayed_work(&dev_priv->fbc_work->work))
+ if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
/* tasklet was killed before being run, clean up */
- kfree(dev_priv->fbc_work);
+ kfree(dev_priv->fbc.fbc_work);
/* Mark the work as no longer wanted so that if it does
* wake up (because the work was already running and waiting
* for our mutex), it will discover that it is no longer
* necessary to run.
*/
- dev_priv->fbc_work = NULL;
+ dev_priv->fbc.fbc_work = NULL;
}
-void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
{
struct intel_fbc_work *work;
struct drm_device *dev = crtc->dev;
@@ -381,6 +380,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
work = kzalloc(sizeof *work, GFP_KERNEL);
if (work == NULL) {
+ DRM_ERROR("Failed to allocate FBC work structure\n");
dev_priv->display.enable_fbc(crtc, interval);
return;
}
@@ -390,9 +390,7 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
work->interval = interval;
INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
- dev_priv->fbc_work = work;
-
- DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
+ dev_priv->fbc.fbc_work = work;
/* Delay the actual enabling to let pageflipping cease and the
* display to settle before starting the compression. Note that
@@ -404,6 +402,8 @@ void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
* following the termination of the page-flipping sequence
* and indeed performing the enable as a co-routine and not
* waiting synchronously upon the vblank.
+ *
+ * WaFbcWaitForVBlankBeforeEnable:ilk,snb
*/
schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
@@ -418,7 +418,17 @@ void intel_disable_fbc(struct drm_device *dev)
return;
dev_priv->display.disable_fbc(dev);
- dev_priv->cfb_plane = -1;
+ dev_priv->fbc.plane = -1;
+}
+
+static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
+ enum no_fbc_reason reason)
+{
+ if (dev_priv->fbc.no_fbc_reason == reason)
+ return false;
+
+ dev_priv->fbc.no_fbc_reason = reason;
+ return true;
}
/**
@@ -448,14 +458,18 @@ void intel_update_fbc(struct drm_device *dev)
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
- int enable_fbc;
unsigned int max_hdisplay, max_vdisplay;
- if (!i915_powersave)
+ if (!I915_HAS_FBC(dev)) {
+ set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
return;
+ }
- if (!I915_HAS_FBC(dev))
+ if (!i915_powersave) {
+ if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
+ DRM_DEBUG_KMS("fbc disabled per module param\n");
return;
+ }
/*
* If FBC is already on, we just have to verify that we can
@@ -470,8 +484,8 @@ void intel_update_fbc(struct drm_device *dev)
if (intel_crtc_active(tmp_crtc) &&
!to_intel_crtc(tmp_crtc)->primary_disabled) {
if (crtc) {
- DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
+ if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
+ DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
goto out_disable;
}
crtc = tmp_crtc;
@@ -479,8 +493,8 @@ void intel_update_fbc(struct drm_device *dev)
}
if (!crtc || crtc->fb == NULL) {
- DRM_DEBUG_KMS("no output, disabling\n");
- dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
+ if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
+ DRM_DEBUG_KMS("no output, disabling\n");
goto out_disable;
}
@@ -489,23 +503,22 @@ void intel_update_fbc(struct drm_device *dev)
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
- enable_fbc = i915_enable_fbc;
- if (enable_fbc < 0) {
- DRM_DEBUG_KMS("fbc set to per-chip default\n");
- enable_fbc = 1;
- if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
- enable_fbc = 0;
+ if (i915_enable_fbc < 0 &&
+ INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
+ if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
+ DRM_DEBUG_KMS("disabled per chip default\n");
+ goto out_disable;
}
- if (!enable_fbc) {
- DRM_DEBUG_KMS("fbc disabled per module param\n");
- dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
+ if (!i915_enable_fbc) {
+ if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
+ DRM_DEBUG_KMS("fbc disabled per module param\n");
goto out_disable;
}
if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
(crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
- DRM_DEBUG_KMS("mode incompatible with compression, "
- "disabling\n");
- dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
+ if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
+ DRM_DEBUG_KMS("mode incompatible with compression, "
+ "disabling\n");
goto out_disable;
}
@@ -518,14 +531,14 @@ void intel_update_fbc(struct drm_device *dev)
}
if ((crtc->mode.hdisplay > max_hdisplay) ||
(crtc->mode.vdisplay > max_vdisplay)) {
- DRM_DEBUG_KMS("mode too large for compression, disabling\n");
- dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
+ if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
+ DRM_DEBUG_KMS("mode too large for compression, disabling\n");
goto out_disable;
}
if ((IS_I915GM(dev) || IS_I945GM(dev) || IS_HASWELL(dev)) &&
intel_crtc->plane != 0) {
- DRM_DEBUG_KMS("plane not 0, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_BAD_PLANE;
+ if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
+ DRM_DEBUG_KMS("plane not 0, disabling compression\n");
goto out_disable;
}
@@ -534,8 +547,8 @@ void intel_update_fbc(struct drm_device *dev)
*/
if (obj->tiling_mode != I915_TILING_X ||
obj->fence_reg == I915_FENCE_REG_NONE) {
- DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_NOT_TILED;
+ if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
+ DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
goto out_disable;
}
@@ -544,8 +557,8 @@ void intel_update_fbc(struct drm_device *dev)
goto out_disable;
if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
- DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
+ DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
goto out_disable;
}
@@ -554,9 +567,9 @@ void intel_update_fbc(struct drm_device *dev)
* cannot be unpinned (and have its GTT offset and fence revoked)
* without first being decoupled from the scanout and FBC disabled.
*/
- if (dev_priv->cfb_plane == intel_crtc->plane &&
- dev_priv->cfb_fb == fb->base.id &&
- dev_priv->cfb_y == crtc->y)
+ if (dev_priv->fbc.plane == intel_crtc->plane &&
+ dev_priv->fbc.fb_id == fb->base.id &&
+ dev_priv->fbc.y == crtc->y)
return;
if (intel_fbc_enabled(dev)) {
@@ -588,6 +601,7 @@ void intel_update_fbc(struct drm_device *dev)
}
intel_enable_fbc(crtc, 500);
+ dev_priv->fbc.no_fbc_reason = FBC_OK;
return;
out_disable:
@@ -1666,9 +1680,6 @@ static void i830_update_wm(struct drm_device *dev)
I915_WRITE(FW_BLC, fwater_lo);
}
-#define ILK_LP0_PLANE_LATENCY 700
-#define ILK_LP0_CURSOR_LATENCY 1300
-
/*
* Check the wm result.
*
@@ -1783,9 +1794,9 @@ static void ironlake_update_wm(struct drm_device *dev)
enabled = 0;
if (g4x_compute_wm0(dev, PIPE_A,
&ironlake_display_wm_info,
- ILK_LP0_PLANE_LATENCY,
+ dev_priv->wm.pri_latency[0] * 100,
&ironlake_cursor_wm_info,
- ILK_LP0_CURSOR_LATENCY,
+ dev_priv->wm.cur_latency[0] * 100,
&plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEA_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
@@ -1797,9 +1808,9 @@ static void ironlake_update_wm(struct drm_device *dev)
if (g4x_compute_wm0(dev, PIPE_B,
&ironlake_display_wm_info,
- ILK_LP0_PLANE_LATENCY,
+ dev_priv->wm.pri_latency[0] * 100,
&ironlake_cursor_wm_info,
- ILK_LP0_CURSOR_LATENCY,
+ dev_priv->wm.cur_latency[0] * 100,
&plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEB_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
@@ -1823,7 +1834,7 @@ static void ironlake_update_wm(struct drm_device *dev)
/* WM1 */
if (!ironlake_compute_srwm(dev, 1, enabled,
- ILK_READ_WM1_LATENCY() * 500,
+ dev_priv->wm.pri_latency[1] * 500,
&ironlake_display_srwm_info,
&ironlake_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -1831,14 +1842,14 @@ static void ironlake_update_wm(struct drm_device *dev)
I915_WRITE(WM1_LP_ILK,
WM1_LP_SR_EN |
- (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
/* WM2 */
if (!ironlake_compute_srwm(dev, 2, enabled,
- ILK_READ_WM2_LATENCY() * 500,
+ dev_priv->wm.pri_latency[2] * 500,
&ironlake_display_srwm_info,
&ironlake_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -1846,7 +1857,7 @@ static void ironlake_update_wm(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK,
WM2_LP_EN |
- (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
@@ -1860,7 +1871,7 @@ static void ironlake_update_wm(struct drm_device *dev)
static void sandybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
u32 val;
int fbc_wm, plane_wm, cursor_wm;
unsigned int enabled;
@@ -1915,7 +1926,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
/* WM1 */
if (!ironlake_compute_srwm(dev, 1, enabled,
- SNB_READ_WM1_LATENCY() * 500,
+ dev_priv->wm.pri_latency[1] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -1923,14 +1934,14 @@ static void sandybridge_update_wm(struct drm_device *dev)
I915_WRITE(WM1_LP_ILK,
WM1_LP_SR_EN |
- (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
/* WM2 */
if (!ironlake_compute_srwm(dev, 2, enabled,
- SNB_READ_WM2_LATENCY() * 500,
+ dev_priv->wm.pri_latency[2] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -1938,14 +1949,14 @@ static void sandybridge_update_wm(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK,
WM2_LP_EN |
- (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
/* WM3 */
if (!ironlake_compute_srwm(dev, 3, enabled,
- SNB_READ_WM3_LATENCY() * 500,
+ dev_priv->wm.pri_latency[3] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -1953,7 +1964,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
I915_WRITE(WM3_LP_ILK,
WM3_LP_EN |
- (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
@@ -1962,7 +1973,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
static void ivybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ int latency = dev_priv->wm.pri_latency[0] * 100; /* In unit 0.1us */
u32 val;
int fbc_wm, plane_wm, cursor_wm;
int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
@@ -2032,7 +2043,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
/* WM1 */
if (!ironlake_compute_srwm(dev, 1, enabled,
- SNB_READ_WM1_LATENCY() * 500,
+ dev_priv->wm.pri_latency[1] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -2040,14 +2051,14 @@ static void ivybridge_update_wm(struct drm_device *dev)
I915_WRITE(WM1_LP_ILK,
WM1_LP_SR_EN |
- (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (dev_priv->wm.pri_latency[1] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
/* WM2 */
if (!ironlake_compute_srwm(dev, 2, enabled,
- SNB_READ_WM2_LATENCY() * 500,
+ dev_priv->wm.pri_latency[2] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
@@ -2055,19 +2066,19 @@ static void ivybridge_update_wm(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK,
WM2_LP_EN |
- (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (dev_priv->wm.pri_latency[2] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
/* WM3, note we have to correct the cursor latency */
if (!ironlake_compute_srwm(dev, 3, enabled,
- SNB_READ_WM3_LATENCY() * 500,
+ dev_priv->wm.pri_latency[3] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &ignore_cursor_wm) ||
!ironlake_compute_srwm(dev, 3, enabled,
- 2 * SNB_READ_WM3_LATENCY() * 500,
+ dev_priv->wm.cur_latency[3] * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
@@ -2075,14 +2086,14 @@ static void ivybridge_update_wm(struct drm_device *dev)
I915_WRITE(WM3_LP_ILK,
WM3_LP_EN |
- (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (dev_priv->wm.pri_latency[3] << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
}
-static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev,
- struct drm_crtc *crtc)
+static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
+ struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t pixel_rate, pfit_size;
@@ -2112,30 +2123,38 @@ static uint32_t hsw_wm_get_pixel_rate(struct drm_device *dev,
return pixel_rate;
}
-static uint32_t hsw_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
+/* latency must be in 0.1us units. */
+static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
uint32_t latency)
{
uint64_t ret;
+ if (WARN(latency == 0, "Latency value missing\n"))
+ return UINT_MAX;
+
ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;
return ret;
}
-static uint32_t hsw_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
+/* latency must be in 0.1us units. */
+static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
uint32_t horiz_pixels, uint8_t bytes_per_pixel,
uint32_t latency)
{
uint32_t ret;
+ if (WARN(latency == 0, "Latency value missing\n"))
+ return UINT_MAX;
+
ret = (latency * pixel_rate) / (pipe_htotal * 10000);
ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
ret = DIV_ROUND_UP(ret, 64) + 2;
return ret;
}
-static uint32_t hsw_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
+static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
uint8_t bytes_per_pixel)
{
return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
@@ -2143,15 +2162,11 @@ static uint32_t hsw_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
struct hsw_pipe_wm_parameters {
bool active;
- bool sprite_enabled;
- uint8_t pri_bytes_per_pixel;
- uint8_t spr_bytes_per_pixel;
- uint8_t cur_bytes_per_pixel;
- uint32_t pri_horiz_pixels;
- uint32_t spr_horiz_pixels;
- uint32_t cur_horiz_pixels;
uint32_t pipe_htotal;
uint32_t pixel_rate;
+ struct intel_plane_wm_parameters pri;
+ struct intel_plane_wm_parameters spr;
+ struct intel_plane_wm_parameters cur;
};
struct hsw_wm_maximums {
@@ -2161,15 +2176,6 @@ struct hsw_wm_maximums {
uint16_t fbc;
};
-struct hsw_lp_wm_result {
- bool enable;
- bool fbc_enable;
- uint32_t pri_val;
- uint32_t spr_val;
- uint32_t cur_val;
- uint32_t fbc_val;
-};
-
struct hsw_wm_values {
uint32_t wm_pipe[3];
uint32_t wm_lp[3];
@@ -2178,128 +2184,289 @@ struct hsw_wm_values {
bool enable_fbc_wm;
};
-enum hsw_data_buf_partitioning {
- HSW_DATA_BUF_PART_1_2,
- HSW_DATA_BUF_PART_5_6,
+/* used in computing the new watermarks state */
+struct intel_wm_config {
+ unsigned int num_pipes_active;
+ bool sprites_enabled;
+ bool sprites_scaled;
+ bool fbc_wm_enabled;
};
-/* For both WM_PIPE and WM_LP. */
-static uint32_t hsw_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static uint32_t ilk_compute_pri_wm(struct hsw_pipe_wm_parameters *params,
uint32_t mem_value,
bool is_lp)
{
uint32_t method1, method2;
- /* TODO: for now, assume the primary plane is always enabled. */
- if (!params->active)
+ if (!params->active || !params->pri.enabled)
return 0;
- method1 = hsw_wm_method1(params->pixel_rate,
- params->pri_bytes_per_pixel,
+ method1 = ilk_wm_method1(params->pixel_rate,
+ params->pri.bytes_per_pixel,
mem_value);
if (!is_lp)
return method1;
- method2 = hsw_wm_method2(params->pixel_rate,
+ method2 = ilk_wm_method2(params->pixel_rate,
params->pipe_htotal,
- params->pri_horiz_pixels,
- params->pri_bytes_per_pixel,
+ params->pri.horiz_pixels,
+ params->pri.bytes_per_pixel,
mem_value);
return min(method1, method2);
}
-/* For both WM_PIPE and WM_LP. */
-static uint32_t hsw_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static uint32_t ilk_compute_spr_wm(struct hsw_pipe_wm_parameters *params,
uint32_t mem_value)
{
uint32_t method1, method2;
- if (!params->active || !params->sprite_enabled)
+ if (!params->active || !params->spr.enabled)
return 0;
- method1 = hsw_wm_method1(params->pixel_rate,
- params->spr_bytes_per_pixel,
+ method1 = ilk_wm_method1(params->pixel_rate,
+ params->spr.bytes_per_pixel,
mem_value);
- method2 = hsw_wm_method2(params->pixel_rate,
+ method2 = ilk_wm_method2(params->pixel_rate,
params->pipe_htotal,
- params->spr_horiz_pixels,
- params->spr_bytes_per_pixel,
+ params->spr.horiz_pixels,
+ params->spr.bytes_per_pixel,
mem_value);
return min(method1, method2);
}
-/* For both WM_PIPE and WM_LP. */
-static uint32_t hsw_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
+/*
+ * For both WM_PIPE and WM_LP.
+ * mem_value must be in 0.1us units.
+ */
+static uint32_t ilk_compute_cur_wm(struct hsw_pipe_wm_parameters *params,
uint32_t mem_value)
{
- if (!params->active)
+ if (!params->active || !params->cur.enabled)
return 0;
- return hsw_wm_method2(params->pixel_rate,
+ return ilk_wm_method2(params->pixel_rate,
params->pipe_htotal,
- params->cur_horiz_pixels,
- params->cur_bytes_per_pixel,
+ params->cur.horiz_pixels,
+ params->cur.bytes_per_pixel,
mem_value);
}
/* Only for WM_LP. */
-static uint32_t hsw_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
- uint32_t pri_val,
- uint32_t mem_value)
+static uint32_t ilk_compute_fbc_wm(struct hsw_pipe_wm_parameters *params,
+ uint32_t pri_val)
{
- if (!params->active)
+ if (!params->active || !params->pri.enabled)
return 0;
- return hsw_wm_fbc(pri_val,
- params->pri_horiz_pixels,
- params->pri_bytes_per_pixel);
+ return ilk_wm_fbc(pri_val,
+ params->pri.horiz_pixels,
+ params->pri.bytes_per_pixel);
}
-static bool hsw_compute_lp_wm(uint32_t mem_value, struct hsw_wm_maximums *max,
- struct hsw_pipe_wm_parameters *params,
- struct hsw_lp_wm_result *result)
+static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
- enum pipe pipe;
- uint32_t pri_val[3], spr_val[3], cur_val[3], fbc_val[3];
+ if (INTEL_INFO(dev)->gen >= 7)
+ return 768;
+ else
+ return 512;
+}
- for (pipe = PIPE_A; pipe <= PIPE_C; pipe++) {
- struct hsw_pipe_wm_parameters *p = &params[pipe];
+/* Calculate the maximum primary/sprite plane watermark */
+static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
+ int level,
+ const struct intel_wm_config *config,
+ enum intel_ddb_partitioning ddb_partitioning,
+ bool is_sprite)
+{
+ unsigned int fifo_size = ilk_display_fifo_size(dev);
+ unsigned int max;
- pri_val[pipe] = hsw_compute_pri_wm(p, mem_value, true);
- spr_val[pipe] = hsw_compute_spr_wm(p, mem_value);
- cur_val[pipe] = hsw_compute_cur_wm(p, mem_value);
- fbc_val[pipe] = hsw_compute_fbc_wm(p, pri_val[pipe], mem_value);
- }
+ /* if sprites aren't enabled, sprites get nothing */
+ if (is_sprite && !config->sprites_enabled)
+ return 0;
- result->pri_val = max3(pri_val[0], pri_val[1], pri_val[2]);
- result->spr_val = max3(spr_val[0], spr_val[1], spr_val[2]);
- result->cur_val = max3(cur_val[0], cur_val[1], cur_val[2]);
- result->fbc_val = max3(fbc_val[0], fbc_val[1], fbc_val[2]);
+ /* HSW allows LP1+ watermarks even with multiple pipes */
+ if (level == 0 || config->num_pipes_active > 1) {
+ fifo_size /= INTEL_INFO(dev)->num_pipes;
- if (result->fbc_val > max->fbc) {
- result->fbc_enable = false;
- result->fbc_val = 0;
- } else {
- result->fbc_enable = true;
+ /*
+ * For some reason the non-self-refresh
+ * FIFO size is only half of the
+ * self-refresh FIFO size on ILK/SNB.
+ */
+ if (INTEL_INFO(dev)->gen <= 6)
+ fifo_size /= 2;
+ }
+
+ if (config->sprites_enabled) {
+ /* level 0 is always calculated with 1:1 split */
+ if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
+ if (is_sprite)
+ fifo_size *= 5;
+ fifo_size /= 6;
+ } else {
+ fifo_size /= 2;
+ }
}
+ /* clamp to max that the registers can hold */
+ if (INTEL_INFO(dev)->gen >= 7)
+ /* IVB/HSW primary/sprite plane watermarks */
+ max = level == 0 ? 127 : 1023;
+ else if (!is_sprite)
+ /* ILK/SNB primary plane watermarks */
+ max = level == 0 ? 127 : 511;
+ else
+ /* ILK/SNB sprite plane watermarks */
+ max = level == 0 ? 63 : 255;
+
+ return min(fifo_size, max);
+}
+
+/* Calculate the maximum cursor plane watermark */
+static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
+ int level,
+ const struct intel_wm_config *config)
+{
+ /* HSW LP1+ watermarks w/ multiple pipes */
+ if (level > 0 && config->num_pipes_active > 1)
+ return 64;
+
+ /* otherwise just report max that registers can hold */
+ if (INTEL_INFO(dev)->gen >= 7)
+ return level == 0 ? 63 : 255;
+ else
+ return level == 0 ? 31 : 63;
+}
+
+/* Calculate the maximum FBC watermark */
+static unsigned int ilk_fbc_wm_max(void)
+{
+ /* max that registers can hold */
+ return 15;
+}
+
+static void ilk_wm_max(struct drm_device *dev,
+ int level,
+ const struct intel_wm_config *config,
+ enum intel_ddb_partitioning ddb_partitioning,
+ struct hsw_wm_maximums *max)
+{
+ max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
+ max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
+ max->cur = ilk_cursor_wm_max(dev, level, config);
+ max->fbc = ilk_fbc_wm_max();
+}
+
+static bool ilk_check_wm(int level,
+ const struct hsw_wm_maximums *max,
+ struct intel_wm_level *result)
+{
+ bool ret;
+
+ /* already determined to be invalid? */
+ if (!result->enable)
+ return false;
+
result->enable = result->pri_val <= max->pri &&
result->spr_val <= max->spr &&
result->cur_val <= max->cur;
- return result->enable;
+
+ ret = result->enable;
+
+ /*
+ * HACK until we can pre-compute everything,
+ * and thus fail gracefully if LP0 watermarks
+ * are exceeded...
+ */
+ if (level == 0 && !result->enable) {
+ if (result->pri_val > max->pri)
+ DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
+ level, result->pri_val, max->pri);
+ if (result->spr_val > max->spr)
+ DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
+ level, result->spr_val, max->spr);
+ if (result->cur_val > max->cur)
+ DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
+ level, result->cur_val, max->cur);
+
+ result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
+ result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
+ result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
+ result->enable = true;
+ }
+
+ DRM_DEBUG_KMS("WM%d: %sabled\n", level, result->enable ? "en" : "dis");
+
+ return ret;
+}
+
+static void ilk_compute_wm_level(struct drm_i915_private *dev_priv,
+ int level,
+ struct hsw_pipe_wm_parameters *p,
+ struct intel_wm_level *result)
+{
+ uint16_t pri_latency = dev_priv->wm.pri_latency[level];
+ uint16_t spr_latency = dev_priv->wm.spr_latency[level];
+ uint16_t cur_latency = dev_priv->wm.cur_latency[level];
+
+ /* WM1+ latency values stored in 0.5us units */
+ if (level > 0) {
+ pri_latency *= 5;
+ spr_latency *= 5;
+ cur_latency *= 5;
+ }
+
+ result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
+ result->spr_val = ilk_compute_spr_wm(p, spr_latency);
+ result->cur_val = ilk_compute_cur_wm(p, cur_latency);
+ result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
+ result->enable = true;
+}
+
+static bool hsw_compute_lp_wm(struct drm_i915_private *dev_priv,
+ int level, struct hsw_wm_maximums *max,
+ struct hsw_pipe_wm_parameters *params,
+ struct intel_wm_level *result)
+{
+ enum pipe pipe;
+ struct intel_wm_level res[3];
+
+ for (pipe = PIPE_A; pipe <= PIPE_C; pipe++)
+ ilk_compute_wm_level(dev_priv, level, &params[pipe], &res[pipe]);
+
+ result->pri_val = max3(res[0].pri_val, res[1].pri_val, res[2].pri_val);
+ result->spr_val = max3(res[0].spr_val, res[1].spr_val, res[2].spr_val);
+ result->cur_val = max3(res[0].cur_val, res[1].cur_val, res[2].cur_val);
+ result->fbc_val = max3(res[0].fbc_val, res[1].fbc_val, res[2].fbc_val);
+ result->enable = true;
+
+ return ilk_check_wm(level, max, result);
}
static uint32_t hsw_compute_wm_pipe(struct drm_i915_private *dev_priv,
- uint32_t mem_value, enum pipe pipe,
+ enum pipe pipe,
struct hsw_pipe_wm_parameters *params)
{
uint32_t pri_val, cur_val, spr_val;
+ /* WM0 latency values stored in 0.1us units */
+ uint16_t pri_latency = dev_priv->wm.pri_latency[0];
+ uint16_t spr_latency = dev_priv->wm.spr_latency[0];
+ uint16_t cur_latency = dev_priv->wm.cur_latency[0];
- pri_val = hsw_compute_pri_wm(params, mem_value, false);
- spr_val = hsw_compute_spr_wm(params, mem_value);
- cur_val = hsw_compute_cur_wm(params, mem_value);
+ pri_val = ilk_compute_pri_wm(params, pri_latency, false);
+ spr_val = ilk_compute_spr_wm(params, spr_latency);
+ cur_val = ilk_compute_cur_wm(params, cur_latency);
WARN(pri_val > 127,
"Primary WM error, mode not supported for pipe %c\n",
@@ -2338,27 +2505,116 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
PIPE_WM_LINETIME_TIME(linetime);
}
+static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_HASWELL(dev)) {
+ uint64_t sskpd = I915_READ64(MCH_SSKPD);
+
+ wm[0] = (sskpd >> 56) & 0xFF;
+ if (wm[0] == 0)
+ wm[0] = sskpd & 0xF;
+ wm[1] = (sskpd >> 4) & 0xFF;
+ wm[2] = (sskpd >> 12) & 0xFF;
+ wm[3] = (sskpd >> 20) & 0x1FF;
+ wm[4] = (sskpd >> 32) & 0x1FF;
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ uint32_t sskpd = I915_READ(MCH_SSKPD);
+
+ wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
+ wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
+ wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
+ wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
+ } else if (INTEL_INFO(dev)->gen >= 5) {
+ uint32_t mltr = I915_READ(MLTR_ILK);
+
+ /* ILK primary LP0 latency is 700 ns */
+ wm[0] = 7;
+ wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
+ wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
+ }
+}
+
+static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
+{
+ /* ILK sprite LP0 latency is 1300 ns */
+ if (INTEL_INFO(dev)->gen == 5)
+ wm[0] = 13;
+}
+
+static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
+{
+ /* ILK cursor LP0 latency is 1300 ns */
+ if (INTEL_INFO(dev)->gen == 5)
+ wm[0] = 13;
+
+ /* WaDoubleCursorLP3Latency:ivb */
+ if (IS_IVYBRIDGE(dev))
+ wm[3] *= 2;
+}
+
+static void intel_print_wm_latency(struct drm_device *dev,
+ const char *name,
+ const uint16_t wm[5])
+{
+ int level, max_level;
+
+ /* how many WM levels are we expecting */
+ if (IS_HASWELL(dev))
+ max_level = 4;
+ else if (INTEL_INFO(dev)->gen >= 6)
+ max_level = 3;
+ else
+ max_level = 2;
+
+ for (level = 0; level <= max_level; level++) {
+ unsigned int latency = wm[level];
+
+ if (latency == 0) {
+ DRM_ERROR("%s WM%d latency not provided\n",
+ name, level);
+ continue;
+ }
+
+ /* WM1+ latency values in 0.5us units */
+ if (level > 0)
+ latency *= 5;
+
+ DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
+ name, level, wm[level],
+ latency / 10, latency % 10);
+ }
+}
+
+static void intel_setup_wm_latency(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
+
+ memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
+ sizeof(dev_priv->wm.pri_latency));
+ memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
+ sizeof(dev_priv->wm.pri_latency));
+
+ intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
+ intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
+
+ intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
+ intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
+ intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
+}
+
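
An illustrative aside with hypothetical register values: the latency arrays filled in by intel_setup_wm_latency() above stay in raw hardware units - 0.1 us steps for WM0 and 0.5 us steps for WM1+ - which is why the ILK/SNB/IVB watermark paths in this patch multiply them by 100 and 500 respectively to get nanoseconds.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Hypothetical wm.pri_latency[] contents, e.g. as read on HSW:
         * WM0 in 0.1us units, WM1+ in 0.5us units. */
        uint16_t wm[5] = { 12, 4, 10, 45, 85 };

        printf("WM0: %u ns (%u x 100)\n", wm[0] * 100u, wm[0]);
        for (int level = 1; level < 5; level++)
                printf("WM%d: %u ns (%u x 500)\n", level,
                       wm[level] * 500u, wm[level]);
        return 0;
}
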
static void hsw_compute_wm_parameters(struct drm_device *dev,
struct hsw_pipe_wm_parameters *params,
- uint32_t *wm,
struct hsw_wm_maximums *lp_max_1_2,
struct hsw_wm_maximums *lp_max_5_6)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
struct drm_plane *plane;
- uint64_t sskpd = I915_READ64(MCH_SSKPD);
enum pipe pipe;
- int pipes_active = 0, sprites_enabled = 0;
-
- if ((sskpd >> 56) & 0xFF)
- wm[0] = (sskpd >> 56) & 0xFF;
- else
- wm[0] = sskpd & 0xF;
- wm[1] = ((sskpd >> 4) & 0xFF) * 5;
- wm[2] = ((sskpd >> 12) & 0xFF) * 5;
- wm[3] = ((sskpd >> 20) & 0x1FF) * 5;
- wm[4] = ((sskpd >> 32) & 0x1FF) * 5;
+ struct intel_wm_config config = {};
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -2371,15 +2627,18 @@ static void hsw_compute_wm_parameters(struct drm_device *dev,
if (!p->active)
continue;
- pipes_active++;
+ config.num_pipes_active++;
p->pipe_htotal = intel_crtc->config.adjusted_mode.htotal;
- p->pixel_rate = hsw_wm_get_pixel_rate(dev, crtc);
- p->pri_bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
- p->cur_bytes_per_pixel = 4;
- p->pri_horiz_pixels =
+ p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
+ p->pri.bytes_per_pixel = crtc->fb->bits_per_pixel / 8;
+ p->cur.bytes_per_pixel = 4;
+ p->pri.horiz_pixels =
intel_crtc->config.requested_mode.hdisplay;
- p->cur_horiz_pixels = 64;
+ p->cur.horiz_pixels = 64;
+ /* TODO: for now, assume primary and cursor planes are always enabled. */
+ p->pri.enabled = true;
+ p->cur.enabled = true;
}
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
@@ -2389,59 +2648,53 @@ static void hsw_compute_wm_parameters(struct drm_device *dev,
pipe = intel_plane->pipe;
p = &params[pipe];
- p->sprite_enabled = intel_plane->wm.enable;
- p->spr_bytes_per_pixel = intel_plane->wm.bytes_per_pixel;
- p->spr_horiz_pixels = intel_plane->wm.horiz_pixels;
+ p->spr = intel_plane->wm;
- if (p->sprite_enabled)
- sprites_enabled++;
+ config.sprites_enabled |= p->spr.enabled;
+ config.sprites_scaled |= p->spr.scaled;
}
- if (pipes_active > 1) {
- lp_max_1_2->pri = lp_max_5_6->pri = sprites_enabled ? 128 : 256;
- lp_max_1_2->spr = lp_max_5_6->spr = 128;
- lp_max_1_2->cur = lp_max_5_6->cur = 64;
- } else {
- lp_max_1_2->pri = sprites_enabled ? 384 : 768;
- lp_max_5_6->pri = sprites_enabled ? 128 : 768;
- lp_max_1_2->spr = 384;
- lp_max_5_6->spr = 640;
- lp_max_1_2->cur = lp_max_5_6->cur = 255;
- }
- lp_max_1_2->fbc = lp_max_5_6->fbc = 15;
+ ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_1_2, lp_max_1_2);
+
+ /* 5/6 split only in single pipe config on IVB+ */
+ if (INTEL_INFO(dev)->gen >= 7 && config.num_pipes_active <= 1)
+ ilk_wm_max(dev, 1, &config, INTEL_DDB_PART_5_6, lp_max_5_6);
+ else
+ *lp_max_5_6 = *lp_max_1_2;
}
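
As a quick sanity check (standalone arithmetic, not driver code): for a gen7+ device with a single active pipe, sprites enabled and an LP1+ level, ilk_plane_wm_max() reproduces the LP maximums that were hard-coded just above - 384/384 FIFO entries for the 1:1 split and 128/640 for the 5:6 split.

#include <stdio.h>

int main(void)
{
        unsigned int fifo = 768; /* IVB/HSW display FIFO entries, LP1+, one pipe */

        /* 1:1 partitioning: primary and sprite each get half */
        printf("1/2 split: pri=%u spr=%u\n", fifo / 2, fifo / 2);       /* 384 384 */

        /* 5:6 partitioning: sprite gets 5/6 of the FIFO, primary the rest */
        printf("5/6 split: pri=%u spr=%u\n", fifo / 6, fifo * 5 / 6);   /* 128 640 */
        return 0;
}
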
static void hsw_compute_wm_results(struct drm_device *dev,
struct hsw_pipe_wm_parameters *params,
- uint32_t *wm,
struct hsw_wm_maximums *lp_maximums,
struct hsw_wm_values *results)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
- struct hsw_lp_wm_result lp_results[4] = {};
+ struct intel_wm_level lp_results[4] = {};
enum pipe pipe;
int level, max_level, wm_lp;
for (level = 1; level <= 4; level++)
- if (!hsw_compute_lp_wm(wm[level], lp_maximums, params,
+ if (!hsw_compute_lp_wm(dev_priv, level,
+ lp_maximums, params,
&lp_results[level - 1]))
break;
max_level = level - 1;
+ memset(results, 0, sizeof(*results));
+
/* The spec says it is preferred to disable FBC WMs instead of disabling
* a WM level. */
results->enable_fbc_wm = true;
for (level = 1; level <= max_level; level++) {
- if (!lp_results[level - 1].fbc_enable) {
+ if (lp_results[level - 1].fbc_val > lp_maximums->fbc) {
results->enable_fbc_wm = false;
- break;
+ lp_results[level - 1].fbc_val = 0;
}
}
- memset(results, 0, sizeof(*results));
for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
- const struct hsw_lp_wm_result *r;
+ const struct intel_wm_level *r;
level = (max_level == 4 && wm_lp > 1) ? wm_lp + 1 : wm_lp;
if (level > max_level)
@@ -2456,8 +2709,7 @@ static void hsw_compute_wm_results(struct drm_device *dev,
}
for_each_pipe(pipe)
- results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, wm[0],
- pipe,
+ results->wm_pipe[pipe] = hsw_compute_wm_pipe(dev_priv, pipe,
&params[pipe]);
for_each_pipe(pipe) {
@@ -2468,8 +2720,8 @@ static void hsw_compute_wm_results(struct drm_device *dev,
/* Find the result with the highest level enabled. Check for enable_fbc_wm in
* case both are at the same level. Prefer r1 in case they're the same. */
-struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
- struct hsw_wm_values *r2)
+static struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
+ struct hsw_wm_values *r2)
{
int i, val_r1 = 0, val_r2 = 0;
@@ -2498,11 +2750,11 @@ struct hsw_wm_values *hsw_find_best_result(struct hsw_wm_values *r1,
*/
static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
struct hsw_wm_values *results,
- enum hsw_data_buf_partitioning partitioning)
+ enum intel_ddb_partitioning partitioning)
{
struct hsw_wm_values previous;
uint32_t val;
- enum hsw_data_buf_partitioning prev_partitioning;
+ enum intel_ddb_partitioning prev_partitioning;
bool prev_enable_fbc_wm;
previous.wm_pipe[0] = I915_READ(WM0_PIPEA_ILK);
@@ -2519,7 +2771,7 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
previous.wm_linetime[2] = I915_READ(PIPE_WM_LINETIME(PIPE_C));
prev_partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
- HSW_DATA_BUF_PART_5_6 : HSW_DATA_BUF_PART_1_2;
+ INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
prev_enable_fbc_wm = !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
@@ -2558,7 +2810,7 @@ static void hsw_write_wm_values(struct drm_i915_private *dev_priv,
if (prev_partitioning != partitioning) {
val = I915_READ(WM_MISC);
- if (partitioning == HSW_DATA_BUF_PART_1_2)
+ if (partitioning == INTEL_DDB_PART_1_2)
val &= ~WM_MISC_DATA_PARTITION_5_6;
else
val |= WM_MISC_DATA_PARTITION_5_6;
@@ -2595,44 +2847,39 @@ static void haswell_update_wm(struct drm_device *dev)
struct hsw_wm_maximums lp_max_1_2, lp_max_5_6;
struct hsw_pipe_wm_parameters params[3];
struct hsw_wm_values results_1_2, results_5_6, *best_results;
- uint32_t wm[5];
- enum hsw_data_buf_partitioning partitioning;
+ enum intel_ddb_partitioning partitioning;
- hsw_compute_wm_parameters(dev, params, wm, &lp_max_1_2, &lp_max_5_6);
+ hsw_compute_wm_parameters(dev, params, &lp_max_1_2, &lp_max_5_6);
- hsw_compute_wm_results(dev, params, wm, &lp_max_1_2, &results_1_2);
+ hsw_compute_wm_results(dev, params,
+ &lp_max_1_2, &results_1_2);
if (lp_max_1_2.pri != lp_max_5_6.pri) {
- hsw_compute_wm_results(dev, params, wm, &lp_max_5_6,
- &results_5_6);
+ hsw_compute_wm_results(dev, params,
+ &lp_max_5_6, &results_5_6);
best_results = hsw_find_best_result(&results_1_2, &results_5_6);
} else {
best_results = &results_1_2;
}
partitioning = (best_results == &results_1_2) ?
- HSW_DATA_BUF_PART_1_2 : HSW_DATA_BUF_PART_5_6;
+ INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
hsw_write_wm_values(dev_priv, best_results, partitioning);
}
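
The hunk above computes watermark results for the 1/2 DDB partitioning, optionally for the 5/6 split, and keeps whichever enables the higher LP level. A minimal, self-contained C sketch of that selection flow follows; the structures and the scoring rule are simplified stand-ins, not the driver's actual types.

/* Illustrative model: score each candidate by the highest LP level it
 * enables, prefer the first result on a tie (mirroring hsw_find_best_result's
 * comment about checking enable_fbc_wm when levels are equal). */
#include <stdio.h>
#include <stdbool.h>

struct wm_results {
    bool wm_lp_enabled[3];   /* LP1..LP3 enabled? */
    bool enable_fbc_wm;
};

static int best_enabled_level(const struct wm_results *r)
{
    int level = 0;
    for (int i = 0; i < 3; i++)
        if (r->wm_lp_enabled[i])
            level = i + 1;
    return level;
}

static const struct wm_results *
find_best_result(const struct wm_results *r1, const struct wm_results *r2)
{
    int v1 = best_enabled_level(r1), v2 = best_enabled_level(r2);

    if (v1 == v2)
        return (r2->enable_fbc_wm && !r1->enable_fbc_wm) ? r2 : r1;
    return v1 > v2 ? r1 : r2;
}

int main(void)
{
    struct wm_results res_1_2 = { { true, true, false }, false };
    struct wm_results res_5_6 = { { true, true, true  }, true  };
    const struct wm_results *best = find_best_result(&res_1_2, &res_5_6);

    printf("picked %s partitioning\n", best == &res_1_2 ? "1/2" : "5/6");
    return 0;
}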
-static void haswell_update_sprite_wm(struct drm_device *dev, int pipe,
+static void haswell_update_sprite_wm(struct drm_plane *plane,
+ struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
- bool enable)
+ bool enabled, bool scaled)
{
- struct drm_plane *plane;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
- list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
- struct intel_plane *intel_plane = to_intel_plane(plane);
-
- if (intel_plane->pipe == pipe) {
- intel_plane->wm.enable = enable;
- intel_plane->wm.horiz_pixels = sprite_width + 1;
- intel_plane->wm.bytes_per_pixel = pixel_size;
- break;
- }
- }
+ intel_plane->wm.enabled = enabled;
+ intel_plane->wm.scaled = scaled;
+ intel_plane->wm.horiz_pixels = sprite_width;
+ intel_plane->wm.bytes_per_pixel = pixel_size;
- haswell_update_wm(dev);
+ haswell_update_wm(plane->dev);
}
static bool
@@ -2711,17 +2958,20 @@ sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
return *sprite_wm > 0x3ff ? false : true;
}
-static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
+static void sandybridge_update_sprite_wm(struct drm_plane *plane,
+ struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
- bool enable)
+ bool enabled, bool scaled)
{
+ struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ int pipe = to_intel_plane(plane)->pipe;
+ int latency = dev_priv->wm.spr_latency[0] * 100; /* In unit 0.1us */
u32 val;
int sprite_wm, reg;
int ret;
- if (!enable)
+ if (!enabled)
return;
switch (pipe) {
@@ -2756,7 +3006,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
pixel_size,
&sandybridge_display_srwm_info,
- SNB_READ_WM1_LATENCY() * 500,
+ dev_priv->wm.spr_latency[1] * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %c\n",
@@ -2772,7 +3022,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
pixel_size,
&sandybridge_display_srwm_info,
- SNB_READ_WM2_LATENCY() * 500,
+ dev_priv->wm.spr_latency[2] * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %c\n",
@@ -2784,7 +3034,7 @@ static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
pixel_size,
&sandybridge_display_srwm_info,
- SNB_READ_WM3_LATENCY() * 500,
+ dev_priv->wm.spr_latency[3] * 500,
&sprite_wm);
if (!ret) {
DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %c\n",
@@ -2834,15 +3084,16 @@ void intel_update_watermarks(struct drm_device *dev)
dev_priv->display.update_wm(dev);
}
-void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+void intel_update_sprite_watermarks(struct drm_plane *plane,
+ struct drm_crtc *crtc,
uint32_t sprite_width, int pixel_size,
- bool enable)
+ bool enabled, bool scaled)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = plane->dev->dev_private;
if (dev_priv->display.update_sprite_wm)
- dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
- pixel_size, enable);
+ dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
+ pixel_size, enabled, scaled);
}
static struct drm_i915_gem_object *
@@ -2859,7 +3110,7 @@ intel_alloc_context_page(struct drm_device *dev)
return NULL;
}
- ret = i915_gem_object_pin(ctx, 4096, true, false);
+ ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
@@ -3076,19 +3327,12 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
*/
static void vlv_update_rps_cur_delay(struct drm_i915_private *dev_priv)
{
- unsigned long timeout = jiffies + msecs_to_jiffies(10);
u32 pval;
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
- do {
- pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- if (time_after(jiffies, timeout)) {
- DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
- break;
- }
- udelay(10);
- } while (pval & 1);
+ if (wait_for(((pval = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 10))
+ DRM_DEBUG_DRIVER("timed out waiting for Punit\n");
pval >>= 8;
@@ -3129,13 +3373,10 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv->mem_freq, val));
}
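
The vlv_update_rps_cur_delay() change above replaces an open-coded jiffies/udelay loop with the driver's wait_for() macro. Below is a minimal userspace sketch of the same poll-with-timeout idiom, assuming an invented WAIT_FOR macro; it illustrates the pattern only and is not the kernel's implementation.

/* Poll a condition, sleeping briefly between samples, and give up after a
 * deadline.  Uses a GCC statement expression, like the kernel macro does. */
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static long long now_ms(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Evaluates to 0 once COND becomes true, -1 if timeout_ms elapses first. */
#define WAIT_FOR(COND, timeout_ms) ({                         \
    long long __deadline = now_ms() + (timeout_ms);           \
    int __ret = -1;                                           \
    for (;;) {                                                \
        if (COND) { __ret = 0; break; }                       \
        if (now_ms() > __deadline) break;                     \
        usleep(10);  /* back off between register samples */  \
    }                                                         \
    __ret;                                                    \
})

int main(void)
{
    long long start = now_ms();
    /* Stand-in condition: pretend the status bit clears after ~5 ms. */
    if (WAIT_FOR(now_ms() - start > 5, 100))
        printf("timed out waiting for Punit\n");
    else
        printf("status cleared\n");
    return 0;
}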
-
-static void gen6_disable_rps(struct drm_device *dev)
+static void gen6_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- I915_WRITE(GEN6_RC_CONTROL, 0);
- I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) & ~GEN6_PM_RPS_EVENTS);
/* Complete PM interrupt masking here doesn't race with the rps work
@@ -3143,30 +3384,30 @@ static void gen6_disable_rps(struct drm_device *dev)
* register (PMIMR) to mask PM interrupts. The only risk is in leaving
* stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
- spin_lock_irq(&dev_priv->rps.lock);
+ spin_lock_irq(&dev_priv->irq_lock);
dev_priv->rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->rps.lock);
+ spin_unlock_irq(&dev_priv->irq_lock);
I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
}
-static void valleyview_disable_rps(struct drm_device *dev)
+static void gen6_disable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(GEN6_RC_CONTROL, 0);
- I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
- I915_WRITE(GEN6_PMIER, 0);
- /* Complete PM interrupt masking here doesn't race with the rps work
- * item again unmasking PM interrupts because that is using a different
- * register (PMIMR) to mask PM interrupts. The only risk is in leaving
- * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
+ I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
- spin_lock_irq(&dev_priv->rps.lock);
- dev_priv->rps.pm_iir = 0;
- spin_unlock_irq(&dev_priv->rps.lock);
+ gen6_disable_rps_interrupts(dev);
+}
- I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+static void valleyview_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ gen6_disable_rps_interrupts(dev);
if (dev_priv->vlv_pctx) {
drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
@@ -3176,6 +3417,10 @@ static void valleyview_disable_rps(struct drm_device *dev)
int intel_enable_rc6(const struct drm_device *dev)
{
+ /* No RC6 before Ironlake */
+ if (INTEL_INFO(dev)->gen < 5)
+ return 0;
+
/* Respect the kernel parameter if it is set */
if (i915_enable_rc6 >= 0)
return i915_enable_rc6;
@@ -3199,6 +3444,19 @@ int intel_enable_rc6(const struct drm_device *dev)
return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
}
+static void gen6_enable_rps_interrupts(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ WARN_ON(dev_priv->rps.pm_iir);
+ snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
+ I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
+ spin_unlock_irq(&dev_priv->irq_lock);
+ /* only unmask PM interrupts we need. Mask all others. */
+ I915_WRITE(GEN6_PMINTRMSK, ~GEN6_PM_RPS_EVENTS);
+}
+
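
The new gen6_enable_rps_interrupts()/gen6_disable_rps_interrupts() helpers above consolidate the PM-interrupt bookkeeping under the single irq_lock. The sketch below models that sequence with plain variables standing in for PMIER/PMIIR/PMINTRMSK; the register names and mask value are assumptions made for the illustration, not the driver's.

/* Self-contained model of the enable/disable sequence (build with -lpthread). */
#include <assert.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define RPS_EVENTS 0x0000007eu  /* arbitrary stand-in for GEN6_PM_RPS_EVENTS */

struct fake_pm {
    pthread_mutex_t irq_lock;
    uint32_t ier, iir, intrmsk;   /* enable, identity (ack), mask "registers" */
    uint32_t pending_work;        /* models dev_priv->rps.pm_iir */
};

static void pm_irq_enable(struct fake_pm *pm)
{
    pthread_mutex_lock(&pm->irq_lock);
    assert(pm->pending_work == 0);      /* mirrors the WARN_ON above */
    pm->ier |= RPS_EVENTS;              /* enable the events we care about */
    pm->iir = RPS_EVENTS;               /* ack anything stale */
    pthread_mutex_unlock(&pm->irq_lock);
    pm->intrmsk = ~RPS_EVENTS;          /* unmask only what we need */
}

static void pm_irq_disable(struct fake_pm *pm)
{
    pm->intrmsk = 0xffffffff;           /* mask everything first */
    pm->ier &= ~RPS_EVENTS;
    pthread_mutex_lock(&pm->irq_lock);
    pm->pending_work = 0;               /* drop any queued work flags */
    pthread_mutex_unlock(&pm->irq_lock);
    pm->iir = RPS_EVENTS;               /* final ack of stale bits */
}

int main(void)
{
    struct fake_pm pm = { PTHREAD_MUTEX_INITIALIZER, 0, 0, 0xffffffff, 0 };
    pm_irq_enable(&pm);
    pm_irq_disable(&pm);
    printf("ier=%#x intrmsk=%#x\n", pm.ier, pm.intrmsk);
    return 0;
}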
static void gen6_enable_rps(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3250,7 +3508,10 @@ static void gen6_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
- I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+ if (INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev))
+ I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
+ else
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
@@ -3327,17 +3588,7 @@ static void gen6_enable_rps(struct drm_device *dev)
gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
- /* requires MSI enabled */
- I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) | GEN6_PM_RPS_EVENTS);
- spin_lock_irq(&dev_priv->rps.lock);
- /* FIXME: Our interrupt enabling sequence is bonghits.
- * dev_priv->rps.pm_iir really should be 0 here. */
- dev_priv->rps.pm_iir = 0;
- I915_WRITE(GEN6_PMIMR, I915_READ(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
- I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS);
- spin_unlock_irq(&dev_priv->rps.lock);
- /* unmask all PM interrupts */
- I915_WRITE(GEN6_PMINTRMSK, 0);
+ gen6_enable_rps_interrupts(dev);
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
@@ -3356,7 +3607,7 @@ static void gen6_enable_rps(struct drm_device *dev)
gen6_gt_force_wake_put(dev_priv);
}
-static void gen6_update_ring_freq(struct drm_device *dev)
+void gen6_update_ring_freq(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int min_freq = 15;
@@ -3482,7 +3733,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
pcbr_offset,
- -1,
+ I915_GTT_OFFSET_NONE,
pctx_size);
goto out;
}
@@ -3607,14 +3858,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
- /* requires MSI enabled */
- I915_WRITE(GEN6_PMIER, GEN6_PM_RPS_EVENTS);
- spin_lock_irq(&dev_priv->rps.lock);
- WARN_ON(dev_priv->rps.pm_iir != 0);
- I915_WRITE(GEN6_PMIMR, 0);
- spin_unlock_irq(&dev_priv->rps.lock);
- /* enable all PM interrupts */
- I915_WRITE(GEN6_PMINTRMSK, 0);
+ gen6_enable_rps_interrupts(dev);
gen6_gt_force_wake_put(dev_priv);
}
@@ -3708,7 +3952,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
intel_ring_emit(ring, MI_SET_CONTEXT);
- intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
+ intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
@@ -3731,7 +3975,7 @@ static void ironlake_enable_rc6(struct drm_device *dev)
return;
}
- I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
+ I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}
@@ -4429,7 +4673,10 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
- /* Required for FBC */
+ /*
+ * Required for FBC
+ * WaFbcDisableDpfcClockGating:ilk
+ */
dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
@@ -4466,6 +4713,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
* The bit 7,8,9 of 0x42020.
*/
if (IS_IRONLAKE_M(dev)) {
+ /* WaFbcAsynchFlipDisableFbcQueue:ilk */
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
ILK_FBCQ_DIS);
@@ -4602,6 +4850,8 @@ static void gen6_init_clock_gating(struct drm_device *dev)
* The bit5 and bit7 of 0x42020
* The bit14 of 0x70180
* The bit14 of 0x71180
+ *
+ * WaFbcAsynchFlipDisableFbcQueue:snb
*/
I915_WRITE(ILK_DISPLAY_CHICKEN1,
I915_READ(ILK_DISPLAY_CHICKEN1) |
@@ -4614,10 +4864,6 @@ static void gen6_init_clock_gating(struct drm_device *dev)
ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
- /* WaMbcDriverBootEnable:snb */
- I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
- GEN6_MBCTL_ENABLE_BOOT_FETCH);
-
g4x_disable_trickle_feed(dev);
/* The default value should be 0x200 according to docs, but the two
@@ -4713,10 +4959,6 @@ static void haswell_init_clock_gating(struct drm_device *dev)
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
- /* WaMbcDriverBootEnable:hsw */
- I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
- GEN6_MBCTL_ENABLE_BOOT_FETCH);
-
/* WaSwitchSolVfFArbitrationPriority:hsw */
I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -4800,10 +5042,6 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
g4x_disable_trickle_feed(dev);
- /* WaMbcDriverBootEnable:ivb */
- I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
- GEN6_MBCTL_ENABLE_BOOT_FETCH);
-
/* WaVSRefCountFullforceMissDisable:ivb */
gen7_setup_fixed_func_scheduler(dev_priv);
@@ -4863,11 +5101,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
- /* WaMbcDriverBootEnable:vlv */
- I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
- GEN6_MBCTL_ENABLE_BOOT_FETCH);
-
-
/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
* gating disable must be set. Failure to set it results in
* flickering pixels due to Z write ordering failures after
@@ -5035,7 +5268,7 @@ bool intel_display_power_enabled(struct drm_device *dev,
case POWER_DOMAIN_TRANSCODER_B:
case POWER_DOMAIN_TRANSCODER_C:
return I915_READ(HSW_PWR_WELL_DRIVER) ==
- (HSW_PWR_WELL_ENABLE | HSW_PWR_WELL_STATE);
+ (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
default:
BUG();
}
@@ -5048,23 +5281,42 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
uint32_t tmp;
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
- is_enabled = tmp & HSW_PWR_WELL_STATE;
- enable_requested = tmp & HSW_PWR_WELL_ENABLE;
+ is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
+ enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
if (enable) {
if (!enable_requested)
- I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);
+ I915_WRITE(HSW_PWR_WELL_DRIVER,
+ HSW_PWR_WELL_ENABLE_REQUEST);
if (!is_enabled) {
DRM_DEBUG_KMS("Enabling power well\n");
if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
- HSW_PWR_WELL_STATE), 20))
+ HSW_PWR_WELL_STATE_ENABLED), 20))
DRM_ERROR("Timeout enabling power well\n");
}
} else {
if (enable_requested) {
+ unsigned long irqflags;
+ enum pipe p;
+
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
+ POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Requesting to disable the power well\n");
+
+ /*
+ * After this, the registers on the pipes that are part
+ * of the power well will become zero, so we have to
+ * adjust our counters according to that.
+ *
+ * FIXME: Should we do this in general in
+ * drm_vblank_post_modeset?
+ */
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ for_each_pipe(p)
+ if (p != PIPE_A)
+ dev->last_vblank[p] = 0;
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
}
}
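
__intel_set_power_well() above writes a request bit, waits for a separate state bit when enabling, and on disable zeroes the per-pipe vblank counters whose backing registers are lost. A compact model of that sequence, with a simplified register layout and pipe count assumed for the example:

/* Illustrative request/ack power-well model; hw_tick() stands in for the
 * hardware flipping the state bit, where the driver uses wait_for(..., 20). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PWR_WELL_ENABLE_REQUEST (1u << 31)
#define PWR_WELL_STATE_ENABLED  (1u << 30)
#define NUM_PIPES 3

static uint32_t pwr_well_reg;
static uint32_t last_vblank[NUM_PIPES];

static void hw_tick(void)
{
    if (pwr_well_reg & PWR_WELL_ENABLE_REQUEST)
        pwr_well_reg |= PWR_WELL_STATE_ENABLED;
    else
        pwr_well_reg &= ~PWR_WELL_STATE_ENABLED;
}

static void set_power_well(bool enable)
{
    if (enable) {
        pwr_well_reg |= PWR_WELL_ENABLE_REQUEST;
        hw_tick();
        if (!(pwr_well_reg & PWR_WELL_STATE_ENABLED))
            printf("timeout enabling power well\n");
    } else {
        pwr_well_reg &= ~PWR_WELL_ENABLE_REQUEST;
        hw_tick();
        /* The pipes behind the well lose their registers, so zero the
         * software vblank counters for everything except pipe A. */
        for (int p = 1; p < NUM_PIPES; p++)
            last_vblank[p] = 0;
    }
}

int main(void)
{
    set_power_well(true);
    set_power_well(false);
    printf("reg=%#x\n", pwr_well_reg);
    return 0;
}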
@@ -5160,10 +5412,21 @@ void intel_init_power_well(struct drm_device *dev)
/* We're taking over the BIOS, so clear any requests made by it since
* the driver is in charge now. */
- if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE)
+ if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
+/* Disables PC8 so we can use the GMBUS and DP AUX interrupts. */
+void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
+{
+ hsw_disable_package_c8(dev_priv);
+}
+
+void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
+{
+ hsw_enable_package_c8(dev_priv);
+}
+
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
@@ -5199,8 +5462,12 @@ void intel_init_pm(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
+ intel_setup_wm_latency(dev);
+
if (IS_GEN5(dev)) {
- if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
+ if (dev_priv->wm.pri_latency[1] &&
+ dev_priv->wm.spr_latency[1] &&
+ dev_priv->wm.cur_latency[1])
dev_priv->display.update_wm = ironlake_update_wm;
else {
DRM_DEBUG_KMS("Failed to get proper latency. "
@@ -5209,7 +5476,9 @@ void intel_init_pm(struct drm_device *dev)
}
dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
} else if (IS_GEN6(dev)) {
- if (SNB_READ_WM0_LATENCY()) {
+ if (dev_priv->wm.pri_latency[0] &&
+ dev_priv->wm.spr_latency[0] &&
+ dev_priv->wm.cur_latency[0]) {
dev_priv->display.update_wm = sandybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
@@ -5219,7 +5488,9 @@ void intel_init_pm(struct drm_device *dev)
}
dev_priv->display.init_clock_gating = gen6_init_clock_gating;
} else if (IS_IVYBRIDGE(dev)) {
- if (SNB_READ_WM0_LATENCY()) {
+ if (dev_priv->wm.pri_latency[0] &&
+ dev_priv->wm.spr_latency[0] &&
+ dev_priv->wm.cur_latency[0]) {
dev_priv->display.update_wm = ivybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
@@ -5229,7 +5500,9 @@ void intel_init_pm(struct drm_device *dev)
}
dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
} else if (IS_HASWELL(dev)) {
- if (I915_READ64(MCH_SSKPD)) {
+ if (dev_priv->wm.pri_latency[0] &&
+ dev_priv->wm.spr_latency[0] &&
+ dev_priv->wm.cur_latency[0]) {
dev_priv->display.update_wm = haswell_update_wm;
dev_priv->display.update_sprite_wm =
haswell_update_sprite_wm;
@@ -5292,256 +5565,6 @@ void intel_init_pm(struct drm_device *dev)
}
}
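
The intel_init_pm() changes above switch from reading latency registers inline to checking the cached per-platform latency values before installing a watermark callback. A small sketch of that dispatch style follows; the platform numbers, latency values, and function names are invented for the example.

/* Probe latencies once, then install the matching update_wm callback only if
 * the required entries are non-zero. */
#include <stdio.h>

struct wm_latency { int pri[5], spr[5], cur[5]; };

static void gen5_update_wm(void) { puts("ironlake_update_wm"); }
static void gen6_update_wm(void) { puts("sandybridge_update_wm"); }

struct display_funcs { void (*update_wm)(void); };

static void init_pm(int gen, const struct wm_latency *wm,
                    struct display_funcs *funcs)
{
    if (gen == 5) {
        if (wm->pri[1] && wm->spr[1] && wm->cur[1])
            funcs->update_wm = gen5_update_wm;
        else
            printf("Failed to get proper latency, disabling WM\n");
    } else if (gen == 6) {
        if (wm->pri[0] && wm->spr[0] && wm->cur[0])
            funcs->update_wm = gen6_update_wm;
        else
            printf("Failed to get proper latency, disabling WM\n");
    }
}

int main(void)
{
    struct wm_latency wm = { {7, 4}, {7, 4}, {7, 4} };  /* fake 0.1us units */
    struct display_funcs funcs = { 0 };

    init_pm(6, &wm, &funcs);
    if (funcs.update_wm)
        funcs.update_wm();
    return 0;
}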
-static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
-{
- u32 gt_thread_status_mask;
-
- if (IS_HASWELL(dev_priv->dev))
- gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
- else
- gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
-
- /* w/a for a sporadic read returning 0 by waiting for the GT
- * thread to wake up.
- */
- if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
- DRM_ERROR("GT thread status wait timed out\n");
-}
-
-static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE, 0);
- POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
-}
-
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
-{
- if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
-
- I915_WRITE_NOTRACE(FORCEWAKE, 1);
- POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
-
- if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
-
- /* WaRsForcewakeWaitTC0:snb */
- __gen6_gt_wait_for_thread_c0(dev_priv);
-}
-
-static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
- /* something from same cacheline, but !FORCEWAKE_MT */
- POSTING_READ(ECOBUS);
-}
-
-static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
-{
- u32 forcewake_ack;
-
- if (IS_HASWELL(dev_priv->dev))
- forcewake_ack = FORCEWAKE_ACK_HSW;
- else
- forcewake_ack = FORCEWAKE_MT_ACK;
-
- if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
-
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
- /* something from same cacheline, but !FORCEWAKE_MT */
- POSTING_READ(ECOBUS);
-
- if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
-
- /* WaRsForcewakeWaitTC0:ivb,hsw */
- __gen6_gt_wait_for_thread_c0(dev_priv);
-}
-
-/*
- * Generally this is called implicitly by the register read function. However,
- * if some sequence requires the GT to not power down then this function should
- * be called at the beginning of the sequence followed by a call to
- * gen6_gt_force_wake_put() at the end of the sequence.
- */
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
-{
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
- if (dev_priv->forcewake_count++ == 0)
- dev_priv->gt.force_wake_get(dev_priv);
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
-}
-
-void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
-{
- u32 gtfifodbg;
- gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
- if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
- "MMIO read or write has been dropped %x\n", gtfifodbg))
- I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
-}
-
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE, 0);
- /* something from same cacheline, but !FORCEWAKE */
- POSTING_READ(ECOBUS);
- gen6_gt_check_fifodbg(dev_priv);
-}
-
-static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
- /* something from same cacheline, but !FORCEWAKE_MT */
- POSTING_READ(ECOBUS);
- gen6_gt_check_fifodbg(dev_priv);
-}
-
-/*
- * see gen6_gt_force_wake_get()
- */
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
-{
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
- if (--dev_priv->forcewake_count == 0)
- dev_priv->gt.force_wake_put(dev_priv);
- spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
-}
-
-int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
-{
- int ret = 0;
-
- if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
- int loop = 500;
- u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
- while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
- udelay(10);
- fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
- }
- if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
- ++ret;
- dev_priv->gt_fifo_count = fifo;
- }
- dev_priv->gt_fifo_count--;
-
- return ret;
-}
-
-static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
- /* something from same cacheline, but !FORCEWAKE_VLV */
- POSTING_READ(FORCEWAKE_ACK_VLV);
-}
-
-static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
-{
- if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
-
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
- I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
- _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
-
- if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
-
- if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
- FORCEWAKE_KERNEL),
- FORCEWAKE_ACK_TIMEOUT_MS))
- DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
-
- /* WaRsForcewakeWaitTC0:vlv */
- __gen6_gt_wait_for_thread_c0(dev_priv);
-}
-
-static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
-{
- I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
- I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
- _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
- /* The below doubles as a POSTING_READ */
- gen6_gt_check_fifodbg(dev_priv);
-}
-
-void intel_gt_sanitize(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (IS_VALLEYVIEW(dev)) {
- vlv_force_wake_reset(dev_priv);
- } else if (INTEL_INFO(dev)->gen >= 6) {
- __gen6_gt_force_wake_reset(dev_priv);
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
- __gen6_gt_force_wake_mt_reset(dev_priv);
- }
-
- /* BIOS often leaves RC6 enabled, but disable it for hw init */
- if (INTEL_INFO(dev)->gen >= 6)
- intel_disable_gt_powersave(dev);
-}
-
-void intel_gt_init(struct drm_device *dev)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- spin_lock_init(&dev_priv->gt_lock);
-
- if (IS_VALLEYVIEW(dev)) {
- dev_priv->gt.force_wake_get = vlv_force_wake_get;
- dev_priv->gt.force_wake_put = vlv_force_wake_put;
- } else if (IS_HASWELL(dev)) {
- dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
- dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
- } else if (IS_IVYBRIDGE(dev)) {
- u32 ecobus;
-
- /* IVB configs may use multi-threaded forcewake */
-
- /* A small trick here - if the bios hasn't configured
- * MT forcewake, and if the device is in RC6, then
- * force_wake_mt_get will not wake the device and the
- * ECOBUS read will return zero. Which will be
- * (correctly) interpreted by the test below as MT
- * forcewake being disabled.
- */
- mutex_lock(&dev->struct_mutex);
- __gen6_gt_force_wake_mt_get(dev_priv);
- ecobus = I915_READ_NOTRACE(ECOBUS);
- __gen6_gt_force_wake_mt_put(dev_priv);
- mutex_unlock(&dev->struct_mutex);
-
- if (ecobus & FORCEWAKE_MT_ENABLE) {
- dev_priv->gt.force_wake_get =
- __gen6_gt_force_wake_mt_get;
- dev_priv->gt.force_wake_put =
- __gen6_gt_force_wake_mt_put;
- } else {
- DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
- DRM_INFO("when using vblank-synced partial screen updates.\n");
- dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
- dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
- }
- } else if (IS_GEN6(dev)) {
- dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
- dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
- }
- INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
- intel_gen6_powersave_work);
-}
-
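
The forcewake code removed above reappears in the new intel_uncore.c further down in this diff. Its core pattern is a per-device reference count under a lock, with the hardware get/put hooks fired only on the 0->1 and 1->0 transitions. A standalone sketch of that pattern (hooks just print here; the real ones poke FORCEWAKE):

/* Build with -lpthread; a mutex stands in for the gt_lock spinlock. */
#include <pthread.h>
#include <stdio.h>

struct fw_ops {
    void (*force_wake_get)(void);
    void (*force_wake_put)(void);
};

static void hw_get(void) { puts("hw: forcewake asserted"); }
static void hw_put(void) { puts("hw: forcewake released"); }

static pthread_mutex_t gt_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned forcewake_count;
static struct fw_ops gt = { hw_get, hw_put };

static void force_wake_get(void)
{
    pthread_mutex_lock(&gt_lock);
    if (forcewake_count++ == 0)
        gt.force_wake_get();
    pthread_mutex_unlock(&gt_lock);
}

static void force_wake_put(void)
{
    pthread_mutex_lock(&gt_lock);
    if (--forcewake_count == 0)
        gt.force_wake_put();
    pthread_mutex_unlock(&gt_lock);
}

int main(void)
{
    force_wake_get();
    force_wake_get();   /* nested get: no extra hardware access */
    force_wake_put();
    force_wake_put();   /* last put releases the hardware */
    return 0;
}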
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -5644,3 +5667,11 @@ int vlv_freq_opcode(int ddr_freq, int val)
return val;
}
+void intel_pm_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
+ intel_gen6_powersave_work);
+}
+
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 664118d8c1d..f05cceac5a5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -440,14 +440,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
* registers with the above sequence (the readback of the HEAD registers
* also enforces ordering), otherwise the hw might lose the new ring
* register values. */
- I915_WRITE_START(ring, obj->gtt_offset);
+ I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
I915_WRITE_CTL(ring,
((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_VALID);
/* If the head is still not zero, the ring is dead */
if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
- I915_READ_START(ring) == obj->gtt_offset &&
+ I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
(I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
@@ -501,11 +501,11 @@ init_pipe_control(struct intel_ring_buffer *ring)
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- ret = i915_gem_object_pin(obj, 4096, true, false);
+ ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
if (ret)
goto err_unref;
- pc->gtt_offset = obj->gtt_offset;
+ pc->gtt_offset = i915_gem_obj_ggtt_offset(obj);
pc->cpu_page = kmap(sg_page(obj->pages->sgl));
if (pc->cpu_page == NULL) {
ret = -ENOMEM;
@@ -836,11 +836,8 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount.gt++ == 0) {
- dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- POSTING_READ(GTIMR);
- }
+ if (ring->irq_refcount++ == 0)
+ ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
@@ -854,11 +851,8 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount.gt == 0) {
- dev_priv->gt_irq_mask |= ring->irq_enable_mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- POSTING_READ(GTIMR);
- }
+ if (--ring->irq_refcount == 0)
+ ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
@@ -873,7 +867,7 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount.gt++ == 0) {
+ if (ring->irq_refcount++ == 0) {
dev_priv->irq_mask &= ~ring->irq_enable_mask;
I915_WRITE(IMR, dev_priv->irq_mask);
POSTING_READ(IMR);
@@ -891,7 +885,7 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount.gt == 0) {
+ if (--ring->irq_refcount == 0) {
dev_priv->irq_mask |= ring->irq_enable_mask;
I915_WRITE(IMR, dev_priv->irq_mask);
POSTING_READ(IMR);
@@ -910,7 +904,7 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)
return false;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount.gt++ == 0) {
+ if (ring->irq_refcount++ == 0) {
dev_priv->irq_mask &= ~ring->irq_enable_mask;
I915_WRITE16(IMR, dev_priv->irq_mask);
POSTING_READ16(IMR);
@@ -928,7 +922,7 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount.gt == 0) {
+ if (--ring->irq_refcount == 0) {
dev_priv->irq_mask |= ring->irq_enable_mask;
I915_WRITE16(IMR, dev_priv->irq_mask);
POSTING_READ16(IMR);
@@ -968,6 +962,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
POSTING_READ(mmio);
+
+ /* Flush the TLB for this page */
+ if (INTEL_INFO(dev)->gen >= 6) {
+ u32 reg = RING_INSTPM(ring->mmio_base);
+ I915_WRITE(reg,
+ _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+ INSTPM_SYNC_FLUSH));
+ if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+ 1000))
+ DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+ ring->name);
+ }
}
static int
@@ -1021,16 +1027,14 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
gen6_gt_force_wake_get(dev_priv);
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (ring->irq_refcount.gt++ == 0) {
+ if (ring->irq_refcount++ == 0) {
if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring,
~(ring->irq_enable_mask |
GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
else
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
- dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- POSTING_READ(GTIMR);
+ ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -1045,15 +1049,13 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
unsigned long flags;
spin_lock_irqsave(&dev_priv->irq_lock, flags);
- if (--ring->irq_refcount.gt == 0) {
+ if (--ring->irq_refcount == 0) {
if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
I915_WRITE_IMR(ring,
~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
else
I915_WRITE_IMR(ring, ~0);
- dev_priv->gt_irq_mask |= ring->irq_enable_mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
- POSTING_READ(GTIMR);
+ ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
}
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -1070,14 +1072,12 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring)
if (!dev->irq_enabled)
return false;
- spin_lock_irqsave(&dev_priv->rps.lock, flags);
- if (ring->irq_refcount.pm++ == 0) {
- u32 pm_imr = I915_READ(GEN6_PMIMR);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (ring->irq_refcount++ == 0) {
I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
- I915_WRITE(GEN6_PMIMR, pm_imr & ~ring->irq_enable_mask);
- POSTING_READ(GEN6_PMIMR);
+ snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
}
- spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
return true;
}
@@ -1092,14 +1092,12 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)
if (!dev->irq_enabled)
return;
- spin_lock_irqsave(&dev_priv->rps.lock, flags);
- if (--ring->irq_refcount.pm == 0) {
- u32 pm_imr = I915_READ(GEN6_PMIMR);
+ spin_lock_irqsave(&dev_priv->irq_lock, flags);
+ if (--ring->irq_refcount == 0) {
I915_WRITE_IMR(ring, ~0);
- I915_WRITE(GEN6_PMIMR, pm_imr | ring->irq_enable_mask);
- POSTING_READ(GEN6_PMIMR);
+ snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
}
- spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}
static int
@@ -1144,7 +1142,7 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
intel_ring_advance(ring);
} else {
struct drm_i915_gem_object *obj = ring->private;
- u32 cs_offset = obj->gtt_offset;
+ u32 cs_offset = i915_gem_obj_ggtt_offset(obj);
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
@@ -1224,12 +1222,12 @@ static int init_status_page(struct intel_ring_buffer *ring)
i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
- ret = i915_gem_object_pin(obj, 4096, true, false);
+ ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
if (ret != 0) {
goto err_unref;
}
- ring->status_page.gfx_addr = obj->gtt_offset;
+ ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
if (ring->status_page.page_addr == NULL) {
ret = -ENOMEM;
@@ -1307,7 +1305,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
ring->obj = obj;
- ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
+ ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
if (ret)
goto err_unref;
@@ -1316,7 +1314,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto err_unpin;
ring->virtual_start =
- ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
+ ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
ring->size);
if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
@@ -1594,6 +1592,8 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
if (INTEL_INFO(ring->dev)->gen >= 6) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
+ if (HAS_VEBOX(ring->dev))
+ I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
}
ring->set_seqno(ring, seqno);
@@ -1828,7 +1828,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
return -ENOMEM;
}
- ret = i915_gem_object_pin(obj, 0, true, false);
+ ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
if (ret != 0) {
drm_gem_object_unreference(&obj->base);
DRM_ERROR("Failed to ping batch bo\n");
@@ -2008,8 +2008,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
- ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT |
- PM_VEBOX_CS_ERROR_INTERRUPT;
+ ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
ring->irq_get = hsw_vebox_get_irq;
ring->irq_put = hsw_vebox_put_irq;
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 799f04c9da4..432ad5311ba 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -33,11 +33,12 @@ struct intel_hw_status_page {
#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
-#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
-#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
-#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
-
-enum intel_ring_hangcheck_action { wait, active, kick, hung };
+enum intel_ring_hangcheck_action {
+ HANGCHECK_WAIT,
+ HANGCHECK_ACTIVE,
+ HANGCHECK_KICK,
+ HANGCHECK_HUNG,
+};
struct intel_ring_hangcheck {
bool deadlock;
@@ -78,10 +79,7 @@ struct intel_ring_buffer {
*/
u32 last_retired_head;
- struct {
- u32 gt; /* protected by dev_priv->irq_lock */
- u32 pm; /* protected by dev_priv->rps.lock (sucks) */
- } irq_refcount;
+ unsigned irq_refcount; /* protected by dev_priv->irq_lock */
u32 irq_enable_mask; /* bitmask to enable ring interrupt */
u32 trace_irq_seqno;
u32 sync_seqno[I915_NUM_RINGS-1];
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 2628d562244..317e058fb3c 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -202,15 +202,14 @@ struct intel_sdvo_connector {
u32 cur_dot_crawl, max_dot_crawl;
};
-static struct intel_sdvo *to_intel_sdvo(struct drm_encoder *encoder)
+static struct intel_sdvo *to_sdvo(struct intel_encoder *encoder)
{
- return container_of(encoder, struct intel_sdvo, base.base);
+ return container_of(encoder, struct intel_sdvo, base);
}
static struct intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
{
- return container_of(intel_attached_encoder(connector),
- struct intel_sdvo, base);
+ return to_sdvo(intel_attached_encoder(connector));
}
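
The to_sdvo() cleanup above leans on container_of() to recover the wrapping structure from a pointer to an embedded member. A self-contained illustration of that idiom; the types here are invented, only the embedding shape matches the driver's encoder layout.

/* Classic container_of: subtract the member's offset from its address. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct encoder { int id; };

struct sdvo {
    const char *name;
    struct encoder base;    /* embedded, like intel_sdvo::base */
};

static struct sdvo *to_sdvo(struct encoder *enc)
{
    return container_of(enc, struct sdvo, base);
}

int main(void)
{
    struct sdvo dev = { "SDVOB", { 42 } };
    struct encoder *enc = &dev.base;      /* what callbacks typically receive */

    printf("%s (encoder id %d)\n", to_sdvo(enc)->name, enc->id);
    return 0;
}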
static struct intel_sdvo_connector *to_intel_sdvo_connector(struct drm_connector *connector)
@@ -539,7 +538,8 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
&status))
goto log_fail;
- while (status == SDVO_CMD_STATUS_PENDING && --retry) {
+ while ((status == SDVO_CMD_STATUS_PENDING ||
+ status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && --retry) {
if (retry < 10)
msleep(15);
else
@@ -964,30 +964,32 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
const struct drm_display_mode *adjusted_mode)
{
- struct dip_infoframe avi_if = {
- .type = DIP_TYPE_AVI,
- .ver = DIP_VERSION_AVI,
- .len = DIP_LEN_AVI,
- };
- uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
- struct intel_crtc *intel_crtc = to_intel_crtc(intel_sdvo->base.base.crtc);
+ uint8_t sdvo_data[HDMI_INFOFRAME_SIZE(AVI)];
+ struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ union hdmi_infoframe frame;
+ int ret;
+ ssize_t len;
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
+ adjusted_mode);
+ if (ret < 0) {
+ DRM_ERROR("couldn't fill AVI infoframe\n");
+ return false;
+ }
if (intel_sdvo->rgb_quant_range_selectable) {
if (intel_crtc->config.limited_color_range)
- avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
+ frame.avi.quantization_range =
+ HDMI_QUANTIZATION_RANGE_LIMITED;
else
- avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
+ frame.avi.quantization_range =
+ HDMI_QUANTIZATION_RANGE_FULL;
}
- avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
-
- intel_dip_infoframe_csum(&avi_if);
-
- /* sdvo spec says that the ecc is handled by the hw, and it looks like
- * we must not send the ecc field, either. */
- memcpy(sdvo_data, &avi_if, 3);
- sdvo_data[3] = avi_if.checksum;
- memcpy(&sdvo_data[4], &avi_if.body, sizeof(avi_if.body.avi));
+ len = hdmi_infoframe_pack(&frame, sdvo_data, sizeof(sdvo_data));
+ if (len < 0)
+ return false;
return intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_AVI_IF,
SDVO_HBUF_TX_VSYNC,
@@ -1084,7 +1086,7 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_config *pipe_config)
static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
struct drm_display_mode *mode = &pipe_config->requested_mode;
@@ -1154,7 +1156,7 @@ static void intel_sdvo_mode_set(struct intel_encoder *intel_encoder)
struct drm_display_mode *adjusted_mode =
&intel_crtc->config.adjusted_mode;
struct drm_display_mode *mode = &intel_crtc->config.requested_mode;
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(&intel_encoder->base);
+ struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder);
u32 sdvox;
struct intel_sdvo_in_out_map in_out;
struct intel_sdvo_dtd input_dtd, output_dtd;
@@ -1292,7 +1294,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
u16 active_outputs = 0;
u32 tmp;
@@ -1315,7 +1317,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_sdvo_dtd dtd;
int encoder_pixel_multiplier = 0;
u32 flags = 0, sdvox;
@@ -1357,22 +1359,21 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
}
/* Cross check the port pixel multiplier with the sdvo encoder state. */
- intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT, &val, 1);
- switch (val) {
- case SDVO_CLOCK_RATE_MULT_1X:
- encoder_pixel_multiplier = 1;
- break;
- case SDVO_CLOCK_RATE_MULT_2X:
- encoder_pixel_multiplier = 2;
- break;
- case SDVO_CLOCK_RATE_MULT_4X:
- encoder_pixel_multiplier = 4;
- break;
+ if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_CLOCK_RATE_MULT,
+ &val, 1)) {
+ switch (val) {
+ case SDVO_CLOCK_RATE_MULT_1X:
+ encoder_pixel_multiplier = 1;
+ break;
+ case SDVO_CLOCK_RATE_MULT_2X:
+ encoder_pixel_multiplier = 2;
+ break;
+ case SDVO_CLOCK_RATE_MULT_4X:
+ encoder_pixel_multiplier = 4;
+ break;
+ }
}
- if(HAS_PCH_SPLIT(dev))
- return; /* no pixel multiplier readout support yet */
-
WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
"SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
pipe_config->pixel_multiplier, encoder_pixel_multiplier);
@@ -1381,7 +1382,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
static void intel_disable_sdvo(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
u32 temp;
intel_sdvo_set_active_outputs(intel_sdvo, 0);
@@ -1423,7 +1424,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
u32 temp;
bool input1, input2;
@@ -1584,7 +1585,7 @@ static uint16_t intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo)
static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder)
{
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG,
&intel_sdvo->hotplug_active, 2);
@@ -1697,6 +1698,9 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id, drm_get_connector_name(connector));
+
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_ATTACHED_DISPLAYS,
&response, 2))
@@ -2188,7 +2192,7 @@ static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs
static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
{
- struct intel_sdvo *intel_sdvo = to_intel_sdvo(encoder);
+ struct intel_sdvo *intel_sdvo = to_sdvo(to_intel_encoder(encoder));
if (intel_sdvo->sdvo_lvds_fixed_mode != NULL)
drm_mode_destroy(encoder->dev,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 1fa5612a457..78b621cdd10 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -38,7 +38,8 @@
#include "i915_drv.h"
static void
-vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
+vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
@@ -108,14 +109,15 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
sprctl |= SP_ENABLE;
+ intel_update_sprite_watermarks(dplane, crtc, src_w, pixel_size, true,
+ src_w != crtc_w || src_h != crtc_h);
+
/* Sizes are 0 based */
src_w--;
src_h--;
crtc_w--;
crtc_h--;
- intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
-
I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
@@ -133,13 +135,13 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_framebuffer *fb,
I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane), sprctl);
- I915_MODIFY_DISPBASE(SPSURF(pipe, plane), obj->gtt_offset +
+ I915_MODIFY_DISPBASE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
sprsurf_offset);
POSTING_READ(SPSURF(pipe, plane));
}
static void
-vlv_disable_plane(struct drm_plane *dplane)
+vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
{
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -152,6 +154,8 @@ vlv_disable_plane(struct drm_plane *dplane)
/* Activate double buffered register update */
I915_MODIFY_DISPBASE(SPSURF(pipe, plane), 0);
POSTING_READ(SPSURF(pipe, plane));
+
+ intel_update_sprite_watermarks(dplane, crtc, 0, 0, false, false);
}
static int
@@ -206,7 +210,8 @@ vlv_get_colorkey(struct drm_plane *dplane,
}
static void
-ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
@@ -262,14 +267,15 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
if (IS_HASWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
+ intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
+ src_w != crtc_w || src_h != crtc_h);
+
/* Sizes are 0 based */
src_w--;
src_h--;
crtc_w--;
crtc_h--;
- intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
-
/*
* IVB workaround: must disable low power watermarks for at least
* one frame before enabling scaling. LP watermarks can be re-enabled
@@ -308,7 +314,8 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
- I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
+ I915_MODIFY_DISPBASE(SPRSURF(pipe),
+ i915_gem_obj_ggtt_offset(obj) + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
/* potentially re-enable LP watermarks */
@@ -317,7 +324,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
}
static void
-ivb_disable_plane(struct drm_plane *plane)
+ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -335,7 +342,7 @@ ivb_disable_plane(struct drm_plane *plane)
dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
- intel_update_sprite_watermarks(dev, pipe, 0, 0, false);
+ intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
/* potentially re-enable LP watermarks */
if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
@@ -397,7 +404,8 @@ ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
}
static void
-ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t x, uint32_t y,
@@ -449,14 +457,15 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
dvscntr |= DVS_ENABLE;
+ intel_update_sprite_watermarks(plane, crtc, src_w, pixel_size, true,
+ src_w != crtc_w || src_h != crtc_h);
+
/* Sizes are 0 based */
src_w--;
src_h--;
crtc_w--;
crtc_h--;
- intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size, true);
-
dvsscale = 0;
if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
@@ -478,12 +487,13 @@ ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
- I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
+ I915_MODIFY_DISPBASE(DVSSURF(pipe),
+ i915_gem_obj_ggtt_offset(obj) + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}
static void
-ilk_disable_plane(struct drm_plane *plane)
+ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -496,6 +506,8 @@ ilk_disable_plane(struct drm_plane *plane)
/* Flush double buffered register updates */
I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
POSTING_READ(DVSSURF(pipe));
+
+ intel_update_sprite_watermarks(plane, crtc, 0, 0, false, false);
}
static void
@@ -818,11 +830,11 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
intel_enable_primary(crtc);
if (visible)
- intel_plane->update_plane(plane, fb, obj,
+ intel_plane->update_plane(plane, crtc, fb, obj,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
else
- intel_plane->disable_plane(plane);
+ intel_plane->disable_plane(plane, crtc);
if (disable_primary)
intel_disable_primary(crtc);
@@ -855,9 +867,14 @@ intel_disable_plane(struct drm_plane *plane)
struct intel_plane *intel_plane = to_intel_plane(plane);
int ret = 0;
- if (plane->crtc)
- intel_enable_primary(plane->crtc);
- intel_plane->disable_plane(plane);
+ if (!plane->fb)
+ return 0;
+
+ if (WARN_ON(!plane->crtc))
+ return -EINVAL;
+
+ intel_enable_primary(plane->crtc);
+ intel_plane->disable_plane(plane, plane->crtc);
if (!intel_plane->obj)
goto out;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 39debd80d19..f2c6d7909ae 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -823,16 +823,14 @@ static const struct tv_mode tv_modes[] = {
},
};
-static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
+static struct intel_tv *enc_to_tv(struct intel_encoder *encoder)
{
- return container_of(encoder, struct intel_tv, base.base);
+ return container_of(encoder, struct intel_tv, base);
}
static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
{
- return container_of(intel_attached_encoder(connector),
- struct intel_tv,
- base);
+ return enc_to_tv(intel_attached_encoder(connector));
}
static bool
@@ -908,7 +906,7 @@ static bool
intel_tv_compute_config(struct intel_encoder *encoder,
struct intel_crtc_config *pipe_config)
{
- struct intel_tv *intel_tv = enc_to_intel_tv(&encoder->base);
+ struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
if (!tv_mode)
@@ -921,15 +919,12 @@ intel_tv_compute_config(struct intel_encoder *encoder,
return true;
}
-static void
-intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void intel_tv_mode_set(struct intel_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
+ struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = encoder->crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+ struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+ struct intel_tv *intel_tv = enc_to_tv(encoder);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
u32 tv_ctl;
u32 hctl1, hctl2, hctl3;
@@ -1305,6 +1300,10 @@ intel_tv_detect(struct drm_connector *connector, bool force)
struct intel_tv *intel_tv = intel_attached_tv(connector);
int type;
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
+ connector->base.id, drm_get_connector_name(connector),
+ force);
+
mode = reported_modes[0];
if (force) {
@@ -1483,10 +1482,6 @@ out:
return ret;
}
-static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
- .mode_set = intel_tv_mode_set,
-};
-
static const struct drm_connector_funcs intel_tv_connector_funcs = {
.dpms = intel_connector_dpms,
.detect = intel_tv_detect,
@@ -1619,6 +1614,7 @@ intel_tv_init(struct drm_device *dev)
DRM_MODE_ENCODER_TVDAC);
intel_encoder->compute_config = intel_tv_compute_config;
+ intel_encoder->mode_set = intel_tv_mode_set;
intel_encoder->enable = intel_enable_tv;
intel_encoder->disable = intel_disable_tv;
intel_encoder->get_hw_state = intel_tv_get_hw_state;
@@ -1640,7 +1636,6 @@ intel_tv_init(struct drm_device *dev)
intel_tv->tv_format = tv_modes[initial_mode].name;
- drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs);
drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
new file mode 100644
index 00000000000..8f5bc869c02
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -0,0 +1,595 @@
+/*
+ * Copyright © 2013 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include "i915_drv.h"
+#include "intel_drv.h"
+
+#define FORCEWAKE_ACK_TIMEOUT_MS 2
+
+#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
+#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
+#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
+#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
+#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))
+
+#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)
+
+
+static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
+{
+ u32 gt_thread_status_mask;
+
+ if (IS_HASWELL(dev_priv->dev))
+ gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
+ else
+ gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
+
+ /* w/a for a sporadic read returning 0 by waiting for the GT
+ * thread to wake up.
+ */
+ if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
+ DRM_ERROR("GT thread status wait timed out\n");
+}
+
+static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE, 0);
+ /* something from same cacheline, but !FORCEWAKE */
+ __raw_posting_read(dev_priv, ECOBUS);
+}
+
+static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE, 1);
+ /* something from same cacheline, but !FORCEWAKE */
+ __raw_posting_read(dev_priv, ECOBUS);
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+
+ /* WaRsForcewakeWaitTC0:snb */
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ __raw_posting_read(dev_priv, ECOBUS);
+}
+
+static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+ u32 forcewake_ack;
+
+ if (IS_HASWELL(dev_priv->dev))
+ forcewake_ack = FORCEWAKE_ACK_HSW;
+ else
+ forcewake_ack = FORCEWAKE_MT_ACK;
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_MT,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ __raw_posting_read(dev_priv, ECOBUS);
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
+
+ /* WaRsForcewakeWaitTC0:ivb,hsw */
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
+{
+ u32 gtfifodbg;
+
+ gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
+ if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
+ "MMIO read or write has been dropped %x\n", gtfifodbg))
+ __raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
+}
+
+static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE, 0);
+ /* something from same cacheline, but !FORCEWAKE */
+ __raw_posting_read(dev_priv, ECOBUS);
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE_MT,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ /* something from same cacheline, but !FORCEWAKE_MT */
+ __raw_posting_read(dev_priv, ECOBUS);
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
+{
+ int ret = 0;
+
+ if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
+ int loop = 500;
+ u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+ while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
+ udelay(10);
+ fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+ }
+ if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
+ ++ret;
+ dev_priv->uncore.fifo_count = fifo;
+ }
+ dev_priv->uncore.fifo_count--;
+
+ return ret;
+}
+
+static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+ _MASKED_BIT_DISABLE(0xffff));
+ /* something from same cacheline, but !FORCEWAKE_VLV */
+ __raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
+}
+
+static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
+
+ __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+ _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");
+
+ if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
+ FORCEWAKE_KERNEL),
+ FORCEWAKE_ACK_TIMEOUT_MS))
+ DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");
+
+ /* WaRsForcewakeWaitTC0:vlv */
+ __gen6_gt_wait_for_thread_c0(dev_priv);
+}
+
+static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ __raw_i915_write32(dev_priv, FORCEWAKE_VLV,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
+ _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
+ /* The below doubles as a POSTING_READ */
+ gen6_gt_check_fifodbg(dev_priv);
+}
+
+void intel_uncore_early_sanitize(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_FPGA_DBG_UNCLAIMED(dev))
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+}
+
+void intel_uncore_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_VALLEYVIEW(dev)) {
+ dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
+ } else if (IS_HASWELL(dev)) {
+ dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
+ dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
+ } else if (IS_IVYBRIDGE(dev)) {
+ u32 ecobus;
+
+ /* IVB configs may use multi-threaded forcewake */
+
+ /* A small trick here - if the bios hasn't configured
+ * MT forcewake, and if the device is in RC6, then
+ * force_wake_mt_get will not wake the device and the
+ * ECOBUS read will return zero. Which will be
+ * (correctly) interpreted by the test below as MT
+ * forcewake being disabled.
+ */
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = __raw_i915_read32(dev_priv, ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ dev_priv->uncore.funcs.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->uncore.funcs.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ } else {
+ DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
+ DRM_INFO("when using vblank-synced partial screen updates.\n");
+ dev_priv->uncore.funcs.force_wake_get =
+ __gen6_gt_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put =
+ __gen6_gt_force_wake_put;
+ }
+ } else if (IS_GEN6(dev)) {
+ dev_priv->uncore.funcs.force_wake_get =
+ __gen6_gt_force_wake_get;
+ dev_priv->uncore.funcs.force_wake_put =
+ __gen6_gt_force_wake_put;
+ }
+}
+
+void intel_uncore_sanitize(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_VALLEYVIEW(dev)) {
+ vlv_force_wake_reset(dev_priv);
+ } else if (INTEL_INFO(dev)->gen >= 6) {
+ __gen6_gt_force_wake_reset(dev_priv);
+ if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
+ __gen6_gt_force_wake_mt_reset(dev_priv);
+ }
+
+ /* BIOS often leaves RC6 enabled, but disable it for hw init */
+ intel_disable_gt_powersave(dev);
+}
+
+/*
+ * Generally this is called implicitly by the register read function. However,
+ * if some sequence requires the GT to not power down then this function should
+ * be called at the beginning of the sequence followed by a call to
+ * gen6_gt_force_wake_put() at the end of the sequence.
+ */
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ if (dev_priv->uncore.forcewake_count++ == 0)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+/*
+ * see gen6_gt_force_wake_get()
+ */
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ if (--dev_priv->uncore.forcewake_count == 0)
+ dev_priv->uncore.funcs.force_wake_put(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
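+
+#if 0	/* illustrative usage sketch only; SOME_GT_REGISTER is a placeholder */
+/*
+ * A caller that must keep the GT awake across a whole sequence of
+ * accesses brackets the sequence with an explicit get/put pair, instead
+ * of relying on the implicit per-access forcewake taken by
+ * i915_read##x()/i915_write##x() below.
+ */
+static u32 example_read_with_forcewake(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ gen6_gt_force_wake_get(dev_priv);
+ val = I915_READ(SOME_GT_REGISTER); /* placeholder register define */
+ gen6_gt_force_wake_put(dev_priv);
+
+ return val;
+}
+#endif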
+
+/* We give fast paths for the really cool registers */
+#define NEEDS_FORCE_WAKE(dev_priv, reg) \
+ ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE))
+
+static void
+ilk_dummy_write(struct drm_i915_private *dev_priv)
+{
+ /* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
+ * the chip from rc6 before touching it for real. MI_MODE is masked,
+ * hence harmless to write 0 into. */
+ __raw_i915_write32(dev_priv, MI_MODE, 0);
+}
+
+static void
+hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
+{
+ if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
+ (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+ DRM_ERROR("Unknown unclaimed register before writing to %x\n",
+ reg);
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ }
+}
+
+static void
+hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
+{
+ if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
+ (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+ DRM_ERROR("Unclaimed write to %x\n", reg);
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ }
+}
+
+#define __i915_read(x) \
+u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace) { \
+ unsigned long irqflags; \
+ u##x val = 0; \
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+ if (dev_priv->info->gen == 5) \
+ ilk_dummy_write(dev_priv); \
+ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ if (dev_priv->uncore.forcewake_count == 0) \
+ dev_priv->uncore.funcs.force_wake_get(dev_priv); \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ if (dev_priv->uncore.forcewake_count == 0) \
+ dev_priv->uncore.funcs.force_wake_put(dev_priv); \
+ } else { \
+ val = __raw_i915_read##x(dev_priv, reg); \
+ } \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+ trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
+ return val; \
+}
+
+__i915_read(8)
+__i915_read(16)
+__i915_read(32)
+__i915_read(64)
+#undef __i915_read
+
+#define __i915_write(x) \
+void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace) { \
+ unsigned long irqflags; \
+ u32 __fifo_ret = 0; \
+ trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
+ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
+ } \
+ if (dev_priv->info->gen == 5) \
+ ilk_dummy_write(dev_priv); \
+ hsw_unclaimed_reg_clear(dev_priv, reg); \
+ __raw_i915_write##x(dev_priv, reg, val); \
+ if (unlikely(__fifo_ret)) { \
+ gen6_gt_check_fifodbg(dev_priv); \
+ } \
+ hsw_unclaimed_reg_check(dev_priv, reg); \
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
+}
+__i915_write(8)
+__i915_write(16)
+__i915_write(32)
+__i915_write(64)
+#undef __i915_write
+
+static const struct register_whitelist {
+ uint64_t offset;
+ uint32_t size;
+ uint32_t gen_bitmask; /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
+} whitelist[] = {
+ { RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
+};
+
+int i915_reg_read_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_reg_read *reg = data;
+ struct register_whitelist const *entry = whitelist;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
+ if (entry->offset == reg->offset &&
+ (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(whitelist))
+ return -EINVAL;
+
+ switch (entry->size) {
+ case 8:
+ reg->val = I915_READ64(reg->offset);
+ break;
+ case 4:
+ reg->val = I915_READ(reg->offset);
+ break;
+ case 2:
+ reg->val = I915_READ16(reg->offset);
+ break;
+ case 1:
+ reg->val = I915_READ8(reg->offset);
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return 0;
+}
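+
+/*
+ * Userspace view (a sketch, assuming libdrm's drmIoctl() wrapper): only
+ * whitelisted offsets succeed, anything else comes back as -EINVAL.
+ * 0x2358 is RING_TIMESTAMP(RENDER_RING_BASE), the single entry above.
+ *
+ *	struct drm_i915_reg_read reg = { .offset = 0x2358 };
+ *
+ *	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &reg) == 0)
+ *		printf("render ring timestamp: %llu\n",
+ *		       (unsigned long long)reg.val);
+ */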
+
+static int i8xx_do_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (IS_I85X(dev))
+ return -ENODEV;
+
+ I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
+ POSTING_READ(D_STATE);
+
+ if (IS_I830(dev) || IS_845G(dev)) {
+ I915_WRITE(DEBUG_RESET_I830,
+ DEBUG_RESET_DISPLAY |
+ DEBUG_RESET_RENDER |
+ DEBUG_RESET_FULL);
+ POSTING_READ(DEBUG_RESET_I830);
+ msleep(1);
+
+ I915_WRITE(DEBUG_RESET_I830, 0);
+ POSTING_READ(DEBUG_RESET_I830);
+ }
+
+ msleep(1);
+
+ I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
+ POSTING_READ(D_STATE);
+
+ return 0;
+}
+
+static int i965_reset_complete(struct drm_device *dev)
+{
+ u8 gdrst;
+ pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+ return (gdrst & GRDOM_RESET_ENABLE) == 0;
+}
+
+static int i965_do_reset(struct drm_device *dev)
+{
+ int ret;
+
+ /*
+ * Set the domains we want to reset (GRDOM/bits 2 and 3) as
+ * well as the reset bit (GR/bit 0). Setting the GR bit
+ * triggers the reset; when done, the hardware will clear it.
+ */
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ GRDOM_RENDER | GRDOM_RESET_ENABLE);
+ ret = wait_for(i965_reset_complete(dev), 500);
+ if (ret)
+ return ret;
+
+ /* We can't reset render&media without also resetting display ... */
+ pci_write_config_byte(dev->pdev, I965_GDRST,
+ GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+
+ ret = wait_for(i965_reset_complete(dev), 500);
+ if (ret)
+ return ret;
+
+ pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+
+ return 0;
+}
+
+static int ironlake_do_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 gdrst;
+ int ret;
+
+ gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ gdrst &= ~GRDOM_MASK;
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+ gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
+ ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+ if (ret)
+ return ret;
+
+ /* We can't reset render&media without also resetting display ... */
+ gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+ gdrst &= ~GRDOM_MASK;
+ I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
+ gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
+ return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
+}
+
+static int gen6_do_reset(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+ unsigned long irqflags;
+
+ /* Hold uncore.lock across reset to prevent any register access
+ * with forcewake not set correctly
+ */
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ /* Reset the chip */
+
+ /* GEN6_GDRST is not in the gt power well, no need to check
+ * for fifo space for the write or forcewake the chip for
+ * the read
+ */
+ __raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);
+
+ /* Spin waiting for the device to ack the reset request */
+ ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+
+ /* If reset with a user forcewake, try to restore, otherwise turn it off */
+ if (dev_priv->uncore.forcewake_count)
+ dev_priv->uncore.funcs.force_wake_get(dev_priv);
+ else
+ dev_priv->uncore.funcs.force_wake_put(dev_priv);
+
+ /* Restore fifo count */
+ dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ return ret;
+}
+
+int intel_gpu_reset(struct drm_device *dev)
+{
+ switch (INTEL_INFO(dev)->gen) {
+ case 7:
+ case 6: return gen6_do_reset(dev);
+ case 5: return ironlake_do_reset(dev);
+ case 4: return i965_do_reset(dev);
+ case 2: return i8xx_do_reset(dev);
+ default: return -ENODEV;
+ }
+}
+
+void intel_uncore_clear_errors(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ /* XXX needs spinlock around caller's grouping */
+ if (HAS_FPGA_DBG_UNCLAIMED(dev))
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+}
+
+void intel_uncore_check_errors(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
+ (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
+ DRM_ERROR("Unclaimed register before interrupt\n");
+ __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
+ }
+}
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 17d0a637e4f..6b1a87c8aac 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -50,7 +50,6 @@ static const struct file_operations mga_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = mga_compat_ioctl,
#endif
@@ -59,7 +58,7 @@ static const struct file_operations mga_driver_fops = {
static struct drm_driver driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
+ DRIVER_USE_AGP | DRIVER_PCI_DMA |
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof(drm_mga_buf_priv_t),
.load = mga_driver_load,
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index 54558a01969..ca4bc54ea21 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -149,7 +149,7 @@ typedef struct drm_mga_private {
unsigned int agp_size;
} drm_mga_private_t;
-extern struct drm_ioctl_desc mga_ioctls[];
+extern const struct drm_ioctl_desc mga_ioctls[];
extern int mga_max_ioctl;
/* mga_dma.c */
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index 9c145143ad0..37cc2fb4ead 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -1083,7 +1083,7 @@ file_priv)
return 0;
}
-struct drm_ioctl_desc mga_ioctls[] = {
+const struct drm_ioctl_desc mga_ioctls[] = {
DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH),
DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH),
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 122b571ccc7..fcce7b2f801 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -81,7 +81,6 @@ static const struct file_operations mgag200_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = mgag200_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
@@ -89,7 +88,7 @@ static const struct file_operations mgag200_driver_fops = {
};
static struct drm_driver driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_USE_MTRR,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET,
.load = mgag200_driver_load,
.unload = mgag200_driver_unload,
.fops = &mgag200_driver_fops,
@@ -104,7 +103,7 @@ static struct drm_driver driver = {
.gem_free_object = mgag200_gem_free_object,
.dumb_create = mgag200_dumb_create,
.dumb_map_offset = mgag200_dumb_mmap_offset,
- .dumb_destroy = mgag200_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
};
static struct pci_driver mgag200_pci_driver = {
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 12e2499d935..baaae19332e 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -264,9 +264,6 @@ int mgag200_gem_init_object(struct drm_gem_object *obj);
int mgag200_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int mgag200_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle);
void mgag200_gem_free_object(struct drm_gem_object *obj);
int
mgag200_dumb_mmap_offset(struct drm_file *file,
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 9fa5685baee..0f8b861b10b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -310,13 +310,6 @@ int mgag200_dumb_create(struct drm_file *file,
return 0;
}
-int mgag200_dumb_destroy(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
-
int mgag200_gem_init_object(struct drm_gem_object *obj)
{
BUG();
@@ -349,7 +342,7 @@ void mgag200_gem_free_object(struct drm_gem_object *obj)
static inline u64 mgag200_bo_mmap_offset(struct mgag200_bo *bo)
{
- return bo->bo.addr_space_offset;
+ return drm_vma_node_offset_addr(&bo->bo.vma_node);
}
int
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 251784aa222..503a414cbda 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -29,6 +29,7 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct mga_device *mdev = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
int i;
if (!crtc->enabled)
@@ -36,6 +37,28 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
WREG8(DAC_INDEX + MGA1064_INDEX, 0);
+ if (fb && fb->bits_per_pixel == 16) {
+ int inc = (fb->depth == 15) ? 8 : 4;
+ u8 r, b;
+ for (i = 0; i < MGAG200_LUT_SIZE; i += inc) {
+ if (fb->depth == 16) {
+ if (i > (MGAG200_LUT_SIZE >> 1)) {
+ r = b = 0;
+ } else {
+ r = mga_crtc->lut_r[i << 1];
+ b = mga_crtc->lut_b[i << 1];
+ }
+ } else {
+ r = mga_crtc->lut_r[i];
+ b = mga_crtc->lut_b[i];
+ }
+ /* VGA registers */
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, r);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]);
+ WREG8(DAC_INDEX + MGA1064_COL_PAL, b);
+ }
+ return;
+ }
for (i = 0; i < MGAG200_LUT_SIZE; i++) {
/* VGA registers */
WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]);
@@ -877,7 +900,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8);
if (crtc->fb->bits_per_pixel == 24)
- pitch = pitch >> (4 - bppshift);
+ pitch = (pitch * 3) >> (4 - bppshift);
else
pitch = pitch >> (4 - bppshift);
@@ -1251,6 +1274,24 @@ static void mga_crtc_destroy(struct drm_crtc *crtc)
kfree(mga_crtc);
}
+static void mga_crtc_disable(struct drm_crtc *crtc)
+{
+ int ret;
+ DRM_DEBUG_KMS("\n");
+ mga_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (crtc->fb) {
+ struct mga_framebuffer *mga_fb = to_mga_framebuffer(crtc->fb);
+ struct drm_gem_object *obj = mga_fb->obj;
+ struct mgag200_bo *bo = gem_to_mga_bo(obj);
+ ret = mgag200_bo_reserve(bo, false);
+ if (ret)
+ return;
+ mgag200_bo_push_sysram(bo);
+ mgag200_bo_unreserve(bo);
+ }
+ crtc->fb = NULL;
+}
+
/* These provide the minimum set of functions required to handle a CRTC */
static const struct drm_crtc_funcs mga_crtc_funcs = {
.cursor_set = mga_crtc_cursor_set,
@@ -1261,6 +1302,7 @@ static const struct drm_crtc_funcs mga_crtc_funcs = {
};
static const struct drm_crtc_helper_funcs mga_helper_funcs = {
+ .disable = mga_crtc_disable,
.dpms = mga_crtc_dpms,
.mode_fixup = mga_crtc_mode_fixup,
.mode_set = mga_crtc_mode_set,
@@ -1581,6 +1623,8 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev)
drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);
+ drm_sysfs_connector_add(connector);
+
mga_connector->i2c = mgag200_i2c_create(dev);
if (!mga_connector->i2c)
DRM_ERROR("failed to add ddc bus\n");
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 3acb2b044c7..07b192fe15c 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -148,7 +148,9 @@ mgag200_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
static int mgag200_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- return 0;
+ struct mgag200_bo *mgabo = mgag200_bo(bo);
+
+ return drm_vma_node_verify_access(&mgabo->gem.vma_node, filp);
}
static int mgag200_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
@@ -321,8 +323,8 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,
return ret;
}
- mgabo->gem.driver_private = NULL;
mgabo->bo.bdev = &mdev->ttm.bdev;
+ mgabo->bo.bdev->dev_mapping = dev->dev_mapping;
mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
@@ -353,6 +355,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
bo->pin_count++;
if (gpu_addr)
*gpu_addr = mgag200_bo_gpu_offset(bo);
+ return 0;
}
mgag200_ttm_placement(bo, pl_flag);
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
new file mode 100644
index 00000000000..a06c19cc56f
--- /dev/null
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -0,0 +1,34 @@
+
+config DRM_MSM
+ tristate "MSM DRM"
+ depends on DRM
+ depends on ARCH_MSM
+ depends on ARCH_MSM8960
+ select DRM_KMS_HELPER
+ select SHMEM
+ select TMPFS
+ default y
+ help
+ DRM/KMS driver for MSM/snapdragon.
+
+config DRM_MSM_FBDEV
+ bool "Enable legacy fbdev support for MSM modesetting driver"
+ depends on DRM_MSM
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ default y
+ help
+ Choose this option if you have a need for the legacy fbdev
+ support. Note that this support also provides the Linux console
+ support on top of the MSM modesetting driver.
+
+config DRM_MSM_REGISTER_LOGGING
+ bool "MSM DRM register logging"
+ depends on DRM_MSM
+ default n
+ help
+ Compile in support for logging register reads/writes in a format
+ that can be parsed by the envytools demsm tool. If enabled, register
+ logging can be switched on via the msm.reglog=y module param.
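+
+# Example (a sketch of the two usual ways to turn this on): when msm is
+# built as a module, load it with "modprobe msm reglog=y"; when built in,
+# pass "msm.reglog=y" on the kernel command line.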
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
new file mode 100644
index 00000000000..e17914889e5
--- /dev/null
+++ b/drivers/gpu/drm/msm/Makefile
@@ -0,0 +1,30 @@
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm
+ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
+ ccflags-y += -Werror
+endif
+
+msm-y := \
+ adreno/adreno_gpu.o \
+ adreno/a3xx_gpu.o \
+ hdmi/hdmi.o \
+ hdmi/hdmi_bridge.o \
+ hdmi/hdmi_connector.o \
+ hdmi/hdmi_i2c.o \
+ hdmi/hdmi_phy_8960.o \
+ hdmi/hdmi_phy_8x60.o \
+ mdp4/mdp4_crtc.o \
+ mdp4/mdp4_dtv_encoder.o \
+ mdp4/mdp4_format.o \
+ mdp4/mdp4_irq.o \
+ mdp4/mdp4_kms.o \
+ mdp4/mdp4_plane.o \
+ msm_drv.o \
+ msm_fb.o \
+ msm_gem.o \
+ msm_gem_submit.o \
+ msm_gpu.o \
+ msm_ringbuffer.o
+
+msm-$(CONFIG_DRM_MSM_FBDEV) += msm_fbdev.o
+
+obj-$(CONFIG_DRM_MSM) += msm.o
diff --git a/drivers/gpu/drm/msm/NOTES b/drivers/gpu/drm/msm/NOTES
new file mode 100644
index 00000000000..e036f6c1db9
--- /dev/null
+++ b/drivers/gpu/drm/msm/NOTES
@@ -0,0 +1,69 @@
+NOTES about msm drm/kms driver:
+
+In the current snapdragon SoCs, we have (at least) 3 different
+display controller blocks at play:
+ + MDP3 - ?? seems to be what is on geeksphone peak device
+ + MDP4 - S3 (APQ8060, touchpad), S4-pro (APQ8064, nexus4 & ifc6410)
+ + MDSS - snapdragon 800
+
+(I don't have a completely clear picture on which display controller
+maps to which part #)
+
+Plus a handful of blocks around them for HDMI/DSI/etc output.
+
+And on gpu side of things:
+ + zero, one, or two 2d cores (z180)
+ + and either a2xx or a3xx 3d core.
+
+But, HDMI/DSI/etc blocks seem like they can be shared across multiple
+display controller blocks. And I for sure don't want to have to deal
+with N different kms devices from xf86-video-freedreno. Plus, it
+seems like we can do some clever tricks like using the GPU to trigger
+a pageflip after rendering completes (i.e. have the kms/crtc code build
+up the gpu cmdstream to update scanout and write the FLUSH register after).
+
+So, the approach is one drm driver, with some modularity. Different
+'struct msm_kms' implementations, depending on display controller.
+And one or more 'struct msm_gpu' for the various different gpu sub-
+modules.
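+
+(As a very rough sketch -- illustrative only, not the actual struct
+layout -- the modularity amounts to a small set of function pointers the
+core driver calls into, along the lines of:
+
+    struct msm_kms_funcs {
+            void (*irq_preinstall)(struct msm_kms *kms);
+            int  (*irq_postinstall)(struct msm_kms *kms);
+            int  (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+            void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+            void (*destroy)(struct msm_kms *kms);
+    };
+
+with mdp4/mdss/etc each providing their own implementation.)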
+
+(The second part is not implemented yet. So far this is just a basic KMS
+driver, not exposing any custom ioctls to userspace for now.)
+
+The kms module provides the plane, crtc, and encoder objects, and
+loads whatever connectors are appropriate.
+
+For MDP4, the mapping is:
+
+ plane -> PIPE{RGBn,VGn} \
+ crtc -> OVLP{n} + DMA{P,S,E} (??) |-> MDP "device"
+ encoder -> DTV/LCDC/DSI (within MDP4) /
+ connector -> HDMI/DSI/etc --> other device(s)
+
+Since the irqs that drm core mostly cares about are vblank/framedone,
+we'll let msm_mdp4_kms provide the irq install/uninstall/etc functions
+and treat the MDP4 block's irq as "the" irq, even though the connectors
+may have their own irqs which they install themselves. For this reason
+the display controller is the "master" device.
+
+Each connector probably ends up being a separate device, just for the
+logistics of finding/mapping io region, irq, etc. Ideally we would
+have a better way than just stashing the platform device in a global
+(i.e. like a DT super-node.. but I don't have any snapdragon hw yet that
+is using DT).
+
+Note that so far I've not been able to get any docs on the hw, and it
+seems that access to such docs would prevent me from working on the
+freedreno gallium driver. So there may be some mistakes in register
+names (I had to invent a few, since no sufficient hint was given in
+the downstream android fbdev driver), bitfield sizes, etc. My current
+state of understanding the registers is given in the envytools rnndb
+files at:
+
+ https://github.com/freedreno/envytools/tree/master/rnndb
+ (the mdp4/hdmi/dsi directories)
+
+These files are used both for a parser tool (in the same tree) to
+parse logged register reads/writes (both from the downstream android fbdev
+driver and from this driver with register logging enabled), and to
+generate the register-level headers.
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
new file mode 100644
index 00000000000..35463864b95
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -0,0 +1,1438 @@
+#ifndef A2XX_XML
+#define A2XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
+- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a2xx_rb_dither_type {
+ DITHER_PIXEL = 0,
+ DITHER_SUBPIXEL = 1,
+};
+
+enum a2xx_colorformatx {
+ COLORX_4_4_4_4 = 0,
+ COLORX_1_5_5_5 = 1,
+ COLORX_5_6_5 = 2,
+ COLORX_8 = 3,
+ COLORX_8_8 = 4,
+ COLORX_8_8_8_8 = 5,
+ COLORX_S8_8_8_8 = 6,
+ COLORX_16_FLOAT = 7,
+ COLORX_16_16_FLOAT = 8,
+ COLORX_16_16_16_16_FLOAT = 9,
+ COLORX_32_FLOAT = 10,
+ COLORX_32_32_FLOAT = 11,
+ COLORX_32_32_32_32_FLOAT = 12,
+ COLORX_2_3_3 = 13,
+ COLORX_8_8_8 = 14,
+};
+
+enum a2xx_sq_surfaceformat {
+ FMT_1_REVERSE = 0,
+ FMT_1 = 1,
+ FMT_8 = 2,
+ FMT_1_5_5_5 = 3,
+ FMT_5_6_5 = 4,
+ FMT_6_5_5 = 5,
+ FMT_8_8_8_8 = 6,
+ FMT_2_10_10_10 = 7,
+ FMT_8_A = 8,
+ FMT_8_B = 9,
+ FMT_8_8 = 10,
+ FMT_Cr_Y1_Cb_Y0 = 11,
+ FMT_Y1_Cr_Y0_Cb = 12,
+ FMT_5_5_5_1 = 13,
+ FMT_8_8_8_8_A = 14,
+ FMT_4_4_4_4 = 15,
+ FMT_10_11_11 = 16,
+ FMT_11_11_10 = 17,
+ FMT_DXT1 = 18,
+ FMT_DXT2_3 = 19,
+ FMT_DXT4_5 = 20,
+ FMT_24_8 = 22,
+ FMT_24_8_FLOAT = 23,
+ FMT_16 = 24,
+ FMT_16_16 = 25,
+ FMT_16_16_16_16 = 26,
+ FMT_16_EXPAND = 27,
+ FMT_16_16_EXPAND = 28,
+ FMT_16_16_16_16_EXPAND = 29,
+ FMT_16_FLOAT = 30,
+ FMT_16_16_FLOAT = 31,
+ FMT_16_16_16_16_FLOAT = 32,
+ FMT_32 = 33,
+ FMT_32_32 = 34,
+ FMT_32_32_32_32 = 35,
+ FMT_32_FLOAT = 36,
+ FMT_32_32_FLOAT = 37,
+ FMT_32_32_32_32_FLOAT = 38,
+ FMT_32_AS_8 = 39,
+ FMT_32_AS_8_8 = 40,
+ FMT_16_MPEG = 41,
+ FMT_16_16_MPEG = 42,
+ FMT_8_INTERLACED = 43,
+ FMT_32_AS_8_INTERLACED = 44,
+ FMT_32_AS_8_8_INTERLACED = 45,
+ FMT_16_INTERLACED = 46,
+ FMT_16_MPEG_INTERLACED = 47,
+ FMT_16_16_MPEG_INTERLACED = 48,
+ FMT_DXN = 49,
+ FMT_8_8_8_8_AS_16_16_16_16 = 50,
+ FMT_DXT1_AS_16_16_16_16 = 51,
+ FMT_DXT2_3_AS_16_16_16_16 = 52,
+ FMT_DXT4_5_AS_16_16_16_16 = 53,
+ FMT_2_10_10_10_AS_16_16_16_16 = 54,
+ FMT_10_11_11_AS_16_16_16_16 = 55,
+ FMT_11_11_10_AS_16_16_16_16 = 56,
+ FMT_32_32_32_FLOAT = 57,
+ FMT_DXT3A = 58,
+ FMT_DXT5A = 59,
+ FMT_CTX1 = 60,
+ FMT_DXT3A_AS_1_1_1_1 = 61,
+};
+
+enum a2xx_sq_ps_vtx_mode {
+ POSITION_1_VECTOR = 0,
+ POSITION_2_VECTORS_UNUSED = 1,
+ POSITION_2_VECTORS_SPRITE = 2,
+ POSITION_2_VECTORS_EDGE = 3,
+ POSITION_2_VECTORS_KILL = 4,
+ POSITION_2_VECTORS_SPRITE_KILL = 5,
+ POSITION_2_VECTORS_EDGE_KILL = 6,
+ MULTIPASS = 7,
+};
+
+enum a2xx_sq_sample_cntl {
+ CENTROIDS_ONLY = 0,
+ CENTERS_ONLY = 1,
+ CENTROIDS_AND_CENTERS = 2,
+};
+
+enum a2xx_dx_clip_space {
+ DXCLIP_OPENGL = 0,
+ DXCLIP_DIRECTX = 1,
+};
+
+enum a2xx_pa_su_sc_polymode {
+ POLY_DISABLED = 0,
+ POLY_DUALMODE = 1,
+};
+
+enum a2xx_rb_edram_mode {
+ EDRAM_NOP = 0,
+ COLOR_DEPTH = 4,
+ DEPTH_ONLY = 5,
+ EDRAM_COPY = 6,
+};
+
+enum a2xx_pa_sc_pattern_bit_order {
+ LITTLE = 0,
+ BIG = 1,
+};
+
+enum a2xx_pa_sc_auto_reset_cntl {
+ NEVER = 0,
+ EACH_PRIMITIVE = 1,
+ EACH_PACKET = 2,
+};
+
+enum a2xx_pa_pixcenter {
+ PIXCENTER_D3D = 0,
+ PIXCENTER_OGL = 1,
+};
+
+enum a2xx_pa_roundmode {
+ TRUNCATE = 0,
+ ROUND = 1,
+ ROUNDTOEVEN = 2,
+ ROUNDTOODD = 3,
+};
+
+enum a2xx_pa_quantmode {
+ ONE_SIXTEENTH = 0,
+ ONE_EIGTH = 1,
+ ONE_QUARTER = 2,
+ ONE_HALF = 3,
+ ONE = 4,
+};
+
+enum a2xx_rb_copy_sample_select {
+ SAMPLE_0 = 0,
+ SAMPLE_1 = 1,
+ SAMPLE_2 = 2,
+ SAMPLE_3 = 3,
+ SAMPLE_01 = 4,
+ SAMPLE_23 = 5,
+ SAMPLE_0123 = 6,
+};
+
+enum sq_tex_clamp {
+ SQ_TEX_WRAP = 0,
+ SQ_TEX_MIRROR = 1,
+ SQ_TEX_CLAMP_LAST_TEXEL = 2,
+ SQ_TEX_MIRROR_ONCE_LAST_TEXEL = 3,
+ SQ_TEX_CLAMP_HALF_BORDER = 4,
+ SQ_TEX_MIRROR_ONCE_HALF_BORDER = 5,
+ SQ_TEX_CLAMP_BORDER = 6,
+ SQ_TEX_MIRROR_ONCE_BORDER = 7,
+};
+
+enum sq_tex_swiz {
+ SQ_TEX_X = 0,
+ SQ_TEX_Y = 1,
+ SQ_TEX_Z = 2,
+ SQ_TEX_W = 3,
+ SQ_TEX_ZERO = 4,
+ SQ_TEX_ONE = 5,
+};
+
+enum sq_tex_filter {
+ SQ_TEX_FILTER_POINT = 0,
+ SQ_TEX_FILTER_BILINEAR = 1,
+ SQ_TEX_FILTER_BICUBIC = 2,
+};
+
+#define REG_A2XX_RBBM_PATCH_RELEASE 0x00000001
+
+#define REG_A2XX_RBBM_CNTL 0x0000003b
+
+#define REG_A2XX_RBBM_SOFT_RESET 0x0000003c
+
+#define REG_A2XX_CP_PFP_UCODE_ADDR 0x000000c0
+
+#define REG_A2XX_CP_PFP_UCODE_DATA 0x000000c1
+
+#define REG_A2XX_RBBM_PERFCOUNTER1_SELECT 0x00000395
+
+#define REG_A2XX_RBBM_PERFCOUNTER1_LO 0x00000397
+
+#define REG_A2XX_RBBM_PERFCOUNTER1_HI 0x00000398
+
+#define REG_A2XX_RBBM_DEBUG 0x0000039b
+
+#define REG_A2XX_RBBM_PM_OVERRIDE1 0x0000039c
+
+#define REG_A2XX_RBBM_PM_OVERRIDE2 0x0000039d
+
+#define REG_A2XX_RBBM_DEBUG_OUT 0x000003a0
+
+#define REG_A2XX_RBBM_DEBUG_CNTL 0x000003a1
+
+#define REG_A2XX_RBBM_READ_ERROR 0x000003b3
+
+#define REG_A2XX_RBBM_INT_CNTL 0x000003b4
+
+#define REG_A2XX_RBBM_INT_STATUS 0x000003b5
+
+#define REG_A2XX_RBBM_INT_ACK 0x000003b6
+
+#define REG_A2XX_MASTER_INT_SIGNAL 0x000003b7
+
+#define REG_A2XX_RBBM_PERIPHID1 0x000003f9
+
+#define REG_A2XX_RBBM_PERIPHID2 0x000003fa
+
+#define REG_A2XX_CP_PERFMON_CNTL 0x00000444
+
+#define REG_A2XX_CP_PERFCOUNTER_SELECT 0x00000445
+
+#define REG_A2XX_CP_PERFCOUNTER_LO 0x00000446
+
+#define REG_A2XX_CP_PERFCOUNTER_HI 0x00000447
+
+#define REG_A2XX_CP_ST_BASE 0x0000044d
+
+#define REG_A2XX_CP_ST_BUFSZ 0x0000044e
+
+#define REG_A2XX_CP_IB1_BASE 0x00000458
+
+#define REG_A2XX_CP_IB1_BUFSZ 0x00000459
+
+#define REG_A2XX_CP_IB2_BASE 0x0000045a
+
+#define REG_A2XX_CP_IB2_BUFSZ 0x0000045b
+
+#define REG_A2XX_CP_STAT 0x0000047f
+
+#define REG_A2XX_RBBM_STATUS 0x000005d0
+#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK 0x0000001f
+#define A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT 0
+static inline uint32_t A2XX_RBBM_STATUS_CMDFIFO_AVAIL(uint32_t val)
+{
+ return ((val) << A2XX_RBBM_STATUS_CMDFIFO_AVAIL__SHIFT) & A2XX_RBBM_STATUS_CMDFIFO_AVAIL__MASK;
+}
+#define A2XX_RBBM_STATUS_TC_BUSY 0x00000020
+#define A2XX_RBBM_STATUS_HIRQ_PENDING 0x00000100
+#define A2XX_RBBM_STATUS_CPRQ_PENDING 0x00000200
+#define A2XX_RBBM_STATUS_CFRQ_PENDING 0x00000400
+#define A2XX_RBBM_STATUS_PFRQ_PENDING 0x00000800
+#define A2XX_RBBM_STATUS_VGT_BUSY_NO_DMA 0x00001000
+#define A2XX_RBBM_STATUS_RBBM_WU_BUSY 0x00004000
+#define A2XX_RBBM_STATUS_CP_NRT_BUSY 0x00010000
+#define A2XX_RBBM_STATUS_MH_BUSY 0x00040000
+#define A2XX_RBBM_STATUS_MH_COHERENCY_BUSY 0x00080000
+#define A2XX_RBBM_STATUS_SX_BUSY 0x00200000
+#define A2XX_RBBM_STATUS_TPC_BUSY 0x00400000
+#define A2XX_RBBM_STATUS_SC_CNTX_BUSY 0x01000000
+#define A2XX_RBBM_STATUS_PA_BUSY 0x02000000
+#define A2XX_RBBM_STATUS_VGT_BUSY 0x04000000
+#define A2XX_RBBM_STATUS_SQ_CNTX17_BUSY 0x08000000
+#define A2XX_RBBM_STATUS_SQ_CNTX0_BUSY 0x10000000
+#define A2XX_RBBM_STATUS_RB_CNTX_BUSY 0x40000000
+#define A2XX_RBBM_STATUS_GUI_ACTIVE 0x80000000
+
+#define REG_A2XX_A220_VSC_BIN_SIZE 0x00000c01
+#define A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
+#define A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A2XX_A220_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_WIDTH__SHIFT) & A2XX_A220_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
+#define A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT 5
+static inline uint32_t A2XX_A220_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A2XX_A220_VSC_BIN_SIZE_HEIGHT__SHIFT) & A2XX_A220_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
+static inline uint32_t REG_A2XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+
+static inline uint32_t REG_A2XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+
+static inline uint32_t REG_A2XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; }
+
+static inline uint32_t REG_A2XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
+
+#define REG_A2XX_PC_DEBUG_CNTL 0x00000c38
+
+#define REG_A2XX_PC_DEBUG_DATA 0x00000c39
+
+#define REG_A2XX_PA_SC_VIZ_QUERY_STATUS 0x00000c44
+
+#define REG_A2XX_GRAS_DEBUG_CNTL 0x00000c80
+
+#define REG_A2XX_PA_SU_DEBUG_CNTL 0x00000c80
+
+#define REG_A2XX_GRAS_DEBUG_DATA 0x00000c81
+
+#define REG_A2XX_PA_SU_DEBUG_DATA 0x00000c81
+
+#define REG_A2XX_PA_SU_FACE_DATA 0x00000c86
+
+#define REG_A2XX_SQ_GPR_MANAGEMENT 0x00000d00
+
+#define REG_A2XX_SQ_FLOW_CONTROL 0x00000d01
+
+#define REG_A2XX_SQ_INST_STORE_MANAGMENT 0x00000d02
+
+#define REG_A2XX_SQ_DEBUG_MISC 0x00000d05
+
+#define REG_A2XX_SQ_INT_CNTL 0x00000d34
+
+#define REG_A2XX_SQ_INT_STATUS 0x00000d35
+
+#define REG_A2XX_SQ_INT_ACK 0x00000d36
+
+#define REG_A2XX_SQ_DEBUG_INPUT_FSM 0x00000dae
+
+#define REG_A2XX_SQ_DEBUG_CONST_MGR_FSM 0x00000daf
+
+#define REG_A2XX_SQ_DEBUG_TP_FSM 0x00000db0
+
+#define REG_A2XX_SQ_DEBUG_FSM_ALU_0 0x00000db1
+
+#define REG_A2XX_SQ_DEBUG_FSM_ALU_1 0x00000db2
+
+#define REG_A2XX_SQ_DEBUG_EXP_ALLOC 0x00000db3
+
+#define REG_A2XX_SQ_DEBUG_PTR_BUFF 0x00000db4
+
+#define REG_A2XX_SQ_DEBUG_GPR_VTX 0x00000db5
+
+#define REG_A2XX_SQ_DEBUG_GPR_PIX 0x00000db6
+
+#define REG_A2XX_SQ_DEBUG_TB_STATUS_SEL 0x00000db7
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_0 0x00000db8
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_1 0x00000db9
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_STATUS_REG 0x00000dba
+
+#define REG_A2XX_SQ_DEBUG_VTX_TB_STATE_MEM 0x00000dbb
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_0 0x00000dbc
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_0 0x00000dbd
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_1 0x00000dbe
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_2 0x00000dbf
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATUS_REG_3 0x00000dc0
+
+#define REG_A2XX_SQ_DEBUG_PIX_TB_STATE_MEM 0x00000dc1
+
+#define REG_A2XX_TC_CNTL_STATUS 0x00000e00
+#define A2XX_TC_CNTL_STATUS_L2_INVALIDATE 0x00000001
+
+#define REG_A2XX_TP0_CHICKEN 0x00000e1e
+
+#define REG_A2XX_RB_BC_CONTROL 0x00000f01
+#define A2XX_RB_BC_CONTROL_ACCUM_LINEAR_MODE_ENABLE 0x00000001
+#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK 0x00000006
+#define A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT 1
+static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_TIMEOUT_SELECT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_DISABLE_EDRAM_CAM 0x00000008
+#define A2XX_RB_BC_CONTROL_DISABLE_EZ_FAST_CONTEXT_SWITCH 0x00000010
+#define A2XX_RB_BC_CONTROL_DISABLE_EZ_NULL_ZCMD_DROP 0x00000020
+#define A2XX_RB_BC_CONTROL_DISABLE_LZ_NULL_ZCMD_DROP 0x00000040
+#define A2XX_RB_BC_CONTROL_ENABLE_AZ_THROTTLE 0x00000080
+#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK 0x00001f00
+#define A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT 8
+static inline uint32_t A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__SHIFT) & A2XX_RB_BC_CONTROL_AZ_THROTTLE_COUNT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_ENABLE_CRC_UPDATE 0x00004000
+#define A2XX_RB_BC_CONTROL_CRC_MODE 0x00008000
+#define A2XX_RB_BC_CONTROL_DISABLE_SAMPLE_COUNTERS 0x00010000
+#define A2XX_RB_BC_CONTROL_DISABLE_ACCUM 0x00020000
+#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK 0x003c0000
+#define A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT 18
+static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_ALLOC_MASK__MASK;
+}
+#define A2XX_RB_BC_CONTROL_LINEAR_PERFORMANCE_ENABLE 0x00400000
+#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK 0x07800000
+#define A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT 23
+static inline uint32_t A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__SHIFT) & A2XX_RB_BC_CONTROL_ACCUM_DATA_FIFO_LIMIT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK 0x18000000
+#define A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT 27
+static inline uint32_t A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT(uint32_t val)
+{
+ return ((val) << A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__SHIFT) & A2XX_RB_BC_CONTROL_MEM_EXPORT_TIMEOUT_SELECT__MASK;
+}
+#define A2XX_RB_BC_CONTROL_MEM_EXPORT_LINEAR_MODE_ENABLE 0x20000000
+#define A2XX_RB_BC_CONTROL_CRC_SYSTEM 0x40000000
+#define A2XX_RB_BC_CONTROL_RESERVED6 0x80000000
+
+#define REG_A2XX_RB_EDRAM_INFO 0x00000f02
+
+#define REG_A2XX_RB_DEBUG_CNTL 0x00000f26
+
+#define REG_A2XX_RB_DEBUG_DATA 0x00000f27
+
+#define REG_A2XX_RB_SURFACE_INFO 0x00002000
+
+#define REG_A2XX_RB_COLOR_INFO 0x00002001
+#define A2XX_RB_COLOR_INFO_FORMAT__MASK 0x0000000f
+#define A2XX_RB_COLOR_INFO_FORMAT__SHIFT 0
+static inline uint32_t A2XX_RB_COLOR_INFO_FORMAT(enum a2xx_colorformatx val)
+{
+ return ((val) << A2XX_RB_COLOR_INFO_FORMAT__SHIFT) & A2XX_RB_COLOR_INFO_FORMAT__MASK;
+}
+#define A2XX_RB_COLOR_INFO_ROUND_MODE__MASK 0x00000030
+#define A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT 4
+static inline uint32_t A2XX_RB_COLOR_INFO_ROUND_MODE(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLOR_INFO_ROUND_MODE__SHIFT) & A2XX_RB_COLOR_INFO_ROUND_MODE__MASK;
+}
+#define A2XX_RB_COLOR_INFO_LINEAR 0x00000040
+#define A2XX_RB_COLOR_INFO_ENDIAN__MASK 0x00000180
+#define A2XX_RB_COLOR_INFO_ENDIAN__SHIFT 7
+static inline uint32_t A2XX_RB_COLOR_INFO_ENDIAN(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLOR_INFO_ENDIAN__SHIFT) & A2XX_RB_COLOR_INFO_ENDIAN__MASK;
+}
+#define A2XX_RB_COLOR_INFO_SWAP__MASK 0x00000600
+#define A2XX_RB_COLOR_INFO_SWAP__SHIFT 9
+static inline uint32_t A2XX_RB_COLOR_INFO_SWAP(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLOR_INFO_SWAP__SHIFT) & A2XX_RB_COLOR_INFO_SWAP__MASK;
+}
+#define A2XX_RB_COLOR_INFO_BASE__MASK 0xfffff000
+#define A2XX_RB_COLOR_INFO_BASE__SHIFT 12
+static inline uint32_t A2XX_RB_COLOR_INFO_BASE(uint32_t val)
+{
+ return ((val >> 10) << A2XX_RB_COLOR_INFO_BASE__SHIFT) & A2XX_RB_COLOR_INFO_BASE__MASK;
+}
+
+#define REG_A2XX_RB_DEPTH_INFO 0x00002002
+#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
+#define A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
+{
+ return ((val) << A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
+}
+#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000
+#define A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
+static inline uint32_t A2XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
+{
+ return ((val >> 10) << A2XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A2XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+}
+
+#define REG_A2XX_A225_RB_COLOR_INFO3 0x00002005
+
+#define REG_A2XX_COHER_DEST_BASE_0 0x00002006
+
+#define REG_A2XX_PA_SC_SCREEN_SCISSOR_TL 0x0000200e
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_X__MASK;
+}
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A2XX_PA_SC_SCREEN_SCISSOR_BR 0x0000200f
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_X__MASK;
+}
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_SCREEN_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A2XX_PA_SC_WINDOW_OFFSET 0x00002080
+#define A2XX_PA_SC_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_X(int32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_X__MASK;
+}
+#define A2XX_PA_SC_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_WINDOW_OFFSET_Y(int32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A2XX_PA_SC_WINDOW_OFFSET_Y__MASK;
+}
+#define A2XX_PA_SC_WINDOW_OFFSET_DISABLE 0x80000000
+
+#define REG_A2XX_PA_SC_WINDOW_SCISSOR_TL 0x00002081
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A2XX_PA_SC_WINDOW_SCISSOR_BR 0x00002082
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A2XX_PA_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A2XX_PA_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A2XX_UNKNOWN_2010 0x00002010
+
+#define REG_A2XX_VGT_MAX_VTX_INDX 0x00002100
+
+#define REG_A2XX_VGT_MIN_VTX_INDX 0x00002101
+
+#define REG_A2XX_VGT_INDX_OFFSET 0x00002102
+
+#define REG_A2XX_A225_PC_MULTI_PRIM_IB_RESET_INDX 0x00002103
+
+#define REG_A2XX_RB_COLOR_MASK 0x00002104
+#define A2XX_RB_COLOR_MASK_WRITE_RED 0x00000001
+#define A2XX_RB_COLOR_MASK_WRITE_GREEN 0x00000002
+#define A2XX_RB_COLOR_MASK_WRITE_BLUE 0x00000004
+#define A2XX_RB_COLOR_MASK_WRITE_ALPHA 0x00000008
+
+#define REG_A2XX_RB_BLEND_RED 0x00002105
+
+#define REG_A2XX_RB_BLEND_GREEN 0x00002106
+
+#define REG_A2XX_RB_BLEND_BLUE 0x00002107
+
+#define REG_A2XX_RB_BLEND_ALPHA 0x00002108
+
+#define REG_A2XX_RB_FOG_COLOR 0x00002109
+
+#define REG_A2XX_RB_STENCILREFMASK_BF 0x0000210c
+#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
+#define A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
+static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
+#define A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
+static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
+#define A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A2XX_RB_STENCILREFMASK 0x0000210d
+#define A2XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A2XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A2XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A2XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A2XX_RB_ALPHA_REF 0x0000210e
+
+#define REG_A2XX_PA_CL_VPORT_XSCALE 0x0000210f
+#define A2XX_PA_CL_VPORT_XSCALE__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_XSCALE__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_XSCALE(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_XSCALE__SHIFT) & A2XX_PA_CL_VPORT_XSCALE__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_XOFFSET 0x00002110
+#define A2XX_PA_CL_VPORT_XOFFSET__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_XOFFSET__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_XOFFSET(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_XOFFSET__SHIFT) & A2XX_PA_CL_VPORT_XOFFSET__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_YSCALE 0x00002111
+#define A2XX_PA_CL_VPORT_YSCALE__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_YSCALE__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_YSCALE(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_YSCALE__SHIFT) & A2XX_PA_CL_VPORT_YSCALE__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_YOFFSET 0x00002112
+#define A2XX_PA_CL_VPORT_YOFFSET__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_YOFFSET__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_YOFFSET(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_YOFFSET__SHIFT) & A2XX_PA_CL_VPORT_YOFFSET__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_ZSCALE 0x00002113
+#define A2XX_PA_CL_VPORT_ZSCALE__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_ZSCALE__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_ZSCALE(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_ZSCALE__SHIFT) & A2XX_PA_CL_VPORT_ZSCALE__MASK;
+}
+
+#define REG_A2XX_PA_CL_VPORT_ZOFFSET 0x00002114
+#define A2XX_PA_CL_VPORT_ZOFFSET__MASK 0xffffffff
+#define A2XX_PA_CL_VPORT_ZOFFSET__SHIFT 0
+static inline uint32_t A2XX_PA_CL_VPORT_ZOFFSET(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_VPORT_ZOFFSET__SHIFT) & A2XX_PA_CL_VPORT_ZOFFSET__MASK;
+}
+
+#define REG_A2XX_SQ_PROGRAM_CNTL 0x00002180
+#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK 0x000000ff
+#define A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT 0
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_REGS(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_REGS__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK 0x0000ff00
+#define A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT 8
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_REGS(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_REGS__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_REGS__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_VS_RESOURCE 0x00010000
+#define A2XX_SQ_PROGRAM_CNTL_PS_RESOURCE 0x00020000
+#define A2XX_SQ_PROGRAM_CNTL_PARAM_GEN 0x00040000
+#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_PIX 0x00080000
+#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK 0x00f00000
+#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT 20
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_COUNT__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK 0x07000000
+#define A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT 24
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE(enum a2xx_sq_ps_vtx_mode val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_VS_EXPORT_MODE__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK 0x78000000
+#define A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT 27
+static inline uint32_t A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__SHIFT) & A2XX_SQ_PROGRAM_CNTL_PS_EXPORT_MODE__MASK;
+}
+#define A2XX_SQ_PROGRAM_CNTL_GEN_INDEX_VTX 0x80000000
+
+#define REG_A2XX_SQ_CONTEXT_MISC 0x00002181
+#define A2XX_SQ_CONTEXT_MISC_INST_PRED_OPTIMIZE 0x00000001
+#define A2XX_SQ_CONTEXT_MISC_SC_OUTPUT_SCREEN_XY 0x00000002
+#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK 0x0000000c
+#define A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT 2
+static inline uint32_t A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL(enum a2xx_sq_sample_cntl val)
+{
+ return ((val) << A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__SHIFT) & A2XX_SQ_CONTEXT_MISC_SC_SAMPLE_CNTL__MASK;
+}
+#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK 0x0000ff00
+#define A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT 8
+static inline uint32_t A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS(uint32_t val)
+{
+ return ((val) << A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__SHIFT) & A2XX_SQ_CONTEXT_MISC_PARAM_GEN_POS__MASK;
+}
+#define A2XX_SQ_CONTEXT_MISC_PERFCOUNTER_REF 0x00010000
+#define A2XX_SQ_CONTEXT_MISC_YEILD_OPTIMIZE 0x00020000
+#define A2XX_SQ_CONTEXT_MISC_TX_CACHE_SEL 0x00040000
+
+#define REG_A2XX_SQ_INTERPOLATOR_CNTL 0x00002182
+
+#define REG_A2XX_SQ_WRAPPING_0 0x00002183
+
+#define REG_A2XX_SQ_WRAPPING_1 0x00002184
+
+#define REG_A2XX_SQ_PS_PROGRAM 0x000021f6
+
+#define REG_A2XX_SQ_VS_PROGRAM 0x000021f7
+
+#define REG_A2XX_RB_DEPTHCONTROL 0x00002200
+#define A2XX_RB_DEPTHCONTROL_STENCIL_ENABLE 0x00000001
+#define A2XX_RB_DEPTHCONTROL_Z_ENABLE 0x00000002
+#define A2XX_RB_DEPTHCONTROL_Z_WRITE_ENABLE 0x00000004
+#define A2XX_RB_DEPTHCONTROL_EARLY_Z_ENABLE 0x00000008
+#define A2XX_RB_DEPTHCONTROL_ZFUNC__MASK 0x00000070
+#define A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT 4
+static inline uint32_t A2XX_RB_DEPTHCONTROL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_ZFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_ZFUNC__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_BACKFACE_ENABLE 0x00000080
+#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK 0x00000700
+#define A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT 8
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK 0x00003800
+#define A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT 11
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK 0x0001c000
+#define A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT 14
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK 0x000e0000
+#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT 17
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK 0x00700000
+#define A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT 20
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFUNC_BF__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK 0x03800000
+#define A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT 23
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILFAIL_BF__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK 0x1c000000
+#define A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT 26
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZPASS_BF__MASK;
+}
+#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK 0xe0000000
+#define A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT 29
+static inline uint32_t A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__SHIFT) & A2XX_RB_DEPTHCONTROL_STENCILZFAIL_BF__MASK;
+}
+
+#define REG_A2XX_RB_BLEND_CONTROL 0x00002201
+#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK 0x0000001f
+#define A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT 0
+static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_SRCBLEND__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK 0x000000e0
+#define A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT 5
+static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN(enum adreno_rb_blend_opcode val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_COMB_FCN__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK 0x00001f00
+#define A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT 8
+static inline uint32_t A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_COLOR_DESTBLEND__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK 0x001f0000
+#define A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT 16
+static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_SRCBLEND__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK 0x00e00000
+#define A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT 21
+static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN(enum adreno_rb_blend_opcode val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_COMB_FCN__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK 0x1f000000
+#define A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT 24
+static inline uint32_t A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__SHIFT) & A2XX_RB_BLEND_CONTROL_ALPHA_DESTBLEND__MASK;
+}
+#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE_ENABLE 0x20000000
+#define A2XX_RB_BLEND_CONTROL_BLEND_FORCE 0x40000000
+
+#define REG_A2XX_RB_COLORCONTROL 0x00002202
+#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK 0x00000007
+#define A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT 0
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_FUNC__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_FUNC__MASK;
+}
+#define A2XX_RB_COLORCONTROL_ALPHA_TEST_ENABLE 0x00000008
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_ENABLE 0x00000010
+#define A2XX_RB_COLORCONTROL_BLEND_DISABLE 0x00000020
+#define A2XX_RB_COLORCONTROL_VOB_ENABLE 0x00000040
+#define A2XX_RB_COLORCONTROL_VS_EXPORTS_FOG 0x00000080
+#define A2XX_RB_COLORCONTROL_ROP_CODE__MASK 0x00000f00
+#define A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT 8
+static inline uint32_t A2XX_RB_COLORCONTROL_ROP_CODE(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ROP_CODE__SHIFT) & A2XX_RB_COLORCONTROL_ROP_CODE__MASK;
+}
+#define A2XX_RB_COLORCONTROL_DITHER_MODE__MASK 0x00003000
+#define A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT 12
+static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_DITHER_MODE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_MODE__MASK;
+}
+#define A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK 0x0000c000
+#define A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT 14
+static inline uint32_t A2XX_RB_COLORCONTROL_DITHER_TYPE(enum a2xx_rb_dither_type val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_DITHER_TYPE__SHIFT) & A2XX_RB_COLORCONTROL_DITHER_TYPE__MASK;
+}
+#define A2XX_RB_COLORCONTROL_PIXEL_FOG 0x00010000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK 0x03000000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT 24
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET0__MASK;
+}
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK 0x0c000000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT 26
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET1__MASK;
+}
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK 0x30000000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT 28
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET2__MASK;
+}
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK 0xc0000000
+#define A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT 30
+static inline uint32_t A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3(uint32_t val)
+{
+ return ((val) << A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__SHIFT) & A2XX_RB_COLORCONTROL_ALPHA_TO_MASK_OFFSET3__MASK;
+}
+
+#define REG_A2XX_VGT_CURRENT_BIN_ID_MAX 0x00002203
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK 0x00000007
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT 0
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_COLUMN__MASK;
+}
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK 0x00000038
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT 3
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_ROW(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_ROW__MASK;
+}
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK 0x000001c0
+#define A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT 6
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MAX_GUARD_BAND_MASK__MASK;
+}
+
+#define REG_A2XX_PA_CL_CLIP_CNTL 0x00002204
+#define A2XX_PA_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
+#define A2XX_PA_CL_CLIP_CNTL_BOUNDARY_EDGE_FLAG_ENA 0x00040000
+#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK 0x00080000
+#define A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT 19
+static inline uint32_t A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF(enum a2xx_dx_clip_space val)
+{
+ return ((val) << A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__SHIFT) & A2XX_PA_CL_CLIP_CNTL_DX_CLIP_SPACE_DEF__MASK;
+}
+#define A2XX_PA_CL_CLIP_CNTL_DIS_CLIP_ERR_DETECT 0x00100000
+#define A2XX_PA_CL_CLIP_CNTL_VTX_KILL_OR 0x00200000
+#define A2XX_PA_CL_CLIP_CNTL_XY_NAN_RETAIN 0x00400000
+#define A2XX_PA_CL_CLIP_CNTL_Z_NAN_RETAIN 0x00800000
+#define A2XX_PA_CL_CLIP_CNTL_W_NAN_RETAIN 0x01000000
+
+#define REG_A2XX_PA_SU_SC_MODE_CNTL 0x00002205
+#define A2XX_PA_SU_SC_MODE_CNTL_CULL_FRONT 0x00000001
+#define A2XX_PA_SU_SC_MODE_CNTL_CULL_BACK 0x00000002
+#define A2XX_PA_SU_SC_MODE_CNTL_FACE 0x00000004
+#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK 0x00000018
+#define A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT 3
+static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_POLYMODE(enum a2xx_pa_su_sc_polymode val)
+{
+ return ((val) << A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_POLYMODE__MASK;
+}
+#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK 0x000000e0
+#define A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT 5
+static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_FRONT_PTYPE__MASK;
+}
+#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK 0x00000700
+#define A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT 8
+static inline uint32_t A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__SHIFT) & A2XX_PA_SU_SC_MODE_CNTL_BACK_PTYPE__MASK;
+}
+#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_FRONT_ENABLE 0x00000800
+#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_BACK_ENABLE 0x00001000
+#define A2XX_PA_SU_SC_MODE_CNTL_POLY_OFFSET_PARA_ENABLE 0x00002000
+#define A2XX_PA_SU_SC_MODE_CNTL_MSAA_ENABLE 0x00008000
+#define A2XX_PA_SU_SC_MODE_CNTL_VTX_WINDOW_OFFSET_ENABLE 0x00010000
+#define A2XX_PA_SU_SC_MODE_CNTL_LINE_STIPPLE_ENABLE 0x00040000
+#define A2XX_PA_SU_SC_MODE_CNTL_PROVOKING_VTX_LAST 0x00080000
+#define A2XX_PA_SU_SC_MODE_CNTL_PERSP_CORR_DIS 0x00100000
+#define A2XX_PA_SU_SC_MODE_CNTL_MULTI_PRIM_IB_ENA 0x00200000
+#define A2XX_PA_SU_SC_MODE_CNTL_QUAD_ORDER_ENABLE 0x00800000
+#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_ALL_TRI 0x02000000
+#define A2XX_PA_SU_SC_MODE_CNTL_WAIT_RB_IDLE_FIRST_TRI_NEW_STATE 0x04000000
+#define A2XX_PA_SU_SC_MODE_CNTL_CLAMPED_FACENESS 0x10000000
+#define A2XX_PA_SU_SC_MODE_CNTL_ZERO_AREA_FACENESS 0x20000000
+#define A2XX_PA_SU_SC_MODE_CNTL_FACE_KILL_ENABLE 0x40000000
+#define A2XX_PA_SU_SC_MODE_CNTL_FACE_WRITE_ENABLE 0x80000000
+
+#define REG_A2XX_PA_CL_VTE_CNTL 0x00002206
+#define A2XX_PA_CL_VTE_CNTL_VPORT_X_SCALE_ENA 0x00000001
+#define A2XX_PA_CL_VTE_CNTL_VPORT_X_OFFSET_ENA 0x00000002
+#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_SCALE_ENA 0x00000004
+#define A2XX_PA_CL_VTE_CNTL_VPORT_Y_OFFSET_ENA 0x00000008
+#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_SCALE_ENA 0x00000010
+#define A2XX_PA_CL_VTE_CNTL_VPORT_Z_OFFSET_ENA 0x00000020
+#define A2XX_PA_CL_VTE_CNTL_VTX_XY_FMT 0x00000100
+#define A2XX_PA_CL_VTE_CNTL_VTX_Z_FMT 0x00000200
+#define A2XX_PA_CL_VTE_CNTL_VTX_W0_FMT 0x00000400
+#define A2XX_PA_CL_VTE_CNTL_PERFCOUNTER_REF 0x00000800
+
+#define REG_A2XX_VGT_CURRENT_BIN_ID_MIN 0x00002207
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK 0x00000007
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT 0
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_COLUMN__MASK;
+}
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK 0x00000038
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT 3
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_ROW(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_ROW__MASK;
+}
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK 0x000001c0
+#define A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT 6
+static inline uint32_t A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK(uint32_t val)
+{
+ return ((val) << A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__SHIFT) & A2XX_VGT_CURRENT_BIN_ID_MIN_GUARD_BAND_MASK__MASK;
+}
+
+#define REG_A2XX_RB_MODECONTROL 0x00002208
+#define A2XX_RB_MODECONTROL_EDRAM_MODE__MASK 0x00000007
+#define A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT 0
+static inline uint32_t A2XX_RB_MODECONTROL_EDRAM_MODE(enum a2xx_rb_edram_mode val)
+{
+ return ((val) << A2XX_RB_MODECONTROL_EDRAM_MODE__SHIFT) & A2XX_RB_MODECONTROL_EDRAM_MODE__MASK;
+}
+
+#define REG_A2XX_A220_RB_LRZ_VSC_CONTROL 0x00002209
+
+#define REG_A2XX_RB_SAMPLE_POS 0x0000220a
+
+#define REG_A2XX_CLEAR_COLOR 0x0000220b
+#define A2XX_CLEAR_COLOR_RED__MASK 0x000000ff
+#define A2XX_CLEAR_COLOR_RED__SHIFT 0
+static inline uint32_t A2XX_CLEAR_COLOR_RED(uint32_t val)
+{
+ return ((val) << A2XX_CLEAR_COLOR_RED__SHIFT) & A2XX_CLEAR_COLOR_RED__MASK;
+}
+#define A2XX_CLEAR_COLOR_GREEN__MASK 0x0000ff00
+#define A2XX_CLEAR_COLOR_GREEN__SHIFT 8
+static inline uint32_t A2XX_CLEAR_COLOR_GREEN(uint32_t val)
+{
+ return ((val) << A2XX_CLEAR_COLOR_GREEN__SHIFT) & A2XX_CLEAR_COLOR_GREEN__MASK;
+}
+#define A2XX_CLEAR_COLOR_BLUE__MASK 0x00ff0000
+#define A2XX_CLEAR_COLOR_BLUE__SHIFT 16
+static inline uint32_t A2XX_CLEAR_COLOR_BLUE(uint32_t val)
+{
+ return ((val) << A2XX_CLEAR_COLOR_BLUE__SHIFT) & A2XX_CLEAR_COLOR_BLUE__MASK;
+}
+#define A2XX_CLEAR_COLOR_ALPHA__MASK 0xff000000
+#define A2XX_CLEAR_COLOR_ALPHA__SHIFT 24
+static inline uint32_t A2XX_CLEAR_COLOR_ALPHA(uint32_t val)
+{
+ return ((val) << A2XX_CLEAR_COLOR_ALPHA__SHIFT) & A2XX_CLEAR_COLOR_ALPHA__MASK;
+}
+
+#define REG_A2XX_A220_GRAS_CONTROL 0x00002210
+
+#define REG_A2XX_PA_SU_POINT_SIZE 0x00002280
+#define A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK 0x0000ffff
+#define A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT 0
+static inline uint32_t A2XX_PA_SU_POINT_SIZE_HEIGHT(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
+}
+#define A2XX_PA_SU_POINT_SIZE_WIDTH__MASK 0xffff0000
+#define A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT 16
+static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
+}
+
+#define REG_A2XX_PA_SU_POINT_MINMAX 0x00002281
+#define A2XX_PA_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A2XX_PA_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A2XX_PA_SU_LINE_CNTL 0x00002282
+#define A2XX_PA_SU_LINE_CNTL_WIDTH__MASK 0x0000ffff
+#define A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT 0
+static inline uint32_t A2XX_PA_SU_LINE_CNTL_WIDTH(float val)
+{
+ return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
+}
+
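Note: the point- and line-size packers above scale by 8.0 and truncate, i.e. they store unsigned fixed point with three fractional bits. A minimal sketch (not part of the generated file) of packing both halves of PA_SU_POINT_SIZE:

/* Sketch: a value of 1.5 packs as 0xc in each 16-bit field. */
static inline uint32_t a2xx_point_size_sketch(void)
{
	return A2XX_PA_SU_POINT_SIZE_WIDTH(1.5) |   /* (uint32_t)(1.5 * 8.0) << 16 */
	       A2XX_PA_SU_POINT_SIZE_HEIGHT(1.5);   /* (uint32_t)(1.5 * 8.0) << 0 */
}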
+#define REG_A2XX_PA_SC_LINE_STIPPLE 0x00002283
+#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK 0x0000ffff
+#define A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT 0
+static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_LINE_PATTERN__MASK;
+}
+#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK 0x00ff0000
+#define A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT 16
+static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_REPEAT_COUNT__MASK;
+}
+#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK 0x10000000
+#define A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT 28
+static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER(enum a2xx_pa_sc_pattern_bit_order val)
+{
+ return ((val) << A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_PATTERN_BIT_ORDER__MASK;
+}
+#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK 0x60000000
+#define A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT 29
+static inline uint32_t A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL(enum a2xx_pa_sc_auto_reset_cntl val)
+{
+ return ((val) << A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__SHIFT) & A2XX_PA_SC_LINE_STIPPLE_AUTO_RESET_CNTL__MASK;
+}
+
+#define REG_A2XX_PA_SC_VIZ_QUERY 0x00002293
+
+#define REG_A2XX_VGT_ENHANCE 0x00002294
+
+#define REG_A2XX_PA_SC_LINE_CNTL 0x00002300
+#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK 0x0000ffff
+#define A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT 0
+static inline uint32_t A2XX_PA_SC_LINE_CNTL_BRES_CNTL(uint32_t val)
+{
+ return ((val) << A2XX_PA_SC_LINE_CNTL_BRES_CNTL__SHIFT) & A2XX_PA_SC_LINE_CNTL_BRES_CNTL__MASK;
+}
+#define A2XX_PA_SC_LINE_CNTL_USE_BRES_CNTL 0x00000100
+#define A2XX_PA_SC_LINE_CNTL_EXPAND_LINE_WIDTH 0x00000200
+#define A2XX_PA_SC_LINE_CNTL_LAST_PIXEL 0x00000400
+
+#define REG_A2XX_PA_SC_AA_CONFIG 0x00002301
+
+#define REG_A2XX_PA_SU_VTX_CNTL 0x00002302
+#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK 0x00000001
+#define A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT 0
+static inline uint32_t A2XX_PA_SU_VTX_CNTL_PIX_CENTER(enum a2xx_pa_pixcenter val)
+{
+ return ((val) << A2XX_PA_SU_VTX_CNTL_PIX_CENTER__SHIFT) & A2XX_PA_SU_VTX_CNTL_PIX_CENTER__MASK;
+}
+#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK 0x00000006
+#define A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT 1
+static inline uint32_t A2XX_PA_SU_VTX_CNTL_ROUND_MODE(enum a2xx_pa_roundmode val)
+{
+ return ((val) << A2XX_PA_SU_VTX_CNTL_ROUND_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_ROUND_MODE__MASK;
+}
+#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK 0x00000380
+#define A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT 7
+static inline uint32_t A2XX_PA_SU_VTX_CNTL_QUANT_MODE(enum a2xx_pa_quantmode val)
+{
+ return ((val) << A2XX_PA_SU_VTX_CNTL_QUANT_MODE__SHIFT) & A2XX_PA_SU_VTX_CNTL_QUANT_MODE__MASK;
+}
+
+#define REG_A2XX_PA_CL_GB_VERT_CLIP_ADJ 0x00002303
+#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK 0xffffffff
+#define A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT 0
+static inline uint32_t A2XX_PA_CL_GB_VERT_CLIP_ADJ(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_GB_VERT_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_CLIP_ADJ__MASK;
+}
+
+#define REG_A2XX_PA_CL_GB_VERT_DISC_ADJ 0x00002304
+#define A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK 0xffffffff
+#define A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT 0
+static inline uint32_t A2XX_PA_CL_GB_VERT_DISC_ADJ(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_GB_VERT_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_VERT_DISC_ADJ__MASK;
+}
+
+#define REG_A2XX_PA_CL_GB_HORZ_CLIP_ADJ 0x00002305
+#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK 0xffffffff
+#define A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT 0
+static inline uint32_t A2XX_PA_CL_GB_HORZ_CLIP_ADJ(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_GB_HORZ_CLIP_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_CLIP_ADJ__MASK;
+}
+
+#define REG_A2XX_PA_CL_GB_HORZ_DISC_ADJ 0x00002306
+#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK 0xffffffff
+#define A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT 0
+static inline uint32_t A2XX_PA_CL_GB_HORZ_DISC_ADJ(float val)
+{
+ return ((fui(val)) << A2XX_PA_CL_GB_HORZ_DISC_ADJ__SHIFT) & A2XX_PA_CL_GB_HORZ_DISC_ADJ__MASK;
+}
+
+#define REG_A2XX_SQ_VS_CONST 0x00002307
+#define A2XX_SQ_VS_CONST_BASE__MASK 0x000001ff
+#define A2XX_SQ_VS_CONST_BASE__SHIFT 0
+static inline uint32_t A2XX_SQ_VS_CONST_BASE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_VS_CONST_BASE__SHIFT) & A2XX_SQ_VS_CONST_BASE__MASK;
+}
+#define A2XX_SQ_VS_CONST_SIZE__MASK 0x001ff000
+#define A2XX_SQ_VS_CONST_SIZE__SHIFT 12
+static inline uint32_t A2XX_SQ_VS_CONST_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_VS_CONST_SIZE__SHIFT) & A2XX_SQ_VS_CONST_SIZE__MASK;
+}
+
+#define REG_A2XX_SQ_PS_CONST 0x00002308
+#define A2XX_SQ_PS_CONST_BASE__MASK 0x000001ff
+#define A2XX_SQ_PS_CONST_BASE__SHIFT 0
+static inline uint32_t A2XX_SQ_PS_CONST_BASE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PS_CONST_BASE__SHIFT) & A2XX_SQ_PS_CONST_BASE__MASK;
+}
+#define A2XX_SQ_PS_CONST_SIZE__MASK 0x001ff000
+#define A2XX_SQ_PS_CONST_SIZE__SHIFT 12
+static inline uint32_t A2XX_SQ_PS_CONST_SIZE(uint32_t val)
+{
+ return ((val) << A2XX_SQ_PS_CONST_SIZE__SHIFT) & A2XX_SQ_PS_CONST_SIZE__MASK;
+}
+
+#define REG_A2XX_SQ_DEBUG_MISC_0 0x00002309
+
+#define REG_A2XX_SQ_DEBUG_MISC_1 0x0000230a
+
+#define REG_A2XX_PA_SC_AA_MASK 0x00002312
+
+#define REG_A2XX_VGT_VERTEX_REUSE_BLOCK_CNTL 0x00002316
+
+#define REG_A2XX_VGT_OUT_DEALLOC_CNTL 0x00002317
+
+#define REG_A2XX_RB_COPY_CONTROL 0x00002318
+#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK 0x00000007
+#define A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT 0
+static inline uint32_t A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT(enum a2xx_rb_copy_sample_select val)
+{
+ return ((val) << A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__SHIFT) & A2XX_RB_COPY_CONTROL_COPY_SAMPLE_SELECT__MASK;
+}
+#define A2XX_RB_COPY_CONTROL_DEPTH_CLEAR_ENABLE 0x00000008
+#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK 0x000000f0
+#define A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT 4
+static inline uint32_t A2XX_RB_COPY_CONTROL_CLEAR_MASK(uint32_t val)
+{
+ return ((val) << A2XX_RB_COPY_CONTROL_CLEAR_MASK__SHIFT) & A2XX_RB_COPY_CONTROL_CLEAR_MASK__MASK;
+}
+
+#define REG_A2XX_RB_COPY_DEST_BASE 0x00002319
+
+#define REG_A2XX_RB_COPY_DEST_PITCH 0x0000231a
+#define A2XX_RB_COPY_DEST_PITCH__MASK 0xffffffff
+#define A2XX_RB_COPY_DEST_PITCH__SHIFT 0
+static inline uint32_t A2XX_RB_COPY_DEST_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A2XX_RB_COPY_DEST_PITCH__SHIFT) & A2XX_RB_COPY_DEST_PITCH__MASK;
+}
+
+#define REG_A2XX_RB_COPY_DEST_INFO 0x0000231b
+#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK 0x00000007
+#define A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT 0
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN(enum adreno_rb_surface_endian val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__SHIFT) & A2XX_RB_COPY_DEST_INFO_DEST_ENDIAN__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_LINEAR 0x00000008
+#define A2XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000f0
+#define A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 4
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_FORMAT(enum a2xx_colorformatx val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A2XX_RB_COPY_DEST_INFO_FORMAT__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
+#define A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_SWAP(uint32_t val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A2XX_RB_COPY_DEST_INFO_SWAP__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
+#define A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK 0x00003000
+#define A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT 12
+static inline uint32_t A2XX_RB_COPY_DEST_INFO_DITHER_TYPE(enum a2xx_rb_dither_type val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__SHIFT) & A2XX_RB_COPY_DEST_INFO_DITHER_TYPE__MASK;
+}
+#define A2XX_RB_COPY_DEST_INFO_WRITE_RED 0x00004000
+#define A2XX_RB_COPY_DEST_INFO_WRITE_GREEN 0x00008000
+#define A2XX_RB_COPY_DEST_INFO_WRITE_BLUE 0x00010000
+#define A2XX_RB_COPY_DEST_INFO_WRITE_ALPHA 0x00020000
+
+#define REG_A2XX_RB_COPY_DEST_OFFSET 0x0000231c
+#define A2XX_RB_COPY_DEST_OFFSET_X__MASK 0x00001fff
+#define A2XX_RB_COPY_DEST_OFFSET_X__SHIFT 0
+static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_X(uint32_t val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_OFFSET_X__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_X__MASK;
+}
+#define A2XX_RB_COPY_DEST_OFFSET_Y__MASK 0x03ffe000
+#define A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT 13
+static inline uint32_t A2XX_RB_COPY_DEST_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A2XX_RB_COPY_DEST_OFFSET_Y__SHIFT) & A2XX_RB_COPY_DEST_OFFSET_Y__MASK;
+}
+
+#define REG_A2XX_RB_DEPTH_CLEAR 0x0000231d
+
+#define REG_A2XX_RB_SAMPLE_COUNT_CTL 0x00002324
+
+#define REG_A2XX_RB_COLOR_DEST_MASK 0x00002326
+
+#define REG_A2XX_A225_GRAS_UCP0X 0x00002340
+
+#define REG_A2XX_A225_GRAS_UCP5W 0x00002357
+
+#define REG_A2XX_A225_GRAS_UCP_ENABLED 0x00002360
+
+#define REG_A2XX_PA_SU_POLY_OFFSET_FRONT_SCALE 0x00002380
+
+#define REG_A2XX_PA_SU_POLY_OFFSET_BACK_OFFSET 0x00002383
+
+#define REG_A2XX_SQ_CONSTANT_0 0x00004000
+
+#define REG_A2XX_SQ_FETCH_0 0x00004800
+
+#define REG_A2XX_SQ_CF_BOOLEANS 0x00004900
+
+#define REG_A2XX_SQ_CF_LOOP 0x00004908
+
+#define REG_A2XX_COHER_SIZE_PM4 0x00000a29
+
+#define REG_A2XX_COHER_BASE_PM4 0x00000a2a
+
+#define REG_A2XX_COHER_STATUS_PM4 0x00000a2b
+
+#define REG_A2XX_SQ_TEX_0 0x00000000
+#define A2XX_SQ_TEX_0_CLAMP_X__MASK 0x00001c00
+#define A2XX_SQ_TEX_0_CLAMP_X__SHIFT 10
+static inline uint32_t A2XX_SQ_TEX_0_CLAMP_X(enum sq_tex_clamp val)
+{
+ return ((val) << A2XX_SQ_TEX_0_CLAMP_X__SHIFT) & A2XX_SQ_TEX_0_CLAMP_X__MASK;
+}
+#define A2XX_SQ_TEX_0_CLAMP_Y__MASK 0x0000e000
+#define A2XX_SQ_TEX_0_CLAMP_Y__SHIFT 13
+static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Y(enum sq_tex_clamp val)
+{
+ return ((val) << A2XX_SQ_TEX_0_CLAMP_Y__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Y__MASK;
+}
+#define A2XX_SQ_TEX_0_CLAMP_Z__MASK 0x00070000
+#define A2XX_SQ_TEX_0_CLAMP_Z__SHIFT 16
+static inline uint32_t A2XX_SQ_TEX_0_CLAMP_Z(enum sq_tex_clamp val)
+{
+ return ((val) << A2XX_SQ_TEX_0_CLAMP_Z__SHIFT) & A2XX_SQ_TEX_0_CLAMP_Z__MASK;
+}
+#define A2XX_SQ_TEX_0_PITCH__MASK 0xffc00000
+#define A2XX_SQ_TEX_0_PITCH__SHIFT 22
+static inline uint32_t A2XX_SQ_TEX_0_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A2XX_SQ_TEX_0_PITCH__SHIFT) & A2XX_SQ_TEX_0_PITCH__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_1 0x00000001
+
+#define REG_A2XX_SQ_TEX_2 0x00000002
+#define A2XX_SQ_TEX_2_WIDTH__MASK 0x00001fff
+#define A2XX_SQ_TEX_2_WIDTH__SHIFT 0
+static inline uint32_t A2XX_SQ_TEX_2_WIDTH(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_2_WIDTH__SHIFT) & A2XX_SQ_TEX_2_WIDTH__MASK;
+}
+#define A2XX_SQ_TEX_2_HEIGHT__MASK 0x03ffe000
+#define A2XX_SQ_TEX_2_HEIGHT__SHIFT 13
+static inline uint32_t A2XX_SQ_TEX_2_HEIGHT(uint32_t val)
+{
+ return ((val) << A2XX_SQ_TEX_2_HEIGHT__SHIFT) & A2XX_SQ_TEX_2_HEIGHT__MASK;
+}
+
+#define REG_A2XX_SQ_TEX_3 0x00000003
+#define A2XX_SQ_TEX_3_SWIZ_X__MASK 0x0000000e
+#define A2XX_SQ_TEX_3_SWIZ_X__SHIFT 1
+static inline uint32_t A2XX_SQ_TEX_3_SWIZ_X(enum sq_tex_swiz val)
+{
+ return ((val) << A2XX_SQ_TEX_3_SWIZ_X__SHIFT) & A2XX_SQ_TEX_3_SWIZ_X__MASK;
+}
+#define A2XX_SQ_TEX_3_SWIZ_Y__MASK 0x00000070
+#define A2XX_SQ_TEX_3_SWIZ_Y__SHIFT 4
+static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Y(enum sq_tex_swiz val)
+{
+ return ((val) << A2XX_SQ_TEX_3_SWIZ_Y__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Y__MASK;
+}
+#define A2XX_SQ_TEX_3_SWIZ_Z__MASK 0x00000380
+#define A2XX_SQ_TEX_3_SWIZ_Z__SHIFT 7
+static inline uint32_t A2XX_SQ_TEX_3_SWIZ_Z(enum sq_tex_swiz val)
+{
+ return ((val) << A2XX_SQ_TEX_3_SWIZ_Z__SHIFT) & A2XX_SQ_TEX_3_SWIZ_Z__MASK;
+}
+#define A2XX_SQ_TEX_3_SWIZ_W__MASK 0x00001c00
+#define A2XX_SQ_TEX_3_SWIZ_W__SHIFT 10
+static inline uint32_t A2XX_SQ_TEX_3_SWIZ_W(enum sq_tex_swiz val)
+{
+ return ((val) << A2XX_SQ_TEX_3_SWIZ_W__SHIFT) & A2XX_SQ_TEX_3_SWIZ_W__MASK;
+}
+#define A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK 0x00180000
+#define A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT 19
+static inline uint32_t A2XX_SQ_TEX_3_XY_MAG_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_XY_MAG_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MAG_FILTER__MASK;
+}
+#define A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK 0x00600000
+#define A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT 21
+static inline uint32_t A2XX_SQ_TEX_3_XY_MIN_FILTER(enum sq_tex_filter val)
+{
+ return ((val) << A2XX_SQ_TEX_3_XY_MIN_FILTER__SHIFT) & A2XX_SQ_TEX_3_XY_MIN_FILTER__MASK;
+}
+
+
+#endif /* A2XX_XML */
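Note: the accessor pattern in this generated header is uniform — every multi-bit field gets a __MASK/__SHIFT pair plus an inline packer that shifts the value into place and masks it, single-bit fields are bare defines, and a full register word is built by OR-ing the pieces before it is emitted at the matching REG_* offset. A minimal composition sketch (not part of the generated file; enum adreno_compare_func is assumed to come from the shared adreno_common header this file is generated alongside):

/* Sketch: depth test enabled and writing, with a caller-chosen compare func. */
static inline uint32_t a2xx_depthcontrol_sketch(enum adreno_compare_func zfunc)
{
	return A2XX_RB_DEPTHCONTROL_Z_ENABLE |
	       A2XX_RB_DEPTHCONTROL_Z_WRITE_ENABLE |
	       A2XX_RB_DEPTHCONTROL_ZFUNC(zfunc);
	/* the result is what gets written at REG_A2XX_RB_DEPTHCONTROL */
}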
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
new file mode 100644
index 00000000000..d183516067b
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -0,0 +1,2193 @@
+#ifndef A3XX_XML
+#define A3XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
+- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a3xx_render_mode {
+ RB_RENDERING_PASS = 0,
+ RB_TILING_PASS = 1,
+ RB_RESOLVE_PASS = 2,
+};
+
+enum a3xx_tile_mode {
+ LINEAR = 0,
+ TILE_32X32 = 2,
+};
+
+enum a3xx_threadmode {
+ MULTI = 0,
+ SINGLE = 1,
+};
+
+enum a3xx_instrbuffermode {
+ BUFFER = 1,
+};
+
+enum a3xx_threadsize {
+ TWO_QUADS = 0,
+ FOUR_QUADS = 1,
+};
+
+enum a3xx_state_block_id {
+ HLSQ_BLOCK_ID_TP_TEX = 2,
+ HLSQ_BLOCK_ID_TP_MIPMAP = 3,
+ HLSQ_BLOCK_ID_SP_VS = 4,
+ HLSQ_BLOCK_ID_SP_FS = 6,
+};
+
+enum a3xx_cache_opcode {
+ INVALIDATE = 1,
+};
+
+enum a3xx_vtx_fmt {
+ VFMT_FLOAT_32 = 0,
+ VFMT_FLOAT_32_32 = 1,
+ VFMT_FLOAT_32_32_32 = 2,
+ VFMT_FLOAT_32_32_32_32 = 3,
+ VFMT_FLOAT_16 = 4,
+ VFMT_FLOAT_16_16 = 5,
+ VFMT_FLOAT_16_16_16 = 6,
+ VFMT_FLOAT_16_16_16_16 = 7,
+ VFMT_FIXED_32 = 8,
+ VFMT_FIXED_32_32 = 9,
+ VFMT_FIXED_32_32_32 = 10,
+ VFMT_FIXED_32_32_32_32 = 11,
+ VFMT_SHORT_16 = 16,
+ VFMT_SHORT_16_16 = 17,
+ VFMT_SHORT_16_16_16 = 18,
+ VFMT_SHORT_16_16_16_16 = 19,
+ VFMT_USHORT_16 = 20,
+ VFMT_USHORT_16_16 = 21,
+ VFMT_USHORT_16_16_16 = 22,
+ VFMT_USHORT_16_16_16_16 = 23,
+ VFMT_NORM_SHORT_16 = 24,
+ VFMT_NORM_SHORT_16_16 = 25,
+ VFMT_NORM_SHORT_16_16_16 = 26,
+ VFMT_NORM_SHORT_16_16_16_16 = 27,
+ VFMT_NORM_USHORT_16 = 28,
+ VFMT_NORM_USHORT_16_16 = 29,
+ VFMT_NORM_USHORT_16_16_16 = 30,
+ VFMT_NORM_USHORT_16_16_16_16 = 31,
+ VFMT_UBYTE_8 = 40,
+ VFMT_UBYTE_8_8 = 41,
+ VFMT_UBYTE_8_8_8 = 42,
+ VFMT_UBYTE_8_8_8_8 = 43,
+ VFMT_NORM_UBYTE_8 = 44,
+ VFMT_NORM_UBYTE_8_8 = 45,
+ VFMT_NORM_UBYTE_8_8_8 = 46,
+ VFMT_NORM_UBYTE_8_8_8_8 = 47,
+ VFMT_BYTE_8 = 48,
+ VFMT_BYTE_8_8 = 49,
+ VFMT_BYTE_8_8_8 = 50,
+ VFMT_BYTE_8_8_8_8 = 51,
+ VFMT_NORM_BYTE_8 = 52,
+ VFMT_NORM_BYTE_8_8 = 53,
+ VFMT_NORM_BYTE_8_8_8 = 54,
+ VFMT_NORM_BYTE_8_8_8_8 = 55,
+ VFMT_UINT_10_10_10_2 = 60,
+ VFMT_NORM_UINT_10_10_10_2 = 61,
+ VFMT_INT_10_10_10_2 = 62,
+ VFMT_NORM_INT_10_10_10_2 = 63,
+};
+
+enum a3xx_tex_fmt {
+ TFMT_NORM_USHORT_565 = 4,
+ TFMT_NORM_USHORT_5551 = 6,
+ TFMT_NORM_USHORT_4444 = 7,
+ TFMT_NORM_UINT_X8Z24 = 10,
+ TFMT_NORM_UINT_NV12_UV_TILED = 17,
+ TFMT_NORM_UINT_NV12_Y_TILED = 19,
+ TFMT_NORM_UINT_NV12_UV = 21,
+ TFMT_NORM_UINT_NV12_Y = 23,
+ TFMT_NORM_UINT_I420_Y = 24,
+ TFMT_NORM_UINT_I420_U = 26,
+ TFMT_NORM_UINT_I420_V = 27,
+ TFMT_NORM_UINT_2_10_10_10 = 41,
+ TFMT_NORM_UINT_A8 = 44,
+ TFMT_NORM_UINT_L8_A8 = 47,
+ TFMT_NORM_UINT_8 = 48,
+ TFMT_NORM_UINT_8_8 = 49,
+ TFMT_NORM_UINT_8_8_8 = 50,
+ TFMT_NORM_UINT_8_8_8_8 = 51,
+ TFMT_FLOAT_16 = 64,
+ TFMT_FLOAT_16_16 = 65,
+ TFMT_FLOAT_16_16_16_16 = 67,
+ TFMT_FLOAT_32 = 84,
+ TFMT_FLOAT_32_32 = 85,
+ TFMT_FLOAT_32_32_32_32 = 87,
+};
+
+enum a3xx_tex_fetchsize {
+ TFETCH_DISABLE = 0,
+ TFETCH_1_BYTE = 1,
+ TFETCH_2_BYTE = 2,
+ TFETCH_4_BYTE = 3,
+ TFETCH_8_BYTE = 4,
+ TFETCH_16_BYTE = 5,
+};
+
+enum a3xx_color_fmt {
+ RB_R8G8B8_UNORM = 4,
+ RB_R8G8B8A8_UNORM = 8,
+ RB_Z16_UNORM = 12,
+ RB_A8_UNORM = 20,
+};
+
+enum a3xx_color_swap {
+ WZYX = 0,
+ WXYZ = 1,
+ ZYXW = 2,
+ XYZW = 3,
+};
+
+enum a3xx_msaa_samples {
+ MSAA_ONE = 0,
+ MSAA_TWO = 1,
+ MSAA_FOUR = 2,
+};
+
+enum a3xx_sp_perfcounter_select {
+ SP_FS_CFLOW_INSTRUCTIONS = 12,
+ SP_FS_FULL_ALU_INSTRUCTIONS = 14,
+ SP0_ICL1_MISSES = 26,
+ SP_ALU_ACTIVE_CYCLES = 29,
+};
+
+enum adreno_rb_copy_control_mode {
+ RB_COPY_RESOLVE = 1,
+ RB_COPY_DEPTH_STENCIL = 5,
+};
+
+enum a3xx_tex_filter {
+ A3XX_TEX_NEAREST = 0,
+ A3XX_TEX_LINEAR = 1,
+};
+
+enum a3xx_tex_clamp {
+ A3XX_TEX_REPEAT = 0,
+ A3XX_TEX_CLAMP_TO_EDGE = 1,
+ A3XX_TEX_MIRROR_REPEAT = 2,
+ A3XX_TEX_CLAMP_NONE = 3,
+};
+
+enum a3xx_tex_swiz {
+ A3XX_TEX_X = 0,
+ A3XX_TEX_Y = 1,
+ A3XX_TEX_Z = 2,
+ A3XX_TEX_W = 3,
+ A3XX_TEX_ZERO = 4,
+ A3XX_TEX_ONE = 5,
+};
+
+enum a3xx_tex_type {
+ A3XX_TEX_1D = 0,
+ A3XX_TEX_2D = 1,
+ A3XX_TEX_CUBE = 2,
+ A3XX_TEX_3D = 3,
+};
+
+#define A3XX_INT0_RBBM_GPU_IDLE 0x00000001
+#define A3XX_INT0_RBBM_AHB_ERROR 0x00000002
+#define A3XX_INT0_RBBM_REG_TIMEOUT 0x00000004
+#define A3XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A3XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A3XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020
+#define A3XX_INT0_VFD_ERROR 0x00000040
+#define A3XX_INT0_CP_SW_INT 0x00000080
+#define A3XX_INT0_CP_T0_PACKET_IN_IB 0x00000100
+#define A3XX_INT0_CP_OPCODE_ERROR 0x00000200
+#define A3XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400
+#define A3XX_INT0_CP_HW_FAULT 0x00000800
+#define A3XX_INT0_CP_DMA 0x00001000
+#define A3XX_INT0_CP_IB2_INT 0x00002000
+#define A3XX_INT0_CP_IB1_INT 0x00004000
+#define A3XX_INT0_CP_RB_INT 0x00008000
+#define A3XX_INT0_CP_REG_PROTECT_FAULT 0x00010000
+#define A3XX_INT0_CP_RB_DONE_TS 0x00020000
+#define A3XX_INT0_CP_VS_DONE_TS 0x00040000
+#define A3XX_INT0_CP_PS_DONE_TS 0x00080000
+#define A3XX_INT0_CACHE_FLUSH_TS 0x00100000
+#define A3XX_INT0_CP_AHB_ERROR_HALT 0x00200000
+#define A3XX_INT0_MISC_HANG_DETECT 0x01000000
+#define A3XX_INT0_UCHE_OOB_ACCESS 0x02000000
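Note: the A3XX_INT0_* bits above form the level-0 interrupt bitset; presumably (an inference from the register names that follow, not something the header states) a driver ORs the sources it cares about into REG_A3XX_RBBM_INT_0_MASK and reads pending sources back from REG_A3XX_RBBM_INT_0_STATUS. A sketch of such a mask:

/* Sketch only: an illustrative enable mask built from the bits above. */
#define A3XX_INT0_SKETCH_MASK \
	(A3XX_INT0_RBBM_AHB_ERROR | \
	 A3XX_INT0_CP_HW_FAULT | \
	 A3XX_INT0_CACHE_FLUSH_TS)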
+#define REG_A3XX_RBBM_HW_VERSION 0x00000000
+
+#define REG_A3XX_RBBM_HW_RELEASE 0x00000001
+
+#define REG_A3XX_RBBM_HW_CONFIGURATION 0x00000002
+
+#define REG_A3XX_RBBM_CLOCK_CTL 0x00000010
+
+#define REG_A3XX_RBBM_SP_HYST_CNT 0x00000012
+
+#define REG_A3XX_RBBM_SW_RESET_CMD 0x00000018
+
+#define REG_A3XX_RBBM_AHB_CTL0 0x00000020
+
+#define REG_A3XX_RBBM_AHB_CTL1 0x00000021
+
+#define REG_A3XX_RBBM_AHB_CMD 0x00000022
+
+#define REG_A3XX_RBBM_AHB_ERROR_STATUS 0x00000027
+
+#define REG_A3XX_RBBM_GPR0_CTL 0x0000002e
+
+#define REG_A3XX_RBBM_STATUS 0x00000030
+#define A3XX_RBBM_STATUS_HI_BUSY 0x00000001
+#define A3XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
+#define A3XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
+#define A3XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000
+#define A3XX_RBBM_STATUS_VBIF_BUSY 0x00008000
+#define A3XX_RBBM_STATUS_TSE_BUSY 0x00010000
+#define A3XX_RBBM_STATUS_RAS_BUSY 0x00020000
+#define A3XX_RBBM_STATUS_RB_BUSY 0x00040000
+#define A3XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
+#define A3XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
+#define A3XX_RBBM_STATUS_VFD_BUSY 0x00200000
+#define A3XX_RBBM_STATUS_VPC_BUSY 0x00400000
+#define A3XX_RBBM_STATUS_UCHE_BUSY 0x00800000
+#define A3XX_RBBM_STATUS_SP_BUSY 0x01000000
+#define A3XX_RBBM_STATUS_TPL1_BUSY 0x02000000
+#define A3XX_RBBM_STATUS_MARB_BUSY 0x04000000
+#define A3XX_RBBM_STATUS_VSC_BUSY 0x08000000
+#define A3XX_RBBM_STATUS_ARB_BUSY 0x10000000
+#define A3XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
+#define A3XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000
+#define A3XX_RBBM_STATUS_GPU_BUSY 0x80000000
+
+#define REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x00000033
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x00000050
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL0 0x00000051
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL1 0x00000054
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL2 0x00000057
+
+#define REG_A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x0000005a
+
+#define REG_A3XX_RBBM_INT_CLEAR_CMD 0x00000061
+
+#define REG_A3XX_RBBM_INT_0_MASK 0x00000063
+
+#define REG_A3XX_RBBM_INT_0_STATUS 0x00000064
+
+#define REG_A3XX_RBBM_PERFCTR_CTL 0x00000080
+
+#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD0 0x00000081
+
+#define REG_A3XX_RBBM_PERFCTR_LOAD_CMD1 0x00000082
+
+#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000084
+
+#define REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000085
+
+#define REG_A3XX_RBBM_PERFCOUNTER0_SELECT 0x00000086
+
+#define REG_A3XX_RBBM_PERFCOUNTER1_SELECT 0x00000087
+
+#define REG_A3XX_RBBM_GPU_BUSY_MASKED 0x00000088
+
+#define REG_A3XX_RBBM_PERFCTR_CP_0_LO 0x00000090
+
+#define REG_A3XX_RBBM_PERFCTR_CP_0_HI 0x00000091
+
+#define REG_A3XX_RBBM_PERFCTR_RBBM_0_LO 0x00000092
+
+#define REG_A3XX_RBBM_PERFCTR_RBBM_0_HI 0x00000093
+
+#define REG_A3XX_RBBM_PERFCTR_RBBM_1_LO 0x00000094
+
+#define REG_A3XX_RBBM_PERFCTR_RBBM_1_HI 0x00000095
+
+#define REG_A3XX_RBBM_PERFCTR_PC_0_LO 0x00000096
+
+#define REG_A3XX_RBBM_PERFCTR_PC_0_HI 0x00000097
+
+#define REG_A3XX_RBBM_PERFCTR_PC_1_LO 0x00000098
+
+#define REG_A3XX_RBBM_PERFCTR_PC_1_HI 0x00000099
+
+#define REG_A3XX_RBBM_PERFCTR_PC_2_LO 0x0000009a
+
+#define REG_A3XX_RBBM_PERFCTR_PC_2_HI 0x0000009b
+
+#define REG_A3XX_RBBM_PERFCTR_PC_3_LO 0x0000009c
+
+#define REG_A3XX_RBBM_PERFCTR_PC_3_HI 0x0000009d
+
+#define REG_A3XX_RBBM_PERFCTR_VFD_0_LO 0x0000009e
+
+#define REG_A3XX_RBBM_PERFCTR_VFD_0_HI 0x0000009f
+
+#define REG_A3XX_RBBM_PERFCTR_VFD_1_LO 0x000000a0
+
+#define REG_A3XX_RBBM_PERFCTR_VFD_1_HI 0x000000a1
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000a2
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000a3
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000a4
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000a5
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000a6
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000a7
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000a8
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000a9
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000aa
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000ab
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000ac
+
+#define REG_A3XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000ad
+
+#define REG_A3XX_RBBM_PERFCTR_VPC_0_LO 0x000000ae
+
+#define REG_A3XX_RBBM_PERFCTR_VPC_0_HI 0x000000af
+
+#define REG_A3XX_RBBM_PERFCTR_VPC_1_LO 0x000000b0
+
+#define REG_A3XX_RBBM_PERFCTR_VPC_1_HI 0x000000b1
+
+#define REG_A3XX_RBBM_PERFCTR_TSE_0_LO 0x000000b2
+
+#define REG_A3XX_RBBM_PERFCTR_TSE_0_HI 0x000000b3
+
+#define REG_A3XX_RBBM_PERFCTR_TSE_1_LO 0x000000b4
+
+#define REG_A3XX_RBBM_PERFCTR_TSE_1_HI 0x000000b5
+
+#define REG_A3XX_RBBM_PERFCTR_RAS_0_LO 0x000000b6
+
+#define REG_A3XX_RBBM_PERFCTR_RAS_0_HI 0x000000b7
+
+#define REG_A3XX_RBBM_PERFCTR_RAS_1_LO 0x000000b8
+
+#define REG_A3XX_RBBM_PERFCTR_RAS_1_HI 0x000000b9
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_0_LO 0x000000ba
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_0_HI 0x000000bb
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_1_LO 0x000000bc
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_1_HI 0x000000bd
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_2_LO 0x000000be
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_2_HI 0x000000bf
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_3_LO 0x000000c0
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_3_HI 0x000000c1
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_4_LO 0x000000c2
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_4_HI 0x000000c3
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_5_LO 0x000000c4
+
+#define REG_A3XX_RBBM_PERFCTR_UCHE_5_HI 0x000000c5
+
+#define REG_A3XX_RBBM_PERFCTR_TP_0_LO 0x000000c6
+
+#define REG_A3XX_RBBM_PERFCTR_TP_0_HI 0x000000c7
+
+#define REG_A3XX_RBBM_PERFCTR_TP_1_LO 0x000000c8
+
+#define REG_A3XX_RBBM_PERFCTR_TP_1_HI 0x000000c9
+
+#define REG_A3XX_RBBM_PERFCTR_TP_2_LO 0x000000ca
+
+#define REG_A3XX_RBBM_PERFCTR_TP_2_HI 0x000000cb
+
+#define REG_A3XX_RBBM_PERFCTR_TP_3_LO 0x000000cc
+
+#define REG_A3XX_RBBM_PERFCTR_TP_3_HI 0x000000cd
+
+#define REG_A3XX_RBBM_PERFCTR_TP_4_LO 0x000000ce
+
+#define REG_A3XX_RBBM_PERFCTR_TP_4_HI 0x000000cf
+
+#define REG_A3XX_RBBM_PERFCTR_TP_5_LO 0x000000d0
+
+#define REG_A3XX_RBBM_PERFCTR_TP_5_HI 0x000000d1
+
+#define REG_A3XX_RBBM_PERFCTR_SP_0_LO 0x000000d2
+
+#define REG_A3XX_RBBM_PERFCTR_SP_0_HI 0x000000d3
+
+#define REG_A3XX_RBBM_PERFCTR_SP_1_LO 0x000000d4
+
+#define REG_A3XX_RBBM_PERFCTR_SP_1_HI 0x000000d5
+
+#define REG_A3XX_RBBM_PERFCTR_SP_2_LO 0x000000d6
+
+#define REG_A3XX_RBBM_PERFCTR_SP_2_HI 0x000000d7
+
+#define REG_A3XX_RBBM_PERFCTR_SP_3_LO 0x000000d8
+
+#define REG_A3XX_RBBM_PERFCTR_SP_3_HI 0x000000d9
+
+#define REG_A3XX_RBBM_PERFCTR_SP_4_LO 0x000000da
+
+#define REG_A3XX_RBBM_PERFCTR_SP_4_HI 0x000000db
+
+#define REG_A3XX_RBBM_PERFCTR_SP_5_LO 0x000000dc
+
+#define REG_A3XX_RBBM_PERFCTR_SP_5_HI 0x000000dd
+
+#define REG_A3XX_RBBM_PERFCTR_SP_6_LO 0x000000de
+
+#define REG_A3XX_RBBM_PERFCTR_SP_6_HI 0x000000df
+
+#define REG_A3XX_RBBM_PERFCTR_SP_7_LO 0x000000e0
+
+#define REG_A3XX_RBBM_PERFCTR_SP_7_HI 0x000000e1
+
+#define REG_A3XX_RBBM_PERFCTR_RB_0_LO 0x000000e2
+
+#define REG_A3XX_RBBM_PERFCTR_RB_0_HI 0x000000e3
+
+#define REG_A3XX_RBBM_PERFCTR_RB_1_LO 0x000000e4
+
+#define REG_A3XX_RBBM_PERFCTR_RB_1_HI 0x000000e5
+
+#define REG_A3XX_RBBM_PERFCTR_PWR_0_LO 0x000000ea
+
+#define REG_A3XX_RBBM_PERFCTR_PWR_0_HI 0x000000eb
+
+#define REG_A3XX_RBBM_PERFCTR_PWR_1_LO 0x000000ec
+
+#define REG_A3XX_RBBM_PERFCTR_PWR_1_HI 0x000000ed
+
+#define REG_A3XX_RBBM_RBBM_CTL 0x00000100
+
+#define REG_A3XX_RBBM_DEBUG_BUS_CTL 0x00000111
+
+#define REG_A3XX_RBBM_DEBUG_BUS_DATA_STATUS 0x00000112
+
+#define REG_A3XX_CP_PFP_UCODE_ADDR 0x000001c9
+
+#define REG_A3XX_CP_PFP_UCODE_DATA 0x000001ca
+
+#define REG_A3XX_CP_ROQ_ADDR 0x000001cc
+
+#define REG_A3XX_CP_ROQ_DATA 0x000001cd
+
+#define REG_A3XX_CP_MERCIU_ADDR 0x000001d1
+
+#define REG_A3XX_CP_MERCIU_DATA 0x000001d2
+
+#define REG_A3XX_CP_MERCIU_DATA2 0x000001d3
+
+#define REG_A3XX_CP_MEQ_ADDR 0x000001da
+
+#define REG_A3XX_CP_MEQ_DATA 0x000001db
+
+#define REG_A3XX_CP_PERFCOUNTER_SELECT 0x00000445
+
+#define REG_A3XX_CP_HW_FAULT 0x0000045c
+
+#define REG_A3XX_CP_PROTECT_CTRL 0x0000045e
+
+#define REG_A3XX_CP_PROTECT_STATUS 0x0000045f
+
+static inline uint32_t REG_A3XX_CP_PROTECT(uint32_t i0) { return 0x00000460 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460 + 0x1*i0; }
+
+#define REG_A3XX_CP_AHB_FAULT 0x0000054d
+
+#define REG_A3XX_GRAS_CL_CLIP_CNTL 0x00002040
+#define A3XX_GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 0x00001000
+#define A3XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00010000
+#define A3XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
+#define A3XX_GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 0x00080000
+#define A3XX_GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 0x00100000
+#define A3XX_GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 0x00200000
+
+#define REG_A3XX_GRAS_CL_GB_CLIP_ADJ 0x00002044
+#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK;
+}
+#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A3XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A3XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_XOFFSET 0x00002048
+#define A3XX_GRAS_CL_VPORT_XOFFSET__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_XOFFSET(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_XOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_XOFFSET__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_XSCALE 0x00002049
+#define A3XX_GRAS_CL_VPORT_XSCALE__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_XSCALE__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_XSCALE(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_XSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_XSCALE__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_YOFFSET 0x0000204a
+#define A3XX_GRAS_CL_VPORT_YOFFSET__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_YOFFSET(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_YOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_YOFFSET__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_YSCALE 0x0000204b
+#define A3XX_GRAS_CL_VPORT_YSCALE__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_YSCALE__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_YSCALE(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_YSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_YSCALE__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_ZOFFSET 0x0000204c
+#define A3XX_GRAS_CL_VPORT_ZOFFSET__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_ZOFFSET(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZOFFSET__SHIFT) & A3XX_GRAS_CL_VPORT_ZOFFSET__MASK;
+}
+
+#define REG_A3XX_GRAS_CL_VPORT_ZSCALE 0x0000204d
+#define A3XX_GRAS_CL_VPORT_ZSCALE__MASK 0xffffffff
+#define A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT 0
+static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val)
+{
+ return ((fui(val)) << A3XX_GRAS_CL_VPORT_ZSCALE__SHIFT) & A3XX_GRAS_CL_VPORT_ZSCALE__MASK;
+}
+
+#define REG_A3XX_GRAS_SU_POINT_MINMAX 0x00002068
+
+#define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069
+
+#define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c
+#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK 0x00ffffff
+#define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT 0
+static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
+{
+ return ((((uint32_t)(val * 40.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
+}
+
+#define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000206d
+#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((((uint32_t)(val * 44.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070
+#define A3XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
+#define A3XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
+#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007fc
+#define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 2
+static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
+}
+#define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
+
+#define REG_A3XX_GRAS_SC_CONTROL 0x00002072
+#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x000000f0
+#define A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 4
+static inline uint32_t A3XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
+{
+ return ((val) << A3XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RENDER_MODE__MASK;
+}
+#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000f00
+#define A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 8
+static inline uint32_t A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A3XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK;
+}
+#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000
+#define A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12
+static inline uint32_t A3XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A3XX_GRAS_SC_CONTROL_RASTER_MODE__MASK;
+}
+
+#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_TL 0x00002074
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
+}
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A3XX_GRAS_SC_SCREEN_SCISSOR_BR 0x00002075
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
+}
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_TL 0x00002079
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A3XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000207a
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A3XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A3XX_RB_MODE_CONTROL 0x000020c0
+#define A3XX_RB_MODE_CONTROL_GMEM_BYPASS 0x00000080
+#define A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK 0x00000700
+#define A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT 8
+static inline uint32_t A3XX_RB_MODE_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
+{
+ return ((val) << A3XX_RB_MODE_CONTROL_RENDER_MODE__SHIFT) & A3XX_RB_MODE_CONTROL_RENDER_MODE__MASK;
+}
+#define A3XX_RB_MODE_CONTROL_MARB_CACHE_SPLIT_MODE 0x00008000
+#define A3XX_RB_MODE_CONTROL_PACKER_TIMER_ENABLE 0x00010000
+
+#define REG_A3XX_RB_RENDER_CONTROL 0x000020c1
+#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK 0x00000ff0
+#define A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT 4
+static inline uint32_t A3XX_RB_RENDER_CONTROL_BIN_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_RENDER_CONTROL_BIN_WIDTH__SHIFT) & A3XX_RB_RENDER_CONTROL_BIN_WIDTH__MASK;
+}
+#define A3XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00001000
+#define A3XX_RB_RENDER_CONTROL_ENABLE_GMEM 0x00002000
+#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK 0x07000000
+#define A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT 24
+static inline uint32_t A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A3XX_RB_RENDER_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A3XX_RB_MSAA_CONTROL 0x000020c2
+#define A3XX_RB_MSAA_CONTROL_DISABLE 0x00000400
+#define A3XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000f000
+#define A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 12
+static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLES__MASK;
+}
+#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK 0xffff0000
+#define A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A3XX_RB_MSAA_CONTROL_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__SHIFT) & A3XX_RB_MSAA_CONTROL_SAMPLE_MASK__MASK;
+}
+
+#define REG_A3XX_UNKNOWN_20C3 0x000020c3
+
+static inline uint32_t REG_A3XX_RB_MRT(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020c4 + 0x4*i0; }
+#define A3XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
+#define A3XX_RB_MRT_CONTROL_BLEND 0x00000010
+#define A3XX_RB_MRT_CONTROL_BLEND2 0x00000020
+#define A3XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00
+#define A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8
+static inline uint32_t A3XX_RB_MRT_CONTROL_ROP_CODE(uint32_t val)
+{
+ return ((val) << A3XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A3XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
+#define A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK 0x00003000
+#define A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT 12
+static inline uint32_t A3XX_RB_MRT_CONTROL_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A3XX_RB_MRT_CONTROL_DITHER_MODE__SHIFT) & A3XX_RB_MRT_CONTROL_DITHER_MODE__MASK;
+}
+#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000
+#define A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24
+static inline uint32_t A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A3XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A3XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020c5 + 0x4*i0; }
+#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000003f
+#define A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a3xx_color_fmt val)
+{
+ return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x000000c0
+#define A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 6
+static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a3xx_tile_mode val)
+{
+ return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xfffe0000
+#define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 17
+static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
+}
+
+static inline uint32_t REG_A3XX_RB_MRT_BUF_BASE(uint32_t i0) { return 0x000020c6 + 0x4*i0; }
+#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK 0xfffffff0
+#define A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT 4
+static inline uint32_t A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__SHIFT) & A3XX_RB_MRT_BUF_BASE_COLOR_BUF_BASE__MASK;
+}
+
+static inline uint32_t REG_A3XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020c7 + 0x4*i0; }
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum adreno_rb_blend_opcode val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum adreno_rb_blend_opcode val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A3XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+#define A3XX_RB_MRT_BLEND_CONTROL_CLAMP_ENABLE 0x20000000
+
+#define REG_A3XX_RB_BLEND_RED 0x000020e4
+#define A3XX_RB_BLEND_RED_UINT__MASK 0x000000ff
+#define A3XX_RB_BLEND_RED_UINT__SHIFT 0
+static inline uint32_t A3XX_RB_BLEND_RED_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_BLEND_RED_UINT__SHIFT) & A3XX_RB_BLEND_RED_UINT__MASK;
+}
+#define A3XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
+#define A3XX_RB_BLEND_RED_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_BLEND_RED_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A3XX_RB_BLEND_RED_FLOAT__SHIFT) & A3XX_RB_BLEND_RED_FLOAT__MASK;
+}
+
+#define REG_A3XX_RB_BLEND_GREEN 0x000020e5
+#define A3XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
+#define A3XX_RB_BLEND_GREEN_UINT__SHIFT 0
+static inline uint32_t A3XX_RB_BLEND_GREEN_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_BLEND_GREEN_UINT__SHIFT) & A3XX_RB_BLEND_GREEN_UINT__MASK;
+}
+#define A3XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
+#define A3XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_BLEND_GREEN_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A3XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A3XX_RB_BLEND_GREEN_FLOAT__MASK;
+}
+
+#define REG_A3XX_RB_BLEND_BLUE 0x000020e6
+#define A3XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
+#define A3XX_RB_BLEND_BLUE_UINT__SHIFT 0
+static inline uint32_t A3XX_RB_BLEND_BLUE_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_BLEND_BLUE_UINT__SHIFT) & A3XX_RB_BLEND_BLUE_UINT__MASK;
+}
+#define A3XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
+#define A3XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_BLEND_BLUE_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A3XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A3XX_RB_BLEND_BLUE_FLOAT__MASK;
+}
+
+#define REG_A3XX_RB_BLEND_ALPHA 0x000020e7
+#define A3XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
+#define A3XX_RB_BLEND_ALPHA_UINT__SHIFT 0
+static inline uint32_t A3XX_RB_BLEND_ALPHA_UINT(uint32_t val)
+{
+ return ((val) << A3XX_RB_BLEND_ALPHA_UINT__SHIFT) & A3XX_RB_BLEND_ALPHA_UINT__MASK;
+}
+#define A3XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
+#define A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
+static inline uint32_t A3XX_RB_BLEND_ALPHA_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A3XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A3XX_RB_BLEND_ALPHA_FLOAT__MASK;
+}
+
+#define REG_A3XX_UNKNOWN_20E8 0x000020e8
+
+#define REG_A3XX_UNKNOWN_20E9 0x000020e9
+
+#define REG_A3XX_UNKNOWN_20EA 0x000020ea
+
+#define REG_A3XX_UNKNOWN_20EB 0x000020eb
+
+#define REG_A3XX_RB_COPY_CONTROL 0x000020ec
+#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003
+#define A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0
+static inline uint32_t A3XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val)
+{
+ return ((val) << A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A3XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
+}
+#define A3XX_RB_COPY_CONTROL_MODE__MASK 0x00000070
+#define A3XX_RB_COPY_CONTROL_MODE__SHIFT 4
+static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
+{
+ return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
+}
+#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xfffffc00
+#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 10
+static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
+{
+ return ((val >> 10) << A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
+}
+
+#define REG_A3XX_RB_COPY_DEST_BASE 0x000020ed
+#define A3XX_RB_COPY_DEST_BASE_BASE__MASK 0xfffffff0
+#define A3XX_RB_COPY_DEST_BASE_BASE__SHIFT 4
+static inline uint32_t A3XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A3XX_RB_COPY_DEST_BASE_BASE__MASK;
+}
+
+#define REG_A3XX_RB_COPY_DEST_PITCH 0x000020ee
+#define A3XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff
+#define A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0
+static inline uint32_t A3XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A3XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A3XX_RB_COPY_DEST_PITCH_PITCH__MASK;
+}
+
+#define REG_A3XX_RB_COPY_DEST_INFO 0x000020ef
+#define A3XX_RB_COPY_DEST_INFO_TILE__MASK 0x00000003
+#define A3XX_RB_COPY_DEST_INFO_TILE__SHIFT 0
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_TILE(enum a3xx_tile_mode val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A3XX_RB_COPY_DEST_INFO_TILE__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc
+#define A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_FORMAT(enum a3xx_color_fmt val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A3XX_RB_COPY_DEST_INFO_FORMAT__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
+#define A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A3XX_RB_COPY_DEST_INFO_SWAP__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000
+#define A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A3XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK;
+}
+#define A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000
+#define A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18
+static inline uint32_t A3XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val)
+{
+ return ((val) << A3XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A3XX_RB_COPY_DEST_INFO_ENDIAN__MASK;
+}
+
+#define REG_A3XX_RB_DEPTH_CONTROL 0x00002100
+#define A3XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002
+#define A3XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
+#define A3XX_RB_DEPTH_CONTROL_EARLY_Z_ENABLE 0x00000008
+#define A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070
+#define A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4
+static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
+}
+#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
+#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
+
+#define REG_A3XX_UNKNOWN_2101 0x00002101
+
+#define REG_A3XX_RB_DEPTH_INFO 0x00002102
+#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
+#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
+{
+ return ((val) << A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
+}
+#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff800
+#define A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 11
+static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
+{
+ return ((val >> 10) << A3XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A3XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
+}
+
+#define REG_A3XX_RB_DEPTH_PITCH 0x00002103
+#define A3XX_RB_DEPTH_PITCH__MASK 0xffffffff
+#define A3XX_RB_DEPTH_PITCH__SHIFT 0
+static inline uint32_t A3XX_RB_DEPTH_PITCH(uint32_t val)
+{
+ return ((val >> 3) << A3XX_RB_DEPTH_PITCH__SHIFT) & A3XX_RB_DEPTH_PITCH__MASK;
+}
+
+#define REG_A3XX_RB_STENCIL_CONTROL 0x00002104
+#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A3XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000004
+#define A3XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A3XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A3XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
+#define REG_A3XX_UNKNOWN_2105 0x00002105
+
+#define REG_A3XX_UNKNOWN_2106 0x00002106
+
+#define REG_A3XX_UNKNOWN_2107 0x00002107
+
+#define REG_A3XX_RB_STENCILREFMASK 0x00002108
+#define A3XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A3XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A3XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A3XX_RB_STENCILREFMASK_BF 0x00002109
+#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
+#define A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
+static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
+#define A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
+static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
+#define A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A3XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A3XX_PA_SC_WINDOW_OFFSET 0x0000210e
+#define A3XX_PA_SC_WINDOW_OFFSET_X__MASK 0x0000ffff
+#define A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A3XX_PA_SC_WINDOW_OFFSET_X__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_X__MASK;
+}
+#define A3XX_PA_SC_WINDOW_OFFSET_Y__MASK 0xffff0000
+#define A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A3XX_PA_SC_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A3XX_PA_SC_WINDOW_OFFSET_Y__SHIFT) & A3XX_PA_SC_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A3XX_PC_VSTREAM_CONTROL 0x000021e4
+
+#define REG_A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x000021ea
+
+#define REG_A3XX_PC_PRIM_VTX_CNTL 0x000021ec
+#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK 0x0000001f
+#define A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_STRIDE_IN_VPC__MASK;
+}
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x000000e0
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 5
+static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_FRONT_PTYPE__MASK;
+}
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000700
+#define A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT 8
+static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK;
+}
+#define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
+
+#define REG_A3XX_PC_RESTART_INDEX 0x000021ed
+
+#define REG_A3XX_HLSQ_CONTROL_0_REG 0x00002200
+#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
+#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
+}
+#define A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
+#define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
+#define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
+#define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
+#define A3XX_HLSQ_CONTROL_0_REG_CONSTSWITCHMODE 0x08000000
+#define A3XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
+#define A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
+#define A3XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
+#define A3XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
+
+#define REG_A3XX_HLSQ_CONTROL_1_REG 0x00002201
+#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040
+#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
+}
+#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
+#define A3XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
+
+#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202
+#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
+#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK;
+}
+
+#define REG_A3XX_HLSQ_CONTROL_3_REG 0x00002203
+
+#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
+static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
+}
+#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A3XX_HLSQ_FS_CONTROL_REG 0x00002205
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
+static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK;
+}
+#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK;
+}
+
+#define REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG 0x00002206
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK;
+}
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK;
+}
+
+#define REG_A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x00002207
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
+static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK;
+}
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK;
+}
+
+#define REG_A3XX_HLSQ_CL_NDRANGE_0_REG 0x0000220a
+
+#define REG_A3XX_HLSQ_CL_NDRANGE_1_REG 0x0000220b
+
+#define REG_A3XX_HLSQ_CL_NDRANGE_2_REG 0x0000220c
+
+#define REG_A3XX_HLSQ_CL_CONTROL_0_REG 0x00002211
+
+#define REG_A3XX_HLSQ_CL_CONTROL_1_REG 0x00002212
+
+#define REG_A3XX_HLSQ_CL_KERNEL_CONST_REG 0x00002214
+
+#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x00002215
+
+#define REG_A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x00002217
+
+#define REG_A3XX_HLSQ_CL_WG_OFFSET_REG 0x0000221a
+
+#define REG_A3XX_VFD_CONTROL_0 0x00002240
+#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x0003ffff
+#define A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0
+static inline uint32_t A3XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A3XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK;
+}
+#define A3XX_VFD_CONTROL_0_PACKETSIZE__MASK 0x003c0000
+#define A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT 18
+static inline uint32_t A3XX_VFD_CONTROL_0_PACKETSIZE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_0_PACKETSIZE__SHIFT) & A3XX_VFD_CONTROL_0_PACKETSIZE__MASK;
+}
+#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x07c00000
+#define A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 22
+static inline uint32_t A3XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
+}
+#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xf8000000
+#define A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 27
+static inline uint32_t A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK;
+}
+
+#define REG_A3XX_VFD_CONTROL_1 0x00002241
+#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff
+#define A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
+static inline uint32_t A3XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
+}
+#define A3XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
+#define A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
+static inline uint32_t A3XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A3XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+#define A3XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000
+#define A3XX_VFD_CONTROL_1_REGID4INST__SHIFT 24
+static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A3XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+
+#define REG_A3XX_VFD_INDEX_MIN 0x00002242
+
+#define REG_A3XX_VFD_INDEX_MAX 0x00002243
+
+#define REG_A3XX_VFD_INSTANCEID_OFFSET 0x00002244
+
+#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
+
+static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + 0x2*i0; }
+
+static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; }
+#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
+#define A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK;
+}
+#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0001ff80
+#define A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
+}
+#define A3XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00020000
+#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK 0x00fc0000
+#define A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT 18
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_INDEXCODE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_FETCH_INSTR_0_INDEXCODE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_INDEXCODE__MASK;
+}
+#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK 0xff000000
+#define A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT 24
+static inline uint32_t A3XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val)
+{
+ return ((val) << A3XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT) & A3XX_VFD_FETCH_INSTR_0_STEPRATE__MASK;
+}
+
+static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x00002247 + 0x2*i0; }
+
+static inline uint32_t REG_A3XX_VFD_DECODE(uint32_t i0) { return 0x00002266 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x00002266 + 0x1*i0; }
+#define A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
+#define A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A3XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A3XX_VFD_DECODE_INSTR_WRITEMASK__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010
+#define A3XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0
+#define A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6
+static inline uint32_t A3XX_VFD_DECODE_INSTR_FORMAT(enum a3xx_vtx_fmt val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A3XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000
+#define A3XX_VFD_DECODE_INSTR_REGID__SHIFT 12
+static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
+#define A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
+static inline uint32_t A3XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
+{
+ return ((val) << A3XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A3XX_VFD_DECODE_INSTR_SHIFTCNT__MASK;
+}
+#define A3XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000
+#define A3XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000
+
+#define REG_A3XX_VFD_VS_THREADING_THRESHOLD 0x0000227e
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK 0x0000000f
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT 0
+static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_THRESHOLD__MASK;
+}
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK 0x0000ff00
+#define A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT 8
+static inline uint32_t A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT(uint32_t val)
+{
+ return ((val) << A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__SHIFT) & A3XX_VFD_VS_THREADING_THRESHOLD_REGID_VTXCNT__MASK;
+}
+
+#define REG_A3XX_VPC_ATTR 0x00002280
+#define A3XX_VPC_ATTR_TOTALATTR__MASK 0x00000fff
+#define A3XX_VPC_ATTR_TOTALATTR__SHIFT 0
+static inline uint32_t A3XX_VPC_ATTR_TOTALATTR(uint32_t val)
+{
+ return ((val) << A3XX_VPC_ATTR_TOTALATTR__SHIFT) & A3XX_VPC_ATTR_TOTALATTR__MASK;
+}
+#define A3XX_VPC_ATTR_THRDASSIGN__MASK 0x0ffff000
+#define A3XX_VPC_ATTR_THRDASSIGN__SHIFT 12
+static inline uint32_t A3XX_VPC_ATTR_THRDASSIGN(uint32_t val)
+{
+ return ((val) << A3XX_VPC_ATTR_THRDASSIGN__SHIFT) & A3XX_VPC_ATTR_THRDASSIGN__MASK;
+}
+#define A3XX_VPC_ATTR_LMSIZE__MASK 0xf0000000
+#define A3XX_VPC_ATTR_LMSIZE__SHIFT 28
+static inline uint32_t A3XX_VPC_ATTR_LMSIZE(uint32_t val)
+{
+ return ((val) << A3XX_VPC_ATTR_LMSIZE__SHIFT) & A3XX_VPC_ATTR_LMSIZE__MASK;
+}
+
+#define REG_A3XX_VPC_PACK 0x00002281
+#define A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00
+#define A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8
+static inline uint32_t A3XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val)
+{
+ return ((val) << A3XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A3XX_VPC_PACK_NUMFPNONPOSVAR__MASK;
+}
+#define A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000
+#define A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16
+static inline uint32_t A3XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
+{
+ return ((val) << A3XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A3XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
+}
+
+static inline uint32_t REG_A3XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002282 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002282 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00002286 + 0x1*i0; }
+
+#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_0 0x0000228a
+
+#define REG_A3XX_VPC_VARY_CYLWRAP_ENABLE_1 0x0000228b
+
+#define REG_A3XX_SP_SP_CTRL_REG 0x000022c0
+#define A3XX_SP_SP_CTRL_REG_RESOLVE 0x00010000
+#define A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK 0x000c0000
+#define A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT 18
+static inline uint32_t A3XX_SP_SP_CTRL_REG_CONSTMODE(uint32_t val)
+{
+ return ((val) << A3XX_SP_SP_CTRL_REG_CONSTMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_CONSTMODE__MASK;
+}
+#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK 0x00300000
+#define A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT 20
+static inline uint32_t A3XX_SP_SP_CTRL_REG_SLEEPMODE(uint32_t val)
+{
+ return ((val) << A3XX_SP_SP_CTRL_REG_SLEEPMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_SLEEPMODE__MASK;
+}
+#define A3XX_SP_SP_CTRL_REG_LOMODE__MASK 0x00c00000
+#define A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT 22
+static inline uint32_t A3XX_SP_SP_CTRL_REG_LOMODE(uint32_t val)
+{
+ return ((val) << A3XX_SP_SP_CTRL_REG_LOMODE__SHIFT) & A3XX_SP_SP_CTRL_REG_LOMODE__MASK;
+}
+
+#define REG_A3XX_SP_VS_CTRL_REG0 0x000022c4
+#define A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
+#define A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
+#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A3XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
+#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000
+#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24
+static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG0_LENGTH__MASK;
+}
+
+#define REG_A3XX_SP_VS_CTRL_REG1 0x000022c5
+#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
+#define A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
+#define A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG1_CONSTFOOTPRINT__MASK;
+}
+#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x3f000000
+#define A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24
+static inline uint32_t A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK;
+}
+
+#define REG_A3XX_SP_VS_PARAM_REG 0x000022c6
+#define A3XX_SP_VS_PARAM_REG_POSREGID__MASK 0x000000ff
+#define A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0
+static inline uint32_t A3XX_SP_VS_PARAM_REG_POSREGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_POSREGID__MASK;
+}
+#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00
+#define A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8
+static inline uint32_t A3XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
+}
+#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000
+#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
+static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
+}
+
+static inline uint32_t REG_A3XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
+#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff
+#define A3XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A3XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
+#define A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
+static inline uint32_t A3XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A3XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A3XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
+#define A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
+static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A3XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A3XX_SP_VS_OBJ_OFFSET_REG 0x000022d4
+#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5
+
+#define REG_A3XX_SP_VS_PVT_MEM_CTRL_REG 0x000022d6
+
+#define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7
+
+#define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG 0x000022d8
+
+#define REG_A3XX_SP_VS_LENGTH_REG 0x000022df
+#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
+#define A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_VS_LENGTH_REG_SHADERLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_VS_LENGTH_REG_SHADERLENGTH__MASK;
+}
+
+#define REG_A3XX_SP_FS_CTRL_REG0 0x000022e0
+#define A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
+#define A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK 0x00000002
+#define A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT 1
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffermode val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
+#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
+#define A3XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
+#define A3XX_SP_FS_CTRL_REG0_LENGTH__MASK 0xff000000
+#define A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT 24
+static inline uint32_t A3XX_SP_FS_CTRL_REG0_LENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG0_LENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG0_LENGTH__MASK;
+}
+
+#define REG_A3XX_SP_FS_CTRL_REG1 0x000022e1
+#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000003ff
+#define A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK 0x000ffc00
+#define A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT 10
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG1_CONSTFOOTPRINT__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x00f00000
+#define A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 20
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK;
+}
+#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x3f000000
+#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT 24
+static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT) & A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK;
+}
+
+#define REG_A3XX_SP_FS_OBJ_OFFSET_REG 0x000022e2
+#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
+#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
+static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
+#define A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
+static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3
+
+#define REG_A3XX_SP_FS_PVT_MEM_CTRL_REG 0x000022e4
+
+#define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5
+
+#define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG 0x000022e6
+
+#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_0 0x000022e8
+
+#define REG_A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x000022e9
+
+#define REG_A3XX_SP_FS_OUTPUT_REG 0x000022ec
+
+static inline uint32_t REG_A3XX_SP_FS_MRT(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f0 + 0x1*i0; }
+#define A3XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
+#define A3XX_SP_FS_MRT_REG_REGID__SHIFT 0
+static inline uint32_t A3XX_SP_FS_MRT_REG_REGID(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_MRT_REG_REGID__SHIFT) & A3XX_SP_FS_MRT_REG_REGID__MASK;
+}
+#define A3XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
+
+static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
+
+static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT_REG(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
+#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK 0x0000003f
+#define A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT 0
+static inline uint32_t A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT(enum a3xx_color_fmt val)
+{
+ return ((val) << A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__SHIFT) & A3XX_SP_FS_IMAGE_OUTPUT_REG_MRTFORMAT__MASK;
+}
+
+#define REG_A3XX_SP_FS_LENGTH_REG 0x000022ff
+#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK 0xffffffff
+#define A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT 0
+static inline uint32_t A3XX_SP_FS_LENGTH_REG_SHADERLENGTH(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK;
+}
+
+#define REG_A3XX_TPL1_TP_VS_TEX_OFFSET 0x00002340
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
+static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK;
+}
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
+static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_MEMOBJOFFSET__MASK;
+}
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
+#define A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
+static inline uint32_t A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_VS_TEX_OFFSET_BASETABLEPTR__MASK;
+}
+
+#define REG_A3XX_TPL1_TP_VS_BORDER_COLOR_BASE_ADDR 0x00002341
+
+#define REG_A3XX_TPL1_TP_FS_TEX_OFFSET 0x00002342
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
+static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_SAMPLEROFFSET__MASK;
+}
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK 0x0000ff00
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT 8
+static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_MEMOBJOFFSET__MASK;
+}
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK 0xffff0000
+#define A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT 16
+static inline uint32_t A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR(uint32_t val)
+{
+ return ((val) << A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__SHIFT) & A3XX_TPL1_TP_FS_TEX_OFFSET_BASETABLEPTR__MASK;
+}
+
+#define REG_A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x00002343
+
+#define REG_A3XX_VBIF_CLKON 0x00003001
+
+#define REG_A3XX_VBIF_FIXED_SORT_EN 0x0000300c
+
+#define REG_A3XX_VBIF_FIXED_SORT_SEL0 0x0000300d
+
+#define REG_A3XX_VBIF_FIXED_SORT_SEL1 0x0000300e
+
+#define REG_A3XX_VBIF_ABIT_SORT 0x0000301c
+
+#define REG_A3XX_VBIF_ABIT_SORT_CONF 0x0000301d
+
+#define REG_A3XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A3XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+
+#define REG_A3XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+
+#define REG_A3XX_VBIF_IN_WR_LIM_CONF0 0x00003030
+
+#define REG_A3XX_VBIF_IN_WR_LIM_CONF1 0x00003031
+
+#define REG_A3XX_VBIF_OUT_RD_LIM_CONF0 0x00003034
+
+#define REG_A3XX_VBIF_OUT_WR_LIM_CONF0 0x00003035
+
+#define REG_A3XX_VBIF_DDR_OUT_MAX_BURST 0x00003036
+
+#define REG_A3XX_VBIF_ARB_CTL 0x0000303c
+
+#define REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+
+#define REG_A3XX_VBIF_OUT_AXI_AMEMTYPE_CONF0 0x00003058
+
+#define REG_A3XX_VBIF_OUT_AXI_AOOO_EN 0x0000305e
+
+#define REG_A3XX_VBIF_OUT_AXI_AOOO 0x0000305f
+
+#define REG_A3XX_VSC_BIN_SIZE 0x00000c01
+#define A3XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
+#define A3XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A3XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A3XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A3XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A3XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
+#define A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
+static inline uint32_t A3XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A3XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A3XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A3XX_VSC_SIZE_ADDRESS 0x00000c02
+
+static inline uint32_t REG_A3XX_VSC_PIPE(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+
+static inline uint32_t REG_A3XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c06 + 0x3*i0; }
+#define A3XX_VSC_PIPE_CONFIG_X__MASK 0x000003ff
+#define A3XX_VSC_PIPE_CONFIG_X__SHIFT 0
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_X(uint32_t val)
+{
+ return ((val) << A3XX_VSC_PIPE_CONFIG_X__SHIFT) & A3XX_VSC_PIPE_CONFIG_X__MASK;
+}
+#define A3XX_VSC_PIPE_CONFIG_Y__MASK 0x000ffc00
+#define A3XX_VSC_PIPE_CONFIG_Y__SHIFT 10
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_Y(uint32_t val)
+{
+ return ((val) << A3XX_VSC_PIPE_CONFIG_Y__SHIFT) & A3XX_VSC_PIPE_CONFIG_Y__MASK;
+}
+#define A3XX_VSC_PIPE_CONFIG_W__MASK 0x00f00000
+#define A3XX_VSC_PIPE_CONFIG_W__SHIFT 20
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_W(uint32_t val)
+{
+ return ((val) << A3XX_VSC_PIPE_CONFIG_W__SHIFT) & A3XX_VSC_PIPE_CONFIG_W__MASK;
+}
+#define A3XX_VSC_PIPE_CONFIG_H__MASK 0x0f000000
+#define A3XX_VSC_PIPE_CONFIG_H__SHIFT 24
+static inline uint32_t A3XX_VSC_PIPE_CONFIG_H(uint32_t val)
+{
+ return ((val) << A3XX_VSC_PIPE_CONFIG_H__SHIFT) & A3XX_VSC_PIPE_CONFIG_H__MASK;
+}
+
+static inline uint32_t REG_A3XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c07 + 0x3*i0; }
+
+static inline uint32_t REG_A3XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c08 + 0x3*i0; }
+
+#define REG_A3XX_UNKNOWN_0C3D 0x00000c3d
+
+#define REG_A3XX_PC_PERFCOUNTER0_SELECT 0x00000c48
+
+#define REG_A3XX_PC_PERFCOUNTER1_SELECT 0x00000c49
+
+#define REG_A3XX_PC_PERFCOUNTER2_SELECT 0x00000c4a
+
+#define REG_A3XX_PC_PERFCOUNTER3_SELECT 0x00000c4b
+
+#define REG_A3XX_UNKNOWN_0C81 0x00000c81
+
+#define REG_A3XX_GRAS_PERFCOUNTER0_SELECT 0x00000c88
+
+#define REG_A3XX_GRAS_PERFCOUNTER1_SELECT 0x00000c89
+
+#define REG_A3XX_GRAS_PERFCOUNTER2_SELECT 0x00000c8a
+
+#define REG_A3XX_GRAS_PERFCOUNTER3_SELECT 0x00000c8b
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_X(uint32_t i0) { return 0x00000ca0 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Y(uint32_t i0) { return 0x00000ca1 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_Z(uint32_t i0) { return 0x00000ca2 + 0x4*i0; }
+
+static inline uint32_t REG_A3XX_GRAS_CL_USER_PLANE_W(uint32_t i0) { return 0x00000ca3 + 0x4*i0; }
+
+#define REG_A3XX_RB_GMEM_BASE_ADDR 0x00000cc0
+
+#define REG_A3XX_RB_PERFCOUNTER0_SELECT 0x00000cc6
+
+#define REG_A3XX_RB_PERFCOUNTER1_SELECT 0x00000cc7
+
+#define REG_A3XX_RB_WINDOW_SIZE 0x00000ce0
+#define A3XX_RB_WINDOW_SIZE_WIDTH__MASK 0x00003fff
+#define A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A3XX_RB_WINDOW_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << A3XX_RB_WINDOW_SIZE_WIDTH__SHIFT) & A3XX_RB_WINDOW_SIZE_WIDTH__MASK;
+}
+#define A3XX_RB_WINDOW_SIZE_HEIGHT__MASK 0x0fffc000
+#define A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT 14
+static inline uint32_t A3XX_RB_WINDOW_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << A3XX_RB_WINDOW_SIZE_HEIGHT__SHIFT) & A3XX_RB_WINDOW_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A3XX_HLSQ_PERFCOUNTER0_SELECT 0x00000e00
+
+#define REG_A3XX_HLSQ_PERFCOUNTER1_SELECT 0x00000e01
+
+#define REG_A3XX_HLSQ_PERFCOUNTER2_SELECT 0x00000e02
+
+#define REG_A3XX_HLSQ_PERFCOUNTER3_SELECT 0x00000e03
+
+#define REG_A3XX_HLSQ_PERFCOUNTER4_SELECT 0x00000e04
+
+#define REG_A3XX_HLSQ_PERFCOUNTER5_SELECT 0x00000e05
+
+#define REG_A3XX_UNKNOWN_0E43 0x00000e43
+
+#define REG_A3XX_VFD_PERFCOUNTER0_SELECT 0x00000e44
+
+#define REG_A3XX_VFD_PERFCOUNTER1_SELECT 0x00000e45
+
+#define REG_A3XX_VPC_VPC_DEBUG_RAM_SEL 0x00000e61
+
+#define REG_A3XX_VPC_VPC_DEBUG_RAM_READ 0x00000e62
+
+#define REG_A3XX_VPC_PERFCOUNTER0_SELECT 0x00000e64
+
+#define REG_A3XX_VPC_PERFCOUNTER1_SELECT 0x00000e65
+
+#define REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG 0x00000e82
+
+#define REG_A3XX_UCHE_PERFCOUNTER0_SELECT 0x00000e84
+
+#define REG_A3XX_UCHE_PERFCOUNTER1_SELECT 0x00000e85
+
+#define REG_A3XX_UCHE_PERFCOUNTER2_SELECT 0x00000e86
+
+#define REG_A3XX_UCHE_PERFCOUNTER3_SELECT 0x00000e87
+
+#define REG_A3XX_UCHE_PERFCOUNTER4_SELECT 0x00000e88
+
+#define REG_A3XX_UCHE_PERFCOUNTER5_SELECT 0x00000e89
+
+#define REG_A3XX_UCHE_CACHE_INVALIDATE0_REG 0x00000ea0
+#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK 0x0fffffff
+#define A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT 0
+static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR(uint32_t val)
+{
+ return ((val) << A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE0_REG_ADDR__MASK;
+}
+
+#define REG_A3XX_UCHE_CACHE_INVALIDATE1_REG 0x00000ea1
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK 0x0fffffff
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT 0
+static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR(uint32_t val)
+{
+ return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_ADDR__MASK;
+}
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK 0x30000000
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT 28
+static inline uint32_t A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE(enum a3xx_cache_opcode val)
+{
+ return ((val) << A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__SHIFT) & A3XX_UCHE_CACHE_INVALIDATE1_REG_OPCODE__MASK;
+}
+#define A3XX_UCHE_CACHE_INVALIDATE1_REG_ENTIRE_CACHE 0x80000000
+
+#define REG_A3XX_SP_PERFCOUNTER0_SELECT 0x00000ec4
+
+#define REG_A3XX_SP_PERFCOUNTER1_SELECT 0x00000ec5
+
+#define REG_A3XX_SP_PERFCOUNTER2_SELECT 0x00000ec6
+
+#define REG_A3XX_SP_PERFCOUNTER3_SELECT 0x00000ec7
+
+#define REG_A3XX_SP_PERFCOUNTER4_SELECT 0x00000ec8
+
+#define REG_A3XX_SP_PERFCOUNTER5_SELECT 0x00000ec9
+
+#define REG_A3XX_SP_PERFCOUNTER6_SELECT 0x00000eca
+
+#define REG_A3XX_SP_PERFCOUNTER7_SELECT 0x00000ecb
+
+#define REG_A3XX_UNKNOWN_0EE0 0x00000ee0
+
+#define REG_A3XX_UNKNOWN_0F03 0x00000f03
+
+#define REG_A3XX_TP_PERFCOUNTER0_SELECT 0x00000f04
+
+#define REG_A3XX_TP_PERFCOUNTER1_SELECT 0x00000f05
+
+#define REG_A3XX_TP_PERFCOUNTER2_SELECT 0x00000f06
+
+#define REG_A3XX_TP_PERFCOUNTER3_SELECT 0x00000f07
+
+#define REG_A3XX_TP_PERFCOUNTER4_SELECT 0x00000f08
+
+#define REG_A3XX_TP_PERFCOUNTER5_SELECT 0x00000f09
+
+#define REG_A3XX_TEX_SAMP_0 0x00000000
+#define A3XX_TEX_SAMP_0_XY_MAG__MASK 0x0000000c
+#define A3XX_TEX_SAMP_0_XY_MAG__SHIFT 2
+static inline uint32_t A3XX_TEX_SAMP_0_XY_MAG(enum a3xx_tex_filter val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_XY_MAG__SHIFT) & A3XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A3XX_TEX_SAMP_0_XY_MIN__MASK 0x00000030
+#define A3XX_TEX_SAMP_0_XY_MIN__SHIFT 4
+static inline uint32_t A3XX_TEX_SAMP_0_XY_MIN(enum a3xx_tex_filter val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_XY_MIN__SHIFT) & A3XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A3XX_TEX_SAMP_0_WRAP_S__MASK 0x000001c0
+#define A3XX_TEX_SAMP_0_WRAP_S__SHIFT 6
+static inline uint32_t A3XX_TEX_SAMP_0_WRAP_S(enum a3xx_tex_clamp val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_WRAP_S__SHIFT) & A3XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A3XX_TEX_SAMP_0_WRAP_T__MASK 0x00000e00
+#define A3XX_TEX_SAMP_0_WRAP_T__SHIFT 9
+static inline uint32_t A3XX_TEX_SAMP_0_WRAP_T(enum a3xx_tex_clamp val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_WRAP_T__SHIFT) & A3XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A3XX_TEX_SAMP_0_WRAP_R__MASK 0x00007000
+#define A3XX_TEX_SAMP_0_WRAP_R__SHIFT 12
+static inline uint32_t A3XX_TEX_SAMP_0_WRAP_R(enum a3xx_tex_clamp val)
+{
+ return ((val) << A3XX_TEX_SAMP_0_WRAP_R__SHIFT) & A3XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
+
+#define REG_A3XX_TEX_SAMP_1 0x00000001
+
+#define REG_A3XX_TEX_CONST_0 0x00000000
+#define A3XX_TEX_CONST_0_TILED 0x00000001
+#define A3XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A3XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A3XX_TEX_CONST_0_SWIZ_X(enum a3xx_tex_swiz val)
+{
+ return ((val) << A3XX_TEX_CONST_0_SWIZ_X__SHIFT) & A3XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A3XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A3XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Y(enum a3xx_tex_swiz val)
+{
+ return ((val) << A3XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A3XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A3XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A3XX_TEX_CONST_0_SWIZ_Z(enum a3xx_tex_swiz val)
+{
+ return ((val) << A3XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A3XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A3XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A3XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A3XX_TEX_CONST_0_SWIZ_W(enum a3xx_tex_swiz val)
+{
+ return ((val) << A3XX_TEX_CONST_0_SWIZ_W__SHIFT) & A3XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A3XX_TEX_CONST_0_FMT__MASK 0x1fc00000
+#define A3XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A3XX_TEX_CONST_0_FMT(enum a3xx_tex_fmt val)
+{
+ return ((val) << A3XX_TEX_CONST_0_FMT__SHIFT) & A3XX_TEX_CONST_0_FMT__MASK;
+}
+#define A3XX_TEX_CONST_0_TYPE__MASK 0xc0000000
+#define A3XX_TEX_CONST_0_TYPE__SHIFT 30
+static inline uint32_t A3XX_TEX_CONST_0_TYPE(enum a3xx_tex_type val)
+{
+ return ((val) << A3XX_TEX_CONST_0_TYPE__SHIFT) & A3XX_TEX_CONST_0_TYPE__MASK;
+}
+
+#define REG_A3XX_TEX_CONST_1 0x00000001
+#define A3XX_TEX_CONST_1_HEIGHT__MASK 0x00003fff
+#define A3XX_TEX_CONST_1_HEIGHT__SHIFT 0
+static inline uint32_t A3XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_1_HEIGHT__SHIFT) & A3XX_TEX_CONST_1_HEIGHT__MASK;
+}
+#define A3XX_TEX_CONST_1_WIDTH__MASK 0x0fffc000
+#define A3XX_TEX_CONST_1_WIDTH__SHIFT 14
+static inline uint32_t A3XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_1_WIDTH__SHIFT) & A3XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A3XX_TEX_CONST_1_FETCHSIZE__MASK 0xf0000000
+#define A3XX_TEX_CONST_1_FETCHSIZE__SHIFT 28
+static inline uint32_t A3XX_TEX_CONST_1_FETCHSIZE(enum a3xx_tex_fetchsize val)
+{
+ return ((val) << A3XX_TEX_CONST_1_FETCHSIZE__SHIFT) & A3XX_TEX_CONST_1_FETCHSIZE__MASK;
+}
+
+#define REG_A3XX_TEX_CONST_2 0x00000002
+#define A3XX_TEX_CONST_2_INDX__MASK 0x000000ff
+#define A3XX_TEX_CONST_2_INDX__SHIFT 0
+static inline uint32_t A3XX_TEX_CONST_2_INDX(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_2_INDX__SHIFT) & A3XX_TEX_CONST_2_INDX__MASK;
+}
+#define A3XX_TEX_CONST_2_PITCH__MASK 0x3ffff000
+#define A3XX_TEX_CONST_2_PITCH__SHIFT 12
+static inline uint32_t A3XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A3XX_TEX_CONST_2_PITCH__SHIFT) & A3XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A3XX_TEX_CONST_2_SWAP__MASK 0xc0000000
+#define A3XX_TEX_CONST_2_SWAP__SHIFT 30
+static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A3XX_TEX_CONST_2_SWAP__SHIFT) & A3XX_TEX_CONST_2_SWAP__MASK;
+}
+
+#define REG_A3XX_TEX_CONST_3 0x00000003
+
+
+#endif /* A3XX_XML */
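As a quick orientation to the generated helpers above: each register field gets a __MASK/__SHIFT pair plus an inline builder that masks its input, and a complete register value is formed by OR-ing the builders together. A minimal sketch, assuming hypothetical width/height values and the gpu_write() accessor introduced later in this series:

    uint32_t w = 1920, h = 1080;            /* hypothetical render target size */
    uint32_t rb_window_size =
            A3XX_RB_WINDOW_SIZE_WIDTH(w) |  /* bits 0..13  */
            A3XX_RB_WINDOW_SIZE_HEIGHT(h);  /* bits 14..27 */
    gpu_write(gpu, REG_A3XX_RB_WINDOW_SIZE, rb_window_size);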
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
new file mode 100644
index 00000000000..035bd13dc8b
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -0,0 +1,502 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "a3xx_gpu.h"
+
+#define A3XX_INT0_MASK \
+ (A3XX_INT0_RBBM_AHB_ERROR | \
+ A3XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
+ A3XX_INT0_CP_T0_PACKET_IN_IB | \
+ A3XX_INT0_CP_OPCODE_ERROR | \
+ A3XX_INT0_CP_RESERVED_BIT_ERROR | \
+ A3XX_INT0_CP_HW_FAULT | \
+ A3XX_INT0_CP_IB1_INT | \
+ A3XX_INT0_CP_IB2_INT | \
+ A3XX_INT0_CP_RB_INT | \
+ A3XX_INT0_CP_REG_PROTECT_FAULT | \
+ A3XX_INT0_CP_AHB_ERROR_HALT | \
+ A3XX_INT0_UCHE_OOB_ACCESS)
+
+static struct platform_device *a3xx_pdev;
+
+static void a3xx_me_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb;
+
+ OUT_PKT3(ring, CP_ME_INIT, 17);
+ OUT_RING(ring, 0x000003f7);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000080);
+ OUT_RING(ring, 0x00000100);
+ OUT_RING(ring, 0x00000180);
+ OUT_RING(ring, 0x00006600);
+ OUT_RING(ring, 0x00000150);
+ OUT_RING(ring, 0x0000014e);
+ OUT_RING(ring, 0x00000154);
+ OUT_RING(ring, 0x00000001);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ gpu->funcs->flush(gpu);
+ gpu->funcs->idle(gpu);
+}
+
+static int a3xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ uint32_t *ptr, len;
+ int i, ret;
+
+ DBG("%s", gpu->name);
+
+ if (adreno_is_a305(adreno_gpu)) {
+ /* Set up 16 deep read/write request queues: */
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
+ /* Enable WR-REQ: */
+ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
+ /* Set up round robin arbitration between both AXI ports: */
+ gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
+ /* Set up AOOO: */
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
+
+ } else if (adreno_is_a320(adreno_gpu)) {
+ /* Set up 16 deep read/write request queues: */
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x10101010);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x10101010);
+ /* Enable WR-REQ: */
+ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x0000ff);
+ /* Set up round robin arbitration between both AXI ports: */
+ gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
+ /* Set up AOOO: */
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000003c);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0x003c003c);
+ /* Enable 1K sort: */
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x000000ff);
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
+
+ } else if (adreno_is_a330(adreno_gpu)) {
+ /* Set up 16 deep read/write request queues: */
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF1, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_DDR_OUT_MAX_BURST, 0x0000303);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A3XX_VBIF_IN_WR_LIM_CONF1, 0x18181818);
+ /* Enable WR-REQ: */
+ gpu_write(gpu, REG_A3XX_VBIF_GATE_OFF_WRREQ_EN, 0x00003f);
+ /* Set up round robin arbitration between both AXI ports: */
+ gpu_write(gpu, REG_A3XX_VBIF_ARB_CTL, 0x00000030);
+ /* Set up VBIF_ROUND_ROBIN_QOS_ARB: */
+ gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0001);
+ /* Set up AOOO: */
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO_EN, 0x0000ffff);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_AXI_AOOO, 0xffffffff);
+ /* Enable 1K sort: */
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT, 0x0001ffff);
+ gpu_write(gpu, REG_A3XX_VBIF_ABIT_SORT_CONF, 0x000000a4);
+ /* Disable VBIF clock gating. This allows AXI to run at a
+ * higher frequency than the GPU:
+ */
+ gpu_write(gpu, REG_A3XX_VBIF_CLKON, 0x00000001);
+
+ } else {
+ BUG();
+ }
+
+ /* Make all blocks contribute to the GPU BUSY perf counter: */
+ gpu_write(gpu, REG_A3XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);
+
+ /* Tune the hysteresis counters for SP and CP idle detection: */
+ gpu_write(gpu, REG_A3XX_RBBM_SP_HYST_CNT, 0x10);
+ gpu_write(gpu, REG_A3XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
+
+ /* Enable the RBBM error reporting bits. This lets us get
+ * useful information on failure:
+ */
+ gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL0, 0x00000001);
+
+ /* Enable AHB error reporting: */
+ gpu_write(gpu, REG_A3XX_RBBM_AHB_CTL1, 0xa6ffffff);
+
+ /* Turn on the power counters: */
+ gpu_write(gpu, REG_A3XX_RBBM_RBBM_CTL, 0x00030000);
+
+ /* Turn on hang detection - this spews a lot of useful information
+ * into the RBBM registers on a hang:
+ */
+ gpu_write(gpu, REG_A3XX_RBBM_INTERFACE_HANG_INT_CTL, 0x00010fff);
+
+ /* Enable 64-byte cacheline size. HW Default is 32-byte (0x000000E0): */
+ gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
+
+ /* Enable Clock gating: */
+ gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
+
+ /* Set the OCMEM base address for A330 */
+//TODO:
+// if (adreno_is_a330(adreno_gpu)) {
+// gpu_write(gpu, REG_A3XX_RB_GMEM_BASE_ADDR,
+// (unsigned int)(a3xx_gpu->ocmem_base >> 14));
+// }
+
+ /* Turn on performance counters: */
+ gpu_write(gpu, REG_A3XX_RBBM_PERFCTR_CTL, 0x01);
+
+ /* Set SP perfcounter 7 to count SP_FS_FULL_ALU_INSTRUCTIONS;
+ * we will use this to augment our hang detection:
+ */
+ gpu_write(gpu, REG_A3XX_SP_PERFCOUNTER7_SELECT,
+ SP_FS_FULL_ALU_INSTRUCTIONS);
+
+ gpu_write(gpu, REG_A3XX_RBBM_INT_0_MASK, A3XX_INT0_MASK);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ /* setup access protection: */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT_CTRL, 0x00000007);
+
+ /* RBBM registers */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(0), 0x63000040);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(1), 0x62000080);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(2), 0x600000cc);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(3), 0x60000108);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(4), 0x64000140);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(5), 0x66000400);
+
+ /* CP registers */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(6), 0x65000700);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(7), 0x610007d8);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(8), 0x620007e0);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(9), 0x61001178);
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(10), 0x64001180);
+
+ /* RB registers */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(11), 0x60003300);
+
+ /* VBIF registers */
+ gpu_write(gpu, REG_A3XX_CP_PROTECT(12), 0x6b00c000);
+
+ /* NOTE: PM4/micro-engine firmware registers look to be the same
+ * for a2xx and a3xx.. we could possibly push that part down to
+ * adreno_gpu base class. Or push both PM4 and PFP but
+ * parameterize the pfp ucode addr/data registers..
+ */
+
+ /* Load PM4: */
+ ptr = (uint32_t *)(adreno_gpu->pm4->data);
+ len = adreno_gpu->pm4->size / 4;
+ DBG("loading PM4 ucode version: %u", ptr[0]);
+
+ gpu_write(gpu, REG_AXXX_CP_DEBUG,
+ AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE |
+ AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE);
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_WADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_AXXX_CP_ME_RAM_DATA, ptr[i]);
+
+ /* Load PFP: */
+ ptr = (uint32_t *)(adreno_gpu->pfp->data);
+ len = adreno_gpu->pfp->size / 4;
+ DBG("loading PFP ucode version: %u", ptr[0]);
+
+ gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_ADDR, 0);
+ for (i = 1; i < len; i++)
+ gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);
+
+ /* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
+ if (adreno_is_a305(adreno_gpu) || adreno_is_a320(adreno_gpu))
+ gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
+ AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
+ AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
+ AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(14));
+
+
+ /* clear ME_HALT to start micro engine */
+ gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
+
+ a3xx_me_init(gpu);
+
+ return 0;
+}
+
+static void a3xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a3xx_gpu *a3xx_gpu = to_a3xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ adreno_gpu_cleanup(adreno_gpu);
+ put_device(&a3xx_gpu->pdev->dev);
+ kfree(a3xx_gpu);
+}
+
+static void a3xx_idle(struct msm_gpu *gpu)
+{
+ unsigned long t;
+
+ /* wait for ringbuffer to drain: */
+ adreno_idle(gpu);
+
+ t = jiffies + ADRENO_IDLE_TIMEOUT;
+
+ /* then wait for GPU to finish: */
+ do {
+ uint32_t rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS);
+ if (!(rbbm_status & A3XX_RBBM_STATUS_GPU_BUSY))
+ return;
+ } while (time_before(jiffies, t));
+
+ DRM_ERROR("timeout waiting for %s to idle!\n", gpu->name);
+
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+}
+
+static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
+{
+ uint32_t status;
+
+ status = gpu_read(gpu, REG_A3XX_RBBM_INT_0_STATUS);
+ DBG("%s: %08x", gpu->name, status);
+
+ // TODO
+
+ gpu_write(gpu, REG_A3XX_RBBM_INT_CLEAR_CMD, status);
+
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static const unsigned int a3xx_registers[] = {
+ 0x0000, 0x0002, 0x0010, 0x0012, 0x0018, 0x0018, 0x0020, 0x0027,
+ 0x0029, 0x002b, 0x002e, 0x0033, 0x0040, 0x0042, 0x0050, 0x005c,
+ 0x0060, 0x006c, 0x0080, 0x0082, 0x0084, 0x0088, 0x0090, 0x00e5,
+ 0x00ea, 0x00ed, 0x0100, 0x0100, 0x0110, 0x0123, 0x01c0, 0x01c1,
+ 0x01c3, 0x01c5, 0x01c7, 0x01c7, 0x01d5, 0x01d9, 0x01dc, 0x01dd,
+ 0x01ea, 0x01ea, 0x01ee, 0x01f1, 0x01f5, 0x01f5, 0x01fc, 0x01ff,
+ 0x0440, 0x0440, 0x0443, 0x0443, 0x0445, 0x0445, 0x044d, 0x044f,
+ 0x0452, 0x0452, 0x0454, 0x046f, 0x047c, 0x047c, 0x047f, 0x047f,
+ 0x0578, 0x057f, 0x0600, 0x0602, 0x0605, 0x0607, 0x060a, 0x060e,
+ 0x0612, 0x0614, 0x0c01, 0x0c02, 0x0c06, 0x0c1d, 0x0c3d, 0x0c3f,
+ 0x0c48, 0x0c4b, 0x0c80, 0x0c80, 0x0c88, 0x0c8b, 0x0ca0, 0x0cb7,
+ 0x0cc0, 0x0cc1, 0x0cc6, 0x0cc7, 0x0ce4, 0x0ce5, 0x0e00, 0x0e05,
+ 0x0e0c, 0x0e0c, 0x0e22, 0x0e23, 0x0e41, 0x0e45, 0x0e64, 0x0e65,
+ 0x0e80, 0x0e82, 0x0e84, 0x0e89, 0x0ea0, 0x0ea1, 0x0ea4, 0x0ea7,
+ 0x0ec4, 0x0ecb, 0x0ee0, 0x0ee0, 0x0f00, 0x0f01, 0x0f03, 0x0f09,
+ 0x2040, 0x2040, 0x2044, 0x2044, 0x2048, 0x204d, 0x2068, 0x2069,
+ 0x206c, 0x206d, 0x2070, 0x2070, 0x2072, 0x2072, 0x2074, 0x2075,
+ 0x2079, 0x207a, 0x20c0, 0x20d3, 0x20e4, 0x20ef, 0x2100, 0x2109,
+ 0x210c, 0x210c, 0x210e, 0x210e, 0x2110, 0x2111, 0x2114, 0x2115,
+ 0x21e4, 0x21e4, 0x21ea, 0x21ea, 0x21ec, 0x21ed, 0x21f0, 0x21f0,
+ 0x2200, 0x2212, 0x2214, 0x2217, 0x221a, 0x221a, 0x2240, 0x227e,
+ 0x2280, 0x228b, 0x22c0, 0x22c0, 0x22c4, 0x22ce, 0x22d0, 0x22d8,
+ 0x22df, 0x22e6, 0x22e8, 0x22e9, 0x22ec, 0x22ec, 0x22f0, 0x22f7,
+ 0x22ff, 0x22ff, 0x2340, 0x2343, 0x2348, 0x2349, 0x2350, 0x2356,
+ 0x2360, 0x2360, 0x2440, 0x2440, 0x2444, 0x2444, 0x2448, 0x244d,
+ 0x2468, 0x2469, 0x246c, 0x246d, 0x2470, 0x2470, 0x2472, 0x2472,
+ 0x2474, 0x2475, 0x2479, 0x247a, 0x24c0, 0x24d3, 0x24e4, 0x24ef,
+ 0x2500, 0x2509, 0x250c, 0x250c, 0x250e, 0x250e, 0x2510, 0x2511,
+ 0x2514, 0x2515, 0x25e4, 0x25e4, 0x25ea, 0x25ea, 0x25ec, 0x25ed,
+ 0x25f0, 0x25f0, 0x2600, 0x2612, 0x2614, 0x2617, 0x261a, 0x261a,
+ 0x2640, 0x267e, 0x2680, 0x268b, 0x26c0, 0x26c0, 0x26c4, 0x26ce,
+ 0x26d0, 0x26d8, 0x26df, 0x26e6, 0x26e8, 0x26e9, 0x26ec, 0x26ec,
+ 0x26f0, 0x26f7, 0x26ff, 0x26ff, 0x2740, 0x2743, 0x2748, 0x2749,
+ 0x2750, 0x2756, 0x2760, 0x2760, 0x300c, 0x300e, 0x301c, 0x301d,
+ 0x302a, 0x302a, 0x302c, 0x302d, 0x3030, 0x3031, 0x3034, 0x3036,
+ 0x303c, 0x303c, 0x305e, 0x305f,
+};
+
+static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
+{
+ int i;
+
+ adreno_show(gpu, m);
+ seq_printf(m, "status: %08x\n",
+ gpu_read(gpu, REG_A3XX_RBBM_STATUS));
+
+ /* dump these out in a form that can be parsed by demsm: */
+ seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
+ for (i = 0; i < ARRAY_SIZE(a3xx_registers); i += 2) {
+ uint32_t start = a3xx_registers[i];
+ uint32_t end = a3xx_registers[i+1];
+ uint32_t addr;
+
+ for (addr = start; addr <= end; addr++) {
+ uint32_t val = gpu_read(gpu, addr);
+ seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
+ }
+ }
+}
+#endif
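Note that a3xx_registers holds dword register indices while demsm expects byte offsets, which is why a3xx_show() prints addr<<2. For example, the range entry starting at dword index 0x0c80 is emitted as lines of the form (value shown as a placeholder):

    IO:R 00003200 xxxxxxxx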
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .hw_init = a3xx_hw_init,
+ .pm_suspend = msm_gpu_pm_suspend,
+ .pm_resume = msm_gpu_pm_resume,
+ .recover = adreno_recover,
+ .last_fence = adreno_last_fence,
+ .submit = adreno_submit,
+ .flush = adreno_flush,
+ .idle = a3xx_idle,
+ .irq = a3xx_irq,
+ .destroy = a3xx_destroy,
+#ifdef CONFIG_DEBUG_FS
+ .show = a3xx_show,
+#endif
+ },
+};
+
+struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
+{
+ struct a3xx_gpu *a3xx_gpu = NULL;
+ struct msm_gpu *gpu;
+ struct platform_device *pdev = a3xx_pdev;
+ struct adreno_platform_config *config;
+ int ret;
+
+ if (!pdev) {
+ dev_err(dev->dev, "no a3xx device\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ config = pdev->dev.platform_data;
+
+ a3xx_gpu = kzalloc(sizeof(*a3xx_gpu), GFP_KERNEL);
+ if (!a3xx_gpu) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ gpu = &a3xx_gpu->base.base;
+
+ get_device(&pdev->dev);
+ a3xx_gpu->pdev = pdev;
+
+ gpu->fast_rate = config->fast_rate;
+ gpu->slow_rate = config->slow_rate;
+ gpu->bus_freq = config->bus_freq;
+
+ DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
+ gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
+
+ ret = adreno_gpu_init(dev, pdev, &a3xx_gpu->base,
+ &funcs, config->rev);
+ if (ret)
+ goto fail;
+
+ return &a3xx_gpu->base.base;
+
+fail:
+ if (a3xx_gpu)
+ a3xx_destroy(&a3xx_gpu->base.base);
+
+ return ERR_PTR(ret);
+}
+
+/*
+ * The a3xx device:
+ */
+
+static int a3xx_probe(struct platform_device *pdev)
+{
+ static struct adreno_platform_config config = {};
+#ifdef CONFIG_OF
+ /* TODO */
+#else
+ uint32_t version = socinfo_get_version();
+ if (cpu_is_apq8064ab()) {
+ config.fast_rate = 450000000;
+ config.slow_rate = 27000000;
+ config.bus_freq = 4;
+ config.rev = ADRENO_REV(3, 2, 1, 0);
+ } else if (cpu_is_apq8064() || cpu_is_msm8960ab()) {
+ config.fast_rate = 400000000;
+ config.slow_rate = 27000000;
+ config.bus_freq = 4;
+
+ if (SOCINFO_VERSION_MAJOR(version) == 2)
+ config.rev = ADRENO_REV(3, 2, 0, 2);
+ else if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
+ (SOCINFO_VERSION_MINOR(version) == 1))
+ config.rev = ADRENO_REV(3, 2, 0, 1);
+ else
+ config.rev = ADRENO_REV(3, 2, 0, 0);
+
+ } else if (cpu_is_msm8930()) {
+ config.fast_rate = 400000000;
+ config.slow_rate = 27000000;
+ config.bus_freq = 3;
+
+ if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
+ (SOCINFO_VERSION_MINOR(version) == 2))
+ config.rev = ADRENO_REV(3, 0, 5, 2);
+ else
+ config.rev = ADRENO_REV(3, 0, 5, 0);
+
+ }
+#endif
+ pdev->dev.platform_data = &config;
+ a3xx_pdev = pdev;
+ return 0;
+}
+
+static int a3xx_remove(struct platform_device *pdev)
+{
+ a3xx_pdev = NULL;
+ return 0;
+}
+
+static struct platform_driver a3xx_driver = {
+ .probe = a3xx_probe,
+ .remove = a3xx_remove,
+ .driver.name = "kgsl-3d0",
+};
+
+void __init a3xx_register(void)
+{
+ platform_driver_register(&a3xx_driver);
+}
+
+void __exit a3xx_unregister(void)
+{
+ platform_driver_unregister(&a3xx_driver);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.h b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
new file mode 100644
index 00000000000..32c398c2d00
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __A3XX_GPU_H__
+#define __A3XX_GPU_H__
+
+#include "adreno_gpu.h"
+#include "a3xx.xml.h"
+
+struct a3xx_gpu {
+ struct adreno_gpu base;
+ struct platform_device *pdev;
+};
+#define to_a3xx_gpu(x) container_of(x, struct a3xx_gpu, base)
+
+#endif /* __A3XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
new file mode 100644
index 00000000000..61979d458ac
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -0,0 +1,432 @@
+#ifndef ADRENO_COMMON_XML
+#define ADRENO_COMMON_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
+- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum adreno_pa_su_sc_draw {
+ PC_DRAW_POINTS = 0,
+ PC_DRAW_LINES = 1,
+ PC_DRAW_TRIANGLES = 2,
+};
+
+enum adreno_compare_func {
+ FUNC_NEVER = 0,
+ FUNC_LESS = 1,
+ FUNC_EQUAL = 2,
+ FUNC_LEQUAL = 3,
+ FUNC_GREATER = 4,
+ FUNC_NOTEQUAL = 5,
+ FUNC_GEQUAL = 6,
+ FUNC_ALWAYS = 7,
+};
+
+enum adreno_stencil_op {
+ STENCIL_KEEP = 0,
+ STENCIL_ZERO = 1,
+ STENCIL_REPLACE = 2,
+ STENCIL_INCR_CLAMP = 3,
+ STENCIL_DECR_CLAMP = 4,
+ STENCIL_INVERT = 5,
+ STENCIL_INCR_WRAP = 6,
+ STENCIL_DECR_WRAP = 7,
+};
+
+enum adreno_rb_blend_factor {
+ FACTOR_ZERO = 0,
+ FACTOR_ONE = 1,
+ FACTOR_SRC_COLOR = 4,
+ FACTOR_ONE_MINUS_SRC_COLOR = 5,
+ FACTOR_SRC_ALPHA = 6,
+ FACTOR_ONE_MINUS_SRC_ALPHA = 7,
+ FACTOR_DST_COLOR = 8,
+ FACTOR_ONE_MINUS_DST_COLOR = 9,
+ FACTOR_DST_ALPHA = 10,
+ FACTOR_ONE_MINUS_DST_ALPHA = 11,
+ FACTOR_CONSTANT_COLOR = 12,
+ FACTOR_ONE_MINUS_CONSTANT_COLOR = 13,
+ FACTOR_CONSTANT_ALPHA = 14,
+ FACTOR_ONE_MINUS_CONSTANT_ALPHA = 15,
+ FACTOR_SRC_ALPHA_SATURATE = 16,
+};
+
+enum adreno_rb_blend_opcode {
+ BLEND_DST_PLUS_SRC = 0,
+ BLEND_SRC_MINUS_DST = 1,
+ BLEND_MIN_DST_SRC = 2,
+ BLEND_MAX_DST_SRC = 3,
+ BLEND_DST_MINUS_SRC = 4,
+ BLEND_DST_PLUS_SRC_BIAS = 5,
+};
+
+enum adreno_rb_surface_endian {
+ ENDIAN_NONE = 0,
+ ENDIAN_8IN16 = 1,
+ ENDIAN_8IN32 = 2,
+ ENDIAN_16IN32 = 3,
+ ENDIAN_8IN64 = 4,
+ ENDIAN_8IN128 = 5,
+};
+
+enum adreno_rb_dither_mode {
+ DITHER_DISABLE = 0,
+ DITHER_ALWAYS = 1,
+ DITHER_IF_ALPHA_OFF = 2,
+};
+
+enum adreno_rb_depth_format {
+ DEPTHX_16 = 0,
+ DEPTHX_24_8 = 1,
+};
+
+enum adreno_mmu_clnt_beh {
+ BEH_NEVR = 0,
+ BEH_TRAN_RNG = 1,
+ BEH_TRAN_FLT = 2,
+};
+
+#define REG_AXXX_MH_MMU_CONFIG 0x00000040
+#define AXXX_MH_MMU_CONFIG_MMU_ENABLE 0x00000001
+#define AXXX_MH_MMU_CONFIG_SPLIT_MODE_ENABLE 0x00000002
+#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK 0x00000030
+#define AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT 4
+static inline uint32_t AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_RB_W_CLNT_BEHAVIOR__MASK;
+}
+#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK 0x000000c0
+#define AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT 6
+static inline uint32_t AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_W_CLNT_BEHAVIOR__MASK;
+}
+#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK 0x00000300
+#define AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT 8
+static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R0_CLNT_BEHAVIOR__MASK;
+}
+#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK 0x00000c00
+#define AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT 10
+static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R1_CLNT_BEHAVIOR__MASK;
+}
+#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK 0x00003000
+#define AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT 12
+static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R2_CLNT_BEHAVIOR__MASK;
+}
+#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK 0x0000c000
+#define AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT 14
+static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R3_CLNT_BEHAVIOR__MASK;
+}
+#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK 0x00030000
+#define AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT 16
+static inline uint32_t AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_CP_R4_CLNT_BEHAVIOR__MASK;
+}
+#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK 0x000c0000
+#define AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT 18
+static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R0_CLNT_BEHAVIOR__MASK;
+}
+#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK 0x00300000
+#define AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT 20
+static inline uint32_t AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_VGT_R1_CLNT_BEHAVIOR__MASK;
+}
+#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK 0x00c00000
+#define AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT 22
+static inline uint32_t AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_TC_R_CLNT_BEHAVIOR__MASK;
+}
+#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK 0x03000000
+#define AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT 24
+static inline uint32_t AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR(enum adreno_mmu_clnt_beh val)
+{
+ return ((val) << AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__SHIFT) & AXXX_MH_MMU_CONFIG_PA_W_CLNT_BEHAVIOR__MASK;
+}
+
+#define REG_AXXX_MH_MMU_VA_RANGE 0x00000041
+
+#define REG_AXXX_MH_MMU_PT_BASE 0x00000042
+
+#define REG_AXXX_MH_MMU_PAGE_FAULT 0x00000043
+
+#define REG_AXXX_MH_MMU_TRAN_ERROR 0x00000044
+
+#define REG_AXXX_MH_MMU_INVALIDATE 0x00000045
+
+#define REG_AXXX_MH_MMU_MPU_BASE 0x00000046
+
+#define REG_AXXX_MH_MMU_MPU_END 0x00000047
+
+#define REG_AXXX_CP_RB_BASE 0x000001c0
+
+#define REG_AXXX_CP_RB_CNTL 0x000001c1
+#define AXXX_CP_RB_CNTL_BUFSZ__MASK 0x0000003f
+#define AXXX_CP_RB_CNTL_BUFSZ__SHIFT 0
+static inline uint32_t AXXX_CP_RB_CNTL_BUFSZ(uint32_t val)
+{
+ return ((val) << AXXX_CP_RB_CNTL_BUFSZ__SHIFT) & AXXX_CP_RB_CNTL_BUFSZ__MASK;
+}
+#define AXXX_CP_RB_CNTL_BLKSZ__MASK 0x00003f00
+#define AXXX_CP_RB_CNTL_BLKSZ__SHIFT 8
+static inline uint32_t AXXX_CP_RB_CNTL_BLKSZ(uint32_t val)
+{
+ return ((val) << AXXX_CP_RB_CNTL_BLKSZ__SHIFT) & AXXX_CP_RB_CNTL_BLKSZ__MASK;
+}
+#define AXXX_CP_RB_CNTL_BUF_SWAP__MASK 0x00030000
+#define AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT 16
+static inline uint32_t AXXX_CP_RB_CNTL_BUF_SWAP(uint32_t val)
+{
+ return ((val) << AXXX_CP_RB_CNTL_BUF_SWAP__SHIFT) & AXXX_CP_RB_CNTL_BUF_SWAP__MASK;
+}
+#define AXXX_CP_RB_CNTL_POLL_EN 0x00100000
+#define AXXX_CP_RB_CNTL_NO_UPDATE 0x08000000
+#define AXXX_CP_RB_CNTL_RPTR_WR_EN 0x80000000
+
+#define REG_AXXX_CP_RB_RPTR_ADDR 0x000001c3
+#define AXXX_CP_RB_RPTR_ADDR_SWAP__MASK 0x00000003
+#define AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT 0
+static inline uint32_t AXXX_CP_RB_RPTR_ADDR_SWAP(uint32_t val)
+{
+ return ((val) << AXXX_CP_RB_RPTR_ADDR_SWAP__SHIFT) & AXXX_CP_RB_RPTR_ADDR_SWAP__MASK;
+}
+#define AXXX_CP_RB_RPTR_ADDR_ADDR__MASK 0xfffffffc
+#define AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT 2
+static inline uint32_t AXXX_CP_RB_RPTR_ADDR_ADDR(uint32_t val)
+{
+ return ((val >> 2) << AXXX_CP_RB_RPTR_ADDR_ADDR__SHIFT) & AXXX_CP_RB_RPTR_ADDR_ADDR__MASK;
+}
+
+#define REG_AXXX_CP_RB_RPTR 0x000001c4
+
+#define REG_AXXX_CP_RB_WPTR 0x000001c5
+
+#define REG_AXXX_CP_RB_WPTR_DELAY 0x000001c6
+
+#define REG_AXXX_CP_RB_RPTR_WR 0x000001c7
+
+#define REG_AXXX_CP_RB_WPTR_BASE 0x000001c8
+
+#define REG_AXXX_CP_QUEUE_THRESHOLDS 0x000001d5
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK 0x0000000f
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT 0
+static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(uint32_t val)
+{
+ return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START__MASK;
+}
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK 0x00000f00
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT 8
+static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(uint32_t val)
+{
+ return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START__MASK;
+}
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK 0x000f0000
+#define AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT 16
+static inline uint32_t AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START(uint32_t val)
+{
+ return ((val) << AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__SHIFT) & AXXX_CP_QUEUE_THRESHOLDS_CSQ_ST_START__MASK;
+}
+
+#define REG_AXXX_CP_MEQ_THRESHOLDS 0x000001d6
+
+#define REG_AXXX_CP_CSQ_AVAIL 0x000001d7
+#define AXXX_CP_CSQ_AVAIL_RING__MASK 0x0000007f
+#define AXXX_CP_CSQ_AVAIL_RING__SHIFT 0
+static inline uint32_t AXXX_CP_CSQ_AVAIL_RING(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_AVAIL_RING__SHIFT) & AXXX_CP_CSQ_AVAIL_RING__MASK;
+}
+#define AXXX_CP_CSQ_AVAIL_IB1__MASK 0x00007f00
+#define AXXX_CP_CSQ_AVAIL_IB1__SHIFT 8
+static inline uint32_t AXXX_CP_CSQ_AVAIL_IB1(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_AVAIL_IB1__SHIFT) & AXXX_CP_CSQ_AVAIL_IB1__MASK;
+}
+#define AXXX_CP_CSQ_AVAIL_IB2__MASK 0x007f0000
+#define AXXX_CP_CSQ_AVAIL_IB2__SHIFT 16
+static inline uint32_t AXXX_CP_CSQ_AVAIL_IB2(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_AVAIL_IB2__SHIFT) & AXXX_CP_CSQ_AVAIL_IB2__MASK;
+}
+
+#define REG_AXXX_CP_STQ_AVAIL 0x000001d8
+#define AXXX_CP_STQ_AVAIL_ST__MASK 0x0000007f
+#define AXXX_CP_STQ_AVAIL_ST__SHIFT 0
+static inline uint32_t AXXX_CP_STQ_AVAIL_ST(uint32_t val)
+{
+ return ((val) << AXXX_CP_STQ_AVAIL_ST__SHIFT) & AXXX_CP_STQ_AVAIL_ST__MASK;
+}
+
+#define REG_AXXX_CP_MEQ_AVAIL 0x000001d9
+#define AXXX_CP_MEQ_AVAIL_MEQ__MASK 0x0000001f
+#define AXXX_CP_MEQ_AVAIL_MEQ__SHIFT 0
+static inline uint32_t AXXX_CP_MEQ_AVAIL_MEQ(uint32_t val)
+{
+ return ((val) << AXXX_CP_MEQ_AVAIL_MEQ__SHIFT) & AXXX_CP_MEQ_AVAIL_MEQ__MASK;
+}
+
+#define REG_AXXX_SCRATCH_UMSK 0x000001dc
+#define AXXX_SCRATCH_UMSK_UMSK__MASK 0x000000ff
+#define AXXX_SCRATCH_UMSK_UMSK__SHIFT 0
+static inline uint32_t AXXX_SCRATCH_UMSK_UMSK(uint32_t val)
+{
+ return ((val) << AXXX_SCRATCH_UMSK_UMSK__SHIFT) & AXXX_SCRATCH_UMSK_UMSK__MASK;
+}
+#define AXXX_SCRATCH_UMSK_SWAP__MASK 0x00030000
+#define AXXX_SCRATCH_UMSK_SWAP__SHIFT 16
+static inline uint32_t AXXX_SCRATCH_UMSK_SWAP(uint32_t val)
+{
+ return ((val) << AXXX_SCRATCH_UMSK_SWAP__SHIFT) & AXXX_SCRATCH_UMSK_SWAP__MASK;
+}
+
+#define REG_AXXX_SCRATCH_ADDR 0x000001dd
+
+#define REG_AXXX_CP_ME_RDADDR 0x000001ea
+
+#define REG_AXXX_CP_STATE_DEBUG_INDEX 0x000001ec
+
+#define REG_AXXX_CP_STATE_DEBUG_DATA 0x000001ed
+
+#define REG_AXXX_CP_INT_CNTL 0x000001f2
+
+#define REG_AXXX_CP_INT_STATUS 0x000001f3
+
+#define REG_AXXX_CP_INT_ACK 0x000001f4
+
+#define REG_AXXX_CP_ME_CNTL 0x000001f6
+
+#define REG_AXXX_CP_ME_STATUS 0x000001f7
+
+#define REG_AXXX_CP_ME_RAM_WADDR 0x000001f8
+
+#define REG_AXXX_CP_ME_RAM_RADDR 0x000001f9
+
+#define REG_AXXX_CP_ME_RAM_DATA 0x000001fa
+
+#define REG_AXXX_CP_DEBUG 0x000001fc
+#define AXXX_CP_DEBUG_PREDICATE_DISABLE 0x00800000
+#define AXXX_CP_DEBUG_PROG_END_PTR_ENABLE 0x01000000
+#define AXXX_CP_DEBUG_MIU_128BIT_WRITE_ENABLE 0x02000000
+#define AXXX_CP_DEBUG_PREFETCH_PASS_NOPS 0x04000000
+#define AXXX_CP_DEBUG_DYNAMIC_CLK_DISABLE 0x08000000
+#define AXXX_CP_DEBUG_PREFETCH_MATCH_DISABLE 0x10000000
+#define AXXX_CP_DEBUG_SIMPLE_ME_FLOW_CONTROL 0x40000000
+#define AXXX_CP_DEBUG_MIU_WRITE_PACK_DISABLE 0x80000000
+
+#define REG_AXXX_CP_CSQ_RB_STAT 0x000001fd
+#define AXXX_CP_CSQ_RB_STAT_RPTR__MASK 0x0000007f
+#define AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT 0
+static inline uint32_t AXXX_CP_CSQ_RB_STAT_RPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_RB_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_RPTR__MASK;
+}
+#define AXXX_CP_CSQ_RB_STAT_WPTR__MASK 0x007f0000
+#define AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT 16
+static inline uint32_t AXXX_CP_CSQ_RB_STAT_WPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_RB_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_RB_STAT_WPTR__MASK;
+}
+
+#define REG_AXXX_CP_CSQ_IB1_STAT 0x000001fe
+#define AXXX_CP_CSQ_IB1_STAT_RPTR__MASK 0x0000007f
+#define AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT 0
+static inline uint32_t AXXX_CP_CSQ_IB1_STAT_RPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_IB1_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_RPTR__MASK;
+}
+#define AXXX_CP_CSQ_IB1_STAT_WPTR__MASK 0x007f0000
+#define AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT 16
+static inline uint32_t AXXX_CP_CSQ_IB1_STAT_WPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_IB1_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB1_STAT_WPTR__MASK;
+}
+
+#define REG_AXXX_CP_CSQ_IB2_STAT 0x000001ff
+#define AXXX_CP_CSQ_IB2_STAT_RPTR__MASK 0x0000007f
+#define AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT 0
+static inline uint32_t AXXX_CP_CSQ_IB2_STAT_RPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_IB2_STAT_RPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_RPTR__MASK;
+}
+#define AXXX_CP_CSQ_IB2_STAT_WPTR__MASK 0x007f0000
+#define AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT 16
+static inline uint32_t AXXX_CP_CSQ_IB2_STAT_WPTR(uint32_t val)
+{
+ return ((val) << AXXX_CP_CSQ_IB2_STAT_WPTR__SHIFT) & AXXX_CP_CSQ_IB2_STAT_WPTR__MASK;
+}
+
+#define REG_AXXX_CP_SCRATCH_REG0 0x00000578
+
+#define REG_AXXX_CP_SCRATCH_REG1 0x00000579
+
+#define REG_AXXX_CP_SCRATCH_REG2 0x0000057a
+
+#define REG_AXXX_CP_SCRATCH_REG3 0x0000057b
+
+#define REG_AXXX_CP_SCRATCH_REG4 0x0000057c
+
+#define REG_AXXX_CP_SCRATCH_REG5 0x0000057d
+
+#define REG_AXXX_CP_SCRATCH_REG6 0x0000057e
+
+#define REG_AXXX_CP_SCRATCH_REG7 0x0000057f
+
+#define REG_AXXX_CP_ME_CF_EVENT_SRC 0x0000060a
+
+#define REG_AXXX_CP_ME_CF_EVENT_ADDR 0x0000060b
+
+#define REG_AXXX_CP_ME_CF_EVENT_DATA 0x0000060c
+
+#define REG_AXXX_CP_ME_NRT_ADDR 0x0000060d
+
+#define REG_AXXX_CP_ME_NRT_DATA 0x0000060e
+
+
+#endif /* ADRENO_COMMON_XML */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
new file mode 100644
index 00000000000..a60584763b6
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -0,0 +1,370 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "adreno_gpu.h"
+#include "msm_gem.h"
+
+struct adreno_info {
+ struct adreno_rev rev;
+ uint32_t revn;
+ const char *name;
+ const char *pm4fw, *pfpfw;
+ uint32_t gmem;
+};
+
+#define ANY_ID 0xff
+
+static const struct adreno_info gpulist[] = {
+ {
+ .rev = ADRENO_REV(3, 0, 5, ANY_ID),
+ .revn = 305,
+ .name = "A305",
+ .pm4fw = "a300_pm4.fw",
+ .pfpfw = "a300_pfp.fw",
+ .gmem = SZ_256K,
+ }, {
+ .rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
+ .revn = 320,
+ .name = "A320",
+ .pm4fw = "a300_pm4.fw",
+ .pfpfw = "a300_pfp.fw",
+ .gmem = SZ_512K,
+ }, {
+ .rev = ADRENO_REV(3, 3, 0, 0),
+ .revn = 330,
+ .name = "A330",
+ .pm4fw = "a330_pm4.fw",
+ .pfpfw = "a330_pfp.fw",
+ .gmem = SZ_1M,
+ },
+};
+
+#define RB_SIZE SZ_32K
+#define RB_BLKSIZE 16
+
+int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ switch (param) {
+ case MSM_PARAM_GPU_ID:
+ *value = adreno_gpu->info->revn;
+ return 0;
+ case MSM_PARAM_GMEM_SIZE:
+ *value = adreno_gpu->info->gmem;
+ return 0;
+ default:
+ DBG("%s: invalid param: %u", gpu->name, param);
+ return -EINVAL;
+ }
+}
+
+#define rbmemptr(adreno_gpu, member) \
+ ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
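rbmemptr() resolves a member of struct adreno_rbmemptrs (declared in adreno_gpu.h in this patch) to the GPU-visible address the CP should write to, by adding the member's offsetof() to the buffer's iova. With the three consecutive uint32_t members, and assuming no padding, it expands to:

    rbmemptr(adreno_gpu, rptr)   /* memptrs_iova + 0 */
    rbmemptr(adreno_gpu, wptr)   /* memptrs_iova + 4 */
    rbmemptr(adreno_gpu, fence)  /* memptrs_iova + 8 */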
+
+int adreno_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ DBG("%s", gpu->name);
+
+ /* Setup REG_CP_RB_CNTL: */
+ gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+ /* size is log2(quad-words): */
+ AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
+ AXXX_CP_RB_CNTL_BLKSZ(RB_BLKSIZE));
+
+ /* Setup ringbuffer address: */
+ gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
+ gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr));
+
+ /* Setup scratch/timestamp: */
+ gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence));
+
+ gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1);
+
+ return 0;
+}
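To make the "log2(quad-words)" comment concrete: assuming gpu->rb->size is the RB_SIZE (SZ_32K) passed to msm_gpu_init() later in this file, the BUFSZ field works out as follows (illustrative arithmetic only):

    /* RB_SIZE = SZ_32K = 32768 bytes -> 32768 / 8 = 4096 quad-words */
    AXXX_CP_RB_CNTL_BUFSZ(ilog2(4096))   /* == AXXX_CP_RB_CNTL_BUFSZ(12) */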
+
+static uint32_t get_wptr(struct msm_ringbuffer *ring)
+{
+ return ring->cur - ring->start;
+}
+
+uint32_t adreno_last_fence(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ return adreno_gpu->memptrs->fence;
+}
+
+void adreno_recover(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct drm_device *dev = gpu->dev;
+ int ret;
+
+ gpu->funcs->pm_suspend(gpu);
+
+ /* reset ringbuffer: */
+ gpu->rb->cur = gpu->rb->start;
+
+ /* reset completed fence seqno, just discard anything pending: */
+ adreno_gpu->memptrs->fence = gpu->submitted_fence;
+
+ gpu->funcs->pm_resume(gpu);
+ ret = gpu->funcs->hw_init(gpu);
+ if (ret) {
+ dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+ /* hmm, oh well? */
+ }
+}
+
+int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_ringbuffer *ring = gpu->rb;
+ unsigned i, ibs = 0;
+
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ /* ignore IB-targets */
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ /* ignore if there has not been a ctx switch: */
+ if (priv->lastctx == ctx)
+ break;
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
+ OUT_RING(ring, submit->cmd[i].iova);
+ OUT_RING(ring, submit->cmd[i].size);
+ ibs++;
+ break;
+ }
+ }
+
+ /* on a320, at least, we seem to need to pad things out to an
+ * even number of qwords to avoid an issue with the CP hanging
+ * on wrap-around:
+ */
+ if (ibs % 2)
+ OUT_PKT2(ring);
+
+ OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
+ OUT_RING(ring, submit->fence);
+
+ if (adreno_is_a3xx(adreno_gpu)) {
+ /* Flush HLSQ lazy updates to make sure there is nothing
+ * pending for indirect loads after the timestamp has
+ * passed:
+ */
+ OUT_PKT3(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, HLSQ_FLUSH);
+
+ OUT_PKT3(ring, CP_WAIT_FOR_IDLE, 1);
+ OUT_RING(ring, 0x00000000);
+ }
+
+ OUT_PKT3(ring, CP_EVENT_WRITE, 3);
+ OUT_RING(ring, CACHE_FLUSH_TS);
+ OUT_RING(ring, rbmemptr(adreno_gpu, fence));
+ OUT_RING(ring, submit->fence);
+
+ /* we could maybe be clever and only CP_COND_EXEC the interrupt: */
+ OUT_PKT3(ring, CP_INTERRUPT, 1);
+ OUT_RING(ring, 0x80000000);
+
+#if 0
+ if (adreno_is_a3xx(adreno_gpu)) {
+ /* Dummy set-constant to trigger context rollover */
+ OUT_PKT3(ring, CP_SET_CONSTANT, 2);
+ OUT_RING(ring, CP_REG(REG_A3XX_HLSQ_CL_KERNEL_GROUP_X_REG));
+ OUT_RING(ring, 0x00000000);
+ }
+#endif
+
+ gpu->funcs->flush(gpu);
+
+ return 0;
+}
+
+void adreno_flush(struct msm_gpu *gpu)
+{
+ uint32_t wptr = get_wptr(gpu->rb);
+
+ /* ensure writes to ringbuffer have hit system memory: */
+ mb();
+
+ gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr);
+}
+
+void adreno_idle(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ uint32_t rptr, wptr = get_wptr(gpu->rb);
+ unsigned long t;
+
+ t = jiffies + ADRENO_IDLE_TIMEOUT;
+
+ /* then wait for CP to drain ringbuffer: */
+ do {
+ rptr = adreno_gpu->memptrs->rptr;
+ if (rptr == wptr)
+ return;
+ } while (time_before(jiffies, t));
+
+ DRM_ERROR("timeout waiting for %s to drain ringbuffer!\n", gpu->name);
+
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+}
+
+#ifdef CONFIG_DEBUG_FS
+void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
+ adreno_gpu->info->revn, adreno_gpu->rev.core,
+ adreno_gpu->rev.major, adreno_gpu->rev.minor,
+ adreno_gpu->rev.patchid);
+
+ seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
+ gpu->submitted_fence);
+ seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr);
+ seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
+ seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
+}
+#endif
+
+void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ uint32_t freedwords;
+ do {
+ uint32_t size = gpu->rb->size / 4;
+ uint32_t wptr = get_wptr(gpu->rb);
+ uint32_t rptr = adreno_gpu->memptrs->rptr;
+ freedwords = (rptr + (size - 1) - wptr) % size;
+ } while (freedwords < ndwords);
+}
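The freedwords expression is the standard ring-buffer free-space formula; the (size - 1) term reserves one slot so the write pointer can never catch up to the read pointer. A worked example with hypothetical values:

    /* size = 8192 dwords, rptr = 10, wptr = 8190 */
    freedwords = (10 + 8191 - 8190) % 8192;   /* = 11 dwords free */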
+
+static const char *iommu_ports[] = {
+ "gfx3d_user", "gfx3d_priv",
+ "gfx3d1_user", "gfx3d1_priv",
+};
+
+static inline bool _rev_match(uint8_t entry, uint8_t id)
+{
+ return (entry == ANY_ID) || (entry == id);
+}
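_rev_match() plus the ANY_ID wildcard is what lets a single gpulist entry cover a whole family of parts. For example, the APQ8064 v2 revision set up in a3xx_probe() resolves as:

    /* ADRENO_REV(3, 2, 0, 2) matches gpulist's ADRENO_REV(3, 2, ANY_ID, ANY_ID) -> "A320" */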
+
+int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
+ struct adreno_rev rev)
+{
+ int i, ret;
+
+ /* identify gpu: */
+ for (i = 0; i < ARRAY_SIZE(gpulist); i++) {
+ const struct adreno_info *info = &gpulist[i];
+ if (_rev_match(info->rev.core, rev.core) &&
+ _rev_match(info->rev.major, rev.major) &&
+ _rev_match(info->rev.minor, rev.minor) &&
+ _rev_match(info->rev.patchid, rev.patchid)) {
+ gpu->info = info;
+ gpu->revn = info->revn;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(gpulist)) {
+ dev_err(drm->dev, "Unknown GPU revision: %u.%u.%u.%u\n",
+ rev.core, rev.major, rev.minor, rev.patchid);
+ return -ENXIO;
+ }
+
+ DBG("Found GPU: %s (%u.%u.%u.%u)", gpu->info->name,
+ rev.core, rev.major, rev.minor, rev.patchid);
+
+ gpu->funcs = funcs;
+ gpu->rev = rev;
+
+ ret = request_firmware(&gpu->pm4, gpu->info->pm4fw, drm->dev);
+ if (ret) {
+ dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
+ gpu->info->pm4fw, ret);
+ return ret;
+ }
+
+ ret = request_firmware(&gpu->pfp, gpu->info->pfpfw, drm->dev);
+ if (ret) {
+ dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
+ gpu->info->pfpfw, ret);
+ return ret;
+ }
+
+ ret = msm_gpu_init(drm, pdev, &gpu->base, &funcs->base,
+ gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
+ RB_SIZE);
+ if (ret)
+ return ret;
+
+ ret = msm_iommu_attach(drm, gpu->base.iommu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ if (ret)
+ return ret;
+
+ gpu->memptrs_bo = msm_gem_new(drm, sizeof(*gpu->memptrs),
+ MSM_BO_UNCACHED);
+ if (IS_ERR(gpu->memptrs_bo)) {
+ ret = PTR_ERR(gpu->memptrs_bo);
+ gpu->memptrs_bo = NULL;
+ dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
+ return ret;
+ }
+
+ gpu->memptrs = msm_gem_vaddr_locked(gpu->memptrs_bo);
+ if (!gpu->memptrs) {
+ dev_err(drm->dev, "could not vmap memptrs\n");
+ return -ENOMEM;
+ }
+
+ ret = msm_gem_get_iova_locked(gpu->memptrs_bo, gpu->base.id,
+ &gpu->memptrs_iova);
+ if (ret) {
+ dev_err(drm->dev, "could not map memptrs: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void adreno_gpu_cleanup(struct adreno_gpu *gpu)
+{
+ if (gpu->memptrs_bo) {
+ if (gpu->memptrs_iova)
+ msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+ drm_gem_object_unreference(gpu->memptrs_bo);
+ }
+ if (gpu->pm4)
+ release_firmware(gpu->pm4);
+ if (gpu->pfp)
+ release_firmware(gpu->pfp);
+ msm_gpu_cleanup(&gpu->base);
+}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
new file mode 100644
index 00000000000..f73abfba7c2
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ADRENO_GPU_H__
+#define __ADRENO_GPU_H__
+
+#include <linux/firmware.h>
+
+#include "msm_gpu.h"
+
+#include "adreno_common.xml.h"
+#include "adreno_pm4.xml.h"
+
+struct adreno_rev {
+ uint8_t core;
+ uint8_t major;
+ uint8_t minor;
+ uint8_t patchid;
+};
+
+#define ADRENO_REV(core, major, minor, patchid) \
+ ((struct adreno_rev){ core, major, minor, patchid })
+
+struct adreno_gpu_funcs {
+ struct msm_gpu_funcs base;
+};
+
+struct adreno_info;
+
+struct adreno_rbmemptrs {
+ volatile uint32_t rptr;
+ volatile uint32_t wptr;
+ volatile uint32_t fence;
+};
+
+struct adreno_gpu {
+ struct msm_gpu base;
+ struct adreno_rev rev;
+ const struct adreno_info *info;
+ uint32_t revn; /* numeric revision name */
+ const struct adreno_gpu_funcs *funcs;
+
+ /* firmware: */
+ const struct firmware *pm4, *pfp;
+
+ /* ringbuffer rptr/wptr: */
+ // TODO should this be in msm_ringbuffer? I think it would be
+ // different for z180..
+ struct adreno_rbmemptrs *memptrs;
+ struct drm_gem_object *memptrs_bo;
+ uint32_t memptrs_iova;
+};
+#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
+
+/* platform config data (ie. from DT, or pdata) */
+struct adreno_platform_config {
+ struct adreno_rev rev;
+ uint32_t fast_rate, slow_rate, bus_freq;
+};
+
+#define ADRENO_IDLE_TIMEOUT (20 * 1000)
+
+static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
+{
+ return (gpu->revn >= 300) && (gpu->revn < 400);
+}
+
+static inline bool adreno_is_a305(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 305;
+}
+
+static inline bool adreno_is_a320(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 320;
+}
+
+static inline bool adreno_is_a330(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 330;
+}
+
+int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
+int adreno_hw_init(struct msm_gpu *gpu);
+uint32_t adreno_last_fence(struct msm_gpu *gpu);
+void adreno_recover(struct msm_gpu *gpu);
+int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx);
+void adreno_flush(struct msm_gpu *gpu);
+void adreno_idle(struct msm_gpu *gpu);
+#ifdef CONFIG_DEBUG_FS
+void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
+#endif
+void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords);
+
+int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
+ struct adreno_rev rev);
+void adreno_gpu_cleanup(struct adreno_gpu *gpu);
+
+
+/* ringbuffer helpers (the parts that are adreno specific) */
+
+static inline void
+OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
+{
+ adreno_wait_ring(ring->gpu, cnt+1);
+ OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
+}
+
+/* no-op packet: */
+static inline void
+OUT_PKT2(struct msm_ringbuffer *ring)
+{
+ adreno_wait_ring(ring->gpu, 1);
+ OUT_RING(ring, CP_TYPE2_PKT);
+}
+
+static inline void
+OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
+{
+ adreno_wait_ring(ring->gpu, cnt+1);
+ OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
+}
+
+
+#endif /* __ADRENO_GPU_H__ */
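To illustrate the packet header encoding in the helpers above, the CP_EVENT_WRITE emitted by adreno_submit() with a 3-dword payload begins with the following header (a worked expansion of OUT_PKT3(), not extra code in the patch):

    /* CP_TYPE3_PKT | ((3 - 1) << 16) | (CP_EVENT_WRITE << 8)
     *   = 0xc0000000 | 0x00020000 | 0x00004600
     *   = 0xc0024600
     */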
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
new file mode 100644
index 00000000000..94c13f418e7
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -0,0 +1,254 @@
+#ifndef ADRENO_PM4_XML
+#define ADRENO_PM4_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 327 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/a2xx/a2xx.xml ( 30005 bytes, from 2013-07-19 21:30:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_common.xml ( 8983 bytes, from 2013-07-24 01:38:36)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno_pm4.xml ( 9712 bytes, from 2013-05-26 15:22:37)
+- /home/robclark/src/freedreno/envytools/rnndb/a3xx/a3xx.xml ( 51415 bytes, from 2013-08-03 14:26:05)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum vgt_event_type {
+ VS_DEALLOC = 0,
+ PS_DEALLOC = 1,
+ VS_DONE_TS = 2,
+ PS_DONE_TS = 3,
+ CACHE_FLUSH_TS = 4,
+ CONTEXT_DONE = 5,
+ CACHE_FLUSH = 6,
+ HLSQ_FLUSH = 7,
+ VIZQUERY_START = 7,
+ VIZQUERY_END = 8,
+ SC_WAIT_WC = 9,
+ RST_PIX_CNT = 13,
+ RST_VTX_CNT = 14,
+ TILE_FLUSH = 15,
+ CACHE_FLUSH_AND_INV_TS_EVENT = 20,
+ ZPASS_DONE = 21,
+ CACHE_FLUSH_AND_INV_EVENT = 22,
+ PERFCOUNTER_START = 23,
+ PERFCOUNTER_STOP = 24,
+ VS_FETCH_DONE = 27,
+ FACENESS_FLUSH = 28,
+};
+
+enum pc_di_primtype {
+ DI_PT_NONE = 0,
+ DI_PT_POINTLIST = 1,
+ DI_PT_LINELIST = 2,
+ DI_PT_LINESTRIP = 3,
+ DI_PT_TRILIST = 4,
+ DI_PT_TRIFAN = 5,
+ DI_PT_TRISTRIP = 6,
+ DI_PT_RECTLIST = 8,
+ DI_PT_QUADLIST = 13,
+ DI_PT_QUADSTRIP = 14,
+ DI_PT_POLYGON = 15,
+ DI_PT_2D_COPY_RECT_LIST_V0 = 16,
+ DI_PT_2D_COPY_RECT_LIST_V1 = 17,
+ DI_PT_2D_COPY_RECT_LIST_V2 = 18,
+ DI_PT_2D_COPY_RECT_LIST_V3 = 19,
+ DI_PT_2D_FILL_RECT_LIST = 20,
+ DI_PT_2D_LINE_STRIP = 21,
+ DI_PT_2D_TRI_STRIP = 22,
+};
+
+enum pc_di_src_sel {
+ DI_SRC_SEL_DMA = 0,
+ DI_SRC_SEL_IMMEDIATE = 1,
+ DI_SRC_SEL_AUTO_INDEX = 2,
+ DI_SRC_SEL_RESERVED = 3,
+};
+
+enum pc_di_index_size {
+ INDEX_SIZE_IGN = 0,
+ INDEX_SIZE_16_BIT = 0,
+ INDEX_SIZE_32_BIT = 1,
+ INDEX_SIZE_8_BIT = 2,
+ INDEX_SIZE_INVALID = 0,
+};
+
+enum pc_di_vis_cull_mode {
+ IGNORE_VISIBILITY = 0,
+};
+
+enum adreno_pm4_packet_type {
+ CP_TYPE0_PKT = 0,
+ CP_TYPE1_PKT = 0x40000000,
+ CP_TYPE2_PKT = 0x80000000,
+ CP_TYPE3_PKT = 0xc0000000,
+};
+
+enum adreno_pm4_type3_packets {
+ CP_ME_INIT = 72,
+ CP_NOP = 16,
+ CP_INDIRECT_BUFFER = 63,
+ CP_INDIRECT_BUFFER_PFD = 55,
+ CP_WAIT_FOR_IDLE = 38,
+ CP_WAIT_REG_MEM = 60,
+ CP_WAIT_REG_EQ = 82,
+ CP_WAT_REG_GTE = 83,
+ CP_WAIT_UNTIL_READ = 92,
+ CP_WAIT_IB_PFD_COMPLETE = 93,
+ CP_REG_RMW = 33,
+ CP_SET_BIN_DATA = 47,
+ CP_REG_TO_MEM = 62,
+ CP_MEM_WRITE = 61,
+ CP_MEM_WRITE_CNTR = 79,
+ CP_COND_EXEC = 68,
+ CP_COND_WRITE = 69,
+ CP_EVENT_WRITE = 70,
+ CP_EVENT_WRITE_SHD = 88,
+ CP_EVENT_WRITE_CFL = 89,
+ CP_EVENT_WRITE_ZPD = 91,
+ CP_RUN_OPENCL = 49,
+ CP_DRAW_INDX = 34,
+ CP_DRAW_INDX_2 = 54,
+ CP_DRAW_INDX_BIN = 52,
+ CP_DRAW_INDX_2_BIN = 53,
+ CP_VIZ_QUERY = 35,
+ CP_SET_STATE = 37,
+ CP_SET_CONSTANT = 45,
+ CP_IM_LOAD = 39,
+ CP_IM_LOAD_IMMEDIATE = 43,
+ CP_LOAD_CONSTANT_CONTEXT = 46,
+ CP_INVALIDATE_STATE = 59,
+ CP_SET_SHADER_BASES = 74,
+ CP_SET_BIN_MASK = 80,
+ CP_SET_BIN_SELECT = 81,
+ CP_CONTEXT_UPDATE = 94,
+ CP_INTERRUPT = 64,
+ CP_IM_STORE = 44,
+ CP_SET_BIN_BASE_OFFSET = 75,
+ CP_SET_DRAW_INIT_FLAGS = 75,
+ CP_SET_PROTECTED_MODE = 95,
+ CP_LOAD_STATE = 48,
+ CP_COND_INDIRECT_BUFFER_PFE = 58,
+ CP_COND_INDIRECT_BUFFER_PFD = 50,
+ CP_INDIRECT_BUFFER_PFE = 63,
+ CP_SET_BIN = 76,
+};
+
+enum adreno_state_block {
+ SB_VERT_TEX = 0,
+ SB_VERT_MIPADDR = 1,
+ SB_FRAG_TEX = 2,
+ SB_FRAG_MIPADDR = 3,
+ SB_VERT_SHADER = 4,
+ SB_FRAG_SHADER = 6,
+};
+
+enum adreno_state_type {
+ ST_SHADER = 0,
+ ST_CONSTANTS = 1,
+};
+
+enum adreno_state_src {
+ SS_DIRECT = 0,
+ SS_INDIRECT = 4,
+};
+
+#define REG_CP_LOAD_STATE_0 0x00000000
+#define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff
+#define CP_LOAD_STATE_0_DST_OFF__SHIFT 0
+static inline uint32_t CP_LOAD_STATE_0_DST_OFF(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE_0_DST_OFF__SHIFT) & CP_LOAD_STATE_0_DST_OFF__MASK;
+}
+#define CP_LOAD_STATE_0_STATE_SRC__MASK 0x00070000
+#define CP_LOAD_STATE_0_STATE_SRC__SHIFT 16
+static inline uint32_t CP_LOAD_STATE_0_STATE_SRC(enum adreno_state_src val)
+{
+ return ((val) << CP_LOAD_STATE_0_STATE_SRC__SHIFT) & CP_LOAD_STATE_0_STATE_SRC__MASK;
+}
+#define CP_LOAD_STATE_0_STATE_BLOCK__MASK 0x00380000
+#define CP_LOAD_STATE_0_STATE_BLOCK__SHIFT 19
+static inline uint32_t CP_LOAD_STATE_0_STATE_BLOCK(enum adreno_state_block val)
+{
+ return ((val) << CP_LOAD_STATE_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE_0_STATE_BLOCK__MASK;
+}
+#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0x7fc00000
+#define CP_LOAD_STATE_0_NUM_UNIT__SHIFT 22
+static inline uint32_t CP_LOAD_STATE_0_NUM_UNIT(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE_0_NUM_UNIT__MASK;
+}
+
+#define REG_CP_LOAD_STATE_1 0x00000001
+#define CP_LOAD_STATE_1_STATE_TYPE__MASK 0x00000003
+#define CP_LOAD_STATE_1_STATE_TYPE__SHIFT 0
+static inline uint32_t CP_LOAD_STATE_1_STATE_TYPE(enum adreno_state_type val)
+{
+ return ((val) << CP_LOAD_STATE_1_STATE_TYPE__SHIFT) & CP_LOAD_STATE_1_STATE_TYPE__MASK;
+}
+#define CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK 0xfffffffc
+#define CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT 2
+static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
+{
+ return ((val >> 2) << CP_LOAD_STATE_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE_1_EXT_SRC_ADDR__MASK;
+}
+
+#define REG_CP_SET_BIN_0 0x00000000
+
+#define REG_CP_SET_BIN_1 0x00000001
+#define CP_SET_BIN_1_X1__MASK 0x0000ffff
+#define CP_SET_BIN_1_X1__SHIFT 0
+static inline uint32_t CP_SET_BIN_1_X1(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_1_X1__SHIFT) & CP_SET_BIN_1_X1__MASK;
+}
+#define CP_SET_BIN_1_Y1__MASK 0xffff0000
+#define CP_SET_BIN_1_Y1__SHIFT 16
+static inline uint32_t CP_SET_BIN_1_Y1(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_1_Y1__SHIFT) & CP_SET_BIN_1_Y1__MASK;
+}
+
+#define REG_CP_SET_BIN_2 0x00000002
+#define CP_SET_BIN_2_X2__MASK 0x0000ffff
+#define CP_SET_BIN_2_X2__SHIFT 0
+static inline uint32_t CP_SET_BIN_2_X2(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_2_X2__SHIFT) & CP_SET_BIN_2_X2__MASK;
+}
+#define CP_SET_BIN_2_Y2__MASK 0xffff0000
+#define CP_SET_BIN_2_Y2__SHIFT 16
+static inline uint32_t CP_SET_BIN_2_Y2(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_2_Y2__SHIFT) & CP_SET_BIN_2_Y2__MASK;
+}
+
+
+#endif /* ADRENO_PM4_XML */
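[Editorial aside, not part of the patch] The header above follows the rules-ng-ng convention: every register field gets a __MASK/__SHIFT pair plus a static inline packer that shifts and masks the value, so a command dword is built by OR-ing packers together. The userspace sketch below only illustrates that convention; the chosen destination offset, unit count, state block and source address are made-up example values, and it assumes the generated adreno_pm4.xml.h is on the include path.

#include <stdint.h>
#include <stdio.h>
#include "adreno_pm4.xml.h"

int main(void)
{
	/* Hypothetical example: pack the two CP_LOAD_STATE dwords for a
	 * direct load of 2 units of fragment-shader state at offset 0.
	 * The helpers mask the value, so an oversized field cannot spill
	 * into its neighbours. */
	uint32_t dword0 = CP_LOAD_STATE_0_DST_OFF(0) |
			CP_LOAD_STATE_0_STATE_SRC(SS_DIRECT) |
			CP_LOAD_STATE_0_STATE_BLOCK(SB_FRAG_SHADER) |
			CP_LOAD_STATE_0_NUM_UNIT(2);
	/* EXT_SRC_ADDR takes a byte address; the helper aligns it down to
	 * a 4-byte boundary before packing. */
	uint32_t dword1 = CP_LOAD_STATE_1_STATE_TYPE(ST_SHADER) |
			CP_LOAD_STATE_1_EXT_SRC_ADDR(0x1000);

	printf("CP_LOAD_STATE: 0x%08x 0x%08x\n",
			(unsigned)dword0, (unsigned)dword1);
	return 0;
}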
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
new file mode 100644
index 00000000000..6f8396be431
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -0,0 +1,502 @@
+#ifndef DSI_XML
+#define DSI_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum dsi_traffic_mode {
+ NON_BURST_SYNCH_PULSE = 0,
+ NON_BURST_SYNCH_EVENT = 1,
+ BURST_MODE = 2,
+};
+
+enum dsi_dst_format {
+ DST_FORMAT_RGB565 = 0,
+ DST_FORMAT_RGB666 = 1,
+ DST_FORMAT_RGB666_LOOSE = 2,
+ DST_FORMAT_RGB888 = 3,
+};
+
+enum dsi_rgb_swap {
+ SWAP_RGB = 0,
+ SWAP_RBG = 1,
+ SWAP_BGR = 2,
+ SWAP_BRG = 3,
+ SWAP_GRB = 4,
+ SWAP_GBR = 5,
+};
+
+enum dsi_cmd_trigger {
+ TRIGGER_NONE = 0,
+ TRIGGER_TE = 2,
+ TRIGGER_SW = 4,
+ TRIGGER_SW_SEOF = 5,
+ TRIGGER_SW_TE = 6,
+};
+
+#define DSI_IRQ_CMD_DMA_DONE 0x00000001
+#define DSI_IRQ_MASK_CMD_DMA_DONE 0x00000002
+#define DSI_IRQ_CMD_MDP_DONE 0x00000100
+#define DSI_IRQ_MASK_CMD_MDP_DONE 0x00000200
+#define DSI_IRQ_VIDEO_DONE 0x00010000
+#define DSI_IRQ_MASK_VIDEO_DONE 0x00020000
+#define DSI_IRQ_ERROR 0x01000000
+#define DSI_IRQ_MASK_ERROR 0x02000000
+#define REG_DSI_CTRL 0x00000000
+#define DSI_CTRL_ENABLE 0x00000001
+#define DSI_CTRL_VID_MODE_EN 0x00000002
+#define DSI_CTRL_CMD_MODE_EN 0x00000004
+#define DSI_CTRL_LANE0 0x00000010
+#define DSI_CTRL_LANE1 0x00000020
+#define DSI_CTRL_LANE2 0x00000040
+#define DSI_CTRL_LANE3 0x00000080
+#define DSI_CTRL_CLK_EN 0x00000100
+#define DSI_CTRL_ECC_CHECK 0x00100000
+#define DSI_CTRL_CRC_CHECK 0x01000000
+
+#define REG_DSI_STATUS0 0x00000004
+#define DSI_STATUS0_CMD_MODE_DMA_BUSY 0x00000002
+#define DSI_STATUS0_VIDEO_MODE_ENGINE_BUSY 0x00000008
+#define DSI_STATUS0_DSI_BUSY 0x00000010
+
+#define REG_DSI_FIFO_STATUS 0x00000008
+
+#define REG_DSI_VID_CFG0 0x0000000c
+#define DSI_VID_CFG0_VIRT_CHANNEL__MASK 0x00000003
+#define DSI_VID_CFG0_VIRT_CHANNEL__SHIFT 0
+static inline uint32_t DSI_VID_CFG0_VIRT_CHANNEL(uint32_t val)
+{
+ return ((val) << DSI_VID_CFG0_VIRT_CHANNEL__SHIFT) & DSI_VID_CFG0_VIRT_CHANNEL__MASK;
+}
+#define DSI_VID_CFG0_DST_FORMAT__MASK 0x00000030
+#define DSI_VID_CFG0_DST_FORMAT__SHIFT 4
+static inline uint32_t DSI_VID_CFG0_DST_FORMAT(enum dsi_dst_format val)
+{
+ return ((val) << DSI_VID_CFG0_DST_FORMAT__SHIFT) & DSI_VID_CFG0_DST_FORMAT__MASK;
+}
+#define DSI_VID_CFG0_TRAFFIC_MODE__MASK 0x00000300
+#define DSI_VID_CFG0_TRAFFIC_MODE__SHIFT 8
+static inline uint32_t DSI_VID_CFG0_TRAFFIC_MODE(enum dsi_traffic_mode val)
+{
+ return ((val) << DSI_VID_CFG0_TRAFFIC_MODE__SHIFT) & DSI_VID_CFG0_TRAFFIC_MODE__MASK;
+}
+#define DSI_VID_CFG0_BLLP_POWER_STOP 0x00001000
+#define DSI_VID_CFG0_EOF_BLLP_POWER_STOP 0x00008000
+#define DSI_VID_CFG0_HSA_POWER_STOP 0x00010000
+#define DSI_VID_CFG0_HBP_POWER_STOP 0x00100000
+#define DSI_VID_CFG0_HFP_POWER_STOP 0x01000000
+#define DSI_VID_CFG0_PULSE_MODE_HSA_HE 0x10000000
+
+#define REG_DSI_VID_CFG1 0x0000001c
+#define DSI_VID_CFG1_R_SEL 0x00000010
+#define DSI_VID_CFG1_G_SEL 0x00000100
+#define DSI_VID_CFG1_B_SEL 0x00001000
+#define DSI_VID_CFG1_RGB_SWAP__MASK 0x00070000
+#define DSI_VID_CFG1_RGB_SWAP__SHIFT 16
+static inline uint32_t DSI_VID_CFG1_RGB_SWAP(enum dsi_rgb_swap val)
+{
+ return ((val) << DSI_VID_CFG1_RGB_SWAP__SHIFT) & DSI_VID_CFG1_RGB_SWAP__MASK;
+}
+#define DSI_VID_CFG1_INTERLEAVE_MAX__MASK 0x00f00000
+#define DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT 20
+static inline uint32_t DSI_VID_CFG1_INTERLEAVE_MAX(uint32_t val)
+{
+ return ((val) << DSI_VID_CFG1_INTERLEAVE_MAX__SHIFT) & DSI_VID_CFG1_INTERLEAVE_MAX__MASK;
+}
+
+#define REG_DSI_ACTIVE_H 0x00000020
+#define DSI_ACTIVE_H_START__MASK 0x00000fff
+#define DSI_ACTIVE_H_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_H_START(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_H_START__SHIFT) & DSI_ACTIVE_H_START__MASK;
+}
+#define DSI_ACTIVE_H_END__MASK 0x0fff0000
+#define DSI_ACTIVE_H_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_H_END(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_H_END__SHIFT) & DSI_ACTIVE_H_END__MASK;
+}
+
+#define REG_DSI_ACTIVE_V 0x00000024
+#define DSI_ACTIVE_V_START__MASK 0x00000fff
+#define DSI_ACTIVE_V_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_V_START(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_V_START__SHIFT) & DSI_ACTIVE_V_START__MASK;
+}
+#define DSI_ACTIVE_V_END__MASK 0x0fff0000
+#define DSI_ACTIVE_V_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_V_END(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_V_END__SHIFT) & DSI_ACTIVE_V_END__MASK;
+}
+
+#define REG_DSI_TOTAL 0x00000028
+#define DSI_TOTAL_H_TOTAL__MASK 0x00000fff
+#define DSI_TOTAL_H_TOTAL__SHIFT 0
+static inline uint32_t DSI_TOTAL_H_TOTAL(uint32_t val)
+{
+ return ((val) << DSI_TOTAL_H_TOTAL__SHIFT) & DSI_TOTAL_H_TOTAL__MASK;
+}
+#define DSI_TOTAL_V_TOTAL__MASK 0x0fff0000
+#define DSI_TOTAL_V_TOTAL__SHIFT 16
+static inline uint32_t DSI_TOTAL_V_TOTAL(uint32_t val)
+{
+ return ((val) << DSI_TOTAL_V_TOTAL__SHIFT) & DSI_TOTAL_V_TOTAL__MASK;
+}
+
+#define REG_DSI_ACTIVE_HSYNC 0x0000002c
+#define DSI_ACTIVE_HSYNC_START__MASK 0x00000fff
+#define DSI_ACTIVE_HSYNC_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_HSYNC_START(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_HSYNC_START__SHIFT) & DSI_ACTIVE_HSYNC_START__MASK;
+}
+#define DSI_ACTIVE_HSYNC_END__MASK 0x0fff0000
+#define DSI_ACTIVE_HSYNC_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_HSYNC_END(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_HSYNC_END__SHIFT) & DSI_ACTIVE_HSYNC_END__MASK;
+}
+
+#define REG_DSI_ACTIVE_VSYNC 0x00000034
+#define DSI_ACTIVE_VSYNC_START__MASK 0x00000fff
+#define DSI_ACTIVE_VSYNC_START__SHIFT 0
+static inline uint32_t DSI_ACTIVE_VSYNC_START(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_VSYNC_START__SHIFT) & DSI_ACTIVE_VSYNC_START__MASK;
+}
+#define DSI_ACTIVE_VSYNC_END__MASK 0x0fff0000
+#define DSI_ACTIVE_VSYNC_END__SHIFT 16
+static inline uint32_t DSI_ACTIVE_VSYNC_END(uint32_t val)
+{
+ return ((val) << DSI_ACTIVE_VSYNC_END__SHIFT) & DSI_ACTIVE_VSYNC_END__MASK;
+}
+
+#define REG_DSI_CMD_DMA_CTRL 0x00000038
+#define DSI_CMD_DMA_CTRL_FROM_FRAME_BUFFER 0x10000000
+#define DSI_CMD_DMA_CTRL_LOW_POWER 0x04000000
+
+#define REG_DSI_CMD_CFG0 0x0000003c
+
+#define REG_DSI_CMD_CFG1 0x00000040
+
+#define REG_DSI_DMA_BASE 0x00000044
+
+#define REG_DSI_DMA_LEN 0x00000048
+
+#define REG_DSI_ACK_ERR_STATUS 0x00000064
+
+static inline uint32_t REG_DSI_RDBK(uint32_t i0) { return 0x00000068 + 0x4*i0; }
+
+static inline uint32_t REG_DSI_RDBK_DATA(uint32_t i0) { return 0x00000068 + 0x4*i0; }
+
+#define REG_DSI_TRIG_CTRL 0x00000080
+#define DSI_TRIG_CTRL_DMA_TRIGGER__MASK 0x0000000f
+#define DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT 0
+static inline uint32_t DSI_TRIG_CTRL_DMA_TRIGGER(enum dsi_cmd_trigger val)
+{
+ return ((val) << DSI_TRIG_CTRL_DMA_TRIGGER__SHIFT) & DSI_TRIG_CTRL_DMA_TRIGGER__MASK;
+}
+#define DSI_TRIG_CTRL_MDP_TRIGGER__MASK 0x000000f0
+#define DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT 4
+static inline uint32_t DSI_TRIG_CTRL_MDP_TRIGGER(enum dsi_cmd_trigger val)
+{
+ return ((val) << DSI_TRIG_CTRL_MDP_TRIGGER__SHIFT) & DSI_TRIG_CTRL_MDP_TRIGGER__MASK;
+}
+#define DSI_TRIG_CTRL_STREAM 0x00000100
+#define DSI_TRIG_CTRL_TE 0x80000000
+
+#define REG_DSI_TRIG_DMA 0x0000008c
+
+#define REG_DSI_DLN0_PHY_ERR 0x000000b0
+
+#define REG_DSI_TIMEOUT_STATUS 0x000000bc
+
+#define REG_DSI_CLKOUT_TIMING_CTRL 0x000000c0
+#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK 0x0000003f
+#define DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT 0
+static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE(uint32_t val)
+{
+ return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_PRE__MASK;
+}
+#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK 0x00003f00
+#define DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT 8
+static inline uint32_t DSI_CLKOUT_TIMING_CTRL_T_CLK_POST(uint32_t val)
+{
+ return ((val) << DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__SHIFT) & DSI_CLKOUT_TIMING_CTRL_T_CLK_POST__MASK;
+}
+
+#define REG_DSI_EOT_PACKET_CTRL 0x000000c8
+#define DSI_EOT_PACKET_CTRL_TX_EOT_APPEND 0x00000001
+#define DSI_EOT_PACKET_CTRL_RX_EOT_IGNORE 0x00000010
+
+#define REG_DSI_LANE_SWAP_CTRL 0x000000ac
+
+#define REG_DSI_ERR_INT_MASK0 0x00000108
+
+#define REG_DSI_INTR_CTRL 0x0000010c
+
+#define REG_DSI_RESET 0x00000114
+
+#define REG_DSI_CLK_CTRL 0x00000118
+
+#define REG_DSI_PHY_RESET 0x00000128
+
+#define REG_DSI_PHY_PLL_CTRL_0 0x00000200
+#define DSI_PHY_PLL_CTRL_0_ENABLE 0x00000001
+
+#define REG_DSI_PHY_PLL_CTRL_1 0x00000204
+
+#define REG_DSI_PHY_PLL_CTRL_2 0x00000208
+
+#define REG_DSI_PHY_PLL_CTRL_3 0x0000020c
+
+#define REG_DSI_PHY_PLL_CTRL_4 0x00000210
+
+#define REG_DSI_PHY_PLL_CTRL_5 0x00000214
+
+#define REG_DSI_PHY_PLL_CTRL_6 0x00000218
+
+#define REG_DSI_PHY_PLL_CTRL_7 0x0000021c
+
+#define REG_DSI_PHY_PLL_CTRL_8 0x00000220
+
+#define REG_DSI_PHY_PLL_CTRL_9 0x00000224
+
+#define REG_DSI_PHY_PLL_CTRL_10 0x00000228
+
+#define REG_DSI_PHY_PLL_CTRL_11 0x0000022c
+
+#define REG_DSI_PHY_PLL_CTRL_12 0x00000230
+
+#define REG_DSI_PHY_PLL_CTRL_13 0x00000234
+
+#define REG_DSI_PHY_PLL_CTRL_14 0x00000238
+
+#define REG_DSI_PHY_PLL_CTRL_15 0x0000023c
+
+#define REG_DSI_PHY_PLL_CTRL_16 0x00000240
+
+#define REG_DSI_PHY_PLL_CTRL_17 0x00000244
+
+#define REG_DSI_PHY_PLL_CTRL_18 0x00000248
+
+#define REG_DSI_PHY_PLL_CTRL_19 0x0000024c
+
+#define REG_DSI_PHY_PLL_CTRL_20 0x00000250
+
+#define REG_DSI_PHY_PLL_STATUS 0x00000280
+#define DSI_PHY_PLL_STATUS_PLL_BUSY 0x00000001
+
+#define REG_DSI_8x60_PHY_TPA_CTRL_1 0x00000258
+
+#define REG_DSI_8x60_PHY_TPA_CTRL_2 0x0000025c
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_0 0x00000260
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_1 0x00000264
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_2 0x00000268
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_3 0x0000026c
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_4 0x00000270
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_5 0x00000274
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_6 0x00000278
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_7 0x0000027c
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_8 0x00000280
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_9 0x00000284
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_10 0x00000288
+
+#define REG_DSI_8x60_PHY_TIMING_CTRL_11 0x0000028c
+
+#define REG_DSI_8x60_PHY_CTRL_0 0x00000290
+
+#define REG_DSI_8x60_PHY_CTRL_1 0x00000294
+
+#define REG_DSI_8x60_PHY_CTRL_2 0x00000298
+
+#define REG_DSI_8x60_PHY_CTRL_3 0x0000029c
+
+#define REG_DSI_8x60_PHY_STRENGTH_0 0x000002a0
+
+#define REG_DSI_8x60_PHY_STRENGTH_1 0x000002a4
+
+#define REG_DSI_8x60_PHY_STRENGTH_2 0x000002a8
+
+#define REG_DSI_8x60_PHY_STRENGTH_3 0x000002ac
+
+#define REG_DSI_8x60_PHY_REGULATOR_CTRL_0 0x000002cc
+
+#define REG_DSI_8x60_PHY_REGULATOR_CTRL_1 0x000002d0
+
+#define REG_DSI_8x60_PHY_REGULATOR_CTRL_2 0x000002d4
+
+#define REG_DSI_8x60_PHY_REGULATOR_CTRL_3 0x000002d8
+
+#define REG_DSI_8x60_PHY_REGULATOR_CTRL_4 0x000002dc
+
+#define REG_DSI_8x60_PHY_CAL_HW_TRIGGER 0x000000f0
+
+#define REG_DSI_8x60_PHY_CAL_CTRL 0x000000f4
+
+#define REG_DSI_8x60_PHY_CAL_STATUS 0x000000fc
+#define DSI_8x60_PHY_CAL_STATUS_CAL_BUSY 0x10000000
+
+static inline uint32_t REG_DSI_8960_LN(uint32_t i0) { return 0x00000300 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_8960_LN_CFG_0(uint32_t i0) { return 0x00000300 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_8960_LN_CFG_1(uint32_t i0) { return 0x00000304 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_8960_LN_CFG_2(uint32_t i0) { return 0x00000308 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_8960_LN_TEST_DATAPATH(uint32_t i0) { return 0x0000030c + 0x40*i0; }
+
+static inline uint32_t REG_DSI_8960_LN_TEST_STR_0(uint32_t i0) { return 0x00000314 + 0x40*i0; }
+
+static inline uint32_t REG_DSI_8960_LN_TEST_STR_1(uint32_t i0) { return 0x00000318 + 0x40*i0; }
+
+#define REG_DSI_8960_PHY_LNCK_CFG_0 0x00000400
+
+#define REG_DSI_8960_PHY_LNCK_CFG_1 0x00000404
+
+#define REG_DSI_8960_PHY_LNCK_CFG_2 0x00000408
+
+#define REG_DSI_8960_PHY_LNCK_TEST_DATAPATH 0x0000040c
+
+#define REG_DSI_8960_PHY_LNCK_TEST_STR0 0x00000414
+
+#define REG_DSI_8960_PHY_LNCK_TEST_STR1 0x00000418
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_0 0x00000440
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_1 0x00000444
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_2 0x00000448
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_3 0x0000044c
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_4 0x00000450
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_5 0x00000454
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_6 0x00000458
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_7 0x0000045c
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_8 0x00000460
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_9 0x00000464
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_10 0x00000468
+
+#define REG_DSI_8960_PHY_TIMING_CTRL_11 0x0000046c
+
+#define REG_DSI_8960_PHY_CTRL_0 0x00000470
+
+#define REG_DSI_8960_PHY_CTRL_1 0x00000474
+
+#define REG_DSI_8960_PHY_CTRL_2 0x00000478
+
+#define REG_DSI_8960_PHY_CTRL_3 0x0000047c
+
+#define REG_DSI_8960_PHY_STRENGTH_0 0x00000480
+
+#define REG_DSI_8960_PHY_STRENGTH_1 0x00000484
+
+#define REG_DSI_8960_PHY_STRENGTH_2 0x00000488
+
+#define REG_DSI_8960_PHY_BIST_CTRL_0 0x0000048c
+
+#define REG_DSI_8960_PHY_BIST_CTRL_1 0x00000490
+
+#define REG_DSI_8960_PHY_BIST_CTRL_2 0x00000494
+
+#define REG_DSI_8960_PHY_BIST_CTRL_3 0x00000498
+
+#define REG_DSI_8960_PHY_BIST_CTRL_4 0x0000049c
+
+#define REG_DSI_8960_PHY_LDO_CTRL 0x000004b0
+
+#define REG_DSI_8960_PHY_REGULATOR_CTRL_0 0x00000500
+
+#define REG_DSI_8960_PHY_REGULATOR_CTRL_1 0x00000504
+
+#define REG_DSI_8960_PHY_REGULATOR_CTRL_2 0x00000508
+
+#define REG_DSI_8960_PHY_REGULATOR_CTRL_3 0x0000050c
+
+#define REG_DSI_8960_PHY_REGULATOR_CTRL_4 0x00000510
+
+#define REG_DSI_8960_PHY_REGULATOR_CAL_PWR_CFG 0x00000518
+
+#define REG_DSI_8960_PHY_CAL_HW_TRIGGER 0x00000528
+
+#define REG_DSI_8960_PHY_CAL_SW_CFG_0 0x0000052c
+
+#define REG_DSI_8960_PHY_CAL_SW_CFG_1 0x00000530
+
+#define REG_DSI_8960_PHY_CAL_SW_CFG_2 0x00000534
+
+#define REG_DSI_8960_PHY_CAL_HW_CFG_0 0x00000538
+
+#define REG_DSI_8960_PHY_CAL_HW_CFG_1 0x0000053c
+
+#define REG_DSI_8960_PHY_CAL_HW_CFG_2 0x00000540
+
+#define REG_DSI_8960_PHY_CAL_HW_CFG_3 0x00000544
+
+#define REG_DSI_8960_PHY_CAL_HW_CFG_4 0x00000548
+
+#define REG_DSI_8960_PHY_CAL_STATUS 0x00000550
+#define DSI_8960_PHY_CAL_STATUS_CAL_BUSY 0x00000010
+
+
+#endif /* DSI_XML */
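[Editorial aside, not part of the patch] The DSI header uses the same packing scheme, with enums feeding the field packers. As a quick, hypothetical illustration (the RGB888/burst-mode/low-power choices are arbitrary example values, not taken from the patch), a REG_DSI_VID_CFG0 word could be composed like this, again assuming dsi.xml.h is on the include path:

#include <stdint.h>
#include <stdio.h>
#include "dsi.xml.h"

int main(void)
{
	/* Hypothetical RGB888, burst-mode configuration on virtual
	 * channel 0, with low-power stops during the horizontal
	 * blanking intervals. */
	uint32_t vid_cfg0 = DSI_VID_CFG0_VIRT_CHANNEL(0) |
			DSI_VID_CFG0_DST_FORMAT(DST_FORMAT_RGB888) |
			DSI_VID_CFG0_TRAFFIC_MODE(BURST_MODE) |
			DSI_VID_CFG0_HSA_POWER_STOP |
			DSI_VID_CFG0_HBP_POWER_STOP |
			DSI_VID_CFG0_HFP_POWER_STOP;

	printf("DSI_VID_CFG0 = 0x%08x\n", (unsigned)vid_cfg0);
	return 0;
}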
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
new file mode 100644
index 00000000000..aefc1b8feae
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -0,0 +1,114 @@
+#ifndef MMSS_CC_XML
+#define MMSS_CC_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mmss_cc_clk {
+ CLK = 0,
+ PCLK = 1,
+};
+
+#define REG_MMSS_CC_AHB 0x00000008
+
+static inline uint32_t __offset_CLK(enum mmss_cc_clk idx)
+{
+ switch (idx) {
+ case CLK: return 0x0000004c;
+ case PCLK: return 0x00000130;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MMSS_CC_CLK(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
+
+static inline uint32_t REG_MMSS_CC_CLK_CC(enum mmss_cc_clk i0) { return 0x00000000 + __offset_CLK(i0); }
+#define MMSS_CC_CLK_CC_CLK_EN 0x00000001
+#define MMSS_CC_CLK_CC_ROOT_EN 0x00000004
+#define MMSS_CC_CLK_CC_MND_EN 0x00000020
+#define MMSS_CC_CLK_CC_MND_MODE__MASK 0x000000c0
+#define MMSS_CC_CLK_CC_MND_MODE__SHIFT 6
+static inline uint32_t MMSS_CC_CLK_CC_MND_MODE(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_CC_MND_MODE__SHIFT) & MMSS_CC_CLK_CC_MND_MODE__MASK;
+}
+#define MMSS_CC_CLK_CC_PMXO_SEL__MASK 0x00000300
+#define MMSS_CC_CLK_CC_PMXO_SEL__SHIFT 8
+static inline uint32_t MMSS_CC_CLK_CC_PMXO_SEL(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_CC_PMXO_SEL__SHIFT) & MMSS_CC_CLK_CC_PMXO_SEL__MASK;
+}
+
+static inline uint32_t REG_MMSS_CC_CLK_MD(enum mmss_cc_clk i0) { return 0x00000004 + __offset_CLK(i0); }
+#define MMSS_CC_CLK_MD_D__MASK 0x000000ff
+#define MMSS_CC_CLK_MD_D__SHIFT 0
+static inline uint32_t MMSS_CC_CLK_MD_D(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_MD_D__SHIFT) & MMSS_CC_CLK_MD_D__MASK;
+}
+#define MMSS_CC_CLK_MD_M__MASK 0x0000ff00
+#define MMSS_CC_CLK_MD_M__SHIFT 8
+static inline uint32_t MMSS_CC_CLK_MD_M(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_MD_M__SHIFT) & MMSS_CC_CLK_MD_M__MASK;
+}
+
+static inline uint32_t REG_MMSS_CC_CLK_NS(enum mmss_cc_clk i0) { return 0x00000008 + __offset_CLK(i0); }
+#define MMSS_CC_CLK_NS_SRC__MASK 0x0000000f
+#define MMSS_CC_CLK_NS_SRC__SHIFT 0
+static inline uint32_t MMSS_CC_CLK_NS_SRC(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_NS_SRC__SHIFT) & MMSS_CC_CLK_NS_SRC__MASK;
+}
+#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK 0x00fff000
+#define MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT 12
+static inline uint32_t MMSS_CC_CLK_NS_PRE_DIV_FUNC(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_NS_PRE_DIV_FUNC__SHIFT) & MMSS_CC_CLK_NS_PRE_DIV_FUNC__MASK;
+}
+#define MMSS_CC_CLK_NS_VAL__MASK 0xff000000
+#define MMSS_CC_CLK_NS_VAL__SHIFT 24
+static inline uint32_t MMSS_CC_CLK_NS_VAL(uint32_t val)
+{
+ return ((val) << MMSS_CC_CLK_NS_VAL__SHIFT) & MMSS_CC_CLK_NS_VAL__MASK;
+}
+
+
+#endif /* MMSS_CC_XML */
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
new file mode 100644
index 00000000000..a225e8170b2
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -0,0 +1,48 @@
+#ifndef SFPB_XML
+#define SFPB_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define REG_SFPB_CFG 0x00000058
+
+
+#endif /* SFPB_XML */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
new file mode 100644
index 00000000000..50d11df35b2
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi.h"
+
+static struct platform_device *hdmi_pdev;
+
+void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
+{
+ uint32_t ctrl = 0;
+
+ if (power_on) {
+ ctrl |= HDMI_CTRL_ENABLE;
+ if (!hdmi->hdmi_mode) {
+ ctrl |= HDMI_CTRL_HDMI;
+ hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+ ctrl &= ~HDMI_CTRL_HDMI;
+ } else {
+ ctrl |= HDMI_CTRL_HDMI;
+ }
+ } else {
+ ctrl = HDMI_CTRL_HDMI;
+ }
+
+ hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+ DBG("HDMI Core: %s, HDMI_CTRL=0x%08x",
+ power_on ? "Enable" : "Disable", ctrl);
+}
+
+static irqreturn_t hdmi_irq(int irq, void *dev_id)
+{
+ struct hdmi *hdmi = dev_id;
+
+ /* Process HPD: */
+ hdmi_connector_irq(hdmi->connector);
+
+ /* Process DDC: */
+ hdmi_i2c_irq(hdmi->i2c);
+
+ /* TODO audio.. */
+
+ return IRQ_HANDLED;
+}
+
+void hdmi_destroy(struct kref *kref)
+{
+ struct hdmi *hdmi = container_of(kref, struct hdmi, refcount);
+ struct hdmi_phy *phy = hdmi->phy;
+
+ if (phy)
+ phy->funcs->destroy(phy);
+
+ if (hdmi->i2c)
+ hdmi_i2c_destroy(hdmi->i2c);
+
+ put_device(&hdmi->pdev->dev);
+}
+
+/* initialize connector */
+int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
+{
+ struct hdmi *hdmi = NULL;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = hdmi_pdev;
+ struct hdmi_platform_config *config;
+ int ret;
+
+ if (!pdev) {
+ dev_err(dev->dev, "no hdmi device\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ config = pdev->dev.platform_data;
+
+ hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
+ if (!hdmi) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ kref_init(&hdmi->refcount);
+
+ get_device(&pdev->dev);
+
+ hdmi->dev = dev;
+ hdmi->pdev = pdev;
+ hdmi->encoder = encoder;
+
+	/* not sure which phy maps to which msm variant; some are probably missing */

+ if (config->phy_init)
+ hdmi->phy = config->phy_init(hdmi);
+ else
+ hdmi->phy = ERR_PTR(-ENXIO);
+
+ if (IS_ERR(hdmi->phy)) {
+ ret = PTR_ERR(hdmi->phy);
+ dev_err(dev->dev, "failed to load phy: %d\n", ret);
+ hdmi->phy = NULL;
+ goto fail;
+ }
+
+ hdmi->mmio = msm_ioremap(pdev, "hdmi_msm_hdmi_addr", "HDMI");
+ if (IS_ERR(hdmi->mmio)) {
+ ret = PTR_ERR(hdmi->mmio);
+ goto fail;
+ }
+
+ hdmi->mvs = devm_regulator_get(&pdev->dev, "8901_hdmi_mvs");
+ if (IS_ERR(hdmi->mvs))
+ hdmi->mvs = devm_regulator_get(&pdev->dev, "hdmi_mvs");
+ if (IS_ERR(hdmi->mvs)) {
+ ret = PTR_ERR(hdmi->mvs);
+ dev_err(dev->dev, "failed to get mvs regulator: %d\n", ret);
+ goto fail;
+ }
+
+ hdmi->mpp0 = devm_regulator_get(&pdev->dev, "8901_mpp0");
+ if (IS_ERR(hdmi->mpp0))
+ hdmi->mpp0 = NULL;
+
+ hdmi->clk = devm_clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(hdmi->clk)) {
+ ret = PTR_ERR(hdmi->clk);
+ dev_err(dev->dev, "failed to get 'clk': %d\n", ret);
+ goto fail;
+ }
+
+ hdmi->m_pclk = devm_clk_get(&pdev->dev, "master_iface_clk");
+ if (IS_ERR(hdmi->m_pclk)) {
+ ret = PTR_ERR(hdmi->m_pclk);
+ dev_err(dev->dev, "failed to get 'm_pclk': %d\n", ret);
+ goto fail;
+ }
+
+ hdmi->s_pclk = devm_clk_get(&pdev->dev, "slave_iface_clk");
+ if (IS_ERR(hdmi->s_pclk)) {
+ ret = PTR_ERR(hdmi->s_pclk);
+ dev_err(dev->dev, "failed to get 's_pclk': %d\n", ret);
+ goto fail;
+ }
+
+ hdmi->i2c = hdmi_i2c_init(hdmi);
+ if (IS_ERR(hdmi->i2c)) {
+ ret = PTR_ERR(hdmi->i2c);
+ dev_err(dev->dev, "failed to get i2c: %d\n", ret);
+ hdmi->i2c = NULL;
+ goto fail;
+ }
+
+ hdmi->bridge = hdmi_bridge_init(hdmi);
+ if (IS_ERR(hdmi->bridge)) {
+ ret = PTR_ERR(hdmi->bridge);
+ dev_err(dev->dev, "failed to create HDMI bridge: %d\n", ret);
+ hdmi->bridge = NULL;
+ goto fail;
+ }
+
+ hdmi->connector = hdmi_connector_init(hdmi);
+ if (IS_ERR(hdmi->connector)) {
+ ret = PTR_ERR(hdmi->connector);
+ dev_err(dev->dev, "failed to create HDMI connector: %d\n", ret);
+ hdmi->connector = NULL;
+ goto fail;
+ }
+
+ hdmi->irq = platform_get_irq(pdev, 0);
+ if (hdmi->irq < 0) {
+ ret = hdmi->irq;
+ dev_err(dev->dev, "failed to get irq: %d\n", ret);
+ goto fail;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq,
+ NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "hdmi_isr", hdmi);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to request IRQ%u: %d\n",
+ hdmi->irq, ret);
+ goto fail;
+ }
+
+ encoder->bridge = hdmi->bridge;
+
+ priv->bridges[priv->num_bridges++] = hdmi->bridge;
+ priv->connectors[priv->num_connectors++] = hdmi->connector;
+
+ return 0;
+
+fail:
+ if (hdmi) {
+ /* bridge/connector are normally destroyed by drm: */
+ if (hdmi->bridge)
+ hdmi->bridge->funcs->destroy(hdmi->bridge);
+ if (hdmi->connector)
+ hdmi->connector->funcs->destroy(hdmi->connector);
+ hdmi_destroy(&hdmi->refcount);
+ }
+
+ return ret;
+}
+
+/*
+ * The hdmi device:
+ */
+
+static int hdmi_dev_probe(struct platform_device *pdev)
+{
+ static struct hdmi_platform_config config = {};
+#ifdef CONFIG_OF
+ /* TODO */
+#else
+ if (cpu_is_apq8064()) {
+ config.phy_init = hdmi_phy_8960_init;
+ config.ddc_clk_gpio = 70;
+ config.ddc_data_gpio = 71;
+ config.hpd_gpio = 72;
+ config.pmic_gpio = 13 + NR_GPIO_IRQS;
+ } else if (cpu_is_msm8960()) {
+ config.phy_init = hdmi_phy_8960_init;
+ config.ddc_clk_gpio = 100;
+ config.ddc_data_gpio = 101;
+ config.hpd_gpio = 102;
+ config.pmic_gpio = -1;
+ } else if (cpu_is_msm8x60()) {
+ config.phy_init = hdmi_phy_8x60_init;
+ config.ddc_clk_gpio = 170;
+ config.ddc_data_gpio = 171;
+ config.hpd_gpio = 172;
+ config.pmic_gpio = -1;
+ }
+#endif
+ pdev->dev.platform_data = &config;
+ hdmi_pdev = pdev;
+ return 0;
+}
+
+static int hdmi_dev_remove(struct platform_device *pdev)
+{
+ hdmi_pdev = NULL;
+ return 0;
+}
+
+static struct platform_driver hdmi_driver = {
+ .probe = hdmi_dev_probe,
+ .remove = hdmi_dev_remove,
+ .driver.name = "hdmi_msm",
+};
+
+void __init hdmi_register(void)
+{
+ platform_driver_register(&hdmi_driver);
+}
+
+void __exit hdmi_unregister(void)
+{
+ platform_driver_unregister(&hdmi_driver);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
new file mode 100644
index 00000000000..2c2ec566394
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __HDMI_CONNECTOR_H__
+#define __HDMI_CONNECTOR_H__
+
+#include <linux/i2c.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_drv.h"
+#include "hdmi.xml.h"
+
+
+struct hdmi_phy;
+
+struct hdmi {
+ struct kref refcount;
+
+ struct drm_device *dev;
+ struct platform_device *pdev;
+
+ void __iomem *mmio;
+
+ struct regulator *mvs; /* HDMI_5V */
+ struct regulator *mpp0; /* External 5V */
+
+ struct clk *clk;
+ struct clk *m_pclk;
+ struct clk *s_pclk;
+
+ struct hdmi_phy *phy;
+ struct i2c_adapter *i2c;
+ struct drm_connector *connector;
+ struct drm_bridge *bridge;
+
+ /* the encoder we are hooked to (outside of hdmi block) */
+ struct drm_encoder *encoder;
+
+ bool hdmi_mode; /* are we in hdmi mode? */
+
+ int irq;
+};
+
+/* platform config data (i.e. from DT, or pdata) */
+struct hdmi_platform_config {
+ struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
+ int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, pmic_gpio;
+};
+
+void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
+void hdmi_destroy(struct kref *kref);
+
+static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
+{
+ msm_writel(data, hdmi->mmio + reg);
+}
+
+static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
+{
+ return msm_readl(hdmi->mmio + reg);
+}
+
+static inline struct hdmi * hdmi_reference(struct hdmi *hdmi)
+{
+ kref_get(&hdmi->refcount);
+ return hdmi;
+}
+
+static inline void hdmi_unreference(struct hdmi *hdmi)
+{
+ kref_put(&hdmi->refcount, hdmi_destroy);
+}
+
+/*
+ * The phy appears to differ between SoCs, for example between 8960 and
+ * 8x60, so the phy-related functions are split out and the correct
+ * implementation is loaded at runtime:
+ */
+
+struct hdmi_phy_funcs {
+ void (*destroy)(struct hdmi_phy *phy);
+ void (*reset)(struct hdmi_phy *phy);
+ void (*powerup)(struct hdmi_phy *phy, unsigned long int pixclock);
+ void (*powerdown)(struct hdmi_phy *phy);
+};
+
+struct hdmi_phy {
+ const struct hdmi_phy_funcs *funcs;
+};
+
+struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi);
+struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi);
+
+/*
+ * hdmi bridge:
+ */
+
+struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi);
+
+/*
+ * hdmi connector:
+ */
+
+void hdmi_connector_irq(struct drm_connector *connector);
+struct drm_connector *hdmi_connector_init(struct hdmi *hdmi);
+
+/*
+ * i2c adapter for ddc:
+ */
+
+void hdmi_i2c_irq(struct i2c_adapter *i2c);
+void hdmi_i2c_destroy(struct i2c_adapter *i2c);
+struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi);
+
+#endif /* __HDMI_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
new file mode 100644
index 00000000000..f5fa4865e05
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -0,0 +1,508 @@
+#ifndef HDMI_XML
+#define HDMI_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum hdmi_hdcp_key_state {
+ NO_KEYS = 0,
+ NOT_CHECKED = 1,
+ CHECKING = 2,
+ KEYS_VALID = 3,
+ AKSV_INVALID = 4,
+ CHECKSUM_MISMATCH = 5,
+};
+
+enum hdmi_ddc_read_write {
+ DDC_WRITE = 0,
+ DDC_READ = 1,
+};
+
+enum hdmi_acr_cts {
+ ACR_NONE = 0,
+ ACR_32 = 1,
+ ACR_44 = 2,
+ ACR_48 = 3,
+};
+
+#define REG_HDMI_CTRL 0x00000000
+#define HDMI_CTRL_ENABLE 0x00000001
+#define HDMI_CTRL_HDMI 0x00000002
+#define HDMI_CTRL_ENCRYPTED 0x00000004
+
+#define REG_HDMI_AUDIO_PKT_CTRL1 0x00000020
+#define HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND 0x00000001
+
+#define REG_HDMI_ACR_PKT_CTRL 0x00000024
+#define HDMI_ACR_PKT_CTRL_CONT 0x00000001
+#define HDMI_ACR_PKT_CTRL_SEND 0x00000002
+#define HDMI_ACR_PKT_CTRL_SELECT__MASK 0x00000030
+#define HDMI_ACR_PKT_CTRL_SELECT__SHIFT 4
+static inline uint32_t HDMI_ACR_PKT_CTRL_SELECT(enum hdmi_acr_cts val)
+{
+ return ((val) << HDMI_ACR_PKT_CTRL_SELECT__SHIFT) & HDMI_ACR_PKT_CTRL_SELECT__MASK;
+}
+#define HDMI_ACR_PKT_CTRL_SOURCE 0x00000100
+#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK 0x00070000
+#define HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT 16
+static inline uint32_t HDMI_ACR_PKT_CTRL_N_MULTIPLIER(uint32_t val)
+{
+ return ((val) << HDMI_ACR_PKT_CTRL_N_MULTIPLIER__SHIFT) & HDMI_ACR_PKT_CTRL_N_MULTIPLIER__MASK;
+}
+#define HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY 0x80000000
+
+#define REG_HDMI_VBI_PKT_CTRL 0x00000028
+#define HDMI_VBI_PKT_CTRL_GC_ENABLE 0x00000010
+#define HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME 0x00000020
+#define HDMI_VBI_PKT_CTRL_ISRC_SEND 0x00000100
+#define HDMI_VBI_PKT_CTRL_ISRC_CONTINUOUS 0x00000200
+#define HDMI_VBI_PKT_CTRL_ACP_SEND 0x00001000
+#define HDMI_VBI_PKT_CTRL_ACP_SRC_SW 0x00002000
+
+#define REG_HDMI_INFOFRAME_CTRL0 0x0000002c
+#define HDMI_INFOFRAME_CTRL0_AVI_SEND 0x00000001
+#define HDMI_INFOFRAME_CTRL0_AVI_CONT 0x00000002
+#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND 0x00000010
+#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT 0x00000020
+#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE 0x00000040
+#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE 0x00000080
+
+#define REG_HDMI_GEN_PKT_CTRL 0x00000034
+#define HDMI_GEN_PKT_CTRL_GENERIC0_SEND 0x00000001
+#define HDMI_GEN_PKT_CTRL_GENERIC0_CONT 0x00000002
+#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK 0x0000000c
+#define HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT 2
+static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE(uint32_t val)
+{
+ return ((val) << HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC0_UPDATE__MASK;
+}
+#define HDMI_GEN_PKT_CTRL_GENERIC1_SEND 0x00000010
+#define HDMI_GEN_PKT_CTRL_GENERIC1_CONT 0x00000020
+#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK 0x003f0000
+#define HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT 16
+static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC0_LINE(uint32_t val)
+{
+ return ((val) << HDMI_GEN_PKT_CTRL_GENERIC0_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC0_LINE__MASK;
+}
+#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK 0x3f000000
+#define HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT 24
+static inline uint32_t HDMI_GEN_PKT_CTRL_GENERIC1_LINE(uint32_t val)
+{
+ return ((val) << HDMI_GEN_PKT_CTRL_GENERIC1_LINE__SHIFT) & HDMI_GEN_PKT_CTRL_GENERIC1_LINE__MASK;
+}
+
+#define REG_HDMI_GC 0x00000040
+#define HDMI_GC_MUTE 0x00000001
+
+#define REG_HDMI_AUDIO_PKT_CTRL2 0x00000044
+#define HDMI_AUDIO_PKT_CTRL2_OVERRIDE 0x00000001
+#define HDMI_AUDIO_PKT_CTRL2_LAYOUT 0x00000002
+
+static inline uint32_t REG_HDMI_AVI_INFO(uint32_t i0) { return 0x0000006c + 0x4*i0; }
+
+#define REG_HDMI_GENERIC0_HDR 0x00000084
+
+static inline uint32_t REG_HDMI_GENERIC0(uint32_t i0) { return 0x00000088 + 0x4*i0; }
+
+#define REG_HDMI_GENERIC1_HDR 0x000000a4
+
+static inline uint32_t REG_HDMI_GENERIC1(uint32_t i0) { return 0x000000a8 + 0x4*i0; }
+
+static inline uint32_t REG_HDMI_ACR(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
+
+static inline uint32_t REG_HDMI_ACR_0(uint32_t i0) { return 0x000000c4 + 0x8*i0; }
+#define HDMI_ACR_0_CTS__MASK 0xfffff000
+#define HDMI_ACR_0_CTS__SHIFT 12
+static inline uint32_t HDMI_ACR_0_CTS(uint32_t val)
+{
+ return ((val) << HDMI_ACR_0_CTS__SHIFT) & HDMI_ACR_0_CTS__MASK;
+}
+
+static inline uint32_t REG_HDMI_ACR_1(uint32_t i0) { return 0x000000c8 + 0x8*i0; }
+#define HDMI_ACR_1_N__MASK 0xffffffff
+#define HDMI_ACR_1_N__SHIFT 0
+static inline uint32_t HDMI_ACR_1_N(uint32_t val)
+{
+ return ((val) << HDMI_ACR_1_N__SHIFT) & HDMI_ACR_1_N__MASK;
+}
+
+#define REG_HDMI_AUDIO_INFO0 0x000000e4
+#define HDMI_AUDIO_INFO0_CHECKSUM__MASK 0x000000ff
+#define HDMI_AUDIO_INFO0_CHECKSUM__SHIFT 0
+static inline uint32_t HDMI_AUDIO_INFO0_CHECKSUM(uint32_t val)
+{
+ return ((val) << HDMI_AUDIO_INFO0_CHECKSUM__SHIFT) & HDMI_AUDIO_INFO0_CHECKSUM__MASK;
+}
+#define HDMI_AUDIO_INFO0_CC__MASK 0x00000700
+#define HDMI_AUDIO_INFO0_CC__SHIFT 8
+static inline uint32_t HDMI_AUDIO_INFO0_CC(uint32_t val)
+{
+ return ((val) << HDMI_AUDIO_INFO0_CC__SHIFT) & HDMI_AUDIO_INFO0_CC__MASK;
+}
+
+#define REG_HDMI_AUDIO_INFO1 0x000000e8
+#define HDMI_AUDIO_INFO1_CA__MASK 0x000000ff
+#define HDMI_AUDIO_INFO1_CA__SHIFT 0
+static inline uint32_t HDMI_AUDIO_INFO1_CA(uint32_t val)
+{
+ return ((val) << HDMI_AUDIO_INFO1_CA__SHIFT) & HDMI_AUDIO_INFO1_CA__MASK;
+}
+#define HDMI_AUDIO_INFO1_LSV__MASK 0x00007800
+#define HDMI_AUDIO_INFO1_LSV__SHIFT 11
+static inline uint32_t HDMI_AUDIO_INFO1_LSV(uint32_t val)
+{
+ return ((val) << HDMI_AUDIO_INFO1_LSV__SHIFT) & HDMI_AUDIO_INFO1_LSV__MASK;
+}
+#define HDMI_AUDIO_INFO1_DM_INH 0x00008000
+
+#define REG_HDMI_HDCP_CTRL 0x00000110
+#define HDMI_HDCP_CTRL_ENABLE 0x00000001
+#define HDMI_HDCP_CTRL_ENCRYPTION_ENABLE 0x00000100
+
+#define REG_HDMI_HDCP_INT_CTRL 0x00000118
+
+#define REG_HDMI_HDCP_LINK0_STATUS 0x0000011c
+#define HDMI_HDCP_LINK0_STATUS_AN_0_READY 0x00000100
+#define HDMI_HDCP_LINK0_STATUS_AN_1_READY 0x00000200
+#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK 0x70000000
+#define HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT 28
+static inline uint32_t HDMI_HDCP_LINK0_STATUS_KEY_STATE(enum hdmi_hdcp_key_state val)
+{
+ return ((val) << HDMI_HDCP_LINK0_STATUS_KEY_STATE__SHIFT) & HDMI_HDCP_LINK0_STATUS_KEY_STATE__MASK;
+}
+
+#define REG_HDMI_HDCP_RESET 0x00000130
+#define HDMI_HDCP_RESET_LINK0_DEAUTHENTICATE 0x00000001
+
+#define REG_HDMI_AUDIO_CFG 0x000001d0
+#define HDMI_AUDIO_CFG_ENGINE_ENABLE 0x00000001
+#define HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK 0x000000f0
+#define HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT 4
+static inline uint32_t HDMI_AUDIO_CFG_FIFO_WATERMARK(uint32_t val)
+{
+ return ((val) << HDMI_AUDIO_CFG_FIFO_WATERMARK__SHIFT) & HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK;
+}
+
+#define REG_HDMI_USEC_REFTIMER 0x00000208
+
+#define REG_HDMI_DDC_CTRL 0x0000020c
+#define HDMI_DDC_CTRL_GO 0x00000001
+#define HDMI_DDC_CTRL_SOFT_RESET 0x00000002
+#define HDMI_DDC_CTRL_SEND_RESET 0x00000004
+#define HDMI_DDC_CTRL_SW_STATUS_RESET 0x00000008
+#define HDMI_DDC_CTRL_TRANSACTION_CNT__MASK 0x00300000
+#define HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT 20
+static inline uint32_t HDMI_DDC_CTRL_TRANSACTION_CNT(uint32_t val)
+{
+ return ((val) << HDMI_DDC_CTRL_TRANSACTION_CNT__SHIFT) & HDMI_DDC_CTRL_TRANSACTION_CNT__MASK;
+}
+
+#define REG_HDMI_DDC_INT_CTRL 0x00000214
+#define HDMI_DDC_INT_CTRL_SW_DONE_INT 0x00000001
+#define HDMI_DDC_INT_CTRL_SW_DONE_ACK 0x00000002
+#define HDMI_DDC_INT_CTRL_SW_DONE_MASK 0x00000004
+
+#define REG_HDMI_DDC_SW_STATUS 0x00000218
+#define HDMI_DDC_SW_STATUS_NACK0 0x00001000
+#define HDMI_DDC_SW_STATUS_NACK1 0x00002000
+#define HDMI_DDC_SW_STATUS_NACK2 0x00004000
+#define HDMI_DDC_SW_STATUS_NACK3 0x00008000
+
+#define REG_HDMI_DDC_HW_STATUS 0x0000021c
+
+#define REG_HDMI_DDC_SPEED 0x00000220
+#define HDMI_DDC_SPEED_THRESHOLD__MASK 0x00000003
+#define HDMI_DDC_SPEED_THRESHOLD__SHIFT 0
+static inline uint32_t HDMI_DDC_SPEED_THRESHOLD(uint32_t val)
+{
+ return ((val) << HDMI_DDC_SPEED_THRESHOLD__SHIFT) & HDMI_DDC_SPEED_THRESHOLD__MASK;
+}
+#define HDMI_DDC_SPEED_PRESCALE__MASK 0xffff0000
+#define HDMI_DDC_SPEED_PRESCALE__SHIFT 16
+static inline uint32_t HDMI_DDC_SPEED_PRESCALE(uint32_t val)
+{
+ return ((val) << HDMI_DDC_SPEED_PRESCALE__SHIFT) & HDMI_DDC_SPEED_PRESCALE__MASK;
+}
+
+#define REG_HDMI_DDC_SETUP 0x00000224
+#define HDMI_DDC_SETUP_TIMEOUT__MASK 0xff000000
+#define HDMI_DDC_SETUP_TIMEOUT__SHIFT 24
+static inline uint32_t HDMI_DDC_SETUP_TIMEOUT(uint32_t val)
+{
+ return ((val) << HDMI_DDC_SETUP_TIMEOUT__SHIFT) & HDMI_DDC_SETUP_TIMEOUT__MASK;
+}
+
+static inline uint32_t REG_HDMI_I2C_TRANSACTION(uint32_t i0) { return 0x00000228 + 0x4*i0; }
+
+static inline uint32_t REG_HDMI_I2C_TRANSACTION_REG(uint32_t i0) { return 0x00000228 + 0x4*i0; }
+#define HDMI_I2C_TRANSACTION_REG_RW__MASK 0x00000001
+#define HDMI_I2C_TRANSACTION_REG_RW__SHIFT 0
+static inline uint32_t HDMI_I2C_TRANSACTION_REG_RW(enum hdmi_ddc_read_write val)
+{
+ return ((val) << HDMI_I2C_TRANSACTION_REG_RW__SHIFT) & HDMI_I2C_TRANSACTION_REG_RW__MASK;
+}
+#define HDMI_I2C_TRANSACTION_REG_STOP_ON_NACK 0x00000100
+#define HDMI_I2C_TRANSACTION_REG_START 0x00001000
+#define HDMI_I2C_TRANSACTION_REG_STOP 0x00002000
+#define HDMI_I2C_TRANSACTION_REG_CNT__MASK 0x00ff0000
+#define HDMI_I2C_TRANSACTION_REG_CNT__SHIFT 16
+static inline uint32_t HDMI_I2C_TRANSACTION_REG_CNT(uint32_t val)
+{
+ return ((val) << HDMI_I2C_TRANSACTION_REG_CNT__SHIFT) & HDMI_I2C_TRANSACTION_REG_CNT__MASK;
+}
+
+#define REG_HDMI_DDC_DATA 0x00000238
+#define HDMI_DDC_DATA_DATA_RW__MASK 0x00000001
+#define HDMI_DDC_DATA_DATA_RW__SHIFT 0
+static inline uint32_t HDMI_DDC_DATA_DATA_RW(enum hdmi_ddc_read_write val)
+{
+ return ((val) << HDMI_DDC_DATA_DATA_RW__SHIFT) & HDMI_DDC_DATA_DATA_RW__MASK;
+}
+#define HDMI_DDC_DATA_DATA__MASK 0x0000ff00
+#define HDMI_DDC_DATA_DATA__SHIFT 8
+static inline uint32_t HDMI_DDC_DATA_DATA(uint32_t val)
+{
+ return ((val) << HDMI_DDC_DATA_DATA__SHIFT) & HDMI_DDC_DATA_DATA__MASK;
+}
+#define HDMI_DDC_DATA_INDEX__MASK 0x00ff0000
+#define HDMI_DDC_DATA_INDEX__SHIFT 16
+static inline uint32_t HDMI_DDC_DATA_INDEX(uint32_t val)
+{
+ return ((val) << HDMI_DDC_DATA_INDEX__SHIFT) & HDMI_DDC_DATA_INDEX__MASK;
+}
+#define HDMI_DDC_DATA_INDEX_WRITE 0x80000000
+
+#define REG_HDMI_HPD_INT_STATUS 0x00000250
+#define HDMI_HPD_INT_STATUS_INT 0x00000001
+#define HDMI_HPD_INT_STATUS_CABLE_DETECTED 0x00000002
+
+#define REG_HDMI_HPD_INT_CTRL 0x00000254
+#define HDMI_HPD_INT_CTRL_INT_ACK 0x00000001
+#define HDMI_HPD_INT_CTRL_INT_CONNECT 0x00000002
+#define HDMI_HPD_INT_CTRL_INT_EN 0x00000004
+#define HDMI_HPD_INT_CTRL_RX_INT_ACK 0x00000010
+#define HDMI_HPD_INT_CTRL_RX_INT_EN 0x00000020
+#define HDMI_HPD_INT_CTRL_RCV_PLUGIN_DET_MASK 0x00000200
+
+#define REG_HDMI_HPD_CTRL 0x00000258
+#define HDMI_HPD_CTRL_TIMEOUT__MASK 0x00001fff
+#define HDMI_HPD_CTRL_TIMEOUT__SHIFT 0
+static inline uint32_t HDMI_HPD_CTRL_TIMEOUT(uint32_t val)
+{
+ return ((val) << HDMI_HPD_CTRL_TIMEOUT__SHIFT) & HDMI_HPD_CTRL_TIMEOUT__MASK;
+}
+#define HDMI_HPD_CTRL_ENABLE 0x10000000
+
+#define REG_HDMI_DDC_REF 0x0000027c
+#define HDMI_DDC_REF_REFTIMER_ENABLE 0x00010000
+#define HDMI_DDC_REF_REFTIMER__MASK 0x0000ffff
+#define HDMI_DDC_REF_REFTIMER__SHIFT 0
+static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val)
+{
+ return ((val) << HDMI_DDC_REF_REFTIMER__SHIFT) & HDMI_DDC_REF_REFTIMER__MASK;
+}
+
+#define REG_HDMI_ACTIVE_HSYNC 0x000002b4
+#define HDMI_ACTIVE_HSYNC_START__MASK 0x00000fff
+#define HDMI_ACTIVE_HSYNC_START__SHIFT 0
+static inline uint32_t HDMI_ACTIVE_HSYNC_START(uint32_t val)
+{
+ return ((val) << HDMI_ACTIVE_HSYNC_START__SHIFT) & HDMI_ACTIVE_HSYNC_START__MASK;
+}
+#define HDMI_ACTIVE_HSYNC_END__MASK 0x0fff0000
+#define HDMI_ACTIVE_HSYNC_END__SHIFT 16
+static inline uint32_t HDMI_ACTIVE_HSYNC_END(uint32_t val)
+{
+ return ((val) << HDMI_ACTIVE_HSYNC_END__SHIFT) & HDMI_ACTIVE_HSYNC_END__MASK;
+}
+
+#define REG_HDMI_ACTIVE_VSYNC 0x000002b8
+#define HDMI_ACTIVE_VSYNC_START__MASK 0x00000fff
+#define HDMI_ACTIVE_VSYNC_START__SHIFT 0
+static inline uint32_t HDMI_ACTIVE_VSYNC_START(uint32_t val)
+{
+ return ((val) << HDMI_ACTIVE_VSYNC_START__SHIFT) & HDMI_ACTIVE_VSYNC_START__MASK;
+}
+#define HDMI_ACTIVE_VSYNC_END__MASK 0x0fff0000
+#define HDMI_ACTIVE_VSYNC_END__SHIFT 16
+static inline uint32_t HDMI_ACTIVE_VSYNC_END(uint32_t val)
+{
+ return ((val) << HDMI_ACTIVE_VSYNC_END__SHIFT) & HDMI_ACTIVE_VSYNC_END__MASK;
+}
+
+#define REG_HDMI_VSYNC_ACTIVE_F2 0x000002bc
+#define HDMI_VSYNC_ACTIVE_F2_START__MASK 0x00000fff
+#define HDMI_VSYNC_ACTIVE_F2_START__SHIFT 0
+static inline uint32_t HDMI_VSYNC_ACTIVE_F2_START(uint32_t val)
+{
+ return ((val) << HDMI_VSYNC_ACTIVE_F2_START__SHIFT) & HDMI_VSYNC_ACTIVE_F2_START__MASK;
+}
+#define HDMI_VSYNC_ACTIVE_F2_END__MASK 0x0fff0000
+#define HDMI_VSYNC_ACTIVE_F2_END__SHIFT 16
+static inline uint32_t HDMI_VSYNC_ACTIVE_F2_END(uint32_t val)
+{
+ return ((val) << HDMI_VSYNC_ACTIVE_F2_END__SHIFT) & HDMI_VSYNC_ACTIVE_F2_END__MASK;
+}
+
+#define REG_HDMI_TOTAL 0x000002c0
+#define HDMI_TOTAL_H_TOTAL__MASK 0x00000fff
+#define HDMI_TOTAL_H_TOTAL__SHIFT 0
+static inline uint32_t HDMI_TOTAL_H_TOTAL(uint32_t val)
+{
+ return ((val) << HDMI_TOTAL_H_TOTAL__SHIFT) & HDMI_TOTAL_H_TOTAL__MASK;
+}
+#define HDMI_TOTAL_V_TOTAL__MASK 0x0fff0000
+#define HDMI_TOTAL_V_TOTAL__SHIFT 16
+static inline uint32_t HDMI_TOTAL_V_TOTAL(uint32_t val)
+{
+ return ((val) << HDMI_TOTAL_V_TOTAL__SHIFT) & HDMI_TOTAL_V_TOTAL__MASK;
+}
+
+#define REG_HDMI_VSYNC_TOTAL_F2 0x000002c4
+#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK 0x00000fff
+#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT 0
+static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
+{
+ return ((val) << HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT) & HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK;
+}
+
+#define REG_HDMI_FRAME_CTRL 0x000002c8
+#define HDMI_FRAME_CTRL_RGB_MUX_SEL_BGR 0x00001000
+#define HDMI_FRAME_CTRL_VSYNC_LOW 0x10000000
+#define HDMI_FRAME_CTRL_HSYNC_LOW 0x20000000
+#define HDMI_FRAME_CTRL_INTERLACED_EN 0x80000000
+
+#define REG_HDMI_PHY_CTRL 0x000002d4
+#define HDMI_PHY_CTRL_SW_RESET_PLL 0x00000001
+#define HDMI_PHY_CTRL_SW_RESET_PLL_LOW 0x00000002
+#define HDMI_PHY_CTRL_SW_RESET 0x00000004
+#define HDMI_PHY_CTRL_SW_RESET_LOW 0x00000008
+
+#define REG_HDMI_AUD_INT 0x000002cc
+#define HDMI_AUD_INT_AUD_FIFO_URUN_INT 0x00000001
+#define HDMI_AUD_INT_AUD_FIFO_URAN_MASK 0x00000002
+#define HDMI_AUD_INT_AUD_SAM_DROP_INT 0x00000004
+#define HDMI_AUD_INT_AUD_SAM_DROP_MASK 0x00000008
+
+#define REG_HDMI_8x60_PHY_REG0 0x00000300
+#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c
+#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT 2
+static inline uint32_t HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(uint32_t val)
+{
+ return ((val) << HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT) & HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK;
+}
+
+#define REG_HDMI_8x60_PHY_REG1 0x00000304
+#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK 0x000000f0
+#define HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT 4
+static inline uint32_t HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(uint32_t val)
+{
+ return ((val) << HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__SHIFT) & HDMI_8x60_PHY_REG1_DTEST_MUX_SEL__MASK;
+}
+#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK 0x0000000f
+#define HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT 0
+static inline uint32_t HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(uint32_t val)
+{
+ return ((val) << HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__SHIFT) & HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL__MASK;
+}
+
+#define REG_HDMI_8x60_PHY_REG2 0x00000308
+#define HDMI_8x60_PHY_REG2_PD_DESER 0x00000001
+#define HDMI_8x60_PHY_REG2_PD_DRIVE_1 0x00000002
+#define HDMI_8x60_PHY_REG2_PD_DRIVE_2 0x00000004
+#define HDMI_8x60_PHY_REG2_PD_DRIVE_3 0x00000008
+#define HDMI_8x60_PHY_REG2_PD_DRIVE_4 0x00000010
+#define HDMI_8x60_PHY_REG2_PD_PLL 0x00000020
+#define HDMI_8x60_PHY_REG2_PD_PWRGEN 0x00000040
+#define HDMI_8x60_PHY_REG2_RCV_SENSE_EN 0x00000080
+
+#define REG_HDMI_8x60_PHY_REG3 0x0000030c
+#define HDMI_8x60_PHY_REG3_PLL_ENABLE 0x00000001
+
+#define REG_HDMI_8x60_PHY_REG4 0x00000310
+
+#define REG_HDMI_8x60_PHY_REG5 0x00000314
+
+#define REG_HDMI_8x60_PHY_REG6 0x00000318
+
+#define REG_HDMI_8x60_PHY_REG7 0x0000031c
+
+#define REG_HDMI_8x60_PHY_REG8 0x00000320
+
+#define REG_HDMI_8x60_PHY_REG9 0x00000324
+
+#define REG_HDMI_8x60_PHY_REG10 0x00000328
+
+#define REG_HDMI_8x60_PHY_REG11 0x0000032c
+
+#define REG_HDMI_8x60_PHY_REG12 0x00000330
+#define HDMI_8x60_PHY_REG12_RETIMING_EN 0x00000001
+#define HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN 0x00000002
+#define HDMI_8x60_PHY_REG12_FORCE_LOCK 0x00000010
+
+#define REG_HDMI_8960_PHY_REG0 0x00000400
+
+#define REG_HDMI_8960_PHY_REG1 0x00000404
+
+#define REG_HDMI_8960_PHY_REG2 0x00000408
+
+#define REG_HDMI_8960_PHY_REG3 0x0000040c
+
+#define REG_HDMI_8960_PHY_REG4 0x00000410
+
+#define REG_HDMI_8960_PHY_REG5 0x00000414
+
+#define REG_HDMI_8960_PHY_REG6 0x00000418
+
+#define REG_HDMI_8960_PHY_REG7 0x0000041c
+
+#define REG_HDMI_8960_PHY_REG8 0x00000420
+
+#define REG_HDMI_8960_PHY_REG9 0x00000424
+
+#define REG_HDMI_8960_PHY_REG10 0x00000428
+
+#define REG_HDMI_8960_PHY_REG11 0x0000042c
+
+#define REG_HDMI_8960_PHY_REG12 0x00000430
+
+
+#endif /* HDMI_XML */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
new file mode 100644
index 00000000000..5a8ee3473cf
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi.h"
+
+struct hdmi_bridge {
+ struct drm_bridge base;
+
+ struct hdmi *hdmi;
+
+ unsigned long int pixclock;
+};
+#define to_hdmi_bridge(x) container_of(x, struct hdmi_bridge, base)
+
+static void hdmi_bridge_destroy(struct drm_bridge *bridge)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ hdmi_unreference(hdmi_bridge->hdmi);
+ drm_bridge_cleanup(bridge);
+ kfree(hdmi_bridge);
+}
+
+static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ struct hdmi_phy *phy = hdmi->phy;
+
+ DBG("power up");
+ phy->funcs->powerup(phy, hdmi_bridge->pixclock);
+ hdmi_set_mode(hdmi, true);
+}
+
+static void hdmi_bridge_enable(struct drm_bridge *bridge)
+{
+}
+
+static void hdmi_bridge_disable(struct drm_bridge *bridge)
+{
+}
+
+static void hdmi_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ struct hdmi_phy *phy = hdmi->phy;
+
+ DBG("power down");
+ hdmi_set_mode(hdmi, false);
+ phy->funcs->powerdown(phy);
+}
+
+static void hdmi_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = hdmi_bridge->hdmi;
+ int hstart, hend, vstart, vend;
+ uint32_t frame_ctrl;
+
+ mode = adjusted_mode;
+
+ hdmi_bridge->pixclock = mode->clock * 1000;
+
+ hdmi->hdmi_mode = drm_match_cea_mode(mode) > 1;
+
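+ /* Note (added for clarity): active start/end are measured from the
+ * leading edge of sync, so active video begins (h/v)total minus
+ * (h/v)sync_start in; the vertical values are adjusted by one for
+ * the hardware's line numbering.
+ */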
+ hstart = mode->htotal - mode->hsync_start;
+ hend = mode->htotal - mode->hsync_start + mode->hdisplay;
+
+ vstart = mode->vtotal - mode->vsync_start - 1;
+ vend = mode->vtotal - mode->vsync_start + mode->vdisplay - 1;
+
+ DBG("htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d",
+ mode->htotal, mode->vtotal, hstart, hend, vstart, vend);
+
+ hdmi_write(hdmi, REG_HDMI_TOTAL,
+ HDMI_TOTAL_H_TOTAL(mode->htotal - 1) |
+ HDMI_TOTAL_V_TOTAL(mode->vtotal - 1));
+
+ hdmi_write(hdmi, REG_HDMI_ACTIVE_HSYNC,
+ HDMI_ACTIVE_HSYNC_START(hstart) |
+ HDMI_ACTIVE_HSYNC_END(hend));
+ hdmi_write(hdmi, REG_HDMI_ACTIVE_VSYNC,
+ HDMI_ACTIVE_VSYNC_START(vstart) |
+ HDMI_ACTIVE_VSYNC_END(vend));
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
+ HDMI_VSYNC_TOTAL_F2_V_TOTAL(mode->vtotal));
+ hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
+ HDMI_VSYNC_ACTIVE_F2_START(vstart + 1) |
+ HDMI_VSYNC_ACTIVE_F2_END(vend + 1));
+ } else {
+ hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
+ HDMI_VSYNC_TOTAL_F2_V_TOTAL(0));
+ hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
+ HDMI_VSYNC_ACTIVE_F2_START(0) |
+ HDMI_VSYNC_ACTIVE_F2_END(0));
+ }
+
+ frame_ctrl = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ frame_ctrl |= HDMI_FRAME_CTRL_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ frame_ctrl |= HDMI_FRAME_CTRL_VSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN;
+ DBG("frame_ctrl=%08x", frame_ctrl);
+ hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
+
+ /* TODO: until we have audio, this might be safest: */
+ if (hdmi->hdmi_mode)
+ hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE);
+}
+
+static const struct drm_bridge_funcs hdmi_bridge_funcs = {
+ .pre_enable = hdmi_bridge_pre_enable,
+ .enable = hdmi_bridge_enable,
+ .disable = hdmi_bridge_disable,
+ .post_disable = hdmi_bridge_post_disable,
+ .mode_set = hdmi_bridge_mode_set,
+ .destroy = hdmi_bridge_destroy,
+};
+
+
+/* initialize bridge */
+struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi)
+{
+ struct drm_bridge *bridge = NULL;
+ struct hdmi_bridge *hdmi_bridge;
+ int ret;
+
+ hdmi_bridge = kzalloc(sizeof(*hdmi_bridge), GFP_KERNEL);
+ if (!hdmi_bridge) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ hdmi_bridge->hdmi = hdmi_reference(hdmi);
+
+ bridge = &hdmi_bridge->base;
+
+ drm_bridge_init(hdmi->dev, bridge, &hdmi_bridge_funcs);
+
+ return bridge;
+
+fail:
+ if (bridge)
+ hdmi_bridge_destroy(bridge);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
new file mode 100644
index 00000000000..823eee521a3
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -0,0 +1,367 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/gpio.h>
+
+#include "hdmi.h"
+
+struct hdmi_connector {
+ struct drm_connector base;
+ struct hdmi *hdmi;
+};
+#define to_hdmi_connector(x) container_of(x, struct hdmi_connector, base)
+
+static int gpio_config(struct hdmi *hdmi, bool on)
+{
+ struct drm_device *dev = hdmi->dev;
+ struct hdmi_platform_config *config =
+ hdmi->pdev->dev.platform_data;
+ int ret;
+
+ if (on) {
+ ret = gpio_request(config->ddc_clk_gpio, "HDMI_DDC_CLK");
+ if (ret) {
+ dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_DDC_CLK", config->ddc_clk_gpio, ret);
+ goto error1;
+ }
+ ret = gpio_request(config->ddc_data_gpio, "HDMI_DDC_DATA");
+ if (ret) {
+ dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_DDC_DATA", config->ddc_data_gpio, ret);
+ goto error2;
+ }
+ ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
+ if (ret) {
+ dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_HPD", config->hpd_gpio, ret);
+ goto error3;
+ }
+ if (config->pmic_gpio != -1) {
+ ret = gpio_request(config->pmic_gpio, "PMIC_HDMI_MUX_SEL");
+ if (ret) {
+ dev_err(dev->dev, "'%s'(%d) gpio_request failed: %d\n",
+ "PMIC_HDMI_MUX_SEL", config->pmic_gpio, ret);
+ goto error4;
+ }
+ gpio_set_value_cansleep(config->pmic_gpio, 0);
+ }
+ DBG("gpio on");
+ } else {
+ gpio_free(config->ddc_clk_gpio);
+ gpio_free(config->ddc_data_gpio);
+ gpio_free(config->hpd_gpio);
+
+ if (config->pmic_gpio != -1) {
+ gpio_set_value_cansleep(config->pmic_gpio, 1);
+ gpio_free(config->pmic_gpio);
+ }
+ DBG("gpio off");
+ }
+
+ return 0;
+
+error4:
+ gpio_free(config->hpd_gpio);
+error3:
+ gpio_free(config->ddc_data_gpio);
+error2:
+ gpio_free(config->ddc_clk_gpio);
+error1:
+ return ret;
+}
+
+static int hpd_enable(struct hdmi_connector *hdmi_connector)
+{
+ struct hdmi *hdmi = hdmi_connector->hdmi;
+ struct drm_device *dev = hdmi_connector->base.dev;
+ struct hdmi_phy *phy = hdmi->phy;
+ uint32_t hpd_ctrl;
+ int ret;
+
+ ret = gpio_config(hdmi, true);
+ if (ret) {
+ dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret);
+ goto fail;
+ }
+
+ ret = clk_prepare_enable(hdmi->clk);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable 'clk': %d\n", ret);
+ goto fail;
+ }
+
+ ret = clk_prepare_enable(hdmi->m_pclk);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable 'm_pclk': %d\n", ret);
+ goto fail;
+ }
+
+ ret = clk_prepare_enable(hdmi->s_pclk);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable 's_pclk': %d\n", ret);
+ goto fail;
+ }
+
+ if (hdmi->mpp0)
+ ret = regulator_enable(hdmi->mpp0);
+ if (!ret)
+ ret = regulator_enable(hdmi->mvs);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable regulators: %d\n", ret);
+ goto fail;
+ }
+
+ hdmi_set_mode(hdmi, false);
+ phy->funcs->reset(phy);
+ hdmi_set_mode(hdmi, true);
+
+ hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
+
+ /* enable HPD events: */
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
+ HDMI_HPD_INT_CTRL_INT_CONNECT |
+ HDMI_HPD_INT_CTRL_INT_EN);
+
+ /* set timeout to 4.1ms (max) for hardware debounce */
+ hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+ hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
+
+ /* Toggle HPD circuit to trigger HPD sense */
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+ ~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+ HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+static int hpd_disable(struct hdmi_connector *hdmi_connector)
+{
+ struct hdmi *hdmi = hdmi_connector->hdmi;
+ struct drm_device *dev = hdmi_connector->base.dev;
+ int ret = 0;
+
+ /* Disable HPD interrupt */
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
+
+ hdmi_set_mode(hdmi, false);
+
+ if (hdmi->mpp0)
+ ret = regulator_disable(hdmi->mpp0);
+ if (!ret)
+ ret = regulator_disable(hdmi->mvs);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable regulators: %d\n", ret);
+ goto fail;
+ }
+
+ clk_disable_unprepare(hdmi->clk);
+ clk_disable_unprepare(hdmi->m_pclk);
+ clk_disable_unprepare(hdmi->s_pclk);
+
+ ret = gpio_config(hdmi, false);
+ if (ret) {
+ dev_err(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+void hdmi_connector_irq(struct drm_connector *connector)
+{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+ struct hdmi *hdmi = hdmi_connector->hdmi;
+ uint32_t hpd_int_status, hpd_int_ctrl;
+
+ /* Process HPD: */
+ hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+ hpd_int_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_INT_CTRL);
+
+ if ((hpd_int_ctrl & HDMI_HPD_INT_CTRL_INT_EN) &&
+ (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
+ bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED);
+
+ DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
+
+ /* ack the irq: */
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
+ hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);
+
+ drm_helper_hpd_irq_event(connector->dev);
+
+ /* detect disconnect if we are connected, or vice versa: */
+ hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
+ if (!detected)
+ hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
+ }
+}
+
+static enum drm_connector_status hdmi_connector_detect(
+ struct drm_connector *connector, bool force)
+{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+ struct hdmi *hdmi = hdmi_connector->hdmi;
+ uint32_t hpd_int_status;
+ int retry = 20;
+
+ hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+
+ /* sense can be momentarily de-asserted in some cases; don't let
+ * that trick us into thinking the monitor is gone:
+ */
+ while (retry-- && !(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED)) {
+ mdelay(10);
+ hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+ DBG("status=%08x", hpd_int_status);
+ }
+
+ return (hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED) ?
+ connector_status_connected : connector_status_disconnected;
+}
+
+static void hdmi_connector_destroy(struct drm_connector *connector)
+{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+
+ hpd_disable(hdmi_connector);
+
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+
+ hdmi_unreference(hdmi_connector->hdmi);
+
+ kfree(hdmi_connector);
+}
+
+static int hdmi_connector_get_modes(struct drm_connector *connector)
+{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+ struct hdmi *hdmi = hdmi_connector->hdmi;
+ struct edid *edid;
+ uint32_t hdmi_ctrl;
+ int ret = 0;
+
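+ /* Note (added for clarity): the DDC block only operates while the
+ * controller is enabled, so force HDMI_CTRL_ENABLE for the duration
+ * of the EDID read and restore the previous value afterwards.
+ */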
+ hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
+
+ edid = drm_get_edid(connector, hdmi->i2c);
+
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
+
+ drm_mode_connector_update_edid_property(connector, edid);
+
+ if (edid) {
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ }
+
+ return ret;
+}
+
+static int hdmi_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+ struct msm_drm_private *priv = connector->dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ long actual, requested;
+
+ requested = 1000 * mode->clock;
+ actual = kms->funcs->round_pixclk(kms,
+ requested, hdmi_connector->hdmi->encoder);
+
+ DBG("requested=%ld, actual=%ld", requested, actual);
+
+ if (actual != requested)
+ return MODE_CLOCK_RANGE;
+
+ return 0;
+}
+
+static struct drm_encoder *
+hdmi_connector_best_encoder(struct drm_connector *connector)
+{
+ struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
+ return hdmi_connector->hdmi->encoder;
+}
+
+static const struct drm_connector_funcs hdmi_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = hdmi_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = hdmi_connector_destroy,
+};
+
+static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
+ .get_modes = hdmi_connector_get_modes,
+ .mode_valid = hdmi_connector_mode_valid,
+ .best_encoder = hdmi_connector_best_encoder,
+};
+
+/* initialize connector */
+struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
+{
+ struct drm_connector *connector = NULL;
+ struct hdmi_connector *hdmi_connector;
+ int ret;
+
+ hdmi_connector = kzalloc(sizeof(*hdmi_connector), GFP_KERNEL);
+ if (!hdmi_connector) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ hdmi_connector->hdmi = hdmi_reference(hdmi);
+
+ connector = &hdmi_connector->base;
+
+ drm_connector_init(hdmi->dev, connector, &hdmi_connector_funcs,
+ DRM_MODE_CONNECTOR_HDMIA);
+ drm_connector_helper_add(connector, &hdmi_connector_helper_funcs);
+
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+
+ connector->interlace_allowed = 1;
+ connector->doublescan_allowed = 0;
+
+ drm_sysfs_connector_add(connector);
+
+ ret = hpd_enable(hdmi_connector);
+ if (ret) {
+ dev_err(hdmi->dev->dev, "failed to enable HPD: %d\n", ret);
+ goto fail;
+ }
+
+ drm_mode_connector_attach_encoder(connector, hdmi->encoder);
+
+ return connector;
+
+fail:
+ if (connector)
+ hdmi_connector_destroy(connector);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
new file mode 100644
index 00000000000..f4ab7f70fed
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi.h"
+
+struct hdmi_i2c_adapter {
+ struct i2c_adapter base;
+ struct hdmi *hdmi;
+ bool sw_done;
+ wait_queue_head_t ddc_event;
+};
+#define to_hdmi_i2c_adapter(x) container_of(x, struct hdmi_i2c_adapter, base)
+
+static void init_ddc(struct hdmi_i2c_adapter *hdmi_i2c)
+{
+ struct hdmi *hdmi = hdmi_i2c->hdmi;
+
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
+ HDMI_DDC_CTRL_SW_STATUS_RESET);
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
+ HDMI_DDC_CTRL_SOFT_RESET);
+
+ hdmi_write(hdmi, REG_HDMI_DDC_SPEED,
+ HDMI_DDC_SPEED_THRESHOLD(2) |
+ HDMI_DDC_SPEED_PRESCALE(10));
+
+ hdmi_write(hdmi, REG_HDMI_DDC_SETUP,
+ HDMI_DDC_SETUP_TIMEOUT(0xff));
+
+ /* enable reference timer for 27us */
+ hdmi_write(hdmi, REG_HDMI_DDC_REF,
+ HDMI_DDC_REF_REFTIMER_ENABLE |
+ HDMI_DDC_REF_REFTIMER(27));
+}
+
+static int ddc_clear_irq(struct hdmi_i2c_adapter *hdmi_i2c)
+{
+ struct hdmi *hdmi = hdmi_i2c->hdmi;
+ struct drm_device *dev = hdmi->dev;
+ uint32_t retry = 0xffff;
+ uint32_t ddc_int_ctrl;
+
+ do {
+ --retry;
+
+ hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
+ HDMI_DDC_INT_CTRL_SW_DONE_ACK |
+ HDMI_DDC_INT_CTRL_SW_DONE_MASK);
+
+ ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);
+
+ } while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry);
+
+ if (!retry) {
+ dev_err(dev->dev, "timeout waiting for DDC\n");
+ return -ETIMEDOUT;
+ }
+
+ hdmi_i2c->sw_done = false;
+
+ return 0;
+}
+
+#define MAX_TRANSACTIONS 4
+
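+/* Check (and latch) the DDC software-done interrupt status.  Used both
+ * from hdmi_i2c_irq() and as the wait_event_timeout() condition in
+ * hdmi_i2c_xfer().
+ */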
+static bool sw_done(struct hdmi_i2c_adapter *hdmi_i2c)
+{
+ struct hdmi *hdmi = hdmi_i2c->hdmi;
+
+ if (!hdmi_i2c->sw_done) {
+ uint32_t ddc_int_ctrl;
+
+ ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);
+
+ if ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_MASK) &&
+ (ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT)) {
+ hdmi_i2c->sw_done = true;
+ hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
+ HDMI_DDC_INT_CTRL_SW_DONE_ACK);
+ }
+ }
+
+ return hdmi_i2c->sw_done;
+}
+
+static int hdmi_i2c_xfer(struct i2c_adapter *i2c,
+ struct i2c_msg *msgs, int num)
+{
+ struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
+ struct hdmi *hdmi = hdmi_i2c->hdmi;
+ struct drm_device *dev = hdmi->dev;
+ static const uint32_t nack[] = {
+ HDMI_DDC_SW_STATUS_NACK0, HDMI_DDC_SW_STATUS_NACK1,
+ HDMI_DDC_SW_STATUS_NACK2, HDMI_DDC_SW_STATUS_NACK3,
+ };
+ int indices[MAX_TRANSACTIONS];
+ int ret, i, j, index = 0;
+ uint32_t ddc_status, ddc_data, i2c_trans;
+
+ num = min(num, MAX_TRANSACTIONS);
+
+ WARN_ON(!(hdmi_read(hdmi, REG_HDMI_CTRL) & HDMI_CTRL_ENABLE));
+
+ if (num == 0)
+ return num;
+
+ init_ddc(hdmi_i2c);
+
+ ret = ddc_clear_irq(hdmi_i2c);
+ if (ret)
+ return ret;
+
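+ /* Note (added for clarity): each message contributes its slave-address
+ * byte (and, for writes, its payload) to the DDC byte FIFO; indices[]
+ * records the FIFO offset where each read transaction's data will be
+ * deposited, so the results can be fetched back after the transfer.
+ */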
+ for (i = 0; i < num; i++) {
+ struct i2c_msg *p = &msgs[i];
+ uint32_t raw_addr = p->addr << 1;
+
+ if (p->flags & I2C_M_RD)
+ raw_addr |= 1;
+
+ ddc_data = HDMI_DDC_DATA_DATA(raw_addr) |
+ HDMI_DDC_DATA_DATA_RW(DDC_WRITE);
+
+ if (i == 0) {
+ ddc_data |= HDMI_DDC_DATA_INDEX(0) |
+ HDMI_DDC_DATA_INDEX_WRITE;
+ }
+
+ hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
+ index++;
+
+ indices[i] = index;
+
+ if (p->flags & I2C_M_RD) {
+ index += p->len;
+ } else {
+ for (j = 0; j < p->len; j++) {
+ ddc_data = HDMI_DDC_DATA_DATA(p->buf[j]) |
+ HDMI_DDC_DATA_DATA_RW(DDC_WRITE);
+ hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
+ index++;
+ }
+ }
+
+ i2c_trans = HDMI_I2C_TRANSACTION_REG_CNT(p->len) |
+ HDMI_I2C_TRANSACTION_REG_RW(
+ (p->flags & I2C_M_RD) ? DDC_READ : DDC_WRITE) |
+ HDMI_I2C_TRANSACTION_REG_START;
+
+ if (i == (num - 1))
+ i2c_trans |= HDMI_I2C_TRANSACTION_REG_STOP;
+
+ hdmi_write(hdmi, REG_HDMI_I2C_TRANSACTION(i), i2c_trans);
+ }
+
+ /* trigger the transfer: */
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
+ HDMI_DDC_CTRL_TRANSACTION_CNT(num - 1) |
+ HDMI_DDC_CTRL_GO);
+
+ ret = wait_event_timeout(hdmi_i2c->ddc_event, sw_done(hdmi_i2c), HZ/4);
+ if (ret <= 0) {
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+ dev_warn(dev->dev, "DDC timeout: %d\n", ret);
+ DBG("sw_status=%08x, hw_status=%08x, int_ctrl=%08x",
+ hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS),
+ hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS),
+ hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL));
+ return ret;
+ }
+
+ ddc_status = hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS);
+
+ /* read back results of any read transactions: */
+ for (i = 0; i < num; i++) {
+ struct i2c_msg *p = &msgs[i];
+
+ if (!(p->flags & I2C_M_RD))
+ continue;
+
+ /* check for NACK: */
+ if (ddc_status & nack[i]) {
+ DBG("ddc_status=%08x", ddc_status);
+ break;
+ }
+
+ ddc_data = HDMI_DDC_DATA_DATA_RW(DDC_READ) |
+ HDMI_DDC_DATA_INDEX(indices[i]) |
+ HDMI_DDC_DATA_INDEX_WRITE;
+
+ hdmi_write(hdmi, REG_HDMI_DDC_DATA, ddc_data);
+
+ /* discard first byte: */
+ hdmi_read(hdmi, REG_HDMI_DDC_DATA);
+
+ for (j = 0; j < p->len; j++) {
+ ddc_data = hdmi_read(hdmi, REG_HDMI_DDC_DATA);
+ p->buf[j] = FIELD(ddc_data, HDMI_DDC_DATA_DATA);
+ }
+ }
+
+ return i;
+}
+
+static u32 hdmi_i2c_func(struct i2c_adapter *adapter)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm hdmi_i2c_algorithm = {
+ .master_xfer = hdmi_i2c_xfer,
+ .functionality = hdmi_i2c_func,
+};
+
+void hdmi_i2c_irq(struct i2c_adapter *i2c)
+{
+ struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
+
+ if (sw_done(hdmi_i2c))
+ wake_up_all(&hdmi_i2c->ddc_event);
+}
+
+void hdmi_i2c_destroy(struct i2c_adapter *i2c)
+{
+ struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(i2c);
+ i2c_del_adapter(i2c);
+ kfree(hdmi_i2c);
+}
+
+struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi)
+{
+ struct drm_device *dev = hdmi->dev;
+ struct hdmi_i2c_adapter *hdmi_i2c;
+ struct i2c_adapter *i2c = NULL;
+ int ret;
+
+ hdmi_i2c = kzalloc(sizeof(*hdmi_i2c), GFP_KERNEL);
+ if (!hdmi_i2c) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ i2c = &hdmi_i2c->base;
+
+ hdmi_i2c->hdmi = hdmi;
+ init_waitqueue_head(&hdmi_i2c->ddc_event);
+
+
+ i2c->owner = THIS_MODULE;
+ i2c->class = I2C_CLASS_DDC;
+ snprintf(i2c->name, sizeof(i2c->name), "msm hdmi i2c");
+ i2c->dev.parent = &hdmi->pdev->dev;
+ i2c->algo = &hdmi_i2c_algorithm;
+
+ ret = i2c_add_adapter(i2c);
+ if (ret) {
+ dev_err(dev->dev, "failed to register hdmi i2c: %d\n", ret);
+ goto fail;
+ }
+
+ return i2c;
+
+fail:
+ if (i2c)
+ hdmi_i2c_destroy(i2c);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
new file mode 100644
index 00000000000..e5b7ed5b8f0
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi.h"
+
+struct hdmi_phy_8960 {
+ struct hdmi_phy base;
+ struct hdmi *hdmi;
+};
+#define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base)
+
+static void hdmi_phy_8960_destroy(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
+ kfree(phy_8960);
+}
+
+static void hdmi_phy_8960_reset(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
+ struct hdmi *hdmi = phy_8960->hdmi;
+ unsigned int val;
+
+ val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
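+ /* Note (added for clarity): the *_LOW bits indicate an active-low
+ * reset; assert SW_RESET (and SW_RESET_PLL) with whichever polarity
+ * is active, wait, then de-assert them.
+ */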
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ }
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+ } else {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+ }
+
+ msleep(100);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ }
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW) {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+ } else {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+ }
+}
+
+static void hdmi_phy_8960_powerup(struct hdmi_phy *phy,
+ unsigned long int pixclock)
+{
+ struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
+ struct hdmi *hdmi = phy_8960->hdmi;
+
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG0, 0x1b);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG1, 0xf2);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG4, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG5, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG6, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG7, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG8, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG9, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG10, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG11, 0x00);
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG3, 0x20);
+}
+
+static void hdmi_phy_8960_powerdown(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8960 *phy_8960 = to_hdmi_phy_8960(phy);
+ struct hdmi *hdmi = phy_8960->hdmi;
+
+ hdmi_write(hdmi, REG_HDMI_8960_PHY_REG2, 0x7f);
+}
+
+static const struct hdmi_phy_funcs hdmi_phy_8960_funcs = {
+ .destroy = hdmi_phy_8960_destroy,
+ .reset = hdmi_phy_8960_reset,
+ .powerup = hdmi_phy_8960_powerup,
+ .powerdown = hdmi_phy_8960_powerdown,
+};
+
+struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
+{
+ struct hdmi_phy_8960 *phy_8960;
+ struct hdmi_phy *phy = NULL;
+ int ret;
+
+ phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL);
+ if (!phy_8960) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ phy = &phy_8960->base;
+
+ phy->funcs = &hdmi_phy_8960_funcs;
+
+ phy_8960->hdmi = hdmi;
+
+ return phy;
+
+fail:
+ if (phy)
+ hdmi_phy_8960_destroy(phy);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
new file mode 100644
index 00000000000..391433c1af7
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8x60.c
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hdmi.h"
+
+struct hdmi_phy_8x60 {
+ struct hdmi_phy base;
+ struct hdmi *hdmi;
+};
+#define to_hdmi_phy_8x60(x) container_of(x, struct hdmi_phy_8x60, base)
+
+static void hdmi_phy_8x60_destroy(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
+ kfree(phy_8x60);
+}
+
+static void hdmi_phy_8x60_reset(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
+ struct hdmi *hdmi = phy_8x60->hdmi;
+ unsigned int val;
+
+ val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ }
+
+ msleep(100);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW) {
+ /* pull high */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ } else {
+ /* pull low */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ }
+}
+
+static void hdmi_phy_8x60_powerup(struct hdmi_phy *phy,
+ unsigned long int pixclock)
+{
+ struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
+ struct hdmi *hdmi = phy_8x60->hdmi;
+
+ /* De-serializer delay D/C for non-lbk mode: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG0,
+ HDMI_8x60_PHY_REG0_DESER_DEL_CTRL(3));
+
+ if (pixclock == 27000000) {
+ /* video_format == HDMI_VFRMT_720x480p60_16_9 */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1,
+ HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
+ HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(3));
+ } else {
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG1,
+ HDMI_8x60_PHY_REG1_DTEST_MUX_SEL(5) |
+ HDMI_8x60_PHY_REG1_OUTVOL_SWING_CTRL(4));
+ }
+
+ /* No matter what, start from the power down mode: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_PWRGEN |
+ HDMI_8x60_PHY_REG2_PD_PLL |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+
+ /* Turn PowerGen on: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_PLL |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+
+ /* Turn PLL power on: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+
+ /* Write to HIGH after PLL power down de-assert: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3,
+ HDMI_8x60_PHY_REG3_PLL_ENABLE);
+
+ /* ASIC power on; PHY REG9 = 0 */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0);
+
+ /* Enable PLL lock detect (it goes high once the PLL locks) and
+ * enable the re-timing logic:
+ */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12,
+ HDMI_8x60_PHY_REG12_RETIMING_EN |
+ HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN);
+
+ /* Drivers are on: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_DESER);
+
+ /* If the RX detector is needed: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG4, 0);
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG5, 0);
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG6, 0);
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG7, 0);
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG8, 0);
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG9, 0);
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG10, 0);
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG11, 0);
+
+ /* If we want to use lock enable based on counting: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG12,
+ HDMI_8x60_PHY_REG12_RETIMING_EN |
+ HDMI_8x60_PHY_REG12_PLL_LOCK_DETECT_EN |
+ HDMI_8x60_PHY_REG12_FORCE_LOCK);
+}
+
+static void hdmi_phy_8x60_powerdown(struct hdmi_phy *phy)
+{
+ struct hdmi_phy_8x60 *phy_8x60 = to_hdmi_phy_8x60(phy);
+ struct hdmi *hdmi = phy_8x60->hdmi;
+
+ /* Assert RESET PHY from controller */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ HDMI_PHY_CTRL_SW_RESET);
+ udelay(10);
+ /* De-assert RESET PHY from controller */
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL, 0);
+ /* Turn off Driver */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+ udelay(10);
+ /* Disable PLL */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG3, 0);
+ /* Power down PHY, but keep RX-sense: */
+ hdmi_write(hdmi, REG_HDMI_8x60_PHY_REG2,
+ HDMI_8x60_PHY_REG2_RCV_SENSE_EN |
+ HDMI_8x60_PHY_REG2_PD_PWRGEN |
+ HDMI_8x60_PHY_REG2_PD_PLL |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_4 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_3 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_2 |
+ HDMI_8x60_PHY_REG2_PD_DRIVE_1 |
+ HDMI_8x60_PHY_REG2_PD_DESER);
+}
+
+static const struct hdmi_phy_funcs hdmi_phy_8x60_funcs = {
+ .destroy = hdmi_phy_8x60_destroy,
+ .reset = hdmi_phy_8x60_reset,
+ .powerup = hdmi_phy_8x60_powerup,
+ .powerdown = hdmi_phy_8x60_powerdown,
+};
+
+struct hdmi_phy *hdmi_phy_8x60_init(struct hdmi *hdmi)
+{
+ struct hdmi_phy_8x60 *phy_8x60;
+ struct hdmi_phy *phy = NULL;
+ int ret;
+
+ phy_8x60 = kzalloc(sizeof(*phy_8x60), GFP_KERNEL);
+ if (!phy_8x60) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ phy = &phy_8x60->base;
+
+ phy->funcs = &hdmi_phy_8x60_funcs;
+
+ phy_8x60->hdmi = hdmi;
+
+ return phy;
+
+fail:
+ if (phy)
+ hdmi_phy_8x60_destroy(phy);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
new file mode 100644
index 00000000000..bee36363bcd
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -0,0 +1,50 @@
+#ifndef QFPROM_XML
+#define QFPROM_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define REG_QFPROM_CONFIG_ROW0_LSB 0x00000238
+#define QFPROM_CONFIG_ROW0_LSB_HDMI_DISABLE 0x00200000
+#define QFPROM_CONFIG_ROW0_LSB_HDCP_DISABLE 0x00400000
+
+
+#endif /* QFPROM_XML */
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
new file mode 100644
index 00000000000..bbeeebe2db5
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4.xml.h
@@ -0,0 +1,1061 @@
+#ifndef MDP4_XML
+#define MDP4_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://0x04.net/cgit/index.cgi/rules-ng-ng
+git clone git://0x04.net/rules-ng-ng
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 595 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
+- /home/robclark/src/freedreno/envytools/rnndb/mdp4/mdp4.xml ( 19332 bytes, from 2013-08-16 22:16:36)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
+- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 19288 bytes, from 2013-08-11 18:14:15)
+
+Copyright (C) 2013 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum mpd4_bpc {
+ BPC1 = 0,
+ BPC5 = 1,
+ BPC6 = 2,
+ BPC8 = 3,
+};
+
+enum mpd4_bpc_alpha {
+ BPC1A = 0,
+ BPC4A = 1,
+ BPC6A = 2,
+ BPC8A = 3,
+};
+
+enum mpd4_alpha_type {
+ FG_CONST = 0,
+ BG_CONST = 1,
+ FG_PIXEL = 2,
+ BG_PIXEL = 3,
+};
+
+enum mpd4_pipe {
+ VG1 = 0,
+ VG2 = 1,
+ RGB1 = 2,
+ RGB2 = 3,
+ RGB3 = 4,
+ VG3 = 5,
+ VG4 = 6,
+};
+
+enum mpd4_mixer {
+ MIXER0 = 0,
+ MIXER1 = 1,
+ MIXER2 = 2,
+};
+
+enum mpd4_mixer_stage_id {
+ STAGE_UNUSED = 0,
+ STAGE_BASE = 1,
+ STAGE0 = 2,
+ STAGE1 = 3,
+ STAGE2 = 4,
+ STAGE3 = 5,
+};
+
+enum mdp4_intf {
+ INTF_LCDC_DTV = 0,
+ INTF_DSI_VIDEO = 1,
+ INTF_DSI_CMD = 2,
+ INTF_EBI2_TV = 3,
+};
+
+enum mdp4_cursor_format {
+ CURSOR_ARGB = 1,
+ CURSOR_XRGB = 2,
+};
+
+enum mdp4_dma {
+ DMA_P = 0,
+ DMA_S = 1,
+ DMA_E = 2,
+};
+
+#define MDP4_IRQ_OVERLAY0_DONE 0x00000001
+#define MDP4_IRQ_OVERLAY1_DONE 0x00000002
+#define MDP4_IRQ_DMA_S_DONE 0x00000004
+#define MDP4_IRQ_DMA_E_DONE 0x00000008
+#define MDP4_IRQ_DMA_P_DONE 0x00000010
+#define MDP4_IRQ_VG1_HISTOGRAM 0x00000020
+#define MDP4_IRQ_VG2_HISTOGRAM 0x00000040
+#define MDP4_IRQ_PRIMARY_VSYNC 0x00000080
+#define MDP4_IRQ_PRIMARY_INTF_UDERRUN 0x00000100
+#define MDP4_IRQ_EXTERNAL_VSYNC 0x00000200
+#define MDP4_IRQ_EXTERNAL_INTF_UDERRUN 0x00000400
+#define MDP4_IRQ_PRIMARY_RDPTR 0x00000800
+#define MDP4_IRQ_DMA_P_HISTOGRAM 0x00020000
+#define MDP4_IRQ_DMA_S_HISTOGRAM 0x04000000
+#define MDP4_IRQ_OVERLAY2_DONE 0x40000000
+#define REG_MDP4_VERSION 0x00000000
+#define MDP4_VERSION_MINOR__MASK 0x00ff0000
+#define MDP4_VERSION_MINOR__SHIFT 16
+static inline uint32_t MDP4_VERSION_MINOR(uint32_t val)
+{
+ return ((val) << MDP4_VERSION_MINOR__SHIFT) & MDP4_VERSION_MINOR__MASK;
+}
+#define MDP4_VERSION_MAJOR__MASK 0xff000000
+#define MDP4_VERSION_MAJOR__SHIFT 24
+static inline uint32_t MDP4_VERSION_MAJOR(uint32_t val)
+{
+ return ((val) << MDP4_VERSION_MAJOR__SHIFT) & MDP4_VERSION_MAJOR__MASK;
+}
+
+#define REG_MDP4_OVLP0_KICK 0x00000004
+
+#define REG_MDP4_OVLP1_KICK 0x00000008
+
+#define REG_MDP4_OVLP2_KICK 0x000000d0
+
+#define REG_MDP4_DMA_P_KICK 0x0000000c
+
+#define REG_MDP4_DMA_S_KICK 0x00000010
+
+#define REG_MDP4_DMA_E_KICK 0x00000014
+
+#define REG_MDP4_DISP_STATUS 0x00000018
+
+#define REG_MDP4_DISP_INTF_SEL 0x00000038
+#define MDP4_DISP_INTF_SEL_PRIM__MASK 0x00000003
+#define MDP4_DISP_INTF_SEL_PRIM__SHIFT 0
+static inline uint32_t MDP4_DISP_INTF_SEL_PRIM(enum mdp4_intf val)
+{
+ return ((val) << MDP4_DISP_INTF_SEL_PRIM__SHIFT) & MDP4_DISP_INTF_SEL_PRIM__MASK;
+}
+#define MDP4_DISP_INTF_SEL_SEC__MASK 0x0000000c
+#define MDP4_DISP_INTF_SEL_SEC__SHIFT 2
+static inline uint32_t MDP4_DISP_INTF_SEL_SEC(enum mdp4_intf val)
+{
+ return ((val) << MDP4_DISP_INTF_SEL_SEC__SHIFT) & MDP4_DISP_INTF_SEL_SEC__MASK;
+}
+#define MDP4_DISP_INTF_SEL_EXT__MASK 0x00000030
+#define MDP4_DISP_INTF_SEL_EXT__SHIFT 4
+static inline uint32_t MDP4_DISP_INTF_SEL_EXT(enum mdp4_intf val)
+{
+ return ((val) << MDP4_DISP_INTF_SEL_EXT__SHIFT) & MDP4_DISP_INTF_SEL_EXT__MASK;
+}
+#define MDP4_DISP_INTF_SEL_DSI_VIDEO 0x00000040
+#define MDP4_DISP_INTF_SEL_DSI_CMD 0x00000080
+
+#define REG_MDP4_RESET_STATUS 0x0000003c
+
+#define REG_MDP4_READ_CNFG 0x0000004c
+
+#define REG_MDP4_INTR_ENABLE 0x00000050
+
+#define REG_MDP4_INTR_STATUS 0x00000054
+
+#define REG_MDP4_INTR_CLEAR 0x00000058
+
+#define REG_MDP4_EBI2_LCD0 0x00000060
+
+#define REG_MDP4_EBI2_LCD1 0x00000064
+
+#define REG_MDP4_PORTMAP_MODE 0x00000070
+
+#define REG_MDP4_CS_CONTROLLER0 0x000000c0
+
+#define REG_MDP4_CS_CONTROLLER1 0x000000c4
+
+#define REG_MDP4_LAYERMIXER2_IN_CFG 0x000100f0
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK 0x00000007
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT 0
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE0__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE0_MIXER1 0x00000008
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK 0x00000070
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT 4
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE1__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE1_MIXER1 0x00000080
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK 0x00000700
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT 8
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE2__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE2_MIXER1 0x00000800
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK 0x00007000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT 12
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE3__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE3_MIXER1 0x00008000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK 0x00070000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT 16
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE4__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE4_MIXER1 0x00080000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK 0x00700000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT 20
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE5__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE5_MIXER1 0x00800000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK 0x07000000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT 24
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE6__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE6_MIXER1 0x08000000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK 0x70000000
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT 28
+static inline uint32_t MDP4_LAYERMIXER2_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER2_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER2_IN_CFG_PIPE7__MASK;
+}
+#define MDP4_LAYERMIXER2_IN_CFG_PIPE7_MIXER1 0x80000000
+
+#define REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD 0x000100fc
+
+#define REG_MDP4_LAYERMIXER_IN_CFG 0x00010100
+#define MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK 0x00000007
+#define MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT 0
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE0(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE0__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE0__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1 0x00000008
+#define MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK 0x00000070
+#define MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT 4
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE1(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE1__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE1__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1 0x00000080
+#define MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK 0x00000700
+#define MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT 8
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE2(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE2__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE2__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1 0x00000800
+#define MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK 0x00007000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT 12
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE3(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE3__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE3__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1 0x00008000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK 0x00070000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT 16
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE4(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE4__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE4__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1 0x00080000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK 0x00700000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT 20
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE5(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE5__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE5__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1 0x00800000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK 0x07000000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT 24
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE6(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE6__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE6__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1 0x08000000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK 0x70000000
+#define MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT 28
+static inline uint32_t MDP4_LAYERMIXER_IN_CFG_PIPE7(enum mpd4_mixer_stage_id val)
+{
+ return ((val) << MDP4_LAYERMIXER_IN_CFG_PIPE7__SHIFT) & MDP4_LAYERMIXER_IN_CFG_PIPE7__MASK;
+}
+#define MDP4_LAYERMIXER_IN_CFG_PIPE7_MIXER1 0x80000000
+
+#define REG_MDP4_VG2_SRC_FORMAT 0x00030050
+
+#define REG_MDP4_VG2_CONST_COLOR 0x00031008
+
+#define REG_MDP4_OVERLAY_FLUSH 0x00018000
+#define MDP4_OVERLAY_FLUSH_OVLP0 0x00000001
+#define MDP4_OVERLAY_FLUSH_OVLP1 0x00000002
+#define MDP4_OVERLAY_FLUSH_VG1 0x00000004
+#define MDP4_OVERLAY_FLUSH_VG2 0x00000008
+#define MDP4_OVERLAY_FLUSH_RGB1 0x00000010
+#define MDP4_OVERLAY_FLUSH_RGB2 0x00000020
+
+static inline uint32_t __offset_OVLP(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00010000;
+ case 1: return 0x00018000;
+ case 2: return 0x00088000;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP4_OVLP(uint32_t i0) { return 0x00000000 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_CFG(uint32_t i0) { return 0x00000004 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_SIZE(uint32_t i0) { return 0x00000008 + __offset_OVLP(i0); }
+#define MDP4_OVLP_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_OVLP_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_OVLP_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_OVLP_SIZE_HEIGHT__SHIFT) & MDP4_OVLP_SIZE_HEIGHT__MASK;
+}
+#define MDP4_OVLP_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_OVLP_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_OVLP_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_OVLP_SIZE_WIDTH__SHIFT) & MDP4_OVLP_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_OVLP_BASE(uint32_t i0) { return 0x0000000c + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_STRIDE(uint32_t i0) { return 0x00000010 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_OPMODE(uint32_t i0) { return 0x00000014 + __offset_OVLP(i0); }
+
+static inline uint32_t __offset_STAGE(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00000104;
+ case 1: return 0x00000124;
+ case 2: return 0x00000144;
+ case 3: return 0x00000160;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP4_OVLP_STAGE(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_OP(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+#define MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK 0x00000003
+#define MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT 0
+static inline uint32_t MDP4_OVLP_STAGE_OP_FG_ALPHA(enum mpd4_alpha_type val)
+{
+ return ((val) << MDP4_OVLP_STAGE_OP_FG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_FG_ALPHA__MASK;
+}
+#define MDP4_OVLP_STAGE_OP_FG_INV_ALPHA 0x00000004
+#define MDP4_OVLP_STAGE_OP_FG_MOD_ALPHA 0x00000008
+#define MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK 0x00000030
+#define MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT 4
+static inline uint32_t MDP4_OVLP_STAGE_OP_BG_ALPHA(enum mpd4_alpha_type val)
+{
+ return ((val) << MDP4_OVLP_STAGE_OP_BG_ALPHA__SHIFT) & MDP4_OVLP_STAGE_OP_BG_ALPHA__MASK;
+}
+#define MDP4_OVLP_STAGE_OP_BG_INV_ALPHA 0x00000040
+#define MDP4_OVLP_STAGE_OP_BG_MOD_ALPHA 0x00000080
+#define MDP4_OVLP_STAGE_OP_FG_TRANSP 0x00000100
+#define MDP4_OVLP_STAGE_OP_BG_TRANSP 0x00000200
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_FG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000004 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_BG_ALPHA(uint32_t i0, uint32_t i1) { return 0x00000008 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW0(uint32_t i0, uint32_t i1) { return 0x0000000c + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_LOW1(uint32_t i0, uint32_t i1) { return 0x00000010 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(uint32_t i0, uint32_t i1) { return 0x00000014 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(uint32_t i0, uint32_t i1) { return 0x00000018 + __offset_OVLP(i0) + __offset_STAGE(i1); }
+
+static inline uint32_t __offset_STAGE_CO3(uint32_t idx)
+{
+ switch (idx) {
+ case 0: return 0x00001004;
+ case 1: return 0x00001404;
+ case 2: return 0x00001804;
+ case 3: return 0x00001b84;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP4_OVLP_STAGE_CO3(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
+
+static inline uint32_t REG_MDP4_OVLP_STAGE_CO3_SEL(uint32_t i0, uint32_t i1) { return 0x00000000 + __offset_OVLP(i0) + __offset_STAGE_CO3(i1); }
+#define MDP4_OVLP_STAGE_CO3_SEL_FG_ALPHA 0x00000001
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW0(uint32_t i0) { return 0x00000180 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_LOW1(uint32_t i0) { return 0x00000184 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH0(uint32_t i0) { return 0x00000188 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_TRANSP_HIGH1(uint32_t i0) { return 0x0000018c + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_CONFIG(uint32_t i0) { return 0x00000200 + __offset_OVLP(i0); }
+
+static inline uint32_t REG_MDP4_OVLP_CSC(uint32_t i0) { return 0x00002000 + __offset_OVLP(i0); }
+
+
+static inline uint32_t REG_MDP4_OVLP_CSC_MV(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_MV_VAL(uint32_t i0, uint32_t i1) { return 0x00002400 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002500 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_BV_VAL(uint32_t i0, uint32_t i1) { return 0x00002580 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_PRE_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002600 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_OVLP_CSC_POST_LV_VAL(uint32_t i0, uint32_t i1) { return 0x00002680 + __offset_OVLP(i0) + 0x4*i1; }
+
+#define REG_MDP4_DMA_P_OP_MODE 0x00090070
+
+static inline uint32_t REG_MDP4_LUTN(uint32_t i0) { return 0x00094800 + 0x400*i0; }
+
+static inline uint32_t REG_MDP4_LUTN_LUT(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_LUTN_LUT_VAL(uint32_t i0, uint32_t i1) { return 0x00094800 + 0x400*i0 + 0x4*i1; }
+
+#define REG_MDP4_DMA_S_OP_MODE 0x000a0028
+
+static inline uint32_t REG_MDP4_DMA_E_QUANT(uint32_t i0) { return 0x000b0070 + 0x4*i0; }
+
+static inline uint32_t __offset_DMA(enum mdp4_dma idx)
+{
+ switch (idx) {
+ case DMA_P: return 0x00090000;
+ case DMA_S: return 0x000a0000;
+ case DMA_E: return 0x000b0000;
+ default: return INVALID_IDX(idx);
+ }
+}
+static inline uint32_t REG_MDP4_DMA(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_CONFIG(enum mdp4_dma i0) { return 0x00000000 + __offset_DMA(i0); }
+#define MDP4_DMA_CONFIG_G_BPC__MASK 0x00000003
+#define MDP4_DMA_CONFIG_G_BPC__SHIFT 0
+static inline uint32_t MDP4_DMA_CONFIG_G_BPC(enum mpd4_bpc val)
+{
+ return ((val) << MDP4_DMA_CONFIG_G_BPC__SHIFT) & MDP4_DMA_CONFIG_G_BPC__MASK;
+}
+#define MDP4_DMA_CONFIG_B_BPC__MASK 0x0000000c
+#define MDP4_DMA_CONFIG_B_BPC__SHIFT 2
+static inline uint32_t MDP4_DMA_CONFIG_B_BPC(enum mpd4_bpc val)
+{
+ return ((val) << MDP4_DMA_CONFIG_B_BPC__SHIFT) & MDP4_DMA_CONFIG_B_BPC__MASK;
+}
+#define MDP4_DMA_CONFIG_R_BPC__MASK 0x00000030
+#define MDP4_DMA_CONFIG_R_BPC__SHIFT 4
+static inline uint32_t MDP4_DMA_CONFIG_R_BPC(enum mpd4_bpc val)
+{
+ return ((val) << MDP4_DMA_CONFIG_R_BPC__SHIFT) & MDP4_DMA_CONFIG_R_BPC__MASK;
+}
+#define MDP4_DMA_CONFIG_PACK_ALIGN_MSB 0x00000080
+#define MDP4_DMA_CONFIG_PACK__MASK 0x0000ff00
+#define MDP4_DMA_CONFIG_PACK__SHIFT 8
+static inline uint32_t MDP4_DMA_CONFIG_PACK(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CONFIG_PACK__SHIFT) & MDP4_DMA_CONFIG_PACK__MASK;
+}
+#define MDP4_DMA_CONFIG_DEFLKR_EN 0x01000000
+#define MDP4_DMA_CONFIG_DITHER_EN 0x01000000
+
+static inline uint32_t REG_MDP4_DMA_SRC_SIZE(enum mdp4_dma i0) { return 0x00000004 + __offset_DMA(i0); }
+#define MDP4_DMA_SRC_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_DMA_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_DMA_SRC_SIZE_HEIGHT__SHIFT) & MDP4_DMA_SRC_SIZE_HEIGHT__MASK;
+}
+#define MDP4_DMA_SRC_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_DMA_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_DMA_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_DMA_SRC_SIZE_WIDTH__SHIFT) & MDP4_DMA_SRC_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_SRC_BASE(enum mdp4_dma i0) { return 0x00000008 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_SRC_STRIDE(enum mdp4_dma i0) { return 0x0000000c + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_DST_SIZE(enum mdp4_dma i0) { return 0x00000010 + __offset_DMA(i0); }
+#define MDP4_DMA_DST_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_DMA_DST_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_DMA_DST_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_DMA_DST_SIZE_HEIGHT__SHIFT) & MDP4_DMA_DST_SIZE_HEIGHT__MASK;
+}
+#define MDP4_DMA_DST_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_DMA_DST_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_DMA_DST_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_DMA_DST_SIZE_WIDTH__SHIFT) & MDP4_DMA_DST_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_SIZE(enum mdp4_dma i0) { return 0x00000044 + __offset_DMA(i0); }
+#define MDP4_DMA_CURSOR_SIZE_WIDTH__MASK 0x0000007f
+#define MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_DMA_CURSOR_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CURSOR_SIZE_WIDTH__SHIFT) & MDP4_DMA_CURSOR_SIZE_WIDTH__MASK;
+}
+#define MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK 0x007f0000
+#define MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_DMA_CURSOR_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CURSOR_SIZE_HEIGHT__SHIFT) & MDP4_DMA_CURSOR_SIZE_HEIGHT__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_BASE(enum mdp4_dma i0) { return 0x00000048 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_POS(enum mdp4_dma i0) { return 0x0000004c + __offset_DMA(i0); }
+#define MDP4_DMA_CURSOR_POS_X__MASK 0x0000ffff
+#define MDP4_DMA_CURSOR_POS_X__SHIFT 0
+static inline uint32_t MDP4_DMA_CURSOR_POS_X(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CURSOR_POS_X__SHIFT) & MDP4_DMA_CURSOR_POS_X__MASK;
+}
+#define MDP4_DMA_CURSOR_POS_Y__MASK 0xffff0000
+#define MDP4_DMA_CURSOR_POS_Y__SHIFT 16
+static inline uint32_t MDP4_DMA_CURSOR_POS_Y(uint32_t val)
+{
+ return ((val) << MDP4_DMA_CURSOR_POS_Y__SHIFT) & MDP4_DMA_CURSOR_POS_Y__MASK;
+}
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_CONFIG(enum mdp4_dma i0) { return 0x00000060 + __offset_DMA(i0); }
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN 0x00000001
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK 0x00000006
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT 1
+static inline uint32_t MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(enum mdp4_cursor_format val)
+{
+ return ((val) << MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__SHIFT) & MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT__MASK;
+}
+#define MDP4_DMA_CURSOR_BLEND_CONFIG_TRANSP_EN 0x00000008
+
+static inline uint32_t REG_MDP4_DMA_CURSOR_BLEND_PARAM(enum mdp4_dma i0) { return 0x00000064 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_LOW(enum mdp4_dma i0) { return 0x00000068 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_BLEND_TRANS_HIGH(enum mdp4_dma i0) { return 0x0000006c + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_FETCH_CONFIG(enum mdp4_dma i0) { return 0x00001004 + __offset_DMA(i0); }
+
+static inline uint32_t REG_MDP4_DMA_CSC(enum mdp4_dma i0) { return 0x00003000 + __offset_DMA(i0); }
+
+
+static inline uint32_t REG_MDP4_DMA_CSC_MV(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_MV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003400 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003500 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_BV(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_BV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003580 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_PRE_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003600 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_LV(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_DMA_CSC_POST_LV_VAL(enum mdp4_dma i0, uint32_t i1) { return 0x00003680 + __offset_DMA(i0) + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRC_SIZE(enum mpd4_pipe i0) { return 0x00020000 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_SRC_SIZE_HEIGHT__MASK;
+}
+#define MDP4_PIPE_SRC_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_SIZE_WIDTH__SHIFT) & MDP4_PIPE_SRC_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRC_XY(enum mpd4_pipe i0) { return 0x00020004 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_XY_Y__MASK 0xffff0000
+#define MDP4_PIPE_SRC_XY_Y__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_XY_Y(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_XY_Y__SHIFT) & MDP4_PIPE_SRC_XY_Y__MASK;
+}
+#define MDP4_PIPE_SRC_XY_X__MASK 0x0000ffff
+#define MDP4_PIPE_SRC_XY_X__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_XY_X(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_XY_X__SHIFT) & MDP4_PIPE_SRC_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_DST_SIZE(enum mpd4_pipe i0) { return 0x00020008 + 0x10000*i0; }
+#define MDP4_PIPE_DST_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_PIPE_DST_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_DST_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_DST_SIZE_HEIGHT__MASK;
+}
+#define MDP4_PIPE_DST_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_PIPE_DST_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_PIPE_DST_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_DST_SIZE_WIDTH__SHIFT) & MDP4_PIPE_DST_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_DST_XY(enum mpd4_pipe i0) { return 0x0002000c + 0x10000*i0; }
+#define MDP4_PIPE_DST_XY_Y__MASK 0xffff0000
+#define MDP4_PIPE_DST_XY_Y__SHIFT 16
+static inline uint32_t MDP4_PIPE_DST_XY_Y(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_DST_XY_Y__SHIFT) & MDP4_PIPE_DST_XY_Y__MASK;
+}
+#define MDP4_PIPE_DST_XY_X__MASK 0x0000ffff
+#define MDP4_PIPE_DST_XY_X__SHIFT 0
+static inline uint32_t MDP4_PIPE_DST_XY_X(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_DST_XY_X__SHIFT) & MDP4_PIPE_DST_XY_X__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRCP0_BASE(enum mpd4_pipe i0) { return 0x00020010 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRCP1_BASE(enum mpd4_pipe i0) { return 0x00020014 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRCP2_BASE(enum mpd4_pipe i0) { return 0x00020018 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_A(enum mpd4_pipe i0) { return 0x00020040 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_STRIDE_A_P0__MASK 0x0000ffff
+#define MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P0(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_STRIDE_A_P0__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P0__MASK;
+}
+#define MDP4_PIPE_SRC_STRIDE_A_P1__MASK 0xffff0000
+#define MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_A_P1(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_STRIDE_A_P1__SHIFT) & MDP4_PIPE_SRC_STRIDE_A_P1__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRC_STRIDE_B(enum mpd4_pipe i0) { return 0x00020044 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_STRIDE_B_P2__MASK 0x0000ffff
+#define MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P2(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_STRIDE_B_P2__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P2__MASK;
+}
+#define MDP4_PIPE_SRC_STRIDE_B_P3__MASK 0xffff0000
+#define MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_STRIDE_B_P3(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_STRIDE_B_P3__SHIFT) & MDP4_PIPE_SRC_STRIDE_B_P3__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_FRAME_SIZE(enum mpd4_pipe i0) { return 0x00020048 + 0x10000*i0; }
+#define MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK 0xffff0000
+#define MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT 16
+static inline uint32_t MDP4_PIPE_FRAME_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_FRAME_SIZE_HEIGHT__SHIFT) & MDP4_PIPE_FRAME_SIZE_HEIGHT__MASK;
+}
+#define MDP4_PIPE_FRAME_SIZE_WIDTH__MASK 0x0000ffff
+#define MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT 0
+static inline uint32_t MDP4_PIPE_FRAME_SIZE_WIDTH(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_FRAME_SIZE_WIDTH__SHIFT) & MDP4_PIPE_FRAME_SIZE_WIDTH__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_SRC_FORMAT(enum mpd4_pipe i0) { return 0x00020050 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_FORMAT_G_BPC__MASK 0x00000003
+#define MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_G_BPC(enum mpd4_bpc val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_G_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_G_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_B_BPC__MASK 0x0000000c
+#define MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT 2
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_B_BPC(enum mpd4_bpc val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_B_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_B_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_R_BPC__MASK 0x00000030
+#define MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT 4
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_R_BPC(enum mpd4_bpc val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_R_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_R_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_A_BPC__MASK 0x000000c0
+#define MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT 6
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_A_BPC(enum mpd4_bpc_alpha val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_A_BPC__SHIFT) & MDP4_PIPE_SRC_FORMAT_A_BPC__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE 0x00000100
+#define MDP4_PIPE_SRC_FORMAT_CPP__MASK 0x00000600
+#define MDP4_PIPE_SRC_FORMAT_CPP__SHIFT 9
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_CPP(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_CPP__SHIFT) & MDP4_PIPE_SRC_FORMAT_CPP__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_ROTATED_90 0x00001000
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK 0x00006000
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT 13
+static inline uint32_t MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__SHIFT) & MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT__MASK;
+}
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT 0x00020000
+#define MDP4_PIPE_SRC_FORMAT_UNPACK_ALIGN_MSB 0x00040000
+#define MDP4_PIPE_SRC_FORMAT_SOLID_FILL 0x00400000
+
+static inline uint32_t REG_MDP4_PIPE_SRC_UNPACK(enum mpd4_pipe i0) { return 0x00020054 + 0x10000*i0; }
+#define MDP4_PIPE_SRC_UNPACK_ELEM0__MASK 0x000000ff
+#define MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT 0
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM0(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM0__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM0__MASK;
+}
+#define MDP4_PIPE_SRC_UNPACK_ELEM1__MASK 0x0000ff00
+#define MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT 8
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM1(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM1__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM1__MASK;
+}
+#define MDP4_PIPE_SRC_UNPACK_ELEM2__MASK 0x00ff0000
+#define MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT 16
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM2(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM2__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM2__MASK;
+}
+#define MDP4_PIPE_SRC_UNPACK_ELEM3__MASK 0xff000000
+#define MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT 24
+static inline uint32_t MDP4_PIPE_SRC_UNPACK_ELEM3(uint32_t val)
+{
+ return ((val) << MDP4_PIPE_SRC_UNPACK_ELEM3__SHIFT) & MDP4_PIPE_SRC_UNPACK_ELEM3__MASK;
+}
+
+static inline uint32_t REG_MDP4_PIPE_OP_MODE(enum mpd4_pipe i0) { return 0x00020058 + 0x10000*i0; }
+#define MDP4_PIPE_OP_MODE_SCALEX_EN 0x00000001
+#define MDP4_PIPE_OP_MODE_SCALEY_EN 0x00000002
+#define MDP4_PIPE_OP_MODE_SRC_YCBCR 0x00000200
+#define MDP4_PIPE_OP_MODE_DST_YCBCR 0x00000400
+#define MDP4_PIPE_OP_MODE_CSC_EN 0x00000800
+#define MDP4_PIPE_OP_MODE_FLIP_LR 0x00002000
+#define MDP4_PIPE_OP_MODE_FLIP_UD 0x00004000
+#define MDP4_PIPE_OP_MODE_DITHER_EN 0x00008000
+#define MDP4_PIPE_OP_MODE_IGC_LUT_EN 0x00010000
+#define MDP4_PIPE_OP_MODE_DEINT_EN 0x00040000
+#define MDP4_PIPE_OP_MODE_DEINT_ODD_REF 0x00080000
+
+static inline uint32_t REG_MDP4_PIPE_PHASEX_STEP(enum mpd4_pipe i0) { return 0x0002005c + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_PHASEY_STEP(enum mpd4_pipe i0) { return 0x00020060 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_FETCH_CONFIG(enum mpd4_pipe i0) { return 0x00021004 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_SOLID_COLOR(enum mpd4_pipe i0) { return 0x00021008 + 0x10000*i0; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC(enum mpd4_pipe i0) { return 0x00024000 + 0x10000*i0; }
+
+
+static inline uint32_t REG_MDP4_PIPE_CSC_MV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_MV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024400 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024500 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_BV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024580 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_PRE_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024600 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
+
+static inline uint32_t REG_MDP4_PIPE_CSC_POST_LV_VAL(enum mpd4_pipe i0, uint32_t i1) { return 0x00024680 + 0x10000*i0 + 0x4*i1; }
+
+#define REG_MDP4_LCDC 0x000c0000
+
+#define REG_MDP4_LCDC_ENABLE 0x000c0000
+
+#define REG_MDP4_LCDC_HSYNC_CTRL 0x000c0004
+#define MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
+#define MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT 0
+static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PULSEW(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PULSEW__MASK;
+}
+#define MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK 0xffff0000
+#define MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT 16
+static inline uint32_t MDP4_LCDC_HSYNC_CTRL_PERIOD(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_LCDC_HSYNC_CTRL_PERIOD__MASK;
+}
+
+#define REG_MDP4_LCDC_VSYNC_PERIOD 0x000c0008
+
+#define REG_MDP4_LCDC_VSYNC_LEN 0x000c000c
+
+#define REG_MDP4_LCDC_DISPLAY_HCTRL 0x000c0010
+#define MDP4_LCDC_DISPLAY_HCTRL_START__MASK 0x0000ffff
+#define MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT 0
+static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_START(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_DISPLAY_HCTRL_START__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_START__MASK;
+}
+#define MDP4_LCDC_DISPLAY_HCTRL_END__MASK 0xffff0000
+#define MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT 16
+static inline uint32_t MDP4_LCDC_DISPLAY_HCTRL_END(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_DISPLAY_HCTRL_END__SHIFT) & MDP4_LCDC_DISPLAY_HCTRL_END__MASK;
+}
+
+#define REG_MDP4_LCDC_DISPLAY_VSTART 0x000c0014
+
+#define REG_MDP4_LCDC_DISPLAY_VEND 0x000c0018
+
+#define REG_MDP4_LCDC_ACTIVE_HCTL 0x000c001c
+#define MDP4_LCDC_ACTIVE_HCTL_START__MASK 0x00007fff
+#define MDP4_LCDC_ACTIVE_HCTL_START__SHIFT 0
+static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_ACTIVE_HCTL_START__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_START__MASK;
+}
+#define MDP4_LCDC_ACTIVE_HCTL_END__MASK 0x7fff0000
+#define MDP4_LCDC_ACTIVE_HCTL_END__SHIFT 16
+static inline uint32_t MDP4_LCDC_ACTIVE_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_ACTIVE_HCTL_END__SHIFT) & MDP4_LCDC_ACTIVE_HCTL_END__MASK;
+}
+#define MDP4_LCDC_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
+
+#define REG_MDP4_LCDC_ACTIVE_VSTART 0x000c0020
+
+#define REG_MDP4_LCDC_ACTIVE_VEND 0x000c0024
+
+#define REG_MDP4_LCDC_BORDER_CLR 0x000c0028
+
+#define REG_MDP4_LCDC_UNDERFLOW_CLR 0x000c002c
+#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
+#define MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT 0
+static inline uint32_t MDP4_LCDC_UNDERFLOW_CLR_COLOR(uint32_t val)
+{
+ return ((val) << MDP4_LCDC_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_LCDC_UNDERFLOW_CLR_COLOR__MASK;
+}
+#define MDP4_LCDC_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
+
+#define REG_MDP4_LCDC_HSYNC_SKEW 0x000c0030
+
+#define REG_MDP4_LCDC_TEST_CNTL 0x000c0034
+
+#define REG_MDP4_LCDC_CTRL_POLARITY 0x000c0038
+#define MDP4_LCDC_CTRL_POLARITY_HSYNC_LOW 0x00000001
+#define MDP4_LCDC_CTRL_POLARITY_VSYNC_LOW 0x00000002
+#define MDP4_LCDC_CTRL_POLARITY_DATA_EN_LOW 0x00000004
+
+#define REG_MDP4_DTV 0x000d0000
+
+#define REG_MDP4_DTV_ENABLE 0x000d0000
+
+#define REG_MDP4_DTV_HSYNC_CTRL 0x000d0004
+#define MDP4_DTV_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
+#define MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT 0
+static inline uint32_t MDP4_DTV_HSYNC_CTRL_PULSEW(uint32_t val)
+{
+ return ((val) << MDP4_DTV_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DTV_HSYNC_CTRL_PULSEW__MASK;
+}
+#define MDP4_DTV_HSYNC_CTRL_PERIOD__MASK 0xffff0000
+#define MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT 16
+static inline uint32_t MDP4_DTV_HSYNC_CTRL_PERIOD(uint32_t val)
+{
+ return ((val) << MDP4_DTV_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DTV_HSYNC_CTRL_PERIOD__MASK;
+}
+
+#define REG_MDP4_DTV_VSYNC_PERIOD 0x000d0008
+
+#define REG_MDP4_DTV_VSYNC_LEN 0x000d000c
+
+#define REG_MDP4_DTV_DISPLAY_HCTRL 0x000d0018
+#define MDP4_DTV_DISPLAY_HCTRL_START__MASK 0x0000ffff
+#define MDP4_DTV_DISPLAY_HCTRL_START__SHIFT 0
+static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_START(uint32_t val)
+{
+ return ((val) << MDP4_DTV_DISPLAY_HCTRL_START__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_START__MASK;
+}
+#define MDP4_DTV_DISPLAY_HCTRL_END__MASK 0xffff0000
+#define MDP4_DTV_DISPLAY_HCTRL_END__SHIFT 16
+static inline uint32_t MDP4_DTV_DISPLAY_HCTRL_END(uint32_t val)
+{
+ return ((val) << MDP4_DTV_DISPLAY_HCTRL_END__SHIFT) & MDP4_DTV_DISPLAY_HCTRL_END__MASK;
+}
+
+#define REG_MDP4_DTV_DISPLAY_VSTART 0x000d001c
+
+#define REG_MDP4_DTV_DISPLAY_VEND 0x000d0020
+
+#define REG_MDP4_DTV_ACTIVE_HCTL 0x000d002c
+#define MDP4_DTV_ACTIVE_HCTL_START__MASK 0x00007fff
+#define MDP4_DTV_ACTIVE_HCTL_START__SHIFT 0
+static inline uint32_t MDP4_DTV_ACTIVE_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP4_DTV_ACTIVE_HCTL_START__SHIFT) & MDP4_DTV_ACTIVE_HCTL_START__MASK;
+}
+#define MDP4_DTV_ACTIVE_HCTL_END__MASK 0x7fff0000
+#define MDP4_DTV_ACTIVE_HCTL_END__SHIFT 16
+static inline uint32_t MDP4_DTV_ACTIVE_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP4_DTV_ACTIVE_HCTL_END__SHIFT) & MDP4_DTV_ACTIVE_HCTL_END__MASK;
+}
+#define MDP4_DTV_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
+
+#define REG_MDP4_DTV_ACTIVE_VSTART 0x000d0030
+
+#define REG_MDP4_DTV_ACTIVE_VEND 0x000d0038
+
+#define REG_MDP4_DTV_BORDER_CLR 0x000d0040
+
+#define REG_MDP4_DTV_UNDERFLOW_CLR 0x000d0044
+#define MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
+#define MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT 0
+static inline uint32_t MDP4_DTV_UNDERFLOW_CLR_COLOR(uint32_t val)
+{
+ return ((val) << MDP4_DTV_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DTV_UNDERFLOW_CLR_COLOR__MASK;
+}
+#define MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
+
+#define REG_MDP4_DTV_HSYNC_SKEW 0x000d0048
+
+#define REG_MDP4_DTV_TEST_CNTL 0x000d004c
+
+#define REG_MDP4_DTV_CTRL_POLARITY 0x000d0050
+#define MDP4_DTV_CTRL_POLARITY_HSYNC_LOW 0x00000001
+#define MDP4_DTV_CTRL_POLARITY_VSYNC_LOW 0x00000002
+#define MDP4_DTV_CTRL_POLARITY_DATA_EN_LOW 0x00000004
+
+#define REG_MDP4_DSI 0x000e0000
+
+#define REG_MDP4_DSI_ENABLE 0x000e0000
+
+#define REG_MDP4_DSI_HSYNC_CTRL 0x000e0004
+#define MDP4_DSI_HSYNC_CTRL_PULSEW__MASK 0x0000ffff
+#define MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT 0
+static inline uint32_t MDP4_DSI_HSYNC_CTRL_PULSEW(uint32_t val)
+{
+ return ((val) << MDP4_DSI_HSYNC_CTRL_PULSEW__SHIFT) & MDP4_DSI_HSYNC_CTRL_PULSEW__MASK;
+}
+#define MDP4_DSI_HSYNC_CTRL_PERIOD__MASK 0xffff0000
+#define MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT 16
+static inline uint32_t MDP4_DSI_HSYNC_CTRL_PERIOD(uint32_t val)
+{
+ return ((val) << MDP4_DSI_HSYNC_CTRL_PERIOD__SHIFT) & MDP4_DSI_HSYNC_CTRL_PERIOD__MASK;
+}
+
+#define REG_MDP4_DSI_VSYNC_PERIOD 0x000e0008
+
+#define REG_MDP4_DSI_VSYNC_LEN 0x000e000c
+
+#define REG_MDP4_DSI_DISPLAY_HCTRL 0x000e0010
+#define MDP4_DSI_DISPLAY_HCTRL_START__MASK 0x0000ffff
+#define MDP4_DSI_DISPLAY_HCTRL_START__SHIFT 0
+static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_START(uint32_t val)
+{
+ return ((val) << MDP4_DSI_DISPLAY_HCTRL_START__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_START__MASK;
+}
+#define MDP4_DSI_DISPLAY_HCTRL_END__MASK 0xffff0000
+#define MDP4_DSI_DISPLAY_HCTRL_END__SHIFT 16
+static inline uint32_t MDP4_DSI_DISPLAY_HCTRL_END(uint32_t val)
+{
+ return ((val) << MDP4_DSI_DISPLAY_HCTRL_END__SHIFT) & MDP4_DSI_DISPLAY_HCTRL_END__MASK;
+}
+
+#define REG_MDP4_DSI_DISPLAY_VSTART 0x000e0014
+
+#define REG_MDP4_DSI_DISPLAY_VEND 0x000e0018
+
+#define REG_MDP4_DSI_ACTIVE_HCTL 0x000e001c
+#define MDP4_DSI_ACTIVE_HCTL_START__MASK 0x00007fff
+#define MDP4_DSI_ACTIVE_HCTL_START__SHIFT 0
+static inline uint32_t MDP4_DSI_ACTIVE_HCTL_START(uint32_t val)
+{
+ return ((val) << MDP4_DSI_ACTIVE_HCTL_START__SHIFT) & MDP4_DSI_ACTIVE_HCTL_START__MASK;
+}
+#define MDP4_DSI_ACTIVE_HCTL_END__MASK 0x7fff0000
+#define MDP4_DSI_ACTIVE_HCTL_END__SHIFT 16
+static inline uint32_t MDP4_DSI_ACTIVE_HCTL_END(uint32_t val)
+{
+ return ((val) << MDP4_DSI_ACTIVE_HCTL_END__SHIFT) & MDP4_DSI_ACTIVE_HCTL_END__MASK;
+}
+#define MDP4_DSI_ACTIVE_HCTL_ACTIVE_START_X 0x80000000
+
+#define REG_MDP4_DSI_ACTIVE_VSTART 0x000e0020
+
+#define REG_MDP4_DSI_ACTIVE_VEND 0x000e0024
+
+#define REG_MDP4_DSI_BORDER_CLR 0x000e0028
+
+#define REG_MDP4_DSI_UNDERFLOW_CLR 0x000e002c
+#define MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK 0x00ffffff
+#define MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT 0
+static inline uint32_t MDP4_DSI_UNDERFLOW_CLR_COLOR(uint32_t val)
+{
+ return ((val) << MDP4_DSI_UNDERFLOW_CLR_COLOR__SHIFT) & MDP4_DSI_UNDERFLOW_CLR_COLOR__MASK;
+}
+#define MDP4_DSI_UNDERFLOW_CLR_ENABLE_RECOVERY 0x80000000
+
+#define REG_MDP4_DSI_HSYNC_SKEW 0x000e0030
+
+#define REG_MDP4_DSI_TEST_CNTL 0x000e0034
+
+#define REG_MDP4_DSI_CTRL_POLARITY 0x000e0038
+#define MDP4_DSI_CTRL_POLARITY_HSYNC_LOW 0x00000001
+#define MDP4_DSI_CTRL_POLARITY_VSYNC_LOW 0x00000002
+#define MDP4_DSI_CTRL_POLARITY_DATA_EN_LOW 0x00000004
+
+
+#endif /* MDP4_XML */
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
new file mode 100644
index 00000000000..de6bea297cd
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_crtc.c
@@ -0,0 +1,685 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp4_kms.h"
+
+#include <drm/drm_mode.h>
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+#include "drm_flip_work.h"
+
+struct mdp4_crtc {
+ struct drm_crtc base;
+ char name[8];
+ struct drm_plane *plane;
+ int id;
+ int ovlp;
+ enum mdp4_dma dma;
+ bool enabled;
+
+ /* which mixer/encoder we route output to: */
+ int mixer;
+
+ struct {
+ spinlock_t lock;
+ bool stale;
+ uint32_t width, height;
+
+ /* next cursor to scan-out: */
+ uint32_t next_iova;
+ struct drm_gem_object *next_bo;
+
+ /* current cursor being scanned out: */
+ struct drm_gem_object *scanout_bo;
+ } cursor;
+
+
+ /* if there is a pending flip, these will be non-null: */
+ struct drm_pending_vblank_event *event;
+ struct work_struct pageflip_work;
+
+ /* the fb that we currently hold a scanout ref to: */
+ struct drm_framebuffer *fb;
+
+ /* for unref'ing framebuffers after scanout completes: */
+ struct drm_flip_work unref_fb_work;
+
+ /* for unref'ing cursor bo's after scanout completes: */
+ struct drm_flip_work unref_cursor_work;
+
+ struct mdp4_irq vblank;
+ struct mdp4_irq err;
+};
+#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)
+
+static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+ return to_mdp4_kms(priv->kms);
+}
+
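+/* swap in a new scanout fb: the old fb is queued for unref once scanout
+ * of it has finished.  For synchronous updates we register for vblank
+ * here so the deferred unref work gets committed; async (page flip)
+ * updates register for vblank from the pageflip worker instead.
+ */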
+static void update_fb(struct drm_crtc *crtc, bool async,
+ struct drm_framebuffer *new_fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_framebuffer *old_fb = mdp4_crtc->fb;
+
+ if (old_fb)
+ drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+
+ /* grab reference to incoming scanout fb: */
+ drm_framebuffer_reference(new_fb);
+ mdp4_crtc->base.fb = new_fb;
+ mdp4_crtc->fb = new_fb;
+
+ if (!async) {
+ /* enable vblank to pick up the old_fb */
+ mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
+ }
+}
+
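+/* finish (or cancel) a pending page flip: send the vblank event to
+ * userspace unless the flip was canceled, in which case the event is
+ * simply destroyed.
+ */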
+static void complete_flip(struct drm_crtc *crtc, bool canceled)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event = mdp4_crtc->event;
+ if (event) {
+ mdp4_crtc->event = NULL;
+ if (canceled)
+ event->base.destroy(&event->base);
+ else
+ drm_send_vblank_event(dev, mdp4_crtc->id, event);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
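+/* kick the overlay flush register so the hw latches the staged register
+ * updates for our plane's pipe and this crtc's overlay:
+ */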
+static void crtc_flush(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ uint32_t flush = 0;
+
+ flush |= pipe2flush(mdp4_plane_pipe(mdp4_crtc->plane));
+ flush |= ovlp2flush(mdp4_crtc->ovlp);
+
+ DBG("%s: flush=%08x", mdp4_crtc->name, flush);
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
+}
+
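+/* deferred from mdp4_crtc_page_flip() via msm_gem_queue_inactive_work():
+ * set up scanout of the new fb and enable vblank to complete the flip.
+ */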
+static void pageflip_worker(struct work_struct *work)
+{
+ struct mdp4_crtc *mdp4_crtc =
+ container_of(work, struct mdp4_crtc, pageflip_work);
+ struct drm_crtc *crtc = &mdp4_crtc->base;
+
+ mdp4_plane_set_scanout(mdp4_crtc->plane, crtc->fb);
+ crtc_flush(crtc);
+
+ /* enable vblank to complete flip: */
+ mdp4_irq_register(get_kms(crtc), &mdp4_crtc->vblank);
+}
+
+static void unref_fb_worker(struct drm_flip_work *work, void *val)
+{
+ struct mdp4_crtc *mdp4_crtc =
+ container_of(work, struct mdp4_crtc, unref_fb_work);
+ struct drm_device *dev = mdp4_crtc->base.dev;
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_framebuffer_unreference(val);
+ mutex_unlock(&dev->mode_config.mutex);
+}
+
+static void unref_cursor_worker(struct drm_flip_work *work, void *val)
+{
+ struct mdp4_crtc *mdp4_crtc =
+ container_of(work, struct mdp4_crtc, unref_cursor_work);
+ struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
+
+ msm_gem_put_iova(val, mdp4_kms->id);
+ drm_gem_object_unreference_unlocked(val);
+}
+
+static void mdp4_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+ mdp4_crtc->plane->funcs->destroy(mdp4_crtc->plane);
+
+ drm_crtc_cleanup(crtc);
+ drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work);
+ drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
+
+ kfree(mdp4_crtc);
+}
+
+static void mdp4_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ bool enabled = (mode == DRM_MODE_DPMS_ON);
+
+ DBG("%s: mode=%d", mdp4_crtc->name, mode);
+
+ if (enabled != mdp4_crtc->enabled) {
+ if (enabled) {
+ mdp4_enable(mdp4_kms);
+ mdp4_irq_register(mdp4_kms, &mdp4_crtc->err);
+ } else {
+ mdp4_irq_unregister(mdp4_kms, &mdp4_crtc->err);
+ mdp4_disable(mdp4_kms);
+ }
+ mdp4_crtc->enabled = enabled;
+ }
+}
+
+static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static void blend_setup(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ int i, ovlp = mdp4_crtc->ovlp;
+ uint32_t mixer_cfg = 0;
+
+ /*
+ * This would probably also need to be triggered by any attached
+ * plane when it changes.. for now, since we are only using a single
+ * private plane, the configuration is hard-coded:
+ */
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
+
+ for (i = 0; i < 4; i++) {
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i),
+ MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
+ MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST));
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
+ }
+
+ /* TODO: this is a single register shared by all CRTCs, so this won't
+ * work properly when multiple CRTCs are active..
+ */
+ switch (mdp4_plane_pipe(mdp4_crtc->plane)) {
+ case VG1:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE0(STAGE_BASE) |
+ COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE0_MIXER1);
+ break;
+ case VG2:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE1(STAGE_BASE) |
+ COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE1_MIXER1);
+ break;
+ case RGB1:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE2(STAGE_BASE) |
+ COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE2_MIXER1);
+ break;
+ case RGB2:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE3(STAGE_BASE) |
+ COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE3_MIXER1);
+ break;
+ case RGB3:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE4(STAGE_BASE) |
+ COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE4_MIXER1);
+ break;
+ case VG3:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE5(STAGE_BASE) |
+ COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE5_MIXER1);
+ break;
+ case VG4:
+ mixer_cfg = MDP4_LAYERMIXER_IN_CFG_PIPE6(STAGE_BASE) |
+ COND(mdp4_crtc->mixer == 1, MDP4_LAYERMIXER_IN_CFG_PIPE6_MIXER1);
+ break;
+ default:
+ WARN_ON("invalid pipe");
+ break;
+ }
+ mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
+}
+
+static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ enum mdp4_dma dma = mdp4_crtc->dma;
+ int ret, ovlp = mdp4_crtc->ovlp;
+
+ mode = adjusted_mode;
+
+ DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mdp4_crtc->name, mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
+ MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
+ MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
+
+ /* take data from pipe: */
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma),
+ crtc->fb->pitches[0]);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
+ MDP4_DMA_DST_SIZE_WIDTH(0) |
+ MDP4_DMA_DST_SIZE_HEIGHT(0));
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
+ MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
+ MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp),
+ crtc->fb->pitches[0]);
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
+
+ update_fb(crtc, false, crtc->fb);
+
+ ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
+ mdp4_crtc->name, ret);
+ return ret;
+ }
+
+ if (dma == DMA_E) {
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
+ }
+
+ return 0;
+}
+
+static void mdp4_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ DBG("%s", mdp4_crtc->name);
+ /* make sure we hold a ref to mdp clks while setting up mode: */
+ mdp4_enable(get_kms(crtc));
+ mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void mdp4_crtc_commit(struct drm_crtc *crtc)
+{
+ mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+ crtc_flush(crtc);
+ /* drop the ref to mdp clks that we got in prepare: */
+ mdp4_disable(get_kms(crtc));
+}
+
+static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_plane *plane = mdp4_crtc->plane;
+ struct drm_display_mode *mode = &crtc->mode;
+
+ update_fb(crtc, false, crtc->fb);
+
+ return mdp4_plane_mode_set(plane, crtc, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16);
+}
+
+static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
+ struct drm_framebuffer *new_fb,
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_gem_object *obj;
+
+ if (mdp4_crtc->event) {
+ dev_err(dev->dev, "already pending flip!\n");
+ return -EBUSY;
+ }
+
+ obj = msm_framebuffer_bo(new_fb, 0);
+
+ mdp4_crtc->event = event;
+ update_fb(crtc, true, new_fb);
+
+ return msm_gem_queue_inactive_work(obj,
+ &mdp4_crtc->pageflip_work);
+}
+
+static int mdp4_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t val)
+{
+ // XXX
+ return -EINVAL;
+}
+
+#define CURSOR_WIDTH 64
+#define CURSOR_HEIGHT 64
+
+/* called from IRQ to update cursor related registers (if needed). The
+ * cursor registers, other than x/y position, appear not to be double
+ * buffered, and changing them other than from vblank seems to trigger
+ * underflow.
+ */
+static void update_cursor(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ enum mdp4_dma dma = mdp4_crtc->dma;
+ unsigned long flags;
+
+ spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+ if (mdp4_crtc->cursor.stale) {
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
+ struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
+ uint32_t iova = mdp4_crtc->cursor.next_iova;
+
+ if (next_bo) {
+ /* take an obj ref + iova ref when we start scanning out: */
+ drm_gem_object_reference(next_bo);
+ msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);
+
+ /* enable cursor: */
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
+ MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
+ MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
+ MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
+ MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
+ } else {
+ /* disable cursor: */
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
+ MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB));
+ }
+
+ /* and drop the iova ref + obj ref when done scanning out: */
+ if (prev_bo)
+ drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);
+
+ mdp4_crtc->cursor.scanout_bo = next_bo;
+ mdp4_crtc->cursor.stale = false;
+ }
+ spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+}
+
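+/* just stash the new cursor bo/size and mark the cursor state stale;
+ * the actual register updates happen from vblank via update_cursor().
+ */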
+static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
+ struct drm_file *file_priv, uint32_t handle,
+ uint32_t width, uint32_t height)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct drm_gem_object *cursor_bo, *old_bo;
+ unsigned long flags;
+ uint32_t iova;
+ int ret;
+
+ if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
+ dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
+ return -EINVAL;
+ }
+
+ if (handle) {
+ cursor_bo = drm_gem_object_lookup(dev, file_priv, handle);
+ if (!cursor_bo)
+ return -ENOENT;
+ } else {
+ cursor_bo = NULL;
+ }
+
+ if (cursor_bo) {
+ ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
+ if (ret)
+ goto fail;
+ } else {
+ iova = 0;
+ }
+
+ spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+ old_bo = mdp4_crtc->cursor.next_bo;
+ mdp4_crtc->cursor.next_bo = cursor_bo;
+ mdp4_crtc->cursor.next_iova = iova;
+ mdp4_crtc->cursor.width = width;
+ mdp4_crtc->cursor.height = height;
+ mdp4_crtc->cursor.stale = true;
+ spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+
+ if (old_bo) {
+ /* drop our previous reference: */
+ msm_gem_put_iova(old_bo, mdp4_kms->id);
+ drm_gem_object_unreference_unlocked(old_bo);
+ }
+
+ return 0;
+
+fail:
+ drm_gem_object_unreference_unlocked(cursor_bo);
+ return ret;
+}
+
+static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ enum mdp4_dma dma = mdp4_crtc->dma;
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
+ MDP4_DMA_CURSOR_POS_X(x) |
+ MDP4_DMA_CURSOR_POS_Y(y));
+
+ return 0;
+}
+
+static const struct drm_crtc_funcs mdp4_crtc_funcs = {
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = mdp4_crtc_destroy,
+ .page_flip = mdp4_crtc_page_flip,
+ .set_property = mdp4_crtc_set_property,
+ .cursor_set = mdp4_crtc_cursor_set,
+ .cursor_move = mdp4_crtc_cursor_move,
+};
+
+static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
+ .dpms = mdp4_crtc_dpms,
+ .mode_fixup = mdp4_crtc_mode_fixup,
+ .mode_set = mdp4_crtc_mode_set,
+ .prepare = mdp4_crtc_prepare,
+ .commit = mdp4_crtc_commit,
+ .mode_set_base = mdp4_crtc_mode_set_base,
+ .load_lut = mdp4_crtc_load_lut,
+};
+
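+/* vblank irq: update cursor registers, complete any pending flip, and
+ * commit the deferred fb/cursor unref work:
+ */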
+static void mdp4_crtc_vblank_irq(struct mdp4_irq *irq, uint32_t irqstatus)
+{
+ struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
+ struct drm_crtc *crtc = &mdp4_crtc->base;
+ struct msm_drm_private *priv = crtc->dev->dev_private;
+
+ update_cursor(crtc);
+ complete_flip(crtc, false);
+ mdp4_irq_unregister(get_kms(crtc), &mdp4_crtc->vblank);
+
+ drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
+ drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->wq);
+}
+
+static void mdp4_crtc_err_irq(struct mdp4_irq *irq, uint32_t irqstatus)
+{
+ struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
+ struct drm_crtc *crtc = &mdp4_crtc->base;
+ DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
+ crtc_flush(crtc);
+}
+
+uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ return mdp4_crtc->vblank.irqmask;
+}
+
+void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc)
+{
+ complete_flip(crtc, true);
+}
+
+/* set dma config, i.e. the format the encoder wants. */
+void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
+}
+
+/* set interface for routing crtc->encoder: */
+void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ uint32_t intf_sel;
+
+ intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);
+
+ switch (mdp4_crtc->dma) {
+ case DMA_P:
+ intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
+ intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
+ break;
+ case DMA_S:
+ intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
+ intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
+ break;
+ case DMA_E:
+ intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
+ intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
+ break;
+ }
+
+ if (intf == INTF_DSI_VIDEO) {
+ intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
+ intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
+ mdp4_crtc->mixer = 0;
+ } else if (intf == INTF_DSI_CMD) {
+ intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
+ intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
+ mdp4_crtc->mixer = 0;
+ } else if (intf == INTF_LCDC_DTV) {
+ mdp4_crtc->mixer = 1;
+ }
+
+ blend_setup(crtc);
+
+ DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
+}
+
+static const char *dma_names[] = {
+ "DMA_P", "DMA_S", "DMA_E",
+};
+
+/* initialize crtc */
+struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane, int id, int ovlp_id,
+ enum mdp4_dma dma_id)
+{
+ struct drm_crtc *crtc = NULL;
+ struct mdp4_crtc *mdp4_crtc;
+ int ret;
+
+ mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
+ if (!mdp4_crtc) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ crtc = &mdp4_crtc->base;
+
+ mdp4_crtc->plane = plane;
+ mdp4_crtc->plane->crtc = crtc;
+
+ mdp4_crtc->ovlp = ovlp_id;
+ mdp4_crtc->dma = dma_id;
+
+ mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
+ mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;
+
+ mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
+ mdp4_crtc->err.irq = mdp4_crtc_err_irq;
+
+ snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
+ dma_names[dma_id], ovlp_id);
+
+ spin_lock_init(&mdp4_crtc->cursor.lock);
+
+ ret = drm_flip_work_init(&mdp4_crtc->unref_fb_work, 16,
+ "unref fb", unref_fb_worker);
+ if (ret)
+ goto fail;
+
+ ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
+ "unref cursor", unref_cursor_worker);
+ if (ret)
+ goto fail;
+
+ INIT_WORK(&mdp4_crtc->pageflip_work, pageflip_worker);
+
+ drm_crtc_init(dev, crtc, &mdp4_crtc_funcs);
+ drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
+
+ mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base);
+
+ return crtc;
+
+fail:
+ if (crtc)
+ mdp4_crtc_destroy(crtc);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c
new file mode 100644
index 00000000000..5e0dcae70ab
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_dtv_encoder.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <mach/clk.h>
+
+#include "mdp4_kms.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+
+struct mdp4_dtv_encoder {
+ struct drm_encoder base;
+ struct clk *src_clk;
+ struct clk *hdmi_clk;
+ struct clk *mdp_clk;
+ unsigned long pixclock;
+ bool enabled;
+ uint32_t bsc;
+};
+#define to_mdp4_dtv_encoder(x) container_of(x, struct mdp4_dtv_encoder, base)
+
+static struct mdp4_kms *get_kms(struct drm_encoder *encoder)
+{
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ return to_mdp4_kms(priv->kms);
+}
+
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <mach/board.h>
+/* not ironically named at all.. no, really.. */
+static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
+{
+ struct drm_device *dev = mdp4_dtv_encoder->base.dev;
+ struct lcdc_platform_data *dtv_pdata = mdp4_find_pdata("dtv.0");
+
+ if (!dtv_pdata) {
+ dev_err(dev->dev, "could not find dtv pdata\n");
+ return;
+ }
+
+ if (dtv_pdata->bus_scale_table) {
+ mdp4_dtv_encoder->bsc = msm_bus_scale_register_client(
+ dtv_pdata->bus_scale_table);
+ DBG("bus scale client: %08x", mdp4_dtv_encoder->bsc);
+ DBG("lcdc_power_save: %p", dtv_pdata->lcdc_power_save);
+ if (dtv_pdata->lcdc_power_save)
+ dtv_pdata->lcdc_power_save(1);
+ }
+}
+
+static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder)
+{
+ if (mdp4_dtv_encoder->bsc) {
+ msm_bus_scale_unregister_client(mdp4_dtv_encoder->bsc);
+ mdp4_dtv_encoder->bsc = 0;
+ }
+}
+
+static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx)
+{
+ if (mdp4_dtv_encoder->bsc) {
+ DBG("set bus scaling: %d", idx);
+ msm_bus_scale_client_update_request(mdp4_dtv_encoder->bsc, idx);
+ }
+}
+#else
+static void bs_init(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
+static void bs_fini(struct mdp4_dtv_encoder *mdp4_dtv_encoder) {}
+static void bs_set(struct mdp4_dtv_encoder *mdp4_dtv_encoder, int idx) {}
+#endif
+
+static void mdp4_dtv_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ bs_fini(mdp4_dtv_encoder);
+ drm_encoder_cleanup(encoder);
+ kfree(mdp4_dtv_encoder);
+}
+
+static const struct drm_encoder_funcs mdp4_dtv_encoder_funcs = {
+ .destroy = mdp4_dtv_encoder_destroy,
+};
+
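+/* on enable: bump bus bandwidth and enable the src/hdmi/mdp clocks
+ * before turning on the DTV timing generator; on disable: turn off the
+ * timing generator, wait for the disable to latch at vsync, then drop
+ * the clocks and bus request:
+ */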
+static void mdp4_dtv_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ bool enabled = (mode == DRM_MODE_DPMS_ON);
+
+ DBG("mode=%d", mode);
+
+ if (enabled == mdp4_dtv_encoder->enabled)
+ return;
+
+ if (enabled) {
+ unsigned long pc = mdp4_dtv_encoder->pixclock;
+ int ret;
+
+ bs_set(mdp4_dtv_encoder, 1);
+
+ DBG("setting src_clk=%lu", pc);
+
+ ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc);
+ if (ret)
+ dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret);
+ clk_prepare_enable(mdp4_dtv_encoder->src_clk);
+ ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
+ if (ret)
+ dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
+ ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
+ if (ret)
+ dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
+ } else {
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
+
+ /*
+ * Wait for a vsync so we know the ENABLE=0 write has latched
+ * before the (connector) source of the vsyncs gets disabled,
+ * otherwise we end up in a funny state if we re-enable before
+ * the disable latches, which results in some of the settings
+ * for the new modeset (like the new scanout buffer) not
+ * latching properly..
+ */
+ mdp4_irq_wait(mdp4_kms, MDP4_IRQ_EXTERNAL_VSYNC);
+
+ clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
+ clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
+ clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
+
+ bs_set(mdp4_dtv_encoder, 0);
+ }
+
+ mdp4_dtv_encoder->enabled = enabled;
+}
+
+static bool mdp4_dtv_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
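+/* program the DTV timing registers from the adjusted mode; note that
+ * the vertical values (vsync period/len, display start/end) are
+ * expressed in total pixels, i.e. scaled by htotal:
+ */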
+static void mdp4_dtv_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ struct mdp4_kms *mdp4_kms = get_kms(encoder);
+ uint32_t dtv_hsync_skew, vsync_period, vsync_len, ctrl_pol;
+ uint32_t display_v_start, display_v_end;
+ uint32_t hsync_start_x, hsync_end_x;
+
+ mode = adjusted_mode;
+
+ DBG("set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ mode->base.id, mode->name,
+ mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal,
+ mode->type, mode->flags);
+
+ mdp4_dtv_encoder->pixclock = mode->clock * 1000;
+
+ DBG("pixclock=%lu", mdp4_dtv_encoder->pixclock);
+
+ ctrl_pol = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ ctrl_pol |= MDP4_DTV_CTRL_POLARITY_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ ctrl_pol |= MDP4_DTV_CTRL_POLARITY_VSYNC_LOW;
+ /* probably need to get DATA_EN polarity from panel.. */
+
+ dtv_hsync_skew = 0; /* get this from panel? */
+
+ hsync_start_x = (mode->htotal - mode->hsync_start);
+ hsync_end_x = mode->htotal - (mode->hsync_start - mode->hdisplay) - 1;
+
+ vsync_period = mode->vtotal * mode->htotal;
+ vsync_len = (mode->vsync_end - mode->vsync_start) * mode->htotal;
+ display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
+ display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
+
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_CTRL,
+ MDP4_DTV_HSYNC_CTRL_PULSEW(mode->hsync_end - mode->hsync_start) |
+ MDP4_DTV_HSYNC_CTRL_PERIOD(mode->htotal));
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_PERIOD, vsync_period);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_VSYNC_LEN, vsync_len);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_HCTRL,
+ MDP4_DTV_DISPLAY_HCTRL_START(hsync_start_x) |
+ MDP4_DTV_DISPLAY_HCTRL_END(hsync_end_x));
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VSTART, display_v_start);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_DISPLAY_VEND, display_v_end);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_BORDER_CLR, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_UNDERFLOW_CLR,
+ MDP4_DTV_UNDERFLOW_CLR_ENABLE_RECOVERY |
+ MDP4_DTV_UNDERFLOW_CLR_COLOR(0xff));
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_HSYNC_SKEW, dtv_hsync_skew);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_CTRL_POLARITY, ctrl_pol);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_HCTL,
+ MDP4_DTV_ACTIVE_HCTL_START(0) |
+ MDP4_DTV_ACTIVE_HCTL_END(0));
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VSTART, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ACTIVE_VEND, 0);
+}
+
+static void mdp4_dtv_encoder_prepare(struct drm_encoder *encoder)
+{
+ mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void mdp4_dtv_encoder_commit(struct drm_encoder *encoder)
+{
+ mdp4_crtc_set_config(encoder->crtc,
+ MDP4_DMA_CONFIG_R_BPC(BPC8) |
+ MDP4_DMA_CONFIG_G_BPC(BPC8) |
+ MDP4_DMA_CONFIG_B_BPC(BPC8) |
+ MDP4_DMA_CONFIG_PACK(0x21));
+ mdp4_crtc_set_intf(encoder->crtc, INTF_LCDC_DTV);
+ mdp4_dtv_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
+ .dpms = mdp4_dtv_encoder_dpms,
+ .mode_fixup = mdp4_dtv_encoder_mode_fixup,
+ .mode_set = mdp4_dtv_encoder_mode_set,
+ .prepare = mdp4_dtv_encoder_prepare,
+ .commit = mdp4_dtv_encoder_commit,
+};
+
+long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
+{
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
+ return clk_round_rate(mdp4_dtv_encoder->src_clk, rate);
+}
+
+/* initialize encoder */
+struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
+{
+ struct drm_encoder *encoder = NULL;
+ struct mdp4_dtv_encoder *mdp4_dtv_encoder;
+ int ret;
+
+ mdp4_dtv_encoder = kzalloc(sizeof(*mdp4_dtv_encoder), GFP_KERNEL);
+ if (!mdp4_dtv_encoder) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ encoder = &mdp4_dtv_encoder->base;
+
+ drm_encoder_init(dev, encoder, &mdp4_dtv_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
+
+ mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk");
+ if (IS_ERR(mdp4_dtv_encoder->src_clk)) {
+ dev_err(dev->dev, "failed to get src_clk\n");
+ ret = PTR_ERR(mdp4_dtv_encoder->src_clk);
+ goto fail;
+ }
+
+ mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
+ if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
+ dev_err(dev->dev, "failed to get hdmi_clk\n");
+ ret = PTR_ERR(mdp4_dtv_encoder->hdmi_clk);
+ goto fail;
+ }
+
+ mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "mdp_clk");
+ if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
+ dev_err(dev->dev, "failed to get mdp_clk\n");
+ ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
+ goto fail;
+ }
+
+ bs_init(mdp4_dtv_encoder);
+
+ return encoder;
+
+fail:
+ if (encoder)
+ mdp4_dtv_encoder_destroy(encoder);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_format.c b/drivers/gpu/drm/msm/mdp4/mdp4_format.c
new file mode 100644
index 00000000000..7b645f2e837
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_format.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp4_kms.h"
+
+#define FMT(name, a, r, g, b, e0, e1, e2, e3, alpha, tight, c, cnt) { \
+ .base = { .pixel_format = DRM_FORMAT_ ## name }, \
+ .bpc_a = BPC ## a ## A, \
+ .bpc_r = BPC ## r, \
+ .bpc_g = BPC ## g, \
+ .bpc_b = BPC ## b, \
+ .unpack = { e0, e1, e2, e3 }, \
+ .alpha_enable = alpha, \
+ .unpack_tight = tight, \
+ .cpp = c, \
+ .unpack_count = cnt, \
+ }
+
+#define BPC0A 0
+
+static const struct mdp4_format formats[] = {
+ /* name a r g b e0 e1 e2 e3 alpha tight cpp cnt */
+ FMT(ARGB8888, 8, 8, 8, 8, 1, 0, 2, 3, true, true, 4, 4),
+ FMT(XRGB8888, 8, 8, 8, 8, 1, 0, 2, 3, false, true, 4, 4),
+ FMT(RGB888, 0, 8, 8, 8, 1, 0, 2, 0, false, true, 3, 3),
+ FMT(BGR888, 0, 8, 8, 8, 2, 0, 1, 0, false, true, 3, 3),
+ FMT(RGB565, 0, 5, 6, 5, 1, 0, 2, 0, false, true, 2, 3),
+ FMT(BGR565, 0, 5, 6, 5, 2, 0, 1, 0, false, true, 2, 3),
+};
+
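+/* look up the mdp4 format descriptor for a given drm fourcc, or return
+ * NULL if the format is not supported:
+ */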
+const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ const struct mdp4_format *f = &formats[i];
+ if (f->base.pixel_format == format)
+ return &f->base;
+ }
+ return NULL;
+}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_irq.c b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c
new file mode 100644
index 00000000000..5c6b7fca4ed
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_irq.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp4_kms.h"
+
+
+struct mdp4_irq_wait {
+ struct mdp4_irq irq;
+ int count;
+};
+
+static DECLARE_WAIT_QUEUE_HEAD(wait_event);
+
+static DEFINE_SPINLOCK(list_lock);
+
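+/* (re)program the enabled irq mask: the vblank mask plus the irqmask of
+ * every registered handler.  Caller must hold list_lock.
+ */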
+static void update_irq(struct mdp4_kms *mdp4_kms)
+{
+ struct mdp4_irq *irq;
+ uint32_t irqmask = mdp4_kms->vblank_mask;
+
+ BUG_ON(!spin_is_locked(&list_lock));
+
+ list_for_each_entry(irq, &mdp4_kms->irq_list, node)
+ irqmask |= irq->irqmask;
+
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, irqmask);
+}
+
+static void update_irq_unlocked(struct mdp4_kms *mdp4_kms)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&list_lock, flags);
+ update_irq(mdp4_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+}
+
+static void mdp4_irq_error_handler(struct mdp4_irq *irq, uint32_t irqstatus)
+{
+ DRM_ERROR("errors: %08x\n", irqstatus);
+}
+
+void mdp4_irq_preinstall(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
+}
+
+int mdp4_irq_postinstall(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ struct mdp4_irq *error_handler = &mdp4_kms->error_handler;
+
+ INIT_LIST_HEAD(&mdp4_kms->irq_list);
+
+ error_handler->irq = mdp4_irq_error_handler;
+ error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
+ MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
+
+ mdp4_irq_register(mdp4_kms, error_handler);
+
+ return 0;
+}
+
+void mdp4_irq_uninstall(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
+}
+
+irqreturn_t mdp4_irq(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ struct drm_device *dev = mdp4_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct mdp4_irq *handler, *n;
+ unsigned long flags;
+ unsigned int id;
+ uint32_t status;
+
+ status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
+ mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);
+
+ VERB("status=%08x", status);
+
+ for (id = 0; id < priv->num_crtcs; id++)
+ if (status & mdp4_crtc_vblank(priv->crtcs[id]))
+ drm_handle_vblank(dev, id);
+
+ spin_lock_irqsave(&list_lock, flags);
+ mdp4_kms->in_irq = true;
+ list_for_each_entry_safe(handler, n, &mdp4_kms->irq_list, node) {
+ if (handler->irqmask & status) {
+ spin_unlock_irqrestore(&list_lock, flags);
+ handler->irq(handler, handler->irqmask & status);
+ spin_lock_irqsave(&list_lock, flags);
+ }
+ }
+ mdp4_kms->in_irq = false;
+ update_irq(mdp4_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ mdp4_kms->vblank_mask |= mdp4_crtc_vblank(crtc);
+ update_irq(mdp4_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ return 0;
+}
+
+void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ unsigned long flags;
+
+ spin_lock_irqsave(&list_lock, flags);
+ mdp4_kms->vblank_mask &= ~mdp4_crtc_vblank(crtc);
+ update_irq(mdp4_kms);
+ spin_unlock_irqrestore(&list_lock, flags);
+}
+
+static void wait_irq(struct mdp4_irq *irq, uint32_t irqstatus)
+{
+ struct mdp4_irq_wait *wait =
+ container_of(irq, struct mdp4_irq_wait, irq);
+ wait->count--;
+ wake_up_all(&wait_event);
+}
+
+void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask)
+{
+ struct mdp4_irq_wait wait = {
+ .irq = {
+ .irq = wait_irq,
+ .irqmask = irqmask,
+ },
+ .count = 1,
+ };
+ mdp4_irq_register(mdp4_kms, &wait.irq);
+ wait_event(wait_event, (wait.count <= 0));
+ mdp4_irq_unregister(mdp4_kms, &wait.irq);
+}
+
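+/* Note: when an irq is (un)registered from within the irq handler itself,
+ * the INTR_ENABLE update is deferred (in_irq is set); mdp4_irq() rewrites
+ * the enable mask under list_lock once dispatch completes.
+ */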
+void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
+{
+ unsigned long flags;
+ bool needs_update = false;
+
+ spin_lock_irqsave(&list_lock, flags);
+
+ if (!irq->registered) {
+ irq->registered = true;
+ list_add(&irq->node, &mdp4_kms->irq_list);
+ needs_update = !mdp4_kms->in_irq;
+ }
+
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ if (needs_update)
+ update_irq_unlocked(mdp4_kms);
+}
+
+void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
+{
+ unsigned long flags;
+ bool needs_update = false;
+
+ spin_lock_irqsave(&list_lock, flags);
+
+ if (irq->registered) {
+ irq->registered = false;
+ list_del(&irq->node);
+ needs_update = !mdp4_kms->in_irq;
+ }
+
+ spin_unlock_irqrestore(&list_lock, flags);
+
+ if (needs_update)
+ update_irq_unlocked(mdp4_kms);
+}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
new file mode 100644
index 00000000000..5db5bbaedae
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.c
@@ -0,0 +1,365 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include "msm_drv.h"
+#include "mdp4_kms.h"
+
+#include <mach/iommu.h>
+
+static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev);
+
+static int mdp4_hw_init(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ struct drm_device *dev = mdp4_kms->dev;
+ uint32_t version, major, minor, dmap_cfg, vg_cfg;
+ unsigned long clk;
+ int ret = 0;
+
+ pm_runtime_get_sync(dev->dev);
+
+ version = mdp4_read(mdp4_kms, REG_MDP4_VERSION);
+
+ major = FIELD(version, MDP4_VERSION_MAJOR);
+ minor = FIELD(version, MDP4_VERSION_MINOR);
+
+ DBG("found MDP version v%d.%d", major, minor);
+
+ if (major != 4) {
+ dev_err(dev->dev, "unexpected MDP version: v%d.%d\n",
+ major, minor);
+ ret = -ENXIO;
+ goto out;
+ }
+
+ mdp4_kms->rev = minor;
+
+ if (mdp4_kms->dsi_pll_vdda) {
+ if ((mdp4_kms->rev == 2) || (mdp4_kms->rev == 4)) {
+ ret = regulator_set_voltage(mdp4_kms->dsi_pll_vdda,
+ 1200000, 1200000);
+ if (ret) {
+ dev_err(dev->dev,
+ "failed to set dsi_pll_vdda voltage: %d\n", ret);
+ goto out;
+ }
+ }
+ }
+
+ if (mdp4_kms->dsi_pll_vddio) {
+ if (mdp4_kms->rev == 2) {
+ ret = regulator_set_voltage(mdp4_kms->dsi_pll_vddio,
+ 1800000, 1800000);
+ if (ret) {
+ dev_err(dev->dev,
+ "failed to set dsi_pll_vddio voltage: %d\n", ret);
+ goto out;
+ }
+ }
+ }
+
+ if (mdp4_kms->rev > 1) {
+ mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER0, 0x0707ffff);
+ mdp4_write(mdp4_kms, REG_MDP4_CS_CONTROLLER1, 0x03073f3f);
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_PORTMAP_MODE, 0x3);
+
+ /* max read pending cmd config, 3 pending requests: */
+ mdp4_write(mdp4_kms, REG_MDP4_READ_CNFG, 0x02222);
+
+ clk = clk_get_rate(mdp4_kms->clk);
+
+ if ((mdp4_kms->rev >= 1) || (clk >= 90000000)) {
+ dmap_cfg = 0x47; /* 16 bytes-burst x 8 req */
+ vg_cfg = 0x47; /* 16 bytes-burst x 8 req */
+ } else {
+ dmap_cfg = 0x27; /* 8 bytes-burst x 8 req */
+ vg_cfg = 0x43; /* 16 bytes-burst x 4 req */
+ }
+
+ DBG("fetch config: dmap=%02x, vg=%02x", dmap_cfg, vg_cfg);
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_P), dmap_cfg);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_FETCH_CONFIG(DMA_E), dmap_cfg);
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG1), vg_cfg);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(VG2), vg_cfg);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB1), vg_cfg);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_FETCH_CONFIG(RGB2), vg_cfg);
+
+ if (mdp4_kms->rev >= 2)
+ mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG_UPDATE_METHOD, 1);
+
+ /* disable CSC matrix / YUV by default: */
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG1), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(VG2), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_P_OP_MODE, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_S_OP_MODE, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(1), 0);
+ mdp4_write(mdp4_kms, REG_MDP4_OVLP_CSC_CONFIG(2), 0);
+
+ if (mdp4_kms->rev > 1)
+ mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
+
+out:
+ pm_runtime_put_sync(dev->dev);
+
+ return ret;
+}
+
+static long mdp4_round_pixclk(struct msm_kms *kms, unsigned long rate,
+ struct drm_encoder *encoder)
+{
+ /* if we had >1 encoder, we'd need something more clever: */
+ return mdp4_dtv_round_pixclk(encoder, rate);
+}
+
+static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
+ unsigned i;
+
+ for (i = 0; i < priv->num_crtcs; i++)
+ mdp4_crtc_cancel_pending_flip(priv->crtcs[i]);
+}
+
+static void mdp4_destroy(struct msm_kms *kms)
+{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
+ kfree(mdp4_kms);
+}
+
+static const struct msm_kms_funcs kms_funcs = {
+ .hw_init = mdp4_hw_init,
+ .irq_preinstall = mdp4_irq_preinstall,
+ .irq_postinstall = mdp4_irq_postinstall,
+ .irq_uninstall = mdp4_irq_uninstall,
+ .irq = mdp4_irq,
+ .enable_vblank = mdp4_enable_vblank,
+ .disable_vblank = mdp4_disable_vblank,
+ .get_format = mdp4_get_format,
+ .round_pixclk = mdp4_round_pixclk,
+ .preclose = mdp4_preclose,
+ .destroy = mdp4_destroy,
+};
+
+int mdp4_disable(struct mdp4_kms *mdp4_kms)
+{
+ DBG("");
+
+ clk_disable_unprepare(mdp4_kms->clk);
+ if (mdp4_kms->pclk)
+ clk_disable_unprepare(mdp4_kms->pclk);
+ clk_disable_unprepare(mdp4_kms->lut_clk);
+
+ return 0;
+}
+
+int mdp4_enable(struct mdp4_kms *mdp4_kms)
+{
+ DBG("");
+
+ clk_prepare_enable(mdp4_kms->clk);
+ if (mdp4_kms->pclk)
+ clk_prepare_enable(mdp4_kms->pclk);
+ clk_prepare_enable(mdp4_kms->lut_clk);
+
+ return 0;
+}
+
+static int modeset_init(struct mdp4_kms *mdp4_kms)
+{
+ struct drm_device *dev = mdp4_kms->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ int ret;
+
+ /*
+ * NOTE: this is a bit simplistic until we add support
+ * for more than just RGB1->DMA_E->DTV->HDMI
+ */
+
+ /* the CRTCs get constructed with a private plane: */
+ plane = mdp4_plane_init(dev, RGB1, true);
+ if (IS_ERR(plane)) {
+ dev_err(dev->dev, "failed to construct plane for RGB1\n");
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+
+ crtc = mdp4_crtc_init(dev, plane, priv->num_crtcs, 1, DMA_E);
+ if (IS_ERR(crtc)) {
+ dev_err(dev->dev, "failed to construct crtc for DMA_E\n");
+ ret = PTR_ERR(crtc);
+ goto fail;
+ }
+ priv->crtcs[priv->num_crtcs++] = crtc;
+
+ encoder = mdp4_dtv_encoder_init(dev);
+ if (IS_ERR(encoder)) {
+ dev_err(dev->dev, "failed to construct DTV encoder\n");
+ ret = PTR_ERR(encoder);
+ goto fail;
+ }
+ encoder->possible_crtcs = 0x1; /* DTV can be hooked to DMA_E */
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ ret = hdmi_init(dev, encoder);
+ if (ret) {
+ dev_err(dev->dev, "failed to initialize HDMI\n");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+static const char *iommu_ports[] = {
+ "mdp_port0_cb0", "mdp_port1_cb0",
+};
+
+struct msm_kms *mdp4_kms_init(struct drm_device *dev)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct mdp4_platform_config *config = mdp4_get_config(pdev);
+ struct mdp4_kms *mdp4_kms;
+ struct msm_kms *kms = NULL;
+ int ret;
+
+ mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
+ if (!mdp4_kms) {
+ dev_err(dev->dev, "failed to allocate kms\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ kms = &mdp4_kms->base;
+ kms->funcs = &kms_funcs;
+
+ mdp4_kms->dev = dev;
+
+ mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4");
+ if (IS_ERR(mdp4_kms->mmio)) {
+ ret = PTR_ERR(mdp4_kms->mmio);
+ goto fail;
+ }
+
+ mdp4_kms->dsi_pll_vdda = devm_regulator_get(&pdev->dev, "dsi_pll_vdda");
+ if (IS_ERR(mdp4_kms->dsi_pll_vdda))
+ mdp4_kms->dsi_pll_vdda = NULL;
+
+ mdp4_kms->dsi_pll_vddio = devm_regulator_get(&pdev->dev, "dsi_pll_vddio");
+ if (IS_ERR(mdp4_kms->dsi_pll_vddio))
+ mdp4_kms->dsi_pll_vddio = NULL;
+
+ mdp4_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
+ if (IS_ERR(mdp4_kms->vdd))
+ mdp4_kms->vdd = NULL;
+
+ if (mdp4_kms->vdd) {
+ ret = regulator_enable(mdp4_kms->vdd);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
+ goto fail;
+ }
+ }
+
+ mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(mdp4_kms->clk)) {
+ dev_err(dev->dev, "failed to get core_clk\n");
+ ret = PTR_ERR(mdp4_kms->clk);
+ goto fail;
+ }
+
+ mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
+ if (IS_ERR(mdp4_kms->pclk))
+ mdp4_kms->pclk = NULL;
+
+ // XXX if (rev >= MDP_REV_42) { ???
+ mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
+ if (IS_ERR(mdp4_kms->lut_clk)) {
+ dev_err(dev->dev, "failed to get lut_clk\n");
+ ret = PTR_ERR(mdp4_kms->lut_clk);
+ goto fail;
+ }
+
+ clk_set_rate(mdp4_kms->clk, config->max_clk);
+ clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
+
+ if (!config->iommu) {
+ dev_err(dev->dev, "no iommu\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ /* make sure things are off before attaching iommu (bootloader could
+ * have left things on, in which case we'll start getting faults if
+ * we don't disable):
+ */
+ mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
+ mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
+ mdelay(16);
+
+ ret = msm_iommu_attach(dev, config->iommu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ if (ret)
+ goto fail;
+
+ mdp4_kms->id = msm_register_iommu(dev, config->iommu);
+ if (mdp4_kms->id < 0) {
+ ret = mdp4_kms->id;
+ dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
+ goto fail;
+ }
+
+ ret = modeset_init(mdp4_kms);
+ if (ret) {
+ dev_err(dev->dev, "modeset_init failed: %d\n", ret);
+ goto fail;
+ }
+
+ return kms;
+
+fail:
+ if (kms)
+ mdp4_destroy(kms);
+ return ERR_PTR(ret);
+}
+
+static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
+{
+ static struct mdp4_platform_config config = {};
+#ifdef CONFIG_OF
+ /* TODO */
+#else
+ if (cpu_is_apq8064())
+ config.max_clk = 266667000;
+ else
+ config.max_clk = 200000000;
+
+ config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN);
+#endif
+ return &config;
+}
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
new file mode 100644
index 00000000000..1e83554955f
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_kms.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MDP4_KMS_H__
+#define __MDP4_KMS_H__
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_drv.h"
+#include "mdp4.xml.h"
+
+
+/* For transiently registering for different MDP4 irqs that various parts
+ * of the KMS code need during setup/configuration. These are not
+ * necessarily the same as what drm_vblank_get/put() are requesting, and
+ * the hysteresis in drm_vblank_put() is not necessarily desirable for
+ * internal housekeeping related irq usage.
+ */
+struct mdp4_irq {
+ struct list_head node;
+ uint32_t irqmask;
+ bool registered;
+ void (*irq)(struct mdp4_irq *irq, uint32_t irqstatus);
+};
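+/* For example (hypothetical caller, not part of this patch), code that has
+ * to wait for a DMA done interrupt before reprogramming could block with:
+ *
+ *    mdp4_irq_wait(mdp4_kms, dma2irq(DMA_E));
+ */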
+
+struct mdp4_kms {
+ struct msm_kms base;
+
+ struct drm_device *dev;
+
+ int rev;
+
+ /* mapper-id used to request GEM buffer mapped for scanout: */
+ int id;
+
+ void __iomem *mmio;
+
+ struct regulator *dsi_pll_vdda;
+ struct regulator *dsi_pll_vddio;
+ struct regulator *vdd;
+
+ struct clk *clk;
+ struct clk *pclk;
+ struct clk *lut_clk;
+
+ /* irq handling: */
+ bool in_irq;
+ struct list_head irq_list; /* list of mdp4_irq */
+ uint32_t vblank_mask; /* irq bits set for userspace vblank */
+ struct mdp4_irq error_handler;
+};
+#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
+
+/* platform config data (ie. from DT, or pdata) */
+struct mdp4_platform_config {
+ struct iommu_domain *iommu;
+ uint32_t max_clk;
+};
+
+struct mdp4_format {
+ struct msm_format base;
+ enum mpd4_bpc bpc_r, bpc_g, bpc_b;
+ enum mpd4_bpc_alpha bpc_a;
+ uint8_t unpack[4];
+ bool alpha_enable, unpack_tight;
+ uint8_t cpp, unpack_count;
+};
+#define to_mdp4_format(x) container_of(x, struct mdp4_format, base)
+
+static inline void mdp4_write(struct mdp4_kms *mdp4_kms, u32 reg, u32 data)
+{
+ msm_writel(data, mdp4_kms->mmio + reg);
+}
+
+static inline u32 mdp4_read(struct mdp4_kms *mdp4_kms, u32 reg)
+{
+ return msm_readl(mdp4_kms->mmio + reg);
+}
+
+static inline uint32_t pipe2flush(enum mpd4_pipe pipe)
+{
+ switch (pipe) {
+ case VG1: return MDP4_OVERLAY_FLUSH_VG1;
+ case VG2: return MDP4_OVERLAY_FLUSH_VG2;
+ case RGB1: return MDP4_OVERLAY_FLUSH_RGB1;
+ case RGB2: return MDP4_OVERLAY_FLUSH_RGB2;
+ default: return 0;
+ }
+}
+
+static inline uint32_t ovlp2flush(int ovlp)
+{
+ switch (ovlp) {
+ case 0: return MDP4_OVERLAY_FLUSH_OVLP0;
+ case 1: return MDP4_OVERLAY_FLUSH_OVLP1;
+ default: return 0;
+ }
+}
+
+static inline uint32_t dma2irq(enum mdp4_dma dma)
+{
+ switch (dma) {
+ case DMA_P: return MDP4_IRQ_DMA_P_DONE;
+ case DMA_S: return MDP4_IRQ_DMA_S_DONE;
+ case DMA_E: return MDP4_IRQ_DMA_E_DONE;
+ default: return 0;
+ }
+}
+
+static inline uint32_t dma2err(enum mdp4_dma dma)
+{
+ switch (dma) {
+ case DMA_P: return MDP4_IRQ_PRIMARY_INTF_UDERRUN;
+ case DMA_S: return 0; // ???
+ case DMA_E: return MDP4_IRQ_EXTERNAL_INTF_UDERRUN;
+ default: return 0;
+ }
+}
+
+int mdp4_disable(struct mdp4_kms *mdp4_kms);
+int mdp4_enable(struct mdp4_kms *mdp4_kms);
+
+void mdp4_irq_preinstall(struct msm_kms *kms);
+int mdp4_irq_postinstall(struct msm_kms *kms);
+void mdp4_irq_uninstall(struct msm_kms *kms);
+irqreturn_t mdp4_irq(struct msm_kms *kms);
+void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask);
+void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
+void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq);
+int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+const struct msm_format *mdp4_get_format(struct msm_kms *kms, uint32_t format);
+
+void mdp4_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj);
+void mdp4_plane_set_scanout(struct drm_plane *plane,
+ struct drm_framebuffer *fb);
+int mdp4_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane);
+struct drm_plane *mdp4_plane_init(struct drm_device *dev,
+ enum mpd4_pipe pipe_id, bool private_plane);
+
+uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
+void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc);
+void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
+void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf);
+struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
+ struct drm_plane *plane, int id, int ovlp_id,
+ enum mdp4_dma dma_id);
+
+long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate);
+struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev);
+
+#ifdef CONFIG_MSM_BUS_SCALING
+static inline int match_dev_name(struct device *dev, void *data)
+{
+ return !strcmp(dev_name(dev), data);
+}
+/* bus scaling data is associated with extra pointless platform devices,
+ * "dtv", etc.. this is a bit of a hack, but we need a way for encoders
+ * to find their pdata to make the bus-scaling stuff work.
+ */
+static inline void *mdp4_find_pdata(const char *devname)
+{
+ struct device *dev;
+ dev = bus_find_device(&platform_bus_type, NULL,
+ (void *)devname, match_dev_name);
+ return dev ? dev->platform_data : NULL;
+}
+#endif
+
+#endif /* __MDP4_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
new file mode 100644
index 00000000000..3468229d58b
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp4/mdp4_plane.c
@@ -0,0 +1,243 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "mdp4_kms.h"
+
+
+struct mdp4_plane {
+ struct drm_plane base;
+ const char *name;
+
+ enum mpd4_pipe pipe;
+
+ uint32_t nformats;
+ uint32_t formats[32];
+
+ bool enabled;
+};
+#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
+
+static struct mdp4_kms *get_kms(struct drm_plane *plane)
+{
+ struct msm_drm_private *priv = plane->dev->dev_private;
+ return to_mdp4_kms(priv->kms);
+}
+
+static int mdp4_plane_update(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+
+ mdp4_plane->enabled = true;
+
+ if (plane->fb)
+ drm_framebuffer_unreference(plane->fb);
+
+ drm_framebuffer_reference(fb);
+
+ return mdp4_plane_mode_set(plane, crtc, fb,
+ crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+}
+
+static int mdp4_plane_disable(struct drm_plane *plane)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ DBG("%s: TODO", mdp4_plane->name); // XXX
+ return 0;
+}
+
+static void mdp4_plane_destroy(struct drm_plane *plane)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+
+ mdp4_plane_disable(plane);
+ drm_plane_cleanup(plane);
+
+ kfree(mdp4_plane);
+}
+
+/* helper to install properties which are common to planes and crtcs */
+void mdp4_plane_install_properties(struct drm_plane *plane,
+ struct drm_mode_object *obj)
+{
+ // XXX
+}
+
+int mdp4_plane_set_property(struct drm_plane *plane,
+ struct drm_property *property, uint64_t val)
+{
+ // XXX
+ return -EINVAL;
+}
+
+static const struct drm_plane_funcs mdp4_plane_funcs = {
+ .update_plane = mdp4_plane_update,
+ .disable_plane = mdp4_plane_disable,
+ .destroy = mdp4_plane_destroy,
+ .set_property = mdp4_plane_set_property,
+};
+
+void mdp4_plane_set_scanout(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ struct mdp4_kms *mdp4_kms = get_kms(plane);
+ enum mpd4_pipe pipe = mdp4_plane->pipe;
+ uint32_t iova;
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
+ MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
+ MDP4_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_B(pipe),
+ MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
+ MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
+
+ msm_gem_get_iova(msm_framebuffer_bo(fb, 0), mdp4_kms->id, &iova);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova);
+
+ plane->fb = fb;
+}
+
+#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000
+
+int mdp4_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ struct mdp4_kms *mdp4_kms = get_kms(plane);
+ enum mpd4_pipe pipe = mdp4_plane->pipe;
+ const struct mdp4_format *format;
+ uint32_t op_mode = 0;
+ uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
+ uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
+
+ /* src values are in Q16 fixed point, convert to integer: */
+ src_x = src_x >> 16;
+ src_y = src_y >> 16;
+ src_w = src_w >> 16;
+ src_h = src_h >> 16;
+
+ if (src_w != crtc_w) {
+ op_mode |= MDP4_PIPE_OP_MODE_SCALEX_EN;
+ /* TODO calc phasex_step */
+ }
+
+ if (src_h != crtc_h) {
+ op_mode |= MDP4_PIPE_OP_MODE_SCALEY_EN;
+ /* TODO calc phasey_step */
+ }
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_SIZE(pipe),
+ MDP4_PIPE_SRC_SIZE_WIDTH(src_w) |
+ MDP4_PIPE_SRC_SIZE_HEIGHT(src_h));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_XY(pipe),
+ MDP4_PIPE_SRC_XY_X(src_x) |
+ MDP4_PIPE_SRC_XY_Y(src_y));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_SIZE(pipe),
+ MDP4_PIPE_DST_SIZE_WIDTH(crtc_w) |
+ MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
+ MDP4_PIPE_SRC_XY_X(crtc_x) |
+ MDP4_PIPE_SRC_XY_Y(crtc_y));
+
+ mdp4_plane_set_scanout(plane, fb);
+
+ format = to_mdp4_format(msm_framebuffer_format(fb));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_FORMAT(pipe),
+ MDP4_PIPE_SRC_FORMAT_A_BPC(format->bpc_a) |
+ MDP4_PIPE_SRC_FORMAT_R_BPC(format->bpc_r) |
+ MDP4_PIPE_SRC_FORMAT_G_BPC(format->bpc_g) |
+ MDP4_PIPE_SRC_FORMAT_B_BPC(format->bpc_b) |
+ COND(format->alpha_enable, MDP4_PIPE_SRC_FORMAT_ALPHA_ENABLE) |
+ MDP4_PIPE_SRC_FORMAT_CPP(format->cpp - 1) |
+ MDP4_PIPE_SRC_FORMAT_UNPACK_COUNT(format->unpack_count - 1) |
+ COND(format->unpack_tight, MDP4_PIPE_SRC_FORMAT_UNPACK_TIGHT));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_UNPACK(pipe),
+ MDP4_PIPE_SRC_UNPACK_ELEM0(format->unpack[0]) |
+ MDP4_PIPE_SRC_UNPACK_ELEM1(format->unpack[1]) |
+ MDP4_PIPE_SRC_UNPACK_ELEM2(format->unpack[2]) |
+ MDP4_PIPE_SRC_UNPACK_ELEM3(format->unpack[3]));
+
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_OP_MODE(pipe), op_mode);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
+ mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
+
+ plane->crtc = crtc;
+
+ return 0;
+}
+
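+/* indexed by enum mpd4_pipe, so the ordering must match the generated headers: */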
+static const char *pipe_names[] = {
+ "VG1", "VG2",
+ "RGB1", "RGB2", "RGB3",
+ "VG3", "VG4",
+};
+
+enum mpd4_pipe mdp4_plane_pipe(struct drm_plane *plane)
+{
+ struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
+ return mdp4_plane->pipe;
+}
+
+/* initialize plane */
+struct drm_plane *mdp4_plane_init(struct drm_device *dev,
+ enum mpd4_pipe pipe_id, bool private_plane)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_plane *plane = NULL;
+ struct mdp4_plane *mdp4_plane;
+ int ret;
+
+ mdp4_plane = kzalloc(sizeof(*mdp4_plane), GFP_KERNEL);
+ if (!mdp4_plane) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ plane = &mdp4_plane->base;
+
+ mdp4_plane->pipe = pipe_id;
+ mdp4_plane->name = pipe_names[pipe_id];
+
+ drm_plane_init(dev, plane, (1 << priv->num_crtcs) - 1, &mdp4_plane_funcs,
+ mdp4_plane->formats, mdp4_plane->nformats, private_plane);
+
+ mdp4_plane_install_properties(plane, &plane->base);
+
+ return plane;
+
+fail:
+ if (plane)
+ mdp4_plane_destroy(plane);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
new file mode 100644
index 00000000000..864c9773636
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -0,0 +1,776 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gpu.h"
+
+#include <mach/iommu.h>
+
+static void msm_fb_output_poll_changed(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ if (priv->fbdev)
+ drm_fb_helper_hotplug_event(priv->fbdev);
+}
+
+static const struct drm_mode_config_funcs mode_config_funcs = {
+ .fb_create = msm_framebuffer_create,
+ .output_poll_changed = msm_fb_output_poll_changed,
+};
+
+static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
+ unsigned long iova, int flags, void *arg)
+{
+ DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
+ return 0;
+}
+
+int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ int idx = priv->num_iommus++;
+
+ if (WARN_ON(idx >= ARRAY_SIZE(priv->iommus)))
+ return -EINVAL;
+
+ priv->iommus[idx] = iommu;
+
+ iommu_set_fault_handler(iommu, msm_fault_handler, dev);
+
+ /* need to iommu_attach_device() somewhere?? on resume?? */
+
+ return idx;
+}
+
+int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
+ const char **names, int cnt)
+{
+ int i, ret;
+
+ for (i = 0; i < cnt; i++) {
+ struct device *ctx = msm_iommu_get_ctx(names[i]);
+ if (!ctx)
+ continue;
+ ret = iommu_attach_device(iommu, ctx);
+ if (ret) {
+ dev_warn(dev->dev, "could not attach iommu to %s\n", names[i]);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
+static bool reglog = false;
+MODULE_PARM_DESC(reglog, "Enable register read/write logging");
+module_param(reglog, bool, 0600);
+#else
+#define reglog 0
+#endif
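+/* (hypothetical usage, not part of this patch: with register logging built
+ * in, booting with msm.reglog=1 or writing 1 to
+ * /sys/module/msm/parameters/reglog enables the IO traces below)
+ */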
+
+void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
+ const char *dbgname)
+{
+ struct resource *res;
+ unsigned long size;
+ void __iomem *ptr;
+
+ if (name)
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ else
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (!res) {
+ dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ size = resource_size(res);
+
+ ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
+ if (!ptr) {
+ dev_err(&pdev->dev, "failed to ioremap: %s\n", name);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (reglog)
+ printk(KERN_DEBUG "IO:region %s %08x %08lx\n", dbgname, (u32)ptr, size);
+
+ return ptr;
+}
+
+void msm_writel(u32 data, void __iomem *addr)
+{
+ if (reglog)
+ printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data);
+ writel(data, addr);
+}
+
+u32 msm_readl(const void __iomem *addr)
+{
+ u32 val = readl(addr);
+ if (reglog)
+ printk(KERN_DEBUG "IO:R %08x %08x\n", (u32)addr, val);
+ return val;
+}
+
+/*
+ * DRM operations:
+ */
+
+static int msm_unload(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ struct msm_gpu *gpu = priv->gpu;
+
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+ drm_vblank_cleanup(dev);
+
+ pm_runtime_get_sync(dev->dev);
+ drm_irq_uninstall(dev);
+ pm_runtime_put_sync(dev->dev);
+
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+
+ if (kms) {
+ pm_runtime_disable(dev->dev);
+ kms->funcs->destroy(kms);
+ }
+
+ if (gpu) {
+ mutex_lock(&dev->struct_mutex);
+ gpu->funcs->pm_suspend(gpu);
+ gpu->funcs->destroy(gpu);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ dev->dev_private = NULL;
+
+ kfree(priv);
+
+ return 0;
+}
+
+static int msm_load(struct drm_device *dev, unsigned long flags)
+{
+ struct platform_device *pdev = dev->platformdev;
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev->dev, "failed to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ dev->dev_private = priv;
+
+ priv->wq = alloc_ordered_workqueue("msm", 0);
+ init_waitqueue_head(&priv->fence_event);
+
+ INIT_LIST_HEAD(&priv->inactive_list);
+
+ drm_mode_config_init(dev);
+
+ kms = mdp4_kms_init(dev);
+ if (IS_ERR(kms)) {
+ /*
+ * NOTE: once we have GPU support, having no kms should not
+ * be considered fatal.. ideally we would still support gpu
+ * and (for example) use dmabuf/prime to share buffers with
+ * imx drm driver on iMX5
+ */
+ dev_err(dev->dev, "failed to load kms\n");
+ ret = PTR_ERR(kms);
+ goto fail;
+ }
+
+ priv->kms = kms;
+
+ if (kms) {
+ pm_runtime_enable(dev->dev);
+ ret = kms->funcs->hw_init(kms);
+ if (ret) {
+ dev_err(dev->dev, "kms hw init failed: %d\n", ret);
+ goto fail;
+ }
+ }
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ dev->mode_config.funcs = &mode_config_funcs;
+
+ ret = drm_vblank_init(dev, 1);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to initialize vblank\n");
+ goto fail;
+ }
+
+ pm_runtime_get_sync(dev->dev);
+ ret = drm_irq_install(dev);
+ pm_runtime_put_sync(dev->dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to install IRQ handler\n");
+ goto fail;
+ }
+
+ platform_set_drvdata(pdev, dev);
+
+#ifdef CONFIG_DRM_MSM_FBDEV
+ priv->fbdev = msm_fbdev_init(dev);
+#endif
+
+ drm_kms_helper_poll_init(dev);
+
+ return 0;
+
+fail:
+ msm_unload(dev);
+ return ret;
+}
+
+static void load_gpu(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu;
+
+ if (priv->gpu)
+ return;
+
+ mutex_lock(&dev->struct_mutex);
+ gpu = a3xx_gpu_init(dev);
+ if (IS_ERR(gpu)) {
+ dev_warn(dev->dev, "failed to load a3xx gpu\n");
+ gpu = NULL;
+ /* not fatal */
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ if (gpu) {
+ int ret;
+ gpu->funcs->pm_resume(gpu);
+ ret = gpu->funcs->hw_init(gpu);
+ if (ret) {
+ dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+ gpu->funcs->destroy(gpu);
+ gpu = NULL;
+ }
+ }
+
+ priv->gpu = gpu;
+}
+
+static int msm_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct msm_file_private *ctx;
+
+ /* For now, load gpu on open.. to avoid the requirement of having
+ * firmware in the initrd.
+ */
+ load_gpu(dev);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ file->driver_priv = ctx;
+
+ return 0;
+}
+
+static void msm_preclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_file_private *ctx = file->driver_priv;
+ struct msm_kms *kms = priv->kms;
+
+ if (kms)
+ kms->funcs->preclose(kms, file);
+
+ mutex_lock(&dev->struct_mutex);
+ if (ctx == priv->lastctx)
+ priv->lastctx = NULL;
+ mutex_unlock(&dev->struct_mutex);
+
+ kfree(ctx);
+}
+
+static void msm_lastclose(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ if (priv->fbdev) {
+ drm_modeset_lock_all(dev);
+ drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+ drm_modeset_unlock_all(dev);
+ }
+}
+
+static irqreturn_t msm_irq(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = arg;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ BUG_ON(!kms);
+ return kms->funcs->irq(kms);
+}
+
+static void msm_irq_preinstall(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ BUG_ON(!kms);
+ kms->funcs->irq_preinstall(kms);
+}
+
+static int msm_irq_postinstall(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ BUG_ON(!kms);
+ return kms->funcs->irq_postinstall(kms);
+}
+
+static void msm_irq_uninstall(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ BUG_ON(!kms);
+ kms->funcs->irq_uninstall(kms);
+}
+
+static int msm_enable_vblank(struct drm_device *dev, int crtc_id)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ if (!kms)
+ return -ENXIO;
+ DBG("dev=%p, crtc=%d", dev, crtc_id);
+ return kms->funcs->enable_vblank(kms, priv->crtcs[crtc_id]);
+}
+
+static void msm_disable_vblank(struct drm_device *dev, int crtc_id)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ if (!kms)
+ return;
+ DBG("dev=%p, crtc=%d", dev, crtc_id);
+ kms->funcs->disable_vblank(kms, priv->crtcs[crtc_id]);
+}
+
+/*
+ * DRM debugfs:
+ */
+
+#ifdef CONFIG_DEBUG_FS
+static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+
+ if (gpu) {
+ seq_printf(m, "%s Status:\n", gpu->name);
+ gpu->funcs->show(gpu, m);
+ }
+
+ return 0;
+}
+
+static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+
+ if (gpu) {
+ seq_printf(m, "Active Objects (%s):\n", gpu->name);
+ msm_gem_describe_objects(&gpu->active_list, m);
+ }
+
+ seq_printf(m, "Inactive Objects:\n");
+ msm_gem_describe_objects(&priv->inactive_list, m);
+
+ return 0;
+}
+
+static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
+{
+ return drm_mm_dump_table(m, dev->mm_private);
+}
+
+static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_framebuffer *fb, *fbdev_fb = NULL;
+
+ if (priv->fbdev) {
+ seq_printf(m, "fbcon ");
+ fbdev_fb = priv->fbdev->fb;
+ msm_framebuffer_describe(fbdev_fb, m);
+ }
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+ if (fb == fbdev_fb)
+ continue;
+
+ seq_printf(m, "user ");
+ msm_framebuffer_describe(fb, m);
+ }
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return 0;
+}
+
+static int show_locked(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ int (*show)(struct drm_device *dev, struct seq_file *m) =
+ node->info_ent->data;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ ret = show(dev, m);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+static struct drm_info_list msm_debugfs_list[] = {
+ { "gpu", show_locked, 0, msm_gpu_show },
+ { "gem", show_locked, 0, msm_gem_show },
+ { "mm", show_locked, 0, msm_mm_show },
+ { "fb", show_locked, 0, msm_fb_show },
+};
+
+static int msm_debugfs_init(struct drm_minor *minor)
+{
+ struct drm_device *dev = minor->dev;
+ int ret;
+
+ ret = drm_debugfs_create_files(msm_debugfs_list,
+ ARRAY_SIZE(msm_debugfs_list),
+ minor->debugfs_root, minor);
+
+ if (ret) {
+ dev_err(dev->dev, "could not install msm_debugfs_list\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static void msm_debugfs_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(msm_debugfs_list,
+ ARRAY_SIZE(msm_debugfs_list), minor);
+}
+#endif
+
+/*
+ * Fences:
+ */
+
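+/* the timeout is treated as an absolute expiry time: it is converted to
+ * jiffies up front, and an already-expired timeout just checks the fence once.
+ */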
+int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
+ struct timespec *timeout)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ unsigned long timeout_jiffies = timespec_to_jiffies(timeout);
+ unsigned long start_jiffies = jiffies;
+ unsigned long remaining_jiffies;
+ int ret;
+
+ if (time_after(start_jiffies, timeout_jiffies))
+ remaining_jiffies = 0;
+ else
+ remaining_jiffies = timeout_jiffies - start_jiffies;
+
+ ret = wait_event_interruptible_timeout(priv->fence_event,
+ priv->completed_fence >= fence,
+ remaining_jiffies);
+ if (ret == 0) {
+ DBG("timeout waiting for fence: %u (completed: %u)",
+ fence, priv->completed_fence);
+ ret = -ETIMEDOUT;
+ } else if (ret != -ERESTARTSYS) {
+ ret = 0;
+ }
+
+ return ret;
+}
+
+/* call under struct_mutex */
+void msm_update_fence(struct drm_device *dev, uint32_t fence)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+
+ if (fence > priv->completed_fence) {
+ priv->completed_fence = fence;
+ wake_up_all(&priv->fence_event);
+ }
+}
+
+/*
+ * DRM ioctls:
+ */
+
+static int msm_ioctl_get_param(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_param *args = data;
+ struct msm_gpu *gpu;
+
+ /* for now, we just have 3d pipe.. eventually this would need to
+ * be more clever to dispatch to appropriate gpu module:
+ */
+ if (args->pipe != MSM_PIPE_3D0)
+ return -EINVAL;
+
+ gpu = priv->gpu;
+
+ if (!gpu)
+ return -ENXIO;
+
+ return gpu->funcs->get_param(gpu, args->param, &args->value);
+}
+
+static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_new *args = data;
+ return msm_gem_new_handle(dev, file, args->size,
+ args->flags, &args->handle);
+}
+
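+/* re-pack the uapi timeout's sec/nsec pair as a kernel struct timespec: */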
+#define TS(t) ((struct timespec){ .tv_sec = (t).tv_sec, .tv_nsec = (t).tv_nsec })
+
+static int msm_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_cpu_prep *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = msm_gem_cpu_prep(obj, args->op, &TS(args->timeout));
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int msm_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_cpu_fini *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ ret = msm_gem_cpu_fini(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_info *args = data;
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ if (args->pad)
+ return -EINVAL;
+
+ obj = drm_gem_object_lookup(dev, file, args->handle);
+ if (!obj)
+ return -ENOENT;
+
+ args->offset = msm_gem_mmap_offset(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_wait_fence *args = data;
+ return msm_wait_fence_interruptable(dev, args->fence, &TS(args->timeout));
+}
+
+static const struct drm_ioctl_desc msm_ioctls[] = {
+ DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_INFO, msm_ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_PREP, msm_ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_UNLOCKED|DRM_AUTH),
+};
+
+static const struct vm_operations_struct vm_ops = {
+ .fault = msm_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .mmap = msm_gem_mmap,
+};
+
+static struct drm_driver msm_driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET,
+ .load = msm_load,
+ .unload = msm_unload,
+ .open = msm_open,
+ .preclose = msm_preclose,
+ .lastclose = msm_lastclose,
+ .irq_handler = msm_irq,
+ .irq_preinstall = msm_irq_preinstall,
+ .irq_postinstall = msm_irq_postinstall,
+ .irq_uninstall = msm_irq_uninstall,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = msm_enable_vblank,
+ .disable_vblank = msm_disable_vblank,
+ .gem_free_object = msm_gem_free_object,
+ .gem_vm_ops = &vm_ops,
+ .dumb_create = msm_gem_dumb_create,
+ .dumb_map_offset = msm_gem_dumb_map_offset,
+ .dumb_destroy = msm_gem_dumb_destroy,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = msm_debugfs_init,
+ .debugfs_cleanup = msm_debugfs_cleanup,
+#endif
+ .ioctls = msm_ioctls,
+ .num_ioctls = DRM_MSM_NUM_IOCTLS,
+ .fops = &fops,
+ .name = "msm",
+ .desc = "MSM Snapdragon DRM",
+ .date = "20130625",
+ .major = 1,
+ .minor = 0,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static int msm_pm_suspend(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+
+ drm_kms_helper_poll_disable(ddev);
+
+ return 0;
+}
+
+static int msm_pm_resume(struct device *dev)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+
+ drm_kms_helper_poll_enable(ddev);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops msm_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
+};
+
+/*
+ * Platform driver:
+ */
+
+static int msm_pdev_probe(struct platform_device *pdev)
+{
+ return drm_platform_init(&msm_driver, pdev);
+}
+
+static int msm_pdev_remove(struct platform_device *pdev)
+{
+ drm_platform_exit(&msm_driver, pdev);
+
+ return 0;
+}
+
+static const struct platform_device_id msm_id[] = {
+ { "mdp", 0 },
+ { }
+};
+
+static struct platform_driver msm_platform_driver = {
+ .probe = msm_pdev_probe,
+ .remove = msm_pdev_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "msm",
+ .pm = &msm_pm_ops,
+ },
+ .id_table = msm_id,
+};
+
+static int __init msm_drm_register(void)
+{
+ DBG("init");
+ hdmi_register();
+ a3xx_register();
+ return platform_driver_register(&msm_platform_driver);
+}
+
+static void __exit msm_drm_unregister(void)
+{
+ DBG("fini");
+ platform_driver_unregister(&msm_platform_driver);
+ hdmi_unregister();
+ a3xx_unregister();
+}
+
+module_init(msm_drm_register);
+module_exit(msm_drm_unregister);
+
+MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
+MODULE_DESCRIPTION("MSM DRM Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
new file mode 100644
index 00000000000..80d75094bf0
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_DRV_H__
+#define __MSM_DRV_H__
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/iommu.h>
+#include <linux/types.h>
+#include <asm/sizes.h>
+
+#ifndef CONFIG_OF
+#include <mach/board.h>
+#include <mach/socinfo.h>
+#include <mach/iommu_domains.h>
+#endif
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/msm_drm.h>
+
+struct msm_kms;
+struct msm_gpu;
+
+#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
+
+struct msm_file_private {
+ /* currently we don't do anything useful with this.. but when
+ * per-context address spaces are supported we'd keep track of
+ * the context's page-tables here.
+ */
+ int dummy;
+};
+
+struct msm_drm_private {
+
+ struct msm_kms *kms;
+
+ /* when we have more than one 'msm_gpu' these need to be an array: */
+ struct msm_gpu *gpu;
+ struct msm_file_private *lastctx;
+
+ struct drm_fb_helper *fbdev;
+
+ uint32_t next_fence, completed_fence;
+ wait_queue_head_t fence_event;
+
+ /* list of GEM objects: */
+ struct list_head inactive_list;
+
+ struct workqueue_struct *wq;
+
+ /* registered IOMMU domains: */
+ unsigned int num_iommus;
+ struct iommu_domain *iommus[NUM_DOMAINS];
+
+ unsigned int num_crtcs;
+ struct drm_crtc *crtcs[8];
+
+ unsigned int num_encoders;
+ struct drm_encoder *encoders[8];
+
+ unsigned int num_bridges;
+ struct drm_bridge *bridges[8];
+
+ unsigned int num_connectors;
+ struct drm_connector *connectors[8];
+};
+
+struct msm_format {
+ uint32_t pixel_format;
+};
+
+/* As there are different display controller blocks depending on the
+ * snapdragon version, the kms support is split out and the appropriate
+ * implementation is loaded at runtime. The kms module is responsible
+ * for constructing the appropriate planes/crtcs/encoders/connectors.
+ */
+struct msm_kms_funcs {
+ /* hw initialization: */
+ int (*hw_init)(struct msm_kms *kms);
+ /* irq handling: */
+ void (*irq_preinstall)(struct msm_kms *kms);
+ int (*irq_postinstall)(struct msm_kms *kms);
+ void (*irq_uninstall)(struct msm_kms *kms);
+ irqreturn_t (*irq)(struct msm_kms *kms);
+ int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+ void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
+ /* misc: */
+ const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
+ long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
+ struct drm_encoder *encoder);
+ /* cleanup: */
+ void (*preclose)(struct msm_kms *kms, struct drm_file *file);
+ void (*destroy)(struct msm_kms *kms);
+};
+
+struct msm_kms {
+ const struct msm_kms_funcs *funcs;
+};
+
+struct msm_kms *mdp4_kms_init(struct drm_device *dev);
+
+int msm_register_iommu(struct drm_device *dev, struct iommu_domain *iommu);
+int msm_iommu_attach(struct drm_device *dev, struct iommu_domain *iommu,
+ const char **names, int cnt);
+
+int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
+ struct timespec *timeout);
+void msm_update_fence(struct drm_device *dev, uint32_t fence);
+
+int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ struct drm_file *file);
+
+int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
+int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
+ uint32_t *iova);
+int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
+void msm_gem_put_iova(struct drm_gem_object *obj, int id);
+int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle);
+int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
+void *msm_gem_vaddr(struct drm_gem_object *obj);
+int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
+ struct work_struct *work);
+void msm_gem_move_to_active(struct drm_gem_object *obj,
+ struct msm_gpu *gpu, uint32_t fence);
+void msm_gem_move_to_inactive(struct drm_gem_object *obj);
+int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
+ struct timespec *timeout);
+int msm_gem_cpu_fini(struct drm_gem_object *obj);
+void msm_gem_free_object(struct drm_gem_object *obj);
+int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ uint32_t size, uint32_t flags, uint32_t *handle);
+struct drm_gem_object *msm_gem_new(struct drm_device *dev,
+ uint32_t size, uint32_t flags);
+
+struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
+const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
+struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
+struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd);
+
+struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
+
+int hdmi_init(struct drm_device *dev, struct drm_encoder *encoder);
+void __init hdmi_register(void);
+void __exit hdmi_unregister(void);
+
+#ifdef CONFIG_DEBUG_FS
+void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
+void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
+void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m);
+#endif
+
+void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
+ const char *dbgname);
+void msm_writel(u32 data, void __iomem *addr);
+u32 msm_readl(const void __iomem *addr);
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+
+static inline int align_pitch(int width, int bpp)
+{
+ int bytespp = (bpp + 7) / 8;
+ /* adreno needs pitch aligned to 32 pixels: */
+ return bytespp * ALIGN(width, 32);
+}
+
+/* for the generated headers: */
+#define INVALID_IDX(idx) ({BUG(); 0;})
+#define fui(x) ({BUG(); 0;})
+#define util_float_to_half(x) ({BUG(); 0;})
+
+
+#define FIELD(val, name) (((val) & name ## __MASK) >> name ## __SHIFT)
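+/* e.g. FIELD(version, MDP4_VERSION_MAJOR) extracts a bitfield using the
+ * NAME__MASK / NAME__SHIFT pairs from the generated register headers.
+ */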
+
+/* for conditionally setting boolean flag(s): */
+#define COND(bool, val) ((bool) ? (val) : 0)
+
+
+#endif /* __MSM_DRV_H__ */
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
new file mode 100644
index 00000000000..0286c0eeb10
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+struct msm_framebuffer {
+ struct drm_framebuffer base;
+ const struct msm_format *format;
+ struct drm_gem_object *planes[2];
+};
+#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
+
+
+static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ return drm_gem_handle_create(file_priv,
+ msm_fb->planes[0], handle);
+}
+
+static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ int i, n = drm_format_num_planes(fb->pixel_format);
+
+ DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
+
+ drm_framebuffer_cleanup(fb);
+
+ for (i = 0; i < n; i++) {
+ struct drm_gem_object *bo = msm_fb->planes[i];
+ if (bo)
+ drm_gem_object_unreference_unlocked(bo);
+ }
+
+ kfree(msm_fb);
+}
+
+static int msm_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned flags, unsigned color,
+ struct drm_clip_rect *clips, unsigned num_clips)
+{
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
+ .create_handle = msm_framebuffer_create_handle,
+ .destroy = msm_framebuffer_destroy,
+ .dirty = msm_framebuffer_dirty,
+};
+
+#ifdef CONFIG_DEBUG_FS
+void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ int i, n = drm_format_num_planes(fb->pixel_format);
+
+ seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
+ fb->width, fb->height, (char *)&fb->pixel_format,
+ fb->refcount.refcount.counter, fb->base.id);
+
+ for (i = 0; i < n; i++) {
+ seq_printf(m, " %d: offset=%d pitch=%d, obj: ",
+ i, fb->offsets[i], fb->pitches[i]);
+ msm_gem_describe(msm_fb->planes[i], m);
+ }
+}
+#endif
+
+struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ return msm_fb->planes[plane];
+}
+
+const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
+{
+ struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ return msm_fb->format;
+}
+
+struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *bos[4] = {0};
+ struct drm_framebuffer *fb;
+ int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
+
+ for (i = 0; i < n; i++) {
+ bos[i] = drm_gem_object_lookup(dev, file,
+ mode_cmd->handles[i]);
+ if (!bos[i]) {
+ ret = -ENXIO;
+ goto out_unref;
+ }
+ }
+
+ fb = msm_framebuffer_init(dev, mode_cmd, bos);
+ if (IS_ERR(fb)) {
+ ret = PTR_ERR(fb);
+ goto out_unref;
+ }
+
+ return fb;
+
+out_unref:
+ for (i = 0; i < n; i++)
+ drm_gem_object_unreference_unlocked(bos[i]);
+ return ERR_PTR(ret);
+}
+
+struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ struct msm_framebuffer *msm_fb;
+ struct drm_framebuffer *fb = NULL;
+ const struct msm_format *format;
+ int ret, i, n;
+ unsigned int hsub, vsub;
+
+ DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
+ dev, mode_cmd, mode_cmd->width, mode_cmd->height,
+ (char *)&mode_cmd->pixel_format);
+
+ n = drm_format_num_planes(mode_cmd->pixel_format);
+ hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
+ vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
+
+ format = kms->funcs->get_format(kms, mode_cmd->pixel_format);
+ if (!format) {
+ dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
+ (char *)&mode_cmd->pixel_format);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ msm_fb = kzalloc(sizeof(*msm_fb), GFP_KERNEL);
+ if (!msm_fb) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ fb = &msm_fb->base;
+
+ msm_fb->format = format;
+
+ for (i = 0; i < n; i++) {
+ unsigned int width = mode_cmd->width / (i ? hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ unsigned int min_size;
+
+ min_size = (height - 1) * mode_cmd->pitches[i]
+ + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
+ + mode_cmd->offsets[i];
+
+ if (bos[i]->size < min_size) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ msm_fb->planes[i] = bos[i];
+ }
+
+ drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+
+ ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs);
+ if (ret) {
+ dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+ goto fail;
+ }
+
+ DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+
+ return fb;
+
+fail:
+ if (fb)
+ msm_framebuffer_destroy(fb);
+
+ return ERR_PTR(ret);
+}
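The min_size check in the loop above guards against undersized buffer objects; the standalone snippet below is not part of the patch, it simply works the same arithmetic through a hypothetical single-plane 1920x1080 XRGB8888 framebuffer to show that, with a tight pitch and zero offset, it reduces to pitch * height.

#include <stdio.h>
#include <stdint.h>

/* Worked example of the min_size formula from msm_framebuffer_init(),
 * for a hypothetical 1920x1080 XRGB8888 (4 bytes per pixel) plane.
 */
int main(void)
{
	uint32_t width = 1920, height = 1080;
	uint32_t cpp = 4;                /* bytes per pixel for XRGB8888 */
	uint32_t pitch = width * cpp;    /* tightly packed: 7680 bytes */
	uint32_t offset = 0;

	/* every line but the last needs a full pitch; the last only width*cpp */
	uint32_t min_size = (height - 1) * pitch + width * cpp + offset;

	printf("min_size = %u, pitch * height = %u\n", min_size, pitch * height);
	return 0;
}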
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
new file mode 100644
index 00000000000..6c6d7d4c9b4
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+
+/*
+ * fbdev funcs, to implement legacy fbdev interface on top of drm driver
+ */
+
+#define to_msm_fbdev(x) container_of(x, struct msm_fbdev, base)
+
+struct msm_fbdev {
+ struct drm_fb_helper base;
+ struct drm_framebuffer *fb;
+ struct drm_gem_object *bo;
+};
+
+static struct fb_ops msm_fb_ops = {
+ .owner = THIS_MODULE,
+
+ /* Note: to properly handle manual update displays, we wrap the
+ * basic fbdev ops which write to the framebuffer
+ */
+ .fb_read = fb_sys_read,
+ .fb_write = fb_sys_write,
+ .fb_fillrect = sys_fillrect,
+ .fb_copyarea = sys_copyarea,
+ .fb_imageblit = sys_imageblit,
+
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_pan_display = drm_fb_helper_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+};
+
+static int msm_fbdev_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct msm_fbdev *fbdev = to_msm_fbdev(helper);
+ struct drm_device *dev = helper->dev;
+ struct drm_framebuffer *fb = NULL;
+ struct fb_info *fbi = NULL;
+ struct drm_mode_fb_cmd2 mode_cmd = {0};
+ dma_addr_t paddr;
+ int ret, size;
+
+ /* only doing ARGB32 since this is what is needed to alpha-blend
+ * with video overlays:
+ */
+ sizes->surface_bpp = 32;
+ sizes->surface_depth = 32;
+
+ DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
+ sizes->surface_height, sizes->surface_bpp,
+ sizes->fb_width, sizes->fb_height);
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+ mode_cmd.pitches[0] = align_pitch(
+ mode_cmd.width, sizes->surface_bpp);
+
+ /* allocate backing bo */
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ DBG("allocating %d bytes for fb %d", size, dev->primary->index);
+ mutex_lock(&dev->struct_mutex);
+ fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT | MSM_BO_WC);
+ mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR(fbdev->bo)) {
+ ret = PTR_ERR(fbdev->bo);
+ fbdev->bo = NULL;
+ dev_err(dev->dev, "failed to allocate buffer object: %d\n", ret);
+ goto fail;
+ }
+
+ fb = msm_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
+ if (IS_ERR(fb)) {
+ dev_err(dev->dev, "failed to allocate fb\n");
+ /* note: if fb creation failed, we can't rely on fb destroy
+ * to unref the bo:
+ */
+ drm_gem_object_unreference(fbdev->bo);
+ ret = PTR_ERR(fb);
+ goto fail;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ /* TODO implement our own fb_mmap so we don't need this: */
+ msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
+
+ fbi = framebuffer_alloc(0, dev->dev);
+ if (!fbi) {
+ dev_err(dev->dev, "failed to allocate fb info\n");
+ ret = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ DBG("fbi=%p, dev=%p", fbi, dev);
+
+ fbdev->fb = fb;
+ helper->fb = fb;
+ helper->fbdev = fbi;
+
+ fbi->par = helper;
+ fbi->flags = FBINFO_DEFAULT;
+ fbi->fbops = &msm_fb_ops;
+
+ strcpy(fbi->fix.id, "msm");
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+
+ dev->mode_config.fb_base = paddr;
+
+ fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
+ fbi->screen_size = fbdev->bo->size;
+ fbi->fix.smem_start = paddr;
+ fbi->fix.smem_len = fbdev->bo->size;
+
+ DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
+ DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+
+fail_unlock:
+ mutex_unlock(&dev->struct_mutex);
+fail:
+
+ if (ret) {
+ if (fbi)
+ framebuffer_release(fbi);
+ if (fb) {
+ drm_framebuffer_unregister_private(fb);
+ drm_framebuffer_remove(fb);
+ }
+ }
+
+ return ret;
+}
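The "TODO implement our own fb_mmap" above could be resolved along the lines of the sketch below. This is hypothetical code, not part of this patch: it assumes the surrounding msm_fbdev.c context, assumes drm_gem_mmap_obj() is available to set up the VMA before the driver's caching attributes are applied, and would be hooked up via a .fb_mmap entry in msm_fb_ops.

/* Hypothetical fb_mmap handler (sketch only): map the fbdev backing bo
 * directly instead of going through smem_start.
 */
static int msm_fbdev_mmap(struct fb_info *fbi, struct vm_area_struct *vma)
{
	struct drm_fb_helper *helper = (struct drm_fb_helper *)fbi->par;
	struct msm_fbdev *fbdev = to_msm_fbdev(helper);
	struct drm_gem_object *bo = fbdev->bo;
	int ret;

	ret = drm_gem_mmap_obj(bo, bo->size, vma);
	if (ret)
		return ret;

	/* apply the WC/uncached attributes chosen at allocation time */
	return msm_gem_mmap_obj(bo, vma);
}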
+
+static void msm_crtc_fb_gamma_set(struct drm_crtc *crtc,
+ u16 red, u16 green, u16 blue, int regno)
+{
+ DBG("fbdev: set gamma");
+}
+
+static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue, int regno)
+{
+ DBG("fbdev: get gamma");
+}
+
+static struct drm_fb_helper_funcs msm_fb_helper_funcs = {
+ .gamma_set = msm_crtc_fb_gamma_set,
+ .gamma_get = msm_crtc_fb_gamma_get,
+ .fb_probe = msm_fbdev_create,
+};
+
+/* initialize fbdev helper */
+struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_fbdev *fbdev = NULL;
+ struct drm_fb_helper *helper;
+ int ret = 0;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev)
+ goto fail;
+
+ helper = &fbdev->base;
+
+ helper->funcs = &msm_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(dev, helper,
+ priv->num_crtcs, priv->num_connectors);
+ if (ret) {
+ dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
+ goto fail;
+ }
+
+ drm_fb_helper_single_add_all_connectors(helper);
+
+ /* disable all the possible outputs/crtcs before entering KMS mode */
+ drm_helper_disable_unused_functions(dev);
+
+ drm_fb_helper_initial_config(helper, 32);
+
+ priv->fbdev = helper;
+
+ return helper;
+
+fail:
+ kfree(fbdev);
+ return NULL;
+}
+
+void msm_fbdev_free(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_fb_helper *helper = priv->fbdev;
+ struct msm_fbdev *fbdev;
+ struct fb_info *fbi;
+
+ DBG();
+
+ fbi = helper->fbdev;
+
+ /* only cleanup framebuffer if it is present */
+ if (fbi) {
+ unregister_framebuffer(fbi);
+ framebuffer_release(fbi);
+ }
+
+ drm_fb_helper_fini(helper);
+
+ fbdev = to_msm_fbdev(priv->fbdev);
+
+ /* this will free the backing object */
+ if (fbdev->fb) {
+ drm_framebuffer_unregister_private(fbdev->fb);
+ drm_framebuffer_remove(fbdev->fb);
+ }
+
+ kfree(fbdev);
+
+ priv->fbdev = NULL;
+}
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
new file mode 100644
index 00000000000..6b5a6c8c765
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -0,0 +1,597 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/shmem_fs.h>
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_gpu.h"
+
+
+/* called with dev->struct_mutex held */
+static struct page **get_pages(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ if (!msm_obj->pages) {
+ struct drm_device *dev = obj->dev;
+ struct page **p = drm_gem_get_pages(obj, 0);
+ int npages = obj->size >> PAGE_SHIFT;
+
+ if (IS_ERR(p)) {
+ dev_err(dev->dev, "could not get pages: %ld\n",
+ PTR_ERR(p));
+ return p;
+ }
+
+ msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
+ if (!msm_obj->sgt) {
+ dev_err(dev->dev, "failed to allocate sgt\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ msm_obj->pages = p;
+
+ /* For non-cached buffers, ensure the new pages are clean
+ * because display controller, GPU, etc. are not coherent:
+ */
+ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ dma_map_sg(dev->dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ }
+
+ return msm_obj->pages;
+}
+
+static void put_pages(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ if (msm_obj->pages) {
+ /* For non-cached buffers, ensure the new pages are clean
+ * because display controller, GPU, etc. are not coherent:
+ */
+ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ sg_free_table(msm_obj->sgt);
+ kfree(msm_obj->sgt);
+
+ drm_gem_put_pages(obj, msm_obj->pages, true, false);
+ msm_obj->pages = NULL;
+ }
+}
+
+int msm_gem_mmap_obj(struct drm_gem_object *obj,
+ struct vm_area_struct *vma)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
+
+ if (msm_obj->flags & MSM_BO_WC) {
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ } else if (msm_obj->flags & MSM_BO_UNCACHED) {
+ vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+ } else {
+ /*
+ * Shunt off cached objs to shmem file so they have their own
+ * address_space (so unmap_mapping_range does what we want,
+ * in particular in the case of mmap'd dmabufs)
+ */
+ fput(vma->vm_file);
+ get_file(obj->filp);
+ vma->vm_pgoff = 0;
+ vma->vm_file = obj->filp;
+
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ }
+
+ return 0;
+}
+
+int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret) {
+ DBG("mmap failed: %d", ret);
+ return ret;
+ }
+
+ return msm_gem_mmap_obj(vma->vm_private_data, vma);
+}
+
+int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct drm_device *dev = obj->dev;
+ struct page **pages;
+ unsigned long pfn;
+ pgoff_t pgoff;
+ int ret;
+
+ /* Make sure we don't parallel update on a fault, nor move or remove
+ * something from beneath our feet
+ */
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ goto out;
+
+ /* make sure we have pages attached now */
+ pages = get_pages(obj);
+ if (IS_ERR(pages)) {
+ ret = PTR_ERR(pages);
+ goto out_unlock;
+ }
+
+ /* We don't use vmf->pgoff since that has the fake offset: */
+ pgoff = ((unsigned long)vmf->virtual_address -
+ vma->vm_start) >> PAGE_SHIFT;
+
+ pfn = page_to_pfn(msm_obj->pages[pgoff]);
+
+ VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ pfn, pfn << PAGE_SHIFT);
+
+ ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+out:
+ switch (ret) {
+ case -EAGAIN:
+ set_need_resched();
+ /* fallthrough */
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+/** get mmap offset */
+static uint64_t mmap_offset(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ /* Make it mmapable */
+ ret = drm_gem_create_mmap_offset(obj);
+
+ if (ret) {
+ dev_err(dev->dev, "could not allocate mmap offset\n");
+ return 0;
+ }
+
+ return drm_vma_node_offset_addr(&obj->vma_node);
+}
+
+uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
+{
+ uint64_t offset;
+ mutex_lock(&obj->dev->struct_mutex);
+ offset = mmap_offset(obj);
+ mutex_unlock(&obj->dev->struct_mutex);
+ return offset;
+}
+
+/* helpers for dealing w/ iommu: */
+static int map_range(struct iommu_domain *domain, unsigned int iova,
+ struct sg_table *sgt, unsigned int len, int prot)
+{
+ struct scatterlist *sg;
+ unsigned int da = iova;
+ unsigned int i, j;
+ int ret;
+
+ if (!domain || !sgt)
+ return -EINVAL;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ u32 pa = sg_phys(sg) - sg->offset;
+ size_t bytes = sg->length + sg->offset;
+
+ VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
+
+ ret = iommu_map(domain, da, pa, bytes, prot);
+ if (ret)
+ goto fail;
+
+ da += bytes;
+ }
+
+ return 0;
+
+fail:
+ da = iova;
+
+ for_each_sg(sgt->sgl, sg, i, j) {
+ size_t bytes = sg->length + sg->offset;
+ iommu_unmap(domain, da, bytes);
+ da += bytes;
+ }
+ return ret;
+}
+
+static void unmap_range(struct iommu_domain *domain, unsigned int iova,
+ struct sg_table *sgt, unsigned int len)
+{
+ struct scatterlist *sg;
+ unsigned int da = iova;
+ int i;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t bytes = sg->length + sg->offset;
+ size_t unmapped;
+
+ unmapped = iommu_unmap(domain, da, bytes);
+ if (unmapped < bytes)
+ break;
+
+ VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
+
+ BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
+
+ da += bytes;
+ }
+}
+
+/* Should be called under struct_mutex.. although it can be called
+ * from atomic context without struct_mutex to take an extra
+ * iova ref if you know one is already held.
+ *
+ * That means that when support for unpinning is eventually added,
+ * the refcnt counter will need to become an atomic_t.
+ */
+int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
+ uint32_t *iova)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int ret = 0;
+
+ if (!msm_obj->domain[id].iova) {
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ uint32_t offset = (uint32_t)mmap_offset(obj);
+ struct page **pages;
+ pages = get_pages(obj);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
+ // XXX ideally we would not map buffers writable when not needed...
+ ret = map_range(priv->iommus[id], offset, msm_obj->sgt,
+ obj->size, IOMMU_READ | IOMMU_WRITE);
+ msm_obj->domain[id].iova = offset;
+ }
+
+ if (!ret)
+ *iova = msm_obj->domain[id].iova;
+
+ return ret;
+}
+
+int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
+{
+ int ret;
+ mutex_lock(&obj->dev->struct_mutex);
+ ret = msm_gem_get_iova_locked(obj, id, iova);
+ mutex_unlock(&obj->dev->struct_mutex);
+ return ret;
+}
+
+void msm_gem_put_iova(struct drm_gem_object *obj, int id)
+{
+ // XXX TODO ..
+ // NOTE: probably don't need a _locked() version.. we wouldn't
+ // normally unmap here, but instead just mark that it could be
+ // unmapped (if the iova refcnt drops to zero), but then later
+ // if another _get_iova_locked() fails we can start unmapping
+ // things that are no longer needed..
+}
+
+int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ args->pitch = align_pitch(args->width, args->bpp);
+ args->size = PAGE_ALIGN(args->pitch * args->height);
+ return msm_gem_new_handle(dev, file, args->size,
+ MSM_BO_SCANOUT | MSM_BO_WC, &args->handle);
+}
+
+int msm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle)
+{
+ /* No special work needed, drop the reference and see what falls out */
+ return drm_gem_handle_delete(file, handle);
+}
+
+int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ /* GEM does all our handle to object mapping */
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ *offset = msm_gem_mmap_offset(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+fail:
+ return ret;
+}
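The three dumb-buffer hooks above back the generic KMS dumb-buffer UAPI. The userspace sketch below is not part of the patch; assuming libdrm and an already-open DRM file descriptor, it shows the create / map-offset / mmap sequence that ends up in msm_gem_dumb_create() and msm_gem_dumb_map_offset().

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>

/* Sketch: allocate and mmap a dumb buffer; 'fd' is an open DRM device fd. */
static void *map_dumb_buffer(int fd, uint32_t width, uint32_t height)
{
	struct drm_mode_create_dumb create = {
		.width = width, .height = height, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };
	void *ptr;

	if (drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return NULL;

	map.handle = create.handle;
	if (drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return NULL;

	/* map.offset is the fake mmap offset from msm_gem_mmap_offset() */
	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
		   MAP_SHARED, fd, map.offset);
	return (ptr == MAP_FAILED) ? NULL : ptr;
}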
+
+void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+ if (!msm_obj->vaddr) {
+ struct page **pages = get_pages(obj);
+ if (IS_ERR(pages))
+ return ERR_CAST(pages);
+ msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+ VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ }
+ return msm_obj->vaddr;
+}
+
+void *msm_gem_vaddr(struct drm_gem_object *obj)
+{
+ void *ret;
+ mutex_lock(&obj->dev->struct_mutex);
+ ret = msm_gem_vaddr_locked(obj);
+ mutex_unlock(&obj->dev->struct_mutex);
+ return ret;
+}
+
+int msm_gem_queue_inactive_work(struct drm_gem_object *obj,
+ struct work_struct *work)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int ret = 0;
+
+ mutex_lock(&dev->struct_mutex);
+ if (!list_empty(&work->entry)) {
+ ret = -EINVAL;
+ } else if (is_active(msm_obj)) {
+ list_add_tail(&work->entry, &msm_obj->inactive_work);
+ } else {
+ queue_work(priv->wq, work);
+ }
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
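A hypothetical caller of msm_gem_queue_inactive_work() could look roughly like the sketch below; it is not in this patch and assumes the usual msm_drv.h/workqueue includes. The work item either runs right away or once the bo retires off the active list.

/* Sketch: defer a flush until the given bo is no longer GPU-active. */
struct my_flush_work {
	struct work_struct work;
	struct drm_gem_object *bo;      /* hypothetical payload */
};

static void my_flush_func(struct work_struct *work)
{
	struct my_flush_work *fw = container_of(work, struct my_flush_work, work);

	DBG("bo %p is now inactive", fw->bo);   /* safe to touch the bo here */
}

static int my_schedule_flush(struct my_flush_work *fw)
{
	INIT_WORK(&fw->work, my_flush_func);
	/* runs immediately if the bo is inactive, otherwise when it retires */
	return msm_gem_queue_inactive_work(fw->bo, &fw->work);
}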
+
+void msm_gem_move_to_active(struct drm_gem_object *obj,
+ struct msm_gpu *gpu, uint32_t fence)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ msm_obj->gpu = gpu;
+ msm_obj->fence = fence;
+ list_del_init(&msm_obj->mm_list);
+ list_add_tail(&msm_obj->mm_list, &gpu->active_list);
+}
+
+void msm_gem_move_to_inactive(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ msm_obj->gpu = NULL;
+ msm_obj->fence = 0;
+ list_del_init(&msm_obj->mm_list);
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+
+ while (!list_empty(&msm_obj->inactive_work)) {
+ struct work_struct *work;
+
+ work = list_first_entry(&msm_obj->inactive_work,
+ struct work_struct, entry);
+
+ list_del_init(&work->entry);
+ queue_work(priv->wq, work);
+ }
+}
+
+int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
+ struct timespec *timeout)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int ret = 0;
+
+ if (is_active(msm_obj) && !(op & MSM_PREP_NOSYNC))
+ ret = msm_wait_fence_interruptable(dev, msm_obj->fence, timeout);
+
+ /* TODO cache maintenance */
+
+ return ret;
+}
+
+int msm_gem_cpu_fini(struct drm_gem_object *obj)
+{
+ /* TODO cache maintenance */
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ uint64_t off = drm_vma_node_start(&obj->vma_node);
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ seq_printf(m, "%08x: %c(%d) %2d (%2d) %08llx %p %d\n",
+ msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
+ msm_obj->fence, obj->name, obj->refcount.refcount.counter,
+ off, msm_obj->vaddr, obj->size);
+}
+
+void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
+{
+ struct msm_gem_object *msm_obj;
+ int count = 0;
+ size_t size = 0;
+
+ list_for_each_entry(msm_obj, list, mm_list) {
+ struct drm_gem_object *obj = &msm_obj->base;
+ seq_printf(m, " ");
+ msm_gem_describe(obj, m);
+ count++;
+ size += obj->size;
+ }
+
+ seq_printf(m, "Total %d objects, %zu bytes\n", count, size);
+}
+#endif
+
+void msm_gem_free_object(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ int id;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ /* object should not be on active list: */
+ WARN_ON(is_active(msm_obj));
+
+ list_del(&msm_obj->mm_list);
+
+ for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
+ if (msm_obj->domain[id].iova) {
+ struct msm_drm_private *priv = obj->dev->dev_private;
+ uint32_t offset = (uint32_t)mmap_offset(obj);
+ unmap_range(priv->iommus[id], offset,
+ msm_obj->sgt, obj->size);
+ }
+ }
+
+ drm_gem_free_mmap_offset(obj);
+
+ if (msm_obj->vaddr)
+ vunmap(msm_obj->vaddr);
+
+ put_pages(obj);
+
+ if (msm_obj->resv == &msm_obj->_resv)
+ reservation_object_fini(msm_obj->resv);
+
+ drm_gem_object_release(obj);
+
+ kfree(msm_obj);
+}
+
+/* convenience method to construct a GEM buffer object and userspace handle */
+int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ uint32_t size, uint32_t flags, uint32_t *handle)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ obj = msm_gem_new(dev, size, flags);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ ret = drm_gem_handle_create(file, obj, handle);
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+struct drm_gem_object *msm_gem_new(struct drm_device *dev,
+ uint32_t size, uint32_t flags)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gem_object *msm_obj;
+ struct drm_gem_object *obj = NULL;
+ int ret;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ size = PAGE_ALIGN(size);
+
+ switch (flags & MSM_BO_CACHE_MASK) {
+ case MSM_BO_UNCACHED:
+ case MSM_BO_CACHED:
+ case MSM_BO_WC:
+ break;
+ default:
+ dev_err(dev->dev, "invalid cache flag: %x\n",
+ (flags & MSM_BO_CACHE_MASK));
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
+ if (!msm_obj) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ obj = &msm_obj->base;
+
+ ret = drm_gem_object_init(dev, obj, size);
+ if (ret)
+ goto fail;
+
+ msm_obj->flags = flags;
+
+ msm_obj->resv = &msm_obj->_resv;
+ reservation_object_init(msm_obj->resv);
+
+ INIT_LIST_HEAD(&msm_obj->submit_entry);
+ INIT_LIST_HEAD(&msm_obj->inactive_work);
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+
+ return obj;
+
+fail:
+ if (obj)
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
new file mode 100644
index 00000000000..d746f13d283
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_GEM_H__
+#define __MSM_GEM_H__
+
+#include <linux/reservation.h>
+#include "msm_drv.h"
+
+struct msm_gem_object {
+ struct drm_gem_object base;
+
+ uint32_t flags;
+
+ /* An object is either:
+ * inactive - on priv->inactive_list
+ * active - on one of the gpu's active_list.. well, at
+ * least for now we don't have (I don't think) hw sync between
+ * 2d and 3d on devices which have both, meaning we need to
+ * block on submit if a bo is already on the other ring
+ */
+ struct list_head mm_list;
+ struct msm_gpu *gpu; /* non-null if active */
+ uint32_t fence;
+
+ /* Transiently in the process of submit ioctl, objects associated
+ * with the submit are on submit->bo_list.. this only lasts for
+ * the duration of the ioctl, so one bo can never be on multiple
+ * submit lists.
+ */
+ struct list_head submit_entry;
+
+ /* work deferred until the bo is inactive: */
+ struct list_head inactive_work;
+
+ struct page **pages;
+ struct sg_table *sgt;
+ void *vaddr;
+
+ struct {
+ // XXX
+ uint32_t iova;
+ } domain[NUM_DOMAINS];
+
+ /* normally (resv == &_resv) except for imported bo's */
+ struct reservation_object *resv;
+ struct reservation_object _resv;
+};
+#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
+
+static inline bool is_active(struct msm_gem_object *msm_obj)
+{
+ return msm_obj->gpu != NULL;
+}
+
+#define MAX_CMDS 4
+
+/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
+ * associated with the cmdstream submission for synchronization (and
+ * make it easier to unwind when things go wrong, etc). This only
+ * lasts for the duration of the submit-ioctl.
+ */
+struct msm_gem_submit {
+ struct drm_device *dev;
+ struct msm_gpu *gpu;
+ struct list_head bo_list;
+ struct ww_acquire_ctx ticket;
+ uint32_t fence;
+ bool valid;
+ unsigned int nr_cmds;
+ unsigned int nr_bos;
+ struct {
+ uint32_t type;
+ uint32_t size; /* in dwords */
+ uint32_t iova;
+ } cmd[MAX_CMDS];
+ struct {
+ uint32_t flags;
+ struct msm_gem_object *obj;
+ uint32_t iova;
+ } bos[0];
+};
+
+#endif /* __MSM_GEM_H__ */
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
new file mode 100644
index 00000000000..3e1ef3a00f6
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -0,0 +1,412 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gpu.h"
+#include "msm_gem.h"
+
+/*
+ * Cmdstream submission:
+ */
+
+#define BO_INVALID_FLAGS ~(MSM_SUBMIT_BO_READ | MSM_SUBMIT_BO_WRITE)
+/* make sure these don't conflict w/ MSM_SUBMIT_BO_x */
+#define BO_VALID 0x8000
+#define BO_LOCKED 0x4000
+#define BO_PINNED 0x2000
+
+static inline void __user *to_user_ptr(u64 address)
+{
+ return (void __user *)(uintptr_t)address;
+}
+
+static struct msm_gem_submit *submit_create(struct drm_device *dev,
+ struct msm_gpu *gpu, int nr)
+{
+ struct msm_gem_submit *submit;
+ int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
+
+ submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
+ if (submit) {
+ submit->dev = dev;
+ submit->gpu = gpu;
+
+ /* initially, until copy_from_user() and bo lookup succeeds: */
+ submit->nr_bos = 0;
+ submit->nr_cmds = 0;
+
+ INIT_LIST_HEAD(&submit->bo_list);
+ ww_acquire_init(&submit->ticket, &reservation_ww_class);
+ }
+
+ return submit;
+}
+
+static int submit_lookup_objects(struct msm_gem_submit *submit,
+ struct drm_msm_gem_submit *args, struct drm_file *file)
+{
+ unsigned i;
+ int ret = 0;
+
+ spin_lock(&file->table_lock);
+
+ for (i = 0; i < args->nr_bos; i++) {
+ struct drm_msm_gem_submit_bo submit_bo;
+ struct drm_gem_object *obj;
+ struct msm_gem_object *msm_obj;
+ void __user *userptr =
+ to_user_ptr(args->bos + (i * sizeof(submit_bo)));
+
+ ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
+ if (ret) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
+
+ if (submit_bo.flags & BO_INVALID_FLAGS) {
+ DBG("invalid flags: %x", submit_bo.flags);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ submit->bos[i].flags = submit_bo.flags;
+ /* in validate_objects() we figure out if this is true: */
+ submit->bos[i].iova = submit_bo.presumed;
+
+ /* normally use drm_gem_object_lookup(), but for bulk lookup
+ * all under a single table_lock, just hit object_idr directly:
+ */
+ obj = idr_find(&file->object_idr, submit_bo.handle);
+ if (!obj) {
+ DBG("invalid handle %u at index %u", submit_bo.handle, i);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ msm_obj = to_msm_bo(obj);
+
+ if (!list_empty(&msm_obj->submit_entry)) {
+ DBG("handle %u at index %u already on submit list",
+ submit_bo.handle, i);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ drm_gem_object_reference(obj);
+
+ submit->bos[i].obj = msm_obj;
+
+ list_add_tail(&msm_obj->submit_entry, &submit->bo_list);
+ }
+
+out_unlock:
+ submit->nr_bos = i;
+ spin_unlock(&file->table_lock);
+
+ return ret;
+}
+
+static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
+{
+ struct msm_gem_object *msm_obj = submit->bos[i].obj;
+
+ if (submit->bos[i].flags & BO_PINNED)
+ msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
+
+ if (submit->bos[i].flags & BO_LOCKED)
+ ww_mutex_unlock(&msm_obj->resv->lock);
+
+ if (!(submit->bos[i].flags & BO_VALID))
+ submit->bos[i].iova = 0;
+
+ submit->bos[i].flags &= ~(BO_LOCKED | BO_PINNED);
+}
+
+/* This is where we make sure all the bo's are reserved and pinned: */
+static int submit_validate_objects(struct msm_gem_submit *submit)
+{
+ int contended, slow_locked = -1, i, ret = 0;
+
+retry:
+ submit->valid = true;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ uint32_t iova;
+
+ if (slow_locked == i)
+ slow_locked = -1;
+
+ contended = i;
+
+ if (!(submit->bos[i].flags & BO_LOCKED)) {
+ ret = ww_mutex_lock_interruptible(&msm_obj->resv->lock,
+ &submit->ticket);
+ if (ret)
+ goto fail;
+ submit->bos[i].flags |= BO_LOCKED;
+ }
+
+
+ /* if locking succeeded, pin bo: */
+ ret = msm_gem_get_iova(&msm_obj->base,
+ submit->gpu->id, &iova);
+
+ /* this would break the logic in the fail path.. there is no
+ * reason for this to happen, but just to be on the safe side
+ * let's notice if this starts happening in the future:
+ */
+ WARN_ON(ret == -EDEADLK);
+
+ if (ret)
+ goto fail;
+
+ submit->bos[i].flags |= BO_PINNED;
+
+ if (iova == submit->bos[i].iova) {
+ submit->bos[i].flags |= BO_VALID;
+ } else {
+ submit->bos[i].iova = iova;
+ submit->bos[i].flags &= ~BO_VALID;
+ submit->valid = false;
+ }
+ }
+
+ ww_acquire_done(&submit->ticket);
+
+ return 0;
+
+fail:
+ for (; i >= 0; i--)
+ submit_unlock_unpin_bo(submit, i);
+
+ if (slow_locked > 0)
+ submit_unlock_unpin_bo(submit, slow_locked);
+
+ if (ret == -EDEADLK) {
+ struct msm_gem_object *msm_obj = submit->bos[contended].obj;
+ /* we lost out in a seqno race, lock and retry.. */
+ ret = ww_mutex_lock_slow_interruptible(&msm_obj->resv->lock,
+ &submit->ticket);
+ if (!ret) {
+ submit->bos[contended].flags |= BO_LOCKED;
+ slow_locked = contended;
+ goto retry;
+ }
+ }
+
+ return ret;
+}
+
+static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
+ struct msm_gem_object **obj, uint32_t *iova, bool *valid)
+{
+ if (idx >= submit->nr_bos) {
+ DBG("invalid buffer index: %u (out of %u)", idx, submit->nr_bos);
+ return -EINVAL;
+ }
+
+ if (obj)
+ *obj = submit->bos[idx].obj;
+ if (iova)
+ *iova = submit->bos[idx].iova;
+ if (valid)
+ *valid = !!(submit->bos[idx].flags & BO_VALID);
+
+ return 0;
+}
+
+/* process the reloc's and patch up the cmdstream as needed: */
+static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
+ uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
+{
+ uint32_t i, last_offset = 0;
+ uint32_t *ptr;
+ int ret;
+
+ if (offset % 4) {
+ DBG("non-aligned cmdstream buffer: %u", offset);
+ return -EINVAL;
+ }
+
+ /* For now, just map the entire thing. Eventually we probably want
+ * to do it page-by-page, w/ kmap() if not vmap()d..
+ */
+ ptr = msm_gem_vaddr(&obj->base);
+
+ if (IS_ERR(ptr)) {
+ ret = PTR_ERR(ptr);
+ DBG("failed to map: %d", ret);
+ return ret;
+ }
+
+ for (i = 0; i < nr_relocs; i++) {
+ struct drm_msm_gem_submit_reloc submit_reloc;
+ void __user *userptr =
+ to_user_ptr(relocs + (i * sizeof(submit_reloc)));
+ uint32_t iova, off;
+ bool valid;
+
+ ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
+ if (ret)
+ return -EFAULT;
+
+ if (submit_reloc.submit_offset % 4) {
+ DBG("non-aligned reloc offset: %u",
+ submit_reloc.submit_offset);
+ return -EINVAL;
+ }
+
+ /* offset in dwords: */
+ off = submit_reloc.submit_offset / 4;
+
+ if ((off >= (obj->base.size / 4)) ||
+ (off < last_offset)) {
+ DBG("invalid offset %u at reloc %u", off, i);
+ return -EINVAL;
+ }
+
+ ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
+ if (ret)
+ return ret;
+
+ if (valid)
+ continue;
+
+ iova += submit_reloc.reloc_offset;
+
+ if (submit_reloc.shift < 0)
+ iova >>= -submit_reloc.shift;
+ else
+ iova <<= submit_reloc.shift;
+
+ ptr[off] = iova | submit_reloc.or;
+
+ last_offset = off;
+ }
+
+ return 0;
+}
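The patching above amounts to "add the reloc offset to the buffer's iova, apply the shift, OR in the static bits, and store the result at submit_offset/4". The standalone snippet below is not part of the patch; it works one hypothetical reloc entry through that arithmetic.

#include <stdio.h>
#include <stdint.h>

/* Worked example of the reloc math in submit_reloc() for one entry. */
int main(void)
{
	uint32_t iova          = 0x10010000; /* bo address from submit_bo()     */
	uint32_t reloc_offset  = 0x100;      /* byte offset inside that bo      */
	int32_t  shift         = 0;          /* negative would mean right-shift */
	uint32_t or_bits       = 0x3;        /* static low bits OR'd in         */
	uint32_t submit_offset = 24;         /* byte offset into the cmdstream  */

	uint32_t addr = iova + reloc_offset;

	if (shift < 0)
		addr >>= -shift;
	else
		addr <<= shift;

	printf("ptr[%u] = 0x%08x\n", submit_offset / 4, addr | or_bits);
	return 0;
}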
+
+static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
+{
+ unsigned i;
+
+ mutex_lock(&submit->dev->struct_mutex);
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ submit_unlock_unpin_bo(submit, i);
+ list_del_init(&msm_obj->submit_entry);
+ drm_gem_object_unreference(&msm_obj->base);
+ }
+ mutex_unlock(&submit->dev->struct_mutex);
+
+ ww_acquire_fini(&submit->ticket);
+ kfree(submit);
+}
+
+int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_gem_submit *args = data;
+ struct msm_file_private *ctx = file->driver_priv;
+ struct msm_gem_submit *submit;
+ struct msm_gpu *gpu;
+ unsigned i;
+ int ret;
+
+ /* for now, we just have 3d pipe.. eventually this would need to
+ * be more clever to dispatch to appropriate gpu module:
+ */
+ if (args->pipe != MSM_PIPE_3D0)
+ return -EINVAL;
+
+ gpu = priv->gpu;
+
+ if (args->nr_cmds > MAX_CMDS)
+ return -EINVAL;
+
+ submit = submit_create(dev, gpu, args->nr_bos);
+ if (!submit) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = submit_lookup_objects(submit, args, file);
+ if (ret)
+ goto out;
+
+ ret = submit_validate_objects(submit);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < args->nr_cmds; i++) {
+ struct drm_msm_gem_submit_cmd submit_cmd;
+ void __user *userptr =
+ to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
+ struct msm_gem_object *msm_obj;
+ uint32_t iova;
+
+ ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
+ if (ret) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ ret = submit_bo(submit, submit_cmd.submit_idx,
+ &msm_obj, &iova, NULL);
+ if (ret)
+ goto out;
+
+ if (submit_cmd.size % 4) {
+ DBG("non-aligned cmdstream buffer size: %u",
+ submit_cmd.size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (submit_cmd.size >= msm_obj->base.size) {
+ DBG("invalid cmdstream size: %u", submit_cmd.size);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ submit->cmd[i].type = submit_cmd.type;
+ submit->cmd[i].size = submit_cmd.size / 4;
+ submit->cmd[i].iova = iova + submit_cmd.submit_offset;
+
+ if (submit->valid)
+ continue;
+
+ ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
+ submit_cmd.nr_relocs, submit_cmd.relocs);
+ if (ret)
+ goto out;
+ }
+
+ submit->nr_cmds = i;
+
+ ret = msm_gpu_submit(gpu, submit, ctx);
+
+ args->fence = submit->fence;
+
+out:
+ if (submit)
+ submit_cleanup(submit, !!ret);
+ return ret;
+}
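From userspace, the ioctl above is driven by the drm_msm_gem_submit structures whose fields are read in submit_lookup_objects() and the cmd loop. The fragment below is a hedged sketch of a single-bo, single-cmd submission: the field names match what the kernel side reads here, while MSM_SUBMIT_CMD_BUF, the msm_drm.h header name, DRM_MSM_GEM_SUBMIT and the drmCommandWriteRead() wrapper are assumptions not shown in this hunk.

#include <stdint.h>
#include <xf86drm.h>
#include "msm_drm.h"                    /* UAPI header name is an assumption */

/* Sketch: submit one cmdstream bo with no relocs on the 3d pipe. */
static int submit_one(int fd, uint32_t handle, uint32_t size_bytes)
{
	struct drm_msm_gem_submit_bo bo = {
		.flags    = MSM_SUBMIT_BO_READ,
		.handle   = handle,
		.presumed = 0,                  /* no presumed iova yet */
	};
	struct drm_msm_gem_submit_cmd cmd = {
		.type          = MSM_SUBMIT_CMD_BUF,  /* assumed cmd type constant */
		.submit_idx    = 0,             /* index into bos[] */
		.submit_offset = 0,
		.size          = size_bytes,    /* must be a multiple of 4 */
		.nr_relocs     = 0,
	};
	struct drm_msm_gem_submit req = {
		.pipe    = MSM_PIPE_3D0,
		.nr_bos  = 1,
		.bos     = (uintptr_t)&bo,
		.nr_cmds = 1,
		.cmds    = (uintptr_t)&cmd,
	};
	int ret;

	ret = drmCommandWriteRead(fd, DRM_MSM_GEM_SUBMIT, &req, sizeof(req));
	if (ret)
		return ret;

	return (int)req.fence;          /* fence value written back on success */
}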
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
new file mode 100644
index 00000000000..e1e1ec9321f
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_gpu.h"
+#include "msm_gem.h"
+
+
+/*
+ * Power Management:
+ */
+
+#ifdef CONFIG_MSM_BUS_SCALING
+#include <mach/board.h>
+#include <mach/kgsl.h>
+static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev)
+{
+ struct drm_device *dev = gpu->dev;
+ struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
+
+ if (!pdev) {
+ dev_err(dev->dev, "could not find dtv pdata\n");
+ return;
+ }
+
+ if (pdata->bus_scale_table) {
+ gpu->bsc = msm_bus_scale_register_client(pdata->bus_scale_table);
+ DBG("bus scale client: %08x", gpu->bsc);
+ }
+}
+
+static void bs_fini(struct msm_gpu *gpu)
+{
+ if (gpu->bsc) {
+ msm_bus_scale_unregister_client(gpu->bsc);
+ gpu->bsc = 0;
+ }
+}
+
+static void bs_set(struct msm_gpu *gpu, int idx)
+{
+ if (gpu->bsc) {
+ DBG("set bus scaling: %d", idx);
+ msm_bus_scale_client_update_request(gpu->bsc, idx);
+ }
+}
+#else
+static void bs_init(struct msm_gpu *gpu, struct platform_device *pdev) {}
+static void bs_fini(struct msm_gpu *gpu) {}
+static void bs_set(struct msm_gpu *gpu, int idx) {}
+#endif
+
+static int enable_pwrrail(struct msm_gpu *gpu)
+{
+ struct drm_device *dev = gpu->dev;
+ int ret = 0;
+
+ if (gpu->gpu_reg) {
+ ret = regulator_enable(gpu->gpu_reg);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (gpu->gpu_cx) {
+ ret = regulator_enable(gpu->gpu_cx);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int disable_pwrrail(struct msm_gpu *gpu)
+{
+ if (gpu->gpu_cx)
+ regulator_disable(gpu->gpu_cx);
+ if (gpu->gpu_reg)
+ regulator_disable(gpu->gpu_reg);
+ return 0;
+}
+
+static int enable_clk(struct msm_gpu *gpu)
+{
+ struct clk *rate_clk = NULL;
+ int i;
+
+ /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
+ if (gpu->grp_clks[i]) {
+ clk_prepare(gpu->grp_clks[i]);
+ rate_clk = gpu->grp_clks[i];
+ }
+ }
+
+ if (rate_clk && gpu->fast_rate)
+ clk_set_rate(rate_clk, gpu->fast_rate);
+
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+ if (gpu->grp_clks[i])
+ clk_enable(gpu->grp_clks[i]);
+
+ return 0;
+}
+
+static int disable_clk(struct msm_gpu *gpu)
+{
+ struct clk *rate_clk = NULL;
+ int i;
+
+ /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
+ if (gpu->grp_clks[i]) {
+ clk_disable(gpu->grp_clks[i]);
+ rate_clk = gpu->grp_clks[i];
+ }
+ }
+
+ if (rate_clk && gpu->slow_rate)
+ clk_set_rate(rate_clk, gpu->slow_rate);
+
+ for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+ if (gpu->grp_clks[i])
+ clk_unprepare(gpu->grp_clks[i]);
+
+ return 0;
+}
+
+static int enable_axi(struct msm_gpu *gpu)
+{
+ if (gpu->ebi1_clk)
+ clk_prepare_enable(gpu->ebi1_clk);
+ if (gpu->bus_freq)
+ bs_set(gpu, gpu->bus_freq);
+ return 0;
+}
+
+static int disable_axi(struct msm_gpu *gpu)
+{
+ if (gpu->ebi1_clk)
+ clk_disable_unprepare(gpu->ebi1_clk);
+ if (gpu->bus_freq)
+ bs_set(gpu, 0);
+ return 0;
+}
+
+int msm_gpu_pm_resume(struct msm_gpu *gpu)
+{
+ int ret;
+
+ DBG("%s", gpu->name);
+
+ ret = enable_pwrrail(gpu);
+ if (ret)
+ return ret;
+
+ ret = enable_clk(gpu);
+ if (ret)
+ return ret;
+
+ ret = enable_axi(gpu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int msm_gpu_pm_suspend(struct msm_gpu *gpu)
+{
+ int ret;
+
+ DBG("%s", gpu->name);
+
+ ret = disable_axi(gpu);
+ if (ret)
+ return ret;
+
+ ret = disable_clk(gpu);
+ if (ret)
+ return ret;
+
+ ret = disable_pwrrail(gpu);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/*
+ * Hangcheck detection for a locked-up (hung) gpu:
+ */
+
+static void recover_worker(struct work_struct *work)
+{
+ struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
+ struct drm_device *dev = gpu->dev;
+
+ dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
+
+ mutex_lock(&dev->struct_mutex);
+ gpu->funcs->recover(gpu);
+ mutex_unlock(&dev->struct_mutex);
+
+ msm_gpu_retire(gpu);
+}
+
+static void hangcheck_timer_reset(struct msm_gpu *gpu)
+{
+ DBG("%s", gpu->name);
+ mod_timer(&gpu->hangcheck_timer,
+ round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
+}
+
+static void hangcheck_handler(unsigned long data)
+{
+ struct msm_gpu *gpu = (struct msm_gpu *)data;
+ uint32_t fence = gpu->funcs->last_fence(gpu);
+
+ if (fence != gpu->hangcheck_fence) {
+ /* some progress has been made.. ya! */
+ gpu->hangcheck_fence = fence;
+ } else if (fence < gpu->submitted_fence) {
+ /* no progress and not done.. hung! */
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ gpu->hangcheck_fence = fence;
+ queue_work(priv->wq, &gpu->recover_work);
+ }
+
+ /* if still more pending work, reset the hangcheck timer: */
+ if (gpu->submitted_fence > gpu->hangcheck_fence)
+ hangcheck_timer_reset(gpu);
+}
+
+/*
+ * Cmdstream submission/retirement:
+ */
+
+static void retire_worker(struct work_struct *work)
+{
+ struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
+ struct drm_device *dev = gpu->dev;
+ uint32_t fence = gpu->funcs->last_fence(gpu);
+
+ mutex_lock(&dev->struct_mutex);
+
+ while (!list_empty(&gpu->active_list)) {
+ struct msm_gem_object *obj;
+
+ obj = list_first_entry(&gpu->active_list,
+ struct msm_gem_object, mm_list);
+
+ if (obj->fence <= fence) {
+ /* move to inactive: */
+ msm_gem_move_to_inactive(&obj->base);
+ msm_gem_put_iova(&obj->base, gpu->id);
+ drm_gem_object_unreference(&obj->base);
+ } else {
+ break;
+ }
+ }
+
+ msm_update_fence(gpu->dev, fence);
+
+ mutex_unlock(&dev->struct_mutex);
+}
+
+/* call from irq handler to schedule work to retire bo's */
+void msm_gpu_retire(struct msm_gpu *gpu)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ queue_work(priv->wq, &gpu->retire_work);
+}
+
+/* add bo's to gpu's ring, and kick gpu: */
+int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ int i, ret;
+
+ mutex_lock(&dev->struct_mutex);
+
+ submit->fence = ++priv->next_fence;
+
+ gpu->submitted_fence = submit->fence;
+
+ ret = gpu->funcs->submit(gpu, submit, ctx);
+ priv->lastctx = ctx;
+
+ for (i = 0; i < submit->nr_bos; i++) {
+ struct msm_gem_object *msm_obj = submit->bos[i].obj;
+
+ /* can't happen yet.. but when we add 2d support we'll have
+ * to deal w/ cross-ring synchronization:
+ */
+ WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
+
+ if (!is_active(msm_obj)) {
+ uint32_t iova;
+
+ /* ring takes a reference to the bo and iova: */
+ drm_gem_object_reference(&msm_obj->base);
+ msm_gem_get_iova_locked(&msm_obj->base,
+ submit->gpu->id, &iova);
+ }
+
+ msm_gem_move_to_active(&msm_obj->base, gpu, submit->fence);
+ }
+ hangcheck_timer_reset(gpu);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+/*
+ * Init/Cleanup:
+ */
+
+static irqreturn_t irq_handler(int irq, void *data)
+{
+ struct msm_gpu *gpu = data;
+ return gpu->funcs->irq(gpu);
+}
+
+static const char *clk_names[] = {
+ "src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
+};
+
+int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
+ const char *name, const char *ioname, const char *irqname, int ringsz)
+{
+ int i, ret;
+
+ gpu->dev = drm;
+ gpu->funcs = funcs;
+ gpu->name = name;
+
+ INIT_LIST_HEAD(&gpu->active_list);
+ INIT_WORK(&gpu->retire_work, retire_worker);
+ INIT_WORK(&gpu->recover_work, recover_worker);
+
+ setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
+ (unsigned long)gpu);
+
+ BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
+
+ /* Map registers: */
+ gpu->mmio = msm_ioremap(pdev, ioname, name);
+ if (IS_ERR(gpu->mmio)) {
+ ret = PTR_ERR(gpu->mmio);
+ goto fail;
+ }
+
+ /* Get Interrupt: */
+ gpu->irq = platform_get_irq_byname(pdev, irqname);
+ if (gpu->irq < 0) {
+ ret = gpu->irq;
+ dev_err(drm->dev, "failed to get irq: %d\n", ret);
+ goto fail;
+ }
+
+ ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
+ IRQF_TRIGGER_HIGH, gpu->name, gpu);
+ if (ret) {
+ dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
+ goto fail;
+ }
+
+ /* Acquire clocks: */
+ for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
+ gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
+ DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
+ if (IS_ERR(gpu->grp_clks[i]))
+ gpu->grp_clks[i] = NULL;
+ }
+
+ gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
+ DBG("ebi1_clk: %p", gpu->ebi1_clk);
+ if (IS_ERR(gpu->ebi1_clk))
+ gpu->ebi1_clk = NULL;
+
+ /* Acquire regulators: */
+ gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
+ DBG("gpu_reg: %p", gpu->gpu_reg);
+ if (IS_ERR(gpu->gpu_reg))
+ gpu->gpu_reg = NULL;
+
+ gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
+ DBG("gpu_cx: %p", gpu->gpu_cx);
+ if (IS_ERR(gpu->gpu_cx))
+ gpu->gpu_cx = NULL;
+
+ /* Setup IOMMU.. eventually we will (I think) do this once per context
+ * and have separate page tables per context. For now, to keep things
+ * simple and to get something working, just use a single address space:
+ */
+ gpu->iommu = iommu_domain_alloc(&platform_bus_type);
+ if (!gpu->iommu) {
+ dev_err(drm->dev, "failed to allocate IOMMU\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+ gpu->id = msm_register_iommu(drm, gpu->iommu);
+
+ /* Create ringbuffer: */
+ gpu->rb = msm_ringbuffer_new(gpu, ringsz);
+ if (IS_ERR(gpu->rb)) {
+ ret = PTR_ERR(gpu->rb);
+ gpu->rb = NULL;
+ dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
+ goto fail;
+ }
+
+ ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
+ if (ret) {
+ gpu->rb_iova = 0;
+ dev_err(drm->dev, "could not map ringbuffer: %d\n", ret);
+ goto fail;
+ }
+
+ bs_init(gpu, pdev);
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+void msm_gpu_cleanup(struct msm_gpu *gpu)
+{
+ DBG("%s", gpu->name);
+
+ WARN_ON(!list_empty(&gpu->active_list));
+
+ bs_fini(gpu);
+
+ if (gpu->rb) {
+ if (gpu->rb_iova)
+ msm_gem_put_iova(gpu->rb->bo, gpu->id);
+ msm_ringbuffer_destroy(gpu->rb);
+ }
+
+ if (gpu->iommu)
+ iommu_domain_free(gpu->iommu);
+}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
new file mode 100644
index 00000000000..8cd829e520b
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_GPU_H__
+#define __MSM_GPU_H__
+
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include "msm_drv.h"
+#include "msm_ringbuffer.h"
+
+struct msm_gem_submit;
+
+/* So far, with hardware that I've seen to date, we can have:
+ * + zero, one, or two z180 2d cores
+ * + a3xx or a2xx 3d core, which share a common CP (the firmware
+ * for the CP seems to implement some different PM4 packet types
+ * but the basics of cmdstream submission are the same)
+ *
+ * Which means that the eventual complete "class" hierarchy, once
+ * support for all past and present hw is in place, becomes:
+ * + msm_gpu
+ * + adreno_gpu
+ * + a3xx_gpu
+ * + a2xx_gpu
+ * + z180_gpu
+ */
+struct msm_gpu_funcs {
+ int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
+ int (*hw_init)(struct msm_gpu *gpu);
+ int (*pm_suspend)(struct msm_gpu *gpu);
+ int (*pm_resume)(struct msm_gpu *gpu);
+ int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx);
+ void (*flush)(struct msm_gpu *gpu);
+ void (*idle)(struct msm_gpu *gpu);
+ irqreturn_t (*irq)(struct msm_gpu *irq);
+ uint32_t (*last_fence)(struct msm_gpu *gpu);
+ void (*recover)(struct msm_gpu *gpu);
+ void (*destroy)(struct msm_gpu *gpu);
+#ifdef CONFIG_DEBUG_FS
+ /* show GPU status in debugfs: */
+ void (*show)(struct msm_gpu *gpu, struct seq_file *m);
+#endif
+};
+
+struct msm_gpu {
+ const char *name;
+ struct drm_device *dev;
+ const struct msm_gpu_funcs *funcs;
+
+ struct msm_ringbuffer *rb;
+ uint32_t rb_iova;
+
+ /* list of GEM active objects: */
+ struct list_head active_list;
+
+ uint32_t submitted_fence;
+
+ /* worker for handling active-list retiring: */
+ struct work_struct retire_work;
+
+ void __iomem *mmio;
+ int irq;
+
+ struct iommu_domain *iommu;
+ int id;
+
+ /* Power Control: */
+ struct regulator *gpu_reg, *gpu_cx;
+ struct clk *ebi1_clk, *grp_clks[5];
+ uint32_t fast_rate, slow_rate, bus_freq;
+ uint32_t bsc;
+
+ /* Hang Detection: */
+#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
+#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
+ struct timer_list hangcheck_timer;
+ uint32_t hangcheck_fence;
+ struct work_struct recover_work;
+};
+
+static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
+{
+ msm_writel(data, gpu->mmio + (reg << 2));
+}
+
+static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
+{
+ return msm_readl(gpu->mmio + (reg << 2));
+}
+
+int msm_gpu_pm_suspend(struct msm_gpu *gpu);
+int msm_gpu_pm_resume(struct msm_gpu *gpu);
+
+void msm_gpu_retire(struct msm_gpu *gpu);
+int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx);
+
+int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
+ struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
+ const char *name, const char *ioname, const char *irqname, int ringsz);
+void msm_gpu_cleanup(struct msm_gpu *gpu);
+
+struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
+void __init a3xx_register(void);
+void __exit a3xx_unregister(void);
+
+#endif /* __MSM_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
new file mode 100644
index 00000000000..8171537dd7d
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_ringbuffer.h"
+#include "msm_gpu.h"
+
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
+{
+ struct msm_ringbuffer *ring;
+ int ret;
+
+ size = ALIGN(size, 4); /* size should be dword aligned */
+
+ ring = kzalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ring->gpu = gpu;
+ ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
+ if (IS_ERR(ring->bo)) {
+ ret = PTR_ERR(ring->bo);
+ ring->bo = NULL;
+ goto fail;
+ }
+
+ ring->start = msm_gem_vaddr_locked(ring->bo);
+ ring->end = ring->start + (size / 4);
+ ring->cur = ring->start;
+
+ ring->size = size;
+
+ return ring;
+
+fail:
+ if (ring)
+ msm_ringbuffer_destroy(ring);
+ return ERR_PTR(ret);
+}
+
+void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
+{
+ if (ring->bo)
+ drm_gem_object_unreference(ring->bo);
+ kfree(ring);
+}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
new file mode 100644
index 00000000000..6e0e1049fa4
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_RINGBUFFER_H__
+#define __MSM_RINGBUFFER_H__
+
+#include "msm_drv.h"
+
+struct msm_ringbuffer {
+ struct msm_gpu *gpu;
+ int size;
+ struct drm_gem_object *bo;
+ uint32_t *start, *end, *cur;
+};
+
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size);
+void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
+
+/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
+
+static inline void
+OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
+{
+ if (ring->cur == ring->end)
+ ring->cur = ring->start;
+ *(ring->cur++) = data;
+}
+
+#endif /* __MSM_RINGBUFFER_H__ */
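OUT_RING() above just writes one dword and wraps at the end of the buffer; a hypothetical user, not part of this patch, would emit its dwords and then call the gpu-specific flush() to update the write pointer, roughly as in the sketch below. The packet encoding shown is a placeholder, not a real opcode.

/* Hypothetical usage sketch of OUT_RING(); assumes the msm_gpu.h context. */
static void emit_example(struct msm_gpu *gpu, uint32_t payload)
{
	struct msm_ringbuffer *ring = gpu->rb;

	OUT_RING(ring, 0xdeadbeef);     /* placeholder packet header */
	OUT_RING(ring, payload);        /* one dword of payload */

	/* ring write-pointer update / doorbell is gpu-specific: */
	gpu->funcs->flush(gpu);
}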
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index d8291724dbd..7a4e0891c5f 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -98,6 +98,8 @@ nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
u32 splitoff;
u32 s, e;
+ BUG_ON(!type);
+
list_for_each_entry(this, &mm->free, fl_entry) {
e = this->offset + this->length;
s = this->offset;
@@ -162,6 +164,8 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
struct nouveau_mm_node *prev, *this, *next;
u32 mask = align - 1;
+ BUG_ON(!type);
+
list_for_each_entry_reverse(this, &mm->free, fl_entry) {
u32 e = this->offset + this->length;
u32 s = this->offset;
diff --git a/drivers/gpu/drm/nouveau/core/core/printk.c b/drivers/gpu/drm/nouveau/core/core/printk.c
index 6161eaf5447..52fb2aa129e 100644
--- a/drivers/gpu/drm/nouveau/core/core/printk.c
+++ b/drivers/gpu/drm/nouveau/core/core/printk.c
@@ -27,6 +27,8 @@
#include <core/subdev.h>
#include <core/printk.h>
+int nv_printk_suspend_level = NV_DBG_DEBUG;
+
void
nv_printk_(struct nouveau_object *object, const char *pfx, int level,
const char *fmt, ...)
@@ -72,3 +74,20 @@ nv_printk_(struct nouveau_object *object, const char *pfx, int level,
vprintk(mfmt, args);
va_end(args);
}
+
+#define CONV_LEVEL(x) case NV_DBG_##x: return NV_PRINTK_##x
+
+const char *nv_printk_level_to_pfx(int level)
+{
+ switch (level) {
+ CONV_LEVEL(FATAL);
+ CONV_LEVEL(ERROR);
+ CONV_LEVEL(WARN);
+ CONV_LEVEL(INFO);
+ CONV_LEVEL(DEBUG);
+ CONV_LEVEL(PARANOIA);
+ CONV_LEVEL(TRACE);
+ CONV_LEVEL(SPAM);
+ }
+ return NV_PRINTK_DEBUG;
+}
diff --git a/drivers/gpu/drm/nouveau/core/core/ramht.c b/drivers/gpu/drm/nouveau/core/core/ramht.c
index 86a64045dd6..f3b9bddc387 100644
--- a/drivers/gpu/drm/nouveau/core/core/ramht.c
+++ b/drivers/gpu/drm/nouveau/core/core/ramht.c
@@ -22,7 +22,6 @@
#include <core/object.h>
#include <core/ramht.h>
-#include <core/math.h>
#include <subdev/bar.h>
@@ -104,6 +103,6 @@ nouveau_ramht_new(struct nouveau_object *parent, struct nouveau_object *pargpu,
if (ret)
return ret;
- ramht->bits = log2i(nv_gpuobj(ramht)->size >> 3);
+ ramht->bits = order_base_2(nv_gpuobj(ramht)->size >> 3);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c
index 8bf92b0e6d8..6b089e022fd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nv98.c
@@ -19,16 +19,14 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: Ben Skeggs
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
*/
-#include <core/engctx.h>
-#include <core/class.h>
-
+#include <engine/falcon.h>
#include <engine/bsp.h>
struct nv98_bsp_priv {
- struct nouveau_engine base;
+ struct nouveau_falcon base;
};
/*******************************************************************************
@@ -37,31 +35,49 @@ struct nv98_bsp_priv {
static struct nouveau_oclass
nv98_bsp_sclass[] = {
+ { 0x88b1, &nouveau_object_ofuncs },
+ { 0x85b1, &nouveau_object_ofuncs },
+ { 0x86b1, &nouveau_object_ofuncs },
{},
};
/*******************************************************************************
- * BSP context
+ * PBSP context
******************************************************************************/
static struct nouveau_oclass
nv98_bsp_cclass = {
.handle = NV_ENGCTX(BSP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_engctx_ctor,
- .dtor = _nouveau_engctx_dtor,
- .init = _nouveau_engctx_init,
- .fini = _nouveau_engctx_fini,
- .rd32 = _nouveau_engctx_rd32,
- .wr32 = _nouveau_engctx_wr32,
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
},
};
/*******************************************************************************
- * BSP engine/subdev functions
+ * PBSP engine/subdev functions
******************************************************************************/
static int
+nv98_bsp_init(struct nouveau_object *object)
+{
+ struct nv98_bsp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x084010, 0x0000ffd2);
+ nv_wr32(priv, 0x08401c, 0x0000fff2);
+ return 0;
+}
+
+static int
nv98_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -69,7 +85,7 @@ nv98_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv98_bsp_priv *priv;
int ret;
- ret = nouveau_engine_create(parent, engine, oclass, true,
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x084000, true,
"PBSP", "bsp", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -86,8 +102,10 @@ nv98_bsp_oclass = {
.handle = NV_ENGINE(BSP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv98_bsp_ctor,
- .dtor = _nouveau_engine_dtor,
- .init = _nouveau_engine_init,
- .fini = _nouveau_engine_fini,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nv98_bsp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
index f02fd9f443f..a66b27c0fca 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
@@ -49,18 +49,23 @@ int
nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
{
const u32 doff = (or * 0x800);
- int load = -EINVAL;
+
nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000);
nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
+
nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
mdelay(9);
udelay(500);
- nv_wr32(priv, 0x61a00c + doff, 0x80000000);
- load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27;
- nv_wr32(priv, 0x61a00c + doff, 0x00000000);
+ loadval = nv_mask(priv, 0x61a00c + doff, 0xffffffff, 0x00000000);
+
nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000);
nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
- return load;
+
+ nv_debug(priv, "DAC%d sense: 0x%08x\n", or, loadval);
+ if (!(loadval & 0x80000000))
+ return -ETIMEDOUT;
+
+ return (loadval & 0x38000000) >> 27;
}
int
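The reworked sense path reads the load register back exactly once (the nv_mask() both samples and clears it) and then decodes two things from that single value: bit 31 as a "measurement completed" flag and bits 27:29 as the load status. A standalone sketch of that decoding, with the bit layout taken from the hunk above:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Decode a DAC load sample: bit 31 = done, bits 27:29 = load status. */
static int decode_dac_sense(uint32_t loadval)
{
	if (!(loadval & 0x80000000))
		return -ETIMEDOUT;		/* sense never completed */
	return (loadval & 0x38000000) >> 27;	/* load code, 0..7 */
}

int main(void)
{
	printf("%d\n", decode_dac_sense(0xb8000000));	/* done, load = 7 */
	printf("%d\n", decode_dac_sense(0x00000000));	/* -ETIMEDOUT */
	return 0;
}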
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 31cc8fe8e7f..054d9cff4f5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -150,7 +150,7 @@ dp_link_train_update(struct dp_state *dp, u32 delay)
if (ret)
return ret;
- DBG("status %*ph\n", 6, dp->stat);
+ DBG("status %6ph\n", dp->stat);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
index 373dbcc523b..a19e7d79b84 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
@@ -36,6 +36,8 @@ nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
if (data && data[0]) {
for (i = 0; i < size; i++)
nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]);
+ for (; i < 0x60; i++)
+ nv_wr32(priv, 0x61c440 + soff, (i << 8));
nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
} else
if (data) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
index dc57e24fc1d..717639386ce 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
@@ -41,6 +41,8 @@ nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
if (data && data[0]) {
for (i = 0; i < size; i++)
nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]);
+ for (; i < 0x60; i++)
+ nv_wr32(priv, 0x10ec00 + soff, (i << 8));
nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
} else
if (data) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 7ffe2f309f1..c168ae3eaa9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -628,7 +628,7 @@ nv50_disp_base_init(struct nouveau_object *object)
}
/* ... PIOR caps */
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < priv->pior.nr; i++) {
tmp = nv_rd32(priv, 0x61e000 + (i * 0x800));
nv_wr32(priv, 0x6101f0 + (i * 0x04), tmp);
}
@@ -834,10 +834,11 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
u8 ver, hdr, cnt, len;
u16 data;
u32 ctrl = 0x00000000;
+ u32 reg;
int i;
/* DAC */
- for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++)
ctrl = nv_rd32(priv, 0x610b5c + (i * 8));
/* SOR */
@@ -845,19 +846,18 @@ exec_script(struct nv50_disp_priv *priv, int head, int id)
if (nv_device(priv)->chipset < 0x90 ||
nv_device(priv)->chipset == 0x92 ||
nv_device(priv)->chipset == 0xa0) {
- for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
- ctrl = nv_rd32(priv, 0x610b74 + (i * 8));
- i += 4;
+ reg = 0x610b74;
} else {
- for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
- ctrl = nv_rd32(priv, 0x610798 + (i * 8));
- i += 4;
+ reg = 0x610798;
}
+ for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++)
+ ctrl = nv_rd32(priv, reg + (i * 8));
+ i += 4;
}
/* PIOR */
if (!(ctrl & (1 << head))) {
- for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++)
ctrl = nv_rd32(priv, 0x610b84 + (i * 8));
i += 8;
}
@@ -893,10 +893,11 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
u8 ver, hdr, cnt, len;
u32 ctrl = 0x00000000;
u32 data, conf = ~0;
+ u32 reg;
int i;
/* DAC */
- for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ for (i = 0; !(ctrl & (1 << head)) && i < priv->dac.nr; i++)
ctrl = nv_rd32(priv, 0x610b58 + (i * 8));
/* SOR */
@@ -904,19 +905,18 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk,
if (nv_device(priv)->chipset < 0x90 ||
nv_device(priv)->chipset == 0x92 ||
nv_device(priv)->chipset == 0xa0) {
- for (i = 0; !(ctrl & (1 << head)) && i < 2; i++)
- ctrl = nv_rd32(priv, 0x610b70 + (i * 8));
- i += 4;
+ reg = 0x610b70;
} else {
- for (i = 0; !(ctrl & (1 << head)) && i < 4; i++)
- ctrl = nv_rd32(priv, 0x610794 + (i * 8));
- i += 4;
+ reg = 0x610794;
}
+ for (i = 0; !(ctrl & (1 << head)) && i < priv->sor.nr; i++)
+ ctrl = nv_rd32(priv, reg + (i * 8));
+ i += 4;
}
/* PIOR */
if (!(ctrl & (1 << head))) {
- for (i = 0; !(ctrl & (1 << head)) && i < 3; i++)
+ for (i = 0; !(ctrl & (1 << head)) && i < priv->pior.nr; i++)
ctrl = nv_rd32(priv, 0x610b80 + (i * 8));
i += 8;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
index ab1e918469a..526b7524289 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -47,14 +47,8 @@ int
nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
{
struct nv50_disp_priv *priv = (void *)object->engine;
- struct nouveau_bios *bios = nouveau_bios(priv);
- const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12;
const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
- const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2;
const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR);
- const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
- struct dcb_output outp;
- u8 ver, hdr;
u32 data;
int ret = -EINVAL;
@@ -62,8 +56,6 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
return -EINVAL;
data = *(u32 *)args;
- if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp))
- return -ENODEV;
switch (mthd & ~0x3f) {
case NV50_DISP_SOR_PWR:
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index e9b8217d007..7e5dff51d3c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -26,7 +26,6 @@
#include <core/engctx.h>
#include <core/ramht.h>
#include <core/class.h>
-#include <core/math.h>
#include <subdev/timer.h>
#include <subdev/bar.h>
@@ -278,7 +277,7 @@ nv50_fifo_chan_ctor_ind(struct nouveau_object *parent,
return ret;
ioffset = args->ioffset;
- ilength = log2i(args->ilength / 8);
+ ilength = order_base_2(args->ilength / 8);
nv_wo32(base->ramfc, 0x3c, 0x403f6078);
nv_wo32(base->ramfc, 0x44, 0x01003fff);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
index 7f53196cff5..91a87cd7195 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv84.c
@@ -28,7 +28,6 @@
#include <core/ramht.h>
#include <core/event.h>
#include <core/class.h>
-#include <core/math.h>
#include <subdev/timer.h>
#include <subdev/bar.h>
@@ -57,6 +56,7 @@ nv84_fifo_context_attach(struct nouveau_object *parent,
case NVDEV_ENGINE_SW : return 0;
case NVDEV_ENGINE_GR : addr = 0x0020; break;
case NVDEV_ENGINE_VP : addr = 0x0040; break;
+ case NVDEV_ENGINE_PPP :
case NVDEV_ENGINE_MPEG : addr = 0x0060; break;
case NVDEV_ENGINE_BSP : addr = 0x0080; break;
case NVDEV_ENGINE_CRYPT: addr = 0x00a0; break;
@@ -92,6 +92,7 @@ nv84_fifo_context_detach(struct nouveau_object *parent, bool suspend,
case NVDEV_ENGINE_SW : return 0;
case NVDEV_ENGINE_GR : engn = 0; addr = 0x0020; break;
case NVDEV_ENGINE_VP : engn = 3; addr = 0x0040; break;
+ case NVDEV_ENGINE_PPP :
case NVDEV_ENGINE_MPEG : engn = 1; addr = 0x0060; break;
case NVDEV_ENGINE_BSP : engn = 5; addr = 0x0080; break;
case NVDEV_ENGINE_CRYPT: engn = 4; addr = 0x00a0; break;
@@ -258,7 +259,7 @@ nv84_fifo_chan_ctor_ind(struct nouveau_object *parent,
nv_parent(chan)->object_detach = nv50_fifo_object_detach;
ioffset = args->ioffset;
- ilength = log2i(args->ilength / 8);
+ ilength = order_base_2(args->ilength / 8);
nv_wo32(base->ramfc, 0x3c, 0x403f6078);
nv_wo32(base->ramfc, 0x44, 0x01003fff);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 46dfa68c47b..ce92f289e75 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -29,7 +29,6 @@
#include <core/engctx.h>
#include <core/event.h>
#include <core/class.h>
-#include <core/math.h>
#include <core/enum.h>
#include <subdev/timer.h>
@@ -200,7 +199,7 @@ nvc0_fifo_chan_ctor(struct nouveau_object *parent,
usermem = chan->base.chid * 0x1000;
ioffset = args->ioffset;
- ilength = log2i(args->ilength / 8);
+ ilength = order_base_2(args->ilength / 8);
for (i = 0; i < 0x1000; i += 4)
nv_wo32(priv->user.mem, usermem + i, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 09644fa9602..8e8121abe31 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -29,7 +29,6 @@
#include <core/engctx.h>
#include <core/event.h>
#include <core/class.h>
-#include <core/math.h>
#include <core/enum.h>
#include <subdev/timer.h>
@@ -240,7 +239,7 @@ nve0_fifo_chan_ctor(struct nouveau_object *parent,
usermem = chan->base.chid * 0x200;
ioffset = args->ioffset;
- ilength = log2i(args->ilength / 8);
+ ilength = order_base_2(args->ilength / 8);
for (i = 0; i < 0x200; i += 4)
nv_wo32(priv->user.mem, usermem + i, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
index 7da35a4e797..ad820937752 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
@@ -1,6 +1,9 @@
#ifndef __NV40_GRAPH_H__
#define __NV40_GRAPH_H__
+#include <core/device.h>
+#include <core/gpuobj.h>
+
/* returns 1 if device is one of the nv4x using the 0x4497 object class,
* helpful to determine a number of other hardware features
*/
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index 49ecbb859b2..c1900430130 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -265,8 +265,8 @@ nv31_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int
nv31_mpeg_init(struct nouveau_object *object)
{
- struct nouveau_engine *engine = nv_engine(object->engine);
- struct nv31_mpeg_priv *priv = (void *)engine;
+ struct nouveau_engine *engine = nv_engine(object);
+ struct nv31_mpeg_priv *priv = (void *)object;
struct nouveau_fb *pfb = nouveau_fb(object);
int ret, i;
@@ -284,7 +284,10 @@ nv31_mpeg_init(struct nouveau_object *object)
/* PMPEG init */
nv_wr32(priv, 0x00b32c, 0x00000000);
nv_wr32(priv, 0x00b314, 0x00000100);
- nv_wr32(priv, 0x00b220, nv44_graph_class(priv) ? 0x00000044 : 0x00000031);
+ if (nv_device(priv)->chipset >= 0x40 && nv44_graph_class(priv))
+ nv_wr32(priv, 0x00b220, 0x00000044);
+ else
+ nv_wr32(priv, 0x00b220, 0x00000031);
nv_wr32(priv, 0x00b300, 0x02001ec1);
nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);
diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
index f7c581ad199..dd6196072e9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -61,6 +61,7 @@ nv40_mpeg_context_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ nv_wo32(&chan->base.base, 0x78, 0x02001ec1);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
index 5a5b2a773ed..13bf31c40aa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nv98.c
@@ -19,21 +19,14 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: Ben Skeggs
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
*/
-#include <core/engine.h>
-#include <core/engctx.h>
-#include <core/class.h>
-
+#include <engine/falcon.h>
#include <engine/ppp.h>
struct nv98_ppp_priv {
- struct nouveau_engine base;
-};
-
-struct nv98_ppp_chan {
- struct nouveau_engctx base;
+ struct nouveau_falcon base;
};
/*******************************************************************************
@@ -42,6 +35,8 @@ struct nv98_ppp_chan {
static struct nouveau_oclass
nv98_ppp_sclass[] = {
+ { 0x88b3, &nouveau_object_ofuncs },
+ { 0x85b3, &nouveau_object_ofuncs },
{},
};
@@ -53,12 +48,12 @@ static struct nouveau_oclass
nv98_ppp_cclass = {
.handle = NV_ENGCTX(PPP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_engctx_ctor,
- .dtor = _nouveau_engctx_dtor,
- .init = _nouveau_engctx_init,
- .fini = _nouveau_engctx_fini,
- .rd32 = _nouveau_engctx_rd32,
- .wr32 = _nouveau_engctx_wr32,
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
},
};
@@ -67,6 +62,21 @@ nv98_ppp_cclass = {
******************************************************************************/
static int
+nv98_ppp_init(struct nouveau_object *object)
+{
+ struct nv98_ppp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x086010, 0x0000ffd2);
+ nv_wr32(priv, 0x08601c, 0x0000fff2);
+ return 0;
+}
+
+static int
nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -74,7 +84,7 @@ nv98_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv98_ppp_priv *priv;
int ret;
- ret = nouveau_engine_create(parent, engine, oclass, true,
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x086000, true,
"PPPP", "ppp", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -91,8 +101,10 @@ nv98_ppp_oclass = {
.handle = NV_ENGINE(PPP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv98_ppp_ctor,
- .dtor = _nouveau_engine_dtor,
- .init = _nouveau_engine_init,
- .fini = _nouveau_engine_fini,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nv98_ppp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nv98.c b/drivers/gpu/drm/nouveau/core/engine/vp/nv98.c
index 8a8236bc84d..fc9ae0ff1ef 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nv98.c
@@ -19,16 +19,14 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors: Ben Skeggs
+ * Authors: Ben Skeggs, Maarten Lankhorst, Ilia Mirkin
*/
-#include <core/engctx.h>
-#include <core/class.h>
-
+#include <engine/falcon.h>
#include <engine/vp.h>
struct nv98_vp_priv {
- struct nouveau_engine base;
+ struct nouveau_falcon base;
};
/*******************************************************************************
@@ -37,6 +35,8 @@ struct nv98_vp_priv {
static struct nouveau_oclass
nv98_vp_sclass[] = {
+ { 0x88b2, &nouveau_object_ofuncs },
+ { 0x85b2, &nouveau_object_ofuncs },
{},
};
@@ -48,12 +48,12 @@ static struct nouveau_oclass
nv98_vp_cclass = {
.handle = NV_ENGCTX(VP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
- .ctor = _nouveau_engctx_ctor,
- .dtor = _nouveau_engctx_dtor,
- .init = _nouveau_engctx_init,
- .fini = _nouveau_engctx_fini,
- .rd32 = _nouveau_engctx_rd32,
- .wr32 = _nouveau_engctx_wr32,
+ .ctor = _nouveau_falcon_context_ctor,
+ .dtor = _nouveau_falcon_context_dtor,
+ .init = _nouveau_falcon_context_init,
+ .fini = _nouveau_falcon_context_fini,
+ .rd32 = _nouveau_falcon_context_rd32,
+ .wr32 = _nouveau_falcon_context_wr32,
},
};
@@ -62,6 +62,21 @@ nv98_vp_cclass = {
******************************************************************************/
static int
+nv98_vp_init(struct nouveau_object *object)
+{
+ struct nv98_vp_priv *priv = (void *)object;
+ int ret;
+
+ ret = nouveau_falcon_init(&priv->base);
+ if (ret)
+ return ret;
+
+ nv_wr32(priv, 0x085010, 0x0000ffd2);
+ nv_wr32(priv, 0x08501c, 0x0000fff2);
+ return 0;
+}
+
+static int
nv98_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 size,
struct nouveau_object **pobject)
@@ -69,7 +84,7 @@ nv98_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv98_vp_priv *priv;
int ret;
- ret = nouveau_engine_create(parent, engine, oclass, true,
+ ret = nouveau_falcon_create(parent, engine, oclass, 0x085000, true,
"PVP", "vp", &priv);
*pobject = nv_object(priv);
if (ret)
@@ -86,8 +101,10 @@ nv98_vp_oclass = {
.handle = NV_ENGINE(VP, 0x98),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nv98_vp_ctor,
- .dtor = _nouveau_engine_dtor,
- .init = _nouveau_engine_init,
- .fini = _nouveau_engine_fini,
+ .dtor = _nouveau_falcon_dtor,
+ .init = nv98_vp_init,
+ .fini = _nouveau_falcon_fini,
+ .rd32 = _nouveau_falcon_rd32,
+ .wr32 = _nouveau_falcon_wr32,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/engine/xtensa.c b/drivers/gpu/drm/nouveau/core/engine/xtensa.c
index 0639bc59d0a..5f6ede7c489 100644
--- a/drivers/gpu/drm/nouveau/core/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/core/engine/xtensa.c
@@ -118,7 +118,13 @@ _nouveau_xtensa_init(struct nouveau_object *object)
return ret;
}
- ret = nouveau_gpuobj_new(object, NULL, fw->size, 0x1000, 0,
+ if (fw->size > 0x40000) {
+ nv_warn(xtensa, "firmware %s too large\n", name);
+ release_firmware(fw);
+ return -EINVAL;
+ }
+
+ ret = nouveau_gpuobj_new(object, NULL, 0x40000, 0x1000, 0,
&xtensa->gpu_fw);
if (ret) {
release_firmware(fw);
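The added check rejects firmware images larger than the fixed 0x40000-byte backing object instead of sizing the object from fw->size. A condensed, hedged sketch of that validate-before-allocate ordering using the generic firmware API (the function and buffer here are illustrative, not the xtensa code):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firmware.h>
#include <linux/string.h>

/* Sketch: cap the accepted firmware size to a fixed backing buffer
 * before committing it, as the xtensa change does with 0x40000 bytes. */
static int example_load_fw(struct device *dev, const char *name,
			   void *buf, size_t buf_size)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, name, dev);
	if (ret)
		return ret;

	if (fw->size > buf_size) {
		dev_warn(dev, "firmware %s too large\n", name);
		release_firmware(fw);
		return -EINVAL;
	}

	memcpy(buf, fw->data, fw->size);
	release_firmware(fw);
	return 0;
}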
diff --git a/drivers/gpu/drm/nouveau/core/include/core/math.h b/drivers/gpu/drm/nouveau/core/include/core/math.h
deleted file mode 100644
index f808131c5cd..00000000000
--- a/drivers/gpu/drm/nouveau/core/include/core/math.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef __NOUVEAU_MATH_H__
-#define __NOUVEAU_MATH_H__
-
-static inline int
-log2i(u64 base)
-{
- u64 temp = base >> 1;
- int log2;
-
- for (log2 = 0; temp; log2++, temp >>= 1) {
- }
-
- return (base & (base - 1)) ? log2 + 1: log2;
-}
-
-#endif
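The deleted log2i() computed the ceiling of log2 (rounding up for non-powers of two), which is exactly what the kernel's order_base_2() from <linux/log2.h> provides, hence the one-for-one substitutions in the fifo and ramht code above. A quick standalone check of the equivalence for a few inputs, using a local copy of the removed helper:

#include <stdint.h>
#include <stdio.h>

/* Local copy of the removed helper: ceil(log2(base)), with log2i(1) == 0. */
static int log2i(uint64_t base)
{
	uint64_t temp = base >> 1;
	int log2;

	for (log2 = 0; temp; log2++, temp >>= 1)
		;
	return (base & (base - 1)) ? log2 + 1 : log2;
}

int main(void)
{
	/* order_base_2(n) in the kernel gives the same results:
	 * 1 -> 0, 2 -> 1, 8 -> 3, 9 -> 4, 4096 -> 12 */
	uint64_t v[] = { 1, 2, 8, 9, 4096 };

	for (int i = 0; i < 5; i++)
		printf("log2i(%llu) = %d\n", (unsigned long long)v[i], log2i(v[i]));
	return 0;
}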
diff --git a/drivers/gpu/drm/nouveau/core/include/core/printk.h b/drivers/gpu/drm/nouveau/core/include/core/printk.h
index febed2ea5c8..d87836e3a70 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/printk.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/printk.h
@@ -15,6 +15,12 @@ struct nouveau_object;
#define NV_PRINTK_TRACE KERN_DEBUG
#define NV_PRINTK_SPAM KERN_DEBUG
+extern int nv_printk_suspend_level;
+
+#define NV_DBG_SUSPEND (nv_printk_suspend_level)
+#define NV_PRINTK_SUSPEND (nv_printk_level_to_pfx(nv_printk_suspend_level))
+
+const char *nv_printk_level_to_pfx(int level);
void __printf(4, 5)
nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
@@ -31,6 +37,13 @@ nv_printk_(struct nouveau_object *, const char *, int, const char *, ...);
#define nv_trace(o,f,a...) nv_printk((o), TRACE, f, ##a)
#define nv_spam(o,f,a...) nv_printk((o), SPAM, f, ##a)
+#define nv_suspend(o,f,a...) nv_printk((o), SUSPEND, f, ##a)
+
+static inline void nv_suspend_set_printk_level(int level)
+{
+ nv_printk_suspend_level = level;
+}
+
#define nv_assert(f,a...) do { \
if (NV_DBG_FATAL <= CONFIG_NOUVEAU_DEBUG) \
nv_printk_(NULL, NV_PRINTK_FATAL, NV_DBG_FATAL, f "\n", ##a); \
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 888384c0bed..7e4e2775f24 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -39,8 +39,8 @@ struct nouveau_i2c_func {
int (*drv_ctl)(struct nouveau_i2c_port *, int lane, int sw, int pe);
};
-#define nouveau_i2c_port_create(p,e,o,i,a,d) \
- nouveau_i2c_port_create_((p), (e), (o), (i), (a), \
+#define nouveau_i2c_port_create(p,e,o,i,a,f,d) \
+ nouveau_i2c_port_create_((p), (e), (o), (i), (a), (f), \
sizeof(**d), (void **)d)
#define nouveau_i2c_port_destroy(p) ({ \
struct nouveau_i2c_port *port = (p); \
@@ -53,7 +53,9 @@ struct nouveau_i2c_func {
int nouveau_i2c_port_create_(struct nouveau_object *, struct nouveau_object *,
struct nouveau_oclass *, u8,
- const struct i2c_algorithm *, int, void **);
+ const struct i2c_algorithm *,
+ const struct nouveau_i2c_func *,
+ int, void **);
void _nouveau_i2c_port_dtor(struct nouveau_object *);
#define _nouveau_i2c_port_init nouveau_object_init
#define _nouveau_i2c_port_fini nouveau_object_fini
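With this change the nouveau_i2c_func table is passed into nouveau_i2c_port_create() up front, so a port never exists without its function table; previously each constructor assigned port->base.func after creation, as the removed lines in the per-chipset files below show. A generic standalone sketch of that "ops supplied at construction" pattern (illustrative names, not the nouveau API):

#include <stdio.h>
#include <stdlib.h>

struct widget_ops {
	void (*ping)(void);
};

struct widget {
	const struct widget_ops *ops;	/* set once, at construction */
	int index;
};

/* Constructor takes the ops table as an argument, so there is no window
 * in which a half-initialised object (ops == NULL) can be observed. */
static struct widget *widget_create(int index, const struct widget_ops *ops)
{
	struct widget *w = calloc(1, sizeof(*w));

	if (!w)
		return NULL;
	w->index = index;
	w->ops = ops;
	return w;
}

static void demo_ping(void) { puts("ping"); }

int main(void)
{
	static const struct widget_ops ops = { .ping = demo_ping };
	struct widget *w = widget_create(0, &ops);

	if (w) {
		w->ops->ping();
		free(w);
	}
	return 0;
}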
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index d5502267c30..ce6569f365a 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -12,6 +12,7 @@ struct nouveau_mc_intr {
struct nouveau_mc {
struct nouveau_subdev base;
const struct nouveau_mc_intr *intr_map;
+ bool use_msi;
};
static inline struct nouveau_mc *
@@ -20,8 +21,8 @@ nouveau_mc(void *obj)
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
}
-#define nouveau_mc_create(p,e,o,d) \
- nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_mc_create(p,e,o,m,d) \
+ nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
#define nouveau_mc_destroy(p) ({ \
struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
})
@@ -33,7 +34,8 @@ nouveau_mc(void *obj)
})
int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
- struct nouveau_oclass *, int, void **);
+ struct nouveau_oclass *, const struct nouveau_mc_intr *,
+ int, void **);
void _nouveau_mc_dtor(struct nouveau_object *);
int _nouveau_mc_init(struct nouveau_object *);
int _nouveau_mc_fini(struct nouveau_object *, bool);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
index e465d158d35..9ab70dfe5b0 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/timer.h
@@ -22,6 +22,7 @@ bool nouveau_timer_wait_eq(void *, u64 nsec, u32 addr, u32 mask, u32 data);
bool nouveau_timer_wait_ne(void *, u64 nsec, u32 addr, u32 mask, u32 data);
bool nouveau_timer_wait_cb(void *, u64 nsec, bool (*func)(void *), void *data);
void nouveau_timer_alarm(void *, u32 nsec, struct nouveau_alarm *);
+void nouveau_timer_alarm_cancel(void *, struct nouveau_alarm *);
#define NV_WAIT_DEFAULT 2000000000ULL
#define nv_wait(o,a,m,v) \
@@ -35,6 +36,7 @@ struct nouveau_timer {
struct nouveau_subdev base;
u64 (*read)(struct nouveau_timer *);
void (*alarm)(struct nouveau_timer *, u64 time, struct nouveau_alarm *);
+ void (*alarm_cancel)(struct nouveau_timer *, struct nouveau_alarm *);
};
static inline struct nouveau_timer *
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
index f2e87b10566..fcf57fa309b 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
@@ -55,7 +55,7 @@ struct nouveau_vma {
struct nouveau_vm {
struct nouveau_vmmgr *vmm;
struct nouveau_mm mm;
- int refcount;
+ struct kref refcount;
struct list_head pgd_list;
atomic_t engref[NVDEV_SUBDEV_NR];
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h
index 3bd9be2ab37..191e739f30d 100644
--- a/drivers/gpu/drm/nouveau/core/os.h
+++ b/drivers/gpu/drm/nouveau/core/os.h
@@ -13,11 +13,13 @@
#include <linux/i2c-algo-bit.h>
#include <linux/delay.h>
#include <linux/io-mapping.h>
-#include <linux/vmalloc.h>
#include <linux/acpi.h>
+#include <linux/vmalloc.h>
#include <linux/dmi.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
+#include <linux/log2.h>
+#include <linux/pm_runtime.h>
#include <asm/unaligned.h>
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 0687e648143..2e11ea02cf8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -2165,7 +2165,7 @@ nvbios_init(struct nouveau_subdev *subdev, bool execute)
u16 data;
if (execute)
- nv_info(bios, "running init tables\n");
+ nv_suspend(bios, "running init tables\n");
while (!ret && (data = (init_script(bios, ++i)))) {
struct nvbios_init init = {
.subdev = subdev,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
index 22a20573ed1..22ac6dbd6c8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/therm.c
@@ -184,7 +184,8 @@ nvbios_therm_fan_parse(struct nouveau_bios *bios,
cur_trip->fan_duty = value;
break;
case 0x26:
- fan->pwm_freq = value;
+ if (!fan->pwm_freq)
+ fan->pwm_freq = value;
break;
case 0x3b:
fan->bump_period = value;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index 6c974dd83e8..db9d6ddde52 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -81,7 +81,7 @@ void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
u32 pitch, u32 flags, struct nouveau_fb_tile *);
-void nv50_ram_put(struct nouveau_fb *, struct nouveau_mem **);
+void __nv50_ram_put(struct nouveau_fb *, struct nouveau_mem *);
extern int nv50_fb_memtype[0x80];
#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
index 19e3a9a63a0..ab7ef0ac9e3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
@@ -40,15 +40,15 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
return ret;
switch (pfb914 & 0x00000003) {
- case 0x00000000: pfb->ram->type = NV_MEM_TYPE_DDR1; break;
- case 0x00000001: pfb->ram->type = NV_MEM_TYPE_DDR2; break;
- case 0x00000002: pfb->ram->type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break;
+ case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break;
case 0x00000003: break;
}
- pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
- pfb->ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
- pfb->ram->tags = nv_rd32(pfb, 0x100320);
+ ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+ ram->tags = nv_rd32(pfb, 0x100320);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
index 7192aa6e557..63a6aab8602 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
@@ -38,8 +38,8 @@ nv4e_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
- pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
- pfb->ram->type = NV_MEM_TYPE_STOLEN;
+ ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+ ram->type = NV_MEM_TYPE_STOLEN;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
index af5aa7ee8ad..903baff77fd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
@@ -27,17 +27,10 @@
#include "priv.h"
void
-nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
+__nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem *mem)
{
struct nouveau_mm_node *this;
- struct nouveau_mem *mem;
- mem = *pmem;
- *pmem = NULL;
- if (unlikely(mem == NULL))
- return;
-
- mutex_lock(&pfb->base.mutex);
while (!list_empty(&mem->regions)) {
this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
@@ -46,6 +39,19 @@ nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
}
nouveau_mm_free(&pfb->tags, &mem->tag);
+}
+
+void
+nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
+{
+ struct nouveau_mem *mem = *pmem;
+
+ *pmem = NULL;
+ if (unlikely(mem == NULL))
+ return;
+
+ mutex_lock(&pfb->base.mutex);
+ __nv50_ram_put(pfb, mem);
mutex_unlock(&pfb->base.mutex);
kfree(mem);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
index 9c3634acbb9..cf97c4de4a6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvc0.c
@@ -33,11 +33,19 @@ void
nvc0_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
{
struct nouveau_ltcg *ltcg = nouveau_ltcg(pfb);
+ struct nouveau_mem *mem = *pmem;
- if ((*pmem)->tag)
- ltcg->tags_free(ltcg, &(*pmem)->tag);
+ *pmem = NULL;
+ if (unlikely(mem == NULL))
+ return;
- nv50_ram_put(pfb, pmem);
+ mutex_lock(&pfb->base.mutex);
+ if (mem->tag)
+ ltcg->tags_free(ltcg, &mem->tag);
+ __nv50_ram_put(pfb, mem);
+ mutex_unlock(&pfb->base.mutex);
+
+ kfree(mem);
}
int
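nv50_ram_put() is split so the list/tag teardown lives in an unlocked __nv50_ram_put() helper, and each caller (nv50 above, nvc0 here) takes pfb->base.mutex itself and can do extra work, such as the ltcg tag free, inside the same critical section. A small standalone sketch of that locked-wrapper/unlocked-helper split (pthread mutex standing in for the subdev mutex; this illustrates the pattern, not the nouveau code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;

struct mem { int tag; };

/* Unlocked helper: caller must hold pool_lock. */
static void __mem_put(struct mem *mem)
{
	printf("releasing regions for tag %d\n", mem->tag);
}

/* Plain variant: lock, call the helper, unlock, free. */
static void mem_put(struct mem **pmem)
{
	struct mem *mem = *pmem;

	*pmem = NULL;
	if (!mem)
		return;
	pthread_mutex_lock(&pool_lock);
	__mem_put(mem);
	pthread_mutex_unlock(&pool_lock);
	free(mem);
}

/* Variant that does extra per-flavour work under the same lock,
 * mirroring how nvc0_ram_put() also frees compression tags. */
static void mem_put_with_tags(struct mem **pmem)
{
	struct mem *mem = *pmem;

	*pmem = NULL;
	if (!mem)
		return;
	pthread_mutex_lock(&pool_lock);
	if (mem->tag)
		printf("freeing compression tag %d\n", mem->tag);
	__mem_put(mem);
	pthread_mutex_unlock(&pool_lock);
	free(mem);
}

int main(void)
{
	struct mem *a = calloc(1, sizeof(*a));
	struct mem *b = calloc(1, sizeof(*b));

	if (b)
		b->tag = 7;
	mem_put(&a);
	mem_put_with_tags(&b);
	return 0;
}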
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index bf489dcf46e..c4c1d415e7f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -103,7 +103,7 @@ nv50_gpio_intr(struct nouveau_subdev *subdev)
int i;
intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050);
- if (nv_device(priv)->chipset >= 0x90)
+ if (nv_device(priv)->chipset > 0x92)
intr1 = nv_rd32(priv, 0xe074) & nv_rd32(priv, 0xe070);
hi = (intr0 & 0x0000ffff) | (intr1 << 16);
@@ -115,7 +115,7 @@ nv50_gpio_intr(struct nouveau_subdev *subdev)
}
nv_wr32(priv, 0xe054, intr0);
- if (nv_device(priv)->chipset >= 0x90)
+ if (nv_device(priv)->chipset > 0x92)
nv_wr32(priv, 0xe074, intr1);
}
@@ -146,7 +146,7 @@ nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret;
ret = nouveau_gpio_create(parent, engine, oclass,
- nv_device(parent)->chipset >= 0x90 ? 32 : 16,
+ nv_device(parent)->chipset > 0x92 ? 32 : 16,
&priv);
*pobject = nv_object(priv);
if (ret)
@@ -182,7 +182,7 @@ nv50_gpio_init(struct nouveau_object *object)
/* disable, and ack any pending gpio interrupts */
nv_wr32(priv, 0xe050, 0x00000000);
nv_wr32(priv, 0xe054, 0xffffffff);
- if (nv_device(priv)->chipset >= 0x90) {
+ if (nv_device(priv)->chipset > 0x92) {
nv_wr32(priv, 0xe070, 0x00000000);
nv_wr32(priv, 0xe074, 0xffffffff);
}
@@ -195,7 +195,7 @@ nv50_gpio_fini(struct nouveau_object *object, bool suspend)
{
struct nv50_gpio_priv *priv = (void *)object;
nv_wr32(priv, 0xe050, 0x00000000);
- if (nv_device(priv)->chipset >= 0x90)
+ if (nv_device(priv)->chipset > 0x92)
nv_wr32(priv, 0xe070, 0x00000000);
return nouveau_gpio_fini(&priv->base, suspend);
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
index dec94e9d776..4b195ac4da6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/anx9805.c
@@ -118,7 +118,8 @@ anx9805_aux_chan_ctor(struct nouveau_object *parent,
int ret;
ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_aux_algo, &chan);
+ &nouveau_i2c_aux_algo, &anx9805_aux_func,
+ &chan);
*pobject = nv_object(chan);
if (ret)
return ret;
@@ -140,8 +141,6 @@ anx9805_aux_chan_ctor(struct nouveau_object *parent,
struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
algo->udelay = max(algo->udelay, 40);
}
-
- chan->base.func = &anx9805_aux_func;
return 0;
}
@@ -234,7 +233,8 @@ anx9805_ddc_port_ctor(struct nouveau_object *parent,
int ret;
ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &anx9805_i2c_algo, &port);
+ &anx9805_i2c_algo, &anx9805_i2c_func,
+ &port);
*pobject = nv_object(port);
if (ret)
return ret;
@@ -256,8 +256,6 @@ anx9805_ddc_port_ctor(struct nouveau_object *parent,
struct i2c_algo_bit_data *algo = mast->adapter.algo_data;
algo->udelay = max(algo->udelay, 40);
}
-
- port->base.func = &anx9805_i2c_func;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 8ae2625415e..2895c19bb15 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -95,6 +95,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
struct nouveau_object *engine,
struct nouveau_oclass *oclass, u8 index,
const struct i2c_algorithm *algo,
+ const struct nouveau_i2c_func *func,
int size, void **pobject)
{
struct nouveau_device *device = nv_device(parent);
@@ -112,6 +113,7 @@ nouveau_i2c_port_create_(struct nouveau_object *parent,
port->adapter.owner = THIS_MODULE;
port->adapter.dev.parent = &device->pdev->dev;
port->index = index;
+ port->func = func;
i2c_set_adapdata(&port->adapter, i2c);
if ( algo == &nouveau_i2c_bit_algo &&
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
index 2ad18840fe6..860d5d2365d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv04.c
@@ -91,12 +91,12 @@ nv04_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret;
ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_bit_algo, &port);
+ &nouveau_i2c_bit_algo, &nv04_i2c_func,
+ &port);
*pobject = nv_object(port);
if (ret)
return ret;
- port->base.func = &nv04_i2c_func;
port->drive = info->drive;
port->sense = info->sense;
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
index f501ae25dbb..0c2655a03bb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv4e.c
@@ -84,12 +84,12 @@ nv4e_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret;
ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_bit_algo, &port);
+ &nouveau_i2c_bit_algo, &nv4e_i2c_func,
+ &port);
*pobject = nv_object(port);
if (ret)
return ret;
- port->base.func = &nv4e_i2c_func;
port->addr = 0x600800 + info->drive;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
index 378dfa324e5..a8d67a28770 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.c
@@ -85,7 +85,8 @@ nv50_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret;
ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_bit_algo, &port);
+ &nouveau_i2c_bit_algo, &nv50_i2c_func,
+ &port);
*pobject = nv_object(port);
if (ret)
return ret;
@@ -93,7 +94,6 @@ nv50_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (info->drive >= nv50_i2c_addr_nr)
return -EINVAL;
- port->base.func = &nv50_i2c_func;
port->state = 0x00000007;
port->addr = nv50_i2c_addr[info->drive];
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
index 61b771670bf..df6d3e4b68b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
@@ -186,7 +186,8 @@ nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret;
ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_bit_algo, &port);
+ &nouveau_i2c_bit_algo, &nv94_i2c_func,
+ &port);
*pobject = nv_object(port);
if (ret)
return ret;
@@ -194,7 +195,6 @@ nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
if (info->drive >= nv50_i2c_addr_nr)
return -EINVAL;
- port->base.func = &nv94_i2c_func;
port->state = 7;
port->addr = nv50_i2c_addr[info->drive];
if (info->share != DCB_I2C_UNUSED) {
@@ -221,12 +221,12 @@ nv94_aux_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret;
ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_aux_algo, &port);
+ &nouveau_i2c_aux_algo, &nv94_aux_func,
+ &port);
*pobject = nv_object(port);
if (ret)
return ret;
- port->base.func = &nv94_aux_func;
port->addr = info->drive;
if (info->share != DCB_I2C_UNUSED) {
port->ctrl = 0x00e500 + (info->drive * 0x50);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
index f761b8a610f..29967d30f97 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
@@ -60,12 +60,12 @@ nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
int ret;
ret = nouveau_i2c_port_create(parent, engine, oclass, index,
- &nouveau_i2c_bit_algo, &port);
+ &nouveau_i2c_bit_algo, &nvd0_i2c_func,
+ &port);
*pobject = nv_object(port);
if (ret)
return ret;
- port->base.func = &nvd0_i2c_func;
port->state = 0x00000007;
port->addr = 0x00d014 + (info->drive * 0x20);
if (info->share != DCB_I2C_UNUSED) {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
index 716bf41bc3c..b10a143787a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/instmem/nv40.c
@@ -22,15 +22,9 @@
* Authors: Ben Skeggs
*/
-#include "nv04.h"
+#include <engine/graph/nv40.h>
-static inline int
-nv44_graph_class(struct nv04_instmem_priv *priv)
-{
- if ((nv_device(priv)->chipset & 0xf0) == 0x60)
- return 1;
- return !(0x0baf & (1 << (nv_device(priv)->chipset & 0x0f)));
-}
+#include "nv04.h"
static int
nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
index bcca883018f..cce65cc5651 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
@@ -30,8 +30,9 @@ struct nvc0_ltcg_priv {
struct nouveau_ltcg base;
u32 part_nr;
u32 subp_nr;
- struct nouveau_mm tags;
u32 num_tags;
+ u32 tag_base;
+ struct nouveau_mm tags;
struct nouveau_mm_node *tag_ram;
};
@@ -117,10 +118,6 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
u32 tag_size, tag_margin, tag_align;
int ret;
- nv_wr32(priv, 0x17e8d8, priv->part_nr);
- if (nv_device(pfb)->card_type >= NV_E0)
- nv_wr32(priv, 0x17e000, priv->part_nr);
-
/* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
priv->num_tags = (pfb->ram->size >> 17) / 4;
if (priv->num_tags > (1 << 17))
@@ -142,7 +139,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
tag_size += tag_align;
tag_size = (tag_size + 0xfff) >> 12; /* round up */
- ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1,
+ ret = nouveau_mm_tail(&pfb->vram, 1, tag_size, tag_size, 1,
&priv->tag_ram);
if (ret) {
priv->num_tags = 0;
@@ -152,7 +149,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
tag_base += tag_align - 1;
ret = do_div(tag_base, tag_align);
- nv_wr32(priv, 0x17e8d4, tag_base);
+ priv->tag_base = tag_base;
}
ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1);
@@ -182,8 +179,6 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
}
priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28;
- nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
-
ret = nvc0_ltcg_init_tag_ram(pfb, priv);
if (ret)
return ret;
@@ -209,13 +204,32 @@ nvc0_ltcg_dtor(struct nouveau_object *object)
nouveau_ltcg_destroy(ltcg);
}
+static int
+nvc0_ltcg_init(struct nouveau_object *object)
+{
+ struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object;
+ struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg;
+ int ret;
+
+ ret = nouveau_ltcg_init(ltcg);
+ if (ret)
+ return ret;
+
+ nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
+ nv_wr32(priv, 0x17e8d8, priv->part_nr);
+ if (nv_device(ltcg)->card_type >= NV_E0)
+ nv_wr32(priv, 0x17e000, priv->part_nr);
+ nv_wr32(priv, 0x17e8d4, priv->tag_base);
+ return 0;
+}
+
struct nouveau_oclass
nvc0_ltcg_oclass = {
.handle = NV_SUBDEV(LTCG, 0xc0),
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvc0_ltcg_ctor,
.dtor = nvc0_ltcg_dtor,
- .init = _nouveau_ltcg_init,
+ .init = nvc0_ltcg_init,
.fini = _nouveau_ltcg_fini,
},
};

diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
index 1c0330b8c9a..37712a6df92 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c
@@ -23,16 +23,20 @@
*/
#include <subdev/mc.h>
+#include <core/option.h>
static irqreturn_t
nouveau_mc_intr(int irq, void *arg)
{
struct nouveau_mc *pmc = arg;
const struct nouveau_mc_intr *map = pmc->intr_map;
+ struct nouveau_device *device = nv_device(pmc);
struct nouveau_subdev *unit;
u32 stat, intr;
intr = stat = nv_rd32(pmc, 0x000100);
+ if (intr == 0xffffffff)
+ return IRQ_NONE;
while (stat && map->stat) {
if (stat & map->stat) {
unit = nouveau_subdev(pmc, map->unit);
@@ -43,10 +47,15 @@ nouveau_mc_intr(int irq, void *arg)
map++;
}
+ if (pmc->use_msi)
+ nv_wr08(pmc->base.base.parent, 0x00088068, 0xff);
+
if (intr) {
nv_error(pmc, "unknown intr 0x%08x\n", stat);
}
+ if (stat == IRQ_HANDLED)
+ pm_runtime_mark_last_busy(&device->pdev->dev);
return stat ? IRQ_HANDLED : IRQ_NONE;
}
@@ -75,12 +84,16 @@ _nouveau_mc_dtor(struct nouveau_object *object)
struct nouveau_device *device = nv_device(object);
struct nouveau_mc *pmc = (void *)object;
free_irq(device->pdev->irq, pmc);
+ if (pmc->use_msi)
+ pci_disable_msi(device->pdev);
nouveau_subdev_destroy(&pmc->base);
}
int
nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
- struct nouveau_oclass *oclass, int length, void **pobject)
+ struct nouveau_oclass *oclass,
+ const struct nouveau_mc_intr *intr_map,
+ int length, void **pobject)
{
struct nouveau_device *device = nv_device(parent);
struct nouveau_mc *pmc;
@@ -92,6 +105,25 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
if (ret)
return ret;
+ pmc->intr_map = intr_map;
+
+ switch (device->pdev->device & 0x0ff0) {
+ case 0x00f0: /* BR02? */
+ case 0x02e0: /* BR02? */
+ pmc->use_msi = false;
+ break;
+ default:
+ pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", true);
+ if (pmc->use_msi) {
+ pmc->use_msi = pci_enable_msi(device->pdev) == 0;
+ if (pmc->use_msi) {
+ nv_info(pmc, "MSI interrupts enabled\n");
+ nv_wr08(device, 0x00088068, 0xff);
+ }
+ }
+ break;
+ }
+
ret = request_irq(device->pdev->irq, nouveau_mc_intr,
IRQF_SHARED, "nouveau", pmc);
if (ret < 0)
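The constructor now decides about MSI in three steps: a small blacklist keyed on the bridged (BR02) device-ID ranges, an "NvMSI" config option that defaults to on, and a fall back to legacy line interrupts if pci_enable_msi() fails. A standalone sketch of that decision flow (pci_enable_msi() is stubbed so the snippet runs outside the kernel; the device-ID masks are the ones from the hunk above):

#include <stdbool.h>
#include <stdio.h>

/* Stub standing in for pci_enable_msi(); returns 0 on success. */
static int fake_pci_enable_msi(bool hw_supports_msi)
{
	return hw_supports_msi ? 0 : -1;
}

static bool choose_msi(unsigned short device_id, bool user_opt_in,
		       bool hw_supports_msi)
{
	switch (device_id & 0x0ff0) {
	case 0x00f0:	/* BR02-bridged boards: skip MSI entirely */
	case 0x02e0:
		return false;
	default:
		if (!user_opt_in)
			return false;
		/* option enabled: try MSI, quietly fall back if it fails */
		return fake_pci_enable_msi(hw_supports_msi) == 0;
	}
}

int main(void)
{
	printf("BR02 board:      %d\n", choose_msi(0x00f4, true, true));	/* 0 */
	printf("opted out:       %d\n", choose_msi(0x0400, false, true));	/* 0 */
	printf("normal, working: %d\n", choose_msi(0x0400, true, true));	/* 1 */
	return 0;
}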
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
index 8c769715227..64aa4edb0d9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c
@@ -50,12 +50,11 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv04_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, &priv);
+ ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.intr_map = nv04_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index 51919371810..d9891782bf2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -36,12 +36,11 @@ nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv44_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, &priv);
+ ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.intr_map = nv04_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
index 0cb322a5e72..2b1afe225db 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c
@@ -41,7 +41,7 @@ nv50_mc_intr[] = {
{ 0x04000000, NVDEV_ENGINE_DISP },
{ 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x80000000, NVDEV_ENGINE_SW },
- { 0x0000d101, NVDEV_SUBDEV_FB },
+ { 0x0002d101, NVDEV_SUBDEV_FB },
{},
};
@@ -53,12 +53,11 @@ nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv50_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, &priv);
+ ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.intr_map = nv50_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
index e82fd21b504..06710419a59 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c
@@ -35,6 +35,7 @@ nv98_mc_intr[] = {
{ 0x00001000, NVDEV_ENGINE_GR },
{ 0x00004000, NVDEV_ENGINE_CRYPT }, /* NV84:NVA3 */
{ 0x00008000, NVDEV_ENGINE_BSP },
+ { 0x00020000, NVDEV_ENGINE_VP },
{ 0x00080000, NVDEV_SUBDEV_THERM }, /* NVA3:NVC0 */
{ 0x00100000, NVDEV_SUBDEV_TIMER },
{ 0x00200000, NVDEV_SUBDEV_GPIO },
@@ -42,7 +43,7 @@ nv98_mc_intr[] = {
{ 0x04000000, NVDEV_ENGINE_DISP },
{ 0x10000000, NVDEV_SUBDEV_BUS },
{ 0x80000000, NVDEV_ENGINE_SW },
- { 0x0040d101, NVDEV_SUBDEV_FB },
+ { 0x0042d101, NVDEV_SUBDEV_FB },
{},
};
@@ -54,12 +55,11 @@ nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nv98_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, &priv);
+ ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.intr_map = nv98_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
index c5da3babbc6..104175c5a2d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c
@@ -57,12 +57,11 @@ nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nvc0_mc_priv *priv;
int ret;
- ret = nouveau_mc_create(parent, engine, oclass, &priv);
+ ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv);
*pobject = nv_object(priv);
if (ret)
return ret;
- priv->base.intr_map = nvc0_mc_intr;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
index a00a5a76e2d..f1de7a9c572 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/base.c
@@ -95,12 +95,14 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
int duty;
spin_lock_irqsave(&priv->lock, flags);
+ nv_debug(therm, "FAN speed check\n");
if (mode < 0)
mode = priv->mode;
priv->mode = mode;
switch (mode) {
case NOUVEAU_THERM_CTRL_MANUAL:
+ ptimer->alarm_cancel(ptimer, &priv->alarm);
duty = nouveau_therm_fan_get(therm);
if (duty < 0)
duty = 100;
@@ -113,6 +115,7 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
break;
case NOUVEAU_THERM_CTRL_NONE:
default:
+ ptimer->alarm_cancel(ptimer, &priv->alarm);
goto done;
}
@@ -122,6 +125,8 @@ nouveau_therm_update(struct nouveau_therm *therm, int mode)
done:
if (list_empty(&priv->alarm.head) && (mode == NOUVEAU_THERM_CTRL_AUTO))
ptimer->alarm(ptimer, 1000000000ULL, &priv->alarm);
+ else if (!list_empty(&priv->alarm.head))
+ nv_debug(therm, "therm fan alarm list is not empty\n");
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -267,9 +272,15 @@ _nouveau_therm_init(struct nouveau_object *object)
if (ret)
return ret;
- if (priv->suspend >= 0)
- nouveau_therm_fan_mode(therm, priv->mode);
- priv->sensor.program_alarms(therm);
+ if (priv->suspend >= 0) {
+ /* restore the pwm value only when in manual or auto mode */
+ if (priv->suspend > 0)
+ nouveau_therm_fan_set(therm, true, priv->fan->percent);
+
+ nouveau_therm_fan_mode(therm, priv->suspend);
+ }
+ nouveau_therm_sensor_init(therm);
+ nouveau_therm_fan_init(therm);
return 0;
}
@@ -279,6 +290,8 @@ _nouveau_therm_fini(struct nouveau_object *object, bool suspend)
struct nouveau_therm *therm = (void *)object;
struct nouveau_therm_priv *priv = (void *)therm;
+ nouveau_therm_fan_fini(therm, suspend);
+ nouveau_therm_sensor_fini(therm, suspend);
if (suspend) {
priv->suspend = priv->mode;
priv->mode = NOUVEAU_THERM_CTRL_NONE;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
index c728380d3d6..39f47b950ad 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/fan.c
@@ -204,6 +204,23 @@ nouveau_therm_fan_safety_checks(struct nouveau_therm *therm)
}
int
+nouveau_therm_fan_init(struct nouveau_therm *therm)
+{
+ return 0;
+}
+
+int
+nouveau_therm_fan_fini(struct nouveau_therm *therm, bool suspend)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ struct nouveau_timer *ptimer = nouveau_timer(therm);
+
+ if (suspend)
+ ptimer->alarm_cancel(ptimer, &priv->fan->alarm);
+ return 0;
+}
+
+int
nouveau_therm_fan_ctor(struct nouveau_therm *therm)
{
struct nouveau_therm_priv *priv = (void *)therm;
@@ -234,6 +251,9 @@ nouveau_therm_fan_ctor(struct nouveau_therm *therm)
nv_info(therm, "FAN control: %s\n", priv->fan->type);
+ /* read the current speed; it is useful when resuming */
+ priv->fan->percent = nouveau_therm_fan_get(therm);
+
/* attempt to detect a tachometer connection */
ret = gpio->find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &priv->fan->tach);
if (ret)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
index 15ca64e481f..dd38529262f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/priv.h
@@ -113,6 +113,8 @@ void nouveau_therm_ic_ctor(struct nouveau_therm *therm);
int nouveau_therm_sensor_ctor(struct nouveau_therm *therm);
int nouveau_therm_fan_ctor(struct nouveau_therm *therm);
+int nouveau_therm_fan_init(struct nouveau_therm *therm);
+int nouveau_therm_fan_fini(struct nouveau_therm *therm, bool suspend);
int nouveau_therm_fan_get(struct nouveau_therm *therm);
int nouveau_therm_fan_set(struct nouveau_therm *therm, bool now, int percent);
int nouveau_therm_fan_user_get(struct nouveau_therm *therm);
@@ -122,6 +124,8 @@ int nouveau_therm_fan_sense(struct nouveau_therm *therm);
int nouveau_therm_preinit(struct nouveau_therm *);
+int nouveau_therm_sensor_init(struct nouveau_therm *therm);
+int nouveau_therm_sensor_fini(struct nouveau_therm *therm, bool suspend);
void nouveau_therm_sensor_preinit(struct nouveau_therm *);
void nouveau_therm_sensor_set_threshold_state(struct nouveau_therm *therm,
enum nouveau_therm_thrs thrs,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index dde746c78c8..b80a33011b9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -180,6 +180,8 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
spin_lock_irqsave(&priv->sensor.alarm_program_lock, flags);
+ nv_debug(therm, "polling the internal temperature\n");
+
nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_fan_boost,
NOUVEAU_THERM_THRS_FANBOOST);
@@ -216,6 +218,25 @@ nouveau_therm_program_alarms_polling(struct nouveau_therm *therm)
alarm_timer_callback(&priv->sensor.therm_poll_alarm);
}
+int
+nouveau_therm_sensor_init(struct nouveau_therm *therm)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ priv->sensor.program_alarms(therm);
+ return 0;
+}
+
+int
+nouveau_therm_sensor_fini(struct nouveau_therm *therm, bool suspend)
+{
+ struct nouveau_therm_priv *priv = (void *)therm;
+ struct nouveau_timer *ptimer = nouveau_timer(therm);
+
+ if (suspend)
+ ptimer->alarm_cancel(ptimer, &priv->sensor.therm_poll_alarm);
+ return 0;
+}
+
void
nouveau_therm_sensor_preinit(struct nouveau_therm *therm)
{
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/base.c b/drivers/gpu/drm/nouveau/core/subdev/timer/base.c
index 5d417cc9949..cf8a0e0f8ee 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/base.c
@@ -85,3 +85,10 @@ nouveau_timer_alarm(void *obj, u32 nsec, struct nouveau_alarm *alarm)
struct nouveau_timer *ptimer = nouveau_timer(obj);
ptimer->alarm(ptimer, nsec, alarm);
}
+
+void
+nouveau_timer_alarm_cancel(void *obj, struct nouveau_alarm *alarm)
+{
+ struct nouveau_timer *ptimer = nouveau_timer(obj);
+ ptimer->alarm_cancel(ptimer, alarm);
+}
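nouveau_timer_alarm_cancel() is the counterpart to nouveau_timer_alarm(): the nv04 implementation further down removes the alarm from the pending list under the same spinlock the interrupt handler takes, and re-initialises the list head so that list_empty(&alarm->head) is true again and the alarm can later be re-armed (which is what the therm code checks). A minimal kernel-style sketch of that cancel idiom, with illustrative types rather than the nouveau structures:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_alarm {
	struct list_head head;
};

struct demo_timer {
	spinlock_t lock;
	struct list_head alarms;
};

/* Cancel: use the same lock as the code that walks and fires the list,
 * and leave alarm->head re-initialised so list_empty() reports it idle. */
static void demo_alarm_cancel(struct demo_timer *t, struct demo_alarm *alarm)
{
	unsigned long flags;

	spin_lock_irqsave(&t->lock, flags);
	list_del_init(&alarm->head);	/* list_del() + INIT_LIST_HEAD() in one call */
	spin_unlock_irqrestore(&t->lock, flags);
}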
diff --git a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
index 9469b827567..57711ecb566 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/timer/nv04.c
@@ -36,6 +36,7 @@ struct nv04_timer_priv {
struct nouveau_timer base;
struct list_head alarms;
spinlock_t lock;
+ u64 suspend_time;
};
static u64
@@ -113,6 +114,25 @@ nv04_timer_alarm(struct nouveau_timer *ptimer, u64 time,
}
static void
+nv04_timer_alarm_cancel(struct nouveau_timer *ptimer,
+ struct nouveau_alarm *alarm)
+{
+ struct nv04_timer_priv *priv = (void *)ptimer;
+ unsigned long flags;
+
+ /* avoid deleting an entry while the alarm intr is running */
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* delete the alarm from the list */
+ list_del(&alarm->head);
+
+ /* reset the head so that list_empty() returns 1 */
+ INIT_LIST_HEAD(&alarm->head);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+static void
nv04_timer_intr(struct nouveau_subdev *subdev)
{
struct nv04_timer_priv *priv = (void *)subdev;
@@ -146,6 +166,8 @@ nv04_timer_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
priv->base.base.intr = nv04_timer_intr;
priv->base.read = nv04_timer_read;
priv->base.alarm = nv04_timer_alarm;
+ priv->base.alarm_cancel = nv04_timer_alarm_cancel;
+ priv->suspend_time = 0;
INIT_LIST_HEAD(&priv->alarms);
spin_lock_init(&priv->lock);
@@ -164,7 +186,7 @@ nv04_timer_init(struct nouveau_object *object)
{
struct nouveau_device *device = nv_device(object);
struct nv04_timer_priv *priv = (void *)object;
- u32 m = 1, f, n, d;
+ u32 m = 1, f, n, d, lo, hi;
int ret;
ret = nouveau_timer_init(&priv->base);
@@ -221,16 +243,25 @@ nv04_timer_init(struct nouveau_object *object)
d >>= 1;
}
+ /* restore the time before suspend */
+ lo = priv->suspend_time;
+ hi = (priv->suspend_time >> 32);
+
nv_debug(priv, "input frequency : %dHz\n", f);
nv_debug(priv, "input multiplier: %d\n", m);
nv_debug(priv, "numerator : 0x%08x\n", n);
nv_debug(priv, "denominator : 0x%08x\n", d);
nv_debug(priv, "timer frequency : %dHz\n", (f * m) * d / n);
+ nv_debug(priv, "time low : 0x%08x\n", lo);
+ nv_debug(priv, "time high : 0x%08x\n", hi);
nv_wr32(priv, NV04_PTIMER_NUMERATOR, n);
nv_wr32(priv, NV04_PTIMER_DENOMINATOR, d);
nv_wr32(priv, NV04_PTIMER_INTR_0, 0xffffffff);
nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
+ nv_wr32(priv, NV04_PTIMER_TIME_1, hi);
+ nv_wr32(priv, NV04_PTIMER_TIME_0, lo);
+
return 0;
}
@@ -238,6 +269,8 @@ static int
nv04_timer_fini(struct nouveau_object *object, bool suspend)
{
struct nv04_timer_priv *priv = (void *)object;
+ if (suspend)
+ priv->suspend_time = nv04_timer_read(&priv->base);
nv_wr32(priv, NV04_PTIMER_INTR_EN_0, 0x00000000);
return nouveau_timer_fini(&priv->base, suspend);
}
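
The timer hunks above add a cancel path and preserve the counter value across suspend. Cancellation has to take the same spinlock as the alarm interrupt handler, and the alarm's list head is re-initialised so a later list_empty() check sees it as idle. A minimal sketch of that locking pattern follows; the sketch_* names and simplified structs are illustrative, not the driver's own types.

#include <linux/list.h>
#include <linux/spinlock.h>

struct sketch_alarm {
	struct list_head head;
};

struct sketch_timer {
	struct list_head alarms;
	spinlock_t lock;
};

/* remove a pending alarm under the lock the interrupt handler also takes */
static void sketch_alarm_cancel(struct sketch_timer *t, struct sketch_alarm *a)
{
	unsigned long flags;

	spin_lock_irqsave(&t->lock, flags);
	list_del(&a->head);
	INIT_LIST_HEAD(&a->head);	/* list_empty(&a->head) now reports true */
	spin_unlock_irqrestore(&t->lock, flags);
}
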
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
index 67fcb6c852a..ef3133e7575 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c
@@ -361,7 +361,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length,
INIT_LIST_HEAD(&vm->pgd_list);
vm->vmm = vmm;
- vm->refcount = 1;
+ kref_init(&vm->refcount);
vm->fpde = offset >> (vmm->pgt_bits + 12);
vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12);
@@ -441,8 +441,9 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
}
static void
-nouveau_vm_del(struct nouveau_vm *vm)
+nouveau_vm_del(struct kref *kref)
{
+ struct nouveau_vm *vm = container_of(kref, typeof(*vm), refcount);
struct nouveau_vm_pgd *vpgd, *tmp;
list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
@@ -458,27 +459,19 @@ int
nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
struct nouveau_gpuobj *pgd)
{
- struct nouveau_vm *vm;
- int ret;
-
- vm = ref;
- if (vm) {
- ret = nouveau_vm_link(vm, pgd);
+ if (ref) {
+ int ret = nouveau_vm_link(ref, pgd);
if (ret)
return ret;
- vm->refcount++;
+ kref_get(&ref->refcount);
}
- vm = *ptr;
- *ptr = ref;
-
- if (vm) {
- nouveau_vm_unlink(vm, pgd);
-
- if (--vm->refcount == 0)
- nouveau_vm_del(vm);
+ if (*ptr) {
+ nouveau_vm_unlink(*ptr, pgd);
+ kref_put(&(*ptr)->refcount, nouveau_vm_del);
}
+ *ptr = ref;
return 0;
}
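
The vm/base.c hunk replaces an open-coded integer refcount with a struct kref, and the release callback recovers the containing object via container_of(). A compact sketch of that idiom, using a hypothetical sketch_vm type (names are illustrative, not nouveau API):

#include <linux/kref.h>
#include <linux/slab.h>

struct sketch_vm {
	struct kref refcount;
	/* ... payload ... */
};

static void sketch_vm_del(struct kref *kref)
{
	struct sketch_vm *vm = container_of(kref, struct sketch_vm, refcount);

	kfree(vm);
}

static struct sketch_vm *sketch_vm_new(void)
{
	struct sketch_vm *vm = kzalloc(sizeof(*vm), GFP_KERNEL);

	if (vm)
		kref_init(&vm->refcount);	/* count starts at 1 */
	return vm;
}

static void sketch_vm_put(struct sketch_vm *vm)
{
	if (vm)
		kref_put(&vm->refcount, sketch_vm_del);	/* frees on last put */
}

kref_get()/kref_put() keep the counting atomic and make the "free on last reference" rule explicit, which is the property the nouveau_vm_ref() rewrite above relies on.
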
diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
index 07dd1fe2d6f..a4aa81a2173 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/vm/nv50.c
@@ -174,6 +174,7 @@ nv50_vm_flush(struct nouveau_vm *vm)
case NVDEV_ENGINE_GR : vme = 0x00; break;
case NVDEV_ENGINE_VP : vme = 0x01; break;
case NVDEV_SUBDEV_BAR : vme = 0x06; break;
+ case NVDEV_ENGINE_PPP :
case NVDEV_ENGINE_MPEG : vme = 0x08; break;
case NVDEV_ENGINE_BSP : vme = 0x09; break;
case NVDEV_ENGINE_CRYPT: vme = 0x0a; break;
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 0782bd2f1e0..d4fbf11360f 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -22,6 +22,7 @@
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/pm_runtime.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -606,6 +607,24 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
regp->ramdac_a34 = 0x1;
}
+static int
+nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
+{
+ struct nv04_display *disp = nv04_display(crtc->dev);
+ struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ int ret;
+
+ ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+ if (ret == 0) {
+ if (disp->image[nv_crtc->index])
+ nouveau_bo_unpin(disp->image[nv_crtc->index]);
+ nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]);
+ }
+
+ return ret;
+}
+
/**
* Sets up registers for the given mode/adjusted_mode pair.
*
@@ -622,10 +641,15 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_device *dev = crtc->dev;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_drm *drm = nouveau_drm(dev);
+ int ret;
NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index);
drm_mode_debug_printmodeline(adjusted_mode);
+ ret = nv_crtc_swap_fbs(crtc, old_fb);
+ if (ret)
+ return ret;
+
/* unlock must come after turning off FP_TG_CONTROL in output_prepare */
nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1);
@@ -722,6 +746,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc)
static void nv_crtc_destroy(struct drm_crtc *crtc)
{
+ struct nv04_display *disp = nv04_display(crtc->dev);
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
if (!nv_crtc)
@@ -729,6 +754,10 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
drm_crtc_cleanup(crtc);
+ if (disp->image[nv_crtc->index])
+ nouveau_bo_unpin(disp->image[nv_crtc->index]);
+ nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_unpin(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
@@ -754,6 +783,16 @@ nv_crtc_gamma_load(struct drm_crtc *crtc)
}
static void
+nv_crtc_disable(struct drm_crtc *crtc)
+{
+ struct nv04_display *disp = nv04_display(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ if (disp->image[nv_crtc->index])
+ nouveau_bo_unpin(disp->image[nv_crtc->index]);
+ nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
+}
+
+static void
nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
uint32_t size)
{
@@ -791,7 +830,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *drm_fb;
struct nouveau_framebuffer *fb;
int arb_burst, arb_lwm;
- int ret;
NV_DEBUG(drm, "index %d\n", nv_crtc->index);
@@ -801,10 +839,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
return 0;
}
-
/* If atomic, we want to switch to the fb we were passed, so
- * now we update pointers to do that. (We don't pin; just
- * assume we're already pinned and update the base address.)
+ * now we update pointers to do that.
*/
if (atomic) {
drm_fb = passed_fb;
@@ -812,17 +848,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
} else {
drm_fb = crtc->fb;
fb = nouveau_framebuffer(crtc->fb);
- /* If not atomic, we can go ahead and pin, and unpin the
- * old fb we were passed.
- */
- ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM);
- if (ret)
- return ret;
-
- if (passed_fb) {
- struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb);
- nouveau_bo_unpin(ofb->nvbo);
- }
}
nv_crtc->fb.offset = fb->nvbo->bo.offset;
@@ -877,6 +902,9 @@ static int
nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
+ int ret = nv_crtc_swap_fbs(crtc, old_fb);
+ if (ret)
+ return ret;
return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
}
@@ -1007,13 +1035,59 @@ nv04_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
return 0;
}
+int
+nouveau_crtc_set_config(struct drm_mode_set *set)
+{
+ struct drm_device *dev;
+ struct nouveau_drm *drm;
+ int ret;
+ struct drm_crtc *crtc;
+ bool active = false;
+ if (!set || !set->crtc)
+ return -EINVAL;
+
+ dev = set->crtc->dev;
+
+ /* get a pm reference here */
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_crtc_helper_set_config(set);
+
+ drm = nouveau_drm(dev);
+
+ /* check whether any crtc is still enabled; if none are, we can drop the power reference */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (crtc->enabled)
+ active = true;
+ }
+
+ pm_runtime_mark_last_busy(dev->dev);
+ /* if we have active crtcs and we don't have a power ref,
+ take the current one */
+ if (active && !drm->have_disp_power_ref) {
+ drm->have_disp_power_ref = true;
+ return ret;
+ }
+ /* if we have no active crtcs, then drop the power ref
+ we got before */
+ if (!active && drm->have_disp_power_ref) {
+ pm_runtime_put_autosuspend(dev->dev);
+ drm->have_disp_power_ref = false;
+ }
+ /* drop the power reference we got coming in here */
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+}
+
static const struct drm_crtc_funcs nv04_crtc_funcs = {
.save = nv_crtc_save,
.restore = nv_crtc_restore,
.cursor_set = nv04_crtc_cursor_set,
.cursor_move = nv04_crtc_cursor_move,
.gamma_set = nv_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
+ .set_config = nouveau_crtc_set_config,
.page_flip = nouveau_crtc_page_flip,
.destroy = nv_crtc_destroy,
};
@@ -1027,6 +1101,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
.mode_set_base = nv04_crtc_mode_set_base,
.mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
.load_lut = nv_crtc_gamma_load,
+ .disable = nv_crtc_disable,
};
int
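
nouveau_crtc_set_config() above brackets the modeset with a runtime PM reference and only keeps a long-lived reference while at least one crtc stays enabled. The basic bracketing pattern, sketched with a hypothetical helper (the op callback and sketch_* name are assumptions, not driver API):

#include <linux/pm_runtime.h>

/* take a runtime PM reference around work that touches the hardware */
static int sketch_with_rpm(struct device *dev, int (*op)(void *), void *arg)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;

	ret = op(arg);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* device may suspend once idle */
	return ret;
}
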
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h
index a0a031dad13..9928187f0a7 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.h
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h
@@ -81,6 +81,7 @@ struct nv04_display {
uint32_t saved_vga_font[4][16384];
uint32_t dac_users[4];
struct nouveau_object *core;
+ struct nouveau_bo *image[2];
};
static inline struct nv04_display *
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index d97f20069d3..dd7d2e18271 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -25,8 +25,27 @@
#define NOUVEAU_DSM_POWER_SPEED 0x01
#define NOUVEAU_DSM_POWER_STAMINA 0x02
-#define NOUVEAU_DSM_OPTIMUS_FN 0x1A
-#define NOUVEAU_DSM_OPTIMUS_ARGS 0x03000001
+#define NOUVEAU_DSM_OPTIMUS_CAPS 0x1A
+#define NOUVEAU_DSM_OPTIMUS_FLAGS 0x1B
+
+#define NOUVEAU_DSM_OPTIMUS_POWERDOWN_PS3 (3 << 24)
+#define NOUVEAU_DSM_OPTIMUS_NO_POWERDOWN_PS3 (2 << 24)
+#define NOUVEAU_DSM_OPTIMUS_FLAGS_CHANGED (1)
+
+#define NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN (NOUVEAU_DSM_OPTIMUS_POWERDOWN_PS3 | NOUVEAU_DSM_OPTIMUS_FLAGS_CHANGED)
+
+/* result of the optimus caps function */
+#define OPTIMUS_ENABLED (1 << 0)
+#define OPTIMUS_STATUS_MASK (3 << 3)
+#define OPTIMUS_STATUS_OFF (0 << 3)
+#define OPTIMUS_STATUS_ON_ENABLED (1 << 3)
+#define OPTIMUS_STATUS_PWR_STABLE (3 << 3)
+#define OPTIMUS_DISPLAY_HOTPLUG (1 << 6)
+#define OPTIMUS_CAPS_MASK (7 << 24)
+#define OPTIMUS_DYNAMIC_PWR_CAP (1 << 24)
+
+#define OPTIMUS_AUDIO_CAPS_MASK (3 << 27)
+#define OPTIMUS_HDA_CODEC_MASK (2 << 27) /* hda bios control */
static struct nouveau_dsm_priv {
bool dsm_detected;
@@ -251,9 +270,18 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
retval |= NOUVEAU_DSM_HAS_MUX;
if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm,
- NOUVEAU_DSM_OPTIMUS_FN))
+ NOUVEAU_DSM_OPTIMUS_CAPS))
retval |= NOUVEAU_DSM_HAS_OPT;
+ if (retval & NOUVEAU_DSM_HAS_OPT) {
+ uint32_t result;
+ nouveau_optimus_dsm(dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, 0,
+ &result);
+ dev_info(&pdev->dev, "optimus capabilities: %s, status %s%s\n",
+ (result & OPTIMUS_ENABLED) ? "enabled" : "disabled",
+ (result & OPTIMUS_DYNAMIC_PWR_CAP) ? "dynamic power, " : "",
+ (result & OPTIMUS_HDA_CODEC_MASK) ? "hda bios codec supported" : "");
+ }
if (retval)
nouveau_dsm_priv.dhandle = dhandle;
@@ -328,8 +356,12 @@ void nouveau_switcheroo_optimus_dsm(void)
if (!nouveau_dsm_priv.optimus_detected)
return;
- nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FN,
- NOUVEAU_DSM_OPTIMUS_ARGS, &result);
+ nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FLAGS,
+ 0x3, &result);
+
+ nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_CAPS,
+ NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN, &result);
+
}
void nouveau_unregister_dsm_handler(void)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 4e7ee5f4155..755c38d0627 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -198,7 +198,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
size_t acc_size;
int ret;
int type = ttm_bo_type_device;
- int max_size = INT_MAX & ~((1 << drm->client.base.vm->vmm->lpg_shift) - 1);
+ int lpg_shift = 12;
+ int max_size;
+
+ if (drm->client.base.vm)
+ lpg_shift = drm->client.base.vm->vmm->lpg_shift;
+ max_size = INT_MAX & ~((1 << lpg_shift) - 1);
if (size <= 0 || size > max_size) {
nv_warn(drm, "skipped size %x\n", (u32)size);
@@ -1260,7 +1265,9 @@ out:
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- return 0;
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+ return drm_vma_node_verify_access(&nvbo->gem->vma_node, filp);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 4da776f344d..c5b36f9e9a1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -26,6 +26,8 @@
#include <acpi/button.h>
+#include <linux/pm_runtime.h>
+
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
@@ -240,6 +242,8 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
struct nouveau_encoder *nv_partner;
struct nouveau_i2c_port *i2c;
int type;
+ int ret;
+ enum drm_connector_status conn_status = connector_status_disconnected;
/* Cleanup the previous EDID block. */
if (nv_connector->edid) {
@@ -248,6 +252,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
nv_connector->edid = NULL;
}
+ ret = pm_runtime_get_sync(connector->dev->dev);
+ if (ret < 0)
+ return conn_status;
+
i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
if (i2c) {
nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
@@ -263,7 +271,8 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
!nouveau_dp_detect(to_drm_encoder(nv_encoder))) {
NV_ERROR(drm, "Detected %s, but failed init\n",
drm_get_connector_name(connector));
- return connector_status_disconnected;
+ conn_status = connector_status_disconnected;
+ goto out;
}
/* Override encoder type for DVI-I based on whether EDID
@@ -290,13 +299,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
}
nouveau_connector_set_encoder(connector, nv_encoder);
- return connector_status_connected;
+ conn_status = connector_status_connected;
+ goto out;
}
nv_encoder = nouveau_connector_of_detect(connector);
if (nv_encoder) {
nouveau_connector_set_encoder(connector, nv_encoder);
- return connector_status_connected;
+ conn_status = connector_status_connected;
+ goto out;
}
detect_analog:
@@ -311,12 +322,18 @@ detect_analog:
if (helper->detect(encoder, connector) ==
connector_status_connected) {
nouveau_connector_set_encoder(connector, nv_encoder);
- return connector_status_connected;
+ conn_status = connector_status_connected;
+ goto out;
}
}
- return connector_status_disconnected;
+ out:
+
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+
+ return conn_status;
}
static enum drm_connector_status
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 907d20ef6d4..d2712e6e5d3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -107,6 +107,11 @@ nouveau_framebuffer_init(struct drm_device *dev,
return -EINVAL;
}
+ if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
+ NV_ERROR(drm, "framebuffer requires contiguous bo\n");
+ return -EINVAL;
+ }
+
if (nv_device(drm->device)->chipset == 0x50)
nv_fb->r_format |= (tile_flags << 8);
@@ -394,7 +399,7 @@ nouveau_display_suspend(struct drm_device *dev)
nouveau_display_fini(dev);
- NV_INFO(drm, "unpinning framebuffer(s)...\n");
+ NV_SUSPEND(drm, "unpinning framebuffer(s)...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
@@ -416,7 +421,7 @@ nouveau_display_suspend(struct drm_device *dev)
}
void
-nouveau_display_resume(struct drm_device *dev)
+nouveau_display_repin(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_crtc *crtc;
@@ -441,10 +446,12 @@ nouveau_display_resume(struct drm_device *dev)
if (ret)
NV_ERROR(drm, "Could not pin/map cursor.\n");
}
+}
- nouveau_fbcon_set_suspend(dev, 0);
- nouveau_fbcon_zfill_all(dev);
-
+void
+nouveau_display_resume(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
nouveau_display_init(dev);
/* Force CLUT to get re-loaded during modeset */
@@ -519,7 +526,8 @@ fail:
int
nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct drm_device *dev = crtc->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
@@ -577,6 +585,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
ret = nv50_display_flip_next(crtc, fb, chan, 0);
if (ret)
goto fail_unreserve;
+ } else {
+ struct nv04_display *dispnv04 = nv04_display(dev);
+ nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]);
}
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
@@ -674,13 +685,6 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
}
int
-nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file_priv, handle);
-}
-
-int
nouveau_display_dumb_map_offset(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle, uint64_t *poffset)
@@ -690,7 +694,7 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
gem = drm_gem_object_lookup(dev, file_priv, handle);
if (gem) {
struct nouveau_bo *bo = gem->driver_private;
- *poffset = bo->bo.addr_space_offset;
+ *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
drm_gem_object_unreference_unlocked(gem);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 1ea3e4734b6..025c66f8e0e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -57,10 +57,12 @@ void nouveau_display_destroy(struct drm_device *dev);
int nouveau_display_init(struct drm_device *dev);
void nouveau_display_fini(struct drm_device *dev);
int nouveau_display_suspend(struct drm_device *dev);
+void nouveau_display_repin(struct drm_device *dev);
void nouveau_display_resume(struct drm_device *dev);
int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event);
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags);
int nouveau_finish_page_flip(struct nouveau_channel *,
struct nouveau_page_flip_state *);
@@ -68,11 +70,10 @@ int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
struct drm_mode_create_dumb *args);
int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
u32 handle, u64 *offset);
-int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
- u32 handle);
void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
+int nouveau_crtc_set_config(struct drm_mode_set *set);
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
extern int nouveau_backlight_init(struct drm_device *);
extern void nouveau_backlight_exit(struct drm_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 61972668fd0..8863644024b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -25,7 +25,10 @@
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pci.h>
-
+#include <linux/pm_runtime.h>
+#include <linux/vga_switcheroo.h>
+#include "drmP.h"
+#include "drm_crtc_helper.h"
#include <core/device.h>
#include <core/client.h>
#include <core/gpuobj.h>
@@ -69,6 +72,10 @@ MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
int nouveau_modeset = -1;
module_param_named(modeset, nouveau_modeset, int, 0400);
+MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
+int nouveau_runtime_pm = -1;
+module_param_named(runpm, nouveau_runtime_pm, int, 0400);
+
static struct drm_driver driver;
static int
@@ -296,6 +303,31 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
return 0;
}
+#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
+
+static void
+nouveau_get_hdmi_dev(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = dev->dev_private;
+ struct pci_dev *pdev = dev->pdev;
+
+ /* is subfunction one an HDMI audio device? */
+ drm->hdmi_device = pci_get_bus_and_slot((unsigned int)pdev->bus->number,
+ PCI_DEVFN(PCI_SLOT(pdev->devfn), 1));
+
+ if (!drm->hdmi_device) {
+ DRM_INFO("hdmi device not found %d %d %d\n", pdev->bus->number, PCI_SLOT(pdev->devfn), 1);
+ return;
+ }
+
+ if ((drm->hdmi_device->class >> 8) != PCI_CLASS_MULTIMEDIA_HD_AUDIO) {
+ DRM_INFO("possible hdmi device not audio %d\n", drm->hdmi_device->class);
+ pci_dev_put(drm->hdmi_device);
+ drm->hdmi_device = NULL;
+ return;
+ }
+}
+
static int
nouveau_drm_load(struct drm_device *dev, unsigned long flags)
{
@@ -314,6 +346,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
INIT_LIST_HEAD(&drm->clients);
spin_lock_init(&drm->tile.lock);
+ nouveau_get_hdmi_dev(dev);
+
/* make sure AGP controller is in a consistent state before we
* (possibly) execute vbios init tables (see nouveau_agp.h)
*/
@@ -388,6 +422,15 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
nouveau_accel_init(drm);
nouveau_fbcon_init(dev);
+
+ if (nouveau_runtime_pm != 0) {
+ pm_runtime_use_autosuspend(dev->dev);
+ pm_runtime_set_autosuspend_delay(dev->dev, 5000);
+ pm_runtime_set_active(dev->dev);
+ pm_runtime_allow(dev->dev);
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put(dev->dev);
+ }
return 0;
fail_dispinit:
@@ -409,6 +452,7 @@ nouveau_drm_unload(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
+ pm_runtime_get_sync(dev->dev);
nouveau_fbcon_fini(dev);
nouveau_accel_fini(drm);
@@ -424,6 +468,8 @@ nouveau_drm_unload(struct drm_device *dev)
nouveau_agp_fini(drm);
nouveau_vga_fini(drm);
+ if (drm->hdmi_device)
+ pci_dev_put(drm->hdmi_device);
nouveau_cli_destroy(&drm->client);
return 0;
}
@@ -450,19 +496,16 @@ nouveau_do_suspend(struct drm_device *dev)
int ret;
if (dev->mode_config.num_crtc) {
- NV_INFO(drm, "suspending fbcon...\n");
- nouveau_fbcon_set_suspend(dev, 1);
-
- NV_INFO(drm, "suspending display...\n");
+ NV_SUSPEND(drm, "suspending display...\n");
ret = nouveau_display_suspend(dev);
if (ret)
return ret;
}
- NV_INFO(drm, "evicting buffers...\n");
+ NV_SUSPEND(drm, "evicting buffers...\n");
ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
- NV_INFO(drm, "waiting for kernel channels to go idle...\n");
+ NV_SUSPEND(drm, "waiting for kernel channels to go idle...\n");
if (drm->cechan) {
ret = nouveau_channel_idle(drm->cechan);
if (ret)
@@ -475,7 +518,7 @@ nouveau_do_suspend(struct drm_device *dev)
return ret;
}
- NV_INFO(drm, "suspending client object trees...\n");
+ NV_SUSPEND(drm, "suspending client object trees...\n");
if (drm->fence && nouveau_fence(drm)->suspend) {
if (!nouveau_fence(drm)->suspend(drm))
return -ENOMEM;
@@ -487,7 +530,7 @@ nouveau_do_suspend(struct drm_device *dev)
goto fail_client;
}
- NV_INFO(drm, "suspending kernel object tree...\n");
+ NV_SUSPEND(drm, "suspending kernel object tree...\n");
ret = nouveau_client_fini(&drm->client.base, true);
if (ret)
goto fail_client;
@@ -501,7 +544,7 @@ fail_client:
}
if (dev->mode_config.num_crtc) {
- NV_INFO(drm, "resuming display...\n");
+ NV_SUSPEND(drm, "resuming display...\n");
nouveau_display_resume(dev);
}
return ret;
@@ -513,9 +556,14 @@ int nouveau_pmops_suspend(struct device *dev)
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
- if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
return 0;
+ if (drm_dev->mode_config.num_crtc)
+ nouveau_fbcon_set_suspend(drm_dev, 1);
+
+ nv_suspend_set_printk_level(NV_DBG_INFO);
ret = nouveau_do_suspend(drm_dev);
if (ret)
return ret;
@@ -523,6 +571,7 @@ int nouveau_pmops_suspend(struct device *dev)
pci_save_state(pdev);
pci_disable_device(pdev);
pci_set_power_state(pdev, PCI_D3hot);
+ nv_suspend_set_printk_level(NV_DBG_DEBUG);
return 0;
}
@@ -533,15 +582,15 @@ nouveau_do_resume(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_cli *cli;
- NV_INFO(drm, "re-enabling device...\n");
+ NV_SUSPEND(drm, "re-enabling device...\n");
nouveau_agp_reset(drm);
- NV_INFO(drm, "resuming kernel object tree...\n");
+ NV_SUSPEND(drm, "resuming kernel object tree...\n");
nouveau_client_init(&drm->client.base);
nouveau_agp_init(drm);
- NV_INFO(drm, "resuming client object trees...\n");
+ NV_SUSPEND(drm, "resuming client object trees...\n");
if (drm->fence && nouveau_fence(drm)->resume)
nouveau_fence(drm)->resume(drm);
@@ -553,9 +602,10 @@ nouveau_do_resume(struct drm_device *dev)
nouveau_pm_resume(dev);
if (dev->mode_config.num_crtc) {
- NV_INFO(drm, "resuming display...\n");
- nouveau_display_resume(dev);
+ NV_SUSPEND(drm, "resuming display...\n");
+ nouveau_display_repin(dev);
}
+
return 0;
}
@@ -565,7 +615,8 @@ int nouveau_pmops_resume(struct device *dev)
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
- if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
+ if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
+ drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
return 0;
pci_set_power_state(pdev, PCI_D0);
@@ -575,23 +626,54 @@ int nouveau_pmops_resume(struct device *dev)
return ret;
pci_set_master(pdev);
- return nouveau_do_resume(drm_dev);
+ nv_suspend_set_printk_level(NV_DBG_INFO);
+ ret = nouveau_do_resume(drm_dev);
+ if (ret) {
+ nv_suspend_set_printk_level(NV_DBG_DEBUG);
+ return ret;
+ }
+ if (drm_dev->mode_config.num_crtc)
+ nouveau_fbcon_set_suspend(drm_dev, 0);
+
+ nouveau_fbcon_zfill_all(drm_dev);
+ nouveau_display_resume(drm_dev);
+ nv_suspend_set_printk_level(NV_DBG_DEBUG);
+ return 0;
}
static int nouveau_pmops_freeze(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int ret;
+
+ nv_suspend_set_printk_level(NV_DBG_INFO);
+ if (drm_dev->mode_config.num_crtc)
+ nouveau_fbcon_set_suspend(drm_dev, 1);
- return nouveau_do_suspend(drm_dev);
+ ret = nouveau_do_suspend(drm_dev);
+ nv_suspend_set_printk_level(NV_DBG_DEBUG);
+ return ret;
}
static int nouveau_pmops_thaw(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int ret;
- return nouveau_do_resume(drm_dev);
+ nv_suspend_set_printk_level(NV_DBG_INFO);
+ ret = nouveau_do_resume(drm_dev);
+ if (ret) {
+ nv_suspend_set_printk_level(NV_DBG_DEBUG);
+ return ret;
+ }
+ if (drm_dev->mode_config.num_crtc)
+ nouveau_fbcon_set_suspend(drm_dev, 0);
+ nouveau_fbcon_zfill_all(drm_dev);
+ nouveau_display_resume(drm_dev);
+ nv_suspend_set_printk_level(NV_DBG_DEBUG);
+ return 0;
}
@@ -604,19 +686,24 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
char name[32], tmpname[TASK_COMM_LEN];
int ret;
+ /* need to bring up power immediately if opening device */
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0)
+ return ret;
+
get_task_comm(tmpname, current);
snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
ret = nouveau_cli_create(pdev, name, sizeof(*cli), (void **)&cli);
if (ret)
- return ret;
+ goto out_suspend;
if (nv_device(drm->device)->card_type >= NV_50) {
ret = nouveau_vm_new(nv_device(drm->device), 0, (1ULL << 40),
0x1000, &cli->base.vm);
if (ret) {
nouveau_cli_destroy(cli);
- return ret;
+ goto out_suspend;
}
}
@@ -625,7 +712,12 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
mutex_lock(&drm->client.mutex);
list_add(&cli->head, &drm->clients);
mutex_unlock(&drm->client.mutex);
- return 0;
+
+out_suspend:
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+
+ return ret;
}
static void
@@ -634,12 +726,15 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
struct nouveau_cli *cli = nouveau_cli(fpriv);
struct nouveau_drm *drm = nouveau_drm(dev);
+ pm_runtime_get_sync(dev->dev);
+
if (cli->abi16)
nouveau_abi16_fini(cli->abi16);
mutex_lock(&drm->client.mutex);
list_del(&cli->head);
mutex_unlock(&drm->client.mutex);
+
}
static void
@@ -647,33 +742,52 @@ nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
{
struct nouveau_cli *cli = nouveau_cli(fpriv);
nouveau_cli_destroy(cli);
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
}
-static struct drm_ioctl_desc
+static const struct drm_ioctl_desc
nouveau_ioctls[] = {
- DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_abi16_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_abi16_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
- DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_abi16_ioctl_channel_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_abi16_ioctl_channel_free, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_abi16_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_abi16_ioctl_notifierobj_alloc, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_abi16_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH|DRM_RENDER_ALLOW),
};
+long nouveau_drm_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev;
+ long ret;
+ dev = file_priv->minor->dev;
+
+ ret = pm_runtime_get_sync(dev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_ioctl(filp, cmd, arg);
+
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
+ return ret;
+}
static const struct file_operations
nouveau_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = drm_ioctl,
+ .unlocked_ioctl = nouveau_drm_ioctl,
.mmap = nouveau_ttm_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
.read = drm_read,
#if defined(CONFIG_COMPAT)
.compat_ioctl = nouveau_compat_ioctl,
@@ -684,8 +798,8 @@ nouveau_driver_fops = {
static struct drm_driver
driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
- DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
+ DRIVER_USE_AGP |
+ DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | DRIVER_RENDER,
.load = nouveau_drm_load,
.unload = nouveau_drm_unload,
@@ -704,6 +818,7 @@ driver = {
.disable_vblank = nouveau_drm_vblank_disable,
.ioctls = nouveau_ioctls,
+ .num_ioctls = ARRAY_SIZE(nouveau_ioctls),
.fops = &nouveau_driver_fops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -724,7 +839,7 @@ driver = {
.dumb_create = nouveau_display_dumb_create,
.dumb_map_offset = nouveau_display_dumb_map_offset,
- .dumb_destroy = nouveau_display_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
@@ -753,6 +868,90 @@ nouveau_drm_pci_table[] = {
{}
};
+static int nouveau_pmops_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ int ret;
+
+ if (nouveau_runtime_pm == 0)
+ return -EINVAL;
+
+ drm_kms_helper_poll_disable(drm_dev);
+ vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
+ nouveau_switcheroo_optimus_dsm();
+ ret = nouveau_do_suspend(drm_dev);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3cold);
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
+ return ret;
+}
+
+static int nouveau_pmops_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct nouveau_device *device = nouveau_dev(drm_dev);
+ int ret;
+
+ if (nouveau_runtime_pm == 0)
+ return -EINVAL;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+ pci_set_master(pdev);
+
+ ret = nouveau_do_resume(drm_dev);
+ nouveau_display_resume(drm_dev);
+ drm_kms_helper_poll_enable(drm_dev);
+ /* do magic */
+ nv_mask(device, 0x88488, (1 << 25), (1 << 25));
+ vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
+ drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ return ret;
+}
+
+static int nouveau_pmops_runtime_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct drm_device *drm_dev = pci_get_drvdata(pdev);
+ struct nouveau_drm *drm = nouveau_drm(drm_dev);
+ struct drm_crtc *crtc;
+
+ if (nouveau_runtime_pm == 0)
+ return -EBUSY;
+
+ /* are we optimus enabled? */
+ if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
+ DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+ return -EBUSY;
+ }
+
+ /* if we have a hdmi audio device - make sure it has a driver loaded */
+ if (drm->hdmi_device) {
+ if (!drm->hdmi_device->driver) {
+ DRM_DEBUG_DRIVER("failing to power off - no HDMI audio driver loaded\n");
+ pm_runtime_mark_last_busy(dev);
+ return -EBUSY;
+ }
+ }
+
+ list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
+ if (crtc->enabled) {
+ DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+ return -EBUSY;
+ }
+ }
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_autosuspend(dev);
+ /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
+ return 1;
+}
+
static const struct dev_pm_ops nouveau_pm_ops = {
.suspend = nouveau_pmops_suspend,
.resume = nouveau_pmops_resume,
@@ -760,6 +959,9 @@ static const struct dev_pm_ops nouveau_pm_ops = {
.thaw = nouveau_pmops_thaw,
.poweroff = nouveau_pmops_freeze,
.restore = nouveau_pmops_resume,
+ .runtime_suspend = nouveau_pmops_runtime_suspend,
+ .runtime_resume = nouveau_pmops_runtime_resume,
+ .runtime_idle = nouveau_pmops_runtime_idle,
};
static struct pci_driver
@@ -774,8 +976,6 @@ nouveau_drm_pci_driver = {
static int __init
nouveau_drm_init(void)
{
- driver.num_ioctls = ARRAY_SIZE(nouveau_ioctls);
-
if (nouveau_modeset == -1) {
#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force())
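
The load path above opts the device into runtime PM with a five second autosuspend window. The same sequence, condensed into a sketch (the delay simply mirrors the value used above and is not a requirement):

#include <linux/pm_runtime.h>

static void sketch_enable_runtime_pm(struct device *dev)
{
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 5000);	/* milliseconds */
	pm_runtime_set_active(dev);	/* hardware is currently powered */
	pm_runtime_allow(dev);		/* allow autosuspend without waiting for userspace */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put(dev);		/* decrement the usage count and queue an idle check */
}
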
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
index 41ff7e0d403..994fd6ec373 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -70,6 +70,8 @@ nouveau_cli(struct drm_file *fpriv)
return fpriv ? fpriv->driver_priv : NULL;
}
+extern int nouveau_runtime_pm;
+
struct nouveau_drm {
struct nouveau_cli client;
struct drm_device *dev;
@@ -129,6 +131,12 @@ struct nouveau_drm {
/* power management */
struct nouveau_pm *pm;
+
+ /* display power reference */
+ bool have_disp_power_ref;
+
+ struct dev_pm_domain vga_pm_domain;
+ struct pci_dev *hdmi_device;
};
static inline struct nouveau_drm *
@@ -146,6 +154,7 @@ nouveau_dev(struct drm_device *dev)
int nouveau_pmops_suspend(struct device *);
int nouveau_pmops_resume(struct device *);
+#define NV_SUSPEND(cli, fmt, args...) nv_suspend((cli), fmt, ##args)
#define NV_FATAL(cli, fmt, args...) nv_fatal((cli), fmt, ##args)
#define NV_ERROR(cli, fmt, args...) nv_error((cli), fmt, ##args)
#define NV_WARN(cli, fmt, args...) nv_warn((cli), fmt, ##args)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 4c1bc061fae..8f6d63d7edd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -398,7 +398,8 @@ void
nouveau_fbcon_output_poll_changed(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- drm_fb_helper_hotplug_event(&drm->fbcon->helper);
+ if (drm->fbcon)
+ drm_fb_helper_hotplug_event(&drm->fbcon->helper);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 830cb7bad92..f32b71238c0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -220,7 +220,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
}
rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
- rep->map_handle = nvbo->bo.addr_space_offset;
+ rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
rep->tile_mode = nvbo->tile_mode;
rep->tile_flags = nvbo->tile_flags;
return 0;
@@ -579,18 +579,31 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
return 0;
}
+static inline void
+u_free(void *addr)
+{
+ if (!is_vmalloc_addr(addr))
+ kfree(addr);
+ else
+ vfree(addr);
+}
+
static inline void *
u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
{
void *mem;
void __user *userptr = (void __force __user *)(uintptr_t)user;
- mem = kmalloc(nmemb * size, GFP_KERNEL);
+ size *= nmemb;
+
+ mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
+ if (!mem)
+ mem = vmalloc(size);
if (!mem)
return ERR_PTR(-ENOMEM);
- if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
- kfree(mem);
+ if (DRM_COPY_FROM_USER(mem, userptr, size)) {
+ u_free(mem);
return ERR_PTR(-EFAULT);
}
@@ -676,7 +689,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
}
- kfree(reloc);
+ u_free(reloc);
return ret;
}
@@ -738,7 +751,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
if (IS_ERR(bo)) {
- kfree(push);
+ u_free(push);
return nouveau_abi16_put(abi16, PTR_ERR(bo));
}
@@ -849,8 +862,8 @@ out:
nouveau_fence_unref(&fence);
out_prevalid:
- kfree(bo);
- kfree(push);
+ u_free(bo);
+ u_free(push);
out_next:
if (chan->dma.ib_max) {
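
u_memcpya() above now tries kmalloc() first and falls back to vmalloc() for large ioctl arrays, and u_free() picks the matching release routine with is_vmalloc_addr(). The fallback idiom in isolation, as a sketch (the sketch_* names are illustrative):

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* prefer the slab allocator, fall back to vmalloc for large buffers */
static void *sketch_alloc(size_t size)
{
	void *mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!mem)
		mem = vmalloc(size);
	return mem;
}

/* release with whichever routine matches how the buffer was obtained */
static void sketch_free(void *mem)
{
	if (is_vmalloc_addr(mem))
		vfree(mem);
	else
		kfree(mem);
}

Later kernels export kvmalloc()/kvfree() for exactly this pattern; at the time of this series the open-coded fallback shown above was the usual approach.
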
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
index 08214bcdcb1..c1a7e5a73a2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
@@ -63,7 +63,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
if (fn != NULL)
ret = (*fn)(filp, cmd, arg);
else
- ret = drm_ioctl(filp, cmd, arg);
+ ret = nouveau_drm_ioctl(filp, cmd, arg);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioctl.h b/drivers/gpu/drm/nouveau/nouveau_ioctl.h
index ef2b2906d9e..3b9f2e5463a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ioctl.h
+++ b/drivers/gpu/drm/nouveau/nouveau_ioctl.h
@@ -2,5 +2,6 @@
#define __NOUVEAU_IOCTL_H__
long nouveau_compat_ioctl(struct file *, unsigned int cmd, unsigned long arg);
+long nouveau_drm_ioctl(struct file *, unsigned int cmd, unsigned long arg);
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 25d3495725e..81638d7f2ef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -32,6 +32,9 @@ nouveau_switcheroo_set_state(struct pci_dev *pdev,
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ if ((nouveau_is_optimus() || nouveau_is_v1_dsm()) && state == VGA_SWITCHEROO_OFF)
+ return;
+
if (state == VGA_SWITCHEROO_ON) {
printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
@@ -78,8 +81,17 @@ void
nouveau_vga_init(struct nouveau_drm *drm)
{
struct drm_device *dev = drm->dev;
+ bool runtime = false;
vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
- vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops);
+
+ if (nouveau_runtime_pm == 1)
+ runtime = true;
+ if ((nouveau_runtime_pm == -1) && (nouveau_is_optimus() || nouveau_is_v1_dsm()))
+ runtime = true;
+ vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime);
+
+ if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
+ vga_switcheroo_init_domain_pm_ops(drm->dev->dev, &drm->vga_pm_domain);
}
void
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 8e47a9bae8c..22aa9963ea6 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -76,7 +76,7 @@ nv17_fence_context_new(struct nouveau_channel *chan)
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
struct nouveau_object *object;
u32 start = mem->start * PAGE_SIZE;
- u32 limit = mem->start + mem->size - 1;
+ u32 limit = start + mem->size - 1;
int ret = 0;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index 3af5bcd0b20..625f80d53dc 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -131,7 +131,7 @@ nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll,
if (clk < pll->vco1.max_freq)
pll->vco2.max_freq = 0;
- pclk->pll_calc(pclk, pll, clk, &coef);
+ ret = pclk->pll_calc(pclk, pll, clk, &coef);
if (ret == 0)
return -ERANGE;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 8b40a36c1b5..f8e66c08b11 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1326,7 +1326,7 @@ static const struct drm_crtc_funcs nv50_crtc_func = {
.cursor_set = nv50_crtc_cursor_set,
.cursor_move = nv50_crtc_cursor_move,
.gamma_set = nv50_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
+ .set_config = nouveau_crtc_set_config,
.destroy = nv50_crtc_destroy,
.page_flip = nouveau_crtc_page_flip,
};
@@ -1583,7 +1583,7 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
load = 340;
ret = nv_exec(disp->core, NV50_DISP_DAC_LOAD + or, &load, sizeof(load));
- if (ret || load != 7)
+ if (ret || !load)
return connector_status_disconnected;
return connector_status_connected;
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index f9701e567db..0ee36384003 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -39,6 +39,8 @@ nv50_fence_context_new(struct nouveau_channel *chan)
struct nv10_fence_chan *fctx;
struct ttm_mem_reg *mem = &priv->bo->bo.mem;
struct nouveau_object *object;
+ u32 start = mem->start * PAGE_SIZE;
+ u32 limit = start + mem->size - 1;
int ret, i;
fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -51,26 +53,28 @@ nv50_fence_context_new(struct nouveau_channel *chan)
fctx->base.sync = nv17_fence_sync;
ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
- NvSema, 0x0002,
+ NvSema, 0x003d,
&(struct nv_dma_class) {
.flags = NV_DMA_TARGET_VRAM |
NV_DMA_ACCESS_RDWR,
- .start = mem->start * PAGE_SIZE,
- .limit = mem->size - 1,
+ .start = start,
+ .limit = limit,
}, sizeof(struct nv_dma_class),
&object);
/* dma objects for display sync channel semaphore blocks */
for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) {
struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i);
+ u32 start = bo->bo.mem.start * PAGE_SIZE;
+ u32 limit = start + bo->bo.mem.size - 1;
ret = nouveau_object_new(nv_object(chan->cli), chan->handle,
NvEvoSema0 + i, 0x003d,
&(struct nv_dma_class) {
.flags = NV_DMA_TARGET_VRAM |
NV_DMA_ACCESS_RDWR,
- .start = bo->bo.offset,
- .limit = bo->bo.offset + 0xfff,
+ .start = start,
+ .limit = limit,
}, sizeof(struct nv_dma_class),
&object);
}
diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile
index d85e058f284..778372b062a 100644
--- a/drivers/gpu/drm/omapdrm/Makefile
+++ b/drivers/gpu/drm/omapdrm/Makefile
@@ -18,7 +18,4 @@ omapdrm-y := omap_drv.o \
omap_dmm_tiler.o \
tcm-sita.o
-# temporary:
-omapdrm-y += omap_gem_helpers.o
-
obj-$(CONFIG_DRM_OMAP) += omapdrm.o
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 11a5263a5e9..0fd2eb139f6 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -331,7 +331,8 @@ static void page_flip_cb(void *arg)
static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct drm_device *dev = crtc->dev;
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 9b794c933c8..acf667859cb 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -871,7 +871,7 @@ int tiler_map_show(struct seq_file *s, void *arg)
goto error;
for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
- memset(map, 0, sizeof(h_adj * sizeof(*map)));
+ memset(map, 0, h_adj * sizeof(*map));
memset(global_map, ' ', (w_adj + 1) * h_adj);
for (i = 0; i < omap_dmm->container_height; i++) {
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index a3004f12b9a..2603d909f49 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -419,7 +419,7 @@ static int ioctl_gem_info(struct drm_device *dev, void *data,
return ret;
}
-static struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
+static const struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
@@ -524,12 +524,6 @@ static int dev_open(struct drm_device *dev, struct drm_file *file)
return 0;
}
-static int dev_firstopen(struct drm_device *dev)
-{
- DBG("firstopen: dev=%p", dev);
- return 0;
-}
-
/**
* lastclose - clean up after all DRM clients have exited
* @dev: DRM device
@@ -598,7 +592,6 @@ static const struct file_operations omapdriver_fops = {
.release = drm_release,
.mmap = omap_gem_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
.read = drm_read,
.llseek = noop_llseek,
};
@@ -609,7 +602,6 @@ static struct drm_driver omap_drm_driver = {
.load = dev_load,
.unload = dev_unload,
.open = dev_open,
- .firstopen = dev_firstopen,
.lastclose = dev_lastclose,
.preclose = dev_preclose,
.postclose = dev_postclose,
@@ -633,7 +625,7 @@ static struct drm_driver omap_drm_driver = {
.gem_vm_ops = &omap_gem_vm_ops,
.dumb_create = omap_gem_dumb_create,
.dumb_map_offset = omap_gem_dumb_map_offset,
- .dumb_destroy = omap_gem_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.ioctls = ioctls,
.num_ioctls = DRM_OMAP_NUM_IOCTLS,
.fops = &omapdriver_fops,
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 14f17da2ce2..30b95b73665 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -203,9 +203,8 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
-int omap_framebuffer_replace(struct drm_framebuffer *a,
- struct drm_framebuffer *b, void *arg,
- void (*unpin)(void *arg, struct drm_gem_object *bo));
+int omap_framebuffer_pin(struct drm_framebuffer *fb);
+int omap_framebuffer_unpin(struct drm_framebuffer *fb);
void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
struct omap_drm_window *win, struct omap_overlay_info *info);
struct drm_connector *omap_framebuffer_get_next_connector(
@@ -225,8 +224,6 @@ int omap_gem_init_object(struct drm_gem_object *obj);
void *omap_gem_vaddr(struct drm_gem_object *obj);
int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
-int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- uint32_t handle);
int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index c29451ba65d..6a12e899235 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -133,7 +133,7 @@ int omap_encoder_update(struct drm_encoder *encoder,
struct omap_dss_driver *dssdrv = dssdev->driver;
int ret;
- dssdev->output->manager = mgr;
+ dssdev->src->manager = mgr;
if (dssdrv->check_timings) {
ret = dssdrv->check_timings(dssdev, timings);
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index 8031402e795..f2b8f0668c0 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -237,55 +237,49 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
}
}
-/* Call for unpin 'a' (if not NULL), and pin 'b' (if not NULL). Although
- * buffers to unpin are just pushed to the unpin fifo so that the
- * caller can defer unpin until vblank.
- *
- * Note if this fails (ie. something went very wrong!), all buffers are
- * unpinned, and the caller disables the overlay. We could have tried
- * to revert back to the previous set of pinned buffers but if things are
- * hosed there is no guarantee that would succeed.
- */
-int omap_framebuffer_replace(struct drm_framebuffer *a,
- struct drm_framebuffer *b, void *arg,
- void (*unpin)(void *arg, struct drm_gem_object *bo))
+/* pin, prepare for scanout: */
+int omap_framebuffer_pin(struct drm_framebuffer *fb)
{
- int ret = 0, i, na, nb;
- struct omap_framebuffer *ofba = to_omap_framebuffer(a);
- struct omap_framebuffer *ofbb = to_omap_framebuffer(b);
- uint32_t pinned_mask = 0;
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ int ret, i, n = drm_format_num_planes(fb->pixel_format);
- na = a ? drm_format_num_planes(a->pixel_format) : 0;
- nb = b ? drm_format_num_planes(b->pixel_format) : 0;
+ for (i = 0; i < n; i++) {
+ struct plane *plane = &omap_fb->planes[i];
+ ret = omap_gem_get_paddr(plane->bo, &plane->paddr, true);
+ if (ret)
+ goto fail;
+ omap_gem_dma_sync(plane->bo, DMA_TO_DEVICE);
+ }
- for (i = 0; i < max(na, nb); i++) {
- struct plane *pa, *pb;
+ return 0;
- pa = (i < na) ? &ofba->planes[i] : NULL;
- pb = (i < nb) ? &ofbb->planes[i] : NULL;
+fail:
+ for (i--; i >= 0; i--) {
+ struct plane *plane = &omap_fb->planes[i];
+ omap_gem_put_paddr(plane->bo);
+ plane->paddr = 0;
+ }
- if (pa)
- unpin(arg, pa->bo);
+ return ret;
+}
- if (pb && !ret) {
- ret = omap_gem_get_paddr(pb->bo, &pb->paddr, true);
- if (!ret) {
- omap_gem_dma_sync(pb->bo, DMA_TO_DEVICE);
- pinned_mask |= (1 << i);
- }
- }
- }
+/* unpin, no longer being scanned out: */
+int omap_framebuffer_unpin(struct drm_framebuffer *fb)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ int ret, i, n = drm_format_num_planes(fb->pixel_format);
- if (ret) {
- /* something went wrong.. unpin what has been pinned */
- for (i = 0; i < nb; i++) {
- if (pinned_mask & (1 << i)) {
- struct plane *pb = &ofba->planes[i];
- unpin(arg, pb->bo);
- }
- }
+ for (i = 0; i < n; i++) {
+ struct plane *plane = &omap_fb->planes[i];
+ ret = omap_gem_put_paddr(plane->bo);
+ if (ret)
+ goto fail;
+ plane->paddr = 0;
}
+ return 0;
+
+fail:
return ret;
}
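
omap_framebuffer_pin() above pins each plane in turn and, if one fails, walks back over the planes that were already pinned. That rollback-on-partial-failure loop is a general idiom; a self-contained sketch with stand-in helpers (the sketch_* names and the trivial pin/unpin bodies are placeholders, not omapdrm API):

#include <linux/types.h>

struct sketch_plane {
	bool pinned;
};

static int sketch_pin(struct sketch_plane *p)
{
	p->pinned = true;	/* stand-in for the real pin operation */
	return 0;
}

static void sketch_unpin(struct sketch_plane *p)
{
	p->pinned = false;
}

/* pin every plane; on failure undo only the ones already pinned */
static int sketch_pin_all(struct sketch_plane *planes, int n)
{
	int ret, i;

	for (i = 0; i < n; i++) {
		ret = sketch_pin(&planes[i]);
		if (ret)
			goto fail;
	}
	return 0;

fail:
	while (--i >= 0)
		sketch_unpin(&planes[i]);
	return ret;
}
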
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index ebbdf4132e9..533f6ebec53 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -20,6 +20,7 @@
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
+#include <drm/drm_vma_manager.h>
#include "omap_drv.h"
#include "omap_dmm_tiler.h"
@@ -236,7 +237,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
* mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
* we actually want CMA memory for it all anyways..
*/
- pages = _drm_gem_get_pages(obj, GFP_KERNEL);
+ pages = drm_gem_get_pages(obj, GFP_KERNEL);
if (IS_ERR(pages)) {
dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
return PTR_ERR(pages);
@@ -270,7 +271,7 @@ static int omap_gem_attach_pages(struct drm_gem_object *obj)
return 0;
free_pages:
- _drm_gem_put_pages(obj, pages, true, false);
+ drm_gem_put_pages(obj, pages, true, false);
return ret;
}
@@ -294,7 +295,7 @@ static void omap_gem_detach_pages(struct drm_gem_object *obj)
kfree(omap_obj->addrs);
omap_obj->addrs = NULL;
- _drm_gem_put_pages(obj, omap_obj->pages, true, false);
+ drm_gem_put_pages(obj, omap_obj->pages, true, false);
omap_obj->pages = NULL;
}
@@ -308,21 +309,20 @@ uint32_t omap_gem_flags(struct drm_gem_object *obj)
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ int ret;
+ size_t size;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- if (!obj->map_list.map) {
- /* Make it mmapable */
- size_t size = omap_gem_mmap_size(obj);
- int ret = _drm_gem_create_mmap_offset_size(obj, size);
-
- if (ret) {
- dev_err(dev->dev, "could not allocate mmap offset\n");
- return 0;
- }
+ /* Make it mmapable */
+ size = omap_gem_mmap_size(obj);
+ ret = drm_gem_create_mmap_offset_size(obj, size);
+ if (ret) {
+ dev_err(dev->dev, "could not allocate mmap offset\n");
+ return 0;
}
- return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
+ return drm_vma_node_offset_addr(&obj->vma_node);
}
uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
@@ -629,21 +629,6 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
}
/**
- * omap_gem_dumb_destroy - destroy a dumb buffer
- * @file: client file
- * @dev: our DRM device
- * @handle: the object handle
- *
- * Destroy a handle that was created via omap_gem_dumb_create.
- */
-int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- uint32_t handle)
-{
- /* No special work needed, drop the reference and see what falls out */
- return drm_gem_handle_delete(file, handle);
-}
-
-/**
* omap_gem_dumb_map - buffer mapping for dumb interface
* @file: our drm client file
* @dev: drm device
@@ -997,12 +982,11 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct drm_device *dev = obj->dev;
struct omap_gem_object *omap_obj = to_omap_bo(obj);
- uint64_t off = 0;
+ uint64_t off;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- if (obj->map_list.map)
- off = (uint64_t)obj->map_list.hash.key;
+ off = drm_vma_node_start(&obj->vma_node);
seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d",
omap_obj->flags, obj->name, obj->refcount.refcount.counter,
@@ -1309,8 +1293,7 @@ void omap_gem_free_object(struct drm_gem_object *obj)
list_del(&omap_obj->mm_list);
- if (obj->map_list.map)
- drm_gem_free_mmap_offset(obj);
+ drm_gem_free_mmap_offset(obj);
/* this means the object is still pinned.. which really should
* not happen. I think..
@@ -1427,8 +1410,9 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev,
omap_obj->height = gsize.tiled.height;
}
+ ret = 0;
if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM))
- ret = drm_gem_private_object_init(dev, obj, size);
+ drm_gem_private_object_init(dev, obj, size);
else
ret = drm_gem_object_init(dev, obj, size);
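(Editor's sketch, not part of the patch.) The omap_gem.c hunks above drop the driver-private _drm_gem_* helpers in favour of the core GEM/VMA-manager calls. A minimal, hedged sketch of how an mmap offset is obtained with only the two core calls visible in this diff (drm_gem_create_mmap_offset_size() and drm_vma_node_offset_addr()); the example_ name is a placeholder:

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

/* Illustrative sketch: allocate an mmap offset in the device's VMA offset
 * manager and return the byte offset userspace passes to mmap() on the DRM fd.
 */
static uint64_t example_gem_mmap_offset(struct drm_gem_object *obj, size_t size)
{
	int ret;

	ret = drm_gem_create_mmap_offset_size(obj, size);
	if (ret)
		return 0;	/* callers above treat 0 as "no offset available" */

	return drm_vma_node_offset_addr(&obj->vma_node);
}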
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_helpers.c b/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
deleted file mode 100644
index f9eb679eb79..00000000000
--- a/drivers/gpu/drm/omapdrm/omap_gem_helpers.c
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * drivers/gpu/drm/omapdrm/omap_gem_helpers.c
- *
- * Copyright (C) 2011 Texas Instruments
- * Author: Rob Clark <rob.clark@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-/* temporary copy of drm_gem_{get,put}_pages() until the
- * "drm/gem: add functions to get/put pages" patch is merged..
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/shmem_fs.h>
-
-#include <drm/drmP.h>
-
-/**
- * drm_gem_get_pages - helper to allocate backing pages for a GEM object
- * @obj: obj in question
- * @gfpmask: gfp mask of requested pages
- */
-struct page **_drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
-{
- struct inode *inode;
- struct address_space *mapping;
- struct page *p, **pages;
- int i, npages;
-
- /* This is the shared memory object that backs the GEM resource */
- inode = file_inode(obj->filp);
- mapping = inode->i_mapping;
-
- npages = obj->size >> PAGE_SHIFT;
-
- pages = drm_malloc_ab(npages, sizeof(struct page *));
- if (pages == NULL)
- return ERR_PTR(-ENOMEM);
-
- gfpmask |= mapping_gfp_mask(mapping);
-
- for (i = 0; i < npages; i++) {
- p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
- if (IS_ERR(p))
- goto fail;
- pages[i] = p;
-
- /* There is a hypothetical issue w/ drivers that require
- * buffer memory in the low 4GB.. if the pages are un-
- * pinned, and swapped out, they can end up swapped back
- * in above 4GB. If pages are already in memory, then
- * shmem_read_mapping_page_gfp will ignore the gfpmask,
- * even if the already in-memory page disobeys the mask.
- *
- * It is only a theoretical issue today, because none of
- * the devices with this limitation can be populated with
- * enough memory to trigger the issue. But this BUG_ON()
- * is here as a reminder in case the problem with
- * shmem_read_mapping_page_gfp() isn't solved by the time
- * it does become a real issue.
- *
- * See this thread: http://lkml.org/lkml/2011/7/11/238
- */
- BUG_ON((gfpmask & __GFP_DMA32) &&
- (page_to_pfn(p) >= 0x00100000UL));
- }
-
- return pages;
-
-fail:
- while (i--)
- page_cache_release(pages[i]);
-
- drm_free_large(pages);
- return ERR_CAST(p);
-}
-
-/**
- * drm_gem_put_pages - helper to free backing pages for a GEM object
- * @obj: obj in question
- * @pages: pages to free
- */
-void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
- bool dirty, bool accessed)
-{
- int i, npages;
-
- npages = obj->size >> PAGE_SHIFT;
-
- for (i = 0; i < npages; i++) {
- if (dirty)
- set_page_dirty(pages[i]);
-
- if (accessed)
- mark_page_accessed(pages[i]);
-
- /* Undo the reference we took when populating the table */
- page_cache_release(pages[i]);
- }
-
- drm_free_large(pages);
-}
-
-int
-_drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
-{
- struct drm_device *dev = obj->dev;
- struct drm_gem_mm *mm = dev->mm_private;
- struct drm_map_list *list;
- struct drm_local_map *map;
- int ret = 0;
-
- /* Set the object up for mmap'ing */
- list = &obj->map_list;
- list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
- if (!list->map)
- return -ENOMEM;
-
- map = list->map;
- map->type = _DRM_GEM;
- map->size = size;
- map->handle = obj;
-
- /* Get a DRM GEM mmap offset allocated... */
- list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
- size / PAGE_SIZE, 0, 0);
-
- if (!list->file_offset_node) {
- DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
- ret = -ENOSPC;
- goto out_free_list;
- }
-
- list->file_offset_node = drm_mm_get_block(list->file_offset_node,
- size / PAGE_SIZE, 0);
- if (!list->file_offset_node) {
- ret = -ENOMEM;
- goto out_free_list;
- }
-
- list->hash.key = list->file_offset_node->start;
- ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
- if (ret) {
- DRM_ERROR("failed to add to map hash\n");
- goto out_free_mm;
- }
-
- return 0;
-
-out_free_mm:
- drm_mm_put_block(list->file_offset_node);
-out_free_list:
- kfree(list->map);
- list->map = NULL;
-
- return ret;
-}
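(Editor's sketch, not part of the patch.) The file deleted above was, per its own header comment, a temporary copy of the page helpers kept until they landed in the DRM core. A hedged sketch of what a driver calls once it relies on the core versions instead, with the signatures as used in the omap_gem.c hunks earlier (drm_gem_get_pages() still takes a gfp mask at this point); the example_ names are placeholders:

#include <drm/drmP.h>

/* Illustrative sketch: attach/detach shmem-backed pages through the core
 * helpers that replace the deleted driver-local copies.
 */
static struct page **example_attach_pages(struct drm_gem_object *obj)
{
	/* returns an array of obj->size >> PAGE_SHIFT page pointers,
	 * or an ERR_PTR() on failure */
	return drm_gem_get_pages(obj, GFP_KERNEL);
}

static void example_detach_pages(struct drm_gem_object *obj,
				 struct page **pages)
{
	/* mark the pages dirty but not accessed, then drop the references */
	drm_gem_put_pages(obj, pages, true, false);
}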
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 8d225d7ff4e..046d5e660c0 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -17,7 +17,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/kfifo.h>
+#include "drm_flip_work.h"
#include "omap_drv.h"
#include "omap_dmm_tiler.h"
@@ -58,26 +58,23 @@ struct omap_plane {
struct omap_drm_irq error_irq;
- /* set of bo's pending unpin until next post_apply() */
- DECLARE_KFIFO_PTR(unpin_fifo, struct drm_gem_object *);
+ /* for deferring bo unpin's until next post_apply(): */
+ struct drm_flip_work unpin_work;
// XXX maybe get rid of this and handle vblank in crtc too?
struct callback apply_done_cb;
};
-static void unpin(void *arg, struct drm_gem_object *bo)
+static void unpin_worker(struct drm_flip_work *work, void *val)
{
- struct drm_plane *plane = arg;
- struct omap_plane *omap_plane = to_omap_plane(plane);
+ struct omap_plane *omap_plane =
+ container_of(work, struct omap_plane, unpin_work);
+ struct drm_device *dev = omap_plane->base.dev;
- if (kfifo_put(&omap_plane->unpin_fifo,
- (const struct drm_gem_object **)&bo)) {
- /* also hold a ref so it isn't free'd while pinned */
- drm_gem_object_reference(bo);
- } else {
- dev_err(plane->dev->dev, "unpin fifo full!\n");
- omap_gem_put_paddr(bo);
- }
+ omap_framebuffer_unpin(val);
+ mutex_lock(&dev->mode_config.mutex);
+ drm_framebuffer_unreference(val);
+ mutex_unlock(&dev->mode_config.mutex);
}
/* update which fb (if any) is pinned for scanout */
@@ -87,23 +84,22 @@ static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
struct drm_framebuffer *pinned_fb = omap_plane->pinned_fb;
if (pinned_fb != fb) {
- int ret;
+ int ret = 0;
DBG("%p -> %p", pinned_fb, fb);
- if (fb)
+ if (fb) {
drm_framebuffer_reference(fb);
-
- ret = omap_framebuffer_replace(pinned_fb, fb, plane, unpin);
+ ret = omap_framebuffer_pin(fb);
+ }
if (pinned_fb)
- drm_framebuffer_unreference(pinned_fb);
+ drm_flip_work_queue(&omap_plane->unpin_work, pinned_fb);
if (ret) {
dev_err(plane->dev->dev, "could not swap %p -> %p\n",
omap_plane->pinned_fb, fb);
- if (fb)
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_unreference(fb);
omap_plane->pinned_fb = NULL;
return ret;
}
@@ -170,17 +166,14 @@ static void omap_plane_post_apply(struct omap_drm_apply *apply)
struct omap_plane *omap_plane =
container_of(apply, struct omap_plane, apply);
struct drm_plane *plane = &omap_plane->base;
+ struct omap_drm_private *priv = plane->dev->dev_private;
struct omap_overlay_info *info = &omap_plane->info;
- struct drm_gem_object *bo = NULL;
struct callback cb;
cb = omap_plane->apply_done_cb;
omap_plane->apply_done_cb.fxn = NULL;
- while (kfifo_get(&omap_plane->unpin_fifo, &bo)) {
- omap_gem_put_paddr(bo);
- drm_gem_object_unreference_unlocked(bo);
- }
+ drm_flip_work_commit(&omap_plane->unpin_work, priv->wq);
if (cb.fxn)
cb.fxn(cb.arg);
@@ -277,8 +270,7 @@ static void omap_plane_destroy(struct drm_plane *plane)
omap_plane_disable(plane);
drm_plane_cleanup(plane);
- WARN_ON(!kfifo_is_empty(&omap_plane->unpin_fifo));
- kfifo_free(&omap_plane->unpin_fifo);
+ drm_flip_work_cleanup(&omap_plane->unpin_work);
kfree(omap_plane);
}
@@ -399,7 +391,8 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
if (!omap_plane)
goto fail;
- ret = kfifo_alloc(&omap_plane->unpin_fifo, 16, GFP_KERNEL);
+ ret = drm_flip_work_init(&omap_plane->unpin_work, 16,
+ "unpin", unpin_worker);
if (ret) {
dev_err(dev->dev, "could not allocate unpin FIFO\n");
goto fail;
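(Editor's sketch, not part of the patch.) The omap_plane.c hunks above swap the hand-rolled kfifo of pending unpins for the drm_flip_work helper. A minimal sketch of that pattern using only the calls that appear in this diff (drm_flip_work_init/queue/commit, plus drm_flip_work_cleanup at teardown); the example_ names are placeholders and the locking done by the real worker is omitted:

#include "drm_flip_work.h"
#include "omap_drv.h"

struct example_plane {
	struct drm_flip_work unpin_work;
};

/* runs later from the driver workqueue, so sleeping calls are allowed here */
static void example_unpin_worker(struct drm_flip_work *work, void *val)
{
	struct drm_framebuffer *fb = val;

	omap_framebuffer_unpin(fb);
	drm_framebuffer_unreference(fb);
}

static int example_plane_setup(struct example_plane *p)
{
	/* up to 16 queued items, worker name "unpin" */
	return drm_flip_work_init(&p->unpin_work, 16, "unpin",
				  example_unpin_worker);
}

static void example_plane_swap(struct example_plane *p,
			       struct drm_framebuffer *old_fb,
			       struct workqueue_struct *wq)
{
	/* queue the old fb now (may happen from the update path) ... */
	drm_flip_work_queue(&p->unpin_work, old_fb);
	/* ... and commit the queued work once the flip has taken effect */
	drm_flip_work_commit(&p->unpin_work, wq);
}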
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 93c2f2cceb5..eb89653a7a1 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -179,9 +179,10 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *relea
uint32_t type, bool interruptible)
{
struct qxl_command cmd;
+ struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
cmd.type = type;
- cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
+ cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
return qxl_ring_push(qdev->command_ring, &cmd, interruptible);
}
@@ -191,9 +192,10 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas
uint32_t type, bool interruptible)
{
struct qxl_command cmd;
+ struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
cmd.type = type;
- cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset);
+ cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset);
return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible);
}
@@ -214,7 +216,6 @@ int qxl_garbage_collect(struct qxl_device *qdev)
struct qxl_release *release;
uint64_t id, next_id;
int i = 0;
- int ret;
union qxl_release_info *info;
while (qxl_ring_pop(qdev->release_ring, &id)) {
@@ -224,17 +225,10 @@ int qxl_garbage_collect(struct qxl_device *qdev)
if (release == NULL)
break;
- ret = qxl_release_reserve(qdev, release, false);
- if (ret) {
- qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id);
- DRM_ERROR("failed to reserve release %lld\n", id);
- }
-
info = qxl_release_map(qdev, release);
next_id = info->next;
qxl_release_unmap(qdev, release, info);
- qxl_release_unreserve(qdev, release);
QXL_INFO(qdev, "popped %lld, next %lld\n", id,
next_id);
@@ -259,27 +253,29 @@ int qxl_garbage_collect(struct qxl_device *qdev)
return i;
}
-int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+int qxl_alloc_bo_reserved(struct qxl_device *qdev,
+ struct qxl_release *release,
+ unsigned long size,
struct qxl_bo **_bo)
{
struct qxl_bo *bo;
int ret;
ret = qxl_bo_create(qdev, size, false /* not kernel - device */,
- QXL_GEM_DOMAIN_VRAM, NULL, &bo);
+ false, QXL_GEM_DOMAIN_VRAM, NULL, &bo);
if (ret) {
DRM_ERROR("failed to allocate VRAM BO\n");
return ret;
}
- ret = qxl_bo_reserve(bo, false);
- if (unlikely(ret != 0))
+ ret = qxl_release_list_add(release, bo);
+ if (ret)
goto out_unref;
*_bo = bo;
return 0;
out_unref:
qxl_bo_unref(&bo);
- return 0;
+ return ret;
}
static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
@@ -503,6 +499,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
if (ret)
return ret;
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ return ret;
+
cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_SURFACE_CMD_CREATE;
cmd->u.surface_create.format = surf->surf.format;
@@ -524,14 +524,11 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev,
surf->surf_create = release;
- /* no need to add a release to the fence for this bo,
+ /* no need to add a release to the fence for this surface bo,
since it is only released when we ask to destroy the surface
and it would never signal otherwise */
- qxl_fence_releaseable(qdev, release);
-
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
-
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
surf->hw_surf_alloc = true;
spin_lock(&qdev->surf_id_idr_lock);
@@ -573,12 +570,9 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev,
cmd->surface_id = id;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_fence_releaseable(qdev, release);
-
qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false);
- qxl_release_unreserve(qdev, release);
-
+ qxl_release_fence_buffer_objects(release);
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index f76f5dd7bfc..835caba026d 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -179,7 +179,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc)
kfree(qxl_crtc);
}
-static void
+static int
qxl_hide_cursor(struct qxl_device *qdev)
{
struct qxl_release *release;
@@ -188,14 +188,22 @@ qxl_hide_cursor(struct qxl_device *qdev)
ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
&release, NULL);
+ if (ret)
+ return ret;
+
+ ret = qxl_release_reserve_list(release, true);
+ if (ret) {
+ qxl_release_free(qdev, release);
+ return ret;
+ }
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_HIDE;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_fence_releaseable(qdev, release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+ return 0;
}
static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
@@ -216,10 +224,8 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
int size = 64*64*4;
int ret = 0;
- if (!handle) {
- qxl_hide_cursor(qdev);
- return 0;
- }
+ if (!handle)
+ return qxl_hide_cursor(qdev);
obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
if (!obj) {
@@ -234,8 +240,9 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
goto out_unref;
ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
+ qxl_bo_unreserve(user_bo);
if (ret)
- goto out_unreserve;
+ goto out_unref;
ret = qxl_bo_kmap(user_bo, &user_ptr);
if (ret)
@@ -246,14 +253,20 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
&release, NULL);
if (ret)
goto out_kunmap;
- ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size,
- &cursor_bo);
+
+ ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_cursor) + size,
+ &cursor_bo);
if (ret)
goto out_free_release;
- ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
+
+ ret = qxl_release_reserve_list(release, false);
if (ret)
goto out_free_bo;
+ ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
+ if (ret)
+ goto out_backoff;
+
cursor->header.unique = 0;
cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
cursor->header.width = 64;
@@ -269,11 +282,7 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
qxl_bo_kunmap(cursor_bo);
- /* finish with the userspace bo */
qxl_bo_kunmap(user_bo);
- qxl_bo_unpin(user_bo);
- qxl_bo_unreserve(user_bo);
- drm_gem_object_unreference_unlocked(obj);
cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
cmd->type = QXL_CURSOR_SET;
@@ -281,30 +290,35 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
cmd->u.set.position.y = qcrtc->cur_y;
cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
- qxl_release_add_res(qdev, release, cursor_bo);
cmd->u.set.visible = 1;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_fence_releaseable(qdev, release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+
+ /* finish with the userspace bo */
+ ret = qxl_bo_reserve(user_bo, false);
+ if (!ret) {
+ qxl_bo_unpin(user_bo);
+ qxl_bo_unreserve(user_bo);
+ }
+ drm_gem_object_unreference_unlocked(obj);
- qxl_bo_unreserve(cursor_bo);
qxl_bo_unref(&cursor_bo);
return ret;
+
+out_backoff:
+ qxl_release_backoff_reserve_list(release);
out_free_bo:
qxl_bo_unref(&cursor_bo);
out_free_release:
- qxl_release_unreserve(qdev, release);
qxl_release_free(qdev, release);
out_kunmap:
qxl_bo_kunmap(user_bo);
out_unpin:
qxl_bo_unpin(user_bo);
-out_unreserve:
- qxl_bo_unreserve(user_bo);
out_unref:
drm_gem_object_unreference_unlocked(obj);
return ret;
@@ -322,6 +336,14 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD,
&release, NULL);
+ if (ret)
+ return ret;
+
+ ret = qxl_release_reserve_list(release, true);
+ if (ret) {
+ qxl_release_free(qdev, release);
+ return ret;
+ }
qcrtc->cur_x = x;
qcrtc->cur_y = y;
@@ -332,9 +354,9 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
cmd->u.position.y = qcrtc->cur_y;
qxl_release_unmap(qdev, release, &cmd->release_info);
- qxl_fence_releaseable(qdev, release);
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 3c8c3dbf937..56e1d633875 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -23,25 +23,29 @@
#include "qxl_drv.h"
#include "qxl_object.h"
+static int alloc_clips(struct qxl_device *qdev,
+ struct qxl_release *release,
+ unsigned num_clips,
+ struct qxl_bo **clips_bo)
+{
+ int size = sizeof(struct qxl_clip_rects) + sizeof(struct qxl_rect) * num_clips;
+
+ return qxl_alloc_bo_reserved(qdev, release, size, clips_bo);
+}
+
/* returns a pointer to the already allocated qxl_rect array inside
* the qxl_clip_rects. This is *not* the same as the memory allocated
* on the device, it is offset to qxl_clip_rects.chunk.data */
static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
struct qxl_drawable *drawable,
unsigned num_clips,
- struct qxl_bo **clips_bo,
- struct qxl_release *release)
+ struct qxl_bo *clips_bo)
{
struct qxl_clip_rects *dev_clips;
int ret;
- int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips;
- ret = qxl_alloc_bo_reserved(qdev, size, clips_bo);
- if (ret)
- return NULL;
- ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips);
+ ret = qxl_bo_kmap(clips_bo, (void **)&dev_clips);
if (ret) {
- qxl_bo_unref(clips_bo);
return NULL;
}
dev_clips->num_rects = num_clips;
@@ -52,20 +56,34 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
}
static int
+alloc_drawable(struct qxl_device *qdev, struct qxl_release **release)
+{
+ int ret;
+ ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable),
+ QXL_RELEASE_DRAWABLE, release,
+ NULL);
+ return ret;
+}
+
+static void
+free_drawable(struct qxl_device *qdev, struct qxl_release *release)
+{
+ qxl_release_free(qdev, release);
+}
+
+/* release needs to be reserved at this point */
+static int
make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
const struct qxl_rect *rect,
- struct qxl_release **release)
+ struct qxl_release *release)
{
struct qxl_drawable *drawable;
- int i, ret;
+ int i;
- ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable),
- QXL_RELEASE_DRAWABLE, release,
- NULL);
- if (ret)
- return ret;
+ drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
+ if (!drawable)
+ return -ENOMEM;
- drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release);
drawable->type = type;
drawable->surface_id = surface; /* Only primary for now */
@@ -91,14 +109,23 @@ make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
drawable->bbox = *rect;
drawable->mm_time = qdev->rom->mm_clock;
- qxl_release_unmap(qdev, *release, &drawable->release_info);
+ qxl_release_unmap(qdev, release, &drawable->release_info);
return 0;
}
-static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
+static int alloc_palette_object(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_bo **palette_bo)
+{
+ return qxl_alloc_bo_reserved(qdev, release,
+ sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
+ palette_bo);
+}
+
+static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
+ struct qxl_release *release,
const struct qxl_fb_image *qxl_fb_image)
{
- struct qxl_device *qdev = qxl_fb_image->qdev;
const struct fb_image *fb_image = &qxl_fb_image->fb_image;
uint32_t visual = qxl_fb_image->visual;
const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette;
@@ -108,12 +135,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
static uint64_t unique; /* we make no attempt to actually set this
* correctly globally, since that would require
* tracking all of our palettes. */
-
- ret = qxl_alloc_bo_reserved(qdev,
- sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
- palette_bo);
-
- ret = qxl_bo_kmap(*palette_bo, (void **)&pal);
+ ret = qxl_bo_kmap(palette_bo, (void **)&pal);
pal->num_ents = 2;
pal->unique = unique++;
if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
@@ -126,7 +148,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
}
pal->ents[0] = bgcolor;
pal->ents[1] = fgcolor;
- qxl_bo_kunmap(*palette_bo);
+ qxl_bo_kunmap(palette_bo);
return 0;
}
@@ -144,44 +166,63 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
const char *src = fb_image->data;
int depth = fb_image->depth;
struct qxl_release *release;
- struct qxl_bo *image_bo;
struct qxl_image *image;
int ret;
-
+ struct qxl_drm_image *dimage;
+ struct qxl_bo *palette_bo = NULL;
if (stride == 0)
stride = depth * width / 8;
+ ret = alloc_drawable(qdev, &release);
+ if (ret)
+ return;
+
+ ret = qxl_image_alloc_objects(qdev, release,
+ &dimage,
+ height, stride);
+ if (ret)
+ goto out_free_drawable;
+
+ if (depth == 1) {
+ ret = alloc_palette_object(qdev, release, &palette_bo);
+ if (ret)
+ goto out_free_image;
+ }
+
+ /* do a reservation run over all the objects we just allocated */
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ goto out_free_palette;
+
rect.left = x;
rect.right = x + width;
rect.top = y;
rect.bottom = y + height;
- ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release);
- if (ret)
- return;
+ ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, release);
+ if (ret) {
+ qxl_release_backoff_reserve_list(release);
+ goto out_free_palette;
+ }
- ret = qxl_image_create(qdev, release, &image_bo,
- (const uint8_t *)src, 0, 0,
- width, height, depth, stride);
+ ret = qxl_image_init(qdev, release, dimage,
+ (const uint8_t *)src, 0, 0,
+ width, height, depth, stride);
if (ret) {
- qxl_release_unreserve(qdev, release);
+ qxl_release_backoff_reserve_list(release);
qxl_release_free(qdev, release);
return;
}
if (depth == 1) {
- struct qxl_bo *palette_bo;
void *ptr;
- ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image);
- qxl_release_add_res(qdev, release, palette_bo);
+ ret = qxl_palette_create_1bit(palette_bo, release, qxl_fb_image);
- ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
+ ptr = qxl_bo_kmap_atomic_page(qdev, dimage->bo, 0);
image = ptr;
image->u.bitmap.palette =
qxl_bo_physical_address(qdev, palette_bo, 0);
- qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
- qxl_bo_unreserve(palette_bo);
- qxl_bo_unref(&palette_bo);
+ qxl_bo_kunmap_atomic_page(qdev, dimage->bo, ptr);
}
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
@@ -199,16 +240,20 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
drawable->u.copy.mask.bitmap = 0;
drawable->u.copy.src_bitmap =
- qxl_bo_physical_address(qdev, image_bo, 0);
+ qxl_bo_physical_address(qdev, dimage->bo, 0);
qxl_release_unmap(qdev, release, &drawable->release_info);
- qxl_release_add_res(qdev, release, image_bo);
- qxl_bo_unreserve(image_bo);
- qxl_bo_unref(&image_bo);
-
- qxl_fence_releaseable(qdev, release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+
+out_free_palette:
+ if (palette_bo)
+ qxl_bo_unref(&palette_bo);
+out_free_image:
+ qxl_image_free_objects(qdev, dimage);
+out_free_drawable:
+ if (ret)
+ free_drawable(qdev, release);
}
/* push a draw command using the given clipping rectangles as
@@ -243,10 +288,14 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
int depth = qxl_fb->base.bits_per_pixel;
uint8_t *surface_base;
struct qxl_release *release;
- struct qxl_bo *image_bo;
struct qxl_bo *clips_bo;
+ struct qxl_drm_image *dimage;
int ret;
+ ret = alloc_drawable(qdev, &release);
+ if (ret)
+ return;
+
left = clips->x1;
right = clips->x2;
top = clips->y1;
@@ -263,36 +312,52 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
width = right - left;
height = bottom - top;
+
+ ret = alloc_clips(qdev, release, num_clips, &clips_bo);
+ if (ret)
+ goto out_free_drawable;
+
+ ret = qxl_image_alloc_objects(qdev, release,
+ &dimage,
+ height, stride);
+ if (ret)
+ goto out_free_clips;
+
+ /* do a reservation run over all the objects we just allocated */
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ goto out_free_image;
+
drawable_rect.left = left;
drawable_rect.right = right;
drawable_rect.top = top;
drawable_rect.bottom = bottom;
+
ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect,
- &release);
+ release);
if (ret)
- return;
+ goto out_release_backoff;
ret = qxl_bo_kmap(bo, (void **)&surface_base);
if (ret)
- goto out_unref;
+ goto out_release_backoff;
- ret = qxl_image_create(qdev, release, &image_bo, surface_base,
- left, top, width, height, depth, stride);
+
+ ret = qxl_image_init(qdev, release, dimage, surface_base,
+ left, top, width, height, depth, stride);
qxl_bo_kunmap(bo);
if (ret)
- goto out_unref;
+ goto out_release_backoff;
+
+ rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo);
+ if (!rects)
+ goto out_release_backoff;
- rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release);
- if (!rects) {
- qxl_bo_unref(&image_bo);
- goto out_unref;
- }
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
drawable->clip.data = qxl_bo_physical_address(qdev,
clips_bo, 0);
- qxl_release_add_res(qdev, release, clips_bo);
drawable->u.copy.src_area.top = 0;
drawable->u.copy.src_area.bottom = height;
@@ -306,11 +371,9 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
drawable->u.copy.mask.pos.y = 0;
drawable->u.copy.mask.bitmap = 0;
- drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0);
+ drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, dimage->bo, 0);
qxl_release_unmap(qdev, release, &drawable->release_info);
- qxl_release_add_res(qdev, release, image_bo);
- qxl_bo_unreserve(image_bo);
- qxl_bo_unref(&image_bo);
+
clips_ptr = clips;
for (i = 0; i < num_clips; i++, clips_ptr += inc) {
rects[i].left = clips_ptr->x1;
@@ -319,17 +382,22 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
rects[i].bottom = clips_ptr->y2;
}
qxl_bo_kunmap(clips_bo);
- qxl_bo_unreserve(clips_bo);
- qxl_bo_unref(&clips_bo);
- qxl_fence_releaseable(qdev, release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
- qxl_release_unreserve(qdev, release);
- return;
+ qxl_release_fence_buffer_objects(release);
+
+out_release_backoff:
+ if (ret)
+ qxl_release_backoff_reserve_list(release);
+out_free_image:
+ qxl_image_free_objects(qdev, dimage);
+out_free_clips:
+ qxl_bo_unref(&clips_bo);
+out_free_drawable:
+ /* only free drawable on error */
+ if (ret)
+ free_drawable(qdev, release);
-out_unref:
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
}
void qxl_draw_copyarea(struct qxl_device *qdev,
@@ -342,22 +410,36 @@ void qxl_draw_copyarea(struct qxl_device *qdev,
struct qxl_release *release;
int ret;
+ ret = alloc_drawable(qdev, &release);
+ if (ret)
+ return;
+
+ /* do a reservation run over all the objects we just allocated */
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ goto out_free_release;
+
rect.left = dx;
rect.top = dy;
rect.right = dx + width;
rect.bottom = dy + height;
- ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release);
- if (ret)
- return;
+ ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, release);
+ if (ret) {
+ qxl_release_backoff_reserve_list(release);
+ goto out_free_release;
+ }
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->u.copy_bits.src_pos.x = sx;
drawable->u.copy_bits.src_pos.y = sy;
-
qxl_release_unmap(qdev, release, &drawable->release_info);
- qxl_fence_releaseable(qdev, release);
+
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+
+out_free_release:
+ if (ret)
+ free_drawable(qdev, release);
}
void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
@@ -370,10 +452,21 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
struct qxl_release *release;
int ret;
- ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release);
+ ret = alloc_drawable(qdev, &release);
if (ret)
return;
+ /* do a reservation run over all the objects we just allocated */
+ ret = qxl_release_reserve_list(release, true);
+ if (ret)
+ goto out_free_release;
+
+ ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, release);
+ if (ret) {
+ qxl_release_backoff_reserve_list(release);
+ goto out_free_release;
+ }
+
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID;
drawable->u.fill.brush.u.color = color;
@@ -384,7 +477,11 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
drawable->u.fill.mask.bitmap = 0;
qxl_release_unmap(qdev, release, &drawable->release_info);
- qxl_fence_releaseable(qdev, release);
+
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
- qxl_release_unreserve(qdev, release);
+ qxl_release_fence_buffer_objects(release);
+
+out_free_release:
+ if (ret)
+ free_drawable(qdev, release);
}
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index df0b577a660..514118ae72d 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -84,7 +84,6 @@ static const struct file_operations qxl_fops = {
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.poll = drm_poll,
- .fasync = drm_fasync,
.mmap = qxl_mmap,
};
@@ -221,7 +220,7 @@ static struct drm_driver qxl_driver = {
.dumb_create = qxl_mode_dumb_create,
.dumb_map_offset = qxl_mode_dumb_mmap,
- .dumb_destroy = qxl_mode_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = qxl_debugfs_init,
.debugfs_cleanup = qxl_debugfs_takedown,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index aacb791464a..f7c9adde46a 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -42,6 +42,9 @@
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
+/* just for ttm_validate_buffer */
+#include <ttm/ttm_execbuf_util.h>
+
#include <drm/qxl_drm.h>
#include "qxl_dev.h"
@@ -118,9 +121,9 @@ struct qxl_bo {
uint32_t surface_id;
struct qxl_fence fence; /* per bo fence - list of releases */
struct qxl_release *surf_create;
- atomic_t reserve_count;
};
#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base)
+#define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo)
struct qxl_gem {
struct mutex mutex;
@@ -128,12 +131,7 @@ struct qxl_gem {
};
struct qxl_bo_list {
- struct list_head lhead;
- struct qxl_bo *bo;
-};
-
-struct qxl_reloc_list {
- struct list_head bos;
+ struct ttm_validate_buffer tv;
};
struct qxl_crtc {
@@ -195,10 +193,20 @@ enum {
struct qxl_release {
int id;
int type;
- int bo_count;
uint32_t release_offset;
uint32_t surface_release_id;
- struct qxl_bo *bos[QXL_MAX_RES];
+ struct ww_acquire_ctx ticket;
+ struct list_head bos;
+};
+
+struct qxl_drm_chunk {
+ struct list_head head;
+ struct qxl_bo *bo;
+};
+
+struct qxl_drm_image {
+ struct qxl_bo *bo;
+ struct list_head chunk_list;
};
struct qxl_fb_image {
@@ -314,12 +322,13 @@ struct qxl_device {
struct workqueue_struct *gc_queue;
struct work_struct gc_work;
+ struct work_struct fb_work;
};
/* forward declaration for QXL_INFO_IO */
void qxl_io_log(struct qxl_device *qdev, const char *fmt, ...);
-extern struct drm_ioctl_desc qxl_ioctls[];
+extern const struct drm_ioctl_desc qxl_ioctls[];
extern int qxl_max_ioctl;
int qxl_driver_load(struct drm_device *dev, unsigned long flags);
@@ -396,9 +405,6 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
bool discardable, bool kernel,
struct qxl_surface *surf,
struct drm_gem_object **obj);
-int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
- uint64_t *gpu_addr);
-void qxl_gem_object_unpin(struct drm_gem_object *obj);
int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
struct drm_file *file_priv,
u32 domain,
@@ -418,9 +424,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
int qxl_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int qxl_mode_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle);
int qxl_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
@@ -433,12 +436,19 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma);
/* qxl image */
-int qxl_image_create(struct qxl_device *qdev,
- struct qxl_release *release,
- struct qxl_bo **image_bo,
- const uint8_t *data,
- int x, int y, int width, int height,
- int depth, int stride);
+int qxl_image_init(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_drm_image *dimage,
+ const uint8_t *data,
+ int x, int y, int width, int height,
+ int depth, int stride);
+int
+qxl_image_alloc_objects(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_drm_image **image_ptr,
+ int height, int stride);
+void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage);
+
void qxl_update_screen(struct qxl_device *qxl);
/* qxl io operations (qxl_cmd.c) */
@@ -459,20 +469,15 @@ int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible
void qxl_io_flush_release(struct qxl_device *qdev);
void qxl_io_flush_surfaces(struct qxl_device *qdev);
-int qxl_release_reserve(struct qxl_device *qdev,
- struct qxl_release *release, bool no_wait);
-void qxl_release_unreserve(struct qxl_device *qdev,
- struct qxl_release *release);
union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
struct qxl_release *release);
void qxl_release_unmap(struct qxl_device *qdev,
struct qxl_release *release,
union qxl_release_info *info);
-/*
- * qxl_bo_add_resource.
- *
- */
-void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource);
+int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo);
+int qxl_release_reserve_list(struct qxl_release *release, bool no_intr);
+void qxl_release_backoff_reserve_list(struct qxl_release *release);
+void qxl_release_fence_buffer_objects(struct qxl_release *release);
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
enum qxl_surface_cmd_type surface_cmd_type,
@@ -481,15 +486,16 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
int type, struct qxl_release **release,
struct qxl_bo **rbo);
-int qxl_fence_releaseable(struct qxl_device *qdev,
- struct qxl_release *release);
+
int
qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release,
uint32_t type, bool interruptible);
int
qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release,
uint32_t type, bool interruptible);
-int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size,
+int qxl_alloc_bo_reserved(struct qxl_device *qdev,
+ struct qxl_release *release,
+ unsigned long size,
struct qxl_bo **_bo);
/* qxl drawing commands */
@@ -510,15 +516,9 @@ void qxl_draw_copyarea(struct qxl_device *qdev,
u32 sx, u32 sy,
u32 dx, u32 dy);
-uint64_t
-qxl_release_alloc(struct qxl_device *qdev, int type,
- struct qxl_release **ret);
-
void qxl_release_free(struct qxl_device *qdev,
struct qxl_release *release);
-void qxl_release_add_res(struct qxl_device *qdev,
- struct qxl_release *release,
- struct qxl_bo *bo);
+
/* used by qxl_debugfs_release */
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
uint64_t id);
@@ -561,7 +561,7 @@ void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freein
int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf);
/* qxl_fence.c */
-int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id);
+void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id);
int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id);
int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence);
void qxl_fence_fini(struct qxl_fence *qfence);
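(Editor's sketch, not part of the patch.) To tie the new prototypes above together: the qxl command paths in this series converge on one release life cycle — allocate a reserved release, reserve its BO list, fill and unmap the command, push it, then fence the buffer objects, backing off and freeing on error. A condensed sketch mirroring qxl_hide_cursor() in the qxl_display.c hunks; the example_ name is a placeholder:

#include "qxl_drv.h"

/* Illustrative sketch: the common shape of a qxl command submission
 * after this change.
 */
static int example_push_cursor_hide(struct qxl_device *qdev)
{
	struct qxl_release *release;
	struct qxl_cursor_cmd *cmd;
	int ret;

	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
					 QXL_RELEASE_CURSOR_CMD,
					 &release, NULL);
	if (ret)
		return ret;

	/* reserve every BO that has been added to the release's list */
	ret = qxl_release_reserve_list(release, true);
	if (ret) {
		qxl_release_free(qdev, release);
		return ret;
	}

	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_CURSOR_HIDE;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
	/* replaces the old qxl_fence_releaseable() + qxl_release_unreserve() pair */
	qxl_release_fence_buffer_objects(release);
	return 0;
}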
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index 847c4ee798f..d34bb4130ff 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -68,13 +68,6 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
return 0;
}
-int qxl_mode_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file_priv, handle);
-}
-
int qxl_mode_dumb_mmap(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p)
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 76f39d88d68..88722f23343 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -37,12 +37,29 @@
#define QXL_DIRTY_DELAY (HZ / 30)
+#define QXL_FB_OP_FILLRECT 1
+#define QXL_FB_OP_COPYAREA 2
+#define QXL_FB_OP_IMAGEBLIT 3
+
+struct qxl_fb_op {
+ struct list_head head;
+ int op_type;
+ union {
+ struct fb_fillrect fr;
+ struct fb_copyarea ca;
+ struct fb_image ib;
+ } op;
+ void *img_data;
+};
+
struct qxl_fbdev {
struct drm_fb_helper helper;
struct qxl_framebuffer qfb;
struct list_head fbdev_list;
struct qxl_device *qdev;
+ spinlock_t delayed_ops_lock;
+ struct list_head delayed_ops;
void *shadow;
int size;
@@ -164,8 +181,69 @@ static struct fb_deferred_io qxl_defio = {
.deferred_io = qxl_deferred_io,
};
-static void qxl_fb_fillrect(struct fb_info *info,
- const struct fb_fillrect *fb_rect)
+static void qxl_fb_delayed_fillrect(struct qxl_fbdev *qfbdev,
+ const struct fb_fillrect *fb_rect)
+{
+ struct qxl_fb_op *op;
+ unsigned long flags;
+
+ op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
+ if (!op)
+ return;
+
+ op->op.fr = *fb_rect;
+ op->img_data = NULL;
+ op->op_type = QXL_FB_OP_FILLRECT;
+
+ spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+ list_add_tail(&op->head, &qfbdev->delayed_ops);
+ spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
+}
+
+static void qxl_fb_delayed_copyarea(struct qxl_fbdev *qfbdev,
+ const struct fb_copyarea *fb_copy)
+{
+ struct qxl_fb_op *op;
+ unsigned long flags;
+
+ op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN);
+ if (!op)
+ return;
+
+ op->op.ca = *fb_copy;
+ op->img_data = NULL;
+ op->op_type = QXL_FB_OP_COPYAREA;
+
+ spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+ list_add_tail(&op->head, &qfbdev->delayed_ops);
+ spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
+}
+
+static void qxl_fb_delayed_imageblit(struct qxl_fbdev *qfbdev,
+ const struct fb_image *fb_image)
+{
+ struct qxl_fb_op *op;
+ unsigned long flags;
+ uint32_t size = fb_image->width * fb_image->height * (fb_image->depth >= 8 ? fb_image->depth / 8 : 1);
+
+ op = kmalloc(sizeof(struct qxl_fb_op) + size, GFP_ATOMIC | __GFP_NOWARN);
+ if (!op)
+ return;
+
+ op->op.ib = *fb_image;
+ op->img_data = (void *)(op + 1);
+ op->op_type = QXL_FB_OP_IMAGEBLIT;
+
+ memcpy(op->img_data, fb_image->data, size);
+
+ op->op.ib.data = op->img_data;
+ spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+ list_add_tail(&op->head, &qfbdev->delayed_ops);
+ spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
+}
+
+static void qxl_fb_fillrect_internal(struct fb_info *info,
+ const struct fb_fillrect *fb_rect)
{
struct qxl_fbdev *qfbdev = info->par;
struct qxl_device *qdev = qfbdev->qdev;
@@ -203,17 +281,28 @@ static void qxl_fb_fillrect(struct fb_info *info,
qxl_draw_fill_rec.rect = rect;
qxl_draw_fill_rec.color = color;
qxl_draw_fill_rec.rop = rop;
+
+ qxl_draw_fill(&qxl_draw_fill_rec);
+}
+
+static void qxl_fb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *fb_rect)
+{
+ struct qxl_fbdev *qfbdev = info->par;
+ struct qxl_device *qdev = qfbdev->qdev;
+
if (!drm_can_sleep()) {
- qxl_io_log(qdev,
- "%s: TODO use RCU, mysterious locks with spin_lock\n",
- __func__);
+ qxl_fb_delayed_fillrect(qfbdev, fb_rect);
+ schedule_work(&qdev->fb_work);
return;
}
- qxl_draw_fill(&qxl_draw_fill_rec);
+ /* make sure any previous work is done */
+ flush_work(&qdev->fb_work);
+ qxl_fb_fillrect_internal(info, fb_rect);
}
-static void qxl_fb_copyarea(struct fb_info *info,
- const struct fb_copyarea *region)
+static void qxl_fb_copyarea_internal(struct fb_info *info,
+ const struct fb_copyarea *region)
{
struct qxl_fbdev *qfbdev = info->par;
@@ -223,37 +312,89 @@ static void qxl_fb_copyarea(struct fb_info *info,
region->dx, region->dy);
}
+static void qxl_fb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *region)
+{
+ struct qxl_fbdev *qfbdev = info->par;
+ struct qxl_device *qdev = qfbdev->qdev;
+
+ if (!drm_can_sleep()) {
+ qxl_fb_delayed_copyarea(qfbdev, region);
+ schedule_work(&qdev->fb_work);
+ return;
+ }
+ /* make sure any previous work is done */
+ flush_work(&qdev->fb_work);
+ qxl_fb_copyarea_internal(info, region);
+}
+
static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image)
{
qxl_draw_opaque_fb(qxl_fb_image, 0);
}
+static void qxl_fb_imageblit_internal(struct fb_info *info,
+ const struct fb_image *image)
+{
+ struct qxl_fbdev *qfbdev = info->par;
+ struct qxl_fb_image qxl_fb_image;
+
+ /* ensure proper order of rendering operations - TODO: must do this
+ * for everything. */
+ qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
+ qxl_fb_imageblit_safe(&qxl_fb_image);
+}
+
static void qxl_fb_imageblit(struct fb_info *info,
const struct fb_image *image)
{
struct qxl_fbdev *qfbdev = info->par;
struct qxl_device *qdev = qfbdev->qdev;
- struct qxl_fb_image qxl_fb_image;
if (!drm_can_sleep()) {
- /* we cannot do any ttm_bo allocation since that will fail on
- * ioremap_wc..__get_vm_area_node, so queue the work item
- * instead This can happen from printk inside an interrupt
- * context, i.e.: smp_apic_timer_interrupt..check_cpu_stall */
- qxl_io_log(qdev,
- "%s: TODO use RCU, mysterious locks with spin_lock\n",
- __func__);
+ qxl_fb_delayed_imageblit(qfbdev, image);
+ schedule_work(&qdev->fb_work);
return;
}
+ /* make sure any previous work is done */
+ flush_work(&qdev->fb_work);
+ qxl_fb_imageblit_internal(info, image);
+}
- /* ensure proper order of rendering operations - TODO: must do this
- * for everything. */
- qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image);
- qxl_fb_imageblit_safe(&qxl_fb_image);
+static void qxl_fb_work(struct work_struct *work)
+{
+ struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work);
+ unsigned long flags;
+ struct qxl_fb_op *entry, *tmp;
+ struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev;
+
+ /* since the irq context just adds entries to the end of the
+ list, dropping the lock should be fine, as the entry isn't modified
+ in the operation code */
+ spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+ list_for_each_entry_safe(entry, tmp, &qfbdev->delayed_ops, head) {
+ spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
+ switch (entry->op_type) {
+ case QXL_FB_OP_FILLRECT:
+ qxl_fb_fillrect_internal(qfbdev->helper.fbdev, &entry->op.fr);
+ break;
+ case QXL_FB_OP_COPYAREA:
+ qxl_fb_copyarea_internal(qfbdev->helper.fbdev, &entry->op.ca);
+ break;
+ case QXL_FB_OP_IMAGEBLIT:
+ qxl_fb_imageblit_internal(qfbdev->helper.fbdev, &entry->op.ib);
+ break;
+ }
+ spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
+ list_del(&entry->head);
+ kfree(entry);
+ }
+ spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
}
int qxl_fb_init(struct qxl_device *qdev)
{
+ INIT_WORK(&qdev->fb_work, qxl_fb_work);
return 0;
}
@@ -536,7 +677,8 @@ int qxl_fbdev_init(struct qxl_device *qdev)
qfbdev->qdev = qdev;
qdev->mode_info.qfbdev = qfbdev;
qfbdev->helper.funcs = &qxl_fb_helper_funcs;
-
+ spin_lock_init(&qfbdev->delayed_ops_lock);
+ INIT_LIST_HEAD(&qfbdev->delayed_ops);
ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper,
qxl_num_crtc /* num_crtc - QXL supports just 1 */,
QXLFB_CONN_LIMIT);
diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c
index 63c6715ad38..ae59e91cfb9 100644
--- a/drivers/gpu/drm/qxl/qxl_fence.c
+++ b/drivers/gpu/drm/qxl/qxl_fence.c
@@ -49,17 +49,11 @@
For some reason every so often qxl hw fails to release, things go wrong.
*/
-
-
-int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id)
+/* must be called with the fence lock held */
+void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id)
{
- struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
-
- spin_lock(&bo->tbo.bdev->fence_lock);
radix_tree_insert(&qfence->tree, rel_id, qfence);
qfence->num_active_releases++;
- spin_unlock(&bo->tbo.bdev->fence_lock);
- return 0;
}
int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index a235693aabb..1648e4125af 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size,
/* At least align on page size */
if (alignment < PAGE_SIZE)
alignment = PAGE_SIZE;
- r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo);
+ r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR(
@@ -101,32 +101,6 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
return 0;
}
-int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
- uint64_t *gpu_addr)
-{
- struct qxl_bo *qobj = obj->driver_private;
- int r;
-
- r = qxl_bo_reserve(qobj, false);
- if (unlikely(r != 0))
- return r;
- r = qxl_bo_pin(qobj, pin_domain, gpu_addr);
- qxl_bo_unreserve(qobj);
- return r;
-}
-
-void qxl_gem_object_unpin(struct drm_gem_object *obj)
-{
- struct qxl_bo *qobj = obj->driver_private;
- int r;
-
- r = qxl_bo_reserve(qobj, false);
- if (likely(r == 0)) {
- qxl_bo_unpin(qobj);
- qxl_bo_unreserve(qobj);
- }
-}
-
int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
index cf856206996..7fbcc35e8ad 100644
--- a/drivers/gpu/drm/qxl/qxl_image.c
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -30,31 +30,100 @@
#include "qxl_object.h"
static int
-qxl_image_create_helper(struct qxl_device *qdev,
+qxl_allocate_chunk(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_drm_image *image,
+ unsigned int chunk_size)
+{
+ struct qxl_drm_chunk *chunk;
+ int ret;
+
+ chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL);
+ if (!chunk)
+ return -ENOMEM;
+
+ ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo);
+ if (ret) {
+ kfree(chunk);
+ return ret;
+ }
+
+ list_add_tail(&chunk->head, &image->chunk_list);
+ return 0;
+}
+
+int
+qxl_image_alloc_objects(struct qxl_device *qdev,
struct qxl_release *release,
- struct qxl_bo **image_bo,
- const uint8_t *data,
- int width, int height,
- int depth, unsigned int hash,
- int stride)
+ struct qxl_drm_image **image_ptr,
+ int height, int stride)
+{
+ struct qxl_drm_image *image;
+ int ret;
+
+ image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL);
+ if (!image)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&image->chunk_list);
+
+ ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo);
+ if (ret) {
+ kfree(image);
+ return ret;
+ }
+
+ ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height);
+ if (ret) {
+ qxl_bo_unref(&image->bo);
+ kfree(image);
+ return ret;
+ }
+ *image_ptr = image;
+ return 0;
+}
+
+void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage)
{
+ struct qxl_drm_chunk *chunk, *tmp;
+
+ list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) {
+ qxl_bo_unref(&chunk->bo);
+ kfree(chunk);
+ }
+
+ qxl_bo_unref(&dimage->bo);
+ kfree(dimage);
+}
+
+static int
+qxl_image_init_helper(struct qxl_device *qdev,
+ struct qxl_release *release,
+ struct qxl_drm_image *dimage,
+ const uint8_t *data,
+ int width, int height,
+ int depth, unsigned int hash,
+ int stride)
+{
+ struct qxl_drm_chunk *drv_chunk;
struct qxl_image *image;
struct qxl_data_chunk *chunk;
int i;
int chunk_stride;
int linesize = width * depth / 8;
- struct qxl_bo *chunk_bo;
- int ret;
+ struct qxl_bo *chunk_bo, *image_bo;
void *ptr;
/* Chunk */
/* FIXME: Check integer overflow */
/* TODO: variable number of chunks */
+
+ drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head);
+
+ chunk_bo = drv_chunk->bo;
chunk_stride = stride; /* TODO: should use linesize, but it renders
wrong (check the bitmaps are sent correctly
first) */
- ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride,
- &chunk_bo);
-
+
ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
chunk = ptr;
chunk->data_size = height * chunk_stride;
@@ -102,7 +171,6 @@ qxl_image_create_helper(struct qxl_device *qdev,
while (remain > 0) {
page_base = out_offset & PAGE_MASK;
page_offset = offset_in_page(out_offset);
-
size = min((int)(PAGE_SIZE - page_offset), remain);
ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
@@ -116,14 +184,10 @@ qxl_image_create_helper(struct qxl_device *qdev,
}
}
}
-
-
qxl_bo_kunmap(chunk_bo);
- /* Image */
- ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo);
-
- ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0);
+ image_bo = dimage->bo;
+ ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
image = ptr;
image->descriptor.id = 0;
@@ -154,23 +218,20 @@ qxl_image_create_helper(struct qxl_device *qdev,
image->u.bitmap.stride = chunk_stride;
image->u.bitmap.palette = 0;
image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
- qxl_release_add_res(qdev, release, chunk_bo);
- qxl_bo_unreserve(chunk_bo);
- qxl_bo_unref(&chunk_bo);
- qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr);
+ qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
return 0;
}
-int qxl_image_create(struct qxl_device *qdev,
+int qxl_image_init(struct qxl_device *qdev,
struct qxl_release *release,
- struct qxl_bo **image_bo,
+ struct qxl_drm_image *dimage,
const uint8_t *data,
int x, int y, int width, int height,
int depth, int stride)
{
data += y * stride + x * (depth / 8);
- return qxl_image_create_helper(qdev, release, image_bo, data,
+ return qxl_image_init_helper(qdev, release, dimage, data,
width, height, depth, 0, stride);
}
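(Editor's sketch, not part of the patch.) With allocation and initialization of image objects now split, callers follow the ordering visible in the qxl_draw.c hunks above: allocate the image BOs against a release, reserve the release's list, then fill the image. A condensed sketch with placeholder names and trimmed error handling:

#include "qxl_drv.h"

/* Illustrative sketch of the new image-object flow. */
static int example_build_image(struct qxl_device *qdev,
			       struct qxl_release *release,
			       const uint8_t *data,
			       int width, int height, int depth, int stride)
{
	struct qxl_drm_image *dimage;
	int ret;

	/* allocates dimage->bo plus one data chunk sized for height * stride */
	ret = qxl_image_alloc_objects(qdev, release, &dimage, height, stride);
	if (ret)
		return ret;

	/* reserve the release's BO list before touching the buffers */
	ret = qxl_release_reserve_list(release, true);
	if (ret)
		goto out_free;

	ret = qxl_image_init(qdev, release, dimage, data,
			     0, 0, width, height, depth, stride);
	if (ret)
		qxl_release_backoff_reserve_list(release);

out_free:
	/* drop the local image references, as the draw paths above do */
	qxl_image_free_objects(qdev, dimage);
	return ret;
}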
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 27f45e49250..7b95c75e962 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -68,55 +68,60 @@ static int qxl_map_ioctl(struct drm_device *dev, void *data,
&qxl_map->offset);
}
+struct qxl_reloc_info {
+ int type;
+ struct qxl_bo *dst_bo;
+ uint32_t dst_offset;
+ struct qxl_bo *src_bo;
+ int src_offset;
+};
+
/*
* dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
* are on vram).
* *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
*/
static void
-apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
- struct qxl_bo *src, uint64_t src_off)
+apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
void *reloc_page;
-
- reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
- *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
- src, src_off);
- qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
+ reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
+ *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
+ info->src_bo,
+ info->src_offset);
+ qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
}
static void
-apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
- struct qxl_bo *src)
+apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info)
{
uint32_t id = 0;
void *reloc_page;
- if (src && !src->is_primary)
- id = src->surface_id;
+ if (info->src_bo && !info->src_bo->is_primary)
+ id = info->src_bo->surface_id;
- reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
- *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id;
- qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
+ reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
+ *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
+ qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
}
/* return holding the reference to this object */
static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
struct drm_file *file_priv, uint64_t handle,
- struct qxl_reloc_list *reloc_list)
+ struct qxl_release *release)
{
struct drm_gem_object *gobj;
struct qxl_bo *qobj;
int ret;
gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
- if (!gobj) {
- DRM_ERROR("bad bo handle %lld\n", handle);
+ if (!gobj)
return NULL;
- }
+
qobj = gem_to_qxl_bo(gobj);
- ret = qxl_bo_list_add(reloc_list, qobj);
+ ret = qxl_release_list_add(release, qobj);
if (ret)
return NULL;
@@ -129,151 +134,177 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
* However, the command as passed from user space must *not* contain the initial
* QXLReleaseInfo struct (first XXX bytes)
*/
-static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static int qxl_process_single_command(struct qxl_device *qdev,
+ struct drm_qxl_command *cmd,
+ struct drm_file *file_priv)
{
- struct qxl_device *qdev = dev->dev_private;
- struct drm_qxl_execbuffer *execbuffer = data;
- struct drm_qxl_command user_cmd;
- int cmd_num;
- struct qxl_bo *reloc_src_bo;
- struct qxl_bo *reloc_dst_bo;
- struct drm_qxl_reloc reloc;
+ struct qxl_reloc_info *reloc_info;
+ int release_type;
+ struct qxl_release *release;
+ struct qxl_bo *cmd_bo;
void *fb_cmd;
- int i, ret;
- struct qxl_reloc_list reloc_list;
+ int i, j, ret, num_relocs;
int unwritten;
- uint32_t reloc_dst_offset;
- INIT_LIST_HEAD(&reloc_list.bos);
- for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
- struct qxl_release *release;
- struct qxl_bo *cmd_bo;
- int release_type;
- struct drm_qxl_command *commands =
- (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
+ switch (cmd->type) {
+ case QXL_CMD_DRAW:
+ release_type = QXL_RELEASE_DRAWABLE;
+ break;
+ case QXL_CMD_SURFACE:
+ case QXL_CMD_CURSOR:
+ default:
+ DRM_DEBUG("Only draw commands in execbuffers\n");
+ return -EINVAL;
+ break;
+ }
- if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
- sizeof(user_cmd)))
- return -EFAULT;
- switch (user_cmd.type) {
- case QXL_CMD_DRAW:
- release_type = QXL_RELEASE_DRAWABLE;
- break;
- case QXL_CMD_SURFACE:
- case QXL_CMD_CURSOR:
- default:
- DRM_DEBUG("Only draw commands in execbuffers\n");
- return -EINVAL;
- break;
- }
+ if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info))
+ return -EINVAL;
- if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
- return -EINVAL;
+ if (!access_ok(VERIFY_READ,
+ (void *)(unsigned long)cmd->command,
+ cmd->command_size))
+ return -EFAULT;
- if (!access_ok(VERIFY_READ,
- (void *)(unsigned long)user_cmd.command,
- user_cmd.command_size))
- return -EFAULT;
+ reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
+ if (!reloc_info)
+ return -ENOMEM;
- ret = qxl_alloc_release_reserved(qdev,
- sizeof(union qxl_release_info) +
- user_cmd.command_size,
- release_type,
- &release,
- &cmd_bo);
- if (ret)
- return ret;
+ ret = qxl_alloc_release_reserved(qdev,
+ sizeof(union qxl_release_info) +
+ cmd->command_size,
+ release_type,
+ &release,
+ &cmd_bo);
+ if (ret)
+ goto out_free_reloc;
- /* TODO copy slow path code from i915 */
- fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
- unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size);
+ /* TODO copy slow path code from i915 */
+ fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
+ unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size);
- {
- struct qxl_drawable *draw = fb_cmd;
+ {
+ struct qxl_drawable *draw = fb_cmd;
+ draw->mm_time = qdev->rom->mm_clock;
+ }
- draw->mm_time = qdev->rom->mm_clock;
- }
- qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
- if (unwritten) {
- DRM_ERROR("got unwritten %d\n", unwritten);
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
- return -EFAULT;
+ qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
+ if (unwritten) {
+ DRM_ERROR("got unwritten %d\n", unwritten);
+ ret = -EFAULT;
+ goto out_free_release;
+ }
+
+ /* fill out reloc info structs */
+ num_relocs = 0;
+ for (i = 0; i < cmd->relocs_num; ++i) {
+ struct drm_qxl_reloc reloc;
+
+ if (DRM_COPY_FROM_USER(&reloc,
+ &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i],
+ sizeof(reloc))) {
+ ret = -EFAULT;
+ goto out_free_bos;
}
- for (i = 0 ; i < user_cmd.relocs_num; ++i) {
- if (DRM_COPY_FROM_USER(&reloc,
- &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i],
- sizeof(reloc))) {
- qxl_bo_list_unreserve(&reloc_list, true);
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
- return -EFAULT;
- }
+ /* add the bos to the list of bos to validate -
+ need to validate first then process relocs? */
+ if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) {
+ DRM_DEBUG("unknown reloc type %d\n", reloc_info[i].type);
- /* add the bos to the list of bos to validate -
- need to validate first then process relocs? */
- if (reloc.dst_handle) {
- reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
- reloc.dst_handle, &reloc_list);
- if (!reloc_dst_bo) {
- qxl_bo_list_unreserve(&reloc_list, true);
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
- return -EINVAL;
- }
- reloc_dst_offset = 0;
- } else {
- reloc_dst_bo = cmd_bo;
- reloc_dst_offset = release->release_offset;
+ ret = -EINVAL;
+ goto out_free_bos;
+ }
+ reloc_info[i].type = reloc.reloc_type;
+
+ if (reloc.dst_handle) {
+ reloc_info[i].dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
+ reloc.dst_handle, release);
+ if (!reloc_info[i].dst_bo) {
+ ret = -EINVAL;
+ reloc_info[i].src_bo = NULL;
+ goto out_free_bos;
}
-
- /* reserve and validate the reloc dst bo */
- if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
- reloc_src_bo =
- qxlhw_handle_to_bo(qdev, file_priv,
- reloc.src_handle, &reloc_list);
- if (!reloc_src_bo) {
- if (reloc_dst_bo != cmd_bo)
- drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
- qxl_bo_list_unreserve(&reloc_list, true);
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
- return -EINVAL;
- }
- } else
- reloc_src_bo = NULL;
- if (reloc.reloc_type == QXL_RELOC_TYPE_BO) {
- apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset,
- reloc_src_bo, reloc.src_offset);
- } else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) {
- apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo);
- } else {
- DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type);
- return -EINVAL;
+ reloc_info[i].dst_offset = reloc.dst_offset;
+ } else {
+ reloc_info[i].dst_bo = cmd_bo;
+ reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset;
+ }
+ num_relocs++;
+
+ /* reserve and validate the reloc dst bo */
+ if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
+ reloc_info[i].src_bo =
+ qxlhw_handle_to_bo(qdev, file_priv,
+ reloc.src_handle, release);
+ if (!reloc_info[i].src_bo) {
+ if (reloc_info[i].dst_bo != cmd_bo)
+ drm_gem_object_unreference_unlocked(&reloc_info[i].dst_bo->gem_base);
+ ret = -EINVAL;
+ goto out_free_bos;
}
+ reloc_info[i].src_offset = reloc.src_offset;
+ } else {
+ reloc_info[i].src_bo = NULL;
+ reloc_info[i].src_offset = 0;
+ }
+ }
- if (reloc_src_bo && reloc_src_bo != cmd_bo) {
- qxl_release_add_res(qdev, release, reloc_src_bo);
- drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base);
- }
+ /* validate all buffers */
+ ret = qxl_release_reserve_list(release, false);
+ if (ret)
+ goto out_free_bos;
- if (reloc_dst_bo != cmd_bo)
- drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
- }
- qxl_fence_releaseable(qdev, release);
+ for (i = 0; i < cmd->relocs_num; ++i) {
+ if (reloc_info[i].type == QXL_RELOC_TYPE_BO)
+ apply_reloc(qdev, &reloc_info[i]);
+ else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF)
+ apply_surf_reloc(qdev, &reloc_info[i]);
+ }
- ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true);
- if (ret == -ERESTARTSYS) {
- qxl_release_unreserve(qdev, release);
- qxl_release_free(qdev, release);
- qxl_bo_list_unreserve(&reloc_list, true);
+ ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
+ if (ret)
+ qxl_release_backoff_reserve_list(release);
+ else
+ qxl_release_fence_buffer_objects(release);
+
+out_free_bos:
+ for (j = 0; j < num_relocs; j++) {
+ if (reloc_info[j].dst_bo != cmd_bo)
+ drm_gem_object_unreference_unlocked(&reloc_info[j].dst_bo->gem_base);
+ if (reloc_info[j].src_bo && reloc_info[j].src_bo != cmd_bo)
+ drm_gem_object_unreference_unlocked(&reloc_info[j].src_bo->gem_base);
+ }
+out_free_release:
+ if (ret)
+ qxl_release_free(qdev, release);
+out_free_reloc:
+ kfree(reloc_info);
+ return ret;
+}
+
+static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct qxl_device *qdev = dev->dev_private;
+ struct drm_qxl_execbuffer *execbuffer = data;
+ struct drm_qxl_command user_cmd;
+ int cmd_num;
+ int ret;
+
+ for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
+
+ struct drm_qxl_command *commands =
+ (struct drm_qxl_command *)(uintptr_t)execbuffer->commands;
+
+ if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
+ sizeof(user_cmd)))
+ return -EFAULT;
+
+ ret = qxl_process_single_command(qdev, &user_cmd, file_priv);
+ if (ret)
return ret;
- }
- qxl_release_unreserve(qdev, release);
}
- qxl_bo_list_unreserve(&reloc_list, 0);
return 0;
}
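
For orientation: the rewritten qxl_process_single_command() above splits reloc handling into two passes (gather every reloc and its buffer objects, reserve and validate the whole bo list once, then apply the relocs and either fence or back off depending on the ring push). The standalone C sketch below only models that control flow; every type and helper in it is a made-up stand-in, not the real qxl/TTM API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the driver's qxl_reloc_info and qxl_release;
 * none of these types or helpers are the real qxl/TTM API. */
struct reloc_info { int type; int dst; int src; };
struct release { int reserved; };

static int reserve_all(struct release *r)  { r->reserved = 1; return 0; } /* ~ qxl_release_reserve_list() */
static void backoff_all(struct release *r) { r->reserved = 0; }           /* ~ qxl_release_backoff_reserve_list() */
static void fence_all(struct release *r)   { r->reserved = 0; }           /* ~ qxl_release_fence_buffer_objects() */
static int push_to_ring(struct release *r) { (void)r; return 0; }         /* ~ qxl_push_command_ring_release() */
static void apply_one(const struct reloc_info *ri) { printf("patch +%d <- +%d\n", ri->dst, ri->src); }

static int process_command(const struct reloc_info *user_relocs, int n)
{
	struct reloc_info *info = malloc(sizeof(*info) * n);
	struct release rel = { 0 };
	int i, ret;

	if (!info)
		return -1;
	for (i = 0; i < n; i++)          /* pass 1: copy and sanity-check every reloc */
		info[i] = user_relocs[i];
	ret = reserve_all(&rel);         /* reserve and validate the whole bo list once */
	if (ret)
		goto out;
	for (i = 0; i < n; i++)          /* pass 2: apply relocs against reserved buffers */
		apply_one(&info[i]);
	ret = push_to_ring(&rel);        /* hand the command to the ring */
	if (ret)
		backoff_all(&rel);       /* failed: drop the reservations */
	else
		fence_all(&rel);         /* succeeded: fence, then unreserve */
out:
	free(info);
	return ret;
}

int main(void)
{
	struct reloc_info relocs[2] = { { 0, 8, 16 }, { 1, 24, 32 } };

	return process_command(relocs, 2);
}
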
@@ -305,7 +336,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
goto out;
if (!qobj->pin_count) {
- qxl_ttm_placement_from_domain(qobj, qobj->type);
+ qxl_ttm_placement_from_domain(qobj, qobj->type, false);
ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
true, false);
if (unlikely(ret))
@@ -402,7 +433,7 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
return ret;
}
-struct drm_ioctl_desc qxl_ioctls[] = {
+const struct drm_ioctl_desc qxl_ioctls[] = {
DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 1191fe7788c..8691c76c5ef 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -51,20 +51,21 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
return false;
}
-void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
+void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
u32 c = 0;
+ u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0;
qbo->placement.fpfn = 0;
qbo->placement.lpfn = 0;
qbo->placement.placement = qbo->placements;
qbo->placement.busy_placement = qbo->placements;
if (domain == QXL_GEM_DOMAIN_VRAM)
- qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM;
+ qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag;
if (domain == QXL_GEM_DOMAIN_SURFACE)
- qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0;
+ qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag;
if (domain == QXL_GEM_DOMAIN_CPU)
- qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+ qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag;
if (!c)
qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
qbo->placement.num_placement = c;
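
The new pinned argument above simply folds a no-evict bit into each placement entry that qxl_ttm_placement_from_domain() builds. A small illustration with invented flag values (the real constants are TTM's TTM_PL_FLAG_* flags, not these):

#include <stdint.h>
#include <stdio.h>

/* Invented flag values for illustration only. */
#define PL_FLAG_CACHED   0x1u
#define PL_FLAG_VRAM     0x2u
#define PL_FLAG_NO_EVICT 0x4u

static uint32_t placement_flags(int pinned)
{
	uint32_t pflag = pinned ? PL_FLAG_NO_EVICT : 0;

	/* every placement the object may occupy inherits the no-evict bit */
	return PL_FLAG_CACHED | PL_FLAG_VRAM | pflag;
}

int main(void)
{
	printf("unpinned %#x, pinned %#x\n",
	       placement_flags(0), placement_flags(1));
	return 0;
}
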
@@ -73,7 +74,7 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
int qxl_bo_create(struct qxl_device *qdev,
- unsigned long size, bool kernel, u32 domain,
+ unsigned long size, bool kernel, bool pinned, u32 domain,
struct qxl_surface *surf,
struct qxl_bo **bo_ptr)
{
@@ -97,17 +98,16 @@ int qxl_bo_create(struct qxl_device *qdev,
kfree(bo);
return r;
}
- bo->gem_base.driver_private = NULL;
bo->type = domain;
- bo->pin_count = 0;
+ bo->pin_count = pinned ? 1 : 0;
bo->surface_id = 0;
qxl_fence_init(qdev, &bo->fence);
INIT_LIST_HEAD(&bo->list);
- atomic_set(&bo->reserve_count, 0);
+
if (surf)
bo->surf = *surf;
- qxl_ttm_placement_from_domain(bo, domain);
+ qxl_ttm_placement_from_domain(bo, domain, pinned);
r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, 0, !kernel, NULL, size,
@@ -228,7 +228,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
- int r, i;
+ int r;
if (bo->pin_count) {
bo->pin_count++;
@@ -236,9 +236,7 @@ int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
*gpu_addr = qxl_bo_gpu_offset(bo);
return 0;
}
- qxl_ttm_placement_from_domain(bo, domain);
- for (i = 0; i < bo->placement.num_placement; i++)
- bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+ qxl_ttm_placement_from_domain(bo, domain, true);
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (likely(r == 0)) {
bo->pin_count = 1;
@@ -317,53 +315,6 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
return 0;
}
-void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed)
-{
- struct qxl_bo_list *entry, *sf;
-
- list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) {
- qxl_bo_unreserve(entry->bo);
- list_del(&entry->lhead);
- kfree(entry);
- }
-}
-
-int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo)
-{
- struct qxl_bo_list *entry;
- int ret;
-
- list_for_each_entry(entry, &reloc_list->bos, lhead) {
- if (entry->bo == bo)
- return 0;
- }
-
- entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
-
- entry->bo = bo;
- list_add(&entry->lhead, &reloc_list->bos);
-
- ret = qxl_bo_reserve(bo, false);
- if (ret)
- return ret;
-
- if (!bo->pin_count) {
- qxl_ttm_placement_from_domain(bo, bo->type);
- ret = ttm_bo_validate(&bo->tbo, &bo->placement,
- true, false);
- if (ret)
- return ret;
- }
-
- /* allocate a surface for reserved + validated buffers */
- ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
- if (ret)
- return ret;
- return 0;
-}
-
int qxl_surf_evict(struct qxl_device *qdev)
{
return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index ee7ad79ce78..d458a140c02 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -59,7 +59,7 @@ static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
{
- return bo->tbo.addr_space_offset;
+ return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}
static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
@@ -88,7 +88,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type,
extern int qxl_bo_create(struct qxl_device *qdev,
unsigned long size,
- bool kernel, u32 domain,
+ bool kernel, bool pinned, u32 domain,
struct qxl_surface *surf,
struct qxl_bo **bo_ptr);
extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
@@ -99,9 +99,7 @@ extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
extern void qxl_bo_unref(struct qxl_bo **bo);
extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr);
extern int qxl_bo_unpin(struct qxl_bo *bo);
-extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain);
+extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned);
extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo);
-extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo);
-extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed);
#endif
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index b443d6751d5..0109a9644cb 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -38,7 +38,8 @@
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
-uint64_t
+
+static uint64_t
qxl_release_alloc(struct qxl_device *qdev, int type,
struct qxl_release **ret)
{
@@ -53,9 +54,9 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
return 0;
}
release->type = type;
- release->bo_count = 0;
release->release_offset = 0;
release->surface_release_id = 0;
+ INIT_LIST_HEAD(&release->bos);
idr_preload(GFP_KERNEL);
spin_lock(&qdev->release_idr_lock);
@@ -77,20 +78,20 @@ void
qxl_release_free(struct qxl_device *qdev,
struct qxl_release *release)
{
- int i;
-
- QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id,
- release->type, release->bo_count);
+ struct qxl_bo_list *entry, *tmp;
+ QXL_INFO(qdev, "release %d, type %d\n", release->id,
+ release->type);
if (release->surface_release_id)
qxl_surface_id_dealloc(qdev, release->surface_release_id);
- for (i = 0 ; i < release->bo_count; ++i) {
+ list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) {
+ struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
QXL_INFO(qdev, "release %llx\n",
- release->bos[i]->tbo.addr_space_offset
+ drm_vma_node_offset_addr(&entry->tv.bo->vma_node)
- DRM_FILE_OFFSET);
- qxl_fence_remove_release(&release->bos[i]->fence, release->id);
- qxl_bo_unref(&release->bos[i]);
+ qxl_fence_remove_release(&bo->fence, release->id);
+ qxl_bo_unref(&bo);
}
spin_lock(&qdev->release_idr_lock);
idr_remove(&qdev->release_idr, release->id);
@@ -98,83 +99,117 @@ qxl_release_free(struct qxl_device *qdev,
kfree(release);
}
-void
-qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release,
- struct qxl_bo *bo)
-{
- int i;
- for (i = 0; i < release->bo_count; i++)
- if (release->bos[i] == bo)
- return;
-
- if (release->bo_count >= QXL_MAX_RES) {
- DRM_ERROR("exceeded max resource on a qxl_release item\n");
- return;
- }
- release->bos[release->bo_count++] = qxl_bo_ref(bo);
-}
-
static int qxl_release_bo_alloc(struct qxl_device *qdev,
struct qxl_bo **bo)
{
int ret;
- ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL,
+ /* pin release bo's; they are too messy to evict */
+ ret = qxl_bo_create(qdev, PAGE_SIZE, false, true,
+ QXL_GEM_DOMAIN_VRAM, NULL,
bo);
return ret;
}
-int qxl_release_reserve(struct qxl_device *qdev,
- struct qxl_release *release, bool no_wait)
+int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
+{
+ struct qxl_bo_list *entry;
+
+ list_for_each_entry(entry, &release->bos, tv.head) {
+ if (entry->tv.bo == &bo->tbo)
+ return 0;
+ }
+
+ entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ qxl_bo_ref(bo);
+ entry->tv.bo = &bo->tbo;
+ list_add_tail(&entry->tv.head, &release->bos);
+ return 0;
+}
+
+static int qxl_release_validate_bo(struct qxl_bo *bo)
{
int ret;
- if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) {
- ret = qxl_bo_reserve(release->bos[0], no_wait);
+
+ if (!bo->pin_count) {
+ qxl_ttm_placement_from_domain(bo, bo->type, false);
+ ret = ttm_bo_validate(&bo->tbo, &bo->placement,
+ true, false);
if (ret)
return ret;
}
+
+ /* allocate a surface for reserved + validated buffers */
+ ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
+{
+ int ret;
+ struct qxl_bo_list *entry;
+
+ /* if only one object is on the release, it is the release itself;
+ since these objects are pinned, there is no need to reserve */
+ if (list_is_singular(&release->bos))
+ return 0;
+
+ ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(entry, &release->bos, tv.head) {
+ struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
+
+ ret = qxl_release_validate_bo(bo);
+ if (ret) {
+ ttm_eu_backoff_reservation(&release->ticket, &release->bos);
+ return ret;
+ }
+ }
return 0;
}
-void qxl_release_unreserve(struct qxl_device *qdev,
- struct qxl_release *release)
+void qxl_release_backoff_reserve_list(struct qxl_release *release)
{
- if (atomic_dec_and_test(&release->bos[0]->reserve_count))
- qxl_bo_unreserve(release->bos[0]);
+ /* if only one object is on the release, it is the release itself;
+ since these objects are pinned, there is no need to reserve */
+ if (list_is_singular(&release->bos))
+ return;
+
+ ttm_eu_backoff_reservation(&release->ticket, &release->bos);
}
+
int qxl_alloc_surface_release_reserved(struct qxl_device *qdev,
enum qxl_surface_cmd_type surface_cmd_type,
struct qxl_release *create_rel,
struct qxl_release **release)
{
- int ret;
-
if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) {
int idr_ret;
+ struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head);
struct qxl_bo *bo;
union qxl_release_info *info;
/* stash the release after the create command */
idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release);
- bo = qxl_bo_ref(create_rel->bos[0]);
+ bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo));
(*release)->release_offset = create_rel->release_offset + 64;
- qxl_release_add_res(qdev, *release, bo);
+ qxl_release_list_add(*release, bo);
- ret = qxl_release_reserve(qdev, *release, false);
- if (ret) {
- DRM_ERROR("release reserve failed\n");
- goto out_unref;
- }
info = qxl_release_map(qdev, *release);
info->id = idr_ret;
qxl_release_unmap(qdev, *release, info);
-
-out_unref:
qxl_bo_unref(&bo);
- return ret;
+ return 0;
}
return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd),
@@ -187,7 +222,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
{
struct qxl_bo *bo;
int idr_ret;
- int ret;
+ int ret = 0;
union qxl_release_info *info;
int cur_idx;
@@ -216,11 +251,6 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
mutex_unlock(&qdev->release_mutex);
return ret;
}
-
- /* pin releases bo's they are too messy to evict */
- ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false);
- qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL);
- qxl_bo_unreserve(qdev->current_release_bo[cur_idx]);
}
bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]);
@@ -231,36 +261,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size,
if (rbo)
*rbo = bo;
- qxl_release_add_res(qdev, *release, bo);
-
- ret = qxl_release_reserve(qdev, *release, false);
mutex_unlock(&qdev->release_mutex);
- if (ret)
- goto out_unref;
+
+ qxl_release_list_add(*release, bo);
info = qxl_release_map(qdev, *release);
info->id = idr_ret;
qxl_release_unmap(qdev, *release, info);
-out_unref:
qxl_bo_unref(&bo);
return ret;
}
-int qxl_fence_releaseable(struct qxl_device *qdev,
- struct qxl_release *release)
-{
- int i, ret;
- for (i = 0; i < release->bo_count; i++) {
- if (!release->bos[i]->tbo.sync_obj)
- release->bos[i]->tbo.sync_obj = &release->bos[i]->fence;
- ret = qxl_fence_add_release(&release->bos[i]->fence, release->id);
- if (ret)
- return ret;
- }
- return 0;
-}
-
struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
uint64_t id)
{
@@ -273,10 +285,7 @@ struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
DRM_ERROR("failed to find id in release_idr\n");
return NULL;
}
- if (release->bo_count < 1) {
- DRM_ERROR("read a released resource with 0 bos\n");
- return NULL;
- }
+
return release;
}
@@ -285,9 +294,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev,
{
void *ptr;
union qxl_release_info *info;
- struct qxl_bo *bo = release->bos[0];
+ struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
+ struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE);
+ if (!ptr)
+ return NULL;
info = ptr + (release->release_offset & ~PAGE_SIZE);
return info;
}
@@ -296,9 +308,51 @@ void qxl_release_unmap(struct qxl_device *qdev,
struct qxl_release *release,
union qxl_release_info *info)
{
- struct qxl_bo *bo = release->bos[0];
+ struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head);
+ struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);
void *ptr;
ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE);
qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
}
+
+void qxl_release_fence_buffer_objects(struct qxl_release *release)
+{
+ struct ttm_validate_buffer *entry;
+ struct ttm_buffer_object *bo;
+ struct ttm_bo_global *glob;
+ struct ttm_bo_device *bdev;
+ struct ttm_bo_driver *driver;
+ struct qxl_bo *qbo;
+
+ /* if only one object is on the release, it is the release itself;
+ since these objects are pinned, there is no need to reserve */
+ if (list_is_singular(&release->bos))
+ return;
+
+ bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo;
+ bdev = bo->bdev;
+ driver = bdev->driver;
+ glob = bo->glob;
+
+ spin_lock(&glob->lru_lock);
+ spin_lock(&bdev->fence_lock);
+
+ list_for_each_entry(entry, &release->bos, head) {
+ bo = entry->bo;
+ qbo = to_qxl_bo(bo);
+
+ if (!entry->bo->sync_obj)
+ entry->bo->sync_obj = &qbo->fence;
+
+ qxl_fence_add_release_locked(&qbo->fence, release->id);
+
+ ttm_bo_add_to_lru(bo);
+ ww_mutex_unlock(&bo->resv->lock);
+ entry->reserved = false;
+ }
+ spin_unlock(&bdev->fence_lock);
+ spin_unlock(&glob->lru_lock);
+ ww_acquire_fini(&release->ticket);
+}
+
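
qxl_release_fence_buffer_objects() above is the tail end of the reservation that qxl_release_reserve_list() started: each buffer gets the release's fence attached while still reserved, is put back on the LRU, is unlocked, and finally the acquire ticket is finished. A rough standalone model of that ordering, using stub flags instead of the real ww_mutex/TTM objects:

#include <stdio.h>

/* Stub buffer object with two flags; an illustration of the ordering only,
 * not the real ww_mutex / ttm_validate_buffer machinery. */
struct buf { int locked; int fenced; };

static void reserve_list(struct buf *b, int n)      /* ~ ttm_eu_reserve_buffers() */
{
	for (int i = 0; i < n; i++)
		b[i].locked = 1;                    /* acquire ticket opened, all bos locked */
}

static void fence_and_unlock(struct buf *b, int n)  /* ~ qxl_release_fence_buffer_objects() */
{
	for (int i = 0; i < n; i++) {
		b[i].fenced = 1;                    /* attach the fence while still locked */
		b[i].locked = 0;                    /* put back on the LRU and drop the lock */
	}
	/* here the acquire ticket would be finished (ww_acquire_fini) */
}

int main(void)
{
	static struct buf bos[3];

	reserve_list(bos, 3);
	fence_and_unlock(bos, 3);
	printf("bo0: fenced=%d locked=%d\n", bos[0].fenced, bos[0].locked);
	return 0;
}
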
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 489cb8cece4..037786d7c1d 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -206,13 +206,15 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
return;
}
qbo = container_of(bo, struct qxl_bo, tbo);
- qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU);
+ qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false);
*placement = qbo->placement;
}
static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- return 0;
+ struct qxl_bo *qbo = to_qxl_bo(bo);
+
+ return drm_vma_node_verify_access(&qbo->gem_base.vma_node, filp);
}
static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index d4660cf942a..c451257f08f 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -540,7 +540,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
dev_priv->ring.end = ((u32 *) dev_priv->cce_ring->handle
+ init->ring_size / sizeof(u32));
dev_priv->ring.size = init->ring_size;
- dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+ dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 472c38fe123..5bd307cd8da 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -48,7 +48,6 @@ static const struct file_operations r128_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = r128_compat_ioctl,
#endif
@@ -57,7 +56,7 @@ static const struct file_operations r128_driver_fops = {
static struct drm_driver driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
+ DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof(drm_r128_buf_priv_t),
.load = r128_driver_load,
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 930c71b2fb5..56eb5e3f543 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -131,7 +131,7 @@ typedef struct drm_r128_buf_priv {
drm_r128_freelist_t *list_entry;
} drm_r128_buf_priv_t;
-extern struct drm_ioctl_desc r128_ioctls[];
+extern const struct drm_ioctl_desc r128_ioctls[];
extern int r128_max_ioctl;
/* r128_cce.c */
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 19bb7e6f3d9..01dd9aef9f0 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -1643,7 +1643,7 @@ void r128_driver_lastclose(struct drm_device *dev)
r128_do_cleanup_cce(dev);
}
-struct drm_ioctl_desc r128_ioctls[] = {
+const struct drm_ioctl_desc r128_ioctls[] = {
DRM_IOCTL_DEF_DRV(R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index c3df52c1a60..306364a1ecd 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -72,14 +72,32 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit_shaders.o \
- r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
- evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
+ radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
+ evergreen.o evergreen_cs.o evergreen_blit_shaders.o \
evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \
atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \
si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \
r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
- trinity_smc.o ni_dpm.o si_smc.o si_dpm.o
+ trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
+ ci_dpm.o dce6_afmt.o
+
+# add async DMA block
+radeon-y += \
+ r600_dma.o \
+ rv770_dma.o \
+ evergreen_dma.o \
+ ni_dma.o \
+ si_dma.o \
+ cik_sdma.o \
+
+# add UVD block
+radeon-y += \
+ radeon_uvd.o \
+ uvd_v1_0.o \
+ uvd_v2_2.o \
+ uvd_v3_1.o \
+ uvd_v4_2.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index fb441a790f3..15da7ef344a 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1222,12 +1222,17 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
int r;
mutex_lock(&ctx->mutex);
+ /* reset data block */
+ ctx->data_block = 0;
/* reset reg block */
ctx->reg_block = 0;
/* reset fb window */
ctx->fb_base = 0;
/* reset io mode */
ctx->io_mode = ATOM_IO_MM;
+ /* reset divmul */
+ ctx->divmul[0] = 0;
+ ctx->divmul[1] = 0;
r = atom_execute_table_locked(ctx, index, params);
mutex_unlock(&ctx->mutex);
return r;
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 16b120c3f14..af10f8571d8 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -7661,618 +7661,6 @@ typedef struct _ATOM_POWERPLAY_INFO_V3
ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
}ATOM_POWERPLAY_INFO_V3;
-/* New PPlib */
-/**************************************************************************/
-typedef struct _ATOM_PPLIB_THERMALCONTROLLER
-
-{
- UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_*
- UCHAR ucI2cLine; // as interpreted by DAL I2C
- UCHAR ucI2cAddress;
- UCHAR ucFanParameters; // Fan Control Parameters.
- UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only.
- UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only.
- UCHAR ucReserved; // ----
- UCHAR ucFlags; // to be defined
-} ATOM_PPLIB_THERMALCONTROLLER;
-
-#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
-#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller.
-
-#define ATOM_PP_THERMALCONTROLLER_NONE 0
-#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_LM64 5
-#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib
-#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
-#define ATOM_PP_THERMALCONTROLLER_RV770 8
-#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
-#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
-#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
-#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
-#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally
-#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
-#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16
-#define ATOM_PP_THERMALCONTROLLER_LM96163 17
-#define ATOM_PP_THERMALCONTROLLER_CISLANDS 18
-
-// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
-// We probably should reserve the bit 0x80 for this use.
-// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
-// The driver can pick the correct internal controller based on the ASIC.
-
-#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
-#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller
-
-typedef struct _ATOM_PPLIB_STATE
-{
- UCHAR ucNonClockStateIndex;
- UCHAR ucClockStateIndices[1]; // variable-sized
-} ATOM_PPLIB_STATE;
-
-
-typedef struct _ATOM_PPLIB_FANTABLE
-{
- UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same.
- UCHAR ucTHyst; // Temperature hysteresis. Integer.
- USHORT usTMin; // The temperature, in 0.01 centigrades, below which we just run at a minimal PWM.
- USHORT usTMed; // The middle temperature where we change slopes.
- USHORT usTHigh; // The high point above TMed for adjusting the second slope.
- USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
- USHORT usPWMMed; // The PWM value (in percent) at TMed.
- USHORT usPWMHigh; // The PWM value at THigh.
-} ATOM_PPLIB_FANTABLE;
-
-typedef struct _ATOM_PPLIB_FANTABLE2
-{
- ATOM_PPLIB_FANTABLE basicTable;
- USHORT usTMax; // The max temperature
-} ATOM_PPLIB_FANTABLE2;
-
-typedef struct _ATOM_PPLIB_EXTENDEDHEADER
-{
- USHORT usSize;
- ULONG ulMaxEngineClock; // For Overdrive.
- ULONG ulMaxMemoryClock; // For Overdrive.
- // Add extra system parameters here, always adjust size to include all fields.
- USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
- USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table
- USHORT usSAMUTableOffset; //points to ATOM_PPLIB_SAMU_Table
- USHORT usPPMTableOffset; //points to ATOM_PPLIB_PPM_Table
-} ATOM_PPLIB_EXTENDEDHEADER;
-
-//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
-#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
-#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
-#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
-#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
-#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
-#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
-#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
-#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
-#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
-#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
-#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
-#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
-#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
-#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
-#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
-#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC.
-#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
-#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver supports BACO state.
-#define ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE 0x00040000 // Does the driver supports new CAC voltage table.
-#define ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY 0x00080000 // Does the driver supports revert GPIO5 polarity.
-#define ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17 0x00100000 // Does the driver supports thermal2GPIO17.
-#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE 0x00200000 // Does the driver supports VR HOT GPIO Configurable.
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE
-{
- ATOM_COMMON_TABLE_HEADER sHeader;
-
- UCHAR ucDataRevision;
-
- UCHAR ucNumStates;
- UCHAR ucStateEntrySize;
- UCHAR ucClockInfoSize;
- UCHAR ucNonClockSize;
-
- // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
- USHORT usStateArrayOffset;
-
- // offset from start of this table to array of ASIC-specific structures,
- // currently ATOM_PPLIB_CLOCK_INFO.
- USHORT usClockInfoArrayOffset;
-
- // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
- USHORT usNonClockInfoArrayOffset;
-
- USHORT usBackbiasTime; // in microseconds
- USHORT usVoltageTime; // in microseconds
- USHORT usTableSize; //the size of this structure, or the extended structure
-
- ULONG ulPlatformCaps; // See ATOM_PPLIB_CAPS_*
-
- ATOM_PPLIB_THERMALCONTROLLER sThermalController;
-
- USHORT usBootClockInfoOffset;
- USHORT usBootNonClockInfoOffset;
-
-} ATOM_PPLIB_POWERPLAYTABLE;
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
-{
- ATOM_PPLIB_POWERPLAYTABLE basicTable;
- UCHAR ucNumCustomThermalPolicy;
- USHORT usCustomThermalPolicyArrayOffset;
-}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
-{
- ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
- USHORT usFormatID; // To be used ONLY by PPGen.
- USHORT usFanTableOffset;
- USHORT usExtendendedHeaderOffset;
-} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
-{
- ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
- ULONG ulGoldenPPID; // PPGen use only
- ULONG ulGoldenRevision; // PPGen use only
- USHORT usVddcDependencyOnSCLKOffset;
- USHORT usVddciDependencyOnMCLKOffset;
- USHORT usVddcDependencyOnMCLKOffset;
- USHORT usMaxClockVoltageOnDCOffset;
- USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
- USHORT usMvddDependencyOnMCLKOffset;
-} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
-
-typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
-{
- ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
- ULONG ulTDPLimit;
- ULONG ulNearTDPLimit;
- ULONG ulSQRampingThreshold;
- USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table
- ULONG ulCACLeakage; // The iLeakage for driver calculated CAC leakage table
- USHORT usTDPODLimit;
- USHORT usLoadLineSlope; // in milliOhms * 100
-} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
-
-//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
-#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
-#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
-#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0
-#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1
-#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3
-#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5
-// 2, 4, 6, 7 are reserved
-
-#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008
-#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010
-#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020
-#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040
-#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080
-#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100
-#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200
-#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
-#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
-#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
-#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
-#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
-#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
-
-//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
-#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
-#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002
-#define ATOM_PPLIB_CLASSIFICATION2_MVC 0x0004 //Multi-View Codec (BD-3D)
-
-//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
-#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
-#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
-
-// 0 is 2.5Gb/s, 1 is 5Gb/s
-#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004
-#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2
-
-// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
-#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8
-#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3
-
-// lookup into reduced refresh-rate table
-#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00
-#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
-
-#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0
-#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1
-// 2-15 TBD as needed.
-
-#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
-#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
-
-#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
-
-#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
-
-//memory related flags
-#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000
-
-//M3 Arb //2bits, current 3 sets of parameters in total
-#define ATOM_PPLIB_M3ARB_MASK 0x00060000
-#define ATOM_PPLIB_M3ARB_SHIFT 17
-
-#define ATOM_PPLIB_ENABLE_DRR 0x00080000
-
-// remaining 16 bits are reserved
-typedef struct _ATOM_PPLIB_THERMAL_STATE
-{
- UCHAR ucMinTemperature;
- UCHAR ucMaxTemperature;
- UCHAR ucThermalAction;
-}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
-
-// Contained in an array starting at the offset
-// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
-// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
-#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
-#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
-typedef struct _ATOM_PPLIB_NONCLOCK_INFO
-{
- USHORT usClassification;
- UCHAR ucMinTemperature;
- UCHAR ucMaxTemperature;
- ULONG ulCapsAndSettings;
- UCHAR ucRequiredPower;
- USHORT usClassification2;
- ULONG ulVCLK;
- ULONG ulDCLK;
- UCHAR ucUnused[5];
-} ATOM_PPLIB_NONCLOCK_INFO;
-
-// Contained in an array starting at the offset
-// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
-// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
-typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
-{
- USHORT usEngineClockLow;
- UCHAR ucEngineClockHigh;
-
- USHORT usMemoryClockLow;
- UCHAR ucMemoryClockHigh;
-
- USHORT usVDDC;
- USHORT usUnused1;
- USHORT usUnused2;
-
- ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
-
-} ATOM_PPLIB_R600_CLOCK_INFO;
-
-// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
-#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1
-#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2
-#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
-#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
-#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
-#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
-
-typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
-{
- USHORT usEngineClockLow;
- UCHAR ucEngineClockHigh;
-
- USHORT usMemoryClockLow;
- UCHAR ucMemoryClockHigh;
-
- USHORT usVDDC;
- USHORT usVDDCI;
- USHORT usUnused;
-
- ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
-
-} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
-
-typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
-{
- USHORT usEngineClockLow;
- UCHAR ucEngineClockHigh;
-
- USHORT usMemoryClockLow;
- UCHAR ucMemoryClockHigh;
-
- USHORT usVDDC;
- USHORT usVDDCI;
- UCHAR ucPCIEGen;
- UCHAR ucUnused1;
-
- ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
-
-} ATOM_PPLIB_SI_CLOCK_INFO;
-
-typedef struct _ATOM_PPLIB_CI_CLOCK_INFO
-{
- USHORT usEngineClockLow;
- UCHAR ucEngineClockHigh;
-
- USHORT usMemoryClockLow;
- UCHAR ucMemoryClockHigh;
-
- UCHAR ucPCIEGen;
- USHORT usPCIELane;
-} ATOM_PPLIB_CI_CLOCK_INFO;
-
-typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
-
-{
- USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600).
- UCHAR ucLowEngineClockHigh;
- USHORT usHighEngineClockLow; // High Engine clock in MHz.
- UCHAR ucHighEngineClockHigh;
- USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
- UCHAR ucMemoryClockHigh; // Currentyl unused.
- UCHAR ucPadding; // For proper alignment and size.
- USHORT usVDDC; // For the 780, use: None, Low, High, Variable
- UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
- UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement.
- USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
- ULONG ulFlags;
-} ATOM_PPLIB_RS780_CLOCK_INFO;
-
-#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
-#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
-#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
-#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
-
-#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
-#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
-#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
-
-#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
-#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
-#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
-
-typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
- USHORT usEngineClockLow; //clockfrequency & 0xFFFF. The unit is in 10khz
- UCHAR ucEngineClockHigh; //clockfrequency >> 16.
- UCHAR vddcIndex; //2-bit vddc index;
- USHORT tdpLimit;
- //please initalize to 0
- USHORT rsv1;
- //please initialize to 0s
- ULONG rsv2[2];
-}ATOM_PPLIB_SUMO_CLOCK_INFO;
-
-
-
-typedef struct _ATOM_PPLIB_STATE_V2
-{
- //number of valid dpm levels in this state; Driver uses it to calculate the whole
- //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
- UCHAR ucNumDPMLevels;
-
- //a index to the array of nonClockInfos
- UCHAR nonClockInfoIndex;
- /**
- * Driver will read the first ucNumDPMLevels in this array
- */
- UCHAR clockInfoIndex[1];
-} ATOM_PPLIB_STATE_V2;
-
-typedef struct _StateArray{
- //how many states we have
- UCHAR ucNumEntries;
-
- ATOM_PPLIB_STATE_V2 states[1];
-}StateArray;
-
-
-typedef struct _ClockInfoArray{
- //how many clock levels we have
- UCHAR ucNumEntries;
-
- //sizeof(ATOM_PPLIB_CLOCK_INFO)
- UCHAR ucEntrySize;
-
- UCHAR clockInfo[1];
-}ClockInfoArray;
-
-typedef struct _NonClockInfoArray{
-
- //how many non-clock levels we have. normally should be same as number of states
- UCHAR ucNumEntries;
- //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
- UCHAR ucEntrySize;
-
- ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
-}NonClockInfoArray;
-
-typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
-{
- USHORT usClockLow;
- UCHAR ucClockHigh;
- USHORT usVoltage;
-}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
-
-typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
-{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
-}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
-
-typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
-{
- USHORT usSclkLow;
- UCHAR ucSclkHigh;
- USHORT usMclkLow;
- UCHAR ucMclkHigh;
- USHORT usVddc;
- USHORT usVddci;
-}ATOM_PPLIB_Clock_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
-{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
-}ATOM_PPLIB_Clock_Voltage_Limit_Table;
-
-typedef struct _ATOM_PPLIB_CAC_Leakage_Record
-{
- USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations; For CI and newer, we use this as the real VDDC value.
- ULONG ulLeakageValue; // For CI and newer we use this as the "fake" standar VDDC value.
-}ATOM_PPLIB_CAC_Leakage_Record;
-
-typedef struct _ATOM_PPLIB_CAC_Leakage_Table
-{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries.
-}ATOM_PPLIB_CAC_Leakage_Table;
-
-typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
-{
- USHORT usVoltage;
- USHORT usSclkLow;
- UCHAR ucSclkHigh;
- USHORT usMclkLow;
- UCHAR ucMclkHigh;
-}ATOM_PPLIB_PhaseSheddingLimits_Record;
-
-typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
-{
- UCHAR ucNumEntries; // Number of entries.
- ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries.
-}ATOM_PPLIB_PhaseSheddingLimits_Table;
-
-typedef struct _VCEClockInfo{
- USHORT usEVClkLow;
- UCHAR ucEVClkHigh;
- USHORT usECClkLow;
- UCHAR ucECClkHigh;
-}VCEClockInfo;
-
-typedef struct _VCEClockInfoArray{
- UCHAR ucNumEntries;
- VCEClockInfo entries[1];
-}VCEClockInfoArray;
-
-typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
-{
- USHORT usVoltage;
- UCHAR ucVCEClockInfoIndex;
-}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
-{
- UCHAR numEntries;
- ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
-}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
-
-typedef struct _ATOM_PPLIB_VCE_State_Record
-{
- UCHAR ucVCEClockInfoIndex;
- UCHAR ucClockInfoIndex; //highest 2 bits indicates memory p-states, lower 6bits indicates index to ClockInfoArrary
-}ATOM_PPLIB_VCE_State_Record;
-
-typedef struct _ATOM_PPLIB_VCE_State_Table
-{
- UCHAR numEntries;
- ATOM_PPLIB_VCE_State_Record entries[1];
-}ATOM_PPLIB_VCE_State_Table;
-
-
-typedef struct _ATOM_PPLIB_VCE_Table
-{
- UCHAR revid;
-// VCEClockInfoArray array;
-// ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
-// ATOM_PPLIB_VCE_State_Table states;
-}ATOM_PPLIB_VCE_Table;
-
-
-typedef struct _UVDClockInfo{
- USHORT usVClkLow;
- UCHAR ucVClkHigh;
- USHORT usDClkLow;
- UCHAR ucDClkHigh;
-}UVDClockInfo;
-
-typedef struct _UVDClockInfoArray{
- UCHAR ucNumEntries;
- UVDClockInfo entries[1];
-}UVDClockInfoArray;
-
-typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
-{
- USHORT usVoltage;
- UCHAR ucUVDClockInfoIndex;
-}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
-{
- UCHAR numEntries;
- ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
-}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
-
-typedef struct _ATOM_PPLIB_UVD_State_Record
-{
- UCHAR ucUVDClockInfoIndex;
- UCHAR ucClockInfoIndex; //highest 2 bits indicates memory p-states, lower 6bits indicates index to ClockInfoArrary
-}ATOM_PPLIB_UVD_State_Record;
-
-typedef struct _ATOM_PPLIB_UVD_State_Table
-{
- UCHAR numEntries;
- ATOM_PPLIB_UVD_State_Record entries[1];
-}ATOM_PPLIB_UVD_State_Table;
-
-
-typedef struct _ATOM_PPLIB_UVD_Table
-{
- UCHAR revid;
-// UVDClockInfoArray array;
-// ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
-// ATOM_PPLIB_UVD_State_Table states;
-}ATOM_PPLIB_UVD_Table;
-
-
-typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
-{
- USHORT usVoltage;
- USHORT usSAMClockLow;
- UCHAR ucSAMClockHigh;
-}ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
-
-typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
- UCHAR numEntries;
- ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
-}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
-
-typedef struct _ATOM_PPLIB_SAMU_Table
-{
- UCHAR revid;
- ATOM_PPLIB_SAMClk_Voltage_Limit_Table limits;
-}ATOM_PPLIB_SAMU_Table;
-
-#define ATOM_PPM_A_A 1
-#define ATOM_PPM_A_I 2
-typedef struct _ATOM_PPLIB_PPM_Table
-{
- UCHAR ucRevId;
- UCHAR ucPpmDesign; //A+I or A+A
- USHORT usCpuCoreNumber;
- ULONG ulPlatformTDP;
- ULONG ulSmallACPlatformTDP;
- ULONG ulPlatformTDC;
- ULONG ulSmallACPlatformTDC;
- ULONG ulApuTDP;
- ULONG ulDGpuTDP;
- ULONG ulDGpuUlvPower;
- ULONG ulTjmax;
-} ATOM_PPLIB_PPM_Table;
-
-/**************************************************************************/
-
// Following definitions are for compatibility issue in different SW components.
#define ATOM_MASTER_DATA_TABLE_REVISION 0x01
@@ -8485,3 +7873,6 @@ typedef struct {
#endif /* _ATOMBIOS_H */
+
+#include "pptable.h"
+
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b9d3b43f19c..bf87f6d435f 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1910,6 +1910,12 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
int i;
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ /* disable the GRPH */
+ if (ASIC_IS_DCE4(rdev))
+ WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
+ else if (ASIC_IS_AVIVO(rdev))
+ WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 0);
+
if (ASIC_IS_DCE6(rdev))
atombios_powergate_crtc(crtc, ATOM_ENABLE);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 064023bed48..00885417fff 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -44,6 +44,41 @@ static char *pre_emph_names[] = {
};
/***** radeon AUX functions *****/
+
+/* Atom needs data in little endian format
+ * so swap as appropriate when copying data to
+ * or from atom. Note that atom operates on
+ * dw units.
+ */
+void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
+{
+#ifdef __BIG_ENDIAN
+ u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
+ u32 *dst32, *src32;
+ int i;
+
+ memcpy(src_tmp, src, num_bytes);
+ src32 = (u32 *)src_tmp;
+ dst32 = (u32 *)dst_tmp;
+ if (to_le) {
+ for (i = 0; i < ((num_bytes + 3) / 4); i++)
+ dst32[i] = cpu_to_le32(src32[i]);
+ memcpy(dst, dst_tmp, num_bytes);
+ } else {
+ u8 dws = num_bytes & ~3;
+ for (i = 0; i < ((num_bytes + 3) / 4); i++)
+ dst32[i] = le32_to_cpu(src32[i]);
+ memcpy(dst, dst_tmp, dws);
+ if (num_bytes % 4) {
+ for (i = 0; i < (num_bytes % 4); i++)
+ dst[dws+i] = dst_tmp[dws+i];
+ }
+ }
+#else
+ memcpy(dst, src, num_bytes);
+#endif
+}
+
union aux_channel_transaction {
PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
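
radeon_atom_copy_swap() above exists because AtomBIOS works on little-endian dwords while the host CPU may be big-endian. The sketch below shows the general idea (swap a byte buffer in 32-bit units via a padded temporary); it is an illustration of the technique only, not the driver helper, and it ignores the driver's distinction between the to-LE and from-LE tail handling.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Swap a small buffer to/from little endian in 32-bit units by way of a
 * padded temporary; sizes beyond the scratch buffer are rejected. */
static int copy_swap32(uint8_t *dst, const uint8_t *src, size_t n)
{
	uint8_t tmp[32] = { 0 };
	size_t words = (n + 3) / 4;

	if (words * 4 > sizeof(tmp))
		return -1;
	memcpy(tmp, src, n);
	for (size_t i = 0; i < words; i++) {
		uint32_t w;

		memcpy(&w, tmp + 4 * i, 4);
		w = (w >> 24) | ((w >> 8) & 0x0000ff00u) |
		    ((w << 8) & 0x00ff0000u) | (w << 24);
		memcpy(tmp + 4 * i, &w, 4);
	}
	memcpy(dst, tmp, n);
	return 0;
}

int main(void)
{
	uint8_t in[6] = { 1, 2, 3, 4, 5, 6 }, out[6];

	if (!copy_swap32(out, in, sizeof(in)))
		printf("%02x %02x %02x %02x %02x %02x\n",
		       out[0], out[1], out[2], out[3], out[4], out[5]);
	return 0;
}
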
@@ -65,10 +100,10 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
- memcpy(base, send, send_bytes);
+ radeon_atom_copy_swap(base, send, send_bytes, true);
- args.v1.lpAuxRequest = 0 + 4;
- args.v1.lpDataOut = 16 + 4;
+ args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
+ args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
args.v1.ucDataOutLen = 0;
args.v1.ucChannelID = chan->rec.i2c_id;
args.v1.ucDelay = delay / 10;
@@ -102,7 +137,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
recv_bytes = recv_size;
if (recv && recv_size)
- memcpy(recv, base + 16, recv_bytes);
+ radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);
return recv_bytes;
}
@@ -550,7 +585,7 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
return false;
}
- DRM_DEBUG_KMS("link status %*ph\n", 6, link_status);
+ DRM_DEBUG_KMS("link status %6ph\n", link_status);
return true;
}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 092275d53d4..dfac7965ea2 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -682,8 +682,6 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
@@ -710,8 +708,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio &&
- !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
+ radeon_audio)
return ATOM_ENCODER_MODE_HDMI;
else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
@@ -722,8 +719,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_HDMIA:
default:
if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio &&
- !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
+ radeon_audio)
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
@@ -737,8 +733,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
- radeon_audio &&
- !ASIC_IS_DCE6(rdev)) /* remove once we support DCE6 */
+ radeon_audio)
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index 082338df708..deaf98cdca3 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -27,10 +27,12 @@
#include "radeon.h"
#include "atom.h"
+extern void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
+
#define TARGET_HW_I2C_CLOCK 50
/* these are a limitation of ProcessI2cChannelTransaction not the hw */
-#define ATOM_MAX_HW_I2C_WRITE 2
+#define ATOM_MAX_HW_I2C_WRITE 3
#define ATOM_MAX_HW_I2C_READ 255
static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
@@ -50,20 +52,24 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
if (flags & HW_I2C_WRITE) {
if (num > ATOM_MAX_HW_I2C_WRITE) {
- DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 2)\n", num);
+ DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 3)\n", num);
return -EINVAL;
}
- memcpy(&out, buf, num);
+ args.ucRegIndex = buf[0];
+ if (num > 1)
+ memcpy(&out, &buf[1], num - 1);
args.lpI2CDataOut = cpu_to_le16(out);
} else {
if (num > ATOM_MAX_HW_I2C_READ) {
DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
return -EINVAL;
}
+ args.ucRegIndex = 0;
+ args.lpI2CDataOut = 0;
}
+ args.ucFlag = flags;
args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
- args.ucRegIndex = 0;
args.ucTransBytes = num;
args.ucSlaveAddr = slave_addr << 1;
args.ucLineNumber = chan->rec.i2c_id;
@@ -77,7 +83,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
}
if (!(flags & HW_I2C_WRITE))
- memcpy(buf, base, num);
+ radeon_atom_copy_swap(buf, base, num, false);
return 0;
}
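
The hardware i2c change above packs the register index into ucRegIndex and only the remaining payload into the data word (lpI2CDataOut), which is why ATOM_MAX_HW_I2C_WRITE can grow from 2 to 3. A toy illustration of that packing; the struct and field names here are invented stand-ins, not the real atom parameter tables.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Invented mirror of the argument layout; field names are illustrative and
 * do not match the real atom parameter struct. */
struct i2c_args { uint8_t reg_index; uint16_t data_out; };

static int pack_write(struct i2c_args *a, const uint8_t *buf, int num)
{
	uint16_t out = 0;

	if (num < 1 || num > 3)   /* 1 register byte plus at most 2 payload bytes */
		return -1;
	a->reg_index = buf[0];
	if (num > 1)
		memcpy(&out, &buf[1], num - 1);
	a->data_out = out;        /* the driver additionally converts this to LE */
	return 0;
}

int main(void)
{
	struct i2c_args a;
	uint8_t msg[3] = { 0x10, 0xab, 0xcd };

	if (!pack_write(&a, msg, 3))
		printf("reg %#x data %#x\n", a.reg_index, a.data_out);
	return 0;
}
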
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 0bfd55e0882..084e69414fd 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -2548,9 +2548,6 @@ int btc_dpm_init(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi;
struct evergreen_power_info *eg_pi;
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- u16 data_offset, size;
- u8 frev, crev;
struct atom_clock_dividers dividers;
int ret;
@@ -2633,16 +2630,7 @@ int btc_dpm_init(struct radeon_device *rdev)
eg_pi->vddci_control =
radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
- pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- pi->dynamic_ss = true;
- }
+ rv770_get_engine_memory_ss(rdev);
pi->asi = RV770_ASI_DFLT;
pi->pasi = CYPRESS_HASI_DFLT;
@@ -2659,8 +2647,7 @@ int btc_dpm_init(struct radeon_device *rdev)
pi->dynamic_pcie_gen2 = true;
- if (pi->gfx_clock_gating &&
- (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
pi->thermal_protection = true;
else
pi->thermal_protection = false;
@@ -2712,6 +2699,12 @@ int btc_dpm_init(struct radeon_device *rdev)
else
rdev->pm.dpm.dyn_state.sclk_mclk_delta = 10000;
+ /* make sure dc limits are valid */
+ if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+ (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
return 0;
}
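
For illustration only (hypothetical struct and helper names, not the radeon types): the validation added at the end of btc_dpm_init() simply falls back to the AC limits whenever either DC clock limit read from the power table is zero, along these lines:

#include <stdint.h>

/* Hypothetical stand-in for the sclk/mclk limit pair kept in dyn_state. */
struct clk_limits {
	uint32_t sclk;
	uint32_t mclk;
};

/* Make sure the DC limits are usable; unset entries inherit the AC limits. */
static void validate_dc_limits(struct clk_limits *dc, const struct clk_limits *ac)
{
	if (dc->sclk == 0 || dc->mclk == 0)
		*dc = *ac;
}
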
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.c b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
index 19a0114d2e3..98d009e154b 100644
--- a/drivers/gpu/drm/radeon/cayman_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
@@ -317,58 +317,4 @@ const u32 cayman_default_state[] =
0x00000010, /* */
};
-const u32 cayman_vs[] =
-{
- 0x00000004,
- 0x80400400,
- 0x0000a03c,
- 0x95000688,
- 0x00004000,
- 0x15000688,
- 0x00000000,
- 0x88000000,
- 0x04000000,
- 0x67961001,
-#ifdef __BIG_ENDIAN
- 0x00020000,
-#else
- 0x00000000,
-#endif
- 0x00000000,
- 0x04000000,
- 0x67961000,
-#ifdef __BIG_ENDIAN
- 0x00020008,
-#else
- 0x00000008,
-#endif
- 0x00000000,
-};
-
-const u32 cayman_ps[] =
-{
- 0x00000004,
- 0xa00c0000,
- 0x00000008,
- 0x80400000,
- 0x00000000,
- 0x95000688,
- 0x00000000,
- 0x88000000,
- 0x00380400,
- 0x00146b10,
- 0x00380000,
- 0x20146b10,
- 0x00380400,
- 0x40146b00,
- 0x80380000,
- 0x60146b00,
- 0x00000010,
- 0x000d1000,
- 0xb0800000,
- 0x00000000,
-};
-
-const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps);
-const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs);
const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
new file mode 100644
index 00000000000..3cce533397c
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -0,0 +1,5243 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "radeon.h"
+#include "cikd.h"
+#include "r600_dpm.h"
+#include "ci_dpm.h"
+#include "atom.h"
+#include <linux/seq_file.h>
+
+#define MC_CG_ARB_FREQ_F0 0x0a
+#define MC_CG_ARB_FREQ_F1 0x0b
+#define MC_CG_ARB_FREQ_F2 0x0c
+#define MC_CG_ARB_FREQ_F3 0x0d
+
+#define SMC_RAM_END 0x40000
+
+#define VOLTAGE_SCALE 4
+#define VOLTAGE_VID_OFFSET_SCALE1 625
+#define VOLTAGE_VID_OFFSET_SCALE2 100
+
+static const struct ci_pt_defaults defaults_bonaire_xt =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
+ { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
+ { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
+};
+
+static const struct ci_pt_defaults defaults_bonaire_pro =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
+ { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
+ { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
+};
+
+static const struct ci_pt_defaults defaults_saturn_xt =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
+ { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
+ { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
+};
+
+static const struct ci_pt_defaults defaults_saturn_pro =
+{
+ 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
+ { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
+ { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
+};
+
+static const struct ci_pt_config_reg didt_config_ci[] =
+{
+ { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
+ { 0xFFFFFFFF }
+};
+
+extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
+extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
+ u32 arb_freq_src, u32 arb_freq_dest);
+extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
+extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
+extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
+ u32 max_voltage_steps,
+ struct atom_voltage_table *voltage_table);
+extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
+extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
+extern void cik_update_cg(struct radeon_device *rdev,
+ u32 block, bool enable);
+
+static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
+ struct atom_voltage_table_entry *voltage_table,
+ u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
+static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
+static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
+ u32 target_tdp);
+static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
+
+static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = rdev->pm.dpm.priv;
+
+ return pi;
+}
+
+static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
+{
+ struct ci_ps *ps = rps->ps_priv;
+
+ return ps;
+}
+
+static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ switch (rdev->pdev->device) {
+ case 0x6650:
+ case 0x6658:
+ case 0x665C:
+ default:
+ pi->powertune_defaults = &defaults_bonaire_xt;
+ break;
+ case 0x6651:
+ case 0x665D:
+ pi->powertune_defaults = &defaults_bonaire_pro;
+ break;
+ case 0x6640:
+ pi->powertune_defaults = &defaults_saturn_xt;
+ break;
+ case 0x6641:
+ pi->powertune_defaults = &defaults_saturn_pro;
+ break;
+ }
+
+ pi->dte_tj_offset = 0;
+
+ pi->caps_power_containment = true;
+ pi->caps_cac = false;
+ pi->caps_sq_ramping = false;
+ pi->caps_db_ramping = false;
+ pi->caps_td_ramping = false;
+ pi->caps_tcp_ramping = false;
+
+ if (pi->caps_power_containment) {
+ pi->caps_cac = true;
+ pi->enable_bapm_feature = true;
+ pi->enable_tdc_limit_feature = true;
+ pi->enable_pkg_pwr_tracking_feature = true;
+ }
+}
+
+static u8 ci_convert_to_vid(u16 vddc)
+{
+ return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
+}
+
+static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
+ u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
+ u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
+ u32 i;
+
+ if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
+ return -EINVAL;
+ if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
+ return -EINVAL;
+ if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
+ return -EINVAL;
+
+ for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+ lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
+ hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
+ hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
+ } else {
+ lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
+ hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
+ }
+ }
+ return 0;
+}
+
+static int ci_populate_vddc_vid(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u8 *vid = pi->smc_powertune_table.VddCVid;
+ u32 i;
+
+ if (pi->vddc_voltage_table.count > 8)
+ return -EINVAL;
+
+ for (i = 0; i < pi->vddc_voltage_table.count; i++)
+ vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);
+
+ return 0;
+}
+
+static int ci_populate_svi_load_line(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+
+ pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
+ pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
+ pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
+ pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;
+
+ return 0;
+}
+
+static int ci_populate_tdc_limit(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+ u16 tdc_limit;
+
+ tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
+ pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
+ pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
+ pt_defaults->tdc_vddc_throttle_release_limit_perc;
+ pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;
+
+ return 0;
+}
+
+static int ci_populate_dw8(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+ int ret;
+
+ ret = ci_read_smc_sram_dword(rdev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, PmFuseTable) +
+ offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
+ (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
+ pi->sram_end);
+ if (ret)
+ return -EINVAL;
+ else
+ pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;
+
+ return 0;
+}
+
+static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
+ u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
+ int i, min, max;
+
+ min = max = hi_vid[0];
+ for (i = 0; i < 8; i++) {
+ if (0 != hi_vid[i]) {
+ if (min > hi_vid[i])
+ min = hi_vid[i];
+ if (max < hi_vid[i])
+ max = hi_vid[i];
+ }
+
+ if (0 != lo_vid[i]) {
+ if (min > lo_vid[i])
+ min = lo_vid[i];
+ if (max < lo_vid[i])
+ max = lo_vid[i];
+ }
+ }
+
+ if ((min == 0) || (max == 0))
+ return -EINVAL;
+ pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
+ pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;
+
+ return 0;
+}
+
+static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
+ u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
+ struct radeon_cac_tdp_table *cac_tdp_table =
+ rdev->pm.dpm.dyn_state.cac_tdp_table;
+
+ hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
+ lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;
+
+ pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
+ pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);
+
+ return 0;
+}
+
+static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
+ SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
+ struct radeon_cac_tdp_table *cac_tdp_table =
+ rdev->pm.dpm.dyn_state.cac_tdp_table;
+ struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
+ int i, j, k;
+ const u16 *def1;
+ const u16 *def2;
+
+ dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
+ dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;
+
+ dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
+ dpm_table->GpuTjMax =
+ (u8)(pi->thermal_temp_setting.temperature_high / 1000);
+ dpm_table->GpuTjHyst = 8;
+
+ dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;
+
+ if (ppm) {
+ dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
+ dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
+ } else {
+ dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
+ dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
+ }
+
+ dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
+ def1 = pt_defaults->bapmti_r;
+ def2 = pt_defaults->bapmti_rc;
+
+ for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
+ for (j = 0; j < SMU7_DTE_SOURCES; j++) {
+ for (k = 0; k < SMU7_DTE_SINKS; k++) {
+ dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
+ dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
+ def1++;
+ def2++;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int ci_populate_pm_base(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 pm_fuse_table_offset;
+ int ret;
+
+ if (pi->caps_power_containment) {
+ ret = ci_read_smc_sram_dword(rdev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, PmFuseTable),
+ &pm_fuse_table_offset, pi->sram_end);
+ if (ret)
+ return ret;
+ ret = ci_populate_bapm_vddc_vid_sidd(rdev);
+ if (ret)
+ return ret;
+ ret = ci_populate_vddc_vid(rdev);
+ if (ret)
+ return ret;
+ ret = ci_populate_svi_load_line(rdev);
+ if (ret)
+ return ret;
+ ret = ci_populate_tdc_limit(rdev);
+ if (ret)
+ return ret;
+ ret = ci_populate_dw8(rdev);
+ if (ret)
+ return ret;
+ ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
+ if (ret)
+ return ret;
+ ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
+ if (ret)
+ return ret;
+ ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
+ (u8 *)&pi->smc_powertune_table,
+ sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 data;
+
+ if (pi->caps_sq_ramping) {
+ data = RREG32_DIDT(DIDT_SQ_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_SQ_CTRL0, data);
+ }
+
+ if (pi->caps_db_ramping) {
+ data = RREG32_DIDT(DIDT_DB_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_DB_CTRL0, data);
+ }
+
+ if (pi->caps_td_ramping) {
+ data = RREG32_DIDT(DIDT_TD_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_TD_CTRL0, data);
+ }
+
+ if (pi->caps_tcp_ramping) {
+ data = RREG32_DIDT(DIDT_TCP_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_TCP_CTRL0, data);
+ }
+}
+
+static int ci_program_pt_config_registers(struct radeon_device *rdev,
+ const struct ci_pt_config_reg *cac_config_regs)
+{
+ const struct ci_pt_config_reg *config_regs = cac_config_regs;
+ u32 data;
+ u32 cache = 0;
+
+ if (config_regs == NULL)
+ return -EINVAL;
+
+ while (config_regs->offset != 0xFFFFFFFF) {
+ if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
+ cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ } else {
+ switch (config_regs->type) {
+ case CISLANDS_CONFIGREG_SMC_IND:
+ data = RREG32_SMC(config_regs->offset);
+ break;
+ case CISLANDS_CONFIGREG_DIDT_IND:
+ data = RREG32_DIDT(config_regs->offset);
+ break;
+ default:
+ data = RREG32(config_regs->offset << 2);
+ break;
+ }
+
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ data |= cache;
+
+ switch (config_regs->type) {
+ case CISLANDS_CONFIGREG_SMC_IND:
+ WREG32_SMC(config_regs->offset, data);
+ break;
+ case CISLANDS_CONFIGREG_DIDT_IND:
+ WREG32_DIDT(config_regs->offset, data);
+ break;
+ default:
+ WREG32(config_regs->offset << 2, data);
+ break;
+ }
+ cache = 0;
+ }
+ config_regs++;
+ }
+ return 0;
+}
+
+static int ci_enable_didt(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int ret;
+
+ if (pi->caps_sq_ramping || pi->caps_db_ramping ||
+ pi->caps_td_ramping || pi->caps_tcp_ramping) {
+ cik_enter_rlc_safe_mode(rdev);
+
+ if (enable) {
+ ret = ci_program_pt_config_registers(rdev, didt_config_ci);
+ if (ret) {
+ cik_exit_rlc_safe_mode(rdev);
+ return ret;
+ }
+ }
+
+ ci_do_enable_didt(rdev, enable);
+
+ cik_exit_rlc_safe_mode(rdev);
+ }
+
+ return 0;
+}
+
+static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+ int ret = 0;
+
+ if (enable) {
+ pi->power_containment_features = 0;
+ if (pi->caps_power_containment) {
+ if (pi->enable_bapm_feature) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
+ if (smc_result != PPSMC_Result_OK)
+ ret = -EINVAL;
+ else
+ pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
+ }
+
+ if (pi->enable_tdc_limit_feature) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
+ if (smc_result != PPSMC_Result_OK)
+ ret = -EINVAL;
+ else
+ pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
+ }
+
+ if (pi->enable_pkg_pwr_tracking_feature) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
+ if (smc_result != PPSMC_Result_OK) {
+ ret = -EINVAL;
+ } else {
+ struct radeon_cac_tdp_table *cac_tdp_table =
+ rdev->pm.dpm.dyn_state.cac_tdp_table;
+ u32 default_pwr_limit =
+ (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
+
+ pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;
+
+ ci_set_power_limit(rdev, default_pwr_limit);
+ }
+ }
+ }
+ } else {
+ if (pi->caps_power_containment && pi->power_containment_features) {
+ if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
+ ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);
+
+ if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
+ ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);
+
+ if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
+ ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
+ pi->power_containment_features = 0;
+ }
+ }
+
+ return ret;
+}
+
+static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+ int ret = 0;
+
+ if (pi->caps_cac) {
+ if (enable) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
+ if (smc_result != PPSMC_Result_OK) {
+ ret = -EINVAL;
+ pi->cac_enabled = false;
+ } else {
+ pi->cac_enabled = true;
+ }
+ } else if (pi->cac_enabled) {
+ ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
+ pi->cac_enabled = false;
+ }
+ }
+
+ return ret;
+}
+
+static int ci_power_control_set_level(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_cac_tdp_table *cac_tdp_table =
+ rdev->pm.dpm.dyn_state.cac_tdp_table;
+ s32 adjust_percent;
+ s32 target_tdp;
+ int ret = 0;
+ bool adjust_polarity = false; /* ??? */
+
+ if (pi->caps_power_containment &&
+ (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
+ adjust_percent = adjust_polarity ?
+ rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
+ target_tdp = ((100 + adjust_percent) *
+ (s32)cac_tdp_table->configurable_tdp) / 100;
+ target_tdp *= 256;
+
+ ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
+ }
+
+ return ret;
+}
+
+void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (pi->uvd_power_gated == gate)
+ return;
+
+ pi->uvd_power_gated = gate;
+
+ ci_update_uvd_dpm(rdev, gate);
+}
+
+bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 vblank_time = r600_dpm_get_vblank_time(rdev);
+ u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
+
+ if (vblank_time < switch_limit)
+ return true;
+ else
+ return false;
+
+}
+
+static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ struct ci_ps *ps = ci_get_ps(rps);
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_clock_and_voltage_limits *max_limits;
+ bool disable_mclk_switching;
+ u32 sclk, mclk;
+ int i;
+
+ if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
+ ci_dpm_vblank_too_short(rdev))
+ disable_mclk_switching = true;
+ else
+ disable_mclk_switching = false;
+
+ if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
+ pi->battery_state = true;
+ else
+ pi->battery_state = false;
+
+ if (rdev->pm.dpm.ac_power)
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+ if (rdev->pm.dpm.ac_power == false) {
+ for (i = 0; i < ps->performance_level_count; i++) {
+ if (ps->performance_levels[i].mclk > max_limits->mclk)
+ ps->performance_levels[i].mclk = max_limits->mclk;
+ if (ps->performance_levels[i].sclk > max_limits->sclk)
+ ps->performance_levels[i].sclk = max_limits->sclk;
+ }
+ }
+
+ /* XXX validate the min clocks required for display */
+
+ if (disable_mclk_switching) {
+ mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
+ sclk = ps->performance_levels[0].sclk;
+ } else {
+ mclk = ps->performance_levels[0].mclk;
+ sclk = ps->performance_levels[0].sclk;
+ }
+
+ ps->performance_levels[0].sclk = sclk;
+ ps->performance_levels[0].mclk = mclk;
+
+ if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
+ ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;
+
+ if (disable_mclk_switching) {
+ if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
+ ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
+ } else {
+ if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
+ ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
+ }
+}
+
+static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
+ int min_temp, int max_temp)
+{
+ int low_temp = 0 * 1000;
+ int high_temp = 255 * 1000;
+ u32 tmp;
+
+ if (low_temp < min_temp)
+ low_temp = min_temp;
+ if (high_temp > max_temp)
+ high_temp = max_temp;
+ if (high_temp < low_temp) {
+ DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+ return -EINVAL;
+ }
+
+ tmp = RREG32_SMC(CG_THERMAL_INT);
+ tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
+ tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
+ CI_DIG_THERM_INTL(low_temp / 1000);
+ WREG32_SMC(CG_THERMAL_INT, tmp);
+
+#if 0
+ /* XXX: need to figure out how to handle this properly */
+ tmp = RREG32_SMC(CG_THERMAL_CTRL);
+ tmp &= DIG_THERM_DPM_MASK;
+ tmp |= DIG_THERM_DPM(high_temp / 1000);
+ WREG32_SMC(CG_THERMAL_CTRL, tmp);
+#endif
+
+ return 0;
+}
+
+#if 0
+static int ci_read_smc_soft_register(struct radeon_device *rdev,
+ u16 reg_offset, u32 *value)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ return ci_read_smc_sram_dword(rdev,
+ pi->soft_regs_start + reg_offset,
+ value, pi->sram_end);
+}
+#endif
+
+static int ci_write_smc_soft_register(struct radeon_device *rdev,
+ u16 reg_offset, u32 value)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ return ci_write_smc_sram_dword(rdev,
+ pi->soft_regs_start + reg_offset,
+ value, pi->sram_end);
+}
+
+static void ci_init_fps_limits(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
+
+ if (pi->caps_fps) {
+ u16 tmp;
+
+ tmp = 45;
+ table->FpsHighT = cpu_to_be16(tmp);
+
+ tmp = 30;
+ table->FpsLowT = cpu_to_be16(tmp);
+ }
+}
+
+static int ci_update_sclk_t(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int ret = 0;
+ u32 low_sclk_interrupt_t = 0;
+
+ if (pi->caps_sclk_throttle_low_notification) {
+ low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
+
+ ret = ci_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
+ (u8 *)&low_sclk_interrupt_t,
+ sizeof(u32), pi->sram_end);
+
+ }
+
+ return ret;
+}
+
+static void ci_get_leakage_voltages(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u16 leakage_id, virtual_voltage_id;
+ u16 vddc, vddci;
+ int i;
+
+ pi->vddc_leakage.count = 0;
+ pi->vddci_leakage.count = 0;
+
+ if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
+ for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
+ virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
+ if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
+ virtual_voltage_id,
+ leakage_id) == 0) {
+ if (vddc != 0 && vddc != virtual_voltage_id) {
+ pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
+ pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
+ pi->vddc_leakage.count++;
+ }
+ if (vddci != 0 && vddci != virtual_voltage_id) {
+ pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
+ pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
+ pi->vddci_leakage.count++;
+ }
+ }
+ }
+ }
+}
+
+static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ bool want_thermal_protection;
+ enum radeon_dpm_event_src dpm_event_src;
+ u32 tmp;
+
+ switch (sources) {
+ case 0:
+ default:
+ want_thermal_protection = false;
+ break;
+ case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
+ want_thermal_protection = true;
+ dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
+ break;
+ case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+ want_thermal_protection = true;
+ dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
+ break;
+ case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+ (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
+ want_thermal_protection = true;
+ dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
+ break;
+ }
+
+ if (want_thermal_protection) {
+#if 0
+ /* XXX: need to figure out how to handle this properly */
+ tmp = RREG32_SMC(CG_THERMAL_CTRL);
+ tmp &= DPM_EVENT_SRC_MASK;
+ tmp |= DPM_EVENT_SRC(dpm_event_src);
+ WREG32_SMC(CG_THERMAL_CTRL, tmp);
+#endif
+
+ tmp = RREG32_SMC(GENERAL_PWRMGT);
+ if (pi->thermal_protection)
+ tmp &= ~THERMAL_PROTECTION_DIS;
+ else
+ tmp |= THERMAL_PROTECTION_DIS;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+ } else {
+ tmp = RREG32_SMC(GENERAL_PWRMGT);
+ tmp |= THERMAL_PROTECTION_DIS;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+ }
+}
+
+static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
+ enum radeon_dpm_auto_throttle_src source,
+ bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (enable) {
+ if (!(pi->active_auto_throttle_sources & (1 << source))) {
+ pi->active_auto_throttle_sources |= 1 << source;
+ ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
+ }
+ } else {
+ if (pi->active_auto_throttle_sources & (1 << source)) {
+ pi->active_auto_throttle_sources &= ~(1 << source);
+ ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
+ }
+ }
+}
+
+static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
+{
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
+ ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
+}
+
+static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+
+ if (!pi->need_update_smu7_dpm_table)
+ return 0;
+
+ if ((!pi->sclk_dpm_key_disabled) &&
+ (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ if ((!pi->mclk_dpm_key_disabled) &&
+ (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ pi->need_update_smu7_dpm_table = 0;
+ return 0;
+}
+
+static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+
+ if (enable) {
+ if (!pi->sclk_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ if (!pi->mclk_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+
+ WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);
+
+ WREG32_SMC(LCAC_MC0_CNTL, 0x05);
+ WREG32_SMC(LCAC_MC1_CNTL, 0x05);
+ WREG32_SMC(LCAC_CPL_CNTL, 0x100005);
+
+ udelay(10);
+
+ WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
+ WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
+ WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
+ }
+ } else {
+ if (!pi->sclk_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ if (!pi->mclk_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int ci_start_dpm(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+ int ret;
+ u32 tmp;
+
+ tmp = RREG32_SMC(GENERAL_PWRMGT);
+ tmp |= GLOBAL_PWRMGT_EN;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+
+ tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
+ tmp |= DYNAMIC_PM_EN;
+ WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+
+ ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
+
+ WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);
+
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+
+ ret = ci_enable_sclk_mclk_dpm(rdev, true);
+ if (ret)
+ return ret;
+
+ if (!pi->pcie_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+
+ if (!pi->need_update_smu7_dpm_table)
+ return 0;
+
+ if ((!pi->sclk_dpm_key_disabled) &&
+ (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ if ((!pi->mclk_dpm_key_disabled) &&
+ (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_stop_dpm(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+ int ret;
+ u32 tmp;
+
+ tmp = RREG32_SMC(GENERAL_PWRMGT);
+ tmp &= ~GLOBAL_PWRMGT_EN;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+
+ tmp = RREG32(SCLK_PWRMGT_CNTL);
+ tmp &= ~DYNAMIC_PM_EN;
+ WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+
+ if (!pi->pcie_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ ret = ci_enable_sclk_mclk_dpm(rdev, false);
+ if (ret)
+ return ret;
+
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
+{
+ u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
+
+ if (enable)
+ tmp &= ~SCLK_PWRMGT_OFF;
+ else
+ tmp |= SCLK_PWRMGT_OFF;
+ WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+}
+
+#if 0
+static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
+ bool ac_power)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_cac_tdp_table *cac_tdp_table =
+ rdev->pm.dpm.dyn_state.cac_tdp_table;
+ u32 power_limit;
+
+ if (ac_power)
+ power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
+ else
+ power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
+
+ ci_set_power_limit(rdev, power_limit);
+
+ if (pi->caps_automatic_dc_transition) {
+ if (ac_power)
+ ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
+ else
+ ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
+ }
+
+ return 0;
+}
+#endif
+
+static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
+ PPSMC_Msg msg, u32 parameter)
+{
+ WREG32(SMC_MSG_ARG_0, parameter);
+ return ci_send_msg_to_smc(rdev, msg);
+}
+
+static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
+ PPSMC_Msg msg, u32 *parameter)
+{
+ PPSMC_Result smc_result;
+
+ smc_result = ci_send_msg_to_smc(rdev, msg);
+
+ if ((smc_result == PPSMC_Result_OK) && parameter)
+ *parameter = RREG32(SMC_MSG_ARG_0);
+
+ return smc_result;
+}
+
+static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (!pi->sclk_dpm_key_disabled) {
+ PPSMC_Result smc_result =
+ ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (!pi->mclk_dpm_key_disabled) {
+ PPSMC_Result smc_result =
+ ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (!pi->pcie_dpm_key_disabled) {
+ PPSMC_Result smc_result =
+ ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
+ PPSMC_Result smc_result =
+ ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
+ u32 target_tdp)
+{
+ PPSMC_Result smc_result =
+ ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ return 0;
+}
+
+static int ci_set_boot_state(struct radeon_device *rdev)
+{
+ return ci_enable_sclk_mclk_dpm(rdev, false);
+}
+
+static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
+{
+ u32 sclk_freq;
+ PPSMC_Result smc_result =
+ ci_send_msg_to_smc_return_parameter(rdev,
+ PPSMC_MSG_API_GetSclkFrequency,
+ &sclk_freq);
+ if (smc_result != PPSMC_Result_OK)
+ sclk_freq = 0;
+
+ return sclk_freq;
+}
+
+static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
+{
+ u32 mclk_freq;
+ PPSMC_Result smc_result =
+ ci_send_msg_to_smc_return_parameter(rdev,
+ PPSMC_MSG_API_GetMclkFrequency,
+ &mclk_freq);
+ if (smc_result != PPSMC_Result_OK)
+ mclk_freq = 0;
+
+ return mclk_freq;
+}
+
+static void ci_dpm_start_smc(struct radeon_device *rdev)
+{
+ int i;
+
+ ci_program_jump_on_start(rdev);
+ ci_start_smc_clock(rdev);
+ ci_start_smc(rdev);
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
+ break;
+ }
+}
+
+static void ci_dpm_stop_smc(struct radeon_device *rdev)
+{
+ ci_reset_smc(rdev);
+ ci_stop_smc_clock(rdev);
+}
+
+static int ci_process_firmware_header(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+ int ret;
+
+ ret = ci_read_smc_sram_dword(rdev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, DpmTable),
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->dpm_table_start = tmp;
+
+ ret = ci_read_smc_sram_dword(rdev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, SoftRegisters),
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->soft_regs_start = tmp;
+
+ ret = ci_read_smc_sram_dword(rdev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, mcRegisterTable),
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->mc_reg_table_start = tmp;
+
+ ret = ci_read_smc_sram_dword(rdev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, FanTable),
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->fan_table_start = tmp;
+
+ ret = ci_read_smc_sram_dword(rdev,
+ SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->arb_table_start = tmp;
+
+ return 0;
+}
+
+static void ci_read_clock_registers(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ pi->clock_registers.cg_spll_func_cntl =
+ RREG32_SMC(CG_SPLL_FUNC_CNTL);
+ pi->clock_registers.cg_spll_func_cntl_2 =
+ RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
+ pi->clock_registers.cg_spll_func_cntl_3 =
+ RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
+ pi->clock_registers.cg_spll_func_cntl_4 =
+ RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
+ pi->clock_registers.cg_spll_spread_spectrum =
+ RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
+ pi->clock_registers.cg_spll_spread_spectrum_2 =
+ RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
+ pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
+ pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
+ pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
+ pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
+ pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
+ pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
+ pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
+ pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
+ pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
+}
+
+static void ci_init_sclk_t(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ pi->low_sclk_interrupt_t = 0;
+}
+
+static void ci_enable_thermal_protection(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
+
+ if (enable)
+ tmp &= ~THERMAL_PROTECTION_DIS;
+ else
+ tmp |= THERMAL_PROTECTION_DIS;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+}
+
+static void ci_enable_acpi_power_management(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
+
+ tmp |= STATIC_PM_EN;
+
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+}
+
+#if 0
+static int ci_enter_ulp_state(struct radeon_device *rdev)
+{
+
+ WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
+
+ udelay(25000);
+
+ return 0;
+}
+
+static int ci_exit_ulp_state(struct radeon_device *rdev)
+{
+ int i;
+
+ WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
+
+ udelay(7000);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(SMC_RESP_0) == 1)
+ break;
+ udelay(1000);
+ }
+
+ return 0;
+}
+#endif
+
+static int ci_notify_smc_display_change(struct radeon_device *rdev,
+ bool has_display)
+{
+ PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
+
+ return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
+}
+
+static int ci_enable_ds_master_switch(struct radeon_device *rdev,
+ bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (enable) {
+ if (pi->caps_sclk_ds) {
+ if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
+ return -EINVAL;
+ } else {
+ if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ } else {
+ if (pi->caps_sclk_ds) {
+ if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void ci_program_display_gap(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
+ u32 pre_vbi_time_in_us;
+ u32 frame_time_in_us;
+ u32 ref_clock = rdev->clock.spll.reference_freq;
+ u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
+ u32 vblank_time = r600_dpm_get_vblank_time(rdev);
+
+ tmp &= ~DISP_GAP_MASK;
+ if (rdev->pm.dpm.new_active_crtc_count > 0)
+ tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
+ else
+ tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
+ WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
+
+ if (refresh_rate == 0)
+ refresh_rate = 60;
+ if (vblank_time == 0xffffffff)
+ vblank_time = 500;
+ frame_time_in_us = 1000000 / refresh_rate;
+ pre_vbi_time_in_us =
+ frame_time_in_us - 200 - vblank_time;
+ tmp = pre_vbi_time_in_us * (ref_clock / 100);
+
+ WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
+ ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
+ ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
+
+
+ ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
+
+}
+
+static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+
+ if (enable) {
+ if (pi->caps_sclk_ss_support) {
+ tmp = RREG32_SMC(GENERAL_PWRMGT);
+ tmp |= DYN_SPREAD_SPECTRUM_EN;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+ }
+ } else {
+ tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
+ tmp &= ~SSEN;
+ WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);
+
+ tmp = RREG32_SMC(GENERAL_PWRMGT);
+ tmp &= ~DYN_SPREAD_SPECTRUM_EN;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+ }
+}
+
+static void ci_program_sstp(struct radeon_device *rdev)
+{
+ WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
+}
+
+static void ci_enable_display_gap(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
+
+ tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
+ tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
+ DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));
+
+ WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
+}
+
+static void ci_program_vc(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
+ tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
+ WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+
+ WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
+ WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
+ WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
+ WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
+ WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
+ WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
+ WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
+ WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
+}
+
+static void ci_clear_vc(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
+ tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
+ WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
+
+ WREG32_SMC(CG_FTV_0, 0);
+ WREG32_SMC(CG_FTV_1, 0);
+ WREG32_SMC(CG_FTV_2, 0);
+ WREG32_SMC(CG_FTV_3, 0);
+ WREG32_SMC(CG_FTV_4, 0);
+ WREG32_SMC(CG_FTV_5, 0);
+ WREG32_SMC(CG_FTV_6, 0);
+ WREG32_SMC(CG_FTV_7, 0);
+}
+
+static int ci_upload_firmware(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int i, ret;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
+ break;
+ }
+ WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);
+
+ ci_stop_smc_clock(rdev);
+ ci_reset_smc(rdev);
+
+ ret = ci_load_smc_ucode(rdev, pi->sram_end);
+
+ return ret;
+
+}
+
+static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
+ struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
+ struct atom_voltage_table *voltage_table)
+{
+ u32 i;
+
+ if (voltage_dependency_table == NULL)
+ return -EINVAL;
+
+ voltage_table->mask_low = 0;
+ voltage_table->phase_delay = 0;
+
+ voltage_table->count = voltage_dependency_table->count;
+ for (i = 0; i < voltage_table->count; i++) {
+ voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
+ voltage_table->entries[i].smio_low = 0;
+ }
+
+ return 0;
+}
+
+static int ci_construct_voltage_tables(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int ret;
+
+ if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
+ ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
+ VOLTAGE_OBJ_GPIO_LUT,
+ &pi->vddc_voltage_table);
+ if (ret)
+ return ret;
+ } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+ ret = ci_get_svi2_voltage_table(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ &pi->vddc_voltage_table);
+ if (ret)
+ return ret;
+ }
+
+ if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
+ si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
+ &pi->vddc_voltage_table);
+
+ if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
+ ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
+ VOLTAGE_OBJ_GPIO_LUT,
+ &pi->vddci_voltage_table);
+ if (ret)
+ return ret;
+ } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+ ret = ci_get_svi2_voltage_table(rdev,
+ &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ &pi->vddci_voltage_table);
+ if (ret)
+ return ret;
+ }
+
+ if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
+ si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
+ &pi->vddci_voltage_table);
+
+ if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
+ ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
+ VOLTAGE_OBJ_GPIO_LUT,
+ &pi->mvdd_voltage_table);
+ if (ret)
+ return ret;
+ } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+ ret = ci_get_svi2_voltage_table(rdev,
+ &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+ &pi->mvdd_voltage_table);
+ if (ret)
+ return ret;
+ }
+
+ if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
+ si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
+ &pi->mvdd_voltage_table);
+
+ return 0;
+}
+
+static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
+ struct atom_voltage_table_entry *voltage_table,
+ SMU7_Discrete_VoltageLevel *smc_voltage_table)
+{
+ int ret;
+
+ ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
+ &smc_voltage_table->StdVoltageHiSidd,
+ &smc_voltage_table->StdVoltageLoSidd);
+
+ if (ret) {
+ smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
+ smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
+ }
+
+ smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
+ smc_voltage_table->StdVoltageHiSidd =
+ cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
+ smc_voltage_table->StdVoltageLoSidd =
+ cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
+}
+
+static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ unsigned int count;
+
+ table->VddcLevelCount = pi->vddc_voltage_table.count;
+ for (count = 0; count < table->VddcLevelCount; count++) {
+ ci_populate_smc_voltage_table(rdev,
+ &pi->vddc_voltage_table.entries[count],
+ &table->VddcLevel[count]);
+
+ if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
+ table->VddcLevel[count].Smio |=
+ pi->vddc_voltage_table.entries[count].smio_low;
+ else
+ table->VddcLevel[count].Smio = 0;
+ }
+ table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
+
+ return 0;
+}
+
+static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ unsigned int count;
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ table->VddciLevelCount = pi->vddci_voltage_table.count;
+ for (count = 0; count < table->VddciLevelCount; count++) {
+ ci_populate_smc_voltage_table(rdev,
+ &pi->vddci_voltage_table.entries[count],
+ &table->VddciLevel[count]);
+
+ if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
+ table->VddciLevel[count].Smio |=
+ pi->vddci_voltage_table.entries[count].smio_low;
+ else
+ table->VddciLevel[count].Smio = 0;
+ }
+ table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
+
+ return 0;
+}
+
+static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ unsigned int count;
+
+ table->MvddLevelCount = pi->mvdd_voltage_table.count;
+ for (count = 0; count < table->MvddLevelCount; count++) {
+ ci_populate_smc_voltage_table(rdev,
+ &pi->mvdd_voltage_table.entries[count],
+ &table->MvddLevel[count]);
+
+ if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
+ table->MvddLevel[count].Smio |=
+ pi->mvdd_voltage_table.entries[count].smio_low;
+ else
+ table->MvddLevel[count].Smio = 0;
+ }
+ table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
+
+ return 0;
+}
+
+static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ int ret;
+
+ ret = ci_populate_smc_vddc_table(rdev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_vddci_table(rdev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_mvdd_table(rdev, table);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
+ SMU7_Discrete_VoltageLevel *voltage)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 i = 0;
+
+ if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
+ for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
+ if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
+ voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
+ break;
+ }
+ }
+
+ if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
+static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
+ struct atom_voltage_table_entry *voltage_table,
+ u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
+{
+ u16 v_index, idx;
+ bool voltage_found = false;
+ *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
+ *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
+
+ if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
+ return -EINVAL;
+
+ if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
+ for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+ if (voltage_table->value ==
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+ voltage_found = true;
+ if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
+ idx = v_index;
+ else
+ idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
+ *std_voltage_lo_sidd =
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
+ *std_voltage_hi_sidd =
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
+ break;
+ }
+ }
+
+ if (!voltage_found) {
+ for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
+ if (voltage_table->value <=
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
+ voltage_found = true;
+ if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
+ idx = v_index;
+ else
+ idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
+ *std_voltage_lo_sidd =
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
+ *std_voltage_hi_sidd =
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
+ break;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
+ const struct radeon_phase_shedding_limits_table *limits,
+ u32 sclk,
+ u32 *phase_shedding)
+{
+ unsigned int i;
+
+ *phase_shedding = 1;
+
+ for (i = 0; i < limits->count; i++) {
+ if (sclk < limits->entries[i].sclk) {
+ *phase_shedding = i;
+ break;
+ }
+ }
+}
+
+static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
+ const struct radeon_phase_shedding_limits_table *limits,
+ u32 mclk,
+ u32 *phase_shedding)
+{
+ unsigned int i;
+
+ *phase_shedding = 1;
+
+ for (i = 0; i < limits->count; i++) {
+ if (mclk < limits->entries[i].mclk) {
+ *phase_shedding = i;
+ break;
+ }
+ }
+}
+
+static int ci_init_arb_table_index(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+ int ret;
+
+ ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
+ &tmp, pi->sram_end);
+ if (ret)
+ return ret;
+
+ tmp &= 0x00FFFFFF;
+ tmp |= MC_CG_ARB_FREQ_F1 << 24;
+
+ return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
+ tmp, pi->sram_end);
+}
+
+static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
+ struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
+ u32 clock, u32 *voltage)
+{
+ u32 i = 0;
+
+ if (allowed_clock_voltage_table->count == 0)
+ return -EINVAL;
+
+ for (i = 0; i < allowed_clock_voltage_table->count; i++) {
+ if (allowed_clock_voltage_table->entries[i].clk >= clock) {
+ *voltage = allowed_clock_voltage_table->entries[i].v;
+ return 0;
+ }
+ }
+
+ *voltage = allowed_clock_voltage_table->entries[i-1].v;
+
+ return 0;
+}
+
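+/*
+ * Pick the largest deep sleep divider id such that sclk / (1 << id)
+ * stays at or above the minimum engine clock; 0 if sclk is already
+ * below that minimum.
+ */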
+static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
+ u32 sclk, u32 min_sclk_in_sr)
+{
+ u32 i;
+ u32 tmp;
+ u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
+ min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
+
+ if (sclk < min)
+ return 0;
+
+ for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
+ tmp = sclk / (1 << i);
+ if (tmp >= min || i == 0)
+ break;
+ }
+
+ return (u8)i;
+}
+
+static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
+{
+ return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
+}
+
+static int ci_reset_to_default(struct radeon_device *rdev)
+{
+ return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
+
+ if (tmp == MC_CG_ARB_FREQ_F0)
+ return 0;
+
+ return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
+}
+
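+/*
+ * Program the DRAM timings for an sclk/mclk pair via the ATOM tables and
+ * capture the resulting MC_ARB registers (byteswapped for the SMC); the
+ * helper below uses this to build the full sclk x mclk timing matrix and
+ * copy it into SMC SRAM.
+ */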
+static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
+ u32 sclk,
+ u32 mclk,
+ SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
+{
+ u32 dram_timing;
+ u32 dram_timing2;
+ u32 burst_time;
+
+ radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
+
+ dram_timing = RREG32(MC_ARB_DRAM_TIMING);
+ dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
+ burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
+
+ arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
+ arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
+ arb_regs->McArbBurstTime = (u8)burst_time;
+
+ return 0;
+}
+
+static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ SMU7_Discrete_MCArbDramTimingTable arb_regs;
+ u32 i, j;
+ int ret = 0;
+
+ memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
+
+ for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
+ for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
+ ret = ci_populate_memory_timing_parameters(rdev,
+ pi->dpm_table.sclk_table.dpm_levels[i].value,
+ pi->dpm_table.mclk_table.dpm_levels[j].value,
+ &arb_regs.entries[i][j]);
+ if (ret)
+ break;
+ }
+ }
+
+ if (ret == 0)
+ ret = ci_copy_bytes_to_smc(rdev,
+ pi->arb_table_start,
+ (u8 *)&arb_regs,
+ sizeof(SMU7_Discrete_MCArbDramTimingTable),
+ pi->sram_end);
+
+ return ret;
+}
+
+static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (pi->need_update_smu7_dpm_table == 0)
+ return 0;
+
+ return ci_do_program_memory_timing_parameters(rdev);
+}
+
+static void ci_populate_smc_initial_state(struct radeon_device *rdev,
+ struct radeon_ps *radeon_boot_state)
+{
+ struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 level = 0;
+
+ for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
+ if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
+ boot_state->performance_levels[0].sclk) {
+ pi->smc_state_table.GraphicsBootLevel = level;
+ break;
+ }
+ }
+
+ for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
+ if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
+ boot_state->performance_levels[0].mclk) {
+ pi->smc_state_table.MemoryBootLevel = level;
+ break;
+ }
+ }
+}
+
+static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
+{
+ u32 i;
+ u32 mask_value = 0;
+
+ for (i = dpm_table->count; i > 0; i--) {
+ mask_value = mask_value << 1;
+ if (dpm_table->dpm_levels[i-1].enabled)
+ mask_value |= 0x1;
+ else
+ mask_value &= 0xFFFFFFFE;
+ }
+
+ return mask_value;
+}
+
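+/* Fill the SMC PCIe link levels with gen speed and encoded lane count. */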
+static void ci_populate_smc_link_level(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_dpm_table *dpm_table = &pi->dpm_table;
+ u32 i;
+
+ for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
+ table->LinkLevel[i].PcieGenSpeed =
+ (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
+ table->LinkLevel[i].PcieLaneCount =
+ r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
+ table->LinkLevel[i].EnabledForActivity = 1;
+ table->LinkLevel[i].DownT = cpu_to_be32(5);
+ table->LinkLevel[i].UpT = cpu_to_be32(30);
+ }
+
+ pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
+}
+
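+/*
+ * The UVD/VCE/ACP/SAMU helpers below copy the corresponding clock/voltage
+ * dependency tables into the SMC DPM table, converting each clock to a
+ * post divider via the ATOM clock divider tables and byteswapping the
+ * result for the big-endian SMC.
+ */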
+static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ u32 count;
+ struct atom_clock_dividers dividers;
+ int ret = -EINVAL;
+
+ table->UvdLevelCount =
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
+
+ for (count = 0; count < table->UvdLevelCount; count++) {
+ table->UvdLevel[count].VclkFrequency =
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
+ table->UvdLevel[count].DclkFrequency =
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
+ table->UvdLevel[count].MinVddc =
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
+ table->UvdLevel[count].MinVddcPhases = 1;
+
+ ret = radeon_atom_get_clock_dividers(rdev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ table->UvdLevel[count].VclkFrequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
+
+ ret = radeon_atom_get_clock_dividers(rdev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ table->UvdLevel[count].DclkFrequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
+
+ table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
+ table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
+ table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
+ }
+
+ return ret;
+}
+
+static int ci_populate_smc_vce_level(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ u32 count;
+ struct atom_clock_dividers dividers;
+ int ret = -EINVAL;
+
+ table->VceLevelCount =
+ rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
+
+ for (count = 0; count < table->VceLevelCount; count++) {
+ table->VceLevel[count].Frequency =
+ rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
+ table->VceLevel[count].MinVoltage =
+ (u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
+ table->VceLevel[count].MinPhases = 1;
+
+ ret = radeon_atom_get_clock_dividers(rdev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ table->VceLevel[count].Frequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->VceLevel[count].Divider = (u8)dividers.post_divider;
+
+ table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
+ table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
+ }
+
+ return ret;
+}
+
+static int ci_populate_smc_acp_level(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ u32 count;
+ struct atom_clock_dividers dividers;
+ int ret = -EINVAL;
+
+ table->AcpLevelCount = (u8)
+ (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
+
+ for (count = 0; count < table->AcpLevelCount; count++) {
+ table->AcpLevel[count].Frequency =
+ rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
+ table->AcpLevel[count].MinVoltage =
+ rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
+ table->AcpLevel[count].MinPhases = 1;
+
+ ret = radeon_atom_get_clock_dividers(rdev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ table->AcpLevel[count].Frequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->AcpLevel[count].Divider = (u8)dividers.post_divider;
+
+ table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
+ table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
+ }
+
+ return ret;
+}
+
+static int ci_populate_smc_samu_level(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ u32 count;
+ struct atom_clock_dividers dividers;
+ int ret = -EINVAL;
+
+ table->SamuLevelCount =
+ rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
+
+ for (count = 0; count < table->SamuLevelCount; count++) {
+ table->SamuLevel[count].Frequency =
+ rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
+ table->SamuLevel[count].MinVoltage =
+ rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
+ table->SamuLevel[count].MinPhases = 1;
+
+ ret = radeon_atom_get_clock_dividers(rdev,
+ COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
+ table->SamuLevel[count].Frequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->SamuLevel[count].Divider = (u8)dividers.post_divider;
+
+ table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
+ table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
+ }
+
+ return ret;
+}
+
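+/*
+ * Derive the MPLL register values for a memory clock from the ATOM MPLL
+ * dividers, optionally adding memory spread spectrum and selecting the
+ * DLL power state.
+ */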
+static int ci_calculate_mclk_params(struct radeon_device *rdev,
+ u32 memory_clock,
+ SMU7_Discrete_MemoryLevel *mclk,
+ bool strobe_mode,
+ bool dll_state_on)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 dll_cntl = pi->clock_registers.dll_cntl;
+ u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
+ u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
+ u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
+ u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
+ u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
+ u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
+ u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
+ u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
+ struct atom_mpll_param mpll_param;
+ int ret;
+
+ ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
+ if (ret)
+ return ret;
+
+ mpll_func_cntl &= ~BWCTRL_MASK;
+ mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
+
+ mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
+ mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
+ CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
+
+ mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
+ mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
+
+ if (pi->mem_gddr5) {
+ mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
+ mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
+ YCLK_POST_DIV(mpll_param.post_div);
+ }
+
+ if (pi->caps_mclk_ss_support) {
+ struct radeon_atom_ss ss;
+ u32 freq_nom;
+ u32 tmp;
+ u32 reference_clock = rdev->clock.mpll.reference_freq;
+
+ if (pi->mem_gddr5)
+ freq_nom = memory_clock * 4;
+ else
+ freq_nom = memory_clock * 2;
+
+ tmp = (freq_nom / reference_clock);
+ tmp = tmp * tmp;
+ if (radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
+ u32 clks = reference_clock * 5 / ss.rate;
+ u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
+
+ mpll_ss1 &= ~CLKV_MASK;
+ mpll_ss1 |= CLKV(clkv);
+
+ mpll_ss2 &= ~CLKS_MASK;
+ mpll_ss2 |= CLKS(clks);
+ }
+ }
+
+ mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
+ mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
+
+ if (dll_state_on)
+ mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
+ else
+ mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+ mclk->MclkFrequency = memory_clock;
+ mclk->MpllFuncCntl = mpll_func_cntl;
+ mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
+ mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
+ mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
+ mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
+ mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
+ mclk->DllCntl = dll_cntl;
+ mclk->MpllSs1 = mpll_ss1;
+ mclk->MpllSs2 = mpll_ss2;
+
+ return 0;
+}
+
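+/*
+ * Fill one SMC memory DPM level: minimum VDDC/VDDCI/MVDD from the
+ * dependency tables, stutter/strobe/EDC enables based on the configured
+ * thresholds, the MPLL parameters, and finally the endian conversion
+ * expected by the SMC.
+ */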
+static int ci_populate_single_memory_level(struct radeon_device *rdev,
+ u32 memory_clock,
+ SMU7_Discrete_MemoryLevel *memory_level)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int ret;
+ bool dll_state_on;
+
+ if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
+ ret = ci_get_dependency_volt_by_clk(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ memory_clock, &memory_level->MinVddc);
+ if (ret)
+ return ret;
+ }
+
+ if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
+ ret = ci_get_dependency_volt_by_clk(rdev,
+ &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ memory_clock, &memory_level->MinVddci);
+ if (ret)
+ return ret;
+ }
+
+ if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
+ ret = ci_get_dependency_volt_by_clk(rdev,
+ &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+ memory_clock, &memory_level->MinMvdd);
+ if (ret)
+ return ret;
+ }
+
+ memory_level->MinVddcPhases = 1;
+
+ if (pi->vddc_phase_shed_control)
+ ci_populate_phase_value_based_on_mclk(rdev,
+ &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
+ memory_clock,
+ &memory_level->MinVddcPhases);
+
+ memory_level->EnabledForThrottle = 1;
+ memory_level->EnabledForActivity = 1;
+ memory_level->UpH = 0;
+ memory_level->DownH = 100;
+ memory_level->VoltageDownH = 0;
+ memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
+
+ memory_level->StutterEnable = false;
+ memory_level->StrobeEnable = false;
+ memory_level->EdcReadEnable = false;
+ memory_level->EdcWriteEnable = false;
+ memory_level->RttEnable = false;
+
+ memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ if (pi->mclk_stutter_mode_threshold &&
+ (memory_clock <= pi->mclk_stutter_mode_threshold) &&
+	    !pi->uvd_enabled &&
+ (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
+ (rdev->pm.dpm.new_active_crtc_count <= 2))
+ memory_level->StutterEnable = true;
+
+ if (pi->mclk_strobe_mode_threshold &&
+ (memory_clock <= pi->mclk_strobe_mode_threshold))
+		memory_level->StrobeEnable = true;
+
+ if (pi->mem_gddr5) {
+ memory_level->StrobeRatio =
+ si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
+ if (pi->mclk_edc_enable_threshold &&
+ (memory_clock > pi->mclk_edc_enable_threshold))
+ memory_level->EdcReadEnable = true;
+
+ if (pi->mclk_edc_wr_enable_threshold &&
+ (memory_clock > pi->mclk_edc_wr_enable_threshold))
+ memory_level->EdcWriteEnable = true;
+
+ if (memory_level->StrobeEnable) {
+ if (si_get_mclk_frequency_ratio(memory_clock, true) >=
+ ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
+ dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
+ else
+ dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
+ } else {
+ dll_state_on = pi->dll_default_on;
+ }
+ } else {
+ memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
+ dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
+ }
+
+ ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
+ if (ret)
+ return ret;
+
+ memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
+ memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
+ memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
+ memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
+
+ memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
+ memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
+ memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
+ memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
+ memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
+ memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
+ memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
+ memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
+ memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
+ memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
+ memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
+
+ return 0;
+}
+
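+/*
+ * Populate the ACPI (lowest power) state: the SPLL is powered down and
+ * held in reset with the sclk mux switched away from it, the memory
+ * DLLs are put in reset, and the memory level is excluded from activity
+ * monitoring and throttling.
+ */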
+static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
+ SMU7_Discrete_DpmTable *table)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct atom_clock_dividers dividers;
+ SMU7_Discrete_VoltageLevel voltage_level;
+ u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
+ u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
+ u32 dll_cntl = pi->clock_registers.dll_cntl;
+ u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
+ int ret;
+
+ table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
+
+ if (pi->acpi_vddc)
+ table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
+ else
+ table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
+
+ table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
+
+ table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
+
+ ret = radeon_atom_get_clock_dividers(rdev,
+ COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
+ table->ACPILevel.SclkFrequency, false, &dividers);
+ if (ret)
+ return ret;
+
+ table->ACPILevel.SclkDid = (u8)dividers.post_divider;
+ table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+ table->ACPILevel.DeepSleepDivId = 0;
+
+ spll_func_cntl &= ~SPLL_PWRON;
+ spll_func_cntl |= SPLL_RESET;
+
+ spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
+ spll_func_cntl_2 |= SCLK_MUX_SEL(4);
+
+ table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
+ table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
+ table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
+ table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
+ table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
+ table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
+ table->ACPILevel.CcPwrDynRm = 0;
+ table->ACPILevel.CcPwrDynRm1 = 0;
+
+ table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
+ table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
+ table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
+ table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
+ table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
+ table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
+ table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
+ table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
+ table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
+ table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
+ table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
+
+ table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
+ table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
+
+ if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
+ if (pi->acpi_vddci)
+ table->MemoryACPILevel.MinVddci =
+ cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
+ else
+ table->MemoryACPILevel.MinVddci =
+ cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
+ }
+
+ if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
+ table->MemoryACPILevel.MinMvdd = 0;
+ else
+ table->MemoryACPILevel.MinMvdd =
+ cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
+
+ mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
+ mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
+
+ dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
+
+ table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
+ table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
+ table->MemoryACPILevel.MpllAdFuncCntl =
+ cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
+ table->MemoryACPILevel.MpllDqFuncCntl =
+ cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
+ table->MemoryACPILevel.MpllFuncCntl =
+ cpu_to_be32(pi->clock_registers.mpll_func_cntl);
+ table->MemoryACPILevel.MpllFuncCntl_1 =
+ cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
+ table->MemoryACPILevel.MpllFuncCntl_2 =
+ cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
+ table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
+ table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
+
+ table->MemoryACPILevel.EnabledForThrottle = 0;
+ table->MemoryACPILevel.EnabledForActivity = 0;
+ table->MemoryACPILevel.UpH = 0;
+ table->MemoryACPILevel.DownH = 100;
+ table->MemoryACPILevel.VoltageDownH = 0;
+ table->MemoryACPILevel.ActivityLevel =
+ cpu_to_be16((u16)pi->mclk_activity_target);
+
+ table->MemoryACPILevel.StutterEnable = false;
+ table->MemoryACPILevel.StrobeEnable = false;
+ table->MemoryACPILevel.EdcReadEnable = false;
+ table->MemoryACPILevel.EdcWriteEnable = false;
+ table->MemoryACPILevel.RttEnable = false;
+
+ return 0;
+}
+
+static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_ulv_parm *ulv = &pi->ulv;
+
+ if (ulv->supported) {
+ if (enable)
+ return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+ else
+ return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+ }
+
+ return 0;
+}
+
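+/*
+ * Compute the ULV VDDC offset relative to the lowest sclk dependency
+ * voltage, either as a plain offset (GPIO control) or as a VID offset
+ * (SVI2); ULV support is dropped when no ULV voltage is available.
+ */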
+static int ci_populate_ulv_level(struct radeon_device *rdev,
+ SMU7_Discrete_Ulv *state)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
+
+ state->CcPwrDynRm = 0;
+ state->CcPwrDynRm1 = 0;
+
+ if (ulv_voltage == 0) {
+ pi->ulv.supported = false;
+ return 0;
+ }
+
+ if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
+ if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
+ state->VddcOffset = 0;
+ else
+ state->VddcOffset =
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
+ } else {
+ if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
+ state->VddcOffsetVid = 0;
+ else
+ state->VddcOffsetVid = (u8)
+ ((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
+ VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
+ }
+ state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
+
+ state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
+ state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
+ state->VddcOffset = cpu_to_be16(state->VddcOffset);
+
+ return 0;
+}
+
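+/*
+ * Derive the SPLL feedback divider and optional engine spread spectrum
+ * settings for an engine clock from the ATOM clock divider tables.
+ */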
+static int ci_calculate_sclk_params(struct radeon_device *rdev,
+ u32 engine_clock,
+ SMU7_Discrete_GraphicsLevel *sclk)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct atom_clock_dividers dividers;
+ u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
+ u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
+ u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
+ u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
+ u32 reference_clock = rdev->clock.spll.reference_freq;
+ u32 reference_divider;
+ u32 fbdiv;
+ int ret;
+
+ ret = radeon_atom_get_clock_dividers(rdev,
+ COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
+ engine_clock, false, &dividers);
+ if (ret)
+ return ret;
+
+ reference_divider = 1 + dividers.ref_div;
+ fbdiv = dividers.fb_div & 0x3FFFFFF;
+
+ spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
+ spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
+ spll_func_cntl_3 |= SPLL_DITHEN;
+
+ if (pi->caps_sclk_ss_support) {
+ struct radeon_atom_ss ss;
+ u32 vco_freq = engine_clock * dividers.post_div;
+
+ if (radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
+ u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
+ u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
+
+ cg_spll_spread_spectrum &= ~CLK_S_MASK;
+ cg_spll_spread_spectrum |= CLK_S(clk_s);
+ cg_spll_spread_spectrum |= SSEN;
+
+ cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
+ cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
+ }
+ }
+
+ sclk->SclkFrequency = engine_clock;
+ sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
+ sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
+ sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
+ sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
+ sclk->SclkDid = (u8)dividers.post_divider;
+
+ return 0;
+}
+
+static int ci_populate_single_graphic_level(struct radeon_device *rdev,
+ u32 engine_clock,
+ u16 sclk_activity_level_t,
+ SMU7_Discrete_GraphicsLevel *graphic_level)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int ret;
+
+ ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
+ if (ret)
+ return ret;
+
+ ret = ci_get_dependency_volt_by_clk(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+ engine_clock, &graphic_level->MinVddc);
+ if (ret)
+ return ret;
+
+ graphic_level->SclkFrequency = engine_clock;
+
+ graphic_level->Flags = 0;
+ graphic_level->MinVddcPhases = 1;
+
+ if (pi->vddc_phase_shed_control)
+ ci_populate_phase_value_based_on_sclk(rdev,
+ &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
+ engine_clock,
+ &graphic_level->MinVddcPhases);
+
+ graphic_level->ActivityLevel = sclk_activity_level_t;
+
+ graphic_level->CcPwrDynRm = 0;
+ graphic_level->CcPwrDynRm1 = 0;
+ graphic_level->EnabledForActivity = 1;
+ graphic_level->EnabledForThrottle = 1;
+ graphic_level->UpH = 0;
+ graphic_level->DownH = 0;
+ graphic_level->VoltageDownH = 0;
+ graphic_level->PowerThrottle = 0;
+
+ if (pi->caps_sclk_ds)
+ graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
+ engine_clock,
+ CISLAND_MINIMUM_ENGINE_CLOCK);
+
+ graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
+
+ graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
+ graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
+ graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
+ graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
+ graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
+ graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
+ graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
+ graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
+ graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
+ graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
+ graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
+
+ return 0;
+}
+
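+/*
+ * Build every graphics DPM level (and, in the analogous memory-level
+ * helper further below, every memory DPM level) and copy the resulting
+ * array into SMC SRAM at its offset within SMU7_Discrete_DpmTable.
+ */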
+static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_dpm_table *dpm_table = &pi->dpm_table;
+ u32 level_array_address = pi->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
+ u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
+ SMU7_MAX_LEVELS_GRAPHICS;
+ SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
+	u32 i;
+	int ret;
+
+ memset(levels, 0, level_array_size);
+
+ for (i = 0; i < dpm_table->sclk_table.count; i++) {
+ ret = ci_populate_single_graphic_level(rdev,
+ dpm_table->sclk_table.dpm_levels[i].value,
+ (u16)pi->activity_target[i],
+ &pi->smc_state_table.GraphicsLevel[i]);
+ if (ret)
+ return ret;
+ if (i == (dpm_table->sclk_table.count - 1))
+ pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+ }
+
+ pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
+
+ ret = ci_copy_bytes_to_smc(rdev, level_array_address,
+ (u8 *)levels, level_array_size,
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int ci_populate_ulv_state(struct radeon_device *rdev,
+ SMU7_Discrete_Ulv *ulv_level)
+{
+ return ci_populate_ulv_level(rdev, ulv_level);
+}
+
+static int ci_populate_all_memory_levels(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_dpm_table *dpm_table = &pi->dpm_table;
+ u32 level_array_address = pi->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
+ u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
+ SMU7_MAX_LEVELS_MEMORY;
+ SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
+	u32 i;
+	int ret;
+
+ memset(levels, 0, level_array_size);
+
+ for (i = 0; i < dpm_table->mclk_table.count; i++) {
+ if (dpm_table->mclk_table.dpm_levels[i].value == 0)
+ return -EINVAL;
+ ret = ci_populate_single_memory_level(rdev,
+ dpm_table->mclk_table.dpm_levels[i].value,
+ &pi->smc_state_table.MemoryLevel[i]);
+ if (ret)
+ return ret;
+ }
+
+ pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
+
+ pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
+
+ pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
+ PPSMC_DISPLAY_WATERMARK_HIGH;
+
+ ret = ci_copy_bytes_to_smc(rdev, level_array_address,
+ (u8 *)levels, level_array_size,
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void ci_reset_single_dpm_table(struct radeon_device *rdev,
+				      struct ci_single_dpm_table *dpm_table,
+ u32 count)
+{
+ u32 i;
+
+ dpm_table->count = count;
+ for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
+ dpm_table->dpm_levels[i].enabled = false;
+}
+
+static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
+ u32 index, u32 pcie_gen, u32 pcie_lanes)
+{
+ dpm_table->dpm_levels[index].value = pcie_gen;
+ dpm_table->dpm_levels[index].param1 = pcie_lanes;
+ dpm_table->dpm_levels[index].enabled = true;
+}
+
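+/*
+ * Build the six default PCIe DPM entries from the min/max gen and lane
+ * settings of the powersaving and performance configurations; if only
+ * one of the two is in use, it is mirrored into the other.
+ */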
+static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
+ return -EINVAL;
+
+ if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
+ pi->pcie_gen_powersaving = pi->pcie_gen_performance;
+ pi->pcie_lane_powersaving = pi->pcie_lane_performance;
+ } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
+ pi->pcie_gen_performance = pi->pcie_gen_powersaving;
+ pi->pcie_lane_performance = pi->pcie_lane_powersaving;
+ }
+
+ ci_reset_single_dpm_table(rdev,
+ &pi->dpm_table.pcie_speed_table,
+ SMU7_MAX_LEVELS_LINK);
+
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
+ pi->pcie_gen_powersaving.min,
+ pi->pcie_lane_powersaving.min);
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
+ pi->pcie_gen_performance.min,
+ pi->pcie_lane_performance.min);
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
+ pi->pcie_gen_powersaving.min,
+ pi->pcie_lane_powersaving.max);
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
+ pi->pcie_gen_performance.min,
+ pi->pcie_lane_performance.max);
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
+ pi->pcie_gen_powersaving.max,
+ pi->pcie_lane_powersaving.max);
+ ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
+ pi->pcie_gen_performance.max,
+ pi->pcie_lane_performance.max);
+
+ pi->dpm_table.pcie_speed_table.count = 6;
+
+ return 0;
+}
+
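+/*
+ * Derive the default sclk/mclk/voltage DPM tables from the ATOM clock
+ * and voltage dependency tables, skipping consecutive duplicate clocks.
+ */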
+static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
+ struct radeon_cac_leakage_table *std_voltage_table =
+ &rdev->pm.dpm.dyn_state.cac_leakage_table;
+ u32 i;
+
+ if (allowed_sclk_vddc_table == NULL)
+ return -EINVAL;
+ if (allowed_sclk_vddc_table->count < 1)
+ return -EINVAL;
+ if (allowed_mclk_table == NULL)
+ return -EINVAL;
+ if (allowed_mclk_table->count < 1)
+ return -EINVAL;
+
+ memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
+
+ ci_reset_single_dpm_table(rdev,
+ &pi->dpm_table.sclk_table,
+ SMU7_MAX_LEVELS_GRAPHICS);
+ ci_reset_single_dpm_table(rdev,
+ &pi->dpm_table.mclk_table,
+ SMU7_MAX_LEVELS_MEMORY);
+ ci_reset_single_dpm_table(rdev,
+ &pi->dpm_table.vddc_table,
+ SMU7_MAX_LEVELS_VDDC);
+ ci_reset_single_dpm_table(rdev,
+ &pi->dpm_table.vddci_table,
+ SMU7_MAX_LEVELS_VDDCI);
+ ci_reset_single_dpm_table(rdev,
+ &pi->dpm_table.mvdd_table,
+ SMU7_MAX_LEVELS_MVDD);
+
+ pi->dpm_table.sclk_table.count = 0;
+ for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
+ if ((i == 0) ||
+ (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
+ allowed_sclk_vddc_table->entries[i].clk)) {
+ pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
+ allowed_sclk_vddc_table->entries[i].clk;
+ pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true;
+ pi->dpm_table.sclk_table.count++;
+ }
+ }
+
+ pi->dpm_table.mclk_table.count = 0;
+ for (i = 0; i < allowed_mclk_table->count; i++) {
+		if ((i == 0) ||
+ (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
+ allowed_mclk_table->entries[i].clk)) {
+ pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
+ allowed_mclk_table->entries[i].clk;
+ pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true;
+ pi->dpm_table.mclk_table.count++;
+ }
+ }
+
+ for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
+ pi->dpm_table.vddc_table.dpm_levels[i].value =
+ allowed_sclk_vddc_table->entries[i].v;
+ pi->dpm_table.vddc_table.dpm_levels[i].param1 =
+ std_voltage_table->entries[i].leakage;
+ pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
+ }
+ pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
+
+ allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
+ if (allowed_mclk_table) {
+ for (i = 0; i < allowed_mclk_table->count; i++) {
+ pi->dpm_table.vddci_table.dpm_levels[i].value =
+ allowed_mclk_table->entries[i].v;
+ pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
+ }
+ pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
+ }
+
+ allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
+ if (allowed_mclk_table) {
+ for (i = 0; i < allowed_mclk_table->count; i++) {
+ pi->dpm_table.mvdd_table.dpm_levels[i].value =
+ allowed_mclk_table->entries[i].v;
+ pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
+ }
+ pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
+ }
+
+ ci_setup_default_pcie_tables(rdev);
+
+ return 0;
+}
+
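+/*
+ * Find the DPM level matching the vbios boot clock.  Callers ignore the
+ * return value, so an unmatched boot clock simply leaves the boot level
+ * at 0.
+ */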
+static int ci_find_boot_level(struct ci_single_dpm_table *table,
+ u32 value, u32 *boot_level)
+{
+ u32 i;
+ int ret = -EINVAL;
+
+	for (i = 0; i < table->count; i++) {
+ if (value == table->dpm_levels[i].value) {
+ *boot_level = i;
+ ret = 0;
+ }
+ }
+
+ return ret;
+}
+
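+/*
+ * Build the complete SMU7_Discrete_DpmTable (graphics, memory, link,
+ * UVD, VCE, ACP, SAMU, ACPI and ULV levels plus the boot state and the
+ * global control fields) and upload everything except the trailing PID
+ * controllers to SMC SRAM.
+ */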
+static int ci_init_smc_table(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_ulv_parm *ulv = &pi->ulv;
+ struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
+ SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
+ int ret;
+
+ ret = ci_setup_default_dpm_tables(rdev);
+ if (ret)
+ return ret;
+
+ if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
+ ci_populate_smc_voltage_tables(rdev, table);
+
+ ci_init_fps_limits(rdev);
+
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
+
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
+
+ if (pi->mem_gddr5)
+ table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
+
+ if (ulv->supported) {
+ ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
+ if (ret)
+ return ret;
+ WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
+ }
+
+ ret = ci_populate_all_graphic_levels(rdev);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_all_memory_levels(rdev);
+ if (ret)
+ return ret;
+
+ ci_populate_smc_link_level(rdev, table);
+
+ ret = ci_populate_smc_acpi_level(rdev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_vce_level(rdev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_acp_level(rdev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_samu_level(rdev, table);
+ if (ret)
+ return ret;
+
+ ret = ci_do_program_memory_timing_parameters(rdev);
+ if (ret)
+ return ret;
+
+ ret = ci_populate_smc_uvd_level(rdev, table);
+ if (ret)
+ return ret;
+
+ table->UvdBootLevel = 0;
+ table->VceBootLevel = 0;
+ table->AcpBootLevel = 0;
+ table->SamuBootLevel = 0;
+ table->GraphicsBootLevel = 0;
+ table->MemoryBootLevel = 0;
+
+ ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
+ pi->vbios_boot_state.sclk_bootup_value,
+ (u32 *)&pi->smc_state_table.GraphicsBootLevel);
+
+ ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
+ pi->vbios_boot_state.mclk_bootup_value,
+ (u32 *)&pi->smc_state_table.MemoryBootLevel);
+
+ table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
+ table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
+ table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
+
+ ci_populate_smc_initial_state(rdev, radeon_boot_state);
+
+ ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
+ if (ret)
+ return ret;
+
+ table->UVDInterval = 1;
+ table->VCEInterval = 1;
+ table->ACPInterval = 1;
+ table->SAMUInterval = 1;
+ table->GraphicsVoltageChangeEnable = 1;
+ table->GraphicsThermThrottleEnable = 1;
+ table->GraphicsInterval = 1;
+ table->VoltageInterval = 1;
+ table->ThermalInterval = 1;
+ table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
+ CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
+ table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
+ CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
+ table->MemoryVoltageChangeEnable = 1;
+ table->MemoryInterval = 1;
+ table->VoltageResponseTime = 0;
+ table->VddcVddciDelta = 4000;
+ table->PhaseResponseTime = 0;
+ table->MemoryThermThrottleEnable = 1;
+ table->PCIeBootLinkLevel = 0;
+ table->PCIeGenInterval = 1;
+ if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
+ table->SVI2Enable = 1;
+ else
+ table->SVI2Enable = 0;
+
+ table->ThermGpio = 17;
+ table->SclkStepSize = 0x4000;
+
+ table->SystemFlags = cpu_to_be32(table->SystemFlags);
+ table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
+ table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
+ table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
+ table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
+ table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
+ table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
+ table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
+ table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
+ table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
+ table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
+ table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
+ table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
+ table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
+
+ ret = ci_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Discrete_DpmTable, SystemFlags),
+ (u8 *)&table->SystemFlags,
+ sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
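+/*
+ * The trim helpers below enable only the DPM levels that fall inside the
+ * clock (and, for PCIe, gen/lane) range spanned by the requested power
+ * state, and drop duplicate PCIe entries.
+ */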
+static void ci_trim_single_dpm_states(struct radeon_device *rdev,
+ struct ci_single_dpm_table *dpm_table,
+ u32 low_limit, u32 high_limit)
+{
+ u32 i;
+
+ for (i = 0; i < dpm_table->count; i++) {
+ if ((dpm_table->dpm_levels[i].value < low_limit) ||
+ (dpm_table->dpm_levels[i].value > high_limit))
+ dpm_table->dpm_levels[i].enabled = false;
+ else
+ dpm_table->dpm_levels[i].enabled = true;
+ }
+}
+
+static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
+ u32 speed_low, u32 lanes_low,
+ u32 speed_high, u32 lanes_high)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
+ u32 i, j;
+
+ for (i = 0; i < pcie_table->count; i++) {
+ if ((pcie_table->dpm_levels[i].value < speed_low) ||
+ (pcie_table->dpm_levels[i].param1 < lanes_low) ||
+ (pcie_table->dpm_levels[i].value > speed_high) ||
+ (pcie_table->dpm_levels[i].param1 > lanes_high))
+ pcie_table->dpm_levels[i].enabled = false;
+ else
+ pcie_table->dpm_levels[i].enabled = true;
+ }
+
+ for (i = 0; i < pcie_table->count; i++) {
+ if (pcie_table->dpm_levels[i].enabled) {
+ for (j = i + 1; j < pcie_table->count; j++) {
+ if (pcie_table->dpm_levels[j].enabled) {
+ if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
+ (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
+ pcie_table->dpm_levels[j].enabled = false;
+ }
+ }
+ }
+ }
+}
+
+static int ci_trim_dpm_states(struct radeon_device *rdev,
+ struct radeon_ps *radeon_state)
+{
+ struct ci_ps *state = ci_get_ps(radeon_state);
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 high_limit_count;
+
+ if (state->performance_level_count < 1)
+ return -EINVAL;
+
+ if (state->performance_level_count == 1)
+ high_limit_count = 0;
+ else
+ high_limit_count = 1;
+
+ ci_trim_single_dpm_states(rdev,
+ &pi->dpm_table.sclk_table,
+ state->performance_levels[0].sclk,
+ state->performance_levels[high_limit_count].sclk);
+
+ ci_trim_single_dpm_states(rdev,
+ &pi->dpm_table.mclk_table,
+ state->performance_levels[0].mclk,
+ state->performance_levels[high_limit_count].mclk);
+
+ ci_trim_pcie_dpm_states(rdev,
+ state->performance_levels[0].pcie_gen,
+ state->performance_levels[0].pcie_lane,
+ state->performance_levels[high_limit_count].pcie_gen,
+ state->performance_levels[high_limit_count].pcie_lane);
+
+ return 0;
+}
+
+static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
+{
+ struct radeon_clock_voltage_dependency_table *disp_voltage_table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
+ struct radeon_clock_voltage_dependency_table *vddc_table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ u32 requested_voltage = 0;
+ u32 i;
+
+ if (disp_voltage_table == NULL)
+ return -EINVAL;
+ if (!disp_voltage_table->count)
+ return -EINVAL;
+
+ for (i = 0; i < disp_voltage_table->count; i++) {
+ if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
+ requested_voltage = disp_voltage_table->entries[i].v;
+ }
+
+ for (i = 0; i < vddc_table->count; i++) {
+ if (requested_voltage <= vddc_table->entries[i].v) {
+ requested_voltage = vddc_table->entries[i].v;
+ return (ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_VddC_Request,
+ requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+ }
+ }
+
+ return -EINVAL;
+}
+
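+/*
+ * Push the current sclk/mclk/PCIe level enable masks to the SMC and
+ * apply the display minimum voltage request.
+ */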
+static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result result;
+
+ if (!pi->sclk_dpm_key_disabled) {
+ if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+ result = ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
+ if (result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+ if (!pi->mclk_dpm_key_disabled) {
+ if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+ result = ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ if (result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+ if (!pi->pcie_dpm_key_disabled) {
+ if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+ result = ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_PCIeDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
+ if (result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+ ci_apply_disp_minimum_voltage_request(rdev);
+
+ return 0;
+}
+
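+/*
+ * Work out which DPM tables need to be rebuilt for the new state: an
+ * sclk/mclk that is not already in its table sets the corresponding
+ * OD_UPDATE flag, and a change in active crtc count forces an mclk
+ * table refresh.
+ */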
+static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
+ struct radeon_ps *radeon_state)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_ps *state = ci_get_ps(radeon_state);
+ struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
+ u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
+ struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
+ u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
+ u32 i;
+
+ pi->need_update_smu7_dpm_table = 0;
+
+ for (i = 0; i < sclk_table->count; i++) {
+ if (sclk == sclk_table->dpm_levels[i].value)
+ break;
+ }
+
+ if (i >= sclk_table->count) {
+ pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
+ } else {
+ /* XXX check display min clock requirements */
+ if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
+ pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
+ }
+
+ for (i = 0; i < mclk_table->count; i++) {
+ if (mclk == mclk_table->dpm_levels[i].value)
+ break;
+ }
+
+ if (i >= mclk_table->count)
+ pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
+
+ if (rdev->pm.dpm.current_active_crtc_count !=
+ rdev->pm.dpm.new_active_crtc_count)
+ pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
+}
+
+static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
+ struct radeon_ps *radeon_state)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_ps *state = ci_get_ps(radeon_state);
+ u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
+ u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
+ struct ci_dpm_table *dpm_table = &pi->dpm_table;
+ int ret;
+
+ if (!pi->need_update_smu7_dpm_table)
+ return 0;
+
+ if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
+ dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
+
+ if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
+ dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
+
+ if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
+ ret = ci_populate_all_graphic_levels(rdev);
+ if (ret)
+ return ret;
+ }
+
+ if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
+ ret = ci_populate_all_memory_levels(rdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
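+/*
+ * Enable or disable UVD DPM: build the UVD level enable mask from the
+ * voltage limits and keep the lowest mclk level disabled while UVD is
+ * active.
+ */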
+static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct radeon_clock_and_voltage_limits *max_limits;
+ int i;
+
+ if (rdev->pm.dpm.ac_power)
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+ if (enable) {
+ pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
+
+ for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+ if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+ pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
+
+ if (!pi->caps_uvd_dpm)
+ break;
+ }
+ }
+
+ ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
+
+ if (pi->last_mclk_dpm_enable_mask & 0x1) {
+ pi->uvd_enabled = true;
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
+ ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ }
+ } else {
+ if (pi->last_mclk_dpm_enable_mask & 0x1) {
+ pi->uvd_enabled = false;
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
+ ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_MCLKDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ }
+ }
+
+ return (ci_send_msg_to_smc(rdev, enable ?
+ PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+#if 0
+static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct radeon_clock_and_voltage_limits *max_limits;
+ int i;
+
+ if (rdev->pm.dpm.ac_power)
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+ if (enable) {
+ pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
+ for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+ if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+ pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
+
+ if (!pi->caps_vce_dpm)
+ break;
+ }
+ }
+
+ ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.vce_dpm_enable_mask);
+ }
+
+ return (ci_send_msg_to_smc(rdev, enable ?
+ PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct radeon_clock_and_voltage_limits *max_limits;
+ int i;
+
+ if (rdev->pm.dpm.ac_power)
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+ if (enable) {
+ pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
+ for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+ if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+ pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
+
+ if (!pi->caps_samu_dpm)
+ break;
+ }
+ }
+
+ ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.samu_dpm_enable_mask);
+ }
+ return (ci_send_msg_to_smc(rdev, enable ?
+ PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+
+static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ const struct radeon_clock_and_voltage_limits *max_limits;
+ int i;
+
+ if (rdev->pm.dpm.ac_power)
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ else
+ max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
+
+ if (enable) {
+ pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
+ for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
+ if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
+ pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
+
+ if (!pi->caps_acp_dpm)
+ break;
+ }
+ }
+
+ ci_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_ACPDPM_SetEnabledMask,
+ pi->dpm_level_enable_mask.acp_dpm_enable_mask);
+ }
+
+ return (ci_send_msg_to_smc(rdev, enable ?
+ PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
+ 0 : -EINVAL;
+}
+#endif
+
+static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+
+ if (!gate) {
+ if (pi->caps_uvd_dpm ||
+ (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
+ pi->smc_state_table.UvdBootLevel = 0;
+ else
+ pi->smc_state_table.UvdBootLevel =
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
+
+ tmp = RREG32_SMC(DPM_TABLE_475);
+ tmp &= ~UvdBootLevel_MASK;
+ tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
+ WREG32_SMC(DPM_TABLE_475, tmp);
+ }
+
+ return ci_enable_uvd_dpm(rdev, !gate);
+}
+
+#if 0
+static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
+{
+ u8 i;
+ u32 min_evclk = 30000; /* ??? */
+ struct radeon_vce_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+ for (i = 0; i < table->count; i++) {
+ if (table->entries[i].evclk >= min_evclk)
+ return i;
+ }
+
+ return table->count - 1;
+}
+
+static int ci_update_vce_dpm(struct radeon_device *rdev,
+ struct radeon_ps *radeon_new_state,
+ struct radeon_ps *radeon_current_state)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ bool new_vce_clock_non_zero = (radeon_new_state->evclk != 0);
+ bool old_vce_clock_non_zero = (radeon_current_state->evclk != 0);
+ int ret = 0;
+ u32 tmp;
+
+ if (new_vce_clock_non_zero != old_vce_clock_non_zero) {
+ if (new_vce_clock_non_zero) {
+ pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
+
+ tmp = RREG32_SMC(DPM_TABLE_475);
+ tmp &= ~VceBootLevel_MASK;
+ tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
+ WREG32_SMC(DPM_TABLE_475, tmp);
+
+ ret = ci_enable_vce_dpm(rdev, true);
+ } else {
+ ret = ci_enable_vce_dpm(rdev, false);
+ }
+ }
+ return ret;
+}
+
+static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
+{
+ return ci_enable_samu_dpm(rdev, gate);
+}
+
+static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+
+ if (!gate) {
+ pi->smc_state_table.AcpBootLevel = 0;
+
+ tmp = RREG32_SMC(DPM_TABLE_475);
+ tmp &= ~AcpBootLevel_MASK;
+ tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
+ WREG32_SMC(DPM_TABLE_475, tmp);
+ }
+
+ return ci_enable_acp_dpm(rdev, !gate);
+}
+#endif
+
+static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
+ struct radeon_ps *radeon_state)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int ret;
+
+ ret = ci_trim_dpm_states(rdev, radeon_state);
+ if (ret)
+ return ret;
+
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
+ pi->last_mclk_dpm_enable_mask =
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
+ if (pi->uvd_enabled) {
+ if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
+ }
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
+ ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
+
+ return 0;
+}
+
+static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
+ u32 level_mask)
+{
+ u32 level = 0;
+
+ while ((level_mask & (1 << level)) == 0)
+ level++;
+
+ return level;
+}
+
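+/*
+ * Force the sclk/mclk/PCIe DPM state to the highest or lowest enabled
+ * level (or release the forcing for auto), waiting for the SMC to report
+ * the requested index in the TARGET_AND_CURRENT_PROFILE_INDEX registers.
+ */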
+int ci_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ PPSMC_Result smc_result;
+ u32 tmp, levels, i;
+ int ret;
+
+ if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+ if ((!pi->sclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+ levels = 0;
+ tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
+ while (tmp >>= 1)
+ levels++;
+ if (levels) {
+ ret = ci_dpm_force_state_sclk(rdev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+ CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ if ((!pi->mclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+ levels = 0;
+ tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
+ while (tmp >>= 1)
+ levels++;
+ if (levels) {
+ ret = ci_dpm_force_state_mclk(rdev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+ CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ if ((!pi->pcie_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+ levels = 0;
+ tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
+ while (tmp >>= 1)
+ levels++;
+ if (levels) {
+				ret = ci_dpm_force_state_pcie(rdev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
+ CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ }
+ } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+ if ((!pi->sclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
+ levels = ci_get_lowest_enabled_level(rdev,
+ pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
+ ret = ci_dpm_force_state_sclk(rdev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+ CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ if ((!pi->mclk_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
+ levels = ci_get_lowest_enabled_level(rdev,
+ pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
+ ret = ci_dpm_force_state_mclk(rdev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
+ CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ if ((!pi->pcie_dpm_key_disabled) &&
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
+ levels = ci_get_lowest_enabled_level(rdev,
+ pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
+ ret = ci_dpm_force_state_pcie(rdev, levels);
+ if (ret)
+ return ret;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
+ CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
+ if (tmp == levels)
+ break;
+ udelay(1);
+ }
+ }
+ } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
+ if (!pi->sclk_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ if (!pi->mclk_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ if (!pi->pcie_dpm_key_disabled) {
+ smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel);
+ if (smc_result != PPSMC_Result_OK)
+ return -EINVAL;
+ }
+ }
+
+ rdev->pm.dpm.forced_level = level;
+
+ return 0;
+}
+
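+/*
+ * Append the MC_PMG_CMD_EMRS/MRS/MRS1 entries derived from the
+ * MC_SEQ_MISC1 and MC_SEQ_RESERVE_M rows of the MC register table.
+ */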
+static int ci_set_mc_special_registers(struct radeon_device *rdev,
+ struct ci_mc_reg_table *table)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u8 i, j, k;
+ u32 temp_reg;
+
+ for (i = 0, j = table->last; i < table->last; i++) {
+ if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ switch (table->mc_reg_address[i].s1 << 2) {
+ case MC_SEQ_MISC1:
+ temp_reg = RREG32(MC_PMG_CMD_EMRS);
+ table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
+ table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
+ }
+ j++;
+ if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+
+ temp_reg = RREG32(MC_PMG_CMD_MRS);
+ table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
+ table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ if (!pi->mem_gddr5)
+ table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
+ }
+ j++;
+ if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+
+ if (!pi->mem_gddr5) {
+ table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
+ table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
+ }
+ j++;
+ if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ }
+ break;
+ case MC_SEQ_RESERVE_M:
+ temp_reg = RREG32(MC_PMG_CMD_MRS1);
+ table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
+ table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
+ for (k = 0; k < table->num_entries; k++) {
+ table->mc_reg_table_entry[k].mc_data[j] =
+ (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
+ }
+ j++;
+ if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ break;
+ default:
+ break;
+ }
+
+ }
+
+ table->last = j;
+
+ return 0;
+}
+
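+/* Map an MC register offset to its LP shadow counterpart, if one exists. */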
+static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
+{
+ bool result = true;
+
+ switch (in_reg) {
+ case MC_SEQ_RAS_TIMING >> 2:
+ *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
+ break;
+ case MC_SEQ_DLL_STBY >> 2:
+ *out_reg = MC_SEQ_DLL_STBY_LP >> 2;
+ break;
+ case MC_SEQ_G5PDX_CMD0 >> 2:
+ *out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
+ break;
+ case MC_SEQ_G5PDX_CMD1 >> 2:
+ *out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
+ break;
+ case MC_SEQ_G5PDX_CTRL >> 2:
+ *out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
+ break;
+ case MC_SEQ_CAS_TIMING >> 2:
+ *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
+ break;
+ case MC_SEQ_MISC_TIMING >> 2:
+ *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
+ break;
+ case MC_SEQ_MISC_TIMING2 >> 2:
+ *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
+ break;
+ case MC_SEQ_PMG_DVS_CMD >> 2:
+ *out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
+ break;
+ case MC_SEQ_PMG_DVS_CTL >> 2:
+ *out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
+ break;
+ case MC_SEQ_RD_CTL_D0 >> 2:
+ *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
+ break;
+ case MC_SEQ_RD_CTL_D1 >> 2:
+ *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
+ break;
+ case MC_SEQ_WR_CTL_D0 >> 2:
+ *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
+ break;
+ case MC_SEQ_WR_CTL_D1 >> 2:
+ *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
+ break;
+ case MC_PMG_CMD_EMRS >> 2:
+ *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
+ break;
+ case MC_PMG_CMD_MRS >> 2:
+ *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
+ break;
+ case MC_PMG_CMD_MRS1 >> 2:
+ *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
+ break;
+ case MC_SEQ_PMG_TIMING >> 2:
+ *out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
+ break;
+ case MC_PMG_CMD_MRS2 >> 2:
+ *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
+ break;
+ case MC_SEQ_WR_CTL_2 >> 2:
+ *out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
+ break;
+ default:
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
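+/* Flag the registers whose value differs between any two table entries. */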
+static void ci_set_valid_flag(struct ci_mc_reg_table *table)
+{
+ u8 i, j;
+
+ for (i = 0; i < table->last; i++) {
+ for (j = 1; j < table->num_entries; j++) {
+ if (table->mc_reg_table_entry[j-1].mc_data[i] !=
+ table->mc_reg_table_entry[j].mc_data[i]) {
+ table->valid_flag |= 1 << i;
+ break;
+ }
+ }
+ }
+}
+
+static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
+{
+ u32 i;
+ u16 address;
+
+ for (i = 0; i < table->last; i++) {
+ table->mc_reg_address[i].s0 =
+ ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
+ address : table->mc_reg_address[i].s1;
+ }
+}
+
+static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
+ struct ci_mc_reg_table *ci_table)
+{
+ u8 i, j;
+
+ if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ if (table->num_entries > MAX_AC_TIMING_ENTRIES)
+ return -EINVAL;
+
+ for (i = 0; i < table->last; i++)
+ ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
+
+ ci_table->last = table->last;
+
+ for (i = 0; i < table->num_entries; i++) {
+ ci_table->mc_reg_table_entry[i].mclk_max =
+ table->mc_reg_table_entry[i].mclk_max;
+ for (j = 0; j < table->last; j++)
+ ci_table->mc_reg_table_entry[i].mc_data[j] =
+ table->mc_reg_table_entry[i].mc_data[j];
+ }
+ ci_table->num_entries = table->num_entries;
+
+ return 0;
+}
+
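+/*
+ * Copy the live MC sequencer state into the LP shadow registers and build
+ * the driver MC register table from the VBIOS AC timing table.
+ */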
+static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct atom_mc_reg_table *table;
+ struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
+ u8 module_index = rv770_get_memory_module_index(rdev);
+ int ret;
+
+ table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
+ WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
+ WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
+ WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
+ WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
+ WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
+ WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
+ WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
+ WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
+ WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
+ WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
+ WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
+ WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
+ WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
+ WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
+ WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
+ WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
+ WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
+ WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
+ WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
+
+ ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
+ if (ret)
+ goto init_mc_done;
+
+ ret = ci_copy_vbios_mc_reg_table(table, ci_table);
+ if (ret)
+ goto init_mc_done;
+
+ ci_set_s0_mc_reg_index(ci_table);
+
+ ret = ci_set_mc_special_registers(rdev, ci_table);
+ if (ret)
+ goto init_mc_done;
+
+ ci_set_valid_flag(ci_table);
+
+init_mc_done:
+ kfree(table);
+
+ return ret;
+}
+
+static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
+ SMU7_Discrete_MCRegisters *mc_reg_table)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 i, j;
+
+ for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
+ if (pi->mc_reg_table.valid_flag & (1 << j)) {
+ if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
+ return -EINVAL;
+ mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
+ mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
+ i++;
+ }
+ }
+
+ mc_reg_table->last = (u8)i;
+
+ return 0;
+}
+
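+/* Pack only the registers flagged in valid_flag, big-endian, for the SMC. */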
+static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
+ SMU7_Discrete_MCRegisterSet *data,
+ u32 num_entries, u32 valid_flag)
+{
+ u32 i, j;
+
+ for (i = 0, j = 0; j < num_entries; j++) {
+ if (valid_flag & (1 << j)) {
+ data->value[i] = cpu_to_be32(entry->mc_data[j]);
+ i++;
+ }
+ }
+}
+
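+/* Pick the MC register set whose mclk_max covers the given memory clock. */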
+static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
+ const u32 memory_clock,
+ SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 i = 0;
+
+ for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
+ if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
+ break;
+ }
+
+ if ((i == pi->mc_reg_table.num_entries) && (i > 0))
+ --i;
+
+ ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
+ mc_reg_table_data, pi->mc_reg_table.last,
+ pi->mc_reg_table.valid_flag);
+}
+
+static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
+ SMU7_Discrete_MCRegisters *mc_reg_table)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 i;
+
+ for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
+ ci_convert_mc_reg_table_entry_to_smc(rdev,
+ pi->dpm_table.mclk_table.dpm_levels[i].value,
+ &mc_reg_table->data[i]);
+}
+
+static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ int ret;
+
+ memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
+
+ ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
+ if (ret)
+ return ret;
+ ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
+
+ return ci_copy_bytes_to_smc(rdev,
+ pi->mc_reg_table_start,
+ (u8 *)&pi->smc_mc_reg_table,
+ sizeof(SMU7_Discrete_MCRegisters),
+ pi->sram_end);
+}
+
+static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
+ return 0;
+
+ memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
+
+ ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
+
+ return ci_copy_bytes_to_smc(rdev,
+ pi->mc_reg_table_start +
+ offsetof(SMU7_Discrete_MCRegisters, data[0]),
+ (u8 *)&pi->smc_mc_reg_table.data[0],
+ sizeof(SMU7_Discrete_MCRegisterSet) *
+ pi->dpm_table.mclk_table.count,
+ pi->sram_end);
+}
+
+static void ci_enable_voltage_control(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
+
+ tmp |= VOLT_PWRMGT_EN;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+}
+
+static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
+ struct radeon_ps *radeon_state)
+{
+ struct ci_ps *state = ci_get_ps(radeon_state);
+ int i;
+ u16 pcie_speed, max_speed = 0;
+
+ for (i = 0; i < state->performance_level_count; i++) {
+ pcie_speed = state->performance_levels[i].pcie_gen;
+ if (max_speed < pcie_speed)
+ max_speed = pcie_speed;
+ }
+
+ return max_speed;
+}
+
+static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
+{
+ u32 speed_cntl = 0;
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
+ speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
+
+ return (u16)speed_cntl;
+}
+
+static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
+{
+ u32 link_width = 0;
+
+ link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
+ link_width >>= LC_LINK_WIDTH_RD_SHIFT;
+
+ switch (link_width) {
+ case RADEON_PCIE_LC_LINK_WIDTH_X1:
+ return 1;
+ case RADEON_PCIE_LC_LINK_WIDTH_X2:
+ return 2;
+ case RADEON_PCIE_LC_LINK_WIDTH_X4:
+ return 4;
+ case RADEON_PCIE_LC_LINK_WIDTH_X8:
+ return 8;
+ case RADEON_PCIE_LC_LINK_WIDTH_X12:
+ /* not actually supported */
+ return 12;
+ case RADEON_PCIE_LC_LINK_WIDTH_X0:
+ case RADEON_PCIE_LC_LINK_WIDTH_X16:
+ default:
+ return 16;
+ }
+}
+
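+/*
+ * If the new state needs a faster PCIe link, ask the platform for it (via
+ * ACPI) before switching; if it needs a slower link, only note that a
+ * notification is required after the switch.
+ */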
+static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
+ struct radeon_ps *radeon_new_state,
+ struct radeon_ps *radeon_current_state)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ enum radeon_pcie_gen target_link_speed =
+ ci_get_maximum_link_speed(rdev, radeon_new_state);
+ enum radeon_pcie_gen current_link_speed;
+
+ if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
+ current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
+ else
+ current_link_speed = pi->force_pcie_gen;
+
+ pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
+ pi->pspp_notify_required = false;
+ if (target_link_speed > current_link_speed) {
+ switch (target_link_speed) {
+#ifdef CONFIG_ACPI
+ case RADEON_PCIE_GEN3:
+ if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
+ break;
+ pi->force_pcie_gen = RADEON_PCIE_GEN2;
+ if (current_link_speed == RADEON_PCIE_GEN2)
+ break;
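+ /* fall through */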
+ case RADEON_PCIE_GEN2:
+ if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
+ break;
+#endif
+ default:
+ pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
+ break;
+ }
+ } else {
+ if (target_link_speed < current_link_speed)
+ pi->pspp_notify_required = true;
+ }
+}
+
+static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
+ struct radeon_ps *radeon_new_state,
+ struct radeon_ps *radeon_current_state)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ enum radeon_pcie_gen target_link_speed =
+ ci_get_maximum_link_speed(rdev, radeon_new_state);
+ u8 request;
+
+ if (pi->pspp_notify_required) {
+ if (target_link_speed == RADEON_PCIE_GEN3)
+ request = PCIE_PERF_REQ_PECI_GEN3;
+ else if (target_link_speed == RADEON_PCIE_GEN2)
+ request = PCIE_PERF_REQ_PECI_GEN2;
+ else
+ request = PCIE_PERF_REQ_PECI_GEN1;
+
+ if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
+ (ci_get_current_pcie_speed(rdev) > 0))
+ return;
+
+#ifdef CONFIG_ACPI
+ radeon_acpi_pcie_performance_request(rdev, request, false);
+#endif
+ }
+}
+
+static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
+ struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
+ &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
+
+ if (allowed_sclk_vddc_table == NULL)
+ return -EINVAL;
+ if (allowed_sclk_vddc_table->count < 1)
+ return -EINVAL;
+ if (allowed_mclk_vddc_table == NULL)
+ return -EINVAL;
+ if (allowed_mclk_vddc_table->count < 1)
+ return -EINVAL;
+ if (allowed_mclk_vddci_table == NULL)
+ return -EINVAL;
+ if (allowed_mclk_vddci_table->count < 1)
+ return -EINVAL;
+
+ pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
+ pi->max_vddc_in_pp_table =
+ allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+
+ pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
+ pi->max_vddci_in_pp_table =
+ allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
+
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
+ allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
+ allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
+ allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
+ allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
+
+ return 0;
+}
+
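+/* Replace a leakage voltage index with the actual voltage for this part. */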
+static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
+ u32 leakage_index;
+
+ for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
+ if (leakage_table->leakage_id[leakage_index] == *vddc) {
+ *vddc = leakage_table->actual_voltage[leakage_index];
+ break;
+ }
+ }
+}
+
+static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
+ u32 leakage_index;
+
+ for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
+ if (leakage_table->leakage_id[leakage_index] == *vddci) {
+ *vddci = leakage_table->actual_voltage[leakage_index];
+ break;
+ }
+ }
+}
+
+static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
+ struct radeon_clock_voltage_dependency_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
+ }
+}
+
+static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
+ struct radeon_clock_voltage_dependency_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
+ }
+}
+
+static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
+ struct radeon_vce_clock_voltage_dependency_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
+ }
+}
+
+static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
+ struct radeon_uvd_clock_voltage_dependency_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
+ }
+}
+
+static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
+ struct radeon_phase_shedding_limits_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
+ }
+}
+
+static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
+ struct radeon_clock_and_voltage_limits *table)
+{
+ if (table) {
+ ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
+ ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
+ }
+}
+
+static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
+ struct radeon_cac_leakage_table *table)
+{
+ u32 i;
+
+ if (table) {
+ for (i = 0; i < table->count; i++)
+ ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
+ }
+}
+
+static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
+{
+ ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
+ ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
+ ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
+ ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
+ ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
+ ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
+ ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
+ ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
+ ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
+ ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
+ ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
+ ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
+ &rdev->pm.dpm.dyn_state.cac_leakage_table);
+}
+
+static void ci_get_memory_type(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ u32 tmp;
+
+ tmp = RREG32(MC_SEQ_MISC0);
+
+ if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
+ MC_SEQ_MISC0_GDDR5_VALUE)
+ pi->mem_gddr5 = true;
+ else
+ pi->mem_gddr5 = false;
+
+}
+
+void ci_update_current_ps(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ struct ci_ps *new_ps = ci_get_ps(rps);
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ pi->current_rps = *rps;
+ pi->current_ps = *new_ps;
+ pi->current_rps.ps_priv = &pi->current_ps;
+}
+
+void ci_update_requested_ps(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ struct ci_ps *new_ps = ci_get_ps(rps);
+ struct ci_power_info *pi = ci_get_pi(rdev);
+
+ pi->requested_rps = *rps;
+ pi->requested_ps = *new_ps;
+ pi->requested_rps.ps_priv = &pi->requested_ps;
+}
+
+int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
+ struct radeon_ps *new_ps = &requested_ps;
+
+ ci_update_requested_ps(rdev, new_ps);
+
+ ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
+
+ return 0;
+}
+
+void ci_dpm_post_set_power_state(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_ps *new_ps = &pi->requested_rps;
+
+ ci_update_current_ps(rdev, new_ps);
+}
+
+void ci_dpm_setup_asic(struct radeon_device *rdev)
+{
+ ci_read_clock_registers(rdev);
+ ci_get_memory_type(rdev);
+ ci_enable_acpi_power_management(rdev);
+ ci_init_sclk_t(rdev);
+}
+
+int ci_dpm_enable(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+ int ret;
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), false);
+
+ if (ci_is_smc_running(rdev))
+ return -EINVAL;
+ if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
+ ci_enable_voltage_control(rdev);
+ ret = ci_construct_voltage_tables(rdev);
+ if (ret) {
+ DRM_ERROR("ci_construct_voltage_tables failed\n");
+ return ret;
+ }
+ }
+ if (pi->caps_dynamic_ac_timing) {
+ ret = ci_initialize_mc_reg_table(rdev);
+ if (ret)
+ pi->caps_dynamic_ac_timing = false;
+ }
+ if (pi->dynamic_ss)
+ ci_enable_spread_spectrum(rdev, true);
+ if (pi->thermal_protection)
+ ci_enable_thermal_protection(rdev, true);
+ ci_program_sstp(rdev);
+ ci_enable_display_gap(rdev);
+ ci_program_vc(rdev);
+ ret = ci_upload_firmware(rdev);
+ if (ret) {
+ DRM_ERROR("ci_upload_firmware failed\n");
+ return ret;
+ }
+ ret = ci_process_firmware_header(rdev);
+ if (ret) {
+ DRM_ERROR("ci_process_firmware_header failed\n");
+ return ret;
+ }
+ ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
+ if (ret) {
+ DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
+ return ret;
+ }
+ ret = ci_init_smc_table(rdev);
+ if (ret) {
+ DRM_ERROR("ci_init_smc_table failed\n");
+ return ret;
+ }
+ ret = ci_init_arb_table_index(rdev);
+ if (ret) {
+ DRM_ERROR("ci_init_arb_table_index failed\n");
+ return ret;
+ }
+ if (pi->caps_dynamic_ac_timing) {
+ ret = ci_populate_initial_mc_reg_table(rdev);
+ if (ret) {
+ DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
+ return ret;
+ }
+ }
+ ret = ci_populate_pm_base(rdev);
+ if (ret) {
+ DRM_ERROR("ci_populate_pm_base failed\n");
+ return ret;
+ }
+ ci_dpm_start_smc(rdev);
+ ci_enable_vr_hot_gpio_interrupt(rdev);
+ ret = ci_notify_smc_display_change(rdev, false);
+ if (ret) {
+ DRM_ERROR("ci_notify_smc_display_change failed\n");
+ return ret;
+ }
+ ci_enable_sclk_control(rdev, true);
+ ret = ci_enable_ulv(rdev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_ulv failed\n");
+ return ret;
+ }
+ ret = ci_enable_ds_master_switch(rdev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_ds_master_switch failed\n");
+ return ret;
+ }
+ ret = ci_start_dpm(rdev);
+ if (ret) {
+ DRM_ERROR("ci_start_dpm failed\n");
+ return ret;
+ }
+ ret = ci_enable_didt(rdev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_didt failed\n");
+ return ret;
+ }
+ ret = ci_enable_smc_cac(rdev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_smc_cac failed\n");
+ return ret;
+ }
+ ret = ci_enable_power_containment(rdev, true);
+ if (ret) {
+ DRM_ERROR("ci_enable_power_containment failed\n");
+ return ret;
+ }
+ if (rdev->irq.installed &&
+ r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+#if 0
+ PPSMC_Result result;
+#endif
+ ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret) {
+ DRM_ERROR("ci_set_thermal_temperature_range failed\n");
+ return ret;
+ }
+ rdev->irq.dpm_thermal = true;
+ radeon_irq_set(rdev);
+#if 0
+ result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
+
+ if (result != PPSMC_Result_OK)
+ DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
+#endif
+ }
+
+ ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+
+ ci_dpm_powergate_uvd(rdev, true);
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), true);
+
+ ci_update_current_ps(rdev, boot_ps);
+
+ return 0;
+}
+
+void ci_dpm_disable(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), false);
+
+ ci_dpm_powergate_uvd(rdev, false);
+
+ if (!ci_is_smc_running(rdev))
+ return;
+
+ if (pi->thermal_protection)
+ ci_enable_thermal_protection(rdev, false);
+ ci_enable_power_containment(rdev, false);
+ ci_enable_smc_cac(rdev, false);
+ ci_enable_didt(rdev, false);
+ ci_enable_spread_spectrum(rdev, false);
+ ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
+ ci_stop_dpm(rdev);
+ ci_enable_ds_master_switch(rdev, false);
+ ci_enable_ulv(rdev, false);
+ ci_clear_vc(rdev);
+ ci_reset_to_default(rdev);
+ ci_dpm_stop_smc(rdev);
+ ci_force_switch_to_arb_f0(rdev);
+
+ ci_update_current_ps(rdev, boot_ps);
+}
+
+int ci_dpm_set_power_state(struct radeon_device *rdev)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct radeon_ps *new_ps = &pi->requested_rps;
+ struct radeon_ps *old_ps = &pi->current_rps;
+ int ret;
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), false);
+
+ ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
+ if (pi->pcie_performance_request)
+ ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
+ ret = ci_freeze_sclk_mclk_dpm(rdev);
+ if (ret) {
+ DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
+ return ret;
+ }
+ ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
+ if (ret) {
+ DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
+ return ret;
+ }
+ ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
+ if (ret) {
+ DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
+ return ret;
+ }
+#if 0
+ ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
+ if (ret) {
+ DRM_ERROR("ci_update_vce_dpm failed\n");
+ return ret;
+ }
+#endif
+ ret = ci_update_sclk_t(rdev);
+ if (ret) {
+ DRM_ERROR("ci_update_sclk_t failed\n");
+ return ret;
+ }
+ if (pi->caps_dynamic_ac_timing) {
+ ret = ci_update_and_upload_mc_reg_table(rdev);
+ if (ret) {
+ DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
+ return ret;
+ }
+ }
+ ret = ci_program_memory_timing_parameters(rdev);
+ if (ret) {
+ DRM_ERROR("ci_program_memory_timing_parameters failed\n");
+ return ret;
+ }
+ ret = ci_unfreeze_sclk_mclk_dpm(rdev);
+ if (ret) {
+ DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
+ return ret;
+ }
+ ret = ci_upload_dpm_level_enable_mask(rdev);
+ if (ret) {
+ DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
+ return ret;
+ }
+ if (pi->pcie_performance_request)
+ ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
+
+ ret = ci_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
+ if (ret) {
+ DRM_ERROR("ci_dpm_force_performance_level failed\n");
+ return ret;
+ }
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), true);
+
+ return 0;
+}
+
+int ci_dpm_power_control_set_level(struct radeon_device *rdev)
+{
+ return ci_power_control_set_level(rdev);
+}
+
+void ci_dpm_reset_asic(struct radeon_device *rdev)
+{
+ ci_set_boot_state(rdev);
+}
+
+void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
+{
+ ci_program_display_gap(rdev);
+}
+
+union power_info {
+ struct _ATOM_POWERPLAY_INFO info;
+ struct _ATOM_POWERPLAY_INFO_V2 info_2;
+ struct _ATOM_POWERPLAY_INFO_V3 info_3;
+ struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+ struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+ struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
+union pplib_clock_info {
+ struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+ struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+ struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+ struct _ATOM_PPLIB_SI_CLOCK_INFO si;
+ struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
+};
+
+union pplib_power_state {
+ struct _ATOM_PPLIB_STATE v1;
+ struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
+ struct radeon_ps *rps,
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+ u8 table_rev)
+{
+ rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+ rps->class = le16_to_cpu(non_clock_info->usClassification);
+ rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+ if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+ rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+ rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+ } else {
+ rps->vclk = 0;
+ rps->dclk = 0;
+ }
+
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+ rdev->pm.dpm.boot_ps = rps;
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+ rdev->pm.dpm.uvd_ps = rps;
+}
+
+static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
+ struct radeon_ps *rps, int index,
+ union pplib_clock_info *clock_info)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_ps *ps = ci_get_ps(rps);
+ struct ci_pl *pl = &ps->performance_levels[index];
+
+ ps->performance_level_count = index + 1;
+
+ pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
+ pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
+ pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
+ pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
+
+ pl->pcie_gen = r600_get_pcie_gen_support(rdev,
+ pi->sys_pcie_mask,
+ pi->vbios_boot_state.pcie_gen_bootup_value,
+ clock_info->ci.ucPCIEGen);
+ pl->pcie_lane = r600_get_pcie_lane_support(rdev,
+ pi->vbios_boot_state.pcie_lane_bootup_value,
+ le16_to_cpu(clock_info->ci.usPCIELane));
+
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
+ pi->acpi_pcie_gen = pl->pcie_gen;
+ }
+
+ if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
+ pi->ulv.supported = true;
+ pi->ulv.pl = *pl;
+ pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
+ }
+
+ /* patch up boot state */
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+ pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
+ pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
+ pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
+ pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
+ }
+
+ switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+ case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+ pi->use_pcie_powersaving_levels = true;
+ if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
+ pi->pcie_gen_powersaving.max = pl->pcie_gen;
+ if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
+ pi->pcie_gen_powersaving.min = pl->pcie_gen;
+ if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
+ pi->pcie_lane_powersaving.max = pl->pcie_lane;
+ if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
+ pi->pcie_lane_powersaving.min = pl->pcie_lane;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+ pi->use_pcie_performance_levels = true;
+ if (pi->pcie_gen_performance.max < pl->pcie_gen)
+ pi->pcie_gen_performance.max = pl->pcie_gen;
+ if (pi->pcie_gen_performance.min > pl->pcie_gen)
+ pi->pcie_gen_performance.min = pl->pcie_gen;
+ if (pi->pcie_lane_performance.max < pl->pcie_lane)
+ pi->pcie_lane_performance.max = pl->pcie_lane;
+ if (pi->pcie_lane_performance.min > pl->pcie_lane)
+ pi->pcie_lane_performance.min = pl->pcie_lane;
+ break;
+ default:
+ break;
+ }
+}
+
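+/* Parse the ATOM PowerPlay state array into radeon/ci power state structures. */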
+static int ci_parse_power_table(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+ union pplib_power_state *power_state;
+ int i, j, k, non_clock_array_index, clock_array_index;
+ union pplib_clock_info *clock_info;
+ struct _StateArray *state_array;
+ struct _ClockInfoArray *clock_info_array;
+ struct _NonClockInfoArray *non_clock_info_array;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+ u8 *power_state_offset;
+ struct ci_ps *ps;
+
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return -EINVAL;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ state_array = (struct _StateArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usStateArrayOffset));
+ clock_info_array = (struct _ClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+ non_clock_info_array = (struct _NonClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+ rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
+ state_array->ucNumEntries, GFP_KERNEL);
+ if (!rdev->pm.dpm.ps)
+ return -ENOMEM;
+ power_state_offset = (u8 *)state_array->states;
+ rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
+ rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
+ rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
+ for (i = 0; i < state_array->ucNumEntries; i++) {
+ u8 *idx;
+ power_state = (union pplib_power_state *)power_state_offset;
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+ if (!rdev->pm.power_state[i].clock_info)
+ return -EINVAL;
+ ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
+ if (ps == NULL) {
+ kfree(rdev->pm.dpm.ps);
+ return -ENOMEM;
+ }
+ rdev->pm.dpm.ps[i].ps_priv = ps;
+ ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
+ non_clock_info,
+ non_clock_info_array->ucEntrySize);
+ k = 0;
+ idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+ for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+ clock_array_index = idx[j];
+ if (clock_array_index >= clock_info_array->ucNumEntries)
+ continue;
+ if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
+ break;
+ clock_info = (union pplib_clock_info *)
+ ((u8 *)&clock_info_array->clockInfo[0] +
+ (clock_array_index * clock_info_array->ucEntrySize));
+ ci_parse_pplib_clock_info(rdev,
+ &rdev->pm.dpm.ps[i], k,
+ clock_info);
+ k++;
+ }
+ power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+ }
+ rdev->pm.dpm.num_ps = state_array->ucNumEntries;
+ return 0;
+}
+
+int ci_get_vbios_boot_values(struct radeon_device *rdev,
+ struct ci_vbios_boot_state *boot_state)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
+ ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
+ u8 frev, crev;
+ u16 data_offset;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ firmware_info =
+ (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
+ data_offset);
+ boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
+ boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
+ boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
+ boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
+ boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
+ boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
+ boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
+
+ return 0;
+ }
+ return -EINVAL;
+}
+
+void ci_dpm_fini(struct radeon_device *rdev)
+{
+ int i;
+
+ for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
+ kfree(rdev->pm.dpm.ps[i].ps_priv);
+ }
+ kfree(rdev->pm.dpm.ps);
+ kfree(rdev->pm.dpm.priv);
+ kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
+ r600_free_extended_power_table(rdev);
+}
+
+int ci_dpm_init(struct radeon_device *rdev)
+{
+ int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
+ u16 data_offset, size;
+ u8 frev, crev;
+ struct ci_power_info *pi;
+ int ret;
+ u32 mask;
+
+ pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
+ if (pi == NULL)
+ return -ENOMEM;
+ rdev->pm.dpm.priv = pi;
+
+ ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
+ if (ret)
+ pi->sys_pcie_mask = 0;
+ else
+ pi->sys_pcie_mask = mask;
+ pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
+
+ pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
+ pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
+ pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
+ pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
+
+ pi->pcie_lane_performance.max = 0;
+ pi->pcie_lane_performance.min = 16;
+ pi->pcie_lane_powersaving.max = 0;
+ pi->pcie_lane_powersaving.min = 16;
+
+ ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
+ if (ret) {
+ ci_dpm_fini(rdev);
+ return ret;
+ }
+ ret = ci_parse_power_table(rdev);
+ if (ret) {
+ ci_dpm_fini(rdev);
+ return ret;
+ }
+ ret = r600_parse_extended_power_table(rdev);
+ if (ret) {
+ ci_dpm_fini(rdev);
+ return ret;
+ }
+
+ pi->dll_default_on = false;
+ pi->sram_end = SMC_RAM_END;
+
+ pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
+ pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
+
+ pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
+
+ pi->sclk_dpm_key_disabled = 0;
+ pi->mclk_dpm_key_disabled = 0;
+ pi->pcie_dpm_key_disabled = 0;
+
+ pi->caps_sclk_ds = true;
+
+ pi->mclk_strobe_mode_threshold = 40000;
+ pi->mclk_stutter_mode_threshold = 40000;
+ pi->mclk_edc_enable_threshold = 40000;
+ pi->mclk_edc_wr_enable_threshold = 40000;
+
+ ci_initialize_powertune_defaults(rdev);
+
+ pi->caps_fps = false;
+
+ pi->caps_sclk_throttle_low_notification = false;
+
+ pi->caps_uvd_dpm = true;
+
+ ci_get_leakage_voltages(rdev);
+ ci_patch_dependency_tables_with_leakage(rdev);
+ ci_set_private_data_variables_based_on_pptable(rdev);
+
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
+ kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL);
+ if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
+ ci_dpm_fini(rdev);
+ return -ENOMEM;
+ }
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
+ rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
+
+ rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
+ rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
+ rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
+
+ rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
+ rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
+ rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
+ rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
+
+ pi->thermal_temp_setting.temperature_low = 99500;
+ pi->thermal_temp_setting.temperature_high = 100000;
+ pi->thermal_temp_setting.temperature_shutdown = 104000;
+
+ pi->uvd_enabled = false;
+
+ pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
+ pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
+ pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
+ if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
+ pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
+ else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
+ pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
+
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
+ if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
+ pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
+ else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
+ pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
+ else
+ rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
+ }
+
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
+ if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
+ pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
+ else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
+ pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
+ else
+ rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
+ }
+
+ pi->vddc_phase_shed_control = true;
+
+#if defined(CONFIG_ACPI)
+ pi->pcie_performance_request =
+ radeon_acpi_is_pcie_performance_request_supported(rdev);
+#else
+ pi->pcie_performance_request = false;
+#endif
+
+ if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+ &frev, &crev, &data_offset)) {
+ pi->caps_sclk_ss_support = true;
+ pi->caps_mclk_ss_support = true;
+ pi->dynamic_ss = true;
+ } else {
+ pi->caps_sclk_ss_support = false;
+ pi->caps_mclk_ss_support = false;
+ pi->dynamic_ss = true;
+ }
+
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
+ pi->thermal_protection = true;
+ else
+ pi->thermal_protection = false;
+
+ pi->caps_dynamic_ac_timing = true;
+
+ pi->uvd_power_gated = false;
+
+ /* make sure dc limits are valid */
+ if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+ (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
+ return 0;
+}
+
+void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m)
+{
+ u32 sclk = ci_get_average_sclk_freq(rdev);
+ u32 mclk = ci_get_average_mclk_freq(rdev);
+
+ seq_printf(m, "power level avg sclk: %u mclk: %u\n",
+ sclk, mclk);
+}
+
+void ci_dpm_print_power_state(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ struct ci_ps *ps = ci_get_ps(rps);
+ struct ci_pl *pl;
+ int i;
+
+ r600_dpm_print_class_info(rps->class, rps->class2);
+ r600_dpm_print_cap_info(rps->caps);
+ printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ for (i = 0; i < ps->performance_level_count; i++) {
+ pl = &ps->performance_levels[i];
+ printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
+ i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
+ }
+ r600_dpm_print_ps_status(rdev, rps);
+}
+
+u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
+
+ if (low)
+ return requested_state->performance_levels[0].sclk;
+ else
+ return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
+}
+
+u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
+{
+ struct ci_power_info *pi = ci_get_pi(rdev);
+ struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
+
+ if (low)
+ return requested_state->performance_levels[0].mclk;
+ else
+ return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
+}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.h b/drivers/gpu/drm/radeon/ci_dpm.h
new file mode 100644
index 00000000000..93bbed977ff
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ci_dpm.h
@@ -0,0 +1,332 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __CI_DPM_H__
+#define __CI_DPM_H__
+
+#include "ppsmc.h"
+
+#define SMU__NUM_SCLK_DPM_STATE 8
+#define SMU__NUM_MCLK_DPM_LEVELS 6
+#define SMU__NUM_LCLK_DPM_LEVELS 8
+#define SMU__NUM_PCIE_DPM_LEVELS 8
+#include "smu7_discrete.h"
+
+#define CISLANDS_MAX_HARDWARE_POWERLEVELS 2
+
+struct ci_pl {
+ u32 mclk;
+ u32 sclk;
+ enum radeon_pcie_gen pcie_gen;
+ u16 pcie_lane;
+};
+
+struct ci_ps {
+ u16 performance_level_count;
+ bool dc_compatible;
+ u32 sclk_t;
+ struct ci_pl performance_levels[CISLANDS_MAX_HARDWARE_POWERLEVELS];
+};
+
+struct ci_dpm_level {
+ bool enabled;
+ u32 value;
+ u32 param1;
+};
+
+#define CISLAND_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define MAX_REGULAR_DPM_NUMBER 8
+#define CISLAND_MINIMUM_ENGINE_CLOCK 800
+
+struct ci_single_dpm_table {
+ u32 count;
+ struct ci_dpm_level dpm_levels[MAX_REGULAR_DPM_NUMBER];
+};
+
+struct ci_dpm_table {
+ struct ci_single_dpm_table sclk_table;
+ struct ci_single_dpm_table mclk_table;
+ struct ci_single_dpm_table pcie_speed_table;
+ struct ci_single_dpm_table vddc_table;
+ struct ci_single_dpm_table vddci_table;
+ struct ci_single_dpm_table mvdd_table;
+};
+
+struct ci_mc_reg_entry {
+ u32 mclk_max;
+ u32 mc_data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ci_mc_reg_table {
+ u8 last;
+ u8 num_entries;
+ u16 valid_flag;
+ struct ci_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES];
+ SMU7_Discrete_MCRegisterAddress mc_reg_address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+struct ci_ulv_parm
+{
+ bool supported;
+ u32 cg_ulv_parameter;
+ u32 volt_change_delay;
+ struct ci_pl pl;
+};
+
+#define CISLANDS_MAX_LEAKAGE_COUNT 8
+
+struct ci_leakage_voltage {
+ u16 count;
+ u16 leakage_id[CISLANDS_MAX_LEAKAGE_COUNT];
+ u16 actual_voltage[CISLANDS_MAX_LEAKAGE_COUNT];
+};
+
+struct ci_dpm_level_enable_mask {
+ u32 uvd_dpm_enable_mask;
+ u32 vce_dpm_enable_mask;
+ u32 acp_dpm_enable_mask;
+ u32 samu_dpm_enable_mask;
+ u32 sclk_dpm_enable_mask;
+ u32 mclk_dpm_enable_mask;
+ u32 pcie_dpm_enable_mask;
+};
+
+struct ci_vbios_boot_state
+{
+ u16 mvdd_bootup_value;
+ u16 vddc_bootup_value;
+ u16 vddci_bootup_value;
+ u32 sclk_bootup_value;
+ u32 mclk_bootup_value;
+ u16 pcie_gen_bootup_value;
+ u16 pcie_lane_bootup_value;
+};
+
+struct ci_clock_registers {
+ u32 cg_spll_func_cntl;
+ u32 cg_spll_func_cntl_2;
+ u32 cg_spll_func_cntl_3;
+ u32 cg_spll_func_cntl_4;
+ u32 cg_spll_spread_spectrum;
+ u32 cg_spll_spread_spectrum_2;
+ u32 dll_cntl;
+ u32 mclk_pwrmgt_cntl;
+ u32 mpll_ad_func_cntl;
+ u32 mpll_dq_func_cntl;
+ u32 mpll_func_cntl;
+ u32 mpll_func_cntl_1;
+ u32 mpll_func_cntl_2;
+ u32 mpll_ss1;
+ u32 mpll_ss2;
+};
+
+struct ci_thermal_temperature_setting {
+ s32 temperature_low;
+ s32 temperature_high;
+ s32 temperature_shutdown;
+};
+
+struct ci_pcie_perf_range {
+ u16 max;
+ u16 min;
+};
+
+enum ci_pt_config_reg_type {
+ CISLANDS_CONFIGREG_MMR = 0,
+ CISLANDS_CONFIGREG_SMC_IND,
+ CISLANDS_CONFIGREG_DIDT_IND,
+ CISLANDS_CONFIGREG_CACHE,
+ CISLANDS_CONFIGREG_MAX
+};
+
+#define POWERCONTAINMENT_FEATURE_BAPM 0x00000001
+#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
+#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
+
+struct ci_pt_config_reg {
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 value;
+ enum ci_pt_config_reg_type type;
+};
+
+struct ci_pt_defaults {
+ u8 svi_load_line_en;
+ u8 svi_load_line_vddc;
+ u8 tdc_vddc_throttle_release_limit_perc;
+ u8 tdc_mawt;
+ u8 tdc_waterfall_ctl;
+ u8 dte_ambient_temp_base;
+ u32 display_cac;
+ u32 bapm_temp_gradient;
+ u16 bapmti_r[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
+ u16 bapmti_rc[SMU7_DTE_ITERATIONS * SMU7_DTE_SOURCES * SMU7_DTE_SINKS];
+};
+
+#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
+#define DPMTABLE_OD_UPDATE_MCLK 0x00000002
+#define DPMTABLE_UPDATE_SCLK 0x00000004
+#define DPMTABLE_UPDATE_MCLK 0x00000008
+
+struct ci_power_info {
+ struct ci_dpm_table dpm_table;
+ u32 voltage_control;
+ u32 mvdd_control;
+ u32 vddci_control;
+ u32 active_auto_throttle_sources;
+ struct ci_clock_registers clock_registers;
+ u16 acpi_vddc;
+ u16 acpi_vddci;
+ enum radeon_pcie_gen force_pcie_gen;
+ enum radeon_pcie_gen acpi_pcie_gen;
+ struct ci_leakage_voltage vddc_leakage;
+ struct ci_leakage_voltage vddci_leakage;
+ u16 max_vddc_in_pp_table;
+ u16 min_vddc_in_pp_table;
+ u16 max_vddci_in_pp_table;
+ u16 min_vddci_in_pp_table;
+ u32 mclk_strobe_mode_threshold;
+ u32 mclk_stutter_mode_threshold;
+ u32 mclk_edc_enable_threshold;
+ u32 mclk_edc_wr_enable_threshold;
+ struct ci_vbios_boot_state vbios_boot_state;
+ /* smc offsets */
+ u32 sram_end;
+ u32 dpm_table_start;
+ u32 soft_regs_start;
+ u32 mc_reg_table_start;
+ u32 fan_table_start;
+ u32 arb_table_start;
+ /* smc tables */
+ SMU7_Discrete_DpmTable smc_state_table;
+ SMU7_Discrete_MCRegisters smc_mc_reg_table;
+ SMU7_Discrete_PmFuses smc_powertune_table;
+ /* other stuff */
+ struct ci_mc_reg_table mc_reg_table;
+ struct atom_voltage_table vddc_voltage_table;
+ struct atom_voltage_table vddci_voltage_table;
+ struct atom_voltage_table mvdd_voltage_table;
+ struct ci_ulv_parm ulv;
+ u32 power_containment_features;
+ const struct ci_pt_defaults *powertune_defaults;
+ u32 dte_tj_offset;
+ bool vddc_phase_shed_control;
+ struct ci_thermal_temperature_setting thermal_temp_setting;
+ struct ci_dpm_level_enable_mask dpm_level_enable_mask;
+ u32 need_update_smu7_dpm_table;
+ u32 sclk_dpm_key_disabled;
+ u32 mclk_dpm_key_disabled;
+ u32 pcie_dpm_key_disabled;
+ struct ci_pcie_perf_range pcie_gen_performance;
+ struct ci_pcie_perf_range pcie_lane_performance;
+ struct ci_pcie_perf_range pcie_gen_powersaving;
+ struct ci_pcie_perf_range pcie_lane_powersaving;
+ u32 activity_target[SMU7_MAX_LEVELS_GRAPHICS];
+ u32 mclk_activity_target;
+ u32 low_sclk_interrupt_t;
+ u32 last_mclk_dpm_enable_mask;
+ u32 sys_pcie_mask;
+ /* caps */
+ bool caps_power_containment;
+ bool caps_cac;
+ bool caps_sq_ramping;
+ bool caps_db_ramping;
+ bool caps_td_ramping;
+ bool caps_tcp_ramping;
+ bool caps_fps;
+ bool caps_sclk_ds;
+ bool caps_sclk_ss_support;
+ bool caps_mclk_ss_support;
+ bool caps_uvd_dpm;
+ bool caps_vce_dpm;
+ bool caps_samu_dpm;
+ bool caps_acp_dpm;
+ bool caps_automatic_dc_transition;
+ bool caps_sclk_throttle_low_notification;
+ bool caps_dynamic_ac_timing;
+ /* flags */
+ bool thermal_protection;
+ bool pcie_performance_request;
+ bool dynamic_ss;
+ bool dll_default_on;
+ bool cac_enabled;
+ bool uvd_enabled;
+ bool battery_state;
+ bool pspp_notify_required;
+ bool mem_gddr5;
+ bool enable_bapm_feature;
+ bool enable_tdc_limit_feature;
+ bool enable_pkg_pwr_tracking_feature;
+ bool use_pcie_performance_levels;
+ bool use_pcie_powersaving_levels;
+ bool uvd_power_gated;
+ /* driver states */
+ struct radeon_ps current_rps;
+ struct ci_ps current_ps;
+ struct radeon_ps requested_rps;
+ struct ci_ps requested_ps;
+};
+
+#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0
+#define CISLANDS_VOLTAGE_CONTROL_BY_GPIO 0x1
+#define CISLANDS_VOLTAGE_CONTROL_BY_SVID2 0x2
+
+#define CISLANDS_Q88_FORMAT_CONVERSION_UNIT 256
+
+#define CISLANDS_VRC_DFLT0 0x3FFFC000
+#define CISLANDS_VRC_DFLT1 0x000400
+#define CISLANDS_VRC_DFLT2 0xC00080
+#define CISLANDS_VRC_DFLT3 0xC00200
+#define CISLANDS_VRC_DFLT4 0xC01680
+#define CISLANDS_VRC_DFLT5 0xC00033
+#define CISLANDS_VRC_DFLT6 0xC00033
+#define CISLANDS_VRC_DFLT7 0x3FFFC000
+
+#define CISLANDS_CGULVPARAMETER_DFLT 0x00040035
+#define CISLAND_TARGETACTIVITY_DFLT 30
+#define CISLAND_MCLK_TARGETACTIVITY_DFLT 10
+
+#define PCIE_PERF_REQ_REMOVE_REGISTRY 0
+#define PCIE_PERF_REQ_FORCE_LOWPOWER 1
+#define PCIE_PERF_REQ_PECI_GEN1 2
+#define PCIE_PERF_REQ_PECI_GEN2 3
+#define PCIE_PERF_REQ_PECI_GEN3 4
+
+int ci_copy_bytes_to_smc(struct radeon_device *rdev,
+ u32 smc_start_address,
+ const u8 *src, u32 byte_count, u32 limit);
+void ci_start_smc(struct radeon_device *rdev);
+void ci_reset_smc(struct radeon_device *rdev);
+int ci_program_jump_on_start(struct radeon_device *rdev);
+void ci_stop_smc_clock(struct radeon_device *rdev);
+void ci_start_smc_clock(struct radeon_device *rdev);
+bool ci_is_smc_running(struct radeon_device *rdev);
+PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg);
+PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev);
+int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit);
+int ci_read_smc_sram_dword(struct radeon_device *rdev,
+ u32 smc_address, u32 *value, u32 limit);
+int ci_write_smc_sram_dword(struct radeon_device *rdev,
+ u32 smc_address, u32 value, u32 limit);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
new file mode 100644
index 00000000000..53b43dd3cf1
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -0,0 +1,262 @@
+/*
+ * Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "radeon.h"
+#include "cikd.h"
+#include "ppsmc.h"
+#include "radeon_ucode.h"
+
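+/* point SMC_IND_INDEX_0 at an SMC SRAM dword (must be dword aligned and within limit) and disable auto-increment */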
+static int ci_set_smc_sram_address(struct radeon_device *rdev,
+ u32 smc_address, u32 limit)
+{
+ if (smc_address & 3)
+ return -EINVAL;
+ if ((smc_address + 3) > limit)
+ return -EINVAL;
+
+ WREG32(SMC_IND_INDEX_0, smc_address);
+ WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+
+ return 0;
+}
+
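+/* copy a buffer into SMC SRAM as big-endian dwords; a partial trailing dword is merged with the existing contents via read-modify-write */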
+int ci_copy_bytes_to_smc(struct radeon_device *rdev,
+ u32 smc_start_address,
+ const u8 *src, u32 byte_count, u32 limit)
+{
+ u32 data, original_data;
+ u32 addr;
+ u32 extra_shift;
+ int ret;
+
+ if (smc_start_address & 3)
+ return -EINVAL;
+ if ((smc_start_address + byte_count) > limit)
+ return -EINVAL;
+
+ addr = smc_start_address;
+
+ while (byte_count >= 4) {
+ /* SMC address space is BE */
+ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+ ret = ci_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ WREG32(SMC_IND_DATA_0, data);
+
+ src += 4;
+ byte_count -= 4;
+ addr += 4;
+ }
+
+ /* RMW for the final bytes */
+ if (byte_count > 0) {
+ data = 0;
+
+ ret = ci_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ original_data = RREG32(SMC_IND_DATA_0);
+
+ extra_shift = 8 * (4 - byte_count);
+
+ while (byte_count > 0) {
+ data = (data << 8) + *src++;
+ byte_count--;
+ }
+
+ data <<= extra_shift;
+
+ data |= (original_data & ~((~0UL) << extra_shift));
+
+ ret = ci_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ WREG32(SMC_IND_DATA_0, data);
+ }
+ return 0;
+}
+
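+/* release the SMC from reset so it begins executing */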
+void ci_start_smc(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+
+ tmp &= ~RST_REG;
+ WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
+void ci_reset_smc(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL);
+
+ tmp |= RST_REG;
+ WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp);
+}
+
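+/* program the 4 bytes at SMC address 0 that the SMC executes when it is started */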
+int ci_program_jump_on_start(struct radeon_device *rdev)
+{
+ static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
+
+ return ci_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1);
+}
+
+void ci_stop_smc_clock(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+ tmp |= CK_DISABLE;
+
+ WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
+}
+
+void ci_start_smc_clock(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+
+ tmp &= ~CK_DISABLE;
+
+ WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp);
+}
+
+bool ci_is_smc_running(struct radeon_device *rdev)
+{
+ u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+ u32 pc_c = RREG32_SMC(SMC_PC_C);
+
+ if (!(clk & CK_DISABLE) && (0x20100 <= pc_c))
+ return true;
+
+ return false;
+}
+
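+/* post a message to the SMC and poll SMC_RESP_0 (up to usec_timeout) for the result */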
+PPSMC_Result ci_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
+{
+ u32 tmp;
+ int i;
+
+ if (!ci_is_smc_running(rdev))
+ return PPSMC_Result_Failed;
+
+ WREG32(SMC_MESSAGE_0, msg);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(SMC_RESP_0);
+ if (tmp != 0)
+ break;
+ udelay(1);
+ }
+ tmp = RREG32(SMC_RESP_0);
+
+ return (PPSMC_Result)tmp;
+}
+
+PPSMC_Result ci_wait_for_smc_inactive(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int i;
+
+ if (!ci_is_smc_running(rdev))
+ return PPSMC_Result_OK;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0);
+ if ((tmp & CKEN) == 0)
+ break;
+ udelay(1);
+ }
+
+ return PPSMC_Result_OK;
+}
+
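+/* upload the SMC firmware image using auto-incrementing indirect writes */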
+int ci_load_smc_ucode(struct radeon_device *rdev, u32 limit)
+{
+ u32 ucode_start_address;
+ u32 ucode_size;
+ const u8 *src;
+ u32 data;
+
+ if (!rdev->smc_fw)
+ return -EINVAL;
+
+ switch (rdev->family) {
+ case CHIP_BONAIRE:
+ ucode_start_address = BONAIRE_SMC_UCODE_START;
+ ucode_size = BONAIRE_SMC_UCODE_SIZE;
+ break;
+ default:
+ DRM_ERROR("unknown asic in smc ucode loader\n");
+ BUG();
+ }
+
+ if (ucode_size & 3)
+ return -EINVAL;
+
+ src = (const u8 *)rdev->smc_fw->data;
+ WREG32(SMC_IND_INDEX_0, ucode_start_address);
+ WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0);
+ while (ucode_size >= 4) {
+ /* SMC address space is BE */
+ data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];
+
+ WREG32(SMC_IND_DATA_0, data);
+
+ src += 4;
+ ucode_size -= 4;
+ }
+ WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+
+ return 0;
+}
+
+int ci_read_smc_sram_dword(struct radeon_device *rdev,
+ u32 smc_address, u32 *value, u32 limit)
+{
+ int ret;
+
+ ret = ci_set_smc_sram_address(rdev, smc_address, limit);
+ if (ret)
+ return ret;
+
+ *value = RREG32(SMC_IND_DATA_0);
+ return 0;
+}
+
+int ci_write_smc_sram_dword(struct radeon_device *rdev,
+ u32 smc_address, u32 value, u32 limit)
+{
+ int ret;
+
+ ret = ci_set_smc_sram_address(rdev, smc_address, limit);
+ if (ret)
+ return ret;
+
+ WREG32(SMC_IND_DATA_0, value);
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 6dacec4e209..a3bba058727 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -30,22 +30,8 @@
#include "cikd.h"
#include "atom.h"
#include "cik_blit_shaders.h"
-
-/* GFX */
-#define CIK_PFP_UCODE_SIZE 2144
-#define CIK_ME_UCODE_SIZE 2144
-#define CIK_CE_UCODE_SIZE 2144
-/* compute */
-#define CIK_MEC_UCODE_SIZE 4192
-/* interrupts */
-#define BONAIRE_RLC_UCODE_SIZE 2048
-#define KB_RLC_UCODE_SIZE 2560
-#define KV_RLC_UCODE_SIZE 2560
-/* gddr controller */
-#define CIK_MC_UCODE_SIZE 7866
-/* sdma */
-#define CIK_SDMA_UCODE_SIZE 1050
-#define CIK_SDMA_UCODE_VERSION 64
+#include "radeon_ucode.h"
+#include "clearstate_ci.h"
MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
@@ -54,6 +40,7 @@ MODULE_FIRMWARE("radeon/BONAIRE_mec.bin");
MODULE_FIRMWARE("radeon/BONAIRE_mc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_rlc.bin");
MODULE_FIRMWARE("radeon/BONAIRE_sdma.bin");
+MODULE_FIRMWARE("radeon/BONAIRE_smc.bin");
MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
MODULE_FIRMWARE("radeon/KAVERI_me.bin");
MODULE_FIRMWARE("radeon/KAVERI_ce.bin");
@@ -72,10 +59,61 @@ extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+extern void sumo_rlc_fini(struct radeon_device *rdev);
+extern int sumo_rlc_init(struct radeon_device *rdev);
extern void si_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
-extern void si_rlc_fini(struct radeon_device *rdev);
-extern int si_rlc_init(struct radeon_device *rdev);
+extern void si_rlc_reset(struct radeon_device *rdev);
+extern void si_init_uvd_internal_cg(struct radeon_device *rdev);
+extern int cik_sdma_resume(struct radeon_device *rdev);
+extern void cik_sdma_enable(struct radeon_device *rdev, bool enable);
+extern void cik_sdma_fini(struct radeon_device *rdev);
+extern void cik_sdma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
static void cik_rlc_stop(struct radeon_device *rdev);
+static void cik_pcie_gen3_enable(struct radeon_device *rdev);
+static void cik_program_aspm(struct radeon_device *rdev);
+static void cik_init_pg(struct radeon_device *rdev);
+static void cik_init_cg(struct radeon_device *rdev);
+
+/* get temperature in millidegrees */
+int ci_get_temp(struct radeon_device *rdev)
+{
+ u32 temp;
+ int actual_temp = 0;
+
+ temp = (RREG32_SMC(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
+ CTF_TEMP_SHIFT;
+
+ if (temp & 0x200)
+ actual_temp = 255;
+ else
+ actual_temp = temp & 0x1ff;
+
+ actual_temp = actual_temp * 1000;
+
+ return actual_temp;
+}
+
+/* get temperature in millidegrees */
+int kv_get_temp(struct radeon_device *rdev)
+{
+ u32 temp;
+ int actual_temp = 0;
+
+ temp = RREG32_SMC(0xC0300E0C);
+
+ if (temp)
+ actual_temp = (temp / 8) - 49;
+ else
+ actual_temp = 0;
+
+ actual_temp = actual_temp * 1000;
+
+ return actual_temp;
+}
/*
* Indirect registers accessor
@@ -98,6 +136,778 @@ void cik_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
(void)RREG32(PCIE_DATA);
}
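
+/* RLC save/restore register lists, one table per ASIC variant */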
+static const u32 spectre_rlc_save_restore_register_list[] =
+{
+ (0x0e00 << 16) | (0xc12c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc140 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc150 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc15c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc168 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc170 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc178 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc204 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2b8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2bc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2c0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8228 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x829c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x869c >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x98f4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x98f8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9900 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc260 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x90e8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c000 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c00c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c1c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9700 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89bc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8900 >> 2),
+ 0x00000000,
+ 0x3,
+ (0x0e00 << 16) | (0xc130 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc134 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc1fc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc208 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc264 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc268 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc26c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc270 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc274 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc278 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc27c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc280 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc284 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc288 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc28c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc290 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc294 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc298 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc29c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2ac >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2b0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x301d0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30238 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30250 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30254 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30258 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3025c >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc99c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9834 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f00 >> 2),
+ 0x00000000,
+ (0x0001 << 16) | (0x30f00 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f04 >> 2),
+ 0x00000000,
+ (0x0001 << 16) | (0x30f04 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f08 >> 2),
+ 0x00000000,
+ (0x0001 << 16) | (0x30f08 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f0c >> 2),
+ 0x00000000,
+ (0x0001 << 16) | (0x30f0c >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x9b7c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8a14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8a18 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8bf0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8bcc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8b24 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30a04 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a10 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a14 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a18 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a2c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc700 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc704 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc708 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc768 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc770 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc774 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc778 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc77c >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc780 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc784 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc788 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc78c >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc798 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc79c >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7a0 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7a4 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7a8 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7ac >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7b0 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc7b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9100 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c010 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92a8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92ac >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92b8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92bc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92c0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92c4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92c8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92cc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x92d0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c04 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c20 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c38 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c3c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xae00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9604 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac08 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac0c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac10 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac58 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac68 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac6c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac70 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac74 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac78 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac7c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac80 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac84 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac88 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac8c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x970c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9714 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9718 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x971c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x8e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x9e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0xae00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0xbe00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd10 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88bc >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0x89c0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88c4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88c8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8980 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30938 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3093c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30940 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89a0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30900 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30904 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c210 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c214 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c218 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8904 >> 2),
+ 0x00000000,
+ 0x5,
+ (0x0e00 << 16) | (0x8c28 >> 2),
+ (0x0e00 << 16) | (0x8c2c >> 2),
+ (0x0e00 << 16) | (0x8c30 >> 2),
+ (0x0e00 << 16) | (0x8c34 >> 2),
+ (0x0e00 << 16) | (0x9600 >> 2),
+};
+
+static const u32 kalindi_rlc_save_restore_register_list[] =
+{
+ (0x0e00 << 16) | (0xc12c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc140 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc150 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc15c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc168 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc170 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc204 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2b8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2bc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2c0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8228 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x829c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x869c >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x98f4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x98f8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9900 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc260 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x90e8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c000 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c00c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c1c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9700 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xcd20 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89bc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8900 >> 2),
+ 0x00000000,
+ 0x3,
+ (0x0e00 << 16) | (0xc130 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc134 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc1fc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc208 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc264 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc268 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc26c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc270 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc274 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc28c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc290 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc294 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc298 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2a8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc2ac >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x301d0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30238 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30250 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30254 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30258 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3025c >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc900 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc904 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc908 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc90c >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0xc910 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc99c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9834 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f00 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f04 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f08 >> 2),
+ 0x00000000,
+ (0x0000 << 16) | (0x30f0c >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x9b7c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8a14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8a18 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8bf0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8bcc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8b24 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30a04 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a10 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a14 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a18 >> 2),
+ 0x00000000,
+ (0x0600 << 16) | (0x30a2c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc700 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc704 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc708 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xc768 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc770 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc774 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc798 >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0xc79c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9100 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c010 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c04 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c20 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c38 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8c3c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xae00 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9604 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac08 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac0c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac10 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac58 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac68 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac6c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac70 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac74 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac78 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac7c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac80 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac84 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac88 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xac8c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x970c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9714 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x9718 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x971c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x4e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x5e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x6e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x7e00 << 16) | (0x31068 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd10 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0xcd14 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88b8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88bc >> 2),
+ 0x00000000,
+ (0x0400 << 16) | (0x89c0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88c4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88c8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x88d8 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8980 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30938 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3093c >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30940 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89a0 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30900 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x30904 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x89b4 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3e1fc >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c210 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c214 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x3c218 >> 2),
+ 0x00000000,
+ (0x0e00 << 16) | (0x8904 >> 2),
+ 0x00000000,
+ 0x5,
+ (0x0e00 << 16) | (0x8c28 >> 2),
+ (0x0e00 << 16) | (0x8c2c >> 2),
+ (0x0e00 << 16) | (0x8c30 >> 2),
+ (0x0e00 << 16) | (0x8c34 >> 2),
+ (0x0e00 << 16) | (0x9600 >> 2),
+};
+
static const u32 bonaire_golden_spm_registers[] =
{
0x30800, 0xe0ffffff, 0xe0000000
@@ -744,7 +1554,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
const char *chip_name;
size_t pfp_req_size, me_req_size, ce_req_size,
mec_req_size, rlc_req_size, mc_req_size,
- sdma_req_size;
+ sdma_req_size, smc_req_size;
char fw_name[30];
int err;
@@ -760,6 +1570,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
rlc_req_size = BONAIRE_RLC_UCODE_SIZE * 4;
mc_req_size = CIK_MC_UCODE_SIZE * 4;
sdma_req_size = CIK_SDMA_UCODE_SIZE * 4;
+ smc_req_size = ALIGN(BONAIRE_SMC_UCODE_SIZE, 4);
break;
case CHIP_KAVERI:
chip_name = "KAVERI";
@@ -851,7 +1662,7 @@ static int cik_init_microcode(struct radeon_device *rdev)
err = -EINVAL;
}

- /* No MC ucode on APUs */
+ /* No SMC or MC ucode on APUs */
if (!(rdev->flags & RADEON_IS_IGP)) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
@@ -863,6 +1674,21 @@ static int cik_init_microcode(struct radeon_device *rdev)
rdev->mc_fw->size, fw_name);
err = -EINVAL;
}
+
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
+ err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ } else if (rdev->smc_fw->size != smc_req_size) {
+ printk(KERN_ERR
+ "cik_smc: Bogus length %zu in firmware \"%s\"\n",
+ rdev->smc_fw->size, fw_name);
+ err = -EINVAL;
+ }
}

out:
@@ -881,6 +1707,8 @@ out:
rdev->rlc_fw = NULL;
release_firmware(rdev->mc_fw);
rdev->mc_fw = NULL;
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
}
return err;
}
@@ -1880,7 +2708,46 @@ static void cik_gpu_init(struct radeon_device *rdev)
gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_KAVERI:
- /* TODO */
+ rdev->config.cik.max_shader_engines = 1;
+ rdev->config.cik.max_tile_pipes = 4;
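+ /* Kaveri device ids are grouped by CU count and render backend configuration */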
+ if ((rdev->pdev->device == 0x1304) ||
+ (rdev->pdev->device == 0x1305) ||
+ (rdev->pdev->device == 0x130C) ||
+ (rdev->pdev->device == 0x130F) ||
+ (rdev->pdev->device == 0x1310) ||
+ (rdev->pdev->device == 0x1311) ||
+ (rdev->pdev->device == 0x131C)) {
+ rdev->config.cik.max_cu_per_sh = 8;
+ rdev->config.cik.max_backends_per_se = 2;
+ } else if ((rdev->pdev->device == 0x1309) ||
+ (rdev->pdev->device == 0x130A) ||
+ (rdev->pdev->device == 0x130D) ||
+ (rdev->pdev->device == 0x1313)) {
+ rdev->config.cik.max_cu_per_sh = 6;
+ rdev->config.cik.max_backends_per_se = 2;
+ } else if ((rdev->pdev->device == 0x1306) ||
+ (rdev->pdev->device == 0x1307) ||
+ (rdev->pdev->device == 0x130B) ||
+ (rdev->pdev->device == 0x130E) ||
+ (rdev->pdev->device == 0x1315) ||
+ (rdev->pdev->device == 0x131B)) {
+ rdev->config.cik.max_cu_per_sh = 4;
+ rdev->config.cik.max_backends_per_se = 1;
+ } else {
+ rdev->config.cik.max_cu_per_sh = 3;
+ rdev->config.cik.max_backends_per_se = 1;
+ }
+ rdev->config.cik.max_sh_per_se = 1;
+ rdev->config.cik.max_texture_channel_caches = 4;
+ rdev->config.cik.max_gprs = 256;
+ rdev->config.cik.max_gs_threads = 16;
+ rdev->config.cik.max_hw_contexts = 8;
+
+ rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
+ rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
+ rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
+ rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
+ gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_KABINI:
default:
@@ -2535,8 +3402,8 @@ static int cik_cp_gfx_resume(struct radeon_device *rdev)
/* ring 0 - compute and gfx */
/* Set ring buffer size */
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -2587,11 +3454,12 @@ u32 cik_compute_ring_get_rptr(struct radeon_device *rdev,
if (rdev->wb.enabled) {
rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
} else {
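+ /* cik_srbm_select() changes global register routing, so serialize it with srbm_mutex */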
+ mutex_lock(&rdev->srbm_mutex);
cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
rptr = RREG32(CP_HQD_PQ_RPTR);
cik_srbm_select(rdev, 0, 0, 0, 0);
+ mutex_unlock(&rdev->srbm_mutex);
}
- rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
return rptr;
}
@@ -2604,11 +3472,12 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
if (rdev->wb.enabled) {
wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]);
} else {
+ mutex_lock(&rdev->srbm_mutex);
cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
wptr = RREG32(CP_HQD_PQ_WPTR);
cik_srbm_select(rdev, 0, 0, 0, 0);
+ mutex_unlock(&rdev->srbm_mutex);
}
- wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
return wptr;
}
@@ -2616,10 +3485,8 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
void cik_compute_ring_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
- u32 wptr = (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask;
-
- rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(wptr);
- WDOORBELL32(ring->doorbell_offset, wptr);
+ rdev->wb.wb[ring->wptr_offs/4] = cpu_to_le32(ring->wptr);
+ WDOORBELL32(ring->doorbell_offset, ring->wptr);
}
/**
@@ -2897,6 +3764,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
WREG32(CP_CPF_DEBUG, tmp);
/* init the pipes */
+ mutex_lock(&rdev->srbm_mutex);
for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) {
int me = (i < 4) ? 1 : 2;
int pipe = (i < 4) ? i : (i - 4);
@@ -2915,10 +3783,11 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
tmp = RREG32(CP_HPD_EOP_CONTROL);
tmp &= ~EOP_SIZE_MASK;
- tmp |= drm_order(MEC_HPD_SIZE / 8);
+ tmp |= order_base_2(MEC_HPD_SIZE / 8);
WREG32(CP_HPD_EOP_CONTROL, tmp);
}
cik_srbm_select(rdev, 0, 0, 0, 0);
+ mutex_unlock(&rdev->srbm_mutex);
/* init the queues. Just two for now. */
for (i = 0; i < 2; i++) {
@@ -2972,6 +3841,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
mqd->static_thread_mgmt23[0] = 0xffffffff;
mqd->static_thread_mgmt23[1] = 0xffffffff;
+ mutex_lock(&rdev->srbm_mutex);
cik_srbm_select(rdev, rdev->ring[idx].me,
rdev->ring[idx].pipe,
rdev->ring[idx].queue, 0);
@@ -3030,9 +3900,9 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
~(QUEUE_SIZE_MASK | RPTR_BLOCK_SIZE_MASK);
mqd->queue_state.cp_hqd_pq_control |=
- drm_order(rdev->ring[idx].ring_size / 8);
+ order_base_2(rdev->ring[idx].ring_size / 8);
mqd->queue_state.cp_hqd_pq_control |=
- (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8);
+ (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8);
#ifdef __BIG_ENDIAN
mqd->queue_state.cp_hqd_pq_control |= BUF_SWAP_32BIT;
#endif
@@ -3099,6 +3969,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active);
cik_srbm_select(rdev, 0, 0, 0, 0);
+ mutex_unlock(&rdev->srbm_mutex);
radeon_bo_kunmap(rdev->ring[idx].mqd_obj);
radeon_bo_unreserve(rdev->ring[idx].mqd_obj);
@@ -3142,13 +4013,6 @@ static int cik_cp_resume(struct radeon_device *rdev)
{
int r;
- /* Reset all cp blocks */
- WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
- RREG32(GRBM_SOFT_RESET);
- mdelay(15);
- WREG32(GRBM_SOFT_RESET, 0);
- RREG32(GRBM_SOFT_RESET);
-
r = cik_cp_load_microcode(rdev);
if (r)
return r;
@@ -3163,579 +4027,6 @@ static int cik_cp_resume(struct radeon_device *rdev)
return 0;
}
-/*
- * sDMA - System DMA
- * Starting with CIK, the GPU has new asynchronous
- * DMA engines. These engines are used for compute
- * and gfx. There are two DMA engines (SDMA0, SDMA1)
- * and each one supports 1 ring buffer used for gfx
- * and 2 queues used for compute.
- *
- * The programming model is very similar to the CP
- * (ring buffer, IBs, etc.), but sDMA has it's own
- * packet format that is different from the PM4 format
- * used by the CP. sDMA supports copying data, writing
- * embedded data, solid fills, and a number of other
- * things. It also has support for tiling/detiling of
- * buffers.
- */
-/**
- * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to schedule
- *
- * Schedule an IB in the DMA ring (CIK).
- */
-void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
- struct radeon_ib *ib)
-{
- struct radeon_ring *ring = &rdev->ring[ib->ring];
- u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;
-
- if (rdev->wb.enabled) {
- u32 next_rptr = ring->wptr + 5;
- while ((next_rptr & 7) != 4)
- next_rptr++;
- next_rptr += 4;
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
- radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
- radeon_ring_write(ring, 1); /* number of DWs to follow */
- radeon_ring_write(ring, next_rptr);
- }
-
- /* IB packet must end on a 8 DW boundary */
- while ((ring->wptr & 7) != 4)
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
- radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
- radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
- radeon_ring_write(ring, ib->length_dw);
-
-}
-
-/**
- * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
- *
- * @rdev: radeon_device pointer
- * @fence: radeon fence object
- *
- * Add a DMA fence packet to the ring to write
- * the fence seq number and DMA trap packet to generate
- * an interrupt if needed (CIK).
- */
-void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
- struct radeon_fence *fence)
-{
- struct radeon_ring *ring = &rdev->ring[fence->ring];
- u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
- u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
- SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
- u32 ref_and_mask;
-
- if (fence->ring == R600_RING_TYPE_DMA_INDEX)
- ref_and_mask = SDMA0;
- else
- ref_and_mask = SDMA1;
-
- /* write the fence */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
- radeon_ring_write(ring, addr & 0xffffffff);
- radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
- radeon_ring_write(ring, fence->seq);
- /* generate an interrupt */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
- /* flush HDP */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
- radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
- radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
- radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
- radeon_ring_write(ring, ref_and_mask); /* MASK */
- radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
-}
-
-/**
- * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- * @semaphore: radeon semaphore object
- * @emit_wait: wait or signal semaphore
- *
- * Add a DMA semaphore packet to the ring wait on or signal
- * other rings (CIK).
- */
-void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait)
-{
- u64 addr = semaphore->gpu_addr;
- u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
-
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
- radeon_ring_write(ring, addr & 0xfffffff8);
- radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
-}
-
-/**
- * cik_sdma_gfx_stop - stop the gfx async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Stop the gfx async dma ring buffers (CIK).
- */
-static void cik_sdma_gfx_stop(struct radeon_device *rdev)
-{
- u32 rb_cntl, reg_offset;
- int i;
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
-
- for (i = 0; i < 2; i++) {
- if (i == 0)
- reg_offset = SDMA0_REGISTER_OFFSET;
- else
- reg_offset = SDMA1_REGISTER_OFFSET;
- rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
- rb_cntl &= ~SDMA_RB_ENABLE;
- WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
- WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
- }
-}
-
-/**
- * cik_sdma_rlc_stop - stop the compute async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Stop the compute async dma queues (CIK).
- */
-static void cik_sdma_rlc_stop(struct radeon_device *rdev)
-{
- /* XXX todo */
-}
-
-/**
- * cik_sdma_enable - stop the async dma engines
- *
- * @rdev: radeon_device pointer
- * @enable: enable/disable the DMA MEs.
- *
- * Halt or unhalt the async dma engines (CIK).
- */
-static void cik_sdma_enable(struct radeon_device *rdev, bool enable)
-{
- u32 me_cntl, reg_offset;
- int i;
-
- for (i = 0; i < 2; i++) {
- if (i == 0)
- reg_offset = SDMA0_REGISTER_OFFSET;
- else
- reg_offset = SDMA1_REGISTER_OFFSET;
- me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
- if (enable)
- me_cntl &= ~SDMA_HALT;
- else
- me_cntl |= SDMA_HALT;
- WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
- }
-}
-
-/**
- * cik_sdma_gfx_resume - setup and start the async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Set up the gfx DMA ring buffers and enable them (CIK).
- * Returns 0 for success, error for failure.
- */
-static int cik_sdma_gfx_resume(struct radeon_device *rdev)
-{
- struct radeon_ring *ring;
- u32 rb_cntl, ib_cntl;
- u32 rb_bufsz;
- u32 reg_offset, wb_offset;
- int i, r;
-
- for (i = 0; i < 2; i++) {
- if (i == 0) {
- ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
- reg_offset = SDMA0_REGISTER_OFFSET;
- wb_offset = R600_WB_DMA_RPTR_OFFSET;
- } else {
- ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
- reg_offset = SDMA1_REGISTER_OFFSET;
- wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
- }
-
- WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
- WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
-
- /* Set ring buffer size in dwords */
- rb_bufsz = drm_order(ring->ring_size / 4);
- rb_cntl = rb_bufsz << 1;
-#ifdef __BIG_ENDIAN
- rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
-#endif
- WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
-
- /* Initialize the ring buffer's read and write pointers */
- WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
- WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);
-
- /* set the wb address whether it's enabled or not */
- WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
- upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
- WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
- ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
-
- if (rdev->wb.enabled)
- rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;
-
- WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
- WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);
-
- ring->wptr = 0;
- WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);
-
- ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;
-
- /* enable DMA RB */
- WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);
-
- ib_cntl = SDMA_IB_ENABLE;
-#ifdef __BIG_ENDIAN
- ib_cntl |= SDMA_IB_SWAP_ENABLE;
-#endif
- /* enable DMA IBs */
- WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);
-
- ring->ready = true;
-
- r = radeon_ring_test(rdev, ring->idx, ring);
- if (r) {
- ring->ready = false;
- return r;
- }
- }
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
-
- return 0;
-}
-
-/**
- * cik_sdma_rlc_resume - setup and start the async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Set up the compute DMA queues and enable them (CIK).
- * Returns 0 for success, error for failure.
- */
-static int cik_sdma_rlc_resume(struct radeon_device *rdev)
-{
- /* XXX todo */
- return 0;
-}
-
-/**
- * cik_sdma_load_microcode - load the sDMA ME ucode
- *
- * @rdev: radeon_device pointer
- *
- * Loads the sDMA0/1 ucode.
- * Returns 0 for success, -EINVAL if the ucode is not available.
- */
-static int cik_sdma_load_microcode(struct radeon_device *rdev)
-{
- const __be32 *fw_data;
- int i;
-
- if (!rdev->sdma_fw)
- return -EINVAL;
-
- /* stop the gfx rings and rlc compute queues */
- cik_sdma_gfx_stop(rdev);
- cik_sdma_rlc_stop(rdev);
-
- /* halt the MEs */
- cik_sdma_enable(rdev, false);
-
- /* sdma0 */
- fw_data = (const __be32 *)rdev->sdma_fw->data;
- WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
- for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
- WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
- WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
-
- /* sdma1 */
- fw_data = (const __be32 *)rdev->sdma_fw->data;
- WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
- for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
- WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
- WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
-
- WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
- WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
- return 0;
-}
-
-/**
- * cik_sdma_resume - setup and start the async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Set up the DMA engines and enable them (CIK).
- * Returns 0 for success, error for failure.
- */
-static int cik_sdma_resume(struct radeon_device *rdev)
-{
- int r;
-
- /* Reset dma */
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
- RREG32(SRBM_SOFT_RESET);
-
- r = cik_sdma_load_microcode(rdev);
- if (r)
- return r;
-
- /* unhalt the MEs */
- cik_sdma_enable(rdev, true);
-
- /* start the gfx rings and rlc compute queues */
- r = cik_sdma_gfx_resume(rdev);
- if (r)
- return r;
- r = cik_sdma_rlc_resume(rdev);
- if (r)
- return r;
-
- return 0;
-}
-
-/**
- * cik_sdma_fini - tear down the async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Stop the async dma engines and free the rings (CIK).
- */
-static void cik_sdma_fini(struct radeon_device *rdev)
-{
- /* stop the gfx rings and rlc compute queues */
- cik_sdma_gfx_stop(rdev);
- cik_sdma_rlc_stop(rdev);
- /* halt the MEs */
- cik_sdma_enable(rdev, false);
- radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
- radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
- /* XXX - compute dma queue tear down */
-}
-
-/**
- * cik_copy_dma - copy pages using the DMA engine
- *
- * @rdev: radeon_device pointer
- * @src_offset: src GPU address
- * @dst_offset: dst GPU address
- * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
- *
- * Copy GPU paging using the DMA engine (CIK).
- * Used by the radeon ttm implementation to move pages if
- * registered as the asic copy callback.
- */
-int cik_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
-{
- struct radeon_semaphore *sem = NULL;
- int ring_index = rdev->asic->copy.dma_ring_index;
- struct radeon_ring *ring = &rdev->ring[ring_index];
- u32 size_in_bytes, cur_size_in_bytes;
- int i, num_loops;
- int r = 0;
-
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
- }
-
- size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
- num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
- r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
- return r;
- }
-
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
-
- for (i = 0; i < num_loops; i++) {
- cur_size_in_bytes = size_in_bytes;
- if (cur_size_in_bytes > 0x1fffff)
- cur_size_in_bytes = 0x1fffff;
- size_in_bytes -= cur_size_in_bytes;
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
- radeon_ring_write(ring, cur_size_in_bytes);
- radeon_ring_write(ring, 0); /* src/dst endian swap */
- radeon_ring_write(ring, src_offset & 0xffffffff);
- radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
- radeon_ring_write(ring, dst_offset & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
- src_offset += cur_size_in_bytes;
- dst_offset += cur_size_in_bytes;
- }
-
- r = radeon_fence_emit(rdev, fence, ring->idx);
- if (r) {
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
-
- radeon_ring_unlock_commit(rdev, ring);
- radeon_semaphore_free(rdev, &sem, *fence);
-
- return r;
-}
-
-/**
- * cik_sdma_ring_test - simple async dma engine test
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Test the DMA engine by writing using it to write an
- * value to memory. (CIK).
- * Returns 0 for success, error for failure.
- */
-int cik_sdma_ring_test(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- unsigned i;
- int r;
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
- u32 tmp;
-
- if (!ptr) {
- DRM_ERROR("invalid vram scratch pointer\n");
- return -EINVAL;
- }
-
- tmp = 0xCAFEDEAD;
- writel(tmp, ptr);
-
- r = radeon_ring_lock(rdev, ring, 4);
- if (r) {
- DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
- return r;
- }
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
- radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
- radeon_ring_write(ring, 1); /* number of DWs to follow */
- radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
-
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = readl(ptr);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
-
- if (i < rdev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- return r;
-}
-
-/**
- * cik_sdma_ib_test - test an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Test a simple IB in the DMA ring (CIK).
- * Returns 0 on success, error on failure.
- */
-int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- struct radeon_ib ib;
- unsigned i;
- int r;
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
- u32 tmp = 0;
-
- if (!ptr) {
- DRM_ERROR("invalid vram scratch pointer\n");
- return -EINVAL;
- }
-
- tmp = 0xCAFEDEAD;
- writel(tmp, ptr);
-
- r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
- if (r) {
- DRM_ERROR("radeon: failed to get ib (%d).\n", r);
- return r;
- }
-
- ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
- ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
- ib.ptr[3] = 1;
- ib.ptr[4] = 0xDEADBEEF;
- ib.length_dw = 5;
-
- r = radeon_ib_schedule(rdev, &ib, NULL);
- if (r) {
- radeon_ib_free(rdev, &ib);
- DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
- return r;
- }
- r = radeon_fence_wait(ib.fence, false);
- if (r) {
- DRM_ERROR("radeon: fence wait failed (%d).\n", r);
- return r;
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = readl(ptr);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < rdev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
- } else {
- DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
- r = -EINVAL;
- }
- radeon_ib_free(rdev, &ib);
- return r;
-}
-
-
static void cik_print_gpu_status_regs(struct radeon_device *rdev)
{
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
@@ -3785,7 +4076,7 @@ static void cik_print_gpu_status_regs(struct radeon_device *rdev)
* mask to be used by cik_gpu_soft_reset().
* Returns a mask of the blocks to be reset.
*/
-static u32 cik_gpu_check_soft_reset(struct radeon_device *rdev)
+u32 cik_gpu_check_soft_reset(struct radeon_device *rdev)
{
u32 reset_mask = 0;
u32 tmp;
@@ -4036,34 +4327,6 @@ bool cik_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
-/**
- * cik_sdma_is_lockup - Check if the DMA engine is locked up
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Check if the async DMA engine is locked up (CIK).
- * Returns true if the engine appears to be locked up, false if not.
- */
-bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 reset_mask = cik_gpu_check_soft_reset(rdev);
- u32 mask;
-
- if (ring->idx == R600_RING_TYPE_DMA_INDEX)
- mask = RADEON_RESET_DMA;
- else
- mask = RADEON_RESET_DMA1;
-
- if (!(reset_mask & mask)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force ring activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
-
/* MC */
/**
* cik_mc_program - program the GPU memory controller
@@ -4320,6 +4583,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
/* XXX SH_MEM regs */
/* where to put LDS, scratch, GPUVM in FSA64 space */
+ mutex_lock(&rdev->srbm_mutex);
for (i = 0; i < 16; i++) {
cik_srbm_select(rdev, 0, 0, 0, i);
/* CP and shaders */
@@ -4335,6 +4599,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
/* XXX SDMA RLC - todo */
}
cik_srbm_select(rdev, 0, 0, 0, 0);
+ mutex_unlock(&rdev->srbm_mutex);
cik_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -4598,131 +4863,8 @@ void cik_vm_set_page(struct radeon_device *rdev,
}
} else {
/* DMA */
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = ndw;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
- addr += incr;
- value |= r600_flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- while (count) {
- ndw = count;
- if (ndw > 0x7FFFF)
- ndw = 0x7FFFF;
-
- if (flags & RADEON_VM_PAGE_VALID)
- value = addr;
- else
- value = 0;
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe);
- ib->ptr[ib->length_dw++] = r600_flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = ndw; /* number of entries */
- pe += ndw * 8;
- addr += ndw * incr;
- count -= ndw;
- }
- }
- while (ib->length_dw & 0x7)
- ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
- }
-}
-
-/**
- * cik_dma_vm_flush - cik vm flush using sDMA
- *
- * @rdev: radeon_device pointer
- *
- * Update the page table base and flush the VM TLB
- * using sDMA (CIK).
- */
-void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
-{
- struct radeon_ring *ring = &rdev->ring[ridx];
- u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
- SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
- u32 ref_and_mask;
-
- if (vm == NULL)
- return;
-
- if (ridx == R600_RING_TYPE_DMA_INDEX)
- ref_and_mask = SDMA0;
- else
- ref_and_mask = SDMA1;
-
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- if (vm->id < 8) {
- radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
- } else {
- radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+ cik_sdma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
}
- radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
-
- /* update SH_MEM_* regs */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
- radeon_ring_write(ring, VMID(vm->id));
-
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, SH_MEM_BASES >> 2);
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
- radeon_ring_write(ring, 1);
-
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
- radeon_ring_write(ring, VMID(0));
-
- /* flush HDP */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
- radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
- radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
- radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
- radeon_ring_write(ring, ref_and_mask); /* MASK */
- radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
-
- /* flush TLB */
- radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
- radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
- radeon_ring_write(ring, 1 << vm->id);
}
/*
@@ -4731,31 +4873,34 @@ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm
* variety of functions, the most important of which is
* the interrupt controller.
*/
-/**
- * cik_rlc_stop - stop the RLC ME
- *
- * @rdev: radeon_device pointer
- *
- * Halt the RLC ME (MicroEngine) (CIK).
- */
-static void cik_rlc_stop(struct radeon_device *rdev)
+static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
+ bool enable)
{
- int i, j, k;
- u32 mask, tmp;
+ u32 tmp = RREG32(CP_INT_CNTL_RING0);
- tmp = RREG32(CP_INT_CNTL_RING0);
- tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ if (enable)
+ tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ else
+ tmp &= ~(CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(CP_INT_CNTL_RING0, tmp);
+}
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
- RREG32(CB_CGTT_SCLK_CTRL);
+static void cik_enable_lbpw(struct radeon_device *rdev, bool enable)
+{
+ u32 tmp;
- tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc;
- WREG32(RLC_CGCG_CGLS_CTRL, tmp);
+ tmp = RREG32(RLC_LB_CNTL);
+ if (enable)
+ tmp |= LOAD_BALANCE_ENABLE;
+ else
+ tmp &= ~LOAD_BALANCE_ENABLE;
+ WREG32(RLC_LB_CNTL, tmp);
+}
- WREG32(RLC_CNTL, 0);
+static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
+{
+ u32 i, j, k;
+ u32 mask;
for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
@@ -4777,6 +4922,84 @@ static void cik_rlc_stop(struct radeon_device *rdev)
}
}
+static void cik_update_rlc(struct radeon_device *rdev, u32 rlc)
+{
+ u32 tmp;
+
+ tmp = RREG32(RLC_CNTL);
+ if (tmp != rlc)
+ WREG32(RLC_CNTL, rlc);
+}
+
+static u32 cik_halt_rlc(struct radeon_device *rdev)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_CNTL);
+
+ if (data & RLC_ENABLE) {
+ u32 i;
+
+ data &= ~RLC_ENABLE;
+ WREG32(RLC_CNTL, data);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if ((RREG32(RLC_GPM_STAT) & RLC_GPM_BUSY) == 0)
+ break;
+ udelay(1);
+ }
+
+ cik_wait_for_rlc_serdes(rdev);
+ }
+
+ return orig;
+}
+
+void cik_enter_rlc_safe_mode(struct radeon_device *rdev)
+{
+ u32 tmp, i, mask;
+
+ tmp = REQ | MESSAGE(MSG_ENTER_RLC_SAFE_MODE);
+ WREG32(RLC_GPR_REG2, tmp);
+
+ mask = GFX_POWER_STATUS | GFX_CLOCK_STATUS;
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if ((RREG32(RLC_GPM_STAT) & mask) == mask)
+ break;
+ udelay(1);
+ }
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if ((RREG32(RLC_GPR_REG2) & REQ) == 0)
+ break;
+ udelay(1);
+ }
+}
+
+void cik_exit_rlc_safe_mode(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ tmp = REQ | MESSAGE(MSG_EXIT_RLC_SAFE_MODE);
+ WREG32(RLC_GPR_REG2, tmp);
+}
+
+/**
+ * cik_rlc_stop - stop the RLC ME
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Halt the RLC ME (MicroEngine) (CIK).
+ */
+static void cik_rlc_stop(struct radeon_device *rdev)
+{
+ WREG32(RLC_CNTL, 0);
+
+ cik_enable_gui_idle_interrupt(rdev, false);
+
+ cik_wait_for_rlc_serdes(rdev);
+}
+
/**
* cik_rlc_start - start the RLC ME
*
@@ -4786,13 +5009,9 @@ static void cik_rlc_stop(struct radeon_device *rdev)
*/
static void cik_rlc_start(struct radeon_device *rdev)
{
- u32 tmp;
-
WREG32(RLC_CNTL, RLC_ENABLE);
- tmp = RREG32(CP_INT_CNTL_RING0);
- tmp |= (CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
- WREG32(CP_INT_CNTL_RING0, tmp);
+ cik_enable_gui_idle_interrupt(rdev, true);
udelay(50);
}
@@ -4808,8 +5027,7 @@ static void cik_rlc_start(struct radeon_device *rdev)
*/
static int cik_rlc_resume(struct radeon_device *rdev)
{
- u32 i, size;
- u32 clear_state_info[3];
+ u32 i, size, tmp;
const __be32 *fw_data;
if (!rdev->rlc_fw)
@@ -4830,12 +5048,15 @@ static int cik_rlc_resume(struct radeon_device *rdev)
cik_rlc_stop(rdev);
- WREG32(GRBM_SOFT_RESET, SOFT_RESET_RLC);
- RREG32(GRBM_SOFT_RESET);
- udelay(50);
- WREG32(GRBM_SOFT_RESET, 0);
- RREG32(GRBM_SOFT_RESET);
- udelay(50);
+ /* disable CG */
+ tmp = RREG32(RLC_CGCG_CGLS_CTRL) & 0xfffffffc;
+ WREG32(RLC_CGCG_CGLS_CTRL, tmp);
+
+ si_rlc_reset(rdev);
+
+ cik_init_pg(rdev);
+
+ cik_init_cg(rdev);
WREG32(RLC_LB_CNTR_INIT, 0);
WREG32(RLC_LB_CNTR_MAX, 0x00008000);
@@ -4854,20 +5075,757 @@ static int cik_rlc_resume(struct radeon_device *rdev)
WREG32(RLC_GPM_UCODE_DATA, be32_to_cpup(fw_data++));
WREG32(RLC_GPM_UCODE_ADDR, 0);
- /* XXX */
- clear_state_info[0] = 0;//upper_32_bits(rdev->rlc.save_restore_gpu_addr);
- clear_state_info[1] = 0;//rdev->rlc.save_restore_gpu_addr;
- clear_state_info[2] = 0;//cik_default_size;
- WREG32(RLC_GPM_SCRATCH_ADDR, 0x3d);
- for (i = 0; i < 3; i++)
- WREG32(RLC_GPM_SCRATCH_DATA, clear_state_info[i]);
- WREG32(RLC_DRIVER_DMA_STATUS, 0);
+ /* XXX - find out what chips support lbpw */
+ cik_enable_lbpw(rdev, false);
+
+ if (rdev->family == CHIP_BONAIRE)
+ WREG32(RLC_DRIVER_DMA_STATUS, 0);
cik_rlc_start(rdev);
return 0;
}
+static void cik_enable_cgcg(struct radeon_device *rdev, bool enable)
+{
+ u32 data, orig, tmp, tmp2;
+
+ orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
+ cik_enable_gui_idle_interrupt(rdev, true);
+
+ tmp = cik_halt_rlc(rdev);
+
+ cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+ WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
+ WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
+ tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE;
+ WREG32(RLC_SERDES_WR_CTRL, tmp2);
+
+ cik_update_rlc(rdev, tmp);
+
+ data |= CGCG_EN | CGLS_EN;
+ } else {
+ cik_enable_gui_idle_interrupt(rdev, false);
+
+ RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(CB_CGTT_SCLK_CTRL);
+ RREG32(CB_CGTT_SCLK_CTRL);
+
+ data &= ~(CGCG_EN | CGLS_EN);
+ }
+
+ if (orig != data)
+ WREG32(RLC_CGCG_CGLS_CTRL, data);
+
+}
+
+static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
+{
+ u32 data, orig, tmp = 0;
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
+ if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) {
+ if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
+ orig = data = RREG32(CP_MEM_SLP_CNTL);
+ data |= CP_MEM_LS_EN;
+ if (orig != data)
+ WREG32(CP_MEM_SLP_CNTL, data);
+ }
+ }
+
+ orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+ data &= 0xfffffffd;
+ if (orig != data)
+ WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+ tmp = cik_halt_rlc(rdev);
+
+ cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+ WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
+ WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
+ data = BPM_ADDR_MASK | MGCG_OVERRIDE_0;
+ WREG32(RLC_SERDES_WR_CTRL, data);
+
+ cik_update_rlc(rdev, tmp);
+
+ if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS) {
+ orig = data = RREG32(CGTS_SM_CTRL_REG);
+ data &= ~SM_MODE_MASK;
+ data |= SM_MODE(0x2);
+ data |= SM_MODE_ENABLE;
+ data &= ~CGTS_OVERRIDE;
+ if ((rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGLS) &&
+ (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGTS_LS))
+ data &= ~CGTS_LS_OVERRIDE;
+ data &= ~ON_MONITOR_ADD_MASK;
+ data |= ON_MONITOR_ADD_EN;
+ data |= ON_MONITOR_ADD(0x96);
+ if (orig != data)
+ WREG32(CGTS_SM_CTRL_REG, data);
+ }
+ } else {
+ orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
+ data |= 0x00000002;
+ if (orig != data)
+ WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
+
+ data = RREG32(RLC_MEM_SLP_CNTL);
+ if (data & RLC_MEM_LS_EN) {
+ data &= ~RLC_MEM_LS_EN;
+ WREG32(RLC_MEM_SLP_CNTL, data);
+ }
+
+ data = RREG32(CP_MEM_SLP_CNTL);
+ if (data & CP_MEM_LS_EN) {
+ data &= ~CP_MEM_LS_EN;
+ WREG32(CP_MEM_SLP_CNTL, data);
+ }
+
+ orig = data = RREG32(CGTS_SM_CTRL_REG);
+ data |= CGTS_OVERRIDE | CGTS_LS_OVERRIDE;
+ if (orig != data)
+ WREG32(CGTS_SM_CTRL_REG, data);
+
+ tmp = cik_halt_rlc(rdev);
+
+ cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+ WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
+ WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
+ data = BPM_ADDR_MASK | MGCG_OVERRIDE_1;
+ WREG32(RLC_SERDES_WR_CTRL, data);
+
+ cik_update_rlc(rdev, tmp);
+ }
+}
+
+static const u32 mc_cg_registers[] =
+{
+ MC_HUB_MISC_HUB_CG,
+ MC_HUB_MISC_SIP_CG,
+ MC_HUB_MISC_VM_CG,
+ MC_XPB_CLK_GAT,
+ ATC_MISC_CG,
+ MC_CITF_MISC_WR_CG,
+ MC_CITF_MISC_RD_CG,
+ MC_CITF_MISC_VM_CG,
+ VM_L2_CG,
+};
+
+static void cik_enable_mc_ls(struct radeon_device *rdev,
+ bool enable)
+{
+ int i;
+ u32 orig, data;
+
+ for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+ orig = data = RREG32(mc_cg_registers[i]);
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
+ data |= MC_LS_ENABLE;
+ else
+ data &= ~MC_LS_ENABLE;
+ if (data != orig)
+ WREG32(mc_cg_registers[i], data);
+ }
+}
+
+static void cik_enable_mc_mgcg(struct radeon_device *rdev,
+ bool enable)
+{
+ int i;
+ u32 orig, data;
+
+ for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+ orig = data = RREG32(mc_cg_registers[i]);
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
+ data |= MC_CG_ENABLE;
+ else
+ data &= ~MC_CG_ENABLE;
+ if (data != orig)
+ WREG32(mc_cg_registers[i], data);
+ }
+}
+
+static void cik_enable_sdma_mgcg(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 orig, data;
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
+ WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
+ WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
+ } else {
+ orig = data = RREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET);
+ data |= 0xff000000;
+ if (data != orig)
+ WREG32(SDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, data);
+
+ orig = data = RREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET);
+ data |= 0xff000000;
+ if (data != orig)
+ WREG32(SDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, data);
+ }
+}
+
+static void cik_enable_sdma_mgls(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 orig, data;
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_LS)) {
+ orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
+ data |= 0x100;
+ if (orig != data)
+ WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
+
+ orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
+ data |= 0x100;
+ if (orig != data)
+ WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
+ } else {
+ orig = data = RREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
+ data &= ~0x100;
+ if (orig != data)
+ WREG32(SDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET, data);
+
+ orig = data = RREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET);
+ data &= ~0x100;
+ if (orig != data)
+ WREG32(SDMA0_POWER_CNTL + SDMA1_REGISTER_OFFSET, data);
+ }
+}
+
+static void cik_enable_uvd_mgcg(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 orig, data;
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
+ data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
+ data = 0xfff;
+ WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
+
+ orig = data = RREG32(UVD_CGC_CTRL);
+ data |= DCM;
+ if (orig != data)
+ WREG32(UVD_CGC_CTRL, data);
+ } else {
+ data = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
+ data &= ~0xfff;
+ WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, data);
+
+ orig = data = RREG32(UVD_CGC_CTRL);
+ data &= ~DCM;
+ if (orig != data)
+ WREG32(UVD_CGC_CTRL, data);
+ }
+}
+
+static void cik_enable_bif_mgls(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 orig, data;
+
+ orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
+ data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
+ REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
+ else
+ data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
+ REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
+
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_CNTL2, data);
+}
+
+static void cik_enable_hdp_mgcg(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 orig, data;
+
+ orig = data = RREG32(HDP_HOST_PATH_CNTL);
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
+ data &= ~CLOCK_GATING_DIS;
+ else
+ data |= CLOCK_GATING_DIS;
+
+ if (orig != data)
+ WREG32(HDP_HOST_PATH_CNTL, data);
+}
+
+static void cik_enable_hdp_ls(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 orig, data;
+
+ orig = data = RREG32(HDP_MEM_POWER_LS);
+
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
+ data |= HDP_LS_ENABLE;
+ else
+ data &= ~HDP_LS_ENABLE;
+
+ if (orig != data)
+ WREG32(HDP_MEM_POWER_LS, data);
+}
+
+void cik_update_cg(struct radeon_device *rdev,
+ u32 block, bool enable)
+{
+ if (block & RADEON_CG_BLOCK_GFX) {
+ /* order matters! */
+ if (enable) {
+ cik_enable_mgcg(rdev, true);
+ cik_enable_cgcg(rdev, true);
+ } else {
+ cik_enable_cgcg(rdev, false);
+ cik_enable_mgcg(rdev, false);
+ }
+ }
+
+ if (block & RADEON_CG_BLOCK_MC) {
+ if (!(rdev->flags & RADEON_IS_IGP)) {
+ cik_enable_mc_mgcg(rdev, enable);
+ cik_enable_mc_ls(rdev, enable);
+ }
+ }
+
+ if (block & RADEON_CG_BLOCK_SDMA) {
+ cik_enable_sdma_mgcg(rdev, enable);
+ cik_enable_sdma_mgls(rdev, enable);
+ }
+
+ if (block & RADEON_CG_BLOCK_BIF) {
+ cik_enable_bif_mgls(rdev, enable);
+ }
+
+ if (block & RADEON_CG_BLOCK_UVD) {
+ if (rdev->has_uvd)
+ cik_enable_uvd_mgcg(rdev, enable);
+ }
+
+ if (block & RADEON_CG_BLOCK_HDP) {
+ cik_enable_hdp_mgcg(rdev, enable);
+ cik_enable_hdp_ls(rdev, enable);
+ }
+}
+
+static void cik_init_cg(struct radeon_device *rdev)
+{
+
+ cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, true);
+
+ if (rdev->has_uvd)
+ si_init_uvd_internal_cg(rdev);
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), true);
+}
+
+static void cik_fini_cg(struct radeon_device *rdev)
+{
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), false);
+
+ cik_update_cg(rdev, RADEON_CG_BLOCK_GFX, false);
+}
+
+static void cik_enable_sck_slowdown_on_pu(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
+ data |= SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
+ else
+ data &= ~SMU_CLK_SLOWDOWN_ON_PU_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+static void cik_enable_sck_slowdown_on_pd(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_RLC_SMU_HS))
+ data |= SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
+ else
+ data &= ~SMU_CLK_SLOWDOWN_ON_PD_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+static void cik_enable_cp_pg(struct radeon_device *rdev, bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_CP))
+ data &= ~DISABLE_CP_PG;
+ else
+ data |= DISABLE_CP_PG;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+static void cik_enable_gds_pg(struct radeon_device *rdev, bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GDS))
+ data &= ~DISABLE_GDS_PG;
+ else
+ data |= DISABLE_GDS_PG;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+#define CP_ME_TABLE_SIZE 96
+#define CP_ME_TABLE_OFFSET 2048
+#define CP_MEC_TABLE_OFFSET 4096
+
+void cik_init_cp_pg_table(struct radeon_device *rdev)
+{
+ const __be32 *fw_data;
+ volatile u32 *dst_ptr;
+ int me, i, max_me = 4;
+ u32 bo_offset = 0;
+ u32 table_offset;
+
+ if (rdev->family == CHIP_KAVERI)
+ max_me = 5;
+
+ if (rdev->rlc.cp_table_ptr == NULL)
+ return;
+
+ /* write the cp table buffer */
+ dst_ptr = rdev->rlc.cp_table_ptr;
+ for (me = 0; me < max_me; me++) {
+ if (me == 0) {
+ fw_data = (const __be32 *)rdev->ce_fw->data;
+ table_offset = CP_ME_TABLE_OFFSET;
+ } else if (me == 1) {
+ fw_data = (const __be32 *)rdev->pfp_fw->data;
+ table_offset = CP_ME_TABLE_OFFSET;
+ } else if (me == 2) {
+ fw_data = (const __be32 *)rdev->me_fw->data;
+ table_offset = CP_ME_TABLE_OFFSET;
+ } else {
+ fw_data = (const __be32 *)rdev->mec_fw->data;
+ table_offset = CP_MEC_TABLE_OFFSET;
+ }
+
+ for (i = 0; i < CP_ME_TABLE_SIZE; i ++) {
+ dst_ptr[bo_offset + i] = be32_to_cpu(fw_data[table_offset + i]);
+ }
+ bo_offset += CP_ME_TABLE_SIZE;
+ }
+}
+
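/*
 * Sizing sketch for the table filled above (illustrative): each ME block is
 * CP_ME_TABLE_SIZE (96) dwords and KAVERI has five MEs, so the backing
 * buffer needs at most 5 * 96 dwords = 1920 bytes, which matches the
 * CP_ME_TABLE_SIZE * 5 * 4 value that cik_startup() assigns to
 * rdev->rlc.cp_table_size.
 */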
+static void cik_enable_gfx_cgpg(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 data, orig;
+
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
+ orig = data = RREG32(RLC_PG_CNTL);
+ data |= GFX_PG_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+
+ orig = data = RREG32(RLC_AUTO_PG_CTRL);
+ data |= AUTO_PG_EN;
+ if (orig != data)
+ WREG32(RLC_AUTO_PG_CTRL, data);
+ } else {
+ orig = data = RREG32(RLC_PG_CNTL);
+ data &= ~GFX_PG_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+
+ orig = data = RREG32(RLC_AUTO_PG_CTRL);
+ data &= ~AUTO_PG_EN;
+ if (orig != data)
+ WREG32(RLC_AUTO_PG_CTRL, data);
+
+ data = RREG32(DB_RENDER_CONTROL);
+ }
+}
+
+static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
+{
+ u32 mask = 0, tmp, tmp1;
+ int i;
+
+ cik_select_se_sh(rdev, se, sh);
+ tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
+ tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
+ cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
+
+ tmp &= 0xffff0000;
+
+ tmp |= tmp1;
+ tmp >>= 16;
+
+ for (i = 0; i < rdev->config.cik.max_cu_per_sh; i ++) {
+ mask <<= 1;
+ mask |= 1;
+ }
+
+ return (~tmp) & mask;
+}
+
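/*
 * Worked example for the bitmap math above (illustrative; assumes the upper
 * halves of the two SHADER_ARRAY_CONFIG registers carry the disabled-CU
 * bits that the shift exposes): with max_cu_per_sh = 8 the loop builds
 * mask = 0xff, and a merged, shifted value of 0x3 (CUs 0 and 1 disabled)
 * gives (~0x3) & 0xff = 0xfc, i.e. CUs 2-7 reported active.
 */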
+static void cik_init_ao_cu_mask(struct radeon_device *rdev)
+{
+ u32 i, j, k, active_cu_number = 0;
+ u32 mask, counter, cu_bitmap;
+ u32 tmp = 0;
+
+ for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
+ for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
+ mask = 1;
+ cu_bitmap = 0;
+ counter = 0;
+ for (k = 0; k < rdev->config.cik.max_cu_per_sh; k ++) {
+ if (cik_get_cu_active_bitmap(rdev, i, j) & mask) {
+ if (counter < 2)
+ cu_bitmap |= mask;
+ counter ++;
+ }
+ mask <<= 1;
+ }
+
+ active_cu_number += counter;
+ tmp |= (cu_bitmap << (i * 16 + j * 8));
+ }
+ }
+
+ WREG32(RLC_PG_AO_CU_MASK, tmp);
+
+ tmp = RREG32(RLC_MAX_PG_CU);
+ tmp &= ~MAX_PU_CU_MASK;
+ tmp |= MAX_PU_CU(active_cu_number);
+ WREG32(RLC_MAX_PG_CU, tmp);
+}
+
+static void cik_enable_gfx_static_mgpg(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_SMG))
+ data |= STATIC_PER_CU_PG_ENABLE;
+ else
+ data &= ~STATIC_PER_CU_PG_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+static void cik_enable_gfx_dynamic_mgpg(struct radeon_device *rdev,
+ bool enable)
+{
+ u32 data, orig;
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_DMG))
+ data |= DYN_PER_CU_PG_ENABLE;
+ else
+ data &= ~DYN_PER_CU_PG_ENABLE;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+}
+
+#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
+#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET 0x3D
+
+static void cik_init_gfx_cgpg(struct radeon_device *rdev)
+{
+ u32 data, orig;
+ u32 i;
+
+ if (rdev->rlc.cs_data) {
+ WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
+ WREG32(RLC_GPM_SCRATCH_DATA, upper_32_bits(rdev->rlc.clear_state_gpu_addr));
+ WREG32(RLC_GPM_SCRATCH_DATA, lower_32_bits(rdev->rlc.clear_state_gpu_addr));
+ WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.clear_state_size);
+ } else {
+ WREG32(RLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
+ for (i = 0; i < 3; i++)
+ WREG32(RLC_GPM_SCRATCH_DATA, 0);
+ }
+ if (rdev->rlc.reg_list) {
+ WREG32(RLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
+ for (i = 0; i < rdev->rlc.reg_list_size; i++)
+ WREG32(RLC_GPM_SCRATCH_DATA, rdev->rlc.reg_list[i]);
+ }
+
+ orig = data = RREG32(RLC_PG_CNTL);
+ data |= GFX_PG_SRC;
+ if (orig != data)
+ WREG32(RLC_PG_CNTL, data);
+
+ WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+ WREG32(RLC_CP_TABLE_RESTORE, rdev->rlc.cp_table_gpu_addr >> 8);
+
+ data = RREG32(CP_RB_WPTR_POLL_CNTL);
+ data &= ~IDLE_POLL_COUNT_MASK;
+ data |= IDLE_POLL_COUNT(0x60);
+ WREG32(CP_RB_WPTR_POLL_CNTL, data);
+
+ data = 0x10101010;
+ WREG32(RLC_PG_DELAY, data);
+
+ data = RREG32(RLC_PG_DELAY_2);
+ data &= ~0xff;
+ data |= 0x3;
+ WREG32(RLC_PG_DELAY_2, data);
+
+ data = RREG32(RLC_AUTO_PG_CTRL);
+ data &= ~GRBM_REG_SGIT_MASK;
+ data |= GRBM_REG_SGIT(0x700);
+ WREG32(RLC_AUTO_PG_CTRL, data);
+
+}
+
+static void cik_update_gfx_pg(struct radeon_device *rdev, bool enable)
+{
+ cik_enable_gfx_cgpg(rdev, enable);
+ cik_enable_gfx_static_mgpg(rdev, enable);
+ cik_enable_gfx_dynamic_mgpg(rdev, enable);
+}
+
+u32 cik_get_csb_size(struct radeon_device *rdev)
+{
+ u32 count = 0;
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+
+ if (rdev->rlc.cs_data == NULL)
+ return 0;
+
+ /* begin clear state */
+ count += 2;
+ /* context control state */
+ count += 3;
+
+ for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT)
+ count += 2 + ext->reg_count;
+ else
+ return 0;
+ }
+ }
+ /* pa_sc_raster_config/pa_sc_raster_config1 */
+ count += 4;
+ /* end clear state */
+ count += 2;
+ /* clear state */
+ count += 2;
+
+ return count;
+}
+
+void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
+{
+ u32 count = 0, i;
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+
+ if (rdev->rlc.cs_data == NULL)
+ return;
+ if (buffer == NULL)
+ return;
+
+ buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
+ buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;
+
+ buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
+ buffer[count++] = 0x80000000;
+ buffer[count++] = 0x80000000;
+
+ for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT) {
+ buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
+ buffer[count++] = ext->reg_index - 0xa000;
+ for (i = 0; i < ext->reg_count; i++)
+ buffer[count++] = ext->extent[i];
+ } else {
+ return;
+ }
+ }
+ }
+
+ buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2);
+ buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
+ switch (rdev->family) {
+ case CHIP_BONAIRE:
+ buffer[count++] = 0x16000012;
+ buffer[count++] = 0x00000000;
+ break;
+ case CHIP_KAVERI:
+ buffer[count++] = 0x00000000; /* XXX */
+ buffer[count++] = 0x00000000;
+ break;
+ case CHIP_KABINI:
+ buffer[count++] = 0x00000000; /* XXX */
+ buffer[count++] = 0x00000000;
+ break;
+ default:
+ buffer[count++] = 0x00000000;
+ buffer[count++] = 0x00000000;
+ break;
+ }
+
+ buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
+ buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;
+
+ buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
+ buffer[count++] = 0;
+}
+
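/*
 * The two helpers above are meant to be used as a pair: the size routine
 * reports how many dwords the clear-state buffer needs and the fill routine
 * writes exactly that many.  A minimal sketch of a caller (illustrative
 * only; the mapped buffer pointer is assumed to come from the common RLC
 * buffer setup):
 */
static void example_write_clear_state(struct radeon_device *rdev,
				      volatile u32 *csb_ptr)
{
	u32 dws = cik_get_csb_size(rdev);	/* dwords required */

	if (dws == 0)
		return;				/* no cs_data for this asic */
	rdev->rlc.clear_state_size = dws;	/* later consumed by cik_init_gfx_cgpg() */
	cik_get_csb_buffer(rdev, csb_ptr);	/* fills exactly dws dwords */
}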
+static void cik_init_pg(struct radeon_device *rdev)
+{
+ if (rdev->pg_flags) {
+ cik_enable_sck_slowdown_on_pu(rdev, true);
+ cik_enable_sck_slowdown_on_pd(rdev, true);
+ if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+ cik_init_gfx_cgpg(rdev);
+ cik_enable_cp_pg(rdev, true);
+ cik_enable_gds_pg(rdev, true);
+ }
+ cik_init_ao_cu_mask(rdev);
+ cik_update_gfx_pg(rdev, true);
+ }
+}
+
+static void cik_fini_pg(struct radeon_device *rdev)
+{
+ if (rdev->pg_flags) {
+ cik_update_gfx_pg(rdev, false);
+ if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+ cik_enable_cp_pg(rdev, false);
+ cik_enable_gds_pg(rdev, false);
+ }
+ }
+}
+
/*
* Interrupts
* Starting with r6xx, interrupts are handled via a ring buffer.
@@ -5030,7 +5988,7 @@ static int cik_irq_init(struct radeon_device *rdev)
WREG32(INTERRUPT_CNTL, interrupt_cntl);
WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
- rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+ rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
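	/*
	 * Illustration: order_base_2() returns ceil(log2(n)), the same value
	 * the old drm_order() helper produced, so e.g. a 64 KiB IH ring holds
	 * 64 * 1024 / 4 = 16384 dwords and gives rb_bufsz = 14, which the
	 * code below folds into ih_rb_cntl as the ring size field.
	 */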
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
IH_WPTR_OVERFLOW_CLEAR |
@@ -5086,6 +6044,7 @@ int cik_irq_set(struct radeon_device *rdev)
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
u32 dma_cntl, dma_cntl1;
+ u32 thermal_int;
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
@@ -5118,6 +6077,13 @@ int cik_irq_set(struct radeon_device *rdev)
cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
+ if (rdev->flags & RADEON_IS_IGP)
+ thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
+ ~(THERM_INTH_MASK | THERM_INTL_MASK);
+ else
+ thermal_int = RREG32_SMC(CG_THERMAL_INT) &
+ ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
+
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("cik_irq_set: sw int gfx\n");
@@ -5275,6 +6241,14 @@ int cik_irq_set(struct radeon_device *rdev)
hpd6 |= DC_HPDx_INT_EN;
}
+ if (rdev->irq.dpm_thermal) {
+ DRM_DEBUG("dpm thermal\n");
+ if (rdev->flags & RADEON_IS_IGP)
+ thermal_int |= THERM_INTH_MASK | THERM_INTL_MASK;
+ else
+ thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
+ }
+
WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
WREG32(SDMA0_CNTL + SDMA0_REGISTER_OFFSET, dma_cntl);
@@ -5309,6 +6283,11 @@ int cik_irq_set(struct radeon_device *rdev)
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);
+ if (rdev->flags & RADEON_IS_IGP)
+ WREG32_SMC(CG_THERMAL_INT_CTRL, thermal_int);
+ else
+ WREG32_SMC(CG_THERMAL_INT, thermal_int);
+
return 0;
}
@@ -5520,6 +6499,7 @@ int cik_irq_process(struct radeon_device *rdev)
bool queue_hotplug = false;
bool queue_reset = false;
u32 addr, status, mc_client;
+ bool queue_thermal = false;
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
@@ -5753,6 +6733,10 @@ restart_ih:
break;
}
break;
+ case 124: /* UVD */
+ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+ break;
case 146:
case 147:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
@@ -5870,6 +6854,19 @@ restart_ih:
break;
}
break;
+ case 230: /* thermal low to high */
+ DRM_DEBUG("IH: thermal low to high\n");
+ rdev->pm.dpm.thermal.high_to_low = false;
+ queue_thermal = true;
+ break;
+ case 231: /* thermal high to low */
+ DRM_DEBUG("IH: thermal high to low\n");
+ rdev->pm.dpm.thermal.high_to_low = true;
+ queue_thermal = true;
+ break;
+ case 233: /* GUI IDLE */
+ DRM_DEBUG("IH: GUI idle\n");
+ break;
case 241: /* SDMA Privileged inst */
case 247: /* SDMA Privileged inst */
DRM_ERROR("Illegal instruction in SDMA command stream\n");
@@ -5909,9 +6906,6 @@ restart_ih:
break;
}
break;
- case 233: /* GUI IDLE */
- DRM_DEBUG("IH: GUI idle\n");
- break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
@@ -5925,6 +6919,8 @@ restart_ih:
schedule_work(&rdev->hotplug_work);
if (queue_reset)
schedule_work(&rdev->reset_work);
+ if (queue_thermal)
+ schedule_work(&rdev->pm.dpm.thermal.work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
@@ -5954,6 +6950,18 @@ static int cik_startup(struct radeon_device *rdev)
struct radeon_ring *ring;
int r;
+ /* enable pcie gen2/3 link */
+ cik_pcie_gen3_enable(rdev);
+ /* enable aspm */
+ cik_program_aspm(rdev);
+
+ /* scratch needs to be initialized before MC */
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
+ cik_mc_program(rdev);
+
if (rdev->flags & RADEON_IS_IGP) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
!rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) {
@@ -5981,18 +6989,26 @@ static int cik_startup(struct radeon_device *rdev)
}
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
- cik_mc_program(rdev);
r = cik_pcie_gart_enable(rdev);
if (r)
return r;
cik_gpu_init(rdev);
/* allocate rlc buffers */
- r = si_rlc_init(rdev);
+ if (rdev->flags & RADEON_IS_IGP) {
+ if (rdev->family == CHIP_KAVERI) {
+ rdev->rlc.reg_list = spectre_rlc_save_restore_register_list;
+ rdev->rlc.reg_list_size =
+ (u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
+ } else {
+ rdev->rlc.reg_list = kalindi_rlc_save_restore_register_list;
+ rdev->rlc.reg_list_size =
+ (u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
+ }
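/*
 * Usage pattern for the two helpers above, as the clockgating code below
 * applies it: save the running RLC state, program the SERDES masks while
 * the RLC is stopped, then restore whatever was running before:
 *
 *	u32 saved = cik_halt_rlc(rdev);
 *	... RLC_SERDES_WR_* register writes ...
 *	cik_update_rlc(rdev, saved);
 */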
+ }
+ rdev->rlc.cs_data = ci_cs_data;
+ rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4;
+ r = sumo_rlc_init(rdev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
@@ -6040,12 +7056,15 @@ static int cik_startup(struct radeon_device *rdev)
return r;
}
- r = cik_uvd_resume(rdev);
+ r = radeon_uvd_resume(rdev);
if (!r) {
- r = radeon_fence_driver_start_ring(rdev,
- R600_RING_TYPE_UVD_INDEX);
- if (r)
- dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+ r = uvd_v4_2_resume(rdev);
+ if (!r) {
+ r = radeon_fence_driver_start_ring(rdev,
+ R600_RING_TYPE_UVD_INDEX);
+ if (r)
+ dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
+ }
}
if (r)
rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
@@ -6068,7 +7087,7 @@ static int cik_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
CP_RB0_RPTR, CP_RB0_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
@@ -6077,7 +7096,7 @@ static int cik_startup(struct radeon_device *rdev)
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
- 0, 0xfffff, PACKET3(PACKET3_NOP, 0x3FFF));
+ PACKET3(PACKET3_NOP, 0x3FFF));
if (r)
return r;
ring->me = 1; /* first MEC */
@@ -6089,7 +7108,7 @@ static int cik_startup(struct radeon_device *rdev)
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
CP_HQD_PQ_RPTR, CP_HQD_PQ_WPTR,
- 0, 0xffffffff, PACKET3(PACKET3_NOP, 0x3FFF));
+ PACKET3(PACKET3_NOP, 0x3FFF));
if (r)
return r;
/* dGPU only have 1 MEC */
@@ -6102,7 +7121,7 @@ static int cik_startup(struct radeon_device *rdev)
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
SDMA0_GFX_RB_RPTR + SDMA0_REGISTER_OFFSET,
SDMA0_GFX_RB_WPTR + SDMA0_REGISTER_OFFSET,
- 2, 0xfffffffc, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
+ SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
if (r)
return r;
@@ -6110,7 +7129,7 @@ static int cik_startup(struct radeon_device *rdev)
r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
SDMA0_GFX_RB_RPTR + SDMA1_REGISTER_OFFSET,
SDMA0_GFX_RB_WPTR + SDMA1_REGISTER_OFFSET,
- 2, 0xfffffffc, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
+ SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
if (r)
return r;
@@ -6124,12 +7143,11 @@ static int cik_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size,
- R600_WB_UVD_RPTR_OFFSET,
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (!r)
- r = r600_uvd_init(rdev);
+ r = uvd_v1_0_init(rdev);
if (r)
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
}
@@ -6146,6 +7164,10 @@ static int cik_startup(struct radeon_device *rdev)
return r;
}
+ r = dce6_audio_init(rdev);
+ if (r)
+ return r;
+
return 0;
}
@@ -6191,11 +7213,14 @@ int cik_resume(struct radeon_device *rdev)
*/
int cik_suspend(struct radeon_device *rdev)
{
+ dce6_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
cik_cp_enable(rdev, false);
cik_sdma_enable(rdev, false);
- r600_uvd_rbc_stop(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
+ cik_fini_pg(rdev);
+ cik_fini_cg(rdev);
cik_irq_suspend(rdev);
radeon_wb_disable(rdev);
cik_pcie_gart_disable(rdev);
@@ -6316,7 +7341,7 @@ int cik_init(struct radeon_device *rdev)
cik_cp_fini(rdev);
cik_sdma_fini(rdev);
cik_irq_fini(rdev);
- si_rlc_fini(rdev);
+ sumo_rlc_fini(rdev);
cik_mec_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
@@ -6351,13 +7376,16 @@ void cik_fini(struct radeon_device *rdev)
{
cik_cp_fini(rdev);
cik_sdma_fini(rdev);
+ cik_fini_pg(rdev);
+ cik_fini_cg(rdev);
cik_irq_fini(rdev);
- si_rlc_fini(rdev);
+ sumo_rlc_fini(rdev);
cik_mec_fini(rdev);
radeon_wb_fini(rdev);
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
cik_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
@@ -6386,8 +7414,8 @@ static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
struct radeon_crtc *radeon_crtc,
struct drm_display_mode *mode)
{
- u32 tmp;
-
+ u32 tmp, buffer_alloc, i;
+ u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
/*
* Line Buffer Setup
 * There are 6 line buffers, one for each display controller.
@@ -6397,22 +7425,37 @@ static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
* them using the stereo blender.
*/
if (radeon_crtc->base.enabled && mode) {
- if (mode->crtc_hdisplay < 1920)
+ if (mode->crtc_hdisplay < 1920) {
tmp = 1;
- else if (mode->crtc_hdisplay < 2560)
+ buffer_alloc = 2;
+ } else if (mode->crtc_hdisplay < 2560) {
tmp = 2;
- else if (mode->crtc_hdisplay < 4096)
+ buffer_alloc = 2;
+ } else if (mode->crtc_hdisplay < 4096) {
tmp = 0;
- else {
+ buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
+ } else {
DRM_DEBUG_KMS("Mode too big for LB!\n");
tmp = 0;
+ buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
}
- } else
+ } else {
tmp = 1;
+ buffer_alloc = 0;
+ }
WREG32(LB_MEMORY_CTRL + radeon_crtc->crtc_offset,
LB_MEMORY_CONFIG(tmp) | LB_MEMORY_SIZE(0x6B0));
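	/*
	 * Worked example of the selection above: a 1920-wide mode is not
	 * < 1920, so it falls into the < 2560 bucket and gets tmp = 2 for the
	 * LB_MEMORY_CONFIG write just above, together with buffer_alloc = 2
	 * DMIF buffers, which the loop below waits on until the hardware
	 * sets DMIF_BUFFERS_ALLOCATED_COMPLETED.
	 */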
+ WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+ DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+ DMIF_BUFFERS_ALLOCATED_COMPLETED)
+ break;
+ udelay(1);
+ }
+
if (radeon_crtc->base.enabled && mode) {
switch (tmp) {
case 0:
@@ -6814,7 +7857,7 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
u32 lb_size, u32 num_heads)
{
struct drm_display_mode *mode = &radeon_crtc->base.mode;
- struct dce8_wm_params wm;
+ struct dce8_wm_params wm_low, wm_high;
u32 pixel_period;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
@@ -6824,35 +7867,82 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
pixel_period = 1000000 / (u32)mode->clock;
line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
- wm.yclk = rdev->pm.current_mclk * 10;
- wm.sclk = rdev->pm.current_sclk * 10;
- wm.disp_clk = mode->clock;
- wm.src_width = mode->crtc_hdisplay;
- wm.active_time = mode->crtc_hdisplay * pixel_period;
- wm.blank_time = line_time - wm.active_time;
- wm.interlaced = false;
+ /* watermark for high clocks */
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
+ rdev->pm.dpm_enabled) {
+ wm_high.yclk =
+ radeon_dpm_get_mclk(rdev, false) * 10;
+ wm_high.sclk =
+ radeon_dpm_get_sclk(rdev, false) * 10;
+ } else {
+ wm_high.yclk = rdev->pm.current_mclk * 10;
+ wm_high.sclk = rdev->pm.current_sclk * 10;
+ }
+
+ wm_high.disp_clk = mode->clock;
+ wm_high.src_width = mode->crtc_hdisplay;
+ wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.blank_time = line_time - wm_high.active_time;
+ wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
- wm.interlaced = true;
- wm.vsc = radeon_crtc->vsc;
- wm.vtaps = 1;
+ wm_high.interlaced = true;
+ wm_high.vsc = radeon_crtc->vsc;
+ wm_high.vtaps = 1;
if (radeon_crtc->rmx_type != RMX_OFF)
- wm.vtaps = 2;
- wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
- wm.lb_size = lb_size;
- wm.dram_channels = cik_get_number_of_dram_channels(rdev);
- wm.num_heads = num_heads;
+ wm_high.vtaps = 2;
+ wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */
+ wm_high.lb_size = lb_size;
+ wm_high.dram_channels = cik_get_number_of_dram_channels(rdev);
+ wm_high.num_heads = num_heads;
/* set for high clocks */
- latency_watermark_a = min(dce8_latency_watermark(&wm), (u32)65535);
+ latency_watermark_a = min(dce8_latency_watermark(&wm_high), (u32)65535);
+
+ /* possibly force display priority to high */
+ /* should really do this at mode validation time... */
+ if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) ||
+ !dce8_average_bandwidth_vs_available_bandwidth(&wm_high) ||
+ !dce8_check_latency_hiding(&wm_high) ||
+ (rdev->disp_priority == 2)) {
+ DRM_DEBUG_KMS("force priority to high\n");
+ }
+
+ /* watermark for low clocks */
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) &&
+ rdev->pm.dpm_enabled) {
+ wm_low.yclk =
+ radeon_dpm_get_mclk(rdev, true) * 10;
+ wm_low.sclk =
+ radeon_dpm_get_sclk(rdev, true) * 10;
+ } else {
+ wm_low.yclk = rdev->pm.current_mclk * 10;
+ wm_low.sclk = rdev->pm.current_sclk * 10;
+ }
+
+ wm_low.disp_clk = mode->clock;
+ wm_low.src_width = mode->crtc_hdisplay;
+ wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.blank_time = line_time - wm_low.active_time;
+ wm_low.interlaced = false;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ wm_low.interlaced = true;
+ wm_low.vsc = radeon_crtc->vsc;
+ wm_low.vtaps = 1;
+ if (radeon_crtc->rmx_type != RMX_OFF)
+ wm_low.vtaps = 2;
+ wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */
+ wm_low.lb_size = lb_size;
+ wm_low.dram_channels = cik_get_number_of_dram_channels(rdev);
+ wm_low.num_heads = num_heads;
+
/* set for low clocks */
- /* wm.yclk = low clk; wm.sclk = low clk */
- latency_watermark_b = min(dce8_latency_watermark(&wm), (u32)65535);
+ latency_watermark_b = min(dce8_latency_watermark(&wm_low), (u32)65535);
/* possibly force display priority to high */
/* should really do this at mode validation time... */
- if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
- !dce8_average_bandwidth_vs_available_bandwidth(&wm) ||
- !dce8_check_latency_hiding(&wm) ||
+ if (!dce8_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) ||
+ !dce8_average_bandwidth_vs_available_bandwidth(&wm_low) ||
+ !dce8_check_latency_hiding(&wm_low) ||
(rdev->disp_priority == 2)) {
DRM_DEBUG_KMS("force priority to high\n");
}
@@ -6877,6 +7967,11 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
LATENCY_HIGH_WATERMARK(line_time)));
/* restore original selection */
WREG32(DPG_WATERMARK_MASK_CONTROL + radeon_crtc->crtc_offset, wm_mask);
+
+ /* save values for DPM */
+ radeon_crtc->line_time = line_time;
+ radeon_crtc->wm_high = latency_watermark_a;
+ radeon_crtc->wm_low = latency_watermark_b;
}
/**
@@ -6966,39 +8061,307 @@ int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
return r;
}
-int cik_uvd_resume(struct radeon_device *rdev)
+static void cik_pcie_gen3_enable(struct radeon_device *rdev)
{
- uint64_t addr;
- uint32_t size;
- int r;
+ struct pci_dev *root = rdev->pdev->bus->self;
+ int bridge_pos, gpu_pos;
+ u32 speed_cntl, mask, current_data_rate;
+ int ret, i;
+ u16 tmp16;
- r = radeon_uvd_resume(rdev);
- if (r)
- return r;
+ if (radeon_pcie_gen2 == 0)
+ return;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
- /* programm the VCPU memory controller bits 0-27 */
- addr = rdev->uvd.gpu_addr >> 3;
- size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3;
- WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
- WREG32(UVD_VCPU_CACHE_SIZE0, size);
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
+ if (ret != 0)
+ return;
- addr += size;
- size = RADEON_UVD_STACK_SIZE >> 3;
- WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
- WREG32(UVD_VCPU_CACHE_SIZE1, size);
+ if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+ return;
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ current_data_rate = (speed_cntl & LC_CURRENT_DATA_RATE_MASK) >>
+ LC_CURRENT_DATA_RATE_SHIFT;
+ if (mask & DRM_PCIE_SPEED_80) {
+ if (current_data_rate == 2) {
+ DRM_INFO("PCIE gen 3 link speeds already enabled\n");
+ return;
+ }
+ DRM_INFO("enabling PCIE gen 3 link speeds, disable with radeon.pcie_gen2=0\n");
+ } else if (mask & DRM_PCIE_SPEED_50) {
+ if (current_data_rate == 1) {
+ DRM_INFO("PCIE gen 2 link speeds already enabled\n");
+ return;
+ }
+ DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
+ }
- addr += size;
- size = RADEON_UVD_HEAP_SIZE >> 3;
- WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
- WREG32(UVD_VCPU_CACHE_SIZE2, size);
+ bridge_pos = pci_pcie_cap(root);
+ if (!bridge_pos)
+ return;
- /* bits 28-31 */
- addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
- WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
+ gpu_pos = pci_pcie_cap(rdev->pdev);
+ if (!gpu_pos)
+ return;
- /* bits 32-39 */
- addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
- WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+ if (mask & DRM_PCIE_SPEED_80) {
+ /* re-try equalization if gen3 is not already enabled */
+ if (current_data_rate != 2) {
+ u16 bridge_cfg, gpu_cfg;
+ u16 bridge_cfg2, gpu_cfg2;
+ u32 max_lw, current_lw, tmp;
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+ pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+ tmp16 = bridge_cfg | PCI_EXP_LNKCTL_HAWD;
+ pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+ tmp16 = gpu_cfg | PCI_EXP_LNKCTL_HAWD;
+ pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+ tmp = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
+ max_lw = (tmp & LC_DETECTED_LINK_WIDTH_MASK) >> LC_DETECTED_LINK_WIDTH_SHIFT;
+ current_lw = (tmp & LC_OPERATING_LINK_WIDTH_MASK) >> LC_OPERATING_LINK_WIDTH_SHIFT;
+
+ if (current_lw < max_lw) {
+ tmp = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ if (tmp & LC_RENEGOTIATION_SUPPORT) {
+ tmp &= ~(LC_LINK_WIDTH_MASK | LC_UPCONFIGURE_DIS);
+ tmp |= (max_lw << LC_LINK_WIDTH_SHIFT);
+ tmp |= LC_UPCONFIGURE_SUPPORT | LC_RENEGOTIATE_EN | LC_RECONFIG_NOW;
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, tmp);
+ }
+ }
- return 0;
+ for (i = 0; i < 10; i++) {
+ /* check status */
+ pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_DEVSTA, &tmp16);
+ if (tmp16 & PCI_EXP_DEVSTA_TRPND)
+ break;
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &bridge_cfg);
+ pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &gpu_cfg);
+
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &bridge_cfg2);
+ pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &gpu_cfg2);
+
+ tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+ tmp |= LC_SET_QUIESCE;
+ WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+ tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+ tmp |= LC_REDO_EQ;
+ WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+
+ mdelay(100);
+
+ /* linkctl */
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+ tmp16 |= (bridge_cfg & PCI_EXP_LNKCTL_HAWD);
+ pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL, tmp16);
+
+ pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, &tmp16);
+ tmp16 &= ~PCI_EXP_LNKCTL_HAWD;
+ tmp16 |= (gpu_cfg & PCI_EXP_LNKCTL_HAWD);
+ pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL, tmp16);
+
+ /* linkctl2 */
+ pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~((1 << 4) | (7 << 9));
+ tmp16 |= (bridge_cfg2 & ((1 << 4) | (7 << 9)));
+ pci_write_config_word(root, bridge_pos + PCI_EXP_LNKCTL2, tmp16);
+
+ pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~((1 << 4) | (7 << 9));
+ tmp16 |= (gpu_cfg2 & ((1 << 4) | (7 << 9)));
+ pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+ tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL4);
+ tmp &= ~LC_SET_QUIESCE;
+ WREG32_PCIE_PORT(PCIE_LC_CNTL4, tmp);
+ }
+ }
+ }
+
+ /* set the link speed */
+ speed_cntl |= LC_FORCE_EN_SW_SPEED_CHANGE | LC_FORCE_DIS_HW_SPEED_CHANGE;
+ speed_cntl &= ~LC_FORCE_DIS_SW_SPEED_CHANGE;
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ pci_read_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
+ tmp16 &= ~0xf;
+ if (mask & DRM_PCIE_SPEED_80)
+ tmp16 |= 3; /* gen3 */
+ else if (mask & DRM_PCIE_SPEED_50)
+ tmp16 |= 2; /* gen2 */
+ else
+ tmp16 |= 1; /* gen1 */
+ pci_write_config_word(rdev->pdev, gpu_pos + PCI_EXP_LNKCTL2, tmp16);
+
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ speed_cntl |= LC_INITIATE_LINK_SPEED_CHANGE;
+ WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
+ if ((speed_cntl & LC_INITIATE_LINK_SPEED_CHANGE) == 0)
+ break;
+ udelay(1);
+ }
+}
+
+static void cik_program_aspm(struct radeon_device *rdev)
+{
+ u32 data, orig;
+ bool disable_l0s = false, disable_l1 = false, disable_plloff_in_l1 = false;
+ bool disable_clkreq = false;
+
+ if (radeon_aspm == 0)
+ return;
+
+ /* XXX double check IGPs */
+ if (rdev->flags & RADEON_IS_IGP)
+ return;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return;
+
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+ data &= ~LC_XMIT_N_FTS_MASK;
+ data |= LC_XMIT_N_FTS(0x24) | LC_XMIT_N_FTS_OVERRIDE_EN;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL, data);
+
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL3);
+ data |= LC_GO_TO_RECOVERY;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL3, data);
+
+ orig = data = RREG32_PCIE_PORT(PCIE_P_CNTL);
+ data |= P_IGNORE_EDB_ERR;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_P_CNTL, data);
+
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+ data &= ~(LC_L0S_INACTIVITY_MASK | LC_L1_INACTIVITY_MASK);
+ data |= LC_PMI_TO_L1_DIS;
+ if (!disable_l0s)
+ data |= LC_L0S_INACTIVITY(7);
+
+ if (!disable_l1) {
+ data |= LC_L1_INACTIVITY(7);
+ data &= ~LC_PMI_TO_L1_DIS;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+
+ if (!disable_plloff_in_l1) {
+ bool clk_req_support;
+
+ orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0);
+ data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ if (orig != data)
+ WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_0, data);
+
+ orig = data = RREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1);
+ data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ if (orig != data)
+ WREG32_PCIE_PORT(PB0_PIF_PWRDOWN_1, data);
+
+ orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0);
+ data &= ~(PLL_POWER_STATE_IN_OFF_0_MASK | PLL_POWER_STATE_IN_TXS2_0_MASK);
+ data |= PLL_POWER_STATE_IN_OFF_0(7) | PLL_POWER_STATE_IN_TXS2_0(7);
+ if (orig != data)
+ WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_0, data);
+
+ orig = data = RREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1);
+ data &= ~(PLL_POWER_STATE_IN_OFF_1_MASK | PLL_POWER_STATE_IN_TXS2_1_MASK);
+ data |= PLL_POWER_STATE_IN_OFF_1(7) | PLL_POWER_STATE_IN_TXS2_1(7);
+ if (orig != data)
+ WREG32_PCIE_PORT(PB1_PIF_PWRDOWN_1, data);
+
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
+ data &= ~LC_DYN_LANES_PWR_STATE_MASK;
+ data |= LC_DYN_LANES_PWR_STATE(3);
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data);
+
+ if (!disable_clkreq) {
+ struct pci_dev *root = rdev->pdev->bus->self;
+ u32 lnkcap;
+
+ clk_req_support = false;
+ pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
+ if (lnkcap & PCI_EXP_LNKCAP_CLKPM)
+ clk_req_support = true;
+ } else {
+ clk_req_support = false;
+ }
+
+ if (clk_req_support) {
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL2);
+ data |= LC_ALLOW_PDWN_IN_L1 | LC_ALLOW_PDWN_IN_L23;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL2, data);
+
+ orig = data = RREG32_SMC(THM_CLK_CNTL);
+ data &= ~(CMON_CLK_SEL_MASK | TMON_CLK_SEL_MASK);
+ data |= CMON_CLK_SEL(1) | TMON_CLK_SEL(1);
+ if (orig != data)
+ WREG32_SMC(THM_CLK_CNTL, data);
+
+ orig = data = RREG32_SMC(MISC_CLK_CTRL);
+ data &= ~(DEEP_SLEEP_CLK_SEL_MASK | ZCLK_SEL_MASK);
+ data |= DEEP_SLEEP_CLK_SEL(1) | ZCLK_SEL(1);
+ if (orig != data)
+ WREG32_SMC(MISC_CLK_CTRL, data);
+
+ orig = data = RREG32_SMC(CG_CLKPIN_CNTL);
+ data &= ~BCLK_AS_XCLK;
+ if (orig != data)
+ WREG32_SMC(CG_CLKPIN_CNTL, data);
+
+ orig = data = RREG32_SMC(CG_CLKPIN_CNTL_2);
+ data &= ~FORCE_BIF_REFCLK_EN;
+ if (orig != data)
+ WREG32_SMC(CG_CLKPIN_CNTL_2, data);
+
+ orig = data = RREG32_SMC(MPLL_BYPASSCLK_SEL);
+ data &= ~MPLL_CLKOUT_SEL_MASK;
+ data |= MPLL_CLKOUT_SEL(4);
+ if (orig != data)
+ WREG32_SMC(MPLL_BYPASSCLK_SEL, data);
+ }
+ }
+ } else {
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ }
+
+ orig = data = RREG32_PCIE_PORT(PCIE_CNTL2);
+ data |= SLV_MEM_LS_EN | MST_MEM_LS_EN | REPLAY_MEM_LS_EN;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_CNTL2, data);
+
+ if (!disable_l0s) {
+ data = RREG32_PCIE_PORT(PCIE_LC_N_FTS_CNTL);
+ if ((data & LC_N_FTS_MASK) == LC_N_FTS_MASK) {
+ data = RREG32_PCIE_PORT(PCIE_LC_STATUS1);
+ if ((data & LC_REVERSE_XMIT) && (data & LC_REVERSE_RCVR)) {
+ orig = data = RREG32_PCIE_PORT(PCIE_LC_CNTL);
+ data &= ~LC_L0S_INACTIVITY_MASK;
+ if (orig != data)
+ WREG32_PCIE_PORT(PCIE_LC_CNTL, data);
+ }
+ }
+ }
}
diff --git a/drivers/gpu/drm/radeon/cik_reg.h b/drivers/gpu/drm/radeon/cik_reg.h
index d71e46d571f..ca1bb613358 100644
--- a/drivers/gpu/drm/radeon/cik_reg.h
+++ b/drivers/gpu/drm/radeon/cik_reg.h
@@ -24,6 +24,9 @@
#ifndef __CIK_REG_H__
#define __CIK_REG_H__
+#define CIK_DIDT_IND_INDEX 0xca00
+#define CIK_DIDT_IND_DATA 0xca04
+
#define CIK_DC_GPIO_HPD_MASK 0x65b0
#define CIK_DC_GPIO_HPD_A 0x65b4
#define CIK_DC_GPIO_HPD_EN 0x65b8
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
new file mode 100644
index 00000000000..b6286068e11
--- /dev/null
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -0,0 +1,785 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "cikd.h"
+
+/* sdma */
+#define CIK_SDMA_UCODE_SIZE 1050
+#define CIK_SDMA_UCODE_VERSION 64
+
+u32 cik_gpu_check_soft_reset(struct radeon_device *rdev);
+
+/*
+ * sDMA - System DMA
+ * Starting with CIK, the GPU has new asynchronous
+ * DMA engines. These engines are used for compute
+ * and gfx. There are two DMA engines (SDMA0, SDMA1)
+ * and each one supports 1 ring buffer used for gfx
+ * and 2 queues used for compute.
+ *
+ * The programming model is very similar to the CP
+ * (ring buffer, IBs, etc.), but sDMA has its own
+ * packet format that is different from the PM4 format
+ * used by the CP. sDMA supports copying data, writing
+ * embedded data, solid fills, and a number of other
+ * things. It also has support for tiling/detiling of
+ * buffers.
+ */
+
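/*
 * Illustrative sketch of the packet headers used throughout this file,
 * assuming the usual CIK layout (opcode in bits 7:0, sub-opcode in bits
 * 15:8, packet-specific bits in 31:16) that the SDMA_PACKET() macro
 * encodes:
 */
static inline u32 sdma_header_sketch(u32 op, u32 sub_op, u32 extra)
{
	return ((extra & 0xffff) << 16) | ((sub_op & 0xff) << 8) | (op & 0xff);
}
/* e.g. sdma_header_sketch(SDMA_OPCODE_NOP, 0, 0) is the padding NOP header */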
+/**
+ * cik_sdma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (CIK).
+ */
+void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+ u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf;
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 5;
+ while ((next_rptr & 7) != 4)
+ next_rptr++;
+ next_rptr += 4;
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
+ radeon_ring_write(ring, 1); /* number of DWs to follow */
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* IB packet must end on an 8 DW boundary */
+ while ((ring->wptr & 7) != 4)
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0));
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits));
+ radeon_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */
+ radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xffffffff);
+ radeon_ring_write(ring, ib->length_dw);
+
+}
+
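/*
 * Worked example of the 8 DW alignment rule above: if ring->wptr is 18 when
 * the IB is scheduled, 18 & 7 == 2, so two NOP headers advance wptr to 20
 * (20 & 7 == 4); the 4 DW INDIRECT_BUFFER packet then occupies dwords 20-23
 * and the next packet starts at 24, an 8 DW boundary.
 */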
+/**
+ * cik_sdma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number, and a DMA trap packet to generate
+ * an interrupt if needed (CIK).
+ */
+void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+ u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
+ SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
+ u32 ref_and_mask;
+
+ if (fence->ring == R600_RING_TYPE_DMA_INDEX)
+ ref_and_mask = SDMA0;
+ else
+ ref_and_mask = SDMA1;
+
+ /* write the fence */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_FENCE, 0, 0));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ radeon_ring_write(ring, fence->seq);
+ /* generate an interrupt */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_TRAP, 0, 0));
+ /* flush HDP */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+ radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
+ radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
+ radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
+ radeon_ring_write(ring, ref_and_mask); /* MASK */
+ radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
+}
+
+/**
+ * cik_sdma_semaphore_ring_emit - emit a semaphore on the dma ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @semaphore: radeon semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or signal
+ * other rings (CIK).
+ */
+void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ u64 addr = semaphore->gpu_addr;
+ u32 extra_bits = emit_wait ? 0 : SDMA_SEMAPHORE_EXTRA_S;
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SEMAPHORE, 0, extra_bits));
+ radeon_ring_write(ring, addr & 0xfffffff8);
+ radeon_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+}
+
+/**
+ * cik_sdma_gfx_stop - stop the gfx async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the gfx async dma ring buffers (CIK).
+ */
+static void cik_sdma_gfx_stop(struct radeon_device *rdev)
+{
+ u32 rb_cntl, reg_offset;
+ int i;
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0)
+ reg_offset = SDMA0_REGISTER_OFFSET;
+ else
+ reg_offset = SDMA1_REGISTER_OFFSET;
+ rb_cntl = RREG32(SDMA0_GFX_RB_CNTL + reg_offset);
+ rb_cntl &= ~SDMA_RB_ENABLE;
+ WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
+ WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
+ }
+}
+
+/**
+ * cik_sdma_rlc_stop - stop the compute async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the compute async dma queues (CIK).
+ */
+static void cik_sdma_rlc_stop(struct radeon_device *rdev)
+{
+ /* XXX todo */
+}
+
+/**
+ * cik_sdma_enable - halt or unhalt the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ * @enable: enable/disable the DMA MEs.
+ *
+ * Halt or unhalt the async dma engines (CIK).
+ */
+void cik_sdma_enable(struct radeon_device *rdev, bool enable)
+{
+ u32 me_cntl, reg_offset;
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0)
+ reg_offset = SDMA0_REGISTER_OFFSET;
+ else
+ reg_offset = SDMA1_REGISTER_OFFSET;
+ me_cntl = RREG32(SDMA0_ME_CNTL + reg_offset);
+ if (enable)
+ me_cntl &= ~SDMA_HALT;
+ else
+ me_cntl |= SDMA_HALT;
+ WREG32(SDMA0_ME_CNTL + reg_offset, me_cntl);
+ }
+}
+
+/**
+ * cik_sdma_gfx_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the gfx DMA ring buffers and enable them (CIK).
+ * Returns 0 for success, error for failure.
+ */
+static int cik_sdma_gfx_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ u32 rb_cntl, ib_cntl;
+ u32 rb_bufsz;
+ u32 reg_offset, wb_offset;
+ int i, r;
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0) {
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ reg_offset = SDMA0_REGISTER_OFFSET;
+ wb_offset = R600_WB_DMA_RPTR_OFFSET;
+ } else {
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ reg_offset = SDMA1_REGISTER_OFFSET;
+ wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
+ }
+
+ WREG32(SDMA0_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
+ WREG32(SDMA0_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= SDMA_RB_SWAP_ENABLE | SDMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(SDMA0_GFX_RB_RPTR + reg_offset, 0);
+ WREG32(SDMA0_GFX_RB_WPTR + reg_offset, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(SDMA0_GFX_RB_RPTR_ADDR_HI + reg_offset,
+ upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
+ WREG32(SDMA0_GFX_RB_RPTR_ADDR_LO + reg_offset,
+ ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+
+ if (rdev->wb.enabled)
+ rb_cntl |= SDMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(SDMA0_GFX_RB_BASE + reg_offset, ring->gpu_addr >> 8);
+ WREG32(SDMA0_GFX_RB_BASE_HI + reg_offset, ring->gpu_addr >> 40);
+
+ ring->wptr = 0;
+ WREG32(SDMA0_GFX_RB_WPTR + reg_offset, ring->wptr << 2);
+
+ ring->rptr = RREG32(SDMA0_GFX_RB_RPTR + reg_offset) >> 2;
+
+ /* enable DMA RB */
+ WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl | SDMA_RB_ENABLE);
+
+ ib_cntl = SDMA_IB_ENABLE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= SDMA_IB_SWAP_ENABLE;
+#endif
+ /* enable DMA IBs */
+ WREG32(SDMA0_GFX_IB_CNTL + reg_offset, ib_cntl);
+
+ ring->ready = true;
+
+ r = radeon_ring_test(rdev, ring->idx, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+ }
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ return 0;
+}
+
+/**
+ * cik_sdma_rlc_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the compute DMA queues and enable them (CIK).
+ * Returns 0 for success, error for failure.
+ */
+static int cik_sdma_rlc_resume(struct radeon_device *rdev)
+{
+ /* XXX todo */
+ return 0;
+}
+
+/**
+ * cik_sdma_load_microcode - load the sDMA ME ucode
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Loads the sDMA0/1 ucode.
+ * Returns 0 for success, -EINVAL if the ucode is not available.
+ */
+static int cik_sdma_load_microcode(struct radeon_device *rdev)
+{
+ const __be32 *fw_data;
+ int i;
+
+ if (!rdev->sdma_fw)
+ return -EINVAL;
+
+ /* stop the gfx rings and rlc compute queues */
+ cik_sdma_gfx_stop(rdev);
+ cik_sdma_rlc_stop(rdev);
+
+ /* halt the MEs */
+ cik_sdma_enable(rdev, false);
+
+ /* sdma0 */
+ fw_data = (const __be32 *)rdev->sdma_fw->data;
+ WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
+ for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, be32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA0_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+
+ /* sdma1 */
+ fw_data = (const __be32 *)rdev->sdma_fw->data;
+ WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
+ for (i = 0; i < CIK_SDMA_UCODE_SIZE; i++)
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, be32_to_cpup(fw_data++));
+ WREG32(SDMA0_UCODE_DATA + SDMA1_REGISTER_OFFSET, CIK_SDMA_UCODE_VERSION);
+
+ WREG32(SDMA0_UCODE_ADDR + SDMA0_REGISTER_OFFSET, 0);
+ WREG32(SDMA0_UCODE_ADDR + SDMA1_REGISTER_OFFSET, 0);
+ return 0;
+}
+
+/**
+ * cik_sdma_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA engines and enable them (CIK).
+ * Returns 0 for success, error for failure.
+ */
+int cik_sdma_resume(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_SDMA | SOFT_RESET_SDMA1);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+ RREG32(SRBM_SOFT_RESET);
+
+ r = cik_sdma_load_microcode(rdev);
+ if (r)
+ return r;
+
+ /* unhalt the MEs */
+ cik_sdma_enable(rdev, true);
+
+ /* start the gfx rings and rlc compute queues */
+ r = cik_sdma_gfx_resume(rdev);
+ if (r)
+ return r;
+ r = cik_sdma_rlc_resume(rdev);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+/**
+ * cik_sdma_fini - tear down the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines and free the rings (CIK).
+ */
+void cik_sdma_fini(struct radeon_device *rdev)
+{
+ /* stop the gfx rings and rlc compute queues */
+ cik_sdma_gfx_stop(rdev);
+ cik_sdma_rlc_stop(rdev);
+ /* halt the MEs */
+ cik_sdma_enable(rdev, false);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+ radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
+ /* XXX - compute dma queue tear down */
+}
+
+/**
+ * cik_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (CIK).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int cik_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_bytes, cur_size_in_bytes;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+ num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_bytes = size_in_bytes;
+ if (cur_size_in_bytes > 0x1fffff)
+ cur_size_in_bytes = 0x1fffff;
+ size_in_bytes -= cur_size_in_bytes;
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_COPY_SUB_OPCODE_LINEAR, 0));
+ radeon_ring_write(ring, cur_size_in_bytes);
+ radeon_ring_write(ring, 0); /* src/dst endian swap */
+ radeon_ring_write(ring, src_offset & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
+ src_offset += cur_size_in_bytes;
+ dst_offset += cur_size_in_bytes;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
+/**
+ * cik_sdma_ring_test - simple async dma engine test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a value
+ * to memory (CIK).
+ * Returns 0 for success, error for failure.
+ */
+int cik_sdma_ring_test(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ring_lock(rdev, ring, 4);
+ if (r) {
+ DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+ return r;
+ }
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
+ radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff);
+ radeon_ring_write(ring, 1); /* number of DWs to follow */
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ } else {
+ DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ return r;
+}
+
+/**
+ * cik_sdma_ib_test - test an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (CIK).
+ * Returns 0 on success, error on failure.
+ */
+int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ struct radeon_ib ib;
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp = 0;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+ if (r) {
+ DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+ return r;
+ }
+
+ ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
+ ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xffffffff;
+ ib.ptr[3] = 1;
+ ib.ptr[4] = 0xDEADBEEF;
+ ib.length_dw = 5;
+
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ radeon_ib_free(rdev, &ib);
+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+ return r;
+ }
+ r = radeon_fence_wait(ib.fence, false);
+ if (r) {
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ radeon_ib_free(rdev, &ib);
+ return r;
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+ } else {
+ DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
+ r = -EINVAL;
+ }
+ radeon_ib_free(rdev, &ib);
+ return r;
+}
+
+/**
+ * cik_sdma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up (CIK).
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = cik_gpu_check_soft_reset(rdev);
+ u32 mask;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ mask = RADEON_RESET_DMA;
+ else
+ mask = RADEON_RESET_DMA1;
+
+ if (!(reset_mask & mask)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * cik_sdma_vm_set_page - update the page tables using sDMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA (CIK).
+ */
+void cik_sdma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+ uint64_t value;
+ unsigned ndw;
+
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = ndw;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ }
+ }
+ } else {
+ while (count) {
+ ndw = count;
+ if (ndw > 0x7FFFF)
+ ndw = 0x7FFFF;
+
+ if (flags & RADEON_VM_PAGE_VALID)
+ value = addr;
+ else
+ value = 0;
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe);
+ ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = ndw; /* number of entries */
+ pe += ndw * 8;
+ addr += ndw * incr;
+ count -= ndw;
+ }
+ }
+ /* pad the IB to a multiple of 8 dwords with NOPs */
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
+}
+
+/**
+ * cik_dma_vm_flush - cik vm flush using sDMA
+ *
+ * @rdev: radeon_device pointer
+ * @ridx: index of the ring to emit the flush on
+ * @vm: radeon_vm pointer for the VM to flush
+ *
+ * Update the page table base and flush the VM TLB
+ * using sDMA (CIK).
+ */
+void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+ u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(1) |
+ SDMA_POLL_REG_MEM_EXTRA_FUNC(3)); /* == */
+ u32 ref_and_mask;
+
+ if (vm == NULL)
+ return;
+
+ if (ridx == R600_RING_TYPE_DMA_INDEX)
+ ref_and_mask = SDMA0;
+ else
+ ref_and_mask = SDMA1;
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ if (vm->id < 8) {
+ radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
+ } else {
+ radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
+ }
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* update SH_MEM_* regs */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
+ radeon_ring_write(ring, VMID(vm->id));
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, SH_MEM_BASES >> 2);
+ radeon_ring_write(ring, 0);
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, SH_MEM_CONFIG >> 2);
+ radeon_ring_write(ring, 0);
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, SH_MEM_APE1_BASE >> 2);
+ radeon_ring_write(ring, 1);
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, SH_MEM_APE1_LIMIT >> 2);
+ radeon_ring_write(ring, 0);
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
+ radeon_ring_write(ring, VMID(0));
+
+ /* flush HDP */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+ radeon_ring_write(ring, GPU_HDP_FLUSH_DONE);
+ radeon_ring_write(ring, GPU_HDP_FLUSH_REQ);
+ radeon_ring_write(ring, ref_and_mask); /* REFERENCE */
+ radeon_ring_write(ring, ref_and_mask); /* MASK */
+ radeon_ring_write(ring, (4 << 16) | 10); /* RETRY_COUNT, POLL_INTERVAL */
+
+ /* flush TLB */
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+ radeon_ring_write(ring, 1 << vm->id);
+}
+
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 7e9275eaef8..203d2a09a1f 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -28,21 +28,375 @@
#define CIK_RB_BITMAP_WIDTH_PER_SH 2
+/* DIDT IND registers */
+#define DIDT_SQ_CTRL0 0x0
+# define DIDT_CTRL_EN (1 << 0)
+#define DIDT_DB_CTRL0 0x20
+#define DIDT_TD_CTRL0 0x40
+#define DIDT_TCP_CTRL0 0x60
+
/* SMC IND registers */
+#define DPM_TABLE_475 0x3F768
+# define SamuBootLevel(x) ((x) << 0)
+# define SamuBootLevel_MASK 0x000000ff
+# define SamuBootLevel_SHIFT 0
+# define AcpBootLevel(x) ((x) << 8)
+# define AcpBootLevel_MASK 0x0000ff00
+# define AcpBootLevel_SHIFT 8
+# define VceBootLevel(x) ((x) << 16)
+# define VceBootLevel_MASK 0x00ff0000
+# define VceBootLevel_SHIFT 16
+# define UvdBootLevel(x) ((x) << 24)
+# define UvdBootLevel_MASK 0xff000000
+# define UvdBootLevel_SHIFT 24
+
+#define FIRMWARE_FLAGS 0x3F800
+# define INTERRUPTS_ENABLED (1 << 0)
+
+#define NB_DPM_CONFIG_1 0x3F9E8
+# define Dpm0PgNbPsLo(x) ((x) << 0)
+# define Dpm0PgNbPsLo_MASK 0x000000ff
+# define Dpm0PgNbPsLo_SHIFT 0
+# define Dpm0PgNbPsHi(x) ((x) << 8)
+# define Dpm0PgNbPsHi_MASK 0x0000ff00
+# define Dpm0PgNbPsHi_SHIFT 8
+# define DpmXNbPsLo(x) ((x) << 16)
+# define DpmXNbPsLo_MASK 0x00ff0000
+# define DpmXNbPsLo_SHIFT 16
+# define DpmXNbPsHi(x) ((x) << 24)
+# define DpmXNbPsHi_MASK 0xff000000
+# define DpmXNbPsHi_SHIFT 24
+
+#define SMC_SYSCON_RESET_CNTL 0x80000000
+# define RST_REG (1 << 0)
+#define SMC_SYSCON_CLOCK_CNTL_0 0x80000004
+# define CK_DISABLE (1 << 0)
+# define CKEN (1 << 24)
+
+#define SMC_SYSCON_MISC_CNTL 0x80000010
+
+#define SMC_SYSCON_MSG_ARG_0 0x80000068
+
+#define SMC_PC_C 0x80000370
+
+#define SMC_SCRATCH9 0x80000424
+
+#define RCU_UC_EVENTS 0xC0000004
+# define BOOT_SEQ_DONE (1 << 7)
+
#define GENERAL_PWRMGT 0xC0200000
+# define GLOBAL_PWRMGT_EN (1 << 0)
+# define STATIC_PM_EN (1 << 1)
+# define THERMAL_PROTECTION_DIS (1 << 2)
+# define THERMAL_PROTECTION_TYPE (1 << 3)
+# define SW_SMIO_INDEX(x) ((x) << 6)
+# define SW_SMIO_INDEX_MASK (1 << 6)
+# define SW_SMIO_INDEX_SHIFT 6
+# define VOLT_PWRMGT_EN (1 << 10)
# define GPU_COUNTER_CLK (1 << 15)
-
+# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
+
+#define CNB_PWRMGT_CNTL 0xC0200004
+# define GNB_SLOW_MODE(x) ((x) << 0)
+# define GNB_SLOW_MODE_MASK (3 << 0)
+# define GNB_SLOW_MODE_SHIFT 0
+# define GNB_SLOW (1 << 2)
+# define FORCE_NB_PS1 (1 << 3)
+# define DPM_ENABLED (1 << 4)
+
+#define SCLK_PWRMGT_CNTL 0xC0200008
+# define SCLK_PWRMGT_OFF (1 << 0)
+# define RESET_BUSY_CNT (1 << 4)
+# define RESET_SCLK_CNT (1 << 5)
+# define DYNAMIC_PM_EN (1 << 21)
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX 0xC0200014
+# define CURRENT_STATE_MASK (0xf << 4)
+# define CURRENT_STATE_SHIFT 4
+# define CURR_MCLK_INDEX_MASK (0xf << 8)
+# define CURR_MCLK_INDEX_SHIFT 8
+# define CURR_SCLK_INDEX_MASK (0x1f << 16)
+# define CURR_SCLK_INDEX_SHIFT 16
+
+#define CG_SSP 0xC0200044
+# define SST(x) ((x) << 0)
+# define SST_MASK (0xffff << 0)
+# define SSTU(x) ((x) << 16)
+# define SSTU_MASK (0xf << 16)
+
+#define CG_DISPLAY_GAP_CNTL 0xC0200060
+# define DISP_GAP(x) ((x) << 0)
+# define DISP_GAP_MASK (3 << 0)
+# define VBI_TIMER_COUNT(x) ((x) << 4)
+# define VBI_TIMER_COUNT_MASK (0x3fff << 4)
+# define VBI_TIMER_UNIT(x) ((x) << 20)
+# define VBI_TIMER_UNIT_MASK (7 << 20)
+# define DISP_GAP_MCHG(x) ((x) << 24)
+# define DISP_GAP_MCHG_MASK (3 << 24)
+
+#define SMU_VOLTAGE_STATUS 0xC0200094
+# define SMU_VOLTAGE_CURRENT_LEVEL_MASK (0xff << 1)
+# define SMU_VOLTAGE_CURRENT_LEVEL_SHIFT 1
+
+#define TARGET_AND_CURRENT_PROFILE_INDEX_1 0xC02000F0
+# define CURR_PCIE_INDEX_MASK (0xf << 24)
+# define CURR_PCIE_INDEX_SHIFT 24
+
+#define CG_ULV_PARAMETER 0xC0200158
+
+#define CG_FTV_0 0xC02001A8
+#define CG_FTV_1 0xC02001AC
+#define CG_FTV_2 0xC02001B0
+#define CG_FTV_3 0xC02001B4
+#define CG_FTV_4 0xC02001B8
+#define CG_FTV_5 0xC02001BC
+#define CG_FTV_6 0xC02001C0
+#define CG_FTV_7 0xC02001C4
+
+#define CG_DISPLAY_GAP_CNTL2 0xC0200230
+
+#define LCAC_SX0_OVR_SEL 0xC0400D04
+#define LCAC_SX0_OVR_VAL 0xC0400D08
+
+#define LCAC_MC0_CNTL 0xC0400D30
+#define LCAC_MC0_OVR_SEL 0xC0400D34
+#define LCAC_MC0_OVR_VAL 0xC0400D38
+#define LCAC_MC1_CNTL 0xC0400D3C
+#define LCAC_MC1_OVR_SEL 0xC0400D40
+#define LCAC_MC1_OVR_VAL 0xC0400D44
+
+#define LCAC_MC2_OVR_SEL 0xC0400D4C
+#define LCAC_MC2_OVR_VAL 0xC0400D50
+
+#define LCAC_MC3_OVR_SEL 0xC0400D58
+#define LCAC_MC3_OVR_VAL 0xC0400D5C
+
+#define LCAC_CPL_CNTL 0xC0400D80
+#define LCAC_CPL_OVR_SEL 0xC0400D84
+#define LCAC_CPL_OVR_VAL 0xC0400D88
+
+/* dGPU */
+#define CG_THERMAL_CTRL 0xC0300004
+#define DPM_EVENT_SRC(x) ((x) << 0)
+#define DPM_EVENT_SRC_MASK (7 << 0)
+#define DIG_THERM_DPM(x) ((x) << 14)
+#define DIG_THERM_DPM_MASK 0x003FC000
+#define DIG_THERM_DPM_SHIFT 14
+
+#define CG_THERMAL_INT 0xC030000C
+#define CI_DIG_THERM_INTH(x) ((x) << 8)
+#define CI_DIG_THERM_INTH_MASK 0x0000FF00
+#define CI_DIG_THERM_INTH_SHIFT 8
+#define CI_DIG_THERM_INTL(x) ((x) << 16)
+#define CI_DIG_THERM_INTL_MASK 0x00FF0000
+#define CI_DIG_THERM_INTL_SHIFT 16
+#define THERM_INT_MASK_HIGH (1 << 24)
+#define THERM_INT_MASK_LOW (1 << 25)
+
+#define CG_MULT_THERMAL_STATUS 0xC0300014
+#define ASIC_MAX_TEMP(x) ((x) << 0)
+#define ASIC_MAX_TEMP_MASK 0x000001ff
+#define ASIC_MAX_TEMP_SHIFT 0
+#define CTF_TEMP(x) ((x) << 9)
+#define CTF_TEMP_MASK 0x0003fe00
+#define CTF_TEMP_SHIFT 9
+
+#define CG_SPLL_FUNC_CNTL 0xC0500140
+#define SPLL_RESET (1 << 0)
+#define SPLL_PWRON (1 << 1)
+#define SPLL_BYPASS_EN (1 << 3)
+#define SPLL_REF_DIV(x) ((x) << 5)
+#define SPLL_REF_DIV_MASK (0x3f << 5)
+#define SPLL_PDIV_A(x) ((x) << 20)
+#define SPLL_PDIV_A_MASK (0x7f << 20)
+#define SPLL_PDIV_A_SHIFT 20
+#define CG_SPLL_FUNC_CNTL_2 0xC0500144
+#define SCLK_MUX_SEL(x) ((x) << 0)
+#define SCLK_MUX_SEL_MASK (0x1ff << 0)
+#define CG_SPLL_FUNC_CNTL_3 0xC0500148
+#define SPLL_FB_DIV(x) ((x) << 0)
+#define SPLL_FB_DIV_MASK (0x3ffffff << 0)
+#define SPLL_FB_DIV_SHIFT 0
+#define SPLL_DITHEN (1 << 28)
+#define CG_SPLL_FUNC_CNTL_4 0xC050014C
+
+#define CG_SPLL_SPREAD_SPECTRUM 0xC0500164
+#define SSEN (1 << 0)
+#define CLK_S(x) ((x) << 4)
+#define CLK_S_MASK (0xfff << 4)
+#define CLK_S_SHIFT 4
+#define CG_SPLL_SPREAD_SPECTRUM_2 0xC0500168
+#define CLK_V(x) ((x) << 0)
+#define CLK_V_MASK (0x3ffffff << 0)
+#define CLK_V_SHIFT 0
+
+#define MPLL_BYPASSCLK_SEL 0xC050019C
+# define MPLL_CLKOUT_SEL(x) ((x) << 8)
+# define MPLL_CLKOUT_SEL_MASK 0xFF00
#define CG_CLKPIN_CNTL 0xC05001A0
# define XTALIN_DIVIDE (1 << 1)
-
+# define BCLK_AS_XCLK (1 << 2)
+#define CG_CLKPIN_CNTL_2 0xC05001A4
+# define FORCE_BIF_REFCLK_EN (1 << 3)
+# define MUX_TCLK_TO_XCLK (1 << 8)
+#define THM_CLK_CNTL 0xC05001A8
+# define CMON_CLK_SEL(x) ((x) << 0)
+# define CMON_CLK_SEL_MASK 0xFF
+# define TMON_CLK_SEL(x) ((x) << 8)
+# define TMON_CLK_SEL_MASK 0xFF00
+#define MISC_CLK_CTRL 0xC05001AC
+# define DEEP_SLEEP_CLK_SEL(x) ((x) << 0)
+# define DEEP_SLEEP_CLK_SEL_MASK 0xFF
+# define ZCLK_SEL(x) ((x) << 8)
+# define ZCLK_SEL_MASK 0xFF00
+
+/* KV/KB */
+#define CG_THERMAL_INT_CTRL 0xC2100028
+#define DIG_THERM_INTH(x) ((x) << 0)
+#define DIG_THERM_INTH_MASK 0x000000FF
+#define DIG_THERM_INTH_SHIFT 0
+#define DIG_THERM_INTL(x) ((x) << 8)
+#define DIG_THERM_INTL_MASK 0x0000FF00
+#define DIG_THERM_INTL_SHIFT 8
+#define THERM_INTH_MASK (1 << 24)
+#define THERM_INTL_MASK (1 << 25)
+
+/* PCIE registers idx/data 0x38/0x3c */
+#define PB0_PIF_PWRDOWN_0 0x1100012 /* PCIE */
+# define PLL_POWER_STATE_IN_TXS2_0(x) ((x) << 7)
+# define PLL_POWER_STATE_IN_TXS2_0_MASK (0x7 << 7)
+# define PLL_POWER_STATE_IN_TXS2_0_SHIFT 7
+# define PLL_POWER_STATE_IN_OFF_0(x) ((x) << 10)
+# define PLL_POWER_STATE_IN_OFF_0_MASK (0x7 << 10)
+# define PLL_POWER_STATE_IN_OFF_0_SHIFT 10
+# define PLL_RAMP_UP_TIME_0(x) ((x) << 24)
+# define PLL_RAMP_UP_TIME_0_MASK (0x7 << 24)
+# define PLL_RAMP_UP_TIME_0_SHIFT 24
+#define PB0_PIF_PWRDOWN_1 0x1100013 /* PCIE */
+# define PLL_POWER_STATE_IN_TXS2_1(x) ((x) << 7)
+# define PLL_POWER_STATE_IN_TXS2_1_MASK (0x7 << 7)
+# define PLL_POWER_STATE_IN_TXS2_1_SHIFT 7
+# define PLL_POWER_STATE_IN_OFF_1(x) ((x) << 10)
+# define PLL_POWER_STATE_IN_OFF_1_MASK (0x7 << 10)
+# define PLL_POWER_STATE_IN_OFF_1_SHIFT 10
+# define PLL_RAMP_UP_TIME_1(x) ((x) << 24)
+# define PLL_RAMP_UP_TIME_1_MASK (0x7 << 24)
+# define PLL_RAMP_UP_TIME_1_SHIFT 24
+
+#define PCIE_CNTL2 0x1001001c /* PCIE */
+# define SLV_MEM_LS_EN (1 << 16)
+# define SLV_MEM_AGGRESSIVE_LS_EN (1 << 17)
+# define MST_MEM_LS_EN (1 << 18)
+# define REPLAY_MEM_LS_EN (1 << 19)
+
+#define PCIE_LC_STATUS1 0x1400028 /* PCIE */
+# define LC_REVERSE_RCVR (1 << 0)
+# define LC_REVERSE_XMIT (1 << 1)
+# define LC_OPERATING_LINK_WIDTH_MASK (0x7 << 2)
+# define LC_OPERATING_LINK_WIDTH_SHIFT 2
+# define LC_DETECTED_LINK_WIDTH_MASK (0x7 << 5)
+# define LC_DETECTED_LINK_WIDTH_SHIFT 5
+
+#define PCIE_P_CNTL 0x1400040 /* PCIE */
+# define P_IGNORE_EDB_ERR (1 << 6)
+
+#define PB1_PIF_PWRDOWN_0 0x2100012 /* PCIE */
+#define PB1_PIF_PWRDOWN_1 0x2100013 /* PCIE */
+
+#define PCIE_LC_CNTL 0x100100A0 /* PCIE */
+# define LC_L0S_INACTIVITY(x) ((x) << 8)
+# define LC_L0S_INACTIVITY_MASK (0xf << 8)
+# define LC_L0S_INACTIVITY_SHIFT 8
+# define LC_L1_INACTIVITY(x) ((x) << 12)
+# define LC_L1_INACTIVITY_MASK (0xf << 12)
+# define LC_L1_INACTIVITY_SHIFT 12
+# define LC_PMI_TO_L1_DIS (1 << 16)
+# define LC_ASPM_TO_L1_DIS (1 << 24)
+
+#define PCIE_LC_LINK_WIDTH_CNTL 0x100100A2 /* PCIE */
+# define LC_LINK_WIDTH_SHIFT 0
+# define LC_LINK_WIDTH_MASK 0x7
+# define LC_LINK_WIDTH_X0 0
+# define LC_LINK_WIDTH_X1 1
+# define LC_LINK_WIDTH_X2 2
+# define LC_LINK_WIDTH_X4 3
+# define LC_LINK_WIDTH_X8 4
+# define LC_LINK_WIDTH_X16 6
+# define LC_LINK_WIDTH_RD_SHIFT 4
+# define LC_LINK_WIDTH_RD_MASK 0x70
+# define LC_RECONFIG_ARC_MISSING_ESCAPE (1 << 7)
+# define LC_RECONFIG_NOW (1 << 8)
+# define LC_RENEGOTIATION_SUPPORT (1 << 9)
+# define LC_RENEGOTIATE_EN (1 << 10)
+# define LC_SHORT_RECONFIG_EN (1 << 11)
+# define LC_UPCONFIGURE_SUPPORT (1 << 12)
+# define LC_UPCONFIGURE_DIS (1 << 13)
+# define LC_DYN_LANES_PWR_STATE(x) ((x) << 21)
+# define LC_DYN_LANES_PWR_STATE_MASK (0x3 << 21)
+# define LC_DYN_LANES_PWR_STATE_SHIFT 21
+#define PCIE_LC_N_FTS_CNTL 0x100100a3 /* PCIE */
+# define LC_XMIT_N_FTS(x) ((x) << 0)
+# define LC_XMIT_N_FTS_MASK (0xff << 0)
+# define LC_XMIT_N_FTS_SHIFT 0
+# define LC_XMIT_N_FTS_OVERRIDE_EN (1 << 8)
+# define LC_N_FTS_MASK (0xff << 24)
+#define PCIE_LC_SPEED_CNTL 0x100100A4 /* PCIE */
+# define LC_GEN2_EN_STRAP (1 << 0)
+# define LC_GEN3_EN_STRAP (1 << 1)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_EN (1 << 2)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_MASK (0x3 << 3)
+# define LC_TARGET_LINK_SPEED_OVERRIDE_SHIFT 3
+# define LC_FORCE_EN_SW_SPEED_CHANGE (1 << 5)
+# define LC_FORCE_DIS_SW_SPEED_CHANGE (1 << 6)
+# define LC_FORCE_EN_HW_SPEED_CHANGE (1 << 7)
+# define LC_FORCE_DIS_HW_SPEED_CHANGE (1 << 8)
+# define LC_INITIATE_LINK_SPEED_CHANGE (1 << 9)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_MASK (0x3 << 10)
+# define LC_SPEED_CHANGE_ATTEMPTS_ALLOWED_SHIFT 10
+# define LC_CURRENT_DATA_RATE_MASK (0x3 << 13) /* 0/1/2 = gen1/2/3 */
+# define LC_CURRENT_DATA_RATE_SHIFT 13
+# define LC_CLR_FAILED_SPD_CHANGE_CNT (1 << 16)
+# define LC_OTHER_SIDE_EVER_SENT_GEN2 (1 << 18)
+# define LC_OTHER_SIDE_SUPPORTS_GEN2 (1 << 19)
+# define LC_OTHER_SIDE_EVER_SENT_GEN3 (1 << 20)
+# define LC_OTHER_SIDE_SUPPORTS_GEN3 (1 << 21)
+
+#define PCIE_LC_CNTL2 0x100100B1 /* PCIE */
+# define LC_ALLOW_PDWN_IN_L1 (1 << 17)
+# define LC_ALLOW_PDWN_IN_L23 (1 << 18)
+
+#define PCIE_LC_CNTL3 0x100100B5 /* PCIE */
+# define LC_GO_TO_RECOVERY (1 << 30)
+#define PCIE_LC_CNTL4 0x100100B6 /* PCIE */
+# define LC_REDO_EQ (1 << 5)
+# define LC_SET_QUIESCE (1 << 13)
+
+/* direct registers */
#define PCIE_INDEX 0x38
#define PCIE_DATA 0x3C
+#define SMC_IND_INDEX_0 0x200
+#define SMC_IND_DATA_0 0x204
+
+#define SMC_IND_ACCESS_CNTL 0x240
+#define AUTO_INCREMENT_IND_0 (1 << 0)
+
+#define SMC_MESSAGE_0 0x250
+#define SMC_MSG_MASK 0xffff
+#define SMC_RESP_0 0x254
+#define SMC_RESP_MASK 0xffff
+
+#define SMC_MSG_ARG_0 0x290
+
#define VGA_HDP_CONTROL 0x328
#define VGA_MEMORY_DISABLE (1 << 4)
#define DMIF_ADDR_CALC 0xC00
+#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
+# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
+# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
+
#define SRBM_GFX_CNTL 0xE44
#define PIPEID(x) ((x) << 0)
#define MEID(x) ((x) << 2)
@@ -172,6 +526,10 @@
#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x1580
+#define VM_L2_CG 0x15c0
+#define MC_CG_ENABLE (1 << 18)
+#define MC_LS_ENABLE (1 << 19)
+
#define MC_SHARED_CHMAP 0x2004
#define NOOFCHAN_SHIFT 12
#define NOOFCHAN_MASK 0x0000f000
@@ -201,6 +559,17 @@
#define MC_SHARED_BLACKOUT_CNTL 0x20ac
+#define MC_HUB_MISC_HUB_CG 0x20b8
+#define MC_HUB_MISC_VM_CG 0x20bc
+
+#define MC_HUB_MISC_SIP_CG 0x20c0
+
+#define MC_XPB_CLK_GAT 0x2478
+
+#define MC_CITF_MISC_RD_CG 0x2648
+#define MC_CITF_MISC_WR_CG 0x264c
+#define MC_CITF_MISC_VM_CG 0x2650
+
#define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0
#define NOOFBANK_MASK 0x00000003
@@ -215,9 +584,37 @@
#define NOOFGROUPS_SHIFT 12
#define NOOFGROUPS_MASK 0x00001000
+#define MC_ARB_DRAM_TIMING 0x2774
+#define MC_ARB_DRAM_TIMING2 0x2778
+
+#define MC_ARB_BURST_TIME 0x2808
+#define STATE0(x) ((x) << 0)
+#define STATE0_MASK (0x1f << 0)
+#define STATE0_SHIFT 0
+#define STATE1(x) ((x) << 5)
+#define STATE1_MASK (0x1f << 5)
+#define STATE1_SHIFT 5
+#define STATE2(x) ((x) << 10)
+#define STATE2_MASK (0x1f << 10)
+#define STATE2_SHIFT 10
+#define STATE3(x) ((x) << 15)
+#define STATE3_MASK (0x1f << 15)
+#define STATE3_SHIFT 15
+
+#define MC_SEQ_RAS_TIMING 0x28a0
+#define MC_SEQ_CAS_TIMING 0x28a4
+#define MC_SEQ_MISC_TIMING 0x28a8
+#define MC_SEQ_MISC_TIMING2 0x28ac
+#define MC_SEQ_PMG_TIMING 0x28b0
+#define MC_SEQ_RD_CTL_D0 0x28b4
+#define MC_SEQ_RD_CTL_D1 0x28b8
+#define MC_SEQ_WR_CTL_D0 0x28bc
+#define MC_SEQ_WR_CTL_D1 0x28c0
+
#define MC_SEQ_SUP_CNTL 0x28c8
#define RUN_MASK (1 << 0)
#define MC_SEQ_SUP_PGM 0x28cc
+#define MC_PMG_AUTO_CMD 0x28d0
#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x28e8
#define TRAIN_DONE_D0 (1 << 30)
@@ -226,10 +623,92 @@
#define MC_IO_PAD_CNTL_D0 0x29d0
#define MEM_FALL_OUT_CMD (1 << 8)
+#define MC_SEQ_MISC0 0x2a00
+#define MC_SEQ_MISC0_VEN_ID_SHIFT 8
+#define MC_SEQ_MISC0_VEN_ID_MASK 0x00000f00
+#define MC_SEQ_MISC0_VEN_ID_VALUE 3
+#define MC_SEQ_MISC0_REV_ID_SHIFT 12
+#define MC_SEQ_MISC0_REV_ID_MASK 0x0000f000
+#define MC_SEQ_MISC0_REV_ID_VALUE 1
+#define MC_SEQ_MISC0_GDDR5_SHIFT 28
+#define MC_SEQ_MISC0_GDDR5_MASK 0xf0000000
+#define MC_SEQ_MISC0_GDDR5_VALUE 5
+#define MC_SEQ_MISC1 0x2a04
+#define MC_SEQ_RESERVE_M 0x2a08
+#define MC_PMG_CMD_EMRS 0x2a0c
+
#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
#define MC_SEQ_IO_DEBUG_DATA 0x2a48
+#define MC_SEQ_MISC5 0x2a54
+#define MC_SEQ_MISC6 0x2a58
+
+#define MC_SEQ_MISC7 0x2a64
+
+#define MC_SEQ_RAS_TIMING_LP 0x2a6c
+#define MC_SEQ_CAS_TIMING_LP 0x2a70
+#define MC_SEQ_MISC_TIMING_LP 0x2a74
+#define MC_SEQ_MISC_TIMING2_LP 0x2a78
+#define MC_SEQ_WR_CTL_D0_LP 0x2a7c
+#define MC_SEQ_WR_CTL_D1_LP 0x2a80
+#define MC_SEQ_PMG_CMD_EMRS_LP 0x2a84
+#define MC_SEQ_PMG_CMD_MRS_LP 0x2a88
+
+#define MC_PMG_CMD_MRS 0x2aac
+
+#define MC_SEQ_RD_CTL_D0_LP 0x2b1c
+#define MC_SEQ_RD_CTL_D1_LP 0x2b20
+
+#define MC_PMG_CMD_MRS1 0x2b44
+#define MC_SEQ_PMG_CMD_MRS1_LP 0x2b48
+#define MC_SEQ_PMG_TIMING_LP 0x2b4c
+
+#define MC_SEQ_WR_CTL_2 0x2b54
+#define MC_SEQ_WR_CTL_2_LP 0x2b58
+#define MC_PMG_CMD_MRS2 0x2b5c
+#define MC_SEQ_PMG_CMD_MRS2_LP 0x2b60
+
+#define MCLK_PWRMGT_CNTL 0x2ba0
+# define DLL_SPEED(x) ((x) << 0)
+# define DLL_SPEED_MASK (0x1f << 0)
+# define DLL_READY (1 << 6)
+# define MC_INT_CNTL (1 << 7)
+# define MRDCK0_PDNB (1 << 8)
+# define MRDCK1_PDNB (1 << 9)
+# define MRDCK0_RESET (1 << 16)
+# define MRDCK1_RESET (1 << 17)
+# define DLL_READY_READ (1 << 24)
+#define DLL_CNTL 0x2ba4
+# define MRDCK0_BYPASS (1 << 24)
+# define MRDCK1_BYPASS (1 << 25)
+
+#define MPLL_FUNC_CNTL 0x2bb4
+#define BWCTRL(x) ((x) << 20)
+#define BWCTRL_MASK (0xff << 20)
+#define MPLL_FUNC_CNTL_1 0x2bb8
+#define VCO_MODE(x) ((x) << 0)
+#define VCO_MODE_MASK (3 << 0)
+#define CLKFRAC(x) ((x) << 4)
+#define CLKFRAC_MASK (0xfff << 4)
+#define CLKF(x) ((x) << 16)
+#define CLKF_MASK (0xfff << 16)
+#define MPLL_FUNC_CNTL_2 0x2bbc
+#define MPLL_AD_FUNC_CNTL 0x2bc0
+#define YCLK_POST_DIV(x) ((x) << 0)
+#define YCLK_POST_DIV_MASK (7 << 0)
+#define MPLL_DQ_FUNC_CNTL 0x2bc4
+#define YCLK_SEL(x) ((x) << 4)
+#define YCLK_SEL_MASK (1 << 4)
+
+#define MPLL_SS1 0x2bcc
+#define CLKV(x) ((x) << 0)
+#define CLKV_MASK (0x3ffffff << 0)
+#define MPLL_SS2 0x2bd0
+#define CLKS(x) ((x) << 0)
+#define CLKS_MASK (0xfff << 0)
+
#define HDP_HOST_PATH_CNTL 0x2C00
+#define CLOCK_GATING_DIS (1 << 23)
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
#define HDP_NONSURFACE_SIZE 0x2C0C
@@ -237,6 +716,26 @@
#define HDP_ADDR_CONFIG 0x2F48
#define HDP_MISC_CNTL 0x2F4C
#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
+#define HDP_MEM_POWER_LS 0x2F50
+#define HDP_LS_ENABLE (1 << 0)
+
+#define ATC_MISC_CG 0x3350
+
+#define MC_SEQ_CNTL_3 0x3600
+# define CAC_EN (1 << 31)
+#define MC_SEQ_G5PDX_CTRL 0x3604
+#define MC_SEQ_G5PDX_CTRL_LP 0x3608
+#define MC_SEQ_G5PDX_CMD0 0x360c
+#define MC_SEQ_G5PDX_CMD0_LP 0x3610
+#define MC_SEQ_G5PDX_CMD1 0x3614
+#define MC_SEQ_G5PDX_CMD1_LP 0x3618
+
+#define MC_SEQ_PMG_DVS_CTL 0x3628
+#define MC_SEQ_PMG_DVS_CTL_LP 0x362c
+#define MC_SEQ_PMG_DVS_CMD 0x3630
+#define MC_SEQ_PMG_DVS_CMD_LP 0x3634
+#define MC_SEQ_DLL_STBY 0x3638
+#define MC_SEQ_DLL_STBY_LP 0x363c
#define IH_RB_CNTL 0x3e00
# define IH_RB_ENABLE (1 << 0)
@@ -265,6 +764,9 @@
# define MC_WR_CLEAN_CNT(x) ((x) << 20)
# define MC_VMID(x) ((x) << 25)
+#define BIF_LNCNT_RESET 0x5220
+# define RESET_LNCNT_EN (1 << 0)
+
#define CONFIG_MEMSIZE 0x5428
#define INTERRUPT_CNTL 0x5468
@@ -401,6 +903,9 @@
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
+#define DPG_PIPE_STUTTER_CONTROL 0x6cd4
+# define STUTTER_ENABLE (1 << 0)
+
#define GRBM_CNTL 0x8000
#define GRBM_READ_TIMEOUT(x) ((x) << 0)
@@ -504,6 +1009,9 @@
#define CP_RB0_RPTR 0x8700
#define CP_RB_WPTR_DELAY 0x8704
+#define CP_RB_WPTR_POLL_CNTL 0x8708
+#define IDLE_POLL_COUNT(x) ((x) << 16)
+#define IDLE_POLL_COUNT_MASK (0xffff << 16)
#define CP_MEQ_THRESHOLDS 0x8764
#define MEQ1_START(x) ((x) << 0)
@@ -730,6 +1238,9 @@
# define CP_RINGID1_INT_STAT (1 << 30)
# define CP_RINGID0_INT_STAT (1 << 31)
+#define CP_MEM_SLP_CNTL 0xC1E4
+# define CP_MEM_LS_EN (1 << 0)
+
#define CP_CPF_DEBUG 0xC200
#define CP_PQ_WPTR_POLL_CNTL 0xC20C
@@ -775,14 +1286,20 @@
#define RLC_MC_CNTL 0xC30C
+#define RLC_MEM_SLP_CNTL 0xC318
+# define RLC_MEM_LS_EN (1 << 0)
+
#define RLC_LB_CNTR_MAX 0xC348
#define RLC_LB_CNTL 0xC364
+# define LOAD_BALANCE_ENABLE (1 << 0)
#define RLC_LB_CNTR_INIT 0xC36C
#define RLC_SAVE_AND_RESTORE_BASE 0xC374
-#define RLC_DRIVER_DMA_STATUS 0xC378
+#define RLC_DRIVER_DMA_STATUS 0xC378 /* dGPU */
+#define RLC_CP_TABLE_RESTORE 0xC378 /* APU */
+#define RLC_PG_DELAY_2 0xC37C
#define RLC_GPM_UCODE_ADDR 0xC388
#define RLC_GPM_UCODE_DATA 0xC38C
@@ -791,12 +1308,52 @@
#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC398
#define RLC_UCODE_CNTL 0xC39C
+#define RLC_GPM_STAT 0xC400
+# define RLC_GPM_BUSY (1 << 0)
+# define GFX_POWER_STATUS (1 << 1)
+# define GFX_CLOCK_STATUS (1 << 2)
+
+#define RLC_PG_CNTL 0xC40C
+# define GFX_PG_ENABLE (1 << 0)
+# define GFX_PG_SRC (1 << 1)
+# define DYN_PER_CU_PG_ENABLE (1 << 2)
+# define STATIC_PER_CU_PG_ENABLE (1 << 3)
+# define DISABLE_GDS_PG (1 << 13)
+# define DISABLE_CP_PG (1 << 15)
+# define SMU_CLK_SLOWDOWN_ON_PU_ENABLE (1 << 17)
+# define SMU_CLK_SLOWDOWN_ON_PD_ENABLE (1 << 18)
+
+#define RLC_CGTT_MGCG_OVERRIDE 0xC420
#define RLC_CGCG_CGLS_CTRL 0xC424
+# define CGCG_EN (1 << 0)
+# define CGLS_EN (1 << 1)
+
+#define RLC_PG_DELAY 0xC434
#define RLC_LB_INIT_CU_MASK 0xC43C
#define RLC_LB_PARAMS 0xC444
+#define RLC_PG_AO_CU_MASK 0xC44C
+
+#define RLC_MAX_PG_CU 0xC450
+# define MAX_PU_CU(x) ((x) << 0)
+# define MAX_PU_CU_MASK (0xff << 0)
+#define RLC_AUTO_PG_CTRL 0xC454
+# define AUTO_PG_EN (1 << 0)
+# define GRBM_REG_SGIT(x) ((x) << 3)
+# define GRBM_REG_SGIT_MASK (0xffff << 3)
+
+#define RLC_SERDES_WR_CU_MASTER_MASK 0xC474
+#define RLC_SERDES_WR_NONCU_MASTER_MASK 0xC478
+#define RLC_SERDES_WR_CTRL 0xC47C
+#define BPM_ADDR(x) ((x) << 0)
+#define BPM_ADDR_MASK (0xff << 0)
+#define CGLS_ENABLE (1 << 16)
+#define CGCG_OVERRIDE_0 (1 << 20)
+#define MGCG_OVERRIDE_0 (1 << 22)
+#define MGCG_OVERRIDE_1 (1 << 23)
+
#define RLC_SERDES_CU_MASTER_BUSY 0xC484
#define RLC_SERDES_NONCU_MASTER_BUSY 0xC488
# define SE_MASTER_BUSY_MASK 0x0000ffff
@@ -807,6 +1364,13 @@
#define RLC_GPM_SCRATCH_ADDR 0xC4B0
#define RLC_GPM_SCRATCH_DATA 0xC4B4
+#define RLC_GPR_REG2 0xC4E8
+#define REQ 0x00000001
+#define MESSAGE(x) ((x) << 1)
+#define MESSAGE_MASK 0x0000001e
+#define MSG_ENTER_RLC_SAFE_MODE 1
+#define MSG_EXIT_RLC_SAFE_MODE 0
+
#define CP_HPD_EOP_BASE_ADDR 0xC904
#define CP_HPD_EOP_BASE_ADDR_HI 0xC908
#define CP_HPD_EOP_VMID 0xC90C
@@ -851,6 +1415,8 @@
#define MQD_VMID(x) ((x) << 0)
#define MQD_VMID_MASK (0xf << 0)
+#define DB_RENDER_CONTROL 0x28000
+
#define PA_SC_RASTER_CONFIG 0x28350
# define RASTER_CONFIG_RB_MAP_0 0
# define RASTER_CONFIG_RB_MAP_1 1
@@ -944,6 +1510,16 @@
#define CP_PERFMON_CNTL 0x36020
+#define CGTS_SM_CTRL_REG 0x3c000
+#define SM_MODE(x) ((x) << 17)
+#define SM_MODE_MASK (0x7 << 17)
+#define SM_MODE_ENABLE (1 << 20)
+#define CGTS_OVERRIDE (1 << 21)
+#define CGTS_LS_OVERRIDE (1 << 22)
+#define ON_MONITOR_ADD_EN (1 << 23)
+#define ON_MONITOR_ADD(x) ((x) << 24)
+#define ON_MONITOR_ADD_MASK (0xff << 24)
+
#define CGTS_TCC_DISABLE 0x3c00c
#define CGTS_USER_TCC_DISABLE 0x3c010
#define TCC_DISABLE_MASK 0xFFFF0000
@@ -1176,6 +1752,8 @@
#define SDMA0_UCODE_ADDR 0xD000
#define SDMA0_UCODE_DATA 0xD004
+#define SDMA0_POWER_CNTL 0xD008
+#define SDMA0_CLK_CTRL 0xD00C
#define SDMA0_CNTL 0xD010
# define TRAP_ENABLE (1 << 0)
@@ -1300,6 +1878,13 @@
#define UVD_RBC_RB_RPTR 0xf690
#define UVD_RBC_RB_WPTR 0xf694
+#define UVD_CGC_CTRL 0xF4B0
+# define DCM (1 << 0)
+# define CG_DT(x) ((x) << 2)
+# define CG_DT_MASK (0xf << 2)
+# define CLK_OD(x) ((x) << 6)
+# define CLK_OD_MASK (0x1f << 6)
+
/* UVD clocks */
#define CG_DCLK_CNTL 0xC050009C
@@ -1310,4 +1895,7 @@
#define CG_VCLK_CNTL 0xC05000A4
#define CG_VCLK_STATUS 0xC05000A8
+/* UVD CTX indirect */
+#define UVD_CGC_MEM_CTRL 0xC0
+
#endif
diff --git a/drivers/gpu/drm/radeon/clearstate_cayman.h b/drivers/gpu/drm/radeon/clearstate_cayman.h
index c00339440c5..aa908c55a51 100644
--- a/drivers/gpu/drm/radeon/clearstate_cayman.h
+++ b/drivers/gpu/drm/radeon/clearstate_cayman.h
@@ -1073,7 +1073,7 @@ static const struct cs_extent_def SECT_CTRLCONST_defs[] =
{SECT_CTRLCONST_def_1, 0x0000f3fc, 2 },
{ 0, 0, 0 }
};
-struct cs_section_def cayman_cs_data[] = {
+static const struct cs_section_def cayman_cs_data[] = {
{ SECT_CONTEXT_defs, SECT_CONTEXT },
{ SECT_CLEAR_defs, SECT_CLEAR },
{ SECT_CTRLCONST_defs, SECT_CTRLCONST },
diff --git a/drivers/gpu/drm/radeon/clearstate_ci.h b/drivers/gpu/drm/radeon/clearstate_ci.h
new file mode 100644
index 00000000000..c3982f9475f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/clearstate_ci.h
@@ -0,0 +1,944 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+static const unsigned int ci_SECT_CONTEXT_def_1[] =
+{
+ 0x00000000, // DB_RENDER_CONTROL
+ 0x00000000, // DB_COUNT_CONTROL
+ 0x00000000, // DB_DEPTH_VIEW
+ 0x00000000, // DB_RENDER_OVERRIDE
+ 0x00000000, // DB_RENDER_OVERRIDE2
+ 0x00000000, // DB_HTILE_DATA_BASE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // DB_DEPTH_BOUNDS_MIN
+ 0x00000000, // DB_DEPTH_BOUNDS_MAX
+ 0x00000000, // DB_STENCIL_CLEAR
+ 0x00000000, // DB_DEPTH_CLEAR
+ 0x00000000, // PA_SC_SCREEN_SCISSOR_TL
+ 0x40004000, // PA_SC_SCREEN_SCISSOR_BR
+ 0, // HOLE
+ 0x00000000, // DB_DEPTH_INFO
+ 0x00000000, // DB_Z_INFO
+ 0x00000000, // DB_STENCIL_INFO
+ 0x00000000, // DB_Z_READ_BASE
+ 0x00000000, // DB_STENCIL_READ_BASE
+ 0x00000000, // DB_Z_WRITE_BASE
+ 0x00000000, // DB_STENCIL_WRITE_BASE
+ 0x00000000, // DB_DEPTH_SIZE
+ 0x00000000, // DB_DEPTH_SLICE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // TA_BC_BASE_ADDR
+ 0x00000000, // TA_BC_BASE_ADDR_HI
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // COHER_DEST_BASE_HI_0
+ 0x00000000, // COHER_DEST_BASE_HI_1
+ 0x00000000, // COHER_DEST_BASE_HI_2
+ 0x00000000, // COHER_DEST_BASE_HI_3
+ 0x00000000, // COHER_DEST_BASE_2
+ 0x00000000, // COHER_DEST_BASE_3
+ 0x00000000, // PA_SC_WINDOW_OFFSET
+ 0x80000000, // PA_SC_WINDOW_SCISSOR_TL
+ 0x40004000, // PA_SC_WINDOW_SCISSOR_BR
+ 0x0000ffff, // PA_SC_CLIPRECT_RULE
+ 0x00000000, // PA_SC_CLIPRECT_0_TL
+ 0x40004000, // PA_SC_CLIPRECT_0_BR
+ 0x00000000, // PA_SC_CLIPRECT_1_TL
+ 0x40004000, // PA_SC_CLIPRECT_1_BR
+ 0x00000000, // PA_SC_CLIPRECT_2_TL
+ 0x40004000, // PA_SC_CLIPRECT_2_BR
+ 0x00000000, // PA_SC_CLIPRECT_3_TL
+ 0x40004000, // PA_SC_CLIPRECT_3_BR
+ 0xaa99aaaa, // PA_SC_EDGERULE
+ 0x00000000, // PA_SU_HARDWARE_SCREEN_OFFSET
+ 0xffffffff, // CB_TARGET_MASK
+ 0xffffffff, // CB_SHADER_MASK
+ 0x80000000, // PA_SC_GENERIC_SCISSOR_TL
+ 0x40004000, // PA_SC_GENERIC_SCISSOR_BR
+ 0x00000000, // COHER_DEST_BASE_0
+ 0x00000000, // COHER_DEST_BASE_1
+ 0x80000000, // PA_SC_VPORT_SCISSOR_0_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_0_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_1_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_1_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_2_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_2_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_3_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_3_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_4_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_4_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_5_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_5_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_6_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_6_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_7_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_7_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_8_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_8_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_9_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_9_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_10_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_10_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_11_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_11_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_12_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_12_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_13_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_13_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_14_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_14_BR
+ 0x80000000, // PA_SC_VPORT_SCISSOR_15_TL
+ 0x40004000, // PA_SC_VPORT_SCISSOR_15_BR
+ 0x00000000, // PA_SC_VPORT_ZMIN_0
+ 0x3f800000, // PA_SC_VPORT_ZMAX_0
+ 0x00000000, // PA_SC_VPORT_ZMIN_1
+ 0x3f800000, // PA_SC_VPORT_ZMAX_1
+ 0x00000000, // PA_SC_VPORT_ZMIN_2
+ 0x3f800000, // PA_SC_VPORT_ZMAX_2
+ 0x00000000, // PA_SC_VPORT_ZMIN_3
+ 0x3f800000, // PA_SC_VPORT_ZMAX_3
+ 0x00000000, // PA_SC_VPORT_ZMIN_4
+ 0x3f800000, // PA_SC_VPORT_ZMAX_4
+ 0x00000000, // PA_SC_VPORT_ZMIN_5
+ 0x3f800000, // PA_SC_VPORT_ZMAX_5
+ 0x00000000, // PA_SC_VPORT_ZMIN_6
+ 0x3f800000, // PA_SC_VPORT_ZMAX_6
+ 0x00000000, // PA_SC_VPORT_ZMIN_7
+ 0x3f800000, // PA_SC_VPORT_ZMAX_7
+ 0x00000000, // PA_SC_VPORT_ZMIN_8
+ 0x3f800000, // PA_SC_VPORT_ZMAX_8
+ 0x00000000, // PA_SC_VPORT_ZMIN_9
+ 0x3f800000, // PA_SC_VPORT_ZMAX_9
+ 0x00000000, // PA_SC_VPORT_ZMIN_10
+ 0x3f800000, // PA_SC_VPORT_ZMAX_10
+ 0x00000000, // PA_SC_VPORT_ZMIN_11
+ 0x3f800000, // PA_SC_VPORT_ZMAX_11
+ 0x00000000, // PA_SC_VPORT_ZMIN_12
+ 0x3f800000, // PA_SC_VPORT_ZMAX_12
+ 0x00000000, // PA_SC_VPORT_ZMIN_13
+ 0x3f800000, // PA_SC_VPORT_ZMAX_13
+ 0x00000000, // PA_SC_VPORT_ZMIN_14
+ 0x3f800000, // PA_SC_VPORT_ZMAX_14
+ 0x00000000, // PA_SC_VPORT_ZMIN_15
+ 0x3f800000, // PA_SC_VPORT_ZMAX_15
+};
+static const unsigned int ci_SECT_CONTEXT_def_2[] =
+{
+ 0x00000000, // PA_SC_SCREEN_EXTENT_CONTROL
+ 0, // HOLE
+ 0x00000000, // CP_PERFMON_CNTX_CNTL
+ 0x00000000, // CP_RINGID
+ 0x00000000, // CP_VMID
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0xffffffff, // VGT_MAX_VTX_INDX
+ 0x00000000, // VGT_MIN_VTX_INDX
+ 0x00000000, // VGT_INDX_OFFSET
+ 0x00000000, // VGT_MULTI_PRIM_IB_RESET_INDX
+ 0, // HOLE
+ 0x00000000, // CB_BLEND_RED
+ 0x00000000, // CB_BLEND_GREEN
+ 0x00000000, // CB_BLEND_BLUE
+ 0x00000000, // CB_BLEND_ALPHA
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // DB_STENCIL_CONTROL
+ 0x00000000, // DB_STENCILREFMASK
+ 0x00000000, // DB_STENCILREFMASK_BF
+ 0, // HOLE
+ 0x00000000, // PA_CL_VPORT_XSCALE
+ 0x00000000, // PA_CL_VPORT_XOFFSET
+ 0x00000000, // PA_CL_VPORT_YSCALE
+ 0x00000000, // PA_CL_VPORT_YOFFSET
+ 0x00000000, // PA_CL_VPORT_ZSCALE
+ 0x00000000, // PA_CL_VPORT_ZOFFSET
+ 0x00000000, // PA_CL_VPORT_XSCALE_1
+ 0x00000000, // PA_CL_VPORT_XOFFSET_1
+ 0x00000000, // PA_CL_VPORT_YSCALE_1
+ 0x00000000, // PA_CL_VPORT_YOFFSET_1
+ 0x00000000, // PA_CL_VPORT_ZSCALE_1
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_1
+ 0x00000000, // PA_CL_VPORT_XSCALE_2
+ 0x00000000, // PA_CL_VPORT_XOFFSET_2
+ 0x00000000, // PA_CL_VPORT_YSCALE_2
+ 0x00000000, // PA_CL_VPORT_YOFFSET_2
+ 0x00000000, // PA_CL_VPORT_ZSCALE_2
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_2
+ 0x00000000, // PA_CL_VPORT_XSCALE_3
+ 0x00000000, // PA_CL_VPORT_XOFFSET_3
+ 0x00000000, // PA_CL_VPORT_YSCALE_3
+ 0x00000000, // PA_CL_VPORT_YOFFSET_3
+ 0x00000000, // PA_CL_VPORT_ZSCALE_3
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_3
+ 0x00000000, // PA_CL_VPORT_XSCALE_4
+ 0x00000000, // PA_CL_VPORT_XOFFSET_4
+ 0x00000000, // PA_CL_VPORT_YSCALE_4
+ 0x00000000, // PA_CL_VPORT_YOFFSET_4
+ 0x00000000, // PA_CL_VPORT_ZSCALE_4
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_4
+ 0x00000000, // PA_CL_VPORT_XSCALE_5
+ 0x00000000, // PA_CL_VPORT_XOFFSET_5
+ 0x00000000, // PA_CL_VPORT_YSCALE_5
+ 0x00000000, // PA_CL_VPORT_YOFFSET_5
+ 0x00000000, // PA_CL_VPORT_ZSCALE_5
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_5
+ 0x00000000, // PA_CL_VPORT_XSCALE_6
+ 0x00000000, // PA_CL_VPORT_XOFFSET_6
+ 0x00000000, // PA_CL_VPORT_YSCALE_6
+ 0x00000000, // PA_CL_VPORT_YOFFSET_6
+ 0x00000000, // PA_CL_VPORT_ZSCALE_6
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_6
+ 0x00000000, // PA_CL_VPORT_XSCALE_7
+ 0x00000000, // PA_CL_VPORT_XOFFSET_7
+ 0x00000000, // PA_CL_VPORT_YSCALE_7
+ 0x00000000, // PA_CL_VPORT_YOFFSET_7
+ 0x00000000, // PA_CL_VPORT_ZSCALE_7
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_7
+ 0x00000000, // PA_CL_VPORT_XSCALE_8
+ 0x00000000, // PA_CL_VPORT_XOFFSET_8
+ 0x00000000, // PA_CL_VPORT_YSCALE_8
+ 0x00000000, // PA_CL_VPORT_YOFFSET_8
+ 0x00000000, // PA_CL_VPORT_ZSCALE_8
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_8
+ 0x00000000, // PA_CL_VPORT_XSCALE_9
+ 0x00000000, // PA_CL_VPORT_XOFFSET_9
+ 0x00000000, // PA_CL_VPORT_YSCALE_9
+ 0x00000000, // PA_CL_VPORT_YOFFSET_9
+ 0x00000000, // PA_CL_VPORT_ZSCALE_9
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_9
+ 0x00000000, // PA_CL_VPORT_XSCALE_10
+ 0x00000000, // PA_CL_VPORT_XOFFSET_10
+ 0x00000000, // PA_CL_VPORT_YSCALE_10
+ 0x00000000, // PA_CL_VPORT_YOFFSET_10
+ 0x00000000, // PA_CL_VPORT_ZSCALE_10
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_10
+ 0x00000000, // PA_CL_VPORT_XSCALE_11
+ 0x00000000, // PA_CL_VPORT_XOFFSET_11
+ 0x00000000, // PA_CL_VPORT_YSCALE_11
+ 0x00000000, // PA_CL_VPORT_YOFFSET_11
+ 0x00000000, // PA_CL_VPORT_ZSCALE_11
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_11
+ 0x00000000, // PA_CL_VPORT_XSCALE_12
+ 0x00000000, // PA_CL_VPORT_XOFFSET_12
+ 0x00000000, // PA_CL_VPORT_YSCALE_12
+ 0x00000000, // PA_CL_VPORT_YOFFSET_12
+ 0x00000000, // PA_CL_VPORT_ZSCALE_12
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_12
+ 0x00000000, // PA_CL_VPORT_XSCALE_13
+ 0x00000000, // PA_CL_VPORT_XOFFSET_13
+ 0x00000000, // PA_CL_VPORT_YSCALE_13
+ 0x00000000, // PA_CL_VPORT_YOFFSET_13
+ 0x00000000, // PA_CL_VPORT_ZSCALE_13
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_13
+ 0x00000000, // PA_CL_VPORT_XSCALE_14
+ 0x00000000, // PA_CL_VPORT_XOFFSET_14
+ 0x00000000, // PA_CL_VPORT_YSCALE_14
+ 0x00000000, // PA_CL_VPORT_YOFFSET_14
+ 0x00000000, // PA_CL_VPORT_ZSCALE_14
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_14
+ 0x00000000, // PA_CL_VPORT_XSCALE_15
+ 0x00000000, // PA_CL_VPORT_XOFFSET_15
+ 0x00000000, // PA_CL_VPORT_YSCALE_15
+ 0x00000000, // PA_CL_VPORT_YOFFSET_15
+ 0x00000000, // PA_CL_VPORT_ZSCALE_15
+ 0x00000000, // PA_CL_VPORT_ZOFFSET_15
+ 0x00000000, // PA_CL_UCP_0_X
+ 0x00000000, // PA_CL_UCP_0_Y
+ 0x00000000, // PA_CL_UCP_0_Z
+ 0x00000000, // PA_CL_UCP_0_W
+ 0x00000000, // PA_CL_UCP_1_X
+ 0x00000000, // PA_CL_UCP_1_Y
+ 0x00000000, // PA_CL_UCP_1_Z
+ 0x00000000, // PA_CL_UCP_1_W
+ 0x00000000, // PA_CL_UCP_2_X
+ 0x00000000, // PA_CL_UCP_2_Y
+ 0x00000000, // PA_CL_UCP_2_Z
+ 0x00000000, // PA_CL_UCP_2_W
+ 0x00000000, // PA_CL_UCP_3_X
+ 0x00000000, // PA_CL_UCP_3_Y
+ 0x00000000, // PA_CL_UCP_3_Z
+ 0x00000000, // PA_CL_UCP_3_W
+ 0x00000000, // PA_CL_UCP_4_X
+ 0x00000000, // PA_CL_UCP_4_Y
+ 0x00000000, // PA_CL_UCP_4_Z
+ 0x00000000, // PA_CL_UCP_4_W
+ 0x00000000, // PA_CL_UCP_5_X
+ 0x00000000, // PA_CL_UCP_5_Y
+ 0x00000000, // PA_CL_UCP_5_Z
+ 0x00000000, // PA_CL_UCP_5_W
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // SPI_PS_INPUT_CNTL_0
+ 0x00000000, // SPI_PS_INPUT_CNTL_1
+ 0x00000000, // SPI_PS_INPUT_CNTL_2
+ 0x00000000, // SPI_PS_INPUT_CNTL_3
+ 0x00000000, // SPI_PS_INPUT_CNTL_4
+ 0x00000000, // SPI_PS_INPUT_CNTL_5
+ 0x00000000, // SPI_PS_INPUT_CNTL_6
+ 0x00000000, // SPI_PS_INPUT_CNTL_7
+ 0x00000000, // SPI_PS_INPUT_CNTL_8
+ 0x00000000, // SPI_PS_INPUT_CNTL_9
+ 0x00000000, // SPI_PS_INPUT_CNTL_10
+ 0x00000000, // SPI_PS_INPUT_CNTL_11
+ 0x00000000, // SPI_PS_INPUT_CNTL_12
+ 0x00000000, // SPI_PS_INPUT_CNTL_13
+ 0x00000000, // SPI_PS_INPUT_CNTL_14
+ 0x00000000, // SPI_PS_INPUT_CNTL_15
+ 0x00000000, // SPI_PS_INPUT_CNTL_16
+ 0x00000000, // SPI_PS_INPUT_CNTL_17
+ 0x00000000, // SPI_PS_INPUT_CNTL_18
+ 0x00000000, // SPI_PS_INPUT_CNTL_19
+ 0x00000000, // SPI_PS_INPUT_CNTL_20
+ 0x00000000, // SPI_PS_INPUT_CNTL_21
+ 0x00000000, // SPI_PS_INPUT_CNTL_22
+ 0x00000000, // SPI_PS_INPUT_CNTL_23
+ 0x00000000, // SPI_PS_INPUT_CNTL_24
+ 0x00000000, // SPI_PS_INPUT_CNTL_25
+ 0x00000000, // SPI_PS_INPUT_CNTL_26
+ 0x00000000, // SPI_PS_INPUT_CNTL_27
+ 0x00000000, // SPI_PS_INPUT_CNTL_28
+ 0x00000000, // SPI_PS_INPUT_CNTL_29
+ 0x00000000, // SPI_PS_INPUT_CNTL_30
+ 0x00000000, // SPI_PS_INPUT_CNTL_31
+ 0x00000000, // SPI_VS_OUT_CONFIG
+ 0, // HOLE
+ 0x00000000, // SPI_PS_INPUT_ENA
+ 0x00000000, // SPI_PS_INPUT_ADDR
+ 0x00000000, // SPI_INTERP_CONTROL_0
+ 0x00000002, // SPI_PS_IN_CONTROL
+ 0, // HOLE
+ 0x00000000, // SPI_BARYC_CNTL
+ 0, // HOLE
+ 0x00000000, // SPI_TMPRING_SIZE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // SPI_SHADER_POS_FORMAT
+ 0x00000000, // SPI_SHADER_Z_FORMAT
+ 0x00000000, // SPI_SHADER_COL_FORMAT
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_BLEND0_CONTROL
+ 0x00000000, // CB_BLEND1_CONTROL
+ 0x00000000, // CB_BLEND2_CONTROL
+ 0x00000000, // CB_BLEND3_CONTROL
+ 0x00000000, // CB_BLEND4_CONTROL
+ 0x00000000, // CB_BLEND5_CONTROL
+ 0x00000000, // CB_BLEND6_CONTROL
+ 0x00000000, // CB_BLEND7_CONTROL
+};
+static const unsigned int ci_SECT_CONTEXT_def_3[] =
+{
+ 0x00000000, // PA_CL_POINT_X_RAD
+ 0x00000000, // PA_CL_POINT_Y_RAD
+ 0x00000000, // PA_CL_POINT_SIZE
+ 0x00000000, // PA_CL_POINT_CULL_RAD
+ 0x00000000, // VGT_DMA_BASE_HI
+ 0x00000000, // VGT_DMA_BASE
+};
+static const unsigned int ci_SECT_CONTEXT_def_4[] =
+{
+ 0x00000000, // DB_DEPTH_CONTROL
+ 0x00000000, // DB_EQAA
+ 0x00000000, // CB_COLOR_CONTROL
+ 0x00000000, // DB_SHADER_CONTROL
+ 0x00090000, // PA_CL_CLIP_CNTL
+ 0x00000004, // PA_SU_SC_MODE_CNTL
+ 0x00000000, // PA_CL_VTE_CNTL
+ 0x00000000, // PA_CL_VS_OUT_CNTL
+ 0x00000000, // PA_CL_NANINF_CNTL
+ 0x00000000, // PA_SU_LINE_STIPPLE_CNTL
+ 0x00000000, // PA_SU_LINE_STIPPLE_SCALE
+ 0x00000000, // PA_SU_PRIM_FILTER_CNTL
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // PA_SU_POINT_SIZE
+ 0x00000000, // PA_SU_POINT_MINMAX
+ 0x00000000, // PA_SU_LINE_CNTL
+ 0x00000000, // PA_SC_LINE_STIPPLE
+ 0x00000000, // VGT_OUTPUT_PATH_CNTL
+ 0x00000000, // VGT_HOS_CNTL
+ 0x00000000, // VGT_HOS_MAX_TESS_LEVEL
+ 0x00000000, // VGT_HOS_MIN_TESS_LEVEL
+ 0x00000000, // VGT_HOS_REUSE_DEPTH
+ 0x00000000, // VGT_GROUP_PRIM_TYPE
+ 0x00000000, // VGT_GROUP_FIRST_DECR
+ 0x00000000, // VGT_GROUP_DECR
+ 0x00000000, // VGT_GROUP_VECT_0_CNTL
+ 0x00000000, // VGT_GROUP_VECT_1_CNTL
+ 0x00000000, // VGT_GROUP_VECT_0_FMT_CNTL
+ 0x00000000, // VGT_GROUP_VECT_1_FMT_CNTL
+ 0x00000000, // VGT_GS_MODE
+ 0x00000000, // VGT_GS_ONCHIP_CNTL
+ 0x00000000, // PA_SC_MODE_CNTL_0
+ 0x00000000, // PA_SC_MODE_CNTL_1
+ 0x00000000, // VGT_ENHANCE
+ 0x00000100, // VGT_GS_PER_ES
+ 0x00000080, // VGT_ES_PER_GS
+ 0x00000002, // VGT_GS_PER_VS
+ 0x00000000, // VGT_GSVS_RING_OFFSET_1
+ 0x00000000, // VGT_GSVS_RING_OFFSET_2
+ 0x00000000, // VGT_GSVS_RING_OFFSET_3
+ 0x00000000, // VGT_GS_OUT_PRIM_TYPE
+ 0x00000000, // IA_ENHANCE
+};
+static const unsigned int ci_SECT_CONTEXT_def_5[] =
+{
+ 0x00000000, // WD_ENHANCE
+ 0x00000000, // VGT_PRIMITIVEID_EN
+};
+static const unsigned int ci_SECT_CONTEXT_def_6[] =
+{
+ 0x00000000, // VGT_PRIMITIVEID_RESET
+};
+static const unsigned int ci_SECT_CONTEXT_def_7[] =
+{
+ 0x00000000, // VGT_MULTI_PRIM_IB_RESET_EN
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_INSTANCE_STEP_RATE_0
+ 0x00000000, // VGT_INSTANCE_STEP_RATE_1
+ 0x000000ff, // IA_MULTI_VGT_PARAM
+ 0x00000000, // VGT_ESGS_RING_ITEMSIZE
+ 0x00000000, // VGT_GSVS_RING_ITEMSIZE
+ 0x00000000, // VGT_REUSE_OFF
+ 0x00000000, // VGT_VTX_CNT_EN
+ 0x00000000, // DB_HTILE_SURFACE
+ 0x00000000, // DB_SRESULTS_COMPARE_STATE0
+ 0x00000000, // DB_SRESULTS_COMPARE_STATE1
+ 0x00000000, // DB_PRELOAD_CONTROL
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_0
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_0
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_0
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_1
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_1
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_1
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_2
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_2
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_2
+ 0x00000000, // VGT_STRMOUT_BUFFER_SIZE_3
+ 0x00000000, // VGT_STRMOUT_VTX_STRIDE_3
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_BUFFER_OFFSET_3
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_OFFSET
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
+ 0x00000000, // VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
+ 0, // HOLE
+ 0x00000000, // VGT_GS_MAX_VERT_OUT
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // VGT_SHADER_STAGES_EN
+ 0x00000000, // VGT_LS_HS_CONFIG
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE_1
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE_2
+ 0x00000000, // VGT_GS_VERT_ITEMSIZE_3
+ 0x00000000, // VGT_TF_PARAM
+ 0x00000000, // DB_ALPHA_TO_MASK
+ 0, // HOLE
+ 0x00000000, // PA_SU_POLY_OFFSET_DB_FMT_CNTL
+ 0x00000000, // PA_SU_POLY_OFFSET_CLAMP
+ 0x00000000, // PA_SU_POLY_OFFSET_FRONT_SCALE
+ 0x00000000, // PA_SU_POLY_OFFSET_FRONT_OFFSET
+ 0x00000000, // PA_SU_POLY_OFFSET_BACK_SCALE
+ 0x00000000, // PA_SU_POLY_OFFSET_BACK_OFFSET
+ 0x00000000, // VGT_GS_INSTANCE_CNT
+ 0x00000000, // VGT_STRMOUT_CONFIG
+ 0x00000000, // VGT_STRMOUT_BUFFER_CONFIG
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // PA_SC_CENTROID_PRIORITY_0
+ 0x00000000, // PA_SC_CENTROID_PRIORITY_1
+ 0x00001000, // PA_SC_LINE_CNTL
+ 0x00000000, // PA_SC_AA_CONFIG
+ 0x00000005, // PA_SU_VTX_CNTL
+ 0x3f800000, // PA_CL_GB_VERT_CLIP_ADJ
+ 0x3f800000, // PA_CL_GB_VERT_DISC_ADJ
+ 0x3f800000, // PA_CL_GB_HORZ_CLIP_ADJ
+ 0x3f800000, // PA_CL_GB_HORZ_DISC_ADJ
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y0_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y1_3
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_0
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_1
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_2
+ 0x00000000, // PA_SC_AA_SAMPLE_LOCS_PIXEL_X1Y1_3
+ 0xffffffff, // PA_SC_AA_MASK_X0Y0_X1Y0
+ 0xffffffff, // PA_SC_AA_MASK_X0Y1_X1Y1
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0, // HOLE
+ 0x0000000e, // VGT_VERTEX_REUSE_BLOCK_CNTL
+ 0x00000010, // VGT_OUT_DEALLOC_CNTL
+ 0x00000000, // CB_COLOR0_BASE
+ 0x00000000, // CB_COLOR0_PITCH
+ 0x00000000, // CB_COLOR0_SLICE
+ 0x00000000, // CB_COLOR0_VIEW
+ 0x00000000, // CB_COLOR0_INFO
+ 0x00000000, // CB_COLOR0_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR0_CMASK
+ 0x00000000, // CB_COLOR0_CMASK_SLICE
+ 0x00000000, // CB_COLOR0_FMASK
+ 0x00000000, // CB_COLOR0_FMASK_SLICE
+ 0x00000000, // CB_COLOR0_CLEAR_WORD0
+ 0x00000000, // CB_COLOR0_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR1_BASE
+ 0x00000000, // CB_COLOR1_PITCH
+ 0x00000000, // CB_COLOR1_SLICE
+ 0x00000000, // CB_COLOR1_VIEW
+ 0x00000000, // CB_COLOR1_INFO
+ 0x00000000, // CB_COLOR1_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR1_CMASK
+ 0x00000000, // CB_COLOR1_CMASK_SLICE
+ 0x00000000, // CB_COLOR1_FMASK
+ 0x00000000, // CB_COLOR1_FMASK_SLICE
+ 0x00000000, // CB_COLOR1_CLEAR_WORD0
+ 0x00000000, // CB_COLOR1_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR2_BASE
+ 0x00000000, // CB_COLOR2_PITCH
+ 0x00000000, // CB_COLOR2_SLICE
+ 0x00000000, // CB_COLOR2_VIEW
+ 0x00000000, // CB_COLOR2_INFO
+ 0x00000000, // CB_COLOR2_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR2_CMASK
+ 0x00000000, // CB_COLOR2_CMASK_SLICE
+ 0x00000000, // CB_COLOR2_FMASK
+ 0x00000000, // CB_COLOR2_FMASK_SLICE
+ 0x00000000, // CB_COLOR2_CLEAR_WORD0
+ 0x00000000, // CB_COLOR2_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR3_BASE
+ 0x00000000, // CB_COLOR3_PITCH
+ 0x00000000, // CB_COLOR3_SLICE
+ 0x00000000, // CB_COLOR3_VIEW
+ 0x00000000, // CB_COLOR3_INFO
+ 0x00000000, // CB_COLOR3_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR3_CMASK
+ 0x00000000, // CB_COLOR3_CMASK_SLICE
+ 0x00000000, // CB_COLOR3_FMASK
+ 0x00000000, // CB_COLOR3_FMASK_SLICE
+ 0x00000000, // CB_COLOR3_CLEAR_WORD0
+ 0x00000000, // CB_COLOR3_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR4_BASE
+ 0x00000000, // CB_COLOR4_PITCH
+ 0x00000000, // CB_COLOR4_SLICE
+ 0x00000000, // CB_COLOR4_VIEW
+ 0x00000000, // CB_COLOR4_INFO
+ 0x00000000, // CB_COLOR4_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR4_CMASK
+ 0x00000000, // CB_COLOR4_CMASK_SLICE
+ 0x00000000, // CB_COLOR4_FMASK
+ 0x00000000, // CB_COLOR4_FMASK_SLICE
+ 0x00000000, // CB_COLOR4_CLEAR_WORD0
+ 0x00000000, // CB_COLOR4_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR5_BASE
+ 0x00000000, // CB_COLOR5_PITCH
+ 0x00000000, // CB_COLOR5_SLICE
+ 0x00000000, // CB_COLOR5_VIEW
+ 0x00000000, // CB_COLOR5_INFO
+ 0x00000000, // CB_COLOR5_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR5_CMASK
+ 0x00000000, // CB_COLOR5_CMASK_SLICE
+ 0x00000000, // CB_COLOR5_FMASK
+ 0x00000000, // CB_COLOR5_FMASK_SLICE
+ 0x00000000, // CB_COLOR5_CLEAR_WORD0
+ 0x00000000, // CB_COLOR5_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR6_BASE
+ 0x00000000, // CB_COLOR6_PITCH
+ 0x00000000, // CB_COLOR6_SLICE
+ 0x00000000, // CB_COLOR6_VIEW
+ 0x00000000, // CB_COLOR6_INFO
+ 0x00000000, // CB_COLOR6_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR6_CMASK
+ 0x00000000, // CB_COLOR6_CMASK_SLICE
+ 0x00000000, // CB_COLOR6_FMASK
+ 0x00000000, // CB_COLOR6_FMASK_SLICE
+ 0x00000000, // CB_COLOR6_CLEAR_WORD0
+ 0x00000000, // CB_COLOR6_CLEAR_WORD1
+ 0, // HOLE
+ 0, // HOLE
+ 0x00000000, // CB_COLOR7_BASE
+ 0x00000000, // CB_COLOR7_PITCH
+ 0x00000000, // CB_COLOR7_SLICE
+ 0x00000000, // CB_COLOR7_VIEW
+ 0x00000000, // CB_COLOR7_INFO
+ 0x00000000, // CB_COLOR7_ATTRIB
+ 0, // HOLE
+ 0x00000000, // CB_COLOR7_CMASK
+ 0x00000000, // CB_COLOR7_CMASK_SLICE
+ 0x00000000, // CB_COLOR7_FMASK
+ 0x00000000, // CB_COLOR7_FMASK_SLICE
+ 0x00000000, // CB_COLOR7_CLEAR_WORD0
+ 0x00000000, // CB_COLOR7_CLEAR_WORD1
+};
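+/* each extent: { register default values, starting register index, register count } */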
+static const struct cs_extent_def ci_SECT_CONTEXT_defs[] =
+{
+ {ci_SECT_CONTEXT_def_1, 0x0000a000, 212 },
+ {ci_SECT_CONTEXT_def_2, 0x0000a0d6, 274 },
+ {ci_SECT_CONTEXT_def_3, 0x0000a1f5, 6 },
+ {ci_SECT_CONTEXT_def_4, 0x0000a200, 157 },
+ {ci_SECT_CONTEXT_def_5, 0x0000a2a0, 2 },
+ {ci_SECT_CONTEXT_def_6, 0x0000a2a3, 1 },
+ {ci_SECT_CONTEXT_def_7, 0x0000a2a5, 233 },
+ { 0, 0, 0 }
+};
+static const struct cs_section_def ci_cs_data[] = {
+ { ci_SECT_CONTEXT_defs, SECT_CONTEXT },
+ { 0, SECT_NONE }
+};
diff --git a/drivers/gpu/drm/radeon/clearstate_evergreen.h b/drivers/gpu/drm/radeon/clearstate_evergreen.h
index 4791d856b7f..63a1ffbb3ce 100644
--- a/drivers/gpu/drm/radeon/clearstate_evergreen.h
+++ b/drivers/gpu/drm/radeon/clearstate_evergreen.h
@@ -1072,7 +1072,7 @@ static const struct cs_extent_def SECT_CTRLCONST_defs[] =
{SECT_CTRLCONST_def_1, 0x0000f3fc, 2 },
{ 0, 0, 0 }
};
-struct cs_section_def evergreen_cs_data[] = {
+static const struct cs_section_def evergreen_cs_data[] = {
{ SECT_CONTEXT_defs, SECT_CONTEXT },
{ SECT_CLEAR_defs, SECT_CLEAR },
{ SECT_CTRLCONST_defs, SECT_CTRLCONST },
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 9bcdd174780..95a66db08d9 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -2038,9 +2038,6 @@ int cypress_dpm_init(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi;
struct evergreen_power_info *eg_pi;
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- uint16_t data_offset, size;
- uint8_t frev, crev;
struct atom_clock_dividers dividers;
int ret;
@@ -2092,16 +2089,7 @@ int cypress_dpm_init(struct radeon_device *rdev)
eg_pi->vddci_control =
radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
- pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- pi->dynamic_ss = true;
- }
+ rv770_get_engine_memory_ss(rdev);
pi->asi = RV770_ASI_DFLT;
pi->pasi = CYPRESS_HASI_DFLT;
@@ -2122,8 +2110,7 @@ int cypress_dpm_init(struct radeon_device *rdev)
pi->dynamic_pcie_gen2 = true;
- if (pi->gfx_clock_gating &&
- (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
pi->thermal_protection = true;
else
pi->thermal_protection = false;
@@ -2179,7 +2166,8 @@ bool cypress_dpm_vblank_too_short(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
u32 vblank_time = r600_dpm_get_vblank_time(rdev);
- u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
+ /* we never hit the non-gddr5 limit so disable it */
+ u32 switch_limit = pi->mem_gddr5 ? 450 : 0;
if (vblank_time < switch_limit)
return true;
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
new file mode 100644
index 00000000000..8953255e894
--- /dev/null
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -0,0 +1,278 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/hdmi.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "sid.h"
+
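+/*
+ * Audio (Azalia) endpoint registers are accessed indirectly: write the
+ * register index to AZ_F0_CODEC_ENDPOINT_INDEX for the pin's block, then
+ * read or write the value through AZ_F0_CODEC_ENDPOINT_DATA.
+ */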
+static u32 dce6_endpoint_rreg(struct radeon_device *rdev,
+ u32 block_offset, u32 reg)
+{
+ u32 r;
+
+ WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
+ r = RREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset);
+ return r;
+}
+
+static void dce6_endpoint_wreg(struct radeon_device *rdev,
+ u32 block_offset, u32 reg, u32 v)
+{
+ if (ASIC_IS_DCE8(rdev))
+ WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset, reg);
+ else
+ WREG32(AZ_F0_CODEC_ENDPOINT_INDEX + block_offset,
+ AZ_ENDPOINT_REG_WRITE_EN | AZ_ENDPOINT_REG_INDEX(reg));
+ WREG32(AZ_F0_CODEC_ENDPOINT_DATA + block_offset, v);
+}
+
+#define RREG32_ENDPOINT(block, reg) dce6_endpoint_rreg(rdev, (block), (reg))
+#define WREG32_ENDPOINT(block, reg, v) dce6_endpoint_wreg(rdev, (block), (reg), (v))
+
+
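+/*
+ * A port connectivity field of 1 in the pin's response configuration
+ * register means "no physical connection"; any other value is treated
+ * as connected.
+ */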
+static void dce6_afmt_get_connected_pins(struct radeon_device *rdev)
+{
+ int i;
+ u32 offset, tmp;
+
+ for (i = 0; i < rdev->audio.num_pins; i++) {
+ offset = rdev->audio.pin[i].offset;
+ tmp = RREG32_ENDPOINT(offset,
+ AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT);
+ if (((tmp & PORT_CONNECTIVITY_MASK) >> PORT_CONNECTIVITY_SHIFT) == 1)
+ rdev->audio.pin[i].connected = false;
+ else
+ rdev->audio.pin[i].connected = true;
+ }
+}
+
+struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev)
+{
+ int i;
+
+ dce6_afmt_get_connected_pins(rdev);
+
+ for (i = 0; i < rdev->audio.num_pins; i++) {
+ if (rdev->audio.pin[i].connected)
+ return &rdev->audio.pin[i];
+ }
+ DRM_ERROR("No connected audio pins found!\n");
+ return NULL;
+}
+
+void dce6_afmt_select_pin(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u32 offset = dig->afmt->offset;
+ u32 id;
+
+ if (!dig->afmt->pin)
+ return;
+
+ id = dig->afmt->pin->id;
+
+ WREG32(AFMT_AUDIO_SRC_CONTROL + offset, AFMT_AUDIO_SRC_SELECT(id));
+}
+
+void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ u32 offset, tmp;
+ u8 *sadb;
+ int sad_count;
+
+ if (!dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->pin->offset;
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ radeon_connector = to_radeon_connector(connector);
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+ return;
+ }
+
+ /* program the speaker allocation */
+ tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
+ tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
+ /* set HDMI mode */
+ tmp |= HDMI_CONNECTION;
+ if (sad_count)
+ tmp |= SPEAKER_ALLOCATION(sadb[0]);
+ else
+ tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+ WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
+
+ kfree(sadb);
+}
+
+void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u32 offset;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ struct cea_sad *sads;
+ int i, sad_count;
+
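+ /* each AZ audio descriptor register advertises one CEA coding type;
+ * it is filled from the matching short audio descriptor (SAD) below
+ */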
+ static const u16 eld_reg_to_type[][2] = {
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+ { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+ };
+
+ if (!dig->afmt->pin)
+ return;
+
+ offset = dig->afmt->pin->offset;
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ radeon_connector = to_radeon_connector(connector);
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ return;
+ }
+ BUG_ON(!sads);
+
+ for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+ u32 value = 0;
+ int j;
+
+ for (j = 0; j < sad_count; j++) {
+ struct cea_sad *sad = &sads[j];
+
+ if (sad->format == eld_reg_to_type[i][1]) {
+ value = MAX_CHANNELS(sad->channels) |
+ DESCRIPTOR_BYTE_2(sad->byte2) |
+ SUPPORTED_FREQUENCIES(sad->freq);
+ if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+ value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
+ break;
+ }
+ }
+ WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value);
+ }
+
+ kfree(sads);
+}
+
+static int dce6_audio_chipset_supported(struct radeon_device *rdev)
+{
+ return !ASIC_IS_NODCE(rdev);
+}
+
+static void dce6_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable)
+{
+ WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL,
+ enable ? AUDIO_ENABLED : 0);
+ DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
+}
+
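+/* AZ endpoint register blocks start at 0x5e00 and are spaced 0x18 apart;
+ * offsets are stored relative to the first block.
+ */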
+static const u32 pin_offsets[7] =
+{
+ (0x5e00 - 0x5e00),
+ (0x5e18 - 0x5e00),
+ (0x5e30 - 0x5e00),
+ (0x5e48 - 0x5e00),
+ (0x5e60 - 0x5e00),
+ (0x5e78 - 0x5e00),
+ (0x5e90 - 0x5e00),
+};
+
+int dce6_audio_init(struct radeon_device *rdev)
+{
+ int i;
+
+ if (!radeon_audio || !dce6_audio_chipset_supported(rdev))
+ return 0;
+
+ rdev->audio.enabled = true;
+
+ if (ASIC_IS_DCE8(rdev))
+ rdev->audio.num_pins = 7;
+ else
+ rdev->audio.num_pins = 6;
+
+ for (i = 0; i < rdev->audio.num_pins; i++) {
+ rdev->audio.pin[i].channels = -1;
+ rdev->audio.pin[i].rate = -1;
+ rdev->audio.pin[i].bits_per_sample = -1;
+ rdev->audio.pin[i].status_bits = 0;
+ rdev->audio.pin[i].category_code = 0;
+ rdev->audio.pin[i].connected = false;
+ rdev->audio.pin[i].offset = pin_offsets[i];
+ rdev->audio.pin[i].id = i;
+ dce6_audio_enable(rdev, &rdev->audio.pin[i], true);
+ }
+
+ return 0;
+}
+
+void dce6_audio_fini(struct radeon_device *rdev)
+{
+ int i;
+
+ if (!rdev->audio.enabled)
+ return;
+
+ for (i = 0; i < rdev->audio.num_pins; i++)
+ dce6_audio_enable(rdev, &rdev->audio.pin[i], false);
+
+ rdev->audio.enabled = false;
+}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 038dcac7670..555164e270a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -47,7 +47,7 @@ static const u32 crtc_offsets[6] =
#include "clearstate_evergreen.h"
-static u32 sumo_rlc_save_restore_register_list[] =
+static const u32 sumo_rlc_save_restore_register_list[] =
{
0x98fc,
0x9830,
@@ -131,7 +131,6 @@ static u32 sumo_rlc_save_restore_register_list[] =
0x9150,
0x802c,
};
-static u32 sumo_rlc_save_restore_register_list_size = ARRAY_SIZE(sumo_rlc_save_restore_register_list);
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
@@ -141,6 +140,12 @@ extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl);
extern void cayman_vm_decode_fault(struct radeon_device *rdev,
u32 status, u32 addr);
+void cik_init_cp_pg_table(struct radeon_device *rdev);
+
+extern u32 si_get_csb_size(struct radeon_device *rdev);
+extern void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
+extern u32 cik_get_csb_size(struct radeon_device *rdev);
+extern void cik_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer);
static const u32 evergreen_golden_registers[] =
{
@@ -1807,7 +1812,8 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode,
struct drm_display_mode *other_mode)
{
- u32 tmp;
+ u32 tmp, buffer_alloc, i;
+ u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
/*
* Line Buffer Setup
* There are 3 line buffers, each one shared by 2 display controllers.
@@ -1830,18 +1836,34 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
* non-linked crtcs for maximum line buffer allocation.
*/
if (radeon_crtc->base.enabled && mode) {
- if (other_mode)
+ if (other_mode) {
tmp = 0; /* 1/2 */
- else
+ buffer_alloc = 1;
+ } else {
tmp = 2; /* whole */
- } else
+ buffer_alloc = 2;
+ }
+ } else {
tmp = 0;
+ buffer_alloc = 0;
+ }
/* second controller of the pair uses second half of the lb */
if (radeon_crtc->crtc_id % 2)
tmp += 4;
WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
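+ /* program the DMIF buffers to match the line buffer allocation and
+ * wait for the hardware to report the allocation as complete
+ */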
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+ WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+ DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+ DMIF_BUFFERS_ALLOCATED_COMPLETED)
+ break;
+ udelay(1);
+ }
+ }
+
if (radeon_crtc->base.enabled && mode) {
switch (tmp) {
case 0:
@@ -2881,8 +2903,8 @@ static int evergreen_cp_resume(struct radeon_device *rdev)
RREG32(GRBM_SOFT_RESET);
/* Set ring buffer size */
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -3613,7 +3635,7 @@ bool evergreen_is_display_hung(struct radeon_device *rdev)
return true;
}
-static u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
+u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev)
{
u32 reset_mask = 0;
u32 tmp;
@@ -3839,28 +3861,6 @@ bool evergreen_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *rin
return radeon_ring_test_lockup(rdev, ring);
}
-/**
- * evergreen_dma_is_lockup - Check if the DMA engine is locked up
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Check if the async DMA engine is locked up.
- * Returns true if the engine appears to be locked up, false if not.
- */
-bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
-
- if (!(reset_mask & RADEON_RESET_DMA)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force ring activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
-
/*
* RLC
*/
@@ -3894,147 +3894,231 @@ void sumo_rlc_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->rlc.clear_state_obj);
rdev->rlc.clear_state_obj = NULL;
}
+
+ /* cp table block */
+ if (rdev->rlc.cp_table_obj) {
+ r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
+ if (unlikely(r != 0))
+ dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+ radeon_bo_unpin(rdev->rlc.cp_table_obj);
+ radeon_bo_unreserve(rdev->rlc.cp_table_obj);
+
+ radeon_bo_unref(&rdev->rlc.cp_table_obj);
+ rdev->rlc.cp_table_obj = NULL;
+ }
}
+#define CP_ME_TABLE_SIZE 96
+
int sumo_rlc_init(struct radeon_device *rdev)
{
- u32 *src_ptr;
+ const u32 *src_ptr;
volatile u32 *dst_ptr;
u32 dws, data, i, j, k, reg_num;
- u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index;
+ u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index = 0;
u64 reg_list_mc_addr;
- struct cs_section_def *cs_data;
+ const struct cs_section_def *cs_data;
int r;
src_ptr = rdev->rlc.reg_list;
dws = rdev->rlc.reg_list_size;
+ if (rdev->family >= CHIP_BONAIRE) {
+ dws += (5 * 16) + 48 + 48 + 64;
+ }
cs_data = rdev->rlc.cs_data;
- /* save restore block */
- if (rdev->rlc.save_restore_obj == NULL) {
- r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj);
+ if (src_ptr) {
+ /* save restore block */
+ if (rdev->rlc.save_restore_obj == NULL) {
+ r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.save_restore_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
+ return r;
+ }
+ }
+
+ r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
+ if (unlikely(r != 0)) {
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->rlc.save_restore_gpu_addr);
if (r) {
- dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
+ radeon_bo_unreserve(rdev->rlc.save_restore_obj);
+ dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
+ sumo_rlc_fini(rdev);
return r;
}
- }
- r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
- if (unlikely(r != 0)) {
- sumo_rlc_fini(rdev);
- return r;
- }
- r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->rlc.save_restore_gpu_addr);
- if (r) {
+ r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ /* write the sr buffer */
+ dst_ptr = rdev->rlc.sr_ptr;
+ if (rdev->family >= CHIP_TAHITI) {
+ /* SI */
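+ /* on SI the register offset list is copied to the save/restore buffer as-is */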
+ for (i = 0; i < rdev->rlc.reg_list_size; i++)
+ dst_ptr[i] = src_ptr[i];
+ } else {
+ /* ON/LN/TN */
+ /* format:
+ * dw0: (reg2 << 16) | reg1
+ * dw1: reg1 save space
+ * dw2: reg2 save space
+ */
+ for (i = 0; i < dws; i++) {
+ data = src_ptr[i] >> 2;
+ i++;
+ if (i < dws)
+ data |= (src_ptr[i] >> 2) << 16;
+ j = (((i - 1) * 3) / 2);
+ dst_ptr[j] = data;
+ }
+ j = ((i * 3) / 2);
+ dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER;
+ }
+ radeon_bo_kunmap(rdev->rlc.save_restore_obj);
radeon_bo_unreserve(rdev->rlc.save_restore_obj);
- dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
- sumo_rlc_fini(rdev);
- return r;
}
- r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
- if (r) {
- dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
- sumo_rlc_fini(rdev);
- return r;
- }
- /* write the sr buffer */
- dst_ptr = rdev->rlc.sr_ptr;
- /* format:
- * dw0: (reg2 << 16) | reg1
- * dw1: reg1 save space
- * dw2: reg2 save space
- */
- for (i = 0; i < dws; i++) {
- data = src_ptr[i] >> 2;
- i++;
- if (i < dws)
- data |= (src_ptr[i] >> 2) << 16;
- j = (((i - 1) * 3) / 2);
- dst_ptr[j] = data;
- }
- j = ((i * 3) / 2);
- dst_ptr[j] = RLC_SAVE_RESTORE_LIST_END_MARKER;
-
- radeon_bo_kunmap(rdev->rlc.save_restore_obj);
- radeon_bo_unreserve(rdev->rlc.save_restore_obj);
- /* clear state block */
- reg_list_num = 0;
- dws = 0;
- for (i = 0; cs_data[i].section != NULL; i++) {
- for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
- reg_list_num++;
- dws += cs_data[i].section[j].reg_count;
+ if (cs_data) {
+ /* clear state block */
+ if (rdev->family >= CHIP_BONAIRE) {
+ rdev->rlc.clear_state_size = dws = cik_get_csb_size(rdev);
+ } else if (rdev->family >= CHIP_TAHITI) {
+ rdev->rlc.clear_state_size = si_get_csb_size(rdev);
+ dws = rdev->rlc.clear_state_size + (256 / 4);
+ } else {
+ reg_list_num = 0;
+ dws = 0;
+ for (i = 0; cs_data[i].section != NULL; i++) {
+ for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
+ reg_list_num++;
+ dws += cs_data[i].section[j].reg_count;
+ }
+ }
+ reg_list_blk_index = (3 * reg_list_num + 2);
+ dws += reg_list_blk_index;
+ rdev->rlc.clear_state_size = dws;
}
- }
- reg_list_blk_index = (3 * reg_list_num + 2);
- dws += reg_list_blk_index;
- if (rdev->rlc.clear_state_obj == NULL) {
- r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
+ if (rdev->rlc.clear_state_obj == NULL) {
+ r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ }
+ r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
+ if (unlikely(r != 0)) {
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->rlc.clear_state_gpu_addr);
if (r) {
- dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
+ radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+ dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
sumo_rlc_fini(rdev);
return r;
}
- }
- r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
- if (unlikely(r != 0)) {
- sumo_rlc_fini(rdev);
- return r;
- }
- r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->rlc.clear_state_gpu_addr);
- if (r) {
- radeon_bo_unreserve(rdev->rlc.clear_state_obj);
- dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
- sumo_rlc_fini(rdev);
- return r;
- }
- r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
- if (r) {
- dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
- sumo_rlc_fini(rdev);
- return r;
- }
- /* set up the cs buffer */
- dst_ptr = rdev->rlc.cs_ptr;
- reg_list_hdr_blk_index = 0;
- reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
- data = upper_32_bits(reg_list_mc_addr);
- dst_ptr[reg_list_hdr_blk_index] = data;
- reg_list_hdr_blk_index++;
- for (i = 0; cs_data[i].section != NULL; i++) {
- for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
- reg_num = cs_data[i].section[j].reg_count;
- data = reg_list_mc_addr & 0xffffffff;
- dst_ptr[reg_list_hdr_blk_index] = data;
- reg_list_hdr_blk_index++;
-
- data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
- dst_ptr[reg_list_hdr_blk_index] = data;
- reg_list_hdr_blk_index++;
-
- data = 0x08000000 | (reg_num * 4);
+ r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ /* set up the cs buffer */
+ dst_ptr = rdev->rlc.cs_ptr;
+ if (rdev->family >= CHIP_BONAIRE) {
+ cik_get_csb_buffer(rdev, dst_ptr);
+ } else if (rdev->family >= CHIP_TAHITI) {
+ reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + 256;
+ dst_ptr[0] = upper_32_bits(reg_list_mc_addr);
+ dst_ptr[1] = lower_32_bits(reg_list_mc_addr);
+ dst_ptr[2] = rdev->rlc.clear_state_size;
+ si_get_csb_buffer(rdev, &dst_ptr[(256/4)]);
+ } else {
+ reg_list_hdr_blk_index = 0;
+ reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
+ data = upper_32_bits(reg_list_mc_addr);
dst_ptr[reg_list_hdr_blk_index] = data;
reg_list_hdr_blk_index++;
-
- for (k = 0; k < reg_num; k++) {
- data = cs_data[i].section[j].extent[k];
- dst_ptr[reg_list_blk_index + k] = data;
+ for (i = 0; cs_data[i].section != NULL; i++) {
+ for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
+ reg_num = cs_data[i].section[j].reg_count;
+ data = reg_list_mc_addr & 0xffffffff;
+ dst_ptr[reg_list_hdr_blk_index] = data;
+ reg_list_hdr_blk_index++;
+
+ data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
+ dst_ptr[reg_list_hdr_blk_index] = data;
+ reg_list_hdr_blk_index++;
+
+ data = 0x08000000 | (reg_num * 4);
+ dst_ptr[reg_list_hdr_blk_index] = data;
+ reg_list_hdr_blk_index++;
+
+ for (k = 0; k < reg_num; k++) {
+ data = cs_data[i].section[j].extent[k];
+ dst_ptr[reg_list_blk_index + k] = data;
+ }
+ reg_list_mc_addr += reg_num * 4;
+ reg_list_blk_index += reg_num;
+ }
}
- reg_list_mc_addr += reg_num * 4;
- reg_list_blk_index += reg_num;
+ dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
}
+ radeon_bo_kunmap(rdev->rlc.clear_state_obj);
+ radeon_bo_unreserve(rdev->rlc.clear_state_obj);
}
- dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
- radeon_bo_kunmap(rdev->rlc.clear_state_obj);
- radeon_bo_unreserve(rdev->rlc.clear_state_obj);
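+ /* CP power-gating table (see cik_init_cp_pg_table); only allocated
+ * when the asic sets cp_table_size
+ */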
+ if (rdev->rlc.cp_table_size) {
+ if (rdev->rlc.cp_table_obj == NULL) {
+ r = radeon_bo_create(rdev, rdev->rlc.cp_table_size, PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.cp_table_obj);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) create RLC cp table bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ }
+
+ r = radeon_bo_reserve(rdev->rlc.cp_table_obj, false);
+ if (unlikely(r != 0)) {
+ dev_warn(rdev->dev, "(%d) reserve RLC cp table bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_pin(rdev->rlc.cp_table_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->rlc.cp_table_gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(rdev->rlc.cp_table_obj);
+ dev_warn(rdev->dev, "(%d) pin RLC cp_table bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+ r = radeon_bo_kmap(rdev->rlc.cp_table_obj, (void **)&rdev->rlc.cp_table_ptr);
+ if (r) {
+ dev_warn(rdev->dev, "(%d) map RLC cp table bo failed\n", r);
+ sumo_rlc_fini(rdev);
+ return r;
+ }
+
+ cik_init_cp_pg_table(rdev);
+
+ radeon_bo_kunmap(rdev->rlc.cp_table_obj);
+ radeon_bo_unreserve(rdev->rlc.cp_table_obj);
+
+ }
return 0;
}
@@ -4959,143 +5043,6 @@ restart_ih:
return IRQ_HANDLED;
}
-/**
- * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
- *
- * @rdev: radeon_device pointer
- * @fence: radeon fence object
- *
- * Add a DMA fence packet to the ring to write
- * the fence seq number and DMA trap packet to generate
- * an interrupt if needed (evergreen-SI).
- */
-void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
- struct radeon_fence *fence)
-{
- struct radeon_ring *ring = &rdev->ring[fence->ring];
- u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
- /* write the fence */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
- radeon_ring_write(ring, addr & 0xfffffffc);
- radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
- radeon_ring_write(ring, fence->seq);
- /* generate an interrupt */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
- /* flush HDP */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
- radeon_ring_write(ring, 1);
-}
-
-/**
- * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to schedule
- *
- * Schedule an IB in the DMA ring (evergreen).
- */
-void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
- struct radeon_ib *ib)
-{
- struct radeon_ring *ring = &rdev->ring[ib->ring];
-
- if (rdev->wb.enabled) {
- u32 next_rptr = ring->wptr + 4;
- while ((next_rptr & 7) != 5)
- next_rptr++;
- next_rptr += 3;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
- radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
- radeon_ring_write(ring, next_rptr);
- }
-
- /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
- * Pad as necessary with NOPs.
- */
- while ((ring->wptr & 7) != 5)
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
- radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
- radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
-
-}
-
-/**
- * evergreen_copy_dma - copy pages using the DMA engine
- *
- * @rdev: radeon_device pointer
- * @src_offset: src GPU address
- * @dst_offset: dst GPU address
- * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
- *
- * Copy GPU paging using the DMA engine (evergreen-cayman).
- * Used by the radeon ttm implementation to move pages if
- * registered as the asic copy callback.
- */
-int evergreen_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
-{
- struct radeon_semaphore *sem = NULL;
- int ring_index = rdev->asic->copy.dma_ring_index;
- struct radeon_ring *ring = &rdev->ring[ring_index];
- u32 size_in_dw, cur_size_in_dw;
- int i, num_loops;
- int r = 0;
-
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
- }
-
- size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
- num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
- r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
- return r;
- }
-
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
-
- for (i = 0; i < num_loops; i++) {
- cur_size_in_dw = size_in_dw;
- if (cur_size_in_dw > 0xFFFFF)
- cur_size_in_dw = 0xFFFFF;
- size_in_dw -= cur_size_in_dw;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
- radeon_ring_write(ring, dst_offset & 0xfffffffc);
- radeon_ring_write(ring, src_offset & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
- radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
- src_offset += cur_size_in_dw * 4;
- dst_offset += cur_size_in_dw * 4;
- }
-
- r = radeon_fence_emit(rdev, fence, ring->idx);
- if (r) {
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
-
- radeon_ring_unlock_commit(rdev, ring);
- radeon_semaphore_free(rdev, &sem, *fence);
-
- return r;
-}
-
static int evergreen_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring;
@@ -5106,6 +5053,13 @@ static int evergreen_startup(struct radeon_device *rdev)
/* enable aspm */
evergreen_program_aspm(rdev);
+ /* scratch needs to be initialized before MC */
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
+ evergreen_mc_program(rdev);
+
if (ASIC_IS_DCE5(rdev)) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
r = ni_init_microcode(rdev);
@@ -5129,11 +5083,6 @@ static int evergreen_startup(struct radeon_device *rdev)
}
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
- evergreen_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
evergreen_agp_enable(rdev);
} else {
@@ -5143,17 +5092,11 @@ static int evergreen_startup(struct radeon_device *rdev)
}
evergreen_gpu_init(rdev);
- r = evergreen_blit_init(rdev);
- if (r) {
- r600_blit_fini(rdev);
- rdev->asic->copy.copy = NULL;
- dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
- }
-
/* allocate rlc buffers */
if (rdev->flags & RADEON_IS_IGP) {
rdev->rlc.reg_list = sumo_rlc_save_restore_register_list;
- rdev->rlc.reg_list_size = sumo_rlc_save_restore_register_list_size;
+ rdev->rlc.reg_list_size =
+ (u32)ARRAY_SIZE(sumo_rlc_save_restore_register_list);
rdev->rlc.cs_data = evergreen_cs_data;
r = sumo_rlc_init(rdev);
if (r) {
@@ -5179,7 +5122,7 @@ static int evergreen_startup(struct radeon_device *rdev)
return r;
}
- r = rv770_uvd_resume(rdev);
+ r = uvd_v2_2_resume(rdev);
if (!r) {
r = radeon_fence_driver_start_ring(rdev,
R600_RING_TYPE_UVD_INDEX);
@@ -5208,14 +5151,14 @@ static int evergreen_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_RB_RPTR, DMA_RB_WPTR,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0));
if (r)
return r;
@@ -5231,12 +5174,11 @@ static int evergreen_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size,
- R600_WB_UVD_RPTR_OFFSET,
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (!r)
- r = r600_uvd_init(rdev);
+ r = uvd_v1_0_init(rdev);
if (r)
DRM_ERROR("radeon: error initializing UVD (%d).\n", r);
@@ -5291,10 +5233,10 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
r700_cp_stop(rdev);
r600_dma_stop(rdev);
- r600_uvd_rbc_stop(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
@@ -5419,7 +5361,6 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
- r600_blit_fini(rdev);
r700_cp_fini(rdev);
r600_dma_fini(rdev);
r600_irq_fini(rdev);
@@ -5429,6 +5370,7 @@ void evergreen_fini(struct radeon_device *rdev)
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
deleted file mode 100644
index 057c87b6515..00000000000
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ /dev/null
@@ -1,729 +0,0 @@
-/*
- * Copyright 2010 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Alex Deucher <alexander.deucher@amd.com>
- */
-
-#include <drm/drmP.h>
-#include <drm/radeon_drm.h>
-#include "radeon.h"
-
-#include "evergreend.h"
-#include "evergreen_blit_shaders.h"
-#include "cayman_blit_shaders.h"
-#include "radeon_blit_common.h"
-
-/* emits 17 */
-static void
-set_render_target(struct radeon_device *rdev, int format,
- int w, int h, u64 gpu_addr)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 cb_color_info;
- int pitch, slice;
-
- h = ALIGN(h, 8);
- if (h < 8)
- h = 8;
-
- cb_color_info = CB_FORMAT(format) |
- CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
- CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- pitch = (w / 8) - 1;
- slice = ((w * h) / 64) - 1;
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
- radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, pitch);
- radeon_ring_write(ring, slice);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, cb_color_info);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
-}
-
-/* emits 5dw */
-static void
-cp_set_surface_sync(struct radeon_device *rdev,
- u32 sync_type, u32 size,
- u64 mc_addr)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 cp_coher_size;
-
- if (size == 0xffffffff)
- cp_coher_size = 0xffffffff;
- else
- cp_coher_size = ((size + 255) >> 8);
-
- if (rdev->family >= CHIP_CAYMAN) {
- /* CP_COHER_CNTL2 has to be set manually when submitting a surface_sync
- * to the RB directly. For IBs, the CP programs this as part of the
- * surface_sync packet.
- */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
- }
- radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, sync_type);
- radeon_ring_write(ring, cp_coher_size);
- radeon_ring_write(ring, mc_addr >> 8);
- radeon_ring_write(ring, 10); /* poll interval */
-}
-
-/* emits 11dw + 1 surface sync = 16dw */
-static void
-set_shaders(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u64 gpu_addr;
-
- /* VS */
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
- radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, 2);
- radeon_ring_write(ring, 0);
-
- /* PS */
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
- radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, 1);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 2);
-
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
- cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
-}
-
-/* emits 10 + 1 sync (5) = 15 */
-static void
-set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
-
- /* high addr, stride */
- sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
- SQ_VTXC_STRIDE(16);
-#ifdef __BIG_ENDIAN
- sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
-#endif
- /* xyzw swizzles */
- sq_vtx_constant_word3 = SQ_VTCX_SEL_X(SQ_SEL_X) |
- SQ_VTCX_SEL_Y(SQ_SEL_Y) |
- SQ_VTCX_SEL_Z(SQ_SEL_Z) |
- SQ_VTCX_SEL_W(SQ_SEL_W);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
- radeon_ring_write(ring, 0x580);
- radeon_ring_write(ring, gpu_addr & 0xffffffff);
- radeon_ring_write(ring, 48 - 1); /* size */
- radeon_ring_write(ring, sq_vtx_constant_word2);
- radeon_ring_write(ring, sq_vtx_constant_word3);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
-
- if ((rdev->family == CHIP_CEDAR) ||
- (rdev->family == CHIP_PALM) ||
- (rdev->family == CHIP_SUMO) ||
- (rdev->family == CHIP_SUMO2) ||
- (rdev->family == CHIP_CAICOS))
- cp_set_surface_sync(rdev,
- PACKET3_TC_ACTION_ENA, 48, gpu_addr);
- else
- cp_set_surface_sync(rdev,
- PACKET3_VC_ACTION_ENA, 48, gpu_addr);
-
-}
-
-/* emits 10 */
-static void
-set_tex_resource(struct radeon_device *rdev,
- int format, int w, int h, int pitch,
- u64 gpu_addr, u32 size)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 sq_tex_resource_word0, sq_tex_resource_word1;
- u32 sq_tex_resource_word4, sq_tex_resource_word7;
-
- if (h < 1)
- h = 1;
-
- sq_tex_resource_word0 = TEX_DIM(SQ_TEX_DIM_2D);
- sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
- ((w - 1) << 18));
- sq_tex_resource_word1 = ((h - 1) << 0) |
- TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- /* xyzw swizzles */
- sq_tex_resource_word4 = TEX_DST_SEL_X(SQ_SEL_X) |
- TEX_DST_SEL_Y(SQ_SEL_Y) |
- TEX_DST_SEL_Z(SQ_SEL_Z) |
- TEX_DST_SEL_W(SQ_SEL_W);
-
- sq_tex_resource_word7 = format |
- S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE);
-
- cp_set_surface_sync(rdev,
- PACKET3_TC_ACTION_ENA, size, gpu_addr);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, sq_tex_resource_word0);
- radeon_ring_write(ring, sq_tex_resource_word1);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, sq_tex_resource_word4);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, sq_tex_resource_word7);
-}
-
-/* emits 12 */
-static void
-set_scissors(struct radeon_device *rdev, int x1, int y1,
- int x2, int y2)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- /* workaround some hw bugs */
- if (x2 == 0)
- x1 = 1;
- if (y2 == 0)
- y1 = 1;
- if (rdev->family >= CHIP_CAYMAN) {
- if ((x2 == 1) && (y2 == 1))
- x2 = 2;
- }
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
-}
-
-/* emits 10 */
-static void
-draw_auto(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, DI_PT_RECTLIST);
-
- radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
- radeon_ring_write(ring,
-#ifdef __BIG_ENDIAN
- (2 << 2) |
-#endif
- DI_INDEX_SIZE_16_BIT);
-
- radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
- radeon_ring_write(ring, 1);
-
- radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
- radeon_ring_write(ring, 3);
- radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
-
-}
-
-/* emits 39 */
-static void
-set_default_state(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
- u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
- u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
- int num_ps_gprs, num_vs_gprs, num_temp_gprs;
- int num_gs_gprs, num_es_gprs, num_hs_gprs, num_ls_gprs;
- int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
- int num_hs_threads, num_ls_threads;
- int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
- int num_hs_stack_entries, num_ls_stack_entries;
- u64 gpu_addr;
- int dwords;
-
- /* set clear context state */
- radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
- radeon_ring_write(ring, 0);
-
- if (rdev->family < CHIP_CAYMAN) {
- switch (rdev->family) {
- case CHIP_CEDAR:
- default:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 96;
- num_vs_threads = 16;
- num_gs_threads = 16;
- num_es_threads = 16;
- num_hs_threads = 16;
- num_ls_threads = 16;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- case CHIP_REDWOOD:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 20;
- num_gs_threads = 20;
- num_es_threads = 20;
- num_hs_threads = 20;
- num_ls_threads = 20;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- case CHIP_JUNIPER:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 20;
- num_gs_threads = 20;
- num_es_threads = 20;
- num_hs_threads = 20;
- num_ls_threads = 20;
- num_ps_stack_entries = 85;
- num_vs_stack_entries = 85;
- num_gs_stack_entries = 85;
- num_es_stack_entries = 85;
- num_hs_stack_entries = 85;
- num_ls_stack_entries = 85;
- break;
- case CHIP_CYPRESS:
- case CHIP_HEMLOCK:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 20;
- num_gs_threads = 20;
- num_es_threads = 20;
- num_hs_threads = 20;
- num_ls_threads = 20;
- num_ps_stack_entries = 85;
- num_vs_stack_entries = 85;
- num_gs_stack_entries = 85;
- num_es_stack_entries = 85;
- num_hs_stack_entries = 85;
- num_ls_stack_entries = 85;
- break;
- case CHIP_PALM:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 96;
- num_vs_threads = 16;
- num_gs_threads = 16;
- num_es_threads = 16;
- num_hs_threads = 16;
- num_ls_threads = 16;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- case CHIP_SUMO:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 96;
- num_vs_threads = 25;
- num_gs_threads = 25;
- num_es_threads = 25;
- num_hs_threads = 25;
- num_ls_threads = 25;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- case CHIP_SUMO2:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 96;
- num_vs_threads = 25;
- num_gs_threads = 25;
- num_es_threads = 25;
- num_hs_threads = 25;
- num_ls_threads = 25;
- num_ps_stack_entries = 85;
- num_vs_stack_entries = 85;
- num_gs_stack_entries = 85;
- num_es_stack_entries = 85;
- num_hs_stack_entries = 85;
- num_ls_stack_entries = 85;
- break;
- case CHIP_BARTS:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 20;
- num_gs_threads = 20;
- num_es_threads = 20;
- num_hs_threads = 20;
- num_ls_threads = 20;
- num_ps_stack_entries = 85;
- num_vs_stack_entries = 85;
- num_gs_stack_entries = 85;
- num_es_stack_entries = 85;
- num_hs_stack_entries = 85;
- num_ls_stack_entries = 85;
- break;
- case CHIP_TURKS:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 20;
- num_gs_threads = 20;
- num_es_threads = 20;
- num_hs_threads = 20;
- num_ls_threads = 20;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- case CHIP_CAICOS:
- num_ps_gprs = 93;
- num_vs_gprs = 46;
- num_temp_gprs = 4;
- num_gs_gprs = 31;
- num_es_gprs = 31;
- num_hs_gprs = 23;
- num_ls_gprs = 23;
- num_ps_threads = 128;
- num_vs_threads = 10;
- num_gs_threads = 10;
- num_es_threads = 10;
- num_hs_threads = 10;
- num_ls_threads = 10;
- num_ps_stack_entries = 42;
- num_vs_stack_entries = 42;
- num_gs_stack_entries = 42;
- num_es_stack_entries = 42;
- num_hs_stack_entries = 42;
- num_ls_stack_entries = 42;
- break;
- }
-
- if ((rdev->family == CHIP_CEDAR) ||
- (rdev->family == CHIP_PALM) ||
- (rdev->family == CHIP_SUMO) ||
- (rdev->family == CHIP_SUMO2) ||
- (rdev->family == CHIP_CAICOS))
- sq_config = 0;
- else
- sq_config = VC_ENABLE;
-
- sq_config |= (EXPORT_SRC_C |
- CS_PRIO(0) |
- LS_PRIO(0) |
- HS_PRIO(0) |
- PS_PRIO(0) |
- VS_PRIO(1) |
- GS_PRIO(2) |
- ES_PRIO(3));
-
- sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
- NUM_VS_GPRS(num_vs_gprs) |
- NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
- sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
- NUM_ES_GPRS(num_es_gprs));
- sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
- NUM_LS_GPRS(num_ls_gprs));
- sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
- NUM_VS_THREADS(num_vs_threads) |
- NUM_GS_THREADS(num_gs_threads) |
- NUM_ES_THREADS(num_es_threads));
- sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
- NUM_LS_THREADS(num_ls_threads));
- sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
- NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
- sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
- NUM_ES_STACK_ENTRIES(num_es_stack_entries));
- sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
- NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
-
- /* disable dyn gprs */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, 0);
-
- /* setup LDS */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, 0x10001000);
-
- /* SQ config */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
- radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(ring, sq_config);
- radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
- radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
- radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, sq_thread_resource_mgmt);
- radeon_ring_write(ring, sq_thread_resource_mgmt_2);
- radeon_ring_write(ring, sq_stack_resource_mgmt_1);
- radeon_ring_write(ring, sq_stack_resource_mgmt_2);
- radeon_ring_write(ring, sq_stack_resource_mgmt_3);
- }
-
- /* CONTEXT_CONTROL */
- radeon_ring_write(ring, 0xc0012800);
- radeon_ring_write(ring, 0x80000000);
- radeon_ring_write(ring, 0x80000000);
-
- /* SQ_VTX_BASE_VTX_LOC */
- radeon_ring_write(ring, 0xc0026f00);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000000);
-
- /* SET_SAMPLER */
- radeon_ring_write(ring, 0xc0036e00);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000012);
- radeon_ring_write(ring, 0x00000000);
- radeon_ring_write(ring, 0x00000000);
-
- /* set to DX10/11 mode */
- radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
- radeon_ring_write(ring, 1);
-
- /* emit an IB pointing at default state */
- dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
- radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
- radeon_ring_write(ring, dwords);
-
-}
-
-int evergreen_blit_init(struct radeon_device *rdev)
-{
- u32 obj_size;
- int i, r, dwords;
- void *ptr;
- u32 packet2s[16];
- int num_packet2s = 0;
-
- rdev->r600_blit.primitives.set_render_target = set_render_target;
- rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
- rdev->r600_blit.primitives.set_shaders = set_shaders;
- rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
- rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
- rdev->r600_blit.primitives.set_scissors = set_scissors;
- rdev->r600_blit.primitives.draw_auto = draw_auto;
- rdev->r600_blit.primitives.set_default_state = set_default_state;
-
- rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
- rdev->r600_blit.ring_size_common += 55; /* shaders + def state */
- rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
- rdev->r600_blit.ring_size_common += 5; /* done copy */
- rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
-
- rdev->r600_blit.ring_size_per_loop = 74;
- if (rdev->family >= CHIP_CAYMAN)
- rdev->r600_blit.ring_size_per_loop += 9; /* additional DWs for surface sync */
-
- rdev->r600_blit.max_dim = 16384;
-
- rdev->r600_blit.state_offset = 0;
-
- if (rdev->family < CHIP_CAYMAN)
- rdev->r600_blit.state_len = evergreen_default_size;
- else
- rdev->r600_blit.state_len = cayman_default_size;
-
- dwords = rdev->r600_blit.state_len;
- while (dwords & 0xf) {
- packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
- dwords++;
- }
-
- obj_size = dwords * 4;
- obj_size = ALIGN(obj_size, 256);
-
- rdev->r600_blit.vs_offset = obj_size;
- if (rdev->family < CHIP_CAYMAN)
- obj_size += evergreen_vs_size * 4;
- else
- obj_size += cayman_vs_size * 4;
- obj_size = ALIGN(obj_size, 256);
-
- rdev->r600_blit.ps_offset = obj_size;
- if (rdev->family < CHIP_CAYMAN)
- obj_size += evergreen_ps_size * 4;
- else
- obj_size += cayman_ps_size * 4;
- obj_size = ALIGN(obj_size, 256);
-
- /* pin copy shader into vram if not already initialized */
- if (!rdev->r600_blit.shader_obj) {
- r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM,
- NULL, &rdev->r600_blit.shader_obj);
- if (r) {
- DRM_ERROR("evergreen failed to allocate shader\n");
- return r;
- }
-
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- if (r) {
- dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
- return r;
- }
- }
-
- DRM_DEBUG("evergreen blit allocated bo %08x vs %08x ps %08x\n",
- obj_size,
- rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
-
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
- if (r) {
- DRM_ERROR("failed to map blit object %d\n", r);
- return r;
- }
-
- if (rdev->family < CHIP_CAYMAN) {
- memcpy_toio(ptr + rdev->r600_blit.state_offset,
- evergreen_default_state, rdev->r600_blit.state_len * 4);
-
- if (num_packet2s)
- memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
- packet2s, num_packet2s * 4);
- for (i = 0; i < evergreen_vs_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
- for (i = 0; i < evergreen_ps_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
- } else {
- memcpy_toio(ptr + rdev->r600_blit.state_offset,
- cayman_default_state, rdev->r600_blit.state_len * 4);
-
- if (num_packet2s)
- memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
- packet2s, num_packet2s * 4);
- for (i = 0; i < cayman_vs_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(cayman_vs[i]);
- for (i = 0; i < cayman_ps_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(cayman_ps[i]);
- }
- radeon_bo_kunmap(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
- return 0;
-}
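
The removed evergreen_blit_init() above packs the default state and the VS/PS shader programs into a single buffer object: the state is padded to a 16-dword boundary with PACKET2 NOPs, and each section is then aligned to 256 bytes. A minimal stand-alone sketch of that layout arithmetic in plain C (the element counts below are placeholders, not the real table sizes):

/* Illustrative sketch of the blit BO layout math from evergreen_blit_init();
 * plain C, no kernel headers.  The sizes are made-up placeholders. */
#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))   /* a must be a power of 2 */

int main(void)
{
	unsigned int state_len = 342;                 /* placeholder dword count     */
	unsigned int vs_size = 18, ps_size = 20;      /* placeholder shader sizes    */
	unsigned int dwords = state_len;
	unsigned int num_packet2s = 0;

	/* pad the default state to a 16-dword boundary with PACKET2 NOPs */
	while (dwords & 0xf) {
		num_packet2s++;
		dwords++;
	}

	unsigned int obj_size = ALIGN_UP(dwords * 4, 256);
	unsigned int vs_offset = obj_size;
	obj_size = ALIGN_UP(obj_size + vs_size * 4, 256);
	unsigned int ps_offset = obj_size;
	obj_size = ALIGN_UP(obj_size + ps_size * 4, 256);

	printf("pad packets: %u, vs @ 0x%x, ps @ 0x%x, total 0x%x bytes\n",
	       num_packet2s, vs_offset, ps_offset, obj_size);
	return 0;
}
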
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
index f85c0af115b..d43383470cd 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
@@ -300,58 +300,4 @@ const u32 evergreen_default_state[] =
0x00000010, /* */
};
-const u32 evergreen_vs[] =
-{
- 0x00000004,
- 0x80800400,
- 0x0000a03c,
- 0x95000688,
- 0x00004000,
- 0x15200688,
- 0x00000000,
- 0x00000000,
- 0x3c000000,
- 0x67961001,
-#ifdef __BIG_ENDIAN
- 0x000a0000,
-#else
- 0x00080000,
-#endif
- 0x00000000,
- 0x1c000000,
- 0x67961000,
-#ifdef __BIG_ENDIAN
- 0x00020008,
-#else
- 0x00000008,
-#endif
- 0x00000000,
-};
-
-const u32 evergreen_ps[] =
-{
- 0x00000003,
- 0xa00c0000,
- 0x00000008,
- 0x80400000,
- 0x00000000,
- 0x95200688,
- 0x00380400,
- 0x00146b10,
- 0x00380000,
- 0x20146b10,
- 0x00380400,
- 0x40146b00,
- 0x80380000,
- 0x60146b00,
- 0x00000000,
- 0x00000000,
- 0x00000010,
- 0x000d1000,
- 0xb0800000,
- 0x00000000,
-};
-
-const u32 evergreen_ps_size = ARRAY_SIZE(evergreen_ps);
-const u32 evergreen_vs_size = ARRAY_SIZE(evergreen_vs);
const u32 evergreen_default_size = ARRAY_SIZE(evergreen_default_state);
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
new file mode 100644
index 00000000000..6a0656d00ed
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "evergreend.h"
+
+u32 evergreen_gpu_check_soft_reset(struct radeon_device *rdev);
+
+/**
+ * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write the fence
+ * seq number, and a DMA trap packet to generate an
+ * interrupt if needed (evergreen-SI).
+ */
+void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+ /* write the fence */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ radeon_ring_write(ring, fence->seq);
+ /* generate an interrupt */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0));
+ /* flush HDP */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+}
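
The fence packet above splits the 64-bit fence address into a dword-aligned low word and an 8-bit high word, so only a 40-bit GPU address is carried. A small stand-alone sketch of that split (plain C; the example address is arbitrary):

/* Illustrative only, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

static void split_gpu_addr(uint64_t addr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(addr & 0xfffffffcULL);  /* low 32 bits, dword aligned */
	*hi = (uint32_t)((addr >> 32) & 0xff);   /* bits 32..39 only           */
}

int main(void)
{
	uint32_t lo, hi;

	split_gpu_addr(0x1234567800ULL, &lo, &hi);  /* arbitrary 40-bit address */
	printf("lo=0x%08x hi=0x%02x\n", lo, hi);
	return 0;
}
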
+
+/**
+ * evergreen_dma_ring_ib_execute - schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (evergreen).
+ */
+void evergreen_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
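
The padding above follows from the packet sizes: the INDIRECT_BUFFER packet is 3 dwords and has to end on an 8-dword boundary, so it must start at an offset equal to 5 modulo 8, while the optional next_rptr WRITE packet in front of it is 4 dwords. A stand-alone sketch of the same arithmetic (plain C; the starting wptr is an arbitrary example):

/* Illustrative only, not part of the patch. */
#include <stdio.h>

int main(void)
{
	unsigned int wptr = 18;              /* arbitrary current write pointer    */
	unsigned int next_rptr = wptr + 4;   /* skip the 4-dword WRITE packet      */
	unsigned int pad_nops = 0;

	while ((next_rptr & 7) != 5) {       /* NOPs until IB start % 8 == 5       */
		next_rptr++;
		pad_nops++;
	}
	next_rptr += 3;                      /* the IB packet itself is 3 dwords   */

	printf("wptr=%u -> %u pad NOPs, next_rptr=%u (IB end %% 8 == %u)\n",
	       wptr, pad_nops, next_rptr, next_rptr % 8);
	return 0;
}
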
+
+/**
+ * evergreen_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (evergreen-cayman).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int evergreen_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFFF)
+ cur_size_in_dw = 0xFFFFF;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
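
evergreen_copy_dma() splits the transfer into copy packets of at most 0xFFFFF dwords each and reserves 5 ring dwords per packet plus a fixed overhead of 11. A stand-alone sketch of that sizing, assuming 4 KiB GPU pages (RADEON_GPU_PAGE_SHIFT is not shown in this hunk):

/* Illustrative only, not part of the patch. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int num_gpu_pages = 2048;               /* 8 MiB, arbitrary example */
	unsigned int size_in_dw = (num_gpu_pages << 12) / 4;
	unsigned int num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
	unsigned int ring_dw = num_loops * 5 + 11;       /* 5 dwords per copy packet */

	printf("%u dwords -> %u copy packet(s), %u ring dwords reserved\n",
	       size_in_dw, num_loops, ring_dw);
	return 0;
}
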
+
+/**
+ * evergreen_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = evergreen_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & RADEON_RESET_DMA)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index b0d3fb34141..f71ce390aeb 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -32,6 +32,10 @@
#include "evergreend.h"
#include "atom.h"
+extern void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder);
+extern void dce6_afmt_write_sad_regs(struct drm_encoder *encoder);
+extern void dce6_afmt_select_pin(struct drm_encoder *encoder);
+
/*
* update the N and CTS parameters for a given pixel clock rate
*/
@@ -54,6 +58,45 @@ static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t cloc
WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
}
+static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ u32 tmp;
+ u8 *sadb;
+ int sad_count;
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ radeon_connector = to_radeon_connector(connector);
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+ return;
+ }
+
+ /* program the speaker allocation */
+ tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
+ tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
+ /* set HDMI mode */
+ tmp |= HDMI_CONNECTION;
+ if (sad_count)
+ tmp |= SPEAKER_ALLOCATION(sadb[0]);
+ else
+ tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+ WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
+
+ kfree(sadb);
+}
+
static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
{
struct radeon_device *rdev = encoder->dev->dev_private;
@@ -148,18 +191,44 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
u32 base_rate = 24000;
+ u32 max_ratio = clock / base_rate;
+ u32 dto_phase;
+ u32 dto_modulo = clock;
+ u32 wallclock_ratio;
+ u32 dto_cntl;
if (!dig || !dig->afmt)
return;
+ if (ASIC_IS_DCE6(rdev)) {
+ dto_phase = 24 * 1000;
+ } else {
+ if (max_ratio >= 8) {
+ dto_phase = 192 * 1000;
+ wallclock_ratio = 3;
+ } else if (max_ratio >= 4) {
+ dto_phase = 96 * 1000;
+ wallclock_ratio = 2;
+ } else if (max_ratio >= 2) {
+ dto_phase = 48 * 1000;
+ wallclock_ratio = 1;
+ } else {
+ dto_phase = 24 * 1000;
+ wallclock_ratio = 0;
+ }
+ dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+ dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+ WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
+ }
+
/* XXX two dtos; generally use dto0 for hdmi */
	/* Express [24MHz / target pixel clock] as an exact rational
	 * number (a quotient of two integers): DCCG_AUDIO_DTOx_PHASE
	 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator.
	 */
- WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
- WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
+ WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo);
}
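
The hunk above replaces the fixed PHASE/MODULE scaling with a phase picked from the pixel-clock/24 MHz ratio and a matching wallclock ratio, while the modulo becomes the pixel clock itself. A stand-alone worked example, assuming the clock argument is in kHz as the 24000 base rate suggests:

/* Illustrative only, not part of the patch. */
#include <stdio.h>

int main(void)
{
	unsigned int clock = 148500;                 /* 148.5 MHz pixel clock */
	unsigned int base_rate = 24000;
	unsigned int max_ratio = clock / base_rate;  /* = 6 for this example  */
	unsigned int dto_phase, wallclock_ratio;

	if (max_ratio >= 8) {
		dto_phase = 192 * 1000;
		wallclock_ratio = 3;
	} else if (max_ratio >= 4) {
		dto_phase = 96 * 1000;
		wallclock_ratio = 2;
	} else if (max_ratio >= 2) {
		dto_phase = 48 * 1000;
		wallclock_ratio = 1;
	} else {
		dto_phase = 24 * 1000;
		wallclock_ratio = 0;
	}

	printf("phase=%u module=%u wallclock_ratio=%u\n",
	       dto_phase, clock, wallclock_ratio);
	return 0;
}
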
@@ -238,13 +307,23 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
AFMT_60958_CS_CHANNEL_NUMBER_6(7) |
AFMT_60958_CS_CHANNEL_NUMBER_7(8));
- /* fglrx sets 0x0001005f | (x & 0x00fc0000) in 0x5f78 here */
+ if (ASIC_IS_DCE6(rdev)) {
+ dce6_afmt_write_speaker_allocation(encoder);
+ } else {
+ dce4_afmt_write_speaker_allocation(encoder);
+ }
WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
AFMT_AUDIO_CHANNEL_ENABLE(0xff));
/* fglrx sets 0x40 in 0x5f80 here */
- evergreen_hdmi_write_sad_regs(encoder);
+
+ if (ASIC_IS_DCE6(rdev)) {
+ dce6_afmt_select_pin(encoder);
+ dce6_afmt_write_sad_regs(encoder);
+ } else {
+ evergreen_hdmi_write_sad_regs(encoder);
+ }
err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
if (err < 0) {
@@ -280,6 +359,8 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -292,6 +373,15 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!enable && !dig->afmt->enabled)
return;
+ if (enable) {
+ if (ASIC_IS_DCE6(rdev))
+ dig->afmt->pin = dce6_audio_get_pin(rdev);
+ else
+ dig->afmt->pin = r600_audio_get_pin(rdev);
+ } else {
+ dig->afmt->pin = NULL;
+ }
+
dig->afmt->enabled = enable;
DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index a7baf67aef6..8768fd6a1e2 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -497,6 +497,9 @@
#define DCCG_AUDIO_DTO0_MODULE 0x05b4
#define DCCG_AUDIO_DTO0_LOAD 0x05b8
#define DCCG_AUDIO_DTO0_CNTL 0x05bc
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0)
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0
#define DCCG_AUDIO_DTO1_PHASE 0x05c0
#define DCCG_AUDIO_DTO1_MODULE 0x05c4
@@ -711,6 +714,13 @@
#define AFMT_GENERIC0_7 0x7138
/* DCE4/5 ELD audio interface */
+#define AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER 0x5f78
+#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
+#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
+#define SPEAKER_ALLOCATION_SHIFT 0
+#define HDMI_CONNECTION (1 << 16)
+#define DP_CONNECTION (1 << 17)
+
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x5f84 /* LPCM */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x5f88 /* AC3 */
#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x5f8c /* MPEG1 */
@@ -1150,6 +1160,10 @@
# define LATENCY_LOW_WATERMARK(x) ((x) << 0)
# define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
+#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
+# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
+# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
+
#define IH_RB_CNTL 0x3e00
# define IH_RB_ENABLE (1 << 0)
# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
new file mode 100644
index 00000000000..ecd60809db4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -0,0 +1,2645 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "radeon.h"
+#include "cikd.h"
+#include "r600_dpm.h"
+#include "kv_dpm.h"
+#include "radeon_asic.h"
+#include <linux/seq_file.h>
+
+#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
+#define KV_MINIMUM_ENGINE_CLOCK 800
+#define SMC_RAM_END 0x40000
+
+static void kv_init_graphics_levels(struct radeon_device *rdev);
+static int kv_calculate_ds_divider(struct radeon_device *rdev);
+static int kv_calculate_nbps_level_settings(struct radeon_device *rdev);
+static int kv_calculate_dpm_settings(struct radeon_device *rdev);
+static void kv_enable_new_levels(struct radeon_device *rdev);
+static void kv_program_nbps_index_settings(struct radeon_device *rdev,
+ struct radeon_ps *new_rps);
+static int kv_set_enabled_levels(struct radeon_device *rdev);
+static int kv_force_dpm_highest(struct radeon_device *rdev);
+static int kv_force_dpm_lowest(struct radeon_device *rdev);
+static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
+ struct radeon_ps *new_rps,
+ struct radeon_ps *old_rps);
+static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
+ int min_temp, int max_temp);
+static int kv_init_fps_limits(struct radeon_device *rdev);
+
+void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
+static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate);
+static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate);
+static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate);
+
+extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
+extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
+extern void cik_update_cg(struct radeon_device *rdev,
+ u32 block, bool enable);
+
+static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 1, 4, 1 },
+ { 2, 5, 1 },
+ { 3, 4, 2 },
+ { 4, 1, 1 },
+ { 5, 5, 2 },
+ { 6, 6, 1 },
+ { 7, 9, 2 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
+{
+ { 0, 4, 1 },
+ { 1, 4, 1 },
+ { 2, 5, 1 },
+ { 3, 4, 1 },
+ { 4, 1, 1 },
+ { 5, 5, 1 },
+ { 6, 6, 1 },
+ { 7, 9, 1 },
+ { 8, 4, 1 },
+ { 9, 2, 1 },
+ { 10, 3, 1 },
+ { 11, 6, 1 },
+ { 12, 8, 2 },
+ { 13, 1, 1 },
+ { 14, 2, 1 },
+ { 15, 3, 1 },
+ { 16, 1, 1 },
+ { 17, 4, 1 },
+ { 18, 3, 1 },
+ { 19, 1, 1 },
+ { 20, 8, 1 },
+ { 21, 5, 1 },
+ { 22, 1, 1 },
+ { 23, 1, 1 },
+ { 24, 4, 1 },
+ { 27, 6, 1 },
+ { 28, 1, 1 },
+ { 0xffffffff }
+};
+
+static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
+{
+ { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
+{
+ { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
+{
+ { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
+{
+ { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
+{
+ { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
+{
+ { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
+};
+
+static const struct kv_pt_config_reg didt_config_kv[] =
+{
+ { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+ { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+ { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+ { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+ { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+ { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+ { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+ { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+ { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+ { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
+ { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
+ { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
+ { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
+ { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
+ { 0xFFFFFFFF }
+};
+
+static struct kv_ps *kv_get_ps(struct radeon_ps *rps)
+{
+ struct kv_ps *ps = rps->ps_priv;
+
+ return ps;
+}
+
+static struct kv_power_info *kv_get_pi(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = rdev->pm.dpm.priv;
+
+ return pi;
+}
+
+#if 0
+static void kv_program_local_cac_table(struct radeon_device *rdev,
+ const struct kv_lcac_config_values *local_cac_table,
+ const struct kv_lcac_config_reg *local_cac_reg)
+{
+ u32 i, count, data;
+ const struct kv_lcac_config_values *values = local_cac_table;
+
+ while (values->block_id != 0xffffffff) {
+ count = values->signal_id;
+ for (i = 0; i < count; i++) {
+ data = ((values->block_id << local_cac_reg->block_shift) &
+ local_cac_reg->block_mask);
+ data |= ((i << local_cac_reg->signal_shift) &
+ local_cac_reg->signal_mask);
+ data |= ((values->t << local_cac_reg->t_shift) &
+ local_cac_reg->t_mask);
+ data |= ((1 << local_cac_reg->enable_shift) &
+ local_cac_reg->enable_mask);
+ WREG32_SMC(local_cac_reg->cntl, data);
+ }
+ values++;
+ }
+}
+#endif
+
+static int kv_program_pt_config_registers(struct radeon_device *rdev,
+ const struct kv_pt_config_reg *cac_config_regs)
+{
+ const struct kv_pt_config_reg *config_regs = cac_config_regs;
+ u32 data;
+ u32 cache = 0;
+
+ if (config_regs == NULL)
+ return -EINVAL;
+
+ while (config_regs->offset != 0xFFFFFFFF) {
+ if (config_regs->type == KV_CONFIGREG_CACHE) {
+ cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ } else {
+ switch (config_regs->type) {
+ case KV_CONFIGREG_SMC_IND:
+ data = RREG32_SMC(config_regs->offset);
+ break;
+ case KV_CONFIGREG_DIDT_IND:
+ data = RREG32_DIDT(config_regs->offset);
+ break;
+ default:
+ data = RREG32(config_regs->offset << 2);
+ break;
+ }
+
+ data &= ~config_regs->mask;
+ data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
+ data |= cache;
+ cache = 0;
+
+ switch (config_regs->type) {
+ case KV_CONFIGREG_SMC_IND:
+ WREG32_SMC(config_regs->offset, data);
+ break;
+ case KV_CONFIGREG_DIDT_IND:
+ WREG32_DIDT(config_regs->offset, data);
+ break;
+ default:
+ WREG32(config_regs->offset << 2, data);
+ break;
+ }
+ }
+ config_regs++;
+ }
+
+ return 0;
+}
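
kv_program_pt_config_registers() treats KV_CONFIGREG_CACHE entries specially: their bits are only accumulated, and the next regular entry applies them together with its own field in a single read-modify-write. A simplified stand-alone model of that accumulation (plain C; a fake register array and made-up table values stand in for the SMC/DIDT accessors):

/* Illustrative only, not part of the patch. */
#include <stdint.h>
#include <stdio.h>

struct cfg { uint32_t off, mask, shift, val; int is_cache; };

int main(void)
{
	uint32_t regs[4] = { 0 };                 /* pretend register file        */
	const struct cfg table[] = {
		{ 0, 0x000000ff, 0,  0x12, 1 },   /* cached: low byte             */
		{ 0, 0x0000ff00, 8,  0x34, 1 },   /* cached: second byte          */
		{ 0, 0x00ff0000, 16, 0x56, 0 },   /* applies itself plus cache    */
	};
	uint32_t cache = 0;
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		const struct cfg *c = &table[i];

		if (c->is_cache) {
			cache |= (c->val << c->shift) & c->mask;
			continue;
		}
		uint32_t data = regs[c->off];
		data &= ~c->mask;
		data |= (c->val << c->shift) & c->mask;
		data |= cache;
		cache = 0;
		regs[c->off] = data;
	}

	printf("reg0 = 0x%08x\n", regs[0]);       /* prints 0x00563412 */
	return 0;
}
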
+
+static void kv_do_enable_didt(struct radeon_device *rdev, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 data;
+
+ if (pi->caps_sq_ramping) {
+ data = RREG32_DIDT(DIDT_SQ_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_SQ_CTRL0, data);
+ }
+
+ if (pi->caps_db_ramping) {
+ data = RREG32_DIDT(DIDT_DB_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_DB_CTRL0, data);
+ }
+
+ if (pi->caps_td_ramping) {
+ data = RREG32_DIDT(DIDT_TD_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_TD_CTRL0, data);
+ }
+
+ if (pi->caps_tcp_ramping) {
+ data = RREG32_DIDT(DIDT_TCP_CTRL0);
+ if (enable)
+ data |= DIDT_CTRL_EN;
+ else
+ data &= ~DIDT_CTRL_EN;
+ WREG32_DIDT(DIDT_TCP_CTRL0, data);
+ }
+}
+
+static int kv_enable_didt(struct radeon_device *rdev, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ if (pi->caps_sq_ramping ||
+ pi->caps_db_ramping ||
+ pi->caps_td_ramping ||
+ pi->caps_tcp_ramping) {
+ cik_enter_rlc_safe_mode(rdev);
+
+ if (enable) {
+ ret = kv_program_pt_config_registers(rdev, didt_config_kv);
+ if (ret) {
+ cik_exit_rlc_safe_mode(rdev);
+ return ret;
+ }
+ }
+
+ kv_do_enable_didt(rdev, enable);
+
+ cik_exit_rlc_safe_mode(rdev);
+ }
+
+ return 0;
+}
+
+#if 0
+static void kv_initialize_hardware_cac_manager(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ if (pi->caps_cac) {
+ WREG32_SMC(LCAC_SX0_OVR_SEL, 0);
+ WREG32_SMC(LCAC_SX0_OVR_VAL, 0);
+ kv_program_local_cac_table(rdev, sx_local_cac_cfg_kv, sx0_cac_config_reg);
+
+ WREG32_SMC(LCAC_MC0_OVR_SEL, 0);
+ WREG32_SMC(LCAC_MC0_OVR_VAL, 0);
+ kv_program_local_cac_table(rdev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);
+
+ WREG32_SMC(LCAC_MC1_OVR_SEL, 0);
+ WREG32_SMC(LCAC_MC1_OVR_VAL, 0);
+ kv_program_local_cac_table(rdev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);
+
+ WREG32_SMC(LCAC_MC2_OVR_SEL, 0);
+ WREG32_SMC(LCAC_MC2_OVR_VAL, 0);
+ kv_program_local_cac_table(rdev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);
+
+ WREG32_SMC(LCAC_MC3_OVR_SEL, 0);
+ WREG32_SMC(LCAC_MC3_OVR_VAL, 0);
+ kv_program_local_cac_table(rdev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);
+
+ WREG32_SMC(LCAC_CPL_OVR_SEL, 0);
+ WREG32_SMC(LCAC_CPL_OVR_VAL, 0);
+ kv_program_local_cac_table(rdev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
+ }
+}
+#endif
+
+static int kv_enable_smc_cac(struct radeon_device *rdev, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret = 0;
+
+ if (pi->caps_cac) {
+ if (enable) {
+ ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_EnableCac);
+ if (ret)
+ pi->cac_enabled = false;
+ else
+ pi->cac_enabled = true;
+ } else if (pi->cac_enabled) {
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_DisableCac);
+ pi->cac_enabled = false;
+ }
+ }
+
+ return ret;
+}
+
+static int kv_process_firmware_header(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 tmp;
+ int ret;
+
+ ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, DpmTable),
+ &tmp, pi->sram_end);
+
+ if (ret == 0)
+ pi->dpm_table_start = tmp;
+
+ ret = kv_read_smc_sram_dword(rdev, SMU7_FIRMWARE_HEADER_LOCATION +
+ offsetof(SMU7_Firmware_Header, SoftRegisters),
+ &tmp, pi->sram_end);
+
+ if (ret == 0)
+ pi->soft_regs_start = tmp;
+
+ return ret;
+}
+
+static int kv_enable_dpm_voltage_scaling(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ pi->graphics_voltage_change_enable = 1;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
+ &pi->graphics_voltage_change_enable,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static int kv_set_dpm_interval(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ pi->graphics_interval = 1;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
+ &pi->graphics_interval,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static int kv_set_dpm_boot_state(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
+ &pi->graphics_boot_level,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static void kv_program_vc(struct radeon_device *rdev)
+{
+ WREG32_SMC(CG_FTV_0, 0x3FFFC000);
+}
+
+static void kv_clear_vc(struct radeon_device *rdev)
+{
+ WREG32_SMC(CG_FTV_0, 0);
+}
+
+static int kv_set_divider_value(struct radeon_device *rdev,
+ u32 index, u32 sclk)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct atom_clock_dividers dividers;
+ int ret;
+
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ sclk, false, &dividers);
+ if (ret)
+ return ret;
+
+ pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
+ pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);
+
+ return 0;
+}
+
+static u16 kv_convert_8bit_index_to_voltage(struct radeon_device *rdev,
+ u16 voltage)
+{
+ return 6200 - (voltage * 25);
+}
+
+static u16 kv_convert_2bit_index_to_voltage(struct radeon_device *rdev,
+ u32 vid_2bit)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 vid_8bit = sumo_convert_vid2_to_vid7(rdev,
+ &pi->sys_info.vid_mapping_table,
+ vid_2bit);
+
+ return kv_convert_8bit_index_to_voltage(rdev, (u16)vid_8bit);
+}
+
+
+static int kv_set_vid(struct radeon_device *rdev, u32 index, u32 vid)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
+ pi->graphics_level[index].MinVddNb =
+ cpu_to_be32(kv_convert_2bit_index_to_voltage(rdev, vid));
+
+ return 0;
+}
+
+static int kv_set_at(struct radeon_device *rdev, u32 index, u32 at)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->graphics_level[index].AT = cpu_to_be16((u16)at);
+
+ return 0;
+}
+
+static void kv_dpm_power_level_enable(struct radeon_device *rdev,
+ u32 index, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
+}
+
+static void kv_start_dpm(struct radeon_device *rdev)
+{
+ u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
+
+ tmp |= GLOBAL_PWRMGT_EN;
+ WREG32_SMC(GENERAL_PWRMGT, tmp);
+
+ kv_smc_dpm_enable(rdev, true);
+}
+
+static void kv_stop_dpm(struct radeon_device *rdev)
+{
+ kv_smc_dpm_enable(rdev, false);
+}
+
+static void kv_start_am(struct radeon_device *rdev)
+{
+ u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);
+
+ sclk_pwrmgt_cntl &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
+ sclk_pwrmgt_cntl |= DYNAMIC_PM_EN;
+
+ WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
+}
+
+static void kv_reset_am(struct radeon_device *rdev)
+{
+ u32 sclk_pwrmgt_cntl = RREG32_SMC(SCLK_PWRMGT_CNTL);
+
+ sclk_pwrmgt_cntl |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
+
+ WREG32_SMC(SCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
+}
+
+static int kv_freeze_sclk_dpm(struct radeon_device *rdev, bool freeze)
+{
+ return kv_notify_message_to_smu(rdev, freeze ?
+ PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
+}
+
+static int kv_force_lowest_valid(struct radeon_device *rdev)
+{
+ return kv_force_dpm_lowest(rdev);
+}
+
+static int kv_unforce_levels(struct radeon_device *rdev)
+{
+ return kv_notify_message_to_smu(rdev, PPSMC_MSG_NoForcedLevel);
+}
+
+static int kv_update_sclk_t(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 low_sclk_interrupt_t = 0;
+ int ret = 0;
+
+ if (pi->caps_sclk_throttle_low_notification) {
+ low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
+ (u8 *)&low_sclk_interrupt_t,
+ sizeof(u32), pi->sram_end);
+ }
+ return ret;
+}
+
+static int kv_program_bootup_state(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+ if (table && table->count) {
+ for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
+ if ((table->entries[i].clk == pi->boot_pl.sclk) ||
+ (i == 0))
+ break;
+ }
+
+ pi->graphics_boot_level = (u8)i;
+ kv_dpm_power_level_enable(rdev, i, true);
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+
+ if (table->num_max_dpm_entries == 0)
+ return -EINVAL;
+
+ for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
+ if ((table->entries[i].sclk_frequency == pi->boot_pl.sclk) ||
+ (i == 0))
+ break;
+ }
+
+ pi->graphics_boot_level = (u8)i;
+ kv_dpm_power_level_enable(rdev, i, true);
+ }
+ return 0;
+}
+
+static int kv_enable_auto_thermal_throttling(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ pi->graphics_therm_throttle_enable = 1;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
+ &pi->graphics_therm_throttle_enable,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static int kv_upload_dpm_settings(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
+ (u8 *)&pi->graphics_level,
+ sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
+ pi->sram_end);
+
+ if (ret)
+ return ret;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
+ &pi->graphics_dpm_level_count,
+ sizeof(u8), pi->sram_end);
+
+ return ret;
+}
+
+static u32 kv_get_clock_difference(u32 a, u32 b)
+{
+ return (a >= b) ? a - b : b - a;
+}
+
+static u32 kv_get_clk_bypass(struct radeon_device *rdev, u32 clk)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 value;
+
+ if (pi->caps_enable_dfs_bypass) {
+ if (kv_get_clock_difference(clk, 40000) < 200)
+ value = 3;
+ else if (kv_get_clock_difference(clk, 30000) < 200)
+ value = 2;
+ else if (kv_get_clock_difference(clk, 20000) < 200)
+ value = 7;
+ else if (kv_get_clock_difference(clk, 15000) < 200)
+ value = 6;
+ else if (kv_get_clock_difference(clk, 10000) < 200)
+ value = 8;
+ else
+ value = 0;
+ } else {
+ value = 0;
+ }
+
+ return value;
+}
+
+static int kv_populate_uvd_table(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_uvd_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+ struct atom_clock_dividers dividers;
+ int ret;
+ u32 i;
+
+ if (table == NULL || table->count == 0)
+ return 0;
+
+ pi->uvd_level_count = 0;
+ for (i = 0; i < table->count; i++) {
+ if (pi->high_voltage_t &&
+ (pi->high_voltage_t < table->entries[i].v))
+ break;
+
+ pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
+ pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
+ pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);
+
+ pi->uvd_level[i].VClkBypassCntl =
+ (u8)kv_get_clk_bypass(rdev, table->entries[i].vclk);
+ pi->uvd_level[i].DClkBypassCntl =
+ (u8)kv_get_clk_bypass(rdev, table->entries[i].dclk);
+
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ table->entries[i].vclk, false, &dividers);
+ if (ret)
+ return ret;
+ pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;
+
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ table->entries[i].dclk, false, &dividers);
+ if (ret)
+ return ret;
+ pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;
+
+ pi->uvd_level_count++;
+ }
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
+ (u8 *)&pi->uvd_level_count,
+ sizeof(u8), pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->uvd_interval = 1;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, UVDInterval),
+ &pi->uvd_interval,
+ sizeof(u8), pi->sram_end);
+ if (ret)
+ return ret;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, UvdLevel),
+ (u8 *)&pi->uvd_level,
+ sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
+ pi->sram_end);
+
+ return ret;
+
+}
+
+static int kv_populate_vce_table(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+ u32 i;
+ struct radeon_vce_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+ struct atom_clock_dividers dividers;
+
+ if (table == NULL || table->count == 0)
+ return 0;
+
+ pi->vce_level_count = 0;
+ for (i = 0; i < table->count; i++) {
+ if (pi->high_voltage_t &&
+ pi->high_voltage_t < table->entries[i].v)
+ break;
+
+ pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
+ pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
+
+ pi->vce_level[i].ClkBypassCntl =
+ (u8)kv_get_clk_bypass(rdev, table->entries[i].evclk);
+
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ table->entries[i].evclk, false, &dividers);
+ if (ret)
+ return ret;
+ pi->vce_level[i].Divider = (u8)dividers.post_div;
+
+ pi->vce_level_count++;
+ }
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
+ (u8 *)&pi->vce_level_count,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->vce_interval = 1;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, VCEInterval),
+ (u8 *)&pi->vce_interval,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, VceLevel),
+ (u8 *)&pi->vce_level,
+ sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
+ pi->sram_end);
+
+ return ret;
+}
+
+static int kv_populate_samu_table(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
+ struct atom_clock_dividers dividers;
+ int ret;
+ u32 i;
+
+ if (table == NULL || table->count == 0)
+ return 0;
+
+ pi->samu_level_count = 0;
+ for (i = 0; i < table->count; i++) {
+ if (pi->high_voltage_t &&
+ pi->high_voltage_t < table->entries[i].v)
+ break;
+
+ pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
+ pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
+
+ pi->samu_level[i].ClkBypassCntl =
+ (u8)kv_get_clk_bypass(rdev, table->entries[i].clk);
+
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ table->entries[i].clk, false, &dividers);
+ if (ret)
+ return ret;
+ pi->samu_level[i].Divider = (u8)dividers.post_div;
+
+ pi->samu_level_count++;
+ }
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
+ (u8 *)&pi->samu_level_count,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->samu_interval = 1;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
+ (u8 *)&pi->samu_interval,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, SamuLevel),
+ (u8 *)&pi->samu_level,
+ sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+
+static int kv_populate_acp_table(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+ struct atom_clock_dividers dividers;
+ int ret;
+ u32 i;
+
+ if (table == NULL || table->count == 0)
+ return 0;
+
+ pi->acp_level_count = 0;
+ for (i = 0; i < table->count; i++) {
+ pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
+ pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
+
+ ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
+ table->entries[i].clk, false, &dividers);
+ if (ret)
+ return ret;
+ pi->acp_level[i].Divider = (u8)dividers.post_div;
+
+ pi->acp_level_count++;
+ }
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
+ (u8 *)&pi->acp_level_count,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ pi->acp_interval = 1;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, ACPInterval),
+ (u8 *)&pi->acp_interval,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, AcpLevel),
+ (u8 *)&pi->acp_level,
+ sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static void kv_calculate_dfs_bypass_settings(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+ if (table && table->count) {
+ for (i = 0; i < pi->graphics_dpm_level_count; i++) {
+ if (pi->caps_enable_dfs_bypass) {
+ if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 3;
+ else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 2;
+ else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 7;
+ else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 6;
+ else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 8;
+ else
+ pi->graphics_level[i].ClkBypassCntl = 0;
+ } else {
+ pi->graphics_level[i].ClkBypassCntl = 0;
+ }
+ }
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+ for (i = 0; i < pi->graphics_dpm_level_count; i++) {
+ if (pi->caps_enable_dfs_bypass) {
+ if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 3;
+ else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 2;
+ else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 7;
+ else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 6;
+ else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
+ pi->graphics_level[i].ClkBypassCntl = 8;
+ else
+ pi->graphics_level[i].ClkBypassCntl = 0;
+ } else {
+ pi->graphics_level[i].ClkBypassCntl = 0;
+ }
+ }
+ }
+}
+
+static int kv_enable_ulv(struct radeon_device *rdev, bool enable)
+{
+ return kv_notify_message_to_smu(rdev, enable ?
+ PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
+}
+
+static void kv_update_current_ps(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ struct kv_ps *new_ps = kv_get_ps(rps);
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->current_rps = *rps;
+ pi->current_ps = *new_ps;
+ pi->current_rps.ps_priv = &pi->current_ps;
+}
+
+static void kv_update_requested_ps(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ struct kv_ps *new_ps = kv_get_ps(rps);
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->requested_rps = *rps;
+ pi->requested_ps = *new_ps;
+ pi->requested_rps.ps_priv = &pi->requested_ps;
+}
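
kv_update_current_ps() and kv_update_requested_ps() copy the radeon_ps by value, so the embedded ps_priv pointer would otherwise still reference the source state's private data; both helpers therefore also copy the kv_ps and repoint ps_priv at that copy. A minimal stand-alone model of the idiom (plain C, simplified structs):

/* Illustrative only, not part of the patch. */
#include <stdio.h>

struct priv  { int level; };
struct state { struct priv *ps_priv; int flags; };

static struct state current_state;
static struct priv  current_priv;

static void update_current(const struct state *src)
{
	current_state = *src;                  /* shallow copy of the state      */
	current_priv = *src->ps_priv;          /* deep copy of the private data  */
	current_state.ps_priv = &current_priv; /* repoint at the local copy      */
}

int main(void)
{
	struct priv  p = { .level = 3 };
	struct state s = { .ps_priv = &p, .flags = 1 };

	update_current(&s);
	p.level = 7;                           /* later changes to the source... */
	printf("copied level stays %d\n", current_state.ps_priv->level);
	return 0;
}
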
+
+int kv_dpm_enable(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret;
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_HDP), false);
+
+ ret = kv_process_firmware_header(rdev);
+ if (ret) {
+ DRM_ERROR("kv_process_firmware_header failed\n");
+ return ret;
+ }
+ kv_init_fps_limits(rdev);
+ kv_init_graphics_levels(rdev);
+ ret = kv_program_bootup_state(rdev);
+ if (ret) {
+ DRM_ERROR("kv_program_bootup_state failed\n");
+ return ret;
+ }
+ kv_calculate_dfs_bypass_settings(rdev);
+ ret = kv_upload_dpm_settings(rdev);
+ if (ret) {
+ DRM_ERROR("kv_upload_dpm_settings failed\n");
+ return ret;
+ }
+ ret = kv_populate_uvd_table(rdev);
+ if (ret) {
+ DRM_ERROR("kv_populate_uvd_table failed\n");
+ return ret;
+ }
+ ret = kv_populate_vce_table(rdev);
+ if (ret) {
+ DRM_ERROR("kv_populate_vce_table failed\n");
+ return ret;
+ }
+ ret = kv_populate_samu_table(rdev);
+ if (ret) {
+ DRM_ERROR("kv_populate_samu_table failed\n");
+ return ret;
+ }
+ ret = kv_populate_acp_table(rdev);
+ if (ret) {
+ DRM_ERROR("kv_populate_acp_table failed\n");
+ return ret;
+ }
+ kv_program_vc(rdev);
+#if 0
+ kv_initialize_hardware_cac_manager(rdev);
+#endif
+ kv_start_am(rdev);
+ if (pi->enable_auto_thermal_throttling) {
+ ret = kv_enable_auto_thermal_throttling(rdev);
+ if (ret) {
+ DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
+ return ret;
+ }
+ }
+ ret = kv_enable_dpm_voltage_scaling(rdev);
+ if (ret) {
+ DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
+ return ret;
+ }
+ ret = kv_set_dpm_interval(rdev);
+ if (ret) {
+ DRM_ERROR("kv_set_dpm_interval failed\n");
+ return ret;
+ }
+ ret = kv_set_dpm_boot_state(rdev);
+ if (ret) {
+ DRM_ERROR("kv_set_dpm_boot_state failed\n");
+ return ret;
+ }
+ ret = kv_enable_ulv(rdev, true);
+ if (ret) {
+ DRM_ERROR("kv_enable_ulv failed\n");
+ return ret;
+ }
+ kv_start_dpm(rdev);
+ ret = kv_enable_didt(rdev, true);
+ if (ret) {
+ DRM_ERROR("kv_enable_didt failed\n");
+ return ret;
+ }
+ ret = kv_enable_smc_cac(rdev, true);
+ if (ret) {
+ DRM_ERROR("kv_enable_smc_cac failed\n");
+ return ret;
+ }
+
+ if (rdev->irq.installed &&
+ r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
+ ret = kv_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret) {
+ DRM_ERROR("kv_set_thermal_temperature_range failed\n");
+ return ret;
+ }
+ rdev->irq.dpm_thermal = true;
+ radeon_irq_set(rdev);
+ }
+
+ /* powerdown unused blocks for now */
+ kv_dpm_powergate_acp(rdev, true);
+ kv_dpm_powergate_samu(rdev, true);
+ kv_dpm_powergate_vce(rdev, true);
+ kv_dpm_powergate_uvd(rdev, true);
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_HDP), true);
+
+ kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+
+ return ret;
+}
+
+void kv_dpm_disable(struct radeon_device *rdev)
+{
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_HDP), false);
+
+ /* powerup blocks */
+ kv_dpm_powergate_acp(rdev, false);
+ kv_dpm_powergate_samu(rdev, false);
+ kv_dpm_powergate_vce(rdev, false);
+ kv_dpm_powergate_uvd(rdev, false);
+
+ kv_enable_smc_cac(rdev, false);
+ kv_enable_didt(rdev, false);
+ kv_clear_vc(rdev);
+ kv_stop_dpm(rdev);
+ kv_enable_ulv(rdev, false);
+ kv_reset_am(rdev);
+
+ kv_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+}
+
+#if 0
+static int kv_write_smc_soft_register(struct radeon_device *rdev,
+ u16 reg_offset, u32 value)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ return kv_copy_bytes_to_smc(rdev, pi->soft_regs_start + reg_offset,
+ (u8 *)&value, sizeof(u16), pi->sram_end);
+}
+
+static int kv_read_smc_soft_register(struct radeon_device *rdev,
+ u16 reg_offset, u32 *value)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ return kv_read_smc_sram_dword(rdev, pi->soft_regs_start + reg_offset,
+ value, pi->sram_end);
+}
+#endif
+
+static void kv_init_sclk_t(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->low_sclk_interrupt_t = 0;
+}
+
+static int kv_init_fps_limits(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret = 0;
+
+ if (pi->caps_fps) {
+ u16 tmp;
+
+ tmp = 45;
+ pi->fps_high_t = cpu_to_be16(tmp);
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, FpsHighT),
+ (u8 *)&pi->fps_high_t,
+ sizeof(u16), pi->sram_end);
+
+ tmp = 30;
+ pi->fps_low_t = cpu_to_be16(tmp);
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, FpsLowT),
+ (u8 *)&pi->fps_low_t,
+ sizeof(u16), pi->sram_end);
+
+ }
+ return ret;
+}
+
+static void kv_init_powergate_state(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->uvd_power_gated = false;
+ pi->vce_power_gated = false;
+ pi->samu_power_gated = false;
+ pi->acp_power_gated = false;
+}
+
+static int kv_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
+{
+ return kv_notify_message_to_smu(rdev, enable ?
+ PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
+}
+
+#if 0
+static int kv_enable_vce_dpm(struct radeon_device *rdev, bool enable)
+{
+ return kv_notify_message_to_smu(rdev, enable ?
+ PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
+}
+#endif
+
+static int kv_enable_samu_dpm(struct radeon_device *rdev, bool enable)
+{
+ return kv_notify_message_to_smu(rdev, enable ?
+ PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
+}
+
+static int kv_enable_acp_dpm(struct radeon_device *rdev, bool enable)
+{
+ return kv_notify_message_to_smu(rdev, enable ?
+ PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
+}
+
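+/* Program the UVD boot level into the SMU dpm table and enable or
+ * disable UVD dpm to match the requested gating state.
+ */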
+static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_uvd_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+ int ret;
+
+ if (!gate) {
+ if (!pi->caps_uvd_dpm || table->count || pi->caps_stable_p_state)
+ pi->uvd_boot_level = table->count - 1;
+ else
+ pi->uvd_boot_level = 0;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
+ (u8 *)&pi->uvd_boot_level,
+ sizeof(u8), pi->sram_end);
+ if (ret)
+ return ret;
+
+ if (!pi->caps_uvd_dpm ||
+ pi->caps_stable_p_state)
+ kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_UVDDPM_SetEnabledMask,
+ (1 << pi->uvd_boot_level));
+ }
+
+ return kv_enable_uvd_dpm(rdev, !gate);
+}
+
+#if 0
+static u8 kv_get_vce_boot_level(struct radeon_device *rdev)
+{
+ u8 i;
+ struct radeon_vce_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+
+ for (i = 0; i < table->count; i++) {
+ if (table->entries[i].evclk >= 0) /* XXX */
+ break;
+ }
+
+ return i;
+}
+
+static int kv_update_vce_dpm(struct radeon_device *rdev,
+ struct radeon_ps *radeon_new_state,
+ struct radeon_ps *radeon_current_state)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_vce_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
+ int ret;
+
+ if (radeon_new_state->evclk > 0 && radeon_current_state->evclk == 0) {
+ if (pi->caps_stable_p_state)
+ pi->vce_boot_level = table->count - 1;
+ else
+ pi->vce_boot_level = kv_get_vce_boot_level(rdev);
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
+ (u8 *)&pi->vce_boot_level,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ if (pi->caps_stable_p_state)
+ kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_VCEDPM_SetEnabledMask,
+ (1 << pi->vce_boot_level));
+
+ kv_enable_vce_dpm(rdev, true);
+ } else if (radeon_new_state->evclk == 0 && radeon_current_state->evclk > 0) {
+ kv_enable_vce_dpm(rdev, false);
+ }
+
+ return 0;
+}
+#endif
+
+static int kv_update_samu_dpm(struct radeon_device *rdev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
+ int ret;
+
+ if (!gate) {
+ if (pi->caps_stable_p_state)
+ pi->samu_boot_level = table->count - 1;
+ else
+ pi->samu_boot_level = 0;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
+ (u8 *)&pi->samu_boot_level,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ if (pi->caps_stable_p_state)
+ kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_SAMUDPM_SetEnabledMask,
+ (1 << pi->samu_boot_level));
+ }
+
+ return kv_enable_samu_dpm(rdev, !gate);
+}
+
+static int kv_update_acp_dpm(struct radeon_device *rdev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
+ int ret;
+
+ if (!gate) {
+ if (pi->caps_stable_p_state)
+ pi->acp_boot_level = table->count - 1;
+ else
+ pi->acp_boot_level = 0;
+
+ ret = kv_copy_bytes_to_smc(rdev,
+ pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
+ (u8 *)&pi->acp_boot_level,
+ sizeof(u8),
+ pi->sram_end);
+ if (ret)
+ return ret;
+
+ if (pi->caps_stable_p_state)
+ kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_ACPDPM_SetEnabledMask,
+ (1 << pi->acp_boot_level));
+ }
+
+ return kv_enable_acp_dpm(rdev, !gate);
+}
+
+void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ if (pi->uvd_power_gated == gate)
+ return;
+
+ pi->uvd_power_gated = gate;
+
+ if (gate) {
+ if (pi->caps_uvd_pg) {
+ uvd_v1_0_stop(rdev);
+ cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
+ }
+ kv_update_uvd_dpm(rdev, gate);
+ if (pi->caps_uvd_pg)
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerOFF);
+ } else {
+ if (pi->caps_uvd_pg) {
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_UVDPowerON);
+ uvd_v4_2_resume(rdev);
+ uvd_v1_0_start(rdev);
+ cik_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
+ }
+ kv_update_uvd_dpm(rdev, gate);
+ }
+}
+
+static void kv_dpm_powergate_vce(struct radeon_device *rdev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ if (pi->vce_power_gated == gate)
+ return;
+
+ pi->vce_power_gated = gate;
+
+ if (gate) {
+ if (pi->caps_vce_pg)
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerOFF);
+ } else {
+ if (pi->caps_vce_pg)
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_VCEPowerON);
+ }
+}
+
+static void kv_dpm_powergate_samu(struct radeon_device *rdev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ if (pi->samu_power_gated == gate)
+ return;
+
+ pi->samu_power_gated = gate;
+
+ if (gate) {
+ kv_update_samu_dpm(rdev, true);
+ if (pi->caps_samu_pg)
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerOFF);
+ } else {
+ if (pi->caps_samu_pg)
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_SAMPowerON);
+ kv_update_samu_dpm(rdev, false);
+ }
+}
+
+static void kv_dpm_powergate_acp(struct radeon_device *rdev, bool gate)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ if (pi->acp_power_gated == gate)
+ return;
+
+ if (rdev->family == CHIP_KABINI)
+ return;
+
+ pi->acp_power_gated = gate;
+
+ if (gate) {
+ kv_update_acp_dpm(rdev, true);
+ if (pi->caps_acp_pg)
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerOFF);
+ } else {
+ if (pi->caps_acp_pg)
+ kv_notify_message_to_smu(rdev, PPSMC_MSG_ACPPowerON);
+ kv_update_acp_dpm(rdev, false);
+ }
+}
+
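+/* Clamp the valid graphics dpm level range (lowest_valid/highest_valid)
+ * to the sclk window requested by the new power state.
+ */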
+static void kv_set_valid_clock_range(struct radeon_device *rdev,
+ struct radeon_ps *new_rps)
+{
+ struct kv_ps *new_ps = kv_get_ps(new_rps);
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+ if (table && table->count) {
+ for (i = 0; i < pi->graphics_dpm_level_count; i++) {
+ if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
+ (i == (pi->graphics_dpm_level_count - 1))) {
+ pi->lowest_valid = i;
+ break;
+ }
+ }
+
+ for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
+ if ((table->entries[i].clk <= new_ps->levels[new_ps->num_levels -1].sclk) ||
+ (i == 0)) {
+ pi->highest_valid = i;
+ break;
+ }
+ }
+
+ if (pi->lowest_valid > pi->highest_valid) {
+ if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
+ (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
+ pi->highest_valid = pi->lowest_valid;
+ else
+ pi->lowest_valid = pi->highest_valid;
+ }
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+
+ for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
+ if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
+ i == (int)(pi->graphics_dpm_level_count - 1)) {
+ pi->lowest_valid = i;
+ break;
+ }
+ }
+
+ for (i = pi->graphics_dpm_level_count - 1; i >= 0; i--) {
+ if (table->entries[i].sclk_frequency <=
+ new_ps->levels[new_ps->num_levels - 1].sclk ||
+ i == 0) {
+ pi->highest_valid = i;
+ break;
+ }
+ }
+
+ if (pi->lowest_valid > pi->highest_valid) {
+ if ((new_ps->levels[0].sclk -
+ table->entries[pi->highest_valid].sclk_frequency) >
+ (table->entries[pi->lowest_valid].sclk_frequency -
+ new_ps->levels[new_ps->num_levels -1].sclk))
+ pi->highest_valid = pi->lowest_valid;
+ else
+ pi->lowest_valid = pi->highest_valid;
+ }
+ }
+}
+
+static int kv_update_dfs_bypass_settings(struct radeon_device *rdev,
+ struct radeon_ps *new_rps)
+{
+ struct kv_ps *new_ps = kv_get_ps(new_rps);
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret = 0;
+ u8 clk_bypass_cntl;
+
+ if (pi->caps_enable_dfs_bypass) {
+ clk_bypass_cntl = new_ps->need_dfs_bypass ?
+ pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
+ ret = kv_copy_bytes_to_smc(rdev,
+ (pi->dpm_table_start +
+ offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
+ (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
+ offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
+ &clk_bypass_cntl,
+ sizeof(u8), pi->sram_end);
+ }
+
+ return ret;
+}
+
+static int kv_enable_nb_dpm(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ int ret = 0;
+
+ if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
+ ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_NBDPM_Enable);
+ if (ret == 0)
+ pi->nb_dpm_enabled = true;
+ }
+
+ return ret;
+}
+
+int kv_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level)
+{
+ int ret;
+
+ if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+ ret = kv_force_dpm_highest(rdev);
+ if (ret)
+ return ret;
+ } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+ ret = kv_force_dpm_lowest(rdev);
+ if (ret)
+ return ret;
+ } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
+ ret = kv_unforce_levels(rdev);
+ if (ret)
+ return ret;
+ }
+
+ rdev->pm.dpm.forced_level = level;
+
+ return 0;
+}
+
+int kv_dpm_pre_set_power_state(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
+ struct radeon_ps *new_ps = &requested_ps;
+
+ kv_update_requested_ps(rdev, new_ps);
+
+ kv_apply_state_adjust_rules(rdev,
+ &pi->requested_rps,
+ &pi->current_rps);
+
+ return 0;
+}
+
+int kv_dpm_set_power_state(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_ps *new_ps = &pi->requested_rps;
+ /*struct radeon_ps *old_ps = &pi->current_rps;*/
+ int ret;
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_HDP), false);
+
+ if (rdev->family == CHIP_KABINI) {
+ if (pi->enable_dpm) {
+ kv_set_valid_clock_range(rdev, new_ps);
+ kv_update_dfs_bypass_settings(rdev, new_ps);
+ ret = kv_calculate_ds_divider(rdev);
+ if (ret) {
+ DRM_ERROR("kv_calculate_ds_divider failed\n");
+ return ret;
+ }
+ kv_calculate_nbps_level_settings(rdev);
+ kv_calculate_dpm_settings(rdev);
+ kv_force_lowest_valid(rdev);
+ kv_enable_new_levels(rdev);
+ kv_upload_dpm_settings(rdev);
+ kv_program_nbps_index_settings(rdev, new_ps);
+ kv_unforce_levels(rdev);
+ kv_set_enabled_levels(rdev);
+ kv_force_lowest_valid(rdev);
+ kv_unforce_levels(rdev);
+#if 0
+ ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
+ if (ret) {
+ DRM_ERROR("kv_update_vce_dpm failed\n");
+ return ret;
+ }
+#endif
+ kv_update_sclk_t(rdev);
+ }
+ } else {
+ if (pi->enable_dpm) {
+ kv_set_valid_clock_range(rdev, new_ps);
+ kv_update_dfs_bypass_settings(rdev, new_ps);
+ ret = kv_calculate_ds_divider(rdev);
+ if (ret) {
+ DRM_ERROR("kv_calculate_ds_divider failed\n");
+ return ret;
+ }
+ kv_calculate_nbps_level_settings(rdev);
+ kv_calculate_dpm_settings(rdev);
+ kv_freeze_sclk_dpm(rdev, true);
+ kv_upload_dpm_settings(rdev);
+ kv_program_nbps_index_settings(rdev, new_ps);
+ kv_freeze_sclk_dpm(rdev, false);
+ kv_set_enabled_levels(rdev);
+#if 0
+ ret = kv_update_vce_dpm(rdev, new_ps, old_ps);
+ if (ret) {
+ DRM_ERROR("kv_update_vce_dpm failed\n");
+ return ret;
+ }
+#endif
+ kv_update_sclk_t(rdev);
+ kv_enable_nb_dpm(rdev);
+ }
+ }
+
+ cik_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_HDP), true);
+
+ rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
+ return 0;
+}
+
+void kv_dpm_post_set_power_state(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_ps *new_ps = &pi->requested_rps;
+
+ kv_update_current_ps(rdev, new_ps);
+}
+
+void kv_dpm_setup_asic(struct radeon_device *rdev)
+{
+ sumo_take_smu_control(rdev, true);
+ kv_init_powergate_state(rdev);
+ kv_init_sclk_t(rdev);
+}
+
+void kv_dpm_reset_asic(struct radeon_device *rdev)
+{
+ kv_force_lowest_valid(rdev);
+ kv_init_graphics_levels(rdev);
+ kv_program_bootup_state(rdev);
+ kv_upload_dpm_settings(rdev);
+ kv_force_lowest_valid(rdev);
+ kv_unforce_levels(rdev);
+}
+
+//XXX use sumo_dpm_display_configuration_changed
+
+static void kv_construct_max_power_limits_table(struct radeon_device *rdev,
+ struct radeon_clock_and_voltage_limits *table)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
+ int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
+ table->sclk =
+ pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
+ table->vddc =
+ kv_convert_2bit_index_to_voltage(rdev,
+ pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
+ }
+
+ table->mclk = pi->sys_info.nbp_memory_clock[0];
+}
+
+static void kv_patch_voltage_values(struct radeon_device *rdev)
+{
+ int i;
+ struct radeon_uvd_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
+
+ if (table->count) {
+ for (i = 0; i < table->count; i++)
+ table->entries[i].v =
+ kv_convert_8bit_index_to_voltage(rdev,
+ table->entries[i].v);
+ }
+}
+
+static void kv_construct_boot_state(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
+ pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
+ pi->boot_pl.ds_divider_index = 0;
+ pi->boot_pl.ss_divider_index = 0;
+ pi->boot_pl.allow_gnb_slow = 1;
+ pi->boot_pl.force_nbp_state = 0;
+ pi->boot_pl.display_wm = 0;
+ pi->boot_pl.vce_wm = 0;
+}
+
+static int kv_force_dpm_highest(struct radeon_device *rdev)
+{
+ int ret;
+ u32 enable_mask, i;
+
+ ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
+ if (ret)
+ return ret;
+
+ for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
+ if (enable_mask & (1 << i))
+ break;
+ }
+
+ return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+}
+
+static int kv_force_dpm_lowest(struct radeon_device *rdev)
+{
+ int ret;
+ u32 enable_mask, i;
+
+ ret = kv_dpm_get_enable_mask(rdev, &enable_mask);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
+ if (enable_mask & (1 << i))
+ break;
+ }
+
+ return kv_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, i);
+}
+
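+/* Pick the deepest sleep divider that keeps the engine clock at or above
+ * the minimum allowed in self refresh.
+ */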
+static u8 kv_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
+ u32 sclk, u32 min_sclk_in_sr)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+ u32 temp;
+ u32 min = (min_sclk_in_sr > KV_MINIMUM_ENGINE_CLOCK) ?
+ min_sclk_in_sr : KV_MINIMUM_ENGINE_CLOCK;
+
+ if (sclk < min)
+ return 0;
+
+ if (!pi->caps_sclk_ds)
+ return 0;
+
+ for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
+ temp = sclk / sumo_get_sleep_divider_from_id(i);
+ if ((temp >= min) || (i == 0))
+ break;
+ }
+
+ return (u8)i;
+}
+
+static int kv_get_high_voltage_limit(struct radeon_device *rdev, int *limit)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ int i;
+
+ if (table && table->count) {
+ for (i = table->count - 1; i >= 0; i--) {
+ if (pi->high_voltage_t &&
+ (kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v) <=
+ pi->high_voltage_t)) {
+ *limit = i;
+ return 0;
+ }
+ }
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+
+ for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
+ if (pi->high_voltage_t &&
+ (kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit) <=
+ pi->high_voltage_t)) {
+ *limit = i;
+ return 0;
+ }
+ }
+ }
+
+ *limit = 0;
+ return 0;
+}
+
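+/* Adjust the requested power state for the platform: enforce the minimum
+ * sclk, cap levels above the high voltage threshold, and pick the NB
+ * P-state configuration.
+ */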
+static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
+ struct radeon_ps *new_rps,
+ struct radeon_ps *old_rps)
+{
+ struct kv_ps *ps = kv_get_ps(new_rps);
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 min_sclk = 10000; /* ??? */
+ u32 sclk, mclk = 0;
+ int i, limit;
+ bool force_high;
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+ u32 stable_p_state_sclk = 0;
+ struct radeon_clock_and_voltage_limits *max_limits =
+ &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
+ mclk = max_limits->mclk;
+ sclk = min_sclk;
+
+ if (pi->caps_stable_p_state) {
+ stable_p_state_sclk = (max_limits->sclk * 75) / 100;
+
+ for (i = table->count - 1; i >= 0; i--) {
+ if (stable_p_state_sclk >= table->entries[i].clk) {
+ stable_p_state_sclk = table->entries[i].clk;
+ break;
+ }
+ }
+
+ if (i > 0)
+ stable_p_state_sclk = table->entries[0].clk;
+
+ sclk = stable_p_state_sclk;
+ }
+
+ ps->need_dfs_bypass = true;
+
+ for (i = 0; i < ps->num_levels; i++) {
+ if (ps->levels[i].sclk < sclk)
+ ps->levels[i].sclk = sclk;
+ }
+
+ if (table && table->count) {
+ for (i = 0; i < ps->num_levels; i++) {
+ if (pi->high_voltage_t &&
+ (pi->high_voltage_t <
+ kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
+ kv_get_high_voltage_limit(rdev, &limit);
+ ps->levels[i].sclk = table->entries[limit].clk;
+ }
+ }
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+
+ for (i = 0; i < ps->num_levels; i++) {
+ if (pi->high_voltage_t &&
+ (pi->high_voltage_t <
+ kv_convert_8bit_index_to_voltage(rdev, ps->levels[i].vddc_index))) {
+ kv_get_high_voltage_limit(rdev, &limit);
+ ps->levels[i].sclk = table->entries[limit].sclk_frequency;
+ }
+ }
+ }
+
+ if (pi->caps_stable_p_state) {
+ for (i = 0; i < ps->num_levels; i++) {
+ ps->levels[i].sclk = stable_p_state_sclk;
+ }
+ }
+
+ pi->video_start = new_rps->dclk || new_rps->vclk;
+
+ if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
+ ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
+ pi->battery_state = true;
+ else
+ pi->battery_state = false;
+
+ if (rdev->family == CHIP_KABINI) {
+ ps->dpm0_pg_nb_ps_lo = 0x1;
+ ps->dpm0_pg_nb_ps_hi = 0x0;
+ ps->dpmx_nb_ps_lo = 0x1;
+ ps->dpmx_nb_ps_hi = 0x0;
+ } else {
+ ps->dpm0_pg_nb_ps_lo = 0x1;
+ ps->dpm0_pg_nb_ps_hi = 0x0;
+ ps->dpmx_nb_ps_lo = 0x2;
+ ps->dpmx_nb_ps_hi = 0x1;
+
+ if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
+ force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
+ pi->video_start || (rdev->pm.dpm.new_active_crtc_count >= 3) ||
+ pi->disable_nb_ps3_in_battery;
+ ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
+ ps->dpm0_pg_nb_ps_hi = 0x2;
+ ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
+ ps->dpmx_nb_ps_hi = 0x2;
+ }
+ }
+}
+
+static void kv_dpm_power_level_enabled_for_throttle(struct radeon_device *rdev,
+ u32 index, bool enable)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
+}
+
+static int kv_calculate_ds_divider(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 sclk_in_sr = 10000; /* ??? */
+ u32 i;
+
+ if (pi->lowest_valid > pi->highest_valid)
+ return -EINVAL;
+
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
+ pi->graphics_level[i].DeepSleepDivId =
+ kv_get_sleep_divider_id_from_clock(rdev,
+ be32_to_cpu(pi->graphics_level[i].SclkFrequency),
+ sclk_in_sr);
+ }
+ return 0;
+}
+
+static int kv_calculate_nbps_level_settings(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+ bool force_high;
+ struct radeon_clock_and_voltage_limits *max_limits =
+ &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+ u32 mclk = max_limits->mclk;
+
+ if (pi->lowest_valid > pi->highest_valid)
+ return -EINVAL;
+
+ if (rdev->family == CHIP_KABINI) {
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
+ pi->graphics_level[i].GnbSlow = 1;
+ pi->graphics_level[i].ForceNbPs1 = 0;
+ pi->graphics_level[i].UpH = 0;
+ }
+
+ if (!pi->sys_info.nb_dpm_enable)
+ return 0;
+
+ force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
+ (rdev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
+
+ if (force_high) {
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
+ pi->graphics_level[i].GnbSlow = 0;
+ } else {
+ if (pi->battery_state)
+ pi->graphics_level[0].ForceNbPs1 = 1;
+
+ pi->graphics_level[1].GnbSlow = 0;
+ pi->graphics_level[2].GnbSlow = 0;
+ pi->graphics_level[3].GnbSlow = 0;
+ pi->graphics_level[4].GnbSlow = 0;
+ }
+ } else {
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
+ pi->graphics_level[i].GnbSlow = 1;
+ pi->graphics_level[i].ForceNbPs1 = 0;
+ pi->graphics_level[i].UpH = 0;
+ }
+
+ if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
+ pi->graphics_level[pi->lowest_valid].UpH = 0x28;
+ pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
+ if (pi->lowest_valid != pi->highest_valid)
+ pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
+ }
+ }
+ return 0;
+}
+
+static int kv_calculate_dpm_settings(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+
+ if (pi->lowest_valid > pi->highest_valid)
+ return -EINVAL;
+
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
+ pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;
+
+ return 0;
+}
+
+static void kv_init_graphics_levels(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+ struct radeon_clock_voltage_dependency_table *table =
+ &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
+
+ if (table && table->count) {
+ u32 vid_2bit;
+
+ pi->graphics_dpm_level_count = 0;
+ for (i = 0; i < table->count; i++) {
+ if (pi->high_voltage_t &&
+ (pi->high_voltage_t <
+ kv_convert_8bit_index_to_voltage(rdev, table->entries[i].v)))
+ break;
+
+ kv_set_divider_value(rdev, i, table->entries[i].clk);
+ vid_2bit = sumo_convert_vid7_to_vid2(rdev,
+ &pi->sys_info.vid_mapping_table,
+ table->entries[i].v);
+ kv_set_vid(rdev, i, vid_2bit);
+ kv_set_at(rdev, i, pi->at[i]);
+ kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
+ pi->graphics_dpm_level_count++;
+ }
+ } else {
+ struct sumo_sclk_voltage_mapping_table *table =
+ &pi->sys_info.sclk_voltage_mapping_table;
+
+ pi->graphics_dpm_level_count = 0;
+ for (i = 0; i < table->num_max_dpm_entries; i++) {
+ if (pi->high_voltage_t &&
+ pi->high_voltage_t <
+ kv_convert_2bit_index_to_voltage(rdev, table->entries[i].vid_2bit))
+ break;
+
+ kv_set_divider_value(rdev, i, table->entries[i].sclk_frequency);
+ kv_set_vid(rdev, i, table->entries[i].vid_2bit);
+ kv_set_at(rdev, i, pi->at[i]);
+ kv_dpm_power_level_enabled_for_throttle(rdev, i, true);
+ pi->graphics_dpm_level_count++;
+ }
+ }
+
+ for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
+ kv_dpm_power_level_enable(rdev, i, false);
+}
+
+static void kv_enable_new_levels(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i;
+
+ for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
+ if (i >= pi->lowest_valid && i <= pi->highest_valid)
+ kv_dpm_power_level_enable(rdev, i, true);
+ }
+}
+
+static int kv_set_enabled_levels(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 i, new_mask = 0;
+
+ for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
+ new_mask |= (1 << i);
+
+ return kv_send_msg_to_smc_with_parameter(rdev,
+ PPSMC_MSG_SCLKDPM_SetEnabledMask,
+ new_mask);
+}
+
+static void kv_program_nbps_index_settings(struct radeon_device *rdev,
+ struct radeon_ps *new_rps)
+{
+ struct kv_ps *new_ps = kv_get_ps(new_rps);
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 nbdpmconfig1;
+
+ if (rdev->family == CHIP_KABINI)
+ return;
+
+ if (pi->sys_info.nb_dpm_enable) {
+ nbdpmconfig1 = RREG32_SMC(NB_DPM_CONFIG_1);
+ nbdpmconfig1 &= ~(Dpm0PgNbPsLo_MASK | Dpm0PgNbPsHi_MASK |
+ DpmXNbPsLo_MASK | DpmXNbPsHi_MASK);
+ nbdpmconfig1 |= (Dpm0PgNbPsLo(new_ps->dpm0_pg_nb_ps_lo) |
+ Dpm0PgNbPsHi(new_ps->dpm0_pg_nb_ps_hi) |
+ DpmXNbPsLo(new_ps->dpmx_nb_ps_lo) |
+ DpmXNbPsHi(new_ps->dpmx_nb_ps_hi));
+ WREG32_SMC(NB_DPM_CONFIG_1, nbdpmconfig1);
+ }
+}
+
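+/* Program the thermal interrupt thresholds; the DIG_THERM fields take the
+ * temperature in degrees C plus a fixed offset of 49.
+ */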
+static int kv_set_thermal_temperature_range(struct radeon_device *rdev,
+ int min_temp, int max_temp)
+{
+ int low_temp = 0 * 1000;
+ int high_temp = 255 * 1000;
+ u32 tmp;
+
+ if (low_temp < min_temp)
+ low_temp = min_temp;
+ if (high_temp > max_temp)
+ high_temp = max_temp;
+ if (high_temp < low_temp) {
+ DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
+ return -EINVAL;
+ }
+
+ tmp = RREG32_SMC(CG_THERMAL_INT_CTRL);
+ tmp &= ~(DIG_THERM_INTH_MASK | DIG_THERM_INTL_MASK);
+ tmp |= (DIG_THERM_INTH(49 + (high_temp / 1000)) |
+ DIG_THERM_INTL(49 + (low_temp / 1000)));
+ WREG32_SMC(CG_THERMAL_INT_CTRL, tmp);
+
+ rdev->pm.dpm.thermal.min_temp = low_temp;
+ rdev->pm.dpm.thermal.max_temp = high_temp;
+
+ return 0;
+}
+
+union igp_info {
+ struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
+};
+
+static int kv_parse_sys_info_table(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+ union igp_info *igp_info;
+ u8 frev, crev;
+ u16 data_offset;
+ int i;
+
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ igp_info = (union igp_info *)(mode_info->atom_context->bios +
+ data_offset);
+
+ if (crev != 8) {
+ DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+ return -EINVAL;
+ }
+ pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
+ pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
+ pi->sys_info.bootup_nb_voltage_index =
+ le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
+ if (igp_info->info_8.ucHtcTmpLmt == 0)
+ pi->sys_info.htc_tmp_lmt = 203;
+ else
+ pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
+ if (igp_info->info_8.ucHtcHystLmt == 0)
+ pi->sys_info.htc_hyst_lmt = 5;
+ else
+ pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
+ if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
+ DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
+ }
+
+ if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
+ pi->sys_info.nb_dpm_enable = true;
+ else
+ pi->sys_info.nb_dpm_enable = false;
+
+ for (i = 0; i < KV_NUM_NBPSTATES; i++) {
+ pi->sys_info.nbp_memory_clock[i] =
+ le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
+ pi->sys_info.nbp_n_clock[i] =
+ le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
+ }
+ if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
+ SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
+ pi->caps_enable_dfs_bypass = true;
+
+ sumo_construct_sclk_voltage_mapping_table(rdev,
+ &pi->sys_info.sclk_voltage_mapping_table,
+ igp_info->info_8.sAvail_SCLK);
+
+ sumo_construct_vid_mapping_table(rdev,
+ &pi->sys_info.vid_mapping_table,
+ igp_info->info_8.sAvail_SCLK);
+
+ kv_construct_max_power_limits_table(rdev,
+ &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
+ }
+ return 0;
+}
+
+union power_info {
+ struct _ATOM_POWERPLAY_INFO info;
+ struct _ATOM_POWERPLAY_INFO_V2 info_2;
+ struct _ATOM_POWERPLAY_INFO_V3 info_3;
+ struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+ struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+ struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+};
+
+union pplib_clock_info {
+ struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
+ struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
+ struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
+ struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
+};
+
+union pplib_power_state {
+ struct _ATOM_PPLIB_STATE v1;
+ struct _ATOM_PPLIB_STATE_V2 v2;
+};
+
+static void kv_patch_boot_state(struct radeon_device *rdev,
+ struct kv_ps *ps)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ ps->num_levels = 1;
+ ps->levels[0] = pi->boot_pl;
+}
+
+static void kv_parse_pplib_non_clock_info(struct radeon_device *rdev,
+ struct radeon_ps *rps,
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
+ u8 table_rev)
+{
+ struct kv_ps *ps = kv_get_ps(rps);
+
+ rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+ rps->class = le16_to_cpu(non_clock_info->usClassification);
+ rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
+
+ if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
+ rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
+ rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
+ } else {
+ rps->vclk = 0;
+ rps->dclk = 0;
+ }
+
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+ rdev->pm.dpm.boot_ps = rps;
+ kv_patch_boot_state(rdev, ps);
+ }
+ if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+ rdev->pm.dpm.uvd_ps = rps;
+}
+
+static void kv_parse_pplib_clock_info(struct radeon_device *rdev,
+ struct radeon_ps *rps, int index,
+ union pplib_clock_info *clock_info)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct kv_ps *ps = kv_get_ps(rps);
+ struct kv_pl *pl = &ps->levels[index];
+ u32 sclk;
+
+ sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
+ sclk |= clock_info->sumo.ucEngineClockHigh << 16;
+ pl->sclk = sclk;
+ pl->vddc_index = clock_info->sumo.vddcIndex;
+
+ ps->num_levels = index + 1;
+
+ if (pi->caps_sclk_ds) {
+ pl->ds_divider_index = 5;
+ pl->ss_divider_index = 5;
+ }
+}
+
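+/* Walk the ATOM PowerPlay state array and build the list of radeon power
+ * states for this ASIC.
+ */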
+static int kv_parse_power_table(struct radeon_device *rdev)
+{
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+ union pplib_power_state *power_state;
+ int i, j, k, non_clock_array_index, clock_array_index;
+ union pplib_clock_info *clock_info;
+ struct _StateArray *state_array;
+ struct _ClockInfoArray *clock_info_array;
+ struct _NonClockInfoArray *non_clock_info_array;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+ u8 *power_state_offset;
+ struct kv_ps *ps;
+
+ if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return -EINVAL;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ state_array = (struct _StateArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usStateArrayOffset));
+ clock_info_array = (struct _ClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
+ non_clock_info_array = (struct _NonClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
+
+ rdev->pm.dpm.ps = kzalloc(sizeof(struct radeon_ps) *
+ state_array->ucNumEntries, GFP_KERNEL);
+ if (!rdev->pm.dpm.ps)
+ return -ENOMEM;
+ power_state_offset = (u8 *)state_array->states;
+ rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
+ rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
+ rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
+ for (i = 0; i < state_array->ucNumEntries; i++) {
+ u8 *idx;
+ power_state = (union pplib_power_state *)power_state_offset;
+ non_clock_array_index = power_state->v2.nonClockInfoIndex;
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ &non_clock_info_array->nonClockInfo[non_clock_array_index];
+ if (!rdev->pm.power_state[i].clock_info)
+ return -EINVAL;
+ ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
+ if (ps == NULL) {
+ kfree(rdev->pm.dpm.ps);
+ return -ENOMEM;
+ }
+ rdev->pm.dpm.ps[i].ps_priv = ps;
+ k = 0;
+ idx = (u8 *)&power_state->v2.clockInfoIndex[0];
+ for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+ clock_array_index = idx[j];
+ if (clock_array_index >= clock_info_array->ucNumEntries)
+ continue;
+ if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
+ break;
+ clock_info = (union pplib_clock_info *)
+ ((u8 *)&clock_info_array->clockInfo[0] +
+ (clock_array_index * clock_info_array->ucEntrySize));
+ kv_parse_pplib_clock_info(rdev,
+ &rdev->pm.dpm.ps[i], k,
+ clock_info);
+ k++;
+ }
+ kv_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
+ non_clock_info,
+ non_clock_info_array->ucEntrySize);
+ power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
+ }
+ rdev->pm.dpm.num_ps = state_array->ucNumEntries;
+ return 0;
+}
+
+int kv_dpm_init(struct radeon_device *rdev)
+{
+ struct kv_power_info *pi;
+ int ret, i;
+
+ pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
+ if (pi == NULL)
+ return -ENOMEM;
+ rdev->pm.dpm.priv = pi;
+
+ ret = r600_parse_extended_power_table(rdev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
+ pi->at[i] = TRINITY_AT_DFLT;
+
+ pi->sram_end = SMC_RAM_END;
+
+ if (rdev->family == CHIP_KABINI)
+ pi->high_voltage_t = 4001;
+
+ pi->enable_nb_dpm = true;
+
+ pi->caps_power_containment = true;
+ pi->caps_cac = true;
+ pi->enable_didt = false;
+ if (pi->enable_didt) {
+ pi->caps_sq_ramping = true;
+ pi->caps_db_ramping = true;
+ pi->caps_td_ramping = true;
+ pi->caps_tcp_ramping = true;
+ }
+
+ pi->caps_sclk_ds = true;
+ pi->enable_auto_thermal_throttling = true;
+ pi->disable_nb_ps3_in_battery = false;
+ pi->bapm_enable = true;
+ pi->voltage_drop_t = 0;
+ pi->caps_sclk_throttle_low_notification = false;
+ pi->caps_fps = false; /* true? */
+ pi->caps_uvd_pg = true;
+ pi->caps_uvd_dpm = true;
+ pi->caps_vce_pg = false;
+ pi->caps_samu_pg = false;
+ pi->caps_acp_pg = false;
+ pi->caps_stable_p_state = false;
+
+ ret = kv_parse_sys_info_table(rdev);
+ if (ret)
+ return ret;
+
+ kv_patch_voltage_values(rdev);
+ kv_construct_boot_state(rdev);
+
+ ret = kv_parse_power_table(rdev);
+ if (ret)
+ return ret;
+
+ pi->enable_dpm = true;
+
+ return 0;
+}
+
+void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ u32 current_index =
+ (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & CURR_SCLK_INDEX_MASK) >>
+ CURR_SCLK_INDEX_SHIFT;
+ u32 sclk, tmp;
+ u16 vddc;
+
+ if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
+ seq_printf(m, "invalid dpm profile %d\n", current_index);
+ } else {
+ sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
+ tmp = (RREG32_SMC(SMU_VOLTAGE_STATUS) & SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
+ SMU_VOLTAGE_CURRENT_LEVEL_SHIFT;
+ vddc = kv_convert_8bit_index_to_voltage(rdev, (u16)tmp);
+ seq_printf(m, "power level %d sclk: %u vddc: %u\n",
+ current_index, sclk, vddc);
+ }
+}
+
+void kv_dpm_print_power_state(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ int i;
+ struct kv_ps *ps = kv_get_ps(rps);
+
+ r600_dpm_print_class_info(rps->class, rps->class2);
+ r600_dpm_print_cap_info(rps->caps);
+ printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ for (i = 0; i < ps->num_levels; i++) {
+ struct kv_pl *pl = &ps->levels[i];
+ printk("\t\tpower level %d sclk: %u vddc: %u\n",
+ i, pl->sclk,
+ kv_convert_8bit_index_to_voltage(rdev, pl->vddc_index));
+ }
+ r600_dpm_print_ps_status(rdev, rps);
+}
+
+void kv_dpm_fini(struct radeon_device *rdev)
+{
+ int i;
+
+ for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
+ kfree(rdev->pm.dpm.ps[i].ps_priv);
+ }
+ kfree(rdev->pm.dpm.ps);
+ kfree(rdev->pm.dpm.priv);
+ r600_free_extended_power_table(rdev);
+}
+
+void kv_dpm_display_configuration_changed(struct radeon_device *rdev)
+{
+
+}
+
+u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+ struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
+
+ if (low)
+ return requested_state->levels[0].sclk;
+ else
+ return requested_state->levels[requested_state->num_levels - 1].sclk;
+}
+
+u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low)
+{
+ struct kv_power_info *pi = kv_get_pi(rdev);
+
+ return pi->sys_info.bootup_uma_clk;
+}
+
diff --git a/drivers/gpu/drm/radeon/kv_dpm.h b/drivers/gpu/drm/radeon/kv_dpm.h
new file mode 100644
index 00000000000..32bb079572d
--- /dev/null
+++ b/drivers/gpu/drm/radeon/kv_dpm.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __KV_DPM_H__
+#define __KV_DPM_H__
+
+#define SMU__NUM_SCLK_DPM_STATE 8
+#define SMU__NUM_MCLK_DPM_LEVELS 4
+#define SMU__NUM_LCLK_DPM_LEVELS 8
+#define SMU__NUM_PCIE_DPM_LEVELS 0 /* ??? */
+#include "smu7_fusion.h"
+#include "trinity_dpm.h"
+#include "ppsmc.h"
+
+#define KV_NUM_NBPSTATES 4
+
+enum kv_pt_config_reg_type {
+ KV_CONFIGREG_MMR = 0,
+ KV_CONFIGREG_SMC_IND,
+ KV_CONFIGREG_DIDT_IND,
+ KV_CONFIGREG_CACHE,
+ KV_CONFIGREG_MAX
+};
+
+struct kv_pt_config_reg {
+ u32 offset;
+ u32 mask;
+ u32 shift;
+ u32 value;
+ enum kv_pt_config_reg_type type;
+};
+
+struct kv_lcac_config_values {
+ u32 block_id;
+ u32 signal_id;
+ u32 t;
+};
+
+struct kv_lcac_config_reg {
+ u32 cntl;
+ u32 block_mask;
+ u32 block_shift;
+ u32 signal_mask;
+ u32 signal_shift;
+ u32 t_mask;
+ u32 t_shift;
+ u32 enable_mask;
+ u32 enable_shift;
+};
+
+struct kv_pl {
+ u32 sclk;
+ u8 vddc_index;
+ u8 ds_divider_index;
+ u8 ss_divider_index;
+ u8 allow_gnb_slow;
+ u8 force_nbp_state;
+ u8 display_wm;
+ u8 vce_wm;
+};
+
+struct kv_ps {
+ struct kv_pl levels[SUMO_MAX_HARDWARE_POWERLEVELS];
+ u32 num_levels;
+ bool need_dfs_bypass;
+ u8 dpm0_pg_nb_ps_lo;
+ u8 dpm0_pg_nb_ps_hi;
+ u8 dpmx_nb_ps_lo;
+ u8 dpmx_nb_ps_hi;
+};
+
+struct kv_sys_info {
+ u32 bootup_uma_clk;
+ u32 bootup_sclk;
+ u32 dentist_vco_freq;
+ u32 nb_dpm_enable;
+ u32 nbp_memory_clock[KV_NUM_NBPSTATES];
+ u32 nbp_n_clock[KV_NUM_NBPSTATES];
+ u16 bootup_nb_voltage_index;
+ u8 htc_tmp_lmt;
+ u8 htc_hyst_lmt;
+ struct sumo_sclk_voltage_mapping_table sclk_voltage_mapping_table;
+ struct sumo_vid_mapping_table vid_mapping_table;
+ u32 uma_channel_number;
+};
+
+struct kv_power_info {
+ u32 at[SUMO_MAX_HARDWARE_POWERLEVELS];
+ u32 voltage_drop_t;
+ struct kv_sys_info sys_info;
+ struct kv_pl boot_pl;
+ bool enable_nb_ps_policy;
+ bool disable_nb_ps3_in_battery;
+ bool video_start;
+ bool battery_state;
+ u32 lowest_valid;
+ u32 highest_valid;
+ u16 high_voltage_t;
+ bool cac_enabled;
+ bool bapm_enable;
+ /* smc offsets */
+ u32 sram_end;
+ u32 dpm_table_start;
+ u32 soft_regs_start;
+ /* dpm SMU tables */
+ u8 graphics_dpm_level_count;
+ u8 uvd_level_count;
+ u8 vce_level_count;
+ u8 acp_level_count;
+ u8 samu_level_count;
+ u16 fps_high_t;
+ SMU7_Fusion_GraphicsLevel graphics_level[SMU__NUM_SCLK_DPM_STATE];
+ SMU7_Fusion_ACPILevel acpi_level;
+ SMU7_Fusion_UvdLevel uvd_level[SMU7_MAX_LEVELS_UVD];
+ SMU7_Fusion_ExtClkLevel vce_level[SMU7_MAX_LEVELS_VCE];
+ SMU7_Fusion_ExtClkLevel acp_level[SMU7_MAX_LEVELS_ACP];
+ SMU7_Fusion_ExtClkLevel samu_level[SMU7_MAX_LEVELS_SAMU];
+ u8 uvd_boot_level;
+ u8 vce_boot_level;
+ u8 acp_boot_level;
+ u8 samu_boot_level;
+ u8 uvd_interval;
+ u8 vce_interval;
+ u8 acp_interval;
+ u8 samu_interval;
+ u8 graphics_boot_level;
+ u8 graphics_interval;
+ u8 graphics_therm_throttle_enable;
+ u8 graphics_voltage_change_enable;
+ u8 graphics_clk_slow_enable;
+ u8 graphics_clk_slow_divider;
+ u8 fps_low_t;
+ u32 low_sclk_interrupt_t;
+ bool uvd_power_gated;
+ bool vce_power_gated;
+ bool acp_power_gated;
+ bool samu_power_gated;
+ bool nb_dpm_enabled;
+ /* flags */
+ bool enable_didt;
+ bool enable_dpm;
+ bool enable_auto_thermal_throttling;
+ bool enable_nb_dpm;
+ /* caps */
+ bool caps_cac;
+ bool caps_power_containment;
+ bool caps_sq_ramping;
+ bool caps_db_ramping;
+ bool caps_td_ramping;
+ bool caps_tcp_ramping;
+ bool caps_sclk_throttle_low_notification;
+ bool caps_fps;
+ bool caps_uvd_dpm;
+ bool caps_uvd_pg;
+ bool caps_vce_pg;
+ bool caps_samu_pg;
+ bool caps_acp_pg;
+ bool caps_stable_p_state;
+ bool caps_enable_dfs_bypass;
+ bool caps_sclk_ds;
+ struct radeon_ps current_rps;
+ struct kv_ps current_ps;
+ struct radeon_ps requested_rps;
+ struct kv_ps requested_ps;
+};
+
+
+/* kv_smc.c */
+int kv_notify_message_to_smu(struct radeon_device *rdev, u32 id);
+int kv_dpm_get_enable_mask(struct radeon_device *rdev, u32 *enable_mask);
+int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
+ PPSMC_Msg msg, u32 parameter);
+int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
+ u32 *value, u32 limit);
+int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable);
+int kv_copy_bytes_to_smc(struct radeon_device *rdev,
+ u32 smc_start_address,
+ const u8 *src, u32 byte_count, u32 limit);
+
+#endif
diff --git a/drivers/gpu/drm/radeon/kv_smc.c b/drivers/gpu/drm/radeon/kv_smc.c
new file mode 100644
index 00000000000..34a226d7e34
--- /dev/null
+++ b/drivers/gpu/drm/radeon/kv_smc.c
@@ -0,0 +1,207 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+
+#include "drmP.h"
+#include "radeon.h"
+#include "cikd.h"
+#include "kv_dpm.h"
+
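+/* Post a message id to the SMU message register and poll the response
+ * register for an acknowledgement.
+ */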
+int kv_notify_message_to_smu(struct radeon_device *rdev, u32 id)
+{
+ u32 i;
+ u32 tmp = 0;
+
+ WREG32(SMC_MESSAGE_0, id & SMC_MSG_MASK);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if ((RREG32(SMC_RESP_0) & SMC_RESP_MASK) != 0)
+ break;
+ udelay(1);
+ }
+ tmp = RREG32(SMC_RESP_0) & SMC_RESP_MASK;
+
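+ /* 0xFF and 0xFE responses are treated as errors (likely unknown/failed message) */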
+ if (tmp != 1) {
+ if (tmp == 0xFF)
+ return -EINVAL;
+ else if (tmp == 0xFE)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int kv_dpm_get_enable_mask(struct radeon_device *rdev, u32 *enable_mask)
+{
+ int ret;
+
+ ret = kv_notify_message_to_smu(rdev, PPSMC_MSG_SCLKDPM_GetEnabledMask);
+
+ if (ret == 0)
+ *enable_mask = RREG32_SMC(SMC_SYSCON_MSG_ARG_0);
+
+ return ret;
+}
+
+int kv_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
+ PPSMC_Msg msg, u32 parameter)
+{
+ WREG32(SMC_MSG_ARG_0, parameter);
+
+ return kv_notify_message_to_smu(rdev, msg);
+}
+
+static int kv_set_smc_sram_address(struct radeon_device *rdev,
+ u32 smc_address, u32 limit)
+{
+ if (smc_address & 3)
+ return -EINVAL;
+ if ((smc_address + 3) > limit)
+ return -EINVAL;
+
+ WREG32(SMC_IND_INDEX_0, smc_address);
+ WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0);
+
+ return 0;
+}
+
+int kv_read_smc_sram_dword(struct radeon_device *rdev, u32 smc_address,
+ u32 *value, u32 limit)
+{
+ int ret;
+
+ ret = kv_set_smc_sram_address(rdev, smc_address, limit);
+ if (ret)
+ return ret;
+
+ *value = RREG32(SMC_IND_DATA_0);
+ return 0;
+}
+
+int kv_smc_dpm_enable(struct radeon_device *rdev, bool enable)
+{
+ if (enable)
+ return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Enable);
+ else
+ return kv_notify_message_to_smu(rdev, PPSMC_MSG_DPM_Disable);
+}
+
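+/* Copy a byte stream into SMC SRAM.  The SMC is accessed through a 32-bit
+ * big-endian indirect data register, so unaligned head and tail bytes are
+ * merged with the existing contents via read-modify-write.
+ */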
+int kv_copy_bytes_to_smc(struct radeon_device *rdev,
+ u32 smc_start_address,
+ const u8 *src, u32 byte_count, u32 limit)
+{
+ int ret;
+ u32 data, original_data, addr, extra_shift, t_byte, count, mask;
+
+ if ((smc_start_address + byte_count) > limit)
+ return -EINVAL;
+
+ addr = smc_start_address;
+ t_byte = addr & 3;
+
+ /* RMW for the initial bytes */
+ if (t_byte != 0) {
+ addr -= t_byte;
+
+ ret = kv_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ original_data = RREG32(SMC_IND_DATA_0);
+
+ data = 0;
+ mask = 0;
+ count = 4;
+ while (count > 0) {
+ if (t_byte > 0) {
+ mask = (mask << 8) | 0xff;
+ t_byte--;
+ } else if (byte_count > 0) {
+ data = (data << 8) + *src++;
+ byte_count--;
+ mask <<= 8;
+ } else {
+ data <<= 8;
+ mask = (mask << 8) | 0xff;
+ }
+ count--;
+ }
+
+ data |= original_data & mask;
+
+ ret = kv_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ WREG32(SMC_IND_DATA_0, data);
+
+ addr += 4;
+ }
+
+ while (byte_count >= 4) {
+ /* SMC address space is BE */
+ data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3];
+
+ ret = kv_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ WREG32(SMC_IND_DATA_0, data);
+
+ src += 4;
+ byte_count -= 4;
+ addr += 4;
+ }
+
+ /* RMW for the final bytes */
+ if (byte_count > 0) {
+ data = 0;
+
+ ret = kv_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ original_data = RREG32(SMC_IND_DATA_0);
+
+ extra_shift = 8 * (4 - byte_count);
+
+ while (byte_count > 0) {
+ /* SMC address space is BE */
+ data = (data << 8) + *src++;
+ byte_count--;
+ }
+
+ data <<= extra_shift;
+
+ data |= (original_data & ~((~0UL) << extra_shift));
+
+ ret = kv_set_smc_sram_address(rdev, addr, limit);
+ if (ret)
+ return ret;
+
+ WREG32(SMC_IND_DATA_0, data);
+ }
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 56bd4f3be4f..93c1f9ef5da 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -35,7 +35,7 @@
#include "radeon_ucode.h"
#include "clearstate_cayman.h"
-static u32 tn_rlc_save_restore_register_list[] =
+static const u32 tn_rlc_save_restore_register_list[] =
{
0x98fc,
0x98f0,
@@ -160,7 +160,6 @@ static u32 tn_rlc_save_restore_register_list[] =
0x9830,
0x802c,
};
-static u32 tn_rlc_save_restore_register_list_size = ARRAY_SIZE(tn_rlc_save_restore_register_list);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
@@ -175,6 +174,11 @@ extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void evergreen_program_aspm(struct radeon_device *rdev);
extern void sumo_rlc_fini(struct radeon_device *rdev);
extern int sumo_rlc_init(struct radeon_device *rdev);
+extern void cayman_dma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
@@ -794,9 +798,13 @@ int ni_init_microcode(struct radeon_device *rdev)
if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->smc_fw->size != smc_req_size) {
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ } else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR
"ni_mc: Bogus length %zu in firmware \"%s\"\n",
rdev->mc_fw->size, fw_name);
@@ -1370,23 +1378,6 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_write(ring, 10); /* poll interval */
}
-void cayman_uvd_semaphore_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait)
-{
- uint64_t addr = semaphore->gpu_addr;
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
- radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
- radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
- radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
-}
-
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
if (enable)
@@ -1560,8 +1551,8 @@ static int cayman_cp_resume(struct radeon_device *rdev)
/* Set ring buffer size */
ring = &rdev->ring[ridx[i]];
- rb_cntl = drm_order(ring->ring_size / 8);
- rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8;
+ rb_cntl = order_base_2(ring->ring_size / 8);
+ rb_cntl |= order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8;
#ifdef __BIG_ENDIAN
rb_cntl |= BUF_SWAP_32BIT;
#endif
@@ -1609,186 +1600,7 @@ static int cayman_cp_resume(struct radeon_device *rdev)
return 0;
}
-/*
- * DMA
- * Starting with R600, the GPU has an asynchronous
- * DMA engine. The programming model is very similar
- * to the 3D engine (ring buffer, IBs, etc.), but the
- * DMA controller has it's own packet format that is
- * different form the PM4 format used by the 3D engine.
- * It supports copying data, writing embedded data,
- * solid fills, and a number of other things. It also
- * has support for tiling/detiling of buffers.
- * Cayman and newer support two asynchronous DMA engines.
- */
-/**
- * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to schedule
- *
- * Schedule an IB in the DMA ring (cayman-SI).
- */
-void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
- struct radeon_ib *ib)
-{
- struct radeon_ring *ring = &rdev->ring[ib->ring];
-
- if (rdev->wb.enabled) {
- u32 next_rptr = ring->wptr + 4;
- while ((next_rptr & 7) != 5)
- next_rptr++;
- next_rptr += 3;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
- radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
- radeon_ring_write(ring, next_rptr);
- }
-
- /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
- * Pad as necessary with NOPs.
- */
- while ((ring->wptr & 7) != 5)
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
- radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
- radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
- radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
-
-}
-
-/**
- * cayman_dma_stop - stop the async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Stop the async dma engines (cayman-SI).
- */
-void cayman_dma_stop(struct radeon_device *rdev)
-{
- u32 rb_cntl;
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
-
- /* dma0 */
- rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
- rb_cntl &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
-
- /* dma1 */
- rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
- rb_cntl &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
-
- rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
- rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
-}
-
-/**
- * cayman_dma_resume - setup and start the async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Set up the DMA ring buffers and enable them. (cayman-SI).
- * Returns 0 for success, error for failure.
- */
-int cayman_dma_resume(struct radeon_device *rdev)
-{
- struct radeon_ring *ring;
- u32 rb_cntl, dma_cntl, ib_cntl;
- u32 rb_bufsz;
- u32 reg_offset, wb_offset;
- int i, r;
-
- /* Reset dma */
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
-
- for (i = 0; i < 2; i++) {
- if (i == 0) {
- ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
- reg_offset = DMA0_REGISTER_OFFSET;
- wb_offset = R600_WB_DMA_RPTR_OFFSET;
- } else {
- ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
- reg_offset = DMA1_REGISTER_OFFSET;
- wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
- }
-
- WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
- WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
-
- /* Set ring buffer size in dwords */
- rb_bufsz = drm_order(ring->ring_size / 4);
- rb_cntl = rb_bufsz << 1;
-#ifdef __BIG_ENDIAN
- rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
-#endif
- WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
-
- /* Initialize the ring buffer's read and write pointers */
- WREG32(DMA_RB_RPTR + reg_offset, 0);
- WREG32(DMA_RB_WPTR + reg_offset, 0);
-
- /* set the wb address whether it's enabled or not */
- WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
- upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
- WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
- ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
-
- if (rdev->wb.enabled)
- rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
-
- WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
-
- /* enable DMA IBs */
- ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
-#ifdef __BIG_ENDIAN
- ib_cntl |= DMA_IB_SWAP_ENABLE;
-#endif
- WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
-
- dma_cntl = RREG32(DMA_CNTL + reg_offset);
- dma_cntl &= ~CTXEMPTY_INT_ENABLE;
- WREG32(DMA_CNTL + reg_offset, dma_cntl);
-
- ring->wptr = 0;
- WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
-
- ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
-
- WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
-
- ring->ready = true;
-
- r = radeon_ring_test(rdev, ring->idx, ring);
- if (r) {
- ring->ready = false;
- return r;
- }
- }
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
-
- return 0;
-}
-
-/**
- * cayman_dma_fini - tear down the async dma engines
- *
- * @rdev: radeon_device pointer
- *
- * Stop the async dma engines and free the rings (cayman-SI).
- */
-void cayman_dma_fini(struct radeon_device *rdev)
-{
- cayman_dma_stop(rdev);
- radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
- radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
-}
-
-static u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
+u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev)
{
u32 reset_mask = 0;
u32 tmp;
@@ -2041,34 +1853,6 @@ bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
-/**
- * cayman_dma_is_lockup - Check if the DMA engine is locked up
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Check if the async DMA engine is locked up.
- * Returns true if the engine appears to be locked up, false if not.
- */
-bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
- u32 mask;
-
- if (ring->idx == R600_RING_TYPE_DMA_INDEX)
- mask = RADEON_RESET_DMA;
- else
- mask = RADEON_RESET_DMA1;
-
- if (!(reset_mask & mask)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force ring activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
-
static int cayman_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
@@ -2079,6 +1863,13 @@ static int cayman_startup(struct radeon_device *rdev)
/* enable aspm */
evergreen_program_aspm(rdev);
+ /* scratch needs to be initialized before MC */
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
+ evergreen_mc_program(rdev);
+
if (rdev->flags & RADEON_IS_IGP) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = ni_init_microcode(rdev);
@@ -2103,27 +1894,16 @@ static int cayman_startup(struct radeon_device *rdev)
}
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
- evergreen_mc_program(rdev);
r = cayman_pcie_gart_enable(rdev);
if (r)
return r;
cayman_gpu_init(rdev);
- r = evergreen_blit_init(rdev);
- if (r) {
- r600_blit_fini(rdev);
- rdev->asic->copy.copy = NULL;
- dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
- }
-
/* allocate rlc buffers */
if (rdev->flags & RADEON_IS_IGP) {
rdev->rlc.reg_list = tn_rlc_save_restore_register_list;
- rdev->rlc.reg_list_size = tn_rlc_save_restore_register_list_size;
+ rdev->rlc.reg_list_size =
+ (u32)ARRAY_SIZE(tn_rlc_save_restore_register_list);
rdev->rlc.cs_data = cayman_cs_data;
r = sumo_rlc_init(rdev);
if (r) {
@@ -2143,7 +1923,7 @@ static int cayman_startup(struct radeon_device *rdev)
return r;
}
- r = rv770_uvd_resume(rdev);
+ r = uvd_v2_2_resume(rdev);
if (!r) {
r = radeon_fence_driver_start_ring(rdev,
R600_RING_TYPE_UVD_INDEX);
@@ -2194,7 +1974,7 @@ static int cayman_startup(struct radeon_device *rdev)
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
CP_RB0_RPTR, CP_RB0_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
@@ -2202,7 +1982,7 @@ static int cayman_startup(struct radeon_device *rdev)
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
@@ -2210,7 +1990,7 @@ static int cayman_startup(struct radeon_device *rdev)
r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
@@ -2227,12 +2007,11 @@ static int cayman_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size,
- R600_WB_UVD_RPTR_OFFSET,
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (!r)
- r = r600_uvd_init(rdev);
+ r = uvd_v1_0_init(rdev);
if (r)
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
}
@@ -2249,9 +2028,15 @@ static int cayman_startup(struct radeon_device *rdev)
return r;
}
- r = r600_audio_init(rdev);
- if (r)
- return r;
+ if (ASIC_IS_DCE6(rdev)) {
+ r = dce6_audio_init(rdev);
+ if (r)
+ return r;
+ } else {
+ r = r600_audio_init(rdev);
+ if (r)
+ return r;
+ }
return 0;
}
@@ -2282,11 +2067,14 @@ int cayman_resume(struct radeon_device *rdev)
int cayman_suspend(struct radeon_device *rdev)
{
- r600_audio_fini(rdev);
+ if (ASIC_IS_DCE6(rdev))
+ dce6_audio_fini(rdev);
+ else
+ r600_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
cayman_cp_enable(rdev, false);
cayman_dma_stop(rdev);
- r600_uvd_rbc_stop(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
@@ -2408,7 +2196,6 @@ int cayman_init(struct radeon_device *rdev)
void cayman_fini(struct radeon_device *rdev)
{
- r600_blit_fini(rdev);
cayman_cp_fini(rdev);
cayman_dma_fini(rdev);
r600_irq_fini(rdev);
@@ -2418,6 +2205,7 @@ void cayman_fini(struct radeon_device *rdev)
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
@@ -2678,61 +2466,7 @@ void cayman_vm_set_page(struct radeon_device *rdev,
}
}
} else {
- if ((flags & RADEON_VM_PAGE_SYSTEM) ||
- (count == 1)) {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
- addr += incr;
- value |= r600_flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- while (ib->length_dw & 0x7)
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
- } else {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- if (flags & RADEON_VM_PAGE_VALID)
- value = addr;
- else
- value = 0;
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = r600_flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- pe += ndw * 4;
- addr += (ndw / 2) * incr;
- count -= ndw / 2;
- }
- }
- while (ib->length_dw & 0x7)
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
+ cayman_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
}
}
@@ -2766,26 +2500,3 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
}
-
-void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
-{
- struct radeon_ring *ring = &rdev->ring[ridx];
-
- if (vm == NULL)
- return;
-
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
- radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
-
- /* flush hdp cache */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
- radeon_ring_write(ring, 1);
-
- /* bits 0-7 are the VM contexts0-7 */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
- radeon_ring_write(ring, 1 << vm->id);
-}
-
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
new file mode 100644
index 00000000000..dd6e9688fbe
--- /dev/null
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "nid.h"
+
+u32 cayman_gpu_check_soft_reset(struct radeon_device *rdev);
+
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine. The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things. It also
+ * has support for tiling/detiling of buffers.
+ * Cayman and newer support two asynchronous DMA engines.
+ */
+
+/**
+ * cayman_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (cayman-SI).
+ */
+void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
+ struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+}
+
+/**
+ * cayman_dma_stop - stop the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines (cayman-SI).
+ */
+void cayman_dma_stop(struct radeon_device *rdev)
+{
+ u32 rb_cntl;
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+ /* dma0 */
+ rb_cntl = RREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET);
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA0_REGISTER_OFFSET, rb_cntl);
+
+ /* dma1 */
+ rb_cntl = RREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET);
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL + DMA1_REGISTER_OFFSET, rb_cntl);
+
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
+}
+
+/**
+ * cayman_dma_resume - setup and start the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffers and enable them. (cayman-SI).
+ * Returns 0 for success, error for failure.
+ */
+int cayman_dma_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring;
+ u32 rb_cntl, dma_cntl, ib_cntl;
+ u32 rb_bufsz;
+ u32 reg_offset, wb_offset;
+ int i, r;
+
+ /* Reset dma */
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA | SOFT_RESET_DMA1);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0) {
+ ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ reg_offset = DMA0_REGISTER_OFFSET;
+ wb_offset = R600_WB_DMA_RPTR_OFFSET;
+ } else {
+ ring = &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX];
+ reg_offset = DMA1_REGISTER_OFFSET;
+ wb_offset = CAYMAN_WB_DMA1_RPTR_OFFSET;
+ }
+
+ WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL + reg_offset, 0);
+ WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL + reg_offset, 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(DMA_RB_CNTL + reg_offset, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(DMA_RB_RPTR + reg_offset, 0);
+ WREG32(DMA_RB_WPTR + reg_offset, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(DMA_RB_RPTR_ADDR_HI + reg_offset,
+ upper_32_bits(rdev->wb.gpu_addr + wb_offset) & 0xFF);
+ WREG32(DMA_RB_RPTR_ADDR_LO + reg_offset,
+ ((rdev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC));
+
+ if (rdev->wb.enabled)
+ rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(DMA_RB_BASE + reg_offset, ring->gpu_addr >> 8);
+
+ /* enable DMA IBs */
+ ib_cntl = DMA_IB_ENABLE | CMD_VMID_FORCE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+ WREG32(DMA_IB_CNTL + reg_offset, ib_cntl);
+
+ dma_cntl = RREG32(DMA_CNTL + reg_offset);
+ dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+ WREG32(DMA_CNTL + reg_offset, dma_cntl);
+
+ ring->wptr = 0;
+ WREG32(DMA_RB_WPTR + reg_offset, ring->wptr << 2);
+
+ ring->rptr = RREG32(DMA_RB_RPTR + reg_offset) >> 2;
+
+ WREG32(DMA_RB_CNTL + reg_offset, rb_cntl | DMA_RB_ENABLE);
+
+ ring->ready = true;
+
+ r = radeon_ring_test(rdev, ring->idx, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+ }
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ return 0;
+}
+
+/**
+ * cayman_dma_fini - tear down the async dma engines
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engines and free the rings (cayman-SI).
+ */
+void cayman_dma_fini(struct radeon_device *rdev)
+{
+ cayman_dma_stop(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+ radeon_ring_fini(rdev, &rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX]);
+}
+
+/**
+ * cayman_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = cayman_gpu_check_soft_reset(rdev);
+ u32 mask;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ mask = RADEON_RESET_DMA;
+ else
+ mask = RADEON_RESET_DMA1;
+
+ if (!(reset_mask & mask)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * cayman_dma_vm_set_page - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+ uint64_t value;
+ unsigned ndw;
+
+ if ((flags & RADEON_VM_PAGE_SYSTEM) || (count == 1)) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ }
+ }
+ } else {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & RADEON_VM_PAGE_VALID)
+ value = addr;
+ else
+ value = 0;
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
+ }
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
+}
+
+void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ if (vm == NULL)
+ return;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* flush hdp cache */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+
+ /* bits 0-7 are the VM contexts0-7 */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 1 << vm->id);
+}
+
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 559cf24d51a..f7b625c9e0e 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -769,7 +769,8 @@ bool ni_dpm_vblank_too_short(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
u32 vblank_time = r600_dpm_get_vblank_time(rdev);
- u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
+ /* we never hit the non-gddr5 limit so disable it */
+ u32 switch_limit = pi->mem_gddr5 ? 450 : 0;
if (vblank_time < switch_limit)
return true;
@@ -1054,10 +1055,6 @@ static int ni_restrict_performance_levels_before_switch(struct radeon_device *rd
int ni_dpm_force_performance_level(struct radeon_device *rdev,
enum radeon_dpm_forced_level level)
{
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
- struct ni_ps *ps = ni_get_ps(rps);
- u32 levels;
-
if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
@@ -1068,8 +1065,7 @@ int ni_dpm_force_performance_level(struct radeon_device *rdev,
if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
- levels = ps->performance_level_count - 1;
- if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+ if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
return -EINVAL;
} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
@@ -4042,6 +4038,7 @@ static int ni_parse_power_table(struct radeon_device *rdev)
(power_state->v1.ucNonClockStateIndex *
power_info->pplib.ucNonClockSize));
if (power_info->pplib.ucStateEntrySize - 1) {
+ u8 *idx;
ps = kzalloc(sizeof(struct ni_ps), GFP_KERNEL);
if (ps == NULL) {
kfree(rdev->pm.dpm.ps);
@@ -4051,12 +4048,12 @@ static int ni_parse_power_table(struct radeon_device *rdev)
ni_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
non_clock_info,
power_info->pplib.ucNonClockSize);
+ idx = (u8 *)&power_state->v1.ucClockStateIndices[0];
for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
clock_info = (union pplib_clock_info *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
- (power_state->v1.ucClockStateIndices[j] *
- power_info->pplib.ucClockInfoSize));
+ (idx[j] * power_info->pplib.ucClockInfoSize));
ni_parse_pplib_clock_info(rdev,
&rdev->pm.dpm.ps[i], j,
clock_info);
@@ -4072,9 +4069,6 @@ int ni_dpm_init(struct radeon_device *rdev)
struct rv7xx_power_info *pi;
struct evergreen_power_info *eg_pi;
struct ni_power_info *ni_pi;
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- u16 data_offset, size;
- u8 frev, crev;
struct atom_clock_dividers dividers;
int ret;
@@ -4167,16 +4161,7 @@ int ni_dpm_init(struct radeon_device *rdev)
eg_pi->vddci_control =
radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0);
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
- pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- pi->dynamic_ss = true;
- }
+ rv770_get_engine_memory_ss(rdev);
pi->asi = RV770_ASI_DFLT;
pi->pasi = CYPRESS_HASI_DFLT;
@@ -4193,8 +4178,7 @@ int ni_dpm_init(struct radeon_device *rdev)
pi->dynamic_pcie_gen2 = true;
- if (pi->gfx_clock_gating &&
- (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
pi->thermal_protection = true;
else
pi->thermal_protection = false;
@@ -4288,6 +4272,12 @@ int ni_dpm_init(struct radeon_device *rdev)
ni_pi->use_power_boost_limit = true;
+ /* make sure dc limits are valid */
+ if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+ (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
index b5564a3645d..682842804bc 100644
--- a/drivers/gpu/drm/radeon/ppsmc.h
+++ b/drivers/gpu/drm/radeon/ppsmc.h
@@ -99,11 +99,68 @@ typedef uint8_t PPSMC_Result;
#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96)
#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97)
+/* CI/KV/KB */
+#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D)
+#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E)
+#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F)
+#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130)
+#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131)
+#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132)
+#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135)
+#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136)
+#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
+#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137)
+#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138)
+#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139)
+#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a)
+#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
+#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140)
+#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141)
+#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145)
+#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146)
+#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147)
+#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148)
+#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a)
+#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e)
+#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f)
+#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150)
+#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151)
+#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154)
+#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155)
+#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156)
+#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157)
+#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158)
+#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159)
+#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a)
+#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b)
+#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f)
+#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162)
+#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167)
+#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169)
+#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a)
+#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185)
+#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186)
+#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187)
+#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188)
+#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189)
+#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A)
+#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B)
+#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C)
+#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F)
+#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
+#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
+
+#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200)
+#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201)
+
/* TN */
#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102)
#define PPSMC_MSG_DPM_ForceState ((uint32_t) 0x104)
#define PPSMC_MSG_PG_SIMD_Config ((uint32_t) 0x108)
#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112)
+#define PPSMC_MSG_Voltage_Cntl_Enable ((uint32_t) 0x109)
+#define PPSMC_MSG_VCEPowerOFF ((uint32_t) 0x10e)
+#define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f)
#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e)
#define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124)
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
new file mode 100644
index 00000000000..da43ab32883
--- /dev/null
+++ b/drivers/gpu/drm/radeon/pptable.h
@@ -0,0 +1,682 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _PPTABLE_H
+#define _PPTABLE_H
+
+#pragma pack(push, 1)
+
+typedef struct _ATOM_PPLIB_THERMALCONTROLLER
+{
+ UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_*
+ UCHAR ucI2cLine; // as interpreted by DAL I2C
+ UCHAR ucI2cAddress;
+ UCHAR ucFanParameters; // Fan Control Parameters.
+ UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only.
+ UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only.
+ UCHAR ucReserved; // ----
+ UCHAR ucFlags; // to be defined
+} ATOM_PPLIB_THERMALCONTROLLER;
+
+#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
+#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller.
+
+#define ATOM_PP_THERMALCONTROLLER_NONE 0
+#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_LM64 5
+#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib
+#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
+#define ATOM_PP_THERMALCONTROLLER_RV770 8
+#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
+#define ATOM_PP_THERMALCONTROLLER_KONG 10
+#define ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO 11
+#define ATOM_PP_THERMALCONTROLLER_EVERGREEN 12
+#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
+#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally
+#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
+#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16
+#define ATOM_PP_THERMALCONTROLLER_LM96163 17
+#define ATOM_PP_THERMALCONTROLLER_CISLANDS 18
+#define ATOM_PP_THERMALCONTROLLER_KAVERI 19
+
+
+// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
+// We probably should reserve the bit 0x80 for this use.
+// To keep the number of these types low we should also use the same code for all ASICs (i.e. do not distinguish RV6xx and RV7xx Internal here).
+// The driver can pick the correct internal controller based on the ASIC.
+
+#define ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL 0x89 // ADT7473 Fan Control + Internal Thermal Controller
+#define ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL 0x8D // EMC2103 Fan Control + Internal Thermal Controller
+
+typedef struct _ATOM_PPLIB_STATE
+{
+ UCHAR ucNonClockStateIndex;
+ UCHAR ucClockStateIndices[1]; // variable-sized
+} ATOM_PPLIB_STATE;
+
+
+typedef struct _ATOM_PPLIB_FANTABLE
+{
+ UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same.
+ UCHAR ucTHyst; // Temperature hysteresis. Integer.
+ USHORT usTMin; // The temperature, in units of 0.01 degrees Celsius, below which we just run at a minimal PWM.
+ USHORT usTMed; // The middle temperature where we change slopes.
+ USHORT usTHigh; // The high point above TMed for adjusting the second slope.
+ USHORT usPWMMin; // The minimum PWM value in percent (0.01% increments).
+ USHORT usPWMMed; // The PWM value (in percent) at TMed.
+ USHORT usPWMHigh; // The PWM value at THigh.
+} ATOM_PPLIB_FANTABLE;
+
+typedef struct _ATOM_PPLIB_FANTABLE2
+{
+ ATOM_PPLIB_FANTABLE basicTable;
+ USHORT usTMax; // The max temperature
+} ATOM_PPLIB_FANTABLE2;
+
+typedef struct _ATOM_PPLIB_EXTENDEDHEADER
+{
+ USHORT usSize;
+ ULONG ulMaxEngineClock; // For Overdrive.
+ ULONG ulMaxMemoryClock; // For Overdrive.
+ // Add extra system parameters here, always adjust size to include all fields.
+ USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
+ USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table
+ USHORT usSAMUTableOffset; //points to ATOM_PPLIB_SAMU_Table
+ USHORT usPPMTableOffset; //points to ATOM_PPLIB_PPM_Table
+ USHORT usACPTableOffset; //points to ATOM_PPLIB_ACP_Table
+ USHORT usPowerTuneTableOffset; //points to ATOM_PPLIB_POWERTUNE_Table
+} ATOM_PPLIB_EXTENDEDHEADER;
+
+//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
+#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
+#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
+#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
+#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
+#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
+#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
+#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
+#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
+#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
+#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
+#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
+#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
+#define ATOM_PP_PLATFORM_CAP_MVDDCONTROL 4096
+#define ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT 0x2000 // Go to boot state on alerts, e.g. on an AC->DC transition.
+#define ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT 0x4000 // Do NOT wait for VBLANK during an alert (e.g. AC->DC transition).
+#define ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL 0x8000 // Does the driver control VDDCI independently from VDDC.
+#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
+#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver support BACO state.
+#define ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE 0x00040000 // Does the driver support the new CAC voltage table.
+#define ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY 0x00080000 // Does the driver support reverting GPIO5 polarity.
+#define ATOM_PP_PLATFORM_CAP_OUTPUT_THERMAL2GPIO17 0x00100000 // Does the driver support thermal2GPIO17.
+#define ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE 0x00200000 // Does the driver support a configurable VR HOT GPIO.
+#define ATOM_PP_PLATFORM_CAP_TEMP_INVERSION 0x00400000 // Does the driver support the Temp Inversion feature.
+#define ATOM_PP_PLATFORM_CAP_EVV 0x00800000
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+
+ UCHAR ucDataRevision;
+
+ UCHAR ucNumStates;
+ UCHAR ucStateEntrySize;
+ UCHAR ucClockInfoSize;
+ UCHAR ucNonClockSize;
+
+ // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
+ USHORT usStateArrayOffset;
+
+ // offset from start of this table to array of ASIC-specific structures,
+ // currently ATOM_PPLIB_CLOCK_INFO.
+ USHORT usClockInfoArrayOffset;
+
+ // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
+ USHORT usNonClockInfoArrayOffset;
+
+ USHORT usBackbiasTime; // in microseconds
+ USHORT usVoltageTime; // in microseconds
+ USHORT usTableSize; //the size of this structure, or the extended structure
+
+ ULONG ulPlatformCaps; // See ATOM_PPLIB_CAPS_*
+
+ ATOM_PPLIB_THERMALCONTROLLER sThermalController;
+
+ USHORT usBootClockInfoOffset;
+ USHORT usBootNonClockInfoOffset;
+
+} ATOM_PPLIB_POWERPLAYTABLE;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE2
+{
+ ATOM_PPLIB_POWERPLAYTABLE basicTable;
+ UCHAR ucNumCustomThermalPolicy;
+ USHORT usCustomThermalPolicyArrayOffset;
+}ATOM_PPLIB_POWERPLAYTABLE2, *LPATOM_PPLIB_POWERPLAYTABLE2;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE3
+{
+ ATOM_PPLIB_POWERPLAYTABLE2 basicTable2;
+ USHORT usFormatID; // To be used ONLY by PPGen.
+ USHORT usFanTableOffset;
+ USHORT usExtendendedHeaderOffset;
+} ATOM_PPLIB_POWERPLAYTABLE3, *LPATOM_PPLIB_POWERPLAYTABLE3;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE4
+{
+ ATOM_PPLIB_POWERPLAYTABLE3 basicTable3;
+ ULONG ulGoldenPPID; // PPGen use only
+ ULONG ulGoldenRevision; // PPGen use only
+ USHORT usVddcDependencyOnSCLKOffset;
+ USHORT usVddciDependencyOnMCLKOffset;
+ USHORT usVddcDependencyOnMCLKOffset;
+ USHORT usMaxClockVoltageOnDCOffset;
+ USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
+ USHORT usMvddDependencyOnMCLKOffset;
+} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
+
+typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
+{
+ ATOM_PPLIB_POWERPLAYTABLE4 basicTable4;
+ ULONG ulTDPLimit;
+ ULONG ulNearTDPLimit;
+ ULONG ulSQRampingThreshold;
+ USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table
+ ULONG ulCACLeakage; // The iLeakage for driver calculated CAC leakage table
+ USHORT usTDPODLimit;
+ USHORT usLoadLineSlope; // in milliOhms * 100
+} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
+#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
+#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
+#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0
+#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1
+#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3
+#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5
+// 2, 4, 6, 7 are reserved
+
+#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008
+#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010
+#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020
+#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040
+#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080
+#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100
+#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200
+#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
+#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
+#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
+#define ATOM_PPLIB_CLASSIFICATION_HD2STATE 0x2000
+#define ATOM_PPLIB_CLASSIFICATION_HDSTATE 0x4000
+#define ATOM_PPLIB_CLASSIFICATION_SDSTATE 0x8000
+
+//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
+#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
+#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002
+#define ATOM_PPLIB_CLASSIFICATION2_MVC 0x0004 //Multi-View Codec (BD-3D)
+
+//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
+#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
+#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
+
+// 0 is 2.5Gb/s, 1 is 5Gb/s
+#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004
+#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2
+
+// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8
+#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3
+
+// lookup into reduced refresh-rate table
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
+
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0
+#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1
+// 2-15 TBD as needed.
+
+#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
+#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
+
+#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
+
+#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
+
+//memory related flags
+#define ATOM_PPLIB_SWSTATE_MEMORY_DLL_OFF 0x000010000
+
+//M3 Arb //2 bits, currently 3 sets of parameters in total
+#define ATOM_PPLIB_M3ARB_MASK 0x00060000
+#define ATOM_PPLIB_M3ARB_SHIFT 17
+
+#define ATOM_PPLIB_ENABLE_DRR 0x00080000
+
+// remaining 16 bits are reserved
+typedef struct _ATOM_PPLIB_THERMAL_STATE
+{
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucThermalAction;
+}ATOM_PPLIB_THERMAL_STATE, *LPATOM_PPLIB_THERMAL_STATE;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
+#define ATOM_PPLIB_NONCLOCKINFO_VER1 12
+#define ATOM_PPLIB_NONCLOCKINFO_VER2 24
+typedef struct _ATOM_PPLIB_NONCLOCK_INFO
+{
+ USHORT usClassification;
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ ULONG ulCapsAndSettings;
+ UCHAR ucRequiredPower;
+ USHORT usClassification2;
+ ULONG ulVCLK;
+ ULONG ulDCLK;
+ UCHAR ucUnused[5];
+} ATOM_PPLIB_NONCLOCK_INFO;
+
+// Contained in an array starting at the offset
+// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
+// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
+typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usUnused1;
+ USHORT usUnused2;
+
+ ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_R600_CLOCK_INFO;
+
+// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
+#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1
+#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2
+#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
+#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
+#define ATOM_PPLIB_R600_FLAGS_LOWPOWER 32 // On the RV770 use 'low power' setting (sequencer S0).
+
+typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
+{
+ USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600).
+ UCHAR ucLowEngineClockHigh;
+ USHORT usHighEngineClockLow; // High Engine clock in MHz.
+ UCHAR ucHighEngineClockHigh;
+ USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
+ UCHAR ucMemoryClockHigh; // Currently unused.
+ UCHAR ucPadding; // For proper alignment and size.
+ USHORT usVDDC; // For the 780, use: None, Low, High, Variable
+ UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
+ UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could
+ USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
+ ULONG ulFlags;
+} ATOM_PPLIB_RS780_CLOCK_INFO;
+
+#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
+#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
+#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
+#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
+
+#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
+#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
+#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
+
+#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
+#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
+#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
+
+typedef struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usVDDCI;
+ USHORT usUnused;
+
+ ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
+
+} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ USHORT usVDDC;
+ USHORT usVDDCI;
+ UCHAR ucPCIEGen;
+ UCHAR ucUnused1;
+
+ ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
+
+} ATOM_PPLIB_SI_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_CI_CLOCK_INFO
+{
+ USHORT usEngineClockLow;
+ UCHAR ucEngineClockHigh;
+
+ USHORT usMemoryClockLow;
+ UCHAR ucMemoryClockHigh;
+
+ UCHAR ucPCIEGen;
+ USHORT usPCIELane;
+} ATOM_PPLIB_CI_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_SUMO_CLOCK_INFO{
+ USHORT usEngineClockLow; //clock frequency & 0xFFFF. The unit is 10 kHz
+ UCHAR ucEngineClockHigh; //clock frequency >> 16.
+ UCHAR vddcIndex; //2-bit vddc index;
+ USHORT tdpLimit;
+ //please initialize to 0
+ USHORT rsv1;
+ //please initialize to 0s
+ ULONG rsv2[2];
+}ATOM_PPLIB_SUMO_CLOCK_INFO;
+
+typedef struct _ATOM_PPLIB_STATE_V2
+{
+ //number of valid dpm levels in this state; Driver uses it to calculate the whole
+ //size of the state: sizeof(ATOM_PPLIB_STATE_V2) + (ucNumDPMLevels - 1) * sizeof(UCHAR)
+ UCHAR ucNumDPMLevels;
+
+ //an index into the array of nonClockInfos
+ UCHAR nonClockInfoIndex;
+ /**
+ * Driver will read the first ucNumDPMLevels in this array
+ */
+ UCHAR clockInfoIndex[1];
+} ATOM_PPLIB_STATE_V2;
+
+typedef struct _StateArray{
+ //how many states we have
+ UCHAR ucNumEntries;
+
+ ATOM_PPLIB_STATE_V2 states[1];
+}StateArray;
+
+
+typedef struct _ClockInfoArray{
+ //how many clock levels we have
+ UCHAR ucNumEntries;
+
+ //sizeof(ATOM_PPLIB_CLOCK_INFO)
+ UCHAR ucEntrySize;
+
+ UCHAR clockInfo[1];
+}ClockInfoArray;
+
+typedef struct _NonClockInfoArray{
+
+ //how many non-clock levels we have. normally should be same as number of states
+ UCHAR ucNumEntries;
+ //sizeof(ATOM_PPLIB_NONCLOCK_INFO)
+ UCHAR ucEntrySize;
+
+ ATOM_PPLIB_NONCLOCK_INFO nonClockInfo[1];
+}NonClockInfoArray;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Record
+{
+ USHORT usClockLow;
+ UCHAR ucClockHigh;
+ USHORT usVoltage;
+}ATOM_PPLIB_Clock_Voltage_Dependency_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Dependency_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_Clock_Voltage_Dependency_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Dependency_Table;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Record
+{
+ USHORT usSclkLow;
+ UCHAR ucSclkHigh;
+ USHORT usMclkLow;
+ UCHAR ucMclkHigh;
+ USHORT usVddc;
+ USHORT usVddci;
+}ATOM_PPLIB_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_Clock_Voltage_Limit_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_Clock_Voltage_Limit_Table;
+
+union _ATOM_PPLIB_CAC_Leakage_Record
+{
+ struct
+ {
+ USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations; for CI and newer, we use this as the real VDDC value. In CI we read it as StdVoltageHiSidd
+ ULONG ulLeakageValue; // For CI and newer we use this as the "fake" standard VDDC value. In CI we read it as StdVoltageLoSidd
+
+ };
+ struct
+ {
+ USHORT usVddc1;
+ USHORT usVddc2;
+ USHORT usVddc3;
+ };
+};
+
+typedef union _ATOM_PPLIB_CAC_Leakage_Record ATOM_PPLIB_CAC_Leakage_Record;
+
+typedef struct _ATOM_PPLIB_CAC_Leakage_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_CAC_Leakage_Table;
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
+{
+ USHORT usVoltage;
+ USHORT usSclkLow;
+ UCHAR ucSclkHigh;
+ USHORT usMclkLow;
+ UCHAR ucMclkHigh;
+}ATOM_PPLIB_PhaseSheddingLimits_Record;
+
+typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
+{
+ UCHAR ucNumEntries; // Number of entries.
+ ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries.
+}ATOM_PPLIB_PhaseSheddingLimits_Table;
+
+typedef struct _VCEClockInfo{
+ USHORT usEVClkLow;
+ UCHAR ucEVClkHigh;
+ USHORT usECClkLow;
+ UCHAR ucECClkHigh;
+}VCEClockInfo;
+
+typedef struct _VCEClockInfoArray{
+ UCHAR ucNumEntries;
+ VCEClockInfo entries[1];
+}VCEClockInfoArray;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
+{
+ USHORT usVoltage;
+ UCHAR ucVCEClockInfoIndex;
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
+{
+ UCHAR numEntries;
+ ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_VCE_State_Record
+{
+ UCHAR ucVCEClockInfoIndex;
+ UCHAR ucClockInfoIndex; //highest 2 bits indicate the memory p-state, lower 6 bits are an index into ClockInfoArray
+}ATOM_PPLIB_VCE_State_Record;
+
+typedef struct _ATOM_PPLIB_VCE_State_Table
+{
+ UCHAR numEntries;
+ ATOM_PPLIB_VCE_State_Record entries[1];
+}ATOM_PPLIB_VCE_State_Table;
+
+
+typedef struct _ATOM_PPLIB_VCE_Table
+{
+ UCHAR revid;
+// VCEClockInfoArray array;
+// ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
+// ATOM_PPLIB_VCE_State_Table states;
+}ATOM_PPLIB_VCE_Table;
+
+
+typedef struct _UVDClockInfo{
+ USHORT usVClkLow;
+ UCHAR ucVClkHigh;
+ USHORT usDClkLow;
+ UCHAR ucDClkHigh;
+}UVDClockInfo;
+
+typedef struct _UVDClockInfoArray{
+ UCHAR ucNumEntries;
+ UVDClockInfo entries[1];
+}UVDClockInfoArray;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
+{
+ USHORT usVoltage;
+ UCHAR ucUVDClockInfoIndex;
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
+{
+ UCHAR numEntries;
+ ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_UVD_Table
+{
+ UCHAR revid;
+// UVDClockInfoArray array;
+// ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
+}ATOM_PPLIB_UVD_Table;
+
+typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Record
+{
+ USHORT usVoltage;
+ USHORT usSAMClockLow;
+ UCHAR ucSAMClockHigh;
+}ATOM_PPLIB_SAMClk_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_SAMClk_Voltage_Limit_Table{
+ UCHAR numEntries;
+ ATOM_PPLIB_SAMClk_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_SAMClk_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_SAMU_Table
+{
+ UCHAR revid;
+ ATOM_PPLIB_SAMClk_Voltage_Limit_Table limits;
+}ATOM_PPLIB_SAMU_Table;
+
+typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Record
+{
+ USHORT usVoltage;
+ USHORT usACPClockLow;
+ UCHAR ucACPClockHigh;
+}ATOM_PPLIB_ACPClk_Voltage_Limit_Record;
+
+typedef struct _ATOM_PPLIB_ACPClk_Voltage_Limit_Table{
+ UCHAR numEntries;
+ ATOM_PPLIB_ACPClk_Voltage_Limit_Record entries[1];
+}ATOM_PPLIB_ACPClk_Voltage_Limit_Table;
+
+typedef struct _ATOM_PPLIB_ACP_Table
+{
+ UCHAR revid;
+ ATOM_PPLIB_ACPClk_Voltage_Limit_Table limits;
+}ATOM_PPLIB_ACP_Table;
+
+typedef struct _ATOM_PowerTune_Table{
+ USHORT usTDP;
+ USHORT usConfigurableTDP;
+ USHORT usTDC;
+ USHORT usBatteryPowerLimit;
+ USHORT usSmallPowerLimit;
+ USHORT usLowCACLeakage;
+ USHORT usHighCACLeakage;
+}ATOM_PowerTune_Table;
+
+typedef struct _ATOM_PPLIB_POWERTUNE_Table
+{
+ UCHAR revid;
+ ATOM_PowerTune_Table power_tune_table;
+}ATOM_PPLIB_POWERTUNE_Table;
+
+typedef struct _ATOM_PPLIB_POWERTUNE_Table_V1
+{
+ UCHAR revid;
+ ATOM_PowerTune_Table power_tune_table;
+ USHORT usMaximumPowerDeliveryLimit;
+ USHORT usReserve[7];
+} ATOM_PPLIB_POWERTUNE_Table_V1;
+
+#define ATOM_PPM_A_A 1
+#define ATOM_PPM_A_I 2
+typedef struct _ATOM_PPLIB_PPM_Table
+{
+ UCHAR ucRevId;
+ UCHAR ucPpmDesign; //A+I or A+A
+ USHORT usCpuCoreNumber;
+ ULONG ulPlatformTDP;
+ ULONG ulSmallACPlatformTDP;
+ ULONG ulPlatformTDC;
+ ULONG ulSmallACPlatformTDC;
+ ULONG ulApuTDP;
+ ULONG ulDGpuTDP;
+ ULONG ulDGpuUlvPower;
+ ULONG ulTjmax;
+} ATOM_PPLIB_PPM_Table;
+
+#pragma pack(pop)
+
+#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 75349cdaa84..9fc61dd68bc 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1097,12 +1097,12 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
}
/* Align ring size */
- rb_bufsz = drm_order(ring_size / 8);
+ rb_bufsz = order_base_2(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
r100_cp_load_microcode(rdev);
r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
- 0, 0x7fffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 393880a0941..ea4d3734e6d 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1374,7 +1374,7 @@ static bool r600_is_display_hung(struct radeon_device *rdev)
return true;
}
-static u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
+u32 r600_gpu_check_soft_reset(struct radeon_device *rdev)
{
u32 reset_mask = 0;
u32 tmp;
@@ -1622,28 +1622,6 @@ bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
-/**
- * r600_dma_is_lockup - Check if the DMA engine is locked up
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Check if the async DMA engine is locked up.
- * Returns true if the engine appears to be locked up, false if not.
- */
-bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 reset_mask = r600_gpu_check_soft_reset(rdev);
-
- if (!(reset_mask & RADEON_RESET_DMA)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force ring activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
-
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
@@ -2299,9 +2277,13 @@ int r600_init_microcode(struct radeon_device *rdev)
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->smc_fw->size != smc_req_size) {
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ } else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR
"smc: Bogus length %zu in firmware \"%s\"\n",
rdev->smc_fw->size, fw_name);
@@ -2413,8 +2395,8 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(GRBM_SOFT_RESET, 0);
/* Set ring buffer size */
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -2467,7 +2449,7 @@ void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsign
int r;
/* Align ring size */
- rb_bufsz = drm_order(ring_size / 8);
+ rb_bufsz = order_base_2(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
ring->ring_size = ring_size;
ring->align_mask = 16 - 1;
@@ -2490,327 +2472,6 @@ void r600_cp_fini(struct radeon_device *rdev)
}
/*
- * DMA
- * Starting with R600, the GPU has an asynchronous
- * DMA engine. The programming model is very similar
- * to the 3D engine (ring buffer, IBs, etc.), but the
- * DMA controller has it's own packet format that is
- * different form the PM4 format used by the 3D engine.
- * It supports copying data, writing embedded data,
- * solid fills, and a number of other things. It also
- * has support for tiling/detiling of buffers.
- */
-/**
- * r600_dma_stop - stop the async dma engine
- *
- * @rdev: radeon_device pointer
- *
- * Stop the async dma engine (r6xx-evergreen).
- */
-void r600_dma_stop(struct radeon_device *rdev)
-{
- u32 rb_cntl = RREG32(DMA_RB_CNTL);
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
-
- rb_cntl &= ~DMA_RB_ENABLE;
- WREG32(DMA_RB_CNTL, rb_cntl);
-
- rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
-}
-
-/**
- * r600_dma_resume - setup and start the async dma engine
- *
- * @rdev: radeon_device pointer
- *
- * Set up the DMA ring buffer and enable it. (r6xx-evergreen).
- * Returns 0 for success, error for failure.
- */
-int r600_dma_resume(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
- u32 rb_cntl, dma_cntl, ib_cntl;
- u32 rb_bufsz;
- int r;
-
- /* Reset dma */
- if (rdev->family >= CHIP_RV770)
- WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
- else
- WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
- RREG32(SRBM_SOFT_RESET);
- udelay(50);
- WREG32(SRBM_SOFT_RESET, 0);
-
- WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
- WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
-
- /* Set ring buffer size in dwords */
- rb_bufsz = drm_order(ring->ring_size / 4);
- rb_cntl = rb_bufsz << 1;
-#ifdef __BIG_ENDIAN
- rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
-#endif
- WREG32(DMA_RB_CNTL, rb_cntl);
-
- /* Initialize the ring buffer's read and write pointers */
- WREG32(DMA_RB_RPTR, 0);
- WREG32(DMA_RB_WPTR, 0);
-
- /* set the wb address whether it's enabled or not */
- WREG32(DMA_RB_RPTR_ADDR_HI,
- upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
- WREG32(DMA_RB_RPTR_ADDR_LO,
- ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
-
- if (rdev->wb.enabled)
- rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
-
- WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
-
- /* enable DMA IBs */
- ib_cntl = DMA_IB_ENABLE;
-#ifdef __BIG_ENDIAN
- ib_cntl |= DMA_IB_SWAP_ENABLE;
-#endif
- WREG32(DMA_IB_CNTL, ib_cntl);
-
- dma_cntl = RREG32(DMA_CNTL);
- dma_cntl &= ~CTXEMPTY_INT_ENABLE;
- WREG32(DMA_CNTL, dma_cntl);
-
- if (rdev->family >= CHIP_RV770)
- WREG32(DMA_MODE, 1);
-
- ring->wptr = 0;
- WREG32(DMA_RB_WPTR, ring->wptr << 2);
-
- ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
-
- WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
-
- ring->ready = true;
-
- r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
- if (r) {
- ring->ready = false;
- return r;
- }
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
-
- return 0;
-}
-
-/**
- * r600_dma_fini - tear down the async dma engine
- *
- * @rdev: radeon_device pointer
- *
- * Stop the async dma engine and free the ring (r6xx-evergreen).
- */
-void r600_dma_fini(struct radeon_device *rdev)
-{
- r600_dma_stop(rdev);
- radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
-}
-
-/*
- * UVD
- */
-int r600_uvd_rbc_start(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
- uint64_t rptr_addr;
- uint32_t rb_bufsz, tmp;
- int r;
-
- rptr_addr = rdev->wb.gpu_addr + R600_WB_UVD_RPTR_OFFSET;
-
- if (upper_32_bits(rptr_addr) != upper_32_bits(ring->gpu_addr)) {
- DRM_ERROR("UVD ring and rptr not in the same 4GB segment!\n");
- return -EINVAL;
- }
-
- /* force RBC into idle state */
- WREG32(UVD_RBC_RB_CNTL, 0x11010101);
-
- /* Set the write pointer delay */
- WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
-
- /* set the wb address */
- WREG32(UVD_RBC_RB_RPTR_ADDR, rptr_addr >> 2);
-
- /* programm the 4GB memory segment for rptr and ring buffer */
- WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(rptr_addr) |
- (0x7 << 16) | (0x1 << 31));
-
- /* Initialize the ring buffer's read and write pointers */
- WREG32(UVD_RBC_RB_RPTR, 0x0);
-
- ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
- WREG32(UVD_RBC_RB_WPTR, ring->wptr);
-
- /* set the ring address */
- WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
-
- /* Set ring buffer size */
- rb_bufsz = drm_order(ring->ring_size);
- rb_bufsz = (0x1 << 8) | rb_bufsz;
- WREG32(UVD_RBC_RB_CNTL, rb_bufsz);
-
- ring->ready = true;
- r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
- if (r) {
- ring->ready = false;
- return r;
- }
-
- r = radeon_ring_lock(rdev, ring, 10);
- if (r) {
- DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
- return r;
- }
-
- tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
- radeon_ring_write(ring, tmp);
- radeon_ring_write(ring, 0xFFFFF);
-
- tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
- radeon_ring_write(ring, tmp);
- radeon_ring_write(ring, 0xFFFFF);
-
- tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
- radeon_ring_write(ring, tmp);
- radeon_ring_write(ring, 0xFFFFF);
-
- /* Clear timeout status bits */
- radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
- radeon_ring_write(ring, 0x8);
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
- radeon_ring_write(ring, 3);
-
- radeon_ring_unlock_commit(rdev, ring);
-
- return 0;
-}
-
-void r600_uvd_rbc_stop(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
-
- /* force RBC into idle state */
- WREG32(UVD_RBC_RB_CNTL, 0x11010101);
- ring->ready = false;
-}
-
-int r600_uvd_init(struct radeon_device *rdev)
-{
- int i, j, r;
- /* disable byte swapping */
- u32 lmi_swap_cntl = 0;
- u32 mp_swap_cntl = 0;
-
- /* raise clocks while booting up the VCPU */
- radeon_set_uvd_clocks(rdev, 53300, 40000);
-
- /* disable clock gating */
- WREG32(UVD_CGC_GATE, 0);
-
- /* disable interrupt */
- WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
-
- /* put LMI, VCPU, RBC etc... into reset */
- WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
- LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
- CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
- mdelay(5);
-
- /* take UVD block out of reset */
- WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
- mdelay(5);
-
- /* initialize UVD memory controller */
- WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
- (1 << 21) | (1 << 9) | (1 << 20));
-
-#ifdef __BIG_ENDIAN
- /* swap (8 in 32) RB and IB */
- lmi_swap_cntl = 0xa;
- mp_swap_cntl = 0;
-#endif
- WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
- WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
-
- WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
- WREG32(UVD_MPC_SET_MUXA1, 0x0);
- WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
- WREG32(UVD_MPC_SET_MUXB1, 0x0);
- WREG32(UVD_MPC_SET_ALU, 0);
- WREG32(UVD_MPC_SET_MUX, 0x88);
-
- /* Stall UMC */
- WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
- WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
-
- /* take all subblocks out of reset, except VCPU */
- WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
- mdelay(5);
-
- /* enable VCPU clock */
- WREG32(UVD_VCPU_CNTL, 1 << 9);
-
- /* enable UMC */
- WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
-
- /* boot up the VCPU */
- WREG32(UVD_SOFT_RESET, 0);
- mdelay(10);
-
- WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
-
- for (i = 0; i < 10; ++i) {
- uint32_t status;
- for (j = 0; j < 100; ++j) {
- status = RREG32(UVD_STATUS);
- if (status & 2)
- break;
- mdelay(10);
- }
- r = 0;
- if (status & 2)
- break;
-
- DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
- WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
- mdelay(10);
- WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
- mdelay(10);
- r = -1;
- }
-
- if (r) {
- DRM_ERROR("UVD not responding, giving up!!!\n");
- radeon_set_uvd_clocks(rdev, 0, 0);
- return r;
- }
-
- /* enable interrupt */
- WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
-
- r = r600_uvd_rbc_start(rdev);
- if (!r)
- DRM_INFO("UVD initialized successfully.\n");
-
- /* lower clocks again */
- radeon_set_uvd_clocks(rdev, 0, 0);
-
- return r;
-}
-
-/*
* GPU scratch registers helpers function.
*/
void r600_scratch_init(struct radeon_device *rdev)
@@ -2865,94 +2526,6 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
return r;
}
-/**
- * r600_dma_ring_test - simple async dma engine test
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Test the DMA engine by using it to write a
- * value to memory (r6xx-SI).
- * Returns 0 for success, error for failure.
- */
-int r600_dma_ring_test(struct radeon_device *rdev,
- struct radeon_ring *ring)
-{
- unsigned i;
- int r;
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
- u32 tmp;
-
- if (!ptr) {
- DRM_ERROR("invalid vram scratch pointer\n");
- return -EINVAL;
- }
-
- tmp = 0xCAFEDEAD;
- writel(tmp, ptr);
-
- r = radeon_ring_lock(rdev, ring, 4);
- if (r) {
- DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
- return r;
- }
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
- radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
- radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
-
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = readl(ptr);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
-
- if (i < rdev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
- } else {
- DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- return r;
-}
-
-int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- uint32_t tmp = 0;
- unsigned i;
- int r;
-
- WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
- r = radeon_ring_lock(rdev, ring, 3);
- if (r) {
- DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
- ring->idx, r);
- return r;
- }
- radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
- radeon_ring_write(ring, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev, ring);
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = RREG32(UVD_CONTEXT_ID);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
-
- if (i < rdev->usec_timeout) {
- DRM_INFO("ring test on %d succeeded in %d usecs\n",
- ring->idx, i);
- } else {
- DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
- ring->idx, tmp);
- r = -EINVAL;
- }
- return r;
-}
-
/*
* CP fences/semaphores
*/
@@ -3004,30 +2577,6 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
}
}
-void r600_uvd_fence_emit(struct radeon_device *rdev,
- struct radeon_fence *fence)
-{
- struct radeon_ring *ring = &rdev->ring[fence->ring];
- uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
-
- radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
- radeon_ring_write(ring, fence->seq);
- radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
- radeon_ring_write(ring, addr & 0xffffffff);
- radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
- radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
- radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
- radeon_ring_write(ring, 2);
- return;
-}
-
void r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
@@ -3044,95 +2593,6 @@ void r600_semaphore_ring_emit(struct radeon_device *rdev,
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
}
-/*
- * DMA fences/semaphores
- */
-
-/**
- * r600_dma_fence_ring_emit - emit a fence on the DMA ring
- *
- * @rdev: radeon_device pointer
- * @fence: radeon fence object
- *
- * Add a DMA fence packet to the ring to write
- * the fence seq number and a DMA trap packet to generate
- * an interrupt if needed (r6xx-r7xx).
- */
-void r600_dma_fence_ring_emit(struct radeon_device *rdev,
- struct radeon_fence *fence)
-{
- struct radeon_ring *ring = &rdev->ring[fence->ring];
- u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
-
- /* write the fence */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
- radeon_ring_write(ring, addr & 0xfffffffc);
- radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
- radeon_ring_write(ring, lower_32_bits(fence->seq));
- /* generate an interrupt */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
-}
-
-/**
- * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- * @semaphore: radeon semaphore object
- * @emit_wait: wait or signal semaphore
- *
- * Add a DMA semaphore packet to the ring to wait on or signal
- * other rings (r6xx-SI).
- */
-void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait)
-{
- u64 addr = semaphore->gpu_addr;
- u32 s = emit_wait ? 0 : 1;
-
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
- radeon_ring_write(ring, addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
-}
-
-void r600_uvd_semaphore_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait)
-{
- uint64_t addr = semaphore->gpu_addr;
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
- radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
- radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
- radeon_ring_write(ring, emit_wait ? 1 : 0);
-}
-
-int r600_copy_blit(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
-{
- struct radeon_semaphore *sem = NULL;
- struct radeon_sa_bo *vb = NULL;
- int r;
-
- r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
- if (r) {
- return r;
- }
- r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
- r600_blit_done_copy(rdev, fence, vb, sem);
- return 0;
-}
-
/**
* r600_copy_cpdma - copy pages using the CP DMA engine
*
@@ -3166,7 +2626,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
- r = radeon_ring_lock(rdev, ring, num_loops * 6 + 21);
+ r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
radeon_semaphore_free(rdev, &sem, NULL);
@@ -3181,6 +2641,9 @@ int r600_copy_cpdma(struct radeon_device *rdev,
radeon_semaphore_free(rdev, &sem, NULL);
}
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, WAIT_3D_IDLE_bit);
for (i = 0; i < num_loops; i++) {
cur_size_in_bytes = size_in_bytes;
if (cur_size_in_bytes > 0x1fffff)
@@ -3214,80 +2677,6 @@ int r600_copy_cpdma(struct radeon_device *rdev,
return r;
}
-/**
- * r600_copy_dma - copy pages using the DMA engine
- *
- * @rdev: radeon_device pointer
- * @src_offset: src GPU address
- * @dst_offset: dst GPU address
- * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
- *
- * Copy GPU paging using the DMA engine (r6xx).
- * Used by the radeon ttm implementation to move pages if
- * registered as the asic copy callback.
- */
-int r600_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
-{
- struct radeon_semaphore *sem = NULL;
- int ring_index = rdev->asic->copy.dma_ring_index;
- struct radeon_ring *ring = &rdev->ring[ring_index];
- u32 size_in_dw, cur_size_in_dw;
- int i, num_loops;
- int r = 0;
-
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
- }
-
- size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
- num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
- r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
- return r;
- }
-
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
-
- for (i = 0; i < num_loops; i++) {
- cur_size_in_dw = size_in_dw;
- if (cur_size_in_dw > 0xFFFE)
- cur_size_in_dw = 0xFFFE;
- size_in_dw -= cur_size_in_dw;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
- radeon_ring_write(ring, dst_offset & 0xfffffffc);
- radeon_ring_write(ring, src_offset & 0xfffffffc);
- radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
- (upper_32_bits(src_offset) & 0xff)));
- src_offset += cur_size_in_dw * 4;
- dst_offset += cur_size_in_dw * 4;
- }
-
- r = radeon_fence_emit(rdev, fence, ring->idx);
- if (r) {
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
-
- radeon_ring_unlock_commit(rdev, ring);
- radeon_semaphore_free(rdev, &sem, *fence);
-
- return r;
-}
-
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size)
@@ -3309,6 +2698,13 @@ static int r600_startup(struct radeon_device *rdev)
/* enable pcie gen2 link */
r600_pcie_gen2_enable(rdev);
+ /* scratch needs to be initialized before MC */
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
+ r600_mc_program(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
@@ -3317,11 +2713,6 @@ static int r600_startup(struct radeon_device *rdev)
}
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
- r600_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
r600_agp_enable(rdev);
} else {
@@ -3330,12 +2721,6 @@ static int r600_startup(struct radeon_device *rdev)
return r;
}
r600_gpu_init(rdev);
- r = r600_blit_init(rdev);
- if (r) {
- r600_blit_fini(rdev);
- rdev->asic->copy.copy = NULL;
- dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
- }
/* allocate wb buffer */
r = radeon_wb_init(rdev);
@@ -3372,14 +2757,14 @@ static int r600_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_RB_RPTR, DMA_RB_WPTR,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
@@ -3548,7 +2933,6 @@ int r600_init(struct radeon_device *rdev)
void r600_fini(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
- r600_blit_fini(rdev);
r600_cp_fini(rdev);
r600_dma_fini(rdev);
r600_irq_fini(rdev);
@@ -3600,16 +2984,6 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
radeon_ring_write(ring, ib->length_dw);
}
-void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
-{
- struct radeon_ring *ring = &rdev->ring[ib->ring];
-
- radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
- radeon_ring_write(ring, ib->gpu_addr);
- radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
- radeon_ring_write(ring, ib->length_dw);
-}
-
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
struct radeon_ib ib;
@@ -3663,139 +3037,6 @@ free_scratch:
return r;
}
-/**
- * r600_dma_ib_test - test an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Test a simple IB in the DMA ring (r6xx-SI).
- * Returns 0 on success, error on failure.
- */
-int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- struct radeon_ib ib;
- unsigned i;
- int r;
- void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
- u32 tmp = 0;
-
- if (!ptr) {
- DRM_ERROR("invalid vram scratch pointer\n");
- return -EINVAL;
- }
-
- tmp = 0xCAFEDEAD;
- writel(tmp, ptr);
-
- r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
- if (r) {
- DRM_ERROR("radeon: failed to get ib (%d).\n", r);
- return r;
- }
-
- ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
- ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
- ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
- ib.ptr[3] = 0xDEADBEEF;
- ib.length_dw = 4;
-
- r = radeon_ib_schedule(rdev, &ib, NULL);
- if (r) {
- radeon_ib_free(rdev, &ib);
- DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
- return r;
- }
- r = radeon_fence_wait(ib.fence, false);
- if (r) {
- DRM_ERROR("radeon: fence wait failed (%d).\n", r);
- return r;
- }
- for (i = 0; i < rdev->usec_timeout; i++) {
- tmp = readl(ptr);
- if (tmp == 0xDEADBEEF)
- break;
- DRM_UDELAY(1);
- }
- if (i < rdev->usec_timeout) {
- DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
- } else {
- DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
- r = -EINVAL;
- }
- radeon_ib_free(rdev, &ib);
- return r;
-}
-
-int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- struct radeon_fence *fence = NULL;
- int r;
-
- r = radeon_set_uvd_clocks(rdev, 53300, 40000);
- if (r) {
- DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
- return r;
- }
-
- r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
- if (r) {
- DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
- goto error;
- }
-
- r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
- if (r) {
- DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
- goto error;
- }
-
- r = radeon_fence_wait(fence, false);
- if (r) {
- DRM_ERROR("radeon: fence wait failed (%d).\n", r);
- goto error;
- }
- DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
-error:
- radeon_fence_unref(&fence);
- radeon_set_uvd_clocks(rdev, 0, 0);
- return r;
-}
-
-/**
- * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
- *
- * @rdev: radeon_device pointer
- * @ib: IB object to schedule
- *
- * Schedule an IB in the DMA ring (r6xx-r7xx).
- */
-void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
-{
- struct radeon_ring *ring = &rdev->ring[ib->ring];
-
- if (rdev->wb.enabled) {
- u32 next_rptr = ring->wptr + 4;
- while ((next_rptr & 7) != 5)
- next_rptr++;
- next_rptr += 3;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
- radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
- radeon_ring_write(ring, next_rptr);
- }
-
- /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
- * Pad as necessary with NOPs.
- */
- while ((ring->wptr & 7) != 5)
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
- radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
- radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
-
-}
-
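A quick standalone illustration of the 8-dword alignment rule in r600_dma_ring_ib_execute() above: the INDIRECT_BUFFER packet is three dwords, so the write pointer is first padded with NOP dwords until it is 5 modulo 8, which makes the packet end exactly on an 8-dword boundary. The sketch below only reproduces that arithmetic on the host; it is not driver code.

#include <stdio.h>

/* Pad as the driver loop does: emit NOP dwords until (wptr % 8) == 5. */
static unsigned int pad_for_ib(unsigned int wptr, unsigned int *nops)
{
	*nops = 0;
	while ((wptr & 7) != 5) {
		wptr++;		/* one NOP dword */
		(*nops)++;
	}
	return wptr + 3;	/* the 3-dword INDIRECT_BUFFER packet follows */
}

int main(void)
{
	unsigned int wptr, nops, end;

	for (wptr = 0; wptr < 8; wptr++) {
		end = pad_for_ib(wptr, &nops);
		printf("wptr%%8=%u: %u NOP dword(s), IB packet ends at %u mod 8\n",
		       wptr, nops, end & 7);
	}
	return 0;	/* every line ends at 0 mod 8 */
}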
/*
* Interrupts
*
@@ -3812,7 +3053,7 @@ void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
u32 rb_bufsz;
/* Align ring size */
- rb_bufsz = drm_order(ring_size / 4);
+ rb_bufsz = order_base_2(ring_size / 4);
ring_size = (1 << rb_bufsz) * 4;
rdev->ih.ring_size = ring_size;
rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
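The order_base_2() call sites in this hunk round the requested IH ring size up to a power of two: the helper returns the rounded-up base-2 order, so a 96 KB request becomes a 128 KB ring. A small self-contained sketch of that alignment follows; the local order helper only mimics the kernel's order_base_2() for the purpose of the example.

#include <stdio.h>

/* Local stand-in that mimics order_base_2(): rounded-up base-2 order. */
static unsigned int order_base_2_sketch(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;
	return order;
}

int main(void)
{
	unsigned int ring_size = 96 * 1024;	/* requested size in bytes */
	unsigned int rb_bufsz = order_base_2_sketch(ring_size / 4);

	ring_size = (1 << rb_bufsz) * 4;	/* aligned size in bytes */
	printf("rb_bufsz=%u, aligned ring_size=%u bytes, ptr_mask=0x%x\n",
	       rb_bufsz, ring_size, ring_size - 1);
	return 0;	/* prints rb_bufsz=15, 131072 bytes, 0x1ffff */
}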
@@ -4049,7 +3290,7 @@ int r600_irq_init(struct radeon_device *rdev)
WREG32(INTERRUPT_CNTL, interrupt_cntl);
WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
- rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+ rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
IH_WPTR_OVERFLOW_CLEAR |
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index c92eb86a8e5..47fc2b88697 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -57,12 +57,12 @@ static bool radeon_dig_encoder(struct drm_encoder *encoder)
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
- return ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE6(rdev);
+ return ASIC_IS_DCE2(rdev) && !ASIC_IS_NODCE(rdev);
}
-struct r600_audio r600_audio_status(struct radeon_device *rdev)
+struct r600_audio_pin r600_audio_status(struct radeon_device *rdev)
{
- struct r600_audio status;
+ struct r600_audio_pin status;
uint32_t value;
value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
@@ -120,16 +120,16 @@ void r600_audio_update_hdmi(struct work_struct *work)
struct radeon_device *rdev = container_of(work, struct radeon_device,
audio_work);
struct drm_device *dev = rdev->ddev;
- struct r600_audio audio_status = r600_audio_status(rdev);
+ struct r600_audio_pin audio_status = r600_audio_status(rdev);
struct drm_encoder *encoder;
bool changed = false;
- if (rdev->audio_status.channels != audio_status.channels ||
- rdev->audio_status.rate != audio_status.rate ||
- rdev->audio_status.bits_per_sample != audio_status.bits_per_sample ||
- rdev->audio_status.status_bits != audio_status.status_bits ||
- rdev->audio_status.category_code != audio_status.category_code) {
- rdev->audio_status = audio_status;
+ if (rdev->audio.pin[0].channels != audio_status.channels ||
+ rdev->audio.pin[0].rate != audio_status.rate ||
+ rdev->audio.pin[0].bits_per_sample != audio_status.bits_per_sample ||
+ rdev->audio.pin[0].status_bits != audio_status.status_bits ||
+ rdev->audio.pin[0].category_code != audio_status.category_code) {
+ rdev->audio.pin[0] = audio_status;
changed = true;
}
@@ -141,13 +141,13 @@ void r600_audio_update_hdmi(struct work_struct *work)
}
}
-/*
- * turn on/off audio engine
- */
-static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
+/* enable the audio stream */
+static void r600_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable)
{
u32 value = 0;
- DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
+
if (ASIC_IS_DCE4(rdev)) {
if (enable) {
value |= 0x81000000; /* Required to enable audio */
@@ -158,7 +158,7 @@ static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
WREG32_P(R600_AUDIO_ENABLE,
enable ? 0x81000000 : 0x0, ~0x81000000);
}
- rdev->audio_enabled = enable;
+ DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
}
/*
@@ -169,13 +169,17 @@ int r600_audio_init(struct radeon_device *rdev)
if (!radeon_audio || !r600_audio_chipset_supported(rdev))
return 0;
- r600_audio_engine_enable(rdev, true);
+ rdev->audio.enabled = true;
+
+ rdev->audio.num_pins = 1;
+ rdev->audio.pin[0].channels = -1;
+ rdev->audio.pin[0].rate = -1;
+ rdev->audio.pin[0].bits_per_sample = -1;
+ rdev->audio.pin[0].status_bits = 0;
+ rdev->audio.pin[0].category_code = 0;
+ rdev->audio.pin[0].id = 0;
- rdev->audio_status.channels = -1;
- rdev->audio_status.rate = -1;
- rdev->audio_status.bits_per_sample = -1;
- rdev->audio_status.status_bits = 0;
- rdev->audio_status.category_code = 0;
+ r600_audio_enable(rdev, &rdev->audio.pin[0], true);
return 0;
}
@@ -186,8 +190,16 @@ int r600_audio_init(struct radeon_device *rdev)
*/
void r600_audio_fini(struct radeon_device *rdev)
{
- if (!rdev->audio_enabled)
+ if (!rdev->audio.enabled)
return;
- r600_audio_engine_enable(rdev, false);
+ r600_audio_enable(rdev, &rdev->audio.pin[0], false);
+
+ rdev->audio.enabled = false;
+}
+
+struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev)
+{
+ /* only one pin on 6xx-NI */
+ return &rdev->audio.pin[0];
}
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index f651881eb0a..daf7572be97 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -31,6 +31,37 @@
#include "r600_blit_shaders.h"
+/* 23 bits of float fractional data */
+#define I2F_FRAC_BITS 23
+#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
+
+/*
+ * Converts unsigned integer into 32-bit IEEE floating point representation.
+ * Will be exact from 0 to 2^24. Above that, we round towards zero
+ * as the fractional bits will not fit in a float. (It would be better to
+ * round towards even as the fpu does, but that is slower.)
+ */
+static __pure uint32_t int2float(uint32_t x)
+{
+ uint32_t msb, exponent, fraction;
+
+ /* Zero is special */
+ if (!x) return 0;
+
+ /* Get location of the most significant bit */
+ msb = __fls(x);
+
+ /*
+ * Use a rotate instead of a shift because that works both leftwards
+ * and rightwards due to the mod(32) behaviour. This means we don't
+ * need to check to see if we are above 2^24 or not.
+ */
+ fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
+ exponent = (127 + msb) << I2F_FRAC_BITS;
+
+ return fraction + exponent;
+}
+
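A standalone sanity check of the conversion described above: for inputs up to 2^24 the returned bit pattern should match the IEEE-754 encoding the FPU would produce. The sketch below re-implements the helper with plain C builtins purely for illustration (the kernel's __fls() and ror32() are replaced by __builtin_clz() and an explicit rotate); it is not part of the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FRAC_BITS 23
#define FRAC_MASK ((1u << FRAC_BITS) - 1)

static uint32_t ror32_sketch(uint32_t x, unsigned int r)
{
	r &= 31;
	return r ? (x >> r) | (x << (32 - r)) : x;
}

static uint32_t int2float_sketch(uint32_t x)
{
	uint32_t msb, exponent, fraction;

	if (!x)
		return 0;
	msb = 31 - __builtin_clz(x);			/* stand-in for __fls() */
	fraction = ror32_sketch(x, (msb - FRAC_BITS) & 0x1f) & FRAC_MASK;
	exponent = (127 + msb) << FRAC_BITS;
	return fraction + exponent;
}

int main(void)
{
	const uint32_t vals[] = { 1, 3, 16, 8192, 1u << 24 };
	unsigned int i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
		float f = (float)vals[i];
		uint32_t ieee;

		memcpy(&ieee, &f, sizeof(ieee));
		printf("%8u -> 0x%08x (ieee 0x%08x)\n",
		       vals[i], int2float_sketch(vals[i]), ieee);
	}
	return 0;
}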
#define DI_PT_RECTLIST 0x11
#define DI_INDEX_SIZE_16_BIT 0x0
#define DI_SRC_SEL_AUTO_INDEX 0x2
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
deleted file mode 100644
index 9fb5780a552..00000000000
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ /dev/null
@@ -1,785 +0,0 @@
-/*
- * Copyright 2009 Advanced Micro Devices, Inc.
- * Copyright 2009 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/radeon_drm.h>
-#include "radeon.h"
-
-#include "r600d.h"
-#include "r600_blit_shaders.h"
-#include "radeon_blit_common.h"
-
-/* 23 bits of float fractional data */
-#define I2F_FRAC_BITS 23
-#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
-
-/*
- * Converts unsigned integer into 32-bit IEEE floating point representation.
- * Will be exact from 0 to 2^24. Above that, we round towards zero
- * as the fractional bits will not fit in a float. (It would be better to
- * round towards even as the fpu does, but that is slower.)
- */
-__pure uint32_t int2float(uint32_t x)
-{
- uint32_t msb, exponent, fraction;
-
- /* Zero is special */
- if (!x) return 0;
-
- /* Get location of the most significant bit */
- msb = __fls(x);
-
- /*
- * Use a rotate instead of a shift because that works both leftwards
- * and rightwards due to the mod(32) behaviour. This means we don't
- * need to check to see if we are above 2^24 or not.
- */
- fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
- exponent = (127 + msb) << I2F_FRAC_BITS;
-
- return fraction + exponent;
-}
-
-/* emits 21 on rv770+, 23 on r600 */
-static void
-set_render_target(struct radeon_device *rdev, int format,
- int w, int h, u64 gpu_addr)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 cb_color_info;
- int pitch, slice;
-
- h = ALIGN(h, 8);
- if (h < 8)
- h = 8;
-
- cb_color_info = CB_FORMAT(format) |
- CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
- CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- pitch = (w / 8) - 1;
- slice = ((w * h) / 64) - 1;
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
-
- if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
- radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
- radeon_ring_write(ring, 2 << 0);
- }
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, (pitch << 0) | (slice << 10));
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, cb_color_info);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
-}
-
-/* emits 5dw */
-static void
-cp_set_surface_sync(struct radeon_device *rdev,
- u32 sync_type, u32 size,
- u64 mc_addr)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 cp_coher_size;
-
- if (size == 0xffffffff)
- cp_coher_size = 0xffffffff;
- else
- cp_coher_size = ((size + 255) >> 8);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(ring, sync_type);
- radeon_ring_write(ring, cp_coher_size);
- radeon_ring_write(ring, mc_addr >> 8);
- radeon_ring_write(ring, 10); /* poll interval */
-}
-
-/* emits 21dw + 1 surface sync = 26dw */
-static void
-set_shaders(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u64 gpu_addr;
- u32 sq_pgm_resources;
-
- /* setup shader regs */
- sq_pgm_resources = (1 << 0);
-
- /* VS */
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, sq_pgm_resources);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
-
- /* PS */
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, gpu_addr >> 8);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, sq_pgm_resources | (1 << 28));
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 2);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, 0);
-
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
- cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
-}
-
-/* emits 9 + 1 sync (5) = 14*/
-static void
-set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 sq_vtx_constant_word2;
-
- sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
- SQ_VTXC_STRIDE(16);
-#ifdef __BIG_ENDIAN
- sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
-#endif
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
- radeon_ring_write(ring, 0x460);
- radeon_ring_write(ring, gpu_addr & 0xffffffff);
- radeon_ring_write(ring, 48 - 1);
- radeon_ring_write(ring, sq_vtx_constant_word2);
- radeon_ring_write(ring, 1 << 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30);
-
- if ((rdev->family == CHIP_RV610) ||
- (rdev->family == CHIP_RV620) ||
- (rdev->family == CHIP_RS780) ||
- (rdev->family == CHIP_RS880) ||
- (rdev->family == CHIP_RV710))
- cp_set_surface_sync(rdev,
- PACKET3_TC_ACTION_ENA, 48, gpu_addr);
- else
- cp_set_surface_sync(rdev,
- PACKET3_VC_ACTION_ENA, 48, gpu_addr);
-}
-
-/* emits 9 */
-static void
-set_tex_resource(struct radeon_device *rdev,
- int format, int w, int h, int pitch,
- u64 gpu_addr, u32 size)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
-
- if (h < 1)
- h = 1;
-
- sq_tex_resource_word0 = S_038000_DIM(V_038000_SQ_TEX_DIM_2D) |
- S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
- sq_tex_resource_word0 |= S_038000_PITCH((pitch >> 3) - 1) |
- S_038000_TEX_WIDTH(w - 1);
-
- sq_tex_resource_word1 = S_038004_DATA_FORMAT(format);
- sq_tex_resource_word1 |= S_038004_TEX_HEIGHT(h - 1);
-
- sq_tex_resource_word4 = S_038010_REQUEST_SIZE(1) |
- S_038010_DST_SEL_X(SQ_SEL_X) |
- S_038010_DST_SEL_Y(SQ_SEL_Y) |
- S_038010_DST_SEL_Z(SQ_SEL_Z) |
- S_038010_DST_SEL_W(SQ_SEL_W);
-
- cp_set_surface_sync(rdev,
- PACKET3_TC_ACTION_ENA, size, gpu_addr);
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, sq_tex_resource_word0);
- radeon_ring_write(ring, sq_tex_resource_word1);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(ring, sq_tex_resource_word4);
- radeon_ring_write(ring, 0);
- radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30);
-}
-
-/* emits 12 */
-static void
-set_scissors(struct radeon_device *rdev, int x1, int y1,
- int x2, int y2)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
-
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
-}
-
-/* emits 10 */
-static void
-draw_auto(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(ring, DI_PT_RECTLIST);
-
- radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
- radeon_ring_write(ring,
-#ifdef __BIG_ENDIAN
- (2 << 2) |
-#endif
- DI_INDEX_SIZE_16_BIT);
-
- radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
- radeon_ring_write(ring, 1);
-
- radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
- radeon_ring_write(ring, 3);
- radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
-
-}
-
-/* emits 14 */
-static void
-set_default_state(struct radeon_device *rdev)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
- u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
- int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
- int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads;
- int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries;
- u64 gpu_addr;
- int dwords;
-
- switch (rdev->family) {
- case CHIP_R600:
- num_ps_gprs = 192;
- num_vs_gprs = 56;
- num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 136;
- num_vs_threads = 48;
- num_gs_threads = 4;
- num_es_threads = 4;
- num_ps_stack_entries = 128;
- num_vs_stack_entries = 128;
- num_gs_stack_entries = 0;
- num_es_stack_entries = 0;
- break;
- case CHIP_RV630:
- case CHIP_RV635:
- num_ps_gprs = 84;
- num_vs_gprs = 36;
- num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 144;
- num_vs_threads = 40;
- num_gs_threads = 4;
- num_es_threads = 4;
- num_ps_stack_entries = 40;
- num_vs_stack_entries = 40;
- num_gs_stack_entries = 32;
- num_es_stack_entries = 16;
- break;
- case CHIP_RV610:
- case CHIP_RV620:
- case CHIP_RS780:
- case CHIP_RS880:
- default:
- num_ps_gprs = 84;
- num_vs_gprs = 36;
- num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 136;
- num_vs_threads = 48;
- num_gs_threads = 4;
- num_es_threads = 4;
- num_ps_stack_entries = 40;
- num_vs_stack_entries = 40;
- num_gs_stack_entries = 32;
- num_es_stack_entries = 16;
- break;
- case CHIP_RV670:
- num_ps_gprs = 144;
- num_vs_gprs = 40;
- num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 136;
- num_vs_threads = 48;
- num_gs_threads = 4;
- num_es_threads = 4;
- num_ps_stack_entries = 40;
- num_vs_stack_entries = 40;
- num_gs_stack_entries = 32;
- num_es_stack_entries = 16;
- break;
- case CHIP_RV770:
- num_ps_gprs = 192;
- num_vs_gprs = 56;
- num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 188;
- num_vs_threads = 60;
- num_gs_threads = 0;
- num_es_threads = 0;
- num_ps_stack_entries = 256;
- num_vs_stack_entries = 256;
- num_gs_stack_entries = 0;
- num_es_stack_entries = 0;
- break;
- case CHIP_RV730:
- case CHIP_RV740:
- num_ps_gprs = 84;
- num_vs_gprs = 36;
- num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 188;
- num_vs_threads = 60;
- num_gs_threads = 0;
- num_es_threads = 0;
- num_ps_stack_entries = 128;
- num_vs_stack_entries = 128;
- num_gs_stack_entries = 0;
- num_es_stack_entries = 0;
- break;
- case CHIP_RV710:
- num_ps_gprs = 192;
- num_vs_gprs = 56;
- num_temp_gprs = 4;
- num_gs_gprs = 0;
- num_es_gprs = 0;
- num_ps_threads = 144;
- num_vs_threads = 48;
- num_gs_threads = 0;
- num_es_threads = 0;
- num_ps_stack_entries = 128;
- num_vs_stack_entries = 128;
- num_gs_stack_entries = 0;
- num_es_stack_entries = 0;
- break;
- }
-
- if ((rdev->family == CHIP_RV610) ||
- (rdev->family == CHIP_RV620) ||
- (rdev->family == CHIP_RS780) ||
- (rdev->family == CHIP_RS880) ||
- (rdev->family == CHIP_RV710))
- sq_config = 0;
- else
- sq_config = VC_ENABLE;
-
- sq_config |= (DX9_CONSTS |
- ALU_INST_PREFER_VECTOR |
- PS_PRIO(0) |
- VS_PRIO(1) |
- GS_PRIO(2) |
- ES_PRIO(3));
-
- sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
- NUM_VS_GPRS(num_vs_gprs) |
- NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
- sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
- NUM_ES_GPRS(num_es_gprs));
- sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
- NUM_VS_THREADS(num_vs_threads) |
- NUM_GS_THREADS(num_gs_threads) |
- NUM_ES_THREADS(num_es_threads));
- sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
- NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
- sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
- NUM_ES_STACK_ENTRIES(num_es_stack_entries));
-
- /* emit an IB pointing at default state */
- dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
- gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
- radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(ring,
-#ifdef __BIG_ENDIAN
- (2 << 0) |
-#endif
- (gpu_addr & 0xFFFFFFFC));
- radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
- radeon_ring_write(ring, dwords);
-
- /* SQ config */
- radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6));
- radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(ring, sq_config);
- radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
- radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
- radeon_ring_write(ring, sq_thread_resource_mgmt);
- radeon_ring_write(ring, sq_stack_resource_mgmt_1);
- radeon_ring_write(ring, sq_stack_resource_mgmt_2);
-}
-
-int r600_blit_init(struct radeon_device *rdev)
-{
- u32 obj_size;
- int i, r, dwords;
- void *ptr;
- u32 packet2s[16];
- int num_packet2s = 0;
-
- rdev->r600_blit.primitives.set_render_target = set_render_target;
- rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
- rdev->r600_blit.primitives.set_shaders = set_shaders;
- rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
- rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
- rdev->r600_blit.primitives.set_scissors = set_scissors;
- rdev->r600_blit.primitives.draw_auto = draw_auto;
- rdev->r600_blit.primitives.set_default_state = set_default_state;
-
- rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
- rdev->r600_blit.ring_size_common += 40; /* shaders + def state */
- rdev->r600_blit.ring_size_common += 5; /* done copy */
- rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
-
- rdev->r600_blit.ring_size_per_loop = 76;
- /* set_render_target emits 2 extra dwords on rv6xx */
- if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
- rdev->r600_blit.ring_size_per_loop += 2;
-
- rdev->r600_blit.max_dim = 8192;
-
- rdev->r600_blit.state_offset = 0;
-
- if (rdev->family >= CHIP_RV770)
- rdev->r600_blit.state_len = r7xx_default_size;
- else
- rdev->r600_blit.state_len = r6xx_default_size;
-
- dwords = rdev->r600_blit.state_len;
- while (dwords & 0xf) {
- packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
- dwords++;
- }
-
- obj_size = dwords * 4;
- obj_size = ALIGN(obj_size, 256);
-
- rdev->r600_blit.vs_offset = obj_size;
- obj_size += r6xx_vs_size * 4;
- obj_size = ALIGN(obj_size, 256);
-
- rdev->r600_blit.ps_offset = obj_size;
- obj_size += r6xx_ps_size * 4;
- obj_size = ALIGN(obj_size, 256);
-
- /* pin copy shader into vram if not already initialized */
- if (rdev->r600_blit.shader_obj == NULL) {
- r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM,
- NULL, &rdev->r600_blit.shader_obj);
- if (r) {
- DRM_ERROR("r600 failed to allocate shader\n");
- return r;
- }
-
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->r600_blit.shader_gpu_addr);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- if (r) {
- dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
- return r;
- }
- }
-
- DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n",
- obj_size,
- rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
-
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
- if (r) {
- DRM_ERROR("failed to map blit object %d\n", r);
- return r;
- }
- if (rdev->family >= CHIP_RV770)
- memcpy_toio(ptr + rdev->r600_blit.state_offset,
- r7xx_default_state, rdev->r600_blit.state_len * 4);
- else
- memcpy_toio(ptr + rdev->r600_blit.state_offset,
- r6xx_default_state, rdev->r600_blit.state_len * 4);
- if (num_packet2s)
- memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
- packet2s, num_packet2s * 4);
- for (i = 0; i < r6xx_vs_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]);
- for (i = 0; i < r6xx_ps_size; i++)
- *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]);
- radeon_bo_kunmap(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
- return 0;
-}
-
-void r600_blit_fini(struct radeon_device *rdev)
-{
- int r;
-
- radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
- if (rdev->r600_blit.shader_obj == NULL)
- return;
- /* If we can't reserve the bo, unref should be enough to destroy
- * it when it becomes idle.
- */
- r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
- if (!r) {
- radeon_bo_unpin(rdev->r600_blit.shader_obj);
- radeon_bo_unreserve(rdev->r600_blit.shader_obj);
- }
- radeon_bo_unref(&rdev->r600_blit.shader_obj);
-}
-
-static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
- int *width, int *height, int max_dim)
-{
- unsigned max_pages;
- unsigned pages = num_gpu_pages;
- int w, h;
-
- if (num_gpu_pages == 0) {
- /* not supposed to be called with no pages, but just in case */
- h = 0;
- w = 0;
- pages = 0;
- WARN_ON(1);
- } else {
- int rect_order = 2;
- h = RECT_UNIT_H;
- while (num_gpu_pages / rect_order) {
- h *= 2;
- rect_order *= 4;
- if (h >= max_dim) {
- h = max_dim;
- break;
- }
- }
- max_pages = (max_dim * h) / (RECT_UNIT_W * RECT_UNIT_H);
- if (pages > max_pages)
- pages = max_pages;
- w = (pages * RECT_UNIT_W * RECT_UNIT_H) / h;
- w = (w / RECT_UNIT_W) * RECT_UNIT_W;
- pages = (w * h) / (RECT_UNIT_W * RECT_UNIT_H);
- BUG_ON(pages == 0);
- }
-
-
- DRM_DEBUG("blit_rectangle: h=%d, w=%d, pages=%d\n", h, w, pages);
-
- /* return width and height only if the caller wants it */
- if (height)
- *height = h;
- if (width)
- *width = w;
-
- return pages;
-}
-
-
-int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
- struct radeon_fence **fence, struct radeon_sa_bo **vb,
- struct radeon_semaphore **sem)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- int r;
- int ring_size;
- int num_loops = 0;
- int dwords_per_loop = rdev->r600_blit.ring_size_per_loop;
-
- /* num loops */
- while (num_gpu_pages) {
- num_gpu_pages -=
- r600_blit_create_rect(num_gpu_pages, NULL, NULL,
- rdev->r600_blit.max_dim);
- num_loops++;
- }
-
- /* 48 bytes for vertex per loop */
- r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, vb,
- (num_loops*48)+256, 256, true);
- if (r) {
- return r;
- }
-
- r = radeon_semaphore_create(rdev, sem);
- if (r) {
- radeon_sa_bo_free(rdev, vb, NULL);
- return r;
- }
-
- /* calculate number of loops correctly */
- ring_size = num_loops * dwords_per_loop;
- ring_size += rdev->r600_blit.ring_size_common;
- r = radeon_ring_lock(rdev, ring, ring_size);
- if (r) {
- radeon_sa_bo_free(rdev, vb, NULL);
- radeon_semaphore_free(rdev, sem, NULL);
- return r;
- }
-
- if (radeon_fence_need_sync(*fence, RADEON_RING_TYPE_GFX_INDEX)) {
- radeon_semaphore_sync_rings(rdev, *sem, (*fence)->ring,
- RADEON_RING_TYPE_GFX_INDEX);
- radeon_fence_note_sync(*fence, RADEON_RING_TYPE_GFX_INDEX);
- } else {
- radeon_semaphore_free(rdev, sem, NULL);
- }
-
- rdev->r600_blit.primitives.set_default_state(rdev);
- rdev->r600_blit.primitives.set_shaders(rdev);
- return 0;
-}
-
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
- struct radeon_sa_bo *vb, struct radeon_semaphore *sem)
-{
- struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- int r;
-
- r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
- if (r) {
- radeon_ring_unlock_undo(rdev, ring);
- return;
- }
-
- radeon_ring_unlock_commit(rdev, ring);
- radeon_sa_bo_free(rdev, &vb, *fence);
- radeon_semaphore_free(rdev, &sem, *fence);
-}
-
-void r600_kms_blit_copy(struct radeon_device *rdev,
- u64 src_gpu_addr, u64 dst_gpu_addr,
- unsigned num_gpu_pages,
- struct radeon_sa_bo *vb)
-{
- u64 vb_gpu_addr;
- u32 *vb_cpu_addr;
-
- DRM_DEBUG("emitting copy %16llx %16llx %d\n",
- src_gpu_addr, dst_gpu_addr, num_gpu_pages);
- vb_cpu_addr = (u32 *)radeon_sa_bo_cpu_addr(vb);
- vb_gpu_addr = radeon_sa_bo_gpu_addr(vb);
-
- while (num_gpu_pages) {
- int w, h;
- unsigned size_in_bytes;
- unsigned pages_per_loop =
- r600_blit_create_rect(num_gpu_pages, &w, &h,
- rdev->r600_blit.max_dim);
-
- size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE;
- DRM_DEBUG("rectangle w=%d h=%d\n", w, h);
-
- vb_cpu_addr[0] = 0;
- vb_cpu_addr[1] = 0;
- vb_cpu_addr[2] = 0;
- vb_cpu_addr[3] = 0;
-
- vb_cpu_addr[4] = 0;
- vb_cpu_addr[5] = int2float(h);
- vb_cpu_addr[6] = 0;
- vb_cpu_addr[7] = int2float(h);
-
- vb_cpu_addr[8] = int2float(w);
- vb_cpu_addr[9] = int2float(h);
- vb_cpu_addr[10] = int2float(w);
- vb_cpu_addr[11] = int2float(h);
-
- rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
- w, h, w, src_gpu_addr, size_in_bytes);
- rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
- w, h, dst_gpu_addr);
- rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);
- rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr);
- rdev->r600_blit.primitives.draw_auto(rdev);
- rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
- PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
- size_in_bytes, dst_gpu_addr);
-
- vb_cpu_addr += 12;
- vb_gpu_addr += 4*12;
- src_gpu_addr += size_in_bytes;
- dst_gpu_addr += size_in_bytes;
- num_gpu_pages -= pages_per_loop;
- }
-}
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.h b/drivers/gpu/drm/radeon/r600_blit_shaders.h
index 2f3ce7a7597..f437d36dd98 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.h
@@ -35,5 +35,4 @@ extern const u32 r6xx_default_state[];
extern const u32 r6xx_ps_size, r6xx_vs_size;
extern const u32 r6xx_default_size, r7xx_default_size;
-__pure uint32_t int2float(uint32_t x);
#endif
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 1c51c08b1fd..d8eb48bff0e 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -2200,13 +2200,13 @@ int r600_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+ init->ring_size / sizeof(u32));
dev_priv->ring.size = init->ring_size;
- dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+ dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
- dev_priv->ring.rptr_update_l2qw = drm_order(/* init->rptr_update */ 4096 / 8);
+ dev_priv->ring.rptr_update_l2qw = order_base_2(/* init->rptr_update */ 4096 / 8);
dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
- dev_priv->ring.fetch_size_l2ow = drm_order(/* init->fetch_size */ 32 / 16);
+ dev_priv->ring.fetch_size_l2ow = order_base_2(/* init->fetch_size */ 32 / 16);
dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
new file mode 100644
index 00000000000..3b317456512
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -0,0 +1,497 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "r600d.h"
+
+u32 r600_gpu_check_soft_reset(struct radeon_device *rdev);
+
+/*
+ * DMA
+ * Starting with R600, the GPU has an asynchronous
+ * DMA engine. The programming model is very similar
+ * to the 3D engine (ring buffer, IBs, etc.), but the
+ * DMA controller has its own packet format that is
+ * different from the PM4 format used by the 3D engine.
+ * It supports copying data, writing embedded data,
+ * solid fills, and a number of other things. It also
+ * has support for tiling/detiling of buffers.
+ */
+
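For orientation, the smallest packet stream this file emits is the four-dword write used by r600_dma_ring_test() further down: a DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1) header, the low 32 bits of the dword-aligned destination, the upper 8 address bits, and one data dword. The host-side sketch below only mirrors that shape; dma_packet_hdr() and the OP_WRITE value are illustrative assumptions, not the r600d.h definitions.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative header encoder: opcode in the top nibble, dword count in the
 * low 16 bits. Assumed layout for this sketch only, not the r600d.h macro.
 */
static uint32_t dma_packet_hdr(uint32_t op, uint32_t count)
{
	return ((op & 0xF) << 28) | (count & 0xFFFF);
}

int main(void)
{
	const uint32_t OP_WRITE = 0x4;			/* placeholder opcode */
	const uint64_t dst = 0x123456788ULL;		/* dword-aligned GPU address */
	uint32_t stream[4];
	int i;

	stream[0] = dma_packet_hdr(OP_WRITE, 1);	/* header: write, 1 data dword */
	stream[1] = (uint32_t)dst & 0xfffffffc;		/* destination bits 31:2 */
	stream[2] = (uint32_t)(dst >> 32) & 0xff;	/* destination bits 39:32 */
	stream[3] = 0xDEADBEEF;				/* payload */

	for (i = 0; i < 4; i++)
		printf("dw%d: 0x%08x\n", i, stream[i]);
	return 0;
}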
+/**
+ * r600_dma_get_rptr - get the current read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current rptr from the hardware (r6xx+).
+ */
+uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ return (radeon_ring_generic_get_rptr(rdev, ring) & 0x3fffc) >> 2;
+}
+
+/**
+ * r600_dma_get_wptr - get the current write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Get the current wptr from the hardware (r6xx+).
+ */
+uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ return (RREG32(ring->wptr_reg) & 0x3fffc) >> 2;
+}
+
+/**
+ * r600_dma_set_wptr - commit the write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon ring pointer
+ *
+ * Write the wptr back to the hardware (r6xx+).
+ */
+void r600_dma_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ WREG32(ring->wptr_reg, (ring->wptr << 2) & 0x3fffc);
+}
+
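The masks and shifts in the three helpers above imply that the hardware rptr/wptr registers hold a dword-aligned byte offset into the ring (capped by the 0x3fffc mask, i.e. a ring of at most 256 KB), while ring->rptr and ring->wptr are kept as dword indices. A tiny standalone sketch of that conversion, for illustration only:

#include <stdint.h>
#include <stdio.h>

static uint32_t dma_reg_to_dw(uint32_t reg) { return (reg & 0x3fffc) >> 2; }
static uint32_t dma_dw_to_reg(uint32_t dw)  { return (dw << 2) & 0x3fffc; }

int main(void)
{
	uint32_t wptr_dw = 0x1234;		/* driver-side dword index */
	uint32_t reg = dma_dw_to_reg(wptr_dw);	/* value as written to DMA_RB_WPTR */

	printf("dword index 0x%x -> register 0x%x -> dword index 0x%x\n",
	       wptr_dw, reg, dma_reg_to_dw(reg));
	return 0;
}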
+/**
+ * r600_dma_stop - stop the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine (r6xx-evergreen).
+ */
+void r600_dma_stop(struct radeon_device *rdev)
+{
+ u32 rb_cntl = RREG32(DMA_RB_CNTL);
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
+ rb_cntl &= ~DMA_RB_ENABLE;
+ WREG32(DMA_RB_CNTL, rb_cntl);
+
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+}
+
+/**
+ * r600_dma_resume - setup and start the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set up the DMA ring buffer and enable it (r6xx-evergreen).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_resume(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
+ u32 rb_cntl, dma_cntl, ib_cntl;
+ u32 rb_bufsz;
+ int r;
+
+ /* Reset dma */
+ if (rdev->family >= CHIP_RV770)
+ WREG32(SRBM_SOFT_RESET, RV770_SOFT_RESET_DMA);
+ else
+ WREG32(SRBM_SOFT_RESET, SOFT_RESET_DMA);
+ RREG32(SRBM_SOFT_RESET);
+ udelay(50);
+ WREG32(SRBM_SOFT_RESET, 0);
+
+ WREG32(DMA_SEM_INCOMPLETE_TIMER_CNTL, 0);
+ WREG32(DMA_SEM_WAIT_FAIL_TIMER_CNTL, 0);
+
+ /* Set ring buffer size in dwords */
+ rb_bufsz = order_base_2(ring->ring_size / 4);
+ rb_cntl = rb_bufsz << 1;
+#ifdef __BIG_ENDIAN
+ rb_cntl |= DMA_RB_SWAP_ENABLE | DMA_RPTR_WRITEBACK_SWAP_ENABLE;
+#endif
+ WREG32(DMA_RB_CNTL, rb_cntl);
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(DMA_RB_RPTR, 0);
+ WREG32(DMA_RB_WPTR, 0);
+
+ /* set the wb address whether it's enabled or not */
+ WREG32(DMA_RB_RPTR_ADDR_HI,
+ upper_32_bits(rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFF);
+ WREG32(DMA_RB_RPTR_ADDR_LO,
+ ((rdev->wb.gpu_addr + R600_WB_DMA_RPTR_OFFSET) & 0xFFFFFFFC));
+
+ if (rdev->wb.enabled)
+ rb_cntl |= DMA_RPTR_WRITEBACK_ENABLE;
+
+ WREG32(DMA_RB_BASE, ring->gpu_addr >> 8);
+
+ /* enable DMA IBs */
+ ib_cntl = DMA_IB_ENABLE;
+#ifdef __BIG_ENDIAN
+ ib_cntl |= DMA_IB_SWAP_ENABLE;
+#endif
+ WREG32(DMA_IB_CNTL, ib_cntl);
+
+ dma_cntl = RREG32(DMA_CNTL);
+ dma_cntl &= ~CTXEMPTY_INT_ENABLE;
+ WREG32(DMA_CNTL, dma_cntl);
+
+ if (rdev->family >= CHIP_RV770)
+ WREG32(DMA_MODE, 1);
+
+ ring->wptr = 0;
+ WREG32(DMA_RB_WPTR, ring->wptr << 2);
+
+ ring->rptr = RREG32(DMA_RB_RPTR) >> 2;
+
+ WREG32(DMA_RB_CNTL, rb_cntl | DMA_RB_ENABLE);
+
+ ring->ready = true;
+
+ r = radeon_ring_test(rdev, R600_RING_TYPE_DMA_INDEX, ring);
+ if (r) {
+ ring->ready = false;
+ return r;
+ }
+
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
+
+ return 0;
+}
+
+/**
+ * r600_dma_fini - tear down the async dma engine
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the async dma engine and free the ring (r6xx-evergreen).
+ */
+void r600_dma_fini(struct radeon_device *rdev)
+{
+ r600_dma_stop(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
+}
+
+/**
+ * r600_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool r600_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = r600_gpu_check_soft_reset(rdev);
+
+ if (!(reset_mask & RADEON_RESET_DMA)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+
+/**
+ * r600_dma_ring_test - simple async dma engine test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test the DMA engine by using it to write a
+ * value to memory (r6xx-SI).
+ * Returns 0 for success, error for failure.
+ */
+int r600_dma_ring_test(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ring_lock(rdev, ring, 4);
+ if (r) {
+ DRM_ERROR("radeon: dma failed to lock ring %d (%d).\n", ring->idx, r);
+ return r;
+ }
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
+ } else {
+ DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ return r;
+}
+
+/**
+ * r600_dma_fence_ring_emit - emit a fence on the DMA ring
+ *
+ * @rdev: radeon_device pointer
+ * @fence: radeon fence object
+ *
+ * Add a DMA fence packet to the ring to write
+ * the fence seq number and a DMA trap packet to generate
+ * an interrupt if needed (r6xx-r7xx).
+ */
+void r600_dma_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+ /* write the fence */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
+ radeon_ring_write(ring, lower_32_bits(fence->seq));
+ /* generate an interrupt */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_TRAP, 0, 0, 0));
+}
+
+/**
+ * r600_dma_semaphore_ring_emit - emit a semaphore on the dma ring
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ * @semaphore: radeon semaphore object
+ * @emit_wait: wait or signal semaphore
+ *
+ * Add a DMA semaphore packet to the ring to wait on or signal
+ * other rings (r6xx-SI).
+ */
+void r600_dma_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ u64 addr = semaphore->gpu_addr;
+ u32 s = emit_wait ? 0 : 1;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SEMAPHORE, 0, s, 0));
+ radeon_ring_write(ring, addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+}
+
+/**
+ * r600_dma_ib_test - test an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Test a simple IB in the DMA ring (r6xx-SI).
+ * Returns 0 on success, error on failure.
+ */
+int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ struct radeon_ib ib;
+ unsigned i;
+ int r;
+ void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
+ u32 tmp = 0;
+
+ if (!ptr) {
+ DRM_ERROR("invalid vram scratch pointer\n");
+ return -EINVAL;
+ }
+
+ tmp = 0xCAFEDEAD;
+ writel(tmp, ptr);
+
+ r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
+ if (r) {
+ DRM_ERROR("radeon: failed to get ib (%d).\n", r);
+ return r;
+ }
+
+ ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
+ ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc;
+ ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff;
+ ib.ptr[3] = 0xDEADBEEF;
+ ib.length_dw = 4;
+
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r) {
+ radeon_ib_free(rdev, &ib);
+ DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
+ return r;
+ }
+ r = radeon_fence_wait(ib.fence, false);
+ if (r) {
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ return r;
+ }
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = readl(ptr);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
+ } else {
+ DRM_ERROR("radeon: ib test failed (0x%08X)\n", tmp);
+ r = -EINVAL;
+ }
+ radeon_ib_free(rdev, &ib);
+ return r;
+}
+
+/**
+ * r600_dma_ring_ib_execute - Schedule an IB on the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @ib: IB object to schedule
+ *
+ * Schedule an IB in the DMA ring (r6xx-r7xx).
+ */
+void r600_dma_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ if (rdev->wb.enabled) {
+ u32 next_rptr = ring->wptr + 4;
+ while ((next_rptr & 7) != 5)
+ next_rptr++;
+ next_rptr += 3;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
+ radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xff);
+ radeon_ring_write(ring, next_rptr);
+ }
+
+ /* The indirect buffer packet must end on an 8 DW boundary in the DMA ring.
+ * Pad as necessary with NOPs.
+ */
+ while ((ring->wptr & 7) != 5)
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_INDIRECT_BUFFER, 0, 0, 0));
+ radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
+ radeon_ring_write(ring, (ib->length_dw << 16) | (upper_32_bits(ib->gpu_addr) & 0xFF));
+
+}
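
The padding loop above stops at (wptr & 7) == 5 because the INDIRECT_BUFFER packet that follows is three dwords long, so it ends exactly on the next 8-dword boundary. A standalone sketch of that arithmetic, with the starting write pointer an assumed example:

	#include <stdio.h>
	#include <stdint.h>

	/*
	 * Illustrative only: the INDIRECT_BUFFER packet is 3 dwords, so starting
	 * it at offset 5 within an 8-dword group makes it end on the boundary.
	 */
	int main(void)
	{
		uint32_t wptr = 18;		/* assumed example write pointer */
		uint32_t nops = 0;

		while ((wptr & 7) != 5) {	/* each NOP advances wptr by one dword */
			wptr++;
			nops++;
		}
		printf("padded with %u NOPs, packet occupies dwords %u..%u\n",
		       nops, wptr, wptr + 2);	/* (wptr + 3) & 7 == 0 */
		return 0;
	}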
+
+/**
+ * r600_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU pages using the DMA engine (r6xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int r600_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
+ r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFE)
+ cur_size_in_dw = 0xFFFE;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, (((upper_32_bits(dst_offset) & 0xff) << 16) |
+ (upper_32_bits(src_offset) & 0xff)));
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
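
r600_copy_dma() splits the transfer into COPY packets of at most 0xFFFE dwords and reserves four ring dwords per packet plus a small fixed overhead. A quick sketch of that sizing math, assuming the usual 4 KiB GPU page size and an arbitrary page count:

	#include <stdio.h>
	#include <stdint.h>

	#define DMA_COPY_MAX_DW 0xFFFEu		/* per-packet dword limit used above */

	/* Illustrative only: mirrors the chunking math in r600_copy_dma(). */
	int main(void)
	{
		uint32_t num_pages = 1024;			/* assumed example */
		uint32_t size_in_dw = (num_pages * 4096) / 4;	/* 4 KiB GPU pages */
		uint32_t num_loops = (size_in_dw + DMA_COPY_MAX_DW - 1) / DMA_COPY_MAX_DW;
		uint32_t ring_dw = num_loops * 4 + 8;		/* 4 dwords per COPY + overhead */

		printf("%u dwords -> %u COPY packets, %u ring dwords reserved\n",
		       size_in_dw, num_loops, ring_dw);
		return 0;
	}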
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index b88f54b134a..fa0de46fcc0 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -174,6 +174,24 @@ u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
return vblank_time_us;
}
+u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct drm_crtc *crtc;
+ struct radeon_crtc *radeon_crtc;
+ u32 vrefresh = 0;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ radeon_crtc = to_radeon_crtc(crtc);
+ if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
+ vrefresh = radeon_crtc->hw_mode.vrefresh;
+ break;
+ }
+ }
+
+ return vrefresh;
+}
+
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
u32 *p, u32 *u)
{
@@ -278,9 +296,9 @@ bool r600_dynamicpm_enabled(struct radeon_device *rdev)
void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
if (enable)
- WREG32_P(GENERAL_PWRMGT, 0, ~SCLK_PWRMGT_OFF);
+ WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
else
- WREG32_P(GENERAL_PWRMGT, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
+ WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
}
void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
@@ -745,6 +763,8 @@ bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
case THERMAL_TYPE_SUMO:
case THERMAL_TYPE_NI:
case THERMAL_TYPE_SI:
+ case THERMAL_TYPE_CI:
+ case THERMAL_TYPE_KV:
return true;
case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
@@ -779,15 +799,19 @@ static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependen
u32 size = atom_table->ucNumEntries *
sizeof(struct radeon_clock_voltage_dependency_entry);
int i;
+ ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
radeon_table->entries = kzalloc(size, GFP_KERNEL);
if (!radeon_table->entries)
return -ENOMEM;
+ entry = &atom_table->entries[0];
for (i = 0; i < atom_table->ucNumEntries; i++) {
- radeon_table->entries[i].clk = le16_to_cpu(atom_table->entries[i].usClockLow) |
- (atom_table->entries[i].ucClockHigh << 16);
- radeon_table->entries[i].v = le16_to_cpu(atom_table->entries[i].usVoltage);
+ radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
+ (entry->ucClockHigh << 16);
+ radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
}
radeon_table->count = atom_table->ucNumEntries;
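
The hunk above walks the ATOM dependency table by explicitly advancing a byte pointer by the record size rather than indexing entries[i], presumably so the stride matches the packed layout of the records in the BIOS image. A minimal user-space sketch of that walking pattern, with a hypothetical 5-byte record standing in for the real ATOM structures:

	#include <stdio.h>
	#include <stdint.h>

	/* Hypothetical 5-byte packed record: clk_low(le16), clk_high(u8), voltage(le16). */
	#define REC_SIZE 5

	static uint16_t get_le16(const uint8_t *p)
	{
		return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
	}

	int main(void)
	{
		const uint8_t blob[2 * REC_SIZE] = {
			0x34, 0x12, 0x01, 0x84, 0x03,	/* clk = 0x011234, v = 900 */
			0x78, 0x56, 0x02, 0xe8, 0x03,	/* clk = 0x025678, v = 1000 */
		};
		const uint8_t *entry = blob;
		int i;

		for (i = 0; i < 2; i++) {
			uint32_t clk = get_le16(entry) | ((uint32_t)entry[2] << 16);
			uint16_t v = get_le16(entry + 3);

			printf("entry %d: clk=0x%06x v=%u\n", i, clk, v);
			entry += REC_SIZE;	/* step by the on-disk record size */
		}
		return 0;
	}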
@@ -875,6 +899,19 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
return ret;
}
}
+ if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
+ dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
+ ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+ dep_table);
+ if (ret) {
+ kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
+ kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
+ kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
+ return ret;
+ }
+ }
if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
@@ -898,27 +935,27 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
(ATOM_PPLIB_PhaseSheddingLimits_Table *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
+ ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
kzalloc(psl->ucNumEntries *
sizeof(struct radeon_phase_shedding_limits_entry),
GFP_KERNEL);
if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
- kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
+ r600_free_extended_power_table(rdev);
return -ENOMEM;
}
+ entry = &psl->entries[0];
for (i = 0; i < psl->ucNumEntries; i++) {
rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
- le16_to_cpu(psl->entries[i].usSclkLow) |
- (psl->entries[i].ucSclkHigh << 16);
+ le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
- le16_to_cpu(psl->entries[i].usMclkLow) |
- (psl->entries[i].ucMclkHigh << 16);
+ le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
- le16_to_cpu(psl->entries[i].usVoltage);
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
}
rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
psl->ucNumEntries;
@@ -945,30 +982,140 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
(ATOM_PPLIB_CAC_Leakage_Table *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
+ ATOM_PPLIB_CAC_Leakage_Record *entry;
u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
- kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
+ r600_free_extended_power_table(rdev);
return -ENOMEM;
}
+ entry = &cac_table->entries[0];
for (i = 0; i < cac_table->ucNumEntries; i++) {
- rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
- le16_to_cpu(cac_table->entries[i].usVddc);
- rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
- le32_to_cpu(cac_table->entries[i].ulLeakageValue);
+ if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
+ le16_to_cpu(entry->usVddc1);
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
+ le16_to_cpu(entry->usVddc2);
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
+ le16_to_cpu(entry->usVddc3);
+ } else {
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
+ le16_to_cpu(entry->usVddc);
+ rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
+ le32_to_cpu(entry->ulLeakageValue);
+ }
+ entry = (ATOM_PPLIB_CAC_Leakage_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
}
rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
}
}
- /* ppm table */
+ /* ext tables */
if (le16_to_cpu(power_info->pplib.usTableSize) >=
sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
+ ext_hdr->usVCETableOffset) {
+ VCEClockInfoArray *array = (VCEClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
+ ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
+ (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
+ 1 + array->ucNumEntries * sizeof(VCEClockInfo));
+ ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
+ u32 size = limits->numEntries *
+ sizeof(struct radeon_vce_clock_voltage_dependency_entry);
+ rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+ if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
+ r600_free_extended_power_table(rdev);
+ return -ENOMEM;
+ }
+ rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+ for (i = 0; i < limits->numEntries; i++) {
+ VCEClockInfo *vce_clk = (VCEClockInfo *)
+ ((u8 *)&array->entries[0] +
+ (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
+ rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
+ le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
+ rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
+ le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
+ rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
+ }
+ }
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
+ ext_hdr->usUVDTableOffset) {
+ UVDClockInfoArray *array = (UVDClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
+ ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
+ (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
+ 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
+ ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
+ u32 size = limits->numEntries *
+ sizeof(struct radeon_uvd_clock_voltage_dependency_entry);
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+ if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
+ r600_free_extended_power_table(rdev);
+ return -ENOMEM;
+ }
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+ for (i = 0; i < limits->numEntries; i++) {
+ UVDClockInfo *uvd_clk = (UVDClockInfo *)
+ ((u8 *)&array->entries[0] +
+ (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
+ le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
+ le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
+ rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
+ le16_to_cpu(limits->entries[i].usVoltage);
+ entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
+ }
+ }
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
+ ext_hdr->usSAMUTableOffset) {
+ ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
+ (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
+ ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
+ u32 size = limits->numEntries *
+ sizeof(struct radeon_clock_voltage_dependency_entry);
+ rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+ if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
+ r600_free_extended_power_table(rdev);
+ return -ENOMEM;
+ }
+ rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+ for (i = 0; i < limits->numEntries; i++) {
+ rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
+ le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
+ rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
+ }
+ }
if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
ext_hdr->usPPMTableOffset) {
ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
@@ -977,10 +1124,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
rdev->pm.dpm.dyn_state.ppm_table =
kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
if (!rdev->pm.dpm.dyn_state.ppm_table) {
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
- kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
- kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
- kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
+ r600_free_extended_power_table(rdev);
return -ENOMEM;
}
rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
@@ -1003,6 +1147,71 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
rdev->pm.dpm.dyn_state.ppm_table->tj_max =
le32_to_cpu(ppm->ulTjmax);
}
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
+ ext_hdr->usACPTableOffset) {
+ ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
+ (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
+ ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
+ u32 size = limits->numEntries *
+ sizeof(struct radeon_clock_voltage_dependency_entry);
+ rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+ if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
+ r600_free_extended_power_table(rdev);
+ return -ENOMEM;
+ }
+ rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+ for (i = 0; i < limits->numEntries; i++) {
+ rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
+ le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
+ rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
+ }
+ }
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
+ ext_hdr->usPowerTuneTableOffset) {
+ u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+ ATOM_PowerTune_Table *pt;
+ rdev->pm.dpm.dyn_state.cac_tdp_table =
+ kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL);
+ if (!rdev->pm.dpm.dyn_state.cac_tdp_table) {
+ r600_free_extended_power_table(rdev);
+ return -ENOMEM;
+ }
+ if (rev > 0) {
+ ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+ rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
+ ppt->usMaximumPowerDeliveryLimit;
+ pt = &ppt->power_tune_table;
+ } else {
+ ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+ rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
+ pt = &ppt->power_tune_table;
+ }
+ rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
+ rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
+ le16_to_cpu(pt->usConfigurableTDP);
+ rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
+ rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
+ le16_to_cpu(pt->usBatteryPowerLimit);
+ rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
+ le16_to_cpu(pt->usSmallPowerLimit);
+ rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
+ le16_to_cpu(pt->usLowCACLeakage);
+ rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
+ le16_to_cpu(pt->usHighCACLeakage);
+ }
}
return 0;
@@ -1016,12 +1225,24 @@ void r600_free_extended_power_table(struct radeon_device *rdev)
kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries)
kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
+ if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries)
+ kfree(rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries);
if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries)
kfree(rdev->pm.dpm.dyn_state.cac_leakage_table.entries);
if (rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries)
kfree(rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries);
if (rdev->pm.dpm.dyn_state.ppm_table)
kfree(rdev->pm.dpm.dyn_state.ppm_table);
+ if (rdev->pm.dpm.dyn_state.cac_tdp_table)
+ kfree(rdev->pm.dpm.dyn_state.cac_tdp_table);
+ if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries)
+ kfree(rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries);
+ if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries)
+ kfree(rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries);
+ if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries)
+ kfree(rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries);
+ if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries)
+ kfree(rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries);
}
enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
@@ -1046,3 +1267,36 @@ enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
}
return RADEON_PCIE_GEN1;
}
+
+u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
+ u16 asic_lanes,
+ u16 default_lanes)
+{
+ switch (asic_lanes) {
+ case 0:
+ default:
+ return default_lanes;
+ case 1:
+ return 1;
+ case 2:
+ return 2;
+ case 4:
+ return 4;
+ case 8:
+ return 8;
+ case 12:
+ return 12;
+ case 16:
+ return 16;
+ }
+}
+
+u8 r600_encode_pci_lane_width(u32 lanes)
+{
+ u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };
+
+ if (lanes > 16)
+ return 0;
+
+ return encoded_lanes[lanes];
+}
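
r600_encode_pci_lane_width() maps a raw lane count to the encoded link-width field via a lookup table in which only 1, 2, 4, 8, 12 and 16 lanes have non-zero encodings. A tiny sketch exercising that table (reproduced verbatim for illustration):

	#include <stdio.h>
	#include <stdint.h>

	/* Same lookup as r600_encode_pci_lane_width(), copied here for illustration. */
	static uint8_t encode_lanes(uint32_t lanes)
	{
		static const uint8_t enc[] = {
			0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6
		};

		return lanes > 16 ? 0 : enc[lanes];
	}

	int main(void)
	{
		uint32_t widths[] = { 1, 2, 4, 8, 12, 16 };
		size_t i;

		for (i = 0; i < sizeof(widths) / sizeof(widths[0]); i++)
			printf("x%-2u -> %u\n", widths[i], encode_lanes(widths[i]));
		return 0;
	}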
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h
index 7c822d9ae53..1000bf9719f 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.h
+++ b/drivers/gpu/drm/radeon/r600_dpm.h
@@ -130,6 +130,7 @@ void r600_dpm_print_cap_info(u32 caps);
void r600_dpm_print_ps_status(struct radeon_device *rdev,
struct radeon_ps *rps);
u32 r600_dpm_get_vblank_time(struct radeon_device *rdev);
+u32 r600_dpm_get_vrefresh(struct radeon_device *rdev);
bool r600_is_uvd_state(u32 class, u32 class2);
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
u32 *p, u32 *u);
@@ -224,4 +225,9 @@ enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
enum radeon_pcie_gen asic_gen,
enum radeon_pcie_gen default_gen);
+u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
+ u16 asic_lanes,
+ u16 default_lanes);
+u8 r600_encode_pci_lane_width(u32 lanes);
+
#endif
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f48240bb8c5..f443010ce90 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -226,10 +226,29 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u32 base_rate = 24000;
+ u32 max_ratio = clock / base_rate;
+ u32 dto_phase;
+ u32 dto_modulo = clock;
+ u32 wallclock_ratio;
+ u32 dto_cntl;
if (!dig || !dig->afmt)
return;
+ if (max_ratio >= 8) {
+ dto_phase = 192 * 1000;
+ wallclock_ratio = 3;
+ } else if (max_ratio >= 4) {
+ dto_phase = 96 * 1000;
+ wallclock_ratio = 2;
+ } else if (max_ratio >= 2) {
+ dto_phase = 48 * 1000;
+ wallclock_ratio = 1;
+ } else {
+ dto_phase = 24 * 1000;
+ wallclock_ratio = 0;
+ }
+
/* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT.
* doesn't matter which one you use. Just use the first one.
*/
@@ -242,9 +261,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
/* according to the reg specs, this should be DCE3.2 only, but in
* practice it seems to cover DCE3.0 as well.
*/
- WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
- WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
- WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+ if (dig->dig_encoder == 0) {
+ dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+ dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+ WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
+ WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
+ WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+ } else {
+ dto_cntl = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+ dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+ WREG32(DCCG_AUDIO_DTO1_CNTL, dto_cntl);
+ WREG32(DCCG_AUDIO_DTO1_PHASE, dto_phase);
+ WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
+ WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+ }
} else {
/* according to the reg specs, this should be DCE2.0 and DCE3.0 */
WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
@@ -252,6 +283,107 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
}
}
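
The new DTO setup above scales the phase value and picks a wallclock ratio from how far the display clock exceeds the 24000 base rate. A small sketch of that selection, with the clock values below being assumed examples only:

	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative only: mirrors the phase/ratio selection added above. */
	static void pick_dto(uint32_t clock, uint32_t *phase, uint32_t *ratio)
	{
		uint32_t max_ratio = clock / 24000;	/* base_rate in the driver */

		if (max_ratio >= 8) {
			*phase = 192 * 1000;
			*ratio = 3;
		} else if (max_ratio >= 4) {
			*phase = 96 * 1000;
			*ratio = 2;
		} else if (max_ratio >= 2) {
			*phase = 48 * 1000;
			*ratio = 1;
		} else {
			*phase = 24 * 1000;
			*ratio = 0;
		}
	}

	int main(void)
	{
		/* assumed example clocks, in the same unit the driver passes in */
		uint32_t clocks[] = { 25175, 74250, 148500, 297000 };
		size_t i;

		for (i = 0; i < sizeof(clocks) / sizeof(clocks[0]); i++) {
			uint32_t phase, ratio;

			pick_dto(clocks[i], &phase, &ratio);
			printf("clock %6u -> dto_phase %6u, wallclock_ratio %u\n",
			       clocks[i], phase, ratio);
		}
		return 0;
	}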
+static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ u32 tmp;
+ u8 *sadb;
+ int sad_count;
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ radeon_connector = to_radeon_connector(connector);
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
+ return;
+ }
+
+ /* program the speaker allocation */
+ tmp = RREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER);
+ tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
+ /* set HDMI mode */
+ tmp |= HDMI_CONNECTION;
+ if (sad_count)
+ tmp |= SPEAKER_ALLOCATION(sadb[0]);
+ else
+ tmp |= SPEAKER_ALLOCATION(5); /* stereo */
+ WREG32(AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER, tmp);
+
+ kfree(sadb);
+}
+
+static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
+{
+ struct radeon_device *rdev = encoder->dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector = NULL;
+ struct cea_sad *sads;
+ int i, sad_count;
+
+ static const u16 eld_reg_to_type[][2] = {
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP },
+ { AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
+ };
+
+ list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder)
+ radeon_connector = to_radeon_connector(connector);
+ }
+
+ if (!radeon_connector) {
+ DRM_ERROR("Couldn't find encoder's connector\n");
+ return;
+ }
+
+ sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
+ if (sad_count < 0) {
+ DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
+ return;
+ }
+ BUG_ON(!sads);
+
+ for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
+ u32 value = 0;
+ int j;
+
+ for (j = 0; j < sad_count; j++) {
+ struct cea_sad *sad = &sads[j];
+
+ if (sad->format == eld_reg_to_type[i][1]) {
+ value = MAX_CHANNELS(sad->channels) |
+ DESCRIPTOR_BYTE_2(sad->byte2) |
+ SUPPORTED_FREQUENCIES(sad->freq);
+ if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM)
+ value |= SUPPORTED_FREQUENCIES_STEREO(sad->freq);
+ break;
+ }
+ }
+ WREG32(eld_reg_to_type[i][0], value);
+ }
+
+ kfree(sads);
+}
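
Each AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTORn register written above packs one CEA Short Audio Descriptor: channel count minus one in bits 0-2, the supported-rate bitmask in bits 8-15, byte 2 of the SAD in bits 16-23, and a second rate field in bits 24-31 used for LPCM. A minimal sketch packing an assumed 8-channel LPCM descriptor (the field helpers simply restate the macros added to r600d.h):

	#include <stdio.h>
	#include <stdint.h>

	/* Field helpers matching MAX_CHANNELS()/SUPPORTED_FREQUENCIES()/
	 * DESCRIPTOR_BYTE_2(); the descriptor contents are assumed examples.
	 */
	#define SAD_MAX_CHANNELS(x)	(((x) & 0x7) << 0)
	#define SAD_FREQUENCIES(x)	(((x) & 0xff) << 8)
	#define SAD_BYTE_2(x)		(((x) & 0xff) << 16)
	#define SAD_FREQ_STEREO(x)	(((x) & 0xff) << 24)	/* LPCM only */

	int main(void)
	{
		/* 8 channels, 32/44.1/48/96 kHz (bits 0,1,2,4), 16/20/24-bit sample sizes */
		uint32_t value = SAD_MAX_CHANNELS(8 - 1) |
				 SAD_FREQUENCIES(0x17) |
				 SAD_BYTE_2(0x07) |
				 SAD_FREQ_STEREO(0x17);

		printf("descriptor register value: 0x%08x\n", value);
		return 0;
	}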
+
/*
* update the info frames with the data from the current display mode
*/
@@ -296,6 +428,11 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
}
+ if (ASIC_IS_DCE32(rdev)) {
+ dce3_2_afmt_write_speaker_allocation(encoder);
+ dce3_2_afmt_write_sad_regs(encoder);
+ }
+
WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
HDMI0_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
HDMI0_ACR_SOURCE); /* select SW CTS value */
@@ -351,7 +488,7 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- struct r600_audio audio = r600_audio_status(rdev);
+ struct r600_audio_pin audio = r600_audio_status(rdev);
uint8_t buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AUDIO_INFOFRAME_SIZE];
struct hdmi_audio_infoframe frame;
uint32_t offset;
@@ -460,6 +597,11 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!enable && !dig->afmt->enabled)
return;
+ if (enable)
+ dig->afmt->pin = r600_audio_get_pin(rdev);
+ else
+ dig->afmt->pin = NULL;
+
/* Older chipsets require setting HDMI and routing manually */
if (!ASIC_IS_DCE3(rdev)) {
if (enable)
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 8e3fe815eda..454f90a849e 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -933,6 +933,9 @@
#define DCCG_AUDIO_DTO0_LOAD 0x051c
# define DTO_LOAD (1 << 31)
#define DCCG_AUDIO_DTO0_CNTL 0x0520
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0)
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7
+# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0
#define DCCG_AUDIO_DTO1_PHASE 0x0524
#define DCCG_AUDIO_DTO1_MODULE 0x0528
@@ -957,6 +960,42 @@
# define DIG_MODE_SDVO 4
#define DIG1_CNTL 0x79a0
+#define AZ_F0_CODEC_PIN0_CONTROL_CHANNEL_SPEAKER 0x71bc
+#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
+#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
+#define SPEAKER_ALLOCATION_SHIFT 0
+#define HDMI_CONNECTION (1 << 16)
+#define DP_CONNECTION (1 << 17)
+
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR0 0x71c8 /* LPCM */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR1 0x71cc /* AC3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR2 0x71d0 /* MPEG1 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR3 0x71d4 /* MP3 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR4 0x71d8 /* MPEG2 */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR5 0x71dc /* AAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR6 0x71e0 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR7 0x71e4 /* ATRAC */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR8 0x71e8 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR9 0x71ec /* Dolby Digital */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR10 0x71f0 /* DTS-HD */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR11 0x71f4 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR12 0x71f8 /* DTS */
+#define AZ_F0_CODEC_PIN0_CONTROL_AUDIO_DESCRIPTOR13 0x71fc /* WMA Pro */
+# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
+/* max channels minus one. 7 = 8 channels */
+# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
+# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
+# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+
/* rs6xx/rs740 and r6xx share the same HDMI blocks, however, rs6xx has only one
* instance of the blocks while r6xx has 2. DCE 3.0 cards are slightly
* different due to the new DIG blocks, but also have 2 instances.
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 2f08219c39b..ff8b564ce2b 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -152,6 +152,47 @@ extern int radeon_aspm;
#define RADEON_RESET_MC (1 << 10)
#define RADEON_RESET_DISPLAY (1 << 11)
+/* CG block flags */
+#define RADEON_CG_BLOCK_GFX (1 << 0)
+#define RADEON_CG_BLOCK_MC (1 << 1)
+#define RADEON_CG_BLOCK_SDMA (1 << 2)
+#define RADEON_CG_BLOCK_UVD (1 << 3)
+#define RADEON_CG_BLOCK_VCE (1 << 4)
+#define RADEON_CG_BLOCK_HDP (1 << 5)
+#define RADEON_CG_BLOCK_BIF (1 << 6)
+
+/* CG flags */
+#define RADEON_CG_SUPPORT_GFX_MGCG (1 << 0)
+#define RADEON_CG_SUPPORT_GFX_MGLS (1 << 1)
+#define RADEON_CG_SUPPORT_GFX_CGCG (1 << 2)
+#define RADEON_CG_SUPPORT_GFX_CGLS (1 << 3)
+#define RADEON_CG_SUPPORT_GFX_CGTS (1 << 4)
+#define RADEON_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
+#define RADEON_CG_SUPPORT_GFX_CP_LS (1 << 6)
+#define RADEON_CG_SUPPORT_GFX_RLC_LS (1 << 7)
+#define RADEON_CG_SUPPORT_MC_LS (1 << 8)
+#define RADEON_CG_SUPPORT_MC_MGCG (1 << 9)
+#define RADEON_CG_SUPPORT_SDMA_LS (1 << 10)
+#define RADEON_CG_SUPPORT_SDMA_MGCG (1 << 11)
+#define RADEON_CG_SUPPORT_BIF_LS (1 << 12)
+#define RADEON_CG_SUPPORT_UVD_MGCG (1 << 13)
+#define RADEON_CG_SUPPORT_VCE_MGCG (1 << 14)
+#define RADEON_CG_SUPPORT_HDP_LS (1 << 15)
+#define RADEON_CG_SUPPORT_HDP_MGCG (1 << 16)
+
+/* PG flags */
+#define RADEON_PG_SUPPORT_GFX_CG (1 << 0)
+#define RADEON_PG_SUPPORT_GFX_SMG (1 << 1)
+#define RADEON_PG_SUPPORT_GFX_DMG (1 << 2)
+#define RADEON_PG_SUPPORT_UVD (1 << 3)
+#define RADEON_PG_SUPPORT_VCE (1 << 4)
+#define RADEON_PG_SUPPORT_CP (1 << 5)
+#define RADEON_PG_SUPPORT_GDS (1 << 6)
+#define RADEON_PG_SUPPORT_RLC_SMU_HS (1 << 7)
+#define RADEON_PG_SUPPORT_SDMA (1 << 8)
+#define RADEON_PG_SUPPORT_ACP (1 << 9)
+#define RADEON_PG_SUPPORT_SAMU (1 << 10)
+
/* max cursor sizes (in pixels) */
#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64
@@ -238,6 +279,12 @@ int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev,
u16 *voltage,
u16 leakage_idx);
+int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
+ u16 *leakage_id);
+int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
+ u16 *vddc, u16 *vddci,
+ u16 virtual_voltage_id,
+ u16 vbios_voltage_id);
int radeon_atom_round_to_true_voltage(struct radeon_device *rdev,
u8 voltage_type,
u16 nominal_voltage,
@@ -492,9 +539,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
int radeon_mode_dumb_mmap(struct drm_file *filp,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p);
-int radeon_mode_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle);
/*
* Semaphores.
@@ -682,7 +726,7 @@ union radeon_irq_stat_regs {
#define RADEON_MAX_HPD_PINS 6
#define RADEON_MAX_CRTCS 6
-#define RADEON_MAX_AFMT_BLOCKS 6
+#define RADEON_MAX_AFMT_BLOCKS 7
struct radeon_irq {
bool installed;
@@ -746,8 +790,6 @@ struct radeon_ring {
uint32_t align_mask;
uint32_t ptr_mask;
bool ready;
- u32 ptr_reg_shift;
- u32 ptr_reg_mask;
u32 nop;
u32 idx;
u64 last_semaphore_signal_addr;
@@ -844,35 +886,6 @@ struct r600_ih {
bool enabled;
};
-struct r600_blit_cp_primitives {
- void (*set_render_target)(struct radeon_device *rdev, int format,
- int w, int h, u64 gpu_addr);
- void (*cp_set_surface_sync)(struct radeon_device *rdev,
- u32 sync_type, u32 size,
- u64 mc_addr);
- void (*set_shaders)(struct radeon_device *rdev);
- void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
- void (*set_tex_resource)(struct radeon_device *rdev,
- int format, int w, int h, int pitch,
- u64 gpu_addr, u32 size);
- void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
- int x2, int y2);
- void (*draw_auto)(struct radeon_device *rdev);
- void (*set_default_state)(struct radeon_device *rdev);
-};
-
-struct r600_blit {
- struct radeon_bo *shader_obj;
- struct r600_blit_cp_primitives primitives;
- int max_dim;
- int ring_size_common;
- int ring_size_per_loop;
- u64 shader_gpu_addr;
- u32 vs_offset, ps_offset;
- u32 state_offset;
- u32 state_len;
-};
-
/*
* RLC stuff
*/
@@ -883,13 +896,19 @@ struct radeon_rlc {
struct radeon_bo *save_restore_obj;
uint64_t save_restore_gpu_addr;
volatile uint32_t *sr_ptr;
- u32 *reg_list;
+ const u32 *reg_list;
u32 reg_list_size;
/* for clear state */
struct radeon_bo *clear_state_obj;
uint64_t clear_state_gpu_addr;
volatile uint32_t *cs_ptr;
- struct cs_section_def *cs_data;
+ const struct cs_section_def *cs_data;
+ u32 clear_state_size;
+ /* for cp tables */
+ struct radeon_bo *cp_table_obj;
+ uint64_t cp_table_gpu_addr;
+ volatile uint32_t *cp_table_ptr;
+ u32 cp_table_size;
};
int radeon_ib_get(struct radeon_device *rdev, int ring,
@@ -921,8 +940,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned size, uint32_t *data);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
- unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
- u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
+ unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop);
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
@@ -1036,7 +1054,6 @@ struct radeon_wb {
#define R600_WB_DMA_RPTR_OFFSET 1792
#define R600_WB_IH_WPTR_OFFSET 2048
#define CAYMAN_WB_DMA1_RPTR_OFFSET 2304
-#define R600_WB_UVD_RPTR_OFFSET 2560
#define R600_WB_EVENT_OFFSET 3072
#define CIK_WB_CP1_WPTR_OFFSET 3328
#define CIK_WB_CP2_WPTR_OFFSET 3584
@@ -1147,6 +1164,7 @@ enum radeon_int_thermal_type {
THERMAL_TYPE_SI,
THERMAL_TYPE_EMC2103_WITH_INTERNAL,
THERMAL_TYPE_CI,
+ THERMAL_TYPE_KV,
};
struct radeon_voltage {
@@ -1220,6 +1238,9 @@ struct radeon_ps {
/* UVD clocks */
u32 vclk;
u32 dclk;
+ /* VCE clocks */
+ u32 evclk;
+ u32 ecclk;
/* asic priv */
void *ps_priv;
};
@@ -1270,14 +1291,21 @@ struct radeon_clock_voltage_dependency_table {
struct radeon_clock_voltage_dependency_entry *entries;
};
-struct radeon_cac_leakage_entry {
- u16 vddc;
- u32 leakage;
+union radeon_cac_leakage_entry {
+ struct {
+ u16 vddc;
+ u32 leakage;
+ };
+ struct {
+ u16 vddc1;
+ u16 vddc2;
+ u16 vddc3;
+ };
};
struct radeon_cac_leakage_table {
u32 count;
- struct radeon_cac_leakage_entry *entries;
+ union radeon_cac_leakage_entry *entries;
};
struct radeon_phase_shedding_limits_entry {
@@ -1291,6 +1319,28 @@ struct radeon_phase_shedding_limits_table {
struct radeon_phase_shedding_limits_entry *entries;
};
+struct radeon_uvd_clock_voltage_dependency_entry {
+ u32 vclk;
+ u32 dclk;
+ u16 v;
+};
+
+struct radeon_uvd_clock_voltage_dependency_table {
+ u8 count;
+ struct radeon_uvd_clock_voltage_dependency_entry *entries;
+};
+
+struct radeon_vce_clock_voltage_dependency_entry {
+ u32 ecclk;
+ u32 evclk;
+ u16 v;
+};
+
+struct radeon_vce_clock_voltage_dependency_table {
+ u8 count;
+ struct radeon_vce_clock_voltage_dependency_entry *entries;
+};
+
struct radeon_ppm_table {
u8 ppm_design;
u16 cpu_core_number;
@@ -1304,11 +1354,27 @@ struct radeon_ppm_table {
u32 tj_max;
};
+struct radeon_cac_tdp_table {
+ u16 tdp;
+ u16 configurable_tdp;
+ u16 tdc;
+ u16 battery_power_limit;
+ u16 small_power_limit;
+ u16 low_cac_leakage;
+ u16 high_cac_leakage;
+ u16 maximum_power_delivery_limit;
+};
+
struct radeon_dpm_dynamic_state {
struct radeon_clock_voltage_dependency_table vddc_dependency_on_sclk;
struct radeon_clock_voltage_dependency_table vddci_dependency_on_mclk;
struct radeon_clock_voltage_dependency_table vddc_dependency_on_mclk;
+ struct radeon_clock_voltage_dependency_table mvdd_dependency_on_mclk;
struct radeon_clock_voltage_dependency_table vddc_dependency_on_dispclk;
+ struct radeon_uvd_clock_voltage_dependency_table uvd_clock_voltage_dependency_table;
+ struct radeon_vce_clock_voltage_dependency_table vce_clock_voltage_dependency_table;
+ struct radeon_clock_voltage_dependency_table samu_clock_voltage_dependency_table;
+ struct radeon_clock_voltage_dependency_table acp_clock_voltage_dependency_table;
struct radeon_clock_array valid_sclk_values;
struct radeon_clock_array valid_mclk_values;
struct radeon_clock_and_voltage_limits max_clock_voltage_on_dc;
@@ -1320,6 +1386,7 @@ struct radeon_dpm_dynamic_state {
struct radeon_cac_leakage_table cac_leakage_table;
struct radeon_phase_shedding_limits_table phase_shedding_limits_table;
struct radeon_ppm_table *ppm_table;
+ struct radeon_cac_tdp_table *cac_tdp_table;
};
struct radeon_dpm_fan {
@@ -1389,11 +1456,12 @@ struct radeon_dpm {
struct radeon_dpm_thermal thermal;
/* forced levels */
enum radeon_dpm_forced_level forced_level;
+ /* track UVD streams */
+ unsigned sd;
+ unsigned hd;
};
-void radeon_dpm_enable_power_state(struct radeon_device *rdev,
- enum radeon_pm_state_type dpm_state);
-
+void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable);
struct radeon_pm {
struct mutex mutex;
@@ -1468,9 +1536,9 @@ struct radeon_uvd {
void *cpu_addr;
uint64_t gpu_addr;
void *saved_bo;
- unsigned fw_size;
atomic_t handles[RADEON_MAX_UVD_HANDLES];
struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
+ unsigned img_size[RADEON_MAX_UVD_HANDLES];
struct delayed_work idle_work;
};
@@ -1499,12 +1567,21 @@ int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
unsigned cg_upll_func_cntl);
-struct r600_audio {
+struct r600_audio_pin {
int channels;
int rate;
int bits_per_sample;
u8 status_bits;
u8 category_code;
+ u32 offset;
+ bool connected;
+ u32 id;
+};
+
+struct r600_audio {
+ bool enabled;
+ struct r600_audio_pin pin[RADEON_MAX_AFMT_BLOCKS];
+ int num_pins;
};
/*
@@ -1536,6 +1613,34 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
unsigned nfiles);
int radeon_debugfs_fence_init(struct radeon_device *rdev);
+/*
+ * ASIC ring specific functions.
+ */
+struct radeon_asic_ring {
+ /* ring read/write ptr handling */
+ u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
+ u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
+ void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
+
+ /* validating and patching of IBs */
+ int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
+ int (*cs_parse)(struct radeon_cs_parser *p);
+
+ /* command emit functions */
+ void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+ void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
+ void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+ struct radeon_semaphore *semaphore, bool emit_wait);
+ void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
+
+ /* testing functions */
+ int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+ int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
+ bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
+
+ /* deprecated */
+ void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
+};
/*
* ASIC specific functions.
@@ -1579,23 +1684,7 @@ struct radeon_asic {
uint32_t incr, uint32_t flags);
} vm;
/* ring specific callbacks */
- struct {
- void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
- int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
- void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
- void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
- struct radeon_semaphore *semaphore, bool emit_wait);
- int (*cs_parse)(struct radeon_cs_parser *p);
- void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
- int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
- int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
- bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
- void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-
- u32 (*get_rptr)(struct radeon_device *rdev, struct radeon_ring *ring);
- u32 (*get_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
- void (*set_wptr)(struct radeon_device *rdev, struct radeon_ring *ring);
- } ring[RADEON_NUM_RINGS];
+ struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
/* irqs */
struct {
int (*set)(struct radeon_device *rdev);
@@ -1688,6 +1777,7 @@ struct radeon_asic {
void (*debugfs_print_current_performance_level)(struct radeon_device *rdev, struct seq_file *m);
int (*force_performance_level)(struct radeon_device *rdev, enum radeon_dpm_forced_level level);
bool (*vblank_too_short)(struct radeon_device *rdev);
+ void (*powergate_uvd)(struct radeon_device *rdev, bool gate);
} dpm;
/* pageflipping */
struct {
@@ -2066,7 +2156,7 @@ struct radeon_device {
const struct firmware *mec_fw; /* CIK MEC firmware */
const struct firmware *sdma_fw; /* CIK SDMA firmware */
const struct firmware *smc_fw; /* SMC firmware */
- struct r600_blit r600_blit;
+ const struct firmware *uvd_fw; /* UVD firmware */
struct r600_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
struct r600_ih ih; /* r6/700 interrupt ring */
@@ -2077,9 +2167,8 @@ struct radeon_device {
struct work_struct reset_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
- bool audio_enabled;
bool has_uvd;
- struct r600_audio audio_status; /* audio stuff */
+ struct r600_audio audio; /* audio stuff */
struct notifier_block acpi_nb;
/* only one userspace can use Hyperz features or CMASK at a time */
struct drm_file *hyperz_filp;
@@ -2095,6 +2184,11 @@ struct radeon_device {
/* ACPI interface */
struct radeon_atif atif;
struct radeon_atcs atcs;
+ /* srbm instance registers */
+ struct mutex srbm_mutex;
+ /* clock, powergating flags */
+ u32 cg_flags;
+ u32 pg_flags;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -2153,6 +2247,8 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
#define WREG32_PIF_PHY1(reg, v) eg_pif_phy1_wreg(rdev, (reg), (v))
#define RREG32_UVD_CTX(reg) r600_uvd_ctx_rreg(rdev, (reg))
#define WREG32_UVD_CTX(reg, v) r600_uvd_ctx_wreg(rdev, (reg), (v))
+#define RREG32_DIDT(reg) cik_didt_rreg(rdev, (reg))
+#define WREG32_DIDT(reg, v) cik_didt_wreg(rdev, (reg), (v))
#define WREG32_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32(reg); \
@@ -2161,7 +2257,7 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
WREG32(reg, tmp_); \
} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
-#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
#define WREG32_PLL_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32_PLL(reg); \
@@ -2284,6 +2380,22 @@ static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
WREG32(R600_UVD_CTX_DATA, (v));
}
+
+static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
+{
+ u32 r;
+
+ WREG32(CIK_DIDT_IND_INDEX, (reg));
+ r = RREG32(CIK_DIDT_IND_DATA);
+ return r;
+}
+
+static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+ WREG32(CIK_DIDT_IND_INDEX, (reg));
+ WREG32(CIK_DIDT_IND_DATA, (v));
+}
+
void r100_pll_errata_after_index(struct radeon_device *rdev);
@@ -2379,7 +2491,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
-#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
+#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)]->cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
@@ -2387,16 +2499,16 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
-#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
-#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
-#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
-#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
-#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
-#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
-#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm))
-#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx].get_rptr((rdev), (r))
-#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx].get_wptr((rdev), (r))
-#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx].set_wptr((rdev), (r))
+#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
+#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
+#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
+#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib))
+#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib))
+#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp))
+#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)]->vm_flush((rdev), (r), (vm))
+#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r))
+#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r))
+#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r))
#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
@@ -2404,8 +2516,8 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
#define radeon_hdmi_enable(rdev, e, b) (rdev)->asic->display.hdmi_enable((e), (b))
#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
-#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
-#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
+#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence))
+#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
@@ -2456,6 +2568,7 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_dpm_debugfs_print_current_performance_level(rdev, m) rdev->asic->dpm.debugfs_print_current_performance_level((rdev), (m))
#define radeon_dpm_force_performance_level(rdev, l) rdev->asic->dpm.force_performance_level((rdev), (l))
#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev))
+#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g))
/* Common functions */
/* AGP */
@@ -2522,6 +2635,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
/* audio */
void r600_audio_update_hdmi(struct work_struct *work);
+struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
+struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
/*
* R600 vram scratch functions
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 78bec1a58ed..630853b9684 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -172,6 +172,21 @@ void radeon_agp_disable(struct radeon_device *rdev)
/*
* ASIC
*/
+
+static struct radeon_asic_ring r100_gfx_ring = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r100_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r100_cs_parse,
+ .ring_start = &r100_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ .get_rptr = &radeon_ring_generic_get_rptr,
+ .get_wptr = &radeon_ring_generic_get_wptr,
+ .set_wptr = &radeon_ring_generic_set_wptr,
+};
+
static struct radeon_asic r100_asic = {
.init = &r100_init,
.fini = &r100_fini,
@@ -187,19 +202,7 @@ static struct radeon_asic r100_asic = {
.set_page = &r100_pci_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r100_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r100_cs_parse,
- .ring_start = &r100_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r100_gfx_ring
},
.irq = {
.set = &r100_irq_set,
@@ -266,19 +269,7 @@ static struct radeon_asic r200_asic = {
.set_page = &r100_pci_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r100_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r100_cs_parse,
- .ring_start = &r100_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r100_gfx_ring
},
.irq = {
.set = &r100_irq_set,
@@ -330,6 +321,20 @@ static struct radeon_asic r200_asic = {
},
};
+static struct radeon_asic_ring r300_gfx_ring = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ .get_rptr = &radeon_ring_generic_get_rptr,
+ .get_wptr = &radeon_ring_generic_get_wptr,
+ .set_wptr = &radeon_ring_generic_set_wptr,
+};
+
static struct radeon_asic r300_asic = {
.init = &r300_init,
.fini = &r300_fini,
@@ -345,19 +350,7 @@ static struct radeon_asic r300_asic = {
.set_page = &r100_pci_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &r100_irq_set,
@@ -424,19 +417,7 @@ static struct radeon_asic r300_asic_pcie = {
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &r100_irq_set,
@@ -503,19 +484,7 @@ static struct radeon_asic r420_asic = {
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &r100_irq_set,
@@ -582,19 +551,7 @@ static struct radeon_asic rs400_asic = {
.set_page = &rs400_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &r100_irq_set,
@@ -661,19 +618,7 @@ static struct radeon_asic rs600_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &rs600_irq_set,
@@ -742,19 +687,7 @@ static struct radeon_asic rs690_asic = {
.set_page = &rs400_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &rs600_irq_set,
@@ -823,19 +756,7 @@ static struct radeon_asic rv515_asic = {
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &rv515_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &rs600_irq_set,
@@ -902,19 +823,7 @@ static struct radeon_asic r520_asic = {
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r100_ring_ib_execute,
- .emit_fence = &r300_fence_ring_emit,
- .emit_semaphore = &r100_semaphore_ring_emit,
- .cs_parse = &r300_cs_parse,
- .ring_start = &rv515_ring_start,
- .ring_test = &r100_ring_test,
- .ib_test = &r100_ib_test,
- .is_lockup = &r100_gpu_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
},
.irq = {
.set = &rs600_irq_set,
@@ -966,6 +875,32 @@ static struct radeon_asic r520_asic = {
},
};
+static struct radeon_asic_ring r600_gfx_ring = {
+ .ib_execute = &r600_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &r600_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &r600_gfx_is_lockup,
+ .get_rptr = &radeon_ring_generic_get_rptr,
+ .get_wptr = &radeon_ring_generic_get_wptr,
+ .set_wptr = &radeon_ring_generic_set_wptr,
+};
+
+static struct radeon_asic_ring r600_dma_ring = {
+ .ib_execute = &r600_dma_ring_ib_execute,
+ .emit_fence = &r600_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &r600_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &r600_dma_is_lockup,
+ .get_rptr = &r600_dma_get_rptr,
+ .get_wptr = &r600_dma_get_wptr,
+ .set_wptr = &r600_dma_set_wptr,
+};
+
static struct radeon_asic r600_asic = {
.init = &r600_init,
.fini = &r600_fini,
@@ -983,30 +918,8 @@ static struct radeon_asic r600_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r600_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &r600_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &r600_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &r600_dma_ring_ib_execute,
- .emit_fence = &r600_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &r600_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
},
.irq = {
.set = &r600_irq_set,
@@ -1022,7 +935,7 @@ static struct radeon_asic r600_asic = {
.hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r600_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1078,30 +991,8 @@ static struct radeon_asic rv6xx_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r600_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &r600_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &r600_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &r600_dma_ring_ib_execute,
- .emit_fence = &r600_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &r600_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
},
.irq = {
.set = &r600_irq_set,
@@ -1115,7 +1006,7 @@ static struct radeon_asic rv6xx_asic = {
.get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r600_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1161,6 +1052,7 @@ static struct radeon_asic rv6xx_asic = {
.get_mclk = &rv6xx_dpm_get_mclk,
.print_power_state = &rv6xx_dpm_print_power_state,
.debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level,
+ .force_performance_level = &rv6xx_dpm_force_performance_level,
},
.pflip = {
.pre_page_flip = &rs600_pre_page_flip,
@@ -1186,30 +1078,8 @@ static struct radeon_asic rs780_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r600_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &r600_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &r600_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &r600_dma_ring_ib_execute,
- .emit_fence = &r600_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &r600_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
},
.irq = {
.set = &r600_irq_set,
@@ -1225,7 +1095,7 @@ static struct radeon_asic rs780_asic = {
.hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r600_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1279,6 +1149,19 @@ static struct radeon_asic rs780_asic = {
},
};
+static struct radeon_asic_ring rv770_uvd_ring = {
+ .ib_execute = &uvd_v1_0_ib_execute,
+ .emit_fence = &uvd_v2_2_fence_emit,
+ .emit_semaphore = &uvd_v1_0_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &uvd_v1_0_ring_test,
+ .ib_test = &uvd_v1_0_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
+ .get_rptr = &uvd_v1_0_get_rptr,
+ .get_wptr = &uvd_v1_0_get_wptr,
+ .set_wptr = &uvd_v1_0_set_wptr,
+};
+
static struct radeon_asic rv770_asic = {
.init = &rv770_init,
.fini = &rv770_fini,
@@ -1296,42 +1179,9 @@ static struct radeon_asic rv770_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &r600_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &r600_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &r600_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &r600_dma_ring_ib_execute,
- .emit_fence = &r600_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &r600_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &r600_dma_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &r600_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &r600_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &r600_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
},
.irq = {
.set = &r600_irq_set,
@@ -1347,7 +1197,7 @@ static struct radeon_asic rv770_asic = {
.hdmi_setmode = &r600_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &rv770_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1404,6 +1254,32 @@ static struct radeon_asic rv770_asic = {
},
};
+static struct radeon_asic_ring evergreen_gfx_ring = {
+ .ib_execute = &evergreen_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &evergreen_gfx_is_lockup,
+ .get_rptr = &radeon_ring_generic_get_rptr,
+ .get_wptr = &radeon_ring_generic_get_wptr,
+ .set_wptr = &radeon_ring_generic_set_wptr,
+};
+
+static struct radeon_asic_ring evergreen_dma_ring = {
+ .ib_execute = &evergreen_dma_ring_ib_execute,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &evergreen_dma_is_lockup,
+ .get_rptr = &r600_dma_get_rptr,
+ .get_wptr = &r600_dma_get_wptr,
+ .set_wptr = &r600_dma_set_wptr,
+};
+
static struct radeon_asic evergreen_asic = {
.init = &evergreen_init,
.fini = &evergreen_fini,
@@ -1421,42 +1297,9 @@ static struct radeon_asic evergreen_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &evergreen_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &evergreen_dma_ring_ib_execute,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &evergreen_dma_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &r600_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
},
.irq = {
.set = &evergreen_irq_set,
@@ -1472,7 +1315,7 @@ static struct radeon_asic evergreen_asic = {
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1546,42 +1389,9 @@ static struct radeon_asic sumo_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &evergreen_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &evergreen_dma_ring_ib_execute,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &evergreen_dma_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &r600_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
},
.irq = {
.set = &evergreen_irq_set,
@@ -1597,7 +1407,7 @@ static struct radeon_asic sumo_asic = {
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1670,42 +1480,9 @@ static struct radeon_asic btc_asic = {
.set_page = &rs600_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &evergreen_ring_ib_execute,
- .emit_fence = &r600_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &evergreen_gfx_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &evergreen_dma_ring_ib_execute,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &evergreen_dma_is_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &r600_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &evergreen_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &evergreen_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &rv770_uvd_ring,
},
.irq = {
.set = &evergreen_irq_set,
@@ -1721,7 +1498,7 @@ static struct radeon_asic btc_asic = {
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1778,6 +1555,49 @@ static struct radeon_asic btc_asic = {
},
};
+static struct radeon_asic_ring cayman_gfx_ring = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = &evergreen_cs_parse,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &cayman_gfx_is_lockup,
+ .vm_flush = &cayman_vm_flush,
+ .get_rptr = &radeon_ring_generic_get_rptr,
+ .get_wptr = &radeon_ring_generic_get_wptr,
+ .set_wptr = &radeon_ring_generic_set_wptr,
+};
+
+static struct radeon_asic_ring cayman_dma_ring = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = &evergreen_dma_cs_parse,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &cayman_dma_is_lockup,
+ .vm_flush = &cayman_dma_vm_flush,
+ .get_rptr = &r600_dma_get_rptr,
+ .get_wptr = &r600_dma_get_wptr,
+ .set_wptr = &r600_dma_set_wptr
+};
+
+static struct radeon_asic_ring cayman_uvd_ring = {
+ .ib_execute = &uvd_v1_0_ib_execute,
+ .emit_fence = &uvd_v2_2_fence_emit,
+ .emit_semaphore = &uvd_v3_1_semaphore_emit,
+ .cs_parse = &radeon_uvd_cs_parse,
+ .ring_test = &uvd_v1_0_ring_test,
+ .ib_test = &uvd_v1_0_ib_test,
+ .is_lockup = &radeon_ring_test_lockup,
+ .get_rptr = &uvd_v1_0_get_rptr,
+ .get_wptr = &uvd_v1_0_get_wptr,
+ .set_wptr = &uvd_v1_0_set_wptr,
+};
+
static struct radeon_asic cayman_asic = {
.init = &cayman_init,
.fini = &cayman_fini,
@@ -1801,88 +1621,12 @@ static struct radeon_asic cayman_asic = {
.set_page = &cayman_vm_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &cayman_gfx_is_lockup,
- .vm_flush = &cayman_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP1_INDEX] = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &cayman_gfx_is_lockup,
- .vm_flush = &cayman_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP2_INDEX] = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &cayman_gfx_is_lockup,
- .vm_flush = &cayman_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &cayman_dma_ring_ib_execute,
- .ib_parse = &evergreen_dma_ib_parse,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &cayman_dma_is_lockup,
- .vm_flush = &cayman_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_DMA1_INDEX] = {
- .ib_execute = &cayman_dma_ring_ib_execute,
- .ib_parse = &evergreen_dma_ib_parse,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &cayman_dma_is_lockup,
- .vm_flush = &cayman_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &cayman_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
+ [CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring,
+ [CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring,
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
},
.irq = {
.set = &evergreen_irq_set,
@@ -1898,7 +1642,7 @@ static struct radeon_asic cayman_asic = {
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -1978,88 +1722,12 @@ static struct radeon_asic trinity_asic = {
.set_page = &cayman_vm_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &cayman_gfx_is_lockup,
- .vm_flush = &cayman_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP1_INDEX] = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &cayman_gfx_is_lockup,
- .vm_flush = &cayman_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP2_INDEX] = {
- .ib_execute = &cayman_ring_ib_execute,
- .ib_parse = &evergreen_ib_parse,
- .emit_fence = &cayman_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = &evergreen_cs_parse,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &cayman_gfx_is_lockup,
- .vm_flush = &cayman_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &cayman_dma_ring_ib_execute,
- .ib_parse = &evergreen_dma_ib_parse,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &cayman_dma_is_lockup,
- .vm_flush = &cayman_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_DMA1_INDEX] = {
- .ib_execute = &cayman_dma_ring_ib_execute,
- .ib_parse = &evergreen_dma_ib_parse,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = &evergreen_dma_cs_parse,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &cayman_dma_is_lockup,
- .vm_flush = &cayman_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &cayman_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
+ [CAYMAN_RING_TYPE_CP1_INDEX] = &cayman_gfx_ring,
+ [CAYMAN_RING_TYPE_CP2_INDEX] = &cayman_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &cayman_dma_ring,
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = &cayman_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
},
.irq = {
.set = &evergreen_irq_set,
@@ -2071,9 +1739,11 @@ static struct radeon_asic trinity_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = &r600_copy_blit,
+ .blit = &r600_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &evergreen_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2129,6 +1799,36 @@ static struct radeon_asic trinity_asic = {
},
};
+static struct radeon_asic_ring si_gfx_ring = {
+ .ib_execute = &si_ring_ib_execute,
+ .ib_parse = &si_ib_parse,
+ .emit_fence = &si_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_ring_test,
+ .ib_test = &r600_ib_test,
+ .is_lockup = &si_gfx_is_lockup,
+ .vm_flush = &si_vm_flush,
+ .get_rptr = &radeon_ring_generic_get_rptr,
+ .get_wptr = &radeon_ring_generic_get_wptr,
+ .set_wptr = &radeon_ring_generic_set_wptr,
+};
+
+static struct radeon_asic_ring si_dma_ring = {
+ .ib_execute = &cayman_dma_ring_ib_execute,
+ .ib_parse = &evergreen_dma_ib_parse,
+ .emit_fence = &evergreen_dma_fence_ring_emit,
+ .emit_semaphore = &r600_dma_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &r600_dma_ring_test,
+ .ib_test = &r600_dma_ib_test,
+ .is_lockup = &si_dma_is_lockup,
+ .vm_flush = &si_dma_vm_flush,
+ .get_rptr = &r600_dma_get_rptr,
+ .get_wptr = &r600_dma_get_wptr,
+ .set_wptr = &r600_dma_set_wptr,
+};
+
static struct radeon_asic si_asic = {
.init = &si_init,
.fini = &si_fini,
@@ -2152,88 +1852,12 @@ static struct radeon_asic si_asic = {
.set_page = &si_vm_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &si_ring_ib_execute,
- .ib_parse = &si_ib_parse,
- .emit_fence = &si_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &si_gfx_is_lockup,
- .vm_flush = &si_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP1_INDEX] = {
- .ib_execute = &si_ring_ib_execute,
- .ib_parse = &si_ib_parse,
- .emit_fence = &si_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &si_gfx_is_lockup,
- .vm_flush = &si_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP2_INDEX] = {
- .ib_execute = &si_ring_ib_execute,
- .ib_parse = &si_ib_parse,
- .emit_fence = &si_fence_ring_emit,
- .emit_semaphore = &r600_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &r600_ring_test,
- .ib_test = &r600_ib_test,
- .is_lockup = &si_gfx_is_lockup,
- .vm_flush = &si_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &cayman_dma_ring_ib_execute,
- .ib_parse = &evergreen_dma_ib_parse,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &si_dma_is_lockup,
- .vm_flush = &si_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_DMA1_INDEX] = {
- .ib_execute = &cayman_dma_ring_ib_execute,
- .ib_parse = &evergreen_dma_ib_parse,
- .emit_fence = &evergreen_dma_fence_ring_emit,
- .emit_semaphore = &r600_dma_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &r600_dma_ring_test,
- .ib_test = &r600_dma_ib_test,
- .is_lockup = &si_dma_is_lockup,
- .vm_flush = &si_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &cayman_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
+ [CAYMAN_RING_TYPE_CP1_INDEX] = &si_gfx_ring,
+ [CAYMAN_RING_TYPE_CP2_INDEX] = &si_gfx_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &si_dma_ring,
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = &si_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
},
.irq = {
.set = &si_irq_set,
@@ -2245,6 +1869,8 @@ static struct radeon_asic si_asic = {
.wait_for_vblank = &dce4_wait_for_vblank,
.set_backlight_level = &atombios_set_backlight_level,
.get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = NULL,
@@ -2304,6 +1930,51 @@ static struct radeon_asic si_asic = {
},
};
+static struct radeon_asic_ring ci_gfx_ring = {
+ .ib_execute = &cik_ring_ib_execute,
+ .ib_parse = &cik_ib_parse,
+ .emit_fence = &cik_fence_gfx_ring_emit,
+ .emit_semaphore = &cik_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &cik_ring_test,
+ .ib_test = &cik_ib_test,
+ .is_lockup = &cik_gfx_is_lockup,
+ .vm_flush = &cik_vm_flush,
+ .get_rptr = &radeon_ring_generic_get_rptr,
+ .get_wptr = &radeon_ring_generic_get_wptr,
+ .set_wptr = &radeon_ring_generic_set_wptr,
+};
+
+static struct radeon_asic_ring ci_cp_ring = {
+ .ib_execute = &cik_ring_ib_execute,
+ .ib_parse = &cik_ib_parse,
+ .emit_fence = &cik_fence_compute_ring_emit,
+ .emit_semaphore = &cik_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &cik_ring_test,
+ .ib_test = &cik_ib_test,
+ .is_lockup = &cik_gfx_is_lockup,
+ .vm_flush = &cik_vm_flush,
+ .get_rptr = &cik_compute_ring_get_rptr,
+ .get_wptr = &cik_compute_ring_get_wptr,
+ .set_wptr = &cik_compute_ring_set_wptr,
+};
+
+static struct radeon_asic_ring ci_dma_ring = {
+ .ib_execute = &cik_sdma_ring_ib_execute,
+ .ib_parse = &cik_ib_parse,
+ .emit_fence = &cik_sdma_fence_ring_emit,
+ .emit_semaphore = &cik_sdma_semaphore_ring_emit,
+ .cs_parse = NULL,
+ .ring_test = &cik_sdma_ring_test,
+ .ib_test = &cik_sdma_ib_test,
+ .is_lockup = &cik_sdma_is_lockup,
+ .vm_flush = &cik_dma_vm_flush,
+ .get_rptr = &r600_dma_get_rptr,
+ .get_wptr = &r600_dma_get_wptr,
+ .set_wptr = &r600_dma_set_wptr,
+};
+
static struct radeon_asic ci_asic = {
.init = &cik_init,
.fini = &cik_fini,
@@ -2327,88 +1998,12 @@ static struct radeon_asic ci_asic = {
.set_page = &cik_vm_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &cik_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_fence_gfx_ring_emit,
- .emit_semaphore = &cik_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_ring_test,
- .ib_test = &cik_ib_test,
- .is_lockup = &cik_gfx_is_lockup,
- .vm_flush = &cik_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP1_INDEX] = {
- .ib_execute = &cik_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_fence_compute_ring_emit,
- .emit_semaphore = &cik_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_ring_test,
- .ib_test = &cik_ib_test,
- .is_lockup = &cik_gfx_is_lockup,
- .vm_flush = &cik_vm_flush,
- .get_rptr = &cik_compute_ring_get_rptr,
- .get_wptr = &cik_compute_ring_get_wptr,
- .set_wptr = &cik_compute_ring_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP2_INDEX] = {
- .ib_execute = &cik_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_fence_compute_ring_emit,
- .emit_semaphore = &cik_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_ring_test,
- .ib_test = &cik_ib_test,
- .is_lockup = &cik_gfx_is_lockup,
- .vm_flush = &cik_vm_flush,
- .get_rptr = &cik_compute_ring_get_rptr,
- .get_wptr = &cik_compute_ring_get_wptr,
- .set_wptr = &cik_compute_ring_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &cik_sdma_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_sdma_fence_ring_emit,
- .emit_semaphore = &cik_sdma_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_sdma_ring_test,
- .ib_test = &cik_sdma_ib_test,
- .is_lockup = &cik_sdma_is_lockup,
- .vm_flush = &cik_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_DMA1_INDEX] = {
- .ib_execute = &cik_sdma_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_sdma_fence_ring_emit,
- .emit_semaphore = &cik_sdma_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_sdma_ring_test,
- .ib_test = &cik_sdma_ib_test,
- .is_lockup = &cik_sdma_is_lockup,
- .vm_flush = &cik_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &cayman_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
+ [CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring,
+ [CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
},
.irq = {
.set = &cik_irq_set,
@@ -2418,6 +2013,8 @@ static struct radeon_asic ci_asic = {
.bandwidth_update = &dce8_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = NULL,
@@ -2451,6 +2048,25 @@ static struct radeon_asic ci_asic = {
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_uvd_clocks = &cik_set_uvd_clocks,
+ .get_temperature = &ci_get_temp,
+ },
+ .dpm = {
+ .init = &ci_dpm_init,
+ .setup_asic = &ci_dpm_setup_asic,
+ .enable = &ci_dpm_enable,
+ .disable = &ci_dpm_disable,
+ .pre_set_power_state = &ci_dpm_pre_set_power_state,
+ .set_power_state = &ci_dpm_set_power_state,
+ .post_set_power_state = &ci_dpm_post_set_power_state,
+ .display_configuration_changed = &ci_dpm_display_configuration_changed,
+ .fini = &ci_dpm_fini,
+ .get_sclk = &ci_dpm_get_sclk,
+ .get_mclk = &ci_dpm_get_mclk,
+ .print_power_state = &ci_dpm_print_power_state,
+ .debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
+ .force_performance_level = &ci_dpm_force_performance_level,
+ .vblank_too_short = &ci_dpm_vblank_too_short,
+ .powergate_uvd = &ci_dpm_powergate_uvd,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -2482,88 +2098,12 @@ static struct radeon_asic kv_asic = {
.set_page = &cik_vm_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = {
- .ib_execute = &cik_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_fence_gfx_ring_emit,
- .emit_semaphore = &cik_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_ring_test,
- .ib_test = &cik_ib_test,
- .is_lockup = &cik_gfx_is_lockup,
- .vm_flush = &cik_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP1_INDEX] = {
- .ib_execute = &cik_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_fence_compute_ring_emit,
- .emit_semaphore = &cik_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_ring_test,
- .ib_test = &cik_ib_test,
- .is_lockup = &cik_gfx_is_lockup,
- .vm_flush = &cik_vm_flush,
- .get_rptr = &cik_compute_ring_get_rptr,
- .get_wptr = &cik_compute_ring_get_wptr,
- .set_wptr = &cik_compute_ring_set_wptr,
- },
- [CAYMAN_RING_TYPE_CP2_INDEX] = {
- .ib_execute = &cik_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_fence_compute_ring_emit,
- .emit_semaphore = &cik_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_ring_test,
- .ib_test = &cik_ib_test,
- .is_lockup = &cik_gfx_is_lockup,
- .vm_flush = &cik_vm_flush,
- .get_rptr = &cik_compute_ring_get_rptr,
- .get_wptr = &cik_compute_ring_get_wptr,
- .set_wptr = &cik_compute_ring_set_wptr,
- },
- [R600_RING_TYPE_DMA_INDEX] = {
- .ib_execute = &cik_sdma_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_sdma_fence_ring_emit,
- .emit_semaphore = &cik_sdma_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_sdma_ring_test,
- .ib_test = &cik_sdma_ib_test,
- .is_lockup = &cik_sdma_is_lockup,
- .vm_flush = &cik_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [CAYMAN_RING_TYPE_DMA1_INDEX] = {
- .ib_execute = &cik_sdma_ring_ib_execute,
- .ib_parse = &cik_ib_parse,
- .emit_fence = &cik_sdma_fence_ring_emit,
- .emit_semaphore = &cik_sdma_semaphore_ring_emit,
- .cs_parse = NULL,
- .ring_test = &cik_sdma_ring_test,
- .ib_test = &cik_sdma_ib_test,
- .is_lockup = &cik_sdma_is_lockup,
- .vm_flush = &cik_dma_vm_flush,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- },
- [R600_RING_TYPE_UVD_INDEX] = {
- .ib_execute = &r600_uvd_ib_execute,
- .emit_fence = &r600_uvd_fence_emit,
- .emit_semaphore = &cayman_uvd_semaphore_emit,
- .cs_parse = &radeon_uvd_cs_parse,
- .ring_test = &r600_uvd_ring_test,
- .ib_test = &r600_uvd_ib_test,
- .is_lockup = &radeon_ring_test_lockup,
- .get_rptr = &radeon_ring_generic_get_rptr,
- .get_wptr = &radeon_ring_generic_get_wptr,
- .set_wptr = &radeon_ring_generic_set_wptr,
- }
+ [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
+ [CAYMAN_RING_TYPE_CP1_INDEX] = &ci_cp_ring,
+ [CAYMAN_RING_TYPE_CP2_INDEX] = &ci_cp_ring,
+ [R600_RING_TYPE_DMA_INDEX] = &ci_dma_ring,
+ [CAYMAN_RING_TYPE_DMA1_INDEX] = &ci_dma_ring,
+ [R600_RING_TYPE_UVD_INDEX] = &cayman_uvd_ring,
},
.irq = {
.set = &cik_irq_set,
@@ -2573,6 +2113,8 @@ static struct radeon_asic kv_asic = {
.bandwidth_update = &dce8_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
.blit = NULL,
@@ -2606,6 +2148,24 @@ static struct radeon_asic kv_asic = {
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_uvd_clocks = &cik_set_uvd_clocks,
+ .get_temperature = &kv_get_temp,
+ },
+ .dpm = {
+ .init = &kv_dpm_init,
+ .setup_asic = &kv_dpm_setup_asic,
+ .enable = &kv_dpm_enable,
+ .disable = &kv_dpm_disable,
+ .pre_set_power_state = &kv_dpm_pre_set_power_state,
+ .set_power_state = &kv_dpm_set_power_state,
+ .post_set_power_state = &kv_dpm_post_set_power_state,
+ .display_configuration_changed = &kv_dpm_display_configuration_changed,
+ .fini = &kv_dpm_fini,
+ .get_sclk = &kv_dpm_get_sclk,
+ .get_mclk = &kv_dpm_get_mclk,
+ .print_power_state = &kv_dpm_print_power_state,
+ .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
+ .force_performance_level = &kv_dpm_force_performance_level,
+ .powergate_uvd = &kv_dpm_powergate_uvd,
},
.pflip = {
.pre_page_flip = &evergreen_pre_page_flip,
@@ -2775,19 +2335,188 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->has_uvd = false;
else
rdev->has_uvd = true;
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ break;
+ case CHIP_PITCAIRN:
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_GFX_RLC_LS |
+ RADEON_CG_SUPPORT_MC_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ break;
+ case CHIP_VERDE:
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_GFX_RLC_LS |
+ RADEON_CG_SUPPORT_MC_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0 |
+ /*RADEON_PG_SUPPORT_GFX_CG | */
+ RADEON_PG_SUPPORT_SDMA;
+ break;
+ case CHIP_OLAND:
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_GFX_RLC_LS |
+ RADEON_CG_SUPPORT_MC_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ break;
+ case CHIP_HAINAN:
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_GFX_RLC_LS |
+ RADEON_CG_SUPPORT_MC_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ break;
+ default:
+ rdev->cg_flags = 0;
+ rdev->pg_flags = 0;
+ break;
+ }
break;
case CHIP_BONAIRE:
rdev->asic = &ci_asic;
rdev->num_crtc = 6;
+ rdev->has_uvd = true;
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CGTS_LS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_MC_LS |
+ RADEON_CG_SUPPORT_MC_MGCG |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_SDMA_LS |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
break;
case CHIP_KAVERI:
case CHIP_KABINI:
rdev->asic = &kv_asic;
/* set num crtcs */
- if (rdev->family == CHIP_KAVERI)
+ if (rdev->family == CHIP_KAVERI) {
rdev->num_crtc = 4;
- else
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CGTS_LS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_SDMA_LS |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ /*RADEON_PG_SUPPORT_GFX_CG |
+ RADEON_PG_SUPPORT_GFX_SMG |
+ RADEON_PG_SUPPORT_GFX_DMG |
+ RADEON_PG_SUPPORT_UVD |
+ RADEON_PG_SUPPORT_VCE |
+ RADEON_PG_SUPPORT_CP |
+ RADEON_PG_SUPPORT_GDS |
+ RADEON_PG_SUPPORT_RLC_SMU_HS |
+ RADEON_PG_SUPPORT_ACP |
+ RADEON_PG_SUPPORT_SAMU;*/
+ } else {
rdev->num_crtc = 2;
+ rdev->cg_flags =
+ RADEON_CG_SUPPORT_GFX_MGCG |
+ RADEON_CG_SUPPORT_GFX_MGLS |
+ /*RADEON_CG_SUPPORT_GFX_CGCG |*/
+ RADEON_CG_SUPPORT_GFX_CGLS |
+ RADEON_CG_SUPPORT_GFX_CGTS |
+ RADEON_CG_SUPPORT_GFX_CGTS_LS |
+ RADEON_CG_SUPPORT_GFX_CP_LS |
+ RADEON_CG_SUPPORT_SDMA_MGCG |
+ RADEON_CG_SUPPORT_SDMA_LS |
+ RADEON_CG_SUPPORT_BIF_LS |
+ RADEON_CG_SUPPORT_VCE_MGCG |
+ RADEON_CG_SUPPORT_UVD_MGCG |
+ RADEON_CG_SUPPORT_HDP_LS |
+ RADEON_CG_SUPPORT_HDP_MGCG;
+ rdev->pg_flags = 0;
+ /*RADEON_PG_SUPPORT_GFX_CG |
+ RADEON_PG_SUPPORT_GFX_SMG |
+ RADEON_PG_SUPPORT_UVD |
+ RADEON_PG_SUPPORT_VCE |
+ RADEON_PG_SUPPORT_CP |
+ RADEON_PG_SUPPORT_GDS |
+ RADEON_PG_SUPPORT_RLC_SMU_HS |
+ RADEON_PG_SUPPORT_SAMU;*/
+ }
+ rdev->has_uvd = true;
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index ca189570990..818bbe6b884 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -336,10 +336,6 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_dma_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
-int r600_uvd_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
-int r600_copy_blit(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages, struct radeon_fence **fence);
int r600_copy_cpdma(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages, struct radeon_fence **fence);
@@ -371,8 +367,6 @@ int r600_count_pipe_bits(uint32_t val);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
int r600_pcie_gart_init(struct radeon_device *rdev);
void r600_scratch_init(struct radeon_device *rdev);
-int r600_blit_init(struct radeon_device *rdev);
-void r600_blit_fini(struct radeon_device *rdev);
int r600_init_microcode(struct radeon_device *rdev);
/* r600 irq */
int r600_irq_process(struct radeon_device *rdev);
@@ -385,28 +379,25 @@ void r600_disable_interrupts(struct radeon_device *rdev);
void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
int r600_audio_init(struct radeon_device *rdev);
-struct r600_audio r600_audio_status(struct radeon_device *rdev);
+struct r600_audio_pin r600_audio_status(struct radeon_device *rdev);
void r600_audio_fini(struct radeon_device *rdev);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
-/* r600 blit */
-int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
- struct radeon_fence **fence, struct radeon_sa_bo **vb,
- struct radeon_semaphore **sem);
-void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
- struct radeon_sa_bo *vb, struct radeon_semaphore *sem);
-void r600_kms_blit_copy(struct radeon_device *rdev,
- u64 src_gpu_addr, u64 dst_gpu_addr,
- unsigned num_gpu_pages,
- struct radeon_sa_bo *vb);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
u32 r600_get_xclk(struct radeon_device *rdev);
uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
int rv6xx_get_temp(struct radeon_device *rdev);
int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
void r600_dpm_post_set_power_state(struct radeon_device *rdev);
+/* r600 dma */
+uint32_t r600_dma_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+uint32_t r600_dma_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void r600_dma_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
/* rv6xx dpm */
int rv6xx_dpm_init(struct radeon_device *rdev);
int rv6xx_dpm_enable(struct radeon_device *rdev);
@@ -421,6 +412,8 @@ void rv6xx_dpm_print_power_state(struct radeon_device *rdev,
struct radeon_ps *ps);
void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
+int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level);
/* rs780 dpm */
int rs780_dpm_init(struct radeon_device *rdev);
int rs780_dpm_enable(struct radeon_device *rdev);
@@ -436,19 +429,6 @@ void rs780_dpm_print_power_state(struct radeon_device *rdev,
void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m);
-/* uvd */
-int r600_uvd_init(struct radeon_device *rdev);
-int r600_uvd_rbc_start(struct radeon_device *rdev);
-void r600_uvd_rbc_stop(struct radeon_device *rdev);
-int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
-void r600_uvd_fence_emit(struct radeon_device *rdev,
- struct radeon_fence *fence);
-void r600_uvd_semaphore_emit(struct radeon_device *rdev,
- struct radeon_ring *ring,
- struct radeon_semaphore *semaphore,
- bool emit_wait);
-void r600_uvd_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-
/*
* rv770,rv730,rv710,rv740
*/
@@ -466,7 +446,6 @@ int rv770_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_fence **fence);
u32 rv770_get_xclk(struct radeon_device *rdev);
-int rv770_uvd_resume(struct radeon_device *rdev);
int rv770_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
int rv770_get_temp(struct radeon_device *rdev);
/* rv7xx pm */
@@ -528,7 +507,6 @@ extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_ba
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
void evergreen_disable_interrupt_state(struct radeon_device *rdev);
-int evergreen_blit_init(struct radeon_device *rdev);
int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
void evergreen_dma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
@@ -650,6 +628,8 @@ int trinity_dpm_force_performance_level(struct radeon_device *rdev,
/* DCE6 - SI */
void dce6_bandwidth_update(struct radeon_device *rdev);
+int dce6_audio_init(struct radeon_device *rdev);
+void dce6_audio_fini(struct radeon_device *rdev);
/*
* si
@@ -710,7 +690,6 @@ u32 cik_get_xclk(struct radeon_device *rdev);
uint32_t cik_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
void cik_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int cik_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
-int cik_uvd_resume(struct radeon_device *rdev);
void cik_sdma_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void cik_sdma_semaphore_ring_emit(struct radeon_device *rdev,
@@ -761,5 +740,81 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
void cik_compute_ring_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring);
+int ci_get_temp(struct radeon_device *rdev);
+int kv_get_temp(struct radeon_device *rdev);
+
+int ci_dpm_init(struct radeon_device *rdev);
+int ci_dpm_enable(struct radeon_device *rdev);
+void ci_dpm_disable(struct radeon_device *rdev);
+int ci_dpm_pre_set_power_state(struct radeon_device *rdev);
+int ci_dpm_set_power_state(struct radeon_device *rdev);
+void ci_dpm_post_set_power_state(struct radeon_device *rdev);
+void ci_dpm_setup_asic(struct radeon_device *rdev);
+void ci_dpm_display_configuration_changed(struct radeon_device *rdev);
+void ci_dpm_fini(struct radeon_device *rdev);
+u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void ci_dpm_print_power_state(struct radeon_device *rdev,
+ struct radeon_ps *ps);
+void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m);
+int ci_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level);
+bool ci_dpm_vblank_too_short(struct radeon_device *rdev);
+void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
+
+int kv_dpm_init(struct radeon_device *rdev);
+int kv_dpm_enable(struct radeon_device *rdev);
+void kv_dpm_disable(struct radeon_device *rdev);
+int kv_dpm_pre_set_power_state(struct radeon_device *rdev);
+int kv_dpm_set_power_state(struct radeon_device *rdev);
+void kv_dpm_post_set_power_state(struct radeon_device *rdev);
+void kv_dpm_setup_asic(struct radeon_device *rdev);
+void kv_dpm_display_configuration_changed(struct radeon_device *rdev);
+void kv_dpm_fini(struct radeon_device *rdev);
+u32 kv_dpm_get_sclk(struct radeon_device *rdev, bool low);
+u32 kv_dpm_get_mclk(struct radeon_device *rdev, bool low);
+void kv_dpm_print_power_state(struct radeon_device *rdev,
+ struct radeon_ps *ps);
+void kv_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m);
+int kv_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level);
+void kv_dpm_powergate_uvd(struct radeon_device *rdev, bool gate);
+
+/* uvd v1.0 */
+uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+void uvd_v1_0_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring);
+
+int uvd_v1_0_init(struct radeon_device *rdev);
+void uvd_v1_0_fini(struct radeon_device *rdev);
+int uvd_v1_0_start(struct radeon_device *rdev);
+void uvd_v1_0_stop(struct radeon_device *rdev);
+
+int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring);
+int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
+void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
+void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+
+/* uvd v2.2 */
+int uvd_v2_2_resume(struct radeon_device *rdev);
+void uvd_v2_2_fence_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
+
+/* uvd v3.1 */
+void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
+
+/* uvd v4.2 */
+int uvd_v4_2_resume(struct radeon_device *rdev);
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index e3f3e884178..404e25d285b 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -163,8 +163,8 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+ gpio = &i2c_info->asGPIO_Info[0];
for (i = 0; i < num_indices; i++) {
- gpio = &i2c_info->asGPIO_Info[i];
radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
@@ -172,6 +172,8 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
break;
}
+ gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
+ ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
}
}
@@ -195,9 +197,8 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+ gpio = &i2c_info->asGPIO_Info[0];
for (i = 0; i < num_indices; i++) {
- gpio = &i2c_info->asGPIO_Info[i];
-
radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
@@ -206,12 +207,14 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
sprintf(stmp, "0x%x", i2c.i2c_id);
rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
}
+ gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
+ ((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
}
}
}
static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
- u8 id)
+ u8 id)
{
struct atom_context *ctx = rdev->mode_info.atom_context;
struct radeon_gpio_rec gpio;
@@ -230,8 +233,8 @@ static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
+ pin = gpio_info->asGPIO_Pin;
for (i = 0; i < num_indices; i++) {
- pin = &gpio_info->asGPIO_Pin[i];
if (id == pin->ucGPIO_ID) {
gpio.id = pin->ucGPIO_ID;
gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
@@ -239,6 +242,8 @@ static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
gpio.valid = true;
break;
}
+ pin = (ATOM_GPIO_PIN_ASSIGNMENT *)
+ ((u8 *)pin + sizeof(ATOM_GPIO_PIN_ASSIGNMENT));
}
}
@@ -711,13 +716,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
(ctx->bios + data_offset +
le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
+ u8 *num_dst_objs = (u8 *)
+ ((u8 *)router_src_dst_table + 1 +
+ (router_src_dst_table->ucNumberOfSrc * 2));
+ u16 *dst_objs = (u16 *)(num_dst_objs + 1);
int enum_id;
router.router_id = router_obj_id;
- for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst;
- enum_id++) {
+ for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) {
if (le16_to_cpu(path->usConnObjectId) ==
- le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id]))
+ le16_to_cpu(dst_objs[enum_id]))
break;
}
@@ -1480,6 +1488,15 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
uint8_t frev, crev;
int i, num_indices;
+ if (id == ASIC_INTERNAL_MEMORY_SS) {
+ if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT))
+ return false;
+ }
+ if (id == ASIC_INTERNAL_ENGINE_SS) {
+ if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT))
+ return false;
+ }
+
memset(ss, 0, sizeof(struct radeon_atom_ss));
if (atom_parse_data_header(mode_info->atom_context, index, &size,
&frev, &crev, &data_offset)) {
@@ -1672,7 +1689,9 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
kfree(edid);
}
}
- record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
+ record += fake_edid_record->ucFakeEDIDLength ?
+ fake_edid_record->ucFakeEDIDLength + 2 :
+ sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
break;
case LCD_PANEL_RESOLUTION_RECORD_TYPE:
panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
@@ -2237,6 +2256,11 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_CI;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_KV;
} else if ((controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
(controller->ucType ==
@@ -2782,7 +2806,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
dividers->enable_dithen = (args.v3.ucCntlFlag &
ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
- dividers->fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
+ dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
dividers->ref_div = args.v3.ucRefDiv;
dividers->vco_mode = (args.v3.ucCntlFlag &
@@ -3077,6 +3101,121 @@ int radeon_atom_get_leakage_vddc_based_on_leakage_idx(struct radeon_device *rdev
return radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
}
+int radeon_atom_get_leakage_id_from_vbios(struct radeon_device *rdev,
+ u16 *leakage_id)
+{
+ union set_voltage args;
+ int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
+ u8 frev, crev;
+
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return -EINVAL;
+
+ switch (crev) {
+ case 3:
+ case 4:
+ args.v3.ucVoltageType = 0;
+ args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID;
+ args.v3.usVoltageLevel = 0;
+
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ *leakage_id = le16_to_cpu(args.v3.usVoltageLevel);
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int radeon_atom_get_leakage_vddc_based_on_leakage_params(struct radeon_device *rdev,
+ u16 *vddc, u16 *vddci,
+ u16 virtual_voltage_id,
+ u16 vbios_voltage_id)
+{
+ int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
+ u8 frev, crev;
+ u16 data_offset, size;
+ int i, j;
+ ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
+ u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;
+
+ *vddc = 0;
+ *vddci = 0;
+
+ if (!atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
+ &frev, &crev, &data_offset))
+ return -EINVAL;
+
+ profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
+ (rdev->mode_info.atom_context->bios + data_offset);
+
+ switch (frev) {
+ case 1:
+ return -EINVAL;
+ case 2:
+ switch (crev) {
+ case 1:
+ if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))
+ return -EINVAL;
+ leakage_bin = (u16 *)
+ (rdev->mode_info.atom_context->bios + data_offset +
+ le16_to_cpu(profile->usLeakageBinArrayOffset));
+ vddc_id_buf = (u16 *)
+ (rdev->mode_info.atom_context->bios + data_offset +
+ le16_to_cpu(profile->usElbVDDC_IdArrayOffset));
+ vddc_buf = (u16 *)
+ (rdev->mode_info.atom_context->bios + data_offset +
+ le16_to_cpu(profile->usElbVDDC_LevelArrayOffset));
+ vddci_id_buf = (u16 *)
+ (rdev->mode_info.atom_context->bios + data_offset +
+ le16_to_cpu(profile->usElbVDDCI_IdArrayOffset));
+ vddci_buf = (u16 *)
+ (rdev->mode_info.atom_context->bios + data_offset +
+ le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset));
+
+ if (profile->ucElbVDDC_Num > 0) {
+ for (i = 0; i < profile->ucElbVDDC_Num; i++) {
+ if (vddc_id_buf[i] == virtual_voltage_id) {
+ for (j = 0; j < profile->ucLeakageBinNum; j++) {
+ if (vbios_voltage_id <= leakage_bin[j]) {
+ *vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
+ break;
+ }
+ }
+ break;
+ }
+ }
+ }
+ if (profile->ucElbVDDCI_Num > 0) {
+ for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
+ if (vddci_id_buf[i] == virtual_voltage_id) {
+ for (j = 0; j < profile->ucLeakageBinNum; j++) {
+ if (vbios_voltage_id <= leakage_bin[j]) {
+ *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
+ break;
+ }
+ }
+ break;
+ }
+ }
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ return -EINVAL;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int radeon_atom_get_voltage_gpio_settings(struct radeon_device *rdev,
u16 voltage_level, u8 voltage_type,
u32 *gpio_value, u32 *gpio_mask)
@@ -3279,10 +3418,11 @@ int radeon_atom_get_max_voltage(struct radeon_device *rdev,
ATOM_VOLTAGE_FORMULA_V2 *formula =
&voltage_object->v2.asFormula;
if (formula->ucNumOfVoltageEntries) {
+ VOLTAGE_LUT_ENTRY *lut = (VOLTAGE_LUT_ENTRY *)
+ ((u8 *)&formula->asVIDAdjustEntries[0] +
+ (sizeof(VOLTAGE_LUT_ENTRY) * (formula->ucNumOfVoltageEntries - 1)));
*max_voltage =
- le16_to_cpu(formula->asVIDAdjustEntries[
- formula->ucNumOfVoltageEntries - 1
- ].usVoltageValue);
+ le16_to_cpu(lut->usVoltageValue);
return 0;
}
}
@@ -3442,11 +3582,13 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev,
if (voltage_object) {
ATOM_VOLTAGE_FORMULA_V2 *formula =
&voltage_object->v2.asFormula;
+ VOLTAGE_LUT_ENTRY *lut;
if (formula->ucNumOfVoltageEntries > MAX_VOLTAGE_ENTRIES)
return -EINVAL;
+ lut = &formula->asVIDAdjustEntries[0];
for (i = 0; i < formula->ucNumOfVoltageEntries; i++) {
voltage_table->entries[i].value =
- le16_to_cpu(formula->asVIDAdjustEntries[i].usVoltageValue);
+ le16_to_cpu(lut->usVoltageValue);
ret = radeon_atom_get_voltage_gpio_settings(rdev,
voltage_table->entries[i].value,
voltage_type,
@@ -3454,6 +3596,8 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev,
&voltage_table->mask_low);
if (ret)
return ret;
+ lut = (VOLTAGE_LUT_ENTRY *)
+ ((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY));
}
voltage_table->count = formula->ucNumOfVoltageEntries;
return 0;
@@ -3473,13 +3617,17 @@ int radeon_atom_get_voltage_table(struct radeon_device *rdev,
if (voltage_object) {
ATOM_GPIO_VOLTAGE_OBJECT_V3 *gpio =
&voltage_object->v3.asGpioVoltageObj;
+ VOLTAGE_LUT_ENTRY_V2 *lut;
if (gpio->ucGpioEntryNum > MAX_VOLTAGE_ENTRIES)
return -EINVAL;
+ lut = &gpio->asVolGpioLut[0];
for (i = 0; i < gpio->ucGpioEntryNum; i++) {
voltage_table->entries[i].value =
- le16_to_cpu(gpio->asVolGpioLut[i].usVoltageValue);
+ le16_to_cpu(lut->usVoltageValue);
voltage_table->entries[i].smio_low =
- le32_to_cpu(gpio->asVolGpioLut[i].ulVoltageId);
+ le32_to_cpu(lut->ulVoltageId);
+ lut = (VOLTAGE_LUT_ENTRY_V2 *)
+ ((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY_V2));
}
voltage_table->mask_low = le32_to_cpu(gpio->ulGpioMaskVal);
voltage_table->count = gpio->ucGpioEntryNum;
@@ -3605,7 +3753,6 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
union vram_info *vram_info;
u32 mem_timing_size = gddr5 ?
sizeof(ATOM_MEMORY_TIMING_FORMAT_V2) : sizeof(ATOM_MEMORY_TIMING_FORMAT);
- u8 *p;
memset(mclk_range_table, 0, sizeof(struct atom_memory_clock_range_table));
@@ -3624,6 +3771,7 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
if (module_index < vram_info->v1_4.ucNumOfVRAMModule) {
ATOM_VRAM_MODULE_V4 *vram_module =
(ATOM_VRAM_MODULE_V4 *)vram_info->v1_4.aVramInfo;
+ ATOM_MEMORY_TIMING_FORMAT *format;
for (i = 0; i < module_index; i++) {
if (le16_to_cpu(vram_module->usModuleSize) == 0)
@@ -3634,11 +3782,11 @@ int radeon_atom_get_mclk_range_table(struct radeon_device *rdev,
mclk_range_table->num_entries = (u8)
((le16_to_cpu(vram_module->usModuleSize) - offsetof(ATOM_VRAM_MODULE_V4, asMemTiming)) /
mem_timing_size);
- p = (u8 *)&vram_module->asMemTiming[0];
+ format = &vram_module->asMemTiming[0];
for (i = 0; i < mclk_range_table->num_entries; i++) {
- ATOM_MEMORY_TIMING_FORMAT *format = (ATOM_MEMORY_TIMING_FORMAT *)p;
mclk_range_table->mclk[i] = le32_to_cpu(format->ulClkRange);
- p += mem_timing_size;
+ format = (ATOM_MEMORY_TIMING_FORMAT *)
+ ((u8 *)format + mem_timing_size);
}
} else
return -EINVAL;
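
Several hunks in the radeon_atombios.c change above stop indexing ATOM BIOS tables as declared arrays and instead advance the entry pointer by an explicit sizeof() byte stride on each iteration. The minimal C sketch below illustrates that walk only; struct example_entry and find_entry() are invented stand-ins, not the real ATOM_GPIO_* layouts.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical table entry; the real ATOM_* layouts differ. */
struct example_entry {
	uint8_t  id;
	uint16_t value;
};

/* Walk 'count' entries by advancing an explicit byte stride, mirroring
 * the pointer-advance style the hunks above use for ATOM tables instead
 * of indexing a declared array. */
const struct example_entry *find_entry(const void *table, unsigned count,
				       uint8_t id)
{
	const struct example_entry *e = table;
	unsigned i;

	for (i = 0; i < count; i++) {
		if (e->id == id)
			return e;
		e = (const struct example_entry *)
			((const uint8_t *)e + sizeof(*e));
	}
	return NULL;
}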
diff --git a/drivers/gpu/drm/radeon/radeon_blit_common.h b/drivers/gpu/drm/radeon/radeon_blit_common.h
deleted file mode 100644
index 4ecbe72c9d2..00000000000
--- a/drivers/gpu/drm/radeon/radeon_blit_common.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright 2009 Advanced Micro Devices, Inc.
- * Copyright 2009 Red Hat Inc.
- * Copyright 2012 Alcatel-Lucent, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- */
-
-#ifndef __RADEON_BLIT_COMMON_H__
-
-#define DI_PT_RECTLIST 0x11
-#define DI_INDEX_SIZE_16_BIT 0x0
-#define DI_SRC_SEL_AUTO_INDEX 0x2
-
-#define FMT_8 0x1
-#define FMT_5_6_5 0x8
-#define FMT_8_8_8_8 0x1a
-#define COLOR_8 0x1
-#define COLOR_5_6_5 0x8
-#define COLOR_8_8_8_8 0x1a
-
-#define RECT_UNIT_H 32
-#define RECT_UNIT_W (RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H)
-
-#define __RADEON_BLIT_COMMON_H__
-#endif
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 78edadc9e86..68ce3605601 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -147,7 +147,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
enum radeon_combios_table_offset table)
{
struct radeon_device *rdev = dev->dev_private;
- int rev;
+ int rev, size;
uint16_t offset = 0, check_offset;
if (!rdev->bios)
@@ -156,174 +156,106 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
switch (table) {
/* absolute offset tables */
case COMBIOS_ASIC_INIT_1_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0xc);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0xc;
break;
case COMBIOS_BIOS_SUPPORT_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x14);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x14;
break;
case COMBIOS_DAC_PROGRAMMING_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x2a);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x2a;
break;
case COMBIOS_MAX_COLOR_DEPTH_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x2c);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x2c;
break;
case COMBIOS_CRTC_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x2e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x2e;
break;
case COMBIOS_PLL_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x30);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x30;
break;
case COMBIOS_TV_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x32);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x32;
break;
case COMBIOS_DFP_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x34);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x34;
break;
case COMBIOS_HW_CONFIG_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x36);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x36;
break;
case COMBIOS_MULTIMEDIA_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x38);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x38;
break;
case COMBIOS_TV_STD_PATCH_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x3e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x3e;
break;
case COMBIOS_LCD_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x40);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x40;
break;
case COMBIOS_MOBILE_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x42);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x42;
break;
case COMBIOS_PLL_INIT_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x46);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x46;
break;
case COMBIOS_MEM_CONFIG_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x48);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x48;
break;
case COMBIOS_SAVE_MASK_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x4a);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x4a;
break;
case COMBIOS_HARDCODED_EDID_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x4c);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x4c;
break;
case COMBIOS_ASIC_INIT_2_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x4e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x4e;
break;
case COMBIOS_CONNECTOR_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x50);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x50;
break;
case COMBIOS_DYN_CLK_1_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x52);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x52;
break;
case COMBIOS_RESERVED_MEM_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x54);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x54;
break;
case COMBIOS_EXT_TMDS_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x58);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x58;
break;
case COMBIOS_MEM_CLK_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x5a);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x5a;
break;
case COMBIOS_EXT_DAC_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x5c);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x5c;
break;
case COMBIOS_MISC_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x5e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x5e;
break;
case COMBIOS_CRT_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x60);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x60;
break;
case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x62);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x62;
break;
case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x64);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x64;
break;
case COMBIOS_FAN_SPEED_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x66);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x66;
break;
case COMBIOS_OVERDRIVE_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x68);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x68;
break;
case COMBIOS_OEM_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x6a);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x6a;
break;
case COMBIOS_DYN_CLK_2_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x6c);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x6c;
break;
case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x6e);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x6e;
break;
case COMBIOS_I2C_INFO_TABLE:
- check_offset = RBIOS16(rdev->bios_header_start + 0x70);
- if (check_offset)
- offset = check_offset;
+ check_offset = 0x70;
break;
/* relative offset tables */
case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */
@@ -439,11 +371,16 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
}
break;
default:
+ check_offset = 0;
break;
}
- return offset;
+ size = RBIOS8(rdev->bios_header_start + 0x6);
+ /* check absolute offset tables */
+ if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
+ offset = RBIOS16(rdev->bios_header_start + check_offset);
+ return offset;
}
bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
@@ -965,16 +902,22 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
dac = RBIOS8(dac_info + 0x3) & 0xf;
p_dac->ps2_pdac_adj = (bg << 8) | (dac);
}
- /* if the values are all zeros, use the table */
- if (p_dac->ps2_pdac_adj)
+ /* if the values are zeros, use the table */
+ if ((dac == 0) || (bg == 0))
+ found = 0;
+ else
found = 1;
}
/* quirks */
+ /* Radeon 7000 (RV100) */
+ if (((dev->pdev->device == 0x5159) &&
+ (dev->pdev->subsystem_vendor == 0x174B) &&
+ (dev->pdev->subsystem_device == 0x7c28)) ||
/* Radeon 9100 (R200) */
- if ((dev->pdev->device == 0x514D) &&
+ ((dev->pdev->device == 0x514D) &&
(dev->pdev->subsystem_vendor == 0x174B) &&
- (dev->pdev->subsystem_device == 0x7149)) {
+ (dev->pdev->subsystem_device == 0x7149))) {
/* vbios value is bad, use the default */
found = 0;
}
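
The combios_get_table_offset() rework above reduces each absolute-offset case to a header slot number and applies one shared bounds check: the slot must lie inside the BIOS header (whose size byte sits at header start + 0x6) before the 16-bit table pointer is read. A self-contained sketch of that shape follows; read8()/read16() are invented accessors standing in for RBIOS8()/RBIOS16().

#include <stdint.h>

static uint8_t read8(const uint8_t *bios, unsigned off)
{
	return bios[off];
}

static uint16_t read16(const uint8_t *bios, unsigned off)
{
	return (uint16_t)(bios[off] | (bios[off + 1] << 8));
}

/* The per-table slot is validated against the header size byte before
 * the 16-bit offset is dereferenced, so a short header cannot send the
 * lookup past its end. */
uint16_t get_table_offset(const uint8_t *bios, unsigned hdr_start,
			  unsigned slot)
{
	unsigned hdr_size = read8(bios, hdr_start + 0x6);

	if (slot == 0 || slot >= hdr_size)
		return 0;
	return read16(bios, hdr_start + slot);
}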
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index efc4f6441ef..3cae2bbc185 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1444,13 +1444,13 @@ static int radeon_do_init_cp(struct drm_device *dev, drm_radeon_init_t *init,
dev_priv->ring.end = ((u32 *) dev_priv->cp_ring->handle
+ init->ring_size / sizeof(u32));
dev_priv->ring.size = init->ring_size;
- dev_priv->ring.size_l2qw = drm_order(init->ring_size / 8);
+ dev_priv->ring.size_l2qw = order_base_2(init->ring_size / 8);
dev_priv->ring.rptr_update = /* init->rptr_update */ 4096;
- dev_priv->ring.rptr_update_l2qw = drm_order( /* init->rptr_update */ 4096 / 8);
+ dev_priv->ring.rptr_update_l2qw = order_base_2( /* init->rptr_update */ 4096 / 8);
dev_priv->ring.fetch_size = /* init->fetch_size */ 32;
- dev_priv->ring.fetch_size_l2ow = drm_order( /* init->fetch_size */ 32 / 16);
+ dev_priv->ring.fetch_size_l2ow = order_base_2( /* init->fetch_size */ 32 / 16);
dev_priv->ring.tail_mask = (dev_priv->ring.size / sizeof(u32)) - 1;
dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
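
The radeon_cp.c hunk replaces drm_order() with the generic order_base_2() helper; both yield the base-2 order of a size, i.e. the exponent of the smallest power of two not below it. A trivial userspace stand-in for illustration only (the kernel helper is built on ilog2(), not a loop):

/* order_base_2_sketch(8) == 3, order_base_2_sketch(9) == 4,
 * order_base_2_sketch(1) == 0. */
unsigned order_base_2_sketch(unsigned long n)
{
	unsigned order = 0;

	while ((1UL << order) < n)
		order++;
	return order;
}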
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 13a130fb351..a5608441037 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -268,7 +268,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EINVAL;
/* we only support VM on some SI+ rings */
- if ((p->rdev->asic->ring[p->ring].cs_parse == NULL) &&
+ if ((p->rdev->asic->ring[p->ring]->cs_parse == NULL) &&
((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
DRM_ERROR("Ring %d requires VM!\n", p->ring);
return -EINVAL;
@@ -383,6 +383,10 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
DRM_ERROR("Invalid command stream !\n");
return r;
}
+
+ if (parser->ring == R600_RING_TYPE_UVD_INDEX)
+ radeon_uvd_note_usage(rdev);
+
radeon_cs_sync_rings(parser);
r = radeon_ib_schedule(rdev, &parser->ib, NULL);
if (r) {
@@ -474,6 +478,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
return r;
}
+ if (parser->ring == R600_RING_TYPE_UVD_INDEX)
+ radeon_uvd_note_usage(rdev);
+
mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
r = radeon_vm_alloc_pt(rdev, vm);
@@ -552,10 +559,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return r;
}
- /* XXX pick SD/HD/MVC */
- if (parser.ring == R600_RING_TYPE_UVD_INDEX)
- radeon_uvd_note_usage(rdev);
-
r = radeon_cs_ib_chunk(rdev, &parser);
if (r) {
goto out;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 82335e38ec4..16cb8792b1e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1003,16 +1003,28 @@ static void radeon_check_arguments(struct radeon_device *rdev)
radeon_vram_limit = 0;
}
+ if (radeon_gart_size == -1) {
+ /* default to a larger gart size on newer asics */
+ if (rdev->family >= CHIP_RV770)
+ radeon_gart_size = 1024;
+ else
+ radeon_gart_size = 512;
+ }
/* gtt size must be power of two and greater or equal to 32M */
if (radeon_gart_size < 32) {
- dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
+ dev_warn(rdev->dev, "gart size (%d) too small\n",
radeon_gart_size);
- radeon_gart_size = 512;
-
+ if (rdev->family >= CHIP_RV770)
+ radeon_gart_size = 1024;
+ else
+ radeon_gart_size = 512;
} else if (!radeon_check_pot_argument(radeon_gart_size)) {
dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
radeon_gart_size);
- radeon_gart_size = 512;
+ if (rdev->family >= CHIP_RV770)
+ radeon_gart_size = 1024;
+ else
+ radeon_gart_size = 512;
}
rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
@@ -1144,7 +1156,7 @@ int radeon_device_init(struct radeon_device *rdev,
rdev->family = flags & RADEON_FAMILY_MASK;
rdev->is_atom_bios = false;
rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+ rdev->mc.gtt_size = 512 * 1024 * 1024;
rdev->accel_working = false;
/* set up ring ids */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
@@ -1163,6 +1175,7 @@ int radeon_device_init(struct radeon_device *rdev,
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->gpu_clock_mutex);
+ mutex_init(&rdev->srbm_mutex);
init_rwsem(&rdev->pm.mclk_lock);
init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
@@ -1269,7 +1282,7 @@ int radeon_device_init(struct radeon_device *rdev,
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
- vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);
+ vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, false);
r = radeon_init(rdev);
if (r)
@@ -1519,6 +1532,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
+ radeon_pm_suspend(rdev);
radeon_suspend(rdev);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -1564,6 +1578,7 @@ retry:
}
}
+ radeon_pm_resume(rdev);
drm_helper_resume_force_mode(rdev->ddev);
ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
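
The radeon_check_arguments() change above introduces a "-1 means auto" gart size parameter: -1 selects a family-dependent default, and undersized or non-power-of-two user values fall back to that same default rather than a fixed 512M. A hedged sketch of the idiom; resolve_gart_size_mb() and the newer_asic flag are invented names standing in for the CHIP_RV770 family check.

#include <stdbool.h>

/* -1 selects the per-family default; too-small or non-power-of-two
 * requests fall back to the same default (values are in megabytes). */
int resolve_gart_size_mb(int requested, bool newer_asic)
{
	int def = newer_asic ? 1024 : 512;

	if (requested == -1)
		return def;
	if (requested < 32 || (requested & (requested - 1)))
		return def;
	return requested;
}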
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index c2b67b4e1ac..b055bddaa94 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -345,7 +345,8 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
static int radeon_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -1254,41 +1255,41 @@ static void radeon_afmt_init(struct radeon_device *rdev)
for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
rdev->mode_info.afmt[i] = NULL;
- if (ASIC_IS_DCE6(rdev)) {
- /* todo */
+ if (ASIC_IS_NODCE(rdev)) {
+ /* nothing to do */
} else if (ASIC_IS_DCE4(rdev)) {
+ static uint32_t eg_offsets[] = {
+ EVERGREEN_CRTC0_REGISTER_OFFSET,
+ EVERGREEN_CRTC1_REGISTER_OFFSET,
+ EVERGREEN_CRTC2_REGISTER_OFFSET,
+ EVERGREEN_CRTC3_REGISTER_OFFSET,
+ EVERGREEN_CRTC4_REGISTER_OFFSET,
+ EVERGREEN_CRTC5_REGISTER_OFFSET,
+ 0x13830 - 0x7030,
+ };
+ int num_afmt;
+
+ /* DCE8 has 7 audio blocks tied to DIG encoders */
+ /* DCE6 has 6 audio blocks tied to DIG encoders */
/* DCE4/5 has 6 audio blocks tied to DIG encoders */
/* DCE4.1 has 2 audio blocks tied to DIG encoders */
- rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
- if (rdev->mode_info.afmt[0]) {
- rdev->mode_info.afmt[0]->offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
- rdev->mode_info.afmt[0]->id = 0;
- }
- rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
- if (rdev->mode_info.afmt[1]) {
- rdev->mode_info.afmt[1]->offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
- rdev->mode_info.afmt[1]->id = 1;
- }
- if (!ASIC_IS_DCE41(rdev)) {
- rdev->mode_info.afmt[2] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
- if (rdev->mode_info.afmt[2]) {
- rdev->mode_info.afmt[2]->offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
- rdev->mode_info.afmt[2]->id = 2;
- }
- rdev->mode_info.afmt[3] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
- if (rdev->mode_info.afmt[3]) {
- rdev->mode_info.afmt[3]->offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
- rdev->mode_info.afmt[3]->id = 3;
- }
- rdev->mode_info.afmt[4] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
- if (rdev->mode_info.afmt[4]) {
- rdev->mode_info.afmt[4]->offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
- rdev->mode_info.afmt[4]->id = 4;
- }
- rdev->mode_info.afmt[5] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
- if (rdev->mode_info.afmt[5]) {
- rdev->mode_info.afmt[5]->offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
- rdev->mode_info.afmt[5]->id = 5;
+ if (ASIC_IS_DCE8(rdev))
+ num_afmt = 7;
+ else if (ASIC_IS_DCE6(rdev))
+ num_afmt = 6;
+ else if (ASIC_IS_DCE5(rdev))
+ num_afmt = 6;
+ else if (ASIC_IS_DCE41(rdev))
+ num_afmt = 2;
+ else /* DCE4 */
+ num_afmt = 6;
+
+ BUG_ON(num_afmt > ARRAY_SIZE(eg_offsets));
+ for (i = 0; i < num_afmt; i++) {
+ rdev->mode_info.afmt[i] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
+ if (rdev->mode_info.afmt[i]) {
+ rdev->mode_info.afmt[i]->offset = eg_offsets[i];
+ rdev->mode_info.afmt[i]->id = i;
}
}
} else if (ASIC_IS_DCE3(rdev)) {
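
radeon_afmt_init() above swaps a hand-unrolled kzalloc block per audio block for a table-driven loop: a per-generation count indexes one array of register offsets. The sketch below shows the same shape in plain C; the offsets, afmt_block struct, and init_blocks() are made-up stand-ins, and a failed allocation just leaves that slot NULL, as the driver does.

#include <stdlib.h>

struct afmt_block {
	unsigned offset;
	int id;
};

/* One loop over a per-generation offset table replaces a copy-pasted
 * allocation per block; a failed allocation leaves that slot NULL. */
int init_blocks(struct afmt_block *out[], int num)
{
	static const unsigned offsets[] = { 0x0, 0x100, 0x200, 0x300 };
	int i;

	if (num > (int)(sizeof(offsets) / sizeof(offsets[0])))
		return -1;
	for (i = 0; i < num; i++) {
		out[i] = calloc(1, sizeof(*out[i]));
		if (!out[i])
			continue;
		out[i]->offset = offsets[i];
		out[i]->id = i;
	}
	return 0;
}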
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 29876b1be8e..cb4445f55a9 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -81,7 +81,6 @@
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
-int radeon_driver_firstopen_kms(struct drm_device *dev);
void radeon_driver_lastclose_kms(struct drm_device *dev);
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void radeon_driver_postclose_kms(struct drm_device *dev,
@@ -101,8 +100,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
-int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
int radeon_gem_object_init(struct drm_gem_object *obj);
void radeon_gem_object_free(struct drm_gem_object *obj);
int radeon_gem_object_open(struct drm_gem_object *obj,
@@ -111,7 +108,7 @@ void radeon_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
int *vpos, int *hpos);
-extern struct drm_ioctl_desc radeon_ioctls_kms[];
+extern const struct drm_ioctl_desc radeon_ioctls_kms[];
extern int radeon_max_kms_ioctl;
int radeon_mmap(struct file *filp, struct vm_area_struct *vma);
int radeon_mode_dumb_mmap(struct drm_file *filp,
@@ -120,9 +117,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
int radeon_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int radeon_mode_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle);
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
size_t size,
@@ -154,7 +148,7 @@ int radeon_dynclks = -1;
int radeon_r4xx_atom = 0;
int radeon_agpmode = 0;
int radeon_vram_limit = 0;
-int radeon_gart_size = 512; /* default gart size */
+int radeon_gart_size = -1; /* auto */
int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
@@ -187,7 +181,7 @@ module_param_named(vramlimit, radeon_vram_limit, int, 0600);
MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)");
module_param_named(agpmode, radeon_agpmode, int, 0444);
-MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc)");
+MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc., -1 = auto)");
module_param_named(gartsize, radeon_gart_size, int, 0600);
MODULE_PARM_DESC(benchmark, "Run benchmark");
@@ -272,7 +266,6 @@ static const struct file_operations radeon_driver_old_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
.read = drm_read,
#ifdef CONFIG_COMPAT
.compat_ioctl = radeon_compat_ioctl,
@@ -282,7 +275,7 @@ static const struct file_operations radeon_driver_old_fops = {
static struct drm_driver driver_old = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
+ DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
.load = radeon_driver_load,
@@ -381,7 +374,6 @@ static const struct file_operations radeon_driver_kms_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = radeon_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
.read = drm_read,
#ifdef CONFIG_COMPAT
.compat_ioctl = radeon_kms_compat_ioctl,
@@ -390,12 +382,11 @@ static const struct file_operations radeon_driver_kms_fops = {
static struct drm_driver kms_driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
- DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM |
- DRIVER_PRIME,
+ DRIVER_USE_AGP |
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
+ DRIVER_PRIME | DRIVER_RENDER,
.dev_priv_size = 0,
.load = radeon_driver_load_kms,
- .firstopen = radeon_driver_firstopen_kms,
.open = radeon_driver_open_kms,
.preclose = radeon_driver_preclose_kms,
.postclose = radeon_driver_postclose_kms,
@@ -421,10 +412,9 @@ static struct drm_driver kms_driver = {
.gem_free_object = radeon_gem_object_free,
.gem_open_object = radeon_gem_object_open,
.gem_close_object = radeon_gem_object_close,
- .dma_ioctl = radeon_dma_ioctl_kms,
.dumb_create = radeon_mode_dumb_create,
.dumb_map_offset = radeon_mode_dumb_mmap,
- .dumb_destroy = radeon_mode_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.fops = &radeon_driver_kms_fops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 7ddb0efe240..ddb8f8e04eb 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -782,7 +782,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
} else {
/* put fence directly behind firmware */
- index = ALIGN(rdev->uvd.fw_size, 8);
+ index = ALIGN(rdev->uvd_fw->size, 8);
rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index d9d31a38327..b990b1a2bd5 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -207,7 +207,6 @@ void radeon_gart_table_vram_free(struct radeon_device *rdev)
if (rdev->gart.robj == NULL) {
return;
}
- radeon_gart_table_vram_unpin(rdev);
radeon_bo_unref(&rdev->gart.robj);
}
@@ -466,7 +465,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
size += rdev->vm_manager.max_pfn * 8;
size *= 2;
r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
- RADEON_VM_PTB_ALIGN(size),
+ RADEON_GPU_PAGE_ALIGN(size),
RADEON_VM_PTB_ALIGN_SIZE,
RADEON_GEM_DOMAIN_VRAM);
if (r) {
@@ -621,7 +620,7 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
}
retry:
- pd_size = RADEON_VM_PTB_ALIGN(radeon_vm_directory_size(rdev));
+ pd_size = radeon_vm_directory_size(rdev);
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
&vm->page_directory, pd_size,
RADEON_VM_PTB_ALIGN_SIZE, false);
@@ -953,8 +952,8 @@ static int radeon_vm_update_pdes(struct radeon_device *rdev,
retry:
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
&vm->page_tables[pt_idx],
- RADEON_VM_PTB_ALIGN(RADEON_VM_PTE_COUNT * 8),
- RADEON_VM_PTB_ALIGN_SIZE, false);
+ RADEON_VM_PTE_COUNT * 8,
+ RADEON_GPU_PAGE_SIZE, false);
if (r == -ENOMEM) {
r = radeon_vm_evict(rdev, vm);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index aa796031ab6..dce99c8a583 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -570,13 +570,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
return 0;
}
-int radeon_mode_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file_priv, handle);
-}
-
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_gem_info(struct seq_file *m, void *data)
{
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 081886b0642..cc9e8482cf3 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -275,17 +275,19 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
dev_info(rdev->dev, "radeon: using MSI.\n");
}
}
+
+ INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+ INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
+ INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
+
rdev->irq.installed = true;
r = drm_irq_install(rdev->ddev);
if (r) {
rdev->irq.installed = false;
+ flush_work(&rdev->hotplug_work);
return r;
}
- INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
- INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
- INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
-
DRM_INFO("radeon: irq initialized.\n");
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 49ff3d1a610..61580ddc4eb 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -433,6 +433,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
break;
+ case RADEON_INFO_SI_CP_DMA_COMPUTE:
+ *value = 1;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
@@ -449,19 +452,6 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
* Outdated mess for old drm with Xorg being in charge (void function now).
*/
/**
- * radeon_driver_firstopen_kms - drm callback for first open
- *
- * @dev: drm dev pointer
- *
- * Nothing to be done for KMS (all asics).
- * Returns 0 on success.
- */
-int radeon_driver_firstopen_kms(struct drm_device *dev)
-{
- return 0;
-}
-
-/**
* radeon_driver_firstopen_kms - drm callback for last close
*
* @dev: drm dev pointer
@@ -683,16 +673,6 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
drmcrtc);
}
-/*
- * IOCTL.
- */
-int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- /* Not valid in KMS. */
- return -EINVAL;
-}
-
#define KMS_INVALID_IOCTL(name) \
int name(struct drm_device *dev, void *data, struct drm_file *file_priv)\
{ \
@@ -732,7 +712,7 @@ KMS_INVALID_IOCTL(radeon_surface_alloc_kms)
KMS_INVALID_IOCTL(radeon_surface_free_kms)
-struct drm_ioctl_desc radeon_ioctls_kms[] = {
+const struct drm_ioctl_desc radeon_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(RADEON_CP_INIT, radeon_cp_init_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(RADEON_CP_START, radeon_cp_start_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(RADEON_CP_STOP, radeon_cp_stop_kms, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -761,18 +741,18 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
DRM_IOCTL_DEF_DRV(RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
/* KMS */
- DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
- DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 8296632a423..d908d8d68f6 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -225,6 +225,7 @@ struct radeon_afmt {
int offset;
bool last_buffer_filled_status;
int id;
+ struct r600_audio_pin *pin;
};
struct radeon_mode_info {
@@ -233,7 +234,7 @@ struct radeon_mode_info {
enum radeon_connector_table connector_table;
bool mode_config_initialized;
struct radeon_crtc *crtcs[6];
- struct radeon_afmt *afmt[6];
+ struct radeon_afmt *afmt[7];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 2020bf4a383..c0fa4aa9cee 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -142,7 +142,6 @@ int radeon_bo_create(struct radeon_device *rdev,
return r;
}
bo->rdev = rdev;
- bo->gem_base.driver_private = NULL;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
INIT_LIST_HEAD(&bo->va);
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 49c82c48001..209b1115026 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -113,13 +113,10 @@ static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
* @bo: radeon object for which we query the offset
*
* Returns mmap offset of the object.
- *
- * Note: addr_space_offset is constant after ttm bo init thus isn't protected
- * by any lock.
*/
static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
{
- return bo->tbo.addr_space_offset;
+ return drm_vma_node_offset_addr(&bo->tbo.vma_node);
}
extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index f374c467aac..d7555369a3e 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -569,6 +569,8 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
case THERMAL_TYPE_NI:
case THERMAL_TYPE_SUMO:
case THERMAL_TYPE_SI:
+ case THERMAL_TYPE_CI:
+ case THERMAL_TYPE_KV:
if (rdev->asic->pm.get_temperature == NULL)
return err;
rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
@@ -624,7 +626,15 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work)
/* switch back the user state */
dpm_state = rdev->pm.dpm.user_state;
}
- radeon_dpm_enable_power_state(rdev, dpm_state);
+ mutex_lock(&rdev->pm.mutex);
+ if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
+ rdev->pm.dpm.thermal_active = true;
+ else
+ rdev->pm.dpm.thermal_active = false;
+ rdev->pm.dpm.state = dpm_state;
+ mutex_unlock(&rdev->pm.mutex);
+
+ radeon_pm_compute_clocks(rdev);
}
static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
@@ -687,7 +697,10 @@ restart_search:
break;
/* internal states */
case POWER_STATE_TYPE_INTERNAL_UVD:
- return rdev->pm.dpm.uvd_ps;
+ if (rdev->pm.dpm.uvd_ps)
+ return rdev->pm.dpm.uvd_ps;
+ else
+ break;
case POWER_STATE_TYPE_INTERNAL_UVD_SD:
if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
return ps;
@@ -729,10 +742,17 @@ restart_search:
/* use a fallback state if we didn't match */
switch (dpm_state) {
case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+ goto restart_search;
case POWER_STATE_TYPE_INTERNAL_UVD_HD:
case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
- return rdev->pm.dpm.uvd_ps;
+ if (rdev->pm.dpm.uvd_ps) {
+ return rdev->pm.dpm.uvd_ps;
+ } else {
+ dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+ goto restart_search;
+ }
case POWER_STATE_TYPE_INTERNAL_THERMAL:
dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
goto restart_search;
@@ -850,38 +870,51 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
radeon_dpm_post_set_power_state(rdev);
+ /* force low perf level for thermal */
+ if (rdev->pm.dpm.thermal_active &&
+ rdev->asic->dpm.force_performance_level) {
+ radeon_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_LOW);
+ }
+
done:
mutex_unlock(&rdev->ring_lock);
up_write(&rdev->pm.mclk_lock);
mutex_unlock(&rdev->ddev->struct_mutex);
}
-void radeon_dpm_enable_power_state(struct radeon_device *rdev,
- enum radeon_pm_state_type dpm_state)
+void radeon_dpm_enable_uvd(struct radeon_device *rdev, bool enable)
{
- if (!rdev->pm.dpm_enabled)
- return;
+ enum radeon_pm_state_type dpm_state;
- mutex_lock(&rdev->pm.mutex);
- switch (dpm_state) {
- case POWER_STATE_TYPE_INTERNAL_THERMAL:
- rdev->pm.dpm.thermal_active = true;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD:
- case POWER_STATE_TYPE_INTERNAL_UVD_SD:
- case POWER_STATE_TYPE_INTERNAL_UVD_HD:
- case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
- case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
- rdev->pm.dpm.uvd_active = true;
- break;
- default:
- rdev->pm.dpm.thermal_active = false;
- rdev->pm.dpm.uvd_active = false;
- break;
+ if (rdev->asic->dpm.powergate_uvd) {
+ mutex_lock(&rdev->pm.mutex);
+ /* enable/disable UVD */
+ radeon_dpm_powergate_uvd(rdev, !enable);
+ mutex_unlock(&rdev->pm.mutex);
+ } else {
+ if (enable) {
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.dpm.uvd_active = true;
+ if ((rdev->pm.dpm.sd == 1) && (rdev->pm.dpm.hd == 0))
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_SD;
+ else if ((rdev->pm.dpm.sd == 2) && (rdev->pm.dpm.hd == 0))
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+ else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 1))
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+ else if ((rdev->pm.dpm.sd == 0) && (rdev->pm.dpm.hd == 2))
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD2;
+ else
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD;
+ rdev->pm.dpm.state = dpm_state;
+ mutex_unlock(&rdev->pm.mutex);
+ } else {
+ mutex_lock(&rdev->pm.mutex);
+ rdev->pm.dpm.uvd_active = false;
+ mutex_unlock(&rdev->pm.mutex);
+ }
+
+ radeon_pm_compute_clocks(rdev);
}
- rdev->pm.dpm.state = dpm_state;
- mutex_unlock(&rdev->pm.mutex);
- radeon_pm_compute_clocks(rdev);
}
static void radeon_pm_suspend_old(struct radeon_device *rdev)
@@ -1176,7 +1209,17 @@ int radeon_pm_init(struct radeon_device *rdev)
case CHIP_VERDE:
case CHIP_OLAND:
case CHIP_HAINAN:
- if (radeon_dpm == 1)
+ case CHIP_BONAIRE:
+ case CHIP_KABINI:
+ case CHIP_KAVERI:
+ /* DPM requires the RLC, RV770+ dGPU requires SMC */
+ if (!rdev->rlc_fw)
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ else if ((rdev->family >= CHIP_RV770) &&
+ (!(rdev->flags & RADEON_IS_IGP)) &&
+ (!rdev->smc_fw))
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
+ else if (radeon_dpm == 1)
rdev->pm.pm_method = PM_METHOD_DPM;
else
rdev->pm.pm_method = PM_METHOD_PROFILE;
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 65b9eabd5a2..20074560fc2 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -68,7 +68,6 @@ struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev,
RADEON_GEM_DOMAIN_GTT, sg, &bo);
if (ret)
return ERR_PTR(ret);
- bo->gem_base.driver_private = bo;
mutex_lock(&rdev->gem.mutex);
list_add_tail(&bo->list, &rdev->gem.objects);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index fb5ea620897..46a25f037b8 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -363,11 +363,10 @@ u32 radeon_ring_generic_get_rptr(struct radeon_device *rdev,
{
u32 rptr;
- if (rdev->wb.enabled && ring != &rdev->ring[R600_RING_TYPE_UVD_INDEX])
+ if (rdev->wb.enabled)
rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
else
rptr = RREG32(ring->rptr_reg);
- rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
return rptr;
}
@@ -378,7 +377,6 @@ u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
u32 wptr;
wptr = RREG32(ring->wptr_reg);
- wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
return wptr;
}
@@ -386,7 +384,7 @@ u32 radeon_ring_generic_get_wptr(struct radeon_device *rdev,
void radeon_ring_generic_set_wptr(struct radeon_device *rdev,
struct radeon_ring *ring)
{
- WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
+ WREG32(ring->wptr_reg, ring->wptr);
(void)RREG32(ring->wptr_reg);
}
@@ -719,16 +717,13 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
* @rptr_offs: offset of the rptr writeback location in the WB buffer
* @rptr_reg: MMIO offset of the rptr register
* @wptr_reg: MMIO offset of the wptr register
- * @ptr_reg_shift: bit offset of the rptr/wptr values
- * @ptr_reg_mask: bit mask of the rptr/wptr values
* @nop: nop packet for this ring
*
* Initialize the driver information for the selected ring (all asics).
* Returns 0 on success, error on failure.
*/
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
- unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
- u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
+ unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, u32 nop)
{
int r;
@@ -736,8 +731,6 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig
ring->rptr_offs = rptr_offs;
ring->rptr_reg = rptr_reg;
ring->wptr_reg = wptr_reg;
- ring->ptr_reg_shift = ptr_reg_shift;
- ring->ptr_reg_mask = ptr_reg_mask;
ring->nop = nop;
/* Allocate ring buffer */
if (ring->ring_obj == NULL) {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 6c0ce8915fa..71245d6f34a 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -203,7 +203,9 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- return 0;
+ struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+
+ return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
}
static void radeon_move_null(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/radeon/radeon_ucode.h b/drivers/gpu/drm/radeon/radeon_ucode.h
index d8b05f7bcf1..33858364fe8 100644
--- a/drivers/gpu/drm/radeon/radeon_ucode.h
+++ b/drivers/gpu/drm/radeon/radeon_ucode.h
@@ -35,6 +35,12 @@
#define SI_PFP_UCODE_SIZE 2144
#define SI_PM4_UCODE_SIZE 2144
#define SI_CE_UCODE_SIZE 2144
+#define CIK_PFP_UCODE_SIZE 2144
+#define CIK_ME_UCODE_SIZE 2144
+#define CIK_CE_UCODE_SIZE 2144
+
+/* MEC */
+#define CIK_MEC_UCODE_SIZE 4192
/* RLC */
#define R600_RLC_UCODE_SIZE 768
@@ -43,12 +49,20 @@
#define CAYMAN_RLC_UCODE_SIZE 1024
#define ARUBA_RLC_UCODE_SIZE 1536
#define SI_RLC_UCODE_SIZE 2048
+#define BONAIRE_RLC_UCODE_SIZE 2048
+#define KB_RLC_UCODE_SIZE 2560
+#define KV_RLC_UCODE_SIZE 2560
/* MC */
#define BTC_MC_UCODE_SIZE 6024
#define CAYMAN_MC_UCODE_SIZE 6037
#define SI_MC_UCODE_SIZE 7769
#define OLAND_MC_UCODE_SIZE 7863
+#define CIK_MC_UCODE_SIZE 7866
+
+/* SDMA */
+#define CIK_SDMA_UCODE_SIZE 1050
+#define CIK_SDMA_UCODE_VERSION 64
/* SMC */
#define RV770_SMC_UCODE_START 0x0100
@@ -126,4 +140,7 @@
#define HAINAN_SMC_UCODE_START 0x10000
#define HAINAN_SMC_UCODE_SIZE 0xe67C
+#define BONAIRE_SMC_UCODE_START 0x20000
+#define BONAIRE_SMC_UCODE_SIZE 0x1FDEC
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 414fd145d20..1a01bbff9bf 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -56,7 +56,6 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work);
int radeon_uvd_init(struct radeon_device *rdev)
{
- const struct firmware *fw;
unsigned long bo_size;
const char *fw_name;
int i, r;
@@ -105,14 +104,14 @@ int radeon_uvd_init(struct radeon_device *rdev)
return -EINVAL;
}
- r = request_firmware(&fw, fw_name, rdev->dev);
+ r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
if (r) {
dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
fw_name);
return r;
}
- bo_size = RADEON_GPU_PAGE_ALIGN(fw->size + 8) +
+ bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
@@ -145,15 +144,10 @@ int radeon_uvd_init(struct radeon_device *rdev)
radeon_bo_unreserve(rdev->uvd.vcpu_bo);
- rdev->uvd.fw_size = fw->size;
- memset(rdev->uvd.cpu_addr, 0, bo_size);
- memcpy(rdev->uvd.cpu_addr, fw->data, fw->size);
-
- release_firmware(fw);
-
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
atomic_set(&rdev->uvd.handles[i], 0);
rdev->uvd.filp[i] = NULL;
+ rdev->uvd.img_size[i] = 0;
}
return 0;
@@ -174,33 +168,60 @@ void radeon_uvd_fini(struct radeon_device *rdev)
}
radeon_bo_unref(&rdev->uvd.vcpu_bo);
+
+ release_firmware(rdev->uvd_fw);
}
int radeon_uvd_suspend(struct radeon_device *rdev)
{
unsigned size;
+ void *ptr;
+ int i;
if (rdev->uvd.vcpu_bo == NULL)
return 0;
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
+ if (atomic_read(&rdev->uvd.handles[i]))
+ break;
+
+ if (i == RADEON_MAX_UVD_HANDLES)
+ return 0;
+
size = radeon_bo_size(rdev->uvd.vcpu_bo);
+ size -= rdev->uvd_fw->size;
+
+ ptr = rdev->uvd.cpu_addr;
+ ptr += rdev->uvd_fw->size;
+
rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
- memcpy(rdev->uvd.saved_bo, rdev->uvd.cpu_addr, size);
+ memcpy(rdev->uvd.saved_bo, ptr, size);
return 0;
}
int radeon_uvd_resume(struct radeon_device *rdev)
{
+ unsigned size;
+ void *ptr;
+
if (rdev->uvd.vcpu_bo == NULL)
return -EINVAL;
+ memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+
+ size = radeon_bo_size(rdev->uvd.vcpu_bo);
+ size -= rdev->uvd_fw->size;
+
+ ptr = rdev->uvd.cpu_addr;
+ ptr += rdev->uvd_fw->size;
+
if (rdev->uvd.saved_bo != NULL) {
- unsigned size = radeon_bo_size(rdev->uvd.vcpu_bo);
- memcpy(rdev->uvd.cpu_addr, rdev->uvd.saved_bo, size);
+ memcpy(ptr, rdev->uvd.saved_bo, size);
kfree(rdev->uvd.saved_bo);
rdev->uvd.saved_bo = NULL;
- }
+ } else
+ memset(ptr, 0, size);
return 0;
}
@@ -215,8 +236,8 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
int i, r;
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
- if (rdev->uvd.filp[i] == filp) {
- uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+ uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+ if (handle != 0 && rdev->uvd.filp[i] == filp) {
struct radeon_fence *fence;
r = radeon_uvd_get_destroy_msg(rdev,
@@ -327,6 +348,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
unsigned offset, unsigned buf_sizes[])
{
int32_t *msg, msg_type, handle;
+ unsigned img_size = 0;
void *ptr;
int i, r;
@@ -336,9 +358,19 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return -EINVAL;
}
+ if (bo->tbo.sync_obj) {
+ r = radeon_fence_wait(bo->tbo.sync_obj, false);
+ if (r) {
+ DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
+ return r;
+ }
+ }
+
r = radeon_bo_kmap(bo, &ptr);
- if (r)
+ if (r) {
+ DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
return r;
+ }
msg = ptr + offset;
@@ -353,6 +385,8 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
if (msg_type == 1) {
/* it's a decode msg, calc buffer sizes */
r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
+ /* calc image size (width * height) */
+ img_size = msg[6] * msg[7];
radeon_bo_kunmap(bo);
if (r)
return r;
@@ -364,8 +398,16 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
radeon_bo_kunmap(bo);
return 0;
} else {
- /* it's a create msg, no special handling needed */
+ /* it's a create msg, calc image size (width * height) */
+ img_size = msg[7] * msg[8];
radeon_bo_kunmap(bo);
+
+ if (msg_type != 0) {
+ DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+ return -EINVAL;
+ }
+
+ /* it's a create msg, no special handling needed */
}
/* create or decode, validate the handle */
@@ -378,6 +420,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
p->rdev->uvd.filp[i] = p->filp;
+ p->rdev->uvd.img_size[i] = img_size;
return 0;
}
}
@@ -388,7 +431,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
int data0, int data1,
- unsigned buf_sizes[])
+ unsigned buf_sizes[], bool *has_msg_cmd)
{
struct radeon_cs_chunk *relocs_chunk;
struct radeon_cs_reloc *reloc;
@@ -417,7 +460,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
if (cmd < 0x4) {
if ((end - start) < buf_sizes[cmd]) {
- DRM_ERROR("buffer to small (%d / %d)!\n",
+ DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
(unsigned)(end - start), buf_sizes[cmd]);
return -EINVAL;
}
@@ -442,9 +485,17 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
}
if (cmd == 0) {
+ if (*has_msg_cmd) {
+ DRM_ERROR("More than one message in a UVD-IB!\n");
+ return -EINVAL;
+ }
+ *has_msg_cmd = true;
r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
if (r)
return r;
+ } else if (!*has_msg_cmd) {
+ DRM_ERROR("Message needed before other commands are send!\n");
+ return -EINVAL;
}
return 0;
@@ -453,7 +504,8 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
int *data0, int *data1,
- unsigned buf_sizes[])
+ unsigned buf_sizes[],
+ bool *has_msg_cmd)
{
int i, r;
@@ -467,7 +519,8 @@ static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
*data1 = p->idx;
break;
case UVD_GPCOM_VCPU_CMD:
- r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes);
+ r = radeon_uvd_cs_reloc(p, *data0, *data1,
+ buf_sizes, has_msg_cmd);
if (r)
return r;
break;
@@ -488,6 +541,9 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
struct radeon_cs_packet pkt;
int r, data0 = 0, data1 = 0;
+ /* does the IB have a msg command? */
+ bool has_msg_cmd = false;
+
/* minimum buffer sizes */
unsigned buf_sizes[] = {
[0x00000000] = 2048,
@@ -514,8 +570,8 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
return r;
switch (pkt.type) {
case RADEON_PACKET_TYPE0:
- r = radeon_uvd_cs_reg(p, &pkt, &data0,
- &data1, buf_sizes);
+ r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
+ buf_sizes, &has_msg_cmd);
if (r)
return r;
break;
@@ -527,6 +583,12 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
return -EINVAL;
}
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+
+ if (!has_msg_cmd) {
+ DRM_ERROR("UVD-IBs need a msg command!\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -678,6 +740,34 @@ int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
return radeon_uvd_send_msg(rdev, ring, bo, fence);
}
+/**
+ * radeon_uvd_count_handles - count number of open streams
+ *
+ * @rdev: radeon_device pointer
+ * @sd: number of SD streams
+ * @hd: number of HD streams
+ *
+ * Count the number of open SD/HD streams as a hint for power management
+ */
+static void radeon_uvd_count_handles(struct radeon_device *rdev,
+ unsigned *sd, unsigned *hd)
+{
+ unsigned i;
+
+ *sd = 0;
+ *hd = 0;
+
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ if (!atomic_read(&rdev->uvd.handles[i]))
+ continue;
+
+ if (rdev->uvd.img_size[i] >= 720*576)
+ ++(*hd);
+ else
+ ++(*sd);
+ }
+}
+
static void radeon_uvd_idle_work_handler(struct work_struct *work)
{
struct radeon_device *rdev =
@@ -685,10 +775,7 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work)
if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
- mutex_lock(&rdev->pm.mutex);
- rdev->pm.dpm.uvd_active = false;
- mutex_unlock(&rdev->pm.mutex);
- radeon_pm_compute_clocks(rdev);
+ radeon_dpm_enable_uvd(rdev, false);
} else {
radeon_set_uvd_clocks(rdev, 0, 0);
}
@@ -700,13 +787,25 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work)
void radeon_uvd_note_usage(struct radeon_device *rdev)
{
+ bool streams_changed = false;
bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
- if (set_clocks) {
+
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
+ unsigned hd = 0, sd = 0;
+ radeon_uvd_count_handles(rdev, &sd, &hd);
+ if ((rdev->pm.dpm.sd != sd) ||
+ (rdev->pm.dpm.hd != hd)) {
+ rdev->pm.dpm.sd = sd;
+ rdev->pm.dpm.hd = hd;
+ streams_changed = true;
+ }
+ }
+
+ if (set_clocks || streams_changed) {
if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
- /* XXX pick SD/HD/MVC */
- radeon_dpm_enable_power_state(rdev, POWER_STATE_TYPE_INTERNAL_UVD);
+ radeon_dpm_enable_uvd(rdev, true);
} else {
radeon_set_uvd_clocks(rdev, 53300, 40000);
}
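The new radeon_uvd_count_handles() helper above classifies a stream as HD once its decoded image reaches 720*576 pixels and feeds those counts into the DPM code. A self-contained sketch of the same bookkeeping, using plain arrays in place of the driver's atomic handle table:

#include <stdio.h>

#define MAX_HANDLES  10
#define HD_THRESHOLD (720 * 576)   /* at or above SD PAL size counts as HD */

/* handle == 0 means the slot is unused, mirroring the handle array above */
static void count_streams(const unsigned handle[], const unsigned img_size[],
			  unsigned *sd, unsigned *hd)
{
	*sd = 0;
	*hd = 0;
	for (unsigned i = 0; i < MAX_HANDLES; ++i) {
		if (!handle[i])
			continue;
		if (img_size[i] >= HD_THRESHOLD)
			++(*hd);
		else
			++(*sd);
	}
}

int main(void)
{
	unsigned handle[MAX_HANDLES]   = { 1, 2, 0 };
	unsigned img_size[MAX_HANDLES] = { 1920 * 1080, 640 * 480, 0 };
	unsigned sd, hd;

	count_streams(handle, img_size, &sd, &hd);
	printf("sd=%u hd=%u\n", sd, hd);   /* prints sd=1 hd=1 */
	return 0;
}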
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 233a9b9fa1f..b8074a8ec75 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -174,10 +174,13 @@ int rs400_gart_enable(struct radeon_device *rdev)
/* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
* AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
- WREG32_MC(RS480_MC_MISC_CNTL,
- (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
+ tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+ tmp |= RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN;
+ WREG32_MC(RS480_MC_MISC_CNTL, tmp);
} else {
- WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
+ tmp = RREG32_MC(RS480_MC_MISC_CNTL);
+ tmp |= RS480_GART_INDEX_REG_EN;
+ WREG32_MC(RS480_MC_MISC_CNTL, tmp);
}
/* Enable gart */
WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
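The rs400 hunk above switches RS480_MC_MISC_CNTL from a blind write to a read-modify-write so that bits already programmed in the register survive. A small standalone illustration of the difference, with a fake register and made-up bit positions:

#include <stdint.h>
#include <stdio.h>

#define GART_INDEX_REG_EN (1u << 12)   /* hypothetical bit positions, demo only */
#define BLOCK_GFX_D3_EN   (1u << 14)

static uint32_t fake_reg;

static uint32_t rreg(void)       { return fake_reg; }
static void     wreg(uint32_t v) { fake_reg = v; }

int main(void)
{
	/* blind write: clobbers whatever was already set */
	fake_reg = 0x00000081;
	wreg(GART_INDEX_REG_EN);
	printf("blind write : 0x%08x\n", fake_reg);   /* 0x00001000 */

	/* read-modify-write: only ORs in the bits we care about */
	fake_reg = 0x00000081;
	uint32_t tmp = rreg();
	tmp |= GART_INDEX_REG_EN | BLOCK_GFX_D3_EN;
	wreg(tmp);
	printf("rmw write   : 0x%08x\n", fake_reg);   /* 0x00005081 */
	return 0;
}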
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 65e33f38734..ab1f2016f21 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -819,7 +819,7 @@ static void rv6xx_program_memory_timing_parameters(struct radeon_device *rdev)
POWERMODE1(calculate_memory_refresh_rate(rdev,
pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) |
POWERMODE2(calculate_memory_refresh_rate(rdev,
- pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) |
+ pi->hw.sclks[R600_POWER_LEVEL_HIGH])) |
POWERMODE3(calculate_memory_refresh_rate(rdev,
pi->hw.sclks[R600_POWER_LEVEL_HIGH])));
WREG32(ARB_RFSH_RATE, arb_refresh_rate);
@@ -1182,10 +1182,10 @@ static void rv6xx_program_display_gap(struct radeon_device *rdev)
u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
- if (RREG32(AVIVO_D1CRTC_CONTROL) & AVIVO_CRTC_EN) {
+ if (rdev->pm.dpm.new_active_crtcs & 1) {
tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
- } else if (RREG32(AVIVO_D2CRTC_CONTROL) & AVIVO_CRTC_EN) {
+ } else if (rdev->pm.dpm.new_active_crtcs & 2) {
tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
} else {
@@ -1670,6 +1670,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
int ret;
+ pi->restricted_levels = 0;
+
rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
rv6xx_clear_vc(rdev);
@@ -1756,6 +1758,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
+ rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
+
return 0;
}
@@ -1914,6 +1918,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
(power_state->v1.ucNonClockStateIndex *
power_info->pplib.ucNonClockSize));
if (power_info->pplib.ucStateEntrySize - 1) {
+ u8 *idx;
ps = kzalloc(sizeof(struct rv6xx_ps), GFP_KERNEL);
if (ps == NULL) {
kfree(rdev->pm.dpm.ps);
@@ -1922,12 +1927,12 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
rdev->pm.dpm.ps[i].ps_priv = ps;
rv6xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
non_clock_info);
+ idx = (u8 *)&power_state->v1.ucClockStateIndices[0];
for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
clock_info = (union pplib_clock_info *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
- (power_state->v1.ucClockStateIndices[j] *
- power_info->pplib.ucClockInfoSize));
+ (idx[j] * power_info->pplib.ucClockInfoSize));
rv6xx_parse_pplib_clock_info(rdev,
&rdev->pm.dpm.ps[i], j,
clock_info);
@@ -1940,9 +1945,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)
int rv6xx_dpm_init(struct radeon_device *rdev)
{
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- uint16_t data_offset, size;
- uint8_t frev, crev;
+ struct radeon_atom_ss ss;
struct atom_clock_dividers dividers;
struct rv6xx_power_info *pi;
int ret;
@@ -1985,16 +1988,18 @@ int rv6xx_dpm_init(struct radeon_device *rdev)
pi->gfx_clock_gating = true;
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
+ pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_ENGINE_SS, 0);
+ pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_MEMORY_SS, 0);
+
+ /* Disable sclk ss, causes hangs on a lot of systems */
+ pi->sclk_ss = false;
+
+ if (pi->sclk_ss || pi->mclk_ss)
pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
+ else
pi->dynamic_ss = false;
- }
pi->dynamic_pcie_gen2 = true;
@@ -2085,3 +2090,34 @@ u32 rv6xx_dpm_get_mclk(struct radeon_device *rdev, bool low)
else
return requested_state->high.mclk;
}
+
+int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
+ enum radeon_dpm_forced_level level)
+{
+ struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+ if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+ pi->restricted_levels = 3;
+ } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+ pi->restricted_levels = 2;
+ } else {
+ pi->restricted_levels = 0;
+ }
+
+ rv6xx_clear_vc(rdev);
+ r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
+ r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);
+ r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
+ r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false);
+ r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);
+ rv6xx_enable_medium(rdev);
+ rv6xx_enable_high(rdev);
+ if (pi->restricted_levels == 3)
+ r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, false);
+ rv6xx_program_vc(rdev);
+ rv6xx_program_at(rdev);
+
+ rdev->pm.dpm.forced_level = level;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 30ea14e8854..9f5846743c9 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -744,10 +744,10 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
(const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
radeon_program_register_sequence(rdev,
rv730_golden_registers,
- (const u32)ARRAY_SIZE(rv770_golden_registers));
+ (const u32)ARRAY_SIZE(rv730_golden_registers));
radeon_program_register_sequence(rdev,
rv730_mgcg_init,
- (const u32)ARRAY_SIZE(rv770_mgcg_init));
+ (const u32)ARRAY_SIZE(rv730_mgcg_init));
break;
case CHIP_RV710:
radeon_program_register_sequence(rdev,
@@ -758,18 +758,18 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
(const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
radeon_program_register_sequence(rdev,
rv710_golden_registers,
- (const u32)ARRAY_SIZE(rv770_golden_registers));
+ (const u32)ARRAY_SIZE(rv710_golden_registers));
radeon_program_register_sequence(rdev,
rv710_mgcg_init,
- (const u32)ARRAY_SIZE(rv770_mgcg_init));
+ (const u32)ARRAY_SIZE(rv710_mgcg_init));
break;
case CHIP_RV740:
radeon_program_register_sequence(rdev,
rv740_golden_registers,
- (const u32)ARRAY_SIZE(rv770_golden_registers));
+ (const u32)ARRAY_SIZE(rv740_golden_registers));
radeon_program_register_sequence(rdev,
rv740_mgcg_init,
- (const u32)ARRAY_SIZE(rv770_mgcg_init));
+ (const u32)ARRAY_SIZE(rv740_mgcg_init));
break;
default:
break;
@@ -801,103 +801,6 @@ u32 rv770_get_xclk(struct radeon_device *rdev)
return reference_clock;
}
-int rv770_uvd_resume(struct radeon_device *rdev)
-{
- uint64_t addr;
- uint32_t chip_id, size;
- int r;
-
- r = radeon_uvd_resume(rdev);
- if (r)
- return r;
-
- /* programm the VCPU memory controller bits 0-27 */
- addr = rdev->uvd.gpu_addr >> 3;
- size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3;
- WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
- WREG32(UVD_VCPU_CACHE_SIZE0, size);
-
- addr += size;
- size = RADEON_UVD_STACK_SIZE >> 3;
- WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
- WREG32(UVD_VCPU_CACHE_SIZE1, size);
-
- addr += size;
- size = RADEON_UVD_HEAP_SIZE >> 3;
- WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
- WREG32(UVD_VCPU_CACHE_SIZE2, size);
-
- /* bits 28-31 */
- addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
- WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
-
- /* bits 32-39 */
- addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
- WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
-
- /* tell firmware which hardware it is running on */
- switch (rdev->family) {
- default:
- return -EINVAL;
- case CHIP_RV710:
- chip_id = 0x01000005;
- break;
- case CHIP_RV730:
- chip_id = 0x01000006;
- break;
- case CHIP_RV740:
- chip_id = 0x01000007;
- break;
- case CHIP_CYPRESS:
- case CHIP_HEMLOCK:
- chip_id = 0x01000008;
- break;
- case CHIP_JUNIPER:
- chip_id = 0x01000009;
- break;
- case CHIP_REDWOOD:
- chip_id = 0x0100000a;
- break;
- case CHIP_CEDAR:
- chip_id = 0x0100000b;
- break;
- case CHIP_SUMO:
- case CHIP_SUMO2:
- chip_id = 0x0100000c;
- break;
- case CHIP_PALM:
- chip_id = 0x0100000e;
- break;
- case CHIP_CAYMAN:
- chip_id = 0x0100000f;
- break;
- case CHIP_BARTS:
- chip_id = 0x01000010;
- break;
- case CHIP_TURKS:
- chip_id = 0x01000011;
- break;
- case CHIP_CAICOS:
- chip_id = 0x01000012;
- break;
- case CHIP_TAHITI:
- chip_id = 0x01000014;
- break;
- case CHIP_VERDE:
- chip_id = 0x01000015;
- break;
- case CHIP_PITCAIRN:
- chip_id = 0x01000016;
- break;
- case CHIP_ARUBA:
- chip_id = 0x01000017;
- break;
- }
- WREG32(UVD_VCPU_CHIP_ID, chip_id);
-
- return 0;
-}
-
u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
@@ -1747,80 +1650,6 @@ static int rv770_mc_init(struct radeon_device *rdev)
return 0;
}
-/**
- * rv770_copy_dma - copy pages using the DMA engine
- *
- * @rdev: radeon_device pointer
- * @src_offset: src GPU address
- * @dst_offset: dst GPU address
- * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
- *
- * Copy GPU paging using the DMA engine (r7xx).
- * Used by the radeon ttm implementation to move pages if
- * registered as the asic copy callback.
- */
-int rv770_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
-{
- struct radeon_semaphore *sem = NULL;
- int ring_index = rdev->asic->copy.dma_ring_index;
- struct radeon_ring *ring = &rdev->ring[ring_index];
- u32 size_in_dw, cur_size_in_dw;
- int i, num_loops;
- int r = 0;
-
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
- }
-
- size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
- num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
- r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
- return r;
- }
-
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
-
- for (i = 0; i < num_loops; i++) {
- cur_size_in_dw = size_in_dw;
- if (cur_size_in_dw > 0xFFFF)
- cur_size_in_dw = 0xFFFF;
- size_in_dw -= cur_size_in_dw;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
- radeon_ring_write(ring, dst_offset & 0xfffffffc);
- radeon_ring_write(ring, src_offset & 0xfffffffc);
- radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
- radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
- src_offset += cur_size_in_dw * 4;
- dst_offset += cur_size_in_dw * 4;
- }
-
- r = radeon_fence_emit(rdev, fence, ring->idx);
- if (r) {
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
-
- radeon_ring_unlock_commit(rdev, ring);
- radeon_semaphore_free(rdev, &sem, *fence);
-
- return r;
-}
-
static int rv770_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring;
@@ -1829,6 +1658,13 @@ static int rv770_startup(struct radeon_device *rdev)
/* enable pcie gen2 link */
rv770_pcie_gen2_enable(rdev);
+ /* scratch needs to be initialized before MC */
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
+ rv770_mc_program(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = r600_init_microcode(rdev);
if (r) {
@@ -1837,11 +1673,6 @@ static int rv770_startup(struct radeon_device *rdev)
}
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
- rv770_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
rv770_agp_enable(rdev);
} else {
@@ -1851,12 +1682,6 @@ static int rv770_startup(struct radeon_device *rdev)
}
rv770_gpu_init(rdev);
- r = r600_blit_init(rdev);
- if (r) {
- r600_blit_fini(rdev);
- rdev->asic->copy.copy = NULL;
- dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
- }
/* allocate wb buffer */
r = radeon_wb_init(rdev);
@@ -1875,7 +1700,7 @@ static int rv770_startup(struct radeon_device *rdev)
return r;
}
- r = rv770_uvd_resume(rdev);
+ r = uvd_v2_2_resume(rdev);
if (!r) {
r = radeon_fence_driver_start_ring(rdev,
R600_RING_TYPE_UVD_INDEX);
@@ -1904,14 +1729,14 @@ static int rv770_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[R600_RING_TYPE_DMA_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_RB_RPTR, DMA_RB_WPTR,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
if (r)
return r;
@@ -1928,12 +1753,11 @@ static int rv770_startup(struct radeon_device *rdev)
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size,
- R600_WB_UVD_RPTR_OFFSET,
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (!r)
- r = r600_uvd_init(rdev);
+ r = uvd_v1_0_init(rdev);
if (r)
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
@@ -1983,6 +1807,7 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
r700_cp_stop(rdev);
r600_dma_stop(rdev);
@@ -2090,7 +1915,6 @@ int rv770_init(struct radeon_device *rdev)
void rv770_fini(struct radeon_device *rdev)
{
- r600_blit_fini(rdev);
r700_cp_fini(rdev);
r600_dma_fini(rdev);
r600_irq_fini(rdev);
@@ -2098,6 +1922,7 @@ void rv770_fini(struct radeon_device *rdev)
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
new file mode 100644
index 00000000000..f9b02e3d683
--- /dev/null
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "rv770d.h"
+
+/**
+ * rv770_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the DMA engine (r7xx).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int rv770_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_dw, cur_size_in_dw;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
+ num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_dw = size_in_dw;
+ if (cur_size_in_dw > 0xFFFF)
+ cur_size_in_dw = 0xFFFF;
+ size_in_dw -= cur_size_in_dw;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
+ radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, src_offset & 0xfffffffc);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_dw * 4;
+ dst_offset += cur_size_in_dw * 4;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
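rv770_copy_dma() above splits a copy into DMA packets of at most 0xFFFF dwords each and reserves num_loops * 5 + 8 dwords of ring space before emitting them. A tiny standalone calculation of those numbers, assuming 4 KiB GPU pages:

#include <stdio.h>

#define GPU_PAGE_SHIFT    12       /* 4 KiB GPU pages, assumed here */
#define MAX_DW_PER_PACKET 0xFFFFu  /* one DMA_PACKET_COPY moves at most 0xFFFF dwords */

int main(void)
{
	unsigned num_gpu_pages = 2048;                       /* an 8 MiB copy */
	unsigned size_in_dw = (num_gpu_pages << GPU_PAGE_SHIFT) / 4;
	unsigned num_loops = (size_in_dw + MAX_DW_PER_PACKET - 1) / MAX_DW_PER_PACKET;
	unsigned ring_dw = num_loops * 5 + 8;   /* 5 dwords per copy packet + overhead */

	printf("size_in_dw=%u num_loops=%u ring space=%u dwords\n",
	       size_in_dw, num_loops, ring_dw);
	return 0;
}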
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 2d347925f77..8cbb85dae5a 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2294,6 +2294,7 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
(power_state->v1.ucNonClockStateIndex *
power_info->pplib.ucNonClockSize));
if (power_info->pplib.ucStateEntrySize - 1) {
+ u8 *idx;
ps = kzalloc(sizeof(struct rv7xx_ps), GFP_KERNEL);
if (ps == NULL) {
kfree(rdev->pm.dpm.ps);
@@ -2303,12 +2304,12 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
rv7xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
non_clock_info,
power_info->pplib.ucNonClockSize);
+ idx = (u8 *)&power_state->v1.ucClockStateIndices[0];
for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
clock_info = (union pplib_clock_info *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
- (power_state->v1.ucClockStateIndices[j] *
- power_info->pplib.ucClockInfoSize));
+ (idx[j] * power_info->pplib.ucClockInfoSize));
rv7xx_parse_pplib_clock_info(rdev,
&rdev->pm.dpm.ps[i], j,
clock_info);
@@ -2319,12 +2320,25 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
return 0;
}
+void rv770_get_engine_memory_ss(struct radeon_device *rdev)
+{
+ struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+ struct radeon_atom_ss ss;
+
+ pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_ENGINE_SS, 0);
+ pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_MEMORY_SS, 0);
+
+ if (pi->sclk_ss || pi->mclk_ss)
+ pi->dynamic_ss = true;
+ else
+ pi->dynamic_ss = false;
+}
+
int rv770_dpm_init(struct radeon_device *rdev)
{
struct rv7xx_power_info *pi;
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- uint16_t data_offset, size;
- uint8_t frev, crev;
struct atom_clock_dividers dividers;
int ret;
@@ -2369,16 +2383,7 @@ int rv770_dpm_init(struct radeon_device *rdev)
pi->mvdd_control =
radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
- pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- pi->dynamic_ss = false;
- }
+ rv770_get_engine_memory_ss(rdev);
pi->asi = RV770_ASI_DFLT;
pi->pasi = RV770_HASI_DFLT;
@@ -2393,8 +2398,7 @@ int rv770_dpm_init(struct radeon_device *rdev)
pi->dynamic_pcie_gen2 = true;
- if (pi->gfx_clock_gating &&
- (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
pi->thermal_protection = true;
else
pi->thermal_protection = false;
@@ -2514,8 +2518,16 @@ u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low)
bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
{
u32 vblank_time = r600_dpm_get_vblank_time(rdev);
+ u32 switch_limit = 300;
+
+ /* quirks */
+ /* ASUS K70AF */
+ if ((rdev->pdev->device == 0x9553) &&
+ (rdev->pdev->subsystem_vendor == 0x1043) &&
+ (rdev->pdev->subsystem_device == 0x1c42))
+ switch_limit = 200;
- if (vblank_time < 300)
+ if (vblank_time < switch_limit)
return true;
else
return false;
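rv770_dpm_vblank_too_short() now compares the vblank time against a per-board switch limit, 300 by default and 200 for the quirked ASUS K70AF (device 0x9553, subsystem 0x1043:0x1c42). A minimal standalone version of the same check, with a stand-in struct for the PCI IDs:

#include <stdbool.h>
#include <stdio.h>

struct pci_id {
	unsigned short device;
	unsigned short subsystem_vendor;
	unsigned short subsystem_device;
};

static bool vblank_too_short(unsigned vblank_time, const struct pci_id *id)
{
	unsigned switch_limit = 300;

	/* board-specific quirk: lower the limit for the ASUS K70AF */
	if (id->device == 0x9553 &&
	    id->subsystem_vendor == 0x1043 &&
	    id->subsystem_device == 0x1c42)
		switch_limit = 200;

	return vblank_time < switch_limit;
}

int main(void)
{
	struct pci_id k70af = { 0x9553, 0x1043, 0x1c42 };
	printf("vblank 250 on quirked board: %s\n",
	       vblank_too_short(250, &k70af) ? "too short" : "ok");
	return 0;
}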
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h
index 96b1b2a62a8..9244effc6b5 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.h
+++ b/drivers/gpu/drm/radeon/rv770_dpm.h
@@ -275,6 +275,7 @@ void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
struct radeon_ps *new_ps,
struct radeon_ps *old_ps);
+void rv770_get_engine_memory_ss(struct radeon_device *rdev);
/* smc */
int rv770_read_smc_soft_register(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 6bef2b7d601..9fe60e54292 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -971,7 +971,21 @@
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
+/*
+ * PM4
+ */
+#define PACKET0(reg, n) ((RADEON_PACKET_TYPE0 << 30) | \
+ (((reg) >> 2) & 0xFFFF) | \
+ ((n) & 0x3FFF) << 16)
+#define PACKET3(op, n) ((RADEON_PACKET_TYPE3 << 30) | \
+ (((op) & 0xFF) << 8) | \
+ ((n) & 0x3FFF) << 16)
+
/* UVD */
+#define UVD_GPCOM_VCPU_CMD 0xef0c
+#define UVD_GPCOM_VCPU_DATA0 0xef10
+#define UVD_GPCOM_VCPU_DATA1 0xef14
+
#define UVD_LMI_EXT40_ADDR 0xf498
#define UVD_VCPU_CHIP_ID 0xf4d4
#define UVD_VCPU_CACHE_OFFSET0 0xf4d8
@@ -985,4 +999,6 @@
#define UVD_RBC_RB_RPTR 0xf690
#define UVD_RBC_RB_WPTR 0xf694
+#define UVD_CONTEXT_ID 0xf6f4
+
#endif
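The PACKET0/PACKET3 macros added to rv770d.h above encode the packet type in bits 30-31, the register offset or opcode in the low bits, and the payload count in bits 16-29. A standalone sketch of that encoding; the packet-type values (0 and 3) and the type-3 opcode used below are assumptions for illustration only:

#include <stdint.h>
#include <stdio.h>

#define PACKET_TYPE0 0u   /* assumed value of RADEON_PACKET_TYPE0 */
#define PACKET_TYPE3 3u   /* assumed value of RADEON_PACKET_TYPE3 */

#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | (((reg) >> 2) & 0xFFFF) | \
			 (((n) & 0x3FFF) << 16))
#define PACKET3(op, n)  ((PACKET_TYPE3 << 30) | (((op) & 0xFF) << 8) | \
			 (((n) & 0x3FFF) << 16))

#define UVD_GPCOM_VCPU_DATA0 0xef10   /* register offset from the hunk above */

int main(void)
{
	/* type-0 packet writing one dword to UVD_GPCOM_VCPU_DATA0 */
	uint32_t hdr0 = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
	/* type-3 packet with a demo opcode of 0x10 and one following dword */
	uint32_t hdr3 = PACKET3(0x10, 1);

	printf("PACKET0 header: 0x%08x\n", hdr0);
	printf("PACKET3 header: 0x%08x\n", hdr3);
	return 0;
}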
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d325280e2f9..3e23b757dcf 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -68,6 +68,8 @@ MODULE_FIRMWARE("radeon/HAINAN_smc.bin");
static void si_pcie_gen3_enable(struct radeon_device *rdev);
static void si_program_aspm(struct radeon_device *rdev);
+extern void sumo_rlc_fini(struct radeon_device *rdev);
+extern int sumo_rlc_init(struct radeon_device *rdev);
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
@@ -76,6 +78,11 @@ extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_
extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
extern void evergreen_print_gpu_status_regs(struct radeon_device *rdev);
extern bool evergreen_is_display_hung(struct radeon_device *rdev);
+extern void si_dma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags);
static const u32 verde_rlc_save_restore_register_list[] =
{
@@ -1663,9 +1670,13 @@ static int si_init_microcode(struct radeon_device *rdev)
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
- if (err)
- goto out;
- if (rdev->smc_fw->size != smc_req_size) {
+ if (err) {
+ printk(KERN_ERR
+ "smc: error loading firmware \"%s\"\n",
+ fw_name);
+ release_firmware(rdev->smc_fw);
+ rdev->smc_fw = NULL;
+ } else if (rdev->smc_fw->size != smc_req_size) {
printk(KERN_ERR
"si_smc: Bogus length %zu in firmware \"%s\"\n",
rdev->smc_fw->size, fw_name);
@@ -1700,7 +1711,8 @@ static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode,
struct drm_display_mode *other_mode)
{
- u32 tmp;
+ u32 tmp, buffer_alloc, i;
+ u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
/*
* Line Buffer Setup
* There are 3 line buffers, each one shared by 2 display controllers.
@@ -1715,16 +1727,30 @@ static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
* non-linked crtcs for maximum line buffer allocation.
*/
if (radeon_crtc->base.enabled && mode) {
- if (other_mode)
+ if (other_mode) {
tmp = 0; /* 1/2 */
- else
+ buffer_alloc = 1;
+ } else {
tmp = 2; /* whole */
- } else
+ buffer_alloc = 2;
+ }
+ } else {
tmp = 0;
+ buffer_alloc = 0;
+ }
WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
DC_LB_MEMORY_CONFIG(tmp));
+ WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
+ DMIF_BUFFERS_ALLOCATED(buffer_alloc));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
+ DMIF_BUFFERS_ALLOCATED_COMPLETED)
+ break;
+ udelay(1);
+ }
+
if (radeon_crtc->base.enabled && mode) {
switch (tmp) {
case 0:
@@ -3360,17 +3386,6 @@ static int si_cp_resume(struct radeon_device *rdev)
u32 rb_bufsz;
int r;
- /* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
- WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
- SOFT_RESET_PA |
- SOFT_RESET_VGT |
- SOFT_RESET_SPI |
- SOFT_RESET_SX));
- RREG32(GRBM_SOFT_RESET);
- mdelay(15);
- WREG32(GRBM_SOFT_RESET, 0);
- RREG32(GRBM_SOFT_RESET);
-
WREG32(CP_SEM_WAIT_TIMER, 0x0);
WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
@@ -3383,8 +3398,8 @@ static int si_cp_resume(struct radeon_device *rdev)
/* ring 0 - compute and gfx */
/* Set ring buffer size */
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -3416,8 +3431,8 @@ static int si_cp_resume(struct radeon_device *rdev)
/* ring1 - compute only */
/* Set ring buffer size */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -3442,8 +3457,8 @@ static int si_cp_resume(struct radeon_device *rdev)
/* ring2 - compute only */
/* Set ring buffer size */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
- rb_bufsz = drm_order(ring->ring_size / 8);
- tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = (order_base_2(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
@@ -3489,7 +3504,7 @@ static int si_cp_resume(struct radeon_device *rdev)
return 0;
}
-static u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
+u32 si_gpu_check_soft_reset(struct radeon_device *rdev)
{
u32 reset_mask = 0;
u32 tmp;
@@ -3738,34 +3753,6 @@ bool si_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
return radeon_ring_test_lockup(rdev, ring);
}
-/**
- * si_dma_is_lockup - Check if the DMA engine is locked up
- *
- * @rdev: radeon_device pointer
- * @ring: radeon_ring structure holding ring information
- *
- * Check if the async DMA engine is locked up.
- * Returns true if the engine appears to be locked up, false if not.
- */
-bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
-{
- u32 reset_mask = si_gpu_check_soft_reset(rdev);
- u32 mask;
-
- if (ring->idx == R600_RING_TYPE_DMA_INDEX)
- mask = RADEON_RESET_DMA;
- else
- mask = RADEON_RESET_DMA1;
-
- if (!(reset_mask & mask)) {
- radeon_ring_lockup_update(ring);
- return false;
- }
- /* force ring activities */
- radeon_ring_force_activity(rdev, ring);
- return radeon_ring_test_lockup(rdev, ring);
-}
-
/* MC */
static void si_mc_program(struct radeon_device *rdev)
{
@@ -4079,13 +4066,64 @@ static int si_vm_packet3_ce_check(struct radeon_device *rdev,
return 0;
}
+static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx)
+{
+ u32 start_reg, reg, i;
+ u32 command = ib[idx + 4];
+ u32 info = ib[idx + 1];
+ u32 idx_value = ib[idx];
+ if (command & PACKET3_CP_DMA_CMD_SAS) {
+ /* src address space is register */
+ if (((info & 0x60000000) >> 29) == 0) {
+ start_reg = idx_value << 2;
+ if (command & PACKET3_CP_DMA_CMD_SAIC) {
+ reg = start_reg;
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad SRC register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ if (command & PACKET3_CP_DMA_CMD_DAS) {
+ /* dst address space is register */
+ if (((info & 0x00300000) >> 20) == 0) {
+ start_reg = ib[idx + 2];
+ if (command & PACKET3_CP_DMA_CMD_DAIC) {
+ reg = start_reg;
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ } else {
+ for (i = 0; i < (command & 0x1fffff); i++) {
+ reg = start_reg + (4 * i);
+ if (!si_vm_reg_valid(reg)) {
+ DRM_ERROR("CP DMA Bad DST register\n");
+ return -EINVAL;
+ }
+ }
+ }
+ }
+ }
+ return 0;
+}
+
static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
u32 *ib, struct radeon_cs_packet *pkt)
{
+ int r;
u32 idx = pkt->idx + 1;
u32 idx_value = ib[idx];
u32 start_reg, end_reg, reg, i;
- u32 command, info;
switch (pkt->opcode) {
case PACKET3_NOP:
@@ -4186,50 +4224,9 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
}
break;
case PACKET3_CP_DMA:
- command = ib[idx + 4];
- info = ib[idx + 1];
- if (command & PACKET3_CP_DMA_CMD_SAS) {
- /* src address space is register */
- if (((info & 0x60000000) >> 29) == 0) {
- start_reg = idx_value << 2;
- if (command & PACKET3_CP_DMA_CMD_SAIC) {
- reg = start_reg;
- if (!si_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad SRC register\n");
- return -EINVAL;
- }
- } else {
- for (i = 0; i < (command & 0x1fffff); i++) {
- reg = start_reg + (4 * i);
- if (!si_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad SRC register\n");
- return -EINVAL;
- }
- }
- }
- }
- }
- if (command & PACKET3_CP_DMA_CMD_DAS) {
- /* dst address space is register */
- if (((info & 0x00300000) >> 20) == 0) {
- start_reg = ib[idx + 2];
- if (command & PACKET3_CP_DMA_CMD_DAIC) {
- reg = start_reg;
- if (!si_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad DST register\n");
- return -EINVAL;
- }
- } else {
- for (i = 0; i < (command & 0x1fffff); i++) {
- reg = start_reg + (4 * i);
- if (!si_vm_reg_valid(reg)) {
- DRM_ERROR("CP DMA Bad DST register\n");
- return -EINVAL;
- }
- }
- }
- }
- }
+ r = si_vm_packet3_cp_dma_check(ib, idx);
+ if (r)
+ return r;
break;
default:
DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
@@ -4241,6 +4238,7 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
static int si_vm_packet3_compute_check(struct radeon_device *rdev,
u32 *ib, struct radeon_cs_packet *pkt)
{
+ int r;
u32 idx = pkt->idx + 1;
u32 idx_value = ib[idx];
u32 start_reg, reg, i;
@@ -4313,6 +4311,11 @@ static int si_vm_packet3_compute_check(struct radeon_device *rdev,
return -EINVAL;
}
break;
+ case PACKET3_CP_DMA:
+ r = si_vm_packet3_cp_dma_check(ib, idx);
+ if (r)
+ return r;
+ break;
default:
DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
return -EINVAL;
@@ -4704,58 +4707,7 @@ void si_vm_set_page(struct radeon_device *rdev,
}
} else {
/* DMA */
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- /* for non-physically contiguous pages (system) */
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
- ib->ptr[ib->length_dw++] = pe;
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- for (; ndw > 0; ndw -= 2, --count, pe += 8) {
- if (flags & RADEON_VM_PAGE_SYSTEM) {
- value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
- } else if (flags & RADEON_VM_PAGE_VALID) {
- value = addr;
- } else {
- value = 0;
- }
- addr += incr;
- value |= r600_flags;
- ib->ptr[ib->length_dw++] = value;
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- }
- }
- } else {
- while (count) {
- ndw = count * 2;
- if (ndw > 0xFFFFE)
- ndw = 0xFFFFE;
-
- if (flags & RADEON_VM_PAGE_VALID)
- value = addr;
- else
- value = 0;
- /* for physically contiguous pages (vram) */
- ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
- ib->ptr[ib->length_dw++] = pe; /* dst addr */
- ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
- ib->ptr[ib->length_dw++] = r600_flags; /* mask */
- ib->ptr[ib->length_dw++] = 0;
- ib->ptr[ib->length_dw++] = value; /* value */
- ib->ptr[ib->length_dw++] = upper_32_bits(value);
- ib->ptr[ib->length_dw++] = incr; /* increment size */
- ib->ptr[ib->length_dw++] = 0;
- pe += ndw * 4;
- addr += (ndw / 2) * incr;
- count -= ndw / 2;
- }
- }
- while (ib->length_dw & 0x7)
- ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
+ si_dma_vm_set_page(rdev, ib, pe, addr, count, incr, flags);
}
}
@@ -4802,32 +4754,6 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
radeon_ring_write(ring, 0x0);
}
-void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
-{
- struct radeon_ring *ring = &rdev->ring[ridx];
-
- if (vm == NULL)
- return;
-
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
- if (vm->id < 8) {
- radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
- } else {
- radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
- }
- radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
-
- /* flush hdp cache */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
- radeon_ring_write(ring, 1);
-
- /* bits 0-7 are the VM contexts0-7 */
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
- radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
- radeon_ring_write(ring, 1 << vm->id);
-}
-
/*
* Power and clock gating
*/
@@ -4895,7 +4821,7 @@ static void si_set_uvd_dcm(struct radeon_device *rdev,
WREG32_UVD_CTX(UVD_CGC_CTRL2, tmp2);
}
-static void si_init_uvd_internal_cg(struct radeon_device *rdev)
+void si_init_uvd_internal_cg(struct radeon_device *rdev)
{
bool hw_mode = true;
@@ -4938,7 +4864,7 @@ static void si_enable_dma_pg(struct radeon_device *rdev, bool enable)
u32 data, orig;
orig = data = RREG32(DMA_PG);
- if (enable)
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA))
data |= PG_CNTL_ENABLE;
else
data &= ~PG_CNTL_ENABLE;
@@ -4962,7 +4888,7 @@ static void si_enable_gfx_cgpg(struct radeon_device *rdev,
{
u32 tmp;
- if (enable) {
+ if (enable && (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG)) {
tmp = RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10);
WREG32(RLC_TTOP_D, tmp);
@@ -5065,9 +4991,9 @@ static void si_enable_cgcg(struct radeon_device *rdev,
orig = data = RREG32(RLC_CGCG_CGLS_CTRL);
- si_enable_gui_idle_interrupt(rdev, enable);
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CGCG)) {
+ si_enable_gui_idle_interrupt(rdev, true);
- if (enable) {
WREG32(RLC_GCPM_GENERAL_3, 0x00000080);
tmp = si_halt_rlc(rdev);
@@ -5084,6 +5010,8 @@ static void si_enable_cgcg(struct radeon_device *rdev,
data |= CGCG_EN | CGLS_EN;
} else {
+ si_enable_gui_idle_interrupt(rdev, false);
+
RREG32(CB_CGTT_SCLK_CTRL);
RREG32(CB_CGTT_SCLK_CTRL);
RREG32(CB_CGTT_SCLK_CTRL);
@@ -5101,16 +5029,18 @@ static void si_enable_mgcg(struct radeon_device *rdev,
{
u32 data, orig, tmp = 0;
- if (enable) {
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_MGCG)) {
orig = data = RREG32(CGTS_SM_CTRL_REG);
data = 0x96940200;
if (orig != data)
WREG32(CGTS_SM_CTRL_REG, data);
- orig = data = RREG32(CP_MEM_SLP_CNTL);
- data |= CP_MEM_LS_EN;
- if (orig != data)
- WREG32(CP_MEM_SLP_CNTL, data);
+ if (rdev->cg_flags & RADEON_CG_SUPPORT_GFX_CP_LS) {
+ orig = data = RREG32(CP_MEM_SLP_CNTL);
+ data |= CP_MEM_LS_EN;
+ if (orig != data)
+ WREG32(CP_MEM_SLP_CNTL, data);
+ }
orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
data &= 0xffffffc0;
@@ -5155,7 +5085,7 @@ static void si_enable_uvd_mgcg(struct radeon_device *rdev,
{
u32 orig, data, tmp;
- if (enable) {
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_UVD_MGCG)) {
tmp = RREG32_UVD_CTX(UVD_CGC_MEM_CTRL);
tmp |= 0x3fff;
WREG32_UVD_CTX(UVD_CGC_MEM_CTRL, tmp);
@@ -5203,7 +5133,7 @@ static void si_enable_mc_ls(struct radeon_device *rdev,
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
- if (enable)
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_LS))
data |= MC_LS_ENABLE;
else
data &= ~MC_LS_ENABLE;
@@ -5212,230 +5142,295 @@ static void si_enable_mc_ls(struct radeon_device *rdev,
}
}
-
-static void si_init_cg(struct radeon_device *rdev)
+static void si_enable_mc_mgcg(struct radeon_device *rdev,
+ bool enable)
{
- bool has_uvd = true;
+ int i;
+ u32 orig, data;
- si_enable_mgcg(rdev, true);
- si_enable_cgcg(rdev, true);
- /* disable MC LS on Tahiti */
- if (rdev->family == CHIP_TAHITI)
- si_enable_mc_ls(rdev, false);
- if (has_uvd) {
- si_enable_uvd_mgcg(rdev, true);
- si_init_uvd_internal_cg(rdev);
+ for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
+ orig = data = RREG32(mc_cg_registers[i]);
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_MC_MGCG))
+ data |= MC_CG_ENABLE;
+ else
+ data &= ~MC_CG_ENABLE;
+ if (data != orig)
+ WREG32(mc_cg_registers[i], data);
}
}
-static void si_fini_cg(struct radeon_device *rdev)
+static void si_enable_dma_mgcg(struct radeon_device *rdev,
+ bool enable)
{
- bool has_uvd = true;
+ u32 orig, data, offset;
+ int i;
- if (has_uvd)
- si_enable_uvd_mgcg(rdev, false);
- si_enable_cgcg(rdev, false);
- si_enable_mgcg(rdev, false);
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_SDMA_MGCG)) {
+ for (i = 0; i < 2; i++) {
+ if (i == 0)
+ offset = DMA0_REGISTER_OFFSET;
+ else
+ offset = DMA1_REGISTER_OFFSET;
+ orig = data = RREG32(DMA_POWER_CNTL + offset);
+ data &= ~MEM_POWER_OVERRIDE;
+ if (data != orig)
+ WREG32(DMA_POWER_CNTL + offset, data);
+ WREG32(DMA_CLK_CTRL + offset, 0x00000100);
+ }
+ } else {
+ for (i = 0; i < 2; i++) {
+ if (i == 0)
+ offset = DMA0_REGISTER_OFFSET;
+ else
+ offset = DMA1_REGISTER_OFFSET;
+ orig = data = RREG32(DMA_POWER_CNTL + offset);
+ data |= MEM_POWER_OVERRIDE;
+ if (data != orig)
+ WREG32(DMA_POWER_CNTL + offset, data);
+
+ orig = data = RREG32(DMA_CLK_CTRL + offset);
+ data = 0xff000000;
+ if (data != orig)
+ WREG32(DMA_CLK_CTRL + offset, data);
+ }
+ }
}
-static void si_init_pg(struct radeon_device *rdev)
+static void si_enable_bif_mgls(struct radeon_device *rdev,
+ bool enable)
{
- bool has_pg = false;
+ u32 orig, data;
- /* only cape verde supports PG */
- if (rdev->family == CHIP_VERDE)
- has_pg = true;
+ orig = data = RREG32_PCIE(PCIE_CNTL2);
- if (has_pg) {
- si_init_ao_cu_mask(rdev);
- si_init_dma_pg(rdev);
- si_enable_dma_pg(rdev, true);
- si_init_gfx_cgpg(rdev);
- si_enable_gfx_cgpg(rdev, true);
- } else {
- WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
- WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
- }
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_BIF_LS))
+ data |= SLV_MEM_LS_EN | MST_MEM_LS_EN |
+ REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN;
+ else
+ data &= ~(SLV_MEM_LS_EN | MST_MEM_LS_EN |
+ REPLAY_MEM_LS_EN | SLV_MEM_AGGRESSIVE_LS_EN);
+
+ if (orig != data)
+ WREG32_PCIE(PCIE_CNTL2, data);
}
-static void si_fini_pg(struct radeon_device *rdev)
+static void si_enable_hdp_mgcg(struct radeon_device *rdev,
+ bool enable)
{
- bool has_pg = false;
+ u32 orig, data;
- /* only cape verde supports PG */
- if (rdev->family == CHIP_VERDE)
- has_pg = true;
+ orig = data = RREG32(HDP_HOST_PATH_CNTL);
- if (has_pg) {
- si_enable_dma_pg(rdev, false);
- si_enable_gfx_cgpg(rdev, false);
- }
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_MGCG))
+ data &= ~CLOCK_GATING_DIS;
+ else
+ data |= CLOCK_GATING_DIS;
+
+ if (orig != data)
+ WREG32(HDP_HOST_PATH_CNTL, data);
}
-/*
- * RLC
- */
-void si_rlc_fini(struct radeon_device *rdev)
+static void si_enable_hdp_ls(struct radeon_device *rdev,
+ bool enable)
{
- int r;
-
- /* save restore block */
- if (rdev->rlc.save_restore_obj) {
- r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
- if (unlikely(r != 0))
- dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
- radeon_bo_unpin(rdev->rlc.save_restore_obj);
- radeon_bo_unreserve(rdev->rlc.save_restore_obj);
+ u32 orig, data;
- radeon_bo_unref(&rdev->rlc.save_restore_obj);
- rdev->rlc.save_restore_obj = NULL;
- }
+ orig = data = RREG32(HDP_MEM_POWER_LS);
- /* clear state block */
- if (rdev->rlc.clear_state_obj) {
- r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
- if (unlikely(r != 0))
- dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
- radeon_bo_unpin(rdev->rlc.clear_state_obj);
- radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+ if (enable && (rdev->cg_flags & RADEON_CG_SUPPORT_HDP_LS))
+ data |= HDP_LS_ENABLE;
+ else
+ data &= ~HDP_LS_ENABLE;
- radeon_bo_unref(&rdev->rlc.clear_state_obj);
- rdev->rlc.clear_state_obj = NULL;
- }
+ if (orig != data)
+ WREG32(HDP_MEM_POWER_LS, data);
}
-#define RLC_CLEAR_STATE_END_MARKER 0x00000001
-
-int si_rlc_init(struct radeon_device *rdev)
+void si_update_cg(struct radeon_device *rdev,
+ u32 block, bool enable)
{
- volatile u32 *dst_ptr;
- u32 dws, data, i, j, k, reg_num;
- u32 reg_list_num, reg_list_hdr_blk_index, reg_list_blk_index;
- u64 reg_list_mc_addr;
- const struct cs_section_def *cs_data = si_cs_data;
- int r;
-
- /* save restore block */
- if (rdev->rlc.save_restore_obj == NULL) {
- r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL,
- &rdev->rlc.save_restore_obj);
- if (r) {
- dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
- return r;
+ if (block & RADEON_CG_BLOCK_GFX) {
+ /* order matters! */
+ if (enable) {
+ si_enable_mgcg(rdev, true);
+ si_enable_cgcg(rdev, true);
+ } else {
+ si_enable_cgcg(rdev, false);
+ si_enable_mgcg(rdev, false);
}
}
- r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
- if (unlikely(r != 0)) {
- si_rlc_fini(rdev);
- return r;
+ if (block & RADEON_CG_BLOCK_MC) {
+ si_enable_mc_mgcg(rdev, enable);
+ si_enable_mc_ls(rdev, enable);
}
- r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->rlc.save_restore_gpu_addr);
- if (r) {
- radeon_bo_unreserve(rdev->rlc.save_restore_obj);
- dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
- si_rlc_fini(rdev);
- return r;
+
+ if (block & RADEON_CG_BLOCK_SDMA) {
+ si_enable_dma_mgcg(rdev, enable);
}
- if (rdev->family == CHIP_VERDE) {
- r = radeon_bo_kmap(rdev->rlc.save_restore_obj, (void **)&rdev->rlc.sr_ptr);
- if (r) {
- dev_warn(rdev->dev, "(%d) map RLC sr bo failed\n", r);
- si_rlc_fini(rdev);
- return r;
- }
- /* write the sr buffer */
- dst_ptr = rdev->rlc.sr_ptr;
- for (i = 0; i < ARRAY_SIZE(verde_rlc_save_restore_register_list); i++) {
- dst_ptr[i] = verde_rlc_save_restore_register_list[i];
- }
- radeon_bo_kunmap(rdev->rlc.save_restore_obj);
+ if (block & RADEON_CG_BLOCK_BIF) {
+ si_enable_bif_mgls(rdev, enable);
}
- radeon_bo_unreserve(rdev->rlc.save_restore_obj);
- /* clear state block */
- reg_list_num = 0;
- dws = 0;
- for (i = 0; cs_data[i].section != NULL; i++) {
- for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
- reg_list_num++;
- dws += cs_data[i].section[j].reg_count;
+ if (block & RADEON_CG_BLOCK_UVD) {
+ if (rdev->has_uvd) {
+ si_enable_uvd_mgcg(rdev, enable);
}
}
- reg_list_blk_index = (3 * reg_list_num + 2);
- dws += reg_list_blk_index;
- if (rdev->rlc.clear_state_obj == NULL) {
- r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
- RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->rlc.clear_state_obj);
- if (r) {
- dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
- si_rlc_fini(rdev);
- return r;
- }
+ if (block & RADEON_CG_BLOCK_HDP) {
+ si_enable_hdp_mgcg(rdev, enable);
+ si_enable_hdp_ls(rdev, enable);
}
- r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
- if (unlikely(r != 0)) {
- si_rlc_fini(rdev);
- return r;
+}
+
+static void si_init_cg(struct radeon_device *rdev)
+{
+ si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_HDP), true);
+ if (rdev->has_uvd) {
+ si_update_cg(rdev, RADEON_CG_BLOCK_UVD, true);
+ si_init_uvd_internal_cg(rdev);
}
- r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
- &rdev->rlc.clear_state_gpu_addr);
- if (r) {
+}
- radeon_bo_unreserve(rdev->rlc.clear_state_obj);
- dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
- si_rlc_fini(rdev);
- return r;
+static void si_fini_cg(struct radeon_device *rdev)
+{
+ if (rdev->has_uvd) {
+ si_update_cg(rdev, RADEON_CG_BLOCK_UVD, false);
}
- r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&rdev->rlc.cs_ptr);
- if (r) {
- dev_warn(rdev->dev, "(%d) map RLC c bo failed\n", r);
- si_rlc_fini(rdev);
- return r;
+ si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_HDP), false);
+}
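
For orientation, a hedged sketch of the call pattern the DPM hunks further down adopt: clock gating is dropped for the affected blocks while their registers are reprogrammed, then restored afterwards (the block mask here is illustrative, not taken from the patch).

	/* illustrative bracketing, mirroring si_dpm_enable()/si_dpm_set_power_state() below */
	si_update_cg(rdev, RADEON_CG_BLOCK_GFX | RADEON_CG_BLOCK_MC, false);
	/* ... reprogram the gated blocks ... */
	si_update_cg(rdev, RADEON_CG_BLOCK_GFX | RADEON_CG_BLOCK_MC, true);
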
+
+u32 si_get_csb_size(struct radeon_device *rdev)
+{
+ u32 count = 0;
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+
+ if (rdev->rlc.cs_data == NULL)
+ return 0;
+
+ /* begin clear state */
+ count += 2;
+ /* context control state */
+ count += 3;
+
+ for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT)
+ count += 2 + ext->reg_count;
+ else
+ return 0;
+ }
}
- /* set up the cs buffer */
- dst_ptr = rdev->rlc.cs_ptr;
- reg_list_hdr_blk_index = 0;
- reg_list_mc_addr = rdev->rlc.clear_state_gpu_addr + (reg_list_blk_index * 4);
- data = upper_32_bits(reg_list_mc_addr);
- dst_ptr[reg_list_hdr_blk_index] = data;
- reg_list_hdr_blk_index++;
- for (i = 0; cs_data[i].section != NULL; i++) {
- for (j = 0; cs_data[i].section[j].extent != NULL; j++) {
- reg_num = cs_data[i].section[j].reg_count;
- data = reg_list_mc_addr & 0xffffffff;
- dst_ptr[reg_list_hdr_blk_index] = data;
- reg_list_hdr_blk_index++;
-
- data = (cs_data[i].section[j].reg_index * 4) & 0xffffffff;
- dst_ptr[reg_list_hdr_blk_index] = data;
- reg_list_hdr_blk_index++;
-
- data = 0x08000000 | (reg_num * 4);
- dst_ptr[reg_list_hdr_blk_index] = data;
- reg_list_hdr_blk_index++;
-
- for (k = 0; k < reg_num; k++) {
- data = cs_data[i].section[j].extent[k];
- dst_ptr[reg_list_blk_index + k] = data;
+ /* pa_sc_raster_config */
+ count += 3;
+ /* end clear state */
+ count += 2;
+ /* clear state */
+ count += 2;
+
+ return count;
+}
+
+void si_get_csb_buffer(struct radeon_device *rdev, volatile u32 *buffer)
+{
+ u32 count = 0, i;
+ const struct cs_section_def *sect = NULL;
+ const struct cs_extent_def *ext = NULL;
+
+ if (rdev->rlc.cs_data == NULL)
+ return;
+ if (buffer == NULL)
+ return;
+
+ buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
+ buffer[count++] = PACKET3_PREAMBLE_BEGIN_CLEAR_STATE;
+
+ buffer[count++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
+ buffer[count++] = 0x80000000;
+ buffer[count++] = 0x80000000;
+
+ for (sect = rdev->rlc.cs_data; sect->section != NULL; ++sect) {
+ for (ext = sect->section; ext->extent != NULL; ++ext) {
+ if (sect->id == SECT_CONTEXT) {
+ buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count);
+ buffer[count++] = ext->reg_index - 0xa000;
+ for (i = 0; i < ext->reg_count; i++)
+ buffer[count++] = ext->extent[i];
+ } else {
+ return;
}
- reg_list_mc_addr += reg_num * 4;
- reg_list_blk_index += reg_num;
}
}
- dst_ptr[reg_list_hdr_blk_index] = RLC_CLEAR_STATE_END_MARKER;
- radeon_bo_kunmap(rdev->rlc.clear_state_obj);
- radeon_bo_unreserve(rdev->rlc.clear_state_obj);
+ buffer[count++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
+ buffer[count++] = PA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START;
+ switch (rdev->family) {
+ case CHIP_TAHITI:
+ case CHIP_PITCAIRN:
+ buffer[count++] = 0x2a00126a;
+ break;
+ case CHIP_VERDE:
+ buffer[count++] = 0x0000124a;
+ break;
+ case CHIP_OLAND:
+ buffer[count++] = 0x00000082;
+ break;
+ case CHIP_HAINAN:
+ buffer[count++] = 0x00000000;
+ break;
+ default:
+ buffer[count++] = 0x00000000;
+ break;
+ }
- return 0;
+ buffer[count++] = PACKET3(PACKET3_PREAMBLE_CNTL, 0);
+ buffer[count++] = PACKET3_PREAMBLE_END_CLEAR_STATE;
+
+ buffer[count++] = PACKET3(PACKET3_CLEAR_STATE, 0);
+ buffer[count++] = 0;
+}
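
A hedged sketch of how a caller such as the shared sumo_rlc_init() path (selected in the si_startup() hunk below) can pair si_get_csb_size() with si_get_csb_buffer(); the BO helpers are the same ones the removed si_rlc_init() used above, and error handling is elided.

	volatile u32 *dst_ptr;
	u32 dws = si_get_csb_size(rdev);	/* clear-state packet stream, in dwords */
	int r;

	r = radeon_bo_create(rdev, dws * 4, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL,
			     &rdev->rlc.clear_state_obj);
	r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
	r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->rlc.clear_state_gpu_addr);
	r = radeon_bo_kmap(rdev->rlc.clear_state_obj, (void **)&dst_ptr);
	si_get_csb_buffer(rdev, dst_ptr);	/* emit the PACKET3 stream built above */
	radeon_bo_kunmap(rdev->rlc.clear_state_obj);
	radeon_bo_unreserve(rdev->rlc.clear_state_obj);
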
+
+static void si_init_pg(struct radeon_device *rdev)
+{
+ if (rdev->pg_flags) {
+ if (rdev->pg_flags & RADEON_PG_SUPPORT_SDMA) {
+ si_init_dma_pg(rdev);
+ }
+ si_init_ao_cu_mask(rdev);
+ if (rdev->pg_flags & RADEON_PG_SUPPORT_GFX_CG) {
+ si_init_gfx_cgpg(rdev);
+ }
+ si_enable_dma_pg(rdev, true);
+ si_enable_gfx_cgpg(rdev, true);
+ } else {
+ WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
+ WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
+ }
+}
+
+static void si_fini_pg(struct radeon_device *rdev)
+{
+ if (rdev->pg_flags) {
+ si_enable_dma_pg(rdev, false);
+ si_enable_gfx_cgpg(rdev, false);
+ }
}
-static void si_rlc_reset(struct radeon_device *rdev)
+/*
+ * RLC
+ */
+void si_rlc_reset(struct radeon_device *rdev)
{
u32 tmp = RREG32(GRBM_SOFT_RESET);
@@ -5651,7 +5646,7 @@ static int si_irq_init(struct radeon_device *rdev)
WREG32(INTERRUPT_CNTL, interrupt_cntl);
WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
- rb_bufsz = drm_order(rdev->ih.ring_size / 4);
+ rb_bufsz = order_base_2(rdev->ih.ring_size / 4);
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
IH_WPTR_OVERFLOW_CLEAR |
@@ -6335,80 +6330,6 @@ restart_ih:
return IRQ_HANDLED;
}
-/**
- * si_copy_dma - copy pages using the DMA engine
- *
- * @rdev: radeon_device pointer
- * @src_offset: src GPU address
- * @dst_offset: dst GPU address
- * @num_gpu_pages: number of GPU pages to xfer
- * @fence: radeon fence object
- *
- * Copy GPU paging using the DMA engine (SI).
- * Used by the radeon ttm implementation to move pages if
- * registered as the asic copy callback.
- */
-int si_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset, uint64_t dst_offset,
- unsigned num_gpu_pages,
- struct radeon_fence **fence)
-{
- struct radeon_semaphore *sem = NULL;
- int ring_index = rdev->asic->copy.dma_ring_index;
- struct radeon_ring *ring = &rdev->ring[ring_index];
- u32 size_in_bytes, cur_size_in_bytes;
- int i, num_loops;
- int r = 0;
-
- r = radeon_semaphore_create(rdev, &sem);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
- }
-
- size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
- num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
- r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- radeon_semaphore_free(rdev, &sem, NULL);
- return r;
- }
-
- if (radeon_fence_need_sync(*fence, ring->idx)) {
- radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
- ring->idx);
- radeon_fence_note_sync(*fence, ring->idx);
- } else {
- radeon_semaphore_free(rdev, &sem, NULL);
- }
-
- for (i = 0; i < num_loops; i++) {
- cur_size_in_bytes = size_in_bytes;
- if (cur_size_in_bytes > 0xFFFFF)
- cur_size_in_bytes = 0xFFFFF;
- size_in_bytes -= cur_size_in_bytes;
- radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
- radeon_ring_write(ring, dst_offset & 0xffffffff);
- radeon_ring_write(ring, src_offset & 0xffffffff);
- radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
- radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
- src_offset += cur_size_in_bytes;
- dst_offset += cur_size_in_bytes;
- }
-
- r = radeon_fence_emit(rdev, fence, ring->idx);
- if (r) {
- radeon_ring_unlock_undo(rdev, ring);
- return r;
- }
-
- radeon_ring_unlock_commit(rdev, ring);
- radeon_semaphore_free(rdev, &sem, *fence);
-
- return r;
-}
-
/*
* startup/shutdown callbacks
*/
@@ -6422,6 +6343,13 @@ static int si_startup(struct radeon_device *rdev)
/* enable aspm */
si_program_aspm(rdev);
+ /* scratch needs to be initialized before MC */
+ r = r600_vram_scratch_init(rdev);
+ if (r)
+ return r;
+
+ si_mc_program(rdev);
+
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
!rdev->rlc_fw || !rdev->mc_fw) {
r = si_init_microcode(rdev);
@@ -6437,18 +6365,19 @@ static int si_startup(struct radeon_device *rdev)
return r;
}
- r = r600_vram_scratch_init(rdev);
- if (r)
- return r;
-
- si_mc_program(rdev);
r = si_pcie_gart_enable(rdev);
if (r)
return r;
si_gpu_init(rdev);
/* allocate rlc buffers */
- r = si_rlc_init(rdev);
+ if (rdev->family == CHIP_VERDE) {
+ rdev->rlc.reg_list = verde_rlc_save_restore_register_list;
+ rdev->rlc.reg_list_size =
+ (u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);
+ }
+ rdev->rlc.cs_data = si_cs_data;
+ r = sumo_rlc_init(rdev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
@@ -6490,7 +6419,7 @@ static int si_startup(struct radeon_device *rdev)
}
if (rdev->has_uvd) {
- r = rv770_uvd_resume(rdev);
+ r = uvd_v2_2_resume(rdev);
if (!r) {
r = radeon_fence_driver_start_ring(rdev,
R600_RING_TYPE_UVD_INDEX);
@@ -6519,21 +6448,21 @@ static int si_startup(struct radeon_device *rdev)
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
CP_RB0_RPTR, CP_RB0_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
CP_RB1_RPTR, CP_RB1_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
CP_RB2_RPTR, CP_RB2_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (r)
return r;
@@ -6541,7 +6470,7 @@ static int si_startup(struct radeon_device *rdev)
r = radeon_ring_init(rdev, ring, ring->ring_size, R600_WB_DMA_RPTR_OFFSET,
DMA_RB_RPTR + DMA0_REGISTER_OFFSET,
DMA_RB_WPTR + DMA0_REGISTER_OFFSET,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
if (r)
return r;
@@ -6549,7 +6478,7 @@ static int si_startup(struct radeon_device *rdev)
r = radeon_ring_init(rdev, ring, ring->ring_size, CAYMAN_WB_DMA1_RPTR_OFFSET,
DMA_RB_RPTR + DMA1_REGISTER_OFFSET,
DMA_RB_WPTR + DMA1_REGISTER_OFFSET,
- 2, 0x3fffc, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
+ DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0));
if (r)
return r;
@@ -6567,12 +6496,11 @@ static int si_startup(struct radeon_device *rdev)
if (rdev->has_uvd) {
ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
if (ring->ring_size) {
- r = radeon_ring_init(rdev, ring, ring->ring_size,
- R600_WB_UVD_RPTR_OFFSET,
+ r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
- 0, 0xfffff, RADEON_CP_PACKET2);
+ RADEON_CP_PACKET2);
if (!r)
- r = r600_uvd_init(rdev);
+ r = uvd_v1_0_init(rdev);
if (r)
DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
}
@@ -6590,6 +6518,10 @@ static int si_startup(struct radeon_device *rdev)
return r;
}
+ r = dce6_audio_init(rdev);
+ if (r)
+ return r;
+
return 0;
}
@@ -6621,13 +6553,16 @@ int si_resume(struct radeon_device *rdev)
int si_suspend(struct radeon_device *rdev)
{
+ dce6_audio_fini(rdev);
radeon_vm_manager_fini(rdev);
si_cp_enable(rdev, false);
cayman_dma_stop(rdev);
if (rdev->has_uvd) {
- r600_uvd_rbc_stop(rdev);
+ uvd_v1_0_fini(rdev);
radeon_uvd_suspend(rdev);
}
+ si_fini_pg(rdev);
+ si_fini_cg(rdev);
si_irq_suspend(rdev);
radeon_wb_disable(rdev);
si_pcie_gart_disable(rdev);
@@ -6734,7 +6669,7 @@ int si_init(struct radeon_device *rdev)
si_cp_fini(rdev);
cayman_dma_fini(rdev);
si_irq_fini(rdev);
- si_rlc_fini(rdev);
+ sumo_rlc_fini(rdev);
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_vm_manager_fini(rdev);
@@ -6759,16 +6694,18 @@ void si_fini(struct radeon_device *rdev)
{
si_cp_fini(rdev);
cayman_dma_fini(rdev);
- si_irq_fini(rdev);
- si_rlc_fini(rdev);
- si_fini_cg(rdev);
si_fini_pg(rdev);
+ si_fini_cg(rdev);
+ si_irq_fini(rdev);
+ sumo_rlc_fini(rdev);
radeon_wb_fini(rdev);
radeon_vm_manager_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- if (rdev->has_uvd)
+ if (rdev->has_uvd) {
+ uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
+ }
si_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
new file mode 100644
index 00000000000..49909d23dfc
--- /dev/null
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "sid.h"
+
+u32 si_gpu_check_soft_reset(struct radeon_device *rdev);
+
+/**
+ * si_dma_is_lockup - Check if the DMA engine is locked up
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring structure holding ring information
+ *
+ * Check if the async DMA engine is locked up.
+ * Returns true if the engine appears to be locked up, false if not.
+ */
+bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 reset_mask = si_gpu_check_soft_reset(rdev);
+ u32 mask;
+
+ if (ring->idx == R600_RING_TYPE_DMA_INDEX)
+ mask = RADEON_RESET_DMA;
+ else
+ mask = RADEON_RESET_DMA1;
+
+ if (!(reset_mask & mask)) {
+ radeon_ring_lockup_update(ring);
+ return false;
+ }
+ /* force ring activities */
+ radeon_ring_force_activity(rdev, ring);
+ return radeon_ring_test_lockup(rdev, ring);
+}
+
+/**
+ * si_dma_vm_set_page - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using the DMA (SI).
+ */
+void si_dma_vm_set_page(struct radeon_device *rdev,
+ struct radeon_ib *ib,
+ uint64_t pe,
+ uint64_t addr, unsigned count,
+ uint32_t incr, uint32_t flags)
+{
+ uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
+ uint64_t value;
+ unsigned ndw;
+
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ /* for non-physically contiguous pages (system) */
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 0, ndw);
+ ib->ptr[ib->length_dw++] = pe;
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ for (; ndw > 0; ndw -= 2, --count, pe += 8) {
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ value = radeon_vm_map_gart(rdev, addr);
+ value &= 0xFFFFFFFFFFFFF000ULL;
+ } else if (flags & RADEON_VM_PAGE_VALID) {
+ value = addr;
+ } else {
+ value = 0;
+ }
+ addr += incr;
+ value |= r600_flags;
+ ib->ptr[ib->length_dw++] = value;
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ }
+ }
+ } else {
+ while (count) {
+ ndw = count * 2;
+ if (ndw > 0xFFFFE)
+ ndw = 0xFFFFE;
+
+ if (flags & RADEON_VM_PAGE_VALID)
+ value = addr;
+ else
+ value = 0;
+ /* for physically contiguous pages (vram) */
+ ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
+ ib->ptr[ib->length_dw++] = pe; /* dst addr */
+ ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
+ ib->ptr[ib->length_dw++] = r600_flags; /* mask */
+ ib->ptr[ib->length_dw++] = 0;
+ ib->ptr[ib->length_dw++] = value; /* value */
+ ib->ptr[ib->length_dw++] = upper_32_bits(value);
+ ib->ptr[ib->length_dw++] = incr; /* increment size */
+ ib->ptr[ib->length_dw++] = 0;
+ pe += ndw * 4;
+ addr += (ndw / 2) * incr;
+ count -= ndw / 2;
+ }
+ }
+ while (ib->length_dw & 0x7)
+ ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0, 0);
+}
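
A hedged usage sketch for the helper above (the values are illustrative, not from the patch): writing 512 PTEs for physically contiguous VRAM pages, one 4 KiB page per entry, using the generic radeon VM access flags.

	/* illustrative: map 512 contiguous VRAM pages starting at gpu_addr;
	 * pe points at the first PTE to write inside the page table.
	 */
	si_dma_vm_set_page(rdev, ib, pe, gpu_addr, 512, RADEON_GPU_PAGE_SIZE,
			   RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_READABLE |
			   RADEON_VM_PAGE_WRITEABLE);
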
+
+void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
+{
+ struct radeon_ring *ring = &rdev->ring[ridx];
+
+ if (vm == NULL)
+ return;
+
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ if (vm->id < 8) {
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2));
+ } else {
+ radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2));
+ }
+ radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
+
+ /* flush hdp cache */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (HDP_MEM_COHERENCY_FLUSH_CNTL >> 2));
+ radeon_ring_write(ring, 1);
+
+ /* bits 0-7 are the VM contexts0-7 */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
+ radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 1 << vm->id);
+}
+
+/**
+ * si_copy_dma - copy pages using the DMA engine
+ *
+ * @rdev: radeon_device pointer
+ * @src_offset: src GPU address
+ * @dst_offset: dst GPU address
+ * @num_gpu_pages: number of GPU pages to xfer
+ * @fence: radeon fence object
+ *
+ * Copy GPU paging using the DMA engine (SI).
+ * Used by the radeon ttm implementation to move pages if
+ * registered as the asic copy callback.
+ */
+int si_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset, uint64_t dst_offset,
+ unsigned num_gpu_pages,
+ struct radeon_fence **fence)
+{
+ struct radeon_semaphore *sem = NULL;
+ int ring_index = rdev->asic->copy.dma_ring_index;
+ struct radeon_ring *ring = &rdev->ring[ring_index];
+ u32 size_in_bytes, cur_size_in_bytes;
+ int i, num_loops;
+ int r = 0;
+
+ r = radeon_semaphore_create(rdev, &sem);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+
+ size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
+ num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
+ r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ radeon_semaphore_free(rdev, &sem, NULL);
+ return r;
+ }
+
+ if (radeon_fence_need_sync(*fence, ring->idx)) {
+ radeon_semaphore_sync_rings(rdev, sem, (*fence)->ring,
+ ring->idx);
+ radeon_fence_note_sync(*fence, ring->idx);
+ } else {
+ radeon_semaphore_free(rdev, &sem, NULL);
+ }
+
+ for (i = 0; i < num_loops; i++) {
+ cur_size_in_bytes = size_in_bytes;
+ if (cur_size_in_bytes > 0xFFFFF)
+ cur_size_in_bytes = 0xFFFFF;
+ size_in_bytes -= cur_size_in_bytes;
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 1, 0, 0, cur_size_in_bytes));
+ radeon_ring_write(ring, dst_offset & 0xffffffff);
+ radeon_ring_write(ring, src_offset & 0xffffffff);
+ radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
+ radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
+ src_offset += cur_size_in_bytes;
+ dst_offset += cur_size_in_bytes;
+ }
+
+ r = radeon_fence_emit(rdev, fence, ring->idx);
+ if (r) {
+ radeon_ring_unlock_undo(rdev, ring);
+ return r;
+ }
+
+ radeon_ring_unlock_commit(rdev, ring);
+ radeon_semaphore_free(rdev, &sem, *fence);
+
+ return r;
+}
+
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 73aaa2e4c31..5be9b4e7235 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -37,8 +37,6 @@
#define SMC_RAM_END 0x20000
-#define DDR3_DRAM_ROWS 0x2000
-
#define SCLK_MIN_DEEPSLEEP_FREQ 1350
static const struct si_cac_config_reg cac_weights_tahiti[] =
@@ -1755,6 +1753,9 @@ static int si_calculate_sclk_params(struct radeon_device *rdev,
u32 engine_clock,
SISLANDS_SMC_SCLK_VALUE *sclk);
+extern void si_update_cg(struct radeon_device *rdev,
+ u32 block, bool enable);
+
static struct si_power_info *si_get_pi(struct radeon_device *rdev)
{
struct si_power_info *pi = rdev->pm.dpm.priv;
@@ -1767,8 +1768,9 @@ static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coe
{
s64 kt, kv, leakage_w, i_leakage, vddc;
s64 temperature, t_slope, t_intercept, av, bv, t_ref;
+ s64 tmp;
- i_leakage = drm_int2fixp(ileakage / 100);
+ i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
vddc = div64_s64(drm_int2fixp(v), 1000);
temperature = div64_s64(drm_int2fixp(t), 1000);
@@ -1778,8 +1780,9 @@ static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coe
bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
t_ref = drm_int2fixp(coeff->t_ref);
- kt = drm_fixp_div(drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, temperature)),
- drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, t_ref)));
+ tmp = drm_fixp_mul(t_slope, vddc) + t_intercept;
+ kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature));
+ kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref)));
kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
@@ -1931,6 +1934,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
si_pi->cac_override = cac_override_pitcairn;
si_pi->powertune_data = &powertune_data_pitcairn;
si_pi->dte_data = dte_data_pitcairn;
+ break;
}
} else if (rdev->family == CHIP_VERDE) {
si_pi->lcac_config = lcac_cape_verde;
@@ -1941,6 +1945,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
case 0x683B:
case 0x683F:
case 0x6829:
+ case 0x6835:
si_pi->cac_weights = cac_weights_cape_verde_pro;
si_pi->dte_data = dte_data_cape_verde;
break;
@@ -2901,7 +2906,8 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
{
struct ni_ps *ps = ni_get_ps(rps);
struct radeon_clock_and_voltage_limits *max_limits;
- bool disable_mclk_switching;
+ bool disable_mclk_switching = false;
+ bool disable_sclk_switching = false;
u32 mclk, sclk;
u16 vddc, vddci;
int i;
@@ -2909,8 +2915,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
ni_dpm_vblank_too_short(rdev))
disable_mclk_switching = true;
- else
- disable_mclk_switching = false;
+
+ if (rps->vclk || rps->dclk) {
+ disable_mclk_switching = true;
+ disable_sclk_switching = true;
+ }
if (rdev->pm.dpm.ac_power)
max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
@@ -2938,27 +2947,43 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
if (disable_mclk_switching) {
mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
- sclk = ps->performance_levels[0].sclk;
- vddc = ps->performance_levels[0].vddc;
vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
} else {
- sclk = ps->performance_levels[0].sclk;
mclk = ps->performance_levels[0].mclk;
- vddc = ps->performance_levels[0].vddc;
vddci = ps->performance_levels[0].vddci;
}
+ if (disable_sclk_switching) {
+ sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
+ vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
+ } else {
+ sclk = ps->performance_levels[0].sclk;
+ vddc = ps->performance_levels[0].vddc;
+ }
+
/* adjusted low state */
ps->performance_levels[0].sclk = sclk;
ps->performance_levels[0].mclk = mclk;
ps->performance_levels[0].vddc = vddc;
ps->performance_levels[0].vddci = vddci;
- for (i = 1; i < ps->performance_level_count; i++) {
- if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
- ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
- if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
- ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+ if (disable_sclk_switching) {
+ sclk = ps->performance_levels[0].sclk;
+ for (i = 1; i < ps->performance_level_count; i++) {
+ if (sclk < ps->performance_levels[i].sclk)
+ sclk = ps->performance_levels[i].sclk;
+ }
+ for (i = 0; i < ps->performance_level_count; i++) {
+ ps->performance_levels[i].sclk = sclk;
+ ps->performance_levels[i].vddc = vddc;
+ }
+ } else {
+ for (i = 1; i < ps->performance_level_count; i++) {
+ if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
+ ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
+ if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
+ ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+ }
}
if (disable_mclk_switching) {
@@ -3237,10 +3262,10 @@ int si_dpm_force_performance_level(struct radeon_device *rdev,
{
struct radeon_ps *rps = rdev->pm.dpm.current_ps;
struct ni_ps *ps = ni_get_ps(rps);
- u32 levels;
+ u32 levels = ps->performance_level_count;
if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
- if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
+ if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
return -EINVAL;
if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
@@ -3249,14 +3274,13 @@ int si_dpm_force_performance_level(struct radeon_device *rdev,
if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
- levels = ps->performance_level_count - 1;
- if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+ if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
return -EINVAL;
} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
return -EINVAL;
- if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
+ if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
return -EINVAL;
}
@@ -3620,8 +3644,12 @@ static void si_enable_display_gap(struct radeon_device *rdev)
{
u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
+ tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+ tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
+ DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
+
tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
- tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) |
+ tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
WREG32(CG_DISPLAY_GAP_CNTL, tmp);
}
@@ -3638,7 +3666,7 @@ static void si_clear_vc(struct radeon_device *rdev)
WREG32(CG_FTV, 0);
}
-static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
+u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
u8 mc_para_index;
@@ -3651,7 +3679,7 @@ static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
return mc_para_index;
}
-static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
+u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
u8 mc_para_index;
@@ -3733,20 +3761,21 @@ static bool si_validate_phase_shedding_tables(struct radeon_device *rdev,
return true;
}
-static void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
- struct atom_voltage_table *voltage_table)
+void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
+ u32 max_voltage_steps,
+ struct atom_voltage_table *voltage_table)
{
unsigned int i, diff;
- if (voltage_table->count <= SISLANDS_MAX_NO_VREG_STEPS)
+ if (voltage_table->count <= max_voltage_steps)
return;
- diff = voltage_table->count - SISLANDS_MAX_NO_VREG_STEPS;
+ diff = voltage_table->count - max_voltage_steps;
- for (i= 0; i < SISLANDS_MAX_NO_VREG_STEPS; i++)
+ for (i= 0; i < max_voltage_steps; i++)
voltage_table->entries[i] = voltage_table->entries[i + diff];
- voltage_table->count = SISLANDS_MAX_NO_VREG_STEPS;
+ voltage_table->count = max_voltage_steps;
}
static int si_construct_voltage_tables(struct radeon_device *rdev)
@@ -3762,7 +3791,9 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
return ret;
if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
- si_trim_voltage_table_to_fit_state_table(rdev, &eg_pi->vddc_voltage_table);
+ si_trim_voltage_table_to_fit_state_table(rdev,
+ SISLANDS_MAX_NO_VREG_STEPS,
+ &eg_pi->vddc_voltage_table);
if (eg_pi->vddci_control) {
ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
@@ -3771,7 +3802,9 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
return ret;
if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
- si_trim_voltage_table_to_fit_state_table(rdev, &eg_pi->vddci_voltage_table);
+ si_trim_voltage_table_to_fit_state_table(rdev,
+ SISLANDS_MAX_NO_VREG_STEPS,
+ &eg_pi->vddci_voltage_table);
}
if (pi->mvdd_control) {
@@ -3789,7 +3822,9 @@ static int si_construct_voltage_tables(struct radeon_device *rdev)
}
if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
- si_trim_voltage_table_to_fit_state_table(rdev, &si_pi->mvdd_voltage_table);
+ si_trim_voltage_table_to_fit_state_table(rdev,
+ SISLANDS_MAX_NO_VREG_STEPS,
+ &si_pi->mvdd_voltage_table);
}
if (si_pi->vddc_phase_shed_control) {
@@ -4036,16 +4071,15 @@ static int si_force_switch_to_arb_f0(struct radeon_device *rdev)
static u32 si_calculate_memory_refresh_rate(struct radeon_device *rdev,
u32 engine_clock)
{
- struct rv7xx_power_info *pi = rv770_get_pi(rdev);
u32 dram_rows;
u32 dram_refresh_rate;
u32 mc_arb_rfsh_rate;
u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
- if (pi->mem_gddr5)
- dram_rows = 1 << (tmp + 10);
+ if (tmp >= 4)
+ dram_rows = 16384;
else
- dram_rows = DDR3_DRAM_ROWS;
+ dram_rows = 1 << (tmp + 10);
dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
@@ -5728,6 +5762,13 @@ int si_dpm_enable(struct radeon_device *rdev)
struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
int ret;
+ si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), false);
+
if (si_is_smc_running(rdev))
return -EINVAL;
if (pi->voltage_control)
@@ -5847,6 +5888,13 @@ int si_dpm_enable(struct radeon_device *rdev)
si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+ si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), true);
+
ni_update_current_ps(rdev, boot_ps);
return 0;
@@ -5857,6 +5905,13 @@ void si_dpm_disable(struct radeon_device *rdev)
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
+ si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), false);
+
if (!si_is_smc_running(rdev))
return;
si_disable_ulv(rdev);
@@ -5921,6 +5976,13 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
struct radeon_ps *old_ps = &eg_pi->current_rps;
int ret;
+ si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), false);
+
ret = si_disable_ulv(rdev);
if (ret) {
DRM_ERROR("si_disable_ulv failed\n");
@@ -6013,16 +6075,18 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
return ret;
}
-#if 0
- /* XXX */
ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
if (ret) {
DRM_ERROR("si_dpm_force_performance_level failed\n");
return ret;
}
-#else
- rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
-#endif
+
+ si_update_cg(rdev, (RADEON_CG_BLOCK_GFX |
+ RADEON_CG_BLOCK_MC |
+ RADEON_CG_BLOCK_SDMA |
+ RADEON_CG_BLOCK_BIF |
+ RADEON_CG_BLOCK_UVD |
+ RADEON_CG_BLOCK_HDP), true);
return 0;
}
@@ -6213,6 +6277,7 @@ static int si_parse_power_table(struct radeon_device *rdev)
rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
for (i = 0; i < state_array->ucNumEntries; i++) {
+ u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
@@ -6229,14 +6294,16 @@ static int si_parse_power_table(struct radeon_device *rdev)
non_clock_info,
non_clock_info_array->ucEntrySize);
k = 0;
+ idx = (u8 *)&power_state->v2.clockInfoIndex[0];
for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
- clock_array_index = power_state->v2.clockInfoIndex[j];
+ clock_array_index = idx[j];
if (clock_array_index >= clock_info_array->ucNumEntries)
continue;
if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS)
break;
clock_info = (union pplib_clock_info *)
- &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+ ((u8 *)&clock_info_array->clockInfo[0] +
+ (clock_array_index * clock_info_array->ucEntrySize));
si_parse_pplib_clock_info(rdev,
&rdev->pm.dpm.ps[i], k,
clock_info);
@@ -6254,9 +6321,6 @@ int si_dpm_init(struct radeon_device *rdev)
struct evergreen_power_info *eg_pi;
struct ni_power_info *ni_pi;
struct si_power_info *si_pi;
- int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
- u16 data_offset, size;
- u8 frev, crev;
struct atom_clock_dividers dividers;
int ret;
u32 mask;
@@ -6347,16 +6411,7 @@ int si_dpm_init(struct radeon_device *rdev)
si_pi->vddc_phase_shed_control =
radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT);
- if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
- &frev, &crev, &data_offset)) {
- pi->sclk_ss = true;
- pi->mclk_ss = true;
- pi->dynamic_ss = true;
- } else {
- pi->sclk_ss = false;
- pi->mclk_ss = false;
- pi->dynamic_ss = true;
- }
+ rv770_get_engine_memory_ss(rdev);
pi->asi = RV770_ASI_DFLT;
pi->pasi = CYPRESS_HASI_DFLT;
@@ -6367,8 +6422,7 @@ int si_dpm_init(struct radeon_device *rdev)
eg_pi->sclk_deep_sleep = true;
si_pi->sclk_deep_sleep_above_low = false;
- if (pi->gfx_clock_gating &&
- (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+ if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
pi->thermal_protection = true;
else
pi->thermal_protection = false;
@@ -6395,6 +6449,12 @@ int si_dpm_init(struct radeon_device *rdev)
si_initialize_powertune_defaults(rdev);
+ /* make sure dc limits are valid */
+ if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
+ (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
+ rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
+
return 0;
}
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 2c8da27a929..52d2ab6b67a 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -282,6 +282,10 @@
#define DMIF_ADDR_CALC 0xC00
+#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
+# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
+# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
+
#define SRBM_STATUS 0xE50
#define GRBM_RQ_PENDING (1 << 5)
#define VMC_BUSY (1 << 8)
@@ -581,6 +585,7 @@
#define CLKS_MASK (0xfff << 0)
#define HDP_HOST_PATH_CNTL 0x2C00
+#define CLOCK_GATING_DIS (1 << 23)
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
#define HDP_NONSURFACE_SIZE 0x2C0C
@@ -588,6 +593,8 @@
#define HDP_ADDR_CONFIG 0x2F48
#define HDP_MISC_CNTL 0x2F4C
#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
+#define HDP_MEM_POWER_LS 0x2F50
+#define HDP_LS_ENABLE (1 << 0)
#define ATC_MISC_CG 0x3350
@@ -635,6 +642,54 @@
#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
+/* DCE6 ELD audio interface */
+#define AZ_F0_CODEC_ENDPOINT_INDEX 0x5E00
+# define AZ_ENDPOINT_REG_INDEX(x) (((x) & 0xff) << 0)
+# define AZ_ENDPOINT_REG_WRITE_EN (1 << 8)
+#define AZ_F0_CODEC_ENDPOINT_DATA 0x5E04
+
+#define AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x25
+#define SPEAKER_ALLOCATION(x) (((x) & 0x7f) << 0)
+#define SPEAKER_ALLOCATION_MASK (0x7f << 0)
+#define SPEAKER_ALLOCATION_SHIFT 0
+#define HDMI_CONNECTION (1 << 16)
+#define DP_CONNECTION (1 << 17)
+
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x28 /* LPCM */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x29 /* AC3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x2A /* MPEG1 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x2B /* MP3 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x2C /* MPEG2 */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x2D /* AAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x2E /* DTS */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x2F /* ATRAC */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x30 /* one bit audio - leave at 0 (default) */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x31 /* Dolby Digital */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x32 /* DTS-HD */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x33 /* MAT-MLP */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x34 /* DTS */
+#define AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x35 /* WMA Pro */
+# define MAX_CHANNELS(x) (((x) & 0x7) << 0)
+/* max channels minus one. 7 = 8 channels */
+# define SUPPORTED_FREQUENCIES(x) (((x) & 0xff) << 8)
+# define DESCRIPTOR_BYTE_2(x) (((x) & 0xff) << 16)
+# define SUPPORTED_FREQUENCIES_STEREO(x) (((x) & 0xff) << 24) /* LPCM only */
+/* SUPPORTED_FREQUENCIES, SUPPORTED_FREQUENCIES_STEREO
+ * bit0 = 32 kHz
+ * bit1 = 44.1 kHz
+ * bit2 = 48 kHz
+ * bit3 = 88.2 kHz
+ * bit4 = 96 kHz
+ * bit5 = 176.4 kHz
+ * bit6 = 192 kHz
+ */
+#define AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL 0x54
+# define AUDIO_ENABLED (1 << 31)
+
+#define AZ_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x56
+#define PORT_CONNECTIVITY_MASK (3 << 30)
+#define PORT_CONNECTIVITY_SHIFT 30
+
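
A hedged illustration of how the descriptor fields above compose (the value is made up, not from the patch): an 8-channel LPCM descriptor advertising all seven listed sample rates, written through the endpoint index/data pair; the per-pin register offset used by the real DCE6 audio code is omitted here.

	/* illustrative only: 8 channels (7 = max channels minus one),
	 * all seven sample-rate bits set for both fields
	 */
	u32 value = MAX_CHANNELS(7) |
		    SUPPORTED_FREQUENCIES(0x7f) |
		    SUPPORTED_FREQUENCIES_STEREO(0x7f);

	WREG32(AZ_F0_CODEC_ENDPOINT_INDEX,
	       AZ_ENDPOINT_REG_INDEX(AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0) |
	       AZ_ENDPOINT_REG_WRITE_EN);
	WREG32(AZ_F0_CODEC_ENDPOINT_DATA, value);
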
#define DC_LB_MEMORY_SPLIT 0x6b0c
#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
@@ -755,6 +810,17 @@
/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
#define CRTC_STATUS_FRAME_COUNT 0x6e98
+#define AFMT_AUDIO_SRC_CONTROL 0x713c
+#define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
+/* AFMT_AUDIO_SRC_SELECT
+ * 0 = stream0
+ * 1 = stream1
+ * 2 = stream2
+ * 3 = stream3
+ * 4 = stream4
+ * 5 = stream5
+ */
+
#define GRBM_CNTL 0x8000
#define GRBM_READ_TIMEOUT(x) ((x) << 0)
@@ -1295,6 +1361,7 @@
/* PCIE registers idx/data 0x30/0x34 */
#define PCIE_CNTL2 0x1c /* PCIE */
# define SLV_MEM_LS_EN (1 << 16)
+# define SLV_MEM_AGGRESSIVE_LS_EN (1 << 17)
# define MST_MEM_LS_EN (1 << 18)
# define REPLAY_MEM_LS_EN (1 << 19)
#define PCIE_LC_STATUS1 0x28 /* PCIE */
@@ -1644,6 +1711,10 @@
# define DMA_IDLE (1 << 0)
#define DMA_TILING_CONFIG 0xd0b8
+#define DMA_POWER_CNTL 0xd0bc
+# define MEM_POWER_OVERRIDE (1 << 8)
+#define DMA_CLK_CTRL 0xd0c0
+
#define DMA_PG 0xd0d4
# define PG_CNTL_ENABLE (1 << 0)
#define DMA_PGFSM_CONFIG 0xd0d8
diff --git a/drivers/gpu/drm/radeon/smu7.h b/drivers/gpu/drm/radeon/smu7.h
new file mode 100644
index 00000000000..75a380a1529
--- /dev/null
+++ b/drivers/gpu/drm/radeon/smu7.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU7_H
+#define SMU7_H
+
+#pragma pack(push, 1)
+
+#define SMU7_CONTEXT_ID_SMC 1
+#define SMU7_CONTEXT_ID_VBIOS 2
+
+
+#define SMU7_CONTEXT_ID_SMC 1
+#define SMU7_CONTEXT_ID_VBIOS 2
+
+#define SMU7_MAX_LEVELS_VDDC 8
+#define SMU7_MAX_LEVELS_VDDCI 4
+#define SMU7_MAX_LEVELS_MVDD 4
+#define SMU7_MAX_LEVELS_VDDNB 8
+
+#define SMU7_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE // SCLK + SQ DPM + ULV
+#define SMU7_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS // MCLK Levels DPM
+#define SMU7_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS // LCLK Levels
+#define SMU7_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS // PCIe speed and number of lanes.
+#define SMU7_MAX_LEVELS_UVD 8 // VCLK/DCLK levels for UVD.
+#define SMU7_MAX_LEVELS_VCE 8 // ECLK levels for VCE.
+#define SMU7_MAX_LEVELS_ACP 8 // ACLK levels for ACP.
+#define SMU7_MAX_LEVELS_SAMU 8 // SAMCLK levels for SAMU.
+#define SMU7_MAX_ENTRIES_SMIO 32 // Number of entries in SMIO table.
+
+#define DPM_NO_LIMIT 0
+#define DPM_NO_UP 1
+#define DPM_GO_DOWN 2
+#define DPM_GO_UP 3
+
+#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0
+#define SMU7_FIRST_DPM_MEMORY_LEVEL 0
+
+#define GPIO_CLAMP_MODE_VRHOT 1
+#define GPIO_CLAMP_MODE_THERM 2
+#define GPIO_CLAMP_MODE_DC 4
+
+#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0
+#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7<<SCRATCH_B_TARG_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_PCIE_INDEX_SHIFT 3
+#define SCRATCH_B_CURR_PCIE_INDEX_MASK (0x7<<SCRATCH_B_CURR_PCIE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_UVD_INDEX_SHIFT 6
+#define SCRATCH_B_TARG_UVD_INDEX_MASK (0x7<<SCRATCH_B_TARG_UVD_INDEX_SHIFT)
+#define SCRATCH_B_CURR_UVD_INDEX_SHIFT 9
+#define SCRATCH_B_CURR_UVD_INDEX_MASK (0x7<<SCRATCH_B_CURR_UVD_INDEX_SHIFT)
+#define SCRATCH_B_TARG_VCE_INDEX_SHIFT 12
+#define SCRATCH_B_TARG_VCE_INDEX_MASK (0x7<<SCRATCH_B_TARG_VCE_INDEX_SHIFT)
+#define SCRATCH_B_CURR_VCE_INDEX_SHIFT 15
+#define SCRATCH_B_CURR_VCE_INDEX_MASK (0x7<<SCRATCH_B_CURR_VCE_INDEX_SHIFT)
+#define SCRATCH_B_TARG_ACP_INDEX_SHIFT 18
+#define SCRATCH_B_TARG_ACP_INDEX_MASK (0x7<<SCRATCH_B_TARG_ACP_INDEX_SHIFT)
+#define SCRATCH_B_CURR_ACP_INDEX_SHIFT 21
+#define SCRATCH_B_CURR_ACP_INDEX_MASK (0x7<<SCRATCH_B_CURR_ACP_INDEX_SHIFT)
+#define SCRATCH_B_TARG_SAMU_INDEX_SHIFT 24
+#define SCRATCH_B_TARG_SAMU_INDEX_MASK (0x7<<SCRATCH_B_TARG_SAMU_INDEX_SHIFT)
+#define SCRATCH_B_CURR_SAMU_INDEX_SHIFT 27
+#define SCRATCH_B_CURR_SAMU_INDEX_MASK (0x7<<SCRATCH_B_CURR_SAMU_INDEX_SHIFT)
+
+
+struct SMU7_PIDController
+{
+ uint32_t Ki;
+ int32_t LFWindupUL;
+ int32_t LFWindupLL;
+ uint32_t StatePrecision;
+ uint32_t LfPrecision;
+ uint32_t LfOffset;
+ uint32_t MaxState;
+ uint32_t MaxLfFraction;
+ uint32_t StateShift;
+};
+
+typedef struct SMU7_PIDController SMU7_PIDController;
+
+// -------------------------------------------------------------------------------------------------------------------------
+#define SMU7_MAX_PCIE_LINK_SPEEDS 3 /* 0:Gen1 1:Gen2 2:Gen3 */
+
+#define SMU7_SCLK_DPM_CONFIG_MASK 0x01
+#define SMU7_VOLTAGE_CONTROLLER_CONFIG_MASK 0x02
+#define SMU7_THERMAL_CONTROLLER_CONFIG_MASK 0x04
+#define SMU7_MCLK_DPM_CONFIG_MASK 0x08
+#define SMU7_UVD_DPM_CONFIG_MASK 0x10
+#define SMU7_VCE_DPM_CONFIG_MASK 0x20
+#define SMU7_ACP_DPM_CONFIG_MASK 0x40
+#define SMU7_SAMU_DPM_CONFIG_MASK 0x80
+#define SMU7_PCIEGEN_DPM_CONFIG_MASK 0x100
+
+#define SMU7_ACP_MCLK_HANDSHAKE_DISABLE 0x00000001
+#define SMU7_ACP_SCLK_HANDSHAKE_DISABLE 0x00000002
+#define SMU7_UVD_MCLK_HANDSHAKE_DISABLE 0x00000100
+#define SMU7_UVD_SCLK_HANDSHAKE_DISABLE 0x00000200
+#define SMU7_VCE_MCLK_HANDSHAKE_DISABLE 0x00010000
+#define SMU7_VCE_SCLK_HANDSHAKE_DISABLE 0x00020000
+
+struct SMU7_Firmware_Header
+{
+ uint32_t Digest[5];
+ uint32_t Version;
+ uint32_t HeaderSize;
+ uint32_t Flags;
+ uint32_t EntryPoint;
+ uint32_t CodeSize;
+ uint32_t ImageSize;
+
+ uint32_t Rtos;
+ uint32_t SoftRegisters;
+ uint32_t DpmTable;
+ uint32_t FanTable;
+ uint32_t CacConfigTable;
+ uint32_t CacStatusTable;
+
+ uint32_t mcRegisterTable;
+
+ uint32_t mcArbDramTimingTable;
+
+ uint32_t PmFuseTable;
+ uint32_t Globals;
+ uint32_t Reserved[42];
+ uint32_t Signature;
+};
+
+typedef struct SMU7_Firmware_Header SMU7_Firmware_Header;
+
+#define SMU7_FIRMWARE_HEADER_LOCATION 0x20000
+
+enum DisplayConfig {
+ PowerDown = 1,
+ DP54x4,
+ DP54x2,
+ DP54x1,
+ DP27x4,
+ DP27x2,
+ DP27x1,
+ HDMI297,
+ HDMI162,
+ LVDS,
+ DP324x4,
+ DP324x2,
+ DP324x1
+};
+
+#pragma pack(pop)
+
+#endif
+
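
The firmware header above is what SMU7-based DPM code walks to locate its tables in SMC RAM; a hedged sketch, where smc_read_dword() is a stand-in name for the driver's real SMC SRAM accessor (not defined in this patch).

	/* illustrative: fetch the DPM table offset from the packed header */
	u32 dpm_table_start;

	smc_read_dword(rdev,
		       SMU7_FIRMWARE_HEADER_LOCATION +
		       offsetof(SMU7_Firmware_Header, DpmTable),
		       &dpm_table_start);
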
diff --git a/drivers/gpu/drm/radeon/smu7_discrete.h b/drivers/gpu/drm/radeon/smu7_discrete.h
new file mode 100644
index 00000000000..82f70c90a9e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/smu7_discrete.h
@@ -0,0 +1,486 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU7_DISCRETE_H
+#define SMU7_DISCRETE_H
+
+#include "smu7.h"
+
+#pragma pack(push, 1)
+
+#define SMU7_DTE_ITERATIONS 5
+#define SMU7_DTE_SOURCES 3
+#define SMU7_DTE_SINKS 1
+#define SMU7_NUM_CPU_TES 0
+#define SMU7_NUM_GPU_TES 1
+#define SMU7_NUM_NON_TES 2
+
+struct SMU7_SoftRegisters
+{
+ uint32_t RefClockFrequency;
+ uint32_t PmTimerP;
+ uint32_t FeatureEnables;
+ uint32_t PreVBlankGap;
+ uint32_t VBlankTimeout;
+ uint32_t TrainTimeGap;
+
+ uint32_t MvddSwitchTime;
+ uint32_t LongestAcpiTrainTime;
+ uint32_t AcpiDelay;
+ uint32_t G5TrainTime;
+ uint32_t DelayMpllPwron;
+ uint32_t VoltageChangeTimeout;
+ uint32_t HandshakeDisables;
+
+ uint8_t DisplayPhy1Config;
+ uint8_t DisplayPhy2Config;
+ uint8_t DisplayPhy3Config;
+ uint8_t DisplayPhy4Config;
+
+ uint8_t DisplayPhy5Config;
+ uint8_t DisplayPhy6Config;
+ uint8_t DisplayPhy7Config;
+ uint8_t DisplayPhy8Config;
+
+ uint32_t AverageGraphicsA;
+ uint32_t AverageMemoryA;
+ uint32_t AverageGioA;
+
+ uint8_t SClkDpmEnabledLevels;
+ uint8_t MClkDpmEnabledLevels;
+ uint8_t LClkDpmEnabledLevels;
+ uint8_t PCIeDpmEnabledLevels;
+
+ uint8_t UVDDpmEnabledLevels;
+ uint8_t SAMUDpmEnabledLevels;
+ uint8_t ACPDpmEnabledLevels;
+ uint8_t VCEDpmEnabledLevels;
+
+ uint32_t DRAM_LOG_ADDR_H;
+ uint32_t DRAM_LOG_ADDR_L;
+ uint32_t DRAM_LOG_PHY_ADDR_H;
+ uint32_t DRAM_LOG_PHY_ADDR_L;
+ uint32_t DRAM_LOG_BUFF_SIZE;
+ uint32_t UlvEnterC;
+ uint32_t UlvTime;
+ uint32_t Reserved[3];
+
+};
+
+typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
+
+struct SMU7_Discrete_VoltageLevel
+{
+ uint16_t Voltage;
+ uint16_t StdVoltageHiSidd;
+ uint16_t StdVoltageLoSidd;
+ uint8_t Smio;
+ uint8_t padding;
+};
+
+typedef struct SMU7_Discrete_VoltageLevel SMU7_Discrete_VoltageLevel;
+
+struct SMU7_Discrete_GraphicsLevel
+{
+ uint32_t Flags;
+ uint32_t MinVddc;
+ uint32_t MinVddcPhases;
+
+ uint32_t SclkFrequency;
+
+ uint8_t padding1[2];
+ uint16_t ActivityLevel;
+
+ uint32_t CgSpllFuncCntl3;
+ uint32_t CgSpllFuncCntl4;
+ uint32_t SpllSpreadSpectrum;
+ uint32_t SpllSpreadSpectrum2;
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+ uint8_t SclkDid;
+ uint8_t DisplayWatermark;
+ uint8_t EnabledForActivity;
+ uint8_t EnabledForThrottle;
+ uint8_t UpH;
+ uint8_t DownH;
+ uint8_t VoltageDownH;
+ uint8_t PowerThrottle;
+ uint8_t DeepSleepDivId;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_Discrete_GraphicsLevel SMU7_Discrete_GraphicsLevel;
+
+struct SMU7_Discrete_ACPILevel
+{
+ uint32_t Flags;
+ uint32_t MinVddc;
+ uint32_t MinVddcPhases;
+ uint32_t SclkFrequency;
+ uint8_t SclkDid;
+ uint8_t DisplayWatermark;
+ uint8_t DeepSleepDivId;
+ uint8_t padding;
+ uint32_t CgSpllFuncCntl;
+ uint32_t CgSpllFuncCntl2;
+ uint32_t CgSpllFuncCntl3;
+ uint32_t CgSpllFuncCntl4;
+ uint32_t SpllSpreadSpectrum;
+ uint32_t SpllSpreadSpectrum2;
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+};
+
+typedef struct SMU7_Discrete_ACPILevel SMU7_Discrete_ACPILevel;
+
+struct SMU7_Discrete_Ulv
+{
+ uint32_t CcPwrDynRm;
+ uint32_t CcPwrDynRm1;
+ uint16_t VddcOffset;
+ uint8_t VddcOffsetVid;
+ uint8_t VddcPhase;
+ uint32_t Reserved;
+};
+
+typedef struct SMU7_Discrete_Ulv SMU7_Discrete_Ulv;
+
+struct SMU7_Discrete_MemoryLevel
+{
+ uint32_t MinVddc;
+ uint32_t MinVddcPhases;
+ uint32_t MinVddci;
+ uint32_t MinMvdd;
+
+ uint32_t MclkFrequency;
+
+ uint8_t EdcReadEnable;
+ uint8_t EdcWriteEnable;
+ uint8_t RttEnable;
+ uint8_t StutterEnable;
+
+ uint8_t StrobeEnable;
+ uint8_t StrobeRatio;
+ uint8_t EnabledForThrottle;
+ uint8_t EnabledForActivity;
+
+ uint8_t UpH;
+ uint8_t DownH;
+ uint8_t VoltageDownH;
+ uint8_t padding;
+
+ uint16_t ActivityLevel;
+ uint8_t DisplayWatermark;
+ uint8_t padding1;
+
+ uint32_t MpllFuncCntl;
+ uint32_t MpllFuncCntl_1;
+ uint32_t MpllFuncCntl_2;
+ uint32_t MpllAdFuncCntl;
+ uint32_t MpllDqFuncCntl;
+ uint32_t MclkPwrmgtCntl;
+ uint32_t DllCntl;
+ uint32_t MpllSs1;
+ uint32_t MpllSs2;
+};
+
+typedef struct SMU7_Discrete_MemoryLevel SMU7_Discrete_MemoryLevel;
+
+struct SMU7_Discrete_LinkLevel
+{
+ uint8_t PcieGenSpeed;
+ uint8_t PcieLaneCount;
+ uint8_t EnabledForActivity;
+ uint8_t Padding;
+ uint32_t DownT;
+ uint32_t UpT;
+ uint32_t Reserved;
+};
+
+typedef struct SMU7_Discrete_LinkLevel SMU7_Discrete_LinkLevel;
+
+
+struct SMU7_Discrete_MCArbDramTimingTableEntry
+{
+ uint32_t McArbDramTiming;
+ uint32_t McArbDramTiming2;
+ uint8_t McArbBurstTime;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_Discrete_MCArbDramTimingTableEntry SMU7_Discrete_MCArbDramTimingTableEntry;
+
+struct SMU7_Discrete_MCArbDramTimingTable
+{
+ SMU7_Discrete_MCArbDramTimingTableEntry entries[SMU__NUM_SCLK_DPM_STATE][SMU__NUM_MCLK_DPM_LEVELS];
+};
+
+typedef struct SMU7_Discrete_MCArbDramTimingTable SMU7_Discrete_MCArbDramTimingTable;
+
+struct SMU7_Discrete_UvdLevel
+{
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint16_t MinVddc;
+ uint8_t MinVddcPhases;
+ uint8_t VclkDivider;
+ uint8_t DclkDivider;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_Discrete_UvdLevel SMU7_Discrete_UvdLevel;
+
+struct SMU7_Discrete_ExtClkLevel
+{
+ uint32_t Frequency;
+ uint16_t MinVoltage;
+ uint8_t MinPhases;
+ uint8_t Divider;
+};
+
+typedef struct SMU7_Discrete_ExtClkLevel SMU7_Discrete_ExtClkLevel;
+
+struct SMU7_Discrete_StateInfo
+{
+ uint32_t SclkFrequency;
+ uint32_t MclkFrequency;
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint32_t SamclkFrequency;
+ uint32_t AclkFrequency;
+ uint32_t EclkFrequency;
+ uint16_t MvddVoltage;
+ uint16_t padding16;
+ uint8_t DisplayWatermark;
+ uint8_t McArbIndex;
+ uint8_t McRegIndex;
+ uint8_t SeqIndex;
+ uint8_t SclkDid;
+ int8_t SclkIndex;
+ int8_t MclkIndex;
+ uint8_t PCIeGen;
+
+};
+
+typedef struct SMU7_Discrete_StateInfo SMU7_Discrete_StateInfo;
+
+
+struct SMU7_Discrete_DpmTable
+{
+ SMU7_PIDController GraphicsPIDController;
+ SMU7_PIDController MemoryPIDController;
+ SMU7_PIDController LinkPIDController;
+
+ uint32_t SystemFlags;
+
+
+ uint32_t SmioMaskVddcVid;
+ uint32_t SmioMaskVddcPhase;
+ uint32_t SmioMaskVddciVid;
+ uint32_t SmioMaskMvddVid;
+
+ uint32_t VddcLevelCount;
+ uint32_t VddciLevelCount;
+ uint32_t MvddLevelCount;
+
+ SMU7_Discrete_VoltageLevel VddcLevel [SMU7_MAX_LEVELS_VDDC];
+// SMU7_Discrete_VoltageLevel VddcStandardReference [SMU7_MAX_LEVELS_VDDC];
+ SMU7_Discrete_VoltageLevel VddciLevel [SMU7_MAX_LEVELS_VDDCI];
+ SMU7_Discrete_VoltageLevel MvddLevel [SMU7_MAX_LEVELS_MVDD];
+
+ uint8_t GraphicsDpmLevelCount;
+ uint8_t MemoryDpmLevelCount;
+ uint8_t LinkLevelCount;
+ uint8_t UvdLevelCount;
+ uint8_t VceLevelCount;
+ uint8_t AcpLevelCount;
+ uint8_t SamuLevelCount;
+ uint8_t MasterDeepSleepControl;
+ uint32_t Reserved[5];
+// uint32_t SamuDefaultLevel;
+
+ SMU7_Discrete_GraphicsLevel GraphicsLevel [SMU7_MAX_LEVELS_GRAPHICS];
+ SMU7_Discrete_MemoryLevel MemoryACPILevel;
+ SMU7_Discrete_MemoryLevel MemoryLevel [SMU7_MAX_LEVELS_MEMORY];
+ SMU7_Discrete_LinkLevel LinkLevel [SMU7_MAX_LEVELS_LINK];
+ SMU7_Discrete_ACPILevel ACPILevel;
+ SMU7_Discrete_UvdLevel UvdLevel [SMU7_MAX_LEVELS_UVD];
+ SMU7_Discrete_ExtClkLevel VceLevel [SMU7_MAX_LEVELS_VCE];
+ SMU7_Discrete_ExtClkLevel AcpLevel [SMU7_MAX_LEVELS_ACP];
+ SMU7_Discrete_ExtClkLevel SamuLevel [SMU7_MAX_LEVELS_SAMU];
+ SMU7_Discrete_Ulv Ulv;
+
+ uint32_t SclkStepSize;
+ uint32_t Smio [SMU7_MAX_ENTRIES_SMIO];
+
+ uint8_t UvdBootLevel;
+ uint8_t VceBootLevel;
+ uint8_t AcpBootLevel;
+ uint8_t SamuBootLevel;
+
+ uint8_t UVDInterval;
+ uint8_t VCEInterval;
+ uint8_t ACPInterval;
+ uint8_t SAMUInterval;
+
+ uint8_t GraphicsBootLevel;
+ uint8_t GraphicsVoltageChangeEnable;
+ uint8_t GraphicsThermThrottleEnable;
+ uint8_t GraphicsInterval;
+
+ uint8_t VoltageInterval;
+ uint8_t ThermalInterval;
+ uint16_t TemperatureLimitHigh;
+
+ uint16_t TemperatureLimitLow;
+ uint8_t MemoryBootLevel;
+ uint8_t MemoryVoltageChangeEnable;
+
+ uint8_t MemoryInterval;
+ uint8_t MemoryThermThrottleEnable;
+ uint16_t VddcVddciDelta;
+
+ uint16_t VoltageResponseTime;
+ uint16_t PhaseResponseTime;
+
+ uint8_t PCIeBootLinkLevel;
+ uint8_t PCIeGenInterval;
+ uint8_t DTEInterval;
+ uint8_t DTEMode;
+
+ uint8_t SVI2Enable;
+ uint8_t VRHotGpio;
+ uint8_t AcDcGpio;
+ uint8_t ThermGpio;
+
+ uint16_t PPM_PkgPwrLimit;
+ uint16_t PPM_TemperatureLimit;
+
+ uint16_t DefaultTdp;
+ uint16_t TargetTdp;
+
+ uint16_t FpsHighT;
+ uint16_t FpsLowT;
+
+ uint16_t BAPMTI_R [SMU7_DTE_ITERATIONS][SMU7_DTE_SOURCES][SMU7_DTE_SINKS];
+ uint16_t BAPMTI_RC [SMU7_DTE_ITERATIONS][SMU7_DTE_SOURCES][SMU7_DTE_SINKS];
+
+ uint8_t DTEAmbientTempBase;
+ uint8_t DTETjOffset;
+ uint8_t GpuTjMax;
+ uint8_t GpuTjHyst;
+
+ uint16_t BootVddc;
+ uint16_t BootVddci;
+
+ uint16_t BootMVdd;
+ uint16_t padding;
+
+ uint32_t BAPM_TEMP_GRADIENT;
+
+ uint32_t LowSclkInterruptT;
+};
+
+typedef struct SMU7_Discrete_DpmTable SMU7_Discrete_DpmTable;
+
+#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE 16
+#define SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT SMU7_MAX_LEVELS_MEMORY
+
+struct SMU7_Discrete_MCRegisterAddress
+{
+ uint16_t s0;
+ uint16_t s1;
+};
+
+typedef struct SMU7_Discrete_MCRegisterAddress SMU7_Discrete_MCRegisterAddress;
+
+struct SMU7_Discrete_MCRegisterSet
+{
+ uint32_t value[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+};
+
+typedef struct SMU7_Discrete_MCRegisterSet SMU7_Discrete_MCRegisterSet;
+
+struct SMU7_Discrete_MCRegisters
+{
+ uint8_t last;
+ uint8_t reserved[3];
+ SMU7_Discrete_MCRegisterAddress address[SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE];
+ SMU7_Discrete_MCRegisterSet data[SMU7_DISCRETE_MC_REGISTER_ARRAY_SET_COUNT];
+};
+
+typedef struct SMU7_Discrete_MCRegisters SMU7_Discrete_MCRegisters;
+
+struct SMU7_Discrete_PmFuses {
+ // dw0-dw1
+ uint8_t BapmVddCVidHiSidd[8];
+
+ // dw2-dw3
+ uint8_t BapmVddCVidLoSidd[8];
+
+ // dw4-dw5
+ uint8_t VddCVid[8];
+
+ // dw6
+ uint8_t SviLoadLineEn;
+ uint8_t SviLoadLineVddC;
+ uint8_t SviLoadLineTrimVddC;
+ uint8_t SviLoadLineOffsetVddC;
+
+ // dw7
+ uint16_t TDC_VDDC_PkgLimit;
+ uint8_t TDC_VDDC_ThrottleReleaseLimitPerc;
+ uint8_t TDC_MAWt;
+
+ // dw8
+ uint8_t TdcWaterfallCtl;
+ uint8_t LPMLTemperatureMin;
+ uint8_t LPMLTemperatureMax;
+ uint8_t Reserved;
+
+ // dw9-dw10
+ uint8_t BapmVddCVidHiSidd2[8];
+
+ // dw11-dw12
+ uint32_t Reserved6[2];
+
+ // dw13-dw16
+ uint8_t GnbLPML[16];
+
+ // dw17
+ uint8_t GnbLPMLMaxVid;
+ uint8_t GnbLPMLMinVid;
+ uint8_t Reserved1[2];
+
+ // dw18
+ uint16_t BapmVddCBaseLeakageHiSidd;
+ uint16_t BapmVddCBaseLeakageLoSidd;
+};
+
+typedef struct SMU7_Discrete_PmFuses SMU7_Discrete_PmFuses;
+
+
+#pragma pack(pop)
+
+#endif
+
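
Because these packed structures are copied verbatim into SMC SRAM, the pack(1) layout is effectively firmware ABI; a hedged sketch of the kind of compile-time check a driver init path could add (not present in this patch).

	/* illustrative: with pack(1) there is no padding, so SystemFlags must sit
	 * immediately after the three PID controllers (3 * 36 bytes); BUILD_BUG_ON()
	 * has to live inside a function, e.g. an init routine.
	 */
	BUILD_BUG_ON(offsetof(SMU7_Discrete_DpmTable, SystemFlags) !=
		     3 * sizeof(SMU7_PIDController));
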
diff --git a/drivers/gpu/drm/radeon/smu7_fusion.h b/drivers/gpu/drm/radeon/smu7_fusion.h
new file mode 100644
index 00000000000..78ada9ffd50
--- /dev/null
+++ b/drivers/gpu/drm/radeon/smu7_fusion.h
@@ -0,0 +1,300 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef SMU7_FUSION_H
+#define SMU7_FUSION_H
+
+#include "smu7.h"
+
+#pragma pack(push, 1)
+
+#define SMU7_DTE_ITERATIONS 5
+#define SMU7_DTE_SOURCES 5
+#define SMU7_DTE_SINKS 3
+#define SMU7_NUM_CPU_TES 2
+#define SMU7_NUM_GPU_TES 1
+#define SMU7_NUM_NON_TES 2
+
+// All 'soft registers' should be uint32_t.
+struct SMU7_SoftRegisters
+{
+ uint32_t RefClockFrequency;
+ uint32_t PmTimerP;
+ uint32_t FeatureEnables;
+ uint32_t HandshakeDisables;
+
+ uint8_t DisplayPhy1Config;
+ uint8_t DisplayPhy2Config;
+ uint8_t DisplayPhy3Config;
+ uint8_t DisplayPhy4Config;
+
+ uint8_t DisplayPhy5Config;
+ uint8_t DisplayPhy6Config;
+ uint8_t DisplayPhy7Config;
+ uint8_t DisplayPhy8Config;
+
+ uint32_t AverageGraphicsA;
+ uint32_t AverageMemoryA;
+ uint32_t AverageGioA;
+
+ uint8_t SClkDpmEnabledLevels;
+ uint8_t MClkDpmEnabledLevels;
+ uint8_t LClkDpmEnabledLevels;
+ uint8_t PCIeDpmEnabledLevels;
+
+ uint8_t UVDDpmEnabledLevels;
+ uint8_t SAMUDpmEnabledLevels;
+ uint8_t ACPDpmEnabledLevels;
+ uint8_t VCEDpmEnabledLevels;
+
+ uint32_t DRAM_LOG_ADDR_H;
+ uint32_t DRAM_LOG_ADDR_L;
+ uint32_t DRAM_LOG_PHY_ADDR_H;
+ uint32_t DRAM_LOG_PHY_ADDR_L;
+ uint32_t DRAM_LOG_BUFF_SIZE;
+ uint32_t UlvEnterC;
+ uint32_t UlvTime;
+ uint32_t Reserved[3];
+
+};
+
+typedef struct SMU7_SoftRegisters SMU7_SoftRegisters;
+
+struct SMU7_Fusion_GraphicsLevel
+{
+ uint32_t MinVddNb;
+
+ uint32_t SclkFrequency;
+
+ uint8_t Vid;
+ uint8_t VidOffset;
+ uint16_t AT;
+
+ uint8_t PowerThrottle;
+ uint8_t GnbSlow;
+ uint8_t ForceNbPs1;
+ uint8_t SclkDid;
+
+ uint8_t DisplayWatermark;
+ uint8_t EnabledForActivity;
+ uint8_t EnabledForThrottle;
+ uint8_t UpH;
+
+ uint8_t DownH;
+ uint8_t VoltageDownH;
+ uint8_t DeepSleepDivId;
+
+ uint8_t ClkBypassCntl;
+
+ uint32_t reserved;
+};
+
+typedef struct SMU7_Fusion_GraphicsLevel SMU7_Fusion_GraphicsLevel;
+
+struct SMU7_Fusion_GIOLevel
+{
+ uint8_t EnabledForActivity;
+ uint8_t LclkDid;
+ uint8_t Vid;
+ uint8_t VoltageDownH;
+
+ uint32_t MinVddNb;
+
+ uint16_t ResidencyCounter;
+ uint8_t UpH;
+ uint8_t DownH;
+
+ uint32_t LclkFrequency;
+
+ uint8_t ActivityLevel;
+ uint8_t EnabledForThrottle;
+
+ uint8_t ClkBypassCntl;
+
+ uint8_t padding;
+};
+
+typedef struct SMU7_Fusion_GIOLevel SMU7_Fusion_GIOLevel;
+
+// UVD VCLK/DCLK state (level) definition.
+struct SMU7_Fusion_UvdLevel
+{
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint16_t MinVddNb;
+ uint8_t VclkDivider;
+ uint8_t DclkDivider;
+
+ uint8_t VClkBypassCntl;
+ uint8_t DClkBypassCntl;
+
+ uint8_t padding[2];
+
+};
+
+typedef struct SMU7_Fusion_UvdLevel SMU7_Fusion_UvdLevel;
+
+// Clocks for other external blocks (VCE, ACP, SAMU).
+struct SMU7_Fusion_ExtClkLevel
+{
+ uint32_t Frequency;
+ uint16_t MinVoltage;
+ uint8_t Divider;
+ uint8_t ClkBypassCntl;
+
+ uint32_t Reserved;
+};
+typedef struct SMU7_Fusion_ExtClkLevel SMU7_Fusion_ExtClkLevel;
+
+struct SMU7_Fusion_ACPILevel
+{
+ uint32_t Flags;
+ uint32_t MinVddNb;
+ uint32_t SclkFrequency;
+ uint8_t SclkDid;
+ uint8_t GnbSlow;
+ uint8_t ForceNbPs1;
+ uint8_t DisplayWatermark;
+ uint8_t DeepSleepDivId;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_Fusion_ACPILevel SMU7_Fusion_ACPILevel;
+
+struct SMU7_Fusion_NbDpm
+{
+ uint8_t DpmXNbPsHi;
+ uint8_t DpmXNbPsLo;
+ uint8_t Dpm0PgNbPsHi;
+ uint8_t Dpm0PgNbPsLo;
+ uint8_t EnablePsi1;
+ uint8_t SkipDPM0;
+ uint8_t SkipPG;
+ uint8_t Hysteresis;
+ uint8_t EnableDpmPstatePoll;
+ uint8_t padding[3];
+};
+
+typedef struct SMU7_Fusion_NbDpm SMU7_Fusion_NbDpm;
+
+struct SMU7_Fusion_StateInfo
+{
+ uint32_t SclkFrequency;
+ uint32_t LclkFrequency;
+ uint32_t VclkFrequency;
+ uint32_t DclkFrequency;
+ uint32_t SamclkFrequency;
+ uint32_t AclkFrequency;
+ uint32_t EclkFrequency;
+ uint8_t DisplayWatermark;
+ uint8_t McArbIndex;
+ int8_t SclkIndex;
+ int8_t MclkIndex;
+};
+
+typedef struct SMU7_Fusion_StateInfo SMU7_Fusion_StateInfo;
+
+struct SMU7_Fusion_DpmTable
+{
+ uint32_t SystemFlags;
+
+ SMU7_PIDController GraphicsPIDController;
+ SMU7_PIDController GioPIDController;
+
+ uint8_t GraphicsDpmLevelCount;
+ uint8_t GIOLevelCount;
+ uint8_t UvdLevelCount;
+ uint8_t VceLevelCount;
+
+ uint8_t AcpLevelCount;
+ uint8_t SamuLevelCount;
+ uint16_t FpsHighT;
+
+ SMU7_Fusion_GraphicsLevel GraphicsLevel [SMU__NUM_SCLK_DPM_STATE];
+ SMU7_Fusion_ACPILevel ACPILevel;
+ SMU7_Fusion_UvdLevel UvdLevel [SMU7_MAX_LEVELS_UVD];
+ SMU7_Fusion_ExtClkLevel VceLevel [SMU7_MAX_LEVELS_VCE];
+ SMU7_Fusion_ExtClkLevel AcpLevel [SMU7_MAX_LEVELS_ACP];
+ SMU7_Fusion_ExtClkLevel SamuLevel [SMU7_MAX_LEVELS_SAMU];
+
+ uint8_t UvdBootLevel;
+ uint8_t VceBootLevel;
+ uint8_t AcpBootLevel;
+ uint8_t SamuBootLevel;
+ uint8_t UVDInterval;
+ uint8_t VCEInterval;
+ uint8_t ACPInterval;
+ uint8_t SAMUInterval;
+
+ uint8_t GraphicsBootLevel;
+ uint8_t GraphicsInterval;
+ uint8_t GraphicsThermThrottleEnable;
+ uint8_t GraphicsVoltageChangeEnable;
+
+ uint8_t GraphicsClkSlowEnable;
+ uint8_t GraphicsClkSlowDivider;
+ uint16_t FpsLowT;
+
+ uint32_t DisplayCac;
+ uint32_t LowSclkInterruptT;
+
+ uint32_t DRAM_LOG_ADDR_H;
+ uint32_t DRAM_LOG_ADDR_L;
+ uint32_t DRAM_LOG_PHY_ADDR_H;
+ uint32_t DRAM_LOG_PHY_ADDR_L;
+ uint32_t DRAM_LOG_BUFF_SIZE;
+
+};
+
+struct SMU7_Fusion_GIODpmTable
+{
+
+ SMU7_Fusion_GIOLevel GIOLevel [SMU7_MAX_LEVELS_GIO];
+
+ SMU7_PIDController GioPIDController;
+
+ uint32_t GIOLevelCount;
+
+ uint8_t Enable;
+ uint8_t GIOVoltageChangeEnable;
+ uint8_t GIOBootLevel;
+ uint8_t padding;
+ uint8_t padding1[2];
+ uint8_t TargetState;
+ uint8_t CurrenttState;
+ uint8_t ThrottleOnHtc;
+ uint8_t ThermThrottleStatus;
+ uint8_t ThermThrottleTempSelect;
+ uint8_t ThermThrottleEnable;
+ uint16_t TemperatureLimitHigh;
+ uint16_t TemperatureLimitLow;
+
+};
+
+typedef struct SMU7_Fusion_DpmTable SMU7_Fusion_DpmTable;
+typedef struct SMU7_Fusion_GIODpmTable SMU7_Fusion_GIODpmTable;
+
+#pragma pack(pop)
+
+#endif
+
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index c0a85031990..864761c0120 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1483,6 +1483,7 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
for (i = 0; i < state_array->ucNumEntries; i++) {
+ u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
@@ -1496,12 +1497,15 @@ static int sumo_parse_power_table(struct radeon_device *rdev)
}
rdev->pm.dpm.ps[i].ps_priv = ps;
k = 0;
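+		/* Walk the variable-length clockInfoIndex[] table through a
+		 * plain byte pointer (assumption: this avoids indexing past
+		 * the declared size of the ATOM table arrays).
+		 */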
+ idx = (u8 *)&power_state->v2.clockInfoIndex[0];
for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
- clock_array_index = power_state->v2.clockInfoIndex[j];
+ clock_array_index = idx[j];
if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
break;
+
clock_info = (union pplib_clock_info *)
- &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+ ((u8 *)&clock_info_array->clockInfo[0] +
+ (clock_array_index * clock_info_array->ucEntrySize));
sumo_parse_pplib_clock_info(rdev,
&rdev->pm.dpm.ps[i], k,
clock_info);
@@ -1530,6 +1534,20 @@ u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev,
return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
}
+u32 sumo_convert_vid7_to_vid2(struct radeon_device *rdev,
+ struct sumo_vid_mapping_table *vid_mapping_table,
+ u32 vid_7bit)
+{
+ u32 i;
+
+ for (i = 0; i < vid_mapping_table->num_entries; i++) {
+ if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
+ return vid_mapping_table->entries[i].vid_2bit;
+ }
+
+ return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
+}
+
static u16 sumo_convert_voltage_index_to_value(struct radeon_device *rdev,
u32 vid_2bit)
{
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.h b/drivers/gpu/drm/radeon/sumo_dpm.h
index 07dda299c78..db1ea32a907 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.h
+++ b/drivers/gpu/drm/radeon/sumo_dpm.h
@@ -202,6 +202,9 @@ void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev,
struct sumo_vid_mapping_table *vid_mapping_table,
u32 vid_2bit);
+u32 sumo_convert_vid7_to_vid2(struct radeon_device *rdev,
+ struct sumo_vid_mapping_table *vid_mapping_table,
+ u32 vid_7bit);
u32 sumo_get_sleep_divider_from_id(u32 id);
u32 sumo_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
u32 sclk,
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index a1eb5f59939..b07b7b8f1af 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1675,6 +1675,7 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
for (i = 0; i < state_array->ucNumEntries; i++) {
+ u8 *idx;
power_state = (union pplib_power_state *)power_state_offset;
non_clock_array_index = power_state->v2.nonClockInfoIndex;
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
@@ -1688,14 +1689,16 @@ static int trinity_parse_power_table(struct radeon_device *rdev)
}
rdev->pm.dpm.ps[i].ps_priv = ps;
k = 0;
+ idx = (u8 *)&power_state->v2.clockInfoIndex[0];
for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
- clock_array_index = power_state->v2.clockInfoIndex[j];
+ clock_array_index = idx[j];
if (clock_array_index >= clock_info_array->ucNumEntries)
continue;
if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
break;
clock_info = (union pplib_clock_info *)
- &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
+ ((u8 *)&clock_info_array->clockInfo[0] +
+ (clock_array_index * clock_info_array->ucEntrySize));
trinity_parse_pplib_clock_info(rdev,
&rdev->pm.dpm.ps[i], k,
clock_info);
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
new file mode 100644
index 00000000000..7266805d978
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -0,0 +1,436 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "r600d.h"
+
+/**
+ * uvd_v1_0_get_rptr - get read pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Returns the current hardware read pointer
+ */
+uint32_t uvd_v1_0_get_rptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ return RREG32(UVD_RBC_RB_RPTR);
+}
+
+/**
+ * uvd_v1_0_get_wptr - get write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Returns the current hardware write pointer
+ */
+uint32_t uvd_v1_0_get_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ return RREG32(UVD_RBC_RB_WPTR);
+}
+
+/**
+ * uvd_v1_0_set_wptr - set write pointer
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Commits the write pointer to the hardware
+ */
+void uvd_v1_0_set_wptr(struct radeon_device *rdev,
+ struct radeon_ring *ring)
+{
+ WREG32(UVD_RBC_RB_WPTR, ring->wptr);
+}
+
+/**
+ * uvd_v1_0_init - start and test UVD block
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the hardware, boot up the VCPU and do some testing
+ */
+int uvd_v1_0_init(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ uint32_t tmp;
+ int r;
+
+ /* raise clocks while booting up the VCPU */
+ radeon_set_uvd_clocks(rdev, 53300, 40000);
+
+ r = uvd_v1_0_start(rdev);
+ if (r)
+ goto done;
+
+ ring->ready = true;
+ r = radeon_ring_test(rdev, R600_RING_TYPE_UVD_INDEX, ring);
+ if (r) {
+ ring->ready = false;
+ goto done;
+ }
+
+ r = radeon_ring_lock(rdev, ring, 10);
+ if (r) {
+ DRM_ERROR("radeon: ring failed to lock UVD ring (%d).\n", r);
+ goto done;
+ }
+
+ tmp = PACKET0(UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
+ radeon_ring_write(ring, tmp);
+ radeon_ring_write(ring, 0xFFFFF);
+
+ tmp = PACKET0(UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
+ radeon_ring_write(ring, tmp);
+ radeon_ring_write(ring, 0xFFFFF);
+
+ tmp = PACKET0(UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
+ radeon_ring_write(ring, tmp);
+ radeon_ring_write(ring, 0xFFFFF);
+
+ /* Clear timeout status bits */
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_TIMEOUT_STATUS, 0));
+ radeon_ring_write(ring, 0x8);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));
+ radeon_ring_write(ring, 3);
+
+ radeon_ring_unlock_commit(rdev, ring);
+
+done:
+ /* lower clocks again */
+ radeon_set_uvd_clocks(rdev, 0, 0);
+
+ if (!r)
+ DRM_INFO("UVD initialized successfully.\n");
+
+ return r;
+}
+
+/**
+ * uvd_v1_0_fini - stop the hardware block
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Stop the UVD block and mark the ring as not ready anymore
+ */
+void uvd_v1_0_fini(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+
+ uvd_v1_0_stop(rdev);
+ ring->ready = false;
+}
+
+/**
+ * uvd_v1_0_start - start UVD block
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Setup and start the UVD block
+ */
+int uvd_v1_0_start(struct radeon_device *rdev)
+{
+ struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+ uint32_t rb_bufsz;
+ int i, j, r;
+
+ /* disable byte swapping */
+ u32 lmi_swap_cntl = 0;
+ u32 mp_swap_cntl = 0;
+
+ /* disable clock gating */
+ WREG32(UVD_CGC_GATE, 0);
+
+ /* disable interrupt */
+ WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));
+
+ /* Stall UMC and register bus before resetting VCPU */
+ WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+ mdelay(1);
+
+ /* put LMI, VCPU, RBC etc... into reset */
+ WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
+ LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
+ CXW_SOFT_RESET | TAP_SOFT_RESET | LMI_UMC_SOFT_RESET);
+ mdelay(5);
+
+ /* take UVD block out of reset */
+ WREG32_P(SRBM_SOFT_RESET, 0, ~SOFT_RESET_UVD);
+ mdelay(5);
+
+ /* initialize UVD memory controller */
+ WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
+ (1 << 21) | (1 << 9) | (1 << 20));
+
+#ifdef __BIG_ENDIAN
+ /* swap (8 in 32) RB and IB */
+ lmi_swap_cntl = 0xa;
+ mp_swap_cntl = 0;
+#endif
+ WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
+ WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
+
+ WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
+ WREG32(UVD_MPC_SET_MUXA1, 0x0);
+ WREG32(UVD_MPC_SET_MUXB0, 0x40c2040);
+ WREG32(UVD_MPC_SET_MUXB1, 0x0);
+ WREG32(UVD_MPC_SET_ALU, 0);
+ WREG32(UVD_MPC_SET_MUX, 0x88);
+
+ /* take all subblocks out of reset, except VCPU */
+ WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
+ mdelay(5);
+
+ /* enable VCPU clock */
+ WREG32(UVD_VCPU_CNTL, 1 << 9);
+
+ /* enable UMC */
+ WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+
+ /* boot up the VCPU */
+ WREG32(UVD_SOFT_RESET, 0);
+ mdelay(10);
+
+ WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+
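+ /* Wait for the VCPU to come up: poll UVD_STATUS (bit 1) for up to
+ * 100 x 10ms per attempt, retrying the VCPU soft reset up to 10
+ * times before giving up.
+ */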
+ for (i = 0; i < 10; ++i) {
+ uint32_t status;
+ for (j = 0; j < 100; ++j) {
+ status = RREG32(UVD_STATUS);
+ if (status & 2)
+ break;
+ mdelay(10);
+ }
+ r = 0;
+ if (status & 2)
+ break;
+
+ DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
+ WREG32_P(UVD_SOFT_RESET, VCPU_SOFT_RESET, ~VCPU_SOFT_RESET);
+ mdelay(10);
+ WREG32_P(UVD_SOFT_RESET, 0, ~VCPU_SOFT_RESET);
+ mdelay(10);
+ r = -1;
+ }
+
+ if (r) {
+ DRM_ERROR("UVD not responding, giving up!!!\n");
+ return r;
+ }
+
+ /* enable interrupt */
+ WREG32_P(UVD_MASTINT_EN, 3<<1, ~(3 << 1));
+
+ /* force RBC into idle state */
+ WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+
+ /* Set the write pointer delay */
+ WREG32(UVD_RBC_RB_WPTR_CNTL, 0);
+
+ /* program the 4GB memory segment for rptr and ring buffer */
+ WREG32(UVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
+ (0x7 << 16) | (0x1 << 31));
+
+ /* Initialize the ring buffer's read and write pointers */
+ WREG32(UVD_RBC_RB_RPTR, 0x0);
+
+ ring->wptr = ring->rptr = RREG32(UVD_RBC_RB_RPTR);
+ WREG32(UVD_RBC_RB_WPTR, ring->wptr);
+
+ /* set the ring address */
+ WREG32(UVD_RBC_RB_BASE, ring->gpu_addr);
+
+ /* Set ring buffer size */
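+ /* The size is programmed as log2 of the byte size; WREG32_P updates
+ * only the bits covered by the 0x11f1f mask and leaves the rest of
+ * UVD_RBC_RB_CNTL untouched.
+ */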
+ rb_bufsz = order_base_2(ring->ring_size);
+ rb_bufsz = (0x1 << 8) | rb_bufsz;
+ WREG32_P(UVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);
+
+ return 0;
+}
+
+/**
+ * uvd_v1_0_stop - stop UVD block
+ *
+ * @rdev: radeon_device pointer
+ *
+ * stop the UVD block
+ */
+void uvd_v1_0_stop(struct radeon_device *rdev)
+{
+ /* force RBC into idle state */
+ WREG32(UVD_RBC_RB_CNTL, 0x11010101);
+
+ /* Stall UMC and register bus before resetting VCPU */
+ WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+ mdelay(1);
+
+ /* put VCPU into reset */
+ WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
+ mdelay(5);
+
+ /* disable VCPU clock */
+ WREG32(UVD_VCPU_CNTL, 0x0);
+
+ /* Unstall UMC and register bus */
+ WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+ WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+}
+
+/**
+ * uvd_v1_0_ring_test - register write test
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Test if we can successfully write to the context register
+ */
+int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ uint32_t tmp = 0;
+ unsigned i;
+ int r;
+
+ WREG32(UVD_CONTEXT_ID, 0xCAFEDEAD);
+ r = radeon_ring_lock(rdev, ring, 3);
+ if (r) {
+ DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n",
+ ring->idx, r);
+ return r;
+ }
+ radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ tmp = RREG32(UVD_CONTEXT_ID);
+ if (tmp == 0xDEADBEEF)
+ break;
+ DRM_UDELAY(1);
+ }
+
+ if (i < rdev->usec_timeout) {
+ DRM_INFO("ring test on %d succeeded in %d usecs\n",
+ ring->idx, i);
+ } else {
+ DRM_ERROR("radeon: ring %d test failed (0x%08X)\n",
+ ring->idx, tmp);
+ r = -EINVAL;
+ }
+ return r;
+}
+
+/**
+ * uvd_v1_0_semaphore_emit - emit semaphore command
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ * @semaphore: semaphore to emit commands for
+ * @emit_wait: true if we should emit a wait command
+ *
+ * Emit a semaphore command (either wait or signal) to the UVD ring.
+ */
+void uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+
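+ /* The 8-byte aligned semaphore address is split across two 20-bit
+ * register fields: bits 3-22 go into UVD_SEMA_ADDR_LOW and bits
+ * 23-42 into UVD_SEMA_ADDR_HIGH.
+ */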
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+ radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+ radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+ radeon_ring_write(ring, emit_wait ? 1 : 0);
+}
+
+/**
+ * uvd_v1_0_ib_execute - execute indirect buffer
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to execute
+ *
+ * Write ring commands to execute the indirect buffer
+ */
+void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->ring];
+
+ radeon_ring_write(ring, PACKET0(UVD_RBC_IB_BASE, 0));
+ radeon_ring_write(ring, ib->gpu_addr);
+ radeon_ring_write(ring, PACKET0(UVD_RBC_IB_SIZE, 0));
+ radeon_ring_write(ring, ib->length_dw);
+}
+
+/**
+ * uvd_v1_0_ib_test - test ib execution
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ *
+ * Test if we can successfully execute an IB
+ */
+int uvd_v1_0_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ struct radeon_fence *fence = NULL;
+ int r;
+
+ r = radeon_set_uvd_clocks(rdev, 53300, 40000);
+ if (r) {
+ DRM_ERROR("radeon: failed to raise UVD clocks (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_uvd_get_create_msg(rdev, ring->idx, 1, NULL);
+ if (r) {
+ DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
+ goto error;
+ }
+
+ r = radeon_uvd_get_destroy_msg(rdev, ring->idx, 1, &fence);
+ if (r) {
+ DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
+ goto error;
+ }
+
+ r = radeon_fence_wait(fence, false);
+ if (r) {
+ DRM_ERROR("radeon: fence wait failed (%d).\n", r);
+ goto error;
+ }
+ DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
+error:
+ radeon_fence_unref(&fence);
+ radeon_set_uvd_clocks(rdev, 0, 0);
+ return r;
+}
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
new file mode 100644
index 00000000000..b19ef495108
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -0,0 +1,165 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "rv770d.h"
+
+/**
+ * uvd_v2_2_fence_emit - emit a fence & trap command
+ *
+ * @rdev: radeon_device pointer
+ * @fence: fence to emit
+ *
+ * Write a fence and a trap command to the ring.
+ */
+void uvd_v2_2_fence_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
+
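+ /* Per the kernel-doc above: the first packet group latches the
+ * sequence number in UVD_CONTEXT_ID, points the VCPU data registers
+ * at the fence address and issues command 0 to write the fence; the
+ * second group issues command 2 to emit the trap.
+ */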
+ radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+ radeon_ring_write(ring, upper_32_bits(addr) & 0xff);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+ radeon_ring_write(ring, 0);
+
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA1, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
+ radeon_ring_write(ring, 2);
+ return;
+}
+
+/**
+ * uvd_v2_2_resume - memory controller programming
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Let the UVD memory controller know its offsets
+ */
+int uvd_v2_2_resume(struct radeon_device *rdev)
+{
+ uint64_t addr;
+ uint32_t chip_id, size;
+ int r;
+
+ r = radeon_uvd_resume(rdev);
+ if (r)
+ return r;
+
+ /* program the VCPU memory controller bits 0-27 */
+ addr = rdev->uvd.gpu_addr >> 3;
+ size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE0, size);
+
+ addr += size;
+ size = RADEON_UVD_STACK_SIZE >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE1, size);
+
+ addr += size;
+ size = RADEON_UVD_HEAP_SIZE >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE2, size);
+
+ /* bits 28-31 */
+ addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
+ WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
+
+ /* bits 32-39 */
+ addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
+ WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+
+ /* tell firmware which hardware it is running on */
+ switch (rdev->family) {
+ default:
+ return -EINVAL;
+ case CHIP_RV710:
+ chip_id = 0x01000005;
+ break;
+ case CHIP_RV730:
+ chip_id = 0x01000006;
+ break;
+ case CHIP_RV740:
+ chip_id = 0x01000007;
+ break;
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ chip_id = 0x01000008;
+ break;
+ case CHIP_JUNIPER:
+ chip_id = 0x01000009;
+ break;
+ case CHIP_REDWOOD:
+ chip_id = 0x0100000a;
+ break;
+ case CHIP_CEDAR:
+ chip_id = 0x0100000b;
+ break;
+ case CHIP_SUMO:
+ case CHIP_SUMO2:
+ chip_id = 0x0100000c;
+ break;
+ case CHIP_PALM:
+ chip_id = 0x0100000e;
+ break;
+ case CHIP_CAYMAN:
+ chip_id = 0x0100000f;
+ break;
+ case CHIP_BARTS:
+ chip_id = 0x01000010;
+ break;
+ case CHIP_TURKS:
+ chip_id = 0x01000011;
+ break;
+ case CHIP_CAICOS:
+ chip_id = 0x01000012;
+ break;
+ case CHIP_TAHITI:
+ chip_id = 0x01000014;
+ break;
+ case CHIP_VERDE:
+ chip_id = 0x01000015;
+ break;
+ case CHIP_PITCAIRN:
+ chip_id = 0x01000016;
+ break;
+ case CHIP_ARUBA:
+ chip_id = 0x01000017;
+ break;
+ }
+ WREG32(UVD_VCPU_CHIP_ID, chip_id);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/radeon/uvd_v3_1.c b/drivers/gpu/drm/radeon/uvd_v3_1.c
new file mode 100644
index 00000000000..5b6fa1f62d4
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v3_1.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "nid.h"
+
+/**
+ * uvd_v3_1_semaphore_emit - emit semaphore command
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ * @semaphore: semaphore to emit commands for
+ * @emit_wait: true if we should emit a wait command
+ *
+ * Emit a semaphore command (either wait or signal) to the UVD ring.
+ */
+void uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+ radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+ radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
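+ /* Unlike the v1.0 variant, bit 7 of the command word is set as well
+ * here (assumption: it enables the extended semaphore handling on
+ * these newer parts); bit 0 still selects wait vs. signal.
+ */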
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+ radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));
+}
diff --git a/drivers/gpu/drm/radeon/uvd_v4_2.c b/drivers/gpu/drm/radeon/uvd_v4_2.c
new file mode 100644
index 00000000000..d04d5073eef
--- /dev/null
+++ b/drivers/gpu/drm/radeon/uvd_v4_2.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright 2013 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/firmware.h>
+#include <drm/drmP.h>
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "cikd.h"
+
+/**
+ * uvd_v4_2_resume - memory controller programming
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Let the UVD memory controller know its offsets
+ */
+int uvd_v4_2_resume(struct radeon_device *rdev)
+{
+ uint64_t addr;
+ uint32_t size;
+
+ /* program the VCPU memory controller bits 0-27 */
+ addr = rdev->uvd.gpu_addr >> 3;
+ size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE0, size);
+
+ addr += size;
+ size = RADEON_UVD_STACK_SIZE >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET1, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE1, size);
+
+ addr += size;
+ size = RADEON_UVD_HEAP_SIZE >> 3;
+ WREG32(UVD_VCPU_CACHE_OFFSET2, addr);
+ WREG32(UVD_VCPU_CACHE_SIZE2, size);
+
+ /* bits 28-31 */
+ addr = (rdev->uvd.gpu_addr >> 28) & 0xF;
+ WREG32(UVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));
+
+ /* bits 32-39 */
+ addr = (rdev->uvd.gpu_addr >> 32) & 0xFF;
+ WREG32(UVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 72887df8dd7..c590cd9dca0 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -7,3 +7,10 @@ config DRM_RCAR_DU
help
Choose this option if you have an R-Car chipset.
If M is selected the module will be called rcar-du-drm.
+
+config DRM_RCAR_LVDS
+ bool "R-Car DU LVDS Encoder Support"
+ depends on DRM_RCAR_DU
+ help
+ Enable support for the R-Car Display Unit embedded LVDS encoders
+ (currently only on R8A7790).
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 7333c009401..12b8d447783 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -1,8 +1,12 @@
rcar-du-drm-y := rcar_du_crtc.o \
rcar_du_drv.o \
+ rcar_du_encoder.o \
+ rcar_du_group.o \
rcar_du_kms.o \
- rcar_du_lvds.o \
+ rcar_du_lvdscon.o \
rcar_du_plane.o \
- rcar_du_vga.o
+ rcar_du_vgacon.o
-obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
+rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o
+
+obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 24183fb9359..a9d24e4bf79 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -23,30 +23,26 @@
#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
#include "rcar_du_kms.h"
-#include "rcar_du_lvds.h"
#include "rcar_du_plane.h"
#include "rcar_du_regs.h"
-#include "rcar_du_vga.h"
-
-#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
static u32 rcar_du_crtc_read(struct rcar_du_crtc *rcrtc, u32 reg)
{
- struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
return rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
}
static void rcar_du_crtc_write(struct rcar_du_crtc *rcrtc, u32 reg, u32 data)
{
- struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
rcar_du_write(rcdu, rcrtc->mmio_offset + reg, data);
}
static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
{
- struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
rcar_du_read(rcdu, rcrtc->mmio_offset + reg) & ~clr);
@@ -54,7 +50,7 @@ static void rcar_du_crtc_clr(struct rcar_du_crtc *rcrtc, u32 reg, u32 clr)
static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
{
- struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
rcar_du_write(rcdu, rcrtc->mmio_offset + reg,
rcar_du_read(rcdu, rcrtc->mmio_offset + reg) | set);
@@ -63,29 +59,48 @@ static void rcar_du_crtc_set(struct rcar_du_crtc *rcrtc, u32 reg, u32 set)
static void rcar_du_crtc_clr_set(struct rcar_du_crtc *rcrtc, u32 reg,
u32 clr, u32 set)
{
- struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
u32 value = rcar_du_read(rcdu, rcrtc->mmio_offset + reg);
rcar_du_write(rcdu, rcrtc->mmio_offset + reg, (value & ~clr) | set);
}
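+/* Enable the CRTC clock and take a reference on the CRTC group; both are
+ * released together in rcar_du_crtc_put().
+ */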
+static int rcar_du_crtc_get(struct rcar_du_crtc *rcrtc)
+{
+ int ret;
+
+ ret = clk_prepare_enable(rcrtc->clock);
+ if (ret < 0)
+ return ret;
+
+ ret = rcar_du_group_get(rcrtc->group);
+ if (ret < 0)
+ clk_disable_unprepare(rcrtc->clock);
+
+ return ret;
+}
+
+static void rcar_du_crtc_put(struct rcar_du_crtc *rcrtc)
+{
+ rcar_du_group_put(rcrtc->group);
+ clk_disable_unprepare(rcrtc->clock);
+}
+
static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
{
- struct drm_crtc *crtc = &rcrtc->crtc;
- struct rcar_du_device *rcdu = crtc->dev->dev_private;
- const struct drm_display_mode *mode = &crtc->mode;
+ const struct drm_display_mode *mode = &rcrtc->crtc.mode;
unsigned long clk;
u32 value;
u32 div;
/* Dot clock */
- clk = clk_get_rate(rcdu->clock);
+ clk = clk_get_rate(rcrtc->clock);
div = DIV_ROUND_CLOSEST(clk, mode->clock * 1000);
div = clamp(div, 1U, 64U) - 1;
- rcar_du_write(rcdu, rcrtc->index ? ESCR2 : ESCR,
- ESCR_DCLKSEL_CLKS | div);
- rcar_du_write(rcdu, rcrtc->index ? OTAR2 : OTAR, 0);
+ rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? ESCR2 : ESCR,
+ ESCR_DCLKSEL_CLKS | div);
+ rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);
/* Signal polarities */
value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
@@ -112,68 +127,25 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
}
-static void rcar_du_crtc_set_routing(struct rcar_du_crtc *rcrtc)
-{
- struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
- u32 dorcr = rcar_du_read(rcdu, DORCR);
-
- dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK);
-
- /* Set the DU1 pins sources. Select CRTC 0 if explicitly requested and
- * CRTC 1 in all other cases to avoid cloning CRTC 0 to DU0 and DU1 by
- * default.
- */
- if (rcrtc->outputs & (1 << 1) && rcrtc->index == 0)
- dorcr |= DORCR_PG2D_DS1;
- else
- dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2;
-
- rcar_du_write(rcdu, DORCR, dorcr);
-}
-
-static void __rcar_du_start_stop(struct rcar_du_device *rcdu, bool start)
-{
- rcar_du_write(rcdu, DSYSR,
- (rcar_du_read(rcdu, DSYSR) & ~(DSYSR_DRES | DSYSR_DEN)) |
- (start ? DSYSR_DEN : DSYSR_DRES));
-}
-
-static void rcar_du_start_stop(struct rcar_du_device *rcdu, bool start)
-{
- /* Many of the configuration bits are only updated when the display
- * reset (DRES) bit in DSYSR is set to 1, disabling *both* CRTCs. Some
- * of those bits could be pre-configured, but others (especially the
- * bits related to plane assignment to display timing controllers) need
- * to be modified at runtime.
- *
- * Restart the display controller if a start is requested. Sorry for the
- * flicker. It should be possible to move most of the "DRES-update" bits
- * setup to driver initialization time and minimize the number of cases
- * when the display controller will have to be restarted.
- */
- if (start) {
- if (rcdu->used_crtcs++ != 0)
- __rcar_du_start_stop(rcdu, false);
- __rcar_du_start_stop(rcdu, true);
- } else {
- if (--rcdu->used_crtcs == 0)
- __rcar_du_start_stop(rcdu, false);
- }
-}
-
-void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output)
+void rcar_du_crtc_route_output(struct drm_crtc *crtc,
+ enum rcar_du_output output)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
/* Store the route from the CRTC output to the DU output. The DU will be
* configured when starting the CRTC.
*/
- rcrtc->outputs |= 1 << output;
+ rcrtc->outputs |= BIT(output);
+
+ /* Store RGB routing to DPAD0 for R8A7790. */
+ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_DEFR8) &&
+ output == RCAR_DU_OUTPUT_DPAD0)
+ rcdu->dpad0_source = rcrtc->index;
}
void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
{
- struct rcar_du_device *rcdu = crtc->dev->dev_private;
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct rcar_du_plane *planes[RCAR_DU_NUM_HW_PLANES];
unsigned int num_planes = 0;
@@ -182,8 +154,8 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
u32 dptsr = 0;
u32 dspr = 0;
- for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) {
- struct rcar_du_plane *plane = &rcdu->planes.planes[i];
+ for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
+ struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
unsigned int j;
if (plane->crtc != &rcrtc->crtc || !plane->enabled)
@@ -220,8 +192,8 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
/* Select display timing and dot clock generator 2 for planes associated
* with superposition controller 2.
*/
- if (rcrtc->index) {
- u32 value = rcar_du_read(rcdu, DPTSR);
+ if (rcrtc->index % 2) {
+ u32 value = rcar_du_group_read(rcrtc->group, DPTSR);
/* The DPTSR register is updated when the display controller is
* stopped. We thus need to restart the DU. Once again, sorry
@@ -231,21 +203,19 @@ void rcar_du_crtc_update_planes(struct drm_crtc *crtc)
* occur only if we need to break the pre-association.
*/
if (value != dptsr) {
- rcar_du_write(rcdu, DPTSR, dptsr);
- if (rcdu->used_crtcs) {
- __rcar_du_start_stop(rcdu, false);
- __rcar_du_start_stop(rcdu, true);
- }
+ rcar_du_group_write(rcrtc->group, DPTSR, dptsr);
+ if (rcrtc->group->used_crtcs)
+ rcar_du_group_restart(rcrtc->group);
}
}
- rcar_du_write(rcdu, rcrtc->index ? DS2PR : DS1PR, dspr);
+ rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? DS2PR : DS1PR,
+ dspr);
}
static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
{
struct drm_crtc *crtc = &rcrtc->crtc;
- struct rcar_du_device *rcdu = crtc->dev->dev_private;
unsigned int i;
if (rcrtc->started)
@@ -260,16 +230,16 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
/* Configure display timings and output routing */
rcar_du_crtc_set_display_timing(rcrtc);
- rcar_du_crtc_set_routing(rcrtc);
+ rcar_du_group_set_routing(rcrtc->group);
- mutex_lock(&rcdu->planes.lock);
+ mutex_lock(&rcrtc->group->planes.lock);
rcrtc->plane->enabled = true;
rcar_du_crtc_update_planes(crtc);
- mutex_unlock(&rcdu->planes.lock);
+ mutex_unlock(&rcrtc->group->planes.lock);
/* Setup planes. */
- for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) {
- struct rcar_du_plane *plane = &rcdu->planes.planes[i];
+ for (i = 0; i < ARRAY_SIZE(rcrtc->group->planes.planes); ++i) {
+ struct rcar_du_plane *plane = &rcrtc->group->planes.planes[i];
if (plane->crtc != crtc || !plane->enabled)
continue;
@@ -283,7 +253,7 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
*/
rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_MASTER);
- rcar_du_start_stop(rcdu, true);
+ rcar_du_group_start_stop(rcrtc->group, true);
rcrtc->started = true;
}
@@ -291,42 +261,37 @@ static void rcar_du_crtc_start(struct rcar_du_crtc *rcrtc)
static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
{
struct drm_crtc *crtc = &rcrtc->crtc;
- struct rcar_du_device *rcdu = crtc->dev->dev_private;
if (!rcrtc->started)
return;
- mutex_lock(&rcdu->planes.lock);
+ mutex_lock(&rcrtc->group->planes.lock);
rcrtc->plane->enabled = false;
rcar_du_crtc_update_planes(crtc);
- mutex_unlock(&rcdu->planes.lock);
+ mutex_unlock(&rcrtc->group->planes.lock);
/* Select switch sync mode. This stops display operation and configures
* the HSYNC and VSYNC signals as inputs.
*/
rcar_du_crtc_clr_set(rcrtc, DSYSR, DSYSR_TVM_MASK, DSYSR_TVM_SWITCH);
- rcar_du_start_stop(rcdu, false);
+ rcar_du_group_start_stop(rcrtc->group, false);
rcrtc->started = false;
}
void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc)
{
- struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
-
rcar_du_crtc_stop(rcrtc);
- rcar_du_put(rcdu);
+ rcar_du_crtc_put(rcrtc);
}
void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc)
{
- struct rcar_du_device *rcdu = rcrtc->crtc.dev->dev_private;
-
if (rcrtc->dpms != DRM_MODE_DPMS_ON)
return;
- rcar_du_get(rcdu);
+ rcar_du_crtc_get(rcrtc);
rcar_du_crtc_start(rcrtc);
}
@@ -340,18 +305,17 @@ static void rcar_du_crtc_update_base(struct rcar_du_crtc *rcrtc)
static void rcar_du_crtc_dpms(struct drm_crtc *crtc, int mode)
{
- struct rcar_du_device *rcdu = crtc->dev->dev_private;
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
if (rcrtc->dpms == mode)
return;
if (mode == DRM_MODE_DPMS_ON) {
- rcar_du_get(rcdu);
+ rcar_du_crtc_get(rcrtc);
rcar_du_crtc_start(rcrtc);
} else {
rcar_du_crtc_stop(rcrtc);
- rcar_du_put(rcdu);
+ rcar_du_crtc_put(rcrtc);
}
rcrtc->dpms = mode;
@@ -367,13 +331,12 @@ static bool rcar_du_crtc_mode_fixup(struct drm_crtc *crtc,
static void rcar_du_crtc_mode_prepare(struct drm_crtc *crtc)
{
- struct rcar_du_device *rcdu = crtc->dev->dev_private;
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
/* We need to access the hardware during mode set, acquire a reference
- * to the DU.
+ * to the CRTC.
*/
- rcar_du_get(rcdu);
+ rcar_du_crtc_get(rcrtc);
/* Stop the CRTC and release the plane. Force the DPMS mode to off as a
* result.
@@ -390,8 +353,8 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
int x, int y,
struct drm_framebuffer *old_fb)
{
- struct rcar_du_device *rcdu = crtc->dev->dev_private;
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
const struct rcar_du_format_info *format;
int ret;
@@ -423,10 +386,10 @@ static int rcar_du_crtc_mode_set(struct drm_crtc *crtc,
error:
/* There's no rollback/abort operation to clean up in case of error. We
- * thus need to release the reference to the DU acquired in prepare()
+ * thus need to release the reference to the CRTC acquired in prepare()
* here.
*/
- rcar_du_put(rcdu);
+ rcar_du_crtc_put(rcrtc);
return ret;
}
@@ -514,9 +477,28 @@ static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
drm_vblank_put(dev, rcrtc->index);
}
+static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
+{
+ struct rcar_du_crtc *rcrtc = arg;
+ irqreturn_t ret = IRQ_NONE;
+ u32 status;
+
+ status = rcar_du_crtc_read(rcrtc, DSSR);
+ rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
+
+ if (status & DSSR_VBK) {
+ drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
+ rcar_du_crtc_finish_page_flip(rcrtc);
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
static int rcar_du_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
struct drm_device *dev = rcrtc->crtc.dev;
@@ -549,16 +531,41 @@ static const struct drm_crtc_funcs crtc_funcs = {
.page_flip = rcar_du_crtc_page_flip,
};
-int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index)
+int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
{
+ static const unsigned int mmio_offsets[] = {
+ DU0_REG_OFFSET, DU1_REG_OFFSET, DU2_REG_OFFSET
+ };
+
+ struct rcar_du_device *rcdu = rgrp->dev;
+ struct platform_device *pdev = to_platform_device(rcdu->dev);
struct rcar_du_crtc *rcrtc = &rcdu->crtcs[index];
struct drm_crtc *crtc = &rcrtc->crtc;
+ unsigned int irqflags;
+ char clk_name[5];
+ char *name;
+ int irq;
int ret;
- rcrtc->mmio_offset = index ? DISP2_REG_OFFSET : 0;
+ /* Get the CRTC clock. */
+ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
+ sprintf(clk_name, "du.%u", index);
+ name = clk_name;
+ } else {
+ name = NULL;
+ }
+
+ rcrtc->clock = devm_clk_get(rcdu->dev, name);
+ if (IS_ERR(rcrtc->clock)) {
+ dev_err(rcdu->dev, "no clock for CRTC %u\n", index);
+ return PTR_ERR(rcrtc->clock);
+ }
+
+ rcrtc->group = rgrp;
+ rcrtc->mmio_offset = mmio_offsets[index];
rcrtc->index = index;
rcrtc->dpms = DRM_MODE_DPMS_OFF;
- rcrtc->plane = &rcdu->planes.planes[index];
+ rcrtc->plane = &rgrp->planes.planes[index % 2];
rcrtc->plane->crtc = crtc;
@@ -568,6 +575,28 @@ int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index)
drm_crtc_helper_add(crtc, &crtc_helper_funcs);
+ /* Register the interrupt handler. */
+ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
+ irq = platform_get_irq(pdev, index);
+ irqflags = 0;
+ } else {
+ irq = platform_get_irq(pdev, 0);
+ irqflags = IRQF_SHARED;
+ }
+
+ if (irq < 0) {
+ dev_err(rcdu->dev, "no IRQ for CRTC %u\n", index);
+ return irq;
+ }
+
+ ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags,
+ dev_name(rcdu->dev), rcrtc);
+ if (ret < 0) {
+ dev_err(rcdu->dev,
+ "failed to register IRQ for CRTC %u\n", index);
+ return ret;
+ }
+
return 0;
}
@@ -580,16 +609,3 @@ void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable)
rcar_du_crtc_clr(rcrtc, DIER, DIER_VBE);
}
}
-
-void rcar_du_crtc_irq(struct rcar_du_crtc *rcrtc)
-{
- u32 status;
-
- status = rcar_du_crtc_read(rcrtc, DSSR);
- rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
-
- if (status & DSSR_VBK) {
- drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index);
- rcar_du_crtc_finish_page_flip(rcrtc);
- }
-}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 2a0365bcbd1..43e7575c700 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -15,16 +15,18 @@
#define __RCAR_DU_CRTC_H__
#include <linux/mutex.h>
+#include <linux/platform_data/rcar-du.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
-struct rcar_du_device;
+struct rcar_du_group;
struct rcar_du_plane;
struct rcar_du_crtc {
struct drm_crtc crtc;
+ struct clk *clock;
unsigned int mmio_offset;
unsigned int index;
bool started;
@@ -33,18 +35,21 @@ struct rcar_du_crtc {
unsigned int outputs;
int dpms;
+ struct rcar_du_group *group;
struct rcar_du_plane *plane;
};
-int rcar_du_crtc_create(struct rcar_du_device *rcdu, unsigned int index);
+#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
+
+int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index);
void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable);
-void rcar_du_crtc_irq(struct rcar_du_crtc *rcrtc);
void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
struct drm_file *file);
void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc);
void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
-void rcar_du_crtc_route_output(struct drm_crtc *crtc, unsigned int output);
+void rcar_du_crtc_route_output(struct drm_crtc *crtc,
+ enum rcar_du_output output);
void rcar_du_crtc_update_planes(struct drm_crtc *crtc);
#endif /* __RCAR_DU_CRTC_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index dc0fe09b2ba..0023f9719cf 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -21,6 +21,7 @@
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include "rcar_du_crtc.h"
@@ -29,74 +30,21 @@
#include "rcar_du_regs.h"
/* -----------------------------------------------------------------------------
- * Core device operations
- */
-
-/*
- * rcar_du_get - Acquire a reference to the DU
- *
- * Acquiring a reference enables the device clock and setup core registers. A
- * reference must be held before accessing any hardware registers.
- *
- * This function must be called with the DRM mode_config lock held.
- *
- * Return 0 in case of success or a negative error code otherwise.
- */
-int rcar_du_get(struct rcar_du_device *rcdu)
-{
- int ret;
-
- if (rcdu->use_count)
- goto done;
-
- /* Enable clocks before accessing the hardware. */
- ret = clk_prepare_enable(rcdu->clock);
- if (ret < 0)
- return ret;
-
- /* Enable extended features */
- rcar_du_write(rcdu, DEFR, DEFR_CODE | DEFR_DEFE);
- rcar_du_write(rcdu, DEFR2, DEFR2_CODE | DEFR2_DEFE2G);
- rcar_du_write(rcdu, DEFR3, DEFR3_CODE | DEFR3_DEFE3);
- rcar_du_write(rcdu, DEFR4, DEFR4_CODE);
- rcar_du_write(rcdu, DEFR5, DEFR5_CODE | DEFR5_DEFE5);
-
- /* Use DS1PR and DS2PR to configure planes priorities and connects the
- * superposition 0 to DU0 pins. DU1 pins will be configured dynamically.
- */
- rcar_du_write(rcdu, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS);
-
-done:
- rcdu->use_count++;
- return 0;
-}
-
-/*
- * rcar_du_put - Release a reference to the DU
- *
- * Releasing the last reference disables the device clock.
- *
- * This function must be called with the DRM mode_config lock held.
- */
-void rcar_du_put(struct rcar_du_device *rcdu)
-{
- if (--rcdu->use_count)
- return;
-
- clk_disable_unprepare(rcdu->clock);
-}
-
-/* -----------------------------------------------------------------------------
* DRM operations
*/
static int rcar_du_unload(struct drm_device *dev)
{
+ struct rcar_du_device *rcdu = dev->dev_private;
+
+ if (rcdu->fbdev)
+ drm_fbdev_cma_fini(rcdu->fbdev);
+
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
drm_vblank_cleanup(dev);
- drm_irq_uninstall(dev);
+ dev->irq_enabled = 0;
dev->dev_private = NULL;
return 0;
@@ -107,7 +55,6 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
struct platform_device *pdev = dev->platformdev;
struct rcar_du_platform_data *pdata = pdev->dev.platform_data;
struct rcar_du_device *rcdu;
- struct resource *ioarea;
struct resource *mem;
int ret;
@@ -124,35 +71,15 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
rcdu->dev = &pdev->dev;
rcdu->pdata = pdata;
+ rcdu->info = (struct rcar_du_device_info *)pdev->id_entry->driver_data;
rcdu->ddev = dev;
dev->dev_private = rcdu;
- /* I/O resources and clocks */
+ /* I/O resources */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem == NULL) {
- dev_err(&pdev->dev, "failed to get memory resource\n");
- return -EINVAL;
- }
-
- ioarea = devm_request_mem_region(&pdev->dev, mem->start,
- resource_size(mem), pdev->name);
- if (ioarea == NULL) {
- dev_err(&pdev->dev, "failed to request memory region\n");
- return -EBUSY;
- }
-
- rcdu->mmio = devm_ioremap_nocache(&pdev->dev, ioarea->start,
- resource_size(ioarea));
- if (rcdu->mmio == NULL) {
- dev_err(&pdev->dev, "failed to remap memory resource\n");
- return -ENOMEM;
- }
-
- rcdu->clock = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(rcdu->clock)) {
- dev_err(&pdev->dev, "failed to get clock\n");
- return -ENOENT;
- }
+ rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(rcdu->mmio))
+ return PTR_ERR(rcdu->mmio);
/* DRM/KMS objects */
ret = rcar_du_modeset_init(rcdu);
@@ -161,18 +88,14 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
goto done;
}
- /* IRQ and vblank handling */
+ /* vblank handling */
ret = drm_vblank_init(dev, (1 << rcdu->num_crtcs) - 1);
if (ret < 0) {
dev_err(&pdev->dev, "failed to initialize vblank\n");
goto done;
}
- ret = drm_irq_install(dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to install IRQ handler\n");
- goto done;
- }
+ dev->irq_enabled = 1;
platform_set_drvdata(pdev, rcdu);
@@ -188,20 +111,15 @@ static void rcar_du_preclose(struct drm_device *dev, struct drm_file *file)
struct rcar_du_device *rcdu = dev->dev_private;
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i)
+ for (i = 0; i < rcdu->num_crtcs; ++i)
rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file);
}
-static irqreturn_t rcar_du_irq(int irq, void *arg)
+static void rcar_du_lastclose(struct drm_device *dev)
{
- struct drm_device *dev = arg;
struct rcar_du_device *rcdu = dev->dev_private;
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i)
- rcar_du_crtc_irq(&rcdu->crtcs[i]);
- return IRQ_HANDLED;
+ drm_fbdev_cma_restore_mode(rcdu->fbdev);
}
static int rcar_du_enable_vblank(struct drm_device *dev, int crtc)
@@ -230,18 +148,16 @@ static const struct file_operations rcar_du_fops = {
#endif
.poll = drm_poll,
.read = drm_read,
- .fasync = drm_fasync,
.llseek = no_llseek,
.mmap = drm_gem_cma_mmap,
};
static struct drm_driver rcar_du_driver = {
- .driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM | DRIVER_MODESET
- | DRIVER_PRIME,
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
.load = rcar_du_load,
.unload = rcar_du_unload,
.preclose = rcar_du_preclose,
- .irq_handler = rcar_du_irq,
+ .lastclose = rcar_du_lastclose,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = rcar_du_enable_vblank,
.disable_vblank = rcar_du_disable_vblank,
@@ -258,7 +174,7 @@ static struct drm_driver rcar_du_driver = {
.gem_prime_mmap = drm_gem_cma_prime_mmap,
.dumb_create = rcar_du_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_cma_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.fops = &rcar_du_fops,
.name = "rcar-du",
.desc = "Renesas R-Car Display Unit",
@@ -313,6 +229,57 @@ static int rcar_du_remove(struct platform_device *pdev)
return 0;
}
+static const struct rcar_du_device_info rcar_du_r8a7779_info = {
+ .features = 0,
+ .num_crtcs = 2,
+ .routes = {
+ /* R8A7779 has two RGB outputs and one (currently unsupported)
+ * TCON output.
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(0),
+ .encoder_type = DRM_MODE_ENCODER_NONE,
+ },
+ [RCAR_DU_OUTPUT_DPAD1] = {
+ .possible_crtcs = BIT(1) | BIT(0),
+ .encoder_type = DRM_MODE_ENCODER_NONE,
+ },
+ },
+ .num_lvds = 0,
+};
+
+static const struct rcar_du_device_info rcar_du_r8a7790_info = {
+ .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK | RCAR_DU_FEATURE_ALIGN_128B
+ | RCAR_DU_FEATURE_DEFR8,
+ .num_crtcs = 3,
+ .routes = {
+ /* R8A7790 has one RGB output, two LVDS outputs and one
+ * (currently unsupported) TCON output.
+ */
+ [RCAR_DU_OUTPUT_DPAD0] = {
+ .possible_crtcs = BIT(2) | BIT(1) | BIT(0),
+ .encoder_type = DRM_MODE_ENCODER_NONE,
+ },
+ [RCAR_DU_OUTPUT_LVDS0] = {
+ .possible_crtcs = BIT(0),
+ .encoder_type = DRM_MODE_ENCODER_LVDS,
+ },
+ [RCAR_DU_OUTPUT_LVDS1] = {
+ .possible_crtcs = BIT(2) | BIT(1),
+ .encoder_type = DRM_MODE_ENCODER_LVDS,
+ },
+ },
+ .num_lvds = 2,
+};
+
+static const struct platform_device_id rcar_du_id_table[] = {
+ { "rcar-du-r8a7779", (kernel_ulong_t)&rcar_du_r8a7779_info },
+ { "rcar-du-r8a7790", (kernel_ulong_t)&rcar_du_r8a7790_info },
+ { }
+};
+
+MODULE_DEVICE_TABLE(platform, rcar_du_id_table);
+
static struct platform_driver rcar_du_platform_driver = {
.probe = rcar_du_probe,
.remove = rcar_du_remove,
@@ -321,6 +288,7 @@ static struct platform_driver rcar_du_platform_driver = {
.name = "rcar-du",
.pm = &rcar_du_pm_ops,
},
+ .id_table = rcar_du_id_table,
};
module_platform_driver(rcar_du_platform_driver);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 193cc59d495..65d2d636b00 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -15,43 +15,74 @@
#define __RCAR_DU_DRV_H__
#include <linux/kernel.h>
-#include <linux/mutex.h>
#include <linux/platform_data/rcar-du.h>
#include "rcar_du_crtc.h"
-#include "rcar_du_plane.h"
+#include "rcar_du_group.h"
struct clk;
struct device;
struct drm_device;
+struct drm_fbdev_cma;
+struct rcar_du_device;
+struct rcar_du_lvdsenc;
+
+#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK (1 << 0) /* Per-CRTC IRQ and clock */
+#define RCAR_DU_FEATURE_ALIGN_128B (1 << 1) /* Align pitches to 128 bytes */
+#define RCAR_DU_FEATURE_DEFR8 (1 << 2) /* Has DEFR8 register */
+
+/*
+ * struct rcar_du_output_routing - Output routing specification
+ * @possible_crtcs: bitmask of possible CRTCs for the output
+ * @encoder_type: DRM type of the internal encoder associated with the output
+ *
+ * The DU has 5 possible outputs (DPAD0/1, LVDS0/1, TCON). Output routing data
+ * specify the valid SoC outputs, which CRTCs can drive the output, and the type
+ * of in-SoC encoder for the output.
+ */
+struct rcar_du_output_routing {
+ unsigned int possible_crtcs;
+ unsigned int encoder_type;
+};
+
+/*
+ * struct rcar_du_device_info - DU model-specific information
+ * @features: device features (RCAR_DU_FEATURE_*)
+ * @num_crtcs: total number of CRTCs
+ * @routes: array of CRTC to output routes, indexed by output (RCAR_DU_OUTPUT_*)
+ * @num_lvds: number of internal LVDS encoders
+ */
+struct rcar_du_device_info {
+ unsigned int features;
+ unsigned int num_crtcs;
+ struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX];
+ unsigned int num_lvds;
+};
struct rcar_du_device {
struct device *dev;
const struct rcar_du_platform_data *pdata;
+ const struct rcar_du_device_info *info;
void __iomem *mmio;
- struct clk *clock;
- unsigned int use_count;
struct drm_device *ddev;
+ struct drm_fbdev_cma *fbdev;
- struct rcar_du_crtc crtcs[2];
- unsigned int used_crtcs;
+ struct rcar_du_crtc crtcs[3];
unsigned int num_crtcs;
- struct {
- struct rcar_du_plane planes[RCAR_DU_NUM_SW_PLANES];
- unsigned int free;
- struct mutex lock;
+ struct rcar_du_group groups[2];
- struct drm_property *alpha;
- struct drm_property *colorkey;
- struct drm_property *zpos;
- } planes;
+ unsigned int dpad0_source;
+ struct rcar_du_lvdsenc *lvds[2];
};
-int rcar_du_get(struct rcar_du_device *rcdu);
-void rcar_du_put(struct rcar_du_device *rcdu);
+static inline bool rcar_du_has(struct rcar_du_device *rcdu,
+ unsigned int feature)
+{
+ return rcdu->info->features & feature;
+}
static inline u32 rcar_du_read(struct rcar_du_device *rcdu, u32 reg)
{
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
new file mode 100644
index 00000000000..3daa7a168dc
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -0,0 +1,202 @@
+/*
+ * rcar_du_encoder.c -- R-Car Display Unit Encoder
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/export.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
+#include "rcar_du_kms.h"
+#include "rcar_du_lvdscon.h"
+#include "rcar_du_lvdsenc.h"
+#include "rcar_du_vgacon.h"
+
+/* -----------------------------------------------------------------------------
+ * Common connector functions
+ */
+
+struct drm_encoder *
+rcar_du_connector_best_encoder(struct drm_connector *connector)
+{
+ struct rcar_du_connector *rcon = to_rcar_connector(connector);
+
+ return &rcon->encoder->encoder;
+}
+
+/* -----------------------------------------------------------------------------
+ * Encoder
+ */
+
+static void rcar_du_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+
+ if (renc->lvds)
+ rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc, mode);
+}
+
+static bool rcar_du_encoder_mode_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+ const struct drm_display_mode *panel_mode;
+ struct drm_device *dev = encoder->dev;
+ struct drm_connector *connector;
+ bool found = false;
+
+ /* DAC encoders currently have no restriction on the mode. */
+ if (encoder->encoder_type == DRM_MODE_ENCODER_DAC)
+ return true;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_dbg(dev->dev, "mode_fixup: no connector found\n");
+ return false;
+ }
+
+ if (list_empty(&connector->modes)) {
+ dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
+ return false;
+ }
+
+ panel_mode = list_first_entry(&connector->modes,
+ struct drm_display_mode, head);
+
+ /* We're not allowed to modify the resolution. */
+ if (mode->hdisplay != panel_mode->hdisplay ||
+ mode->vdisplay != panel_mode->vdisplay)
+ return false;
+
+ /* The flat panel mode is fixed, just copy it to the adjusted mode. */
+ drm_mode_copy(adjusted_mode, panel_mode);
+
+ /* The internal LVDS encoder has a clock frequency operating range of
+ * 30MHz to 150MHz. Clamp the clock accordingly.
+ */
+ if (renc->lvds)
+ adjusted_mode->clock = clamp(adjusted_mode->clock,
+ 30000, 150000);
+
+ return true;
+}
+
+static void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder)
+{
+ struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+
+ if (renc->lvds)
+ rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc,
+ DRM_MODE_DPMS_OFF);
+}
+
+static void rcar_du_encoder_mode_commit(struct drm_encoder *encoder)
+{
+ struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+
+ if (renc->lvds)
+ rcar_du_lvdsenc_dpms(renc->lvds, encoder->crtc,
+ DRM_MODE_DPMS_ON);
+}
+
+static void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+
+ rcar_du_crtc_route_output(encoder->crtc, renc->output);
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+ .dpms = rcar_du_encoder_dpms,
+ .mode_fixup = rcar_du_encoder_mode_fixup,
+ .prepare = rcar_du_encoder_mode_prepare,
+ .commit = rcar_du_encoder_mode_commit,
+ .mode_set = rcar_du_encoder_mode_set,
+};
+
+static const struct drm_encoder_funcs encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+int rcar_du_encoder_init(struct rcar_du_device *rcdu,
+ enum rcar_du_encoder_type type,
+ enum rcar_du_output output,
+ const struct rcar_du_encoder_data *data)
+{
+ struct rcar_du_encoder *renc;
+ unsigned int encoder_type;
+ int ret;
+
+ renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
+ if (renc == NULL)
+ return -ENOMEM;
+
+ renc->output = output;
+
+ switch (output) {
+ case RCAR_DU_OUTPUT_LVDS0:
+ renc->lvds = rcdu->lvds[0];
+ break;
+
+ case RCAR_DU_OUTPUT_LVDS1:
+ renc->lvds = rcdu->lvds[1];
+ break;
+
+ default:
+ break;
+ }
+
+ switch (type) {
+ case RCAR_DU_ENCODER_VGA:
+ encoder_type = DRM_MODE_ENCODER_DAC;
+ break;
+ case RCAR_DU_ENCODER_LVDS:
+ encoder_type = DRM_MODE_ENCODER_LVDS;
+ break;
+ case RCAR_DU_ENCODER_NONE:
+ default:
+ /* No external encoder, use the internal encoder type. */
+ encoder_type = rcdu->info->routes[output].encoder_type;
+ break;
+ }
+
+ ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
+ encoder_type);
+ if (ret < 0)
+ return ret;
+
+ drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
+
+ switch (encoder_type) {
+ case DRM_MODE_ENCODER_LVDS:
+ return rcar_du_lvds_connector_init(rcdu, renc,
+ &data->connector.lvds.panel);
+
+ case DRM_MODE_ENCODER_DAC:
+ return rcar_du_vga_connector_init(rcdu, renc);
+
+ default:
+ return -EINVAL;
+ }
+}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
new file mode 100644
index 00000000000..0e5a65e45d0
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -0,0 +1,49 @@
+/*
+ * rcar_du_encoder.h -- R-Car Display Unit Encoder
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_ENCODER_H__
+#define __RCAR_DU_ENCODER_H__
+
+#include <linux/platform_data/rcar-du.h>
+
+#include <drm/drm_crtc.h>
+
+struct rcar_du_device;
+struct rcar_du_lvdsenc;
+
+struct rcar_du_encoder {
+ struct drm_encoder encoder;
+ enum rcar_du_output output;
+ struct rcar_du_lvdsenc *lvds;
+};
+
+#define to_rcar_encoder(e) \
+ container_of(e, struct rcar_du_encoder, encoder)
+
+struct rcar_du_connector {
+ struct drm_connector connector;
+ struct rcar_du_encoder *encoder;
+};
+
+#define to_rcar_connector(c) \
+ container_of(c, struct rcar_du_connector, connector)
+
+struct drm_encoder *
+rcar_du_connector_best_encoder(struct drm_connector *connector);
+
+int rcar_du_encoder_init(struct rcar_du_device *rcdu,
+ enum rcar_du_encoder_type type,
+ enum rcar_du_output output,
+ const struct rcar_du_encoder_data *data);
+
+#endif /* __RCAR_DU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
new file mode 100644
index 00000000000..eb53cd97e8c
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -0,0 +1,187 @@
+/*
+ * rcar_du_group.c -- R-Car Display Unit Channels Pair
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+/*
+ * The R8A7779 DU is split in per-CRTC resources (scan-out engine, blending
+ * unit, timings generator, ...) and device-global resources (start/stop
+ * control, planes, ...) shared between the two CRTCs.
+ *
+ * The R8A7790 introduced a third CRTC with its own set of global resources.
+ * This would be modeled as two separate DU device instances if it weren't for
+ * a handful of resources that are shared between the three CRTCs (mostly
+ * related to input and output routing). For this reason the R8A7790 DU must be
+ * modeled as a single device with three CRTCs, two sets of "semi-global"
+ * resources, and a few device-global resources.
+ *
+ * The rcar_du_group object is a driver-specific object, without any real
+ * counterpart in the DU documentation, that models those semi-global resources.
+ */
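An editorial sketch, not part of the patch: with this model, CRTCs map to groups in pairs, and each group has its own register block (group 0 reuses the DU0 block at offset 0x00000, group 1 the DU2 block at 0x40000). The helper name below is hypothetical; the same mapping appears later in rcar_du_modeset_init().

/* Hypothetical illustration of the CRTC-to-group mapping described above. */
static struct rcar_du_group *
example_crtc_to_group(struct rcar_du_device *rcdu, unsigned int crtc_index)
{
	/* Two CRTCs share one group: CRTC 0/1 -> group 0, CRTC 2 -> group 1. */
	return &rcdu->groups[crtc_index / 2];
}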
+
+#include <linux/clk.h>
+#include <linux/io.h>
+
+#include "rcar_du_drv.h"
+#include "rcar_du_group.h"
+#include "rcar_du_regs.h"
+
+u32 rcar_du_group_read(struct rcar_du_group *rgrp, u32 reg)
+{
+ return rcar_du_read(rgrp->dev, rgrp->mmio_offset + reg);
+}
+
+void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data)
+{
+ rcar_du_write(rgrp->dev, rgrp->mmio_offset + reg, data);
+}
+
+static void rcar_du_group_setup_defr8(struct rcar_du_group *rgrp)
+{
+ u32 defr8 = DEFR8_CODE | DEFR8_DEFE8;
+
+ if (!rcar_du_has(rgrp->dev, RCAR_DU_FEATURE_DEFR8))
+ return;
+
+ /* The DEFR8 register for the first group also controls RGB output
+ * routing to DPAD0.
+ */
+ if (rgrp->index == 0)
+ defr8 |= DEFR8_DRGBS_DU(rgrp->dev->dpad0_source);
+
+ rcar_du_group_write(rgrp, DEFR8, defr8);
+}
+
+static void rcar_du_group_setup(struct rcar_du_group *rgrp)
+{
+ /* Enable extended features */
+ rcar_du_group_write(rgrp, DEFR, DEFR_CODE | DEFR_DEFE);
+ rcar_du_group_write(rgrp, DEFR2, DEFR2_CODE | DEFR2_DEFE2G);
+ rcar_du_group_write(rgrp, DEFR3, DEFR3_CODE | DEFR3_DEFE3);
+ rcar_du_group_write(rgrp, DEFR4, DEFR4_CODE);
+ rcar_du_group_write(rgrp, DEFR5, DEFR5_CODE | DEFR5_DEFE5);
+
+ rcar_du_group_setup_defr8(rgrp);
+
+ /* Use DS1PR and DS2PR to configure plane priorities and connect
+ * superposition 0 to the DU0 pins. DU1 pins will be configured dynamically.
+ */
+ rcar_du_group_write(rgrp, DORCR, DORCR_PG1D_DS1 | DORCR_DPRS);
+}
+
+/*
+ * rcar_du_group_get - Acquire a reference to the DU channels group
+ *
+ * Acquiring the first reference sets up the core registers. A reference must be held
+ * before accessing any hardware registers.
+ *
+ * This function must be called with the DRM mode_config lock held.
+ *
+ * Return 0 in case of success or a negative error code otherwise.
+ */
+int rcar_du_group_get(struct rcar_du_group *rgrp)
+{
+ if (rgrp->use_count)
+ goto done;
+
+ rcar_du_group_setup(rgrp);
+
+done:
+ rgrp->use_count++;
+ return 0;
+}
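A usage sketch, not part of the patch: group register accesses are expected to be bracketed by get/put, with the caller already holding the DRM mode_config lock; the first get runs rcar_du_group_setup(). The example_touch_group() name is hypothetical.

/* Hypothetical caller, assuming the mode_config lock is held. */
static void example_touch_group(struct rcar_du_group *rgrp)
{
	rcar_du_group_get(rgrp);	/* first reference sets up core registers */
	rcar_du_group_write(rgrp, DEFR, DEFR_CODE | DEFR_DEFE);
	rcar_du_group_put(rgrp);	/* drop the reference when done */
}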
+
+/*
+ * rcar_du_group_put - Release a reference to the DU channels group
+ *
+ * This function must be called with the DRM mode_config lock held.
+ */
+void rcar_du_group_put(struct rcar_du_group *rgrp)
+{
+ --rgrp->use_count;
+}
+
+static void __rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
+{
+ rcar_du_group_write(rgrp, DSYSR,
+ (rcar_du_group_read(rgrp, DSYSR) & ~(DSYSR_DRES | DSYSR_DEN)) |
+ (start ? DSYSR_DEN : DSYSR_DRES));
+}
+
+void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start)
+{
+ /* Many of the configuration bits are only updated when the display
+ * reset (DRES) bit in DSYSR is set to 1, disabling *both* CRTCs. Some
+ * of those bits could be pre-configured, but others (especially the
+ * bits related to plane assignment to display timing controllers) need
+ * to be modified at runtime.
+ *
+ * Restart the display controller if a start is requested. Sorry for the
+ * flicker. It should be possible to move most of the "DRES-update" bits
+ * setup to driver initialization time and minimize the number of cases
+ * when the display controller will have to be restarted.
+ */
+ if (start) {
+ if (rgrp->used_crtcs++ != 0)
+ __rcar_du_group_start_stop(rgrp, false);
+ __rcar_du_group_start_stop(rgrp, true);
+ } else {
+ if (--rgrp->used_crtcs == 0)
+ __rcar_du_group_start_stop(rgrp, false);
+ }
+}
+
+void rcar_du_group_restart(struct rcar_du_group *rgrp)
+{
+ __rcar_du_group_start_stop(rgrp, false);
+ __rcar_du_group_start_stop(rgrp, true);
+}
+
+static int rcar_du_set_dpad0_routing(struct rcar_du_device *rcdu)
+{
+ int ret;
+
+ /* RGB output routing to DPAD0 is configured in the DEFR8 register of
+ * the first group. As this function can be called with the DU0 and DU1
+ * CRTCs disabled, we need to enable the first group clock before
+ * accessing the register.
+ */
+ ret = clk_prepare_enable(rcdu->crtcs[0].clock);
+ if (ret < 0)
+ return ret;
+
+ rcar_du_group_setup_defr8(&rcdu->groups[0]);
+
+ clk_disable_unprepare(rcdu->crtcs[0].clock);
+
+ return 0;
+}
+
+int rcar_du_group_set_routing(struct rcar_du_group *rgrp)
+{
+ struct rcar_du_crtc *crtc0 = &rgrp->dev->crtcs[rgrp->index * 2];
+ u32 dorcr = rcar_du_group_read(rgrp, DORCR);
+
+ dorcr &= ~(DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_MASK);
+
+ /* Set the DPAD1 pin sources. Select CRTC 0 if explicitly requested and
+ * CRTC 1 in all other cases to avoid cloning CRTC 0 to DPAD0 and DPAD1
+ * by default.
+ */
+ if (crtc0->outputs & BIT(RCAR_DU_OUTPUT_DPAD1))
+ dorcr |= DORCR_PG2D_DS1;
+ else
+ dorcr |= DORCR_PG2T | DORCR_DK2S | DORCR_PG2D_DS2;
+
+ rcar_du_group_write(rgrp, DORCR, dorcr);
+
+ return rcar_du_set_dpad0_routing(rgrp->dev);
+}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.h b/drivers/gpu/drm/rcar-du/rcar_du_group.h
new file mode 100644
index 00000000000..5025930972e
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.h
@@ -0,0 +1,50 @@
+/*
+ * rcar_du_group.h -- R-Car Display Unit Planes and CRTCs Group
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_GROUP_H__
+#define __RCAR_DU_GROUP_H__
+
+#include "rcar_du_plane.h"
+
+struct rcar_du_device;
+
+/*
+ * struct rcar_du_group - CRTCs and planes group
+ * @dev: the DU device
+ * @mmio_offset: registers offset in the device memory map
+ * @index: group index
+ * @use_count: number of users of the group (rcar_du_group_(get|put))
+ * @used_crtcs: number of CRTCs currently in use
+ * @planes: planes handled by the group
+ */
+struct rcar_du_group {
+ struct rcar_du_device *dev;
+ unsigned int mmio_offset;
+ unsigned int index;
+
+ unsigned int use_count;
+ unsigned int used_crtcs;
+
+ struct rcar_du_planes planes;
+};
+
+u32 rcar_du_group_read(struct rcar_du_group *rgrp, u32 reg);
+void rcar_du_group_write(struct rcar_du_group *rgrp, u32 reg, u32 data);
+
+int rcar_du_group_get(struct rcar_du_group *rgrp);
+void rcar_du_group_put(struct rcar_du_group *rgrp);
+void rcar_du_group_start_stop(struct rcar_du_group *rgrp, bool start);
+void rcar_du_group_restart(struct rcar_du_group *rgrp);
+int rcar_du_group_set_routing(struct rcar_du_group *rgrp);
+
+#endif /* __RCAR_DU_GROUP_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index d30c2e29bee..b31ac080c4a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -19,10 +19,10 @@
#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
#include "rcar_du_kms.h"
-#include "rcar_du_lvds.h"
+#include "rcar_du_lvdsenc.h"
#include "rcar_du_regs.h"
-#include "rcar_du_vga.h"
/* -----------------------------------------------------------------------------
* Format helpers
@@ -106,46 +106,24 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
}
/* -----------------------------------------------------------------------------
- * Common connector and encoder functions
- */
-
-struct drm_encoder *
-rcar_du_connector_best_encoder(struct drm_connector *connector)
-{
- struct rcar_du_connector *rcon = to_rcar_connector(connector);
-
- return &rcon->encoder->encoder;
-}
-
-void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder)
-{
-}
-
-void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
-
- rcar_du_crtc_route_output(encoder->crtc, renc->output);
-}
-
-void rcar_du_encoder_mode_commit(struct drm_encoder *encoder)
-{
-}
-
-/* -----------------------------------------------------------------------------
* Frame buffer
*/
int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
+ struct rcar_du_device *rcdu = dev->dev_private;
unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
unsigned int align;
- /* The pitch must be aligned to a 16 pixels boundary. */
- align = 16 * args->bpp / 8;
+ /* The R8A7779 DU requires a 16-pixel pitch alignment as documented,
+ * but the R8A7790 DU seems to require a 128-byte pitch alignment.
+ */
+ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B))
+ align = 128;
+ else
+ align = 16 * args->bpp / 8;
+
args->pitch = roundup(max(args->pitch, min_pitch), align);
return drm_gem_cma_dumb_create(file, dev, args);
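A worked example, not part of the patch, of the pitch rounding above for a hypothetical 650-pixel-wide, 32bpp dumb buffer with no caller-supplied pitch: min_pitch = 650 * 4 = 2600 bytes, which rounds up to 2624 with the 16-pixel (64-byte) alignment and to 2688 with the 128-byte alignment.

/* Hypothetical helper mirroring the alignment logic (it ignores a
 * caller-supplied pitch, which the real code honours via max()).
 */
static unsigned int example_dumb_pitch(unsigned int width, unsigned int bpp,
				       bool align_128b)
{
	unsigned int min_pitch = DIV_ROUND_UP(width * bpp, 8);
	unsigned int align = align_128b ? 128 : 16 * bpp / 8;

	return roundup(min_pitch, align);	/* 2624 or 2688 for 650 x 32bpp */
}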
@@ -155,6 +133,7 @@ static struct drm_framebuffer *
rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
struct drm_mode_fb_cmd2 *mode_cmd)
{
+ struct rcar_du_device *rcdu = dev->dev_private;
const struct rcar_du_format_info *format;
unsigned int align;
@@ -165,7 +144,10 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-EINVAL);
}
- align = 16 * format->bpp / 8;
+ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_ALIGN_128B))
+ align = 128;
+ else
+ align = 16 * format->bpp / 8;
if (mode_cmd->pitches[0] & (align - 1) ||
mode_cmd->pitches[0] >= 8192) {
@@ -185,81 +167,124 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return drm_fb_cma_create(dev, file_priv, mode_cmd);
}
+static void rcar_du_output_poll_changed(struct drm_device *dev)
+{
+ struct rcar_du_device *rcdu = dev->dev_private;
+
+ drm_fbdev_cma_hotplug_event(rcdu->fbdev);
+}
+
static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
.fb_create = rcar_du_fb_create,
+ .output_poll_changed = rcar_du_output_poll_changed,
};
int rcar_du_modeset_init(struct rcar_du_device *rcdu)
{
+ static const unsigned int mmio_offsets[] = {
+ DU0_REG_OFFSET, DU2_REG_OFFSET
+ };
+
struct drm_device *dev = rcdu->ddev;
struct drm_encoder *encoder;
+ struct drm_fbdev_cma *fbdev;
+ unsigned int num_groups;
unsigned int i;
int ret;
- drm_mode_config_init(rcdu->ddev);
+ drm_mode_config_init(dev);
- rcdu->ddev->mode_config.min_width = 0;
- rcdu->ddev->mode_config.min_height = 0;
- rcdu->ddev->mode_config.max_width = 4095;
- rcdu->ddev->mode_config.max_height = 2047;
- rcdu->ddev->mode_config.funcs = &rcar_du_mode_config_funcs;
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = 4095;
+ dev->mode_config.max_height = 2047;
+ dev->mode_config.funcs = &rcar_du_mode_config_funcs;
- ret = rcar_du_plane_init(rcdu);
- if (ret < 0)
- return ret;
+ rcdu->num_crtcs = rcdu->info->num_crtcs;
+
+ /* Initialize the groups. */
+ num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2);
+
+ for (i = 0; i < num_groups; ++i) {
+ struct rcar_du_group *rgrp = &rcdu->groups[i];
+
+ rgrp->dev = rcdu;
+ rgrp->mmio_offset = mmio_offsets[i];
+ rgrp->index = i;
+
+ ret = rcar_du_planes_init(rgrp);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Create the CRTCs. */
+ for (i = 0; i < rcdu->num_crtcs; ++i) {
+ struct rcar_du_group *rgrp = &rcdu->groups[i / 2];
- for (i = 0; i < ARRAY_SIZE(rcdu->crtcs); ++i) {
- ret = rcar_du_crtc_create(rcdu, i);
+ ret = rcar_du_crtc_create(rgrp, i);
if (ret < 0)
return ret;
}
- rcdu->used_crtcs = 0;
- rcdu->num_crtcs = i;
+ /* Initialize the encoders. */
+ ret = rcar_du_lvdsenc_init(rcdu);
+ if (ret < 0)
+ return ret;
for (i = 0; i < rcdu->pdata->num_encoders; ++i) {
const struct rcar_du_encoder_data *pdata =
&rcdu->pdata->encoders[i];
+ const struct rcar_du_output_routing *route =
+ &rcdu->info->routes[pdata->output];
+
+ if (pdata->type == RCAR_DU_ENCODER_UNUSED)
+ continue;
- if (pdata->output >= ARRAY_SIZE(rcdu->crtcs)) {
+ if (pdata->output >= RCAR_DU_OUTPUT_MAX ||
+ route->possible_crtcs == 0) {
dev_warn(rcdu->dev,
"encoder %u references unexisting output %u, skipping\n",
i, pdata->output);
continue;
}
- switch (pdata->encoder) {
- case RCAR_DU_ENCODER_VGA:
- rcar_du_vga_init(rcdu, &pdata->u.vga, pdata->output);
- break;
-
- case RCAR_DU_ENCODER_LVDS:
- rcar_du_lvds_init(rcdu, &pdata->u.lvds, pdata->output);
- break;
-
- default:
- break;
- }
+ rcar_du_encoder_init(rcdu, pdata->type, pdata->output, pdata);
}
- /* Set the possible CRTCs and possible clones. All encoders can be
- * driven by the CRTC associated with the output they're connected to,
- * as well as by CRTC 0.
+ /* Set the possible CRTCs and possible clones. There's always at least
+ * one way for all encoders to clone each other, so set all bits in the
+ * possible clones field.
*/
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
+ const struct rcar_du_output_routing *route =
+ &rcdu->info->routes[renc->output];
- encoder->possible_crtcs = (1 << 0) | (1 << renc->output);
- encoder->possible_clones = 1 << 0;
+ encoder->possible_crtcs = route->possible_crtcs;
+ encoder->possible_clones = (1 << rcdu->pdata->num_encoders) - 1;
}
- ret = rcar_du_plane_register(rcdu);
- if (ret < 0)
- return ret;
+ /* Now that the CRTCs have been initialized, register the planes. */
+ for (i = 0; i < num_groups; ++i) {
+ ret = rcar_du_planes_register(&rcdu->groups[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ drm_kms_helper_poll_init(dev);
+
+ drm_helper_disable_unused_functions(dev);
+
+ fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc,
+ dev->mode_config.num_connector);
+ if (IS_ERR(fbdev))
+ return PTR_ERR(fbdev);
- drm_kms_helper_poll_init(rcdu->ddev);
+#ifndef CONFIG_FRAMEBUFFER_CONSOLE
+ drm_fbdev_cma_restore_mode(fbdev);
+#endif
- drm_helper_disable_unused_functions(rcdu->ddev);
+ rcdu->fbdev = fbdev;
return 0;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
index dba47226348..5750e6af565 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
@@ -16,8 +16,9 @@
#include <linux/types.h>
-#include <drm/drm_crtc.h>
-
+struct drm_file;
+struct drm_device;
+struct drm_mode_create_dumb;
struct rcar_du_device;
struct rcar_du_format_info {
@@ -28,32 +29,8 @@ struct rcar_du_format_info {
unsigned int edf;
};
-struct rcar_du_encoder {
- struct drm_encoder encoder;
- unsigned int output;
-};
-
-#define to_rcar_encoder(e) \
- container_of(e, struct rcar_du_encoder, encoder)
-
-struct rcar_du_connector {
- struct drm_connector connector;
- struct rcar_du_encoder *encoder;
-};
-
-#define to_rcar_connector(c) \
- container_of(c, struct rcar_du_connector, connector)
-
const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc);
-struct drm_encoder *
-rcar_du_connector_best_encoder(struct drm_connector *connector);
-void rcar_du_encoder_mode_prepare(struct drm_encoder *encoder);
-void rcar_du_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-void rcar_du_encoder_mode_commit(struct drm_encoder *encoder);
-
int rcar_du_modeset_init(struct rcar_du_device *rcdu);
int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvds.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 7aefe7267e1..4f3ba93cd91 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -1,5 +1,5 @@
/*
- * rcar_du_lvds.c -- R-Car Display Unit LVDS Encoder and Connector
+ * rcar_du_lvdscon.c -- R-Car Display Unit LVDS Connector
*
* Copyright (C) 2013 Renesas Corporation
*
@@ -16,8 +16,9 @@
#include <drm/drm_crtc_helper.h>
#include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
#include "rcar_du_kms.h"
-#include "rcar_du_lvds.h"
+#include "rcar_du_lvdscon.h"
struct rcar_du_lvds_connector {
struct rcar_du_connector connector;
@@ -28,13 +29,10 @@ struct rcar_du_lvds_connector {
#define to_rcar_lvds_connector(c) \
container_of(c, struct rcar_du_lvds_connector, connector.connector)
-/* -----------------------------------------------------------------------------
- * Connector
- */
-
static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector)
{
- struct rcar_du_lvds_connector *lvdscon = to_rcar_lvds_connector(connector);
+ struct rcar_du_lvds_connector *lvdscon =
+ to_rcar_lvds_connector(connector);
struct drm_display_mode *mode;
mode = drm_mode_create(connector->dev);
@@ -90,9 +88,9 @@ static const struct drm_connector_funcs connector_funcs = {
.destroy = rcar_du_lvds_connector_destroy,
};
-static int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc,
- const struct rcar_du_panel_data *panel)
+int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
+ struct rcar_du_encoder *renc,
+ const struct rcar_du_panel_data *panel)
{
struct rcar_du_lvds_connector *lvdscon;
struct drm_connector *connector;
@@ -131,86 +129,3 @@ static int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
return 0;
}
-
-/* -----------------------------------------------------------------------------
- * Encoder
- */
-
-static void rcar_du_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-}
-
-static bool rcar_du_lvds_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- const struct drm_display_mode *panel_mode;
- struct drm_device *dev = encoder->dev;
- struct drm_connector *connector;
- bool found = false;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- dev_dbg(dev->dev, "mode_fixup: no connector found\n");
- return false;
- }
-
- if (list_empty(&connector->modes)) {
- dev_dbg(dev->dev, "mode_fixup: empty modes list\n");
- return false;
- }
-
- panel_mode = list_first_entry(&connector->modes,
- struct drm_display_mode, head);
-
- /* We're not allowed to modify the resolution. */
- if (mode->hdisplay != panel_mode->hdisplay ||
- mode->vdisplay != panel_mode->vdisplay)
- return false;
-
- /* The flat panel mode is fixed, just copy it to the adjusted mode. */
- drm_mode_copy(adjusted_mode, panel_mode);
-
- return true;
-}
-
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
- .dpms = rcar_du_lvds_encoder_dpms,
- .mode_fixup = rcar_du_lvds_encoder_mode_fixup,
- .prepare = rcar_du_encoder_mode_prepare,
- .commit = rcar_du_encoder_mode_commit,
- .mode_set = rcar_du_encoder_mode_set,
-};
-
-static const struct drm_encoder_funcs encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-int rcar_du_lvds_init(struct rcar_du_device *rcdu,
- const struct rcar_du_encoder_lvds_data *data,
- unsigned int output)
-{
- struct rcar_du_encoder *renc;
- int ret;
-
- renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
- if (renc == NULL)
- return -ENOMEM;
-
- renc->output = output;
-
- ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
- DRM_MODE_ENCODER_LVDS);
- if (ret < 0)
- return ret;
-
- drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
-
- return rcar_du_lvds_connector_init(rcdu, renc, &data->panel);
-}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvds.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
index b47f8328e10..bff8683699c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvds.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
@@ -1,5 +1,5 @@
/*
- * rcar_du_lvds.h -- R-Car Display Unit LVDS Encoder and Connector
+ * rcar_du_lvdscon.h -- R-Car Display Unit LVDS Connector
*
* Copyright (C) 2013 Renesas Corporation
*
@@ -11,14 +11,15 @@
* (at your option) any later version.
*/
-#ifndef __RCAR_DU_LVDS_H__
-#define __RCAR_DU_LVDS_H__
+#ifndef __RCAR_DU_LVDSCON_H__
+#define __RCAR_DU_LVDSCON_H__
struct rcar_du_device;
-struct rcar_du_encoder_lvds_data;
+struct rcar_du_encoder;
+struct rcar_du_panel_data;
-int rcar_du_lvds_init(struct rcar_du_device *rcdu,
- const struct rcar_du_encoder_lvds_data *data,
- unsigned int output);
+int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
+ struct rcar_du_encoder *renc,
+ const struct rcar_du_panel_data *panel);
-#endif /* __RCAR_DU_LVDS_H__ */
+#endif /* __RCAR_DU_LVDSCON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
new file mode 100644
index 00000000000..a0f6a178192
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -0,0 +1,196 @@
+/*
+ * rcar_du_lvdsenc.c -- R-Car Display Unit LVDS Encoder
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
+#include "rcar_du_lvdsenc.h"
+#include "rcar_lvds_regs.h"
+
+struct rcar_du_lvdsenc {
+ struct rcar_du_device *dev;
+
+ unsigned int index;
+ void __iomem *mmio;
+ struct clk *clock;
+ int dpms;
+
+ enum rcar_lvds_input input;
+};
+
+static void rcar_lvds_write(struct rcar_du_lvdsenc *lvds, u32 reg, u32 data)
+{
+ iowrite32(data, lvds->mmio + reg);
+}
+
+static int rcar_du_lvdsenc_start(struct rcar_du_lvdsenc *lvds,
+ struct rcar_du_crtc *rcrtc)
+{
+ const struct drm_display_mode *mode = &rcrtc->crtc.mode;
+ unsigned int freq = mode->clock;
+ u32 lvdcr0;
+ u32 pllcr;
+ int ret;
+
+ if (lvds->dpms == DRM_MODE_DPMS_ON)
+ return 0;
+
+ ret = clk_prepare_enable(lvds->clock);
+ if (ret < 0)
+ return ret;
+
+ /* PLL clock configuration */
+ if (freq <= 38000)
+ pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M;
+ else if (freq <= 60000)
+ pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M;
+ else if (freq <= 121000)
+ pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M;
+ else
+ pllcr = LVDPLLCR_PLLDLYCNT_150M;
+
+ rcar_lvds_write(lvds, LVDPLLCR, pllcr);
+
+ /* Hardcode the channels and control signals routing for now.
+ *
+ * HSYNC -> CTRL0
+ * VSYNC -> CTRL1
+ * DISP -> CTRL2
+ * 0 -> CTRL3
+ *
+ * Channels 1 and 3 are switched on ES1.
+ */
+ rcar_lvds_write(lvds, LVDCTRCR, LVDCTRCR_CTR3SEL_ZERO |
+ LVDCTRCR_CTR2SEL_DISP | LVDCTRCR_CTR1SEL_VSYNC |
+ LVDCTRCR_CTR0SEL_HSYNC);
+ rcar_lvds_write(lvds, LVDCHCR,
+ LVDCHCR_CHSEL_CH(0, 0) | LVDCHCR_CHSEL_CH(1, 3) |
+ LVDCHCR_CHSEL_CH(2, 2) | LVDCHCR_CHSEL_CH(3, 1));
+
+ /* Select the input, hardcode mode 0, enable LVDS operation and turn
+ * bias circuitry on.
+ */
+ lvdcr0 = LVDCR0_BEN | LVDCR0_LVEN;
+ if (rcrtc->index == 2)
+ lvdcr0 |= LVDCR0_DUSEL;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+ /* Turn all the channels on. */
+ rcar_lvds_write(lvds, LVDCR1, LVDCR1_CHSTBY(3) | LVDCR1_CHSTBY(2) |
+ LVDCR1_CHSTBY(1) | LVDCR1_CHSTBY(0) | LVDCR1_CLKSTBY);
+
+ /* Turn the PLL on, wait for the startup delay, and turn the output
+ * on.
+ */
+ lvdcr0 |= LVDCR0_PLLEN;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+ usleep_range(100, 150);
+
+ lvdcr0 |= LVDCR0_LVRES;
+ rcar_lvds_write(lvds, LVDCR0, lvdcr0);
+
+ lvds->dpms = DRM_MODE_DPMS_ON;
+ return 0;
+}
+
+static void rcar_du_lvdsenc_stop(struct rcar_du_lvdsenc *lvds)
+{
+ if (lvds->dpms == DRM_MODE_DPMS_OFF)
+ return;
+
+ rcar_lvds_write(lvds, LVDCR0, 0);
+ rcar_lvds_write(lvds, LVDCR1, 0);
+
+ clk_disable_unprepare(lvds->clock);
+
+ lvds->dpms = DRM_MODE_DPMS_OFF;
+}
+
+int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
+ struct drm_crtc *crtc, int mode)
+{
+ if (mode == DRM_MODE_DPMS_OFF) {
+ rcar_du_lvdsenc_stop(lvds);
+ return 0;
+ } else if (crtc) {
+ struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+ return rcar_du_lvdsenc_start(lvds, rcrtc);
+ } else
+ return -EINVAL;
+}
+
+static int rcar_du_lvdsenc_get_resources(struct rcar_du_lvdsenc *lvds,
+ struct platform_device *pdev)
+{
+ struct resource *mem;
+ char name[7];
+
+ sprintf(name, "lvds.%u", lvds->index);
+
+ mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (mem == NULL) {
+ dev_err(&pdev->dev, "failed to get memory resource for %s\n",
+ name);
+ return -EINVAL;
+ }
+
+ lvds->mmio = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(lvds->mmio)) {
+ dev_err(&pdev->dev, "failed to remap memory resource for %s\n",
+ name);
+ return PTR_ERR(lvds->mmio);
+ }
+
+ lvds->clock = devm_clk_get(&pdev->dev, name);
+ if (IS_ERR(lvds->clock)) {
+ dev_err(&pdev->dev, "failed to get clock for %s\n", name);
+ return PTR_ERR(lvds->clock);
+ }
+
+ return 0;
+}
+
+int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
+{
+ struct platform_device *pdev = to_platform_device(rcdu->dev);
+ struct rcar_du_lvdsenc *lvds;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < rcdu->info->num_lvds; ++i) {
+ lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
+ if (lvds == NULL) {
+ dev_err(&pdev->dev, "failed to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ lvds->dev = rcdu;
+ lvds->index = i;
+ lvds->input = i ? RCAR_LVDS_INPUT_DU1 : RCAR_LVDS_INPUT_DU0;
+ lvds->dpms = DRM_MODE_DPMS_OFF;
+
+ ret = rcar_du_lvdsenc_get_resources(lvds, pdev);
+ if (ret < 0)
+ return ret;
+
+ rcdu->lvds[i] = lvds;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
new file mode 100644
index 00000000000..7051c6de19a
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
@@ -0,0 +1,46 @@
+/*
+ * rcar_du_lvdsenc.h -- R-Car Display Unit LVDS Encoder
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __RCAR_DU_LVDSENC_H__
+#define __RCAR_DU_LVDSENC_H__
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_data/rcar-du.h>
+
+struct rcar_drm_crtc;
+struct rcar_du_lvdsenc;
+
+enum rcar_lvds_input {
+ RCAR_LVDS_INPUT_DU0,
+ RCAR_LVDS_INPUT_DU1,
+ RCAR_LVDS_INPUT_DU2,
+};
+
+#if IS_ENABLED(CONFIG_DRM_RCAR_LVDS)
+int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu);
+int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
+ struct drm_crtc *crtc, int mode);
+#else
+static inline int rcar_du_lvdsenc_init(struct rcar_du_device *rcdu)
+{
+ return 0;
+}
+static inline int rcar_du_lvdsenc_dpms(struct rcar_du_lvdsenc *lvds,
+ struct drm_crtc *crtc, int mode)
+{
+ return 0;
+}
+#endif
+
+#endif /* __RCAR_DU_LVDSENC_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index a65f81ddf51..53000644733 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -36,90 +36,95 @@ static inline struct rcar_du_plane *to_rcar_plane(struct drm_plane *plane)
return container_of(plane, struct rcar_du_kms_plane, plane)->hwplane;
}
-static u32 rcar_du_plane_read(struct rcar_du_device *rcdu,
+static u32 rcar_du_plane_read(struct rcar_du_group *rgrp,
unsigned int index, u32 reg)
{
- return rcar_du_read(rcdu, index * PLANE_OFF + reg);
+ return rcar_du_read(rgrp->dev,
+ rgrp->mmio_offset + index * PLANE_OFF + reg);
}
-static void rcar_du_plane_write(struct rcar_du_device *rcdu,
+static void rcar_du_plane_write(struct rcar_du_group *rgrp,
unsigned int index, u32 reg, u32 data)
{
- rcar_du_write(rcdu, index * PLANE_OFF + reg, data);
+ rcar_du_write(rgrp->dev, rgrp->mmio_offset + index * PLANE_OFF + reg,
+ data);
}
int rcar_du_plane_reserve(struct rcar_du_plane *plane,
const struct rcar_du_format_info *format)
{
- struct rcar_du_device *rcdu = plane->dev;
+ struct rcar_du_group *rgrp = plane->group;
unsigned int i;
int ret = -EBUSY;
- mutex_lock(&rcdu->planes.lock);
+ mutex_lock(&rgrp->planes.lock);
- for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) {
- if (!(rcdu->planes.free & (1 << i)))
+ for (i = 0; i < ARRAY_SIZE(rgrp->planes.planes); ++i) {
+ if (!(rgrp->planes.free & (1 << i)))
continue;
if (format->planes == 1 ||
- rcdu->planes.free & (1 << ((i + 1) % 8)))
+ rgrp->planes.free & (1 << ((i + 1) % 8)))
break;
}
- if (i == ARRAY_SIZE(rcdu->planes.planes))
+ if (i == ARRAY_SIZE(rgrp->planes.planes))
goto done;
- rcdu->planes.free &= ~(1 << i);
+ rgrp->planes.free &= ~(1 << i);
if (format->planes == 2)
- rcdu->planes.free &= ~(1 << ((i + 1) % 8));
+ rgrp->planes.free &= ~(1 << ((i + 1) % 8));
plane->hwindex = i;
ret = 0;
done:
- mutex_unlock(&rcdu->planes.lock);
+ mutex_unlock(&rgrp->planes.lock);
return ret;
}
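A worked example, not part of the patch, of the reservation search above: two-plane formats such as NV12 need a contiguous pair of hardware planes (modulo 8). With free = 0xce (planes 1-3 and 6-7 available), the first usable pair is (1, 2), leaving free = 0xc8. The helper name below is hypothetical.

/* Hypothetical pair search mirroring rcar_du_plane_reserve(). */
static int example_find_plane_pair(u8 free)
{
	unsigned int i;

	for (i = 0; i < 8; ++i) {
		if ((free & (1 << i)) && (free & (1 << ((i + 1) % 8))))
			return i;	/* returns 1 for free = 0xce */
	}

	return -EBUSY;
}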
void rcar_du_plane_release(struct rcar_du_plane *plane)
{
- struct rcar_du_device *rcdu = plane->dev;
+ struct rcar_du_group *rgrp = plane->group;
if (plane->hwindex == -1)
return;
- mutex_lock(&rcdu->planes.lock);
- rcdu->planes.free |= 1 << plane->hwindex;
+ mutex_lock(&rgrp->planes.lock);
+ rgrp->planes.free |= 1 << plane->hwindex;
if (plane->format->planes == 2)
- rcdu->planes.free |= 1 << ((plane->hwindex + 1) % 8);
- mutex_unlock(&rcdu->planes.lock);
+ rgrp->planes.free |= 1 << ((plane->hwindex + 1) % 8);
+ mutex_unlock(&rgrp->planes.lock);
plane->hwindex = -1;
}
void rcar_du_plane_update_base(struct rcar_du_plane *plane)
{
- struct rcar_du_device *rcdu = plane->dev;
+ struct rcar_du_group *rgrp = plane->group;
unsigned int index = plane->hwindex;
- /* According to the datasheet the Y position is expressed in raster line
- * units. However, 32bpp formats seem to require a doubled Y position
- * value. Similarly, for the second plane, NV12 and NV21 formats seem to
+ /* The Y position is expressed in raster line units and must be doubled
+ * for 32bpp formats, according to the R8A7790 datasheet. No mention of
+ * doubling the Y position is found in the R8A7779 datasheet, but the
+ * rule seems to apply there as well.
+ *
+ * Similarly, for the second plane, NV12 and NV21 formats seem to
* require a halved Y position value.
*/
- rcar_du_plane_write(rcdu, index, PnSPXR, plane->src_x);
- rcar_du_plane_write(rcdu, index, PnSPYR, plane->src_y *
+ rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x);
+ rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y *
(plane->format->bpp == 32 ? 2 : 1));
- rcar_du_plane_write(rcdu, index, PnDSA0R, plane->dma[0]);
+ rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[0]);
if (plane->format->planes == 2) {
index = (index + 1) % 8;
- rcar_du_plane_write(rcdu, index, PnSPXR, plane->src_x);
- rcar_du_plane_write(rcdu, index, PnSPYR, plane->src_y *
+ rcar_du_plane_write(rgrp, index, PnSPXR, plane->src_x);
+ rcar_du_plane_write(rgrp, index, PnSPYR, plane->src_y *
(plane->format->bpp == 16 ? 2 : 1) / 2);
- rcar_du_plane_write(rcdu, index, PnDSA0R, plane->dma[1]);
+ rcar_du_plane_write(rgrp, index, PnDSA0R, plane->dma[1]);
}
}
@@ -140,7 +145,7 @@ void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
unsigned int index)
{
- struct rcar_du_device *rcdu = plane->dev;
+ struct rcar_du_group *rgrp = plane->group;
u32 colorkey;
u32 pnmr;
@@ -154,9 +159,9 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
* enable alpha-blending regardless of the X bit value.
*/
if (plane->format->fourcc != DRM_FORMAT_XRGB1555)
- rcar_du_plane_write(rcdu, index, PnALPHAR, PnALPHAR_ABIT_0);
+ rcar_du_plane_write(rgrp, index, PnALPHAR, PnALPHAR_ABIT_0);
else
- rcar_du_plane_write(rcdu, index, PnALPHAR,
+ rcar_du_plane_write(rgrp, index, PnALPHAR,
PnALPHAR_ABIT_X | plane->alpha);
pnmr = PnMR_BM_MD | plane->format->pnmr;
@@ -172,14 +177,14 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
if (plane->format->fourcc == DRM_FORMAT_YUYV)
pnmr |= PnMR_YCDF_YUYV;
- rcar_du_plane_write(rcdu, index, PnMR, pnmr);
+ rcar_du_plane_write(rgrp, index, PnMR, pnmr);
switch (plane->format->fourcc) {
case DRM_FORMAT_RGB565:
colorkey = ((plane->colorkey & 0xf80000) >> 8)
| ((plane->colorkey & 0x00fc00) >> 5)
| ((plane->colorkey & 0x0000f8) >> 3);
- rcar_du_plane_write(rcdu, index, PnTC2R, colorkey);
+ rcar_du_plane_write(rgrp, index, PnTC2R, colorkey);
break;
case DRM_FORMAT_ARGB1555:
@@ -187,12 +192,12 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
colorkey = ((plane->colorkey & 0xf80000) >> 9)
| ((plane->colorkey & 0x00f800) >> 6)
| ((plane->colorkey & 0x0000f8) >> 3);
- rcar_du_plane_write(rcdu, index, PnTC2R, colorkey);
+ rcar_du_plane_write(rgrp, index, PnTC2R, colorkey);
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
- rcar_du_plane_write(rcdu, index, PnTC3R,
+ rcar_du_plane_write(rgrp, index, PnTC3R,
PnTC3R_CODE | (plane->colorkey & 0xffffff));
break;
}
@@ -201,7 +206,7 @@ static void rcar_du_plane_setup_mode(struct rcar_du_plane *plane,
static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
unsigned int index)
{
- struct rcar_du_device *rcdu = plane->dev;
+ struct rcar_du_group *rgrp = plane->group;
u32 ddcr2 = PnDDCR2_CODE;
u32 ddcr4;
u32 mwr;
@@ -211,7 +216,7 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
* The data format is selected by the DDDF field in PnMR and the EDF
* field in DDCR4.
*/
- ddcr4 = rcar_du_plane_read(rcdu, index, PnDDCR4);
+ ddcr4 = rcar_du_plane_read(rgrp, index, PnDDCR4);
ddcr4 &= ~PnDDCR4_EDF_MASK;
ddcr4 |= plane->format->edf | PnDDCR4_CODE;
@@ -232,8 +237,8 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
}
}
- rcar_du_plane_write(rcdu, index, PnDDCR2, ddcr2);
- rcar_du_plane_write(rcdu, index, PnDDCR4, ddcr4);
+ rcar_du_plane_write(rgrp, index, PnDDCR2, ddcr2);
+ rcar_du_plane_write(rgrp, index, PnDDCR4, ddcr4);
/* Memory pitch (expressed in pixels) */
if (plane->format->planes == 2)
@@ -241,19 +246,19 @@ static void __rcar_du_plane_setup(struct rcar_du_plane *plane,
else
mwr = plane->pitch * 8 / plane->format->bpp;
- rcar_du_plane_write(rcdu, index, PnMWR, mwr);
+ rcar_du_plane_write(rgrp, index, PnMWR, mwr);
/* Destination position and size */
- rcar_du_plane_write(rcdu, index, PnDSXR, plane->width);
- rcar_du_plane_write(rcdu, index, PnDSYR, plane->height);
- rcar_du_plane_write(rcdu, index, PnDPXR, plane->dst_x);
- rcar_du_plane_write(rcdu, index, PnDPYR, plane->dst_y);
+ rcar_du_plane_write(rgrp, index, PnDSXR, plane->width);
+ rcar_du_plane_write(rgrp, index, PnDSYR, plane->height);
+ rcar_du_plane_write(rgrp, index, PnDPXR, plane->dst_x);
+ rcar_du_plane_write(rgrp, index, PnDPYR, plane->dst_y);
/* Wrap-around and blinking, disabled */
- rcar_du_plane_write(rcdu, index, PnWASPR, 0);
- rcar_du_plane_write(rcdu, index, PnWAMWR, 4095);
- rcar_du_plane_write(rcdu, index, PnBTR, 0);
- rcar_du_plane_write(rcdu, index, PnMLR, 0);
+ rcar_du_plane_write(rgrp, index, PnWASPR, 0);
+ rcar_du_plane_write(rgrp, index, PnWAMWR, 4095);
+ rcar_du_plane_write(rgrp, index, PnBTR, 0);
+ rcar_du_plane_write(rgrp, index, PnMLR, 0);
}
void rcar_du_plane_setup(struct rcar_du_plane *plane)
@@ -273,7 +278,7 @@ rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
uint32_t src_w, uint32_t src_h)
{
struct rcar_du_plane *rplane = to_rcar_plane(plane);
- struct rcar_du_device *rcdu = plane->dev->dev_private;
+ struct rcar_du_device *rcdu = rplane->group->dev;
const struct rcar_du_format_info *format;
unsigned int nplanes;
int ret;
@@ -316,26 +321,25 @@ rcar_du_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
rcar_du_plane_compute_base(rplane, fb);
rcar_du_plane_setup(rplane);
- mutex_lock(&rcdu->planes.lock);
+ mutex_lock(&rplane->group->planes.lock);
rplane->enabled = true;
rcar_du_crtc_update_planes(rplane->crtc);
- mutex_unlock(&rcdu->planes.lock);
+ mutex_unlock(&rplane->group->planes.lock);
return 0;
}
static int rcar_du_plane_disable(struct drm_plane *plane)
{
- struct rcar_du_device *rcdu = plane->dev->dev_private;
struct rcar_du_plane *rplane = to_rcar_plane(plane);
if (!rplane->enabled)
return 0;
- mutex_lock(&rcdu->planes.lock);
+ mutex_lock(&rplane->group->planes.lock);
rplane->enabled = false;
rcar_du_crtc_update_planes(rplane->crtc);
- mutex_unlock(&rcdu->planes.lock);
+ mutex_unlock(&rplane->group->planes.lock);
rcar_du_plane_release(rplane);
@@ -377,9 +381,7 @@ static void rcar_du_plane_set_colorkey(struct rcar_du_plane *plane,
static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane,
unsigned int zpos)
{
- struct rcar_du_device *rcdu = plane->dev;
-
- mutex_lock(&rcdu->planes.lock);
+ mutex_lock(&plane->group->planes.lock);
if (plane->zpos == zpos)
goto done;
@@ -390,21 +392,21 @@ static void rcar_du_plane_set_zpos(struct rcar_du_plane *plane,
rcar_du_crtc_update_planes(plane->crtc);
done:
- mutex_unlock(&rcdu->planes.lock);
+ mutex_unlock(&plane->group->planes.lock);
}
static int rcar_du_plane_set_property(struct drm_plane *plane,
struct drm_property *property,
uint64_t value)
{
- struct rcar_du_device *rcdu = plane->dev->dev_private;
struct rcar_du_plane *rplane = to_rcar_plane(plane);
+ struct rcar_du_group *rgrp = rplane->group;
- if (property == rcdu->planes.alpha)
+ if (property == rgrp->planes.alpha)
rcar_du_plane_set_alpha(rplane, value);
- else if (property == rcdu->planes.colorkey)
+ else if (property == rgrp->planes.colorkey)
rcar_du_plane_set_colorkey(rplane, value);
- else if (property == rcdu->planes.zpos)
+ else if (property == rgrp->planes.zpos)
rcar_du_plane_set_zpos(rplane, value);
else
return -EINVAL;
@@ -432,37 +434,39 @@ static const uint32_t formats[] = {
DRM_FORMAT_NV16,
};
-int rcar_du_plane_init(struct rcar_du_device *rcdu)
+int rcar_du_planes_init(struct rcar_du_group *rgrp)
{
+ struct rcar_du_planes *planes = &rgrp->planes;
+ struct rcar_du_device *rcdu = rgrp->dev;
unsigned int i;
- mutex_init(&rcdu->planes.lock);
- rcdu->planes.free = 0xff;
+ mutex_init(&planes->lock);
+ planes->free = 0xff;
- rcdu->planes.alpha =
+ planes->alpha =
drm_property_create_range(rcdu->ddev, 0, "alpha", 0, 255);
- if (rcdu->planes.alpha == NULL)
+ if (planes->alpha == NULL)
return -ENOMEM;
/* The color key is expressed as an RGB888 triplet stored in a 32-bit
* integer in XRGB8888 format. Bit 24 is used as a flag to disable (0)
* or enable source color keying (1).
*/
- rcdu->planes.colorkey =
+ planes->colorkey =
drm_property_create_range(rcdu->ddev, 0, "colorkey",
0, 0x01ffffff);
- if (rcdu->planes.colorkey == NULL)
+ if (planes->colorkey == NULL)
return -ENOMEM;
- rcdu->planes.zpos =
+ planes->zpos =
drm_property_create_range(rcdu->ddev, 0, "zpos", 1, 7);
- if (rcdu->planes.zpos == NULL)
+ if (planes->zpos == NULL)
return -ENOMEM;
- for (i = 0; i < ARRAY_SIZE(rcdu->planes.planes); ++i) {
- struct rcar_du_plane *plane = &rcdu->planes.planes[i];
+ for (i = 0; i < ARRAY_SIZE(planes->planes); ++i) {
+ struct rcar_du_plane *plane = &planes->planes[i];
- plane->dev = rcdu;
+ plane->group = rgrp;
plane->hwindex = -1;
plane->alpha = 255;
plane->colorkey = RCAR_DU_COLORKEY_NONE;
@@ -472,11 +476,16 @@ int rcar_du_plane_init(struct rcar_du_device *rcdu)
return 0;
}
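A small encoding example, not part of the patch, for the "colorkey" property created in rcar_du_planes_init() above: the value packs an RGB888 triplet into bits 23:0 and uses bit 24 to enable source color keying. The helper name is hypothetical.

/* Hypothetical encoder for the colorkey property value. */
static u32 example_colorkey(u8 r, u8 g, u8 b, bool enable)
{
	/* e.g. pure green with keying enabled: 0x0100ff00 */
	return (enable ? (1 << 24) : 0) | (r << 16) | (g << 8) | b;
}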
-int rcar_du_plane_register(struct rcar_du_device *rcdu)
+int rcar_du_planes_register(struct rcar_du_group *rgrp)
{
+ struct rcar_du_planes *planes = &rgrp->planes;
+ struct rcar_du_device *rcdu = rgrp->dev;
+ unsigned int crtcs;
unsigned int i;
int ret;
+ crtcs = ((1 << rcdu->num_crtcs) - 1) & (3 << (2 * rgrp->index));
+
for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) {
struct rcar_du_kms_plane *plane;
@@ -484,23 +493,22 @@ int rcar_du_plane_register(struct rcar_du_device *rcdu)
if (plane == NULL)
return -ENOMEM;
- plane->hwplane = &rcdu->planes.planes[i + 2];
+ plane->hwplane = &planes->planes[i + 2];
plane->hwplane->zpos = 1;
- ret = drm_plane_init(rcdu->ddev, &plane->plane,
- (1 << rcdu->num_crtcs) - 1,
+ ret = drm_plane_init(rcdu->ddev, &plane->plane, crtcs,
&rcar_du_plane_funcs, formats,
ARRAY_SIZE(formats), false);
if (ret < 0)
return ret;
drm_object_attach_property(&plane->plane.base,
- rcdu->planes.alpha, 255);
+ planes->alpha, 255);
drm_object_attach_property(&plane->plane.base,
- rcdu->planes.colorkey,
+ planes->colorkey,
RCAR_DU_COLORKEY_NONE);
drm_object_attach_property(&plane->plane.base,
- rcdu->planes.zpos, 1);
+ planes->zpos, 1);
}
return 0;
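A worked example, not part of the patch, of the crtcs mask computed at the top of rcar_du_planes_register(): each group exposes its KMS planes only to its own pair of CRTCs. With three CRTCs (R8A7790), group 0 gets 0b011 (CRTCs 0 and 1) and group 1 gets 0b100 (CRTC 2 only). The helper name is hypothetical.

/* Hypothetical restatement of the per-group CRTC mask. */
static unsigned int example_group_crtc_mask(unsigned int num_crtcs,
					    unsigned int group_index)
{
	return ((1 << num_crtcs) - 1) & (3 << (2 * group_index));
}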
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.h b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
index 5397dba2fe5..f94f9ce8499 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.h
@@ -14,10 +14,13 @@
#ifndef __RCAR_DU_PLANE_H__
#define __RCAR_DU_PLANE_H__
-struct drm_crtc;
-struct drm_framebuffer;
-struct rcar_du_device;
+#include <linux/mutex.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+
struct rcar_du_format_info;
+struct rcar_du_group;
/* The RCAR DU has 8 hardware planes, shared between KMS planes and CRTCs. As
* using KMS planes requires at least one of the CRTCs being enabled, no more
@@ -30,7 +33,7 @@ struct rcar_du_format_info;
#define RCAR_DU_NUM_SW_PLANES 9
struct rcar_du_plane {
- struct rcar_du_device *dev;
+ struct rcar_du_group *group;
struct drm_crtc *crtc;
bool enabled;
@@ -54,8 +57,19 @@ struct rcar_du_plane {
unsigned int dst_y;
};
-int rcar_du_plane_init(struct rcar_du_device *rcdu);
-int rcar_du_plane_register(struct rcar_du_device *rcdu);
+struct rcar_du_planes {
+ struct rcar_du_plane planes[RCAR_DU_NUM_SW_PLANES];
+ unsigned int free;
+ struct mutex lock;
+
+ struct drm_property *alpha;
+ struct drm_property *colorkey;
+ struct drm_property *zpos;
+};
+
+int rcar_du_planes_init(struct rcar_du_group *rgrp);
+int rcar_du_planes_register(struct rcar_du_group *rgrp);
+
void rcar_du_plane_setup(struct rcar_du_plane *plane);
void rcar_du_plane_update_base(struct rcar_du_plane *plane);
void rcar_du_plane_compute_base(struct rcar_du_plane *plane,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index 69f21f19b51..73f7347f740 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -13,14 +13,15 @@
#ifndef __RCAR_DU_REGS_H__
#define __RCAR_DU_REGS_H__
-#define DISP2_REG_OFFSET 0x30000
+#define DU0_REG_OFFSET 0x00000
+#define DU1_REG_OFFSET 0x30000
+#define DU2_REG_OFFSET 0x40000
/* -----------------------------------------------------------------------------
* Display Control Registers
*/
#define DSYSR 0x00000 /* display 1 */
-#define D2SYSR 0x30000 /* display 2 */
#define DSYSR_ILTS (1 << 29)
#define DSYSR_DSEC (1 << 20)
#define DSYSR_IUPD (1 << 16)
@@ -35,7 +36,6 @@
#define DSYSR_SCM_INT_VIDEO (3 << 4)
#define DSMR 0x00004
-#define D2SMR 0x30004
#define DSMR_VSPM (1 << 28)
#define DSMR_ODPM (1 << 27)
#define DSMR_DIPM_DISP (0 << 25)
@@ -60,7 +60,6 @@
#define DSMR_CSY_MASK (3 << 6)
#define DSSR 0x00008
-#define D2SSR 0x30008
#define DSSR_VC1FB_DSA0 (0 << 30)
#define DSSR_VC1FB_DSA1 (1 << 30)
#define DSSR_VC1FB_DSA2 (2 << 30)
@@ -80,7 +79,6 @@
#define DSSR_ADC(n) (1 << ((n)-1))
#define DSRCR 0x0000c
-#define D2SRCR 0x3000c
#define DSRCR_TVCL (1 << 15)
#define DSRCR_FRCL (1 << 14)
#define DSRCR_VBCL (1 << 11)
@@ -90,7 +88,6 @@
#define DSRCR_MASK 0x0000cbff
#define DIER 0x00010
-#define D2IER 0x30010
#define DIER_TVE (1 << 15)
#define DIER_FRE (1 << 14)
#define DIER_VBE (1 << 11)
@@ -114,7 +111,6 @@
#define DPPR_BPP32 (DPPR_BPP32_P1 | DPPR_BPP32_P2) /* plane1 & 2 */
#define DEFR 0x00020
-#define D2EFR 0x30020
#define DEFR_CODE (0x7773 << 16)
#define DEFR_EXSL (1 << 12)
#define DEFR_EXVL (1 << 11)
@@ -137,12 +133,10 @@
#define DCPCR_DCE (1 << 0)
#define DEFR2 0x00034
-#define D2EFR2 0x30034
#define DEFR2_CODE (0x7775 << 16)
#define DEFR2_DEFE2G (1 << 0)
#define DEFR3 0x00038
-#define D2EFR3 0x30038
#define DEFR3_CODE (0x7776 << 16)
#define DEFR3_EVDA (1 << 14)
#define DEFR3_EVDM_1 (1 << 12)
@@ -153,7 +147,6 @@
#define DEFR3_DEFE3 (1 << 0)
#define DEFR4 0x0003c
-#define D2EFR4 0x3003c
#define DEFR4_CODE (0x7777 << 16)
#define DEFR4_LRUO (1 << 5)
#define DEFR4_SPCE (1 << 4)
@@ -205,6 +198,68 @@
#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE2)
/* -----------------------------------------------------------------------------
+ * R8A7790-only Control Registers
+ */
+
+#define DD1SSR 0x20008
+#define DD1SSR_TVR (1 << 15)
+#define DD1SSR_FRM (1 << 14)
+#define DD1SSR_BUF (1 << 12)
+#define DD1SSR_VBK (1 << 11)
+#define DD1SSR_RINT (1 << 9)
+#define DD1SSR_HBK (1 << 8)
+#define DD1SSR_ADC(n) (1 << ((n)-1))
+
+#define DD1SRCR 0x2000c
+#define DD1SRCR_TVR (1 << 15)
+#define DD1SRCR_FRM (1 << 14)
+#define DD1SRCR_BUF (1 << 12)
+#define DD1SRCR_VBK (1 << 11)
+#define DD1SRCR_RINT (1 << 9)
+#define DD1SRCR_HBK (1 << 8)
+#define DD1SRCR_ADC(n) (1 << ((n)-1))
+
+#define DD1IER 0x20010
+#define DD1IER_TVR (1 << 15)
+#define DD1IER_FRM (1 << 14)
+#define DD1IER_BUF (1 << 12)
+#define DD1IER_VBK (1 << 11)
+#define DD1IER_RINT (1 << 9)
+#define DD1IER_HBK (1 << 8)
+#define DD1IER_ADC(n) (1 << ((n)-1))
+
+#define DEFR8 0x20020
+#define DEFR8_CODE (0x7790 << 16)
+#define DEFR8_VSCS (1 << 6)
+#define DEFR8_DRGBS_DU(n) ((n) << 4)
+#define DEFR8_DRGBS_MASK (3 << 4)
+#define DEFR8_DEFE8 (1 << 0)
+
+#define DOFLR 0x20024
+#define DOFLR_CODE (0x7790 << 16)
+#define DOFLR_HSYCFL1 (1 << 13)
+#define DOFLR_VSYCFL1 (1 << 12)
+#define DOFLR_ODDFL1 (1 << 11)
+#define DOFLR_DISPFL1 (1 << 10)
+#define DOFLR_CDEFL1 (1 << 9)
+#define DOFLR_RGBFL1 (1 << 8)
+#define DOFLR_HSYCFL0 (1 << 5)
+#define DOFLR_VSYCFL0 (1 << 4)
+#define DOFLR_ODDFL0 (1 << 3)
+#define DOFLR_DISPFL0 (1 << 2)
+#define DOFLR_CDEFL0 (1 << 1)
+#define DOFLR_RGBFL0 (1 << 0)
+
+#define DIDSR 0x20028
+#define DIDSR_CODE (0x7790 << 16)
+#define DIDSR_LCDS_DCLKIN(n) (0 << (8 + (n) * 2))
+#define DIDSR_LCDS_LVDS0(n) (2 << (8 + (n) * 2))
+#define DIDSR_LCDS_LVDS1(n) (3 << (8 + (n) * 2))
+#define DIDSR_LCDS_MASK(n) (3 << (8 + (n) * 2))
+#define DIDSR_PCDS_CLK(n, clk) (clk << ((n) * 2))
+#define DIDSR_PCDS_MASK(n) (3 << ((n) * 2))
+
+/* -----------------------------------------------------------------------------
* Display Timing Generation Registers
*/
@@ -349,21 +404,34 @@
#define APnMR_BM_AD (2 << 4) /* Auto Display Change Mode */
#define APnMWR 0x0a104
+
+#define APnDSXR 0x0a110
+#define APnDSYR 0x0a114
+#define APnDPXR 0x0a118
+#define APnDPYR 0x0a11c
+
#define APnDSA0R 0x0a120
#define APnDSA1R 0x0a124
#define APnDSA2R 0x0a128
+
+#define APnSPXR 0x0a130
+#define APnSPYR 0x0a134
+#define APnWASPR 0x0a138
+#define APnWAMWR 0x0a13c
+
+#define APnBTR 0x0a140
+
#define APnMLR 0x0a150
+#define APnSWAPR 0x0a180
/* -----------------------------------------------------------------------------
* Display Capture Registers
*/
+#define DCMR 0x0c100
#define DCMWR 0x0c104
-#define DC2MWR 0x0c204
#define DCSAR 0x0c120
-#define DC2SAR 0x0c220
#define DCMLR 0x0c150
-#define DC2MLR 0x0c250
/* -----------------------------------------------------------------------------
* Color Palette Registers
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vga.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 327289ec380..41d563adfea 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vga.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -1,5 +1,5 @@
/*
- * rcar_du_vga.c -- R-Car Display Unit VGA DAC and Connector
+ * rcar_du_vgacon.c -- R-Car Display Unit VGA Connector
*
* Copyright (C) 2013 Renesas Corporation
*
@@ -16,12 +16,9 @@
#include <drm/drm_crtc_helper.h>
#include "rcar_du_drv.h"
+#include "rcar_du_encoder.h"
#include "rcar_du_kms.h"
-#include "rcar_du_vga.h"
-
-/* -----------------------------------------------------------------------------
- * Connector
- */
+#include "rcar_du_vgacon.h"
static int rcar_du_vga_connector_get_modes(struct drm_connector *connector)
{
@@ -49,7 +46,7 @@ static void rcar_du_vga_connector_destroy(struct drm_connector *connector)
static enum drm_connector_status
rcar_du_vga_connector_detect(struct drm_connector *connector, bool force)
{
- return connector_status_unknown;
+ return connector_status_connected;
}
static const struct drm_connector_funcs connector_funcs = {
@@ -59,8 +56,8 @@ static const struct drm_connector_funcs connector_funcs = {
.destroy = rcar_du_vga_connector_destroy,
};
-static int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
- struct rcar_du_encoder *renc)
+int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
+ struct rcar_du_encoder *renc)
{
struct rcar_du_connector *rcon;
struct drm_connector *connector;
@@ -97,53 +94,3 @@ static int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
return 0;
}
-
-/* -----------------------------------------------------------------------------
- * Encoder
- */
-
-static void rcar_du_vga_encoder_dpms(struct drm_encoder *encoder, int mode)
-{
-}
-
-static bool rcar_du_vga_encoder_mode_fixup(struct drm_encoder *encoder,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
- .dpms = rcar_du_vga_encoder_dpms,
- .mode_fixup = rcar_du_vga_encoder_mode_fixup,
- .prepare = rcar_du_encoder_mode_prepare,
- .commit = rcar_du_encoder_mode_commit,
- .mode_set = rcar_du_encoder_mode_set,
-};
-
-static const struct drm_encoder_funcs encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-int rcar_du_vga_init(struct rcar_du_device *rcdu,
- const struct rcar_du_encoder_vga_data *data,
- unsigned int output)
-{
- struct rcar_du_encoder *renc;
- int ret;
-
- renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL);
- if (renc == NULL)
- return -ENOMEM;
-
- renc->output = output;
-
- ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs,
- DRM_MODE_ENCODER_DAC);
- if (ret < 0)
- return ret;
-
- drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs);
-
- return rcar_du_vga_connector_init(rcdu, renc);
-}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vga.h b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h
index 66b4d2d7190..b12b0cf7f11 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vga.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.h
@@ -1,5 +1,5 @@
/*
- * rcar_du_vga.h -- R-Car Display Unit VGA DAC and Connector
+ * rcar_du_vgacon.h -- R-Car Display Unit VGA Connector
*
* Copyright (C) 2013 Renesas Corporation
*
@@ -11,14 +11,13 @@
* (at your option) any later version.
*/
-#ifndef __RCAR_DU_VGA_H__
-#define __RCAR_DU_VGA_H__
+#ifndef __RCAR_DU_VGACON_H__
+#define __RCAR_DU_VGACON_H__
struct rcar_du_device;
-struct rcar_du_encoder_vga_data;
+struct rcar_du_encoder;
-int rcar_du_vga_init(struct rcar_du_device *rcdu,
- const struct rcar_du_encoder_vga_data *data,
- unsigned int output);
+int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
+ struct rcar_du_encoder *renc);
-#endif /* __RCAR_DU_VGA_H__ */
+#endif /* __RCAR_DU_VGACON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
new file mode 100644
index 00000000000..77cf9289ab6
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
@@ -0,0 +1,69 @@
+/*
+ * rcar_lvds_regs.h -- R-Car LVDS Interface Registers Definitions
+ *
+ * Copyright (C) 2013 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __RCAR_LVDS_REGS_H__
+#define __RCAR_LVDS_REGS_H__
+
+#define LVDCR0 0x0000
+#define LVDCR0_DUSEL (1 << 15)
+#define LVDCR0_DMD (1 << 12)
+#define LVDCR0_LVMD_MASK (0xf << 8)
+#define LVDCR0_LVMD_SHIFT 8
+#define LVDCR0_PLLEN (1 << 4)
+#define LVDCR0_BEN (1 << 2)
+#define LVDCR0_LVEN (1 << 1)
+#define LVDCR0_LVRES (1 << 0)
+
+#define LVDCR1 0x0004
+#define LVDCR1_CKSEL (1 << 15)
+#define LVDCR1_CHSTBY(n) (3 << (2 + (n) * 2))
+#define LVDCR1_CLKSTBY (3 << 0)
+
+#define LVDPLLCR 0x0008
+#define LVDPLLCR_CEEN (1 << 14)
+#define LVDPLLCR_FBEN (1 << 13)
+#define LVDPLLCR_COSEL (1 << 12)
+#define LVDPLLCR_PLLDLYCNT_150M (0x1bf << 0)
+#define LVDPLLCR_PLLDLYCNT_121M (0x22c << 0)
+#define LVDPLLCR_PLLDLYCNT_60M (0x77b << 0)
+#define LVDPLLCR_PLLDLYCNT_38M (0x69a << 0)
+#define LVDPLLCR_PLLDLYCNT_MASK (0x7ff << 0)
+
+#define LVDCTRCR 0x000c
+#define LVDCTRCR_CTR3SEL_ZERO (0 << 12)
+#define LVDCTRCR_CTR3SEL_ODD (1 << 12)
+#define LVDCTRCR_CTR3SEL_CDE (2 << 12)
+#define LVDCTRCR_CTR3SEL_MASK (7 << 12)
+#define LVDCTRCR_CTR2SEL_DISP (0 << 8)
+#define LVDCTRCR_CTR2SEL_ODD (1 << 8)
+#define LVDCTRCR_CTR2SEL_CDE (2 << 8)
+#define LVDCTRCR_CTR2SEL_HSYNC (3 << 8)
+#define LVDCTRCR_CTR2SEL_VSYNC (4 << 8)
+#define LVDCTRCR_CTR2SEL_MASK (7 << 8)
+#define LVDCTRCR_CTR1SEL_VSYNC (0 << 4)
+#define LVDCTRCR_CTR1SEL_DISP (1 << 4)
+#define LVDCTRCR_CTR1SEL_ODD (2 << 4)
+#define LVDCTRCR_CTR1SEL_CDE (3 << 4)
+#define LVDCTRCR_CTR1SEL_HSYNC (4 << 4)
+#define LVDCTRCR_CTR1SEL_MASK (7 << 4)
+#define LVDCTRCR_CTR0SEL_HSYNC (0 << 0)
+#define LVDCTRCR_CTR0SEL_VSYNC (1 << 0)
+#define LVDCTRCR_CTR0SEL_DISP (2 << 0)
+#define LVDCTRCR_CTR0SEL_ODD (3 << 0)
+#define LVDCTRCR_CTR0SEL_CDE (4 << 0)
+#define LVDCTRCR_CTR0SEL_MASK (7 << 0)
+
+#define LVDCHCR 0x0010
+#define LVDCHCR_CHSEL_CH(n, c) ((((c) - (n)) & 3) << ((n) * 4))
+#define LVDCHCR_CHSEL_MASK(n) (3 << ((n) * 4))
+
+#endif /* __RCAR_LVDS_REGS_H__ */
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index bd6b2cf508d..b17d0710871 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -1072,7 +1072,7 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
drm_idlelock_release(&file_priv->master->lock);
}
-struct drm_ioctl_desc savage_ioctls[] = {
+const struct drm_ioctl_desc savage_ioctls[] = {
DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
DRM_IOCTL_DEF_DRV(SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 71b2081e783..3c030216e88 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -42,7 +42,6 @@ static const struct file_operations savage_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
@@ -51,7 +50,7 @@ static const struct file_operations savage_driver_fops = {
static struct drm_driver driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
+ DRIVER_USE_AGP | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
.dev_priv_size = sizeof(drm_savage_buf_priv_t),
.load = savage_driver_load,
.firstopen = savage_driver_firstopen,
diff --git a/drivers/gpu/drm/savage/savage_drv.h b/drivers/gpu/drm/savage/savage_drv.h
index c05082a59f6..335f8fcf104 100644
--- a/drivers/gpu/drm/savage/savage_drv.h
+++ b/drivers/gpu/drm/savage/savage_drv.h
@@ -104,7 +104,7 @@ enum savage_family {
S3_LAST
};
-extern struct drm_ioctl_desc savage_ioctls[];
+extern const struct drm_ioctl_desc savage_ioctls[];
extern int savage_max_ioctl;
#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 99e2034e49c..54bad98e947 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -465,7 +465,8 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct shmob_drm_crtc *scrtc = to_shmob_crtc(crtc);
struct drm_device *dev = scrtc->crtc.dev;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 5f83f9a3ef5..015551866b4 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -257,7 +257,6 @@ static const struct file_operations shmob_drm_fops = {
#endif
.poll = drm_poll,
.read = drm_read,
- .fasync = drm_fasync,
.llseek = no_llseek,
.mmap = drm_gem_cma_mmap,
};
@@ -285,7 +284,7 @@ static struct drm_driver shmob_drm_driver = {
.gem_prime_mmap = drm_gem_cma_prime_mmap,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_cma_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.fops = &shmob_drm_fops,
.name = "shmob-drm",
.desc = "Renesas SH Mobile DRM",
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 5a5325e6b75..4383b74a3aa 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -72,7 +72,6 @@ static const struct file_operations sis_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
@@ -103,7 +102,7 @@ void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
}
static struct drm_driver driver = {
- .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
+ .driver_features = DRIVER_USE_AGP,
.load = sis_driver_load,
.unload = sis_driver_unload,
.open = sis_driver_open,
diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
index 13b527bb83b..c31c0253054 100644
--- a/drivers/gpu/drm/sis/sis_drv.h
+++ b/drivers/gpu/drm/sis/sis_drv.h
@@ -70,7 +70,7 @@ extern void sis_reclaim_buffers_locked(struct drm_device *dev,
struct drm_file *file_priv);
extern void sis_lastclose(struct drm_device *dev);
-extern struct drm_ioctl_desc sis_ioctls[];
+extern const struct drm_ioctl_desc sis_ioctls[];
extern int sis_max_ioctl;
#endif
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 9a43d98e500..01857d83635 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -109,7 +109,8 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
if (pool == AGP_TYPE) {
retval = drm_mm_insert_node(&dev_priv->agp_mm,
&item->mm_node,
- mem->size, 0);
+ mem->size, 0,
+ DRM_MM_SEARCH_DEFAULT);
offset = item->mm_node.start;
} else {
#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
@@ -121,7 +122,8 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
#else
retval = drm_mm_insert_node(&dev_priv->vram_mm,
&item->mm_node,
- mem->size, 0);
+ mem->size, 0,
+ DRM_MM_SEARCH_DEFAULT);
offset = item->mm_node.start;
#endif
}
@@ -348,7 +350,7 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
return;
}
-struct drm_ioctl_desc sis_ioctls[] = {
+const struct drm_ioctl_desc sis_ioctls[] = {
DRM_IOCTL_DEF_DRV(SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
DRM_IOCTL_DEF_DRV(SIS_FB_FREE, sis_drm_free, DRM_AUTH),
DRM_IOCTL_DEF_DRV(SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index ddfa743459d..3492ca5c46d 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -48,7 +48,6 @@ static const struct file_operations tdfx_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
@@ -56,7 +55,6 @@ static const struct file_operations tdfx_driver_fops = {
};
static struct drm_driver driver = {
- .driver_features = DRIVER_USE_MTRR,
.fops = &tdfx_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 7418dcd986d..d36efc13b16 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -15,7 +15,7 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include <linux/kfifo.h>
+#include "drm_flip_work.h"
#include "tilcdc_drv.h"
#include "tilcdc_regs.h"
@@ -35,21 +35,18 @@ struct tilcdc_crtc {
struct drm_framebuffer *scanout[2];
/* for deferred fb unref's: */
- DECLARE_KFIFO_PTR(unref_fifo, struct drm_framebuffer *);
- struct work_struct work;
+ struct drm_flip_work unref_work;
};
#define to_tilcdc_crtc(x) container_of(x, struct tilcdc_crtc, base)
-static void unref_worker(struct work_struct *work)
+static void unref_worker(struct drm_flip_work *work, void *val)
{
struct tilcdc_crtc *tilcdc_crtc =
- container_of(work, struct tilcdc_crtc, work);
+ container_of(work, struct tilcdc_crtc, unref_work);
struct drm_device *dev = tilcdc_crtc->base.dev;
- struct drm_framebuffer *fb;
mutex_lock(&dev->mode_config.mutex);
- while (kfifo_get(&tilcdc_crtc->unref_fifo, &fb))
- drm_framebuffer_unreference(fb);
+ drm_framebuffer_unreference(val);
mutex_unlock(&dev->mode_config.mutex);
}
@@ -68,19 +65,14 @@ static void set_scanout(struct drm_crtc *crtc, int n)
};
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
+ struct tilcdc_drm_private *priv = dev->dev_private;
pm_runtime_get_sync(dev->dev);
tilcdc_write(dev, base_reg[n], tilcdc_crtc->start);
tilcdc_write(dev, ceil_reg[n], tilcdc_crtc->end);
if (tilcdc_crtc->scanout[n]) {
- if (kfifo_put(&tilcdc_crtc->unref_fifo,
- (const struct drm_framebuffer **)&tilcdc_crtc->scanout[n])) {
- struct tilcdc_drm_private *priv = dev->dev_private;
- queue_work(priv->wq, &tilcdc_crtc->work);
- } else {
- dev_err(dev->dev, "unref fifo full!\n");
- drm_framebuffer_unreference(tilcdc_crtc->scanout[n]);
- }
+ drm_flip_work_queue(&tilcdc_crtc->unref_work, tilcdc_crtc->scanout[n]);
+ drm_flip_work_commit(&tilcdc_crtc->unref_work, priv->wq);
}
tilcdc_crtc->scanout[n] = crtc->fb;
drm_framebuffer_reference(tilcdc_crtc->scanout[n]);
@@ -149,14 +141,15 @@ static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
WARN_ON(tilcdc_crtc->dpms == DRM_MODE_DPMS_ON);
drm_crtc_cleanup(crtc);
- WARN_ON(!kfifo_is_empty(&tilcdc_crtc->unref_fifo));
- kfifo_free(&tilcdc_crtc->unref_fifo);
+ drm_flip_work_cleanup(&tilcdc_crtc->unref_work);
+
kfree(tilcdc_crtc);
}
static int tilcdc_crtc_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -379,7 +372,12 @@ static int tilcdc_crtc_mode_set(struct drm_crtc *crtc,
else
tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_SYNC_EDGE);
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ /*
+ * Use the value from adjusted_mode here as it might have been
+ * changed as part of the fixup for slave encoders to solve the
+ * issue where tilcdc timings are not VESA compliant.
+ */
+ if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
tilcdc_set(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
else
tilcdc_clear(dev, LCDC_RASTER_TIMING_2_REG, LCDC_INVERT_HSYNC);
@@ -666,14 +664,13 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF;
init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
- ret = kfifo_alloc(&tilcdc_crtc->unref_fifo, 16, GFP_KERNEL);
+ ret = drm_flip_work_init(&tilcdc_crtc->unref_work, 16,
+ "unref", unref_worker);
if (ret) {
dev_err(dev->dev, "could not allocate unref FIFO\n");
goto fail;
}
- INIT_WORK(&tilcdc_crtc->work, unref_worker);
-
ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs);
if (ret < 0)
goto fail;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 40b71da5a21..116da199b94 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -497,7 +497,6 @@ static const struct file_operations fops = {
#endif
.poll = drm_poll,
.read = drm_read,
- .fasync = drm_fasync,
.llseek = no_llseek,
.mmap = drm_gem_cma_mmap,
};
@@ -519,7 +518,7 @@ static struct drm_driver tilcdc_driver = {
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_cma_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = tilcdc_debugfs_init,
.debugfs_cleanup = tilcdc_debugfs_cleanup,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_slave.c b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
index dfffaf01402..595068ba2d5 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_slave.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_slave.c
@@ -16,7 +16,6 @@
*/
#include <linux/i2c.h>
-#include <linux/of_i2c.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/consumer.h>
#include <drm/drm_encoder_slave.h>
@@ -73,13 +72,38 @@ static void slave_encoder_prepare(struct drm_encoder *encoder)
tilcdc_crtc_set_panel_info(encoder->crtc, &slave_info);
}
+static bool slave_encoder_fixup(struct drm_encoder *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /*
+ * tilcdc does not generate VESA-compliant sync, but aligns
+ * VS on the second edge of HS instead of the first edge.
+ * We use adjusted_mode to fix up the sync by aligning both rising
+ * edges and adding the HSKEW offset to let the slave encoder fix it up.
+ */
+ adjusted_mode->hskew = mode->hsync_end - mode->hsync_start;
+ adjusted_mode->flags |= DRM_MODE_FLAG_HSKEW;
+
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC) {
+ adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ adjusted_mode->flags &= ~DRM_MODE_FLAG_NHSYNC;
+ } else {
+ adjusted_mode->flags |= DRM_MODE_FLAG_NHSYNC;
+ adjusted_mode->flags &= ~DRM_MODE_FLAG_PHSYNC;
+ }
+
+ return drm_i2c_encoder_mode_fixup(encoder, mode, adjusted_mode);
+}
+
+
static const struct drm_encoder_funcs slave_encoder_funcs = {
.destroy = slave_encoder_destroy,
};
static const struct drm_encoder_helper_funcs slave_encoder_helper_funcs = {
.dpms = drm_i2c_encoder_dpms,
- .mode_fixup = drm_i2c_encoder_mode_fixup,
+ .mode_fixup = slave_encoder_fixup,
.prepare = slave_encoder_prepare,
.commit = drm_i2c_encoder_commit,
.mode_set = drm_i2c_encoder_mode_set,
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 925c7cddeff..c38b56b268a 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -16,7 +16,6 @@
*/
#include <linux/i2c.h>
-#include <linux/of_i2c.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/pinmux.h>
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cb9dd674670..f1a857ec102 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -45,7 +45,6 @@
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13
-static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);
@@ -615,13 +614,7 @@ static void ttm_bo_release(struct kref *kref)
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
- write_lock(&bdev->vm_lock);
- if (likely(bo->vm_node != NULL)) {
- rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
- drm_mm_put_block(bo->vm_node);
- bo->vm_node = NULL;
- }
- write_unlock(&bdev->vm_lock);
+ drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
ttm_mem_io_lock(man, false);
ttm_mem_io_free_vm(bo);
ttm_mem_io_unlock(man);
@@ -1129,6 +1122,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->resv = &bo->ttm_resv;
reservation_object_init(bo->resv);
atomic_inc(&bo->glob->bo_count);
+ drm_vma_node_reset(&bo->vma_node);
ret = ttm_bo_check_placement(bo, placement);
@@ -1139,7 +1133,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
if (likely(!ret) &&
(bo->type == ttm_bo_type_device ||
bo->type == ttm_bo_type_sg))
- ret = ttm_bo_setup_vm(bo);
+ ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+ bo->mem.num_pages);
locked = ww_mutex_trylock(&bo->resv->lock);
WARN_ON(!locked);
@@ -1424,10 +1419,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
TTM_DEBUG("Swap list was clean\n");
spin_unlock(&glob->lru_lock);
- BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
- write_lock(&bdev->vm_lock);
- drm_mm_takedown(&bdev->addr_space_mm);
- write_unlock(&bdev->vm_lock);
+ drm_vma_offset_manager_destroy(&bdev->vma_manager);
return ret;
}
@@ -1441,7 +1433,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
{
int ret = -EINVAL;
- rwlock_init(&bdev->vm_lock);
bdev->driver = driver;
memset(bdev->man, 0, sizeof(bdev->man));
@@ -1454,9 +1445,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
if (unlikely(ret != 0))
goto out_no_sys;
- bdev->addr_space_rb = RB_ROOT;
- drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
-
+ drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
+ 0x10000000);
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = NULL;
@@ -1498,12 +1488,8 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
- loff_t offset = (loff_t) bo->addr_space_offset;
- loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
- if (!bdev->dev_mapping)
- return;
- unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+ drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
ttm_mem_io_free_vm(bo);
}
@@ -1520,78 +1506,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
-static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_device *bdev = bo->bdev;
- struct rb_node **cur = &bdev->addr_space_rb.rb_node;
- struct rb_node *parent = NULL;
- struct ttm_buffer_object *cur_bo;
- unsigned long offset = bo->vm_node->start;
- unsigned long cur_offset;
-
- while (*cur) {
- parent = *cur;
- cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
- cur_offset = cur_bo->vm_node->start;
- if (offset < cur_offset)
- cur = &parent->rb_left;
- else if (offset > cur_offset)
- cur = &parent->rb_right;
- else
- BUG();
- }
-
- rb_link_node(&bo->vm_rb, parent, cur);
- rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
-}
-
-/**
- * ttm_bo_setup_vm:
- *
- * @bo: the buffer to allocate address space for
- *
- * Allocate address space in the drm device so that applications
- * can mmap the buffer and access the contents. This only
- * applies to ttm_bo_type_device objects as others are not
- * placed in the drm device address space.
- */
-
-static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_device *bdev = bo->bdev;
- int ret;
-
-retry_pre_get:
- ret = drm_mm_pre_get(&bdev->addr_space_mm);
- if (unlikely(ret != 0))
- return ret;
-
- write_lock(&bdev->vm_lock);
- bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
- bo->mem.num_pages, 0, 0);
-
- if (unlikely(bo->vm_node == NULL)) {
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
- bo->mem.num_pages, 0);
-
- if (unlikely(bo->vm_node == NULL)) {
- write_unlock(&bdev->vm_lock);
- goto retry_pre_get;
- }
-
- ttm_bo_vm_insert_rb(bo);
- write_unlock(&bdev->vm_lock);
- bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
-
- return 0;
-out_unlock:
- write_unlock(&bdev->vm_lock);
- return ret;
-}
int ttm_bo_wait(struct ttm_buffer_object *bo,
bool lazy, bool interruptible, bool no_wait)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index e4367f91472..c58eba33bd5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -61,28 +61,25 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
lpfn = placement->lpfn;
if (!lpfn)
lpfn = man->size;
- do {
- ret = drm_mm_pre_get(mm);
- if (unlikely(ret))
- return ret;
- spin_lock(&rman->lock);
- node = drm_mm_search_free_in_range(mm,
- mem->num_pages, mem->page_alignment,
- placement->fpfn, lpfn, 1);
- if (unlikely(node == NULL)) {
- spin_unlock(&rman->lock);
- return 0;
- }
- node = drm_mm_get_block_atomic_range(node, mem->num_pages,
- mem->page_alignment,
- placement->fpfn,
- lpfn);
- spin_unlock(&rman->lock);
- } while (node == NULL);
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ spin_lock(&rman->lock);
+ ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+ mem->page_alignment,
+ placement->fpfn, lpfn,
+ DRM_MM_SEARCH_BEST);
+ spin_unlock(&rman->lock);
+
+ if (unlikely(ret)) {
+ kfree(node);
+ } else {
+ mem->mm_node = node;
+ mem->start = node->start;
+ }
- mem->mm_node = node;
- mem->start = node->start;
return 0;
}
@@ -93,8 +90,10 @@ static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
if (mem->mm_node) {
spin_lock(&rman->lock);
- drm_mm_put_block(mem->mm_node);
+ drm_mm_remove_node(mem->mm_node);
spin_unlock(&rman->lock);
+
+ kfree(mem->mm_node);
mem->mm_node = NULL;
}
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 319cf4127c5..7cc904d3a4d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -30,6 +30,7 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
@@ -450,7 +451,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
INIT_LIST_HEAD(&fbo->io_reserve_lru);
- fbo->vm_node = NULL;
+ drm_vma_node_reset(&fbo->vma_node);
atomic_set(&fbo->cpu_writers, 0);
spin_lock(&bdev->fence_lock);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3df9f16b041..1006c15445e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -33,6 +33,7 @@
#include <ttm/ttm_module.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/module.h>
@@ -40,37 +41,6 @@
#define TTM_BO_VM_NUM_PREFAULT 16
-static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
- unsigned long page_start,
- unsigned long num_pages)
-{
- struct rb_node *cur = bdev->addr_space_rb.rb_node;
- unsigned long cur_offset;
- struct ttm_buffer_object *bo;
- struct ttm_buffer_object *best_bo = NULL;
-
- while (likely(cur != NULL)) {
- bo = rb_entry(cur, struct ttm_buffer_object, vm_rb);
- cur_offset = bo->vm_node->start;
- if (page_start >= cur_offset) {
- cur = cur->rb_right;
- best_bo = bo;
- if (page_start == cur_offset)
- break;
- } else
- cur = cur->rb_left;
- }
-
- if (unlikely(best_bo == NULL))
- return NULL;
-
- if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
- (page_start + num_pages)))
- return NULL;
-
- return best_bo;
-}
-
static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -146,9 +116,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
- bo->vm_node->start - vma->vm_pgoff;
+ drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
page_last = vma_pages(vma) +
- bo->vm_node->start - vma->vm_pgoff;
+ drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
if (unlikely(page_offset >= bo->num_pages)) {
retval = VM_FAULT_SIGBUS;
@@ -249,6 +219,30 @@ static const struct vm_operations_struct ttm_bo_vm_ops = {
.close = ttm_bo_vm_close
};
+static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev,
+ unsigned long offset,
+ unsigned long pages)
+{
+ struct drm_vma_offset_node *node;
+ struct ttm_buffer_object *bo = NULL;
+
+ drm_vma_offset_lock_lookup(&bdev->vma_manager);
+
+ node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages);
+ if (likely(node)) {
+ bo = container_of(node, struct ttm_buffer_object, vma_node);
+ if (!kref_get_unless_zero(&bo->kref))
+ bo = NULL;
+ }
+
+ drm_vma_offset_unlock_lookup(&bdev->vma_manager);
+
+ if (!bo)
+ pr_err("Could not find buffer object to map\n");
+
+ return bo;
+}
+
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_bo_device *bdev)
{
@@ -256,17 +250,9 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
struct ttm_buffer_object *bo;
int ret;
- read_lock(&bdev->vm_lock);
- bo = ttm_bo_vm_lookup_rb(bdev, vma->vm_pgoff,
- vma_pages(vma));
- if (likely(bo != NULL) && !kref_get_unless_zero(&bo->kref))
- bo = NULL;
- read_unlock(&bdev->vm_lock);
-
- if (unlikely(bo == NULL)) {
- pr_err("Could not find buffer object to map\n");
+ bo = ttm_bo_vm_lookup(bdev, vma->vm_pgoff, vma_pages(vma));
+ if (unlikely(!bo))
return -EINVAL;
- }
driver = bo->bdev->driver;
if (unlikely(!driver->verify_access)) {
@@ -304,162 +290,3 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
return 0;
}
EXPORT_SYMBOL(ttm_fbdev_mmap);
-
-
-ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
- const char __user *wbuf, char __user *rbuf, size_t count,
- loff_t *f_pos, bool write)
-{
- struct ttm_buffer_object *bo;
- struct ttm_bo_driver *driver;
- struct ttm_bo_kmap_obj map;
- unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
- unsigned long kmap_offset;
- unsigned long kmap_end;
- unsigned long kmap_num;
- size_t io_size;
- unsigned int page_offset;
- char *virtual;
- int ret;
- bool no_wait = false;
- bool dummy;
-
- read_lock(&bdev->vm_lock);
- bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
- if (likely(bo != NULL))
- ttm_bo_reference(bo);
- read_unlock(&bdev->vm_lock);
-
- if (unlikely(bo == NULL))
- return -EFAULT;
-
- driver = bo->bdev->driver;
- if (unlikely(!driver->verify_access)) {
- ret = -EPERM;
- goto out_unref;
- }
-
- ret = driver->verify_access(bo, filp);
- if (unlikely(ret != 0))
- goto out_unref;
-
- kmap_offset = dev_offset - bo->vm_node->start;
- if (unlikely(kmap_offset >= bo->num_pages)) {
- ret = -EFBIG;
- goto out_unref;
- }
-
- page_offset = *f_pos & ~PAGE_MASK;
- io_size = bo->num_pages - kmap_offset;
- io_size = (io_size << PAGE_SHIFT) - page_offset;
- if (count < io_size)
- io_size = count;
-
- kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
- kmap_num = kmap_end - kmap_offset + 1;
-
- ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
-
- switch (ret) {
- case 0:
- break;
- case -EBUSY:
- ret = -EAGAIN;
- goto out_unref;
- default:
- goto out_unref;
- }
-
- ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
- if (unlikely(ret != 0)) {
- ttm_bo_unreserve(bo);
- goto out_unref;
- }
-
- virtual = ttm_kmap_obj_virtual(&map, &dummy);
- virtual += page_offset;
-
- if (write)
- ret = copy_from_user(virtual, wbuf, io_size);
- else
- ret = copy_to_user(rbuf, virtual, io_size);
-
- ttm_bo_kunmap(&map);
- ttm_bo_unreserve(bo);
- ttm_bo_unref(&bo);
-
- if (unlikely(ret != 0))
- return -EFBIG;
-
- *f_pos += io_size;
-
- return io_size;
-out_unref:
- ttm_bo_unref(&bo);
- return ret;
-}
-
-ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
- char __user *rbuf, size_t count, loff_t *f_pos,
- bool write)
-{
- struct ttm_bo_kmap_obj map;
- unsigned long kmap_offset;
- unsigned long kmap_end;
- unsigned long kmap_num;
- size_t io_size;
- unsigned int page_offset;
- char *virtual;
- int ret;
- bool no_wait = false;
- bool dummy;
-
- kmap_offset = (*f_pos >> PAGE_SHIFT);
- if (unlikely(kmap_offset >= bo->num_pages))
- return -EFBIG;
-
- page_offset = *f_pos & ~PAGE_MASK;
- io_size = bo->num_pages - kmap_offset;
- io_size = (io_size << PAGE_SHIFT) - page_offset;
- if (count < io_size)
- io_size = count;
-
- kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
- kmap_num = kmap_end - kmap_offset + 1;
-
- ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
-
- switch (ret) {
- case 0:
- break;
- case -EBUSY:
- return -EAGAIN;
- default:
- return ret;
- }
-
- ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
- if (unlikely(ret != 0)) {
- ttm_bo_unreserve(bo);
- return ret;
- }
-
- virtual = ttm_kmap_obj_virtual(&map, &dummy);
- virtual += page_offset;
-
- if (write)
- ret = copy_from_user(virtual, wbuf, io_size);
- else
- ret = copy_to_user(rbuf, virtual, io_size);
-
- ttm_bo_kunmap(&map);
- ttm_bo_unreserve(bo);
- ttm_bo_unref(&bo);
-
- if (unlikely(ret != 0))
- return ret;
-
- *f_pos += io_size;
-
- return io_size;
-}
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index c0770dbba74..7650dc0d78c 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -65,7 +65,6 @@ static const struct file_operations udl_driver_fops = {
.read = drm_read,
.unlocked_ioctl = drm_ioctl,
.release = drm_release,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
@@ -84,7 +83,7 @@ static struct drm_driver driver = {
.dumb_create = udl_dumb_create,
.dumb_map_offset = udl_gem_mmap,
- .dumb_destroy = udl_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.fops = &udl_driver_fops,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index cc6d90f28c7..56aec9409fa 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -114,8 +114,6 @@ int udl_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args);
int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
uint32_t handle, uint64_t *offset);
-int udl_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
- uint32_t handle);
int udl_gem_init_object(struct drm_gem_object *obj);
void udl_gem_free_object(struct drm_gem_object *gem_obj);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index ef034fa3e6f..8dbe9d0ae9a 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -66,12 +66,6 @@ int udl_dumb_create(struct drm_file *file,
args->size, &args->handle);
}
-int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
- uint32_t handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
-
int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
@@ -123,55 +117,23 @@ int udl_gem_init_object(struct drm_gem_object *obj)
static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
{
- int page_count, i;
- struct page *page;
- struct inode *inode;
- struct address_space *mapping;
+ struct page **pages;
if (obj->pages)
return 0;
- page_count = obj->base.size / PAGE_SIZE;
- BUG_ON(obj->pages != NULL);
- obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
- if (obj->pages == NULL)
- return -ENOMEM;
+ pages = drm_gem_get_pages(&obj->base, gfpmask);
+ if (IS_ERR(pages))
+ return PTR_ERR(pages);
- inode = file_inode(obj->base.filp);
- mapping = inode->i_mapping;
- gfpmask |= mapping_gfp_mask(mapping);
-
- for (i = 0; i < page_count; i++) {
- page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
- if (IS_ERR(page))
- goto err_pages;
- obj->pages[i] = page;
- }
+ obj->pages = pages;
return 0;
-err_pages:
- while (i--)
- page_cache_release(obj->pages[i]);
- drm_free_large(obj->pages);
- obj->pages = NULL;
- return PTR_ERR(page);
}
static void udl_gem_put_pages(struct udl_gem_object *obj)
{
- int page_count = obj->base.size / PAGE_SIZE;
- int i;
-
- if (obj->base.import_attach) {
- drm_free_large(obj->pages);
- obj->pages = NULL;
- return;
- }
-
- for (i = 0; i < page_count; i++)
- page_cache_release(obj->pages[i]);
-
- drm_free_large(obj->pages);
+ drm_gem_put_pages(&obj->base, obj->pages, false, false);
obj->pages = NULL;
}
@@ -223,8 +185,7 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
if (obj->pages)
udl_gem_put_pages(obj);
- if (gem_obj->map_list.map)
- drm_gem_free_mmap_offset(gem_obj);
+ drm_gem_free_mmap_offset(gem_obj);
}
/* the dumb interface doesn't work with the GEM straight MMAP
@@ -247,13 +208,11 @@ int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
ret = udl_gem_get_pages(gobj, GFP_KERNEL);
if (ret)
goto out;
- if (!gobj->base.map_list.map) {
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto out;
- }
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret)
+ goto out;
- *offset = (u64)gobj->base.map_list.hash.key << PAGE_SHIFT;
+ *offset = drm_vma_node_offset_addr(&gobj->base.vma_node);
out:
drm_gem_object_unreference(&gobj->base);
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index 0ce2d719525..f5ae57406f3 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -41,8 +41,8 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev,
total_len = usb_get_descriptor(usbdev, 0x5f, /* vendor specific */
0, desc, MAX_VENDOR_DESCRIPTOR_SIZE);
if (total_len > 5) {
- DRM_INFO("vendor descriptor length:%x data:%*ph\n",
- total_len, 11, desc);
+ DRM_INFO("vendor descriptor length:%x data:%11ph\n",
+ total_len, desc);
if ((desc[0] != total_len) || /* descriptor length */
(desc[1] != 0x5f) || /* vendor descriptor type */
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index 13558f5a242..652f9b43ec9 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -720,7 +720,7 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
return ret;
}
-struct drm_ioctl_desc via_ioctls[] = {
+const struct drm_ioctl_desc via_ioctls[] = {
DRM_IOCTL_DEF_DRV(VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
DRM_IOCTL_DEF_DRV(VIA_FREEMEM, via_mem_free, DRM_AUTH),
DRM_IOCTL_DEF_DRV(VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index f4ae2032794..92684a9b7e3 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -64,7 +64,6 @@ static const struct file_operations via_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
@@ -73,7 +72,7 @@ static const struct file_operations via_driver_fops = {
static struct drm_driver driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
+ DRIVER_USE_AGP | DRIVER_HAVE_IRQ |
DRIVER_IRQ_SHARED,
.load = via_driver_load,
.unload = via_driver_unload,
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index 893a65090c3..a811ef2b505 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -114,7 +114,7 @@ enum via_family {
#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
#define VIA_WRITE8(reg, val) DRM_WRITE8(VIA_BASE, reg, val)
-extern struct drm_ioctl_desc via_ioctls[];
+extern const struct drm_ioctl_desc via_ioctls[];
extern int via_max_ioctl;
extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 0ab93ff0987..7e3ad87c366 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -140,11 +140,11 @@ int via_mem_alloc(struct drm_device *dev, void *data,
if (mem->type == VIA_MEM_AGP)
retval = drm_mm_insert_node(&dev_priv->agp_mm,
&item->mm_node,
- tmpSize, 0);
+ tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
else
retval = drm_mm_insert_node(&dev_priv->vram_mm,
&item->mm_node,
- tmpSize, 0);
+ tmpSize, 0, DRM_MM_SEARCH_DEFAULT);
if (retval)
goto fail_alloc;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 78e21649d48..1a90f0a2f7e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -124,7 +124,7 @@
* Ioctl definitions.
*/
-static struct drm_ioctl_desc vmw_ioctls[] = {
+static const struct drm_ioctl_desc vmw_ioctls[] = {
VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
DRM_AUTH | DRM_UNLOCKED),
VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
@@ -622,8 +622,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
}
dev_priv->fman = vmw_fence_manager_init(dev_priv);
- if (unlikely(dev_priv->fman == NULL))
+ if (unlikely(dev_priv->fman == NULL)) {
+ ret = -ENOMEM;
goto out_no_fman;
+ }
vmw_kms_save_vga(dev_priv);
@@ -782,7 +784,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
&& (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
- struct drm_ioctl_desc *ioctl =
+ const struct drm_ioctl_desc *ioctl =
&vmw_ioctls[nr - DRM_COMMAND_BASE];
if (unlikely(ioctl->cmd_drv != cmd)) {
@@ -795,29 +797,12 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
return drm_ioctl(filp, cmd, arg);
}
-static int vmw_firstopen(struct drm_device *dev)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- dev_priv->is_opened = true;
-
- return 0;
-}
-
static void vmw_lastclose(struct drm_device *dev)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_crtc *crtc;
struct drm_mode_set set;
int ret;
- /**
- * Do nothing on the lastclose call from drm_unload.
- */
-
- if (!dev_priv->is_opened)
- return;
-
- dev_priv->is_opened = false;
set.x = 0;
set.y = 0;
set.fb = NULL;
@@ -1120,7 +1105,6 @@ static const struct file_operations vmwgfx_driver_fops = {
.mmap = vmw_mmap,
.poll = vmw_fops_poll,
.read = vmw_fops_read,
- .fasync = drm_fasync,
#if defined(CONFIG_COMPAT)
.compat_ioctl = drm_compat_ioctl,
#endif
@@ -1132,7 +1116,6 @@ static struct drm_driver driver = {
DRIVER_MODESET,
.load = vmw_driver_load,
.unload = vmw_driver_unload,
- .firstopen = vmw_firstopen,
.lastclose = vmw_lastclose,
.irq_preinstall = vmw_irq_preinstall,
.irq_postinstall = vmw_irq_postinstall,
@@ -1143,7 +1126,6 @@ static struct drm_driver driver = {
.disable_vblank = vmw_disable_vblank,
.ioctls = vmw_ioctls,
.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
- .dma_quiescent = NULL, /*vmw_dma_quiescent, */
.master_create = vmw_master_create,
.master_destroy = vmw_master_destroy,
.master_set = vmw_master_set,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 13aeda71280..150ec64af61 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -324,7 +324,6 @@ struct vmw_private {
*/
bool stealth;
- bool is_opened;
bool enable_fb;
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 3751730764a..1a0bf07fe54 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -29,7 +29,9 @@
#include <drm/drmP.h>
#include <drm/ttm/ttm_bo_driver.h>
-#define VMW_PPN_SIZE sizeof(unsigned long)
+#define VMW_PPN_SIZE (sizeof(unsigned long))
+/* A future-safe maximum remap size. */
+#define VMW_PPN_PER_REMAP ((31 * 1024) / VMW_PPN_SIZE)
static int vmw_gmr2_bind(struct vmw_private *dev_priv,
struct page *pages[],
@@ -38,43 +40,61 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
{
SVGAFifoCmdDefineGMR2 define_cmd;
SVGAFifoCmdRemapGMR2 remap_cmd;
- uint32_t define_size = sizeof(define_cmd) + 4;
- uint32_t remap_size = VMW_PPN_SIZE * num_pages + sizeof(remap_cmd) + 4;
uint32_t *cmd;
uint32_t *cmd_orig;
+ uint32_t define_size = sizeof(define_cmd) + sizeof(*cmd);
+ uint32_t remap_num = num_pages / VMW_PPN_PER_REMAP + ((num_pages % VMW_PPN_PER_REMAP) > 0);
+ uint32_t remap_size = VMW_PPN_SIZE * num_pages + (sizeof(remap_cmd) + sizeof(*cmd)) * remap_num;
+ uint32_t remap_pos = 0;
+ uint32_t cmd_size = define_size + remap_size;
uint32_t i;
- cmd_orig = cmd = vmw_fifo_reserve(dev_priv, define_size + remap_size);
+ cmd_orig = cmd = vmw_fifo_reserve(dev_priv, cmd_size);
if (unlikely(cmd == NULL))
return -ENOMEM;
define_cmd.gmrId = gmr_id;
define_cmd.numPages = num_pages;
+ *cmd++ = SVGA_CMD_DEFINE_GMR2;
+ memcpy(cmd, &define_cmd, sizeof(define_cmd));
+ cmd += sizeof(define_cmd) / sizeof(*cmd);
+
+ /*
+ * Need to split the command if there are too many
+ * pages that go into the GMR.
+ */
+
remap_cmd.gmrId = gmr_id;
remap_cmd.flags = (VMW_PPN_SIZE > sizeof(*cmd)) ?
SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
- remap_cmd.offsetPages = 0;
- remap_cmd.numPages = num_pages;
- *cmd++ = SVGA_CMD_DEFINE_GMR2;
- memcpy(cmd, &define_cmd, sizeof(define_cmd));
- cmd += sizeof(define_cmd) / sizeof(uint32);
+ while (num_pages > 0) {
+ unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);
+
+ remap_cmd.offsetPages = remap_pos;
+ remap_cmd.numPages = nr;
- *cmd++ = SVGA_CMD_REMAP_GMR2;
- memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
- cmd += sizeof(remap_cmd) / sizeof(uint32);
+ *cmd++ = SVGA_CMD_REMAP_GMR2;
+ memcpy(cmd, &remap_cmd, sizeof(remap_cmd));
+ cmd += sizeof(remap_cmd) / sizeof(*cmd);
- for (i = 0; i < num_pages; ++i) {
- if (VMW_PPN_SIZE <= 4)
- *cmd = page_to_pfn(*pages++);
- else
- *((uint64_t *)cmd) = page_to_pfn(*pages++);
+ for (i = 0; i < nr; ++i) {
+ if (VMW_PPN_SIZE <= 4)
+ *cmd = page_to_pfn(*pages++);
+ else
+ *((uint64_t *)cmd) = page_to_pfn(*pages++);
- cmd += VMW_PPN_SIZE / sizeof(*cmd);
+ cmd += VMW_PPN_SIZE / sizeof(*cmd);
+ }
+
+ num_pages -= nr;
+ remap_pos += nr;
}
- vmw_fifo_commit(dev_priv, define_size + remap_size);
+ BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd));
+
+ vmw_fifo_commit(dev_priv, cmd_size);
return 0;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index d4607b2530d..fc43c060123 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1706,7 +1706,8 @@ int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
int vmw_du_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct drm_framebuffer *old_fb = crtc->fb;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 6fa89c9d621..8d038c36bd5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -123,7 +123,8 @@ struct vmw_display_unit {
void vmw_display_unit_cleanup(struct vmw_display_unit *du);
int vmw_du_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event);
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags);
void vmw_du_crtc_save(struct drm_crtc *crtc);
void vmw_du_crtc_restore(struct drm_crtc *crtc);
void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 7953d1f90b6..0e67cf41065 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -500,7 +500,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
goto out_no_dmabuf;
rep->handle = handle;
- rep->map_handle = dma_buf->base.addr_space_offset;
+ rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
rep->cur_gmr_id = handle;
rep->cur_gmr_offset = 0;
@@ -834,7 +834,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
if (ret != 0)
return -EINVAL;
- *offset = out_buf->base.addr_space_offset;
+ *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
vmw_dmabuf_unreference(&out_buf);
return 0;
}
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index 28e28a23d44..47163029987 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -43,7 +43,7 @@ void host1x_set_drm_data(struct device *dev, void *data)
void *host1x_get_drm_data(struct device *dev)
{
struct host1x *host1x = dev_get_drvdata(dev);
- return host1x->drm_data;
+ return host1x ? host1x->drm_data : NULL;
}
void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r)
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index 790ddf114e5..bed90a8131b 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -301,8 +301,8 @@ static inline void host1x_hw_show_mlocks(struct host1x *host, struct output *o)
host->debug_op->show_mlocks(host, o);
}
-extern struct platform_driver tegra_hdmi_driver;
extern struct platform_driver tegra_dc_driver;
+extern struct platform_driver tegra_hdmi_driver;
extern struct platform_driver tegra_gr2d_driver;
#endif
diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/host1x/drm/dc.c
index 5360e5a57ec..b1a05ad901c 100644
--- a/drivers/gpu/host1x/drm/dc.c
+++ b/drivers/gpu/host1x/drm/dc.c
@@ -235,7 +235,7 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
}
static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
{
struct tegra_dc *dc = to_tegra_dc(crtc);
struct drm_device *drm = crtc->dev;
diff --git a/drivers/gpu/host1x/drm/drm.c b/drivers/gpu/host1x/drm/drm.c
index e184b00faac..8c61ceeaa12 100644
--- a/drivers/gpu/host1x/drm/drm.c
+++ b/drivers/gpu/host1x/drm/drm.c
@@ -356,7 +356,7 @@ static int tegra_gem_mmap(struct drm_device *drm, void *data,
bo = to_tegra_bo(gem);
- args->offset = tegra_bo_get_mmap_offset(bo);
+ args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
drm_gem_object_unreference(gem);
@@ -487,7 +487,7 @@ static int tegra_submit(struct drm_device *drm, void *data,
}
#endif
-static struct drm_ioctl_desc tegra_drm_ioctls[] = {
+static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create, DRM_UNLOCKED | DRM_AUTH),
DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap, DRM_UNLOCKED),
@@ -508,7 +508,6 @@ static const struct file_operations tegra_drm_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = tegra_drm_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
.read = drm_read,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
@@ -633,7 +632,7 @@ struct drm_driver tegra_drm_driver = {
.gem_vm_ops = &tegra_bo_vm_ops,
.dumb_create = tegra_bo_dumb_create,
.dumb_map_offset = tegra_bo_dumb_map_offset,
- .dumb_destroy = tegra_bo_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.ioctls = tegra_drm_ioctls,
.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
diff --git a/drivers/gpu/host1x/drm/gem.c b/drivers/gpu/host1x/drm/gem.c
index c5e9a9b494c..59623de4ee1 100644
--- a/drivers/gpu/host1x/drm/gem.c
+++ b/drivers/gpu/host1x/drm/gem.c
@@ -106,11 +106,6 @@ static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo)
dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr);
}
-unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo)
-{
- return (unsigned int)bo->gem.map_list.hash.key << PAGE_SHIFT;
-}
-
struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size)
{
struct tegra_bo *bo;
@@ -182,8 +177,7 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
{
struct tegra_bo *bo = to_tegra_bo(gem);
- if (gem->map_list.map)
- drm_gem_free_mmap_offset(gem);
+ drm_gem_free_mmap_offset(gem);
drm_gem_object_release(gem);
tegra_bo_destroy(gem->dev, bo);
@@ -228,7 +222,7 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
bo = to_tegra_bo(gem);
- *offset = tegra_bo_get_mmap_offset(bo);
+ *offset = drm_vma_node_offset_addr(&bo->gem.vma_node);
drm_gem_object_unreference(gem);
@@ -262,9 +256,3 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
return ret;
}
-
-int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
- unsigned int handle)
-{
- return drm_gem_handle_delete(file, handle);
-}
diff --git a/drivers/gpu/host1x/drm/gem.h b/drivers/gpu/host1x/drm/gem.h
index 34de2b486eb..492533a2dac 100644
--- a/drivers/gpu/host1x/drm/gem.h
+++ b/drivers/gpu/host1x/drm/gem.h
@@ -44,13 +44,10 @@ struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
unsigned int size,
unsigned int *handle);
void tegra_bo_free_object(struct drm_gem_object *gem);
-unsigned int tegra_bo_get_mmap_offset(struct tegra_bo *bo);
int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
struct drm_mode_create_dumb *args);
int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
uint32_t handle, uint64_t *offset);
-int tegra_bo_dumb_destroy(struct drm_file *file, struct drm_device *drm,
- unsigned int handle);
int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
diff --git a/drivers/gpu/host1x/drm/hdmi.c b/drivers/gpu/host1x/drm/hdmi.c
index 01097da09f7..644d95c7d48 100644
--- a/drivers/gpu/host1x/drm/hdmi.c
+++ b/drivers/gpu/host1x/drm/hdmi.c
@@ -551,24 +551,8 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
return;
}
- memset(&frame, 0, sizeof(frame));
-
- frame.type = HDMI_INFOFRAME_TYPE_VENDOR;
- frame.version = 0x01;
- frame.length = 6;
-
- frame.data[0] = 0x03; /* regid0 */
- frame.data[1] = 0x0c; /* regid1 */
- frame.data[2] = 0x00; /* regid2 */
- frame.data[3] = 0x02 << 5; /* video format */
-
- /* TODO: 74 MHz limit? */
- if (1) {
- frame.data[4] = 0x00 << 4; /* 3D structure */
- } else {
- frame.data[4] = 0x08 << 4; /* 3D structure */
- frame.data[5] = 0x00 << 4; /* 3D ext. data */
- }
+ hdmi_vendor_infoframe_init(&frame);
+ frame.s3d_struct = HDMI_3D_STRUCTURE_FRAME_PACKING;
err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
if (err < 0) {
@@ -904,6 +888,11 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
{
struct drm_info_node *node = s->private;
struct tegra_hdmi *hdmi = node->info_ent->data;
+ int err;
+
+ err = clk_enable(hdmi->clk);
+ if (err)
+ return err;
#define DUMP_REG(name) \
seq_printf(s, "%-56s %#05x %08lx\n", #name, name, \
@@ -1069,6 +1058,8 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
#undef DUMP_REG
+ clk_disable(hdmi->clk);
+
return 0;
}
diff --git a/drivers/gpu/host1x/drm/output.c b/drivers/gpu/host1x/drm/output.c
index 8140fc6c34d..137ae81ab80 100644
--- a/drivers/gpu/host1x/drm/output.c
+++ b/drivers/gpu/host1x/drm/output.c
@@ -9,7 +9,7 @@
#include <linux/module.h>
#include <linux/of_gpio.h>
-#include <linux/of_i2c.h>
+#include <linux/i2c.h>
#include "drm.h"
diff --git a/drivers/gpu/host1x/drm/rgb.c b/drivers/gpu/host1x/drm/rgb.c
index ed4416f2026..5aa66ef7a94 100644
--- a/drivers/gpu/host1x/drm/rgb.c
+++ b/drivers/gpu/host1x/drm/rgb.c
@@ -147,6 +147,13 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
if (!rgb)
return -ENOMEM;
+ rgb->output.dev = dc->dev;
+ rgb->output.of_node = np;
+
+ err = tegra_output_parse_dt(&rgb->output);
+ if (err < 0)
+ return err;
+
rgb->clk = devm_clk_get(dc->dev, NULL);
if (IS_ERR(rgb->clk)) {
dev_err(dc->dev, "failed to get clock\n");
@@ -165,13 +172,6 @@ int tegra_dc_rgb_probe(struct tegra_dc *dc)
return err;
}
- rgb->output.dev = dc->dev;
- rgb->output.of_node = np;
-
- err = tegra_output_parse_dt(&rgb->output);
- if (err < 0)
- return err;
-
dc->rgb = &rgb->output;
return 0;
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index cc807667d8f..c4e1050f225 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -42,12 +42,12 @@ struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
/* Check that we're not going to overflow */
total = sizeof(struct host1x_job) +
- num_relocs * sizeof(struct host1x_reloc) +
- num_unpins * sizeof(struct host1x_job_unpin_data) +
- num_waitchks * sizeof(struct host1x_waitchk) +
- num_cmdbufs * sizeof(struct host1x_job_gather) +
- num_unpins * sizeof(dma_addr_t) +
- num_unpins * sizeof(u32 *);
+ (u64)num_relocs * sizeof(struct host1x_reloc) +
+ (u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
+ (u64)num_waitchks * sizeof(struct host1x_waitchk) +
+ (u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
+ (u64)num_unpins * sizeof(dma_addr_t) +
+ (u64)num_unpins * sizeof(u32 *);
if (total > ULONG_MAX)
return NULL;
@@ -466,9 +466,8 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
&job->gather_copy,
GFP_KERNEL);
if (!job->gather_copy_mapped) {
- int err = PTR_ERR(job->gather_copy_mapped);
job->gather_copy_mapped = NULL;
- return err;
+ return -ENOMEM;
}
job->gather_copy_size = size;
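
The casts in host1x_job_alloc() matter on 32-bit builds: without them each num_* * sizeof(...) product is evaluated in 32-bit arithmetic and can wrap before the sum is compared against ULONG_MAX, so an oversized job could slip past the check. Forcing one operand to u64 makes the whole expression 64-bit. A condensed illustration of the same idea, using a hypothetical helper:

	#include <linux/kernel.h>

	/* Sketch: compute count * elem_size without letting a 32-bit multiply
	 * wrap before the range check runs. Returns false if the result would
	 * not fit in unsigned long on this architecture. */
	static bool example_size_fits(unsigned int count, size_t elem_size,
				      unsigned long *total)
	{
		u64 wide = (u64)count * elem_size;	/* 64-bit product, no wrap */

		if (wide > ULONG_MAX)
			return false;

		*total = wide;
		return true;
	}
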
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index cf787e1d932..ec0ae2d1686 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <linux/console.h>
#include <linux/vga_switcheroo.h>
+#include <linux/pm_runtime.h>
#include <linux/vgaarb.h>
@@ -37,6 +38,7 @@ struct vga_switcheroo_client {
const struct vga_switcheroo_client_ops *ops;
int id;
bool active;
+ bool driver_power_control;
struct list_head list;
};
@@ -132,7 +134,7 @@ EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
static int register_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
- int id, bool active)
+ int id, bool active, bool driver_power_control)
{
struct vga_switcheroo_client *client;
@@ -145,6 +147,7 @@ static int register_client(struct pci_dev *pdev,
client->ops = ops;
client->id = id;
client->active = active;
+ client->driver_power_control = driver_power_control;
mutex_lock(&vgasr_mutex);
list_add_tail(&client->list, &vgasr_priv.clients);
@@ -160,10 +163,11 @@ static int register_client(struct pci_dev *pdev,
}
int vga_switcheroo_register_client(struct pci_dev *pdev,
- const struct vga_switcheroo_client_ops *ops)
+ const struct vga_switcheroo_client_ops *ops,
+ bool driver_power_control)
{
return register_client(pdev, ops, -1,
- pdev == vga_default_device());
+ pdev == vga_default_device(), driver_power_control);
}
EXPORT_SYMBOL(vga_switcheroo_register_client);
@@ -171,7 +175,7 @@ int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
int id, bool active)
{
- return register_client(pdev, ops, id | ID_BIT_AUDIO, active);
+ return register_client(pdev, ops, id | ID_BIT_AUDIO, active, false);
}
EXPORT_SYMBOL(vga_switcheroo_register_audio_client);
@@ -258,10 +262,11 @@ static int vga_switcheroo_show(struct seq_file *m, void *v)
int i = 0;
mutex_lock(&vgasr_mutex);
list_for_each_entry(client, &vgasr_priv.clients, list) {
- seq_printf(m, "%d:%s%s:%c:%s:%s\n", i,
+ seq_printf(m, "%d:%s%s:%c:%s%s:%s\n", i,
client_id(client) == VGA_SWITCHEROO_DIS ? "DIS" : "IGD",
client_is_vga(client) ? "" : "-Audio",
client->active ? '+' : ' ',
+ client->driver_power_control ? "Dyn" : "",
client->pwr_state ? "Pwr" : "Off",
pci_name(client->pdev));
i++;
@@ -277,6 +282,8 @@ static int vga_switcheroo_debugfs_open(struct inode *inode, struct file *file)
static int vga_switchon(struct vga_switcheroo_client *client)
{
+ if (client->driver_power_control)
+ return 0;
if (vgasr_priv.handler->power_state)
vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
/* call the driver callback to turn on device */
@@ -287,6 +294,8 @@ static int vga_switchon(struct vga_switcheroo_client *client)
static int vga_switchoff(struct vga_switcheroo_client *client)
{
+ if (client->driver_power_control)
+ return 0;
/* call the driver callback to turn off device */
client->ops->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
if (vgasr_priv.handler->power_state)
@@ -402,6 +411,8 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
list_for_each_entry(client, &vgasr_priv.clients, list) {
if (client->active || client_is_audio(client))
continue;
+ if (client->driver_power_control)
+ continue;
set_audio_state(client->id, VGA_SWITCHEROO_OFF);
if (client->pwr_state == VGA_SWITCHEROO_ON)
vga_switchoff(client);
@@ -413,6 +424,8 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
list_for_each_entry(client, &vgasr_priv.clients, list) {
if (client->active || client_is_audio(client))
continue;
+ if (client->driver_power_control)
+ continue;
if (client->pwr_state == VGA_SWITCHEROO_OFF)
vga_switchon(client);
set_audio_state(client->id, VGA_SWITCHEROO_ON);
@@ -565,3 +578,127 @@ err:
return err;
}
EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
+
+static void vga_switcheroo_power_switch(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+ struct vga_switcheroo_client *client;
+
+ if (!vgasr_priv.handler->power_state)
+ return;
+
+ client = find_client_from_pci(&vgasr_priv.clients, pdev);
+ if (!client)
+ return;
+
+ if (!client->driver_power_control)
+ return;
+
+ vgasr_priv.handler->power_state(client->id, state);
+}
+
+/* force a PCI device to a certain state - mainly to turn off audio clients */
+
+void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic)
+{
+ struct vga_switcheroo_client *client;
+
+ client = find_client_from_pci(&vgasr_priv.clients, pdev);
+ if (!client)
+ return;
+
+ if (!client->driver_power_control)
+ return;
+
+ client->pwr_state = dynamic;
+ set_audio_state(client->id, dynamic);
+}
+EXPORT_SYMBOL(vga_switcheroo_set_dynamic_switch);
+
+/* switcheroo power domain */
+static int vga_switcheroo_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = dev->bus->pm->runtime_suspend(dev);
+ if (ret)
+ return ret;
+
+ vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_OFF);
+ return 0;
+}
+
+static int vga_switcheroo_runtime_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_ON);
+ ret = dev->bus->pm->runtime_resume(dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+/* This version is for the case where the power switch is separate
+   from the device being powered down. */
+int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
+{
+ /* copy over all the bus versions */
+ if (dev->bus && dev->bus->pm) {
+ domain->ops = *dev->bus->pm;
+ domain->ops.runtime_suspend = vga_switcheroo_runtime_suspend;
+ domain->ops.runtime_resume = vga_switcheroo_runtime_resume;
+
+ dev->pm_domain = domain;
+ return 0;
+ }
+ dev->pm_domain = NULL;
+ return -EINVAL;
+}
+EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_ops);
+
+static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+ struct vga_switcheroo_client *client, *found = NULL;
+
+ /* we need to check if we have to switch back on the video
+ device so the audio device can come back */
+ list_for_each_entry(client, &vgasr_priv.clients, list) {
+ if (PCI_SLOT(client->pdev->devfn) == PCI_SLOT(pdev->devfn) && client_is_vga(client)) {
+ found = client;
+ ret = pm_runtime_get_sync(&client->pdev->dev);
+ if (ret) {
+ if (ret != 1)
+ return ret;
+ }
+ break;
+ }
+ }
+ ret = dev->bus->pm->runtime_resume(dev);
+
+ /* put the reference for the gpu */
+ if (found) {
+ pm_runtime_mark_last_busy(&found->pdev->dev);
+ pm_runtime_put_autosuspend(&found->pdev->dev);
+ }
+ return ret;
+}
+
+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
+{
+ /* copy over all the bus versions */
+ if (dev->bus && dev->bus->pm) {
+ domain->ops = *dev->bus->pm;
+ domain->ops.runtime_resume = vga_switcheroo_runtime_resume_hdmi_audio;
+
+ dev->pm_domain = domain;
+ return 0;
+ }
+ dev->pm_domain = NULL;
+ return -EINVAL;
+}
+EXPORT_SYMBOL(vga_switcheroo_init_domain_pm_optimus_hdmi_audio);
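
The new driver_power_control flag and PM-domain helpers let a hybrid-graphics driver keep the manual IGD/DIS mux switching but hand GPU power entirely to runtime PM: switcheroo then skips its own vga_switchon()/vga_switchoff() for that client and only toggles the handler's power_state callback from the runtime-suspend/resume wrappers. A rough sketch of how a driver might opt in; the callback bodies and names are placeholders, not taken from any real driver:

	#include <linux/pci.h>
	#include <linux/pm.h>
	#include <linux/vga_switcheroo.h>

	static void example_set_gpu_state(struct pci_dev *pdev,
					  enum vga_switcheroo_state state)
	{
		/* unused when driver_power_control is set; runtime PM does the work */
	}

	static void example_reprobe(struct pci_dev *pdev) { }
	static bool example_can_switch(struct pci_dev *pdev) { return true; }

	static const struct vga_switcheroo_client_ops example_switcheroo_ops = {
		.set_gpu_state	= example_set_gpu_state,
		.reprobe	= example_reprobe,
		.can_switch	= example_can_switch,
	};

	static struct dev_pm_domain example_vga_pm_domain;

	static int example_register_switcheroo(struct pci_dev *pdev)
	{
		int err;

		/* true: this client powers itself up/down via runtime PM */
		err = vga_switcheroo_register_client(pdev, &example_switcheroo_ops,
						     true);
		if (err)
			return err;

		/* wrap the PCI bus PM ops so the mux power follows runtime PM */
		return vga_switcheroo_init_domain_pm_ops(&pdev->dev,
							 &example_vga_pm_domain);
	}
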
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 14ef6ab6979..3d7c9f67b6d 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -743,6 +743,14 @@ config HID_WIIMOTE
To compile this driver as a module, choose M here: the
module will be called hid-wiimote.
+config HID_XINMO
+ tristate "Xin-Mo non-fully compliant devices"
+ depends on HID
+ ---help---
+ Support for Xin-Mo devices that are not fully compliant with the HID
+ standard. Currently only supports the Xin-Mo Dual Arcade. Say Y here
+ if you have a Xin-Mo Dual Arcade controller.
+
config HID_ZEROPLUS
tristate "Zeroplus based game controller support"
depends on HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 6f687287e21..a959f4aecaf 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -110,6 +110,7 @@ obj-$(CONFIG_HID_TIVO) += hid-tivo.o
obj-$(CONFIG_HID_TOPSEED) += hid-topseed.o
obj-$(CONFIG_HID_TWINHAN) += hid-twinhan.o
obj-$(CONFIG_HID_UCLOGIC) += hid-uclogic.o
+obj-$(CONFIG_HID_XINMO) += hid-xinmo.o
obj-$(CONFIG_HID_ZEROPLUS) += hid-zpff.o
obj-$(CONFIG_HID_ZYDACRON) += hid-zydacron.o
obj-$(CONFIG_HID_WACOM) += hid-wacom.o
diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
index 7c5507e9482..9428ea7cdf8 100644
--- a/drivers/hid/hid-a4tech.c
+++ b/drivers/hid/hid-a4tech.c
@@ -90,11 +90,10 @@ static int a4_probe(struct hid_device *hdev, const struct hid_device_id *id)
struct a4tech_sc *a4;
int ret;
- a4 = kzalloc(sizeof(*a4), GFP_KERNEL);
+ a4 = devm_kzalloc(&hdev->dev, sizeof(*a4), GFP_KERNEL);
if (a4 == NULL) {
hid_err(hdev, "can't alloc device descriptor\n");
- ret = -ENOMEM;
- goto err_free;
+ return -ENOMEM;
}
a4->quirks = id->driver_data;
@@ -104,27 +103,16 @@ static int a4_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
- goto err_free;
+ return ret;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "hw start failed\n");
- goto err_free;
+ return ret;
}
return 0;
-err_free:
- kfree(a4);
- return ret;
-}
-
-static void a4_remove(struct hid_device *hdev)
-{
- struct a4tech_sc *a4 = hid_get_drvdata(hdev);
-
- hid_hw_stop(hdev);
- kfree(a4);
}
static const struct hid_device_id a4_devices[] = {
@@ -144,7 +132,6 @@ static struct hid_driver a4_driver = {
.input_mapped = a4_input_mapped,
.event = a4_event,
.probe = a4_probe,
- .remove = a4_remove,
};
module_hid_driver(a4_driver);
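
This conversion (and the hid-apple and hid-magicmouse ones that follow) replaces kzalloc()/kfree() with devm_kzalloc(), which ties the allocation's lifetime to the HID device. The error paths and the whole .remove callback then disappear, because HID core falls back to hid_hw_stop() itself when a driver provides no ->remove and the memory is released automatically on unbind. A condensed sketch of the resulting probe shape, with a hypothetical per-device struct:

	#include <linux/device.h>
	#include <linux/hid.h>

	struct example_sc {			/* hypothetical driver data */
		unsigned long quirks;
	};

	static int example_probe(struct hid_device *hdev,
				 const struct hid_device_id *id)
	{
		struct example_sc *sc;
		int ret;

		sc = devm_kzalloc(&hdev->dev, sizeof(*sc), GFP_KERNEL);
		if (!sc)
			return -ENOMEM;	/* freed automatically on unbind */

		sc->quirks = id->driver_data;
		hid_set_drvdata(hdev, sc);

		ret = hid_parse(hdev);
		if (ret)
			return ret;	/* nothing to unwind by hand */

		return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	}
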
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index c7710b5c69a..881cf7b4f9a 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -349,7 +349,7 @@ static int apple_probe(struct hid_device *hdev,
unsigned int connect_mask = HID_CONNECT_DEFAULT;
int ret;
- asc = kzalloc(sizeof(*asc), GFP_KERNEL);
+ asc = devm_kzalloc(&hdev->dev, sizeof(*asc), GFP_KERNEL);
if (asc == NULL) {
hid_err(hdev, "can't alloc apple descriptor\n");
return -ENOMEM;
@@ -362,7 +362,7 @@ static int apple_probe(struct hid_device *hdev,
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
- goto err_free;
+ return ret;
}
if (quirks & APPLE_HIDDEV)
@@ -373,19 +373,10 @@ static int apple_probe(struct hid_device *hdev,
ret = hid_hw_start(hdev, connect_mask);
if (ret) {
hid_err(hdev, "hw start failed\n");
- goto err_free;
+ return ret;
}
return 0;
-err_free:
- kfree(asc);
- return ret;
-}
-
-static void apple_remove(struct hid_device *hdev)
-{
- hid_hw_stop(hdev);
- kfree(hid_get_drvdata(hdev));
}
static const struct hid_device_id apple_devices[] = {
@@ -551,7 +542,6 @@ static struct hid_driver apple_driver = {
.id_table = apple_devices,
.report_fixup = apple_report_fixup,
.probe = apple_probe,
- .remove = apple_remove,
.event = apple_event,
.input_mapping = apple_input_mapping,
.input_mapped = apple_input_mapped,
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 36668d1aca8..ae88a97f976 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -63,6 +63,8 @@ struct hid_report *hid_register_report(struct hid_device *device, unsigned type,
struct hid_report_enum *report_enum = device->report_enum + type;
struct hid_report *report;
+ if (id >= HID_MAX_IDS)
+ return NULL;
if (report_enum->report_id_hash[id])
return report_enum->report_id_hash[id];
@@ -404,8 +406,10 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
case HID_GLOBAL_ITEM_TAG_REPORT_ID:
parser->global.report_id = item_udata(item);
- if (parser->global.report_id == 0) {
- hid_err(parser->device, "report_id 0 is invalid\n");
+ if (parser->global.report_id == 0 ||
+ parser->global.report_id >= HID_MAX_IDS) {
+ hid_err(parser->device, "report_id %u is invalid\n",
+ parser->global.report_id);
return -1;
}
return 0;
@@ -450,7 +454,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
}
parser->local.delimiter_depth--;
}
- return 1;
+ return 0;
case HID_LOCAL_ITEM_TAG_USAGE:
@@ -575,7 +579,7 @@ static void hid_close_report(struct hid_device *device)
for (i = 0; i < HID_REPORT_TYPES; i++) {
struct hid_report_enum *report_enum = device->report_enum + i;
- for (j = 0; j < 256; j++) {
+ for (j = 0; j < HID_MAX_IDS; j++) {
struct hid_report *report = report_enum->report_id_hash[j];
if (report)
hid_free_report(report);
@@ -677,12 +681,61 @@ static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item)
return NULL;
}
-static void hid_scan_usage(struct hid_device *hid, u32 usage)
+static void hid_scan_input_usage(struct hid_parser *parser, u32 usage)
{
+ struct hid_device *hid = parser->device;
+
if (usage == HID_DG_CONTACTID)
hid->group = HID_GROUP_MULTITOUCH;
}
+static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
+{
+ if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
+ parser->global.report_size == 8)
+ parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
+}
+
+static void hid_scan_collection(struct hid_parser *parser, unsigned type)
+{
+ struct hid_device *hid = parser->device;
+
+ if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
+ type == HID_COLLECTION_PHYSICAL)
+ hid->group = HID_GROUP_SENSOR_HUB;
+}
+
+static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
+{
+ __u32 data;
+ int i;
+
+ data = item_udata(item);
+
+ switch (item->tag) {
+ case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
+ hid_scan_collection(parser, data & 0xff);
+ break;
+ case HID_MAIN_ITEM_TAG_END_COLLECTION:
+ break;
+ case HID_MAIN_ITEM_TAG_INPUT:
+ for (i = 0; i < parser->local.usage_index; i++)
+ hid_scan_input_usage(parser, parser->local.usage[i]);
+ break;
+ case HID_MAIN_ITEM_TAG_OUTPUT:
+ break;
+ case HID_MAIN_ITEM_TAG_FEATURE:
+ for (i = 0; i < parser->local.usage_index; i++)
+ hid_scan_feature_usage(parser, parser->local.usage[i]);
+ break;
+ }
+
+ /* Reset the local parser environment */
+ memset(&parser->local, 0, sizeof(parser->local));
+
+ return 0;
+}
+
/*
* Scan a report descriptor before the device is added to the bus.
* Sets device groups and other properties that determine what driver
@@ -690,48 +743,41 @@ static void hid_scan_usage(struct hid_device *hid, u32 usage)
*/
static int hid_scan_report(struct hid_device *hid)
{
- unsigned int page = 0, delim = 0;
+ struct hid_parser *parser;
+ struct hid_item item;
__u8 *start = hid->dev_rdesc;
__u8 *end = start + hid->dev_rsize;
- unsigned int u, u_min = 0, u_max = 0;
- struct hid_item item;
+ static int (*dispatch_type[])(struct hid_parser *parser,
+ struct hid_item *item) = {
+ hid_scan_main,
+ hid_parser_global,
+ hid_parser_local,
+ hid_parser_reserved
+ };
+
+ parser = vzalloc(sizeof(struct hid_parser));
+ if (!parser)
+ return -ENOMEM;
+ parser->device = hid;
hid->group = HID_GROUP_GENERIC;
- while ((start = fetch_item(start, end, &item)) != NULL) {
- if (item.format != HID_ITEM_FORMAT_SHORT)
- return -EINVAL;
- if (item.type == HID_ITEM_TYPE_GLOBAL) {
- if (item.tag == HID_GLOBAL_ITEM_TAG_USAGE_PAGE)
- page = item_udata(&item) << 16;
- } else if (item.type == HID_ITEM_TYPE_LOCAL) {
- if (delim > 1)
- break;
- u = item_udata(&item);
- if (item.size <= 2)
- u += page;
- switch (item.tag) {
- case HID_LOCAL_ITEM_TAG_DELIMITER:
- delim += !!u;
- break;
- case HID_LOCAL_ITEM_TAG_USAGE:
- hid_scan_usage(hid, u);
- break;
- case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
- u_min = u;
- break;
- case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:
- u_max = u;
- for (u = u_min; u <= u_max; u++)
- hid_scan_usage(hid, u);
- break;
- }
- } else if (page == HID_UP_SENSOR &&
- item.type == HID_ITEM_TYPE_MAIN &&
- item.tag == HID_MAIN_ITEM_TAG_BEGIN_COLLECTION &&
- (item_udata(&item) & 0xff) == HID_COLLECTION_PHYSICAL)
- hid->group = HID_GROUP_SENSOR_HUB;
- }
+ /*
+ * The parsing is simpler than the one in hid_open_report() as we should
+ * be robust against hid errors. Those errors will be raised by
+ * hid_open_report() anyway.
+ */
+ while ((start = fetch_item(start, end, &item)) != NULL)
+ dispatch_type[item.type](parser, &item);
+
+ /*
+ * Handle special flags set during scanning.
+ */
+ if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) &&
+ (hid->group == HID_GROUP_MULTITOUCH))
+ hid->group = HID_GROUP_MULTITOUCH_WIN_8;
+
+ vfree(parser);
return 0;
}
@@ -1128,7 +1174,8 @@ static void hid_output_field(const struct hid_device *hid,
}
/*
- * Create a report.
+ * Create a report. 'data' has to be allocated using
+ * hid_alloc_report_buf() so that it has proper size.
*/
void hid_output_report(struct hid_report *report, __u8 *data)
@@ -1145,6 +1192,22 @@ void hid_output_report(struct hid_report *report, __u8 *data)
EXPORT_SYMBOL_GPL(hid_output_report);
/*
+ * Allocator for buffer that is going to be passed to hid_output_report()
+ */
+u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
+{
+ /*
+ * 7 extra bytes are necessary to achieve proper functionality
+ * of implement() working on 8 byte chunks
+ */
+
+ int len = ((report->size - 1) >> 3) + 1 + (report->id > 0) + 7;
+
+ return kmalloc(len, flags);
+}
+EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
+
+/*
* Set a field value. The report this field belongs to has to be
* created and transferred to the device, to set this value in the
* device.
@@ -1152,7 +1215,12 @@ EXPORT_SYMBOL_GPL(hid_output_report);
int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
{
- unsigned size = field->report_size;
+ unsigned size;
+
+ if (!field)
+ return -1;
+
+ size = field->report_size;
hid_dump_input(field->report->device, field->usage + offset, value);
@@ -1597,6 +1665,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
@@ -1679,6 +1748,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_LUA) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
@@ -1736,6 +1806,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
@@ -1917,11 +1988,13 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
+static DEVICE_ATTR_RO(modalias);
-static struct device_attribute hid_dev_attrs[] = {
- __ATTR_RO(modalias),
- __ATTR_NULL,
+static struct attribute *hid_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(hid_dev);
static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
{
@@ -1949,7 +2022,7 @@ static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
static struct bus_type hid_bus_type = {
.name = "hid",
- .dev_attrs = hid_dev_attrs,
+ .dev_groups = hid_dev_groups,
.match = hid_bus_match,
.probe = hid_device_probe,
.remove = hid_device_remove,
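
Callers of hid_output_report() are now expected to size their buffer with the new hid_alloc_report_buf(), which over-allocates by 7 bytes so implement() can write in 8-byte chunks; the fixed on-stack arrays in logitech-dj and picolcd later in this series are converted accordingly. A minimal sketch of the intended calling pattern, assuming the transport exposes hid_output_raw_report as in this tree:

	#include <linux/hid.h>
	#include <linux/slab.h>

	/* Sketch: serialize and send one output report using the new allocator. */
	static int example_send_report(struct hid_device *hdev,
				       struct hid_report *report)
	{
		int len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
		u8 *buf;
		int ret;

		buf = hid_alloc_report_buf(report, GFP_KERNEL);	/* len + 7 spare */
		if (!buf)
			return -ENOMEM;

		hid_output_report(report, buf);
		ret = hdev->hid_output_raw_report(hdev, buf, len, HID_OUTPUT_REPORT);
		kfree(buf);

		return ret < 0 ? ret : 0;
	}
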
diff --git a/drivers/hid/hid-holtekff.c b/drivers/hid/hid-holtekff.c
index 9a8f0512452..9325545fc3a 100644
--- a/drivers/hid/hid-holtekff.c
+++ b/drivers/hid/hid-holtekff.c
@@ -98,7 +98,7 @@ static void holtekff_send(struct holtekff_device *holtekff,
holtekff->field->value[i] = data[i];
}
- dbg_hid("sending %*ph\n", 7, data);
+ dbg_hid("sending %7ph\n", data);
hid_hw_request(hid, holtekff->field->report, HID_REQ_SET_REPORT);
}
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 713217380b4..8fae6d1414c 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -590,6 +590,5 @@ static void __exit mousevsc_exit(void)
}
MODULE_LICENSE("GPL");
-MODULE_VERSION(HV_DRV_VERSION);
module_init(mousevsc_init);
module_exit(mousevsc_exit);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index ffe4c7ae334..e60e8d53069 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -135,9 +135,9 @@
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
-#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0291
-#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0292
-#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0293
+#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
+#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
+#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240
@@ -482,6 +482,7 @@
#define USB_VENDOR_ID_KYE 0x0458
#define USB_DEVICE_ID_KYE_ERGO_525V 0x0087
#define USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE 0x0138
+#define USB_DEVICE_ID_GENIUS_GX_IMPERATOR 0x4018
#define USB_DEVICE_ID_KYE_GPEN_560 0x5003
#define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010
#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011
@@ -658,6 +659,7 @@
#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16 0x0012
#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17 0x0013
#define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18 0x0014
+#define USB_DEVICE_ID_NTRIG_DUOSENSE 0x1500
#define USB_VENDOR_ID_ONTRAK 0x0a07
#define USB_DEVICE_ID_ONTRAK_ADU100 0x0064
@@ -716,6 +718,7 @@
#define USB_DEVICE_ID_ROCCAT_KONE 0x2ced
#define USB_DEVICE_ID_ROCCAT_KONEPLUS 0x2d51
#define USB_DEVICE_ID_ROCCAT_KONEPURE 0x2dbe
+#define USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL 0x2db4
#define USB_DEVICE_ID_ROCCAT_KONEXTD 0x2e22
#define USB_DEVICE_ID_ROCCAT_KOVAPLUS 0x2d50
#define USB_DEVICE_ID_ROCCAT_LUA 0x2c2e
@@ -887,6 +890,9 @@
#define USB_VENDOR_ID_XAT 0x2505
#define USB_DEVICE_ID_XAT_CSR 0x0220
+#define USB_VENDOR_ID_XIN_MO 0x16c0
+#define USB_DEVICE_ID_XIN_MO_DUAL_ARCADE 0x05e1
+
#define USB_VENDOR_ID_XIROKU 0x1477
#define USB_DEVICE_ID_XIROKU_SPX 0x1006
#define USB_DEVICE_ID_XIROKU_MPX 0x1007
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 7480799e535..b420f4a0fd2 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -340,7 +340,7 @@ static int hidinput_get_battery_property(struct power_supply *psy,
{
struct hid_device *dev = container_of(psy, struct hid_device, battery);
int ret = 0;
- __u8 buf[2] = {};
+ __u8 *buf;
switch (prop) {
case POWER_SUPPLY_PROP_PRESENT:
@@ -349,12 +349,19 @@ static int hidinput_get_battery_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CAPACITY:
+
+ buf = kmalloc(2 * sizeof(__u8), GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ break;
+ }
ret = dev->hid_get_raw_report(dev, dev->battery_report_id,
- buf, sizeof(buf),
+ buf, 2,
dev->battery_report_type);
if (ret != 2) {
ret = -ENODATA;
+ kfree(buf);
break;
}
ret = 0;
@@ -364,6 +371,7 @@ static int hidinput_get_battery_property(struct power_supply *psy,
buf[1] <= dev->battery_max)
val->intval = (100 * (buf[1] - dev->battery_min)) /
(dev->battery_max - dev->battery_min);
+ kfree(buf);
break;
case POWER_SUPPLY_PROP_MODEL_NAME:
@@ -1137,6 +1145,74 @@ unsigned int hidinput_count_leds(struct hid_device *hid)
}
EXPORT_SYMBOL_GPL(hidinput_count_leds);
+static void hidinput_led_worker(struct work_struct *work)
+{
+ struct hid_device *hid = container_of(work, struct hid_device,
+ led_work);
+ struct hid_field *field;
+ struct hid_report *report;
+ int len;
+ __u8 *buf;
+
+ field = hidinput_get_led_field(hid);
+ if (!field)
+ return;
+
+ /*
+ * field->report is accessed unlocked regarding HID core. So there might
+ * be another incoming SET-LED request from user-space, which changes
+ * the LED state while we assemble our outgoing buffer. However, this
+ * doesn't matter as hid_output_report() correctly converts it into a
+ * boolean value no matter what information is currently set on the LED
+ * field (even garbage). So the remote device will always get a valid
+ * request.
+ * And in case we send a wrong value, a next led worker is spawned
+ * for every SET-LED request so the following worker will send the
+ * correct value, guaranteed!
+ */
+
+ report = field->report;
+
+ /* use custom SET_REPORT request if possible (asynchronous) */
+ if (hid->ll_driver->request)
+ return hid->ll_driver->request(hid, report, HID_REQ_SET_REPORT);
+
+ /* fall back to generic raw-output-report */
+ len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ hid_output_report(report, buf);
+ /* synchronous output report */
+ hid->hid_output_raw_report(hid, buf, len, HID_OUTPUT_REPORT);
+ kfree(buf);
+}
+
+static int hidinput_input_event(struct input_dev *dev, unsigned int type,
+ unsigned int code, int value)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct hid_field *field;
+ int offset;
+
+ if (type == EV_FF)
+ return input_ff_event(dev, type, code, value);
+
+ if (type != EV_LED)
+ return -1;
+
+ if ((offset = hidinput_find_field(hid, type, code, &field)) == -1) {
+ hid_warn(dev, "event field not found\n");
+ return -1;
+ }
+
+ hid_set_field(field, offset, value);
+
+ schedule_work(&hid->led_work);
+ return 0;
+}
+
static int hidinput_open(struct input_dev *dev)
{
struct hid_device *hid = input_get_drvdata(dev);
@@ -1183,7 +1259,10 @@ static struct hid_input *hidinput_allocate(struct hid_device *hid)
}
input_set_drvdata(input_dev, hid);
- input_dev->event = hid->ll_driver->hidinput_input_event;
+ if (hid->ll_driver->hidinput_input_event)
+ input_dev->event = hid->ll_driver->hidinput_input_event;
+ else if (hid->ll_driver->request || hid->hid_output_raw_report)
+ input_dev->event = hidinput_input_event;
input_dev->open = hidinput_open;
input_dev->close = hidinput_close;
input_dev->setkeycode = hidinput_setkeycode;
@@ -1278,6 +1357,7 @@ int hidinput_connect(struct hid_device *hid, unsigned int force)
int i, j, k;
INIT_LIST_HEAD(&hid->inputs);
+ INIT_WORK(&hid->led_work, hidinput_led_worker);
if (!force) {
for (i = 0; i < hid->maxcollection; i++) {
@@ -1379,6 +1459,12 @@ void hidinput_disconnect(struct hid_device *hid)
input_unregister_device(hidinput->input);
kfree(hidinput);
}
+
+ /* led_work is spawned by input_dev callbacks, but doesn't access the
+ * parent input_dev at all. Once all input devices are removed, we
+ * know that led_work will never get restarted, so we can cancel it
+ * synchronously and are safe. */
+ cancel_work_sync(&hid->led_work);
}
EXPORT_SYMBOL_GPL(hidinput_disconnect);
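
The new hidinput_input_event()/hidinput_led_worker() pair gives transports without their own ->hidinput_input_event a generic LED path: the event handler only latches the value with hid_set_field() and defers the actual transfer to a work item, since the raw-output path may sleep while input events can arrive from atomic context. The underlying workqueue pattern, reduced to its essentials with illustrative names:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct example_led_dev {		/* illustrative container */
		struct work_struct led_work;
		unsigned long led_state;
	};

	static void example_led_worker(struct work_struct *work)
	{
		struct example_led_dev *ed =
			container_of(work, struct example_led_dev, led_work);

		/* may sleep here: assemble and send the report for led_state */
		pr_debug("sending LED state %lu\n", ed->led_state);
	}

	static void example_led_init(struct example_led_dev *ed)
	{
		INIT_WORK(&ed->led_work, example_led_worker);
	}

	static void example_led_event(struct example_led_dev *ed, int value)
	{
		ed->led_state = value;		/* cheap, safe in atomic context */
		schedule_work(&ed->led_work);	/* heavy lifting happens later */
	}

	static void example_led_teardown(struct example_led_dev *ed)
	{
		cancel_work_sync(&ed->led_work); /* nothing can requeue it afterwards */
	}
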
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index 1e2ee2aa84a..73845120295 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -268,6 +268,26 @@ static __u8 easypen_m610x_rdesc_fixed[] = {
0xC0 /* End Collection */
};
+static __u8 *kye_consumer_control_fixup(struct hid_device *hdev, __u8 *rdesc,
+ unsigned int *rsize, int offset, const char *device_name) {
+ /*
+	 * the fixup that needs to be done:
+	 *   - change Usage Maximum in the Consumer Control
+ * (report ID 3) to a reasonable value
+ */
+ if (*rsize >= offset + 31 &&
+ /* Usage Page (Consumer Devices) */
+ rdesc[offset] == 0x05 && rdesc[offset + 1] == 0x0c &&
+ /* Usage (Consumer Control) */
+ rdesc[offset + 2] == 0x09 && rdesc[offset + 3] == 0x01 &&
+ /* Usage Maximum > 12287 */
+ rdesc[offset + 10] == 0x2a && rdesc[offset + 12] > 0x2f) {
+ hid_info(hdev, "fixing up %s report descriptor\n", device_name);
+ rdesc[offset + 12] = 0x2f;
+ }
+ return rdesc;
+}
+
static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
unsigned int *rsize)
{
@@ -315,23 +335,12 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
}
break;
case USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE:
- /*
- * the fixup that need to be done:
- * - change Usage Maximum in the Comsumer Control
- * (report ID 3) to a reasonable value
- */
- if (*rsize >= 135 &&
- /* Usage Page (Consumer Devices) */
- rdesc[104] == 0x05 && rdesc[105] == 0x0c &&
- /* Usage (Consumer Control) */
- rdesc[106] == 0x09 && rdesc[107] == 0x01 &&
- /* Usage Maximum > 12287 */
- rdesc[114] == 0x2a && rdesc[116] > 0x2f) {
- hid_info(hdev,
- "fixing up Genius Gila Gaming Mouse "
- "report descriptor\n");
- rdesc[116] = 0x2f;
- }
+ rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
+ "Genius Gila Gaming Mouse");
+ break;
+ case USB_DEVICE_ID_GENIUS_GX_IMPERATOR:
+ rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83,
+ "Genius Gx Imperator Keyboard");
break;
}
return rdesc;
@@ -428,6 +437,8 @@ static const struct hid_device_id kye_devices[] = {
USB_DEVICE_ID_KYE_EASYPEN_M610X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+ USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
{ }
};
MODULE_DEVICE_TABLE(hid, kye_devices);
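
The Gila fixup is generalised into kye_consumer_control_fixup() so the Gx Imperator can reuse it with a different byte offset; the important part of any such ->report_fixup is the bounds check on *rsize before descriptor bytes are read or patched. A stripped-down sketch of the same shape, with the offsets taken from the Gila case above:

	#include <linux/hid.h>

	/* Sketch: clamp an absurd Usage Maximum in a consumer-control collection.
	 * The offsets (104..116) match the Gila descriptor handled above. */
	static __u8 *example_report_fixup(struct hid_device *hdev, __u8 *rdesc,
					  unsigned int *rsize)
	{
		if (*rsize >= 135 &&
		    rdesc[104] == 0x05 && rdesc[105] == 0x0c &&	/* Usage Page (Consumer) */
		    rdesc[114] == 0x2a && rdesc[116] > 0x2f) {	/* Usage Maximum > 12287 */
			hid_info(hdev, "clamping Usage Maximum\n");
			rdesc[116] = 0x2f;
		}
		return rdesc;
	}
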
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 5207591a598..7800b141056 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -192,6 +192,7 @@ static struct hid_ll_driver logi_dj_ll_driver;
static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf,
size_t count,
unsigned char report_type);
+static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev);
static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev,
struct dj_report *dj_report)
@@ -232,6 +233,7 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] &
SPFUNCTION_DEVICE_LIST_EMPTY) {
dbg_hid("%s: device list is empty\n", __func__);
+ djrcv_dev->querying_devices = false;
return;
}
@@ -242,6 +244,12 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
return;
}
+ if (djrcv_dev->paired_dj_devices[dj_report->device_index]) {
+ /* The device is already known. No need to reallocate it. */
+ dbg_hid("%s: device is already known\n", __func__);
+ return;
+ }
+
dj_hiddev = hid_allocate_device();
if (IS_ERR(dj_hiddev)) {
dev_err(&djrcv_hdev->dev, "%s: hid_allocate_device failed\n",
@@ -305,6 +313,7 @@ static void delayedwork_callback(struct work_struct *work)
struct dj_report dj_report;
unsigned long flags;
int count;
+ int retval;
dbg_hid("%s\n", __func__);
@@ -337,6 +346,25 @@ static void delayedwork_callback(struct work_struct *work)
logi_dj_recv_destroy_djhid_device(djrcv_dev, &dj_report);
break;
default:
+	/* A normal report (i.e. one that is not a pair/unpair notification)
+	 * arriving here means that no paired dj_device was associated with
+	 * the report's device_index, i.e. the original "device paired"
+	 * notification for this dj_device never reached this driver. The
+	 * reason is that hid-core discards all packets coming from a device
+	 * while probe() is executing. */
+ if (!djrcv_dev->paired_dj_devices[dj_report.device_index]) {
+ /* ok, we don't know the device, just re-ask the
+ * receiver for the list of connected devices. */
+ retval = logi_dj_recv_query_paired_devices(djrcv_dev);
+ if (!retval) {
+ /* everything went fine, so just leave */
+ break;
+ }
+ dev_err(&djrcv_dev->hdev->dev,
+ "%s:logi_dj_recv_query_paired_devices "
+ "error:%d\n", __func__, retval);
+ }
dbg_hid("%s: unexpected report type\n", __func__);
}
}
@@ -367,6 +395,12 @@ static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev,
if (!djdev) {
dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
" is NULL, index %d\n", dj_report->device_index);
+ kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
+
+ if (schedule_work(&djrcv_dev->work) == 0) {
+ dbg_hid("%s: did not schedule the work item, was already "
+ "queued\n", __func__);
+ }
return;
}
@@ -397,6 +431,12 @@ static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev,
if (dj_device == NULL) {
dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]"
" is NULL, index %d\n", dj_report->device_index);
+ kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report));
+
+ if (schedule_work(&djrcv_dev->work) == 0) {
+ dbg_hid("%s: did not schedule the work item, was already "
+ "queued\n", __func__);
+ }
return;
}
@@ -444,6 +484,10 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
struct dj_report *dj_report;
int retval;
+ /* no need to protect djrcv_dev->querying_devices */
+ if (djrcv_dev->querying_devices)
+ return 0;
+
dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
if (!dj_report)
return -ENOMEM;
@@ -455,6 +499,7 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
return retval;
}
+
static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
unsigned timeout)
{
@@ -574,7 +619,7 @@ static int logi_dj_ll_input_event(struct input_dev *dev, unsigned int type,
struct hid_field *field;
struct hid_report *report;
- unsigned char data[8];
+ unsigned char *data;
int offset;
dbg_hid("%s: %s, type:%d | code:%d | value:%d\n",
@@ -590,6 +635,13 @@ static int logi_dj_ll_input_event(struct input_dev *dev, unsigned int type,
return -1;
}
hid_set_field(field, offset, value);
+
+ data = hid_alloc_report_buf(field->report, GFP_ATOMIC);
+ if (!data) {
+ dev_warn(&dev->dev, "failed to allocate report buf memory\n");
+ return -1;
+ }
+
hid_output_report(field->report, &data[0]);
output_report_enum = &dj_rcv_hiddev->report_enum[HID_OUTPUT_REPORT];
@@ -600,8 +652,9 @@ static int logi_dj_ll_input_event(struct input_dev *dev, unsigned int type,
hid_hw_request(dj_rcv_hiddev, report, HID_REQ_SET_REPORT);
- return 0;
+ kfree(data);
+ return 0;
}
static int logi_dj_ll_start(struct hid_device *hid)
@@ -756,10 +809,10 @@ static int logi_dj_probe(struct hid_device *hdev,
}
/* This is enabling the polling urb on the IN endpoint */
- retval = hdev->ll_driver->open(hdev);
+ retval = hid_hw_open(hdev);
if (retval < 0) {
- dev_err(&hdev->dev, "%s:hdev->ll_driver->open returned "
- "error:%d\n", __func__, retval);
+ dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n",
+ __func__, retval);
goto llopen_failed;
}
@@ -776,7 +829,7 @@ static int logi_dj_probe(struct hid_device *hdev,
return retval;
logi_dj_recv_query_paired_devices_failed:
- hdev->ll_driver->close(hdev);
+ hid_hw_close(hdev);
llopen_failed:
switch_to_dj_mode_fail:
@@ -818,7 +871,7 @@ static void logi_dj_remove(struct hid_device *hdev)
cancel_work_sync(&djrcv_dev->work);
- hdev->ll_driver->close(hdev);
+ hid_hw_close(hdev);
hid_hw_stop(hdev);
/* I suppose that at this point the only context that can access
diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h
index fd28a5e0ca3..4a4000340ce 100644
--- a/drivers/hid/hid-logitech-dj.h
+++ b/drivers/hid/hid-logitech-dj.h
@@ -101,6 +101,7 @@ struct dj_receiver_dev {
struct work_struct work;
struct kfifo notif_fifo;
spinlock_t lock;
+ bool querying_devices;
};
struct dj_device {
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 5bc37343eb2..3b43d1cfa93 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -36,7 +36,7 @@ MODULE_PARM_DESC(emulate_scroll_wheel, "Emulate a scroll wheel");
static unsigned int scroll_speed = 32;
static int param_set_scroll_speed(const char *val, struct kernel_param *kp) {
unsigned long speed;
- if (!val || strict_strtoul(val, 0, &speed) || speed > 63)
+ if (!val || kstrtoul(val, 0, &speed) || speed > 63)
return -EINVAL;
scroll_speed = speed;
return 0;
@@ -484,7 +484,7 @@ static int magicmouse_probe(struct hid_device *hdev,
struct hid_report *report;
int ret;
- msc = kzalloc(sizeof(*msc), GFP_KERNEL);
+ msc = devm_kzalloc(&hdev->dev, sizeof(*msc), GFP_KERNEL);
if (msc == NULL) {
hid_err(hdev, "can't alloc magicmouse descriptor\n");
return -ENOMEM;
@@ -498,13 +498,13 @@ static int magicmouse_probe(struct hid_device *hdev,
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "magicmouse hid parse failed\n");
- goto err_free;
+ return ret;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "magicmouse hw start failed\n");
- goto err_free;
+ return ret;
}
if (!msc->input) {
@@ -548,19 +548,9 @@ static int magicmouse_probe(struct hid_device *hdev,
return 0;
err_stop_hw:
hid_hw_stop(hdev);
-err_free:
- kfree(msc);
return ret;
}
-static void magicmouse_remove(struct hid_device *hdev)
-{
- struct magicmouse_sc *msc = hid_get_drvdata(hdev);
-
- hid_hw_stop(hdev);
- kfree(msc);
-}
-
static const struct hid_device_id magic_mice[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_MAGICMOUSE), .driver_data = 0 },
@@ -574,7 +564,6 @@ static struct hid_driver magicmouse_driver = {
.name = "magicmouse",
.id_table = magic_mice,
.probe = magicmouse_probe,
- .remove = magicmouse_remove,
.raw_event = magicmouse_raw_event,
.input_mapping = magicmouse_input_mapping,
.input_configured = magicmouse_input_configured,
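
strict_strtoul() is on its way out of the kernel; kstrtoul() is the drop-in replacement with the same (string, base, result) arguments, returning 0 on success or a negative errno. The converted module-parameter setter above follows the usual shape; a self-contained sketch with illustrative names:

	#include <linux/kernel.h>
	#include <linux/moduleparam.h>

	static unsigned int example_speed = 32;

	/* Sketch of the setter pattern: parse, range-check, then commit. */
	static int example_set_speed(const char *val, struct kernel_param *kp)
	{
		unsigned long speed;

		if (!val || kstrtoul(val, 0, &speed) || speed > 63)
			return -EINVAL;

		example_speed = speed;
		return 0;
	}
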
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index cb0e361d7a4..ac28f08c386 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -133,6 +133,7 @@ static void mt_post_parse(struct mt_device *td);
#define MT_CLS_NSMU 0x000a
#define MT_CLS_DUAL_CONTACT_NUMBER 0x0010
#define MT_CLS_DUAL_CONTACT_ID 0x0011
+#define MT_CLS_WIN_8 0x0012
/* vendor specific classes */
#define MT_CLS_3M 0x0101
@@ -205,6 +206,11 @@ static struct mt_class mt_classes[] = {
MT_QUIRK_CONTACT_CNT_ACCURATE |
MT_QUIRK_SLOT_IS_CONTACTID,
.maxcontacts = 2 },
+ { .name = MT_CLS_WIN_8,
+ .quirks = MT_QUIRK_ALWAYS_VALID |
+ MT_QUIRK_IGNORE_DUPLICATES |
+ MT_QUIRK_HOVERING |
+ MT_QUIRK_CONTACT_CNT_ACCURATE },
/*
* vendor specific classes
@@ -261,17 +267,6 @@ static struct mt_class mt_classes[] = {
{ }
};
-static void mt_free_input_name(struct hid_input *hi)
-{
- struct hid_device *hdev = hi->report->device;
- const char *name = hi->input->name;
-
- if (name != hdev->name) {
- hi->input->name = hdev->name;
- kfree(name);
- }
-}
-
static ssize_t mt_show_quirks(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -343,19 +338,6 @@ static void mt_feature_mapping(struct hid_device *hdev,
td->maxcontacts = td->mtclass.maxcontacts;
break;
- case 0xff0000c5:
- if (field->report_count == 256 && field->report_size == 8) {
- /* Win 8 devices need special quirks */
- __s32 *quirks = &td->mtclass.quirks;
- *quirks |= MT_QUIRK_ALWAYS_VALID;
- *quirks |= MT_QUIRK_IGNORE_DUPLICATES;
- *quirks |= MT_QUIRK_HOVERING;
- *quirks |= MT_QUIRK_CONTACT_CNT_ACCURATE;
- *quirks &= ~MT_QUIRK_NOT_SEEN_MEANS_UP;
- *quirks &= ~MT_QUIRK_VALID_IS_INRANGE;
- *quirks &= ~MT_QUIRK_VALID_IS_CONFIDENCE;
- }
- break;
}
}
@@ -415,13 +397,6 @@ static void mt_pen_report(struct hid_device *hid, struct hid_report *report)
static void mt_pen_input_configured(struct hid_device *hdev,
struct hid_input *hi)
{
- char *name = kzalloc(strlen(hi->input->name) + 5, GFP_KERNEL);
- if (name) {
- sprintf(name, "%s Pen", hi->input->name);
- mt_free_input_name(hi);
- hi->input->name = name;
- }
-
/* force BTN_STYLUS to allow tablet matching in udev */
__set_bit(BTN_STYLUS, hi->input->keybit);
}
@@ -928,16 +903,26 @@ static void mt_post_parse(struct mt_device *td)
static void mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
{
struct mt_device *td = hid_get_drvdata(hdev);
- char *name = kstrdup(hdev->name, GFP_KERNEL);
-
- if (name)
- hi->input->name = name;
+ char *name;
+ const char *suffix = NULL;
if (hi->report->id == td->mt_report_id)
mt_touch_input_configured(hdev, hi);
- if (hi->report->id == td->pen_report_id)
+ if (hi->report->field[0]->physical == HID_DG_STYLUS) {
+ suffix = "Pen";
mt_pen_input_configured(hdev, hi);
+ }
+
+ if (suffix) {
+ name = devm_kzalloc(&hi->input->dev,
+ strlen(hdev->name) + strlen(suffix) + 2,
+ GFP_KERNEL);
+ if (name) {
+ sprintf(name, "%s %s", hdev->name, suffix);
+ hi->input->name = name;
+ }
+ }
}
static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
@@ -945,7 +930,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
int ret, i;
struct mt_device *td;
struct mt_class *mtclass = mt_classes; /* MT_CLS_DEFAULT */
- struct hid_input *hi;
for (i = 0; mt_classes[i].name ; i++) {
if (id->driver_data == mt_classes[i].name) {
@@ -967,7 +951,19 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
hdev->quirks |= HID_QUIRK_MULTI_INPUT;
hdev->quirks |= HID_QUIRK_NO_EMPTY_INPUT;
- td = kzalloc(sizeof(struct mt_device), GFP_KERNEL);
+ /*
+ * Handle special quirks for Windows 8 certified devices.
+ */
+ if (id->group == HID_GROUP_MULTITOUCH_WIN_8)
+ /*
+ * Some multitouch screens do not like to be polled for input
+ * reports. Fortunately, the Win8 spec says that all touches
+ * should be sent during each report, making the initialization
+ * of input reports unnecessary.
+ */
+ hdev->quirks |= HID_QUIRK_NO_INIT_INPUT_REPORTS;
+
+ td = devm_kzalloc(&hdev->dev, sizeof(struct mt_device), GFP_KERNEL);
if (!td) {
dev_err(&hdev->dev, "cannot allocate multitouch data\n");
return -ENOMEM;
@@ -980,11 +976,11 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
td->pen_report_id = -1;
hid_set_drvdata(hdev, td);
- td->fields = kzalloc(sizeof(struct mt_fields), GFP_KERNEL);
+ td->fields = devm_kzalloc(&hdev->dev, sizeof(struct mt_fields),
+ GFP_KERNEL);
if (!td->fields) {
dev_err(&hdev->dev, "cannot allocate multitouch fields data\n");
- ret = -ENOMEM;
- goto fail;
+ return -ENOMEM;
}
if (id->vendor == HID_ANY_ID && id->product == HID_ANY_ID)
@@ -992,29 +988,22 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_parse(hdev);
if (ret != 0)
- goto fail;
+ return ret;
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret)
- goto hid_fail;
+ return ret;
ret = sysfs_create_group(&hdev->dev.kobj, &mt_attribute_group);
mt_set_maxcontacts(hdev);
mt_set_input_mode(hdev);
- kfree(td->fields);
+ /* release .fields memory as it is not used anymore */
+ devm_kfree(&hdev->dev, td->fields);
td->fields = NULL;
return 0;
-
-hid_fail:
- list_for_each_entry(hi, &hdev->inputs, list)
- mt_free_input_name(hi);
-fail:
- kfree(td->fields);
- kfree(td);
- return ret;
}
#ifdef CONFIG_PM
@@ -1039,17 +1028,8 @@ static int mt_resume(struct hid_device *hdev)
static void mt_remove(struct hid_device *hdev)
{
- struct mt_device *td = hid_get_drvdata(hdev);
- struct hid_input *hi;
-
sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
- list_for_each_entry(hi, &hdev->inputs, list)
- mt_free_input_name(hi);
-
hid_hw_stop(hdev);
-
- kfree(td);
- hid_set_drvdata(hdev, NULL);
}
static const struct hid_device_id mt_devices[] = {
@@ -1371,6 +1351,11 @@ static const struct hid_device_id mt_devices[] = {
/* Generic MT device */
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH, HID_ANY_ID, HID_ANY_ID) },
+
+ /* Generic Win 8 certified MT device */
+ { .driver_data = MT_CLS_WIN_8,
+ HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH_WIN_8,
+ HID_ANY_ID, HID_ANY_ID) },
{ }
};
MODULE_DEVICE_TABLE(hid, mt_devices);
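
With Win 8 panels now sorted into HID_GROUP_MULTITOUCH_WIN_8 during the descriptor pre-scan (see the hid-core changes above), hid-multitouch no longer patches quirks per device in mt_feature_mapping(); it matches the whole group with a catch-all id-table entry and applies MT_CLS_WIN_8. A sketch of such a group-wide match, mirroring the entry added above:

	#include <linux/hid.h>
	#include <linux/module.h>

	/* Sketch: claim every device that HID core put into a given group,
	 * regardless of bus, vendor or product. */
	static const struct hid_device_id example_ids[] = {
		{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH_WIN_8,
			     HID_ANY_ID, HID_ANY_ID) },
		{ }
	};
	MODULE_DEVICE_TABLE(hid, example_ids);
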
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index ef95102515e..600f2075512 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -115,7 +115,8 @@ static inline int ntrig_get_mode(struct hid_device *hdev)
struct hid_report *report = hdev->report_enum[HID_FEATURE_REPORT].
report_id_hash[0x0d];
- if (!report)
+ if (!report || report->maxfield < 1 ||
+ report->field[0]->report_count < 1)
return -EINVAL;
hid_hw_request(hdev, report, HID_REQ_GET_REPORT);
@@ -237,7 +238,7 @@ static ssize_t set_min_width(struct device *dev,
unsigned long val;
- if (strict_strtoul(buf, 0, &val))
+ if (kstrtoul(buf, 0, &val))
return -EINVAL;
if (val > nd->sensor_physical_width)
@@ -272,7 +273,7 @@ static ssize_t set_min_height(struct device *dev,
unsigned long val;
- if (strict_strtoul(buf, 0, &val))
+ if (kstrtoul(buf, 0, &val))
return -EINVAL;
if (val > nd->sensor_physical_height)
@@ -306,7 +307,7 @@ static ssize_t set_activate_slack(struct device *dev,
unsigned long val;
- if (strict_strtoul(buf, 0, &val))
+ if (kstrtoul(buf, 0, &val))
return -EINVAL;
if (val > 0x7f)
@@ -341,7 +342,7 @@ static ssize_t set_activation_width(struct device *dev,
unsigned long val;
- if (strict_strtoul(buf, 0, &val))
+ if (kstrtoul(buf, 0, &val))
return -EINVAL;
if (val > nd->sensor_physical_width)
@@ -377,7 +378,7 @@ static ssize_t set_activation_height(struct device *dev,
unsigned long val;
- if (strict_strtoul(buf, 0, &val))
+ if (kstrtoul(buf, 0, &val))
return -EINVAL;
if (val > nd->sensor_physical_height)
@@ -411,7 +412,7 @@ static ssize_t set_deactivate_slack(struct device *dev,
unsigned long val;
- if (strict_strtoul(buf, 0, &val))
+ if (kstrtoul(buf, 0, &val))
return -EINVAL;
/*
diff --git a/drivers/hid/hid-picolcd_cir.c b/drivers/hid/hid-picolcd_cir.c
index e346038f0f1..59d5eb1e742 100644
--- a/drivers/hid/hid-picolcd_cir.c
+++ b/drivers/hid/hid-picolcd_cir.c
@@ -145,6 +145,7 @@ void picolcd_exit_cir(struct picolcd_data *data)
struct rc_dev *rdev = data->rc_dev;
data->rc_dev = NULL;
- rc_unregister_device(rdev);
+ if (rdev)
+ rc_unregister_device(rdev);
}
diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
index b48092d0e13..acbb021065e 100644
--- a/drivers/hid/hid-picolcd_core.c
+++ b/drivers/hid/hid-picolcd_core.c
@@ -290,7 +290,7 @@ static ssize_t picolcd_operation_mode_store(struct device *dev,
buf += 10;
cnt -= 10;
}
- if (!report)
+ if (!report || report->maxfield != 1)
return -EINVAL;
while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r'))
diff --git a/drivers/hid/hid-picolcd_debugfs.c b/drivers/hid/hid-picolcd_debugfs.c
index 59ab8e157e6..024cdf3c229 100644
--- a/drivers/hid/hid-picolcd_debugfs.c
+++ b/drivers/hid/hid-picolcd_debugfs.c
@@ -394,7 +394,7 @@ static void dump_buff_as_hex(char *dst, size_t dst_sz, const u8 *data,
void picolcd_debug_out_report(struct picolcd_data *data,
struct hid_device *hdev, struct hid_report *report)
{
- u8 raw_data[70];
+ u8 *raw_data;
int raw_size = (report->size >> 3) + 1;
char *buff;
#define BUFF_SZ 256
@@ -407,20 +407,20 @@ void picolcd_debug_out_report(struct picolcd_data *data,
if (!buff)
return;
- snprintf(buff, BUFF_SZ, "\nout report %d (size %d) = ",
- report->id, raw_size);
- hid_debug_event(hdev, buff);
- if (raw_size + 5 > sizeof(raw_data)) {
+ raw_data = hid_alloc_report_buf(report, GFP_ATOMIC);
+ if (!raw_data) {
kfree(buff);
- hid_debug_event(hdev, " TOO BIG\n");
return;
- } else {
- raw_data[0] = report->id;
- hid_output_report(report, raw_data);
- dump_buff_as_hex(buff, BUFF_SZ, raw_data, raw_size);
- hid_debug_event(hdev, buff);
}
+ snprintf(buff, BUFF_SZ, "\nout report %d (size %d) = ",
+ report->id, raw_size);
+ hid_debug_event(hdev, buff);
+ raw_data[0] = report->id;
+ hid_output_report(report, raw_data);
+ dump_buff_as_hex(buff, BUFF_SZ, raw_data, raw_size);
+ hid_debug_event(hdev, buff);
+
switch (report->id) {
case REPORT_LED_STATE:
/* 1 data byte with GPO state */
@@ -644,6 +644,7 @@ void picolcd_debug_out_report(struct picolcd_data *data,
break;
}
wake_up_interruptible(&hdev->debug_wait);
+ kfree(raw_data);
kfree(buff);
}
diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c
index 591f6b22aa9..c930ab8554e 100644
--- a/drivers/hid/hid-picolcd_fb.c
+++ b/drivers/hid/hid-picolcd_fb.c
@@ -593,10 +593,14 @@ err_nomem:
void picolcd_exit_framebuffer(struct picolcd_data *data)
{
struct fb_info *info = data->fb_info;
- struct picolcd_fb_data *fbdata = info->par;
+ struct picolcd_fb_data *fbdata;
unsigned long flags;
+ if (!info)
+ return;
+
device_remove_file(&data->hdev->dev, &dev_attr_fb_update_rate);
+ fbdata = info->par;
/* disconnect framebuffer from HID dev */
spin_lock_irqsave(&fbdata->lock, flags);
diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
index d29112fa5cd..2dcd7d98dbd 100644
--- a/drivers/hid/hid-pl.c
+++ b/drivers/hid/hid-pl.c
@@ -132,8 +132,14 @@ static int plff_init(struct hid_device *hid)
strong = &report->field[0]->value[2];
weak = &report->field[0]->value[3];
debug("detected single-field device");
- } else if (report->maxfield >= 4 && report->field[0]->maxusage == 1 &&
- report->field[0]->usage[0].hid == (HID_UP_LED | 0x43)) {
+ } else if (report->field[0]->maxusage == 1 &&
+ report->field[0]->usage[0].hid ==
+ (HID_UP_LED | 0x43) &&
+ report->maxfield >= 4 &&
+ report->field[0]->report_count >= 1 &&
+ report->field[1]->report_count >= 1 &&
+ report->field[2]->report_count >= 1 &&
+ report->field[3]->report_count >= 1) {
report->field[0]->value[0] = 0x00;
report->field[1]->value[0] = 0x00;
strong = &report->field[2]->value[0];
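
The ntrig, picolcd and hid-pl changes in this series all treat the report layout as untrusted: a device can advertise the expected report ID yet supply fewer fields or value slots than the driver assumes, so maxfield and report_count are verified before field[n]->value[] is dereferenced. The pattern, condensed into a hypothetical helper:

	#include <linux/hid.h>

	/* Sketch: fetch a feature report by ID and make sure it has at least
	 * one field with at least one value slot before field[0]->value[0]
	 * is used. */
	static struct hid_report *example_get_feature(struct hid_device *hdev, u8 id)
	{
		struct hid_report *report =
			hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[id];

		if (!report || report->maxfield < 1 ||
		    report->field[0]->report_count < 1)
			return NULL;

		return report;
	}
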
diff --git a/drivers/hid/hid-roccat-arvo.c b/drivers/hid/hid-roccat-arvo.c
index 327f9b8ed1f..1948208fe03 100644
--- a/drivers/hid/hid-roccat-arvo.c
+++ b/drivers/hid/hid-roccat-arvo.c
@@ -59,7 +59,7 @@ static ssize_t arvo_sysfs_set_mode_key(struct device *dev,
unsigned long state;
int retval;
- retval = strict_strtoul(buf, 10, &state);
+ retval = kstrtoul(buf, 10, &state);
if (retval)
return retval;
@@ -75,6 +75,8 @@ static ssize_t arvo_sysfs_set_mode_key(struct device *dev,
return size;
}
+static DEVICE_ATTR(mode_key, 0660,
+ arvo_sysfs_show_mode_key, arvo_sysfs_set_mode_key);
static ssize_t arvo_sysfs_show_key_mask(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -107,7 +109,7 @@ static ssize_t arvo_sysfs_set_key_mask(struct device *dev,
unsigned long key_mask;
int retval;
- retval = strict_strtoul(buf, 10, &key_mask);
+ retval = kstrtoul(buf, 10, &key_mask);
if (retval)
return retval;
@@ -123,6 +125,8 @@ static ssize_t arvo_sysfs_set_key_mask(struct device *dev,
return size;
}
+static DEVICE_ATTR(key_mask, 0660,
+ arvo_sysfs_show_key_mask, arvo_sysfs_set_key_mask);
/* retval is 1-5 on success, < 0 on error */
static int arvo_get_actual_profile(struct usb_device *usb_dev)
@@ -159,7 +163,7 @@ static ssize_t arvo_sysfs_set_actual_profile(struct device *dev,
unsigned long profile;
int retval;
- retval = strict_strtoul(buf, 10, &profile);
+ retval = kstrtoul(buf, 10, &profile);
if (retval)
return retval;
@@ -179,6 +183,9 @@ static ssize_t arvo_sysfs_set_actual_profile(struct device *dev,
mutex_unlock(&arvo->arvo_lock);
return retval;
}
+static DEVICE_ATTR(actual_profile, 0660,
+ arvo_sysfs_show_actual_profile,
+ arvo_sysfs_set_actual_profile);
static ssize_t arvo_sysfs_write(struct file *fp,
struct kobject *kobj, void const *buf,
@@ -230,6 +237,8 @@ static ssize_t arvo_sysfs_write_button(struct file *fp,
return arvo_sysfs_write(fp, kobj, buf, off, count,
sizeof(struct arvo_button), ARVO_COMMAND_BUTTON);
}
+static BIN_ATTR(button, 0220, NULL, arvo_sysfs_write_button,
+ sizeof(struct arvo_button));
static ssize_t arvo_sysfs_read_info(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
@@ -238,31 +247,30 @@ static ssize_t arvo_sysfs_read_info(struct file *fp,
return arvo_sysfs_read(fp, kobj, buf, off, count,
sizeof(struct arvo_info), ARVO_COMMAND_INFO);
}
+static BIN_ATTR(info, 0440, arvo_sysfs_read_info, NULL,
+ sizeof(struct arvo_info));
+
+static struct attribute *arvo_attrs[] = {
+ &dev_attr_mode_key.attr,
+ &dev_attr_key_mask.attr,
+ &dev_attr_actual_profile.attr,
+ NULL,
+};
+static struct bin_attribute *arvo_bin_attributes[] = {
+ &bin_attr_button,
+ &bin_attr_info,
+ NULL,
+};
-static struct device_attribute arvo_attributes[] = {
- __ATTR(mode_key, 0660,
- arvo_sysfs_show_mode_key, arvo_sysfs_set_mode_key),
- __ATTR(key_mask, 0660,
- arvo_sysfs_show_key_mask, arvo_sysfs_set_key_mask),
- __ATTR(actual_profile, 0660,
- arvo_sysfs_show_actual_profile,
- arvo_sysfs_set_actual_profile),
- __ATTR_NULL
+static const struct attribute_group arvo_group = {
+ .attrs = arvo_attrs,
+ .bin_attrs = arvo_bin_attributes,
};
-static struct bin_attribute arvo_bin_attributes[] = {
- {
- .attr = { .name = "button", .mode = 0220 },
- .size = sizeof(struct arvo_button),
- .write = arvo_sysfs_write_button
- },
- {
- .attr = { .name = "info", .mode = 0440 },
- .size = sizeof(struct arvo_info),
- .read = arvo_sysfs_read_info
- },
- __ATTR_NULL
+static const struct attribute_group *arvo_groups[] = {
+ &arvo_group,
+ NULL,
};
static int arvo_init_arvo_device_struct(struct usb_device *usb_dev,
@@ -430,8 +438,7 @@ static int __init arvo_init(void)
arvo_class = class_create(THIS_MODULE, "arvo");
if (IS_ERR(arvo_class))
return PTR_ERR(arvo_class);
- arvo_class->dev_attrs = arvo_attributes;
- arvo_class->dev_bin_attrs = arvo_bin_attributes;
+ arvo_class->dev_groups = arvo_groups;
retval = hid_register_driver(&arvo_driver);
if (retval)
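
Like the hid-core sysfs change above, the Roccat drivers stop filling the removed class->dev_attrs / dev_bin_attrs arrays and instead publish one attribute_group (regular attributes plus .bin_attrs) through class->dev_groups; individual attributes are declared with DEVICE_ATTR()/BIN_ATTR() so they can be referenced by address. A reduced sketch of the pattern, with illustrative names and a placeholder show routine:

	#include <linux/device.h>
	#include <linux/kernel.h>
	#include <linux/sysfs.h>

	static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
				 char *buf)
	{
		return sprintf(buf, "0\n");		/* placeholder value */
	}
	static DEVICE_ATTR_RO(mode);

	static struct attribute *example_attrs[] = {
		&dev_attr_mode.attr,
		NULL,
	};

	static const struct attribute_group example_group = {
		.attrs		= example_attrs,
		/* .bin_attrs	= example_bin_attrs,	binary attributes go here */
	};

	static const struct attribute_group *example_groups[] = {
		&example_group,
		NULL,
	};

	/* assigned once at class creation, replacing ->dev_attrs / ->dev_bin_attrs:
	 *	example_class->dev_groups = example_groups;
	 */
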
diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c
index 8023751d525..bc62ed91e45 100644
--- a/drivers/hid/hid-roccat-isku.c
+++ b/drivers/hid/hid-roccat-isku.c
@@ -82,7 +82,7 @@ static ssize_t isku_sysfs_set_actual_profile(struct device *dev,
isku = hid_get_drvdata(dev_get_drvdata(dev));
usb_dev = interface_to_usbdev(to_usb_interface(dev));
- retval = strict_strtoul(buf, 10, &profile);
+ retval = kstrtoul(buf, 10, &profile);
if (retval)
return retval;
@@ -109,12 +109,12 @@ static ssize_t isku_sysfs_set_actual_profile(struct device *dev,
return size;
}
+static DEVICE_ATTR(actual_profile, 0660, isku_sysfs_show_actual_profile,
+ isku_sysfs_set_actual_profile);
-static struct device_attribute isku_attributes[] = {
- __ATTR(actual_profile, 0660,
- isku_sysfs_show_actual_profile,
- isku_sysfs_set_actual_profile),
- __ATTR_NULL
+static struct attribute *isku_attrs[] = {
+ &dev_attr_actual_profile.attr,
+ NULL,
};
static ssize_t isku_sysfs_read(struct file *fp, struct kobject *kobj,
@@ -184,7 +184,8 @@ ISKU_SYSFS_R(thingy, THINGY) \
ISKU_SYSFS_W(thingy, THINGY)
#define ISKU_BIN_ATTR_RW(thingy, THINGY) \
-{ \
+ISKU_SYSFS_RW(thingy, THINGY); \
+static struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = ISKU_SIZE_ ## THINGY, \
.read = isku_sysfs_read_ ## thingy, \
@@ -192,52 +193,64 @@ ISKU_SYSFS_W(thingy, THINGY)
}
#define ISKU_BIN_ATTR_R(thingy, THINGY) \
-{ \
+ISKU_SYSFS_R(thingy, THINGY); \
+static struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0440 }, \
.size = ISKU_SIZE_ ## THINGY, \
.read = isku_sysfs_read_ ## thingy, \
}
#define ISKU_BIN_ATTR_W(thingy, THINGY) \
-{ \
+ISKU_SYSFS_W(thingy, THINGY); \
+static struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0220 }, \
.size = ISKU_SIZE_ ## THINGY, \
.write = isku_sysfs_write_ ## thingy \
}
-ISKU_SYSFS_RW(macro, MACRO)
-ISKU_SYSFS_RW(keys_function, KEYS_FUNCTION)
-ISKU_SYSFS_RW(keys_easyzone, KEYS_EASYZONE)
-ISKU_SYSFS_RW(keys_media, KEYS_MEDIA)
-ISKU_SYSFS_RW(keys_thumbster, KEYS_THUMBSTER)
-ISKU_SYSFS_RW(keys_macro, KEYS_MACRO)
-ISKU_SYSFS_RW(keys_capslock, KEYS_CAPSLOCK)
-ISKU_SYSFS_RW(light, LIGHT)
-ISKU_SYSFS_RW(key_mask, KEY_MASK)
-ISKU_SYSFS_RW(last_set, LAST_SET)
-ISKU_SYSFS_W(talk, TALK)
-ISKU_SYSFS_W(talkfx, TALKFX)
-ISKU_SYSFS_R(info, INFO)
-ISKU_SYSFS_W(control, CONTROL)
-ISKU_SYSFS_W(reset, RESET)
-
-static struct bin_attribute isku_bin_attributes[] = {
- ISKU_BIN_ATTR_RW(macro, MACRO),
- ISKU_BIN_ATTR_RW(keys_function, KEYS_FUNCTION),
- ISKU_BIN_ATTR_RW(keys_easyzone, KEYS_EASYZONE),
- ISKU_BIN_ATTR_RW(keys_media, KEYS_MEDIA),
- ISKU_BIN_ATTR_RW(keys_thumbster, KEYS_THUMBSTER),
- ISKU_BIN_ATTR_RW(keys_macro, KEYS_MACRO),
- ISKU_BIN_ATTR_RW(keys_capslock, KEYS_CAPSLOCK),
- ISKU_BIN_ATTR_RW(light, LIGHT),
- ISKU_BIN_ATTR_RW(key_mask, KEY_MASK),
- ISKU_BIN_ATTR_RW(last_set, LAST_SET),
- ISKU_BIN_ATTR_W(talk, TALK),
- ISKU_BIN_ATTR_W(talkfx, TALKFX),
- ISKU_BIN_ATTR_R(info, INFO),
- ISKU_BIN_ATTR_W(control, CONTROL),
- ISKU_BIN_ATTR_W(reset, RESET),
- __ATTR_NULL
+ISKU_BIN_ATTR_RW(macro, MACRO);
+ISKU_BIN_ATTR_RW(keys_function, KEYS_FUNCTION);
+ISKU_BIN_ATTR_RW(keys_easyzone, KEYS_EASYZONE);
+ISKU_BIN_ATTR_RW(keys_media, KEYS_MEDIA);
+ISKU_BIN_ATTR_RW(keys_thumbster, KEYS_THUMBSTER);
+ISKU_BIN_ATTR_RW(keys_macro, KEYS_MACRO);
+ISKU_BIN_ATTR_RW(keys_capslock, KEYS_CAPSLOCK);
+ISKU_BIN_ATTR_RW(light, LIGHT);
+ISKU_BIN_ATTR_RW(key_mask, KEY_MASK);
+ISKU_BIN_ATTR_RW(last_set, LAST_SET);
+ISKU_BIN_ATTR_W(talk, TALK);
+ISKU_BIN_ATTR_W(talkfx, TALKFX);
+ISKU_BIN_ATTR_W(control, CONTROL);
+ISKU_BIN_ATTR_W(reset, RESET);
+ISKU_BIN_ATTR_R(info, INFO);
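+/* Each of the macros above emits both the read/write helpers and a named
+ * bin_attr_<name> object that the attribute list below references. */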
+
+static struct bin_attribute *isku_bin_attributes[] = {
+ &bin_attr_macro,
+ &bin_attr_keys_function,
+ &bin_attr_keys_easyzone,
+ &bin_attr_keys_media,
+ &bin_attr_keys_thumbster,
+ &bin_attr_keys_macro,
+ &bin_attr_keys_capslock,
+ &bin_attr_light,
+ &bin_attr_key_mask,
+ &bin_attr_last_set,
+ &bin_attr_talk,
+ &bin_attr_talkfx,
+ &bin_attr_control,
+ &bin_attr_reset,
+ &bin_attr_info,
+ NULL,
+};
+
+static const struct attribute_group isku_group = {
+ .attrs = isku_attrs,
+ .bin_attrs = isku_bin_attributes,
+};
+
+static const struct attribute_group *isku_groups[] = {
+ &isku_group,
+ NULL,
};
static int isku_init_isku_device_struct(struct usb_device *usb_dev,
@@ -427,8 +440,7 @@ static int __init isku_init(void)
isku_class = class_create(THIS_MODULE, "isku");
if (IS_ERR(isku_class))
return PTR_ERR(isku_class);
- isku_class->dev_attrs = isku_attributes;
- isku_class->dev_bin_attrs = isku_bin_attributes;
+ isku_class->dev_groups = isku_groups;
retval = hid_register_driver(&isku_driver);
if (retval)
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 7fae070788f..602c188e9d8 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -324,6 +324,8 @@ static ssize_t kone_sysfs_write_settings(struct file *fp, struct kobject *kobj,
return sizeof(struct kone_settings);
}
+static BIN_ATTR(settings, 0660, kone_sysfs_read_settings,
+ kone_sysfs_write_settings, sizeof(struct kone_settings));
static ssize_t kone_sysfs_read_profilex(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr,
@@ -378,6 +380,19 @@ static ssize_t kone_sysfs_write_profilex(struct file *fp,
return sizeof(struct kone_profile);
}
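+/* Each profile file shares the profilex read/write handlers; the profile
+ * number is passed via the bin_attribute's .private field. */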
+#define PROFILE_ATTR(number) \
+static struct bin_attribute bin_attr_profile##number = { \
+ .attr = { .name = "profile##number", .mode = 0660 }, \
+ .size = sizeof(struct kone_profile), \
+ .read = kone_sysfs_read_profilex, \
+ .write = kone_sysfs_write_profilex, \
+ .private = &profile_numbers[number-1], \
+};
+PROFILE_ATTR(1);
+PROFILE_ATTR(2);
+PROFILE_ATTR(3);
+PROFILE_ATTR(4);
+PROFILE_ATTR(5);
static ssize_t kone_sysfs_show_actual_profile(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -386,6 +401,7 @@ static ssize_t kone_sysfs_show_actual_profile(struct device *dev,
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_profile);
}
+static DEVICE_ATTR(actual_profile, 0440, kone_sysfs_show_actual_profile, NULL);
static ssize_t kone_sysfs_show_actual_dpi(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -394,6 +410,7 @@ static ssize_t kone_sysfs_show_actual_dpi(struct device *dev,
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", kone->actual_dpi);
}
+static DEVICE_ATTR(actual_dpi, 0440, kone_sysfs_show_actual_dpi, NULL);
/* weight is read each time, since we don't get informed when it's changed */
static ssize_t kone_sysfs_show_weight(struct device *dev,
@@ -416,6 +433,7 @@ static ssize_t kone_sysfs_show_weight(struct device *dev,
return retval;
return snprintf(buf, PAGE_SIZE, "%d\n", weight);
}
+static DEVICE_ATTR(weight, 0440, kone_sysfs_show_weight, NULL);
static ssize_t kone_sysfs_show_firmware_version(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -424,6 +442,8 @@ static ssize_t kone_sysfs_show_firmware_version(struct device *dev,
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", kone->firmware_version);
}
+static DEVICE_ATTR(firmware_version, 0440, kone_sysfs_show_firmware_version,
+ NULL);
static ssize_t kone_sysfs_show_tcu(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -456,7 +476,7 @@ static ssize_t kone_sysfs_set_tcu(struct device *dev,
kone = hid_get_drvdata(dev_get_drvdata(dev));
usb_dev = interface_to_usbdev(to_usb_interface(dev));
- retval = strict_strtoul(buf, 10, &state);
+ retval = kstrtoul(buf, 10, &state);
if (retval)
return retval;
@@ -524,6 +544,7 @@ exit_unlock:
mutex_unlock(&kone->kone_lock);
return retval;
}
+static DEVICE_ATTR(tcu, 0660, kone_sysfs_show_tcu, kone_sysfs_set_tcu);
static ssize_t kone_sysfs_show_startup_profile(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -545,7 +566,7 @@ static ssize_t kone_sysfs_set_startup_profile(struct device *dev,
kone = hid_get_drvdata(dev_get_drvdata(dev));
usb_dev = interface_to_usbdev(to_usb_interface(dev));
- retval = strict_strtoul(buf, 10, &new_startup_profile);
+ retval = kstrtoul(buf, 10, &new_startup_profile);
if (retval)
return retval;
@@ -570,15 +591,17 @@ static ssize_t kone_sysfs_set_startup_profile(struct device *dev,
mutex_unlock(&kone->kone_lock);
return size;
}
+static DEVICE_ATTR(startup_profile, 0660, kone_sysfs_show_startup_profile,
+ kone_sysfs_set_startup_profile);
-static struct device_attribute kone_attributes[] = {
+static struct attribute *kone_attrs[] = {
/*
* Read actual dpi settings.
* Returns raw value for further processing. Refer to enum
* kone_polling_rates to get real value.
*/
- __ATTR(actual_dpi, 0440, kone_sysfs_show_actual_dpi, NULL),
- __ATTR(actual_profile, 0440, kone_sysfs_show_actual_profile, NULL),
+ &dev_attr_actual_dpi.attr,
+ &dev_attr_actual_profile.attr,
/*
* The mouse can be equipped with one of four supplied weights from 5
@@ -587,7 +610,7 @@ static struct device_attribute kone_attributes[] = {
* by software. Refer to enum kone_weights to get corresponding real
* weight.
*/
- __ATTR(weight, 0440, kone_sysfs_show_weight, NULL),
+ &dev_attr_weight.attr,
/*
* Prints firmware version stored in mouse as integer.
@@ -595,66 +618,38 @@ static struct device_attribute kone_attributes[] = {
* to get the real version number the decimal point has to be shifted 2
* positions to the left. E.g. a value of 138 means 1.38.
*/
- __ATTR(firmware_version, 0440,
- kone_sysfs_show_firmware_version, NULL),
+ &dev_attr_firmware_version.attr,
/*
* Prints state of Tracking Control Unit as number where 0 = off and
* 1 = on. Writing 0 deactivates tcu and writing 1 calibrates and
* activates the tcu
*/
- __ATTR(tcu, 0660, kone_sysfs_show_tcu, kone_sysfs_set_tcu),
+ &dev_attr_tcu.attr,
/* Prints and takes the number of the profile the mouse starts with */
- __ATTR(startup_profile, 0660,
- kone_sysfs_show_startup_profile,
- kone_sysfs_set_startup_profile),
- __ATTR_NULL
+ &dev_attr_startup_profile.attr,
+ NULL,
+};
+
+static struct bin_attribute *kone_bin_attributes[] = {
+ &bin_attr_settings,
+ &bin_attr_profile1,
+ &bin_attr_profile2,
+ &bin_attr_profile3,
+ &bin_attr_profile4,
+ &bin_attr_profile5,
+ NULL,
+};
+
+static const struct attribute_group kone_group = {
+ .attrs = kone_attrs,
+ .bin_attrs = kone_bin_attributes,
};
-static struct bin_attribute kone_bin_attributes[] = {
- {
- .attr = { .name = "settings", .mode = 0660 },
- .size = sizeof(struct kone_settings),
- .read = kone_sysfs_read_settings,
- .write = kone_sysfs_write_settings
- },
- {
- .attr = { .name = "profile1", .mode = 0660 },
- .size = sizeof(struct kone_profile),
- .read = kone_sysfs_read_profilex,
- .write = kone_sysfs_write_profilex,
- .private = &profile_numbers[0]
- },
- {
- .attr = { .name = "profile2", .mode = 0660 },
- .size = sizeof(struct kone_profile),
- .read = kone_sysfs_read_profilex,
- .write = kone_sysfs_write_profilex,
- .private = &profile_numbers[1]
- },
- {
- .attr = { .name = "profile3", .mode = 0660 },
- .size = sizeof(struct kone_profile),
- .read = kone_sysfs_read_profilex,
- .write = kone_sysfs_write_profilex,
- .private = &profile_numbers[2]
- },
- {
- .attr = { .name = "profile4", .mode = 0660 },
- .size = sizeof(struct kone_profile),
- .read = kone_sysfs_read_profilex,
- .write = kone_sysfs_write_profilex,
- .private = &profile_numbers[3]
- },
- {
- .attr = { .name = "profile5", .mode = 0660 },
- .size = sizeof(struct kone_profile),
- .read = kone_sysfs_read_profilex,
- .write = kone_sysfs_write_profilex,
- .private = &profile_numbers[4]
- },
- __ATTR_NULL
+static const struct attribute_group *kone_groups[] = {
+ &kone_group,
+ NULL,
};
static int kone_init_kone_device_struct(struct usb_device *usb_dev,
@@ -891,8 +886,7 @@ static int __init kone_init(void)
kone_class = class_create(THIS_MODULE, "kone");
if (IS_ERR(kone_class))
return PTR_ERR(kone_class);
- kone_class->dev_attrs = kone_attributes;
- kone_class->dev_bin_attrs = kone_bin_attributes;
+ kone_class->dev_groups = kone_groups;
retval = hid_register_driver(&kone_driver);
if (retval)
diff --git a/drivers/hid/hid-roccat-koneplus.c b/drivers/hid/hid-roccat-koneplus.c
index 6a48fa3c7da..5ddf605b6b8 100644
--- a/drivers/hid/hid-roccat-koneplus.c
+++ b/drivers/hid/hid-roccat-koneplus.c
@@ -156,7 +156,8 @@ KONEPLUS_SYSFS_W(thingy, THINGY) \
KONEPLUS_SYSFS_R(thingy, THINGY)
#define KONEPLUS_BIN_ATTRIBUTE_RW(thingy, THINGY) \
-{ \
+KONEPLUS_SYSFS_RW(thingy, THINGY); \
+static struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = KONEPLUS_SIZE_ ## THINGY, \
.read = koneplus_sysfs_read_ ## thingy, \
@@ -164,28 +165,29 @@ KONEPLUS_SYSFS_R(thingy, THINGY)
}
#define KONEPLUS_BIN_ATTRIBUTE_R(thingy, THINGY) \
-{ \
+KONEPLUS_SYSFS_R(thingy, THINGY); \
+static struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0440 }, \
.size = KONEPLUS_SIZE_ ## THINGY, \
.read = koneplus_sysfs_read_ ## thingy, \
}
#define KONEPLUS_BIN_ATTRIBUTE_W(thingy, THINGY) \
-{ \
+KONEPLUS_SYSFS_W(thingy, THINGY); \
+static struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0220 }, \
.size = KONEPLUS_SIZE_ ## THINGY, \
.write = koneplus_sysfs_write_ ## thingy \
}
-
-KONEPLUS_SYSFS_W(control, CONTROL)
-KONEPLUS_SYSFS_RW(info, INFO)
-KONEPLUS_SYSFS_W(talk, TALK)
-KONEPLUS_SYSFS_W(macro, MACRO)
-KONEPLUS_SYSFS_RW(sensor, SENSOR)
-KONEPLUS_SYSFS_RW(tcu, TCU)
-KONEPLUS_SYSFS_R(tcu_image, TCU_IMAGE)
-KONEPLUS_SYSFS_RW(profile_settings, PROFILE_SETTINGS)
-KONEPLUS_SYSFS_RW(profile_buttons, PROFILE_BUTTONS)
+KONEPLUS_BIN_ATTRIBUTE_W(control, CONTROL);
+KONEPLUS_BIN_ATTRIBUTE_W(talk, TALK);
+KONEPLUS_BIN_ATTRIBUTE_W(macro, MACRO);
+KONEPLUS_BIN_ATTRIBUTE_R(tcu_image, TCU_IMAGE);
+KONEPLUS_BIN_ATTRIBUTE_RW(info, INFO);
+KONEPLUS_BIN_ATTRIBUTE_RW(sensor, SENSOR);
+KONEPLUS_BIN_ATTRIBUTE_RW(tcu, TCU);
+KONEPLUS_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS);
+KONEPLUS_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS);
static ssize_t koneplus_sysfs_read_profilex_settings(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
@@ -225,6 +227,25 @@ static ssize_t koneplus_sysfs_read_profilex_buttons(struct file *fp,
KONEPLUS_COMMAND_PROFILE_BUTTONS);
}
+#define PROFILE_ATTR(number) \
+static struct bin_attribute bin_attr_profile##number##_settings = { \
+ .attr = { .name = "profile##number##_settings", .mode = 0440 }, \
+ .size = KONEPLUS_SIZE_PROFILE_SETTINGS, \
+ .read = koneplus_sysfs_read_profilex_settings, \
+ .private = &profile_numbers[number-1], \
+}; \
+static struct bin_attribute bin_attr_profile##number##_buttons = { \
+ .attr = { .name = "profile##number##_buttons", .mode = 0440 }, \
+ .size = KONEPLUS_SIZE_PROFILE_BUTTONS, \
+ .read = koneplus_sysfs_read_profilex_buttons, \
+ .private = &profile_numbers[number-1], \
+};
+PROFILE_ATTR(1);
+PROFILE_ATTR(2);
+PROFILE_ATTR(3);
+PROFILE_ATTR(4);
+PROFILE_ATTR(5);
+
static ssize_t koneplus_sysfs_show_actual_profile(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -246,7 +267,7 @@ static ssize_t koneplus_sysfs_set_actual_profile(struct device *dev,
koneplus = hid_get_drvdata(dev_get_drvdata(dev));
usb_dev = interface_to_usbdev(to_usb_interface(dev));
- retval = strict_strtoul(buf, 10, &profile);
+ retval = kstrtoul(buf, 10, &profile);
if (retval)
return retval;
@@ -274,6 +295,12 @@ static ssize_t koneplus_sysfs_set_actual_profile(struct device *dev,
return size;
}
+static DEVICE_ATTR(actual_profile, 0660,
+ koneplus_sysfs_show_actual_profile,
+ koneplus_sysfs_set_actual_profile);
+static DEVICE_ATTR(startup_profile, 0660,
+ koneplus_sysfs_show_actual_profile,
+ koneplus_sysfs_set_actual_profile);
static ssize_t koneplus_sysfs_show_firmware_version(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -293,90 +320,47 @@ static ssize_t koneplus_sysfs_show_firmware_version(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", info.firmware_version);
}
+static DEVICE_ATTR(firmware_version, 0440,
+ koneplus_sysfs_show_firmware_version, NULL);
+
+static struct attribute *koneplus_attrs[] = {
+ &dev_attr_actual_profile.attr,
+ &dev_attr_startup_profile.attr,
+ &dev_attr_firmware_version.attr,
+ NULL,
+};
+
+static struct bin_attribute *koneplus_bin_attributes[] = {
+ &bin_attr_control,
+ &bin_attr_talk,
+ &bin_attr_macro,
+ &bin_attr_tcu_image,
+ &bin_attr_info,
+ &bin_attr_sensor,
+ &bin_attr_tcu,
+ &bin_attr_profile_settings,
+ &bin_attr_profile_buttons,
+ &bin_attr_profile1_settings,
+ &bin_attr_profile2_settings,
+ &bin_attr_profile3_settings,
+ &bin_attr_profile4_settings,
+ &bin_attr_profile5_settings,
+ &bin_attr_profile1_buttons,
+ &bin_attr_profile2_buttons,
+ &bin_attr_profile3_buttons,
+ &bin_attr_profile4_buttons,
+ &bin_attr_profile5_buttons,
+ NULL,
+};
-static struct device_attribute koneplus_attributes[] = {
- __ATTR(actual_profile, 0660,
- koneplus_sysfs_show_actual_profile,
- koneplus_sysfs_set_actual_profile),
- __ATTR(startup_profile, 0660,
- koneplus_sysfs_show_actual_profile,
- koneplus_sysfs_set_actual_profile),
- __ATTR(firmware_version, 0440,
- koneplus_sysfs_show_firmware_version, NULL),
- __ATTR_NULL
+static const struct attribute_group koneplus_group = {
+ .attrs = koneplus_attrs,
+ .bin_attrs = koneplus_bin_attributes,
};
-static struct bin_attribute koneplus_bin_attributes[] = {
- KONEPLUS_BIN_ATTRIBUTE_W(control, CONTROL),
- KONEPLUS_BIN_ATTRIBUTE_RW(info, INFO),
- KONEPLUS_BIN_ATTRIBUTE_W(talk, TALK),
- KONEPLUS_BIN_ATTRIBUTE_W(macro, MACRO),
- KONEPLUS_BIN_ATTRIBUTE_RW(sensor, SENSOR),
- KONEPLUS_BIN_ATTRIBUTE_RW(tcu, TCU),
- KONEPLUS_BIN_ATTRIBUTE_R(tcu_image, TCU_IMAGE),
- KONEPLUS_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS),
- KONEPLUS_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS),
- {
- .attr = { .name = "profile1_settings", .mode = 0440 },
- .size = KONEPLUS_SIZE_PROFILE_SETTINGS,
- .read = koneplus_sysfs_read_profilex_settings,
- .private = &profile_numbers[0]
- },
- {
- .attr = { .name = "profile2_settings", .mode = 0440 },
- .size = KONEPLUS_SIZE_PROFILE_SETTINGS,
- .read = koneplus_sysfs_read_profilex_settings,
- .private = &profile_numbers[1]
- },
- {
- .attr = { .name = "profile3_settings", .mode = 0440 },
- .size = KONEPLUS_SIZE_PROFILE_SETTINGS,
- .read = koneplus_sysfs_read_profilex_settings,
- .private = &profile_numbers[2]
- },
- {
- .attr = { .name = "profile4_settings", .mode = 0440 },
- .size = KONEPLUS_SIZE_PROFILE_SETTINGS,
- .read = koneplus_sysfs_read_profilex_settings,
- .private = &profile_numbers[3]
- },
- {
- .attr = { .name = "profile5_settings", .mode = 0440 },
- .size = KONEPLUS_SIZE_PROFILE_SETTINGS,
- .read = koneplus_sysfs_read_profilex_settings,
- .private = &profile_numbers[4]
- },
- {
- .attr = { .name = "profile1_buttons", .mode = 0440 },
- .size = KONEPLUS_SIZE_PROFILE_BUTTONS,
- .read = koneplus_sysfs_read_profilex_buttons,
- .private = &profile_numbers[0]
- },
- {
- .attr = { .name = "profile2_buttons", .mode = 0440 },
- .size = KONEPLUS_SIZE_PROFILE_BUTTONS,
- .read = koneplus_sysfs_read_profilex_buttons,
- .private = &profile_numbers[1]
- },
- {
- .attr = { .name = "profile3_buttons", .mode = 0440 },
- .size = KONEPLUS_SIZE_PROFILE_BUTTONS,
- .read = koneplus_sysfs_read_profilex_buttons,
- .private = &profile_numbers[2]
- },
- {
- .attr = { .name = "profile4_buttons", .mode = 0440 },
- .size = KONEPLUS_SIZE_PROFILE_BUTTONS,
- .read = koneplus_sysfs_read_profilex_buttons,
- .private = &profile_numbers[3]
- },
- {
- .attr = { .name = "profile5_buttons", .mode = 0440 },
- .size = KONEPLUS_SIZE_PROFILE_BUTTONS,
- .read = koneplus_sysfs_read_profilex_buttons,
- .private = &profile_numbers[4]
- },
- __ATTR_NULL
+static const struct attribute_group *koneplus_groups[] = {
+ &koneplus_group,
+ NULL,
};
static int koneplus_init_koneplus_device_struct(struct usb_device *usb_dev,
@@ -572,8 +556,7 @@ static int __init koneplus_init(void)
koneplus_class = class_create(THIS_MODULE, "koneplus");
if (IS_ERR(koneplus_class))
return PTR_ERR(koneplus_class);
- koneplus_class->dev_attrs = koneplus_attributes;
- koneplus_class->dev_bin_attrs = koneplus_bin_attributes;
+ koneplus_class->dev_groups = koneplus_groups;
retval = hid_register_driver(&koneplus_driver);
if (retval)
diff --git a/drivers/hid/hid-roccat-konepure.c b/drivers/hid/hid-roccat-konepure.c
index c79d0b06c14..99a605ebb66 100644
--- a/drivers/hid/hid-roccat-konepure.c
+++ b/drivers/hid/hid-roccat-konepure.c
@@ -94,7 +94,8 @@ KONEPURE_SYSFS_W(thingy, THINGY) \
KONEPURE_SYSFS_R(thingy, THINGY)
#define KONEPURE_BIN_ATTRIBUTE_RW(thingy, THINGY) \
-{ \
+KONEPURE_SYSFS_RW(thingy, THINGY); \
+static struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = KONEPURE_SIZE_ ## THINGY, \
.read = konepure_sysfs_read_ ## thingy, \
@@ -102,44 +103,56 @@ KONEPURE_SYSFS_R(thingy, THINGY)
}
#define KONEPURE_BIN_ATTRIBUTE_R(thingy, THINGY) \
-{ \
+KONEPURE_SYSFS_R(thingy, THINGY); \
+static struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0440 }, \
.size = KONEPURE_SIZE_ ## THINGY, \
.read = konepure_sysfs_read_ ## thingy, \
}
#define KONEPURE_BIN_ATTRIBUTE_W(thingy, THINGY) \
-{ \
+KONEPURE_SYSFS_W(thingy, THINGY); \
+static struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0220 }, \
.size = KONEPURE_SIZE_ ## THINGY, \
.write = konepure_sysfs_write_ ## thingy \
}
-KONEPURE_SYSFS_RW(actual_profile, ACTUAL_PROFILE)
-KONEPURE_SYSFS_W(control, CONTROL)
-KONEPURE_SYSFS_RW(info, INFO)
-KONEPURE_SYSFS_W(talk, TALK)
-KONEPURE_SYSFS_W(macro, MACRO)
-KONEPURE_SYSFS_RW(sensor, SENSOR)
-KONEPURE_SYSFS_RW(tcu, TCU)
-KONEPURE_SYSFS_R(tcu_image, TCU_IMAGE)
-KONEPURE_SYSFS_RW(profile_settings, PROFILE_SETTINGS)
-KONEPURE_SYSFS_RW(profile_buttons, PROFILE_BUTTONS)
-
-static struct bin_attribute konepure_bin_attributes[] = {
- KONEPURE_BIN_ATTRIBUTE_RW(actual_profile, ACTUAL_PROFILE),
- KONEPURE_BIN_ATTRIBUTE_W(control, CONTROL),
- KONEPURE_BIN_ATTRIBUTE_RW(info, INFO),
- KONEPURE_BIN_ATTRIBUTE_W(talk, TALK),
- KONEPURE_BIN_ATTRIBUTE_W(macro, MACRO),
- KONEPURE_BIN_ATTRIBUTE_RW(sensor, SENSOR),
- KONEPURE_BIN_ATTRIBUTE_RW(tcu, TCU),
- KONEPURE_BIN_ATTRIBUTE_R(tcu_image, TCU_IMAGE),
- KONEPURE_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS),
- KONEPURE_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS),
- __ATTR_NULL
+KONEPURE_BIN_ATTRIBUTE_RW(actual_profile, ACTUAL_PROFILE);
+KONEPURE_BIN_ATTRIBUTE_RW(info, INFO);
+KONEPURE_BIN_ATTRIBUTE_RW(sensor, SENSOR);
+KONEPURE_BIN_ATTRIBUTE_RW(tcu, TCU);
+KONEPURE_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS);
+KONEPURE_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS);
+KONEPURE_BIN_ATTRIBUTE_W(control, CONTROL);
+KONEPURE_BIN_ATTRIBUTE_W(talk, TALK);
+KONEPURE_BIN_ATTRIBUTE_W(macro, MACRO);
+KONEPURE_BIN_ATTRIBUTE_R(tcu_image, TCU_IMAGE);
+
+static struct bin_attribute *konepure_bin_attributes[] = {
+ &bin_attr_actual_profile,
+ &bin_attr_info,
+ &bin_attr_sensor,
+ &bin_attr_tcu,
+ &bin_attr_profile_settings,
+ &bin_attr_profile_buttons,
+ &bin_attr_control,
+ &bin_attr_talk,
+ &bin_attr_macro,
+ &bin_attr_tcu_image,
+ NULL,
+};
+
+static const struct attribute_group konepure_group = {
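+ /* The KonePure exposes only binary sysfs attributes, so .attrs stays unset. */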
+ .bin_attrs = konepure_bin_attributes,
+};
+
+static const struct attribute_group *konepure_groups[] = {
+ &konepure_group,
+ NULL,
};
+
static int konepure_init_konepure_device_struct(struct usb_device *usb_dev,
struct konepure_device *konepure)
{
@@ -262,6 +275,7 @@ static int konepure_raw_event(struct hid_device *hdev,
static const struct hid_device_id konepure_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPURE_OPTICAL) },
{ }
};
@@ -282,7 +296,7 @@ static int __init konepure_init(void)
konepure_class = class_create(THIS_MODULE, "konepure");
if (IS_ERR(konepure_class))
return PTR_ERR(konepure_class);
- konepure_class->dev_bin_attrs = konepure_bin_attributes;
+ konepure_class->dev_groups = konepure_groups;
retval = hid_register_driver(&konepure_driver);
if (retval)
@@ -300,5 +314,5 @@ module_init(konepure_init);
module_exit(konepure_exit);
MODULE_AUTHOR("Stefan Achatz");
-MODULE_DESCRIPTION("USB Roccat KonePure driver");
+MODULE_DESCRIPTION("USB Roccat KonePure/Optical driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
index b8b37789b86..515bc03136c 100644
--- a/drivers/hid/hid-roccat-kovaplus.c
+++ b/drivers/hid/hid-roccat-kovaplus.c
@@ -197,31 +197,25 @@ KOVAPLUS_SYSFS_W(thingy, THINGY) \
KOVAPLUS_SYSFS_R(thingy, THINGY)
#define KOVAPLUS_BIN_ATTRIBUTE_RW(thingy, THINGY) \
-{ \
+KOVAPLUS_SYSFS_RW(thingy, THINGY); \
+static struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = KOVAPLUS_SIZE_ ## THINGY, \
.read = kovaplus_sysfs_read_ ## thingy, \
.write = kovaplus_sysfs_write_ ## thingy \
}
-#define KOVAPLUS_BIN_ATTRIBUTE_R(thingy, THINGY) \
-{ \
- .attr = { .name = #thingy, .mode = 0440 }, \
- .size = KOVAPLUS_SIZE_ ## THINGY, \
- .read = kovaplus_sysfs_read_ ## thingy, \
-}
-
#define KOVAPLUS_BIN_ATTRIBUTE_W(thingy, THINGY) \
-{ \
+KOVAPLUS_SYSFS_W(thingy, THINGY); \
+static struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0220 }, \
.size = KOVAPLUS_SIZE_ ## THINGY, \
.write = kovaplus_sysfs_write_ ## thingy \
}
-
-KOVAPLUS_SYSFS_W(control, CONTROL)
-KOVAPLUS_SYSFS_RW(info, INFO)
-KOVAPLUS_SYSFS_RW(profile_settings, PROFILE_SETTINGS)
-KOVAPLUS_SYSFS_RW(profile_buttons, PROFILE_BUTTONS)
+KOVAPLUS_BIN_ATTRIBUTE_W(control, CONTROL);
+KOVAPLUS_BIN_ATTRIBUTE_RW(info, INFO);
+KOVAPLUS_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS);
+KOVAPLUS_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS);
static ssize_t kovaplus_sysfs_read_profilex_settings(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
@@ -261,6 +255,25 @@ static ssize_t kovaplus_sysfs_read_profilex_buttons(struct file *fp,
KOVAPLUS_COMMAND_PROFILE_BUTTONS);
}
+#define PROFILE_ATTR(number) \
+static struct bin_attribute bin_attr_profile##number##_settings = { \
+ .attr = { .name = "profile##number##_settings", .mode = 0440 }, \
+ .size = KOVAPLUS_SIZE_PROFILE_SETTINGS, \
+ .read = kovaplus_sysfs_read_profilex_settings, \
+ .private = &profile_numbers[number-1], \
+}; \
+static struct bin_attribute bin_attr_profile##number##_buttons = { \
+ .attr = { .name = "profile##number##_buttons", .mode = 0440 }, \
+ .size = KOVAPLUS_SIZE_PROFILE_BUTTONS, \
+ .read = kovaplus_sysfs_read_profilex_buttons, \
+ .private = &profile_numbers[number-1], \
+};
+PROFILE_ATTR(1);
+PROFILE_ATTR(2);
+PROFILE_ATTR(3);
+PROFILE_ATTR(4);
+PROFILE_ATTR(5);
+
static ssize_t kovaplus_sysfs_show_actual_profile(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -282,7 +295,7 @@ static ssize_t kovaplus_sysfs_set_actual_profile(struct device *dev,
kovaplus = hid_get_drvdata(dev_get_drvdata(dev));
usb_dev = interface_to_usbdev(to_usb_interface(dev));
- retval = strict_strtoul(buf, 10, &profile);
+ retval = kstrtoul(buf, 10, &profile);
if (retval)
return retval;
@@ -310,6 +323,9 @@ static ssize_t kovaplus_sysfs_set_actual_profile(struct device *dev,
return size;
}
+static DEVICE_ATTR(actual_profile, 0660,
+ kovaplus_sysfs_show_actual_profile,
+ kovaplus_sysfs_set_actual_profile);
static ssize_t kovaplus_sysfs_show_actual_cpi(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -318,6 +334,7 @@ static ssize_t kovaplus_sysfs_show_actual_cpi(struct device *dev,
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_cpi);
}
+static DEVICE_ATTR(actual_cpi, 0440, kovaplus_sysfs_show_actual_cpi, NULL);
static ssize_t kovaplus_sysfs_show_actual_sensitivity_x(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -326,6 +343,8 @@ static ssize_t kovaplus_sysfs_show_actual_sensitivity_x(struct device *dev,
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_x_sensitivity);
}
+static DEVICE_ATTR(actual_sensitivity_x, 0440,
+ kovaplus_sysfs_show_actual_sensitivity_x, NULL);
static ssize_t kovaplus_sysfs_show_actual_sensitivity_y(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -334,6 +353,8 @@ static ssize_t kovaplus_sysfs_show_actual_sensitivity_y(struct device *dev,
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", kovaplus->actual_y_sensitivity);
}
+static DEVICE_ATTR(actual_sensitivity_y, 0440,
+ kovaplus_sysfs_show_actual_sensitivity_y, NULL);
static ssize_t kovaplus_sysfs_show_firmware_version(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -353,88 +374,44 @@ static ssize_t kovaplus_sysfs_show_firmware_version(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", info.firmware_version);
}
+static DEVICE_ATTR(firmware_version, 0440,
+ kovaplus_sysfs_show_firmware_version, NULL);
+
+static struct attribute *kovaplus_attrs[] = {
+ &dev_attr_actual_cpi.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_actual_profile.attr,
+ &dev_attr_actual_sensitivity_x.attr,
+ &dev_attr_actual_sensitivity_y.attr,
+ NULL,
+};
+
+static struct bin_attribute *kovaplus_bin_attributes[] = {
+ &bin_attr_control,
+ &bin_attr_info,
+ &bin_attr_profile_settings,
+ &bin_attr_profile_buttons,
+ &bin_attr_profile1_settings,
+ &bin_attr_profile2_settings,
+ &bin_attr_profile3_settings,
+ &bin_attr_profile4_settings,
+ &bin_attr_profile5_settings,
+ &bin_attr_profile1_buttons,
+ &bin_attr_profile2_buttons,
+ &bin_attr_profile3_buttons,
+ &bin_attr_profile4_buttons,
+ &bin_attr_profile5_buttons,
+ NULL,
+};
-static struct device_attribute kovaplus_attributes[] = {
- __ATTR(actual_cpi, 0440,
- kovaplus_sysfs_show_actual_cpi, NULL),
- __ATTR(firmware_version, 0440,
- kovaplus_sysfs_show_firmware_version, NULL),
- __ATTR(actual_profile, 0660,
- kovaplus_sysfs_show_actual_profile,
- kovaplus_sysfs_set_actual_profile),
- __ATTR(actual_sensitivity_x, 0440,
- kovaplus_sysfs_show_actual_sensitivity_x, NULL),
- __ATTR(actual_sensitivity_y, 0440,
- kovaplus_sysfs_show_actual_sensitivity_y, NULL),
- __ATTR_NULL
+static const struct attribute_group kovaplus_group = {
+ .attrs = kovaplus_attrs,
+ .bin_attrs = kovaplus_bin_attributes,
};
-static struct bin_attribute kovaplus_bin_attributes[] = {
- KOVAPLUS_BIN_ATTRIBUTE_W(control, CONTROL),
- KOVAPLUS_BIN_ATTRIBUTE_RW(info, INFO),
- KOVAPLUS_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS),
- KOVAPLUS_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS),
- {
- .attr = { .name = "profile1_settings", .mode = 0440 },
- .size = KOVAPLUS_SIZE_PROFILE_SETTINGS,
- .read = kovaplus_sysfs_read_profilex_settings,
- .private = &profile_numbers[0]
- },
- {
- .attr = { .name = "profile2_settings", .mode = 0440 },
- .size = KOVAPLUS_SIZE_PROFILE_SETTINGS,
- .read = kovaplus_sysfs_read_profilex_settings,
- .private = &profile_numbers[1]
- },
- {
- .attr = { .name = "profile3_settings", .mode = 0440 },
- .size = KOVAPLUS_SIZE_PROFILE_SETTINGS,
- .read = kovaplus_sysfs_read_profilex_settings,
- .private = &profile_numbers[2]
- },
- {
- .attr = { .name = "profile4_settings", .mode = 0440 },
- .size = KOVAPLUS_SIZE_PROFILE_SETTINGS,
- .read = kovaplus_sysfs_read_profilex_settings,
- .private = &profile_numbers[3]
- },
- {
- .attr = { .name = "profile5_settings", .mode = 0440 },
- .size = KOVAPLUS_SIZE_PROFILE_SETTINGS,
- .read = kovaplus_sysfs_read_profilex_settings,
- .private = &profile_numbers[4]
- },
- {
- .attr = { .name = "profile1_buttons", .mode = 0440 },
- .size = KOVAPLUS_SIZE_PROFILE_BUTTONS,
- .read = kovaplus_sysfs_read_profilex_buttons,
- .private = &profile_numbers[0]
- },
- {
- .attr = { .name = "profile2_buttons", .mode = 0440 },
- .size = KOVAPLUS_SIZE_PROFILE_BUTTONS,
- .read = kovaplus_sysfs_read_profilex_buttons,
- .private = &profile_numbers[1]
- },
- {
- .attr = { .name = "profile3_buttons", .mode = 0440 },
- .size = KOVAPLUS_SIZE_PROFILE_BUTTONS,
- .read = kovaplus_sysfs_read_profilex_buttons,
- .private = &profile_numbers[2]
- },
- {
- .attr = { .name = "profile4_buttons", .mode = 0440 },
- .size = KOVAPLUS_SIZE_PROFILE_BUTTONS,
- .read = kovaplus_sysfs_read_profilex_buttons,
- .private = &profile_numbers[3]
- },
- {
- .attr = { .name = "profile5_buttons", .mode = 0440 },
- .size = KOVAPLUS_SIZE_PROFILE_BUTTONS,
- .read = kovaplus_sysfs_read_profilex_buttons,
- .private = &profile_numbers[4]
- },
- __ATTR_NULL
+static const struct attribute_group *kovaplus_groups[] = {
+ &kovaplus_group,
+ NULL,
};
static int kovaplus_init_kovaplus_device_struct(struct usb_device *usb_dev,
@@ -662,8 +639,7 @@ static int __init kovaplus_init(void)
kovaplus_class = class_create(THIS_MODULE, "kovaplus");
if (IS_ERR(kovaplus_class))
return PTR_ERR(kovaplus_class);
- kovaplus_class->dev_attrs = kovaplus_attributes;
- kovaplus_class->dev_bin_attrs = kovaplus_bin_attributes;
+ kovaplus_class->dev_groups = kovaplus_groups;
retval = hid_register_driver(&kovaplus_driver);
if (retval)
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index d4f1e3bee59..5a6dbbeee79 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -156,7 +156,8 @@ PYRA_SYSFS_W(thingy, THINGY) \
PYRA_SYSFS_R(thingy, THINGY)
#define PYRA_BIN_ATTRIBUTE_RW(thingy, THINGY) \
-{ \
+PYRA_SYSFS_RW(thingy, THINGY); \
+static struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = PYRA_SIZE_ ## THINGY, \
.read = pyra_sysfs_read_ ## thingy, \
@@ -164,24 +165,25 @@ PYRA_SYSFS_R(thingy, THINGY)
}
#define PYRA_BIN_ATTRIBUTE_R(thingy, THINGY) \
-{ \
+PYRA_SYSFS_R(thingy, THINGY); \
+static struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0440 }, \
.size = PYRA_SIZE_ ## THINGY, \
.read = pyra_sysfs_read_ ## thingy, \
}
#define PYRA_BIN_ATTRIBUTE_W(thingy, THINGY) \
-{ \
+PYRA_SYSFS_W(thingy, THINGY); \
+static struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0220 }, \
.size = PYRA_SIZE_ ## THINGY, \
.write = pyra_sysfs_write_ ## thingy \
}
-PYRA_SYSFS_W(control, CONTROL)
-PYRA_SYSFS_RW(info, INFO)
-PYRA_SYSFS_RW(profile_settings, PROFILE_SETTINGS)
-PYRA_SYSFS_RW(profile_buttons, PROFILE_BUTTONS)
-PYRA_SYSFS_R(settings, SETTINGS)
+PYRA_BIN_ATTRIBUTE_W(control, CONTROL);
+PYRA_BIN_ATTRIBUTE_RW(info, INFO);
+PYRA_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS);
+PYRA_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS);
static ssize_t pyra_sysfs_read_profilex_settings(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
@@ -221,6 +223,25 @@ static ssize_t pyra_sysfs_read_profilex_buttons(struct file *fp,
PYRA_COMMAND_PROFILE_BUTTONS);
}
+#define PROFILE_ATTR(number) \
+static struct bin_attribute bin_attr_profile##number##_settings = { \
+ .attr = { .name = "profile##number##_settings", .mode = 0440 }, \
+ .size = PYRA_SIZE_PROFILE_SETTINGS, \
+ .read = pyra_sysfs_read_profilex_settings, \
+ .private = &profile_numbers[number-1], \
+}; \
+static struct bin_attribute bin_attr_profile##number##_buttons = { \
+ .attr = { .name = "profile##number##_buttons", .mode = 0440 }, \
+ .size = PYRA_SIZE_PROFILE_BUTTONS, \
+ .read = pyra_sysfs_read_profilex_buttons, \
+ .private = &profile_numbers[number-1], \
+};
+PROFILE_ATTR(1);
+PROFILE_ATTR(2);
+PROFILE_ATTR(3);
+PROFILE_ATTR(4);
+PROFILE_ATTR(5);
+
static ssize_t pyra_sysfs_write_settings(struct file *fp,
struct kobject *kobj, struct bin_attribute *attr, char *buf,
loff_t off, size_t count)
@@ -258,6 +279,11 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
return PYRA_SIZE_SETTINGS;
}
+PYRA_SYSFS_R(settings, SETTINGS);
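+/* __BIN_ATTR() fills in the stringified name, mode, read/write handlers and
+ * size of the bin_attribute in one step. */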
+static struct bin_attribute bin_attr_settings =
+ __BIN_ATTR(settings, (S_IWUSR | S_IRUGO),
+ pyra_sysfs_read_settings, pyra_sysfs_write_settings,
+ PYRA_SIZE_SETTINGS);
static ssize_t pyra_sysfs_show_actual_cpi(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -266,6 +292,7 @@ static ssize_t pyra_sysfs_show_actual_cpi(struct device *dev,
hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
return snprintf(buf, PAGE_SIZE, "%d\n", pyra->actual_cpi);
}
+static DEVICE_ATTR(actual_cpi, 0440, pyra_sysfs_show_actual_cpi, NULL);
static ssize_t pyra_sysfs_show_actual_profile(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -282,6 +309,8 @@ static ssize_t pyra_sysfs_show_actual_profile(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", settings.startup_profile);
}
+static DEVICE_ATTR(actual_profile, 0440, pyra_sysfs_show_actual_profile, NULL);
+static DEVICE_ATTR(startup_profile, 0440, pyra_sysfs_show_actual_profile, NULL);
static ssize_t pyra_sysfs_show_firmware_version(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -301,84 +330,44 @@ static ssize_t pyra_sysfs_show_firmware_version(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%d\n", info.firmware_version);
}
+static DEVICE_ATTR(firmware_version, 0440, pyra_sysfs_show_firmware_version,
+ NULL);
+
+static struct attribute *pyra_attrs[] = {
+ &dev_attr_actual_cpi.attr,
+ &dev_attr_actual_profile.attr,
+ &dev_attr_firmware_version.attr,
+ &dev_attr_startup_profile.attr,
+ NULL,
+};
+
+static struct bin_attribute *pyra_bin_attributes[] = {
+ &bin_attr_control,
+ &bin_attr_info,
+ &bin_attr_profile_settings,
+ &bin_attr_profile_buttons,
+ &bin_attr_settings,
+ &bin_attr_profile1_settings,
+ &bin_attr_profile2_settings,
+ &bin_attr_profile3_settings,
+ &bin_attr_profile4_settings,
+ &bin_attr_profile5_settings,
+ &bin_attr_profile1_buttons,
+ &bin_attr_profile2_buttons,
+ &bin_attr_profile3_buttons,
+ &bin_attr_profile4_buttons,
+ &bin_attr_profile5_buttons,
+ NULL,
+};
-static struct device_attribute pyra_attributes[] = {
- __ATTR(actual_cpi, 0440, pyra_sysfs_show_actual_cpi, NULL),
- __ATTR(actual_profile, 0440, pyra_sysfs_show_actual_profile, NULL),
- __ATTR(firmware_version, 0440,
- pyra_sysfs_show_firmware_version, NULL),
- __ATTR(startup_profile, 0440,
- pyra_sysfs_show_actual_profile, NULL),
- __ATTR_NULL
+static const struct attribute_group pyra_group = {
+ .attrs = pyra_attrs,
+ .bin_attrs = pyra_bin_attributes,
};
-static struct bin_attribute pyra_bin_attributes[] = {
- PYRA_BIN_ATTRIBUTE_W(control, CONTROL),
- PYRA_BIN_ATTRIBUTE_RW(info, INFO),
- PYRA_BIN_ATTRIBUTE_RW(profile_settings, PROFILE_SETTINGS),
- PYRA_BIN_ATTRIBUTE_RW(profile_buttons, PROFILE_BUTTONS),
- PYRA_BIN_ATTRIBUTE_RW(settings, SETTINGS),
- {
- .attr = { .name = "profile1_settings", .mode = 0440 },
- .size = PYRA_SIZE_PROFILE_SETTINGS,
- .read = pyra_sysfs_read_profilex_settings,
- .private = &profile_numbers[0]
- },
- {
- .attr = { .name = "profile2_settings", .mode = 0440 },
- .size = PYRA_SIZE_PROFILE_SETTINGS,
- .read = pyra_sysfs_read_profilex_settings,
- .private = &profile_numbers[1]
- },
- {
- .attr = { .name = "profile3_settings", .mode = 0440 },
- .size = PYRA_SIZE_PROFILE_SETTINGS,
- .read = pyra_sysfs_read_profilex_settings,
- .private = &profile_numbers[2]
- },
- {
- .attr = { .name = "profile4_settings", .mode = 0440 },
- .size = PYRA_SIZE_PROFILE_SETTINGS,
- .read = pyra_sysfs_read_profilex_settings,
- .private = &profile_numbers[3]
- },
- {
- .attr = { .name = "profile5_settings", .mode = 0440 },
- .size = PYRA_SIZE_PROFILE_SETTINGS,
- .read = pyra_sysfs_read_profilex_settings,
- .private = &profile_numbers[4]
- },
- {
- .attr = { .name = "profile1_buttons", .mode = 0440 },
- .size = PYRA_SIZE_PROFILE_BUTTONS,
- .read = pyra_sysfs_read_profilex_buttons,
- .private = &profile_numbers[0]
- },
- {
- .attr = { .name = "profile2_buttons", .mode = 0440 },
- .size = PYRA_SIZE_PROFILE_BUTTONS,
- .read = pyra_sysfs_read_profilex_buttons,
- .private = &profile_numbers[1]
- },
- {
- .attr = { .name = "profile3_buttons", .mode = 0440 },
- .size = PYRA_SIZE_PROFILE_BUTTONS,
- .read = pyra_sysfs_read_profilex_buttons,
- .private = &profile_numbers[2]
- },
- {
- .attr = { .name = "profile4_buttons", .mode = 0440 },
- .size = PYRA_SIZE_PROFILE_BUTTONS,
- .read = pyra_sysfs_read_profilex_buttons,
- .private = &profile_numbers[3]
- },
- {
- .attr = { .name = "profile5_buttons", .mode = 0440 },
- .size = PYRA_SIZE_PROFILE_BUTTONS,
- .read = pyra_sysfs_read_profilex_buttons,
- .private = &profile_numbers[4]
- },
- __ATTR_NULL
+static const struct attribute_group *pyra_groups[] = {
+ &pyra_group,
+ NULL,
};
static int pyra_init_pyra_device_struct(struct usb_device *usb_dev,
@@ -600,8 +589,7 @@ static int __init pyra_init(void)
pyra_class = class_create(THIS_MODULE, "pyra");
if (IS_ERR(pyra_class))
return PTR_ERR(pyra_class);
- pyra_class->dev_attrs = pyra_attributes;
- pyra_class->dev_bin_attrs = pyra_bin_attributes;
+ pyra_class->dev_groups = pyra_groups;
retval = hid_register_driver(&pyra_driver);
if (retval)
diff --git a/drivers/hid/hid-roccat-savu.c b/drivers/hid/hid-roccat-savu.c
index 31747a29c09..0332267199d 100644
--- a/drivers/hid/hid-roccat-savu.c
+++ b/drivers/hid/hid-roccat-savu.c
@@ -94,44 +94,48 @@ SAVU_SYSFS_W(thingy, THINGY) \
SAVU_SYSFS_R(thingy, THINGY)
#define SAVU_BIN_ATTRIBUTE_RW(thingy, THINGY) \
-{ \
+SAVU_SYSFS_RW(thingy, THINGY); \
+static struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0660 }, \
.size = SAVU_SIZE_ ## THINGY, \
.read = savu_sysfs_read_ ## thingy, \
.write = savu_sysfs_write_ ## thingy \
}
-#define SAVU_BIN_ATTRIBUTE_R(thingy, THINGY) \
-{ \
- .attr = { .name = #thingy, .mode = 0440 }, \
- .size = SAVU_SIZE_ ## THINGY, \
- .read = savu_sysfs_read_ ## thingy, \
-}
-
#define SAVU_BIN_ATTRIBUTE_W(thingy, THINGY) \
-{ \
+SAVU_SYSFS_W(thingy, THINGY); \
+static struct bin_attribute bin_attr_##thingy = { \
.attr = { .name = #thingy, .mode = 0220 }, \
.size = SAVU_SIZE_ ## THINGY, \
.write = savu_sysfs_write_ ## thingy \
}
-SAVU_SYSFS_W(control, CONTROL)
-SAVU_SYSFS_RW(profile, PROFILE)
-SAVU_SYSFS_RW(general, GENERAL)
-SAVU_SYSFS_RW(buttons, BUTTONS)
-SAVU_SYSFS_RW(macro, MACRO)
-SAVU_SYSFS_RW(info, INFO)
-SAVU_SYSFS_RW(sensor, SENSOR)
-
-static struct bin_attribute savu_bin_attributes[] = {
- SAVU_BIN_ATTRIBUTE_W(control, CONTROL),
- SAVU_BIN_ATTRIBUTE_RW(profile, PROFILE),
- SAVU_BIN_ATTRIBUTE_RW(general, GENERAL),
- SAVU_BIN_ATTRIBUTE_RW(buttons, BUTTONS),
- SAVU_BIN_ATTRIBUTE_RW(macro, MACRO),
- SAVU_BIN_ATTRIBUTE_RW(info, INFO),
- SAVU_BIN_ATTRIBUTE_RW(sensor, SENSOR),
- __ATTR_NULL
+SAVU_BIN_ATTRIBUTE_W(control, CONTROL);
+SAVU_BIN_ATTRIBUTE_RW(profile, PROFILE);
+SAVU_BIN_ATTRIBUTE_RW(general, GENERAL);
+SAVU_BIN_ATTRIBUTE_RW(buttons, BUTTONS);
+SAVU_BIN_ATTRIBUTE_RW(macro, MACRO);
+SAVU_BIN_ATTRIBUTE_RW(info, INFO);
+SAVU_BIN_ATTRIBUTE_RW(sensor, SENSOR);
+
+static struct bin_attribute *savu_bin_attributes[] = {
+ &bin_attr_control,
+ &bin_attr_profile,
+ &bin_attr_general,
+ &bin_attr_buttons,
+ &bin_attr_macro,
+ &bin_attr_info,
+ &bin_attr_sensor,
+ NULL,
+};
+
+static const struct attribute_group savu_group = {
+ .bin_attrs = savu_bin_attributes,
+};
+
+static const struct attribute_group *savu_groups[] = {
+ &savu_group,
+ NULL,
};
static int savu_init_savu_device_struct(struct usb_device *usb_dev,
@@ -294,7 +298,7 @@ static int __init savu_init(void)
savu_class = class_create(THIS_MODULE, "savu");
if (IS_ERR(savu_class))
return PTR_ERR(savu_class);
- savu_class->dev_bin_attrs = savu_bin_attributes;
+ savu_class->dev_groups = savu_groups;
retval = hid_register_driver(&savu_driver);
if (retval)
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index ca749810732..10e1581022c 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -103,8 +103,7 @@ static int sensor_hub_get_physical_device_count(
list_for_each_entry(report, &report_enum->report_list, list) {
field = report->field[0];
- if (report->maxfield && field &&
- field->physical)
+ if (report->maxfield && field && field->physical)
cnt++;
}
@@ -192,12 +191,12 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
u32 field_index, s32 value)
{
struct hid_report *report;
- struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev);
+ struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev);
int ret = 0;
mutex_lock(&data->mutex);
report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
- if (!report || (field_index >= report->maxfield)) {
+ if (!report || (field_index >= report->maxfield)) {
ret = -EINVAL;
goto done_proc;
}
@@ -216,12 +215,13 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
u32 field_index, s32 *value)
{
struct hid_report *report;
- struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev);
+ struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev);
int ret = 0;
mutex_lock(&data->mutex);
report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
- if (!report || (field_index >= report->maxfield)) {
+ if (!report || (field_index >= report->maxfield) ||
+ report->field[field_index]->report_count < 1) {
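+ /* Also reject fields that carry no values to read back. */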
ret = -EINVAL;
goto done_proc;
}
@@ -241,7 +241,7 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
u32 usage_id,
u32 attr_usage_id, u32 report_id)
{
- struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev);
+ struct sensor_hub_data *data = hid_get_drvdata(hsdev->hdev);
unsigned long flags;
struct hid_report *report;
int ret_val = 0;
@@ -302,7 +302,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
/* Initialize with defaults */
info->usage_id = usage_id;
- info->attrib_id = attr_usage_id;
+ info->attrib_id = attr_usage_id;
info->report_id = -1;
info->index = -1;
info->units = -1;
@@ -333,7 +333,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
if (field->usage[j].hid ==
attr_usage_id &&
field->usage[j].collection_index ==
- collection_index) {
+ collection_index) {
sensor_hub_fill_attr_info(info,
i, report->id,
field->unit,
@@ -357,7 +357,7 @@ EXPORT_SYMBOL_GPL(sensor_hub_input_get_attribute_info);
#ifdef CONFIG_PM
static int sensor_hub_suspend(struct hid_device *hdev, pm_message_t message)
{
- struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
+ struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
struct hid_sensor_hub_callbacks_list *callback;
hid_dbg(hdev, " sensor_hub_suspend\n");
@@ -374,7 +374,7 @@ static int sensor_hub_suspend(struct hid_device *hdev, pm_message_t message)
static int sensor_hub_resume(struct hid_device *hdev)
{
- struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
+ struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
struct hid_sensor_hub_callbacks_list *callback;
hid_dbg(hdev, " sensor_hub_resume\n");
@@ -394,6 +394,7 @@ static int sensor_hub_reset_resume(struct hid_device *hdev)
return 0;
}
#endif
+
/*
* Handle raw report as sent by device
*/
@@ -416,12 +417,11 @@ static int sensor_hub_raw_event(struct hid_device *hdev,
return 1;
ptr = raw_data;
- ptr++; /*Skip report id*/
+ ptr++; /* Skip report id */
spin_lock_irqsave(&pdata->lock, flags);
for (i = 0; i < report->maxfield; ++i) {
-
hid_dbg(hdev, "%d collection_index:%x hid:%x sz:%x\n",
i, report->field[i]->usage->collection_index,
report->field[i]->usage->hid,
@@ -431,11 +431,10 @@ static int sensor_hub_raw_event(struct hid_device *hdev,
if (pdata->pending.status && pdata->pending.attr_usage_id ==
report->field[i]->usage->hid) {
hid_dbg(hdev, "data was pending ...\n");
- pdata->pending.raw_data = kmalloc(sz, GFP_ATOMIC);
- if (pdata->pending.raw_data) {
- memcpy(pdata->pending.raw_data, ptr, sz);
- pdata->pending.raw_size = sz;
- } else
+ pdata->pending.raw_data = kmemdup(ptr, sz, GFP_ATOMIC);
+ if (pdata->pending.raw_data)
+ pdata->pending.raw_size = sz;
+ else
pdata->pending.raw_size = 0;
complete(&pdata->pending.ready);
}
@@ -478,16 +477,15 @@ static int sensor_hub_probe(struct hid_device *hdev,
struct hid_field *field;
int dev_cnt;
- sd = kzalloc(sizeof(struct sensor_hub_data), GFP_KERNEL);
+ sd = devm_kzalloc(&hdev->dev, sizeof(*sd), GFP_KERNEL);
if (!sd) {
hid_err(hdev, "cannot allocate Sensor data\n");
return -ENOMEM;
}
- sd->hsdev = kzalloc(sizeof(struct hid_sensor_hub_device), GFP_KERNEL);
+ sd->hsdev = devm_kzalloc(&hdev->dev, sizeof(*sd->hsdev), GFP_KERNEL);
if (!sd->hsdev) {
hid_err(hdev, "cannot allocate hid_sensor_hub_device\n");
- ret = -ENOMEM;
- goto err_free_hub;
+ return -ENOMEM;
}
hid_set_drvdata(hdev, sd);
sd->hsdev->hdev = hdev;
@@ -499,14 +497,14 @@ static int sensor_hub_probe(struct hid_device *hdev,
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
- goto err_free;
+ return ret;
}
INIT_LIST_HEAD(&hdev->inputs);
ret = hid_hw_start(hdev, 0);
if (ret) {
hid_err(hdev, "hw start failed\n");
- goto err_free;
+ return ret;
}
ret = hid_hw_open(hdev);
if (ret) {
@@ -539,7 +537,7 @@ static int sensor_hub_probe(struct hid_device *hdev,
field->physical) {
name = kasprintf(GFP_KERNEL, "HID-SENSOR-%x",
field->physical);
- if (name == NULL) {
+ if (name == NULL) {
hid_err(hdev, "Failed MFD device name\n");
ret = -ENOMEM;
goto err_free_names;
@@ -571,10 +569,6 @@ err_close:
hid_hw_close(hdev);
err_stop_hw:
hid_hw_stop(hdev);
-err_free:
- kfree(sd->hsdev);
-err_free_hub:
- kfree(sd);
return ret;
}
@@ -598,8 +592,6 @@ static void sensor_hub_remove(struct hid_device *hdev)
kfree(data->hid_sensor_hub_client_devs);
hid_set_drvdata(hdev, NULL);
mutex_destroy(&data->mutex);
- kfree(data->hsdev);
- kfree(data);
}
static const struct hid_device_id sensor_hub_devices[] = {
@@ -617,8 +609,8 @@ static struct hid_driver sensor_hub_driver = {
.raw_event = sensor_hub_raw_event,
#ifdef CONFIG_PM
.suspend = sensor_hub_suspend,
- .resume = sensor_hub_resume,
- .reset_resume = sensor_hub_reset_resume,
+ .resume = sensor_hub_resume,
+ .reset_resume = sensor_hub_reset_resume,
#endif
};
module_hid_driver(sensor_hub_driver);
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index ecbc74923d0..30dbb6b40bb 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -369,7 +369,8 @@ static int sony_mapping(struct hid_device *hdev, struct hid_input *hi,
if (sc->quirks & PS3REMOTE)
return ps3remote_mapping(hdev, hi, field, usage, bit, max);
- return -1;
+ /* Let hid-core decide for the others */
+ return 0;
}
/*
@@ -623,7 +624,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
struct sony_sc *sc;
unsigned int connect_mask = HID_CONNECT_DEFAULT;
- sc = kzalloc(sizeof(*sc), GFP_KERNEL);
+ sc = devm_kzalloc(&hdev->dev, sizeof(*sc), GFP_KERNEL);
if (sc == NULL) {
hid_err(hdev, "can't alloc sony descriptor\n");
return -ENOMEM;
@@ -635,7 +636,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
- goto err_free;
+ return ret;
}
if (sc->quirks & VAIO_RDESC_CONSTANT)
@@ -648,7 +649,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_hw_start(hdev, connect_mask);
if (ret) {
hid_err(hdev, "hw start failed\n");
- goto err_free;
+ return ret;
}
if (sc->quirks & SIXAXIS_CONTROLLER_USB) {
@@ -668,8 +669,6 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
return 0;
err_stop:
hid_hw_stop(hdev);
-err_free:
- kfree(sc);
return ret;
}
@@ -681,7 +680,6 @@ static void sony_remove(struct hid_device *hdev)
buzz_remove(hdev);
hid_hw_stop(hdev);
- kfree(sc);
}
static const struct hid_device_id sony_devices[] = {
diff --git a/drivers/hid/hid-speedlink.c b/drivers/hid/hid-speedlink.c
index a2f587d004e..7112f3e832e 100644
--- a/drivers/hid/hid-speedlink.c
+++ b/drivers/hid/hid-speedlink.c
@@ -3,7 +3,7 @@
* Fixes "jumpy" cursor and removes nonexistent keyboard LEDS from
* the HID descriptor.
*
- * Copyright (c) 2011 Stefan Kriwanek <mail@stefankriwanek.de>
+ * Copyright (c) 2011, 2013 Stefan Kriwanek <dev@stefankriwanek.de>
*/
/*
@@ -46,8 +46,13 @@ static int speedlink_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
/* No other conditions due to usage_table. */
- /* Fix "jumpy" cursor (invalid events sent by device). */
- if (value == 256)
+
+ /* This fixes the "jumpy" cursor occuring due to invalid events sent
+ * by the device. Some devices only send them with value==+256, others
+ * don't. However, catching abs(value)>=256 is restrictive enough not
+ * to interfere with devices that were bug-free (has been tested).
+ */
+ if (abs(value) >= 256)
return 1;
/* Drop useless distance 0 events (on button clicks etc.) as well */
if (value == 0)
diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
index 0c06054cab8..bd2bc4a1f37 100644
--- a/drivers/hid/hid-wiimote-core.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -212,10 +212,12 @@ static __u8 select_drm(struct wiimote_data *wdata)
if (ir == WIIPROTO_FLAG_IR_BASIC) {
if (wdata->state.flags & WIIPROTO_FLAG_ACCEL) {
- if (ext)
- return WIIPROTO_REQ_DRM_KAIE;
- else
- return WIIPROTO_REQ_DRM_KAI;
+ /* GEN10 and earlier devices bind IR formats to DRMs.
+ * Hence, we cannot use DRM_KAI here as it might be
+ * bound to IR_EXT. Use DRM_KAIE unconditionally so we
+ * work with all devices and our parsers can use the
+ * fixed formats, too. */
+ return WIIPROTO_REQ_DRM_KAIE;
} else {
return WIIPROTO_REQ_DRM_KIE;
}
@@ -439,8 +441,7 @@ static __u8 wiimote_cmd_read_ext(struct wiimote_data *wdata, __u8 *rmem)
if (ret != 6)
return WIIMOTE_EXT_NONE;
- hid_dbg(wdata->hdev, "extension ID: %02x:%02x %02x:%02x %02x:%02x\n",
- rmem[0], rmem[1], rmem[2], rmem[3], rmem[4], rmem[5]);
+ hid_dbg(wdata->hdev, "extension ID: %6phC\n", rmem);
if (rmem[0] == 0xff && rmem[1] == 0xff && rmem[2] == 0xff &&
rmem[3] == 0xff && rmem[4] == 0xff && rmem[5] == 0xff)
@@ -454,6 +455,12 @@ static __u8 wiimote_cmd_read_ext(struct wiimote_data *wdata, __u8 *rmem)
return WIIMOTE_EXT_BALANCE_BOARD;
if (rmem[4] == 0x01 && rmem[5] == 0x20)
return WIIMOTE_EXT_PRO_CONTROLLER;
+ if (rmem[0] == 0x01 && rmem[1] == 0x00 &&
+ rmem[4] == 0x01 && rmem[5] == 0x03)
+ return WIIMOTE_EXT_GUITAR_HERO_DRUMS;
+ if (rmem[0] == 0x00 && rmem[1] == 0x00 &&
+ rmem[4] == 0x01 && rmem[5] == 0x03)
+ return WIIMOTE_EXT_GUITAR_HERO_GUITAR;
return WIIMOTE_EXT_UNKNOWN;
}
@@ -487,6 +494,8 @@ static bool wiimote_cmd_map_mp(struct wiimote_data *wdata, __u8 exttype)
/* map MP with correct pass-through mode */
switch (exttype) {
case WIIMOTE_EXT_CLASSIC_CONTROLLER:
+ case WIIMOTE_EXT_GUITAR_HERO_DRUMS:
+ case WIIMOTE_EXT_GUITAR_HERO_GUITAR:
wmem = 0x07;
break;
case WIIMOTE_EXT_NUNCHUK:
@@ -510,14 +519,12 @@ static bool wiimote_cmd_read_mp(struct wiimote_data *wdata, __u8 *rmem)
if (ret != 6)
return false;
- hid_dbg(wdata->hdev, "motion plus ID: %02x:%02x %02x:%02x %02x:%02x\n",
- rmem[0], rmem[1], rmem[2], rmem[3], rmem[4], rmem[5]);
+ hid_dbg(wdata->hdev, "motion plus ID: %6phC\n", rmem);
if (rmem[5] == 0x05)
return true;
- hid_info(wdata->hdev, "unknown motion plus ID: %02x:%02x %02x:%02x %02x:%02x\n",
- rmem[0], rmem[1], rmem[2], rmem[3], rmem[4], rmem[5]);
+ hid_info(wdata->hdev, "unknown motion plus ID: %6phC\n", rmem);
return false;
}
@@ -533,8 +540,7 @@ static __u8 wiimote_cmd_read_mp_mapped(struct wiimote_data *wdata)
if (ret != 6)
return WIIMOTE_MP_NONE;
- hid_dbg(wdata->hdev, "mapped motion plus ID: %02x:%02x %02x:%02x %02x:%02x\n",
- rmem[0], rmem[1], rmem[2], rmem[3], rmem[4], rmem[5]);
+ hid_dbg(wdata->hdev, "mapped motion plus ID: %6phC\n", rmem);
if (rmem[0] == 0xff && rmem[1] == 0xff && rmem[2] == 0xff &&
rmem[3] == 0xff && rmem[4] == 0xff && rmem[5] == 0xff)
@@ -1077,6 +1083,8 @@ static const char *wiimote_exttype_names[WIIMOTE_EXT_NUM] = {
[WIIMOTE_EXT_CLASSIC_CONTROLLER] = "Nintendo Wii Classic Controller",
[WIIMOTE_EXT_BALANCE_BOARD] = "Nintendo Wii Balance Board",
[WIIMOTE_EXT_PRO_CONTROLLER] = "Nintendo Wii U Pro Controller",
+ [WIIMOTE_EXT_GUITAR_HERO_DRUMS] = "Nintendo Wii Guitar Hero Drums",
+ [WIIMOTE_EXT_GUITAR_HERO_GUITAR] = "Nintendo Wii Guitar Hero Guitar",
};
/*
@@ -1126,9 +1134,8 @@ static void wiimote_init_hotplug(struct wiimote_data *wdata)
wiimote_ext_unload(wdata);
if (exttype == WIIMOTE_EXT_UNKNOWN) {
- hid_info(wdata->hdev, "cannot detect extension; %02x:%02x %02x:%02x %02x:%02x\n",
- extdata[0], extdata[1], extdata[2],
- extdata[3], extdata[4], extdata[5]);
+ hid_info(wdata->hdev, "cannot detect extension; %6phC\n",
+ extdata);
} else if (exttype == WIIMOTE_EXT_NONE) {
spin_lock_irq(&wdata->state.lock);
wdata->state.exttype = WIIMOTE_EXT_NONE;
@@ -1663,6 +1670,10 @@ static ssize_t wiimote_ext_show(struct device *dev,
return sprintf(buf, "balanceboard\n");
case WIIMOTE_EXT_PRO_CONTROLLER:
return sprintf(buf, "procontroller\n");
+ case WIIMOTE_EXT_GUITAR_HERO_DRUMS:
+ return sprintf(buf, "drums\n");
+ case WIIMOTE_EXT_GUITAR_HERO_GUITAR:
+ return sprintf(buf, "guitar\n");
case WIIMOTE_EXT_UNKNOWN:
/* fallthrough */
default:
diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c
index 2e7d644dba1..7e124c351e6 100644
--- a/drivers/hid/hid-wiimote-modules.c
+++ b/drivers/hid/hid-wiimote-modules.c
@@ -1834,6 +1834,396 @@ static const struct wiimod_ops wiimod_pro = {
};
/*
+ * Drums
+ * Guitar-Hero, Rock-Band and other games came bundled with drums which can
+ * be plugged in as an extension to a Wiimote. Drum reports are still not
+ * entirely figured out, but the most important information is known.
+ * We create a separate device for drums and report all information via this
+ * input device.
+ */
+
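+/* Report one pad's pressure: take the measured value when the report names
+ * this pad; otherwise, when the on/off bit disagrees with the stored state,
+ * fall back to a default pressure (or 0). */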
+static inline void wiimod_drums_report_pressure(struct wiimote_data *wdata,
+ __u8 none, __u8 which,
+ __u8 pressure, __u8 onoff,
+ __u8 *store, __u16 code,
+ __u8 which_code)
+{
+ static const __u8 default_pressure = 3;
+
+ if (!none && which == which_code) {
+ *store = pressure;
+ input_report_abs(wdata->extension.input, code, *store);
+ } else if (onoff != !!*store) {
+ *store = onoff ? default_pressure : 0;
+ input_report_abs(wdata->extension.input, code, *store);
+ }
+}
+
+static void wiimod_drums_in_ext(struct wiimote_data *wdata, const __u8 *ext)
+{
+ __u8 pressure, which, none, hhp, sx, sy;
+ __u8 o, r, y, g, b, bass, bm, bp;
+
+ /* Byte | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 1 | 0 | 0 | SX <5:0> |
+ * 2 | 0 | 0 | SY <5:0> |
+ * -----+-----+-----+-----------------------------+-----+
+ * 3 | HPP | NON | WHICH <5:1> | ? |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 4 | SOFT <7:5> | 0 | 1 | 1 | 0 | ? |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 5 | ? | 1 | 1 | B- | 1 | B+ | 1 | ? |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 6 | O | R | Y | G | B | BSS | 1 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * All buttons are 0 if pressed
+ *
+ * With Motion+ enabled, the following bits will get invalid:
+ * Byte | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 1 | 0 | 0 | SX <5:1> |XXXXX|
+ * 2 | 0 | 0 | SY <5:1> |XXXXX|
+ * -----+-----+-----+-----------------------------+-----+
+ * 3 | HPP | NON | WHICH <5:1> | ? |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 4 | SOFT <7:5> | 0 | 1 | 1 | 0 | ? |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 5 | ? | 1 | 1 | B- | 1 | B+ | 1 |XXXXX|
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 6 | O | R | Y | G | B | BSS |XXXXX|XXXXX|
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ */
+
+ pressure = 7 - (ext[3] >> 5);
+ which = (ext[2] >> 1) & 0x1f;
+ none = !!(ext[2] & 0x40);
+ hhp = !(ext[2] & 0x80);
+ sx = ext[0] & 0x3f;
+ sy = ext[1] & 0x3f;
+ o = !(ext[5] & 0x80);
+ r = !(ext[5] & 0x40);
+ y = !(ext[5] & 0x20);
+ g = !(ext[5] & 0x10);
+ b = !(ext[5] & 0x08);
+ bass = !(ext[5] & 0x04);
+ bm = !(ext[4] & 0x10);
+ bp = !(ext[4] & 0x04);
+
+ wiimod_drums_report_pressure(wdata, none, which, pressure,
+ o, &wdata->state.pressure_drums[0],
+ ABS_CYMBAL_RIGHT, 0x0e);
+ wiimod_drums_report_pressure(wdata, none, which, pressure,
+ r, &wdata->state.pressure_drums[1],
+ ABS_TOM_LEFT, 0x19);
+ wiimod_drums_report_pressure(wdata, none, which, pressure,
+ y, &wdata->state.pressure_drums[2],
+ ABS_CYMBAL_LEFT, 0x11);
+ wiimod_drums_report_pressure(wdata, none, which, pressure,
+ g, &wdata->state.pressure_drums[3],
+ ABS_TOM_FAR_RIGHT, 0x12);
+ wiimod_drums_report_pressure(wdata, none, which, pressure,
+ b, &wdata->state.pressure_drums[4],
+ ABS_TOM_RIGHT, 0x0f);
+
+ /* Bass shares pressure with hi-hat (set via hhp) */
+ wiimod_drums_report_pressure(wdata, none, hhp ? 0xff : which, pressure,
+ bass, &wdata->state.pressure_drums[5],
+ ABS_BASS, 0x1b);
+ /* Hi-hat has no on/off values, just pressure. Force to off/0. */
+ wiimod_drums_report_pressure(wdata, none, hhp ? which : 0xff, pressure,
+ 0, &wdata->state.pressure_drums[6],
+ ABS_HI_HAT, 0x0e);
+
+ input_report_abs(wdata->extension.input, ABS_X, sx - 0x20);
+ input_report_abs(wdata->extension.input, ABS_Y, sy - 0x20);
+
+ input_report_key(wdata->extension.input, BTN_START, bp);
+ input_report_key(wdata->extension.input, BTN_SELECT, bm);
+
+ input_sync(wdata->extension.input);
+}
+
+static int wiimod_drums_open(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.flags |= WIIPROTO_FLAG_EXT_USED;
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ return 0;
+}
+
+static void wiimod_drums_close(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.flags &= ~WIIPROTO_FLAG_EXT_USED;
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+}
+
+static int wiimod_drums_probe(const struct wiimod_ops *ops,
+ struct wiimote_data *wdata)
+{
+ int ret;
+
+ wdata->extension.input = input_allocate_device();
+ if (!wdata->extension.input)
+ return -ENOMEM;
+
+ input_set_drvdata(wdata->extension.input, wdata);
+ wdata->extension.input->open = wiimod_drums_open;
+ wdata->extension.input->close = wiimod_drums_close;
+ wdata->extension.input->dev.parent = &wdata->hdev->dev;
+ wdata->extension.input->id.bustype = wdata->hdev->bus;
+ wdata->extension.input->id.vendor = wdata->hdev->vendor;
+ wdata->extension.input->id.product = wdata->hdev->product;
+ wdata->extension.input->id.version = wdata->hdev->version;
+ wdata->extension.input->name = WIIMOTE_NAME " Drums";
+
+ set_bit(EV_KEY, wdata->extension.input->evbit);
+ set_bit(BTN_START, wdata->extension.input->keybit);
+ set_bit(BTN_SELECT, wdata->extension.input->keybit);
+
+ set_bit(EV_ABS, wdata->extension.input->evbit);
+ set_bit(ABS_X, wdata->extension.input->absbit);
+ set_bit(ABS_Y, wdata->extension.input->absbit);
+ set_bit(ABS_TOM_LEFT, wdata->extension.input->absbit);
+ set_bit(ABS_TOM_RIGHT, wdata->extension.input->absbit);
+ set_bit(ABS_TOM_FAR_RIGHT, wdata->extension.input->absbit);
+ set_bit(ABS_CYMBAL_LEFT, wdata->extension.input->absbit);
+ set_bit(ABS_CYMBAL_RIGHT, wdata->extension.input->absbit);
+ set_bit(ABS_BASS, wdata->extension.input->absbit);
+ set_bit(ABS_HI_HAT, wdata->extension.input->absbit);
+ input_set_abs_params(wdata->extension.input,
+ ABS_X, -32, 31, 1, 1);
+ input_set_abs_params(wdata->extension.input,
+ ABS_Y, -32, 31, 1, 1);
+ input_set_abs_params(wdata->extension.input,
+ ABS_TOM_LEFT, 0, 7, 0, 0);
+ input_set_abs_params(wdata->extension.input,
+ ABS_TOM_RIGHT, 0, 7, 0, 0);
+ input_set_abs_params(wdata->extension.input,
+ ABS_TOM_FAR_RIGHT, 0, 7, 0, 0);
+ input_set_abs_params(wdata->extension.input,
+ ABS_CYMBAL_LEFT, 0, 7, 0, 0);
+ input_set_abs_params(wdata->extension.input,
+ ABS_CYMBAL_RIGHT, 0, 7, 0, 0);
+ input_set_abs_params(wdata->extension.input,
+ ABS_BASS, 0, 7, 0, 0);
+ input_set_abs_params(wdata->extension.input,
+ ABS_HI_HAT, 0, 7, 0, 0);
+
+ ret = input_register_device(wdata->extension.input);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ input_free_device(wdata->extension.input);
+ wdata->extension.input = NULL;
+ return ret;
+}
+
+static void wiimod_drums_remove(const struct wiimod_ops *ops,
+ struct wiimote_data *wdata)
+{
+ if (!wdata->extension.input)
+ return;
+
+ input_unregister_device(wdata->extension.input);
+ wdata->extension.input = NULL;
+}
+
+static const struct wiimod_ops wiimod_drums = {
+ .flags = 0,
+ .arg = 0,
+ .probe = wiimod_drums_probe,
+ .remove = wiimod_drums_remove,
+ .in_ext = wiimod_drums_in_ext,
+};
+
+/*
+ * Guitar
+ * Guitar-Hero, Rock-Band and other games came bundled with guitars which can
+ * be plugged in as an extension to a Wiimote.
+ * We create a separate device for guitars and report all information via this
+ * input device.
+ */
+
+static void wiimod_guitar_in_ext(struct wiimote_data *wdata, const __u8 *ext)
+{
+ __u8 sx, sy, tb, wb, bd, bm, bp, bo, br, bb, bg, by, bu;
+
+ /* Byte | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 1 | 0 | 0 | SX <5:0> |
+ * 2 | 0 | 0 | SY <5:0> |
+ * -----+-----+-----+-----+-----------------------------+
+ * 3 | 0 | 0 | 0 | TB <4:0> |
+ * -----+-----+-----+-----+-----------------------------+
+ * 4 | 0 | 0 | 0 | WB <4:0> |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 5 | 1 | BD | 1 | B- | 1 | B+ | 1 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 6 | BO | BR | BB | BG | BY | 1 | 1 | BU |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * All buttons are 0 if pressed
+ *
+ * With Motion+ enabled, the following bits will get invalid:
+ * Byte | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 1 | 0 | 0 | SX <5:1> |XXXXX|
+ * 2 | 0 | 0 | SY <5:1> |XXXXX|
+ * -----+-----+-----+-----+-----------------------+-----+
+ * 3 | 0 | 0 | 0 | TB <4:0> |
+ * -----+-----+-----+-----+-----------------------------+
+ * 4 | 0 | 0 | 0 | WB <4:0> |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 5 | 1 | BD | 1 | B- | 1 | B+ | 1 |XXXXX|
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 6 | BO | BR | BB | BG | BY | 1 |XXXXX|XXXXX|
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ */
+
+ sx = ext[0] & 0x3f;
+ sy = ext[1] & 0x3f;
+ tb = ext[2] & 0x1f;
+ wb = ext[3] & 0x1f;
+ bd = !(ext[4] & 0x40);
+ bm = !(ext[4] & 0x10);
+ bp = !(ext[4] & 0x04);
+ bo = !(ext[5] & 0x80);
+ br = !(ext[5] & 0x40);
+ bb = !(ext[5] & 0x20);
+ bg = !(ext[5] & 0x10);
+ by = !(ext[5] & 0x08);
+ bu = !(ext[5] & 0x01);
+
+ input_report_abs(wdata->extension.input, ABS_X, sx - 0x20);
+ input_report_abs(wdata->extension.input, ABS_Y, sy - 0x20);
+ input_report_abs(wdata->extension.input, ABS_FRET_BOARD, tb);
+ input_report_abs(wdata->extension.input, ABS_WHAMMY_BAR, wb - 0x10);
+
+ input_report_key(wdata->extension.input, BTN_MODE, bm);
+ input_report_key(wdata->extension.input, BTN_START, bp);
+ input_report_key(wdata->extension.input, BTN_STRUM_BAR_UP, bu);
+ input_report_key(wdata->extension.input, BTN_STRUM_BAR_DOWN, bd);
+ input_report_key(wdata->extension.input, BTN_FRET_FAR_UP, bg);
+ input_report_key(wdata->extension.input, BTN_FRET_UP, br);
+ input_report_key(wdata->extension.input, BTN_FRET_MID, by);
+ input_report_key(wdata->extension.input, BTN_FRET_LOW, bb);
+ input_report_key(wdata->extension.input, BTN_FRET_FAR_LOW, bo);
+
+ input_sync(wdata->extension.input);
+}
+
+static int wiimod_guitar_open(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.flags |= WIIPROTO_FLAG_EXT_USED;
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ return 0;
+}
+
+static void wiimod_guitar_close(struct input_dev *dev)
+{
+ struct wiimote_data *wdata = input_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.flags &= ~WIIPROTO_FLAG_EXT_USED;
+ wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+}
+
+static int wiimod_guitar_probe(const struct wiimod_ops *ops,
+ struct wiimote_data *wdata)
+{
+ int ret;
+
+ wdata->extension.input = input_allocate_device();
+ if (!wdata->extension.input)
+ return -ENOMEM;
+
+ input_set_drvdata(wdata->extension.input, wdata);
+ wdata->extension.input->open = wiimod_guitar_open;
+ wdata->extension.input->close = wiimod_guitar_close;
+ wdata->extension.input->dev.parent = &wdata->hdev->dev;
+ wdata->extension.input->id.bustype = wdata->hdev->bus;
+ wdata->extension.input->id.vendor = wdata->hdev->vendor;
+ wdata->extension.input->id.product = wdata->hdev->product;
+ wdata->extension.input->id.version = wdata->hdev->version;
+ wdata->extension.input->name = WIIMOTE_NAME " Guitar";
+
+ set_bit(EV_KEY, wdata->extension.input->evbit);
+ set_bit(BTN_MODE, wdata->extension.input->keybit);
+ set_bit(BTN_START, wdata->extension.input->keybit);
+ set_bit(BTN_FRET_FAR_UP, wdata->extension.input->keybit);
+ set_bit(BTN_FRET_UP, wdata->extension.input->keybit);
+ set_bit(BTN_FRET_MID, wdata->extension.input->keybit);
+ set_bit(BTN_FRET_LOW, wdata->extension.input->keybit);
+ set_bit(BTN_FRET_FAR_LOW, wdata->extension.input->keybit);
+ set_bit(BTN_STRUM_BAR_UP, wdata->extension.input->keybit);
+ set_bit(BTN_STRUM_BAR_DOWN, wdata->extension.input->keybit);
+
+ set_bit(EV_ABS, wdata->extension.input->evbit);
+ set_bit(ABS_X, wdata->extension.input->absbit);
+ set_bit(ABS_Y, wdata->extension.input->absbit);
+ set_bit(ABS_FRET_BOARD, wdata->extension.input->absbit);
+ set_bit(ABS_WHAMMY_BAR, wdata->extension.input->absbit);
+ input_set_abs_params(wdata->extension.input,
+ ABS_X, -32, 31, 1, 1);
+ input_set_abs_params(wdata->extension.input,
+ ABS_Y, -32, 31, 1, 1);
+ input_set_abs_params(wdata->extension.input,
+ ABS_FRET_BOARD, 0, 0x1f, 1, 1);
+ input_set_abs_params(wdata->extension.input,
+ ABS_WHAMMY_BAR, 0, 0x0f, 1, 1);
+
+ ret = input_register_device(wdata->extension.input);
+ if (ret)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ input_free_device(wdata->extension.input);
+ wdata->extension.input = NULL;
+ return ret;
+}
+
+static void wiimod_guitar_remove(const struct wiimod_ops *ops,
+ struct wiimote_data *wdata)
+{
+ if (!wdata->extension.input)
+ return;
+
+ input_unregister_device(wdata->extension.input);
+ wdata->extension.input = NULL;
+}
+
+static const struct wiimod_ops wiimod_guitar = {
+ .flags = 0,
+ .arg = 0,
+ .probe = wiimod_guitar_probe,
+ .remove = wiimod_guitar_remove,
+ .in_ext = wiimod_guitar_in_ext,
+};
+
+/*
* Builtin Motion Plus
* This module simply sets the WIIPROTO_FLAG_BUILTIN_MP protocol flag which
* disables polling for Motion-Plus. This should be set only for devices which
@@ -2083,4 +2473,6 @@ const struct wiimod_ops *wiimod_ext_table[WIIMOTE_EXT_NUM] = {
[WIIMOTE_EXT_CLASSIC_CONTROLLER] = &wiimod_classic,
[WIIMOTE_EXT_BALANCE_BOARD] = &wiimod_bboard,
[WIIMOTE_EXT_PRO_CONTROLLER] = &wiimod_pro,
+ [WIIMOTE_EXT_GUITAR_HERO_DRUMS] = &wiimod_drums,
+ [WIIMOTE_EXT_GUITAR_HERO_GUITAR] = &wiimod_guitar,
};
diff --git a/drivers/hid/hid-wiimote.h b/drivers/hid/hid-wiimote.h
index f1474f372c0..379cdfb6bd2 100644
--- a/drivers/hid/hid-wiimote.h
+++ b/drivers/hid/hid-wiimote.h
@@ -88,6 +88,8 @@ enum wiimote_exttype {
WIIMOTE_EXT_CLASSIC_CONTROLLER,
WIIMOTE_EXT_BALANCE_BOARD,
WIIMOTE_EXT_PRO_CONTROLLER,
+ WIIMOTE_EXT_GUITAR_HERO_DRUMS,
+ WIIMOTE_EXT_GUITAR_HERO_GUITAR,
WIIMOTE_EXT_NUM,
};
@@ -135,6 +137,7 @@ struct wiimote_state {
/* calibration data */
__u16 calib_bboard[4][3];
+ __u8 pressure_drums[7];
};
struct wiimote_data {
diff --git a/drivers/hid/hid-xinmo.c b/drivers/hid/hid-xinmo.c
new file mode 100644
index 00000000000..7df5227a7e6
--- /dev/null
+++ b/drivers/hid/hid-xinmo.c
@@ -0,0 +1,61 @@
+/*
+ * HID driver for Xin-Mo devices, currently only the Dual Arcade controller.
+ * Fixes the negative axis event values (the device sends -2) to match the
+ * logical axis minimum of the HID report descriptor (the report announces
+ * -1). This is needed because hid-input discards out-of-bounds values.
+ * (This module is based on "hid-saitek" and "hid-lg".)
+ *
+ * Copyright (c) 2013 Olivier Scherler
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+
+#include "hid-ids.h"
+
+/*
+ * Fix negative events that are out of bounds.
+ */
+static int xinmo_event(struct hid_device *hdev, struct hid_field *field,
+ struct hid_usage *usage, __s32 value)
+{
+ switch (usage->code) {
+ case ABS_X:
+ case ABS_Y:
+ case ABS_Z:
+ case ABS_RX:
+ if (value < -1) {
+ input_event(field->hidinput->input, usage->type,
+ usage->code, -1);
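+ /* non-zero return: the event has been handled and must not be processed further */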
+ return 1;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static const struct hid_device_id xinmo_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, xinmo_devices);
+
+static struct hid_driver xinmo_driver = {
+ .name = "xinmo",
+ .id_table = xinmo_devices,
+ .event = xinmo_event
+};
+
+module_hid_driver(xinmo_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-zydacron.c b/drivers/hid/hid-zydacron.c
index e4cddeccd6b..1a660bd97ab 100644
--- a/drivers/hid/hid-zydacron.c
+++ b/drivers/hid/hid-zydacron.c
@@ -169,7 +169,7 @@ static int zc_probe(struct hid_device *hdev, const struct hid_device_id *id)
int ret;
struct zc_device *zc;
- zc = kzalloc(sizeof(*zc), GFP_KERNEL);
+ zc = devm_kzalloc(&hdev->dev, sizeof(*zc), GFP_KERNEL);
if (zc == NULL) {
hid_err(hdev, "can't alloc descriptor\n");
return -ENOMEM;
@@ -180,28 +180,16 @@ static int zc_probe(struct hid_device *hdev, const struct hid_device_id *id)
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
- goto err_free;
+ return ret;
}
ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
if (ret) {
hid_err(hdev, "hw start failed\n");
- goto err_free;
+ return ret;
}
return 0;
-err_free:
- kfree(zc);
-
- return ret;
-}
-
-static void zc_remove(struct hid_device *hdev)
-{
- struct zc_device *zc = hid_get_drvdata(hdev);
-
- hid_hw_stop(hdev);
- kfree(zc);
}
static const struct hid_device_id zc_devices[] = {
@@ -217,7 +205,6 @@ static struct hid_driver zc_driver = {
.input_mapping = zc_input_mapping,
.raw_event = zc_raw_event,
.probe = zc_probe,
- .remove = zc_remove,
};
module_hid_driver(zc_driver);
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index a7451632ceb..8918dd12bb6 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -113,7 +113,7 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer,
__u8 *buf;
int ret = 0;
- if (!hidraw_table[minor]) {
+ if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
ret = -ENODEV;
goto out;
}
@@ -253,6 +253,7 @@ static int hidraw_open(struct inode *inode, struct file *file)
unsigned int minor = iminor(inode);
struct hidraw *dev;
struct hidraw_list *list;
+ unsigned long flags;
int err = 0;
if (!(list = kzalloc(sizeof(struct hidraw_list), GFP_KERNEL))) {
@@ -261,16 +262,11 @@ static int hidraw_open(struct inode *inode, struct file *file)
}
mutex_lock(&minors_lock);
- if (!hidraw_table[minor]) {
+ if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
err = -ENODEV;
goto out_unlock;
}
- list->hidraw = hidraw_table[minor];
- mutex_init(&list->read_mutex);
- list_add_tail(&list->node, &hidraw_table[minor]->list);
- file->private_data = list;
-
dev = hidraw_table[minor];
if (!dev->open++) {
err = hid_hw_power(dev->hid, PM_HINT_FULLON);
@@ -283,9 +279,16 @@ static int hidraw_open(struct inode *inode, struct file *file)
if (err < 0) {
hid_hw_power(dev->hid, PM_HINT_NORMAL);
dev->open--;
+ goto out_unlock;
}
}
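+ /*
+ * Add the reader to the device list only after power-up has succeeded,
+ * and under list_lock so hidraw_report_event() never walks a
+ * half-initialized entry.
+ */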
+ list->hidraw = hidraw_table[minor];
+ mutex_init(&list->read_mutex);
+ spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
+ list_add_tail(&list->node, &hidraw_table[minor]->list);
+ spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
+ file->private_data = list;
out_unlock:
mutex_unlock(&minors_lock);
out:
@@ -302,39 +305,41 @@ static int hidraw_fasync(int fd, struct file *file, int on)
return fasync_helper(fd, file, on, &list->fasync);
}
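+/*
+ * Drop one reference on a hidraw device. With exists_bit set this is the
+ * disconnect path: close the underlying HID device, mark the node dead and
+ * wake any sleeping readers. Otherwise it accounts for one less open file.
+ * The structure is freed once no files remain open and the device is gone.
+ */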
+static void drop_ref(struct hidraw *hidraw, int exists_bit)
+{
+ if (exists_bit) {
+ hid_hw_close(hidraw->hid);
+ hidraw->exist = 0;
+ if (hidraw->open)
+ wake_up_interruptible(&hidraw->wait);
+ } else {
+ --hidraw->open;
+ }
+
+ if (!hidraw->open && !hidraw->exist) {
+ device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
+ hidraw_table[hidraw->minor] = NULL;
+ kfree(hidraw);
+ }
+}
+
static int hidraw_release(struct inode * inode, struct file * file)
{
unsigned int minor = iminor(inode);
- struct hidraw *dev;
struct hidraw_list *list = file->private_data;
- int ret;
- int i;
+ unsigned long flags;
mutex_lock(&minors_lock);
- if (!hidraw_table[minor]) {
- ret = -ENODEV;
- goto unlock;
- }
+ spin_lock_irqsave(&hidraw_table[minor]->list_lock, flags);
list_del(&list->node);
- dev = hidraw_table[minor];
- if (!--dev->open) {
- if (list->hidraw->exist) {
- hid_hw_power(dev->hid, PM_HINT_NORMAL);
- hid_hw_close(dev->hid);
- } else {
- kfree(list->hidraw);
- }
- }
-
- for (i = 0; i < HIDRAW_BUFFER_SIZE; ++i)
- kfree(list->buffer[i].value);
+ spin_unlock_irqrestore(&hidraw_table[minor]->list_lock, flags);
kfree(list);
- ret = 0;
-unlock:
- mutex_unlock(&minors_lock);
- return ret;
+ drop_ref(hidraw_table[minor], 0);
+
+ mutex_unlock(&minors_lock);
+ return 0;
}
static long hidraw_ioctl(struct file *file, unsigned int cmd,
@@ -457,7 +462,9 @@ int hidraw_report_event(struct hid_device *hid, u8 *data, int len)
struct hidraw *dev = hid->hidraw;
struct hidraw_list *list;
int ret = 0;
+ unsigned long flags;
+ spin_lock_irqsave(&dev->list_lock, flags);
list_for_each_entry(list, &dev->list, node) {
int new_head = (list->head + 1) & (HIDRAW_BUFFER_SIZE - 1);
@@ -472,6 +479,7 @@ int hidraw_report_event(struct hid_device *hid, u8 *data, int len)
list->head = new_head;
kill_fasync(&list->fasync, SIGIO, POLL_IN);
}
+ spin_unlock_irqrestore(&dev->list_lock, flags);
wake_up_interruptible(&dev->wait);
return ret;
@@ -518,8 +526,8 @@ int hidraw_connect(struct hid_device *hid)
goto out;
}
- mutex_unlock(&minors_lock);
init_waitqueue_head(&dev->wait);
+ spin_lock_init(&dev->list_lock);
INIT_LIST_HEAD(&dev->list);
dev->hid = hid;
@@ -528,6 +536,7 @@ int hidraw_connect(struct hid_device *hid)
dev->exist = 1;
hid->hidraw = dev;
+ mutex_unlock(&minors_lock);
out:
return result;
@@ -539,18 +548,9 @@ void hidraw_disconnect(struct hid_device *hid)
struct hidraw *hidraw = hid->hidraw;
mutex_lock(&minors_lock);
- hidraw->exist = 0;
- device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
+ drop_ref(hidraw, 1);
- hidraw_table[hidraw->minor] = NULL;
-
- if (hidraw->open) {
- hid_hw_close(hid);
- wake_up_interruptible(&hidraw->wait);
- } else {
- kfree(hidraw);
- }
mutex_unlock(&minors_lock);
}
EXPORT_SYMBOL_GPL(hidraw_disconnect);
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 879b0ed701a..c1336193b04 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -35,6 +35,7 @@
#include <linux/hid.h>
#include <linux/mutex.h>
#include <linux/acpi.h>
+#include <linux/of.h>
#include <linux/i2c/i2c-hid.h>
@@ -756,29 +757,6 @@ static int i2c_hid_power(struct hid_device *hid, int lvl)
return ret;
}
-static int i2c_hid_hidinput_input_event(struct input_dev *dev,
- unsigned int type, unsigned int code, int value)
-{
- struct hid_device *hid = input_get_drvdata(dev);
- struct hid_field *field;
- int offset;
-
- if (type == EV_FF)
- return input_ff_event(dev, type, code, value);
-
- if (type != EV_LED)
- return -1;
-
- offset = hidinput_find_field(hid, type, code, &field);
-
- if (offset == -1) {
- hid_warn(dev, "event field not found\n");
- return -1;
- }
-
- return hid_set_field(field, offset, value);
-}
-
static struct hid_ll_driver i2c_hid_ll_driver = {
.parse = i2c_hid_parse,
.start = i2c_hid_start,
@@ -787,7 +765,6 @@ static struct hid_ll_driver i2c_hid_ll_driver = {
.close = i2c_hid_close,
.power = i2c_hid_power,
.request = i2c_hid_request,
- .hidinput_input_event = i2c_hid_hidinput_input_event,
};
static int i2c_hid_init_irq(struct i2c_client *client)
@@ -824,8 +801,8 @@ static int i2c_hid_fetch_hid_descriptor(struct i2c_hid *ihid)
* bytes 2-3 -> bcdVersion (has to be 1.00) */
ret = i2c_hid_command(client, &hid_descr_cmd, ihid->hdesc_buffer, 4);
- i2c_hid_dbg(ihid, "%s, ihid->hdesc_buffer: %*ph\n",
- __func__, 4, ihid->hdesc_buffer);
+ i2c_hid_dbg(ihid, "%s, ihid->hdesc_buffer: %4ph\n", __func__,
+ ihid->hdesc_buffer);
if (ret) {
dev_err(&client->dev,
@@ -897,8 +874,9 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
params[1].integer.value = 1;
params[2].type = ACPI_TYPE_INTEGER;
params[2].integer.value = 1; /* HID function */
- params[3].type = ACPI_TYPE_INTEGER;
- params[3].integer.value = 0;
+ params[3].type = ACPI_TYPE_PACKAGE;
+ params[3].package.count = 0;
+ params[3].package.elements = NULL;
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DSM", &input, &buf))) {
dev_err(&client->dev, "device _DSM execution failed\n");
@@ -933,6 +911,42 @@ static inline int i2c_hid_acpi_pdata(struct i2c_client *client,
}
#endif
+#ifdef CONFIG_OF
+static int i2c_hid_of_probe(struct i2c_client *client,
+ struct i2c_hid_platform_data *pdata)
+{
+ struct device *dev = &client->dev;
+ u32 val;
+ int ret;
+
+ ret = of_property_read_u32(dev->of_node, "hid-descr-addr", &val);
+ if (ret) {
+ dev_err(&client->dev, "HID register address not provided\n");
+ return -ENODEV;
+ }
+ if (val >> 16) {
+ dev_err(&client->dev, "Bad HID register address: 0x%08x\n",
+ val);
+ return -EINVAL;
+ }
+ pdata->hid_descriptor_address = val;
+
+ return 0;
+}
+
+static const struct of_device_id i2c_hid_of_match[] = {
+ { .compatible = "hid-over-i2c" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, i2c_hid_of_match);
+#else
+static inline int i2c_hid_of_probe(struct i2c_client *client,
+ struct i2c_hid_platform_data *pdata)
+{
+ return -ENODEV;
+}
+#endif
+
static int i2c_hid_probe(struct i2c_client *client,
const struct i2c_device_id *dev_id)
{
@@ -954,7 +968,11 @@ static int i2c_hid_probe(struct i2c_client *client,
if (!ihid)
return -ENOMEM;
- if (!platform_data) {
+ if (client->dev.of_node) {
+ ret = i2c_hid_of_probe(client, &ihid->pdata);
+ if (ret)
+ goto err;
+ } else if (!platform_data) {
ret = i2c_hid_acpi_pdata(client, &ihid->pdata);
if (ret) {
dev_err(&client->dev,
@@ -1095,6 +1113,7 @@ static struct i2c_driver i2c_hid_driver = {
.owner = THIS_MODULE,
.pm = &i2c_hid_pm,
.acpi_match_table = ACPI_PTR(i2c_hid_acpi_match),
+ .of_match_table = of_match_ptr(i2c_hid_of_match),
},
.probe = i2c_hid_probe,
diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
index fc307e0422a..5bf2fb78584 100644
--- a/drivers/hid/uhid.c
+++ b/drivers/hid/uhid.c
@@ -116,30 +116,6 @@ static void uhid_hid_close(struct hid_device *hid)
uhid_queue_event(uhid, UHID_CLOSE);
}
-static int uhid_hid_input(struct input_dev *input, unsigned int type,
- unsigned int code, int value)
-{
- struct hid_device *hid = input_get_drvdata(input);
- struct uhid_device *uhid = hid->driver_data;
- unsigned long flags;
- struct uhid_event *ev;
-
- ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
- if (!ev)
- return -ENOMEM;
-
- ev->type = UHID_OUTPUT_EV;
- ev->u.output_ev.type = type;
- ev->u.output_ev.code = code;
- ev->u.output_ev.value = value;
-
- spin_lock_irqsave(&uhid->qlock, flags);
- uhid_queue(uhid, ev);
- spin_unlock_irqrestore(&uhid->qlock, flags);
-
- return 0;
-}
-
static int uhid_hid_parse(struct hid_device *hid)
{
struct uhid_device *uhid = hid->driver_data;
@@ -273,7 +249,6 @@ static struct hid_ll_driver uhid_hid_driver = {
.stop = uhid_hid_stop,
.open = uhid_hid_open,
.close = uhid_hid_close,
- .hidinput_input_event = uhid_hid_input,
.parse = uhid_hid_parse,
};
@@ -659,3 +634,4 @@ module_exit(uhid_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
+MODULE_ALIAS("devname:" UHID_NAME);
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index 99418285222..44df131d390 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -535,7 +535,6 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
{
int head;
struct usbhid_device *usbhid = hid->driver_data;
- int len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
if ((hid->quirks & HID_QUIRK_NOGET) && dir == USB_DIR_IN)
return;
@@ -546,7 +545,7 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
return;
}
- usbhid->out[usbhid->outhead].raw_report = kmalloc(len, GFP_ATOMIC);
+ usbhid->out[usbhid->outhead].raw_report = hid_alloc_report_buf(report, GFP_ATOMIC);
if (!usbhid->out[usbhid->outhead].raw_report) {
hid_warn(hid, "output queueing failed\n");
return;
@@ -595,7 +594,7 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
}
if (dir == USB_DIR_OUT) {
- usbhid->ctrl[usbhid->ctrlhead].raw_report = kmalloc(len, GFP_ATOMIC);
+ usbhid->ctrl[usbhid->ctrlhead].raw_report = hid_alloc_report_buf(report, GFP_ATOMIC);
if (!usbhid->ctrl[usbhid->ctrlhead].raw_report) {
hid_warn(hid, "control queueing failed\n");
return;
@@ -649,62 +648,6 @@ static void usbhid_submit_report(struct hid_device *hid, struct hid_report *repo
spin_unlock_irqrestore(&usbhid->lock, flags);
}
-/* Workqueue routine to send requests to change LEDs */
-static void hid_led(struct work_struct *work)
-{
- struct usbhid_device *usbhid =
- container_of(work, struct usbhid_device, led_work);
- struct hid_device *hid = usbhid->hid;
- struct hid_field *field;
- unsigned long flags;
-
- field = hidinput_get_led_field(hid);
- if (!field) {
- hid_warn(hid, "LED event field not found\n");
- return;
- }
-
- spin_lock_irqsave(&usbhid->lock, flags);
- if (!test_bit(HID_DISCONNECTED, &usbhid->iofl)) {
- usbhid->ledcount = hidinput_count_leds(hid);
- hid_dbg(usbhid->hid, "New ledcount = %u\n", usbhid->ledcount);
- __usbhid_submit_report(hid, field->report, USB_DIR_OUT);
- }
- spin_unlock_irqrestore(&usbhid->lock, flags);
-}
-
-static int usb_hidinput_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
-{
- struct hid_device *hid = input_get_drvdata(dev);
- struct usbhid_device *usbhid = hid->driver_data;
- struct hid_field *field;
- unsigned long flags;
- int offset;
-
- if (type == EV_FF)
- return input_ff_event(dev, type, code, value);
-
- if (type != EV_LED)
- return -1;
-
- if ((offset = hidinput_find_field(hid, type, code, &field)) == -1) {
- hid_warn(dev, "event field not found\n");
- return -1;
- }
-
- spin_lock_irqsave(&usbhid->lock, flags);
- hid_set_field(field, offset, value);
- spin_unlock_irqrestore(&usbhid->lock, flags);
-
- /*
- * Defer performing requested LED action.
- * This is more likely gather all LED changes into a single URB.
- */
- schedule_work(&usbhid->led_work);
-
- return 0;
-}
-
static int usbhid_wait_io(struct hid_device *hid)
{
struct usbhid_device *usbhid = hid->driver_data;
@@ -807,12 +750,17 @@ void usbhid_init_reports(struct hid_device *hid)
{
struct hid_report *report;
struct usbhid_device *usbhid = hid->driver_data;
+ struct hid_report_enum *report_enum;
int err, ret;
- list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT].report_list, list)
- usbhid_submit_report(hid, report, USB_DIR_IN);
+ if (!(hid->quirks & HID_QUIRK_NO_INIT_INPUT_REPORTS)) {
+ report_enum = &hid->report_enum[HID_INPUT_REPORT];
+ list_for_each_entry(report, &report_enum->report_list, list)
+ usbhid_submit_report(hid, report, USB_DIR_IN);
+ }
- list_for_each_entry(report, &hid->report_enum[HID_FEATURE_REPORT].report_list, list)
+ report_enum = &hid->report_enum[HID_FEATURE_REPORT];
+ list_for_each_entry(report, &report_enum->report_list, list)
usbhid_submit_report(hid, report, USB_DIR_IN);
err = 0;
@@ -857,7 +805,7 @@ static int hid_find_field_early(struct hid_device *hid, unsigned int page,
return -1;
}
-void usbhid_set_leds(struct hid_device *hid)
+static void usbhid_set_leds(struct hid_device *hid)
{
struct hid_field *field;
int offset;
@@ -867,7 +815,6 @@ void usbhid_set_leds(struct hid_device *hid)
usbhid_submit_report(hid, field->report, USB_DIR_OUT);
}
}
-EXPORT_SYMBOL_GPL(usbhid_set_leds);
/*
* Traverse the supplied list of reports and find the longest
@@ -1274,7 +1221,6 @@ static struct hid_ll_driver usb_hid_driver = {
.open = usbhid_open,
.close = usbhid_close,
.power = usbhid_power,
- .hidinput_input_event = usb_hidinput_input_event,
.request = usbhid_request,
.wait = usbhid_wait_io,
.idle = usbhid_idle,
@@ -1368,8 +1314,6 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
spin_lock_init(&usbhid->lock);
- INIT_WORK(&usbhid->led_work, hid_led);
-
ret = hid_add_device(hid);
if (ret) {
if (ret != -ENODEV)
@@ -1402,7 +1346,6 @@ static void hid_cancel_delayed_stuff(struct usbhid_device *usbhid)
{
del_timer_sync(&usbhid->io_retry);
cancel_work_sync(&usbhid->reset_work);
- cancel_work_sync(&usbhid->led_work);
}
static void hid_cease_io(struct usbhid_device *usbhid)
@@ -1522,15 +1465,17 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
struct usbhid_device *usbhid = hid->driver_data;
int status = 0;
bool driver_suspended = false;
+ unsigned int ledcount;
if (PMSG_IS_AUTO(message)) {
+ ledcount = hidinput_count_leds(hid);
spin_lock_irq(&usbhid->lock); /* Sync with error handler */
if (!test_bit(HID_RESET_PENDING, &usbhid->iofl)
&& !test_bit(HID_CLEAR_HALT, &usbhid->iofl)
&& !test_bit(HID_OUT_RUNNING, &usbhid->iofl)
&& !test_bit(HID_CTRL_RUNNING, &usbhid->iofl)
&& !test_bit(HID_KEYS_PRESSED, &usbhid->iofl)
- && (!usbhid->ledcount || ignoreled))
+ && (!ledcount || ignoreled))
{
set_bit(HID_SUSPENDED, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 19b8360f233..07345521f42 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -109,6 +109,8 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
+
{ 0, 0 }
};
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index dbb6af69913..f633c24ce28 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -92,9 +92,6 @@ struct usbhid_device {
unsigned int retry_delay; /* Delay length in ms */
struct work_struct reset_work; /* Task context for resets */
wait_queue_head_t wait; /* For sleeping */
- int ledcount; /* counting the number of active leds */
-
- struct work_struct led_work; /* Task context for setting LEDs */
};
#define hid_to_usb_dev(hid_dev) \
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 0df75908200..bbff5f200be 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -48,30 +48,39 @@ struct vmbus_channel_message_table_entry {
* @negop is of type &struct icmsg_negotiate.
* Set up and fill in default negotiate response message.
*
- * The max_fw_version specifies the maximum framework version that
- * we can support and max _srv_version specifies the maximum service
- * version we can support. A special value MAX_SRV_VER can be
- * specified to indicate that we can handle the maximum version
- * exposed by the host.
+ * The fw_version specifies the framework version that
+ * we can support and srv_version specifies the service
+ * version we can support.
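+ * Both version numbers are encoded as (major << 16) | minor.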
*
* Mainly used by Hyper-V drivers.
*/
-void vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
+bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
struct icmsg_negotiate *negop, u8 *buf,
- int max_fw_version, int max_srv_version)
+ int fw_version, int srv_version)
{
- int icframe_vercnt;
- int icmsg_vercnt;
+ int icframe_major, icframe_minor;
+ int icmsg_major, icmsg_minor;
+ int fw_major, fw_minor;
+ int srv_major, srv_minor;
int i;
+ bool found_match = false;
icmsghdrp->icmsgsize = 0x10;
+ fw_major = (fw_version >> 16);
+ fw_minor = (fw_version & 0xFFFF);
+
+ srv_major = (srv_version >> 16);
+ srv_minor = (srv_version & 0xFFFF);
negop = (struct icmsg_negotiate *)&buf[
sizeof(struct vmbuspipe_hdr) +
sizeof(struct icmsg_hdr)];
- icframe_vercnt = negop->icframe_vercnt;
- icmsg_vercnt = negop->icmsg_vercnt;
+ icframe_major = negop->icframe_vercnt;
+ icframe_minor = 0;
+
+ icmsg_major = negop->icmsg_vercnt;
+ icmsg_minor = 0;
/*
* Select the framework version number we will
@@ -79,26 +88,48 @@ void vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
*/
for (i = 0; i < negop->icframe_vercnt; i++) {
- if (negop->icversion_data[i].major <= max_fw_version)
- icframe_vercnt = negop->icversion_data[i].major;
+ if ((negop->icversion_data[i].major == fw_major) &&
+ (negop->icversion_data[i].minor == fw_minor)) {
+ icframe_major = negop->icversion_data[i].major;
+ icframe_minor = negop->icversion_data[i].minor;
+ found_match = true;
+ }
}
+ if (!found_match)
+ goto fw_error;
+
+ found_match = false;
+
for (i = negop->icframe_vercnt;
(i < negop->icframe_vercnt + negop->icmsg_vercnt); i++) {
- if (negop->icversion_data[i].major <= max_srv_version)
- icmsg_vercnt = negop->icversion_data[i].major;
+ if ((negop->icversion_data[i].major == srv_major) &&
+ (negop->icversion_data[i].minor == srv_minor)) {
+ icmsg_major = negop->icversion_data[i].major;
+ icmsg_minor = negop->icversion_data[i].minor;
+ found_match = true;
+ }
}
/*
- * Respond with the maximum framework and service
+ * Respond with the framework and service
* version numbers we can support.
*/
- negop->icframe_vercnt = 1;
- negop->icmsg_vercnt = 1;
- negop->icversion_data[0].major = icframe_vercnt;
- negop->icversion_data[0].minor = 0;
- negop->icversion_data[1].major = icmsg_vercnt;
- negop->icversion_data[1].minor = 0;
+
+fw_error:
+ if (!found_match) {
+ negop->icframe_vercnt = 0;
+ negop->icmsg_vercnt = 0;
+ } else {
+ negop->icframe_vercnt = 1;
+ negop->icmsg_vercnt = 1;
+ }
+
+ negop->icversion_data[0].major = icframe_major;
+ negop->icversion_data[0].minor = icframe_minor;
+ negop->icversion_data[1].major = icmsg_major;
+ negop->icversion_data[1].minor = icmsg_minor;
+ return found_match;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);
@@ -262,6 +293,13 @@ static void vmbus_process_offer(struct work_struct *work)
}
/*
+ * This state is used to indicate a successful open
+ * so that when we do close the channel normally, we
+ * can clean up properly
+ */
+ newchannel->state = CHANNEL_OPEN_STATE;
+
+ /*
* Start the process of binding this offer to the driver
* We need to set the DeviceObject field before calling
* vmbus_child_dev_add()
@@ -287,13 +325,6 @@ static void vmbus_process_offer(struct work_struct *work)
kfree(newchannel->device_obj);
free_channel(newchannel);
- } else {
- /*
- * This state is used to indicate a successful open
- * so that when we do close the channel normally, we
- * can cleanup properly
- */
- newchannel->state = CHANNEL_OPEN_STATE;
}
}
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index ec3b8cdf1e0..8f4743ab5fb 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -195,7 +195,10 @@ int vmbus_connect(void)
do {
ret = vmbus_negotiate_version(msginfo, version);
- if (ret == 0)
+ if (ret)
+ goto cleanup;
+
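+ /* the host accepted this version only if the connection reached CONNECTED; otherwise try the next version */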
+ if (vmbus_connection.conn_state == CONNECTED)
break;
version = vmbus_get_next_version(version);
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 4c605c70ebf..7e17a5495e0 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -562,7 +562,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
struct hv_hotadd_state *has)
{
int ret = 0;
- int i, nid, t;
+ int i, nid;
unsigned long start_pfn;
unsigned long processed_pfn;
unsigned long total_pfn = pfn_count;
@@ -607,14 +607,11 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
/*
* Wait for the memory block to be onlined.
+ * Since the hot add has succeeded, it is ok to
+ * proceed even if the pages in the hot added region
+ * have not been "onlined" within the allowed time.
*/
- t = wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
- if (t == 0) {
- pr_info("hot_add memory timedout\n");
- has->ha_end_pfn -= HA_CHUNK;
- has->covered_end_pfn -= processed_pfn;
- break;
- }
+ wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
}
@@ -828,7 +825,6 @@ static void hot_add_req(struct work_struct *dummy)
memset(&resp, 0, sizeof(struct dm_hot_add_response));
resp.hdr.type = DM_MEM_HOT_ADD_RESPONSE;
resp.hdr.size = sizeof(struct dm_hot_add_response);
- resp.hdr.trans_id = atomic_inc_return(&trans_id);
#ifdef CONFIG_MEMORY_HOTPLUG
pg_start = dm->ha_wrk.ha_page_range.finfo.start_page;
@@ -890,6 +886,7 @@ static void hot_add_req(struct work_struct *dummy)
pr_info("Memory hot add failed\n");
dm->state = DM_INITIALIZED;
+ resp.hdr.trans_id = atomic_inc_return(&trans_id);
vmbus_sendpacket(dm->dev->channel, &resp,
sizeof(struct dm_hot_add_response),
(unsigned long)NULL,
@@ -978,6 +975,14 @@ static void post_status(struct hv_dynmem_device *dm)
dm->num_pages_ballooned +
compute_balloon_floor();
+ /*
+ * If our transaction ID is no longer current, just don't
+ * send the status. This can happen if we were interrupted
+ * after we picked our transaction ID.
+ */
+ if (status.hdr.trans_id != atomic_read(&trans_id))
+ return;
+
vmbus_sendpacket(dm->dev->channel, &status,
sizeof(struct dm_status),
(unsigned long)NULL,
@@ -1076,7 +1081,6 @@ static void balloon_up(struct work_struct *dummy)
bl_resp = (struct dm_balloon_response *)send_buffer;
memset(send_buffer, 0, PAGE_SIZE);
bl_resp->hdr.type = DM_BALLOON_RESPONSE;
- bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
bl_resp->hdr.size = sizeof(struct dm_balloon_response);
bl_resp->more_pages = 1;
@@ -1104,6 +1108,7 @@ static void balloon_up(struct work_struct *dummy)
*/
do {
+ bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
ret = vmbus_sendpacket(dm_device.dev->channel,
bl_resp,
bl_resp->hdr.size,
@@ -1521,5 +1526,4 @@ static int __init init_balloon_drv(void)
module_init(init_balloon_drv);
MODULE_DESCRIPTION("Hyper-V Balloon");
-MODULE_VERSION(HV_DRV_VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index ed50e9e83c6..28b03325b87 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -29,6 +29,16 @@
#include <linux/hyperv.h>
+/*
+ * Pre-win8 version numbers used in ws2008 and ws2008 r2 (win7)
+ */
+#define WIN7_SRV_MAJOR 3
+#define WIN7_SRV_MINOR 0
+#define WIN7_SRV_MAJOR_MINOR (WIN7_SRV_MAJOR << 16 | WIN7_SRV_MINOR)
+
+#define WIN8_SRV_MAJOR 4
+#define WIN8_SRV_MINOR 0
+#define WIN8_SRV_MAJOR_MINOR (WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
/*
* Global state maintained for transaction that is being processed.
@@ -76,7 +86,9 @@ static u8 *recv_buffer;
/*
* Register the kernel component with the user-level daemon.
* As part of this registration, pass the LIC version number.
+ * This number has no meaning; it merely satisfies the registration protocol.
*/
+#define HV_DRV_VERSION "3.1"
static void
kvp_register(int reg_value)
@@ -593,8 +605,19 @@ void hv_kvp_onchannelcallback(void *context)
sizeof(struct vmbuspipe_hdr)];
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
+ /*
+ * We start with the win8 version, and if the host cannot
+ * support that, we fall back to the previous version.
+ */
+ if (vmbus_prep_negotiate_resp(icmsghdrp, negop,
+ recv_buffer, UTIL_FW_MAJOR_MINOR,
+ WIN8_SRV_MAJOR_MINOR))
+ goto done;
+
vmbus_prep_negotiate_resp(icmsghdrp, negop,
- recv_buffer, MAX_SRV_VER, MAX_SRV_VER);
+ recv_buffer, UTIL_FW_MAJOR_MINOR,
+ WIN7_SRV_MAJOR_MINOR);
+
} else {
kvp_msg = (struct hv_kvp_msg *)&recv_buffer[
sizeof(struct vmbuspipe_hdr) +
@@ -626,6 +649,7 @@ void hv_kvp_onchannelcallback(void *context)
return;
}
+done:
icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
| ICMSGHDRFLAG_RESPONSE;
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 8ad5653ce44..e4572f3f283 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -24,6 +24,10 @@
#include <linux/workqueue.h>
#include <linux/hyperv.h>
+#define VSS_MAJOR 5
+#define VSS_MINOR 0
+#define VSS_MAJOR_MINOR (VSS_MAJOR << 16 | VSS_MINOR)
+
/*
@@ -186,18 +190,8 @@ void hv_vss_onchannelcallback(void *context)
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
vmbus_prep_negotiate_resp(icmsghdrp, negop,
- recv_buffer, MAX_SRV_VER, MAX_SRV_VER);
- /*
- * We currently negotiate the highest number the
- * host has presented. If this version is not
- * atleast 5.0, reject.
- */
- negop = (struct icmsg_negotiate *)&recv_buffer[
- sizeof(struct vmbuspipe_hdr) +
- sizeof(struct icmsg_hdr)];
-
- if (negop->icversion_data[1].major < 5)
- negop->icframe_vercnt = 0;
+ recv_buffer, UTIL_FW_MAJOR_MINOR,
+ VSS_MAJOR_MINOR);
} else {
vss_msg = (struct hv_vss_msg *)&recv_buffer[
sizeof(struct vmbuspipe_hdr) +
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index 2f561c5dfe2..cb82233541b 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -28,6 +28,18 @@
#include <linux/reboot.h>
#include <linux/hyperv.h>
+#define SHUTDOWN_MAJOR 3
+#define SHUTDOWN_MINOR 0
+#define SHUTDOWN_MAJOR_MINOR (SHUTDOWN_MAJOR << 16 | SHUTDOWN_MINOR)
+
+#define TIMESYNCH_MAJOR 3
+#define TIMESYNCH_MINOR 0
+#define TIMESYNCH_MAJOR_MINOR (TIMESYNCH_MAJOR << 16 | TIMESYNCH_MINOR)
+
+#define HEARTBEAT_MAJOR 3
+#define HEARTBEAT_MINOR 0
+#define HEARTBEAT_MAJOR_MINOR (HEARTBEAT_MAJOR << 16 | HEARTBEAT_MINOR)
+
static void shutdown_onchannelcallback(void *context);
static struct hv_util_service util_shutdown = {
.util_cb = shutdown_onchannelcallback,
@@ -87,7 +99,8 @@ static void shutdown_onchannelcallback(void *context)
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
vmbus_prep_negotiate_resp(icmsghdrp, negop,
- shut_txf_buf, MAX_SRV_VER, MAX_SRV_VER);
+ shut_txf_buf, UTIL_FW_MAJOR_MINOR,
+ SHUTDOWN_MAJOR_MINOR);
} else {
shutdown_msg =
(struct shutdown_msg_data *)&shut_txf_buf[
@@ -213,7 +226,8 @@ static void timesync_onchannelcallback(void *context)
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
vmbus_prep_negotiate_resp(icmsghdrp, NULL, time_txf_buf,
- MAX_SRV_VER, MAX_SRV_VER);
+ UTIL_FW_MAJOR_MINOR,
+ TIMESYNCH_MAJOR_MINOR);
} else {
timedatap = (struct ictimesync_data *)&time_txf_buf[
sizeof(struct vmbuspipe_hdr) +
@@ -253,7 +267,8 @@ static void heartbeat_onchannelcallback(void *context)
if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
vmbus_prep_negotiate_resp(icmsghdrp, NULL,
- hbeat_txf_buf, MAX_SRV_VER, MAX_SRV_VER);
+ hbeat_txf_buf, UTIL_FW_MAJOR_MINOR,
+ HEARTBEAT_MAJOR_MINOR);
} else {
heartbeat_msg =
(struct heartbeat_msg_data *)&hbeat_txf_buf[
@@ -380,5 +395,4 @@ module_init(init_hyperv_utils);
module_exit(exit_hyperv_utils);
MODULE_DESCRIPTION("Hyper-V Utilities");
-MODULE_VERSION(HV_DRV_VERSION);
MODULE_LICENSE("GPL");
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index a2464bf07c4..f9fe46f52cf 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -690,7 +690,7 @@ int vmbus_device_register(struct hv_device *child_device_obj)
if (ret)
pr_err("Unable to register child device\n");
else
- pr_info("child device %s registered\n",
+ pr_debug("child device %s registered\n",
dev_name(&child_device_obj->device));
return ret;
@@ -702,14 +702,14 @@ int vmbus_device_register(struct hv_device *child_device_obj)
*/
void vmbus_device_unregister(struct hv_device *device_obj)
{
+ pr_debug("child device %s unregistered\n",
+ dev_name(&device_obj->device));
+
/*
* Kick off the process of unregistering the device.
* This will call vmbus_remove() and eventually vmbus_device_release()
*/
device_unregister(&device_obj->device);
-
- pr_info("child device %s unregistered\n",
- dev_name(&device_obj->device));
}
@@ -816,7 +816,6 @@ static void __exit vmbus_exit(void)
MODULE_LICENSE("GPL");
-MODULE_VERSION(HV_DRV_VERSION);
subsys_initcall(hv_acpi_init);
module_exit(vmbus_exit);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index e989f7fd645..b3ab9d43bb3 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -296,8 +296,8 @@ config SENSORS_K10TEMP
If you say yes here you get support for the temperature
sensor(s) inside your CPU. Supported are later revisions of
the AMD Family 10h and all revisions of the AMD Family 11h,
- 12h (Llano), 14h (Brazos) and 15h (Bulldozer/Trinity)
- microarchitectures.
+ 12h (Llano), 14h (Brazos), 15h (Bulldozer/Trinity) and
+ 16h (Kabini) microarchitectures.
This driver can also be built as a module. If so, the module
will be called k10temp.
@@ -511,6 +511,16 @@ config SENSORS_HIH6130
This driver can also be built as a module. If so, the module
will be called hih6130.
+config SENSORS_HTU21
+ tristate "Measurement Specialties HTU21D humidity/temperature sensors"
+ depends on I2C
+ help
+ If you say yes here you get support for the Measurement Specialties
+ HTU21D humidity and temperature sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called htu21.
+
config SENSORS_CORETEMP
tristate "Intel Core/Core2/Atom temperature sensor"
depends on X86
@@ -1202,8 +1212,8 @@ config SENSORS_ADS1015
tristate "Texas Instruments ADS1015"
depends on I2C
help
- If you say yes here you get support for Texas Instruments ADS1015
- 12-bit 4-input ADC device.
+ If you say yes here you get support for Texas Instruments
+ ADS1015/ADS1115 12/16-bit 4-input ADC device.
This driver can also be built as a module. If so, the module
will be called ads1015.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 4f0fb5235f4..ec7cde06eb5 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_SENSORS_GL518SM) += gl518sm.o
obj-$(CONFIG_SENSORS_GL520SM) += gl520sm.o
obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o
obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o
+obj-$(CONFIG_SENSORS_HTU21) += htu21.o
obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o
obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 6351aba8819..a9e3d0152c0 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -2,7 +2,7 @@
* A hwmon driver for ACPI 4.0 power meters
* Copyright (C) 2009 IBM
*
- * Author: Darrick J. Wong <djwong@us.ibm.com>
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1001,7 +1001,7 @@ static void __exit acpi_power_meter_exit(void)
acpi_bus_unregister_driver(&acpi_power_meter_driver);
}
-MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
+MODULE_AUTHOR("Darrick J. Wong <darrick.wong@oracle.com>");
MODULE_DESCRIPTION("ACPI 4.0 power meter driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
index 2798246ad81..7f9dc2f86b6 100644
--- a/drivers/hwmon/ads1015.c
+++ b/drivers/hwmon/ads1015.c
@@ -46,17 +46,28 @@ static const unsigned int fullscale_table[8] = {
6144, 4096, 2048, 1024, 512, 256, 256, 256 };
/* Data rates in samples per second */
-static const unsigned int data_rate_table[8] = {
- 128, 250, 490, 920, 1600, 2400, 3300, 3300 };
+static const unsigned int data_rate_table_1015[8] = {
+ 128, 250, 490, 920, 1600, 2400, 3300, 3300
+};
+
+static const unsigned int data_rate_table_1115[8] = {
+ 8, 16, 32, 64, 128, 250, 475, 860
+};
#define ADS1015_DEFAULT_CHANNELS 0xff
#define ADS1015_DEFAULT_PGA 2
#define ADS1015_DEFAULT_DATA_RATE 4
+enum ads1015_chips {
+ ads1015,
+ ads1115,
+};
+
struct ads1015_data {
struct device *hwmon_dev;
struct mutex update_lock; /* mutex protect updates */
struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
+ enum ads1015_chips id;
};
static int ads1015_read_adc(struct i2c_client *client, unsigned int channel)
@@ -66,6 +77,8 @@ static int ads1015_read_adc(struct i2c_client *client, unsigned int channel)
unsigned int pga = data->channel_data[channel].pga;
unsigned int data_rate = data->channel_data[channel].data_rate;
unsigned int conversion_time_ms;
+ const unsigned int * const rate_table = data->id == ads1115 ?
+ data_rate_table_1115 : data_rate_table_1015;
int res;
mutex_lock(&data->update_lock);
@@ -75,7 +88,7 @@ static int ads1015_read_adc(struct i2c_client *client, unsigned int channel)
if (res < 0)
goto err_unlock;
config = res;
- conversion_time_ms = DIV_ROUND_UP(1000, data_rate_table[data_rate]);
+ conversion_time_ms = DIV_ROUND_UP(1000, rate_table[data_rate]);
/* setup and start single conversion */
config &= 0x001f;
@@ -113,8 +126,9 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
struct ads1015_data *data = i2c_get_clientdata(client);
unsigned int pga = data->channel_data[channel].pga;
int fullscale = fullscale_table[pga];
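+ /* the ADS1015 left-justifies its 12-bit result (0x7ff0 full scale); the ADS1115 uses all 16 bits (0x7fff) */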
+ const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
- return DIV_ROUND_CLOSEST(reg * fullscale, 0x7ff0);
+ return DIV_ROUND_CLOSEST(reg * fullscale, mask);
}
/* sysfs callback function */
@@ -257,7 +271,7 @@ static int ads1015_probe(struct i2c_client *client,
GFP_KERNEL);
if (!data)
return -ENOMEM;
-
+ data->id = id->driver_data;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
@@ -286,7 +300,8 @@ exit_remove:
}
static const struct i2c_device_id ads1015_id[] = {
- { "ads1015", 0 },
+ { "ads1015", ads1015},
+ { "ads1115", ads1115},
{ }
};
MODULE_DEVICE_TABLE(i2c, ads1015_id);
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index ba962ac4b81..7092c78f814 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -145,7 +145,7 @@ static int ads7828_remove(struct i2c_client *client)
static int ads7828_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct ads7828_platform_data *pdata = client->dev.platform_data;
+ struct ads7828_platform_data *pdata = dev_get_platdata(&client->dev);
struct ads7828_data *data;
int err;
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index 69481d3a3d2..addb5a4d506 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -2,7 +2,7 @@
* A hwmon driver for the Analog Devices ADT7462
* Copyright (C) 2008 IBM
*
- * Author: Darrick J. Wong <djwong@us.ibm.com>
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -333,7 +333,7 @@ static int ADT7462_REG_VOLT_MAX(struct adt7462_data *data, int which)
return 0x4C;
break;
}
- return -ENODEV;
+ return 0;
}
static int ADT7462_REG_VOLT_MIN(struct adt7462_data *data, int which)
@@ -392,7 +392,7 @@ static int ADT7462_REG_VOLT_MIN(struct adt7462_data *data, int which)
return 0x77;
break;
}
- return -ENODEV;
+ return 0;
}
static int ADT7462_REG_VOLT(struct adt7462_data *data, int which)
@@ -1970,6 +1970,6 @@ static int adt7462_remove(struct i2c_client *client)
module_i2c_driver(adt7462_driver);
-MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
+MODULE_AUTHOR("Darrick J. Wong <darrick.wong@oracle.com>");
MODULE_DESCRIPTION("ADT7462 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 0f34bca9f5e..0f4dea5ccf1 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -2,7 +2,7 @@
* A hwmon driver for the Analog Devices ADT7470
* Copyright (C) 2007 IBM
*
- * Author: Darrick J. Wong <djwong@us.ibm.com>
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -215,7 +215,7 @@ static inline int adt7470_write_word_data(struct i2c_client *client, u8 reg,
u16 value)
{
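+ /* smbus writes return 0 on success; use || so the function returns non-zero if either write fails */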
return i2c_smbus_write_byte_data(client, reg, value & 0xFF)
- && i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
+ || i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
}
static void adt7470_init_client(struct i2c_client *client)
@@ -1314,6 +1314,6 @@ static int adt7470_remove(struct i2c_client *client)
module_i2c_driver(adt7470_driver);
-MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
+MODULE_AUTHOR("Darrick J. Wong <darrick.wong@oracle.com>");
MODULE_DESCRIPTION("ADT7470 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 2e5e2dc47ea..78be6617684 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -316,6 +316,18 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
return tjmax;
}
+static bool cpu_has_tjmax(struct cpuinfo_x86 *c)
+{
+ u8 model = c->x86_model;
+
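+ /* the models excluded below are Atom CPUs, which have no TjMax to read */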
+ return model > 0xe &&
+ model != 0x1c &&
+ model != 0x26 &&
+ model != 0x27 &&
+ model != 0x35 &&
+ model != 0x36;
+}
+
static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
{
int err;
@@ -328,7 +340,7 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
*/
err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
if (err) {
- if (c->x86_model > 0xe && c->x86_model != 0x1c)
+ if (cpu_has_tjmax(c))
dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
} else {
val = (eax >> 16) & 0xff;
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
index f1d6b422cf0..0918b913658 100644
--- a/drivers/hwmon/ds620.c
+++ b/drivers/hwmon/ds620.c
@@ -77,7 +77,7 @@ struct ds620_data {
static void ds620_init_client(struct i2c_client *client)
{
- struct ds620_platform_data *ds620_info = client->dev.platform_data;
+ struct ds620_platform_data *ds620_info = dev_get_platdata(&client->dev);
u16 conf, new_conf;
new_conf = conf =
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index 0c9f3da242b..15b7f5281de 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -1375,7 +1375,7 @@ static void f71805f_init_device(struct f71805f_data *data)
static int f71805f_probe(struct platform_device *pdev)
{
- struct f71805f_sio_data *sio_data = pdev->dev.platform_data;
+ struct f71805f_sio_data *sio_data = dev_get_platdata(&pdev->dev);
struct f71805f_data *data;
struct resource *res;
int i, err;
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index cfb02dd91aa..31b221eeee6 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -2267,7 +2267,7 @@ static int f71882fg_create_fan_sysfs_files(
static int f71882fg_probe(struct platform_device *pdev)
{
struct f71882fg_data *data;
- struct f71882fg_sio_data *sio_data = pdev->dev.platform_data;
+ struct f71882fg_sio_data *sio_data = dev_get_platdata(&pdev->dev);
int nr_fans = f71882fg_nr_fans[sio_data->type];
int nr_temps = f71882fg_nr_temps[sio_data->type];
int err, i;
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 9e300e567f1..a837b94977f 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -832,7 +832,8 @@ static int f75375_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct f75375_data *data;
- struct f75375s_platform_data *f75375s_pdata = client->dev.platform_data;
+ struct f75375s_platform_data *f75375s_pdata =
+ dev_get_platdata(&client->dev);
int err;
if (!i2c_check_functionality(client->adapter,
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index 73adf01b0ef..b4b8b5bef71 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -717,7 +717,7 @@ static void g762_of_clock_disable(struct i2c_client *client) { }
static int g762_pdata_prop_import(struct i2c_client *client)
{
- struct g762_platform_data *pdata = client->dev.platform_data;
+ struct g762_platform_data *pdata = dev_get_platdata(&client->dev);
int ret;
if (!pdata)
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 3104149795c..b7d6a5704eb 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -495,7 +495,7 @@ static int gpio_fan_probe(struct platform_device *pdev)
{
int err;
struct gpio_fan_data *fan_data;
- struct gpio_fan_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_fan_platform_data *pdata = dev_get_platdata(&pdev->dev);
#ifdef CONFIG_OF_GPIO
if (!pdata) {
diff --git a/drivers/hwmon/htu21.c b/drivers/hwmon/htu21.c
new file mode 100644
index 00000000000..839086e0e95
--- /dev/null
+++ b/drivers/hwmon/htu21.c
@@ -0,0 +1,199 @@
+/*
+ * Measurement Specialties HTU21D humidity and temperature sensor driver
+ *
+ * Copyright (C) 2013 William Markezana <william.markezana@meas-spec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/jiffies.h>
+
+/* HTU21 Commands */
+#define HTU21_T_MEASUREMENT_HM 0xE3
+#define HTU21_RH_MEASUREMENT_HM 0xE5
+
+struct htu21 {
+ struct device *hwmon_dev;
+ struct mutex lock;
+ bool valid;
+ unsigned long last_update;
+ int temperature;
+ int humidity;
+};
+
+static inline int htu21_temp_ticks_to_millicelsius(int ticks)
+{
+ ticks &= ~0x0003; /* clear status bits */
+ /*
+ * Formula T = -46.85 + 175.72 * ST / 2^16 from datasheet p14,
+ * optimized for integer fixed point (3 digits) arithmetic
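+ * (21965 / 2^13 == 175.72 * 1000 / 2^16, 46850 == 46.85 * 1000)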
+ */
+ return ((21965 * ticks) >> 13) - 46850;
+}
+
+static inline int htu21_rh_ticks_to_per_cent_mille(int ticks)
+{
+ ticks &= ~0x0003; /* clear status bits */
+ /*
+ * Formula RH = -6 + 125 * SRH / 2^16 from datasheet p14,
+ * optimized for integer fixed point (3 digits) arithmetic
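+ * (15625 / 2^13 == 125 * 1000 / 2^16, 6000 == 6 * 1000)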
+ */
+ return ((15625 * ticks) >> 13) - 6000;
+}
+
+static int htu21_update_measurements(struct i2c_client *client)
+{
+ int ret = 0;
+ struct htu21 *htu21 = i2c_get_clientdata(client);
+
+ mutex_lock(&htu21->lock);
+
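+ /* limit bus traffic: reuse readings that are less than 500 ms (HZ / 2) old */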
+ if (time_after(jiffies, htu21->last_update + HZ / 2) ||
+ !htu21->valid) {
+ ret = i2c_smbus_read_word_swapped(client,
+ HTU21_T_MEASUREMENT_HM);
+ if (ret < 0)
+ goto out;
+ htu21->temperature = htu21_temp_ticks_to_millicelsius(ret);
+ ret = i2c_smbus_read_word_swapped(client,
+ HTU21_RH_MEASUREMENT_HM);
+ if (ret < 0)
+ goto out;
+ htu21->humidity = htu21_rh_ticks_to_per_cent_mille(ret);
+ htu21->last_update = jiffies;
+ htu21->valid = true;
+ }
+out:
+ mutex_unlock(&htu21->lock);
+
+ return ret >= 0 ? 0 : ret;
+}
+
+static ssize_t htu21_show_temperature(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct htu21 *htu21 = i2c_get_clientdata(client);
+ int ret = htu21_update_measurements(client);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", htu21->temperature);
+}
+
+static ssize_t htu21_show_humidity(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct htu21 *htu21 = i2c_get_clientdata(client);
+ int ret = htu21_update_measurements(client);
+ if (ret < 0)
+ return ret;
+ return sprintf(buf, "%d\n", htu21->humidity);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+ htu21_show_temperature, NULL, 0);
+static SENSOR_DEVICE_ATTR(humidity1_input, S_IRUGO,
+ htu21_show_humidity, NULL, 0);
+
+static struct attribute *htu21_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_humidity1_input.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group htu21_group = {
+ .attrs = htu21_attributes,
+};
+
+static int htu21_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct htu21 *htu21;
+ int err;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_READ_WORD_DATA)) {
+ dev_err(&client->dev,
+ "adapter does not support SMBus word transactions\n");
+ return -ENODEV;
+ }
+
+ htu21 = devm_kzalloc(&client->dev, sizeof(*htu21), GFP_KERNEL);
+ if (!htu21)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, htu21);
+
+ mutex_init(&htu21->lock);
+
+ err = sysfs_create_group(&client->dev.kobj, &htu21_group);
+ if (err) {
+ dev_dbg(&client->dev, "could not create sysfs files\n");
+ return err;
+ }
+ htu21->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(htu21->hwmon_dev)) {
+ dev_dbg(&client->dev, "unable to register hwmon device\n");
+ err = PTR_ERR(htu21->hwmon_dev);
+ goto error;
+ }
+
+ dev_info(&client->dev, "initialized\n");
+
+ return 0;
+
+error:
+ sysfs_remove_group(&client->dev.kobj, &htu21_group);
+ return err;
+}
+
+static int htu21_remove(struct i2c_client *client)
+{
+ struct htu21 *htu21 = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(htu21->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &htu21_group);
+
+ return 0;
+}
+
+static const struct i2c_device_id htu21_id[] = {
+ { "htu21", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, htu21_id);
+
+static struct i2c_driver htu21_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "htu21",
+ },
+ .probe = htu21_probe,
+ .remove = htu21_remove,
+ .id_table = htu21_id,
+};
+
+module_i2c_driver(htu21_driver);
+
+MODULE_AUTHOR("William Markezana <william.markezana@meas-spec.com>");
+MODULE_DESCRIPTION("MEAS HTU21D humidity and temperature sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
index de058c278aa..6c0080a3b90 100644
--- a/drivers/hwmon/i5k_amb.c
+++ b/drivers/hwmon/i5k_amb.c
@@ -3,7 +3,7 @@
* temperature sensors
* Copyright (C) 2007 IBM
*
- * Author: Darrick J. Wong <djwong@us.ibm.com>
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -609,7 +609,7 @@ static void __exit i5k_amb_exit(void)
platform_driver_unregister(&i5k_amb_driver);
}
-MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
+MODULE_AUTHOR("Darrick J. Wong <darrick.wong@oracle.com>");
MODULE_DESCRIPTION("Intel 5000 chipset FB-DIMM AMB temperature sensor");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 1429f6e177f..e2b56a2b756 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -3,7 +3,7 @@
* temperature/power/energy sensors and capping functionality.
* Copyright (C) 2008 IBM
*
- * Author: Darrick J. Wong <djwong@us.ibm.com>
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1103,7 +1103,7 @@ static void __exit aem_exit(void)
aem_delete(p1);
}
-MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
+MODULE_AUTHOR("Darrick J. Wong <darrick.wong@oracle.com>");
MODULE_DESCRIPTION("IBM AEM power/temp/energy sensor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
index 74b365ea01c..20ab0fb8539 100644
--- a/drivers/hwmon/ibmpex.c
+++ b/drivers/hwmon/ibmpex.c
@@ -2,7 +2,7 @@
* A hwmon driver for the IBM PowerExecutive temperature/power sensors
* Copyright (C) 2007 IBM
*
- * Author: Darrick J. Wong <djwong@us.ibm.com>
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -606,7 +606,7 @@ static void __exit ibmpex_exit(void)
ibmpex_bmc_delete(p);
}
-MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
+MODULE_AUTHOR("Darrick J. Wong <darrick.wong@oracle.com>");
MODULE_DESCRIPTION("IBM PowerExecutive power/temperature sensor driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index d917a2d8c30..18c062360ca 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -232,9 +232,9 @@ static int ina2xx_probe(struct i2c_client *client,
if (!data)
return -ENOMEM;
- if (client->dev.platform_data) {
+ if (dev_get_platdata(&client->dev)) {
pdata =
- (struct ina2xx_platform_data *)client->dev.platform_data;
+ (struct ina2xx_platform_data *)dev_get_platdata(&client->dev);
shunt = pdata->shunt_uohms;
} else if (!of_property_read_u32(client->dev.of_node,
"shunt-resistor", &val)) {
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 72b21d5b1c6..29ffa27c60b 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -1962,7 +1962,7 @@ exit:
static void it87_remove_files(struct device *dev)
{
struct it87_data *data = platform_get_drvdata(pdev);
- struct it87_sio_data *sio_data = dev->platform_data;
+ struct it87_sio_data *sio_data = dev_get_platdata(dev);
int i;
sysfs_remove_group(&dev->kobj, &it87_group);
@@ -2014,7 +2014,7 @@ static int it87_probe(struct platform_device *pdev)
struct it87_data *data;
struct resource *res;
struct device *dev = &pdev->dev;
- struct it87_sio_data *sio_data = dev->platform_data;
+ struct it87_sio_data *sio_data = dev_get_platdata(dev);
int err = 0, i;
int enable_pwm_interface;
int fan_beep_need_rw;
@@ -2316,7 +2316,7 @@ static int it87_check_pwm(struct device *dev)
/* Called when we have found a new IT87. */
static void it87_init_device(struct platform_device *pdev)
{
- struct it87_sio_data *sio_data = pdev->dev.platform_data;
+ struct it87_sio_data *sio_data = dev_get_platdata(&pdev->dev);
struct it87_data *data = platform_get_drvdata(pdev);
int tmp, i;
u8 mask;
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index e3b037c73a7..e633856370c 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -1,5 +1,5 @@
/*
- * k10temp.c - AMD Family 10h/11h/12h/14h/15h processor hardware monitoring
+ * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h processor hardware monitoring
*
* Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
*
@@ -211,6 +211,7 @@ static DEFINE_PCI_DEVICE_TABLE(k10temp_id_table) = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
{}
};
MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index 16e45d70215..333092ce246 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -855,8 +855,8 @@ static void lm87_init_client(struct i2c_client *client)
{
struct lm87_data *data = i2c_get_clientdata(client);
- if (client->dev.platform_data) {
- data->channel = *(u8 *)client->dev.platform_data;
+ if (dev_get_platdata(&client->dev)) {
+ data->channel = *(u8 *)dev_get_platdata(&client->dev);
lm87_write_value(client,
LM87_REG_CHANNEL_MODE, data->channel);
} else {
diff --git a/drivers/hwmon/max197.c b/drivers/hwmon/max197.c
index b5ebb9198c7..96dccaf919d 100644
--- a/drivers/hwmon/max197.c
+++ b/drivers/hwmon/max197.c
@@ -261,7 +261,7 @@ static int max197_probe(struct platform_device *pdev)
{
int ch, ret;
struct max197_data *data;
- struct max197_platform_data *pdata = pdev->dev.platform_data;
+ struct max197_platform_data *pdata = dev_get_platdata(&pdev->dev);
enum max197_chips chip = platform_get_device_id(pdev)->driver_data;
if (pdata == NULL) {
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index 3e7b4269f5b..066e587a18a 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -428,7 +428,7 @@ static int max6639_init_client(struct i2c_client *client)
{
struct max6639_data *data = i2c_get_clientdata(client);
struct max6639_platform_data *max6639_info =
- client->dev.platform_data;
+ dev_get_platdata(&client->dev);
int i;
int rpm_range = 1; /* default: 4000 RPM */
int err;
diff --git a/drivers/hwmon/max6697.c b/drivers/hwmon/max6697.c
index 328fb0353c1..a41b5f3fc50 100644
--- a/drivers/hwmon/max6697.c
+++ b/drivers/hwmon/max6697.c
@@ -605,12 +605,12 @@ static int max6697_init_chip(struct i2c_client *client)
if (ret < 0)
return ret;
ret = i2c_smbus_write_byte_data(client, MAX6581_REG_IDEALITY,
- pdata->ideality_mask >> 1);
+ pdata->ideality_value);
if (ret < 0)
return ret;
ret = i2c_smbus_write_byte_data(client,
MAX6581_REG_IDEALITY_SELECT,
- pdata->ideality_value);
+ pdata->ideality_mask >> 1);
if (ret < 0)
return ret;
}
diff --git a/drivers/hwmon/mcp3021.c b/drivers/hwmon/mcp3021.c
index eedb32292d6..d219c06a857 100644
--- a/drivers/hwmon/mcp3021.c
+++ b/drivers/hwmon/mcp3021.c
@@ -143,12 +143,13 @@ static int mcp3021_probe(struct i2c_client *client,
break;
}
- if (client->dev.platform_data) {
- data->vdd = *(u32 *)client->dev.platform_data;
+ if (dev_get_platdata(&client->dev)) {
+ data->vdd = *(u32 *)dev_get_platdata(&client->dev);
if (data->vdd > MCP3021_VDD_MAX || data->vdd < MCP3021_VDD_MIN)
return -EINVAL;
- } else
+ } else {
data->vdd = MCP3021_VDD_REF;
+ }
err = sysfs_create_file(&client->dev.kobj, &dev_attr_in0_input.attr);
if (err)
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 99cec182542..6eb03ce2cff 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -33,9 +33,11 @@
* Supports the following chips:
*
* Chip #vin #fan #pwm #temp chip IDs man ID
+ * nct6106d 9 3 3 6+3 0xc450 0xc1 0x5ca3
* nct6775f 9 4 3 6+3 0xb470 0xc1 0x5ca3
* nct6776f 9 5 3 6+3 0xc330 0xc1 0x5ca3
* nct6779d 15 5 5 2+6 0xc560 0xc1 0x5ca3
+ * nct6791d 15 6 6 2+6 0xc800 0xc1 0x5ca3
*
* #temp lists the number of monitored temperature sources (first value) plus
* the number of directly connectable temperature sensors (second value).
@@ -59,13 +61,15 @@
#define USE_ALTERNATE
-enum kinds { nct6775, nct6776, nct6779 };
+enum kinds { nct6106, nct6775, nct6776, nct6779, nct6791 };
/* used to set data->name = nct6775_device_names[data->sio_kind] */
static const char * const nct6775_device_names[] = {
+ "nct6106",
"nct6775",
"nct6776",
"nct6779",
+ "nct6791",
};
static unsigned short force_id;
@@ -91,9 +95,11 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
#define SIO_REG_ENABLE 0x30 /* Logical device enable */
#define SIO_REG_ADDR 0x60 /* Logical device address (2 bytes) */
+#define SIO_NCT6106_ID 0xc450
#define SIO_NCT6775_ID 0xb470
#define SIO_NCT6776_ID 0xc330
#define SIO_NCT6779_ID 0xc560
+#define SIO_NCT6791_ID 0xc800
#define SIO_ID_MASK 0xFFF0
enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
@@ -167,7 +173,10 @@ superio_exit(int ioreg)
#define NUM_TEMP 10 /* Max number of temp attribute sets w/ limits*/
#define NUM_TEMP_FIXED 6 /* Max number of fixed temp attribute sets */
-#define NUM_REG_ALARM 4 /* Max number of alarm registers */
+#define NUM_REG_ALARM 7 /* Max number of alarm registers */
+#define NUM_REG_BEEP 5 /* Max number of beep registers */
+
+#define NUM_FAN 6
/* Common and NCT6775 specific data */
@@ -185,6 +194,7 @@ static const u16 NCT6775_REG_IN[] = {
#define NCT6775_REG_VBAT 0x5D
#define NCT6775_REG_DIODE 0x5E
+#define NCT6775_DIODE_MASK 0x02
#define NCT6775_REG_FANDIV1 0x506
#define NCT6775_REG_FANDIV2 0x507
@@ -193,7 +203,7 @@ static const u16 NCT6775_REG_IN[] = {
static const u16 NCT6775_REG_ALARM[NUM_REG_ALARM] = { 0x459, 0x45A, 0x45B };
-/* 0..15 voltages, 16..23 fans, 24..31 temperatures */
+/* 0..15 voltages, 16..23 fans, 24..29 temperatures, 30..31 intrusion */
static const s8 NCT6775_ALARM_BITS[] = {
0, 1, 2, 3, 8, 21, 20, 16, /* in0.. in7 */
@@ -208,6 +218,23 @@ static const s8 NCT6775_ALARM_BITS[] = {
#define TEMP_ALARM_BASE 24
#define INTRUSION_ALARM_BASE 30
+static const u16 NCT6775_REG_BEEP[NUM_REG_BEEP] = { 0x56, 0x57, 0x453, 0x4e };
+
+/*
+ * 0..14 voltages, 15 global beep enable, 16..23 fans, 24..29 temperatures,
+ * 30..31 intrusion
+ */
+static const s8 NCT6775_BEEP_BITS[] = {
+ 0, 1, 2, 3, 8, 9, 10, 16, /* in0.. in7 */
+ 17, -1, -1, -1, -1, -1, -1, /* in8..in14 */
+ 21, /* global beep enable */
+ 6, 7, 11, 28, -1, /* fan1..fan5 */
+ -1, -1, -1, /* unused */
+ 4, 5, 13, -1, -1, -1, /* temp1..temp6 */
+ 12, -1 }; /* intrusion0, intrusion1 */
+
+#define BEEP_ENABLE_BASE 15
+
static const u8 NCT6775_REG_CR_CASEOPEN_CLR[] = { 0xe6, 0xee };
static const u8 NCT6775_CR_CASEOPEN_CLR_MASK[] = { 0x20, 0x01 };
@@ -217,27 +244,32 @@ static const u8 NCT6775_PWM_MODE_MASK[] = { 0x01, 0x02, 0x01 };
/* Advanced Fan control, some values are common for all fans */
-static const u16 NCT6775_REG_TARGET[] = { 0x101, 0x201, 0x301, 0x801, 0x901 };
-static const u16 NCT6775_REG_FAN_MODE[] = { 0x102, 0x202, 0x302, 0x802, 0x902 };
+static const u16 NCT6775_REG_TARGET[] = {
+ 0x101, 0x201, 0x301, 0x801, 0x901, 0xa01 };
+static const u16 NCT6775_REG_FAN_MODE[] = {
+ 0x102, 0x202, 0x302, 0x802, 0x902, 0xa02 };
static const u16 NCT6775_REG_FAN_STEP_DOWN_TIME[] = {
- 0x103, 0x203, 0x303, 0x803, 0x903 };
+ 0x103, 0x203, 0x303, 0x803, 0x903, 0xa03 };
static const u16 NCT6775_REG_FAN_STEP_UP_TIME[] = {
- 0x104, 0x204, 0x304, 0x804, 0x904 };
+ 0x104, 0x204, 0x304, 0x804, 0x904, 0xa04 };
static const u16 NCT6775_REG_FAN_STOP_OUTPUT[] = {
- 0x105, 0x205, 0x305, 0x805, 0x905 };
-static const u16 NCT6775_REG_FAN_START_OUTPUT[]
- = { 0x106, 0x206, 0x306, 0x806, 0x906 };
+ 0x105, 0x205, 0x305, 0x805, 0x905, 0xa05 };
+static const u16 NCT6775_REG_FAN_START_OUTPUT[] = {
+ 0x106, 0x206, 0x306, 0x806, 0x906, 0xa06 };
static const u16 NCT6775_REG_FAN_MAX_OUTPUT[] = { 0x10a, 0x20a, 0x30a };
static const u16 NCT6775_REG_FAN_STEP_OUTPUT[] = { 0x10b, 0x20b, 0x30b };
static const u16 NCT6775_REG_FAN_STOP_TIME[] = {
- 0x107, 0x207, 0x307, 0x807, 0x907 };
-static const u16 NCT6775_REG_PWM[] = { 0x109, 0x209, 0x309, 0x809, 0x909 };
-static const u16 NCT6775_REG_PWM_READ[] = { 0x01, 0x03, 0x11, 0x13, 0x15 };
+ 0x107, 0x207, 0x307, 0x807, 0x907, 0xa07 };
+static const u16 NCT6775_REG_PWM[] = {
+ 0x109, 0x209, 0x309, 0x809, 0x909, 0xa09 };
+static const u16 NCT6775_REG_PWM_READ[] = {
+ 0x01, 0x03, 0x11, 0x13, 0x15, 0xa09 };
static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 };
static const u16 NCT6775_REG_FAN_MIN[] = { 0x3b, 0x3c, 0x3d };
static const u16 NCT6775_REG_FAN_PULSES[] = { 0x641, 0x642, 0x643, 0x644, 0 };
+static const u16 NCT6775_FAN_PULSE_SHIFT[] = { 0, 0, 0, 0, 0, 0 };
static const u16 NCT6775_REG_TEMP[] = {
0x27, 0x150, 0x250, 0x62b, 0x62c, 0x62d };
@@ -253,25 +285,25 @@ static const u16 NCT6775_REG_TEMP_SOURCE[ARRAY_SIZE(NCT6775_REG_TEMP)] = {
0x621, 0x622, 0x623, 0x624, 0x625, 0x626 };
static const u16 NCT6775_REG_TEMP_SEL[] = {
- 0x100, 0x200, 0x300, 0x800, 0x900 };
+ 0x100, 0x200, 0x300, 0x800, 0x900, 0xa00 };
static const u16 NCT6775_REG_WEIGHT_TEMP_SEL[] = {
- 0x139, 0x239, 0x339, 0x839, 0x939 };
+ 0x139, 0x239, 0x339, 0x839, 0x939, 0xa39 };
static const u16 NCT6775_REG_WEIGHT_TEMP_STEP[] = {
- 0x13a, 0x23a, 0x33a, 0x83a, 0x93a };
+ 0x13a, 0x23a, 0x33a, 0x83a, 0x93a, 0xa3a };
static const u16 NCT6775_REG_WEIGHT_TEMP_STEP_TOL[] = {
- 0x13b, 0x23b, 0x33b, 0x83b, 0x93b };
+ 0x13b, 0x23b, 0x33b, 0x83b, 0x93b, 0xa3b };
static const u16 NCT6775_REG_WEIGHT_DUTY_STEP[] = {
- 0x13c, 0x23c, 0x33c, 0x83c, 0x93c };
+ 0x13c, 0x23c, 0x33c, 0x83c, 0x93c, 0xa3c };
static const u16 NCT6775_REG_WEIGHT_TEMP_BASE[] = {
- 0x13d, 0x23d, 0x33d, 0x83d, 0x93d };
+ 0x13d, 0x23d, 0x33d, 0x83d, 0x93d, 0xa3d };
static const u16 NCT6775_REG_TEMP_OFFSET[] = { 0x454, 0x455, 0x456 };
static const u16 NCT6775_REG_AUTO_TEMP[] = {
- 0x121, 0x221, 0x321, 0x821, 0x921 };
+ 0x121, 0x221, 0x321, 0x821, 0x921, 0xa21 };
static const u16 NCT6775_REG_AUTO_PWM[] = {
- 0x127, 0x227, 0x327, 0x827, 0x927 };
+ 0x127, 0x227, 0x327, 0x827, 0x927, 0xa27 };
#define NCT6775_AUTO_TEMP(data, nr, p) ((data)->REG_AUTO_TEMP[nr] + (p))
#define NCT6775_AUTO_PWM(data, nr, p) ((data)->REG_AUTO_PWM[nr] + (p))
@@ -279,9 +311,9 @@ static const u16 NCT6775_REG_AUTO_PWM[] = {
static const u16 NCT6775_REG_CRITICAL_ENAB[] = { 0x134, 0x234, 0x334 };
static const u16 NCT6775_REG_CRITICAL_TEMP[] = {
- 0x135, 0x235, 0x335, 0x835, 0x935 };
+ 0x135, 0x235, 0x335, 0x835, 0x935, 0xa35 };
static const u16 NCT6775_REG_CRITICAL_TEMP_TOLERANCE[] = {
- 0x138, 0x238, 0x338, 0x838, 0x938 };
+ 0x138, 0x238, 0x338, 0x838, 0x938, 0xa38 };
static const char *const nct6775_temp_label[] = {
"",
@@ -325,17 +357,28 @@ static const s8 NCT6776_ALARM_BITS[] = {
4, 5, 13, -1, -1, -1, /* temp1..temp6 */
12, 9 }; /* intrusion0, intrusion1 */
+static const u16 NCT6776_REG_BEEP[NUM_REG_BEEP] = { 0xb2, 0xb3, 0xb4, 0xb5 };
+
+static const s8 NCT6776_BEEP_BITS[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, /* in0.. in7 */
+ 8, -1, -1, -1, -1, -1, -1, /* in8..in14 */
+ 24, /* global beep enable */
+ 25, 26, 27, 28, 29, /* fan1..fan5 */
+ -1, -1, -1, /* unused */
+ 16, 17, 18, 19, 20, 21, /* temp1..temp6 */
+ 30, 31 }; /* intrusion0, intrusion1 */
+
static const u16 NCT6776_REG_TOLERANCE_H[] = {
- 0x10c, 0x20c, 0x30c, 0x80c, 0x90c };
+ 0x10c, 0x20c, 0x30c, 0x80c, 0x90c, 0xa0c };
-static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0 };
-static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0 };
+static const u8 NCT6776_REG_PWM_MODE[] = { 0x04, 0, 0, 0, 0, 0 };
+static const u8 NCT6776_PWM_MODE_MASK[] = { 0x01, 0, 0, 0, 0, 0 };
static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642 };
static const u16 NCT6776_REG_FAN_PULSES[] = { 0x644, 0x645, 0x646, 0, 0 };
static const u16 NCT6776_REG_WEIGHT_DUTY_BASE[] = {
- 0x13e, 0x23e, 0x33e, 0x83e, 0x93e };
+ 0x13e, 0x23e, 0x33e, 0x83e, 0x93e, 0xa3e };
static const u16 NCT6776_REG_TEMP_CONFIG[ARRAY_SIZE(NCT6775_REG_TEMP)] = {
0x18, 0x152, 0x252, 0x628, 0x629, 0x62A };
@@ -390,14 +433,25 @@ static const s8 NCT6779_ALARM_BITS[] = {
4, 5, 13, -1, -1, -1, /* temp1..temp6 */
12, 9 }; /* intrusion0, intrusion1 */
-static const u16 NCT6779_REG_FAN[] = { 0x4b0, 0x4b2, 0x4b4, 0x4b6, 0x4b8 };
+static const s8 NCT6779_BEEP_BITS[] = {
+ 0, 1, 2, 3, 4, 5, 6, 7, /* in0.. in7 */
+ 8, 9, 10, 11, 12, 13, 14, /* in8..in14 */
+ 24, /* global beep enable */
+ 25, 26, 27, 28, 29, /* fan1..fan5 */
+ -1, -1, -1, /* unused */
+ 16, 17, -1, -1, -1, -1, /* temp1..temp6 */
+ 30, 31 }; /* intrusion0, intrusion1 */
+
+static const u16 NCT6779_REG_FAN[] = {
+ 0x4b0, 0x4b2, 0x4b4, 0x4b6, 0x4b8, 0x4ba };
static const u16 NCT6779_REG_FAN_PULSES[] = {
- 0x644, 0x645, 0x646, 0x647, 0x648 };
+ 0x644, 0x645, 0x646, 0x647, 0x648, 0x649 };
static const u16 NCT6779_REG_CRITICAL_PWM_ENABLE[] = {
- 0x136, 0x236, 0x336, 0x836, 0x936 };
+ 0x136, 0x236, 0x336, 0x836, 0x936, 0xa36 };
+#define NCT6779_CRITICAL_PWM_ENABLE_MASK 0x01
static const u16 NCT6779_REG_CRITICAL_PWM[] = {
- 0x137, 0x237, 0x337, 0x837, 0x937 };
+ 0x137, 0x237, 0x337, 0x837, 0x937, 0xa37 };
static const u16 NCT6779_REG_TEMP[] = { 0x27, 0x150 };
static const u16 NCT6779_REG_TEMP_CONFIG[ARRAY_SIZE(NCT6779_REG_TEMP)] = {
@@ -449,6 +503,122 @@ static const u16 NCT6779_REG_TEMP_ALTERNATE[ARRAY_SIZE(nct6779_temp_label) - 1]
static const u16 NCT6779_REG_TEMP_CRIT[ARRAY_SIZE(nct6779_temp_label) - 1]
= { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x709, 0x70a };
+/* NCT6791 specific data */
+
+#define NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE 0x28
+
+static const u16 NCT6791_REG_ALARM[NUM_REG_ALARM] = {
+ 0x459, 0x45A, 0x45B, 0x568, 0x45D };
+
+static const s8 NCT6791_ALARM_BITS[] = {
+ 0, 1, 2, 3, 8, 21, 20, 16, /* in0.. in7 */
+ 17, 24, 25, 26, 27, 28, 29, /* in8..in14 */
+ -1, /* unused */
+ 6, 7, 11, 10, 23, 33, /* fan1..fan6 */
+ -1, -1, /* unused */
+ 4, 5, 13, -1, -1, -1, /* temp1..temp6 */
+ 12, 9 }; /* intrusion0, intrusion1 */
+
+
+/* NCT6102D/NCT6106D specific data */
+
+#define NCT6106_REG_VBAT 0x318
+#define NCT6106_REG_DIODE 0x319
+#define NCT6106_DIODE_MASK 0x01
+
+static const u16 NCT6106_REG_IN_MAX[] = {
+ 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9e, 0xa0, 0xa2 };
+static const u16 NCT6106_REG_IN_MIN[] = {
+ 0x91, 0x93, 0x95, 0x97, 0x99, 0x9b, 0x9f, 0xa1, 0xa3 };
+static const u16 NCT6106_REG_IN[] = {
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x07, 0x08, 0x09 };
+
+static const u16 NCT6106_REG_TEMP[] = { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15 };
+static const u16 NCT6106_REG_TEMP_HYST[] = {
+ 0xc3, 0xc7, 0xcb, 0xcf, 0xd3, 0xd7 };
+static const u16 NCT6106_REG_TEMP_OVER[] = {
+ 0xc2, 0xc6, 0xca, 0xce, 0xd2, 0xd6 };
+static const u16 NCT6106_REG_TEMP_CRIT_L[] = {
+ 0xc0, 0xc4, 0xc8, 0xcc, 0xd0, 0xd4 };
+static const u16 NCT6106_REG_TEMP_CRIT_H[] = {
+ 0xc1, 0xc5, 0xc9, 0xcd, 0xd1, 0xd5 };
+static const u16 NCT6106_REG_TEMP_OFFSET[] = { 0x311, 0x312, 0x313 };
+static const u16 NCT6106_REG_TEMP_CONFIG[] = {
+ 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc };
+
+static const u16 NCT6106_REG_FAN[] = { 0x20, 0x22, 0x24 };
+static const u16 NCT6106_REG_FAN_MIN[] = { 0xe0, 0xe2, 0xe4 };
+static const u16 NCT6106_REG_FAN_PULSES[] = { 0xf6, 0xf6, 0xf6, 0, 0 };
+static const u16 NCT6106_FAN_PULSE_SHIFT[] = { 0, 2, 4, 0, 0 };
+
+static const u8 NCT6106_REG_PWM_MODE[] = { 0xf3, 0xf3, 0xf3 };
+static const u8 NCT6106_PWM_MODE_MASK[] = { 0x01, 0x02, 0x04 };
+static const u16 NCT6106_REG_PWM[] = { 0x119, 0x129, 0x139 };
+static const u16 NCT6106_REG_PWM_READ[] = { 0x4a, 0x4b, 0x4c };
+static const u16 NCT6106_REG_FAN_MODE[] = { 0x113, 0x123, 0x133 };
+static const u16 NCT6106_REG_TEMP_SEL[] = { 0x110, 0x120, 0x130 };
+static const u16 NCT6106_REG_TEMP_SOURCE[] = {
+ 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5 };
+
+static const u16 NCT6106_REG_CRITICAL_TEMP[] = { 0x11a, 0x12a, 0x13a };
+static const u16 NCT6106_REG_CRITICAL_TEMP_TOLERANCE[] = {
+ 0x11b, 0x12b, 0x13b };
+
+static const u16 NCT6106_REG_CRITICAL_PWM_ENABLE[] = { 0x11c, 0x12c, 0x13c };
+#define NCT6106_CRITICAL_PWM_ENABLE_MASK 0x10
+static const u16 NCT6106_REG_CRITICAL_PWM[] = { 0x11d, 0x12d, 0x13d };
+
+static const u16 NCT6106_REG_FAN_STEP_UP_TIME[] = { 0x114, 0x124, 0x134 };
+static const u16 NCT6106_REG_FAN_STEP_DOWN_TIME[] = { 0x115, 0x125, 0x135 };
+static const u16 NCT6106_REG_FAN_STOP_OUTPUT[] = { 0x116, 0x126, 0x136 };
+static const u16 NCT6106_REG_FAN_START_OUTPUT[] = { 0x117, 0x127, 0x137 };
+static const u16 NCT6106_REG_FAN_STOP_TIME[] = { 0x118, 0x128, 0x138 };
+static const u16 NCT6106_REG_TOLERANCE_H[] = { 0x112, 0x122, 0x132 };
+
+static const u16 NCT6106_REG_TARGET[] = { 0x111, 0x121, 0x131 };
+
+static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 };
+static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 };
+static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a };
+static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x18b };
+static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c };
+static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d };
+
+static const u16 NCT6106_REG_AUTO_TEMP[] = { 0x160, 0x170, 0x180 };
+static const u16 NCT6106_REG_AUTO_PWM[] = { 0x164, 0x174, 0x184 };
+
+static const u16 NCT6106_REG_ALARM[NUM_REG_ALARM] = {
+ 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d };
+
+static const s8 NCT6106_ALARM_BITS[] = {
+ 0, 1, 2, 3, 4, 5, 7, 8, /* in0.. in7 */
+ 9, -1, -1, -1, -1, -1, -1, /* in8..in14 */
+ -1, /* unused */
+ 32, 33, 34, -1, -1, /* fan1..fan5 */
+ -1, -1, -1, /* unused */
+ 16, 17, 18, 19, 20, 21, /* temp1..temp6 */
+ 48, -1 /* intrusion0, intrusion1 */
+};
+
+static const u16 NCT6106_REG_BEEP[NUM_REG_BEEP] = {
+ 0x3c0, 0x3c1, 0x3c2, 0x3c3, 0x3c4 };
+
+static const s8 NCT6106_BEEP_BITS[] = {
+ 0, 1, 2, 3, 4, 5, 7, 8, /* in0.. in7 */
+ 9, 10, 11, 12, -1, -1, -1, /* in8..in14 */
+ 32, /* global beep enable */
+ 24, 25, 26, 27, 28, /* fan1..fan5 */
+ -1, -1, -1, /* unused */
+ 16, 17, 18, 19, 20, 21, /* temp1..temp6 */
+ 34, -1 /* intrusion0, intrusion1 */
+};
+
+static const u16 NCT6106_REG_TEMP_ALTERNATE[ARRAY_SIZE(nct6776_temp_label) - 1]
+ = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x51, 0x52, 0x54 };
+
+static const u16 NCT6106_REG_TEMP_CRIT[ARRAY_SIZE(nct6776_temp_label) - 1]
+ = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x204, 0x205 };
+
static enum pwm_enable reg_to_pwm_enable(int pwm, int mode)
{
if (mode == 0 && pwm == 255)
@@ -550,13 +720,18 @@ static inline u8 in_to_reg(u32 val, u8 nr)
struct nct6775_data {
int addr; /* IO base of hw monitor block */
+ int sioreg; /* SIO register address */
enum kinds kind;
const char *name;
struct device *hwmon_dev;
+ struct attribute_group *group_in;
+ struct attribute_group *group_fan;
+ struct attribute_group *group_temp;
+ struct attribute_group *group_pwm;
- u16 reg_temp[4][NUM_TEMP]; /* 0=temp, 1=temp_over, 2=temp_hyst,
- * 3=temp_crit
+ u16 reg_temp[5][NUM_TEMP]; /* 0=temp, 1=temp_over, 2=temp_hyst,
+ * 3=temp_crit, 4=temp_lcrit
*/
u8 temp_src[NUM_TEMP];
u16 reg_temp_config[NUM_TEMP];
@@ -566,8 +741,10 @@ struct nct6775_data {
u16 REG_CONFIG;
u16 REG_VBAT;
u16 REG_DIODE;
+ u8 DIODE_MASK;
const s8 *ALARM_BITS;
+ const s8 *BEEP_BITS;
const u16 *REG_VIN;
const u16 *REG_IN_MINMAX[2];
@@ -577,6 +754,7 @@ struct nct6775_data {
const u16 *REG_FAN_MODE;
const u16 *REG_FAN_MIN;
const u16 *REG_FAN_PULSES;
+ const u16 *FAN_PULSE_SHIFT;
const u16 *REG_FAN_TIME[3];
const u16 *REG_TOLERANCE_H;
@@ -590,6 +768,10 @@ struct nct6775_data {
*/
const u16 *REG_PWM_READ;
+ const u16 *REG_CRITICAL_PWM_ENABLE;
+ u8 CRITICAL_PWM_ENABLE_MASK;
+ const u16 *REG_CRITICAL_PWM;
+
const u16 *REG_AUTO_TEMP;
const u16 *REG_AUTO_PWM;
@@ -604,6 +786,7 @@ struct nct6775_data {
const u16 *REG_TEMP_OFFSET;
const u16 *REG_ALARM;
+ const u16 *REG_BEEP;
unsigned int (*fan_from_reg)(u16 reg, unsigned int divreg);
unsigned int (*fan_from_reg_min)(u16 reg, unsigned int divreg);
@@ -616,26 +799,30 @@ struct nct6775_data {
u8 bank; /* current register bank */
u8 in_num; /* number of in inputs we have */
u8 in[15][3]; /* [0]=in, [1]=in_max, [2]=in_min */
- unsigned int rpm[5];
- u16 fan_min[5];
- u8 fan_pulses[5];
- u8 fan_div[5];
+ unsigned int rpm[NUM_FAN];
+ u16 fan_min[NUM_FAN];
+ u8 fan_pulses[NUM_FAN];
+ u8 fan_div[NUM_FAN];
u8 has_pwm;
u8 has_fan; /* some fan inputs can be disabled */
u8 has_fan_min; /* some fans don't have min register */
bool has_fan_div;
- u8 num_temp_alarms; /* 2 or 3 */
+ u8 num_temp_alarms; /* 2, 3, or 6 */
+ u8 num_temp_beeps; /* 2, 3, or 6 */
u8 temp_fixed_num; /* 3 or 6 */
u8 temp_type[NUM_TEMP_FIXED];
s8 temp_offset[NUM_TEMP_FIXED];
- s16 temp[4][NUM_TEMP]; /* 0=temp, 1=temp_over, 2=temp_hyst,
- * 3=temp_crit */
+ s16 temp[5][NUM_TEMP]; /* 0=temp, 1=temp_over, 2=temp_hyst,
+ * 3=temp_crit, 4=temp_lcrit */
u64 alarms;
+ u64 beeps;
u8 pwm_num; /* number of pwm */
- u8 pwm_mode[5]; /* 1->DC variable voltage, 0->PWM variable duty cycle */
- enum pwm_enable pwm_enable[5];
+ u8 pwm_mode[NUM_FAN]; /* 1->DC variable voltage,
+ * 0->PWM variable duty cycle
+ */
+ enum pwm_enable pwm_enable[NUM_FAN];
/* 0->off
* 1->manual
* 2->thermal cruise mode (also called SmartFan I)
@@ -643,35 +830,37 @@ struct nct6775_data {
* 4->SmartFan III
* 5->enhanced variable thermal cruise (SmartFan IV)
*/
- u8 pwm[7][5]; /* [0]=pwm, [1]=pwm_start, [2]=pwm_floor,
- * [3]=pwm_max, [4]=pwm_step,
- * [5]=weight_duty_step, [6]=weight_duty_base
- */
+ u8 pwm[7][NUM_FAN]; /* [0]=pwm, [1]=pwm_start, [2]=pwm_floor,
+ * [3]=pwm_max, [4]=pwm_step,
+ * [5]=weight_duty_step, [6]=weight_duty_base
+ */
- u8 target_temp[5];
+ u8 target_temp[NUM_FAN];
u8 target_temp_mask;
- u32 target_speed[5];
- u32 target_speed_tolerance[5];
+ u32 target_speed[NUM_FAN];
+ u32 target_speed_tolerance[NUM_FAN];
u8 speed_tolerance_limit;
- u8 temp_tolerance[2][5];
+ u8 temp_tolerance[2][NUM_FAN];
u8 tolerance_mask;
- u8 fan_time[3][5]; /* 0 = stop_time, 1 = step_up, 2 = step_down */
+ u8 fan_time[3][NUM_FAN]; /* 0 = stop_time, 1 = step_up, 2 = step_down */
/* Automatic fan speed control registers */
int auto_pwm_num;
- u8 auto_pwm[5][7];
- u8 auto_temp[5][7];
- u8 pwm_temp_sel[5];
- u8 pwm_weight_temp_sel[5];
- u8 weight_temp[3][5]; /* 0->temp_step, 1->temp_step_tol,
- * 2->temp_base
- */
+ u8 auto_pwm[NUM_FAN][7];
+ u8 auto_temp[NUM_FAN][7];
+ u8 pwm_temp_sel[NUM_FAN];
+ u8 pwm_weight_temp_sel[NUM_FAN];
+ u8 weight_temp[3][NUM_FAN]; /* 0->temp_step, 1->temp_step_tol,
+ * 2->temp_base
+ */
u8 vid;
u8 vrm;
+ bool have_vid;
+
u16 have_temp;
u16 have_temp_fixed;
u16 have_in;
@@ -688,9 +877,145 @@ struct nct6775_sio_data {
enum kinds kind;
};
+struct sensor_device_template {
+ struct device_attribute dev_attr;
+ union {
+ struct {
+ u8 nr;
+ u8 index;
+ } s;
+ int index;
+ } u;
+ bool s2; /* true if both index and nr are used */
+};
+
+struct sensor_device_attr_u {
+ union {
+ struct sensor_device_attribute a1;
+ struct sensor_device_attribute_2 a2;
+ } u;
+ char name[32];
+};
+
+#define __TEMPLATE_ATTR(_template, _mode, _show, _store) { \
+ .attr = {.name = _template, .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+}
+
+#define SENSOR_DEVICE_TEMPLATE(_template, _mode, _show, _store, _index) \
+ { .dev_attr = __TEMPLATE_ATTR(_template, _mode, _show, _store), \
+ .u.index = _index, \
+ .s2 = false }
+
+#define SENSOR_DEVICE_TEMPLATE_2(_template, _mode, _show, _store, \
+ _nr, _index) \
+ { .dev_attr = __TEMPLATE_ATTR(_template, _mode, _show, _store), \
+ .u.s.index = _index, \
+ .u.s.nr = _nr, \
+ .s2 = true }
+
+#define SENSOR_TEMPLATE(_name, _template, _mode, _show, _store, _index) \
+static struct sensor_device_template sensor_dev_template_##_name \
+ = SENSOR_DEVICE_TEMPLATE(_template, _mode, _show, _store, \
+ _index)
+
+#define SENSOR_TEMPLATE_2(_name, _template, _mode, _show, _store, \
+ _nr, _index) \
+static struct sensor_device_template sensor_dev_template_##_name \
+ = SENSOR_DEVICE_TEMPLATE_2(_template, _mode, _show, _store, \
+ _nr, _index)
+
+struct sensor_template_group {
+ struct sensor_device_template **templates;
+ umode_t (*is_visible)(struct kobject *, struct attribute *, int);
+ int base;
+};
+
+static struct attribute_group *
+nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
+ int repeat)
+{
+ struct attribute_group *group;
+ struct sensor_device_attr_u *su;
+ struct sensor_device_attribute *a;
+ struct sensor_device_attribute_2 *a2;
+ struct attribute **attrs;
+ struct sensor_device_template **t;
+ int err, i, j, count;
+
+ if (repeat <= 0)
+ return ERR_PTR(-EINVAL);
+
+ t = tg->templates;
+ for (count = 0; *t; t++, count++)
+ ;
+
+ if (count == 0)
+ return ERR_PTR(-EINVAL);
+
+ group = devm_kzalloc(dev, sizeof(*group), GFP_KERNEL);
+ if (group == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ attrs = devm_kzalloc(dev, sizeof(*attrs) * (repeat * count + 1),
+ GFP_KERNEL);
+ if (attrs == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ su = devm_kzalloc(dev, sizeof(*su) * repeat * count,
+ GFP_KERNEL);
+ if (su == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ group->attrs = attrs;
+ group->is_visible = tg->is_visible;
+
+ for (i = 0; i < repeat; i++) {
+ t = tg->templates;
+ for (j = 0; *t != NULL; j++) {
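+ /* expand the template name, e.g. "in%d_input", with tg->base + i */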
+ snprintf(su->name, sizeof(su->name),
+ (*t)->dev_attr.attr.name, tg->base + i);
+ if ((*t)->s2) {
+ a2 = &su->u.a2;
+ a2->dev_attr.attr.name = su->name;
+ a2->nr = (*t)->u.s.nr + i;
+ a2->index = (*t)->u.s.index;
+ a2->dev_attr.attr.mode =
+ (*t)->dev_attr.attr.mode;
+ a2->dev_attr.show = (*t)->dev_attr.show;
+ a2->dev_attr.store = (*t)->dev_attr.store;
+ *attrs = &a2->dev_attr.attr;
+ } else {
+ a = &su->u.a1;
+ a->dev_attr.attr.name = su->name;
+ a->index = (*t)->u.index + i;
+ a->dev_attr.attr.mode =
+ (*t)->dev_attr.attr.mode;
+ a->dev_attr.show = (*t)->dev_attr.show;
+ a->dev_attr.store = (*t)->dev_attr.store;
+ *attrs = &a->dev_attr.attr;
+ }
+ attrs++;
+ su++;
+ t++;
+ }
+ }
+
+ err = sysfs_create_group(&dev->kobj, group);
+ if (err)
+ return ERR_PTR(-ENOMEM);
+
+ return group;
+}
+
static bool is_word_sized(struct nct6775_data *data, u16 reg)
{
switch (data->kind) {
+ case nct6106:
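+ /* NCT6106 fan, fan min and target registers are 16-bit */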
+ return reg == 0x20 || reg == 0x22 || reg == 0x24 ||
+ reg == 0xe0 || reg == 0xe2 || reg == 0xe4 ||
+ reg == 0x111 || reg == 0x121 || reg == 0x131;
case nct6775:
return (((reg & 0xff00) == 0x100 ||
(reg & 0xff00) == 0x200) &&
@@ -714,8 +1039,9 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
((reg & 0xfff0) == 0x650 && (reg & 0x000f) >= 0x06) ||
reg == 0x73 || reg == 0x75 || reg == 0x77;
case nct6779:
+ case nct6791:
return reg == 0x150 || reg == 0x153 || reg == 0x155 ||
- ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x09) ||
+ ((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) ||
reg == 0x402 ||
reg == 0x63a || reg == 0x63c || reg == 0x63e ||
reg == 0x640 || reg == 0x642 ||
@@ -1056,15 +1382,17 @@ static void nct6775_update_pwm_limits(struct device *dev)
case nct6776:
data->auto_pwm[i][data->auto_pwm_num] = 0xff;
break;
+ case nct6106:
case nct6779:
+ case nct6791:
reg = nct6775_read_value(data,
- NCT6779_REG_CRITICAL_PWM_ENABLE[i]);
- if (reg & 1)
- data->auto_pwm[i][data->auto_pwm_num] =
- nct6775_read_value(data,
- NCT6779_REG_CRITICAL_PWM[i]);
+ data->REG_CRITICAL_PWM_ENABLE[i]);
+ if (reg & data->CRITICAL_PWM_ENABLE_MASK)
+ reg = nct6775_read_value(data,
+ data->REG_CRITICAL_PWM[i]);
else
- data->auto_pwm[i][data->auto_pwm_num] = 0xff;
+ reg = 0xff;
+ data->auto_pwm[i][data->auto_pwm_num] = reg;
break;
}
}
@@ -1110,7 +1438,8 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
data->fan_min[i] = nct6775_read_value(data,
data->REG_FAN_MIN[i]);
data->fan_pulses[i] =
- nct6775_read_value(data, data->REG_FAN_PULSES[i]);
+ (nct6775_read_value(data, data->REG_FAN_PULSES[i])
+ >> data->FAN_PULSE_SHIFT[i]) & 0x03;
nct6775_select_fan_div(dev, data, i, reg);
}
@@ -1143,6 +1472,15 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
data->alarms |= ((u64)alarm) << (i << 3);
}
+ data->beeps = 0;
+ for (i = 0; i < NUM_REG_BEEP; i++) {
+ u8 beep;
+ if (!data->REG_BEEP[i])
+ continue;
+ beep = nct6775_read_value(data, data->REG_BEEP[i]);
+ data->beeps |= ((u64)beep) << (i << 3);
+ }
+
data->last_updated = jiffies;
data->valid = true;
}
@@ -1230,224 +1568,138 @@ show_temp_alarm(struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%u\n", alarm);
}
-static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO, show_in_reg, NULL, 0, 0);
-static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO, show_in_reg, NULL, 1, 0);
-static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO, show_in_reg, NULL, 2, 0);
-static SENSOR_DEVICE_ATTR_2(in3_input, S_IRUGO, show_in_reg, NULL, 3, 0);
-static SENSOR_DEVICE_ATTR_2(in4_input, S_IRUGO, show_in_reg, NULL, 4, 0);
-static SENSOR_DEVICE_ATTR_2(in5_input, S_IRUGO, show_in_reg, NULL, 5, 0);
-static SENSOR_DEVICE_ATTR_2(in6_input, S_IRUGO, show_in_reg, NULL, 6, 0);
-static SENSOR_DEVICE_ATTR_2(in7_input, S_IRUGO, show_in_reg, NULL, 7, 0);
-static SENSOR_DEVICE_ATTR_2(in8_input, S_IRUGO, show_in_reg, NULL, 8, 0);
-static SENSOR_DEVICE_ATTR_2(in9_input, S_IRUGO, show_in_reg, NULL, 9, 0);
-static SENSOR_DEVICE_ATTR_2(in10_input, S_IRUGO, show_in_reg, NULL, 10, 0);
-static SENSOR_DEVICE_ATTR_2(in11_input, S_IRUGO, show_in_reg, NULL, 11, 0);
-static SENSOR_DEVICE_ATTR_2(in12_input, S_IRUGO, show_in_reg, NULL, 12, 0);
-static SENSOR_DEVICE_ATTR_2(in13_input, S_IRUGO, show_in_reg, NULL, 13, 0);
-static SENSOR_DEVICE_ATTR_2(in14_input, S_IRUGO, show_in_reg, NULL, 14, 0);
-
-static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2);
-static SENSOR_DEVICE_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3);
-static SENSOR_DEVICE_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 4);
-static SENSOR_DEVICE_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 5);
-static SENSOR_DEVICE_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 6);
-static SENSOR_DEVICE_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 7);
-static SENSOR_DEVICE_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 8);
-static SENSOR_DEVICE_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 9);
-static SENSOR_DEVICE_ATTR(in10_alarm, S_IRUGO, show_alarm, NULL, 10);
-static SENSOR_DEVICE_ATTR(in11_alarm, S_IRUGO, show_alarm, NULL, 11);
-static SENSOR_DEVICE_ATTR(in12_alarm, S_IRUGO, show_alarm, NULL, 12);
-static SENSOR_DEVICE_ATTR(in13_alarm, S_IRUGO, show_alarm, NULL, 13);
-static SENSOR_DEVICE_ATTR(in14_alarm, S_IRUGO, show_alarm, NULL, 14);
-
-static SENSOR_DEVICE_ATTR_2(in0_min, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 0, 1);
-static SENSOR_DEVICE_ATTR_2(in1_min, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 1, 1);
-static SENSOR_DEVICE_ATTR_2(in2_min, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 2, 1);
-static SENSOR_DEVICE_ATTR_2(in3_min, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 3, 1);
-static SENSOR_DEVICE_ATTR_2(in4_min, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 4, 1);
-static SENSOR_DEVICE_ATTR_2(in5_min, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 5, 1);
-static SENSOR_DEVICE_ATTR_2(in6_min, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 6, 1);
-static SENSOR_DEVICE_ATTR_2(in7_min, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 7, 1);
-static SENSOR_DEVICE_ATTR_2(in8_min, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 8, 1);
-static SENSOR_DEVICE_ATTR_2(in9_min, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 9, 1);
-static SENSOR_DEVICE_ATTR_2(in10_min, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 10, 1);
-static SENSOR_DEVICE_ATTR_2(in11_min, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 11, 1);
-static SENSOR_DEVICE_ATTR_2(in12_min, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 12, 1);
-static SENSOR_DEVICE_ATTR_2(in13_min, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 13, 1);
-static SENSOR_DEVICE_ATTR_2(in14_min, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 14, 1);
-
-static SENSOR_DEVICE_ATTR_2(in0_max, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 0, 2);
-static SENSOR_DEVICE_ATTR_2(in1_max, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 1, 2);
-static SENSOR_DEVICE_ATTR_2(in2_max, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 2, 2);
-static SENSOR_DEVICE_ATTR_2(in3_max, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 3, 2);
-static SENSOR_DEVICE_ATTR_2(in4_max, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 4, 2);
-static SENSOR_DEVICE_ATTR_2(in5_max, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 5, 2);
-static SENSOR_DEVICE_ATTR_2(in6_max, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 6, 2);
-static SENSOR_DEVICE_ATTR_2(in7_max, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 7, 2);
-static SENSOR_DEVICE_ATTR_2(in8_max, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 8, 2);
-static SENSOR_DEVICE_ATTR_2(in9_max, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 9, 2);
-static SENSOR_DEVICE_ATTR_2(in10_max, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 10, 2);
-static SENSOR_DEVICE_ATTR_2(in11_max, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 11, 2);
-static SENSOR_DEVICE_ATTR_2(in12_max, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 12, 2);
-static SENSOR_DEVICE_ATTR_2(in13_max, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 13, 2);
-static SENSOR_DEVICE_ATTR_2(in14_max, S_IWUSR | S_IRUGO, show_in_reg,
- store_in_reg, 14, 2);
-
-static struct attribute *nct6775_attributes_in[15][5] = {
- {
- &sensor_dev_attr_in0_input.dev_attr.attr,
- &sensor_dev_attr_in0_min.dev_attr.attr,
- &sensor_dev_attr_in0_max.dev_attr.attr,
- &sensor_dev_attr_in0_alarm.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_in1_input.dev_attr.attr,
- &sensor_dev_attr_in1_min.dev_attr.attr,
- &sensor_dev_attr_in1_max.dev_attr.attr,
- &sensor_dev_attr_in1_alarm.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_in2_input.dev_attr.attr,
- &sensor_dev_attr_in2_min.dev_attr.attr,
- &sensor_dev_attr_in2_max.dev_attr.attr,
- &sensor_dev_attr_in2_alarm.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_in3_input.dev_attr.attr,
- &sensor_dev_attr_in3_min.dev_attr.attr,
- &sensor_dev_attr_in3_max.dev_attr.attr,
- &sensor_dev_attr_in3_alarm.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_in4_input.dev_attr.attr,
- &sensor_dev_attr_in4_min.dev_attr.attr,
- &sensor_dev_attr_in4_max.dev_attr.attr,
- &sensor_dev_attr_in4_alarm.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_in5_input.dev_attr.attr,
- &sensor_dev_attr_in5_min.dev_attr.attr,
- &sensor_dev_attr_in5_max.dev_attr.attr,
- &sensor_dev_attr_in5_alarm.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_in6_input.dev_attr.attr,
- &sensor_dev_attr_in6_min.dev_attr.attr,
- &sensor_dev_attr_in6_max.dev_attr.attr,
- &sensor_dev_attr_in6_alarm.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_in7_input.dev_attr.attr,
- &sensor_dev_attr_in7_min.dev_attr.attr,
- &sensor_dev_attr_in7_max.dev_attr.attr,
- &sensor_dev_attr_in7_alarm.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_in8_input.dev_attr.attr,
- &sensor_dev_attr_in8_min.dev_attr.attr,
- &sensor_dev_attr_in8_max.dev_attr.attr,
- &sensor_dev_attr_in8_alarm.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_in9_input.dev_attr.attr,
- &sensor_dev_attr_in9_min.dev_attr.attr,
- &sensor_dev_attr_in9_max.dev_attr.attr,
- &sensor_dev_attr_in9_alarm.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_in10_input.dev_attr.attr,
- &sensor_dev_attr_in10_min.dev_attr.attr,
- &sensor_dev_attr_in10_max.dev_attr.attr,
- &sensor_dev_attr_in10_alarm.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_in11_input.dev_attr.attr,
- &sensor_dev_attr_in11_min.dev_attr.attr,
- &sensor_dev_attr_in11_max.dev_attr.attr,
- &sensor_dev_attr_in11_alarm.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_in12_input.dev_attr.attr,
- &sensor_dev_attr_in12_min.dev_attr.attr,
- &sensor_dev_attr_in12_max.dev_attr.attr,
- &sensor_dev_attr_in12_alarm.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_in13_input.dev_attr.attr,
- &sensor_dev_attr_in13_min.dev_attr.attr,
- &sensor_dev_attr_in13_max.dev_attr.attr,
- &sensor_dev_attr_in13_alarm.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_in14_input.dev_attr.attr,
- &sensor_dev_attr_in14_min.dev_attr.attr,
- &sensor_dev_attr_in14_max.dev_attr.attr,
- &sensor_dev_attr_in14_alarm.dev_attr.attr,
- NULL
- },
+static ssize_t
+show_beep(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+ struct nct6775_data *data = nct6775_update_device(dev);
+ int nr = data->BEEP_BITS[sattr->index];
+
+ return sprintf(buf, "%u\n",
+ (unsigned int)((data->beeps >> nr) & 0x01));
+}
+
+static ssize_t
+store_beep(struct device *dev, struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ struct nct6775_data *data = dev_get_drvdata(dev);
+ int nr = data->BEEP_BITS[sattr->index];
+ int regindex = nr >> 3;
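+ /* data->beeps caches all beep bits; each REG_BEEP register holds 8 of them */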
+ unsigned long val;
+
+ int err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+ if (val > 1)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ if (val)
+ data->beeps |= (1ULL << nr);
+ else
+ data->beeps &= ~(1ULL << nr);
+ nct6775_write_value(data, data->REG_BEEP[regindex],
+ (data->beeps >> (regindex << 3)) & 0xff);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t
+show_temp_beep(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute *sattr = to_sensor_dev_attr(attr);
+ struct nct6775_data *data = nct6775_update_device(dev);
+ unsigned int beep = 0;
+ int nr;
+
+ /*
+ * For temperatures, there is no fixed mapping from registers to beep
+ * enable bits. Beep enable bits are determined by the temperature
+ * source mapping.
+ */
+ nr = find_temp_source(data, sattr->index, data->num_temp_beeps);
+ if (nr >= 0) {
+ int bit = data->BEEP_BITS[nr + TEMP_ALARM_BASE];
+ beep = (data->beeps >> bit) & 0x01;
+ }
+ return sprintf(buf, "%u\n", beep);
+}
+
+static ssize_t
+store_temp_beep(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+ struct nct6775_data *data = dev_get_drvdata(dev);
+ int nr, bit, regindex;
+ unsigned long val;
+
+ int err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+ if (val > 1)
+ return -EINVAL;
+
+ nr = find_temp_source(data, sattr->index, data->num_temp_beeps);
+ if (nr < 0)
+ return -ENODEV;
+
+ bit = data->BEEP_BITS[nr + TEMP_ALARM_BASE];
+ regindex = bit >> 3;
+
+ mutex_lock(&data->update_lock);
+ if (val)
+ data->beeps |= (1ULL << bit);
+ else
+ data->beeps &= ~(1ULL << bit);
+ nct6775_write_value(data, data->REG_BEEP[regindex],
+ (data->beeps >> (regindex << 3)) & 0xff);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static umode_t nct6775_in_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nct6775_data *data = dev_get_drvdata(dev);
+ int in = index / 5; /* voltage index */
+
+ if (!(data->have_in & (1 << in)))
+ return 0;
+
+ return attr->mode;
+}
+
+SENSOR_TEMPLATE_2(in_input, "in%d_input", S_IRUGO, show_in_reg, NULL, 0, 0);
+SENSOR_TEMPLATE(in_alarm, "in%d_alarm", S_IRUGO, show_alarm, NULL, 0);
+SENSOR_TEMPLATE(in_beep, "in%d_beep", S_IWUSR | S_IRUGO, show_beep, store_beep,
+ 0);
+SENSOR_TEMPLATE_2(in_min, "in%d_min", S_IWUSR | S_IRUGO, show_in_reg,
+ store_in_reg, 0, 1);
+SENSOR_TEMPLATE_2(in_max, "in%d_max", S_IWUSR | S_IRUGO, show_in_reg,
+ store_in_reg, 0, 2);
+
+/*
+ * nct6775_in_is_visible uses the index into the following array
+ * to determine if attributes should be created or not.
+ * Any change in order or content must be matched.
+ */
+static struct sensor_device_template *nct6775_attributes_in_template[] = {
+ &sensor_dev_template_in_input,
+ &sensor_dev_template_in_alarm,
+ &sensor_dev_template_in_beep,
+ &sensor_dev_template_in_min,
+ &sensor_dev_template_in_max,
+ NULL
};
-static const struct attribute_group nct6775_group_in[15] = {
- { .attrs = nct6775_attributes_in[0] },
- { .attrs = nct6775_attributes_in[1] },
- { .attrs = nct6775_attributes_in[2] },
- { .attrs = nct6775_attributes_in[3] },
- { .attrs = nct6775_attributes_in[4] },
- { .attrs = nct6775_attributes_in[5] },
- { .attrs = nct6775_attributes_in[6] },
- { .attrs = nct6775_attributes_in[7] },
- { .attrs = nct6775_attributes_in[8] },
- { .attrs = nct6775_attributes_in[9] },
- { .attrs = nct6775_attributes_in[10] },
- { .attrs = nct6775_attributes_in[11] },
- { .attrs = nct6775_attributes_in[12] },
- { .attrs = nct6775_attributes_in[13] },
- { .attrs = nct6775_attributes_in[14] },
+static struct sensor_template_group nct6775_in_template_group = {
+ .templates = nct6775_attributes_in_template,
+ .is_visible = nct6775_in_is_visible,
};
static ssize_t
@@ -1592,6 +1844,7 @@ store_fan_pulses(struct device *dev, struct device_attribute *attr,
int nr = sattr->index;
unsigned long val;
int err;
+ u8 reg;
err = kstrtoul(buf, 10, &val);
if (err < 0)
@@ -1602,60 +1855,68 @@ store_fan_pulses(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->fan_pulses[nr] = val & 3;
- nct6775_write_value(data, data->REG_FAN_PULSES[nr], val & 3);
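+ /* read-modify-write: e.g. the NCT6106 packs three fans into one pulse register */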
+ reg = nct6775_read_value(data, data->REG_FAN_PULSES[nr]);
+ reg &= ~(0x03 << data->FAN_PULSE_SHIFT[nr]);
+ reg |= (val & 3) << data->FAN_PULSE_SHIFT[nr];
+ nct6775_write_value(data, data->REG_FAN_PULSES[nr], reg);
mutex_unlock(&data->update_lock);
return count;
}
-static struct sensor_device_attribute sda_fan_input[] = {
- SENSOR_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0),
- SENSOR_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1),
- SENSOR_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2),
- SENSOR_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3),
- SENSOR_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 4),
-};
+static umode_t nct6775_fan_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nct6775_data *data = dev_get_drvdata(dev);
+ int fan = index / 6; /* fan index */
+ int nr = index % 6; /* attribute index */
-static struct sensor_device_attribute sda_fan_alarm[] = {
- SENSOR_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, FAN_ALARM_BASE),
- SENSOR_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, FAN_ALARM_BASE + 1),
- SENSOR_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, FAN_ALARM_BASE + 2),
- SENSOR_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, FAN_ALARM_BASE + 3),
- SENSOR_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, FAN_ALARM_BASE + 4),
-};
+ if (!(data->has_fan & (1 << fan)))
+ return 0;
-static struct sensor_device_attribute sda_fan_min[] = {
- SENSOR_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 0),
- SENSOR_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 1),
- SENSOR_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 2),
- SENSOR_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 3),
- SENSOR_ATTR(fan5_min, S_IWUSR | S_IRUGO, show_fan_min,
- store_fan_min, 4),
-};
+ if (nr == 1 && data->ALARM_BITS[FAN_ALARM_BASE + fan] == -1)
+ return 0;
+ if (nr == 2 && data->BEEP_BITS[FAN_ALARM_BASE + fan] == -1)
+ return 0;
+ if (nr == 4 && !(data->has_fan_min & (1 << fan)))
+ return 0;
+ if (nr == 5 && data->kind != nct6775)
+ return 0;
+
+ return attr->mode;
+}
-static struct sensor_device_attribute sda_fan_pulses[] = {
- SENSOR_ATTR(fan1_pulses, S_IWUSR | S_IRUGO, show_fan_pulses,
- store_fan_pulses, 0),
- SENSOR_ATTR(fan2_pulses, S_IWUSR | S_IRUGO, show_fan_pulses,
- store_fan_pulses, 1),
- SENSOR_ATTR(fan3_pulses, S_IWUSR | S_IRUGO, show_fan_pulses,
- store_fan_pulses, 2),
- SENSOR_ATTR(fan4_pulses, S_IWUSR | S_IRUGO, show_fan_pulses,
- store_fan_pulses, 3),
- SENSOR_ATTR(fan5_pulses, S_IWUSR | S_IRUGO, show_fan_pulses,
- store_fan_pulses, 4),
+SENSOR_TEMPLATE(fan_input, "fan%d_input", S_IRUGO, show_fan, NULL, 0);
+SENSOR_TEMPLATE(fan_alarm, "fan%d_alarm", S_IRUGO, show_alarm, NULL,
+ FAN_ALARM_BASE);
+SENSOR_TEMPLATE(fan_beep, "fan%d_beep", S_IWUSR | S_IRUGO, show_beep,
+ store_beep, FAN_ALARM_BASE);
+SENSOR_TEMPLATE(fan_pulses, "fan%d_pulses", S_IWUSR | S_IRUGO, show_fan_pulses,
+ store_fan_pulses, 0);
+SENSOR_TEMPLATE(fan_min, "fan%d_min", S_IWUSR | S_IRUGO, show_fan_min,
+ store_fan_min, 0);
+SENSOR_TEMPLATE(fan_div, "fan%d_div", S_IRUGO, show_fan_div, NULL, 0);
+
+/*
+ * nct6775_fan_is_visible uses the index into the following array
+ * to determine if attributes should be created or not.
+ * Any change in order or content must be matched.
+ */
+static struct sensor_device_template *nct6775_attributes_fan_template[] = {
+ &sensor_dev_template_fan_input,
+ &sensor_dev_template_fan_alarm, /* 1 */
+ &sensor_dev_template_fan_beep, /* 2 */
+ &sensor_dev_template_fan_pulses,
+ &sensor_dev_template_fan_min, /* 4 */
+ &sensor_dev_template_fan_div, /* 5 */
+ NULL
};
-static struct sensor_device_attribute sda_fan_div[] = {
- SENSOR_ATTR(fan1_div, S_IRUGO, show_fan_div, NULL, 0),
- SENSOR_ATTR(fan2_div, S_IRUGO, show_fan_div, NULL, 1),
- SENSOR_ATTR(fan3_div, S_IRUGO, show_fan_div, NULL, 2),
- SENSOR_ATTR(fan4_div, S_IRUGO, show_fan_div, NULL, 3),
- SENSOR_ATTR(fan5_div, S_IRUGO, show_fan_div, NULL, 4),
+static struct sensor_template_group nct6775_fan_template_group = {
+ .templates = nct6775_attributes_fan_template,
+ .is_visible = nct6775_fan_is_visible,
+ .base = 1,
};
static ssize_t
@@ -1752,7 +2013,7 @@ store_temp_type(struct device *dev, struct device_attribute *attr,
int nr = sattr->index;
unsigned long val;
int err;
- u8 vbat, diode, bit;
+ u8 vbat, diode, vbit, dbit;
err = kstrtoul(buf, 10, &val);
if (err < 0)
@@ -1764,16 +2025,17 @@ store_temp_type(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->temp_type[nr] = val;
- vbat = nct6775_read_value(data, data->REG_VBAT) & ~(0x02 << nr);
- diode = nct6775_read_value(data, data->REG_DIODE) & ~(0x02 << nr);
- bit = 0x02 << nr;
+ vbit = 0x02 << nr;
+ dbit = data->DIODE_MASK << nr;
+ vbat = nct6775_read_value(data, data->REG_VBAT) & ~vbit;
+ diode = nct6775_read_value(data, data->REG_DIODE) & ~dbit;
switch (val) {
case 1: /* CPU diode (diode, current mode) */
- vbat |= bit;
- diode |= bit;
+ vbat |= vbit;
+ diode |= dbit;
break;
case 3: /* diode, voltage mode */
- vbat |= bit;
+ vbat |= dbit;
break;
case 4: /* thermistor */
break;
@@ -1785,142 +2047,83 @@ store_temp_type(struct device *dev, struct device_attribute *attr,
return count;
}
-static struct sensor_device_attribute_2 sda_temp_input[] = {
- SENSOR_ATTR_2(temp1_input, S_IRUGO, show_temp, NULL, 0, 0),
- SENSOR_ATTR_2(temp2_input, S_IRUGO, show_temp, NULL, 1, 0),
- SENSOR_ATTR_2(temp3_input, S_IRUGO, show_temp, NULL, 2, 0),
- SENSOR_ATTR_2(temp4_input, S_IRUGO, show_temp, NULL, 3, 0),
- SENSOR_ATTR_2(temp5_input, S_IRUGO, show_temp, NULL, 4, 0),
- SENSOR_ATTR_2(temp6_input, S_IRUGO, show_temp, NULL, 5, 0),
- SENSOR_ATTR_2(temp7_input, S_IRUGO, show_temp, NULL, 6, 0),
- SENSOR_ATTR_2(temp8_input, S_IRUGO, show_temp, NULL, 7, 0),
- SENSOR_ATTR_2(temp9_input, S_IRUGO, show_temp, NULL, 8, 0),
- SENSOR_ATTR_2(temp10_input, S_IRUGO, show_temp, NULL, 9, 0),
-};
+static umode_t nct6775_temp_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nct6775_data *data = dev_get_drvdata(dev);
+ int temp = index / 10; /* temp index */
+ int nr = index % 10; /* attribute index */
-static struct sensor_device_attribute sda_temp_label[] = {
- SENSOR_ATTR(temp1_label, S_IRUGO, show_temp_label, NULL, 0),
- SENSOR_ATTR(temp2_label, S_IRUGO, show_temp_label, NULL, 1),
- SENSOR_ATTR(temp3_label, S_IRUGO, show_temp_label, NULL, 2),
- SENSOR_ATTR(temp4_label, S_IRUGO, show_temp_label, NULL, 3),
- SENSOR_ATTR(temp5_label, S_IRUGO, show_temp_label, NULL, 4),
- SENSOR_ATTR(temp6_label, S_IRUGO, show_temp_label, NULL, 5),
- SENSOR_ATTR(temp7_label, S_IRUGO, show_temp_label, NULL, 6),
- SENSOR_ATTR(temp8_label, S_IRUGO, show_temp_label, NULL, 7),
- SENSOR_ATTR(temp9_label, S_IRUGO, show_temp_label, NULL, 8),
- SENSOR_ATTR(temp10_label, S_IRUGO, show_temp_label, NULL, 9),
-};
+ if (!(data->have_temp & (1 << temp)))
+ return 0;
-static struct sensor_device_attribute_2 sda_temp_max[] = {
- SENSOR_ATTR_2(temp1_max, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 0, 1),
- SENSOR_ATTR_2(temp2_max, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 1, 1),
- SENSOR_ATTR_2(temp3_max, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 2, 1),
- SENSOR_ATTR_2(temp4_max, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 3, 1),
- SENSOR_ATTR_2(temp5_max, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 4, 1),
- SENSOR_ATTR_2(temp6_max, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 5, 1),
- SENSOR_ATTR_2(temp7_max, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 6, 1),
- SENSOR_ATTR_2(temp8_max, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 7, 1),
- SENSOR_ATTR_2(temp9_max, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 8, 1),
- SENSOR_ATTR_2(temp10_max, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 9, 1),
-};
+ if (nr == 2 && find_temp_source(data, temp, data->num_temp_alarms) < 0)
+ return 0; /* alarm */
-static struct sensor_device_attribute_2 sda_temp_max_hyst[] = {
- SENSOR_ATTR_2(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 0, 2),
- SENSOR_ATTR_2(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 1, 2),
- SENSOR_ATTR_2(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 2, 2),
- SENSOR_ATTR_2(temp4_max_hyst, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 3, 2),
- SENSOR_ATTR_2(temp5_max_hyst, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 4, 2),
- SENSOR_ATTR_2(temp6_max_hyst, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 5, 2),
- SENSOR_ATTR_2(temp7_max_hyst, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 6, 2),
- SENSOR_ATTR_2(temp8_max_hyst, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 7, 2),
- SENSOR_ATTR_2(temp9_max_hyst, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 8, 2),
- SENSOR_ATTR_2(temp10_max_hyst, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 9, 2),
-};
+ if (nr == 3 && find_temp_source(data, temp, data->num_temp_beeps) < 0)
+ return 0; /* beep */
-static struct sensor_device_attribute_2 sda_temp_crit[] = {
- SENSOR_ATTR_2(temp1_crit, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 0, 3),
- SENSOR_ATTR_2(temp2_crit, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 1, 3),
- SENSOR_ATTR_2(temp3_crit, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 2, 3),
- SENSOR_ATTR_2(temp4_crit, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 3, 3),
- SENSOR_ATTR_2(temp5_crit, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 4, 3),
- SENSOR_ATTR_2(temp6_crit, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 5, 3),
- SENSOR_ATTR_2(temp7_crit, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 6, 3),
- SENSOR_ATTR_2(temp8_crit, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 7, 3),
- SENSOR_ATTR_2(temp9_crit, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 8, 3),
- SENSOR_ATTR_2(temp10_crit, S_IRUGO | S_IWUSR, show_temp, store_temp,
- 9, 3),
-};
+ if (nr == 4 && !data->reg_temp[1][temp]) /* max */
+ return 0;
-static struct sensor_device_attribute sda_temp_offset[] = {
- SENSOR_ATTR(temp1_offset, S_IRUGO | S_IWUSR, show_temp_offset,
- store_temp_offset, 0),
- SENSOR_ATTR(temp2_offset, S_IRUGO | S_IWUSR, show_temp_offset,
- store_temp_offset, 1),
- SENSOR_ATTR(temp3_offset, S_IRUGO | S_IWUSR, show_temp_offset,
- store_temp_offset, 2),
- SENSOR_ATTR(temp4_offset, S_IRUGO | S_IWUSR, show_temp_offset,
- store_temp_offset, 3),
- SENSOR_ATTR(temp5_offset, S_IRUGO | S_IWUSR, show_temp_offset,
- store_temp_offset, 4),
- SENSOR_ATTR(temp6_offset, S_IRUGO | S_IWUSR, show_temp_offset,
- store_temp_offset, 5),
-};
+ if (nr == 5 && !data->reg_temp[2][temp]) /* max_hyst */
+ return 0;
+
+ if (nr == 6 && !data->reg_temp[3][temp]) /* crit */
+ return 0;
+
+ if (nr == 7 && !data->reg_temp[4][temp]) /* lcrit */
+ return 0;
+
+ /* offset and type only apply to fixed sensors */
+ if (nr > 7 && !(data->have_temp_fixed & (1 << temp)))
+ return 0;
-static struct sensor_device_attribute sda_temp_type[] = {
- SENSOR_ATTR(temp1_type, S_IRUGO | S_IWUSR, show_temp_type,
- store_temp_type, 0),
- SENSOR_ATTR(temp2_type, S_IRUGO | S_IWUSR, show_temp_type,
- store_temp_type, 1),
- SENSOR_ATTR(temp3_type, S_IRUGO | S_IWUSR, show_temp_type,
- store_temp_type, 2),
- SENSOR_ATTR(temp4_type, S_IRUGO | S_IWUSR, show_temp_type,
- store_temp_type, 3),
- SENSOR_ATTR(temp5_type, S_IRUGO | S_IWUSR, show_temp_type,
- store_temp_type, 4),
- SENSOR_ATTR(temp6_type, S_IRUGO | S_IWUSR, show_temp_type,
- store_temp_type, 5),
+ return attr->mode;
+}
+
+SENSOR_TEMPLATE_2(temp_input, "temp%d_input", S_IRUGO, show_temp, NULL, 0, 0);
+SENSOR_TEMPLATE(temp_label, "temp%d_label", S_IRUGO, show_temp_label, NULL, 0);
+SENSOR_TEMPLATE_2(temp_max, "temp%d_max", S_IRUGO | S_IWUSR, show_temp,
+ store_temp, 0, 1);
+SENSOR_TEMPLATE_2(temp_max_hyst, "temp%d_max_hyst", S_IRUGO | S_IWUSR,
+ show_temp, store_temp, 0, 2);
+SENSOR_TEMPLATE_2(temp_crit, "temp%d_crit", S_IRUGO | S_IWUSR, show_temp,
+ store_temp, 0, 3);
+SENSOR_TEMPLATE_2(temp_lcrit, "temp%d_lcrit", S_IRUGO | S_IWUSR, show_temp,
+ store_temp, 0, 4);
+SENSOR_TEMPLATE(temp_offset, "temp%d_offset", S_IRUGO | S_IWUSR,
+ show_temp_offset, store_temp_offset, 0);
+SENSOR_TEMPLATE(temp_type, "temp%d_type", S_IRUGO | S_IWUSR, show_temp_type,
+ store_temp_type, 0);
+SENSOR_TEMPLATE(temp_alarm, "temp%d_alarm", S_IRUGO, show_temp_alarm, NULL, 0);
+SENSOR_TEMPLATE(temp_beep, "temp%d_beep", S_IRUGO | S_IWUSR, show_temp_beep,
+ store_temp_beep, 0);
+
+/*
+ * nct6775_temp_is_visible uses the index into the following array
+ * to determine if attributes should be created or not.
+ * Any change in order or content must be matched.
+ */
+static struct sensor_device_template *nct6775_attributes_temp_template[] = {
+ &sensor_dev_template_temp_input,
+ &sensor_dev_template_temp_label,
+ &sensor_dev_template_temp_alarm, /* 2 */
+ &sensor_dev_template_temp_beep, /* 3 */
+ &sensor_dev_template_temp_max, /* 4 */
+ &sensor_dev_template_temp_max_hyst, /* 5 */
+ &sensor_dev_template_temp_crit, /* 6 */
+ &sensor_dev_template_temp_lcrit, /* 7 */
+ &sensor_dev_template_temp_offset, /* 8 */
+ &sensor_dev_template_temp_type, /* 9 */
+ NULL
};
-static struct sensor_device_attribute sda_temp_alarm[] = {
- SENSOR_ATTR(temp1_alarm, S_IRUGO, show_temp_alarm, NULL, 0),
- SENSOR_ATTR(temp2_alarm, S_IRUGO, show_temp_alarm, NULL, 1),
- SENSOR_ATTR(temp3_alarm, S_IRUGO, show_temp_alarm, NULL, 2),
- SENSOR_ATTR(temp4_alarm, S_IRUGO, show_temp_alarm, NULL, 3),
- SENSOR_ATTR(temp5_alarm, S_IRUGO, show_temp_alarm, NULL, 4),
- SENSOR_ATTR(temp6_alarm, S_IRUGO, show_temp_alarm, NULL, 5),
- SENSOR_ATTR(temp7_alarm, S_IRUGO, show_temp_alarm, NULL, 6),
- SENSOR_ATTR(temp8_alarm, S_IRUGO, show_temp_alarm, NULL, 7),
- SENSOR_ATTR(temp9_alarm, S_IRUGO, show_temp_alarm, NULL, 8),
- SENSOR_ATTR(temp10_alarm, S_IRUGO, show_temp_alarm, NULL, 9),
+static struct sensor_template_group nct6775_temp_template_group = {
+ .templates = nct6775_attributes_temp_template,
+ .is_visible = nct6775_temp_is_visible,
+ .base = 1,
};
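
nct6775_temp_is_visible() decodes its index argument as temp * 10 + slot, where the slot is the position in nct6775_attributes_temp_template[] (hence the /* 2 */ through /* 9 */ markers above). A minimal standalone decode of that convention, for illustration only:

    #include <stdio.h>

    /*
     * Illustrative decode of the temp attribute index convention used by
     * nct6775_temp_is_visible(): ten template slots per temperature channel.
     * Standalone demo, not driver code.
     */
    static const char *temp_slot_names[10] = {
            "input", "label", "alarm", "beep", "max",
            "max_hyst", "crit", "lcrit", "offset", "type"
    };

    int main(void)
    {
            int index = 47;                 /* example: channel 4, slot 7 */
            int temp = index / 10;          /* temperature channel (0-based) */
            int nr = index % 10;            /* position in the template array */

            printf("index %d -> temp%d_%s\n", index, temp + 1, temp_slot_names[nr]);
            return 0;
    }
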
static ssize_t
@@ -2422,77 +2625,19 @@ store_speed_tolerance(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR_2(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0, 0);
-static SENSOR_DEVICE_ATTR_2(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1, 0);
-static SENSOR_DEVICE_ATTR_2(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2, 0);
-static SENSOR_DEVICE_ATTR_2(pwm4, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 3, 0);
-static SENSOR_DEVICE_ATTR_2(pwm5, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 4, 0);
-
-static SENSOR_DEVICE_ATTR(pwm1_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
- store_pwm_mode, 0);
-static SENSOR_DEVICE_ATTR(pwm2_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
- store_pwm_mode, 1);
-static SENSOR_DEVICE_ATTR(pwm3_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
- store_pwm_mode, 2);
-static SENSOR_DEVICE_ATTR(pwm4_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
- store_pwm_mode, 3);
-static SENSOR_DEVICE_ATTR(pwm5_mode, S_IWUSR | S_IRUGO, show_pwm_mode,
- store_pwm_mode, 4);
-
-static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- store_pwm_enable, 0);
-static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- store_pwm_enable, 1);
-static SENSOR_DEVICE_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- store_pwm_enable, 2);
-static SENSOR_DEVICE_ATTR(pwm4_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- store_pwm_enable, 3);
-static SENSOR_DEVICE_ATTR(pwm5_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- store_pwm_enable, 4);
-
-static SENSOR_DEVICE_ATTR(pwm1_temp_sel, S_IWUSR | S_IRUGO,
- show_pwm_temp_sel, store_pwm_temp_sel, 0);
-static SENSOR_DEVICE_ATTR(pwm2_temp_sel, S_IWUSR | S_IRUGO,
- show_pwm_temp_sel, store_pwm_temp_sel, 1);
-static SENSOR_DEVICE_ATTR(pwm3_temp_sel, S_IWUSR | S_IRUGO,
- show_pwm_temp_sel, store_pwm_temp_sel, 2);
-static SENSOR_DEVICE_ATTR(pwm4_temp_sel, S_IWUSR | S_IRUGO,
- show_pwm_temp_sel, store_pwm_temp_sel, 3);
-static SENSOR_DEVICE_ATTR(pwm5_temp_sel, S_IWUSR | S_IRUGO,
- show_pwm_temp_sel, store_pwm_temp_sel, 4);
-
-static SENSOR_DEVICE_ATTR(pwm1_target_temp, S_IWUSR | S_IRUGO, show_target_temp,
- store_target_temp, 0);
-static SENSOR_DEVICE_ATTR(pwm2_target_temp, S_IWUSR | S_IRUGO, show_target_temp,
- store_target_temp, 1);
-static SENSOR_DEVICE_ATTR(pwm3_target_temp, S_IWUSR | S_IRUGO, show_target_temp,
- store_target_temp, 2);
-static SENSOR_DEVICE_ATTR(pwm4_target_temp, S_IWUSR | S_IRUGO, show_target_temp,
- store_target_temp, 3);
-static SENSOR_DEVICE_ATTR(pwm5_target_temp, S_IWUSR | S_IRUGO, show_target_temp,
- store_target_temp, 4);
-
-static SENSOR_DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO, show_target_speed,
- store_target_speed, 0);
-static SENSOR_DEVICE_ATTR(fan2_target, S_IWUSR | S_IRUGO, show_target_speed,
- store_target_speed, 1);
-static SENSOR_DEVICE_ATTR(fan3_target, S_IWUSR | S_IRUGO, show_target_speed,
- store_target_speed, 2);
-static SENSOR_DEVICE_ATTR(fan4_target, S_IWUSR | S_IRUGO, show_target_speed,
- store_target_speed, 3);
-static SENSOR_DEVICE_ATTR(fan5_target, S_IWUSR | S_IRUGO, show_target_speed,
- store_target_speed, 4);
-
-static SENSOR_DEVICE_ATTR(fan1_tolerance, S_IWUSR | S_IRUGO,
- show_speed_tolerance, store_speed_tolerance, 0);
-static SENSOR_DEVICE_ATTR(fan2_tolerance, S_IWUSR | S_IRUGO,
- show_speed_tolerance, store_speed_tolerance, 1);
-static SENSOR_DEVICE_ATTR(fan3_tolerance, S_IWUSR | S_IRUGO,
- show_speed_tolerance, store_speed_tolerance, 2);
-static SENSOR_DEVICE_ATTR(fan4_tolerance, S_IWUSR | S_IRUGO,
- show_speed_tolerance, store_speed_tolerance, 3);
-static SENSOR_DEVICE_ATTR(fan5_tolerance, S_IWUSR | S_IRUGO,
- show_speed_tolerance, store_speed_tolerance, 4);
+SENSOR_TEMPLATE_2(pwm, "pwm%d", S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0, 0);
+SENSOR_TEMPLATE(pwm_mode, "pwm%d_mode", S_IWUSR | S_IRUGO, show_pwm_mode,
+ store_pwm_mode, 0);
+SENSOR_TEMPLATE(pwm_enable, "pwm%d_enable", S_IWUSR | S_IRUGO, show_pwm_enable,
+ store_pwm_enable, 0);
+SENSOR_TEMPLATE(pwm_temp_sel, "pwm%d_temp_sel", S_IWUSR | S_IRUGO,
+ show_pwm_temp_sel, store_pwm_temp_sel, 0);
+SENSOR_TEMPLATE(pwm_target_temp, "pwm%d_target_temp", S_IWUSR | S_IRUGO,
+ show_target_temp, store_target_temp, 0);
+SENSOR_TEMPLATE(fan_target, "fan%d_target", S_IWUSR | S_IRUGO,
+ show_target_speed, store_target_speed, 0);
+SENSOR_TEMPLATE(fan_tolerance, "fan%d_tolerance", S_IWUSR | S_IRUGO,
+ show_speed_tolerance, store_speed_tolerance, 0);
/* Smart Fan registers */
@@ -2531,79 +2676,18 @@ store_weight_temp(struct device *dev, struct device_attribute *attr,
return count;
}
-static SENSOR_DEVICE_ATTR(pwm1_weight_temp_sel, S_IWUSR | S_IRUGO,
- show_pwm_weight_temp_sel, store_pwm_weight_temp_sel,
- 0);
-static SENSOR_DEVICE_ATTR(pwm2_weight_temp_sel, S_IWUSR | S_IRUGO,
- show_pwm_weight_temp_sel, store_pwm_weight_temp_sel,
- 1);
-static SENSOR_DEVICE_ATTR(pwm3_weight_temp_sel, S_IWUSR | S_IRUGO,
- show_pwm_weight_temp_sel, store_pwm_weight_temp_sel,
- 2);
-static SENSOR_DEVICE_ATTR(pwm4_weight_temp_sel, S_IWUSR | S_IRUGO,
- show_pwm_weight_temp_sel, store_pwm_weight_temp_sel,
- 3);
-static SENSOR_DEVICE_ATTR(pwm5_weight_temp_sel, S_IWUSR | S_IRUGO,
- show_pwm_weight_temp_sel, store_pwm_weight_temp_sel,
- 4);
-
-static SENSOR_DEVICE_ATTR_2(pwm1_weight_temp_step, S_IWUSR | S_IRUGO,
- show_weight_temp, store_weight_temp, 0, 0);
-static SENSOR_DEVICE_ATTR_2(pwm2_weight_temp_step, S_IWUSR | S_IRUGO,
- show_weight_temp, store_weight_temp, 1, 0);
-static SENSOR_DEVICE_ATTR_2(pwm3_weight_temp_step, S_IWUSR | S_IRUGO,
- show_weight_temp, store_weight_temp, 2, 0);
-static SENSOR_DEVICE_ATTR_2(pwm4_weight_temp_step, S_IWUSR | S_IRUGO,
- show_weight_temp, store_weight_temp, 3, 0);
-static SENSOR_DEVICE_ATTR_2(pwm5_weight_temp_step, S_IWUSR | S_IRUGO,
- show_weight_temp, store_weight_temp, 4, 0);
-
-static SENSOR_DEVICE_ATTR_2(pwm1_weight_temp_step_tol, S_IWUSR | S_IRUGO,
- show_weight_temp, store_weight_temp, 0, 1);
-static SENSOR_DEVICE_ATTR_2(pwm2_weight_temp_step_tol, S_IWUSR | S_IRUGO,
- show_weight_temp, store_weight_temp, 1, 1);
-static SENSOR_DEVICE_ATTR_2(pwm3_weight_temp_step_tol, S_IWUSR | S_IRUGO,
- show_weight_temp, store_weight_temp, 2, 1);
-static SENSOR_DEVICE_ATTR_2(pwm4_weight_temp_step_tol, S_IWUSR | S_IRUGO,
- show_weight_temp, store_weight_temp, 3, 1);
-static SENSOR_DEVICE_ATTR_2(pwm5_weight_temp_step_tol, S_IWUSR | S_IRUGO,
- show_weight_temp, store_weight_temp, 4, 1);
-
-static SENSOR_DEVICE_ATTR_2(pwm1_weight_temp_step_base, S_IWUSR | S_IRUGO,
- show_weight_temp, store_weight_temp, 0, 2);
-static SENSOR_DEVICE_ATTR_2(pwm2_weight_temp_step_base, S_IWUSR | S_IRUGO,
- show_weight_temp, store_weight_temp, 1, 2);
-static SENSOR_DEVICE_ATTR_2(pwm3_weight_temp_step_base, S_IWUSR | S_IRUGO,
- show_weight_temp, store_weight_temp, 2, 2);
-static SENSOR_DEVICE_ATTR_2(pwm4_weight_temp_step_base, S_IWUSR | S_IRUGO,
- show_weight_temp, store_weight_temp, 3, 2);
-static SENSOR_DEVICE_ATTR_2(pwm5_weight_temp_step_base, S_IWUSR | S_IRUGO,
- show_weight_temp, store_weight_temp, 4, 2);
-
-static SENSOR_DEVICE_ATTR_2(pwm1_weight_duty_step, S_IWUSR | S_IRUGO,
- show_pwm, store_pwm, 0, 5);
-static SENSOR_DEVICE_ATTR_2(pwm2_weight_duty_step, S_IWUSR | S_IRUGO,
- show_pwm, store_pwm, 1, 5);
-static SENSOR_DEVICE_ATTR_2(pwm3_weight_duty_step, S_IWUSR | S_IRUGO,
- show_pwm, store_pwm, 2, 5);
-static SENSOR_DEVICE_ATTR_2(pwm4_weight_duty_step, S_IWUSR | S_IRUGO,
- show_pwm, store_pwm, 3, 5);
-static SENSOR_DEVICE_ATTR_2(pwm5_weight_duty_step, S_IWUSR | S_IRUGO,
- show_pwm, store_pwm, 4, 5);
-
-/* duty_base is not supported on all chips */
-static struct sensor_device_attribute_2 sda_weight_duty_base[] = {
- SENSOR_ATTR_2(pwm1_weight_duty_base, S_IWUSR | S_IRUGO,
- show_pwm, store_pwm, 0, 6),
- SENSOR_ATTR_2(pwm2_weight_duty_base, S_IWUSR | S_IRUGO,
- show_pwm, store_pwm, 1, 6),
- SENSOR_ATTR_2(pwm3_weight_duty_base, S_IWUSR | S_IRUGO,
- show_pwm, store_pwm, 2, 6),
- SENSOR_ATTR_2(pwm4_weight_duty_base, S_IWUSR | S_IRUGO,
- show_pwm, store_pwm, 3, 6),
- SENSOR_ATTR_2(pwm5_weight_duty_base, S_IWUSR | S_IRUGO,
- show_pwm, store_pwm, 4, 6),
-};
+SENSOR_TEMPLATE(pwm_weight_temp_sel, "pwm%d_weight_temp_sel", S_IWUSR | S_IRUGO,
+ show_pwm_weight_temp_sel, store_pwm_weight_temp_sel, 0);
+SENSOR_TEMPLATE_2(pwm_weight_temp_step, "pwm%d_weight_temp_step",
+ S_IWUSR | S_IRUGO, show_weight_temp, store_weight_temp, 0, 0);
+SENSOR_TEMPLATE_2(pwm_weight_temp_step_tol, "pwm%d_weight_temp_step_tol",
+ S_IWUSR | S_IRUGO, show_weight_temp, store_weight_temp, 0, 1);
+SENSOR_TEMPLATE_2(pwm_weight_temp_step_base, "pwm%d_weight_temp_step_base",
+ S_IWUSR | S_IRUGO, show_weight_temp, store_weight_temp, 0, 2);
+SENSOR_TEMPLATE_2(pwm_weight_duty_step, "pwm%d_weight_duty_step",
+ S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0, 5);
+SENSOR_TEMPLATE_2(pwm_weight_duty_base, "pwm%d_weight_duty_base",
+ S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0, 6);
static ssize_t
show_fan_time(struct device *dev, struct device_attribute *attr, char *buf)
@@ -2651,227 +2735,6 @@ show_name(struct device *dev, struct device_attribute *attr, char *buf)
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
-static SENSOR_DEVICE_ATTR_2(pwm1_stop_time, S_IWUSR | S_IRUGO, show_fan_time,
- store_fan_time, 0, 0);
-static SENSOR_DEVICE_ATTR_2(pwm2_stop_time, S_IWUSR | S_IRUGO, show_fan_time,
- store_fan_time, 1, 0);
-static SENSOR_DEVICE_ATTR_2(pwm3_stop_time, S_IWUSR | S_IRUGO, show_fan_time,
- store_fan_time, 2, 0);
-static SENSOR_DEVICE_ATTR_2(pwm4_stop_time, S_IWUSR | S_IRUGO, show_fan_time,
- store_fan_time, 3, 0);
-static SENSOR_DEVICE_ATTR_2(pwm5_stop_time, S_IWUSR | S_IRUGO, show_fan_time,
- store_fan_time, 4, 0);
-
-static SENSOR_DEVICE_ATTR_2(pwm1_step_up_time, S_IWUSR | S_IRUGO, show_fan_time,
- store_fan_time, 0, 1);
-static SENSOR_DEVICE_ATTR_2(pwm2_step_up_time, S_IWUSR | S_IRUGO, show_fan_time,
- store_fan_time, 1, 1);
-static SENSOR_DEVICE_ATTR_2(pwm3_step_up_time, S_IWUSR | S_IRUGO, show_fan_time,
- store_fan_time, 2, 1);
-static SENSOR_DEVICE_ATTR_2(pwm4_step_up_time, S_IWUSR | S_IRUGO, show_fan_time,
- store_fan_time, 3, 1);
-static SENSOR_DEVICE_ATTR_2(pwm5_step_up_time, S_IWUSR | S_IRUGO, show_fan_time,
- store_fan_time, 4, 1);
-
-static SENSOR_DEVICE_ATTR_2(pwm1_step_down_time, S_IWUSR | S_IRUGO,
- show_fan_time, store_fan_time, 0, 2);
-static SENSOR_DEVICE_ATTR_2(pwm2_step_down_time, S_IWUSR | S_IRUGO,
- show_fan_time, store_fan_time, 1, 2);
-static SENSOR_DEVICE_ATTR_2(pwm3_step_down_time, S_IWUSR | S_IRUGO,
- show_fan_time, store_fan_time, 2, 2);
-static SENSOR_DEVICE_ATTR_2(pwm4_step_down_time, S_IWUSR | S_IRUGO,
- show_fan_time, store_fan_time, 3, 2);
-static SENSOR_DEVICE_ATTR_2(pwm5_step_down_time, S_IWUSR | S_IRUGO,
- show_fan_time, store_fan_time, 4, 2);
-
-static SENSOR_DEVICE_ATTR_2(pwm1_start, S_IWUSR | S_IRUGO, show_pwm,
- store_pwm, 0, 1);
-static SENSOR_DEVICE_ATTR_2(pwm2_start, S_IWUSR | S_IRUGO, show_pwm,
- store_pwm, 1, 1);
-static SENSOR_DEVICE_ATTR_2(pwm3_start, S_IWUSR | S_IRUGO, show_pwm,
- store_pwm, 2, 1);
-static SENSOR_DEVICE_ATTR_2(pwm4_start, S_IWUSR | S_IRUGO, show_pwm,
- store_pwm, 3, 1);
-static SENSOR_DEVICE_ATTR_2(pwm5_start, S_IWUSR | S_IRUGO, show_pwm,
- store_pwm, 4, 1);
-
-static SENSOR_DEVICE_ATTR_2(pwm1_floor, S_IWUSR | S_IRUGO, show_pwm,
- store_pwm, 0, 2);
-static SENSOR_DEVICE_ATTR_2(pwm2_floor, S_IWUSR | S_IRUGO, show_pwm,
- store_pwm, 1, 2);
-static SENSOR_DEVICE_ATTR_2(pwm3_floor, S_IWUSR | S_IRUGO, show_pwm,
- store_pwm, 2, 2);
-static SENSOR_DEVICE_ATTR_2(pwm4_floor, S_IWUSR | S_IRUGO, show_pwm,
- store_pwm, 3, 2);
-static SENSOR_DEVICE_ATTR_2(pwm5_floor, S_IWUSR | S_IRUGO, show_pwm,
- store_pwm, 4, 2);
-
-static SENSOR_DEVICE_ATTR_2(pwm1_temp_tolerance, S_IWUSR | S_IRUGO,
- show_temp_tolerance, store_temp_tolerance, 0, 0);
-static SENSOR_DEVICE_ATTR_2(pwm2_temp_tolerance, S_IWUSR | S_IRUGO,
- show_temp_tolerance, store_temp_tolerance, 1, 0);
-static SENSOR_DEVICE_ATTR_2(pwm3_temp_tolerance, S_IWUSR | S_IRUGO,
- show_temp_tolerance, store_temp_tolerance, 2, 0);
-static SENSOR_DEVICE_ATTR_2(pwm4_temp_tolerance, S_IWUSR | S_IRUGO,
- show_temp_tolerance, store_temp_tolerance, 3, 0);
-static SENSOR_DEVICE_ATTR_2(pwm5_temp_tolerance, S_IWUSR | S_IRUGO,
- show_temp_tolerance, store_temp_tolerance, 4, 0);
-
-static SENSOR_DEVICE_ATTR_2(pwm1_crit_temp_tolerance, S_IWUSR | S_IRUGO,
- show_temp_tolerance, store_temp_tolerance, 0, 1);
-static SENSOR_DEVICE_ATTR_2(pwm2_crit_temp_tolerance, S_IWUSR | S_IRUGO,
- show_temp_tolerance, store_temp_tolerance, 1, 1);
-static SENSOR_DEVICE_ATTR_2(pwm3_crit_temp_tolerance, S_IWUSR | S_IRUGO,
- show_temp_tolerance, store_temp_tolerance, 2, 1);
-static SENSOR_DEVICE_ATTR_2(pwm4_crit_temp_tolerance, S_IWUSR | S_IRUGO,
- show_temp_tolerance, store_temp_tolerance, 3, 1);
-static SENSOR_DEVICE_ATTR_2(pwm5_crit_temp_tolerance, S_IWUSR | S_IRUGO,
- show_temp_tolerance, store_temp_tolerance, 4, 1);
-
-/* pwm_max is not supported on all chips */
-static struct sensor_device_attribute_2 sda_pwm_max[] = {
- SENSOR_ATTR_2(pwm1_max, S_IWUSR | S_IRUGO, show_pwm, store_pwm,
- 0, 3),
- SENSOR_ATTR_2(pwm2_max, S_IWUSR | S_IRUGO, show_pwm, store_pwm,
- 1, 3),
- SENSOR_ATTR_2(pwm3_max, S_IWUSR | S_IRUGO, show_pwm, store_pwm,
- 2, 3),
- SENSOR_ATTR_2(pwm4_max, S_IWUSR | S_IRUGO, show_pwm, store_pwm,
- 3, 3),
- SENSOR_ATTR_2(pwm5_max, S_IWUSR | S_IRUGO, show_pwm, store_pwm,
- 4, 3),
-};
-
-/* pwm_step is not supported on all chips */
-static struct sensor_device_attribute_2 sda_pwm_step[] = {
- SENSOR_ATTR_2(pwm1_step, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0, 4),
- SENSOR_ATTR_2(pwm2_step, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1, 4),
- SENSOR_ATTR_2(pwm3_step, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2, 4),
- SENSOR_ATTR_2(pwm4_step, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 3, 4),
- SENSOR_ATTR_2(pwm5_step, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 4, 4),
-};
-
-static struct attribute *nct6775_attributes_pwm[5][20] = {
- {
- &sensor_dev_attr_pwm1.dev_attr.attr,
- &sensor_dev_attr_pwm1_mode.dev_attr.attr,
- &sensor_dev_attr_pwm1_enable.dev_attr.attr,
- &sensor_dev_attr_pwm1_temp_sel.dev_attr.attr,
- &sensor_dev_attr_pwm1_temp_tolerance.dev_attr.attr,
- &sensor_dev_attr_pwm1_crit_temp_tolerance.dev_attr.attr,
- &sensor_dev_attr_pwm1_target_temp.dev_attr.attr,
- &sensor_dev_attr_fan1_target.dev_attr.attr,
- &sensor_dev_attr_fan1_tolerance.dev_attr.attr,
- &sensor_dev_attr_pwm1_stop_time.dev_attr.attr,
- &sensor_dev_attr_pwm1_step_up_time.dev_attr.attr,
- &sensor_dev_attr_pwm1_step_down_time.dev_attr.attr,
- &sensor_dev_attr_pwm1_start.dev_attr.attr,
- &sensor_dev_attr_pwm1_floor.dev_attr.attr,
- &sensor_dev_attr_pwm1_weight_temp_sel.dev_attr.attr,
- &sensor_dev_attr_pwm1_weight_temp_step.dev_attr.attr,
- &sensor_dev_attr_pwm1_weight_temp_step_tol.dev_attr.attr,
- &sensor_dev_attr_pwm1_weight_temp_step_base.dev_attr.attr,
- &sensor_dev_attr_pwm1_weight_duty_step.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_pwm2.dev_attr.attr,
- &sensor_dev_attr_pwm2_mode.dev_attr.attr,
- &sensor_dev_attr_pwm2_enable.dev_attr.attr,
- &sensor_dev_attr_pwm2_temp_sel.dev_attr.attr,
- &sensor_dev_attr_pwm2_temp_tolerance.dev_attr.attr,
- &sensor_dev_attr_pwm2_crit_temp_tolerance.dev_attr.attr,
- &sensor_dev_attr_pwm2_target_temp.dev_attr.attr,
- &sensor_dev_attr_fan2_target.dev_attr.attr,
- &sensor_dev_attr_fan2_tolerance.dev_attr.attr,
- &sensor_dev_attr_pwm2_stop_time.dev_attr.attr,
- &sensor_dev_attr_pwm2_step_up_time.dev_attr.attr,
- &sensor_dev_attr_pwm2_step_down_time.dev_attr.attr,
- &sensor_dev_attr_pwm2_start.dev_attr.attr,
- &sensor_dev_attr_pwm2_floor.dev_attr.attr,
- &sensor_dev_attr_pwm2_weight_temp_sel.dev_attr.attr,
- &sensor_dev_attr_pwm2_weight_temp_step.dev_attr.attr,
- &sensor_dev_attr_pwm2_weight_temp_step_tol.dev_attr.attr,
- &sensor_dev_attr_pwm2_weight_temp_step_base.dev_attr.attr,
- &sensor_dev_attr_pwm2_weight_duty_step.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_pwm3.dev_attr.attr,
- &sensor_dev_attr_pwm3_mode.dev_attr.attr,
- &sensor_dev_attr_pwm3_enable.dev_attr.attr,
- &sensor_dev_attr_pwm3_temp_sel.dev_attr.attr,
- &sensor_dev_attr_pwm3_temp_tolerance.dev_attr.attr,
- &sensor_dev_attr_pwm3_crit_temp_tolerance.dev_attr.attr,
- &sensor_dev_attr_pwm3_target_temp.dev_attr.attr,
- &sensor_dev_attr_fan3_target.dev_attr.attr,
- &sensor_dev_attr_fan3_tolerance.dev_attr.attr,
- &sensor_dev_attr_pwm3_stop_time.dev_attr.attr,
- &sensor_dev_attr_pwm3_step_up_time.dev_attr.attr,
- &sensor_dev_attr_pwm3_step_down_time.dev_attr.attr,
- &sensor_dev_attr_pwm3_start.dev_attr.attr,
- &sensor_dev_attr_pwm3_floor.dev_attr.attr,
- &sensor_dev_attr_pwm3_weight_temp_sel.dev_attr.attr,
- &sensor_dev_attr_pwm3_weight_temp_step.dev_attr.attr,
- &sensor_dev_attr_pwm3_weight_temp_step_tol.dev_attr.attr,
- &sensor_dev_attr_pwm3_weight_temp_step_base.dev_attr.attr,
- &sensor_dev_attr_pwm3_weight_duty_step.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_pwm4.dev_attr.attr,
- &sensor_dev_attr_pwm4_mode.dev_attr.attr,
- &sensor_dev_attr_pwm4_enable.dev_attr.attr,
- &sensor_dev_attr_pwm4_temp_sel.dev_attr.attr,
- &sensor_dev_attr_pwm4_temp_tolerance.dev_attr.attr,
- &sensor_dev_attr_pwm4_crit_temp_tolerance.dev_attr.attr,
- &sensor_dev_attr_pwm4_target_temp.dev_attr.attr,
- &sensor_dev_attr_fan4_target.dev_attr.attr,
- &sensor_dev_attr_fan4_tolerance.dev_attr.attr,
- &sensor_dev_attr_pwm4_stop_time.dev_attr.attr,
- &sensor_dev_attr_pwm4_step_up_time.dev_attr.attr,
- &sensor_dev_attr_pwm4_step_down_time.dev_attr.attr,
- &sensor_dev_attr_pwm4_start.dev_attr.attr,
- &sensor_dev_attr_pwm4_floor.dev_attr.attr,
- &sensor_dev_attr_pwm4_weight_temp_sel.dev_attr.attr,
- &sensor_dev_attr_pwm4_weight_temp_step.dev_attr.attr,
- &sensor_dev_attr_pwm4_weight_temp_step_tol.dev_attr.attr,
- &sensor_dev_attr_pwm4_weight_temp_step_base.dev_attr.attr,
- &sensor_dev_attr_pwm4_weight_duty_step.dev_attr.attr,
- NULL
- },
- {
- &sensor_dev_attr_pwm5.dev_attr.attr,
- &sensor_dev_attr_pwm5_mode.dev_attr.attr,
- &sensor_dev_attr_pwm5_enable.dev_attr.attr,
- &sensor_dev_attr_pwm5_temp_sel.dev_attr.attr,
- &sensor_dev_attr_pwm5_temp_tolerance.dev_attr.attr,
- &sensor_dev_attr_pwm5_crit_temp_tolerance.dev_attr.attr,
- &sensor_dev_attr_pwm5_target_temp.dev_attr.attr,
- &sensor_dev_attr_fan5_target.dev_attr.attr,
- &sensor_dev_attr_fan5_tolerance.dev_attr.attr,
- &sensor_dev_attr_pwm5_stop_time.dev_attr.attr,
- &sensor_dev_attr_pwm5_step_up_time.dev_attr.attr,
- &sensor_dev_attr_pwm5_step_down_time.dev_attr.attr,
- &sensor_dev_attr_pwm5_start.dev_attr.attr,
- &sensor_dev_attr_pwm5_floor.dev_attr.attr,
- &sensor_dev_attr_pwm5_weight_temp_sel.dev_attr.attr,
- &sensor_dev_attr_pwm5_weight_temp_step.dev_attr.attr,
- &sensor_dev_attr_pwm5_weight_temp_step_tol.dev_attr.attr,
- &sensor_dev_attr_pwm5_weight_temp_step_base.dev_attr.attr,
- &sensor_dev_attr_pwm5_weight_duty_step.dev_attr.attr,
- NULL
- },
-};
-
-static const struct attribute_group nct6775_group_pwm[5] = {
- { .attrs = nct6775_attributes_pwm[0] },
- { .attrs = nct6775_attributes_pwm[1] },
- { .attrs = nct6775_attributes_pwm[2] },
- { .attrs = nct6775_attributes_pwm[3] },
- { .attrs = nct6775_attributes_pwm[4] },
-};
-
static ssize_t
show_auto_pwm(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -2927,17 +2790,19 @@ store_auto_pwm(struct device *dev, struct device_attribute *attr,
break;
case nct6776:
break; /* always enabled, nothing to do */
+ case nct6106:
case nct6779:
- nct6775_write_value(data, NCT6779_REG_CRITICAL_PWM[nr],
+ case nct6791:
+ nct6775_write_value(data, data->REG_CRITICAL_PWM[nr],
val);
reg = nct6775_read_value(data,
- NCT6779_REG_CRITICAL_PWM_ENABLE[nr]);
+ data->REG_CRITICAL_PWM_ENABLE[nr]);
if (val == 255)
- reg &= ~0x01;
+ reg &= ~data->CRITICAL_PWM_ENABLE_MASK;
else
- reg |= 0x01;
+ reg |= data->CRITICAL_PWM_ENABLE_MASK;
nct6775_write_value(data,
- NCT6779_REG_CRITICAL_PWM_ENABLE[nr],
+ data->REG_CRITICAL_PWM_ENABLE[nr],
reg);
break;
}
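
For the chips handled in this case, writing 255 clears the chip-specific enable bit (presumably because 255 already means full duty cycle), while any other value programs the register and sets it; the former hard-coded 0x01 becomes data->CRITICAL_PWM_ENABLE_MASK so NCT6106 and NCT6791 can use a different bit. A small standalone sketch of that set/clear pattern, with example values only:

    #include <stdio.h>

    /*
     * Illustrative only: the enable-bit update pattern from store_auto_pwm(),
     * with the mask as a parameter instead of the old hard-coded 0x01.
     */
    static unsigned char update_enable(unsigned char reg, unsigned char mask,
                                       unsigned int val)
    {
            if (val == 255)
                    reg &= ~mask;   /* 255: leave critical PWM disabled */
            else
                    reg |= mask;    /* any other value enables critical PWM */
            return reg;
    }

    int main(void)
    {
            printf("0x%02x\n", update_enable(0x10, 0x01, 128));    /* -> 0x11 */
            printf("0x%02x\n", update_enable(0x11, 0x01, 255));    /* -> 0x10 */
            return 0;
    }
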
@@ -2992,155 +2857,140 @@ store_auto_temp(struct device *dev, struct device_attribute *attr,
return count;
}
+static umode_t nct6775_pwm_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct nct6775_data *data = dev_get_drvdata(dev);
+ int pwm = index / 36; /* pwm index */
+ int nr = index % 36; /* attribute index */
+
+ if (!(data->has_pwm & (1 << pwm)))
+ return 0;
+
+ if (nr == 19 && data->REG_PWM[3] == NULL) /* pwm_max */
+ return 0;
+ if (nr == 20 && data->REG_PWM[4] == NULL) /* pwm_step */
+ return 0;
+ if (nr == 21 && data->REG_PWM[6] == NULL) /* weight_duty_base */
+ return 0;
+
+ if (nr >= 22 && nr <= 35) { /* auto point */
+ int api = (nr - 22) / 2; /* auto point index */
+
+ if (api > data->auto_pwm_num)
+ return 0;
+ }
+ return attr->mode;
+}
+
+SENSOR_TEMPLATE_2(pwm_stop_time, "pwm%d_stop_time", S_IWUSR | S_IRUGO,
+ show_fan_time, store_fan_time, 0, 0);
+SENSOR_TEMPLATE_2(pwm_step_up_time, "pwm%d_step_up_time", S_IWUSR | S_IRUGO,
+ show_fan_time, store_fan_time, 0, 1);
+SENSOR_TEMPLATE_2(pwm_step_down_time, "pwm%d_step_down_time", S_IWUSR | S_IRUGO,
+ show_fan_time, store_fan_time, 0, 2);
+SENSOR_TEMPLATE_2(pwm_start, "pwm%d_start", S_IWUSR | S_IRUGO, show_pwm,
+ store_pwm, 0, 1);
+SENSOR_TEMPLATE_2(pwm_floor, "pwm%d_floor", S_IWUSR | S_IRUGO, show_pwm,
+ store_pwm, 0, 2);
+SENSOR_TEMPLATE_2(pwm_temp_tolerance, "pwm%d_temp_tolerance", S_IWUSR | S_IRUGO,
+ show_temp_tolerance, store_temp_tolerance, 0, 0);
+SENSOR_TEMPLATE_2(pwm_crit_temp_tolerance, "pwm%d_crit_temp_tolerance",
+ S_IWUSR | S_IRUGO, show_temp_tolerance, store_temp_tolerance,
+ 0, 1);
+
+SENSOR_TEMPLATE_2(pwm_max, "pwm%d_max", S_IWUSR | S_IRUGO, show_pwm, store_pwm,
+ 0, 3);
+
+SENSOR_TEMPLATE_2(pwm_step, "pwm%d_step", S_IWUSR | S_IRUGO, show_pwm,
+ store_pwm, 0, 4);
+
+SENSOR_TEMPLATE_2(pwm_auto_point1_pwm, "pwm%d_auto_point1_pwm",
+ S_IWUSR | S_IRUGO, show_auto_pwm, store_auto_pwm, 0, 0);
+SENSOR_TEMPLATE_2(pwm_auto_point1_temp, "pwm%d_auto_point1_temp",
+ S_IWUSR | S_IRUGO, show_auto_temp, store_auto_temp, 0, 0);
+
+SENSOR_TEMPLATE_2(pwm_auto_point2_pwm, "pwm%d_auto_point2_pwm",
+ S_IWUSR | S_IRUGO, show_auto_pwm, store_auto_pwm, 0, 1);
+SENSOR_TEMPLATE_2(pwm_auto_point2_temp, "pwm%d_auto_point2_temp",
+ S_IWUSR | S_IRUGO, show_auto_temp, store_auto_temp, 0, 1);
+
+SENSOR_TEMPLATE_2(pwm_auto_point3_pwm, "pwm%d_auto_point3_pwm",
+ S_IWUSR | S_IRUGO, show_auto_pwm, store_auto_pwm, 0, 2);
+SENSOR_TEMPLATE_2(pwm_auto_point3_temp, "pwm%d_auto_point3_temp",
+ S_IWUSR | S_IRUGO, show_auto_temp, store_auto_temp, 0, 2);
+
+SENSOR_TEMPLATE_2(pwm_auto_point4_pwm, "pwm%d_auto_point4_pwm",
+ S_IWUSR | S_IRUGO, show_auto_pwm, store_auto_pwm, 0, 3);
+SENSOR_TEMPLATE_2(pwm_auto_point4_temp, "pwm%d_auto_point4_temp",
+ S_IWUSR | S_IRUGO, show_auto_temp, store_auto_temp, 0, 3);
+
+SENSOR_TEMPLATE_2(pwm_auto_point5_pwm, "pwm%d_auto_point5_pwm",
+ S_IWUSR | S_IRUGO, show_auto_pwm, store_auto_pwm, 0, 4);
+SENSOR_TEMPLATE_2(pwm_auto_point5_temp, "pwm%d_auto_point5_temp",
+ S_IWUSR | S_IRUGO, show_auto_temp, store_auto_temp, 0, 4);
+
+SENSOR_TEMPLATE_2(pwm_auto_point6_pwm, "pwm%d_auto_point6_pwm",
+ S_IWUSR | S_IRUGO, show_auto_pwm, store_auto_pwm, 0, 5);
+SENSOR_TEMPLATE_2(pwm_auto_point6_temp, "pwm%d_auto_point6_temp",
+ S_IWUSR | S_IRUGO, show_auto_temp, store_auto_temp, 0, 5);
+
+SENSOR_TEMPLATE_2(pwm_auto_point7_pwm, "pwm%d_auto_point7_pwm",
+ S_IWUSR | S_IRUGO, show_auto_pwm, store_auto_pwm, 0, 6);
+SENSOR_TEMPLATE_2(pwm_auto_point7_temp, "pwm%d_auto_point7_temp",
+ S_IWUSR | S_IRUGO, show_auto_temp, store_auto_temp, 0, 6);
+
/*
- * The number of auto-point trip points is chip dependent.
- * Need to check support while generating/removing attribute files.
+ * nct6775_pwm_is_visible uses the index into the following array
+ * to determine if attributes should be created or not.
+ * Any change in order or content must be matched.
*/
-static struct sensor_device_attribute_2 sda_auto_pwm_arrays[] = {
- SENSOR_ATTR_2(pwm1_auto_point1_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 0, 0),
- SENSOR_ATTR_2(pwm1_auto_point1_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 0, 0),
- SENSOR_ATTR_2(pwm1_auto_point2_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 0, 1),
- SENSOR_ATTR_2(pwm1_auto_point2_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 0, 1),
- SENSOR_ATTR_2(pwm1_auto_point3_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 0, 2),
- SENSOR_ATTR_2(pwm1_auto_point3_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 0, 2),
- SENSOR_ATTR_2(pwm1_auto_point4_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 0, 3),
- SENSOR_ATTR_2(pwm1_auto_point4_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 0, 3),
- SENSOR_ATTR_2(pwm1_auto_point5_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 0, 4),
- SENSOR_ATTR_2(pwm1_auto_point5_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 0, 4),
- SENSOR_ATTR_2(pwm1_auto_point6_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 0, 5),
- SENSOR_ATTR_2(pwm1_auto_point6_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 0, 5),
- SENSOR_ATTR_2(pwm1_auto_point7_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 0, 6),
- SENSOR_ATTR_2(pwm1_auto_point7_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 0, 6),
-
- SENSOR_ATTR_2(pwm2_auto_point1_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 1, 0),
- SENSOR_ATTR_2(pwm2_auto_point1_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 1, 0),
- SENSOR_ATTR_2(pwm2_auto_point2_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 1, 1),
- SENSOR_ATTR_2(pwm2_auto_point2_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 1, 1),
- SENSOR_ATTR_2(pwm2_auto_point3_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 1, 2),
- SENSOR_ATTR_2(pwm2_auto_point3_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 1, 2),
- SENSOR_ATTR_2(pwm2_auto_point4_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 1, 3),
- SENSOR_ATTR_2(pwm2_auto_point4_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 1, 3),
- SENSOR_ATTR_2(pwm2_auto_point5_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 1, 4),
- SENSOR_ATTR_2(pwm2_auto_point5_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 1, 4),
- SENSOR_ATTR_2(pwm2_auto_point6_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 1, 5),
- SENSOR_ATTR_2(pwm2_auto_point6_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 1, 5),
- SENSOR_ATTR_2(pwm2_auto_point7_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 1, 6),
- SENSOR_ATTR_2(pwm2_auto_point7_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 1, 6),
-
- SENSOR_ATTR_2(pwm3_auto_point1_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 2, 0),
- SENSOR_ATTR_2(pwm3_auto_point1_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 2, 0),
- SENSOR_ATTR_2(pwm3_auto_point2_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 2, 1),
- SENSOR_ATTR_2(pwm3_auto_point2_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 2, 1),
- SENSOR_ATTR_2(pwm3_auto_point3_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 2, 2),
- SENSOR_ATTR_2(pwm3_auto_point3_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 2, 2),
- SENSOR_ATTR_2(pwm3_auto_point4_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 2, 3),
- SENSOR_ATTR_2(pwm3_auto_point4_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 2, 3),
- SENSOR_ATTR_2(pwm3_auto_point5_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 2, 4),
- SENSOR_ATTR_2(pwm3_auto_point5_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 2, 4),
- SENSOR_ATTR_2(pwm3_auto_point6_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 2, 5),
- SENSOR_ATTR_2(pwm3_auto_point6_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 2, 5),
- SENSOR_ATTR_2(pwm3_auto_point7_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 2, 6),
- SENSOR_ATTR_2(pwm3_auto_point7_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 2, 6),
-
- SENSOR_ATTR_2(pwm4_auto_point1_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 3, 0),
- SENSOR_ATTR_2(pwm4_auto_point1_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 3, 0),
- SENSOR_ATTR_2(pwm4_auto_point2_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 3, 1),
- SENSOR_ATTR_2(pwm4_auto_point2_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 3, 1),
- SENSOR_ATTR_2(pwm4_auto_point3_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 3, 2),
- SENSOR_ATTR_2(pwm4_auto_point3_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 3, 2),
- SENSOR_ATTR_2(pwm4_auto_point4_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 3, 3),
- SENSOR_ATTR_2(pwm4_auto_point4_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 3, 3),
- SENSOR_ATTR_2(pwm4_auto_point5_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 3, 4),
- SENSOR_ATTR_2(pwm4_auto_point5_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 3, 4),
- SENSOR_ATTR_2(pwm4_auto_point6_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 3, 5),
- SENSOR_ATTR_2(pwm4_auto_point6_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 3, 5),
- SENSOR_ATTR_2(pwm4_auto_point7_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 3, 6),
- SENSOR_ATTR_2(pwm4_auto_point7_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 3, 6),
-
- SENSOR_ATTR_2(pwm5_auto_point1_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 4, 0),
- SENSOR_ATTR_2(pwm5_auto_point1_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 4, 0),
- SENSOR_ATTR_2(pwm5_auto_point2_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 4, 1),
- SENSOR_ATTR_2(pwm5_auto_point2_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 4, 1),
- SENSOR_ATTR_2(pwm5_auto_point3_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 4, 2),
- SENSOR_ATTR_2(pwm5_auto_point3_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 4, 2),
- SENSOR_ATTR_2(pwm5_auto_point4_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 4, 3),
- SENSOR_ATTR_2(pwm5_auto_point4_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 4, 3),
- SENSOR_ATTR_2(pwm5_auto_point5_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 4, 4),
- SENSOR_ATTR_2(pwm5_auto_point5_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 4, 4),
- SENSOR_ATTR_2(pwm5_auto_point6_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 4, 5),
- SENSOR_ATTR_2(pwm5_auto_point6_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 4, 5),
- SENSOR_ATTR_2(pwm5_auto_point7_pwm, S_IWUSR | S_IRUGO,
- show_auto_pwm, store_auto_pwm, 4, 6),
- SENSOR_ATTR_2(pwm5_auto_point7_temp, S_IWUSR | S_IRUGO,
- show_auto_temp, store_auto_temp, 4, 6),
+static struct sensor_device_template *nct6775_attributes_pwm_template[] = {
+ &sensor_dev_template_pwm,
+ &sensor_dev_template_pwm_mode,
+ &sensor_dev_template_pwm_enable,
+ &sensor_dev_template_pwm_temp_sel,
+ &sensor_dev_template_pwm_temp_tolerance,
+ &sensor_dev_template_pwm_crit_temp_tolerance,
+ &sensor_dev_template_pwm_target_temp,
+ &sensor_dev_template_fan_target,
+ &sensor_dev_template_fan_tolerance,
+ &sensor_dev_template_pwm_stop_time,
+ &sensor_dev_template_pwm_step_up_time,
+ &sensor_dev_template_pwm_step_down_time,
+ &sensor_dev_template_pwm_start,
+ &sensor_dev_template_pwm_floor,
+ &sensor_dev_template_pwm_weight_temp_sel,
+ &sensor_dev_template_pwm_weight_temp_step,
+ &sensor_dev_template_pwm_weight_temp_step_tol,
+ &sensor_dev_template_pwm_weight_temp_step_base,
+ &sensor_dev_template_pwm_weight_duty_step,
+ &sensor_dev_template_pwm_max, /* 19 */
+ &sensor_dev_template_pwm_step, /* 20 */
+ &sensor_dev_template_pwm_weight_duty_base, /* 21 */
+ &sensor_dev_template_pwm_auto_point1_pwm, /* 22 */
+ &sensor_dev_template_pwm_auto_point1_temp,
+ &sensor_dev_template_pwm_auto_point2_pwm,
+ &sensor_dev_template_pwm_auto_point2_temp,
+ &sensor_dev_template_pwm_auto_point3_pwm,
+ &sensor_dev_template_pwm_auto_point3_temp,
+ &sensor_dev_template_pwm_auto_point4_pwm,
+ &sensor_dev_template_pwm_auto_point4_temp,
+ &sensor_dev_template_pwm_auto_point5_pwm,
+ &sensor_dev_template_pwm_auto_point5_temp,
+ &sensor_dev_template_pwm_auto_point6_pwm,
+ &sensor_dev_template_pwm_auto_point6_temp,
+ &sensor_dev_template_pwm_auto_point7_pwm,
+ &sensor_dev_template_pwm_auto_point7_temp, /* 35 */
+
+ NULL
+};
+
+static struct sensor_template_group nct6775_pwm_template_group = {
+ .templates = nct6775_attributes_pwm_template,
+ .is_visible = nct6775_pwm_is_visible,
+ .base = 1,
};
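
nct6775_pwm_is_visible() decodes its index as pwm * 36 + slot; slots 19 to 21 cover the optional registers and slots 22 to 35 the seven auto-point pwm/temp pairs, with api = (nr - 22) / 2 compared against auto_pwm_num (the '>' comparison leaves one point visible beyond auto_pwm_num, presumably the critical trip point). The standalone demo below walks the auto-point slots for auto_pwm_num = 4, the value used for the nct6106 and nct6791 cases added later in this patch:

    #include <stdio.h>

    /*
     * Illustrative decode of the pwm attribute index convention from
     * nct6775_pwm_is_visible(): 36 template slots per pwm channel,
     * slots 22..35 holding the seven auto-point pwm/temp pairs.
     */
    int main(void)
    {
            int auto_pwm_num = 4;   /* example: 4 regular auto points */
            int index;

            for (index = 22; index <= 35; index++) {
                    int pwm = index / 36;           /* pwm channel (0 here) */
                    int nr = index % 36;            /* slot within the channel */
                    int api = (nr - 22) / 2;        /* auto point index */

                    printf("slot %2d -> pwm%d auto point %d (%s)%s\n",
                           nr, pwm + 1, api + 1,
                           (nr - 22) % 2 ? "temp" : "pwm",
                           api > auto_pwm_num ? " [hidden]" : "");
            }
            return 0;
    }
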
static ssize_t
@@ -3159,7 +3009,6 @@ clear_caseopen(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct nct6775_data *data = dev_get_drvdata(dev);
- struct nct6775_sio_data *sio_data = dev->platform_data;
int nr = to_sensor_dev_attr(attr)->index - INTRUSION_ALARM_BASE;
unsigned long val;
u8 reg;
@@ -3175,19 +3024,19 @@ clear_caseopen(struct device *dev, struct device_attribute *attr,
* The CR registers are the same for all chips, and not all chips
* support clearing the caseopen status through "regular" registers.
*/
- ret = superio_enter(sio_data->sioreg);
+ ret = superio_enter(data->sioreg);
if (ret) {
count = ret;
goto error;
}
- superio_select(sio_data->sioreg, NCT6775_LD_ACPI);
- reg = superio_inb(sio_data->sioreg, NCT6775_REG_CR_CASEOPEN_CLR[nr]);
+ superio_select(data->sioreg, NCT6775_LD_ACPI);
+ reg = superio_inb(data->sioreg, NCT6775_REG_CR_CASEOPEN_CLR[nr]);
reg |= NCT6775_CR_CASEOPEN_CLR_MASK[nr];
- superio_outb(sio_data->sioreg, NCT6775_REG_CR_CASEOPEN_CLR[nr], reg);
+ superio_outb(data->sioreg, NCT6775_REG_CR_CASEOPEN_CLR[nr], reg);
reg &= ~NCT6775_CR_CASEOPEN_CLR_MASK[nr];
- superio_outb(sio_data->sioreg, NCT6775_REG_CR_CASEOPEN_CLR[nr], reg);
- superio_exit(sio_data->sioreg);
+ superio_outb(data->sioreg, NCT6775_REG_CR_CASEOPEN_CLR[nr], reg);
+ superio_exit(data->sioreg);
data->valid = false; /* Force cache refresh */
error:
@@ -3195,71 +3044,79 @@ error:
return count;
}
-static struct sensor_device_attribute sda_caseopen[] = {
- SENSOR_ATTR(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm,
- clear_caseopen, INTRUSION_ALARM_BASE),
- SENSOR_ATTR(intrusion1_alarm, S_IWUSR | S_IRUGO, show_alarm,
- clear_caseopen, INTRUSION_ALARM_BASE + 1),
-};
-
-/*
- * Driver and device management
- */
-
-static void nct6775_device_remove_files(struct device *dev)
+static SENSOR_DEVICE_ATTR(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm,
+ clear_caseopen, INTRUSION_ALARM_BASE);
+static SENSOR_DEVICE_ATTR(intrusion1_alarm, S_IWUSR | S_IRUGO, show_alarm,
+ clear_caseopen, INTRUSION_ALARM_BASE + 1);
+static SENSOR_DEVICE_ATTR(intrusion0_beep, S_IWUSR | S_IRUGO, show_beep,
+ store_beep, INTRUSION_ALARM_BASE);
+static SENSOR_DEVICE_ATTR(intrusion1_beep, S_IWUSR | S_IRUGO, show_beep,
+ store_beep, INTRUSION_ALARM_BASE + 1);
+static SENSOR_DEVICE_ATTR(beep_enable, S_IWUSR | S_IRUGO, show_beep,
+ store_beep, BEEP_ENABLE_BASE);
+
+static umode_t nct6775_other_is_visible(struct kobject *kobj,
+ struct attribute *attr, int index)
{
- /*
- * some entries in the following arrays may not have been used in
- * device_create_file(), but device_remove_file() will ignore them
- */
- int i;
+ struct device *dev = container_of(kobj, struct device, kobj);
struct nct6775_data *data = dev_get_drvdata(dev);
- for (i = 0; i < data->pwm_num; i++)
- sysfs_remove_group(&dev->kobj, &nct6775_group_pwm[i]);
+ if (index == 1 && !data->have_vid)
+ return 0;
- for (i = 0; i < ARRAY_SIZE(sda_pwm_max); i++)
- device_remove_file(dev, &sda_pwm_max[i].dev_attr);
+ if (index == 2 || index == 3) {
+ if (data->ALARM_BITS[INTRUSION_ALARM_BASE + index - 2] < 0)
+ return 0;
+ }
+
+ if (index == 4 || index == 5) {
+ if (data->BEEP_BITS[INTRUSION_ALARM_BASE + index - 4] < 0)
+ return 0;
+ }
- for (i = 0; i < ARRAY_SIZE(sda_pwm_step); i++)
- device_remove_file(dev, &sda_pwm_step[i].dev_attr);
+ return attr->mode;
+}
- for (i = 0; i < ARRAY_SIZE(sda_weight_duty_base); i++)
- device_remove_file(dev, &sda_weight_duty_base[i].dev_attr);
+/*
+ * nct6775_other_is_visible uses the index into the following array
+ * to determine if attributes should be created or not.
+ * Any change in order or content must be matched.
+ */
+static struct attribute *nct6775_attributes_other[] = {
+ &dev_attr_name.attr,
+ &dev_attr_cpu0_vid.attr, /* 1 */
+ &sensor_dev_attr_intrusion0_alarm.dev_attr.attr, /* 2 */
+ &sensor_dev_attr_intrusion1_alarm.dev_attr.attr, /* 3 */
+ &sensor_dev_attr_intrusion0_beep.dev_attr.attr, /* 4 */
+ &sensor_dev_attr_intrusion1_beep.dev_attr.attr, /* 5 */
+ &sensor_dev_attr_beep_enable.dev_attr.attr, /* 6 */
+
+ NULL
+};
- for (i = 0; i < ARRAY_SIZE(sda_auto_pwm_arrays); i++)
- device_remove_file(dev, &sda_auto_pwm_arrays[i].dev_attr);
+static const struct attribute_group nct6775_group_other = {
+ .attrs = nct6775_attributes_other,
+ .is_visible = nct6775_other_is_visible,
+};
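
For this group the index is simply the position in nct6775_attributes_other[]; entries 2 to 5 are suppressed when the matching ALARM_BITS or BEEP_BITS entry is negative, which is how a chip marks an unsupported intrusion channel. A trivial standalone illustration of that convention, with made-up bit values:

    #include <stdio.h>

    /*
     * Illustrative only: the "negative bit means not supported" convention
     * checked by nct6775_other_is_visible() for the intrusion entries.
     * The example bit values are made up for the demo.
     */
    int main(void)
    {
            int alarm_bits[] = { 17, -1 };  /* intrusion0 supported, intrusion1 not */
            int i;

            for (i = 0; i < 2; i++)
                    printf("intrusion%d_alarm: %s\n", i,
                           alarm_bits[i] < 0 ? "hidden" : "visible");
            return 0;
    }
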
- for (i = 0; i < data->in_num; i++)
- sysfs_remove_group(&dev->kobj, &nct6775_group_in[i]);
+/*
+ * Driver and device management
+ */
- for (i = 0; i < 5; i++) {
- device_remove_file(dev, &sda_fan_input[i].dev_attr);
- device_remove_file(dev, &sda_fan_alarm[i].dev_attr);
- device_remove_file(dev, &sda_fan_div[i].dev_attr);
- device_remove_file(dev, &sda_fan_min[i].dev_attr);
- device_remove_file(dev, &sda_fan_pulses[i].dev_attr);
- }
- for (i = 0; i < NUM_TEMP; i++) {
- if (!(data->have_temp & (1 << i)))
- continue;
- device_remove_file(dev, &sda_temp_input[i].dev_attr);
- device_remove_file(dev, &sda_temp_label[i].dev_attr);
- device_remove_file(dev, &sda_temp_max[i].dev_attr);
- device_remove_file(dev, &sda_temp_max_hyst[i].dev_attr);
- device_remove_file(dev, &sda_temp_crit[i].dev_attr);
- device_remove_file(dev, &sda_temp_alarm[i].dev_attr);
- if (!(data->have_temp_fixed & (1 << i)))
- continue;
- device_remove_file(dev, &sda_temp_type[i].dev_attr);
- device_remove_file(dev, &sda_temp_offset[i].dev_attr);
- }
+static void nct6775_device_remove_files(struct device *dev)
+{
+ struct nct6775_data *data = dev_get_drvdata(dev);
- device_remove_file(dev, &sda_caseopen[0].dev_attr);
- device_remove_file(dev, &sda_caseopen[1].dev_attr);
+ if (data->group_pwm)
+ sysfs_remove_group(&dev->kobj, data->group_pwm);
+ if (data->group_in)
+ sysfs_remove_group(&dev->kobj, data->group_in);
+ if (data->group_fan)
+ sysfs_remove_group(&dev->kobj, data->group_fan);
+ if (data->group_temp)
+ sysfs_remove_group(&dev->kobj, data->group_temp);
- device_remove_file(dev, &dev_attr_name);
- device_remove_file(dev, &dev_attr_cpu0_vid);
+ sysfs_remove_group(&dev->kobj, &nct6775_group_other);
}
/* Get the monitoring functions started */
@@ -3297,68 +3154,78 @@ static inline void nct6775_init_device(struct nct6775_data *data)
for (i = 0; i < data->temp_fixed_num; i++) {
if (!(data->have_temp_fixed & (1 << i)))
continue;
- if ((tmp & (0x02 << i))) /* diode */
- data->temp_type[i] = 3 - ((diode >> i) & 0x02);
+ if ((tmp & (data->DIODE_MASK << i))) /* diode */
+ data->temp_type[i]
+ = 3 - ((diode >> i) & data->DIODE_MASK);
else /* thermistor */
data->temp_type[i] = 4;
}
}
-static int
-nct6775_check_fan_inputs(const struct nct6775_sio_data *sio_data,
- struct nct6775_data *data)
+static void
+nct6775_check_fan_inputs(struct nct6775_data *data)
{
+ bool fan3pin, fan4pin, fan4min, fan5pin, fan6pin;
+ bool pwm3pin, pwm4pin, pwm5pin, pwm6pin;
+ int sioreg = data->sioreg;
int regval;
- bool fan3pin, fan3min, fan4pin, fan4min, fan5pin;
- bool pwm3pin, pwm4pin, pwm5pin;
- int ret;
-
- ret = superio_enter(sio_data->sioreg);
- if (ret)
- return ret;
/* fan4 and fan5 share some pins with the GPIO and serial flash */
if (data->kind == nct6775) {
- regval = superio_inb(sio_data->sioreg, 0x2c);
+ regval = superio_inb(sioreg, 0x2c);
fan3pin = regval & (1 << 6);
- fan3min = fan3pin;
pwm3pin = regval & (1 << 7);
/* On NCT6775, fan4 shares pins with the fdc interface */
- fan4pin = !(superio_inb(sio_data->sioreg, 0x2A) & 0x80);
- fan4min = 0;
- fan5pin = 0;
- pwm4pin = 0;
- pwm5pin = 0;
+ fan4pin = !(superio_inb(sioreg, 0x2A) & 0x80);
+ fan4min = false;
+ fan5pin = false;
+ fan6pin = false;
+ pwm4pin = false;
+ pwm5pin = false;
+ pwm6pin = false;
} else if (data->kind == nct6776) {
- bool gpok = superio_inb(sio_data->sioreg, 0x27) & 0x80;
+ bool gpok = superio_inb(sioreg, 0x27) & 0x80;
- superio_select(sio_data->sioreg, NCT6775_LD_HWM);
- regval = superio_inb(sio_data->sioreg, SIO_REG_ENABLE);
+ superio_select(sioreg, NCT6775_LD_HWM);
+ regval = superio_inb(sioreg, SIO_REG_ENABLE);
if (regval & 0x80)
fan3pin = gpok;
else
- fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
+ fan3pin = !(superio_inb(sioreg, 0x24) & 0x40);
if (regval & 0x40)
fan4pin = gpok;
else
- fan4pin = superio_inb(sio_data->sioreg, 0x1C) & 0x01;
+ fan4pin = superio_inb(sioreg, 0x1C) & 0x01;
if (regval & 0x20)
fan5pin = gpok;
else
- fan5pin = superio_inb(sio_data->sioreg, 0x1C) & 0x02;
+ fan5pin = superio_inb(sioreg, 0x1C) & 0x02;
fan4min = fan4pin;
- fan3min = fan3pin;
+ fan6pin = false;
pwm3pin = fan3pin;
- pwm4pin = 0;
- pwm5pin = 0;
- } else { /* NCT6779D */
- regval = superio_inb(sio_data->sioreg, 0x1c);
+ pwm4pin = false;
+ pwm5pin = false;
+ pwm6pin = false;
+ } else if (data->kind == nct6106) {
+ regval = superio_inb(sioreg, 0x24);
+ fan3pin = !(regval & 0x80);
+ pwm3pin = regval & 0x08;
+
+ fan4pin = false;
+ fan4min = false;
+ fan5pin = false;
+ fan6pin = false;
+ pwm4pin = false;
+ pwm5pin = false;
+ pwm6pin = false;
+ } else { /* NCT6779D or NCT6791D */
+ regval = superio_inb(sioreg, 0x1c);
fan3pin = !(regval & (1 << 5));
fan4pin = !(regval & (1 << 6));
@@ -3368,22 +3235,25 @@ nct6775_check_fan_inputs(const struct nct6775_sio_data *sio_data,
pwm4pin = !(regval & (1 << 1));
pwm5pin = !(regval & (1 << 2));
- fan3min = fan3pin;
fan4min = fan4pin;
- }
-
- superio_exit(sio_data->sioreg);
- data->has_fan = data->has_fan_min = 0x03; /* fan1 and fan2 */
- data->has_fan |= fan3pin << 2;
- data->has_fan_min |= fan3min << 2;
-
- data->has_fan |= (fan4pin << 3) | (fan5pin << 4);
- data->has_fan_min |= (fan4min << 3) | (fan5pin << 4);
-
- data->has_pwm = 0x03 | (pwm3pin << 2) | (pwm4pin << 3) | (pwm5pin << 4);
+ if (data->kind == nct6791) {
+ regval = superio_inb(sioreg, 0x2d);
+ fan6pin = (regval & (1 << 1));
+ pwm6pin = (regval & (1 << 0));
+ } else { /* NCT6779D */
+ fan6pin = false;
+ pwm6pin = false;
+ }
+ }
- return 0;
+ /* fan 1 and 2 (0x03) are always present */
+ data->has_fan = 0x03 | (fan3pin << 2) | (fan4pin << 3) |
+ (fan5pin << 4) | (fan6pin << 5);
+ data->has_fan_min = 0x03 | (fan3pin << 2) | (fan4min << 3) |
+ (fan5pin << 4);
+ data->has_pwm = 0x03 | (pwm3pin << 2) | (pwm4pin << 3) |
+ (pwm5pin << 4) | (pwm6pin << 5);
}
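
The detected pins are folded into the has_fan, has_fan_min and has_pwm bitmasks with fan1 and fan2 (0x03) always present; the is_visible callbacks above then test these masks with (1 << channel). A standalone sketch of composing and querying such a mask, using arbitrary example pin states:

    #include <stdio.h>
    #include <stdbool.h>

    /*
     * Illustrative only: composing a has_fan-style bitmask from per-pin
     * booleans (fan1/fan2 always present) and testing it per channel,
     * as the is_visible callbacks do with (1 << channel).
     */
    int main(void)
    {
            bool fan3pin = true, fan4pin = false, fan5pin = true, fan6pin = false;
            unsigned int has_fan = 0x03 | (fan3pin << 2) | (fan4pin << 3) |
                                   (fan5pin << 4) | (fan6pin << 5);
            int fan;

            for (fan = 0; fan < 6; fan++)
                    printf("fan%d: %s\n", fan + 1,
                           (has_fan & (1 << fan)) ? "present" : "absent");
            return 0;
    }
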
static void add_temp_sensors(struct nct6775_data *data, const u16 *regp,
@@ -3415,16 +3285,17 @@ static void add_temp_sensors(struct nct6775_data *data, const u16 *regp,
static int nct6775_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct nct6775_sio_data *sio_data = dev->platform_data;
+ struct nct6775_sio_data *sio_data = dev_get_platdata(dev);
struct nct6775_data *data;
struct resource *res;
int i, s, err = 0;
int src, mask, available;
const u16 *reg_temp, *reg_temp_over, *reg_temp_hyst, *reg_temp_config;
const u16 *reg_temp_alternate, *reg_temp_crit;
+ const u16 *reg_temp_crit_l = NULL, *reg_temp_crit_h = NULL;
int num_reg_temp;
- bool have_vid = false;
u8 cr2a;
+ struct attribute_group *group;
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (!devm_request_region(&pdev->dev, res->start, IOREGION_LENGTH,
@@ -3437,6 +3308,7 @@ static int nct6775_probe(struct platform_device *pdev)
return -ENOMEM;
data->kind = sio_data->kind;
+ data->sioreg = sio_data->sioreg;
data->addr = res->start;
mutex_init(&data->update_lock);
data->name = nct6775_device_names[data->kind];
@@ -3444,6 +3316,75 @@ static int nct6775_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, data);
switch (data->kind) {
+ case nct6106:
+ data->in_num = 9;
+ data->pwm_num = 3;
+ data->auto_pwm_num = 4;
+ data->temp_fixed_num = 3;
+ data->num_temp_alarms = 6;
+ data->num_temp_beeps = 6;
+
+ data->fan_from_reg = fan_from_reg13;
+ data->fan_from_reg_min = fan_from_reg13;
+
+ data->temp_label = nct6776_temp_label;
+ data->temp_label_num = ARRAY_SIZE(nct6776_temp_label);
+
+ data->REG_VBAT = NCT6106_REG_VBAT;
+ data->REG_DIODE = NCT6106_REG_DIODE;
+ data->DIODE_MASK = NCT6106_DIODE_MASK;
+ data->REG_VIN = NCT6106_REG_IN;
+ data->REG_IN_MINMAX[0] = NCT6106_REG_IN_MIN;
+ data->REG_IN_MINMAX[1] = NCT6106_REG_IN_MAX;
+ data->REG_TARGET = NCT6106_REG_TARGET;
+ data->REG_FAN = NCT6106_REG_FAN;
+ data->REG_FAN_MODE = NCT6106_REG_FAN_MODE;
+ data->REG_FAN_MIN = NCT6106_REG_FAN_MIN;
+ data->REG_FAN_PULSES = NCT6106_REG_FAN_PULSES;
+ data->FAN_PULSE_SHIFT = NCT6106_FAN_PULSE_SHIFT;
+ data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME;
+ data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME;
+ data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME;
+ data->REG_PWM[0] = NCT6106_REG_PWM;
+ data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT;
+ data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT;
+ data->REG_PWM[5] = NCT6106_REG_WEIGHT_DUTY_STEP;
+ data->REG_PWM[6] = NCT6106_REG_WEIGHT_DUTY_BASE;
+ data->REG_PWM_READ = NCT6106_REG_PWM_READ;
+ data->REG_PWM_MODE = NCT6106_REG_PWM_MODE;
+ data->PWM_MODE_MASK = NCT6106_PWM_MODE_MASK;
+ data->REG_AUTO_TEMP = NCT6106_REG_AUTO_TEMP;
+ data->REG_AUTO_PWM = NCT6106_REG_AUTO_PWM;
+ data->REG_CRITICAL_TEMP = NCT6106_REG_CRITICAL_TEMP;
+ data->REG_CRITICAL_TEMP_TOLERANCE
+ = NCT6106_REG_CRITICAL_TEMP_TOLERANCE;
+ data->REG_CRITICAL_PWM_ENABLE = NCT6106_REG_CRITICAL_PWM_ENABLE;
+ data->CRITICAL_PWM_ENABLE_MASK
+ = NCT6106_CRITICAL_PWM_ENABLE_MASK;
+ data->REG_CRITICAL_PWM = NCT6106_REG_CRITICAL_PWM;
+ data->REG_TEMP_OFFSET = NCT6106_REG_TEMP_OFFSET;
+ data->REG_TEMP_SOURCE = NCT6106_REG_TEMP_SOURCE;
+ data->REG_TEMP_SEL = NCT6106_REG_TEMP_SEL;
+ data->REG_WEIGHT_TEMP_SEL = NCT6106_REG_WEIGHT_TEMP_SEL;
+ data->REG_WEIGHT_TEMP[0] = NCT6106_REG_WEIGHT_TEMP_STEP;
+ data->REG_WEIGHT_TEMP[1] = NCT6106_REG_WEIGHT_TEMP_STEP_TOL;
+ data->REG_WEIGHT_TEMP[2] = NCT6106_REG_WEIGHT_TEMP_BASE;
+ data->REG_ALARM = NCT6106_REG_ALARM;
+ data->ALARM_BITS = NCT6106_ALARM_BITS;
+ data->REG_BEEP = NCT6106_REG_BEEP;
+ data->BEEP_BITS = NCT6106_BEEP_BITS;
+
+ reg_temp = NCT6106_REG_TEMP;
+ num_reg_temp = ARRAY_SIZE(NCT6106_REG_TEMP);
+ reg_temp_over = NCT6106_REG_TEMP_OVER;
+ reg_temp_hyst = NCT6106_REG_TEMP_HYST;
+ reg_temp_config = NCT6106_REG_TEMP_CONFIG;
+ reg_temp_alternate = NCT6106_REG_TEMP_ALTERNATE;
+ reg_temp_crit = NCT6106_REG_TEMP_CRIT;
+ reg_temp_crit_l = NCT6106_REG_TEMP_CRIT_L;
+ reg_temp_crit_h = NCT6106_REG_TEMP_CRIT_H;
+
+ break;
case nct6775:
data->in_num = 9;
data->pwm_num = 3;
@@ -3451,8 +3392,10 @@ static int nct6775_probe(struct platform_device *pdev)
data->has_fan_div = true;
data->temp_fixed_num = 3;
data->num_temp_alarms = 3;
+ data->num_temp_beeps = 3;
data->ALARM_BITS = NCT6775_ALARM_BITS;
+ data->BEEP_BITS = NCT6775_BEEP_BITS;
data->fan_from_reg = fan_from_reg16;
data->fan_from_reg_min = fan_from_reg8;
@@ -3466,6 +3409,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_CONFIG = NCT6775_REG_CONFIG;
data->REG_VBAT = NCT6775_REG_VBAT;
data->REG_DIODE = NCT6775_REG_DIODE;
+ data->DIODE_MASK = NCT6775_DIODE_MASK;
data->REG_VIN = NCT6775_REG_IN;
data->REG_IN_MINMAX[0] = NCT6775_REG_IN_MIN;
data->REG_IN_MINMAX[1] = NCT6775_REG_IN_MAX;
@@ -3474,6 +3418,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_FAN_MODE = NCT6775_REG_FAN_MODE;
data->REG_FAN_MIN = NCT6775_REG_FAN_MIN;
data->REG_FAN_PULSES = NCT6775_REG_FAN_PULSES;
+ data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
@@ -3499,6 +3444,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_WEIGHT_TEMP[1] = NCT6775_REG_WEIGHT_TEMP_STEP_TOL;
data->REG_WEIGHT_TEMP[2] = NCT6775_REG_WEIGHT_TEMP_BASE;
data->REG_ALARM = NCT6775_REG_ALARM;
+ data->REG_BEEP = NCT6775_REG_BEEP;
reg_temp = NCT6775_REG_TEMP;
num_reg_temp = ARRAY_SIZE(NCT6775_REG_TEMP);
@@ -3516,8 +3462,10 @@ static int nct6775_probe(struct platform_device *pdev)
data->has_fan_div = false;
data->temp_fixed_num = 3;
data->num_temp_alarms = 3;
+ data->num_temp_beeps = 6;
data->ALARM_BITS = NCT6776_ALARM_BITS;
+ data->BEEP_BITS = NCT6776_BEEP_BITS;
data->fan_from_reg = fan_from_reg13;
data->fan_from_reg_min = fan_from_reg13;
@@ -3531,6 +3479,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_CONFIG = NCT6775_REG_CONFIG;
data->REG_VBAT = NCT6775_REG_VBAT;
data->REG_DIODE = NCT6775_REG_DIODE;
+ data->DIODE_MASK = NCT6775_DIODE_MASK;
data->REG_VIN = NCT6775_REG_IN;
data->REG_IN_MINMAX[0] = NCT6775_REG_IN_MIN;
data->REG_IN_MINMAX[1] = NCT6775_REG_IN_MAX;
@@ -3539,6 +3488,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_FAN_MODE = NCT6775_REG_FAN_MODE;
data->REG_FAN_MIN = NCT6776_REG_FAN_MIN;
data->REG_FAN_PULSES = NCT6776_REG_FAN_PULSES;
+ data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
@@ -3564,6 +3514,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_WEIGHT_TEMP[1] = NCT6775_REG_WEIGHT_TEMP_STEP_TOL;
data->REG_WEIGHT_TEMP[2] = NCT6775_REG_WEIGHT_TEMP_BASE;
data->REG_ALARM = NCT6775_REG_ALARM;
+ data->REG_BEEP = NCT6776_REG_BEEP;
reg_temp = NCT6775_REG_TEMP;
num_reg_temp = ARRAY_SIZE(NCT6775_REG_TEMP);
@@ -3581,8 +3532,10 @@ static int nct6775_probe(struct platform_device *pdev)
data->has_fan_div = false;
data->temp_fixed_num = 6;
data->num_temp_alarms = 2;
+ data->num_temp_beeps = 2;
data->ALARM_BITS = NCT6779_ALARM_BITS;
+ data->BEEP_BITS = NCT6779_BEEP_BITS;
data->fan_from_reg = fan_from_reg13;
data->fan_from_reg_min = fan_from_reg13;
@@ -3596,6 +3549,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_CONFIG = NCT6775_REG_CONFIG;
data->REG_VBAT = NCT6775_REG_VBAT;
data->REG_DIODE = NCT6775_REG_DIODE;
+ data->DIODE_MASK = NCT6775_DIODE_MASK;
data->REG_VIN = NCT6779_REG_IN;
data->REG_IN_MINMAX[0] = NCT6775_REG_IN_MIN;
data->REG_IN_MINMAX[1] = NCT6775_REG_IN_MAX;
@@ -3604,6 +3558,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_FAN_MODE = NCT6775_REG_FAN_MODE;
data->REG_FAN_MIN = NCT6776_REG_FAN_MIN;
data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
+ data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
@@ -3621,6 +3576,10 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_CRITICAL_TEMP = NCT6775_REG_CRITICAL_TEMP;
data->REG_CRITICAL_TEMP_TOLERANCE
= NCT6775_REG_CRITICAL_TEMP_TOLERANCE;
+ data->REG_CRITICAL_PWM_ENABLE = NCT6779_REG_CRITICAL_PWM_ENABLE;
+ data->CRITICAL_PWM_ENABLE_MASK
+ = NCT6779_CRITICAL_PWM_ENABLE_MASK;
+ data->REG_CRITICAL_PWM = NCT6779_REG_CRITICAL_PWM;
data->REG_TEMP_OFFSET = NCT6779_REG_TEMP_OFFSET;
data->REG_TEMP_SOURCE = NCT6775_REG_TEMP_SOURCE;
data->REG_TEMP_SEL = NCT6775_REG_TEMP_SEL;
@@ -3629,6 +3588,81 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_WEIGHT_TEMP[1] = NCT6775_REG_WEIGHT_TEMP_STEP_TOL;
data->REG_WEIGHT_TEMP[2] = NCT6775_REG_WEIGHT_TEMP_BASE;
data->REG_ALARM = NCT6779_REG_ALARM;
+ data->REG_BEEP = NCT6776_REG_BEEP;
+
+ reg_temp = NCT6779_REG_TEMP;
+ num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP);
+ reg_temp_over = NCT6779_REG_TEMP_OVER;
+ reg_temp_hyst = NCT6779_REG_TEMP_HYST;
+ reg_temp_config = NCT6779_REG_TEMP_CONFIG;
+ reg_temp_alternate = NCT6779_REG_TEMP_ALTERNATE;
+ reg_temp_crit = NCT6779_REG_TEMP_CRIT;
+
+ break;
+ case nct6791:
+ data->in_num = 15;
+ data->pwm_num = 6;
+ data->auto_pwm_num = 4;
+ data->has_fan_div = false;
+ data->temp_fixed_num = 6;
+ data->num_temp_alarms = 2;
+ data->num_temp_beeps = 2;
+
+ data->ALARM_BITS = NCT6791_ALARM_BITS;
+ data->BEEP_BITS = NCT6779_BEEP_BITS;
+
+ data->fan_from_reg = fan_from_reg13;
+ data->fan_from_reg_min = fan_from_reg13;
+ data->target_temp_mask = 0xff;
+ data->tolerance_mask = 0x07;
+ data->speed_tolerance_limit = 63;
+
+ data->temp_label = nct6779_temp_label;
+ data->temp_label_num = ARRAY_SIZE(nct6779_temp_label);
+
+ data->REG_CONFIG = NCT6775_REG_CONFIG;
+ data->REG_VBAT = NCT6775_REG_VBAT;
+ data->REG_DIODE = NCT6775_REG_DIODE;
+ data->DIODE_MASK = NCT6775_DIODE_MASK;
+ data->REG_VIN = NCT6779_REG_IN;
+ data->REG_IN_MINMAX[0] = NCT6775_REG_IN_MIN;
+ data->REG_IN_MINMAX[1] = NCT6775_REG_IN_MAX;
+ data->REG_TARGET = NCT6775_REG_TARGET;
+ data->REG_FAN = NCT6779_REG_FAN;
+ data->REG_FAN_MODE = NCT6775_REG_FAN_MODE;
+ data->REG_FAN_MIN = NCT6776_REG_FAN_MIN;
+ data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
+ data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
+ data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
+ data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
+ data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
+ data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
+ data->REG_PWM[0] = NCT6775_REG_PWM;
+ data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
+ data->REG_PWM[2] = NCT6775_REG_FAN_STOP_OUTPUT;
+ data->REG_PWM[5] = NCT6775_REG_WEIGHT_DUTY_STEP;
+ data->REG_PWM[6] = NCT6776_REG_WEIGHT_DUTY_BASE;
+ data->REG_PWM_READ = NCT6775_REG_PWM_READ;
+ data->REG_PWM_MODE = NCT6776_REG_PWM_MODE;
+ data->PWM_MODE_MASK = NCT6776_PWM_MODE_MASK;
+ data->REG_AUTO_TEMP = NCT6775_REG_AUTO_TEMP;
+ data->REG_AUTO_PWM = NCT6775_REG_AUTO_PWM;
+ data->REG_CRITICAL_TEMP = NCT6775_REG_CRITICAL_TEMP;
+ data->REG_CRITICAL_TEMP_TOLERANCE
+ = NCT6775_REG_CRITICAL_TEMP_TOLERANCE;
+ data->REG_CRITICAL_PWM_ENABLE = NCT6779_REG_CRITICAL_PWM_ENABLE;
+ data->CRITICAL_PWM_ENABLE_MASK
+ = NCT6779_CRITICAL_PWM_ENABLE_MASK;
+ data->REG_CRITICAL_PWM = NCT6779_REG_CRITICAL_PWM;
+ data->REG_TEMP_OFFSET = NCT6779_REG_TEMP_OFFSET;
+ data->REG_TEMP_SOURCE = NCT6775_REG_TEMP_SOURCE;
+ data->REG_TEMP_SEL = NCT6775_REG_TEMP_SEL;
+ data->REG_WEIGHT_TEMP_SEL = NCT6775_REG_WEIGHT_TEMP_SEL;
+ data->REG_WEIGHT_TEMP[0] = NCT6775_REG_WEIGHT_TEMP_STEP;
+ data->REG_WEIGHT_TEMP[1] = NCT6775_REG_WEIGHT_TEMP_STEP_TOL;
+ data->REG_WEIGHT_TEMP[2] = NCT6775_REG_WEIGHT_TEMP_BASE;
+ data->REG_ALARM = NCT6791_REG_ALARM;
+ data->REG_BEEP = NCT6776_REG_BEEP;
reg_temp = NCT6779_REG_TEMP;
num_reg_temp = ARRAY_SIZE(NCT6779_REG_TEMP);
@@ -3700,6 +3734,13 @@ static int nct6775_probe(struct platform_device *pdev)
data->reg_temp[0][src - 1] = reg_temp[i];
data->reg_temp[1][src - 1] = reg_temp_over[i];
data->reg_temp[2][src - 1] = reg_temp_hyst[i];
+ if (reg_temp_crit_h && reg_temp_crit_h[i])
+ data->reg_temp[3][src - 1] = reg_temp_crit_h[i];
+ else if (reg_temp_crit[src - 1])
+ data->reg_temp[3][src - 1]
+ = reg_temp_crit[src - 1];
+ if (reg_temp_crit_l && reg_temp_crit_l[i])
+ data->reg_temp[4][src - 1] = reg_temp_crit_l[i];
data->reg_temp_config[src - 1] = reg_temp_config[i];
data->temp_src[src - 1] = src;
continue;
@@ -3714,8 +3755,12 @@ static int nct6775_probe(struct platform_device *pdev)
data->reg_temp[1][s] = reg_temp_over[i];
data->reg_temp[2][s] = reg_temp_hyst[i];
data->reg_temp_config[s] = reg_temp_config[i];
- if (reg_temp_crit[src - 1])
+ if (reg_temp_crit_h && reg_temp_crit_h[i])
+ data->reg_temp[3][s] = reg_temp_crit_h[i];
+ else if (reg_temp_crit[src - 1])
data->reg_temp[3][s] = reg_temp_crit[src - 1];
+ if (reg_temp_crit_l && reg_temp_crit_l[i])
+ data->reg_temp[4][s] = reg_temp_crit_l[i];
data->temp_src[s] = src;
s++;
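
The limit-register mapping just above prefers a dedicated per-channel critical-high register and only falls back to the shared per-source critical register when no dedicated one exists; a critical-low limit is recorded only on chips that provide reg_temp_crit_l. A minimal sketch of that precedence (an illustrative helper, not part of the patch; kernel u16 type assumed):

	#include <linux/types.h>

	/*
	 * Illustrative only: condenses the precedence applied above. A return
	 * value of 0 means the chip has no critical limit for this source and
	 * the corresponding reg_temp slot is simply left unset.
	 */
	static u16 pick_temp_crit_reg(const u16 *crit_h, const u16 *crit,
				      int i, int src)
	{
		if (crit_h && crit_h[i])	/* dedicated per-channel register */
			return crit_h[i];
		return crit[src - 1];		/* shared per-source fallback */
	}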
@@ -3767,12 +3812,14 @@ static int nct6775_probe(struct platform_device *pdev)
cr2a = superio_inb(sio_data->sioreg, 0x2a);
switch (data->kind) {
case nct6775:
- have_vid = (cr2a & 0x40);
+ data->have_vid = (cr2a & 0x40);
break;
case nct6776:
- have_vid = (cr2a & 0x60) == 0x40;
+ data->have_vid = (cr2a & 0x60) == 0x40;
break;
+ case nct6106:
case nct6779:
+ case nct6791:
break;
}
@@ -3780,7 +3827,7 @@ static int nct6775_probe(struct platform_device *pdev)
* Read VID value
* We can get the VID input values directly at logical device D 0xe3.
*/
- if (have_vid) {
+ if (data->have_vid) {
superio_select(sio_data->sioreg, NCT6775_LD_VID);
data->vid = superio_inb(sio_data->sioreg, 0xe3);
data->vrm = vid_which_vrm();
@@ -3793,6 +3840,9 @@ static int nct6775_probe(struct platform_device *pdev)
tmp = superio_inb(sio_data->sioreg,
NCT6775_REG_CR_FAN_DEBOUNCE);
switch (data->kind) {
+ case nct6106:
+ tmp |= 0xe0;
+ break;
case nct6775:
tmp |= 0x1e;
break;
@@ -3800,6 +3850,9 @@ static int nct6775_probe(struct platform_device *pdev)
case nct6779:
tmp |= 0x3e;
break;
+ case nct6791:
+ tmp |= 0x7e;
+ break;
}
superio_outb(sio_data->sioreg, NCT6775_REG_CR_FAN_DEBOUNCE,
tmp);
@@ -3807,157 +3860,47 @@ static int nct6775_probe(struct platform_device *pdev)
data->name);
}
- superio_exit(sio_data->sioreg);
-
- if (have_vid) {
- err = device_create_file(dev, &dev_attr_cpu0_vid);
- if (err)
- return err;
- }
+ nct6775_check_fan_inputs(data);
- err = nct6775_check_fan_inputs(sio_data, data);
- if (err)
- goto exit_remove;
+ superio_exit(sio_data->sioreg);
/* Read fan clock dividers immediately */
nct6775_init_fan_common(dev, data);
/* Register sysfs hooks */
- for (i = 0; i < data->pwm_num; i++) {
- if (!(data->has_pwm & (1 << i)))
- continue;
-
- err = sysfs_create_group(&dev->kobj, &nct6775_group_pwm[i]);
- if (err)
- goto exit_remove;
-
- if (data->REG_PWM[3]) {
- err = device_create_file(dev,
- &sda_pwm_max[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (data->REG_PWM[4]) {
- err = device_create_file(dev,
- &sda_pwm_step[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (data->REG_PWM[6]) {
- err = device_create_file(dev,
- &sda_weight_duty_base[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- }
- for (i = 0; i < ARRAY_SIZE(sda_auto_pwm_arrays); i++) {
- struct sensor_device_attribute_2 *attr =
- &sda_auto_pwm_arrays[i];
-
- if (!(data->has_pwm & (1 << attr->nr)))
- continue;
- if (attr->index > data->auto_pwm_num)
- continue;
- err = device_create_file(dev, &attr->dev_attr);
- if (err)
- goto exit_remove;
- }
-
- for (i = 0; i < data->in_num; i++) {
- if (!(data->have_in & (1 << i)))
- continue;
- err = sysfs_create_group(&dev->kobj, &nct6775_group_in[i]);
- if (err)
- goto exit_remove;
+ group = nct6775_create_attr_group(dev, &nct6775_pwm_template_group,
+ data->pwm_num);
+ if (IS_ERR(group)) {
+ err = PTR_ERR(group);
+ goto exit_remove;
}
+ data->group_pwm = group;
- for (i = 0; i < 5; i++) {
- if (data->has_fan & (1 << i)) {
- err = device_create_file(dev,
- &sda_fan_input[i].dev_attr);
- if (err)
- goto exit_remove;
- if (data->ALARM_BITS[FAN_ALARM_BASE + i] >= 0) {
- err = device_create_file(dev,
- &sda_fan_alarm[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (data->kind != nct6776 &&
- data->kind != nct6779) {
- err = device_create_file(dev,
- &sda_fan_div[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (data->has_fan_min & (1 << i)) {
- err = device_create_file(dev,
- &sda_fan_min[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- err = device_create_file(dev,
- &sda_fan_pulses[i].dev_attr);
- if (err)
- goto exit_remove;
- }
+ group = nct6775_create_attr_group(dev, &nct6775_in_template_group,
+ fls(data->have_in));
+ if (IS_ERR(group)) {
+ err = PTR_ERR(group);
+ goto exit_remove;
}
+ data->group_in = group;
- for (i = 0; i < NUM_TEMP; i++) {
- if (!(data->have_temp & (1 << i)))
- continue;
- err = device_create_file(dev, &sda_temp_input[i].dev_attr);
- if (err)
- goto exit_remove;
- if (data->temp_label) {
- err = device_create_file(dev,
- &sda_temp_label[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (data->reg_temp[1][i]) {
- err = device_create_file(dev,
- &sda_temp_max[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (data->reg_temp[2][i]) {
- err = device_create_file(dev,
- &sda_temp_max_hyst[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (data->reg_temp[3][i]) {
- err = device_create_file(dev,
- &sda_temp_crit[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (find_temp_source(data, i, data->num_temp_alarms) >= 0) {
- err = device_create_file(dev,
- &sda_temp_alarm[i].dev_attr);
- if (err)
- goto exit_remove;
- }
- if (!(data->have_temp_fixed & (1 << i)))
- continue;
- err = device_create_file(dev, &sda_temp_type[i].dev_attr);
- if (err)
- goto exit_remove;
- err = device_create_file(dev, &sda_temp_offset[i].dev_attr);
- if (err)
- goto exit_remove;
+ group = nct6775_create_attr_group(dev, &nct6775_fan_template_group,
+ fls(data->has_fan));
+ if (IS_ERR(group)) {
+ err = PTR_ERR(group);
+ goto exit_remove;
}
+ data->group_fan = group;
- for (i = 0; i < ARRAY_SIZE(sda_caseopen); i++) {
- if (data->ALARM_BITS[INTRUSION_ALARM_BASE + i] < 0)
- continue;
- err = device_create_file(dev, &sda_caseopen[i].dev_attr);
- if (err)
- goto exit_remove;
+ group = nct6775_create_attr_group(dev, &nct6775_temp_template_group,
+ fls(data->have_temp));
+ if (IS_ERR(group)) {
+ err = PTR_ERR(group);
+ goto exit_remove;
}
+ data->group_temp = group;
- err = device_create_file(dev, &dev_attr_name);
+ err = sysfs_create_group(&dev->kobj, &nct6775_group_other);
if (err)
goto exit_remove;
@@ -3988,11 +3931,10 @@ static int nct6775_remove(struct platform_device *pdev)
static int nct6775_suspend(struct device *dev)
{
struct nct6775_data *data = nct6775_update_device(dev);
- struct nct6775_sio_data *sio_data = dev->platform_data;
mutex_lock(&data->update_lock);
data->vbat = nct6775_read_value(data, data->REG_VBAT);
- if (sio_data->kind == nct6775) {
+ if (data->kind == nct6775) {
data->fandiv1 = nct6775_read_value(data, NCT6775_REG_FANDIV1);
data->fandiv2 = nct6775_read_value(data, NCT6775_REG_FANDIV2);
}
@@ -4004,7 +3946,6 @@ static int nct6775_suspend(struct device *dev)
static int nct6775_resume(struct device *dev)
{
struct nct6775_data *data = dev_get_drvdata(dev);
- struct nct6775_sio_data *sio_data = dev->platform_data;
int i, j;
mutex_lock(&data->update_lock);
@@ -4041,7 +3982,7 @@ static int nct6775_resume(struct device *dev)
/* Restore other settings */
nct6775_write_value(data, data->REG_VBAT, data->vbat);
- if (sio_data->kind == nct6775) {
+ if (data->kind == nct6775) {
nct6775_write_value(data, NCT6775_REG_FANDIV1, data->fandiv1);
nct6775_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2);
}
@@ -4056,6 +3997,8 @@ static int nct6775_resume(struct device *dev)
static const struct dev_pm_ops nct6775_dev_pm_ops = {
.suspend = nct6775_suspend,
.resume = nct6775_resume,
+ .freeze = nct6775_suspend,
+ .restore = nct6775_resume,
};
#define NCT6775_DEV_PM_OPS (&nct6775_dev_pm_ops)
@@ -4074,17 +4017,19 @@ static struct platform_driver nct6775_driver = {
};
static const char * const nct6775_sio_names[] __initconst = {
+ "NCT6106D",
"NCT6775F",
"NCT6776D/F",
"NCT6779D",
+ "NCT6791D",
};
/* nct6775_find() looks for a '627 in the Super-I/O config space */
-static int __init nct6775_find(int sioaddr, unsigned short *addr,
- struct nct6775_sio_data *sio_data)
+static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
{
u16 val;
int err;
+ int addr;
err = superio_enter(sioaddr);
if (err)
@@ -4096,6 +4041,9 @@ static int __init nct6775_find(int sioaddr, unsigned short *addr,
val = (superio_inb(sioaddr, SIO_REG_DEVID) << 8)
| superio_inb(sioaddr, SIO_REG_DEVID + 1);
switch (val & SIO_ID_MASK) {
+ case SIO_NCT6106_ID:
+ sio_data->kind = nct6106;
+ break;
case SIO_NCT6775_ID:
sio_data->kind = nct6775;
break;
@@ -4105,6 +4053,9 @@ static int __init nct6775_find(int sioaddr, unsigned short *addr,
case SIO_NCT6779_ID:
sio_data->kind = nct6779;
break;
+ case SIO_NCT6791_ID:
+ sio_data->kind = nct6791;
+ break;
default:
if (val != 0xffff)
pr_debug("unsupported chip ID: 0x%04x\n", val);
@@ -4116,8 +4067,8 @@ static int __init nct6775_find(int sioaddr, unsigned short *addr,
superio_select(sioaddr, NCT6775_LD_HWM);
val = (superio_inb(sioaddr, SIO_REG_ADDR) << 8)
| superio_inb(sioaddr, SIO_REG_ADDR + 1);
- *addr = val & IOREGION_ALIGNMENT;
- if (*addr == 0) {
+ addr = val & IOREGION_ALIGNMENT;
+ if (addr == 0) {
pr_err("Refusing to enable a Super-I/O device with a base I/O port 0\n");
superio_exit(sioaddr);
return -ENODEV;
@@ -4129,13 +4080,22 @@ static int __init nct6775_find(int sioaddr, unsigned short *addr,
pr_warn("Forcibly enabling Super-I/O. Sensor is probably unusable.\n");
superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
}
+ if (sio_data->kind == nct6791) {
+ val = superio_inb(sioaddr, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE);
+ if (val & 0x10) {
+ pr_info("Enabling hardware monitor logical device mappings.\n");
+ superio_outb(sioaddr,
+ NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE,
+ val & ~0x10);
+ }
+ }
superio_exit(sioaddr);
- pr_info("Found %s or compatible chip at %#x\n",
- nct6775_sio_names[sio_data->kind], *addr);
+ pr_info("Found %s or compatible chip at %#x:%#x\n",
+ nct6775_sio_names[sio_data->kind], sioaddr, addr);
sio_data->sioreg = sioaddr;
- return 0;
+ return addr;
}
/*
@@ -4144,14 +4104,20 @@ static int __init nct6775_find(int sioaddr, unsigned short *addr,
* track of the nct6775 driver. But since we platform_device_alloc(), we
* must keep track of the device
*/
-static struct platform_device *pdev;
+static struct platform_device *pdev[2];
static int __init sensors_nct6775_init(void)
{
- int err;
- unsigned short address;
+ int i, err;
+ bool found = false;
+ int address;
struct resource res;
struct nct6775_sio_data sio_data;
+ int sioaddr[2] = { 0x2e, 0x4e };
+
+ err = platform_driver_register(&nct6775_driver);
+ if (err)
+ return err;
/*
* initialize sio_data->kind and sio_data->sioreg.
@@ -4160,64 +4126,71 @@ static int __init sensors_nct6775_init(void)
* driver will probe 0x2e and 0x4e and auto-detect the presence of a
* nct6775 hardware monitor, and call probe()
*/
- if (nct6775_find(0x2e, &address, &sio_data) &&
- nct6775_find(0x4e, &address, &sio_data))
- return -ENODEV;
-
- err = platform_driver_register(&nct6775_driver);
- if (err)
- goto exit;
+ for (i = 0; i < ARRAY_SIZE(pdev); i++) {
+ address = nct6775_find(sioaddr[i], &sio_data);
+ if (address <= 0)
+ continue;
- pdev = platform_device_alloc(DRVNAME, address);
- if (!pdev) {
- err = -ENOMEM;
- pr_err("Device allocation failed\n");
- goto exit_unregister;
- }
+ found = true;
- err = platform_device_add_data(pdev, &sio_data,
- sizeof(struct nct6775_sio_data));
- if (err) {
- pr_err("Platform data allocation failed\n");
- goto exit_device_put;
- }
+ pdev[i] = platform_device_alloc(DRVNAME, address);
+ if (!pdev[i]) {
+ err = -ENOMEM;
+ goto exit_device_put;
+ }
- memset(&res, 0, sizeof(res));
- res.name = DRVNAME;
- res.start = address + IOREGION_OFFSET;
- res.end = address + IOREGION_OFFSET + IOREGION_LENGTH - 1;
- res.flags = IORESOURCE_IO;
+ err = platform_device_add_data(pdev[i], &sio_data,
+ sizeof(struct nct6775_sio_data));
+ if (err)
+ goto exit_device_put;
+
+ memset(&res, 0, sizeof(res));
+ res.name = DRVNAME;
+ res.start = address + IOREGION_OFFSET;
+ res.end = address + IOREGION_OFFSET + IOREGION_LENGTH - 1;
+ res.flags = IORESOURCE_IO;
+
+ err = acpi_check_resource_conflict(&res);
+ if (err) {
+ platform_device_put(pdev[i]);
+ pdev[i] = NULL;
+ continue;
+ }
- err = acpi_check_resource_conflict(&res);
- if (err)
- goto exit_device_put;
+ err = platform_device_add_resources(pdev[i], &res, 1);
+ if (err)
+ goto exit_device_put;
- err = platform_device_add_resources(pdev, &res, 1);
- if (err) {
- pr_err("Device resource addition failed (%d)\n", err);
- goto exit_device_put;
+ /* platform_device_add calls probe() */
+ err = platform_device_add(pdev[i]);
+ if (err)
+ goto exit_device_put;
}
-
- /* platform_device_add calls probe() */
- err = platform_device_add(pdev);
- if (err) {
- pr_err("Device addition failed (%d)\n", err);
- goto exit_device_put;
+ if (!found) {
+ err = -ENODEV;
+ goto exit_unregister;
}
return 0;
exit_device_put:
- platform_device_put(pdev);
+ for (i = 0; i < ARRAY_SIZE(pdev); i++) {
+ if (pdev[i])
+ platform_device_put(pdev[i]);
+ }
exit_unregister:
platform_driver_unregister(&nct6775_driver);
-exit:
return err;
}
static void __exit sensors_nct6775_exit(void)
{
- platform_device_unregister(pdev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pdev); i++) {
+ if (pdev[i])
+ platform_device_unregister(pdev[i]);
+ }
platform_driver_unregister(&nct6775_driver);
}
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 830a842d796..8c23203915a 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -424,7 +424,7 @@ static int ntc_thermistor_probe(struct platform_device *pdev)
if (IS_ERR(pdata))
return PTR_ERR(pdata);
else if (pdata == NULL)
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "No platform init data supplied.\n");
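
The dev_get_platdata() conversions here and in the following hwmon and i2c hunks are purely mechanical; the helper just wraps the direct field access it replaces. For reference, the accessor in <linux/device.h> is essentially:

	static inline void *dev_get_platdata(const struct device *dev)
	{
		return dev->platform_data;
	}

so pdata = dev_get_platdata(&pdev->dev) behaves exactly like the old pdata = pdev->dev.platform_data.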
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index ea606860d2b..6e6ea4437bb 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -983,7 +983,7 @@ static int pc87427_request_regions(struct platform_device *pdev,
static void pc87427_init_device(struct device *dev)
{
- struct pc87427_sio_data *sio_data = dev->platform_data;
+ struct pc87427_sio_data *sio_data = dev_get_platdata(dev);
struct pc87427_data *data = dev_get_drvdata(dev);
int i;
u8 reg;
@@ -1075,7 +1075,7 @@ static void pc87427_remove_files(struct device *dev)
static int pc87427_probe(struct platform_device *pdev)
{
- struct pc87427_sio_data *sio_data = pdev->dev.platform_data;
+ struct pc87427_sio_data *sio_data = dev_get_platdata(&pdev->dev);
struct pc87427_data *data;
int i, err, res_count;
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 9add60920ac..9319fcf142d 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -1726,7 +1726,7 @@ int pmbus_do_probe(struct i2c_client *client, const struct i2c_device_id *id,
struct pmbus_driver_info *info)
{
struct device *dev = &client->dev;
- const struct pmbus_platform_data *pdata = dev->platform_data;
+ const struct pmbus_platform_data *pdata = dev_get_platdata(dev);
struct pmbus_data *data;
int ret;
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index a9f7e804f1e..73bd64e8c30 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -165,7 +165,7 @@ static ssize_t s3c_hwmon_ch_show(struct device *dev,
{
struct sensor_device_attribute *sen_attr = to_sensor_dev_attr(attr);
struct s3c_hwmon *hwmon = platform_get_drvdata(to_platform_device(dev));
- struct s3c_hwmon_pdata *pdata = dev->platform_data;
+ struct s3c_hwmon_pdata *pdata = dev_get_platdata(dev);
struct s3c_hwmon_chcfg *cfg;
int ret;
@@ -194,7 +194,7 @@ static ssize_t s3c_hwmon_label_show(struct device *dev,
char *buf)
{
struct sensor_device_attribute *sen_attr = to_sensor_dev_attr(attr);
- struct s3c_hwmon_pdata *pdata = dev->platform_data;
+ struct s3c_hwmon_pdata *pdata = dev_get_platdata(dev);
struct s3c_hwmon_chcfg *cfg;
cfg = pdata->in[sen_attr->index];
@@ -274,7 +274,7 @@ static void s3c_hwmon_remove_attr(struct device *dev,
*/
static int s3c_hwmon_probe(struct platform_device *dev)
{
- struct s3c_hwmon_pdata *pdata = dev->dev.platform_data;
+ struct s3c_hwmon_pdata *pdata = dev_get_platdata(&dev->dev);
struct s3c_hwmon *hwmon;
int ret = 0;
int i;
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index 2507f902fb7..97cd45a8432 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -940,11 +940,11 @@ static int sht15_probe(struct platform_device *pdev)
data->dev = &pdev->dev;
init_waitqueue_head(&data->wait_queue);
- if (pdev->dev.platform_data == NULL) {
+ if (dev_get_platdata(&pdev->dev) == NULL) {
dev_err(&pdev->dev, "no platform data supplied\n");
return -EINVAL;
}
- data->pdata = pdev->dev.platform_data;
+ data->pdata = dev_get_platdata(&pdev->dev);
data->supply_uv = data->pdata->supply_mv * 1000;
if (data->pdata->checksum)
data->checksumming = true;
@@ -957,7 +957,7 @@ static int sht15_probe(struct platform_device *pdev)
* If a regulator is available,
* query what the supply voltage actually is!
*/
- data->reg = devm_regulator_get(data->dev, "vcc");
+ data->reg = devm_regulator_get_optional(data->dev, "vcc");
if (!IS_ERR(data->reg)) {
int voltage;
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index 6d8255ccf07..05cb814539c 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -668,7 +668,7 @@ static void smsc47m1_remove_files(struct device *dev)
static int __init smsc47m1_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct smsc47m1_sio_data *sio_data = dev->platform_data;
+ struct smsc47m1_sio_data *sio_data = dev_get_platdata(dev);
struct smsc47m1_data *data;
struct resource *res;
int err;
@@ -940,7 +940,7 @@ exit_device:
static void __exit sm_smsc47m1_exit(void)
{
platform_driver_unregister(&smsc47m1_driver);
- smsc47m1_restore(pdev->dev.platform_data);
+ smsc47m1_restore(dev_get_platdata(&pdev->dev));
platform_device_unregister(pdev);
}
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 004801e6fbb..23ff210513d 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -673,7 +673,7 @@ static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr)
static void w83627ehf_write_fan_div_common(struct device *dev,
struct w83627ehf_data *data, int nr)
{
- struct w83627ehf_sio_data *sio_data = dev->platform_data;
+ struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
if (sio_data->kind == nct6776)
; /* no dividers, do nothing */
@@ -724,7 +724,7 @@ static void w83627ehf_update_fan_div(struct w83627ehf_data *data)
static void w83627ehf_update_fan_div_common(struct device *dev,
struct w83627ehf_data *data)
{
- struct w83627ehf_sio_data *sio_data = dev->platform_data;
+ struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
if (sio_data->kind == nct6776)
; /* no dividers, do nothing */
@@ -781,7 +781,7 @@ static void w83627ehf_update_pwm(struct w83627ehf_data *data)
static void w83627ehf_update_pwm_common(struct device *dev,
struct w83627ehf_data *data)
{
- struct w83627ehf_sio_data *sio_data = dev->platform_data;
+ struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
if (sio_data->kind == nct6775 || sio_data->kind == nct6776)
nct6775_update_pwm(data);
@@ -792,7 +792,7 @@ static void w83627ehf_update_pwm_common(struct device *dev,
static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct w83627ehf_sio_data *sio_data = dev->platform_data;
+ struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
int i;
@@ -1392,7 +1392,7 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
- struct w83627ehf_sio_data *sio_data = dev->platform_data;
+ struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
int nr = sensor_attr->index;
unsigned long val;
int err;
@@ -1448,7 +1448,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct w83627ehf_sio_data *sio_data = dev->platform_data;
+ struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
unsigned long val;
@@ -1527,7 +1527,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct w83627ehf_sio_data *sio_data = dev->platform_data;
+ struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
u16 reg;
@@ -2065,7 +2065,7 @@ w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data,
static int w83627ehf_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct w83627ehf_sio_data *sio_data = dev->platform_data;
+ struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
struct w83627ehf_data *data;
struct resource *res;
u8 en_vrm10;
@@ -2618,7 +2618,7 @@ static int w83627ehf_remove(struct platform_device *pdev)
static int w83627ehf_suspend(struct device *dev)
{
struct w83627ehf_data *data = w83627ehf_update_device(dev);
- struct w83627ehf_sio_data *sio_data = dev->platform_data;
+ struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
mutex_lock(&data->update_lock);
data->vbat = w83627ehf_read_value(data, W83627EHF_REG_VBAT);
@@ -2634,7 +2634,7 @@ static int w83627ehf_suspend(struct device *dev)
static int w83627ehf_resume(struct device *dev)
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
- struct w83627ehf_sio_data *sio_data = dev->platform_data;
+ struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev);
int i;
mutex_lock(&data->update_lock);
@@ -2694,6 +2694,8 @@ static int w83627ehf_resume(struct device *dev)
static const struct dev_pm_ops w83627ehf_dev_pm_ops = {
.suspend = w83627ehf_suspend,
.resume = w83627ehf_resume,
+ .freeze = w83627ehf_suspend,
+ .restore = w83627ehf_resume,
};
#define W83627EHF_DEV_PM_OPS (&w83627ehf_dev_pm_ops)
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 3b9ef2d2345..cb9cd326ecb 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -1415,7 +1415,7 @@ static const struct attribute_group w83627hf_group_opt = {
static int w83627hf_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct w83627hf_sio_data *sio_data = dev->platform_data;
+ struct w83627hf_sio_data *sio_data = dev_get_platdata(dev);
struct w83627hf_data *data;
struct resource *res;
int err, i;
@@ -1636,7 +1636,7 @@ static int w83627hf_read_value(struct w83627hf_data *data, u16 reg)
static int w83627thf_read_gpio5(struct platform_device *pdev)
{
- struct w83627hf_sio_data *sio_data = pdev->dev.platform_data;
+ struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
int res = 0xff, sel;
superio_enter(sio_data);
@@ -1669,7 +1669,7 @@ exit:
static int w83687thf_read_vid(struct platform_device *pdev)
{
- struct w83627hf_sio_data *sio_data = pdev->dev.platform_data;
+ struct w83627hf_sio_data *sio_data = dev_get_platdata(&pdev->dev);
int res = 0xff;
superio_enter(sio_data);
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index dc6dea614ab..fcdd321f709 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -385,7 +385,7 @@ config I2C_CPM
config I2C_DAVINCI
tristate "DaVinci I2C driver"
- depends on ARCH_DAVINCI
+ depends on ARCH_DAVINCI || ARCH_KEYSTONE
help
Support for TI DaVinci I2C controller driver.
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 6bb839b688b..fd059308aff 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -28,7 +28,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_i2c.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/platform_data/dma-atmel.h>
@@ -775,8 +774,6 @@ static int at91_twi_probe(struct platform_device *pdev)
return rc;
}
- of_i2c_register_devices(&dev->adapter);
-
dev_info(dev->dev, "AT91 i2c bus driver.\n");
return 0;
}
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index 13ea1c29873..35a473ba3d8 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -582,6 +582,7 @@ static struct i2c_algorithm bfin_twi_algorithm = {
.functionality = bfin_twi_functionality,
};
+#ifdef CONFIG_PM_SLEEP
static int i2c_bfin_twi_suspend(struct device *dev)
{
struct bfin_twi_iface *iface = dev_get_drvdata(dev);
@@ -619,6 +620,10 @@ static int i2c_bfin_twi_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(i2c_bfin_twi_pm,
i2c_bfin_twi_suspend, i2c_bfin_twi_resume);
+#define I2C_BFIN_TWI_PM_OPS (&i2c_bfin_twi_pm)
+#else
+#define I2C_BFIN_TWI_PM_OPS NULL
+#endif
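
The #ifdef added above is the usual CONFIG_PM_SLEEP idiom: the suspend/resume callbacks and the dev_pm_ops are compiled only when sleep states are enabled, and the driver points at either the ops or NULL so nothing dangles otherwise. A generic sketch of the pattern, with placeholder foo_* names that are not part of this patch:

	#include <linux/module.h>
	#include <linux/platform_device.h>
	#include <linux/pm.h>

	#ifdef CONFIG_PM_SLEEP
	static int foo_suspend(struct device *dev) { /* quiesce hardware */ return 0; }
	static int foo_resume(struct device *dev)  { /* reprogram hardware */ return 0; }
	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
	#define FOO_PM_OPS (&foo_pm_ops)
	#else
	#define FOO_PM_OPS NULL
	#endif

	static struct platform_driver foo_driver = {
		.driver = {
			.name	= "foo",
			.owner	= THIS_MODULE,
			.pm	= FOO_PM_OPS,
		},
	};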
static int i2c_bfin_twi_probe(struct platform_device *pdev)
{
@@ -669,8 +674,9 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
p_adap->timeout = 5 * HZ;
p_adap->retries = 3;
- rc = peripheral_request_list((unsigned short *)pdev->dev.platform_data,
- "i2c-bfin-twi");
+ rc = peripheral_request_list(
+ (unsigned short *)dev_get_platdata(&pdev->dev),
+ "i2c-bfin-twi");
if (rc) {
dev_err(&pdev->dev, "Can't setup pin mux!\n");
goto out_error_pin_mux;
@@ -717,7 +723,7 @@ out_error_add_adapter:
free_irq(iface->irq, iface);
out_error_req_irq:
out_error_no_irq:
- peripheral_free_list((unsigned short *)pdev->dev.platform_data);
+ peripheral_free_list((unsigned short *)dev_get_platdata(&pdev->dev));
out_error_pin_mux:
iounmap(iface->regs_base);
out_error_ioremap:
@@ -733,7 +739,7 @@ static int i2c_bfin_twi_remove(struct platform_device *pdev)
i2c_del_adapter(&(iface->adap));
free_irq(iface->irq, iface);
- peripheral_free_list((unsigned short *)pdev->dev.platform_data);
+ peripheral_free_list((unsigned short *)dev_get_platdata(&pdev->dev));
iounmap(iface->regs_base);
kfree(iface);
@@ -746,7 +752,7 @@ static struct platform_driver i2c_bfin_twi_driver = {
.driver = {
.name = "i2c-bfin-twi",
.owner = THIS_MODULE,
- .pm = &i2c_bfin_twi_pm,
+ .pm = I2C_BFIN_TWI_PM_OPS,
},
};
diff --git a/drivers/i2c/busses/i2c-cbus-gpio.c b/drivers/i2c/busses/i2c-cbus-gpio.c
index 1be13ac11dc..2d46f13adfd 100644
--- a/drivers/i2c/busses/i2c-cbus-gpio.c
+++ b/drivers/i2c/busses/i2c-cbus-gpio.c
@@ -233,8 +233,9 @@ static int cbus_i2c_probe(struct platform_device *pdev)
chost->clk_gpio = of_get_gpio(dnode, 0);
chost->dat_gpio = of_get_gpio(dnode, 1);
chost->sel_gpio = of_get_gpio(dnode, 2);
- } else if (pdev->dev.platform_data) {
- struct i2c_cbus_platform_data *pdata = pdev->dev.platform_data;
+ } else if (dev_get_platdata(&pdev->dev)) {
+ struct i2c_cbus_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
chost->clk_gpio = pdata->clk_gpio;
chost->dat_gpio = pdata->dat_gpio;
chost->sel_gpio = pdata->sel_gpio;
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 2e1f7eb55bf..b2b8aa9adc0 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -42,7 +42,6 @@
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
-#include <linux/of_i2c.h>
#include <sysdev/fsl_soc.h>
#include <asm/cpm.h>
@@ -681,11 +680,6 @@ static int cpm_i2c_probe(struct platform_device *ofdev)
dev_dbg(&ofdev->dev, "hw routines for %s registered.\n",
cpm->adap.name);
- /*
- * register OF I2C devices
- */
- of_i2c_register_devices(&cpm->adap);
-
return 0;
out_shut:
cpm_i2c_shutdown(cpm);
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index fa556057d22..57473415be1 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -38,10 +38,7 @@
#include <linux/slab.h>
#include <linux/cpufreq.h>
#include <linux/gpio.h>
-#include <linux/of_i2c.h>
#include <linux/of_device.h>
-
-#include <mach/hardware.h>
#include <linux/platform_data/i2c-davinci.h>
/* ----- global defines ----------------------------------------------- */
@@ -665,7 +662,7 @@ static int davinci_i2c_probe(struct platform_device *pdev)
#endif
dev->dev = &pdev->dev;
dev->irq = irq->start;
- dev->pdata = dev->dev->platform_data;
+ dev->pdata = dev_get_platdata(&dev->dev);
platform_set_drvdata(pdev, dev);
if (!dev->pdata && pdev->dev.of_node) {
@@ -728,7 +725,6 @@ static int davinci_i2c_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failure adding adapter\n");
goto err_unuse_clocks;
}
- of_i2c_register_devices(adap);
return 0;
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index ad46616de29..dbecf08399f 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -317,6 +317,12 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
47, /* tLOW = 4.7 us */
3, /* tf = 0.3 us */
0); /* No offset */
+
+ /* Allow platforms to specify the ideal HCNT and LCNT values */
+ if (dev->ss_hcnt && dev->ss_lcnt) {
+ hcnt = dev->ss_hcnt;
+ lcnt = dev->ss_lcnt;
+ }
dw_writel(dev, hcnt, DW_IC_SS_SCL_HCNT);
dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT);
dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
@@ -331,6 +337,11 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
13, /* tLOW = 1.3 us */
3, /* tf = 0.3 us */
0); /* No offset */
+
+ if (dev->fs_hcnt && dev->fs_lcnt) {
+ hcnt = dev->fs_hcnt;
+ lcnt = dev->fs_lcnt;
+ }
dw_writel(dev, hcnt, DW_IC_FS_SCL_HCNT);
dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT);
dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
@@ -416,6 +427,7 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
u32 addr = msgs[dev->msg_write_idx].addr;
u32 buf_len = dev->tx_buf_len;
u8 *buf = dev->tx_buf;
+ bool need_restart = false;
intr_mask = DW_IC_INTR_DEFAULT_MASK;
@@ -443,6 +455,14 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
/* new i2c_msg */
buf = msgs[dev->msg_write_idx].buf;
buf_len = msgs[dev->msg_write_idx].len;
+
+ /* If both IC_EMPTYFIFO_HOLD_MASTER_EN and
+ * IC_RESTART_EN are set, we must manually
+ * set restart bit between messages.
+ */
+ if ((dev->master_cfg & DW_IC_CON_RESTART_EN) &&
+ (dev->msg_write_idx > 0))
+ need_restart = true;
}
tx_limit = dev->tx_fifo_depth - dw_readl(dev, DW_IC_TXFLR);
@@ -461,6 +481,11 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
buf_len == 1)
cmd |= BIT(9);
+ if (need_restart) {
+ cmd |= BIT(10);
+ need_restart = false;
+ }
+
if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
/* avoid rx buffer overrun */
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 912aa226286..e8a756537ed 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -61,6 +61,14 @@
* @tx_fifo_depth: depth of the hardware tx fifo
* @rx_fifo_depth: depth of the hardware rx fifo
* @rx_outstanding: current master-rx elements in tx fifo
+ * @ss_hcnt: standard speed HCNT value
+ * @ss_lcnt: standard speed LCNT value
+ * @fs_hcnt: fast speed HCNT value
+ * @fs_lcnt: fast speed LCNT value
+ *
+ * HCNT and LCNT parameters can be used if the platform knows more accurate
+ * values than the ones computed based only on the input clock frequency.
+ * Leave them at %0 if not used.
*/
struct dw_i2c_dev {
struct device *dev;
@@ -91,6 +99,10 @@ struct dw_i2c_dev {
unsigned int rx_fifo_depth;
int rx_outstanding;
u32 sda_hold_time;
+ u16 ss_hcnt;
+ u16 ss_lcnt;
+ u16 fs_hcnt;
+ u16 fs_lcnt;
};
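
The new ss_/fs_ count fields give platform or ACPI glue a way to override the HCNT/LCNT values that i2c_dw_init() would otherwise derive from the input clock alone; the init code only honours a pair when both members are non-zero. A hedged sketch of how glue code might fill them in, with made-up numbers purely for illustration:

	/*
	 * Illustrative values only; real counts depend on the IC input clock.
	 * Assumes i2c-designware-core.h is in scope for struct dw_i2c_dev.
	 */
	static void example_set_dw_timings(struct dw_i2c_dev *dev)
	{
		dev->ss_hcnt = 0x190;	/* standard-mode SCL high count */
		dev->ss_lcnt = 0x1d6;	/* standard-mode SCL low count */
		dev->fs_hcnt = 0x3c;	/* fast-mode SCL high count */
		dev->fs_lcnt = 0x82;	/* fast-mode SCL low count */
	}

Called before i2c_dw_init(dev), these overrides are then preferred over the computed counts.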
#define ACCESS_SWAP 0x00000001
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 4c5fadabe49..4c1b60539a2 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -35,7 +35,6 @@
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/of.h>
-#include <linux/of_i2c.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
@@ -54,9 +53,33 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
}
#ifdef CONFIG_ACPI
+static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
+ u16 *hcnt, u16 *lcnt, u32 *sda_hold)
+{
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER };
+ acpi_handle handle = ACPI_HANDLE(&pdev->dev);
+ union acpi_object *obj;
+
+ if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf)))
+ return;
+
+ obj = (union acpi_object *)buf.pointer;
+ if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 3) {
+ const union acpi_object *objs = obj->package.elements;
+
+ *hcnt = (u16)objs[0].integer.value;
+ *lcnt = (u16)objs[1].integer.value;
+ if (sda_hold)
+ *sda_hold = (u32)objs[2].integer.value;
+ }
+
+ kfree(buf.pointer);
+}
+
static int dw_i2c_acpi_configure(struct platform_device *pdev)
{
struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
+ bool fs_mode = dev->master_cfg & DW_IC_CON_SPEED_FAST;
if (!ACPI_HANDLE(&pdev->dev))
return -ENODEV;
@@ -64,6 +87,16 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
dev->adapter.nr = -1;
dev->tx_fifo_depth = 32;
dev->rx_fifo_depth = 32;
+
+ /*
+ * Try to get SDA hold time and *CNT values from an ACPI method if
+ * it exists for both supported speed modes.
+ */
+ dw_i2c_acpi_params(pdev, "SSCN", &dev->ss_hcnt, &dev->ss_lcnt,
+ fs_mode ? NULL : &dev->sda_hold_time);
+ dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt,
+ fs_mode ? &dev->sda_hold_time : NULL);
+
return 0;
}
@@ -172,8 +205,6 @@ static int dw_i2c_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failure adding adapter\n");
return r;
}
- of_i2c_register_devices(adap);
- acpi_i2c_register_devices(adap);
pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
pm_runtime_use_autosuspend(&pdev->dev);
@@ -207,7 +238,7 @@ static const struct of_device_id dw_i2c_of_match[] = {
MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
#endif
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int dw_i2c_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -228,9 +259,12 @@ static int dw_i2c_resume(struct device *dev)
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(dw_i2c_dev_pm_ops, dw_i2c_suspend, dw_i2c_resume);
+#define DW_I2C_DEV_PM_OPS (&dw_i2c_dev_pm_ops)
+#else
+#define DW_I2C_DEV_PM_OPS NULL
+#endif
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:i2c_designware");
@@ -242,7 +276,7 @@ static struct platform_driver dw_i2c_driver = {
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(dw_i2c_of_match),
.acpi_match_table = ACPI_PTR(dw_i2c_acpi_match),
- .pm = &dw_i2c_dev_pm_ops,
+ .pm = DW_I2C_DEV_PM_OPS,
},
};
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index bc6e139c6e7..bfa02c6c2dd 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -16,7 +16,6 @@
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
-#include <linux/of_i2c.h>
struct i2c_gpio_private_data {
struct i2c_adapter adap;
@@ -137,9 +136,9 @@ static int i2c_gpio_probe(struct platform_device *pdev)
if (ret)
return ret;
} else {
- if (!pdev->dev.platform_data)
+ if (!dev_get_platdata(&pdev->dev))
return -ENXIO;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
sda_pin = pdata->sda_pin;
scl_pin = pdata->scl_pin;
}
@@ -171,7 +170,7 @@ static int i2c_gpio_probe(struct platform_device *pdev)
pdata->scl_pin = scl_pin;
of_i2c_gpio_get_props(pdev->dev.of_node, pdata);
} else {
- memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));
+ memcpy(pdata, dev_get_platdata(&pdev->dev), sizeof(*pdata));
}
if (pdata->sda_is_open_drain) {
@@ -224,8 +223,6 @@ static int i2c_gpio_probe(struct platform_device *pdev)
if (ret)
goto err_add_bus;
- of_i2c_register_devices(adap);
-
platform_set_drvdata(pdev, priv);
dev_info(&pdev->dev, "using pins %u (SDA) and %u (SCL%s)\n",
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 4ebceed6bc6..4296d172127 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -87,7 +87,6 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/err.h>
-#include <linux/of_i2c.h>
#if (defined CONFIG_I2C_MUX_GPIO || defined CONFIG_I2C_MUX_GPIO_MODULE) && \
defined CONFIG_DMI
@@ -1230,7 +1229,6 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
goto exit_free_irq;
}
- of_i2c_register_devices(&priv->adapter);
i801_probe_optional_slaves(priv);
/* We ignore errors - multiplexing is optional */
i801_add_mux(priv);
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 973f5168827..ff3caa0c28c 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -42,7 +42,6 @@
#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/of_platform.h>
-#include <linux/of_i2c.h>
#include "i2c-ibm_iic.h"
@@ -759,9 +758,6 @@ static int iic_probe(struct platform_device *ofdev)
dev_info(&ofdev->dev, "using %s mode\n",
dev->fast_mode ? "fast (400 kHz)" : "standard (100 kHz)");
- /* Now register all the child nodes */
- of_i2c_register_devices(adap);
-
return 0;
error_cleanup:
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index e24279725d3..ccf46656bda 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -30,6 +30,8 @@
* Copyright (C) 2007 RightHand Technologies, Inc.
* Copyright (C) 2008 Darius Augulis <darius.augulis at teltonika.lt>
*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
*/
/** Includes *******************************************************************
@@ -50,7 +52,6 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_i2c.h>
#include <linux/platform_data/i2c-imx.h>
/** Defines ********************************************************************
@@ -62,12 +63,22 @@
/* Default value */
#define IMX_I2C_BIT_RATE 100000 /* 100kHz */
-/* IMX I2C registers */
+/* IMX I2C registers:
+ * the I2C register offset differs between SoCs. To support all of
+ * these chips, the register offset is split into a fixed base
+ * address and a variable shift value; the full register offset is
+ * then calculated as
+ *	reg_off = reg_base_addr << reg_shift
+ */
#define IMX_I2C_IADR 0x00 /* i2c slave address */
-#define IMX_I2C_IFDR 0x04 /* i2c frequency divider */
-#define IMX_I2C_I2CR 0x08 /* i2c control */
-#define IMX_I2C_I2SR 0x0C /* i2c status */
-#define IMX_I2C_I2DR 0x10 /* i2c transfer data */
+#define IMX_I2C_IFDR 0x01 /* i2c frequency divider */
+#define IMX_I2C_I2CR 0x02 /* i2c control */
+#define IMX_I2C_I2SR 0x03 /* i2c status */
+#define IMX_I2C_I2DR 0x04 /* i2c transfer data */
+
+#define IMX_I2C_REGSHIFT 2
+#define VF610_I2C_REGSHIFT 0
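
A quick worked example of the split: with the i.MX shift of 2, IMX_I2C_I2SR becomes 0x03 << 2 = 0x0C and IMX_I2C_I2DR becomes 0x04 << 2 = 0x10, reproducing the flat offsets removed above; with the Vybrid shift of 0 the same registers stay byte-contiguous at 0x03 and 0x04. The imx_i2c_read_reg()/imx_i2c_write_reg() helpers added later in this patch apply exactly this reg << regshift calculation.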
/* Bits of IMX I2C registers */
#define I2SR_RXAK 0x01
@@ -84,6 +95,19 @@
#define I2CR_IIEN 0x40
#define I2CR_IEN 0x80
+/* Register bits whose handling differs between SoCs:
+ * 1) I2SR: clearing the interrupt flags differs between SoCs:
+ *	- write zero to clear (w0c) the INT flag on i.MX,
+ *	- but write one to clear (w1c) the INT flag on Vybrid.
+ * 2) I2CR: enabling the I2C module also differs between SoCs:
+ *	- setting the I2CR_IEN bit enables the module on i.MX,
+ *	- but clearing the I2CR_IEN bit enables the module on Vybrid.
+ */
+#define I2SR_CLR_OPCODE_W0C 0x0
+#define I2SR_CLR_OPCODE_W1C (I2SR_IAL | I2SR_IIF)
+#define I2CR_IEN_OPCODE_0 0x0
+#define I2CR_IEN_OPCODE_1 I2CR_IEN
+
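
These opcode macros are consumed later in the patch: enable/disable writes use i2cr_ien_opcode and i2cr_ien_opcode ^ I2CR_IEN, and the interrupt handler folds i2sr_clr_opcode into the status write-back so that w0c parts get a 0 and w1c parts get a 1 for the flag being cleared. Condensed from the hunks below (helpers and hwdata as introduced by this patch):

	static void example_opcode_usage(struct imx_i2c_struct *i2c_imx)
	{
		unsigned int temp;

		/* Enable the controller: writes I2CR_IEN on i.MX, 0 on Vybrid. */
		imx_i2c_write_reg(i2c_imx->hwdata->i2cr_ien_opcode, i2c_imx, IMX_I2C_I2CR);

		/* Disable the controller: writes 0 on i.MX, I2CR_IEN on Vybrid. */
		imx_i2c_write_reg(i2c_imx->hwdata->i2cr_ien_opcode ^ I2CR_IEN,
				  i2c_imx, IMX_I2C_I2CR);

		/* Clear IIF: a 0 is written back on w0c parts, a 1 on w1c parts. */
		temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
		temp &= ~I2SR_IIF;
		temp |= (i2c_imx->hwdata->i2sr_clr_opcode & I2SR_IIF);
		imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
	}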
/** Variables ******************************************************************
*******************************************************************************/
@@ -95,8 +119,12 @@
*
* Duplicated divider values removed from list
*/
+struct imx_i2c_clk_pair {
+ u16 div;
+ u16 val;
+};
-static u16 __initdata i2c_clk_div[50][2] = {
+static struct imx_i2c_clk_pair imx_i2c_clk_div[] = {
{ 22, 0x20 }, { 24, 0x21 }, { 26, 0x22 }, { 28, 0x23 },
{ 30, 0x00 }, { 32, 0x24 }, { 36, 0x25 }, { 40, 0x26 },
{ 42, 0x03 }, { 44, 0x27 }, { 48, 0x28 }, { 52, 0x05 },
@@ -112,9 +140,38 @@ static u16 __initdata i2c_clk_div[50][2] = {
{ 3072, 0x1E }, { 3840, 0x1F }
};
+/* Vybrid VF610 clock divider, register value pairs */
+static struct imx_i2c_clk_pair vf610_i2c_clk_div[] = {
+ { 20, 0x00 }, { 22, 0x01 }, { 24, 0x02 }, { 26, 0x03 },
+ { 28, 0x04 }, { 30, 0x05 }, { 32, 0x09 }, { 34, 0x06 },
+ { 36, 0x0A }, { 40, 0x07 }, { 44, 0x0C }, { 48, 0x0D },
+ { 52, 0x43 }, { 56, 0x0E }, { 60, 0x45 }, { 64, 0x12 },
+ { 68, 0x0F }, { 72, 0x13 }, { 80, 0x14 }, { 88, 0x15 },
+ { 96, 0x19 }, { 104, 0x16 }, { 112, 0x1A }, { 128, 0x17 },
+ { 136, 0x4F }, { 144, 0x1C }, { 160, 0x1D }, { 176, 0x55 },
+ { 192, 0x1E }, { 208, 0x56 }, { 224, 0x22 }, { 228, 0x24 },
+ { 240, 0x1F }, { 256, 0x23 }, { 288, 0x5C }, { 320, 0x25 },
+ { 384, 0x26 }, { 448, 0x2A }, { 480, 0x27 }, { 512, 0x2B },
+ { 576, 0x2C }, { 640, 0x2D }, { 768, 0x31 }, { 896, 0x32 },
+ { 960, 0x2F }, { 1024, 0x33 }, { 1152, 0x34 }, { 1280, 0x35 },
+ { 1536, 0x36 }, { 1792, 0x3A }, { 1920, 0x37 }, { 2048, 0x3B },
+ { 2304, 0x3C }, { 2560, 0x3D }, { 3072, 0x3E }, { 3584, 0x7A },
+ { 3840, 0x3F }, { 4096, 0x7B }, { 5120, 0x7D }, { 6144, 0x7E },
+};
+
enum imx_i2c_type {
IMX1_I2C,
IMX21_I2C,
+ VF610_I2C,
+};
+
+struct imx_i2c_hwdata {
+ enum imx_i2c_type devtype;
+ unsigned regshift;
+ struct imx_i2c_clk_pair *clk_div;
+ unsigned ndivs;
+ unsigned i2sr_clr_opcode;
+ unsigned i2cr_ien_opcode;
};
struct imx_i2c_struct {
@@ -126,16 +183,46 @@ struct imx_i2c_struct {
unsigned int disable_delay;
int stopped;
unsigned int ifdr; /* IMX_I2C_IFDR */
- enum imx_i2c_type devtype;
+ const struct imx_i2c_hwdata *hwdata;
+};
+
+static const struct imx_i2c_hwdata imx1_i2c_hwdata = {
+ .devtype = IMX1_I2C,
+ .regshift = IMX_I2C_REGSHIFT,
+ .clk_div = imx_i2c_clk_div,
+ .ndivs = ARRAY_SIZE(imx_i2c_clk_div),
+ .i2sr_clr_opcode = I2SR_CLR_OPCODE_W0C,
+ .i2cr_ien_opcode = I2CR_IEN_OPCODE_1,
+
+};
+
+static const struct imx_i2c_hwdata imx21_i2c_hwdata = {
+ .devtype = IMX21_I2C,
+ .regshift = IMX_I2C_REGSHIFT,
+ .clk_div = imx_i2c_clk_div,
+ .ndivs = ARRAY_SIZE(imx_i2c_clk_div),
+ .i2sr_clr_opcode = I2SR_CLR_OPCODE_W0C,
+ .i2cr_ien_opcode = I2CR_IEN_OPCODE_1,
+
+};
+
+static struct imx_i2c_hwdata vf610_i2c_hwdata = {
+ .devtype = VF610_I2C,
+ .regshift = VF610_I2C_REGSHIFT,
+ .clk_div = vf610_i2c_clk_div,
+ .ndivs = ARRAY_SIZE(vf610_i2c_clk_div),
+ .i2sr_clr_opcode = I2SR_CLR_OPCODE_W1C,
+ .i2cr_ien_opcode = I2CR_IEN_OPCODE_0,
+
};
static struct platform_device_id imx_i2c_devtype[] = {
{
.name = "imx1-i2c",
- .driver_data = IMX1_I2C,
+ .driver_data = (kernel_ulong_t)&imx1_i2c_hwdata,
}, {
.name = "imx21-i2c",
- .driver_data = IMX21_I2C,
+ .driver_data = (kernel_ulong_t)&imx21_i2c_hwdata,
}, {
/* sentinel */
}
@@ -143,15 +230,28 @@ static struct platform_device_id imx_i2c_devtype[] = {
MODULE_DEVICE_TABLE(platform, imx_i2c_devtype);
static const struct of_device_id i2c_imx_dt_ids[] = {
- { .compatible = "fsl,imx1-i2c", .data = &imx_i2c_devtype[IMX1_I2C], },
- { .compatible = "fsl,imx21-i2c", .data = &imx_i2c_devtype[IMX21_I2C], },
+ { .compatible = "fsl,imx1-i2c", .data = &imx1_i2c_hwdata, },
+ { .compatible = "fsl,imx21-i2c", .data = &imx21_i2c_hwdata, },
+ { .compatible = "fsl,vf610-i2c", .data = &vf610_i2c_hwdata, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, i2c_imx_dt_ids);
static inline int is_imx1_i2c(struct imx_i2c_struct *i2c_imx)
{
- return i2c_imx->devtype == IMX1_I2C;
+ return i2c_imx->hwdata->devtype == IMX1_I2C;
+}
+
+static inline void imx_i2c_write_reg(unsigned int val,
+ struct imx_i2c_struct *i2c_imx, unsigned int reg)
+{
+ writeb(val, i2c_imx->base + (reg << i2c_imx->hwdata->regshift));
+}
+
+static inline unsigned char imx_i2c_read_reg(struct imx_i2c_struct *i2c_imx,
+ unsigned int reg)
+{
+ return readb(i2c_imx->base + (reg << i2c_imx->hwdata->regshift));
}
/** Functions for IMX I2C adapter driver ***************************************
@@ -165,7 +265,7 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy)
dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
while (1) {
- temp = readb(i2c_imx->base + IMX_I2C_I2SR);
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
if (for_busy && (temp & I2SR_IBB))
break;
if (!for_busy && !(temp & I2SR_IBB))
@@ -196,7 +296,7 @@ static int i2c_imx_trx_complete(struct imx_i2c_struct *i2c_imx)
static int i2c_imx_acked(struct imx_i2c_struct *i2c_imx)
{
- if (readb(i2c_imx->base + IMX_I2C_I2SR) & I2SR_RXAK) {
+ if (imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR) & I2SR_RXAK) {
dev_dbg(&i2c_imx->adapter.dev, "<%s> No ACK\n", __func__);
return -EIO; /* No ACK */
}
@@ -213,25 +313,25 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx)
dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
clk_prepare_enable(i2c_imx->clk);
- writeb(i2c_imx->ifdr, i2c_imx->base + IMX_I2C_IFDR);
+ imx_i2c_write_reg(i2c_imx->ifdr, i2c_imx, IMX_I2C_IFDR);
/* Enable I2C controller */
- writeb(0, i2c_imx->base + IMX_I2C_I2SR);
- writeb(I2CR_IEN, i2c_imx->base + IMX_I2C_I2CR);
+ imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR);
+ imx_i2c_write_reg(i2c_imx->hwdata->i2cr_ien_opcode, i2c_imx, IMX_I2C_I2CR);
/* Wait controller to be stable */
udelay(50);
/* Start I2C transaction */
- temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp |= I2CR_MSTA;
- writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
result = i2c_imx_bus_busy(i2c_imx, 1);
if (result)
return result;
i2c_imx->stopped = 0;
temp |= I2CR_IIEN | I2CR_MTX | I2CR_TXAK;
- writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
return result;
}
@@ -242,9 +342,9 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
if (!i2c_imx->stopped) {
/* Stop I2C transaction */
dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
- temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp &= ~(I2CR_MSTA | I2CR_MTX);
- writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
}
if (is_imx1_i2c(i2c_imx)) {
/*
@@ -260,13 +360,15 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx)
}
/* Disable I2C controller */
- writeb(0, i2c_imx->base + IMX_I2C_I2CR);
+ temp = i2c_imx->hwdata->i2cr_ien_opcode ^ I2CR_IEN;
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
clk_disable_unprepare(i2c_imx->clk);
}
static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
unsigned int rate)
{
+ struct imx_i2c_clk_pair *i2c_clk_div = i2c_imx->hwdata->clk_div;
unsigned int i2c_clk_rate;
unsigned int div;
int i;
@@ -274,15 +376,15 @@ static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
/* Divider value calculation */
i2c_clk_rate = clk_get_rate(i2c_imx->clk);
div = (i2c_clk_rate + rate - 1) / rate;
- if (div < i2c_clk_div[0][0])
+ if (div < i2c_clk_div[0].div)
i = 0;
- else if (div > i2c_clk_div[ARRAY_SIZE(i2c_clk_div) - 1][0])
- i = ARRAY_SIZE(i2c_clk_div) - 1;
+ else if (div > i2c_clk_div[i2c_imx->hwdata->ndivs - 1].div)
+ i = i2c_imx->hwdata->ndivs - 1;
else
- for (i = 0; i2c_clk_div[i][0] < div; i++);
+ for (i = 0; i2c_clk_div[i].div < div; i++);
/* Store divider value */
- i2c_imx->ifdr = i2c_clk_div[i][1];
+ i2c_imx->ifdr = i2c_clk_div[i].val;
/*
* There dummy delay is calculated.
@@ -290,7 +392,7 @@ static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
* This delay is used in I2C bus disable function
* to fix chip hardware bug.
*/
- i2c_imx->disable_delay = (500000U * i2c_clk_div[i][0]
+ i2c_imx->disable_delay = (500000U * i2c_clk_div[i].div
+ (i2c_clk_rate / 2) - 1) / (i2c_clk_rate / 2);
/* dev_dbg() can't be used, because adapter is not yet registered */
@@ -298,7 +400,7 @@ static void __init i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx,
dev_dbg(&i2c_imx->adapter.dev, "<%s> I2C_CLK=%d, REQ DIV=%d\n",
__func__, i2c_clk_rate, div);
dev_dbg(&i2c_imx->adapter.dev, "<%s> IFDR[IC]=0x%x, REAL DIV=%d\n",
- __func__, i2c_clk_div[i][1], i2c_clk_div[i][0]);
+ __func__, i2c_clk_div[i].val, i2c_clk_div[i].div);
#endif
}
@@ -307,12 +409,13 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id)
struct imx_i2c_struct *i2c_imx = dev_id;
unsigned int temp;
- temp = readb(i2c_imx->base + IMX_I2C_I2SR);
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
if (temp & I2SR_IIF) {
/* save status register */
i2c_imx->i2csr = temp;
temp &= ~I2SR_IIF;
- writeb(temp, i2c_imx->base + IMX_I2C_I2SR);
+ temp |= (i2c_imx->hwdata->i2sr_clr_opcode & I2SR_IIF);
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2SR);
wake_up(&i2c_imx->queue);
return IRQ_HANDLED;
}
@@ -328,7 +431,7 @@ static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
__func__, msgs->addr << 1);
/* write slave address */
- writeb(msgs->addr << 1, i2c_imx->base + IMX_I2C_I2DR);
+ imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR);
result = i2c_imx_trx_complete(i2c_imx);
if (result)
return result;
@@ -342,7 +445,7 @@ static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
dev_dbg(&i2c_imx->adapter.dev,
"<%s> write byte: B%d=0x%X\n",
__func__, i, msgs->buf[i]);
- writeb(msgs->buf[i], i2c_imx->base + IMX_I2C_I2DR);
+ imx_i2c_write_reg(msgs->buf[i], i2c_imx, IMX_I2C_I2DR);
result = i2c_imx_trx_complete(i2c_imx);
if (result)
return result;
@@ -363,7 +466,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
__func__, (msgs->addr << 1) | 0x01);
/* write slave address */
- writeb((msgs->addr << 1) | 0x01, i2c_imx->base + IMX_I2C_I2DR);
+ imx_i2c_write_reg((msgs->addr << 1) | 0x01, i2c_imx, IMX_I2C_I2DR);
result = i2c_imx_trx_complete(i2c_imx);
if (result)
return result;
@@ -374,12 +477,12 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
dev_dbg(&i2c_imx->adapter.dev, "<%s> setup bus\n", __func__);
/* setup bus to read data */
- temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp &= ~I2CR_MTX;
if (msgs->len - 1)
temp &= ~I2CR_TXAK;
- writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
- readb(i2c_imx->base + IMX_I2C_I2DR); /* dummy read */
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
+ imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); /* dummy read */
dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__);
@@ -393,19 +496,19 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs)
controller from generating another clock cycle */
dev_dbg(&i2c_imx->adapter.dev,
"<%s> clear MSTA\n", __func__);
- temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp &= ~(I2CR_MSTA | I2CR_MTX);
- writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
i2c_imx_bus_busy(i2c_imx, 0);
i2c_imx->stopped = 1;
} else if (i == (msgs->len - 2)) {
dev_dbg(&i2c_imx->adapter.dev,
"<%s> set TXAK\n", __func__);
- temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp |= I2CR_TXAK;
- writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
}
- msgs->buf[i] = readb(i2c_imx->base + IMX_I2C_I2DR);
+ msgs->buf[i] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR);
dev_dbg(&i2c_imx->adapter.dev,
"<%s> read byte: B%d=0x%X\n",
__func__, i, msgs->buf[i]);
@@ -432,9 +535,9 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
if (i) {
dev_dbg(&i2c_imx->adapter.dev,
"<%s> repeated start\n", __func__);
- temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
temp |= I2CR_RSTA;
- writeb(temp, i2c_imx->base + IMX_I2C_I2CR);
+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR);
result = i2c_imx_bus_busy(i2c_imx, 1);
if (result)
goto fail0;
@@ -443,13 +546,13 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
"<%s> transfer message: %d\n", __func__, i);
/* write/read data */
#ifdef CONFIG_I2C_DEBUG_BUS
- temp = readb(i2c_imx->base + IMX_I2C_I2CR);
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
dev_dbg(&i2c_imx->adapter.dev, "<%s> CONTROL: IEN=%d, IIEN=%d, "
"MSTA=%d, MTX=%d, TXAK=%d, RSTA=%d\n", __func__,
(temp & I2CR_IEN ? 1 : 0), (temp & I2CR_IIEN ? 1 : 0),
(temp & I2CR_MSTA ? 1 : 0), (temp & I2CR_MTX ? 1 : 0),
(temp & I2CR_TXAK ? 1 : 0), (temp & I2CR_RSTA ? 1 : 0));
- temp = readb(i2c_imx->base + IMX_I2C_I2SR);
+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
dev_dbg(&i2c_imx->adapter.dev,
"<%s> STATUS: ICF=%d, IAAS=%d, IBB=%d, "
"IAL=%d, SRW=%d, IIF=%d, RXAK=%d\n", __func__,
@@ -492,7 +595,7 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
&pdev->dev);
struct imx_i2c_struct *i2c_imx;
struct resource *res;
- struct imxi2c_platform_data *pdata = pdev->dev.platform_data;
+ struct imxi2c_platform_data *pdata = dev_get_platdata(&pdev->dev);
void __iomem *base;
int irq, ret;
u32 bitrate;
@@ -518,8 +621,10 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
}
if (of_id)
- pdev->id_entry = of_id->data;
- i2c_imx->devtype = pdev->id_entry->driver_data;
+ i2c_imx->hwdata = of_id->data;
+ else
+ i2c_imx->hwdata = (struct imx_i2c_hwdata *)
+ platform_get_device_id(pdev)->driver_data;
/* Setup i2c_imx driver structure */
strlcpy(i2c_imx->adapter.name, pdev->name, sizeof(i2c_imx->adapter.name));
@@ -537,6 +642,11 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
return PTR_ERR(i2c_imx->clk);
}
+ ret = clk_prepare_enable(i2c_imx->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "can't enable I2C clock\n");
+ return ret;
+ }
/* Request IRQ */
ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0,
pdev->name, i2c_imx);
@@ -560,8 +670,9 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
i2c_imx_set_clk(i2c_imx, bitrate);
/* Set up chip registers to defaults */
- writeb(0, i2c_imx->base + IMX_I2C_I2CR);
- writeb(0, i2c_imx->base + IMX_I2C_I2SR);
+ imx_i2c_write_reg(i2c_imx->hwdata->i2cr_ien_opcode ^ I2CR_IEN,
+ i2c_imx, IMX_I2C_I2CR);
+ imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR);
/* Add I2C adapter */
ret = i2c_add_numbered_adapter(&i2c_imx->adapter);
@@ -570,10 +681,9 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
return ret;
}
- of_i2c_register_devices(&i2c_imx->adapter);
-
/* Set up platform driver data */
platform_set_drvdata(pdev, i2c_imx);
+ clk_disable_unprepare(i2c_imx->clk);
dev_dbg(&i2c_imx->adapter.dev, "claimed irq %d\n", irq);
dev_dbg(&i2c_imx->adapter.dev, "device resources from 0x%x to 0x%x\n",
@@ -596,10 +706,10 @@ static int __exit i2c_imx_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c_imx->adapter);
/* setup chip registers to defaults */
- writeb(0, i2c_imx->base + IMX_I2C_IADR);
- writeb(0, i2c_imx->base + IMX_I2C_IFDR);
- writeb(0, i2c_imx->base + IMX_I2C_I2CR);
- writeb(0, i2c_imx->base + IMX_I2C_I2SR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR);
+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2SR);
return 0;
}
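
A minimal, userspace-only sketch of the divider selection and the rounded-up disable-delay division used in the i2c-imx hunks above. The table entries, clock rate and bitrate are made-up illustration values (the real driver takes them from its i2c_clk_div[] table, hwdata->ndivs and the clock framework), and the exact rounding of the requested divider may differ from the driver's:

#include <stdio.h>

struct clk_pair { unsigned short div, val; };	/* mirrors the .div/.val fields above */

static const struct clk_pair clk_div[] = {	/* hypothetical entries */
	{ 22, 0x20 }, { 24, 0x21 }, { 26, 0x22 },
	{ 160, 0x0d }, { 192, 0x0e }, { 224, 0x0f },
};
#define NDIVS (sizeof(clk_div) / sizeof(clk_div[0]))

int main(void)
{
	unsigned int i2c_clk_rate = 66000000;	/* assumed IPG clock */
	unsigned int bitrate = 400000;		/* assumed bus speed */
	unsigned int div = (i2c_clk_rate + bitrate - 1) / bitrate;
	unsigned int i, disable_delay;

	if (div < clk_div[0].div)
		i = 0;
	else if (div > clk_div[NDIVS - 1].div)
		i = NDIVS - 1;
	else
		for (i = 0; clk_div[i].div < div; i++)
			;

	/* one SCL period in microseconds, rounded up */
	disable_delay = (500000U * clk_div[i].div + (i2c_clk_rate / 2) - 1)
			/ (i2c_clk_rate / 2);

	printf("IFDR=0x%x, real div=%u, disable delay=%u us\n",
	       (unsigned)clk_div[i].val, (unsigned)clk_div[i].div, disable_delay);
	return 0;
}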
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index cd82eb44e4c..8ed79a086f8 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -879,6 +879,7 @@ ismt_probe(struct pci_dev *pdev, const struct pci_device_id *id)
DMA_BIT_MASK(32)) != 0)) {
dev_err(&pdev->dev, "pci_set_dma_mask fail %p\n",
pdev);
+ err = -ENODEV;
goto fail;
}
}
diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c
index ccec916bc3e..af8f65fb1c0 100644
--- a/drivers/i2c/busses/i2c-kempld.c
+++ b/drivers/i2c/busses/i2c-kempld.c
@@ -246,9 +246,9 @@ static void kempld_i2c_device_init(struct kempld_i2c_data *i2c)
bus_frequency = KEMPLD_I2C_FREQ_MAX;
if (pld->info.spec_major == 1)
- prescale = pld->pld_clock / bus_frequency * 5 - 1000;
+ prescale = pld->pld_clock / (bus_frequency * 5) - 1000;
else
- prescale = pld->pld_clock / bus_frequency * 4 - 3000;
+ prescale = pld->pld_clock / (bus_frequency * 4) - 3000;
if (prescale < 0)
prescale = 0;
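
The i2c-kempld hunk above is purely an operator-precedence fix: the old expression divided by the frequency and then multiplied by the constant, so before the constant offset the prescaler came out roughly 25x (or 16x) too large. A tiny check with arbitrary example values (the units and magnitudes here are assumptions, not taken from the driver):

#include <stdio.h>

int main(void)
{
	long pld_clock = 33333;		/* assumed value */
	long bus_frequency = 100;	/* assumed value */

	long old_val = pld_clock / bus_frequency * 5 - 1000;	/* (clk/freq)*5 */
	long new_val = pld_clock / (bus_frequency * 5) - 1000;	/* clk/(freq*5) */

	/* prints "old=665 new=-934"; the driver clamps negative results to 0 */
	printf("old=%ld new=%ld\n", old_val, new_val);
	return 0;
}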
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 7607dc06191..b80c76888ca 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -18,9 +18,9 @@
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/of_platform.h>
-#include <linux/of_i2c.h>
#include <linux/slab.h>
+#include <linux/clk.h>
#include <linux/io.h>
#include <linux/fsl_devices.h>
#include <linux/i2c.h>
@@ -64,9 +64,10 @@ struct mpc_i2c {
struct i2c_adapter adap;
int irq;
u32 real_clk;
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
u8 fdr, dfsrr;
#endif
+ struct clk *clk_per;
};
struct mpc_i2c_divider {
@@ -609,7 +610,6 @@ static const struct i2c_algorithm mpc_algo = {
static struct i2c_adapter mpc_ops = {
.owner = THIS_MODULE,
- .name = "MPC adapter",
.algo = &mpc_algo,
.timeout = HZ,
};
@@ -623,6 +623,9 @@ static int fsl_i2c_probe(struct platform_device *op)
u32 clock = MPC_I2C_CLOCK_LEGACY;
int result = 0;
int plen;
+ struct resource res;
+ struct clk *clk;
+ int err;
match = of_match_device(mpc_i2c_of_match, &op->dev);
if (!match)
@@ -653,6 +656,21 @@ static int fsl_i2c_probe(struct platform_device *op)
}
}
+ /*
+ * enable clock for the I2C peripheral (non-fatal),
+ * keep a reference upon successful allocation
+ */
+ clk = devm_clk_get(&op->dev, NULL);
+ if (!IS_ERR(clk)) {
+ err = clk_prepare_enable(clk);
+ if (err) {
+ dev_err(&op->dev, "failed to enable clock\n");
+ goto fail_request;
+ } else {
+ i2c->clk_per = clk;
+ }
+ }
+
if (of_get_property(op->dev.of_node, "fsl,preserve-clocking", NULL)) {
clock = MPC_I2C_CLOCK_PRESERVE;
} else {
@@ -682,6 +700,9 @@ static int fsl_i2c_probe(struct platform_device *op)
platform_set_drvdata(op, i2c);
i2c->adap = mpc_ops;
+ of_address_to_resource(op->dev.of_node, 0, &res);
+ scnprintf(i2c->adap.name, sizeof(i2c->adap.name),
+ "MPC adapter at 0x%llx", (unsigned long long)res.start);
i2c_set_adapdata(&i2c->adap, i2c);
i2c->adap.dev.parent = &op->dev;
i2c->adap.dev.of_node = of_node_get(op->dev.of_node);
@@ -691,11 +712,12 @@ static int fsl_i2c_probe(struct platform_device *op)
dev_err(i2c->dev, "failed to add adapter\n");
goto fail_add;
}
- of_i2c_register_devices(&i2c->adap);
return result;
fail_add:
+ if (i2c->clk_per)
+ clk_disable_unprepare(i2c->clk_per);
free_irq(i2c->irq, i2c);
fail_request:
irq_dispose_mapping(i2c->irq);
@@ -711,6 +733,9 @@ static int fsl_i2c_remove(struct platform_device *op)
i2c_del_adapter(&i2c->adap);
+ if (i2c->clk_per)
+ clk_disable_unprepare(i2c->clk_per);
+
if (i2c->irq)
free_irq(i2c->irq, i2c);
@@ -720,7 +745,7 @@ static int fsl_i2c_remove(struct platform_device *op)
return 0;
};
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int mpc_i2c_suspend(struct device *dev)
{
struct mpc_i2c *i2c = dev_get_drvdata(dev);
@@ -741,7 +766,10 @@ static int mpc_i2c_resume(struct device *dev)
return 0;
}
-SIMPLE_DEV_PM_OPS(mpc_i2c_pm_ops, mpc_i2c_suspend, mpc_i2c_resume);
+static SIMPLE_DEV_PM_OPS(mpc_i2c_pm_ops, mpc_i2c_suspend, mpc_i2c_resume);
+#define MPC_I2C_PM_OPS (&mpc_i2c_pm_ops)
+#else
+#define MPC_I2C_PM_OPS NULL
#endif
static const struct mpc_i2c_data mpc_i2c_data_512x = {
@@ -788,9 +816,7 @@ static struct platform_driver mpc_i2c_driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
.of_match_table = mpc_i2c_of_match,
-#ifdef CONFIG_PM
- .pm = &mpc_i2c_pm_ops,
-#endif
+ .pm = MPC_I2C_PM_OPS,
},
};
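
The MPC_I2C_PM_OPS macro introduced above is a common shape for optional suspend/resume support: the ops are only compiled under CONFIG_PM_SLEEP, and the driver structure references a macro that degrades to NULL otherwise, so the assignment site needs no #ifdef. A self-contained imitation of the pattern (the foo_* names are placeholders, and struct pm_ops merely stands in for the kernel's dev_pm_ops):

#include <stdio.h>

struct pm_ops { int (*suspend)(void); int (*resume)(void); };	/* stand-in */

#ifdef CONFIG_PM_SLEEP
static int foo_suspend(void) { return 0; }	/* placeholder callbacks */
static int foo_resume(void)  { return 0; }
static const struct pm_ops foo_pm_ops = { foo_suspend, foo_resume };
#define FOO_PM_OPS (&foo_pm_ops)
#else
#define FOO_PM_OPS NULL	/* .pm can still be assigned unconditionally */
#endif

static const struct { const struct pm_ops *pm; } foo_driver = {
	.pm = FOO_PM_OPS,
};

int main(void)
{
	/* compile with or without -DCONFIG_PM_SLEEP to see both branches */
	printf("pm ops %s\n", foo_driver.pm ? "present" : "absent");
	return 0;
}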
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index b1f42bf4096..7f3a4744349 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -21,9 +21,9 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
-#include <linux/of_i2c.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/delay.h>
#define MV64XXX_I2C_ADDR_ADDR(val) ((val & 0x7f) << 1)
#define MV64XXX_I2C_BAUD_DIV_N(val) (val & 0x7)
@@ -55,6 +55,32 @@
#define MV64XXX_I2C_STATUS_MAST_RD_ADDR_2_NO_ACK 0xe8
#define MV64XXX_I2C_STATUS_NO_STATUS 0xf8
+/* Register defines (I2C bridge) */
+#define MV64XXX_I2C_REG_TX_DATA_LO 0xc0
+#define MV64XXX_I2C_REG_TX_DATA_HI 0xc4
+#define MV64XXX_I2C_REG_RX_DATA_LO 0xc8
+#define MV64XXX_I2C_REG_RX_DATA_HI 0xcc
+#define MV64XXX_I2C_REG_BRIDGE_CONTROL 0xd0
+#define MV64XXX_I2C_REG_BRIDGE_STATUS 0xd4
+#define MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE 0xd8
+#define MV64XXX_I2C_REG_BRIDGE_INTR_MASK 0xdC
+#define MV64XXX_I2C_REG_BRIDGE_TIMING 0xe0
+
+/* Bridge Control values */
+#define MV64XXX_I2C_BRIDGE_CONTROL_WR 0x00000001
+#define MV64XXX_I2C_BRIDGE_CONTROL_RD 0x00000002
+#define MV64XXX_I2C_BRIDGE_CONTROL_ADDR_SHIFT 2
+#define MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT 0x00001000
+#define MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT 13
+#define MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT 16
+#define MV64XXX_I2C_BRIDGE_CONTROL_ENABLE 0x00080000
+
+/* Bridge Status values */
+#define MV64XXX_I2C_BRIDGE_STATUS_ERROR 0x00000001
+#define MV64XXX_I2C_STATUS_OFFLOAD_ERROR 0xf0000001
+#define MV64XXX_I2C_STATUS_OFFLOAD_OK 0xf0000000
+
+
/* Driver states */
enum {
MV64XXX_I2C_STATE_INVALID,
@@ -71,14 +97,17 @@ enum {
enum {
MV64XXX_I2C_ACTION_INVALID,
MV64XXX_I2C_ACTION_CONTINUE,
+ MV64XXX_I2C_ACTION_OFFLOAD_SEND_START,
MV64XXX_I2C_ACTION_SEND_START,
MV64XXX_I2C_ACTION_SEND_RESTART,
+ MV64XXX_I2C_ACTION_OFFLOAD_RESTART,
MV64XXX_I2C_ACTION_SEND_ADDR_1,
MV64XXX_I2C_ACTION_SEND_ADDR_2,
MV64XXX_I2C_ACTION_SEND_DATA,
MV64XXX_I2C_ACTION_RCV_DATA,
MV64XXX_I2C_ACTION_RCV_DATA_STOP,
MV64XXX_I2C_ACTION_SEND_STOP,
+ MV64XXX_I2C_ACTION_OFFLOAD_SEND_STOP,
};
struct mv64xxx_i2c_regs {
@@ -117,6 +146,9 @@ struct mv64xxx_i2c_data {
spinlock_t lock;
struct i2c_msg *msg;
struct i2c_adapter adapter;
+ bool offload_enabled;
+/* 5us delay in order to avoid repeated start timing violation */
+ bool errata_delay;
};
static struct mv64xxx_i2c_regs mv64xxx_i2c_regs_mv64xxx = {
@@ -165,6 +197,77 @@ mv64xxx_i2c_prepare_for_io(struct mv64xxx_i2c_data *drv_data,
}
}
+static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
+{
+ unsigned long data_reg_hi = 0;
+ unsigned long data_reg_lo = 0;
+ unsigned long ctrl_reg;
+ struct i2c_msg *msg = drv_data->msgs;
+
+ drv_data->msg = msg;
+ drv_data->byte_posn = 0;
+ drv_data->bytes_left = msg->len;
+ drv_data->aborting = 0;
+ drv_data->rc = 0;
+ /* Only regular transactions can be offloaded */
+ if ((msg->flags & ~(I2C_M_TEN | I2C_M_RD)) != 0)
+ return -EINVAL;
+
+ /* Only 1-8 byte transfers can be offloaded */
+ if (msg->len < 1 || msg->len > 8)
+ return -EINVAL;
+
+ /* Build transaction */
+ ctrl_reg = MV64XXX_I2C_BRIDGE_CONTROL_ENABLE |
+ (msg->addr << MV64XXX_I2C_BRIDGE_CONTROL_ADDR_SHIFT);
+
+ if ((msg->flags & I2C_M_TEN) != 0)
+ ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT;
+
+ if ((msg->flags & I2C_M_RD) == 0) {
+ u8 local_buf[8] = { 0 };
+
+ memcpy(local_buf, msg->buf, msg->len);
+ data_reg_lo = cpu_to_le32(*((u32 *)local_buf));
+ data_reg_hi = cpu_to_le32(*((u32 *)(local_buf+4)));
+
+ ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR |
+ (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT;
+
+ writel_relaxed(data_reg_lo,
+ drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO);
+ writel_relaxed(data_reg_hi,
+ drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI);
+
+ } else {
+ ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_RD |
+ (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT;
+ }
+
+ /* Execute transaction */
+ writel(ctrl_reg, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL);
+
+ return 0;
+}
+
+static void
+mv64xxx_i2c_update_offload_data(struct mv64xxx_i2c_data *drv_data)
+{
+ struct i2c_msg *msg = drv_data->msg;
+
+ if (msg->flags & I2C_M_RD) {
+ u32 data_reg_lo = readl(drv_data->reg_base +
+ MV64XXX_I2C_REG_RX_DATA_LO);
+ u32 data_reg_hi = readl(drv_data->reg_base +
+ MV64XXX_I2C_REG_RX_DATA_HI);
+ u8 local_buf[8] = { 0 };
+
+ *((u32 *)local_buf) = le32_to_cpu(data_reg_lo);
+ *((u32 *)(local_buf+4)) = le32_to_cpu(data_reg_hi);
+ memcpy(msg->buf, local_buf, msg->len);
+ }
+
+}
/*
*****************************************************************************
*
@@ -177,6 +280,15 @@ mv64xxx_i2c_prepare_for_io(struct mv64xxx_i2c_data *drv_data,
static void
mv64xxx_i2c_hw_init(struct mv64xxx_i2c_data *drv_data)
{
+ if (drv_data->offload_enabled) {
+ writel(0, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL);
+ writel(0, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_TIMING);
+ writel(0, drv_data->reg_base +
+ MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE);
+ writel(0, drv_data->reg_base +
+ MV64XXX_I2C_REG_BRIDGE_INTR_MASK);
+ }
+
writel(0, drv_data->reg_base + drv_data->reg_offsets.soft_reset);
writel(MV64XXX_I2C_BAUD_DIV_M(drv_data->freq_m) | MV64XXX_I2C_BAUD_DIV_N(drv_data->freq_n),
drv_data->reg_base + drv_data->reg_offsets.clock);
@@ -283,6 +395,16 @@ mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status)
drv_data->rc = -ENXIO;
break;
+ case MV64XXX_I2C_STATUS_OFFLOAD_OK:
+ if (drv_data->send_stop || drv_data->aborting) {
+ drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_SEND_STOP;
+ drv_data->state = MV64XXX_I2C_STATE_IDLE;
+ } else {
+ drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_RESTART;
+ drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_RESTART;
+ }
+ break;
+
default:
dev_err(&drv_data->adapter.dev,
"mv64xxx_i2c_fsm: Ctlr Error -- state: 0x%x, "
@@ -299,19 +421,29 @@ static void
mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
{
switch(drv_data->action) {
+ case MV64XXX_I2C_ACTION_OFFLOAD_RESTART:
+ mv64xxx_i2c_update_offload_data(drv_data);
+ writel(0, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL);
+ writel(0, drv_data->reg_base +
+ MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE);
+ /* FALLTHRU */
case MV64XXX_I2C_ACTION_SEND_RESTART:
/* We should only get here if we have further messages */
BUG_ON(drv_data->num_msgs == 0);
- drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START;
- writel(drv_data->cntl_bits,
- drv_data->reg_base + drv_data->reg_offsets.control);
-
drv_data->msgs++;
drv_data->num_msgs--;
+ if (!(drv_data->offload_enabled &&
+ mv64xxx_i2c_offload_msg(drv_data))) {
+ drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START;
+ writel(drv_data->cntl_bits,
+ drv_data->reg_base + drv_data->reg_offsets.control);
- /* Setup for the next message */
- mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
+ /* Setup for the next message */
+ mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
+ }
+ if (drv_data->errata_delay)
+ udelay(5);
/*
* We're never at the start of the message here, and by this
@@ -326,6 +458,12 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
drv_data->reg_base + drv_data->reg_offsets.control);
break;
+ case MV64XXX_I2C_ACTION_OFFLOAD_SEND_START:
+ if (!mv64xxx_i2c_offload_msg(drv_data))
+ break;
+ else
+ drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
+ /* FALLTHRU */
case MV64XXX_I2C_ACTION_SEND_START:
writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
drv_data->reg_base + drv_data->reg_offsets.control);
@@ -366,6 +504,9 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP,
drv_data->reg_base + drv_data->reg_offsets.control);
drv_data->block = 0;
+ if (drv_data->errata_delay)
+ udelay(5);
+
wake_up(&drv_data->waitq);
break;
@@ -375,6 +516,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
"mv64xxx_i2c_do_action: Invalid action: %d\n",
drv_data->action);
drv_data->rc = -EIO;
+
/* FALLTHRU */
case MV64XXX_I2C_ACTION_SEND_STOP:
drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_INTEN;
@@ -383,6 +525,15 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
drv_data->block = 0;
wake_up(&drv_data->waitq);
break;
+
+ case MV64XXX_I2C_ACTION_OFFLOAD_SEND_STOP:
+ mv64xxx_i2c_update_offload_data(drv_data);
+ writel(0, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL);
+ writel(0, drv_data->reg_base +
+ MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE);
+ drv_data->block = 0;
+ wake_up(&drv_data->waitq);
+ break;
}
}
@@ -395,6 +546,21 @@ mv64xxx_i2c_intr(int irq, void *dev_id)
irqreturn_t rc = IRQ_NONE;
spin_lock_irqsave(&drv_data->lock, flags);
+
+ if (drv_data->offload_enabled) {
+ while (readl(drv_data->reg_base +
+ MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE)) {
+ int reg_status = readl(drv_data->reg_base +
+ MV64XXX_I2C_REG_BRIDGE_STATUS);
+ if (reg_status & MV64XXX_I2C_BRIDGE_STATUS_ERROR)
+ status = MV64XXX_I2C_STATUS_OFFLOAD_ERROR;
+ else
+ status = MV64XXX_I2C_STATUS_OFFLOAD_OK;
+ mv64xxx_i2c_fsm(drv_data, status);
+ mv64xxx_i2c_do_action(drv_data);
+ rc = IRQ_HANDLED;
+ }
+ }
while (readl(drv_data->reg_base + drv_data->reg_offsets.control) &
MV64XXX_I2C_REG_CONTROL_IFLG) {
status = readl(drv_data->reg_base + drv_data->reg_offsets.status);
@@ -459,11 +625,15 @@ mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg,
unsigned long flags;
spin_lock_irqsave(&drv_data->lock, flags);
- mv64xxx_i2c_prepare_for_io(drv_data, msg);
-
- drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
- drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
+ if (drv_data->offload_enabled) {
+ drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_SEND_START;
+ drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
+ } else {
+ mv64xxx_i2c_prepare_for_io(drv_data, msg);
+ drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
+ drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
+ }
drv_data->send_stop = is_last;
drv_data->block = 1;
mv64xxx_i2c_do_action(drv_data);
@@ -521,6 +691,7 @@ static const struct i2c_algorithm mv64xxx_i2c_algo = {
static const struct of_device_id mv64xxx_i2c_of_match_table[] = {
{ .compatible = "allwinner,sun4i-i2c", .data = &mv64xxx_i2c_regs_sun4i},
{ .compatible = "marvell,mv64xxx-i2c", .data = &mv64xxx_i2c_regs_mv64xxx},
+ { .compatible = "marvell,mv78230-i2c", .data = &mv64xxx_i2c_regs_mv64xxx},
{}
};
MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
@@ -601,6 +772,15 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
memcpy(&drv_data->reg_offsets, device->data, sizeof(drv_data->reg_offsets));
+ /*
+ * For controllers embedded in new SoCs activate the
+ * Transaction Generator support and the errata fix.
+ */
+ if (of_device_is_compatible(np, "marvell,mv78230-i2c")) {
+ drv_data->offload_enabled = true;
+ drv_data->errata_delay = true;
+ }
+
out:
return rc;
#endif
@@ -618,7 +798,7 @@ static int
mv64xxx_i2c_probe(struct platform_device *pd)
{
struct mv64xxx_i2c_data *drv_data;
- struct mv64xxx_i2c_pdata *pdata = pd->dev.platform_data;
+ struct mv64xxx_i2c_pdata *pdata = dev_get_platdata(&pd->dev);
struct resource *r;
int rc;
@@ -654,6 +834,7 @@ mv64xxx_i2c_probe(struct platform_device *pd)
drv_data->freq_n = pdata->freq_n;
drv_data->irq = platform_get_irq(pd, 0);
drv_data->adapter.timeout = msecs_to_jiffies(pdata->timeout);
+ drv_data->offload_enabled = false;
memcpy(&drv_data->reg_offsets, &mv64xxx_i2c_regs_mv64xxx, sizeof(drv_data->reg_offsets));
} else if (pd->dev.of_node) {
rc = mv64xxx_of_config(drv_data, &pd->dev);
@@ -689,8 +870,6 @@ mv64xxx_i2c_probe(struct platform_device *pd)
goto exit_free_irq;
}
- of_i2c_register_devices(&drv_data->adapter);
-
return 0;
exit_free_irq:
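
For reference, the offload ("bridge") path added to i2c-mv64xxx above only accepts plain 1-8 byte transfers (no flags beyond I2C_M_TEN/I2C_M_RD) and moves the payload through two 32-bit data registers, with byte 0 of the message in the least significant byte of the LO word. The helpers below reproduce just that byte layout in ordinary C; they illustrate the register format and are not driver code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void pack8(const uint8_t *buf, size_t len, uint32_t *lo, uint32_t *hi)
{
	uint8_t tmp[8] = { 0 };
	size_t i;

	memcpy(tmp, buf, len);		/* len must be 1..8 */
	*lo = *hi = 0;
	for (i = 0; i < 4; i++) {
		*lo |= (uint32_t)tmp[i] << (8 * i);
		*hi |= (uint32_t)tmp[i + 4] << (8 * i);
	}
}

static void unpack8(uint32_t lo, uint32_t hi, uint8_t *buf, size_t len)
{
	uint8_t tmp[8];
	size_t i;

	for (i = 0; i < 4; i++) {
		tmp[i] = lo >> (8 * i);
		tmp[i + 4] = hi >> (8 * i);
	}
	memcpy(buf, tmp, len);		/* len must be 1..8 */
}

int main(void)
{
	const uint8_t msg[5] = { 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t back[5];
	uint32_t lo, hi;

	pack8(msg, sizeof(msg), &lo, &hi);
	printf("lo=0x%08x hi=0x%08x\n", lo, hi);	/* lo=0x44332211 hi=0x00000055 */
	unpack8(lo, hi, back, sizeof(back));
	return memcmp(msg, back, sizeof(back)) != 0;
}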
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index df8ff5aea5b..f4a01675fa7 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -27,7 +27,6 @@
#include <linux/stmp_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_i2c.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
@@ -114,18 +113,21 @@ struct mxs_i2c_dev {
uint32_t timing0;
uint32_t timing1;
+ uint32_t timing2;
/* DMA support components */
- struct dma_chan *dmach;
+ struct dma_chan *dmach;
uint32_t pio_data[2];
uint32_t addr_data;
struct scatterlist sg_io[2];
bool dma_read;
};
-static void mxs_i2c_reset(struct mxs_i2c_dev *i2c)
+static int mxs_i2c_reset(struct mxs_i2c_dev *i2c)
{
- stmp_reset_block(i2c->regs);
+ int ret = stmp_reset_block(i2c->regs);
+ if (ret)
+ return ret;
/*
* Configure timing for the I2C block. The I2C TIMING2 register has to
@@ -136,9 +138,11 @@ static void mxs_i2c_reset(struct mxs_i2c_dev *i2c)
*/
writel(i2c->timing0, i2c->regs + MXS_I2C_TIMING0);
writel(i2c->timing1, i2c->regs + MXS_I2C_TIMING1);
- writel(0x00300030, i2c->regs + MXS_I2C_TIMING2);
+ writel(i2c->timing2, i2c->regs + MXS_I2C_TIMING2);
writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
+
+ return 0;
}
static void mxs_i2c_dma_finish(struct mxs_i2c_dev *i2c)
@@ -475,7 +479,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
int stop)
{
struct mxs_i2c_dev *i2c = i2c_get_adapdata(adap);
- int ret;
+ int ret, err;
int flags;
flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0;
@@ -493,10 +497,13 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
* based on this empirical measurement and a lot of previous frobbing.
*/
i2c->cmd_err = 0;
- if (msg->len < 8) {
+ if (0) { /* disable PIO mode until a proper fix is made */
ret = mxs_i2c_pio_setup_xfer(adap, msg, flags);
- if (ret)
- mxs_i2c_reset(i2c);
+ if (ret) {
+ err = mxs_i2c_reset(i2c);
+ if (err)
+ return err;
+ }
} else {
INIT_COMPLETION(i2c->cmd_complete);
ret = mxs_i2c_dma_setup_xfer(adap, msg, flags);
@@ -527,7 +534,10 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
timeout:
dev_dbg(i2c->dev, "Timeout!\n");
mxs_i2c_dma_finish(i2c);
- mxs_i2c_reset(i2c);
+ ret = mxs_i2c_reset(i2c);
+ if (ret)
+ return ret;
+
return -ETIMEDOUT;
}
@@ -577,41 +587,79 @@ static const struct i2c_algorithm mxs_i2c_algo = {
.functionality = mxs_i2c_func,
};
-static void mxs_i2c_derive_timing(struct mxs_i2c_dev *i2c, int speed)
+static void mxs_i2c_derive_timing(struct mxs_i2c_dev *i2c, uint32_t speed)
{
- /* The I2C block clock run at 24MHz */
+ /* The I2C block clock runs at 24MHz */
const uint32_t clk = 24000000;
- uint32_t base;
+ uint32_t divider;
uint16_t high_count, low_count, rcv_count, xmit_count;
+ uint32_t bus_free, leadin;
struct device *dev = i2c->dev;
- if (speed > 540000) {
- dev_warn(dev, "Speed too high (%d Hz), using 540 kHz\n", speed);
- speed = 540000;
- } else if (speed < 12000) {
- dev_warn(dev, "Speed too low (%d Hz), using 12 kHz\n", speed);
- speed = 12000;
+ divider = DIV_ROUND_UP(clk, speed);
+
+ if (divider < 25) {
+ /*
+ * limit the divider, so that min(low_count, high_count)
+ * is >= 1
+ */
+ divider = 25;
+ dev_warn(dev,
+ "Speed too high (%u.%03u kHz), using %u.%03u kHz\n",
+ speed / 1000, speed % 1000,
+ clk / divider / 1000, clk / divider % 1000);
+ } else if (divider > 1897) {
+ /*
+ * limit the divider, so that max(low_count, high_count)
+ * cannot exceed 1023
+ */
+ divider = 1897;
+ dev_warn(dev,
+ "Speed too low (%u.%03u kHz), using %u.%03u kHz\n",
+ speed / 1000, speed % 1000,
+ clk / divider / 1000, clk / divider % 1000);
}
/*
- * The timing derivation algorithm. There is no documentation for this
- * algorithm available, it was derived by using the scope and fiddling
- * with constants until the result observed on the scope was good enough
- * for 20kHz, 50kHz, 100kHz, 200kHz, 300kHz and 400kHz. It should be
- * possible to assume the algorithm works for other frequencies as well.
+ * The I2C spec specifies the following timing data:
+ * standard mode fast mode Bitfield name
+ * tLOW (SCL LOW period) 4700 ns 1300 ns
+ * tHIGH (SCL HIGH period) 4000 ns 600 ns
+ * tSU;DAT (data setup time) 250 ns 100 ns
+ * tHD;STA (START hold time) 4000 ns 600 ns
+ * tBUF (bus free time) 4700 ns 1300 ns
*
- * Note it was necessary to cap the frequency on both ends as it's not
- * possible to configure completely arbitrary frequency for the I2C bus
- * clock.
+ * The hardware (of the i.MX28 at least) seems to add 2 additional
+ * clock cycles to the low_count and 7 cycles to the high_count.
+ * This is compensated for by subtracting the respective constants
+ * from the values written to the timing registers.
*/
- base = ((clk / speed) - 38) / 2;
- high_count = base + 3;
- low_count = base - 3;
- rcv_count = (high_count * 3) / 4;
- xmit_count = low_count / 4;
+ if (speed > 100000) {
+ /* fast mode */
+ low_count = DIV_ROUND_CLOSEST(divider * 13, (13 + 6));
+ high_count = DIV_ROUND_CLOSEST(divider * 6, (13 + 6));
+ leadin = DIV_ROUND_UP(600 * (clk / 1000000), 1000);
+ bus_free = DIV_ROUND_UP(1300 * (clk / 1000000), 1000);
+ } else {
+ /* normal mode */
+ low_count = DIV_ROUND_CLOSEST(divider * 47, (47 + 40));
+ high_count = DIV_ROUND_CLOSEST(divider * 40, (47 + 40));
+ leadin = DIV_ROUND_UP(4700 * (clk / 1000000), 1000);
+ bus_free = DIV_ROUND_UP(4700 * (clk / 1000000), 1000);
+ }
+ rcv_count = high_count * 3 / 8;
+ xmit_count = low_count * 3 / 8;
+
+ dev_dbg(dev,
+ "speed=%u(actual %u) divider=%u low=%u high=%u xmit=%u rcv=%u leadin=%u bus_free=%u\n",
+ speed, clk / divider, divider, low_count, high_count,
+ xmit_count, rcv_count, leadin, bus_free);
+ low_count -= 2;
+ high_count -= 7;
i2c->timing0 = (high_count << 16) | rcv_count;
i2c->timing1 = (low_count << 16) | xmit_count;
+ i2c->timing2 = (bus_free << 16 | leadin);
}
static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c)
@@ -683,7 +731,9 @@ static int mxs_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, i2c);
/* Do reset to enforce correct startup after pinmuxing */
- mxs_i2c_reset(i2c);
+ err = mxs_i2c_reset(i2c);
+ if (err)
+ return err;
adap = &i2c->adapter;
strlcpy(adap->name, "MXS I2C adapter", sizeof(adap->name));
@@ -701,8 +751,6 @@ static int mxs_i2c_probe(struct platform_device *pdev)
return err;
}
- of_i2c_register_devices(adap);
-
return 0;
}
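
The rewritten i2c-mxs timing derivation can be checked outside the kernel. The sketch below recomputes one data point (400 kHz fast mode on the 24 MHz block clock) with the same formulas; the arithmetic and the DIV_ROUND_* definitions follow the patch, everything else is illustration only:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

int main(void)
{
	const uint32_t clk = 24000000, speed = 400000;
	uint32_t divider = DIV_ROUND_UP(clk, speed);			/* 60 */

	/* fast mode: split the period 13:6 (tLOW:tHIGH from the I2C spec) */
	uint32_t low  = DIV_ROUND_CLOSEST(divider * 13, 13 + 6);	/* 41 */
	uint32_t high = DIV_ROUND_CLOSEST(divider * 6, 13 + 6);		/* 19 */
	uint32_t leadin   = DIV_ROUND_UP(600 * (clk / 1000000), 1000);	/* 15 */
	uint32_t bus_free = DIV_ROUND_UP(1300 * (clk / 1000000), 1000);	/* 32 */
	uint32_t rcv  = high * 3 / 8;					/* 7 */
	uint32_t xmit = low * 3 / 8;					/* 15 */

	low -= 2;	/* compensate the 2 extra cycles the IP adds to LOW */
	high -= 7;	/* ... and the 7 extra cycles added to HIGH */

	printf("timing0=0x%08x timing1=0x%08x timing2=0x%08x\n",
	       (high << 16) | rcv, (low << 16) | xmit,
	       (bus_free << 16) | leadin);
	/* prints timing0=0x000c0007 timing1=0x0027000f timing2=0x0020000f */
	return 0;
}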
diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c
index 512dfe60970..8bf9ac01301 100644
--- a/drivers/i2c/busses/i2c-nomadik.c
+++ b/drivers/i2c/busses/i2c-nomadik.c
@@ -24,7 +24,6 @@
#include <linux/pm_runtime.h>
#include <linux/platform_data/i2c-nomadik.h>
#include <linux/of.h>
-#include <linux/of_i2c.h>
#include <linux/pinctrl/consumer.h>
#define DRIVER_NAME "nmk-i2c"
@@ -943,7 +942,7 @@ static void nmk_i2c_of_probe(struct device_node *np,
static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
{
int ret = 0;
- struct nmk_i2c_controller *pdata = adev->dev.platform_data;
+ struct nmk_i2c_controller *pdata = dev_get_platdata(&adev->dev);
struct device_node *np = adev->dev.of_node;
struct nmk_i2c_dev *dev;
struct i2c_adapter *adap;
@@ -1045,8 +1044,6 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id)
goto err_add_adap;
}
- of_i2c_register_devices(adap);
-
pm_runtime_put(&adev->dev);
return 0;
diff --git a/drivers/i2c/busses/i2c-nuc900.c b/drivers/i2c/busses/i2c-nuc900.c
index 865ee350adb..36394d737fa 100644
--- a/drivers/i2c/busses/i2c-nuc900.c
+++ b/drivers/i2c/busses/i2c-nuc900.c
@@ -525,7 +525,7 @@ static int nuc900_i2c_probe(struct platform_device *pdev)
struct resource *res;
int ret;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "no platform data\n");
return -EINVAL;
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 0e1f8245e76..c61f37a10a0 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -24,7 +24,6 @@
#include <linux/i2c-ocores.h>
#include <linux/slab.h>
#include <linux/io.h>
-#include <linux/of_i2c.h>
#include <linux/log2.h>
struct ocores_i2c {
@@ -353,10 +352,6 @@ static int ocores_i2c_probe(struct platform_device *pdev)
int ret;
int i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -365,11 +360,12 @@ static int ocores_i2c_probe(struct platform_device *pdev)
if (!i2c)
return -ENOMEM;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
i2c->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(i2c->base))
return PTR_ERR(i2c->base);
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata) {
i2c->reg_shift = pdata->reg_shift;
i2c->reg_io_width = pdata->reg_io_width;
@@ -435,8 +431,6 @@ static int ocores_i2c_probe(struct platform_device *pdev)
if (pdata) {
for (i = 0; i < pdata->num_devices; i++)
i2c_new_device(&i2c->adap, pdata->devices + i);
- } else {
- of_i2c_register_devices(&i2c->adap);
}
return 0;
@@ -456,7 +450,7 @@ static int ocores_i2c_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int ocores_i2c_suspend(struct device *dev)
{
struct ocores_i2c *i2c = dev_get_drvdata(dev);
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index 956fe320f31..b929ba271b4 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -15,7 +15,6 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_i2c.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -599,8 +598,6 @@ static int octeon_i2c_probe(struct platform_device *pdev)
}
dev_info(i2c->dev, "version %s\n", DRV_VERSION);
- of_i2c_register_devices(&i2c->adap);
-
return 0;
out:
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 142b694d1c6..6d8308d5dc4 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -38,12 +38,10 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/of.h>
-#include <linux/of_i2c.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/i2c-omap.h>
#include <linux/pm_runtime.h>
-#include <linux/pinctrl/consumer.h>
/* I2C controller revisions */
#define OMAP_I2C_OMAP1_REV_2 0x20
@@ -216,8 +214,6 @@ struct omap_i2c_dev {
u16 syscstate;
u16 westate;
u16 errata;
-
- struct pinctrl *pins;
};
static const u8 reg_map_ip_v1[] = {
@@ -618,11 +614,10 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
if (dev->cmd_err & OMAP_I2C_STAT_NACK) {
if (msg->flags & I2C_M_IGNORE_NAK)
return 0;
- if (stop) {
- w = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG);
- w |= OMAP_I2C_CON_STP;
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);
- }
+
+ w = omap_i2c_read_reg(dev, OMAP_I2C_CON_REG);
+ w |= OMAP_I2C_CON_STP;
+ omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, w);
return -EREMOTEIO;
}
return -EIO;
@@ -1079,7 +1074,7 @@ omap_i2c_probe(struct platform_device *pdev)
struct i2c_adapter *adap;
struct resource *mem;
const struct omap_i2c_bus_platform_data *pdata =
- pdev->dev.platform_data;
+ dev_get_platdata(&pdev->dev);
struct device_node *node = pdev->dev.of_node;
const struct of_device_id *match;
int irq;
@@ -1120,16 +1115,6 @@ omap_i2c_probe(struct platform_device *pdev)
dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
}
- dev->pins = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(dev->pins)) {
- if (PTR_ERR(dev->pins) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
-
- dev_warn(&pdev->dev, "did not get pins for i2c error: %li\n",
- PTR_ERR(dev->pins));
- dev->pins = NULL;
- }
-
dev->dev = &pdev->dev;
dev->irq = irq;
@@ -1245,8 +1230,6 @@ omap_i2c_probe(struct platform_device *pdev)
dev_info(dev->dev, "bus %d rev%d.%d at %d kHz\n", adap->nr,
major, minor, dev->speed);
- of_i2c_register_devices(adap);
-
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index aa00df14e30..39e2755e3f2 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -136,7 +136,7 @@ static int i2c_pca_pf_probe(struct platform_device *pdev)
struct i2c_pca_pf_data *i2c;
struct resource *res;
struct i2c_pca9564_pf_platform_data *platform_data =
- pdev->dev.platform_data;
+ dev_get_platdata(&pdev->dev);
int ret = 0;
int irq;
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index d05ad590af2..a028617b8f1 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -231,11 +231,11 @@ static int piix4_setup(struct pci_dev *PIIX4_dev,
}
static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
- const struct pci_device_id *id)
+ const struct pci_device_id *id, u8 aux)
{
unsigned short piix4_smba;
unsigned short smba_idx = 0xcd6;
- u8 smba_en_lo, smba_en_hi, i2ccfg, i2ccfg_offset = 0x10, smb_en = 0x2c;
+ u8 smba_en_lo, smba_en_hi, i2ccfg, i2ccfg_offset = 0x10, smb_en;
/* SB800 and later SMBus does not support forcing address */
if (force || force_addr) {
@@ -245,6 +245,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
}
/* Determine the address of the SMBus areas */
+ smb_en = (aux) ? 0x28 : 0x2c;
+
if (!request_region(smba_idx, 2, "smba_idx")) {
dev_err(&PIIX4_dev->dev, "SMBus base address index region "
"0x%x already in use!\n", smba_idx);
@@ -272,6 +274,13 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev,
return -EBUSY;
}
+ /* Aux SMBus does not support IRQ information */
+ if (aux) {
+ dev_info(&PIIX4_dev->dev,
+ "SMBus Host Controller at 0x%x\n", piix4_smba);
+ return piix4_smba;
+ }
+
/* Request the SMBus I2C bus config region */
if (!request_region(piix4_smba + i2ccfg_offset, 1, "i2ccfg")) {
dev_err(&PIIX4_dev->dev, "SMBus I2C bus config region "
@@ -597,7 +606,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
dev->revision >= 0x40) ||
dev->vendor == PCI_VENDOR_ID_AMD)
/* base address location etc changed in SB800 */
- retval = piix4_setup_sb800(dev, id);
+ retval = piix4_setup_sb800(dev, id, 0);
else
retval = piix4_setup(dev, id);
@@ -611,17 +620,29 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id)
return retval;
/* Check for auxiliary SMBus on some AMD chipsets */
+ retval = -ENODEV;
+
if (dev->vendor == PCI_VENDOR_ID_ATI &&
- dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
- dev->revision < 0x40) {
- retval = piix4_setup_aux(dev, id, 0x58);
- if (retval > 0) {
- /* Try to add the aux adapter if it exists,
- * piix4_add_adapter will clean up if this fails */
- piix4_add_adapter(dev, retval, &piix4_aux_adapter);
+ dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS) {
+ if (dev->revision < 0x40) {
+ retval = piix4_setup_aux(dev, id, 0x58);
+ } else {
+ /* SB800 added aux bus too */
+ retval = piix4_setup_sb800(dev, id, 1);
}
}
+ if (dev->vendor == PCI_VENDOR_ID_AMD &&
+ dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) {
+ retval = piix4_setup_sb800(dev, id, 1);
+ }
+
+ if (retval > 0) {
+ /* Try to add the aux adapter if it exists,
+ * piix4_add_adapter will clean up if this fails */
+ piix4_add_adapter(dev, retval, &piix4_aux_adapter);
+ }
+
return 0;
}
diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c
index 5f39c6d8117..1a9ea25f231 100644
--- a/drivers/i2c/busses/i2c-pnx.c
+++ b/drivers/i2c/busses/i2c-pnx.c
@@ -23,7 +23,6 @@
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/slab.h>
-#include <linux/of_i2c.h>
#define I2C_PNX_TIMEOUT_DEFAULT 10 /* msec */
#define I2C_PNX_SPEED_KHZ_DEFAULT 100
@@ -595,7 +594,7 @@ static struct i2c_algorithm pnx_algorithm = {
.functionality = i2c_pnx_func,
};
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int i2c_pnx_controller_suspend(struct device *dev)
{
struct i2c_pnx_algo_data *alg_data = dev_get_drvdata(dev);
@@ -727,7 +726,8 @@ static int i2c_pnx_probe(struct platform_device *pdev)
alg_data->irq = platform_get_irq(pdev, 0);
if (alg_data->irq < 0) {
dev_err(&pdev->dev, "Failed to get IRQ from platform resource\n");
- goto out_irq;
+ ret = alg_data->irq;
+ goto out_clock;
}
ret = request_irq(alg_data->irq, i2c_pnx_interrupt,
0, pdev->name, alg_data);
@@ -741,8 +741,6 @@ static int i2c_pnx_probe(struct platform_device *pdev)
goto out_irq;
}
- of_i2c_register_devices(&alg_data->adapter);
-
dev_dbg(&pdev->dev, "%s: Master at %#8x, irq %d.\n",
alg_data->adapter.name, res->start, alg_data->irq);
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 8dc90da1e6e..37e8cfad625 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -398,7 +398,7 @@ static void i2c_powermac_register_devices(struct i2c_adapter *adap,
static int i2c_powermac_probe(struct platform_device *dev)
{
- struct pmac_i2c_bus *bus = dev->dev.platform_data;
+ struct pmac_i2c_bus *bus = dev_get_platdata(&dev->dev);
struct device_node *parent = NULL;
struct i2c_adapter *adapter;
const char *basename;
@@ -440,22 +440,24 @@ static int i2c_powermac_probe(struct platform_device *dev)
adapter->algo = &i2c_powermac_algorithm;
i2c_set_adapdata(adapter, bus);
adapter->dev.parent = &dev->dev;
- adapter->dev.of_node = dev->dev.of_node;
+
+ /* Clear of_node to skip automatic registration of i2c child nodes */
+ adapter->dev.of_node = NULL;
rc = i2c_add_adapter(adapter);
if (rc) {
printk(KERN_ERR "i2c-powermac: Adapter %s registration "
"failed\n", adapter->name);
memset(adapter, 0, sizeof(*adapter));
+ return rc;
}
printk(KERN_INFO "PowerMac i2c bus %s registered\n", adapter->name);
- /* Cannot use of_i2c_register_devices() due to Apple device-tree
- * funkyness
- */
+ /* Use custom child registration due to Apple device-tree funkyness */
+ adapter->dev.of_node = dev->dev.of_node;
i2c_powermac_register_devices(adapter, bus);
- return rc;
+ return 0;
}
static struct platform_driver i2c_powermac_driver = {
diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c
index 37a84c87c5f..ac80199885b 100644
--- a/drivers/i2c/busses/i2c-puv3.c
+++ b/drivers/i2c/busses/i2c-puv3.c
@@ -245,7 +245,7 @@ static int puv3_i2c_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int puv3_i2c_suspend(struct device *dev)
{
int poll_count;
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index fbafed29fb8..bbe6dfbc5c0 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -31,7 +31,6 @@
#include <linux/i2c-pxa.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_i2c.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/clk.h>
@@ -110,6 +109,8 @@ MODULE_DEVICE_TABLE(platform, i2c_pxa_id_table);
#define ICR_SADIE (1 << 13) /* slave address detected int enable */
#define ICR_UR (1 << 14) /* unit reset */
#define ICR_FM (1 << 15) /* fast mode */
+#define ICR_HS (1 << 16) /* High Speed mode */
+#define ICR_GPIOEN (1 << 19) /* enable GPIO mode for SCL in HS */
#define ISR_RWM (1 << 0) /* read/write mode */
#define ISR_ACKNAK (1 << 1) /* ack/nak status */
@@ -155,6 +156,10 @@ struct pxa_i2c {
int irq;
unsigned int use_pio :1;
unsigned int fast_mode :1;
+ unsigned int high_mode:1;
+ unsigned char master_code;
+ unsigned long rate;
+ bool highmode_enter;
};
#define _IBMR(i2c) ((i2c)->reg_ibmr)
@@ -459,6 +464,7 @@ static void i2c_pxa_reset(struct pxa_i2c *i2c)
/* set control register values */
writel(I2C_ICR_INIT | (i2c->fast_mode ? ICR_FM : 0), _ICR(i2c));
+ writel(readl(_ICR(i2c)) | (i2c->high_mode ? ICR_HS : 0), _ICR(i2c));
#ifdef CONFIG_I2C_PXA_SLAVE
dev_info(&i2c->adap.dev, "Enabling slave mode\n");
@@ -680,6 +686,34 @@ static int i2c_pxa_pio_set_master(struct pxa_i2c *i2c)
return 0;
}
+/*
+ * PXA I2C send master code
+ * 1. Load master code to IDBR and send it.
+ * Note for HS mode, set ICR [GPIOEN].
+ * 2. Wait until we win arbitration.
+ */
+static int i2c_pxa_send_mastercode(struct pxa_i2c *i2c)
+{
+ u32 icr;
+ long timeout;
+
+ spin_lock_irq(&i2c->lock);
+ i2c->highmode_enter = true;
+ writel(i2c->master_code, _IDBR(i2c));
+
+ icr = readl(_ICR(i2c)) & ~(ICR_STOP | ICR_ALDIE);
+ icr |= ICR_GPIOEN | ICR_START | ICR_TB | ICR_ITEIE;
+ writel(icr, _ICR(i2c));
+
+ spin_unlock_irq(&i2c->lock);
+ timeout = wait_event_timeout(i2c->wait,
+ i2c->highmode_enter == false, HZ * 1);
+
+ i2c->highmode_enter = false;
+
+ return (timeout == 0) ? I2C_RETRY : 0;
+}
+
static int i2c_pxa_do_pio_xfer(struct pxa_i2c *i2c,
struct i2c_msg *msg, int num)
{
@@ -743,6 +777,14 @@ static int i2c_pxa_do_xfer(struct pxa_i2c *i2c, struct i2c_msg *msg, int num)
goto out;
}
+ if (i2c->high_mode) {
+ ret = i2c_pxa_send_mastercode(i2c);
+ if (ret) {
+ dev_err(&i2c->adap.dev, "i2c_pxa_send_mastercode timeout\n");
+ goto out;
+ }
+ }
+
spin_lock_irq(&i2c->lock);
i2c->msg = msg;
@@ -990,11 +1032,14 @@ static irqreturn_t i2c_pxa_handler(int this_irq, void *dev_id)
i2c_pxa_slave_txempty(i2c, isr);
if (isr & ISR_IRF)
i2c_pxa_slave_rxfull(i2c, isr);
- } else if (i2c->msg) {
+ } else if (i2c->msg && (!i2c->highmode_enter)) {
if (isr & ISR_ITE)
i2c_pxa_irq_txempty(i2c, isr);
if (isr & ISR_IRF)
i2c_pxa_irq_rxfull(i2c, isr);
+ } else if ((isr & ISR_ITE) && i2c->highmode_enter) {
+ i2c->highmode_enter = false;
+ wake_up(&i2c->wait);
} else {
i2c_pxa_scream_blue_murder(i2c, "spurious irq");
}
@@ -1072,20 +1117,25 @@ static int i2c_pxa_probe_pdata(struct platform_device *pdev,
struct pxa_i2c *i2c,
enum pxa_i2c_types *i2c_types)
{
- struct i2c_pxa_platform_data *plat = pdev->dev.platform_data;
+ struct i2c_pxa_platform_data *plat = dev_get_platdata(&pdev->dev);
const struct platform_device_id *id = platform_get_device_id(pdev);
*i2c_types = id->driver_data;
if (plat) {
i2c->use_pio = plat->use_pio;
i2c->fast_mode = plat->fast_mode;
+ i2c->high_mode = plat->high_mode;
+ i2c->master_code = plat->master_code;
+ if (!i2c->master_code)
+ i2c->master_code = 0xe;
+ i2c->rate = plat->rate;
}
return 0;
}
static int i2c_pxa_probe(struct platform_device *dev)
{
- struct i2c_pxa_platform_data *plat = dev->dev.platform_data;
+ struct i2c_pxa_platform_data *plat = dev_get_platdata(&dev->dev);
enum pxa_i2c_types i2c_type;
struct pxa_i2c *i2c;
struct resource *res = NULL;
@@ -1151,6 +1201,7 @@ static int i2c_pxa_probe(struct platform_device *dev)
i2c->irq = irq;
i2c->slave_addr = I2C_PXA_SLAVE_ADDR;
+ i2c->highmode_enter = false;
if (plat) {
#ifdef CONFIG_I2C_PXA_SLAVE
@@ -1160,6 +1211,16 @@ static int i2c_pxa_probe(struct platform_device *dev)
i2c->adap.class = plat->class;
}
+ if (i2c->high_mode) {
+ if (i2c->rate) {
+ clk_set_rate(i2c->clk, i2c->rate);
+ pr_info("i2c: <%s> set rate to %ld\n",
+ i2c->adap.name, clk_get_rate(i2c->clk));
+ } else
+ pr_warn("i2c: <%s> clock rate not set\n",
+ i2c->adap.name);
+ }
+
clk_prepare_enable(i2c->clk);
if (i2c->use_pio) {
@@ -1185,7 +1246,6 @@ static int i2c_pxa_probe(struct platform_device *dev)
printk(KERN_INFO "I2C: Failed to add bus\n");
goto eadapt;
}
- of_i2c_register_devices(&i2c->adap);
platform_set_drvdata(dev, i2c);
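
For the i2c-pxa high-speed support above: per the I2C specification a high-speed transfer is preceded by a master code of the form 0000 1XXX, which is why the driver falls back to 0xe when the platform data leaves master_code unset. A trivial, purely illustrative check of that encoding:

#include <stdio.h>

/* An HS master code must match the pattern 0000 1XXX (0x08..0x0f). */
static int is_hs_master_code(unsigned char code)
{
	return (code & ~0x07u) == 0x08;
}

int main(void)
{
	unsigned char code = 0xe;	/* the driver's default above */

	printf("0x%02x: %s\n", (unsigned)code,
	       is_hs_master_code(code) ? "valid HS master code"
				       : "not a master code");
	return 0;
}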
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 0fc58586161..d2fe11da5e8 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -101,6 +101,11 @@ enum {
#define ID_ARBLOST (1 << 3)
#define ID_NACK (1 << 4)
+enum rcar_i2c_type {
+ I2C_RCAR_H1,
+ I2C_RCAR_H2,
+};
+
struct rcar_i2c_priv {
void __iomem *io;
struct i2c_adapter adap;
@@ -113,6 +118,7 @@ struct rcar_i2c_priv {
int irq;
u32 icccr;
u32 flags;
+ enum rcar_i2c_type devtype;
};
#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
@@ -224,12 +230,25 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv,
u32 scgd, cdf;
u32 round, ick;
u32 scl;
+ u32 cdf_width;
if (!clkp) {
dev_err(dev, "there is no peripheral_clk\n");
return -EIO;
}
+ switch (priv->devtype) {
+ case I2C_RCAR_H1:
+ cdf_width = 2;
+ break;
+ case I2C_RCAR_H2:
+ cdf_width = 3;
+ break;
+ default:
+ dev_err(dev, "device type error\n");
+ return -EIO;
+ }
+
/*
* calculate SCL clock
* see
@@ -245,7 +264,7 @@ static int rcar_i2c_clock_calculate(struct rcar_i2c_priv *priv,
* clkp : peripheral_clk
* F[] : integer up-valuation
*/
- for (cdf = 0; cdf < 4; cdf++) {
+ for (cdf = 0; cdf < (1 << cdf_width); cdf++) {
ick = clk_get_rate(clkp) / (1 + cdf);
if (ick < 20000000)
goto ick_find;
@@ -287,7 +306,7 @@ scgd_find:
/*
* keep icccr value
*/
- priv->icccr = (scgd << 2 | cdf);
+ priv->icccr = (scgd << (cdf_width) | cdf);
return 0;
}
@@ -615,7 +634,7 @@ static const struct i2c_algorithm rcar_i2c_algo = {
static int rcar_i2c_probe(struct platform_device *pdev)
{
- struct i2c_rcar_platform_data *pdata = pdev->dev.platform_data;
+ struct i2c_rcar_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct rcar_i2c_priv *priv;
struct i2c_adapter *adap;
struct resource *res;
@@ -632,6 +651,9 @@ static int rcar_i2c_probe(struct platform_device *pdev)
bus_speed = 100000; /* default 100 kHz */
if (pdata && pdata->bus_speed)
bus_speed = pdata->bus_speed;
+
+ priv->devtype = platform_get_device_id(pdev)->driver_data;
+
ret = rcar_i2c_clock_calculate(priv, bus_speed, dev);
if (ret < 0)
return ret;
@@ -686,6 +708,14 @@ static int rcar_i2c_remove(struct platform_device *pdev)
return 0;
}
+static struct platform_device_id rcar_i2c_id_table[] = {
+ { "i2c-rcar", I2C_RCAR_H1 },
+ { "i2c-rcar_h1", I2C_RCAR_H1 },
+ { "i2c-rcar_h2", I2C_RCAR_H2 },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, rcar_i2c_id_table);
+
static struct platform_driver rcar_i2c_driver = {
.driver = {
.name = "i2c-rcar",
@@ -693,6 +723,7 @@ static struct platform_driver rcar_i2c_driver = {
},
.probe = rcar_i2c_probe,
.remove = rcar_i2c_remove,
+ .id_table = rcar_i2c_id_table,
};
module_platform_driver(rcar_i2c_driver);
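
In the i2c-rcar change above, the clock-setup difference between the two generations is the width of the CDF field, so ICCCR is now packed with a per-device shift and the CDF search runs up to 1 << cdf_width. The snippet below only shows how the same SCGD/CDF pair lands in different bit positions (example values, not derived from a real clock rate):

#include <stdio.h>

static unsigned int icccr(unsigned int scgd, unsigned int cdf,
			  unsigned int cdf_width)
{
	return (scgd << cdf_width) | cdf;	/* SCGD sits just above CDF */
}

int main(void)
{
	/* example values only; the driver derives scgd/cdf from the clock */
	printf("H1: 0x%02x  H2: 0x%02x\n",
	       icccr(0x15, 2, 2), icccr(0x15, 2, 3));	/* 0x56 vs 0xaa */
	return 0;
}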
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index cab1c91b75a..3535f3c0f7b 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -36,7 +36,6 @@
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/io.h>
-#include <linux/of_i2c.h>
#include <linux/of_gpio.h>
#include <linux/pinctrl/consumer.h>
@@ -1033,7 +1032,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
int ret;
if (!pdev->dev.of_node) {
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "no platform data\n");
return -EINVAL;
@@ -1154,7 +1153,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
return ret;
}
- of_i2c_register_devices(&i2c->adap);
platform_set_drvdata(pdev, i2c);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/i2c/busses/i2c-s6000.c b/drivers/i2c/busses/i2c-s6000.c
index 7c1ca5aca08..dd186a03768 100644
--- a/drivers/i2c/busses/i2c-s6000.c
+++ b/drivers/i2c/busses/i2c-s6000.c
@@ -290,8 +290,9 @@ static int s6i2c_probe(struct platform_device *dev)
clock = 0;
bus_num = -1;
- if (dev->dev.platform_data) {
- struct s6_i2c_platform_data *pdata = dev->dev.platform_data;
+ if (dev_get_platdata(&dev->dev)) {
+ struct s6_i2c_platform_data *pdata =
+ dev_get_platdata(&dev->dev);
bus_num = pdata->bus_num;
clock = pdata->clock;
}
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index 5351a2f3491..5e8f136e233 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -437,7 +437,7 @@ static int sh7760_i2c_probe(struct platform_device *pdev)
struct cami2c *id;
int ret;
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
if (!pd) {
dev_err(&pdev->dev, "no platform_data!\n");
ret = -ENODEV;
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index debf745c026..55110ddbed1 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -27,7 +27,6 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
-#include <linux/of_i2c.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
@@ -658,7 +657,7 @@ static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, int hook)
static int sh_mobile_i2c_probe(struct platform_device *dev)
{
- struct i2c_sh_mobile_platform_data *pdata = dev->dev.platform_data;
+ struct i2c_sh_mobile_platform_data *pdata = dev_get_platdata(&dev->dev);
struct sh_mobile_i2c_data *pd;
struct i2c_adapter *adap;
struct resource *res;
@@ -758,7 +757,6 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
"I2C adapter %d with bus speed %lu Hz (L/H=%x/%x)\n",
adap->nr, pd->bus_speed, pd->iccl, pd->icch);
- of_i2c_register_devices(adap);
return 0;
err_all:
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
index a63c7d50683..6784f7f527a 100644
--- a/drivers/i2c/busses/i2c-sirf.c
+++ b/drivers/i2c/busses/i2c-sirf.c
@@ -12,7 +12,6 @@
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
-#include <linux/of_i2c.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
@@ -65,6 +64,8 @@
#define SIRFSOC_I2C_START BIT(7)
#define SIRFSOC_I2C_DEFAULT_SPEED 100000
+#define SIRFSOC_I2C_ERR_NOACK 1
+#define SIRFSOC_I2C_ERR_TIMEOUT 2
struct sirfsoc_i2c {
void __iomem *base;
@@ -143,14 +144,24 @@ static irqreturn_t i2c_sirfsoc_irq(int irq, void *dev_id)
if (i2c_stat & SIRFSOC_I2C_STAT_ERR) {
/* Error conditions */
- siic->err_status = 1;
+ siic->err_status = SIRFSOC_I2C_ERR_NOACK;
writel(SIRFSOC_I2C_STAT_ERR, siic->base + SIRFSOC_I2C_STATUS);
if (i2c_stat & SIRFSOC_I2C_STAT_NACK)
- dev_err(&siic->adapter.dev, "ACK not received\n");
+ dev_dbg(&siic->adapter.dev, "ACK not received\n");
else
dev_err(&siic->adapter.dev, "I2C error\n");
+ /*
+ * Due to hardware ANOMALY, we need to reset I2C earlier after
+ * we get NOACK while accessing non-existing clients, otherwise
+ * we will get errors even if we access existing clients later
+ */
+ writel(readl(siic->base + SIRFSOC_I2C_CTRL) | SIRFSOC_I2C_RESET,
+ siic->base + SIRFSOC_I2C_CTRL);
+ while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET)
+ cpu_relax();
+
complete(&siic->done);
} else if (i2c_stat & SIRFSOC_I2C_STAT_CMD_DONE) {
/* CMD buffer execution complete */
@@ -183,6 +194,10 @@ static void i2c_sirfsoc_set_address(struct sirfsoc_i2c *siic,
if (msg->flags & I2C_M_RD)
addr |= 1;
+ /* Reverse direction bit */
+ if (msg->flags & I2C_M_REV_DIR_ADDR)
+ addr ^= 1;
+
writel(addr, siic->base + SIRFSOC_I2C_CMD(siic->cmd_ptr++));
}
@@ -191,7 +206,6 @@ static int i2c_sirfsoc_xfer_msg(struct sirfsoc_i2c *siic, struct i2c_msg *msg)
u32 regval = readl(siic->base + SIRFSOC_I2C_CTRL);
/* timeout waiting for the xfer to finish or fail */
int timeout = msecs_to_jiffies((msg->len + 1) * 50);
- int ret = 0;
i2c_sirfsoc_set_address(siic, msg);
@@ -200,7 +214,7 @@ static int i2c_sirfsoc_xfer_msg(struct sirfsoc_i2c *siic, struct i2c_msg *msg)
i2c_sirfsoc_queue_cmd(siic);
if (wait_for_completion_timeout(&siic->done, timeout) == 0) {
- siic->err_status = 1;
+ siic->err_status = SIRFSOC_I2C_ERR_TIMEOUT;
dev_err(&siic->adapter.dev, "Transfer timeout\n");
}
@@ -208,16 +222,14 @@ static int i2c_sirfsoc_xfer_msg(struct sirfsoc_i2c *siic, struct i2c_msg *msg)
siic->base + SIRFSOC_I2C_CTRL);
writel(0, siic->base + SIRFSOC_I2C_CMD_START);
- if (siic->err_status) {
+ /* i2c controller doesn't respond, reset it */
+ if (siic->err_status == SIRFSOC_I2C_ERR_TIMEOUT) {
writel(readl(siic->base + SIRFSOC_I2C_CTRL) | SIRFSOC_I2C_RESET,
siic->base + SIRFSOC_I2C_CTRL);
while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET)
cpu_relax();
-
- ret = -EIO;
}
-
- return ret;
+ return siic->err_status ? -EAGAIN : 0;
}
static u32 i2c_sirfsoc_func(struct i2c_adapter *adap)
@@ -321,6 +333,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
adap->algo = &i2c_sirfsoc_algo;
adap->algo_data = siic;
+ adap->retries = 3;
adap->dev.of_node = pdev->dev.of_node;
adap->dev.parent = &pdev->dev;
@@ -348,7 +361,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
if (bitrate < 100000)
regval =
- (2 * ctrl_speed) / (2 * bitrate * 11);
+ (2 * ctrl_speed) / (bitrate * 11);
else
regval = ctrl_speed / (bitrate * 5);
@@ -366,8 +379,6 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
clk_disable(clk);
- of_i2c_register_devices(adap);
-
dev_info(&pdev->dev, " I2C adapter ready to operate\n");
return 0;
@@ -416,6 +427,8 @@ static int i2c_sirfsoc_resume(struct device *dev)
clk_enable(siic->clk);
writel(SIRFSOC_I2C_RESET, siic->base + SIRFSOC_I2C_CTRL);
+ while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET)
+ cpu_relax();
writel(SIRFSOC_I2C_CORE_EN | SIRFSOC_I2C_MASTER_MODE,
siic->base + SIRFSOC_I2C_CTRL);
writel(siic->clk_div, siic->base + SIRFSOC_I2C_CLK_CTRL);
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index d1a6b204af0..f8f6f2e552d 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -17,7 +17,6 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/of_i2c.h>
/* the name of this kernel module */
#define NAME "stu300"
@@ -884,9 +883,6 @@ stu300_probe(struct platform_device *pdev)
dev->pdev = pdev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOENT;
-
dev->virtbase = devm_ioremap_resource(&pdev->dev, res);
dev_dbg(&pdev->dev, "initialize bus device I2C%d on virtual "
"base %p\n", bus_nr, dev->virtbase);
@@ -936,12 +932,11 @@ stu300_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
dev_info(&pdev->dev, "ST DDC I2C @ %p, irq %d\n",
dev->virtbase, dev->irq);
- of_i2c_register_devices(adap);
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int stu300_suspend(struct device *device)
{
struct stu300_dev *dev = dev_get_drvdata(device);
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 9aa1b60f7fd..c457cb447c6 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -25,7 +25,6 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
-#include <linux/of_i2c.h>
#include <linux/of_device.h>
#include <linux/module.h>
#include <linux/clk/tegra.h>
@@ -802,8 +801,6 @@ static int tegra_i2c_probe(struct platform_device *pdev)
return ret;
}
- of_i2c_register_devices(&i2c_dev->adapter);
-
return 0;
}
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index 05106368d40..e7d3b755af3 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -54,12 +54,16 @@ static int usb_write(struct i2c_adapter *adapter, int cmd,
static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
{
- unsigned char status;
+ unsigned char *pstatus;
struct i2c_msg *pmsg;
- int i;
+ int i, ret;
dev_dbg(&adapter->dev, "master xfer %d messages:\n", num);
+ pstatus = kmalloc(sizeof(*pstatus), GFP_KERNEL);
+ if (!pstatus)
+ return -ENOMEM;
+
for (i = 0 ; i < num ; i++) {
int cmd = CMD_I2C_IO;
@@ -84,7 +88,8 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
pmsg->buf, pmsg->len) != pmsg->len) {
dev_err(&adapter->dev,
"failure reading data\n");
- return -EREMOTEIO;
+ ret = -EREMOTEIO;
+ goto out;
}
} else {
/* write data */
@@ -93,36 +98,50 @@ static int usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num)
pmsg->buf, pmsg->len) != pmsg->len) {
dev_err(&adapter->dev,
"failure writing data\n");
- return -EREMOTEIO;
+ ret = -EREMOTEIO;
+ goto out;
}
}
/* read status */
- if (usb_read(adapter, CMD_GET_STATUS, 0, 0, &status, 1) != 1) {
+ if (usb_read(adapter, CMD_GET_STATUS, 0, 0, pstatus, 1) != 1) {
dev_err(&adapter->dev, "failure reading status\n");
- return -EREMOTEIO;
+ ret = -EREMOTEIO;
+ goto out;
}
- dev_dbg(&adapter->dev, " status = %d\n", status);
- if (status == STATUS_ADDRESS_NAK)
- return -EREMOTEIO;
+ dev_dbg(&adapter->dev, " status = %d\n", *pstatus);
+ if (*pstatus == STATUS_ADDRESS_NAK) {
+ ret = -EREMOTEIO;
+ goto out;
+ }
}
- return i;
+ ret = i;
+out:
+ kfree(pstatus);
+ return ret;
}
static u32 usb_func(struct i2c_adapter *adapter)
{
- __le32 func;
+ __le32 *pfunc;
+ u32 ret;
+
+ pfunc = kmalloc(sizeof(*pfunc), GFP_KERNEL);
/* get functionality from adapter */
- if (usb_read(adapter, CMD_GET_FUNC, 0, 0, &func, sizeof(func)) !=
- sizeof(func)) {
+ if (!pfunc || usb_read(adapter, CMD_GET_FUNC, 0, 0, pfunc,
+ sizeof(*pfunc)) != sizeof(*pfunc)) {
dev_err(&adapter->dev, "failure reading functionality\n");
- return 0;
+ ret = 0;
+ goto out;
}
- return le32_to_cpu(func);
+ ret = le32_to_cpup(pfunc);
+out:
+ kfree(pfunc);
+ return ret;
}
/* This is the actual algorithm we define */
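The i2c-tiny-usb hunks above move the status byte and the functionality word off the stack and onto the heap because buffers handed to the USB core must be DMA-capable, which on-stack memory is not guaranteed to be. A minimal sketch of the same pattern for a generic vendor control transfer follows; the request value and helper name are illustrative only, not taken from this patch.

#include <linux/slab.h>
#include <linux/usb.h>

/* Sketch only: heap-allocate any buffer passed to the USB core. */
static int example_read_byte(struct usb_device *udev, u8 *out)
{
	u8 *buf;
	int ret;

	buf = kmalloc(1, GFP_KERNEL);	/* DMA-capable, unlike stack memory */
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x01,	/* hypothetical vendor request */
			      USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_IN,
			      0, 0, buf, 1, 2000);
	if (ret == 1)
		*out = *buf;

	kfree(buf);
	return ret == 1 ? 0 : -EREMOTEIO;
}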
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index f3a8790a07e..6bb3a89a440 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -16,7 +16,6 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>
-#include <linux/of_i2c.h>
#define I2C_CONTROL 0x00
#define I2C_CONTROLS 0x00
@@ -108,7 +107,6 @@ static int i2c_versatile_probe(struct platform_device *dev)
ret = i2c_bit_add_numbered_bus(&i2c->adap);
if (ret >= 0) {
platform_set_drvdata(dev, i2c);
- of_i2c_register_devices(&i2c->adap);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-wmt.c b/drivers/i2c/busses/i2c-wmt.c
index baaa7d15b73..c65da3d913a 100644
--- a/drivers/i2c/busses/i2c-wmt.c
+++ b/drivers/i2c/busses/i2c-wmt.c
@@ -21,7 +21,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_i2c.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
@@ -439,8 +438,6 @@ static int wmt_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, i2c_dev);
- of_i2c_register_devices(adap);
-
return 0;
}
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 3d0f0520c1b..4c8b368d463 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -40,7 +40,6 @@
#include <linux/i2c-xiic.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/of_i2c.h>
#define DRIVER_NAME "xiic-i2c"
@@ -703,7 +702,7 @@ static int xiic_i2c_probe(struct platform_device *pdev)
if (irq < 0)
goto resource_missing;
- pdata = (struct xiic_i2c_platform_data *) pdev->dev.platform_data;
+ pdata = (struct xiic_i2c_platform_data *)dev_get_platdata(&pdev->dev);
i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
if (!i2c)
@@ -752,8 +751,6 @@ static int xiic_i2c_probe(struct platform_device *pdev)
i2c_new_device(&i2c->adap, pdata->devices + i);
}
- of_i2c_register_devices(&i2c->adap);
-
return 0;
add_adapter_failed:
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index f32ca293ae0..29d3f045a2b 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -23,7 +23,11 @@
SMBus 2.0 support by Mark Studebaker <mdsxyz123@yahoo.com> and
Jean Delvare <khali@linux-fr.org>
Mux support by Rodolfo Giometti <giometti@enneenne.com> and
- Michael Lawnick <michael.lawnick.ext@nsn.com> */
+ Michael Lawnick <michael.lawnick.ext@nsn.com>
+ OF support is copyright (c) 2008 Jochen Friedrich <jochen@scram.de>
+ (based on a previous patch from Jon Smirl <jonsmirl@gmail.com>) and
+ (c) 2013 Wolfram Sang <wsa@the-dreams.de>
+ */
#include <linux/module.h>
#include <linux/kernel.h>
@@ -35,7 +39,9 @@
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/completion.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
@@ -954,6 +960,194 @@ static void i2c_scan_static_board_info(struct i2c_adapter *adapter)
up_read(&__i2c_board_lock);
}
+/* OF support code */
+
+#if IS_ENABLED(CONFIG_OF)
+static void of_i2c_register_devices(struct i2c_adapter *adap)
+{
+ void *result;
+ struct device_node *node;
+
+ /* Only register child devices if the adapter has a node pointer set */
+ if (!adap->dev.of_node)
+ return;
+
+ dev_dbg(&adap->dev, "of_i2c: walking child nodes\n");
+
+ for_each_available_child_of_node(adap->dev.of_node, node) {
+ struct i2c_board_info info = {};
+ struct dev_archdata dev_ad = {};
+ const __be32 *addr;
+ int len;
+
+ dev_dbg(&adap->dev, "of_i2c: register %s\n", node->full_name);
+
+ if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) {
+ dev_err(&adap->dev, "of_i2c: modalias failure on %s\n",
+ node->full_name);
+ continue;
+ }
+
+ addr = of_get_property(node, "reg", &len);
+ if (!addr || (len < sizeof(int))) {
+ dev_err(&adap->dev, "of_i2c: invalid reg on %s\n",
+ node->full_name);
+ continue;
+ }
+
+ info.addr = be32_to_cpup(addr);
+ if (info.addr > (1 << 10) - 1) {
+ dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n",
+ info.addr, node->full_name);
+ continue;
+ }
+
+ info.irq = irq_of_parse_and_map(node, 0);
+ info.of_node = of_node_get(node);
+ info.archdata = &dev_ad;
+
+ if (of_get_property(node, "wakeup-source", NULL))
+ info.flags |= I2C_CLIENT_WAKE;
+
+ request_module("%s%s", I2C_MODULE_PREFIX, info.type);
+
+ result = i2c_new_device(adap, &info);
+ if (result == NULL) {
+ dev_err(&adap->dev, "of_i2c: Failure registering %s\n",
+ node->full_name);
+ of_node_put(node);
+ irq_dispose_mapping(info.irq);
+ continue;
+ }
+ }
+}
+
+static int of_dev_node_match(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+/* must call put_device() when done with returned i2c_client device */
+struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&i2c_bus_type, NULL, node,
+ of_dev_node_match);
+ if (!dev)
+ return NULL;
+
+ return i2c_verify_client(dev);
+}
+EXPORT_SYMBOL(of_find_i2c_device_by_node);
+
+/* must call put_device() when done with returned i2c_adapter device */
+struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&i2c_bus_type, NULL, node,
+ of_dev_node_match);
+ if (!dev)
+ return NULL;
+
+ return i2c_verify_adapter(dev);
+}
+EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
+#else
+static void of_i2c_register_devices(struct i2c_adapter *adap) { }
+#endif /* CONFIG_OF */
+
+/* ACPI support code */
+
+#if IS_ENABLED(CONFIG_ACPI)
+static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data)
+{
+ struct i2c_board_info *info = data;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ struct acpi_resource_i2c_serialbus *sb;
+
+ sb = &ares->data.i2c_serial_bus;
+ if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+ info->addr = sb->slave_address;
+ if (sb->access_mode == ACPI_I2C_10BIT_MODE)
+ info->flags |= I2C_CLIENT_TEN;
+ }
+ } else if (info->irq < 0) {
+ struct resource r;
+
+ if (acpi_dev_resource_interrupt(ares, 0, &r))
+ info->irq = r.start;
+ }
+
+ /* Tell the ACPI core to skip this resource */
+ return 1;
+}
+
+static acpi_status acpi_i2c_add_device(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct i2c_adapter *adapter = data;
+ struct list_head resource_list;
+ struct i2c_board_info info;
+ struct acpi_device *adev;
+ int ret;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+ if (acpi_bus_get_status(adev) || !adev->status.present)
+ return AE_OK;
+
+ memset(&info, 0, sizeof(info));
+ info.acpi_node.handle = handle;
+ info.irq = -1;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list,
+ acpi_i2c_add_resource, &info);
+ acpi_dev_free_resource_list(&resource_list);
+
+ if (ret < 0 || !info.addr)
+ return AE_OK;
+
+ strlcpy(info.type, dev_name(&adev->dev), sizeof(info.type));
+ if (!i2c_new_device(adapter, &info)) {
+ dev_err(&adapter->dev,
+ "failed to add I2C device %s from ACPI\n",
+ dev_name(&adev->dev));
+ }
+
+ return AE_OK;
+}
+
+/**
+ * acpi_i2c_register_devices - enumerate I2C slave devices behind adapter
+ * @adap: pointer to adapter
+ *
+ * Enumerate all I2C slave devices behind this adapter by walking the ACPI
+ * namespace. When a device is found it will be added to the Linux device
+ * model and bound to the corresponding ACPI handle.
+ */
+static void acpi_i2c_register_devices(struct i2c_adapter *adap)
+{
+ acpi_handle handle;
+ acpi_status status;
+
+ handle = ACPI_HANDLE(adap->dev.parent);
+ if (!handle)
+ return;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ acpi_i2c_add_device, NULL,
+ adap, NULL);
+ if (ACPI_FAILURE(status))
+ dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
+}
+#else
+static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) {}
+#endif /* CONFIG_ACPI */
+
static int i2c_do_add_adapter(struct i2c_driver *driver,
struct i2c_adapter *adap)
{
@@ -1058,6 +1252,9 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
exit_recovery:
/* create pre-declared device nodes */
+ of_i2c_register_devices(adap);
+ acpi_i2c_register_devices(adap);
+
if (adap->nr < __i2c_first_dynamic_bus_num)
i2c_scan_static_board_info(adap);
@@ -1282,7 +1479,6 @@ void i2c_del_adapter(struct i2c_adapter *adap)
}
EXPORT_SYMBOL(i2c_del_adapter);
-
/* ------------------------------------------------------------------------- */
int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *))
@@ -1665,7 +1861,8 @@ static int i2c_default_probe(struct i2c_adapter *adap, unsigned short addr)
err = i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, 0,
I2C_SMBUS_BYTE, &dummy);
else {
- dev_warn(&adap->dev, "No suitable probing method supported\n");
+ dev_warn(&adap->dev, "No suitable probing method supported for address 0x%02X\n",
+ addr);
err = -EOPNOTSUPP;
}
@@ -1825,7 +2022,8 @@ EXPORT_SYMBOL(i2c_get_adapter);
void i2c_put_adapter(struct i2c_adapter *adap)
{
- module_put(adap->owner);
+ if (adap)
+ module_put(adap->owner);
}
EXPORT_SYMBOL(i2c_put_adapter);
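The core change in this file is that i2c_register_adapter() now calls of_i2c_register_devices() and the new acpi_i2c_register_devices() itself, which is why every bus driver above simply drops its own call. After this series a converted bus driver only has to hand its of_node to the adapter before registering it. A hedged sketch follows; the driver name and state structure are made up for illustration, and the hardware setup is elided.

#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/platform_device.h>

struct example_i2c {			/* hypothetical driver state */
	struct i2c_adapter adap;
};

static int example_i2c_probe(struct platform_device *pdev)
{
	struct example_i2c *i2c;

	i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
	if (!i2c)
		return -ENOMEM;

	/* ... register mapping, clocks and i2c->adap.algo setup elided ... */

	i2c->adap.owner = THIS_MODULE;
	i2c->adap.dev.parent = &pdev->dev;
	i2c->adap.dev.of_node = pdev->dev.of_node; /* lets the core find DT children */

	/*
	 * i2c_add_adapter() ends up in i2c_register_adapter(), which now
	 * calls of_i2c_register_devices() and acpi_i2c_register_devices()
	 * for us; no explicit call in the driver is needed anymore.
	 */
	return i2c_add_adapter(&i2c->adap);
}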
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 7409ebb33c4..797e3117bef 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -25,7 +25,6 @@
#include <linux/i2c.h>
#include <linux/i2c-mux.h>
#include <linux/of.h>
-#include <linux/of_i2c.h>
/* multiplexer per channel data */
struct i2c_mux_priv {
@@ -185,8 +184,6 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
dev_info(&parent->dev, "Added multiplexed i2c bus %d\n",
i2c_adapter_id(&priv->adap));
- of_i2c_register_devices(&priv->adap);
-
return &priv->adap;
}
EXPORT_SYMBOL_GPL(i2c_add_mux_adapter);
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index 92cdd2323b0..44d4c6071c1 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -137,7 +137,7 @@ static irqreturn_t smbalert_irq(int irq, void *d)
static int smbalert_probe(struct i2c_client *ara,
const struct i2c_device_id *id)
{
- struct i2c_smbus_alert_setup *setup = ara->dev.platform_data;
+ struct i2c_smbus_alert_setup *setup = dev_get_platdata(&ara->dev);
struct i2c_smbus_alert *alert;
struct i2c_adapter *adapter = ara->adapter;
int res;
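Many hunks in this series are the same mechanical conversion seen here: open-coded dev.platform_data dereferences become dev_get_platdata(), the plain accessor from <linux/device.h>. A minimal sketch of the pattern, with a made-up driver and platform-data type:

#include <linux/device.h>
#include <linux/platform_device.h>

struct foo_platform_data {		/* hypothetical platform data */
	int reset_gpio;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_platform_data *pdata = dev_get_platdata(&pdev->dev);

	if (!pdata)
		return -ENODEV;	/* or fall back to DT, as i2c-mux-gpio does below */

	/* use pdata->reset_gpio here */
	return 0;
}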
diff --git a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
index 210b6f7b902..74b41ae690f 100644
--- a/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
+++ b/drivers/i2c/muxes/i2c-arb-gpio-challenge.c
@@ -21,7 +21,6 @@
#include <linux/i2c-mux.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/of_i2c.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -131,7 +130,7 @@ static int i2c_arbitrator_probe(struct platform_device *pdev)
dev_err(dev, "Cannot find device tree node\n");
return -ENODEV;
}
- if (dev->platform_data) {
+ if (dev_get_platdata(dev)) {
dev_err(dev, "Platform data is not supported\n");
return -EINVAL;
}
diff --git a/drivers/i2c/muxes/i2c-mux-gpio.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index 5a0ce0081dc..5d4a99ba743 100644
--- a/drivers/i2c/muxes/i2c-mux-gpio.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gpio.h>
-#include <linux/of_i2c.h>
#include <linux/of_gpio.h>
struct gpiomux {
@@ -148,12 +147,14 @@ static int i2c_mux_gpio_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mux);
- if (!pdev->dev.platform_data) {
+ if (!dev_get_platdata(&pdev->dev)) {
ret = i2c_mux_gpio_probe_dt(mux, pdev);
if (ret < 0)
return ret;
- } else
- memcpy(&mux->data, pdev->dev.platform_data, sizeof(mux->data));
+ } else {
+ memcpy(&mux->data, dev_get_platdata(&pdev->dev),
+ sizeof(mux->data));
+ }
/*
* If a GPIO chip name is provided, the GPIO pin numbers provided are
diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index 966a18a5d12..c4f08ad3118 100644
--- a/drivers/i2c/muxes/i2c-mux-pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -324,7 +324,7 @@ static int pca9541_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adap = client->adapter;
- struct pca954x_platform_data *pdata = client->dev.platform_data;
+ struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
struct pca9541 *data;
int force;
int ret = -ENODEV;
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index a531d801dbe..bad5b84a598 100644
--- a/drivers/i2c/muxes/i2c-mux-pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -185,7 +185,7 @@ static int pca954x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent);
- struct pca954x_platform_data *pdata = client->dev.platform_data;
+ struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
int num, force, class;
struct pca954x *data;
int ret = -ENODEV;
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
index a43c0ce5e3d..69a91732ae6 100644
--- a/drivers/i2c/muxes/i2c-mux-pinctrl.c
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -20,7 +20,6 @@
#include <linux/i2c-mux.h>
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/of_i2c.h>
#include <linux/pinctrl/consumer.h>
#include <linux/i2c-mux-pinctrl.h>
#include <linux/platform_device.h>
@@ -145,7 +144,7 @@ static int i2c_mux_pinctrl_probe(struct platform_device *pdev)
mux->dev = &pdev->dev;
- mux->pdata = pdev->dev.platform_data;
+ mux->pdata = dev_get_platdata(&pdev->dev);
if (!mux->pdata) {
ret = i2c_mux_pinctrl_parse_dt(mux, pdev);
if (ret < 0)
diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c
index 0a8440ae056..97a2f9dc75d 100644
--- a/drivers/ide/gayle.c
+++ b/drivers/ide/gayle.c
@@ -132,7 +132,7 @@ static int __init amiga_gayle_ide_probe(struct platform_device *pdev)
if (!request_mem_region(res->start, resource_size(res), "IDE"))
return -EBUSY;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
pr_info("ide: Gayle IDE controller (A%u style%s)\n",
pdata->explicit_ack ? 1200 : 4000,
ide_doubler ? ", IDE doubler" : "");
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index f1a6796b165..140c8ef5052 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -520,11 +520,12 @@ void ide_acpi_set_state(ide_hwif_t *hwif, int on)
ide_port_for_each_present_dev(i, drive, hwif) {
if (drive->acpidata->obj_handle)
acpi_bus_set_power(drive->acpidata->obj_handle,
- on ? ACPI_STATE_D0 : ACPI_STATE_D3);
+ on ? ACPI_STATE_D0 : ACPI_STATE_D3_COLD);
}
if (!on)
- acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D3);
+ acpi_bus_set_power(hwif->acpidata->obj_handle,
+ ACPI_STATE_D3_COLD);
}
/**
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c
index 4d19eb9772a..6233fa2cb8a 100644
--- a/drivers/ide/ide-ioctls.c
+++ b/drivers/ide/ide-ioctls.c
@@ -141,8 +141,8 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
if (args[0] == ATA_CMD_SMART) {
tf->nsect = args[3];
tf->lbal = args[1];
- tf->lbam = 0x4f;
- tf->lbah = 0xc2;
+ tf->lbam = ATA_SMART_LBAM_PASS;
+ tf->lbah = ATA_SMART_LBAH_PASS;
cmd.valid.out.tf = IDE_VALID_OUT_TF;
cmd.valid.in.tf = IDE_VALID_NSECT;
} else {
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c
index 6ab9ab2a508..f41558a0bcd 100644
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -116,8 +116,10 @@ ssize_t ide_park_store(struct device *dev, struct device_attribute *attr,
long int input;
int rc;
- rc = strict_strtol(buf, 10, &input);
- if (rc || input < -2)
+ rc = kstrtol(buf, 10, &input);
+ if (rc)
+ return rc;
+ if (input < -2)
return -EINVAL;
if (input > MAX_PARK_TIMEOUT) {
input = MAX_PARK_TIMEOUT;
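The ide-park change above swaps the deprecated strict_strtol() for kstrtol() and, importantly, propagates the parser's own error code instead of collapsing every failure into -EINVAL. A minimal sketch of the resulting sysfs store pattern, using a hypothetical attribute:

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t len)
{
	long val;
	int rc;

	rc = kstrtol(buf, 10, &val);
	if (rc)
		return rc;	/* pass -EINVAL/-ERANGE through, do not mask it */
	if (val < 0)
		return -EINVAL;	/* range checks stay separate from parse errors */

	/* apply val to the device here */
	return len;
}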
diff --git a/drivers/ide/ide_platform.c b/drivers/ide/ide_platform.c
index ba4bfbead24..a8b4b6af80e 100644
--- a/drivers/ide/ide_platform.c
+++ b/drivers/ide/ide_platform.c
@@ -56,7 +56,7 @@ static int plat_ide_probe(struct platform_device *pdev)
struct ide_hw hw, *hws[] = { &hw };
struct ide_port_info d = platform_ide_port_info;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
/* get a pointer to the register memory */
res_base = platform_get_resource(pdev, IORESOURCE_IO, 0);
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index 6107cc4ee01..ba20d18c037 100644
--- a/drivers/ide/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
@@ -191,7 +191,7 @@ static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate,
static void palm_bk3710_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
int is_slave = drive->dn & 1;
- void __iomem *base = (void *)hwif->dma_base;
+ void __iomem *base = (void __iomem *)hwif->dma_base;
const u8 xferspeed = drive->dma_mode;
if (xferspeed >= XFER_UDMA_0) {
@@ -209,7 +209,7 @@ static void palm_bk3710_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
unsigned int cycle_time;
int is_slave = drive->dn & 1;
ide_drive_t *mate;
- void __iomem *base = (void *)hwif->dma_base;
+ void __iomem *base = (void __iomem *)hwif->dma_base;
const u8 pio = drive->pio_mode - XFER_PIO_0;
/*
diff --git a/drivers/ide/sgiioc4.c b/drivers/ide/sgiioc4.c
index a5ca179a83b..63761db6138 100644
--- a/drivers/ide/sgiioc4.c
+++ b/drivers/ide/sgiioc4.c
@@ -600,7 +600,7 @@ out:
return ret;
}
-int ioc4_ide_attach_one(struct ioc4_driver_data *idd)
+static int ioc4_ide_attach_one(struct ioc4_driver_data *idd)
{
/*
* PCI-RT does not bring out IDE connection.
diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c
index ede8575ac7d..68edd4f58a2 100644
--- a/drivers/ide/tx4938ide.c
+++ b/drivers/ide/tx4938ide.c
@@ -58,7 +58,7 @@ static void tx4938ide_tune_ebusc(unsigned int ebus_ch,
static void tx4938ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- struct tx4938ide_platform_info *pdata = hwif->dev->platform_data;
+ struct tx4938ide_platform_info *pdata = dev_get_platdata(hwif->dev);
u8 safe = drive->pio_mode - XFER_PIO_0;
ide_drive_t *pair;
@@ -132,7 +132,7 @@ static int __init tx4938ide_probe(struct platform_device *pdev)
struct ide_hw hw, *hws[] = { &hw };
struct ide_host *host;
struct resource *res;
- struct tx4938ide_platform_info *pdata = pdev->dev.platform_data;
+ struct tx4938ide_platform_info *pdata = dev_get_platdata(&pdev->dev);
int irq, ret, i;
unsigned long mapbase, mapctl;
struct ide_port_info d = tx4938ide_port_info;
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 9af763a90d9..cbea3271c1b 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -23,15 +23,14 @@ if IIO_BUFFER
config IIO_BUFFER_CB
boolean "IIO callback buffer used for push in-kernel interfaces"
help
- Should be selected by any drivers that do-inkernel push
+ Should be selected by any drivers that do in-kernel push
usage. That is, those where the data is pushed to the consumer.
config IIO_KFIFO_BUF
select IIO_TRIGGER
tristate "Industrial I/O buffering based on kfifo"
help
- A simple fifo based on kfifo. Use this if you want a fifo
- rather than a ring buffer. Note that this currently provides
+ A simple fifo based on kfifo. Note that this currently provides
no buffer events so it is up to userspace to work out how
often to read from the buffer.
@@ -49,7 +48,7 @@ config IIO_TRIGGER
help
Provides IIO core support for triggers. Currently these
are used to initialize capture of samples to push into
- ring buffers. The triggers are effectively a 'capture
+ buffers. The triggers are effectively a 'capture
data now' interrupt.
config IIO_CONSUMERS_PER_TRIGGER
@@ -74,5 +73,6 @@ if IIO_TRIGGER
source "drivers/iio/trigger/Kconfig"
endif #IIO_TRIGGER
source "drivers/iio/pressure/Kconfig"
+source "drivers/iio/temperature/Kconfig"
endif # IIO
diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile
index 7a3866c2d2a..bcf7e9e3b05 100644
--- a/drivers/iio/Makefile
+++ b/drivers/iio/Makefile
@@ -21,5 +21,6 @@ obj-y += frequency/
obj-y += imu/
obj-y += light/
obj-y += magnetometer/
-obj-y += trigger/
obj-y += pressure/
+obj-y += temperature/
+obj-y += trigger/
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index 719d83fe51d..e23e5085065 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -1,8 +1,22 @@
#
# Accelerometer drivers
#
+# When adding new entries keep the list in alphabetical order
+
menu "Accelerometers"
+config BMA180
+ tristate "Bosch BMA180 3-Axis Accelerometer Driver"
+ depends on I2C
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ help
+ Say Y here if you want to build a driver for the Bosch BMA180
+ triaxial acceleration sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called bma180.
+
config HID_SENSOR_ACCEL_3D
depends on HID_SENSOR_HUB
select IIO_BUFFER
@@ -14,13 +28,6 @@ config HID_SENSOR_ACCEL_3D
Say yes here to build support for the HID SENSOR
accelerometers 3D.
-config KXSD9
- tristate "Kionix KXSD9 Accelerometer Driver"
- depends on SPI
- help
- Say yes here to build support for the Kionix KXSD9 accelerometer.
- Currently this only supports the device via an SPI interface.
-
config IIO_ST_ACCEL_3AXIS
tristate "STMicroelectronics accelerometers 3-Axis Driver"
depends on (I2C || SPI_MASTER) && SYSFS
@@ -33,8 +40,8 @@ config IIO_ST_ACCEL_3AXIS
LSM303DLH, LSM303DLHC, LIS3DH, LSM330D, LSM330DL, LSM330DLC,
LIS331DLH, LSM303DL, LSM303DLM, LSM330.
- This driver can also be built as a module. If so, will be created
- these modules:
+ This driver can also be built as a module. If so, these modules
+ will be created:
- st_accel (core functions for the driver [it is mandatory]);
- st_accel_i2c (necessary for the I2C devices [optional*]);
- st_accel_spi (necessary for the SPI devices [optional*]);
@@ -51,4 +58,11 @@ config IIO_ST_ACCEL_SPI_3AXIS
depends on IIO_ST_ACCEL_3AXIS
depends on IIO_ST_SENSORS_SPI
+config KXSD9
+ tristate "Kionix KXSD9 Accelerometer Driver"
+ depends on SPI
+ help
+ Say yes here to build support for the Kionix KXSD9 accelerometer.
+ Currently this only supports the device via an SPI interface.
+
endmenu
diff --git a/drivers/iio/accel/Makefile b/drivers/iio/accel/Makefile
index 87d8fa26489..c48d15f2561 100644
--- a/drivers/iio/accel/Makefile
+++ b/drivers/iio/accel/Makefile
@@ -2,7 +2,10 @@
# Makefile for industrial I/O accelerometer drivers
#
+# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_BMA180) += bma180.o
obj-$(CONFIG_HID_SENSOR_ACCEL_3D) += hid-sensor-accel-3d.o
+obj-$(CONFIG_KXSD9) += kxsd9.o
obj-$(CONFIG_IIO_ST_ACCEL_3AXIS) += st_accel.o
st_accel-y := st_accel_core.o
@@ -10,5 +13,3 @@ st_accel-$(CONFIG_IIO_BUFFER) += st_accel_buffer.o
obj-$(CONFIG_IIO_ST_ACCEL_I2C_3AXIS) += st_accel_i2c.o
obj-$(CONFIG_IIO_ST_ACCEL_SPI_3AXIS) += st_accel_spi.o
-
-obj-$(CONFIG_KXSD9) += kxsd9.o
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
new file mode 100644
index 00000000000..12e32e6b410
--- /dev/null
+++ b/drivers/iio/accel/bma180.c
@@ -0,0 +1,676 @@
+/*
+ * bma180.c - IIO driver for Bosch BMA180 triaxial acceleration sensor
+ *
+ * Copyright 2013 Oleksandr Kravchenko <x0199363@ti.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define BMA180_DRV_NAME "bma180"
+#define BMA180_IRQ_NAME "bma180_event"
+
+/* Register set */
+#define BMA180_CHIP_ID 0x00 /* Needed to distinguish BMA180 from others */
+#define BMA180_ACC_X_LSB 0x02 /* First of 6 registers of accel data */
+#define BMA180_CTRL_REG0 0x0d
+#define BMA180_RESET 0x10
+#define BMA180_BW_TCS 0x20
+#define BMA180_CTRL_REG3 0x21
+#define BMA180_TCO_Z 0x30
+#define BMA180_OFFSET_LSB1 0x35
+
+/* BMA180_CTRL_REG0 bits */
+#define BMA180_DIS_WAKE_UP BIT(0) /* Disable wake up mode */
+#define BMA180_SLEEP BIT(1) /* 1 - chip will sleep */
+#define BMA180_EE_W BIT(4) /* Unlock writing to addr from 0x20 */
+#define BMA180_RESET_INT BIT(6) /* Reset pending interrupts */
+
+/* BMA180_CTRL_REG3 bits */
+#define BMA180_NEW_DATA_INT BIT(1) /* Interrupt when new accel data is ready */
+
+/* BMA180_OFFSET_LSB1 skipping mode bit */
+#define BMA180_SMP_SKIP BIT(0)
+
+/* Bit masks for registers bit fields */
+#define BMA180_RANGE 0x0e /* Range of measured accel values */
+#define BMA180_BW 0xf0 /* Accel bandwidth */
+#define BMA180_MODE_CONFIG 0x03 /* Config operation modes */
+
+/* We have to write this value into the reset register to do a soft reset */
+#define BMA180_RESET_VAL 0xb6
+
+#define BMA_180_ID_REG_VAL 0x03
+
+/* Chip power modes */
+#define BMA180_LOW_NOISE 0x00
+#define BMA180_LOW_POWER 0x03
+
+#define BMA180_LOW_NOISE_STR "low_noise"
+#define BMA180_LOW_POWER_STR "low_power"
+
+/* Default values */
+#define BMA180_DEF_PMODE 0
+#define BMA180_DEF_BW 20
+#define BMA180_DEF_SCALE 250
+
+/* Available values for sysfs */
+#define BMA180_FLP_FREQ_AVAILABLE \
+ "10 20 40 75 150 300"
+#define BMA180_SCALE_AVAILABLE \
+ "0.000130 0.000190 0.000250 0.000380 0.000500 0.000990 0.001980"
+
+struct bma180_data {
+ struct i2c_client *client;
+ struct iio_trigger *trig;
+ struct mutex mutex;
+ int sleep_state;
+ int scale;
+ int bw;
+ int pmode;
+ char *buff;
+};
+
+enum bma180_axis {
+ AXIS_X,
+ AXIS_Y,
+ AXIS_Z,
+};
+
+static int bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */
+static int scale_table[] = { 130, 190, 250, 380, 500, 990, 1980 };
+
+static int bma180_get_acc_reg(struct bma180_data *data, enum bma180_axis axis)
+{
+ u8 reg = BMA180_ACC_X_LSB + axis * 2;
+ int ret;
+
+ if (data->sleep_state)
+ return -EBUSY;
+
+ ret = i2c_smbus_read_word_data(data->client, reg);
+ if (ret < 0)
+ dev_err(&data->client->dev,
+ "failed to read accel_%c registers\n", 'x' + axis);
+
+ return ret;
+}
+
+static int bma180_set_bits(struct bma180_data *data, u8 reg, u8 mask, u8 val)
+{
+ int ret = i2c_smbus_read_byte_data(data->client, reg);
+ u8 reg_val = (ret & ~mask) | (val << (ffs(mask) - 1));
+
+ if (ret < 0)
+ return ret;
+
+ return i2c_smbus_write_byte_data(data->client, reg, reg_val);
+}
+
+static int bma180_reset_intr(struct bma180_data *data)
+{
+ int ret = bma180_set_bits(data, BMA180_CTRL_REG0, BMA180_RESET_INT, 1);
+
+ if (ret)
+ dev_err(&data->client->dev, "failed to reset interrupt\n");
+
+ return ret;
+}
+
+static int bma180_set_new_data_intr_state(struct bma180_data *data, int state)
+{
+ u8 reg_val = state ? BMA180_NEW_DATA_INT : 0x00;
+ int ret = i2c_smbus_write_byte_data(data->client, BMA180_CTRL_REG3,
+ reg_val);
+
+ if (ret)
+ goto err;
+ ret = bma180_reset_intr(data);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(&data->client->dev,
+ "failed to set new data interrupt state %d\n", state);
+ return ret;
+}
+
+static int bma180_set_sleep_state(struct bma180_data *data, int state)
+{
+ int ret = bma180_set_bits(data, BMA180_CTRL_REG0, BMA180_SLEEP, state);
+
+ if (ret) {
+ dev_err(&data->client->dev,
+ "failed to set sleep state %d\n", state);
+ return ret;
+ }
+ data->sleep_state = state;
+
+ return 0;
+}
+
+static int bma180_set_ee_writing_state(struct bma180_data *data, int state)
+{
+ int ret = bma180_set_bits(data, BMA180_CTRL_REG0, BMA180_EE_W, state);
+
+ if (ret)
+ dev_err(&data->client->dev,
+ "failed to set ee writing state %d\n", state);
+
+ return ret;
+}
+
+static int bma180_set_bw(struct bma180_data *data, int val)
+{
+ int ret, i;
+
+ if (data->sleep_state)
+ return -EBUSY;
+
+ for (i = 0; i < ARRAY_SIZE(bw_table); ++i) {
+ if (bw_table[i] == val) {
+ ret = bma180_set_bits(data,
+ BMA180_BW_TCS, BMA180_BW, i);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "failed to set bandwidth\n");
+ return ret;
+ }
+ data->bw = val;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int bma180_set_scale(struct bma180_data *data, int val)
+{
+ int ret, i;
+
+ if (data->sleep_state)
+ return -EBUSY;
+
+ for (i = 0; i < ARRAY_SIZE(scale_table); ++i)
+ if (scale_table[i] == val) {
+ ret = bma180_set_bits(data,
+ BMA180_OFFSET_LSB1, BMA180_RANGE, i);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "failed to set scale\n");
+ return ret;
+ }
+ data->scale = val;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int bma180_set_pmode(struct bma180_data *data, int mode)
+{
+ u8 reg_val = mode ? BMA180_LOW_POWER : BMA180_LOW_NOISE;
+ int ret = bma180_set_bits(data, BMA180_TCO_Z, BMA180_MODE_CONFIG,
+ reg_val);
+
+ if (ret) {
+ dev_err(&data->client->dev, "failed to set power mode\n");
+ return ret;
+ }
+ data->pmode = mode;
+
+ return 0;
+}
+
+static int bma180_soft_reset(struct bma180_data *data)
+{
+ int ret = i2c_smbus_write_byte_data(data->client,
+ BMA180_RESET, BMA180_RESET_VAL);
+
+ if (ret)
+ dev_err(&data->client->dev, "failed to reset the chip\n");
+
+ return ret;
+}
+
+static int bma180_chip_init(struct bma180_data *data)
+{
+ /* Try to read chip_id register. It must return 0x03. */
+ int ret = i2c_smbus_read_byte_data(data->client, BMA180_CHIP_ID);
+
+ if (ret < 0)
+ goto err;
+ if (ret != BMA_180_ID_REG_VAL) {
+ ret = -ENODEV;
+ goto err;
+ }
+
+ ret = bma180_soft_reset(data);
+ if (ret)
+ goto err;
+ /*
+ * No serial transaction should occur for at least 10 us
+ * after the soft_reset command.
+ */
+ msleep(20);
+
+ ret = bma180_set_bits(data, BMA180_CTRL_REG0, BMA180_DIS_WAKE_UP, 1);
+ if (ret)
+ goto err;
+ ret = bma180_set_ee_writing_state(data, 1);
+ if (ret)
+ goto err;
+ ret = bma180_set_new_data_intr_state(data, 0);
+ if (ret)
+ goto err;
+ ret = bma180_set_bits(data, BMA180_OFFSET_LSB1, BMA180_SMP_SKIP, 1);
+ if (ret)
+ goto err;
+ ret = bma180_set_pmode(data, BMA180_DEF_PMODE);
+ if (ret)
+ goto err;
+ ret = bma180_set_bw(data, BMA180_DEF_BW);
+ if (ret)
+ goto err;
+ ret = bma180_set_scale(data, BMA180_DEF_SCALE);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(&data->client->dev, "failed to init the chip\n");
+ return ret;
+}
+
+static void bma180_chip_disable(struct bma180_data *data)
+{
+ if (bma180_set_new_data_intr_state(data, 0))
+ goto err;
+ if (bma180_set_ee_writing_state(data, 0))
+ goto err;
+ if (bma180_set_sleep_state(data, 1))
+ goto err;
+
+ return;
+
+err:
+ dev_err(&data->client->dev, "failed to disable the chip\n");
+}
+
+static IIO_CONST_ATTR(in_accel_filter_low_pass_3db_frequency_available,
+ BMA180_FLP_FREQ_AVAILABLE);
+static IIO_CONST_ATTR(in_accel_scale_available, BMA180_SCALE_AVAILABLE);
+
+static struct attribute *bma180_attributes[] = {
+ &iio_const_attr_in_accel_filter_low_pass_3db_frequency_available.dev_attr.attr,
+ &iio_const_attr_in_accel_scale_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group bma180_attrs_group = {
+ .attrs = bma180_attributes,
+};
+
+static int bma180_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask)
+{
+ struct bma180_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&data->mutex);
+ if (iio_buffer_enabled(indio_dev))
+ ret = -EBUSY;
+ else
+ ret = bma180_get_acc_reg(data, chan->scan_index);
+ mutex_unlock(&data->mutex);
+ if (ret < 0)
+ return ret;
+ *val = (s16)ret >> chan->scan_type.shift;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ *val = data->bw;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = data->scale;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bma180_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ struct bma180_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ if (val)
+ return -EINVAL;
+ mutex_lock(&data->mutex);
+ ret = bma180_set_scale(data, val2);
+ mutex_unlock(&data->mutex);
+ return ret;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ mutex_lock(&data->mutex);
+ ret = bma180_set_bw(data, val);
+ mutex_unlock(&data->mutex);
+ return ret;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int bma180_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
+{
+ struct bma180_data *data = iio_priv(indio_dev);
+
+ if (data->buff)
+ devm_kfree(&indio_dev->dev, data->buff);
+ data->buff = devm_kzalloc(&indio_dev->dev,
+ indio_dev->scan_bytes, GFP_KERNEL);
+ if (!data->buff)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static const struct iio_info bma180_info = {
+ .attrs = &bma180_attrs_group,
+ .read_raw = bma180_read_raw,
+ .write_raw = bma180_write_raw,
+ .update_scan_mode = bma180_update_scan_mode,
+ .driver_module = THIS_MODULE,
+};
+
+static const char * const bma180_power_modes[] = {
+ BMA180_LOW_NOISE_STR,
+ BMA180_LOW_POWER_STR,
+};
+
+static int bma180_get_power_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct bma180_data *data = iio_priv(indio_dev);
+
+ return data->pmode;
+}
+
+static int bma180_set_power_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned int mode)
+{
+ struct bma180_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = bma180_set_pmode(data, mode);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static const struct iio_enum bma180_power_mode_enum = {
+ .items = bma180_power_modes,
+ .num_items = ARRAY_SIZE(bma180_power_modes),
+ .get = bma180_get_power_mode,
+ .set = bma180_set_power_mode,
+};
+
+static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
+ IIO_ENUM("power_mode", true, &bma180_power_mode_enum),
+ IIO_ENUM_AVAILABLE("power_mode", &bma180_power_mode_enum),
+ { },
+};
+
+#define BMA180_CHANNEL(_index) { \
+ .type = IIO_ACCEL, \
+ .indexed = 1, \
+ .channel = (_index), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
+ BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .scan_index = (_index), \
+ .scan_type = IIO_ST('s', 14, 16, 2), \
+ .ext_info = bma180_ext_info, \
+}
+
+static const struct iio_chan_spec bma180_channels[] = {
+ BMA180_CHANNEL(AXIS_X),
+ BMA180_CHANNEL(AXIS_Y),
+ BMA180_CHANNEL(AXIS_Z),
+ IIO_CHAN_SOFT_TIMESTAMP(4),
+};
+
+static irqreturn_t bma180_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bma180_data *data = iio_priv(indio_dev);
+ int bit, ret, i = 0;
+
+ mutex_lock(&data->mutex);
+ if (indio_dev->scan_timestamp) {
+ ret = indio_dev->scan_bytes / sizeof(s64) - 1;
+ ((s64 *)data->buff)[ret] = iio_get_time_ns();
+ }
+
+ for_each_set_bit(bit, indio_dev->buffer->scan_mask,
+ indio_dev->masklength) {
+ ret = bma180_get_acc_reg(data, bit);
+ if (ret < 0) {
+ mutex_unlock(&data->mutex);
+ goto err;
+ }
+ ((s16 *)data->buff)[i++] = ret;
+ }
+ mutex_unlock(&data->mutex);
+
+ iio_push_to_buffers(indio_dev, (u8 *)data->buff);
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+
+ return IRQ_HANDLED;
+}
+
+static int bma180_data_rdy_trigger_set_state(struct iio_trigger *trig,
+ bool state)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct bma180_data *data = iio_priv(indio_dev);
+
+ return bma180_set_new_data_intr_state(data, state);
+}
+
+static int bma180_trig_try_reen(struct iio_trigger *trig)
+{
+ struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+ struct bma180_data *data = iio_priv(indio_dev);
+
+ return bma180_reset_intr(data);
+}
+
+static const struct iio_trigger_ops bma180_trigger_ops = {
+ .set_trigger_state = bma180_data_rdy_trigger_set_state,
+ .try_reenable = bma180_trig_try_reen,
+ .owner = THIS_MODULE,
+};
+
+static int bma180_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct bma180_data *data;
+ struct iio_dev *indio_dev;
+ struct iio_trigger *trig;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ ret = bma180_chip_init(data);
+ if (ret < 0)
+ goto err_chip_disable;
+
+ mutex_init(&data->mutex);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = bma180_channels;
+ indio_dev->num_channels = ARRAY_SIZE(bma180_channels);
+ indio_dev->name = BMA180_DRV_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &bma180_info;
+
+ trig = iio_trigger_alloc("%s-dev%d", indio_dev->name, indio_dev->id);
+ if (!trig) {
+ ret = -ENOMEM;
+ goto err_chip_disable;
+ }
+
+ ret = devm_request_irq(&client->dev, client->irq,
+ iio_trigger_generic_data_rdy_poll,
+ IRQF_TRIGGER_RISING, BMA180_IRQ_NAME, trig);
+ if (ret) {
+ dev_err(&client->dev, "unable to request IRQ\n");
+ goto err_trigger_free;
+ }
+
+ trig->dev.parent = &client->dev;
+ trig->ops = &bma180_trigger_ops;
+ iio_trigger_set_drvdata(trig, indio_dev);
+ data->trig = trig;
+ indio_dev->trig = trig;
+
+ ret = iio_trigger_register(trig);
+ if (ret)
+ goto err_trigger_free;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ bma180_trigger_handler, NULL);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to setup iio triggered buffer\n");
+ goto err_trigger_unregister;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to register iio device\n");
+ goto err_buffer_cleanup;
+ }
+
+ return 0;
+
+err_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+err_trigger_unregister:
+ iio_trigger_unregister(trig);
+err_trigger_free:
+ iio_trigger_free(trig);
+err_chip_disable:
+ bma180_chip_disable(data);
+
+ return ret;
+}
+
+static int bma180_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct bma180_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ iio_trigger_unregister(data->trig);
+ iio_trigger_free(data->trig);
+
+ mutex_lock(&data->mutex);
+ bma180_chip_disable(data);
+ mutex_unlock(&data->mutex);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bma180_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct bma180_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = bma180_set_sleep_state(data, 1);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int bma180_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct bma180_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = bma180_set_sleep_state(data, 0);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(bma180_pm_ops, bma180_suspend, bma180_resume);
+#define BMA180_PM_OPS (&bma180_pm_ops)
+#else
+#define BMA180_PM_OPS NULL
+#endif
+
+static struct i2c_device_id bma180_id[] = {
+ { BMA180_DRV_NAME, 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, bma180_id);
+
+static struct i2c_driver bma180_driver = {
+ .driver = {
+ .name = BMA180_DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = BMA180_PM_OPS,
+ },
+ .probe = bma180_probe,
+ .remove = bma180_remove,
+ .id_table = bma180_id,
+};
+
+module_i2c_driver(bma180_driver);
+
+MODULE_AUTHOR("Kravchenko Oleksandr <x0199363@ti.com>");
+MODULE_AUTHOR("Texas Instruments, Inc.");
+MODULE_DESCRIPTION("Bosch BMA180 triaxial acceleration sensor");
+MODULE_LICENSE("GPL");
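Once the new bma180 driver is bound, its readings are exposed through the standard IIO sysfs interface; the in_accel_scale_available and in_accel_filter_low_pass_3db_frequency_available attributes come straight from the IIO_CONST_ATTR definitions above. A small userspace sketch follows; the device index (iio:device0) and the indexed-channel name in_accel0_raw are assumptions about a particular system, so check /sys/bus/iio/devices on the target.

/* Userspace sketch (C): read one raw X-axis sample from the BMA180. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/iio/devices/iio:device0/in_accel0_raw", "r");
	int raw;

	if (!f) {
		perror("in_accel0_raw");
		return 1;
	}
	if (fscanf(f, "%d", &raw) == 1)
		printf("accel X raw = %d\n", raw);
	fclose(f);
	return 0;
}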
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index bbcbd7101f3..46d22f3fb1a 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -30,10 +30,6 @@
#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
-/*Format: HID-SENSOR-usage_id_in_hex*/
-/*Usage ID from spec for Accelerometer-3D: 0x200073*/
-#define DRIVER_NAME "HID-SENSOR-200073"
-
enum accel_3d_channel {
CHANNEL_SCAN_INDEX_X,
CHANNEL_SCAN_INDEX_Y,
@@ -179,18 +175,10 @@ static int accel_3d_write_raw(struct iio_dev *indio_dev,
return ret;
}
-static int accel_3d_write_raw_get_fmt(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- long mask)
-{
- return IIO_VAL_INT_PLUS_MICRO;
-}
-
static const struct iio_info accel_3d_info = {
.driver_module = THIS_MODULE,
.read_raw = &accel_3d_read_raw,
.write_raw = &accel_3d_write_raw,
- .write_raw_get_fmt = &accel_3d_write_raw_get_fmt,
};
/* Function to push data to buffer */
@@ -286,11 +274,11 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_chan_spec *channels;
- indio_dev = iio_device_alloc(sizeof(struct accel_3d_state));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&pdev->dev,
+ sizeof(struct accel_3d_state));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
platform_set_drvdata(pdev, indio_dev);
accel_state = iio_priv(indio_dev);
@@ -302,15 +290,14 @@ static int hid_accel_3d_probe(struct platform_device *pdev)
&accel_state->common_attributes);
if (ret) {
dev_err(&pdev->dev, "failed to setup common attributes\n");
- goto error_free_dev;
+ return ret;
}
channels = kmemdup(accel_3d_channels, sizeof(accel_3d_channels),
GFP_KERNEL);
if (!channels) {
- ret = -ENOMEM;
dev_err(&pdev->dev, "failed to duplicate channels\n");
- goto error_free_dev;
+ return -ENOMEM;
}
ret = accel_3d_parse_report(pdev, hsdev, channels,
@@ -367,9 +354,6 @@ error_unreg_buffer_funcs:
iio_triggered_buffer_cleanup(indio_dev);
error_free_dev_mem:
kfree(indio_dev->channels);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -384,14 +368,23 @@ static int hid_accel_3d_remove(struct platform_device *pdev)
hid_sensor_remove_trigger(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
kfree(indio_dev->channels);
- iio_device_free(indio_dev);
return 0;
}
+static struct platform_device_id hid_accel_3d_ids[] = {
+ {
+ /* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-200073",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_accel_3d_ids);
+
static struct platform_driver hid_accel_3d_platform_driver = {
+ .id_table = hid_accel_3d_ids,
.driver = {
- .name = DRIVER_NAME,
+ .name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
.probe = hid_accel_3d_probe,
diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
index 7229645bf1d..709c13259f1 100644
--- a/drivers/iio/accel/kxsd9.c
+++ b/drivers/iio/accel/kxsd9.c
@@ -224,11 +224,10 @@ static int kxsd9_probe(struct spi_device *spi)
struct kxsd9_state *st;
int ret;
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
st = iio_priv(indio_dev);
spi_set_drvdata(spi, indio_dev);
@@ -247,20 +246,14 @@ static int kxsd9_probe(struct spi_device *spi)
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
return 0;
-
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
- return ret;
}
static int kxsd9_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
- iio_device_free(spi_get_drvdata(spi));
return 0;
}
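The kxsd9 conversion above, like the ADC drivers later in the series, replaces iio_device_alloc()/iio_device_free() and regulator_get()/regulator_put() with their devm_-managed counterparts, which is what lets the error-unwind labels disappear. A minimal sketch of the resulting probe shape; the state structure and supply name are illustrative.

#include <linux/err.h>
#include <linux/iio/iio.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>

struct example_state {			/* hypothetical driver state */
	struct regulator *reg;
};

static int example_spi_probe(struct spi_device *spi)
{
	struct iio_dev *indio_dev;
	struct example_state *st;

	/* freed automatically on unbind, so no iio_device_free() is needed */
	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;

	st = iio_priv(indio_dev);
	spi_set_drvdata(spi, indio_dev);

	/* released by the core too, so no regulator_put() on error paths */
	st->reg = devm_regulator_get(&spi->dev, "vref");
	if (IS_ERR(st->reg))
		return PTR_ERR(st->reg);

	/* ... channel and info setup elided ... */
	return iio_device_register(indio_dev);
}

Note that regulator_enable() is not managed, which is why the ADC drivers below still call regulator_disable() explicitly in their remove and error paths.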
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index 37949b94377..c3877630b2e 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -25,7 +25,16 @@
#define LSM303DLM_ACCEL_DEV_NAME "lsm303dlm_accel"
#define LSM330_ACCEL_DEV_NAME "lsm330_accel"
-int st_accel_common_probe(struct iio_dev *indio_dev);
+/**
+* struct st_sensors_platform_data - default accel platform data
+* @drdy_int_pin: default accel DRDY is available on INT1 pin.
+*/
+static const struct st_sensors_platform_data default_accel_pdata = {
+ .drdy_int_pin = 1,
+};
+
+int st_accel_common_probe(struct iio_dev *indio_dev,
+ struct st_sensors_platform_data *pdata);
void st_accel_common_remove(struct iio_dev *indio_dev);
#ifdef CONFIG_IIO_BUFFER
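default_accel_pdata above makes INT1 the data-ready pin whenever no platform data is provided; boards that wire DRDY to INT2 can now pass an st_sensors_platform_data with drdy_int_pin = 2, which st_accel_core.c below translates into the new *_DRDY_IRQ_INT2_MASK values. A hedged sketch of such board code follows; the sensor name, bus number and I2C address are illustrative, not mandated by this patch.

#include <linux/i2c.h>
#include <linux/iio/common/st_sensors.h>

static struct st_sensors_platform_data example_accel_pdata = {
	.drdy_int_pin = 2,	/* route data-ready to the INT2 pin */
};

static struct i2c_board_info example_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("lis3dh", 0x18),	/* name/address illustrative */
		.platform_data = &example_accel_pdata,
	},
};

/* from board init code, e.g.:
 *	i2c_register_board_info(0, example_i2c_devs,
 *				ARRAY_SIZE(example_i2c_devs));
 */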
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index 4aec121261d..1458343f6f3 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -65,7 +65,8 @@
#define ST_ACCEL_1_BDU_ADDR 0x23
#define ST_ACCEL_1_BDU_MASK 0x80
#define ST_ACCEL_1_DRDY_IRQ_ADDR 0x22
-#define ST_ACCEL_1_DRDY_IRQ_MASK 0x10
+#define ST_ACCEL_1_DRDY_IRQ_INT1_MASK 0x10
+#define ST_ACCEL_1_DRDY_IRQ_INT2_MASK 0x08
#define ST_ACCEL_1_MULTIREAD_BIT true
/* CUSTOM VALUES FOR SENSOR 2 */
@@ -89,7 +90,8 @@
#define ST_ACCEL_2_BDU_ADDR 0x23
#define ST_ACCEL_2_BDU_MASK 0x80
#define ST_ACCEL_2_DRDY_IRQ_ADDR 0x22
-#define ST_ACCEL_2_DRDY_IRQ_MASK 0x02
+#define ST_ACCEL_2_DRDY_IRQ_INT1_MASK 0x02
+#define ST_ACCEL_2_DRDY_IRQ_INT2_MASK 0x10
#define ST_ACCEL_2_MULTIREAD_BIT true
/* CUSTOM VALUES FOR SENSOR 3 */
@@ -121,7 +123,8 @@
#define ST_ACCEL_3_BDU_ADDR 0x20
#define ST_ACCEL_3_BDU_MASK 0x08
#define ST_ACCEL_3_DRDY_IRQ_ADDR 0x23
-#define ST_ACCEL_3_DRDY_IRQ_MASK 0x80
+#define ST_ACCEL_3_DRDY_IRQ_INT1_MASK 0x80
+#define ST_ACCEL_3_DRDY_IRQ_INT2_MASK 0x00
#define ST_ACCEL_3_IG1_EN_ADDR 0x23
#define ST_ACCEL_3_IG1_EN_MASK 0x08
#define ST_ACCEL_3_MULTIREAD_BIT false
@@ -224,7 +227,8 @@ static const struct st_sensors st_accel_sensors[] = {
},
.drdy_irq = {
.addr = ST_ACCEL_1_DRDY_IRQ_ADDR,
- .mask = ST_ACCEL_1_DRDY_IRQ_MASK,
+ .mask_int1 = ST_ACCEL_1_DRDY_IRQ_INT1_MASK,
+ .mask_int2 = ST_ACCEL_1_DRDY_IRQ_INT2_MASK,
},
.multi_read_bit = ST_ACCEL_1_MULTIREAD_BIT,
.bootime = 2,
@@ -285,7 +289,8 @@ static const struct st_sensors st_accel_sensors[] = {
},
.drdy_irq = {
.addr = ST_ACCEL_2_DRDY_IRQ_ADDR,
- .mask = ST_ACCEL_2_DRDY_IRQ_MASK,
+ .mask_int1 = ST_ACCEL_2_DRDY_IRQ_INT1_MASK,
+ .mask_int2 = ST_ACCEL_2_DRDY_IRQ_INT2_MASK,
},
.multi_read_bit = ST_ACCEL_2_MULTIREAD_BIT,
.bootime = 2,
@@ -358,7 +363,8 @@ static const struct st_sensors st_accel_sensors[] = {
},
.drdy_irq = {
.addr = ST_ACCEL_3_DRDY_IRQ_ADDR,
- .mask = ST_ACCEL_3_DRDY_IRQ_MASK,
+ .mask_int1 = ST_ACCEL_3_DRDY_IRQ_INT1_MASK,
+ .mask_int2 = ST_ACCEL_3_DRDY_IRQ_INT2_MASK,
.ig1 = {
.en_addr = ST_ACCEL_3_IG1_EN_ADDR,
.en_mask = ST_ACCEL_3_IG1_EN_MASK,
@@ -443,7 +449,8 @@ static const struct iio_trigger_ops st_accel_trigger_ops = {
#define ST_ACCEL_TRIGGER_OPS NULL
#endif
-int st_accel_common_probe(struct iio_dev *indio_dev)
+int st_accel_common_probe(struct iio_dev *indio_dev,
+ struct st_sensors_platform_data *plat_data)
{
int err;
struct st_sensor_data *adata = iio_priv(indio_dev);
@@ -465,7 +472,11 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
&adata->sensor->fs.fs_avl[0];
adata->odr = adata->sensor->odr.odr_avl[0].hz;
- err = st_sensors_init_sensor(indio_dev);
+ if (!plat_data)
+ plat_data =
+ (struct st_sensors_platform_data *)&default_accel_pdata;
+
+ err = st_sensors_init_sensor(indio_dev, plat_data);
if (err < 0)
goto st_accel_common_probe_error;
@@ -506,7 +517,6 @@ void st_accel_common_remove(struct iio_dev *indio_dev)
st_sensors_deallocate_trigger(indio_dev);
st_accel_deallocate_ring(indio_dev);
}
- iio_device_free(indio_dev);
}
EXPORT_SYMBOL(st_accel_common_remove);
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index ffc9d097e48..d7bedbdfc81 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -25,27 +25,20 @@ static int st_accel_i2c_probe(struct i2c_client *client,
struct st_sensor_data *adata;
int err;
- indio_dev = iio_device_alloc(sizeof(*adata));
- if (indio_dev == NULL) {
- err = -ENOMEM;
- goto iio_device_alloc_error;
- }
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*adata));
+ if (!indio_dev)
+ return -ENOMEM;
adata = iio_priv(indio_dev);
adata->dev = &client->dev;
st_sensors_i2c_configure(indio_dev, client, adata);
- err = st_accel_common_probe(indio_dev);
+ err = st_accel_common_probe(indio_dev, client->dev.platform_data);
if (err < 0)
- goto st_accel_common_probe_error;
+ return err;
return 0;
-
-st_accel_common_probe_error:
- iio_device_free(indio_dev);
-iio_device_alloc_error:
- return err;
}
static int st_accel_i2c_remove(struct i2c_client *client)
diff --git a/drivers/iio/accel/st_accel_spi.c b/drivers/iio/accel/st_accel_spi.c
index 22b35bfea7d..195639646e3 100644
--- a/drivers/iio/accel/st_accel_spi.c
+++ b/drivers/iio/accel/st_accel_spi.c
@@ -24,27 +24,20 @@ static int st_accel_spi_probe(struct spi_device *spi)
struct st_sensor_data *adata;
int err;
- indio_dev = iio_device_alloc(sizeof(*adata));
- if (indio_dev == NULL) {
- err = -ENOMEM;
- goto iio_device_alloc_error;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adata));
+ if (!indio_dev)
+ return -ENOMEM;
adata = iio_priv(indio_dev);
adata->dev = &spi->dev;
st_sensors_spi_configure(indio_dev, spi, adata);
- err = st_accel_common_probe(indio_dev);
+ err = st_accel_common_probe(indio_dev, spi->dev.platform_data);
if (err < 0)
- goto st_accel_common_probe_error;
+ return err;
return 0;
-
-st_accel_common_probe_error:
- iio_device_free(indio_dev);
-iio_device_alloc_error:
- return err;
}
static int st_accel_spi_remove(struct spi_device *spi)
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 93129ec4b64..09371cbc9dc 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -1,6 +1,8 @@
#
# ADC drivers
#
+# When adding new entries keep the list in alphabetical order
+
menu "Analog to digital converters"
config AD_SIGMA_DELTA
@@ -30,17 +32,20 @@ config AD7298
To compile this driver as a module, choose M here: the
module will be called ad7298.
-config AD7923
- tristate "Analog Devices AD7923 and similar ADCs driver"
+config AD7476
+ tristate "Analog Devices AD7476 and similar 1-channel ADCs driver"
depends on SPI
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
- Say yes here to build support for Analog Devices
- AD7904, AD7914, AD7923, AD7924 4 Channel ADCs.
+ Say yes here to build support for Analog Devices AD7273, AD7274, AD7276,
+ AD7277, AD7278, AD7475, AD7476, AD7477, AD7478, AD7466, AD7467, AD7468,
+ AD7495, AD7910, AD7920, AD7920 SPI analog to digital converters (ADC).
+
+ If unsure, say N (but it's safe to say "Y").
To compile this driver as a module, choose M here: the
- module will be called ad7923.
+ module will be called ad7476.
config AD7791
tristate "Analog Devices AD7791 ADC driver"
@@ -66,33 +71,30 @@ config AD7793
To compile this driver as a module, choose M here: the
module will be called AD7793.
-config AD7476
- tristate "Analog Devices AD7476 and similar 1-channel ADCs driver"
+config AD7887
+ tristate "Analog Devices AD7887 ADC driver"
depends on SPI
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
- Say yes here to build support for Analog Devices AD7273, AD7274, AD7276,
- AD7277, AD7278, AD7475, AD7476, AD7477, AD7478, AD7466, AD7467, AD7468,
- AD7495, AD7910, AD7920, AD7920 SPI analog to digital converters (ADC).
-
+ Say yes here to build support for Analog Devices
+ AD7887 SPI analog to digital converter (ADC).
If unsure, say N (but it's safe to say "Y").
To compile this driver as a module, choose M here: the
- module will be called ad7476.
+ module will be called ad7887.
-config AD7887
- tristate "Analog Devices AD7887 ADC driver"
+config AD7923
+ tristate "Analog Devices AD7923 and similar ADCs driver"
depends on SPI
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Analog Devices
- AD7887 SPI analog to digital converter (ADC).
- If unsure, say N (but it's safe to say "Y").
+ AD7904, AD7914, AD7923, AD7924 4 Channel ADCs.
To compile this driver as a module, choose M here: the
- module will be called ad7887.
+ module will be called ad7923.
config AT91_ADC
tristate "Atmel AT91 ADC"
@@ -143,6 +145,15 @@ config MCP320X
This driver can also be built as a module. If so, the module will be
called mcp320x.
+config NAU7802
+ tristate "Nuvoton NAU7802 ADC driver"
+ depends on I2C
+ help
+ Say yes here to build support for Nuvoton NAU7802 ADC.
+
+ To compile this driver as a module, choose M here: the
+ module will be called nau7802.
+
config TI_ADC081C
tristate "Texas Instruments ADC081C021/027"
depends on I2C
@@ -154,12 +165,26 @@ config TI_ADC081C
called ti-adc081c.
config TI_AM335X_ADC
- tristate "TI's ADC driver"
+ tristate "TI's AM335X ADC driver"
depends on MFD_TI_AM335X_TSCADC
help
Say yes here to build support for Texas Instruments ADC
driver which is also a MFD client.
+config TWL6030_GPADC
+ tristate "TWL6030 GPADC (General Purpose A/D Converter) Support"
+ depends on TWL4030_CORE
+ default n
+ help
+ Say yes here if you want support for the TWL6030/TWL6032 General
+ Purpose A/D Converter. This will add support for battery type
+ detection, battery voltage and temperature measurement, die
+ temperature measurement, system supply voltage, audio accessory,
+ USB ID detection.
+
+ This driver can also be built as a module. If so, the module will be
+ called twl6030-gpadc.
+
config VIPERBOARD_ADC
tristate "Viperboard ADC support"
depends on MFD_VIPERBOARD && USB
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 8f475d31fe4..33656ef7d1f 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -2,6 +2,7 @@
# Makefile for IIO ADC drivers
#
+# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o
obj-$(CONFIG_AD7266) += ad7266.o
obj-$(CONFIG_AD7298) += ad7298.o
@@ -15,6 +16,8 @@ obj-$(CONFIG_EXYNOS_ADC) += exynos_adc.o
obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
obj-$(CONFIG_MAX1363) += max1363.o
obj-$(CONFIG_MCP320X) += mcp320x.o
+obj-$(CONFIG_NAU7802) += nau7802.o
obj-$(CONFIG_TI_ADC081C) += ti-adc081c.o
obj-$(CONFIG_TI_AM335X_ADC) += ti_am335x_adc.o
+obj-$(CONFIG_TWL6030_GPADC) += twl6030-gpadc.o
obj-$(CONFIG_VIPERBOARD_ADC) += viperboard_adc.o
diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
index c2744a75c3b..371731df163 100644
--- a/drivers/iio/adc/ad7266.c
+++ b/drivers/iio/adc/ad7266.c
@@ -399,17 +399,17 @@ static int ad7266_probe(struct spi_device *spi)
unsigned int i;
int ret;
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
st = iio_priv(indio_dev);
- st->reg = regulator_get(&spi->dev, "vref");
+ st->reg = devm_regulator_get(&spi->dev, "vref");
if (!IS_ERR_OR_NULL(st->reg)) {
ret = regulator_enable(st->reg);
if (ret)
- goto error_put_reg;
+ return ret;
ret = regulator_get_voltage(st->reg);
if (ret < 0)
@@ -489,11 +489,6 @@ error_free_gpios:
error_disable_reg:
if (!IS_ERR_OR_NULL(st->reg))
regulator_disable(st->reg);
-error_put_reg:
- if (!IS_ERR_OR_NULL(st->reg))
- regulator_put(st->reg);
-
- iio_device_free(indio_dev);
return ret;
}
@@ -507,11 +502,8 @@ static int ad7266_remove(struct spi_device *spi)
iio_triggered_buffer_cleanup(indio_dev);
if (!st->fixed_addr)
gpio_free_array(st->gpios, ARRAY_SIZE(st->gpios));
- if (!IS_ERR_OR_NULL(st->reg)) {
+ if (!IS_ERR_OR_NULL(st->reg))
regulator_disable(st->reg);
- regulator_put(st->reg);
- }
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/adc/ad7298.c b/drivers/iio/adc/ad7298.c
index 03b77189dbf..85d1481c312 100644
--- a/drivers/iio/adc/ad7298.c
+++ b/drivers/iio/adc/ad7298.c
@@ -296,9 +296,10 @@ static int ad7298_probe(struct spi_device *spi)
{
struct ad7298_platform_data *pdata = spi->dev.platform_data;
struct ad7298_state *st;
- struct iio_dev *indio_dev = iio_device_alloc(sizeof(*st));
+ struct iio_dev *indio_dev;
int ret;
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
@@ -308,14 +309,13 @@ static int ad7298_probe(struct spi_device *spi)
st->ext_ref = AD7298_EXTREF;
if (st->ext_ref) {
- st->reg = regulator_get(&spi->dev, "vref");
- if (IS_ERR(st->reg)) {
- ret = PTR_ERR(st->reg);
- goto error_free;
- }
+ st->reg = devm_regulator_get(&spi->dev, "vref");
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
+
ret = regulator_enable(st->reg);
if (ret)
- goto error_put_reg;
+ return ret;
}
spi_set_drvdata(spi, indio_dev);
@@ -361,11 +361,6 @@ error_cleanup_ring:
error_disable_reg:
if (st->ext_ref)
regulator_disable(st->reg);
-error_put_reg:
- if (st->ext_ref)
- regulator_put(st->reg);
-error_free:
- iio_device_free(indio_dev);
return ret;
}
@@ -377,11 +372,8 @@ static int ad7298_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
- if (st->ext_ref) {
+ if (st->ext_ref)
regulator_disable(st->reg);
- regulator_put(st->reg);
- }
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/adc/ad7476.c b/drivers/iio/adc/ad7476.c
index 2e98bef4af6..6d2b1d8d1a1 100644
--- a/drivers/iio/adc/ad7476.c
+++ b/drivers/iio/adc/ad7476.c
@@ -213,24 +213,21 @@ static int ad7476_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
int ret;
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
st = iio_priv(indio_dev);
st->chip_info =
&ad7476_chip_info_tbl[spi_get_device_id(spi)->driver_data];
- st->reg = regulator_get(&spi->dev, "vcc");
- if (IS_ERR(st->reg)) {
- ret = PTR_ERR(st->reg);
- goto error_free_dev;
- }
+ st->reg = devm_regulator_get(&spi->dev, "vcc");
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
ret = regulator_enable(st->reg);
if (ret)
- goto error_put_reg;
+ return ret;
spi_set_drvdata(spi, indio_dev);
@@ -268,12 +265,7 @@ error_ring_unregister:
iio_triggered_buffer_cleanup(indio_dev);
error_disable_reg:
regulator_disable(st->reg);
-error_put_reg:
- regulator_put(st->reg);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -285,8 +277,6 @@ static int ad7476_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
regulator_disable(st->reg);
- regulator_put(st->reg);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/adc/ad7791.c b/drivers/iio/adc/ad7791.c
index 5e8d1da6887..c20203577d2 100644
--- a/drivers/iio/adc/ad7791.c
+++ b/drivers/iio/adc/ad7791.c
@@ -361,21 +361,19 @@ static int ad7791_probe(struct spi_device *spi)
return -ENXIO;
}
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
st = iio_priv(indio_dev);
- st->reg = regulator_get(&spi->dev, "refin");
- if (IS_ERR(st->reg)) {
- ret = PTR_ERR(st->reg);
- goto err_iio_free;
- }
+ st->reg = devm_regulator_get(&spi->dev, "refin");
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
ret = regulator_enable(st->reg);
if (ret)
- goto error_put_reg;
+ return ret;
st->info = &ad7791_chip_infos[spi_get_device_id(spi)->driver_data];
ad_sd_init(&st->sd, indio_dev, spi, &ad7791_sigma_delta_info);
@@ -410,10 +408,6 @@ error_remove_trigger:
ad_sd_cleanup_buffer_and_trigger(indio_dev);
error_disable_reg:
regulator_disable(st->reg);
-error_put_reg:
- regulator_put(st->reg);
-err_iio_free:
- iio_device_free(indio_dev);
return ret;
}
@@ -427,9 +421,6 @@ static int ad7791_remove(struct spi_device *spi)
ad_sd_cleanup_buffer_and_trigger(indio_dev);
regulator_disable(st->reg);
- regulator_put(st->reg);
-
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/adc/ad7793.c b/drivers/iio/adc/ad7793.c
index 334e31ff7a4..4dddeabdfbb 100644
--- a/drivers/iio/adc/ad7793.c
+++ b/drivers/iio/adc/ad7793.c
@@ -757,7 +757,7 @@ static int ad7793_probe(struct spi_device *spi)
return -ENODEV;
}
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
@@ -766,15 +766,13 @@ static int ad7793_probe(struct spi_device *spi)
ad_sd_init(&st->sd, indio_dev, spi, &ad7793_sigma_delta_info);
if (pdata->refsel != AD7793_REFSEL_INTERNAL) {
- st->reg = regulator_get(&spi->dev, "refin");
- if (IS_ERR(st->reg)) {
- ret = PTR_ERR(st->reg);
- goto error_device_free;
- }
+ st->reg = devm_regulator_get(&spi->dev, "refin");
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
ret = regulator_enable(st->reg);
if (ret)
- goto error_put_reg;
+ return ret;
vref_mv = regulator_get_voltage(st->reg);
if (vref_mv < 0) {
@@ -818,11 +816,6 @@ error_remove_trigger:
error_disable_reg:
if (pdata->refsel != AD7793_REFSEL_INTERNAL)
regulator_disable(st->reg);
-error_put_reg:
- if (pdata->refsel != AD7793_REFSEL_INTERNAL)
- regulator_put(st->reg);
-error_device_free:
- iio_device_free(indio_dev);
return ret;
}
@@ -836,12 +829,8 @@ static int ad7793_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
ad_sd_cleanup_buffer_and_trigger(indio_dev);
- if (pdata->refsel != AD7793_REFSEL_INTERNAL) {
+ if (pdata->refsel != AD7793_REFSEL_INTERNAL)
regulator_disable(st->reg);
- regulator_put(st->reg);
- }
-
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index dd15a5b0f70..9dd077b7875 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -237,25 +237,24 @@ static int ad7887_probe(struct spi_device *spi)
{
struct ad7887_platform_data *pdata = spi->dev.platform_data;
struct ad7887_state *st;
- struct iio_dev *indio_dev = iio_device_alloc(sizeof(*st));
+ struct iio_dev *indio_dev;
uint8_t mode;
int ret;
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
st = iio_priv(indio_dev);
if (!pdata || !pdata->use_onchip_ref) {
- st->reg = regulator_get(&spi->dev, "vref");
- if (IS_ERR(st->reg)) {
- ret = PTR_ERR(st->reg);
- goto error_free;
- }
+ st->reg = devm_regulator_get(&spi->dev, "vref");
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
ret = regulator_enable(st->reg);
if (ret)
- goto error_put_reg;
+ return ret;
}
st->chip_info =
@@ -331,11 +330,6 @@ error_unregister_ring:
error_disable_reg:
if (st->reg)
regulator_disable(st->reg);
-error_put_reg:
- if (st->reg)
- regulator_put(st->reg);
-error_free:
- iio_device_free(indio_dev);
return ret;
}
@@ -347,11 +341,8 @@ static int ad7887_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
- if (st->reg) {
+ if (st->reg)
regulator_disable(st->reg);
- regulator_put(st->reg);
- }
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/adc/ad7923.c b/drivers/iio/adc/ad7923.c
index 97fa0d3dc4a..4108dbb28c3 100644
--- a/drivers/iio/adc/ad7923.c
+++ b/drivers/iio/adc/ad7923.c
@@ -275,10 +275,11 @@ static const struct iio_info ad7923_info = {
static int ad7923_probe(struct spi_device *spi)
{
struct ad7923_state *st;
- struct iio_dev *indio_dev = iio_device_alloc(sizeof(*st));
+ struct iio_dev *indio_dev;
const struct ad7923_chip_info *info;
int ret;
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
@@ -311,14 +312,13 @@ static int ad7923_probe(struct spi_device *spi)
spi_message_add_tail(&st->scan_single_xfer[0], &st->scan_single_msg);
spi_message_add_tail(&st->scan_single_xfer[1], &st->scan_single_msg);
- st->reg = regulator_get(&spi->dev, "refin");
- if (IS_ERR(st->reg)) {
- ret = PTR_ERR(st->reg);
- goto error_free;
- }
+ st->reg = devm_regulator_get(&spi->dev, "refin");
+ if (IS_ERR(st->reg))
+ return PTR_ERR(st->reg);
+
ret = regulator_enable(st->reg);
if (ret)
- goto error_put_reg;
+ return ret;
ret = iio_triggered_buffer_setup(indio_dev, NULL,
&ad7923_trigger_handler, NULL);
@@ -335,10 +335,6 @@ error_cleanup_ring:
iio_triggered_buffer_cleanup(indio_dev);
error_disable_reg:
regulator_disable(st->reg);
-error_put_reg:
- regulator_put(st->reg);
-error_free:
- iio_device_free(indio_dev);
return ret;
}
@@ -351,8 +347,6 @@ static int ad7923_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
regulator_disable(st->reg);
- regulator_put(st->reg);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index b6db6a0e09c..84be63bdf03 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -39,6 +39,10 @@
#define at91_adc_writel(st, reg, val) \
(writel_relaxed(val, st->reg_base + reg))
+struct at91_adc_caps {
+ struct at91_adc_reg_desc registers;
+};
+
struct at91_adc_state {
struct clk *adc_clk;
u16 *buffer;
@@ -62,6 +66,7 @@ struct at91_adc_state {
u32 res; /* resolution used for convertions */
bool low_res; /* the resolution corresponds to the lowest one */
wait_queue_head_t wq_data_avail;
+ struct at91_adc_caps *caps;
};
static irqreturn_t at91_adc_trigger_handler(int irq, void *p)
@@ -429,6 +434,8 @@ ret:
return ret;
}
+static const struct of_device_id at91_adc_dt_ids[];
+
static int at91_adc_probe_dt(struct at91_adc_state *st,
struct platform_device *pdev)
{
@@ -441,6 +448,9 @@ static int at91_adc_probe_dt(struct at91_adc_state *st,
if (!node)
return -EINVAL;
+ st->caps = (struct at91_adc_caps *)
+ of_match_device(at91_adc_dt_ids, &pdev->dev)->data;
+
st->use_external = of_property_read_bool(node, "atmel,adc-use-external-triggers");
if (of_property_read_u32(node, "atmel,adc-channels-used", &prop)) {
@@ -481,43 +491,7 @@ static int at91_adc_probe_dt(struct at91_adc_state *st,
if (ret)
goto error_ret;
- st->registers = devm_kzalloc(&idev->dev,
- sizeof(struct at91_adc_reg_desc),
- GFP_KERNEL);
- if (!st->registers) {
- dev_err(&idev->dev, "Could not allocate register memory.\n");
- ret = -ENOMEM;
- goto error_ret;
- }
-
- if (of_property_read_u32(node, "atmel,adc-channel-base", &prop)) {
- dev_err(&idev->dev, "Missing adc-channel-base property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
- }
- st->registers->channel_base = prop;
-
- if (of_property_read_u32(node, "atmel,adc-drdy-mask", &prop)) {
- dev_err(&idev->dev, "Missing adc-drdy-mask property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
- }
- st->registers->drdy_mask = prop;
-
- if (of_property_read_u32(node, "atmel,adc-status-register", &prop)) {
- dev_err(&idev->dev, "Missing adc-status-register property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
- }
- st->registers->status_register = prop;
-
- if (of_property_read_u32(node, "atmel,adc-trigger-register", &prop)) {
- dev_err(&idev->dev, "Missing adc-trigger-register property in the DT.\n");
- ret = -EINVAL;
- goto error_ret;
- }
- st->registers->trigger_register = prop;
-
+ st->registers = &st->caps->registers;
st->trigger_number = of_get_child_count(node);
st->trigger_list = devm_kzalloc(&idev->dev, st->trigger_number *
sizeof(struct at91_adc_trigger),
@@ -589,11 +563,9 @@ static int at91_adc_probe(struct platform_device *pdev)
struct resource *res;
u32 reg;
- idev = iio_device_alloc(sizeof(struct at91_adc_state));
- if (idev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ idev = devm_iio_device_alloc(&pdev->dev, sizeof(struct at91_adc_state));
+ if (!idev)
+ return -ENOMEM;
st = iio_priv(idev);
@@ -604,8 +576,7 @@ static int at91_adc_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "No platform data available.\n");
- ret = -EINVAL;
- goto error_free_device;
+ return -EINVAL;
}
platform_set_drvdata(pdev, idev);
@@ -618,16 +589,14 @@ static int at91_adc_probe(struct platform_device *pdev)
st->irq = platform_get_irq(pdev, 0);
if (st->irq < 0) {
dev_err(&pdev->dev, "No IRQ ID is designated\n");
- ret = -ENODEV;
- goto error_free_device;
+ return -ENODEV;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
st->reg_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(st->reg_base)) {
- ret = PTR_ERR(st->reg_base);
- goto error_free_device;
+ return PTR_ERR(st->reg_base);
}
/*
@@ -642,7 +611,7 @@ static int at91_adc_probe(struct platform_device *pdev)
idev);
if (ret) {
dev_err(&pdev->dev, "Failed to allocate IRQ.\n");
- goto error_free_device;
+ return ret;
}
st->clk = devm_clk_get(&pdev->dev, "adc_clk");
@@ -703,8 +672,8 @@ static int at91_adc_probe(struct platform_device *pdev)
shtim = round_up((st->sample_hold_time * adc_clk /
1000000) - 1, 1);
- reg = AT91_ADC_PRESCAL_(prsc) & AT91_ADC_PRESCAL;
- reg |= AT91_ADC_STARTUP_(ticks) & AT91_ADC_STARTUP;
+ reg = AT91_ADC_PRESCAL_(prsc) & st->registers->mr_prescal_mask;
+ reg |= AT91_ADC_STARTUP_(ticks) & st->registers->mr_startup_mask;
if (st->low_res)
reg |= AT91_ADC_LOWRES;
if (st->sleep_mode)
@@ -752,9 +721,6 @@ error_disable_clk:
clk_disable_unprepare(st->clk);
error_free_irq:
free_irq(st->irq, idev);
-error_free_device:
- iio_device_free(idev);
-error_ret:
return ret;
}
@@ -769,14 +735,49 @@ static int at91_adc_remove(struct platform_device *pdev)
clk_disable_unprepare(st->adc_clk);
clk_disable_unprepare(st->clk);
free_irq(st->irq, idev);
- iio_device_free(idev);
return 0;
}
#ifdef CONFIG_OF
+static struct at91_adc_caps at91sam9260_caps = {
+ .registers = {
+ .channel_base = AT91_ADC_CHR(0),
+ .drdy_mask = AT91_ADC_DRDY,
+ .status_register = AT91_ADC_SR,
+ .trigger_register = AT91_ADC_TRGR_9260,
+ .mr_prescal_mask = AT91_ADC_PRESCAL_9260,
+ .mr_startup_mask = AT91_ADC_STARTUP_9260,
+ },
+};
+
+static struct at91_adc_caps at91sam9g45_caps = {
+ .registers = {
+ .channel_base = AT91_ADC_CHR(0),
+ .drdy_mask = AT91_ADC_DRDY,
+ .status_register = AT91_ADC_SR,
+ .trigger_register = AT91_ADC_TRGR_9G45,
+ .mr_prescal_mask = AT91_ADC_PRESCAL_9G45,
+ .mr_startup_mask = AT91_ADC_STARTUP_9G45,
+ },
+};
+
+static struct at91_adc_caps at91sam9x5_caps = {
+ .registers = {
+ .channel_base = AT91_ADC_CDR0_9X5,
+ .drdy_mask = AT91_ADC_SR_DRDY_9X5,
+ .status_register = AT91_ADC_SR_9X5,
+ .trigger_register = AT91_ADC_TRGR_9X5,
+		/* prescal mask is the same as on the 9G45 */
+ .mr_prescal_mask = AT91_ADC_PRESCAL_9G45,
+ .mr_startup_mask = AT91_ADC_STARTUP_9X5,
+ },
+};
+
static const struct of_device_id at91_adc_dt_ids[] = {
- { .compatible = "atmel,at91sam9260-adc" },
+ { .compatible = "atmel,at91sam9260-adc", .data = &at91sam9260_caps },
+ { .compatible = "atmel,at91sam9g45-adc", .data = &at91sam9g45_caps },
+ { .compatible = "atmel,at91sam9x5-adc", .data = &at91sam9x5_caps },
{},
};
MODULE_DEVICE_TABLE(of, at91_adc_dt_ids);
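Editor's note: the at91_adc hunk above stops parsing per-register devicetree properties and instead keys the register layout off the compatible string, recovering a caps structure through the of_device_id .data pointer. A hedged sketch of that lookup, with invented foo_caps/"vendor,foo-*" names standing in for the driver's own:

#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/types.h>

struct foo_caps {
	u32 trigger_register;	/* per-SoC register offset, invented for this sketch */
};

static const struct foo_caps foo_v1_caps = { .trigger_register = 0x08 };
static const struct foo_caps foo_v2_caps = { .trigger_register = 0x0c };

static const struct of_device_id foo_dt_ids[] = {
	{ .compatible = "vendor,foo-v1", .data = &foo_v1_caps },
	{ .compatible = "vendor,foo-v2", .data = &foo_v2_caps },
	{ /* sentinel */ }
};

static const struct foo_caps *foo_get_caps(struct platform_device *pdev)
{
	const struct of_device_id *match;

	/* picks the entry whose compatible matched this device node */
	match = of_match_device(foo_dt_ids, &pdev->dev);
	if (!match)
		return NULL;

	return match->data;
}

With this in place the driver no longer needs the removed atmel,adc-*-register properties; the register description travels with the compatible string instead.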
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 9809fc9a35d..d25b262193a 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -33,6 +33,7 @@
#include <linux/of_irq.h>
#include <linux/regulator/consumer.h>
#include <linux/of_platform.h>
+#include <linux/err.h>
#include <linux/iio/iio.h>
#include <linux/iio/machine.h>
@@ -261,7 +262,7 @@ static int exynos_adc_probe(struct platform_device *pdev)
if (!np)
return ret;
- indio_dev = iio_device_alloc(sizeof(struct exynos_adc));
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct exynos_adc));
if (!indio_dev) {
dev_err(&pdev->dev, "failed allocating iio device\n");
return -ENOMEM;
@@ -271,23 +272,18 @@ static int exynos_adc_probe(struct platform_device *pdev)
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
info->regs = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(info->regs)) {
- ret = PTR_ERR(info->regs);
- goto err_iio;
- }
+ if (IS_ERR(info->regs))
+ return PTR_ERR(info->regs);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
info->enable_reg = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(info->enable_reg)) {
- ret = PTR_ERR(info->enable_reg);
- goto err_iio;
- }
+ if (IS_ERR(info->enable_reg))
+ return PTR_ERR(info->enable_reg);
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no irq resource?\n");
- ret = irq;
- goto err_iio;
+ return irq;
}
info->irq = irq;
@@ -299,7 +295,7 @@ static int exynos_adc_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "failed requesting irq, irq = %d\n",
info->irq);
- goto err_iio;
+ return ret;
}
writel(1, info->enable_reg);
@@ -365,8 +361,6 @@ err_iio_dev:
iio_device_unregister(indio_dev);
err_irq:
free_irq(info->irq, info);
-err_iio:
- iio_device_free(indio_dev);
return ret;
}
@@ -382,7 +376,6 @@ static int exynos_adc_remove(struct platform_device *pdev)
writel(0, info->enable_reg);
iio_device_unregister(indio_dev);
free_irq(info->irq, info);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/adc/lp8788_adc.c b/drivers/iio/adc/lp8788_adc.c
index 62bc39e9c94..5c8c91595f4 100644
--- a/drivers/iio/adc/lp8788_adc.c
+++ b/drivers/iio/adc/lp8788_adc.c
@@ -194,7 +194,7 @@ static int lp8788_adc_probe(struct platform_device *pdev)
struct lp8788_adc *adc;
int ret;
- indio_dev = iio_device_alloc(sizeof(*adc));
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
if (!indio_dev)
return -ENOMEM;
@@ -205,7 +205,7 @@ static int lp8788_adc_probe(struct platform_device *pdev)
indio_dev->dev.of_node = pdev->dev.of_node;
ret = lp8788_iio_map_register(indio_dev, lp->pdata, adc);
if (ret)
- goto err_iio_map;
+ return ret;
mutex_init(&adc->lock);
@@ -226,8 +226,6 @@ static int lp8788_adc_probe(struct platform_device *pdev)
err_iio_device:
iio_map_array_unregister(indio_dev);
-err_iio_map:
- iio_device_free(indio_dev);
return ret;
}
@@ -237,7 +235,6 @@ static int lp8788_adc_remove(struct platform_device *pdev)
iio_device_unregister(indio_dev);
iio_map_array_unregister(indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index f148d00b83f..4fb35d1d749 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1498,16 +1498,15 @@ static int max1363_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
struct regulator *vref;
- indio_dev = iio_device_alloc(sizeof(struct max1363_state));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_out;
- }
+ indio_dev = devm_iio_device_alloc(&client->dev,
+ sizeof(struct max1363_state));
+ if (!indio_dev)
+ return -ENOMEM;
indio_dev->dev.of_node = client->dev.of_node;
ret = iio_map_array_register(indio_dev, client->dev.platform_data);
if (ret < 0)
- goto error_free_device;
+ return ret;
st = iio_priv(indio_dev);
@@ -1590,9 +1589,6 @@ error_disable_reg:
regulator_disable(st->reg);
error_unregister_map:
iio_map_array_unregister(indio_dev);
-error_free_device:
- iio_device_free(indio_dev);
-error_out:
return ret;
}
@@ -1607,7 +1603,6 @@ static int max1363_remove(struct i2c_client *client)
regulator_disable(st->vref);
regulator_disable(st->reg);
iio_map_array_unregister(indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index ebc015922a7..28a086e4877 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -169,7 +169,7 @@ static int mcp320x_probe(struct spi_device *spi)
const struct mcp3208_chip_info *chip_info;
int ret;
- indio_dev = iio_device_alloc(sizeof(*adc));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adc));
if (!indio_dev)
return -ENOMEM;
@@ -193,15 +193,13 @@ static int mcp320x_probe(struct spi_device *spi)
spi_message_init_with_transfers(&adc->msg, adc->transfer,
ARRAY_SIZE(adc->transfer));
- adc->reg = regulator_get(&spi->dev, "vref");
- if (IS_ERR(adc->reg)) {
- ret = PTR_ERR(adc->reg);
- goto iio_free;
- }
+ adc->reg = devm_regulator_get(&spi->dev, "vref");
+ if (IS_ERR(adc->reg))
+ return PTR_ERR(adc->reg);
ret = regulator_enable(adc->reg);
if (ret < 0)
- goto reg_free;
+ return ret;
mutex_init(&adc->lock);
@@ -213,10 +211,6 @@ static int mcp320x_probe(struct spi_device *spi)
reg_disable:
regulator_disable(adc->reg);
-reg_free:
- regulator_put(adc->reg);
-iio_free:
- iio_device_free(indio_dev);
return ret;
}
@@ -228,8 +222,6 @@ static int mcp320x_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
regulator_disable(adc->reg);
- regulator_put(adc->reg);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/adc/nau7802.c b/drivers/iio/adc/nau7802.c
new file mode 100644
index 00000000000..bdf03468f3b
--- /dev/null
+++ b/drivers/iio/adc/nau7802.c
@@ -0,0 +1,581 @@
+/*
+ * Driver for the Nuvoton NAU7802 ADC
+ *
+ * Copyright 2013 Free Electrons
+ *
+ * Licensed under the GPLv2 or later.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+#include <linux/log2.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define NAU7802_REG_PUCTRL 0x00
+#define NAU7802_PUCTRL_RR(x) (x << 0)
+#define NAU7802_PUCTRL_RR_BIT NAU7802_PUCTRL_RR(1)
+#define NAU7802_PUCTRL_PUD(x) (x << 1)
+#define NAU7802_PUCTRL_PUD_BIT NAU7802_PUCTRL_PUD(1)
+#define NAU7802_PUCTRL_PUA(x) (x << 2)
+#define NAU7802_PUCTRL_PUA_BIT NAU7802_PUCTRL_PUA(1)
+#define NAU7802_PUCTRL_PUR(x) (x << 3)
+#define NAU7802_PUCTRL_PUR_BIT NAU7802_PUCTRL_PUR(1)
+#define NAU7802_PUCTRL_CS(x) (x << 4)
+#define NAU7802_PUCTRL_CS_BIT NAU7802_PUCTRL_CS(1)
+#define NAU7802_PUCTRL_CR(x) (x << 5)
+#define NAU7802_PUCTRL_CR_BIT NAU7802_PUCTRL_CR(1)
+#define NAU7802_PUCTRL_AVDDS(x) (x << 7)
+#define NAU7802_PUCTRL_AVDDS_BIT NAU7802_PUCTRL_AVDDS(1)
+#define NAU7802_REG_CTRL1 0x01
+#define NAU7802_CTRL1_VLDO(x) (x << 3)
+#define NAU7802_CTRL1_GAINS(x) (x)
+#define NAU7802_CTRL1_GAINS_BITS 0x07
+#define NAU7802_REG_CTRL2 0x02
+#define NAU7802_CTRL2_CHS(x) (x << 7)
+#define NAU7802_CTRL2_CRS(x) (x << 4)
+#define NAU7802_SAMP_FREQ_320 0x07
+#define NAU7802_CTRL2_CHS_BIT NAU7802_CTRL2_CHS(1)
+#define NAU7802_REG_ADC_B2 0x12
+#define NAU7802_REG_ADC_B1 0x13
+#define NAU7802_REG_ADC_B0 0x14
+#define NAU7802_REG_ADC_CTRL 0x15
+
+#define NAU7802_MIN_CONVERSIONS 6
+
+struct nau7802_state {
+ struct i2c_client *client;
+ s32 last_value;
+ struct mutex lock;
+ struct mutex data_lock;
+ u32 vref_mv;
+ u32 conversion_count;
+ u32 min_conversions;
+ u8 sample_rate;
+ u32 scale_avail[8];
+ struct completion value_ok;
+};
+
+#define NAU7802_CHANNEL(chan) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = (chan), \
+ .scan_index = (chan), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ) \
+}
+
+static const struct iio_chan_spec nau7802_chan_array[] = {
+ NAU7802_CHANNEL(0),
+ NAU7802_CHANNEL(1),
+};
+
+static const u16 nau7802_sample_freq_avail[] = {10, 20, 40, 80,
+ 10, 10, 10, 320};
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("10 40 80 320");
+
+static struct attribute *nau7802_attributes[] = {
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group nau7802_attribute_group = {
+ .attrs = nau7802_attributes,
+};
+
+static int nau7802_set_gain(struct nau7802_state *st, int gain)
+{
+ int ret;
+
+ mutex_lock(&st->lock);
+ st->conversion_count = 0;
+
+ ret = i2c_smbus_read_byte_data(st->client, NAU7802_REG_CTRL1);
+ if (ret < 0)
+ goto nau7802_sysfs_set_gain_out;
+ ret = i2c_smbus_write_byte_data(st->client, NAU7802_REG_CTRL1,
+ (ret & (~NAU7802_CTRL1_GAINS_BITS)) |
+ gain);
+
+nau7802_sysfs_set_gain_out:
+ mutex_unlock(&st->lock);
+
+ return ret;
+}
+
+static int nau7802_read_conversion(struct nau7802_state *st)
+{
+ int data;
+
+ mutex_lock(&st->data_lock);
+ data = i2c_smbus_read_byte_data(st->client, NAU7802_REG_ADC_B2);
+ if (data < 0)
+ goto nau7802_read_conversion_out;
+ st->last_value = data << 16;
+
+ data = i2c_smbus_read_byte_data(st->client, NAU7802_REG_ADC_B1);
+ if (data < 0)
+ goto nau7802_read_conversion_out;
+ st->last_value |= data << 8;
+
+ data = i2c_smbus_read_byte_data(st->client, NAU7802_REG_ADC_B0);
+ if (data < 0)
+ goto nau7802_read_conversion_out;
+ st->last_value |= data;
+
+ st->last_value = sign_extend32(st->last_value, 23);
+
+nau7802_read_conversion_out:
+ mutex_unlock(&st->data_lock);
+
+ return data;
+}
+
+/*
+ * Conversions are synchronised on the rising edge of NAU7802_PUCTRL_CS_BIT
+ */
+static int nau7802_sync(struct nau7802_state *st)
+{
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(st->client, NAU7802_REG_PUCTRL);
+ if (ret < 0)
+ return ret;
+ ret = i2c_smbus_write_byte_data(st->client, NAU7802_REG_PUCTRL,
+ ret | NAU7802_PUCTRL_CS_BIT);
+
+ return ret;
+}
+
+static irqreturn_t nau7802_eoc_trigger(int irq, void *private)
+{
+ struct iio_dev *indio_dev = private;
+ struct nau7802_state *st = iio_priv(indio_dev);
+ int status;
+
+ status = i2c_smbus_read_byte_data(st->client, NAU7802_REG_PUCTRL);
+ if (status < 0)
+ return IRQ_HANDLED;
+
+ if (!(status & NAU7802_PUCTRL_CR_BIT))
+ return IRQ_NONE;
+
+ if (nau7802_read_conversion(st) < 0)
+ return IRQ_HANDLED;
+
+ /*
+ * Because there is actually only one ADC for both channels, we have to
+ * wait for enough conversions to happen before getting a significant
+ * value when changing channels and the values are far apart.
+ */
+ if (st->conversion_count < NAU7802_MIN_CONVERSIONS)
+ st->conversion_count++;
+ if (st->conversion_count >= NAU7802_MIN_CONVERSIONS)
+ complete_all(&st->value_ok);
+
+ return IRQ_HANDLED;
+}
+
+static int nau7802_read_irq(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct nau7802_state *st = iio_priv(indio_dev);
+ int ret;
+
+ INIT_COMPLETION(st->value_ok);
+ enable_irq(st->client->irq);
+
+ nau7802_sync(st);
+
+ /* read registers to ensure we flush everything */
+ ret = nau7802_read_conversion(st);
+ if (ret < 0)
+ goto read_chan_info_failure;
+
+ /* Wait for a conversion to finish */
+ ret = wait_for_completion_interruptible_timeout(&st->value_ok,
+ msecs_to_jiffies(1000));
+ if (ret == 0)
+ ret = -ETIMEDOUT;
+
+ if (ret < 0)
+ goto read_chan_info_failure;
+
+ disable_irq(st->client->irq);
+
+ *val = st->last_value;
+
+ return IIO_VAL_INT;
+
+read_chan_info_failure:
+ disable_irq(st->client->irq);
+
+ return ret;
+}
+
+static int nau7802_read_poll(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val)
+{
+ struct nau7802_state *st = iio_priv(indio_dev);
+ int ret;
+
+ nau7802_sync(st);
+
+ /* read registers to ensure we flush everything */
+ ret = nau7802_read_conversion(st);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Because there is actually only one ADC for both channels, we have to
+ * wait for enough conversions to happen before getting a significant
+	 * value when changing channels and the values are far apart.
+ */
+ do {
+ ret = i2c_smbus_read_byte_data(st->client, NAU7802_REG_PUCTRL);
+ if (ret < 0)
+ return ret;
+
+ while (!(ret & NAU7802_PUCTRL_CR_BIT)) {
+ if (st->sample_rate != NAU7802_SAMP_FREQ_320)
+ msleep(20);
+ else
+ mdelay(4);
+ ret = i2c_smbus_read_byte_data(st->client,
+ NAU7802_REG_PUCTRL);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = nau7802_read_conversion(st);
+ if (ret < 0)
+ return ret;
+ if (st->conversion_count < NAU7802_MIN_CONVERSIONS)
+ st->conversion_count++;
+ } while (st->conversion_count < NAU7802_MIN_CONVERSIONS);
+
+ *val = st->last_value;
+
+ return IIO_VAL_INT;
+}
+
+static int nau7802_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct nau7802_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&st->lock);
+ /*
+ * Select the channel to use
+ * - Channel 1 is value 0 in the CHS register
+ * - Channel 2 is value 1 in the CHS register
+ */
+ ret = i2c_smbus_read_byte_data(st->client, NAU7802_REG_CTRL2);
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+
+ if (((ret & NAU7802_CTRL2_CHS_BIT) && !chan->channel) ||
+ (!(ret & NAU7802_CTRL2_CHS_BIT) &&
+ chan->channel)) {
+ st->conversion_count = 0;
+ ret = i2c_smbus_write_byte_data(st->client,
+ NAU7802_REG_CTRL2,
+ NAU7802_CTRL2_CHS(chan->channel) |
+ NAU7802_CTRL2_CRS(st->sample_rate));
+
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+ }
+
+ if (st->client->irq)
+ ret = nau7802_read_irq(indio_dev, chan, val);
+ else
+ ret = nau7802_read_poll(indio_dev, chan, val);
+
+ mutex_unlock(&st->lock);
+ return ret;
+
+ case IIO_CHAN_INFO_SCALE:
+ ret = i2c_smbus_read_byte_data(st->client, NAU7802_REG_CTRL1);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * We have 24 bits of signed data, that means 23 bits of data
+ * plus the sign bit
+ */
+ *val = st->vref_mv;
+ *val2 = 23 + (ret & NAU7802_CTRL1_GAINS_BITS);
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ *val = nau7802_sample_freq_avail[st->sample_rate];
+ *val2 = 0;
+ return IIO_VAL_INT;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int nau7802_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct nau7802_state *st = iio_priv(indio_dev);
+ int i, ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
+ if (val2 == st->scale_avail[i])
+ return nau7802_set_gain(st, i);
+
+ break;
+
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ for (i = 0; i < ARRAY_SIZE(nau7802_sample_freq_avail); i++)
+ if (val == nau7802_sample_freq_avail[i]) {
+ mutex_lock(&st->lock);
+ st->sample_rate = i;
+ st->conversion_count = 0;
+ ret = i2c_smbus_write_byte_data(st->client,
+ NAU7802_REG_CTRL2,
+ NAU7802_CTRL2_CRS(st->sample_rate));
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int nau7802_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ return IIO_VAL_INT_PLUS_NANO;
+}
+
+static const struct iio_info nau7802_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &nau7802_read_raw,
+ .write_raw = &nau7802_write_raw,
+ .write_raw_get_fmt = nau7802_write_raw_get_fmt,
+ .attrs = &nau7802_attribute_group,
+};
+
+static int nau7802_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct nau7802_state *st;
+ struct device_node *np = client->dev.of_node;
+ int i, ret;
+ u8 data;
+ u32 tmp = 0;
+
+ if (!client->dev.of_node) {
+ dev_err(&client->dev, "No device tree node available.\n");
+ return -EINVAL;
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
+ st = iio_priv(indio_dev);
+
+ i2c_set_clientdata(client, indio_dev);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = dev_name(&client->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &nau7802_info;
+
+ st->client = client;
+
+ /* Reset the device */
+ ret = i2c_smbus_write_byte_data(st->client, NAU7802_REG_PUCTRL,
+ NAU7802_PUCTRL_RR_BIT);
+ if (ret < 0)
+ return ret;
+
+ /* Enter normal operation mode */
+ ret = i2c_smbus_write_byte_data(st->client, NAU7802_REG_PUCTRL,
+ NAU7802_PUCTRL_PUD_BIT);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * After about 200 usecs, the device should be ready and then
+ * the Power Up bit will be set to 1. If not, wait for it.
+ */
+ udelay(210);
+ ret = i2c_smbus_read_byte_data(st->client, NAU7802_REG_PUCTRL);
+ if (ret < 0)
+ return ret;
+ if (!(ret & NAU7802_PUCTRL_PUR_BIT))
+ return ret;
+
+ of_property_read_u32(np, "nuvoton,vldo", &tmp);
+ st->vref_mv = tmp;
+
+ data = NAU7802_PUCTRL_PUD_BIT | NAU7802_PUCTRL_PUA_BIT |
+ NAU7802_PUCTRL_CS_BIT;
+ if (tmp >= 2400)
+ data |= NAU7802_PUCTRL_AVDDS_BIT;
+
+ ret = i2c_smbus_write_byte_data(st->client, NAU7802_REG_PUCTRL, data);
+ if (ret < 0)
+ return ret;
+ ret = i2c_smbus_write_byte_data(st->client, NAU7802_REG_ADC_CTRL, 0x30);
+ if (ret < 0)
+ return ret;
+
+ if (tmp >= 2400) {
+ data = NAU7802_CTRL1_VLDO((4500 - tmp) / 300);
+ ret = i2c_smbus_write_byte_data(st->client, NAU7802_REG_CTRL1,
+ data);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Populate available ADC input ranges */
+ for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
+ st->scale_avail[i] = (((u64)st->vref_mv) * 1000000000ULL)
+ >> (23 + i);
+
+ init_completion(&st->value_ok);
+
+ /*
+	 * The ADC fires continuously and we can't do anything about
+	 * it. So we keep the IRQ disabled by default and only
+	 * re-enable it when we actually need it.
+ */
+ if (client->irq) {
+ ret = request_threaded_irq(client->irq,
+ NULL,
+ nau7802_eoc_trigger,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ client->dev.driver->name,
+ indio_dev);
+ if (ret) {
+ /*
+			 * What may happen here is that our IRQ controller
+			 * cannot handle level interrupts, which this ADC
+			 * requires: above 40 samples per second, the
+			 * interrupt line may stay high between conversions.
+			 * So we continue no matter what, but switch to
+			 * polling mode.
+ */
+ dev_info(&client->dev,
+ "Failed to allocate IRQ, using polling mode\n");
+ client->irq = 0;
+ } else
+ disable_irq(client->irq);
+ }
+
+ if (!client->irq) {
+ /*
+ * We are polling, use the fastest sample rate by
+ * default
+ */
+ st->sample_rate = NAU7802_SAMP_FREQ_320;
+ ret = i2c_smbus_write_byte_data(st->client, NAU7802_REG_CTRL2,
+ NAU7802_CTRL2_CRS(st->sample_rate));
+ if (ret)
+ goto error_free_irq;
+ }
+
+ /* Setup the ADC channels available on the board */
+ indio_dev->num_channels = ARRAY_SIZE(nau7802_chan_array);
+ indio_dev->channels = nau7802_chan_array;
+
+ mutex_init(&st->lock);
+ mutex_init(&st->data_lock);
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0) {
+ dev_err(&client->dev, "Couldn't register the device.\n");
+ goto error_device_register;
+ }
+
+ return 0;
+
+error_device_register:
+ mutex_destroy(&st->lock);
+ mutex_destroy(&st->data_lock);
+error_free_irq:
+ if (client->irq)
+ free_irq(client->irq, indio_dev);
+
+ return ret;
+}
+
+static int nau7802_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct nau7802_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ mutex_destroy(&st->lock);
+ mutex_destroy(&st->data_lock);
+ if (client->irq)
+ free_irq(client->irq, indio_dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id nau7802_i2c_id[] = {
+ { "nau7802", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, nau7802_i2c_id);
+
+static const struct of_device_id nau7802_dt_ids[] = {
+ { .compatible = "nuvoton,nau7802" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, nau7802_dt_ids);
+
+static struct i2c_driver nau7802_driver = {
+ .probe = nau7802_probe,
+ .remove = nau7802_remove,
+ .id_table = nau7802_i2c_id,
+ .driver = {
+ .name = "nau7802",
+ .of_match_table = of_match_ptr(nau7802_dt_ids),
+ },
+};
+
+module_i2c_driver(nau7802_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Nuvoton NAU7802 ADC Driver");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@free-electrons.com>");
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index 2826faae706..ee5f72bffe5 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -74,22 +74,20 @@ static int adc081c_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
return -ENODEV;
- iio = iio_device_alloc(sizeof(*adc));
+ iio = devm_iio_device_alloc(&client->dev, sizeof(*adc));
if (!iio)
return -ENOMEM;
adc = iio_priv(iio);
adc->i2c = client;
- adc->ref = regulator_get(&client->dev, "vref");
- if (IS_ERR(adc->ref)) {
- err = PTR_ERR(adc->ref);
- goto iio_free;
- }
+ adc->ref = devm_regulator_get(&client->dev, "vref");
+ if (IS_ERR(adc->ref))
+ return PTR_ERR(adc->ref);
err = regulator_enable(adc->ref);
if (err < 0)
- goto regulator_put;
+ return err;
iio->dev.parent = &client->dev;
iio->name = dev_name(&client->dev);
@@ -109,10 +107,6 @@ static int adc081c_probe(struct i2c_client *client,
regulator_disable:
regulator_disable(adc->ref);
-regulator_put:
- regulator_put(adc->ref);
-iio_free:
- iio_device_free(iio);
return err;
}
@@ -124,8 +118,6 @@ static int adc081c_remove(struct i2c_client *client)
iio_device_unregister(iio);
regulator_disable(adc->ref);
- regulator_put(adc->ref);
- iio_device_free(iio);
return 0;
}
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 0ad208a69c2..a952538a1a8 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -60,7 +60,6 @@ static void tiadc_step_config(struct tiadc_device *adc_dev)
{
unsigned int stepconfig;
int i, steps;
- u32 step_en;
/*
* There are 16 configurable steps and 8 analog input
@@ -86,8 +85,7 @@ static void tiadc_step_config(struct tiadc_device *adc_dev)
adc_dev->channel_step[i] = steps;
steps++;
}
- step_en = get_adc_step_mask(adc_dev);
- am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en);
+
}
static const char * const chan_name_ain[] = {
@@ -142,10 +140,22 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct tiadc_device *adc_dev = iio_priv(indio_dev);
- int i;
- unsigned int fifo1count, read;
+ int i, map_val;
+ unsigned int fifo1count, read, stepid;
u32 step = UINT_MAX;
bool found = false;
+ u32 step_en;
+ unsigned long timeout = jiffies + usecs_to_jiffies
+ (IDLE_TIMEOUT * adc_dev->channels);
+ step_en = get_adc_step_mask(adc_dev);
+ am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en);
+
+ /* Wait for ADC sequencer to complete sampling */
+ while (tiadc_readl(adc_dev, REG_ADCFSM) & SEQ_STATUS) {
+ if (time_after(jiffies, timeout))
+ return -EAGAIN;
+ }
+ map_val = chan->channel + TOTAL_CHANNELS;
/*
* When the sub-system is first enabled,
@@ -170,12 +180,16 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
for (i = 0; i < fifo1count; i++) {
read = tiadc_readl(adc_dev, REG_FIFO1);
- if (read >> 16 == step) {
- *val = read & 0xfff;
+ stepid = read & FIFOREAD_CHNLID_MASK;
+ stepid = stepid >> 0x10;
+
+ if (stepid == map_val) {
+ read = read & FIFOREAD_DATA_MASK;
found = true;
+ *val = read;
}
}
- am335x_tsc_se_update(adc_dev->mfd_tscadc);
+
if (found == false)
return -EBUSY;
return IIO_VAL_INT;
@@ -202,11 +216,11 @@ static int tiadc_probe(struct platform_device *pdev)
return -EINVAL;
}
- indio_dev = iio_device_alloc(sizeof(struct tiadc_device));
+ indio_dev = devm_iio_device_alloc(&pdev->dev,
+ sizeof(struct tiadc_device));
if (indio_dev == NULL) {
dev_err(&pdev->dev, "failed to allocate iio device\n");
- err = -ENOMEM;
- goto err_ret;
+ return -ENOMEM;
}
adc_dev = iio_priv(indio_dev);
@@ -227,7 +241,7 @@ static int tiadc_probe(struct platform_device *pdev)
err = tiadc_channel_init(indio_dev, adc_dev->channels);
if (err < 0)
- goto err_free_device;
+ return err;
err = iio_device_register(indio_dev);
if (err)
@@ -239,9 +253,6 @@ static int tiadc_probe(struct platform_device *pdev)
err_free_channels:
tiadc_channels_remove(indio_dev);
-err_free_device:
- iio_device_free(indio_dev);
-err_ret:
return err;
}
@@ -257,8 +268,6 @@ static int tiadc_remove(struct platform_device *pdev)
step_en = get_adc_step_mask(adc_dev);
am335x_tsc_se_clr(adc_dev->mfd_tscadc, step_en);
- iio_device_free(indio_dev);
-
return 0;
}
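Editor's note: the ti_am335x_adc hunk above moves step enabling into the read path and adds a bounded wait on the sequencer before draining the FIFO by step ID. A rough sketch of that pattern follows; register offsets and masks are placeholders standing in for the driver's own macros, not the hardware's real layout.

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/jiffies.h>

#define FOO_REG_FSM		0x44	/* placeholder for REG_ADCFSM */
#define FOO_REG_FIFOCNT		0xe4	/* placeholder for REG_FIFO1CNT */
#define FOO_REG_FIFODATA	0x100	/* placeholder for REG_FIFO1 */
#define FOO_SEQ_BUSY		BIT(5)	/* placeholder for SEQ_STATUS */
#define FOO_ID_MASK		0xf0000	/* placeholder for FIFOREAD_CHNLID_MASK */
#define FOO_DATA_MASK		0xfff	/* placeholder for FIFOREAD_DATA_MASK */

static int foo_wait_and_read(void __iomem *base, unsigned int wanted_id, int *val)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(500);
	unsigned int i, count, word;

	/* bound the wait on the sequencer so a stuck step cannot hang the read */
	while (readl(base + FOO_REG_FSM) & FOO_SEQ_BUSY) {
		if (time_after(jiffies, timeout))
			return -EAGAIN;
	}

	/* drain the FIFO, keeping only the word tagged with our step ID */
	count = readl(base + FOO_REG_FIFOCNT);
	for (i = 0; i < count; i++) {
		word = readl(base + FOO_REG_FIFODATA);
		if (((word & FOO_ID_MASK) >> 16) == wanted_id) {
			*val = word & FOO_DATA_MASK;
			return 0;
		}
	}

	return -EBUSY;
}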
diff --git a/drivers/iio/adc/twl6030-gpadc.c b/drivers/iio/adc/twl6030-gpadc.c
new file mode 100644
index 00000000000..0ea96c058c0
--- /dev/null
+++ b/drivers/iio/adc/twl6030-gpadc.c
@@ -0,0 +1,1013 @@
+/*
+ * TWL6030 GPADC module driver
+ *
+ * Copyright (C) 2009-2013 Texas Instruments Inc.
+ * Nishant Kamat <nskamat@ti.com>
+ * Balaji T K <balajitk@ti.com>
+ * Graeme Gregory <gg@slimlogic.co.uk>
+ * Girish S Ghongdemath <girishsg@ti.com>
+ * Ambresh K <ambresh@ti.com>
+ * Oleksandr Kozaruk <oleksandr.kozaruk@ti.com
+ *
+ * Based on twl4030-madc.c
+ * Copyright (C) 2008 Nokia Corporation
+ * Mikko Ylinen <mikko.k.ylinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
+#include <linux/i2c/twl.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define DRIVER_NAME "twl6030_gpadc"
+
+/*
+ * Per the TRM, the twl6030 has 17 channels and the twl6032 has 19.
+ * The 2 test network channels are not used, and neither are the
+ * 2 die temperature channels, as it is not defined how to convert
+ * an ADC value to a temperature.
+ */
+#define TWL6030_GPADC_USED_CHANNELS 13
+#define TWL6030_GPADC_MAX_CHANNELS 15
+#define TWL6032_GPADC_USED_CHANNELS 15
+#define TWL6032_GPADC_MAX_CHANNELS 19
+#define TWL6030_GPADC_NUM_TRIM_REGS 16
+
+#define TWL6030_GPADC_CTRL_P1 0x05
+
+#define TWL6032_GPADC_GPSELECT_ISB 0x07
+#define TWL6032_GPADC_CTRL_P1 0x08
+
+#define TWL6032_GPADC_GPCH0_LSB 0x0d
+#define TWL6032_GPADC_GPCH0_MSB 0x0e
+
+#define TWL6030_GPADC_CTRL_P1_SP1 BIT(3)
+
+#define TWL6030_GPADC_GPCH0_LSB (0x29)
+
+#define TWL6030_GPADC_RT_SW1_EOC_MASK BIT(5)
+
+#define TWL6030_GPADC_TRIM1 0xCD
+
+#define TWL6030_REG_TOGGLE1 0x90
+#define TWL6030_GPADCS BIT(1)
+#define TWL6030_GPADCR BIT(0)
+
+/**
+ * struct twl6030_chnl_calib - channel calibration
+ * @gain: slope coefficient for ideal curve
+ * @gain_error: gain error
+ * @offset_error: offset of the real curve
+ */
+struct twl6030_chnl_calib {
+ s32 gain;
+ s32 gain_error;
+ s32 offset_error;
+};
+
+/**
+ * struct twl6030_ideal_code - GPADC calibration parameters
+ * The GPADC is calibrated at two points: one close to the beginning
+ * and one close to the end of the measurable input range
+ *
+ * @channel: channel number
+ * @code1: ideal code for the input at the beginning
+ * @code2:	ideal code for the input at the end of the range
+ * @volt1: voltage input at the beginning(low voltage)
+ * @volt2: voltage input at the end(high voltage)
+ */
+struct twl6030_ideal_code {
+ int channel;
+ u16 code1;
+ u16 code2;
+ u16 volt1;
+ u16 volt2;
+};
+
+struct twl6030_gpadc_data;
+
+/**
+ * struct twl6030_gpadc_platform_data - platform specific data
+ * @nchannels: number of GPADC channels
+ * @iio_channels: iio channels
+ * @twl6030_ideal: pointer to calibration parameters
+ * @start_conversion: pointer to ADC start conversion function
+ * @channel_to_reg:	pointer to ADC function to convert channel to
+ * register address for reading conversion result
+ * @calibrate: pointer to calibration function
+ */
+struct twl6030_gpadc_platform_data {
+ const int nchannels;
+ const struct iio_chan_spec *iio_channels;
+ const struct twl6030_ideal_code *ideal;
+ int (*start_conversion)(int channel);
+ u8 (*channel_to_reg)(int channel);
+ int (*calibrate)(struct twl6030_gpadc_data *gpadc);
+};
+
+/**
+ * struct twl6030_gpadc_data - GPADC data
+ * @dev: device pointer
+ * @lock: mutual exclusion lock for the structure
+ * @irq_complete: completion to signal end of conversion
+ * @twl6030_cal_tbl: pointer to calibration data for each
+ * channel with gain error and offset
+ * @pdata: pointer to device specific data
+ */
+struct twl6030_gpadc_data {
+ struct device *dev;
+ struct mutex lock;
+ struct completion irq_complete;
+ struct twl6030_chnl_calib *twl6030_cal_tbl;
+ const struct twl6030_gpadc_platform_data *pdata;
+};
+
+/*
+ * Channels 11, 12, 13, 15 and 16 have no calibration data, and the
+ * calibration offset is the same for channels 1, 3, 4 and 5.
+ *
+ * The data is taken from the description of the GPADC_TRIM registers.
+ * The GPADC_TRIM registers keep the difference between the codes measured
+ * at the volt1 and volt2 input voltages and the corresponding code1 and code2
+ */
+static const struct twl6030_ideal_code
+ twl6030_ideal[TWL6030_GPADC_USED_CHANNELS] = {
+ [0] = { /* ch 0, external, battery type, resistor value */
+ .channel = 0,
+ .code1 = 116,
+ .code2 = 745,
+ .volt1 = 141,
+ .volt2 = 910,
+ },
+ [1] = { /* ch 1, external, battery temperature, NTC resistor value */
+ .channel = 1,
+ .code1 = 82,
+ .code2 = 900,
+ .volt1 = 100,
+ .volt2 = 1100,
+ },
+ [2] = { /* ch 2, external, audio accessory/general purpose */
+ .channel = 2,
+ .code1 = 55,
+ .code2 = 818,
+ .volt1 = 101,
+ .volt2 = 1499,
+ },
+ [3] = { /* ch 3, external, general purpose */
+ .channel = 3,
+ .code1 = 82,
+ .code2 = 900,
+ .volt1 = 100,
+ .volt2 = 1100,
+ },
+ [4] = { /* ch 4, external, temperature measurement/general purpose */
+ .channel = 4,
+ .code1 = 82,
+ .code2 = 900,
+ .volt1 = 100,
+ .volt2 = 1100,
+ },
+ [5] = { /* ch 5, external, general purpose */
+ .channel = 5,
+ .code1 = 82,
+ .code2 = 900,
+ .volt1 = 100,
+ .volt2 = 1100,
+ },
+ [6] = { /* ch 6, external, general purpose */
+ .channel = 6,
+ .code1 = 82,
+ .code2 = 900,
+ .volt1 = 100,
+ .volt2 = 1100,
+ },
+ [7] = { /* ch 7, internal, main battery */
+ .channel = 7,
+ .code1 = 614,
+ .code2 = 941,
+ .volt1 = 3001,
+ .volt2 = 4599,
+ },
+ [8] = { /* ch 8, internal, backup battery */
+ .channel = 8,
+ .code1 = 82,
+ .code2 = 688,
+ .volt1 = 501,
+ .volt2 = 4203,
+ },
+ [9] = { /* ch 9, internal, external charger input */
+ .channel = 9,
+ .code1 = 182,
+ .code2 = 818,
+ .volt1 = 2001,
+ .volt2 = 8996,
+ },
+ [10] = { /* ch 10, internal, VBUS */
+ .channel = 10,
+ .code1 = 149,
+ .code2 = 818,
+ .volt1 = 1001,
+ .volt2 = 5497,
+ },
+ [11] = { /* ch 11, internal, VBUS charging current */
+ .channel = 11,
+ },
+ /* ch 12, internal, Die temperature */
+ /* ch 13, internal, Die temperature */
+ [12] = { /* ch 14, internal, USB ID line */
+ .channel = 14,
+ .code1 = 48,
+ .code2 = 714,
+ .volt1 = 323,
+ .volt2 = 4800,
+ },
+};
+
+static const struct twl6030_ideal_code
+ twl6032_ideal[TWL6032_GPADC_USED_CHANNELS] = {
+ [0] = { /* ch 0, external, battery type, resistor value */
+ .channel = 0,
+ .code1 = 1441,
+ .code2 = 3276,
+ .volt1 = 440,
+ .volt2 = 1000,
+ },
+ [1] = { /* ch 1, external, battery temperature, NTC resistor value */
+ .channel = 1,
+ .code1 = 1441,
+ .code2 = 3276,
+ .volt1 = 440,
+ .volt2 = 1000,
+ },
+ [2] = { /* ch 2, external, audio accessory/general purpose */
+ .channel = 2,
+ .code1 = 1441,
+ .code2 = 3276,
+ .volt1 = 660,
+ .volt2 = 1500,
+ },
+ [3] = { /* ch 3, external, temperature with external diode/general
+ purpose */
+ .channel = 3,
+ .code1 = 1441,
+ .code2 = 3276,
+ .volt1 = 440,
+ .volt2 = 1000,
+ },
+ [4] = { /* ch 4, external, temperature measurement/general purpose */
+ .channel = 4,
+ .code1 = 1441,
+ .code2 = 3276,
+ .volt1 = 440,
+ .volt2 = 1000,
+ },
+ [5] = { /* ch 5, external, general purpose */
+ .channel = 5,
+ .code1 = 1441,
+ .code2 = 3276,
+ .volt1 = 440,
+ .volt2 = 1000,
+ },
+ [6] = { /* ch 6, external, general purpose */
+ .channel = 6,
+ .code1 = 1441,
+ .code2 = 3276,
+ .volt1 = 440,
+ .volt2 = 1000,
+ },
+ [7] = { /* ch7, internal, system supply */
+ .channel = 7,
+ .code1 = 1441,
+ .code2 = 3276,
+ .volt1 = 2200,
+ .volt2 = 5000,
+ },
+ [8] = { /* ch8, internal, backup battery */
+ .channel = 8,
+ .code1 = 1441,
+ .code2 = 3276,
+ .volt1 = 2200,
+ .volt2 = 5000,
+ },
+ [9] = { /* ch 9, internal, external charger input */
+ .channel = 9,
+ .code1 = 1441,
+ .code2 = 3276,
+ .volt1 = 3960,
+ .volt2 = 9000,
+ },
+ [10] = { /* ch10, internal, VBUS */
+ .channel = 10,
+ .code1 = 150,
+ .code2 = 751,
+ .volt1 = 1000,
+ .volt2 = 5000,
+ },
+ [11] = { /* ch 11, internal, VBUS DC-DC output current */
+ .channel = 11,
+ .code1 = 1441,
+ .code2 = 3276,
+ .volt1 = 660,
+ .volt2 = 1500,
+ },
+ /* ch 12, internal, Die temperature */
+ /* ch 13, internal, Die temperature */
+ [12] = { /* ch 14, internal, USB ID line */
+ .channel = 14,
+ .code1 = 1441,
+ .code2 = 3276,
+ .volt1 = 2420,
+ .volt2 = 5500,
+ },
+ /* ch 15, internal, test network */
+ /* ch 16, internal, test network */
+ [13] = { /* ch 17, internal, battery charging current */
+ .channel = 17,
+ },
+ [14] = { /* ch 18, internal, battery voltage */
+ .channel = 18,
+ .code1 = 1441,
+ .code2 = 3276,
+ .volt1 = 2200,
+ .volt2 = 5000,
+ },
+};
+
+static inline int twl6030_gpadc_write(u8 reg, u8 val)
+{
+ return twl_i2c_write_u8(TWL6030_MODULE_GPADC, val, reg);
+}
+
+static inline int twl6030_gpadc_read(u8 reg, u8 *val)
+{
+
+ return twl_i2c_read(TWL6030_MODULE_GPADC, val, reg, 2);
+}
+
+static int twl6030_gpadc_enable_irq(u8 mask)
+{
+ int ret;
+
+ ret = twl6030_interrupt_unmask(mask, REG_INT_MSK_LINE_B);
+ if (ret < 0)
+ return ret;
+
+ ret = twl6030_interrupt_unmask(mask, REG_INT_MSK_STS_B);
+
+ return ret;
+}
+
+static void twl6030_gpadc_disable_irq(u8 mask)
+{
+ twl6030_interrupt_mask(mask, REG_INT_MSK_LINE_B);
+ twl6030_interrupt_mask(mask, REG_INT_MSK_STS_B);
+}
+
+static irqreturn_t twl6030_gpadc_irq_handler(int irq, void *indio_dev)
+{
+ struct twl6030_gpadc_data *gpadc = iio_priv(indio_dev);
+
+ complete(&gpadc->irq_complete);
+
+ return IRQ_HANDLED;
+}
+
+static int twl6030_start_conversion(int channel)
+{
+ return twl6030_gpadc_write(TWL6030_GPADC_CTRL_P1,
+ TWL6030_GPADC_CTRL_P1_SP1);
+}
+
+static int twl6032_start_conversion(int channel)
+{
+ int ret;
+
+ ret = twl6030_gpadc_write(TWL6032_GPADC_GPSELECT_ISB, channel);
+ if (ret)
+ return ret;
+
+ return twl6030_gpadc_write(TWL6032_GPADC_CTRL_P1,
+ TWL6030_GPADC_CTRL_P1_SP1);
+}
+
+static u8 twl6030_channel_to_reg(int channel)
+{
+ return TWL6030_GPADC_GPCH0_LSB + 2 * channel;
+}
+
+static u8 twl6032_channel_to_reg(int channel)
+{
+ /*
+	 * for whichever channel was previously selected, when the conversion
+	 * is ready the result is available in GPCH0_LSB, GPCH0_MSB.
+ */
+
+ return TWL6032_GPADC_GPCH0_LSB;
+}
+
+static int twl6030_gpadc_lookup(const struct twl6030_ideal_code *ideal,
+ int channel, int size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ if (ideal[i].channel == channel)
+ break;
+
+ return i;
+}
+
+static int twl6030_channel_calibrated(const struct twl6030_gpadc_platform_data
+ *pdata, int channel)
+{
+ const struct twl6030_ideal_code *ideal = pdata->ideal;
+ int i;
+
+ i = twl6030_gpadc_lookup(ideal, channel, pdata->nchannels);
+ /* not calibrated channels have 0 in all structure members */
+ return pdata->ideal[i].code2;
+}
+
+static int twl6030_gpadc_make_correction(struct twl6030_gpadc_data *gpadc,
+ int channel, int raw_code)
+{
+ const struct twl6030_ideal_code *ideal = gpadc->pdata->ideal;
+ int corrected_code;
+ int i;
+
+ i = twl6030_gpadc_lookup(ideal, channel, gpadc->pdata->nchannels);
+ corrected_code = ((raw_code * 1000) -
+ gpadc->twl6030_cal_tbl[i].offset_error) /
+ gpadc->twl6030_cal_tbl[i].gain_error;
+
+ return corrected_code;
+}
+
+static int twl6030_gpadc_get_raw(struct twl6030_gpadc_data *gpadc,
+ int channel, int *res)
+{
+ u8 reg = gpadc->pdata->channel_to_reg(channel);
+ __le16 val;
+ int raw_code;
+ int ret;
+
+ ret = twl6030_gpadc_read(reg, (u8 *)&val);
+ if (ret) {
+ dev_dbg(gpadc->dev, "unable to read register 0x%X\n", reg);
+ return ret;
+ }
+
+ raw_code = le16_to_cpu(val);
+ dev_dbg(gpadc->dev, "GPADC raw code: %d", raw_code);
+
+ if (twl6030_channel_calibrated(gpadc->pdata, channel))
+ *res = twl6030_gpadc_make_correction(gpadc, channel, raw_code);
+ else
+ *res = raw_code;
+
+ return ret;
+}
+
+static int twl6030_gpadc_get_processed(struct twl6030_gpadc_data *gpadc,
+ int channel, int *val)
+{
+ const struct twl6030_ideal_code *ideal = gpadc->pdata->ideal;
+ int corrected_code;
+ int channel_value;
+ int i;
+ int ret;
+
+ ret = twl6030_gpadc_get_raw(gpadc, channel, &corrected_code);
+ if (ret)
+ return ret;
+
+ i = twl6030_gpadc_lookup(ideal, channel, gpadc->pdata->nchannels);
+ channel_value = corrected_code *
+ gpadc->twl6030_cal_tbl[i].gain;
+
+ /* Shift back into mV range */
+ channel_value /= 1000;
+
+ dev_dbg(gpadc->dev, "GPADC corrected code: %d", corrected_code);
+ dev_dbg(gpadc->dev, "GPADC value: %d", channel_value);
+
+ *val = channel_value;
+
+ return ret;
+}
+
+static int twl6030_gpadc_read_raw(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan,
+ int *val, int *val2, long mask)
+{
+ struct twl6030_gpadc_data *gpadc = iio_priv(indio_dev);
+ int ret;
+ long timeout;
+
+ mutex_lock(&gpadc->lock);
+
+ ret = gpadc->pdata->start_conversion(chan->channel);
+ if (ret) {
+ dev_err(gpadc->dev, "failed to start conversion\n");
+ goto err;
+ }
+ /* wait for conversion to complete */
+ timeout = wait_for_completion_interruptible_timeout(
+ &gpadc->irq_complete, msecs_to_jiffies(5000));
+ if (timeout == 0) {
+ ret = -ETIMEDOUT;
+ goto err;
+ } else if (timeout < 0) {
+ ret = -EINTR;
+ goto err;
+ }
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = twl6030_gpadc_get_raw(gpadc, chan->channel, val);
+ ret = ret ? -EIO : IIO_VAL_INT;
+ break;
+
+ case IIO_CHAN_INFO_PROCESSED:
+ ret = twl6030_gpadc_get_processed(gpadc, chan->channel, val);
+ ret = ret ? -EIO : IIO_VAL_INT;
+ break;
+
+ default:
+ break;
+ }
+err:
+ mutex_unlock(&gpadc->lock);
+
+ return ret;
+}
+
+/*
+ * The GPADC channels are calibrated using a two point calibration method.
+ * The channels are measured with two known input voltages, volt1 and volt2,
+ * for which the corresponding ideal output codes, code1 and code2, are known.
+ * The differences (d1, d2) between the ideal and measured codes are stored
+ * in trim registers.
+ * The goal is to find offset and gain of the real curve for each calibrated
+ * channel.
+ * gain: k = 1 + ((d2 - d1) / (x2 - x1))
+ * offset: b = d1 + (k - 1) * x1
+ */
+static void twl6030_calibrate_channel(struct twl6030_gpadc_data *gpadc,
+ int channel, int d1, int d2)
+{
+ int b, k, gain, x1, x2, i;
+ const struct twl6030_ideal_code *ideal = gpadc->pdata->ideal;
+
+ i = twl6030_gpadc_lookup(ideal, channel, gpadc->pdata->nchannels);
+
+ /* Gain */
+ gain = ((ideal[i].volt2 - ideal[i].volt1) * 1000) /
+ (ideal[i].code2 - ideal[i].code1);
+
+ x1 = ideal[i].code1;
+ x2 = ideal[i].code2;
+
+ /* k - real curve gain */
+ k = 1000 + (((d2 - d1) * 1000) / (x2 - x1));
+
+ /* b - offset of the real curve gain */
+ b = (d1 * 1000) - (k - 1000) * x1;
+
+ gpadc->twl6030_cal_tbl[i].gain = gain;
+ gpadc->twl6030_cal_tbl[i].gain_error = k;
+ gpadc->twl6030_cal_tbl[i].offset_error = b;
+
+ dev_dbg(gpadc->dev, "GPADC d1 for Chn: %d = %d\n", channel, d1);
+ dev_dbg(gpadc->dev, "GPADC d2 for Chn: %d = %d\n", channel, d2);
+ dev_dbg(gpadc->dev, "GPADC x1 for Chn: %d = %d\n", channel, x1);
+ dev_dbg(gpadc->dev, "GPADC x2 for Chn: %d = %d\n", channel, x2);
+ dev_dbg(gpadc->dev, "GPADC Gain for Chn: %d = %d\n", channel, gain);
+ dev_dbg(gpadc->dev, "GPADC k for Chn: %d = %d\n", channel, k);
+ dev_dbg(gpadc->dev, "GPADC b for Chn: %d = %d\n", channel, b);
+}
+
+static inline int twl6030_gpadc_get_trim_offset(s8 d)
+{
+ /*
+ * XXX NOTE!
+ * bit 0 - sign, bit 7 - reserved, 6..1 - trim value
+	 * Although the documentation states that the trim value is
+	 * an absolute value, the correct conversion results are
+	 * obtained if the value is interpreted as 2's complement.
+ */
+ __u32 temp = ((d & 0x7f) >> 1) | ((d & 1) << 6);
+
+ return sign_extend32(temp, 6);
+}
+
+static int twl6030_calibration(struct twl6030_gpadc_data *gpadc)
+{
+ int ret;
+ int chn;
+ u8 trim_regs[TWL6030_GPADC_NUM_TRIM_REGS];
+ s8 d1, d2;
+
+ /*
+	 * For calibration, two measurements were performed for some
+	 * channels at the factory during the production test, and the
+	 * results were stored in registers. These two stored values are
+	 * used to correct the measurements. They represent the offsets
+	 * of the given inputs from the output on the ideal curve.
+ */
+ ret = twl_i2c_read(TWL6030_MODULE_ID2, trim_regs,
+ TWL6030_GPADC_TRIM1, TWL6030_GPADC_NUM_TRIM_REGS);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "calibration failed\n");
+ return ret;
+ }
+
+ for (chn = 0; chn < TWL6030_GPADC_MAX_CHANNELS; chn++) {
+
+ switch (chn) {
+ case 0:
+ d1 = trim_regs[0];
+ d2 = trim_regs[1];
+ break;
+ case 1:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ d1 = trim_regs[4];
+ d2 = trim_regs[5];
+ break;
+ case 2:
+ d1 = trim_regs[12];
+ d2 = trim_regs[13];
+ break;
+ case 7:
+ d1 = trim_regs[6];
+ d2 = trim_regs[7];
+ break;
+ case 8:
+ d1 = trim_regs[2];
+ d2 = trim_regs[3];
+ break;
+ case 9:
+ d1 = trim_regs[8];
+ d2 = trim_regs[9];
+ break;
+ case 10:
+ d1 = trim_regs[10];
+ d2 = trim_regs[11];
+ break;
+ case 14:
+ d1 = trim_regs[14];
+ d2 = trim_regs[15];
+ break;
+ default:
+ continue;
+ }
+
+ d1 = twl6030_gpadc_get_trim_offset(d1);
+ d2 = twl6030_gpadc_get_trim_offset(d2);
+
+ twl6030_calibrate_channel(gpadc, chn, d1, d2);
+ }
+
+ return 0;
+}
+
+static int twl6032_get_trim_value(u8 *trim_regs, unsigned int reg0,
+ unsigned int reg1, unsigned int mask0, unsigned int mask1,
+ unsigned int shift0)
+{
+ int val;
+
+ val = (trim_regs[reg0] & mask0) << shift0;
+ val |= (trim_regs[reg1] & mask1) >> 1;
+ if (trim_regs[reg1] & 0x01)
+ val = -val;
+
+ return val;
+}
+
+static int twl6032_calibration(struct twl6030_gpadc_data *gpadc)
+{
+ int chn, d1 = 0, d2 = 0, temp;
+ u8 trim_regs[TWL6030_GPADC_NUM_TRIM_REGS];
+ int ret;
+
+ ret = twl_i2c_read(TWL6030_MODULE_ID2, trim_regs,
+ TWL6030_GPADC_TRIM1, TWL6030_GPADC_NUM_TRIM_REGS);
+ if (ret < 0) {
+ dev_err(gpadc->dev, "calibration failed\n");
+ return ret;
+ }
+
+ /*
+	 * Loop to calculate the calibration values needed so the GPADC
+	 * returns voltages rather than raw codes.
+	 *
+	 * The gain is calculated as 3-decimal-place fixed point.
+ */
+ for (chn = 0; chn < TWL6032_GPADC_MAX_CHANNELS; chn++) {
+
+ switch (chn) {
+ case 0:
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 11:
+ case 14:
+ d1 = twl6032_get_trim_value(trim_regs, 2, 0, 0x1f,
+ 0x06, 2);
+ d2 = twl6032_get_trim_value(trim_regs, 3, 1, 0x3f,
+ 0x06, 2);
+ break;
+ case 8:
+ temp = twl6032_get_trim_value(trim_regs, 2, 0, 0x1f,
+ 0x06, 2);
+ d1 = temp + twl6032_get_trim_value(trim_regs, 7, 6,
+ 0x18, 0x1E, 1);
+
+ temp = twl6032_get_trim_value(trim_regs, 3, 1, 0x3F,
+ 0x06, 2);
+ d2 = temp + twl6032_get_trim_value(trim_regs, 9, 7,
+ 0x1F, 0x06, 2);
+ break;
+ case 9:
+ temp = twl6032_get_trim_value(trim_regs, 2, 0, 0x1f,
+ 0x06, 2);
+ d1 = temp + twl6032_get_trim_value(trim_regs, 13, 11,
+ 0x18, 0x1E, 1);
+
+ temp = twl6032_get_trim_value(trim_regs, 3, 1, 0x3f,
+ 0x06, 2);
+ d2 = temp + twl6032_get_trim_value(trim_regs, 15, 13,
+ 0x1F, 0x06, 1);
+ break;
+ case 10:
+ d1 = twl6032_get_trim_value(trim_regs, 10, 8, 0x0f,
+ 0x0E, 3);
+ d2 = twl6032_get_trim_value(trim_regs, 14, 12, 0x0f,
+ 0x0E, 3);
+ break;
+ case 7:
+ case 18:
+ temp = twl6032_get_trim_value(trim_regs, 2, 0, 0x1f,
+ 0x06, 2);
+
+ d1 = (trim_regs[4] & 0x7E) >> 1;
+ if (trim_regs[4] & 0x01)
+ d1 = -d1;
+ d1 += temp;
+
+ temp = twl6032_get_trim_value(trim_regs, 3, 1, 0x3f,
+ 0x06, 2);
+
+ d2 = (trim_regs[5] & 0xFE) >> 1;
+ if (trim_regs[5] & 0x01)
+ d2 = -d2;
+
+ d2 += temp;
+ break;
+ default:
+ /* No data for other channels */
+ continue;
+ }
+
+ twl6030_calibrate_channel(gpadc, chn, d1, d2);
+ }
+
+ return 0;
+}
+
+#define TWL6030_GPADC_CHAN(chn, _type, chan_info) { \
+ .type = _type, \
+ .channel = chn, \
+ .info_mask_separate = BIT(chan_info), \
+ .indexed = 1, \
+}
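+
+/*
+ * For illustration: TWL6030_GPADC_CHAN(0, IIO_VOLTAGE,
+ * IIO_CHAN_INFO_PROCESSED) describes an indexed voltage channel 0,
+ * which the IIO core is expected to expose as in_voltage0_input,
+ * while the IIO_CHAN_INFO_RAW temperature channels show up as
+ * in_tempN_raw.
+ */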
+
+static const struct iio_chan_spec twl6030_gpadc_iio_channels[] = {
+ TWL6030_GPADC_CHAN(0, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+ TWL6030_GPADC_CHAN(1, IIO_TEMP, IIO_CHAN_INFO_RAW),
+ TWL6030_GPADC_CHAN(2, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+ TWL6030_GPADC_CHAN(3, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+ TWL6030_GPADC_CHAN(4, IIO_TEMP, IIO_CHAN_INFO_RAW),
+ TWL6030_GPADC_CHAN(5, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+ TWL6030_GPADC_CHAN(6, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+ TWL6030_GPADC_CHAN(7, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+ TWL6030_GPADC_CHAN(8, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+ TWL6030_GPADC_CHAN(9, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+ TWL6030_GPADC_CHAN(10, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+ TWL6030_GPADC_CHAN(11, IIO_VOLTAGE, IIO_CHAN_INFO_RAW),
+ TWL6030_GPADC_CHAN(14, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+};
+
+static const struct iio_chan_spec twl6032_gpadc_iio_channels[] = {
+ TWL6030_GPADC_CHAN(0, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+ TWL6030_GPADC_CHAN(1, IIO_TEMP, IIO_CHAN_INFO_RAW),
+ TWL6030_GPADC_CHAN(2, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+ TWL6030_GPADC_CHAN(3, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+ TWL6030_GPADC_CHAN(4, IIO_TEMP, IIO_CHAN_INFO_RAW),
+ TWL6030_GPADC_CHAN(5, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+ TWL6030_GPADC_CHAN(6, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+ TWL6030_GPADC_CHAN(7, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+ TWL6030_GPADC_CHAN(8, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+ TWL6030_GPADC_CHAN(9, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+ TWL6030_GPADC_CHAN(10, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+ TWL6030_GPADC_CHAN(11, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+ TWL6030_GPADC_CHAN(14, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+ TWL6030_GPADC_CHAN(17, IIO_VOLTAGE, IIO_CHAN_INFO_RAW),
+ TWL6030_GPADC_CHAN(18, IIO_VOLTAGE, IIO_CHAN_INFO_PROCESSED),
+};
+
+static const struct iio_info twl6030_gpadc_iio_info = {
+ .read_raw = &twl6030_gpadc_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static const struct twl6030_gpadc_platform_data twl6030_pdata = {
+ .iio_channels = twl6030_gpadc_iio_channels,
+ .nchannels = TWL6030_GPADC_USED_CHANNELS,
+ .ideal = twl6030_ideal,
+ .start_conversion = twl6030_start_conversion,
+ .channel_to_reg = twl6030_channel_to_reg,
+ .calibrate = twl6030_calibration,
+};
+
+static const struct twl6030_gpadc_platform_data twl6032_pdata = {
+ .iio_channels = twl6032_gpadc_iio_channels,
+ .nchannels = TWL6032_GPADC_USED_CHANNELS,
+ .ideal = twl6032_ideal,
+ .start_conversion = twl6032_start_conversion,
+ .channel_to_reg = twl6032_channel_to_reg,
+ .calibrate = twl6032_calibration,
+};
+
+static const struct of_device_id of_twl6030_match_tbl[] = {
+ {
+ .compatible = "ti,twl6030-gpadc",
+ .data = &twl6030_pdata,
+ },
+ {
+ .compatible = "ti,twl6032-gpadc",
+ .data = &twl6032_pdata,
+ },
+ { /* end */ }
+};
+MODULE_DEVICE_TABLE(of, of_twl6030_match_tbl);
+
+static int twl6030_gpadc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct twl6030_gpadc_data *gpadc;
+ const struct twl6030_gpadc_platform_data *pdata;
+ const struct of_device_id *match;
+ struct iio_dev *indio_dev;
+ int irq;
+ int ret;
+
+ match = of_match_device(of_match_ptr(of_twl6030_match_tbl), dev);
+ if (!match)
+ return -EINVAL;
+
+ pdata = match->data;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*gpadc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ gpadc = iio_priv(indio_dev);
+
+ gpadc->twl6030_cal_tbl = devm_kzalloc(dev,
+ sizeof(*gpadc->twl6030_cal_tbl) *
+ pdata->nchannels, GFP_KERNEL);
+ if (!gpadc->twl6030_cal_tbl)
+ return -ENOMEM;
+
+ gpadc->dev = dev;
+ gpadc->pdata = pdata;
+
+ platform_set_drvdata(pdev, indio_dev);
+ mutex_init(&gpadc->lock);
+ init_completion(&gpadc->irq_complete);
+
+ ret = pdata->calibrate(gpadc);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to read calibration registers\n");
+ return ret;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return irq;
+ }
+
+ ret = devm_request_threaded_irq(dev, irq, NULL,
+ twl6030_gpadc_irq_handler,
+ IRQF_ONESHOT, "twl6030_gpadc", indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request irq\n");
+ return ret;
+ }
+
+ ret = twl6030_gpadc_enable_irq(TWL6030_GPADC_RT_SW1_EOC_MASK);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to enable GPADC interrupt\n");
+ return ret;
+ }
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, TWL6030_GPADCS,
+ TWL6030_REG_TOGGLE1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to enable GPADC module\n");
+ return ret;
+ }
+
+ indio_dev->name = DRIVER_NAME;
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &twl6030_gpadc_iio_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = pdata->iio_channels;
+ indio_dev->num_channels = pdata->nchannels;
+
+ return iio_device_register(indio_dev);
+}
+
+static int twl6030_gpadc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+
+ twl6030_gpadc_disable_irq(TWL6030_GPADC_RT_SW1_EOC_MASK);
+ iio_device_unregister(indio_dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int twl6030_gpadc_suspend(struct device *pdev)
+{
+ int ret;
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, TWL6030_GPADCR,
+ TWL6030_REG_TOGGLE1);
+ if (ret)
+ dev_err(pdev, "error resetting GPADC (%d)!\n", ret);
+
+ return 0;
+}
+
+static int twl6030_gpadc_resume(struct device *pdev)
+{
+ int ret;
+
+ ret = twl_i2c_write_u8(TWL6030_MODULE_ID1, TWL6030_GPADCS,
+ TWL6030_REG_TOGGLE1);
+ if (ret)
+ dev_err(pdev, "error setting GPADC (%d)!\n", ret);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(twl6030_gpadc_pm_ops, twl6030_gpadc_suspend,
+ twl6030_gpadc_resume);
+
+static struct platform_driver twl6030_gpadc_driver = {
+ .probe = twl6030_gpadc_probe,
+ .remove = twl6030_gpadc_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &twl6030_gpadc_pm_ops,
+ .of_match_table = of_twl6030_match_tbl,
+ },
+};
+
+module_platform_driver(twl6030_gpadc_driver);
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_AUTHOR("Balaji T K <balajitk@ti.com>");
+MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
+MODULE_AUTHOR("Oleksandr Kozaruk <oleksandr.kozaruk@ti.com>");
+MODULE_DESCRIPTION("twl6030 ADC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/adc/viperboard_adc.c b/drivers/iio/adc/viperboard_adc.c
index 56ac481c73c..09727a71e9f 100644
--- a/drivers/iio/adc/viperboard_adc.c
+++ b/drivers/iio/adc/viperboard_adc.c
@@ -124,7 +124,7 @@ static int vprbrd_adc_probe(struct platform_device *pdev)
int ret;
/* registering iio */
- indio_dev = iio_device_alloc(sizeof(*adc));
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
if (!indio_dev) {
dev_err(&pdev->dev, "failed allocating iio device\n");
return -ENOMEM;
@@ -142,16 +142,12 @@ static int vprbrd_adc_probe(struct platform_device *pdev)
ret = iio_device_register(indio_dev);
if (ret) {
dev_err(&pdev->dev, "could not register iio (adc)");
- goto error;
+ return ret;
}
platform_set_drvdata(pdev, indio_dev);
return 0;
-
-error:
- iio_device_free(indio_dev);
- return ret;
}
static int vprbrd_adc_remove(struct platform_device *pdev)
@@ -159,7 +155,6 @@ static int vprbrd_adc_remove(struct platform_device *pdev)
struct iio_dev *indio_dev = platform_get_drvdata(pdev);
iio_device_unregister(indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/amplifiers/Kconfig b/drivers/iio/amplifiers/Kconfig
index 05d707ed7d4..e9c5f2cd925 100644
--- a/drivers/iio/amplifiers/Kconfig
+++ b/drivers/iio/amplifiers/Kconfig
@@ -1,6 +1,8 @@
#
# Gain Amplifiers, etc.
#
+# When adding new entries keep the list in alphabetical order
+
menu "Amplifiers"
config AD8366
diff --git a/drivers/iio/amplifiers/Makefile b/drivers/iio/amplifiers/Makefile
index a6ca366908e..8da4b787898 100644
--- a/drivers/iio/amplifiers/Makefile
+++ b/drivers/iio/amplifiers/Makefile
@@ -2,4 +2,5 @@
# Makefile iio/amplifiers
#
+# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AD8366) += ad8366.o
diff --git a/drivers/iio/amplifiers/ad8366.c b/drivers/iio/amplifiers/ad8366.c
index d354554b51b..d0a79a4bce1 100644
--- a/drivers/iio/amplifiers/ad8366.c
+++ b/drivers/iio/amplifiers/ad8366.c
@@ -139,17 +139,17 @@ static int ad8366_probe(struct spi_device *spi)
struct ad8366_state *st;
int ret;
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
st = iio_priv(indio_dev);
- st->reg = regulator_get(&spi->dev, "vcc");
+ st->reg = devm_regulator_get(&spi->dev, "vcc");
if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
if (ret)
- goto error_put_reg;
+ return ret;
}
spi_set_drvdata(spi, indio_dev);
@@ -173,11 +173,6 @@ static int ad8366_probe(struct spi_device *spi)
error_disable_reg:
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
-error_put_reg:
- if (!IS_ERR(st->reg))
- regulator_put(st->reg);
-
- iio_device_free(indio_dev);
return ret;
}
@@ -195,8 +190,6 @@ static int ad8366_remove(struct spi_device *spi)
regulator_put(reg);
}
- iio_device_free(indio_dev);
-
return 0;
}
diff --git a/drivers/iio/common/Makefile b/drivers/iio/common/Makefile
index c2352beb5d9..3112df0060e 100644
--- a/drivers/iio/common/Makefile
+++ b/drivers/iio/common/Makefile
@@ -6,5 +6,6 @@
# instead of duplicating in each module.
#
+# When adding new entries keep the list in alphabetical order
obj-y += hid-sensors/
obj-y += st_sensors/
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index 865b1781df6..965ee22d3ac 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -22,7 +22,7 @@
static inline u32 st_sensors_get_unaligned_le24(const u8 *p)
{
- return ((s32)((p[0] | p[1] << 8 | p[2] << 16) << 8) >> 8);
+ return (s32)((p[0] | p[1] << 8 | p[2] << 16) << 8) >> 8;
}
static int st_sensors_write_data_with_mask(struct iio_dev *indio_dev,
@@ -118,7 +118,7 @@ st_sensors_match_odr_error:
}
static int st_sensors_set_fullscale(struct iio_dev *indio_dev,
- unsigned int fs)
+ unsigned int fs)
{
int err, i = 0;
struct st_sensor_data *sdata = iio_priv(indio_dev);
@@ -198,13 +198,39 @@ int st_sensors_set_axis_enable(struct iio_dev *indio_dev, u8 axis_enable)
}
EXPORT_SYMBOL(st_sensors_set_axis_enable);
-int st_sensors_init_sensor(struct iio_dev *indio_dev)
+int st_sensors_init_sensor(struct iio_dev *indio_dev,
+ struct st_sensors_platform_data *pdata)
{
int err;
struct st_sensor_data *sdata = iio_priv(indio_dev);
mutex_init(&sdata->tb.buf_lock);
+ switch (pdata->drdy_int_pin) {
+ case 1:
+ if (sdata->sensor->drdy_irq.mask_int1 == 0) {
+ dev_err(&indio_dev->dev,
+ "DRDY on INT1 not available.\n");
+ err = -EINVAL;
+ goto init_error;
+ }
+ sdata->drdy_int_pin = 1;
+ break;
+ case 2:
+ if (sdata->sensor->drdy_irq.mask_int2 == 0) {
+ dev_err(&indio_dev->dev,
+ "DRDY on INT2 not available.\n");
+ err = -EINVAL;
+ goto init_error;
+ }
+ sdata->drdy_int_pin = 2;
+ break;
+ default:
+ dev_err(&indio_dev->dev, "DRDY on pdata not valid.\n");
+ err = -EINVAL;
+ goto init_error;
+ }
+
err = st_sensors_set_enable(indio_dev, false);
if (err < 0)
goto init_error;
@@ -234,6 +260,7 @@ EXPORT_SYMBOL(st_sensors_init_sensor);
int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
{
int err;
+ u8 drdy_mask;
struct st_sensor_data *sdata = iio_priv(indio_dev);
/* Enable/Disable the interrupt generator 1. */
@@ -245,10 +272,14 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
goto st_accel_set_dataready_irq_error;
}
+ if (sdata->drdy_int_pin == 1)
+ drdy_mask = sdata->sensor->drdy_irq.mask_int1;
+ else
+ drdy_mask = sdata->sensor->drdy_irq.mask_int2;
+
/* Enable/Disable the interrupt generator for data ready. */
err = st_sensors_write_data_with_mask(indio_dev,
- sdata->sensor->drdy_irq.addr,
- sdata->sensor->drdy_irq.mask, (int)enable);
+ sdata->sensor->drdy_irq.addr, drdy_mask, (int)enable);
st_accel_set_dataready_irq_error:
return err;
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index c9c33ce32d3..3c6a78a75b7 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -1,6 +1,8 @@
#
# DAC drivers
#
+# When adding new entries keep the list in alphabetical order
+
menu "Digital to analog converters"
config AD5064
@@ -15,7 +17,7 @@ config AD5064
module will be called ad5064.
config AD5360
- tristate "Analog Devices Analog Devices AD5360/61/62/63/70/71/73 DAC driver"
+ tristate "Analog Devices AD5360/61/62/63/70/71/73 DAC driver"
depends on SPI
help
Say yes here to build support for Analog Devices AD5360, AD5361,
@@ -48,13 +50,6 @@ config AD5421
To compile this driver as module choose M here: the module will be called
ad5421.
-config AD5624R_SPI
- tristate "Analog Devices AD5624/44/64R DAC spi driver"
- depends on SPI
- help
- Say yes here to build support for Analog Devices AD5624R, AD5644R and
- AD5664R converters (DAC). This driver uses the common SPI interface.
-
config AD5446
tristate "Analog Devices AD5446 and similar single channel DACs driver"
depends on (SPI_MASTER && I2C!=m) || I2C
@@ -68,7 +63,7 @@ config AD5446
module will be called ad5446.
config AD5449
- tristate "Analog Device AD5449 and similar DACs driver"
+ tristate "Analog Devices AD5449 and similar DACs driver"
depends on SPI_MASTER
help
Say yes here to build support for Analog Devices AD5415, AD5426, AD5429,
@@ -87,6 +82,24 @@ config AD5504
To compile this driver as a module, choose M here: the
module will be called ad5504.
+config AD5624R_SPI
+ tristate "Analog Devices AD5624/44/64R DAC spi driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD5624R, AD5644R and
+ AD5664R converters (DAC). This driver uses the common SPI interface.
+
+config AD5686
+ tristate "Analog Devices AD5686R/AD5685R/AD5684R DAC SPI driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD5686R, AD5685R,
+ AD5684R, AD5791 Voltage Output Digital to
+ Analog Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5686.
+
config AD5755
tristate "Analog Devices AD5755/AD5755-1/AD5757/AD5735/AD5737 DAC driver"
depends on SPI_MASTER
@@ -119,19 +132,8 @@ config AD5791
To compile this driver as a module, choose M here: the
module will be called ad5791.
-config AD5686
- tristate "Analog Devices AD5686R/AD5685R/AD5684R DAC SPI driver"
- depends on SPI
- help
- Say yes here to build support for Analog Devices AD5686R, AD5685R,
- AD5684R, AD5791 Voltage Output Digital to
- Analog Converter.
-
- To compile this driver as a module, choose M here: the
- module will be called ad5686.
-
config AD7303
- tristate "Analog Devices Analog Devices AD7303 DAC driver"
+ tristate "Analog Devices AD7303 DAC driver"
depends on SPI
help
Say yes here to build support for Analog Devices AD7303 Digital to Analog
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index c8d7ab6bff0..bb84ad64463 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -2,6 +2,7 @@
# Makefile for industrial I/O DAC drivers
#
+# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AD5360) += ad5360.o
obj-$(CONFIG_AD5380) += ad5380.o
obj-$(CONFIG_AD5421) += ad5421.o
diff --git a/drivers/iio/dac/ad5064.c b/drivers/iio/dac/ad5064.c
index aa26d50ab63..a3a52be4852 100644
--- a/drivers/iio/dac/ad5064.c
+++ b/drivers/iio/dac/ad5064.c
@@ -442,7 +442,7 @@ static int ad5064_probe(struct device *dev, enum ad5064_type type,
unsigned int i;
int ret;
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
@@ -456,23 +456,23 @@ static int ad5064_probe(struct device *dev, enum ad5064_type type,
for (i = 0; i < ad5064_num_vref(st); ++i)
st->vref_reg[i].supply = ad5064_vref_name(st, i);
- ret = regulator_bulk_get(dev, ad5064_num_vref(st),
+ ret = devm_regulator_bulk_get(dev, ad5064_num_vref(st),
st->vref_reg);
if (ret) {
if (!st->chip_info->internal_vref)
- goto error_free;
+ return ret;
st->use_internal_vref = true;
ret = ad5064_write(st, AD5064_CMD_CONFIG, 0,
AD5064_CONFIG_INT_VREF_ENABLE, 0);
if (ret) {
dev_err(dev, "Failed to enable internal vref: %d\n",
ret);
- goto error_free;
+ return ret;
}
} else {
ret = regulator_bulk_enable(ad5064_num_vref(st), st->vref_reg);
if (ret)
- goto error_free_reg;
+ return ret;
}
indio_dev->dev.parent = dev;
@@ -498,11 +498,6 @@ static int ad5064_probe(struct device *dev, enum ad5064_type type,
error_disable_reg:
if (!st->use_internal_vref)
regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg);
-error_free_reg:
- if (!st->use_internal_vref)
- regulator_bulk_free(ad5064_num_vref(st), st->vref_reg);
-error_free:
- iio_device_free(indio_dev);
return ret;
}
@@ -514,12 +509,8 @@ static int ad5064_remove(struct device *dev)
iio_device_unregister(indio_dev);
- if (!st->use_internal_vref) {
+ if (!st->use_internal_vref)
regulator_bulk_disable(ad5064_num_vref(st), st->vref_reg);
- regulator_bulk_free(ad5064_num_vref(st), st->vref_reg);
- }
-
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index 80923af424f..d2da71ece74 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -459,7 +459,7 @@ static int ad5360_probe(struct spi_device *spi)
unsigned int i;
int ret;
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL) {
dev_err(&spi->dev, "Failed to allocate iio device\n");
return -ENOMEM;
@@ -480,13 +480,13 @@ static int ad5360_probe(struct spi_device *spi)
ret = ad5360_alloc_channels(indio_dev);
if (ret) {
dev_err(&spi->dev, "Failed to allocate channel spec: %d\n", ret);
- goto error_free;
+ return ret;
}
for (i = 0; i < st->chip_info->num_vrefs; ++i)
st->vref_reg[i].supply = ad5360_vref_name[i];
- ret = regulator_bulk_get(&st->spi->dev, st->chip_info->num_vrefs,
+ ret = devm_regulator_bulk_get(&st->spi->dev, st->chip_info->num_vrefs,
st->vref_reg);
if (ret) {
dev_err(&spi->dev, "Failed to request vref regulators: %d\n", ret);
@@ -496,7 +496,7 @@ static int ad5360_probe(struct spi_device *spi)
ret = regulator_bulk_enable(st->chip_info->num_vrefs, st->vref_reg);
if (ret) {
dev_err(&spi->dev, "Failed to enable vref regulators: %d\n", ret);
- goto error_free_reg;
+ goto error_free_channels;
}
ret = iio_device_register(indio_dev);
@@ -509,12 +509,8 @@ static int ad5360_probe(struct spi_device *spi)
error_disable_reg:
regulator_bulk_disable(st->chip_info->num_vrefs, st->vref_reg);
-error_free_reg:
- regulator_bulk_free(st->chip_info->num_vrefs, st->vref_reg);
error_free_channels:
kfree(indio_dev->channels);
-error_free:
- iio_device_free(indio_dev);
return ret;
}
@@ -529,9 +525,6 @@ static int ad5360_remove(struct spi_device *spi)
kfree(indio_dev->channels);
regulator_bulk_disable(st->chip_info->num_vrefs, st->vref_reg);
- regulator_bulk_free(st->chip_info->num_vrefs, st->vref_reg);
-
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/dac/ad5380.c b/drivers/iio/dac/ad5380.c
index bf2db02215c..1c44ae3920e 100644
--- a/drivers/iio/dac/ad5380.c
+++ b/drivers/iio/dac/ad5380.c
@@ -369,11 +369,10 @@ static int ad5380_probe(struct device *dev, struct regmap *regmap,
unsigned int ctrl = 0;
int ret;
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (indio_dev == NULL) {
dev_err(dev, "Failed to allocate iio device\n");
- ret = -ENOMEM;
- goto error_out;
+ return -ENOMEM;
}
st = iio_priv(indio_dev);
@@ -391,13 +390,13 @@ static int ad5380_probe(struct device *dev, struct regmap *regmap,
ret = ad5380_alloc_channels(indio_dev);
if (ret) {
dev_err(dev, "Failed to allocate channel spec: %d\n", ret);
- goto error_free;
+ return ret;
}
if (st->chip_info->int_vref == 2500000)
ctrl |= AD5380_CTRL_INT_VREF_2V5;
- st->vref_reg = regulator_get(dev, "vref");
+ st->vref_reg = devm_regulator_get(dev, "vref");
if (!IS_ERR(st->vref_reg)) {
ret = regulator_enable(st->vref_reg);
if (ret) {
@@ -434,13 +433,7 @@ error_disable_reg:
if (!IS_ERR(st->vref_reg))
regulator_disable(st->vref_reg);
error_free_reg:
- if (!IS_ERR(st->vref_reg))
- regulator_put(st->vref_reg);
-
kfree(indio_dev->channels);
-error_free:
- iio_device_free(indio_dev);
-error_out:
return ret;
}
@@ -456,11 +449,8 @@ static int ad5380_remove(struct device *dev)
if (!IS_ERR(st->vref_reg)) {
regulator_disable(st->vref_reg);
- regulator_put(st->vref_reg);
}
- iio_device_free(indio_dev);
-
return 0;
}
diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
index 98f24407c3c..1f78b14abb7 100644
--- a/drivers/iio/dac/ad5421.c
+++ b/drivers/iio/dac/ad5421.c
@@ -451,7 +451,7 @@ static int ad5421_probe(struct spi_device *spi)
struct ad5421_state *st;
int ret;
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL) {
dev_err(&spi->dev, "Failed to allocate iio device\n");
return -ENOMEM;
@@ -484,31 +484,23 @@ static int ad5421_probe(struct spi_device *spi)
ad5421_update_ctrl(indio_dev, 0, 0);
if (spi->irq) {
- ret = request_threaded_irq(spi->irq,
+ ret = devm_request_threaded_irq(&spi->dev, spi->irq,
NULL,
ad5421_fault_handler,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
"ad5421 fault",
indio_dev);
if (ret)
- goto error_free;
+ return ret;
}
ret = iio_device_register(indio_dev);
if (ret) {
dev_err(&spi->dev, "Failed to register iio device: %d\n", ret);
- goto error_free_irq;
+ return ret;
}
return 0;
-
-error_free_irq:
- if (spi->irq)
- free_irq(spi->irq, indio_dev);
-error_free:
- iio_device_free(indio_dev);
-
- return ret;
}
static int ad5421_remove(struct spi_device *spi)
@@ -516,9 +508,6 @@ static int ad5421_remove(struct spi_device *spi)
struct iio_dev *indio_dev = spi_get_drvdata(spi);
iio_device_unregister(indio_dev);
- if (spi->irq)
- free_irq(spi->irq, indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/dac/ad5446.c b/drivers/iio/dac/ad5446.c
index cae8f6056ac..96e9ed4c2d0 100644
--- a/drivers/iio/dac/ad5446.c
+++ b/drivers/iio/dac/ad5446.c
@@ -220,11 +220,11 @@ static int ad5446_probe(struct device *dev, const char *name,
struct regulator *reg;
int ret, voltage_uv = 0;
- reg = regulator_get(dev, "vcc");
+ reg = devm_regulator_get(dev, "vcc");
if (!IS_ERR(reg)) {
ret = regulator_enable(reg);
if (ret)
- goto error_put_reg;
+ return ret;
ret = regulator_get_voltage(reg);
if (ret < 0)
@@ -233,7 +233,7 @@ static int ad5446_probe(struct device *dev, const char *name,
voltage_uv = ret;
}
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
if (indio_dev == NULL) {
ret = -ENOMEM;
goto error_disable_reg;
@@ -264,19 +264,13 @@ static int ad5446_probe(struct device *dev, const char *name,
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_device;
+ goto error_disable_reg;
return 0;
-error_free_device:
- iio_device_free(indio_dev);
error_disable_reg:
if (!IS_ERR(reg))
regulator_disable(reg);
-error_put_reg:
- if (!IS_ERR(reg))
- regulator_put(reg);
-
return ret;
}
@@ -286,11 +280,8 @@ static int ad5446_remove(struct device *dev)
struct ad5446_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- if (!IS_ERR(st->reg)) {
+ if (!IS_ERR(st->reg))
regulator_disable(st->reg);
- regulator_put(st->reg);
- }
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/dac/ad5449.c b/drivers/iio/dac/ad5449.c
index ba1c914b039..fff7d0762c0 100644
--- a/drivers/iio/dac/ad5449.c
+++ b/drivers/iio/dac/ad5449.c
@@ -275,7 +275,7 @@ static int ad5449_spi_probe(struct spi_device *spi)
unsigned int i;
int ret;
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
@@ -288,14 +288,14 @@ static int ad5449_spi_probe(struct spi_device *spi)
for (i = 0; i < st->chip_info->num_channels; ++i)
st->vref_reg[i].supply = ad5449_vref_name(st, i);
- ret = regulator_bulk_get(&spi->dev, st->chip_info->num_channels,
+ ret = devm_regulator_bulk_get(&spi->dev, st->chip_info->num_channels,
st->vref_reg);
if (ret)
- goto error_free;
+ return ret;
ret = regulator_bulk_enable(st->chip_info->num_channels, st->vref_reg);
if (ret)
- goto error_free_reg;
+ return ret;
indio_dev->dev.parent = &spi->dev;
indio_dev->name = id->name;
@@ -325,10 +325,6 @@ static int ad5449_spi_probe(struct spi_device *spi)
error_disable_reg:
regulator_bulk_disable(st->chip_info->num_channels, st->vref_reg);
-error_free_reg:
- regulator_bulk_free(st->chip_info->num_channels, st->vref_reg);
-error_free:
- iio_device_free(indio_dev);
return ret;
}
@@ -341,9 +337,6 @@ static int ad5449_spi_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
regulator_bulk_disable(st->chip_info->num_channels, st->vref_reg);
- regulator_bulk_free(st->chip_info->num_channels, st->vref_reg);
-
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c
index 139206e84cb..caffb16bc05 100644
--- a/drivers/iio/dac/ad5504.c
+++ b/drivers/iio/dac/ad5504.c
@@ -281,16 +281,14 @@ static int ad5504_probe(struct spi_device *spi)
struct regulator *reg;
int ret, voltage_uv = 0;
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- reg = regulator_get(&spi->dev, "vcc");
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+ reg = devm_regulator_get(&spi->dev, "vcc");
if (!IS_ERR(reg)) {
ret = regulator_enable(reg);
if (ret)
- goto error_put_reg;
+ return ret;
ret = regulator_get_voltage(reg);
if (ret < 0)
@@ -321,7 +319,7 @@ static int ad5504_probe(struct spi_device *spi)
indio_dev->modes = INDIO_DIRECT_MODE;
if (spi->irq) {
- ret = request_threaded_irq(spi->irq,
+ ret = devm_request_threaded_irq(&spi->dev, spi->irq,
NULL,
&ad5504_event_handler,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
@@ -333,22 +331,14 @@ static int ad5504_probe(struct spi_device *spi)
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_irq;
+ goto error_disable_reg;
return 0;
-error_free_irq:
- if (spi->irq)
- free_irq(spi->irq, indio_dev);
error_disable_reg:
if (!IS_ERR(reg))
regulator_disable(reg);
-error_put_reg:
- if (!IS_ERR(reg))
- regulator_put(reg);
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -358,14 +348,9 @@ static int ad5504_remove(struct spi_device *spi)
struct ad5504_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- if (spi->irq)
- free_irq(spi->irq, indio_dev);
- if (!IS_ERR(st->reg)) {
+ if (!IS_ERR(st->reg))
regulator_disable(st->reg);
- regulator_put(st->reg);
- }
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/dac/ad5624r_spi.c b/drivers/iio/dac/ad5624r_spi.c
index bb298aaff32..714af757cd5 100644
--- a/drivers/iio/dac/ad5624r_spi.c
+++ b/drivers/iio/dac/ad5624r_spi.c
@@ -226,17 +226,15 @@ static int ad5624r_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
int ret, voltage_uv = 0;
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
- st->reg = regulator_get(&spi->dev, "vcc");
+ st->reg = devm_regulator_get(&spi->dev, "vcc");
if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
if (ret)
- goto error_put_reg;
+ return ret;
ret = regulator_get_voltage(st->reg);
if (ret < 0)
@@ -277,11 +275,6 @@ static int ad5624r_probe(struct spi_device *spi)
error_disable_reg:
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
-error_put_reg:
- if (!IS_ERR(st->reg))
- regulator_put(st->reg);
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -292,11 +285,8 @@ static int ad5624r_remove(struct spi_device *spi)
struct ad5624r_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- if (!IS_ERR(st->reg)) {
+ if (!IS_ERR(st->reg))
regulator_disable(st->reg);
- regulator_put(st->reg);
- }
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/dac/ad5686.c b/drivers/iio/dac/ad5686.c
index 06439b1af9b..57825ead7db 100644
--- a/drivers/iio/dac/ad5686.c
+++ b/drivers/iio/dac/ad5686.c
@@ -314,18 +314,18 @@ static int ad5686_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
int ret, regdone = 0, voltage_uv = 0;
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
st = iio_priv(indio_dev);
spi_set_drvdata(spi, indio_dev);
- st->reg = regulator_get(&spi->dev, "vcc");
+ st->reg = devm_regulator_get(&spi->dev, "vcc");
if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
if (ret)
- goto error_put_reg;
+ return ret;
ret = regulator_get_voltage(st->reg);
if (ret < 0)
@@ -369,12 +369,6 @@ static int ad5686_probe(struct spi_device *spi)
error_disable_reg:
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
-error_put_reg:
- if (!IS_ERR(st->reg))
- regulator_put(st->reg);
-
- iio_device_free(indio_dev);
-
return ret;
}
@@ -384,11 +378,8 @@ static int ad5686_remove(struct spi_device *spi)
struct ad5686_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- if (!IS_ERR(st->reg)) {
+ if (!IS_ERR(st->reg))
regulator_disable(st->reg);
- regulator_put(st->reg);
- }
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/dac/ad5755.c b/drivers/iio/dac/ad5755.c
index 12bb315e55f..36a4361aece 100644
--- a/drivers/iio/dac/ad5755.c
+++ b/drivers/iio/dac/ad5755.c
@@ -565,7 +565,7 @@ static int ad5755_probe(struct spi_device *spi)
struct ad5755_state *st;
int ret;
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL) {
dev_err(&spi->dev, "Failed to allocate iio device\n");
return -ENOMEM;
@@ -589,24 +589,19 @@ static int ad5755_probe(struct spi_device *spi)
ret = ad5755_init_channels(indio_dev, pdata);
if (ret)
- goto error_free;
+ return ret;
ret = ad5755_setup_pdata(indio_dev, pdata);
if (ret)
- goto error_free;
+ return ret;
ret = iio_device_register(indio_dev);
if (ret) {
dev_err(&spi->dev, "Failed to register iio device: %d\n", ret);
- goto error_free;
+ return ret;
}
return 0;
-
-error_free:
- iio_device_free(indio_dev);
-
- return ret;
}
static int ad5755_remove(struct spi_device *spi)
@@ -614,7 +609,6 @@ static int ad5755_remove(struct spi_device *spi)
struct iio_dev *indio_dev = spi_get_drvdata(spi);
iio_device_unregister(indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/dac/ad5764.c b/drivers/iio/dac/ad5764.c
index 7a53f7d70da..df7e028d9db 100644
--- a/drivers/iio/dac/ad5764.c
+++ b/drivers/iio/dac/ad5764.c
@@ -275,7 +275,7 @@ static int ad5764_probe(struct spi_device *spi)
struct ad5764_state *st;
int ret;
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL) {
dev_err(&spi->dev, "Failed to allocate iio device\n");
return -ENOMEM;
@@ -298,12 +298,12 @@ static int ad5764_probe(struct spi_device *spi)
st->vref_reg[0].supply = "vrefAB";
st->vref_reg[1].supply = "vrefCD";
- ret = regulator_bulk_get(&st->spi->dev,
+ ret = devm_regulator_bulk_get(&st->spi->dev,
ARRAY_SIZE(st->vref_reg), st->vref_reg);
if (ret) {
dev_err(&spi->dev, "Failed to request vref regulators: %d\n",
ret);
- goto error_free;
+ return ret;
}
ret = regulator_bulk_enable(ARRAY_SIZE(st->vref_reg),
@@ -311,7 +311,7 @@ static int ad5764_probe(struct spi_device *spi)
if (ret) {
dev_err(&spi->dev, "Failed to enable vref regulators: %d\n",
ret);
- goto error_free_reg;
+ return ret;
}
}
@@ -326,12 +326,6 @@ static int ad5764_probe(struct spi_device *spi)
error_disable_reg:
if (st->chip_info->int_vref == 0)
regulator_bulk_disable(ARRAY_SIZE(st->vref_reg), st->vref_reg);
-error_free_reg:
- if (st->chip_info->int_vref == 0)
- regulator_bulk_free(ARRAY_SIZE(st->vref_reg), st->vref_reg);
-error_free:
- iio_device_free(indio_dev);
-
return ret;
}
@@ -342,12 +336,8 @@ static int ad5764_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
- if (st->chip_info->int_vref == 0) {
+ if (st->chip_info->int_vref == 0)
regulator_bulk_disable(ARRAY_SIZE(st->vref_reg), st->vref_reg);
- regulator_bulk_free(ARRAY_SIZE(st->vref_reg), st->vref_reg);
- }
-
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c
index 97c1e5d780d..ce745896330 100644
--- a/drivers/iio/dac/ad5791.c
+++ b/drivers/iio/dac/ad5791.c
@@ -349,17 +349,15 @@ static int ad5791_probe(struct spi_device *spi)
struct ad5791_state *st;
int ret, pos_voltage_uv = 0, neg_voltage_uv = 0;
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
- st->reg_vdd = regulator_get(&spi->dev, "vdd");
+ st->reg_vdd = devm_regulator_get(&spi->dev, "vdd");
if (!IS_ERR(st->reg_vdd)) {
ret = regulator_enable(st->reg_vdd);
if (ret)
- goto error_put_reg_pos;
+ return ret;
ret = regulator_get_voltage(st->reg_vdd);
if (ret < 0)
@@ -368,11 +366,11 @@ static int ad5791_probe(struct spi_device *spi)
pos_voltage_uv = ret;
}
- st->reg_vss = regulator_get(&spi->dev, "vss");
+ st->reg_vss = devm_regulator_get(&spi->dev, "vss");
if (!IS_ERR(st->reg_vss)) {
ret = regulator_enable(st->reg_vss);
if (ret)
- goto error_put_reg_neg;
+ goto error_disable_reg_pos;
ret = regulator_get_voltage(st->reg_vss);
if (ret < 0)
@@ -428,19 +426,9 @@ static int ad5791_probe(struct spi_device *spi)
error_disable_reg_neg:
if (!IS_ERR(st->reg_vss))
regulator_disable(st->reg_vss);
-error_put_reg_neg:
- if (!IS_ERR(st->reg_vss))
- regulator_put(st->reg_vss);
-
error_disable_reg_pos:
if (!IS_ERR(st->reg_vdd))
regulator_disable(st->reg_vdd);
-error_put_reg_pos:
- if (!IS_ERR(st->reg_vdd))
- regulator_put(st->reg_vdd);
- iio_device_free(indio_dev);
-error_ret:
-
return ret;
}
@@ -450,16 +438,11 @@ static int ad5791_remove(struct spi_device *spi)
struct ad5791_state *st = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- if (!IS_ERR(st->reg_vdd)) {
+ if (!IS_ERR(st->reg_vdd))
regulator_disable(st->reg_vdd);
- regulator_put(st->reg_vdd);
- }
- if (!IS_ERR(st->reg_vss)) {
+ if (!IS_ERR(st->reg_vss))
regulator_disable(st->reg_vss);
- regulator_put(st->reg_vss);
- }
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index d546f50f925..ed2d276477b 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -203,7 +203,7 @@ static int ad7303_probe(struct spi_device *spi)
bool ext_ref;
int ret;
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
@@ -212,15 +212,13 @@ static int ad7303_probe(struct spi_device *spi)
st->spi = spi;
- st->vdd_reg = regulator_get(&spi->dev, "Vdd");
- if (IS_ERR(st->vdd_reg)) {
- ret = PTR_ERR(st->vdd_reg);
- goto err_free;
- }
+ st->vdd_reg = devm_regulator_get(&spi->dev, "Vdd");
+ if (IS_ERR(st->vdd_reg))
+ return PTR_ERR(st->vdd_reg);
ret = regulator_enable(st->vdd_reg);
if (ret)
- goto err_put_vdd_reg;
+ return ret;
if (spi->dev.of_node) {
ext_ref = of_property_read_bool(spi->dev.of_node,
@@ -234,7 +232,7 @@ static int ad7303_probe(struct spi_device *spi)
}
if (ext_ref) {
- st->vref_reg = regulator_get(&spi->dev, "REF");
+ st->vref_reg = devm_regulator_get(&spi->dev, "REF");
if (IS_ERR(st->vref_reg)) {
ret = PTR_ERR(st->vref_reg);
goto err_disable_vdd_reg;
@@ -242,7 +240,7 @@ static int ad7303_probe(struct spi_device *spi)
ret = regulator_enable(st->vref_reg);
if (ret)
- goto err_put_vref_reg;
+ goto err_disable_vdd_reg;
st->config |= AD7303_CFG_EXTERNAL_VREF;
}
@@ -263,16 +261,8 @@ static int ad7303_probe(struct spi_device *spi)
err_disable_vref_reg:
if (st->vref_reg)
regulator_disable(st->vref_reg);
-err_put_vref_reg:
- if (st->vref_reg)
- regulator_put(st->vref_reg);
err_disable_vdd_reg:
regulator_disable(st->vdd_reg);
-err_put_vdd_reg:
- regulator_put(st->vdd_reg);
-err_free:
- iio_device_free(indio_dev);
-
return ret;
}
@@ -283,14 +273,9 @@ static int ad7303_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
- if (st->vref_reg) {
+ if (st->vref_reg)
regulator_disable(st->vref_reg);
- regulator_put(st->vref_reg);
- }
regulator_disable(st->vdd_reg);
- regulator_put(st->vdd_reg);
-
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/dac/max517.c b/drivers/iio/dac/max517.c
index ebfaa415624..83adcbf1a20 100644
--- a/drivers/iio/dac/max517.c
+++ b/drivers/iio/dac/max517.c
@@ -164,11 +164,9 @@ static int max517_probe(struct i2c_client *client,
struct max517_platform_data *platform_data = client->dev.platform_data;
int err;
- indio_dev = iio_device_alloc(sizeof(*data));
- if (indio_dev == NULL) {
- err = -ENOMEM;
- goto exit;
- }
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
@@ -198,23 +196,16 @@ static int max517_probe(struct i2c_client *client,
err = iio_device_register(indio_dev);
if (err)
- goto exit_free_device;
+ return err;
dev_info(&client->dev, "DAC registered\n");
return 0;
-
-exit_free_device:
- iio_device_free(indio_dev);
-exit:
- return err;
}
static int max517_remove(struct i2c_client *client)
{
iio_device_unregister(i2c_get_clientdata(client));
- iio_device_free(i2c_get_clientdata(client));
-
return 0;
}
diff --git a/drivers/iio/dac/mcp4725.c b/drivers/iio/dac/mcp4725.c
index a612ec766d9..1f4a48e6a82 100644
--- a/drivers/iio/dac/mcp4725.c
+++ b/drivers/iio/dac/mcp4725.c
@@ -12,14 +12,13 @@
* driver for the Microchip I2C 12-bit digital-to-analog converter (DAC)
* (7-bit I2C slave address 0x60, the three LSBs can be configured in
* hardware)
- *
- * writing the DAC value to EEPROM is not supported
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/err.h>
+#include <linux/delay.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
@@ -32,15 +31,19 @@ struct mcp4725_data {
struct i2c_client *client;
u16 vref_mv;
u16 dac_value;
+ bool powerdown;
+ unsigned powerdown_mode;
};
-#ifdef CONFIG_PM_SLEEP
static int mcp4725_suspend(struct device *dev)
{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct mcp4725_data *data = iio_priv(indio_dev);
u8 outbuf[2];
- outbuf[0] = 0x3 << 4; /* power-down bits, 500 kOhm resistor */
+ outbuf[0] = (data->powerdown_mode + 1) << 4;
outbuf[1] = 0;
+ data->powerdown = true;
return i2c_master_send(to_i2c_client(dev), outbuf, 2);
}
@@ -54,16 +57,150 @@ static int mcp4725_resume(struct device *dev)
/* restore previous DAC value */
outbuf[0] = (data->dac_value >> 8) & 0xf;
outbuf[1] = data->dac_value & 0xff;
+ data->powerdown = false;
return i2c_master_send(to_i2c_client(dev), outbuf, 2);
}
+#ifdef CONFIG_PM_SLEEP
static SIMPLE_DEV_PM_OPS(mcp4725_pm_ops, mcp4725_suspend, mcp4725_resume);
#define MCP4725_PM_OPS (&mcp4725_pm_ops)
#else
#define MCP4725_PM_OPS NULL
#endif
+static ssize_t mcp4725_store_eeprom(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct mcp4725_data *data = iio_priv(indio_dev);
+ int tries = 20;
+ u8 inoutbuf[3];
+ bool state;
+ int ret;
+
+ ret = strtobool(buf, &state);
+ if (ret < 0)
+ return ret;
+
+ if (!state)
+ return 0;
+
+ inoutbuf[0] = 0x60; /* write EEPROM */
+ inoutbuf[1] = data->dac_value >> 4;
+ inoutbuf[2] = (data->dac_value & 0xf) << 4;
+
+ ret = i2c_master_send(data->client, inoutbuf, 3);
+ if (ret < 0)
+ return ret;
+ else if (ret != 3)
+ return -EIO;
+
+ /* wait for write complete, takes up to 50ms */
+ while (tries--) {
+ msleep(20);
+ ret = i2c_master_recv(data->client, inoutbuf, 3);
+ if (ret < 0)
+ return ret;
+ else if (ret != 3)
+ return -EIO;
+
+ if (inoutbuf[0] & 0x80)
+ break;
+ }
+
+ if (tries < 0) {
+ dev_err(&data->client->dev,
+ "mcp4725_store_eeprom() failed, incomplete\n");
+ return -EIO;
+ }
+
+ return len;
+}
+
+static IIO_DEVICE_ATTR(store_eeprom, S_IWUSR, NULL, mcp4725_store_eeprom, 0);
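+
+/*
+ * Illustrative use from userspace (device index depends on probe
+ * order): echo 1 > /sys/bus/iio/devices/iio:deviceN/store_eeprom
+ * persists the current DAC value to the on-chip EEPROM.
+ */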
+
+static struct attribute *mcp4725_attributes[] = {
+ &iio_dev_attr_store_eeprom.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group mcp4725_attribute_group = {
+ .attrs = mcp4725_attributes,
+};
+
+static const char * const mcp4725_powerdown_modes[] = {
+ "1kohm_to_gnd",
+ "100kohm_to_gnd",
+ "500kohm_to_gnd"
+};
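+
+/*
+ * The index into this table maps onto the power-down select bits
+ * (PD1:PD0) written in mcp4725_suspend() as (mode + 1) << 4, i.e.
+ * 01 = 1k, 10 = 100k, 11 = 500k pull-down to ground, while 00 means
+ * normal operation.
+ */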
+
+static int mcp4725_get_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ struct mcp4725_data *data = iio_priv(indio_dev);
+
+ return data->powerdown_mode;
+}
+
+static int mcp4725_set_powerdown_mode(struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan, unsigned mode)
+{
+ struct mcp4725_data *data = iio_priv(indio_dev);
+
+ data->powerdown_mode = mode;
+
+ return 0;
+}
+
+static ssize_t mcp4725_read_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan, char *buf)
+{
+ struct mcp4725_data *data = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", data->powerdown);
+}
+
+static ssize_t mcp4725_write_powerdown(struct iio_dev *indio_dev,
+ uintptr_t private, const struct iio_chan_spec *chan,
+ const char *buf, size_t len)
+{
+ struct mcp4725_data *data = iio_priv(indio_dev);
+ bool state;
+ int ret;
+
+ ret = strtobool(buf, &state);
+ if (ret)
+ return ret;
+
+ if (state)
+ ret = mcp4725_suspend(&data->client->dev);
+ else
+ ret = mcp4725_resume(&data->client->dev);
+ if (ret < 0)
+ return ret;
+
+ return len;
+}
+
+static const struct iio_enum mcp4725_powerdown_mode_enum = {
+ .items = mcp4725_powerdown_modes,
+ .num_items = ARRAY_SIZE(mcp4725_powerdown_modes),
+ .get = mcp4725_get_powerdown_mode,
+ .set = mcp4725_set_powerdown_mode,
+};
+
+static const struct iio_chan_spec_ext_info mcp4725_ext_info[] = {
+ {
+ .name = "powerdown",
+ .read = mcp4725_read_powerdown,
+ .write = mcp4725_write_powerdown,
+ },
+ IIO_ENUM("powerdown_mode", false, &mcp4725_powerdown_mode_enum),
+ IIO_ENUM_AVAILABLE("powerdown_mode", &mcp4725_powerdown_mode_enum),
+ { },
+};
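+
+/*
+ * With the channel declared as an output, these entries are expected
+ * to surface in sysfs as out_voltage0_powerdown and
+ * out_voltage0_powerdown_mode, plus a matching powerdown_mode_available
+ * attribute listing the three modes above.
+ */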
+
static const struct iio_chan_spec mcp4725_channel = {
.type = IIO_VOLTAGE,
.indexed = 1,
@@ -72,6 +209,7 @@ static const struct iio_chan_spec mcp4725_channel = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
.scan_type = IIO_ST('u', 12, 16, 0),
+ .ext_info = mcp4725_ext_info,
};
static int mcp4725_set_value(struct iio_dev *indio_dev, int val)
@@ -138,6 +276,7 @@ static int mcp4725_write_raw(struct iio_dev *indio_dev,
static const struct iio_info mcp4725_info = {
.read_raw = mcp4725_read_raw,
.write_raw = mcp4725_write_raw,
+ .attrs = &mcp4725_attribute_group,
.driver_module = THIS_MODULE,
};
@@ -148,19 +287,17 @@ static int mcp4725_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
struct mcp4725_platform_data *platform_data = client->dev.platform_data;
u8 inbuf[3];
+ u8 pd;
int err;
if (!platform_data || !platform_data->vref_mv) {
dev_err(&client->dev, "invalid platform data");
- err = -EINVAL;
- goto exit;
+ return -EINVAL;
}
- indio_dev = iio_device_alloc(sizeof(*data));
- if (indio_dev == NULL) {
- err = -ENOMEM;
- goto exit;
- }
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (indio_dev == NULL)
+ return -ENOMEM;
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
@@ -177,31 +314,25 @@ static int mcp4725_probe(struct i2c_client *client,
err = i2c_master_recv(client, inbuf, 3);
if (err < 0) {
dev_err(&client->dev, "failed to read DAC value");
- goto exit_free_device;
+ return err;
}
+ pd = (inbuf[0] >> 1) & 0x3;
+ data->powerdown = pd > 0;
+ data->powerdown_mode = pd ? pd - 1 : 2; /* 500kohm_to_gnd */
data->dac_value = (inbuf[1] << 4) | (inbuf[2] >> 4);
err = iio_device_register(indio_dev);
if (err)
- goto exit_free_device;
+ return err;
dev_info(&client->dev, "MCP4725 DAC registered\n");
return 0;
-
-exit_free_device:
- iio_device_free(indio_dev);
-exit:
- return err;
}
static int mcp4725_remove(struct i2c_client *client)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- iio_device_unregister(indio_dev);
- iio_device_free(indio_dev);
-
+ iio_device_unregister(i2c_get_clientdata(client));
return 0;
}
diff --git a/drivers/iio/frequency/Kconfig b/drivers/iio/frequency/Kconfig
index 6aaa33ef454..dc5e0b72882 100644
--- a/drivers/iio/frequency/Kconfig
+++ b/drivers/iio/frequency/Kconfig
@@ -4,6 +4,7 @@
# Clock Distribution device drivers
# Phase-Locked Loop (PLL) frequency synthesizers
#
+# When adding new entries keep the list in alphabetical order
menu "Frequency Synthesizers DDS/PLL"
diff --git a/drivers/iio/frequency/Makefile b/drivers/iio/frequency/Makefile
index 00d26e5d1dc..2bca03f3e2e 100644
--- a/drivers/iio/frequency/Makefile
+++ b/drivers/iio/frequency/Makefile
@@ -2,5 +2,6 @@
# Makefile iio/frequency
#
+# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AD9523) += ad9523.o
obj-$(CONFIG_ADF4350) += adf4350.o
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index 92276deeb02..7c5245d9f99 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -961,17 +961,17 @@ static int ad9523_probe(struct spi_device *spi)
return -EINVAL;
}
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
st = iio_priv(indio_dev);
- st->reg = regulator_get(&spi->dev, "vcc");
+ st->reg = devm_regulator_get(&spi->dev, "vcc");
if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
if (ret)
- goto error_put_reg;
+ return ret;
}
spi_set_drvdata(spi, indio_dev);
@@ -1001,11 +1001,6 @@ static int ad9523_probe(struct spi_device *spi)
error_disable_reg:
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
-error_put_reg:
- if (!IS_ERR(st->reg))
- regulator_put(st->reg);
-
- iio_device_free(indio_dev);
return ret;
}
@@ -1017,12 +1012,8 @@ static int ad9523_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
- if (!IS_ERR(st->reg)) {
+ if (!IS_ERR(st->reg))
regulator_disable(st->reg);
- regulator_put(st->reg);
- }
-
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index a4157cdb314..a7b30be86ae 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -515,7 +515,7 @@ static int adf4350_probe(struct spi_device *spi)
}
if (!pdata->clkin) {
- clk = clk_get(&spi->dev, "clkin");
+ clk = devm_clk_get(&spi->dev, "clkin");
if (IS_ERR(clk))
return -EPROBE_DEFER;
@@ -524,17 +524,17 @@ static int adf4350_probe(struct spi_device *spi)
return ret;
}
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
st = iio_priv(indio_dev);
- st->reg = regulator_get(&spi->dev, "vcc");
+ st->reg = devm_regulator_get(&spi->dev, "vcc");
if (!IS_ERR(st->reg)) {
ret = regulator_enable(st->reg);
if (ret)
- goto error_put_reg;
+ goto error_disable_clk;
}
spi_set_drvdata(spi, indio_dev);
@@ -564,7 +564,8 @@ static int adf4350_probe(struct spi_device *spi)
memset(st->regs_hw, 0xFF, sizeof(st->regs_hw));
if (gpio_is_valid(pdata->gpio_lock_detect)) {
- ret = gpio_request(pdata->gpio_lock_detect, indio_dev->name);
+ ret = devm_gpio_request(&spi->dev, pdata->gpio_lock_detect,
+ indio_dev->name);
if (ret) {
dev_err(&spi->dev, "fail to request lock detect GPIO-%d",
pdata->gpio_lock_detect);
@@ -576,29 +577,21 @@ static int adf4350_probe(struct spi_device *spi)
if (pdata->power_up_frequency) {
ret = adf4350_set_freq(st, pdata->power_up_frequency);
if (ret)
- goto error_free_gpio;
+ goto error_disable_reg;
}
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_gpio;
+ goto error_disable_reg;
return 0;
-error_free_gpio:
- if (gpio_is_valid(pdata->gpio_lock_detect))
- gpio_free(pdata->gpio_lock_detect);
-
error_disable_reg:
if (!IS_ERR(st->reg))
regulator_disable(st->reg);
-error_put_reg:
- if (!IS_ERR(st->reg))
- regulator_put(st->reg);
-
+error_disable_clk:
if (clk)
clk_disable_unprepare(clk);
- iio_device_free(indio_dev);
return ret;
}
@@ -619,14 +612,8 @@ static int adf4350_remove(struct spi_device *spi)
if (!IS_ERR(reg)) {
regulator_disable(reg);
- regulator_put(reg);
}
- if (gpio_is_valid(st->pdata->gpio_lock_detect))
- gpio_free(st->pdata->gpio_lock_detect);
-
- iio_device_free(indio_dev);
-
return 0;
}
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 8498e9dcda6..41c64a43bca 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -1,6 +1,8 @@
#
# IIO Digital Gyroscope Sensor drivers configuration
#
+# When adding new entries keep the list in alphabetical order
+
menu "Digital gyroscope sensors"
config ADIS16080
@@ -26,6 +28,18 @@ config ADIS16136
Say yes here to build support for the Analog Devices ADIS16133, ADIS16135,
ADIS16136 gyroscope devices.
+config ADIS16260
+ tristate "Analog Devices ADIS16260 Digital Gyroscope Sensor SPI driver"
+ depends on SPI
+ select IIO_ADIS_LIB
+ select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
+ help
+ Say yes here to build support for Analog Devices ADIS16260 ADIS16265
+ ADIS16250 ADIS16255 and ADIS16251 programmable digital gyroscope sensors.
+
+ This driver can also be built as a module. If so, the module
+ will be called adis16260.
+
config ADXRS450
tristate "Analog Devices ADXRS450/3 Digital Output Gyroscope SPI driver"
depends on SPI
@@ -58,8 +72,8 @@ config IIO_ST_GYRO_3AXIS
Say yes here to build support for STMicroelectronics gyroscopes:
L3G4200D, LSM330DL, L3GD20, L3GD20H, LSM330DLC, L3G4IS, LSM330.
- This driver can also be built as a module. If so, will be created
- these modules:
+ This driver can also be built as a module. If so, these modules
+ will be created:
- st_gyro (core functions for the driver [it is mandatory]);
- st_gyro_i2c (necessary for the I2C devices [optional*]);
- st_gyro_spi (necessary for the SPI devices [optional*]);
diff --git a/drivers/iio/gyro/Makefile b/drivers/iio/gyro/Makefile
index e9dc034aa18..2f2752a4ea8 100644
--- a/drivers/iio/gyro/Makefile
+++ b/drivers/iio/gyro/Makefile
@@ -2,9 +2,11 @@
# Makefile for industrial I/O gyroscope sensor drivers
#
+# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_ADIS16080) += adis16080.o
obj-$(CONFIG_ADIS16130) += adis16130.o
obj-$(CONFIG_ADIS16136) += adis16136.o
+obj-$(CONFIG_ADIS16260) += adis16260.o
obj-$(CONFIG_ADXRS450) += adxrs450.o
obj-$(CONFIG_HID_SENSOR_GYRO_3D) += hid-sensor-gyro-3d.o
diff --git a/drivers/iio/gyro/adis16080.c b/drivers/iio/gyro/adis16080.c
index e1bb5f994a5..e9ec022ae22 100644
--- a/drivers/iio/gyro/adis16080.c
+++ b/drivers/iio/gyro/adis16080.c
@@ -192,16 +192,13 @@ static const struct adis16080_chip_info adis16080_chip_info[] = {
static int adis16080_probe(struct spi_device *spi)
{
const struct spi_device_id *id = spi_get_device_id(spi);
- int ret;
struct adis16080_state *st;
struct iio_dev *indio_dev;
/* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
@@ -217,22 +214,12 @@ static int adis16080_probe(struct spi_device *spi)
indio_dev->info = &adis16080_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_dev;
- return 0;
-
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
- return ret;
+ return iio_device_register(indio_dev);
}
static int adis16080_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
- iio_device_free(spi_get_drvdata(spi));
-
return 0;
}
diff --git a/drivers/iio/gyro/adis16130.c b/drivers/iio/gyro/adis16130.c
index 129acdf801a..ac66fc18404 100644
--- a/drivers/iio/gyro/adis16130.c
+++ b/drivers/iio/gyro/adis16130.c
@@ -148,16 +148,13 @@ static const struct iio_info adis16130_info = {
static int adis16130_probe(struct spi_device *spi)
{
- int ret;
struct adis16130_state *st;
struct iio_dev *indio_dev;
/* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
@@ -170,24 +167,12 @@ static int adis16130_probe(struct spi_device *spi)
indio_dev->info = &adis16130_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_free_dev;
-
- return 0;
-
-error_free_dev:
- iio_device_free(indio_dev);
-
-error_ret:
- return ret;
+ return iio_device_register(indio_dev);
}
static int adis16130_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
- iio_device_free(spi_get_drvdata(spi));
-
return 0;
}
diff --git a/drivers/iio/gyro/adis16136.c b/drivers/iio/gyro/adis16136.c
index 058e6d5c955..591bd555e1f 100644
--- a/drivers/iio/gyro/adis16136.c
+++ b/drivers/iio/gyro/adis16136.c
@@ -497,7 +497,7 @@ static int adis16136_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
int ret;
- indio_dev = iio_device_alloc(sizeof(*adis16136));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adis16136));
if (indio_dev == NULL)
return -ENOMEM;
@@ -515,11 +515,11 @@ static int adis16136_probe(struct spi_device *spi)
ret = adis_init(&adis16136->adis, indio_dev, spi, &adis16136_data);
if (ret)
- goto error_free_dev;
+ return ret;
ret = adis_setup_buffer_and_trigger(&adis16136->adis, indio_dev, NULL);
if (ret)
- goto error_free_dev;
+ return ret;
ret = adis16136_initial_setup(indio_dev);
if (ret)
@@ -537,8 +537,6 @@ error_stop_device:
adis16136_stop_device(indio_dev);
error_cleanup_buffer:
adis_cleanup_buffer_and_trigger(&adis16136->adis, indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
return ret;
}
@@ -552,8 +550,6 @@ static int adis16136_remove(struct spi_device *spi)
adis_cleanup_buffer_and_trigger(&adis16136->adis, indio_dev);
- iio_device_free(indio_dev);
-
return 0;
}
diff --git a/drivers/staging/iio/gyro/adis16260_core.c b/drivers/iio/gyro/adis16260.c
index 620d63fd099..06541162fc0 100644
--- a/drivers/staging/iio/gyro/adis16260_core.c
+++ b/drivers/iio/gyro/adis16260.c
@@ -7,54 +7,119 @@
*/
#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
-#include <linux/slab.h>
#include <linux/sysfs.h>
-#include <linux/list.h>
#include <linux/module.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
+#include <linux/iio/imu/adis.h>
+
+#define ADIS16260_STARTUP_DELAY 220 /* ms */
+
+#define ADIS16260_FLASH_CNT 0x00 /* Flash memory write count */
+#define ADIS16260_SUPPLY_OUT 0x02 /* Power supply measurement */
+#define ADIS16260_GYRO_OUT 0x04 /* X-axis gyroscope output */
+#define ADIS16260_AUX_ADC 0x0A /* analog input channel measurement */
+#define ADIS16260_TEMP_OUT 0x0C /* internal temperature measurement */
+#define ADIS16260_ANGL_OUT 0x0E /* angle displacement */
+#define ADIS16260_GYRO_OFF 0x14 /* Calibration, offset/bias adjustment */
+#define ADIS16260_GYRO_SCALE 0x16 /* Calibration, scale adjustment */
+#define ADIS16260_ALM_MAG1 0x20 /* Alarm 1 magnitude/polarity setting */
+#define ADIS16260_ALM_MAG2 0x22 /* Alarm 2 magnitude/polarity setting */
+#define ADIS16260_ALM_SMPL1 0x24 /* Alarm 1 dynamic rate of change setting */
+#define ADIS16260_ALM_SMPL2 0x26 /* Alarm 2 dynamic rate of change setting */
+#define ADIS16260_ALM_CTRL 0x28 /* Alarm control */
+#define ADIS16260_AUX_DAC 0x30 /* Auxiliary DAC data */
+#define ADIS16260_GPIO_CTRL 0x32 /* Control, digital I/O line */
+#define ADIS16260_MSC_CTRL 0x34 /* Control, data ready, self-test settings */
+#define ADIS16260_SMPL_PRD 0x36 /* Control, internal sample rate */
+#define ADIS16260_SENS_AVG 0x38 /* Control, dynamic range, filtering */
+#define ADIS16260_SLP_CNT 0x3A /* Control, sleep mode initiation */
+#define ADIS16260_DIAG_STAT 0x3C /* Diagnostic, error flags */
+#define ADIS16260_GLOB_CMD 0x3E /* Control, global commands */
+#define ADIS16260_LOT_ID1 0x52 /* Lot Identification Code 1 */
+#define ADIS16260_LOT_ID2 0x54 /* Lot Identification Code 2 */
+#define ADIS16260_PROD_ID 0x56 /* Product identifier;
+ * convert to decimal = 16,265/16,260 */
+#define ADIS16260_SERIAL_NUM 0x58 /* Serial number */
+
+#define ADIS16260_ERROR_ACTIVE (1<<14)
+#define ADIS16260_NEW_DATA (1<<15)
+
+/* MSC_CTRL */
+#define ADIS16260_MSC_CTRL_MEM_TEST (1<<11)
+/* Internal self-test enable */
+#define ADIS16260_MSC_CTRL_INT_SELF_TEST (1<<10)
+#define ADIS16260_MSC_CTRL_NEG_SELF_TEST (1<<9)
+#define ADIS16260_MSC_CTRL_POS_SELF_TEST (1<<8)
+#define ADIS16260_MSC_CTRL_DATA_RDY_EN (1<<2)
+#define ADIS16260_MSC_CTRL_DATA_RDY_POL_HIGH (1<<1)
+#define ADIS16260_MSC_CTRL_DATA_RDY_DIO2 (1<<0)
+
+/* SMPL_PRD */
+/* Time base (tB): 0 = 1.953 ms, 1 = 60.54 ms */
+#define ADIS16260_SMPL_PRD_TIME_BASE (1<<7)
+#define ADIS16260_SMPL_PRD_DIV_MASK 0x7F
+
+/* SLP_CNT */
+#define ADIS16260_SLP_CNT_POWER_OFF 0x80
+
+/* DIAG_STAT */
+#define ADIS16260_DIAG_STAT_ALARM2 (1<<9)
+#define ADIS16260_DIAG_STAT_ALARM1 (1<<8)
+#define ADIS16260_DIAG_STAT_FLASH_CHK_BIT 6
+#define ADIS16260_DIAG_STAT_SELF_TEST_BIT 5
+#define ADIS16260_DIAG_STAT_OVERFLOW_BIT 4
+#define ADIS16260_DIAG_STAT_SPI_FAIL_BIT 3
+#define ADIS16260_DIAG_STAT_FLASH_UPT_BIT 2
+#define ADIS16260_DIAG_STAT_POWER_HIGH_BIT 1
+#define ADIS16260_DIAG_STAT_POWER_LOW_BIT 0
+
+/* GLOB_CMD */
+#define ADIS16260_GLOB_CMD_SW_RESET (1<<7)
+#define ADIS16260_GLOB_CMD_FLASH_UPD (1<<3)
+#define ADIS16260_GLOB_CMD_DAC_LATCH (1<<2)
+#define ADIS16260_GLOB_CMD_FAC_CALIB (1<<1)
+#define ADIS16260_GLOB_CMD_AUTO_NULL (1<<0)
+
+#define ADIS16260_SPI_SLOW (u32)(300 * 1000)
+#define ADIS16260_SPI_BURST (u32)(1000 * 1000)
+#define ADIS16260_SPI_FAST (u32)(2000 * 1000)
+
+/* At the moment triggers are only used for ring buffer
+ * filling. This may change!
+ */
-#include "adis16260.h"
-
-static ssize_t adis16260_read_frequency_available(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct adis16260_state *st = iio_priv(indio_dev);
- if (spi_get_device_id(st->adis.spi)->driver_data)
- return sprintf(buf, "%s\n", "0.129 ~ 256");
- else
- return sprintf(buf, "%s\n", "256 2048");
-}
+#define ADIS16260_SCAN_GYRO 0
+#define ADIS16260_SCAN_SUPPLY 1
+#define ADIS16260_SCAN_AUX_ADC 2
+#define ADIS16260_SCAN_TEMP 3
+#define ADIS16260_SCAN_ANGL 4
static ssize_t adis16260_read_frequency(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct adis16260_state *st = iio_priv(indio_dev);
+ struct adis *adis = iio_priv(indio_dev);
int ret, len = 0;
u16 t;
int sps;
- ret = adis_read_reg_16(&st->adis, ADIS16260_SMPL_PRD, &t);
+ ret = adis_read_reg_16(adis, ADIS16260_SMPL_PRD, &t);
if (ret)
return ret;
- if (spi_get_device_id(st->adis.spi)->driver_data) /* If an adis16251 */
- sps = (t & ADIS16260_SMPL_PRD_TIME_BASE) ? 8 : 256;
+ if (spi_get_device_id(adis->spi)->driver_data) /* If an adis16251 */
+ sps = (t & ADIS16260_SMPL_PRD_TIME_BASE) ? 8 : 256;
else
- sps = (t & ADIS16260_SMPL_PRD_TIME_BASE) ? 66 : 2048;
+ sps = (t & ADIS16260_SMPL_PRD_TIME_BASE) ? 66 : 2048;
sps /= (t & ADIS16260_SMPL_PRD_DIV_MASK) + 1;
- len = sprintf(buf, "%d SPS\n", sps);
+ len = sprintf(buf, "%d\n", sps);
return len;
}
@@ -64,36 +129,31 @@ static ssize_t adis16260_write_frequency(struct device *dev,
size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct adis16260_state *st = iio_priv(indio_dev);
- long val;
+ struct adis *adis = iio_priv(indio_dev);
+ unsigned int val;
int ret;
u8 t;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtouint(buf, 10, &val);
if (ret)
return ret;
- if (val == 0)
- return -EINVAL;
mutex_lock(&indio_dev->mlock);
- if (spi_get_device_id(st->adis.spi)->driver_data) {
- t = (256 / val);
- if (t > 0)
- t--;
- t &= ADIS16260_SMPL_PRD_DIV_MASK;
- } else {
- t = (2048 / val);
- if (t > 0)
- t--;
- t &= ADIS16260_SMPL_PRD_DIV_MASK;
- }
- if ((t & ADIS16260_SMPL_PRD_DIV_MASK) >= 0x0A)
- st->adis.spi->max_speed_hz = ADIS16260_SPI_SLOW;
+ if (spi_get_device_id(adis->spi)->driver_data)
+ t = 256 / val;
+ else
+ t = 2048 / val;
+
+ if (t > ADIS16260_SMPL_PRD_DIV_MASK)
+ t = ADIS16260_SMPL_PRD_DIV_MASK;
+ else if (t > 0)
+ t--;
+
+ if (t >= 0x0A)
+ adis->spi->max_speed_hz = ADIS16260_SPI_SLOW;
else
- st->adis.spi->max_speed_hz = ADIS16260_SPI_FAST;
- ret = adis_write_reg_8(&st->adis,
- ADIS16260_SMPL_PRD,
- t);
+ adis->spi->max_speed_hz = ADIS16260_SPI_FAST;
+ ret = adis_write_reg_8(adis, ADIS16260_SMPL_PRD, t);
mutex_unlock(&indio_dev->mlock);
@@ -103,11 +163,11 @@ static ssize_t adis16260_write_frequency(struct device *dev,
/* Power down the device */
static int adis16260_stop_device(struct iio_dev *indio_dev)
{
- struct adis16260_state *st = iio_priv(indio_dev);
+ struct adis *adis = iio_priv(indio_dev);
int ret;
u16 val = ADIS16260_SLP_CNT_POWER_OFF;
- ret = adis_write_reg_16(&st->adis, ADIS16260_SLP_CNT, val);
+ ret = adis_write_reg_16(adis, ADIS16260_SLP_CNT, val);
if (ret)
dev_err(&indio_dev->dev, "problem with turning device off: SLP_CNT");
@@ -118,24 +178,16 @@ static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
adis16260_read_frequency,
adis16260_write_frequency);
-static IIO_DEVICE_ATTR(sampling_frequency_available,
- S_IRUGO, adis16260_read_frequency_available, NULL, 0);
-
-#define ADIS16260_GYRO_CHANNEL_SET(axis, mod) \
-struct iio_chan_spec adis16260_channels_##axis[] = { \
- ADIS_GYRO_CHAN(mod, ADIS16260_GYRO_OUT, ADIS16260_SCAN_GYRO, \
- BIT(IIO_CHAN_INFO_CALIBBIAS) | \
- BIT(IIO_CHAN_INFO_CALIBSCALE), 14), \
- ADIS_INCLI_CHAN(mod, ADIS16260_ANGL_OUT, ADIS16260_SCAN_ANGL, 0, 14), \
- ADIS_TEMP_CHAN(ADIS16260_TEMP_OUT, ADIS16260_SCAN_TEMP, 12), \
- ADIS_SUPPLY_CHAN(ADIS16260_SUPPLY_OUT, ADIS16260_SCAN_SUPPLY, 12), \
- ADIS_AUX_ADC_CHAN(ADIS16260_AUX_ADC, ADIS16260_SCAN_AUX_ADC, 12), \
- IIO_CHAN_SOFT_TIMESTAMP(5), \
-}
-
-static const ADIS16260_GYRO_CHANNEL_SET(x, X);
-static const ADIS16260_GYRO_CHANNEL_SET(y, Y);
-static const ADIS16260_GYRO_CHANNEL_SET(z, Z);
+static const struct iio_chan_spec adis16260_channels[] = {
+ ADIS_GYRO_CHAN(X, ADIS16260_GYRO_OUT, ADIS16260_SCAN_GYRO,
+ BIT(IIO_CHAN_INFO_CALIBBIAS) |
+ BIT(IIO_CHAN_INFO_CALIBSCALE), 14),
+ ADIS_INCLI_CHAN(X, ADIS16260_ANGL_OUT, ADIS16260_SCAN_ANGL, 0, 14),
+ ADIS_TEMP_CHAN(ADIS16260_TEMP_OUT, ADIS16260_SCAN_TEMP, 12),
+ ADIS_SUPPLY_CHAN(ADIS16260_SUPPLY_OUT, ADIS16260_SCAN_SUPPLY, 12),
+ ADIS_AUX_ADC_CHAN(ADIS16260_AUX_ADC, ADIS16260_SCAN_AUX_ADC, 12),
+ IIO_CHAN_SOFT_TIMESTAMP(5),
+};
static const u8 adis16260_addresses[][2] = {
[ADIS16260_SCAN_GYRO] = { ADIS16260_GYRO_OFF, ADIS16260_GYRO_SCALE },
@@ -146,9 +198,8 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
int *val, int *val2,
long mask)
{
- struct adis16260_state *st = iio_priv(indio_dev);
+ struct adis *adis = iio_priv(indio_dev);
int ret;
- int bits;
u8 addr;
s16 val16;
@@ -160,7 +211,7 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
switch (chan->type) {
case IIO_ANGL_VEL:
*val = 0;
- if (spi_get_device_id(st->adis.spi)->driver_data) {
+ if (spi_get_device_id(adis->spi)->driver_data) {
/* 0.01832 degree / sec */
*val2 = IIO_DEGREE_TO_RAD(18320);
} else {
@@ -168,6 +219,10 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
*val2 = IIO_DEGREE_TO_RAD(73260);
}
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_INCLI:
+ *val = 0;
+ *val2 = IIO_DEGREE_TO_RAD(36630);
+ return IIO_VAL_INT_PLUS_MICRO;
case IIO_VOLTAGE:
if (chan->channel == 0) {
*val = 1;
@@ -189,42 +244,20 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
*val = 250000 / 1453; /* 25 C = 0x00 */
return IIO_VAL_INT;
case IIO_CHAN_INFO_CALIBBIAS:
- switch (chan->type) {
- case IIO_ANGL_VEL:
- bits = 12;
- break;
- default:
- return -EINVAL;
- }
- mutex_lock(&indio_dev->mlock);
addr = adis16260_addresses[chan->scan_index][0];
- ret = adis_read_reg_16(&st->adis, addr, &val16);
- if (ret) {
- mutex_unlock(&indio_dev->mlock);
+ ret = adis_read_reg_16(adis, addr, &val16);
+ if (ret)
return ret;
- }
- val16 &= (1 << bits) - 1;
- val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
- *val = val16;
- mutex_unlock(&indio_dev->mlock);
+
+ *val = sign_extend32(val16, 11);
return IIO_VAL_INT;
case IIO_CHAN_INFO_CALIBSCALE:
- switch (chan->type) {
- case IIO_ANGL_VEL:
- bits = 12;
- break;
- default:
- return -EINVAL;
- }
- mutex_lock(&indio_dev->mlock);
addr = adis16260_addresses[chan->scan_index][1];
- ret = adis_read_reg_16(&st->adis, addr, &val16);
- if (ret) {
- mutex_unlock(&indio_dev->mlock);
+ ret = adis_read_reg_16(adis, addr, &val16);
+ if (ret)
return ret;
- }
- *val = (1 << bits) - 1;
- mutex_unlock(&indio_dev->mlock);
+
+ *val = val16;
return IIO_VAL_INT;
}
return -EINVAL;
@@ -236,26 +269,28 @@ static int adis16260_write_raw(struct iio_dev *indio_dev,
int val2,
long mask)
{
- struct adis16260_state *st = iio_priv(indio_dev);
- int bits = 12;
- s16 val16;
+ struct adis *adis = iio_priv(indio_dev);
u8 addr;
+
switch (mask) {
case IIO_CHAN_INFO_CALIBBIAS:
- val16 = val & ((1 << bits) - 1);
+ if (val < -2048 || val >= 2048)
+ return -EINVAL;
+
addr = adis16260_addresses[chan->scan_index][0];
- return adis_write_reg_16(&st->adis, addr, val16);
+ return adis_write_reg_16(adis, addr, val);
case IIO_CHAN_INFO_CALIBSCALE:
- val16 = val & ((1 << bits) - 1);
+ if (val < 0 || val >= 4096)
+ return -EINVAL;
+
addr = adis16260_addresses[chan->scan_index][1];
- return adis_write_reg_16(&st->adis, addr, val16);
+ return adis_write_reg_16(adis, addr, val);
}
return -EINVAL;
}
static struct attribute *adis16260_attributes[] = {
&iio_dev_attr_sampling_frequency.dev_attr.attr,
- &iio_dev_attr_sampling_frequency_available.dev_attr.attr,
NULL
};
@@ -303,71 +338,35 @@ static const struct adis_data adis16260_data = {
static int adis16260_probe(struct spi_device *spi)
{
- int ret;
- struct adis16260_platform_data *pd = spi->dev.platform_data;
- struct adis16260_state *st;
struct iio_dev *indio_dev;
+ struct adis *adis;
+ int ret;
/* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- st = iio_priv(indio_dev);
- if (pd)
- st->negate = pd->negate;
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*adis));
+ if (!indio_dev)
+ return -ENOMEM;
+ adis = iio_priv(indio_dev);
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->dev.parent = &spi->dev;
indio_dev->info = &adis16260_info;
- indio_dev->num_channels
- = ARRAY_SIZE(adis16260_channels_x);
- if (pd && pd->direction)
- switch (pd->direction) {
- case 'x':
- indio_dev->channels = adis16260_channels_x;
- break;
- case 'y':
- indio_dev->channels = adis16260_channels_y;
- break;
- case 'z':
- indio_dev->channels = adis16260_channels_z;
- break;
- default:
- return -EINVAL;
- }
- else
- indio_dev->channels = adis16260_channels_x;
- indio_dev->num_channels = ARRAY_SIZE(adis16260_channels_x);
+ indio_dev->channels = adis16260_channels;
+ indio_dev->num_channels = ARRAY_SIZE(adis16260_channels);
indio_dev->modes = INDIO_DIRECT_MODE;
- ret = adis_init(&st->adis, indio_dev, spi, &adis16260_data);
+ ret = adis_init(adis, indio_dev, spi, &adis16260_data);
if (ret)
- goto error_free_dev;
+ return ret;
- ret = adis_setup_buffer_and_trigger(&st->adis, indio_dev, NULL);
+ ret = adis_setup_buffer_and_trigger(adis, indio_dev, NULL);
if (ret)
- goto error_free_dev;
-
- if (indio_dev->buffer) {
- /* Set default scan mode */
- iio_scan_mask_set(indio_dev, indio_dev->buffer,
- ADIS16260_SCAN_SUPPLY);
- iio_scan_mask_set(indio_dev, indio_dev->buffer,
- ADIS16260_SCAN_GYRO);
- iio_scan_mask_set(indio_dev, indio_dev->buffer,
- ADIS16260_SCAN_AUX_ADC);
- iio_scan_mask_set(indio_dev, indio_dev->buffer,
- ADIS16260_SCAN_TEMP);
- iio_scan_mask_set(indio_dev, indio_dev->buffer,
- ADIS16260_SCAN_ANGL);
- }
+ return ret;
/* Get the device into a sane initial state */
- ret = adis_initial_startup(&st->adis);
+ ret = adis_initial_startup(adis);
if (ret)
goto error_cleanup_buffer_trigger;
ret = iio_device_register(indio_dev);
@@ -377,22 +376,18 @@ static int adis16260_probe(struct spi_device *spi)
return 0;
error_cleanup_buffer_trigger:
- adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
+ adis_cleanup_buffer_and_trigger(adis, indio_dev);
return ret;
}
static int adis16260_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis16260_state *st = iio_priv(indio_dev);
+ struct adis *adis = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
adis16260_stop_device(indio_dev);
- adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
- iio_device_free(indio_dev);
+ adis_cleanup_buffer_and_trigger(adis, indio_dev);
return 0;
}
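
The SMPL_PRD arithmetic in the converted adis16260 write_frequency handler above (base rate 256 SPS on the adis16251, 2048 SPS otherwise; the register holds divider - 1, clamped to the 7-bit field) is easy to sanity-check in isolation. A standalone C sketch of the same math follows; it is illustrative only and not part of the patch:

/* Illustrative sketch of the SMPL_PRD divider math above; not driver code. */
#include <stdio.h>

#define ADIS16260_SMPL_PRD_DIV_MASK 0x7F

static unsigned int smpl_prd_from_rate(unsigned int base_sps, unsigned int val)
{
	unsigned int t = base_sps / val;	/* caller must pass val > 0 */

	if (t > ADIS16260_SMPL_PRD_DIV_MASK)	/* clamp to the 7-bit field */
		t = ADIS16260_SMPL_PRD_DIV_MASK;
	else if (t > 0)
		t--;				/* register stores divider - 1 */
	return t;
}

int main(void)
{
	/* adis16260: request 100 SPS -> t = 2048 / 100 = 20 -> SMPL_PRD = 19,
	 * actual rate = 2048 / (19 + 1) = 102 SPS
	 */
	unsigned int t = smpl_prd_from_rate(2048, 100);

	printf("SMPL_PRD=%u, actual rate=%u SPS\n", t, 2048 / (t + 1));
	return 0;
}
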
diff --git a/drivers/iio/gyro/adxrs450.c b/drivers/iio/gyro/adxrs450.c
index 8bd72b490b7..6dab2995f0f 100644
--- a/drivers/iio/gyro/adxrs450.c
+++ b/drivers/iio/gyro/adxrs450.c
@@ -426,11 +426,9 @@ static int adxrs450_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
/* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
st->us = spi;
mutex_init(&st->buf_lock);
@@ -447,7 +445,7 @@ static int adxrs450_probe(struct spi_device *spi)
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
/* Get the device into a sane initial state */
ret = adxrs450_initial_setup(indio_dev);
@@ -456,17 +454,12 @@ static int adxrs450_probe(struct spi_device *spi)
return 0;
error_initial:
iio_device_unregister(indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-
-error_ret:
return ret;
}
static int adxrs450_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
- iio_device_free(spi_get_drvdata(spi));
return 0;
}
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index bc943dd47da..c688d974d3e 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -30,10 +30,6 @@
#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
-/*Format: HID-SENSOR-usage_id_in_hex*/
-/*Usage ID from spec for Gyro-3D: 0x200076*/
-#define DRIVER_NAME "HID-SENSOR-200076"
-
enum gyro_3d_channel {
CHANNEL_SCAN_INDEX_X,
CHANNEL_SCAN_INDEX_Y,
@@ -179,18 +175,10 @@ static int gyro_3d_write_raw(struct iio_dev *indio_dev,
return ret;
}
-static int gyro_3d_write_raw_get_fmt(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- long mask)
-{
- return IIO_VAL_INT_PLUS_MICRO;
-}
-
static const struct iio_info gyro_3d_info = {
.driver_module = THIS_MODULE,
.read_raw = &gyro_3d_read_raw,
.write_raw = &gyro_3d_write_raw,
- .write_raw_get_fmt = &gyro_3d_write_raw_get_fmt,
};
/* Function to push data to buffer */
@@ -286,11 +274,9 @@ static int hid_gyro_3d_probe(struct platform_device *pdev)
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_chan_spec *channels;
- indio_dev = iio_device_alloc(sizeof(struct gyro_3d_state));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*gyro_state));
+ if (!indio_dev)
+ return -ENOMEM;
platform_set_drvdata(pdev, indio_dev);
gyro_state = iio_priv(indio_dev);
@@ -302,15 +288,14 @@ static int hid_gyro_3d_probe(struct platform_device *pdev)
&gyro_state->common_attributes);
if (ret) {
dev_err(&pdev->dev, "failed to setup common attributes\n");
- goto error_free_dev;
+ return ret;
}
channels = kmemdup(gyro_3d_channels, sizeof(gyro_3d_channels),
GFP_KERNEL);
if (!channels) {
- ret = -ENOMEM;
dev_err(&pdev->dev, "failed to duplicate channels\n");
- goto error_free_dev;
+ return -ENOMEM;
}
ret = gyro_3d_parse_report(pdev, hsdev, channels,
@@ -367,9 +352,6 @@ error_unreg_buffer_funcs:
iio_triggered_buffer_cleanup(indio_dev);
error_free_dev_mem:
kfree(indio_dev->channels);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -384,14 +366,23 @@ static int hid_gyro_3d_remove(struct platform_device *pdev)
hid_sensor_remove_trigger(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
kfree(indio_dev->channels);
- iio_device_free(indio_dev);
return 0;
}
+static struct platform_device_id hid_gyro_3d_ids[] = {
+ {
+ /* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-200076",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_gyro_3d_ids);
+
static struct platform_driver hid_gyro_3d_platform_driver = {
+ .id_table = hid_gyro_3d_ids,
.driver = {
- .name = DRIVER_NAME,
+ .name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
.probe = hid_gyro_3d_probe,
diff --git a/drivers/iio/gyro/itg3200_core.c b/drivers/iio/gyro/itg3200_core.c
index d66605d2629..4d3f3b92b36 100644
--- a/drivers/iio/gyro/itg3200_core.c
+++ b/drivers/iio/gyro/itg3200_core.c
@@ -309,11 +309,9 @@ static int itg3200_probe(struct i2c_client *client,
dev_dbg(&client->dev, "probe I2C dev with IRQ %i", client->irq);
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
@@ -330,7 +328,7 @@ static int itg3200_probe(struct i2c_client *client,
ret = itg3200_buffer_configure(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
if (client->irq) {
ret = itg3200_probe_trigger(indio_dev);
@@ -353,9 +351,6 @@ error_remove_trigger:
itg3200_remove_trigger(indio_dev);
error_unconfigure_buffer:
itg3200_buffer_unconfigure(indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -370,8 +365,6 @@ static int itg3200_remove(struct i2c_client *client)
itg3200_buffer_unconfigure(indio_dev);
- iio_device_free(indio_dev);
-
return 0;
}
diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
index 3ad9907bb15..f8f2bf84a5a 100644
--- a/drivers/iio/gyro/st_gyro.h
+++ b/drivers/iio/gyro/st_gyro.h
@@ -23,7 +23,16 @@
#define L3G4IS_GYRO_DEV_NAME "l3g4is_ui"
#define LSM330_GYRO_DEV_NAME "lsm330_gyro"
-int st_gyro_common_probe(struct iio_dev *indio_dev);
+/**
+ * struct st_sensors_platform_data - gyro platform data
+ * @drdy_int_pin: DRDY on gyros is available only on INT2 pin.
+ */
+static const struct st_sensors_platform_data gyro_pdata = {
+ .drdy_int_pin = 2,
+};
+
+int st_gyro_common_probe(struct iio_dev *indio_dev,
+ struct st_sensors_platform_data *pdata);
void st_gyro_common_remove(struct iio_dev *indio_dev);
#ifdef CONFIG_IIO_BUFFER
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index f9ed3488c31..e13c2b0bf3d 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -60,7 +60,7 @@
#define ST_GYRO_1_BDU_ADDR 0x23
#define ST_GYRO_1_BDU_MASK 0x80
#define ST_GYRO_1_DRDY_IRQ_ADDR 0x22
-#define ST_GYRO_1_DRDY_IRQ_MASK 0x08
+#define ST_GYRO_1_DRDY_IRQ_INT2_MASK 0x08
#define ST_GYRO_1_MULTIREAD_BIT true
/* CUSTOM VALUES FOR SENSOR 2 */
@@ -84,7 +84,7 @@
#define ST_GYRO_2_BDU_ADDR 0x23
#define ST_GYRO_2_BDU_MASK 0x80
#define ST_GYRO_2_DRDY_IRQ_ADDR 0x22
-#define ST_GYRO_2_DRDY_IRQ_MASK 0x08
+#define ST_GYRO_2_DRDY_IRQ_INT2_MASK 0x08
#define ST_GYRO_2_MULTIREAD_BIT true
static const struct iio_chan_spec st_gyro_16bit_channels[] = {
@@ -158,7 +158,7 @@ static const struct st_sensors st_gyro_sensors[] = {
},
.drdy_irq = {
.addr = ST_GYRO_1_DRDY_IRQ_ADDR,
- .mask = ST_GYRO_1_DRDY_IRQ_MASK,
+ .mask_int2 = ST_GYRO_1_DRDY_IRQ_INT2_MASK,
},
.multi_read_bit = ST_GYRO_1_MULTIREAD_BIT,
.bootime = 2,
@@ -221,7 +221,7 @@ static const struct st_sensors st_gyro_sensors[] = {
},
.drdy_irq = {
.addr = ST_GYRO_2_DRDY_IRQ_ADDR,
- .mask = ST_GYRO_2_DRDY_IRQ_MASK,
+ .mask_int2 = ST_GYRO_2_DRDY_IRQ_INT2_MASK,
},
.multi_read_bit = ST_GYRO_2_MULTIREAD_BIT,
.bootime = 2,
@@ -302,7 +302,8 @@ static const struct iio_trigger_ops st_gyro_trigger_ops = {
#define ST_GYRO_TRIGGER_OPS NULL
#endif
-int st_gyro_common_probe(struct iio_dev *indio_dev)
+int st_gyro_common_probe(struct iio_dev *indio_dev,
+ struct st_sensors_platform_data *pdata)
{
int err;
struct st_sensor_data *gdata = iio_priv(indio_dev);
@@ -324,7 +325,7 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
&gdata->sensor->fs.fs_avl[0];
gdata->odr = gdata->sensor->odr.odr_avl[0].hz;
- err = st_sensors_init_sensor(indio_dev);
+ err = st_sensors_init_sensor(indio_dev, pdata);
if (err < 0)
goto st_gyro_common_probe_error;
@@ -365,7 +366,6 @@ void st_gyro_common_remove(struct iio_dev *indio_dev)
st_sensors_deallocate_trigger(indio_dev);
st_gyro_deallocate_ring(indio_dev);
}
- iio_device_free(indio_dev);
}
EXPORT_SYMBOL(st_gyro_common_remove);
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index 8a310500573..16b8b8d70bf 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -25,27 +25,21 @@ static int st_gyro_i2c_probe(struct i2c_client *client,
struct st_sensor_data *gdata;
int err;
- indio_dev = iio_device_alloc(sizeof(*gdata));
- if (indio_dev == NULL) {
- err = -ENOMEM;
- goto iio_device_alloc_error;
- }
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*gdata));
+ if (!indio_dev)
+ return -ENOMEM;
gdata = iio_priv(indio_dev);
gdata->dev = &client->dev;
st_sensors_i2c_configure(indio_dev, client, gdata);
- err = st_gyro_common_probe(indio_dev);
+ err = st_gyro_common_probe(indio_dev,
+ (struct st_sensors_platform_data *)&gyro_pdata);
if (err < 0)
- goto st_gyro_common_probe_error;
+ return err;
return 0;
-
-st_gyro_common_probe_error:
- iio_device_free(indio_dev);
-iio_device_alloc_error:
- return err;
}
static int st_gyro_i2c_remove(struct i2c_client *client)
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
index f3540390eb2..94763e25caf 100644
--- a/drivers/iio/gyro/st_gyro_spi.c
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -24,27 +24,21 @@ static int st_gyro_spi_probe(struct spi_device *spi)
struct st_sensor_data *gdata;
int err;
- indio_dev = iio_device_alloc(sizeof(*gdata));
- if (indio_dev == NULL) {
- err = -ENOMEM;
- goto iio_device_alloc_error;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*gdata));
+ if (!indio_dev)
+ return -ENOMEM;
gdata = iio_priv(indio_dev);
gdata->dev = &spi->dev;
st_sensors_spi_configure(indio_dev, spi, gdata);
- err = st_gyro_common_probe(indio_dev);
+ err = st_gyro_common_probe(indio_dev,
+ (struct st_sensors_platform_data *)&gyro_pdata);
if (err < 0)
- goto st_gyro_common_probe_error;
+ return err;
return 0;
-
-st_gyro_common_probe_error:
- iio_device_free(indio_dev);
-iio_device_alloc_error:
- return err;
}
static int st_gyro_spi_remove(struct spi_device *spi)
diff --git a/drivers/iio/iio_core_trigger.h b/drivers/iio/iio_core_trigger.h
index 6f7c56fcbe7..1fdb1e4ea4a 100644
--- a/drivers/iio/iio_core_trigger.h
+++ b/drivers/iio/iio_core_trigger.h
@@ -30,7 +30,7 @@ void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev);
static int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
return 0;
-};
+}
/**
* iio_device_unregister_trigger_consumer() - reverse the registration process
@@ -38,9 +38,6 @@ static int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
**/
static void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
{
-};
+}
#endif /* CONFIG_TRIGGER_CONSUMER */
-
-
-
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index 4f40a10cb74..663e88a1a3c 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -1,6 +1,8 @@
#
# IIO imu drivers configuration
#
+# When adding new entries keep the list in alphabetical order
+
menu "Inertial measurement units"
config ADIS16400
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index f2f56ceaed2..114d2c17cbe 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -2,6 +2,7 @@
# Makefile for Inertial Measurement Units
#
+# When adding new entries keep the list in alphabetical order
adis16400-y := adis16400_core.o
adis16400-$(CONFIG_IIO_BUFFER) += adis16400_buffer.o
obj-$(CONFIG_ADIS16400) += adis16400.o
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index f60591f0b92..3fb7757a102 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -871,7 +871,7 @@ static int adis16400_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
int ret;
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
@@ -893,12 +893,12 @@ static int adis16400_probe(struct spi_device *spi)
ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data);
if (ret)
- goto error_free_dev;
+ return ret;
ret = adis_setup_buffer_and_trigger(&st->adis, indio_dev,
adis16400_trigger_handler);
if (ret)
- goto error_free_dev;
+ return ret;
/* Get the device into a sane initial state */
ret = adis16400_initial_setup(indio_dev);
@@ -913,8 +913,6 @@ static int adis16400_probe(struct spi_device *spi)
error_cleanup_buffer:
adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
return ret;
}
@@ -928,8 +926,6 @@ static int adis16400_remove(struct spi_device *spi)
adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
- iio_device_free(indio_dev);
-
return 0;
}
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index b7db3837629..dd4206cac62 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -839,7 +839,7 @@ static int adis16480_probe(struct spi_device *spi)
struct adis16480 *st;
int ret;
- indio_dev = iio_device_alloc(sizeof(*st));
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (indio_dev == NULL)
return -ENOMEM;
@@ -857,11 +857,11 @@ static int adis16480_probe(struct spi_device *spi)
ret = adis_init(&st->adis, indio_dev, spi, &adis16480_data);
if (ret)
- goto error_free_dev;
+ return ret;
ret = adis_setup_buffer_and_trigger(&st->adis, indio_dev, NULL);
if (ret)
- goto error_free_dev;
+ return ret;
ret = adis16480_initial_setup(indio_dev);
if (ret)
@@ -879,8 +879,6 @@ error_stop_device:
adis16480_stop_device(indio_dev);
error_cleanup_buffer:
adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
return ret;
}
@@ -894,8 +892,6 @@ static int adis16480_remove(struct spi_device *spi)
adis_cleanup_buffer_and_trigger(&st->adis, indio_dev);
- iio_device_free(indio_dev);
-
return 0;
}
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index fe4c61e219f..df7f1e1157a 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/spinlock.h>
+#include <linux/iio/iio.h>
#include "inv_mpu_iio.h"
/*
@@ -663,16 +664,13 @@ static int inv_mpu_probe(struct i2c_client *client,
int result;
if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_READ_I2C_BLOCK |
- I2C_FUNC_SMBUS_WRITE_I2C_BLOCK)) {
- result = -ENOSYS;
- goto out_no_free;
- }
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- result = -ENOMEM;
- goto out_no_free;
- }
+ I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -ENOSYS;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
st = iio_priv(indio_dev);
st->client = client;
st->plat_data = *(struct inv_mpu6050_platform_data
@@ -680,13 +678,13 @@ static int inv_mpu_probe(struct i2c_client *client,
/* power is turned on inside check chip type*/
result = inv_check_and_setup_chip(st, id);
if (result)
- goto out_free;
+ return result;
result = inv_mpu6050_init_config(indio_dev);
if (result) {
dev_err(&client->dev,
"Could not initialize device.\n");
- goto out_free;
+ return result;
}
i2c_set_clientdata(client, indio_dev);
@@ -705,7 +703,7 @@ static int inv_mpu_probe(struct i2c_client *client,
if (result) {
dev_err(&st->client->dev, "configure buffer fail %d\n",
result);
- goto out_free;
+ return result;
}
result = inv_mpu6050_probe_trigger(indio_dev);
if (result) {
@@ -727,10 +725,6 @@ out_remove_trigger:
inv_mpu6050_remove_trigger(st);
out_unreg_ring:
iio_triggered_buffer_cleanup(indio_dev);
-out_free:
- iio_device_free(indio_dev);
-out_no_free:
-
return result;
}
@@ -742,7 +736,6 @@ static int inv_mpu_remove(struct i2c_client *client)
iio_device_unregister(indio_dev);
inv_mpu6050_remove_trigger(st);
iio_triggered_buffer_cleanup(indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index e145931ef1b..97f0297b120 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -383,14 +383,14 @@ static ssize_t iio_read_channel_info(struct device *dev,
scale_db = true;
case IIO_VAL_INT_PLUS_MICRO:
if (val2 < 0)
- return sprintf(buf, "-%d.%06u%s\n", val, -val2,
+ return sprintf(buf, "-%ld.%06u%s\n", abs(val), -val2,
scale_db ? " dB" : "");
else
return sprintf(buf, "%d.%06u%s\n", val, val2,
scale_db ? " dB" : "");
case IIO_VAL_INT_PLUS_NANO:
if (val2 < 0)
- return sprintf(buf, "-%d.%09u\n", val, -val2);
+ return sprintf(buf, "-%ld.%09u\n", abs(val), -val2);
else
return sprintf(buf, "%d.%09u\n", val, val2);
case IIO_VAL_FRACTIONAL:
@@ -912,6 +912,53 @@ void iio_device_free(struct iio_dev *dev)
}
EXPORT_SYMBOL(iio_device_free);
+static void devm_iio_device_release(struct device *dev, void *res)
+{
+ iio_device_free(*(struct iio_dev **)res);
+}
+
+static int devm_iio_device_match(struct device *dev, void *res, void *data)
+{
+ struct iio_dev **r = res;
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+ return *r == data;
+}
+
+struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv)
+{
+ struct iio_dev **ptr, *iio_dev;
+
+ ptr = devres_alloc(devm_iio_device_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ /* use raw alloc_dr for kmalloc caller tracing */
+ iio_dev = iio_device_alloc(sizeof_priv);
+ if (iio_dev) {
+ *ptr = iio_dev;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return iio_dev;
+}
+EXPORT_SYMBOL_GPL(devm_iio_device_alloc);
+
+void devm_iio_device_free(struct device *dev, struct iio_dev *iio_dev)
+{
+ int rc;
+
+ rc = devres_release(dev, devm_iio_device_release,
+ devm_iio_device_match, iio_dev);
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_iio_device_free);
+
/**
* iio_chrdev_open() - chrdev file open for buffer access and ioctls
**/
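
The devm_iio_device_alloc()/devm_iio_device_free() helpers added above are what let the driver conversions in this series drop their iio_device_free() error paths. A minimal probe sketch is shown below; the example_* names and the bare iio_info are hypothetical placeholders, not code from this series:

/* Minimal sketch only; the example_* names are hypothetical. */
#include <linux/module.h>
#include <linux/spi/spi.h>
#include <linux/iio/iio.h>

struct example_state {
	struct spi_device *spi;
};

static const struct iio_info example_info = {
	.driver_module = THIS_MODULE,
};

static int example_probe(struct spi_device *spi)
{
	struct iio_dev *indio_dev;
	struct example_state *st;

	/* freed automatically by devres when the SPI device is unbound */
	indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;

	st = iio_priv(indio_dev);
	st->spi = spi;
	spi_set_drvdata(spi, indio_dev);

	indio_dev->name = spi_get_device_id(spi)->name;
	indio_dev->dev.parent = &spi->dev;
	indio_dev->info = &example_info;
	indio_dev->modes = INDIO_DIRECT_MODE;

	/* no error labels needed: nothing to free by hand on failure */
	return iio_device_register(indio_dev);
}

static int example_remove(struct spi_device *spi)
{
	iio_device_unregister(spi_get_drvdata(spi));
	return 0;	/* the iio_dev itself is released by devres */
}
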
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
index ea8a4146620..bf5e70a32d3 100644
--- a/drivers/iio/industrialio-trigger.c
+++ b/drivers/iio/industrialio-trigger.c
@@ -127,12 +127,17 @@ static struct iio_trigger *iio_trigger_find_by_name(const char *name,
void iio_trigger_poll(struct iio_trigger *trig, s64 time)
{
int i;
- if (!trig->use_count)
- for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
- if (trig->subirqs[i].enabled) {
- trig->use_count++;
+
+ if (!atomic_read(&trig->use_count)) {
+ atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+
+ for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
+ if (trig->subirqs[i].enabled)
generic_handle_irq(trig->subirq_base + i);
- }
+ else
+ iio_trigger_notify_done(trig);
+ }
+ }
}
EXPORT_SYMBOL(iio_trigger_poll);
@@ -146,19 +151,24 @@ EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time)
{
int i;
- if (!trig->use_count)
- for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
- if (trig->subirqs[i].enabled) {
- trig->use_count++;
+
+ if (!atomic_read(&trig->use_count)) {
+ atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER);
+
+ for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
+ if (trig->subirqs[i].enabled)
handle_nested_irq(trig->subirq_base + i);
- }
+ else
+ iio_trigger_notify_done(trig);
+ }
+ }
}
EXPORT_SYMBOL(iio_trigger_poll_chained);
void iio_trigger_notify_done(struct iio_trigger *trig)
{
- trig->use_count--;
- if (trig->use_count == 0 && trig->ops && trig->ops->try_reenable)
+ if (atomic_dec_and_test(&trig->use_count) && trig->ops &&
+ trig->ops->try_reenable)
if (trig->ops->try_reenable(trig))
/* Missed an interrupt so launch new poll now */
iio_trigger_poll(trig, 0);
@@ -414,9 +424,8 @@ static void iio_trig_subirqunmask(struct irq_data *d)
trig->subirqs[d->irq - trig->subirq_base].enabled = true;
}
-struct iio_trigger *iio_trigger_alloc(const char *fmt, ...)
+static struct iio_trigger *viio_trigger_alloc(const char *fmt, va_list vargs)
{
- va_list vargs;
struct iio_trigger *trig;
trig = kzalloc(sizeof *trig, GFP_KERNEL);
if (trig) {
@@ -434,9 +443,8 @@ struct iio_trigger *iio_trigger_alloc(const char *fmt, ...)
kfree(trig);
return NULL;
}
- va_start(vargs, fmt);
+
trig->name = kvasprintf(GFP_KERNEL, fmt, vargs);
- va_end(vargs);
if (trig->name == NULL) {
irq_free_descs(trig->subirq_base,
CONFIG_IIO_CONSUMERS_PER_TRIGGER);
@@ -457,6 +465,19 @@ struct iio_trigger *iio_trigger_alloc(const char *fmt, ...)
}
get_device(&trig->dev);
}
+
+ return trig;
+}
+
+struct iio_trigger *iio_trigger_alloc(const char *fmt, ...)
+{
+ struct iio_trigger *trig;
+ va_list vargs;
+
+ va_start(vargs, fmt);
+ trig = viio_trigger_alloc(fmt, vargs);
+ va_end(vargs);
+
return trig;
}
EXPORT_SYMBOL(iio_trigger_alloc);
@@ -468,6 +489,59 @@ void iio_trigger_free(struct iio_trigger *trig)
}
EXPORT_SYMBOL(iio_trigger_free);
+static void devm_iio_trigger_release(struct device *dev, void *res)
+{
+ iio_trigger_free(*(struct iio_trigger **)res);
+}
+
+static int devm_iio_trigger_match(struct device *dev, void *res, void *data)
+{
+ struct iio_trigger **r = res;
+
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+
+ return *r == data;
+}
+
+struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
+ const char *fmt, ...)
+{
+ struct iio_trigger **ptr, *trig;
+ va_list vargs;
+
+ ptr = devres_alloc(devm_iio_trigger_release, sizeof(*ptr),
+ GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ /* use raw alloc_dr for kmalloc caller tracing */
+ va_start(vargs, fmt);
+ trig = viio_trigger_alloc(fmt, vargs);
+ va_end(vargs);
+ if (trig) {
+ *ptr = trig;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return trig;
+}
+EXPORT_SYMBOL_GPL(devm_iio_trigger_alloc);
+
+void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig)
+{
+ int rc;
+
+ rc = devres_release(dev, devm_iio_trigger_release,
+ devm_iio_trigger_match, iio_trig);
+ WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_iio_trigger_free);
+
void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
indio_dev->groups[indio_dev->groupcounter++] =
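
devm_iio_trigger_alloc() follows the same devres pattern for triggers. A minimal sketch of how a caller might use it is shown below; example_trigger_ops and example_setup_trigger are hypothetical, only the allocation call itself comes from the code added above:

/* Sketch only: the ops table and helper are hypothetical. */
#include <linux/iio/iio.h>
#include <linux/iio/trigger.h>

static const struct iio_trigger_ops example_trigger_ops = {
	.owner = THIS_MODULE,
};

static int example_setup_trigger(struct device *dev, struct iio_dev *indio_dev)
{
	struct iio_trigger *trig;

	/* released automatically by devres when @dev is unbound */
	trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
				      indio_dev->name, indio_dev->id);
	if (!trig)
		return -ENOMEM;

	trig->dev.parent = dev;
	trig->ops = &example_trigger_ops;
	iio_trigger_set_drvdata(trig, indio_dev);

	/* registration still needs an explicit unregister on remove */
	return iio_trigger_register(trig);
}
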
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index 5ef1a396e0c..bf9fa0d7aff 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -1,6 +1,8 @@
#
# Light sensors
#
+# When adding new entries keep the list in alphabetical order
+
menu "Light sensors"
config ADJD_S311
@@ -15,6 +17,27 @@ config ADJD_S311
This driver can also be built as a module. If so, the module
will be called adjd_s311.
+config APDS9300
+ tristate "APDS9300 ambient light sensor"
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Avago APDS9300
+ ambient light sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called apds9300.
+
+config HID_SENSOR_ALS
+ depends on HID_SENSOR_HUB
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+ select HID_SENSOR_IIO_COMMON
+ select HID_SENSOR_IIO_TRIGGER
+ tristate "HID ALS"
+ help
+ Say yes here to build support for the HID SENSOR
+ Ambient light sensor.
+
config SENSORS_LM3533
tristate "LM3533 ambient light sensor"
depends on MFD_LM3533
@@ -52,15 +75,4 @@ config VCNL4000
To compile this driver as a module, choose M here: the
module will be called vcnl4000.
-config HID_SENSOR_ALS
- depends on HID_SENSOR_HUB
- select IIO_BUFFER
- select IIO_TRIGGERED_BUFFER
- select HID_SENSOR_IIO_COMMON
- select HID_SENSOR_IIO_TRIGGER
- tristate "HID ALS"
- help
- Say yes here to build support for the HID SENSOR
- Ambient light sensor.
-
endmenu
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index 040d9c75f8e..354ee9ab237 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -2,8 +2,10 @@
# Makefile for IIO Light sensors
#
+# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_ADJD_S311) += adjd_s311.o
+obj-$(CONFIG_APDS9300) += apds9300.o
+obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o
obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o
obj-$(CONFIG_SENSORS_TSL2563) += tsl2563.o
obj-$(CONFIG_VCNL4000) += vcnl4000.o
-obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o
diff --git a/drivers/iio/light/adjd_s311.c b/drivers/iio/light/adjd_s311.c
index 5f4749e60b0..23cff798598 100644
--- a/drivers/iio/light/adjd_s311.c
+++ b/drivers/iio/light/adjd_s311.c
@@ -37,22 +37,14 @@
#define ADJD_S311_CAP_GREEN 0x07
#define ADJD_S311_CAP_BLUE 0x08
#define ADJD_S311_CAP_CLEAR 0x09
-#define ADJD_S311_INT_RED_LO 0x0a
-#define ADJD_S311_INT_RED_HI 0x0b
-#define ADJD_S311_INT_GREEN_LO 0x0c
-#define ADJD_S311_INT_GREEN_HI 0x0d
-#define ADJD_S311_INT_BLUE_LO 0x0e
-#define ADJD_S311_INT_BLUE_HI 0x0f
-#define ADJD_S311_INT_CLEAR_LO 0x10
-#define ADJD_S311_INT_CLEAR_HI 0x11
-#define ADJD_S311_DATA_RED_LO 0x40
-#define ADJD_S311_DATA_RED_HI 0x41
-#define ADJD_S311_DATA_GREEN_LO 0x42
-#define ADJD_S311_DATA_GREEN_HI 0x43
-#define ADJD_S311_DATA_BLUE_LO 0x44
-#define ADJD_S311_DATA_BLUE_HI 0x45
-#define ADJD_S311_DATA_CLEAR_LO 0x46
-#define ADJD_S311_DATA_CLEAR_HI 0x47
+#define ADJD_S311_INT_RED 0x0a
+#define ADJD_S311_INT_GREEN 0x0c
+#define ADJD_S311_INT_BLUE 0x0e
+#define ADJD_S311_INT_CLEAR 0x10
+#define ADJD_S311_DATA_RED 0x40
+#define ADJD_S311_DATA_GREEN 0x42
+#define ADJD_S311_DATA_BLUE 0x44
+#define ADJD_S311_DATA_CLEAR 0x46
#define ADJD_S311_OFFSET_RED 0x48
#define ADJD_S311_OFFSET_GREEN 0x49
#define ADJD_S311_OFFSET_BLUE 0x4a
@@ -73,8 +65,8 @@ enum adjd_s311_channel_idx {
IDX_RED, IDX_GREEN, IDX_BLUE, IDX_CLEAR
};
-#define ADJD_S311_DATA_REG(chan) (ADJD_S311_DATA_RED_LO + (chan) * 2)
-#define ADJD_S311_INT_REG(chan) (ADJD_S311_INT_RED_LO + (chan) * 2)
+#define ADJD_S311_DATA_REG(chan) (ADJD_S311_DATA_RED + (chan) * 2)
+#define ADJD_S311_INT_REG(chan) (ADJD_S311_INT_RED + (chan) * 2)
#define ADJD_S311_CAP_REG(chan) (ADJD_S311_CAP_RED + (chan))
static int adjd_s311_req_data(struct iio_dev *indio_dev)
@@ -232,7 +224,8 @@ static int adjd_s311_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- ret = adjd_s311_read_data(indio_dev, chan->address, val);
+ ret = adjd_s311_read_data(indio_dev,
+ ADJD_S311_DATA_REG(chan->address), val);
if (ret < 0)
return ret;
return IIO_VAL_INT;
@@ -293,11 +286,10 @@ static int adjd_s311_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
int err;
- indio_dev = iio_device_alloc(sizeof(*data));
- if (indio_dev == NULL) {
- err = -ENOMEM;
- goto exit;
- }
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
@@ -312,7 +304,7 @@ static int adjd_s311_probe(struct i2c_client *client,
err = iio_triggered_buffer_setup(indio_dev, NULL,
adjd_s311_trigger_handler, NULL);
if (err < 0)
- goto exit_free_device;
+ return err;
err = iio_device_register(indio_dev);
if (err)
@@ -324,9 +316,6 @@ static int adjd_s311_probe(struct i2c_client *client,
exit_unreg_buffer:
iio_triggered_buffer_cleanup(indio_dev);
-exit_free_device:
- iio_device_free(indio_dev);
-exit:
return err;
}
@@ -338,7 +327,6 @@ static int adjd_s311_remove(struct i2c_client *client)
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
kfree(data->buffer);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/light/apds9300.c b/drivers/iio/light/apds9300.c
new file mode 100644
index 00000000000..66a58bda6dc
--- /dev/null
+++ b/drivers/iio/light/apds9300.c
@@ -0,0 +1,512 @@
+/*
+ * apds9300.c - IIO driver for Avago APDS9300 ambient light sensor
+ *
+ * Copyright 2013 Oleksandr Kravchenko <o.v.kravchenko@globallogic.com>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pm.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+#include <linux/interrupt.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/events.h>
+
+#define APDS9300_DRV_NAME "apds9300"
+#define APDS9300_IRQ_NAME "apds9300_event"
+
+/* Command register bits */
+#define APDS9300_CMD BIT(7) /* Select command register. Must write as 1 */
+#define APDS9300_WORD BIT(5) /* I2C write/read: if 1 word, if 0 byte */
+#define APDS9300_CLEAR BIT(6) /* Interrupt clear. Clears pending interrupt */
+
+/* Register set */
+#define APDS9300_CONTROL 0x00 /* Control of basic functions */
+#define APDS9300_THRESHLOWLOW 0x02 /* Low byte of low interrupt threshold */
+#define APDS9300_THRESHHIGHLOW 0x04 /* Low byte of high interrupt threshold */
+#define APDS9300_INTERRUPT 0x06 /* Interrupt control */
+#define APDS9300_DATA0LOW 0x0c /* Low byte of ADC channel 0 */
+#define APDS9300_DATA1LOW 0x0e /* Low byte of ADC channel 1 */
+
+/* Power on/off value for APDS9300_CONTROL register */
+#define APDS9300_POWER_ON 0x03
+#define APDS9300_POWER_OFF 0x00
+
+/* Interrupts */
+#define APDS9300_INTR_ENABLE 0x10
+/* Interrupt Persist Function: Any value outside of threshold range */
+#define APDS9300_THRESH_INTR 0x01
+
+#define APDS9300_THRESH_MAX 0xffff /* Max threshold value */
+
+struct apds9300_data {
+ struct i2c_client *client;
+ struct mutex mutex;
+ int power_state;
+ int thresh_low;
+ int thresh_hi;
+ int intr_en;
+};
+
+/* Lux calculation */
+
+/* Calculated values 1000 * (CH1/CH0)^1.4 for CH1/CH0 from 0 to 0.52 */
+static const u16 apds9300_lux_ratio[] = {
+ 0, 2, 4, 7, 11, 15, 19, 24, 29, 34, 40, 45, 51, 57, 64, 70, 77, 84, 91,
+ 98, 105, 112, 120, 128, 136, 144, 152, 160, 168, 177, 185, 194, 203,
+ 212, 221, 230, 239, 249, 258, 268, 277, 287, 297, 307, 317, 327, 337,
+ 347, 358, 368, 379, 390, 400,
+};
+
+static unsigned long apds9300_calculate_lux(u16 ch0, u16 ch1)
+{
+ unsigned long lux, tmp;
+
+ /* avoid division by zero */
+ if (ch0 == 0)
+ return 0;
+
+ tmp = DIV_ROUND_UP(ch1 * 100, ch0);
+ if (tmp <= 52) {
+ lux = 3150 * ch0 - (unsigned long)DIV_ROUND_UP_ULL(ch0
+ * apds9300_lux_ratio[tmp] * 5930ull, 1000);
+ } else if (tmp <= 65) {
+ lux = 2290 * ch0 - 2910 * ch1;
+ } else if (tmp <= 80) {
+ lux = 1570 * ch0 - 1800 * ch1;
+ } else if (tmp <= 130) {
+ lux = 338 * ch0 - 260 * ch1;
+ } else {
+ lux = 0;
+ }
+
+ return lux / 100000;
+}
+
+static int apds9300_get_adc_val(struct apds9300_data *data, int adc_number)
+{
+ int ret;
+ u8 flags = APDS9300_CMD | APDS9300_WORD;
+
+ if (!data->power_state)
+ return -EBUSY;
+
+ /* Select ADC0 or ADC1 data register */
+ flags |= adc_number ? APDS9300_DATA1LOW : APDS9300_DATA0LOW;
+
+ ret = i2c_smbus_read_word_data(data->client, flags);
+ if (ret < 0)
+ dev_err(&data->client->dev,
+ "failed to read ADC%d value\n", adc_number);
+
+ return ret;
+}
+
+static int apds9300_set_thresh_low(struct apds9300_data *data, int value)
+{
+ int ret;
+
+ if (!data->power_state)
+ return -EBUSY;
+
+ if (value > APDS9300_THRESH_MAX)
+ return -EINVAL;
+
+ ret = i2c_smbus_write_word_data(data->client, APDS9300_THRESHLOWLOW
+ | APDS9300_CMD | APDS9300_WORD, value);
+ if (ret) {
+ dev_err(&data->client->dev, "failed to set thresh_low\n");
+ return ret;
+ }
+ data->thresh_low = value;
+
+ return 0;
+}
+
+static int apds9300_set_thresh_hi(struct apds9300_data *data, int value)
+{
+ int ret;
+
+ if (!data->power_state)
+ return -EBUSY;
+
+ if (value > APDS9300_THRESH_MAX)
+ return -EINVAL;
+
+ ret = i2c_smbus_write_word_data(data->client, APDS9300_THRESHHIGHLOW
+ | APDS9300_CMD | APDS9300_WORD, value);
+ if (ret) {
+ dev_err(&data->client->dev, "failed to set thresh_hi\n");
+ return ret;
+ }
+ data->thresh_hi = value;
+
+ return 0;
+}
+
+static int apds9300_set_intr_state(struct apds9300_data *data, int state)
+{
+ int ret;
+ u8 cmd;
+
+ if (!data->power_state)
+ return -EBUSY;
+
+ cmd = state ? APDS9300_INTR_ENABLE | APDS9300_THRESH_INTR : 0x00;
+ ret = i2c_smbus_write_byte_data(data->client,
+ APDS9300_INTERRUPT | APDS9300_CMD, cmd);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "failed to set interrupt state %d\n", state);
+ return ret;
+ }
+ data->intr_en = state;
+
+ return 0;
+}
+
+static int apds9300_set_power_state(struct apds9300_data *data, int state)
+{
+ int ret;
+ u8 cmd;
+
+ cmd = state ? APDS9300_POWER_ON : APDS9300_POWER_OFF;
+ ret = i2c_smbus_write_byte_data(data->client,
+ APDS9300_CONTROL | APDS9300_CMD, cmd);
+ if (ret) {
+ dev_err(&data->client->dev,
+ "failed to set power state %d\n", state);
+ return ret;
+ }
+ data->power_state = state;
+
+ return 0;
+}
+
+static void apds9300_clear_intr(struct apds9300_data *data)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte(data->client, APDS9300_CLEAR | APDS9300_CMD);
+ if (ret < 0)
+ dev_err(&data->client->dev, "failed to clear interrupt\n");
+}
+
+static int apds9300_chip_init(struct apds9300_data *data)
+{
+ int ret;
+
+ /* Power the chip off first to ensure it starts from a known state */
+ ret = apds9300_set_power_state(data, 0);
+ if (ret < 0)
+ goto err;
+ /*
+ * Probe the chip. To do so, power up the device and then read back
+ * the 0x03 power-on code
+ */
+ ret = apds9300_set_power_state(data, 1);
+ if (ret < 0)
+ goto err;
+ ret = i2c_smbus_read_byte_data(data->client,
+ APDS9300_CONTROL | APDS9300_CMD);
+ if (ret != APDS9300_POWER_ON) {
+ ret = -ENODEV;
+ goto err;
+ }
+ /*
+ * Disable the interrupt to ensure that it is not left enabled,
+ * e.g. after a device soft reset
+ */
+ ret = apds9300_set_intr_state(data, 0);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+
+err:
+ dev_err(&data->client->dev, "failed to init the chip\n");
+ return ret;
+}
+
+static int apds9300_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2,
+ long mask)
+{
+ int ch0, ch1, ret = -EINVAL;
+ struct apds9300_data *data = iio_priv(indio_dev);
+
+ mutex_lock(&data->mutex);
+ switch (chan->type) {
+ case IIO_LIGHT:
+ ch0 = apds9300_get_adc_val(data, 0);
+ if (ch0 < 0) {
+ ret = ch0;
+ break;
+ }
+ ch1 = apds9300_get_adc_val(data, 1);
+ if (ch1 < 0) {
+ ret = ch1;
+ break;
+ }
+ *val = apds9300_calculate_lux(ch0, ch1);
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_INTENSITY:
+ ret = apds9300_get_adc_val(data, chan->channel);
+ if (ret < 0)
+ break;
+ *val = ret;
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int apds9300_read_thresh(struct iio_dev *indio_dev, u64 event_code,
+ int *val)
+{
+ struct apds9300_data *data = iio_priv(indio_dev);
+
+ switch (IIO_EVENT_CODE_EXTRACT_DIR(event_code)) {
+ case IIO_EV_DIR_RISING:
+ *val = data->thresh_hi;
+ break;
+ case IIO_EV_DIR_FALLING:
+ *val = data->thresh_low;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int apds9300_write_thresh(struct iio_dev *indio_dev, u64 event_code,
+ int val)
+{
+ struct apds9300_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) == IIO_EV_DIR_RISING)
+ ret = apds9300_set_thresh_hi(data, val);
+ else
+ ret = apds9300_set_thresh_low(data, val);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int apds9300_read_interrupt_config(struct iio_dev *indio_dev,
+ u64 event_code)
+{
+ struct apds9300_data *data = iio_priv(indio_dev);
+
+ return data->intr_en;
+}
+
+static int apds9300_write_interrupt_config(struct iio_dev *indio_dev,
+ u64 event_code, int state)
+{
+ struct apds9300_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = apds9300_set_intr_state(data, state);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static const struct iio_info apds9300_info_no_irq = {
+ .driver_module = THIS_MODULE,
+ .read_raw = apds9300_read_raw,
+};
+
+static const struct iio_info apds9300_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = apds9300_read_raw,
+ .read_event_value = apds9300_read_thresh,
+ .write_event_value = apds9300_write_thresh,
+ .read_event_config = apds9300_read_interrupt_config,
+ .write_event_config = apds9300_write_interrupt_config,
+};
+
+static const struct iio_chan_spec apds9300_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .channel = 0,
+ .indexed = true,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }, {
+ .type = IIO_INTENSITY,
+ .channel = 0,
+ .channel2 = IIO_MOD_LIGHT_BOTH,
+ .indexed = true,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .event_mask = (IIO_EV_BIT(IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING) |
+ IIO_EV_BIT(IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING)),
+ }, {
+ .type = IIO_INTENSITY,
+ .channel = 1,
+ .channel2 = IIO_MOD_LIGHT_IR,
+ .indexed = true,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ },
+};
+
+static irqreturn_t apds9300_interrupt_handler(int irq, void *private)
+{
+ struct iio_dev *dev_info = private;
+ struct apds9300_data *data = iio_priv(dev_info);
+
+ iio_push_event(dev_info,
+ IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ iio_get_time_ns());
+
+ apds9300_clear_intr(data);
+
+ return IRQ_HANDLED;
+}
+
+static int apds9300_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct apds9300_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ ret = apds9300_chip_init(data);
+ if (ret < 0)
+ goto err;
+
+ mutex_init(&data->mutex);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->channels = apds9300_channels;
+ indio_dev->num_channels = ARRAY_SIZE(apds9300_channels);
+ indio_dev->name = APDS9300_DRV_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ if (client->irq)
+ indio_dev->info = &apds9300_info;
+ else
+ indio_dev->info = &apds9300_info_no_irq;
+
+ if (client->irq) {
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ NULL, apds9300_interrupt_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ APDS9300_IRQ_NAME, indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "irq request error %d\n", -ret);
+ goto err;
+ }
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto err;
+
+ return 0;
+
+err:
+ /* Ensure that the device is powered off in case of error */
+ apds9300_set_power_state(data, 0);
+ return ret;
+}
+
+static int apds9300_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct apds9300_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ /* Ensure that the device is powered off and interrupts are disabled */
+ apds9300_set_intr_state(data, 0);
+ apds9300_set_power_state(data, 0);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int apds9300_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct apds9300_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = apds9300_set_power_state(data, 0);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static int apds9300_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct apds9300_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->mutex);
+ ret = apds9300_set_power_state(data, 1);
+ mutex_unlock(&data->mutex);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(apds9300_pm_ops, apds9300_suspend, apds9300_resume);
+#define APDS9300_PM_OPS (&apds9300_pm_ops)
+#else
+#define APDS9300_PM_OPS NULL
+#endif
+
+static struct i2c_device_id apds9300_id[] = {
+ { APDS9300_DRV_NAME, 0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(i2c, apds9300_id);
+
+static struct i2c_driver apds9300_driver = {
+ .driver = {
+ .name = APDS9300_DRV_NAME,
+ .owner = THIS_MODULE,
+ .pm = APDS9300_PM_OPS,
+ },
+ .probe = apds9300_probe,
+ .remove = apds9300_remove,
+ .id_table = apds9300_id,
+};
+
+module_i2c_driver(apds9300_driver);
+
+MODULE_AUTHOR("Kravchenko Oleksandr <o.v.kravchenko@globallogic.com>");
+MODULE_AUTHOR("GlobalLogic inc.");
+MODULE_DESCRIPTION("APDS9300 ambient light photo sensor driver");
+MODULE_LICENSE("GPL");
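
The piecewise lux formula in apds9300_calculate_lux() above is easiest to check with concrete numbers. The standalone sketch below redefines DIV_ROUND_UP so it builds outside the kernel and reuses the table entry apds9300_lux_ratio[30] = 185; it is a verification aid, not driver code:

/* Standalone check of the CH1/CH0 <= 0.52 branch of the lux formula above. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* CH0 = 1000, CH1 = 300 -> ratio index 30, table value 185 */
	unsigned long long ch0 = 1000, ch1 = 300, ratio = 185;
	unsigned long long tmp = DIV_ROUND_UP(ch1 * 100, ch0);	/* 30 */
	unsigned long long lux;

	lux = 3150 * ch0 - DIV_ROUND_UP(ch0 * ratio * 5930, 1000);
	/* 3150000 - 1097050 = 2052950; scaled: 2052950 / 100000 = 20 lux */
	printf("tmp=%llu lux=%llu\n", tmp, lux / 100000);
	return 0;
}
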
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index cdc2cad0f01..e59d00c3139 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -30,10 +30,6 @@
#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
-/*Format: HID-SENSOR-usage_id_in_hex*/
-/*Usage ID from spec for Ambiant-Light: 0x200041*/
-#define DRIVER_NAME "HID-SENSOR-200041"
-
#define CHANNEL_SCAN_INDEX_ILLUM 0
struct als_state {
@@ -158,18 +154,10 @@ static int als_write_raw(struct iio_dev *indio_dev,
return ret;
}
-static int als_write_raw_get_fmt(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- long mask)
-{
- return IIO_VAL_INT_PLUS_MICRO;
-}
-
static const struct iio_info als_info = {
.driver_module = THIS_MODULE,
.read_raw = &als_read_raw,
.write_raw = &als_write_raw,
- .write_raw_get_fmt = &als_write_raw_get_fmt,
};
/* Function to push data to buffer */
@@ -253,11 +241,9 @@ static int hid_als_probe(struct platform_device *pdev)
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_chan_spec *channels;
- indio_dev = iio_device_alloc(sizeof(struct als_state));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct als_state));
+ if (!indio_dev)
+ return -ENOMEM;
platform_set_drvdata(pdev, indio_dev);
als_state = iio_priv(indio_dev);
@@ -268,14 +254,13 @@ static int hid_als_probe(struct platform_device *pdev)
&als_state->common_attributes);
if (ret) {
dev_err(&pdev->dev, "failed to setup common attributes\n");
- goto error_free_dev;
+ return ret;
}
channels = kmemdup(als_channels, sizeof(als_channels), GFP_KERNEL);
if (!channels) {
- ret = -ENOMEM;
dev_err(&pdev->dev, "failed to duplicate channels\n");
- goto error_free_dev;
+ return -ENOMEM;
}
ret = als_parse_report(pdev, hsdev, channels,
@@ -333,9 +318,6 @@ error_unreg_buffer_funcs:
iio_triggered_buffer_cleanup(indio_dev);
error_free_dev_mem:
kfree(indio_dev->channels);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -350,14 +332,23 @@ static int hid_als_remove(struct platform_device *pdev)
hid_sensor_remove_trigger(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
kfree(indio_dev->channels);
- iio_device_free(indio_dev);
return 0;
}
+static struct platform_device_id hid_als_ids[] = {
+ {
+ /* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-200041",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_als_ids);
+
static struct platform_driver hid_als_platform_driver = {
+ .id_table = hid_als_ids,
.driver = {
- .name = DRIVER_NAME,
+ .name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
.probe = hid_als_probe,
diff --git a/drivers/iio/light/lm3533-als.c b/drivers/iio/light/lm3533-als.c
index 5fa31a4ef82..c1aadc6b865 100644
--- a/drivers/iio/light/lm3533-als.c
+++ b/drivers/iio/light/lm3533-als.c
@@ -847,7 +847,7 @@ static int lm3533_als_probe(struct platform_device *pdev)
return -EINVAL;
}
- indio_dev = iio_device_alloc(sizeof(*als));
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*als));
if (!indio_dev)
return -ENOMEM;
@@ -870,7 +870,7 @@ static int lm3533_als_probe(struct platform_device *pdev)
if (als->irq) {
ret = lm3533_als_setup_irq(als, indio_dev);
if (ret)
- goto err_free_dev;
+ return ret;
}
ret = lm3533_als_setup(als, pdata);
@@ -894,8 +894,6 @@ err_disable:
err_free_irq:
if (als->irq)
free_irq(als->irq, indio_dev);
-err_free_dev:
- iio_device_free(indio_dev);
return ret;
}
@@ -910,7 +908,6 @@ static int lm3533_als_remove(struct platform_device *pdev)
lm3533_als_disable(als);
if (als->irq)
free_irq(als->irq, indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 1f529f36f13..ebb962c5c32 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -702,7 +702,7 @@ static int tsl2563_probe(struct i2c_client *client,
int err = 0;
u8 id = 0;
- indio_dev = iio_device_alloc(sizeof(*chip));
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
if (!indio_dev)
return -ENOMEM;
@@ -714,13 +714,13 @@ static int tsl2563_probe(struct i2c_client *client,
err = tsl2563_detect(chip);
if (err) {
dev_err(&client->dev, "detect error %d\n", -err);
- goto fail1;
+ return err;
}
err = tsl2563_read_id(chip, &id);
if (err) {
dev_err(&client->dev, "read id error %d\n", -err);
- goto fail1;
+ return err;
}
mutex_init(&chip->lock);
@@ -751,7 +751,7 @@ static int tsl2563_probe(struct i2c_client *client,
indio_dev->info = &tsl2563_info_no_irq;
if (client->irq) {
- err = request_threaded_irq(client->irq,
+ err = devm_request_threaded_irq(&client->dev, client->irq,
NULL,
&tsl2563_event_handler,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
@@ -759,14 +759,14 @@ static int tsl2563_probe(struct i2c_client *client,
indio_dev);
if (err) {
dev_err(&client->dev, "irq request error %d\n", -err);
- goto fail1;
+ return err;
}
}
err = tsl2563_configure(chip);
if (err) {
dev_err(&client->dev, "configure error %d\n", -err);
- goto fail2;
+ return err;
}
INIT_DELAYED_WORK(&chip->poweroff_work, tsl2563_poweroff_work);
@@ -777,19 +777,14 @@ static int tsl2563_probe(struct i2c_client *client,
err = iio_device_register(indio_dev);
if (err) {
dev_err(&client->dev, "iio registration error %d\n", -err);
- goto fail3;
+ goto fail;
}
return 0;
-fail3:
+fail:
cancel_delayed_work(&chip->poweroff_work);
flush_scheduled_work();
-fail2:
- if (client->irq)
- free_irq(client->irq, indio_dev);
-fail1:
- iio_device_free(indio_dev);
return err;
}
@@ -807,10 +802,6 @@ static int tsl2563_remove(struct i2c_client *client)
chip->intr);
flush_scheduled_work();
tsl2563_set_power(chip, 0);
- if (client->irq)
- free_irq(client->irq, indio_dev);
-
- iio_device_free(indio_dev);
return 0;
}
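
In tsl2563 the same devm conversion also covers the interrupt: devm_request_threaded_irq() ties the IRQ to the client device, which is what lets the fail1/fail2 labels collapse into a single fail label for the one resource (the delayed work) that still needs manual unwinding. A minimal sketch of the managed-IRQ pattern, with my_event_handler and the "my_sensor_event" string as made-up placeholders:

    #include <linux/i2c.h>
    #include <linux/interrupt.h>
    #include <linux/iio/iio.h>

    /* placeholder threaded handler */
    static irqreturn_t my_event_handler(int irq, void *private)
    {
            return IRQ_HANDLED;
    }

    static int my_setup_irq(struct i2c_client *client, struct iio_dev *indio_dev)
    {
            if (!client->irq)
                    return 0;

            /* freed automatically on probe failure or driver detach */
            return devm_request_threaded_irq(&client->dev, client->irq,
                                             NULL, my_event_handler,
                                             IRQF_TRIGGER_RISING | IRQF_ONESHOT,
                                             "my_sensor_event", indio_dev);
    }
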
diff --git a/drivers/iio/light/vcnl4000.c b/drivers/iio/light/vcnl4000.c
index 1014943d949..2bb304215b1 100644
--- a/drivers/iio/light/vcnl4000.c
+++ b/drivers/iio/light/vcnl4000.c
@@ -157,7 +157,7 @@ static int vcnl4000_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
int ret;
- indio_dev = iio_device_alloc(sizeof(*data));
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
@@ -167,7 +167,7 @@ static int vcnl4000_probe(struct i2c_client *client,
ret = i2c_smbus_read_byte_data(data->client, VCNL4000_PROD_REV);
if (ret < 0)
- goto error_free_dev;
+ return ret;
dev_info(&client->dev, "VCNL4000 Ambient light/proximity sensor, Prod %02x, Rev: %02x\n",
ret >> 4, ret & 0xf);
@@ -181,22 +181,14 @@ static int vcnl4000_probe(struct i2c_client *client,
ret = iio_device_register(indio_dev);
if (ret < 0)
- goto error_free_dev;
+ return ret;
return 0;
-
-error_free_dev:
- iio_device_free(indio_dev);
- return ret;
}
static int vcnl4000_remove(struct i2c_client *client)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
-
- iio_device_unregister(indio_dev);
- iio_device_free(indio_dev);
-
+ iio_device_unregister(i2c_get_clientdata(client));
return 0;
}
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index c332b0ae4a3..4fa923f37b9 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -1,6 +1,8 @@
#
# Magnetometer sensors
#
+# When adding new entries keep the list in alphabetical order
+
menu "Magnetometer sensors"
config AK8975
@@ -36,8 +38,8 @@ config IIO_ST_MAGN_3AXIS
Say yes here to build support for STMicroelectronics magnetometers:
LSM303DLHC, LSM303DLM, LIS3MDL.
- This driver can also be built as a module. If so, will be created
- these modules:
+ This driver can also be built as a module. If so, these modules
+ will be created:
- st_magn (core functions for the driver [it is mandatory]);
- st_magn_i2c (necessary for the I2C devices [optional*]);
- st_magn_spi (necessary for the SPI devices [optional*]);
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
index 7f328e37fba..f91b1b68d39 100644
--- a/drivers/iio/magnetometer/Makefile
+++ b/drivers/iio/magnetometer/Makefile
@@ -2,6 +2,7 @@
# Makefile for industrial I/O Magnetometer sensor drivers
#
+# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AK8975) += ak8975.o
obj-$(CONFIG_HID_SENSOR_MAGNETOMETER_3D) += hid-sensor-magn-3d.o
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index 99f4e494513..a98460b15e4 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -30,10 +30,6 @@
#include <linux/iio/triggered_buffer.h>
#include "../common/hid-sensors/hid-sensor-trigger.h"
-/*Format: HID-SENSOR-usage_id_in_hex*/
-/*Usage ID from spec for Magnetometer-3D: 0x200083*/
-#define DRIVER_NAME "HID-SENSOR-200083"
-
enum magn_3d_channel {
CHANNEL_SCAN_INDEX_X,
CHANNEL_SCAN_INDEX_Y,
@@ -180,18 +176,10 @@ static int magn_3d_write_raw(struct iio_dev *indio_dev,
return ret;
}
-static int magn_3d_write_raw_get_fmt(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- long mask)
-{
- return IIO_VAL_INT_PLUS_MICRO;
-}
-
static const struct iio_info magn_3d_info = {
.driver_module = THIS_MODULE,
.read_raw = &magn_3d_read_raw,
.write_raw = &magn_3d_write_raw,
- .write_raw_get_fmt = &magn_3d_write_raw_get_fmt,
};
/* Function to push data to buffer */
@@ -287,11 +275,11 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
struct iio_chan_spec *channels;
- indio_dev = iio_device_alloc(sizeof(struct magn_3d_state));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&pdev->dev,
+ sizeof(struct magn_3d_state));
+ if (indio_dev == NULL)
+ return -ENOMEM;
+
platform_set_drvdata(pdev, indio_dev);
magn_state = iio_priv(indio_dev);
@@ -303,15 +291,14 @@ static int hid_magn_3d_probe(struct platform_device *pdev)
&magn_state->common_attributes);
if (ret) {
dev_err(&pdev->dev, "failed to setup common attributes\n");
- goto error_free_dev;
+ return ret;
}
channels = kmemdup(magn_3d_channels, sizeof(magn_3d_channels),
GFP_KERNEL);
if (!channels) {
- ret = -ENOMEM;
dev_err(&pdev->dev, "failed to duplicate channels\n");
- goto error_free_dev;
+ return -ENOMEM;
}
ret = magn_3d_parse_report(pdev, hsdev, channels,
@@ -368,9 +355,6 @@ error_unreg_buffer_funcs:
iio_triggered_buffer_cleanup(indio_dev);
error_free_dev_mem:
kfree(indio_dev->channels);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -385,14 +369,23 @@ static int hid_magn_3d_remove(struct platform_device *pdev)
hid_sensor_remove_trigger(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
kfree(indio_dev->channels);
- iio_device_free(indio_dev);
return 0;
}
+static struct platform_device_id hid_magn_3d_ids[] = {
+ {
+ /* Format: HID-SENSOR-usage_id_in_hex_lowercase */
+ .name = "HID-SENSOR-200083",
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(platform, hid_magn_3d_ids);
+
static struct platform_driver hid_magn_3d_platform_driver = {
+ .id_table = hid_magn_3d_ids,
.driver = {
- .name = DRIVER_NAME,
+ .name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
.probe = hid_magn_3d_probe,
diff --git a/drivers/iio/magnetometer/st_magn.h b/drivers/iio/magnetometer/st_magn.h
index 7e81d00ef0c..694e33e0fb7 100644
--- a/drivers/iio/magnetometer/st_magn.h
+++ b/drivers/iio/magnetometer/st_magn.h
@@ -18,7 +18,8 @@
#define LSM303DLM_MAGN_DEV_NAME "lsm303dlm_magn"
#define LIS3MDL_MAGN_DEV_NAME "lis3mdl"
-int st_magn_common_probe(struct iio_dev *indio_dev);
+int st_magn_common_probe(struct iio_dev *indio_dev,
+ struct st_sensors_platform_data *pdata);
void st_magn_common_remove(struct iio_dev *indio_dev);
#ifdef CONFIG_IIO_BUFFER
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index ebfe8f11a0c..e8d2849cc81 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -345,7 +345,8 @@ static const struct iio_info magn_info = {
.write_raw = &st_magn_write_raw,
};
-int st_magn_common_probe(struct iio_dev *indio_dev)
+int st_magn_common_probe(struct iio_dev *indio_dev,
+ struct st_sensors_platform_data *pdata)
{
int err;
struct st_sensor_data *mdata = iio_priv(indio_dev);
@@ -367,7 +368,7 @@ int st_magn_common_probe(struct iio_dev *indio_dev)
&mdata->sensor->fs.fs_avl[0];
mdata->odr = mdata->sensor->odr.odr_avl[0].hz;
- err = st_sensors_init_sensor(indio_dev);
+ err = st_sensors_init_sensor(indio_dev, pdata);
if (err < 0)
goto st_magn_common_probe_error;
@@ -406,7 +407,6 @@ void st_magn_common_remove(struct iio_dev *indio_dev)
st_sensors_deallocate_trigger(indio_dev);
st_magn_deallocate_ring(indio_dev);
}
- iio_device_free(indio_dev);
}
EXPORT_SYMBOL(st_magn_common_remove);
diff --git a/drivers/iio/magnetometer/st_magn_i2c.c b/drivers/iio/magnetometer/st_magn_i2c.c
index e6adc4a8642..892e0feeb5c 100644
--- a/drivers/iio/magnetometer/st_magn_i2c.c
+++ b/drivers/iio/magnetometer/st_magn_i2c.c
@@ -25,27 +25,20 @@ static int st_magn_i2c_probe(struct i2c_client *client,
struct st_sensor_data *mdata;
int err;
- indio_dev = iio_device_alloc(sizeof(*mdata));
- if (indio_dev == NULL) {
- err = -ENOMEM;
- goto iio_device_alloc_error;
- }
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*mdata));
+ if (!indio_dev)
+ return -ENOMEM;
mdata = iio_priv(indio_dev);
mdata->dev = &client->dev;
st_sensors_i2c_configure(indio_dev, client, mdata);
- err = st_magn_common_probe(indio_dev);
+ err = st_magn_common_probe(indio_dev, NULL);
if (err < 0)
- goto st_magn_common_probe_error;
+ return err;
return 0;
-
-st_magn_common_probe_error:
- iio_device_free(indio_dev);
-iio_device_alloc_error:
- return err;
}
static int st_magn_i2c_remove(struct i2c_client *client)
diff --git a/drivers/iio/magnetometer/st_magn_spi.c b/drivers/iio/magnetometer/st_magn_spi.c
index 51adb797cb7..a6143ea51df 100644
--- a/drivers/iio/magnetometer/st_magn_spi.c
+++ b/drivers/iio/magnetometer/st_magn_spi.c
@@ -24,27 +24,20 @@ static int st_magn_spi_probe(struct spi_device *spi)
struct st_sensor_data *mdata;
int err;
- indio_dev = iio_device_alloc(sizeof(*mdata));
- if (indio_dev == NULL) {
- err = -ENOMEM;
- goto iio_device_alloc_error;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*mdata));
+ if (!indio_dev)
+ return -ENOMEM;
mdata = iio_priv(indio_dev);
mdata->dev = &spi->dev;
st_sensors_spi_configure(indio_dev, spi, mdata);
- err = st_magn_common_probe(indio_dev);
+ err = st_magn_common_probe(indio_dev, NULL);
if (err < 0)
- goto st_magn_common_probe_error;
+ return err;
return 0;
-
-st_magn_common_probe_error:
- iio_device_free(indio_dev);
-iio_device_alloc_error:
- return err;
}
static int st_magn_spi_remove(struct spi_device *spi)
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 9427f01e149..26fdc0bdb99 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -1,21 +1,23 @@
#
# Pressure drivers
#
-menu "Pressure Sensors"
+# When adding new entries keep the list in alphabetical order
+
+menu "Pressure sensors"
config IIO_ST_PRESS
- tristate "STMicroelectronics pressures Driver"
+ tristate "STMicroelectronics pressure sensor Driver"
depends on (I2C || SPI_MASTER) && SYSFS
select IIO_ST_SENSORS_CORE
select IIO_ST_PRESS_I2C if (I2C)
select IIO_ST_PRESS_SPI if (SPI_MASTER)
select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
help
- Say yes here to build support for STMicroelectronics pressures:
- LPS331AP.
+ Say yes here to build support for STMicroelectronics pressure
+ sensors: LPS331AP.
- This driver can also be built as a module. If so, will be created
- these modules:
+ This driver can also be built as a module. If so, these modules
+ will be created:
- st_pressure (core functions for the driver [it is mandatory]);
- st_pressure_i2c (necessary for the I2C devices [optional*]);
- st_pressure_spi (necessary for the SPI devices [optional*]);
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index d4bb33e5c84..be71464c275 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -2,6 +2,7 @@
# Makefile for industrial I/O pressure drivers
#
+# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_IIO_ST_PRESS) += st_pressure.o
st_pressure-y := st_pressure_core.o
st_pressure-$(CONFIG_IIO_BUFFER) += st_pressure_buffer.o
diff --git a/drivers/iio/pressure/st_pressure.h b/drivers/iio/pressure/st_pressure.h
index 414e45ac9b9..b0b630688da 100644
--- a/drivers/iio/pressure/st_pressure.h
+++ b/drivers/iio/pressure/st_pressure.h
@@ -16,7 +16,16 @@
#define LPS331AP_PRESS_DEV_NAME "lps331ap"
-int st_press_common_probe(struct iio_dev *indio_dev);
+/**
+ * struct st_sensors_platform_data - default press platform data
+ * @drdy_int_pin: default press DRDY is available on INT1 pin.
+ */
+static const struct st_sensors_platform_data default_press_pdata = {
+ .drdy_int_pin = 1,
+};
+
+int st_press_common_probe(struct iio_dev *indio_dev,
+ struct st_sensors_platform_data *pdata);
void st_press_common_remove(struct iio_dev *indio_dev);
#ifdef CONFIG_IIO_BUFFER
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 3ffbc56917b..ceebd3c2789 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -31,6 +31,9 @@
#define ST_PRESS_LSB_PER_MBAR 4096UL
#define ST_PRESS_KPASCAL_NANO_SCALE (100000000UL / \
ST_PRESS_LSB_PER_MBAR)
+#define ST_PRESS_LSB_PER_CELSIUS 480UL
+#define ST_PRESS_CELSIUS_NANO_SCALE (1000000000UL / \
+ ST_PRESS_LSB_PER_CELSIUS)
#define ST_PRESS_NUMBER_DATA_CHANNELS 1
/* DEFAULT VALUE FOR SENSORS */
@@ -53,12 +56,13 @@
#define ST_PRESS_1_FS_ADDR 0x23
#define ST_PRESS_1_FS_MASK 0x30
#define ST_PRESS_1_FS_AVL_1260_VAL 0x00
-#define ST_PRESS_1_FS_AVL_TEMP_GAIN 2083000
#define ST_PRESS_1_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
+#define ST_PRESS_1_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
#define ST_PRESS_1_BDU_ADDR 0x20
#define ST_PRESS_1_BDU_MASK 0x04
#define ST_PRESS_1_DRDY_IRQ_ADDR 0x22
-#define ST_PRESS_1_DRDY_IRQ_MASK 0x04
+#define ST_PRESS_1_DRDY_IRQ_INT1_MASK 0x04
+#define ST_PRESS_1_DRDY_IRQ_INT2_MASK 0x20
#define ST_PRESS_1_MULTIREAD_BIT true
#define ST_PRESS_1_TEMP_OFFSET 42500
@@ -116,7 +120,8 @@ static const struct st_sensors st_press_sensors[] = {
},
.drdy_irq = {
.addr = ST_PRESS_1_DRDY_IRQ_ADDR,
- .mask = ST_PRESS_1_DRDY_IRQ_MASK,
+ .mask_int1 = ST_PRESS_1_DRDY_IRQ_INT1_MASK,
+ .mask_int2 = ST_PRESS_1_DRDY_IRQ_INT2_MASK,
},
.multi_read_bit = ST_PRESS_1_MULTIREAD_BIT,
.bootime = 2,
@@ -202,7 +207,8 @@ static const struct iio_trigger_ops st_press_trigger_ops = {
#define ST_PRESS_TRIGGER_OPS NULL
#endif
-int st_press_common_probe(struct iio_dev *indio_dev)
+int st_press_common_probe(struct iio_dev *indio_dev,
+ struct st_sensors_platform_data *plat_data)
{
int err;
struct st_sensor_data *pdata = iio_priv(indio_dev);
@@ -224,7 +230,11 @@ int st_press_common_probe(struct iio_dev *indio_dev)
&pdata->sensor->fs.fs_avl[0];
pdata->odr = pdata->sensor->odr.odr_avl[0].hz;
- err = st_sensors_init_sensor(indio_dev);
+ if (!plat_data)
+ plat_data =
+ (struct st_sensors_platform_data *)&default_press_pdata;
+
+ err = st_sensors_init_sensor(indio_dev, plat_data);
if (err < 0)
goto st_press_common_probe_error;
@@ -265,7 +275,6 @@ void st_press_common_remove(struct iio_dev *indio_dev)
st_sensors_deallocate_trigger(indio_dev);
st_press_deallocate_ring(indio_dev);
}
- iio_device_free(indio_dev);
}
EXPORT_SYMBOL(st_press_common_remove);
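
st_press_common_probe() (like its st_magn counterpart earlier) now takes a struct st_sensors_platform_data argument: the I2C and SPI glue below forward dev.platform_data, and a NULL pointer falls back to default_press_pdata, which keeps data-ready on INT1. Combined with the split INT1/INT2 interrupt masks, board code can steer DRDY to the second pin. A minimal board-code sketch; the header path linux/platform_data/st_sensors_pdata.h and the 0x5c slave address are assumptions, not taken from this diff:

    #include <linux/i2c.h>
    #include <linux/platform_data/st_sensors_pdata.h>

    static struct st_sensors_platform_data lps331ap_pdata = {
            .drdy_int_pin = 2,      /* route data-ready to INT2 instead of INT1 */
    };

    static struct i2c_board_info lps331ap_info = {
            I2C_BOARD_INFO("lps331ap", 0x5c),
            .platform_data = &lps331ap_pdata,
    };
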
diff --git a/drivers/iio/pressure/st_pressure_i2c.c b/drivers/iio/pressure/st_pressure_i2c.c
index 7cebcc73bfb..08aac5e6251 100644
--- a/drivers/iio/pressure/st_pressure_i2c.c
+++ b/drivers/iio/pressure/st_pressure_i2c.c
@@ -25,27 +25,20 @@ static int st_press_i2c_probe(struct i2c_client *client,
struct st_sensor_data *pdata;
int err;
- indio_dev = iio_device_alloc(sizeof(*pdata));
- if (indio_dev == NULL) {
- err = -ENOMEM;
- goto iio_device_alloc_error;
- }
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*pdata));
+ if (!indio_dev)
+ return -ENOMEM;
pdata = iio_priv(indio_dev);
pdata->dev = &client->dev;
st_sensors_i2c_configure(indio_dev, client, pdata);
- err = st_press_common_probe(indio_dev);
+ err = st_press_common_probe(indio_dev, client->dev.platform_data);
if (err < 0)
- goto st_press_common_probe_error;
+ return err;
return 0;
-
-st_press_common_probe_error:
- iio_device_free(indio_dev);
-iio_device_alloc_error:
- return err;
}
static int st_press_i2c_remove(struct i2c_client *client)
diff --git a/drivers/iio/pressure/st_pressure_spi.c b/drivers/iio/pressure/st_pressure_spi.c
index 17a14907940..399a29b6017 100644
--- a/drivers/iio/pressure/st_pressure_spi.c
+++ b/drivers/iio/pressure/st_pressure_spi.c
@@ -24,27 +24,20 @@ static int st_press_spi_probe(struct spi_device *spi)
struct st_sensor_data *pdata;
int err;
- indio_dev = iio_device_alloc(sizeof(*pdata));
- if (indio_dev == NULL) {
- err = -ENOMEM;
- goto iio_device_alloc_error;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*pdata));
+ if (indio_dev == NULL)
+ return -ENOMEM;
pdata = iio_priv(indio_dev);
pdata->dev = &spi->dev;
st_sensors_spi_configure(indio_dev, spi, pdata);
- err = st_press_common_probe(indio_dev);
+ err = st_press_common_probe(indio_dev, spi->dev.platform_data);
if (err < 0)
- goto st_press_common_probe_error;
+ return err;
return 0;
-
-st_press_common_probe_error:
- iio_device_free(indio_dev);
-iio_device_alloc_error:
- return err;
}
static int st_press_spi_remove(struct spi_device *spi)
diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig
new file mode 100644
index 00000000000..372f8fb3085
--- /dev/null
+++ b/drivers/iio/temperature/Kconfig
@@ -0,0 +1,16 @@
+#
+# Temperature sensor drivers
+#
+menu "Temperature sensors"
+
+config TMP006
+ tristate "TMP006 infrared thermopile sensor"
+ depends on I2C
+ help
+ If you say yes here you get support for the Texas Instruments
+ TMP006 infrared thermopile sensor.
+
+ This driver can also be built as a module. If so, the module will
+ be called tmp006.
+
+endmenu
diff --git a/drivers/iio/temperature/Makefile b/drivers/iio/temperature/Makefile
new file mode 100644
index 00000000000..24d7b602db3
--- /dev/null
+++ b/drivers/iio/temperature/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for industrial I/O temperature drivers
+#
+
+obj-$(CONFIG_TMP006) += tmp006.o
diff --git a/drivers/iio/temperature/tmp006.c b/drivers/iio/temperature/tmp006.c
new file mode 100644
index 00000000000..64ccde3f1f7
--- /dev/null
+++ b/drivers/iio/temperature/tmp006.c
@@ -0,0 +1,291 @@
+/*
+ * tmp006.c - Support for TI TMP006 IR thermopile sensor
+ *
+ * Copyright (c) 2013 Peter Meerwald <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Driver for the Texas Instruments I2C 16-bit IR thermopile sensor
+ *
+ * (7-bit I2C slave address 0x40, changeable via ADR pins)
+ *
+ * TODO: data ready irq
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/bitops.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define TMP006_VOBJECT 0x00
+#define TMP006_TAMBIENT 0x01
+#define TMP006_CONFIG 0x02
+#define TMP006_MANUFACTURER_ID 0xfe
+#define TMP006_DEVICE_ID 0xff
+
+#define TMP006_TAMBIENT_SHIFT 2
+
+#define TMP006_CONFIG_RESET BIT(15)
+#define TMP006_CONFIG_DRDY_EN BIT(8)
+#define TMP006_CONFIG_DRDY BIT(7)
+
+#define TMP006_CONFIG_MOD_MASK 0x7000
+
+#define TMP006_CONFIG_CR_MASK 0x0e00
+#define TMP006_CONFIG_CR_SHIFT 9
+
+#define MANUFACTURER_MAGIC 0x5449
+#define DEVICE_MAGIC 0x0067
+
+struct tmp006_data {
+ struct i2c_client *client;
+ u16 config;
+};
+
+static int tmp006_read_measurement(struct tmp006_data *data, u8 reg)
+{
+ s32 ret;
+ int tries = 50;
+
+ while (tries-- > 0) {
+ ret = i2c_smbus_read_word_swapped(data->client,
+ TMP006_CONFIG);
+ if (ret < 0)
+ return ret;
+ if (ret & TMP006_CONFIG_DRDY)
+ break;
+ msleep(100);
+ }
+
+ if (tries < 0)
+ return -EIO;
+
+ return i2c_smbus_read_word_swapped(data->client, reg);
+}
+
+static int tmp006_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *channel, int *val,
+ int *val2, long mask)
+{
+ struct tmp006_data *data = iio_priv(indio_dev);
+ s32 ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (channel->type == IIO_VOLTAGE) {
+ /* LSB is 156.25 nV */
+ ret = tmp006_read_measurement(data, TMP006_VOBJECT);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(ret, 15);
+ } else if (channel->type == IIO_TEMP) {
+ /* LSB is 0.03125 degrees Celsius */
+ ret = tmp006_read_measurement(data, TMP006_TAMBIENT);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(ret, 15) >> TMP006_TAMBIENT_SHIFT;
+ } else {
+ break;
+ }
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ if (channel->type == IIO_VOLTAGE) {
+ *val = 0;
+ *val2 = 156250;
+ } else if (channel->type == IIO_TEMP) {
+ *val = 31;
+ *val2 = 250000;
+ } else {
+ break;
+ }
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const char * const tmp006_freqs[] = { "4", "2", "1", "0.5", "0.25" };
+
+static ssize_t tmp006_show_freq(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tmp006_data *data = iio_priv(dev_to_iio_dev(dev));
+ int cr = (data->config & TMP006_CONFIG_CR_MASK)
+ >> TMP006_CONFIG_CR_SHIFT;
+ return sprintf(buf, "%s\n", tmp006_freqs[cr]);
+}
+
+static ssize_t tmp006_store_freq(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct tmp006_data *data = iio_priv(indio_dev);
+ int i;
+ bool found = false;
+
+ for (i = 0; i < ARRAY_SIZE(tmp006_freqs); i++)
+ if (sysfs_streq(buf, tmp006_freqs[i])) {
+ found = true;
+ break;
+ }
+ if (!found)
+ return -EINVAL;
+
+ data->config &= ~TMP006_CONFIG_CR_MASK;
+ data->config |= i << TMP006_CONFIG_CR_SHIFT;
+
+ return i2c_smbus_write_word_swapped(data->client, TMP006_CONFIG,
+ data->config);
+}
+
+static IIO_DEV_ATTR_SAMP_FREQ(S_IRUGO | S_IWUSR,
+ tmp006_show_freq, tmp006_store_freq);
+
+static IIO_CONST_ATTR(sampling_frequency_available, "4 2 1 0.5 0.25");
+
+static struct attribute *tmp006_attributes[] = {
+ &iio_dev_attr_sampling_frequency.dev_attr.attr,
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group tmp006_attribute_group = {
+ .attrs = tmp006_attributes,
+};
+
+static const struct iio_chan_spec tmp006_channels[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ }
+};
+
+static const struct iio_info tmp006_info = {
+ .read_raw = tmp006_read_raw,
+ .attrs = &tmp006_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+static bool tmp006_check_identification(struct i2c_client *client)
+{
+ int mid, did;
+
+ mid = i2c_smbus_read_word_swapped(client, TMP006_MANUFACTURER_ID);
+ if (mid < 0)
+ return false;
+
+ did = i2c_smbus_read_word_swapped(client, TMP006_DEVICE_ID);
+ if (did < 0)
+ return false;
+
+ return mid == MANUFACTURER_MAGIC && did == DEVICE_MAGIC;
+}
+
+static int tmp006_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct tmp006_data *data;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
+ return -ENODEV;
+
+ if (!tmp006_check_identification(client)) {
+ dev_err(&client->dev, "no TMP006 sensor\n");
+ return -ENODEV;
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client = client;
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->name = dev_name(&client->dev);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &tmp006_info;
+
+ indio_dev->channels = tmp006_channels;
+ indio_dev->num_channels = ARRAY_SIZE(tmp006_channels);
+
+ ret = i2c_smbus_read_word_swapped(data->client, TMP006_CONFIG);
+ if (ret < 0)
+ return ret;
+ data->config = ret;
+
+ return iio_device_register(indio_dev);
+}
+
+static int tmp006_powerdown(struct tmp006_data *data)
+{
+ return i2c_smbus_write_word_swapped(data->client, TMP006_CONFIG,
+ data->config & ~TMP006_CONFIG_MOD_MASK);
+}
+
+static int tmp006_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ tmp006_powerdown(iio_priv(indio_dev));
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tmp006_suspend(struct device *dev)
+{
+ return tmp006_powerdown(iio_priv(dev_to_iio_dev(dev)));
+}
+
+static int tmp006_resume(struct device *dev)
+{
+ struct tmp006_data *data = iio_priv(dev_to_iio_dev(dev));
+ return i2c_smbus_write_word_swapped(data->client, TMP006_CONFIG,
+ data->config | TMP006_CONFIG_MOD_MASK);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(tmp006_pm_ops, tmp006_suspend, tmp006_resume);
+
+static const struct i2c_device_id tmp006_id[] = {
+ { "tmp006", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tmp006_id);
+
+static struct i2c_driver tmp006_driver = {
+ .driver = {
+ .name = "tmp006",
+ .pm = &tmp006_pm_ops,
+ .owner = THIS_MODULE,
+ },
+ .probe = tmp006_probe,
+ .remove = tmp006_remove,
+ .id_table = tmp006_id,
+};
+module_i2c_driver(tmp006_driver);
+
+MODULE_AUTHOR("Peter Meerwald <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("TI TMP006 IR thermopile sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/trigger/Kconfig b/drivers/iio/trigger/Kconfig
index 360fd508b08..79996123a71 100644
--- a/drivers/iio/trigger/Kconfig
+++ b/drivers/iio/trigger/Kconfig
@@ -1,6 +1,8 @@
#
# Industrial I/O standalone triggers
#
+# When adding new entries keep the list in alphabetical order
+
menu "Triggers - standalone"
config IIO_INTERRUPT_TRIGGER
@@ -17,7 +19,7 @@ config IIO_SYSFS_TRIGGER
depends on SYSFS
select IRQ_WORK
help
- Provides support for using SYSFS entry as IIO triggers.
+ Provides support for using SYSFS entries as IIO triggers.
If unsure, say N (but it's safe to say "Y").
To compile this driver as a module, choose M here: the
diff --git a/drivers/iio/trigger/Makefile b/drivers/iio/trigger/Makefile
index ce319a51b6a..0694daecaf2 100644
--- a/drivers/iio/trigger/Makefile
+++ b/drivers/iio/trigger/Makefile
@@ -2,5 +2,6 @@
# Makefile for triggers not associated with iio-devices
#
+# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_IIO_INTERRUPT_TRIGGER) += iio-trig-interrupt.o
obj-$(CONFIG_IIO_SYSFS_TRIGGER) += iio-trig-sysfs.o
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f1c279fabe6..dab4b41f171 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -423,7 +423,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
struct sockaddr_ib *addr;
union ib_gid gid, sgid, *dgid;
u16 pkey, index;
- u8 port, p;
+ u8 p;
int i;
cma_dev = NULL;
@@ -443,7 +443,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
if (!memcmp(&gid, dgid, sizeof(gid))) {
cma_dev = cur_dev;
sgid = gid;
- port = p;
+ id_priv->id.port_num = p;
goto found;
}
@@ -451,7 +451,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
dgid->global.subnet_prefix)) {
cma_dev = cur_dev;
sgid = gid;
- port = p;
+ id_priv->id.port_num = p;
}
}
}
@@ -462,7 +462,6 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
found:
cma_attach_to_dev(id_priv, cma_dev);
- id_priv->id.port_num = port;
addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
memcpy(&addr->sib_addr, &sgid, sizeof sgid);
cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
@@ -880,7 +879,8 @@ static int cma_save_net_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
{
struct cma_hdr *hdr;
- if (listen_id->route.addr.src_addr.ss_family == AF_IB) {
+ if ((listen_id->route.addr.src_addr.ss_family == AF_IB) &&
+ (ib_event->event == IB_CM_REQ_RECEIVED)) {
cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path);
return 0;
}
@@ -1385,8 +1385,9 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
{
struct rdma_id_private *id_priv = iw_id->context;
struct rdma_cm_event event;
- struct sockaddr_in *sin;
int ret = 0;
+ struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
+ struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
return 0;
@@ -1397,10 +1398,10 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
event.event = RDMA_CM_EVENT_DISCONNECTED;
break;
case IW_CM_EVENT_CONNECT_REPLY:
- sin = (struct sockaddr_in *) cma_src_addr(id_priv);
- *sin = iw_event->local_addr;
- sin = (struct sockaddr_in *) cma_dst_addr(id_priv);
- *sin = iw_event->remote_addr;
+ memcpy(cma_src_addr(id_priv), laddr,
+ rdma_addr_size(laddr));
+ memcpy(cma_dst_addr(id_priv), raddr,
+ rdma_addr_size(raddr));
switch (iw_event->status) {
case 0:
event.event = RDMA_CM_EVENT_ESTABLISHED;
@@ -1450,11 +1451,12 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
{
struct rdma_cm_id *new_cm_id;
struct rdma_id_private *listen_id, *conn_id;
- struct sockaddr_in *sin;
struct net_device *dev = NULL;
struct rdma_cm_event event;
int ret;
struct ib_device_attr attr;
+ struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
+ struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
listen_id = cm_id->context;
if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
@@ -1472,14 +1474,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
conn_id->state = RDMA_CM_CONNECT;
- dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
- if (!dev) {
- ret = -EADDRNOTAVAIL;
- mutex_unlock(&conn_id->handler_mutex);
- rdma_destroy_id(new_cm_id);
- goto out;
- }
- ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
+ ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
if (ret) {
mutex_unlock(&conn_id->handler_mutex);
rdma_destroy_id(new_cm_id);
@@ -1497,10 +1492,8 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
cm_id->context = conn_id;
cm_id->cm_handler = cma_iw_handler;
- sin = (struct sockaddr_in *) cma_src_addr(conn_id);
- *sin = iw_event->local_addr;
- sin = (struct sockaddr_in *) cma_dst_addr(conn_id);
- *sin = iw_event->remote_addr;
+ memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
+ memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
ret = ib_query_device(conn_id->id.device, &attr);
if (ret) {
@@ -1576,7 +1569,6 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
{
int ret;
- struct sockaddr_in *sin;
struct iw_cm_id *id;
id = iw_create_cm_id(id_priv->id.device,
@@ -1587,8 +1579,8 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
id_priv->cm_id.iw = id;
- sin = (struct sockaddr_in *) cma_src_addr(id_priv);
- id_priv->cm_id.iw->local_addr = *sin;
+ memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
+ rdma_addr_size(cma_src_addr(id_priv)));
ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
@@ -2677,29 +2669,32 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
{
struct ib_cm_sidr_req_param req;
struct ib_cm_id *id;
+ void *private_data;
int offset, ret;
+ memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv);
req.private_data_len = offset + conn_param->private_data_len;
if (req.private_data_len < conn_param->private_data_len)
return -EINVAL;
if (req.private_data_len) {
- req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
- if (!req.private_data)
+ private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
+ if (!private_data)
return -ENOMEM;
} else {
- req.private_data = NULL;
+ private_data = NULL;
}
if (conn_param->private_data && conn_param->private_data_len)
- memcpy((void *) req.private_data + offset,
- conn_param->private_data, conn_param->private_data_len);
+ memcpy(private_data + offset, conn_param->private_data,
+ conn_param->private_data_len);
- if (req.private_data) {
- ret = cma_format_hdr((void *) req.private_data, id_priv);
+ if (private_data) {
+ ret = cma_format_hdr(private_data, id_priv);
if (ret)
goto out;
+ req.private_data = private_data;
}
id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
@@ -2721,7 +2716,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
id_priv->cm_id.ib = NULL;
}
out:
- kfree(req.private_data);
+ kfree(private_data);
return ret;
}
@@ -2800,7 +2795,6 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
struct rdma_conn_param *conn_param)
{
struct iw_cm_id *cm_id;
- struct sockaddr_in* sin;
int ret;
struct iw_cm_conn_param iw_param;
@@ -2810,11 +2804,10 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
id_priv->cm_id.iw = cm_id;
- sin = (struct sockaddr_in *) cma_src_addr(id_priv);
- cm_id->local_addr = *sin;
-
- sin = (struct sockaddr_in *) cma_dst_addr(id_priv);
- cm_id->remote_addr = *sin;
+ memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
+ rdma_addr_size(cma_src_addr(id_priv)));
+ memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
+ rdma_addr_size(cma_dst_addr(id_priv)));
ret = cma_modify_qp_rtr(id_priv, conn_param);
if (ret)
@@ -3205,7 +3198,7 @@ static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
id_priv->id.port_num, &rec,
comp_mask, GFP_KERNEL,
cma_ib_mc_handler, mc);
- return PTR_RET(mc->multicast.ib);
+ return PTR_ERR_OR_ZERO(mc->multicast.ib);
}
static void iboe_mcast_work_handler(struct work_struct *work)
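
The cma.c changes drop the assumption that iWARP addresses are struct sockaddr_in: local and remote addresses are copied as opaque sockaddrs sized by rdma_addr_size(), and the IB path records the port number directly in id_priv, which is what the later AF_IB and IPv6 handling builds on. A minimal sketch of the copy idiom; copy_cm_addr() is a made-up helper name:

    #include <linux/string.h>
    #include <linux/socket.h>
    #include <rdma/ib_addr.h>

    /*
     * Old style, IPv4 only:
     *         *(struct sockaddr_in *)dst = *(struct sockaddr_in *)src;
     * New style: copy exactly as many bytes as the address family needs
     * (AF_INET, AF_INET6 or AF_IB).
     */
    static void copy_cm_addr(struct sockaddr_storage *dst, struct sockaddr *src)
    {
            memcpy(dst, src, rdma_addr_size(src));
    }
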
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index dc3fd1e8af0..4c837e66516 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -2663,6 +2663,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
int ret, i;
struct ib_qp_attr *attr;
struct ib_qp *qp;
+ u16 pkey_index;
attr = kmalloc(sizeof *attr, GFP_KERNEL);
if (!attr) {
@@ -2670,6 +2671,11 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
return -ENOMEM;
}
+ ret = ib_find_pkey(port_priv->device, port_priv->port_num,
+ IB_DEFAULT_PKEY_FULL, &pkey_index);
+ if (ret)
+ pkey_index = 0;
+
for (i = 0; i < IB_MAD_QPS_CORE; i++) {
qp = port_priv->qp_info[i].qp;
if (!qp)
@@ -2680,7 +2686,7 @@ static int ib_mad_port_start(struct ib_mad_port_private *port_priv)
* one is needed for the Reset to Init transition
*/
attr->qp_state = IB_QPS_INIT;
- attr->pkey_index = 0;
+ attr->pkey_index = pkey_index;
attr->qkey = (qp->qp_num == 0) ? 0 : IB_QP1_QKEY;
ret = ib_modify_qp(qp, attr, IB_QP_STATE |
IB_QP_PKEY_INDEX | IB_QP_QKEY);
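
The MAD change starts QP0/QP1 on the full-membership default P_Key when the port has one, instead of blindly using P_Key index 0. A minimal sketch of that lookup-with-fallback, assuming IB_DEFAULT_PKEY_FULL comes from rdma/ib_mad.h as it does in the MAD layer:

    #include <rdma/ib_verbs.h>
    #include <rdma/ib_mad.h>

    /* prefer the full-membership default P_Key (0xffff); fall back to index 0 */
    static u16 default_pkey_index(struct ib_device *dev, u8 port)
    {
            u16 index;

            if (ib_find_pkey(dev, port, IB_DEFAULT_PKEY_FULL, &index))
                    return 0;
            return index;
    }
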
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index 0fcd7aa26fa..d040b877475 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -135,6 +135,7 @@ struct ib_usrq_object {
struct ib_uqp_object {
struct ib_uevent_object uevent;
struct list_head mcast_list;
+ struct ib_uxrcd_object *uxrcd;
};
struct ib_ucq_object {
@@ -155,6 +156,7 @@ extern struct idr ib_uverbs_cq_idr;
extern struct idr ib_uverbs_qp_idr;
extern struct idr ib_uverbs_srq_idr;
extern struct idr ib_uverbs_xrcd_idr;
+extern struct idr ib_uverbs_rule_idr;
void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj);
@@ -215,5 +217,7 @@ IB_UVERBS_DECLARE_CMD(destroy_srq);
IB_UVERBS_DECLARE_CMD(create_xsrq);
IB_UVERBS_DECLARE_CMD(open_xrcd);
IB_UVERBS_DECLARE_CMD(close_xrcd);
+IB_UVERBS_DECLARE_CMD(create_flow);
+IB_UVERBS_DECLARE_CMD(destroy_flow);
#endif /* UVERBS_H */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index b3c07b0c9f2..f2b81b9ee0d 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -54,6 +54,7 @@ static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
+static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
do { \
@@ -330,6 +331,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
INIT_LIST_HEAD(&ucontext->srq_list);
INIT_LIST_HEAD(&ucontext->ah_list);
INIT_LIST_HEAD(&ucontext->xrcd_list);
+ INIT_LIST_HEAD(&ucontext->rule_list);
ucontext->closing = 0;
resp.num_comp_vectors = file->device->num_comp_vectors;
@@ -1526,7 +1528,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
(unsigned long) cmd.response + sizeof resp,
in_len - sizeof cmd, out_len - sizeof resp);
- obj = kmalloc(sizeof *obj, GFP_KERNEL);
+ obj = kzalloc(sizeof *obj, GFP_KERNEL);
if (!obj)
return -ENOMEM;
@@ -1642,8 +1644,13 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
goto err_copy;
}
- if (xrcd)
+ if (xrcd) {
+ obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
+ uobject);
+ atomic_inc(&obj->uxrcd->refcnt);
put_xrcd_read(xrcd_uobj);
+ }
+
if (pd)
put_pd_read(pd);
if (scq)
@@ -1753,6 +1760,8 @@ ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
goto err_remove;
}
+ obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
+ atomic_inc(&obj->uxrcd->refcnt);
put_xrcd_read(xrcd_uobj);
mutex_lock(&file->mutex);
@@ -2019,6 +2028,9 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
if (ret)
return ret;
+ if (obj->uxrcd)
+ atomic_dec(&obj->uxrcd->refcnt);
+
idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
mutex_lock(&file->mutex);
@@ -2587,6 +2599,232 @@ out_put:
return ret ? ret : in_len;
}
+static int kern_spec_to_ib_spec(struct ib_kern_spec *kern_spec,
+ union ib_flow_spec *ib_spec)
+{
+ ib_spec->type = kern_spec->type;
+
+ switch (ib_spec->type) {
+ case IB_FLOW_SPEC_ETH:
+ ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
+ if (ib_spec->eth.size != kern_spec->eth.size)
+ return -EINVAL;
+ memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
+ sizeof(struct ib_flow_eth_filter));
+ memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
+ sizeof(struct ib_flow_eth_filter));
+ break;
+ case IB_FLOW_SPEC_IPV4:
+ ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
+ if (ib_spec->ipv4.size != kern_spec->ipv4.size)
+ return -EINVAL;
+ memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
+ sizeof(struct ib_flow_ipv4_filter));
+ memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
+ sizeof(struct ib_flow_ipv4_filter));
+ break;
+ case IB_FLOW_SPEC_TCP:
+ case IB_FLOW_SPEC_UDP:
+ ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
+ if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
+ return -EINVAL;
+ memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
+ sizeof(struct ib_flow_tcp_udp_filter));
+ memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
+ sizeof(struct ib_flow_tcp_udp_filter));
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+ssize_t ib_uverbs_create_flow(struct ib_uverbs_file *file,
+ const char __user *buf, int in_len,
+ int out_len)
+{
+ struct ib_uverbs_create_flow cmd;
+ struct ib_uverbs_create_flow_resp resp;
+ struct ib_uobject *uobj;
+ struct ib_flow *flow_id;
+ struct ib_kern_flow_attr *kern_flow_attr;
+ struct ib_flow_attr *flow_attr;
+ struct ib_qp *qp;
+ int err = 0;
+ void *kern_spec;
+ void *ib_spec;
+ int i;
+ int kern_attr_size;
+
+ if (out_len < sizeof(resp))
+ return -ENOSPC;
+
+ if (copy_from_user(&cmd, buf, sizeof(cmd)))
+ return -EFAULT;
+
+ if (cmd.comp_mask)
+ return -EINVAL;
+
+ if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
+ !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
+ return -EPERM;
+
+ if (cmd.flow_attr.num_of_specs < 0 ||
+ cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
+ return -EINVAL;
+
+ kern_attr_size = cmd.flow_attr.size - sizeof(cmd) -
+ sizeof(struct ib_uverbs_cmd_hdr_ex);
+
+ if (cmd.flow_attr.size < 0 || cmd.flow_attr.size > in_len ||
+ kern_attr_size < 0 || kern_attr_size >
+ (cmd.flow_attr.num_of_specs * sizeof(struct ib_kern_spec)))
+ return -EINVAL;
+
+ if (cmd.flow_attr.num_of_specs) {
+ kern_flow_attr = kmalloc(cmd.flow_attr.size, GFP_KERNEL);
+ if (!kern_flow_attr)
+ return -ENOMEM;
+
+ memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
+ if (copy_from_user(kern_flow_attr + 1, buf + sizeof(cmd),
+ kern_attr_size)) {
+ err = -EFAULT;
+ goto err_free_attr;
+ }
+ } else {
+ kern_flow_attr = &cmd.flow_attr;
+ kern_attr_size = sizeof(cmd.flow_attr);
+ }
+
+ uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
+ if (!uobj) {
+ err = -ENOMEM;
+ goto err_free_attr;
+ }
+ init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
+ down_write(&uobj->mutex);
+
+ qp = idr_read_qp(cmd.qp_handle, file->ucontext);
+ if (!qp) {
+ err = -EINVAL;
+ goto err_uobj;
+ }
+
+ flow_attr = kmalloc(cmd.flow_attr.size, GFP_KERNEL);
+ if (!flow_attr) {
+ err = -ENOMEM;
+ goto err_put;
+ }
+
+ flow_attr->type = kern_flow_attr->type;
+ flow_attr->priority = kern_flow_attr->priority;
+ flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
+ flow_attr->port = kern_flow_attr->port;
+ flow_attr->flags = kern_flow_attr->flags;
+ flow_attr->size = sizeof(*flow_attr);
+
+ kern_spec = kern_flow_attr + 1;
+ ib_spec = flow_attr + 1;
+ for (i = 0; i < flow_attr->num_of_specs && kern_attr_size > 0; i++) {
+ err = kern_spec_to_ib_spec(kern_spec, ib_spec);
+ if (err)
+ goto err_free;
+ flow_attr->size +=
+ ((union ib_flow_spec *) ib_spec)->size;
+ kern_attr_size -= ((struct ib_kern_spec *) kern_spec)->size;
+ kern_spec += ((struct ib_kern_spec *) kern_spec)->size;
+ ib_spec += ((union ib_flow_spec *) ib_spec)->size;
+ }
+ if (kern_attr_size) {
+ pr_warn("create flow failed, %d bytes left from uverb cmd\n",
+ kern_attr_size);
+ err = -EINVAL;
+ goto err_free;
+ }
+ flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
+ if (IS_ERR(flow_id)) {
+ err = PTR_ERR(flow_id);
+ goto err_free;
+ }
+ flow_id->qp = qp;
+ flow_id->uobject = uobj;
+ uobj->object = flow_id;
+
+ err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
+ if (err)
+ goto destroy_flow;
+
+ memset(&resp, 0, sizeof(resp));
+ resp.flow_handle = uobj->id;
+
+ if (copy_to_user((void __user *)(unsigned long) cmd.response,
+ &resp, sizeof(resp))) {
+ err = -EFAULT;
+ goto err_copy;
+ }
+
+ put_qp_read(qp);
+ mutex_lock(&file->mutex);
+ list_add_tail(&uobj->list, &file->ucontext->rule_list);
+ mutex_unlock(&file->mutex);
+
+ uobj->live = 1;
+
+ up_write(&uobj->mutex);
+ kfree(flow_attr);
+ if (cmd.flow_attr.num_of_specs)
+ kfree(kern_flow_attr);
+ return in_len;
+err_copy:
+ idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
+destroy_flow:
+ ib_destroy_flow(flow_id);
+err_free:
+ kfree(flow_attr);
+err_put:
+ put_qp_read(qp);
+err_uobj:
+ put_uobj_write(uobj);
+err_free_attr:
+ if (cmd.flow_attr.num_of_specs)
+ kfree(kern_flow_attr);
+ return err;
+}
+
+ssize_t ib_uverbs_destroy_flow(struct ib_uverbs_file *file,
+ const char __user *buf, int in_len,
+ int out_len) {
+ struct ib_uverbs_destroy_flow cmd;
+ struct ib_flow *flow_id;
+ struct ib_uobject *uobj;
+ int ret;
+
+ if (copy_from_user(&cmd, buf, sizeof(cmd)))
+ return -EFAULT;
+
+ uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
+ file->ucontext);
+ if (!uobj)
+ return -EINVAL;
+ flow_id = uobj->object;
+
+ ret = ib_destroy_flow(flow_id);
+ if (!ret)
+ uobj->live = 0;
+
+ put_uobj_write(uobj);
+
+ idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
+
+ mutex_lock(&file->mutex);
+ list_del(&uobj->list);
+ mutex_unlock(&file->mutex);
+
+ put_uobj(uobj);
+
+ return ret ? ret : in_len;
+}
+
static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
struct ib_uverbs_create_xsrq *cmd,
struct ib_udata *udata)
@@ -2860,6 +3098,8 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
struct ib_srq *srq;
struct ib_uevent_object *obj;
int ret = -EINVAL;
+ struct ib_usrq_object *us;
+ enum ib_srq_type srq_type;
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
@@ -2869,6 +3109,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
return -EINVAL;
srq = uobj->object;
obj = container_of(uobj, struct ib_uevent_object, uobject);
+ srq_type = srq->srq_type;
ret = ib_destroy_srq(srq);
if (!ret)
@@ -2879,6 +3120,11 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
if (ret)
return ret;
+ if (srq_type == IB_SRQT_XRC) {
+ us = container_of(obj, struct ib_usrq_object, uevent);
+ atomic_dec(&us->uxrcd->refcnt);
+ }
+
idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
mutex_lock(&file->mutex);
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 2c6f0f2ecd9..75ad86c4abf 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -73,6 +73,7 @@ DEFINE_IDR(ib_uverbs_cq_idr);
DEFINE_IDR(ib_uverbs_qp_idr);
DEFINE_IDR(ib_uverbs_srq_idr);
DEFINE_IDR(ib_uverbs_xrcd_idr);
+DEFINE_IDR(ib_uverbs_rule_idr);
static DEFINE_SPINLOCK(map_lock);
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
@@ -113,7 +114,9 @@ static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
[IB_USER_VERBS_CMD_OPEN_XRCD] = ib_uverbs_open_xrcd,
[IB_USER_VERBS_CMD_CLOSE_XRCD] = ib_uverbs_close_xrcd,
[IB_USER_VERBS_CMD_CREATE_XSRQ] = ib_uverbs_create_xsrq,
- [IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp
+ [IB_USER_VERBS_CMD_OPEN_QP] = ib_uverbs_open_qp,
+ [IB_USER_VERBS_CMD_CREATE_FLOW] = ib_uverbs_create_flow,
+ [IB_USER_VERBS_CMD_DESTROY_FLOW] = ib_uverbs_destroy_flow
};
static void ib_uverbs_add_one(struct ib_device *device);
@@ -212,6 +215,14 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
kfree(uobj);
}
+ list_for_each_entry_safe(uobj, tmp, &context->rule_list, list) {
+ struct ib_flow *flow_id = uobj->object;
+
+ idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
+ ib_destroy_flow(flow_id);
+ kfree(uobj);
+ }
+
list_for_each_entry_safe(uobj, tmp, &context->qp_list, list) {
struct ib_qp *qp = uobj->object;
struct ib_uqp_object *uqp =
@@ -583,9 +594,6 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
if (copy_from_user(&hdr, buf, sizeof hdr))
return -EFAULT;
- if (hdr.in_words * 4 != count)
- return -EINVAL;
-
if (hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
!uverbs_cmd_table[hdr.command])
return -EINVAL;
@@ -597,8 +605,30 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
return -ENOSYS;
- return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr,
- hdr.in_words * 4, hdr.out_words * 4);
+ if (hdr.command >= IB_USER_VERBS_CMD_THRESHOLD) {
+ struct ib_uverbs_cmd_hdr_ex hdr_ex;
+
+ if (copy_from_user(&hdr_ex, buf, sizeof(hdr_ex)))
+ return -EFAULT;
+
+ if (((hdr_ex.in_words + hdr_ex.provider_in_words) * 4) != count)
+ return -EINVAL;
+
+ return uverbs_cmd_table[hdr.command](file,
+ buf + sizeof(hdr_ex),
+ (hdr_ex.in_words +
+ hdr_ex.provider_in_words) * 4,
+ (hdr_ex.out_words +
+ hdr_ex.provider_out_words) * 4);
+ } else {
+ if (hdr.in_words * 4 != count)
+ return -EINVAL;
+
+ return uverbs_cmd_table[hdr.command](file,
+ buf + sizeof(hdr),
+ hdr.in_words * 4,
+ hdr.out_words * 4);
+ }
}
static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
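
The write() dispatcher now distinguishes ordinary commands from extended ones: at or above IB_USER_VERBS_CMD_THRESHOLD a second header supplies separate core and provider word counts, and the byte count of the write must equal their sum times four. A tiny sketch of that length rule as a standalone check; check_ex_length() is a made-up helper:

    #include <linux/errno.h>
    #include <linux/types.h>

    static int check_ex_length(u32 in_words, u32 provider_in_words, size_t count)
    {
            if ((in_words + provider_in_words) * 4 != count)
                    return -EINVAL;
            return 0;
    }
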
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 22192deb882..a321df28bab 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -346,10 +346,13 @@ EXPORT_SYMBOL(ib_destroy_srq);
static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
struct ib_qp *qp = context;
+ unsigned long flags;
+ spin_lock_irqsave(&qp->device->event_handler_lock, flags);
list_for_each_entry(event->element.qp, &qp->open_list, open_list)
if (event->element.qp->event_handler)
event->element.qp->event_handler(event, event->element.qp->qp_context);
+ spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}
static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
@@ -1254,3 +1257,30 @@ int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);
+
+struct ib_flow *ib_create_flow(struct ib_qp *qp,
+ struct ib_flow_attr *flow_attr,
+ int domain)
+{
+ struct ib_flow *flow_id;
+ if (!qp->device->create_flow)
+ return ERR_PTR(-ENOSYS);
+
+ flow_id = qp->device->create_flow(qp, flow_attr, domain);
+ if (!IS_ERR(flow_id))
+ atomic_inc(&qp->usecnt);
+ return flow_id;
+}
+EXPORT_SYMBOL(ib_create_flow);
+
+int ib_destroy_flow(struct ib_flow *flow_id)
+{
+ int err;
+ struct ib_qp *qp = flow_id->qp;
+
+ err = qp->device->destroy_flow(flow_id);
+ if (!err)
+ atomic_dec(&qp->usecnt);
+ return err;
+}
+EXPORT_SYMBOL(ib_destroy_flow);
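
ib_create_flow()/ib_destroy_flow() are the new in-kernel entry points for flow steering: they forward to the provider's create_flow/destroy_flow methods and keep the QP's usecnt balanced. A minimal usage sketch mirroring what the uverbs layer above does; try_flow() is a made-up name and flow_attr is assumed to have been built the way ib_uverbs_create_flow() builds it:

    #include <linux/err.h>
    #include <rdma/ib_verbs.h>

    static int try_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr)
    {
            struct ib_flow *flow;

            flow = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
            if (IS_ERR(flow))
                    return PTR_ERR(flow);   /* -ENOSYS if the device lacks support */

            /* ... traffic matching flow_attr is now steered to qp ... */

            return ib_destroy_flow(flow);
    }
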
diff --git a/drivers/infiniband/hw/amso1100/c2_ae.c b/drivers/infiniband/hw/amso1100/c2_ae.c
index 706cf97cbe8..d5d1929753e 100644
--- a/drivers/infiniband/hw/amso1100/c2_ae.c
+++ b/drivers/infiniband/hw/amso1100/c2_ae.c
@@ -155,6 +155,8 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
enum c2_event_id event_id;
unsigned long flags;
int status;
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_event.local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_event.remote_addr;
/*
* retrieve the message
@@ -206,10 +208,10 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
case CCAE_ACTIVE_CONNECT_RESULTS:
res = &wr->ae.ae_active_connect_results;
cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
- cm_event.local_addr.sin_addr.s_addr = res->laddr;
- cm_event.remote_addr.sin_addr.s_addr = res->raddr;
- cm_event.local_addr.sin_port = res->lport;
- cm_event.remote_addr.sin_port = res->rport;
+ laddr->sin_addr.s_addr = res->laddr;
+ raddr->sin_addr.s_addr = res->raddr;
+ laddr->sin_port = res->lport;
+ raddr->sin_port = res->rport;
if (status == 0) {
cm_event.private_data_len =
be32_to_cpu(res->private_data_length);
@@ -281,10 +283,10 @@ void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)
}
cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
cm_event.provider_data = (void*)(unsigned long)req->cr_handle;
- cm_event.local_addr.sin_addr.s_addr = req->laddr;
- cm_event.remote_addr.sin_addr.s_addr = req->raddr;
- cm_event.local_addr.sin_port = req->lport;
- cm_event.remote_addr.sin_port = req->rport;
+ laddr->sin_addr.s_addr = req->laddr;
+ raddr->sin_addr.s_addr = req->raddr;
+ laddr->sin_port = req->lport;
+ raddr->sin_port = req->rport;
cm_event.private_data_len =
be32_to_cpu(req->private_data_length);
cm_event.private_data = req->private_data;
diff --git a/drivers/infiniband/hw/amso1100/c2_cm.c b/drivers/infiniband/hw/amso1100/c2_cm.c
index 95f58ab1e0b..23bfa94fbd4 100644
--- a/drivers/infiniband/hw/amso1100/c2_cm.c
+++ b/drivers/infiniband/hw/amso1100/c2_cm.c
@@ -46,6 +46,10 @@ int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
struct c2wr_qp_connect_req *wr; /* variable size needs a malloc. */
struct c2_vq_req *vq_req;
int err;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+
+ if (cm_id->remote_addr.ss_family != AF_INET)
+ return -ENOSYS;
ibqp = c2_get_qp(cm_id->device, iw_param->qpn);
if (!ibqp)
@@ -91,8 +95,8 @@ int c2_llp_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
wr->rnic_handle = c2dev->adapter_handle;
wr->qp_handle = qp->adapter_handle;
- wr->remote_addr = cm_id->remote_addr.sin_addr.s_addr;
- wr->remote_port = cm_id->remote_addr.sin_port;
+ wr->remote_addr = raddr->sin_addr.s_addr;
+ wr->remote_port = raddr->sin_port;
/*
* Move any private data from the callers's buf into
@@ -135,6 +139,10 @@ int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
struct c2wr_ep_listen_create_rep *reply;
struct c2_vq_req *vq_req;
int err;
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
+
+ if (cm_id->local_addr.ss_family != AF_INET)
+ return -ENOSYS;
c2dev = to_c2dev(cm_id->device);
if (c2dev == NULL)
@@ -153,8 +161,8 @@ int c2_llp_service_create(struct iw_cm_id *cm_id, int backlog)
c2_wr_set_id(&wr, CCWR_EP_LISTEN_CREATE);
wr.hdr.context = (u64) (unsigned long) vq_req;
wr.rnic_handle = c2dev->adapter_handle;
- wr.local_addr = cm_id->local_addr.sin_addr.s_addr;
- wr.local_port = cm_id->local_addr.sin_port;
+ wr.local_addr = laddr->sin_addr.s_addr;
+ wr.local_port = laddr->sin_port;
wr.backlog = cpu_to_be32(backlog);
wr.user_context = (u64) (unsigned long) cm_id;
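
With the iWARP CM now carrying struct sockaddr_storage, IPv4-only providers such as amso1100 and cxgb3 have to reject other address families up front, which is what the ss_family checks added above do. A condensed sketch of that guard, with check_af() as a made-up helper name:

    #include <linux/errno.h>
    #include <linux/socket.h>
    #include <rdma/iw_cm.h>

    static int check_af(struct iw_cm_id *cm_id)
    {
            if (cm_id->local_addr.ss_family != AF_INET ||
                cm_id->remote_addr.ss_family != AF_INET)
                    return -ENOSYS;         /* IPv4 only on this hardware */
            return 0;
    }
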
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 3e094cd6a0e..095bb046e2c 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -721,8 +721,10 @@ static void connect_reply_upcall(struct iwch_ep *ep, int status)
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REPLY;
event.status = status;
- event.local_addr = ep->com.local_addr;
- event.remote_addr = ep->com.remote_addr;
+ memcpy(&event.local_addr, &ep->com.local_addr,
+ sizeof(ep->com.local_addr));
+ memcpy(&event.remote_addr, &ep->com.remote_addr,
+ sizeof(ep->com.remote_addr));
if ((status == 0) || (status == -ECONNREFUSED)) {
event.private_data_len = ep->plen;
@@ -747,8 +749,10 @@ static void connect_request_upcall(struct iwch_ep *ep)
PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REQUEST;
- event.local_addr = ep->com.local_addr;
- event.remote_addr = ep->com.remote_addr;
+ memcpy(&event.local_addr, &ep->com.local_addr,
+ sizeof(ep->com.local_addr));
+ memcpy(&event.remote_addr, &ep->com.remote_addr,
+ sizeof(ep->com.remote_addr));
event.private_data_len = ep->plen;
event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
event.provider_data = ep;
@@ -1872,8 +1876,9 @@ err:
static int is_loopback_dst(struct iw_cm_id *cm_id)
{
struct net_device *dev;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
- dev = ip_dev_find(&init_net, cm_id->remote_addr.sin_addr.s_addr);
+ dev = ip_dev_find(&init_net, raddr->sin_addr.s_addr);
if (!dev)
return 0;
dev_put(dev);
@@ -1886,6 +1891,13 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct iwch_ep *ep;
struct rtable *rt;
int err = 0;
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+
+ if (cm_id->remote_addr.ss_family != PF_INET) {
+ err = -ENOSYS;
+ goto out;
+ }
if (is_loopback_dst(cm_id)) {
err = -ENOSYS;
@@ -1929,11 +1941,9 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
/* find a route */
- rt = find_route(h->rdev.t3cdev_p,
- cm_id->local_addr.sin_addr.s_addr,
- cm_id->remote_addr.sin_addr.s_addr,
- cm_id->local_addr.sin_port,
- cm_id->remote_addr.sin_port, IPTOS_LOWDELAY);
+ rt = find_route(h->rdev.t3cdev_p, laddr->sin_addr.s_addr,
+ raddr->sin_addr.s_addr, laddr->sin_port,
+ raddr->sin_port, IPTOS_LOWDELAY);
if (!rt) {
printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
err = -EHOSTUNREACH;
@@ -1941,7 +1951,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
ep->dst = &rt->dst;
ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL,
- &cm_id->remote_addr.sin_addr.s_addr);
+ &raddr->sin_addr.s_addr);
if (!ep->l2t) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
err = -ENOMEM;
@@ -1950,8 +1960,10 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
state_set(&ep->com, CONNECTING);
ep->tos = IPTOS_LOWDELAY;
- ep->com.local_addr = cm_id->local_addr;
- ep->com.remote_addr = cm_id->remote_addr;
+ memcpy(&ep->com.local_addr, &cm_id->local_addr,
+ sizeof(ep->com.local_addr));
+ memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
+ sizeof(ep->com.remote_addr));
/* send connect request to rnic */
err = send_connect(ep);
@@ -1979,6 +1991,11 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
might_sleep();
+ if (cm_id->local_addr.ss_family != PF_INET) {
+ err = -ENOSYS;
+ goto fail1;
+ }
+
ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
if (!ep) {
printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
@@ -1990,7 +2007,8 @@ int iwch_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_id->add_ref(cm_id);
ep->com.cm_id = cm_id;
ep->backlog = backlog;
- ep->com.local_addr = cm_id->local_addr;
+ memcpy(&ep->com.local_addr, &cm_id->local_addr,
+ sizeof(ep->com.local_addr));
/*
* Allocate a server TID.
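
The cxgb3 hunks above only switch the endpoint and event addresses from plain assignment to memcpy(); the payoff comes once the common iw_cm structures carry a struct sockaddr_storage that can hold either family, which is what the (struct sockaddr_in *) casts above already assume. A minimal user-space sketch (not driver code) of that storage-plus-ss_family dispatch pattern; the 192.0.2.10:4791 endpoint is a made-up example value:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
        struct sockaddr_storage ss;
        struct sockaddr_in v4 = { .sin_family = AF_INET, .sin_port = htons(4791) };
        char buf[INET6_ADDRSTRLEN];

        inet_pton(AF_INET, "192.0.2.10", &v4.sin_addr);

        memset(&ss, 0, sizeof(ss));
        memcpy(&ss, &v4, sizeof(v4));   /* sized by the source member, as in the patch */

        if (ss.ss_family == AF_INET) {
                struct sockaddr_in *sin = (struct sockaddr_in *)&ss;
                printf("v4 %s:%u\n",
                       inet_ntop(AF_INET, &sin->sin_addr, buf, sizeof(buf)),
                       ntohs(sin->sin_port));
        } else if (ss.ss_family == AF_INET6) {
                struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;
                printf("v6 [%s]:%u\n",
                       inet_ntop(AF_INET6, &sin6->sin6_addr, buf, sizeof(buf)),
                       ntohs(sin6->sin6_port));
        }
        return 0;
}
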
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index e87f2201b22..d2283837d45 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -226,6 +226,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
mm->len = PAGE_ALIGN(((1UL << uresp.size_log2) + 1) *
sizeof(struct t3_cqe));
uresp.memsize = mm->len;
+ uresp.reserved = 0;
resplen = sizeof uresp;
}
if (ib_copy_to_udata(udata, &uresp, resplen)) {
diff --git a/drivers/infiniband/hw/cxgb4/Kconfig b/drivers/infiniband/hw/cxgb4/Kconfig
index 6b7e6c54353..d4e8983fba5 100644
--- a/drivers/infiniband/hw/cxgb4/Kconfig
+++ b/drivers/infiniband/hw/cxgb4/Kconfig
@@ -1,6 +1,6 @@
config INFINIBAND_CXGB4
tristate "Chelsio T4 RDMA Driver"
- depends on CHELSIO_T4 && INET
+ depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
select GENERIC_ALLOCATOR
---help---
This is an iWARP/RDMA driver for the Chelsio T4 1GbE and
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 65c30ea8c1a..12fef76c791 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -44,6 +44,8 @@
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
+#include <net/ip6_route.h>
+#include <net/addrconf.h>
#include "iw_cxgb4.h"
@@ -330,22 +332,80 @@ static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
} else {
skb = alloc_skb(len, gfp);
}
+ t4_set_arp_err_handler(skb, NULL, NULL);
return skb;
}
-static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
+static struct net_device *get_real_dev(struct net_device *egress_dev)
+{
+ struct net_device *phys_dev = egress_dev;
+ if (egress_dev->priv_flags & IFF_802_1Q_VLAN)
+ phys_dev = vlan_dev_real_dev(egress_dev);
+ return phys_dev;
+}
+
+static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev)
+{
+ int i;
+
+ egress_dev = get_real_dev(egress_dev);
+ for (i = 0; i < dev->rdev.lldi.nports; i++)
+ if (dev->rdev.lldi.ports[i] == egress_dev)
+ return 1;
+ return 0;
+}
+
+static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip,
+ __u8 *peer_ip, __be16 local_port,
+ __be16 peer_port, u8 tos,
+ __u32 sin6_scope_id)
+{
+ struct dst_entry *dst = NULL;
+
+ if (IS_ENABLED(CONFIG_IPV6)) {
+ struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ memcpy(&fl6.daddr, peer_ip, 16);
+ memcpy(&fl6.saddr, local_ip, 16);
+ if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+ fl6.flowi6_oif = sin6_scope_id;
+ dst = ip6_route_output(&init_net, NULL, &fl6);
+ if (!dst)
+ goto out;
+ if (!our_interface(dev, ip6_dst_idev(dst)->dev) &&
+ !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
+ dst_release(dst);
+ dst = NULL;
+ }
+ }
+
+out:
+ return dst;
+}
+
+static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip,
__be32 peer_ip, __be16 local_port,
__be16 peer_port, u8 tos)
{
struct rtable *rt;
struct flowi4 fl4;
+ struct neighbour *n;
rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip,
peer_port, local_port, IPPROTO_TCP,
tos, 0);
if (IS_ERR(rt))
return NULL;
- return rt;
+ n = dst_neigh_lookup(&rt->dst, &peer_ip);
+ if (!n)
+ return NULL;
+ if (!our_interface(dev, n->dev)) {
+ dst_release(&rt->dst);
+ return NULL;
+ }
+ neigh_release(n);
+ return &rt->dst;
}
static void arp_failure_discard(void *handle, struct sk_buff *skb)
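
find_route6() above copies sin6_scope_id into flowi6_oif whenever the destination is link-local, because a link-local peer is ambiguous without an interface index. A small user-space illustration of the same rule, independent of the kernel routing API (the literal address and the scope-id value 2, e.g. from if_nametoindex("eth0"), are assumptions for the example):

#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static int needs_scope_id(const struct sockaddr_in6 *dst)
{
        return IN6_IS_ADDR_LINKLOCAL(&dst->sin6_addr) && dst->sin6_scope_id == 0;
}

int main(void)
{
        struct sockaddr_in6 dst = { .sin6_family = AF_INET6 };

        inet_pton(AF_INET6, "fe80::1", &dst.sin6_addr);
        printf("missing scope id: %s\n", needs_scope_id(&dst) ? "yes" : "no");

        dst.sin6_scope_id = 2;          /* assumed interface index */
        printf("missing scope id: %s\n", needs_scope_id(&dst) ? "yes" : "no");
        return 0;
}
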
@@ -487,7 +547,7 @@ static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
else {
ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
- ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+ ntuple |= 1 << FILTER_SEL_WIDTH_TAG_P_FC;
}
ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
FILTER_SEL_WIDTH_VLD_TAG_P_FC;
@@ -512,15 +572,28 @@ static int send_connect(struct c4iw_ep *ep)
{
struct cpl_act_open_req *req;
struct cpl_t5_act_open_req *t5_req;
+ struct cpl_act_open_req6 *req6;
+ struct cpl_t5_act_open_req6 *t5_req6;
struct sk_buff *skb;
u64 opt0;
u32 opt2;
unsigned int mtu_idx;
int wscale;
- int size = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
- sizeof(struct cpl_act_open_req) :
- sizeof(struct cpl_t5_act_open_req);
- int wrlen = roundup(size, 16);
+ int wrlen;
+ int sizev4 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
+ sizeof(struct cpl_act_open_req) :
+ sizeof(struct cpl_t5_act_open_req);
+ int sizev6 = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?
+ sizeof(struct cpl_act_open_req6) :
+ sizeof(struct cpl_t5_act_open_req6);
+ struct sockaddr_in *la = (struct sockaddr_in *)&ep->com.local_addr;
+ struct sockaddr_in *ra = (struct sockaddr_in *)&ep->com.remote_addr;
+ struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
+ struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
+
+ wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
+ roundup(sizev4, 16) :
+ roundup(sizev6, 16);
PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
@@ -557,33 +630,82 @@ static int send_connect(struct c4iw_ep *ep)
t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
- req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
- INIT_TP_WR(req, 0);
- OPCODE_TID(req) = cpu_to_be32(
- MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
- ((ep->rss_qid << 14) | ep->atid)));
- req->local_port = ep->com.local_addr.sin_port;
- req->peer_port = ep->com.remote_addr.sin_port;
- req->local_ip = ep->com.local_addr.sin_addr.s_addr;
- req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
- req->opt0 = cpu_to_be64(opt0);
- req->params = cpu_to_be32(select_ntuple(ep->com.dev,
- ep->dst, ep->l2t));
- req->opt2 = cpu_to_be32(opt2);
+ if (ep->com.remote_addr.ss_family == AF_INET) {
+ req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = cpu_to_be32(
+ MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
+ ((ep->rss_qid << 14) | ep->atid)));
+ req->local_port = la->sin_port;
+ req->peer_port = ra->sin_port;
+ req->local_ip = la->sin_addr.s_addr;
+ req->peer_ip = ra->sin_addr.s_addr;
+ req->opt0 = cpu_to_be64(opt0);
+ req->params = cpu_to_be32(select_ntuple(ep->com.dev,
+ ep->dst, ep->l2t));
+ req->opt2 = cpu_to_be32(opt2);
+ } else {
+ req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
+
+ INIT_TP_WR(req6, 0);
+ OPCODE_TID(req6) = cpu_to_be32(
+ MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+ ((ep->rss_qid<<14)|ep->atid)));
+ req6->local_port = la6->sin6_port;
+ req6->peer_port = ra6->sin6_port;
+ req6->local_ip_hi = *((__be64 *)
+ (la6->sin6_addr.s6_addr));
+ req6->local_ip_lo = *((__be64 *)
+ (la6->sin6_addr.s6_addr + 8));
+ req6->peer_ip_hi = *((__be64 *)
+ (ra6->sin6_addr.s6_addr));
+ req6->peer_ip_lo = *((__be64 *)
+ (ra6->sin6_addr.s6_addr + 8));
+ req6->opt0 = cpu_to_be64(opt0);
+ req6->params = cpu_to_be32(
+ select_ntuple(ep->com.dev, ep->dst,
+ ep->l2t));
+ req6->opt2 = cpu_to_be32(opt2);
+ }
} else {
- t5_req = (struct cpl_t5_act_open_req *) skb_put(skb, wrlen);
- INIT_TP_WR(t5_req, 0);
- OPCODE_TID(t5_req) = cpu_to_be32(
+ if (ep->com.remote_addr.ss_family == AF_INET) {
+ t5_req = (struct cpl_t5_act_open_req *)
+ skb_put(skb, wrlen);
+ INIT_TP_WR(t5_req, 0);
+ OPCODE_TID(t5_req) = cpu_to_be32(
MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
((ep->rss_qid << 14) | ep->atid)));
- t5_req->local_port = ep->com.local_addr.sin_port;
- t5_req->peer_port = ep->com.remote_addr.sin_port;
- t5_req->local_ip = ep->com.local_addr.sin_addr.s_addr;
- t5_req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
- t5_req->opt0 = cpu_to_be64(opt0);
- t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
- select_ntuple(ep->com.dev, ep->dst, ep->l2t)));
- t5_req->opt2 = cpu_to_be32(opt2);
+ t5_req->local_port = la->sin_port;
+ t5_req->peer_port = ra->sin_port;
+ t5_req->local_ip = la->sin_addr.s_addr;
+ t5_req->peer_ip = ra->sin_addr.s_addr;
+ t5_req->opt0 = cpu_to_be64(opt0);
+ t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
+ select_ntuple(ep->com.dev,
+ ep->dst, ep->l2t)));
+ t5_req->opt2 = cpu_to_be32(opt2);
+ } else {
+ t5_req6 = (struct cpl_t5_act_open_req6 *)
+ skb_put(skb, wrlen);
+ INIT_TP_WR(t5_req6, 0);
+ OPCODE_TID(t5_req6) = cpu_to_be32(
+ MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+ ((ep->rss_qid<<14)|ep->atid)));
+ t5_req6->local_port = la6->sin6_port;
+ t5_req6->peer_port = ra6->sin6_port;
+ t5_req6->local_ip_hi = *((__be64 *)
+ (la6->sin6_addr.s6_addr));
+ t5_req6->local_ip_lo = *((__be64 *)
+ (la6->sin6_addr.s6_addr + 8));
+ t5_req6->peer_ip_hi = *((__be64 *)
+ (ra6->sin6_addr.s6_addr));
+ t5_req6->peer_ip_lo = *((__be64 *)
+ (ra6->sin6_addr.s6_addr + 8));
+ t5_req6->opt0 = cpu_to_be64(opt0);
+ t5_req6->params = (__force __be64)cpu_to_be32(
+ select_ntuple(ep->com.dev, ep->dst, ep->l2t));
+ t5_req6->opt2 = cpu_to_be32(opt2);
+ }
}
set_bit(ACT_OPEN_REQ, &ep->com.history);
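
The CPL_ACT_OPEN_REQ6 fields above carry each 128-bit address as a hi/lo pair of 64-bit words in network byte order, filled by reading s6_addr and s6_addr + 8. A standalone user-space sketch of that split (glibc be64toh() is used only to print the halves in a readable order):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <endian.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
        struct in6_addr a;
        uint64_t hi, lo;

        inet_pton(AF_INET6, "2001:db8::1", &a);
        memcpy(&hi, a.s6_addr, 8);      /* most significant 8 bytes, network order */
        memcpy(&lo, a.s6_addr + 8, 8);  /* least significant 8 bytes, network order */

        /* prints hi=20010db800000000 lo=0000000000000001 */
        printf("hi=%016llx lo=%016llx\n",
               (unsigned long long)be64toh(hi), (unsigned long long)be64toh(lo));
        return 0;
}
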
@@ -952,8 +1074,10 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REPLY;
event.status = status;
- event.local_addr = ep->com.local_addr;
- event.remote_addr = ep->com.remote_addr;
+ memcpy(&event.local_addr, &ep->com.local_addr,
+ sizeof(ep->com.local_addr));
+ memcpy(&event.remote_addr, &ep->com.remote_addr,
+ sizeof(ep->com.remote_addr));
if ((status == 0) || (status == -ECONNREFUSED)) {
if (!ep->tried_with_mpa_v1) {
@@ -989,8 +1113,10 @@ static void connect_request_upcall(struct c4iw_ep *ep)
PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
memset(&event, 0, sizeof(event));
event.event = IW_CM_EVENT_CONNECT_REQUEST;
- event.local_addr = ep->com.local_addr;
- event.remote_addr = ep->com.remote_addr;
+ memcpy(&event.local_addr, &ep->com.local_addr,
+ sizeof(ep->com.local_addr));
+ memcpy(&event.remote_addr, &ep->com.remote_addr,
+ sizeof(ep->com.remote_addr));
event.provider_data = ep;
if (!ep->tried_with_mpa_v1) {
/* this means MPA_v2 is used */
@@ -1447,10 +1573,9 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
" qpid %u ep %p state %d tid %u status %d\n",
__func__, ep->com.qp->wq.sq.qid, ep,
state_read(&ep->com), ep->hwtid, status);
- attrs.next_state = C4IW_QP_STATE_ERROR;
+ attrs.next_state = C4IW_QP_STATE_TERMINATE;
c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
- C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
- c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
+ C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
break;
}
default:
@@ -1498,6 +1623,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
struct fw_ofld_connection_wr *req;
unsigned int mtu_idx;
int wscale;
+ struct sockaddr_in *sin;
skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
@@ -1506,10 +1632,12 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
ep->l2t));
- req->le.lport = ep->com.local_addr.sin_port;
- req->le.pport = ep->com.remote_addr.sin_port;
- req->le.u.ipv4.lip = ep->com.local_addr.sin_addr.s_addr;
- req->le.u.ipv4.pip = ep->com.remote_addr.sin_addr.s_addr;
+ sin = (struct sockaddr_in *)&ep->com.local_addr;
+ req->le.lport = sin->sin_port;
+ req->le.u.ipv4.lip = sin->sin_addr.s_addr;
+ sin = (struct sockaddr_in *)&ep->com.remote_addr;
+ req->le.pport = sin->sin_port;
+ req->le.u.ipv4.pip = sin->sin_addr.s_addr;
req->tcb.t_state_to_astid =
htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
V_FW_OFLD_CONNECTION_WR_ASTID(atid));
@@ -1560,14 +1688,98 @@ static inline int act_open_has_tid(int status)
#define ACT_OPEN_RETRY_COUNT 2
+static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
+ struct dst_entry *dst, struct c4iw_dev *cdev,
+ bool clear_mpa_v1)
+{
+ struct neighbour *n;
+ int err, step;
+ struct net_device *pdev;
+
+ n = dst_neigh_lookup(dst, peer_ip);
+ if (!n)
+ return -ENODEV;
+
+ rcu_read_lock();
+ err = -ENOMEM;
+ if (n->dev->flags & IFF_LOOPBACK) {
+ if (iptype == 4)
+ pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
+ else if (IS_ENABLED(CONFIG_IPV6))
+ for_each_netdev(&init_net, pdev) {
+ if (ipv6_chk_addr(&init_net,
+ (struct in6_addr *)peer_ip,
+ pdev, 1))
+ break;
+ }
+ else
+ pdev = NULL;
+
+ if (!pdev) {
+ err = -ENODEV;
+ goto out;
+ }
+ ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+ n, pdev, 0);
+ if (!ep->l2t)
+ goto out;
+ ep->mtu = pdev->mtu;
+ ep->tx_chan = cxgb4_port_chan(pdev);
+ ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
+ step = cdev->rdev.lldi.ntxq /
+ cdev->rdev.lldi.nchan;
+ ep->txq_idx = cxgb4_port_idx(pdev) * step;
+ step = cdev->rdev.lldi.nrxq /
+ cdev->rdev.lldi.nchan;
+ ep->ctrlq_idx = cxgb4_port_idx(pdev);
+ ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(pdev) * step];
+ dev_put(pdev);
+ } else {
+ pdev = get_real_dev(n->dev);
+ ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+ n, pdev, 0);
+ if (!ep->l2t)
+ goto out;
+ ep->mtu = dst_mtu(dst);
+ ep->tx_chan = cxgb4_port_chan(n->dev);
+ ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
+ step = cdev->rdev.lldi.ntxq /
+ cdev->rdev.lldi.nchan;
+ ep->txq_idx = cxgb4_port_idx(n->dev) * step;
+ ep->ctrlq_idx = cxgb4_port_idx(n->dev);
+ step = cdev->rdev.lldi.nrxq /
+ cdev->rdev.lldi.nchan;
+ ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(n->dev) * step];
+
+ if (clear_mpa_v1) {
+ ep->retry_with_mpa_v1 = 0;
+ ep->tried_with_mpa_v1 = 0;
+ }
+ }
+ err = 0;
+out:
+ rcu_read_unlock();
+
+ neigh_release(n);
+
+ return err;
+}
+
static int c4iw_reconnect(struct c4iw_ep *ep)
{
int err = 0;
- struct rtable *rt;
- struct port_info *pi;
- struct net_device *pdev;
- int step;
- struct neighbour *neigh;
+ struct sockaddr_in *laddr = (struct sockaddr_in *)
+ &ep->com.cm_id->local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)
+ &ep->com.cm_id->remote_addr;
+ struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
+ &ep->com.cm_id->local_addr;
+ struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
+ &ep->com.cm_id->remote_addr;
+ int iptype;
+ __u8 *ra;
PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
init_timer(&ep->timer);
@@ -1584,57 +1796,28 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
/* find a route */
- rt = find_route(ep->com.dev,
- ep->com.cm_id->local_addr.sin_addr.s_addr,
- ep->com.cm_id->remote_addr.sin_addr.s_addr,
- ep->com.cm_id->local_addr.sin_port,
- ep->com.cm_id->remote_addr.sin_port, 0);
- if (!rt) {
+ if (ep->com.cm_id->local_addr.ss_family == AF_INET) {
+ ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr,
+ raddr->sin_addr.s_addr, laddr->sin_port,
+ raddr->sin_port, 0);
+ iptype = 4;
+ ra = (__u8 *)&raddr->sin_addr;
+ } else {
+ ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr,
+ raddr6->sin6_addr.s6_addr,
+ laddr6->sin6_port, raddr6->sin6_port, 0,
+ raddr6->sin6_scope_id);
+ iptype = 6;
+ ra = (__u8 *)&raddr6->sin6_addr;
+ }
+ if (!ep->dst) {
pr_err("%s - cannot find route.\n", __func__);
err = -EHOSTUNREACH;
goto fail3;
}
- ep->dst = &rt->dst;
-
- neigh = dst_neigh_lookup(ep->dst,
- &ep->com.cm_id->remote_addr.sin_addr.s_addr);
- if (!neigh) {
- pr_err("%s - cannot alloc neigh.\n", __func__);
- err = -ENOMEM;
- goto fail4;
- }
-
- /* get a l2t entry */
- if (neigh->dev->flags & IFF_LOOPBACK) {
- PDBG("%s LOOPBACK\n", __func__);
- pdev = ip_dev_find(&init_net,
- ep->com.cm_id->remote_addr.sin_addr.s_addr);
- ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
- neigh, pdev, 0);
- pi = (struct port_info *)netdev_priv(pdev);
- ep->mtu = pdev->mtu;
- ep->tx_chan = cxgb4_port_chan(pdev);
- ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
- dev_put(pdev);
- } else {
- ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
- neigh, neigh->dev, 0);
- pi = (struct port_info *)netdev_priv(neigh->dev);
- ep->mtu = dst_mtu(ep->dst);
- ep->tx_chan = cxgb4_port_chan(neigh->dev);
- ep->smac_idx = (cxgb4_port_viid(neigh->dev) &
- 0x7F) << 1;
- }
-
- step = ep->com.dev->rdev.lldi.ntxq / ep->com.dev->rdev.lldi.nchan;
- ep->txq_idx = pi->port_id * step;
- ep->ctrlq_idx = pi->port_id;
- step = ep->com.dev->rdev.lldi.nrxq / ep->com.dev->rdev.lldi.nchan;
- ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[pi->port_id * step];
-
- if (!ep->l2t) {
+ err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false);
+ if (err) {
pr_err("%s - cannot alloc l2e.\n", __func__);
- err = -ENOMEM;
goto fail4;
}
@@ -1677,8 +1860,16 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
ntohl(rpl->atid_status)));
struct tid_info *t = dev->rdev.lldi.tids;
int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
+ struct sockaddr_in *la;
+ struct sockaddr_in *ra;
+ struct sockaddr_in6 *la6;
+ struct sockaddr_in6 *ra6;
ep = lookup_atid(t, atid);
+ la = (struct sockaddr_in *)&ep->com.local_addr;
+ ra = (struct sockaddr_in *)&ep->com.remote_addr;
+ la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
+ ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
status, status2errno(status));
@@ -1699,10 +1890,11 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
case CPL_ERR_CONN_TIMEDOUT:
break;
case CPL_ERR_TCAM_FULL:
+ mutex_lock(&dev->rdev.stats.lock);
dev->rdev.stats.tcam_full++;
- if (dev->rdev.lldi.enable_fw_ofld_conn) {
- mutex_lock(&dev->rdev.stats.lock);
- mutex_unlock(&dev->rdev.stats.lock);
+ mutex_unlock(&dev->rdev.stats.lock);
+ if (ep->com.local_addr.ss_family == AF_INET &&
+ dev->rdev.lldi.enable_fw_ofld_conn) {
send_fw_act_open_req(ep,
GET_TID_TID(GET_AOPEN_ATID(
ntohl(rpl->atid_status))));
@@ -1722,13 +1914,17 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
}
break;
default:
- printk(KERN_INFO MOD "Active open failure - "
- "atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
- atid, status, status2errno(status),
- &ep->com.local_addr.sin_addr.s_addr,
- ntohs(ep->com.local_addr.sin_port),
- &ep->com.remote_addr.sin_addr.s_addr,
- ntohs(ep->com.remote_addr.sin_port));
+ if (ep->com.local_addr.ss_family == AF_INET) {
+ pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
+ atid, status, status2errno(status),
+ &la->sin_addr.s_addr, ntohs(la->sin_port),
+ &ra->sin_addr.s_addr, ntohs(ra->sin_port));
+ } else {
+ pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n",
+ atid, status, status2errno(status),
+ la6->sin6_addr.s6_addr, ntohs(la6->sin6_port),
+ ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port));
+ }
break;
}
@@ -1766,27 +1962,6 @@ out:
return 0;
}
-static int listen_stop(struct c4iw_listen_ep *ep)
-{
- struct sk_buff *skb;
- struct cpl_close_listsvr_req *req;
-
- PDBG("%s ep %p\n", __func__, ep);
- skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
- if (!skb) {
- printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
- return -ENOMEM;
- }
- req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
- INIT_TP_WR(req, 0);
- OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
- ep->stid));
- req->reply_ctrl = cpu_to_be16(
- QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
- set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
- return c4iw_ofld_send(&ep->com.dev->rdev, skb);
-}
-
static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
@@ -1799,7 +1974,7 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
-static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
+static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
struct cpl_pass_accept_req *req)
{
struct cpl_pass_accept_rpl *rpl;
@@ -1851,16 +2026,15 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
rpl->opt0 = cpu_to_be64(opt0);
rpl->opt2 = cpu_to_be32(opt2);
set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
+ t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
return;
}
-static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
- struct sk_buff *skb)
+static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
{
- PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
- peer_ip);
+ PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
BUG_ON(skb_cloned(skb));
skb_trim(skb, sizeof(struct cpl_tid_release));
skb_get(skb);
@@ -1868,95 +2042,38 @@ static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
return;
}
-static void get_4tuple(struct cpl_pass_accept_req *req,
- __be32 *local_ip, __be32 *peer_ip,
+static void get_4tuple(struct cpl_pass_accept_req *req, int *iptype,
+ __u8 *local_ip, __u8 *peer_ip,
__be16 *local_port, __be16 *peer_port)
{
int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
+ struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len);
struct tcphdr *tcp = (struct tcphdr *)
((u8 *)(req + 1) + eth_len + ip_len);
- PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
- ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
- ntohs(tcp->dest));
-
- *peer_ip = ip->saddr;
- *local_ip = ip->daddr;
+ if (ip->version == 4) {
+ PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
+ ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
+ ntohs(tcp->dest));
+ *iptype = 4;
+ memcpy(peer_ip, &ip->saddr, 4);
+ memcpy(local_ip, &ip->daddr, 4);
+ } else {
+ PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__,
+ ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source),
+ ntohs(tcp->dest));
+ *iptype = 6;
+ memcpy(peer_ip, ip6->saddr.s6_addr, 16);
+ memcpy(local_ip, ip6->daddr.s6_addr, 16);
+ }
*peer_port = tcp->source;
*local_port = tcp->dest;
return;
}
-static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
- struct c4iw_dev *cdev, bool clear_mpa_v1)
-{
- struct neighbour *n;
- int err, step;
-
- n = dst_neigh_lookup(dst, &peer_ip);
- if (!n)
- return -ENODEV;
-
- rcu_read_lock();
- err = -ENOMEM;
- if (n->dev->flags & IFF_LOOPBACK) {
- struct net_device *pdev;
-
- pdev = ip_dev_find(&init_net, peer_ip);
- if (!pdev) {
- err = -ENODEV;
- goto out;
- }
- ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
- n, pdev, 0);
- if (!ep->l2t)
- goto out;
- ep->mtu = pdev->mtu;
- ep->tx_chan = cxgb4_port_chan(pdev);
- ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
- step = cdev->rdev.lldi.ntxq /
- cdev->rdev.lldi.nchan;
- ep->txq_idx = cxgb4_port_idx(pdev) * step;
- step = cdev->rdev.lldi.nrxq /
- cdev->rdev.lldi.nchan;
- ep->ctrlq_idx = cxgb4_port_idx(pdev);
- ep->rss_qid = cdev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(pdev) * step];
- dev_put(pdev);
- } else {
- ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
- n, n->dev, 0);
- if (!ep->l2t)
- goto out;
- ep->mtu = dst_mtu(dst);
- ep->tx_chan = cxgb4_port_chan(n->dev);
- ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
- step = cdev->rdev.lldi.ntxq /
- cdev->rdev.lldi.nchan;
- ep->txq_idx = cxgb4_port_idx(n->dev) * step;
- ep->ctrlq_idx = cxgb4_port_idx(n->dev);
- step = cdev->rdev.lldi.nrxq /
- cdev->rdev.lldi.nchan;
- ep->rss_qid = cdev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(n->dev) * step];
-
- if (clear_mpa_v1) {
- ep->retry_with_mpa_v1 = 0;
- ep->tried_with_mpa_v1 = 0;
- }
- }
- err = 0;
-out:
- rcu_read_unlock();
-
- neigh_release(n);
-
- return err;
-}
-
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *child_ep = NULL, *parent_ep;
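
get_4tuple() now decides between the IPv4 and IPv6 layouts by checking the version field of the parsed header before copying 4 or 16 address bytes. A tiny user-space sketch of the same version-nibble test (the header buffers are fabricated for illustration):

#include <stdio.h>
#include <stdint.h>

static int addr_len_from_header(const uint8_t *pkt)
{
        /* top nibble of the first byte is the IP version */
        return (pkt[0] >> 4) == 4 ? 4 : 16;
}

int main(void)
{
        uint8_t v4_hdr[20] = { 0x45 };  /* version 4, IHL 5 */
        uint8_t v6_hdr[40] = { 0x60 };  /* version 6 */

        printf("v4 header -> %d address bytes\n", addr_len_from_header(v4_hdr));
        printf("v6 header -> %d address bytes\n", addr_len_from_header(v6_hdr));
        return 0;
}
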
@@ -1965,23 +2082,17 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int hwtid = GET_TID(req);
struct dst_entry *dst;
- struct rtable *rt;
- __be32 local_ip, peer_ip = 0;
+ __u8 local_ip[16], peer_ip[16];
__be16 local_port, peer_port;
int err;
u16 peer_mss = ntohs(req->tcpopt.mss);
+ int iptype;
parent_ep = lookup_stid(t, stid);
if (!parent_ep) {
PDBG("%s connect request on invalid stid %d\n", __func__, stid);
goto reject;
}
- get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);
-
- PDBG("%s parent ep %p hwtid %u laddr 0x%x raddr 0x%x lport %d " \
- "rport %d peer_mss %d\n", __func__, parent_ep, hwtid,
- ntohl(local_ip), ntohl(peer_ip), ntohs(local_port),
- ntohs(peer_port), peer_mss);
if (state_read(&parent_ep->com) != LISTEN) {
printk(KERN_ERR "%s - listening ep not in LISTEN\n",
@@ -1989,15 +2100,32 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
+ get_4tuple(req, &iptype, local_ip, peer_ip, &local_port, &peer_port);
+
/* Find output route */
- rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
- GET_POPEN_TOS(ntohl(req->tos_stid)));
- if (!rt) {
+ if (iptype == 4) {
+ PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
+ , __func__, parent_ep, hwtid,
+ local_ip, peer_ip, ntohs(local_port),
+ ntohs(peer_port), peer_mss);
+ dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip,
+ local_port, peer_port,
+ GET_POPEN_TOS(ntohl(req->tos_stid)));
+ } else {
+ PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
+ , __func__, parent_ep, hwtid,
+ local_ip, peer_ip, ntohs(local_port),
+ ntohs(peer_port), peer_mss);
+ dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port,
+ PASS_OPEN_TOS(ntohl(req->tos_stid)),
+ ((struct sockaddr_in6 *)
+ &parent_ep->com.local_addr)->sin6_scope_id);
+ }
+ if (!dst) {
printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
__func__);
goto reject;
}
- dst = &rt->dst;
child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
if (!child_ep) {
@@ -2007,7 +2135,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
- err = import_ep(child_ep, peer_ip, dst, dev, false);
+ err = import_ep(child_ep, iptype, peer_ip, dst, dev, false);
if (err) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
@@ -2022,12 +2150,27 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
state_set(&child_ep->com, CONNECTING);
child_ep->com.dev = dev;
child_ep->com.cm_id = NULL;
- child_ep->com.local_addr.sin_family = PF_INET;
- child_ep->com.local_addr.sin_port = local_port;
- child_ep->com.local_addr.sin_addr.s_addr = local_ip;
- child_ep->com.remote_addr.sin_family = PF_INET;
- child_ep->com.remote_addr.sin_port = peer_port;
- child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
+ if (iptype == 4) {
+ struct sockaddr_in *sin = (struct sockaddr_in *)
+ &child_ep->com.local_addr;
+ sin->sin_family = PF_INET;
+ sin->sin_port = local_port;
+ sin->sin_addr.s_addr = *(__be32 *)local_ip;
+ sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
+ sin->sin_family = PF_INET;
+ sin->sin_port = peer_port;
+ sin->sin_addr.s_addr = *(__be32 *)peer_ip;
+ } else {
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
+ &child_ep->com.local_addr;
+ sin6->sin6_family = PF_INET6;
+ sin6->sin6_port = local_port;
+ memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
+ sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
+ sin6->sin6_family = PF_INET6;
+ sin6->sin6_port = peer_port;
+ memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
+ }
c4iw_get_ep(&parent_ep->com);
child_ep->parent_ep = parent_ep;
child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
@@ -2040,11 +2183,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
init_timer(&child_ep->timer);
cxgb4_insert_tid(t, child_ep, hwtid);
insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
- accept_cr(child_ep, peer_ip, skb, req);
+ accept_cr(child_ep, skb, req);
set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
goto out;
reject:
- reject_cr(dev, hwtid, peer_ip, skb);
+ reject_cr(dev, hwtid, skb);
out:
return 0;
}
@@ -2512,12 +2655,79 @@ err:
return err;
}
+static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
+{
+ struct in_device *ind;
+ int found = 0;
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+
+ ind = in_dev_get(dev->rdev.lldi.ports[0]);
+ if (!ind)
+ return -EADDRNOTAVAIL;
+ for_primary_ifa(ind) {
+ laddr->sin_addr.s_addr = ifa->ifa_address;
+ raddr->sin_addr.s_addr = ifa->ifa_address;
+ found = 1;
+ break;
+ }
+ endfor_ifa(ind);
+ in_dev_put(ind);
+ return found ? 0 : -EADDRNOTAVAIL;
+}
+
+static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
+ unsigned char banned_flags)
+{
+ struct inet6_dev *idev;
+ int err = -EADDRNOTAVAIL;
+
+ rcu_read_lock();
+ idev = __in6_dev_get(dev);
+ if (idev != NULL) {
+ struct inet6_ifaddr *ifp;
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifp, &idev->addr_list, if_list) {
+ if (ifp->scope == IFA_LINK &&
+ !(ifp->flags & banned_flags)) {
+ memcpy(addr, &ifp->addr, 16);
+ err = 0;
+ break;
+ }
+ }
+ read_unlock_bh(&idev->lock);
+ }
+ rcu_read_unlock();
+ return err;
+}
+
+static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
+{
+ struct in6_addr uninitialized_var(addr);
+ struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->local_addr;
+ struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->remote_addr;
+
+ if (get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
+ memcpy(la6->sin6_addr.s6_addr, &addr, 16);
+ memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
+ return 0;
+ }
+ return -EADDRNOTAVAIL;
+}
+
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
struct c4iw_ep *ep;
- struct rtable *rt;
int err = 0;
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+ struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)&cm_id->local_addr;
+ struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
+ &cm_id->remote_addr;
+ __u8 *ra;
+ int iptype;
if ((conn_param->ord > c4iw_max_read_depth) ||
(conn_param->ird > c4iw_max_read_depth)) {
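
c4iw_connect() substitutes a concrete local address when the peer is the wildcard, and the wildcard test differs per family (INADDR_ANY vs. the all-zero IPv6 address), which is why pick_local_ipaddrs() and pick_local_ip6addrs() exist. A user-space sketch of that per-family wildcard check, independent of the driver:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static int is_wildcard(const struct sockaddr_storage *ss)
{
        if (ss->ss_family == AF_INET) {
                const struct sockaddr_in *sin = (const struct sockaddr_in *)ss;
                return sin->sin_addr.s_addr == htonl(INADDR_ANY);
        }
        if (ss->ss_family == AF_INET6) {
                const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)ss;
                return IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr);
        }
        return 0;
}

int main(void)
{
        struct sockaddr_storage ss;
        struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ss;

        memset(&ss, 0, sizeof(ss));
        sin6->sin6_family = AF_INET6;
        sin6->sin6_addr = in6addr_any;
        printf("wildcard: %d\n", is_wildcard(&ss));     /* prints 1 */
        return 0;
}
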
@@ -2545,7 +2755,11 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
ep->com.dev = dev;
ep->com.cm_id = cm_id;
ep->com.qp = get_qhp(dev, conn_param->qpn);
- BUG_ON(!ep->com.qp);
+ if (!ep->com.qp) {
+ PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
+ err = -EINVAL;
+ goto fail2;
+ }
ref_qp(ep);
PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
ep->com.qp, cm_id);
@@ -2561,27 +2775,56 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
insert_handle(dev, &dev->atid_idr, ep, ep->atid);
- PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
- ntohl(cm_id->local_addr.sin_addr.s_addr),
- ntohs(cm_id->local_addr.sin_port),
- ntohl(cm_id->remote_addr.sin_addr.s_addr),
- ntohs(cm_id->remote_addr.sin_port));
+ if (cm_id->remote_addr.ss_family == AF_INET) {
+ iptype = 4;
+ ra = (__u8 *)&raddr->sin_addr;
- /* find a route */
- rt = find_route(dev,
- cm_id->local_addr.sin_addr.s_addr,
- cm_id->remote_addr.sin_addr.s_addr,
- cm_id->local_addr.sin_port,
- cm_id->remote_addr.sin_port, 0);
- if (!rt) {
+ /*
+ * Handle loopback requests to INADDR_ANY.
+ */
+ if ((__force int)raddr->sin_addr.s_addr == INADDR_ANY) {
+ err = pick_local_ipaddrs(dev, cm_id);
+ if (err)
+ goto fail2;
+ }
+
+ /* find a route */
+ PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
+ __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
+ ra, ntohs(raddr->sin_port));
+ ep->dst = find_route(dev, laddr->sin_addr.s_addr,
+ raddr->sin_addr.s_addr, laddr->sin_port,
+ raddr->sin_port, 0);
+ } else {
+ iptype = 6;
+ ra = (__u8 *)&raddr6->sin6_addr;
+
+ /*
+ * Handle loopback requests to the IPv6 wildcard (::) address.
+ */
+ if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
+ err = pick_local_ip6addrs(dev, cm_id);
+ if (err)
+ goto fail2;
+ }
+
+ /* find a route */
+ PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
+ __func__, laddr6->sin6_addr.s6_addr,
+ ntohs(laddr6->sin6_port),
+ raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
+ ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr,
+ raddr6->sin6_addr.s6_addr,
+ laddr6->sin6_port, raddr6->sin6_port, 0,
+ raddr6->sin6_scope_id);
+ }
+ if (!ep->dst) {
printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
err = -EHOSTUNREACH;
goto fail3;
}
- ep->dst = &rt->dst;
- err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr,
- ep->dst, ep->com.dev, true);
+ err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true);
if (err) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
goto fail4;
@@ -2593,8 +2836,10 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
state_set(&ep->com, CONNECTING);
ep->tos = 0;
- ep->com.local_addr = cm_id->local_addr;
- ep->com.remote_addr = cm_id->remote_addr;
+ memcpy(&ep->com.local_addr, &cm_id->local_addr,
+ sizeof(ep->com.local_addr));
+ memcpy(&ep->com.remote_addr, &cm_id->remote_addr,
+ sizeof(ep->com.remote_addr));
/* send connect request to rnic */
err = send_connect(ep);
@@ -2614,6 +2859,60 @@ out:
return err;
}
+static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
+{
+ int err;
+ struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
+
+ c4iw_init_wr_wait(&ep->com.wr_wait);
+ err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
+ ep->stid, &sin6->sin6_addr,
+ sin6->sin6_port,
+ ep->com.dev->rdev.lldi.rxq_ids[0]);
+ if (!err)
+ err = c4iw_wait_for_reply(&ep->com.dev->rdev,
+ &ep->com.wr_wait,
+ 0, 0, __func__);
+ if (err)
+ pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
+ err, ep->stid,
+ sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
+ return err;
+}
+
+static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
+{
+ int err;
+ struct sockaddr_in *sin = (struct sockaddr_in *)&ep->com.local_addr;
+
+ if (dev->rdev.lldi.enable_fw_ofld_conn) {
+ do {
+ err = cxgb4_create_server_filter(
+ ep->com.dev->rdev.lldi.ports[0], ep->stid,
+ sin->sin_addr.s_addr, sin->sin_port, 0,
+ ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
+ if (err == -EBUSY) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(100));
+ }
+ } while (err == -EBUSY);
+ } else {
+ c4iw_init_wr_wait(&ep->com.wr_wait);
+ err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
+ ep->stid, sin->sin_addr.s_addr, sin->sin_port,
+ 0, ep->com.dev->rdev.lldi.rxq_ids[0]);
+ if (!err)
+ err = c4iw_wait_for_reply(&ep->com.dev->rdev,
+ &ep->com.wr_wait,
+ 0, 0, __func__);
+ }
+ if (err)
+ pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n"
+ , err, ep->stid,
+ &sin->sin_addr, ntohs(sin->sin_port));
+ return err;
+}
+
int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
{
int err = 0;
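
create_server4() above keeps retrying the server-filter call while the hardware path reports -EBUSY, sleeping briefly between attempts. The same retry shape in user space, with a stand-in function (an assumption for illustration, not a cxgb4 API) playing the busy resource:

#include <stdio.h>
#include <errno.h>
#include <unistd.h>

static int fake_busy_op(void)
{
        static int calls;
        return ++calls < 3 ? -EBUSY : 0;        /* busy twice, then succeed */
}

int main(void)
{
        int err;

        do {
                err = fake_busy_op();
                if (err == -EBUSY)
                        usleep(100);    /* brief pause, like the driver's 100us wait */
        } while (err == -EBUSY);

        printf("final err = %d\n", err);        /* prints 0 after two retries */
        return 0;
}
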
@@ -2633,15 +2932,18 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
ep->com.cm_id = cm_id;
ep->com.dev = dev;
ep->backlog = backlog;
- ep->com.local_addr = cm_id->local_addr;
+ memcpy(&ep->com.local_addr, &cm_id->local_addr,
+ sizeof(ep->com.local_addr));
/*
* Allocate a server TID.
*/
if (dev->rdev.lldi.enable_fw_ofld_conn)
- ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, PF_INET, ep);
+ ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
+ cm_id->local_addr.ss_family, ep);
else
- ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
+ ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
+ cm_id->local_addr.ss_family, ep);
if (ep->stid == -1) {
printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
@@ -2650,43 +2952,16 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
}
insert_handle(dev, &dev->stid_idr, ep, ep->stid);
state_set(&ep->com, LISTEN);
- if (dev->rdev.lldi.enable_fw_ofld_conn) {
- do {
- err = cxgb4_create_server_filter(
- ep->com.dev->rdev.lldi.ports[0], ep->stid,
- ep->com.local_addr.sin_addr.s_addr,
- ep->com.local_addr.sin_port,
- 0,
- ep->com.dev->rdev.lldi.rxq_ids[0],
- 0,
- 0);
- if (err == -EBUSY) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(usecs_to_jiffies(100));
- }
- } while (err == -EBUSY);
- } else {
- c4iw_init_wr_wait(&ep->com.wr_wait);
- err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
- ep->stid, ep->com.local_addr.sin_addr.s_addr,
- ep->com.local_addr.sin_port,
- 0,
- ep->com.dev->rdev.lldi.rxq_ids[0]);
- if (!err)
- err = c4iw_wait_for_reply(&ep->com.dev->rdev,
- &ep->com.wr_wait,
- 0, 0, __func__);
- }
+ if (ep->com.local_addr.ss_family == AF_INET)
+ err = create_server4(dev, ep);
+ else
+ err = create_server6(dev, ep);
if (!err) {
cm_id->provider_data = ep;
goto out;
}
- pr_err("%s cxgb4_create_server/filter failed err %d " \
- "stid %d laddr %08x lport %d\n", \
- __func__, err, ep->stid,
- ntohl(ep->com.local_addr.sin_addr.s_addr),
- ntohs(ep->com.local_addr.sin_port));
- cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
+ cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
+ ep->com.local_addr.ss_family);
fail2:
cm_id->rem_ref(cm_id);
c4iw_put_ep(&ep->com);
@@ -2704,20 +2979,24 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
might_sleep();
state_set(&ep->com, DEAD);
- if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn) {
+ if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
+ ep->com.local_addr.ss_family == AF_INET) {
err = cxgb4_remove_server_filter(
ep->com.dev->rdev.lldi.ports[0], ep->stid,
ep->com.dev->rdev.lldi.rxq_ids[0], 0);
} else {
c4iw_init_wr_wait(&ep->com.wr_wait);
- err = listen_stop(ep);
+ err = cxgb4_remove_server(
+ ep->com.dev->rdev.lldi.ports[0], ep->stid,
+ ep->com.dev->rdev.lldi.rxq_ids[0], 0);
if (err)
goto done;
err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
0, 0, __func__);
}
remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
- cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
+ cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
+ ep->com.local_addr.ss_family);
done:
cm_id->rem_ref(cm_id);
c4iw_put_ep(&ep->com);
@@ -3021,7 +3300,6 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
struct cpl_pass_accept_req *req = (void *)(rss + 1);
struct l2t_entry *e;
struct dst_entry *dst;
- struct rtable *rt;
struct c4iw_ep *lep;
u16 window;
struct port_info *pi;
@@ -3079,14 +3357,13 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
ntohs(tcph->source), iph->tos);
- rt = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
- iph->tos);
- if (!rt) {
+ dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
+ iph->tos);
+ if (!dst) {
pr_err("%s - failed to find dst entry!\n",
__func__);
goto reject;
}
- dst = &rt->dst;
neigh = dst_neigh_lookup_skb(dst, skb);
if (!neigh) {
@@ -3103,10 +3380,11 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
tx_chan = cxgb4_port_chan(pdev);
dev_put(pdev);
} else {
+ pdev = get_real_dev(neigh->dev);
e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
- neigh->dev, 0);
- pi = (struct port_info *)netdev_priv(neigh->dev);
- tx_chan = cxgb4_port_chan(neigh->dev);
+ pdev, 0);
+ pi = (struct port_info *)netdev_priv(pdev);
+ tx_chan = cxgb4_port_chan(pdev);
}
if (!e) {
pr_err("%s - failed to allocate l2t entry!\n",
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 0f1607c8325..88de3aa9c5b 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -225,43 +225,186 @@ static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
t4_swcq_produce(cq);
}
-int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
+static void advance_oldest_read(struct t4_wq *wq);
+
+int c4iw_flush_sq(struct c4iw_qp *qhp)
{
int flushed = 0;
- struct t4_swsqe *swsqe = &wq->sq.sw_sq[wq->sq.cidx + count];
- int in_use = wq->sq.in_use - count;
-
- BUG_ON(in_use < 0);
- while (in_use--) {
- swsqe->signaled = 0;
- insert_sq_cqe(wq, cq, swsqe);
- swsqe++;
- if (swsqe == (wq->sq.sw_sq + wq->sq.size))
- swsqe = wq->sq.sw_sq;
- flushed++;
+ struct t4_wq *wq = &qhp->wq;
+ struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
+ struct t4_cq *cq = &chp->cq;
+ int idx;
+ struct t4_swsqe *swsqe;
+ int error = (qhp->attr.state != C4IW_QP_STATE_CLOSING &&
+ qhp->attr.state != C4IW_QP_STATE_IDLE);
+
+ if (wq->sq.flush_cidx == -1)
+ wq->sq.flush_cidx = wq->sq.cidx;
+ idx = wq->sq.flush_cidx;
+ BUG_ON(idx >= wq->sq.size);
+ while (idx != wq->sq.pidx) {
+ if (error) {
+ swsqe = &wq->sq.sw_sq[idx];
+ BUG_ON(swsqe->flushed);
+ swsqe->flushed = 1;
+ insert_sq_cqe(wq, cq, swsqe);
+ if (wq->sq.oldest_read == swsqe) {
+ BUG_ON(swsqe->opcode != FW_RI_READ_REQ);
+ advance_oldest_read(wq);
+ }
+ flushed++;
+ } else {
+ t4_sq_consume(wq);
+ }
+ if (++idx == wq->sq.size)
+ idx = 0;
}
+ wq->sq.flush_cidx += flushed;
+ if (wq->sq.flush_cidx >= wq->sq.size)
+ wq->sq.flush_cidx -= wq->sq.size;
return flushed;
}
+static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
+{
+ struct t4_swsqe *swsqe;
+ int cidx;
+
+ if (wq->sq.flush_cidx == -1)
+ wq->sq.flush_cidx = wq->sq.cidx;
+ cidx = wq->sq.flush_cidx;
+ BUG_ON(cidx > wq->sq.size);
+
+ while (cidx != wq->sq.pidx) {
+ swsqe = &wq->sq.sw_sq[cidx];
+ if (!swsqe->signaled) {
+ if (++cidx == wq->sq.size)
+ cidx = 0;
+ } else if (swsqe->complete) {
+
+ BUG_ON(swsqe->flushed);
+
+ /*
+ * Insert this completed cqe into the swcq.
+ */
+ PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
+ __func__, cidx, cq->sw_pidx);
+ swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
+ cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
+ t4_swcq_produce(cq);
+ swsqe->flushed = 1;
+ if (++cidx == wq->sq.size)
+ cidx = 0;
+ wq->sq.flush_cidx = cidx;
+ } else
+ break;
+ }
+}
+
+static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
+ struct t4_cqe *read_cqe)
+{
+ read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
+ read_cqe->len = htonl(wq->sq.oldest_read->read_len);
+ read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
+ V_CQE_SWCQE(SW_CQE(hw_cqe)) |
+ V_CQE_OPCODE(FW_RI_READ_REQ) |
+ V_CQE_TYPE(1));
+ read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
+}
+
+static void advance_oldest_read(struct t4_wq *wq)
+{
+
+ u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;
+
+ if (rptr == wq->sq.size)
+ rptr = 0;
+ while (rptr != wq->sq.pidx) {
+ wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
+
+ if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
+ return;
+ if (++rptr == wq->sq.size)
+ rptr = 0;
+ }
+ wq->sq.oldest_read = NULL;
+}
+
/*
* Move all CQEs from the HWCQ into the SWCQ.
+ * Deal with out-of-order and/or completions that complete
+ * prior unsignalled WRs.
*/
-void c4iw_flush_hw_cq(struct t4_cq *cq)
+void c4iw_flush_hw_cq(struct c4iw_cq *chp)
{
- struct t4_cqe *cqe = NULL, *swcqe;
+ struct t4_cqe *hw_cqe, *swcqe, read_cqe;
+ struct c4iw_qp *qhp;
+ struct t4_swsqe *swsqe;
int ret;
- PDBG("%s cq %p cqid 0x%x\n", __func__, cq, cq->cqid);
- ret = t4_next_hw_cqe(cq, &cqe);
+ PDBG("%s cqid 0x%x\n", __func__, chp->cq.cqid);
+ ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
+
+ /*
+ * This logic is similar to poll_cq(), but not quite the same
+ * unfortunately. Need to move pertinent HW CQEs to the SW CQ but
+ * also do any translation magic that poll_cq() normally does.
+ */
while (!ret) {
- PDBG("%s flushing hwcq cidx 0x%x swcq pidx 0x%x\n",
- __func__, cq->cidx, cq->sw_pidx);
- swcqe = &cq->sw_queue[cq->sw_pidx];
- *swcqe = *cqe;
- swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
- t4_swcq_produce(cq);
- t4_hwcq_consume(cq);
- ret = t4_next_hw_cqe(cq, &cqe);
+ qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));
+
+ /*
+ * drop CQEs with no associated QP
+ */
+ if (qhp == NULL)
+ goto next_cqe;
+
+ if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
+ goto next_cqe;
+
+ if (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP) {
+
+ /*
+ * drop peer2peer RTR reads.
+ */
+ if (CQE_WRID_STAG(hw_cqe) == 1)
+ goto next_cqe;
+
+ /*
+ * Eat completions for unsignaled read WRs.
+ */
+ if (!qhp->wq.sq.oldest_read->signaled) {
+ advance_oldest_read(&qhp->wq);
+ goto next_cqe;
+ }
+
+ /*
+ * Don't write to the HWCQ, create a new read req CQE
+ * in local memory and move it into the swcq.
+ */
+ create_read_req_cqe(&qhp->wq, hw_cqe, &read_cqe);
+ hw_cqe = &read_cqe;
+ advance_oldest_read(&qhp->wq);
+ }
+
+ /* If it's an SQ completion, then do the magic to move all the
+ * unsignaled and now in-order completions into the swcq.
+ */
+ if (SQ_TYPE(hw_cqe)) {
+ swsqe = &qhp->wq.sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
+ swsqe->cqe = *hw_cqe;
+ swsqe->complete = 1;
+ flush_completed_wrs(&qhp->wq, &chp->cq);
+ } else {
+ swcqe = &chp->cq.sw_queue[chp->cq.sw_pidx];
+ *swcqe = *hw_cqe;
+ swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
+ t4_swcq_produce(&chp->cq);
+ }
+next_cqe:
+ t4_hwcq_consume(&chp->cq);
+ ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
}
}
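
The rewritten c4iw_flush_sq() walks the software SQ from flush_cidx toward pidx, wrapping at the ring size, and either flushes or consumes each slot. The index walk in isolation, with assumed producer/consumer positions:

#include <stdio.h>

#define QSIZE 8

int main(void)
{
        int cidx = 6, pidx = 2;         /* assumed positions */
        int flushed = 0;

        while (cidx != pidx) {
                printf("flush slot %d\n", cidx);
                flushed++;
                if (++cidx == QSIZE)
                        cidx = 0;
        }
        printf("flushed %d slots\n", flushed);  /* visits 6, 7, 0, 1 -> 4 slots */
        return 0;
}
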
@@ -281,25 +424,6 @@ static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
return 1;
}
-void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
-{
- struct t4_cqe *cqe;
- u32 ptr;
-
- *count = 0;
- ptr = cq->sw_cidx;
- while (ptr != cq->sw_pidx) {
- cqe = &cq->sw_queue[ptr];
- if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
- wq->sq.oldest_read)) &&
- (CQE_QPID(cqe) == wq->sq.qid))
- (*count)++;
- if (++ptr == cq->size)
- ptr = 0;
- }
- PDBG("%s cq %p count %d\n", __func__, cq, *count);
-}
-
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
struct t4_cqe *cqe;
@@ -319,70 +443,6 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
PDBG("%s cq %p count %d\n", __func__, cq, *count);
}
-static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
-{
- struct t4_swsqe *swsqe;
- u16 ptr = wq->sq.cidx;
- int count = wq->sq.in_use;
- int unsignaled = 0;
-
- swsqe = &wq->sq.sw_sq[ptr];
- while (count--)
- if (!swsqe->signaled) {
- if (++ptr == wq->sq.size)
- ptr = 0;
- swsqe = &wq->sq.sw_sq[ptr];
- unsignaled++;
- } else if (swsqe->complete) {
-
- /*
- * Insert this completed cqe into the swcq.
- */
- PDBG("%s moving cqe into swcq sq idx %u cq idx %u\n",
- __func__, ptr, cq->sw_pidx);
- swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
- cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
- t4_swcq_produce(cq);
- swsqe->signaled = 0;
- wq->sq.in_use -= unsignaled;
- break;
- } else
- break;
-}
-
-static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
- struct t4_cqe *read_cqe)
-{
- read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
- read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
- read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
- V_CQE_SWCQE(SW_CQE(hw_cqe)) |
- V_CQE_OPCODE(FW_RI_READ_REQ) |
- V_CQE_TYPE(1));
- read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
-}
-
-/*
- * Return a ptr to the next read wr in the SWSQ or NULL.
- */
-static void advance_oldest_read(struct t4_wq *wq)
-{
-
- u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;
-
- if (rptr == wq->sq.size)
- rptr = 0;
- while (rptr != wq->sq.pidx) {
- wq->sq.oldest_read = &wq->sq.sw_sq[rptr];
-
- if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
- return;
- if (++rptr == wq->sq.size)
- rptr = 0;
- }
- wq->sq.oldest_read = NULL;
-}
-
/*
* poll_cq
*
@@ -427,6 +487,22 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
}
/*
+ * Skip HW CQEs if the WQ is flushed.
+ */
+ if (wq->flushed && !SW_CQE(hw_cqe)) {
+ ret = -EAGAIN;
+ goto skip_cqe;
+ }
+
+ /*
+ * skip TERMINATE cqes...
+ */
+ if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
+ ret = -EAGAIN;
+ goto skip_cqe;
+ }
+
+ /*
* Gotta tweak READ completions:
* 1) the cqe doesn't contain the sq_wptr from the wr.
* 2) opcode not reflected from the wr.
@@ -440,7 +516,7 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
* was generated by the kernel driver as part of peer-2-peer
* connection setup. So ignore the completion.
*/
- if (!wq->sq.oldest_read) {
+ if (CQE_WRID_STAG(hw_cqe) == 1) {
if (CQE_STATUS(hw_cqe))
t4_set_wq_in_error(wq);
ret = -EAGAIN;
@@ -448,6 +524,15 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
}
/*
+ * Eat completions for unsignaled read WRs.
+ */
+ if (!wq->sq.oldest_read->signaled) {
+ advance_oldest_read(wq);
+ ret = -EAGAIN;
+ goto skip_cqe;
+ }
+
+ /*
* Don't write to the HWCQ, so create a new read req CQE
* in local memory.
*/
@@ -457,14 +542,8 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
}
if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
- *cqe_flushed = t4_wq_in_error(wq);
+ *cqe_flushed = (CQE_STATUS(hw_cqe) == T4_ERR_SWFLUSH);
t4_set_wq_in_error(wq);
- goto proc_cqe;
- }
-
- if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
- ret = -EAGAIN;
- goto skip_cqe;
}
/*
@@ -523,7 +602,24 @@ proc_cqe:
* completion.
*/
if (SQ_TYPE(hw_cqe)) {
- wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
+ int idx = CQE_WRID_SQ_IDX(hw_cqe);
+ BUG_ON(idx > wq->sq.size);
+
+ /*
+ * Account for any unsignaled completions completed by
+ * this signaled completion. In this case, cidx points
+ * to the first unsignaled one, and idx points to the
+ * signaled one. So adjust in_use based on this delta.
+ * If this is not completing any unsignaled WRs, then the
+ * delta will be 0. Handle wrapping also!
+ */
+ if (idx < wq->sq.cidx)
+ wq->sq.in_use -= wq->sq.size + idx - wq->sq.cidx;
+ else
+ wq->sq.in_use -= idx - wq->sq.cidx;
+ BUG_ON(wq->sq.in_use < 0 && wq->sq.in_use < wq->sq.size);
+
+ wq->sq.cidx = (uint16_t)idx;
PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
t4_sq_consume(wq);
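
The SQ-completion branch above retires every work request between cidx and the completed index, so the delta has to be taken modulo the ring size. The same wrap-aware arithmetic on its own:

#include <stdio.h>

static int ring_distance(int from, int to, int size)
{
        return to >= from ? to - from : size + to - from;
}

int main(void)
{
        int size = 16;

        printf("no wrap: %d\n", ring_distance(3, 9, size));    /* 6 */
        printf("wrapped: %d\n", ring_distance(14, 2, size));   /* 4 */
        return 0;
}
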
@@ -532,6 +628,7 @@ proc_cqe:
*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
BUG_ON(t4_rq_empty(wq));
t4_rq_consume(wq);
+ goto skip_cqe;
}
flush_wq:
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index ae656016e1a..33d2cc6ab56 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -103,18 +103,43 @@ static int dump_qp(int id, void *p, void *data)
if (space == 0)
return 1;
- if (qp->ep)
- cc = snprintf(qpd->buf + qpd->pos, space,
- "qp sq id %u rq id %u state %u onchip %u "
- "ep tid %u state %u %pI4:%u->%pI4:%u\n",
- qp->wq.sq.qid, qp->wq.rq.qid, (int)qp->attr.state,
- qp->wq.sq.flags & T4_SQ_ONCHIP,
- qp->ep->hwtid, (int)qp->ep->com.state,
- &qp->ep->com.local_addr.sin_addr.s_addr,
- ntohs(qp->ep->com.local_addr.sin_port),
- &qp->ep->com.remote_addr.sin_addr.s_addr,
- ntohs(qp->ep->com.remote_addr.sin_port));
- else
+ if (qp->ep) {
+ if (qp->ep->com.local_addr.ss_family == AF_INET) {
+ struct sockaddr_in *lsin = (struct sockaddr_in *)
+ &qp->ep->com.local_addr;
+ struct sockaddr_in *rsin = (struct sockaddr_in *)
+ &qp->ep->com.remote_addr;
+
+ cc = snprintf(qpd->buf + qpd->pos, space,
+ "rc qp sq id %u rq id %u state %u "
+ "onchip %u ep tid %u state %u "
+ "%pI4:%u->%pI4:%u\n",
+ qp->wq.sq.qid, qp->wq.rq.qid,
+ (int)qp->attr.state,
+ qp->wq.sq.flags & T4_SQ_ONCHIP,
+ qp->ep->hwtid, (int)qp->ep->com.state,
+ &lsin->sin_addr, ntohs(lsin->sin_port),
+ &rsin->sin_addr, ntohs(rsin->sin_port));
+ } else {
+ struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
+ &qp->ep->com.local_addr;
+ struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
+ &qp->ep->com.remote_addr;
+
+ cc = snprintf(qpd->buf + qpd->pos, space,
+ "rc qp sq id %u rq id %u state %u "
+ "onchip %u ep tid %u state %u "
+ "%pI6:%u->%pI6:%u\n",
+ qp->wq.sq.qid, qp->wq.rq.qid,
+ (int)qp->attr.state,
+ qp->wq.sq.flags & T4_SQ_ONCHIP,
+ qp->ep->hwtid, (int)qp->ep->com.state,
+ &lsin6->sin6_addr,
+ ntohs(lsin6->sin6_port),
+ &rsin6->sin6_addr,
+ ntohs(rsin6->sin6_port));
+ }
+ } else
cc = snprintf(qpd->buf + qpd->pos, space,
"qp sq id %u rq id %u state %u onchip %u\n",
qp->wq.sq.qid, qp->wq.rq.qid,
@@ -351,15 +376,37 @@ static int dump_ep(int id, void *p, void *data)
if (space == 0)
return 1;
- cc = snprintf(epd->buf + epd->pos, space,
- "ep %p cm_id %p qp %p state %d flags 0x%lx history 0x%lx "
- "hwtid %d atid %d %pI4:%d <-> %pI4:%d\n",
- ep, ep->com.cm_id, ep->com.qp, (int)ep->com.state,
- ep->com.flags, ep->com.history, ep->hwtid, ep->atid,
- &ep->com.local_addr.sin_addr.s_addr,
- ntohs(ep->com.local_addr.sin_port),
- &ep->com.remote_addr.sin_addr.s_addr,
- ntohs(ep->com.remote_addr.sin_port));
+ if (ep->com.local_addr.ss_family == AF_INET) {
+ struct sockaddr_in *lsin = (struct sockaddr_in *)
+ &ep->com.local_addr;
+ struct sockaddr_in *rsin = (struct sockaddr_in *)
+ &ep->com.remote_addr;
+
+ cc = snprintf(epd->buf + epd->pos, space,
+ "ep %p cm_id %p qp %p state %d flags 0x%lx "
+ "history 0x%lx hwtid %d atid %d "
+ "%pI4:%d <-> %pI4:%d\n",
+ ep, ep->com.cm_id, ep->com.qp,
+ (int)ep->com.state, ep->com.flags,
+ ep->com.history, ep->hwtid, ep->atid,
+ &lsin->sin_addr, ntohs(lsin->sin_port),
+ &rsin->sin_addr, ntohs(rsin->sin_port));
+ } else {
+ struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
+ &ep->com.local_addr;
+ struct sockaddr_in6 *rsin6 = (struct sockaddr_in6 *)
+ &ep->com.remote_addr;
+
+ cc = snprintf(epd->buf + epd->pos, space,
+ "ep %p cm_id %p qp %p state %d flags 0x%lx "
+ "history 0x%lx hwtid %d atid %d "
+ "%pI6:%d <-> %pI6:%d\n",
+ ep, ep->com.cm_id, ep->com.qp,
+ (int)ep->com.state, ep->com.flags,
+ ep->com.history, ep->hwtid, ep->atid,
+ &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
+ &rsin6->sin6_addr, ntohs(rsin6->sin6_port));
+ }
if (cc < space)
epd->pos += cc;
return 0;
@@ -376,12 +423,27 @@ static int dump_listen_ep(int id, void *p, void *data)
if (space == 0)
return 1;
- cc = snprintf(epd->buf + epd->pos, space,
- "ep %p cm_id %p state %d flags 0x%lx stid %d backlog %d "
- "%pI4:%d\n", ep, ep->com.cm_id, (int)ep->com.state,
- ep->com.flags, ep->stid, ep->backlog,
- &ep->com.local_addr.sin_addr.s_addr,
- ntohs(ep->com.local_addr.sin_port));
+ if (ep->com.local_addr.ss_family == AF_INET) {
+ struct sockaddr_in *lsin = (struct sockaddr_in *)
+ &ep->com.local_addr;
+
+ cc = snprintf(epd->buf + epd->pos, space,
+ "ep %p cm_id %p state %d flags 0x%lx stid %d "
+ "backlog %d %pI4:%d\n",
+ ep, ep->com.cm_id, (int)ep->com.state,
+ ep->com.flags, ep->stid, ep->backlog,
+ &lsin->sin_addr, ntohs(lsin->sin_port));
+ } else {
+ struct sockaddr_in6 *lsin6 = (struct sockaddr_in6 *)
+ &ep->com.local_addr;
+
+ cc = snprintf(epd->buf + epd->pos, space,
+ "ep %p cm_id %p state %d flags 0x%lx stid %d "
+ "backlog %d %pI6:%d\n",
+ ep, ep->com.cm_id, (int)ep->com.state,
+ ep->com.flags, ep->stid, ep->backlog,
+ &lsin6->sin6_addr, ntohs(lsin6->sin6_port));
+ }
if (cc < space)
epd->pos += cc;
return 0;
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 1a840b2211d..d61d0a18f78 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -44,16 +44,6 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
struct c4iw_qp_attributes attrs;
unsigned long flag;
- if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||
- (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) {
- pr_err("%s AE after RTS - qpid 0x%x opcode %d status 0x%x "\
- "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
- __func__, CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
- CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
- CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
- return;
- }
-
printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x "
"type %d wrid.hi 0x%x wrid.lo 0x%x\n",
CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 485183ad34c..23eaeabab93 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -752,8 +752,8 @@ struct c4iw_ep_common {
enum c4iw_ep_state state;
struct kref kref;
struct mutex mutex;
- struct sockaddr_in local_addr;
- struct sockaddr_in remote_addr;
+ struct sockaddr_storage local_addr;
+ struct sockaddr_storage remote_addr;
struct c4iw_wr_wait wr_wait;
unsigned long flags;
unsigned long history;
@@ -917,12 +917,11 @@ void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb);
-void c4iw_flush_hw_cq(struct t4_cq *cq);
+void c4iw_flush_hw_cq(struct c4iw_cq *chp);
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
-void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
-int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count);
+int c4iw_flush_sq(struct c4iw_qp *qhp);
int c4iw_ev_handler(struct c4iw_dev *rnicp, u32 qid);
u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 232040447e8..582936708e6 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -737,6 +737,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
swsqe->idx = qhp->wq.sq.pidx;
swsqe->complete = 0;
swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED);
+ swsqe->flushed = 0;
swsqe->wr_id = wr->wr_id;
init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
@@ -1006,7 +1007,15 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
/* locking hierarchy: cq lock first, then qp lock. */
spin_lock_irqsave(&rchp->lock, flag);
spin_lock(&qhp->lock);
- c4iw_flush_hw_cq(&rchp->cq);
+
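+ /* a QP is flushed at most once; bail out if another path already flushed it */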
+ if (qhp->wq.flushed) {
+ spin_unlock(&qhp->lock);
+ spin_unlock_irqrestore(&rchp->lock, flag);
+ return;
+ }
+ qhp->wq.flushed = 1;
+
+ c4iw_flush_hw_cq(rchp);
c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
spin_unlock(&qhp->lock);
@@ -1020,9 +1029,9 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
/* locking hierarchy: cq lock first, then qp lock. */
spin_lock_irqsave(&schp->lock, flag);
spin_lock(&qhp->lock);
- c4iw_flush_hw_cq(&schp->cq);
- c4iw_count_scqes(&schp->cq, &qhp->wq, &count);
- flushed = c4iw_flush_sq(&qhp->wq, &schp->cq, count);
+ if (schp != rchp)
+ c4iw_flush_hw_cq(schp);
+ flushed = c4iw_flush_sq(qhp);
spin_unlock(&qhp->lock);
spin_unlock_irqrestore(&schp->lock, flag);
if (flushed) {
@@ -1037,11 +1046,11 @@ static void flush_qp(struct c4iw_qp *qhp)
struct c4iw_cq *rchp, *schp;
unsigned long flag;
- rchp = get_chp(qhp->rhp, qhp->attr.rcq);
- schp = get_chp(qhp->rhp, qhp->attr.scq);
+ rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+ schp = to_c4iw_cq(qhp->ibqp.send_cq);
+ t4_set_wq_in_error(&qhp->wq);
if (qhp->ibqp.uobject) {
- t4_set_wq_in_error(&qhp->wq);
t4_set_cq_in_error(&rchp->cq);
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
@@ -1330,8 +1339,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
disconnect = 1;
c4iw_get_ep(&qhp->ep->com);
}
- if (qhp->ibqp.uobject)
- t4_set_wq_in_error(&qhp->wq);
+ t4_set_wq_in_error(&qhp->wq);
ret = rdma_fini(rhp, qhp, ep);
if (ret)
goto err;
@@ -1340,18 +1348,21 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
set_state(qhp, C4IW_QP_STATE_TERMINATE);
qhp->attr.layer_etype = attrs->layer_etype;
qhp->attr.ecode = attrs->ecode;
- if (qhp->ibqp.uobject)
- t4_set_wq_in_error(&qhp->wq);
+ t4_set_wq_in_error(&qhp->wq);
ep = qhp->ep;
+ disconnect = 1;
if (!internal)
terminate = 1;
- disconnect = 1;
+ else {
+ ret = rdma_fini(rhp, qhp, ep);
+ if (ret)
+ goto err;
+ }
c4iw_get_ep(&qhp->ep->com);
break;
case C4IW_QP_STATE_ERROR:
set_state(qhp, C4IW_QP_STATE_ERROR);
- if (qhp->ibqp.uobject)
- t4_set_wq_in_error(&qhp->wq);
+ t4_set_wq_in_error(&qhp->wq);
if (!internal) {
abort = 1;
disconnect = 1;
@@ -1552,12 +1563,12 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
-
qhp = kzalloc(sizeof(*qhp), GFP_KERNEL);
if (!qhp)
return ERR_PTR(-ENOMEM);
qhp->wq.sq.size = sqsize;
qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
+ qhp->wq.sq.flush_cidx = -1;
qhp->wq.rq.size = rqsize;
qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;
@@ -1657,6 +1668,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
if (mm5) {
uresp.ma_sync_key = ucontext->key;
ucontext->key += PAGE_SIZE;
+ } else {
+ uresp.ma_sync_key = 0;
}
uresp.sq_key = ucontext->key;
ucontext->key += PAGE_SIZE;
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index ebcb03bd1b7..e73ace73918 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -36,9 +36,9 @@
#include "t4_msg.h"
#include "t4fw_ri_api.h"
-#define T4_MAX_NUM_QP (1<<16)
-#define T4_MAX_NUM_CQ (1<<15)
-#define T4_MAX_NUM_PD (1<<15)
+#define T4_MAX_NUM_QP 65536
+#define T4_MAX_NUM_CQ 65536
+#define T4_MAX_NUM_PD 65536
#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
#define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES)
#define T4_MAX_IQ_SIZE (65520 - 1)
@@ -47,7 +47,7 @@
#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1)
#define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1)
#define T4_MAX_NUM_STAG (1<<15)
-#define T4_MAX_MR_SIZE (~0ULL - 1)
+#define T4_MAX_MR_SIZE (~0ULL)
#define T4_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
#define T4_STAG_UNSET 0xffffffff
#define T4_FW_MAJ 0
@@ -269,6 +269,7 @@ struct t4_swsqe {
int complete;
int signaled;
u16 idx;
+ int flushed;
};
static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
@@ -300,6 +301,7 @@ struct t4_sq {
u16 pidx;
u16 wq_pidx;
u16 flags;
+ short flush_cidx;
};
struct t4_swrqe {
@@ -330,6 +332,7 @@ struct t4_wq {
void __iomem *db;
void __iomem *gts;
struct c4iw_rdev *rdev;
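+ /* set once the work queues have been flushed so they are not flushed twice */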
+ int flushed;
};
static inline int t4_rqes_posted(struct t4_wq *wq)
@@ -412,6 +415,9 @@ static inline void t4_sq_produce(struct t4_wq *wq, u8 len16)
static inline void t4_sq_consume(struct t4_wq *wq)
{
+ BUG_ON(wq->sq.in_use < 1);
+ if (wq->sq.cidx == wq->sq.flush_cidx)
+ wq->sq.flush_cidx = -1;
wq->sq.in_use--;
if (++wq->sq.cidx == wq->sq.size)
wq->sq.cidx = 0;
@@ -505,12 +511,18 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
static inline void t4_swcq_produce(struct t4_cq *cq)
{
cq->sw_in_use++;
+ if (cq->sw_in_use == cq->size) {
+ PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
+ cq->error = 1;
+ BUG_ON(1);
+ }
if (++cq->sw_pidx == cq->size)
cq->sw_pidx = 0;
}
static inline void t4_swcq_consume(struct t4_cq *cq)
{
+ BUG_ON(cq->sw_in_use < 1);
cq->sw_in_use--;
if (++cq->sw_cidx == cq->size)
cq->sw_cidx = 0;
@@ -519,7 +531,7 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
static inline void t4_hwcq_consume(struct t4_cq *cq)
{
cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
- if (++cq->cidx_inc == (cq->size >> 4)) {
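+ /* also ring the doorbell before cidx_inc can overflow the CIDXINC field */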
+ if (++cq->cidx_inc == (cq->size >> 4) || cq->cidx_inc == CIDXINC_MASK) {
u32 val;
val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
@@ -552,6 +564,7 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
ret = -EOVERFLOW;
cq->error = 1;
printk(KERN_ERR MOD "cq overflow cqid %u\n", cq->cqid);
+ BUG_ON(1);
} else if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
*cqe = &cq->queue[cq->cidx];
ret = 0;
@@ -562,6 +575,12 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
{
+ if (cq->sw_in_use == cq->size) {
+ PDBG("%s cxgb4 sw cq overflow cqid %u\n", __func__, cq->cqid);
+ cq->error = 1;
+ BUG_ON(1);
+ return NULL;
+ }
if (cq->sw_in_use)
return &cq->sw_queue[cq->sw_cidx];
return NULL;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 4d599cedbb0..f2a3f48107e 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -1511,8 +1511,14 @@ static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
memset(&attr, 0, sizeof attr);
attr.qp_state = IB_QPS_INIT;
- attr.pkey_index =
- to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
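+ /* for tunnel QPs try the full-membership P_Key index first, otherwise fall back to the slave's virt2phys mapping */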
+ ret = 0;
+ if (create_tun)
+ ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
+ ctx->port, IB_DEFAULT_PKEY_FULL,
+ &attr.pkey_index);
+ if (ret || !create_tun)
+ attr.pkey_index =
+ to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
attr.qkey = IB_QP1_QKEY;
attr.port_num = ctx->port;
ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index a188d317855..d6c5a73becf 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -54,6 +54,8 @@
#define DRV_VERSION "1.0"
#define DRV_RELDATE "April 4, 2008"
+#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
+
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
@@ -88,6 +90,25 @@ static void init_query_mad(struct ib_smp *mad)
static union ib_gid zgid;
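+/*
+ * Device-managed flow steering is only advertised when the HCA runs in
+ * DMFS mode with no IB ports and is not multi-function.
+ */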
+static int check_flow_steering_support(struct mlx4_dev *dev)
+{
+ int ib_num_ports = 0;
+ int i;
+
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ ib_num_ports++;
+
+ if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ if (ib_num_ports || mlx4_is_mfunc(dev)) {
+ pr_warn("Device managed flow steering is unavailable "
+ "for IB ports or in multifunction env.\n");
+ return 0;
+ }
+ return 1;
+ }
+ return 0;
+}
+
static int mlx4_ib_query_device(struct ib_device *ibdev,
struct ib_device_attr *props)
{
@@ -144,6 +165,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
else
props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
+ if (check_flow_steering_support(dev->dev))
+ props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;
}
props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
@@ -798,6 +821,209 @@ struct mlx4_ib_steering {
union ib_gid gid;
};
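+/* Translate one ib_flow_spec into the mlx4 hardware rule layout. Returns the
+ * number of bytes consumed or -EINVAL for an unsupported spec type.
+ */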
+static int parse_flow_attr(struct mlx4_dev *dev,
+ union ib_flow_spec *ib_spec,
+ struct _rule_hw *mlx4_spec)
+{
+ enum mlx4_net_trans_rule_id type;
+
+ switch (ib_spec->type) {
+ case IB_FLOW_SPEC_ETH:
+ type = MLX4_NET_TRANS_RULE_ID_ETH;
+ memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
+ ETH_ALEN);
+ memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
+ ETH_ALEN);
+ mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
+ mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
+ break;
+
+ case IB_FLOW_SPEC_IPV4:
+ type = MLX4_NET_TRANS_RULE_ID_IPV4;
+ mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
+ mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
+ mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
+ mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
+ break;
+
+ case IB_FLOW_SPEC_TCP:
+ case IB_FLOW_SPEC_UDP:
+ type = ib_spec->type == IB_FLOW_SPEC_TCP ?
+ MLX4_NET_TRANS_RULE_ID_TCP :
+ MLX4_NET_TRANS_RULE_ID_UDP;
+ mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
+ mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
+ mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
+ mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
+ mlx4_hw_rule_sz(dev, type) < 0)
+ return -EINVAL;
+ mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
+ mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
+ return mlx4_hw_rule_sz(dev, type);
+}
+
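+/* Build a firmware steering rule (control header plus the parsed specs) in a
+ * command mailbox and attach it to the QP; *reg_id receives the handle used
+ * to detach the rule later.
+ */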
+static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr,
+ int domain,
+ enum mlx4_net_trans_promisc_mode flow_type,
+ u64 *reg_id)
+{
+ int ret, i;
+ int size = 0;
+ void *ib_flow;
+ struct mlx4_ib_dev *mdev = to_mdev(qp->device);
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_net_trans_rule_hw_ctrl *ctrl;
+ size_t rule_size = sizeof(struct mlx4_net_trans_rule_hw_ctrl) +
+ (sizeof(struct _rule_hw) * flow_attr->num_of_specs);
+
+ static const u16 __mlx4_domain[] = {
+ [IB_FLOW_DOMAIN_USER] = MLX4_DOMAIN_UVERBS,
+ [IB_FLOW_DOMAIN_ETHTOOL] = MLX4_DOMAIN_ETHTOOL,
+ [IB_FLOW_DOMAIN_RFS] = MLX4_DOMAIN_RFS,
+ [IB_FLOW_DOMAIN_NIC] = MLX4_DOMAIN_NIC,
+ };
+
+ if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) {
+ pr_err("Invalid priority value %d\n", flow_attr->priority);
+ return -EINVAL;
+ }
+
+ if (domain >= IB_FLOW_DOMAIN_NUM) {
+ pr_err("Invalid domain value %d\n", domain);
+ return -EINVAL;
+ }
+
+ if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
+ return -EINVAL;
+
+ mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ memset(mailbox->buf, 0, rule_size);
+ ctrl = mailbox->buf;
+
+ ctrl->prio = cpu_to_be16(__mlx4_domain[domain] |
+ flow_attr->priority);
+ ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
+ ctrl->port = flow_attr->port;
+ ctrl->qpn = cpu_to_be32(qp->qp_num);
+
+ ib_flow = flow_attr + 1;
+ size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);
+ for (i = 0; i < flow_attr->num_of_specs; i++) {
+ ret = parse_flow_attr(mdev->dev, ib_flow, mailbox->buf + size);
+ if (ret < 0) {
+ mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+ return -EINVAL;
+ }
+ ib_flow += ((union ib_flow_spec *) ib_flow)->size;
+ size += ret;
+ }
+
+ ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
+ MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (ret == -ENOMEM)
+ pr_err("mcg table is full. Failed to register network rule.\n");
+ else if (ret == -ENXIO)
+ pr_err("Device managed flow steering is disabled. Failed to register network rule.\n");
+ else if (ret)
+ pr_err("Invalid argument. Failed to register network rule.\n");
+
+ mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+ return ret;
+}
+
+static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
+{
+ int err;
+ err = mlx4_cmd(dev, reg_id, 0, 0,
+ MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ pr_err("Fail to detach network rule. registration id = 0x%llx\n",
+ reg_id);
+ return err;
+}
+
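+/* A sniffer attachment expands to two firmware rules (unicast and multicast
+ * sniffer), which is why type[] and reg_id[] have two entries.
+ */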
+static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
+ struct ib_flow_attr *flow_attr,
+ int domain)
+{
+ int err = 0, i = 0;
+ struct mlx4_ib_flow *mflow;
+ enum mlx4_net_trans_promisc_mode type[2];
+
+ memset(type, 0, sizeof(type));
+
+ mflow = kzalloc(sizeof(*mflow), GFP_KERNEL);
+ if (!mflow) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ switch (flow_attr->type) {
+ case IB_FLOW_ATTR_NORMAL:
+ type[0] = MLX4_FS_REGULAR;
+ break;
+
+ case IB_FLOW_ATTR_ALL_DEFAULT:
+ type[0] = MLX4_FS_ALL_DEFAULT;
+ break;
+
+ case IB_FLOW_ATTR_MC_DEFAULT:
+ type[0] = MLX4_FS_MC_DEFAULT;
+ break;
+
+ case IB_FLOW_ATTR_SNIFFER:
+ type[0] = MLX4_FS_UC_SNIFFER;
+ type[1] = MLX4_FS_MC_SNIFFER;
+ break;
+
+ default:
+ err = -EINVAL;
+ goto err_free;
+ }
+
+ while (i < ARRAY_SIZE(type) && type[i]) {
+ err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
+ &mflow->reg_id[i]);
+ if (err)
+ goto err_free;
+ i++;
+ }
+
+ return &mflow->ibflow;
+
+err_free:
+ kfree(mflow);
+ return ERR_PTR(err);
+}
+
+static int mlx4_ib_destroy_flow(struct ib_flow *flow_id)
+{
+ int err, ret = 0;
+ int i = 0;
+ struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
+ struct mlx4_ib_flow *mflow = to_mflow(flow_id);
+
+ while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i]) {
+ err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i]);
+ if (err)
+ ret = err;
+ i++;
+ }
+
+ kfree(mflow);
+ return ret;
+}
+
static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
int err;
@@ -1461,6 +1687,15 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
}
+ if (check_flow_steering_support(dev)) {
+ ibdev->ib_dev.create_flow = mlx4_ib_create_flow;
+ ibdev->ib_dev.destroy_flow = mlx4_ib_destroy_flow;
+
+ ibdev->ib_dev.uverbs_cmd_mask |=
+ (1ull << IB_USER_VERBS_CMD_CREATE_FLOW) |
+ (1ull << IB_USER_VERBS_CMD_DESTROY_FLOW);
+ }
+
mlx4_ib_alloc_eqs(dev, ibdev);
spin_lock_init(&iboe->lock);
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index f61ec26500c..036b663dd26 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -132,6 +132,12 @@ struct mlx4_ib_fmr {
struct mlx4_fmr mfmr;
};
+struct mlx4_ib_flow {
+ struct ib_flow ibflow;
+ /* translating DMFS verbs sniffer rule to FW API requires two reg IDs */
+ u64 reg_id[2];
+};
+
struct mlx4_ib_wq {
u64 *wrid;
spinlock_t lock;
@@ -552,6 +558,12 @@ static inline struct mlx4_ib_fmr *to_mfmr(struct ib_fmr *ibfmr)
{
return container_of(ibfmr, struct mlx4_ib_fmr, ibfmr);
}
+
+static inline struct mlx4_ib_flow *to_mflow(struct ib_flow *ibflow)
+{
+ return container_of(ibflow, struct mlx4_ib_flow, ibflow);
+}
+
static inline struct mlx4_ib_qp *to_mqp(struct ib_qp *ibqp)
{
return container_of(ibqp, struct mlx4_ib_qp, ibqp);
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 8000fff4d44..3f831de9a4d 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -619,7 +619,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.tot_uuars = req.total_num_uuars;
resp.num_ports = dev->mdev.caps.num_ports;
- err = ib_copy_to_udata(udata, &resp, sizeof(resp));
+ err = ib_copy_to_udata(udata, &resp,
+ sizeof(resp) - sizeof(resp.reserved));
if (err)
goto out_uars;
@@ -1426,7 +1427,8 @@ static int init_one(struct pci_dev *pdev,
if (err)
goto err_eqs;
- if (ib_register_device(&dev->ib_dev, NULL))
+ err = ib_register_device(&dev->ib_dev, NULL);
+ if (err)
goto err_rsrc;
err = create_umr_res(dev);
@@ -1434,8 +1436,9 @@ static int init_one(struct pci_dev *pdev,
goto err_dev;
for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
- if (device_create_file(&dev->ib_dev.dev,
- mlx5_class_attributes[i]))
+ err = device_create_file(&dev->ib_dev.dev,
+ mlx5_class_attributes[i]);
+ if (err)
goto err_umrc;
}
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 16ac54c9819..045f8cdbd30 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -199,7 +199,7 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
static int sq_overhead(enum ib_qp_type qp_type)
{
- int size;
+ int size = 0;
switch (qp_type) {
case IB_QPT_XRC_INI:
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 24b9f1a0107..6b29249aa85 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -2998,6 +2998,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
u8 *start_ptr = &start_addr;
u8 **start_buff = &start_ptr;
u16 buff_len = 0;
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
if (!ibqp)
@@ -3062,8 +3064,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
/* setup our first outgoing iWarp send WQE (the IETF frame response) */
wqe = &nesqp->hwqp.sq_vbase[0];
- if (cm_id->remote_addr.sin_addr.s_addr !=
- cm_id->local_addr.sin_addr.s_addr) {
+ if (raddr->sin_addr.s_addr != laddr->sin_addr.s_addr) {
u64temp = (unsigned long)nesqp;
nesibdev = nesvnic->nesibdev;
nespd = nesqp->nespd;
@@ -3132,13 +3133,10 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
nes_cm_init_tsa_conn(nesqp, cm_node);
- nesqp->nesqp_context->tcpPorts[0] =
- cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
- nesqp->nesqp_context->tcpPorts[1] =
- cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
+ nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(laddr->sin_port));
+ nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(raddr->sin_port));
- nesqp->nesqp_context->ip0 =
- cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
+ nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(raddr->sin_addr.s_addr));
nesqp->nesqp_context->misc2 |= cpu_to_le32(
(u32)PCI_FUNC(nesdev->pcidev->devfn) <<
@@ -3162,9 +3160,9 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
memset(&nes_quad, 0, sizeof(nes_quad));
nes_quad.DstIpAdrIndex =
cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
- nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
- nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
- nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
+ nes_quad.SrcIpadr = raddr->sin_addr.s_addr;
+ nes_quad.TcpPorts[0] = raddr->sin_port;
+ nes_quad.TcpPorts[1] = laddr->sin_port;
/* Produce hash key */
crc_value = get_crc_value(&nes_quad);
@@ -3180,10 +3178,8 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
nes_debug(NES_DBG_CM, "QP%u, Destination IP = 0x%08X:0x%04X, local = "
"0x%08X:0x%04X, rcv_nxt=0x%08X, snd_nxt=0x%08X, mpa + "
"private data length=%u.\n", nesqp->hwqp.qp_id,
- ntohl(cm_id->remote_addr.sin_addr.s_addr),
- ntohs(cm_id->remote_addr.sin_port),
- ntohl(cm_id->local_addr.sin_addr.s_addr),
- ntohs(cm_id->local_addr.sin_port),
+ ntohl(raddr->sin_addr.s_addr), ntohs(raddr->sin_port),
+ ntohl(laddr->sin_addr.s_addr), ntohs(laddr->sin_port),
le32_to_cpu(nesqp->nesqp_context->rcv_nxt),
le32_to_cpu(nesqp->nesqp_context->snd_nxt),
buff_len);
@@ -3263,7 +3259,11 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
struct nes_cm_node *cm_node;
struct nes_cm_info cm_info;
int apbvt_set = 0;
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
+ struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->remote_addr;
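+ /* nes supports IPv4 connections only */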
+ if (cm_id->remote_addr.ss_family != AF_INET)
+ return -ENOSYS;
ibqp = nes_get_qp(cm_id->device, conn_param->qpn);
if (!ibqp)
return -EINVAL;
@@ -3277,16 +3277,14 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
if (!nesdev)
return -EINVAL;
- if (!(cm_id->local_addr.sin_port) || !(cm_id->remote_addr.sin_port))
+ if (!laddr->sin_port || !raddr->sin_port)
return -EINVAL;
nes_debug(NES_DBG_CM, "QP%u, current IP = 0x%08X, Destination IP = "
"0x%08X:0x%04X, local = 0x%08X:0x%04X.\n", nesqp->hwqp.qp_id,
- ntohl(nesvnic->local_ipaddr),
- ntohl(cm_id->remote_addr.sin_addr.s_addr),
- ntohs(cm_id->remote_addr.sin_port),
- ntohl(cm_id->local_addr.sin_addr.s_addr),
- ntohs(cm_id->local_addr.sin_port));
+ ntohl(nesvnic->local_ipaddr), ntohl(raddr->sin_addr.s_addr),
+ ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
+ ntohs(laddr->sin_port));
atomic_inc(&cm_connects);
nesqp->active_conn = 1;
@@ -3306,18 +3304,18 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
nes_debug(NES_DBG_CM, "mpa private data len =%u\n",
conn_param->private_data_len);
- if (cm_id->local_addr.sin_addr.s_addr !=
- cm_id->remote_addr.sin_addr.s_addr) {
- nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
- PCI_FUNC(nesdev->pcidev->devfn), NES_MANAGE_APBVT_ADD);
+ if (laddr->sin_addr.s_addr != raddr->sin_addr.s_addr) {
+ nes_manage_apbvt(nesvnic, ntohs(laddr->sin_port),
+ PCI_FUNC(nesdev->pcidev->devfn),
+ NES_MANAGE_APBVT_ADD);
apbvt_set = 1;
}
/* set up the connection params for the node */
- cm_info.loc_addr = htonl(cm_id->local_addr.sin_addr.s_addr);
- cm_info.loc_port = htons(cm_id->local_addr.sin_port);
- cm_info.rem_addr = htonl(cm_id->remote_addr.sin_addr.s_addr);
- cm_info.rem_port = htons(cm_id->remote_addr.sin_port);
+ cm_info.loc_addr = htonl(laddr->sin_addr.s_addr);
+ cm_info.loc_port = htons(laddr->sin_port);
+ cm_info.rem_addr = htonl(raddr->sin_addr.s_addr);
+ cm_info.rem_port = htons(raddr->sin_port);
cm_info.cm_id = cm_id;
cm_info.conn_type = NES_CM_IWARP_CONN_TYPE;
@@ -3329,7 +3327,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
&cm_info);
if (!cm_node) {
if (apbvt_set)
- nes_manage_apbvt(nesvnic, ntohs(cm_id->local_addr.sin_port),
+ nes_manage_apbvt(nesvnic, ntohs(laddr->sin_port),
PCI_FUNC(nesdev->pcidev->devfn),
NES_MANAGE_APBVT_DEL);
@@ -3355,10 +3353,13 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
struct nes_cm_listener *cm_node;
struct nes_cm_info cm_info;
int err;
+ struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->local_addr;
nes_debug(NES_DBG_CM, "cm_id = %p, local port = 0x%04X.\n",
- cm_id, ntohs(cm_id->local_addr.sin_port));
+ cm_id, ntohs(laddr->sin_port));
+ if (cm_id->local_addr.ss_family != AF_INET)
+ return -ENOSYS;
nesvnic = to_nesvnic(cm_id->device);
if (!nesvnic)
return -EINVAL;
@@ -3367,11 +3368,11 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
nesvnic, nesvnic->netdev, nesvnic->netdev->name);
nes_debug(NES_DBG_CM, "nesvnic->local_ipaddr=0x%08x, sin_addr.s_addr=0x%08x\n",
- nesvnic->local_ipaddr, cm_id->local_addr.sin_addr.s_addr);
+ nesvnic->local_ipaddr, laddr->sin_addr.s_addr);
/* setup listen params in our api call struct */
cm_info.loc_addr = nesvnic->local_ipaddr;
- cm_info.loc_port = cm_id->local_addr.sin_port;
+ cm_info.loc_port = laddr->sin_port;
cm_info.backlog = backlog;
cm_info.cm_id = cm_id;
@@ -3388,8 +3389,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
cm_id->provider_data = cm_node;
if (!cm_node->reused_node) {
- err = nes_manage_apbvt(nesvnic,
- ntohs(cm_id->local_addr.sin_port),
+ err = nes_manage_apbvt(nesvnic, ntohs(laddr->sin_port),
PCI_FUNC(nesvnic->nesdev->pcidev->devfn),
NES_MANAGE_APBVT_ADD);
if (err) {
@@ -3487,6 +3487,9 @@ static void cm_event_connected(struct nes_cm_event *event)
struct nes_v4_quad nes_quad;
u32 crc_value;
int ret;
+ struct sockaddr_in *laddr;
+ struct sockaddr_in *raddr;
+ struct sockaddr_in *cm_event_laddr;
/* get all our handles */
cm_node = event->cm_node;
@@ -3496,27 +3499,24 @@ static void cm_event_connected(struct nes_cm_event *event)
nesvnic = to_nesvnic(nesqp->ibqp.device);
nesdev = nesvnic->nesdev;
nesadapter = nesdev->nesadapter;
+ laddr = (struct sockaddr_in *)&cm_id->local_addr;
+ raddr = (struct sockaddr_in *)&cm_id->remote_addr;
+ cm_event_laddr = (struct sockaddr_in *)&cm_event.local_addr;
if (nesqp->destroyed)
return;
atomic_inc(&cm_connecteds);
nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
" local port 0x%04X. jiffies = %lu.\n",
- nesqp->hwqp.qp_id,
- ntohl(cm_id->remote_addr.sin_addr.s_addr),
- ntohs(cm_id->remote_addr.sin_port),
- ntohs(cm_id->local_addr.sin_port),
- jiffies);
+ nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
+ ntohs(raddr->sin_port), ntohs(laddr->sin_port), jiffies);
nes_cm_init_tsa_conn(nesqp, cm_node);
/* set the QP tsa context */
- nesqp->nesqp_context->tcpPorts[0] =
- cpu_to_le16(ntohs(cm_id->local_addr.sin_port));
- nesqp->nesqp_context->tcpPorts[1] =
- cpu_to_le16(ntohs(cm_id->remote_addr.sin_port));
- nesqp->nesqp_context->ip0 =
- cpu_to_le32(ntohl(cm_id->remote_addr.sin_addr.s_addr));
+ nesqp->nesqp_context->tcpPorts[0] = cpu_to_le16(ntohs(laddr->sin_port));
+ nesqp->nesqp_context->tcpPorts[1] = cpu_to_le16(ntohs(raddr->sin_port));
+ nesqp->nesqp_context->ip0 = cpu_to_le32(ntohl(raddr->sin_addr.s_addr));
nesqp->nesqp_context->misc2 |= cpu_to_le32(
(u32)PCI_FUNC(nesdev->pcidev->devfn) <<
@@ -3544,9 +3544,9 @@ static void cm_event_connected(struct nes_cm_event *event)
nes_quad.DstIpAdrIndex =
cpu_to_le32((u32)PCI_FUNC(nesdev->pcidev->devfn) << 24);
- nes_quad.SrcIpadr = cm_id->remote_addr.sin_addr.s_addr;
- nes_quad.TcpPorts[0] = cm_id->remote_addr.sin_port;
- nes_quad.TcpPorts[1] = cm_id->local_addr.sin_port;
+ nes_quad.SrcIpadr = raddr->sin_addr.s_addr;
+ nes_quad.TcpPorts[0] = raddr->sin_port;
+ nes_quad.TcpPorts[1] = laddr->sin_port;
/* Produce hash key */
crc_value = get_crc_value(&nes_quad);
@@ -3565,8 +3565,8 @@ static void cm_event_connected(struct nes_cm_event *event)
cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
cm_event.status = 0;
cm_event.provider_data = cm_id->provider_data;
- cm_event.local_addr.sin_family = AF_INET;
- cm_event.local_addr.sin_port = cm_id->local_addr.sin_port;
+ cm_event_laddr->sin_family = AF_INET;
+ cm_event_laddr->sin_port = laddr->sin_port;
cm_event.remote_addr = cm_id->remote_addr;
cm_event.private_data = (void *)event->cm_node->mpa_frame_buf;
@@ -3574,7 +3574,7 @@ static void cm_event_connected(struct nes_cm_event *event)
cm_event.ird = cm_node->ird_size;
cm_event.ord = cm_node->ord_size;
- cm_event.local_addr.sin_addr.s_addr = event->cm_info.rem_addr;
+ cm_event_laddr->sin_addr.s_addr = event->cm_info.rem_addr;
ret = cm_id->event_handler(cm_id, &cm_event);
nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
@@ -3627,9 +3627,16 @@ static void cm_event_connect_error(struct nes_cm_event *event)
cm_event.private_data = NULL;
cm_event.private_data_len = 0;
- nes_debug(NES_DBG_CM, "call CM_EVENT REJECTED, local_addr=%08x, "
- "remove_addr=%08x\n", cm_event.local_addr.sin_addr.s_addr,
- cm_event.remote_addr.sin_addr.s_addr);
+#ifdef CONFIG_INFINIBAND_NES_DEBUG
+ {
+ struct sockaddr_in *cm_event_laddr = (struct sockaddr_in *)
+ &cm_event.local_addr;
+ struct sockaddr_in *cm_event_raddr = (struct sockaddr_in *)
+ &cm_event.remote_addr;
+ nes_debug(NES_DBG_CM, "call CM_EVENT REJECTED, local_addr=%08x, remote_addr=%08x\n",
+ cm_event_laddr->sin_addr.s_addr, cm_event_raddr->sin_addr.s_addr);
+ }
+#endif
ret = cm_id->event_handler(cm_id, &cm_event);
nes_debug(NES_DBG_CM, "OFA CM event_handler returned, ret=%d\n", ret);
@@ -3709,6 +3716,10 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
struct iw_cm_event cm_event;
int ret;
struct nes_cm_node *cm_node;
+ struct sockaddr_in *cm_event_laddr = (struct sockaddr_in *)
+ &cm_event.local_addr;
+ struct sockaddr_in *cm_event_raddr = (struct sockaddr_in *)
+ &cm_event.remote_addr;
cm_node = event->cm_node;
if (!cm_node)
@@ -3723,13 +3734,13 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
cm_event.status = 0;
cm_event.provider_data = (void *)cm_node;
- cm_event.local_addr.sin_family = AF_INET;
- cm_event.local_addr.sin_port = htons(event->cm_info.loc_port);
- cm_event.local_addr.sin_addr.s_addr = htonl(event->cm_info.loc_addr);
+ cm_event_laddr->sin_family = AF_INET;
+ cm_event_laddr->sin_port = htons(event->cm_info.loc_port);
+ cm_event_laddr->sin_addr.s_addr = htonl(event->cm_info.loc_addr);
- cm_event.remote_addr.sin_family = AF_INET;
- cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port);
- cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr);
+ cm_event_raddr->sin_family = AF_INET;
+ cm_event_raddr->sin_port = htons(event->cm_info.rem_port);
+ cm_event_raddr->sin_addr.s_addr = htonl(event->cm_info.rem_addr);
cm_event.private_data = cm_node->mpa_frame_buf;
cm_event.private_data_len = (u8)cm_node->mpa_frame_size;
cm_event.ird = cm_node->ird_size;
@@ -3749,6 +3760,10 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
struct iw_cm_event cm_event;
struct nes_cm_node *cm_node;
int ret;
+ struct sockaddr_in *cm_event_laddr = (struct sockaddr_in *)
+ &cm_event.local_addr;
+ struct sockaddr_in *cm_event_raddr = (struct sockaddr_in *)
+ &cm_event.remote_addr;
cm_node = event->cm_node;
if (!cm_node)
@@ -3763,21 +3778,21 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
cm_event.status = -ECONNREFUSED;
cm_event.provider_data = cm_id->provider_data;
- cm_event.local_addr.sin_family = AF_INET;
- cm_event.local_addr.sin_port = htons(event->cm_info.loc_port);
- cm_event.local_addr.sin_addr.s_addr = htonl(event->cm_info.loc_addr);
+ cm_event_laddr->sin_family = AF_INET;
+ cm_event_laddr->sin_port = htons(event->cm_info.loc_port);
+ cm_event_laddr->sin_addr.s_addr = htonl(event->cm_info.loc_addr);
- cm_event.remote_addr.sin_family = AF_INET;
- cm_event.remote_addr.sin_port = htons(event->cm_info.rem_port);
- cm_event.remote_addr.sin_addr.s_addr = htonl(event->cm_info.rem_addr);
+ cm_event_raddr->sin_family = AF_INET;
+ cm_event_raddr->sin_port = htons(event->cm_info.rem_port);
+ cm_event_raddr->sin_addr.s_addr = htonl(event->cm_info.rem_addr);
cm_event.private_data = cm_node->mpa_frame_buf;
cm_event.private_data_len = (u8)cm_node->mpa_frame_size;
nes_debug(NES_DBG_CM, "call CM_EVENT_MPA_REJECTED, local_addr=%08x, "
"remove_addr=%08x\n",
- cm_event.local_addr.sin_addr.s_addr,
- cm_event.remote_addr.sin_addr.s_addr);
+ cm_event_laddr->sin_addr.s_addr,
+ cm_event_raddr->sin_addr.s_addr);
ret = cm_id->event_handler(cm_id, &cm_event);
if (ret)
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 418004c93fe..90200245c5e 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -3570,10 +3570,10 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
nes_debug(NES_DBG_AEQ, "aeid = 0x%04X, qp-cq id = %d, aeqe = %p,"
- " Tcp state = %d, iWARP state = %d\n",
+ " Tcp state = %s, iWARP state = %s\n",
async_event_id,
le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe,
- tcp_state, iwarp_state);
+ nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]);
aeqe_cq_id = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]);
if (aeq_info & NES_AEQE_QP) {
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 8f67fe2e91e..5b53ca5a228 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1384,6 +1384,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
if (ibpd->uobject) {
uresp.mmap_sq_db_index = nesqp->mmap_sq_db_index;
+ uresp.mmap_rq_db_index = 0;
uresp.actual_sq_size = sq_size;
uresp.actual_rq_size = rq_size;
uresp.qp_id = nesqp->hwqp.qp_id;
@@ -1767,7 +1768,7 @@ static struct ib_cq *nes_create_cq(struct ib_device *ibdev, int entries,
resp.cq_id = nescq->hw_cq.cq_number;
resp.cq_size = nescq->hw_cq.cq_size;
resp.mmap_db_index = 0;
- if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
+ if (ib_copy_to_udata(udata, &resp, sizeof resp - sizeof resp.reserved)) {
nes_free_resource(nesadapter, nesadapter->allocated_cqs, cq_num);
kfree(nescq);
return ERR_PTR(-EFAULT);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index d540180a8e4..adc11d14f87 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -56,10 +56,12 @@ struct ocrdma_dev_attr {
u16 max_qp;
u16 max_wqe;
u16 max_rqe;
+ u16 max_srq;
u32 max_inline_data;
int max_send_sge;
int max_recv_sge;
int max_srq_sge;
+ int max_rdma_sge;
int max_mr;
u64 max_mr_size;
u32 max_num_mr_pbl;
@@ -130,8 +132,7 @@ struct ocrdma_dev {
struct ocrdma_cq **cq_tbl;
struct ocrdma_qp **qp_tbl;
- struct ocrdma_eq meq;
- struct ocrdma_eq *qp_eq_tbl;
+ struct ocrdma_eq *eq_tbl;
int eq_cnt;
u16 base_eqid;
u16 max_eq;
@@ -168,11 +169,12 @@ struct ocrdma_dev {
struct list_head entry;
struct rcu_head rcu;
int id;
+ u64 stag_arr[OCRDMA_MAX_STAG];
+ u16 pvid;
};
struct ocrdma_cq {
struct ib_cq ibcq;
- struct ocrdma_dev *dev;
struct ocrdma_cqe *va;
u32 phase;
u32 getp; /* pointer to pending wrs to
@@ -214,7 +216,6 @@ struct ocrdma_pd {
struct ocrdma_ah {
struct ib_ah ibah;
- struct ocrdma_dev *dev;
struct ocrdma_av *av;
u16 sgid_index;
u32 id;
@@ -234,7 +235,6 @@ struct ocrdma_qp_hwq_info {
struct ocrdma_srq {
struct ib_srq ibsrq;
- struct ocrdma_dev *dev;
u8 __iomem *db;
struct ocrdma_qp_hwq_info rq;
u64 *rqe_wr_id_tbl;
@@ -290,10 +290,11 @@ struct ocrdma_qp {
u32 qkey;
bool dpp_enabled;
u8 *ird_q_va;
+ bool signaled;
+ u16 db_cache;
};
struct ocrdma_hw_mr {
- struct ocrdma_dev *dev;
u32 lkey;
u8 fr_mr;
u8 remote_atomic;
@@ -317,15 +318,16 @@ struct ocrdma_mr {
struct ib_mr ibmr;
struct ib_umem *umem;
struct ocrdma_hw_mr hwmr;
- struct ocrdma_pd *pd;
};
struct ocrdma_ucontext {
struct ib_ucontext ibucontext;
- struct ocrdma_dev *dev;
struct list_head mm_head;
struct mutex mm_list_lock; /* protects list entries of mm type */
+ struct ocrdma_pd *cntxt_pd;
+ int pd_in_use;
+
struct {
u32 *va;
dma_addr_t pa;
@@ -386,14 +388,14 @@ static inline struct ocrdma_srq *get_ocrdma_srq(struct ib_srq *ibsrq)
static inline int ocrdma_get_num_posted_shift(struct ocrdma_qp *qp)
{
return ((qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY &&
- qp->id < 64) ? 24 : 16);
+ qp->id < 128) ? 24 : 16);
}
static inline int is_cqe_valid(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe)
{
int cqe_valid;
cqe_valid = le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_VALID;
- return ((cqe_valid == cq->phase) ? 1 : 0);
+ return (cqe_valid == cq->phase);
}
static inline int is_cqe_for_sq(struct ocrdma_cqe *cqe)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
index 517ab20b727..fbac8eb4403 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
@@ -28,6 +28,9 @@
#ifndef __OCRDMA_ABI_H__
#define __OCRDMA_ABI_H__
+#define OCRDMA_ABI_VERSION 1
+/* user kernel communication data structures. */
+
struct ocrdma_alloc_ucontext_resp {
u32 dev_id;
u32 wqe_size;
@@ -35,16 +38,16 @@ struct ocrdma_alloc_ucontext_resp {
u32 dpp_wqe_size;
u64 ah_tbl_page;
u32 ah_tbl_len;
- u32 rsvd;
- u8 fw_ver[32];
u32 rqe_size;
+ u8 fw_ver[32];
+ /* for future use/new features in progress */
u64 rsvd1;
-} __packed;
+ u64 rsvd2;
+};
-/* user kernel communication data structures. */
struct ocrdma_alloc_pd_ureq {
u64 rsvd1;
-} __packed;
+};
struct ocrdma_alloc_pd_uresp {
u32 id;
@@ -52,12 +55,12 @@ struct ocrdma_alloc_pd_uresp {
u32 dpp_page_addr_hi;
u32 dpp_page_addr_lo;
u64 rsvd1;
-} __packed;
+};
struct ocrdma_create_cq_ureq {
u32 dpp_cq;
- u32 rsvd;
-} __packed;
+ u32 rsvd; /* pad */
+};
#define MAX_CQ_PAGES 8
struct ocrdma_create_cq_uresp {
@@ -69,9 +72,10 @@ struct ocrdma_create_cq_uresp {
u64 db_page_addr;
u32 db_page_size;
u32 phase_change;
+ /* for future use/new features in progress */
u64 rsvd1;
u64 rsvd2;
-} __packed;
+};
#define MAX_QP_PAGES 8
#define MAX_UD_AV_PAGES 8
@@ -80,14 +84,14 @@ struct ocrdma_create_qp_ureq {
u8 enable_dpp_cq;
u8 rsvd;
u16 dpp_cq_id;
- u32 rsvd1;
+ u32 rsvd1; /* pad */
};
struct ocrdma_create_qp_uresp {
u16 qp_id;
u16 sq_dbid;
u16 rq_dbid;
- u16 resv0;
+ u16 resv0; /* pad */
u32 sq_page_size;
u32 rq_page_size;
u32 num_sq_pages;
@@ -98,19 +102,19 @@ struct ocrdma_create_qp_uresp {
u32 db_page_size;
u32 dpp_credit;
u32 dpp_offset;
- u32 rsvd1;
u32 num_wqe_allocated;
u32 num_rqe_allocated;
u32 db_sq_offset;
u32 db_rq_offset;
u32 db_shift;
+ u64 rsvd1;
u64 rsvd2;
u64 rsvd3;
} __packed;
struct ocrdma_create_srq_uresp {
u16 rq_dbid;
- u16 resv0;
+ u16 resv0; /* pad */
u32 resv1;
u32 rq_page_size;
@@ -126,6 +130,6 @@ struct ocrdma_create_srq_uresp {
u64 rsvd2;
u64 rsvd3;
-} __packed;
+};
#endif /* __OCRDMA_ABI_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index a877a8ed790..ee499d94225 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -29,19 +29,17 @@
#include <net/netevent.h>
#include <rdma/ib_addr.h>
-#include <rdma/ib_cache.h>
#include "ocrdma.h"
#include "ocrdma_verbs.h"
#include "ocrdma_ah.h"
#include "ocrdma_hw.h"
-static inline int set_av_attr(struct ocrdma_ah *ah,
+static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
struct ib_ah_attr *attr, int pdid)
{
int status = 0;
u16 vlan_tag; bool vlan_enabled = false;
- struct ocrdma_dev *dev = ah->dev;
struct ocrdma_eth_vlan eth;
struct ocrdma_grh grh;
int eth_sz;
@@ -52,6 +50,8 @@ static inline int set_av_attr(struct ocrdma_ah *ah,
ah->sgid_index = attr->grh.sgid_index;
vlan_tag = rdma_get_vlan_id(&attr->grh.dgid);
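+ /* fall back to the port VLAN id when the dgid carries no valid VLAN tag */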
+ if (!vlan_tag || (vlan_tag > 0xFFF))
+ vlan_tag = dev->pvid;
if (vlan_tag && (vlan_tag < 0x1000)) {
eth.eth_type = cpu_to_be16(0x8100);
eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
@@ -93,7 +93,7 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
int status;
struct ocrdma_ah *ah;
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
- struct ocrdma_dev *dev = pd->dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
if (!(attr->ah_flags & IB_AH_GRH))
return ERR_PTR(-EINVAL);
@@ -101,12 +101,11 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
ah = kzalloc(sizeof *ah, GFP_ATOMIC);
if (!ah)
return ERR_PTR(-ENOMEM);
- ah->dev = pd->dev;
status = ocrdma_alloc_av(dev, ah);
if (status)
goto av_err;
- status = set_av_attr(ah, attr, pd->id);
+ status = set_av_attr(dev, ah, attr, pd->id);
if (status)
goto av_conf_err;
@@ -127,7 +126,9 @@ av_err:
int ocrdma_destroy_ah(struct ib_ah *ibah)
{
struct ocrdma_ah *ah = get_ocrdma_ah(ibah);
- ocrdma_free_av(ah->dev, ah);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibah->device);
+
+ ocrdma_free_av(dev, ah);
kfree(ah);
return 0;
}
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 0965278dd2e..4ed8235d2d3 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -94,7 +94,7 @@ enum cqe_status {
static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)
{
- return (u8 *)eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
+ return eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));
}
static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
@@ -105,8 +105,7 @@ static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev)
{
struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)
- ((u8 *) dev->mq.cq.va +
- (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));
+ (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));
if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK))
return NULL;
@@ -120,9 +119,7 @@ static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev)
static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev)
{
- return (struct ocrdma_mqe *)((u8 *) dev->mq.sq.va +
- (dev->mq.sq.head *
- sizeof(struct ocrdma_mqe)));
+ return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe));
}
static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
@@ -132,8 +129,7 @@ static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev)
{
- return (void *)((u8 *) dev->mq.sq.va +
- (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe)));
+ return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe));
}
enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
@@ -181,7 +177,7 @@ static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
static int ocrdma_get_mbx_errno(u32 status)
{
- int err_num = -EFAULT;
+ int err_num;
u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >>
OCRDMA_MBX_RSP_STATUS_SHIFT;
u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >>
@@ -260,10 +256,11 @@ static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
break;
case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES:
case OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING:
- err_num = -EAGAIN;
+ err_num = -EINVAL;
break;
case OCRDMA_MBX_CQE_STATUS_DMA_FAILED:
- err_num = -EIO;
+ default:
+ err_num = -EINVAL;
break;
}
return err_num;
@@ -367,22 +364,6 @@ static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,
}
}
-static void ocrdma_assign_eq_vect_gen2(struct ocrdma_dev *dev,
- struct ocrdma_eq *eq)
-{
- /* assign vector and update vector id for next EQ */
- eq->vector = dev->nic_info.msix.start_vector;
- dev->nic_info.msix.start_vector += 1;
-}
-
-static void ocrdma_free_eq_vect_gen2(struct ocrdma_dev *dev)
-{
- /* this assumes that EQs are freed in exactly reverse order
- * as its allocation.
- */
- dev->nic_info.msix.start_vector -= 1;
-}
-
static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q,
int queue_type)
{
@@ -423,11 +404,8 @@ static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
memset(cmd, 0, sizeof(*cmd));
ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON,
sizeof(*cmd));
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
- cmd->req.rsvd_version = 0;
- else
- cmd->req.rsvd_version = 2;
+ cmd->req.rsvd_version = 2;
cmd->num_pages = 4;
cmd->valid = OCRDMA_CREATE_EQ_VALID;
cmd->cnt = 4 << OCRDMA_CREATE_EQ_CNT_SHIFT;
@@ -438,12 +416,7 @@ static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
NULL);
if (!status) {
eq->q.id = rsp->vector_eqid & 0xffff;
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
- ocrdma_assign_eq_vect_gen2(dev, eq);
- else {
- eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
- dev->nic_info.msix.start_vector += 1;
- }
+ eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
eq->q.created = true;
}
return status;
@@ -486,8 +459,6 @@ static void _ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
{
if (eq->q.created) {
ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ);
- if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY)
- ocrdma_free_eq_vect_gen2(dev);
ocrdma_free_q(dev, &eq->q);
}
}
@@ -506,13 +477,12 @@ static void ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
_ocrdma_destroy_eq(dev, eq);
}
-static void ocrdma_destroy_qp_eqs(struct ocrdma_dev *dev)
+static void ocrdma_destroy_eqs(struct ocrdma_dev *dev)
{
int i;
- /* deallocate the data path eqs */
for (i = 0; i < dev->eq_cnt; i++)
- ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);
+ ocrdma_destroy_eq(dev, &dev->eq_tbl[i]);
}
static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
@@ -527,16 +497,21 @@ static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ,
OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
- cmd->pgsz_pgcnt = PAGES_4K_SPANNED(cq->va, cq->size);
+ cmd->req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
+ cmd->pgsz_pgcnt = (cq->size / OCRDMA_MIN_Q_PAGE_SIZE) <<
+ OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
+ cmd->pgsz_pgcnt |= PAGES_4K_SPANNED(cq->va, cq->size);
+
cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
- cmd->eqn = (eq->id << OCRDMA_CREATE_CQ_EQID_SHIFT);
+ cmd->eqn = eq->id;
+ cmd->cqe_count = cq->size / sizeof(struct ocrdma_mcqe);
- ocrdma_build_q_pages(&cmd->pa[0], cmd->pgsz_pgcnt,
+ ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE,
cq->dma, PAGE_SIZE_4K);
status = be_roce_mcc_cmd(dev->nic_info.netdev,
cmd, sizeof(*cmd), NULL, NULL);
if (!status) {
- cq->id = (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
+ cq->id = (u16) (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);
cq->created = true;
}
return status;
@@ -569,7 +544,10 @@ static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
cmd->cqid_pages = num_pages;
cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);
cmd->async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;
- cmd->async_event_bitmap = Bit(20);
+
+ cmd->async_event_bitmap = Bit(OCRDMA_ASYNC_GRP5_EVE_CODE);
+ cmd->async_event_bitmap |= Bit(OCRDMA_ASYNC_RDMA_EVE_CODE);
+
cmd->async_cqid_ringsize = cq->id;
cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);
@@ -596,7 +574,7 @@ static int ocrdma_create_mq(struct ocrdma_dev *dev)
if (status)
goto alloc_err;
- status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->meq.q);
+ status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q);
if (status)
goto mbx_cq_free;
@@ -653,7 +631,7 @@ static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev,
if (qp == NULL)
BUG();
- ocrdma_qp_state_machine(qp, new_ib_qps, &old_ib_qps);
+ ocrdma_qp_state_change(qp, new_ib_qps, &old_ib_qps);
}
static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
@@ -746,11 +724,35 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
qp->srq->ibsrq.event_handler(&ib_evt,
qp->srq->ibsrq.
srq_context);
- } else if (dev_event)
+ } else if (dev_event) {
ib_dispatch_event(&ib_evt);
+ }
}
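+/* Handle group-5 async events; currently only port VLAN id (PVID) state changes are of interest. */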
+static void ocrdma_process_grp5_async(struct ocrdma_dev *dev,
+ struct ocrdma_ae_mcqe *cqe)
+{
+ struct ocrdma_ae_pvid_mcqe *evt;
+ int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
+ OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;
+
+ switch (type) {
+ case OCRDMA_ASYNC_EVENT_PVID_STATE:
+ evt = (struct ocrdma_ae_pvid_mcqe *)cqe;
+ if ((evt->tag_enabled & OCRDMA_AE_PVID_MCQE_ENABLED_MASK) >>
+ OCRDMA_AE_PVID_MCQE_ENABLED_SHIFT)
+ dev->pvid = ((evt->tag_enabled &
+ OCRDMA_AE_PVID_MCQE_TAG_MASK) >>
+ OCRDMA_AE_PVID_MCQE_TAG_SHIFT);
+ break;
+ default:
+ /* not an event we are interested in */
+ break;
+ }
+}
+
static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
{
/* async CQE processing */
@@ -758,8 +760,10 @@ static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;
- if (evt_code == OCRDMA_ASYNC_EVE_CODE)
+ if (evt_code == OCRDMA_ASYNC_RDMA_EVE_CODE)
ocrdma_dispatch_ibevent(dev, cqe);
+ else if (evt_code == OCRDMA_ASYNC_GRP5_EVE_CODE)
+ ocrdma_process_grp5_async(dev, cqe);
else
pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
dev->id, evt_code);
@@ -957,9 +961,8 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
rsp = ocrdma_get_mqe_rsp(dev);
ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe)));
if (cqe_status || ext_status) {
- pr_err
- ("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n",
- __func__,
+ pr_err("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n",
+ __func__,
(rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
OCRDMA_MBX_RSP_OPCODE_SHIFT, cqe_status, ext_status);
status = ocrdma_get_mbx_cqe_errno(cqe_status);
@@ -991,9 +994,15 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_srq_sge = (rsp->max_srq_rqe_sge &
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
+ attr->max_rdma_sge = (rsp->max_write_send_sge &
+ OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT;
attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
+ attr->max_srq =
+ (rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp &
OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT;
@@ -1013,6 +1022,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
attr->max_cqe = rsp->max_cq_cqes_per_cq &
OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK;
+ attr->max_cq = (rsp->max_cq_cqes_per_cq &
+ OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK) >>
+ OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET;
attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK) >>
OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET) *
@@ -1045,7 +1057,6 @@ static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
return -EINVAL;
dev->base_eqid = conf->base_eqid;
dev->max_eq = conf->max_eq;
- dev->attr.max_cq = OCRDMA_MAX_CQ - 1;
return 0;
}
@@ -1118,6 +1129,34 @@ mbx_err:
return status;
}
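+/* Query the physical port speed via the V1 network link config mailbox command. */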
+int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
+{
+ int status = -ENOMEM;
+ struct ocrdma_get_link_speed_rsp *rsp;
+ struct ocrdma_mqe *cmd;
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
+ sizeof(*cmd));
+ if (!cmd)
+ return status;
+ ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
+ OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
+ OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+
+ ((struct ocrdma_mbx_hdr *)cmd->u.cmd)->rsvd_version = 0x1;
+
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+ if (status)
+ goto mbx_err;
+
+ rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
+ *lnk_speed = rsp->phys_port_speed;
+
+mbx_err:
+ kfree(cmd);
+ return status;
+}
+
int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
int status = -ENOMEM;
@@ -1296,19 +1335,19 @@ static u16 ocrdma_bind_eq(struct ocrdma_dev *dev)
u16 eq_id;
mutex_lock(&dev->dev_lock);
- cq_cnt = dev->qp_eq_tbl[0].cq_cnt;
- eq_id = dev->qp_eq_tbl[0].q.id;
+ cq_cnt = dev->eq_tbl[0].cq_cnt;
+ eq_id = dev->eq_tbl[0].q.id;
/* find the EQ which has the least number of
* CQs associated with it.
*/
for (i = 0; i < dev->eq_cnt; i++) {
- if (dev->qp_eq_tbl[i].cq_cnt < cq_cnt) {
- cq_cnt = dev->qp_eq_tbl[i].cq_cnt;
- eq_id = dev->qp_eq_tbl[i].q.id;
+ if (dev->eq_tbl[i].cq_cnt < cq_cnt) {
+ cq_cnt = dev->eq_tbl[i].cq_cnt;
+ eq_id = dev->eq_tbl[i].q.id;
selected_eq = i;
}
}
- dev->qp_eq_tbl[selected_eq].cq_cnt += 1;
+ dev->eq_tbl[selected_eq].cq_cnt += 1;
mutex_unlock(&dev->dev_lock);
return eq_id;
}
@@ -1319,16 +1358,16 @@ static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
mutex_lock(&dev->dev_lock);
for (i = 0; i < dev->eq_cnt; i++) {
- if (dev->qp_eq_tbl[i].q.id != eq_id)
+ if (dev->eq_tbl[i].q.id != eq_id)
continue;
- dev->qp_eq_tbl[i].cq_cnt -= 1;
+ dev->eq_tbl[i].cq_cnt -= 1;
break;
}
mutex_unlock(&dev->dev_lock);
}
int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
- int entries, int dpp_cq)
+ int entries, int dpp_cq, u16 pd_id)
{
int status = -ENOMEM; int max_hw_cqe;
struct pci_dev *pdev = dev->nic_info.pdev;
@@ -1336,8 +1375,6 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
struct ocrdma_create_cq_rsp *rsp;
u32 hw_pages, cqe_size, page_size, cqe_count;
- if (dpp_cq)
- return -EINVAL;
if (entries > dev->attr.max_cqe) {
pr_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
__func__, dev->id, dev->attr.max_cqe, entries);
@@ -1377,15 +1414,13 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
cmd->cmd.pgsz_pgcnt |= hw_pages;
cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
- if (dev->eq_cnt < 0)
- goto eq_err;
cq->eqn = ocrdma_bind_eq(dev);
- cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
+ cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER3;
cqe_count = cq->len / cqe_size;
- if (cqe_count > 1024)
+ if (cqe_count > 1024) {
/* Set cnt to 3 to indicate more than 1024 cq entries */
cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
- else {
+ } else {
u8 count = 0;
switch (cqe_count) {
case 256:
@@ -1416,6 +1451,7 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
cq->phase_change = true;
}
+ cmd->cmd.pd_id = pd_id; /* valid only for v3 */
ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (status)
@@ -1427,7 +1463,6 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
return 0;
mbx_err:
ocrdma_unbind_eq(dev, cq->eqn);
-eq_err:
dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
mem_err:
kfree(cmd);
@@ -1524,6 +1559,7 @@ static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
return -ENOMEM;
cmd->num_pbl_pdid =
pdid | (hwmr->num_pbls << OCRDMA_REG_NSMR_NUM_PBL_SHIFT);
+ cmd->fr_mr = hwmr->fr_mr;
cmd->flags_hpage_pbe_sz |= (hwmr->remote_wr <<
OCRDMA_REG_NSMR_REMOTE_WR_SHIFT);
@@ -1678,8 +1714,16 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
}
-int ocrdma_qp_state_machine(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
- enum ib_qp_state *old_ib_state)
+static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
+{
+ qp->sq.head = 0;
+ qp->sq.tail = 0;
+ qp->rq.head = 0;
+ qp->rq.tail = 0;
+}
+
+int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
+ enum ib_qp_state *old_ib_state)
{
unsigned long flags;
int status = 0;
@@ -1696,96 +1740,15 @@ int ocrdma_qp_state_machine(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
return 1;
}
- switch (qp->state) {
- case OCRDMA_QPS_RST:
- switch (new_state) {
- case OCRDMA_QPS_RST:
- case OCRDMA_QPS_INIT:
- break;
- default:
- status = -EINVAL;
- break;
- };
- break;
- case OCRDMA_QPS_INIT:
- /* qps: INIT->XXX */
- switch (new_state) {
- case OCRDMA_QPS_INIT:
- case OCRDMA_QPS_RTR:
- break;
- case OCRDMA_QPS_ERR:
- ocrdma_flush_qp(qp);
- break;
- default:
- status = -EINVAL;
- break;
- };
- break;
- case OCRDMA_QPS_RTR:
- /* qps: RTS->XXX */
- switch (new_state) {
- case OCRDMA_QPS_RTS:
- break;
- case OCRDMA_QPS_ERR:
- ocrdma_flush_qp(qp);
- break;
- default:
- status = -EINVAL;
- break;
- };
- break;
- case OCRDMA_QPS_RTS:
- /* qps: RTS->XXX */
- switch (new_state) {
- case OCRDMA_QPS_SQD:
- case OCRDMA_QPS_SQE:
- break;
- case OCRDMA_QPS_ERR:
- ocrdma_flush_qp(qp);
- break;
- default:
- status = -EINVAL;
- break;
- };
- break;
- case OCRDMA_QPS_SQD:
- /* qps: SQD->XXX */
- switch (new_state) {
- case OCRDMA_QPS_RTS:
- case OCRDMA_QPS_SQE:
- case OCRDMA_QPS_ERR:
- break;
- default:
- status = -EINVAL;
- break;
- };
- break;
- case OCRDMA_QPS_SQE:
- switch (new_state) {
- case OCRDMA_QPS_RTS:
- case OCRDMA_QPS_ERR:
- break;
- default:
- status = -EINVAL;
- break;
- };
- break;
- case OCRDMA_QPS_ERR:
- /* qps: ERR->XXX */
- switch (new_state) {
- case OCRDMA_QPS_RST:
- break;
- default:
- status = -EINVAL;
- break;
- };
- break;
- default:
- status = -EINVAL;
- break;
- };
- if (!status)
- qp->state = new_state;
+
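+ /* only INIT (reset queue pointers, drop any pending flush) and ERR (flush the QP) need extra work here */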
+ if (new_state == OCRDMA_QPS_INIT) {
+ ocrdma_init_hwq_ptr(qp);
+ ocrdma_del_flush_qp(qp);
+ } else if (new_state == OCRDMA_QPS_ERR) {
+ ocrdma_flush_qp(qp);
+ }
+
+ qp->state = new_state;
spin_unlock_irqrestore(&qp->q_lock, flags);
return status;
@@ -1819,10 +1782,9 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
u32 max_wqe_allocated;
u32 max_sges = attrs->cap.max_send_sge;
- max_wqe_allocated = attrs->cap.max_send_wr;
- /* need to allocate one extra to for GEN1 family */
- if (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY)
- max_wqe_allocated += 1;
+ /* QP1 may exceed 127 */
+ max_wqe_allocated = min_t(int, attrs->cap.max_send_wr + 1,
+ dev->attr.max_wqe);
status = ocrdma_build_q_conf(&max_wqe_allocated,
dev->attr.wqe_size, &hw_pages, &hw_page_size);
@@ -1934,6 +1896,8 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
dma_addr_t pa = 0;
int ird_page_size = dev->attr.ird_page_size;
int ird_q_len = dev->attr.num_ird_pages * ird_page_size;
+ struct ocrdma_hdr_wqe *rqe;
+ int i = 0;
if (dev->attr.ird == 0)
return 0;
@@ -1945,6 +1909,15 @@ static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
memset(qp->ird_q_va, 0, ird_q_len);
ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
pa, ird_page_size);
+ for (; i < ird_q_len / dev->attr.rqe_size; i++) {
+ rqe = (struct ocrdma_hdr_wqe *)(qp->ird_q_va +
+ (i * dev->attr.rqe_size));
+ rqe->cw = 0;
+ rqe->cw |= 2;
+ rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
+ rqe->cw |= (8 << OCRDMA_WQE_SIZE_SHIFT);
+ rqe->cw |= (8 << OCRDMA_WQE_NXT_WQE_SIZE_SHIFT);
+ }
return 0;
}
@@ -2057,9 +2030,10 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
qp->rq_cq = cq;
if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
- (attrs->cap.max_inline_data <= dev->attr.max_inline_data))
+ (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
dpp_cq_id);
+ }
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (status)
@@ -2108,38 +2082,48 @@ int ocrdma_resolve_dgid(struct ocrdma_dev *dev, union ib_gid *dgid,
struct in6_addr in6;
memcpy(&in6, dgid, sizeof in6);
- if (rdma_is_multicast_addr(&in6))
+ if (rdma_is_multicast_addr(&in6)) {
rdma_get_mcast_mac(&in6, mac_addr);
- else if (rdma_link_local_addr(&in6))
+ } else if (rdma_link_local_addr(&in6)) {
rdma_get_ll_mac(&in6, mac_addr);
- else {
+ } else {
pr_err("%s() fail to resolve mac_addr.\n", __func__);
return -EINVAL;
}
return 0;
}
-static void ocrdma_set_av_params(struct ocrdma_qp *qp,
+static int ocrdma_set_av_params(struct ocrdma_qp *qp,
struct ocrdma_modify_qp *cmd,
struct ib_qp_attr *attrs)
{
+ int status;
struct ib_ah_attr *ah_attr = &attrs->ah_attr;
- union ib_gid sgid;
+ union ib_gid sgid, zgid;
u32 vlan_id;
u8 mac_addr[6];
+
if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
- return;
+ return -EINVAL;
cmd->params.tclass_sq_psn |=
(ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
cmd->params.rnt_rc_sl_fl |=
(ah_attr->grh.flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
+ cmd->params.rnt_rc_sl_fl |= (ah_attr->sl << OCRDMA_QP_PARAMS_SL_SHIFT);
cmd->params.hop_lmt_rq_psn |=
(ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
sizeof(cmd->params.dgid));
- ocrdma_query_gid(&qp->dev->ibdev, 1,
+ status = ocrdma_query_gid(&qp->dev->ibdev, 1,
ah_attr->grh.sgid_index, &sgid);
+ if (status)
+ return status;
+
+ memset(&zgid, 0, sizeof(zgid));
+ if (!memcmp(&sgid, &zgid, sizeof(zgid)))
+ return -EINVAL;
+
qp->sgid_idx = ah_attr->grh.sgid_index;
memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
ocrdma_resolve_dgid(qp->dev, &ah_attr->grh.dgid, &mac_addr[0]);
@@ -2155,6 +2139,7 @@ static void ocrdma_set_av_params(struct ocrdma_qp *qp,
vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
}
+ return 0;
}
static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
@@ -2163,8 +2148,6 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
enum ib_qp_state old_qps)
{
int status = 0;
- struct net_device *netdev = qp->dev->nic_info.netdev;
- int eth_mtu = iboe_get_mtu(netdev->mtu);
if (attr_mask & IB_QP_PKEY_INDEX) {
cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
@@ -2176,9 +2159,11 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
cmd->params.qkey = attrs->qkey;
cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
}
- if (attr_mask & IB_QP_AV)
- ocrdma_set_av_params(qp, cmd, attrs);
- else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
+ if (attr_mask & IB_QP_AV) {
+ status = ocrdma_set_av_params(qp, cmd, attrs);
+ if (status)
+ return status;
+ } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
/* set the default mac address for UD, GSI QPs */
cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
(qp->dev->nic_info.mac_addr[1] << 8) |
@@ -2199,8 +2184,8 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
}
if (attr_mask & IB_QP_PATH_MTU) {
- if (ib_mtu_enum_to_int(eth_mtu) <
- ib_mtu_enum_to_int(attrs->path_mtu)) {
+ if (attrs->path_mtu < IB_MTU_256 ||
+ attrs->path_mtu > IB_MTU_4096) {
status = -EINVAL;
goto pmtu_err;
}
@@ -2283,10 +2268,12 @@ int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
OCRDMA_QP_PARAMS_STATE_SHIFT) &
OCRDMA_QP_PARAMS_STATE_MASK;
cmd->flags |= OCRDMA_QP_PARA_QPS_VALID;
- } else
+ } else {
cmd->params.max_sge_recv_flags |=
(qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) &
OCRDMA_QP_PARAMS_STATE_MASK;
+ }
+
status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask, old_qps);
if (status)
goto mbx_err;
@@ -2324,7 +2311,7 @@ mbx_err:
return status;
}
-int ocrdma_mbx_create_srq(struct ocrdma_srq *srq,
+int ocrdma_mbx_create_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
struct ib_srq_init_attr *srq_attr,
struct ocrdma_pd *pd)
{
@@ -2334,7 +2321,6 @@ int ocrdma_mbx_create_srq(struct ocrdma_srq *srq,
struct ocrdma_create_srq_rsp *rsp;
struct ocrdma_create_srq *cmd;
dma_addr_t pa;
- struct ocrdma_dev *dev = srq->dev;
struct pci_dev *pdev = dev->nic_info.pdev;
u32 max_rqe_allocated;
@@ -2404,13 +2390,16 @@ int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
{
int status = -ENOMEM;
struct ocrdma_modify_srq *cmd;
- cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
+ struct ocrdma_pd *pd = srq->pd;
+ struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_SRQ, sizeof(*cmd));
if (!cmd)
return status;
cmd->id = srq->id;
cmd->limit_max_rqe |= srq_attr->srq_limit <<
OCRDMA_MODIFY_SRQ_LIMIT_SHIFT;
- status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
kfree(cmd);
return status;
}
@@ -2419,11 +2408,13 @@ int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
{
int status = -ENOMEM;
struct ocrdma_query_srq *cmd;
- cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
+ struct ocrdma_dev *dev = get_ocrdma_dev(srq->ibsrq.device);
+
+ cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_SRQ, sizeof(*cmd));
if (!cmd)
return status;
cmd->id = srq->rq.dbid;
- status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (status == 0) {
struct ocrdma_query_srq_rsp *rsp =
(struct ocrdma_query_srq_rsp *)cmd;
@@ -2448,7 +2439,7 @@ int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
if (!cmd)
return status;
cmd->id = srq->id;
- status = ocrdma_mbx_cmd(srq->dev, (struct ocrdma_mqe *)cmd);
+ status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (srq->rq.va)
dma_free_coherent(&pdev->dev, srq->rq.len,
srq->rq.va, srq->rq.pa);
@@ -2490,38 +2481,7 @@ int ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
return 0;
}
-static int ocrdma_create_mq_eq(struct ocrdma_dev *dev)
-{
- int status;
- int irq;
- unsigned long flags = 0;
- int num_eq = 0;
-
- if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
- flags = IRQF_SHARED;
- else {
- num_eq = dev->nic_info.msix.num_vectors -
- dev->nic_info.msix.start_vector;
- /* minimum two vectors/eq are required for rdma to work.
- * one for control path and one for data path.
- */
- if (num_eq < 2)
- return -EBUSY;
- }
-
- status = ocrdma_create_eq(dev, &dev->meq, OCRDMA_EQ_LEN);
- if (status)
- return status;
- sprintf(dev->meq.irq_name, "ocrdma_mq%d", dev->id);
- irq = ocrdma_get_irq(dev, &dev->meq);
- status = request_irq(irq, ocrdma_irq_handler, flags, dev->meq.irq_name,
- &dev->meq);
- if (status)
- _ocrdma_destroy_eq(dev, &dev->meq);
- return status;
-}
-
-static int ocrdma_create_qp_eqs(struct ocrdma_dev *dev)
+static int ocrdma_create_eqs(struct ocrdma_dev *dev)
{
int num_eq, i, status = 0;
int irq;
@@ -2532,49 +2492,47 @@ static int ocrdma_create_qp_eqs(struct ocrdma_dev *dev)
if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
num_eq = 1;
flags = IRQF_SHARED;
- } else
+ } else {
num_eq = min_t(u32, num_eq, num_online_cpus());
- dev->qp_eq_tbl = kzalloc(sizeof(struct ocrdma_eq) * num_eq, GFP_KERNEL);
- if (!dev->qp_eq_tbl)
+ }
+
+ if (!num_eq)
+ return -EINVAL;
+
+ dev->eq_tbl = kzalloc(sizeof(struct ocrdma_eq) * num_eq, GFP_KERNEL);
+ if (!dev->eq_tbl)
return -ENOMEM;
for (i = 0; i < num_eq; i++) {
- status = ocrdma_create_eq(dev, &dev->qp_eq_tbl[i],
+ status = ocrdma_create_eq(dev, &dev->eq_tbl[i],
OCRDMA_EQ_LEN);
if (status) {
status = -EINVAL;
break;
}
- sprintf(dev->qp_eq_tbl[i].irq_name, "ocrdma_qp%d-%d",
+ sprintf(dev->eq_tbl[i].irq_name, "ocrdma%d-%d",
dev->id, i);
- irq = ocrdma_get_irq(dev, &dev->qp_eq_tbl[i]);
+ irq = ocrdma_get_irq(dev, &dev->eq_tbl[i]);
status = request_irq(irq, ocrdma_irq_handler, flags,
- dev->qp_eq_tbl[i].irq_name,
- &dev->qp_eq_tbl[i]);
- if (status) {
- _ocrdma_destroy_eq(dev, &dev->qp_eq_tbl[i]);
- status = -EINVAL;
- break;
- }
+ dev->eq_tbl[i].irq_name,
+ &dev->eq_tbl[i]);
+ if (status)
+ goto done;
dev->eq_cnt += 1;
}
/* one eq is sufficient for data path to work */
- if (dev->eq_cnt >= 1)
- return 0;
- if (status)
- ocrdma_destroy_qp_eqs(dev);
+ return 0;
+done:
+ ocrdma_destroy_eqs(dev);
return status;
}
int ocrdma_init_hw(struct ocrdma_dev *dev)
{
int status;
- /* set up control path eq */
- status = ocrdma_create_mq_eq(dev);
- if (status)
- return status;
- /* set up data path eq */
- status = ocrdma_create_qp_eqs(dev);
+
+ /* create the eqs */
+ status = ocrdma_create_eqs(dev);
if (status)
goto qpeq_err;
status = ocrdma_create_mq(dev);
@@ -2597,9 +2555,8 @@ int ocrdma_init_hw(struct ocrdma_dev *dev)
conf_err:
ocrdma_destroy_mq(dev);
mq_err:
- ocrdma_destroy_qp_eqs(dev);
+ ocrdma_destroy_eqs(dev);
qpeq_err:
- ocrdma_destroy_eq(dev, &dev->meq);
pr_err("%s() status=%d\n", __func__, status);
return status;
}
@@ -2608,10 +2565,9 @@ void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
{
ocrdma_mbx_delete_ah_tbl(dev);
- /* cleanup the data path eqs */
- ocrdma_destroy_qp_eqs(dev);
+ /* cleanup the eqs */
+ ocrdma_destroy_eqs(dev);
/* cleanup the control path */
ocrdma_destroy_mq(dev);
- ocrdma_destroy_eq(dev, &dev->meq);
}
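
ocrdma_create_eqs() above folds the old control-path (MQ) and data-path (QP) event-queue setup into a single table, capped at the number of online CPUs, and on a mid-loop failure unwinds every EQ that was fully created before freeing the table. The same allocate-then-unwind shape in isolation (a sketch: eq_table, create_one() and destroy_all() are stand-ins, not driver calls):

    #include <linux/cpumask.h>
    #include <linux/kernel.h>
    #include <linux/slab.h>

    struct eq_table {                       /* stand-in for the driver's eq_tbl + eq_cnt pair */
            void *tbl;
            int created;
    };

    int create_one(struct eq_table *t, int idx);    /* hypothetical: EQ create + request_irq */
    void destroy_all(struct eq_table *t);           /* hypothetical: undo 'created' EQs, free tbl */

    static int create_all(struct eq_table *t, int vectors, size_t entry_sz)
    {
            int num, i, err;

            num = min_t(int, vectors, num_online_cpus());   /* cap like the MSI-X path */
            if (!num)
                    return -EINVAL;

            t->tbl = kcalloc(num, entry_sz, GFP_KERNEL);
            if (!t->tbl)
                    return -ENOMEM;

            for (i = 0; i < num; i++) {
                    err = create_one(t, i);
                    if (err)
                            goto undo;
                    t->created++;           /* only fully set-up EQs are counted */
            }
            return 0;
    undo:
            destroy_all(t);
            return err;
    }

Counting an entry only after its IRQ is requested means the unwind path never has to distinguish half-built EQs.
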
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index be5db77404d..f2a89d4cc7c 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -78,6 +78,11 @@ static inline void ocrdma_copy_le32_to_cpu(void *dst, void *src, u32 len)
#endif
}
+static inline u64 ocrdma_get_db_addr(struct ocrdma_dev *dev, u32 pdid)
+{
+ return dev->nic_info.unmapped_db + (pdid * dev->nic_info.db_page_size);
+}
+
int ocrdma_init_hw(struct ocrdma_dev *);
void ocrdma_cleanup_hw(struct ocrdma_dev *);
@@ -86,6 +91,7 @@ void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed,
bool solicited, u16 cqe_popped);
/* verbs specific mailbox commands */
+int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed);
int ocrdma_query_config(struct ocrdma_dev *,
struct ocrdma_mbx_query_config *config);
int ocrdma_resolve_dgid(struct ocrdma_dev *, union ib_gid *dgid, u8 *mac_addr);
@@ -100,7 +106,7 @@ int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *, int fmr, u32 lkey);
int ocrdma_reg_mr(struct ocrdma_dev *, struct ocrdma_hw_mr *hwmr,
u32 pd_id, int acc);
int ocrdma_mbx_create_cq(struct ocrdma_dev *, struct ocrdma_cq *,
- int entries, int dpp_cq);
+ int entries, int dpp_cq, u16 pd_id);
int ocrdma_mbx_destroy_cq(struct ocrdma_dev *, struct ocrdma_cq *);
int ocrdma_mbx_create_qp(struct ocrdma_qp *, struct ib_qp_init_attr *attrs,
@@ -112,8 +118,7 @@ int ocrdma_mbx_modify_qp(struct ocrdma_dev *, struct ocrdma_qp *,
int ocrdma_mbx_query_qp(struct ocrdma_dev *, struct ocrdma_qp *,
struct ocrdma_qp_params *param);
int ocrdma_mbx_destroy_qp(struct ocrdma_dev *, struct ocrdma_qp *);
-
-int ocrdma_mbx_create_srq(struct ocrdma_srq *,
+int ocrdma_mbx_create_srq(struct ocrdma_dev *, struct ocrdma_srq *,
struct ib_srq_init_attr *,
struct ocrdma_pd *);
int ocrdma_mbx_modify_srq(struct ocrdma_srq *, struct ib_srq_attr *);
@@ -123,7 +128,7 @@ int ocrdma_mbx_destroy_srq(struct ocrdma_dev *, struct ocrdma_srq *);
int ocrdma_alloc_av(struct ocrdma_dev *, struct ocrdma_ah *);
int ocrdma_free_av(struct ocrdma_dev *, struct ocrdma_ah *);
-int ocrdma_qp_state_machine(struct ocrdma_qp *, enum ib_qp_state new_state,
+int ocrdma_qp_state_change(struct ocrdma_qp *, enum ib_qp_state new_state,
enum ib_qp_state *old_ib_state);
bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *, struct ocrdma_qp *);
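
The ocrdma_get_db_addr() helper declared above centralizes the doorbell-page arithmetic that several call sites previously open-coded: each protection domain owns one doorbell page, so the address is the base of the unmapped doorbell region plus pdid pages. The same arithmetic with plain integers (illustrative only; values are made up):

    static inline u64 db_addr(u64 unmapped_db, u32 db_page_size, u32 pdid)
    {
            return unmapped_db + (u64)pdid * db_page_size;
    }

    /* e.g. db_addr(0xd0000000ULL, 4096, 3) == 0xd0003000 */
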
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index ded416f1ade..56e004940f1 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -39,6 +39,7 @@
#include "ocrdma_ah.h"
#include "be_roce.h"
#include "ocrdma_hw.h"
+#include "ocrdma_abi.h"
MODULE_VERSION(OCRDMA_ROCE_DEV_VERSION);
MODULE_DESCRIPTION("Emulex RoCE HCA Driver");
@@ -265,6 +266,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
sizeof(OCRDMA_NODE_DESC));
dev->ibdev.owner = THIS_MODULE;
+ dev->ibdev.uverbs_abi_ver = OCRDMA_ABI_VERSION;
dev->ibdev.uverbs_cmd_mask =
OCRDMA_UVERBS(GET_CONTEXT) |
OCRDMA_UVERBS(QUERY_DEVICE) |
@@ -326,9 +328,14 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
dev->ibdev.req_notify_cq = ocrdma_arm_cq;
dev->ibdev.get_dma_mr = ocrdma_get_dma_mr;
+ dev->ibdev.reg_phys_mr = ocrdma_reg_kernel_mr;
dev->ibdev.dereg_mr = ocrdma_dereg_mr;
dev->ibdev.reg_user_mr = ocrdma_reg_user_mr;
+ dev->ibdev.alloc_fast_reg_mr = ocrdma_alloc_frmr;
+ dev->ibdev.alloc_fast_reg_page_list = ocrdma_alloc_frmr_page_list;
+ dev->ibdev.free_fast_reg_page_list = ocrdma_free_frmr_page_list;
+
/* mandatory to support user space verbs consumer. */
dev->ibdev.alloc_ucontext = ocrdma_alloc_ucontext;
dev->ibdev.dealloc_ucontext = ocrdma_dealloc_ucontext;
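
Registering reg_phys_mr, alloc_fast_reg_mr and the page-list hooks above is what lets in-kernel consumers use fast-register MRs on this device. A hedged sketch of how a ULP of that era's verbs API would allocate and release the pair (frmr_demo and the 32-page limit are arbitrary; error handling trimmed to the essentials):

    #include <linux/err.h>
    #include <rdma/ib_verbs.h>

    /* Allocate a fast-register MR and its page list on a PD, then free both. */
    static int frmr_demo(struct ib_pd *pd, struct ib_device *ibdev)
    {
            struct ib_mr *mr;
            struct ib_fast_reg_page_list *pl;

            mr = ib_alloc_fast_reg_mr(pd, 32);              /* up to 32 pages per registration */
            if (IS_ERR(mr))
                    return PTR_ERR(mr);

            pl = ib_alloc_fast_reg_page_list(ibdev, 32);
            if (IS_ERR(pl)) {
                    ib_dereg_mr(mr);
                    return PTR_ERR(pl);
            }

            /* ... fill pl->page_list[] and post an IB_WR_FAST_REG_MR work request ... */

            ib_free_fast_reg_page_list(pl);
            ib_dereg_mr(mr);
            return 0;
    }
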
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 36b062da2ae..9f9570ec3c2 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -70,6 +70,7 @@ enum {
#define OCRDMA_SUBSYS_COMMON 1
enum {
+ OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1 = 5,
OCRDMA_CMD_CREATE_CQ = 12,
OCRDMA_CMD_CREATE_EQ = 13,
OCRDMA_CMD_CREATE_MQ = 21,
@@ -91,15 +92,15 @@ enum {
#define OCRDMA_MAX_QP 2048
#define OCRDMA_MAX_CQ 2048
+#define OCRDMA_MAX_STAG 8192
enum {
OCRDMA_DB_RQ_OFFSET = 0xE0,
- OCRDMA_DB_GEN2_RQ1_OFFSET = 0x100,
- OCRDMA_DB_GEN2_RQ2_OFFSET = 0xC0,
+ OCRDMA_DB_GEN2_RQ_OFFSET = 0x100,
OCRDMA_DB_SQ_OFFSET = 0x60,
OCRDMA_DB_GEN2_SQ_OFFSET = 0x1C0,
OCRDMA_DB_SRQ_OFFSET = OCRDMA_DB_RQ_OFFSET,
- OCRDMA_DB_GEN2_SRQ_OFFSET = OCRDMA_DB_GEN2_RQ1_OFFSET,
+ OCRDMA_DB_GEN2_SRQ_OFFSET = OCRDMA_DB_GEN2_RQ_OFFSET,
OCRDMA_DB_CQ_OFFSET = 0x120,
OCRDMA_DB_EQ_OFFSET = OCRDMA_DB_CQ_OFFSET,
OCRDMA_DB_MQ_OFFSET = 0x140
@@ -143,8 +144,11 @@ enum {
# 2: 16K Bytes
# 3: 32K Bytes
# 4: 64K Bytes
+# 5: 128K Bytes
+# 6: 256K Bytes
+# 7: 512K Bytes
*/
-#define OCRDMA_MAX_Q_PAGE_SIZE_CNT (5)
+#define OCRDMA_MAX_Q_PAGE_SIZE_CNT (8)
#define OCRDMA_Q_PAGE_BASE_SIZE (OCRDMA_MIN_Q_PAGE_SIZE * OCRDMA_MAX_Q_PAGES)
#define MAX_OCRDMA_QP_PAGES (8)
@@ -177,7 +181,7 @@ struct ocrdma_mbx_hdr {
u32 timeout; /* in seconds */
u32 cmd_len;
u32 rsvd_version;
-} __packed;
+};
enum {
OCRDMA_MBX_RSP_OPCODE_SHIFT = 0,
@@ -197,7 +201,7 @@ struct ocrdma_mbx_rsp {
u32 status;
u32 rsp_len;
u32 add_rsp_len;
-} __packed;
+};
enum {
OCRDMA_MQE_EMBEDDED = 1,
@@ -208,7 +212,7 @@ struct ocrdma_mqe_sge {
u32 pa_lo;
u32 pa_hi;
u32 len;
-} __packed;
+};
enum {
OCRDMA_MQE_HDR_EMB_SHIFT = 0,
@@ -225,12 +229,12 @@ struct ocrdma_mqe_hdr {
u32 tag_lo;
u32 tag_hi;
u32 rsvd3;
-} __packed;
+};
struct ocrdma_mqe_emb_cmd {
struct ocrdma_mbx_hdr mch;
u8 pyld[220];
-} __packed;
+};
struct ocrdma_mqe {
struct ocrdma_mqe_hdr hdr;
@@ -242,7 +246,7 @@ struct ocrdma_mqe {
u8 cmd[236];
struct ocrdma_mbx_rsp rsp;
} u;
-} __packed;
+};
#define OCRDMA_EQ_LEN 4096
#define OCRDMA_MQ_CQ_LEN 256
@@ -259,12 +263,12 @@ struct ocrdma_mqe {
struct ocrdma_delete_q_req {
struct ocrdma_mbx_hdr req;
u32 id;
-} __packed;
+};
struct ocrdma_pa {
u32 lo;
u32 hi;
-} __packed;
+};
#define MAX_OCRDMA_EQ_PAGES (8)
struct ocrdma_create_eq_req {
@@ -275,7 +279,7 @@ struct ocrdma_create_eq_req {
u32 delay;
u32 rsvd;
struct ocrdma_pa pa[MAX_OCRDMA_EQ_PAGES];
-} __packed;
+};
enum {
OCRDMA_CREATE_EQ_VALID = Bit(29),
@@ -310,7 +314,7 @@ struct ocrdma_mcqe {
u32 tag_lo;
u32 tag_hi;
u32 valid_ae_cmpl_cons;
-} __packed;
+};
enum {
OCRDMA_AE_MCQE_QPVALID = Bit(31),
@@ -332,7 +336,21 @@ struct ocrdma_ae_mcqe {
u32 cqvalid_cqid;
u32 evt_tag;
u32 valid_ae_event;
-} __packed;
+};
+
+enum {
+ OCRDMA_AE_PVID_MCQE_ENABLED_SHIFT = 0,
+ OCRDMA_AE_PVID_MCQE_ENABLED_MASK = 0xFF,
+ OCRDMA_AE_PVID_MCQE_TAG_SHIFT = 16,
+ OCRDMA_AE_PVID_MCQE_TAG_MASK = 0xFFFF << OCRDMA_AE_PVID_MCQE_TAG_SHIFT
+};
+
+struct ocrdma_ae_pvid_mcqe {
+ u32 tag_enabled;
+ u32 event_tag;
+ u32 rsvd1;
+ u32 rsvd2;
+};
enum {
OCRDMA_AE_MPA_MCQE_REQ_ID_SHIFT = 16,
@@ -356,7 +374,7 @@ struct ocrdma_ae_mpa_mcqe {
u32 w1;
u32 w2;
u32 valid_ae_event;
-} __packed;
+};
enum {
OCRDMA_AE_QP_MCQE_NEW_QP_STATE_SHIFT = 0,
@@ -382,9 +400,11 @@ struct ocrdma_ae_qp_mcqe {
u32 w1;
u32 w2;
u32 valid_ae_event;
-} __packed;
+};
-#define OCRDMA_ASYNC_EVE_CODE 0x14
+#define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14
+#define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5
+#define OCRDMA_ASYNC_EVENT_PVID_STATE 0x3
enum OCRDMA_ASYNC_EVENT_TYPE {
OCRDMA_CQ_ERROR = 0x00,
@@ -487,7 +507,8 @@ struct ocrdma_mbx_query_config {
u32 max_ird_ord_per_qp;
u32 max_shared_ird_ord;
u32 max_mr;
- u64 max_mr_size;
+ u32 max_mr_size_lo;
+ u32 max_mr_size_hi;
u32 max_num_mr_pbl;
u32 max_mw;
u32 max_fmr;
@@ -502,14 +523,14 @@ struct ocrdma_mbx_query_config {
u32 max_wqes_rqes_per_q;
u32 max_cq_cqes_per_cq;
u32 max_srq_rqe_sge;
-} __packed;
+};
struct ocrdma_fw_ver_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
u8 running_ver[32];
-} __packed;
+};
struct ocrdma_fw_conf_rsp {
struct ocrdma_mqe_hdr hdr;
@@ -535,14 +556,41 @@ struct ocrdma_fw_conf_rsp {
u32 base_eqid;
u32 max_eq;
-} __packed;
+};
enum {
OCRDMA_FN_MODE_RDMA = 0x4
};
+struct ocrdma_get_link_speed_rsp {
+ struct ocrdma_mqe_hdr hdr;
+ struct ocrdma_mbx_rsp rsp;
+
+ u8 pt_port_num;
+ u8 link_duplex;
+ u8 phys_port_speed;
+ u8 phys_port_fault;
+ u16 rsvd1;
+ u16 qos_lnk_speed;
+ u8 logical_lnk_status;
+ u8 rsvd2[3];
+};
+
+enum {
+ OCRDMA_PHYS_LINK_SPEED_ZERO = 0x0,
+ OCRDMA_PHYS_LINK_SPEED_10MBPS = 0x1,
+ OCRDMA_PHYS_LINK_SPEED_100MBPS = 0x2,
+ OCRDMA_PHYS_LINK_SPEED_1GBPS = 0x3,
+ OCRDMA_PHYS_LINK_SPEED_10GBPS = 0x4,
+ OCRDMA_PHYS_LINK_SPEED_20GBPS = 0x5,
+ OCRDMA_PHYS_LINK_SPEED_25GBPS = 0x6,
+ OCRDMA_PHYS_LINK_SPEED_40GBPS = 0x7,
+ OCRDMA_PHYS_LINK_SPEED_100GBPS = 0x8
+};
+
enum {
OCRDMA_CREATE_CQ_VER2 = 2,
+ OCRDMA_CREATE_CQ_VER3 = 3,
OCRDMA_CREATE_CQ_PAGE_CNT_MASK = 0xFFFF,
OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT = 16,
@@ -576,7 +624,8 @@ struct ocrdma_create_cq_cmd {
u32 pgsz_pgcnt;
u32 ev_cnt_flags;
u32 eqn;
- u32 cqe_count;
+ u16 cqe_count;
+ u16 pd_id;
u32 rsvd6;
struct ocrdma_pa pa[OCRDMA_CREATE_CQ_MAX_PAGES];
};
@@ -584,7 +633,7 @@ struct ocrdma_create_cq_cmd {
struct ocrdma_create_cq {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_create_cq_cmd cmd;
-} __packed;
+};
enum {
OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK = 0xFFFF
@@ -593,12 +642,12 @@ enum {
struct ocrdma_create_cq_cmd_rsp {
struct ocrdma_mbx_rsp rsp;
u32 cq_id;
-} __packed;
+};
struct ocrdma_create_cq_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_create_cq_cmd_rsp rsp;
-} __packed;
+};
enum {
OCRDMA_CREATE_MQ_V0_CQ_ID_SHIFT = 22,
@@ -617,12 +666,12 @@ struct ocrdma_create_mq_req {
u32 async_cqid_valid;
u32 rsvd;
struct ocrdma_pa pa[8];
-} __packed;
+};
struct ocrdma_create_mq_rsp {
struct ocrdma_mbx_rsp rsp;
u32 id;
-} __packed;
+};
enum {
OCRDMA_DESTROY_CQ_QID_SHIFT = 0,
@@ -637,12 +686,12 @@ struct ocrdma_destroy_cq {
struct ocrdma_mbx_hdr req;
u32 bypass_flush_qid;
-} __packed;
+};
struct ocrdma_destroy_cq_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
enum {
OCRDMA_QPT_GSI = 1,
@@ -766,7 +815,7 @@ struct ocrdma_create_qp_req {
u32 dpp_credits_cqid;
u32 rpir_lkey;
struct ocrdma_pa ird_addr[MAX_OCRDMA_IRD_PAGES];
-} __packed;
+};
enum {
OCRDMA_CREATE_QP_RSP_QP_ID_SHIFT = 0,
@@ -820,18 +869,18 @@ struct ocrdma_create_qp_rsp {
u32 max_ord_ird;
u32 sq_rq_id;
u32 dpp_response;
-} __packed;
+};
struct ocrdma_destroy_qp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_hdr req;
u32 qp_id;
-} __packed;
+};
struct ocrdma_destroy_qp_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
enum {
OCRDMA_MODIFY_QP_ID_SHIFT = 0,
@@ -975,7 +1024,7 @@ struct ocrdma_qp_params {
u32 dmac_b0_to_b3;
u32 vlan_dmac_b4_to_b5;
u32 qkey;
-} __packed;
+};
struct ocrdma_modify_qp {
@@ -986,7 +1035,7 @@ struct ocrdma_modify_qp {
u32 flags;
u32 rdma_flags;
u32 num_outstanding_atomic_rd;
-} __packed;
+};
enum {
OCRDMA_MODIFY_QP_RSP_MAX_RQE_SHIFT = 0,
@@ -1007,7 +1056,7 @@ struct ocrdma_modify_qp_rsp {
u32 max_wqe_rqe;
u32 max_ord_ird;
-} __packed;
+};
struct ocrdma_query_qp {
struct ocrdma_mqe_hdr hdr;
@@ -1016,13 +1065,13 @@ struct ocrdma_query_qp {
#define OCRDMA_QUERY_UP_QP_ID_SHIFT 0
#define OCRDMA_QUERY_UP_QP_ID_MASK 0xFFFFFF
u32 qp_id;
-} __packed;
+};
struct ocrdma_query_qp_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
struct ocrdma_qp_params params;
-} __packed;
+};
enum {
OCRDMA_CREATE_SRQ_PD_ID_SHIFT = 0,
@@ -1051,7 +1100,7 @@ struct ocrdma_create_srq {
u32 max_sge_rqe;
u32 pages_rqe_sz;
struct ocrdma_pa rq_addr[MAX_OCRDMA_SRQ_PAGES];
-} __packed;
+};
enum {
OCRDMA_CREATE_SRQ_RSP_SRQ_ID_SHIFT = 0,
@@ -1070,7 +1119,7 @@ struct ocrdma_create_srq_rsp {
u32 id;
u32 max_sge_rqe_allocated;
-} __packed;
+};
enum {
OCRDMA_MODIFY_SRQ_ID_SHIFT = 0,
@@ -1089,7 +1138,7 @@ struct ocrdma_modify_srq {
u32 id;
u32 limit_max_rqe;
-} __packed;
+};
enum {
OCRDMA_QUERY_SRQ_ID_SHIFT = 0,
@@ -1101,7 +1150,7 @@ struct ocrdma_query_srq {
struct ocrdma_mbx_rsp req;
u32 id;
-} __packed;
+};
enum {
OCRDMA_QUERY_SRQ_RSP_PD_ID_SHIFT = 0,
@@ -1123,7 +1172,7 @@ struct ocrdma_query_srq_rsp {
u32 max_rqe_pdid;
u32 srq_lmt_max_sge;
-} __packed;
+};
enum {
OCRDMA_DESTROY_SRQ_ID_SHIFT = 0,
@@ -1135,7 +1184,7 @@ struct ocrdma_destroy_srq {
struct ocrdma_mbx_rsp req;
u32 id;
-} __packed;
+};
enum {
OCRDMA_ALLOC_PD_ENABLE_DPP = BIT(16),
@@ -1147,7 +1196,7 @@ struct ocrdma_alloc_pd {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_hdr req;
u32 enable_dpp_rsvd;
-} __packed;
+};
enum {
OCRDMA_ALLOC_PD_RSP_DPP = Bit(16),
@@ -1159,18 +1208,18 @@ struct ocrdma_alloc_pd_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
u32 dpp_page_pdid;
-} __packed;
+};
struct ocrdma_dealloc_pd {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_hdr req;
u32 id;
-} __packed;
+};
struct ocrdma_dealloc_pd_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
enum {
OCRDMA_ADDR_CHECK_ENABLE = 1,
@@ -1206,7 +1255,7 @@ struct ocrdma_alloc_lkey {
u32 pdid;
u32 pbl_sz_flags;
-} __packed;
+};
struct ocrdma_alloc_lkey_rsp {
struct ocrdma_mqe_hdr hdr;
@@ -1214,7 +1263,7 @@ struct ocrdma_alloc_lkey_rsp {
u32 lrkey;
u32 num_pbl_rsvd;
-} __packed;
+};
struct ocrdma_dealloc_lkey {
struct ocrdma_mqe_hdr hdr;
@@ -1222,12 +1271,12 @@ struct ocrdma_dealloc_lkey {
u32 lkey;
u32 rsvd_frmr;
-} __packed;
+};
struct ocrdma_dealloc_lkey_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
#define MAX_OCRDMA_NSMR_PBL (u32)22
#define MAX_OCRDMA_PBL_SIZE 65536
@@ -1273,7 +1322,7 @@ struct ocrdma_reg_nsmr {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_hdr cmd;
- u32 lrkey_key_index;
+ u32 fr_mr;
u32 num_pbl_pdid;
u32 flags_hpage_pbe_sz;
u32 totlen_low;
@@ -1283,7 +1332,7 @@ struct ocrdma_reg_nsmr {
u32 va_loaddr;
u32 va_hiaddr;
struct ocrdma_pa pbl[MAX_OCRDMA_NSMR_PBL];
-} __packed;
+};
enum {
OCRDMA_REG_NSMR_CONT_PBL_SHIFT = 0,
@@ -1305,12 +1354,12 @@ struct ocrdma_reg_nsmr_cont {
u32 last;
struct ocrdma_pa pbl[MAX_OCRDMA_NSMR_PBL];
-} __packed;
+};
struct ocrdma_pbe {
u32 pa_hi;
u32 pa_lo;
-} __packed;
+};
enum {
OCRDMA_REG_NSMR_RSP_NUM_PBL_SHIFT = 16,
@@ -1322,7 +1371,7 @@ struct ocrdma_reg_nsmr_rsp {
u32 lrkey;
u32 num_pbl;
-} __packed;
+};
enum {
OCRDMA_REG_NSMR_CONT_RSP_LRKEY_INDEX_SHIFT = 0,
@@ -1342,7 +1391,7 @@ struct ocrdma_reg_nsmr_cont_rsp {
u32 lrkey_key_index;
u32 num_pbl;
-} __packed;
+};
enum {
OCRDMA_ALLOC_MW_PD_ID_SHIFT = 0,
@@ -1354,7 +1403,7 @@ struct ocrdma_alloc_mw {
struct ocrdma_mbx_hdr req;
u32 pdid;
-} __packed;
+};
enum {
OCRDMA_ALLOC_MW_RSP_LRKEY_INDEX_SHIFT = 0,
@@ -1366,7 +1415,7 @@ struct ocrdma_alloc_mw_rsp {
struct ocrdma_mbx_rsp rsp;
u32 lrkey_index;
-} __packed;
+};
struct ocrdma_attach_mcast {
struct ocrdma_mqe_hdr hdr;
@@ -1375,12 +1424,12 @@ struct ocrdma_attach_mcast {
u8 mgid[16];
u32 mac_b0_to_b3;
u32 vlan_mac_b4_to_b5;
-} __packed;
+};
struct ocrdma_attach_mcast_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
struct ocrdma_detach_mcast {
struct ocrdma_mqe_hdr hdr;
@@ -1389,12 +1438,12 @@ struct ocrdma_detach_mcast {
u8 mgid[16];
u32 mac_b0_to_b3;
u32 vlan_mac_b4_to_b5;
-} __packed;
+};
struct ocrdma_detach_mcast_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
enum {
OCRDMA_CREATE_AH_NUM_PAGES_SHIFT = 19,
@@ -1418,24 +1467,24 @@ struct ocrdma_create_ah_tbl {
u32 ah_conf;
struct ocrdma_pa tbl_addr[8];
-} __packed;
+};
struct ocrdma_create_ah_tbl_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
u32 ahid;
-} __packed;
+};
struct ocrdma_delete_ah_tbl {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_hdr req;
u32 ahid;
-} __packed;
+};
struct ocrdma_delete_ah_tbl_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
-} __packed;
+};
enum {
OCRDMA_EQE_VALID_SHIFT = 0,
@@ -1448,7 +1497,7 @@ enum {
struct ocrdma_eqe {
u32 id_valid;
-} __packed;
+};
enum OCRDMA_CQE_STATUS {
OCRDMA_CQE_SUCCESS = 0,
@@ -1532,14 +1581,14 @@ struct ocrdma_cqe {
} cmn;
};
u32 flags_status_srcqpn; /* w3 */
-} __packed;
+};
struct ocrdma_sge {
u32 addr_hi;
u32 addr_lo;
u32 lrkey;
u32 len;
-} __packed;
+};
enum {
OCRDMA_FLAG_SIG = 0x1,
@@ -1563,6 +1612,7 @@ enum OCRDMA_WQE_OPCODE {
OCRDMA_SEND = 0x00,
OCRDMA_CMP_SWP = 0x14,
OCRDMA_BIND_MW = 0x10,
+ OCRDMA_FR_MR = 0x11,
OCRDMA_RESV1 = 0x0A,
OCRDMA_LKEY_INV = 0x15,
OCRDMA_FETCH_ADD = 0x13,
@@ -1600,14 +1650,26 @@ struct ocrdma_hdr_wqe {
u32 lkey;
};
u32 total_len;
-} __packed;
+};
struct ocrdma_ewqe_ud_hdr {
u32 rsvd_dest_qpn;
u32 qkey;
u32 rsvd_ahid;
u32 rsvd;
-} __packed;
+};
+
+/* extended wqe that follows hdr_wqe, carrying the fast-register (FR_MR) fields */
+struct ocrdma_ewqe_fr {
+ u32 va_hi;
+ u32 va_lo;
+ u32 fbo_hi;
+ u32 fbo_lo;
+ u32 size_sge;
+ u32 num_sges;
+ u32 rsvd;
+ u32 rsvd2;
+};
struct ocrdma_eth_basic {
u8 dmac[6];
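
One layout change above worth calling out: max_mr_size in the query-config response is now carried as two explicit 32-bit words, max_mr_size_lo and max_mr_size_hi, instead of a u64. A consumer would reassemble the value along these lines (a sketch, not the driver's code):

    /* Recombine the split 64-bit maximum-MR-size field from the mailbox response. */
    static inline u64 mr_size_from_rsp(const struct ocrdma_mbx_query_config *rsp)
    {
            return ((u64)rsp->max_mr_size_hi << 32) | rsp->max_mr_size_lo;
    }
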
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index dcfbab177fa..6e982bb43c3 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -75,14 +75,15 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
attr->vendor_part_id = dev->nic_info.pdev->device;
attr->hw_ver = 0;
attr->max_qp = dev->attr.max_qp;
- attr->max_ah = dev->attr.max_qp;
+ attr->max_ah = OCRDMA_MAX_AH;
attr->max_qp_wr = dev->attr.max_wqe;
attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
IB_DEVICE_RC_RNR_NAK_GEN |
IB_DEVICE_SHUTDOWN_PORT |
IB_DEVICE_SYS_IMAGE_GUID |
- IB_DEVICE_LOCAL_DMA_LKEY;
+ IB_DEVICE_LOCAL_DMA_LKEY |
+ IB_DEVICE_MEM_MGT_EXTENSIONS;
attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
attr->max_sge_rd = 0;
attr->max_cq = dev->attr.max_cq;
@@ -96,7 +97,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
attr->max_qp_rd_atom =
min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
- attr->max_srq = (dev->attr.max_qp - 1);
+ attr->max_srq = dev->attr.max_srq;
attr->max_srq_sge = dev->attr.max_srq_sge;
attr->max_srq_wr = dev->attr.max_rqe;
attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
@@ -105,6 +106,45 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
return 0;
}
+static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
+ u8 *ib_speed, u8 *ib_width)
+{
+ int status;
+ u8 speed;
+
+ status = ocrdma_mbx_get_link_speed(dev, &speed);
+ if (status)
+ speed = OCRDMA_PHYS_LINK_SPEED_ZERO;
+
+ switch (speed) {
+ case OCRDMA_PHYS_LINK_SPEED_1GBPS:
+ *ib_speed = IB_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case OCRDMA_PHYS_LINK_SPEED_10GBPS:
+ *ib_speed = IB_SPEED_QDR;
+ *ib_width = IB_WIDTH_1X;
+ break;
+
+ case OCRDMA_PHYS_LINK_SPEED_20GBPS:
+ *ib_speed = IB_SPEED_DDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ case OCRDMA_PHYS_LINK_SPEED_40GBPS:
+ *ib_speed = IB_SPEED_QDR;
+ *ib_width = IB_WIDTH_4X;
+ break;
+
+ default:
+ /* Unsupported */
+ *ib_speed = IB_SPEED_SDR;
+ *ib_width = IB_WIDTH_1X;
+ };
+}
+
+
int ocrdma_query_port(struct ib_device *ibdev,
u8 port, struct ib_port_attr *props)
{
@@ -141,8 +181,8 @@ int ocrdma_query_port(struct ib_device *ibdev,
props->pkey_tbl_len = 1;
props->bad_pkey_cntr = 0;
props->qkey_viol_cntr = 0;
- props->active_width = IB_WIDTH_1X;
- props->active_speed = 4;
+ get_link_speed_and_width(dev, &props->active_speed,
+ &props->active_width);
props->max_msg_sz = 0x80000000;
props->max_vl_num = 4;
return 0;
@@ -186,7 +226,7 @@ static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
mutex_lock(&uctx->mm_list_lock);
list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
- if (len != mm->key.len || phy_addr != mm->key.phy_addr)
+ if (len != mm->key.len && phy_addr != mm->key.phy_addr)
continue;
list_del(&mm->entry);
@@ -204,7 +244,7 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
mutex_lock(&uctx->mm_list_lock);
list_for_each_entry(mm, &uctx->mm_head, entry) {
- if (len != mm->key.len || phy_addr != mm->key.phy_addr)
+ if (len != mm->key.len && phy_addr != mm->key.phy_addr)
continue;
found = true;
@@ -214,6 +254,108 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
return found;
}
+static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
+ struct ocrdma_ucontext *uctx,
+ struct ib_udata *udata)
+{
+ struct ocrdma_pd *pd = NULL;
+ int status = 0;
+
+ pd = kzalloc(sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return ERR_PTR(-ENOMEM);
+
+ if (udata && uctx) {
+ pd->dpp_enabled =
+ dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY;
+ pd->num_dpp_qp =
+ pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
+ }
+
+retry:
+ status = ocrdma_mbx_alloc_pd(dev, pd);
+ if (status) {
+ if (pd->dpp_enabled) {
+ pd->dpp_enabled = false;
+ pd->num_dpp_qp = 0;
+ goto retry;
+ } else {
+ kfree(pd);
+ return ERR_PTR(status);
+ }
+ }
+
+ return pd;
+}
+
+static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
+ struct ocrdma_pd *pd)
+{
+ return (uctx->cntxt_pd == pd ? true : false);
+}
+
+static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
+ struct ocrdma_pd *pd)
+{
+ int status = 0;
+
+ status = ocrdma_mbx_dealloc_pd(dev, pd);
+ kfree(pd);
+ return status;
+}
+
+static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
+ struct ocrdma_ucontext *uctx,
+ struct ib_udata *udata)
+{
+ int status = 0;
+
+ uctx->cntxt_pd = _ocrdma_alloc_pd(dev, uctx, udata);
+ if (IS_ERR(uctx->cntxt_pd)) {
+ status = PTR_ERR(uctx->cntxt_pd);
+ uctx->cntxt_pd = NULL;
+ goto err;
+ }
+
+ uctx->cntxt_pd->uctx = uctx;
+ uctx->cntxt_pd->ibpd.device = &dev->ibdev;
+err:
+ return status;
+}
+
+static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
+{
+ int status = 0;
+ struct ocrdma_pd *pd = uctx->cntxt_pd;
+ struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
+
+ BUG_ON(uctx->pd_in_use);
+ uctx->cntxt_pd = NULL;
+ status = _ocrdma_dealloc_pd(dev, pd);
+ return status;
+}
+
+static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
+{
+ struct ocrdma_pd *pd = NULL;
+
+ mutex_lock(&uctx->mm_list_lock);
+ if (!uctx->pd_in_use) {
+ uctx->pd_in_use = true;
+ pd = uctx->cntxt_pd;
+ }
+ mutex_unlock(&uctx->mm_list_lock);
+
+ return pd;
+}
+
+static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
+{
+ mutex_lock(&uctx->mm_list_lock);
+ uctx->pd_in_use = false;
+ mutex_unlock(&uctx->mm_list_lock);
+}
+
struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata)
{
@@ -229,7 +371,6 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return ERR_PTR(-ENOMEM);
- ctx->dev = dev;
INIT_LIST_HEAD(&ctx->mm_head);
mutex_init(&ctx->mm_list_lock);
@@ -242,18 +383,23 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
memset(ctx->ah_tbl.va, 0, map_len);
ctx->ah_tbl.len = map_len;
+ memset(&resp, 0, sizeof(resp));
resp.ah_tbl_len = ctx->ah_tbl.len;
resp.ah_tbl_page = ctx->ah_tbl.pa;
status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
if (status)
goto map_err;
+
+ status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
+ if (status)
+ goto pd_err;
+
resp.dev_id = dev->id;
resp.max_inline_data = dev->attr.max_inline_data;
resp.wqe_size = dev->attr.wqe_size;
resp.rqe_size = dev->attr.rqe_size;
resp.dpp_wqe_size = dev->attr.wqe_size;
- resp.rsvd = 0;
memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
status = ib_copy_to_udata(udata, &resp, sizeof(resp));
@@ -262,6 +408,7 @@ struct ib_ucontext *ocrdma_alloc_ucontext(struct ib_device *ibdev,
return &ctx->ibucontext;
cpy_err:
+pd_err:
ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
@@ -272,9 +419,13 @@ map_err:
int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
+ int status = 0;
struct ocrdma_mm *mm, *tmp;
struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
- struct pci_dev *pdev = uctx->dev->nic_info.pdev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
+ struct pci_dev *pdev = dev->nic_info.pdev;
+
+ status = ocrdma_dealloc_ucontext_pd(uctx);
ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
@@ -285,13 +436,13 @@ int ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
kfree(mm);
}
kfree(uctx);
- return 0;
+ return status;
}
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
- struct ocrdma_dev *dev = ucontext->dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
unsigned long len = (vma->vm_end - vma->vm_start);
@@ -307,7 +458,10 @@ int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
dev->nic_info.db_total_size)) &&
(len <= dev->nic_info.db_page_size)) {
- /* doorbell mapping */
+ if (vma->vm_flags & VM_READ)
+ return -EPERM;
+
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
len, vma->vm_page_prot);
} else if (dev->nic_info.dpp_unmapped_len &&
@@ -315,19 +469,20 @@ int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
(vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
dev->nic_info.dpp_unmapped_len)) &&
(len <= dev->nic_info.dpp_unmapped_len)) {
- /* dpp area mapping */
+ if (vma->vm_flags & VM_READ)
+ return -EPERM;
+
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
len, vma->vm_page_prot);
} else {
- /* queue memory mapping */
status = remap_pfn_range(vma, vma->vm_start,
vma->vm_pgoff, len, vma->vm_page_prot);
}
return status;
}
-static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
+static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
struct ib_ucontext *ib_ctx,
struct ib_udata *udata)
{
@@ -338,21 +493,21 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
struct ocrdma_alloc_pd_uresp rsp;
struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
+ memset(&rsp, 0, sizeof(rsp));
rsp.id = pd->id;
rsp.dpp_enabled = pd->dpp_enabled;
- db_page_addr = pd->dev->nic_info.unmapped_db +
- (pd->id * pd->dev->nic_info.db_page_size);
- db_page_size = pd->dev->nic_info.db_page_size;
+ db_page_addr = ocrdma_get_db_addr(dev, pd->id);
+ db_page_size = dev->nic_info.db_page_size;
status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
if (status)
return status;
if (pd->dpp_enabled) {
- dpp_page_addr = pd->dev->nic_info.dpp_unmapped_addr +
- (pd->id * OCRDMA_DPP_PAGE_SIZE);
+ dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
+ (pd->id * PAGE_SIZE);
status = ocrdma_add_mmap(uctx, dpp_page_addr,
- OCRDMA_DPP_PAGE_SIZE);
+ PAGE_SIZE);
if (status)
goto dpp_map_err;
rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
@@ -368,7 +523,7 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
ucopy_err:
if (pd->dpp_enabled)
- ocrdma_del_mmap(pd->uctx, dpp_page_addr, OCRDMA_DPP_PAGE_SIZE);
+ ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
dpp_map_err:
ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
return status;
@@ -380,76 +535,75 @@ struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
{
struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
struct ocrdma_pd *pd;
+ struct ocrdma_ucontext *uctx = NULL;
int status;
+ u8 is_uctx_pd = false;
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd)
- return ERR_PTR(-ENOMEM);
- pd->dev = dev;
if (udata && context) {
- pd->dpp_enabled = (dev->nic_info.dev_family ==
- OCRDMA_GEN2_FAMILY) ? true : false;
- pd->num_dpp_qp =
- pd->dpp_enabled ? OCRDMA_PD_MAX_DPP_ENABLED_QP : 0;
+ uctx = get_ocrdma_ucontext(context);
+ pd = ocrdma_get_ucontext_pd(uctx);
+ if (pd) {
+ is_uctx_pd = true;
+ goto pd_mapping;
+ }
}
- status = ocrdma_mbx_alloc_pd(dev, pd);
- if (status) {
- kfree(pd);
- return ERR_PTR(status);
+
+ pd = _ocrdma_alloc_pd(dev, uctx, udata);
+ if (IS_ERR(pd)) {
+ status = PTR_ERR(pd);
+ goto exit;
}
+pd_mapping:
if (udata && context) {
- status = ocrdma_copy_pd_uresp(pd, context, udata);
+ status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
if (status)
goto err;
}
return &pd->ibpd;
err:
- ocrdma_dealloc_pd(&pd->ibpd);
+ if (is_uctx_pd) {
+ ocrdma_release_ucontext_pd(uctx);
+ } else {
+ status = ocrdma_mbx_dealloc_pd(dev, pd);
+ kfree(pd);
+ }
+exit:
return ERR_PTR(status);
}
int ocrdma_dealloc_pd(struct ib_pd *ibpd)
{
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
- struct ocrdma_dev *dev = pd->dev;
- int status;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+ struct ocrdma_ucontext *uctx = NULL;
+ int status = 0;
u64 usr_db;
- status = ocrdma_mbx_dealloc_pd(dev, pd);
- if (pd->uctx) {
+ uctx = pd->uctx;
+ if (uctx) {
u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
- (pd->id * OCRDMA_DPP_PAGE_SIZE);
+ (pd->id * PAGE_SIZE);
if (pd->dpp_enabled)
- ocrdma_del_mmap(pd->uctx, dpp_db, OCRDMA_DPP_PAGE_SIZE);
- usr_db = dev->nic_info.unmapped_db +
- (pd->id * dev->nic_info.db_page_size);
+ ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
+ usr_db = ocrdma_get_db_addr(dev, pd->id);
ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);
+
+ if (is_ucontext_pd(uctx, pd)) {
+ ocrdma_release_ucontext_pd(uctx);
+ return status;
+ }
}
- kfree(pd);
+ status = _ocrdma_dealloc_pd(dev, pd);
return status;
}
-static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd,
- int acc, u32 num_pbls,
- u32 addr_check)
+static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
+ u32 pdid, int acc, u32 num_pbls, u32 addr_check)
{
int status;
- struct ocrdma_mr *mr;
- struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
- struct ocrdma_dev *dev = pd->dev;
-
- if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
- pr_err("%s(%d) leaving err, invalid access rights\n",
- __func__, dev->id);
- return ERR_PTR(-EINVAL);
- }
- mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr)
- return ERR_PTR(-ENOMEM);
- mr->hwmr.dev = dev;
mr->hwmr.fr_mr = 0;
mr->hwmr.local_rd = 1;
mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
@@ -459,25 +613,38 @@ static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd,
mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
mr->hwmr.num_pbls = num_pbls;
- status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pd->id, addr_check);
- if (status) {
- kfree(mr);
- return ERR_PTR(-ENOMEM);
- }
- mr->pd = pd;
+ status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
+ if (status)
+ return status;
+
mr->ibmr.lkey = mr->hwmr.lkey;
if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
mr->ibmr.rkey = mr->hwmr.lkey;
- return mr;
+ return 0;
}
struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
+ int status;
struct ocrdma_mr *mr;
+ struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
- mr = ocrdma_alloc_lkey(ibpd, acc, 0, OCRDMA_ADDR_CHECK_DISABLE);
- if (IS_ERR(mr))
- return ERR_CAST(mr);
+ if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
+ pr_err("%s err, invalid access rights\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
+ OCRDMA_ADDR_CHECK_DISABLE);
+ if (status) {
+ kfree(mr);
+ return ERR_PTR(status);
+ }
return &mr->ibmr;
}
@@ -501,7 +668,8 @@ static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
}
}
-static int ocrdma_get_pbl_info(struct ocrdma_mr *mr, u32 num_pbes)
+static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
+ u32 num_pbes)
{
u32 num_pbls = 0;
u32 idx = 0;
@@ -517,7 +685,7 @@ static int ocrdma_get_pbl_info(struct ocrdma_mr *mr, u32 num_pbes)
num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
num_pbls = num_pbls / (pbl_size / sizeof(u64));
idx++;
- } while (num_pbls >= mr->hwmr.dev->attr.max_num_mr_pbl);
+ } while (num_pbls >= dev->attr.max_num_mr_pbl);
mr->hwmr.num_pbes = num_pbes;
mr->hwmr.num_pbls = num_pbls;
@@ -612,13 +780,12 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
u64 usr_addr, int acc, struct ib_udata *udata)
{
int status = -ENOMEM;
- struct ocrdma_dev *dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
struct ocrdma_mr *mr;
struct ocrdma_pd *pd;
u32 num_pbes;
pd = get_ocrdma_pd(ibpd);
- dev = pd->dev;
if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
return ERR_PTR(-EINVAL);
@@ -626,14 +793,13 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr)
return ERR_PTR(status);
- mr->hwmr.dev = dev;
mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
if (IS_ERR(mr->umem)) {
status = -EFAULT;
goto umem_err;
}
num_pbes = ib_umem_page_count(mr->umem);
- status = ocrdma_get_pbl_info(mr, num_pbes);
+ status = ocrdma_get_pbl_info(dev, mr, num_pbes);
if (status)
goto umem_err;
@@ -653,7 +819,6 @@ struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
if (status)
goto mbx_err;
- mr->pd = pd;
mr->ibmr.lkey = mr->hwmr.lkey;
if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
mr->ibmr.rkey = mr->hwmr.lkey;
@@ -670,7 +835,7 @@ umem_err:
int ocrdma_dereg_mr(struct ib_mr *ib_mr)
{
struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
- struct ocrdma_dev *dev = mr->hwmr.dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
int status;
status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
@@ -685,28 +850,29 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
return status;
}
-static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata,
+static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
+ struct ib_udata *udata,
struct ib_ucontext *ib_ctx)
{
int status;
- struct ocrdma_ucontext *uctx;
+ struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
struct ocrdma_create_cq_uresp uresp;
+ memset(&uresp, 0, sizeof(uresp));
uresp.cq_id = cq->id;
- uresp.page_size = cq->len;
+ uresp.page_size = PAGE_ALIGN(cq->len);
uresp.num_pages = 1;
uresp.max_hw_cqe = cq->max_hw_cqe;
uresp.page_addr[0] = cq->pa;
- uresp.db_page_addr = cq->dev->nic_info.unmapped_db;
- uresp.db_page_size = cq->dev->nic_info.db_page_size;
+ uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
+ uresp.db_page_size = dev->nic_info.db_page_size;
uresp.phase_change = cq->phase_change ? 1 : 0;
status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
if (status) {
pr_err("%s(%d) copy error cqid=0x%x.\n",
- __func__, cq->dev->id, cq->id);
+ __func__, dev->id, cq->id);
goto err;
}
- uctx = get_ocrdma_ucontext(ib_ctx);
status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
if (status)
goto err;
@@ -726,6 +892,8 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
{
struct ocrdma_cq *cq;
struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
+ struct ocrdma_ucontext *uctx = NULL;
+ u16 pd_id = 0;
int status;
struct ocrdma_create_cq_ureq ureq;
@@ -742,15 +910,19 @@ struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev, int entries, int vector,
spin_lock_init(&cq->comp_handler_lock);
INIT_LIST_HEAD(&cq->sq_head);
INIT_LIST_HEAD(&cq->rq_head);
- cq->dev = dev;
- status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq);
+ if (ib_ctx) {
+ uctx = get_ocrdma_ucontext(ib_ctx);
+ pd_id = uctx->cntxt_pd->id;
+ }
+
+ status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
if (status) {
kfree(cq);
return ERR_PTR(status);
}
if (ib_ctx) {
- status = ocrdma_copy_cq_uresp(cq, udata, ib_ctx);
+ status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
if (status)
goto ctx_err;
}
@@ -784,13 +956,17 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
{
int status;
struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
- struct ocrdma_dev *dev = cq->dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
+ int pdid = 0;
status = ocrdma_mbx_destroy_cq(dev, cq);
if (cq->ucontext) {
- ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, cq->len);
- ocrdma_del_mmap(cq->ucontext, dev->nic_info.unmapped_db,
+ pdid = cq->ucontext->cntxt_pd->id;
+ ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
+ PAGE_ALIGN(cq->len));
+ ocrdma_del_mmap(cq->ucontext,
+ ocrdma_get_db_addr(dev, pdid),
dev->nic_info.db_page_size);
}
dev->cq_tbl[cq->id] = NULL;
@@ -818,14 +994,17 @@ static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
struct ib_qp_init_attr *attrs)
{
- if (attrs->qp_type != IB_QPT_GSI &&
- attrs->qp_type != IB_QPT_RC &&
- attrs->qp_type != IB_QPT_UD) {
+ if ((attrs->qp_type != IB_QPT_GSI) &&
+ (attrs->qp_type != IB_QPT_RC) &&
+ (attrs->qp_type != IB_QPT_UC) &&
+ (attrs->qp_type != IB_QPT_UD)) {
pr_err("%s(%d) unsupported qp type=0x%x requested\n",
__func__, dev->id, attrs->qp_type);
return -EINVAL;
}
- if (attrs->cap.max_send_wr > dev->attr.max_wqe) {
+ /* Skip the check for QP1 to support CM size of 128 */
+ if ((attrs->qp_type != IB_QPT_GSI) &&
+ (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
__func__, dev->id, attrs->cap.max_send_wr);
pr_err("%s(%d) supported send_wr=0x%x\n",
@@ -876,11 +1055,9 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
/* verify consumer QPs are not trying to use GSI QP's CQ */
if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
- (dev->gsi_sqcq == get_ocrdma_cq(attrs->recv_cq)) ||
- (dev->gsi_rqcq == get_ocrdma_cq(attrs->send_cq)) ||
- (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
+ (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
- __func__, dev->id);
+ __func__, dev->id);
return -EINVAL;
}
}
@@ -903,13 +1080,13 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
uresp.qp_id = qp->id;
uresp.sq_dbid = qp->sq.dbid;
uresp.num_sq_pages = 1;
- uresp.sq_page_size = qp->sq.len;
+ uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
uresp.sq_page_addr[0] = qp->sq.pa;
uresp.num_wqe_allocated = qp->sq.max_cnt;
if (!srq) {
uresp.rq_dbid = qp->rq.dbid;
uresp.num_rq_pages = 1;
- uresp.rq_page_size = qp->rq.len;
+ uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
uresp.rq_page_addr[0] = qp->rq.pa;
uresp.num_rqe_allocated = qp->rq.max_cnt;
}
@@ -917,9 +1094,8 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
uresp.db_page_size = dev->nic_info.db_page_size;
if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
- uresp.db_rq_offset = ((qp->id & 0xFFFF) < 128) ?
- OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET;
- uresp.db_shift = (qp->id < 128) ? 24 : 16;
+ uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
+ uresp.db_shift = 24;
} else {
uresp.db_sq_offset = OCRDMA_DB_SQ_OFFSET;
uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
@@ -962,8 +1138,7 @@ static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
OCRDMA_DB_GEN2_SQ_OFFSET;
qp->rq_db = dev->nic_info.db +
(pd->id * dev->nic_info.db_page_size) +
- ((qp->id < 128) ?
- OCRDMA_DB_GEN2_RQ1_OFFSET : OCRDMA_DB_GEN2_RQ2_OFFSET);
+ OCRDMA_DB_GEN2_RQ_OFFSET;
} else {
qp->sq_db = dev->nic_info.db +
(pd->id * dev->nic_info.db_page_size) +
@@ -1004,6 +1179,7 @@ static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
qp->sq.max_sges = attrs->cap.max_send_sge;
qp->rq.max_sges = attrs->cap.max_recv_sge;
qp->state = OCRDMA_QPS_RST;
+ qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
}
@@ -1024,7 +1200,7 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
int status;
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
struct ocrdma_qp *qp;
- struct ocrdma_dev *dev = pd->dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
struct ocrdma_create_qp_ureq ureq;
u16 dpp_credit_lmt, dpp_offset;
@@ -1044,6 +1220,9 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
}
qp->dev = dev;
ocrdma_set_qp_init_params(qp, pd, attrs);
+ if (udata == NULL)
+ qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
+ OCRDMA_QP_FAST_REG);
mutex_lock(&dev->dev_lock);
status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
@@ -1054,8 +1233,6 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
/* user space QP's wr_id table are managed in library */
if (udata == NULL) {
- qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
- OCRDMA_QP_FAST_REG);
status = ocrdma_alloc_wr_id_tbl(qp);
if (status)
goto map_err;
@@ -1091,6 +1268,17 @@ gen_err:
return ERR_PTR(status);
}
+
+static void ocrdma_flush_rq_db(struct ocrdma_qp *qp)
+{
+ if (qp->db_cache) {
+ u32 val = qp->rq.dbid | (qp->db_cache <<
+ ocrdma_get_num_posted_shift(qp));
+ iowrite32(val, qp->rq_db);
+ qp->db_cache = 0;
+ }
+}
+
int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask)
{
@@ -1102,13 +1290,16 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
qp = get_ocrdma_qp(ibqp);
dev = qp->dev;
if (attr_mask & IB_QP_STATE)
- status = ocrdma_qp_state_machine(qp, attr->qp_state, &old_qps);
+ status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
/* if new and previous states are same hw doesn't need to
* know about it.
*/
if (status < 0)
return status;
status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps);
+ if (!status && attr_mask & IB_QP_STATE && attr->qp_state == IB_QPS_RTR)
+ ocrdma_flush_rq_db(qp);
+
return status;
}
@@ -1213,7 +1404,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
qp_attr->cap.max_send_sge = qp->sq.max_sges;
qp_attr->cap.max_recv_sge = qp->rq.max_sges;
- qp_attr->cap.max_inline_data = dev->attr.max_inline_data;
+ qp_attr->cap.max_inline_data = qp->max_inline_data;
qp_init_attr->cap = qp_attr->cap;
memcpy(&qp_attr->ah_attr.grh.dgid, &params.dgid[0],
sizeof(params.dgid));
@@ -1276,23 +1467,17 @@ static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
{
- int free_cnt;
- if (q->head >= q->tail)
- free_cnt = (q->max_cnt - q->head) + q->tail;
- else
- free_cnt = q->tail - q->head;
- return free_cnt;
+ return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
}
static int is_hw_sq_empty(struct ocrdma_qp *qp)
{
- return (qp->sq.tail == qp->sq.head &&
- ocrdma_hwq_free_cnt(&qp->sq) ? 1 : 0);
+ return (qp->sq.tail == qp->sq.head);
}
static int is_hw_rq_empty(struct ocrdma_qp *qp)
{
- return (qp->rq.tail == qp->rq.head) ? 1 : 0;
+ return (qp->rq.tail == qp->rq.head);
}
static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
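
The reworked ocrdma_hwq_free_cnt() in the hunk above computes free slots as ((max_wqe_idx - head) + tail) % max_cnt. Assuming max_wqe_idx is max_cnt - 1 (which the formula implies), this keeps one slot permanently in reserve so a completely full ring is never mistaken for an empty one. A small stand-alone check of the arithmetic (user-space C, assert-based):

    #include <assert.h>

    static unsigned int free_cnt(unsigned int max_wqe_idx, unsigned int head,
                                 unsigned int tail, unsigned int max_cnt)
    {
            return ((max_wqe_idx - head) + tail) % max_cnt;
    }

    int main(void)
    {
            assert(free_cnt(15, 0, 0, 16) == 15);   /* empty ring: 15 of 16 usable */
            assert(free_cnt(15, 5, 2, 16) == 12);   /* 3 entries outstanding        */
            assert(free_cnt(15, 1, 14, 16) == 12);  /* same, after head wrapped     */
            return 0;
    }
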
@@ -1358,17 +1543,18 @@ static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
*/
discard_cnt += 1;
cqe->cmn.qpn = 0;
- if (is_cqe_for_sq(cqe))
+ if (is_cqe_for_sq(cqe)) {
ocrdma_hwq_inc_tail(&qp->sq);
- else {
+ } else {
if (qp->srq) {
spin_lock_irqsave(&qp->srq->q_lock, flags);
ocrdma_hwq_inc_tail(&qp->srq->rq);
ocrdma_srq_toggle_bit(qp->srq, cur_getp);
spin_unlock_irqrestore(&qp->srq->q_lock, flags);
- } else
+ } else {
ocrdma_hwq_inc_tail(&qp->rq);
+ }
}
skip_cqe:
cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
@@ -1376,7 +1562,7 @@ skip_cqe:
spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
}
-static void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
+void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
{
int found = false;
unsigned long flags;
@@ -1442,9 +1628,11 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
mutex_unlock(&dev->dev_lock);
if (pd->uctx) {
- ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa, qp->sq.len);
+ ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
+ PAGE_ALIGN(qp->sq.len));
if (!qp->srq)
- ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa, qp->rq.len);
+ ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
+ PAGE_ALIGN(qp->rq.len));
}
ocrdma_del_flush_qp(qp);
@@ -1455,21 +1643,23 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
return status;
}
-static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
+static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
+ struct ib_udata *udata)
{
int status;
struct ocrdma_create_srq_uresp uresp;
+ memset(&uresp, 0, sizeof(uresp));
uresp.rq_dbid = srq->rq.dbid;
uresp.num_rq_pages = 1;
uresp.rq_page_addr[0] = srq->rq.pa;
uresp.rq_page_size = srq->rq.len;
- uresp.db_page_addr = srq->dev->nic_info.unmapped_db +
- (srq->pd->id * srq->dev->nic_info.db_page_size);
- uresp.db_page_size = srq->dev->nic_info.db_page_size;
+ uresp.db_page_addr = dev->nic_info.unmapped_db +
+ (srq->pd->id * dev->nic_info.db_page_size);
+ uresp.db_page_size = dev->nic_info.db_page_size;
uresp.num_rqe_allocated = srq->rq.max_cnt;
- if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
- uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET;
+ if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
+ uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
uresp.db_shift = 24;
} else {
uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
@@ -1492,7 +1682,7 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
{
int status = -ENOMEM;
struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
- struct ocrdma_dev *dev = pd->dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
struct ocrdma_srq *srq;
if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
@@ -1505,10 +1695,9 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
return ERR_PTR(status);
spin_lock_init(&srq->q_lock);
- srq->dev = dev;
srq->pd = pd;
srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
- status = ocrdma_mbx_create_srq(srq, init_attr, pd);
+ status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
if (status)
goto err;
@@ -1535,7 +1724,7 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
}
if (udata) {
- status = ocrdma_copy_srq_uresp(srq, udata);
+ status = ocrdma_copy_srq_uresp(dev, srq, udata);
if (status)
goto arm_err;
}
@@ -1581,15 +1770,15 @@ int ocrdma_destroy_srq(struct ib_srq *ibsrq)
{
int status;
struct ocrdma_srq *srq;
- struct ocrdma_dev *dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
srq = get_ocrdma_srq(ibsrq);
- dev = srq->dev;
status = ocrdma_mbx_destroy_srq(dev, srq);
if (srq->pd->uctx)
- ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa, srq->rq.len);
+ ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
+ PAGE_ALIGN(srq->rq.len));
kfree(srq->idx_bit_fields);
kfree(srq->rqe_wr_id_tbl);
@@ -1631,23 +1820,43 @@ static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
memset(sge, 0, sizeof(*sge));
}
+static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
+{
+ uint32_t total_len = 0, i;
+
+ for (i = 0; i < num_sge; i++)
+ total_len += sg_list[i].length;
+ return total_len;
+}
+
+
static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
struct ocrdma_hdr_wqe *hdr,
struct ocrdma_sge *sge,
struct ib_send_wr *wr, u32 wqe_size)
{
- if (wr->send_flags & IB_SEND_INLINE) {
- if (wr->sg_list[0].length > qp->max_inline_data) {
+ int i;
+ char *dpp_addr;
+
+ if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
+ hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
+ if (unlikely(hdr->total_len > qp->max_inline_data)) {
pr_err("%s() supported_len=0x%x,\n"
" unspported len req=0x%x\n", __func__,
- qp->max_inline_data, wr->sg_list[0].length);
+ qp->max_inline_data, hdr->total_len);
return -EINVAL;
}
- memcpy(sge,
- (void *)(unsigned long)wr->sg_list[0].addr,
- wr->sg_list[0].length);
- hdr->total_len = wr->sg_list[0].length;
+ dpp_addr = (char *)sge;
+ for (i = 0; i < wr->num_sge; i++) {
+ memcpy(dpp_addr,
+ (void *)(unsigned long)wr->sg_list[i].addr,
+ wr->sg_list[i].length);
+ dpp_addr += wr->sg_list[i].length;
+ }
+
wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
+ if (0 == hdr->total_len)
+ wqe_size += sizeof(struct ocrdma_sge);
hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
} else {
ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
@@ -1672,8 +1881,9 @@ static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
ocrdma_build_ud_hdr(qp, hdr, wr);
sge = (struct ocrdma_sge *)(hdr + 2);
wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
- } else
+ } else {
sge = (struct ocrdma_sge *)(hdr + 1);
+ }
status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
return status;
@@ -1716,6 +1926,96 @@ static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
ext_rw->len = hdr->total_len;
}
+static void build_frmr_pbes(struct ib_send_wr *wr, struct ocrdma_pbl *pbl_tbl,
+ struct ocrdma_hw_mr *hwmr)
+{
+ int i;
+ u64 buf_addr = 0;
+ int num_pbes;
+ struct ocrdma_pbe *pbe;
+
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+ num_pbes = 0;
+
+ /* go through the OS phy regions & fill hw pbe entries into pbls. */
+ for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) {
+ /* one OS buf may need more than one pbe when
+ * buffers are of different sizes;
+ * split the ib_buf into one or more pbes.
+ */
+ buf_addr = wr->wr.fast_reg.page_list->page_list[i];
+ pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
+ pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
+ num_pbes += 1;
+ pbe++;
+
+ /* if the current pbl is full of pbes,
+ * move to the next pbl.
+ */
+ if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+ }
+ }
+ return;
+}
+
+static int get_encoded_page_size(int pg_sz)
+{
+ /* Max size is 256M (4096 << 16) */
+ int i = 0;
+ for (; i < 17; i++)
+ if (pg_sz == (4096 << i))
+ break;
+ return i;
+}
+
+
+static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
+ struct ib_send_wr *wr)
+{
+ u64 fbo;
+ struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
+ struct ocrdma_mr *mr;
+ u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
+
+ wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
+
+ if ((wr->wr.fast_reg.page_list_len >
+ qp->dev->attr.max_pages_per_frmr) ||
+ (wr->wr.fast_reg.length > 0xffffffffULL))
+ return -EINVAL;
+
+ hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
+ hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
+
+ if (wr->wr.fast_reg.page_list_len == 0)
+ BUG();
+ if (wr->wr.fast_reg.access_flags & IB_ACCESS_LOCAL_WRITE)
+ hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
+ if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_WRITE)
+ hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
+ if (wr->wr.fast_reg.access_flags & IB_ACCESS_REMOTE_READ)
+ hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
+ hdr->lkey = wr->wr.fast_reg.rkey;
+ hdr->total_len = wr->wr.fast_reg.length;
+
+ fbo = wr->wr.fast_reg.iova_start -
+ (wr->wr.fast_reg.page_list->page_list[0] & PAGE_MASK);
+
+ fast_reg->va_hi = upper_32_bits(wr->wr.fast_reg.iova_start);
+ fast_reg->va_lo = (u32) (wr->wr.fast_reg.iova_start & 0xffffffff);
+ fast_reg->fbo_hi = upper_32_bits(fbo);
+ fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
+ fast_reg->num_sges = wr->wr.fast_reg.page_list_len;
+ fast_reg->size_sge =
+ get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
+ mr = (struct ocrdma_mr *) (unsigned long) qp->dev->stag_arr[(hdr->lkey >> 8) &
+ (OCRDMA_MAX_STAG - 1)];
+ build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
+ return 0;
+}
+
static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
{
u32 val = qp->sq.dbid | (1 << 16);
@@ -1747,7 +2047,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
hdr = ocrdma_hwq_head(&qp->sq);
hdr->cw = 0;
- if (wr->send_flags & IB_SEND_SIGNALED)
+ if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
if (wr->send_flags & IB_SEND_FENCE)
hdr->cw |=
@@ -1785,10 +2085,14 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
case IB_WR_LOCAL_INV:
hdr->cw |=
(OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
- hdr->cw |= (sizeof(struct ocrdma_hdr_wqe) /
+ hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
+ sizeof(struct ocrdma_sge)) /
OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
hdr->lkey = wr->ex.invalidate_rkey;
break;
+ case IB_WR_FAST_REG_MR:
+ status = ocrdma_build_fr(qp, hdr, wr);
+ break;
default:
status = -EINVAL;
break;
@@ -1797,7 +2101,7 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
*bad_wr = wr;
break;
}
- if (wr->send_flags & IB_SEND_SIGNALED)
+ if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
else
qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
@@ -1821,7 +2125,10 @@ static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
{
u32 val = qp->rq.dbid | (1 << ocrdma_get_num_posted_shift(qp));
- iowrite32(val, qp->rq_db);
+ if (qp->state != OCRDMA_QPS_INIT)
+ iowrite32(val, qp->rq_db);
+ else
+ qp->db_cache++;
}
static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,
@@ -1955,7 +2262,7 @@ int ocrdma_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
{
- enum ib_wc_status ibwc_status = IB_WC_GENERAL_ERR;
+ enum ib_wc_status ibwc_status;
switch (status) {
case OCRDMA_CQE_GENERAL_ERR:
@@ -2052,6 +2359,9 @@ static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
case OCRDMA_SEND:
ibwc->opcode = IB_WC_SEND;
break;
+ case OCRDMA_FR_MR:
+ ibwc->opcode = IB_WC_FAST_REG_MR;
+ break;
case OCRDMA_LKEY_INV:
ibwc->opcode = IB_WC_LOCAL_INV;
break;
@@ -2105,7 +2415,7 @@ static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
ibwc->status = ocrdma_to_ibwc_err(status);
ocrdma_flush_qp(qp);
- ocrdma_qp_state_machine(qp, IB_QPS_ERR, NULL);
+ ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
/* if wqe/rqe pending for which cqe needs to be returned,
* trigger inflating it.
@@ -2190,7 +2500,8 @@ static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
ocrdma_update_wc(qp, ibwc, tail);
*polled = true;
}
- wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK;
+ wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
+ OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
if (tail != wqe_idx)
expand = true; /* Coalesced CQE can't be consumed yet */
@@ -2239,7 +2550,8 @@ static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
u32 wqe_idx;
srq = get_ocrdma_srq(qp->ibqp.srq);
- wqe_idx = le32_to_cpu(cqe->rq.buftag_qpn) >> OCRDMA_CQE_BUFTAG_SHIFT;
+ wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
+ OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
spin_lock_irqsave(&srq->q_lock, flags);
ocrdma_srq_toggle_bit(srq, wqe_idx);
@@ -2296,9 +2608,9 @@ static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
}
- if (qp->ibqp.srq)
+ if (qp->ibqp.srq) {
ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
- else {
+ } else {
ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
ocrdma_hwq_inc_tail(&qp->rq);
}
@@ -2311,13 +2623,14 @@ static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
bool expand = false;
ibwc->wc_flags = 0;
- if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
+ if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
status = (le32_to_cpu(cqe->flags_status_srcqpn) &
OCRDMA_CQE_UD_STATUS_MASK) >>
OCRDMA_CQE_UD_STATUS_SHIFT;
- else
+ } else {
status = (le32_to_cpu(cqe->flags_status_srcqpn) &
OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
+ }
if (status == OCRDMA_CQE_SUCCESS) {
*polled = true;
@@ -2335,9 +2648,10 @@ static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
if (cq->phase_change) {
if (cur_getp == 0)
cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
- } else
+ } else {
/* clear valid bit */
cqe->flags_status_srcqpn = 0;
+ }
}
static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
@@ -2348,7 +2662,7 @@ static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
bool expand = false;
int polled_hw_cqes = 0;
struct ocrdma_qp *qp = NULL;
- struct ocrdma_dev *dev = cq->dev;
+ struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
struct ocrdma_cqe *cqe;
u16 cur_getp; bool polled = false; bool stop = false;
@@ -2414,8 +2728,9 @@ static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
} else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
ocrdma_hwq_inc_tail(&qp->rq);
- } else
+ } else {
return err_cqes;
+ }
ibwc->byte_len = 0;
ibwc->status = IB_WC_WR_FLUSH_ERR;
ibwc = ibwc + 1;
@@ -2428,14 +2743,11 @@ static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
int cqes_to_poll = num_entries;
- struct ocrdma_cq *cq = NULL;
- unsigned long flags;
- struct ocrdma_dev *dev;
+ struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
int num_os_cqe = 0, err_cqes = 0;
struct ocrdma_qp *qp;
-
- cq = get_ocrdma_cq(ibcq);
- dev = cq->dev;
+ unsigned long flags;
/* poll cqes from adapter CQ */
spin_lock_irqsave(&cq->cq_lock, flags);
@@ -2466,16 +2778,14 @@ int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
{
- struct ocrdma_cq *cq;
- unsigned long flags;
- struct ocrdma_dev *dev;
+ struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
u16 cq_id;
u16 cur_getp;
struct ocrdma_cqe *cqe;
+ unsigned long flags;
- cq = get_ocrdma_cq(ibcq);
cq_id = cq->id;
- dev = cq->dev;
spin_lock_irqsave(&cq->cq_lock, flags);
if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
@@ -2497,3 +2807,226 @@ int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
spin_unlock_irqrestore(&cq->cq_lock, flags);
return 0;
}
+
+struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *ibpd, int max_page_list_len)
+{
+ int status;
+ struct ocrdma_mr *mr;
+ struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+
+ if (max_page_list_len > dev->attr.max_pages_per_frmr)
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
+
+ status = ocrdma_get_pbl_info(dev, mr, max_page_list_len);
+ if (status)
+ goto pbl_err;
+ mr->hwmr.fr_mr = 1;
+ mr->hwmr.remote_rd = 0;
+ mr->hwmr.remote_wr = 0;
+ mr->hwmr.local_rd = 0;
+ mr->hwmr.local_wr = 0;
+ mr->hwmr.mw_bind = 0;
+ status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
+ if (status)
+ goto pbl_err;
+ status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
+ if (status)
+ goto mbx_err;
+ mr->ibmr.rkey = mr->hwmr.lkey;
+ mr->ibmr.lkey = mr->hwmr.lkey;
+ dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] = (unsigned long) mr;
+ return &mr->ibmr;
+mbx_err:
+ ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
+pbl_err:
+ kfree(mr);
+ return ERR_PTR(-ENOMEM);
+}
+
+struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
+ *ibdev,
+ int page_list_len)
+{
+ struct ib_fast_reg_page_list *frmr_list;
+ int size;
+
+ size = sizeof(*frmr_list) + (page_list_len * sizeof(u64));
+ frmr_list = kzalloc(size, GFP_KERNEL);
+ if (!frmr_list)
+ return ERR_PTR(-ENOMEM);
+ frmr_list->page_list = (u64 *)(frmr_list + 1);
+ return frmr_list;
+}
+
+void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list)
+{
+ kfree(page_list);
+}
+
+#define MAX_KERNEL_PBE_SIZE 65536
+static inline int count_kernel_pbes(struct ib_phys_buf *buf_list,
+ int buf_cnt, u32 *pbe_size)
+{
+ u64 total_size = 0;
+ u64 buf_size = 0;
+ int i;
+ *pbe_size = roundup(buf_list[0].size, PAGE_SIZE);
+ *pbe_size = roundup_pow_of_two(*pbe_size);
+
+ /* find the smallest PBE size that we can have */
+ for (i = 0; i < buf_cnt; i++) {
+ /* the first addr may not be page aligned, so skip the check */
+ if ((i != 0) && ((buf_list[i].addr & ~PAGE_MASK) ||
+ (buf_list[i].size & ~PAGE_MASK))) {
+ return 0;
+ }
+
+ /* if the configured PBE size is greater than the chosen one,
+ * reduce the PBE size.
+ */
+ buf_size = roundup(buf_list[i].size, PAGE_SIZE);
+ /* pbe_size has to be a power-of-two multiple of 4K: 1, 2, 4, 8, ... */
+ buf_size = roundup_pow_of_two(buf_size);
+ if (*pbe_size > buf_size)
+ *pbe_size = buf_size;
+
+ total_size += buf_size;
+ }
+ *pbe_size = *pbe_size > MAX_KERNEL_PBE_SIZE ?
+ (MAX_KERNEL_PBE_SIZE) : (*pbe_size);
+
+ /* num_pbes = total_size / (*pbe_size); this is implemented below. */
+
+ return total_size >> ilog2(*pbe_size);
+}
+
+static void build_kernel_pbes(struct ib_phys_buf *buf_list, int ib_buf_cnt,
+ u32 pbe_size, struct ocrdma_pbl *pbl_tbl,
+ struct ocrdma_hw_mr *hwmr)
+{
+ int i;
+ int idx;
+ int pbes_per_buf = 0;
+ u64 buf_addr = 0;
+ int num_pbes;
+ struct ocrdma_pbe *pbe;
+ int total_num_pbes = 0;
+
+ if (!hwmr->num_pbes)
+ return;
+
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+ num_pbes = 0;
+
+ /* go through the OS phy regions & fill hw pbe entries into pbls. */
+ for (i = 0; i < ib_buf_cnt; i++) {
+ buf_addr = buf_list[i].addr;
+ pbes_per_buf =
+ roundup_pow_of_two(roundup(buf_list[i].size, PAGE_SIZE)) /
+ pbe_size;
+ hwmr->len += buf_list[i].size;
+ /* one OS buf may need more than one pbe when
+ * buffers are of different sizes;
+ * split the ib_buf into one or more pbes.
+ */
+ for (idx = 0; idx < pbes_per_buf; idx++) {
+ /* we always program page-aligned addresses;
+ * the first unaligned address is taken care of by the fbo.
+ */
+ if (i == 0) {
+ /* for a non-zero fbo, assign the
+ * start of the page.
+ */
+ pbe->pa_lo =
+ cpu_to_le32((u32) (buf_addr & PAGE_MASK));
+ pbe->pa_hi =
+ cpu_to_le32((u32) upper_32_bits(buf_addr));
+ } else {
+ pbe->pa_lo =
+ cpu_to_le32((u32) (buf_addr & 0xffffffff));
+ pbe->pa_hi =
+ cpu_to_le32((u32) upper_32_bits(buf_addr));
+ }
+ buf_addr += pbe_size;
+ num_pbes += 1;
+ total_num_pbes += 1;
+ pbe++;
+
+ if (total_num_pbes == hwmr->num_pbes)
+ goto mr_tbl_done;
+ /* if the current pbl is full of pbes,
+ * move to the next pbl.
+ */
+ if (num_pbes == (hwmr->pbl_size/sizeof(u64))) {
+ pbl_tbl++;
+ pbe = (struct ocrdma_pbe *)pbl_tbl->va;
+ num_pbes = 0;
+ }
+ }
+ }
+mr_tbl_done:
+ return;
+}
+
+struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *ibpd,
+ struct ib_phys_buf *buf_list,
+ int buf_cnt, int acc, u64 *iova_start)
+{
+ int status = -ENOMEM;
+ struct ocrdma_mr *mr;
+ struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
+ struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
+ u32 num_pbes;
+ u32 pbe_size = 0;
+
+ if ((acc & IB_ACCESS_REMOTE_WRITE) && !(acc & IB_ACCESS_LOCAL_WRITE))
+ return ERR_PTR(-EINVAL);
+
+ mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+ if (!mr)
+ return ERR_PTR(status);
+
+ num_pbes = count_kernel_pbes(buf_list, buf_cnt, &pbe_size);
+ if (num_pbes == 0) {
+ status = -EINVAL;
+ goto pbl_err;
+ }
+ status = ocrdma_get_pbl_info(dev, mr, num_pbes);
+ if (status)
+ goto pbl_err;
+
+ mr->hwmr.pbe_size = pbe_size;
+ mr->hwmr.fbo = *iova_start - (buf_list[0].addr & PAGE_MASK);
+ mr->hwmr.va = *iova_start;
+ mr->hwmr.local_rd = 1;
+ mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
+ mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
+ mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
+ mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
+ mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
+
+ status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
+ if (status)
+ goto pbl_err;
+ build_kernel_pbes(buf_list, buf_cnt, pbe_size, mr->hwmr.pbl_table,
+ &mr->hwmr);
+ status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
+ if (status)
+ goto mbx_err;
+
+ mr->ibmr.lkey = mr->hwmr.lkey;
+ if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
+ mr->ibmr.rkey = mr->hwmr.lkey;
+ return &mr->ibmr;
+
+mbx_err:
+ ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
+pbl_err:
+ kfree(mr);
+ return ERR_PTR(status);
+}
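
For context on the fast-register support added above (ocrdma_build_fr handling IB_WR_FAST_REG_MR, plus ocrdma_alloc_frmr and the page-list helpers), here is a minimal consumer-side sketch. It is not part of the patch: frmr_register() and its arguments are hypothetical, and only the ib_* verbs and the wr.fast_reg fields are taken from the code above.

/* Sketch: register npages page-aligned DMA addresses as a fast-reg MR and
 * post the IB_WR_FAST_REG_MR work request that reaches ocrdma_build_fr().
 * Assumes <rdma/ib_verbs.h>, a usable PD/QP, and caller-owned cleanup of
 * mr and pl on the success path.
 */
static int frmr_register(struct ib_pd *pd, struct ib_qp *qp,
			 u64 *dma_pages, int npages, u64 iova, u32 len)
{
	struct ib_mr *mr;
	struct ib_fast_reg_page_list *pl;
	struct ib_send_wr wr, *bad_wr;
	int i, ret;

	mr = ib_alloc_fast_reg_mr(pd, npages);
	if (IS_ERR(mr))
		return PTR_ERR(mr);
	pl = ib_alloc_fast_reg_page_list(pd->device, npages);
	if (IS_ERR(pl)) {
		ib_dereg_mr(mr);
		return PTR_ERR(pl);
	}
	for (i = 0; i < npages; i++)
		pl->page_list[i] = dma_pages[i];	/* page-aligned DMA addrs */

	memset(&wr, 0, sizeof(wr));
	wr.opcode = IB_WR_FAST_REG_MR;
	wr.send_flags = IB_SEND_SIGNALED;
	wr.wr.fast_reg.iova_start = iova;
	wr.wr.fast_reg.page_list = pl;
	wr.wr.fast_reg.page_list_len = npages;
	wr.wr.fast_reg.page_shift = PAGE_SHIFT;
	wr.wr.fast_reg.length = len;
	wr.wr.fast_reg.access_flags = IB_ACCESS_LOCAL_WRITE |
				      IB_ACCESS_REMOTE_READ |
				      IB_ACCESS_REMOTE_WRITE;
	wr.wr.fast_reg.rkey = mr->rkey;

	ret = ib_post_send(qp, &wr, &bad_wr);
	if (ret) {
		ib_free_fast_reg_page_list(pl);
		ib_dereg_mr(mr);
	}
	return ret;
}
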
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index 633f03d8027..b8f7853fd36 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -72,6 +72,7 @@ int ocrdma_query_qp(struct ib_qp *,
struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *);
int ocrdma_destroy_qp(struct ib_qp *);
+void ocrdma_del_flush_qp(struct ocrdma_qp *qp);
struct ib_srq *ocrdma_create_srq(struct ib_pd *, struct ib_srq_init_attr *,
struct ib_udata *);
@@ -89,5 +90,10 @@ struct ib_mr *ocrdma_reg_kernel_mr(struct ib_pd *,
int num_phys_buf, int acc, u64 *iova_start);
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *, u64 start, u64 length,
u64 virt, int acc, struct ib_udata *);
+struct ib_mr *ocrdma_alloc_frmr(struct ib_pd *pd, int max_page_list_len);
+struct ib_fast_reg_page_list *ocrdma_alloc_frmr_page_list(struct ib_device
+ *ibdev,
+ int page_list_len);
+void ocrdma_free_frmr_page_list(struct ib_fast_reg_page_list *page_list);
#endif /* __OCRDMA_VERBS_H__ */
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 4a9af795b88..1946101419a 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -89,7 +89,6 @@ struct qlogic_ib_stats {
extern struct qlogic_ib_stats qib_stats;
extern const struct pci_error_handlers qib_pci_err_handler;
-extern struct pci_driver qib_driver;
#define QIB_CHIP_SWVERSION QIB_CHIP_VERS_MAJ
/*
@@ -576,11 +575,13 @@ struct qib_pportdata {
/* read/write using lock */
spinlock_t sdma_lock ____cacheline_aligned_in_smp;
struct list_head sdma_activelist;
+ struct list_head sdma_userpending;
u64 sdma_descq_added;
u64 sdma_descq_removed;
u16 sdma_descq_tail;
u16 sdma_descq_head;
u8 sdma_generation;
+ u8 sdma_intrequest;
struct tasklet_struct sdma_sw_clean_up_task
____cacheline_aligned_in_smp;
@@ -1326,6 +1327,8 @@ int qib_setup_sdma(struct qib_pportdata *);
void qib_teardown_sdma(struct qib_pportdata *);
void __qib_sdma_intr(struct qib_pportdata *);
void qib_sdma_intr(struct qib_pportdata *);
+void qib_user_sdma_send_desc(struct qib_pportdata *dd,
+ struct list_head *pktlist);
int qib_sdma_verbs_send(struct qib_pportdata *, struct qib_sge_state *,
u32, struct qib_verbs_txreq *);
/* ppd->sdma_lock should be locked before calling this. */
diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h
index 4f255b723ff..5670ace27c6 100644
--- a/drivers/infiniband/hw/qib/qib_common.h
+++ b/drivers/infiniband/hw/qib/qib_common.h
@@ -279,7 +279,7 @@ struct qib_base_info {
* may not be implemented; the user code must deal with this if it
* cares, or it must abort after initialization reports the difference.
*/
-#define QIB_USER_SWMINOR 12
+#define QIB_USER_SWMINOR 13
#define QIB_USER_SWVERSION ((QIB_USER_SWMAJOR << 16) | QIB_USER_SWMINOR)
@@ -701,7 +701,37 @@ struct qib_message_header {
__be32 bth[3];
/* fields below this point are in host byte order */
struct qib_header iph;
+ /* fields below are simplified, but should match PSM */
+ /* some are accessed by the driver when packet splitting is needed */
__u8 sub_opcode;
+ __u8 flags;
+ __u16 commidx;
+ __u32 ack_seq_num;
+ __u8 flowid;
+ __u8 hdr_dlen;
+ __u16 mqhdr;
+ __u32 uwords[4];
+};
+
+/* sequence number bits for message */
+union qib_seqnum {
+ struct {
+ __u32 seq:11;
+ __u32 gen:8;
+ __u32 flow:5;
+ };
+ struct {
+ __u32 pkt:16;
+ __u32 msg:8;
+ };
+ __u32 val;
+};
+
+/* qib receiving-dma tid-session-member */
+struct qib_tid_session_member {
+ __u16 tid;
+ __u16 offset;
+ __u16 length;
};
/* IB - LRH header consts */
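
A small aside on the qib_seqnum union introduced above (an illustrative sketch, not part of the patch): the same 32-bit PSN carried in bth[2] is viewed either as seq/gen/flow (tid-sdma) or as pkt/msg (plain sdma), so advancing the right counter is a plain bitfield increment, which is what the new header-splitting code in qib_user_sdma.c does. The helper name below is hypothetical.

/* Sketch: compute the next host-order bth[2] value for a cloned header;
 * the tiddma flag selects which view of the union is advanced.
 */
static u32 qib_next_bth2(u32 bth2_host, int tiddma)
{
	union qib_seqnum s;

	s.val = bth2_host;	/* caller already did be32_to_cpu() */
	if (tiddma)
		s.seq++;	/* 11-bit sequence within the flow */
	else
		s.pkt++;	/* 16-bit packet counter */
	return s.val;		/* caller converts back with cpu_to_be32() */
}
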
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index b51a51486cb..275f247f9fc 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1220,7 +1220,7 @@ static int qib_compatible_subctxts(int user_swmajor, int user_swminor)
return user_swminor == 3;
default:
/* >= 4 are compatible (or are expected to be) */
- return user_swminor >= 4;
+ return user_swminor <= QIB_USER_SWMINOR;
}
}
/* make no promises yet for future major versions */
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 21e8b09d4bf..016e7429adf 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -1596,6 +1596,8 @@ static void sdma_7322_p_errors(struct qib_pportdata *ppd, u64 errs)
struct qib_devdata *dd = ppd->dd;
errs &= QIB_E_P_SDMAERRS;
+ err_decode(ppd->cpspec->sdmamsgbuf, sizeof(ppd->cpspec->sdmamsgbuf),
+ errs, qib_7322p_error_msgs);
if (errs & QIB_E_P_SDMAUNEXPDATA)
qib_dev_err(dd, "IB%u:%u SDmaUnexpData\n", dd->unit,
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 36e048e0e1d..24e802f4ea2 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1193,7 +1193,7 @@ static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, qib_pci_tbl);
-struct pci_driver qib_driver = {
+static struct pci_driver qib_driver = {
.name = QIB_DRV_NAME,
.probe = qib_init_one,
.remove = qib_remove_one,
diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h
index 57bd3fa016b..28874f8606f 100644
--- a/drivers/infiniband/hw/qib/qib_mad.h
+++ b/drivers/infiniband/hw/qib/qib_mad.h
@@ -415,7 +415,6 @@ struct cc_table_shadow {
struct ib_cc_table_entry_shadow entries[CC_TABLE_SHADOW_MAX];
} __packed;
-#endif /* _QIB_MAD_H */
/*
* The PortSamplesControl.CounterMasks field is an array of 3 bit fields
* which specify the N'th counter's capabilities. See ch. 16.1.3.2.
@@ -428,3 +427,5 @@ struct cc_table_shadow {
COUNTER_MASK(1, 2) | \
COUNTER_MASK(1, 3) | \
COUNTER_MASK(1, 4))
+
+#endif /* _QIB_MAD_H */
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index c574ec7c85e..3f14009fb66 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -283,12 +283,12 @@ int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
goto bail;
}
- pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSIX);
+ pos = dd->pcidev->msix_cap;
if (nent && *nent && pos) {
qib_msix_setup(dd, pos, nent, entry);
ret = 0; /* did it, either MSIx or INTx */
} else {
- pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
+ pos = dd->pcidev->msi_cap;
if (pos)
ret = qib_msi_setup(dd, pos);
else
@@ -357,7 +357,7 @@ int qib_reinit_intr(struct qib_devdata *dd)
if (!dd->msi_lo)
goto bail;
- pos = pci_find_capability(dd->pcidev, PCI_CAP_ID_MSI);
+ pos = dd->pcidev->msi_cap;
if (!pos) {
qib_dev_err(dd,
"Can't find MSI capability, can't restore MSI settings\n");
@@ -426,7 +426,7 @@ void qib_enable_intx(struct pci_dev *pdev)
if (new != cw)
pci_write_config_word(pdev, PCI_COMMAND, new);
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+ pos = pdev->msi_cap;
if (pos) {
/* then turn off MSI */
pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &cw);
@@ -434,7 +434,7 @@ void qib_enable_intx(struct pci_dev *pdev)
if (new != cw)
pci_write_config_word(pdev, pos + PCI_MSI_FLAGS, new);
}
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ pos = pdev->msix_cap;
if (pos) {
/* then turn off MSIx */
pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &cw);
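
The qib_pcie.c hunks above replace pci_find_capability() walks with the msi_cap/msix_cap offsets that the PCI core caches in struct pci_dev at enumeration time. A tiny illustration of the resulting pattern follows; it is a sketch assuming <linux/pci.h> and a kernel that provides these cached fields, as the patch itself relies on, and the helper name is hypothetical.

/* Sketch: read the MSI flags word using the cached capability offset. */
static u16 qib_read_msi_flags(struct pci_dev *pdev)
{
	u16 cw = 0;

	if (pdev->msi_cap)	/* 0 means the capability is absent */
		pci_read_config_word(pdev, pdev->msi_cap + PCI_MSI_FLAGS, &cw);
	return cw;
}
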
diff --git a/drivers/infiniband/hw/qib/qib_sdma.c b/drivers/infiniband/hw/qib/qib_sdma.c
index 32162d35537..c6d6a54d2e1 100644
--- a/drivers/infiniband/hw/qib/qib_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_sdma.c
@@ -423,8 +423,11 @@ void qib_sdma_intr(struct qib_pportdata *ppd)
void __qib_sdma_intr(struct qib_pportdata *ppd)
{
- if (__qib_sdma_running(ppd))
+ if (__qib_sdma_running(ppd)) {
qib_sdma_make_progress(ppd);
+ if (!list_empty(&ppd->sdma_userpending))
+ qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
+ }
}
int qib_setup_sdma(struct qib_pportdata *ppd)
@@ -452,6 +455,9 @@ int qib_setup_sdma(struct qib_pportdata *ppd)
ppd->sdma_descq_removed = 0;
ppd->sdma_descq_added = 0;
+ ppd->sdma_intrequest = 0;
+ INIT_LIST_HEAD(&ppd->sdma_userpending);
+
INIT_LIST_HEAD(&ppd->sdma_activelist);
tasklet_init(&ppd->sdma_sw_clean_up_task, sdma_sw_clean_up_task,
@@ -717,7 +723,7 @@ void dump_sdma_state(struct qib_pportdata *ppd)
struct qib_sdma_txreq *txp, *txpnext;
__le64 *descqp;
u64 desc[2];
- dma_addr_t addr;
+ u64 addr;
u16 gen, dwlen, dwoffset;
u16 head, tail, cnt;
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index 82442085cbe..d0a0ea0c14d 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -53,20 +53,36 @@
#define QIB_USER_SDMA_DRAIN_TIMEOUT 500
struct qib_user_sdma_pkt {
- u8 naddr; /* dimension of addr (1..3) ... */
+ struct list_head list; /* list element */
+
+ u8 tiddma; /* if this is NEW tid-sdma */
+ u8 largepkt; /* this is large pkt from kmalloc */
+ u16 frag_size; /* frag size used by PSM */
+ u16 index; /* last header index or push index */
+ u16 naddr; /* dimension of addr (1..3) ... */
+ u16 addrlimit; /* addr array size */
+ u16 tidsmidx; /* current tidsm index */
+ u16 tidsmcount; /* tidsm array item count */
+ u16 payload_size; /* payload size so far for header */
+ u32 bytes_togo; /* bytes for processing */
u32 counter; /* sdma pkts queued counter for this entry */
+ struct qib_tid_session_member *tidsm; /* tid session member array */
+ struct qib_user_sdma_queue *pq; /* which pq this pkt belongs to */
u64 added; /* global descq number of entries */
struct {
- u32 offset; /* offset for kvaddr, addr */
- u32 length; /* length in page */
- u8 put_page; /* should we put_page? */
- u8 dma_mapped; /* is page dma_mapped? */
+ u16 offset; /* offset for kvaddr, addr */
+ u16 length; /* length in page */
+ u16 first_desc; /* first desc */
+ u16 last_desc; /* last desc */
+ u16 put_page; /* should we put_page? */
+ u16 dma_mapped; /* is page dma_mapped? */
+ u16 dma_length; /* for dma_unmap_page() */
+ u16 padding;
struct page *page; /* may be NULL (coherent mem) */
void *kvaddr; /* FIXME: only for pio hack */
dma_addr_t addr;
} addr[4]; /* max pages, any more and we coalesce */
- struct list_head list; /* list element */
};
struct qib_user_sdma_queue {
@@ -77,6 +93,12 @@ struct qib_user_sdma_queue {
*/
struct list_head sent;
+ /*
+ * Because the above list will be accessed by both the process and
+ * the interrupt handler, we need a spinlock for it.
+ */
+ spinlock_t sent_lock ____cacheline_aligned_in_smp;
+
/* headers with expected length are allocated from here... */
char header_cache_name[64];
struct dma_pool *header_cache;
@@ -88,6 +110,12 @@ struct qib_user_sdma_queue {
/* as packets go on the queued queue, they are counted... */
u32 counter;
u32 sent_counter;
+ /* pending packets, not sending yet */
+ u32 num_pending;
+ /* sending packets, not complete yet */
+ u32 num_sending;
+ /* global descq number of entry of last sending packet */
+ u64 added;
/* dma page table */
struct rb_root dma_pages_root;
@@ -107,8 +135,12 @@ qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
pq->counter = 0;
pq->sent_counter = 0;
- INIT_LIST_HEAD(&pq->sent);
+ pq->num_pending = 0;
+ pq->num_sending = 0;
+ pq->added = 0;
+ INIT_LIST_HEAD(&pq->sent);
+ spin_lock_init(&pq->sent_lock);
mutex_init(&pq->lock);
snprintf(pq->pkt_slab_name, sizeof(pq->pkt_slab_name),
@@ -144,34 +176,310 @@ done:
}
static void qib_user_sdma_init_frag(struct qib_user_sdma_pkt *pkt,
- int i, size_t offset, size_t len,
- int put_page, int dma_mapped,
- struct page *page,
- void *kvaddr, dma_addr_t dma_addr)
+ int i, u16 offset, u16 len,
+ u16 first_desc, u16 last_desc,
+ u16 put_page, u16 dma_mapped,
+ struct page *page, void *kvaddr,
+ dma_addr_t dma_addr, u16 dma_length)
{
pkt->addr[i].offset = offset;
pkt->addr[i].length = len;
+ pkt->addr[i].first_desc = first_desc;
+ pkt->addr[i].last_desc = last_desc;
pkt->addr[i].put_page = put_page;
pkt->addr[i].dma_mapped = dma_mapped;
pkt->addr[i].page = page;
pkt->addr[i].kvaddr = kvaddr;
pkt->addr[i].addr = dma_addr;
+ pkt->addr[i].dma_length = dma_length;
}
-static void qib_user_sdma_init_header(struct qib_user_sdma_pkt *pkt,
- u32 counter, size_t offset,
- size_t len, int dma_mapped,
- struct page *page,
- void *kvaddr, dma_addr_t dma_addr)
+static void *qib_user_sdma_alloc_header(struct qib_user_sdma_queue *pq,
+ size_t len, dma_addr_t *dma_addr)
{
- pkt->naddr = 1;
- pkt->counter = counter;
- qib_user_sdma_init_frag(pkt, 0, offset, len, 0, dma_mapped, page,
- kvaddr, dma_addr);
+ void *hdr;
+
+ if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
+ hdr = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
+ dma_addr);
+ else
+ hdr = NULL;
+
+ if (!hdr) {
+ hdr = kmalloc(len, GFP_KERNEL);
+ if (!hdr)
+ return NULL;
+
+ *dma_addr = 0;
+ }
+
+ return hdr;
+}
+
+static int qib_user_sdma_page_to_frags(const struct qib_devdata *dd,
+ struct qib_user_sdma_queue *pq,
+ struct qib_user_sdma_pkt *pkt,
+ struct page *page, u16 put,
+ u16 offset, u16 len, void *kvaddr)
+{
+ __le16 *pbc16;
+ void *pbcvaddr;
+ struct qib_message_header *hdr;
+ u16 newlen, pbclen, lastdesc, dma_mapped;
+ u32 vcto;
+ union qib_seqnum seqnum;
+ dma_addr_t pbcdaddr;
+ dma_addr_t dma_addr =
+ dma_map_page(&dd->pcidev->dev,
+ page, offset, len, DMA_TO_DEVICE);
+ int ret = 0;
+
+ if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
+ /*
+ * dma mapping error, pkt has not managed
+ * this page yet, return the page here so
+ * the caller can ignore this page.
+ */
+ if (put) {
+ put_page(page);
+ } else {
+ /* coalesce case */
+ kunmap(page);
+ __free_page(page);
+ }
+ ret = -ENOMEM;
+ goto done;
+ }
+ offset = 0;
+ dma_mapped = 1;
+
+
+next_fragment:
+
+ /*
+ * In tid-sdma, the transfer length is restricted by
+ * the receiver side's current tid page length.
+ */
+ if (pkt->tiddma && len > pkt->tidsm[pkt->tidsmidx].length)
+ newlen = pkt->tidsm[pkt->tidsmidx].length;
+ else
+ newlen = len;
+
+ /*
+ * Then the transfer length is restricted by MTU.
+ * The last descriptor flag is set when:
+ * 1. the current packet reaches the frag size length, or
+ * 2. the current tid page is done, in the tid-sdma case, or
+ * 3. there are no more bytes to go, in the plain sdma case.
+ */
+ lastdesc = 0;
+ if ((pkt->payload_size + newlen) >= pkt->frag_size) {
+ newlen = pkt->frag_size - pkt->payload_size;
+ lastdesc = 1;
+ } else if (pkt->tiddma) {
+ if (newlen == pkt->tidsm[pkt->tidsmidx].length)
+ lastdesc = 1;
+ } else {
+ if (newlen == pkt->bytes_togo)
+ lastdesc = 1;
+ }
+
+ /* fill the next fragment in this page */
+ qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
+ offset, newlen, /* offset, len */
+ 0, lastdesc, /* first last desc */
+ put, dma_mapped, /* put page, dma mapped */
+ page, kvaddr, /* struct page, virt addr */
+ dma_addr, len); /* dma addr, dma length */
+ pkt->bytes_togo -= newlen;
+ pkt->payload_size += newlen;
+ pkt->naddr++;
+ if (pkt->naddr == pkt->addrlimit) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ /* If there are no more bytes to go (lastdesc == 1). */
+ if (pkt->bytes_togo == 0) {
+ /* The packet is done, but the header is not dma mapped yet;
+ * it should be from kmalloc */
+ if (!pkt->addr[pkt->index].addr) {
+ pkt->addr[pkt->index].addr =
+ dma_map_single(&dd->pcidev->dev,
+ pkt->addr[pkt->index].kvaddr,
+ pkt->addr[pkt->index].dma_length,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&dd->pcidev->dev,
+ pkt->addr[pkt->index].addr)) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ pkt->addr[pkt->index].dma_mapped = 1;
+ }
+
+ goto done;
+ }
+
+ /* If tid-sdma, advance tid info. */
+ if (pkt->tiddma) {
+ pkt->tidsm[pkt->tidsmidx].length -= newlen;
+ if (pkt->tidsm[pkt->tidsmidx].length) {
+ pkt->tidsm[pkt->tidsmidx].offset += newlen;
+ } else {
+ pkt->tidsmidx++;
+ if (pkt->tidsmidx == pkt->tidsmcount) {
+ ret = -EFAULT;
+ goto done;
+ }
+ }
+ }
+
+ /*
+ * If this is NOT the last descriptor (newlen == len),
+ * the current packet is not done yet, but the current
+ * send-side page is done.
+ */
+ if (lastdesc == 0)
+ goto done;
+
+ /*
+ * When running this driver under PSM with a message size
+ * that fits into one transfer unit, it is not possible
+ * to reach this line; otherwise, it is a bug.
+ */
+
+ /*
+ * Since the current packet is done and there are more
+ * bytes to go, we need to create a new sdma header, copying
+ * from the previous sdma header and modifying both.
+ */
+ pbclen = pkt->addr[pkt->index].length;
+ pbcvaddr = qib_user_sdma_alloc_header(pq, pbclen, &pbcdaddr);
+ if (!pbcvaddr) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ /* Copy the previous sdma header to new sdma header */
+ pbc16 = (__le16 *)pkt->addr[pkt->index].kvaddr;
+ memcpy(pbcvaddr, pbc16, pbclen);
+
+ /* Modify the previous sdma header */
+ hdr = (struct qib_message_header *)&pbc16[4];
+
+ /* New pbc length */
+ pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->bytes_togo>>2));
+
+ /* New packet length */
+ hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));
+
+ if (pkt->tiddma) {
+ /* turn on the header suppression */
+ hdr->iph.pkt_flags =
+ cpu_to_le16(le16_to_cpu(hdr->iph.pkt_flags)|0x2);
+ /* turn off ACK_REQ: 0x04 and EXPECTED_DONE: 0x20 */
+ hdr->flags &= ~(0x04|0x20);
+ } else {
+ /* turn off extra bytes: 20-21 bits */
+ hdr->bth[0] = cpu_to_be32(be32_to_cpu(hdr->bth[0])&0xFFCFFFFF);
+ /* turn off ACK_REQ: 0x04 */
+ hdr->flags &= ~(0x04);
+ }
+
+ /* New kdeth checksum */
+ vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
+ hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
+ be16_to_cpu(hdr->lrh[2]) -
+ ((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
+ le16_to_cpu(hdr->iph.pkt_flags));
+
+ /* The packet is done, but the header is not dma mapped yet;
+ * it should be from kmalloc */
+ if (!pkt->addr[pkt->index].addr) {
+ pkt->addr[pkt->index].addr =
+ dma_map_single(&dd->pcidev->dev,
+ pkt->addr[pkt->index].kvaddr,
+ pkt->addr[pkt->index].dma_length,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&dd->pcidev->dev,
+ pkt->addr[pkt->index].addr)) {
+ ret = -ENOMEM;
+ goto done;
+ }
+ pkt->addr[pkt->index].dma_mapped = 1;
+ }
+
+ /* Modify the new sdma header */
+ pbc16 = (__le16 *)pbcvaddr;
+ hdr = (struct qib_message_header *)&pbc16[4];
+
+ /* New pbc length */
+ pbc16[0] = cpu_to_le16(le16_to_cpu(pbc16[0])-(pkt->payload_size>>2));
+
+ /* New packet length */
+ hdr->lrh[2] = cpu_to_be16(le16_to_cpu(pbc16[0]));
+
+ if (pkt->tiddma) {
+ /* Set new tid and offset for new sdma header */
+ hdr->iph.ver_ctxt_tid_offset = cpu_to_le32(
+ (le32_to_cpu(hdr->iph.ver_ctxt_tid_offset)&0xFF000000) +
+ (pkt->tidsm[pkt->tidsmidx].tid<<QLOGIC_IB_I_TID_SHIFT) +
+ (pkt->tidsm[pkt->tidsmidx].offset>>2));
+ } else {
+ /* Middle protocol new packet offset */
+ hdr->uwords[2] += pkt->payload_size;
+ }
+
+ /* New kdeth checksum */
+ vcto = le32_to_cpu(hdr->iph.ver_ctxt_tid_offset);
+ hdr->iph.chksum = cpu_to_le16(QIB_LRH_BTH +
+ be16_to_cpu(hdr->lrh[2]) -
+ ((vcto>>16)&0xFFFF) - (vcto&0xFFFF) -
+ le16_to_cpu(hdr->iph.pkt_flags));
+
+ /* Next sequence number in new sdma header */
+ seqnum.val = be32_to_cpu(hdr->bth[2]);
+ if (pkt->tiddma)
+ seqnum.seq++;
+ else
+ seqnum.pkt++;
+ hdr->bth[2] = cpu_to_be32(seqnum.val);
+
+ /* Init new sdma header. */
+ qib_user_sdma_init_frag(pkt, pkt->naddr, /* index */
+ 0, pbclen, /* offset, len */
+ 1, 0, /* first last desc */
+ 0, 0, /* put page, dma mapped */
+ NULL, pbcvaddr, /* struct page, virt addr */
+ pbcdaddr, pbclen); /* dma addr, dma length */
+ pkt->index = pkt->naddr;
+ pkt->payload_size = 0;
+ pkt->naddr++;
+ if (pkt->naddr == pkt->addrlimit) {
+ ret = -EFAULT;
+ goto done;
+ }
+
+ /* Prepare for next fragment in this page */
+ if (newlen != len) {
+ if (dma_mapped) {
+ put = 0;
+ dma_mapped = 0;
+ page = NULL;
+ kvaddr = NULL;
+ }
+ len -= newlen;
+ offset += newlen;
+
+ goto next_fragment;
+ }
+
+done:
+ return ret;
}
/* we've too many pages in the iovec, coalesce to a single page */
static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
+ struct qib_user_sdma_queue *pq,
struct qib_user_sdma_pkt *pkt,
const struct iovec *iov,
unsigned long niov)
@@ -182,7 +490,6 @@ static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
char *mpage;
int i;
int len = 0;
- dma_addr_t dma_addr;
if (!page) {
ret = -ENOMEM;
@@ -205,17 +512,8 @@ static int qib_user_sdma_coalesce(const struct qib_devdata *dd,
len += iov[i].iov_len;
}
- dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
- ret = -ENOMEM;
- goto free_unmap;
- }
-
- qib_user_sdma_init_frag(pkt, 1, 0, len, 0, 1, page, mpage_save,
- dma_addr);
- pkt->naddr = 2;
-
+ ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
+ page, 0, 0, len, mpage_save);
goto done;
free_unmap:
@@ -238,16 +536,6 @@ static int qib_user_sdma_num_pages(const struct iovec *iov)
return 1 + ((epage - spage) >> PAGE_SHIFT);
}
-/*
- * Truncate length to page boundary.
- */
-static int qib_user_sdma_page_length(unsigned long addr, unsigned long len)
-{
- const unsigned long offset = addr & ~PAGE_MASK;
-
- return ((offset + len) > PAGE_SIZE) ? (PAGE_SIZE - offset) : len;
-}
-
static void qib_user_sdma_free_pkt_frag(struct device *dev,
struct qib_user_sdma_queue *pq,
struct qib_user_sdma_pkt *pkt,
@@ -256,10 +544,11 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev,
const int i = frag;
if (pkt->addr[i].page) {
+ /* only user data has page */
if (pkt->addr[i].dma_mapped)
dma_unmap_page(dev,
pkt->addr[i].addr,
- pkt->addr[i].length,
+ pkt->addr[i].dma_length,
DMA_TO_DEVICE);
if (pkt->addr[i].kvaddr)
@@ -269,55 +558,81 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev,
put_page(pkt->addr[i].page);
else
__free_page(pkt->addr[i].page);
- } else if (pkt->addr[i].kvaddr)
- /* free coherent mem from cache... */
- dma_pool_free(pq->header_cache,
+ } else if (pkt->addr[i].kvaddr) {
+ /* for headers */
+ if (pkt->addr[i].dma_mapped) {
+ /* from kmalloc & dma mapped */
+ dma_unmap_single(dev,
+ pkt->addr[i].addr,
+ pkt->addr[i].dma_length,
+ DMA_TO_DEVICE);
+ kfree(pkt->addr[i].kvaddr);
+ } else if (pkt->addr[i].addr) {
+ /* free coherent mem from cache... */
+ dma_pool_free(pq->header_cache,
pkt->addr[i].kvaddr, pkt->addr[i].addr);
+ } else {
+ /* from kmalloc but not dma mapped */
+ kfree(pkt->addr[i].kvaddr);
+ }
+ }
}
/* return number of pages pinned... */
static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
+ struct qib_user_sdma_queue *pq,
struct qib_user_sdma_pkt *pkt,
unsigned long addr, int tlen, int npages)
{
- struct page *pages[2];
- int j;
- int ret;
-
- ret = get_user_pages(current, current->mm, addr,
- npages, 0, 1, pages, NULL);
-
- if (ret != npages) {
- int i;
-
- for (i = 0; i < ret; i++)
- put_page(pages[i]);
-
- ret = -ENOMEM;
- goto done;
- }
+ struct page *pages[8];
+ int i, j;
+ int ret = 0;
- for (j = 0; j < npages; j++) {
- /* map the pages... */
- const int flen = qib_user_sdma_page_length(addr, tlen);
- dma_addr_t dma_addr =
- dma_map_page(&dd->pcidev->dev,
- pages[j], 0, flen, DMA_TO_DEVICE);
- unsigned long fofs = addr & ~PAGE_MASK;
+ while (npages) {
+ if (npages > 8)
+ j = 8;
+ else
+ j = npages;
- if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
+ ret = get_user_pages(current, current->mm, addr,
+ j, 0, 1, pages, NULL);
+ if (ret != j) {
+ i = 0;
+ j = ret;
ret = -ENOMEM;
- goto done;
+ goto free_pages;
}
- qib_user_sdma_init_frag(pkt, pkt->naddr, fofs, flen, 1, 1,
- pages[j], kmap(pages[j]), dma_addr);
+ for (i = 0; i < j; i++) {
+ /* map the pages... */
+ unsigned long fofs = addr & ~PAGE_MASK;
+ int flen = ((fofs + tlen) > PAGE_SIZE) ?
+ (PAGE_SIZE - fofs) : tlen;
+
+ ret = qib_user_sdma_page_to_frags(dd, pq, pkt,
+ pages[i], 1, fofs, flen, NULL);
+ if (ret < 0) {
+ /* the current page has been taken
+ * care of inside the above call.
+ */
+ i++;
+ goto free_pages;
+ }
- pkt->naddr++;
- addr += flen;
- tlen -= flen;
+ addr += flen;
+ tlen -= flen;
+ }
+
+ npages -= j;
}
+ goto done;
+
+ /* if error, return all pages not managed by pkt */
+free_pages:
+ while (i < j)
+ put_page(pages[i++]);
+
done:
return ret;
}
@@ -335,7 +650,7 @@ static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
const int npages = qib_user_sdma_num_pages(iov + idx);
const unsigned long addr = (unsigned long) iov[idx].iov_base;
- ret = qib_user_sdma_pin_pages(dd, pkt, addr,
+ ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
iov[idx].iov_len, npages);
if (ret < 0)
goto free_pkt;
@@ -344,9 +659,22 @@ static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
goto done;
free_pkt:
- for (idx = 0; idx < pkt->naddr; idx++)
+ /* we need to ignore the first entry here */
+ for (idx = 1; idx < pkt->naddr; idx++)
qib_user_sdma_free_pkt_frag(&dd->pcidev->dev, pq, pkt, idx);
+ /* need to dma unmap the first entry; this restores the
+ * original state so that the caller can free the memory on the
+ * error path. The caller does not know if it was dma mapped. */
+ if (pkt->addr[0].dma_mapped) {
+ dma_unmap_single(&dd->pcidev->dev,
+ pkt->addr[0].addr,
+ pkt->addr[0].dma_length,
+ DMA_TO_DEVICE);
+ pkt->addr[0].addr = 0;
+ pkt->addr[0].dma_mapped = 0;
+ }
+
done:
return ret;
}
@@ -359,8 +687,9 @@ static int qib_user_sdma_init_payload(const struct qib_devdata *dd,
{
int ret = 0;
- if (npages >= ARRAY_SIZE(pkt->addr))
- ret = qib_user_sdma_coalesce(dd, pkt, iov, niov);
+ if (pkt->frag_size == pkt->bytes_togo &&
+ npages >= ARRAY_SIZE(pkt->addr))
+ ret = qib_user_sdma_coalesce(dd, pq, pkt, iov, niov);
else
ret = qib_user_sdma_pin_pkt(dd, pq, pkt, iov, niov);
@@ -380,7 +709,10 @@ static void qib_user_sdma_free_pkt_list(struct device *dev,
for (i = 0; i < pkt->naddr; i++)
qib_user_sdma_free_pkt_frag(dev, pq, pkt, i);
- kmem_cache_free(pq->pkt_slab, pkt);
+ if (pkt->largepkt)
+ kfree(pkt);
+ else
+ kmem_cache_free(pq->pkt_slab, pkt);
}
INIT_LIST_HEAD(list);
}
@@ -393,63 +725,48 @@ static void qib_user_sdma_free_pkt_list(struct device *dev,
* as, if there is an error we clean it...
*/
static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
+ struct qib_pportdata *ppd,
struct qib_user_sdma_queue *pq,
- struct list_head *list,
const struct iovec *iov,
unsigned long niov,
- int maxpkts)
+ struct list_head *list,
+ int *maxpkts, int *ndesc)
{
unsigned long idx = 0;
int ret = 0;
int npkts = 0;
- struct page *page = NULL;
__le32 *pbc;
dma_addr_t dma_addr;
struct qib_user_sdma_pkt *pkt = NULL;
size_t len;
size_t nw;
u32 counter = pq->counter;
- int dma_mapped = 0;
+ u16 frag_size;
- while (idx < niov && npkts < maxpkts) {
+ while (idx < niov && npkts < *maxpkts) {
const unsigned long addr = (unsigned long) iov[idx].iov_base;
const unsigned long idx_save = idx;
unsigned pktnw;
unsigned pktnwc;
int nfrags = 0;
int npages = 0;
+ int bytes_togo = 0;
+ int tiddma = 0;
int cfur;
- dma_mapped = 0;
len = iov[idx].iov_len;
nw = len >> 2;
- page = NULL;
-
- pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
- if (!pkt) {
- ret = -ENOMEM;
- goto free_list;
- }
if (len < QIB_USER_SDMA_MIN_HEADER_LENGTH ||
len > PAGE_SIZE || len & 3 || addr & 3) {
ret = -EINVAL;
- goto free_pkt;
+ goto free_list;
}
- if (len == QIB_USER_SDMA_EXP_HEADER_LENGTH)
- pbc = dma_pool_alloc(pq->header_cache, GFP_KERNEL,
- &dma_addr);
- else
- pbc = NULL;
-
+ pbc = qib_user_sdma_alloc_header(pq, len, &dma_addr);
if (!pbc) {
- page = alloc_page(GFP_KERNEL);
- if (!page) {
- ret = -ENOMEM;
- goto free_pkt;
- }
- pbc = kmap(page);
+ ret = -ENOMEM;
+ goto free_list;
}
cfur = copy_from_user(pbc, iov[idx].iov_base, len);
@@ -474,8 +791,8 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
* we can verify that the packet is consistent with the
* iovec lengths.
*/
- pktnw = le32_to_cpu(*pbc) & QIB_PBC_LENGTH_MASK;
- if (pktnw < pktnwc || pktnw > pktnwc + (PAGE_SIZE >> 2)) {
+ pktnw = le32_to_cpu(*pbc) & 0xFFFF;
+ if (pktnw < pktnwc) {
ret = -EINVAL;
goto free_pbc;
}
@@ -486,17 +803,14 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
const unsigned long faddr =
(unsigned long) iov[idx].iov_base;
- if (slen & 3 || faddr & 3 || !slen ||
- slen > PAGE_SIZE) {
+ if (slen & 3 || faddr & 3 || !slen) {
ret = -EINVAL;
goto free_pbc;
}
- npages++;
- if ((faddr & PAGE_MASK) !=
- ((faddr + slen - 1) & PAGE_MASK))
- npages++;
+ npages += qib_user_sdma_num_pages(&iov[idx]);
+ bytes_togo += slen;
pktnwc += slen >> 2;
idx++;
nfrags++;
@@ -507,48 +821,139 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
goto free_pbc;
}
- if (page) {
- dma_addr = dma_map_page(&dd->pcidev->dev,
- page, 0, len, DMA_TO_DEVICE);
- if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
+ frag_size = ((le32_to_cpu(*pbc))>>16) & 0xFFFF;
+ if (((frag_size ? frag_size : bytes_togo) + len) >
+ ppd->ibmaxlen) {
+ ret = -EINVAL;
+ goto free_pbc;
+ }
+
+ if (frag_size) {
+ int pktsize, tidsmsize, n;
+
+ n = npages*((2*PAGE_SIZE/frag_size)+1);
+ pktsize = sizeof(*pkt) + sizeof(pkt->addr[0])*n;
+
+ /*
+ * Determine if this is tid-sdma or just sdma.
+ */
+ tiddma = (((le32_to_cpu(pbc[7])>>
+ QLOGIC_IB_I_TID_SHIFT)&
+ QLOGIC_IB_I_TID_MASK) !=
+ QLOGIC_IB_I_TID_MASK);
+
+ if (tiddma)
+ tidsmsize = iov[idx].iov_len;
+ else
+ tidsmsize = 0;
+
+ pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
+ if (!pkt) {
ret = -ENOMEM;
goto free_pbc;
}
+ pkt->largepkt = 1;
+ pkt->frag_size = frag_size;
+ pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);
+
+ if (tiddma) {
+ char *tidsm = (char *)pkt + pktsize;
+ cfur = copy_from_user(tidsm,
+ iov[idx].iov_base, tidsmsize);
+ if (cfur) {
+ ret = -EFAULT;
+ goto free_pkt;
+ }
+ pkt->tidsm =
+ (struct qib_tid_session_member *)tidsm;
+ pkt->tidsmcount = tidsmsize/
+ sizeof(struct qib_tid_session_member);
+ pkt->tidsmidx = 0;
+ idx++;
+ }
- dma_mapped = 1;
+ /*
+ * the pbc 'fill1' field is borrowed to pass the frag size;
+ * we need to clear it after picking up the frag size, since
+ * the hardware requires this field to be zero.
+ */
+ *pbc = cpu_to_le32(le32_to_cpu(*pbc) & 0x0000FFFF);
+ } else {
+ pkt = kmem_cache_alloc(pq->pkt_slab, GFP_KERNEL);
+ if (!pkt) {
+ ret = -ENOMEM;
+ goto free_pbc;
+ }
+ pkt->largepkt = 0;
+ pkt->frag_size = bytes_togo;
+ pkt->addrlimit = ARRAY_SIZE(pkt->addr);
}
-
- qib_user_sdma_init_header(pkt, counter, 0, len, dma_mapped,
- page, pbc, dma_addr);
+ pkt->bytes_togo = bytes_togo;
+ pkt->payload_size = 0;
+ pkt->counter = counter;
+ pkt->tiddma = tiddma;
+
+ /* setup the first header */
+ qib_user_sdma_init_frag(pkt, 0, /* index */
+ 0, len, /* offset, len */
+ 1, 0, /* first last desc */
+ 0, 0, /* put page, dma mapped */
+ NULL, pbc, /* struct page, virt addr */
+ dma_addr, len); /* dma addr, dma length */
+ pkt->index = 0;
+ pkt->naddr = 1;
if (nfrags) {
ret = qib_user_sdma_init_payload(dd, pq, pkt,
iov + idx_save + 1,
nfrags, npages);
if (ret < 0)
- goto free_pbc_dma;
+ goto free_pkt;
+ } else {
+ /* since there is no payload, mark the
+ * header as the last desc. */
+ pkt->addr[0].last_desc = 1;
+
+ if (dma_addr == 0) {
+ /*
+ * the header is not dma mapped yet.
+ * it should be from kmalloc.
+ */
+ dma_addr = dma_map_single(&dd->pcidev->dev,
+ pbc, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&dd->pcidev->dev,
+ dma_addr)) {
+ ret = -ENOMEM;
+ goto free_pkt;
+ }
+ pkt->addr[0].addr = dma_addr;
+ pkt->addr[0].dma_mapped = 1;
+ }
}
counter++;
npkts++;
+ pkt->pq = pq;
+ pkt->index = 0; /* reset index for push on hw */
+ *ndesc += pkt->naddr;
list_add_tail(&pkt->list, list);
}
+ *maxpkts = npkts;
ret = idx;
goto done;
-free_pbc_dma:
- if (dma_mapped)
- dma_unmap_page(&dd->pcidev->dev, dma_addr, len, DMA_TO_DEVICE);
+free_pkt:
+ if (pkt->largepkt)
+ kfree(pkt);
+ else
+ kmem_cache_free(pq->pkt_slab, pkt);
free_pbc:
- if (page) {
- kunmap(page);
- __free_page(page);
- } else
+ if (dma_addr)
dma_pool_free(pq->header_cache, pbc, dma_addr);
-free_pkt:
- kmem_cache_free(pq->pkt_slab, pkt);
+ else
+ kfree(pbc);
free_list:
qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, list);
done:
@@ -569,10 +974,20 @@ static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
struct list_head free_list;
struct qib_user_sdma_pkt *pkt;
struct qib_user_sdma_pkt *pkt_prev;
+ unsigned long flags;
int ret = 0;
+ if (!pq->num_sending)
+ return 0;
+
INIT_LIST_HEAD(&free_list);
+ /*
+ * We need this spinlock here because the interrupt handler
+ * might modify this list in qib_user_sdma_send_desc(); also,
+ * we must not get interrupted, otherwise it is a deadlock.
+ */
+ spin_lock_irqsave(&pq->sent_lock, flags);
list_for_each_entry_safe(pkt, pkt_prev, &pq->sent, list) {
s64 descd = ppd->sdma_descq_removed - pkt->added;
@@ -583,7 +998,9 @@ static int qib_user_sdma_queue_clean(struct qib_pportdata *ppd,
/* one more packet cleaned */
ret++;
+ pq->num_sending--;
}
+ spin_unlock_irqrestore(&pq->sent_lock, flags);
if (!list_empty(&free_list)) {
u32 counter;
@@ -627,6 +1044,7 @@ void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
struct qib_user_sdma_queue *pq)
{
struct qib_devdata *dd = ppd->dd;
+ unsigned long flags;
int i;
if (!pq)
@@ -634,7 +1052,7 @@ void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
for (i = 0; i < QIB_USER_SDMA_DRAIN_TIMEOUT; i++) {
mutex_lock(&pq->lock);
- if (list_empty(&pq->sent)) {
+ if (!pq->num_pending && !pq->num_sending) {
mutex_unlock(&pq->lock);
break;
}
@@ -644,29 +1062,44 @@ void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
msleep(10);
}
- if (!list_empty(&pq->sent)) {
+ if (pq->num_pending || pq->num_sending) {
+ struct qib_user_sdma_pkt *pkt;
+ struct qib_user_sdma_pkt *pkt_prev;
struct list_head free_list;
+ mutex_lock(&pq->lock);
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+ /*
+ * Since we hold sdma_lock, it is safe without sent_lock.
+ */
+ if (pq->num_pending) {
+ list_for_each_entry_safe(pkt, pkt_prev,
+ &ppd->sdma_userpending, list) {
+ if (pkt->pq == pq) {
+ list_move_tail(&pkt->list, &pq->sent);
+ pq->num_pending--;
+ pq->num_sending++;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+
qib_dev_err(dd, "user sdma lists not empty: forcing!\n");
INIT_LIST_HEAD(&free_list);
- mutex_lock(&pq->lock);
list_splice_init(&pq->sent, &free_list);
+ pq->num_sending = 0;
qib_user_sdma_free_pkt_list(&dd->pcidev->dev, pq, &free_list);
mutex_unlock(&pq->lock);
}
}
-static inline __le64 qib_sdma_make_desc0(struct qib_pportdata *ppd,
+static inline __le64 qib_sdma_make_desc0(u8 gen,
u64 addr, u64 dwlen, u64 dwoffset)
{
- u8 tmpgen;
-
- tmpgen = ppd->sdma_generation;
-
return cpu_to_le64(/* SDmaPhyAddr[31:0] */
((addr & 0xfffffffcULL) << 32) |
/* SDmaGeneration[1:0] */
- ((tmpgen & 3ULL) << 30) |
+ ((gen & 3ULL) << 30) |
/* SDmaDwordCount[10:0] */
((dwlen & 0x7ffULL) << 16) |
/* SDmaBufOffset[12:2] */
@@ -692,7 +1125,7 @@ static inline __le64 qib_sdma_make_desc1(u64 addr)
static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
struct qib_user_sdma_pkt *pkt, int idx,
- unsigned ofs, u16 tail)
+ unsigned ofs, u16 tail, u8 gen)
{
const u64 addr = (u64) pkt->addr[idx].addr +
(u64) pkt->addr[idx].offset;
@@ -702,104 +1135,132 @@ static void qib_user_sdma_send_frag(struct qib_pportdata *ppd,
descqp = &ppd->sdma_descq[tail].qw[0];
- descq0 = qib_sdma_make_desc0(ppd, addr, dwlen, ofs);
- if (idx == 0)
+ descq0 = qib_sdma_make_desc0(gen, addr, dwlen, ofs);
+ if (pkt->addr[idx].first_desc)
descq0 = qib_sdma_make_first_desc0(descq0);
- if (idx == pkt->naddr - 1)
+ if (pkt->addr[idx].last_desc) {
descq0 = qib_sdma_make_last_desc0(descq0);
+ if (ppd->sdma_intrequest) {
+ descq0 |= cpu_to_le64(1ULL << 15);
+ ppd->sdma_intrequest = 0;
+ }
+ }
descqp[0] = descq0;
descqp[1] = qib_sdma_make_desc1(addr);
}
-/* pq->lock must be held, get packets on the wire... */
-static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
- struct qib_user_sdma_queue *pq,
- struct list_head *pktlist)
+void qib_user_sdma_send_desc(struct qib_pportdata *ppd,
+ struct list_head *pktlist)
{
struct qib_devdata *dd = ppd->dd;
- int ret = 0;
- unsigned long flags;
- u16 tail;
- u8 generation;
- u64 descq_added;
-
- if (list_empty(pktlist))
- return 0;
+ u16 nfree, nsent;
+ u16 tail, tail_c;
+ u8 gen, gen_c;
- if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
- return -ECOMM;
-
- spin_lock_irqsave(&ppd->sdma_lock, flags);
-
- /* keep a copy for restoring purposes in case of problems */
- generation = ppd->sdma_generation;
- descq_added = ppd->sdma_descq_added;
-
- if (unlikely(!__qib_sdma_running(ppd))) {
- ret = -ECOMM;
- goto unlock;
- }
+ nfree = qib_sdma_descq_freecnt(ppd);
+ if (!nfree)
+ return;
- tail = ppd->sdma_descq_tail;
+retry:
+ nsent = 0;
+ tail_c = tail = ppd->sdma_descq_tail;
+ gen_c = gen = ppd->sdma_generation;
while (!list_empty(pktlist)) {
struct qib_user_sdma_pkt *pkt =
list_entry(pktlist->next, struct qib_user_sdma_pkt,
list);
- int i;
+ int i, j, c = 0;
unsigned ofs = 0;
u16 dtail = tail;
- if (pkt->naddr > qib_sdma_descq_freecnt(ppd))
- goto unlock_check_tail;
-
- for (i = 0; i < pkt->naddr; i++) {
- qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail);
+ for (i = pkt->index; i < pkt->naddr && nfree; i++) {
+ qib_user_sdma_send_frag(ppd, pkt, i, ofs, tail, gen);
ofs += pkt->addr[i].length >> 2;
if (++tail == ppd->sdma_descq_cnt) {
tail = 0;
- ++ppd->sdma_generation;
+ ++gen;
+ ppd->sdma_intrequest = 1;
+ } else if (tail == (ppd->sdma_descq_cnt>>1)) {
+ ppd->sdma_intrequest = 1;
}
- }
+ nfree--;
+ if (pkt->addr[i].last_desc == 0)
+ continue;
- if ((ofs << 2) > ppd->ibmaxlen) {
- ret = -EMSGSIZE;
- goto unlock;
- }
-
- /*
- * If the packet is >= 2KB mtu equivalent, we have to use
- * the large buffers, and have to mark each descriptor as
- * part of a large buffer packet.
- */
- if (ofs > dd->piosize2kmax_dwords) {
- for (i = 0; i < pkt->naddr; i++) {
- ppd->sdma_descq[dtail].qw[0] |=
- cpu_to_le64(1ULL << 14);
- if (++dtail == ppd->sdma_descq_cnt)
- dtail = 0;
+ /*
+ * If the packet is >= 2KB mtu equivalent, we
+ * have to use the large buffers, and have to
+ * mark each descriptor as part of a large
+ * buffer packet.
+ */
+ if (ofs > dd->piosize2kmax_dwords) {
+ for (j = pkt->index; j <= i; j++) {
+ ppd->sdma_descq[dtail].qw[0] |=
+ cpu_to_le64(1ULL << 14);
+ if (++dtail == ppd->sdma_descq_cnt)
+ dtail = 0;
+ }
}
+ c += i + 1 - pkt->index;
+ pkt->index = i + 1; /* index for next first */
+ tail_c = dtail = tail;
+ gen_c = gen;
+ ofs = 0; /* reset for next packet */
}
- ppd->sdma_descq_added += pkt->naddr;
- pkt->added = ppd->sdma_descq_added;
- list_move_tail(&pkt->list, &pq->sent);
- ret++;
+ ppd->sdma_descq_added += c;
+ nsent += c;
+ if (pkt->index == pkt->naddr) {
+ pkt->added = ppd->sdma_descq_added;
+ pkt->pq->added = pkt->added;
+ pkt->pq->num_pending--;
+ spin_lock(&pkt->pq->sent_lock);
+ pkt->pq->num_sending++;
+ list_move_tail(&pkt->list, &pkt->pq->sent);
+ spin_unlock(&pkt->pq->sent_lock);
+ }
+ if (!nfree || (nsent<<2) > ppd->sdma_descq_cnt)
+ break;
}
-unlock_check_tail:
/* advance the tail on the chip if necessary */
- if (ppd->sdma_descq_tail != tail)
- dd->f_sdma_update_tail(ppd, tail);
+ if (ppd->sdma_descq_tail != tail_c) {
+ ppd->sdma_generation = gen_c;
+ dd->f_sdma_update_tail(ppd, tail_c);
+ }
-unlock:
- if (unlikely(ret < 0)) {
- ppd->sdma_generation = generation;
- ppd->sdma_descq_added = descq_added;
+ if (nfree && !list_empty(pktlist))
+ goto retry;
+
+ return;
+}
+
+/* pq->lock must be held, get packets on the wire... */
+static int qib_user_sdma_push_pkts(struct qib_pportdata *ppd,
+ struct qib_user_sdma_queue *pq,
+ struct list_head *pktlist, int count)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ if (unlikely(!(ppd->lflags & QIBL_LINKACTIVE)))
+ return -ECOMM;
+
+ spin_lock_irqsave(&ppd->sdma_lock, flags);
+
+ if (unlikely(!__qib_sdma_running(ppd))) {
+ ret = -ECOMM;
+ goto unlock;
}
- spin_unlock_irqrestore(&ppd->sdma_lock, flags);
+ pq->num_pending += count;
+ list_splice_tail_init(pktlist, &ppd->sdma_userpending);
+ qib_user_sdma_send_desc(ppd, &ppd->sdma_userpending);
+
+unlock:
+ spin_unlock_irqrestore(&ppd->sdma_lock, flags);
return ret;
}
@@ -822,19 +1283,23 @@ int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
if (!qib_sdma_running(ppd))
goto done_unlock;
- if (ppd->sdma_descq_added != ppd->sdma_descq_removed) {
+ /* if I have packets not complete yet */
+ if (pq->added > ppd->sdma_descq_removed)
qib_user_sdma_hwqueue_clean(ppd);
+ /* if there are completed packets to be freed */
+ if (pq->num_sending)
qib_user_sdma_queue_clean(ppd, pq);
- }
while (dim) {
- const int mxp = 8;
+ int mxp = 8;
+ int ndesc = 0;
down_write(&current->mm->mmap_sem);
- ret = qib_user_sdma_queue_pkts(dd, pq, &list, iov, dim, mxp);
+ ret = qib_user_sdma_queue_pkts(dd, ppd, pq,
+ iov, dim, &list, &mxp, &ndesc);
up_write(&current->mm->mmap_sem);
- if (ret <= 0)
+ if (ret < 0)
goto done_unlock;
else {
dim -= ret;
@@ -844,24 +1309,20 @@ int qib_user_sdma_writev(struct qib_ctxtdata *rcd,
/* force packets onto the sdma hw queue... */
if (!list_empty(&list)) {
/*
- * Lazily clean hw queue. the 4 is a guess of about
- * how many sdma descriptors a packet will take (it
- * doesn't have to be perfect).
+ * Lazily clean hw queue.
*/
- if (qib_sdma_descq_freecnt(ppd) < ret * 4) {
+ if (qib_sdma_descq_freecnt(ppd) < ndesc) {
qib_user_sdma_hwqueue_clean(ppd);
- qib_user_sdma_queue_clean(ppd, pq);
+ if (pq->num_sending)
+ qib_user_sdma_queue_clean(ppd, pq);
}
- ret = qib_user_sdma_push_pkts(ppd, pq, &list);
+ ret = qib_user_sdma_push_pkts(ppd, pq, &list, mxp);
if (ret < 0)
goto done_unlock;
else {
- npkts += ret;
- pq->counter += ret;
-
- if (!list_empty(&list))
- goto done_unlock;
+ npkts += mxp;
+ pq->counter += mxp;
}
}
}
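
The hunks above cap each descriptor batch so the sender neither exhausts the free-descriptor budget nor consumes more than roughly a quarter of the ring in one pass. A minimal standalone sketch of that throttle, with plain integers standing in for the driver state and an invented helper name:

#include <stdbool.h>

/* Stop batching when no free descriptors remain, or once this batch has
 * used more than a quarter of the descriptor ring
 * ((nsent << 2) > descq_cnt is the nsent-exceeds-a-quarter test). */
static bool sdma_batch_done(int nfree, int nsent, int descq_cnt)
{
	return !nfree || (nsent << 2) > descq_cnt;
}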
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 3eceb61e353..7a3175400b2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -817,7 +817,6 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
if (neigh) {
neigh->cm = NULL;
- list_del(&neigh->list);
ipoib_neigh_free(neigh);
tx->neigh = NULL;
@@ -1234,7 +1233,6 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
if (neigh) {
neigh->cm = NULL;
- list_del(&neigh->list);
ipoib_neigh_free(neigh);
tx->neigh = NULL;
@@ -1325,7 +1323,6 @@ static void ipoib_cm_tx_start(struct work_struct *work)
neigh = p->neigh;
if (neigh) {
neigh->cm = NULL;
- list_del(&neigh->list);
ipoib_neigh_free(neigh);
}
list_del(&p->list);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 2cfa76f5d99..196b1d13cbc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -932,12 +932,47 @@ int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
return 0;
}
+/*
+ * Takes whatever value is in pkey index 0 and updates priv->pkey;
+ * returns 0 if the pkey value was changed.
+ */
+static inline int update_parent_pkey(struct ipoib_dev_priv *priv)
+{
+ int result;
+ u16 prev_pkey;
+
+ prev_pkey = priv->pkey;
+ result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
+ if (result) {
+ ipoib_warn(priv, "ib_query_pkey port %d failed (ret = %d)\n",
+ priv->port, result);
+ return result;
+ }
+
+ priv->pkey |= 0x8000;
+
+ if (prev_pkey != priv->pkey) {
+ ipoib_dbg(priv, "pkey changed from 0x%x to 0x%x\n",
+ prev_pkey, priv->pkey);
+ /*
+ * Update the pkey in the broadcast address, while making sure to set
+ * the full membership bit, so that we join the right broadcast group.
+ */
+ priv->dev->broadcast[8] = priv->pkey >> 8;
+ priv->dev->broadcast[9] = priv->pkey & 0xff;
+ return 0;
+ }
+
+ return 1;
+}
+
static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
enum ipoib_flush_level level)
{
struct ipoib_dev_priv *cpriv;
struct net_device *dev = priv->dev;
u16 new_index;
+ int result;
mutex_lock(&priv->vlan_mutex);
@@ -951,6 +986,10 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
mutex_unlock(&priv->vlan_mutex);
if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) {
+ /* for non-child devices we must check/update the pkey value here */
+ if (level == IPOIB_FLUSH_HEAVY &&
+ !test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags))
+ update_parent_pkey(priv);
ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n");
return;
}
@@ -961,21 +1000,32 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
}
if (level == IPOIB_FLUSH_HEAVY) {
- if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
- clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
- ipoib_ib_dev_down(dev, 0);
- ipoib_ib_dev_stop(dev, 0);
- if (ipoib_pkey_dev_delay_open(dev))
+ /* child devices chase their origin pkey value, while non-child
+ * (parent) devices should always take what is present in pkey index 0
+ */
+ if (test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
+ if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) {
+ clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
+ ipoib_ib_dev_down(dev, 0);
+ ipoib_ib_dev_stop(dev, 0);
+ if (ipoib_pkey_dev_delay_open(dev))
+ return;
+ }
+ /* restart QP only if P_Key index is changed */
+ if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
+ new_index == priv->pkey_index) {
+ ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
return;
+ }
+ priv->pkey_index = new_index;
+ } else {
+ result = update_parent_pkey(priv);
+ /* restart QP only if P_Key value changed */
+ if (result) {
+ ipoib_dbg(priv, "Not flushing - P_Key value not changed.\n");
+ return;
+ }
}
-
- /* restart QP only if P_Key index is changed */
- if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) &&
- new_index == priv->pkey_index) {
- ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n");
- return;
- }
- priv->pkey_index = new_index;
}
if (level == IPOIB_FLUSH_LIGHT) {
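
update_parent_pkey() above folds the full-membership bit into the P_Key and mirrors it into bytes 8 and 9 of the broadcast address. A minimal standalone sketch of just that encoding; ipoib_set_broadcast_pkey is a hypothetical helper, not part of the patch:

#include <stdint.h>

/* P_Key bit 15 marks full membership; the broadcast GID carries the P_Key
 * in bytes 8 (high) and 9 (low), which is what the hunks above update. */
static void ipoib_set_broadcast_pkey(uint8_t *broadcast, uint16_t pkey)
{
	pkey |= 0x8000;
	broadcast[8] = pkey >> 8;
	broadcast[9] = pkey & 0xff;
}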
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index b6e049a3c7a..82cec1af902 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -493,7 +493,6 @@ static void path_rec_completion(int status,
path,
neigh));
if (!ipoib_cm_get(neigh)) {
- list_del(&neigh->list);
ipoib_neigh_free(neigh);
continue;
}
@@ -618,7 +617,6 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
if (!ipoib_cm_get(neigh))
ipoib_cm_set(neigh, ipoib_cm_create_tx(dev, path, neigh));
if (!ipoib_cm_get(neigh)) {
- list_del(&neigh->list);
ipoib_neigh_free(neigh);
goto err_drop;
}
@@ -639,7 +637,7 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
neigh->ah = NULL;
if (!path->query && path_rec_start(dev, path))
- goto err_list;
+ goto err_path;
__skb_queue_tail(&neigh->queue, skb);
}
@@ -648,9 +646,6 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr,
ipoib_neigh_put(neigh);
return;
-err_list:
- list_del(&neigh->list);
-
err_path:
ipoib_neigh_free(neigh);
err_drop:
@@ -1098,6 +1093,8 @@ void ipoib_neigh_free(struct ipoib_neigh *neigh)
rcu_assign_pointer(*np,
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
+ /* remove from parent list */
+ list_del(&neigh->list);
call_rcu(&neigh->rcu, ipoib_neigh_reclaim);
return;
} else {
@@ -1461,7 +1458,7 @@ static ssize_t create_child(struct device *dev,
if (sscanf(buf, "%i", &pkey) != 1)
return -EINVAL;
- if (pkey < 0 || pkey > 0xffff)
+ if (pkey <= 0 || pkey > 0xffff || pkey == 0x8000)
return -EINVAL;
/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index 74685936c94..f81abe16cf0 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -119,6 +119,15 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
} else
child_pkey = nla_get_u16(data[IFLA_IPOIB_PKEY]);
+ if (child_pkey == 0 || child_pkey == 0x8000)
+ return -EINVAL;
+
+ /*
+ * Set the full membership bit, so that we join the right
+ * broadcast group, etc.
+ */
+ child_pkey |= 0x8000;
+
err = __ipoib_vlan_add(ppriv, netdev_priv(dev), child_pkey, IPOIB_RTNL_CHILD);
if (!err && data)
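
Both the sysfs create_child path and the netlink path above reject the same two P_Key values before OR-ing in the membership bit. A small sketch of that validation; the helper is illustrative, not part of the patch:

#include <stdbool.h>

/* 0 is not a valid P_Key, and 0x8000 would be an empty key once the
 * full-membership bit is masked off, so both are rejected up front. */
static bool ipoib_child_pkey_valid(unsigned int pkey)
{
	return pkey != 0 && pkey != 0x8000 && pkey <= 0xffff;
}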
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 2e84ef859c5..dd03cfe596d 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -347,6 +347,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_iser_conn *iser_conn;
+ struct iscsi_session *session;
struct iser_conn *ib_conn;
struct iscsi_endpoint *ep;
int error;
@@ -365,7 +366,8 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
}
ib_conn = ep->dd_data;
- if (iser_alloc_rx_descriptors(ib_conn))
+ session = conn->session;
+ if (iser_alloc_rx_descriptors(ib_conn, session))
return -ENOMEM;
/* binds the iSER connection retrieved from the previously
@@ -419,12 +421,13 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
struct iscsi_cls_session *cls_session;
struct iscsi_session *session;
struct Scsi_Host *shost;
- struct iser_conn *ib_conn;
+ struct iser_conn *ib_conn = NULL;
shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
if (!shost)
return NULL;
shost->transportt = iscsi_iser_scsi_transport;
+ shost->cmd_per_lun = qdepth;
shost->max_lun = iscsi_max_lun;
shost->max_id = 0;
shost->max_channel = 0;
@@ -441,12 +444,14 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
ep ? ib_conn->device->ib_device->dma_device : NULL))
goto free_host;
- /*
- * we do not support setting can_queue cmd_per_lun from userspace yet
- * because we preallocate so many resources
- */
+ if (cmds_max > ISER_DEF_XMIT_CMDS_MAX) {
+ iser_info("cmds_max changed from %u to %u\n",
+ cmds_max, ISER_DEF_XMIT_CMDS_MAX);
+ cmds_max = ISER_DEF_XMIT_CMDS_MAX;
+ }
+
cls_session = iscsi_session_setup(&iscsi_iser_transport, shost,
- ISCSI_DEF_XMIT_CMDS_MAX, 0,
+ cmds_max, 0,
sizeof(struct iscsi_iser_task),
initial_cmdsn, 0);
if (!cls_session)
@@ -672,6 +677,7 @@ static umode_t iser_attr_is_visible(int param_type, int param)
case ISCSI_PARAM_TGT_RESET_TMO:
case ISCSI_PARAM_IFACE_NAME:
case ISCSI_PARAM_INITIATOR_NAME:
+ case ISCSI_PARAM_DISCOVERY_SESS:
return S_IRUGO;
default:
return 0;
@@ -701,7 +707,7 @@ static struct scsi_host_template iscsi_iser_sht = {
static struct iscsi_transport iscsi_iser_transport = {
.owner = THIS_MODULE,
.name = "iser",
- .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T,
+ .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_TEXT_NEGO,
/* session management */
.create_session = iscsi_iser_session_create,
.destroy_session = iscsi_iser_session_destroy,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 4f069c0d4c0..67914027c61 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -78,14 +78,14 @@
#define iser_warn(fmt, arg...) \
do { \
- if (iser_debug_level > 1) \
+ if (iser_debug_level > 0) \
pr_warn(PFX "%s:" fmt, \
__func__ , ## arg); \
} while (0)
#define iser_info(fmt, arg...) \
do { \
- if (iser_debug_level > 0) \
+ if (iser_debug_level > 1) \
pr_info(PFX "%s:" fmt, \
__func__ , ## arg); \
} while (0)
@@ -102,7 +102,13 @@
/* support up to 512KB in one RDMA */
#define ISCSI_ISER_SG_TABLESIZE (0x80000 >> SHIFT_4K)
-#define ISER_DEF_CMD_PER_LUN ISCSI_DEF_XMIT_CMDS_MAX
+#define ISER_DEF_XMIT_CMDS_DEFAULT 512
+#if ISCSI_DEF_XMIT_CMDS_MAX > ISER_DEF_XMIT_CMDS_DEFAULT
+ #define ISER_DEF_XMIT_CMDS_MAX ISCSI_DEF_XMIT_CMDS_MAX
+#else
+ #define ISER_DEF_XMIT_CMDS_MAX ISER_DEF_XMIT_CMDS_DEFAULT
+#endif
+#define ISER_DEF_CMD_PER_LUN ISER_DEF_XMIT_CMDS_MAX
/* QP settings */
/* Maximal bounds on received asynchronous PDUs */
@@ -111,9 +117,9 @@
#define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), *
* SCSI_TMFUNC(2), LOGOUT(1) */
-#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX)
+#define ISER_QP_MAX_RECV_DTOS (ISER_DEF_XMIT_CMDS_MAX)
-#define ISER_MIN_POSTED_RX (ISCSI_DEF_XMIT_CMDS_MAX >> 2)
+#define ISER_MIN_POSTED_RX (ISER_DEF_XMIT_CMDS_MAX >> 2)
/* the max TX (send) WR supported by the iSER QP is defined by *
* max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect *
@@ -123,7 +129,7 @@
#define ISER_INFLIGHT_DATAOUTS 8
-#define ISER_QP_MAX_REQ_DTOS (ISCSI_DEF_XMIT_CMDS_MAX * \
+#define ISER_QP_MAX_REQ_DTOS (ISER_DEF_XMIT_CMDS_MAX * \
(1 + ISER_INFLIGHT_DATAOUTS) + \
ISER_MAX_TX_MISC_PDUS + \
ISER_MAX_RX_MISC_PDUS)
@@ -205,7 +211,7 @@ struct iser_mem_reg {
u64 va;
u64 len;
void *mem_h;
- int is_fmr;
+ int is_mr;
};
struct iser_regd_buf {
@@ -246,6 +252,9 @@ struct iser_rx_desc {
#define ISER_MAX_CQ 4
+struct iser_conn;
+struct iscsi_iser_task;
+
struct iser_device {
struct ib_device *ib_device;
struct ib_pd *pd;
@@ -259,6 +268,22 @@ struct iser_device {
int cq_active_qps[ISER_MAX_CQ];
int cqs_used;
struct iser_cq_desc *cq_desc;
+ int (*iser_alloc_rdma_reg_res)(struct iser_conn *ib_conn,
+ unsigned cmds_max);
+ void (*iser_free_rdma_reg_res)(struct iser_conn *ib_conn);
+ int (*iser_reg_rdma_mem)(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir);
+ void (*iser_unreg_rdma_mem)(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir);
+};
+
+struct fast_reg_descriptor {
+ struct list_head list;
+ /* For fast registration - FRWR */
+ struct ib_mr *data_mr;
+ struct ib_fast_reg_page_list *data_frpl;
+ /* Valid for fast registration flag */
+ bool valid;
};
struct iser_conn {
@@ -270,13 +295,13 @@ struct iser_conn {
struct iser_device *device; /* device context */
struct rdma_cm_id *cma_id; /* CMA ID */
struct ib_qp *qp; /* QP */
- struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */
wait_queue_head_t wait; /* waitq for conn/disconn */
+ unsigned qp_max_recv_dtos; /* num of rx buffers */
+ unsigned qp_max_recv_dtos_mask; /* above minus 1 */
+ unsigned min_posted_rx; /* qp_max_recv_dtos >> 2 */
int post_recv_buf_count; /* posted rx count */
atomic_t post_send_buf_count; /* posted tx count */
char name[ISER_OBJECT_NAME_SIZE];
- struct iser_page_vec *page_vec; /* represents SG to fmr maps*
- * maps serialized as tx is*/
struct list_head conn_list; /* entry in ig conn list */
char *login_buf;
@@ -285,6 +310,17 @@ struct iser_conn {
unsigned int rx_desc_head;
struct iser_rx_desc *rx_descs;
struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
+ union {
+ struct {
+ struct ib_fmr_pool *pool; /* pool of IB FMRs */
+ struct iser_page_vec *page_vec; /* represents SG to fmr maps*
+ * maps serialized as tx is*/
+ } fmr;
+ struct {
+ struct list_head pool;
+ int pool_size;
+ } frwr;
+ } fastreg;
};
struct iscsi_iser_conn {
@@ -368,8 +404,10 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn);
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
enum iser_data_dir cmd_dir);
-int iser_reg_rdma_mem(struct iscsi_iser_task *task,
- enum iser_data_dir cmd_dir);
+int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *task,
+ enum iser_data_dir cmd_dir);
+int iser_reg_rdma_mem_frwr(struct iscsi_iser_task *task,
+ enum iser_data_dir cmd_dir);
int iser_connect(struct iser_conn *ib_conn,
struct sockaddr_in *src_addr,
@@ -380,7 +418,10 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
struct iser_page_vec *page_vec,
struct iser_mem_reg *mem_reg);
-void iser_unreg_mem(struct iser_mem_reg *mem_reg);
+void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir);
+void iser_unreg_mem_frwr(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir);
int iser_post_recvl(struct iser_conn *ib_conn);
int iser_post_recvm(struct iser_conn *ib_conn, int count);
@@ -394,5 +435,9 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc);
-int iser_alloc_rx_descriptors(struct iser_conn *ib_conn);
+int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session);
+int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max);
+void iser_free_fmr_pool(struct iser_conn *ib_conn);
+int iser_create_frwr_pool(struct iser_conn *ib_conn, unsigned cmds_max);
+void iser_free_frwr_pool(struct iser_conn *ib_conn);
#endif
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index b6d81a86c97..538822684d5 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -49,6 +49,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
{
struct iscsi_iser_task *iser_task = task->dd_data;
+ struct iser_device *device = iser_task->iser_conn->ib_conn->device;
struct iser_regd_buf *regd_buf;
int err;
struct iser_hdr *hdr = &iser_task->desc.iser_header;
@@ -69,7 +70,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task,
return -EINVAL;
}
- err = iser_reg_rdma_mem(iser_task,ISER_DIR_IN);
+ err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_IN);
if (err) {
iser_err("Failed to set up Data-IN RDMA\n");
return err;
@@ -98,6 +99,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
unsigned int edtl)
{
struct iscsi_iser_task *iser_task = task->dd_data;
+ struct iser_device *device = iser_task->iser_conn->ib_conn->device;
struct iser_regd_buf *regd_buf;
int err;
struct iser_hdr *hdr = &iser_task->desc.iser_header;
@@ -119,7 +121,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
return -EINVAL;
}
- err = iser_reg_rdma_mem(iser_task,ISER_DIR_OUT);
+ err = device->iser_reg_rdma_mem(iser_task, ISER_DIR_OUT);
if (err != 0) {
iser_err("Failed to register write cmd RDMA mem\n");
return err;
@@ -170,8 +172,78 @@ static void iser_create_send_desc(struct iser_conn *ib_conn,
}
}
+static void iser_free_login_buf(struct iser_conn *ib_conn)
+{
+ if (!ib_conn->login_buf)
+ return;
+
+ if (ib_conn->login_req_dma)
+ ib_dma_unmap_single(ib_conn->device->ib_device,
+ ib_conn->login_req_dma,
+ ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
+
+ if (ib_conn->login_resp_dma)
+ ib_dma_unmap_single(ib_conn->device->ib_device,
+ ib_conn->login_resp_dma,
+ ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
+
+ kfree(ib_conn->login_buf);
+
+ /* make sure we never redo any unmapping */
+ ib_conn->login_req_dma = 0;
+ ib_conn->login_resp_dma = 0;
+ ib_conn->login_buf = NULL;
+}
+
+static int iser_alloc_login_buf(struct iser_conn *ib_conn)
+{
+ struct iser_device *device;
+ int req_err, resp_err;
+
+ BUG_ON(ib_conn->device == NULL);
+
+ device = ib_conn->device;
+
+ ib_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
+ ISER_RX_LOGIN_SIZE, GFP_KERNEL);
+ if (!ib_conn->login_buf)
+ goto out_err;
+
+ ib_conn->login_req_buf = ib_conn->login_buf;
+ ib_conn->login_resp_buf = ib_conn->login_buf +
+ ISCSI_DEF_MAX_RECV_SEG_LEN;
+
+ ib_conn->login_req_dma = ib_dma_map_single(ib_conn->device->ib_device,
+ (void *)ib_conn->login_req_buf,
+ ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
+
+ ib_conn->login_resp_dma = ib_dma_map_single(ib_conn->device->ib_device,
+ (void *)ib_conn->login_resp_buf,
+ ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
+
+ req_err = ib_dma_mapping_error(device->ib_device,
+ ib_conn->login_req_dma);
+ resp_err = ib_dma_mapping_error(device->ib_device,
+ ib_conn->login_resp_dma);
+
+ if (req_err || resp_err) {
+ if (req_err)
+ ib_conn->login_req_dma = 0;
+ if (resp_err)
+ ib_conn->login_resp_dma = 0;
+ goto free_login_buf;
+ }
+ return 0;
+
+free_login_buf:
+ iser_free_login_buf(ib_conn);
-int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
+out_err:
+ iser_err("unable to alloc or map login buf\n");
+ return -ENOMEM;
+}
+
+int iser_alloc_rx_descriptors(struct iser_conn *ib_conn, struct iscsi_session *session)
{
int i, j;
u64 dma_addr;
@@ -179,14 +251,24 @@ int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
struct ib_sge *rx_sg;
struct iser_device *device = ib_conn->device;
- ib_conn->rx_descs = kmalloc(ISER_QP_MAX_RECV_DTOS *
+ ib_conn->qp_max_recv_dtos = session->cmds_max;
+ ib_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
+ ib_conn->min_posted_rx = ib_conn->qp_max_recv_dtos >> 2;
+
+ if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max))
+ goto create_rdma_reg_res_failed;
+
+ if (iser_alloc_login_buf(ib_conn))
+ goto alloc_login_buf_fail;
+
+ ib_conn->rx_descs = kmalloc(session->cmds_max *
sizeof(struct iser_rx_desc), GFP_KERNEL);
if (!ib_conn->rx_descs)
goto rx_desc_alloc_fail;
rx_desc = ib_conn->rx_descs;
- for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++) {
+ for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++) {
dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
if (ib_dma_mapping_error(device->ib_device, dma_addr))
@@ -207,10 +289,14 @@ rx_desc_dma_map_failed:
rx_desc = ib_conn->rx_descs;
for (j = 0; j < i; j++, rx_desc++)
ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
kfree(ib_conn->rx_descs);
ib_conn->rx_descs = NULL;
rx_desc_alloc_fail:
+ iser_free_login_buf(ib_conn);
+alloc_login_buf_fail:
+ device->iser_free_rdma_reg_res(ib_conn);
+create_rdma_reg_res_failed:
iser_err("failed allocating rx descriptors / data buffers\n");
return -ENOMEM;
}
@@ -222,18 +308,27 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn)
struct iser_device *device = ib_conn->device;
if (!ib_conn->rx_descs)
- return;
+ goto free_login_buf;
+
+ if (device->iser_free_rdma_reg_res)
+ device->iser_free_rdma_reg_res(ib_conn);
rx_desc = ib_conn->rx_descs;
- for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++)
+ for (i = 0; i < ib_conn->qp_max_recv_dtos; i++, rx_desc++)
ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
- ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
kfree(ib_conn->rx_descs);
+ /* make sure we never redo any unmapping */
+ ib_conn->rx_descs = NULL;
+
+free_login_buf:
+ iser_free_login_buf(ib_conn);
}
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{
struct iscsi_iser_conn *iser_conn = conn->dd_data;
+ struct iscsi_session *session = conn->session;
iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
/* check if this is the last login - going to full feature phase */
@@ -248,9 +343,16 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
WARN_ON(iser_conn->ib_conn->post_recv_buf_count != 1);
WARN_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);
- iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX);
+ if (session->discovery_sess) {
+ iser_info("Discovery session, re-using login RX buffer\n");
+ return 0;
+ } else
+ iser_info("Normal session, posting batch of RX %d buffers\n",
+ iser_conn->ib_conn->min_posted_rx);
+
/* Initial post receive buffers */
- if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX))
+ if (iser_post_recvm(iser_conn->ib_conn,
+ iser_conn->ib_conn->min_posted_rx))
return -ENOMEM;
return 0;
@@ -425,6 +527,8 @@ int iser_send_control(struct iscsi_conn *conn,
}
if (task == conn->login_task) {
+ iser_dbg("op %x dsl %lx, posting login rx buffer\n",
+ task->hdr->opcode, data_seg_len);
err = iser_post_recvl(iser_conn->ib_conn);
if (err)
goto send_control_error;
@@ -487,9 +591,9 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc,
return;
outstanding = ib_conn->post_recv_buf_count;
- if (outstanding + ISER_MIN_POSTED_RX <= ISER_QP_MAX_RECV_DTOS) {
- count = min(ISER_QP_MAX_RECV_DTOS - outstanding,
- ISER_MIN_POSTED_RX);
+ if (outstanding + ib_conn->min_posted_rx <= ib_conn->qp_max_recv_dtos) {
+ count = min(ib_conn->qp_max_recv_dtos - outstanding,
+ ib_conn->min_posted_rx);
err = iser_post_recvm(ib_conn, count);
if (err)
iser_err("posting %d rx bufs err %d\n", count, err);
@@ -538,8 +642,8 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
+ struct iser_device *device = iser_task->iser_conn->ib_conn->device;
int is_rdma_aligned = 1;
- struct iser_regd_buf *regd;
/* if we were reading, copy back to unaligned sglist,
* anyway dma_unmap and free the copy
@@ -553,17 +657,11 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
iser_finalize_rdma_unaligned_sg(iser_task, ISER_DIR_OUT);
}
- if (iser_task->dir[ISER_DIR_IN]) {
- regd = &iser_task->rdma_regd[ISER_DIR_IN];
- if (regd->reg.is_fmr)
- iser_unreg_mem(&regd->reg);
- }
+ if (iser_task->dir[ISER_DIR_IN])
+ device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
- if (iser_task->dir[ISER_DIR_OUT]) {
- regd = &iser_task->rdma_regd[ISER_DIR_OUT];
- if (regd->reg.is_fmr)
- iser_unreg_mem(&regd->reg);
- }
+ if (iser_task->dir[ISER_DIR_OUT])
+ device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
/* if the data was unaligned, it was already unmapped and then copied */
if (is_rdma_aligned)
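
The rx-descriptor sizing above relies on cmds_max being a power of two (see the qp_max_recv_dtos_mask assignment), so the ring head can wrap with a mask instead of a modulo. A minimal sketch of that idiom, with the helper name invented for illustration:

/* Works only when cmds_max is a power of two: (cmds_max - 1) is then an
 * all-ones mask below the ring size, so the AND wraps the index to zero. */
static inline unsigned int iser_next_rx_head(unsigned int head, unsigned int cmds_max)
{
	return (head + 1) & (cmds_max - 1);
}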
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 7827baf455a..1ce0c97d2cc 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -170,8 +170,8 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
*/
static int iser_sg_to_page_vec(struct iser_data_buf *data,
- struct iser_page_vec *page_vec,
- struct ib_device *ibdev)
+ struct ib_device *ibdev, u64 *pages,
+ int *offset, int *data_size)
{
struct scatterlist *sg, *sgl = (struct scatterlist *)data->buf;
u64 start_addr, end_addr, page, chunk_start = 0;
@@ -180,7 +180,7 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
int i, new_chunk, cur_page, last_ent = data->dma_nents - 1;
/* compute the offset of first element */
- page_vec->offset = (u64) sgl[0].offset & ~MASK_4K;
+ *offset = (u64) sgl[0].offset & ~MASK_4K;
new_chunk = 1;
cur_page = 0;
@@ -204,13 +204,14 @@ static int iser_sg_to_page_vec(struct iser_data_buf *data,
which might be unaligned */
page = chunk_start & MASK_4K;
do {
- page_vec->pages[cur_page++] = page;
+ pages[cur_page++] = page;
page += SIZE_4K;
} while (page < end_addr);
}
- page_vec->data_size = total_sz;
- iser_dbg("page_vec->data_size:%d cur_page %d\n", page_vec->data_size,cur_page);
+ *data_size = total_sz;
+ iser_dbg("page_vec->data_size:%d cur_page %d\n",
+ *data_size, cur_page);
return cur_page;
}
@@ -267,11 +268,8 @@ static void iser_data_buf_dump(struct iser_data_buf *data,
struct scatterlist *sg;
int i;
- if (iser_debug_level == 0)
- return;
-
for_each_sg(sgl, sg, data->dma_nents, i)
- iser_warn("sg[%d] dma_addr:0x%lX page:0x%p "
+ iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
"off:0x%x sz:0x%x dma_len:0x%x\n",
i, (unsigned long)ib_sg_dma_address(ibdev, sg),
sg_page(sg), sg->offset,
@@ -298,8 +296,10 @@ static void iser_page_vec_build(struct iser_data_buf *data,
page_vec->offset = 0;
iser_dbg("Translating sg sz: %d\n", data->dma_nents);
- page_vec_len = iser_sg_to_page_vec(data, page_vec, ibdev);
- iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents,page_vec_len);
+ page_vec_len = iser_sg_to_page_vec(data, ibdev, page_vec->pages,
+ &page_vec->offset,
+ &page_vec->data_size);
+ iser_dbg("sg len %d page_vec_len %d\n", data->dma_nents, page_vec_len);
page_vec->length = page_vec_len;
@@ -347,16 +347,41 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task)
}
}
+static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
+ struct ib_device *ibdev,
+ enum iser_data_dir cmd_dir,
+ int aligned_len)
+{
+ struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
+ struct iser_data_buf *mem = &iser_task->data[cmd_dir];
+
+ iscsi_conn->fmr_unalign_cnt++;
+ iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
+ aligned_len, mem->size);
+
+ if (iser_debug_level > 0)
+ iser_data_buf_dump(mem, ibdev);
+
+ /* unmap the command data before accessing it */
+ iser_dma_unmap_task_data(iser_task);
+
+ /* allocate copy buf, if we are writing, copy the */
+ /* unaligned scatterlist, dma map the copy */
+ if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
+ return -ENOMEM;
+
+ return 0;
+}
+
/**
- * iser_reg_rdma_mem - Registers memory intended for RDMA,
- * obtaining rkey and va
+ * iser_reg_rdma_mem_fmr - Registers memory intended for RDMA,
+ * using FMR (if possible) obtaining rkey and va
*
* returns 0 on success, errno code on failure
*/
-int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
- enum iser_data_dir cmd_dir)
+int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir)
{
- struct iscsi_conn *iscsi_conn = iser_task->iser_conn->iscsi_conn;
struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
struct ib_device *ibdev = device->ib_device;
@@ -370,20 +395,13 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
regd_buf = &iser_task->rdma_regd[cmd_dir];
aligned_len = iser_data_buf_aligned_len(mem, ibdev);
- if (aligned_len != mem->dma_nents ||
- (!ib_conn->fmr_pool && mem->dma_nents > 1)) {
- iscsi_conn->fmr_unalign_cnt++;
- iser_warn("rdma alignment violation (%d/%d aligned) or FMR not supported\n",
- aligned_len, mem->size);
- iser_data_buf_dump(mem, ibdev);
-
- /* unmap the command data before accessing it */
- iser_dma_unmap_task_data(iser_task);
-
- /* allocate copy buf, if we are writing, copy the */
- /* unaligned scatterlist, dma map the copy */
- if (iser_start_rdma_unaligned_sg(iser_task, cmd_dir) != 0)
- return -ENOMEM;
+ if (aligned_len != mem->dma_nents) {
+ err = fall_to_bounce_buf(iser_task, ibdev,
+ cmd_dir, aligned_len);
+ if (err) {
+ iser_err("failed to allocate bounce buffer\n");
+ return err;
+ }
mem = &iser_task->data_copy[cmd_dir];
}
@@ -395,7 +413,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
regd_buf->reg.rkey = device->mr->rkey;
regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]);
regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]);
- regd_buf->reg.is_fmr = 0;
+ regd_buf->reg.is_mr = 0;
iser_dbg("PHYSICAL Mem.register: lkey: 0x%08X rkey: 0x%08X "
"va: 0x%08lX sz: %ld]\n",
@@ -404,22 +422,159 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
(unsigned long)regd_buf->reg.va,
(unsigned long)regd_buf->reg.len);
} else { /* use FMR for multiple dma entries */
- iser_page_vec_build(mem, ib_conn->page_vec, ibdev);
- err = iser_reg_page_vec(ib_conn, ib_conn->page_vec, &regd_buf->reg);
+ iser_page_vec_build(mem, ib_conn->fastreg.fmr.page_vec, ibdev);
+ err = iser_reg_page_vec(ib_conn, ib_conn->fastreg.fmr.page_vec,
+ &regd_buf->reg);
if (err && err != -EAGAIN) {
iser_data_buf_dump(mem, ibdev);
iser_err("mem->dma_nents = %d (dlength = 0x%x)\n",
mem->dma_nents,
ntoh24(iser_task->desc.iscsi_header.dlength));
iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
- ib_conn->page_vec->data_size, ib_conn->page_vec->length,
- ib_conn->page_vec->offset);
- for (i=0 ; i<ib_conn->page_vec->length ; i++)
+ ib_conn->fastreg.fmr.page_vec->data_size,
+ ib_conn->fastreg.fmr.page_vec->length,
+ ib_conn->fastreg.fmr.page_vec->offset);
+ for (i = 0; i < ib_conn->fastreg.fmr.page_vec->length; i++)
iser_err("page_vec[%d] = 0x%llx\n", i,
- (unsigned long long) ib_conn->page_vec->pages[i]);
+ (unsigned long long) ib_conn->fastreg.fmr.page_vec->pages[i]);
}
if (err)
return err;
}
return 0;
}
+
+static int iser_fast_reg_mr(struct fast_reg_descriptor *desc,
+ struct iser_conn *ib_conn,
+ struct iser_regd_buf *regd_buf,
+ u32 offset, unsigned int data_size,
+ unsigned int page_list_len)
+{
+ struct ib_send_wr fastreg_wr, inv_wr;
+ struct ib_send_wr *bad_wr, *wr = NULL;
+ u8 key;
+ int ret;
+
+ if (!desc->valid) {
+ memset(&inv_wr, 0, sizeof(inv_wr));
+ inv_wr.opcode = IB_WR_LOCAL_INV;
+ inv_wr.send_flags = IB_SEND_SIGNALED;
+ inv_wr.ex.invalidate_rkey = desc->data_mr->rkey;
+ wr = &inv_wr;
+ /* Bump the key */
+ key = (u8)(desc->data_mr->rkey & 0x000000FF);
+ ib_update_fast_reg_key(desc->data_mr, ++key);
+ }
+
+ /* Prepare FASTREG WR */
+ memset(&fastreg_wr, 0, sizeof(fastreg_wr));
+ fastreg_wr.opcode = IB_WR_FAST_REG_MR;
+ fastreg_wr.send_flags = IB_SEND_SIGNALED;
+ fastreg_wr.wr.fast_reg.iova_start = desc->data_frpl->page_list[0] + offset;
+ fastreg_wr.wr.fast_reg.page_list = desc->data_frpl;
+ fastreg_wr.wr.fast_reg.page_list_len = page_list_len;
+ fastreg_wr.wr.fast_reg.page_shift = SHIFT_4K;
+ fastreg_wr.wr.fast_reg.length = data_size;
+ fastreg_wr.wr.fast_reg.rkey = desc->data_mr->rkey;
+ fastreg_wr.wr.fast_reg.access_flags = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ);
+
+ if (!wr) {
+ wr = &fastreg_wr;
+ atomic_inc(&ib_conn->post_send_buf_count);
+ } else {
+ wr->next = &fastreg_wr;
+ atomic_add(2, &ib_conn->post_send_buf_count);
+ }
+
+ ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
+ if (ret) {
+ if (bad_wr->next)
+ atomic_sub(2, &ib_conn->post_send_buf_count);
+ else
+ atomic_dec(&ib_conn->post_send_buf_count);
+ iser_err("fast registration failed, ret:%d\n", ret);
+ return ret;
+ }
+ desc->valid = false;
+
+ regd_buf->reg.mem_h = desc;
+ regd_buf->reg.lkey = desc->data_mr->lkey;
+ regd_buf->reg.rkey = desc->data_mr->rkey;
+ regd_buf->reg.va = desc->data_frpl->page_list[0] + offset;
+ regd_buf->reg.len = data_size;
+ regd_buf->reg.is_mr = 1;
+
+ return ret;
+}
+
+/**
+ * iser_reg_rdma_mem_frwr - Registers memory intended for RDMA,
+ * using Fast Registration WR (if possible) obtaining rkey and va
+ *
+ * returns 0 on success, errno code on failure
+ */
+int iser_reg_rdma_mem_frwr(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir)
+{
+ struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
+ struct iser_device *device = ib_conn->device;
+ struct ib_device *ibdev = device->ib_device;
+ struct iser_data_buf *mem = &iser_task->data[cmd_dir];
+ struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir];
+ struct fast_reg_descriptor *desc;
+ unsigned int data_size, page_list_len;
+ int err, aligned_len;
+ unsigned long flags;
+ u32 offset;
+
+ aligned_len = iser_data_buf_aligned_len(mem, ibdev);
+ if (aligned_len != mem->dma_nents) {
+ err = fall_to_bounce_buf(iser_task, ibdev,
+ cmd_dir, aligned_len);
+ if (err) {
+ iser_err("failed to allocate bounce buffer\n");
+ return err;
+ }
+ mem = &iser_task->data_copy[cmd_dir];
+ }
+
+ /* if there is a single dma entry, the dma mr suffices */
+ if (mem->dma_nents == 1) {
+ struct scatterlist *sg = (struct scatterlist *)mem->buf;
+
+ regd_buf->reg.lkey = device->mr->lkey;
+ regd_buf->reg.rkey = device->mr->rkey;
+ regd_buf->reg.len = ib_sg_dma_len(ibdev, &sg[0]);
+ regd_buf->reg.va = ib_sg_dma_address(ibdev, &sg[0]);
+ regd_buf->reg.is_mr = 0;
+ } else {
+ spin_lock_irqsave(&ib_conn->lock, flags);
+ desc = list_first_entry(&ib_conn->fastreg.frwr.pool,
+ struct fast_reg_descriptor, list);
+ list_del(&desc->list);
+ spin_unlock_irqrestore(&ib_conn->lock, flags);
+ page_list_len = iser_sg_to_page_vec(mem, device->ib_device,
+ desc->data_frpl->page_list,
+ &offset, &data_size);
+
+ if (page_list_len * SIZE_4K < data_size) {
+ iser_err("fast reg page_list too short to hold this SG\n");
+ err = -EINVAL;
+ goto err_reg;
+ }
+
+ err = iser_fast_reg_mr(desc, ib_conn, regd_buf,
+ offset, data_size, page_list_len);
+ if (err)
+ goto err_reg;
+ }
+
+ return 0;
+err_reg:
+ spin_lock_irqsave(&ib_conn->lock, flags);
+ list_add_tail(&desc->list, &ib_conn->fastreg.frwr.pool);
+ spin_unlock_irqrestore(&ib_conn->lock, flags);
+ return err;
+}
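
The FMR and FRWR paths above are reached through the function pointers added to struct iser_device and selected once per device. A minimal sketch of that dispatch under stand-in types; the wrapper is hypothetical, while the patch itself calls the pointer directly from iser_prepare_read_cmd/iser_prepare_write_cmd:

/* Stand-in declarations for illustration; the real ones live in iscsi_iser.h. */
struct iser_task;
enum iser_data_dir { ISER_DIR_IN, ISER_DIR_OUT };

struct iser_device {
	int (*iser_reg_rdma_mem)(struct iser_task *task, enum iser_data_dir dir);
};

/* One call site, two back-ends: whichever of the FMR/FRWR registration
 * routines was installed at device-init time gets invoked here. */
static int iser_reg_task_mem(struct iser_device *device,
			     struct iser_task *task, enum iser_data_dir dir)
{
	return device->iser_reg_rdma_mem(task, dir);
}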
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 2c4941d0656..afe95674008 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -73,6 +73,36 @@ static int iser_create_device_ib_res(struct iser_device *device)
{
int i, j;
struct iser_cq_desc *cq_desc;
+ struct ib_device_attr *dev_attr;
+
+ dev_attr = kmalloc(sizeof(*dev_attr), GFP_KERNEL);
+ if (!dev_attr)
+ return -ENOMEM;
+
+ if (ib_query_device(device->ib_device, dev_attr)) {
+ pr_warn("Query device failed for %s\n", device->ib_device->name);
+ goto dev_attr_err;
+ }
+
+ /* Assign function handles - based on FMR support */
+ if (device->ib_device->alloc_fmr && device->ib_device->dealloc_fmr &&
+ device->ib_device->map_phys_fmr && device->ib_device->unmap_fmr) {
+ iser_info("FMR supported, using FMR for registration\n");
+ device->iser_alloc_rdma_reg_res = iser_create_fmr_pool;
+ device->iser_free_rdma_reg_res = iser_free_fmr_pool;
+ device->iser_reg_rdma_mem = iser_reg_rdma_mem_fmr;
+ device->iser_unreg_rdma_mem = iser_unreg_mem_fmr;
+ } else
+ if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+ iser_info("FRWR supported, using FRWR for registration\n");
+ device->iser_alloc_rdma_reg_res = iser_create_frwr_pool;
+ device->iser_free_rdma_reg_res = iser_free_frwr_pool;
+ device->iser_reg_rdma_mem = iser_reg_rdma_mem_frwr;
+ device->iser_unreg_rdma_mem = iser_unreg_mem_frwr;
+ } else {
+ iser_err("IB device does not support FMRs nor FRWRs, can't register memory\n");
+ goto dev_attr_err;
+ }
device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors);
iser_info("using %d CQs, device %s supports %d vectors\n",
@@ -128,6 +158,7 @@ static int iser_create_device_ib_res(struct iser_device *device)
if (ib_register_event_handler(&device->event_handler))
goto handler_err;
+ kfree(dev_attr);
return 0;
handler_err:
@@ -147,6 +178,8 @@ pd_err:
kfree(device->cq_desc);
cq_desc_err:
iser_err("failed to allocate an IB resource\n");
+dev_attr_err:
+ kfree(dev_attr);
return -1;
}
@@ -178,56 +211,23 @@ static void iser_free_device_ib_res(struct iser_device *device)
}
/**
- * iser_create_ib_conn_res - Creates FMR pool and Queue-Pair (QP)
+ * iser_create_fmr_pool - Creates FMR pool and page_vector
*
- * returns 0 on success, -1 on failure
+ * returns 0 on success, or errno code on failure
*/
-static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
+int iser_create_fmr_pool(struct iser_conn *ib_conn, unsigned cmds_max)
{
- struct iser_device *device;
- struct ib_qp_init_attr init_attr;
- int req_err, resp_err, ret = -ENOMEM;
+ struct iser_device *device = ib_conn->device;
struct ib_fmr_pool_param params;
- int index, min_index = 0;
-
- BUG_ON(ib_conn->device == NULL);
-
- device = ib_conn->device;
-
- ib_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
- ISER_RX_LOGIN_SIZE, GFP_KERNEL);
- if (!ib_conn->login_buf)
- goto out_err;
-
- ib_conn->login_req_buf = ib_conn->login_buf;
- ib_conn->login_resp_buf = ib_conn->login_buf + ISCSI_DEF_MAX_RECV_SEG_LEN;
-
- ib_conn->login_req_dma = ib_dma_map_single(ib_conn->device->ib_device,
- (void *)ib_conn->login_req_buf,
- ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
-
- ib_conn->login_resp_dma = ib_dma_map_single(ib_conn->device->ib_device,
- (void *)ib_conn->login_resp_buf,
- ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
-
- req_err = ib_dma_mapping_error(device->ib_device, ib_conn->login_req_dma);
- resp_err = ib_dma_mapping_error(device->ib_device, ib_conn->login_resp_dma);
-
- if (req_err || resp_err) {
- if (req_err)
- ib_conn->login_req_dma = 0;
- if (resp_err)
- ib_conn->login_resp_dma = 0;
- goto out_err;
- }
+ int ret = -ENOMEM;
- ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
- (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)),
- GFP_KERNEL);
- if (!ib_conn->page_vec)
- goto out_err;
+ ib_conn->fastreg.fmr.page_vec = kmalloc(sizeof(struct iser_page_vec) +
+ (sizeof(u64)*(ISCSI_ISER_SG_TABLESIZE + 1)),
+ GFP_KERNEL);
+ if (!ib_conn->fastreg.fmr.page_vec)
+ return ret;
- ib_conn->page_vec->pages = (u64 *) (ib_conn->page_vec + 1);
+ ib_conn->fastreg.fmr.page_vec->pages = (u64 *)(ib_conn->fastreg.fmr.page_vec + 1);
params.page_shift = SHIFT_4K;
/* when the first/last SG element are not start/end *
@@ -235,24 +235,143 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
/* make the pool size twice the max number of SCSI commands *
* the ML is expected to queue, watermark for unmap at 50% */
- params.pool_size = ISCSI_DEF_XMIT_CMDS_MAX * 2;
- params.dirty_watermark = ISCSI_DEF_XMIT_CMDS_MAX;
+ params.pool_size = cmds_max * 2;
+ params.dirty_watermark = cmds_max;
params.cache = 0;
params.flush_function = NULL;
params.access = (IB_ACCESS_LOCAL_WRITE |
IB_ACCESS_REMOTE_WRITE |
IB_ACCESS_REMOTE_READ);
- ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
- ret = PTR_ERR(ib_conn->fmr_pool);
- if (IS_ERR(ib_conn->fmr_pool) && ret != -ENOSYS) {
- ib_conn->fmr_pool = NULL;
- goto out_err;
- } else if (ret == -ENOSYS) {
- ib_conn->fmr_pool = NULL;
+ ib_conn->fastreg.fmr.pool = ib_create_fmr_pool(device->pd, &params);
+ if (!IS_ERR(ib_conn->fastreg.fmr.pool))
+ return 0;
+
+ /* no FMR => no need for page_vec */
+ kfree(ib_conn->fastreg.fmr.page_vec);
+ ib_conn->fastreg.fmr.page_vec = NULL;
+
+ ret = PTR_ERR(ib_conn->fastreg.fmr.pool);
+ ib_conn->fastreg.fmr.pool = NULL;
+ if (ret != -ENOSYS) {
+ iser_err("FMR allocation failed, err %d\n", ret);
+ return ret;
+ } else {
iser_warn("FMRs are not supported, using unaligned mode\n");
- ret = 0;
+ return 0;
}
+}
+
+/**
+ * iser_free_fmr_pool - releases the FMR pool and page vec
+ */
+void iser_free_fmr_pool(struct iser_conn *ib_conn)
+{
+ iser_info("freeing conn %p fmr pool %p\n",
+ ib_conn, ib_conn->fastreg.fmr.pool);
+
+ if (ib_conn->fastreg.fmr.pool != NULL)
+ ib_destroy_fmr_pool(ib_conn->fastreg.fmr.pool);
+
+ ib_conn->fastreg.fmr.pool = NULL;
+
+ kfree(ib_conn->fastreg.fmr.page_vec);
+ ib_conn->fastreg.fmr.page_vec = NULL;
+}
+
+/**
+ * iser_create_frwr_pool - Creates pool of fast_reg descriptors
+ * for fast registration work requests.
+ * returns 0 on success, or errno code on failure
+ */
+int iser_create_frwr_pool(struct iser_conn *ib_conn, unsigned cmds_max)
+{
+ struct iser_device *device = ib_conn->device;
+ struct fast_reg_descriptor *desc;
+ int i, ret;
+
+ INIT_LIST_HEAD(&ib_conn->fastreg.frwr.pool);
+ ib_conn->fastreg.frwr.pool_size = 0;
+ for (i = 0; i < cmds_max; i++) {
+ desc = kmalloc(sizeof(*desc), GFP_KERNEL);
+ if (!desc) {
+ iser_err("Failed to allocate a new fast_reg descriptor\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ desc->data_frpl = ib_alloc_fast_reg_page_list(device->ib_device,
+ ISCSI_ISER_SG_TABLESIZE + 1);
+ if (IS_ERR(desc->data_frpl)) {
+ ret = PTR_ERR(desc->data_frpl);
+ iser_err("Failed to allocate ib_fast_reg_page_list err=%d\n", ret);
+ goto fast_reg_page_failure;
+ }
+
+ desc->data_mr = ib_alloc_fast_reg_mr(device->pd,
+ ISCSI_ISER_SG_TABLESIZE + 1);
+ if (IS_ERR(desc->data_mr)) {
+ ret = PTR_ERR(desc->data_mr);
+ iser_err("Failed to allocate ib_fast_reg_mr err=%d\n", ret);
+ goto fast_reg_mr_failure;
+ }
+ desc->valid = true;
+ list_add_tail(&desc->list, &ib_conn->fastreg.frwr.pool);
+ ib_conn->fastreg.frwr.pool_size++;
+ }
+
+ return 0;
+
+fast_reg_mr_failure:
+ ib_free_fast_reg_page_list(desc->data_frpl);
+fast_reg_page_failure:
+ kfree(desc);
+err:
+ iser_free_frwr_pool(ib_conn);
+ return ret;
+}
+
+/**
+ * iser_free_frwr_pool - releases the pool of fast_reg descriptors
+ */
+void iser_free_frwr_pool(struct iser_conn *ib_conn)
+{
+ struct fast_reg_descriptor *desc, *tmp;
+ int i = 0;
+
+ if (list_empty(&ib_conn->fastreg.frwr.pool))
+ return;
+
+ iser_info("freeing conn %p frwr pool\n", ib_conn);
+
+ list_for_each_entry_safe(desc, tmp, &ib_conn->fastreg.frwr.pool, list) {
+ list_del(&desc->list);
+ ib_free_fast_reg_page_list(desc->data_frpl);
+ ib_dereg_mr(desc->data_mr);
+ kfree(desc);
+ ++i;
+ }
+
+ if (i < ib_conn->fastreg.frwr.pool_size)
+ iser_warn("pool still has %d regions registered\n",
+ ib_conn->fastreg.frwr.pool_size - i);
+}
+
+/**
+ * iser_create_ib_conn_res - creates the Queue-Pair (QP)
+ *
+ * returns 0 on success, -1 on failure
+ */
+static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
+{
+ struct iser_device *device;
+ struct ib_qp_init_attr init_attr;
+ int ret = -ENOMEM;
+ int index, min_index = 0;
+
+ BUG_ON(ib_conn->device == NULL);
+
+ device = ib_conn->device;
memset(&init_attr, 0, sizeof init_attr);
@@ -282,9 +401,9 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
goto out_err;
ib_conn->qp = ib_conn->cma_id->qp;
- iser_info("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
+ iser_info("setting conn %p cma_id %p qp %p\n",
ib_conn, ib_conn->cma_id,
- ib_conn->fmr_pool, ib_conn->cma_id->qp);
+ ib_conn->cma_id->qp);
return ret;
out_err:
@@ -293,7 +412,7 @@ out_err:
}
/**
- * releases the FMR pool and QP objects, returns 0 on success,
+ * releases the QP objects, returns 0 on success,
* -1 on failure
*/
static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
@@ -301,13 +420,11 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
int cq_index;
BUG_ON(ib_conn == NULL);
- iser_info("freeing conn %p cma_id %p fmr pool %p qp %p\n",
+ iser_info("freeing conn %p cma_id %p qp %p\n",
ib_conn, ib_conn->cma_id,
- ib_conn->fmr_pool, ib_conn->qp);
+ ib_conn->qp);
/* qp is created only once both addr & route are resolved */
- if (ib_conn->fmr_pool != NULL)
- ib_destroy_fmr_pool(ib_conn->fmr_pool);
if (ib_conn->qp != NULL) {
cq_index = ((struct iser_cq_desc *)ib_conn->qp->recv_cq->cq_context)->cq_index;
@@ -316,21 +433,7 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
rdma_destroy_qp(ib_conn->cma_id);
}
- ib_conn->fmr_pool = NULL;
ib_conn->qp = NULL;
- kfree(ib_conn->page_vec);
-
- if (ib_conn->login_buf) {
- if (ib_conn->login_req_dma)
- ib_dma_unmap_single(ib_conn->device->ib_device,
- ib_conn->login_req_dma,
- ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
- if (ib_conn->login_resp_dma)
- ib_dma_unmap_single(ib_conn->device->ib_device,
- ib_conn->login_resp_dma,
- ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
- kfree(ib_conn->login_buf);
- }
return 0;
}
@@ -694,7 +797,7 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
page_list = page_vec->pages;
io_addr = page_list[0];
- mem = ib_fmr_pool_map_phys(ib_conn->fmr_pool,
+ mem = ib_fmr_pool_map_phys(ib_conn->fastreg.fmr.pool,
page_list,
page_vec->length,
io_addr);
@@ -709,7 +812,7 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
mem_reg->rkey = mem->fmr->rkey;
mem_reg->len = page_vec->length * SIZE_4K;
mem_reg->va = io_addr;
- mem_reg->is_fmr = 1;
+ mem_reg->is_mr = 1;
mem_reg->mem_h = (void *)mem;
mem_reg->va += page_vec->offset;
@@ -727,12 +830,18 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
}
/**
- * Unregister (previosuly registered) memory.
+ * Unregister (previously registered using FMR) memory.
+ * If the memory is non-FMR, this does nothing.
*/
-void iser_unreg_mem(struct iser_mem_reg *reg)
+void iser_unreg_mem_fmr(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir)
{
+ struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
int ret;
+ if (!reg->is_mr)
+ return;
+
iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n",reg->mem_h);
ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
@@ -742,6 +851,23 @@ void iser_unreg_mem(struct iser_mem_reg *reg)
reg->mem_h = NULL;
}
+void iser_unreg_mem_frwr(struct iscsi_iser_task *iser_task,
+ enum iser_data_dir cmd_dir)
+{
+ struct iser_mem_reg *reg = &iser_task->rdma_regd[cmd_dir].reg;
+ struct iser_conn *ib_conn = iser_task->iser_conn->ib_conn;
+ struct fast_reg_descriptor *desc = reg->mem_h;
+
+ if (!reg->is_mr)
+ return;
+
+ reg->mem_h = NULL;
+ reg->is_mr = 0;
+ spin_lock_bh(&ib_conn->lock);
+ list_add_tail(&desc->list, &ib_conn->fastreg.frwr.pool);
+ spin_unlock_bh(&ib_conn->lock);
+}
+
int iser_post_recvl(struct iser_conn *ib_conn)
{
struct ib_recv_wr rx_wr, *rx_wr_failed;
@@ -779,7 +905,7 @@ int iser_post_recvm(struct iser_conn *ib_conn, int count)
rx_wr->sg_list = &rx_desc->rx_sg;
rx_wr->num_sge = 1;
rx_wr->next = rx_wr + 1;
- my_rx_head = (my_rx_head + 1) & (ISER_QP_MAX_RECV_DTOS - 1);
+ my_rx_head = (my_rx_head + 1) & ib_conn->qp_max_recv_dtos_mask;
}
rx_wr--;
@@ -863,7 +989,11 @@ static int iser_drain_tx_cq(struct iser_device *device, int cq_index)
if (wc.status == IB_WC_SUCCESS) {
if (wc.opcode == IB_WC_SEND)
iser_snd_completion(tx_desc, ib_conn);
- else
+ else if (wc.opcode == IB_WC_LOCAL_INV ||
+ wc.opcode == IB_WC_FAST_REG_MR) {
+ atomic_dec(&ib_conn->post_send_buf_count);
+ continue;
+ } else
iser_err("expected opcode %d got %d\n",
IB_WC_SEND, wc.opcode);
} else {
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index da739d9d190..922a7fea2ce 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -639,16 +639,18 @@ EXPORT_SYMBOL(gameport_unregister_port);
* Gameport driver operations
*/
-static ssize_t gameport_driver_show_description(struct device_driver *drv, char *buf)
+static ssize_t description_show(struct device_driver *drv, char *buf)
{
struct gameport_driver *driver = to_gameport_driver(drv);
return sprintf(buf, "%s\n", driver->description ? driver->description : "(none)");
}
+static DRIVER_ATTR_RO(description);
-static struct driver_attribute gameport_driver_attrs[] = {
- __ATTR(description, S_IRUGO, gameport_driver_show_description, NULL),
- __ATTR_NULL
+static struct attribute *gameport_driver_attrs[] = {
+ &driver_attr_description.attr,
+ NULL
};
+ATTRIBUTE_GROUPS(gameport_driver);
static int gameport_driver_probe(struct device *dev)
{
@@ -749,7 +751,7 @@ static int gameport_bus_match(struct device *dev, struct device_driver *drv)
static struct bus_type gameport_bus = {
.name = "gameport",
.dev_attrs = gameport_device_attrs,
- .drv_attrs = gameport_driver_attrs,
+ .drv_groups = gameport_driver_groups,
.match = gameport_bus_match,
.probe = gameport_driver_probe,
.remove = gameport_driver_remove,
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index fa061d46527..75e3b102ce4 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -167,6 +167,7 @@ static const struct xpad_device {
{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
{ 0x1689, 0xfd00, "Razer Onza Tournament Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
+ { 0x1689, 0xfd01, "Razer Onza Classic Edition", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1bad, 0x0002, "Harmonix Rock Band Guitar", 0, XTYPE_XBOX360 },
{ 0x1bad, 0x0003, "Harmonix Rock Band Drumkit", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1bad, 0xf016, "Mad Catz Xbox 360 Controller", 0, XTYPE_XBOX360 },
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 4ef4d5e198a..a73f9618b0a 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -89,9 +89,9 @@
#define USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO 0x025a
#define USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS 0x025b
/* MacbookAir6,2 (unibody, June 2013) */
-#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0291
-#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0292
-#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0293
+#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
+#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
+#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
#define BCM5974_DEVICE(prod) { \
.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 57b2637e153..8551dcaf24d 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -672,6 +672,7 @@ static int elantech_packet_check_v2(struct psmouse *psmouse)
*/
static int elantech_packet_check_v3(struct psmouse *psmouse)
{
+ struct elantech_data *etd = psmouse->private;
const u8 debounce_packet[] = { 0xc4, 0xff, 0xff, 0x02, 0xff, 0xff };
unsigned char *packet = psmouse->packet;
@@ -682,19 +683,48 @@ static int elantech_packet_check_v3(struct psmouse *psmouse)
if (!memcmp(packet, debounce_packet, sizeof(debounce_packet)))
return PACKET_DEBOUNCE;
- if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02)
- return PACKET_V3_HEAD;
+ /*
+ * If the hardware flag 'crc_enabled' is set the packets have
+ * different signatures.
+ */
+ if (etd->crc_enabled) {
+ if ((packet[3] & 0x09) == 0x08)
+ return PACKET_V3_HEAD;
+
+ if ((packet[3] & 0x09) == 0x09)
+ return PACKET_V3_TAIL;
+ } else {
+ if ((packet[0] & 0x0c) == 0x04 && (packet[3] & 0xcf) == 0x02)
+ return PACKET_V3_HEAD;
- if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c)
- return PACKET_V3_TAIL;
+ if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c)
+ return PACKET_V3_TAIL;
+ }
return PACKET_UNKNOWN;
}
static int elantech_packet_check_v4(struct psmouse *psmouse)
{
+ struct elantech_data *etd = psmouse->private;
unsigned char *packet = psmouse->packet;
unsigned char packet_type = packet[3] & 0x03;
+ bool sanity_check;
+
+ /*
+ * Sanity check based on the constant bits of a packet.
+ * The constant bits change depending on the value of
+ * the hardware flag 'crc_enabled' but are the same for
+ * every packet, regardless of the type.
+ */
+ if (etd->crc_enabled)
+ sanity_check = ((packet[3] & 0x08) == 0x00);
+ else
+ sanity_check = ((packet[0] & 0x0c) == 0x04 &&
+ (packet[3] & 0x1c) == 0x10);
+
+ if (!sanity_check)
+ return PACKET_UNKNOWN;
switch (packet_type) {
case 0:
@@ -1313,6 +1343,12 @@ static int elantech_set_properties(struct elantech_data *etd)
etd->reports_pressure = true;
}
+ /*
+ * The signatures of v3 and v4 packets change depending on the
+ * value of this hardware flag.
+ */
+ etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000);
+
return 0;
}
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index 46db3be45ac..036a04abaef 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -129,6 +129,7 @@ struct elantech_data {
bool paritycheck;
bool jumpy_cursor;
bool reports_pressure;
+ bool crc_enabled;
unsigned char hw_version;
unsigned int fw_version;
unsigned int single_finger_reports;
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index 94c17c28d26..1e691a3a79c 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -22,7 +22,8 @@ config SERIO_I8042
tristate "i8042 PC Keyboard controller" if EXPERT || !X86
default y
depends on !PARISC && (!ARM || ARCH_SHARK || FOOTBRIDGE_HOST) && \
- (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390
+ (!SUPERH || SH_CAYMAN) && !M68K && !BLACKFIN && !S390 && \
+ !ARC
help
i8042 is the chip over which the standard AT keyboard and PS/2
mouse are connected to the computer. If you use these devices,
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 25fc5971f42..2b56855c2c7 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -732,19 +732,20 @@ EXPORT_SYMBOL(serio_unregister_child_port);
* Serio driver operations
*/
-static ssize_t serio_driver_show_description(struct device_driver *drv, char *buf)
+static ssize_t description_show(struct device_driver *drv, char *buf)
{
struct serio_driver *driver = to_serio_driver(drv);
return sprintf(buf, "%s\n", driver->description ? driver->description : "(none)");
}
+static DRIVER_ATTR_RO(description);
-static ssize_t serio_driver_show_bind_mode(struct device_driver *drv, char *buf)
+static ssize_t bind_mode_show(struct device_driver *drv, char *buf)
{
struct serio_driver *serio_drv = to_serio_driver(drv);
return sprintf(buf, "%s\n", serio_drv->manual_bind ? "manual" : "auto");
}
-static ssize_t serio_driver_set_bind_mode(struct device_driver *drv, const char *buf, size_t count)
+static ssize_t bind_mode_store(struct device_driver *drv, const char *buf, size_t count)
{
struct serio_driver *serio_drv = to_serio_driver(drv);
int retval;
@@ -760,14 +761,14 @@ static ssize_t serio_driver_set_bind_mode(struct device_driver *drv, const char
return retval;
}
+static DRIVER_ATTR_RW(bind_mode);
-
-static struct driver_attribute serio_driver_attrs[] = {
- __ATTR(description, S_IRUGO, serio_driver_show_description, NULL),
- __ATTR(bind_mode, S_IWUSR | S_IRUGO,
- serio_driver_show_bind_mode, serio_driver_set_bind_mode),
- __ATTR_NULL
+static struct attribute *serio_driver_attrs[] = {
+ &driver_attr_description.attr,
+ &driver_attr_bind_mode.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(serio_driver);
static int serio_driver_probe(struct device *dev)
{
@@ -996,7 +997,7 @@ EXPORT_SYMBOL(serio_interrupt);
static struct bus_type serio_bus = {
.name = "serio",
.dev_attrs = serio_device_attrs,
- .drv_attrs = serio_driver_attrs,
+ .drv_groups = serio_driver_groups,
.match = serio_bus_match,
.uevent = serio_uevent,
.probe = serio_driver_probe,
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 384fbcd0cee..f3e91f0b57a 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -2112,7 +2112,7 @@ static const struct wacom_features wacom_features_0xDA =
{ "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023,
31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
.touch_max = 2 };
-static struct wacom_features wacom_features_0xDB =
+static const struct wacom_features wacom_features_0xDB =
{ "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
.touch_max = 2 };
@@ -2127,6 +2127,12 @@ static const struct wacom_features wacom_features_0xDF =
{ "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023,
31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
.touch_max = 16 };
+static const struct wacom_features wacom_features_0x300 =
+ { "Wacom Bamboo One S", WACOM_PKGLEN_BBPEN, 14720, 9225, 1023,
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0x301 =
+ { "Wacom Bamboo One M", WACOM_PKGLEN_BBPEN, 21648, 13530, 1023,
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x6004 =
{ "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2253,6 +2259,8 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x100) },
{ USB_DEVICE_WACOM(0x101) },
{ USB_DEVICE_WACOM(0x10D) },
+ { USB_DEVICE_WACOM(0x300) },
+ { USB_DEVICE_WACOM(0x301) },
{ USB_DEVICE_WACOM(0x304) },
{ USB_DEVICE_WACOM(0x4001) },
{ USB_DEVICE_WACOM(0x47) },
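
For context on the wacom hunks: adding a tablet is deliberately a two-part change, because the USB_DEVICE_WACOM() macro defined earlier in this file token-pastes the product id onto a wacom_features_ symbol, roughly:

#define USB_DEVICE_WACOM(prod)                                  \
        USB_DEVICE(USB_VENDOR_ID_WACOM, prod),                  \
        .driver_info = (kernel_ulong_t)&wacom_features_##prod

so { USB_DEVICE_WACOM(0x300) } only builds once wacom_features_0x300 exists, which is why the feature entries and the id-table entries above land in the same patch.
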
diff --git a/drivers/irqchip/irq-sirfsoc.c b/drivers/irqchip/irq-sirfsoc.c
index 69ea44ebcf6..4851afae38d 100644
--- a/drivers/irqchip/irq-sirfsoc.c
+++ b/drivers/irqchip/irq-sirfsoc.c
@@ -23,7 +23,7 @@
#define SIRFSOC_INT_RISC_LEVEL1 0x0024
#define SIRFSOC_INIT_IRQ_ID 0x0038
-#define SIRFSOC_NUM_IRQS 128
+#define SIRFSOC_NUM_IRQS 64
static struct irq_domain *sirfsoc_irqdomain;
@@ -32,15 +32,18 @@ sirfsoc_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
+ int ret;
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
- gc = irq_alloc_generic_chip("SIRFINTC", 1, irq_start, base, handle_level_irq);
- ct = gc->chip_types;
+ ret = irq_alloc_domain_generic_chips(sirfsoc_irqdomain, num, 1, "irq_sirfsoc",
+ handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
+ gc = irq_get_domain_generic_chip(sirfsoc_irqdomain, irq_start);
+ gc->reg_base = base;
+ ct = gc->chip_types;
ct->chip.irq_mask = irq_gc_mask_clr_bit;
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->regs.mask = SIRFSOC_INT_RISC_MASK0;
-
- irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, 0);
}
static asmlinkage void __exception_irq_entry sirfsoc_handle_irq(struct pt_regs *regs)
@@ -60,9 +63,8 @@ static int __init sirfsoc_irq_init(struct device_node *np, struct device_node *p
if (!base)
panic("unable to map intc cpu registers\n");
- /* using legacy because irqchip_generic does not work with linear */
- sirfsoc_irqdomain = irq_domain_add_legacy(np, SIRFSOC_NUM_IRQS, 0, 0,
- &irq_domain_simple_ops, base);
+ sirfsoc_irqdomain = irq_domain_add_linear(np, SIRFSOC_NUM_IRQS,
+ &irq_generic_chip_ops, base);
sirfsoc_alloc_gc(base, 0, 32);
sirfsoc_alloc_gc(base + 4, 32, SIRFSOC_NUM_IRQS - 32);
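
The irq-sirfsoc rework drops the legacy domain in favour of a linear domain whose mask/unmask handling comes from the generic-chip library. A condensed sketch of that pattern for a hypothetical two-bank "foo" controller (names and register offsets are illustrative):

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

#define FOO_NR_IRQS     64
#define FOO_MASK_REG    0x18    /* hypothetical mask register offset */

static struct irq_domain *foo_domain;

static int __init foo_intc_init(struct device_node *np, void __iomem *base)
{
        unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
        int ret, bank;

        foo_domain = irq_domain_add_linear(np, FOO_NR_IRQS,
                                           &irq_generic_chip_ops, base);
        if (!foo_domain)
                return -ENOMEM;

        /* 64 hwirqs, 32 per chip: one call allocates both generic chips */
        ret = irq_alloc_domain_generic_chips(foo_domain, 32, 1, "foo_intc",
                                             handle_level_irq, clr, 0,
                                             IRQ_GC_INIT_MASK_CACHE);
        if (ret)
                return ret;

        for (bank = 0; bank < 2; bank++) {
                struct irq_chip_generic *gc =
                        irq_get_domain_generic_chip(foo_domain, bank * 32);

                gc->reg_base = base + bank * 4;         /* per-bank registers */
                gc->chip_types->chip.irq_mask = irq_gc_mask_clr_bit;
                gc->chip_types->chip.irq_unmask = irq_gc_mask_set_bit;
                gc->chip_types->regs.mask = FOO_MASK_REG;
        }
        return 0;
}

With irq_generic_chip_ops as the domain ops the mapping callback comes from the generic-chip code, so no per-irq irq_set_chip_and_handler() boilerplate is needed.
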
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index a7e4939787c..7f910c76ca0 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1307,11 +1307,11 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
}
if (fifo2 & 2) {
hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B2;
- hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS +
+ hc->hw.int_m1 &= ~(HFCPCI_INTS_B2TRANS |
HFCPCI_INTS_B2REC);
} else {
hc->hw.fifo_en &= ~HFCPCI_FIFOEN_B1;
- hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS +
+ hc->hw.int_m1 &= ~(HFCPCI_INTS_B1TRANS |
HFCPCI_INTS_B1REC);
}
#ifdef REVERSE_BITORDER
@@ -1346,14 +1346,14 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
if (fifo2 & 2) {
hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
if (!tics)
- hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
+ hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
HFCPCI_INTS_B2REC);
hc->hw.ctmt |= 2;
hc->hw.conn &= ~0x18;
} else {
hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
if (!tics)
- hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
+ hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
HFCPCI_INTS_B1REC);
hc->hw.ctmt |= 1;
hc->hw.conn &= ~0x03;
@@ -1375,14 +1375,14 @@ mode_hfcpci(struct bchannel *bch, int bc, int protocol)
if (fifo2 & 2) {
hc->hw.last_bfifo_cnt[1] = 0;
hc->hw.fifo_en |= HFCPCI_FIFOEN_B2;
- hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS +
+ hc->hw.int_m1 |= (HFCPCI_INTS_B2TRANS |
HFCPCI_INTS_B2REC);
hc->hw.ctmt &= ~2;
hc->hw.conn &= ~0x18;
} else {
hc->hw.last_bfifo_cnt[0] = 0;
hc->hw.fifo_en |= HFCPCI_FIFOEN_B1;
- hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS +
+ hc->hw.int_m1 |= (HFCPCI_INTS_B1TRANS |
HFCPCI_INTS_B1REC);
hc->hw.ctmt &= ~1;
hc->hw.conn &= ~0x03;
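
The hfcpci hunks swap arithmetic '+' for bitwise '|' when building interrupt masks. For distinct single-bit constants the two produce the same value, but '+' is fragile: repeat a flag, or let two constants share a bit, and the addition carries into an unrelated bit instead of being idempotent. A two-line illustration with hypothetical flags:

#define FLAG_A  0x04
#define FLAG_B  0x08

static unsigned int ored  = FLAG_A | FLAG_A | FLAG_B;   /* 0x0c: repeating a flag is harmless */
static unsigned int added = FLAG_A + FLAG_A + FLAG_B;   /* 0x10: the carry lands on the wrong bit */
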
diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c
index da30c5cb960..faf505462a4 100644
--- a/drivers/isdn/mISDN/core.c
+++ b/drivers/isdn/mISDN/core.c
@@ -37,8 +37,8 @@ static void mISDN_dev_release(struct device *dev)
/* nothing to do: the device is part of its parent's data structure */
}
-static ssize_t _show_id(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct mISDNdevice *mdev = dev_to_mISDN(dev);
@@ -46,9 +46,10 @@ static ssize_t _show_id(struct device *dev,
return -ENODEV;
return sprintf(buf, "%d\n", mdev->id);
}
+static DEVICE_ATTR_RO(id);
-static ssize_t _show_nrbchan(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t nrbchan_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct mISDNdevice *mdev = dev_to_mISDN(dev);
@@ -56,9 +57,10 @@ static ssize_t _show_nrbchan(struct device *dev,
return -ENODEV;
return sprintf(buf, "%d\n", mdev->nrbchan);
}
+static DEVICE_ATTR_RO(nrbchan);
-static ssize_t _show_d_protocols(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t d_protocols_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct mISDNdevice *mdev = dev_to_mISDN(dev);
@@ -66,9 +68,10 @@ static ssize_t _show_d_protocols(struct device *dev,
return -ENODEV;
return sprintf(buf, "%d\n", mdev->Dprotocols);
}
+static DEVICE_ATTR_RO(d_protocols);
-static ssize_t _show_b_protocols(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t b_protocols_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct mISDNdevice *mdev = dev_to_mISDN(dev);
@@ -76,9 +79,10 @@ static ssize_t _show_b_protocols(struct device *dev,
return -ENODEV;
return sprintf(buf, "%d\n", mdev->Bprotocols | get_all_Bprotocols());
}
+static DEVICE_ATTR_RO(b_protocols);
-static ssize_t _show_protocol(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t protocol_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct mISDNdevice *mdev = dev_to_mISDN(dev);
@@ -86,17 +90,19 @@ static ssize_t _show_protocol(struct device *dev,
return -ENODEV;
return sprintf(buf, "%d\n", mdev->D.protocol);
}
+static DEVICE_ATTR_RO(protocol);
-static ssize_t _show_name(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
strcpy(buf, dev_name(dev));
return strlen(buf);
}
+static DEVICE_ATTR_RO(name);
#if 0 /* hangs */
-static ssize_t _set_name(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t name_set(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int err = 0;
char *out = kmalloc(count + 1, GFP_KERNEL);
@@ -113,10 +119,11 @@ static ssize_t _set_name(struct device *dev, struct device_attribute *attr,
return (err < 0) ? err : count;
}
+static DEVICE_ATTR_RW(name);
#endif
-static ssize_t _show_channelmap(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t channelmap_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct mISDNdevice *mdev = dev_to_mISDN(dev);
char *bp = buf;
@@ -127,18 +134,19 @@ static ssize_t _show_channelmap(struct device *dev,
return bp - buf;
}
-
-static struct device_attribute mISDN_dev_attrs[] = {
- __ATTR(id, S_IRUGO, _show_id, NULL),
- __ATTR(d_protocols, S_IRUGO, _show_d_protocols, NULL),
- __ATTR(b_protocols, S_IRUGO, _show_b_protocols, NULL),
- __ATTR(protocol, S_IRUGO, _show_protocol, NULL),
- __ATTR(channelmap, S_IRUGO, _show_channelmap, NULL),
- __ATTR(nrbchan, S_IRUGO, _show_nrbchan, NULL),
- __ATTR(name, S_IRUGO, _show_name, NULL),
-/* __ATTR(name, S_IRUGO | S_IWUSR, _show_name, _set_name), */
- {}
+static DEVICE_ATTR_RO(channelmap);
+
+static struct attribute *mISDN_attrs[] = {
+ &dev_attr_id.attr,
+ &dev_attr_d_protocols.attr,
+ &dev_attr_b_protocols.attr,
+ &dev_attr_protocol.attr,
+ &dev_attr_channelmap.attr,
+ &dev_attr_nrbchan.attr,
+ &dev_attr_name.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(mISDN);
static int mISDN_uevent(struct device *dev, struct kobj_uevent_env *env)
{
@@ -162,7 +170,7 @@ static struct class mISDN_class = {
.name = "mISDN",
.owner = THIS_MODULE,
.dev_uevent = mISDN_uevent,
- .dev_attrs = mISDN_dev_attrs,
+ .dev_groups = mISDN_groups,
.dev_release = mISDN_dev_release,
.class_release = mISDN_class_release,
};
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index 22b720ec80c..77025f5cb57 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -288,8 +288,10 @@ dsp_control_req(struct dsp *dsp, struct mISDNhead *hh, struct sk_buff *skb)
u8 *data;
int len;
- if (skb->len < sizeof(int))
+ if (skb->len < sizeof(int)) {
printk(KERN_ERR "%s: PH_CONTROL message too short\n", __func__);
+ return -EINVAL;
+ }
cont = *((int *)skb->data);
len = skb->len - sizeof(int);
data = skb->data + sizeof(int);
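
The dsp_core fix promotes a log-only length check to an early return, so the *(int *)skb->data read below it can no longer run off the end of a short control message. The general validate-before-cast shape (hypothetical helper, not the mISDN code):

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/skbuff.h>

static int foo_ctrl_req(struct sk_buff *skb)
{
        int cont;

        if (skb->len < sizeof(int)) {           /* check length before dereferencing */
                pr_err("%s: control message too short\n", __func__);
                return -EINVAL;
        }
        cont = *((int *)skb->data);
        return cont;
}
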
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 4336e37a97f..f37d63cf726 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -29,7 +29,7 @@ static void led_update_brightness(struct led_classdev *led_cdev)
led_cdev->brightness = led_cdev->brightness_get(led_cdev);
}
-static ssize_t led_brightness_show(struct device *dev,
+static ssize_t brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
@@ -40,7 +40,7 @@ static ssize_t led_brightness_show(struct device *dev,
return sprintf(buf, "%u\n", led_cdev->brightness);
}
-static ssize_t led_brightness_store(struct device *dev,
+static ssize_t brightness_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t size)
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
@@ -57,6 +57,7 @@ static ssize_t led_brightness_store(struct device *dev,
return size;
}
+static DEVICE_ATTR_RW(brightness);
static ssize_t led_max_brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -65,14 +66,35 @@ static ssize_t led_max_brightness_show(struct device *dev,
return sprintf(buf, "%u\n", led_cdev->max_brightness);
}
+static DEVICE_ATTR(max_brightness, 0444, led_max_brightness_show, NULL);
-static struct device_attribute led_class_attrs[] = {
- __ATTR(brightness, 0644, led_brightness_show, led_brightness_store),
- __ATTR(max_brightness, 0444, led_max_brightness_show, NULL),
#ifdef CONFIG_LEDS_TRIGGERS
- __ATTR(trigger, 0644, led_trigger_show, led_trigger_store),
+static DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store);
+static struct attribute *led_trigger_attrs[] = {
+ &dev_attr_trigger.attr,
+ NULL,
+};
+static const struct attribute_group led_trigger_group = {
+ .attrs = led_trigger_attrs,
+};
+#endif
+
+static struct attribute *led_class_attrs[] = {
+ &dev_attr_brightness.attr,
+ &dev_attr_max_brightness.attr,
+ NULL,
+};
+
+static const struct attribute_group led_group = {
+ .attrs = led_class_attrs,
+};
+
+static const struct attribute_group *led_groups[] = {
+ &led_group,
+#ifdef CONFIG_LEDS_TRIGGERS
+ &led_trigger_group,
#endif
- __ATTR_NULL,
+ NULL,
};
static void led_timer_function(unsigned long data)
@@ -258,7 +280,7 @@ static int __init leds_init(void)
if (IS_ERR(leds_class))
return PTR_ERR(leds_class);
leds_class->pm = &leds_class_dev_pm_ops;
- leds_class->dev_attrs = led_class_attrs;
+ leds_class->dev_groups = led_groups;
return 0;
}
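
The led-class conversion shows how optional attributes fit the new scheme: each set gets its own attribute_group, and the NULL-terminated array of group pointers is what class->dev_groups consumes. Where the condition is a runtime property rather than a Kconfig option, struct attribute_group also offers an .is_visible callback; the #ifdef above is enough here because CONFIG_LEDS_TRIGGERS is decided at build time. A stripped-down sketch with hypothetical names:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t value_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        return sprintf(buf, "0\n");             /* placeholder value */
}
static DEVICE_ATTR_RO(value);

static struct attribute *foo_attrs[] = {
        &dev_attr_value.attr,
        NULL,
};
static const struct attribute_group foo_group = {
        .attrs = foo_attrs,
};

#ifdef CONFIG_FOO_EXTRAS
static const struct attribute_group foo_extras_group = {
        .attrs = foo_extras_attrs,              /* hypothetical optional attributes */
};
#endif

static const struct attribute_group *foo_groups[] = {
        &foo_group,
#ifdef CONFIG_FOO_EXTRAS
        &foo_extras_group,
#endif
        NULL,           /* consumed via foo_class->dev_groups = foo_groups */
};
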
diff --git a/drivers/macintosh/windfarm_rm31.c b/drivers/macintosh/windfarm_rm31.c
index 0b9a79b2f48..82fc86a90c1 100644
--- a/drivers/macintosh/windfarm_rm31.c
+++ b/drivers/macintosh/windfarm_rm31.c
@@ -439,15 +439,15 @@ static void backside_setup_pid(void)
/* Slots fan */
static const struct wf_pid_param slots_param = {
- .interval = 5,
- .history_len = 2,
- .gd = 30 << 20,
- .gp = 5 << 20,
- .gr = 0,
- .itarget = 40 << 16,
- .additive = 1,
- .min = 300,
- .max = 4000,
+ .interval = 1,
+ .history_len = 20,
+ .gd = 0,
+ .gp = 0,
+ .gr = 0x00100000,
+ .itarget = 3200000,
+ .additive = 0,
+ .min = 20,
+ .max = 100,
};
static void slots_fan_tick(void)
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index dc112a7137f..4296155090b 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -959,23 +959,21 @@ out:
return r;
}
-static void remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
+static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
- struct entry *e = hash_lookup(mq, oblock);
+ struct mq_policy *mq = to_mq_policy(p);
+ struct entry *e;
+
+ mutex_lock(&mq->lock);
+
+ e = hash_lookup(mq, oblock);
BUG_ON(!e || !e->in_cache);
del(mq, e);
e->in_cache = false;
push(mq, e);
-}
-static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
-{
- struct mq_policy *mq = to_mq_policy(p);
-
- mutex_lock(&mq->lock);
- remove_mapping(mq, oblock);
mutex_unlock(&mq->lock);
}
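
The mq policy change folds the helper into the exported callback so the mutex is taken exactly once, at the public entry point, instead of relying on every internal caller to remember it. The shape it converges on (toy types, not the dm-cache code):

#include <linux/mutex.h>

struct foo_cache {
        struct mutex lock;
        unsigned long mapped[16];       /* toy mapping table */
};

/* public entry point: callers never take foo->lock themselves */
static void foo_remove_mapping(struct foo_cache *foo, unsigned int idx)
{
        mutex_lock(&foo->lock);
        foo->mapped[idx] = 0;           /* whole update happens under the lock */
        mutex_unlock(&foo->lock);
}
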
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 5adede17ddf..b759a127f9c 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1261,6 +1261,20 @@ static void activate_path(struct work_struct *work)
pg_init_done, pgpath);
}
+static int noretry_error(int error)
+{
+ switch (error) {
+ case -EOPNOTSUPP:
+ case -EREMOTEIO:
+ case -EILSEQ:
+ case -ENODATA:
+ return 1;
+ }
+
+ /* Anything else could be a path failure, so should be retried */
+ return 0;
+}
+
/*
* end_io handling
*/
@@ -1284,7 +1298,7 @@ static int do_end_io(struct multipath *m, struct request *clone,
if (!error && !clone->errors)
return 0; /* I/O complete */
- if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ)
+ if (noretry_error(error))
return error;
if (mpio->pgpath)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 957a719e8c2..df7b0a06b0e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -2290,12 +2290,18 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
d = r10_bio->devs[1].devnum;
wbio = r10_bio->devs[1].bio;
wbio2 = r10_bio->devs[1].repl_bio;
+ /* Need to test wbio2->bi_end_io before we call
+ * generic_make_request as if the former is NULL,
+ * the latter is free to free wbio2.
+ */
+ if (wbio2 && !wbio2->bi_end_io)
+ wbio2 = NULL;
if (wbio->bi_end_io) {
atomic_inc(&conf->mirrors[d].rdev->nr_pending);
md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
generic_make_request(wbio);
}
- if (wbio2 && wbio2->bi_end_io) {
+ if (wbio2) {
atomic_inc(&conf->mirrors[d].replacement->nr_pending);
md_sync_acct(conf->mirrors[d].replacement->bdev,
bio_sectors(wbio2));
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 2bf094a587c..78ea44336e7 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3462,6 +3462,7 @@ static void handle_stripe(struct stripe_head *sh)
test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) {
set_bit(STRIPE_SYNCING, &sh->state);
clear_bit(STRIPE_INSYNC, &sh->state);
+ clear_bit(STRIPE_REPLACED, &sh->state);
}
spin_unlock(&sh->stripe_lock);
}
@@ -3607,19 +3608,23 @@ static void handle_stripe(struct stripe_head *sh)
handle_parity_checks5(conf, sh, &s, disks);
}
- if (s.replacing && s.locked == 0
- && !test_bit(STRIPE_INSYNC, &sh->state)) {
+ if ((s.replacing || s.syncing) && s.locked == 0
+ && !test_bit(STRIPE_COMPUTE_RUN, &sh->state)
+ && !test_bit(STRIPE_REPLACED, &sh->state)) {
/* Write out to replacement devices where possible */
for (i = 0; i < conf->raid_disks; i++)
- if (test_bit(R5_UPTODATE, &sh->dev[i].flags) &&
- test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
+ if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
+ WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags));
set_bit(R5_WantReplace, &sh->dev[i].flags);
set_bit(R5_LOCKED, &sh->dev[i].flags);
s.locked++;
}
- set_bit(STRIPE_INSYNC, &sh->state);
+ if (s.replacing)
+ set_bit(STRIPE_INSYNC, &sh->state);
+ set_bit(STRIPE_REPLACED, &sh->state);
}
if ((s.syncing || s.replacing) && s.locked == 0 &&
+ !test_bit(STRIPE_COMPUTE_RUN, &sh->state) &&
test_bit(STRIPE_INSYNC, &sh->state)) {
md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
clear_bit(STRIPE_SYNCING, &sh->state);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index b0b663b119a..70c49329ca9 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -306,6 +306,7 @@ enum {
STRIPE_SYNC_REQUESTED,
STRIPE_SYNCING,
STRIPE_INSYNC,
+ STRIPE_REPLACED,
STRIPE_PREREAD_ACTIVE,
STRIPE_DELAYED,
STRIPE_DEGRADED,
diff --git a/drivers/media/common/siano/Kconfig b/drivers/media/common/siano/Kconfig
index f3f5ec44e68..f953d33ee15 100644
--- a/drivers/media/common/siano/Kconfig
+++ b/drivers/media/common/siano/Kconfig
@@ -23,6 +23,8 @@ config SMS_SIANO_DEBUGFS
depends on SMS_SIANO_MDTV
depends on DEBUG_FS
depends on SMS_USB_DRV
+	depends on SMS_USB_DRV = SMS_SDIO_DRV
+
---help---
Choose Y to enable visualizing a dump of the frontend
statistics response packets via debugfs. Currently, works
diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
index 08626225223..63676a8b024 100644
--- a/drivers/media/common/siano/smsdvb-main.c
+++ b/drivers/media/common/siano/smsdvb-main.c
@@ -276,7 +276,8 @@ static void smsdvb_update_per_slices(struct smsdvb_client_t *client,
/* Legacy PER/BER */
tmp = p->ets_packets * 65535;
- do_div(tmp, p->ts_packets + p->ets_packets);
+ if (p->ts_packets + p->ets_packets)
+ do_div(tmp, p->ts_packets + p->ets_packets);
client->legacy_per = tmp;
}
diff --git a/drivers/media/dvb-core/dvb-usb-ids.h b/drivers/media/dvb-core/dvb-usb-ids.h
index 886da16e14f..419a2d6b434 100644
--- a/drivers/media/dvb-core/dvb-usb-ids.h
+++ b/drivers/media/dvb-core/dvb-usb-ids.h
@@ -369,4 +369,6 @@
#define USB_PID_TECHNISAT_USB2_DVB_S2 0x0500
#define USB_PID_CPYTO_REDI_PC50A 0xa803
#define USB_PID_CTVDIGDUAL_V2 0xe410
+#define USB_PID_PCTV_2002E 0x025c
+#define USB_PID_PCTV_2002E_SE 0x025d
#endif
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index 856374bd367..2c7217fb141 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -157,7 +157,6 @@ static struct regdata mb86a20s_init2[] = {
{ 0x45, 0x04 }, /* CN symbol 4 */
{ 0x48, 0x04 }, /* CN manual mode */
- { 0x50, 0xd5 }, { 0x51, 0x01 }, /* Serial */
{ 0x50, 0xd6 }, { 0x51, 0x1f },
{ 0x50, 0xd2 }, { 0x51, 0x03 },
{ 0x50, 0xd7 }, { 0x51, 0xbf },
@@ -1860,16 +1859,15 @@ static int mb86a20s_initfe(struct dvb_frontend *fe)
dev_dbg(&state->i2c->dev, "%s: IF=%d, IF reg=0x%06llx\n",
__func__, state->if_freq, (long long)pll);
- if (!state->config->is_serial) {
+ if (!state->config->is_serial)
regD5 &= ~1;
- rc = mb86a20s_writereg(state, 0x50, 0xd5);
- if (rc < 0)
- goto err;
- rc = mb86a20s_writereg(state, 0x51, regD5);
- if (rc < 0)
- goto err;
- }
+ rc = mb86a20s_writereg(state, 0x50, 0xd5);
+ if (rc < 0)
+ goto err;
+ rc = mb86a20s_writereg(state, 0x51, regD5);
+ if (rc < 0)
+ goto err;
rc = mb86a20s_writeregdata(state, mb86a20s_init2);
if (rc < 0)
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index b2cd8ca51af..d18be19c96c 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -206,6 +206,18 @@ config VIDEO_ADV7604
To compile this driver as a module, choose M here: the
module will be called adv7604.
+config VIDEO_ADV7842
+ tristate "Analog Devices ADV7842 decoder"
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ ---help---
+ Support for the Analog Devices ADV7842 video decoder.
+
+	  This is an Analog Devices Component/Graphics/SD Digitizer
+ with 2:1 Multiplexed HDMI Receiver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adv7842.
+
config VIDEO_BT819
tristate "BT819A VideoStream decoder"
depends on VIDEO_V4L2 && I2C
@@ -417,6 +429,17 @@ config VIDEO_ADV7393
To compile this driver as a module, choose M here: the
module will be called adv7393.
+config VIDEO_ADV7511
+ tristate "Analog Devices ADV7511 encoder"
+ depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
+ ---help---
+ Support for the Analog Devices ADV7511 video encoder.
+
+	  This is an Analog Devices HDMI transmitter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called adv7511.
+
config VIDEO_AD9389B
tristate "Analog Devices AD9389B encoder"
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index dc20653bb5a..9f462df77b4 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -26,7 +26,9 @@ obj-$(CONFIG_VIDEO_ADV7183) += adv7183.o
obj-$(CONFIG_VIDEO_ADV7343) += adv7343.o
obj-$(CONFIG_VIDEO_ADV7393) += adv7393.o
obj-$(CONFIG_VIDEO_ADV7604) += adv7604.o
+obj-$(CONFIG_VIDEO_ADV7842) += adv7842.o
obj-$(CONFIG_VIDEO_AD9389B) += ad9389b.o
+obj-$(CONFIG_VIDEO_ADV7511) += adv7511.o
obj-$(CONFIG_VIDEO_VPX3220) += vpx3220.o
obj-$(CONFIG_VIDEO_VS6624) += vs6624.o
obj-$(CONFIG_VIDEO_BT819) += bt819.o
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index ba4364dfae6..bb0c99d7a4f 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -33,6 +33,7 @@
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-dv-timings.h>
#include <media/v4l2-ctrls.h>
#include <media/ad9389b.h>
@@ -442,22 +443,11 @@ static int ad9389b_log_status(struct v4l2_subdev *sd)
vic_detect, vic_sent);
}
}
- if (state->dv_timings.type == V4L2_DV_BT_656_1120) {
- struct v4l2_bt_timings *bt = bt = &state->dv_timings.bt;
- u32 frame_width = bt->width + bt->hfrontporch +
- bt->hsync + bt->hbackporch;
- u32 frame_height = bt->height + bt->vfrontporch +
- bt->vsync + bt->vbackporch;
- u32 frame_size = frame_width * frame_height;
-
- v4l2_info(sd, "timings: %ux%u%s%u (%ux%u). Pix freq. = %u Hz. Polarities = 0x%x\n",
- bt->width, bt->height, bt->interlaced ? "i" : "p",
- frame_size > 0 ? (unsigned)bt->pixelclock / frame_size : 0,
- frame_width, frame_height,
- (unsigned)bt->pixelclock, bt->polarities);
- } else {
+ if (state->dv_timings.type == V4L2_DV_BT_656_1120)
+ v4l2_print_dv_timings(sd->name, "timings: ",
+ &state->dv_timings, false);
+ else
v4l2_info(sd, "no timings set\n");
- }
return 0;
}
@@ -636,95 +626,34 @@ static int ad9389b_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
}
-static const struct v4l2_dv_timings ad9389b_timings[] = {
- V4L2_DV_BT_CEA_720X480P59_94,
- V4L2_DV_BT_CEA_720X576P50,
- V4L2_DV_BT_CEA_1280X720P24,
- V4L2_DV_BT_CEA_1280X720P25,
- V4L2_DV_BT_CEA_1280X720P30,
- V4L2_DV_BT_CEA_1280X720P50,
- V4L2_DV_BT_CEA_1280X720P60,
- V4L2_DV_BT_CEA_1920X1080P24,
- V4L2_DV_BT_CEA_1920X1080P25,
- V4L2_DV_BT_CEA_1920X1080P30,
- V4L2_DV_BT_CEA_1920X1080P50,
- V4L2_DV_BT_CEA_1920X1080P60,
-
- V4L2_DV_BT_DMT_640X350P85,
- V4L2_DV_BT_DMT_640X400P85,
- V4L2_DV_BT_DMT_720X400P85,
- V4L2_DV_BT_DMT_640X480P60,
- V4L2_DV_BT_DMT_640X480P72,
- V4L2_DV_BT_DMT_640X480P75,
- V4L2_DV_BT_DMT_640X480P85,
- V4L2_DV_BT_DMT_800X600P56,
- V4L2_DV_BT_DMT_800X600P60,
- V4L2_DV_BT_DMT_800X600P72,
- V4L2_DV_BT_DMT_800X600P75,
- V4L2_DV_BT_DMT_800X600P85,
- V4L2_DV_BT_DMT_848X480P60,
- V4L2_DV_BT_DMT_1024X768P60,
- V4L2_DV_BT_DMT_1024X768P70,
- V4L2_DV_BT_DMT_1024X768P75,
- V4L2_DV_BT_DMT_1024X768P85,
- V4L2_DV_BT_DMT_1152X864P75,
- V4L2_DV_BT_DMT_1280X768P60_RB,
- V4L2_DV_BT_DMT_1280X768P60,
- V4L2_DV_BT_DMT_1280X768P75,
- V4L2_DV_BT_DMT_1280X768P85,
- V4L2_DV_BT_DMT_1280X800P60_RB,
- V4L2_DV_BT_DMT_1280X800P60,
- V4L2_DV_BT_DMT_1280X800P75,
- V4L2_DV_BT_DMT_1280X800P85,
- V4L2_DV_BT_DMT_1280X960P60,
- V4L2_DV_BT_DMT_1280X960P85,
- V4L2_DV_BT_DMT_1280X1024P60,
- V4L2_DV_BT_DMT_1280X1024P75,
- V4L2_DV_BT_DMT_1280X1024P85,
- V4L2_DV_BT_DMT_1360X768P60,
- V4L2_DV_BT_DMT_1400X1050P60_RB,
- V4L2_DV_BT_DMT_1400X1050P60,
- V4L2_DV_BT_DMT_1400X1050P75,
- V4L2_DV_BT_DMT_1400X1050P85,
- V4L2_DV_BT_DMT_1440X900P60_RB,
- V4L2_DV_BT_DMT_1440X900P60,
- V4L2_DV_BT_DMT_1600X1200P60,
- V4L2_DV_BT_DMT_1680X1050P60_RB,
- V4L2_DV_BT_DMT_1680X1050P60,
- V4L2_DV_BT_DMT_1792X1344P60,
- V4L2_DV_BT_DMT_1856X1392P60,
- V4L2_DV_BT_DMT_1920X1200P60_RB,
- V4L2_DV_BT_DMT_1366X768P60,
- V4L2_DV_BT_DMT_1920X1080P60,
- {},
+static const struct v4l2_dv_timings_cap ad9389b_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ .bt = {
+ .max_width = 1920,
+ .max_height = 1200,
+ .min_pixelclock = 25000000,
+ .max_pixelclock = 170000000,
+ .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
+ .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
+ V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM,
+ },
};
static int ad9389b_s_dv_timings(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings)
{
struct ad9389b_state *state = get_ad9389b_state(sd);
- int i;
v4l2_dbg(1, debug, sd, "%s:\n", __func__);
/* quick sanity check */
- if (timings->type != V4L2_DV_BT_656_1120)
- return -EINVAL;
-
- if (timings->bt.interlaced)
- return -EINVAL;
- if (timings->bt.pixelclock < 27000000 ||
- timings->bt.pixelclock > 170000000)
+ if (!v4l2_valid_dv_timings(timings, &ad9389b_timings_cap, NULL, NULL))
return -EINVAL;
/* Fill the optional fields .standards and .flags in struct v4l2_dv_timings
- if the format is listed in ad9389b_timings[] */
- for (i = 0; ad9389b_timings[i].bt.width; i++) {
- if (v4l_match_dv_timings(timings, &ad9389b_timings[i], 0)) {
- *timings = ad9389b_timings[i];
- break;
- }
- }
+ if the format is one of the CEA or DMT timings. */
+ v4l2_find_dv_timings_cap(timings, &ad9389b_timings_cap, 0, NULL, NULL);
timings->bt.flags &= ~V4L2_DV_FL_REDUCED_FPS;
@@ -762,26 +691,14 @@ static int ad9389b_g_dv_timings(struct v4l2_subdev *sd,
static int ad9389b_enum_dv_timings(struct v4l2_subdev *sd,
struct v4l2_enum_dv_timings *timings)
{
- if (timings->index >= ARRAY_SIZE(ad9389b_timings))
- return -EINVAL;
-
- memset(timings->reserved, 0, sizeof(timings->reserved));
- timings->timings = ad9389b_timings[timings->index];
- return 0;
+ return v4l2_enum_dv_timings_cap(timings, &ad9389b_timings_cap,
+ NULL, NULL);
}
static int ad9389b_dv_timings_cap(struct v4l2_subdev *sd,
struct v4l2_dv_timings_cap *cap)
{
- cap->type = V4L2_DV_BT_656_1120;
- cap->bt.max_width = 1920;
- cap->bt.max_height = 1200;
- cap->bt.min_pixelclock = 27000000;
- cap->bt.max_pixelclock = 170000000;
- cap->bt.standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
- V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT;
- cap->bt.capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
- V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM;
+ *cap = ad9389b_timings_cap;
return 0;
}
@@ -930,8 +847,10 @@ static void ad9389b_edid_handler(struct work_struct *work)
* (DVI connectors are particularly prone to this problem). */
if (state->edid.read_retries) {
state->edid.read_retries--;
- /* EDID read failed, trigger a retry */
- ad9389b_wr(sd, 0xc9, 0xf);
+ v4l2_dbg(1, debug, sd, "%s: edid read failed\n", __func__);
+ state->have_monitor = false;
+ ad9389b_s_power(sd, false);
+ ad9389b_s_power(sd, true);
queue_delayed_work(state->work_queue,
&state->edid_handler, EDID_DELAY);
return;
@@ -967,11 +886,9 @@ static void ad9389b_setup(struct v4l2_subdev *sd)
ad9389b_wr_and_or(sd, 0x15, 0xf1, 0x0);
/* Output format: RGB 4:4:4 */
ad9389b_wr_and_or(sd, 0x16, 0x3f, 0x0);
- /* CSC fixed point: +/-2, 1st order interpolation 4:2:2 -> 4:4:4 up
- conversion, Aspect ratio: 16:9 */
- ad9389b_wr_and_or(sd, 0x17, 0xe1, 0x0e);
- /* Disable pixel repetition and CSC */
- ad9389b_wr_and_or(sd, 0x3b, 0x9e, 0x0);
+ /* 1st order interpolation 4:2:2 -> 4:4:4 up conversion,
+ Aspect ratio: 16:9 */
+ ad9389b_wr_and_or(sd, 0x17, 0xf9, 0x06);
/* Output format: RGB 4:4:4, Active Format Information is valid. */
ad9389b_wr_and_or(sd, 0x45, 0xc7, 0x08);
/* Underscanned */
@@ -1056,12 +973,12 @@ static void ad9389b_check_monitor_present_status(struct v4l2_subdev *sd)
static bool edid_block_verify_crc(u8 *edid_block)
{
- int i;
u8 sum = 0;
+ int i;
- for (i = 0; i < 127; i++)
- sum += *(edid_block + i);
- return ((255 - sum + 1) == edid_block[127]);
+ for (i = 0; i < 128; i++)
+ sum += edid_block[i];
+ return sum == 0;
}
static bool edid_segment_verify_crc(struct v4l2_subdev *sd, u32 segment)
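
The rewritten edid_block_verify_crc leans on the EDID rule that all 128 bytes of a block, checksum byte included, sum to 0 modulo 256; summing the full block also avoids the corner case where the first 127 bytes already wrap to 0, which the old (255 - sum + 1) comparison rejected. The same check in standalone form (buffer name is illustrative):

#include <linux/types.h>

static bool edid_block_ok(const u8 block[128])
{
        u8 sum = 0;
        int i;

        for (i = 0; i < 128; i++)
                sum += block[i];        /* u8 arithmetic wraps mod 256 */
        return sum == 0;                /* a valid block sums to zero */
}
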
@@ -1107,6 +1024,8 @@ static bool ad9389b_check_edid_status(struct v4l2_subdev *sd)
}
if (!edid_segment_verify_crc(sd, segment)) {
/* edid crc error, force reread of edid segment */
+ v4l2_err(sd, "%s: edid crc error\n", __func__);
+ state->have_monitor = false;
ad9389b_s_power(sd, false);
ad9389b_s_power(sd, true);
return false;
@@ -1190,27 +1109,27 @@ static int ad9389b_probe(struct i2c_client *client, const struct i2c_device_id *
state->hdmi_mode_ctrl = v4l2_ctrl_new_std_menu(hdl, &ad9389b_ctrl_ops,
V4L2_CID_DV_TX_MODE, V4L2_DV_TX_MODE_HDMI,
0, V4L2_DV_TX_MODE_DVI_D);
- state->hdmi_mode_ctrl->is_private = true;
state->hotplug_ctrl = v4l2_ctrl_new_std(hdl, NULL,
V4L2_CID_DV_TX_HOTPLUG, 0, 1, 0, 0);
- state->hotplug_ctrl->is_private = true;
state->rx_sense_ctrl = v4l2_ctrl_new_std(hdl, NULL,
V4L2_CID_DV_TX_RXSENSE, 0, 1, 0, 0);
- state->rx_sense_ctrl->is_private = true;
state->have_edid0_ctrl = v4l2_ctrl_new_std(hdl, NULL,
V4L2_CID_DV_TX_EDID_PRESENT, 0, 1, 0, 0);
- state->have_edid0_ctrl->is_private = true;
state->rgb_quantization_range_ctrl =
v4l2_ctrl_new_std_menu(hdl, &ad9389b_ctrl_ops,
V4L2_CID_DV_TX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
0, V4L2_DV_RGB_RANGE_AUTO);
- state->rgb_quantization_range_ctrl->is_private = true;
sd->ctrl_handler = hdl;
if (hdl->error) {
err = hdl->error;
goto err_hdl;
}
+ state->hdmi_mode_ctrl->is_private = true;
+ state->hotplug_ctrl->is_private = true;
+ state->rx_sense_ctrl->is_private = true;
+ state->have_edid0_ctrl->is_private = true;
+ state->rgb_quantization_range_ctrl->is_private = true;
state->pad.flags = MEDIA_PAD_FL_SINK;
err = media_entity_init(&sd->entity, 1, &state->pad, 0);
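
The ad9389b rework replaces the open-coded timings table with a v4l2_dv_timings_cap description plus the helpers from media/v4l2-dv-timings.h, and the new adv7511 driver below uses the identical trio (validate, snap, enumerate). Condensed, the subdev side looks like this (foo_* names are placeholders; the cap limits are elided and would mirror ad9389b_timings_cap above):

#include <media/v4l2-dv-timings.h>
#include <media/v4l2-subdev.h>

static const struct v4l2_dv_timings_cap foo_timings_cap = {
        .type = V4L2_DV_BT_656_1120,
        /* .bt width/height/pixelclock limits elided in this sketch */
};

static int foo_s_dv_timings(struct v4l2_subdev *sd, struct v4l2_dv_timings *t)
{
        if (!v4l2_valid_dv_timings(t, &foo_timings_cap, NULL, NULL))
                return -EINVAL;         /* reject anything outside the cap limits */
        /* snap to a known CEA/DMT entry so .standards/.flags get filled in */
        v4l2_find_dv_timings_cap(t, &foo_timings_cap, 0, NULL, NULL);
        return 0;
}

static int foo_enum_dv_timings(struct v4l2_subdev *sd,
                               struct v4l2_enum_dv_timings *t)
{
        return v4l2_enum_dv_timings_cap(t, &foo_timings_cap, NULL, NULL);
}

static int foo_dv_timings_cap(struct v4l2_subdev *sd,
                              struct v4l2_dv_timings_cap *cap)
{
        *cap = foo_timings_cap;         /* one source of truth for all three ops */
        return 0;
}
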
diff --git a/drivers/media/i2c/adv7343.c b/drivers/media/i2c/adv7343.c
index 7606218ec4a..aeb56c53e39 100644
--- a/drivers/media/i2c/adv7343.c
+++ b/drivers/media/i2c/adv7343.c
@@ -27,8 +27,10 @@
#include <linux/uaccess.h>
#include <media/adv7343.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-of.h>
#include "adv7343_regs.h"
@@ -226,12 +228,12 @@ static int adv7343_setoutput(struct v4l2_subdev *sd, u32 output_type)
else
val = state->pdata->mode_config.sleep_mode << 0 |
state->pdata->mode_config.pll_control << 1 |
- state->pdata->mode_config.dac_3 << 2 |
- state->pdata->mode_config.dac_2 << 3 |
- state->pdata->mode_config.dac_1 << 4 |
- state->pdata->mode_config.dac_6 << 5 |
- state->pdata->mode_config.dac_5 << 6 |
- state->pdata->mode_config.dac_4 << 7;
+ state->pdata->mode_config.dac[2] << 2 |
+ state->pdata->mode_config.dac[1] << 3 |
+ state->pdata->mode_config.dac[0] << 4 |
+ state->pdata->mode_config.dac[5] << 5 |
+ state->pdata->mode_config.dac[4] << 6 |
+ state->pdata->mode_config.dac[3] << 7;
err = adv7343_write(sd, ADV7343_POWER_MODE_REG, val);
if (err < 0)
@@ -250,15 +252,15 @@ static int adv7343_setoutput(struct v4l2_subdev *sd, u32 output_type)
/* configure SD DAC Output 2 and SD DAC Output 1 bit to zero */
val = state->reg82 & (SD_DAC_1_DI & SD_DAC_2_DI);
- if (state->pdata && state->pdata->sd_config.sd_dac_out1)
- val = val | (state->pdata->sd_config.sd_dac_out1 << 1);
- else if (state->pdata && !state->pdata->sd_config.sd_dac_out1)
- val = val & ~(state->pdata->sd_config.sd_dac_out1 << 1);
+ if (state->pdata && state->pdata->sd_config.sd_dac_out[0])
+ val = val | (state->pdata->sd_config.sd_dac_out[0] << 1);
+ else if (state->pdata && !state->pdata->sd_config.sd_dac_out[0])
+ val = val & ~(state->pdata->sd_config.sd_dac_out[0] << 1);
- if (state->pdata && state->pdata->sd_config.sd_dac_out2)
- val = val | (state->pdata->sd_config.sd_dac_out2 << 2);
- else if (state->pdata && !state->pdata->sd_config.sd_dac_out2)
- val = val & ~(state->pdata->sd_config.sd_dac_out2 << 2);
+ if (state->pdata && state->pdata->sd_config.sd_dac_out[1])
+ val = val | (state->pdata->sd_config.sd_dac_out[1] << 2);
+ else if (state->pdata && !state->pdata->sd_config.sd_dac_out[1])
+ val = val & ~(state->pdata->sd_config.sd_dac_out[1] << 2);
err = adv7343_write(sd, ADV7343_SD_MODE_REG2, val);
if (err < 0)
@@ -398,6 +400,40 @@ static int adv7343_initialize(struct v4l2_subdev *sd)
return err;
}
+static struct adv7343_platform_data *
+adv7343_get_pdata(struct i2c_client *client)
+{
+ struct adv7343_platform_data *pdata;
+ struct device_node *np;
+
+ if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
+ return client->dev.platform_data;
+
+ np = v4l2_of_get_next_endpoint(client->dev.of_node, NULL);
+ if (!np)
+ return NULL;
+
+ pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ goto done;
+
+ pdata->mode_config.sleep_mode =
+ of_property_read_bool(np, "adi,power-mode-sleep-mode");
+
+ pdata->mode_config.pll_control =
+ of_property_read_bool(np, "adi,power-mode-pll-ctrl");
+
+ of_property_read_u32_array(np, "adi,dac-enable",
+ pdata->mode_config.dac, 6);
+
+ of_property_read_u32_array(np, "adi,sd-dac-enable",
+ pdata->sd_config.sd_dac_out, 2);
+
+done:
+ of_node_put(np);
+ return pdata;
+}
+
static int adv7343_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -416,7 +452,7 @@ static int adv7343_probe(struct i2c_client *client,
return -ENOMEM;
/* Copy board specific information here */
- state->pdata = client->dev.platform_data;
+ state->pdata = adv7343_get_pdata(client);
state->reg00 = 0x80;
state->reg01 = 0x00;
@@ -445,16 +481,21 @@ static int adv7343_probe(struct i2c_client *client,
ADV7343_GAIN_DEF);
state->sd.ctrl_handler = &state->hdl;
if (state->hdl.error) {
- int err = state->hdl.error;
-
- v4l2_ctrl_handler_free(&state->hdl);
- return err;
+ err = state->hdl.error;
+ goto done;
}
v4l2_ctrl_handler_setup(&state->hdl);
err = adv7343_initialize(&state->sd);
if (err)
+ goto done;
+
+ err = v4l2_async_register_subdev(&state->sd);
+
+done:
+ if (err < 0)
v4l2_ctrl_handler_free(&state->hdl);
+
return err;
}
@@ -463,6 +504,7 @@ static int adv7343_remove(struct i2c_client *client)
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct adv7343_state *state = to_state(sd);
+ v4l2_async_unregister_subdev(&state->sd);
v4l2_device_unregister_subdev(sd);
v4l2_ctrl_handler_free(&state->hdl);
@@ -476,8 +518,17 @@ static const struct i2c_device_id adv7343_id[] = {
MODULE_DEVICE_TABLE(i2c, adv7343_id);
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id adv7343_of_match[] = {
+ {.compatible = "adi,adv7343", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, adv7343_of_match);
+#endif
+
static struct i2c_driver adv7343_driver = {
.driver = {
+ .of_match_table = of_match_ptr(adv7343_of_match),
.owner = THIS_MODULE,
.name = "adv7343",
},
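
The adv7343 changes do two independent things: probe() now sources its platform data either from a board file or from the DT endpoint node, and the subdev is registered with v4l2-async so a bridge driver can bind to it later. The probe-time decision boils down to this shape (hypothetical foo driver; only one property shown):

#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <media/v4l2-of.h>

struct foo_platform_data {
        bool sleep_mode;
};

static struct foo_platform_data *foo_get_pdata(struct i2c_client *client)
{
        struct foo_platform_data *pdata;
        struct device_node *np;

        /* non-DT boards keep handing in platform data the old way */
        if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
                return client->dev.platform_data;

        np = v4l2_of_get_next_endpoint(client->dev.of_node, NULL);
        if (!np)
                return NULL;

        pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
        if (pdata)
                pdata->sleep_mode =
                        of_property_read_bool(np, "adi,power-mode-sleep-mode");

        of_node_put(np);        /* endpoint reference only needed while parsing */
        return pdata;
}
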
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
new file mode 100644
index 00000000000..7a576097471
--- /dev/null
+++ b/drivers/media/i2c/adv7511.c
@@ -0,0 +1,1198 @@
+/*
+ * Analog Devices ADV7511 HDMI Transmitter Device Driver
+ *
+ * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/adv7511.h>
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-2)");
+
+MODULE_DESCRIPTION("Analog Devices ADV7511 HDMI Transmitter Device Driver");
+MODULE_AUTHOR("Hans Verkuil");
+MODULE_LICENSE("GPL");
+
+#define MASK_ADV7511_EDID_RDY_INT 0x04
+#define MASK_ADV7511_MSEN_INT 0x40
+#define MASK_ADV7511_HPD_INT 0x80
+
+#define MASK_ADV7511_HPD_DETECT 0x40
+#define MASK_ADV7511_MSEN_DETECT 0x20
+#define MASK_ADV7511_EDID_RDY 0x10
+
+#define EDID_MAX_RETRIES (8)
+#define EDID_DELAY 250
+#define EDID_MAX_SEGM 8
+
+#define ADV7511_MAX_WIDTH 1920
+#define ADV7511_MAX_HEIGHT 1200
+#define ADV7511_MIN_PIXELCLOCK 20000000
+#define ADV7511_MAX_PIXELCLOCK 225000000
+
+/*
+**********************************************************************
+*
+* Arrays with configuration parameters for the ADV7511
+*
+**********************************************************************
+*/
+
+struct i2c_reg_value {
+ unsigned char reg;
+ unsigned char value;
+};
+
+struct adv7511_state_edid {
+ /* total number of blocks */
+ u32 blocks;
+ /* Number of segments read */
+ u32 segments;
+ uint8_t data[EDID_MAX_SEGM * 256];
+ /* Number of EDID read retries left */
+ unsigned read_retries;
+ bool complete;
+};
+
+struct adv7511_state {
+ struct adv7511_platform_data pdata;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler hdl;
+ int chip_revision;
+ uint8_t i2c_edid_addr;
+ uint8_t i2c_cec_addr;
+ /* Is the adv7511 powered on? */
+ bool power_on;
+ /* Did we receive hotplug and rx-sense signals? */
+ bool have_monitor;
+ /* timings from s_dv_timings */
+ struct v4l2_dv_timings dv_timings;
+ /* controls */
+ struct v4l2_ctrl *hdmi_mode_ctrl;
+ struct v4l2_ctrl *hotplug_ctrl;
+ struct v4l2_ctrl *rx_sense_ctrl;
+ struct v4l2_ctrl *have_edid0_ctrl;
+ struct v4l2_ctrl *rgb_quantization_range_ctrl;
+ struct i2c_client *i2c_edid;
+ struct adv7511_state_edid edid;
+ /* Running counter of the number of detected EDIDs (for debugging) */
+ unsigned edid_detect_counter;
+ struct workqueue_struct *work_queue;
+ struct delayed_work edid_handler; /* work entry */
+};
+
+static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd);
+static bool adv7511_check_edid_status(struct v4l2_subdev *sd);
+static void adv7511_setup(struct v4l2_subdev *sd);
+static int adv7511_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq);
+static int adv7511_s_clock_freq(struct v4l2_subdev *sd, u32 freq);
+
+
+static const struct v4l2_dv_timings_cap adv7511_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ .bt = {
+ .max_width = ADV7511_MAX_WIDTH,
+ .max_height = ADV7511_MAX_HEIGHT,
+ .min_pixelclock = ADV7511_MIN_PIXELCLOCK,
+ .max_pixelclock = ADV7511_MAX_PIXELCLOCK,
+ .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
+ .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
+ V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM,
+ },
+};
+
+static inline struct adv7511_state *get_adv7511_state(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct adv7511_state, sd);
+}
+
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct adv7511_state, hdl)->sd;
+}
+
+/* ------------------------ I2C ----------------------------------------------- */
+
+static s32 adv_smbus_read_byte_data_check(struct i2c_client *client,
+ u8 command, bool check)
+{
+ union i2c_smbus_data data;
+
+ if (!i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_READ, command,
+ I2C_SMBUS_BYTE_DATA, &data))
+ return data.byte;
+ if (check)
+ v4l_err(client, "error reading %02x, %02x\n",
+ client->addr, command);
+ return -1;
+}
+
+static s32 adv_smbus_read_byte_data(struct i2c_client *client, u8 command)
+{
+ int i;
+ for (i = 0; i < 3; i++) {
+ int ret = adv_smbus_read_byte_data_check(client, command, true);
+ if (ret >= 0) {
+ if (i)
+ v4l_err(client, "read ok after %d retries\n", i);
+ return ret;
+ }
+ }
+ v4l_err(client, "read failed\n");
+ return -1;
+}
+
+static int adv7511_rd(struct v4l2_subdev *sd, u8 reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return adv_smbus_read_byte_data(client, reg);
+}
+
+static int adv7511_wr(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret;
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ ret = i2c_smbus_write_byte_data(client, reg, val);
+ if (ret == 0)
+ return 0;
+ }
+ v4l2_err(sd, "%s: i2c write error\n", __func__);
+ return ret;
+}
+
+/* To set specific bits in the register, a clear-mask is given (to be AND-ed),
+ and then the value-mask (to be OR-ed). */
+static inline void adv7511_wr_and_or(struct v4l2_subdev *sd, u8 reg, uint8_t clr_mask, uint8_t val_mask)
+{
+ adv7511_wr(sd, reg, (adv7511_rd(sd, reg) & clr_mask) | val_mask);
+}
+
+static int adv_smbus_read_i2c_block_data(struct i2c_client *client,
+ u8 command, unsigned length, u8 *values)
+{
+ union i2c_smbus_data data;
+ int ret;
+
+ if (length > I2C_SMBUS_BLOCK_MAX)
+ length = I2C_SMBUS_BLOCK_MAX;
+ data.block[0] = length;
+
+ ret = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_READ, command,
+ I2C_SMBUS_I2C_BLOCK_DATA, &data);
+ memcpy(values, data.block + 1, length);
+ return ret;
+}
+
+static inline void adv7511_edid_rd(struct v4l2_subdev *sd, uint16_t len, uint8_t *buf)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ int i;
+ int err = 0;
+
+ v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+
+ for (i = 0; !err && i < len; i += I2C_SMBUS_BLOCK_MAX)
+ err = adv_smbus_read_i2c_block_data(state->i2c_edid, i,
+ I2C_SMBUS_BLOCK_MAX, buf + i);
+ if (err)
+ v4l2_err(sd, "%s: i2c read error\n", __func__);
+}
+
+static inline bool adv7511_have_hotplug(struct v4l2_subdev *sd)
+{
+ return adv7511_rd(sd, 0x42) & MASK_ADV7511_HPD_DETECT;
+}
+
+static inline bool adv7511_have_rx_sense(struct v4l2_subdev *sd)
+{
+ return adv7511_rd(sd, 0x42) & MASK_ADV7511_MSEN_DETECT;
+}
+
+static void adv7511_csc_conversion_mode(struct v4l2_subdev *sd, uint8_t mode)
+{
+ adv7511_wr_and_or(sd, 0x18, 0x9f, (mode & 0x3)<<5);
+}
+
+static void adv7511_csc_coeff(struct v4l2_subdev *sd,
+ u16 A1, u16 A2, u16 A3, u16 A4,
+ u16 B1, u16 B2, u16 B3, u16 B4,
+ u16 C1, u16 C2, u16 C3, u16 C4)
+{
+ /* A */
+ adv7511_wr_and_or(sd, 0x18, 0xe0, A1>>8);
+ adv7511_wr(sd, 0x19, A1);
+ adv7511_wr_and_or(sd, 0x1A, 0xe0, A2>>8);
+ adv7511_wr(sd, 0x1B, A2);
+ adv7511_wr_and_or(sd, 0x1c, 0xe0, A3>>8);
+ adv7511_wr(sd, 0x1d, A3);
+ adv7511_wr_and_or(sd, 0x1e, 0xe0, A4>>8);
+ adv7511_wr(sd, 0x1f, A4);
+
+ /* B */
+ adv7511_wr_and_or(sd, 0x20, 0xe0, B1>>8);
+ adv7511_wr(sd, 0x21, B1);
+ adv7511_wr_and_or(sd, 0x22, 0xe0, B2>>8);
+ adv7511_wr(sd, 0x23, B2);
+ adv7511_wr_and_or(sd, 0x24, 0xe0, B3>>8);
+ adv7511_wr(sd, 0x25, B3);
+ adv7511_wr_and_or(sd, 0x26, 0xe0, B4>>8);
+ adv7511_wr(sd, 0x27, B4);
+
+ /* C */
+ adv7511_wr_and_or(sd, 0x28, 0xe0, C1>>8);
+ adv7511_wr(sd, 0x29, C1);
+ adv7511_wr_and_or(sd, 0x2A, 0xe0, C2>>8);
+ adv7511_wr(sd, 0x2B, C2);
+ adv7511_wr_and_or(sd, 0x2C, 0xe0, C3>>8);
+ adv7511_wr(sd, 0x2D, C3);
+ adv7511_wr_and_or(sd, 0x2E, 0xe0, C4>>8);
+ adv7511_wr(sd, 0x2F, C4);
+}
+
+static void adv7511_csc_rgb_full2limit(struct v4l2_subdev *sd, bool enable)
+{
+ if (enable) {
+ uint8_t csc_mode = 0;
+ adv7511_csc_conversion_mode(sd, csc_mode);
+ adv7511_csc_coeff(sd,
+ 4096-564, 0, 0, 256,
+ 0, 4096-564, 0, 256,
+ 0, 0, 4096-564, 256);
+ /* enable CSC */
+ adv7511_wr_and_or(sd, 0x18, 0x7f, 0x80);
+ /* AVI infoframe: Limited range RGB (16-235) */
+ adv7511_wr_and_or(sd, 0x57, 0xf3, 0x04);
+ } else {
+ /* disable CSC */
+ adv7511_wr_and_or(sd, 0x18, 0x7f, 0x0);
+ /* AVI infoframe: Full range RGB (0-255) */
+ adv7511_wr_and_or(sd, 0x57, 0xf3, 0x08);
+ }
+}
+
+static void adv7511_set_IT_content_AVI_InfoFrame(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ if (state->dv_timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
+ /* CEA format, not IT */
+ adv7511_wr_and_or(sd, 0x57, 0x7f, 0x00);
+ } else {
+ /* IT format */
+ adv7511_wr_and_or(sd, 0x57, 0x7f, 0x80);
+ }
+}
+
+static int adv7511_set_rgb_quantization_mode(struct v4l2_subdev *sd, struct v4l2_ctrl *ctrl)
+{
+ switch (ctrl->val) {
+ default:
+ return -EINVAL;
+ break;
+ case V4L2_DV_RGB_RANGE_AUTO: {
+ /* automatic */
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ if (state->dv_timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
+ /* cea format, RGB limited range (16-235) */
+ adv7511_csc_rgb_full2limit(sd, true);
+ } else {
+ /* not cea format, RGB full range (0-255) */
+ adv7511_csc_rgb_full2limit(sd, false);
+ }
+ }
+ break;
+ case V4L2_DV_RGB_RANGE_LIMITED:
+ /* RGB limited range (16-235) */
+ adv7511_csc_rgb_full2limit(sd, true);
+ break;
+ case V4L2_DV_RGB_RANGE_FULL:
+ /* RGB full range (0-255) */
+ adv7511_csc_rgb_full2limit(sd, false);
+ break;
+ }
+ return 0;
+}
+
+/* ------------------------------ CTRL OPS ------------------------------ */
+
+static int adv7511_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = to_sd(ctrl);
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ v4l2_dbg(1, debug, sd, "%s: ctrl id: %d, ctrl->val %d\n", __func__, ctrl->id, ctrl->val);
+
+ if (state->hdmi_mode_ctrl == ctrl) {
+ /* Set HDMI or DVI-D */
+ adv7511_wr_and_or(sd, 0xaf, 0xfd, ctrl->val == V4L2_DV_TX_MODE_HDMI ? 0x02 : 0x00);
+ return 0;
+ }
+ if (state->rgb_quantization_range_ctrl == ctrl)
+ return adv7511_set_rgb_quantization_mode(sd, ctrl);
+
+ return -EINVAL;
+}
+
+static const struct v4l2_ctrl_ops adv7511_ctrl_ops = {
+ .s_ctrl = adv7511_s_ctrl,
+};
+
+/* ---------------------------- CORE OPS ------------------------------------------- */
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static void adv7511_inv_register(struct v4l2_subdev *sd)
+{
+ v4l2_info(sd, "0x000-0x0ff: Main Map\n");
+}
+
+static int adv7511_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg)
+{
+ reg->size = 1;
+ switch (reg->reg >> 8) {
+ case 0:
+ reg->val = adv7511_rd(sd, reg->reg & 0xff);
+ break;
+ default:
+ v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
+ adv7511_inv_register(sd);
+ break;
+ }
+ return 0;
+}
+
+static int adv7511_s_register(struct v4l2_subdev *sd, const struct v4l2_dbg_register *reg)
+{
+ switch (reg->reg >> 8) {
+ case 0:
+ adv7511_wr(sd, reg->reg & 0xff, reg->val & 0xff);
+ break;
+ default:
+ v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
+ adv7511_inv_register(sd);
+ break;
+ }
+ return 0;
+}
+#endif
+
+static int adv7511_log_status(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ struct adv7511_state_edid *edid = &state->edid;
+
+ static const char * const states[] = {
+ "in reset",
+ "reading EDID",
+ "idle",
+ "initializing HDCP",
+ "HDCP enabled",
+ "initializing HDCP repeater",
+ "6", "7", "8", "9", "A", "B", "C", "D", "E", "F"
+ };
+ static const char * const errors[] = {
+ "no error",
+ "bad receiver BKSV",
+ "Ri mismatch",
+ "Pj mismatch",
+ "i2c error",
+ "timed out",
+ "max repeater cascade exceeded",
+ "hash check failed",
+ "too many devices",
+ "9", "A", "B", "C", "D", "E", "F"
+ };
+
+ v4l2_info(sd, "power %s\n", state->power_on ? "on" : "off");
+ v4l2_info(sd, "%s hotplug, %s Rx Sense, %s EDID (%d block(s))\n",
+ (adv7511_rd(sd, 0x42) & MASK_ADV7511_HPD_DETECT) ? "detected" : "no",
+ (adv7511_rd(sd, 0x42) & MASK_ADV7511_MSEN_DETECT) ? "detected" : "no",
+ edid->segments ? "found" : "no",
+ edid->blocks);
+ v4l2_info(sd, "%s output %s\n",
+ (adv7511_rd(sd, 0xaf) & 0x02) ?
+ "HDMI" : "DVI-D",
+ (adv7511_rd(sd, 0xa1) & 0x3c) ?
+ "disabled" : "enabled");
+ v4l2_info(sd, "state: %s, error: %s, detect count: %u, msk/irq: %02x/%02x\n",
+ states[adv7511_rd(sd, 0xc8) & 0xf],
+ errors[adv7511_rd(sd, 0xc8) >> 4], state->edid_detect_counter,
+ adv7511_rd(sd, 0x94), adv7511_rd(sd, 0x96));
+ v4l2_info(sd, "RGB quantization: %s range\n", adv7511_rd(sd, 0x18) & 0x80 ? "limited" : "full");
+ if (state->dv_timings.type == V4L2_DV_BT_656_1120)
+ v4l2_print_dv_timings(sd->name, "timings: ",
+ &state->dv_timings, false);
+ else
+ v4l2_info(sd, "no timings set\n");
+ v4l2_info(sd, "i2c edid addr: 0x%x\n", state->i2c_edid_addr);
+ v4l2_info(sd, "i2c cec addr: 0x%x\n", state->i2c_cec_addr);
+ return 0;
+}
+
+/* Power up/down adv7511 */
+static int adv7511_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ const int retries = 20;
+ int i;
+
+ v4l2_dbg(1, debug, sd, "%s: power %s\n", __func__, on ? "on" : "off");
+
+ state->power_on = on;
+
+ if (!on) {
+ /* Power down */
+ adv7511_wr_and_or(sd, 0x41, 0xbf, 0x40);
+ return true;
+ }
+
+ /* Power up */
+ /* The adv7511 does not always come up immediately.
+ Retry multiple times. */
+ for (i = 0; i < retries; i++) {
+ adv7511_wr_and_or(sd, 0x41, 0xbf, 0x0);
+ if ((adv7511_rd(sd, 0x41) & 0x40) == 0)
+ break;
+ adv7511_wr_and_or(sd, 0x41, 0xbf, 0x40);
+ msleep(10);
+ }
+ if (i == retries) {
+ v4l2_dbg(1, debug, sd, "%s: failed to powerup the adv7511!\n", __func__);
+ adv7511_s_power(sd, 0);
+ return false;
+ }
+ if (i > 1)
+ v4l2_dbg(1, debug, sd, "%s: needed %d retries to powerup the adv7511\n", __func__, i);
+
+ /* Reserved registers that must be set */
+ adv7511_wr(sd, 0x98, 0x03);
+ adv7511_wr_and_or(sd, 0x9a, 0xfe, 0x70);
+ adv7511_wr(sd, 0x9c, 0x30);
+ adv7511_wr_and_or(sd, 0x9d, 0xfc, 0x01);
+ adv7511_wr(sd, 0xa2, 0xa4);
+ adv7511_wr(sd, 0xa3, 0xa4);
+ adv7511_wr(sd, 0xe0, 0xd0);
+ adv7511_wr(sd, 0xf9, 0x00);
+
+ adv7511_wr(sd, 0x43, state->i2c_edid_addr);
+
+ /* Set number of attempts to read the EDID */
+ adv7511_wr(sd, 0xc9, 0xf);
+ return true;
+}
+
+/* Enable interrupts */
+static void adv7511_set_isr(struct v4l2_subdev *sd, bool enable)
+{
+ uint8_t irqs = MASK_ADV7511_HPD_INT | MASK_ADV7511_MSEN_INT;
+ uint8_t irqs_rd;
+ int retries = 100;
+
+ v4l2_dbg(2, debug, sd, "%s: %s\n", __func__, enable ? "enable" : "disable");
+
+ /* The datasheet says that the EDID ready interrupt should be
+ disabled if there is no hotplug. */
+ if (!enable)
+ irqs = 0;
+ else if (adv7511_have_hotplug(sd))
+ irqs |= MASK_ADV7511_EDID_RDY_INT;
+
+ /*
+ * This i2c write can fail (approx. 1 in 1000 writes). But it
+ * is essential that this register is correct, so retry it
+ * multiple times.
+ *
+ * Note that the i2c write does not report an error, but the readback
+ * clearly shows the wrong value.
+ */
+ do {
+ adv7511_wr(sd, 0x94, irqs);
+ irqs_rd = adv7511_rd(sd, 0x94);
+ } while (retries-- && irqs_rd != irqs);
+
+ if (irqs_rd == irqs)
+ return;
+ v4l2_err(sd, "Could not set interrupts: hw failure?\n");
+}
+
+/* Interrupt handler */
+static int adv7511_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
+{
+ uint8_t irq_status;
+
+ /* disable interrupts to prevent a race condition */
+ adv7511_set_isr(sd, false);
+ irq_status = adv7511_rd(sd, 0x96);
+ /* clear detected interrupts */
+ adv7511_wr(sd, 0x96, irq_status);
+
+ v4l2_dbg(1, debug, sd, "%s: irq 0x%x\n", __func__, irq_status);
+
+ if (irq_status & (MASK_ADV7511_HPD_INT | MASK_ADV7511_MSEN_INT))
+ adv7511_check_monitor_present_status(sd);
+ if (irq_status & MASK_ADV7511_EDID_RDY_INT)
+ adv7511_check_edid_status(sd);
+
+ /* enable interrupts */
+ adv7511_set_isr(sd, true);
+
+ if (handled)
+ *handled = true;
+ return 0;
+}
+
+static int adv7511_get_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *edid)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ if (edid->pad != 0)
+ return -EINVAL;
+ if ((edid->blocks == 0) || (edid->blocks > 256))
+ return -EINVAL;
+ if (!edid->edid)
+ return -EINVAL;
+ if (!state->edid.segments) {
+ v4l2_dbg(1, debug, sd, "EDID segment 0 not found\n");
+ return -ENODATA;
+ }
+ if (edid->start_block >= state->edid.segments * 2)
+ return -E2BIG;
+ if ((edid->blocks + edid->start_block) >= state->edid.segments * 2)
+ edid->blocks = state->edid.segments * 2 - edid->start_block;
+
+ memcpy(edid->edid, &state->edid.data[edid->start_block * 128],
+ 128 * edid->blocks);
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops adv7511_pad_ops = {
+ .get_edid = adv7511_get_edid,
+};
+
+static const struct v4l2_subdev_core_ops adv7511_core_ops = {
+ .log_status = adv7511_log_status,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = adv7511_g_register,
+ .s_register = adv7511_s_register,
+#endif
+ .s_power = adv7511_s_power,
+ .interrupt_service_routine = adv7511_isr,
+};
+
+/* ------------------------------ VIDEO OPS ------------------------------ */
+
+/* Enable/disable adv7511 output */
+static int adv7511_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ v4l2_dbg(1, debug, sd, "%s: %sable\n", __func__, (enable ? "en" : "dis"));
+ adv7511_wr_and_or(sd, 0xa1, ~0x3c, (enable ? 0 : 0x3c));
+ if (enable) {
+ adv7511_check_monitor_present_status(sd);
+ } else {
+ adv7511_s_power(sd, 0);
+ state->have_monitor = false;
+ }
+ return 0;
+}
+
+static int adv7511_s_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+
+ /* quick sanity check */
+ if (!v4l2_valid_dv_timings(timings, &adv7511_timings_cap, NULL, NULL))
+ return -EINVAL;
+
+ /* Fill the optional fields .standards and .flags in struct v4l2_dv_timings
+ if the format is one of the CEA or DMT timings. */
+ v4l2_find_dv_timings_cap(timings, &adv7511_timings_cap, 0, NULL, NULL);
+
+ timings->bt.flags &= ~V4L2_DV_FL_REDUCED_FPS;
+
+ /* save timings */
+ state->dv_timings = *timings;
+
+ /* update quantization range based on new dv_timings */
+ adv7511_set_rgb_quantization_mode(sd, state->rgb_quantization_range_ctrl);
+
+ /* update AVI infoframe */
+ adv7511_set_IT_content_AVI_InfoFrame(sd);
+
+ return 0;
+}
+
+static int adv7511_g_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+
+ if (!timings)
+ return -EINVAL;
+
+ *timings = state->dv_timings;
+
+ return 0;
+}
+
+static int adv7511_enum_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_enum_dv_timings *timings)
+{
+ return v4l2_enum_dv_timings_cap(timings, &adv7511_timings_cap, NULL, NULL);
+}
+
+static int adv7511_dv_timings_cap(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings_cap *cap)
+{
+ *cap = adv7511_timings_cap;
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops adv7511_video_ops = {
+ .s_stream = adv7511_s_stream,
+ .s_dv_timings = adv7511_s_dv_timings,
+ .g_dv_timings = adv7511_g_dv_timings,
+ .enum_dv_timings = adv7511_enum_dv_timings,
+ .dv_timings_cap = adv7511_dv_timings_cap,
+};
+
+/* ------------------------------ AUDIO OPS ------------------------------ */
+static int adv7511_s_audio_stream(struct v4l2_subdev *sd, int enable)
+{
+ v4l2_dbg(1, debug, sd, "%s: %sable\n", __func__, (enable ? "en" : "dis"));
+
+ if (enable)
+ adv7511_wr_and_or(sd, 0x4b, 0x3f, 0x80);
+ else
+ adv7511_wr_and_or(sd, 0x4b, 0x3f, 0x40);
+
+ return 0;
+}
+
+static int adv7511_s_clock_freq(struct v4l2_subdev *sd, u32 freq)
+{
+ u32 N;
+
+ switch (freq) {
+ case 32000: N = 4096; break;
+ case 44100: N = 6272; break;
+ case 48000: N = 6144; break;
+ case 88200: N = 12544; break;
+ case 96000: N = 12288; break;
+ case 176400: N = 25088; break;
+ case 192000: N = 24576; break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Set N (used with CTS to regenerate the audio clock) */
+ adv7511_wr(sd, 0x01, (N >> 16) & 0xf);
+ adv7511_wr(sd, 0x02, (N >> 8) & 0xff);
+ adv7511_wr(sd, 0x03, N & 0xff);
+
+ return 0;
+}
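
The N table in adv7511_s_clock_freq matches the HDMI audio clock regeneration recommendation: for the 32/48 kHz families N works out to 128*fs/1000 (4096, 6144, 12288, 24576), while the 44.1 kHz family uses the spec's rounded values (6272, 12544, 25088) since 128*44100/1000 is not an integer. A quick cross-check of the exact-ratio cases (plain userspace C, not driver code):

#include <stdio.h>

int main(void)
{
        const unsigned int fs[] = { 32000, 48000, 96000, 192000 };
        unsigned int i;

        for (i = 0; i < sizeof(fs) / sizeof(fs[0]); i++)
                printf("fs=%u -> N=%u\n", fs[i], 128 * fs[i] / 1000);
        /* prints 4096, 6144, 12288, 24576, matching the switch above */
        return 0;
}
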
+
+static int adv7511_s_i2s_clock_freq(struct v4l2_subdev *sd, u32 freq)
+{
+ u32 i2s_sf;
+
+ switch (freq) {
+ case 32000: i2s_sf = 0x30; break;
+ case 44100: i2s_sf = 0x00; break;
+ case 48000: i2s_sf = 0x20; break;
+ case 88200: i2s_sf = 0x80; break;
+ case 96000: i2s_sf = 0xa0; break;
+ case 176400: i2s_sf = 0xc0; break;
+ case 192000: i2s_sf = 0xe0; break;
+ default:
+ return -EINVAL;
+ }
+
+	/* Set sampling frequency for I2S audio */
+ adv7511_wr_and_or(sd, 0x15, 0xf, i2s_sf);
+
+ return 0;
+}
+
+static int adv7511_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config)
+{
+ /* Only 2 channels in use for application */
+ adv7511_wr_and_or(sd, 0x73, 0xf8, 0x1);
+ /* Speaker mapping */
+ adv7511_wr(sd, 0x76, 0x00);
+
+ /* 16 bit audio word length */
+ adv7511_wr_and_or(sd, 0x14, 0xf0, 0x02);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_audio_ops adv7511_audio_ops = {
+ .s_stream = adv7511_s_audio_stream,
+ .s_clock_freq = adv7511_s_clock_freq,
+ .s_i2s_clock_freq = adv7511_s_i2s_clock_freq,
+ .s_routing = adv7511_s_routing,
+};
+
+/* --------------------- SUBDEV OPS --------------------------------------- */
+
+static const struct v4l2_subdev_ops adv7511_ops = {
+ .core = &adv7511_core_ops,
+ .pad = &adv7511_pad_ops,
+ .video = &adv7511_video_ops,
+ .audio = &adv7511_audio_ops,
+};
+
+/* ----------------------------------------------------------------------- */
+static void adv7511_dbg_dump_edid(int lvl, int debug, struct v4l2_subdev *sd, int segment, uint8_t *buf)
+{
+ if (debug >= lvl) {
+ int i, j;
+ v4l2_dbg(lvl, debug, sd, "edid segment %d\n", segment);
+ for (i = 0; i < 256; i += 16) {
+ u8 b[128];
+ u8 *bp = b;
+ if (i == 128)
+ v4l2_dbg(lvl, debug, sd, "\n");
+ for (j = i; j < i + 16; j++) {
+ sprintf(bp, "0x%02x, ", buf[j]);
+ bp += 6;
+ }
+ bp[0] = '\0';
+ v4l2_dbg(lvl, debug, sd, "%s\n", b);
+ }
+ }
+}
+
+static void adv7511_edid_handler(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct adv7511_state *state = container_of(dwork, struct adv7511_state, edid_handler);
+ struct v4l2_subdev *sd = &state->sd;
+ struct adv7511_edid_detect ed;
+
+ v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+
+ if (adv7511_check_edid_status(sd)) {
+ /* Return if we received the EDID. */
+ return;
+ }
+
+ if (adv7511_have_hotplug(sd)) {
+ /* We must retry reading the EDID several times; it is possible
+ * that initially the EDID couldn't be read due to i2c errors
+ * (DVI connectors are particularly prone to this problem). */
+ if (state->edid.read_retries) {
+ state->edid.read_retries--;
+ v4l2_dbg(1, debug, sd, "%s: edid read failed\n", __func__);
+ state->have_monitor = false;
+ adv7511_s_power(sd, false);
+ adv7511_s_power(sd, true);
+ queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
+ return;
+ }
+ }
+
+ /* We failed to read the EDID, so send an event for this. */
+ ed.present = false;
+ ed.segment = adv7511_rd(sd, 0xc4);
+ v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
+ v4l2_dbg(1, debug, sd, "%s: no edid found\n", __func__);
+}
+
+static void adv7511_audio_setup(struct v4l2_subdev *sd)
+{
+ v4l2_dbg(1, debug, sd, "%s\n", __func__);
+
+ adv7511_s_i2s_clock_freq(sd, 48000);
+ adv7511_s_clock_freq(sd, 48000);
+ adv7511_s_routing(sd, 0, 0, 0);
+}
+
+/* Configure the HDMI transmitter. */
+static void adv7511_setup(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ v4l2_dbg(1, debug, sd, "%s\n", __func__);
+
+ /* Input format: RGB 4:4:4 */
+ adv7511_wr_and_or(sd, 0x15, 0xf0, 0x0);
+ /* Output format: RGB 4:4:4 */
+ adv7511_wr_and_or(sd, 0x16, 0x7f, 0x0);
+ /* 1st order interpolation 4:2:2 -> 4:4:4 up conversion, Aspect ratio: 16:9 */
+ adv7511_wr_and_or(sd, 0x17, 0xf9, 0x06);
+ /* Disable pixel repetition */
+ adv7511_wr_and_or(sd, 0x3b, 0x9f, 0x0);
+ /* Disable CSC */
+ adv7511_wr_and_or(sd, 0x18, 0x7f, 0x0);
+ /* Output format: RGB 4:4:4, Active Format Information is valid,
+ * underscanned */
+ adv7511_wr_and_or(sd, 0x55, 0x9c, 0x12);
+ /* AVI Info frame packet enable, Audio Info frame disable */
+ adv7511_wr_and_or(sd, 0x44, 0xe7, 0x10);
+ /* Colorimetry, Active format aspect ratio: same as picture. */
+ adv7511_wr(sd, 0x56, 0xa8);
+ /* No encryption */
+ adv7511_wr_and_or(sd, 0xaf, 0xed, 0x0);
+
+ /* Positive clk edge capture for input video clock */
+ adv7511_wr_and_or(sd, 0xba, 0x1f, 0x60);
+
+ adv7511_audio_setup(sd);
+
+ v4l2_ctrl_handler_setup(&state->hdl);
+}
+
+static void adv7511_notify_monitor_detect(struct v4l2_subdev *sd)
+{
+ struct adv7511_monitor_detect mdt;
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ mdt.present = state->have_monitor;
+ v4l2_subdev_notify(sd, ADV7511_MONITOR_DETECT, (void *)&mdt);
+}
+
+static void adv7511_check_monitor_present_status(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ /* read hotplug and rx-sense state */
+ uint8_t status = adv7511_rd(sd, 0x42);
+
+ v4l2_dbg(1, debug, sd, "%s: status: 0x%x%s%s\n",
+ __func__,
+ status,
+ status & MASK_ADV7511_HPD_DETECT ? ", hotplug" : "",
+ status & MASK_ADV7511_MSEN_DETECT ? ", rx-sense" : "");
+
+ /* update read-only ctrls */
+ v4l2_ctrl_s_ctrl(state->hotplug_ctrl, adv7511_have_hotplug(sd) ? 0x1 : 0x0);
+ v4l2_ctrl_s_ctrl(state->rx_sense_ctrl, adv7511_have_rx_sense(sd) ? 0x1 : 0x0);
+ v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
+
+ if ((status & MASK_ADV7511_HPD_DETECT) && ((status & MASK_ADV7511_MSEN_DETECT) || state->edid.segments)) {
+ v4l2_dbg(1, debug, sd, "%s: hotplug and (rx-sense or edid)\n", __func__);
+ if (!state->have_monitor) {
+ v4l2_dbg(1, debug, sd, "%s: monitor detected\n", __func__);
+ state->have_monitor = true;
+ adv7511_set_isr(sd, true);
+ if (!adv7511_s_power(sd, true)) {
+ v4l2_dbg(1, debug, sd, "%s: monitor detected, powerup failed\n", __func__);
+ return;
+ }
+ adv7511_setup(sd);
+ adv7511_notify_monitor_detect(sd);
+ state->edid.read_retries = EDID_MAX_RETRIES;
+ queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
+ }
+ } else if (status & MASK_ADV7511_HPD_DETECT) {
+ v4l2_dbg(1, debug, sd, "%s: hotplug detected\n", __func__);
+ state->edid.read_retries = EDID_MAX_RETRIES;
+ queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
+ } else if (!(status & MASK_ADV7511_HPD_DETECT)) {
+ v4l2_dbg(1, debug, sd, "%s: hotplug not detected\n", __func__);
+ if (state->have_monitor) {
+ v4l2_dbg(1, debug, sd, "%s: monitor not detected\n", __func__);
+ state->have_monitor = false;
+ adv7511_notify_monitor_detect(sd);
+ }
+ adv7511_s_power(sd, false);
+ memset(&state->edid, 0, sizeof(struct adv7511_state_edid));
+ }
+}
+
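+/* An EDID block is valid when its 128 bytes sum to zero modulo 256
+ * (a simple checksum, despite the "crc" name). */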
+static bool edid_block_verify_crc(uint8_t *edid_block)
+{
+ int i;
+ uint8_t sum = 0;
+
+ for (i = 0; i < 128; i++)
+ sum += *(edid_block + i);
+ return (sum == 0);
+}
+
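+/* Each 256-byte EDID segment holds up to two 128-byte blocks; the second
+ * block is only verified if the advertised block count says it exists. */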
+static bool edid_segment_verify_crc(struct v4l2_subdev *sd, u32 segment)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ u32 blocks = state->edid.blocks;
+ uint8_t *data = state->edid.data;
+
+ if (edid_block_verify_crc(&data[segment * 256])) {
+ if ((segment + 1) * 2 <= blocks)
+ return edid_block_verify_crc(&data[segment * 256 + 128]);
+ return true;
+ }
+ return false;
+}
+
+static bool adv7511_check_edid_status(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ uint8_t edidRdy = adv7511_rd(sd, 0xc5);
+
+ v4l2_dbg(1, debug, sd, "%s: edid ready (retries: %d)\n",
+ __func__, EDID_MAX_RETRIES - state->edid.read_retries);
+
+ if (state->edid.complete)
+ return true;
+
+ if (edidRdy & MASK_ADV7511_EDID_RDY) {
+ int segment = adv7511_rd(sd, 0xc4);
+ struct adv7511_edid_detect ed;
+
+ if (segment >= EDID_MAX_SEGM) {
+ v4l2_err(sd, "edid segment number too big\n");
+ return false;
+ }
+ v4l2_dbg(1, debug, sd, "%s: got segment %d\n", __func__, segment);
+ adv7511_edid_rd(sd, 256, &state->edid.data[segment * 256]);
+ adv7511_dbg_dump_edid(2, debug, sd, segment, &state->edid.data[segment * 256]);
+ if (segment == 0) {
+ state->edid.blocks = state->edid.data[0x7e] + 1;
+ v4l2_dbg(1, debug, sd, "%s: %d blocks in total\n", __func__, state->edid.blocks);
+ }
+ if (!edid_segment_verify_crc(sd, segment)) {
+ /* edid crc error, force reread of edid segment */
+ v4l2_dbg(1, debug, sd, "%s: edid crc error\n", __func__);
+ state->have_monitor = false;
+ adv7511_s_power(sd, false);
+ adv7511_s_power(sd, true);
+ return false;
+ }
+ /* one more segment read ok */
+ state->edid.segments = segment + 1;
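+ /* Byte 0x7e of the base EDID block is the extension block count;
+ * with two blocks per segment, (count >> 1) + 1 segments are needed. */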
+ if (((state->edid.data[0x7e] >> 1) + 1) > state->edid.segments) {
+ /* Request next EDID segment */
+ v4l2_dbg(1, debug, sd, "%s: request segment %d\n", __func__, state->edid.segments);
+ adv7511_wr(sd, 0xc9, 0xf);
+ adv7511_wr(sd, 0xc4, state->edid.segments);
+ state->edid.read_retries = EDID_MAX_RETRIES;
+ queue_delayed_work(state->work_queue, &state->edid_handler, EDID_DELAY);
+ return false;
+ }
+
+ v4l2_dbg(1, debug, sd, "%s: edid complete with %d segment(s)\n", __func__, state->edid.segments);
+ state->edid.complete = true;
+
+ /* Report the EDID once all segments have been read,
+ * but report it only for segment 0. */
+ ed.present = true;
+ ed.segment = 0;
+ state->edid_detect_counter++;
+ v4l2_ctrl_s_ctrl(state->have_edid0_ctrl, state->edid.segments ? 0x1 : 0x0);
+ v4l2_subdev_notify(sd, ADV7511_EDID_DETECT, (void *)&ed);
+ return ed.present;
+ }
+
+ return false;
+}
+
+/* ----------------------------------------------------------------------- */
+/* Setup ADV7511 */
+static void adv7511_init_setup(struct v4l2_subdev *sd)
+{
+ struct adv7511_state *state = get_adv7511_state(sd);
+ struct adv7511_state_edid *edid = &state->edid;
+
+ v4l2_dbg(1, debug, sd, "%s\n", __func__);
+
+ /* clear all interrupts */
+ adv7511_wr(sd, 0x96, 0xff);
+ memset(edid, 0, sizeof(struct adv7511_state_edid));
+ state->have_monitor = false;
+ adv7511_set_isr(sd, false);
+ adv7511_s_stream(sd, false);
+ adv7511_s_audio_stream(sd, false);
+}
+
+static int adv7511_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct adv7511_state *state;
+ struct adv7511_platform_data *pdata = client->dev.platform_data;
+ struct v4l2_ctrl_handler *hdl;
+ struct v4l2_subdev *sd;
+ u8 chip_id[2];
+ int err = -EIO;
+
+ /* Check if the adapter supports the needed features */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -EIO;
+
+ state = devm_kzalloc(&client->dev, sizeof(struct adv7511_state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ /* Platform data */
+ if (!pdata) {
+ v4l_err(client, "No platform data!\n");
+ return -ENODEV;
+ }
+ memcpy(&state->pdata, pdata, sizeof(state->pdata));
+
+ sd = &state->sd;
+
+ v4l2_dbg(1, debug, sd, "detecting adv7511 client on address 0x%x\n",
+ client->addr << 1);
+
+ v4l2_i2c_subdev_init(sd, client, &adv7511_ops);
+
+ hdl = &state->hdl;
+ v4l2_ctrl_handler_init(hdl, 10);
+ /* add in ascending ID order */
+ state->hdmi_mode_ctrl = v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
+ V4L2_CID_DV_TX_MODE, V4L2_DV_TX_MODE_HDMI,
+ 0, V4L2_DV_TX_MODE_DVI_D);
+ state->hotplug_ctrl = v4l2_ctrl_new_std(hdl, NULL,
+ V4L2_CID_DV_TX_HOTPLUG, 0, 1, 0, 0);
+ state->rx_sense_ctrl = v4l2_ctrl_new_std(hdl, NULL,
+ V4L2_CID_DV_TX_RXSENSE, 0, 1, 0, 0);
+ state->have_edid0_ctrl = v4l2_ctrl_new_std(hdl, NULL,
+ V4L2_CID_DV_TX_EDID_PRESENT, 0, 1, 0, 0);
+ state->rgb_quantization_range_ctrl =
+ v4l2_ctrl_new_std_menu(hdl, &adv7511_ctrl_ops,
+ V4L2_CID_DV_TX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
+ 0, V4L2_DV_RGB_RANGE_AUTO);
+ sd->ctrl_handler = hdl;
+ if (hdl->error) {
+ err = hdl->error;
+ goto err_hdl;
+ }
+ state->hdmi_mode_ctrl->is_private = true;
+ state->hotplug_ctrl->is_private = true;
+ state->rx_sense_ctrl->is_private = true;
+ state->have_edid0_ctrl->is_private = true;
+ state->rgb_quantization_range_ctrl->is_private = true;
+
+ state->pad.flags = MEDIA_PAD_FL_SINK;
+ err = media_entity_init(&sd->entity, 1, &state->pad, 0);
+ if (err)
+ goto err_hdl;
+
+ /* EDID and CEC i2c addr */
+ state->i2c_edid_addr = state->pdata.i2c_edid << 1;
+ state->i2c_cec_addr = state->pdata.i2c_cec << 1;
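+ /* The platform data supplies 7-bit i2c addresses; they are kept here
+ * in 8-bit form and shifted back when the EDID dummy client is
+ * created below. */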
+
+ state->chip_revision = adv7511_rd(sd, 0x0);
+ chip_id[0] = adv7511_rd(sd, 0xf5);
+ chip_id[1] = adv7511_rd(sd, 0xf6);
+ if (chip_id[0] != 0x75 || chip_id[1] != 0x11) {
+ v4l2_err(sd, "chip_id != 0x7511, read 0x%02x%02x\n", chip_id[0], chip_id[1]);
+ err = -EIO;
+ goto err_entity;
+ }
+
+ state->i2c_edid = i2c_new_dummy(client->adapter, state->i2c_edid_addr >> 1);
+ if (state->i2c_edid == NULL) {
+ v4l2_err(sd, "failed to register edid i2c client\n");
+ goto err_entity;
+ }
+
+ adv7511_wr(sd, 0xe2, 0x01); /* power down cec section */
+ state->work_queue = create_singlethread_workqueue(sd->name);
+ if (state->work_queue == NULL) {
+ v4l2_err(sd, "could not create workqueue\n");
+ goto err_unreg_cec;
+ }
+
+ INIT_DELAYED_WORK(&state->edid_handler, adv7511_edid_handler);
+
+ adv7511_init_setup(sd);
+ adv7511_set_isr(sd, true);
+ adv7511_check_monitor_present_status(sd);
+
+ v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
+ client->addr << 1, client->adapter->name);
+ return 0;
+
+err_unreg_cec:
+ i2c_unregister_device(state->i2c_edid);
+err_entity:
+ media_entity_cleanup(&sd->entity);
+err_hdl:
+ v4l2_ctrl_handler_free(&state->hdl);
+ return err;
+}
+
+/* ----------------------------------------------------------------------- */
+
+static int adv7511_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct adv7511_state *state = get_adv7511_state(sd);
+
+ state->chip_revision = -1;
+
+ v4l2_dbg(1, debug, sd, "%s removed @ 0x%x (%s)\n", client->name,
+ client->addr << 1, client->adapter->name);
+
+ adv7511_init_setup(sd);
+ cancel_delayed_work(&state->edid_handler);
+ i2c_unregister_device(state->i2c_edid);
+ destroy_workqueue(state->work_queue);
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+static struct i2c_device_id adv7511_id[] = {
+ { "adv7511", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adv7511_id);
+
+static struct i2c_driver adv7511_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "adv7511",
+ },
+ .probe = adv7511_probe,
+ .remove = adv7511_remove,
+ .id_table = adv7511_id,
+};
+
+module_i2c_driver(adv7511_driver);
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 1d675b58fd7..fbfdd2fc2a3 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -38,6 +38,7 @@
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dv-timings.h>
#include <media/adv7604.h>
static int debug;
@@ -76,6 +77,7 @@ struct adv7604_state {
struct delayed_work delayed_work_enable_hotplug;
bool connector_hdmi;
bool restart_stdi_once;
+ u32 prev_input_status;
/* i2c clients */
struct i2c_client *i2c_avlink;
@@ -260,22 +262,22 @@ static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
static inline unsigned hblanking(const struct v4l2_bt_timings *t)
{
- return t->hfrontporch + t->hsync + t->hbackporch;
+ return V4L2_DV_BT_BLANKING_WIDTH(t);
}
static inline unsigned htotal(const struct v4l2_bt_timings *t)
{
- return t->width + t->hfrontporch + t->hsync + t->hbackporch;
+ return V4L2_DV_BT_FRAME_WIDTH(t);
}
static inline unsigned vblanking(const struct v4l2_bt_timings *t)
{
- return t->vfrontporch + t->vsync + t->vbackporch;
+ return V4L2_DV_BT_BLANKING_HEIGHT(t);
}
static inline unsigned vtotal(const struct v4l2_bt_timings *t)
{
- return t->height + t->vfrontporch + t->vsync + t->vbackporch;
+ return V4L2_DV_BT_FRAME_HEIGHT(t);
}
/* ----------------------------------------------------------------------- */
@@ -761,7 +763,7 @@ static int find_and_set_predefined_video_timings(struct v4l2_subdev *sd,
int i;
for (i = 0; predef_vid_timings[i].timings.bt.width; i++) {
- if (!v4l_match_dv_timings(timings, &predef_vid_timings[i].timings,
+ if (!v4l2_match_dv_timings(timings, &predef_vid_timings[i].timings,
DIGITAL_INPUT ? 250000 : 1000000))
continue;
io_write(sd, 0x00, predef_vid_timings[i].vid_std); /* video std */
@@ -990,6 +992,11 @@ static inline bool no_lock_tmds(struct v4l2_subdev *sd)
return (io_read(sd, 0x6a) & 0xe0) != 0xe0;
}
+static inline bool is_hdmi(struct v4l2_subdev *sd)
+{
+ return hdmi_read(sd, 0x05) & 0x80;
+}
+
static inline bool no_lock_sspd(struct v4l2_subdev *sd)
{
/* TODO channel 2 */
@@ -1044,38 +1051,6 @@ static int adv7604_g_input_status(struct v4l2_subdev *sd, u32 *status)
/* ----------------------------------------------------------------------- */
-static void adv7604_print_timings(struct v4l2_subdev *sd,
- struct v4l2_dv_timings *timings, const char *txt, bool detailed)
-{
- struct v4l2_bt_timings *bt = &timings->bt;
- u32 htot, vtot;
-
- if (timings->type != V4L2_DV_BT_656_1120)
- return;
-
- htot = htotal(bt);
- vtot = vtotal(bt);
-
- v4l2_info(sd, "%s %dx%d%s%d (%dx%d)",
- txt, bt->width, bt->height, bt->interlaced ? "i" : "p",
- (htot * vtot) > 0 ? ((u32)bt->pixelclock /
- (htot * vtot)) : 0,
- htot, vtot);
-
- if (detailed) {
- v4l2_info(sd, " horizontal: fp = %d, %ssync = %d, bp = %d\n",
- bt->hfrontporch,
- (bt->polarities & V4L2_DV_HSYNC_POS_POL) ? "+" : "-",
- bt->hsync, bt->hbackporch);
- v4l2_info(sd, " vertical: fp = %d, %ssync = %d, bp = %d\n",
- bt->vfrontporch,
- (bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-",
- bt->vsync, bt->vbackporch);
- v4l2_info(sd, " pixelclock: %lld, flags: 0x%x, standards: 0x%x\n",
- bt->pixelclock, bt->flags, bt->standards);
- }
-}
-
struct stdi_readback {
u16 bl, lcf, lcvs;
u8 hs_pol, vs_pol;
@@ -1187,7 +1162,7 @@ static int adv7604_dv_timings_cap(struct v4l2_subdev *sd,
cap->type = V4L2_DV_BT_656_1120;
cap->bt.max_width = 1920;
cap->bt.max_height = 1200;
- cap->bt.min_pixelclock = 27000000;
+ cap->bt.min_pixelclock = 25000000;
if (DIGITAL_INPUT)
cap->bt.max_pixelclock = 225000000;
else
@@ -1208,7 +1183,7 @@ static void adv7604_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
int i;
for (i = 0; adv7604_timings[i].bt.width; i++) {
- if (v4l_match_dv_timings(timings, &adv7604_timings[i],
+ if (v4l2_match_dv_timings(timings, &adv7604_timings[i],
DIGITAL_INPUT ? 250000 : 1000000)) {
*timings = adv7604_timings[i];
break;
@@ -1242,12 +1217,21 @@ static int adv7604_query_dv_timings(struct v4l2_subdev *sd,
V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
if (DIGITAL_INPUT) {
+ uint32_t freq;
+
timings->type = V4L2_DV_BT_656_1120;
bt->width = (hdmi_read(sd, 0x07) & 0x0f) * 256 + hdmi_read(sd, 0x08);
bt->height = (hdmi_read(sd, 0x09) & 0x0f) * 256 + hdmi_read(sd, 0x0a);
- bt->pixelclock = (hdmi_read(sd, 0x06) * 1000000) +
+ freq = (hdmi_read(sd, 0x06) * 1000000) +
((hdmi_read(sd, 0x3b) & 0x30) >> 4) * 250000;
+ if (is_hdmi(sd)) {
+ /* adjust for deep color mode */
+ unsigned bits_per_channel = ((hdmi_read(sd, 0x0b) & 0x60) >> 4) + 8;
+
+ freq = freq * 8 / bits_per_channel;
+ }
+ bt->pixelclock = freq;
bt->hfrontporch = (hdmi_read(sd, 0x20) & 0x03) * 256 +
hdmi_read(sd, 0x21);
bt->hsync = (hdmi_read(sd, 0x22) & 0x03) * 256 +
@@ -1329,8 +1313,8 @@ found:
}
if (debug > 1)
- adv7604_print_timings(sd, timings,
- "adv7604_query_dv_timings:", true);
+ v4l2_print_dv_timings(sd->name, "adv7604_query_dv_timings: ",
+ timings, true);
return 0;
}
@@ -1372,8 +1356,8 @@ static int adv7604_s_dv_timings(struct v4l2_subdev *sd,
if (debug > 1)
- adv7604_print_timings(sd, timings,
- "adv7604_s_dv_timings:", true);
+ v4l2_print_dv_timings(sd->name, "adv7604_s_dv_timings: ",
+ timings, true);
return 0;
}
@@ -1534,6 +1518,7 @@ static int adv7604_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
{
struct adv7604_state *state = to_state(sd);
u8 fmt_change, fmt_change_digital, tx_5v;
+ u32 input_status;
/* format change */
fmt_change = io_read(sd, 0x43) & 0x98;
@@ -1544,9 +1529,18 @@ static int adv7604_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
io_write(sd, 0x6c, fmt_change_digital);
if (fmt_change || fmt_change_digital) {
v4l2_dbg(1, debug, sd,
- "%s: ADV7604_FMT_CHANGE, fmt_change = 0x%x, fmt_change_digital = 0x%x\n",
+ "%s: fmt_change = 0x%x, fmt_change_digital = 0x%x\n",
__func__, fmt_change, fmt_change_digital);
- v4l2_subdev_notify(sd, ADV7604_FMT_CHANGE, NULL);
+
+ adv7604_g_input_status(sd, &input_status);
+ if (input_status != state->prev_input_status) {
+ v4l2_dbg(1, debug, sd,
+ "%s: input_status = 0x%x, prev_input_status = 0x%x\n",
+ __func__, input_status, state->prev_input_status);
+ state->prev_input_status = input_status;
+ v4l2_subdev_notify(sd, ADV7604_FMT_CHANGE, NULL);
+ }
+
if (handled)
*handled = true;
}
@@ -1625,7 +1619,7 @@ static void print_avi_infoframe(struct v4l2_subdev *sd)
u8 avi_len;
u8 avi_ver;
- if (!(hdmi_read(sd, 0x05) & 0x80)) {
+ if (!is_hdmi(sd)) {
v4l2_info(sd, "receive DVI-D signal (AVI infoframe not supported)\n");
return;
}
@@ -1686,6 +1680,12 @@ static int adv7604_log_status(struct v4l2_subdev *sd)
"RGB limited range (16-235)",
"RGB full range (0-255)",
};
+ char *deep_color_mode_txt[4] = {
+ "8-bits per channel",
+ "10-bits per channel",
+ "12-bits per channel",
+ "16-bits per channel (not supported)"
+ };
v4l2_info(sd, "-----Chip status-----\n");
v4l2_info(sd, "Chip power: %s\n", no_power(sd) ? "off" : "on");
@@ -1723,8 +1723,13 @@ static int adv7604_log_status(struct v4l2_subdev *sd)
if (adv7604_query_dv_timings(sd, &timings))
v4l2_info(sd, "No video detected\n");
else
- adv7604_print_timings(sd, &timings, "Detected format:", true);
- adv7604_print_timings(sd, &state->timings, "Configured format:", true);
+ v4l2_print_dv_timings(sd->name, "Detected format: ",
+ &timings, true);
+ v4l2_print_dv_timings(sd->name, "Configured format: ",
+ &state->timings, true);
+
+ if (no_signal(sd))
+ return 0;
v4l2_info(sd, "-----Color space-----\n");
v4l2_info(sd, "RGB quantization range ctrl: %s\n",
@@ -1735,15 +1740,40 @@ static int adv7604_log_status(struct v4l2_subdev *sd)
(reg_io_0x02 & 0x02) ? "RGB" : "YCbCr",
(reg_io_0x02 & 0x04) ? "(16-235)" : "(0-255)",
((reg_io_0x02 & 0x04) ^ (reg_io_0x02 & 0x01)) ?
- "enabled" : "disabled");
+ "enabled" : "disabled");
v4l2_info(sd, "Color space conversion: %s\n",
csc_coeff_sel_rb[cp_read(sd, 0xfc) >> 4]);
- /* Digital video */
- if (DIGITAL_INPUT) {
- v4l2_info(sd, "-----HDMI status-----\n");
- v4l2_info(sd, "HDCP encrypted content: %s\n",
- hdmi_read(sd, 0x05) & 0x40 ? "true" : "false");
+ if (!DIGITAL_INPUT)
+ return 0;
+
+ v4l2_info(sd, "-----%s status-----\n", is_hdmi(sd) ? "HDMI" : "DVI-D");
+ v4l2_info(sd, "HDCP encrypted content: %s\n", (hdmi_read(sd, 0x05) & 0x40) ? "true" : "false");
+ v4l2_info(sd, "HDCP keys read: %s%s\n",
+ (hdmi_read(sd, 0x04) & 0x20) ? "yes" : "no",
+ (hdmi_read(sd, 0x04) & 0x10) ? "ERROR" : "");
+ if (is_hdmi(sd)) {
+ bool audio_pll_locked = hdmi_read(sd, 0x04) & 0x01;
+ bool audio_sample_packet_detect = hdmi_read(sd, 0x18) & 0x01;
+ bool audio_mute = io_read(sd, 0x65) & 0x40;
+
+ v4l2_info(sd, "Audio: pll %s, samples %s, %s\n",
+ audio_pll_locked ? "locked" : "not locked",
+ audio_sample_packet_detect ? "detected" : "not detected",
+ audio_mute ? "muted" : "enabled");
+ if (audio_pll_locked && audio_sample_packet_detect) {
+ v4l2_info(sd, "Audio format: %s\n",
+ (hdmi_read(sd, 0x07) & 0x20) ? "multi-channel" : "stereo");
+ }
+ v4l2_info(sd, "Audio CTS: %u\n", (hdmi_read(sd, 0x5b) << 12) +
+ (hdmi_read(sd, 0x5c) << 8) +
+ (hdmi_read(sd, 0x5d) & 0xf0));
+ v4l2_info(sd, "Audio N: %u\n", ((hdmi_read(sd, 0x5d) & 0x0f) << 16) +
+ (hdmi_read(sd, 0x5e) << 8) +
+ hdmi_read(sd, 0x5f));
+ v4l2_info(sd, "AV Mute: %s\n", (hdmi_read(sd, 0x04) & 0x40) ? "on" : "off");
+
+ v4l2_info(sd, "Deep color mode: %s\n", deep_color_mode_txt[(hdmi_read(sd, 0x0b) & 0x60) >> 5]);
print_avi_infoframe(sd);
}
@@ -1952,6 +1982,10 @@ static int adv7604_probe(struct i2c_client *client,
return -ENOMEM;
}
+ /* initialize variables */
+ state->restart_stdi_once = true;
+ state->prev_input_status = ~0;
+
/* platform data */
if (!pdata) {
v4l_err(client, "No platform data!\n");
@@ -1987,29 +2021,30 @@ static int adv7604_probe(struct i2c_client *client,
/* private controls */
state->detect_tx_5v_ctrl = v4l2_ctrl_new_std(hdl, NULL,
V4L2_CID_DV_RX_POWER_PRESENT, 0, 1, 0, 0);
- state->detect_tx_5v_ctrl->is_private = true;
state->rgb_quantization_range_ctrl =
v4l2_ctrl_new_std_menu(hdl, &adv7604_ctrl_ops,
V4L2_CID_DV_RX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
0, V4L2_DV_RGB_RANGE_AUTO);
- state->rgb_quantization_range_ctrl->is_private = true;
/* custom controls */
state->analog_sampling_phase_ctrl =
v4l2_ctrl_new_custom(hdl, &adv7604_ctrl_analog_sampling_phase, NULL);
- state->analog_sampling_phase_ctrl->is_private = true;
state->free_run_color_manual_ctrl =
v4l2_ctrl_new_custom(hdl, &adv7604_ctrl_free_run_color_manual, NULL);
- state->free_run_color_manual_ctrl->is_private = true;
state->free_run_color_ctrl =
v4l2_ctrl_new_custom(hdl, &adv7604_ctrl_free_run_color, NULL);
- state->free_run_color_ctrl->is_private = true;
sd->ctrl_handler = hdl;
if (hdl->error) {
err = hdl->error;
goto err_hdl;
}
+ state->detect_tx_5v_ctrl->is_private = true;
+ state->rgb_quantization_range_ctrl->is_private = true;
+ state->analog_sampling_phase_ctrl->is_private = true;
+ state->free_run_color_manual_ctrl->is_private = true;
+ state->free_run_color_ctrl->is_private = true;
+
if (adv7604_s_detect_tx_5v_ctrl(sd)) {
err = -ENODEV;
goto err_hdl;
@@ -2035,7 +2070,6 @@ static int adv7604_probe(struct i2c_client *client,
v4l2_err(sd, "failed to create all i2c clients\n");
goto err_i2c;
}
- state->restart_stdi_once = true;
/* work queues */
state->work_queues = create_singlethread_workqueue(client->name);
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
new file mode 100644
index 00000000000..d1748901337
--- /dev/null
+++ b/drivers/media/i2c/adv7842.c
@@ -0,0 +1,2946 @@
+/*
+ * adv7842 - Analog Devices ADV7842 video decoder driver
+ *
+ * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+/*
+ * References (c = chapter, p = page):
+ * REF_01 - Analog Devices, ADV7842, Register Settings Recommendations,
+ * Revision 2.5, June 2010
+ * REF_02 - Analog Devices, Register map documentation, Documentation of
+ * the register maps, Software manual, Rev. F, June 2010
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/videodev2.h>
+#include <linux/workqueue.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/adv7842.h>
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "debug level (0-2)");
+
+MODULE_DESCRIPTION("Analog Devices ADV7842 video decoder driver");
+MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>");
+MODULE_AUTHOR("Martin Bugge <marbugge@cisco.com>");
+MODULE_LICENSE("GPL");
+
+/* ADV7842 system clock frequency */
+#define ADV7842_fsc (28636360)
+
+/*
+**********************************************************************
+*
+* Arrays with configuration parameters for the ADV7842
+*
+**********************************************************************
+*/
+
+struct adv7842_state {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler hdl;
+ enum adv7842_mode mode;
+ struct v4l2_dv_timings timings;
+ enum adv7842_vid_std_select vid_std_select;
+ v4l2_std_id norm;
+ struct {
+ u8 edid[256];
+ u32 present;
+ } hdmi_edid;
+ struct {
+ u8 edid[256];
+ u32 present;
+ } vga_edid;
+ struct v4l2_fract aspect_ratio;
+ u32 rgb_quantization_range;
+ bool is_cea_format;
+ struct workqueue_struct *work_queues;
+ struct delayed_work delayed_work_enable_hotplug;
+ bool connector_hdmi;
+ bool hdmi_port_a;
+
+ /* i2c clients */
+ struct i2c_client *i2c_sdp_io;
+ struct i2c_client *i2c_sdp;
+ struct i2c_client *i2c_cp;
+ struct i2c_client *i2c_vdp;
+ struct i2c_client *i2c_afe;
+ struct i2c_client *i2c_hdmi;
+ struct i2c_client *i2c_repeater;
+ struct i2c_client *i2c_edid;
+ struct i2c_client *i2c_infoframe;
+ struct i2c_client *i2c_cec;
+ struct i2c_client *i2c_avlink;
+
+ /* controls */
+ struct v4l2_ctrl *detect_tx_5v_ctrl;
+ struct v4l2_ctrl *analog_sampling_phase_ctrl;
+ struct v4l2_ctrl *free_run_color_ctrl_manual;
+ struct v4l2_ctrl *free_run_color_ctrl;
+ struct v4l2_ctrl *rgb_quantization_range_ctrl;
+};
+
+/* Unsupported timings. This device cannot support 720p30. */
+static const struct v4l2_dv_timings adv7842_timings_exceptions[] = {
+ V4L2_DV_BT_CEA_1280X720P30,
+ { }
+};
+
+static bool adv7842_check_dv_timings(const struct v4l2_dv_timings *t, void *hdl)
+{
+ int i;
+
+ for (i = 0; adv7842_timings_exceptions[i].bt.width; i++)
+ if (v4l2_match_dv_timings(t, adv7842_timings_exceptions + i, 0))
+ return false;
+ return true;
+}
+
+struct adv7842_video_standards {
+ struct v4l2_dv_timings timings;
+ u8 vid_std;
+ u8 v_freq;
+};
+
+/* sorted by number of lines */
+static const struct adv7842_video_standards adv7842_prim_mode_comp[] = {
+ /* { V4L2_DV_BT_CEA_720X480P59_94, 0x0a, 0x00 }, TODO flickering */
+ { V4L2_DV_BT_CEA_720X576P50, 0x0b, 0x00 },
+ { V4L2_DV_BT_CEA_1280X720P50, 0x19, 0x01 },
+ { V4L2_DV_BT_CEA_1280X720P60, 0x19, 0x00 },
+ { V4L2_DV_BT_CEA_1920X1080P24, 0x1e, 0x04 },
+ { V4L2_DV_BT_CEA_1920X1080P25, 0x1e, 0x03 },
+ { V4L2_DV_BT_CEA_1920X1080P30, 0x1e, 0x02 },
+ { V4L2_DV_BT_CEA_1920X1080P50, 0x1e, 0x01 },
+ { V4L2_DV_BT_CEA_1920X1080P60, 0x1e, 0x00 },
+ /* TODO add 1920x1080P60_RB (CVT timing) */
+ { },
+};
+
+/* sorted by number of lines */
+static const struct adv7842_video_standards adv7842_prim_mode_gr[] = {
+ { V4L2_DV_BT_DMT_640X480P60, 0x08, 0x00 },
+ { V4L2_DV_BT_DMT_640X480P72, 0x09, 0x00 },
+ { V4L2_DV_BT_DMT_640X480P75, 0x0a, 0x00 },
+ { V4L2_DV_BT_DMT_640X480P85, 0x0b, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P56, 0x00, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P60, 0x01, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P72, 0x02, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P75, 0x03, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P85, 0x04, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P60, 0x0c, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P70, 0x0d, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P75, 0x0e, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P85, 0x0f, 0x00 },
+ { V4L2_DV_BT_DMT_1280X1024P60, 0x05, 0x00 },
+ { V4L2_DV_BT_DMT_1280X1024P75, 0x06, 0x00 },
+ { V4L2_DV_BT_DMT_1360X768P60, 0x12, 0x00 },
+ { V4L2_DV_BT_DMT_1366X768P60, 0x13, 0x00 },
+ { V4L2_DV_BT_DMT_1400X1050P60, 0x14, 0x00 },
+ { V4L2_DV_BT_DMT_1400X1050P75, 0x15, 0x00 },
+ { V4L2_DV_BT_DMT_1600X1200P60, 0x16, 0x00 }, /* TODO not tested */
+ /* TODO add 1600X1200P60_RB (not a DMT timing) */
+ { V4L2_DV_BT_DMT_1680X1050P60, 0x18, 0x00 },
+ { V4L2_DV_BT_DMT_1920X1200P60_RB, 0x19, 0x00 }, /* TODO not tested */
+ { },
+};
+
+/* sorted by number of lines */
+static const struct adv7842_video_standards adv7842_prim_mode_hdmi_comp[] = {
+ { V4L2_DV_BT_CEA_720X480P59_94, 0x0a, 0x00 },
+ { V4L2_DV_BT_CEA_720X576P50, 0x0b, 0x00 },
+ { V4L2_DV_BT_CEA_1280X720P50, 0x13, 0x01 },
+ { V4L2_DV_BT_CEA_1280X720P60, 0x13, 0x00 },
+ { V4L2_DV_BT_CEA_1920X1080P24, 0x1e, 0x04 },
+ { V4L2_DV_BT_CEA_1920X1080P25, 0x1e, 0x03 },
+ { V4L2_DV_BT_CEA_1920X1080P30, 0x1e, 0x02 },
+ { V4L2_DV_BT_CEA_1920X1080P50, 0x1e, 0x01 },
+ { V4L2_DV_BT_CEA_1920X1080P60, 0x1e, 0x00 },
+ { },
+};
+
+/* sorted by number of lines */
+static const struct adv7842_video_standards adv7842_prim_mode_hdmi_gr[] = {
+ { V4L2_DV_BT_DMT_640X480P60, 0x08, 0x00 },
+ { V4L2_DV_BT_DMT_640X480P72, 0x09, 0x00 },
+ { V4L2_DV_BT_DMT_640X480P75, 0x0a, 0x00 },
+ { V4L2_DV_BT_DMT_640X480P85, 0x0b, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P56, 0x00, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P60, 0x01, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P72, 0x02, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P75, 0x03, 0x00 },
+ { V4L2_DV_BT_DMT_800X600P85, 0x04, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P60, 0x0c, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P70, 0x0d, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P75, 0x0e, 0x00 },
+ { V4L2_DV_BT_DMT_1024X768P85, 0x0f, 0x00 },
+ { V4L2_DV_BT_DMT_1280X1024P60, 0x05, 0x00 },
+ { V4L2_DV_BT_DMT_1280X1024P75, 0x06, 0x00 },
+ { },
+};
+
+/* ----------------------------------------------------------------------- */
+
+static inline struct adv7842_state *to_state(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct adv7842_state, sd);
+}
+
+static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
+{
+ return &container_of(ctrl->handler, struct adv7842_state, hdl)->sd;
+}
+
+static inline unsigned hblanking(const struct v4l2_bt_timings *t)
+{
+ return V4L2_DV_BT_BLANKING_WIDTH(t);
+}
+
+static inline unsigned htotal(const struct v4l2_bt_timings *t)
+{
+ return V4L2_DV_BT_FRAME_WIDTH(t);
+}
+
+static inline unsigned vblanking(const struct v4l2_bt_timings *t)
+{
+ return V4L2_DV_BT_BLANKING_HEIGHT(t);
+}
+
+static inline unsigned vtotal(const struct v4l2_bt_timings *t)
+{
+ return V4L2_DV_BT_FRAME_HEIGHT(t);
+}
+
+
+/* ----------------------------------------------------------------------- */
+
+static s32 adv_smbus_read_byte_data_check(struct i2c_client *client,
+ u8 command, bool check)
+{
+ union i2c_smbus_data data;
+
+ if (!i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_READ, command,
+ I2C_SMBUS_BYTE_DATA, &data))
+ return data.byte;
+ if (check)
+ v4l_err(client, "error reading %02x, %02x\n",
+ client->addr, command);
+ return -EIO;
+}
+
+static s32 adv_smbus_read_byte_data(struct i2c_client *client, u8 command)
+{
+ int i;
+
+ for (i = 0; i < 3; i++) {
+ int ret = adv_smbus_read_byte_data_check(client, command, true);
+
+ if (ret >= 0) {
+ if (i)
+ v4l_err(client, "read ok after %d retries\n", i);
+ return ret;
+ }
+ }
+ v4l_err(client, "read failed\n");
+ return -EIO;
+}
+
+static s32 adv_smbus_write_byte_data(struct i2c_client *client,
+ u8 command, u8 value)
+{
+ union i2c_smbus_data data;
+ int err;
+ int i;
+
+ data.byte = value;
+ for (i = 0; i < 3; i++) {
+ err = i2c_smbus_xfer(client->adapter, client->addr,
+ client->flags,
+ I2C_SMBUS_WRITE, command,
+ I2C_SMBUS_BYTE_DATA, &data);
+ if (!err)
+ break;
+ }
+ if (err < 0)
+ v4l_err(client, "error writing %02x, %02x, %02x\n",
+ client->addr, command, value);
+ return err;
+}
+
+static void adv_smbus_write_byte_no_check(struct i2c_client *client,
+ u8 command, u8 value)
+{
+ union i2c_smbus_data data;
+ data.byte = value;
+
+ i2c_smbus_xfer(client->adapter, client->addr,
+ client->flags,
+ I2C_SMBUS_WRITE, command,
+ I2C_SMBUS_BYTE_DATA, &data);
+}
+
+static s32 adv_smbus_write_i2c_block_data(struct i2c_client *client,
+ u8 command, unsigned length, const u8 *values)
+{
+ union i2c_smbus_data data;
+
+ if (length > I2C_SMBUS_BLOCK_MAX)
+ length = I2C_SMBUS_BLOCK_MAX;
+ data.block[0] = length;
+ memcpy(data.block + 1, values, length);
+ return i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+ I2C_SMBUS_WRITE, command,
+ I2C_SMBUS_I2C_BLOCK_DATA, &data);
+}
+
+/* ----------------------------------------------------------------------- */
+
+static inline int io_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return adv_smbus_read_byte_data(client, reg);
+}
+
+static inline int io_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ return adv_smbus_write_byte_data(client, reg, val);
+}
+
+static inline int io_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
+{
+ return io_write(sd, reg, (io_read(sd, reg) & mask) | val);
+}
+
+static inline int avlink_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_read_byte_data(state->i2c_avlink, reg);
+}
+
+static inline int avlink_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_write_byte_data(state->i2c_avlink, reg, val);
+}
+
+static inline int cec_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_read_byte_data(state->i2c_cec, reg);
+}
+
+static inline int cec_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_write_byte_data(state->i2c_cec, reg, val);
+}
+
+static inline int cec_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
+{
+ return cec_write(sd, reg, (cec_read(sd, reg) & mask) | val);
+}
+
+static inline int infoframe_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_read_byte_data(state->i2c_infoframe, reg);
+}
+
+static inline int infoframe_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_write_byte_data(state->i2c_infoframe, reg, val);
+}
+
+static inline int sdp_io_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_read_byte_data(state->i2c_sdp_io, reg);
+}
+
+static inline int sdp_io_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_write_byte_data(state->i2c_sdp_io, reg, val);
+}
+
+static inline int sdp_io_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
+{
+ return sdp_io_write(sd, reg, (sdp_io_read(sd, reg) & mask) | val);
+}
+
+static inline int sdp_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_read_byte_data(state->i2c_sdp, reg);
+}
+
+static inline int sdp_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_write_byte_data(state->i2c_sdp, reg, val);
+}
+
+static inline int sdp_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
+{
+ return sdp_write(sd, reg, (sdp_read(sd, reg) & mask) | val);
+}
+
+static inline int afe_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_read_byte_data(state->i2c_afe, reg);
+}
+
+static inline int afe_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_write_byte_data(state->i2c_afe, reg, val);
+}
+
+static inline int afe_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
+{
+ return afe_write(sd, reg, (afe_read(sd, reg) & mask) | val);
+}
+
+static inline int rep_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_read_byte_data(state->i2c_repeater, reg);
+}
+
+static inline int rep_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_write_byte_data(state->i2c_repeater, reg, val);
+}
+
+static inline int rep_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
+{
+ return rep_write(sd, reg, (rep_read(sd, reg) & mask) | val);
+}
+
+static inline int edid_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_read_byte_data(state->i2c_edid, reg);
+}
+
+static inline int edid_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_write_byte_data(state->i2c_edid, reg, val);
+}
+
+static inline int hdmi_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_read_byte_data(state->i2c_hdmi, reg);
+}
+
+static inline int hdmi_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_write_byte_data(state->i2c_hdmi, reg, val);
+}
+
+static inline int cp_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_read_byte_data(state->i2c_cp, reg);
+}
+
+static inline int cp_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_write_byte_data(state->i2c_cp, reg, val);
+}
+
+static inline int cp_write_and_or(struct v4l2_subdev *sd, u8 reg, u8 mask, u8 val)
+{
+ return cp_write(sd, reg, (cp_read(sd, reg) & mask) | val);
+}
+
+static inline int vdp_read(struct v4l2_subdev *sd, u8 reg)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_read_byte_data(state->i2c_vdp, reg);
+}
+
+static inline int vdp_write(struct v4l2_subdev *sd, u8 reg, u8 val)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return adv_smbus_write_byte_data(state->i2c_vdp, reg, val);
+}
+
+static void main_reset(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+
+ adv_smbus_write_byte_no_check(client, 0xff, 0x80);
+
+ mdelay(2);
+}
+
+/* ----------------------------------------------------------------------- */
+
+static inline bool is_digital_input(struct v4l2_subdev *sd)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ return state->mode == ADV7842_MODE_HDMI;
+}
+
+static const struct v4l2_dv_timings_cap adv7842_timings_cap_analog = {
+ .type = V4L2_DV_BT_656_1120,
+ .bt = {
+ .max_width = 1920,
+ .max_height = 1200,
+ .min_pixelclock = 25000000,
+ .max_pixelclock = 170000000,
+ .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
+ .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
+ V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM,
+ },
+};
+
+static const struct v4l2_dv_timings_cap adv7842_timings_cap_digital = {
+ .type = V4L2_DV_BT_656_1120,
+ .bt = {
+ .max_width = 1920,
+ .max_height = 1200,
+ .min_pixelclock = 25000000,
+ .max_pixelclock = 225000000,
+ .standards = V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
+ V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
+ .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE |
+ V4L2_DV_BT_CAP_REDUCED_BLANKING | V4L2_DV_BT_CAP_CUSTOM,
+ },
+};
+
+static inline const struct v4l2_dv_timings_cap *
+adv7842_get_dv_timings_cap(struct v4l2_subdev *sd)
+{
+ return is_digital_input(sd) ? &adv7842_timings_cap_digital :
+ &adv7842_timings_cap_analog;
+}
+
+/* ----------------------------------------------------------------------- */
+
+static void adv7842_delayed_work_enable_hotplug(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct adv7842_state *state = container_of(dwork,
+ struct adv7842_state, delayed_work_enable_hotplug);
+ struct v4l2_subdev *sd = &state->sd;
+ int present = state->hdmi_edid.present;
+ u8 mask = 0;
+
+ v4l2_dbg(2, debug, sd, "%s: enable hotplug on ports: 0x%x\n",
+ __func__, present);
+
+ if (present & 0x1)
+ mask |= 0x20; /* port A */
+ if (present & 0x2)
+ mask |= 0x10; /* port B */
+ io_write_and_or(sd, 0x20, 0xcf, mask);
+}
+
+static int edid_write_vga_segment(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct adv7842_state *state = to_state(sd);
+ const u8 *val = state->vga_edid.edid;
+ int err = 0;
+ int i;
+
+ v4l2_dbg(2, debug, sd, "%s: write EDID on VGA port\n", __func__);
+
+ /* HPA disable on port A and B */
+ io_write_and_or(sd, 0x20, 0xcf, 0x00);
+
+ /* Disable I2C access to internal EDID ram from VGA DDC port */
+ rep_write_and_or(sd, 0x7f, 0x7f, 0x00);
+
+ /* edid segment pointer '1' for VGA port */
+ rep_write_and_or(sd, 0x77, 0xef, 0x10);
+
+ for (i = 0; !err && i < 256; i += I2C_SMBUS_BLOCK_MAX)
+ err = adv_smbus_write_i2c_block_data(state->i2c_edid, i,
+ I2C_SMBUS_BLOCK_MAX, val + i);
+ if (err)
+ return err;
+
+ /* Calculates the checksums and enables I2C access
+ * to internal EDID ram from VGA DDC port.
+ */
+ rep_write_and_or(sd, 0x7f, 0x7f, 0x80);
+
+ for (i = 0; i < 1000; i++) {
+ if (rep_read(sd, 0x79) & 0x20)
+ break;
+ mdelay(1);
+ }
+ if (i == 1000) {
+ v4l_err(client, "error enabling edid on VGA port\n");
+ return -EIO;
+ }
+
+ /* enable hotplug after 200 ms */
+ queue_delayed_work(state->work_queues,
+ &state->delayed_work_enable_hotplug, HZ / 5);
+
+ return 0;
+}
+
+static int edid_spa_location(const u8 *edid)
+{
+ u8 d;
+
+ /*
+ * TODO: improve and update for other CEA extensions;
+ * currently only for 1 segment (256 bytes),
+ * i.e. 1 extension block and CEA revision 3.
+ */
+ if ((edid[0x7e] != 1) ||
+ (edid[0x80] != 0x02) ||
+ (edid[0x81] != 0x03)) {
+ return -EINVAL;
+ }
+ /*
+ * search Vendor Specific Data Block (tag 3)
+ */
+ d = edid[0x82] & 0x7f;
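+ /* d is the offset to the Detailed Timing Descriptors, so the data block
+ * collection spans bytes 4..d-1 of the extension. The HDMI VSDB carries
+ * the Source Physical Address in bytes 4-5 of the block (after the
+ * 3-byte IEEE OUI), hence the i + 4 below. */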
+ if (d > 4) {
+ int i = 0x84;
+ int end = 0x80 + d;
+ do {
+ u8 tag = edid[i]>>5;
+ u8 len = edid[i] & 0x1f;
+
+ if ((tag == 3) && (len >= 5))
+ return i + 4;
+ i += len + 1;
+ } while (i < end);
+ }
+ return -EINVAL;
+}
+
+static int edid_write_hdmi_segment(struct v4l2_subdev *sd, u8 port)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct adv7842_state *state = to_state(sd);
+ const u8 *val = state->hdmi_edid.edid;
+ u8 cur_mask = rep_read(sd, 0x77) & 0x0c;
+ u8 mask = port == 0 ? 0x4 : 0x8;
+ int spa_loc = edid_spa_location(val);
+ int err = 0;
+ int i;
+
+ v4l2_dbg(2, debug, sd, "%s: write EDID on port %d (spa at 0x%x)\n",
+ __func__, port, spa_loc);
+
+ /* HPA disable on port A and B */
+ io_write_and_or(sd, 0x20, 0xcf, 0x00);
+
+ /* Disable I2C access to internal EDID ram from HDMI DDC ports */
+ rep_write_and_or(sd, 0x77, 0xf3, 0x00);
+
+ /* edid segment pointer '0' for HDMI ports */
+ rep_write_and_or(sd, 0x77, 0xef, 0x00);
+
+ for (i = 0; !err && i < 256; i += I2C_SMBUS_BLOCK_MAX)
+ err = adv_smbus_write_i2c_block_data(state->i2c_edid, i,
+ I2C_SMBUS_BLOCK_MAX, val + i);
+ if (err)
+ return err;
+
+ if (spa_loc > 0) {
+ if (port == 0) {
+ /* port A SPA */
+ rep_write(sd, 0x72, val[spa_loc]);
+ rep_write(sd, 0x73, val[spa_loc + 1]);
+ } else {
+ /* port B SPA */
+ rep_write(sd, 0x74, val[spa_loc]);
+ rep_write(sd, 0x75, val[spa_loc + 1]);
+ }
+ rep_write(sd, 0x76, spa_loc);
+ } else {
+ /* default register values for SPA */
+ if (port == 0) {
+ /* port A SPA */
+ rep_write(sd, 0x72, 0);
+ rep_write(sd, 0x73, 0);
+ } else {
+ /* port B SPA */
+ rep_write(sd, 0x74, 0);
+ rep_write(sd, 0x75, 0);
+ }
+ rep_write(sd, 0x76, 0xc0);
+ }
+ rep_write_and_or(sd, 0x77, 0xbf, 0x00);
+
+ /* Calculates the checksums and enables I2C access to internal
+ * EDID ram from HDMI DDC ports
+ */
+ rep_write_and_or(sd, 0x77, 0xf3, mask | cur_mask);
+
+ for (i = 0; i < 1000; i++) {
+ if (rep_read(sd, 0x7d) & mask)
+ break;
+ mdelay(1);
+ }
+ if (i == 1000) {
+ v4l_err(client, "error enabling edid on port %d\n", port);
+ return -EIO;
+ }
+
+ /* enable hotplug after 200 ms */
+ queue_delayed_work(state->work_queues,
+ &state->delayed_work_enable_hotplug, HZ / 5);
+
+ return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+static void adv7842_inv_register(struct v4l2_subdev *sd)
+{
+ v4l2_info(sd, "0x000-0x0ff: IO Map\n");
+ v4l2_info(sd, "0x100-0x1ff: AVLink Map\n");
+ v4l2_info(sd, "0x200-0x2ff: CEC Map\n");
+ v4l2_info(sd, "0x300-0x3ff: InfoFrame Map\n");
+ v4l2_info(sd, "0x400-0x4ff: SDP_IO Map\n");
+ v4l2_info(sd, "0x500-0x5ff: SDP Map\n");
+ v4l2_info(sd, "0x600-0x6ff: AFE Map\n");
+ v4l2_info(sd, "0x700-0x7ff: Repeater Map\n");
+ v4l2_info(sd, "0x800-0x8ff: EDID Map\n");
+ v4l2_info(sd, "0x900-0x9ff: HDMI Map\n");
+ v4l2_info(sd, "0xa00-0xaff: CP Map\n");
+ v4l2_info(sd, "0xb00-0xbff: VDP Map\n");
+}
+
+static int adv7842_g_register(struct v4l2_subdev *sd,
+ struct v4l2_dbg_register *reg)
+{
+ reg->size = 1;
+ switch (reg->reg >> 8) {
+ case 0:
+ reg->val = io_read(sd, reg->reg & 0xff);
+ break;
+ case 1:
+ reg->val = avlink_read(sd, reg->reg & 0xff);
+ break;
+ case 2:
+ reg->val = cec_read(sd, reg->reg & 0xff);
+ break;
+ case 3:
+ reg->val = infoframe_read(sd, reg->reg & 0xff);
+ break;
+ case 4:
+ reg->val = sdp_io_read(sd, reg->reg & 0xff);
+ break;
+ case 5:
+ reg->val = sdp_read(sd, reg->reg & 0xff);
+ break;
+ case 6:
+ reg->val = afe_read(sd, reg->reg & 0xff);
+ break;
+ case 7:
+ reg->val = rep_read(sd, reg->reg & 0xff);
+ break;
+ case 8:
+ reg->val = edid_read(sd, reg->reg & 0xff);
+ break;
+ case 9:
+ reg->val = hdmi_read(sd, reg->reg & 0xff);
+ break;
+ case 0xa:
+ reg->val = cp_read(sd, reg->reg & 0xff);
+ break;
+ case 0xb:
+ reg->val = vdp_read(sd, reg->reg & 0xff);
+ break;
+ default:
+ v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
+ adv7842_inv_register(sd);
+ break;
+ }
+ return 0;
+}
+
+static int adv7842_s_register(struct v4l2_subdev *sd,
+ const struct v4l2_dbg_register *reg)
+{
+ u8 val = reg->val & 0xff;
+
+ switch (reg->reg >> 8) {
+ case 0:
+ io_write(sd, reg->reg & 0xff, val);
+ break;
+ case 1:
+ avlink_write(sd, reg->reg & 0xff, val);
+ break;
+ case 2:
+ cec_write(sd, reg->reg & 0xff, val);
+ break;
+ case 3:
+ infoframe_write(sd, reg->reg & 0xff, val);
+ break;
+ case 4:
+ sdp_io_write(sd, reg->reg & 0xff, val);
+ break;
+ case 5:
+ sdp_write(sd, reg->reg & 0xff, val);
+ break;
+ case 6:
+ afe_write(sd, reg->reg & 0xff, val);
+ break;
+ case 7:
+ rep_write(sd, reg->reg & 0xff, val);
+ break;
+ case 8:
+ edid_write(sd, reg->reg & 0xff, val);
+ break;
+ case 9:
+ hdmi_write(sd, reg->reg & 0xff, val);
+ break;
+ case 0xa:
+ cp_write(sd, reg->reg & 0xff, val);
+ break;
+ case 0xb:
+ vdp_write(sd, reg->reg & 0xff, val);
+ break;
+ default:
+ v4l2_info(sd, "Register %03llx not supported\n", reg->reg);
+ adv7842_inv_register(sd);
+ break;
+ }
+ return 0;
+}
+#endif
+
+static int adv7842_s_detect_tx_5v_ctrl(struct v4l2_subdev *sd)
+{
+ struct adv7842_state *state = to_state(sd);
+ int prev = v4l2_ctrl_g_ctrl(state->detect_tx_5v_ctrl);
+ u8 reg_io_6f = io_read(sd, 0x6f);
+ int val = 0;
+
+ if (reg_io_6f & 0x02)
+ val |= 1; /* port A */
+ if (reg_io_6f & 0x01)
+ val |= 2; /* port B */
+
+ v4l2_dbg(1, debug, sd, "%s: 0x%x -> 0x%x\n", __func__, prev, val);
+
+ if (val != prev)
+ return v4l2_ctrl_s_ctrl(state->detect_tx_5v_ctrl, val);
+ return 0;
+}
+
+static int find_and_set_predefined_video_timings(struct v4l2_subdev *sd,
+ u8 prim_mode,
+ const struct adv7842_video_standards *predef_vid_timings,
+ const struct v4l2_dv_timings *timings)
+{
+ int i;
+
+ for (i = 0; predef_vid_timings[i].timings.bt.width; i++) {
+ if (!v4l2_match_dv_timings(timings, &predef_vid_timings[i].timings,
+ is_digital_input(sd) ? 250000 : 1000000))
+ continue;
+ /* video std */
+ io_write(sd, 0x00, predef_vid_timings[i].vid_std);
+ /* v_freq and prim mode */
+ io_write(sd, 0x01, (predef_vid_timings[i].v_freq << 4) + prim_mode);
+ return 0;
+ }
+
+ return -1;
+}
+
+static int configure_predefined_video_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct adv7842_state *state = to_state(sd);
+ int err;
+
+ v4l2_dbg(1, debug, sd, "%s\n", __func__);
+
+ /* reset to default values */
+ io_write(sd, 0x16, 0x43);
+ io_write(sd, 0x17, 0x5a);
+ /* disable embedded syncs for auto graphics mode */
+ cp_write_and_or(sd, 0x81, 0xef, 0x00);
+ cp_write(sd, 0x26, 0x00);
+ cp_write(sd, 0x27, 0x00);
+ cp_write(sd, 0x28, 0x00);
+ cp_write(sd, 0x29, 0x00);
+ cp_write(sd, 0x8f, 0x00);
+ cp_write(sd, 0x90, 0x00);
+ cp_write(sd, 0xa5, 0x00);
+ cp_write(sd, 0xa6, 0x00);
+ cp_write(sd, 0xa7, 0x00);
+ cp_write(sd, 0xab, 0x00);
+ cp_write(sd, 0xac, 0x00);
+
+ switch (state->mode) {
+ case ADV7842_MODE_COMP:
+ case ADV7842_MODE_RGB:
+ err = find_and_set_predefined_video_timings(sd,
+ 0x01, adv7842_prim_mode_comp, timings);
+ if (err)
+ err = find_and_set_predefined_video_timings(sd,
+ 0x02, adv7842_prim_mode_gr, timings);
+ break;
+ case ADV7842_MODE_HDMI:
+ err = find_and_set_predefined_video_timings(sd,
+ 0x05, adv7842_prim_mode_hdmi_comp, timings);
+ if (err)
+ err = find_and_set_predefined_video_timings(sd,
+ 0x06, adv7842_prim_mode_hdmi_gr, timings);
+ break;
+ default:
+ v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
+ __func__, state->mode);
+ err = -1;
+ break;
+ }
+
+
+ return err;
+}
+
+static void configure_custom_video_timings(struct v4l2_subdev *sd,
+ const struct v4l2_bt_timings *bt)
+{
+ struct adv7842_state *state = to_state(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ u32 width = htotal(bt);
+ u32 height = vtotal(bt);
+ u16 cp_start_sav = bt->hsync + bt->hbackporch - 4;
+ u16 cp_start_eav = width - bt->hfrontporch;
+ u16 cp_start_vbi = height - bt->vfrontporch + 1;
+ u16 cp_end_vbi = bt->vsync + bt->vbackporch + 1;
+ u16 ch1_fr_ll = (((u32)bt->pixelclock / 100) > 0) ?
+ ((width * (ADV7842_fsc / 100)) / ((u32)bt->pixelclock / 100)) : 0;
+ const u8 pll[2] = {
+ 0xc0 | ((width >> 8) & 0x1f),
+ width & 0xff
+ };
+
+ v4l2_dbg(2, debug, sd, "%s\n", __func__);
+
+ switch (state->mode) {
+ case ADV7842_MODE_COMP:
+ case ADV7842_MODE_RGB:
+ /* auto graphics */
+ io_write(sd, 0x00, 0x07); /* video std */
+ io_write(sd, 0x01, 0x02); /* prim mode */
+ /* enable embedded syncs for auto graphics mode */
+ cp_write_and_or(sd, 0x81, 0xef, 0x10);
+
+ /* Should only be set in auto-graphics mode [REF_02, p. 91-92] */
+ /* setup PLL_DIV_MAN_EN and PLL_DIV_RATIO */
+ /* IO-map reg. 0x16 and 0x17 should be written in sequence */
+ if (adv_smbus_write_i2c_block_data(client, 0x16, 2, pll)) {
+ v4l2_err(sd, "writing to reg 0x16 and 0x17 failed\n");
+ break;
+ }
+
+ /* active video - horizontal timing */
+ cp_write(sd, 0x26, (cp_start_sav >> 8) & 0xf);
+ cp_write(sd, 0x27, (cp_start_sav & 0xff));
+ cp_write(sd, 0x28, (cp_start_eav >> 8) & 0xf);
+ cp_write(sd, 0x29, (cp_start_eav & 0xff));
+
+ /* active video - vertical timing */
+ cp_write(sd, 0xa5, (cp_start_vbi >> 4) & 0xff);
+ cp_write(sd, 0xa6, ((cp_start_vbi & 0xf) << 4) |
+ ((cp_end_vbi >> 8) & 0xf));
+ cp_write(sd, 0xa7, cp_end_vbi & 0xff);
+ break;
+ case ADV7842_MODE_HDMI:
+ /* set default prim_mode/vid_std for HDMI
+ * according to [REF_03, c. 4.2] */
+ io_write(sd, 0x00, 0x02); /* video std */
+ io_write(sd, 0x01, 0x06); /* prim mode */
+ break;
+ default:
+ v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
+ __func__, state->mode);
+ break;
+ }
+
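+ /* ch1_fr_ll converts the line length from pixel clock cycles to cycles
+ * of the 28.636 MHz crystal clock (width * fsc / pixelclock); presumably
+ * this is the free-run line length expected by the CP block. */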
+ cp_write(sd, 0x8f, (ch1_fr_ll >> 8) & 0x7);
+ cp_write(sd, 0x90, ch1_fr_ll & 0xff);
+ cp_write(sd, 0xab, (height >> 4) & 0xff);
+ cp_write(sd, 0xac, (height & 0x0f) << 4);
+}
+
+static void set_rgb_quantization_range(struct v4l2_subdev *sd)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ switch (state->rgb_quantization_range) {
+ case V4L2_DV_RGB_RANGE_AUTO:
+ /* automatic */
+ if (is_digital_input(sd) && !(hdmi_read(sd, 0x05) & 0x80)) {
+ /* receiving DVI-D signal */
+
+ /* ADV7842 selects RGB limited range regardless of
+ input format (CE/IT) in automatic mode */
+ if (state->timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
+ /* RGB limited range (16-235) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x00);
+
+ } else {
+ /* RGB full range (0-255) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x10);
+ }
+ } else {
+ /* receiving HDMI or analog signal, set automode */
+ io_write_and_or(sd, 0x02, 0x0f, 0xf0);
+ }
+ break;
+ case V4L2_DV_RGB_RANGE_LIMITED:
+ /* RGB limited range (16-235) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x00);
+ break;
+ case V4L2_DV_RGB_RANGE_FULL:
+ /* RGB full range (0-255) */
+ io_write_and_or(sd, 0x02, 0x0f, 0x10);
+ break;
+ }
+}
+
+static int adv7842_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct v4l2_subdev *sd = to_sd(ctrl);
+ struct adv7842_state *state = to_state(sd);
+
+ /* TODO: SDP ctrls
+ * contrast/brightness/hue/free run are acting a bit strange;
+ * not sure if the SDP CSC is correct.
+ */
+ switch (ctrl->id) {
+ /* standard ctrls */
+ case V4L2_CID_BRIGHTNESS:
+ cp_write(sd, 0x3c, ctrl->val);
+ sdp_write(sd, 0x14, ctrl->val);
+ /* ignore lsb sdp 0x17[3:2] */
+ return 0;
+ case V4L2_CID_CONTRAST:
+ cp_write(sd, 0x3a, ctrl->val);
+ sdp_write(sd, 0x13, ctrl->val);
+ /* ignore lsb sdp 0x17[1:0] */
+ return 0;
+ case V4L2_CID_SATURATION:
+ cp_write(sd, 0x3b, ctrl->val);
+ sdp_write(sd, 0x15, ctrl->val);
+ /* ignore lsb sdp 0x17[5:4] */
+ return 0;
+ case V4L2_CID_HUE:
+ cp_write(sd, 0x3d, ctrl->val);
+ sdp_write(sd, 0x16, ctrl->val);
+ /* ignore lsb sdp 0x17[7:6] */
+ return 0;
+ /* custom ctrls */
+ case V4L2_CID_ADV_RX_ANALOG_SAMPLING_PHASE:
+ afe_write(sd, 0xc8, ctrl->val);
+ return 0;
+ case V4L2_CID_ADV_RX_FREE_RUN_COLOR_MANUAL:
+ cp_write_and_or(sd, 0xbf, ~0x04, (ctrl->val << 2));
+ sdp_write_and_or(sd, 0xdd, ~0x04, (ctrl->val << 2));
+ return 0;
+ case V4L2_CID_ADV_RX_FREE_RUN_COLOR: {
+ u8 R = (ctrl->val & 0xff0000) >> 16;
+ u8 G = (ctrl->val & 0x00ff00) >> 8;
+ u8 B = (ctrl->val & 0x0000ff);
+ /* RGB -> YUV, numerical approximation */
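+ /* The coefficients below match the common 8-bit fixed-point BT.601
+ * RGB to limited-range YCbCr approximation, scaled by 256. */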
+ int Y = 66 * R + 129 * G + 25 * B;
+ int U = -38 * R - 74 * G + 112 * B;
+ int V = 112 * R - 94 * G - 18 * B;
+
+ /* Scale down to 8 bits with rounding */
+ Y = (Y + 128) >> 8;
+ U = (U + 128) >> 8;
+ V = (V + 128) >> 8;
+ /* add offsets: 16 for Y, 128 for U and V */
+ Y += 16;
+ U += 128;
+ V += 128;
+
+ v4l2_dbg(1, debug, sd, "R %x, G %x, B %x\n", R, G, B);
+ v4l2_dbg(1, debug, sd, "Y %x, U %x, V %x\n", Y, U, V);
+
+ /* CP */
+ cp_write(sd, 0xc1, R);
+ cp_write(sd, 0xc0, G);
+ cp_write(sd, 0xc2, B);
+ /* SDP */
+ sdp_write(sd, 0xde, Y);
+ sdp_write(sd, 0xdf, (V & 0xf0) | ((U >> 4) & 0x0f));
+ return 0;
+ }
+ case V4L2_CID_DV_RX_RGB_RANGE:
+ state->rgb_quantization_range = ctrl->val;
+ set_rgb_quantization_range(sd);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static inline bool no_power(struct v4l2_subdev *sd)
+{
+ return io_read(sd, 0x0c) & 0x24;
+}
+
+static inline bool no_cp_signal(struct v4l2_subdev *sd)
+{
+ return ((cp_read(sd, 0xb5) & 0xd0) != 0xd0) || !(cp_read(sd, 0xb1) & 0x80);
+}
+
+static inline bool is_hdmi(struct v4l2_subdev *sd)
+{
+ return hdmi_read(sd, 0x05) & 0x80;
+}
+
+static int adv7842_g_input_status(struct v4l2_subdev *sd, u32 *status)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ *status = 0;
+
+ if (io_read(sd, 0x0c) & 0x24)
+ *status |= V4L2_IN_ST_NO_POWER;
+
+ if (state->mode == ADV7842_MODE_SDP) {
+ /* status from SDP block */
+ if (!(sdp_read(sd, 0x5A) & 0x01))
+ *status |= V4L2_IN_ST_NO_SIGNAL;
+
+ v4l2_dbg(1, debug, sd, "%s: SDP status = 0x%x\n",
+ __func__, *status);
+ return 0;
+ }
+ /* status from CP block */
+ if ((cp_read(sd, 0xb5) & 0xd0) != 0xd0 ||
+ !(cp_read(sd, 0xb1) & 0x80))
+ /* TODO channel 2 */
+ *status |= V4L2_IN_ST_NO_SIGNAL;
+
+ if (is_digital_input(sd) && ((io_read(sd, 0x74) & 0x03) != 0x03))
+ *status |= V4L2_IN_ST_NO_SIGNAL;
+
+ v4l2_dbg(1, debug, sd, "%s: CP status = 0x%x\n",
+ __func__, *status);
+
+ return 0;
+}
+
+struct stdi_readback {
+ u16 bl, lcf, lcvs;
+ u8 hs_pol, vs_pol;
+ bool interlaced;
+};
+
+static int stdi2dv_timings(struct v4l2_subdev *sd,
+ struct stdi_readback *stdi,
+ struct v4l2_dv_timings *timings)
+{
+ struct adv7842_state *state = to_state(sd);
+ u32 hfreq = (ADV7842_fsc * 8) / stdi->bl;
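+ /* bl is the STDI block length, i.e. ADV7842_fsc clock cycles counted
+ over a block of (presumably) 8 lines, hence the factor of 8 above */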
+ u32 pix_clk;
+ int i;
+
+ for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
+ const struct v4l2_bt_timings *bt = &v4l2_dv_timings_presets[i].bt;
+
+ if (!v4l2_valid_dv_timings(&v4l2_dv_timings_presets[i],
+ adv7842_get_dv_timings_cap(sd),
+ adv7842_check_dv_timings, NULL))
+ continue;
+ if (vtotal(bt) != stdi->lcf + 1)
+ continue;
+ if (bt->vsync != stdi->lcvs)
+ continue;
+
+ pix_clk = hfreq * htotal(bt);
+
+ if ((pix_clk < bt->pixelclock + 1000000) &&
+ (pix_clk > bt->pixelclock - 1000000)) {
+ *timings = v4l2_dv_timings_presets[i];
+ return 0;
+ }
+ }
+
+ if (v4l2_detect_cvt(stdi->lcf + 1, hfreq, stdi->lcvs,
+ (stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) |
+ (stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0),
+ timings))
+ return 0;
+ if (v4l2_detect_gtf(stdi->lcf + 1, hfreq, stdi->lcvs,
+ (stdi->hs_pol == '+' ? V4L2_DV_HSYNC_POS_POL : 0) |
+ (stdi->vs_pol == '+' ? V4L2_DV_VSYNC_POS_POL : 0),
+ state->aspect_ratio, timings))
+ return 0;
+
+ v4l2_dbg(2, debug, sd,
+ "%s: No format candidate found for lcvs = %d, lcf=%d, bl = %d, %chsync, %cvsync\n",
+ __func__, stdi->lcvs, stdi->lcf, stdi->bl,
+ stdi->hs_pol, stdi->vs_pol);
+ return -1;
+}
+
+static int read_stdi(struct v4l2_subdev *sd, struct stdi_readback *stdi)
+{
+ u32 status;
+
+ adv7842_g_input_status(sd, &status);
+ if (status & V4L2_IN_ST_NO_SIGNAL) {
+ v4l2_dbg(2, debug, sd, "%s: no signal\n", __func__);
+ return -ENOLINK;
+ }
+
+ stdi->bl = ((cp_read(sd, 0xb1) & 0x3f) << 8) | cp_read(sd, 0xb2);
+ stdi->lcf = ((cp_read(sd, 0xb3) & 0x7) << 8) | cp_read(sd, 0xb4);
+ stdi->lcvs = cp_read(sd, 0xb3) >> 3;
+
+ if ((cp_read(sd, 0xb5) & 0x80) && ((cp_read(sd, 0xb5) & 0x03) == 0x01)) {
+ stdi->hs_pol = ((cp_read(sd, 0xb5) & 0x10) ?
+ ((cp_read(sd, 0xb5) & 0x08) ? '+' : '-') : 'x');
+ stdi->vs_pol = ((cp_read(sd, 0xb5) & 0x40) ?
+ ((cp_read(sd, 0xb5) & 0x20) ? '+' : '-') : 'x');
+ } else {
+ stdi->hs_pol = 'x';
+ stdi->vs_pol = 'x';
+ }
+ stdi->interlaced = (cp_read(sd, 0xb1) & 0x40) ? true : false;
+
+ if (stdi->lcf < 239 || stdi->bl < 8 || stdi->bl == 0x3fff) {
+ v4l2_dbg(2, debug, sd, "%s: invalid signal\n", __func__);
+ return -ENOLINK;
+ }
+
+ v4l2_dbg(2, debug, sd,
+ "%s: lcf (frame height - 1) = %d, bl = %d, lcvs (vsync) = %d, %chsync, %cvsync, %s\n",
+ __func__, stdi->lcf, stdi->bl, stdi->lcvs,
+ stdi->hs_pol, stdi->vs_pol,
+ stdi->interlaced ? "interlaced" : "progressive");
+
+ return 0;
+}
+
+static int adv7842_enum_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_enum_dv_timings *timings)
+{
+ return v4l2_enum_dv_timings_cap(timings,
+ adv7842_get_dv_timings_cap(sd), adv7842_check_dv_timings, NULL);
+}
+
+static int adv7842_dv_timings_cap(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings_cap *cap)
+{
+ *cap = *adv7842_get_dv_timings_cap(sd);
+ return 0;
+}
+
+/* Fill the optional fields .standards and .flags in struct v4l2_dv_timings
+ if the format is found in the supported DV timings capabilities */
+static void adv7842_fill_optional_dv_timings_fields(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ v4l2_find_dv_timings_cap(timings, adv7842_get_dv_timings_cap(sd),
+ is_digital_input(sd) ? 250000 : 1000000,
+ adv7842_check_dv_timings, NULL);
+}
+
+static int adv7842_query_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct adv7842_state *state = to_state(sd);
+ struct v4l2_bt_timings *bt = &timings->bt;
+ struct stdi_readback stdi = { 0 };
+
+ /* SDP block */
+ if (state->mode == ADV7842_MODE_SDP)
+ return -ENODATA;
+
+ /* read STDI */
+ if (read_stdi(sd, &stdi)) {
+ v4l2_dbg(1, debug, sd, "%s: no valid signal\n", __func__);
+ return -ENOLINK;
+ }
+ bt->interlaced = stdi.interlaced ?
+ V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
+ bt->polarities = ((hdmi_read(sd, 0x05) & 0x10) ? V4L2_DV_VSYNC_POS_POL : 0) |
+ ((hdmi_read(sd, 0x05) & 0x20) ? V4L2_DV_HSYNC_POS_POL : 0);
+ bt->vsync = stdi.lcvs;
+
+ if (is_digital_input(sd)) {
+ bool lock = hdmi_read(sd, 0x04) & 0x02;
+ bool interlaced = hdmi_read(sd, 0x0b) & 0x20;
+ unsigned w = (hdmi_read(sd, 0x07) & 0x1f) * 256 + hdmi_read(sd, 0x08);
+ unsigned h = (hdmi_read(sd, 0x09) & 0x1f) * 256 + hdmi_read(sd, 0x0a);
+ unsigned w_total = (hdmi_read(sd, 0x1e) & 0x3f) * 256 +
+ hdmi_read(sd, 0x1f);
+ unsigned h_total = ((hdmi_read(sd, 0x26) & 0x3f) * 256 +
+ hdmi_read(sd, 0x27)) / 2;
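+ /* HDMI regs 0x51/0x52 hold the measured TMDS frequency: a 9-bit
+ integer MHz part plus a 7-bit fraction in 1/128 MHz steps */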
+ unsigned freq = (((hdmi_read(sd, 0x51) << 1) +
+ (hdmi_read(sd, 0x52) >> 7)) * 1000000) +
+ ((hdmi_read(sd, 0x52) & 0x7f) * 1000000) / 128;
+ int i;
+
+ if (is_hdmi(sd)) {
+ /* adjust for deep color mode */
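+ /* hdmi 0x0b[7:6]: 0 = 8, 1 = 10, 2 = 12 bits per channel;
+ pixel clock = TMDS clock * 8 / (8 + 2 * mode) */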
+ freq = freq * 8 / (((hdmi_read(sd, 0x0b) & 0xc0)>>6) * 2 + 8);
+ }
+
+ /* No lock? */
+ if (!lock) {
+ v4l2_dbg(1, debug, sd, "%s: no lock on TMDS signal\n", __func__);
+ return -ENOLCK;
+ }
+ /* Interlaced? */
+ if (interlaced) {
+ v4l2_dbg(1, debug, sd, "%s: interlaced video not supported\n", __func__);
+ return -ERANGE;
+ }
+
+ for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
+ const struct v4l2_bt_timings *bt = &v4l2_dv_timings_presets[i].bt;
+
+ if (!v4l2_valid_dv_timings(&v4l2_dv_timings_presets[i],
+ adv7842_get_dv_timings_cap(sd),
+ adv7842_check_dv_timings, NULL))
+ continue;
+ if (w_total != htotal(bt) || h_total != vtotal(bt))
+ continue;
+
+ if (w != bt->width || h != bt->height)
+ continue;
+
+ if (abs(freq - bt->pixelclock) > 1000000)
+ continue;
+ *timings = v4l2_dv_timings_presets[i];
+ return 0;
+ }
+
+ timings->type = V4L2_DV_BT_656_1120;
+
+ bt->width = w;
+ bt->height = h;
+ bt->interlaced = (hdmi_read(sd, 0x0b) & 0x20) ?
+ V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
+ bt->polarities = ((hdmi_read(sd, 0x05) & 0x10) ?
+ V4L2_DV_VSYNC_POS_POL : 0) | ((hdmi_read(sd, 0x05) & 0x20) ?
+ V4L2_DV_HSYNC_POS_POL : 0);
+ bt->pixelclock = (((hdmi_read(sd, 0x51) << 1) +
+ (hdmi_read(sd, 0x52) >> 7)) * 1000000) +
+ ((hdmi_read(sd, 0x52) & 0x7f) * 1000000) / 128;
+ bt->hfrontporch = (hdmi_read(sd, 0x20) & 0x1f) * 256 +
+ hdmi_read(sd, 0x21);
+ bt->hsync = (hdmi_read(sd, 0x22) & 0x1f) * 256 +
+ hdmi_read(sd, 0x23);
+ bt->hbackporch = (hdmi_read(sd, 0x24) & 0x1f) * 256 +
+ hdmi_read(sd, 0x25);
+ bt->vfrontporch = ((hdmi_read(sd, 0x2a) & 0x3f) * 256 +
+ hdmi_read(sd, 0x2b)) / 2;
+ bt->il_vfrontporch = ((hdmi_read(sd, 0x2c) & 0x3f) * 256 +
+ hdmi_read(sd, 0x2d)) / 2;
+ bt->vsync = ((hdmi_read(sd, 0x2e) & 0x3f) * 256 +
+ hdmi_read(sd, 0x2f)) / 2;
+ bt->il_vsync = ((hdmi_read(sd, 0x30) & 0x3f) * 256 +
+ hdmi_read(sd, 0x31)) / 2;
+ bt->vbackporch = ((hdmi_read(sd, 0x32) & 0x3f) * 256 +
+ hdmi_read(sd, 0x33)) / 2;
+ bt->il_vbackporch = ((hdmi_read(sd, 0x34) & 0x3f) * 256 +
+ hdmi_read(sd, 0x35)) / 2;
+
+ bt->standards = 0;
+ bt->flags = 0;
+ } else {
+ /* Interlaced? */
+ if (stdi.interlaced) {
+ v4l2_dbg(1, debug, sd, "%s: interlaced video not supported\n", __func__);
+ return -ERANGE;
+ }
+
+ if (stdi2dv_timings(sd, &stdi, timings)) {
+ v4l2_dbg(1, debug, sd, "%s: format not supported\n", __func__);
+ return -ERANGE;
+ }
+ }
+
+ if (debug > 1)
+ v4l2_print_dv_timings(sd->name, "adv7842_query_dv_timings: ",
+ timings, true);
+ return 0;
+}
+
+static int adv7842_s_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct adv7842_state *state = to_state(sd);
+ struct v4l2_bt_timings *bt;
+ int err;
+
+ if (state->mode == ADV7842_MODE_SDP)
+ return -ENODATA;
+
+ bt = &timings->bt;
+
+ if (!v4l2_valid_dv_timings(timings, adv7842_get_dv_timings_cap(sd),
+ adv7842_check_dv_timings, NULL))
+ return -ERANGE;
+
+ adv7842_fill_optional_dv_timings_fields(sd, timings);
+
+ state->timings = *timings;
+
+ cp_write(sd, 0x91, bt->interlaced ? 0x50 : 0x10);
+
+ /* Use prim_mode and vid_std when available */
+ err = configure_predefined_video_timings(sd, timings);
+ if (err) {
+ /* custom settings when the video format
+ does not have prim_mode/vid_std */
+ configure_custom_video_timings(sd, bt);
+ }
+
+ set_rgb_quantization_range(sd);
+
+
+ if (debug > 1)
+ v4l2_print_dv_timings(sd->name, "adv7842_s_dv_timings: ",
+ timings, true);
+ return 0;
+}
+
+static int adv7842_g_dv_timings(struct v4l2_subdev *sd,
+ struct v4l2_dv_timings *timings)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ if (state->mode == ADV7842_MODE_SDP)
+ return -ENODATA;
+ *timings = state->timings;
+ return 0;
+}
+
+static void enable_input(struct v4l2_subdev *sd)
+{
+ struct adv7842_state *state = to_state(sd);
+ switch (state->mode) {
+ case ADV7842_MODE_SDP:
+ case ADV7842_MODE_COMP:
+ case ADV7842_MODE_RGB:
+ /* enable */
+ io_write(sd, 0x15, 0xb0); /* Disable Tristate of Pins (no audio) */
+ break;
+ case ADV7842_MODE_HDMI:
+ /* enable */
+ hdmi_write(sd, 0x1a, 0x0a); /* Unmute audio */
+ hdmi_write(sd, 0x01, 0x00); /* Enable HDMI clock terminators */
+ io_write(sd, 0x15, 0xa0); /* Disable Tristate of Pins */
+ break;
+ default:
+ v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
+ __func__, state->mode);
+ break;
+ }
+}
+
+static void disable_input(struct v4l2_subdev *sd)
+{
+ /* disable */
+ io_write(sd, 0x15, 0xbe); /* Tristate all outputs from video core */
+ hdmi_write(sd, 0x1a, 0x1a); /* Mute audio */
+ hdmi_write(sd, 0x01, 0x78); /* Disable HDMI clock terminators */
+}
+
+static void sdp_csc_coeff(struct v4l2_subdev *sd,
+ const struct adv7842_sdp_csc_coeff *c)
+{
+ /* csc auto/manual */
+ sdp_io_write_and_or(sd, 0xe0, 0xbf, c->manual ? 0x00 : 0x40);
+
+ if (!c->manual)
+ return;
+
+ /* csc scaling */
+ sdp_io_write_and_or(sd, 0xe0, 0x7f, c->scaling == 2 ? 0x80 : 0x00);
+
+ /* A coeff */
+ sdp_io_write_and_or(sd, 0xe0, 0xe0, c->A1 >> 8);
+ sdp_io_write(sd, 0xe1, c->A1);
+ sdp_io_write_and_or(sd, 0xe2, 0xe0, c->A2 >> 8);
+ sdp_io_write(sd, 0xe3, c->A2);
+ sdp_io_write_and_or(sd, 0xe4, 0xe0, c->A3 >> 8);
+ sdp_io_write(sd, 0xe5, c->A3);
+
+ /* A scale */
+ sdp_io_write_and_or(sd, 0xe6, 0x80, c->A4 >> 8);
+ sdp_io_write(sd, 0xe7, c->A4);
+
+ /* B coeff */
+ sdp_io_write_and_or(sd, 0xe8, 0xe0, c->B1 >> 8);
+ sdp_io_write(sd, 0xe9, c->B1);
+ sdp_io_write_and_or(sd, 0xea, 0xe0, c->B2 >> 8);
+ sdp_io_write(sd, 0xeb, c->B2);
+ sdp_io_write_and_or(sd, 0xec, 0xe0, c->B3 >> 8);
+ sdp_io_write(sd, 0xed, c->B3);
+
+ /* B scale */
+ sdp_io_write_and_or(sd, 0xee, 0x80, c->B4 >> 8);
+ sdp_io_write(sd, 0xef, c->B4);
+
+ /* C coeff */
+ sdp_io_write_and_or(sd, 0xf0, 0xe0, c->C1 >> 8);
+ sdp_io_write(sd, 0xf1, c->C1);
+ sdp_io_write_and_or(sd, 0xf2, 0xe0, c->C2 >> 8);
+ sdp_io_write(sd, 0xf3, c->C2);
+ sdp_io_write_and_or(sd, 0xf4, 0xe0, c->C3 >> 8);
+ sdp_io_write(sd, 0xf5, c->C3);
+
+ /* C scale */
+ sdp_io_write_and_or(sd, 0xf6, 0x80, c->C4 >> 8);
+ sdp_io_write(sd, 0xf7, c->C4);
+}
+
+static void select_input(struct v4l2_subdev *sd,
+ enum adv7842_vid_std_select vid_std_select)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ switch (state->mode) {
+ case ADV7842_MODE_SDP:
+ io_write(sd, 0x00, vid_std_select); /* video std: CVBS or YC mode */
+ io_write(sd, 0x01, 0); /* prim mode */
+ /* enable embedded syncs for auto graphics mode */
+ cp_write_and_or(sd, 0x81, 0xef, 0x10);
+
+ afe_write(sd, 0x00, 0x00); /* power up ADC */
+ afe_write(sd, 0xc8, 0x00); /* phase control */
+
+ io_write(sd, 0x19, 0x83); /* LLC DLL phase */
+ io_write(sd, 0x33, 0x40); /* LLC DLL enable */
+
+ io_write(sd, 0xdd, 0x90); /* Manual 2x output clock */
+ /* script says register 0xde, which doesn't exist in the manual */
+
+ /* Manual analog input muxing mode, CVBS (6.4)*/
+ afe_write_and_or(sd, 0x02, 0x7f, 0x80);
+ if (vid_std_select == ADV7842_SDP_VID_STD_CVBS_SD_4x1) {
+ afe_write(sd, 0x03, 0xa0); /* ADC0 to AIN10 (CVBS), ADC1 N/C*/
+ afe_write(sd, 0x04, 0x00); /* ADC2 N/C,ADC3 N/C*/
+ } else {
+ afe_write(sd, 0x03, 0xa0); /* ADC0 to AIN10 (CVBS), ADC1 N/C*/
+ afe_write(sd, 0x04, 0xc0); /* ADC2 to AIN12, ADC3 N/C*/
+ }
+ afe_write(sd, 0x0c, 0x1f); /* ADI recommend write */
+ afe_write(sd, 0x12, 0x63); /* ADI recommend write */
+
+ sdp_io_write(sd, 0xb2, 0x60); /* Disable AV codes */
+ sdp_io_write(sd, 0xc8, 0xe3); /* Disable Ancillary data */
+
+ /* SDP recommended settings */
+ sdp_write(sd, 0x00, 0x3F); /* Autodetect PAL NTSC (not SECAM) */
+ sdp_write(sd, 0x01, 0x00); /* Pedestal Off */
+
+ sdp_write(sd, 0x03, 0xE4); /* Manual VCR Gain Luma 0x40B */
+ sdp_write(sd, 0x04, 0x0B); /* Manual Luma setting */
+ sdp_write(sd, 0x05, 0xC3); /* Manual Chroma setting 0x3FE */
+ sdp_write(sd, 0x06, 0xFE); /* Manual Chroma setting */
+ sdp_write(sd, 0x12, 0x0D); /* Frame TBC,I_P, 3D comb enabled */
+ sdp_write(sd, 0xA7, 0x00); /* ADI Recommended Write */
+ sdp_io_write(sd, 0xB0, 0x00); /* Disable H and v blanking */
+
+ /* deinterlacer enabled and 3D comb */
+ sdp_write_and_or(sd, 0x12, 0xf6, 0x09);
+
+ sdp_write(sd, 0xdd, 0x08); /* free run auto */
+
+ break;
+
+ case ADV7842_MODE_COMP:
+ case ADV7842_MODE_RGB:
+ /* Automatic analog input muxing mode */
+ afe_write_and_or(sd, 0x02, 0x7f, 0x00);
+ /* set mode and select free run resolution */
+ io_write(sd, 0x00, vid_std_select); /* video std */
+ io_write(sd, 0x01, 0x02); /* prim mode */
+ cp_write_and_or(sd, 0x81, 0xef, 0x10); /* enable embedded syncs
+ for auto graphics mode */
+
+ afe_write(sd, 0x00, 0x00); /* power up ADC */
+ afe_write(sd, 0xc8, 0x00); /* phase control */
+
+ /* set ADI recommended settings for digitizer */
+ /* "ADV7842 Register Settings Recommendations
+ * (rev. 1.8, November 2010)" p. 9. */
+ afe_write(sd, 0x0c, 0x1f); /* ADC Range improvement */
+ afe_write(sd, 0x12, 0x63); /* ADC Range improvement */
+
+ /* set to default gain for RGB */
+ cp_write(sd, 0x73, 0x10);
+ cp_write(sd, 0x74, 0x04);
+ cp_write(sd, 0x75, 0x01);
+ cp_write(sd, 0x76, 0x00);
+
+ cp_write(sd, 0x3e, 0x04); /* CP core pre-gain control */
+ cp_write(sd, 0xc3, 0x39); /* CP coast control. Graphics mode */
+ cp_write(sd, 0x40, 0x5c); /* CP core pre-gain control. Graphics mode */
+ break;
+
+ case ADV7842_MODE_HDMI:
+ /* Automatic analog input muxing mode */
+ afe_write_and_or(sd, 0x02, 0x7f, 0x00);
+ /* set mode and select free run resolution */
+ if (state->hdmi_port_a)
+ hdmi_write(sd, 0x00, 0x02); /* select port A */
+ else
+ hdmi_write(sd, 0x00, 0x03); /* select port B */
+ io_write(sd, 0x00, vid_std_select); /* video std */
+ io_write(sd, 0x01, 5); /* prim mode */
+ cp_write_and_or(sd, 0x81, 0xef, 0x00); /* disable embedded syncs
+ for auto graphics mode */
+
+ /* set ADI recommended settings for HDMI: */
+ /* "ADV7842 Register Settings Recommendations
+ * (rev. 1.8, November 2010)" p. 3. */
+ hdmi_write(sd, 0xc0, 0x00);
+ hdmi_write(sd, 0x0d, 0x34); /* ADI recommended write */
+ hdmi_write(sd, 0x3d, 0x10); /* ADI recommended write */
+ hdmi_write(sd, 0x44, 0x85); /* TMDS PLL optimization */
+ hdmi_write(sd, 0x46, 0x1f); /* ADI recommended write */
+ hdmi_write(sd, 0x57, 0xb6); /* TMDS PLL optimization */
+ hdmi_write(sd, 0x58, 0x03); /* TMDS PLL optimization */
+ hdmi_write(sd, 0x60, 0x88); /* TMDS PLL optimization */
+ hdmi_write(sd, 0x61, 0x88); /* TMDS PLL optimization */
+ hdmi_write(sd, 0x6c, 0x18); /* Disable ISRC clearing bit,
+ Improve robustness */
+ hdmi_write(sd, 0x75, 0x10); /* DDC drive strength */
+ hdmi_write(sd, 0x85, 0x1f); /* equaliser */
+ hdmi_write(sd, 0x87, 0x70); /* ADI recommended write */
+ hdmi_write(sd, 0x89, 0x04); /* equaliser */
+ hdmi_write(sd, 0x8a, 0x1e); /* equaliser */
+ hdmi_write(sd, 0x93, 0x04); /* equaliser */
+ hdmi_write(sd, 0x94, 0x1e); /* equaliser */
+ hdmi_write(sd, 0x99, 0xa1); /* ADI recommended write */
+ hdmi_write(sd, 0x9b, 0x09); /* ADI recommended write */
+ hdmi_write(sd, 0x9d, 0x02); /* equaliser */
+
+ afe_write(sd, 0x00, 0xff); /* power down ADC */
+ afe_write(sd, 0xc8, 0x40); /* phase control */
+
+ /* set to default gain for HDMI */
+ cp_write(sd, 0x73, 0x10);
+ cp_write(sd, 0x74, 0x04);
+ cp_write(sd, 0x75, 0x01);
+ cp_write(sd, 0x76, 0x00);
+
+ /* reset ADI recommended settings for digitizer */
+ /* "ADV7842 Register Settings Recommendations
+ * (rev. 2.5, June 2010)" p. 17. */
+ afe_write(sd, 0x12, 0xfb); /* ADC noise shaping filter controls */
+ afe_write(sd, 0x0c, 0x0d); /* CP core gain controls */
+ cp_write(sd, 0x3e, 0x80); /* CP core pre-gain control,
+ enable color control */
+ /* CP coast control */
+ cp_write(sd, 0xc3, 0x33); /* Component mode */
+
+ /* color space conversion, autodetect color space */
+ io_write_and_or(sd, 0x02, 0x0f, 0xf0);
+ break;
+
+ default:
+ v4l2_dbg(2, debug, sd, "%s: Unknown mode %d\n",
+ __func__, state->mode);
+ break;
+ }
+}
+
+static int adv7842_s_routing(struct v4l2_subdev *sd,
+ u32 input, u32 output, u32 config)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ v4l2_dbg(2, debug, sd, "%s: input %d\n", __func__, input);
+
+ switch (input) {
+ case ADV7842_SELECT_HDMI_PORT_A:
+ /* TODO select HDMI_COMP or HDMI_GR */
+ state->mode = ADV7842_MODE_HDMI;
+ state->vid_std_select = ADV7842_HDMI_COMP_VID_STD_HD_1250P;
+ state->hdmi_port_a = true;
+ break;
+ case ADV7842_SELECT_HDMI_PORT_B:
+ /* TODO select HDMI_COMP or HDMI_GR */
+ state->mode = ADV7842_MODE_HDMI;
+ state->vid_std_select = ADV7842_HDMI_COMP_VID_STD_HD_1250P;
+ state->hdmi_port_a = false;
+ break;
+ case ADV7842_SELECT_VGA_COMP:
+ v4l2_info(sd, "%s: VGA component: todo\n", __func__);
+ case ADV7842_SELECT_VGA_RGB:
+ state->mode = ADV7842_MODE_RGB;
+ state->vid_std_select = ADV7842_RGB_VID_STD_AUTO_GRAPH_MODE;
+ break;
+ case ADV7842_SELECT_SDP_CVBS:
+ state->mode = ADV7842_MODE_SDP;
+ state->vid_std_select = ADV7842_SDP_VID_STD_CVBS_SD_4x1;
+ break;
+ case ADV7842_SELECT_SDP_YC:
+ state->mode = ADV7842_MODE_SDP;
+ state->vid_std_select = ADV7842_SDP_VID_STD_YC_SD4_x1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ disable_input(sd);
+ select_input(sd, state->vid_std_select);
+ enable_input(sd);
+
+ v4l2_subdev_notify(sd, ADV7842_FMT_CHANGE, NULL);
+
+ return 0;
+}
+
+static int adv7842_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned int index,
+ enum v4l2_mbus_pixelcode *code)
+{
+ if (index)
+ return -EINVAL;
+ /* Good enough for now */
+ *code = V4L2_MBUS_FMT_FIXED;
+ return 0;
+}
+
+static int adv7842_g_mbus_fmt(struct v4l2_subdev *sd,
+ struct v4l2_mbus_framefmt *fmt)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ fmt->width = state->timings.bt.width;
+ fmt->height = state->timings.bt.height;
+ fmt->code = V4L2_MBUS_FMT_FIXED;
+ fmt->field = V4L2_FIELD_NONE;
+
+ if (state->mode == ADV7842_MODE_SDP) {
+ /* SDP block */
+ if (!(sdp_read(sd, 0x5A) & 0x01))
+ return -EINVAL;
+ fmt->width = 720;
+ /* valid signal */
+ if (state->norm & V4L2_STD_525_60)
+ fmt->height = 480;
+ else
+ fmt->height = 576;
+ fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
+ return 0;
+ }
+
+ if (state->timings.bt.standards & V4L2_DV_BT_STD_CEA861) {
+ fmt->colorspace = (state->timings.bt.height <= 576) ?
+ V4L2_COLORSPACE_SMPTE170M : V4L2_COLORSPACE_REC709;
+ }
+ return 0;
+}
+
+static void adv7842_irq_enable(struct v4l2_subdev *sd, bool enable)
+{
+ if (enable) {
+ /* Enable SSPD, STDI and CP locked/unlocked interrupts */
+ io_write(sd, 0x46, 0x9c);
+ /* ESDP_50HZ_DET interrupt */
+ io_write(sd, 0x5a, 0x10);
+ /* Enable CABLE_DET_A/B_ST (+5v) interrupt */
+ io_write(sd, 0x73, 0x03);
+ /* Enable V_LOCKED and DE_REGEN_LCK interrupts */
+ io_write(sd, 0x78, 0x03);
+ /* Enable SDP Standard Detection Change and SDP Video Detected */
+ io_write(sd, 0xa0, 0x09);
+ } else {
+ io_write(sd, 0x46, 0x0);
+ io_write(sd, 0x5a, 0x0);
+ io_write(sd, 0x73, 0x0);
+ io_write(sd, 0x78, 0x0);
+ io_write(sd, 0xa0, 0x0);
+ }
+}
+
+static int adv7842_isr(struct v4l2_subdev *sd, u32 status, bool *handled)
+{
+ struct adv7842_state *state = to_state(sd);
+ u8 fmt_change_cp, fmt_change_digital, fmt_change_sdp;
+ u8 irq_status[5];
+ u8 irq_cfg = io_read(sd, 0x40);
+
+ /* disable irq-pin output */
+ io_write(sd, 0x40, irq_cfg | 0x3);
+
+ /* read status */
+ irq_status[0] = io_read(sd, 0x43);
+ irq_status[1] = io_read(sd, 0x57);
+ irq_status[2] = io_read(sd, 0x70);
+ irq_status[3] = io_read(sd, 0x75);
+ irq_status[4] = io_read(sd, 0x9d);
+
+ /* and clear */
+ if (irq_status[0])
+ io_write(sd, 0x44, irq_status[0]);
+ if (irq_status[1])
+ io_write(sd, 0x58, irq_status[1]);
+ if (irq_status[2])
+ io_write(sd, 0x71, irq_status[2]);
+ if (irq_status[3])
+ io_write(sd, 0x76, irq_status[3]);
+ if (irq_status[4])
+ io_write(sd, 0x9e, irq_status[4]);
+
+ v4l2_dbg(1, debug, sd, "%s: irq %x, %x, %x, %x, %x\n", __func__,
+ irq_status[0], irq_status[1], irq_status[2],
+ irq_status[3], irq_status[4]);
+
+ /* format change CP */
+ fmt_change_cp = irq_status[0] & 0x9c;
+
+ /* format change SDP */
+ if (state->mode == ADV7842_MODE_SDP)
+ fmt_change_sdp = (irq_status[1] & 0x30) | (irq_status[4] & 0x09);
+ else
+ fmt_change_sdp = 0;
+
+ /* digital format CP */
+ if (is_digital_input(sd))
+ fmt_change_digital = irq_status[3] & 0x03;
+ else
+ fmt_change_digital = 0;
+
+ /* notify */
+ if (fmt_change_cp || fmt_change_digital || fmt_change_sdp) {
+ v4l2_dbg(1, debug, sd,
+ "%s: fmt_change_cp = 0x%x, fmt_change_digital = 0x%x, fmt_change_sdp = 0x%x\n",
+ __func__, fmt_change_cp, fmt_change_digital,
+ fmt_change_sdp);
+ v4l2_subdev_notify(sd, ADV7842_FMT_CHANGE, NULL);
+ }
+
+ /* 5v cable detect */
+ if (irq_status[2])
+ adv7842_s_detect_tx_5v_ctrl(sd);
+
+ if (handled)
+ *handled = true;
+
+ /* re-enable irq-pin output */
+ io_write(sd, 0x40, irq_cfg);
+
+ return 0;
+}
+
+static int adv7842_set_edid(struct v4l2_subdev *sd, struct v4l2_subdev_edid *e)
+{
+ struct adv7842_state *state = to_state(sd);
+ int err = 0;
+
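+ /* pads 0 and 1 are the HDMI ports (A and B), pad 2 is the VGA/analog port */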
+ if (e->pad > 2)
+ return -EINVAL;
+ if (e->start_block != 0)
+ return -EINVAL;
+ if (e->blocks > 2)
+ return -E2BIG;
+ if (!e->edid)
+ return -EINVAL;
+
+ /* todo, per edid */
+ state->aspect_ratio = v4l2_calc_aspect_ratio(e->edid[0x15],
+ e->edid[0x16]);
+
+ if (e->pad == 2) {
+ memset(&state->vga_edid.edid, 0, 256);
+ state->vga_edid.present = e->blocks ? 0x1 : 0x0;
+ memcpy(&state->vga_edid.edid, e->edid, 128 * e->blocks);
+ err = edid_write_vga_segment(sd);
+ } else {
+ u32 mask = 0x1<<e->pad;
+ memset(&state->hdmi_edid.edid, 0, 256);
+ if (e->blocks)
+ state->hdmi_edid.present |= mask;
+ else
+ state->hdmi_edid.present &= ~mask;
+ memcpy(&state->hdmi_edid.edid, e->edid, 128*e->blocks);
+ err = edid_write_hdmi_segment(sd, e->pad);
+ }
+ if (err < 0)
+ v4l2_err(sd, "error %d writing edid on port %d\n", err, e->pad);
+ return err;
+}
+
+/*********** avi info frame CEA-861-E **************/
+/* TODO move to common library */
+
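+/* Field names follow the CEA-861-E AVI InfoFrame bit fields, e.g.
+ y10 = Y1:Y0 (pixel encoding), c10 = C1:C0 (colorimetry),
+ q10 = Q1:Q0 (RGB quantization range), vic = Video Identification Code */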
+struct avi_info_frame {
+ uint8_t f17;
+ uint8_t y10;
+ uint8_t a0;
+ uint8_t b10;
+ uint8_t s10;
+ uint8_t c10;
+ uint8_t m10;
+ uint8_t r3210;
+ uint8_t itc;
+ uint8_t ec210;
+ uint8_t q10;
+ uint8_t sc10;
+ uint8_t f47;
+ uint8_t vic;
+ uint8_t yq10;
+ uint8_t cn10;
+ uint8_t pr3210;
+ uint16_t etb;
+ uint16_t sbb;
+ uint16_t elb;
+ uint16_t srb;
+};
+
+static const char *y10_txt[4] = {
+ "RGB",
+ "YCbCr 4:2:2",
+ "YCbCr 4:4:4",
+ "Future",
+};
+
+static const char *c10_txt[4] = {
+ "No Data",
+ "SMPTE 170M",
+ "ITU-R 709",
+ "Extended Colorimetry information valied",
+};
+
+static const char *itc_txt[2] = {
+ "No Data",
+ "IT content",
+};
+
+static const char *ec210_txt[8] = {
+ "xvYCC601",
+ "xvYCC709",
+ "sYCC601",
+ "AdobeYCC601",
+ "AdobeRGB",
+ "5 reserved",
+ "6 reserved",
+ "7 reserved",
+};
+
+static const char *q10_txt[4] = {
+ "Default",
+ "Limited Range",
+ "Full Range",
+ "Reserved",
+};
+
+static void parse_avi_infoframe(struct v4l2_subdev *sd, uint8_t *buf,
+ struct avi_info_frame *avi)
+{
+ avi->f17 = (buf[1] >> 7) & 0x1;
+ avi->y10 = (buf[1] >> 5) & 0x3;
+ avi->a0 = (buf[1] >> 4) & 0x1;
+ avi->b10 = (buf[1] >> 2) & 0x3;
+ avi->s10 = buf[1] & 0x3;
+ avi->c10 = (buf[2] >> 6) & 0x3;
+ avi->m10 = (buf[2] >> 4) & 0x3;
+ avi->r3210 = buf[2] & 0xf;
+ avi->itc = (buf[3] >> 7) & 0x1;
+ avi->ec210 = (buf[3] >> 4) & 0x7;
+ avi->q10 = (buf[3] >> 2) & 0x3;
+ avi->sc10 = buf[3] & 0x3;
+ avi->f47 = (buf[4] >> 7) & 0x1;
+ avi->vic = buf[4] & 0x7f;
+ avi->yq10 = (buf[5] >> 6) & 0x3;
+ avi->cn10 = (buf[5] >> 4) & 0x3;
+ avi->pr3210 = buf[5] & 0xf;
+ avi->etb = buf[6] + 256*buf[7];
+ avi->sbb = buf[8] + 256*buf[9];
+ avi->elb = buf[10] + 256*buf[11];
+ avi->srb = buf[12] + 256*buf[13];
+}
+
+static void print_avi_infoframe(struct v4l2_subdev *sd)
+{
+ int i;
+ uint8_t buf[14];
+ uint8_t avi_inf_len;
+ struct avi_info_frame avi;
+
+ if (!(hdmi_read(sd, 0x05) & 0x80)) {
+ v4l2_info(sd, "receive DVI-D signal (AVI infoframe not supported)\n");
+ return;
+ }
+ if (!(io_read(sd, 0x60) & 0x01)) {
+ v4l2_info(sd, "AVI infoframe not received\n");
+ return;
+ }
+
+ if (io_read(sd, 0x88) & 0x10) {
+ /* Note: the ADV7842 calculated incorrect checksums for InfoFrames
+ with a length of 14 or 15. See the ADV7842 Register Settings
+ Recommendations document for more details. */
+ v4l2_info(sd, "AVI infoframe checksum error\n");
+ return;
+ }
+
+ avi_inf_len = infoframe_read(sd, 0xe2);
+ v4l2_info(sd, "AVI infoframe version %d (%d byte)\n",
+ infoframe_read(sd, 0xe1), avi_inf_len);
+
+ if (infoframe_read(sd, 0xe1) != 0x02)
+ return;
+
+ for (i = 0; i < 14; i++)
+ buf[i] = infoframe_read(sd, i);
+
+ v4l2_info(sd, "\t%02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7],
+ buf[8], buf[9], buf[10], buf[11], buf[12], buf[13]);
+
+ parse_avi_infoframe(sd, buf, &avi);
+
+ if (avi.vic)
+ v4l2_info(sd, "\tVIC: %d\n", avi.vic);
+ if (avi.itc)
+ v4l2_info(sd, "\t%s\n", itc_txt[avi.itc]);
+
+ if (avi.y10)
+ v4l2_info(sd, "\t%s %s\n", y10_txt[avi.y10], !avi.c10 ? "" :
+ (avi.c10 == 0x3 ? ec210_txt[avi.ec210] : c10_txt[avi.c10]));
+ else
+ v4l2_info(sd, "\t%s %s\n", y10_txt[avi.y10], q10_txt[avi.q10]);
+}
+
+static const char * const prim_mode_txt[] = {
+ "SDP",
+ "Component",
+ "Graphics",
+ "Reserved",
+ "CVBS & HDMI AUDIO",
+ "HDMI-Comp",
+ "HDMI-GR",
+ "Reserved",
+ "Reserved",
+ "Reserved",
+ "Reserved",
+ "Reserved",
+ "Reserved",
+ "Reserved",
+ "Reserved",
+ "Reserved",
+};
+
+static int adv7842_sdp_log_status(struct v4l2_subdev *sd)
+{
+ /* SDP (Standard definition processor) block */
+ uint8_t sdp_signal_detected = sdp_read(sd, 0x5A) & 0x01;
+
+ v4l2_info(sd, "Chip powered %s\n", no_power(sd) ? "off" : "on");
+ v4l2_info(sd, "Prim-mode = 0x%x, video std = 0x%x\n",
+ io_read(sd, 0x01) & 0x0f, io_read(sd, 0x00) & 0x3f);
+
+ v4l2_info(sd, "SDP: free run: %s\n",
+ (sdp_read(sd, 0x56) & 0x01) ? "on" : "off");
+ v4l2_info(sd, "SDP: %s\n", sdp_signal_detected ?
+ "valid SD/PR signal detected" : "invalid/no signal");
+ if (sdp_signal_detected) {
+ static const char * const sdp_std_txt[] = {
+ "NTSC-M/J",
+ "1?",
+ "NTSC-443",
+ "60HzSECAM",
+ "PAL-M",
+ "5?",
+ "PAL-60",
+ "7?", "8?", "9?", "a?", "b?",
+ "PAL-CombN",
+ "d?",
+ "PAL-BGHID",
+ "SECAM"
+ };
+ v4l2_info(sd, "SDP: standard %s\n",
+ sdp_std_txt[sdp_read(sd, 0x52) & 0x0f]);
+ v4l2_info(sd, "SDP: %s\n",
+ (sdp_read(sd, 0x59) & 0x08) ? "50Hz" : "60Hz");
+ v4l2_info(sd, "SDP: %s\n",
+ (sdp_read(sd, 0x57) & 0x08) ? "Interlaced" : "Progressive");
+ v4l2_info(sd, "SDP: deinterlacer %s\n",
+ (sdp_read(sd, 0x12) & 0x08) ? "enabled" : "disabled");
+ v4l2_info(sd, "SDP: csc %s mode\n",
+ (sdp_io_read(sd, 0xe0) & 0x40) ? "auto" : "manual");
+ }
+ return 0;
+}
+
+static int adv7842_cp_log_status(struct v4l2_subdev *sd)
+{
+ /* CP block */
+ struct adv7842_state *state = to_state(sd);
+ struct v4l2_dv_timings timings;
+ uint8_t reg_io_0x02 = io_read(sd, 0x02);
+ uint8_t reg_io_0x21 = io_read(sd, 0x21);
+ uint8_t reg_rep_0x77 = rep_read(sd, 0x77);
+ uint8_t reg_rep_0x7d = rep_read(sd, 0x7d);
+ bool audio_pll_locked = hdmi_read(sd, 0x04) & 0x01;
+ bool audio_sample_packet_detect = hdmi_read(sd, 0x18) & 0x01;
+ bool audio_mute = io_read(sd, 0x65) & 0x40;
+
+ static const char * const csc_coeff_sel_rb[16] = {
+ "bypassed", "YPbPr601 -> RGB", "reserved", "YPbPr709 -> RGB",
+ "reserved", "RGB -> YPbPr601", "reserved", "RGB -> YPbPr709",
+ "reserved", "YPbPr709 -> YPbPr601", "YPbPr601 -> YPbPr709",
+ "reserved", "reserved", "reserved", "reserved", "manual"
+ };
+ static const char * const input_color_space_txt[16] = {
+ "RGB limited range (16-235)", "RGB full range (0-255)",
+ "YCbCr Bt.601 (16-235)", "YCbCr Bt.709 (16-235)",
+ "XvYCC Bt.601", "XvYCC Bt.709",
+ "YCbCr Bt.601 (0-255)", "YCbCr Bt.709 (0-255)",
+ "invalid", "invalid", "invalid", "invalid", "invalid",
+ "invalid", "invalid", "automatic"
+ };
+ static const char * const rgb_quantization_range_txt[] = {
+ "Automatic",
+ "RGB limited range (16-235)",
+ "RGB full range (0-255)",
+ };
+ static const char * const deep_color_mode_txt[4] = {
+ "8-bits per channel",
+ "10-bits per channel",
+ "12-bits per channel",
+ "16-bits per channel (not supported)"
+ };
+
+ v4l2_info(sd, "-----Chip status-----\n");
+ v4l2_info(sd, "Chip power: %s\n", no_power(sd) ? "off" : "on");
+ v4l2_info(sd, "Connector type: %s\n", state->connector_hdmi ?
+ "HDMI" : (is_digital_input(sd) ? "DVI-D" : "DVI-A"));
+ v4l2_info(sd, "HDMI/DVI-D port selected: %s\n",
+ state->hdmi_port_a ? "A" : "B");
+ v4l2_info(sd, "EDID A %s, B %s\n",
+ ((reg_rep_0x7d & 0x04) && (reg_rep_0x77 & 0x04)) ?
+ "enabled" : "disabled",
+ ((reg_rep_0x7d & 0x08) && (reg_rep_0x77 & 0x08)) ?
+ "enabled" : "disabled");
+ v4l2_info(sd, "HPD A %s, B %s\n",
+ reg_io_0x21 & 0x02 ? "enabled" : "disabled",
+ reg_io_0x21 & 0x01 ? "enabled" : "disabled");
+ v4l2_info(sd, "CEC %s\n", !!(cec_read(sd, 0x2a) & 0x01) ?
+ "enabled" : "disabled");
+
+ v4l2_info(sd, "-----Signal status-----\n");
+ if (state->hdmi_port_a) {
+ v4l2_info(sd, "Cable detected (+5V power): %s\n",
+ io_read(sd, 0x6f) & 0x02 ? "true" : "false");
+ v4l2_info(sd, "TMDS signal detected: %s\n",
+ (io_read(sd, 0x6a) & 0x02) ? "true" : "false");
+ v4l2_info(sd, "TMDS signal locked: %s\n",
+ (io_read(sd, 0x6a) & 0x20) ? "true" : "false");
+ } else {
+ v4l2_info(sd, "Cable detected (+5V power):%s\n",
+ io_read(sd, 0x6f) & 0x01 ? "true" : "false");
+ v4l2_info(sd, "TMDS signal detected: %s\n",
+ (io_read(sd, 0x6a) & 0x01) ? "true" : "false");
+ v4l2_info(sd, "TMDS signal locked: %s\n",
+ (io_read(sd, 0x6a) & 0x10) ? "true" : "false");
+ }
+ v4l2_info(sd, "CP free run: %s\n",
+ (!!(cp_read(sd, 0xff) & 0x10) ? "on" : "off"));
+ v4l2_info(sd, "Prim-mode = 0x%x, video std = 0x%x, v_freq = 0x%x\n",
+ io_read(sd, 0x01) & 0x0f, io_read(sd, 0x00) & 0x3f,
+ (io_read(sd, 0x01) & 0x70) >> 4);
+
+ v4l2_info(sd, "-----Video Timings-----\n");
+ if (no_cp_signal(sd)) {
+ v4l2_info(sd, "STDI: not locked\n");
+ } else {
+ uint32_t bl = ((cp_read(sd, 0xb1) & 0x3f) << 8) | cp_read(sd, 0xb2);
+ uint32_t lcf = ((cp_read(sd, 0xb3) & 0x7) << 8) | cp_read(sd, 0xb4);
+ uint32_t lcvs = cp_read(sd, 0xb3) >> 3;
+ uint32_t fcl = ((cp_read(sd, 0xb8) & 0x1f) << 8) | cp_read(sd, 0xb9);
+ char hs_pol = ((cp_read(sd, 0xb5) & 0x10) ?
+ ((cp_read(sd, 0xb5) & 0x08) ? '+' : '-') : 'x');
+ char vs_pol = ((cp_read(sd, 0xb5) & 0x40) ?
+ ((cp_read(sd, 0xb5) & 0x20) ? '+' : '-') : 'x');
+ v4l2_info(sd,
+ "STDI: lcf (frame height - 1) = %d, bl = %d, lcvs (vsync) = %d, fcl = %d, %s, %chsync, %cvsync\n",
+ lcf, bl, lcvs, fcl,
+ (cp_read(sd, 0xb1) & 0x40) ?
+ "interlaced" : "progressive",
+ hs_pol, vs_pol);
+ }
+ if (adv7842_query_dv_timings(sd, &timings))
+ v4l2_info(sd, "No video detected\n");
+ else
+ v4l2_print_dv_timings(sd->name, "Detected format: ",
+ &timings, true);
+ v4l2_print_dv_timings(sd->name, "Configured format: ",
+ &state->timings, true);
+
+ if (no_cp_signal(sd))
+ return 0;
+
+ v4l2_info(sd, "-----Color space-----\n");
+ v4l2_info(sd, "RGB quantization range ctrl: %s\n",
+ rgb_quantization_range_txt[state->rgb_quantization_range]);
+ v4l2_info(sd, "Input color space: %s\n",
+ input_color_space_txt[reg_io_0x02 >> 4]);
+ v4l2_info(sd, "Output color space: %s %s, saturator %s\n",
+ (reg_io_0x02 & 0x02) ? "RGB" : "YCbCr",
+ (reg_io_0x02 & 0x04) ? "(16-235)" : "(0-255)",
+ ((reg_io_0x02 & 0x04) ^ (reg_io_0x02 & 0x01)) ?
+ "enabled" : "disabled");
+ v4l2_info(sd, "Color space conversion: %s\n",
+ csc_coeff_sel_rb[cp_read(sd, 0xf4) >> 4]);
+
+ if (!is_digital_input(sd))
+ return 0;
+
+ v4l2_info(sd, "-----%s status-----\n", is_hdmi(sd) ? "HDMI" : "DVI-D");
+ v4l2_info(sd, "HDCP encrypted content: %s\n",
+ (hdmi_read(sd, 0x05) & 0x40) ? "true" : "false");
+ v4l2_info(sd, "HDCP keys read: %s%s\n",
+ (hdmi_read(sd, 0x04) & 0x20) ? "yes" : "no",
+ (hdmi_read(sd, 0x04) & 0x10) ? "ERROR" : "");
+ if (!is_hdmi(sd))
+ return 0;
+
+ v4l2_info(sd, "Audio: pll %s, samples %s, %s\n",
+ audio_pll_locked ? "locked" : "not locked",
+ audio_sample_packet_detect ? "detected" : "not detected",
+ audio_mute ? "muted" : "enabled");
+ if (audio_pll_locked && audio_sample_packet_detect) {
+ v4l2_info(sd, "Audio format: %s\n",
+ (hdmi_read(sd, 0x07) & 0x40) ? "multi-channel" : "stereo");
+ }
+ v4l2_info(sd, "Audio CTS: %u\n", (hdmi_read(sd, 0x5b) << 12) +
+ (hdmi_read(sd, 0x5c) << 8) +
+ (hdmi_read(sd, 0x5d) & 0xf0));
+ v4l2_info(sd, "Audio N: %u\n", ((hdmi_read(sd, 0x5d) & 0x0f) << 16) +
+ (hdmi_read(sd, 0x5e) << 8) +
+ hdmi_read(sd, 0x5f));
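+ /* HDMI audio clock regeneration: 128 * audio sample rate = TMDS clock * N / CTS */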
+ v4l2_info(sd, "AV Mute: %s\n",
+ (hdmi_read(sd, 0x04) & 0x40) ? "on" : "off");
+ v4l2_info(sd, "Deep color mode: %s\n",
+ deep_color_mode_txt[hdmi_read(sd, 0x0b) >> 6]);
+
+ print_avi_infoframe(sd);
+ return 0;
+}
+
+static int adv7842_log_status(struct v4l2_subdev *sd)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ if (state->mode == ADV7842_MODE_SDP)
+ return adv7842_sdp_log_status(sd);
+ return adv7842_cp_log_status(sd);
+}
+
+static int adv7842_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+
+ if (state->mode != ADV7842_MODE_SDP)
+ return -ENODATA;
+
+ if (!(sdp_read(sd, 0x5A) & 0x01)) {
+ *std = 0;
+ v4l2_dbg(1, debug, sd, "%s: no valid signal\n", __func__);
+ return 0;
+ }
+
+ switch (sdp_read(sd, 0x52) & 0x0f) {
+ case 0:
+ /* NTSC-M/J */
+ *std &= V4L2_STD_NTSC;
+ break;
+ case 2:
+ /* NTSC-443 */
+ *std &= V4L2_STD_NTSC_443;
+ break;
+ case 3:
+ /* 60HzSECAM */
+ *std &= V4L2_STD_SECAM;
+ break;
+ case 4:
+ /* PAL-M */
+ *std &= V4L2_STD_PAL_M;
+ break;
+ case 6:
+ /* PAL-60 */
+ *std &= V4L2_STD_PAL_60;
+ break;
+ case 0xc:
+ /* PAL-CombN */
+ *std &= V4L2_STD_PAL_Nc;
+ break;
+ case 0xe:
+ /* PAL-BGHID */
+ *std &= V4L2_STD_PAL;
+ break;
+ case 0xf:
+ /* SECAM */
+ *std &= V4L2_STD_SECAM;
+ break;
+ default:
+ *std &= V4L2_STD_ALL;
+ break;
+ }
+ return 0;
+}
+
+static int adv7842_s_std(struct v4l2_subdev *sd, v4l2_std_id norm)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+
+ if (state->mode != ADV7842_MODE_SDP)
+ return -ENODATA;
+
+ if (norm & V4L2_STD_ALL) {
+ state->norm = norm;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int adv7842_g_std(struct v4l2_subdev *sd, v4l2_std_id *norm)
+{
+ struct adv7842_state *state = to_state(sd);
+
+ v4l2_dbg(1, debug, sd, "%s:\n", __func__);
+
+ if (state->mode != ADV7842_MODE_SDP)
+ return -ENODATA;
+
+ *norm = state->norm;
+ return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+static int adv7842_core_init(struct v4l2_subdev *sd,
+ const struct adv7842_platform_data *pdata)
+{
+ hdmi_write(sd, 0x48,
+ (pdata->disable_pwrdnb ? 0x80 : 0) |
+ (pdata->disable_cable_det_rst ? 0x40 : 0));
+
+ disable_input(sd);
+
+ /* power */
+ io_write(sd, 0x0c, 0x42); /* Power up part and power down VDP */
+ io_write(sd, 0x15, 0x80); /* Power up pads */
+
+ /* video format */
+ io_write(sd, 0x02,
+ pdata->inp_color_space << 4 |
+ pdata->alt_gamma << 3 |
+ pdata->op_656_range << 2 |
+ pdata->rgb_out << 1 |
+ pdata->alt_data_sat << 0);
+ io_write(sd, 0x03, pdata->op_format_sel);
+ io_write_and_or(sd, 0x04, 0x1f, pdata->op_ch_sel << 5);
+ io_write_and_or(sd, 0x05, 0xf0, pdata->blank_data << 3 |
+ pdata->insert_av_codes << 2 |
+ pdata->replicate_av_codes << 1 |
+ pdata->invert_cbcr << 0);
+
+ /* Drive strength */
+ io_write_and_or(sd, 0x14, 0xc0, pdata->drive_strength.data<<4 |
+ pdata->drive_strength.clock<<2 |
+ pdata->drive_strength.sync);
+
+ /* HDMI free run */
+ cp_write(sd, 0xba, (pdata->hdmi_free_run_mode << 1) | 0x01);
+
+ /* TODO from platform data */
+ cp_write(sd, 0x69, 0x14); /* Enable CP CSC */
+ io_write(sd, 0x06, 0xa6); /* positive VS and HS and DE */
+ cp_write(sd, 0xf3, 0xdc); /* Low threshold to enter/exit free run mode */
+ afe_write(sd, 0xb5, 0x01); /* Setting MCLK to 256Fs */
+
+ afe_write(sd, 0x02, pdata->ain_sel); /* Select analog input muxing mode */
+ io_write_and_or(sd, 0x30, ~(1 << 4), pdata->output_bus_lsb_to_msb << 4);
+
+ sdp_csc_coeff(sd, &pdata->sdp_csc_coeff);
+
+ if (pdata->sdp_io_sync.adjust) {
+ const struct adv7842_sdp_io_sync_adjustment *s = &pdata->sdp_io_sync;
+ sdp_io_write(sd, 0x94, (s->hs_beg>>8) & 0xf);
+ sdp_io_write(sd, 0x95, s->hs_beg & 0xff);
+ sdp_io_write(sd, 0x96, (s->hs_width>>8) & 0xf);
+ sdp_io_write(sd, 0x97, s->hs_width & 0xff);
+ sdp_io_write(sd, 0x98, (s->de_beg>>8) & 0xf);
+ sdp_io_write(sd, 0x99, s->de_beg & 0xff);
+ sdp_io_write(sd, 0x9a, (s->de_end>>8) & 0xf);
+ sdp_io_write(sd, 0x9b, s->de_end & 0xff);
+ }
+
+ /* todo, improve settings for sdram */
+ if (pdata->sd_ram_size >= 128) {
+ sdp_write(sd, 0x12, 0x0d); /* Frame TBC,3D comb enabled */
+ if (pdata->sd_ram_ddr) {
+ /* SDP setup for the AD eval board */
+ sdp_io_write(sd, 0x6f, 0x00); /* DDR mode */
+ sdp_io_write(sd, 0x75, 0x0a); /* 128 MB memory size */
+ sdp_io_write(sd, 0x7a, 0xa5); /* Timing Adjustment */
+ sdp_io_write(sd, 0x7b, 0x8f); /* Timing Adjustment */
+ sdp_io_write(sd, 0x60, 0x01); /* SDRAM reset */
+ } else {
+ sdp_io_write(sd, 0x75, 0x0a); /* 64 MB memory size? */
+ sdp_io_write(sd, 0x74, 0x00); /* must be zero for sdr sdram */
+ sdp_io_write(sd, 0x79, 0x33); /* CAS latency to 3,
+ depends on memory */
+ sdp_io_write(sd, 0x6f, 0x01); /* SDR mode */
+ sdp_io_write(sd, 0x7a, 0xa5); /* Timing Adjustment */
+ sdp_io_write(sd, 0x7b, 0x8f); /* Timing Adjustment */
+ sdp_io_write(sd, 0x60, 0x01); /* SDRAM reset */
+ }
+ } else {
+ /*
+ * Manual UG-214, rev 0 is a bit confusing about this bit,
+ * but a '1' disables any signal if the RAM is active.
+ */
+ sdp_io_write(sd, 0x29, 0x10); /* Tristate memory interface */
+ }
+
+ select_input(sd, pdata->vid_std_select);
+
+ enable_input(sd);
+
+ /* disable I2C access to internal EDID ram from HDMI DDC ports */
+ rep_write_and_or(sd, 0x77, 0xf3, 0x00);
+
+ hdmi_write(sd, 0x69, 0xa3); /* HPA manual */
+ /* HPA disable on port A and B */
+ io_write_and_or(sd, 0x20, 0xcf, 0x00);
+
+ /* LLC */
+ /* Set phase to 16. TODO: get this from platform_data */
+ io_write(sd, 0x19, 0x90);
+ io_write(sd, 0x33, 0x40);
+
+ /* interrupts */
+ io_write(sd, 0x40, 0xe2); /* Configure INT1 */
+
+ adv7842_irq_enable(sd, true);
+
+ return v4l2_ctrl_handler_setup(sd->ctrl_handler);
+}
+
+/* ----------------------------------------------------------------------- */
+
+static int adv7842_ddr_ram_test(struct v4l2_subdev *sd)
+{
+ /*
+ * From ADV784x external Memory test.pdf
+ *
+ * A reset must have just been performed before running the test.
+ * It is recommended to reset again after the test.
+ */
+ int i;
+ int pass = 0;
+ int fail = 0;
+ int complete = 0;
+
+ io_write(sd, 0x00, 0x01); /* Program SDP 4x1 */
+ io_write(sd, 0x01, 0x00); /* Program SDP mode */
+ afe_write(sd, 0x80, 0x92); /* SDP Recommended Write */
+ afe_write(sd, 0x9B, 0x01); /* SDP Recommended Write ADV7844ES1 */
+ afe_write(sd, 0x9C, 0x60); /* SDP Recommended Write ADV7844ES1 */
+ afe_write(sd, 0x9E, 0x02); /* SDP Recommended Write ADV7844ES1 */
+ afe_write(sd, 0xA0, 0x0B); /* SDP Recommended Write ADV7844ES1 */
+ afe_write(sd, 0xC3, 0x02); /* Memory BIST Initialisation */
+ io_write(sd, 0x0C, 0x40); /* Power up ADV7844 */
+ io_write(sd, 0x15, 0xBA); /* Enable outputs */
+ sdp_write(sd, 0x12, 0x00); /* Disable 3D comb, Frame TBC & 3DNR */
+ io_write(sd, 0xFF, 0x04); /* Reset memory controller */
+
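+ /* each coefficient is split over a register pair: the high bits are
+ OR'd into the low bits of the even register, the low byte goes
+ into the odd register */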
+ mdelay(5);
+
+ sdp_write(sd, 0x12, 0x00); /* Disable 3D Comb, Frame TBC & 3DNR */
+ sdp_io_write(sd, 0x2A, 0x01); /* Memory BIST Initialisation */
+ sdp_io_write(sd, 0x7c, 0x19); /* Memory BIST Initialisation */
+ sdp_io_write(sd, 0x80, 0x87); /* Memory BIST Initialisation */
+ sdp_io_write(sd, 0x81, 0x4a); /* Memory BIST Initialisation */
+ sdp_io_write(sd, 0x82, 0x2c); /* Memory BIST Initialisation */
+ sdp_io_write(sd, 0x83, 0x0e); /* Memory BIST Initialisation */
+ sdp_io_write(sd, 0x84, 0x94); /* Memory BIST Initialisation */
+ sdp_io_write(sd, 0x85, 0x62); /* Memory BIST Initialisation */
+ sdp_io_write(sd, 0x7d, 0x00); /* Memory BIST Initialisation */
+ sdp_io_write(sd, 0x7e, 0x1a); /* Memory BIST Initialisation */
+
+ mdelay(5);
+
+ sdp_io_write(sd, 0xd9, 0xd5); /* Enable BIST Test */
+ sdp_write(sd, 0x12, 0x05); /* Enable FRAME TBC & 3D COMB */
+
+ mdelay(20);
+
+ for (i = 0; i < 10; i++) {
+ u8 result = sdp_io_read(sd, 0xdb);
+ if (result & 0x10) {
+ complete++;
+ if (result & 0x20)
+ fail++;
+ else
+ pass++;
+ }
+ mdelay(20);
+ }
+
+ v4l2_dbg(1, debug, sd,
+ "Ram Test: completed %d of %d: pass %d, fail %d\n",
+ complete, i, pass, fail);
+
+ if (!complete || fail)
+ return -EIO;
+ return 0;
+}
+
+static void adv7842_rewrite_i2c_addresses(struct v4l2_subdev *sd,
+ struct adv7842_platform_data *pdata)
+{
+ io_write(sd, 0xf1, pdata->i2c_sdp << 1);
+ io_write(sd, 0xf2, pdata->i2c_sdp_io << 1);
+ io_write(sd, 0xf3, pdata->i2c_avlink << 1);
+ io_write(sd, 0xf4, pdata->i2c_cec << 1);
+ io_write(sd, 0xf5, pdata->i2c_infoframe << 1);
+
+ io_write(sd, 0xf8, pdata->i2c_afe << 1);
+ io_write(sd, 0xf9, pdata->i2c_repeater << 1);
+ io_write(sd, 0xfa, pdata->i2c_edid << 1);
+ io_write(sd, 0xfb, pdata->i2c_hdmi << 1);
+
+ io_write(sd, 0xfd, pdata->i2c_cp << 1);
+ io_write(sd, 0xfe, pdata->i2c_vdp << 1);
+}
+
+static int adv7842_command_ram_test(struct v4l2_subdev *sd)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ struct adv7842_state *state = to_state(sd);
+ struct adv7842_platform_data *pdata = client->dev.platform_data;
+ int ret = 0;
+
+ if (!pdata)
+ return -ENODEV;
+
+ if (!pdata->sd_ram_size || !pdata->sd_ram_ddr) {
+ v4l2_info(sd, "no sdram or no ddr sdram\n");
+ return -EINVAL;
+ }
+
+ main_reset(sd);
+
+ adv7842_rewrite_i2c_addresses(sd, pdata);
+
+ /* run ram test */
+ ret = adv7842_ddr_ram_test(sd);
+
+ main_reset(sd);
+
+ adv7842_rewrite_i2c_addresses(sd, pdata);
+
+ /* and re-init chip and state */
+ adv7842_core_init(sd, pdata);
+
+ disable_input(sd);
+
+ select_input(sd, state->vid_std_select);
+
+ enable_input(sd);
+
+ adv7842_s_dv_timings(sd, &state->timings);
+
+ edid_write_vga_segment(sd);
+ edid_write_hdmi_segment(sd, 0);
+ edid_write_hdmi_segment(sd, 1);
+
+ return ret;
+}
+
+static long adv7842_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
+{
+ switch (cmd) {
+ case ADV7842_CMD_RAM_TEST:
+ return adv7842_command_ram_test(sd);
+ }
+ return -ENOTTY;
+}
+
+/* ----------------------------------------------------------------------- */
+
+static const struct v4l2_ctrl_ops adv7842_ctrl_ops = {
+ .s_ctrl = adv7842_s_ctrl,
+};
+
+static const struct v4l2_subdev_core_ops adv7842_core_ops = {
+ .log_status = adv7842_log_status,
+ .g_std = adv7842_g_std,
+ .s_std = adv7842_s_std,
+ .ioctl = adv7842_ioctl,
+ .interrupt_service_routine = adv7842_isr,
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ .g_register = adv7842_g_register,
+ .s_register = adv7842_s_register,
+#endif
+};
+
+static const struct v4l2_subdev_video_ops adv7842_video_ops = {
+ .s_routing = adv7842_s_routing,
+ .querystd = adv7842_querystd,
+ .g_input_status = adv7842_g_input_status,
+ .s_dv_timings = adv7842_s_dv_timings,
+ .g_dv_timings = adv7842_g_dv_timings,
+ .query_dv_timings = adv7842_query_dv_timings,
+ .enum_dv_timings = adv7842_enum_dv_timings,
+ .dv_timings_cap = adv7842_dv_timings_cap,
+ .enum_mbus_fmt = adv7842_enum_mbus_fmt,
+ .g_mbus_fmt = adv7842_g_mbus_fmt,
+ .try_mbus_fmt = adv7842_g_mbus_fmt,
+ .s_mbus_fmt = adv7842_g_mbus_fmt,
+};
+
+static const struct v4l2_subdev_pad_ops adv7842_pad_ops = {
+ .set_edid = adv7842_set_edid,
+};
+
+static const struct v4l2_subdev_ops adv7842_ops = {
+ .core = &adv7842_core_ops,
+ .video = &adv7842_video_ops,
+ .pad = &adv7842_pad_ops,
+};
+
+/* -------------------------- custom ctrls ---------------------------------- */
+
+static const struct v4l2_ctrl_config adv7842_ctrl_analog_sampling_phase = {
+ .ops = &adv7842_ctrl_ops,
+ .id = V4L2_CID_ADV_RX_ANALOG_SAMPLING_PHASE,
+ .name = "Analog Sampling Phase",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .min = 0,
+ .max = 0x1f,
+ .step = 1,
+ .def = 0,
+};
+
+static const struct v4l2_ctrl_config adv7842_ctrl_free_run_color_manual = {
+ .ops = &adv7842_ctrl_ops,
+ .id = V4L2_CID_ADV_RX_FREE_RUN_COLOR_MANUAL,
+ .name = "Free Running Color, Manual",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .max = 1,
+ .step = 1,
+ .def = 1,
+};
+
+static const struct v4l2_ctrl_config adv7842_ctrl_free_run_color = {
+ .ops = &adv7842_ctrl_ops,
+ .id = V4L2_CID_ADV_RX_FREE_RUN_COLOR,
+ .name = "Free Running Color",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .max = 0xffffff,
+ .step = 0x1,
+};
+
+
+static void adv7842_unregister_clients(struct adv7842_state *state)
+{
+ if (state->i2c_avlink)
+ i2c_unregister_device(state->i2c_avlink);
+ if (state->i2c_cec)
+ i2c_unregister_device(state->i2c_cec);
+ if (state->i2c_infoframe)
+ i2c_unregister_device(state->i2c_infoframe);
+ if (state->i2c_sdp_io)
+ i2c_unregister_device(state->i2c_sdp_io);
+ if (state->i2c_sdp)
+ i2c_unregister_device(state->i2c_sdp);
+ if (state->i2c_afe)
+ i2c_unregister_device(state->i2c_afe);
+ if (state->i2c_repeater)
+ i2c_unregister_device(state->i2c_repeater);
+ if (state->i2c_edid)
+ i2c_unregister_device(state->i2c_edid);
+ if (state->i2c_hdmi)
+ i2c_unregister_device(state->i2c_hdmi);
+ if (state->i2c_cp)
+ i2c_unregister_device(state->i2c_cp);
+ if (state->i2c_vdp)
+ i2c_unregister_device(state->i2c_vdp);
+}
+
+static struct i2c_client *adv7842_dummy_client(struct v4l2_subdev *sd,
+ u8 addr, u8 io_reg)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ io_write(sd, io_reg, addr << 1);
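+ /* read the map register back so the dummy client is created on the
+ address the chip actually latched */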
+ return i2c_new_dummy(client->adapter, io_read(sd, io_reg) >> 1);
+}
+
+static int adv7842_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adv7842_state *state;
+ struct adv7842_platform_data *pdata = client->dev.platform_data;
+ struct v4l2_ctrl_handler *hdl;
+ struct v4l2_subdev *sd;
+ u16 rev;
+ int err;
+
+ /* Check if the adapter supports the needed features */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -EIO;
+
+ v4l_dbg(1, debug, client, "detecting adv7842 client on address 0x%x\n",
+ client->addr << 1);
+
+ if (!pdata) {
+ v4l_err(client, "No platform data!\n");
+ return -ENODEV;
+ }
+
+ state = devm_kzalloc(&client->dev, sizeof(struct adv7842_state), GFP_KERNEL);
+ if (!state) {
+ v4l_err(client, "Could not allocate adv7842_state memory!\n");
+ return -ENOMEM;
+ }
+
+ sd = &state->sd;
+ v4l2_i2c_subdev_init(sd, client, &adv7842_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ state->connector_hdmi = pdata->connector_hdmi;
+ state->mode = pdata->mode;
+
+ state->hdmi_port_a = true;
+
+ /* i2c access to adv7842? */
+ rev = adv_smbus_read_byte_data_check(client, 0xea, false) << 8 |
+ adv_smbus_read_byte_data_check(client, 0xeb, false);
+ if (rev != 0x2012) {
+ v4l2_info(sd, "got rev=0x%04x on first read attempt\n", rev);
+ rev = adv_smbus_read_byte_data_check(client, 0xea, false) << 8 |
+ adv_smbus_read_byte_data_check(client, 0xeb, false);
+ }
+ if (rev != 0x2012) {
+ v4l2_info(sd, "not an adv7842 on address 0x%x (rev=0x%04x)\n",
+ client->addr << 1, rev);
+ return -ENODEV;
+ }
+
+ if (pdata->chip_reset)
+ main_reset(sd);
+
+ /* control handlers */
+ hdl = &state->hdl;
+ v4l2_ctrl_handler_init(hdl, 6);
+
+ /* add in ascending ID order */
+ v4l2_ctrl_new_std(hdl, &adv7842_ctrl_ops,
+ V4L2_CID_BRIGHTNESS, -128, 127, 1, 0);
+ v4l2_ctrl_new_std(hdl, &adv7842_ctrl_ops,
+ V4L2_CID_CONTRAST, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(hdl, &adv7842_ctrl_ops,
+ V4L2_CID_SATURATION, 0, 255, 1, 128);
+ v4l2_ctrl_new_std(hdl, &adv7842_ctrl_ops,
+ V4L2_CID_HUE, 0, 128, 1, 0);
+
+ /* custom controls */
+ state->detect_tx_5v_ctrl = v4l2_ctrl_new_std(hdl, NULL,
+ V4L2_CID_DV_RX_POWER_PRESENT, 0, 3, 0, 0);
+ state->analog_sampling_phase_ctrl = v4l2_ctrl_new_custom(hdl,
+ &adv7842_ctrl_analog_sampling_phase, NULL);
+ state->free_run_color_ctrl_manual = v4l2_ctrl_new_custom(hdl,
+ &adv7842_ctrl_free_run_color_manual, NULL);
+ state->free_run_color_ctrl = v4l2_ctrl_new_custom(hdl,
+ &adv7842_ctrl_free_run_color, NULL);
+ state->rgb_quantization_range_ctrl =
+ v4l2_ctrl_new_std_menu(hdl, &adv7842_ctrl_ops,
+ V4L2_CID_DV_RX_RGB_RANGE, V4L2_DV_RGB_RANGE_FULL,
+ 0, V4L2_DV_RGB_RANGE_AUTO);
+ sd->ctrl_handler = hdl;
+ if (hdl->error) {
+ err = hdl->error;
+ goto err_hdl;
+ }
+ state->detect_tx_5v_ctrl->is_private = true;
+ state->rgb_quantization_range_ctrl->is_private = true;
+ state->analog_sampling_phase_ctrl->is_private = true;
+ state->free_run_color_ctrl_manual->is_private = true;
+ state->free_run_color_ctrl->is_private = true;
+
+ if (adv7842_s_detect_tx_5v_ctrl(sd)) {
+ err = -ENODEV;
+ goto err_hdl;
+ }
+
+ state->i2c_avlink = adv7842_dummy_client(sd, pdata->i2c_avlink, 0xf3);
+ state->i2c_cec = adv7842_dummy_client(sd, pdata->i2c_cec, 0xf4);
+ state->i2c_infoframe = adv7842_dummy_client(sd, pdata->i2c_infoframe, 0xf5);
+ state->i2c_sdp_io = adv7842_dummy_client(sd, pdata->i2c_sdp_io, 0xf2);
+ state->i2c_sdp = adv7842_dummy_client(sd, pdata->i2c_sdp, 0xf1);
+ state->i2c_afe = adv7842_dummy_client(sd, pdata->i2c_afe, 0xf8);
+ state->i2c_repeater = adv7842_dummy_client(sd, pdata->i2c_repeater, 0xf9);
+ state->i2c_edid = adv7842_dummy_client(sd, pdata->i2c_edid, 0xfa);
+ state->i2c_hdmi = adv7842_dummy_client(sd, pdata->i2c_hdmi, 0xfb);
+ state->i2c_cp = adv7842_dummy_client(sd, pdata->i2c_cp, 0xfd);
+ state->i2c_vdp = adv7842_dummy_client(sd, pdata->i2c_vdp, 0xfe);
+ if (!state->i2c_avlink || !state->i2c_cec || !state->i2c_infoframe ||
+ !state->i2c_sdp_io || !state->i2c_sdp || !state->i2c_afe ||
+ !state->i2c_repeater || !state->i2c_edid || !state->i2c_hdmi ||
+ !state->i2c_cp || !state->i2c_vdp) {
+ err = -ENOMEM;
+ v4l2_err(sd, "failed to create all i2c clients\n");
+ goto err_i2c;
+ }
+
+ /* work queues */
+ state->work_queues = create_singlethread_workqueue(client->name);
+ if (!state->work_queues) {
+ v4l2_err(sd, "Could not create work queue\n");
+ err = -ENOMEM;
+ goto err_i2c;
+ }
+
+ INIT_DELAYED_WORK(&state->delayed_work_enable_hotplug,
+ adv7842_delayed_work_enable_hotplug);
+
+ state->pad.flags = MEDIA_PAD_FL_SOURCE;
+ err = media_entity_init(&sd->entity, 1, &state->pad, 0);
+ if (err)
+ goto err_work_queues;
+
+ err = adv7842_core_init(sd, pdata);
+ if (err)
+ goto err_entity;
+
+ v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
+ client->addr << 1, client->adapter->name);
+ return 0;
+
+err_entity:
+ media_entity_cleanup(&sd->entity);
+err_work_queues:
+ cancel_delayed_work(&state->delayed_work_enable_hotplug);
+ destroy_workqueue(state->work_queues);
+err_i2c:
+ adv7842_unregister_clients(state);
+err_hdl:
+ v4l2_ctrl_handler_free(hdl);
+ return err;
+}
+
+/* ----------------------------------------------------------------------- */
+
+static int adv7842_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct adv7842_state *state = to_state(sd);
+
+ adv7842_irq_enable(sd, false);
+
+ cancel_delayed_work(&state->delayed_work_enable_hotplug);
+ destroy_workqueue(state->work_queues);
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ adv7842_unregister_clients(to_state(sd));
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+ return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+static struct i2c_device_id adv7842_id[] = {
+ { "adv7842", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adv7842_id);
+
+/* ----------------------------------------------------------------------- */
+
+static struct i2c_driver adv7842_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "adv7842",
+ },
+ .probe = adv7842_probe,
+ .remove = adv7842_remove,
+ .id_table = adv7842_id,
+};
+
+module_i2c_driver(adv7842_driver);
diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c
index efdc873e58d..a9110d8bbbc 100644
--- a/drivers/media/i2c/ml86v7667.c
+++ b/drivers/media/i2c/ml86v7667.c
@@ -117,7 +117,7 @@ static int ml86v7667_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
struct i2c_client *client = v4l2_get_subdevdata(sd);
- int ret;
+ int ret = -EINVAL;
switch (ctrl->id) {
case V4L2_CID_BRIGHTNESS:
@@ -157,7 +157,7 @@ static int ml86v7667_s_ctrl(struct v4l2_ctrl *ctrl)
break;
}
- return 0;
+ return ret;
}
static int ml86v7667_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
@@ -209,7 +209,8 @@ static int ml86v7667_mbus_fmt(struct v4l2_subdev *sd,
fmt->code = V4L2_MBUS_FMT_YUYV8_2X8;
fmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
- fmt->field = V4L2_FIELD_INTERLACED;
+ /* The top field is always transferred first by the chip */
+ fmt->field = V4L2_FIELD_INTERLACED_TB;
fmt->width = 720;
fmt->height = priv->std & V4L2_STD_525_60 ? 480 : 576;
diff --git a/drivers/media/i2c/mt9v032.c b/drivers/media/i2c/mt9v032.c
index 60c6f673956..2c50effaa33 100644
--- a/drivers/media/i2c/mt9v032.c
+++ b/drivers/media/i2c/mt9v032.c
@@ -12,6 +12,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/log2.h>
@@ -135,6 +136,8 @@ struct mt9v032 {
struct mutex power_lock;
int power_count;
+ struct clk *clk;
+
struct mt9v032_platform_data *pdata;
u32 sysclk;
@@ -219,10 +222,9 @@ static int mt9v032_power_on(struct mt9v032 *mt9v032)
struct i2c_client *client = v4l2_get_subdevdata(&mt9v032->subdev);
int ret;
- if (mt9v032->pdata->set_clock) {
- mt9v032->pdata->set_clock(&mt9v032->subdev, mt9v032->sysclk);
- udelay(1);
- }
+ clk_set_rate(mt9v032->clk, mt9v032->sysclk);
+ clk_prepare_enable(mt9v032->clk);
+ udelay(1);
/* Reset the chip and stop data read out */
ret = mt9v032_write(client, MT9V032_RESET, 1);
@@ -238,8 +240,7 @@ static int mt9v032_power_on(struct mt9v032 *mt9v032)
static void mt9v032_power_off(struct mt9v032 *mt9v032)
{
- if (mt9v032->pdata->set_clock)
- mt9v032->pdata->set_clock(&mt9v032->subdev, 0);
+ clk_disable_unprepare(mt9v032->clk);
}
static int __mt9v032_set_power(struct mt9v032 *mt9v032, bool on)
@@ -748,6 +749,10 @@ static int mt9v032_probe(struct i2c_client *client,
if (!mt9v032)
return -ENOMEM;
+ mt9v032->clk = devm_clk_get(&client->dev, NULL);
+ if (IS_ERR(mt9v032->clk))
+ return PTR_ERR(mt9v032->clk);
+
mutex_init(&mt9v032->power_lock);
mt9v032->pdata = pdata;
diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
index 1dbb8118a28..4da90c621f7 100644
--- a/drivers/media/i2c/ov9650.c
+++ b/drivers/media/i2c/ov9650.c
@@ -1083,7 +1083,7 @@ static int ov965x_enum_frame_sizes(struct v4l2_subdev *sd,
{
int i = ARRAY_SIZE(ov965x_formats);
- if (fse->index > ARRAY_SIZE(ov965x_framesizes))
+ if (fse->index >= ARRAY_SIZE(ov965x_framesizes))
return -EINVAL;
while (--i)
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index 825ea86d982..b76ec0e7e68 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1111,6 +1111,11 @@ static int s5c73m3_oif_set_fmt(struct v4l2_subdev *sd,
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
mf = v4l2_subdev_get_try_format(fh, fmt->pad);
*mf = fmt->format;
+ if (fmt->pad == OIF_ISP_PAD) {
+ mf = v4l2_subdev_get_try_format(fh, OIF_SOURCE_PAD);
+ mf->width = fmt->format.width;
+ mf->height = fmt->format.height;
+ }
} else {
switch (fmt->pad) {
case OIF_ISP_PAD:
diff --git a/drivers/media/i2c/s5k6aa.c b/drivers/media/i2c/s5k6aa.c
index 789c02a6ca1..629a5cdadd3 100644
--- a/drivers/media/i2c/s5k6aa.c
+++ b/drivers/media/i2c/s5k6aa.c
@@ -1003,7 +1003,7 @@ static int s5k6aa_enum_frame_interval(struct v4l2_subdev *sd,
const struct s5k6aa_interval *fi;
int ret = 0;
- if (fie->index > ARRAY_SIZE(s5k6aa_intervals))
+ if (fie->index >= ARRAY_SIZE(s5k6aa_intervals))
return -EINVAL;
v4l_bound_align_image(&fie->width, S5K6AA_WIN_WIDTH_MIN,
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index 7fd766ec64c..637d0263452 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -225,19 +225,63 @@ static const unsigned char saa7111_init[] = {
0x00, 0x00
};
-/* SAA7113/GM7113C init codes
- * It's important that R_14... R_17 == 0x00
- * for the gm7113c chip to deliver stable video
+/*
+ * This table has one illegal value, and some values that are not
+ * correct according to the datasheet initialization table.
+ *
+ * If you need a table with legal/default values, tell the driver via
+ * i2c_board_info.platform_data and it will use the gm7113c_init
+ * table instead.
*/
+
+/* SAA7113 Init codes */
static const unsigned char saa7113_init[] = {
R_01_INC_DELAY, 0x08,
R_02_INPUT_CNTL_1, 0xc2,
R_03_INPUT_CNTL_2, 0x30,
R_04_INPUT_CNTL_3, 0x00,
R_05_INPUT_CNTL_4, 0x00,
- R_06_H_SYNC_START, 0x89,
+ R_06_H_SYNC_START, 0x89, /* Illegal value -119,
+ * min. value = -108 (0x94) */
+ R_07_H_SYNC_STOP, 0x0d,
+ R_08_SYNC_CNTL, 0x88, /* Not datasheet default.
+ * HTC = VTR mode, should be 0x98 */
+ R_09_LUMA_CNTL, 0x01,
+ R_0A_LUMA_BRIGHT_CNTL, 0x80,
+ R_0B_LUMA_CONTRAST_CNTL, 0x47,
+ R_0C_CHROMA_SAT_CNTL, 0x40,
+ R_0D_CHROMA_HUE_CNTL, 0x00,
+ R_0E_CHROMA_CNTL_1, 0x01,
+ R_0F_CHROMA_GAIN_CNTL, 0x2a,
+ R_10_CHROMA_CNTL_2, 0x08, /* Not datasheet default.
+ * VRLN enabled, should be 0x00 */
+ R_11_MODE_DELAY_CNTL, 0x0c,
+ R_12_RT_SIGNAL_CNTL, 0x07, /* Not datasheet default,
+ * should be 0x01 */
+ R_13_RT_X_PORT_OUT_CNTL, 0x00,
+ R_14_ANAL_ADC_COMPAT_CNTL, 0x00,
+ R_15_VGATE_START_FID_CHG, 0x00,
+ R_16_VGATE_STOP, 0x00,
+ R_17_MISC_VGATE_CONF_AND_MSB, 0x00,
+
+ 0x00, 0x00
+};
+
+/*
+ * GM7113C is a clone of the SAA7113 chip
+ * This init table is copied out of the saa7113 datasheet.
+ * In R_08 we enable "Automatic Field Detection" [AUFD],
+ * this is disabled when saa711x_set_v4lstd is called.
+ */
+static const unsigned char gm7113c_init[] = {
+ R_01_INC_DELAY, 0x08,
+ R_02_INPUT_CNTL_1, 0xc0,
+ R_03_INPUT_CNTL_2, 0x33,
+ R_04_INPUT_CNTL_3, 0x00,
+ R_05_INPUT_CNTL_4, 0x00,
+ R_06_H_SYNC_START, 0xe9,
R_07_H_SYNC_STOP, 0x0d,
- R_08_SYNC_CNTL, 0x88,
+ R_08_SYNC_CNTL, 0x98,
R_09_LUMA_CNTL, 0x01,
R_0A_LUMA_BRIGHT_CNTL, 0x80,
R_0B_LUMA_CONTRAST_CNTL, 0x47,
@@ -245,9 +289,9 @@ static const unsigned char saa7113_init[] = {
R_0D_CHROMA_HUE_CNTL, 0x00,
R_0E_CHROMA_CNTL_1, 0x01,
R_0F_CHROMA_GAIN_CNTL, 0x2a,
- R_10_CHROMA_CNTL_2, 0x08,
+ R_10_CHROMA_CNTL_2, 0x00,
R_11_MODE_DELAY_CNTL, 0x0c,
- R_12_RT_SIGNAL_CNTL, 0x07,
+ R_12_RT_SIGNAL_CNTL, 0x01,
R_13_RT_X_PORT_OUT_CNTL, 0x00,
R_14_ANAL_ADC_COMPAT_CNTL, 0x00,
R_15_VGATE_START_FID_CHG, 0x00,
@@ -462,24 +506,6 @@ static const unsigned char saa7115_cfg_50hz_video[] = {
/* ============== SAA7715 VIDEO templates (end) ======= */
-/* ============== GM7113C VIDEO templates ============= */
-static const unsigned char gm7113c_cfg_60hz_video[] = {
- R_08_SYNC_CNTL, 0x68, /* 0xBO: auto detection, 0x68 = NTSC */
- R_0E_CHROMA_CNTL_1, 0x07, /* video autodetection is on */
-
- 0x00, 0x00
-};
-
-static const unsigned char gm7113c_cfg_50hz_video[] = {
- R_08_SYNC_CNTL, 0x28, /* 0x28 = PAL */
- R_0E_CHROMA_CNTL_1, 0x07,
-
- 0x00, 0x00
-};
-
-/* ============== GM7113C VIDEO templates (end) ======= */
-
-
static const unsigned char saa7115_cfg_vbi_on[] = {
R_80_GLOBAL_CNTL_1, 0x00, /* reset tasks */
R_88_POWER_SAVE_ADC_PORT_CNTL, 0xd0, /* reset scaler */
@@ -964,17 +990,24 @@ static void saa711x_set_v4lstd(struct v4l2_subdev *sd, v4l2_std_id std)
// This works for NTSC-M, SECAM-L and the 50Hz PAL variants.
if (std & V4L2_STD_525_60) {
v4l2_dbg(1, debug, sd, "decoder set standard 60 Hz\n");
- if (state->ident == GM7113C)
- saa711x_writeregs(sd, gm7113c_cfg_60hz_video);
- else
+ if (state->ident == GM7113C) {
+ u8 reg = saa711x_read(sd, R_08_SYNC_CNTL);
+ reg &= ~(SAA7113_R_08_FSEL | SAA7113_R_08_AUFD);
+ reg |= SAA7113_R_08_FSEL;
+ saa711x_write(sd, R_08_SYNC_CNTL, reg);
+ } else {
saa711x_writeregs(sd, saa7115_cfg_60hz_video);
+ }
saa711x_set_size(sd, 720, 480);
} else {
v4l2_dbg(1, debug, sd, "decoder set standard 50 Hz\n");
- if (state->ident == GM7113C)
- saa711x_writeregs(sd, gm7113c_cfg_50hz_video);
- else
+ if (state->ident == GM7113C) {
+ u8 reg = saa711x_read(sd, R_08_SYNC_CNTL);
+ reg &= ~(SAA7113_R_08_FSEL | SAA7113_R_08_AUFD);
+ saa711x_write(sd, R_08_SYNC_CNTL, reg);
+ } else {
saa711x_writeregs(sd, saa7115_cfg_50hz_video);
+ }
saa711x_set_size(sd, 720, 576);
}
@@ -1596,6 +1629,65 @@ static const struct v4l2_subdev_ops saa711x_ops = {
/* ----------------------------------------------------------------------- */
+static void saa711x_write_platform_data(struct saa711x_state *state,
+ struct saa7115_platform_data *data)
+{
+ struct v4l2_subdev *sd = &state->sd;
+ u8 work;
+
+ if (state->ident != GM7113C &&
+ state->ident != SAA7113)
+ return;
+
+ if (data->saa7113_r08_htc) {
+ work = saa711x_read(sd, R_08_SYNC_CNTL);
+ work &= ~SAA7113_R_08_HTC_MASK;
+ work |= ((*data->saa7113_r08_htc) << SAA7113_R_08_HTC_OFFSET);
+ saa711x_write(sd, R_08_SYNC_CNTL, work);
+ }
+
+ if (data->saa7113_r10_vrln) {
+ work = saa711x_read(sd, R_10_CHROMA_CNTL_2);
+ work &= ~SAA7113_R_10_VRLN_MASK;
+ if (*data->saa7113_r10_vrln)
+ work |= (1 << SAA7113_R_10_VRLN_OFFSET);
+ saa711x_write(sd, R_10_CHROMA_CNTL_2, work);
+ }
+
+ if (data->saa7113_r10_ofts) {
+ work = saa711x_read(sd, R_10_CHROMA_CNTL_2);
+ work &= ~SAA7113_R_10_OFTS_MASK;
+ work |= (*data->saa7113_r10_ofts << SAA7113_R_10_OFTS_OFFSET);
+ saa711x_write(sd, R_10_CHROMA_CNTL_2, work);
+ }
+
+ if (data->saa7113_r12_rts0) {
+ work = saa711x_read(sd, R_12_RT_SIGNAL_CNTL);
+ work &= ~SAA7113_R_12_RTS0_MASK;
+ work |= (*data->saa7113_r12_rts0 << SAA7113_R_12_RTS0_OFFSET);
+
+ /* According to the datasheet,
+ * SAA7113_RTS_DOT_IN should only be used on RTS1 */
+ WARN_ON(*data->saa7113_r12_rts0 == SAA7113_RTS_DOT_IN);
+ saa711x_write(sd, R_12_RT_SIGNAL_CNTL, work);
+ }
+
+ if (data->saa7113_r12_rts1) {
+ work = saa711x_read(sd, R_12_RT_SIGNAL_CNTL);
+ work &= ~SAA7113_R_12_RTS1_MASK;
+ work |= (*data->saa7113_r12_rts1 << SAA7113_R_12_RTS1_OFFSET);
+ saa711x_write(sd, R_12_RT_SIGNAL_CNTL, work);
+ }
+
+ if (data->saa7113_r13_adlsb) {
+ work = saa711x_read(sd, R_13_RT_X_PORT_OUT_CNTL);
+ work &= ~SAA7113_R_13_ADLSB_MASK;
+ if (*data->saa7113_r13_adlsb)
+ work |= (1 << SAA7113_R_13_ADLSB_OFFSET);
+ saa711x_write(sd, R_13_RT_X_PORT_OUT_CNTL, work);
+ }
+}
+
/**
* saa711x_detect_chip - Detects the saa711x (or clone) variant
* @client: I2C client structure.
@@ -1704,6 +1796,7 @@ static int saa711x_probe(struct i2c_client *client,
struct saa711x_state *state;
struct v4l2_subdev *sd;
struct v4l2_ctrl_handler *hdl;
+ struct saa7115_platform_data *pdata;
int ident;
char name[CHIP_VER_SIZE + 1];
@@ -1767,21 +1860,31 @@ static int saa711x_probe(struct i2c_client *client,
/* init to 60hz/48khz */
state->crystal_freq = SAA7115_FREQ_24_576_MHZ;
+ pdata = client->dev.platform_data;
switch (state->ident) {
case SAA7111:
case SAA7111A:
saa711x_writeregs(sd, saa7111_init);
break;
case GM7113C:
+ saa711x_writeregs(sd, gm7113c_init);
+ break;
case SAA7113:
- saa711x_writeregs(sd, saa7113_init);
+ if (pdata && pdata->saa7113_force_gm7113c_init)
+ saa711x_writeregs(sd, gm7113c_init);
+ else
+ saa711x_writeregs(sd, saa7113_init);
break;
default:
state->crystal_freq = SAA7115_FREQ_32_11_MHZ;
saa711x_writeregs(sd, saa7115_init_auto_input);
}
- if (state->ident > SAA7111A)
+ if (state->ident > SAA7111A && state->ident != GM7113C)
saa711x_writeregs(sd, saa7115_init_misc);
+
+ if (pdata)
+ saa711x_write_platform_data(state, pdata);
+
saa711x_set_v4lstd(sd, V4L2_STD_NTSC);
v4l2_ctrl_handler_setup(hdl);
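
Editor's note, not part of the patch: a sketch of how a bridge or board driver might use the saa7115_platform_data fields added above. The struct and field names are taken from the hunks; the exact field types live in include/media/saa7115.h (not shown here) and are assumed, as are the variable names and the 0x25 I2C address:

#include <linux/i2c.h>
#include <media/saa7115.h>

static unsigned int board_r08_htc = 0x3;	/* hypothetical HTC value for R_08 */

static struct saa7115_platform_data board_saa7113_pdata = {
	.saa7113_force_gm7113c_init = true,	/* use gm7113c_init instead of saa7113_init */
	.saa7113_r08_htc = &board_r08_htc,	/* unset pointers keep the table values */
};

static struct i2c_board_info board_saa7113_info = {
	I2C_BOARD_INFO("saa7113", 0x25),
	.platform_data = &board_saa7113_pdata,
};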
diff --git a/drivers/media/i2c/saa711x_regs.h b/drivers/media/i2c/saa711x_regs.h
index 4e5f2eb0a2c..730ca90b30a 100644
--- a/drivers/media/i2c/saa711x_regs.h
+++ b/drivers/media/i2c/saa711x_regs.h
@@ -201,6 +201,25 @@
#define R_FB_PULSE_C_POS_MSB 0xfb
#define R_FF_S_PLL_MAX_PHASE_ERR_THRESH_NUM_LINES 0xff
+/* SAA7113 bit-masks */
+#define SAA7113_R_08_HTC_OFFSET 3
+#define SAA7113_R_08_HTC_MASK (0x3 << SAA7113_R_08_HTC_OFFSET)
+#define SAA7113_R_08_FSEL 0x40
+#define SAA7113_R_08_AUFD 0x80
+
+#define SAA7113_R_10_VRLN_OFFSET 3
+#define SAA7113_R_10_VRLN_MASK (0x1 << SAA7113_R_10_VRLN_OFFSET)
+#define SAA7113_R_10_OFTS_OFFSET 6
+#define SAA7113_R_10_OFTS_MASK (0x3 << SAA7113_R_10_OFTS_OFFSET)
+
+#define SAA7113_R_12_RTS0_OFFSET 0
+#define SAA7113_R_12_RTS0_MASK (0xf << SAA7113_R_12_RTS0_OFFSET)
+#define SAA7113_R_12_RTS1_OFFSET 4
+#define SAA7113_R_12_RTS1_MASK (0xf << SAA7113_R_12_RTS1_OFFSET)
+
+#define SAA7113_R_13_ADLSB_OFFSET 7
+#define SAA7113_R_13_ADLSB_MASK (0x1 << SAA7113_R_13_ADLSB_OFFSET)
+
#if 0
/* Those structs will be used in the future for debug purposes */
struct saa711x_reg_descr {
diff --git a/drivers/media/i2c/smiapp-pll.c b/drivers/media/i2c/smiapp-pll.c
index d8d5da7c52d..2335529b195 100644
--- a/drivers/media/i2c/smiapp-pll.c
+++ b/drivers/media/i2c/smiapp-pll.c
@@ -87,6 +87,17 @@ static void print_pll(struct device *dev, struct smiapp_pll *pll)
dev_dbg(dev, "vt_pix_clk_freq_hz \t%d\n", pll->vt_pix_clk_freq_hz);
}
+/*
+ * Heuristically guess the PLL tree for a given common multiplier and
+ * divisor. Begin with the operational timing and continue to video
+ * timing once operational timing has been verified.
+ *
+ * @mul is the PLL multiplier and @div is the common divisor
+ * (pre_pll_clk_div and op_sys_clk_div combined). The final PLL
+ * multiplier will be a multiple of @mul.
+ *
+ * @return Zero on success, error code on error.
+ */
static int __smiapp_pll_calculate(struct device *dev,
const struct smiapp_pll_limits *limits,
struct smiapp_pll *pll, uint32_t mul,
@@ -95,6 +106,12 @@ static int __smiapp_pll_calculate(struct device *dev,
uint32_t sys_div;
uint32_t best_pix_div = INT_MAX >> 1;
uint32_t vt_op_binning_div;
+ /*
+ * Multipliers (and divisors) higher than necessitated by the external
+ * clock and the output clocks are often required.
+ * There are limits for all values in the clock tree. These
+ * are the minimum and maximum multiplier for mul.
+ */
uint32_t more_mul_min, more_mul_max;
uint32_t more_mul_factor;
uint32_t min_vt_div, max_vt_div, vt_div;
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 7ac7580f85c..ae66d91bf71 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -1122,9 +1122,9 @@ static int smiapp_power_on(struct smiapp_sensor *sensor)
rval = sensor->platform_data->set_xclk(
&sensor->src->sd, sensor->platform_data->ext_clk);
else
- rval = clk_enable(sensor->ext_clk);
+ rval = clk_prepare_enable(sensor->ext_clk);
if (rval < 0) {
- dev_dbg(&client->dev, "failed to set xclk\n");
+ dev_dbg(&client->dev, "failed to enable xclk\n");
goto out_xclk_fail;
}
usleep_range(1000, 1000);
@@ -1244,7 +1244,7 @@ out_cci_addr_fail:
if (sensor->platform_data->set_xclk)
sensor->platform_data->set_xclk(&sensor->src->sd, 0);
else
- clk_disable(sensor->ext_clk);
+ clk_disable_unprepare(sensor->ext_clk);
out_xclk_fail:
regulator_disable(sensor->vana);
@@ -1270,7 +1270,7 @@ static void smiapp_power_off(struct smiapp_sensor *sensor)
if (sensor->platform_data->set_xclk)
sensor->platform_data->set_xclk(&sensor->src->sd, 0);
else
- clk_disable(sensor->ext_clk);
+ clk_disable_unprepare(sensor->ext_clk);
usleep_range(5000, 5000);
regulator_disable(sensor->vana);
sensor->streaming = 0;
@@ -1835,12 +1835,12 @@ static void smiapp_set_compose_scaler(struct v4l2_subdev *subdev,
* sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN]
/ sensor->limits[SMIAPP_LIMIT_MIN_X_OUTPUT_SIZE];
- a = min(sensor->limits[SMIAPP_LIMIT_SCALER_M_MAX],
- max(a, sensor->limits[SMIAPP_LIMIT_SCALER_M_MIN]));
- b = min(sensor->limits[SMIAPP_LIMIT_SCALER_M_MAX],
- max(b, sensor->limits[SMIAPP_LIMIT_SCALER_M_MIN]));
- max_m = min(sensor->limits[SMIAPP_LIMIT_SCALER_M_MAX],
- max(max_m, sensor->limits[SMIAPP_LIMIT_SCALER_M_MIN]));
+ a = clamp(a, sensor->limits[SMIAPP_LIMIT_SCALER_M_MIN],
+ sensor->limits[SMIAPP_LIMIT_SCALER_M_MAX]);
+ b = clamp(b, sensor->limits[SMIAPP_LIMIT_SCALER_M_MIN],
+ sensor->limits[SMIAPP_LIMIT_SCALER_M_MAX]);
+ max_m = clamp(max_m, sensor->limits[SMIAPP_LIMIT_SCALER_M_MIN],
+ sensor->limits[SMIAPP_LIMIT_SCALER_M_MAX]);
dev_dbg(&client->dev, "scaling: a %d b %d max_m %d\n", a, b, max_m);
@@ -2363,11 +2363,9 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
}
if (!sensor->platform_data->set_xclk) {
- sensor->ext_clk = devm_clk_get(&client->dev,
- sensor->platform_data->ext_clk_name);
+ sensor->ext_clk = devm_clk_get(&client->dev, "ext_clk");
if (IS_ERR(sensor->ext_clk)) {
- dev_err(&client->dev, "could not get clock %s\n",
- sensor->platform_data->ext_clk_name);
+ dev_err(&client->dev, "could not get clock\n");
return -ENODEV;
}
@@ -2375,8 +2373,7 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
sensor->platform_data->ext_clk);
if (rval < 0) {
dev_err(&client->dev,
- "unable to set clock %s freq to %u\n",
- sensor->platform_data->ext_clk_name,
+ "unable to set clock freq to %u\n",
sensor->platform_data->ext_clk);
return -ENODEV;
}
@@ -2839,7 +2836,7 @@ static int smiapp_remove(struct i2c_client *client)
if (sensor->platform_data->set_xclk)
sensor->platform_data->set_xclk(&sensor->src->sd, 0);
else
- clk_disable(sensor->ext_clk);
+ clk_disable_unprepare(sensor->ext_clk);
sensor->power_count = 0;
}
diff --git a/drivers/media/i2c/soc_camera/mt9m111.c b/drivers/media/i2c/soc_camera/mt9m111.c
index de3605df47c..6f4056668bb 100644
--- a/drivers/media/i2c/soc_camera/mt9m111.c
+++ b/drivers/media/i2c/soc_camera/mt9m111.c
@@ -946,6 +946,10 @@ static int mt9m111_probe(struct i2c_client *client,
if (!mt9m111)
return -ENOMEM;
+ mt9m111->clk = v4l2_clk_get(&client->dev, "mclk");
+ if (IS_ERR(mt9m111->clk))
+ return -EPROBE_DEFER;
+
/* Default HIGHPOWER context */
mt9m111->ctx = &context_b;
@@ -963,8 +967,10 @@ static int mt9m111_probe(struct i2c_client *client,
&mt9m111_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, 1, 0,
V4L2_EXPOSURE_AUTO);
mt9m111->subdev.ctrl_handler = &mt9m111->hdl;
- if (mt9m111->hdl.error)
- return mt9m111->hdl.error;
+ if (mt9m111->hdl.error) {
+ ret = mt9m111->hdl.error;
+ goto out_clkput;
+ }
/* Second stage probe - when a capture adapter is there */
mt9m111->rect.left = MT9M111_MIN_DARK_COLS;
@@ -975,18 +981,25 @@ static int mt9m111_probe(struct i2c_client *client,
mt9m111->lastpage = -1;
mutex_init(&mt9m111->power_lock);
- mt9m111->clk = v4l2_clk_get(&client->dev, "mclk");
- if (IS_ERR(mt9m111->clk)) {
- ret = PTR_ERR(mt9m111->clk);
- goto eclkget;
- }
+ ret = soc_camera_power_init(&client->dev, ssdd);
+ if (ret < 0)
+ goto out_hdlfree;
ret = mt9m111_video_probe(client);
- if (ret) {
- v4l2_clk_put(mt9m111->clk);
-eclkget:
- v4l2_ctrl_handler_free(&mt9m111->hdl);
- }
+ if (ret < 0)
+ goto out_hdlfree;
+
+ mt9m111->subdev.dev = &client->dev;
+ ret = v4l2_async_register_subdev(&mt9m111->subdev);
+ if (ret < 0)
+ goto out_hdlfree;
+
+ return 0;
+
+out_hdlfree:
+ v4l2_ctrl_handler_free(&mt9m111->hdl);
+out_clkput:
+ v4l2_clk_put(mt9m111->clk);
return ret;
}
@@ -995,6 +1008,7 @@ static int mt9m111_remove(struct i2c_client *client)
{
struct mt9m111 *mt9m111 = to_mt9m111(client);
+ v4l2_async_unregister_subdev(&mt9m111->subdev);
v4l2_clk_put(mt9m111->clk);
v4l2_device_unregister_subdev(&mt9m111->subdev);
v4l2_ctrl_handler_free(&mt9m111->hdl);
diff --git a/drivers/media/i2c/soc_camera/mt9t031.c b/drivers/media/i2c/soc_camera/mt9t031.c
index 47d18d0bafe..ee7bb0ffcec 100644
--- a/drivers/media/i2c/soc_camera/mt9t031.c
+++ b/drivers/media/i2c/soc_camera/mt9t031.c
@@ -594,9 +594,12 @@ static int mt9t031_s_power(struct v4l2_subdev *sd, int on)
ret = soc_camera_power_on(&client->dev, ssdd, mt9t031->clk);
if (ret < 0)
return ret;
- vdev->dev.type = &mt9t031_dev_type;
+ if (vdev)
+ /* Not needed during probing, when vdev isn't available yet */
+ vdev->dev.type = &mt9t031_dev_type;
} else {
- vdev->dev.type = NULL;
+ if (vdev)
+ vdev->dev.type = NULL;
soc_camera_power_off(&client->dev, ssdd, mt9t031->clk);
}
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index 0a2dacbd7a6..42276d93624 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -291,10 +291,8 @@ static int ths7303_log_status(struct v4l2_subdev *sd)
struct v4l2_bt_timings *bt = bt = &state->bt;
u32 frame_width, frame_height;
- frame_width = bt->width + bt->hfrontporch +
- bt->hsync + bt->hbackporch;
- frame_height = bt->height + bt->vfrontporch +
- bt->vsync + bt->vbackporch;
+ frame_width = V4L2_DV_BT_FRAME_WIDTH(bt);
+ frame_height = V4L2_DV_BT_FRAME_HEIGHT(bt);
v4l2_info(sd,
"timings: %dx%d%s%d (%dx%d). Pix freq. = %d Hz. Polarities = 0x%x\n",
bt->width, bt->height, bt->interlaced ? "i" : "p",
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index a24f90c5261..a58a8f663ff 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -21,6 +21,8 @@
#include <linux/module.h>
#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-dv-timings.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include "ths8200_regs.h"
@@ -42,18 +44,16 @@ struct ths8200_state {
struct v4l2_dv_timings dv_timings;
};
-static const struct v4l2_dv_timings ths8200_timings[] = {
- V4L2_DV_BT_CEA_720X480P59_94,
- V4L2_DV_BT_CEA_1280X720P24,
- V4L2_DV_BT_CEA_1280X720P25,
- V4L2_DV_BT_CEA_1280X720P30,
- V4L2_DV_BT_CEA_1280X720P50,
- V4L2_DV_BT_CEA_1280X720P60,
- V4L2_DV_BT_CEA_1920X1080P24,
- V4L2_DV_BT_CEA_1920X1080P25,
- V4L2_DV_BT_CEA_1920X1080P30,
- V4L2_DV_BT_CEA_1920X1080P50,
- V4L2_DV_BT_CEA_1920X1080P60,
+static const struct v4l2_dv_timings_cap ths8200_timings_cap = {
+ .type = V4L2_DV_BT_656_1120,
+ .bt = {
+ .max_width = 1920,
+ .max_height = 1080,
+ .min_pixelclock = 25000000,
+ .max_pixelclock = 148500000,
+ .standards = V4L2_DV_BT_STD_CEA861,
+ .capabilities = V4L2_DV_BT_CAP_PROGRESSIVE,
+ },
};
static inline struct ths8200_state *to_state(struct v4l2_subdev *sd)
@@ -63,22 +63,22 @@ static inline struct ths8200_state *to_state(struct v4l2_subdev *sd)
static inline unsigned hblanking(const struct v4l2_bt_timings *t)
{
- return t->hfrontporch + t->hsync + t->hbackporch;
+ return V4L2_DV_BT_BLANKING_WIDTH(t);
}
static inline unsigned htotal(const struct v4l2_bt_timings *t)
{
- return t->width + t->hfrontporch + t->hsync + t->hbackporch;
+ return V4L2_DV_BT_FRAME_WIDTH(t);
}
static inline unsigned vblanking(const struct v4l2_bt_timings *t)
{
- return t->vfrontporch + t->vsync + t->vbackporch;
+ return V4L2_DV_BT_BLANKING_HEIGHT(t);
}
static inline unsigned vtotal(const struct v4l2_bt_timings *t)
{
- return t->height + t->vfrontporch + t->vsync + t->vbackporch;
+ return V4L2_DV_BT_FRAME_HEIGHT(t);
}
static int ths8200_read(struct v4l2_subdev *sd, u8 reg)
@@ -133,39 +133,6 @@ static int ths8200_s_register(struct v4l2_subdev *sd,
}
#endif
-static void ths8200_print_timings(struct v4l2_subdev *sd,
- struct v4l2_dv_timings *timings,
- const char *txt, bool detailed)
-{
- struct v4l2_bt_timings *bt = &timings->bt;
- u32 htot, vtot;
-
- if (timings->type != V4L2_DV_BT_656_1120)
- return;
-
- htot = htotal(bt);
- vtot = vtotal(bt);
-
- v4l2_info(sd, "%s %dx%d%s%d (%dx%d)",
- txt, bt->width, bt->height, bt->interlaced ? "i" : "p",
- (htot * vtot) > 0 ? ((u32)bt->pixelclock / (htot * vtot)) : 0,
- htot, vtot);
-
- if (detailed) {
- v4l2_info(sd, " horizontal: fp = %d, %ssync = %d, bp = %d\n",
- bt->hfrontporch,
- (bt->polarities & V4L2_DV_HSYNC_POS_POL) ? "+" : "-",
- bt->hsync, bt->hbackporch);
- v4l2_info(sd, " vertical: fp = %d, %ssync = %d, bp = %d\n",
- bt->vfrontporch,
- (bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-",
- bt->vsync, bt->vbackporch);
- v4l2_info(sd,
- " pixelclock: %lld, flags: 0x%x, standards: 0x%x\n",
- bt->pixelclock, bt->flags, bt->standards);
- }
-}
-
static int ths8200_log_status(struct v4l2_subdev *sd)
{
struct ths8200_state *state = to_state(sd);
@@ -182,9 +149,8 @@ static int ths8200_log_status(struct v4l2_subdev *sd)
ths8200_read(sd, THS8200_DTG2_PIXEL_CNT_LSB),
(ths8200_read(sd, THS8200_DTG2_LINE_CNT_MSB) & 0x07) * 256 +
ths8200_read(sd, THS8200_DTG2_LINE_CNT_LSB));
- ths8200_print_timings(sd, &state->dv_timings,
- "Configured format:", true);
-
+ v4l2_print_dv_timings(sd->name, "Configured format:",
+ &state->dv_timings, true);
return 0;
}
@@ -409,25 +375,15 @@ static int ths8200_s_dv_timings(struct v4l2_subdev *sd,
struct v4l2_dv_timings *timings)
{
struct ths8200_state *state = to_state(sd);
- int i;
v4l2_dbg(1, debug, sd, "%s:\n", __func__);
- if (timings->type != V4L2_DV_BT_656_1120)
- return -EINVAL;
-
- /* TODO Support interlaced formats */
- if (timings->bt.interlaced) {
- v4l2_dbg(1, debug, sd, "TODO Support interlaced formats\n");
+ if (!v4l2_valid_dv_timings(timings, &ths8200_timings_cap,
+ NULL, NULL))
return -EINVAL;
- }
-
- for (i = 0; i < ARRAY_SIZE(ths8200_timings); i++) {
- if (v4l_match_dv_timings(&ths8200_timings[i], timings, 10))
- break;
- }
- if (i == ARRAY_SIZE(ths8200_timings)) {
+ if (!v4l2_find_dv_timings_cap(timings, &ths8200_timings_cap, 10,
+ NULL, NULL)) {
v4l2_dbg(1, debug, sd, "Unsupported format\n");
return -EINVAL;
}
@@ -457,26 +413,14 @@ static int ths8200_g_dv_timings(struct v4l2_subdev *sd,
static int ths8200_enum_dv_timings(struct v4l2_subdev *sd,
struct v4l2_enum_dv_timings *timings)
{
- /* Check requested format index is within range */
- if (timings->index >= ARRAY_SIZE(ths8200_timings))
- return -EINVAL;
-
- timings->timings = ths8200_timings[timings->index];
-
- return 0;
+ return v4l2_enum_dv_timings_cap(timings, &ths8200_timings_cap,
+ NULL, NULL);
}
static int ths8200_dv_timings_cap(struct v4l2_subdev *sd,
struct v4l2_dv_timings_cap *cap)
{
- cap->type = V4L2_DV_BT_656_1120;
- cap->bt.max_width = 1920;
- cap->bt.max_height = 1080;
- cap->bt.min_pixelclock = 27000000;
- cap->bt.max_pixelclock = 148500000;
- cap->bt.standards = V4L2_DV_BT_STD_CEA861;
- cap->bt.capabilities = V4L2_DV_BT_CAP_PROGRESSIVE;
-
+ *cap = ths8200_timings_cap;
return 0;
}
@@ -500,6 +444,7 @@ static int ths8200_probe(struct i2c_client *client,
{
struct ths8200_state *state;
struct v4l2_subdev *sd;
+ int error;
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@@ -517,6 +462,10 @@ static int ths8200_probe(struct i2c_client *client,
ths8200_core_init(sd);
+ error = v4l2_async_register_subdev(&state->sd);
+ if (error)
+ return error;
+
v4l2_info(sd, "%s found @ 0x%x (%s)\n", client->name,
client->addr << 1, client->adapter->name);
@@ -526,12 +475,13 @@ static int ths8200_probe(struct i2c_client *client,
static int ths8200_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ths8200_state *decoder = to_state(sd);
v4l2_dbg(1, debug, sd, "%s removed @ 0x%x (%s)\n", client->name,
client->addr << 1, client->adapter->name);
ths8200_s_power(sd, false);
-
+ v4l2_async_unregister_subdev(&decoder->sd);
v4l2_device_unregister_subdev(sd);
return 0;
@@ -543,10 +493,19 @@ static struct i2c_device_id ths8200_id[] = {
};
MODULE_DEVICE_TABLE(i2c, ths8200_id);
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id ths8200_of_match[] = {
+ { .compatible = "ti,ths8200", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, ths8200_of_match);
+#endif
+
static struct i2c_driver ths8200_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "ths8200",
+ .of_match_table = of_match_ptr(ths8200_of_match),
},
.probe = ths8200_probe,
.remove = ths8200_remove,
diff --git a/drivers/media/i2c/tvp514x.c b/drivers/media/i2c/tvp514x.c
index 9c6d66a9868..91f3dd4cda1 100644
--- a/drivers/media/i2c/tvp514x.c
+++ b/drivers/media/i2c/tvp514x.c
@@ -36,6 +36,7 @@
#include <linux/module.h>
#include <linux/v4l2-mediabus.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>
#include <media/v4l2-mediabus.h>
@@ -1175,16 +1176,22 @@ tvp514x_probe(struct i2c_client *client, const struct i2c_device_id *id)
sd->ctrl_handler = &decoder->hdl;
if (decoder->hdl.error) {
ret = decoder->hdl.error;
-
- v4l2_ctrl_handler_free(&decoder->hdl);
- return ret;
+ goto done;
}
v4l2_ctrl_handler_setup(&decoder->hdl);
- v4l2_info(sd, "%s decoder driver registered !!\n", sd->name);
-
- return 0;
+ ret = v4l2_async_register_subdev(&decoder->sd);
+ if (!ret)
+ v4l2_info(sd, "%s decoder driver registered !!\n", sd->name);
+done:
+ if (ret < 0) {
+ v4l2_ctrl_handler_free(&decoder->hdl);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+ media_entity_cleanup(&decoder->sd.entity);
+#endif
+ }
+ return ret;
}
/**
@@ -1199,6 +1206,7 @@ static int tvp514x_remove(struct i2c_client *client)
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct tvp514x_decoder *decoder = to_decoder(sd);
+ v4l2_async_unregister_subdev(&decoder->sd);
v4l2_device_unregister_subdev(sd);
#if defined(CONFIG_MEDIA_CONTROLLER)
media_entity_cleanup(&decoder->sd.entity);
diff --git a/drivers/media/i2c/tvp7002.c b/drivers/media/i2c/tvp7002.c
index a4e49483de6..24a08fa7e32 100644
--- a/drivers/media/i2c/tvp7002.c
+++ b/drivers/media/i2c/tvp7002.c
@@ -31,9 +31,12 @@
#include <linux/module.h>
#include <linux/v4l2-dv-timings.h>
#include <media/tvp7002.h>
+#include <media/v4l2-async.h>
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ctrls.h>
+#include <media/v4l2-of.h>
+
#include "tvp7002_reg.h"
MODULE_DESCRIPTION("TI TVP7002 Video and Graphics Digitizer driver");
@@ -942,6 +945,48 @@ static const struct v4l2_subdev_ops tvp7002_ops = {
.pad = &tvp7002_pad_ops,
};
+static struct tvp7002_config *
+tvp7002_get_pdata(struct i2c_client *client)
+{
+ struct v4l2_of_endpoint bus_cfg;
+ struct tvp7002_config *pdata;
+ struct device_node *endpoint;
+ unsigned int flags;
+
+ if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
+ return client->dev.platform_data;
+
+ endpoint = v4l2_of_get_next_endpoint(client->dev.of_node, NULL);
+ if (!endpoint)
+ return NULL;
+
+ pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ goto done;
+
+ v4l2_of_parse_endpoint(endpoint, &bus_cfg);
+ flags = bus_cfg.bus.parallel.flags;
+
+ if (flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH)
+ pdata->hs_polarity = 1;
+
+ if (flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH)
+ pdata->vs_polarity = 1;
+
+ if (flags & V4L2_MBUS_PCLK_SAMPLE_RISING)
+ pdata->clk_polarity = 1;
+
+ if (flags & V4L2_MBUS_FIELD_EVEN_HIGH)
+ pdata->fid_polarity = 1;
+
+ if (flags & V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH)
+ pdata->sog_polarity = 1;
+
+done:
+ of_node_put(endpoint);
+ return pdata;
+}
+
/*
* tvp7002_probe - Probe a TVP7002 device
* @c: ptr to i2c_client struct
@@ -953,32 +998,32 @@ static const struct v4l2_subdev_ops tvp7002_ops = {
*/
static int tvp7002_probe(struct i2c_client *c, const struct i2c_device_id *id)
{
+ struct tvp7002_config *pdata = tvp7002_get_pdata(c);
struct v4l2_subdev *sd;
struct tvp7002 *device;
struct v4l2_dv_timings timings;
int polarity_a;
int polarity_b;
u8 revision;
-
int error;
+ if (pdata == NULL) {
+ dev_err(&c->dev, "No platform data\n");
+ return -EINVAL;
+ }
+
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(c->adapter,
I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
return -EIO;
- if (!c->dev.platform_data) {
- v4l_err(c, "No platform data!!\n");
- return -ENODEV;
- }
-
device = devm_kzalloc(&c->dev, sizeof(struct tvp7002), GFP_KERNEL);
if (!device)
return -ENOMEM;
sd = &device->sd;
- device->pdata = c->dev.platform_data;
+ device->pdata = pdata;
device->current_timings = tvp7002_timings;
/* Tell v4l2 the device is ready */
@@ -1039,6 +1084,10 @@ static int tvp7002_probe(struct i2c_client *c, const struct i2c_device_id *id)
}
v4l2_ctrl_handler_setup(&device->hdl);
+ error = v4l2_async_register_subdev(&device->sd);
+ if (error)
+ goto error;
+
return 0;
error:
@@ -1063,6 +1112,7 @@ static int tvp7002_remove(struct i2c_client *c)
v4l2_dbg(1, debug, sd, "Removing tvp7002 adapter"
"on address 0x%x\n", c->addr);
+ v4l2_async_unregister_subdev(&device->sd);
#if defined(CONFIG_MEDIA_CONTROLLER)
media_entity_cleanup(&device->sd.entity);
#endif
@@ -1078,9 +1128,18 @@ static const struct i2c_device_id tvp7002_id[] = {
};
MODULE_DEVICE_TABLE(i2c, tvp7002_id);
+#if IS_ENABLED(CONFIG_OF)
+static const struct of_device_id tvp7002_of_match[] = {
+ { .compatible = "ti,tvp7002", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, tvp7002_of_match);
+#endif
+
/* I2C driver data */
static struct i2c_driver tvp7002_driver = {
.driver = {
+ .of_match_table = of_match_ptr(tvp7002_of_match),
.owner = THIS_MODULE,
.name = TVP7002_MODULE_NAME,
},
diff --git a/drivers/media/media-entity.c b/drivers/media/media-entity.c
index cb30ffbd5ba..2c286c30714 100644
--- a/drivers/media/media-entity.c
+++ b/drivers/media/media-entity.c
@@ -20,6 +20,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <media/media-entity.h>
@@ -121,7 +122,6 @@ static struct media_entity *stack_pop(struct media_entity_graph *graph)
return entity;
}
-#define stack_peek(en) ((en)->stack[(en)->top - 1].entity)
#define link_top(en) ((en)->stack[(en)->top].link)
#define stack_top(en) ((en)->stack[(en)->top].entity)
@@ -140,6 +140,12 @@ void media_entity_graph_walk_start(struct media_entity_graph *graph,
{
graph->top = 0;
graph->stack[graph->top].entity = NULL;
+ bitmap_zero(graph->entities, MEDIA_ENTITY_ENUM_MAX_ID);
+
+ if (WARN_ON(entity->id >= MEDIA_ENTITY_ENUM_MAX_ID))
+ return;
+
+ __set_bit(entity->id, graph->entities);
stack_push(graph, entity);
}
EXPORT_SYMBOL_GPL(media_entity_graph_walk_start);
@@ -180,9 +186,11 @@ media_entity_graph_walk_next(struct media_entity_graph *graph)
/* Get the entity in the other end of the link . */
next = media_entity_other(entity, link);
+ if (WARN_ON(next->id >= MEDIA_ENTITY_ENUM_MAX_ID))
+ return NULL;
- /* Was it the entity we came here from? */
- if (next == stack_peek(graph)) {
+ /* Has the entity already been visited? */
+ if (__test_and_set_bit(next->id, graph->entities)) {
link_top(graph)++;
continue;
}
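
Editor's note, not part of the patch: the media-entity change above replaces the "did we come from here" check with a per-walk visited bitmap, so the walk also terminates on graphs containing cycles. The same idea in isolation (MY_MAX_ID and first_visit are illustrative names, not kernel symbols):

#include <linux/bitmap.h>
#include <linux/bug.h>

#define MY_MAX_ID 64	/* plays the role of MEDIA_ENTITY_ENUM_MAX_ID */

static DECLARE_BITMAP(visited, MY_MAX_ID);

/* Returns true exactly once per id: the first time it is seen. */
static bool first_visit(unsigned int id)
{
	if (WARN_ON(id >= MY_MAX_ID))
		return false;
	return !__test_and_set_bit(id, visited);
}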
diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c
index e564aac0aa3..d85cb0ace4d 100644
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -4441,9 +4441,7 @@ static void tibetCS16_init(struct bttv *btv)
* is {3, 0, 2, 1}, i.e. the first controller to be detected is logical
* unit 3, the second (which is the master) is logical unit 0, etc.
* We need to maintain the status of the analog switch (which of the 16
- * cameras is connected to which of the 4 controllers). Rather than
- * add to the bttv structure for this, we use the data reserved for
- * the mbox (unused for this card type).
+ * cameras is connected to which of the 4 controllers) in the sw_status array.
*/
/*
@@ -4478,7 +4476,6 @@ static void kodicom4400r_write(struct bttv *btv,
*/
static void kodicom4400r_muxsel(struct bttv *btv, unsigned int input)
{
- char *sw_status;
int xaddr, yaddr;
struct bttv *mctlr;
static unsigned char map[4] = {3, 0, 2, 1};
@@ -4489,14 +4486,13 @@ static void kodicom4400r_muxsel(struct bttv *btv, unsigned int input)
}
yaddr = (btv->c.nr - mctlr->c.nr + 1) & 3; /* the '&' is for safety */
yaddr = map[yaddr];
- sw_status = (char *)(&mctlr->mbox_we);
xaddr = input & 0xf;
/* Check if the controller/camera pair has changed, else ignore */
- if (sw_status[yaddr] != xaddr)
+ if (mctlr->sw_status[yaddr] != xaddr)
{
/* "open" the old switch, "close" the new one, save the new */
- kodicom4400r_write(mctlr, sw_status[yaddr], yaddr, 0);
- sw_status[yaddr] = xaddr;
+ kodicom4400r_write(mctlr, mctlr->sw_status[yaddr], yaddr, 0);
+ mctlr->sw_status[yaddr] = xaddr;
kodicom4400r_write(mctlr, xaddr, yaddr, 1);
}
}
@@ -4509,7 +4505,6 @@ static void kodicom4400r_muxsel(struct bttv *btv, unsigned int input)
*/
static void kodicom4400r_init(struct bttv *btv)
{
- char *sw_status = (char *)(&btv->mbox_we);
int ix;
gpio_inout(0x0003ff, 0x0003ff);
@@ -4517,7 +4512,7 @@ static void kodicom4400r_init(struct bttv *btv)
gpio_write(0);
/* Preset camera 0 to the 4 controllers */
for (ix = 0; ix < 4; ix++) {
- sw_status[ix] = ix;
+ btv->sw_status[ix] = ix;
kodicom4400r_write(btv, ix, ix, 1);
}
/*
@@ -4794,7 +4789,6 @@ static void gv800s_write(struct bttv *btv,
static void gv800s_muxsel(struct bttv *btv, unsigned int input)
{
struct bttv *mctlr;
- char *sw_status;
int xaddr, yaddr;
static unsigned int map[4][4] = { { 0x0, 0x4, 0xa, 0x6 },
{ 0x1, 0x5, 0xb, 0x7 },
@@ -4807,14 +4801,13 @@ static void gv800s_muxsel(struct bttv *btv, unsigned int input)
return;
}
yaddr = (btv->c.nr - mctlr->c.nr) & 3;
- sw_status = (char *)(&mctlr->mbox_we);
xaddr = map[yaddr][input] & 0xf;
/* Check if the controller/camera pair has changed, ignore otherwise */
- if (sw_status[yaddr] != xaddr) {
+ if (mctlr->sw_status[yaddr] != xaddr) {
/* disable the old switch, enable the new one and save status */
- gv800s_write(mctlr, sw_status[yaddr], yaddr, 0);
- sw_status[yaddr] = xaddr;
+ gv800s_write(mctlr, mctlr->sw_status[yaddr], yaddr, 0);
+ mctlr->sw_status[yaddr] = xaddr;
gv800s_write(mctlr, xaddr, yaddr, 1);
}
}
@@ -4822,7 +4815,6 @@ static void gv800s_muxsel(struct bttv *btv, unsigned int input)
/* GeoVision GV-800(S) "master" chip init */
static void gv800s_init(struct bttv *btv)
{
- char *sw_status = (char *)(&btv->mbox_we);
int ix;
gpio_inout(0xf107f, 0xf107f);
@@ -4831,7 +4823,7 @@ static void gv800s_init(struct bttv *btv)
/* Preset camera 0 to the 4 controllers */
for (ix = 0; ix < 4; ix++) {
- sw_status[ix] = ix;
+ btv->sw_status[ix] = ix;
gv800s_write(btv, ix, ix, 1);
}
diff --git a/drivers/media/pci/bt8xx/bttvp.h b/drivers/media/pci/bt8xx/bttvp.h
index 9c1cc2c50ee..6eefb595d0f 100644
--- a/drivers/media/pci/bt8xx/bttvp.h
+++ b/drivers/media/pci/bt8xx/bttvp.h
@@ -459,6 +459,9 @@ struct bttv {
int mbox_iow;
int mbox_csel;
+ /* switch status for multi-controller cards */
+ char sw_status[4];
+
/* risc memory management data
- must acquire s_lock before changing these
- only the irq handler is supported to touch top + bottom + vcurr */
diff --git a/drivers/media/pci/cx23885/Kconfig b/drivers/media/pci/cx23885/Kconfig
index b3688aa8acc..5104c802f72 100644
--- a/drivers/media/pci/cx23885/Kconfig
+++ b/drivers/media/pci/cx23885/Kconfig
@@ -29,6 +29,7 @@ config VIDEO_CX23885
select DVB_STV0367 if MEDIA_SUBDRV_AUTOSELECT
select DVB_TDA10071 if MEDIA_SUBDRV_AUTOSELECT
select DVB_A8293 if MEDIA_SUBDRV_AUTOSELECT
+ select DVB_MB86A20S if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MT2063 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_MT2131 if MEDIA_SUBDRV_AUTOSELECT
select MEDIA_TUNER_XC2028 if MEDIA_SUBDRV_AUTOSELECT
diff --git a/drivers/media/pci/cx23885/cx23885-av.c b/drivers/media/pci/cx23885/cx23885-av.c
index e958a01fd55..c443b7ac5ad 100644
--- a/drivers/media/pci/cx23885/cx23885-av.c
+++ b/drivers/media/pci/cx23885/cx23885-av.c
@@ -23,6 +23,7 @@
#include "cx23885.h"
#include "cx23885-av.h"
+#include "cx23885-video.h"
void cx23885_av_work_handler(struct work_struct *work)
{
@@ -32,5 +33,17 @@ void cx23885_av_work_handler(struct work_struct *work)
v4l2_subdev_call(dev->sd_cx25840, core, interrupt_service_routine,
PCI_MSK_AV_CORE, &handled);
+
+ /* If we get here with the interrupt still unhandled, the flatiron
+ * probably has pending interrupts.
+ */
+ if (!handled) {
+ /* clear left and right adc channel interrupt request flag */
+ cx23885_flatiron_write(dev, 0x1f,
+ cx23885_flatiron_read(dev, 0x1f) | 0x80);
+ cx23885_flatiron_write(dev, 0x23,
+ cx23885_flatiron_read(dev, 0x23) | 0x80);
+ }
+
cx23885_irq_enable(dev, PCI_MSK_AV_CORE);
}
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index 7e923f8dd2f..6a71a965e75 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -528,11 +528,12 @@ struct cx23885_board cx23885_boards[] = {
} },
},
[CX23885_BOARD_MYGICA_X8507] = {
- .name = "Mygica X8507",
+ .name = "Mygica X8502/X8507 ISDB-T",
.tuner_type = TUNER_XC5000,
.tuner_addr = 0x61,
.tuner_bus = 1,
.porta = CX23885_ANALOG_VIDEO,
+ .portb = CX23885_MPEG_DVB,
.input = {
{
.type = CX23885_VMUX_TELEVISION,
@@ -1281,7 +1282,7 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
case CX23885_BOARD_MYGICA_X8507:
/* GPIO-0 (0)Analog / (1)Digital TV */
/* GPIO-1 reset XC5000 */
- /* GPIO-2 reset LGS8GL5 / LGS8G75 */
+ /* GPIO-2 demod reset */
cx23885_gpio_enable(dev, GPIO_0 | GPIO_1 | GPIO_2, 1);
cx23885_gpio_clear(dev, GPIO_1 | GPIO_2);
mdelay(100);
@@ -1677,6 +1678,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
break;
case CX23885_BOARD_MYGICA_X8506:
case CX23885_BOARD_MAGICPRO_PROHDTVE2:
+ case CX23885_BOARD_MYGICA_X8507:
ts1->gen_ctrl_val = 0x5; /* Parallel */
ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 268654ac9a9..9f63d93239e 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -1941,10 +1941,7 @@ static irqreturn_t cx23885_irq(int irq, void *dev_id)
if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
- if (!schedule_work(&dev->cx25840_work))
- printk(KERN_ERR "%s: failed to set up deferred work for"
- " AV Core/IR interrupt. Interrupt is disabled"
- " and won't be re-enabled\n", dev->name);
+ schedule_work(&dev->cx25840_work);
handled++;
}
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 9c5ed10b2c5..971e4ff1b87 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -69,6 +69,7 @@
#include "stb6100_cfg.h"
#include "tda10071.h"
#include "a8293.h"
+#include "mb86a20s.h"
static unsigned int debug;
@@ -119,8 +120,6 @@ static void dvb_buf_release(struct videobuf_queue *q,
cx23885_free_buffer(q, (struct cx23885_buffer *)vb);
}
-static int cx23885_dvb_set_frontend(struct dvb_frontend *fe);
-
static void cx23885_dvb_gate_ctrl(struct cx23885_tsport *port, int open)
{
struct videobuf_dvb_frontends *f;
@@ -135,12 +134,6 @@ static void cx23885_dvb_gate_ctrl(struct cx23885_tsport *port, int open)
if (fe && fe->dvb.frontend && fe->dvb.frontend->ops.i2c_gate_ctrl)
fe->dvb.frontend->ops.i2c_gate_ctrl(fe->dvb.frontend, open);
-
- /*
- * FIXME: Improve this path to avoid calling the
- * cx23885_dvb_set_frontend() every time it passes here.
- */
- cx23885_dvb_set_frontend(fe->dvb.frontend);
}
static struct videobuf_queue_ops dvb_qops = {
@@ -500,6 +493,15 @@ static struct xc5000_config mygica_x8506_xc5000_config = {
.if_khz = 5380,
};
+static struct mb86a20s_config mygica_x8507_mb86a20s_config = {
+ .demod_address = 0x10,
+};
+
+static struct xc5000_config mygica_x8507_xc5000_config = {
+ .i2c_address = 0x61,
+ .if_khz = 4000,
+};
+
static struct stv090x_config prof_8000_stv090x_config = {
.device = STV0903,
.demod_mode = STV090x_SINGLE,
@@ -556,14 +558,27 @@ static int cx23885_dvb_set_frontend(struct dvb_frontend *fe)
}
break;
case CX23885_BOARD_MYGICA_X8506:
+ case CX23885_BOARD_MYGICA_X8507:
case CX23885_BOARD_MAGICPRO_PROHDTVE2:
/* Select Digital TV */
cx23885_gpio_set(dev, GPIO_0);
break;
}
+
+ /* Call the real set_frontend */
+ if (port->set_frontend)
+ return port->set_frontend(fe);
+
return 0;
}
+static void cx23885_set_frontend_hook(struct cx23885_tsport *port,
+ struct dvb_frontend *fe)
+{
+ port->set_frontend = fe->ops.set_frontend;
+ fe->ops.set_frontend = cx23885_dvb_set_frontend;
+}
+
static struct lgs8gxx_config magicpro_prohdtve2_lgs8g75_config = {
.prod = LGS8GXX_PROD_LGS8G75,
.demod_address = 0x19,
@@ -771,6 +786,8 @@ static int dvb_register(struct cx23885_tsport *port)
0x60, &dev->i2c_bus[1].i2c_adap,
&hauppauge_hvr127x_config);
}
+ if (dev->board == CX23885_BOARD_HAUPPAUGE_HVR1275)
+ cx23885_set_frontend_hook(port, fe0->dvb.frontend);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1255:
case CX23885_BOARD_HAUPPAUGE_HVR1255_22111:
@@ -1106,6 +1123,21 @@ static int dvb_register(struct cx23885_tsport *port)
&i2c_bus2->i2c_adap,
&mygica_x8506_xc5000_config);
}
+ cx23885_set_frontend_hook(port, fe0->dvb.frontend);
+ break;
+ case CX23885_BOARD_MYGICA_X8507:
+ i2c_bus = &dev->i2c_bus[0];
+ i2c_bus2 = &dev->i2c_bus[1];
+ fe0->dvb.frontend = dvb_attach(mb86a20s_attach,
+ &mygica_x8507_mb86a20s_config,
+ &i2c_bus->i2c_adap);
+ if (fe0->dvb.frontend != NULL) {
+ dvb_attach(xc5000_attach,
+ fe0->dvb.frontend,
+ &i2c_bus2->i2c_adap,
+ &mygica_x8507_xc5000_config);
+ }
+ cx23885_set_frontend_hook(port, fe0->dvb.frontend);
break;
case CX23885_BOARD_MAGICPRO_PROHDTVE2:
i2c_bus = &dev->i2c_bus[0];
@@ -1119,6 +1151,7 @@ static int dvb_register(struct cx23885_tsport *port)
&i2c_bus2->i2c_adap,
&magicpro_prohdtve2_xc5000_config);
}
+ cx23885_set_frontend_hook(port, fe0->dvb.frontend);
break;
case CX23885_BOARD_HAUPPAUGE_HVR1850:
i2c_bus = &dev->i2c_bus[0];
@@ -1249,6 +1282,10 @@ static int dvb_register(struct cx23885_tsport *port)
fe0->dvb.frontend = dvb_attach(ds3000_attach,
&tevii_ds3000_config,
&i2c_bus->i2c_adap);
+ if (fe0->dvb.frontend != NULL) {
+ dvb_attach(ts2020_attach, fe0->dvb.frontend,
+ &tevii_ts2020_config, &i2c_bus->i2c_adap);
+ }
break;
case CX23885_BOARD_PROF_8000:
i2c_bus = &dev->i2c_bus[0];
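
Editor's note, not part of the patch: cx23885_set_frontend_hook() above uses the usual save-and-chain callback pattern, remembering the frontend's original set_frontend and substituting a wrapper that does board-level work (GPIO selection) before delegating. A generic sketch of the pattern; every name here is illustrative:

struct hooked_ops {
	int (*orig_set)(void *ctx);	/* the callback we displaced */
};

static struct hooked_ops hook;

static int wrapped_set(void *ctx)
{
	/* ... board-specific preparation would go here ... */
	return hook.orig_set ? hook.orig_set(ctx) : 0;
}

static void install_hook(int (**set)(void *ctx))
{
	hook.orig_set = *set;	/* remember the original */
	*set = wrapped_set;	/* and take its place */
}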
diff --git a/drivers/media/pci/cx23885/cx23885-video.c b/drivers/media/pci/cx23885/cx23885-video.c
index e33d1a7dfdd..161686832b2 100644
--- a/drivers/media/pci/cx23885/cx23885-video.c
+++ b/drivers/media/pci/cx23885/cx23885-video.c
@@ -32,6 +32,7 @@
#include <asm/div64.h>
#include "cx23885.h"
+#include "cx23885-video.h"
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include "cx23885-ioctl.h"
@@ -417,7 +418,7 @@ static void res_free(struct cx23885_dev *dev, struct cx23885_fh *fh,
mutex_unlock(&dev->lock);
}
-static int cx23885_flatiron_write(struct cx23885_dev *dev, u8 reg, u8 data)
+int cx23885_flatiron_write(struct cx23885_dev *dev, u8 reg, u8 data)
{
/* 8 bit registers, 8 bit values */
u8 buf[] = { reg, data };
@@ -428,7 +429,7 @@ static int cx23885_flatiron_write(struct cx23885_dev *dev, u8 reg, u8 data)
return i2c_transfer(&dev->i2c_bus[2].i2c_adap, &msg, 1);
}
-static u8 cx23885_flatiron_read(struct cx23885_dev *dev, u8 reg)
+u8 cx23885_flatiron_read(struct cx23885_dev *dev, u8 reg)
{
/* 8 bit registers, 8 bit values */
int ret;
diff --git a/drivers/media/pci/cx23885/cx23885-video.h b/drivers/media/pci/cx23885/cx23885-video.h
new file mode 100644
index 00000000000..c961a2b0de0
--- /dev/null
+++ b/drivers/media/pci/cx23885/cx23885-video.h
@@ -0,0 +1,26 @@
+/*
+ * Driver for the Conexant CX23885/7/8 PCIe bridge
+ *
+ * Copyright (C) 2010 Andy Walls <awalls@md.metrocast.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef _CX23885_VIDEO_H_
+#define _CX23885_VIDEO_H_
+int cx23885_flatiron_write(struct cx23885_dev *dev, u8 reg, u8 data);
+u8 cx23885_flatiron_read(struct cx23885_dev *dev, u8 reg);
+#endif
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index 5687d3f678d..038caf53908 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -320,6 +320,8 @@ struct cx23885_tsport {
/* Workaround for a temp dvb_frontend that the tuner can attached to */
struct dvb_frontend analog_fe;
+
+ int (*set_frontend)(struct dvb_frontend *fe);
};
struct cx23885_kernel_ir {
diff --git a/drivers/media/pci/cx88/Kconfig b/drivers/media/pci/cx88/Kconfig
index bb05eca2da2..a63a9ad163b 100644
--- a/drivers/media/pci/cx88/Kconfig
+++ b/drivers/media/pci/cx88/Kconfig
@@ -72,9 +72,9 @@ config VIDEO_CX88_DVB
To compile this driver as a module, choose M here: the
module will be called cx88-dvb.
-config VIDEO_CX88_VP3054
- tristate "VP-3054 Secondary I2C Bus Support"
- default m
+config VIDEO_CX88_ENABLE_VP3054
+ bool "VP-3054 Secondary I2C Bus Support"
+ default y
depends on VIDEO_CX88_DVB && DVB_MT352
---help---
This adds DVB-T support for cards based on the
@@ -82,6 +82,11 @@ config VIDEO_CX88_VP3054
which also require support for the VP-3054
Secondary I2C bus, such as the DNTV Live! DVB-T Pro.
+config VIDEO_CX88_VP3054
+ tristate
+ depends on VIDEO_CX88_DVB && VIDEO_CX88_ENABLE_VP3054
+ default y
+
config VIDEO_CX88_MPEG
tristate
depends on VIDEO_CX88_DVB || VIDEO_CX88_BLACKBIRD
diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
index afe0eaea81b..28893a6b249 100644
--- a/drivers/media/pci/cx88/cx88.h
+++ b/drivers/media/pci/cx88/cx88.h
@@ -259,7 +259,7 @@ struct cx88_input {
};
enum cx88_audio_chip {
- CX88_AUDIO_WM8775,
+ CX88_AUDIO_WM8775 = 1,
CX88_AUDIO_TVAUDIO,
};
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 08de865cc39..8068d7b6415 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -203,13 +203,23 @@ config VIDEO_SAMSUNG_EXYNOS_GSC
config VIDEO_SH_VEU
tristate "SuperH VEU mem2mem video processing driver"
- depends on VIDEO_DEV && VIDEO_V4L2 && GENERIC_HARDIRQS
+ depends on VIDEO_DEV && VIDEO_V4L2 && GENERIC_HARDIRQS && HAS_DMA
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
help
Support for the Video Engine Unit (VEU) on SuperH and
SH-Mobile SoCs.
+config VIDEO_RENESAS_VSP1
+ tristate "Renesas VSP1 Video Processing Engine"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select VIDEOBUF2_DMA_CONTIG
+ ---help---
+ This is a V4L2 driver for the Renesas VSP1 video processing engine.
+
+ To compile this driver as a module, choose M here: the module
+ will be called vsp1.
+
endif # V4L_MEM2MEM_DRIVERS
menuconfig V4L_TEST_DRIVERS
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index eee28dd78d7..4e4da482c52 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -46,6 +46,8 @@ obj-$(CONFIG_VIDEO_SH_VOU) += sh_vou.o
obj-$(CONFIG_SOC_CAMERA) += soc_camera/
+obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1/
+
obj-y += davinci/
obj-$(CONFIG_ARCH_OMAP) += omap/
diff --git a/drivers/media/platform/blackfin/bfin_capture.c b/drivers/media/platform/blackfin/bfin_capture.c
index 7f838c681ce..4c110597709 100644
--- a/drivers/media/platform/blackfin/bfin_capture.c
+++ b/drivers/media/platform/blackfin/bfin_capture.c
@@ -388,13 +388,8 @@ static int bcap_start_streaming(struct vb2_queue *vq, unsigned int count)
params.hdelay = bt->hsync + bt->hbackporch;
params.vdelay = bt->vsync + bt->vbackporch;
- params.line = bt->hfrontporch + bt->hsync
- + bt->hbackporch + bt->width;
- params.frame = bt->vfrontporch + bt->vsync
- + bt->vbackporch + bt->height;
- if (bt->interlaced)
- params.frame += bt->il_vfrontporch + bt->il_vsync
- + bt->il_vbackporch;
+ params.line = V4L2_DV_BT_FRAME_WIDTH(bt);
+ params.frame = V4L2_DV_BT_FRAME_HEIGHT(bt);
} else if (bcap_dev->cfg->inputs[bcap_dev->cur_input].capabilities
& V4L2_IN_CAP_STD) {
params.hdelay = 0;
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index df4ada880e4..449d2fec9e8 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/kfifo.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
@@ -28,6 +29,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>
@@ -41,13 +43,16 @@
#define CODA_FMO_BUF_SIZE 32
#define CODADX6_WORK_BUF_SIZE (288 * 1024 + CODA_FMO_BUF_SIZE * 8 * 1024)
-#define CODA7_WORK_BUF_SIZE (512 * 1024 + CODA_FMO_BUF_SIZE * 8 * 1024)
+#define CODA7_WORK_BUF_SIZE (128 * 1024)
+#define CODA7_TEMP_BUF_SIZE (304 * 1024)
#define CODA_PARA_BUF_SIZE (10 * 1024)
#define CODA_ISRAM_SIZE (2048 * 2)
#define CODADX6_IRAM_SIZE 0xb000
-#define CODA7_IRAM_SIZE 0x14000 /* 81920 bytes */
+#define CODA7_IRAM_SIZE 0x14000
-#define CODA_MAX_FRAMEBUFFERS 2
+#define CODA7_PS_BUF_SIZE 0x28000
+
+#define CODA_MAX_FRAMEBUFFERS 8
#define MAX_W 8192
#define MAX_H 8192
@@ -129,6 +134,7 @@ struct coda_dev {
struct clk *clk_ahb;
struct coda_aux_buf codebuf;
+ struct coda_aux_buf tempbuf;
struct coda_aux_buf workbuf;
struct gen_pool *iram_pool;
long unsigned int iram_vaddr;
@@ -153,6 +159,7 @@ struct coda_params {
u8 mpeg4_inter_qp;
u8 gop_size;
int codec_mode;
+ int codec_mode_aux;
enum v4l2_mpeg_video_multi_slice_mode slice_mode;
u32 framerate;
u16 bitrate;
@@ -160,13 +167,30 @@ struct coda_params {
u32 slice_max_mb;
};
+struct coda_iram_info {
+ u32 axi_sram_use;
+ phys_addr_t buf_bit_use;
+ phys_addr_t buf_ip_ac_dc_use;
+ phys_addr_t buf_dbk_y_use;
+ phys_addr_t buf_dbk_c_use;
+ phys_addr_t buf_ovl_use;
+ phys_addr_t buf_btp_use;
+ phys_addr_t search_ram_paddr;
+ int search_ram_size;
+};
+
struct coda_ctx {
struct coda_dev *dev;
+ struct mutex buffer_mutex;
struct list_head list;
+ struct work_struct skip_run;
int aborting;
+ int initialized;
int streamon_out;
int streamon_cap;
u32 isequence;
+ u32 qsequence;
+ u32 osequence;
struct coda_q_data q_data[2];
enum coda_inst_type inst_type;
struct coda_codec *codec;
@@ -176,12 +200,25 @@ struct coda_ctx {
struct v4l2_ctrl_handler ctrls;
struct v4l2_fh fh;
int gopcounter;
+ int runcounter;
char vpu_header[3][64];
int vpu_header_size[3];
+ struct kfifo bitstream_fifo;
+ struct mutex bitstream_mutex;
+ struct coda_aux_buf bitstream;
+ bool prescan_failed;
struct coda_aux_buf parabuf;
+ struct coda_aux_buf psbuf;
+ struct coda_aux_buf slicebuf;
struct coda_aux_buf internal_frames[CODA_MAX_FRAMEBUFFERS];
+ struct coda_aux_buf workbuf;
int num_internal_frames;
int idx;
+ int reg_idx;
+ struct coda_iram_info iram_info;
+ u32 bit_stream_param;
+ u32 frm_dis_flg;
+ int display_idx;
};
static const u8 coda_filler_nal[14] = { 0x00, 0x00, 0x00, 0x01, 0x0c, 0xff,
@@ -228,10 +265,22 @@ static int coda_wait_timeout(struct coda_dev *dev)
static void coda_command_async(struct coda_ctx *ctx, int cmd)
{
struct coda_dev *dev = ctx->dev;
+
+ if (dev->devtype->product == CODA_7541) {
+ /* Restore context related registers to CODA */
+ coda_write(dev, ctx->bit_stream_param,
+ CODA_REG_BIT_BIT_STREAM_PARAM);
+ coda_write(dev, ctx->frm_dis_flg,
+ CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
+ coda_write(dev, ctx->workbuf.paddr, CODA_REG_BIT_WORK_BUF_ADDR);
+ }
+
coda_write(dev, CODA_REG_BIT_BUSY_FLAG, CODA_REG_BIT_BUSY);
coda_write(dev, ctx->idx, CODA_REG_BIT_RUN_INDEX);
coda_write(dev, ctx->params.codec_mode, CODA_REG_BIT_RUN_COD_STD);
+ coda_write(dev, ctx->params.codec_mode_aux, CODA7_REG_BIT_RUN_AUX_STD);
+
coda_write(dev, cmd, CODA_REG_BIT_RUN_COMMAND);
}
@@ -297,6 +346,8 @@ static struct coda_codec codadx6_codecs[] = {
static struct coda_codec coda7_codecs[] = {
CODA_CODEC(CODA7_MODE_ENCODE_H264, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_H264, 1280, 720),
CODA_CODEC(CODA7_MODE_ENCODE_MP4, V4L2_PIX_FMT_YUV420, V4L2_PIX_FMT_MPEG4, 1280, 720),
+ CODA_CODEC(CODA7_MODE_DECODE_H264, V4L2_PIX_FMT_H264, V4L2_PIX_FMT_YUV420, 1920, 1080),
+ CODA_CODEC(CODA7_MODE_DECODE_MP4, V4L2_PIX_FMT_MPEG4, V4L2_PIX_FMT_YUV420, 1920, 1080),
};
static bool coda_format_is_yuv(u32 fourcc)
@@ -365,7 +416,7 @@ static int vidioc_querycap(struct file *file, void *priv,
}
static int enum_fmt(void *priv, struct v4l2_fmtdesc *f,
- enum v4l2_buf_type type)
+ enum v4l2_buf_type type, int src_fourcc)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
struct coda_codec *codecs = ctx->dev->devtype->codecs;
@@ -377,7 +428,8 @@ static int enum_fmt(void *priv, struct v4l2_fmtdesc *f,
for (i = 0; i < num_formats; i++) {
/* Both uncompressed formats are always supported */
- if (coda_format_is_yuv(formats[i].fourcc)) {
+ if (coda_format_is_yuv(formats[i].fourcc) &&
+ !coda_format_is_yuv(src_fourcc)) {
if (num == f->index)
break;
++num;
@@ -385,8 +437,10 @@ static int enum_fmt(void *priv, struct v4l2_fmtdesc *f,
}
/* Compressed formats may be supported, check the codec list */
for (k = 0; k < num_codecs; k++) {
+ /* if src_fourcc is set, only consider matching codecs */
if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
- formats[i].fourcc == codecs[k].dst_fourcc)
+ formats[i].fourcc == codecs[k].dst_fourcc &&
+ (!src_fourcc || src_fourcc == codecs[k].src_fourcc))
break;
if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
formats[i].fourcc == codecs[k].src_fourcc)
@@ -413,13 +467,26 @@ static int enum_fmt(void *priv, struct v4l2_fmtdesc *f,
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ struct coda_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *src_vq;
+ struct coda_q_data *q_data_src;
+
+ /* If the source format is already fixed, only list matching formats */
+ src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (vb2_is_streaming(src_vq)) {
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+ return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_CAPTURE,
+ q_data_src->fourcc);
+ }
+
+ return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0);
}
static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ return enum_fmt(priv, f, V4L2_BUF_TYPE_VIDEO_OUTPUT, 0);
}
static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
@@ -492,15 +559,45 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
- struct coda_codec *codec = NULL;
+ struct coda_codec *codec;
+ struct vb2_queue *src_vq;
+ int ret;
- /* Determine codec by the encoded format */
- codec = coda_find_codec(ctx->dev, V4L2_PIX_FMT_YUV420,
- f->fmt.pix.pixelformat);
+ /*
+ * If the source format is already fixed, try to find a codec that
+ * converts to the given destination format
+ */
+ src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (vb2_is_streaming(src_vq)) {
+ struct coda_q_data *q_data_src;
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ codec = coda_find_codec(ctx->dev, q_data_src->fourcc,
+ f->fmt.pix.pixelformat);
+ if (!codec)
+ return -EINVAL;
+ } else {
+ /* Otherwise determine codec by encoded format, if possible */
+ codec = coda_find_codec(ctx->dev, V4L2_PIX_FMT_YUV420,
+ f->fmt.pix.pixelformat);
+ }
f->fmt.pix.colorspace = ctx->colorspace;
- return vidioc_try_fmt(codec, f);
+ ret = vidioc_try_fmt(codec, f);
+ if (ret < 0)
+ return ret;
+
+ /* The h.264 decoder only returns complete 16x16 macroblocks */
+ if (codec && codec->src_fourcc == V4L2_PIX_FMT_H264) {
+ f->fmt.pix.width = round_up(f->fmt.pix.width, 16);
+ f->fmt.pix.height = round_up(f->fmt.pix.height, 16);
+ f->fmt.pix.bytesperline = f->fmt.pix.width;
+ f->fmt.pix.sizeimage = f->fmt.pix.bytesperline *
+ f->fmt.pix.height * 3 / 2;
+ }
+
+ return 0;
}
static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
@@ -610,11 +707,35 @@ static int vidioc_expbuf(struct file *file, void *priv,
return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb);
}
+static bool coda_buf_is_end_of_stream(struct coda_ctx *ctx,
+ struct v4l2_buffer *buf)
+{
+ struct vb2_queue *src_vq;
+
+ src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+
+ return ((ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) &&
+ (buf->sequence == (ctx->qsequence - 1)));
+}
+
static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
+ int ret;
+
+ ret = v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+
+ /* If this is the last capture buffer, emit an end-of-stream event */
+ if (buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
+ coda_buf_is_end_of_stream(ctx, buf)) {
+ const struct v4l2_event eos_event = {
+ .type = V4L2_EVENT_EOS
+ };
- return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+ v4l2_event_queue_fh(&ctx->fh, &eos_event);
+ }
+
+ return ret;
}
static int vidioc_create_bufs(struct file *file, void *priv,
@@ -637,8 +758,53 @@ static int vidioc_streamoff(struct file *file, void *priv,
enum v4l2_buf_type type)
{
struct coda_ctx *ctx = fh_to_ctx(priv);
+ int ret;
- return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+ /*
+ * This indirectly calls __vb2_queue_cancel, which dequeues all buffers.
+ * We therefore have to lock it against running hardware in this context,
+ * which still needs the buffers.
+ */
+ mutex_lock(&ctx->buffer_mutex);
+ ret = v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
+ mutex_unlock(&ctx->buffer_mutex);
+
+ return ret;
+}
+
+static int vidioc_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc)
+{
+ struct coda_ctx *ctx = fh_to_ctx(fh);
+
+ if (dc->cmd != V4L2_DEC_CMD_STOP)
+ return -EINVAL;
+
+ if ((dc->flags & V4L2_DEC_CMD_STOP_TO_BLACK) ||
+ (dc->flags & V4L2_DEC_CMD_STOP_IMMEDIATELY))
+ return -EINVAL;
+
+ if (dc->stop.pts != 0)
+ return -EINVAL;
+
+ if (ctx->inst_type != CODA_INST_DECODER)
+ return -EINVAL;
+
+ /* Set the stream-end flag on this context */
+ ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
+
+ return 0;
+}
+
+static int vidioc_subscribe_event(struct v4l2_fh *fh,
+ const struct v4l2_event_subscription *sub)
+{
+ switch (sub->type) {
+ case V4L2_EVENT_EOS:
+ return v4l2_event_subscribe(fh, sub, 0, NULL);
+ default:
+ return v4l2_ctrl_subscribe_event(fh, sub);
+ }
}
static const struct v4l2_ioctl_ops coda_ioctl_ops = {
@@ -664,14 +830,206 @@ static const struct v4l2_ioctl_ops coda_ioctl_ops = {
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
+
+ .vidioc_decoder_cmd = vidioc_decoder_cmd,
+
+ .vidioc_subscribe_event = vidioc_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
+static int coda_start_decoding(struct coda_ctx *ctx);
+
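+/* Work handler that finishes a job which was skipped without starting the hardware */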
+static void coda_skip_run(struct work_struct *work)
+{
+ struct coda_ctx *ctx = container_of(work, struct coda_ctx, skip_run);
+
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->m2m_ctx);
+}
+
+static inline int coda_get_bitstream_payload(struct coda_ctx *ctx)
+{
+ return kfifo_len(&ctx->bitstream_fifo);
+}
+
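+/* Update the kfifo out pointer from the device bitstream read pointer */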
+static void coda_kfifo_sync_from_device(struct coda_ctx *ctx)
+{
+ struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
+ struct coda_dev *dev = ctx->dev;
+ u32 rd_ptr;
+
+ rd_ptr = coda_read(dev, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
+ kfifo->out = (kfifo->in & ~kfifo->mask) |
+ (rd_ptr - ctx->bitstream.paddr);
+ if (kfifo->out > kfifo->in)
+ kfifo->out -= kfifo->mask + 1;
+}
+
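+/* Write both the kfifo read and write pointers to the device bitstream registers */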
+static void coda_kfifo_sync_to_device_full(struct coda_ctx *ctx)
+{
+ struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
+ struct coda_dev *dev = ctx->dev;
+ u32 rd_ptr, wr_ptr;
+
+ rd_ptr = ctx->bitstream.paddr + (kfifo->out & kfifo->mask);
+ coda_write(dev, rd_ptr, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
+ wr_ptr = ctx->bitstream.paddr + (kfifo->in & kfifo->mask);
+ coda_write(dev, wr_ptr, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
+}
+
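+/* Update only the device bitstream write pointer from the kfifo in pointer */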
+static void coda_kfifo_sync_to_device_write(struct coda_ctx *ctx)
+{
+ struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo;
+ struct coda_dev *dev = ctx->dev;
+ u32 wr_ptr;
+
+ wr_ptr = ctx->bitstream.paddr + (kfifo->in & kfifo->mask);
+ coda_write(dev, wr_ptr, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
+}
+
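+/* Copy the payload of a source buffer into the bitstream ring buffer */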
+static int coda_bitstream_queue(struct coda_ctx *ctx, struct vb2_buffer *src_buf)
+{
+ u32 src_size = vb2_get_plane_payload(src_buf, 0);
+ u32 n;
+
+ n = kfifo_in(&ctx->bitstream_fifo, vb2_plane_vaddr(src_buf, 0), src_size);
+ if (n < src_size)
+ return -ENOSPC;
+
+ dma_sync_single_for_device(&ctx->dev->plat_dev->dev, ctx->bitstream.paddr,
+ ctx->bitstream.size, DMA_TO_DEVICE);
+
+ ctx->qsequence++;
+
+ return 0;
+}
+
+static bool coda_bitstream_try_queue(struct coda_ctx *ctx,
+ struct vb2_buffer *src_buf)
+{
+ int ret;
+
+ if (coda_get_bitstream_payload(ctx) +
+ vb2_get_plane_payload(src_buf, 0) + 512 >= ctx->bitstream.size)
+ return false;
+
+ if (vb2_plane_vaddr(src_buf, 0) == NULL) {
+ v4l2_err(&ctx->dev->v4l2_dev, "trying to queue empty buffer\n");
+ return true;
+ }
+
+ ret = coda_bitstream_queue(ctx, src_buf);
+ if (ret < 0) {
+ v4l2_err(&ctx->dev->v4l2_dev, "bitstream buffer overflow\n");
+ return false;
+ }
+ /* Sync the bitstream write pointer to the device */
+ if (ctx == v4l2_m2m_get_curr_priv(ctx->dev->m2m_dev))
+ coda_kfifo_sync_to_device_write(ctx);
+
+ ctx->prescan_failed = false;
+
+ return true;
+}
+
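+/* Queue as many pending source buffers into the bitstream ring buffer as will fit */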
+static void coda_fill_bitstream(struct coda_ctx *ctx)
+{
+ struct vb2_buffer *src_buf;
+
+ while (v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) > 0) {
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+
+ if (coda_bitstream_try_queue(ctx, src_buf)) {
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ } else {
+ break;
+ }
+ }
+}
+
/*
* Mem-to-mem operations.
*/
-static void coda_device_run(void *m2m_priv)
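+/* Fill the bitstream ring buffer and program the rotator output for the next decoder picture run */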
+static int coda_prepare_decode(struct coda_ctx *ctx)
+{
+ struct vb2_buffer *dst_buf;
+ struct coda_dev *dev = ctx->dev;
+ struct coda_q_data *q_data_dst;
+ u32 stridey, height;
+ u32 picture_y, picture_cb, picture_cr;
+
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ if (ctx->params.rot_mode & CODA_ROT_90) {
+ stridey = q_data_dst->height;
+ height = q_data_dst->width;
+ } else {
+ stridey = q_data_dst->width;
+ height = q_data_dst->height;
+ }
+
+ /* Try to copy source buffer contents into the bitstream ringbuffer */
+ mutex_lock(&ctx->bitstream_mutex);
+ coda_fill_bitstream(ctx);
+ mutex_unlock(&ctx->bitstream_mutex);
+
+ if (coda_get_bitstream_payload(ctx) < 512 &&
+ (!(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))) {
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "bitstream payload: %d, skipping\n",
+ coda_get_bitstream_payload(ctx));
+ schedule_work(&ctx->skip_run);
+ return -EAGAIN;
+ }
+
+ /* Run coda_start_decoding (again) if not yet initialized */
+ if (!ctx->initialized) {
+ int ret = coda_start_decoding(ctx);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "failed to start decoding\n");
+ schedule_work(&ctx->skip_run);
+ return -EAGAIN;
+ } else {
+ ctx->initialized = 1;
+ }
+ }
+
+ /* Set rotator output */
+ picture_y = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ if (q_data_dst->fourcc == V4L2_PIX_FMT_YVU420) {
+ /* Switch Cr and Cb for YVU420 format */
+ picture_cr = picture_y + stridey * height;
+ picture_cb = picture_cr + stridey / 2 * height / 2;
+ } else {
+ picture_cb = picture_y + stridey * height;
+ picture_cr = picture_cb + stridey / 2 * height / 2;
+ }
+ coda_write(dev, picture_y, CODA_CMD_DEC_PIC_ROT_ADDR_Y);
+ coda_write(dev, picture_cb, CODA_CMD_DEC_PIC_ROT_ADDR_CB);
+ coda_write(dev, picture_cr, CODA_CMD_DEC_PIC_ROT_ADDR_CR);
+ coda_write(dev, stridey, CODA_CMD_DEC_PIC_ROT_STRIDE);
+ coda_write(dev, CODA_ROT_MIR_ENABLE | ctx->params.rot_mode,
+ CODA_CMD_DEC_PIC_ROT_MODE);
+
+ switch (dev->devtype->product) {
+ case CODA_DX6:
+ /* TBD */
+ case CODA_7541:
+ coda_write(dev, CODA_PRE_SCAN_EN, CODA_CMD_DEC_PIC_OPTION);
+ break;
+ }
+
+ coda_write(dev, 0, CODA_CMD_DEC_PIC_SKIP_NUM);
+
+ coda_write(dev, 0, CODA_CMD_DEC_PIC_BB_START);
+ coda_write(dev, 0, CODA_CMD_DEC_PIC_START_BYTE);
+
+ return 0;
+}
+
+static void coda_prepare_encode(struct coda_ctx *ctx)
{
- struct coda_ctx *ctx = m2m_priv;
struct coda_q_data *q_data_src, *q_data_dst;
struct vb2_buffer *src_buf, *dst_buf;
struct coda_dev *dev = ctx->dev;
@@ -681,17 +1039,15 @@ static void coda_device_run(void *m2m_priv)
u32 pic_stream_buffer_addr, pic_stream_buffer_size;
u32 dst_fourcc;
- mutex_lock(&dev->coda_mutex);
-
src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
dst_fourcc = q_data_dst->fourcc;
- src_buf->v4l2_buf.sequence = ctx->isequence;
- dst_buf->v4l2_buf.sequence = ctx->isequence;
- ctx->isequence++;
+ src_buf->v4l2_buf.sequence = ctx->osequence;
+ dst_buf->v4l2_buf.sequence = ctx->osequence;
+ ctx->osequence++;
/*
* Workaround coda firmware BUG that only marks the first
@@ -793,16 +1149,53 @@ static void coda_device_run(void *m2m_priv)
coda_write(dev, pic_stream_buffer_addr, CODA_CMD_ENC_PIC_BB_START);
coda_write(dev, pic_stream_buffer_size / 1024,
CODA_CMD_ENC_PIC_BB_SIZE);
+}
- if (dev->devtype->product == CODA_7541) {
- coda_write(dev, CODA7_USE_BIT_ENABLE | CODA7_USE_HOST_BIT_ENABLE |
- CODA7_USE_ME_ENABLE | CODA7_USE_HOST_ME_ENABLE,
- CODA7_REG_BIT_AXI_SRAM_USE);
+static void coda_device_run(void *m2m_priv)
+{
+ struct coda_ctx *ctx = m2m_priv;
+ struct coda_dev *dev = ctx->dev;
+ int ret;
+
+ mutex_lock(&ctx->buffer_mutex);
+
+ /*
+ * If streamoff dequeued all buffers before we could get the lock,
+ * just bail out immediately.
+ */
+ if ((!v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) &&
+ ctx->inst_type != CODA_INST_DECODER) ||
+ !v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx)) {
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "%d: device_run without buffers\n", ctx->idx);
+ mutex_unlock(&ctx->buffer_mutex);
+ schedule_work(&ctx->skip_run);
+ return;
}
+ mutex_lock(&dev->coda_mutex);
+
+ if (ctx->inst_type == CODA_INST_DECODER) {
+ ret = coda_prepare_decode(ctx);
+ if (ret < 0) {
+ mutex_unlock(&dev->coda_mutex);
+ mutex_unlock(&ctx->buffer_mutex);
+ /* job_finish scheduled by prepare_decode */
+ return;
+ }
+ } else {
+ coda_prepare_encode(ctx);
+ }
+
+ if (dev->devtype->product != CODA_DX6)
+ coda_write(dev, ctx->iram_info.axi_sram_use,
+ CODA7_REG_BIT_AXI_SRAM_USE);
+
/* 1 second timeout in case CODA locks up */
schedule_delayed_work(&dev->timeout, HZ);
+ if (ctx->inst_type == CODA_INST_DECODER)
+ coda_kfifo_sync_to_device_full(ctx);
coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
}
@@ -812,15 +1205,32 @@ static int coda_job_ready(void *m2m_priv)
/*
* For both 'P' and 'key' frame cases 1 picture
- * and 1 frame are needed.
+ * and 1 frame are needed. In the decoder case,
+ * the compressed frame can be in the bitstream.
*/
- if (!v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) ||
- !v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx)) {
+ if (!v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) &&
+ ctx->inst_type != CODA_INST_DECODER) {
v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
"not ready: not enough video buffers.\n");
return 0;
}
+ if (!v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx)) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "not ready: not enough video capture buffers.\n");
+ return 0;
+ }
+
+ if (ctx->prescan_failed ||
+ ((ctx->inst_type == CODA_INST_DECODER) &&
+ (coda_get_bitstream_payload(ctx) < 512) &&
+ !(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "%d: not ready: not enough bitstream data.\n",
+ ctx->idx);
+ return 0;
+ }
+
if (ctx->aborting) {
v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
"not ready: aborting\n");
@@ -936,7 +1346,29 @@ static int coda_buf_prepare(struct vb2_buffer *vb)
static void coda_buf_queue(struct vb2_buffer *vb)
{
struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+ struct coda_q_data *q_data;
+
+ q_data = get_q_data(ctx, vb->vb2_queue->type);
+
+ /*
+ * In the decoder case, immediately try to copy the buffer into the
+ * bitstream ringbuffer and mark it as ready to be dequeued.
+ */
+ if (q_data->fourcc == V4L2_PIX_FMT_H264 &&
+ vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ /*
+ * For backwards compatibility, queuing an empty buffer marks
+ * the stream end
+ */
+ if (vb2_get_plane_payload(vb, 0) == 0)
+ ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
+ mutex_lock(&ctx->bitstream_mutex);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+ coda_fill_bitstream(ctx);
+ mutex_unlock(&ctx->bitstream_mutex);
+ } else {
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+ }
}
static void coda_wait_prepare(struct vb2_queue *q)
@@ -951,21 +1383,6 @@ static void coda_wait_finish(struct vb2_queue *q)
coda_lock(ctx);
}
-static void coda_free_framebuffers(struct coda_ctx *ctx)
-{
- int i;
-
- for (i = 0; i < CODA_MAX_FRAMEBUFFERS; i++) {
- if (ctx->internal_frames[i].vaddr) {
- dma_free_coherent(&ctx->dev->plat_dev->dev,
- ctx->internal_frames[i].size,
- ctx->internal_frames[i].vaddr,
- ctx->internal_frames[i].paddr);
- ctx->internal_frames[i].vaddr = NULL;
- }
- }
-}
-
static void coda_parabuf_write(struct coda_ctx *ctx, int index, u32 value)
{
struct coda_dev *dev = ctx->dev;
@@ -977,29 +1394,69 @@ static void coda_parabuf_write(struct coda_ctx *ctx, int index, u32 value)
p[index ^ 1] = value;
}
+static int coda_alloc_aux_buf(struct coda_dev *dev,
+ struct coda_aux_buf *buf, size_t size)
+{
+ buf->vaddr = dma_alloc_coherent(&dev->plat_dev->dev, size, &buf->paddr,
+ GFP_KERNEL);
+ if (!buf->vaddr)
+ return -ENOMEM;
+
+ buf->size = size;
+
+ return 0;
+}
+
+static inline int coda_alloc_context_buf(struct coda_ctx *ctx,
+ struct coda_aux_buf *buf, size_t size)
+{
+ return coda_alloc_aux_buf(ctx->dev, buf, size);
+}
+
+static void coda_free_aux_buf(struct coda_dev *dev,
+ struct coda_aux_buf *buf)
+{
+ if (buf->vaddr) {
+ dma_free_coherent(&dev->plat_dev->dev, buf->size,
+ buf->vaddr, buf->paddr);
+ buf->vaddr = NULL;
+ buf->size = 0;
+ }
+}
+
+static void coda_free_framebuffers(struct coda_ctx *ctx)
+{
+ int i;
+
+ for (i = 0; i < CODA_MAX_FRAMEBUFFERS; i++)
+ coda_free_aux_buf(ctx->dev, &ctx->internal_frames[i]);
+}
+
static int coda_alloc_framebuffers(struct coda_ctx *ctx, struct coda_q_data *q_data, u32 fourcc)
{
struct coda_dev *dev = ctx->dev;
-
int height = q_data->height;
dma_addr_t paddr;
int ysize;
+ int ret;
int i;
+ if (ctx->codec && ctx->codec->src_fourcc == V4L2_PIX_FMT_H264)
+ height = round_up(height, 16);
ysize = round_up(q_data->width, 8) * height;
/* Allocate frame buffers */
- ctx->num_internal_frames = CODA_MAX_FRAMEBUFFERS;
for (i = 0; i < ctx->num_internal_frames; i++) {
- ctx->internal_frames[i].size = q_data->sizeimage;
- if (fourcc == V4L2_PIX_FMT_H264 && dev->devtype->product != CODA_DX6)
+ size_t size;
+
+ size = q_data->sizeimage;
+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 &&
+ dev->devtype->product != CODA_DX6)
- ctx->internal_frames[i].size += ysize/4;
+ size += ysize/4;
- ctx->internal_frames[i].vaddr = dma_alloc_coherent(
- &dev->plat_dev->dev, ctx->internal_frames[i].size,
- &ctx->internal_frames[i].paddr, GFP_KERNEL);
- if (!ctx->internal_frames[i].vaddr) {
+ ret = coda_alloc_context_buf(ctx, &ctx->internal_frames[i], size);
+ if (ret < 0) {
coda_free_framebuffers(ctx);
- return -ENOMEM;
+ return ret;
}
}
@@ -1010,10 +1467,20 @@ static int coda_alloc_framebuffers(struct coda_ctx *ctx, struct coda_q_data *q_d
coda_parabuf_write(ctx, i * 3 + 1, paddr + ysize); /* Cb */
coda_parabuf_write(ctx, i * 3 + 2, paddr + ysize + ysize/4); /* Cr */
- if (dev->devtype->product != CODA_DX6 && fourcc == V4L2_PIX_FMT_H264)
- coda_parabuf_write(ctx, 96 + i, ctx->internal_frames[i].paddr + ysize + ysize/4 + ysize/4);
+ /* mvcol buffer for h.264 */
+ if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 &&
+ dev->devtype->product != CODA_DX6)
+ coda_parabuf_write(ctx, 96 + i,
+ ctx->internal_frames[i].paddr +
+ ysize + ysize/4 + ysize/4);
}
+ /* mvcol buffer for mpeg4 */
+ if ((dev->devtype->product != CODA_DX6) &&
+ (ctx->codec->src_fourcc == V4L2_PIX_FMT_MPEG4))
+ coda_parabuf_write(ctx, 97, ctx->internal_frames[i].paddr +
+ ysize + ysize/4 + ysize/4);
+
return 0;
}
@@ -1035,6 +1502,371 @@ static int coda_h264_padding(int size, char *p)
return nal_size;
}
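+/* Divide the available IRAM between the secondary AXI buffers (ME search RAM, deblocking, bitstream, IP/AC-DC) */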
+static void coda_setup_iram(struct coda_ctx *ctx)
+{
+ struct coda_iram_info *iram_info = &ctx->iram_info;
+ struct coda_dev *dev = ctx->dev;
+ int ipacdc_size;
+ int bitram_size;
+ int dbk_size;
+ int ovl_size;
+ int mb_width;
+ int me_size;
+ int size;
+
+ memset(iram_info, 0, sizeof(*iram_info));
+ size = dev->iram_size;
+
+ if (dev->devtype->product == CODA_DX6)
+ return;
+
+ if (ctx->inst_type == CODA_INST_ENCODER) {
+ struct coda_q_data *q_data_src;
+
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ mb_width = DIV_ROUND_UP(q_data_src->width, 16);
+
+ /* Prioritize in case IRAM is too small for everything */
+ me_size = round_up(round_up(q_data_src->width, 16) * 36 + 2048,
+ 1024);
+ iram_info->search_ram_size = me_size;
+ if (size >= iram_info->search_ram_size) {
+ if (dev->devtype->product == CODA_7541)
+ iram_info->axi_sram_use |= CODA7_USE_HOST_ME_ENABLE;
+ iram_info->search_ram_paddr = dev->iram_paddr;
+ size -= iram_info->search_ram_size;
+ } else {
+ pr_err("IRAM is smaller than the search ram size\n");
+ goto out;
+ }
+
+ /* Only H.264BP and H.263P3 are considered */
+ dbk_size = round_up(128 * mb_width, 1024);
+ if (size >= dbk_size) {
+ iram_info->axi_sram_use |= CODA7_USE_HOST_DBK_ENABLE;
+ iram_info->buf_dbk_y_use = dev->iram_paddr +
+ iram_info->search_ram_size;
+ iram_info->buf_dbk_c_use = iram_info->buf_dbk_y_use +
+ dbk_size / 2;
+ size -= dbk_size;
+ } else {
+ goto out;
+ }
+
+ bitram_size = round_up(128 * mb_width, 1024);
+ if (size >= bitram_size) {
+ iram_info->axi_sram_use |= CODA7_USE_HOST_BIT_ENABLE;
+ iram_info->buf_bit_use = iram_info->buf_dbk_c_use +
+ dbk_size / 2;
+ size -= bitram_size;
+ } else {
+ goto out;
+ }
+
+ ipacdc_size = round_up(128 * mb_width, 1024);
+ if (size >= ipacdc_size) {
+ iram_info->axi_sram_use |= CODA7_USE_HOST_IP_ENABLE;
+ iram_info->buf_ip_ac_dc_use = iram_info->buf_bit_use +
+ bitram_size;
+ size -= ipacdc_size;
+ }
+
+ /* OVL and BTP disabled for encoder */
+ } else if (ctx->inst_type == CODA_INST_DECODER) {
+ struct coda_q_data *q_data_dst;
+ int mb_height;
+
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ mb_width = DIV_ROUND_UP(q_data_dst->width, 16);
+ mb_height = DIV_ROUND_UP(q_data_dst->height, 16);
+
+ dbk_size = round_up(256 * mb_width, 1024);
+ if (size >= dbk_size) {
+ iram_info->axi_sram_use |= CODA7_USE_HOST_DBK_ENABLE;
+ iram_info->buf_dbk_y_use = dev->iram_paddr;
+ iram_info->buf_dbk_c_use = dev->iram_paddr +
+ dbk_size / 2;
+ size -= dbk_size;
+ } else {
+ goto out;
+ }
+
+ bitram_size = round_up(128 * mb_width, 1024);
+ if (size >= bitram_size) {
+ iram_info->axi_sram_use |= CODA7_USE_HOST_BIT_ENABLE;
+ iram_info->buf_bit_use = iram_info->buf_dbk_c_use +
+ dbk_size / 2;
+ size -= bitram_size;
+ } else {
+ goto out;
+ }
+
+ ipacdc_size = round_up(128 * mb_width, 1024);
+ if (size >= ipacdc_size) {
+ iram_info->axi_sram_use |= CODA7_USE_HOST_IP_ENABLE;
+ iram_info->buf_ip_ac_dc_use = iram_info->buf_bit_use +
+ bitram_size;
+ size -= ipacdc_size;
+ } else {
+ goto out;
+ }
+
+ ovl_size = round_up(80 * mb_width, 1024);
+ }
+
+out:
+ switch (dev->devtype->product) {
+ case CODA_DX6:
+ break;
+ case CODA_7541:
+ /* i.MX53 uses secondary AXI for IRAM access */
+ if (iram_info->axi_sram_use & CODA7_USE_HOST_BIT_ENABLE)
+ iram_info->axi_sram_use |= CODA7_USE_BIT_ENABLE;
+ if (iram_info->axi_sram_use & CODA7_USE_HOST_IP_ENABLE)
+ iram_info->axi_sram_use |= CODA7_USE_IP_ENABLE;
+ if (iram_info->axi_sram_use & CODA7_USE_HOST_DBK_ENABLE)
+ iram_info->axi_sram_use |= CODA7_USE_DBK_ENABLE;
+ if (iram_info->axi_sram_use & CODA7_USE_HOST_OVL_ENABLE)
+ iram_info->axi_sram_use |= CODA7_USE_OVL_ENABLE;
+ if (iram_info->axi_sram_use & CODA7_USE_HOST_ME_ENABLE)
+ iram_info->axi_sram_use |= CODA7_USE_ME_ENABLE;
+ }
+
+ if (!(iram_info->axi_sram_use & CODA7_USE_HOST_IP_ENABLE))
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "IRAM smaller than needed\n");
+
+ if (dev->devtype->product == CODA_7541) {
+ /* TODO - Enabling these causes picture errors on CODA7541 */
+ if (ctx->inst_type == CODA_INST_DECODER) {
+ /* fw 1.4.50 */
+ iram_info->axi_sram_use &= ~(CODA7_USE_HOST_IP_ENABLE |
+ CODA7_USE_IP_ENABLE);
+ } else {
+ /* fw 13.4.29 */
+ iram_info->axi_sram_use &= ~(CODA7_USE_HOST_IP_ENABLE |
+ CODA7_USE_HOST_DBK_ENABLE |
+ CODA7_USE_IP_ENABLE |
+ CODA7_USE_DBK_ENABLE);
+ }
+ }
+}
+
+static void coda_free_context_buffers(struct coda_ctx *ctx)
+{
+ struct coda_dev *dev = ctx->dev;
+
+ coda_free_aux_buf(dev, &ctx->slicebuf);
+ coda_free_aux_buf(dev, &ctx->psbuf);
+ if (dev->devtype->product != CODA_DX6)
+ coda_free_aux_buf(dev, &ctx->workbuf);
+}
+
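+/* Allocate the per-context auxiliary buffers (work buffer, slice and PS save buffers) */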
+static int coda_alloc_context_buffers(struct coda_ctx *ctx,
+ struct coda_q_data *q_data)
+{
+ struct coda_dev *dev = ctx->dev;
+ size_t size;
+ int ret;
+
+ switch (dev->devtype->product) {
+ case CODA_7541:
+ size = CODA7_WORK_BUF_SIZE;
+ break;
+ default:
+ return 0;
+ }
+
+ if (ctx->psbuf.vaddr) {
+ v4l2_err(&dev->v4l2_dev, "psmembuf still allocated\n");
+ return -EBUSY;
+ }
+ if (ctx->slicebuf.vaddr) {
+ v4l2_err(&dev->v4l2_dev, "slicebuf still allocated\n");
+ return -EBUSY;
+ }
+ if (ctx->workbuf.vaddr) {
+ v4l2_err(&dev->v4l2_dev, "context buffer still allocated\n");
+ return -EBUSY;
+ }
+
+ if (q_data->fourcc == V4L2_PIX_FMT_H264) {
+ /* worst case slice size */
+ size = (DIV_ROUND_UP(q_data->width, 16) *
+ DIV_ROUND_UP(q_data->height, 16)) * 3200 / 8 + 512;
+ ret = coda_alloc_context_buf(ctx, &ctx->slicebuf, size);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "failed to allocate %d byte slice buffer",
+ ctx->slicebuf.size);
+ return ret;
+ }
+ }
+
+ if (dev->devtype->product == CODA_7541) {
+ ret = coda_alloc_context_buf(ctx, &ctx->psbuf, CODA7_PS_BUF_SIZE);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "failed to allocate psmem buffer");
+ goto err;
+ }
+ }
+
+ ret = coda_alloc_context_buf(ctx, &ctx->workbuf, size);
+ if (ret < 0) {
+ v4l2_err(&dev->v4l2_dev, "failed to allocate %d byte context buffer",
+ ctx->workbuf.size);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ coda_free_context_buffers(ctx);
+ return ret;
+}
+
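+/* Run SEQ_INIT on the bitstream and register the allocated internal frame buffers with the decoder */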
+static int coda_start_decoding(struct coda_ctx *ctx)
+{
+ struct coda_q_data *q_data_src, *q_data_dst;
+ u32 bitstream_buf, bitstream_size;
+ struct coda_dev *dev = ctx->dev;
+ int width, height;
+ u32 src_fourcc;
+ u32 val;
+ int ret;
+
+ /* Start decoding */
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+ bitstream_buf = ctx->bitstream.paddr;
+ bitstream_size = ctx->bitstream.size;
+ src_fourcc = q_data_src->fourcc;
+
+ coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
+
+ /* Update coda bitstream read and write pointers from kfifo */
+ coda_kfifo_sync_to_device_full(ctx);
+
+ ctx->display_idx = -1;
+ ctx->frm_dis_flg = 0;
+ coda_write(dev, 0, CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
+
+ coda_write(dev, CODA_BIT_DEC_SEQ_INIT_ESCAPE,
+ CODA_REG_BIT_BIT_STREAM_PARAM);
+
+ coda_write(dev, bitstream_buf, CODA_CMD_DEC_SEQ_BB_START);
+ coda_write(dev, bitstream_size / 1024, CODA_CMD_DEC_SEQ_BB_SIZE);
+ val = 0;
+ if (dev->devtype->product == CODA_7541)
+ val |= CODA_REORDER_ENABLE;
+ coda_write(dev, val, CODA_CMD_DEC_SEQ_OPTION);
+
+ ctx->params.codec_mode = ctx->codec->mode;
+ ctx->params.codec_mode_aux = 0;
+ if (src_fourcc == V4L2_PIX_FMT_H264) {
+ if (dev->devtype->product == CODA_7541) {
+ coda_write(dev, ctx->psbuf.paddr,
+ CODA_CMD_DEC_SEQ_PS_BB_START);
+ coda_write(dev, (CODA7_PS_BUF_SIZE / 1024),
+ CODA_CMD_DEC_SEQ_PS_BB_SIZE);
+ }
+ }
+
+ if (coda_command_sync(ctx, CODA_COMMAND_SEQ_INIT)) {
+ v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_SEQ_INIT timeout\n");
+ coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
+ return -ETIMEDOUT;
+ }
+
+ /* Update kfifo out pointer from coda bitstream read pointer */
+ coda_kfifo_sync_from_device(ctx);
+
+ coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
+
+ if (coda_read(dev, CODA_RET_DEC_SEQ_SUCCESS) == 0) {
+ v4l2_err(&dev->v4l2_dev,
+ "CODA_COMMAND_SEQ_INIT failed, error code = %d\n",
+ coda_read(dev, CODA_RET_DEC_SEQ_ERR_REASON));
+ return -EAGAIN;
+ }
+
+ val = coda_read(dev, CODA_RET_DEC_SEQ_SRC_SIZE);
+ if (dev->devtype->product == CODA_DX6) {
+ width = (val >> CODADX6_PICWIDTH_OFFSET) & CODADX6_PICWIDTH_MASK;
+ height = val & CODADX6_PICHEIGHT_MASK;
+ } else {
+ width = (val >> CODA7_PICWIDTH_OFFSET) & CODA7_PICWIDTH_MASK;
+ height = val & CODA7_PICHEIGHT_MASK;
+ }
+
+ if (width > q_data_dst->width || height > q_data_dst->height) {
+ v4l2_err(&dev->v4l2_dev, "stream is %dx%d, not %dx%d\n",
+ width, height, q_data_dst->width, q_data_dst->height);
+ return -EINVAL;
+ }
+
+ width = round_up(width, 16);
+ height = round_up(height, 16);
+
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "%s instance %d now: %dx%d\n",
+ __func__, ctx->idx, width, height);
+
+ ctx->num_internal_frames = coda_read(dev, CODA_RET_DEC_SEQ_FRAME_NEED) + 1;
+ if (ctx->num_internal_frames > CODA_MAX_FRAMEBUFFERS) {
+ v4l2_err(&dev->v4l2_dev,
+ "not enough framebuffers to decode (%d < %d)\n",
+ CODA_MAX_FRAMEBUFFERS, ctx->num_internal_frames);
+ return -EINVAL;
+ }
+
+ ret = coda_alloc_framebuffers(ctx, q_data_dst, src_fourcc);
+ if (ret < 0)
+ return ret;
+
+ /* Tell the decoder how many frame buffers we allocated. */
+ coda_write(dev, ctx->num_internal_frames, CODA_CMD_SET_FRAME_BUF_NUM);
+ coda_write(dev, width, CODA_CMD_SET_FRAME_BUF_STRIDE);
+
+ if (dev->devtype->product != CODA_DX6) {
+ /* Set secondary AXI IRAM */
+ coda_setup_iram(ctx);
+
+ coda_write(dev, ctx->iram_info.buf_bit_use,
+ CODA7_CMD_SET_FRAME_AXI_BIT_ADDR);
+ coda_write(dev, ctx->iram_info.buf_ip_ac_dc_use,
+ CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR);
+ coda_write(dev, ctx->iram_info.buf_dbk_y_use,
+ CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR);
+ coda_write(dev, ctx->iram_info.buf_dbk_c_use,
+ CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR);
+ coda_write(dev, ctx->iram_info.buf_ovl_use,
+ CODA7_CMD_SET_FRAME_AXI_OVL_ADDR);
+ }
+
+ if (src_fourcc == V4L2_PIX_FMT_H264) {
+ coda_write(dev, ctx->slicebuf.paddr,
+ CODA_CMD_SET_FRAME_SLICE_BB_START);
+ coda_write(dev, ctx->slicebuf.size / 1024,
+ CODA_CMD_SET_FRAME_SLICE_BB_SIZE);
+ }
+
+ if (dev->devtype->product == CODA_7541) {
+ int max_mb_x = 1920 / 16;
+ int max_mb_y = 1088 / 16;
+ int max_mb_num = max_mb_x * max_mb_y;
+ coda_write(dev, max_mb_num << 16 | max_mb_x << 8 | max_mb_y,
+ CODA7_CMD_SET_FRAME_MAX_DEC_SIZE);
+ }
+
+ if (coda_command_sync(ctx, CODA_COMMAND_SET_FRAME_BUF)) {
+ v4l2_err(&ctx->dev->v4l2_dev,
+ "CODA_COMMAND_SET_FRAME_BUF timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static int coda_encode_header(struct coda_ctx *ctx, struct vb2_buffer *buf,
int header_code, u8 *header, int *size)
{
@@ -1050,7 +1882,7 @@ static int coda_encode_header(struct coda_ctx *ctx, struct vb2_buffer *buf,
v4l2_err(&dev->v4l2_dev, "CODA_COMMAND_ENCODE_HEADER timeout\n");
return ret;
}
- *size = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->idx)) -
+ *size = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx)) -
coda_read(dev, CODA_CMD_ENC_HEADER_BB_START);
memcpy(header, vb2_plane_vaddr(buf, 0), *size);
@@ -1069,26 +1901,36 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
u32 value;
int ret = 0;
- if (count < 1)
- return -EINVAL;
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
+ if (q_data_src->fourcc == V4L2_PIX_FMT_H264) {
+ if (coda_get_bitstream_payload(ctx) < 512)
+ return -EINVAL;
+ } else {
+ if (count < 1)
+ return -EINVAL;
+ }
- if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
ctx->streamon_out = 1;
- else
- ctx->streamon_cap = 1;
- q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
- if (ctx->streamon_out) {
if (coda_format_is_yuv(q_data_src->fourcc))
ctx->inst_type = CODA_INST_ENCODER;
else
ctx->inst_type = CODA_INST_DECODER;
+ } else {
+ if (count < 1)
+ return -EINVAL;
+
+ ctx->streamon_cap = 1;
}
/* Don't start the coda unless both queues are on */
if (!(ctx->streamon_out & ctx->streamon_cap))
return 0;
+ /* Allow device_run with no buffers queued and after streamoff */
+ v4l2_m2m_set_src_buffered(ctx->m2m_ctx, true);
+
ctx->gopcounter = ctx->params.gop_size - 1;
buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
bitstream_buf = vb2_dma_contig_plane_dma_addr(buf, 0);
@@ -1103,6 +1945,25 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
return -EINVAL;
}
+ /* Allocate per-instance buffers */
+ ret = coda_alloc_context_buffers(ctx, q_data_src);
+ if (ret < 0)
+ return ret;
+
+ if (ctx->inst_type == CODA_INST_DECODER) {
+ mutex_lock(&dev->coda_mutex);
+ ret = coda_start_decoding(ctx);
+ mutex_unlock(&dev->coda_mutex);
+ if (ret == -EAGAIN) {
+ return 0;
+ } else if (ret < 0) {
+ return ret;
+ } else {
+ ctx->initialized = 1;
+ return 0;
+ }
+ }
+
if (!coda_is_initialized(dev)) {
v4l2_err(v4l2_dev, "coda is not initialized.\n");
return -EFAULT;
@@ -1111,8 +1972,8 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
mutex_lock(&dev->coda_mutex);
coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR);
- coda_write(dev, bitstream_buf, CODA_REG_BIT_RD_PTR(ctx->idx));
- coda_write(dev, bitstream_buf, CODA_REG_BIT_WR_PTR(ctx->idx));
+ coda_write(dev, bitstream_buf, CODA_REG_BIT_RD_PTR(ctx->reg_idx));
+ coda_write(dev, bitstream_buf, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
switch (dev->devtype->product) {
case CODA_DX6:
coda_write(dev, CODADX6_STREAM_BUF_DYNALLOC_EN |
@@ -1207,6 +2068,8 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
}
coda_write(dev, value, CODA_CMD_ENC_SEQ_OPTION);
+ coda_setup_iram(ctx);
+
if (dst_fourcc == V4L2_PIX_FMT_H264) {
value = (FMO_SLICE_SAVE_BUF_SIZE << 7);
value |= (0 & CODA_FMOPARAM_TYPE_MASK) << CODA_FMOPARAM_TYPE_OFFSET;
@@ -1214,8 +2077,10 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
if (dev->devtype->product == CODA_DX6) {
coda_write(dev, value, CODADX6_CMD_ENC_SEQ_FMO);
} else {
- coda_write(dev, dev->iram_paddr, CODA7_CMD_ENC_SEQ_SEARCH_BASE);
- coda_write(dev, 48 * 1024, CODA7_CMD_ENC_SEQ_SEARCH_SIZE);
+ coda_write(dev, ctx->iram_info.search_ram_paddr,
+ CODA7_CMD_ENC_SEQ_SEARCH_BASE);
+ coda_write(dev, ctx->iram_info.search_ram_size,
+ CODA7_CMD_ENC_SEQ_SEARCH_SIZE);
}
}
@@ -1231,6 +2096,7 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
goto out;
}
+ ctx->num_internal_frames = 2;
ret = coda_alloc_framebuffers(ctx, q_data_src, dst_fourcc);
if (ret < 0) {
v4l2_err(v4l2_dev, "failed to allocate framebuffers\n");
@@ -1239,13 +2105,20 @@ static int coda_start_streaming(struct vb2_queue *q, unsigned int count)
coda_write(dev, ctx->num_internal_frames, CODA_CMD_SET_FRAME_BUF_NUM);
coda_write(dev, round_up(q_data_src->width, 8), CODA_CMD_SET_FRAME_BUF_STRIDE);
+ if (dev->devtype->product == CODA_7541)
+ coda_write(dev, round_up(q_data_src->width, 8),
+ CODA7_CMD_SET_FRAME_SOURCE_BUF_STRIDE);
if (dev->devtype->product != CODA_DX6) {
- coda_write(dev, round_up(q_data_src->width, 8), CODA7_CMD_SET_FRAME_SOURCE_BUF_STRIDE);
- coda_write(dev, dev->iram_paddr + 48 * 1024, CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR);
- coda_write(dev, dev->iram_paddr + 53 * 1024, CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR);
- coda_write(dev, dev->iram_paddr + 58 * 1024, CODA7_CMD_SET_FRAME_AXI_BIT_ADDR);
- coda_write(dev, dev->iram_paddr + 68 * 1024, CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR);
- coda_write(dev, 0x0, CODA7_CMD_SET_FRAME_AXI_OVL_ADDR);
+ coda_write(dev, ctx->iram_info.buf_bit_use,
+ CODA7_CMD_SET_FRAME_AXI_BIT_ADDR);
+ coda_write(dev, ctx->iram_info.buf_ip_ac_dc_use,
+ CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR);
+ coda_write(dev, ctx->iram_info.buf_dbk_y_use,
+ CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR);
+ coda_write(dev, ctx->iram_info.buf_dbk_c_use,
+ CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR);
+ coda_write(dev, ctx->iram_info.buf_ovl_use,
+ CODA7_CMD_SET_FRAME_AXI_OVL_ADDR);
}
ret = coda_command_sync(ctx, CODA_COMMAND_SET_FRAME_BUF);
if (ret < 0) {
@@ -1326,32 +2199,26 @@ static int coda_stop_streaming(struct vb2_queue *q)
struct coda_dev *dev = ctx->dev;
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
"%s: output\n", __func__);
ctx->streamon_out = 0;
+
+ ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG;
+
+ ctx->isequence = 0;
} else {
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
"%s: capture\n", __func__);
ctx->streamon_cap = 0;
- }
-
- /* Don't stop the coda unless both queues are off */
- if (ctx->streamon_out || ctx->streamon_cap)
- return 0;
- cancel_delayed_work(&dev->timeout);
-
- mutex_lock(&dev->coda_mutex);
- v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
- "%s: sent command 'SEQ_END' to coda\n", __func__);
- if (coda_command_sync(ctx, CODA_COMMAND_SEQ_END)) {
- v4l2_err(&dev->v4l2_dev,
- "CODA_COMMAND_SEQ_END failed\n");
- return -ETIMEDOUT;
+ ctx->osequence = 0;
}
- mutex_unlock(&dev->coda_mutex);
- coda_free_framebuffers(ctx);
+ if (!ctx->streamon_out && !ctx->streamon_cap) {
+ kfifo_init(&ctx->bitstream_fifo,
+ ctx->bitstream.vaddr, ctx->bitstream.size);
+ ctx->runcounter = 0;
+ }
return 0;
}
@@ -1511,23 +2378,41 @@ static int coda_open(struct file *file)
{
struct coda_dev *dev = video_drvdata(file);
struct coda_ctx *ctx = NULL;
- int ret = 0;
+ int ret;
int idx;
- idx = coda_next_free_instance(dev);
- if (idx >= CODA_MAX_INSTANCES)
- return -EBUSY;
- set_bit(idx, &dev->instance_mask);
-
ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
+ idx = coda_next_free_instance(dev);
+ if (idx >= CODA_MAX_INSTANCES) {
+ ret = -EBUSY;
+ goto err_coda_max;
+ }
+ set_bit(idx, &dev->instance_mask);
+
+ INIT_WORK(&ctx->skip_run, coda_skip_run);
v4l2_fh_init(&ctx->fh, video_devdata(file));
file->private_data = &ctx->fh;
v4l2_fh_add(&ctx->fh);
ctx->dev = dev;
ctx->idx = idx;
+ switch (dev->devtype->product) {
+ case CODA_7541:
+ ctx->reg_idx = 0;
+ break;
+ default:
+ ctx->reg_idx = idx;
+ }
+
+ ret = clk_prepare_enable(dev->clk_per);
+ if (ret)
+ goto err_clk_per;
+
+ ret = clk_prepare_enable(dev->clk_ahb);
+ if (ret)
+ goto err_clk_ahb;
set_default_params(ctx);
ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx,
@@ -1537,39 +2422,62 @@ static int coda_open(struct file *file)
v4l2_err(&dev->v4l2_dev, "%s return error (%d)\n",
__func__, ret);
- goto err;
+ goto err_ctx_init;
}
ret = coda_ctrls_setup(ctx);
if (ret) {
v4l2_err(&dev->v4l2_dev, "failed to setup coda controls\n");
- goto err;
+ goto err_ctrls_setup;
}
ctx->fh.ctrl_handler = &ctx->ctrls;
- ctx->parabuf.vaddr = dma_alloc_coherent(&dev->plat_dev->dev,
- CODA_PARA_BUF_SIZE, &ctx->parabuf.paddr, GFP_KERNEL);
- if (!ctx->parabuf.vaddr) {
+ ret = coda_alloc_context_buf(ctx, &ctx->parabuf, CODA_PARA_BUF_SIZE);
+ if (ret < 0) {
v4l2_err(&dev->v4l2_dev, "failed to allocate parabuf");
+ goto err_dma_alloc;
+ }
+
+ ctx->bitstream.size = CODA_MAX_FRAME_SIZE;
+ ctx->bitstream.vaddr = dma_alloc_writecombine(&dev->plat_dev->dev,
+ ctx->bitstream.size, &ctx->bitstream.paddr, GFP_KERNEL);
+ if (!ctx->bitstream.vaddr) {
+ v4l2_err(&dev->v4l2_dev, "failed to allocate bitstream ringbuffer");
ret = -ENOMEM;
- goto err;
+ goto err_dma_writecombine;
}
+ kfifo_init(&ctx->bitstream_fifo,
+ ctx->bitstream.vaddr, ctx->bitstream.size);
+ mutex_init(&ctx->bitstream_mutex);
+ mutex_init(&ctx->buffer_mutex);
coda_lock(ctx);
list_add(&ctx->list, &dev->instances);
coda_unlock(ctx);
- clk_prepare_enable(dev->clk_per);
- clk_prepare_enable(dev->clk_ahb);
-
v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "Created instance %d (%p)\n",
ctx->idx, ctx);
return 0;
-err:
+err_dma_writecombine:
+ coda_free_context_buffers(ctx);
+ if (ctx->dev->devtype->product == CODA_DX6)
+ coda_free_aux_buf(dev, &ctx->workbuf);
+ coda_free_aux_buf(dev, &ctx->parabuf);
+err_dma_alloc:
+ v4l2_ctrl_handler_free(&ctx->ctrls);
+err_ctrls_setup:
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+err_ctx_init:
+ clk_disable_unprepare(dev->clk_ahb);
+err_clk_ahb:
+ clk_disable_unprepare(dev->clk_per);
+err_clk_per:
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
+ clear_bit(ctx->idx, &dev->instance_mask);
+err_coda_max:
kfree(ctx);
return ret;
}
@@ -1582,16 +2490,37 @@ static int coda_release(struct file *file)
v4l2_dbg(1, coda_debug, &dev->v4l2_dev, "Releasing instance %p\n",
ctx);
+ /* If this instance is running, call .job_abort and wait for it to end */
+ v4l2_m2m_ctx_release(ctx->m2m_ctx);
+
+ /* In case the instance was not running, we still need to call SEQ_END */
+ mutex_lock(&dev->coda_mutex);
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "%s: sent command 'SEQ_END' to coda\n", __func__);
+ if (coda_command_sync(ctx, CODA_COMMAND_SEQ_END)) {
+ v4l2_err(&dev->v4l2_dev,
+ "CODA_COMMAND_SEQ_END failed\n");
+ mutex_unlock(&dev->coda_mutex);
+ return -ETIMEDOUT;
+ }
+ mutex_unlock(&dev->coda_mutex);
+
+ coda_free_framebuffers(ctx);
+
coda_lock(ctx);
list_del(&ctx->list);
coda_unlock(ctx);
- dma_free_coherent(&dev->plat_dev->dev, CODA_PARA_BUF_SIZE,
- ctx->parabuf.vaddr, ctx->parabuf.paddr);
- v4l2_m2m_ctx_release(ctx->m2m_ctx);
+ dma_free_writecombine(&dev->plat_dev->dev, ctx->bitstream.size,
+ ctx->bitstream.vaddr, ctx->bitstream.paddr);
+ coda_free_context_buffers(ctx);
+ if (ctx->dev->devtype->product == CODA_DX6)
+ coda_free_aux_buf(dev, &ctx->workbuf);
+
+ coda_free_aux_buf(dev, &ctx->parabuf);
v4l2_ctrl_handler_free(&ctx->ctrls);
- clk_disable_unprepare(dev->clk_per);
clk_disable_unprepare(dev->clk_ahb);
+ clk_disable_unprepare(dev->clk_per);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
clear_bit(ctx->idx, &dev->instance_mask);
@@ -1628,55 +2557,180 @@ static const struct v4l2_file_operations coda_fops = {
.mmap = coda_mmap,
};
-static irqreturn_t coda_irq_handler(int irq, void *data)
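+/* Evaluate the results of a decoder picture run and return the copied-out display frame, if any */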
+static void coda_finish_decode(struct coda_ctx *ctx)
{
- struct vb2_buffer *src_buf, *dst_buf;
- struct coda_dev *dev = data;
- u32 wr_ptr, start_ptr;
- struct coda_ctx *ctx;
+ struct coda_dev *dev = ctx->dev;
+ struct coda_q_data *q_data_src;
+ struct coda_q_data *q_data_dst;
+ struct vb2_buffer *dst_buf;
+ int width, height;
+ int decoded_idx;
+ int display_idx;
+ u32 src_fourcc;
+ int success;
+ u32 val;
- cancel_delayed_work(&dev->timeout);
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
- /* read status register to attend the IRQ */
- coda_read(dev, CODA_REG_BIT_INT_STATUS);
- coda_write(dev, CODA_REG_BIT_INT_CLEAR_SET,
- CODA_REG_BIT_INT_CLEAR);
+ /* Update kfifo out pointer from coda bitstream read pointer */
+ coda_kfifo_sync_from_device(ctx);
- ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
- if (ctx == NULL) {
- v4l2_err(&dev->v4l2_dev, "Instance released before the end of transaction\n");
- mutex_unlock(&dev->coda_mutex);
- return IRQ_HANDLED;
+ /*
+ * in stream-end mode, the read pointer can overshoot the write pointer
+ * by up to 512 bytes
+ */
+ if (ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) {
+ if (coda_get_bitstream_payload(ctx) >= 0x100000 - 512)
+ kfifo_init(&ctx->bitstream_fifo,
+ ctx->bitstream.vaddr, ctx->bitstream.size);
}
- if (ctx->aborting) {
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "task has been aborted\n");
- mutex_unlock(&dev->coda_mutex);
- return IRQ_HANDLED;
+ q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
+ src_fourcc = q_data_src->fourcc;
+
+ val = coda_read(dev, CODA_RET_DEC_PIC_SUCCESS);
+ if (val != 1)
+ pr_err("DEC_PIC_SUCCESS = %d\n", val);
+
+ success = val & 0x1;
+ if (!success)
+ v4l2_err(&dev->v4l2_dev, "decode failed\n");
+
+ if (src_fourcc == V4L2_PIX_FMT_H264) {
+ if (val & (1 << 3))
+ v4l2_err(&dev->v4l2_dev,
+ "insufficient PS buffer space (%d bytes)\n",
+ ctx->psbuf.size);
+ if (val & (1 << 2))
+ v4l2_err(&dev->v4l2_dev,
+ "insufficient slice buffer space (%d bytes)\n",
+ ctx->slicebuf.size);
}
- if (coda_isbusy(ctx->dev)) {
- v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
- "coda is still busy!!!!\n");
- return IRQ_NONE;
+ val = coda_read(dev, CODA_RET_DEC_PIC_SIZE);
+ width = (val >> 16) & 0xffff;
+ height = val & 0xffff;
+
+ q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
+
+ val = coda_read(dev, CODA_RET_DEC_PIC_TYPE);
+ if ((val & 0x7) == 0) {
+ dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_KEYFRAME;
+ dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_PFRAME;
+ } else {
+ dst_buf->v4l2_buf.flags |= V4L2_BUF_FLAG_PFRAME;
+ dst_buf->v4l2_buf.flags &= ~V4L2_BUF_FLAG_KEYFRAME;
+ }
+
+ val = coda_read(dev, CODA_RET_DEC_PIC_ERR_MB);
+ if (val > 0)
+ v4l2_err(&dev->v4l2_dev,
+ "errors in %d macroblocks\n", val);
+
+ if (dev->devtype->product == CODA_7541) {
+ val = coda_read(dev, CODA_RET_DEC_PIC_OPTION);
+ if (val == 0) {
+ /* not enough bitstream data */
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "prescan failed: %d\n", val);
+ ctx->prescan_failed = true;
+ return;
+ }
+ }
+
+ ctx->frm_dis_flg = coda_read(dev, CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
+
+ /*
+ * The previous display frame was copied out by the rotator,
+ * now it can be overwritten again
+ */
+ if (ctx->display_idx >= 0 &&
+ ctx->display_idx < ctx->num_internal_frames) {
+ ctx->frm_dis_flg &= ~(1 << ctx->display_idx);
+ coda_write(dev, ctx->frm_dis_flg,
+ CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx));
+ }
+
+ /*
+ * The index of the last decoded frame, not necessarily in
+ * display order, and the index of the next display frame.
+ * The latter could have been decoded in a previous run.
+ */
+ decoded_idx = coda_read(dev, CODA_RET_DEC_PIC_CUR_IDX);
+ display_idx = coda_read(dev, CODA_RET_DEC_PIC_FRAME_IDX);
+
+ if (decoded_idx == -1) {
+ /* no frame was decoded, but we might have a display frame */
+ if (display_idx < 0 && ctx->display_idx < 0)
+ ctx->prescan_failed = true;
+ } else if (decoded_idx == -2) {
+ /* no frame was decoded, but we still return the remaining buffers */
+ } else if (decoded_idx < 0 || decoded_idx >= ctx->num_internal_frames) {
+ v4l2_err(&dev->v4l2_dev,
+ "decoded frame index out of range: %d\n", decoded_idx);
}
+ if (display_idx == -1) {
+ /*
+ * no more frames to be decoded, but there could still
+ * be rotator output to dequeue
+ */
+ ctx->prescan_failed = true;
+ } else if (display_idx == -3) {
+ /* possibly a prescan failure */
+ } else if (display_idx < 0 || display_idx >= ctx->num_internal_frames) {
+ v4l2_err(&dev->v4l2_dev,
+ "presentation frame index out of range: %d\n",
+ display_idx);
+ }
+
+ /* If a frame was copied out, return it */
+ if (ctx->display_idx >= 0 &&
+ ctx->display_idx < ctx->num_internal_frames) {
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ dst_buf->v4l2_buf.sequence = ctx->osequence++;
+
+ vb2_set_plane_payload(dst_buf, 0, width * height * 3 / 2);
+
+ v4l2_m2m_buf_done(dst_buf, success ? VB2_BUF_STATE_DONE :
+ VB2_BUF_STATE_ERROR);
+
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "job finished: decoding frame (%d) (%s)\n",
+ dst_buf->v4l2_buf.sequence,
+ (dst_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) ?
+ "KEYFRAME" : "PFRAME");
+ } else {
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "job finished: no frame decoded\n");
+ }
+
+ /* The rotator will copy the current display frame next time */
+ ctx->display_idx = display_idx;
+}
+
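+/* Read back the results of an encoder picture run and set the payload of the capture buffer */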
+static void coda_finish_encode(struct coda_ctx *ctx)
+{
+ struct vb2_buffer *src_buf, *dst_buf;
+ struct coda_dev *dev = ctx->dev;
+ u32 wr_ptr, start_ptr;
+
src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
/* Get results from the coda */
coda_read(dev, CODA_RET_ENC_PIC_TYPE);
start_ptr = coda_read(dev, CODA_CMD_ENC_PIC_BB_START);
- wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->idx));
+ wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx));
+
/* Calculate bytesused field */
if (dst_buf->v4l2_buf.sequence == 0) {
- dst_buf->v4l2_planes[0].bytesused = (wr_ptr - start_ptr) +
- ctx->vpu_header_size[0] +
- ctx->vpu_header_size[1] +
- ctx->vpu_header_size[2];
+ vb2_set_plane_payload(dst_buf, 0, wr_ptr - start_ptr +
+ ctx->vpu_header_size[0] +
+ ctx->vpu_header_size[1] +
+ ctx->vpu_header_size[2]);
} else {
- dst_buf->v4l2_planes[0].bytesused = (wr_ptr - start_ptr);
+ vb2_set_plane_payload(dst_buf, 0, wr_ptr - start_ptr);
}
v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "frame size = %u\n",
@@ -1708,8 +2762,62 @@ static irqreturn_t coda_irq_handler(int irq, void *data)
dst_buf->v4l2_buf.sequence,
(dst_buf->v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) ?
"KEYFRAME" : "PFRAME");
+}
+
+static irqreturn_t coda_irq_handler(int irq, void *data)
+{
+ struct coda_dev *dev = data;
+ struct coda_ctx *ctx;
+
+ cancel_delayed_work(&dev->timeout);
+
+ /* read the status register to acknowledge the IRQ */
+ coda_read(dev, CODA_REG_BIT_INT_STATUS);
+ coda_write(dev, CODA_REG_BIT_INT_CLEAR_SET,
+ CODA_REG_BIT_INT_CLEAR);
+
+ ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);
+ if (ctx == NULL) {
+ v4l2_err(&dev->v4l2_dev, "Instance released before the end of transaction\n");
+ mutex_unlock(&dev->coda_mutex);
+ return IRQ_HANDLED;
+ }
+
+ if (ctx->aborting) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "task has been aborted\n");
+ goto out;
+ }
+
+ if (coda_isbusy(ctx->dev)) {
+ v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev,
+ "coda is still busy!!!!\n");
+ return IRQ_NONE;
+ }
+
+ if (ctx->inst_type == CODA_INST_DECODER)
+ coda_finish_decode(ctx);
+ else
+ coda_finish_encode(ctx);
+
+out:
+ if (ctx->aborting || (!ctx->streamon_cap && !ctx->streamon_out)) {
+ v4l2_dbg(1, coda_debug, &dev->v4l2_dev,
+ "%s: sent command 'SEQ_END' to coda\n", __func__);
+ if (coda_command_sync(ctx, CODA_COMMAND_SEQ_END)) {
+ v4l2_err(&dev->v4l2_dev,
+ "CODA_COMMAND_SEQ_END failed\n");
+ }
+
+ kfifo_init(&ctx->bitstream_fifo,
+ ctx->bitstream.vaddr, ctx->bitstream.size);
+
+ coda_free_framebuffers(ctx);
+ coda_free_context_buffers(ctx);
+ }
mutex_unlock(&dev->coda_mutex);
+ mutex_unlock(&ctx->buffer_mutex);
v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->m2m_ctx);
@@ -1726,6 +2834,8 @@ static void coda_timeout(struct work_struct *work)
mutex_lock(&dev->dev_mutex);
list_for_each_entry(ctx, &dev->instances, list) {
+ if (mutex_is_locked(&ctx->buffer_mutex))
+ mutex_unlock(&ctx->buffer_mutex);
v4l2_m2m_streamoff(NULL, ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT);
v4l2_m2m_streamoff(NULL, ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE);
}
@@ -1738,7 +2848,7 @@ static void coda_timeout(struct work_struct *work)
static u32 coda_supported_firmwares[] = {
CODA_FIRMWARE_VERNUM(CODA_DX6, 2, 2, 5),
- CODA_FIRMWARE_VERNUM(CODA_7541, 13, 4, 29),
+ CODA_FIRMWARE_VERNUM(CODA_7541, 1, 4, 50),
};
static bool coda_firmware_supported(u32 vernum)
@@ -1771,10 +2881,15 @@ static int coda_hw_init(struct coda_dev *dev)
u16 product, major, minor, release;
u32 data;
u16 *p;
- int i;
+ int i, ret;
- clk_prepare_enable(dev->clk_per);
- clk_prepare_enable(dev->clk_ahb);
+ ret = clk_prepare_enable(dev->clk_per);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(dev->clk_ahb);
+ if (ret)
+ goto err_clk_ahb;
/*
* Copy the first CODA_ISRAM_SIZE in the internal SRAM.
@@ -1803,8 +2918,14 @@ static int coda_hw_init(struct coda_dev *dev)
coda_write(dev, 0, CODA_REG_BIT_CODE_BUF_ADDR + i * 4);
/* Tell the BIT where to find everything it needs */
- coda_write(dev, dev->workbuf.paddr,
- CODA_REG_BIT_WORK_BUF_ADDR);
+ if (dev->devtype->product == CODA_7541) {
+ coda_write(dev, dev->tempbuf.paddr,
+ CODA_REG_BIT_TEMP_BUF_ADDR);
+ coda_write(dev, 0, CODA_REG_BIT_BIT_STREAM_PARAM);
+ } else {
+ coda_write(dev, dev->workbuf.paddr,
+ CODA_REG_BIT_WORK_BUF_ADDR);
+ }
coda_write(dev, dev->codebuf.paddr,
CODA_REG_BIT_CODE_BUF_ADDR);
coda_write(dev, 0, CODA_REG_BIT_CODE_RUN);
@@ -1877,6 +2998,10 @@ static int coda_hw_init(struct coda_dev *dev)
}
return 0;
+
+err_clk_ahb:
+ clk_disable_unprepare(dev->clk_per);
+ return ret;
}
static void coda_fw_callback(const struct firmware *fw, void *context)
@@ -1891,11 +3016,8 @@ static void coda_fw_callback(const struct firmware *fw, void *context)
}
/* allocate auxiliary per-device code buffer for the BIT processor */
- dev->codebuf.size = fw->size;
- dev->codebuf.vaddr = dma_alloc_coherent(&pdev->dev, fw->size,
- &dev->codebuf.paddr,
- GFP_KERNEL);
- if (!dev->codebuf.vaddr) {
+ ret = coda_alloc_aux_buf(dev, &dev->codebuf, fw->size);
+ if (ret < 0) {
dev_err(&pdev->dev, "failed to allocate code buffer\n");
return;
}
@@ -1987,7 +3109,7 @@ MODULE_DEVICE_TABLE(platform, coda_platform_ids);
#ifdef CONFIG_OF
static const struct of_device_id coda_dt_ids[] = {
- { .compatible = "fsl,imx27-vpu", .data = &coda_platform_ids[CODA_IMX27] },
+ { .compatible = "fsl,imx27-vpu", .data = &coda_devdata[CODA_IMX27] },
{ .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] },
{ /* sentinel */ }
};
@@ -2032,11 +3154,6 @@ static int coda_probe(struct platform_device *pdev)
/* Get memory for physical registers */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "failed to get memory region resource\n");
- return -ENOENT;
- }
-
dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(dev->regs_base))
return PTR_ERR(dev->regs_base);
@@ -2048,8 +3165,8 @@ static int coda_probe(struct platform_device *pdev)
return -ENOENT;
}
- if (devm_request_irq(&pdev->dev, irq, coda_irq_handler,
- 0, CODA_NAME, dev) < 0) {
+ if (devm_request_threaded_irq(&pdev->dev, irq, NULL, coda_irq_handler,
+ IRQF_ONESHOT, CODA_NAME, dev) < 0) {
dev_err(&pdev->dev, "failed to request irq\n");
return -ENOENT;
}
@@ -2085,24 +3202,36 @@ static int coda_probe(struct platform_device *pdev)
/* allocate auxiliary per-device buffers for the BIT processor */
switch (dev->devtype->product) {
case CODA_DX6:
- dev->workbuf.size = CODADX6_WORK_BUF_SIZE;
+ ret = coda_alloc_aux_buf(dev, &dev->workbuf,
+ CODADX6_WORK_BUF_SIZE);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to allocate work buffer\n");
+ v4l2_device_unregister(&dev->v4l2_dev);
+ return ret;
+ }
+ break;
+ case CODA_7541:
+ dev->tempbuf.size = CODA7_TEMP_BUF_SIZE;
break;
- default:
- dev->workbuf.size = CODA7_WORK_BUF_SIZE;
}
- dev->workbuf.vaddr = dma_alloc_coherent(&pdev->dev, dev->workbuf.size,
- &dev->workbuf.paddr,
- GFP_KERNEL);
- if (!dev->workbuf.vaddr) {
- dev_err(&pdev->dev, "failed to allocate work buffer\n");
- v4l2_device_unregister(&dev->v4l2_dev);
- return -ENOMEM;
+ if (dev->tempbuf.size) {
+ ret = coda_alloc_aux_buf(dev, &dev->tempbuf,
+ dev->tempbuf.size);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to allocate temp buffer\n");
+ v4l2_device_unregister(&dev->v4l2_dev);
+ return ret;
+ }
}
- if (dev->devtype->product == CODA_DX6)
+ switch (dev->devtype->product) {
+ case CODA_DX6:
dev->iram_size = CODADX6_IRAM_SIZE;
- else
+ break;
+ case CODA_7541:
dev->iram_size = CODA7_IRAM_SIZE;
+ break;
+ }
dev->iram_vaddr = gen_pool_alloc(dev->iram_pool, dev->iram_size);
if (!dev->iram_vaddr) {
dev_err(&pdev->dev, "unable to alloc iram\n");
@@ -2128,12 +3257,9 @@ static int coda_remove(struct platform_device *pdev)
v4l2_device_unregister(&dev->v4l2_dev);
if (dev->iram_vaddr)
gen_pool_free(dev->iram_pool, dev->iram_vaddr, dev->iram_size);
- if (dev->codebuf.vaddr)
- dma_free_coherent(&pdev->dev, dev->codebuf.size,
- &dev->codebuf.vaddr, dev->codebuf.paddr);
- if (dev->workbuf.vaddr)
- dma_free_coherent(&pdev->dev, dev->workbuf.size, &dev->workbuf.vaddr,
- dev->workbuf.paddr);
+ coda_free_aux_buf(dev, &dev->codebuf);
+ coda_free_aux_buf(dev, &dev->tempbuf);
+ coda_free_aux_buf(dev, &dev->workbuf);
return 0;
}
diff --git a/drivers/media/platform/coda.h b/drivers/media/platform/coda.h
index ace0bf0a3b9..4e32e2edea6 100644
--- a/drivers/media/platform/coda.h
+++ b/drivers/media/platform/coda.h
@@ -43,14 +43,26 @@
#define CODA_STREAM_ENDIAN_SELECT (1 << 0)
#define CODA_REG_BIT_FRAME_MEM_CTRL 0x110
#define CODA_IMAGE_ENDIAN_SELECT (1 << 0)
+#define CODA_REG_BIT_BIT_STREAM_PARAM 0x114
+#define CODA_BIT_STREAM_END_FLAG (1 << 2)
+#define CODA_BIT_DEC_SEQ_INIT_ESCAPE (1 << 0)
+#define CODA_REG_BIT_TEMP_BUF_ADDR 0x118
#define CODA_REG_BIT_RD_PTR(x) (0x120 + 8 * (x))
#define CODA_REG_BIT_WR_PTR(x) (0x124 + 8 * (x))
+#define CODA_REG_BIT_FRM_DIS_FLG(x) (0x150 + 4 * (x))
#define CODADX6_REG_BIT_SEARCH_RAM_BASE_ADDR 0x140
#define CODA7_REG_BIT_AXI_SRAM_USE 0x140
-#define CODA7_USE_BIT_ENABLE (1 << 0)
+#define CODA7_USE_HOST_ME_ENABLE (1 << 11)
+#define CODA7_USE_HOST_OVL_ENABLE (1 << 10)
+#define CODA7_USE_HOST_DBK_ENABLE (1 << 9)
+#define CODA7_USE_HOST_IP_ENABLE (1 << 8)
#define CODA7_USE_HOST_BIT_ENABLE (1 << 7)
#define CODA7_USE_ME_ENABLE (1 << 4)
-#define CODA7_USE_HOST_ME_ENABLE (1 << 11)
+#define CODA7_USE_OVL_ENABLE (1 << 3)
+#define CODA7_USE_DBK_ENABLE (1 << 2)
+#define CODA7_USE_IP_ENABLE (1 << 1)
+#define CODA7_USE_BIT_ENABLE (1 << 0)
+
#define CODA_REG_BIT_BUSY 0x160
#define CODA_REG_BIT_BUSY_FLAG 1
#define CODA_REG_BIT_RUN_COMMAND 0x164
@@ -84,6 +96,15 @@
#define CODA_MODE_INVALID 0xffff
#define CODA_REG_BIT_INT_ENABLE 0x170
#define CODA_INT_INTERRUPT_ENABLE (1 << 3)
+#define CODA_REG_BIT_INT_REASON 0x174
+#define CODA7_REG_BIT_RUN_AUX_STD 0x178
+#define CODA_MP4_AUX_MPEG4 0
+#define CODA_MP4_AUX_DIVX3 1
+#define CODA_VPX_AUX_THO 0
+#define CODA_VPX_AUX_VP6 1
+#define CODA_VPX_AUX_VP8 2
+#define CODA_H264_AUX_AVC 0
+#define CODA_H264_AUX_MVC 1
/*
* Commands' mailbox:
@@ -92,15 +113,89 @@
* issued.
*/
+/* Decoder Sequence Initialization */
+#define CODA_CMD_DEC_SEQ_BB_START 0x180
+#define CODA_CMD_DEC_SEQ_BB_SIZE 0x184
+#define CODA_CMD_DEC_SEQ_OPTION 0x188
+#define CODA_REORDER_ENABLE (1 << 1)
+#define CODADX6_QP_REPORT (1 << 0)
+#define CODA7_MP4_DEBLK_ENABLE (1 << 0)
+#define CODA_CMD_DEC_SEQ_SRC_SIZE 0x18c
+#define CODA_CMD_DEC_SEQ_START_BYTE 0x190
+#define CODA_CMD_DEC_SEQ_PS_BB_START 0x194
+#define CODA_CMD_DEC_SEQ_PS_BB_SIZE 0x198
+#define CODA_CMD_DEC_SEQ_MP4_ASP_CLASS 0x19c
+#define CODA_CMD_DEC_SEQ_X264_MV_EN 0x19c
+#define CODA_CMD_DEC_SEQ_SPP_CHUNK_SIZE 0x1a0
+
+#define CODA7_RET_DEC_SEQ_ASPECT 0x1b0
+#define CODA_RET_DEC_SEQ_SUCCESS 0x1c0
+#define CODA_RET_DEC_SEQ_SRC_FMT 0x1c4 /* SRC_SIZE on CODA7 */
+#define CODA_RET_DEC_SEQ_SRC_SIZE 0x1c4
+#define CODA_RET_DEC_SEQ_SRC_F_RATE 0x1c8
+#define CODA9_RET_DEC_SEQ_ASPECT 0x1c8
+#define CODA_RET_DEC_SEQ_FRAME_NEED 0x1cc
+#define CODA_RET_DEC_SEQ_FRAME_DELAY 0x1d0
+#define CODA_RET_DEC_SEQ_INFO 0x1d4
+#define CODA_RET_DEC_SEQ_CROP_LEFT_RIGHT 0x1d8
+#define CODA_RET_DEC_SEQ_CROP_TOP_BOTTOM 0x1dc
+#define CODA_RET_DEC_SEQ_NEXT_FRAME_NUM 0x1e0
+#define CODA_RET_DEC_SEQ_ERR_REASON 0x1e0
+#define CODA_RET_DEC_SEQ_FRATE_NR 0x1e4
+#define CODA_RET_DEC_SEQ_FRATE_DR 0x1e8
+#define CODA_RET_DEC_SEQ_JPG_PARA 0x1e4
+#define CODA_RET_DEC_SEQ_JPG_THUMB_IND 0x1e8
+
+/* Decoder Picture Run */
+#define CODA_CMD_DEC_PIC_ROT_MODE 0x180
+#define CODA_CMD_DEC_PIC_ROT_ADDR_Y 0x184
+#define CODA_CMD_DEC_PIC_ROT_ADDR_CB 0x188
+#define CODA_CMD_DEC_PIC_ROT_ADDR_CR 0x18c
+#define CODA_CMD_DEC_PIC_ROT_STRIDE 0x190
+
+#define CODA_CMD_DEC_PIC_OPTION 0x194
+#define CODA_PRE_SCAN_EN (1 << 0)
+#define CODA_PRE_SCAN_MODE_DECODE (0 << 1)
+#define CODA_PRE_SCAN_MODE_RETURN (1 << 1)
+#define CODA_IFRAME_SEARCH_EN (1 << 2)
+#define CODA_SKIP_FRAME_MODE (0x3 << 3)
+#define CODA_CMD_DEC_PIC_SKIP_NUM 0x198
+#define CODA_CMD_DEC_PIC_CHUNK_SIZE 0x19c
+#define CODA_CMD_DEC_PIC_BB_START 0x1a0
+#define CODA_CMD_DEC_PIC_START_BYTE 0x1a4
+#define CODA_RET_DEC_PIC_SIZE 0x1bc
+#define CODA_RET_DEC_PIC_FRAME_NUM 0x1c0
+#define CODA_RET_DEC_PIC_FRAME_IDX 0x1c4
+#define CODA_RET_DEC_PIC_ERR_MB 0x1c8
+#define CODA_RET_DEC_PIC_TYPE 0x1cc
+#define CODA_PIC_TYPE_MASK 0x7
+#define CODA_PIC_TYPE_MASK_VC1 0x3f
+#define CODA9_PIC_TYPE_FIRST_MASK (0x7 << 3)
+#define CODA9_PIC_TYPE_IDR_MASK (0x3 << 6)
+#define CODA7_PIC_TYPE_H264_NPF_MASK (0x3 << 16)
+#define CODA7_PIC_TYPE_INTERLACED (1 << 18)
+#define CODA_RET_DEC_PIC_POST 0x1d0
+#define CODA_RET_DEC_PIC_MVC_REPORT 0x1d0
+#define CODA_RET_DEC_PIC_OPTION 0x1d4
+#define CODA_RET_DEC_PIC_SUCCESS 0x1d8
+#define CODA_RET_DEC_PIC_CUR_IDX 0x1dc
+#define CODA_RET_DEC_PIC_CROP_LEFT_RIGHT 0x1e0
+#define CODA_RET_DEC_PIC_CROP_TOP_BOTTOM 0x1e4
+#define CODA_RET_DEC_PIC_FRAME_NEED 0x1ec
+
/* Encoder Sequence Initialization */
#define CODA_CMD_ENC_SEQ_BB_START 0x180
#define CODA_CMD_ENC_SEQ_BB_SIZE 0x184
#define CODA_CMD_ENC_SEQ_OPTION 0x188
+#define CODA7_OPTION_AVCINTRA16X16ONLY_OFFSET 9
#define CODA7_OPTION_GAMMA_OFFSET 8
+#define CODA7_OPTION_RCQPMAX_OFFSET 7
#define CODADX6_OPTION_GAMMA_OFFSET 7
+#define CODA7_OPTION_RCQPMIN_OFFSET 6
#define CODA_OPTION_LIMITQP_OFFSET 6
#define CODA_OPTION_RCINTRAQP_OFFSET 5
#define CODA_OPTION_FMO_OFFSET 4
+#define CODA_OPTION_AVC_AUD_OFFSET 2
#define CODA_OPTION_SLICEREPORT_OFFSET 1
#define CODA_CMD_ENC_SEQ_COD_STD 0x18c
#define CODA_STD_MPEG4 0
@@ -169,8 +264,10 @@
#define CODA_FMOPARAM_TYPE_MASK 1
#define CODA_FMOPARAM_SLICENUM_OFFSET 0
#define CODA_FMOPARAM_SLICENUM_MASK 0x0f
+#define CODADX6_CMD_ENC_SEQ_INTRA_QP 0x1bc
#define CODA7_CMD_ENC_SEQ_SEARCH_BASE 0x1b8
#define CODA7_CMD_ENC_SEQ_SEARCH_SIZE 0x1bc
+#define CODA7_CMD_ENC_SEQ_INTRA_QP 0x1c4
#define CODA_CMD_ENC_SEQ_RC_QP_MAX 0x1c8
#define CODA_QPMAX_OFFSET 0
#define CODA_QPMAX_MASK 0x3f
@@ -197,18 +294,24 @@
#define CODA_CMD_ENC_PIC_OPTION 0x194
#define CODA_CMD_ENC_PIC_BB_START 0x198
#define CODA_CMD_ENC_PIC_BB_SIZE 0x19c
+#define CODA_RET_ENC_FRAME_NUM 0x1c0
#define CODA_RET_ENC_PIC_TYPE 0x1c4
+#define CODA_RET_ENC_PIC_FRAME_IDX 0x1c8
#define CODA_RET_ENC_PIC_SLICE_NUM 0x1cc
#define CODA_RET_ENC_PIC_FLAG 0x1d0
+#define CODA_RET_ENC_PIC_SUCCESS 0x1d8
/* Set Frame Buffer */
#define CODA_CMD_SET_FRAME_BUF_NUM 0x180
#define CODA_CMD_SET_FRAME_BUF_STRIDE 0x184
+#define CODA_CMD_SET_FRAME_SLICE_BB_START 0x188
+#define CODA_CMD_SET_FRAME_SLICE_BB_SIZE 0x18c
#define CODA7_CMD_SET_FRAME_AXI_BIT_ADDR 0x190
#define CODA7_CMD_SET_FRAME_AXI_IPACDC_ADDR 0x194
#define CODA7_CMD_SET_FRAME_AXI_DBKY_ADDR 0x198
#define CODA7_CMD_SET_FRAME_AXI_DBKC_ADDR 0x19c
#define CODA7_CMD_SET_FRAME_AXI_OVL_ADDR 0x1a0
+#define CODA7_CMD_SET_FRAME_MAX_DEC_SIZE 0x1a4
#define CODA7_CMD_SET_FRAME_SOURCE_BUF_STRIDE 0x1a8
/* Encoder Header */
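The CODA_* additions above follow the register-map convention used throughout this header: an absolute mailbox offset per command/return register, plus *_OFFSET and *_MASK helpers for the bit fields packed into it. A minimal sketch of how such definitions are typically combined when filling CODA_CMD_ENC_SEQ_OPTION; coda_write() stands in for the driver's register accessor and is an assumption here:

        /* Sketch only: compose the encoder sequence options from the *_OFFSET macros. */
        u32 option = 0;

        option |= 1 << CODA_OPTION_RCINTRAQP_OFFSET;    /* rate-control intra QP */
        option |= 1 << CODA7_OPTION_GAMMA_OFFSET;       /* CODA7: gamma enable */

        coda_write(dev, option, CODA_CMD_ENC_SEQ_OPTION);       /* assumed helper */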
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index e180ff7282d..04609cc6eba 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -1743,11 +1743,10 @@ static int vpbe_display_probe(struct platform_device *pdev)
printk(KERN_DEBUG "vpbe_display_probe\n");
/* Allocate memory for vpbe_display */
- disp_dev = kzalloc(sizeof(struct vpbe_display), GFP_KERNEL);
- if (!disp_dev) {
- printk(KERN_ERR "ran out of memory\n");
+ disp_dev = devm_kzalloc(&pdev->dev, sizeof(struct vpbe_display),
+ GFP_KERNEL);
+ if (!disp_dev)
return -ENOMEM;
- }
spin_lock_init(&disp_dev->dma_queue_lock);
/*
@@ -1786,26 +1785,24 @@ static int vpbe_display_probe(struct platform_device *pdev)
}
irq = res->start;
- if (request_irq(irq, venc_isr, IRQF_DISABLED, VPBE_DISPLAY_DRIVER,
- disp_dev)) {
+ err = devm_request_irq(&pdev->dev, irq, venc_isr, IRQF_DISABLED,
+ VPBE_DISPLAY_DRIVER, disp_dev);
+ if (err) {
v4l2_err(&disp_dev->vpbe_dev->v4l2_dev,
"Unable to request interrupt\n");
- err = -ENODEV;
goto probe_out;
}
for (i = 0; i < VPBE_DISPLAY_MAX_DEVICES; i++) {
if (register_device(disp_dev->dev[i], disp_dev, pdev)) {
err = -ENODEV;
- goto probe_out_irq;
+ goto probe_out;
}
}
printk(KERN_DEBUG "Successfully completed the probing of vpbe v4l2 device\n");
return 0;
-probe_out_irq:
- free_irq(res->start, disp_dev);
probe_out:
for (k = 0; k < VPBE_DISPLAY_MAX_DEVICES; k++) {
/* Get the pointer to the layer object */
@@ -1817,7 +1814,6 @@ probe_out:
kfree(disp_dev->dev[k]);
}
}
- kfree(disp_dev);
return err;
}
@@ -1830,15 +1826,10 @@ static int vpbe_display_remove(struct platform_device *pdev)
struct vpbe_layer *vpbe_display_layer;
struct vpbe_display *disp_dev = platform_get_drvdata(pdev);
struct vpbe_device *vpbe_dev = disp_dev->vpbe_dev;
- struct resource *res;
int i;
v4l2_dbg(1, debug, &vpbe_dev->v4l2_dev, "vpbe_display_remove\n");
- /* unregister irq */
- res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- free_irq(res->start, disp_dev);
-
/* deinitialize the vpbe display controller */
if (NULL != vpbe_dev->ops.deinitialize)
vpbe_dev->ops.deinitialize(&pdev->dev, vpbe_dev);
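The vpbe_display probe/remove changes above are a straight conversion to managed (devm_*) resources: once the allocation and the IRQ are device-managed, the manual free_irq()/kfree() calls in the error path and in remove() become unnecessary. A minimal sketch of the same pattern, with illustrative names:

        #include <linux/device.h>
        #include <linux/interrupt.h>
        #include <linux/platform_device.h>
        #include <linux/slab.h>

        struct foo_priv { int irq; };

        static irqreturn_t foo_isr(int irq, void *data)
        {
                return IRQ_HANDLED;
        }

        static int foo_probe(struct platform_device *pdev)
        {
                struct foo_priv *priv;
                int irq, ret;

                priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
                if (!priv)
                        return -ENOMEM;

                irq = platform_get_irq(pdev, 0);
                if (irq < 0)
                        return irq;

                ret = devm_request_irq(&pdev->dev, irq, foo_isr, 0, "foo", priv);
                if (ret)
                        return ret;     /* nothing to unwind by hand */

                priv->irq = irq;
                platform_set_drvdata(pdev, priv);
                return 0;               /* remove() needs no kfree()/free_irq() */
        }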
diff --git a/drivers/media/platform/davinci/vpbe_osd.c b/drivers/media/platform/davinci/vpbe_osd.c
index 6ed82e8b297..d053c2669c1 100644
--- a/drivers/media/platform/davinci/vpbe_osd.c
+++ b/drivers/media/platform/davinci/vpbe_osd.c
@@ -1547,61 +1547,36 @@ static int osd_probe(struct platform_device *pdev)
const struct platform_device_id *pdev_id;
struct osd_state *osd;
struct resource *res;
- int ret = 0;
- osd = kzalloc(sizeof(struct osd_state), GFP_KERNEL);
+ pdev_id = platform_get_device_id(pdev);
+ if (!pdev_id)
+ return -EINVAL;
+
+ osd = devm_kzalloc(&pdev->dev, sizeof(struct osd_state), GFP_KERNEL);
if (osd == NULL)
return -ENOMEM;
- pdev_id = platform_get_device_id(pdev);
- if (!pdev_id) {
- ret = -EINVAL;
- goto free_mem;
- }
osd->dev = &pdev->dev;
osd->vpbe_type = pdev_id->driver_data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(osd->dev, "Unable to get OSD register address map\n");
- ret = -ENODEV;
- goto free_mem;
- }
+ osd->osd_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(osd->osd_base))
+ return PTR_ERR(osd->osd_base);
+
osd->osd_base_phys = res->start;
osd->osd_size = resource_size(res);
- if (!request_mem_region(osd->osd_base_phys, osd->osd_size,
- MODULE_NAME)) {
- dev_err(osd->dev, "Unable to reserve OSD MMIO region\n");
- ret = -ENODEV;
- goto free_mem;
- }
- osd->osd_base = ioremap_nocache(res->start, osd->osd_size);
- if (!osd->osd_base) {
- dev_err(osd->dev, "Unable to map the OSD region\n");
- ret = -ENODEV;
- goto release_mem_region;
- }
spin_lock_init(&osd->lock);
osd->ops = osd_ops;
platform_set_drvdata(pdev, osd);
dev_notice(osd->dev, "OSD sub device probe success\n");
- return ret;
-release_mem_region:
- release_mem_region(osd->osd_base_phys, osd->osd_size);
-free_mem:
- kfree(osd);
- return ret;
+ return 0;
}
static int osd_remove(struct platform_device *pdev)
{
- struct osd_state *osd = platform_get_drvdata(pdev);
-
- iounmap((void *)osd->osd_base);
- release_mem_region(osd->osd_base_phys, osd->osd_size);
- kfree(osd);
return 0;
}
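The osd_probe() rewrite above shows the other half of the devm conversion: platform_get_resource() plus request_mem_region() plus ioremap_nocache(), with its ladder of error labels, collapses into a single devm_ioremap_resource() call, and osd_remove() loses the manual unmap/release. A minimal sketch of that idiom, with illustrative names:

        #include <linux/device.h>
        #include <linux/err.h>
        #include <linux/io.h>
        #include <linux/platform_device.h>

        static int bar_probe(struct platform_device *pdev)
        {
                struct resource *res;
                void __iomem *base;

                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                base = devm_ioremap_resource(&pdev->dev, res);  /* also rejects res == NULL */
                if (IS_ERR(base))
                        return PTR_ERR(base);

                /* use base; mapping and region are released automatically on detach */
                return 0;
        }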
diff --git a/drivers/media/platform/davinci/vpbe_venc.c b/drivers/media/platform/davinci/vpbe_venc.c
index 87eef9be08e..14a023a75d2 100644
--- a/drivers/media/platform/davinci/vpbe_venc.c
+++ b/drivers/media/platform/davinci/vpbe_venc.c
@@ -639,105 +639,46 @@ static int venc_probe(struct platform_device *pdev)
const struct platform_device_id *pdev_id;
struct venc_state *venc;
struct resource *res;
- int ret;
- venc = kzalloc(sizeof(struct venc_state), GFP_KERNEL);
+ if (!pdev->dev.platform_data) {
+ dev_err(&pdev->dev, "No platform data for VENC sub device");
+ return -EINVAL;
+ }
+
+ pdev_id = platform_get_device_id(pdev);
+ if (!pdev_id)
+ return -EINVAL;
+
+ venc = devm_kzalloc(&pdev->dev, sizeof(struct venc_state), GFP_KERNEL);
if (venc == NULL)
return -ENOMEM;
- pdev_id = platform_get_device_id(pdev);
- if (!pdev_id) {
- ret = -EINVAL;
- goto free_mem;
- }
venc->venc_type = pdev_id->driver_data;
venc->pdev = &pdev->dev;
venc->pdata = pdev->dev.platform_data;
- if (NULL == venc->pdata) {
- dev_err(venc->pdev, "Unable to get platform data for"
- " VENC sub device");
- ret = -ENOENT;
- goto free_mem;
- }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(venc->pdev,
- "Unable to get VENC register address map\n");
- ret = -ENODEV;
- goto free_mem;
- }
- if (!request_mem_region(res->start, resource_size(res), "venc")) {
- dev_err(venc->pdev, "Unable to reserve VENC MMIO region\n");
- ret = -ENODEV;
- goto free_mem;
- }
-
- venc->venc_base = ioremap_nocache(res->start, resource_size(res));
- if (!venc->venc_base) {
- dev_err(venc->pdev, "Unable to map VENC IO space\n");
- ret = -ENODEV;
- goto release_venc_mem_region;
- }
+ venc->venc_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(venc->venc_base))
+ return PTR_ERR(venc->venc_base);
if (venc->venc_type != VPBE_VERSION_1) {
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- dev_err(venc->pdev,
- "Unable to get VDAC_CONFIG address map\n");
- ret = -ENODEV;
- goto unmap_venc_io;
- }
-
- if (!request_mem_region(res->start,
- resource_size(res), "venc")) {
- dev_err(venc->pdev,
- "Unable to reserve VDAC_CONFIG MMIO region\n");
- ret = -ENODEV;
- goto unmap_venc_io;
- }
-
- venc->vdaccfg_reg = ioremap_nocache(res->start,
- resource_size(res));
- if (!venc->vdaccfg_reg) {
- dev_err(venc->pdev,
- "Unable to map VDAC_CONFIG IO space\n");
- ret = -ENODEV;
- goto release_vdaccfg_mem_region;
- }
+
+ venc->vdaccfg_reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(venc->vdaccfg_reg))
+ return PTR_ERR(venc->vdaccfg_reg);
}
spin_lock_init(&venc->lock);
platform_set_drvdata(pdev, venc);
dev_notice(venc->pdev, "VENC sub device probe success\n");
- return 0;
-release_vdaccfg_mem_region:
- release_mem_region(res->start, resource_size(res));
-unmap_venc_io:
- iounmap(venc->venc_base);
-release_venc_mem_region:
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-free_mem:
- kfree(venc);
- return ret;
+ return 0;
}
static int venc_remove(struct platform_device *pdev)
{
- struct venc_state *venc = platform_get_drvdata(pdev);
- struct resource *res;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- iounmap((void *)venc->venc_base);
- release_mem_region(res->start, resource_size(res));
- if (venc->venc_type != VPBE_VERSION_1) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- iounmap((void *)venc->vdaccfg_reg);
- release_mem_region(res->start, resource_size(res));
- }
- kfree(venc);
-
return 0;
}
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index 5514175bbd0..1089834a4ef 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -1799,19 +1799,15 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
/* Configure video port timings */
- std_info->eav2sav = bt->hbackporch + bt->hfrontporch +
- bt->hsync - 8;
+ std_info->eav2sav = V4L2_DV_BT_BLANKING_WIDTH(bt) - 8;
std_info->sav2eav = bt->width;
std_info->l1 = 1;
std_info->l3 = bt->vsync + bt->vbackporch + 1;
+ std_info->vsize = V4L2_DV_BT_FRAME_HEIGHT(bt);
if (bt->interlaced) {
if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) {
- std_info->vsize = bt->height * 2 +
- bt->vfrontporch + bt->vsync + bt->vbackporch +
- bt->il_vfrontporch + bt->il_vsync +
- bt->il_vbackporch;
std_info->l5 = std_info->vsize/2 -
(bt->vfrontporch - 1);
std_info->l7 = std_info->vsize/2 + 1;
@@ -1825,8 +1821,6 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
return -EINVAL;
}
} else {
- std_info->vsize = bt->height + bt->vfrontporch +
- bt->vsync + bt->vbackporch;
std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
}
strncpy(std_info->name, "Custom timings BT656/1120", VPIF_MAX_NAME);
@@ -1979,6 +1973,76 @@ vpif_init_free_channel_objects:
return err;
}
+static int vpif_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ int i;
+
+ for (i = 0; i < vpif_obj.config->subdev_count; i++)
+ if (!strcmp(vpif_obj.config->subdev_info[i].name,
+ subdev->name)) {
+ vpif_obj.sd[i] = subdev;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int vpif_probe_complete(void)
+{
+ struct common_obj *common;
+ struct channel_obj *ch;
+ int i, j, err, k;
+
+ for (j = 0; j < VPIF_CAPTURE_MAX_DEVICES; j++) {
+ ch = vpif_obj.dev[j];
+ ch->channel_id = j;
+ common = &(ch->common[VPIF_VIDEO_INDEX]);
+ spin_lock_init(&common->irqlock);
+ mutex_init(&common->lock);
+ ch->video_dev->lock = &common->lock;
+ /* Initialize prio member of channel object */
+ v4l2_prio_init(&ch->prio);
+ video_set_drvdata(ch->video_dev, ch);
+
+ /* select input 0 */
+ err = vpif_set_input(vpif_obj.config, ch, 0);
+ if (err)
+ goto probe_out;
+
+ err = video_register_device(ch->video_dev,
+ VFL_TYPE_GRABBER, (j ? 1 : 0));
+ if (err)
+ goto probe_out;
+ }
+
+ v4l2_info(&vpif_obj.v4l2_dev, "VPIF capture driver initialized\n");
+ return 0;
+
+probe_out:
+ for (k = 0; k < j; k++) {
+ /* Get the pointer to the channel object */
+ ch = vpif_obj.dev[k];
+ /* Unregister video device */
+ video_unregister_device(ch->video_dev);
+ }
+ kfree(vpif_obj.sd);
+ for (i = 0; i < VPIF_CAPTURE_MAX_DEVICES; i++) {
+ ch = vpif_obj.dev[i];
+ /* Note: does nothing if ch->video_dev == NULL */
+ video_device_release(ch->video_dev);
+ }
+ v4l2_device_unregister(&vpif_obj.v4l2_dev);
+
+ return err;
+}
+
+static int vpif_async_complete(struct v4l2_async_notifier *notifier)
+{
+ return vpif_probe_complete();
+}
+
/**
* vpif_probe : This function probes the vpif capture driver
* @pdev: platform device pointer
@@ -1989,12 +2053,10 @@ vpif_init_free_channel_objects:
static __init int vpif_probe(struct platform_device *pdev)
{
struct vpif_subdev_info *subdevdata;
- struct vpif_capture_config *config;
- int i, j, k, err;
+ int i, j, err;
int res_idx = 0;
struct i2c_adapter *i2c_adap;
struct channel_obj *ch;
- struct common_obj *common;
struct video_device *vfd;
struct resource *res;
int subdev_count;
@@ -2068,10 +2130,9 @@ static __init int vpif_probe(struct platform_device *pdev)
}
}
- i2c_adap = i2c_get_adapter(1);
- config = pdev->dev.platform_data;
+ vpif_obj.config = pdev->dev.platform_data;
- subdev_count = config->subdev_count;
+ subdev_count = vpif_obj.config->subdev_count;
vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count,
GFP_KERNEL);
if (vpif_obj.sd == NULL) {
@@ -2080,54 +2141,43 @@ static __init int vpif_probe(struct platform_device *pdev)
goto vpif_sd_error;
}
- for (i = 0; i < subdev_count; i++) {
- subdevdata = &config->subdev_info[i];
- vpif_obj.sd[i] =
- v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
- i2c_adap,
- &subdevdata->board_info,
- NULL);
-
- if (!vpif_obj.sd[i]) {
- vpif_err("Error registering v4l2 subdevice\n");
- err = -ENODEV;
+ if (!vpif_obj.config->asd_sizes) {
+ i2c_adap = i2c_get_adapter(1);
+ for (i = 0; i < subdev_count; i++) {
+ subdevdata = &vpif_obj.config->subdev_info[i];
+ vpif_obj.sd[i] =
+ v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
+ i2c_adap,
+ &subdevdata->board_info,
+ NULL);
+
+ if (!vpif_obj.sd[i]) {
+ vpif_err("Error registering v4l2 subdevice\n");
+ err = -ENOMEM;
+ goto probe_subdev_out;
+ }
+ v4l2_info(&vpif_obj.v4l2_dev,
+ "registered sub device %s\n",
+ subdevdata->name);
+ }
+ vpif_probe_complete();
+ } else {
+ vpif_obj.notifier.subdevs = vpif_obj.config->asd;
+ vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
+ vpif_obj.notifier.bound = vpif_async_bound;
+ vpif_obj.notifier.complete = vpif_async_complete;
+ err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
+ &vpif_obj.notifier);
+ if (err) {
+ vpif_err("Error registering async notifier\n");
+ err = -EINVAL;
goto probe_subdev_out;
}
- v4l2_info(&vpif_obj.v4l2_dev, "registered sub device %s\n",
- subdevdata->name);
}
- for (j = 0; j < VPIF_CAPTURE_MAX_DEVICES; j++) {
- ch = vpif_obj.dev[j];
- ch->channel_id = j;
- common = &(ch->common[VPIF_VIDEO_INDEX]);
- spin_lock_init(&common->irqlock);
- mutex_init(&common->lock);
- ch->video_dev->lock = &common->lock;
- /* Initialize prio member of channel object */
- v4l2_prio_init(&ch->prio);
- video_set_drvdata(ch->video_dev, ch);
-
- /* select input 0 */
- err = vpif_set_input(config, ch, 0);
- if (err)
- goto probe_out;
-
- err = video_register_device(ch->video_dev,
- VFL_TYPE_GRABBER, (j ? 1 : 0));
- if (err)
- goto probe_out;
- }
- v4l2_info(&vpif_obj.v4l2_dev, "VPIF capture driver initialized\n");
return 0;
-probe_out:
- for (k = 0; k < j; k++) {
- /* Get the pointer to the channel object */
- ch = vpif_obj.dev[k];
- /* Unregister video device */
- video_unregister_device(ch->video_dev);
- }
probe_subdev_out:
/* free sub devices memory */
kfree(vpif_obj.sd);
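The vpif_capture changes above split device bring-up into two paths: when the platform data provides asd/asd_sizes, subdevices are matched asynchronously, and the registration work previously done inline in probe() moves into vpif_probe_complete(), called from the notifier's complete() callback. A reduced sketch of the notifier wiring, mirroring the fields this patch uses (names illustrative):

        static int xxx_async_bound(struct v4l2_async_notifier *notifier,
                                   struct v4l2_subdev *subdev,
                                   struct v4l2_async_subdev *asd)
        {
                /* remember the subdev so complete() can finish registration */
                return 0;
        }

        static int xxx_async_complete(struct v4l2_async_notifier *notifier)
        {
                /* every expected subdev has bound: register video devices now */
                return 0;
        }

        /* in probe(), instead of calling v4l2_i2c_new_subdev_board() directly: */
        notifier.subdevs = config->asd;
        notifier.num_subdevs = config->asd_sizes[0];
        notifier.bound = xxx_async_bound;
        notifier.complete = xxx_async_complete;
        err = v4l2_async_notifier_register(&v4l2_dev, &notifier);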
diff --git a/drivers/media/platform/davinci/vpif_capture.h b/drivers/media/platform/davinci/vpif_capture.h
index 0ebb3126036..5a29d9a0cae 100644
--- a/drivers/media/platform/davinci/vpif_capture.h
+++ b/drivers/media/platform/davinci/vpif_capture.h
@@ -142,6 +142,8 @@ struct vpif_device {
struct v4l2_device v4l2_dev;
struct channel_obj *dev[VPIF_CAPTURE_NUM_CHANNELS];
struct v4l2_subdev **sd;
+ struct v4l2_async_notifier notifier;
+ struct vpif_capture_config *config;
};
struct vpif_config_params {
diff --git a/drivers/media/platform/davinci/vpif_display.c b/drivers/media/platform/davinci/vpif_display.c
index e6e57365025..c31bcf129a5 100644
--- a/drivers/media/platform/davinci/vpif_display.c
+++ b/drivers/media/platform/davinci/vpif_display.c
@@ -1436,19 +1436,15 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
/* Configure video port timings */
- std_info->eav2sav = bt->hbackporch + bt->hfrontporch +
- bt->hsync - 8;
+ std_info->eav2sav = V4L2_DV_BT_BLANKING_WIDTH(bt) - 8;
std_info->sav2eav = bt->width;
std_info->l1 = 1;
std_info->l3 = bt->vsync + bt->vbackporch + 1;
+ std_info->vsize = V4L2_DV_BT_FRAME_HEIGHT(bt);
if (bt->interlaced) {
if (bt->il_vbackporch || bt->il_vfrontporch || bt->il_vsync) {
- std_info->vsize = bt->height * 2 +
- bt->vfrontporch + bt->vsync + bt->vbackporch +
- bt->il_vfrontporch + bt->il_vsync +
- bt->il_vbackporch;
std_info->l5 = std_info->vsize/2 -
(bt->vfrontporch - 1);
std_info->l7 = std_info->vsize/2 + 1;
@@ -1462,8 +1458,6 @@ static int vpif_s_dv_timings(struct file *file, void *priv,
return -EINVAL;
}
} else {
- std_info->vsize = bt->height + bt->vfrontporch +
- bt->vsync + bt->vbackporch;
std_info->l5 = std_info->vsize - (bt->vfrontporch - 1);
}
strncpy(std_info->name, "Custom timings BT656/1120",
@@ -1618,6 +1612,102 @@ vpif_init_free_channel_objects:
return err;
}
+static int vpif_async_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ int i;
+
+ for (i = 0; i < vpif_obj.config->subdev_count; i++)
+ if (!strcmp(vpif_obj.config->subdevinfo[i].name,
+ subdev->name)) {
+ vpif_obj.sd[i] = subdev;
+ vpif_obj.sd[i]->grp_id = 1 << i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int vpif_probe_complete(void)
+{
+ struct common_obj *common;
+ struct channel_obj *ch;
+ int j, err, k;
+
+ for (j = 0; j < VPIF_DISPLAY_MAX_DEVICES; j++) {
+ ch = vpif_obj.dev[j];
+ /* Initialize field of the channel objects */
+ atomic_set(&ch->usrs, 0);
+ for (k = 0; k < VPIF_NUMOBJECTS; k++) {
+ ch->common[k].numbuffers = 0;
+ common = &ch->common[k];
+ common->io_usrs = 0;
+ common->started = 0;
+ spin_lock_init(&common->irqlock);
+ mutex_init(&common->lock);
+ common->numbuffers = 0;
+ common->set_addr = NULL;
+ common->ytop_off = 0;
+ common->ybtm_off = 0;
+ common->ctop_off = 0;
+ common->cbtm_off = 0;
+ common->cur_frm = NULL;
+ common->next_frm = NULL;
+ memset(&common->fmt, 0, sizeof(common->fmt));
+ common->numbuffers = config_params.numbuffers[k];
+ }
+ ch->initialized = 0;
+ if (vpif_obj.config->subdev_count)
+ ch->sd = vpif_obj.sd[0];
+ ch->channel_id = j;
+ if (j < 2)
+ ch->common[VPIF_VIDEO_INDEX].numbuffers =
+ config_params.numbuffers[ch->channel_id];
+ else
+ ch->common[VPIF_VIDEO_INDEX].numbuffers = 0;
+
+ memset(&ch->vpifparams, 0, sizeof(ch->vpifparams));
+
+ /* Initialize prio member of channel object */
+ v4l2_prio_init(&ch->prio);
+ ch->common[VPIF_VIDEO_INDEX].fmt.type =
+ V4L2_BUF_TYPE_VIDEO_OUTPUT;
+ ch->video_dev->lock = &common->lock;
+ video_set_drvdata(ch->video_dev, ch);
+
+ /* select output 0 */
+ err = vpif_set_output(vpif_obj.config, ch, 0);
+ if (err)
+ goto probe_out;
+
+ /* register video device */
+ vpif_dbg(1, debug, "channel=%x,channel->video_dev=%x\n",
+ (int)ch, (int)&ch->video_dev);
+
+ err = video_register_device(ch->video_dev,
+ VFL_TYPE_GRABBER, (j ? 3 : 2));
+ if (err < 0)
+ goto probe_out;
+ }
+
+ return 0;
+
+probe_out:
+ for (k = 0; k < j; k++) {
+ ch = vpif_obj.dev[k];
+ video_unregister_device(ch->video_dev);
+ video_device_release(ch->video_dev);
+ ch->video_dev = NULL;
+ }
+ return err;
+}
+
+static int vpif_async_complete(struct v4l2_async_notifier *notifier)
+{
+ return vpif_probe_complete();
+}
+
/*
* vpif_probe: This function creates device entries by register itself to the
* V4L2 driver and initializes fields of each channel objects
@@ -1625,11 +1715,9 @@ vpif_init_free_channel_objects:
static __init int vpif_probe(struct platform_device *pdev)
{
struct vpif_subdev_info *subdevdata;
- struct vpif_display_config *config;
- int i, j = 0, k, err = 0;
+ int i, j = 0, err = 0;
int res_idx = 0;
struct i2c_adapter *i2c_adap;
- struct common_obj *common;
struct channel_obj *ch;
struct video_device *vfd;
struct resource *res;
@@ -1708,11 +1796,9 @@ static __init int vpif_probe(struct platform_device *pdev)
size/2;
}
}
-
- i2c_adap = i2c_get_adapter(1);
- config = pdev->dev.platform_data;
- subdev_count = config->subdev_count;
- subdevdata = config->subdevinfo;
+ vpif_obj.config = pdev->dev.platform_data;
+ subdev_count = vpif_obj.config->subdev_count;
+ subdevdata = vpif_obj.config->subdevinfo;
vpif_obj.sd = kzalloc(sizeof(struct v4l2_subdev *) * subdev_count,
GFP_KERNEL);
if (vpif_obj.sd == NULL) {
@@ -1721,86 +1807,41 @@ static __init int vpif_probe(struct platform_device *pdev)
goto vpif_sd_error;
}
- for (i = 0; i < subdev_count; i++) {
- vpif_obj.sd[i] = v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
- i2c_adap,
- &subdevdata[i].board_info,
- NULL);
- if (!vpif_obj.sd[i]) {
- vpif_err("Error registering v4l2 subdevice\n");
- err = -ENODEV;
- goto probe_subdev_out;
- }
-
- if (vpif_obj.sd[i])
- vpif_obj.sd[i]->grp_id = 1 << i;
- }
-
- for (j = 0; j < VPIF_DISPLAY_MAX_DEVICES; j++) {
- ch = vpif_obj.dev[j];
- /* Initialize field of the channel objects */
- atomic_set(&ch->usrs, 0);
- for (k = 0; k < VPIF_NUMOBJECTS; k++) {
- ch->common[k].numbuffers = 0;
- common = &ch->common[k];
- common->io_usrs = 0;
- common->started = 0;
- spin_lock_init(&common->irqlock);
- mutex_init(&common->lock);
- common->numbuffers = 0;
- common->set_addr = NULL;
- common->ytop_off = common->ybtm_off = 0;
- common->ctop_off = common->cbtm_off = 0;
- common->cur_frm = common->next_frm = NULL;
- memset(&common->fmt, 0, sizeof(common->fmt));
- common->numbuffers = config_params.numbuffers[k];
+ if (!vpif_obj.config->asd_sizes) {
+ i2c_adap = i2c_get_adapter(1);
+ for (i = 0; i < subdev_count; i++) {
+ vpif_obj.sd[i] =
+ v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
+ i2c_adap,
+ &subdevdata[i].board_info,
+ NULL);
+ if (!vpif_obj.sd[i]) {
+ vpif_err("Error registering v4l2 subdevice\n");
+ err = -ENODEV;
+ goto probe_subdev_out;
+ }
+ if (vpif_obj.sd[i])
+ vpif_obj.sd[i]->grp_id = 1 << i;
+ }
+ vpif_probe_complete();
+ } else {
+ vpif_obj.notifier.subdevs = vpif_obj.config->asd;
+ vpif_obj.notifier.num_subdevs = vpif_obj.config->asd_sizes[0];
+ vpif_obj.notifier.bound = vpif_async_bound;
+ vpif_obj.notifier.complete = vpif_async_complete;
+ err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
+ &vpif_obj.notifier);
+ if (err) {
+ vpif_err("Error registering async notifier\n");
+ err = -EINVAL;
+ goto probe_subdev_out;
}
- ch->initialized = 0;
- if (subdev_count)
- ch->sd = vpif_obj.sd[0];
- ch->channel_id = j;
- if (j < 2)
- ch->common[VPIF_VIDEO_INDEX].numbuffers =
- config_params.numbuffers[ch->channel_id];
- else
- ch->common[VPIF_VIDEO_INDEX].numbuffers = 0;
-
- memset(&ch->vpifparams, 0, sizeof(ch->vpifparams));
-
- /* Initialize prio member of channel object */
- v4l2_prio_init(&ch->prio);
- ch->common[VPIF_VIDEO_INDEX].fmt.type =
- V4L2_BUF_TYPE_VIDEO_OUTPUT;
- ch->video_dev->lock = &common->lock;
- video_set_drvdata(ch->video_dev, ch);
-
- /* select output 0 */
- err = vpif_set_output(config, ch, 0);
- if (err)
- goto probe_out;
-
- /* register video device */
- vpif_dbg(1, debug, "channel=%x,channel->video_dev=%x\n",
- (int)ch, (int)&ch->video_dev);
-
- err = video_register_device(ch->video_dev,
- VFL_TYPE_GRABBER, (j ? 3 : 2));
- if (err < 0)
- goto probe_out;
}
- v4l2_info(&vpif_obj.v4l2_dev,
- " VPIF display driver initialized\n");
return 0;
-probe_out:
- for (k = 0; k < j; k++) {
- ch = vpif_obj.dev[k];
- video_unregister_device(ch->video_dev);
- video_device_release(ch->video_dev);
- ch->video_dev = NULL;
- }
probe_subdev_out:
kfree(vpif_obj.sd);
vpif_sd_error:
diff --git a/drivers/media/platform/davinci/vpif_display.h b/drivers/media/platform/davinci/vpif_display.h
index 5d87fc86e58..4d0485b99a8 100644
--- a/drivers/media/platform/davinci/vpif_display.h
+++ b/drivers/media/platform/davinci/vpif_display.h
@@ -148,7 +148,8 @@ struct vpif_device {
struct v4l2_device v4l2_dev;
struct channel_obj *dev[VPIF_DISPLAY_NUM_CHANNELS];
struct v4l2_subdev **sd;
-
+ struct v4l2_async_notifier notifier;
+ struct vpif_display_config *config;
};
struct vpif_config_params {
diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
index 8a2f01e344e..31120b4a4a3 100644
--- a/drivers/media/platform/davinci/vpss.c
+++ b/drivers/media/platform/davinci/vpss.c
@@ -21,6 +21,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
+#include <linux/err.h>
#include <media/davinci/vpss.h>
@@ -404,9 +405,8 @@ EXPORT_SYMBOL(dm365_vpss_set_pg_frame_size);
static int vpss_probe(struct platform_device *pdev)
{
- struct resource *r1, *r2;
+ struct resource *res;
char *platform_name;
- int status;
if (!pdev->dev.platform_data) {
dev_err(&pdev->dev, "no platform data\n");
@@ -427,38 +427,19 @@ static int vpss_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "%s vpss probed\n", platform_name);
- r1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r1)
- return -ENOENT;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- r1 = request_mem_region(r1->start, resource_size(r1), r1->name);
- if (!r1)
- return -EBUSY;
-
- oper_cfg.vpss_regs_base0 = ioremap(r1->start, resource_size(r1));
- if (!oper_cfg.vpss_regs_base0) {
- status = -EBUSY;
- goto fail1;
- }
+ oper_cfg.vpss_regs_base0 = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(oper_cfg.vpss_regs_base0))
+ return PTR_ERR(oper_cfg.vpss_regs_base0);
if (oper_cfg.platform == DM355 || oper_cfg.platform == DM365) {
- r2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!r2) {
- status = -ENOENT;
- goto fail2;
- }
- r2 = request_mem_region(r2->start, resource_size(r2), r2->name);
- if (!r2) {
- status = -EBUSY;
- goto fail2;
- }
-
- oper_cfg.vpss_regs_base1 = ioremap(r2->start,
- resource_size(r2));
- if (!oper_cfg.vpss_regs_base1) {
- status = -EBUSY;
- goto fail3;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+
+ oper_cfg.vpss_regs_base1 = devm_ioremap_resource(&pdev->dev,
+ res);
+ if (IS_ERR(oper_cfg.vpss_regs_base1))
+ return PTR_ERR(oper_cfg.vpss_regs_base1);
}
if (oper_cfg.platform == DM355) {
@@ -493,30 +474,13 @@ static int vpss_probe(struct platform_device *pdev)
spin_lock_init(&oper_cfg.vpss_lock);
dev_info(&pdev->dev, "%s vpss probe success\n", platform_name);
- return 0;
-fail3:
- release_mem_region(r2->start, resource_size(r2));
-fail2:
- iounmap(oper_cfg.vpss_regs_base0);
-fail1:
- release_mem_region(r1->start, resource_size(r1));
- return status;
+ return 0;
}
static int vpss_remove(struct platform_device *pdev)
{
- struct resource *res;
-
pm_runtime_disable(&pdev->dev);
- iounmap(oper_cfg.vpss_regs_base0);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
- if (oper_cfg.platform == DM355 || oper_cfg.platform == DM365) {
- iounmap(oper_cfg.vpss_regs_base1);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- release_mem_region(res->start, resource_size(res));
- }
return 0;
}
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index 559fab2a2d6..9d0cc04d7ab 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -1122,10 +1122,14 @@ static int gsc_probe(struct platform_device *pdev)
goto err_clk;
}
- ret = gsc_register_m2m_device(gsc);
+ ret = v4l2_device_register(dev, &gsc->v4l2_dev);
if (ret)
goto err_clk;
+ ret = gsc_register_m2m_device(gsc);
+ if (ret)
+ goto err_v4l2;
+
platform_set_drvdata(pdev, gsc);
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(&pdev->dev);
@@ -1147,6 +1151,8 @@ err_pm:
pm_runtime_put(dev);
err_m2m:
gsc_unregister_m2m_device(gsc);
+err_v4l2:
+ v4l2_device_unregister(&gsc->v4l2_dev);
err_clk:
gsc_clk_put(gsc);
return ret;
@@ -1157,6 +1163,7 @@ static int gsc_remove(struct platform_device *pdev)
struct gsc_dev *gsc = platform_get_drvdata(pdev);
gsc_unregister_m2m_device(gsc);
+ v4l2_device_unregister(&gsc->v4l2_dev);
vb2_dma_contig_cleanup_ctx(gsc->alloc_ctx);
pm_runtime_disable(&pdev->dev);
@@ -1210,12 +1217,12 @@ static int gsc_resume(struct device *dev)
spin_unlock_irqrestore(&gsc->slock, flags);
return 0;
}
- gsc_hw_set_sw_reset(gsc);
- gsc_wait_reset(gsc);
-
spin_unlock_irqrestore(&gsc->slock, flags);
- return gsc_m2m_resume(gsc);
+ if (!pm_runtime_suspended(dev))
+ return gsc_runtime_resume(dev);
+
+ return 0;
}
static int gsc_suspend(struct device *dev)
@@ -1227,7 +1234,10 @@ static int gsc_suspend(struct device *dev)
if (test_and_set_bit(ST_SUSPEND, &gsc->state))
return 0;
- return gsc_m2m_suspend(gsc);
+ if (!pm_runtime_suspended(dev))
+ return gsc_runtime_suspend(dev);
+
+ return 0;
}
static const struct dev_pm_ops gsc_pm_ops = {
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
index cc19bba09bd..76435d3bf62 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.h
+++ b/drivers/media/platform/exynos-gsc/gsc-core.h
@@ -343,6 +343,7 @@ struct gsc_dev {
unsigned long state;
struct vb2_alloc_ctx *alloc_ctx;
struct video_device vdev;
+ struct v4l2_device v4l2_dev;
};
/**
diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
index 40a73f7d20d..e576ff2de3d 100644
--- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
+++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
@@ -751,6 +751,7 @@ int gsc_register_m2m_device(struct gsc_dev *gsc)
gsc->vdev.release = video_device_release_empty;
gsc->vdev.lock = &gsc->lock;
gsc->vdev.vfl_dir = VFL_DIR_M2M;
+ gsc->vdev.v4l2_dev = &gsc->v4l2_dev;
snprintf(gsc->vdev.name, sizeof(gsc->vdev.name), "%s.%d:m2m",
GSC_MODULE_NAME, gsc->id);
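The exynos-gsc hunks above give the driver its own struct v4l2_device, register it before the mem2mem video node, and point vdev.v4l2_dev at it; the unwind path gains a matching v4l2_device_unregister(). A condensed sketch of that ordering, following the calls the patch adds:

        ret = v4l2_device_register(&pdev->dev, &gsc->v4l2_dev);
        if (ret)
                return ret;

        gsc->vdev.v4l2_dev = &gsc->v4l2_dev;    /* set before registering the node */
        ret = video_register_device(&gsc->vdev, VFL_TYPE_GRABBER, -1);
        if (ret)
                v4l2_device_unregister(&gsc->v4l2_dev);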
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index 6489c5160ee..3d66d88ea3a 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -1110,6 +1110,8 @@ static int fimc_remove(struct platform_device *pdev)
struct fimc_dev *fimc = platform_get_drvdata(pdev);
pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ clk_disable(fimc->clock[CLK_GATE]);
pm_runtime_set_suspended(&pdev->dev);
fimc_unregister_capture_subdev(fimc);
diff --git a/drivers/media/platform/exynos4-is/fimc-is-i2c.c b/drivers/media/platform/exynos4-is/fimc-is-i2c.c
index 617a798d923..371cad4fcce 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-i2c.c
+++ b/drivers/media/platform/exynos4-is/fimc-is-i2c.c
@@ -12,7 +12,7 @@
#include <linux/clk.h>
#include <linux/module.h>
-#include <linux/of_i2c.h>
+#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
@@ -67,8 +67,6 @@ static int fimc_is_i2c_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_enable(&i2c_adap->dev);
- of_i2c_register_devices(i2c_adap);
-
return 0;
}
@@ -83,21 +81,46 @@ static int fimc_is_i2c_remove(struct platform_device *pdev)
return 0;
}
-static int fimc_is_i2c_suspend(struct device *dev)
+#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
+static int fimc_is_i2c_runtime_suspend(struct device *dev)
{
struct fimc_is_i2c *isp_i2c = dev_get_drvdata(dev);
+
clk_disable_unprepare(isp_i2c->clock);
return 0;
}
-static int fimc_is_i2c_resume(struct device *dev)
+static int fimc_is_i2c_runtime_resume(struct device *dev)
{
struct fimc_is_i2c *isp_i2c = dev_get_drvdata(dev);
+
return clk_prepare_enable(isp_i2c->clock);
}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int fimc_is_i2c_suspend(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return fimc_is_i2c_runtime_suspend(dev);
+}
+
+static int fimc_is_i2c_resume(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return fimc_is_i2c_runtime_resume(dev);
+}
+#endif
-static UNIVERSAL_DEV_PM_OPS(fimc_is_i2c_pm_ops, fimc_is_i2c_suspend,
- fimc_is_i2c_resume, NULL);
+static struct dev_pm_ops fimc_is_i2c_pm_ops = {
+ SET_RUNTIME_PM_OPS(fimc_is_i2c_runtime_suspend,
+ fimc_is_i2c_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(fimc_is_i2c_suspend, fimc_is_i2c_resume)
+};
static const struct of_device_id fimc_is_i2c_of_match[] = {
{ .compatible = FIMC_IS_I2C_COMPATIBLE },
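The fimc-is-i2c change above is a common PM restructuring: the clock gating moves into runtime-PM callbacks, and the system-sleep callbacks simply skip the work when the device is already runtime-suspended. The generic shape, with illustrative names:

        #include <linux/device.h>
        #include <linux/pm.h>
        #include <linux/pm_runtime.h>

        static int foo_runtime_suspend(struct device *dev)
        {
                /* gate clocks, drop resources */
                return 0;
        }

        static int foo_runtime_resume(struct device *dev)
        {
                /* ungate clocks */
                return 0;
        }

        static int foo_suspend(struct device *dev)
        {
                if (pm_runtime_suspended(dev))  /* already off: nothing to do */
                        return 0;
                return foo_runtime_suspend(dev);
        }

        static int foo_resume(struct device *dev)
        {
                if (pm_runtime_suspended(dev))
                        return 0;
                return foo_runtime_resume(dev);
        }

        static const struct dev_pm_ops foo_pm_ops = {
                SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
                SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
        };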
diff --git a/drivers/media/platform/exynos4-is/fimc-is-param.c b/drivers/media/platform/exynos4-is/fimc-is-param.c
index c7e7f694c6e..9bf3ddd9e02 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-param.c
+++ b/drivers/media/platform/exynos4-is/fimc-is-param.c
@@ -56,7 +56,7 @@ static void __fimc_is_hw_update_param_sensor_framerate(struct fimc_is *is)
__hw_param_copy(dst, src);
}
-int __fimc_is_hw_update_param(struct fimc_is *is, u32 offset)
+static int __fimc_is_hw_update_param(struct fimc_is *is, u32 offset)
{
struct is_param_region *par = &is->is_p_region->parameter;
struct chain_config *cfg = &is->config[is->config_index];
@@ -287,7 +287,7 @@ void __is_set_sensor(struct fimc_is *is, int fps)
fimc_is_set_param_bit(is, PARAM_ISP_OTF_INPUT);
}
-void __is_set_init_isp_aa(struct fimc_is *is)
+static void __maybe_unused __is_set_init_isp_aa(struct fimc_is *is)
{
struct isp_param *isp;
diff --git a/drivers/media/platform/exynos4-is/fimc-is-regs.c b/drivers/media/platform/exynos4-is/fimc-is-regs.c
index 63c68ec7cfa..f758e2694fa 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-regs.c
+++ b/drivers/media/platform/exynos4-is/fimc-is-regs.c
@@ -96,7 +96,7 @@ int fimc_is_hw_set_param(struct fimc_is *is)
return 0;
}
-int fimc_is_hw_set_tune(struct fimc_is *is)
+static int __maybe_unused fimc_is_hw_set_tune(struct fimc_is *is)
{
fimc_is_hw_wait_intmsr0_intmsd0(is);
@@ -236,7 +236,7 @@ int fimc_is_itf_mode_change(struct fimc_is *is)
fimc_is_hw_change_mode(is);
ret = fimc_is_wait_event(is, IS_ST_CHANGE_MODE, 1,
FIMC_IS_CONFIG_TIMEOUT);
- if (!ret < 0)
+ if (ret < 0)
dev_err(&is->pdev->dev, "%s(): mode change (%d) timeout\n",
__func__, is->config_index);
return ret;
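The one-character change in fimc_is_itf_mode_change() above deserves a note: '!' binds tighter than '<', so the old condition compared (!ret), which is always 0 or 1, against 0 and could never be true, and the timeout was never reported. A tiny illustration:

        #include <linux/printk.h>

        static void precedence_demo(int ret)
        {
                if (!ret < 0)           /* parsed as ((!ret) < 0): never true */
                        pr_err("unreachable\n");

                if (ret < 0)            /* the intended test */
                        pr_err("mode change timeout\n");
        }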
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 967f6a93934..9770fa98d6a 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -21,7 +21,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_i2c.h>
+#include <linux/i2c.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
@@ -993,3 +993,4 @@ module_exit(fimc_is_module_exit);
MODULE_ALIAS("platform:" FIMC_IS_DRV_NAME);
MODULE_AUTHOR("Younghwan Joo <yhwan.joo@samsung.com>");
MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
index cf520a7d7f7..d2e6cba3566 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -672,6 +672,8 @@ int fimc_isp_subdev_create(struct fimc_isp *isp)
mutex_init(&isp->subdev_lock);
v4l2_subdev_init(sd, &fimc_is_subdev_ops);
+
+ sd->owner = THIS_MODULE;
sd->grp_id = GRP_ID_FIMC_IS;
sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
snprintf(sd->name, sizeof(sd->name), "FIMC-IS-ISP");
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index 08fbfedea90..e5798f70d14 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -90,7 +90,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
.name = "RAW10 (GRBG)",
.fourcc = V4L2_PIX_FMT_SGRBG10,
.colorspace = V4L2_COLORSPACE_SRGB,
- .depth = { 10 },
+ .depth = { 16 },
.color = FIMC_FMT_RAW10,
.memplanes = 1,
.mbus_code = V4L2_MBUS_FMT_SGRBG10_1X10,
@@ -99,7 +99,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
.name = "RAW12 (GRBG)",
.fourcc = V4L2_PIX_FMT_SGRBG12,
.colorspace = V4L2_COLORSPACE_SRGB,
- .depth = { 12 },
+ .depth = { 16 },
.color = FIMC_FMT_RAW12,
.memplanes = 1,
.mbus_code = V4L2_MBUS_FMT_SGRBG12_1X12,
@@ -1504,16 +1504,17 @@ static int fimc_lite_probe(struct platform_device *pdev)
struct resource *res;
int ret;
+ if (!dev->of_node)
+ return -ENODEV;
+
fimc = devm_kzalloc(dev, sizeof(*fimc), GFP_KERNEL);
if (!fimc)
return -ENOMEM;
- if (dev->of_node) {
- of_id = of_match_node(flite_of_match, dev->of_node);
- if (of_id)
- drv_data = (struct flite_drvdata *)of_id->data;
- fimc->index = of_alias_get_id(dev->of_node, "fimc-lite");
- }
+ of_id = of_match_node(flite_of_match, dev->of_node);
+ if (of_id)
+ drv_data = (struct flite_drvdata *)of_id->data;
+ fimc->index = of_alias_get_id(dev->of_node, "fimc-lite");
if (!drv_data || fimc->index >= drv_data->num_instances ||
fimc->index < 0) {
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 19f556c5957..a8351127831 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -20,7 +20,6 @@
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
-#include <linux/of_i2c.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/types.h>
@@ -1150,7 +1149,6 @@ static void fimc_md_put_clocks(struct fimc_md *fmd)
while (--i >= 0) {
if (IS_ERR(fmd->camclk[i].clock))
continue;
- clk_unprepare(fmd->camclk[i].clock);
clk_put(fmd->camclk[i].clock);
fmd->camclk[i].clock = ERR_PTR(-EINVAL);
}
@@ -1169,7 +1167,7 @@ static int fimc_md_get_clocks(struct fimc_md *fmd)
struct device *dev = NULL;
char clk_name[32];
struct clk *clock;
- int ret, i;
+ int i, ret = 0;
for (i = 0; i < FIMC_MAX_CAMCLKS; i++)
fmd->camclk[i].clock = ERR_PTR(-EINVAL);
@@ -1187,12 +1185,6 @@ static int fimc_md_get_clocks(struct fimc_md *fmd)
ret = PTR_ERR(clock);
break;
}
- ret = clk_prepare(clock);
- if (ret < 0) {
- clk_put(clock);
- fmd->camclk[i].clock = ERR_PTR(-EINVAL);
- break;
- }
fmd->camclk[i].clock = clock;
}
if (ret)
@@ -1249,7 +1241,7 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
ret = pm_runtime_get_sync(fmd->pmf);
if (ret < 0)
return ret;
- ret = clk_enable(camclk->clock);
+ ret = clk_prepare_enable(camclk->clock);
dbg("Enabled camclk %d: f: %lu", si->clk_id,
clk_get_rate(camclk->clock));
}
@@ -1260,7 +1252,7 @@ static int __fimc_md_set_camclk(struct fimc_md *fmd,
return 0;
if (--camclk->use_count == 0) {
- clk_disable(camclk->clock);
+ clk_disable_unprepare(camclk->clock);
pm_runtime_put(fmd->pmf);
dbg("Disabled camclk %d", si->clk_id);
}
@@ -1530,9 +1522,9 @@ static int fimc_md_probe(struct platform_device *pdev)
err_unlock:
mutex_unlock(&fmd->media_dev.graph_mutex);
err_clk:
- media_device_unregister(&fmd->media_dev);
fimc_md_put_clocks(fmd);
fimc_md_unregister_entities(fmd);
+ media_device_unregister(&fmd->media_dev);
err_md:
v4l2_device_unregister(&fmd->v4l2_dev);
return ret;
@@ -1544,6 +1536,8 @@ static int fimc_md_remove(struct platform_device *pdev)
if (!fmd)
return 0;
+
+ v4l2_device_unregister(&fmd->v4l2_dev);
device_remove_file(&pdev->dev, &dev_attr_subdev_conf_mode);
fimc_md_unregister_entities(fmd);
fimc_md_pipelines_free(fmd);
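The media-dev.c clock changes above drop the separate clk_prepare() done at lookup time and switch the use sites to the combined clk_prepare_enable()/clk_disable_unprepare() pair. A small sketch of that pairing (clock name illustrative):

        #include <linux/clk.h>
        #include <linux/device.h>
        #include <linux/err.h>

        static int enable_cam_clock(struct device *dev, struct clk **out)
        {
                struct clk *clk;
                int ret;

                clk = devm_clk_get(dev, "sclk_cam0");   /* name is illustrative */
                if (IS_ERR(clk))
                        return PTR_ERR(clk);

                ret = clk_prepare_enable(clk);          /* prepare + enable in one call */
                if (ret)
                        return ret;

                *out = clk;
                return 0;       /* balance later with clk_disable_unprepare() */
        }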
diff --git a/drivers/media/platform/marvell-ccic/cafe-driver.c b/drivers/media/platform/marvell-ccic/cafe-driver.c
index 1f079ff33d4..56284536124 100644
--- a/drivers/media/platform/marvell-ccic/cafe-driver.c
+++ b/drivers/media/platform/marvell-ccic/cafe-driver.c
@@ -399,7 +399,7 @@ static void cafe_ctlr_init(struct mcam_camera *mcam)
}
-static void cafe_ctlr_power_up(struct mcam_camera *mcam)
+static int cafe_ctlr_power_up(struct mcam_camera *mcam)
{
/*
* Part one of the sensor dance: turn the global
@@ -414,6 +414,8 @@ static void cafe_ctlr_power_up(struct mcam_camera *mcam)
*/
mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN); /* pwr up, reset */
mcam_reg_write(mcam, REG_GPR, GPR_C1EN|GPR_C0EN|GPR_C0);
+
+ return 0;
}
static void cafe_ctlr_power_down(struct mcam_camera *mcam)
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index 0821ed08c12..5184887b155 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -19,6 +19,7 @@
#include <linux/delay.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
+#include <linux/clk.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
@@ -93,6 +94,9 @@ MODULE_PARM_DESC(buffer_mode,
#define CF_CONFIG_NEEDED 4 /* Must configure hardware */
#define CF_SINGLE_BUFFER 5 /* Running with a single buffer */
#define CF_SG_RESTART 6 /* SG restart needed */
+#define CF_FRAME_SOF0 7 /* Frame 0 started */
+#define CF_FRAME_SOF1 8
+#define CF_FRAME_SOF2 9
#define sensor_call(cam, o, f, args...) \
v4l2_subdev_call(cam->sensor, o, f, ##args)
@@ -101,6 +105,7 @@ static struct mcam_format_struct {
__u8 *desc;
__u32 pixelformat;
int bpp; /* Bytes per pixel */
+ bool planar;
enum v4l2_mbus_pixelcode mbus_code;
} mcam_formats[] = {
{
@@ -108,24 +113,56 @@ static struct mcam_format_struct {
.pixelformat = V4L2_PIX_FMT_YUYV,
.mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
.bpp = 2,
+ .planar = false,
+ },
+ {
+ .desc = "UYVY 4:2:2",
+ .pixelformat = V4L2_PIX_FMT_UYVY,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ .planar = false,
+ },
+ {
+ .desc = "YUV 4:2:2 PLANAR",
+ .pixelformat = V4L2_PIX_FMT_YUV422P,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ .planar = true,
+ },
+ {
+ .desc = "YUV 4:2:0 PLANAR",
+ .pixelformat = V4L2_PIX_FMT_YUV420,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ .planar = true,
+ },
+ {
+ .desc = "YVU 4:2:0 PLANAR",
+ .pixelformat = V4L2_PIX_FMT_YVU420,
+ .mbus_code = V4L2_MBUS_FMT_YUYV8_2X8,
+ .bpp = 2,
+ .planar = true,
},
{
.desc = "RGB 444",
.pixelformat = V4L2_PIX_FMT_RGB444,
.mbus_code = V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE,
.bpp = 2,
+ .planar = false,
},
{
.desc = "RGB 565",
.pixelformat = V4L2_PIX_FMT_RGB565,
.mbus_code = V4L2_MBUS_FMT_RGB565_2X8_LE,
.bpp = 2,
+ .planar = false,
},
{
.desc = "Raw RGB Bayer",
.pixelformat = V4L2_PIX_FMT_SBGGR8,
.mbus_code = V4L2_MBUS_FMT_SBGGR8_1X8,
- .bpp = 1
+ .bpp = 1,
+ .planar = false,
},
};
#define N_MCAM_FMTS ARRAY_SIZE(mcam_formats)
@@ -168,6 +205,12 @@ struct mcam_dma_desc {
u32 segment_len;
};
+struct yuv_pointer_t {
+ dma_addr_t y;
+ dma_addr_t u;
+ dma_addr_t v;
+};
+
/*
* Our buffer type for working with videobuf2. Note that the vb2
* developers have decreed that struct vb2_buffer must be at the
@@ -179,6 +222,7 @@ struct mcam_vb_buffer {
struct mcam_dma_desc *dma_desc; /* Descriptor virtual address */
dma_addr_t dma_desc_pa; /* Descriptor physical address */
int dma_desc_nent; /* Number of mapped descriptors */
+ struct yuv_pointer_t yuv_p;
};
static inline struct mcam_vb_buffer *vb_to_mvb(struct vb2_buffer *vb)
@@ -219,8 +263,10 @@ static void mcam_reset_buffers(struct mcam_camera *cam)
int i;
cam->next_buf = -1;
- for (i = 0; i < cam->nbufs; i++)
+ for (i = 0; i < cam->nbufs; i++) {
clear_bit(i, &cam->flags);
+ clear_bit(CF_FRAME_SOF0 + i, &cam->flags);
+ }
}
static inline int mcam_needs_config(struct mcam_camera *cam)
@@ -253,6 +299,45 @@ static void mcam_ctlr_stop(struct mcam_camera *cam)
mcam_reg_clear_bit(cam, REG_CTRL0, C0_ENABLE);
}
+static void mcam_enable_mipi(struct mcam_camera *mcam)
+{
+ /* MIPI mode: program the DPHY values and enable the MIPI interface */
+ cam_dbg(mcam, "camera: DPHY3=0x%x, DPHY5=0x%x, DPHY6=0x%x\n",
+ mcam->dphy[0], mcam->dphy[1], mcam->dphy[2]);
+ mcam_reg_write(mcam, REG_CSI2_DPHY3, mcam->dphy[0]);
+ mcam_reg_write(mcam, REG_CSI2_DPHY5, mcam->dphy[1]);
+ mcam_reg_write(mcam, REG_CSI2_DPHY6, mcam->dphy[2]);
+
+ if (!mcam->mipi_enabled) {
+ if (mcam->lane > 4 || mcam->lane <= 0) {
+ cam_warn(mcam, "lane number error\n");
+ mcam->lane = 1; /* set the default value */
+ }
+ /*
+ * 0x41 activates 1 lane
+ * 0x43 activates 2 lanes
+ * 0x45 activates 3 lanes (never happens)
+ * 0x47 activates 4 lanes
+ */
+ mcam_reg_write(mcam, REG_CSI2_CTRL0,
+ CSI2_C0_MIPI_EN | CSI2_C0_ACT_LANE(mcam->lane));
+ mcam_reg_write(mcam, REG_CLKCTRL,
+ (mcam->mclk_src << 29) | mcam->mclk_div);
+
+ mcam->mipi_enabled = true;
+ }
+}
+
+static void mcam_disable_mipi(struct mcam_camera *mcam)
+{
+ /* Parallel mode: disable the MIPI interface */
+ mcam_reg_write(mcam, REG_CSI2_CTRL0, 0x0);
+ mcam_reg_write(mcam, REG_CSI2_DPHY3, 0x0);
+ mcam_reg_write(mcam, REG_CSI2_DPHY5, 0x0);
+ mcam_reg_write(mcam, REG_CSI2_DPHY6, 0x0);
+ mcam->mipi_enabled = false;
+}
+
/* ------------------------------------------------------------------- */
#ifdef MCAM_MODE_VMALLOC
@@ -425,6 +510,15 @@ static inline int mcam_check_dma_buffers(struct mcam_camera *cam)
/*
* DMA-contiguous code.
*/
+
+static bool mcam_fmt_is_planar(__u32 pfmt)
+{
+ struct mcam_format_struct *f;
+
+ f = mcam_find_format(pfmt);
+ return f->planar;
+}
+
/*
* Set up a contiguous buffer for the given frame. Here also is where
* the underrun strategy is set: if there is no buffer available, reuse
@@ -436,27 +530,58 @@ static inline int mcam_check_dma_buffers(struct mcam_camera *cam)
static void mcam_set_contig_buffer(struct mcam_camera *cam, int frame)
{
struct mcam_vb_buffer *buf;
+ struct v4l2_pix_format *fmt = &cam->pix_format;
+ dma_addr_t dma_handle;
+ u32 pixel_count = fmt->width * fmt->height;
+ struct vb2_buffer *vb;
+
/*
* If there are no available buffers, go into single mode
*/
if (list_empty(&cam->buffers)) {
buf = cam->vb_bufs[frame ^ 0x1];
- cam->vb_bufs[frame] = buf;
- mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR,
- vb2_dma_contig_plane_dma_addr(&buf->vb_buf, 0));
set_bit(CF_SINGLE_BUFFER, &cam->flags);
cam->frame_state.singles++;
- return;
+ } else {
+ /*
+ * OK, we have a buffer we can use.
+ */
+ buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer,
+ queue);
+ list_del_init(&buf->queue);
+ clear_bit(CF_SINGLE_BUFFER, &cam->flags);
}
- /*
- * OK, we have a buffer we can use.
- */
- buf = list_first_entry(&cam->buffers, struct mcam_vb_buffer, queue);
- list_del_init(&buf->queue);
- mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR,
- vb2_dma_contig_plane_dma_addr(&buf->vb_buf, 0));
+
cam->vb_bufs[frame] = buf;
- clear_bit(CF_SINGLE_BUFFER, &cam->flags);
+ vb = &buf->vb_buf;
+
+ dma_handle = vb2_dma_contig_plane_dma_addr(vb, 0);
+ buf->yuv_p.y = dma_handle;
+
+ switch (cam->pix_format.pixelformat) {
+ case V4L2_PIX_FMT_YUV422P:
+ buf->yuv_p.u = buf->yuv_p.y + pixel_count;
+ buf->yuv_p.v = buf->yuv_p.u + pixel_count / 2;
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ buf->yuv_p.u = buf->yuv_p.y + pixel_count;
+ buf->yuv_p.v = buf->yuv_p.u + pixel_count / 4;
+ break;
+ case V4L2_PIX_FMT_YVU420:
+ buf->yuv_p.v = buf->yuv_p.y + pixel_count;
+ buf->yuv_p.u = buf->yuv_p.v + pixel_count / 4;
+ break;
+ default:
+ break;
+ }
+
+ mcam_reg_write(cam, frame == 0 ? REG_Y0BAR : REG_Y1BAR, buf->yuv_p.y);
+ if (mcam_fmt_is_planar(fmt->pixelformat)) {
+ mcam_reg_write(cam, frame == 0 ?
+ REG_U0BAR : REG_U1BAR, buf->yuv_p.u);
+ mcam_reg_write(cam, frame == 0 ?
+ REG_V0BAR : REG_V1BAR, buf->yuv_p.v);
+ }
}
/*
@@ -614,48 +739,90 @@ static inline void mcam_sg_restart(struct mcam_camera *cam)
*/
static void mcam_ctlr_image(struct mcam_camera *cam)
{
- int imgsz;
struct v4l2_pix_format *fmt = &cam->pix_format;
+ u32 widthy = 0, widthuv = 0, imgsz_h, imgsz_w;
+
+ cam_dbg(cam, "camera: bytesperline = %d; height = %d\n",
+ fmt->bytesperline, fmt->sizeimage / fmt->bytesperline);
+ imgsz_h = (fmt->height << IMGSZ_V_SHIFT) & IMGSZ_V_MASK;
+ imgsz_w = (fmt->width * 2) & IMGSZ_H_MASK;
+
+ switch (fmt->pixelformat) {
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_UYVY:
+ widthy = fmt->width * 2;
+ widthuv = 0;
+ break;
+ case V4L2_PIX_FMT_JPEG:
+ imgsz_h = (fmt->sizeimage / fmt->bytesperline) << IMGSZ_V_SHIFT;
+ widthy = fmt->bytesperline;
+ widthuv = 0;
+ break;
+ case V4L2_PIX_FMT_YUV422P:
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ widthy = fmt->width;
+ widthuv = fmt->width / 2;
+ break;
+ default:
+ widthy = fmt->bytesperline;
+ widthuv = 0;
+ }
+
+ mcam_reg_write_mask(cam, REG_IMGPITCH, widthuv << 16 | widthy,
+ IMGP_YP_MASK | IMGP_UVP_MASK);
+ mcam_reg_write(cam, REG_IMGSIZE, imgsz_h | imgsz_w);
+ mcam_reg_write(cam, REG_IMGOFFSET, 0x0);
- imgsz = ((fmt->height << IMGSZ_V_SHIFT) & IMGSZ_V_MASK) |
- (fmt->bytesperline & IMGSZ_H_MASK);
- mcam_reg_write(cam, REG_IMGSIZE, imgsz);
- mcam_reg_write(cam, REG_IMGOFFSET, 0);
- /* YPITCH just drops the last two bits */
- mcam_reg_write_mask(cam, REG_IMGPITCH, fmt->bytesperline,
- IMGP_YP_MASK);
/*
* Tell the controller about the image format we are using.
*/
- switch (cam->pix_format.pixelformat) {
+ switch (fmt->pixelformat) {
+ case V4L2_PIX_FMT_YUV422P:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_YUV | C0_YUV_PLANAR | C0_YUVE_YVYU, C0_DF_MASK);
+ break;
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_YUV | C0_YUV_420PL | C0_YUVE_YVYU, C0_DF_MASK);
+ break;
case V4L2_PIX_FMT_YUYV:
- mcam_reg_write_mask(cam, REG_CTRL0,
- C0_DF_YUV|C0_YUV_PACKED|C0_YUVE_YUYV,
- C0_DF_MASK);
- break;
-
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_UYVY, C0_DF_MASK);
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_YUYV, C0_DF_MASK);
+ break;
+ case V4L2_PIX_FMT_JPEG:
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_YUYV, C0_DF_MASK);
+ break;
case V4L2_PIX_FMT_RGB444:
- mcam_reg_write_mask(cam, REG_CTRL0,
- C0_DF_RGB|C0_RGBF_444|C0_RGB4_XRGB,
- C0_DF_MASK);
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_RGB | C0_RGBF_444 | C0_RGB4_XRGB, C0_DF_MASK);
/* Alpha value? */
- break;
-
+ break;
case V4L2_PIX_FMT_RGB565:
- mcam_reg_write_mask(cam, REG_CTRL0,
- C0_DF_RGB|C0_RGBF_565|C0_RGB5_BGGR,
- C0_DF_MASK);
- break;
-
+ mcam_reg_write_mask(cam, REG_CTRL0,
+ C0_DF_RGB | C0_RGBF_565 | C0_RGB5_BGGR, C0_DF_MASK);
+ break;
default:
- cam_err(cam, "Unknown format %x\n", cam->pix_format.pixelformat);
- break;
+ cam_err(cam, "camera: unknown format: %#x\n", fmt->pixelformat);
+ break;
}
+
/*
* Make sure it knows we want to use hsync/vsync.
*/
- mcam_reg_write_mask(cam, REG_CTRL0, C0_SIF_HVSYNC,
- C0_SIFM_MASK);
+ mcam_reg_write_mask(cam, REG_CTRL0, C0_SIF_HVSYNC, C0_SIFM_MASK);
+ /*
+ * This field controls the generation of EOF(DVP only)
+ */
+ if (cam->bus_type != V4L2_MBUS_CSI2)
+ mcam_reg_set_bit(cam, REG_CTRL0,
+ C0_EOF_VSYNC | C0_VEDGE_CTRL);
}
@@ -753,15 +920,21 @@ static void mcam_ctlr_stop_dma(struct mcam_camera *cam)
/*
* Power up and down.
*/
-static void mcam_ctlr_power_up(struct mcam_camera *cam)
+static int mcam_ctlr_power_up(struct mcam_camera *cam)
{
unsigned long flags;
+ int ret;
spin_lock_irqsave(&cam->dev_lock, flags);
- cam->plat_power_up(cam);
+ ret = cam->plat_power_up(cam);
+ if (ret) {
+ spin_unlock_irqrestore(&cam->dev_lock, flags);
+ return ret;
+ }
mcam_reg_clear_bit(cam, REG_CTRL1, C1_PWRDWN);
spin_unlock_irqrestore(&cam->dev_lock, flags);
msleep(5); /* Just to be sure */
+ return 0;
}
static void mcam_ctlr_power_down(struct mcam_camera *cam)
@@ -869,6 +1042,17 @@ static int mcam_read_setup(struct mcam_camera *cam)
spin_lock_irqsave(&cam->dev_lock, flags);
clear_bit(CF_DMA_ACTIVE, &cam->flags);
mcam_reset_buffers(cam);
+ /*
+ * Update CSI2_DPHY value
+ */
+ if (cam->calc_dphy)
+ cam->calc_dphy(cam);
+ cam_dbg(cam, "camera: DPHY sets: dphy3=0x%x, dphy5=0x%x, dphy6=0x%x\n",
+ cam->dphy[0], cam->dphy[1], cam->dphy[2]);
+ if (cam->bus_type == V4L2_MBUS_CSI2)
+ mcam_enable_mipi(cam);
+ else
+ mcam_disable_mipi(cam);
mcam_ctlr_irq_enable(cam);
cam->state = S_STREAMING;
if (!test_bit(CF_SG_RESTART, &cam->flags))
@@ -943,6 +1127,7 @@ static void mcam_vb_wait_finish(struct vb2_queue *vq)
static int mcam_vb_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct mcam_camera *cam = vb2_get_drv_priv(vq);
+ unsigned int frame;
if (cam->state != S_IDLE) {
INIT_LIST_HEAD(&cam->buffers);
@@ -960,6 +1145,14 @@ static int mcam_vb_start_streaming(struct vb2_queue *vq, unsigned int count)
cam->state = S_BUFWAIT;
return 0;
}
+
+ /*
+ * Clear any left-over frame flags
+ * before streaming really starts
+ */
+ for (frame = 0; frame < cam->nbufs; frame++)
+ clear_bit(CF_FRAME_SOF0 + frame, &cam->flags);
+
return mcam_read_setup(cam);
}
@@ -977,6 +1170,12 @@ static int mcam_vb_stop_streaming(struct vb2_queue *vq)
return -EINVAL;
mcam_ctlr_stop_dma(cam);
/*
+ * Reset the CCIC PHY after stopping streaming,
+ * otherwise, the CCIC may be unstable.
+ */
+ if (cam->ctlr_reset)
+ cam->ctlr_reset(cam);
+ /*
* VB2 reclaims the buffers, so we need to forget
* about them.
*/
@@ -1087,6 +1286,7 @@ static int mcam_setup_vb2(struct mcam_camera *cam)
#ifdef MCAM_MODE_DMA_CONTIG
vq->ops = &mcam_vb2_ops;
vq->mem_ops = &vb2_dma_contig_memops;
+ vq->buf_struct_size = sizeof(struct mcam_vb_buffer);
cam->vb_alloc_ctx = vb2_dma_contig_init_ctx(cam->dev);
vq->io_modes = VB2_MMAP | VB2_USERPTR;
cam->dma_setup = mcam_ctlr_dma_contig;
@@ -1097,6 +1297,7 @@ static int mcam_setup_vb2(struct mcam_camera *cam)
#ifdef MCAM_MODE_DMA_SG
vq->ops = &mcam_vb2_sg_ops;
vq->mem_ops = &vb2_dma_sg_memops;
+ vq->buf_struct_size = sizeof(struct mcam_vb_buffer);
vq->io_modes = VB2_MMAP | VB2_USERPTR;
cam->dma_setup = mcam_ctlr_dma_sg;
cam->frame_complete = mcam_dma_sg_done;
@@ -1247,7 +1448,15 @@ static int mcam_vidioc_try_fmt_vid_cap(struct file *filp, void *priv,
ret = sensor_call(cam, video, try_mbus_fmt, &mbus_fmt);
mutex_unlock(&cam->s_mutex);
v4l2_fill_pix_format(pix, &mbus_fmt);
- pix->bytesperline = pix->width * f->bpp;
+ switch (f->pixelformat) {
+ case V4L2_PIX_FMT_YUV420:
+ case V4L2_PIX_FMT_YVU420:
+ pix->bytesperline = pix->width * 3 / 2;
+ break;
+ default:
+ pix->bytesperline = pix->width * f->bpp;
+ break;
+ }
pix->sizeimage = pix->height * pix->bytesperline;
return ret;
}
@@ -1475,7 +1684,9 @@ static int mcam_v4l_open(struct file *filp)
ret = mcam_setup_vb2(cam);
if (ret)
goto out;
- mcam_ctlr_power_up(cam);
+ ret = mcam_ctlr_power_up(cam);
+ if (ret)
+ goto out;
__mcam_cam_reset(cam);
mcam_set_config_needed(cam, 1);
}
@@ -1498,10 +1709,12 @@ static int mcam_v4l_release(struct file *filp)
if (cam->users == 0) {
mcam_ctlr_stop_dma(cam);
mcam_cleanup_vb2(cam);
+ mcam_disable_mipi(cam);
mcam_ctlr_power_down(cam);
if (cam->buffer_mode == B_vmalloc && alloc_bufs_at_read)
mcam_free_dma_bufs(cam);
}
+
mutex_unlock(&cam->s_mutex);
return 0;
}
@@ -1617,9 +1830,11 @@ int mccic_irq(struct mcam_camera *cam, unsigned int irqs)
* each time.
*/
for (frame = 0; frame < cam->nbufs; frame++)
- if (irqs & (IRQ_EOF0 << frame)) {
+ if (irqs & (IRQ_EOF0 << frame) &&
+ test_bit(CF_FRAME_SOF0 + frame, &cam->flags)) {
mcam_frame_complete(cam, frame);
handled = 1;
+ clear_bit(CF_FRAME_SOF0 + frame, &cam->flags);
if (cam->buffer_mode == B_DMA_sg)
break;
}
@@ -1628,9 +1843,15 @@ int mccic_irq(struct mcam_camera *cam, unsigned int irqs)
* code assumes that we won't get multiple frame interrupts
* at once; may want to rethink that.
*/
- if (irqs & (IRQ_SOF0 | IRQ_SOF1 | IRQ_SOF2)) {
+ for (frame = 0; frame < cam->nbufs; frame++) {
+ if (irqs & (IRQ_SOF0 << frame)) {
+ set_bit(CF_FRAME_SOF0 + frame, &cam->flags);
+ handled = IRQ_HANDLED;
+ }
+ }
+
+ if (handled == IRQ_HANDLED) {
set_bit(CF_DMA_ACTIVE, &cam->flags);
- handled = 1;
if (cam->buffer_mode == B_DMA_sg)
mcam_ctlr_stop(cam);
}
@@ -1787,7 +2008,11 @@ int mccic_resume(struct mcam_camera *cam)
mutex_lock(&cam->s_mutex);
if (cam->users > 0) {
- mcam_ctlr_power_up(cam);
+ ret = mcam_ctlr_power_up(cam);
+ if (ret) {
+ mutex_unlock(&cam->s_mutex);
+ return ret;
+ }
__mcam_cam_reset(cam);
} else {
mcam_ctlr_power_down(cam);
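The contiguous-DMA path above now derives the U and V plane addresses from the Y base address and the pixel count: 4:2:2 planar chroma planes are half the luma size, 4:2:0 planes a quarter, and YVU420 simply swaps the plane order. The arithmetic in isolation (a sketch of what mcam_set_contig_buffer() computes):

        u32 pixel_count = width * height;
        dma_addr_t y = dma_handle, u = 0, v = 0;

        switch (pixelformat) {
        case V4L2_PIX_FMT_YUV422P:      /* 4:2:2: each chroma plane is pixel_count / 2 */
                u = y + pixel_count;
                v = u + pixel_count / 2;
                break;
        case V4L2_PIX_FMT_YUV420:       /* 4:2:0: each chroma plane is pixel_count / 4 */
                u = y + pixel_count;
                v = u + pixel_count / 4;
                break;
        case V4L2_PIX_FMT_YVU420:       /* same layout, V plane stored first */
                v = y + pixel_count;
                u = v + pixel_count / 4;
                break;
        default:                        /* packed formats need only the Y base */
                break;
        }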
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell-ccic/mcam-core.h
index 520c8ded944..e0e628cb98f 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.h
+++ b/drivers/media/platform/marvell-ccic/mcam-core.h
@@ -88,6 +88,8 @@ struct mcam_frame_state {
unsigned int delivered;
};
+#define NR_MCAM_CLK 3
+
/*
* A description of one of our devices.
* Locking: controlled by s_mutex. Certain fields, however, require
@@ -108,11 +110,33 @@ struct mcam_camera {
short int clock_speed; /* Sensor clock speed, default 30 */
short int use_smbus; /* SMBUS or straight I2c? */
enum mcam_buffer_mode buffer_mode;
+
+ int mclk_min; /* The minimal value of mclk */
+ int mclk_src; /* which clock source the mclk derives from */
+ int mclk_div; /* Clock Divider Value for MCLK */
+
+ int ccic_id;
+ enum v4l2_mbus_type bus_type;
+ /* MIPI support */
+ /* The dphy config value, allocated in board file
+ * dphy[0]: DPHY3
+ * dphy[1]: DPHY5
+ * dphy[2]: DPHY6
+ */
+ int *dphy;
+ bool mipi_enabled; /* flag whether mipi is enabled already */
+ int lane; /* lane number */
+
+ /* clock tree support */
+ struct clk *clk[NR_MCAM_CLK];
+
/*
* Callbacks from the core to the platform code.
*/
- void (*plat_power_up) (struct mcam_camera *cam);
+ int (*plat_power_up) (struct mcam_camera *cam);
void (*plat_power_down) (struct mcam_camera *cam);
+ void (*calc_dphy) (struct mcam_camera *cam);
+ void (*ctlr_reset) (struct mcam_camera *cam);
/*
* Everything below here is private to the mcam core and
@@ -225,6 +249,23 @@ int mccic_resume(struct mcam_camera *cam);
#define REG_Y0BAR 0x00
#define REG_Y1BAR 0x04
#define REG_Y2BAR 0x08
+#define REG_U0BAR 0x0c
+#define REG_U1BAR 0x10
+#define REG_U2BAR 0x14
+#define REG_V0BAR 0x18
+#define REG_V1BAR 0x1C
+#define REG_V2BAR 0x20
+
+/*
+ * register definitions for MIPI support
+ */
+#define REG_CSI2_CTRL0 0x100
+#define CSI2_C0_MIPI_EN (0x1 << 0)
+#define CSI2_C0_ACT_LANE(n) ((n-1) << 1)
+#define REG_CSI2_DPHY3 0x12c
+#define REG_CSI2_DPHY5 0x134
+#define REG_CSI2_DPHY6 0x138
+
/* ... */
#define REG_IMGPITCH 0x24 /* Image pitch register */
@@ -293,13 +334,16 @@ int mccic_resume(struct mcam_camera *cam);
#define C0_YUVE_XUVY 0x00020000 /* 420: .UVY */
#define C0_YUVE_XVUY 0x00030000 /* 420: .VUY */
/* Bayer bits 18,19 if needed */
+#define C0_EOF_VSYNC 0x00400000 /* Generate EOF by VSYNC */
+#define C0_VEDGE_CTRL 0x00800000 /* Detect falling edge of VSYNC */
#define C0_HPOL_LOW 0x01000000 /* HSYNC polarity active low */
#define C0_VPOL_LOW 0x02000000 /* VSYNC polarity active low */
#define C0_VCLK_LOW 0x04000000 /* VCLK on falling edge */
#define C0_DOWNSCALE 0x08000000 /* Enable downscaler */
-#define C0_SIFM_MASK 0xc0000000 /* SIF mode bits */
+/* SIFMODE */
#define C0_SIF_HVSYNC 0x00000000 /* Use H/VSYNC */
-#define CO_SOF_NOSYNC 0x40000000 /* Use inband active signaling */
+#define C0_SOF_NOSYNC 0x40000000 /* Use inband active signaling */
+#define C0_SIFM_MASK 0xc0000000 /* SIF mode bits */
/* Bits below C1_444ALPHA are not present in Cafe */
#define REG_CTRL1 0x40 /* Control 1 */
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
index a634888271c..b5a19af5c58 100644
--- a/drivers/media/platform/marvell-ccic/mmp-driver.c
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/pm.h>
+#include <linux/clk.h>
#include "mcam-core.h"
@@ -33,11 +34,14 @@ MODULE_ALIAS("platform:mmp-camera");
MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
MODULE_LICENSE("GPL");
+static char *mcam_clks[] = {"CCICAXICLK", "CCICFUNCLK", "CCICPHYCLK"};
+
struct mmp_camera {
void *power_regs;
struct platform_device *pdev;
struct mcam_camera mcam;
struct list_head devlist;
+ struct clk *mipi_clk;
int irq;
};
@@ -101,6 +105,27 @@ static struct mmp_camera *mmpcam_find_device(struct platform_device *pdev)
#define CPU_SUBSYS_PMU_BASE 0xd4282800
#define REG_CCIC_DCGCR 0x28 /* CCIC dyn clock gate ctrl reg */
#define REG_CCIC_CRCR 0x50 /* CCIC clk reset ctrl reg */
+#define REG_CCIC2_CRCR 0xf4 /* CCIC2 clk reset ctrl reg */
+
+static void mcam_clk_enable(struct mcam_camera *mcam)
+{
+ unsigned int i;
+
+ for (i = 0; i < NR_MCAM_CLK; i++) {
+ if (!IS_ERR(mcam->clk[i]))
+ clk_prepare_enable(mcam->clk[i]);
+ }
+}
+
+static void mcam_clk_disable(struct mcam_camera *mcam)
+{
+ int i;
+
+ for (i = NR_MCAM_CLK - 1; i >= 0; i--) {
+ if (!IS_ERR(mcam->clk[i]))
+ clk_disable_unprepare(mcam->clk[i]);
+ }
+}
/*
* Power control.
@@ -112,10 +137,17 @@ static void mmpcam_power_up_ctlr(struct mmp_camera *cam)
mdelay(1);
}
-static void mmpcam_power_up(struct mcam_camera *mcam)
+static int mmpcam_power_up(struct mcam_camera *mcam)
{
struct mmp_camera *cam = mcam_to_cam(mcam);
struct mmp_camera_platform_data *pdata;
+
+ if (mcam->bus_type == V4L2_MBUS_CSI2) {
+ cam->mipi_clk = devm_clk_get(mcam->dev, "mipi");
+ if ((IS_ERR(cam->mipi_clk) && mcam->dphy[2] == 0))
+ return PTR_ERR(cam->mipi_clk);
+ }
+
/*
* Turn on power and clocks to the controller.
*/
@@ -132,6 +164,10 @@ static void mmpcam_power_up(struct mcam_camera *mcam)
mdelay(5);
gpio_set_value(pdata->sensor_reset_gpio, 1); /* reset is active low */
mdelay(5);
+
+ mcam_clk_enable(mcam);
+
+ return 0;
}
static void mmpcam_power_down(struct mcam_camera *mcam)
@@ -149,8 +185,133 @@ static void mmpcam_power_down(struct mcam_camera *mcam)
pdata = cam->pdev->dev.platform_data;
gpio_set_value(pdata->sensor_power_gpio, 0);
gpio_set_value(pdata->sensor_reset_gpio, 0);
+
+ if (mcam->bus_type == V4L2_MBUS_CSI2 && !IS_ERR(cam->mipi_clk)) {
+ if (cam->mipi_clk)
+ devm_clk_put(mcam->dev, cam->mipi_clk);
+ cam->mipi_clk = NULL;
+ }
+
+ mcam_clk_disable(mcam);
}
+void mcam_ctlr_reset(struct mcam_camera *mcam)
+{
+ unsigned long val;
+ struct mmp_camera *cam = mcam_to_cam(mcam);
+
+ if (mcam->ccic_id) {
+ /*
+ * Using CCIC2
+ */
+ val = ioread32(cam->power_regs + REG_CCIC2_CRCR);
+ iowrite32(val & ~0x2, cam->power_regs + REG_CCIC2_CRCR);
+ iowrite32(val | 0x2, cam->power_regs + REG_CCIC2_CRCR);
+ } else {
+ /*
+ * Using CCIC1
+ */
+ val = ioread32(cam->power_regs + REG_CCIC_CRCR);
+ iowrite32(val & ~0x2, cam->power_regs + REG_CCIC_CRCR);
+ iowrite32(val | 0x2, cam->power_regs + REG_CCIC_CRCR);
+ }
+}
+
+/*
+ * Calculate the dphy register values.
+ * There are three dphy registers being used:
+ * dphy[0] - CSI2_DPHY3
+ * dphy[1] - CSI2_DPHY5
+ * dphy[2] - CSI2_DPHY6
+ * CSI2_DPHY3 and CSI2_DPHY6 can be set with a default value
+ * or be calculated dynamically
+ */
+void mmpcam_calc_dphy(struct mcam_camera *mcam)
+{
+ struct mmp_camera *cam = mcam_to_cam(mcam);
+ struct mmp_camera_platform_data *pdata = cam->pdev->dev.platform_data;
+ struct device *dev = &cam->pdev->dev;
+ unsigned long tx_clk_esc;
+
+	/*
+	 * If CSI2_DPHY3 is calculated dynamically,
+	 * pdata->lane_clk should already be set,
+	 * either statically in the board driver
+	 * or dynamically in the sensor driver.
+	 */
+	/*
+	 * dphy[0] - CSI2_DPHY3:
+	 *  bit 0 ~ bit 7: HS Term Enable.
+	 *   Defines the time that the DPHY
+	 *   waits before enabling the data
+	 *   lane termination after detecting
+	 *   that the sensor has driven the data
+	 *   lanes to the LP00 bridge state.
+	 *   The value is calculated by:
+	 *   (Max T(D_TERM_EN)/Period(DDR)) - 1
+	 *  bit 8 ~ bit 15: HS_SETTLE
+	 *   Time interval during which the HS
+	 *   receiver shall ignore any Data Lane
+	 *   HS transitions.
+	 *   The value has been calibrated on
+	 *   different boards. It seems to work well.
+	 *
+	 *   For more detail, please refer to the
+	 *   MIPI Alliance Specification for D-PHY
+	 *   for an explanation of HS-SETTLE
+	 *   and D-TERM-EN.
+	 */
+ switch (pdata->dphy3_algo) {
+ case DPHY3_ALGO_PXA910:
+ /*
+ * Calculate CSI2_DPHY3 algo for PXA910
+ */
+ pdata->dphy[0] =
+ (((1 + (pdata->lane_clk * 80) / 1000) & 0xff) << 8)
+ | (1 + pdata->lane_clk * 35 / 1000);
+ break;
+ case DPHY3_ALGO_PXA2128:
+ /*
+ * Calculate CSI2_DPHY3 algo for PXA2128
+ */
+ pdata->dphy[0] =
+ (((2 + (pdata->lane_clk * 110) / 1000) & 0xff) << 8)
+ | (1 + pdata->lane_clk * 35 / 1000);
+ break;
+ default:
+ /*
+ * Use default CSI2_DPHY3 value for PXA688/PXA988
+ */
+ dev_dbg(dev, "camera: use the default CSI2_DPHY3 value\n");
+ }
+
+ /*
+	 * mipi_clk never changes; it is a fixed value on MMP
+ */
+ if (IS_ERR(cam->mipi_clk))
+ return;
+
+	/* Get the escape clock; this divisor is hard-coded */
+ tx_clk_esc = (clk_get_rate(cam->mipi_clk) / 1000000) / 12;
+
+ /*
+ * dphy[2] - CSI2_DPHY6:
+ * bit 0 ~ bit 7: CK Term Enable
+ * Time for the Clock Lane receiver to enable the HS line
+	 *	termination. The value is calculated similarly to
+ * HS Term Enable
+ * bit 8 ~ bit 15: CK Settle
+ * Time interval during which the HS receiver shall ignore
+ * any Clock Lane HS transitions.
+ * The value is calibrated on the boards.
+ */
+ pdata->dphy[2] =
+ ((((534 * tx_clk_esc) / 2000 - 1) & 0xff) << 8)
+ | (((38 * tx_clk_esc) / 1000 - 1) & 0xff);
+
+ dev_dbg(dev, "camera: DPHY sets: dphy3=0x%x, dphy5=0x%x, dphy6=0x%x\n",
+ pdata->dphy[0], pdata->dphy[1], pdata->dphy[2]);
+}
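
The CSI2_DPHY6 value computed above is plain integer arithmetic on the escape-clock rate. A worked example with a hypothetical 624 MHz MIPI clock (the actual rate is board specific and not part of this patch):

/* Sketch only: the CSI2_DPHY6 arithmetic from mmpcam_calc_dphy(), with an assumed clock rate. */
#include <stdio.h>

int main(void)
{
	unsigned long rate = 624000000UL;			/* assumed MIPI clock rate, in Hz */
	unsigned long tx_clk_esc = (rate / 1000000) / 12;	/* 624 MHz / 12 = 52 */
	unsigned int dphy6 = ((((534 * tx_clk_esc) / 2000 - 1) & 0xff) << 8)
			   | (((38 * tx_clk_esc) / 1000 - 1) & 0xff);

	printf("tx_clk_esc = %lu, CSI2_DPHY6 = 0x%04x\n", tx_clk_esc, dphy6);
	/* Prints 52 and 0x0c00: CK Settle = 12 (bits 8-15), CK Term Enable = 0 (bits 0-7). */
	return 0;
}
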
static irqreturn_t mmpcam_irq(int irq, void *data)
{
@@ -164,6 +325,35 @@ static irqreturn_t mmpcam_irq(int irq, void *data)
return IRQ_RETVAL(handled);
}
+static void mcam_deinit_clk(struct mcam_camera *mcam)
+{
+ unsigned int i;
+
+ for (i = 0; i < NR_MCAM_CLK; i++) {
+ if (!IS_ERR(mcam->clk[i])) {
+ if (mcam->clk[i])
+ devm_clk_put(mcam->dev, mcam->clk[i]);
+ }
+ mcam->clk[i] = NULL;
+ }
+}
+
+static void mcam_init_clk(struct mcam_camera *mcam)
+{
+ unsigned int i;
+
+ for (i = 0; i < NR_MCAM_CLK; i++) {
+ if (mcam_clks[i] != NULL) {
+			/*
+			 * Some clocks are not necessary on some boards;
+			 * we still try to run even if getting a clock fails.
+			 */
+ mcam->clk[i] = devm_clk_get(mcam->dev, mcam_clks[i]);
+ if (IS_ERR(mcam->clk[i]))
+ dev_warn(mcam->dev, "Could not get clk: %s\n",
+ mcam_clks[i]);
+ }
+ }
+}
static int mmpcam_probe(struct platform_device *pdev)
{
@@ -173,17 +363,32 @@ static int mmpcam_probe(struct platform_device *pdev)
struct mmp_camera_platform_data *pdata;
int ret;
- cam = kzalloc(sizeof(*cam), GFP_KERNEL);
+ pdata = pdev->dev.platform_data;
+ if (!pdata)
+ return -ENODEV;
+
+ cam = devm_kzalloc(&pdev->dev, sizeof(*cam), GFP_KERNEL);
if (cam == NULL)
return -ENOMEM;
cam->pdev = pdev;
+ cam->mipi_clk = NULL;
INIT_LIST_HEAD(&cam->devlist);
mcam = &cam->mcam;
mcam->plat_power_up = mmpcam_power_up;
mcam->plat_power_down = mmpcam_power_down;
+ mcam->ctlr_reset = mcam_ctlr_reset;
+ mcam->calc_dphy = mmpcam_calc_dphy;
mcam->dev = &pdev->dev;
mcam->use_smbus = 0;
+ mcam->ccic_id = pdev->id;
+ mcam->mclk_min = pdata->mclk_min;
+ mcam->mclk_src = pdata->mclk_src;
+ mcam->mclk_div = pdata->mclk_div;
+ mcam->bus_type = pdata->bus_type;
+ mcam->dphy = pdata->dphy;
+ mcam->mipi_enabled = false;
+ mcam->lane = pdata->lane;
mcam->chip_id = MCAM_ARMADA610;
mcam->buffer_mode = B_DMA_sg;
spin_lock_init(&mcam->dev_lock);
@@ -191,69 +396,58 @@ static int mmpcam_probe(struct platform_device *pdev)
* Get our I/O memory.
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- dev_err(&pdev->dev, "no iomem resource!\n");
- ret = -ENODEV;
- goto out_free;
- }
- mcam->regs = ioremap(res->start, resource_size(res));
- if (mcam->regs == NULL) {
- dev_err(&pdev->dev, "MMIO ioremap fail\n");
- ret = -ENODEV;
- goto out_free;
- }
+ mcam->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mcam->regs))
+ return PTR_ERR(mcam->regs);
mcam->regs_size = resource_size(res);
/*
* Power/clock memory is elsewhere; get it too. Perhaps this
* should really be managed outside of this driver?
*/
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (res == NULL) {
- dev_err(&pdev->dev, "no power resource!\n");
- ret = -ENODEV;
- goto out_unmap1;
- }
- cam->power_regs = ioremap(res->start, resource_size(res));
- if (cam->power_regs == NULL) {
- dev_err(&pdev->dev, "power MMIO ioremap fail\n");
- ret = -ENODEV;
- goto out_unmap1;
- }
+ cam->power_regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(cam->power_regs))
+ return PTR_ERR(cam->power_regs);
/*
* Find the i2c adapter. This assumes, of course, that the
* i2c bus is already up and functioning.
*/
- pdata = pdev->dev.platform_data;
mcam->i2c_adapter = platform_get_drvdata(pdata->i2c_device);
if (mcam->i2c_adapter == NULL) {
- ret = -ENODEV;
dev_err(&pdev->dev, "No i2c adapter\n");
- goto out_unmap2;
+ return -ENODEV;
}
/*
* Sensor GPIO pins.
*/
- ret = gpio_request(pdata->sensor_power_gpio, "cam-power");
+ ret = devm_gpio_request(&pdev->dev, pdata->sensor_power_gpio,
+ "cam-power");
if (ret) {
dev_err(&pdev->dev, "Can't get sensor power gpio %d",
pdata->sensor_power_gpio);
- goto out_unmap2;
+ return ret;
}
gpio_direction_output(pdata->sensor_power_gpio, 0);
- ret = gpio_request(pdata->sensor_reset_gpio, "cam-reset");
+ ret = devm_gpio_request(&pdev->dev, pdata->sensor_reset_gpio,
+ "cam-reset");
if (ret) {
dev_err(&pdev->dev, "Can't get sensor reset gpio %d",
pdata->sensor_reset_gpio);
- goto out_gpio;
+ return ret;
}
gpio_direction_output(pdata->sensor_reset_gpio, 0);
+
+ mcam_init_clk(mcam);
+
/*
* Power the device up and hand it off to the core.
*/
- mmpcam_power_up(mcam);
+ ret = mmpcam_power_up(mcam);
+ if (ret)
+ goto out_deinit_clk;
ret = mccic_register(mcam);
if (ret)
- goto out_gpio2;
+ goto out_power_down;
/*
* Finally, set up our IRQ now that the core is ready to
* deal with it.
@@ -264,8 +458,8 @@ static int mmpcam_probe(struct platform_device *pdev)
goto out_unregister;
}
cam->irq = res->start;
- ret = request_irq(cam->irq, mmpcam_irq, IRQF_SHARED,
- "mmp-camera", mcam);
+ ret = devm_request_irq(&pdev->dev, cam->irq, mmpcam_irq, IRQF_SHARED,
+ "mmp-camera", mcam);
if (ret == 0) {
mmpcam_add_device(cam);
return 0;
@@ -273,17 +467,10 @@ static int mmpcam_probe(struct platform_device *pdev)
out_unregister:
mccic_shutdown(mcam);
-out_gpio2:
+out_power_down:
mmpcam_power_down(mcam);
- gpio_free(pdata->sensor_reset_gpio);
-out_gpio:
- gpio_free(pdata->sensor_power_gpio);
-out_unmap2:
- iounmap(cam->power_regs);
-out_unmap1:
- iounmap(mcam->regs);
-out_free:
- kfree(cam);
+out_deinit_clk:
+ mcam_deinit_clk(mcam);
return ret;
}
@@ -300,6 +487,7 @@ static int mmpcam_remove(struct mmp_camera *cam)
pdata = cam->pdev->dev.platform_data;
gpio_free(pdata->sensor_reset_gpio);
gpio_free(pdata->sensor_power_gpio);
+ mcam_deinit_clk(mcam);
iounmap(cam->power_regs);
iounmap(mcam->regs);
kfree(cam);
diff --git a/drivers/media/platform/s3c-camif/camif-regs.c b/drivers/media/platform/s3c-camif/camif-regs.c
index a9e3b16460b..ebf5b184cce 100644
--- a/drivers/media/platform/s3c-camif/camif-regs.c
+++ b/drivers/media/platform/s3c-camif/camif-regs.c
@@ -106,15 +106,15 @@ static const u32 src_pixfmt_map[8][2] = {
void camif_hw_set_source_format(struct camif_dev *camif)
{
struct v4l2_mbus_framefmt *mf = &camif->mbus_fmt;
- unsigned int i = ARRAY_SIZE(src_pixfmt_map);
+ int i;
u32 cfg;
- while (i-- >= 0) {
+ for (i = ARRAY_SIZE(src_pixfmt_map) - 1; i >= 0; i--) {
if (src_pixfmt_map[i][0] == mf->code)
break;
}
-
- if (i == 0 && src_pixfmt_map[i][0] != mf->code) {
+ if (i < 0) {
+ i = 0;
dev_err(camif->dev,
"Unsupported pixel code, falling back to %#08x\n",
src_pixfmt_map[i][0]);
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 553d87e5cea..fd6289d60cd 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -784,6 +784,7 @@ static int g2d_probe(struct platform_device *pdev)
}
*vfd = g2d_videodev;
vfd->lock = &dev->mutex;
+ vfd->v4l2_dev = &dev->v4l2_dev;
ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
if (ret) {
v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v6.h b/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
index 363a97cc768..2398cdf6134 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
+++ b/drivers/media/platform/s5p-mfc/regs-mfc-v6.h
@@ -374,9 +374,9 @@
#define S5P_FIMV_NUM_PIXELS_IN_MB_COL_V6 16
/* Buffer size requirements defined by hardware */
-#define S5P_FIMV_TMV_BUFFER_SIZE_V6(w, h) (((w) + 1) * ((h) + 1) * 8)
+#define S5P_FIMV_TMV_BUFFER_SIZE_V6(w, h) (((w) + 1) * ((h) + 3) * 8)
#define S5P_FIMV_ME_BUFFER_SIZE_V6(imw, imh, mbw, mbh) \
- ((DIV_ROUND_UP(imw, 64) * DIV_ROUND_UP(imh, 64) * 256) + \
+ (((((imw + 127) / 64) * 16) * DIV_ROUND_UP(imh, 64) * 256) + \
(DIV_ROUND_UP((mbw) * (mbh), 32) * 16))
#define S5P_FIMV_SCRATCH_BUF_SIZE_H264_DEC_V6(w, h) (((w) * 192) + 64)
#define S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_DEC_V6(w, h) \
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc-v7.h b/drivers/media/platform/s5p-mfc/regs-mfc-v7.h
new file mode 100644
index 00000000000..ea5ec2a711a
--- /dev/null
+++ b/drivers/media/platform/s5p-mfc/regs-mfc-v7.h
@@ -0,0 +1,61 @@
+/*
+ * Register definition file for Samsung MFC V7.x Interface (FIMV) driver
+ *
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _REGS_MFC_V7_H
+#define _REGS_MFC_V7_H
+
+#include "regs-mfc-v6.h"
+
+/* Additional features of v7 */
+#define S5P_FIMV_CODEC_VP8_ENC_V7 25
+
+/* Additional registers for v7 */
+#define S5P_FIMV_D_INIT_BUFFER_OPTIONS_V7 0xf47c
+
+#define S5P_FIMV_E_SOURCE_FIRST_ADDR_V7 0xf9e0
+#define S5P_FIMV_E_SOURCE_SECOND_ADDR_V7 0xf9e4
+#define S5P_FIMV_E_SOURCE_THIRD_ADDR_V7 0xf9e8
+#define S5P_FIMV_E_SOURCE_FIRST_STRIDE_V7 0xf9ec
+#define S5P_FIMV_E_SOURCE_SECOND_STRIDE_V7 0xf9f0
+#define S5P_FIMV_E_SOURCE_THIRD_STRIDE_V7 0xf9f4
+
+#define S5P_FIMV_E_ENCODED_SOURCE_FIRST_ADDR_V7 0xfa70
+#define S5P_FIMV_E_ENCODED_SOURCE_SECOND_ADDR_V7 0xfa74
+
+#define S5P_FIMV_E_VP8_OPTIONS_V7 0xfdb0
+#define S5P_FIMV_E_VP8_FILTER_OPTIONS_V7 0xfdb4
+#define S5P_FIMV_E_VP8_GOLDEN_FRAME_OPTION_V7 0xfdb8
+#define S5P_FIMV_E_VP8_NUM_T_LAYER_V7 0xfdc4
+
+/* MFCv7 variant defines */
+#define MAX_FW_SIZE_V7 (SZ_1M) /* 1MB */
+#define MAX_CPB_SIZE_V7 (3 * SZ_1M) /* 3MB */
+#define MFC_VERSION_V7 0x72
+#define MFC_NUM_PORTS_V7 1
+
+#define MFC_LUMA_PAD_BYTES_V7 256
+#define MFC_CHROMA_PAD_BYTES_V7 128
+
+/* MFCv7 Context buffer sizes */
+#define MFC_CTX_BUF_SIZE_V7 (30 * SZ_1K) /* 30KB */
+#define MFC_H264_DEC_CTX_BUF_SIZE_V7 (2 * SZ_1M) /* 2MB */
+#define MFC_OTHER_DEC_CTX_BUF_SIZE_V7 (20 * SZ_1K) /* 20KB */
+#define MFC_H264_ENC_CTX_BUF_SIZE_V7 (100 * SZ_1K) /* 100KB */
+#define MFC_OTHER_ENC_CTX_BUF_SIZE_V7 (10 * SZ_1K) /* 10KB */
+
+/* Buffer size defines */
+#define S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_DEC_V7(w, h) \
+ (SZ_1M + ((w) * 144) + (8192 * (h)) + 49216)
+
+#define S5P_FIMV_SCRATCH_BUF_SIZE_VP8_ENC_V7(w, h) \
+ (((w) * 48) + (((w) + 1) / 2 * 128) + 144 + 8192)
+
+#endif /*_REGS_MFC_V7_H*/
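
The scratch-buffer macros at the end of this header are simple arithmetic on macroblock dimensions; s5p_mfc_alloc_codec_buffers_v6() passes mb_width and mb_height and then aligns the result. A worked example for a hypothetical 1920x1080 frame (120x68 macroblocks), with SZ_1M expanded locally:

/* Sketch only: evaluate the V7 scratch-buffer macros for an assumed frame size. */
#include <stdio.h>

#define SZ_1M (1024 * 1024)

#define S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_DEC_V7(w, h) \
	(SZ_1M + ((w) * 144) + (8192 * (h)) + 49216)
#define S5P_FIMV_SCRATCH_BUF_SIZE_VP8_ENC_V7(w, h) \
	(((w) * 48) + (((w) + 1) / 2 * 128) + 144 + 8192)

int main(void)
{
	int mb_w = 120, mb_h = 68;	/* 1920x1080 rounded up to 16x16 macroblocks */

	printf("MPEG4 dec scratch: %d bytes\n",
	       S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_DEC_V7(mb_w, mb_h));	/* 1672128 */
	printf("VP8 enc scratch:   %d bytes\n",
	       S5P_FIMV_SCRATCH_BUF_SIZE_VP8_ENC_V7(mb_w, mb_h));	/* 21776 */
	return 0;
}
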
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index a130dcdb720..084263dd126 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -1391,6 +1391,32 @@ static struct s5p_mfc_variant mfc_drvdata_v6 = {
.fw_name = "s5p-mfc-v6.fw",
};
+struct s5p_mfc_buf_size_v6 mfc_buf_size_v7 = {
+ .dev_ctx = MFC_CTX_BUF_SIZE_V7,
+ .h264_dec_ctx = MFC_H264_DEC_CTX_BUF_SIZE_V7,
+ .other_dec_ctx = MFC_OTHER_DEC_CTX_BUF_SIZE_V7,
+ .h264_enc_ctx = MFC_H264_ENC_CTX_BUF_SIZE_V7,
+ .other_enc_ctx = MFC_OTHER_ENC_CTX_BUF_SIZE_V7,
+};
+
+struct s5p_mfc_buf_size buf_size_v7 = {
+ .fw = MAX_FW_SIZE_V7,
+ .cpb = MAX_CPB_SIZE_V7,
+ .priv = &mfc_buf_size_v7,
+};
+
+struct s5p_mfc_buf_align mfc_buf_align_v7 = {
+ .base = 0,
+};
+
+static struct s5p_mfc_variant mfc_drvdata_v7 = {
+ .version = MFC_VERSION_V7,
+ .port_num = MFC_NUM_PORTS_V7,
+ .buf_size = &buf_size_v7,
+ .buf_align = &mfc_buf_align_v7,
+ .fw_name = "s5p-mfc-v7.fw",
+};
+
static struct platform_device_id mfc_driver_ids[] = {
{
.name = "s5p-mfc",
@@ -1401,6 +1427,9 @@ static struct platform_device_id mfc_driver_ids[] = {
}, {
.name = "s5p-mfc-v6",
.driver_data = (unsigned long)&mfc_drvdata_v6,
+ }, {
+ .name = "s5p-mfc-v7",
+ .driver_data = (unsigned long)&mfc_drvdata_v7,
},
{},
};
@@ -1413,6 +1442,9 @@ static const struct of_device_id exynos_mfc_match[] = {
}, {
.compatible = "samsung,mfc-v6",
.data = &mfc_drvdata_v6,
+ }, {
+ .compatible = "samsung,mfc-v7",
+ .data = &mfc_drvdata_v7,
},
{},
};
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c
index f0a41c95df8..242c033cf8b 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd.c
@@ -20,7 +20,7 @@ static struct s5p_mfc_hw_cmds *s5p_mfc_cmds;
void s5p_mfc_init_hw_cmds(struct s5p_mfc_dev *dev)
{
- if (IS_MFCV6(dev))
+ if (IS_MFCV6_PLUS(dev))
s5p_mfc_cmds = s5p_mfc_init_hw_cmds_v6();
else
s5p_mfc_cmds = s5p_mfc_init_hw_cmds_v5();
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
index 5708fc3d9b4..db796c8e787 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_cmd_v6.c
@@ -108,6 +108,9 @@ static int s5p_mfc_open_inst_cmd_v6(struct s5p_mfc_ctx *ctx)
case S5P_MFC_CODEC_H263_ENC:
codec_type = S5P_FIMV_CODEC_H263_ENC_V6;
break;
+ case S5P_MFC_CODEC_VP8_ENC:
+ codec_type = S5P_FIMV_CODEC_VP8_ENC_V7;
+ break;
default:
codec_type = S5P_FIMV_CODEC_NONE_V6;
};
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index ef4074cd531..6920b546181 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -24,6 +24,7 @@
#include <media/videobuf2-core.h>
#include "regs-mfc.h"
#include "regs-mfc-v6.h"
+#include "regs-mfc-v7.h"
/* Definitions related to MFC memory */
@@ -64,7 +65,7 @@ static inline dma_addr_t s5p_mfc_mem_cookie(void *a, void *b)
#define MFC_ENC_CAP_PLANE_COUNT 1
#define MFC_ENC_OUT_PLANE_COUNT 2
#define STUFF_BYTE 4
-#define MFC_MAX_CTRLS 70
+#define MFC_MAX_CTRLS 77
#define S5P_MFC_CODEC_NONE -1
#define S5P_MFC_CODEC_H264_DEC 0
@@ -80,6 +81,7 @@ static inline dma_addr_t s5p_mfc_mem_cookie(void *a, void *b)
#define S5P_MFC_CODEC_H264_MVC_ENC 21
#define S5P_MFC_CODEC_MPEG4_ENC 22
#define S5P_MFC_CODEC_H263_ENC 23
+#define S5P_MFC_CODEC_VP8_ENC 24
#define S5P_MFC_R2H_CMD_EMPTY 0
#define S5P_MFC_R2H_CMD_SYS_INIT_RET 1
@@ -408,6 +410,21 @@ struct s5p_mfc_mpeg4_enc_params {
};
/**
+ * struct s5p_mfc_vp8_enc_params - encoding parameters for vp8
+ */
+struct s5p_mfc_vp8_enc_params {
+ u8 imd_4x4;
+ enum v4l2_vp8_num_partitions num_partitions;
+ enum v4l2_vp8_num_ref_frames num_ref;
+ u8 filter_level;
+ u8 filter_sharpness;
+ u32 golden_frame_ref_period;
+ enum v4l2_vp8_golden_frame_sel golden_frame_sel;
+ u8 hier_layer;
+ u8 hier_layer_qp[3];
+};
+
+/**
* struct s5p_mfc_enc_params - general encoding parameters
*/
struct s5p_mfc_enc_params {
@@ -441,6 +458,7 @@ struct s5p_mfc_enc_params {
struct {
struct s5p_mfc_h264_enc_params h264;
struct s5p_mfc_mpeg4_enc_params mpeg4;
+ struct s5p_mfc_vp8_enc_params vp8;
} codec;
};
@@ -683,6 +701,7 @@ void set_work_bit_irqsave(struct s5p_mfc_ctx *ctx);
#define HAS_PORTNUM(dev) (dev ? (dev->variant ? \
(dev->variant->port_num ? 1 : 0) : 0) : 0)
#define IS_TWOPORT(dev) (dev->variant->port_num == 2 ? 1 : 0)
-#define IS_MFCV6(dev) (dev->variant->version >= 0x60 ? 1 : 0)
+#define IS_MFCV6_PLUS(dev) (dev->variant->version >= 0x60 ? 1 : 0)
+#define IS_MFCV7(dev) (dev->variant->version >= 0x70 ? 1 : 0)
#endif /* S5P_MFC_COMMON_H_ */
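
The renamed IS_MFCV6_PLUS() and the new IS_MFCV7() are threshold checks on the variant version byte, so an MFC v7 device (version 0x72, as set in this series) satisfies both while older hardware satisfies neither. A standalone sketch of that classification; the real macros read dev->variant->version, and 0x50/0x61 below are assumed pre-v7 values used only for contrast:

/* Sketch only: how the version macros partition devices. */
#include <stdio.h>

#define IS_MFCV6_PLUS(v)	((v) >= 0x60 ? 1 : 0)
#define IS_MFCV7(v)		((v) >= 0x70 ? 1 : 0)

int main(void)
{
	int versions[] = { 0x50, 0x61, 0x72 };
	int i;

	for (i = 0; i < 3; i++)
		printf("0x%02x: v6+=%d v7=%d\n", versions[i],
		       IS_MFCV6_PLUS(versions[i]), IS_MFCV7(versions[i]));
	/* 0x50: v6+=0 v7=0, 0x61: v6+=1 v7=0, 0x72: v6+=1 v7=1 */
	return 0;
}
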
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
index dc1fc94a488..7cab6849fb5 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -164,7 +164,7 @@ int s5p_mfc_reset(struct s5p_mfc_dev *dev)
mfc_debug_enter();
- if (IS_MFCV6(dev)) {
+ if (IS_MFCV6_PLUS(dev)) {
/* Reset IP */
/* except RISC, reset */
mfc_write(dev, 0xFEE, S5P_FIMV_MFC_RESET_V6);
@@ -213,7 +213,7 @@ int s5p_mfc_reset(struct s5p_mfc_dev *dev)
static inline void s5p_mfc_init_memctrl(struct s5p_mfc_dev *dev)
{
- if (IS_MFCV6(dev)) {
+ if (IS_MFCV6_PLUS(dev)) {
mfc_write(dev, dev->bank1, S5P_FIMV_RISC_BASE_ADDRESS_V6);
mfc_debug(2, "Base Address : %08x\n", dev->bank1);
} else {
@@ -226,7 +226,7 @@ static inline void s5p_mfc_init_memctrl(struct s5p_mfc_dev *dev)
static inline void s5p_mfc_clear_cmds(struct s5p_mfc_dev *dev)
{
- if (IS_MFCV6(dev)) {
+ if (IS_MFCV6_PLUS(dev)) {
/* Zero initialization should be done before RESET.
* Nothing to do here. */
} else {
@@ -264,7 +264,7 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
s5p_mfc_clear_cmds(dev);
/* 3. Release reset signal to the RISC */
s5p_mfc_clean_dev_int_flags(dev);
- if (IS_MFCV6(dev))
+ if (IS_MFCV6_PLUS(dev))
mfc_write(dev, 0x1, S5P_FIMV_RISC_ON_V6);
else
mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
@@ -301,7 +301,7 @@ int s5p_mfc_init_hw(struct s5p_mfc_dev *dev)
s5p_mfc_clock_off();
return -EIO;
}
- if (IS_MFCV6(dev))
+ if (IS_MFCV6_PLUS(dev))
ver = mfc_read(dev, S5P_FIMV_FW_VERSION_V6);
else
ver = mfc_read(dev, S5P_FIMV_FW_VERSION);
@@ -380,7 +380,7 @@ int s5p_mfc_wakeup(struct s5p_mfc_dev *dev)
return ret;
}
/* 4. Release reset signal to the RISC */
- if (IS_MFCV6(dev))
+ if (IS_MFCV6_PLUS(dev))
mfc_write(dev, 0x1, S5P_FIMV_RISC_ON_V6);
else
mfc_write(dev, 0x3ff, S5P_FIMV_SW_RESET);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 5296385153d..8faf9691712 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -344,7 +344,7 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
pix_mp->num_planes = 2;
/* Set pixelformat to the format in which MFC
outputs the decoded frame */
- pix_mp->pixelformat = V4L2_PIX_FMT_NV12MT;
+ pix_mp->pixelformat = ctx->dst_fmt->fourcc;
pix_mp->plane_fmt[0].bytesperline = ctx->buf_width;
pix_mp->plane_fmt[0].sizeimage = ctx->luma_size;
pix_mp->plane_fmt[1].bytesperline = ctx->buf_width;
@@ -382,20 +382,27 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
mfc_err("Unsupported format for source.\n");
return -EINVAL;
}
- if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) {
- mfc_err("Not supported format.\n");
+ if (fmt->codec_mode == S5P_FIMV_CODEC_NONE) {
+ mfc_err("Unknown codec\n");
return -EINVAL;
}
+ if (!IS_MFCV6_PLUS(dev)) {
+ if (fmt->fourcc == V4L2_PIX_FMT_VP8) {
+ mfc_err("Not supported format.\n");
+ return -EINVAL;
+ }
+ }
} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
fmt = find_format(f, MFC_FMT_RAW);
if (!fmt) {
mfc_err("Unsupported format for destination.\n");
return -EINVAL;
}
- if (IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) {
+ if (IS_MFCV6_PLUS(dev) &&
+ (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) {
mfc_err("Not supported format.\n");
return -EINVAL;
- } else if (!IS_MFCV6(dev) &&
+ } else if (!IS_MFCV6_PLUS(dev) &&
(fmt->fourcc != V4L2_PIX_FMT_NV12MT)) {
mfc_err("Not supported format.\n");
return -EINVAL;
@@ -411,7 +418,6 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
struct s5p_mfc_dev *dev = video_drvdata(file);
struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
int ret = 0;
- struct s5p_mfc_fmt *fmt;
struct v4l2_pix_format_mplane *pix_mp;
mfc_debug_enter();
@@ -425,54 +431,32 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
goto out;
}
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- fmt = find_format(f, MFC_FMT_RAW);
- if (!fmt) {
- mfc_err("Unsupported format for source.\n");
- return -EINVAL;
- }
- if (!IS_MFCV6(dev) && (fmt->fourcc != V4L2_PIX_FMT_NV12MT)) {
- mfc_err("Not supported format.\n");
- return -EINVAL;
- } else if (IS_MFCV6(dev) &&
- (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) {
- mfc_err("Not supported format.\n");
- return -EINVAL;
- }
- ctx->dst_fmt = fmt;
- mfc_debug_leave();
- return ret;
- } else if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- mfc_err("Wrong type error for S_FMT : %d", f->type);
- return -EINVAL;
- }
- fmt = find_format(f, MFC_FMT_DEC);
- if (!fmt || fmt->codec_mode == S5P_MFC_CODEC_NONE) {
- mfc_err("Unknown codec\n");
- ret = -EINVAL;
+ /* dst_fmt is validated by call to vidioc_try_fmt */
+ ctx->dst_fmt = find_format(f, MFC_FMT_RAW);
+ ret = 0;
goto out;
- }
- if (fmt->type != MFC_FMT_DEC) {
- mfc_err("Wrong format selected, you should choose "
- "format for decoding\n");
+ } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ /* src_fmt is validated by call to vidioc_try_fmt */
+ ctx->src_fmt = find_format(f, MFC_FMT_DEC);
+ ctx->codec_mode = ctx->src_fmt->codec_mode;
+ mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode);
+ pix_mp->height = 0;
+ pix_mp->width = 0;
+ if (pix_mp->plane_fmt[0].sizeimage)
+ ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage;
+ else
+ pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size =
+ DEF_CPB_SIZE;
+ pix_mp->plane_fmt[0].bytesperline = 0;
+ ctx->state = MFCINST_INIT;
+ ret = 0;
+ goto out;
+ } else {
+ mfc_err("Wrong type error for S_FMT : %d", f->type);
ret = -EINVAL;
goto out;
}
- if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) {
- mfc_err("Not supported format.\n");
- return -EINVAL;
- }
- ctx->src_fmt = fmt;
- ctx->codec_mode = fmt->codec_mode;
- mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode);
- pix_mp->height = 0;
- pix_mp->width = 0;
- if (pix_mp->plane_fmt[0].sizeimage)
- ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage;
- else
- pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size =
- DEF_CPB_SIZE;
- pix_mp->plane_fmt[0].bytesperline = 0;
- ctx->state = MFCINST_INIT;
+
out:
mfc_debug_leave();
return ret;
@@ -942,7 +926,7 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
psize[0] = ctx->luma_size;
psize[1] = ctx->chroma_size;
- if (IS_MFCV6(dev))
+ if (IS_MFCV6_PLUS(dev))
allocators[0] =
ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
else
@@ -1067,7 +1051,7 @@ static int s5p_mfc_stop_streaming(struct vb2_queue *q)
ctx->dpb_flush_flag = 1;
ctx->dec_dst_flag = 0;
spin_unlock_irqrestore(&dev->irqlock, flags);
- if (IS_MFCV6(dev) && (ctx->state == MFCINST_RUNNING)) {
+ if (IS_MFCV6_PLUS(dev) && (ctx->state == MFCINST_RUNNING)) {
ctx->state = MFCINST_FLUSH;
set_work_bit_irqsave(ctx);
s5p_mfc_clean_ctx_int_flags(ctx);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 2549967b2f8..41f5a3c10db 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -84,6 +84,13 @@ static struct s5p_mfc_fmt formats[] = {
.type = MFC_FMT_ENC,
.num_planes = 1,
},
+ {
+ .name = "VP8 Encoded Stream",
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .codec_mode = S5P_MFC_CODEC_VP8_ENC,
+ .type = MFC_FMT_ENC,
+ .num_planes = 1,
+ },
};
#define NUM_FORMATS ARRAY_SIZE(formats)
@@ -557,6 +564,60 @@ static struct mfc_control controls[] = {
.step = 1,
.default_value = 0,
},
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS,
+ .type = V4L2_CTRL_TYPE_INTEGER_MENU,
+ .maximum = V4L2_CID_MPEG_VIDEO_VPX_8_PARTITIONS,
+ .default_value = V4L2_CID_MPEG_VIDEO_VPX_1_PARTITION,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4,
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = 0,
+ .maximum = 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES,
+ .type = V4L2_CTRL_TYPE_INTEGER_MENU,
+ .maximum = V4L2_CID_MPEG_VIDEO_VPX_2_REF_FRAME,
+ .default_value = V4L2_CID_MPEG_VIDEO_VPX_1_REF_FRAME,
+ .menu_skip_mask = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 63,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = 7,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = 0,
+ .maximum = (1 << 16) - 1,
+ .step = 1,
+ .default_value = 0,
+ },
+ {
+ .id = V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .minimum = V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV,
+ .maximum = V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_REF_PERIOD,
+ .default_value = V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV,
+ .menu_skip_mask = 0,
+ },
};
#define NUM_CTRLS ARRAY_SIZE(controls)
@@ -663,7 +724,7 @@ static int enc_post_seq_start(struct s5p_mfc_ctx *ctx)
spin_unlock_irqrestore(&dev->irqlock, flags);
}
- if (!IS_MFCV6(dev)) {
+ if (!IS_MFCV6_PLUS(dev)) {
ctx->state = MFCINST_RUNNING;
if (s5p_mfc_ctx_ready(ctx))
set_work_bit_irqsave(ctx);
@@ -906,6 +967,7 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
+ struct s5p_mfc_dev *dev = video_drvdata(file);
struct s5p_mfc_fmt *fmt;
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
@@ -916,6 +978,11 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
return -EINVAL;
}
+ if (!IS_MFCV7(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) {
+ mfc_err("VP8 is supported only in MFC v7\n");
+ return -EINVAL;
+ }
+
if (pix_fmt_mp->plane_fmt[0].sizeimage == 0) {
mfc_err("must be set encoding output size\n");
return -EINVAL;
@@ -930,6 +997,18 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
return -EINVAL;
}
+ if (!IS_MFCV6_PLUS(dev)) {
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16) {
+ mfc_err("Not supported format.\n");
+ return -EINVAL;
+ }
+ } else if (IS_MFCV6_PLUS(dev)) {
+ if (fmt->fourcc == V4L2_PIX_FMT_NV12MT) {
+ mfc_err("Not supported format.\n");
+ return -EINVAL;
+ }
+ }
+
if (fmt->num_planes != pix_fmt_mp->num_planes) {
mfc_err("failed to try output format\n");
return -EINVAL;
@@ -947,7 +1026,6 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
struct s5p_mfc_dev *dev = video_drvdata(file);
struct s5p_mfc_ctx *ctx = fh_to_ctx(priv);
- struct s5p_mfc_fmt *fmt;
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
int ret = 0;
@@ -960,13 +1038,9 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
goto out;
}
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
- fmt = find_format(f, MFC_FMT_ENC);
- if (!fmt) {
- mfc_err("failed to set capture format\n");
- return -EINVAL;
- }
+ /* dst_fmt is validated by call to vidioc_try_fmt */
+ ctx->dst_fmt = find_format(f, MFC_FMT_ENC);
ctx->state = MFCINST_INIT;
- ctx->dst_fmt = fmt;
ctx->codec_mode = ctx->dst_fmt->codec_mode;
ctx->enc_dst_buf_size = pix_fmt_mp->plane_fmt[0].sizeimage;
pix_fmt_mp->plane_fmt[0].bytesperline = 0;
@@ -987,28 +1061,8 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
}
mfc_debug(2, "Got instance number: %d\n", ctx->inst_no);
} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- fmt = find_format(f, MFC_FMT_RAW);
- if (!fmt) {
- mfc_err("failed to set output format\n");
- return -EINVAL;
- }
-
- if (!IS_MFCV6(dev) &&
- (fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16)) {
- mfc_err("Not supported format.\n");
- return -EINVAL;
- } else if (IS_MFCV6(dev) &&
- (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) {
- mfc_err("Not supported format.\n");
- return -EINVAL;
- }
-
- if (fmt->num_planes != pix_fmt_mp->num_planes) {
- mfc_err("failed to set output format\n");
- ret = -EINVAL;
- goto out;
- }
- ctx->src_fmt = fmt;
+ /* src_fmt is validated by call to vidioc_try_fmt */
+ ctx->src_fmt = find_format(f, MFC_FMT_RAW);
ctx->img_width = pix_fmt_mp->width;
ctx->img_height = pix_fmt_mp->height;
mfc_debug(2, "codec number: %d\n", ctx->src_fmt->codec_mode);
@@ -1072,7 +1126,7 @@ static int vidioc_reqbufs(struct file *file, void *priv,
return -EINVAL;
}
- if (IS_MFCV6(dev)) {
+ if (IS_MFCV6_PLUS(dev)) {
/* Check for min encoder buffers */
if (ctx->pb_count &&
(reqbufs->count < ctx->pb_count)) {
@@ -1353,7 +1407,7 @@ static int s5p_mfc_enc_s_ctrl(struct v4l2_ctrl *ctrl)
S5P_FIMV_ENC_PROFILE_H264_BASELINE;
break;
case V4L2_MPEG_VIDEO_H264_PROFILE_CONSTRAINED_BASELINE:
- if (IS_MFCV6(dev))
+ if (IS_MFCV6_PLUS(dev))
p->codec.h264.profile =
S5P_FIMV_ENC_PROFILE_H264_CONSTRAINED_BASELINE;
else
@@ -1482,6 +1536,27 @@ static int s5p_mfc_enc_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:
p->codec.mpeg4.quarter_pixel = ctrl->val;
break;
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS:
+ p->codec.vp8.num_partitions = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4:
+ p->codec.vp8.imd_4x4 = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES:
+ p->codec.vp8.num_ref = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL:
+ p->codec.vp8.filter_level = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS:
+ p->codec.vp8.filter_sharpness = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD:
+ p->codec.vp8.golden_frame_ref_period = ctrl->val;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
+ p->codec.vp8.golden_frame_sel = ctrl->val;
+ break;
default:
v4l2_err(&dev->v4l2_dev, "Invalid control, id=%d, val=%d\n",
ctrl->id, ctrl->val);
@@ -1662,9 +1737,11 @@ static int s5p_mfc_queue_setup(struct vb2_queue *vq,
*buf_count = 1;
if (*buf_count > MFC_MAX_BUFFERS)
*buf_count = MFC_MAX_BUFFERS;
+
psize[0] = ctx->luma_size;
psize[1] = ctx->chroma_size;
- if (IS_MFCV6(dev)) {
+
+ if (IS_MFCV6_PLUS(dev)) {
allocators[0] =
ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX];
allocators[1] =
@@ -1773,7 +1850,8 @@ static int s5p_mfc_start_streaming(struct vb2_queue *q, unsigned int count)
struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
struct s5p_mfc_dev *dev = ctx->dev;
- if (IS_MFCV6(dev) && (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) {
+ if (IS_MFCV6_PLUS(dev) &&
+ (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) {
if ((ctx->state == MFCINST_GOT_INST) &&
(dev->curr_ctx == ctx->num) && dev->hw_lock) {
@@ -1927,7 +2005,9 @@ int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx)
ctx->ctrls[i] = v4l2_ctrl_new_custom(&ctx->ctrl_handler,
&cfg, NULL);
} else {
- if (controls[i].type == V4L2_CTRL_TYPE_MENU) {
+ if ((controls[i].type == V4L2_CTRL_TYPE_MENU) ||
+ (controls[i].type ==
+ V4L2_CTRL_TYPE_INTEGER_MENU)) {
ctx->ctrls[i] = v4l2_ctrl_new_std_menu(
&ctx->ctrl_handler,
&s5p_mfc_enc_ctrl_ops, controls[i].id,
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
index 10f8ac37cec..3c01c339d69 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr.c
@@ -21,7 +21,7 @@ static struct s5p_mfc_hw_ops *s5p_mfc_ops;
void s5p_mfc_init_hw_ops(struct s5p_mfc_dev *dev)
{
- if (IS_MFCV6(dev)) {
+ if (IS_MFCV6_PLUS(dev)) {
s5p_mfc_ops = s5p_mfc_init_hw_ops_v6();
dev->warn_start = S5P_FIMV_ERR_WARNINGS_START_V6;
} else {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index 66f0d042357..461358c4a79 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -80,6 +80,7 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
ctx->tmv_buffer_size = S5P_FIMV_NUM_TMV_BUFFERS_V6 *
ALIGN(S5P_FIMV_TMV_BUFFER_SIZE_V6(mb_width, mb_height),
S5P_FIMV_TMV_BUFFER_ALIGN_V6);
+
ctx->luma_dpb_size = ALIGN((mb_width * mb_height) *
S5P_FIMV_LUMA_MB_TO_PIXEL_V6,
S5P_FIMV_LUMA_DPB_BUFFER_ALIGN_V6);
@@ -112,10 +113,18 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
(ctx->mv_count * ctx->mv_size);
break;
case S5P_MFC_CODEC_MPEG4_DEC:
- ctx->scratch_buf_size =
- S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_DEC_V6(
- mb_width,
- mb_height);
+ if (IS_MFCV7(dev)) {
+ ctx->scratch_buf_size =
+ S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_DEC_V7(
+ mb_width,
+ mb_height);
+ } else {
+ ctx->scratch_buf_size =
+ S5P_FIMV_SCRATCH_BUF_SIZE_MPEG4_DEC_V6(
+ mb_width,
+ mb_height);
+ }
+
ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
ctx->bank1.size = ctx->scratch_buf_size;
@@ -179,6 +188,19 @@ static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
ctx->chroma_dpb_size + ctx->me_buffer_size));
ctx->bank2.size = 0;
break;
+ case S5P_MFC_CODEC_VP8_ENC:
+ ctx->scratch_buf_size =
+ S5P_FIMV_SCRATCH_BUF_SIZE_VP8_ENC_V7(
+ mb_width,
+ mb_height);
+ ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size,
+ S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
+ ctx->bank1.size =
+ ctx->scratch_buf_size + ctx->tmv_buffer_size +
+ (ctx->pb_count * (ctx->luma_dpb_size +
+ ctx->chroma_dpb_size + ctx->me_buffer_size));
+ ctx->bank2.size = 0;
+ break;
default:
break;
}
@@ -228,6 +250,7 @@ static int s5p_mfc_alloc_instance_buffer_v6(struct s5p_mfc_ctx *ctx)
break;
case S5P_MFC_CODEC_MPEG4_ENC:
case S5P_MFC_CODEC_H263_ENC:
+ case S5P_MFC_CODEC_VP8_ENC:
ctx->ctx.size = buf_size->other_enc_ctx;
break;
default:
@@ -329,6 +352,12 @@ static void s5p_mfc_enc_calc_src_size_v6(struct s5p_mfc_ctx *ctx)
ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN_V6);
ctx->luma_size = ALIGN((mb_width * mb_height) * 256, 256);
ctx->chroma_size = ALIGN((mb_width * mb_height) * 128, 256);
+
+ /* MFCv7 needs pad bytes for Luma and Chroma */
+ if (IS_MFCV7(ctx->dev)) {
+ ctx->luma_size += MFC_LUMA_PAD_BYTES_V7;
+ ctx->chroma_size += MFC_CHROMA_PAD_BYTES_V7;
+ }
}
/* Set registers for decoding stream buffer */
@@ -453,8 +482,13 @@ static void s5p_mfc_set_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
{
struct s5p_mfc_dev *dev = ctx->dev;
- WRITEL(y_addr, S5P_FIMV_E_SOURCE_LUMA_ADDR_V6); /* 256B align */
- WRITEL(c_addr, S5P_FIMV_E_SOURCE_CHROMA_ADDR_V6);
+ if (IS_MFCV7(dev)) {
+ WRITEL(y_addr, S5P_FIMV_E_SOURCE_FIRST_ADDR_V7);
+ WRITEL(c_addr, S5P_FIMV_E_SOURCE_SECOND_ADDR_V7);
+ } else {
+ WRITEL(y_addr, S5P_FIMV_E_SOURCE_LUMA_ADDR_V6);
+ WRITEL(c_addr, S5P_FIMV_E_SOURCE_CHROMA_ADDR_V6);
+ }
mfc_debug(2, "enc src y buf addr: 0x%08lx\n", y_addr);
mfc_debug(2, "enc src c buf addr: 0x%08lx\n", c_addr);
@@ -466,8 +500,13 @@ static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
struct s5p_mfc_dev *dev = ctx->dev;
unsigned long enc_recon_y_addr, enc_recon_c_addr;
- *y_addr = READL(S5P_FIMV_E_ENCODED_SOURCE_LUMA_ADDR_V6);
- *c_addr = READL(S5P_FIMV_E_ENCODED_SOURCE_CHROMA_ADDR_V6);
+ if (IS_MFCV7(dev)) {
+ *y_addr = READL(S5P_FIMV_E_ENCODED_SOURCE_FIRST_ADDR_V7);
+ *c_addr = READL(S5P_FIMV_E_ENCODED_SOURCE_SECOND_ADDR_V7);
+ } else {
+ *y_addr = READL(S5P_FIMV_E_ENCODED_SOURCE_LUMA_ADDR_V6);
+ *c_addr = READL(S5P_FIMV_E_ENCODED_SOURCE_CHROMA_ADDR_V6);
+ }
enc_recon_y_addr = READL(S5P_FIMV_E_RECON_LUMA_DPB_ADDR_V6);
enc_recon_c_addr = READL(S5P_FIMV_E_RECON_CHROMA_DPB_ADDR_V6);
@@ -1140,6 +1179,80 @@ static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx)
return 0;
}
+static int s5p_mfc_set_enc_params_vp8(struct s5p_mfc_ctx *ctx)
+{
+ struct s5p_mfc_dev *dev = ctx->dev;
+ struct s5p_mfc_enc_params *p = &ctx->enc_params;
+ struct s5p_mfc_vp8_enc_params *p_vp8 = &p->codec.vp8;
+ unsigned int reg = 0;
+ unsigned int val = 0;
+
+ mfc_debug_enter();
+
+ s5p_mfc_set_enc_params(ctx);
+
+ /* pictype : number of B */
+ reg = READL(S5P_FIMV_E_GOP_CONFIG_V6);
+ reg &= ~(0x3 << 16);
+ reg |= ((p->num_b_frame & 0x3) << 16);
+ WRITEL(reg, S5P_FIMV_E_GOP_CONFIG_V6);
+
+ /* profile & level */
+ reg = 0;
+ /** profile */
+ reg |= (0x1 << 4);
+ WRITEL(reg, S5P_FIMV_E_PICTURE_PROFILE_V6);
+
+ /* rate control config. */
+ reg = READL(S5P_FIMV_E_RC_CONFIG_V6);
+ /** macroblock level rate control */
+ reg &= ~(0x1 << 8);
+ reg |= ((p->rc_mb & 0x1) << 8);
+ WRITEL(reg, S5P_FIMV_E_RC_CONFIG_V6);
+
+ /* frame rate */
+ if (p->rc_frame && p->rc_framerate_num && p->rc_framerate_denom) {
+ reg = 0;
+ reg |= ((p->rc_framerate_num & 0xFFFF) << 16);
+ reg |= p->rc_framerate_denom & 0xFFFF;
+ WRITEL(reg, S5P_FIMV_E_RC_FRAME_RATE_V6);
+ }
+
+ /* vbv buffer size */
+ if (p->frame_skip_mode ==
+ V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
+ WRITEL(p->vbv_size & 0xFFFF, S5P_FIMV_E_VBV_BUFFER_SIZE_V6);
+
+ if (p->rc_frame)
+ WRITEL(p->vbv_delay, S5P_FIMV_E_VBV_INIT_DELAY_V6);
+ }
+
+ /* VP8 specific params */
+ reg = 0;
+ reg |= (p_vp8->imd_4x4 & 0x1) << 10;
+ switch (p_vp8->num_partitions) {
+ case V4L2_CID_MPEG_VIDEO_VPX_1_PARTITION:
+ val = 0;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_2_PARTITIONS:
+ val = 2;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_4_PARTITIONS:
+ val = 4;
+ break;
+ case V4L2_CID_MPEG_VIDEO_VPX_8_PARTITIONS:
+ val = 8;
+ break;
+ }
+ reg |= (val & 0xF) << 3;
+ reg |= (p_vp8->num_ref & 0x2);
+ WRITEL(reg, S5P_FIMV_E_VP8_OPTIONS_V7);
+
+ mfc_debug_leave();
+
+ return 0;
+}
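
The VP8-specific block at the end of s5p_mfc_set_enc_params_vp8() packs three controls into S5P_FIMV_E_VP8_OPTIONS_V7: bit 10 carries the intra-4x4 disable flag, bits 3..6 the mapped partition count, and bit 1 the reference-frame selection. A worked example of that packing with assumed control values (the encodings below are stand-ins, not the real V4L2 enum definitions):

/* Sketch only: the E_VP8_OPTIONS bit packing from above, fed with assumed values. */
#include <stdio.h>

int main(void)
{
	unsigned int imd_4x4 = 0;	/* intra 4x4 prediction not disabled */
	unsigned int val = 4;		/* partition count already mapped by the switch */
	unsigned int num_ref = 2;	/* assumed encoding for "2 reference frames" */
	unsigned int reg = 0;

	reg |= (imd_4x4 & 0x1) << 10;
	reg |= (val & 0xF) << 3;
	reg |= (num_ref & 0x2);

	printf("E_VP8_OPTIONS = 0x%x\n", reg);	/* 0x22 for these values */
	return 0;
}
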
+
/* Initialize decoding */
static int s5p_mfc_init_decode_v6(struct s5p_mfc_ctx *ctx)
{
@@ -1166,6 +1279,12 @@ static int s5p_mfc_init_decode_v6(struct s5p_mfc_ctx *ctx)
reg |= (0x1 << S5P_FIMV_D_OPT_DDELAY_EN_SHIFT_V6);
WRITEL(ctx->display_delay, S5P_FIMV_D_DISPLAY_DELAY_V6);
}
+
+ if (IS_MFCV7(dev)) {
+ WRITEL(reg, S5P_FIMV_D_DEC_OPTIONS_V6);
+ reg = 0;
+ }
+
/* Setup loop filter, for decoding this is only valid for MPEG4 */
if (ctx->codec_mode == S5P_MFC_CODEC_MPEG4_DEC) {
mfc_debug(2, "Set loop filter to: %d\n",
@@ -1176,7 +1295,10 @@ static int s5p_mfc_init_decode_v6(struct s5p_mfc_ctx *ctx)
if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16)
reg |= (0x1 << S5P_FIMV_D_OPT_TILE_MODE_SHIFT_V6);
- WRITEL(reg, S5P_FIMV_D_DEC_OPTIONS_V6);
+ if (IS_MFCV7(dev))
+ WRITEL(reg, S5P_FIMV_D_INIT_BUFFER_OPTIONS_V7);
+ else
+ WRITEL(reg, S5P_FIMV_D_DEC_OPTIONS_V6);
/* 0: NV12(CbCr), 1: NV21(CrCb) */
if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_NV21M)
@@ -1184,6 +1306,7 @@ static int s5p_mfc_init_decode_v6(struct s5p_mfc_ctx *ctx)
else
WRITEL(0x0, S5P_FIMV_PIXEL_FORMAT_V6);
+
/* sei parse */
WRITEL(ctx->sei_fp_parse & 0x1, S5P_FIMV_D_SEI_ENABLE_V6);
@@ -1248,12 +1371,20 @@ static int s5p_mfc_init_encode_v6(struct s5p_mfc_ctx *ctx)
s5p_mfc_set_enc_params_mpeg4(ctx);
else if (ctx->codec_mode == S5P_MFC_CODEC_H263_ENC)
s5p_mfc_set_enc_params_h263(ctx);
+ else if (ctx->codec_mode == S5P_MFC_CODEC_VP8_ENC)
+ s5p_mfc_set_enc_params_vp8(ctx);
else {
mfc_err("Unknown codec for encoding (%x).\n",
ctx->codec_mode);
return -EINVAL;
}
+ /* Set stride lengths */
+ if (IS_MFCV7(dev)) {
+ WRITEL(ctx->img_width, S5P_FIMV_E_SOURCE_FIRST_STRIDE_V7);
+ WRITEL(ctx->img_width, S5P_FIMV_E_SOURCE_SECOND_STRIDE_V7);
+ }
+
WRITEL(ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6);
s5p_mfc_hw_call(dev->mfc_cmds, cmd_host2risc, dev,
S5P_FIMV_CH_SEQ_HEADER_V6, NULL);
diff --git a/drivers/media/platform/s5p-tv/hdmi_drv.c b/drivers/media/platform/s5p-tv/hdmi_drv.c
index 1b34c362985..534722c04ec 100644
--- a/drivers/media/platform/s5p-tv/hdmi_drv.c
+++ b/drivers/media/platform/s5p-tv/hdmi_drv.c
@@ -37,6 +37,7 @@
#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-dv-timings.h>
#include "regs-hdmi.h"
@@ -625,7 +626,7 @@ static int hdmi_s_dv_timings(struct v4l2_subdev *sd,
int i;
for (i = 0; i < ARRAY_SIZE(hdmi_timings); i++)
- if (v4l_match_dv_timings(&hdmi_timings[i].dv_timings,
+ if (v4l2_match_dv_timings(&hdmi_timings[i].dv_timings,
timings, 0))
break;
if (i == ARRAY_SIZE(hdmi_timings)) {
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index aa4cca371cb..744e43b480b 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -359,7 +359,7 @@ static int sh_veu_context_init(struct sh_veu_dev *veu)
veu->m2m_ctx = v4l2_m2m_ctx_init(veu->m2m_dev, veu,
sh_veu_queue_init);
- return PTR_RET(veu->m2m_ctx);
+ return PTR_ERR_OR_ZERO(veu->m2m_ctx);
}
static int sh_veu_querycap(struct file *file, void *priv,
diff --git a/drivers/media/platform/soc_camera/Kconfig b/drivers/media/platform/soc_camera/Kconfig
index 626dcccc37d..af39c466555 100644
--- a/drivers/media/platform/soc_camera/Kconfig
+++ b/drivers/media/platform/soc_camera/Kconfig
@@ -44,6 +44,14 @@ config VIDEO_PXA27x
---help---
This is a v4l2 driver for the PXA27x Quick Capture Interface
+config VIDEO_RCAR_VIN
+ tristate "R-Car Video Input (VIN) support"
+ depends on VIDEO_DEV && SOC_CAMERA
+ select VIDEOBUF2_DMA_CONTIG
+ select SOC_CAMERA_SCALE_CROP
+ ---help---
+ This is a v4l2 driver for the R-Car VIN Interface
+
config VIDEO_SH_MOBILE_CSI2
tristate "SuperH Mobile MIPI CSI-2 Interface driver"
depends on VIDEO_DEV && SOC_CAMERA && HAVE_CLK
diff --git a/drivers/media/platform/soc_camera/Makefile b/drivers/media/platform/soc_camera/Makefile
index 39186224c16..8aed26d7a64 100644
--- a/drivers/media/platform/soc_camera/Makefile
+++ b/drivers/media/platform/soc_camera/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_VIDEO_OMAP1) += omap1_camera.o
obj-$(CONFIG_VIDEO_PXA27x) += pxa_camera.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CEU) += sh_mobile_ceu_camera.o
obj-$(CONFIG_VIDEO_SH_MOBILE_CSI2) += sh_mobile_csi2.o
+obj-$(CONFIG_VIDEO_RCAR_VIN) += rcar_vin.o
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c
index 1047e3e8db7..8f9f6211c52 100644
--- a/drivers/media/platform/soc_camera/mx3_camera.c
+++ b/drivers/media/platform/soc_camera/mx3_camera.c
@@ -672,7 +672,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, unsigned int id
fmt = soc_mbus_get_fmtdesc(code);
if (!fmt) {
dev_warn(icd->parent,
- "Unsupported format code #%u: %d\n", idx, code);
+ "Unsupported format code #%u: 0x%x\n", idx, code);
return 0;
}
@@ -688,7 +688,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, unsigned int id
xlate->host_fmt = &mx3_camera_formats[0];
xlate->code = code;
xlate++;
- dev_dbg(dev, "Providing format %s using code %d\n",
+ dev_dbg(dev, "Providing format %s using code 0x%x\n",
mx3_camera_formats[0].name, code);
}
break;
@@ -698,7 +698,7 @@ static int mx3_camera_get_formats(struct soc_camera_device *icd, unsigned int id
xlate->host_fmt = &mx3_camera_formats[1];
xlate->code = code;
xlate++;
- dev_dbg(dev, "Providing format %s using code %d\n",
+ dev_dbg(dev, "Providing format %s using code 0x%x\n",
mx3_camera_formats[1].name, code);
}
break;
@@ -1144,6 +1144,7 @@ static struct soc_camera_host_ops mx3_soc_camera_host_ops = {
static int mx3_camera_probe(struct platform_device *pdev)
{
+ struct mx3_camera_pdata *pdata = pdev->dev.platform_data;
struct mx3_camera_dev *mx3_cam;
struct resource *res;
void __iomem *base;
@@ -1151,26 +1152,25 @@ static int mx3_camera_probe(struct platform_device *pdev)
struct soc_camera_host *soc_host;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- err = -ENODEV;
- goto egetres;
- }
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (!pdata)
+ return -EINVAL;
- mx3_cam = vzalloc(sizeof(*mx3_cam));
+ mx3_cam = devm_kzalloc(&pdev->dev, sizeof(*mx3_cam), GFP_KERNEL);
if (!mx3_cam) {
dev_err(&pdev->dev, "Could not allocate mx3 camera object\n");
- err = -ENOMEM;
- goto ealloc;
+ return -ENOMEM;
}
- mx3_cam->clk = clk_get(&pdev->dev, NULL);
- if (IS_ERR(mx3_cam->clk)) {
- err = PTR_ERR(mx3_cam->clk);
- goto eclkget;
- }
+ mx3_cam->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(mx3_cam->clk))
+ return PTR_ERR(mx3_cam->clk);
- mx3_cam->pdata = pdev->dev.platform_data;
- mx3_cam->platform_flags = mx3_cam->pdata->flags;
+ mx3_cam->pdata = pdata;
+ mx3_cam->platform_flags = pdata->flags;
if (!(mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_MASK)) {
/*
* Platform hasn't set available data widths. This is bad.
@@ -1189,7 +1189,7 @@ static int mx3_camera_probe(struct platform_device *pdev)
if (mx3_cam->platform_flags & MX3_CAMERA_DATAWIDTH_15)
mx3_cam->width_flags |= 1 << 14;
- mx3_cam->mclk = mx3_cam->pdata->mclk_10khz * 10000;
+ mx3_cam->mclk = pdata->mclk_10khz * 10000;
if (!mx3_cam->mclk) {
dev_warn(&pdev->dev,
"mclk_10khz == 0! Please, fix your platform data. "
@@ -1201,13 +1201,6 @@ static int mx3_camera_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&mx3_cam->capture);
spin_lock_init(&mx3_cam->lock);
- base = ioremap(res->start, resource_size(res));
- if (!base) {
- pr_err("Couldn't map %x@%x\n", resource_size(res), res->start);
- err = -ENOMEM;
- goto eioremap;
- }
-
mx3_cam->base = base;
soc_host = &mx3_cam->soc_host;
@@ -1218,9 +1211,12 @@ static int mx3_camera_probe(struct platform_device *pdev)
soc_host->nr = pdev->id;
mx3_cam->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
- if (IS_ERR(mx3_cam->alloc_ctx)) {
- err = PTR_ERR(mx3_cam->alloc_ctx);
- goto eallocctx;
+ if (IS_ERR(mx3_cam->alloc_ctx))
+ return PTR_ERR(mx3_cam->alloc_ctx);
+
+ if (pdata->asd_sizes) {
+ soc_host->asd = pdata->asd;
+ soc_host->asd_sizes = pdata->asd_sizes;
}
err = soc_camera_host_register(soc_host);
@@ -1234,14 +1230,6 @@ static int mx3_camera_probe(struct platform_device *pdev)
ecamhostreg:
vb2_dma_contig_cleanup_ctx(mx3_cam->alloc_ctx);
-eallocctx:
- iounmap(base);
-eioremap:
- clk_put(mx3_cam->clk);
-eclkget:
- vfree(mx3_cam);
-ealloc:
-egetres:
return err;
}
@@ -1251,12 +1239,8 @@ static int mx3_camera_remove(struct platform_device *pdev)
struct mx3_camera_dev *mx3_cam = container_of(soc_host,
struct mx3_camera_dev, soc_host);
- clk_put(mx3_cam->clk);
-
soc_camera_host_unregister(soc_host);
- iounmap(mx3_cam->base);
-
/*
* The channel has either not been allocated,
* or should have been released
@@ -1266,8 +1250,6 @@ static int mx3_camera_remove(struct platform_device *pdev)
vb2_dma_contig_cleanup_ctx(mx3_cam->alloc_ctx);
- vfree(mx3_cam);
-
dmaengine_put();
return 0;
@@ -1276,6 +1258,7 @@ static int mx3_camera_remove(struct platform_device *pdev)
static struct platform_driver mx3_camera_driver = {
.driver = {
.name = MX3_CAM_DRV_NAME,
+ .owner = THIS_MODULE,
},
.probe = mx3_camera_probe,
.remove = mx3_camera_remove,
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
new file mode 100644
index 00000000000..d02a7e0b773
--- /dev/null
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -0,0 +1,1486 @@
+/*
+ * SoC-camera host driver for Renesas R-Car VIN unit
+ *
+ * Copyright (C) 2011-2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
+ *
+ * Based on V4L2 Driver for SuperH Mobile CEU interface "sh_mobile_ceu_camera.c"
+ *
+ * Copyright (C) 2008 Magnus Damm
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_data/camera-rcar.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/slab.h>
+#include <linux/videodev2.h>
+
+#include <media/soc_camera.h>
+#include <media/soc_mediabus.h>
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-mediabus.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "soc_scale_crop.h"
+
+#define DRV_NAME "rcar_vin"
+
+/* Register offsets for R-Car VIN */
+#define VNMC_REG 0x00 /* Video n Main Control Register */
+#define VNMS_REG 0x04 /* Video n Module Status Register */
+#define VNFC_REG 0x08 /* Video n Frame Capture Register */
+#define VNSLPRC_REG 0x0C /* Video n Start Line Pre-Clip Register */
+#define VNELPRC_REG 0x10 /* Video n End Line Pre-Clip Register */
+#define VNSPPRC_REG 0x14 /* Video n Start Pixel Pre-Clip Register */
+#define VNEPPRC_REG 0x18 /* Video n End Pixel Pre-Clip Register */
+#define VNSLPOC_REG 0x1C /* Video n Start Line Post-Clip Register */
+#define VNELPOC_REG 0x20 /* Video n End Line Post-Clip Register */
+#define VNSPPOC_REG 0x24 /* Video n Start Pixel Post-Clip Register */
+#define VNEPPOC_REG 0x28 /* Video n End Pixel Post-Clip Register */
+#define VNIS_REG 0x2C /* Video n Image Stride Register */
+#define VNMB_REG(m) (0x30 + ((m) << 2)) /* Video n Memory Base m Register */
+#define VNIE_REG 0x40 /* Video n Interrupt Enable Register */
+#define VNINTS_REG 0x44 /* Video n Interrupt Status Register */
+#define VNSI_REG 0x48 /* Video n Scanline Interrupt Register */
+#define VNMTC_REG 0x4C /* Video n Memory Transfer Control Register */
+#define VNYS_REG 0x50 /* Video n Y Scale Register */
+#define VNXS_REG 0x54 /* Video n X Scale Register */
+#define VNDMR_REG 0x58 /* Video n Data Mode Register */
+#define VNDMR2_REG 0x5C /* Video n Data Mode Register 2 */
+#define VNUVAOF_REG 0x60 /* Video n UV Address Offset Register */
+
+/* Register bit fields for R-Car VIN */
+/* Video n Main Control Register bits */
+#define VNMC_FOC (1 << 21)
+#define VNMC_YCAL (1 << 19)
+#define VNMC_INF_YUV8_BT656 (0 << 16)
+#define VNMC_INF_YUV8_BT601 (1 << 16)
+#define VNMC_INF_YUV16 (5 << 16)
+#define VNMC_VUP (1 << 10)
+#define VNMC_IM_ODD (0 << 3)
+#define VNMC_IM_ODD_EVEN (1 << 3)
+#define VNMC_IM_EVEN (2 << 3)
+#define VNMC_IM_FULL (3 << 3)
+#define VNMC_BPS (1 << 1)
+#define VNMC_ME (1 << 0)
+
+/* Video n Module Status Register bits */
+#define VNMS_FBS_MASK (3 << 3)
+#define VNMS_FBS_SHIFT 3
+#define VNMS_AV (1 << 1)
+#define VNMS_CA (1 << 0)
+
+/* Video n Frame Capture Register bits */
+#define VNFC_C_FRAME (1 << 1)
+#define VNFC_S_FRAME (1 << 0)
+
+/* Video n Interrupt Enable Register bits */
+#define VNIE_FIE (1 << 4)
+#define VNIE_EFE (1 << 1)
+
+/* Video n Data Mode Register bits */
+#define VNDMR_EXRGB (1 << 8)
+#define VNDMR_BPSM (1 << 4)
+#define VNDMR_DTMD_YCSEP (1 << 1)
+#define VNDMR_DTMD_ARGB1555 (1 << 0)
+
+/* Video n Data Mode Register 2 bits */
+#define VNDMR2_VPS (1 << 30)
+#define VNDMR2_HPS (1 << 29)
+#define VNDMR2_FTEV (1 << 17)
+
+#define VIN_MAX_WIDTH 2048
+#define VIN_MAX_HEIGHT 2048
+
+enum chip_id {
+ RCAR_H1,
+ RCAR_M1,
+ RCAR_E1,
+};
+
+enum rcar_vin_state {
+ STOPPED = 0,
+ RUNNING,
+ STOPPING,
+};
+
+struct rcar_vin_priv {
+ void __iomem *base;
+ spinlock_t lock;
+ int sequence;
+ /* State of the VIN module in capturing mode */
+ enum rcar_vin_state state;
+ struct rcar_vin_platform_data *pdata;
+ struct soc_camera_host ici;
+ struct list_head capture;
+#define MAX_BUFFER_NUM 3
+ struct vb2_buffer *queue_buf[MAX_BUFFER_NUM];
+ struct vb2_alloc_ctx *alloc_ctx;
+ enum v4l2_field field;
+ unsigned int vb_count;
+ unsigned int nr_hw_slots;
+ bool request_to_stop;
+ struct completion capture_stop;
+ enum chip_id chip;
+};
+
+#define is_continuous_transfer(priv) (priv->vb_count > MAX_BUFFER_NUM)
+
+struct rcar_vin_buffer {
+ struct vb2_buffer vb;
+ struct list_head list;
+};
+
+#define to_buf_list(vb2_buffer) (&container_of(vb2_buffer, \
+ struct rcar_vin_buffer, \
+ vb)->list)
+
+struct rcar_vin_cam {
+ /* VIN offsets within the camera output, before the VIN scaler */
+ unsigned int vin_left;
+ unsigned int vin_top;
+ /* Client output, as seen by the VIN */
+ unsigned int width;
+ unsigned int height;
+ /*
+ * User window from S_CROP / G_CROP, produced by client cropping and
+ * scaling, VIN scaling and VIN cropping, mapped back onto the client
+ * input window
+ */
+ struct v4l2_rect subrect;
+ /* Camera cropping rectangle */
+ struct v4l2_rect rect;
+ const struct soc_mbus_pixelfmt *extra_fmt;
+};
+
+/*
+ * .queue_setup() is called to check whether the driver can accept the requested
+ * number of buffers and to fill in plane sizes for the current frame format if
+ * required
+ */
+static int rcar_vin_videobuf_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt,
+ unsigned int *count,
+ unsigned int *num_planes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct rcar_vin_priv *priv = ici->priv;
+
+ if (fmt) {
+ const struct soc_camera_format_xlate *xlate;
+ unsigned int bytes_per_line;
+ int ret;
+
+ xlate = soc_camera_xlate_by_fourcc(icd,
+ fmt->fmt.pix.pixelformat);
+ if (!xlate)
+ return -EINVAL;
+ ret = soc_mbus_bytes_per_line(fmt->fmt.pix.width,
+ xlate->host_fmt);
+ if (ret < 0)
+ return ret;
+
+ bytes_per_line = max_t(u32, fmt->fmt.pix.bytesperline, ret);
+
+ ret = soc_mbus_image_size(xlate->host_fmt, bytes_per_line,
+ fmt->fmt.pix.height);
+ if (ret < 0)
+ return ret;
+
+ sizes[0] = max_t(u32, fmt->fmt.pix.sizeimage, ret);
+ } else {
+ /* Called from VIDIOC_REQBUFS or in compatibility mode */
+ sizes[0] = icd->sizeimage;
+ }
+
+ alloc_ctxs[0] = priv->alloc_ctx;
+
+ if (!vq->num_buffers)
+ priv->sequence = 0;
+
+ if (!*count)
+ *count = 2;
+ priv->vb_count = *count;
+
+ *num_planes = 1;
+
+ /* Number of hardware slots */
+ if (is_continuous_transfer(priv))
+ priv->nr_hw_slots = MAX_BUFFER_NUM;
+ else
+ priv->nr_hw_slots = 1;
+
+ dev_dbg(icd->parent, "count=%d, size=%u\n", *count, sizes[0]);
+
+ return 0;
+}
+
+static int rcar_vin_setup(struct rcar_vin_priv *priv)
+{
+ struct soc_camera_device *icd = priv->ici.icd;
+ struct rcar_vin_cam *cam = icd->host_priv;
+ u32 vnmc, dmr, interrupts;
+ bool progressive = false, output_is_yuv = false;
+
+ switch (priv->field) {
+ case V4L2_FIELD_TOP:
+ vnmc = VNMC_IM_ODD;
+ break;
+ case V4L2_FIELD_BOTTOM:
+ vnmc = VNMC_IM_EVEN;
+ break;
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_INTERLACED_TB:
+ vnmc = VNMC_IM_FULL;
+ break;
+ case V4L2_FIELD_INTERLACED_BT:
+ vnmc = VNMC_IM_FULL | VNMC_FOC;
+ break;
+ case V4L2_FIELD_NONE:
+ if (is_continuous_transfer(priv)) {
+ vnmc = VNMC_IM_ODD_EVEN;
+ progressive = true;
+ } else {
+ vnmc = VNMC_IM_ODD;
+ }
+ break;
+ default:
+ vnmc = VNMC_IM_ODD;
+ break;
+ }
+
+ /* input interface */
+ switch (icd->current_fmt->code) {
+ case V4L2_MBUS_FMT_YUYV8_1X16:
+ /* BT.601/BT.1358 16bit YCbCr422 */
+ vnmc |= VNMC_INF_YUV16;
+ break;
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ /* BT.656 8bit YCbCr422 or BT.601 8bit YCbCr422 */
+ vnmc |= priv->pdata->flags & RCAR_VIN_BT656 ?
+ VNMC_INF_YUV8_BT656 : VNMC_INF_YUV8_BT601;
+ default:
+ break;
+ }
+
+ /* output format */
+ switch (icd->current_fmt->host_fmt->fourcc) {
+ case V4L2_PIX_FMT_NV16:
+ iowrite32(ALIGN(cam->width * cam->height, 0x80),
+ priv->base + VNUVAOF_REG);
+ dmr = VNDMR_DTMD_YCSEP;
+ output_is_yuv = true;
+ break;
+ case V4L2_PIX_FMT_YUYV:
+ dmr = VNDMR_BPSM;
+ output_is_yuv = true;
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ dmr = 0;
+ output_is_yuv = true;
+ break;
+ case V4L2_PIX_FMT_RGB555X:
+ dmr = VNDMR_DTMD_ARGB1555;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ dmr = 0;
+ break;
+ case V4L2_PIX_FMT_RGB32:
+ if (priv->chip == RCAR_H1 || priv->chip == RCAR_E1) {
+ dmr = VNDMR_EXRGB;
+ break;
+ }
+ default:
+ dev_warn(icd->parent, "Invalid fourcc format (0x%x)\n",
+ icd->current_fmt->host_fmt->fourcc);
+ return -EINVAL;
+ }
+
+ /* Always update on field change */
+ vnmc |= VNMC_VUP;
+
+ /* If input and output use the same colorspace, use bypass mode */
+ if (output_is_yuv)
+ vnmc |= VNMC_BPS;
+
+ /* progressive or interlaced mode */
+ interrupts = progressive ? VNIE_FIE | VNIE_EFE : VNIE_EFE;
+
+ /* ack interrupts */
+ iowrite32(interrupts, priv->base + VNINTS_REG);
+ /* enable interrupts */
+ iowrite32(interrupts, priv->base + VNIE_REG);
+ /* start capturing */
+ iowrite32(dmr, priv->base + VNDMR_REG);
+ iowrite32(vnmc | VNMC_ME, priv->base + VNMC_REG);
+
+ return 0;
+}
+
+static void rcar_vin_capture(struct rcar_vin_priv *priv)
+{
+ if (is_continuous_transfer(priv))
+ /* Continuous Frame Capture Mode */
+ iowrite32(VNFC_C_FRAME, priv->base + VNFC_REG);
+ else
+ /* Single Frame Capture Mode */
+ iowrite32(VNFC_S_FRAME, priv->base + VNFC_REG);
+}
+
+static void rcar_vin_request_capture_stop(struct rcar_vin_priv *priv)
+{
+ priv->state = STOPPING;
+
+ /* set continuous & single transfer off */
+ iowrite32(0, priv->base + VNFC_REG);
+ /* disable capture (release DMA buffer), reset */
+ iowrite32(ioread32(priv->base + VNMC_REG) & ~VNMC_ME,
+ priv->base + VNMC_REG);
+
+ /* update the status if stopped already */
+ if (!(ioread32(priv->base + VNMS_REG) & VNMS_CA))
+ priv->state = STOPPED;
+}
+
+static int rcar_vin_get_free_hw_slot(struct rcar_vin_priv *priv)
+{
+ int slot;
+
+ for (slot = 0; slot < priv->nr_hw_slots; slot++)
+ if (priv->queue_buf[slot] == NULL)
+ return slot;
+
+ return -1;
+}
+
+static int rcar_vin_hw_ready(struct rcar_vin_priv *priv)
+{
+ /* Check whether all HW slots are filled */
+ return rcar_vin_get_free_hw_slot(priv) < 0 ? 1 : 0;
+}
+
+/* Moves a buffer from the queue to the HW slots */
+static int rcar_vin_fill_hw_slot(struct rcar_vin_priv *priv)
+{
+ struct vb2_buffer *vb;
+ dma_addr_t phys_addr_top;
+ int slot;
+
+ if (list_empty(&priv->capture))
+ return 0;
+
+ /* Find a free HW slot */
+ slot = rcar_vin_get_free_hw_slot(priv);
+ if (slot < 0)
+ return 0;
+
+ vb = &list_entry(priv->capture.next, struct rcar_vin_buffer, list)->vb;
+ list_del_init(to_buf_list(vb));
+ priv->queue_buf[slot] = vb;
+ phys_addr_top = vb2_dma_contig_plane_dma_addr(vb, 0);
+ iowrite32(phys_addr_top, priv->base + VNMB_REG(slot));
+
+ return 1;
+}
+
+static void rcar_vin_videobuf_queue(struct vb2_buffer *vb)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct rcar_vin_priv *priv = ici->priv;
+ unsigned long size;
+
+ size = icd->sizeimage;
+
+ if (vb2_plane_size(vb, 0) < size) {
+ dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
+ vb->v4l2_buf.index, vb2_plane_size(vb, 0), size);
+ goto error;
+ }
+
+ vb2_set_plane_payload(vb, 0, size);
+
+ dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
+ vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));
+
+ spin_lock_irq(&priv->lock);
+
+ list_add_tail(to_buf_list(vb), &priv->capture);
+ rcar_vin_fill_hw_slot(priv);
+
+ /* If we weren't running, and have enough buffers, start capturing! */
+ if (priv->state != RUNNING && rcar_vin_hw_ready(priv)) {
+ if (rcar_vin_setup(priv)) {
+ /* Submit error */
+ list_del_init(to_buf_list(vb));
+ spin_unlock_irq(&priv->lock);
+ goto error;
+ }
+ priv->request_to_stop = false;
+ init_completion(&priv->capture_stop);
+ priv->state = RUNNING;
+ rcar_vin_capture(priv);
+ }
+
+ spin_unlock_irq(&priv->lock);
+
+ return;
+
+error:
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+}
+
+static void rcar_vin_videobuf_release(struct vb2_buffer *vb)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct rcar_vin_priv *priv = ici->priv;
+ unsigned int i;
+ int buf_in_use = 0;
+
+ spin_lock_irq(&priv->lock);
+
+ /* Is the buffer in use by the VIN hardware? */
+ for (i = 0; i < MAX_BUFFER_NUM; i++) {
+ if (priv->queue_buf[i] == vb) {
+ buf_in_use = 1;
+ break;
+ }
+ }
+
+ if (buf_in_use) {
+ while (priv->state != STOPPED) {
+
+ /* issue stop if running */
+ if (priv->state == RUNNING)
+ rcar_vin_request_capture_stop(priv);
+
+ /* wait until capturing has been stopped */
+ if (priv->state == STOPPING) {
+ priv->request_to_stop = true;
+ spin_unlock_irq(&priv->lock);
+ wait_for_completion(&priv->capture_stop);
+ spin_lock_irq(&priv->lock);
+ }
+ }
+ /*
+ * Capturing has now stopped. The buffer we have been asked
+ * to release could be any of the current buffers in use, so
+ * release all buffers that are in use by HW
+ */
+ for (i = 0; i < MAX_BUFFER_NUM; i++) {
+ if (priv->queue_buf[i]) {
+ vb2_buffer_done(priv->queue_buf[i],
+ VB2_BUF_STATE_ERROR);
+ priv->queue_buf[i] = NULL;
+ }
+ }
+ } else {
+ list_del_init(to_buf_list(vb));
+ }
+
+ spin_unlock_irq(&priv->lock);
+}
+
+static int rcar_vin_videobuf_init(struct vb2_buffer *vb)
+{
+ INIT_LIST_HEAD(to_buf_list(vb));
+ return 0;
+}
+
+static int rcar_vin_stop_streaming(struct vb2_queue *vq)
+{
+ struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct rcar_vin_priv *priv = ici->priv;
+ struct list_head *buf_head, *tmp;
+
+ spin_lock_irq(&priv->lock);
+ list_for_each_safe(buf_head, tmp, &priv->capture)
+ list_del_init(buf_head);
+ spin_unlock_irq(&priv->lock);
+
+ return 0;
+}
+
+static struct vb2_ops rcar_vin_vb2_ops = {
+ .queue_setup = rcar_vin_videobuf_setup,
+ .buf_init = rcar_vin_videobuf_init,
+ .buf_cleanup = rcar_vin_videobuf_release,
+ .buf_queue = rcar_vin_videobuf_queue,
+ .stop_streaming = rcar_vin_stop_streaming,
+ .wait_prepare = soc_camera_unlock,
+ .wait_finish = soc_camera_lock,
+};
+
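+/* Frame done interrupt: complete the captured buffer and reload the next HW slot */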
+static irqreturn_t rcar_vin_irq(int irq, void *data)
+{
+ struct rcar_vin_priv *priv = data;
+ u32 int_status;
+ bool can_run = false, hw_stopped;
+ int slot;
+ unsigned int handled = 0;
+
+ spin_lock(&priv->lock);
+
+ int_status = ioread32(priv->base + VNINTS_REG);
+ if (!int_status)
+ goto done;
+ /* ack interrupts */
+ iowrite32(int_status, priv->base + VNINTS_REG);
+ handled = 1;
+
+ /* nothing to do if capture status is 'STOPPED' */
+ if (priv->state == STOPPED)
+ goto done;
+
+ hw_stopped = !(ioread32(priv->base + VNMS_REG) & VNMS_CA);
+
+ if (!priv->request_to_stop) {
+ if (is_continuous_transfer(priv))
+ slot = (ioread32(priv->base + VNMS_REG) &
+ VNMS_FBS_MASK) >> VNMS_FBS_SHIFT;
+ else
+ slot = 0;
+
+ priv->queue_buf[slot]->v4l2_buf.field = priv->field;
+ priv->queue_buf[slot]->v4l2_buf.sequence = priv->sequence++;
+ do_gettimeofday(&priv->queue_buf[slot]->v4l2_buf.timestamp);
+ vb2_buffer_done(priv->queue_buf[slot], VB2_BUF_STATE_DONE);
+ priv->queue_buf[slot] = NULL;
+
+ if (priv->state != STOPPING)
+ can_run = rcar_vin_fill_hw_slot(priv);
+
+ if (hw_stopped || !can_run) {
+ priv->state = STOPPED;
+ } else if (is_continuous_transfer(priv) &&
+ list_empty(&priv->capture) &&
+ priv->state == RUNNING) {
+ /*
+ * The continuous capturing requires an explicit stop
+ * operation when there is no buffer to be set into
+ * the VnMBm registers.
+ */
+ rcar_vin_request_capture_stop(priv);
+ } else {
+ rcar_vin_capture(priv);
+ }
+
+ } else if (hw_stopped) {
+ priv->state = STOPPED;
+ priv->request_to_stop = false;
+ complete(&priv->capture_stop);
+ }
+
+done:
+ spin_unlock(&priv->lock);
+
+ return IRQ_RETVAL(handled);
+}
+
+static int rcar_vin_add_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct rcar_vin_priv *priv = ici->priv;
+ int i;
+
+ for (i = 0; i < MAX_BUFFER_NUM; i++)
+ priv->queue_buf[i] = NULL;
+
+ pm_runtime_get_sync(ici->v4l2_dev.dev);
+
+ dev_dbg(icd->parent, "R-Car VIN driver attached to camera %d\n",
+ icd->devnum);
+
+ return 0;
+}
+
+static void rcar_vin_remove_device(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct rcar_vin_priv *priv = ici->priv;
+ struct vb2_buffer *vb;
+ int i;
+
+ /* disable capture, disable interrupts */
+ iowrite32(ioread32(priv->base + VNMC_REG) & ~VNMC_ME,
+ priv->base + VNMC_REG);
+ iowrite32(0, priv->base + VNIE_REG);
+
+ priv->state = STOPPED;
+ priv->request_to_stop = false;
+
+ /* make sure active buffer is cancelled */
+ spin_lock_irq(&priv->lock);
+ for (i = 0; i < MAX_BUFFER_NUM; i++) {
+ vb = priv->queue_buf[i];
+ if (vb) {
+ list_del_init(to_buf_list(vb));
+ vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
+ }
+ }
+ spin_unlock_irq(&priv->lock);
+
+ pm_runtime_put(ici->v4l2_dev.dev);
+
+ dev_dbg(icd->parent, "R-Car VIN driver detached from camera %d\n",
+ icd->devnum);
+}
+
+/* Called with .host_lock held */
+static int rcar_vin_clock_start(struct soc_camera_host *ici)
+{
+ /* VIN does not have "mclk" */
+ return 0;
+}
+
+/* Called with .host_lock held */
+static void rcar_vin_clock_stop(struct soc_camera_host *ici)
+{
+ /* VIN does not have "mclk" */
+}
+
+/* rect is guaranteed to not exceed the scaled camera rectangle */
+static int rcar_vin_set_rect(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct rcar_vin_cam *cam = icd->host_priv;
+ struct rcar_vin_priv *priv = ici->priv;
+ unsigned int left_offset, top_offset;
+ unsigned char dsize = 0;
+ struct v4l2_rect *cam_subrect = &cam->subrect;
+
+ dev_dbg(icd->parent, "Crop %ux%u@%u:%u\n",
+ icd->user_width, icd->user_height, cam->vin_left, cam->vin_top);
+
+ left_offset = cam->vin_left;
+ top_offset = cam->vin_top;
+
+ if (icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_RGB32 &&
+ priv->chip == RCAR_E1)
+ dsize = 1;
+
+ dev_dbg(icd->parent, "Cam %ux%u@%u:%u\n",
+ cam->width, cam->height, cam->vin_left, cam->vin_top);
+ dev_dbg(icd->parent, "Cam subrect %ux%u@%u:%u\n",
+ cam_subrect->width, cam_subrect->height,
+ cam_subrect->left, cam_subrect->top);
+
+ /* Set Start/End Pixel/Line Pre-Clip */
+ iowrite32(left_offset << dsize, priv->base + VNSPPRC_REG);
+ iowrite32((left_offset + cam->width - 1) << dsize,
+ priv->base + VNEPPRC_REG);
+ switch (priv->field) {
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ iowrite32(top_offset / 2, priv->base + VNSLPRC_REG);
+ iowrite32((top_offset + cam->height) / 2 - 1,
+ priv->base + VNELPRC_REG);
+ break;
+ default:
+ iowrite32(top_offset, priv->base + VNSLPRC_REG);
+ iowrite32(top_offset + cam->height - 1,
+ priv->base + VNELPRC_REG);
+ break;
+ }
+
+ /* Set Start/End Pixel/Line Post-Clip */
+ iowrite32(0, priv->base + VNSPPOC_REG);
+ iowrite32(0, priv->base + VNSLPOC_REG);
+ iowrite32((cam_subrect->width - 1) << dsize, priv->base + VNEPPOC_REG);
+ switch (priv->field) {
+ case V4L2_FIELD_INTERLACED:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ iowrite32(cam_subrect->height / 2 - 1,
+ priv->base + VNELPOC_REG);
+ break;
+ default:
+ iowrite32(cam_subrect->height - 1, priv->base + VNELPOC_REG);
+ break;
+ }
+
+ iowrite32(ALIGN(cam->width, 0x10), priv->base + VNIS_REG);
+
+ return 0;
+}
+
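+/* Stop capturing while saving the VnMC value so capture_restore() can resume it */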
+static void capture_stop_preserve(struct rcar_vin_priv *priv, u32 *vnmc)
+{
+ *vnmc = ioread32(priv->base + VNMC_REG);
+ /* module disable */
+ iowrite32(*vnmc & ~VNMC_ME, priv->base + VNMC_REG);
+}
+
+static void capture_restore(struct rcar_vin_priv *priv, u32 vnmc)
+{
+ unsigned long timeout = jiffies + 10 * HZ;
+
+ /*
+ * Wait until the end of the current frame. It can take a long time,
+ * but if it has been aborted by a MRST1 reset, it should exit sooner.
+ */
+ while ((ioread32(priv->base + VNMS_REG) & VNMS_AV) &&
+ time_before(jiffies, timeout))
+ msleep(1);
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(priv->ici.v4l2_dev.dev,
+ "Timeout waiting for frame end! Interface problem?\n");
+ return;
+ }
+
+ iowrite32(vnmc, priv->base + VNMC_REG);
+}
+
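+/* Media bus configuration flags supported by the VIN host */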
+#define VIN_MBUS_FLAGS (V4L2_MBUS_MASTER | \
+ V4L2_MBUS_PCLK_SAMPLE_RISING | \
+ V4L2_MBUS_HSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_HSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_VSYNC_ACTIVE_HIGH | \
+ V4L2_MBUS_VSYNC_ACTIVE_LOW | \
+ V4L2_MBUS_DATA_ACTIVE_HIGH)
+
+static int rcar_vin_set_bus_param(struct soc_camera_device *icd)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct rcar_vin_priv *priv = ici->priv;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct v4l2_mbus_config cfg;
+ unsigned long common_flags;
+ u32 vnmc;
+ u32 val;
+ int ret;
+
+ capture_stop_preserve(priv, &vnmc);
+
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (!ret) {
+ common_flags = soc_mbus_config_compatible(&cfg, VIN_MBUS_FLAGS);
+ if (!common_flags) {
+ dev_warn(icd->parent,
+ "MBUS flags incompatible: camera 0x%x, host 0x%x\n",
+ cfg.flags, VIN_MBUS_FLAGS);
+ return -EINVAL;
+ }
+ } else if (ret != -ENOIOCTLCMD) {
+ return ret;
+ } else {
+ common_flags = VIN_MBUS_FLAGS;
+ }
+
+ /* Make choices based on platform preferences */
+ if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
+ if (priv->pdata->flags & RCAR_VIN_HSYNC_ACTIVE_LOW)
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
+ else
+ common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
+ }
+
+ if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
+ (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
+ if (priv->pdata->flags & RCAR_VIN_VSYNC_ACTIVE_LOW)
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
+ else
+ common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
+ }
+
+ cfg.flags = common_flags;
+ ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ val = priv->field == V4L2_FIELD_NONE ? VNDMR2_FTEV : 0;
+ if (!(common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
+ val |= VNDMR2_VPS;
+ if (!(common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
+ val |= VNDMR2_HPS;
+ iowrite32(val, priv->base + VNDMR2_REG);
+
+ ret = rcar_vin_set_rect(icd);
+ if (ret < 0)
+ return ret;
+
+ capture_restore(priv, vnmc);
+
+ return 0;
+}
+
+static int rcar_vin_try_bus_param(struct soc_camera_device *icd,
+ unsigned char buswidth)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct v4l2_mbus_config cfg;
+ int ret;
+
+ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
+ if (ret == -ENOIOCTLCMD)
+ return 0;
+ else if (ret)
+ return ret;
+
+ if (buswidth > 24)
+ return -EINVAL;
+
+ /* check whether there are common mbus flags */
+ ret = soc_mbus_config_compatible(&cfg, VIN_MBUS_FLAGS);
+ if (ret)
+ return 0;
+
+ dev_warn(icd->parent,
+ "MBUS flags incompatible: camera 0x%x, host 0x%x\n",
+ cfg.flags, VIN_MBUS_FLAGS);
+
+ return -EINVAL;
+}
+
+static bool rcar_vin_packing_supported(const struct soc_mbus_pixelfmt *fmt)
+{
+ return fmt->packing == SOC_MBUS_PACKING_NONE ||
+ (fmt->bits_per_sample > 8 &&
+ fmt->packing == SOC_MBUS_PACKING_EXTEND16);
+}
+
+static const struct soc_mbus_pixelfmt rcar_vin_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_NV16,
+ .name = "NV16",
+ .bits_per_sample = 8,
+ .packing = SOC_MBUS_PACKING_2X8_PADHI,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_UYVY,
+ .name = "UYVY",
+ .bits_per_sample = 16,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .name = "RGB565",
+ .bits_per_sample = 16,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB555X,
+ .name = "ARGB1555",
+ .bits_per_sample = 16,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .name = "RGB888",
+ .bits_per_sample = 32,
+ .packing = SOC_MBUS_PACKING_NONE,
+ .order = SOC_MBUS_ORDER_LE,
+ .layout = SOC_MBUS_LAYOUT_PACKED,
+ },
+};
+
+static int rcar_vin_get_formats(struct soc_camera_device *icd, unsigned int idx,
+ struct soc_camera_format_xlate *xlate)
+{
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct device *dev = icd->parent;
+ int ret, k, n;
+ int formats = 0;
+ struct rcar_vin_cam *cam;
+ enum v4l2_mbus_pixelcode code;
+ const struct soc_mbus_pixelfmt *fmt;
+
+ ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
+ if (ret < 0)
+ return 0;
+
+ fmt = soc_mbus_get_fmtdesc(code);
+ if (!fmt) {
+ dev_warn(dev, "unsupported format code #%u: %d\n", idx, code);
+ return 0;
+ }
+
+ ret = rcar_vin_try_bus_param(icd, fmt->bits_per_sample);
+ if (ret < 0)
+ return 0;
+
+ if (!icd->host_priv) {
+ struct v4l2_mbus_framefmt mf;
+ struct v4l2_rect rect;
+ struct device *dev = icd->parent;
+ int shift;
+
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ /* Cache current client geometry */
+ ret = soc_camera_client_g_rect(sd, &rect);
+ if (ret == -ENOIOCTLCMD) {
+ /* Sensor driver doesn't support cropping */
+ rect.left = 0;
+ rect.top = 0;
+ rect.width = mf.width;
+ rect.height = mf.height;
+ } else if (ret < 0) {
+ return ret;
+ }
+
+ /*
+ * If the sensor proposes a format that is too large, try smaller ones:
+ * 1280x960, 640x480, 320x240
+ */
+ for (shift = 0; shift < 3; shift++) {
+ if (mf.width <= VIN_MAX_WIDTH &&
+ mf.height <= VIN_MAX_HEIGHT)
+ break;
+
+ mf.width = 1280 >> shift;
+ mf.height = 960 >> shift;
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd),
+ video, s_mbus_fmt,
+ &mf);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (shift == 3) {
+ dev_err(dev,
+ "Failed to configure the client below %ux%x\n",
+ mf.width, mf.height);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "camera fmt %ux%u\n", mf.width, mf.height);
+
+ cam = kzalloc(sizeof(*cam), GFP_KERNEL);
+ if (!cam)
+ return -ENOMEM;
+ /*
+ * We are called with the current camera crop;
+ * initialise the subrect with it
+ */
+ cam->rect = rect;
+ cam->subrect = rect;
+ cam->width = mf.width;
+ cam->height = mf.height;
+
+ icd->host_priv = cam;
+ } else {
+ cam = icd->host_priv;
+ }
+
+ /* Beginning of a pass */
+ if (!idx)
+ cam->extra_fmt = NULL;
+
+ switch (code) {
+ case V4L2_MBUS_FMT_YUYV8_1X16:
+ case V4L2_MBUS_FMT_YUYV8_2X8:
+ if (cam->extra_fmt)
+ break;
+
+ /* Add all our formats that can be generated by VIN */
+ cam->extra_fmt = rcar_vin_formats;
+
+ n = ARRAY_SIZE(rcar_vin_formats);
+ formats += n;
+ for (k = 0; xlate && k < n; k++, xlate++) {
+ xlate->host_fmt = &rcar_vin_formats[k];
+ xlate->code = code;
+ dev_dbg(dev, "Providing format %s using code %d\n",
+ rcar_vin_formats[k].name, code);
+ }
+ break;
+ default:
+ if (!rcar_vin_packing_supported(fmt))
+ return 0;
+
+ dev_dbg(dev, "Providing format %s in pass-through mode\n",
+ fmt->name);
+ break;
+ }
+
+ /* Generic pass-through */
+ formats++;
+ if (xlate) {
+ xlate->host_fmt = fmt;
+ xlate->code = code;
+ xlate++;
+ }
+
+ return formats;
+}
+
+static void rcar_vin_put_formats(struct soc_camera_device *icd)
+{
+ kfree(icd->host_priv);
+ icd->host_priv = NULL;
+}
+
+static int rcar_vin_set_crop(struct soc_camera_device *icd,
+ const struct v4l2_crop *a)
+{
+ struct v4l2_crop a_writable = *a;
+ const struct v4l2_rect *rect = &a_writable.c;
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct rcar_vin_priv *priv = ici->priv;
+ struct v4l2_crop cam_crop;
+ struct rcar_vin_cam *cam = icd->host_priv;
+ struct v4l2_rect *cam_rect = &cam_crop.c;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct device *dev = icd->parent;
+ struct v4l2_mbus_framefmt mf;
+ u32 vnmc;
+ int ret, i;
+
+ dev_dbg(dev, "S_CROP(%ux%u@%u:%u)\n", rect->width, rect->height,
+ rect->left, rect->top);
+
+ /* During camera cropping its output window can change too, so stop the VIN */
+ capture_stop_preserve(priv, &vnmc);
+ dev_dbg(dev, "VNMC_REG 0x%x\n", vnmc);
+
+ /* Apply iterative camera S_CROP for new input window. */
+ ret = soc_camera_client_s_crop(sd, &a_writable, &cam_crop,
+ &cam->rect, &cam->subrect);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "camera cropped to %ux%u@%u:%u\n",
+ cam_rect->width, cam_rect->height,
+ cam_rect->left, cam_rect->top);
+
+ /* On success cam_crop contains current camera crop */
+
+ /* Retrieve camera output window */
+ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ if (mf.width > VIN_MAX_WIDTH || mf.height > VIN_MAX_HEIGHT)
+ return -EINVAL;
+
+ /* Cache camera output window */
+ cam->width = mf.width;
+ cam->height = mf.height;
+
+ icd->user_width = cam->width;
+ icd->user_height = cam->height;
+
+ cam->vin_left = rect->left & ~1;
+ cam->vin_top = rect->top & ~1;
+
+ /* Use VIN cropping to crop to the new window. */
+ ret = rcar_vin_set_rect(icd);
+ if (ret < 0)
+ return ret;
+
+ cam->subrect = *rect;
+
+ dev_dbg(dev, "VIN cropped to %ux%u@%u:%u\n",
+ icd->user_width, icd->user_height,
+ cam->vin_left, cam->vin_top);
+
+ /* Restore capture */
+ for (i = 0; i < MAX_BUFFER_NUM; i++) {
+ if (priv->queue_buf[i] && priv->state == STOPPED) {
+ vnmc |= VNMC_ME;
+ break;
+ }
+ }
+ capture_restore(priv, vnmc);
+
+ /* Even if only camera cropping succeeded */
+ return ret;
+}
+
+static int rcar_vin_get_crop(struct soc_camera_device *icd,
+ struct v4l2_crop *a)
+{
+ struct rcar_vin_cam *cam = icd->host_priv;
+
+ a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ a->c = cam->subrect;
+
+ return 0;
+}
+
+/* Similar to set_crop multistage iterative algorithm */
+static int rcar_vin_set_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
+ struct rcar_vin_priv *priv = ici->priv;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct rcar_vin_cam *cam = icd->host_priv;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_mbus_framefmt mf;
+ struct device *dev = icd->parent;
+ __u32 pixfmt = pix->pixelformat;
+ const struct soc_camera_format_xlate *xlate;
+ unsigned int vin_sub_width = 0, vin_sub_height = 0;
+ int ret;
+ bool can_scale;
+ enum v4l2_field field;
+ v4l2_std_id std;
+
+ dev_dbg(dev, "S_FMT(pix=0x%x, %ux%u)\n",
+ pixfmt, pix->width, pix->height);
+
+ switch (pix->field) {
+ default:
+ pix->field = V4L2_FIELD_NONE;
+ /* fall-through */
+ case V4L2_FIELD_NONE:
+ case V4L2_FIELD_TOP:
+ case V4L2_FIELD_BOTTOM:
+ case V4L2_FIELD_INTERLACED_TB:
+ case V4L2_FIELD_INTERLACED_BT:
+ field = pix->field;
+ break;
+ case V4L2_FIELD_INTERLACED:
+ /* Query the standard when _TB/_BT is not explicitly specified */
+ ret = v4l2_subdev_call(sd, video, querystd, &std);
+ if (ret < 0)
+ std = V4L2_STD_625_50;
+
+ field = std & V4L2_STD_625_50 ? V4L2_FIELD_INTERLACED_TB :
+ V4L2_FIELD_INTERLACED_BT;
+ break;
+ }
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+ if (!xlate) {
+ dev_warn(dev, "Format %x not found\n", pixfmt);
+ return -EINVAL;
+ }
+ /* Calculate client output geometry */
+ soc_camera_calc_client_output(icd, &cam->rect, &cam->subrect, pix, &mf,
+ 12);
+ mf.field = pix->field;
+ mf.colorspace = pix->colorspace;
+ mf.code = xlate->code;
+
+ switch (pixfmt) {
+ case V4L2_PIX_FMT_RGB32:
+ can_scale = priv->chip != RCAR_E1;
+ break;
+ case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YUYV:
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_RGB555X:
+ can_scale = true;
+ break;
+ default:
+ can_scale = false;
+ break;
+ }
+
+ dev_dbg(dev, "request camera output %ux%u\n", mf.width, mf.height);
+
+ ret = soc_camera_client_scale(icd, &cam->rect, &cam->subrect,
+ &mf, &vin_sub_width, &vin_sub_height,
+ can_scale, 12);
+
+ /* Done with the camera. Now see if we can improve the result */
+ dev_dbg(dev, "Camera %d fmt %ux%u, requested %ux%u\n",
+ ret, mf.width, mf.height, pix->width, pix->height);
+
+ if (ret == -ENOIOCTLCMD)
+ dev_dbg(dev, "Sensor doesn't support scaling\n");
+ else if (ret < 0)
+ return ret;
+
+ if (mf.code != xlate->code)
+ return -EINVAL;
+
+ /* Prepare VIN crop */
+ cam->width = mf.width;
+ cam->height = mf.height;
+
+ /* Use VIN scaling to scale to the requested user window. */
+
+ /* We cannot scale up */
+ if (pix->width > vin_sub_width)
+ vin_sub_width = pix->width;
+
+ if (pix->height > vin_sub_height)
+ vin_sub_height = pix->height;
+
+ pix->colorspace = mf.colorspace;
+
+ if (!can_scale) {
+ pix->width = vin_sub_width;
+ pix->height = vin_sub_height;
+ }
+
+ /*
+ * We have calculated CFLCR, the actual configuration will be performed
+ * in rcar_vin_set_bus_param()
+ */
+
+ dev_dbg(dev, "W: %u : %u, H: %u : %u\n",
+ vin_sub_width, pix->width, vin_sub_height, pix->height);
+
+ icd->current_fmt = xlate;
+
+ priv->field = field;
+
+ return 0;
+}
+
+static int rcar_vin_try_fmt(struct soc_camera_device *icd,
+ struct v4l2_format *f)
+{
+ const struct soc_camera_format_xlate *xlate;
+ struct v4l2_pix_format *pix = &f->fmt.pix;
+ struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
+ struct v4l2_mbus_framefmt mf;
+ __u32 pixfmt = pix->pixelformat;
+ int width, height;
+ int ret;
+
+ xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
+ if (!xlate) {
+ xlate = icd->current_fmt;
+ dev_dbg(icd->parent, "Format %x not found, keeping %x\n",
+ pixfmt, xlate->host_fmt->fourcc);
+ pixfmt = xlate->host_fmt->fourcc;
+ pix->pixelformat = pixfmt;
+ pix->colorspace = icd->colorspace;
+ }
+
+ /* FIXME: calculate using depth and bus width */
+ v4l_bound_align_image(&pix->width, 2, VIN_MAX_WIDTH, 1,
+ &pix->height, 4, VIN_MAX_HEIGHT, 2, 0);
+
+ width = pix->width;
+ height = pix->height;
+
+ /* let soc-camera calculate these values */
+ pix->bytesperline = 0;
+ pix->sizeimage = 0;
+
+ /* limit to sensor capabilities */
+ mf.width = pix->width;
+ mf.height = pix->height;
+ mf.field = pix->field;
+ mf.code = xlate->code;
+ mf.colorspace = pix->colorspace;
+
+ ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd),
+ video, try_mbus_fmt, &mf);
+ if (ret < 0)
+ return ret;
+
+ pix->width = mf.width;
+ pix->height = mf.height;
+ pix->field = mf.field;
+ pix->colorspace = mf.colorspace;
+
+ if (pixfmt == V4L2_PIX_FMT_NV16) {
+ /* FIXME: check against rect_max after converting soc-camera */
+ /* We can scale precisely, need a bigger image from camera */
+ if (pix->width < width || pix->height < height) {
+ /*
+ * We presume the sensor behaves sanely, i.e. if
+ * requested a bigger rectangle, it will not return a
+ * smaller one.
+ */
+ mf.width = VIN_MAX_WIDTH;
+ mf.height = VIN_MAX_HEIGHT;
+ ret = v4l2_device_call_until_err(sd->v4l2_dev,
+ soc_camera_grp_id(icd),
+ video, try_mbus_fmt,
+ &mf);
+ if (ret < 0) {
+ dev_err(icd->parent,
+ "client try_fmt() = %d\n", ret);
+ return ret;
+ }
+ }
+ /* We will scale exactly */
+ if (mf.width > width)
+ pix->width = width;
+ if (mf.height > height)
+ pix->height = height;
+ }
+
+ return ret;
+}
+
+static unsigned int rcar_vin_poll(struct file *file, poll_table *pt)
+{
+ struct soc_camera_device *icd = file->private_data;
+
+ return vb2_poll(&icd->vb2_vidq, file, pt);
+}
+
+static int rcar_vin_querycap(struct soc_camera_host *ici,
+ struct v4l2_capability *cap)
+{
+ strlcpy(cap->card, "R_Car_VIN", sizeof(cap->card));
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ return 0;
+}
+
+static int rcar_vin_init_videobuf2(struct vb2_queue *vq,
+ struct soc_camera_device *icd)
+{
+ vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ vq->io_modes = VB2_MMAP | VB2_USERPTR;
+ vq->drv_priv = icd;
+ vq->ops = &rcar_vin_vb2_ops;
+ vq->mem_ops = &vb2_dma_contig_memops;
+ vq->buf_struct_size = sizeof(struct rcar_vin_buffer);
+ vq->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+
+ return vb2_queue_init(vq);
+}
+
+static struct soc_camera_host_ops rcar_vin_host_ops = {
+ .owner = THIS_MODULE,
+ .add = rcar_vin_add_device,
+ .remove = rcar_vin_remove_device,
+ .clock_start = rcar_vin_clock_start,
+ .clock_stop = rcar_vin_clock_stop,
+ .get_formats = rcar_vin_get_formats,
+ .put_formats = rcar_vin_put_formats,
+ .get_crop = rcar_vin_get_crop,
+ .set_crop = rcar_vin_set_crop,
+ .try_fmt = rcar_vin_try_fmt,
+ .set_fmt = rcar_vin_set_fmt,
+ .poll = rcar_vin_poll,
+ .querycap = rcar_vin_querycap,
+ .set_bus_param = rcar_vin_set_bus_param,
+ .init_videobuf2 = rcar_vin_init_videobuf2,
+};
+
+static struct platform_device_id rcar_vin_id_table[] = {
+ { "r8a7779-vin", RCAR_H1 },
+ { "r8a7778-vin", RCAR_M1 },
+ { "uPD35004-vin", RCAR_E1 },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, rcar_vin_id_table);
+
+static int rcar_vin_probe(struct platform_device *pdev)
+{
+ struct rcar_vin_priv *priv;
+ struct resource *mem;
+ struct rcar_vin_platform_data *pdata;
+ int irq, ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata || !pdata->flags) {
+ dev_err(&pdev->dev, "platform data not set\n");
+ return -EINVAL;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem == NULL)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct rcar_vin_priv),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
+
+ ret = devm_request_irq(&pdev->dev, irq, rcar_vin_irq, IRQF_SHARED,
+ dev_name(&pdev->dev), priv);
+ if (ret)
+ return ret;
+
+ priv->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
+ if (IS_ERR(priv->alloc_ctx))
+ return PTR_ERR(priv->alloc_ctx);
+
+ priv->ici.priv = priv;
+ priv->ici.v4l2_dev.dev = &pdev->dev;
+ priv->ici.nr = pdev->id;
+ priv->ici.drv_name = dev_name(&pdev->dev);
+ priv->ici.ops = &rcar_vin_host_ops;
+
+ priv->pdata = pdata;
+ priv->chip = pdev->id_entry->driver_data;
+ spin_lock_init(&priv->lock);
+ INIT_LIST_HEAD(&priv->capture);
+
+ priv->state = STOPPED;
+
+ pm_suspend_ignore_children(&pdev->dev, true);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = soc_camera_host_register(&priv->ici);
+ if (ret)
+ goto cleanup;
+
+ return 0;
+
+cleanup:
+ pm_runtime_disable(&pdev->dev);
+ vb2_dma_contig_cleanup_ctx(priv->alloc_ctx);
+
+ return ret;
+}
+
+static int rcar_vin_remove(struct platform_device *pdev)
+{
+ struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
+ struct rcar_vin_priv *priv = container_of(soc_host,
+ struct rcar_vin_priv, ici);
+
+ soc_camera_host_unregister(soc_host);
+ pm_runtime_disable(&pdev->dev);
+ vb2_dma_contig_cleanup_ctx(priv->alloc_ctx);
+
+ return 0;
+}
+
+static struct platform_driver rcar_vin_driver = {
+ .probe = rcar_vin_probe,
+ .remove = rcar_vin_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+ .id_table = rcar_vin_id_table,
+};
+
+module_platform_driver(rcar_vin_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:rcar_vin");
+MODULE_DESCRIPTION("Renesas R-Car VIN camera host driver");
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index f2de0066089..8df22f77917 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -610,13 +610,12 @@ static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
static int sh_mobile_ceu_clock_start(struct soc_camera_host *ici)
{
struct sh_mobile_ceu_dev *pcdev = ici->priv;
- int ret;
pm_runtime_get_sync(ici->v4l2_dev.dev);
pcdev->buf_total = 0;
- ret = sh_mobile_ceu_soft_reset(pcdev);
+ sh_mobile_ceu_soft_reset(pcdev);
return 0;
}
@@ -1837,9 +1836,9 @@ static int sh_mobile_ceu_probe(struct platform_device *pdev)
for (j = 0; pcdev->pdata->asd_sizes[j]; j++) {
for (i = 0; i < pcdev->pdata->asd_sizes[j]; i++, asd++) {
dev_dbg(&pdev->dev, "%s(): subdev #%d, type %u\n",
- __func__, i, (*asd)->bus_type);
- if ((*asd)->bus_type == V4L2_ASYNC_BUS_PLATFORM &&
- !strncmp(name, (*asd)->match.platform.name,
+ __func__, i, (*asd)->match_type);
+ if ((*asd)->match_type == V4L2_ASYNC_MATCH_DEVNAME &&
+ !strncmp(name, (*asd)->match.device_name.name,
sizeof(name) - 1)) {
pcdev->csi2_asd = *asd;
break;
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index 2dd0e527294..387a232d95a 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -136,7 +136,7 @@ EXPORT_SYMBOL(soc_camera_power_off);
int soc_camera_power_init(struct device *dev, struct soc_camera_subdev_desc *ssdd)
{
-
+ /* Should not have any effect in synchronous case */
return devm_regulator_bulk_get(dev, ssdd->num_regulators,
ssdd->regulators);
}
@@ -1311,6 +1311,7 @@ eusrfmt:
static int soc_camera_i2c_init(struct soc_camera_device *icd,
struct soc_camera_desc *sdesc)
{
+ struct soc_camera_subdev_desc *ssdd;
struct i2c_client *client;
struct soc_camera_host *ici;
struct soc_camera_host_desc *shd = &sdesc->host_desc;
@@ -1333,7 +1334,21 @@ static int soc_camera_i2c_init(struct soc_camera_device *icd,
return -ENODEV;
}
- shd->board_info->platform_data = &sdesc->subdev_desc;
+ ssdd = kzalloc(sizeof(*ssdd), GFP_KERNEL);
+ if (!ssdd) {
+ ret = -ENOMEM;
+ goto ealloc;
+ }
+
+ memcpy(ssdd, &sdesc->subdev_desc, sizeof(*ssdd));
+ /*
+ * In the synchronous case we request regulators ourselves in
+ * soc_camera_pdrv_probe(); make sure the subdevice driver doesn't try
+ * to allocate them again.
+ */
+ ssdd->num_regulators = 0;
+ ssdd->regulators = NULL;
+ shd->board_info->platform_data = ssdd;
snprintf(clk_name, sizeof(clk_name), "%d-%04x",
shd->i2c_adapter_id, shd->board_info->addr);
@@ -1359,8 +1374,10 @@ static int soc_camera_i2c_init(struct soc_camera_device *icd,
return 0;
ei2cnd:
v4l2_clk_unregister(icd->clk);
-eclkreg:
icd->clk = NULL;
+eclkreg:
+ kfree(ssdd);
+ealloc:
i2c_put_adapter(adap);
return ret;
}
@@ -1370,15 +1387,18 @@ static void soc_camera_i2c_free(struct soc_camera_device *icd)
struct i2c_client *client =
to_i2c_client(to_soc_camera_control(icd));
struct i2c_adapter *adap;
+ struct soc_camera_subdev_desc *ssdd;
icd->control = NULL;
if (icd->sasc)
return;
adap = client->adapter;
+ ssdd = client->dev.platform_data;
v4l2_device_unregister_subdev(i2c_get_clientdata(client));
i2c_unregister_device(client);
i2c_put_adapter(adap);
+ kfree(ssdd);
v4l2_clk_unregister(icd->clk);
icd->clk = NULL;
}
@@ -1466,7 +1486,8 @@ static int scan_async_group(struct soc_camera_host *ici,
struct soc_camera_device *icd;
struct soc_camera_desc sdesc = {.host_desc.bus_id = ici->nr,};
char clk_name[V4L2_SUBDEV_NAME_SIZE];
- int ret, i;
+ unsigned int i;
+ int ret;
/* First look for a sensor */
for (i = 0; i < size; i++) {
@@ -1475,7 +1496,7 @@ static int scan_async_group(struct soc_camera_host *ici,
break;
}
- if (i == size || asd[i]->bus_type != V4L2_ASYNC_BUS_I2C) {
+ if (i >= size || asd[i]->match_type != V4L2_ASYNC_MATCH_I2C) {
/* All useless */
dev_err(ici->v4l2_dev.dev, "No I2C data source found!\n");
return -ENODEV;
@@ -1501,7 +1522,7 @@ static int scan_async_group(struct soc_camera_host *ici,
return -ENOMEM;
}
- sasc->notifier.subdev = asd;
+ sasc->notifier.subdevs = asd;
sasc->notifier.num_subdevs = size;
sasc->notifier.bound = soc_camera_async_bound;
sasc->notifier.unbind = soc_camera_async_unbind;
@@ -1994,9 +2015,10 @@ static int soc_camera_pdrv_probe(struct platform_device *pdev)
/*
* In the asynchronous case ssdd->num_regulators == 0 yet, so, the below
- * regulator allocation is a dummy. They will be really requested later
- * in soc_camera_async_bind(). Also note, that in that case regulators
- * are attached to the I2C device and not to the camera platform device.
+ * regulator allocation is a dummy. They are actually requested by the
+ * subdevice driver, using soc_camera_power_init(). Also note, that in
+ * that case regulators are attached to the I2C device and not to the
+ * camera platform device.
*/
ret = devm_regulator_bulk_get(&pdev->dev, ssdd->num_regulators,
ssdd->regulators);
diff --git a/drivers/media/platform/vsp1/Makefile b/drivers/media/platform/vsp1/Makefile
new file mode 100644
index 00000000000..4da226169e1
--- /dev/null
+++ b/drivers/media/platform/vsp1/Makefile
@@ -0,0 +1,5 @@
+vsp1-y := vsp1_drv.o vsp1_entity.o vsp1_video.o
+vsp1-y += vsp1_rpf.o vsp1_rwpf.o vsp1_wpf.o
+vsp1-y += vsp1_lif.o vsp1_uds.o
+
+obj-$(CONFIG_VIDEO_RENESAS_VSP1) += vsp1.o
diff --git a/drivers/media/platform/vsp1/vsp1.h b/drivers/media/platform/vsp1/vsp1.h
new file mode 100644
index 00000000000..d6c6ecd039f
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1.h
@@ -0,0 +1,74 @@
+/*
+ * vsp1.h -- R-Car VSP1 Driver
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_H__
+#define __VSP1_H__
+
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/platform_data/vsp1.h>
+
+#include <media/media-device.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_regs.h"
+
+struct clk;
+struct device;
+
+struct vsp1_platform_data;
+struct vsp1_lif;
+struct vsp1_rwpf;
+struct vsp1_uds;
+
+#define VPS1_MAX_RPF 5
+#define VPS1_MAX_UDS 3
+#define VPS1_MAX_WPF 4
+
+struct vsp1_device {
+ struct device *dev;
+ struct vsp1_platform_data *pdata;
+
+ void __iomem *mmio;
+ struct clk *clock;
+ struct clk *rt_clock;
+
+ struct mutex lock;
+ int ref_count;
+
+ struct vsp1_lif *lif;
+ struct vsp1_rwpf *rpf[VPS1_MAX_RPF];
+ struct vsp1_uds *uds[VPS1_MAX_UDS];
+ struct vsp1_rwpf *wpf[VPS1_MAX_WPF];
+
+ struct list_head entities;
+
+ struct v4l2_device v4l2_dev;
+ struct media_device media_dev;
+};
+
+struct vsp1_device *vsp1_device_get(struct vsp1_device *vsp1);
+void vsp1_device_put(struct vsp1_device *vsp1);
+
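+/* Register access helpers for the VSP1 MMIO region */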
+static inline u32 vsp1_read(struct vsp1_device *vsp1, u32 reg)
+{
+ return ioread32(vsp1->mmio + reg);
+}
+
+static inline void vsp1_write(struct vsp1_device *vsp1, u32 reg, u32 data)
+{
+ iowrite32(data, vsp1->mmio + reg);
+}
+
+#endif /* __VSP1_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
new file mode 100644
index 00000000000..1c9e771aa15
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -0,0 +1,527 @@
+/*
+ * vsp1_drv.c -- R-Car VSP1 Driver
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+
+#include "vsp1.h"
+#include "vsp1_lif.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_uds.h"
+
+/* -----------------------------------------------------------------------------
+ * Interrupt Handling
+ */
+
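+/* Signal frame end to the pipeline of every WPF that raised a frame end interrupt */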
+static irqreturn_t vsp1_irq_handler(int irq, void *data)
+{
+ u32 mask = VI6_WFP_IRQ_STA_DFE | VI6_WFP_IRQ_STA_FRE;
+ struct vsp1_device *vsp1 = data;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned int i;
+
+ for (i = 0; i < vsp1->pdata->wpf_count; ++i) {
+ struct vsp1_rwpf *wpf = vsp1->wpf[i];
+ struct vsp1_pipeline *pipe;
+ u32 status;
+
+ if (wpf == NULL)
+ continue;
+
+ pipe = to_vsp1_pipeline(&wpf->entity.subdev.entity);
+ status = vsp1_read(vsp1, VI6_WPF_IRQ_STA(i));
+ vsp1_write(vsp1, VI6_WPF_IRQ_STA(i), ~status & mask);
+
+ if (status & VI6_WFP_IRQ_STA_FRE) {
+ vsp1_pipeline_frame_end(pipe);
+ ret = IRQ_HANDLED;
+ }
+ }
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Entities
+ */
+
+/*
+ * vsp1_create_links - Create links from all sources to the given sink
+ *
+ * This function creates media links from all valid sources to the given sink
+ * pad. Links that would be invalid according to the VSP1 hardware capabilities
+ * are skipped. Those include all links
+ *
+ * - from a UDS to a UDS (UDS entities can't be chained)
+ * - from an entity to itself (no loops are allowed)
+ */
+static int vsp1_create_links(struct vsp1_device *vsp1, struct vsp1_entity *sink)
+{
+ struct media_entity *entity = &sink->subdev.entity;
+ struct vsp1_entity *source;
+ unsigned int pad;
+ int ret;
+
+ list_for_each_entry(source, &vsp1->entities, list_dev) {
+ u32 flags;
+
+ if (source->type == sink->type)
+ continue;
+
+ if (source->type == VSP1_ENTITY_LIF ||
+ source->type == VSP1_ENTITY_WPF)
+ continue;
+
+ flags = source->type == VSP1_ENTITY_RPF &&
+ sink->type == VSP1_ENTITY_WPF &&
+ source->index == sink->index
+ ? MEDIA_LNK_FL_ENABLED : 0;
+
+ for (pad = 0; pad < entity->num_pads; ++pad) {
+ if (!(entity->pads[pad].flags & MEDIA_PAD_FL_SINK))
+ continue;
+
+ ret = media_entity_create_link(&source->subdev.entity,
+ source->source_pad,
+ entity, pad, flags);
+ if (ret < 0)
+ return ret;
+
+ if (flags & MEDIA_LNK_FL_ENABLED)
+ source->sink = entity;
+ }
+ }
+
+ return 0;
+}
+
+static void vsp1_destroy_entities(struct vsp1_device *vsp1)
+{
+ struct vsp1_entity *entity;
+ struct vsp1_entity *next;
+
+ list_for_each_entry_safe(entity, next, &vsp1->entities, list_dev) {
+ list_del(&entity->list_dev);
+ vsp1_entity_destroy(entity);
+ }
+
+ v4l2_device_unregister(&vsp1->v4l2_dev);
+ media_device_unregister(&vsp1->media_dev);
+}
+
+static int vsp1_create_entities(struct vsp1_device *vsp1)
+{
+ struct media_device *mdev = &vsp1->media_dev;
+ struct v4l2_device *vdev = &vsp1->v4l2_dev;
+ struct vsp1_entity *entity;
+ unsigned int i;
+ int ret;
+
+ mdev->dev = vsp1->dev;
+ strlcpy(mdev->model, "VSP1", sizeof(mdev->model));
+ snprintf(mdev->bus_info, sizeof(mdev->bus_info), "platform:%s",
+ dev_name(mdev->dev));
+ ret = media_device_register(mdev);
+ if (ret < 0) {
+ dev_err(vsp1->dev, "media device registration failed (%d)\n",
+ ret);
+ return ret;
+ }
+
+ vdev->mdev = mdev;
+ ret = v4l2_device_register(vsp1->dev, vdev);
+ if (ret < 0) {
+ dev_err(vsp1->dev, "V4L2 device registration failed (%d)\n",
+ ret);
+ goto done;
+ }
+
+ /* Instantiate all the entities. */
+ if (vsp1->pdata->features & VSP1_HAS_LIF) {
+ vsp1->lif = vsp1_lif_create(vsp1);
+ if (IS_ERR(vsp1->lif)) {
+ ret = PTR_ERR(vsp1->lif);
+ goto done;
+ }
+
+ list_add_tail(&vsp1->lif->entity.list_dev, &vsp1->entities);
+ }
+
+ for (i = 0; i < vsp1->pdata->rpf_count; ++i) {
+ struct vsp1_rwpf *rpf;
+
+ rpf = vsp1_rpf_create(vsp1, i);
+ if (IS_ERR(rpf)) {
+ ret = PTR_ERR(rpf);
+ goto done;
+ }
+
+ vsp1->rpf[i] = rpf;
+ list_add_tail(&rpf->entity.list_dev, &vsp1->entities);
+ }
+
+ for (i = 0; i < vsp1->pdata->uds_count; ++i) {
+ struct vsp1_uds *uds;
+
+ uds = vsp1_uds_create(vsp1, i);
+ if (IS_ERR(uds)) {
+ ret = PTR_ERR(uds);
+ goto done;
+ }
+
+ vsp1->uds[i] = uds;
+ list_add_tail(&uds->entity.list_dev, &vsp1->entities);
+ }
+
+ for (i = 0; i < vsp1->pdata->wpf_count; ++i) {
+ struct vsp1_rwpf *wpf;
+
+ wpf = vsp1_wpf_create(vsp1, i);
+ if (IS_ERR(wpf)) {
+ ret = PTR_ERR(wpf);
+ goto done;
+ }
+
+ vsp1->wpf[i] = wpf;
+ list_add_tail(&wpf->entity.list_dev, &vsp1->entities);
+ }
+
+ /* Create links. */
+ list_for_each_entry(entity, &vsp1->entities, list_dev) {
+ if (entity->type == VSP1_ENTITY_LIF ||
+ entity->type == VSP1_ENTITY_RPF)
+ continue;
+
+ ret = vsp1_create_links(vsp1, entity);
+ if (ret < 0)
+ goto done;
+ }
+
+ if (vsp1->pdata->features & VSP1_HAS_LIF) {
+ ret = media_entity_create_link(
+ &vsp1->wpf[0]->entity.subdev.entity, RWPF_PAD_SOURCE,
+ &vsp1->lif->entity.subdev.entity, LIF_PAD_SINK, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Register all subdevs. */
+ list_for_each_entry(entity, &vsp1->entities, list_dev) {
+ ret = v4l2_device_register_subdev(&vsp1->v4l2_dev,
+ &entity->subdev);
+ if (ret < 0)
+ goto done;
+ }
+
+ ret = v4l2_device_register_subdev_nodes(&vsp1->v4l2_dev);
+
+done:
+ if (ret < 0)
+ vsp1_destroy_entities(vsp1);
+
+ return ret;
+}
+
+static int vsp1_device_init(struct vsp1_device *vsp1)
+{
+ unsigned int i;
+ u32 status;
+
+ /* Reset any channel that might be running. */
+ status = vsp1_read(vsp1, VI6_STATUS);
+
+ for (i = 0; i < vsp1->pdata->wpf_count; ++i) {
+ unsigned int timeout;
+
+ if (!(status & VI6_STATUS_SYS_ACT(i)))
+ continue;
+
+ vsp1_write(vsp1, VI6_SRESET, VI6_SRESET_SRTS(i));
+ for (timeout = 10; timeout > 0; --timeout) {
+ status = vsp1_read(vsp1, VI6_STATUS);
+ if (!(status & VI6_STATUS_SYS_ACT(i)))
+ break;
+
+ usleep_range(1000, 2000);
+ }
+
+ if (!timeout) {
+ dev_err(vsp1->dev, "failed to reset wpf.%u\n", i);
+ return -ETIMEDOUT;
+ }
+ }
+
+ vsp1_write(vsp1, VI6_CLK_DCSWT, (8 << VI6_CLK_DCSWT_CSTPW_SHIFT) |
+ (8 << VI6_CLK_DCSWT_CSTRW_SHIFT));
+
+ for (i = 0; i < vsp1->pdata->rpf_count; ++i)
+ vsp1_write(vsp1, VI6_DPR_RPF_ROUTE(i), VI6_DPR_NODE_UNUSED);
+
+ for (i = 0; i < vsp1->pdata->uds_count; ++i)
+ vsp1_write(vsp1, VI6_DPR_UDS_ROUTE(i), VI6_DPR_NODE_UNUSED);
+
+ vsp1_write(vsp1, VI6_DPR_SRU_ROUTE, VI6_DPR_NODE_UNUSED);
+ vsp1_write(vsp1, VI6_DPR_LUT_ROUTE, VI6_DPR_NODE_UNUSED);
+ vsp1_write(vsp1, VI6_DPR_CLU_ROUTE, VI6_DPR_NODE_UNUSED);
+ vsp1_write(vsp1, VI6_DPR_HST_ROUTE, VI6_DPR_NODE_UNUSED);
+ vsp1_write(vsp1, VI6_DPR_HSI_ROUTE, VI6_DPR_NODE_UNUSED);
+ vsp1_write(vsp1, VI6_DPR_BRU_ROUTE, VI6_DPR_NODE_UNUSED);
+
+ vsp1_write(vsp1, VI6_DPR_HGO_SMPPT, (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
+ (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
+ vsp1_write(vsp1, VI6_DPR_HGT_SMPPT, (7 << VI6_DPR_SMPPT_TGW_SHIFT) |
+ (VI6_DPR_NODE_UNUSED << VI6_DPR_SMPPT_PT_SHIFT));
+
+ return 0;
+}
+
+static int vsp1_clocks_enable(struct vsp1_device *vsp1)
+{
+ int ret;
+
+ ret = clk_prepare_enable(vsp1->clock);
+ if (ret < 0)
+ return ret;
+
+ if (IS_ERR(vsp1->rt_clock))
+ return 0;
+
+ ret = clk_prepare_enable(vsp1->rt_clock);
+ if (ret < 0) {
+ clk_disable_unprepare(vsp1->clock);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vsp1_clocks_disable(struct vsp1_device *vsp1)
+{
+ if (!IS_ERR(vsp1->rt_clock))
+ clk_disable_unprepare(vsp1->rt_clock);
+ clk_disable_unprepare(vsp1->clock);
+}
+
+/*
+ * vsp1_device_get - Acquire the VSP1 device
+ *
+ * Increment the VSP1 reference count and initialize the device if the first
+ * reference is taken.
+ *
+ * Return a pointer to the VSP1 device or NULL if an error occurred.
+ */
+struct vsp1_device *vsp1_device_get(struct vsp1_device *vsp1)
+{
+ struct vsp1_device *__vsp1 = vsp1;
+ int ret;
+
+ mutex_lock(&vsp1->lock);
+ if (vsp1->ref_count > 0)
+ goto done;
+
+ ret = vsp1_clocks_enable(vsp1);
+ if (ret < 0) {
+ __vsp1 = NULL;
+ goto done;
+ }
+
+ ret = vsp1_device_init(vsp1);
+ if (ret < 0) {
+ vsp1_clocks_disable(vsp1);
+ __vsp1 = NULL;
+ goto done;
+ }
+
+done:
+ if (__vsp1)
+ vsp1->ref_count++;
+
+ mutex_unlock(&vsp1->lock);
+ return __vsp1;
+}
+
+/*
+ * vsp1_device_put - Release the VSP1 device
+ *
+ * Decrement the VSP1 reference count and clean up the device if the last
+ * reference is released.
+ */
+void vsp1_device_put(struct vsp1_device *vsp1)
+{
+ mutex_lock(&vsp1->lock);
+
+ if (--vsp1->ref_count == 0)
+ vsp1_clocks_disable(vsp1);
+
+ mutex_unlock(&vsp1->lock);
+}
+
+/* -----------------------------------------------------------------------------
+ * Power Management
+ */
+
+#ifdef CONFIG_PM_SLEEP
+static int vsp1_pm_suspend(struct device *dev)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+
+ WARN_ON(mutex_is_locked(&vsp1->lock));
+
+ if (vsp1->ref_count == 0)
+ return 0;
+
+ vsp1_clocks_disable(vsp1);
+ return 0;
+}
+
+static int vsp1_pm_resume(struct device *dev)
+{
+ struct vsp1_device *vsp1 = dev_get_drvdata(dev);
+
+ WARN_ON(mutex_is_locked(&vsp1->lock));
+
+ if (vsp1->ref_count)
+ return 0;
+
+ return vsp1_clocks_enable(vsp1);
+}
+#endif
+
+static const struct dev_pm_ops vsp1_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(vsp1_pm_suspend, vsp1_pm_resume)
+};
+
+/* -----------------------------------------------------------------------------
+ * Platform Driver
+ */
+
+static struct vsp1_platform_data *
+vsp1_get_platform_data(struct platform_device *pdev)
+{
+ struct vsp1_platform_data *pdata = pdev->dev.platform_data;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return NULL;
+ }
+
+ if (pdata->rpf_count <= 0 || pdata->rpf_count > VPS1_MAX_RPF) {
+ dev_err(&pdev->dev, "invalid number of RPF (%u)\n",
+ pdata->rpf_count);
+ return NULL;
+ }
+
+ if (pdata->uds_count <= 0 || pdata->uds_count > VPS1_MAX_UDS) {
+ dev_err(&pdev->dev, "invalid number of UDS (%u)\n",
+ pdata->uds_count);
+ return NULL;
+ }
+
+ if (pdata->wpf_count <= 0 || pdata->wpf_count > VPS1_MAX_WPF) {
+ dev_err(&pdev->dev, "invalid number of WPF (%u)\n",
+ pdata->wpf_count);
+ return NULL;
+ }
+
+ return pdata;
+}
+
+static int vsp1_probe(struct platform_device *pdev)
+{
+ struct vsp1_device *vsp1;
+ struct resource *irq;
+ struct resource *io;
+ int ret;
+
+ vsp1 = devm_kzalloc(&pdev->dev, sizeof(*vsp1), GFP_KERNEL);
+ if (vsp1 == NULL)
+ return -ENOMEM;
+
+ vsp1->dev = &pdev->dev;
+ mutex_init(&vsp1->lock);
+ INIT_LIST_HEAD(&vsp1->entities);
+
+ vsp1->pdata = vsp1_get_platform_data(pdev);
+ if (vsp1->pdata == NULL)
+ return -ENODEV;
+
+ /* I/O, IRQ and clock resources */
+ io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ vsp1->mmio = devm_ioremap_resource(&pdev->dev, io);
+ if (IS_ERR(vsp1->mmio))
+ return PTR_ERR(vsp1->mmio);
+
+ vsp1->clock = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(vsp1->clock)) {
+ dev_err(&pdev->dev, "failed to get clock\n");
+ return PTR_ERR(vsp1->clock);
+ }
+
+ /* The RT clock is optional */
+ vsp1->rt_clock = devm_clk_get(&pdev->dev, "rt");
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "missing IRQ\n");
+ return -EINVAL;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq->start, vsp1_irq_handler,
+ IRQF_SHARED, dev_name(&pdev->dev), vsp1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ\n");
+ return ret;
+ }
+
+ /* Instantiate entities */
+ ret = vsp1_create_entities(vsp1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to create entities\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, vsp1);
+
+ return 0;
+}
+
+static int vsp1_remove(struct platform_device *pdev)
+{
+ struct vsp1_device *vsp1 = platform_get_drvdata(pdev);
+
+ vsp1_destroy_entities(vsp1);
+
+ return 0;
+}
+
+static struct platform_driver vsp1_platform_driver = {
+ .probe = vsp1_probe,
+ .remove = vsp1_remove,
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "vsp1",
+ .pm = &vsp1_pm_ops,
+ },
+};
+
+module_platform_driver(vsp1_platform_driver);
+
+MODULE_ALIAS("vsp1");
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Renesas VSP1 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/platform/vsp1/vsp1_entity.c b/drivers/media/platform/vsp1/vsp1_entity.c
new file mode 100644
index 00000000000..9028f9d524f
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_entity.c
@@ -0,0 +1,181 @@
+/*
+ * vsp1_entity.c -- R-Car VSP1 Base Entity
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_entity.h"
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+struct v4l2_mbus_framefmt *
+vsp1_entity_get_pad_format(struct vsp1_entity *entity,
+ struct v4l2_subdev_fh *fh,
+ unsigned int pad, u32 which)
+{
+ switch (which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ return v4l2_subdev_get_try_format(fh, pad);
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ return &entity->formats[pad];
+ default:
+ return NULL;
+ }
+}
+
+/*
+ * vsp1_entity_init_formats - Initialize formats on all pads
+ * @subdev: V4L2 subdevice
+ * @fh: V4L2 subdev file handle
+ *
+ * Initialize all pad formats with default values. If fh is not NULL, try
+ * formats are initialized on the file handle. Otherwise active formats are
+ * initialized on the device.
+ */
+void vsp1_entity_init_formats(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh)
+{
+ struct v4l2_subdev_format format;
+ unsigned int pad;
+
+ for (pad = 0; pad < subdev->entity.num_pads - 1; ++pad) {
+ memset(&format, 0, sizeof(format));
+
+ format.pad = pad;
+ format.which = fh ? V4L2_SUBDEV_FORMAT_TRY
+ : V4L2_SUBDEV_FORMAT_ACTIVE;
+
+ v4l2_subdev_call(subdev, pad, set_fmt, fh, &format);
+ }
+}
+
+static int vsp1_entity_open(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh)
+{
+ vsp1_entity_init_formats(subdev, fh);
+
+ return 0;
+}
+
+const struct v4l2_subdev_internal_ops vsp1_subdev_internal_ops = {
+ .open = vsp1_entity_open,
+};
+
+/* -----------------------------------------------------------------------------
+ * Media Operations
+ */
+
+static int vsp1_entity_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct vsp1_entity *source;
+
+ if (!(local->flags & MEDIA_PAD_FL_SOURCE))
+ return 0;
+
+ source = container_of(local->entity, struct vsp1_entity, subdev.entity);
+
+ if (!source->route)
+ return 0;
+
+ if (flags & MEDIA_LNK_FL_ENABLED) {
+ if (source->sink)
+ return -EBUSY;
+ source->sink = remote->entity;
+ } else {
+ source->sink = NULL;
+ }
+
+ return 0;
+}
+
+const struct media_entity_operations vsp1_media_ops = {
+ .link_setup = vsp1_entity_link_setup,
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization
+ */
+
+int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
+ unsigned int num_pads)
+{
+ static const struct {
+ unsigned int id;
+ unsigned int reg;
+ } routes[] = {
+ { VI6_DPR_NODE_LIF, 0 },
+ { VI6_DPR_NODE_RPF(0), VI6_DPR_RPF_ROUTE(0) },
+ { VI6_DPR_NODE_RPF(1), VI6_DPR_RPF_ROUTE(1) },
+ { VI6_DPR_NODE_RPF(2), VI6_DPR_RPF_ROUTE(2) },
+ { VI6_DPR_NODE_RPF(3), VI6_DPR_RPF_ROUTE(3) },
+ { VI6_DPR_NODE_RPF(4), VI6_DPR_RPF_ROUTE(4) },
+ { VI6_DPR_NODE_UDS(0), VI6_DPR_UDS_ROUTE(0) },
+ { VI6_DPR_NODE_UDS(1), VI6_DPR_UDS_ROUTE(1) },
+ { VI6_DPR_NODE_UDS(2), VI6_DPR_UDS_ROUTE(2) },
+ { VI6_DPR_NODE_WPF(0), 0 },
+ { VI6_DPR_NODE_WPF(1), 0 },
+ { VI6_DPR_NODE_WPF(2), 0 },
+ { VI6_DPR_NODE_WPF(3), 0 },
+ };
+
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(routes); ++i) {
+ if (routes[i].id == entity->id) {
+ entity->route = routes[i].reg;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(routes))
+ return -EINVAL;
+
+ entity->vsp1 = vsp1;
+ entity->source_pad = num_pads - 1;
+
+ /* Allocate formats and pads. */
+ entity->formats = devm_kzalloc(vsp1->dev,
+ num_pads * sizeof(*entity->formats),
+ GFP_KERNEL);
+ if (entity->formats == NULL)
+ return -ENOMEM;
+
+ entity->pads = devm_kzalloc(vsp1->dev, num_pads * sizeof(*entity->pads),
+ GFP_KERNEL);
+ if (entity->pads == NULL)
+ return -ENOMEM;
+
+ /* Initialize pads. */
+ for (i = 0; i < num_pads - 1; ++i)
+ entity->pads[i].flags = MEDIA_PAD_FL_SINK;
+
+ entity->pads[num_pads - 1].flags = MEDIA_PAD_FL_SOURCE;
+
+ /* Initialize the media entity. */
+ return media_entity_init(&entity->subdev.entity, num_pads,
+ entity->pads, 0);
+}
+
+void vsp1_entity_destroy(struct vsp1_entity *entity)
+{
+ media_entity_cleanup(&entity->subdev.entity);
+}
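
For orientation, vsp1_entity_init() above resolves an entity's DPR routing register with a linear scan over a small static id-to-register table and rejects unknown ids with -EINVAL. The standalone sketch below reproduces just that lookup shape; the node ids and register offsets are sample values standing in for the VI6_DPR_* constants defined later in vsp1_regs.h, not an authoritative copy of the table.

#include <stdio.h>
#include <stddef.h>

struct route_entry {
        unsigned int id;
        unsigned int reg;
};

/* Sample id/register pairs standing in for the VI6_DPR_* definitions. */
static const struct route_entry routes[] = {
        {  0, 0x2000 },         /* e.g. an RPF instance */
        { 17, 0x2028 },         /* e.g. a UDS instance */
        { 55, 0x0000 },         /* e.g. the LIF, no routing register */
};

/* Mirror of the lookup loop in vsp1_entity_init(). */
static int route_lookup(unsigned int id, unsigned int *reg)
{
        size_t i;

        for (i = 0; i < sizeof(routes) / sizeof(routes[0]); ++i) {
                if (routes[i].id == id) {
                        *reg = routes[i].reg;
                        return 0;
                }
        }

        return -1;      /* the driver returns -EINVAL here */
}

int main(void)
{
        unsigned int reg;

        if (!route_lookup(17, &reg))
                printf("id 17 routes through register 0x%04x\n", reg);
        if (route_lookup(42, &reg))
                printf("id 42 is unknown and rejected\n");

        return 0;
}
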
diff --git a/drivers/media/platform/vsp1/vsp1_entity.h b/drivers/media/platform/vsp1/vsp1_entity.h
new file mode 100644
index 00000000000..c4feab2cbb8
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_entity.h
@@ -0,0 +1,68 @@
+/*
+ * vsp1_entity.h -- R-Car VSP1 Base Entity
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_ENTITY_H__
+#define __VSP1_ENTITY_H__
+
+#include <linux/list.h>
+
+#include <media/v4l2-subdev.h>
+
+struct vsp1_device;
+
+enum vsp1_entity_type {
+ VSP1_ENTITY_LIF,
+ VSP1_ENTITY_RPF,
+ VSP1_ENTITY_UDS,
+ VSP1_ENTITY_WPF,
+};
+
+struct vsp1_entity {
+ struct vsp1_device *vsp1;
+
+ enum vsp1_entity_type type;
+ unsigned int index;
+ unsigned int id;
+ unsigned int route;
+
+ struct list_head list_dev;
+ struct list_head list_pipe;
+
+ struct media_pad *pads;
+ unsigned int source_pad;
+
+ struct media_entity *sink;
+
+ struct v4l2_subdev subdev;
+ struct v4l2_mbus_framefmt *formats;
+};
+
+static inline struct vsp1_entity *to_vsp1_entity(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_entity, subdev);
+}
+
+int vsp1_entity_init(struct vsp1_device *vsp1, struct vsp1_entity *entity,
+ unsigned int num_pads);
+void vsp1_entity_destroy(struct vsp1_entity *entity);
+
+extern const struct v4l2_subdev_internal_ops vsp1_subdev_internal_ops;
+extern const struct media_entity_operations vsp1_media_ops;
+
+struct v4l2_mbus_framefmt *
+vsp1_entity_get_pad_format(struct vsp1_entity *entity,
+ struct v4l2_subdev_fh *fh,
+ unsigned int pad, u32 which);
+void vsp1_entity_init_formats(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh);
+
+#endif /* __VSP1_ENTITY_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_lif.c b/drivers/media/platform/vsp1/vsp1_lif.c
new file mode 100644
index 00000000000..74a32e69ef1
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_lif.c
@@ -0,0 +1,238 @@
+/*
+ * vsp1_lif.c -- R-Car VSP1 LCD Controller Interface
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_lif.h"
+
+#define LIF_MIN_SIZE 2U
+#define LIF_MAX_SIZE 2048U
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline u32 vsp1_lif_read(struct vsp1_lif *lif, u32 reg)
+{
+ return vsp1_read(lif->entity.vsp1, reg);
+}
+
+static inline void vsp1_lif_write(struct vsp1_lif *lif, u32 reg, u32 data)
+{
+ vsp1_write(lif->entity.vsp1, reg, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Core Operations
+ */
+
+static int lif_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ const struct v4l2_mbus_framefmt *format;
+ struct vsp1_lif *lif = to_lif(subdev);
+ unsigned int hbth = 1300;
+ unsigned int obth = 400;
+ unsigned int lbth = 200;
+
+ if (!enable) {
+ vsp1_lif_write(lif, VI6_LIF_CTRL, 0);
+ return 0;
+ }
+
+ format = &lif->entity.formats[LIF_PAD_SOURCE];
+
+ obth = min(obth, (format->width + 1) / 2 * format->height - 4);
+
+ vsp1_lif_write(lif, VI6_LIF_CSBTH,
+ (hbth << VI6_LIF_CSBTH_HBTH_SHIFT) |
+ (lbth << VI6_LIF_CSBTH_LBTH_SHIFT));
+
+ vsp1_lif_write(lif, VI6_LIF_CTRL,
+ (obth << VI6_LIF_CTRL_OBTH_SHIFT) |
+ (format->code == 0 ? VI6_LIF_CTRL_CFMT : 0) |
+ VI6_LIF_CTRL_REQSEL | VI6_LIF_CTRL_LIF_EN);
+
+ return 0;
+}
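
The only computed threshold in lif_s_stream() above is the output buffer one: it starts at 400 and is capped at (width + 1) / 2 * height - 4 for the format on the source pad. A tiny standalone rendition of that clamp, using arbitrary sample frame sizes:

#include <stdio.h>

/* Same clamp as in lif_s_stream(); the frame sizes below are only samples. */
static unsigned int lif_obth(unsigned int width, unsigned int height)
{
        unsigned int obth = 400;
        unsigned int limit = (width + 1) / 2 * height - 4;

        return obth < limit ? obth : limit;
}

int main(void)
{
        printf("obth for 1920x1080: %u\n", lif_obth(1920, 1080));       /* 400 */
        printf("obth for 16x16:     %u\n", lif_obth(16, 16));           /* 124 */

        return 0;
}
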
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int lif_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ static const unsigned int codes[] = {
+ V4L2_MBUS_FMT_ARGB8888_1X32,
+ V4L2_MBUS_FMT_AYUV8_1X32,
+ };
+
+ if (code->pad == LIF_PAD_SINK) {
+ if (code->index >= ARRAY_SIZE(codes))
+ return -EINVAL;
+
+ code->code = codes[code->index];
+ } else {
+ struct v4l2_mbus_framefmt *format;
+
+ /* The LIF can't perform format conversion; the sink format is
+ * always identical to the source format.
+ */
+ if (code->index)
+ return -EINVAL;
+
+ format = v4l2_subdev_get_try_format(fh, LIF_PAD_SINK);
+ code->code = format->code;
+ }
+
+ return 0;
+}
+
+static int lif_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(fh, LIF_PAD_SINK);
+
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ if (fse->pad == LIF_PAD_SINK) {
+ fse->min_width = LIF_MIN_SIZE;
+ fse->max_width = LIF_MAX_SIZE;
+ fse->min_height = LIF_MIN_SIZE;
+ fse->max_height = LIF_MAX_SIZE;
+ } else {
+ fse->min_width = format->width;
+ fse->max_width = format->width;
+ fse->min_height = format->height;
+ fse->max_height = format->height;
+ }
+
+ return 0;
+}
+
+static int lif_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_lif *lif = to_lif(subdev);
+
+ fmt->format = *vsp1_entity_get_pad_format(&lif->entity, fh, fmt->pad,
+ fmt->which);
+
+ return 0;
+}
+
+static int lif_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_lif *lif = to_lif(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Default to YUV if the requested format is not supported. */
+ if (fmt->format.code != V4L2_MBUS_FMT_ARGB8888_1X32 &&
+ fmt->format.code != V4L2_MBUS_FMT_AYUV8_1X32)
+ fmt->format.code = V4L2_MBUS_FMT_AYUV8_1X32;
+
+ format = vsp1_entity_get_pad_format(&lif->entity, fh, fmt->pad,
+ fmt->which);
+
+ if (fmt->pad == LIF_PAD_SOURCE) {
+ /* The LIF source format is always identical to its sink
+ * format.
+ */
+ fmt->format = *format;
+ return 0;
+ }
+
+ format->code = fmt->format.code;
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ LIF_MIN_SIZE, LIF_MAX_SIZE);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ LIF_MIN_SIZE, LIF_MAX_SIZE);
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&lif->entity, fh, LIF_PAD_SOURCE,
+ fmt->which);
+ *format = fmt->format;
+
+ return 0;
+}
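
lif_set_format() above follows the usual subdev negotiation pattern: requests on the source pad are ignored, sink requests are clamped to the 2..2048 range and then copied verbatim to the source pad. The sketch below models only that flow with plain structures; the pad array and clamp helper are illustrative, not the V4L2 API.

#include <stdio.h>

#define LIF_MIN_SIZE 2U
#define LIF_MAX_SIZE 2048U

struct framefmt {
        unsigned int width;
        unsigned int height;
};

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

/* pads[0] is the sink, pads[1] the source, as in the LIF entity. */
static void lif_set_sink_format(struct framefmt pads[2], struct framefmt req)
{
        pads[0].width = clamp_uint(req.width, LIF_MIN_SIZE, LIF_MAX_SIZE);
        pads[0].height = clamp_uint(req.height, LIF_MIN_SIZE, LIF_MAX_SIZE);
        pads[1] = pads[0];      /* propagate: the LIF can't scale or convert */
}

int main(void)
{
        struct framefmt pads[2];
        struct framefmt req = { 4096, 1 };      /* both dimensions out of range */

        lif_set_sink_format(pads, req);
        printf("sink %ux%u -> source %ux%u\n",
               pads[0].width, pads[0].height, pads[1].width, pads[1].height);

        return 0;
}
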
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static struct v4l2_subdev_video_ops lif_video_ops = {
+ .s_stream = lif_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops lif_pad_ops = {
+ .enum_mbus_code = lif_enum_mbus_code,
+ .enum_frame_size = lif_enum_frame_size,
+ .get_fmt = lif_get_format,
+ .set_fmt = lif_set_format,
+};
+
+static struct v4l2_subdev_ops lif_ops = {
+ .video = &lif_video_ops,
+ .pad = &lif_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1)
+{
+ struct v4l2_subdev *subdev;
+ struct vsp1_lif *lif;
+ int ret;
+
+ lif = devm_kzalloc(vsp1->dev, sizeof(*lif), GFP_KERNEL);
+ if (lif == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ lif->entity.type = VSP1_ENTITY_LIF;
+ lif->entity.id = VI6_DPR_NODE_LIF;
+
+ ret = vsp1_entity_init(vsp1, &lif->entity, 2);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Initialize the V4L2 subdev. */
+ subdev = &lif->entity.subdev;
+ v4l2_subdev_init(subdev, &lif_ops);
+
+ subdev->entity.ops = &vsp1_media_ops;
+ subdev->internal_ops = &vsp1_subdev_internal_ops;
+ snprintf(subdev->name, sizeof(subdev->name), "%s lif",
+ dev_name(vsp1->dev));
+ v4l2_set_subdevdata(subdev, lif);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ vsp1_entity_init_formats(subdev, NULL);
+
+ return lif;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_lif.h b/drivers/media/platform/vsp1/vsp1_lif.h
new file mode 100644
index 00000000000..89b93af56fd
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_lif.h
@@ -0,0 +1,37 @@
+/*
+ * vsp1_lif.h -- R-Car VSP1 LCD Controller Interface
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_LIF_H__
+#define __VSP1_LIF_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define LIF_PAD_SINK 0
+#define LIF_PAD_SOURCE 1
+
+struct vsp1_lif {
+ struct vsp1_entity entity;
+};
+
+static inline struct vsp1_lif *to_lif(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_lif, entity.subdev);
+}
+
+struct vsp1_lif *vsp1_lif_create(struct vsp1_device *vsp1);
+
+#endif /* __VSP1_LIF_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
new file mode 100644
index 00000000000..1d3304f1365
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -0,0 +1,581 @@
+/*
+ * vsp1_regs.h -- R-Car VSP1 Registers Definitions
+ *
+ * Copyright (C) 2013 Renesas Electronics Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __VSP1_REGS_H__
+#define __VSP1_REGS_H__
+
+/* -----------------------------------------------------------------------------
+ * General Control Registers
+ */
+
+#define VI6_CMD(n) (0x0000 + (n) * 4)
+#define VI6_CMD_STRCMD (1 << 0)
+
+#define VI6_CLK_DCSWT 0x0018
+#define VI6_CLK_DCSWT_CSTPW_MASK (0xff << 8)
+#define VI6_CLK_DCSWT_CSTPW_SHIFT 8
+#define VI6_CLK_DCSWT_CSTRW_MASK (0xff << 0)
+#define VI6_CLK_DCSWT_CSTRW_SHIFT 0
+
+#define VI6_SRESET 0x0028
+#define VI6_SRESET_SRTS(n) (1 << (n))
+
+#define VI6_STATUS 0x0038
+#define VI6_STATUS_SYS_ACT(n) (1 << ((n) + 8))
+
+#define VI6_WPF_IRQ_ENB(n) (0x0048 + (n) * 12)
+#define VI6_WFP_IRQ_ENB_DFEE (1 << 1)
+#define VI6_WFP_IRQ_ENB_FREE (1 << 0)
+
+#define VI6_WPF_IRQ_STA(n) (0x004c + (n) * 12)
+#define VI6_WFP_IRQ_STA_DFE (1 << 1)
+#define VI6_WFP_IRQ_STA_FRE (1 << 0)
+
+#define VI6_DISP_IRQ_ENB 0x0078
+#define VI6_DISP_IRQ_ENB_DSTE (1 << 8)
+#define VI6_DISP_IRQ_ENB_MAEE (1 << 5)
+#define VI6_DISP_IRQ_ENB_LNEE(n) (1 << ((n) + 4))
+
+#define VI6_DISP_IRQ_STA 0x007c
+#define VI6_DISP_IRQ_STA_DSE (1 << 8)
+#define VI6_DISP_IRQ_STA_MAE (1 << 5)
+#define VI6_DISP_IRQ_STA_LNE(n) (1 << ((n) + 4))
+
+#define VI6_WPF_LINE_COUNT(n) (0x0084 + (n) * 4)
+#define VI6_WPF_LINE_COUNT_MASK (0x1fffff << 0)
+
+/* -----------------------------------------------------------------------------
+ * Display List Control Registers
+ */
+
+#define VI6_DL_CTRL 0x0100
+#define VI6_DL_CTRL_AR_WAIT_MASK (0xffff << 16)
+#define VI6_DL_CTRL_AR_WAIT_SHIFT 16
+#define VI6_DL_CTRL_DC2 (1 << 12)
+#define VI6_DL_CTRL_DC1 (1 << 8)
+#define VI6_DL_CTRL_DC0 (1 << 4)
+#define VI6_DL_CTRL_CFM0 (1 << 2)
+#define VI6_DL_CTRL_NH0 (1 << 1)
+#define VI6_DL_CTRL_DLE (1 << 0)
+
+#define VI6_DL_HDR_ADDR(n) (0x0104 + (n) * 4)
+
+#define VI6_DL_SWAP 0x0114
+#define VI6_DL_SWAP_LWS (1 << 2)
+#define VI6_DL_SWAP_WDS (1 << 1)
+#define VI6_DL_SWAP_BTS (1 << 0)
+
+#define VI6_DL_EXT_CTRL 0x011c
+#define VI6_DL_EXT_CTRL_NWE (1 << 16)
+#define VI6_DL_EXT_CTRL_POLINT_MASK (0x3f << 8)
+#define VI6_DL_EXT_CTRL_POLINT_SHIFT 8
+#define VI6_DL_EXT_CTRL_DLPRI (1 << 5)
+#define VI6_DL_EXT_CTRL_EXPRI (1 << 4)
+#define VI6_DL_EXT_CTRL_EXT (1 << 0)
+
+#define VI6_DL_BODY_SIZE 0x0120
+#define VI6_DL_BODY_SIZE_UPD (1 << 24)
+#define VI6_DL_BODY_SIZE_BS_MASK (0x1ffff << 0)
+#define VI6_DL_BODY_SIZE_BS_SHIFT 0
+
+/* -----------------------------------------------------------------------------
+ * RPF Control Registers
+ */
+
+#define VI6_RPF_OFFSET 0x100
+
+#define VI6_RPF_SRC_BSIZE 0x0300
+#define VI6_RPF_SRC_BSIZE_BHSIZE_MASK (0x1fff << 16)
+#define VI6_RPF_SRC_BSIZE_BHSIZE_SHIFT 16
+#define VI6_RPF_SRC_BSIZE_BVSIZE_MASK (0x1fff << 0)
+#define VI6_RPF_SRC_BSIZE_BVSIZE_SHIFT 0
+
+#define VI6_RPF_SRC_ESIZE 0x0304
+#define VI6_RPF_SRC_ESIZE_EHSIZE_MASK (0x1fff << 16)
+#define VI6_RPF_SRC_ESIZE_EHSIZE_SHIFT 16
+#define VI6_RPF_SRC_ESIZE_EVSIZE_MASK (0x1fff << 0)
+#define VI6_RPF_SRC_ESIZE_EVSIZE_SHIFT 0
+
+#define VI6_RPF_INFMT 0x0308
+#define VI6_RPF_INFMT_VIR (1 << 28)
+#define VI6_RPF_INFMT_CIPM (1 << 16)
+#define VI6_RPF_INFMT_SPYCS (1 << 15)
+#define VI6_RPF_INFMT_SPUVS (1 << 14)
+#define VI6_RPF_INFMT_CEXT_ZERO (0 << 12)
+#define VI6_RPF_INFMT_CEXT_EXT (1 << 12)
+#define VI6_RPF_INFMT_CEXT_ONE (2 << 12)
+#define VI6_RPF_INFMT_CEXT_MASK (3 << 12)
+#define VI6_RPF_INFMT_RDTM_BT601 (0 << 9)
+#define VI6_RPF_INFMT_RDTM_BT601_EXT (1 << 9)
+#define VI6_RPF_INFMT_RDTM_BT709 (2 << 9)
+#define VI6_RPF_INFMT_RDTM_BT709_EXT (3 << 9)
+#define VI6_RPF_INFMT_RDTM_MASK (7 << 9)
+#define VI6_RPF_INFMT_CSC (1 << 8)
+#define VI6_RPF_INFMT_RDFMT_MASK (0x7f << 0)
+#define VI6_RPF_INFMT_RDFMT_SHIFT 0
+
+#define VI6_RPF_DSWAP 0x030c
+#define VI6_RPF_DSWAP_A_LLS (1 << 11)
+#define VI6_RPF_DSWAP_A_LWS (1 << 10)
+#define VI6_RPF_DSWAP_A_WDS (1 << 9)
+#define VI6_RPF_DSWAP_A_BTS (1 << 8)
+#define VI6_RPF_DSWAP_P_LLS (1 << 3)
+#define VI6_RPF_DSWAP_P_LWS (1 << 2)
+#define VI6_RPF_DSWAP_P_WDS (1 << 1)
+#define VI6_RPF_DSWAP_P_BTS (1 << 0)
+
+#define VI6_RPF_LOC 0x0310
+#define VI6_RPF_LOC_HCOORD_MASK (0x1fff << 16)
+#define VI6_RPF_LOC_HCOORD_SHIFT 16
+#define VI6_RPF_LOC_VCOORD_MASK (0x1fff << 0)
+#define VI6_RPF_LOC_VCOORD_SHIFT 0
+
+#define VI6_RPF_ALPH_SEL 0x0314
+#define VI6_RPF_ALPH_SEL_ASEL_PACKED (0 << 28)
+#define VI6_RPF_ALPH_SEL_ASEL_8B_PLANE (1 << 28)
+#define VI6_RPF_ALPH_SEL_ASEL_SELECT (2 << 28)
+#define VI6_RPF_ALPH_SEL_ASEL_1B_PLANE (3 << 28)
+#define VI6_RPF_ALPH_SEL_ASEL_FIXED (4 << 28)
+#define VI6_RPF_ALPH_SEL_ASEL_MASK (7 << 28)
+#define VI6_RPF_ALPH_SEL_ASEL_SHIFT 28
+#define VI6_RPF_ALPH_SEL_IROP_MASK (0xf << 24)
+#define VI6_RPF_ALPH_SEL_IROP_SHIFT 24
+#define VI6_RPF_ALPH_SEL_BSEL (1 << 23)
+#define VI6_RPF_ALPH_SEL_AEXT_ZERO (0 << 18)
+#define VI6_RPF_ALPH_SEL_AEXT_EXT (1 << 18)
+#define VI6_RPF_ALPH_SEL_AEXT_ONE (2 << 18)
+#define VI6_RPF_ALPH_SEL_AEXT_MASK (3 << 18)
+#define VI6_RPF_ALPH_SEL_ALPHA0_MASK (0xff << 8)
+#define VI6_RPF_ALPH_SEL_ALPHA0_SHIFT 8
+#define VI6_RPF_ALPH_SEL_ALPHA1_MASK (0xff << 0)
+#define VI6_RPF_ALPH_SEL_ALPHA1_SHIFT 0
+
+#define VI6_RPF_VRTCOL_SET 0x0318
+#define VI6_RPF_VRTCOL_SET_LAYA_MASK (0xff << 24)
+#define VI6_RPF_VRTCOL_SET_LAYA_SHIFT 24
+#define VI6_RPF_VRTCOL_SET_LAYR_MASK (0xff << 16)
+#define VI6_RPF_VRTCOL_SET_LAYR_SHIFT 16
+#define VI6_RPF_VRTCOL_SET_LAYG_MASK (0xff << 8)
+#define VI6_RPF_VRTCOL_SET_LAYG_SHIFT 8
+#define VI6_RPF_VRTCOL_SET_LAYB_MASK (0xff << 0)
+#define VI6_RPF_VRTCOL_SET_LAYB_SHIFT 0
+
+#define VI6_RPF_MSK_CTRL 0x031c
+#define VI6_RPF_MSK_CTRL_MSK_EN (1 << 24)
+#define VI6_RPF_MSK_CTRL_MGR_MASK (0xff << 16)
+#define VI6_RPF_MSK_CTRL_MGR_SHIFT 16
+#define VI6_RPF_MSK_CTRL_MGG_MASK (0xff << 8)
+#define VI6_RPF_MSK_CTRL_MGG_SHIFT 8
+#define VI6_RPF_MSK_CTRL_MGB_MASK (0xff << 0)
+#define VI6_RPF_MSK_CTRL_MGB_SHIFT 0
+
+#define VI6_RPF_MSK_SET0 0x0320
+#define VI6_RPF_MSK_SET1 0x0324
+#define VI6_RPF_MSK_SET_MSA_MASK (0xff << 24)
+#define VI6_RPF_MSK_SET_MSA_SHIFT 24
+#define VI6_RPF_MSK_SET_MSR_MASK (0xff << 16)
+#define VI6_RPF_MSK_SET_MSR_SHIFT 16
+#define VI6_RPF_MSK_SET_MSG_MASK (0xff << 8)
+#define VI6_RPF_MSK_SET_MSG_SHIFT 8
+#define VI6_RPF_MSK_SET_MSB_MASK (0xff << 0)
+#define VI6_RPF_MSK_SET_MSB_SHIFT 0
+
+#define VI6_RPF_CKEY_CTRL 0x0328
+#define VI6_RPF_CKEY_CTRL_CV (1 << 4)
+#define VI6_RPF_CKEY_CTRL_SAPE1 (1 << 1)
+#define VI6_RPF_CKEY_CTRL_SAPE0 (1 << 0)
+
+#define VI6_RPF_CKEY_SET0 0x032c
+#define VI6_RPF_CKEY_SET1 0x0330
+#define VI6_RPF_CKEY_SET_AP_MASK (0xff << 24)
+#define VI6_RPF_CKEY_SET_AP_SHIFT 24
+#define VI6_RPF_CKEY_SET_R_MASK (0xff << 16)
+#define VI6_RPF_CKEY_SET_R_SHIFT 16
+#define VI6_RPF_CKEY_SET_GY_MASK (0xff << 8)
+#define VI6_RPF_CKEY_SET_GY_SHIFT 8
+#define VI6_RPF_CKEY_SET_B_MASK (0xff << 0)
+#define VI6_RPF_CKEY_SET_B_SHIFT 0
+
+#define VI6_RPF_SRCM_PSTRIDE 0x0334
+#define VI6_RPF_SRCM_PSTRIDE_Y_SHIFT 16
+#define VI6_RPF_SRCM_PSTRIDE_C_SHIFT 0
+
+#define VI6_RPF_SRCM_ASTRIDE 0x0338
+#define VI6_RPF_SRCM_PSTRIDE_A_SHIFT 0
+
+#define VI6_RPF_SRCM_ADDR_Y 0x033c
+#define VI6_RPF_SRCM_ADDR_C0 0x0340
+#define VI6_RPF_SRCM_ADDR_C1 0x0344
+#define VI6_RPF_SRCM_ADDR_AI 0x0348
+
+/* -----------------------------------------------------------------------------
+ * WPF Control Registers
+ */
+
+#define VI6_WPF_OFFSET 0x100
+
+#define VI6_WPF_SRCRPF 0x1000
+#define VI6_WPF_SRCRPF_VIRACT_DIS (0 << 28)
+#define VI6_WPF_SRCRPF_VIRACT_SUB (1 << 28)
+#define VI6_WPF_SRCRPF_VIRACT_MST (2 << 28)
+#define VI6_WPF_SRCRPF_VIRACT_MASK (3 << 28)
+#define VI6_WPF_SRCRPF_RPF_ACT_DIS(n) (0 << ((n) * 2))
+#define VI6_WPF_SRCRPF_RPF_ACT_SUB(n) (1 << ((n) * 2))
+#define VI6_WPF_SRCRPF_RPF_ACT_MST(n) (2 << ((n) * 2))
+#define VI6_WPF_SRCRPF_RPF_ACT_MASK(n) (3 << ((n) * 2))
+
+#define VI6_WPF_HSZCLIP 0x1004
+#define VI6_WPF_VSZCLIP 0x1008
+#define VI6_WPF_SZCLIP_EN (1 << 28)
+#define VI6_WPF_SZCLIP_OFST_MASK (0xff << 16)
+#define VI6_WPF_SZCLIP_OFST_SHIFT 16
+#define VI6_WPF_SZCLIP_SIZE_MASK (0x1fff << 0)
+#define VI6_WPF_SZCLIP_SIZE_SHIFT 0
+
+#define VI6_WPF_OUTFMT 0x100c
+#define VI6_WPF_OUTFMT_PDV_MASK (0xff << 24)
+#define VI6_WPF_OUTFMT_PDV_SHIFT 24
+#define VI6_WPF_OUTFMT_PXA (1 << 23)
+#define VI6_WPF_OUTFMT_FLP (1 << 16)
+#define VI6_WPF_OUTFMT_SPYCS (1 << 15)
+#define VI6_WPF_OUTFMT_SPUVS (1 << 14)
+#define VI6_WPF_OUTFMT_DITH_DIS (0 << 12)
+#define VI6_WPF_OUTFMT_DITH_EN (3 << 12)
+#define VI6_WPF_OUTFMT_DITH_MASK (3 << 12)
+#define VI6_WPF_OUTFMT_WRTM_BT601 (0 << 9)
+#define VI6_WPF_OUTFMT_WRTM_BT601_EXT (1 << 9)
+#define VI6_WPF_OUTFMT_WRTM_BT709 (2 << 9)
+#define VI6_WPF_OUTFMT_WRTM_BT709_EXT (3 << 9)
+#define VI6_WPF_OUTFMT_WRTM_MASK (7 << 9)
+#define VI6_WPF_OUTFMT_CSC (1 << 8)
+#define VI6_WPF_OUTFMT_WRFMT_MASK (0x7f << 0)
+#define VI6_WPF_OUTFMT_WRFMT_SHIFT 0
+
+#define VI6_WPF_DSWAP 0x1010
+#define VI6_WPF_DSWAP_P_LLS (1 << 3)
+#define VI6_WPF_DSWAP_P_LWS (1 << 2)
+#define VI6_WPF_DSWAP_P_WDS (1 << 1)
+#define VI6_WPF_DSWAP_P_BTS (1 << 0)
+
+#define VI6_WPF_RNDCTRL 0x1014
+#define VI6_WPF_RNDCTRL_CBRM (1 << 28)
+#define VI6_WPF_RNDCTRL_ABRM_TRUNC (0 << 24)
+#define VI6_WPF_RNDCTRL_ABRM_ROUND (1 << 24)
+#define VI6_WPF_RNDCTRL_ABRM_THRESH (2 << 24)
+#define VI6_WPF_RNDCTRL_ABRM_MASK (3 << 24)
+#define VI6_WPF_RNDCTRL_ATHRESH_MASK (0xff << 16)
+#define VI6_WPF_RNDCTRL_ATHRESH_SHIFT 16
+#define VI6_WPF_RNDCTRL_CLMD_FULL (0 << 12)
+#define VI6_WPF_RNDCTRL_CLMD_CLIP (1 << 12)
+#define VI6_WPF_RNDCTRL_CLMD_EXT (2 << 12)
+#define VI6_WPF_RNDCTRL_CLMD_MASK (3 << 12)
+
+#define VI6_WPF_DSTM_STRIDE_Y 0x101c
+#define VI6_WPF_DSTM_STRIDE_C 0x1020
+#define VI6_WPF_DSTM_ADDR_Y 0x1024
+#define VI6_WPF_DSTM_ADDR_C0 0x1028
+#define VI6_WPF_DSTM_ADDR_C1 0x102c
+
+#define VI6_WPF_WRBCK_CTRL 0x1034
+#define VI6_WPF_WRBCK_CTRL_WBMD (1 << 0)
+
+/* -----------------------------------------------------------------------------
+ * DPR Control Registers
+ */
+
+#define VI6_DPR_RPF_ROUTE(n) (0x2000 + (n) * 4)
+
+#define VI6_DPR_WPF_FPORCH(n) (0x2014 + (n) * 4)
+#define VI6_DPR_WPF_FPORCH_FP_WPFN (5 << 8)
+
+#define VI6_DPR_SRU_ROUTE 0x2024
+#define VI6_DPR_UDS_ROUTE(n) (0x2028 + (n) * 4)
+#define VI6_DPR_LUT_ROUTE 0x203c
+#define VI6_DPR_CLU_ROUTE 0x2040
+#define VI6_DPR_HST_ROUTE 0x2044
+#define VI6_DPR_HSI_ROUTE 0x2048
+#define VI6_DPR_BRU_ROUTE 0x204c
+#define VI6_DPR_ROUTE_FXA_MASK (0xff << 16)
+#define VI6_DPR_ROUTE_FXA_SHIFT 16
+#define VI6_DPR_ROUTE_FP_MASK (0xff << 8)
+#define VI6_DPR_ROUTE_FP_SHIFT 8
+#define VI6_DPR_ROUTE_RT_MASK (0x3f << 0)
+#define VI6_DPR_ROUTE_RT_SHIFT 0
+
+#define VI6_DPR_HGO_SMPPT 0x2050
+#define VI6_DPR_HGT_SMPPT 0x2054
+#define VI6_DPR_SMPPT_TGW_MASK (7 << 8)
+#define VI6_DPR_SMPPT_TGW_SHIFT 8
+#define VI6_DPR_SMPPT_PT_MASK (0x3f << 0)
+#define VI6_DPR_SMPPT_PT_SHIFT 0
+
+#define VI6_DPR_NODE_RPF(n) (n)
+#define VI6_DPR_NODE_SRU 16
+#define VI6_DPR_NODE_UDS(n) (17 + (n))
+#define VI6_DPR_NODE_LUT 22
+#define VI6_DPR_NODE_BRU_IN(n) (23 + (n))
+#define VI6_DPR_NODE_BRU_OUT 27
+#define VI6_DPR_NODE_CLU 29
+#define VI6_DPR_NODE_HST 30
+#define VI6_DPR_NODE_HSI 31
+#define VI6_DPR_NODE_LIF 55
+#define VI6_DPR_NODE_WPF(n) (56 + (n))
+#define VI6_DPR_NODE_UNUSED 63
+
+/* -----------------------------------------------------------------------------
+ * SRU Control Registers
+ */
+
+#define VI6_SRU_CTRL0 0x2200
+#define VI6_SRU_CTRL1 0x2204
+#define VI6_SRU_CTRL2 0x2208
+
+/* -----------------------------------------------------------------------------
+ * UDS Control Registers
+ */
+
+#define VI6_UDS_OFFSET 0x100
+
+#define VI6_UDS_CTRL 0x2300
+#define VI6_UDS_CTRL_AMD (1 << 30)
+#define VI6_UDS_CTRL_FMD (1 << 29)
+#define VI6_UDS_CTRL_BLADV (1 << 28)
+#define VI6_UDS_CTRL_AON (1 << 25)
+#define VI6_UDS_CTRL_ATHON (1 << 24)
+#define VI6_UDS_CTRL_BC (1 << 20)
+#define VI6_UDS_CTRL_NE_A (1 << 19)
+#define VI6_UDS_CTRL_NE_RCR (1 << 18)
+#define VI6_UDS_CTRL_NE_GY (1 << 17)
+#define VI6_UDS_CTRL_NE_BCB (1 << 16)
+#define VI6_UDS_CTRL_TDIPC (1 << 1)
+
+#define VI6_UDS_SCALE 0x2304
+#define VI6_UDS_SCALE_HMANT_MASK (0xf << 28)
+#define VI6_UDS_SCALE_HMANT_SHIFT 28
+#define VI6_UDS_SCALE_HFRAC_MASK (0xfff << 16)
+#define VI6_UDS_SCALE_HFRAC_SHIFT 16
+#define VI6_UDS_SCALE_VMANT_MASK (0xf << 12)
+#define VI6_UDS_SCALE_VMANT_SHIFT 12
+#define VI6_UDS_SCALE_VFRAC_MASK (0xfff << 0)
+#define VI6_UDS_SCALE_VFRAC_SHIFT 0
+
+#define VI6_UDS_ALPTH 0x2308
+#define VI6_UDS_ALPTH_TH1_MASK (0xff << 8)
+#define VI6_UDS_ALPTH_TH1_SHIFT 8
+#define VI6_UDS_ALPTH_TH0_MASK (0xff << 0)
+#define VI6_UDS_ALPTH_TH0_SHIFT 0
+
+#define VI6_UDS_ALPVAL 0x230c
+#define VI6_UDS_ALPVAL_VAL2_MASK (0xff << 16)
+#define VI6_UDS_ALPVAL_VAL2_SHIFT 16
+#define VI6_UDS_ALPVAL_VAL1_MASK (0xff << 8)
+#define VI6_UDS_ALPVAL_VAL1_SHIFT 8
+#define VI6_UDS_ALPVAL_VAL0_MASK (0xff << 0)
+#define VI6_UDS_ALPVAL_VAL0_SHIFT 0
+
+#define VI6_UDS_PASS_BWIDTH 0x2310
+#define VI6_UDS_PASS_BWIDTH_H_MASK (0x7f << 16)
+#define VI6_UDS_PASS_BWIDTH_H_SHIFT 16
+#define VI6_UDS_PASS_BWIDTH_V_MASK (0x7f << 0)
+#define VI6_UDS_PASS_BWIDTH_V_SHIFT 0
+
+#define VI6_UDS_IPC 0x2318
+#define VI6_UDS_IPC_FIELD (1 << 27)
+#define VI6_UDS_IPC_VEDP_MASK (0xfff << 0)
+#define VI6_UDS_IPC_VEDP_SHIFT 0
+
+#define VI6_UDS_CLIP_SIZE 0x2324
+#define VI6_UDS_CLIP_SIZE_HSIZE_MASK (0x1fff << 16)
+#define VI6_UDS_CLIP_SIZE_HSIZE_SHIFT 16
+#define VI6_UDS_CLIP_SIZE_VSIZE_MASK (0x1fff << 0)
+#define VI6_UDS_CLIP_SIZE_VSIZE_SHIFT 0
+
+#define VI6_UDS_FILL_COLOR 0x2328
+#define VI6_UDS_FILL_COLOR_RFILC_MASK (0xff << 16)
+#define VI6_UDS_FILL_COLOR_RFILC_SHIFT 16
+#define VI6_UDS_FILL_COLOR_GFILC_MASK (0xff << 8)
+#define VI6_UDS_FILL_COLOR_GFILC_SHIFT 8
+#define VI6_UDS_FILL_COLOR_BFILC_MASK (0xff << 0)
+#define VI6_UDS_FILL_COLOR_BFILC_SHIFT 0
+
+/* -----------------------------------------------------------------------------
+ * LUT Control Registers
+ */
+
+#define VI6_LUT_CTRL 0x2800
+
+/* -----------------------------------------------------------------------------
+ * CLU Control Registers
+ */
+
+#define VI6_CLU_CTRL 0x2900
+
+/* -----------------------------------------------------------------------------
+ * HST Control Registers
+ */
+
+#define VI6_HST_CTRL 0x2a00
+
+/* -----------------------------------------------------------------------------
+ * HSI Control Registers
+ */
+
+#define VI6_HSI_CTRL 0x2b00
+
+/* -----------------------------------------------------------------------------
+ * BRU Control Registers
+ */
+
+#define VI6_BRU_INCTRL 0x2c00
+#define VI6_BRU_VIRRPF_SIZE 0x2c04
+#define VI6_BRU_VIRRPF_LOC 0x2c08
+#define VI6_BRU_VIRRPF_COL 0x2c0c
+#define VI6_BRU_CTRL(n) (0x2c10 + (n) * 8)
+#define VI6_BRU_BLD(n) (0x2c14 + (n) * 8)
+#define VI6_BRU_ROP 0x2c30
+
+/* -----------------------------------------------------------------------------
+ * HGO Control Registers
+ */
+
+#define VI6_HGO_OFFSET 0x3000
+#define VI6_HGO_SIZE 0x3004
+#define VI6_HGO_MODE 0x3008
+#define VI6_HGO_LB_TH 0x300c
+#define VI6_HGO_LBn_H(n) (0x3010 + (n) * 8)
+#define VI6_HGO_LBn_V(n) (0x3014 + (n) * 8)
+#define VI6_HGO_R_HISTO 0x3030
+#define VI6_HGO_R_MAXMIN 0x3130
+#define VI6_HGO_R_SUM 0x3134
+#define VI6_HGO_R_LB_DET 0x3138
+#define VI6_HGO_G_HISTO 0x3140
+#define VI6_HGO_G_MAXMIN 0x3240
+#define VI6_HGO_G_SUM 0x3244
+#define VI6_HGO_G_LB_DET 0x3248
+#define VI6_HGO_B_HISTO 0x3250
+#define VI6_HGO_B_MAXMIN 0x3350
+#define VI6_HGO_B_SUM 0x3354
+#define VI6_HGO_B_LB_DET 0x3358
+#define VI6_HGO_REGRST 0x33fc
+
+/* -----------------------------------------------------------------------------
+ * HGT Control Registers
+ */
+
+#define VI6_HGT_OFFSET 0x3400
+#define VI6_HGT_SIZE 0x3404
+#define VI6_HGT_MODE 0x3408
+#define VI6_HGT_HUE_AREA(n) (0x340c + (n) * 4)
+#define VI6_HGT_LB_TH 0x3424
+#define VI6_HGT_LBn_H(n) (0x3438 + (n) * 8)
+#define VI6_HGT_LBn_V(n) (0x342c + (n) * 8)
+#define VI6_HGT_HISTO(m, n) (0x3450 + (m) * 128 + (n) * 4)
+#define VI6_HGT_MAXMIN 0x3750
+#define VI6_HGT_SUM 0x3754
+#define VI6_HGT_LB_DET 0x3758
+#define VI6_HGT_REGRST 0x37fc
+
+/* -----------------------------------------------------------------------------
+ * LIF Control Registers
+ */
+
+#define VI6_LIF_CTRL 0x3b00
+#define VI6_LIF_CTRL_OBTH_MASK (0x7ff << 16)
+#define VI6_LIF_CTRL_OBTH_SHIFT 16
+#define VI6_LIF_CTRL_CFMT (1 << 4)
+#define VI6_LIF_CTRL_REQSEL (1 << 1)
+#define VI6_LIF_CTRL_LIF_EN (1 << 0)
+
+#define VI6_LIF_CSBTH 0x3b04
+#define VI6_LIF_CSBTH_HBTH_MASK (0x7ff << 16)
+#define VI6_LIF_CSBTH_HBTH_SHIFT 16
+#define VI6_LIF_CSBTH_LBTH_MASK (0x7ff << 0)
+#define VI6_LIF_CSBTH_LBTH_SHIFT 0
+
+/* -----------------------------------------------------------------------------
+ * Security Control Registers
+ */
+
+#define VI6_SECURITY_CTRL0 0x3d00
+#define VI6_SECURITY_CTRL1 0x3d04
+
+/* -----------------------------------------------------------------------------
+ * RPF CLUT Registers
+ */
+
+#define VI6_CLUT_TABLE 0x4000
+
+/* -----------------------------------------------------------------------------
+ * 1D LUT Registers
+ */
+
+#define VI6_LUT_TABLE 0x7000
+
+/* -----------------------------------------------------------------------------
+ * 3D LUT Registers
+ */
+
+#define VI6_CLU_ADDR 0x7400
+#define VI6_CLU_DATA 0x7404
+
+/* -----------------------------------------------------------------------------
+ * Formats
+ */
+
+#define VI6_FMT_RGB_332 0x00
+#define VI6_FMT_XRGB_4444 0x01
+#define VI6_FMT_RGBX_4444 0x02
+#define VI6_FMT_XRGB_1555 0x04
+#define VI6_FMT_RGBX_5551 0x05
+#define VI6_FMT_RGB_565 0x06
+#define VI6_FMT_AXRGB_86666 0x07
+#define VI6_FMT_RGBXA_66668 0x08
+#define VI6_FMT_XRGBA_66668 0x09
+#define VI6_FMT_ARGBX_86666 0x0a
+#define VI6_FMT_AXRXGXB_8262626 0x0b
+#define VI6_FMT_XRXGXBA_2626268 0x0c
+#define VI6_FMT_ARXGXBX_8626262 0x0d
+#define VI6_FMT_RXGXBXA_6262628 0x0e
+#define VI6_FMT_XRGB_6666 0x0f
+#define VI6_FMT_RGBX_6666 0x10
+#define VI6_FMT_XRXGXB_262626 0x11
+#define VI6_FMT_RXGXBX_626262 0x12
+#define VI6_FMT_ARGB_8888 0x13
+#define VI6_FMT_RGBA_8888 0x14
+#define VI6_FMT_RGB_888 0x15
+#define VI6_FMT_XRGXGB_763763 0x16
+#define VI6_FMT_XXRGB_86666 0x17
+#define VI6_FMT_BGR_888 0x18
+#define VI6_FMT_ARGB_4444 0x19
+#define VI6_FMT_RGBA_4444 0x1a
+#define VI6_FMT_ARGB_1555 0x1b
+#define VI6_FMT_RGBA_5551 0x1c
+#define VI6_FMT_ABGR_4444 0x1d
+#define VI6_FMT_BGRA_4444 0x1e
+#define VI6_FMT_ABGR_1555 0x1f
+#define VI6_FMT_BGRA_5551 0x20
+#define VI6_FMT_XBXGXR_262626 0x21
+#define VI6_FMT_ABGR_8888 0x22
+#define VI6_FMT_XXRGB_88565 0x23
+
+#define VI6_FMT_Y_UV_444 0x40
+#define VI6_FMT_Y_UV_422 0x41
+#define VI6_FMT_Y_UV_420 0x42
+#define VI6_FMT_YUV_444 0x46
+#define VI6_FMT_YUYV_422 0x47
+#define VI6_FMT_YYUV_422 0x48
+#define VI6_FMT_YUV_420 0x49
+#define VI6_FMT_Y_U_V_444 0x4a
+#define VI6_FMT_Y_U_V_422 0x4b
+#define VI6_FMT_Y_U_V_420 0x4c
+
+#endif /* __VSP1_REGS_H__ */
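
All the *_MASK/*_SHIFT pairs in this header follow one convention: shift a field value into position when composing a register word, mask and shift back when decoding it. The sketch below demonstrates that convention with the VI6_UDS_SCALE horizontal fields; the mantissa/fraction values are arbitrary samples and the macros are re-declared locally (with unsigned constants) so the program is self-contained.

#include <stdio.h>
#include <stdint.h>

/* Local copies of the VI6_UDS_SCALE horizontal field definitions above. */
#define UDS_SCALE_HMANT_MASK    (0xfU << 28)
#define UDS_SCALE_HMANT_SHIFT   28
#define UDS_SCALE_HFRAC_MASK    (0xfffU << 16)
#define UDS_SCALE_HFRAC_SHIFT   16

int main(void)
{
        unsigned int mant = 0x1;        /* sample values only */
        unsigned int frac = 0x800;
        uint32_t reg;

        /* Compose the register word from its fields. */
        reg = (mant << UDS_SCALE_HMANT_SHIFT) | (frac << UDS_SCALE_HFRAC_SHIFT);

        /* Decode the fields again with the matching mask/shift pairs. */
        printf("reg 0x%08x: mant %u, frac 0x%03x\n", (unsigned int)reg,
               (unsigned int)((reg & UDS_SCALE_HMANT_MASK) >> UDS_SCALE_HMANT_SHIFT),
               (unsigned int)((reg & UDS_SCALE_HFRAC_MASK) >> UDS_SCALE_HFRAC_SHIFT));

        return 0;
}
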
diff --git a/drivers/media/platform/vsp1/vsp1_rpf.c b/drivers/media/platform/vsp1/vsp1_rpf.c
new file mode 100644
index 00000000000..254871d3423
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_rpf.c
@@ -0,0 +1,209 @@
+/*
+ * vsp1_rpf.c -- R-Car VSP1 Read Pixel Formatter
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_video.h"
+
+#define RPF_MAX_WIDTH 8190
+#define RPF_MAX_HEIGHT 8190
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline u32 vsp1_rpf_read(struct vsp1_rwpf *rpf, u32 reg)
+{
+ return vsp1_read(rpf->entity.vsp1,
+ reg + rpf->entity.index * VI6_RPF_OFFSET);
+}
+
+static inline void vsp1_rpf_write(struct vsp1_rwpf *rpf, u32 reg, u32 data)
+{
+ vsp1_write(rpf->entity.vsp1,
+ reg + rpf->entity.index * VI6_RPF_OFFSET, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Core Operations
+ */
+
+static int rpf_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct vsp1_rwpf *rpf = to_rwpf(subdev);
+ const struct vsp1_format_info *fmtinfo = rpf->video.fmtinfo;
+ const struct v4l2_pix_format_mplane *format = &rpf->video.format;
+ u32 pstride;
+ u32 infmt;
+
+ if (!enable)
+ return 0;
+
+ /* Source size and stride. Cropping isn't supported yet. */
+ vsp1_rpf_write(rpf, VI6_RPF_SRC_BSIZE,
+ (format->width << VI6_RPF_SRC_BSIZE_BHSIZE_SHIFT) |
+ (format->height << VI6_RPF_SRC_BSIZE_BVSIZE_SHIFT));
+ vsp1_rpf_write(rpf, VI6_RPF_SRC_ESIZE,
+ (format->width << VI6_RPF_SRC_ESIZE_EHSIZE_SHIFT) |
+ (format->height << VI6_RPF_SRC_ESIZE_EVSIZE_SHIFT));
+
+ pstride = format->plane_fmt[0].bytesperline
+ << VI6_RPF_SRCM_PSTRIDE_Y_SHIFT;
+ if (format->num_planes > 1)
+ pstride |= format->plane_fmt[1].bytesperline
+ << VI6_RPF_SRCM_PSTRIDE_C_SHIFT;
+
+ vsp1_rpf_write(rpf, VI6_RPF_SRCM_PSTRIDE, pstride);
+
+ /* Format */
+ infmt = VI6_RPF_INFMT_CIPM
+ | (fmtinfo->hwfmt << VI6_RPF_INFMT_RDFMT_SHIFT);
+
+ if (fmtinfo->swap_yc)
+ infmt |= VI6_RPF_INFMT_SPYCS;
+ if (fmtinfo->swap_uv)
+ infmt |= VI6_RPF_INFMT_SPUVS;
+
+ if (rpf->entity.formats[RWPF_PAD_SINK].code !=
+ rpf->entity.formats[RWPF_PAD_SOURCE].code)
+ infmt |= VI6_RPF_INFMT_CSC;
+
+ vsp1_rpf_write(rpf, VI6_RPF_INFMT, infmt);
+ vsp1_rpf_write(rpf, VI6_RPF_DSWAP, fmtinfo->swap);
+
+ /* Output location. Composing isn't supported yet. */
+ vsp1_rpf_write(rpf, VI6_RPF_LOC, 0);
+
+ /* Disable alpha, mask and color key. Set the alpha channel to a fixed
+ * value of 255.
+ */
+ vsp1_rpf_write(rpf, VI6_RPF_ALPH_SEL, VI6_RPF_ALPH_SEL_ASEL_FIXED);
+ vsp1_rpf_write(rpf, VI6_RPF_VRTCOL_SET,
+ 255 << VI6_RPF_VRTCOL_SET_LAYA_SHIFT);
+ vsp1_rpf_write(rpf, VI6_RPF_MSK_CTRL, 0);
+ vsp1_rpf_write(rpf, VI6_RPF_CKEY_CTRL, 0);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static struct v4l2_subdev_video_ops rpf_video_ops = {
+ .s_stream = rpf_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops rpf_pad_ops = {
+ .enum_mbus_code = vsp1_rwpf_enum_mbus_code,
+ .enum_frame_size = vsp1_rwpf_enum_frame_size,
+ .get_fmt = vsp1_rwpf_get_format,
+ .set_fmt = vsp1_rwpf_set_format,
+};
+
+static struct v4l2_subdev_ops rpf_ops = {
+ .video = &rpf_video_ops,
+ .pad = &rpf_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Video Device Operations
+ */
+
+static void rpf_vdev_queue(struct vsp1_video *video,
+ struct vsp1_video_buffer *buf)
+{
+ struct vsp1_rwpf *rpf = container_of(video, struct vsp1_rwpf, video);
+
+ vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_Y, buf->addr[0]);
+ if (buf->buf.num_planes > 1)
+ vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C0, buf->addr[1]);
+ if (buf->buf.num_planes > 2)
+ vsp1_rpf_write(rpf, VI6_RPF_SRCM_ADDR_C1, buf->addr[2]);
+}
+
+static const struct vsp1_video_operations rpf_vdev_ops = {
+ .queue = rpf_vdev_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index)
+{
+ struct v4l2_subdev *subdev;
+ struct vsp1_video *video;
+ struct vsp1_rwpf *rpf;
+ int ret;
+
+ rpf = devm_kzalloc(vsp1->dev, sizeof(*rpf), GFP_KERNEL);
+ if (rpf == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ rpf->max_width = RPF_MAX_WIDTH;
+ rpf->max_height = RPF_MAX_HEIGHT;
+
+ rpf->entity.type = VSP1_ENTITY_RPF;
+ rpf->entity.index = index;
+ rpf->entity.id = VI6_DPR_NODE_RPF(index);
+
+ ret = vsp1_entity_init(vsp1, &rpf->entity, 2);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Initialize the V4L2 subdev. */
+ subdev = &rpf->entity.subdev;
+ v4l2_subdev_init(subdev, &rpf_ops);
+
+ subdev->entity.ops = &vsp1_media_ops;
+ subdev->internal_ops = &vsp1_subdev_internal_ops;
+ snprintf(subdev->name, sizeof(subdev->name), "%s rpf.%u",
+ dev_name(vsp1->dev), index);
+ v4l2_set_subdevdata(subdev, rpf);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ vsp1_entity_init_formats(subdev, NULL);
+
+ /* Initialize the video device. */
+ video = &rpf->video;
+
+ video->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ video->vsp1 = vsp1;
+ video->ops = &rpf_vdev_ops;
+
+ ret = vsp1_video_init(video, &rpf->entity);
+ if (ret < 0)
+ goto error_video;
+
+ /* Connect the video device to the RPF. */
+ ret = media_entity_create_link(&rpf->video.video.entity, 0,
+ &rpf->entity.subdev.entity,
+ RWPF_PAD_SINK,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret < 0)
+ goto error_link;
+
+ return rpf;
+
+error_link:
+ vsp1_video_cleanup(video);
+error_video:
+ media_entity_cleanup(&rpf->entity.subdev.entity);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.c b/drivers/media/platform/vsp1/vsp1_rwpf.c
new file mode 100644
index 00000000000..9752d5516ce
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.c
@@ -0,0 +1,124 @@
+/*
+ * vsp1_rwpf.c -- R-Car VSP1 Read and Write Pixel Formatters
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_video.h"
+
+#define RWPF_MIN_WIDTH 1
+#define RWPF_MIN_HEIGHT 1
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ static const unsigned int codes[] = {
+ V4L2_MBUS_FMT_ARGB8888_1X32,
+ V4L2_MBUS_FMT_AYUV8_1X32,
+ };
+
+ if (code->index >= ARRAY_SIZE(codes))
+ return -EINVAL;
+
+ code->code = codes[code->index];
+
+ return 0;
+}
+
+int vsp1_rwpf_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(fh, fse->pad);
+
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ if (fse->pad == RWPF_PAD_SINK) {
+ fse->min_width = RWPF_MIN_WIDTH;
+ fse->max_width = rwpf->max_width;
+ fse->min_height = RWPF_MIN_HEIGHT;
+ fse->max_height = rwpf->max_height;
+ } else {
+ /* The size on the source pad is fixed and always identical to
+ * the size on the sink pad.
+ */
+ fse->min_width = format->width;
+ fse->max_width = format->width;
+ fse->min_height = format->height;
+ fse->max_height = format->height;
+ }
+
+ return 0;
+}
+
+int vsp1_rwpf_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+
+ fmt->format = *vsp1_entity_get_pad_format(&rwpf->entity, fh, fmt->pad,
+ fmt->which);
+
+ return 0;
+}
+
+int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_rwpf *rwpf = to_rwpf(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ /* Default to YUV if the requested format is not supported. */
+ if (fmt->format.code != V4L2_MBUS_FMT_ARGB8888_1X32 &&
+ fmt->format.code != V4L2_MBUS_FMT_AYUV8_1X32)
+ fmt->format.code = V4L2_MBUS_FMT_AYUV8_1X32;
+
+ format = vsp1_entity_get_pad_format(&rwpf->entity, fh, fmt->pad,
+ fmt->which);
+
+ if (fmt->pad == RWPF_PAD_SOURCE) {
+ /* The RWPF performs format conversion but can't scale; only the
+ * format code can be changed on the source pad.
+ */
+ format->code = fmt->format.code;
+ fmt->format = *format;
+ return 0;
+ }
+
+ format->code = fmt->format.code;
+ format->width = clamp_t(unsigned int, fmt->format.width,
+ RWPF_MIN_WIDTH, rwpf->max_width);
+ format->height = clamp_t(unsigned int, fmt->format.height,
+ RWPF_MIN_HEIGHT, rwpf->max_height);
+ format->field = V4L2_FIELD_NONE;
+ format->colorspace = V4L2_COLORSPACE_SRGB;
+
+ fmt->format = *format;
+
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&rwpf->entity, fh, RWPF_PAD_SOURCE,
+ fmt->which);
+ *format = fmt->format;
+
+ return 0;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_rwpf.h b/drivers/media/platform/vsp1/vsp1_rwpf.h
new file mode 100644
index 00000000000..c182d85f36b
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_rwpf.h
@@ -0,0 +1,53 @@
+/*
+ * vsp1_rwpf.h -- R-Car VSP1 Read and Write Pixel Formatters
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_RWPF_H__
+#define __VSP1_RWPF_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_entity.h"
+#include "vsp1_video.h"
+
+#define RWPF_PAD_SINK 0
+#define RWPF_PAD_SOURCE 1
+
+struct vsp1_rwpf {
+ struct vsp1_entity entity;
+ struct vsp1_video video;
+
+ unsigned int max_width;
+ unsigned int max_height;
+};
+
+static inline struct vsp1_rwpf *to_rwpf(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_rwpf, entity.subdev);
+}
+
+struct vsp1_rwpf *vsp1_rpf_create(struct vsp1_device *vsp1, unsigned int index);
+struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index);
+
+int vsp1_rwpf_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code);
+int vsp1_rwpf_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse);
+int vsp1_rwpf_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt);
+int vsp1_rwpf_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt);
+
+#endif /* __VSP1_RWPF_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_uds.c b/drivers/media/platform/vsp1/vsp1_uds.c
new file mode 100644
index 00000000000..0e50b37f060
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_uds.c
@@ -0,0 +1,346 @@
+/*
+ * vsp1_uds.c -- R-Car VSP1 Up and Down Scaler
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_uds.h"
+
+#define UDS_MIN_SIZE 4U
+#define UDS_MAX_SIZE 8190U
+
+#define UDS_MIN_FACTOR 0x0100
+#define UDS_MAX_FACTOR 0xffff
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline u32 vsp1_uds_read(struct vsp1_uds *uds, u32 reg)
+{
+ return vsp1_read(uds->entity.vsp1,
+ reg + uds->entity.index * VI6_UDS_OFFSET);
+}
+
+static inline void vsp1_uds_write(struct vsp1_uds *uds, u32 reg, u32 data)
+{
+ vsp1_write(uds->entity.vsp1,
+ reg + uds->entity.index * VI6_UDS_OFFSET, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * Scaling Computation
+ */
+
+/*
+ * uds_output_size - Return the output size for an input size and scaling ratio
+ * @input: input size in pixels
+ * @ratio: scaling ratio in U4.12 fixed-point format
+ */
+static unsigned int uds_output_size(unsigned int input, unsigned int ratio)
+{
+ if (ratio > 4096) {
+ /* Down-scaling */
+ unsigned int mp;
+
+ mp = ratio / 4096;
+ mp = mp < 4 ? 1 : (mp < 8 ? 2 : 4);
+
+ return (input - 1) / mp * mp * 4096 / ratio + 1;
+ } else {
+ /* Up-scaling */
+ return (input - 1) * 4096 / ratio + 1;
+ }
+}
+
+/*
+ * uds_output_limits - Return the min and max output sizes for an input size
+ * @input: input size in pixels
+ * @minimum: minimum output size (returned)
+ * @maximum: maximum output size (returned)
+ */
+static void uds_output_limits(unsigned int input,
+ unsigned int *minimum, unsigned int *maximum)
+{
+ *minimum = max(uds_output_size(input, UDS_MAX_FACTOR), UDS_MIN_SIZE);
+ *maximum = min(uds_output_size(input, UDS_MIN_FACTOR), UDS_MAX_SIZE);
+}
+
+/*
+ * uds_passband_width - Return the passband filter width for a scaling ratio
+ * @ratio: scaling ratio in U4.12 fixed-point format
+ */
+static unsigned int uds_passband_width(unsigned int ratio)
+{
+ if (ratio >= 4096) {
+ /* Down-scaling */
+ unsigned int mp;
+
+ mp = ratio / 4096;
+ mp = mp < 4 ? 1 : (mp < 8 ? 2 : 4);
+
+ return 64 * 4096 * mp / ratio;
+ } else {
+ /* Up-scaling */
+ return 64;
+ }
+}
+
+static unsigned int uds_compute_ratio(unsigned int input, unsigned int output)
+{
+ /* TODO: This is an approximation that will need to be refined. */
+ return (input - 1) * 4096 / (output - 1);
+}
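
The scaler helpers above express ratios in U4.12 fixed point, so 4096 means 1:1 and larger values mean down-scaling. The standalone program below copies uds_compute_ratio() and the uds_output_size() rounding to show the round trip for a sample 1920-to-1280 horizontal downscale; the sizes are arbitrary example numbers.

#include <stdio.h>

/* Copy of uds_compute_ratio(): U4.12 fixed point, 4096 == 1:1. */
static unsigned int compute_ratio(unsigned int input, unsigned int output)
{
        return (input - 1) * 4096 / (output - 1);
}

/* Copy of uds_output_size(), including the down-scaling rounding step. */
static unsigned int output_size(unsigned int input, unsigned int ratio)
{
        if (ratio > 4096) {
                unsigned int mp = ratio / 4096;

                mp = mp < 4 ? 1 : (mp < 8 ? 2 : 4);
                return (input - 1) / mp * mp * 4096 / ratio + 1;
        }

        return (input - 1) * 4096 / ratio + 1;
}

int main(void)
{
        unsigned int ratio = compute_ratio(1920, 1280);

        /* 1919 * 4096 / 1279 = 6145 = 0x1801, i.e. roughly 1.5x down. */
        printf("1920 -> 1280: ratio 0x%04x\n", ratio);
        printf("output size recomputed from that ratio: %u\n",
               output_size(1920, ratio));

        return 0;
}

As the TODO in uds_compute_ratio() notes, the ratio is an approximation, so the recomputed size only lands exactly on the requested one for well-behaved cases like this sample.
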
+
+static void uds_compute_ratios(struct vsp1_uds *uds)
+{
+ struct v4l2_mbus_framefmt *input = &uds->entity.formats[UDS_PAD_SINK];
+ struct v4l2_mbus_framefmt *output =
+ &uds->entity.formats[UDS_PAD_SOURCE];
+
+ uds->hscale = uds_compute_ratio(input->width, output->width);
+ uds->vscale = uds_compute_ratio(input->height, output->height);
+
+ dev_dbg(uds->entity.vsp1->dev, "hscale %u vscale %u\n",
+ uds->hscale, uds->vscale);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Core Operations
+ */
+
+static int uds_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ const struct v4l2_mbus_framefmt *format;
+ struct vsp1_uds *uds = to_uds(subdev);
+
+ if (!enable)
+ return 0;
+
+ /* Enable multi-tap scaling. */
+ vsp1_uds_write(uds, VI6_UDS_CTRL, VI6_UDS_CTRL_BC);
+
+ vsp1_uds_write(uds, VI6_UDS_PASS_BWIDTH,
+ (uds_passband_width(uds->hscale)
+ << VI6_UDS_PASS_BWIDTH_H_SHIFT) |
+ (uds_passband_width(uds->vscale)
+ << VI6_UDS_PASS_BWIDTH_V_SHIFT));
+
+ /* Set the scaling ratios and the output size. */
+ format = &uds->entity.formats[UDS_PAD_SOURCE];
+
+ vsp1_uds_write(uds, VI6_UDS_SCALE,
+ (uds->hscale << VI6_UDS_SCALE_HFRAC_SHIFT) |
+ (uds->vscale << VI6_UDS_SCALE_VFRAC_SHIFT));
+ vsp1_uds_write(uds, VI6_UDS_CLIP_SIZE,
+ (format->width << VI6_UDS_CLIP_SIZE_HSIZE_SHIFT) |
+ (format->height << VI6_UDS_CLIP_SIZE_VSIZE_SHIFT));
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Pad Operations
+ */
+
+static int uds_enum_mbus_code(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ static const unsigned int codes[] = {
+ V4L2_MBUS_FMT_ARGB8888_1X32,
+ V4L2_MBUS_FMT_AYUV8_1X32,
+ };
+
+ if (code->pad == UDS_PAD_SINK) {
+ if (code->index >= ARRAY_SIZE(codes))
+ return -EINVAL;
+
+ code->code = codes[code->index];
+ } else {
+ struct v4l2_mbus_framefmt *format;
+
+ /* The UDS can't perform format conversion; the sink format is
+ * always identical to the source format.
+ */
+ if (code->index)
+ return -EINVAL;
+
+ format = v4l2_subdev_get_try_format(fh, UDS_PAD_SINK);
+ code->code = format->code;
+ }
+
+ return 0;
+}
+
+static int uds_enum_frame_size(struct v4l2_subdev *subdev,
+ struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct v4l2_mbus_framefmt *format;
+
+ format = v4l2_subdev_get_try_format(fh, UDS_PAD_SINK);
+
+ if (fse->index || fse->code != format->code)
+ return -EINVAL;
+
+ if (fse->pad == UDS_PAD_SINK) {
+ fse->min_width = UDS_MIN_SIZE;
+ fse->max_width = UDS_MAX_SIZE;
+ fse->min_height = UDS_MIN_SIZE;
+ fse->max_height = UDS_MAX_SIZE;
+ } else {
+ uds_output_limits(format->width, &fse->min_width,
+ &fse->max_width);
+ uds_output_limits(format->height, &fse->min_height,
+ &fse->max_height);
+ }
+
+ return 0;
+}
+
+static int uds_get_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_uds *uds = to_uds(subdev);
+
+ fmt->format = *vsp1_entity_get_pad_format(&uds->entity, fh, fmt->pad,
+ fmt->which);
+
+ return 0;
+}
+
+static void uds_try_format(struct vsp1_uds *uds, struct v4l2_subdev_fh *fh,
+ unsigned int pad, struct v4l2_mbus_framefmt *fmt,
+ enum v4l2_subdev_format_whence which)
+{
+ struct v4l2_mbus_framefmt *format;
+ unsigned int minimum;
+ unsigned int maximum;
+
+ switch (pad) {
+ case UDS_PAD_SINK:
+ /* Default to YUV if the requested format is not supported. */
+ if (fmt->code != V4L2_MBUS_FMT_ARGB8888_1X32 &&
+ fmt->code != V4L2_MBUS_FMT_AYUV8_1X32)
+ fmt->code = V4L2_MBUS_FMT_AYUV8_1X32;
+
+ fmt->width = clamp(fmt->width, UDS_MIN_SIZE, UDS_MAX_SIZE);
+ fmt->height = clamp(fmt->height, UDS_MIN_SIZE, UDS_MAX_SIZE);
+ break;
+
+ case UDS_PAD_SOURCE:
+ /* The UDS scales but can't perform format conversion. */
+ format = vsp1_entity_get_pad_format(&uds->entity, fh,
+ UDS_PAD_SINK, which);
+ fmt->code = format->code;
+
+ uds_output_limits(format->width, &minimum, &maximum);
+ fmt->width = clamp(fmt->width, minimum, maximum);
+ uds_output_limits(format->height, &minimum, &maximum);
+ fmt->height = clamp(fmt->height, minimum, maximum);
+ break;
+ }
+
+ fmt->field = V4L2_FIELD_NONE;
+ fmt->colorspace = V4L2_COLORSPACE_SRGB;
+}
+
+static int uds_set_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh,
+ struct v4l2_subdev_format *fmt)
+{
+ struct vsp1_uds *uds = to_uds(subdev);
+ struct v4l2_mbus_framefmt *format;
+
+ uds_try_format(uds, fh, fmt->pad, &fmt->format, fmt->which);
+
+ format = vsp1_entity_get_pad_format(&uds->entity, fh, fmt->pad,
+ fmt->which);
+ *format = fmt->format;
+
+ if (fmt->pad == UDS_PAD_SINK) {
+ /* Propagate the format to the source pad. */
+ format = vsp1_entity_get_pad_format(&uds->entity, fh,
+ UDS_PAD_SOURCE, fmt->which);
+ *format = fmt->format;
+
+ uds_try_format(uds, fh, UDS_PAD_SOURCE, format, fmt->which);
+ }
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ uds_compute_ratios(uds);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static struct v4l2_subdev_video_ops uds_video_ops = {
+ .s_stream = uds_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops uds_pad_ops = {
+ .enum_mbus_code = uds_enum_mbus_code,
+ .enum_frame_size = uds_enum_frame_size,
+ .get_fmt = uds_get_format,
+ .set_fmt = uds_set_format,
+};
+
+static struct v4l2_subdev_ops uds_ops = {
+ .video = &uds_video_ops,
+ .pad = &uds_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_uds *vsp1_uds_create(struct vsp1_device *vsp1, unsigned int index)
+{
+ struct v4l2_subdev *subdev;
+ struct vsp1_uds *uds;
+ int ret;
+
+ uds = devm_kzalloc(vsp1->dev, sizeof(*uds), GFP_KERNEL);
+ if (uds == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ uds->entity.type = VSP1_ENTITY_UDS;
+ uds->entity.index = index;
+ uds->entity.id = VI6_DPR_NODE_UDS(index);
+
+ ret = vsp1_entity_init(vsp1, &uds->entity, 2);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Initialize the V4L2 subdev. */
+ subdev = &uds->entity.subdev;
+ v4l2_subdev_init(subdev, &uds_ops);
+
+ subdev->entity.ops = &vsp1_media_ops;
+ subdev->internal_ops = &vsp1_subdev_internal_ops;
+ snprintf(subdev->name, sizeof(subdev->name), "%s uds.%u",
+ dev_name(vsp1->dev), index);
+ v4l2_set_subdevdata(subdev, uds);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ vsp1_entity_init_formats(subdev, NULL);
+
+ return uds;
+}
diff --git a/drivers/media/platform/vsp1/vsp1_uds.h b/drivers/media/platform/vsp1/vsp1_uds.h
new file mode 100644
index 00000000000..972a285abdb
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_uds.h
@@ -0,0 +1,40 @@
+/*
+ * vsp1_uds.h -- R-Car VSP1 Up and Down Scaler
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_UDS_H__
+#define __VSP1_UDS_H__
+
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+
+#include "vsp1_entity.h"
+
+struct vsp1_device;
+
+#define UDS_PAD_SINK 0
+#define UDS_PAD_SOURCE 1
+
+struct vsp1_uds {
+ struct vsp1_entity entity;
+
+ unsigned int hscale;
+ unsigned int vscale;
+};
+
+static inline struct vsp1_uds *to_uds(struct v4l2_subdev *subdev)
+{
+ return container_of(subdev, struct vsp1_uds, entity.subdev);
+}
+
+struct vsp1_uds *vsp1_uds_create(struct vsp1_device *vsp1, unsigned int index);
+
+#endif /* __VSP1_UDS_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
new file mode 100644
index 00000000000..714c53ef6c1
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -0,0 +1,1069 @@
+/*
+ * vsp1_video.c -- R-Car VSP1 Video Node
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/videodev2.h>
+
+#include <media/media-entity.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-fh.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-subdev.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "vsp1.h"
+#include "vsp1_entity.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_video.h"
+
+#define VSP1_VIDEO_DEF_FORMAT V4L2_PIX_FMT_YUYV
+#define VSP1_VIDEO_DEF_WIDTH 1024
+#define VSP1_VIDEO_DEF_HEIGHT 768
+
+#define VSP1_VIDEO_MIN_WIDTH 2U
+#define VSP1_VIDEO_MAX_WIDTH 8190U
+#define VSP1_VIDEO_MIN_HEIGHT 2U
+#define VSP1_VIDEO_MAX_HEIGHT 8190U
+
+/* -----------------------------------------------------------------------------
+ * Helper functions
+ */
+
+static const struct vsp1_format_info vsp1_video_formats[] = {
+ { V4L2_PIX_FMT_RGB332, V4L2_MBUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGB_332, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 8, 0, 0 }, false, false, 1, 1 },
+ { V4L2_PIX_FMT_RGB444, V4L2_MBUS_FMT_ARGB8888_1X32,
+ VI6_FMT_XRGB_4444, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1 },
+ { V4L2_PIX_FMT_RGB555, V4L2_MBUS_FMT_ARGB8888_1X32,
+ VI6_FMT_XRGB_1555, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1 },
+ { V4L2_PIX_FMT_RGB565, V4L2_MBUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGB_565, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS,
+ 1, { 16, 0, 0 }, false, false, 1, 1 },
+ { V4L2_PIX_FMT_BGR24, V4L2_MBUS_FMT_ARGB8888_1X32,
+ VI6_FMT_BGR_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 24, 0, 0 }, false, false, 1, 1 },
+ { V4L2_PIX_FMT_RGB24, V4L2_MBUS_FMT_ARGB8888_1X32,
+ VI6_FMT_RGB_888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 24, 0, 0 }, false, false, 1, 1 },
+ { V4L2_PIX_FMT_BGR32, V4L2_MBUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS,
+ 1, { 32, 0, 0 }, false, false, 1, 1 },
+ { V4L2_PIX_FMT_RGB32, V4L2_MBUS_FMT_ARGB8888_1X32,
+ VI6_FMT_ARGB_8888, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 32, 0, 0 }, false, false, 1, 1 },
+ { V4L2_PIX_FMT_UYVY, V4L2_MBUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 16, 0, 0 }, false, false, 2, 1 },
+ { V4L2_PIX_FMT_VYUY, V4L2_MBUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 16, 0, 0 }, false, true, 2, 1 },
+ { V4L2_PIX_FMT_YUYV, V4L2_MBUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 16, 0, 0 }, true, false, 2, 1 },
+ { V4L2_PIX_FMT_YVYU, V4L2_MBUS_FMT_AYUV8_1X32,
+ VI6_FMT_YUYV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 1, { 16, 0, 0 }, true, true, 2, 1 },
+ { V4L2_PIX_FMT_NV12M, V4L2_MBUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 2, { 8, 16, 0 }, false, false, 2, 2 },
+ { V4L2_PIX_FMT_NV21M, V4L2_MBUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_UV_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 2, { 8, 16, 0 }, false, true, 2, 2 },
+ { V4L2_PIX_FMT_NV16M, V4L2_MBUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 2, { 8, 16, 0 }, false, false, 2, 1 },
+ { V4L2_PIX_FMT_NV61M, V4L2_MBUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_UV_422, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 2, { 8, 16, 0 }, false, true, 2, 1 },
+ { V4L2_PIX_FMT_YUV420M, V4L2_MBUS_FMT_AYUV8_1X32,
+ VI6_FMT_Y_U_V_420, VI6_RPF_DSWAP_P_LLS | VI6_RPF_DSWAP_P_LWS |
+ VI6_RPF_DSWAP_P_WDS | VI6_RPF_DSWAP_P_BTS,
+ 3, { 8, 8, 8 }, false, false, 2, 2 },
+};
+
+/*
+ * vsp1_get_format_info - Retrieve format information for a 4CC
+ * @fourcc: the format 4CC
+ *
+ * Return a pointer to the format information structure corresponding to the
+ * given V4L2 format 4CC, or NULL if no corresponding format can be found.
+ */
+static const struct vsp1_format_info *vsp1_get_format_info(u32 fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(vsp1_video_formats); ++i) {
+ const struct vsp1_format_info *info = &vsp1_video_formats[i];
+
+ if (info->fourcc == fourcc)
+ return info;
+ }
+
+ return NULL;
+}
+
+static struct v4l2_subdev *
+vsp1_video_remote_subdev(struct media_pad *local, u32 *pad)
+{
+ struct media_pad *remote;
+
+ remote = media_entity_remote_pad(local);
+ if (remote == NULL ||
+ media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ return NULL;
+
+ if (pad)
+ *pad = remote->index;
+
+ return media_entity_to_v4l2_subdev(remote->entity);
+}
+
+static int vsp1_video_verify_format(struct vsp1_video *video)
+{
+ struct v4l2_subdev_format fmt;
+ struct v4l2_subdev *subdev;
+ int ret;
+
+ subdev = vsp1_video_remote_subdev(&video->pad, &fmt.pad);
+ if (subdev == NULL)
+ return -EINVAL;
+
+ fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
+ if (ret < 0)
+ return ret == -ENOIOCTLCMD ? -EINVAL : ret;
+
+ if (video->fmtinfo->mbus != fmt.format.code ||
+ video->format.height != fmt.format.height ||
+ video->format.width != fmt.format.width)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int __vsp1_video_try_format(struct vsp1_video *video,
+ struct v4l2_pix_format_mplane *pix,
+ const struct vsp1_format_info **fmtinfo)
+{
+ const struct vsp1_format_info *info;
+ unsigned int width = pix->width;
+ unsigned int height = pix->height;
+ unsigned int i;
+
+ /* Retrieve format information and select the default format if the
+ * requested format isn't supported.
+ */
+ info = vsp1_get_format_info(pix->pixelformat);
+ if (info == NULL)
+ info = vsp1_get_format_info(VSP1_VIDEO_DEF_FORMAT);
+
+ pix->pixelformat = info->fourcc;
+ pix->colorspace = V4L2_COLORSPACE_SRGB;
+ pix->field = V4L2_FIELD_NONE;
+ memset(pix->reserved, 0, sizeof(pix->reserved));
+
+ /* Align the width and height for YUV 4:2:2 and 4:2:0 formats. */
+ width = round_down(width, info->hsub);
+ height = round_down(height, info->vsub);
+
+ /* Clamp the width and height. */
+ pix->width = clamp(width, VSP1_VIDEO_MIN_WIDTH, VSP1_VIDEO_MAX_WIDTH);
+ pix->height = clamp(height, VSP1_VIDEO_MIN_HEIGHT,
+ VSP1_VIDEO_MAX_HEIGHT);
+
+ /* Compute and clamp the stride and image size. While not documented in
+ * the datasheet, strides not aligned to a multiple of 128 bytes result
+ * in image corruption.
+ */
+ for (i = 0; i < max(info->planes, 2U); ++i) {
+ unsigned int hsub = i > 0 ? info->hsub : 1;
+ unsigned int vsub = i > 0 ? info->vsub : 1;
+ unsigned int align = 128;
+ unsigned int bpl;
+
+ bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
+ pix->width / hsub * info->bpp[i] / 8,
+ round_down(65535U, align));
+
+ pix->plane_fmt[i].bytesperline = round_up(bpl, align);
+ pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
+ * pix->height / vsub;
+ }
+
+ if (info->planes == 3) {
+ /* The second and third planes must have the same stride. */
+ pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
+ pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
+ }
+
+ pix->num_planes = info->planes;
+
+ if (fmtinfo)
+ *fmtinfo = info;
+
+ return 0;
+}
+
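+/*
+ * vsp1_video_format_adjust - Validate a format against its adjusted version
+ * @video: the video node
+ * @format: the requested format
+ * @adjust: adjusted copy of the requested format
+ *
+ * Run the requested format through the try handler and accept it only if
+ * nothing but the per-plane sizeimage, which may only be enlarged, differs
+ * from the request.
+ */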
+static bool
+vsp1_video_format_adjust(struct vsp1_video *video,
+ const struct v4l2_pix_format_mplane *format,
+ struct v4l2_pix_format_mplane *adjust)
+{
+ unsigned int i;
+
+ *adjust = *format;
+ __vsp1_video_try_format(video, adjust, NULL);
+
+ if (format->width != adjust->width ||
+ format->height != adjust->height ||
+ format->pixelformat != adjust->pixelformat ||
+ format->num_planes != adjust->num_planes)
+ return false;
+
+ for (i = 0; i < format->num_planes; ++i) {
+ if (format->plane_fmt[i].bytesperline !=
+ adjust->plane_fmt[i].bytesperline)
+ return false;
+
+ adjust->plane_fmt[i].sizeimage =
+ max(adjust->plane_fmt[i].sizeimage,
+ format->plane_fmt[i].sizeimage);
+ }
+
+ return true;
+}
+
+/* -----------------------------------------------------------------------------
+ * Pipeline Management
+ */
+
+static int vsp1_pipeline_validate_branch(struct vsp1_rwpf *input,
+ struct vsp1_rwpf *output)
+{
+ struct vsp1_entity *entity;
+ unsigned int entities = 0;
+ struct media_pad *pad;
+ bool uds_found = false;
+
+ pad = media_entity_remote_pad(&input->entity.pads[RWPF_PAD_SOURCE]);
+
+ while (1) {
+ if (pad == NULL)
+ return -EPIPE;
+
+ /* We've reached a video node; that shouldn't have happened. */
+ if (media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
+ return -EPIPE;
+
+ entity = to_vsp1_entity(media_entity_to_v4l2_subdev(pad->entity));
+
+ /* We've reached the WPF; we're done. */
+ if (entity->type == VSP1_ENTITY_WPF)
+ break;
+
+ /* Ensure the branch has no loop. */
+ if (entities & (1 << entity->subdev.entity.id))
+ return -EPIPE;
+
+ entities |= 1 << entity->subdev.entity.id;
+
+ /* UDS can't be chained. */
+ if (entity->type == VSP1_ENTITY_UDS) {
+ if (uds_found)
+ return -EPIPE;
+ uds_found = true;
+ }
+
+ /* Follow the source link. The link setup operations ensure
+ * that the output fan-out can't be more than one; there is thus
+ * no need to verify here that only a single source link is
+ * activated.
+ */
+ pad = &entity->pads[entity->source_pad];
+ pad = media_entity_remote_pad(pad);
+ }
+
+ /* The last entity must be the output WPF. */
+ if (entity != &output->entity)
+ return -EPIPE;
+
+ return 0;
+}
+
+static int vsp1_pipeline_validate(struct vsp1_pipeline *pipe,
+ struct vsp1_video *video)
+{
+ struct media_entity_graph graph;
+ struct media_entity *entity = &video->video.entity;
+ struct media_device *mdev = entity->parent;
+ unsigned int i;
+ int ret;
+
+ mutex_lock(&mdev->graph_mutex);
+
+ /* Walk the graph to locate the entities and video nodes. */
+ media_entity_graph_walk_start(&graph, entity);
+
+ while ((entity = media_entity_graph_walk_next(&graph))) {
+ struct v4l2_subdev *subdev;
+ struct vsp1_rwpf *rwpf;
+ struct vsp1_entity *e;
+
+ if (media_entity_type(entity) != MEDIA_ENT_T_V4L2_SUBDEV) {
+ pipe->num_video++;
+ continue;
+ }
+
+ subdev = media_entity_to_v4l2_subdev(entity);
+ e = to_vsp1_entity(subdev);
+ list_add_tail(&e->list_pipe, &pipe->entities);
+
+ if (e->type == VSP1_ENTITY_RPF) {
+ rwpf = to_rwpf(subdev);
+ pipe->inputs[pipe->num_inputs++] = rwpf;
+ rwpf->video.pipe_index = pipe->num_inputs;
+ } else if (e->type == VSP1_ENTITY_WPF) {
+ rwpf = to_rwpf(subdev);
+ pipe->output = to_rwpf(subdev);
+ rwpf->video.pipe_index = 0;
+ } else if (e->type == VSP1_ENTITY_LIF) {
+ pipe->lif = e;
+ }
+ }
+
+ mutex_unlock(&mdev->graph_mutex);
+
+ /* We need one output and at least one input. */
+ if (pipe->num_inputs == 0 || !pipe->output) {
+ ret = -EPIPE;
+ goto error;
+ }
+
+ /* Follow links downstream for each input and make sure the graph
+ * contains no loop and that all branches end at the output WPF.
+ */
+ for (i = 0; i < pipe->num_inputs; ++i) {
+ ret = vsp1_pipeline_validate_branch(pipe->inputs[i],
+ pipe->output);
+ if (ret < 0)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ INIT_LIST_HEAD(&pipe->entities);
+ pipe->buffers_ready = 0;
+ pipe->num_video = 0;
+ pipe->num_inputs = 0;
+ pipe->output = NULL;
+ pipe->lif = NULL;
+ return ret;
+}
+
+static int vsp1_pipeline_init(struct vsp1_pipeline *pipe,
+ struct vsp1_video *video)
+{
+ int ret;
+
+ mutex_lock(&pipe->lock);
+
+ /* If we're the first user, validate and initialize the pipeline. */
+ if (pipe->use_count == 0) {
+ ret = vsp1_pipeline_validate(pipe, video);
+ if (ret < 0)
+ goto done;
+ }
+
+ pipe->use_count++;
+ ret = 0;
+
+done:
+ mutex_unlock(&pipe->lock);
+ return ret;
+}
+
+static void vsp1_pipeline_cleanup(struct vsp1_pipeline *pipe)
+{
+ mutex_lock(&pipe->lock);
+
+ /* If we're the last user, clean up the pipeline. */
+ if (--pipe->use_count == 0) {
+ INIT_LIST_HEAD(&pipe->entities);
+ pipe->state = VSP1_PIPELINE_STOPPED;
+ pipe->buffers_ready = 0;
+ pipe->num_video = 0;
+ pipe->num_inputs = 0;
+ pipe->output = NULL;
+ pipe->lif = NULL;
+ }
+
+ mutex_unlock(&pipe->lock);
+}
+
+static void vsp1_pipeline_run(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_device *vsp1 = pipe->output->entity.vsp1;
+
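+ /* Writing STRCMD to the WPF command register starts processing of one
+ * frame.
+ */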
+ vsp1_write(vsp1, VI6_CMD(pipe->output->entity.index), VI6_CMD_STRCMD);
+ pipe->state = VSP1_PIPELINE_RUNNING;
+ pipe->buffers_ready = 0;
+}
+
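+/*
+ * vsp1_pipeline_stop - Stop the pipeline
+ * @pipe: the pipeline
+ *
+ * Request a stop and wait up to 500ms for the frame end interrupt to confirm
+ * it, then reset the DPR routing and stop all entities unconditionally.
+ *
+ * Return 0 on success or -ETIMEDOUT if the hardware didn't confirm the stop.
+ */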
+static int vsp1_pipeline_stop(struct vsp1_pipeline *pipe)
+{
+ struct vsp1_entity *entity;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ pipe->state = VSP1_PIPELINE_STOPPING;
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+
+ ret = wait_event_timeout(pipe->wq, pipe->state == VSP1_PIPELINE_STOPPED,
+ msecs_to_jiffies(500));
+ ret = ret == 0 ? -ETIMEDOUT : 0;
+
+ list_for_each_entry(entity, &pipe->entities, list_pipe) {
+ if (entity->route)
+ vsp1_write(entity->vsp1, entity->route,
+ VI6_DPR_NODE_UNUSED);
+
+ v4l2_subdev_call(&entity->subdev, video, s_stream, 0);
+ }
+
+ return ret;
+}
+
+static bool vsp1_pipeline_ready(struct vsp1_pipeline *pipe)
+{
+ unsigned int mask;
+
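+ /* Bits 1 to num_inputs correspond to the RPF video nodes. Bit 0
+ * corresponds to the WPF video node, which only needs a buffer when no
+ * LIF is connected.
+ */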
+ mask = ((1 << pipe->num_inputs) - 1) << 1;
+ if (!pipe->lif)
+ mask |= 1 << 0;
+
+ return pipe->buffers_ready == mask;
+}
+
+/*
+ * vsp1_video_complete_buffer - Complete the current buffer
+ * @video: the video node
+ *
+ * This function completes the current buffer by filling its sequence number,
+ * time stamp and payload size, and hands it back to the videobuf core.
+ *
+ * Return the next queued buffer or NULL if the queue is empty.
+ */
+static struct vsp1_video_buffer *
+vsp1_video_complete_buffer(struct vsp1_video *video)
+{
+ struct vsp1_video_buffer *next = NULL;
+ struct vsp1_video_buffer *done;
+ unsigned long flags;
+ unsigned int i;
+
+ spin_lock_irqsave(&video->irqlock, flags);
+
+ if (list_empty(&video->irqqueue)) {
+ spin_unlock_irqrestore(&video->irqlock, flags);
+ return NULL;
+ }
+
+ done = list_first_entry(&video->irqqueue,
+ struct vsp1_video_buffer, queue);
+ list_del(&done->queue);
+
+ if (!list_empty(&video->irqqueue))
+ next = list_first_entry(&video->irqqueue,
+ struct vsp1_video_buffer, queue);
+
+ spin_unlock_irqrestore(&video->irqlock, flags);
+
+ done->buf.v4l2_buf.sequence = video->sequence++;
+ v4l2_get_timestamp(&done->buf.v4l2_buf.timestamp);
+ for (i = 0; i < done->buf.num_planes; ++i)
+ vb2_set_plane_payload(&done->buf, i, done->length[i]);
+ vb2_buffer_done(&done->buf, VB2_BUF_STATE_DONE);
+
+ return next;
+}
+
+static void vsp1_video_frame_end(struct vsp1_pipeline *pipe,
+ struct vsp1_video *video)
+{
+ struct vsp1_video_buffer *buf;
+ unsigned long flags;
+
+ buf = vsp1_video_complete_buffer(video);
+ if (buf == NULL)
+ return;
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+
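+ /* Give the next buffer to the hardware and mark the video node as ready
+ * for the next run.
+ */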
+ video->ops->queue(video, buf);
+ pipe->buffers_ready |= 1 << video->pipe_index;
+
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+}
+
+void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe)
+{
+ unsigned long flags;
+ unsigned int i;
+
+ if (pipe == NULL)
+ return;
+
+ /* Complete buffers on all video nodes. */
+ for (i = 0; i < pipe->num_inputs; ++i)
+ vsp1_video_frame_end(pipe, &pipe->inputs[i]->video);
+
+ if (!pipe->lif)
+ vsp1_video_frame_end(pipe, &pipe->output->video);
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+
+ /* If a stop has been requested, mark the pipeline as stopped and
+ * return.
+ */
+ if (pipe->state == VSP1_PIPELINE_STOPPING) {
+ pipe->state = VSP1_PIPELINE_STOPPED;
+ wake_up(&pipe->wq);
+ goto done;
+ }
+
+ /* Restart the pipeline if ready. */
+ if (vsp1_pipeline_ready(pipe))
+ vsp1_pipeline_run(pipe);
+
+done:
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+}
+
+/* -----------------------------------------------------------------------------
+ * videobuf2 Queue Operations
+ */
+
+static int
+vsp1_video_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt,
+ unsigned int *nbuffers, unsigned int *nplanes,
+ unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct vsp1_video *video = vb2_get_drv_priv(vq);
+ const struct v4l2_pix_format_mplane *format;
+ struct v4l2_pix_format_mplane pix_mp;
+ unsigned int i;
+
+ if (fmt) {
+ /* Make sure the format is valid and adjust the sizeimage field
+ * if needed.
+ */
+ if (!vsp1_video_format_adjust(video, &fmt->fmt.pix_mp, &pix_mp))
+ return -EINVAL;
+
+ format = &pix_mp;
+ } else {
+ format = &video->format;
+ }
+
+ *nplanes = format->num_planes;
+
+ for (i = 0; i < format->num_planes; ++i) {
+ sizes[i] = format->plane_fmt[i].sizeimage;
+ alloc_ctxs[i] = video->alloc_ctx;
+ }
+
+ return 0;
+}
+
+static int vsp1_video_buffer_prepare(struct vb2_buffer *vb)
+{
+ struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ struct vsp1_video_buffer *buf = to_vsp1_video_buffer(vb);
+ const struct v4l2_pix_format_mplane *format = &video->format;
+ unsigned int i;
+
+ if (vb->num_planes < format->num_planes)
+ return -EINVAL;
+
+ buf->video = video;
+
+ for (i = 0; i < vb->num_planes; ++i) {
+ buf->addr[i] = vb2_dma_contig_plane_dma_addr(vb, i);
+ buf->length[i] = vb2_plane_size(vb, i);
+
+ if (buf->length[i] < format->plane_fmt[i].sizeimage)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void vsp1_video_buffer_queue(struct vb2_buffer *vb)
+{
+ struct vsp1_video *video = vb2_get_drv_priv(vb->vb2_queue);
+ struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
+ struct vsp1_video_buffer *buf = to_vsp1_video_buffer(vb);
+ unsigned long flags;
+ bool empty;
+
+ spin_lock_irqsave(&video->irqlock, flags);
+ empty = list_empty(&video->irqqueue);
+ list_add_tail(&buf->queue, &video->irqqueue);
+ spin_unlock_irqrestore(&video->irqlock, flags);
+
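+ /* If the queue wasn't empty the hardware is already set up with a
+ * buffer; the new one will be given to the hardware by the frame end
+ * handler.
+ */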
+ if (!empty)
+ return;
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+
+ video->ops->queue(video, buf);
+ pipe->buffers_ready |= 1 << video->pipe_index;
+
+ if (vb2_is_streaming(&video->queue) &&
+ vsp1_pipeline_ready(pipe))
+ vsp1_pipeline_run(pipe);
+
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+}
+
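+/*
+ * vsp1_entity_route_setup - Set up the DPR routing for an entity
+ * @source: the entity at the source end of the link
+ *
+ * Point the entity's DPR routing register, when it has one, at the connected
+ * sink entity.
+ */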
+static void vsp1_entity_route_setup(struct vsp1_entity *source)
+{
+ struct vsp1_entity *sink;
+
+ if (source->route == 0)
+ return;
+
+ sink = container_of(source->sink, struct vsp1_entity, subdev.entity);
+ vsp1_write(source->vsp1, source->route, sink->id);
+}
+
+static int vsp1_video_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct vsp1_video *video = vb2_get_drv_priv(vq);
+ struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
+ struct vsp1_entity *entity;
+ unsigned long flags;
+ int ret;
+
+ mutex_lock(&pipe->lock);
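+ /* When the last video node of the pipeline starts streaming, set up the
+ * DPR routing and start all entities.
+ */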
+ if (pipe->stream_count == pipe->num_video - 1) {
+ list_for_each_entry(entity, &pipe->entities, list_pipe) {
+ vsp1_entity_route_setup(entity);
+
+ ret = v4l2_subdev_call(&entity->subdev, video,
+ s_stream, 1);
+ if (ret < 0) {
+ mutex_unlock(&pipe->lock);
+ return ret;
+ }
+ }
+ }
+
+ pipe->stream_count++;
+ mutex_unlock(&pipe->lock);
+
+ spin_lock_irqsave(&pipe->irqlock, flags);
+ if (vsp1_pipeline_ready(pipe))
+ vsp1_pipeline_run(pipe);
+ spin_unlock_irqrestore(&pipe->irqlock, flags);
+
+ return 0;
+}
+
+static int vsp1_video_stop_streaming(struct vb2_queue *vq)
+{
+ struct vsp1_video *video = vb2_get_drv_priv(vq);
+ struct vsp1_pipeline *pipe = to_vsp1_pipeline(&video->video.entity);
+ unsigned long flags;
+ int ret;
+
+ mutex_lock(&pipe->lock);
+ if (--pipe->stream_count == 0) {
+ /* Stop the pipeline. */
+ ret = vsp1_pipeline_stop(pipe);
+ if (ret == -ETIMEDOUT)
+ dev_err(video->vsp1->dev, "pipeline stop timeout\n");
+ }
+ mutex_unlock(&pipe->lock);
+
+ vsp1_pipeline_cleanup(pipe);
+ media_entity_pipeline_stop(&video->video.entity);
+
+ /* Remove all buffers from the IRQ queue. */
+ spin_lock_irqsave(&video->irqlock, flags);
+ INIT_LIST_HEAD(&video->irqqueue);
+ spin_unlock_irqrestore(&video->irqlock, flags);
+
+ return 0;
+}
+
+static struct vb2_ops vsp1_video_queue_qops = {
+ .queue_setup = vsp1_video_queue_setup,
+ .buf_prepare = vsp1_video_buffer_prepare,
+ .buf_queue = vsp1_video_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = vsp1_video_start_streaming,
+ .stop_streaming = vsp1_video_stop_streaming,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 ioctls
+ */
+
+static int
+vsp1_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_video *video = to_vsp1_video(vfh->vdev);
+
+ cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
+ | V4L2_CAP_VIDEO_CAPTURE_MPLANE
+ | V4L2_CAP_VIDEO_OUTPUT_MPLANE;
+
+ if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE
+ | V4L2_CAP_STREAMING;
+ else
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT_MPLANE
+ | V4L2_CAP_STREAMING;
+
+ strlcpy(cap->driver, "vsp1", sizeof(cap->driver));
+ strlcpy(cap->card, video->video.name, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s",
+ dev_name(video->vsp1->dev));
+
+ return 0;
+}
+
+static int
+vsp1_video_get_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_video *video = to_vsp1_video(vfh->vdev);
+
+ if (format->type != video->queue.type)
+ return -EINVAL;
+
+ mutex_lock(&video->lock);
+ format->fmt.pix_mp = video->format;
+ mutex_unlock(&video->lock);
+
+ return 0;
+}
+
+static int
+vsp1_video_try_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_video *video = to_vsp1_video(vfh->vdev);
+
+ if (format->type != video->queue.type)
+ return -EINVAL;
+
+ return __vsp1_video_try_format(video, &format->fmt.pix_mp, NULL);
+}
+
+static int
+vsp1_video_set_format(struct file *file, void *fh, struct v4l2_format *format)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_video *video = to_vsp1_video(vfh->vdev);
+ const struct vsp1_format_info *info;
+ int ret;
+
+ if (format->type != video->queue.type)
+ return -EINVAL;
+
+ ret = __vsp1_video_try_format(video, &format->fmt.pix_mp, &info);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&video->lock);
+
+ if (vb2_is_busy(&video->queue)) {
+ ret = -EBUSY;
+ goto done;
+ }
+
+ video->format = format->fmt.pix_mp;
+ video->fmtinfo = info;
+
+done:
+ mutex_unlock(&video->lock);
+ return ret;
+}
+
+static int
+vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type)
+{
+ struct v4l2_fh *vfh = file->private_data;
+ struct vsp1_video *video = to_vsp1_video(vfh->vdev);
+ struct vsp1_pipeline *pipe;
+ int ret;
+
+ if (video->queue.owner && video->queue.owner != file->private_data)
+ return -EBUSY;
+
+ video->sequence = 0;
+
+ /* Start streaming on the pipeline. No link touching an entity in the
+ * pipeline can be activated or deactivated once streaming is started.
+ *
+ * Use the VSP1 pipeline object embedded in the first video object that
+ * starts streaming.
+ */
+ pipe = video->video.entity.pipe
+ ? to_vsp1_pipeline(&video->video.entity) : &video->pipe;
+
+ ret = media_entity_pipeline_start(&video->video.entity, &pipe->pipe);
+ if (ret < 0)
+ return ret;
+
+ /* Verify that the configured format matches the output of the connected
+ * subdev.
+ */
+ ret = vsp1_video_verify_format(video);
+ if (ret < 0)
+ goto err_stop;
+
+ ret = vsp1_pipeline_init(pipe, video);
+ if (ret < 0)
+ goto err_stop;
+
+ /* Start the queue. */
+ ret = vb2_streamon(&video->queue, type);
+ if (ret < 0)
+ goto err_cleanup;
+
+ return 0;
+
+err_cleanup:
+ vsp1_pipeline_cleanup(pipe);
+err_stop:
+ media_entity_pipeline_stop(&video->video.entity);
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops vsp1_video_ioctl_ops = {
+ .vidioc_querycap = vsp1_video_querycap,
+ .vidioc_g_fmt_vid_cap_mplane = vsp1_video_get_format,
+ .vidioc_s_fmt_vid_cap_mplane = vsp1_video_set_format,
+ .vidioc_try_fmt_vid_cap_mplane = vsp1_video_try_format,
+ .vidioc_g_fmt_vid_out_mplane = vsp1_video_get_format,
+ .vidioc_s_fmt_vid_out_mplane = vsp1_video_set_format,
+ .vidioc_try_fmt_vid_out_mplane = vsp1_video_try_format,
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_streamon = vsp1_video_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+};
+
+/* -----------------------------------------------------------------------------
+ * V4L2 File Operations
+ */
+
+static int vsp1_video_open(struct file *file)
+{
+ struct vsp1_video *video = video_drvdata(file);
+ struct v4l2_fh *vfh;
+ int ret = 0;
+
+ vfh = kzalloc(sizeof(*vfh), GFP_KERNEL);
+ if (vfh == NULL)
+ return -ENOMEM;
+
+ v4l2_fh_init(vfh, &video->video);
+ v4l2_fh_add(vfh);
+
+ file->private_data = vfh;
+
+ if (!vsp1_device_get(video->vsp1)) {
+ ret = -EBUSY;
+ v4l2_fh_del(vfh);
+ kfree(vfh);
+ }
+
+ return ret;
+}
+
+static int vsp1_video_release(struct file *file)
+{
+ struct vsp1_video *video = video_drvdata(file);
+ struct v4l2_fh *vfh = file->private_data;
+
+ mutex_lock(&video->lock);
+ if (video->queue.owner == vfh) {
+ vb2_queue_release(&video->queue);
+ video->queue.owner = NULL;
+ }
+ mutex_unlock(&video->lock);
+
+ vsp1_device_put(video->vsp1);
+
+ v4l2_fh_release(file);
+
+ file->private_data = NULL;
+
+ return 0;
+}
+
+static struct v4l2_file_operations vsp1_video_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = video_ioctl2,
+ .open = vsp1_video_open,
+ .release = vsp1_video_release,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+int vsp1_video_init(struct vsp1_video *video, struct vsp1_entity *rwpf)
+{
+ const char *direction;
+ int ret;
+
+ switch (video->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ direction = "output";
+ video->pad.flags = MEDIA_PAD_FL_SINK;
+ break;
+
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ direction = "input";
+ video->pad.flags = MEDIA_PAD_FL_SOURCE;
+ video->video.vfl_dir = VFL_DIR_TX;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ video->rwpf = rwpf;
+
+ mutex_init(&video->lock);
+ spin_lock_init(&video->irqlock);
+ INIT_LIST_HEAD(&video->irqqueue);
+
+ mutex_init(&video->pipe.lock);
+ spin_lock_init(&video->pipe.irqlock);
+ INIT_LIST_HEAD(&video->pipe.entities);
+ init_waitqueue_head(&video->pipe.wq);
+ video->pipe.state = VSP1_PIPELINE_STOPPED;
+
+ /* Initialize the media entity... */
+ ret = media_entity_init(&video->video.entity, 1, &video->pad, 0);
+ if (ret < 0)
+ return ret;
+
+ /* ... and the format ... */
+ video->fmtinfo = vsp1_get_format_info(VSP1_VIDEO_DEF_FORMAT);
+ video->format.pixelformat = video->fmtinfo->fourcc;
+ video->format.colorspace = V4L2_COLORSPACE_SRGB;
+ video->format.field = V4L2_FIELD_NONE;
+ video->format.width = VSP1_VIDEO_DEF_WIDTH;
+ video->format.height = VSP1_VIDEO_DEF_HEIGHT;
+ video->format.num_planes = 1;
+ video->format.plane_fmt[0].bytesperline =
+ video->format.width * video->fmtinfo->bpp[0] / 8;
+ video->format.plane_fmt[0].sizeimage =
+ video->format.plane_fmt[0].bytesperline * video->format.height;
+
+ /* ... and the video node... */
+ video->video.v4l2_dev = &video->vsp1->v4l2_dev;
+ video->video.fops = &vsp1_video_fops;
+ snprintf(video->video.name, sizeof(video->video.name), "%s %s",
+ rwpf->subdev.name, direction);
+ video->video.vfl_type = VFL_TYPE_GRABBER;
+ video->video.release = video_device_release_empty;
+ video->video.ioctl_ops = &vsp1_video_ioctl_ops;
+
+ video_set_drvdata(&video->video, video);
+
+ /* ... and the buffer queue... */
+ video->alloc_ctx = vb2_dma_contig_init_ctx(video->vsp1->dev);
+ if (IS_ERR(video->alloc_ctx)) {
+ ret = PTR_ERR(video->alloc_ctx);
+ /* Don't pass the error pointer to the cleanup code below. */
+ video->alloc_ctx = NULL;
+ goto error;
+ }
+
+ video->queue.type = video->type;
+ video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ video->queue.lock = &video->lock;
+ video->queue.drv_priv = video;
+ video->queue.buf_struct_size = sizeof(struct vsp1_video_buffer);
+ video->queue.ops = &vsp1_video_queue_qops;
+ video->queue.mem_ops = &vb2_dma_contig_memops;
+ video->queue.timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ ret = vb2_queue_init(&video->queue);
+ if (ret < 0) {
+ dev_err(video->vsp1->dev, "failed to initialize vb2 queue\n");
+ goto error;
+ }
+
+ /* ... and register the video device. */
+ video->video.queue = &video->queue;
+ ret = video_register_device(&video->video, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ dev_err(video->vsp1->dev, "failed to register video device\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ /* vsp1_video_cleanup() releases the vb2 allocation context. */
+ vsp1_video_cleanup(video);
+ return ret;
+}
+
+void vsp1_video_cleanup(struct vsp1_video *video)
+{
+ if (video_is_registered(&video->video))
+ video_unregister_device(&video->video);
+
+ vb2_dma_contig_cleanup_ctx(video->alloc_ctx);
+ media_entity_cleanup(&video->video.entity);
+}
diff --git a/drivers/media/platform/vsp1/vsp1_video.h b/drivers/media/platform/vsp1/vsp1_video.h
new file mode 100644
index 00000000000..d8612a37834
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_video.h
@@ -0,0 +1,145 @@
+/*
+ * vsp1_video.h -- R-Car VSP1 Video Node
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __VSP1_VIDEO_H__
+#define __VSP1_VIDEO_H__
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#include <media/media-entity.h>
+#include <media/videobuf2-core.h>
+
+struct vsp1_video;
+
+/*
+ * struct vsp1_format_info - VSP1 video format description
+ * @fourcc: V4L2 pixel format FCC identifier
+ * @mbus: media bus format code
+ * @hwfmt: VSP1 hardware format
+ * @swap: data swap operation bits
+ * @planes: number of planes
+ * @bpp: bits per pixel
+ * @swap_yc: the Y and C components are swapped (Y comes before C)
+ * @swap_uv: the U and V components are swapped (V comes before U)
+ * @hsub: horizontal subsampling factor
+ * @vsub: vertical subsampling factor
+ */
+struct vsp1_format_info {
+ u32 fourcc;
+ unsigned int mbus;
+ unsigned int hwfmt;
+ unsigned int swap;
+ unsigned int planes;
+ unsigned int bpp[3];
+ bool swap_yc;
+ bool swap_uv;
+ unsigned int hsub;
+ unsigned int vsub;
+};
+
+enum vsp1_pipeline_state {
+ VSP1_PIPELINE_STOPPED,
+ VSP1_PIPELINE_RUNNING,
+ VSP1_PIPELINE_STOPPING,
+};
+
+/*
+ * struct vsp1_pipeline - A VSP1 hardware pipeline
+ * @pipe: the media pipeline
+ * @irqlock: protects the pipeline state
+ * @lock: protects the pipeline use count and stream count
+ */
+struct vsp1_pipeline {
+ struct media_pipeline pipe;
+
+ spinlock_t irqlock;
+ enum vsp1_pipeline_state state;
+ wait_queue_head_t wq;
+
+ struct mutex lock;
+ unsigned int use_count;
+ unsigned int stream_count;
+ unsigned int buffers_ready;
+
+ unsigned int num_video;
+ unsigned int num_inputs;
+ struct vsp1_rwpf *inputs[VPS1_MAX_RPF];
+ struct vsp1_rwpf *output;
+ struct vsp1_entity *lif;
+
+ struct list_head entities;
+};
+
+static inline struct vsp1_pipeline *to_vsp1_pipeline(struct media_entity *e)
+{
+ if (likely(e->pipe))
+ return container_of(e->pipe, struct vsp1_pipeline, pipe);
+ else
+ return NULL;
+}
+
+struct vsp1_video_buffer {
+ struct vsp1_video *video;
+ struct vb2_buffer buf;
+ struct list_head queue;
+
+ dma_addr_t addr[3];
+ unsigned int length[3];
+};
+
+static inline struct vsp1_video_buffer *
+to_vsp1_video_buffer(struct vb2_buffer *vb)
+{
+ return container_of(vb, struct vsp1_video_buffer, buf);
+}
+
+struct vsp1_video_operations {
+ void (*queue)(struct vsp1_video *video, struct vsp1_video_buffer *buf);
+};
+
+struct vsp1_video {
+ struct vsp1_device *vsp1;
+ struct vsp1_entity *rwpf;
+
+ const struct vsp1_video_operations *ops;
+
+ struct video_device video;
+ enum v4l2_buf_type type;
+ struct media_pad pad;
+
+ struct mutex lock;
+ struct v4l2_pix_format_mplane format;
+ const struct vsp1_format_info *fmtinfo;
+
+ struct vsp1_pipeline pipe;
+ unsigned int pipe_index;
+
+ struct vb2_queue queue;
+ void *alloc_ctx;
+ spinlock_t irqlock;
+ struct list_head irqqueue;
+ unsigned int sequence;
+};
+
+static inline struct vsp1_video *to_vsp1_video(struct video_device *vdev)
+{
+ return container_of(vdev, struct vsp1_video, video);
+}
+
+int vsp1_video_init(struct vsp1_video *video, struct vsp1_entity *rwpf);
+void vsp1_video_cleanup(struct vsp1_video *video);
+
+void vsp1_pipeline_frame_end(struct vsp1_pipeline *pipe);
+
+#endif /* __VSP1_VIDEO_H__ */
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
new file mode 100644
index 00000000000..db4b85ee05f
--- /dev/null
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -0,0 +1,233 @@
+/*
+ * vsp1_wpf.c -- R-Car VSP1 Write Pixel Formatter
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/device.h>
+
+#include <media/v4l2-subdev.h>
+
+#include "vsp1.h"
+#include "vsp1_rwpf.h"
+#include "vsp1_video.h"
+
+#define WPF_MAX_WIDTH 2048
+#define WPF_MAX_HEIGHT 2048
+
+/* -----------------------------------------------------------------------------
+ * Device Access
+ */
+
+static inline u32 vsp1_wpf_read(struct vsp1_rwpf *wpf, u32 reg)
+{
+ return vsp1_read(wpf->entity.vsp1,
+ reg + wpf->entity.index * VI6_WPF_OFFSET);
+}
+
+static inline void vsp1_wpf_write(struct vsp1_rwpf *wpf, u32 reg, u32 data)
+{
+ vsp1_write(wpf->entity.vsp1,
+ reg + wpf->entity.index * VI6_WPF_OFFSET, data);
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Core Operations
+ */
+
+static int wpf_s_stream(struct v4l2_subdev *subdev, int enable)
+{
+ struct vsp1_rwpf *wpf = to_rwpf(subdev);
+ struct vsp1_pipeline *pipe =
+ to_vsp1_pipeline(&wpf->entity.subdev.entity);
+ struct vsp1_device *vsp1 = wpf->entity.vsp1;
+ const struct v4l2_mbus_framefmt *format =
+ &wpf->entity.formats[RWPF_PAD_SOURCE];
+ unsigned int i;
+ u32 srcrpf = 0;
+ u32 outfmt = 0;
+
+ if (!enable) {
+ vsp1_write(vsp1, VI6_WPF_IRQ_ENB(wpf->entity.index), 0);
+ return 0;
+ }
+
+ /* Sources */
+ for (i = 0; i < pipe->num_inputs; ++i) {
+ struct vsp1_rwpf *input = pipe->inputs[i];
+
+ srcrpf |= VI6_WPF_SRCRPF_RPF_ACT_MST(input->entity.index);
+ }
+
+ vsp1_wpf_write(wpf, VI6_WPF_SRCRPF, srcrpf);
+
+ /* Destination stride. Cropping isn't supported yet. */
+ if (!pipe->lif) {
+ struct v4l2_pix_format_mplane *format = &wpf->video.format;
+
+ vsp1_wpf_write(wpf, VI6_WPF_DSTM_STRIDE_Y,
+ format->plane_fmt[0].bytesperline);
+ if (format->num_planes > 1)
+ vsp1_wpf_write(wpf, VI6_WPF_DSTM_STRIDE_C,
+ format->plane_fmt[1].bytesperline);
+ }
+
+ vsp1_wpf_write(wpf, VI6_WPF_HSZCLIP,
+ format->width << VI6_WPF_SZCLIP_SIZE_SHIFT);
+ vsp1_wpf_write(wpf, VI6_WPF_VSZCLIP,
+ format->height << VI6_WPF_SZCLIP_SIZE_SHIFT);
+
+ /* Format */
+ if (!pipe->lif) {
+ const struct vsp1_format_info *fmtinfo = wpf->video.fmtinfo;
+
+ outfmt = fmtinfo->hwfmt << VI6_WPF_OUTFMT_WRFMT_SHIFT;
+
+ if (fmtinfo->swap_yc)
+ outfmt |= VI6_WPF_OUTFMT_SPYCS;
+ if (fmtinfo->swap_uv)
+ outfmt |= VI6_WPF_OUTFMT_SPUVS;
+
+ vsp1_wpf_write(wpf, VI6_WPF_DSWAP, fmtinfo->swap);
+ }
+
+ if (wpf->entity.formats[RWPF_PAD_SINK].code !=
+ wpf->entity.formats[RWPF_PAD_SOURCE].code)
+ outfmt |= VI6_WPF_OUTFMT_CSC;
+
+ vsp1_wpf_write(wpf, VI6_WPF_OUTFMT, outfmt);
+
+ vsp1_write(vsp1, VI6_DPR_WPF_FPORCH(wpf->entity.index),
+ VI6_DPR_WPF_FPORCH_FP_WPFN);
+
+ vsp1_write(vsp1, VI6_WPF_WRBCK_CTRL, 0);
+
+ /* Enable interrupts */
+ vsp1_write(vsp1, VI6_WPF_IRQ_STA(wpf->entity.index), 0);
+ vsp1_write(vsp1, VI6_WPF_IRQ_ENB(wpf->entity.index),
+ VI6_WFP_IRQ_ENB_FREE);
+
+ return 0;
+}
+
+/* -----------------------------------------------------------------------------
+ * V4L2 Subdevice Operations
+ */
+
+static struct v4l2_subdev_video_ops wpf_video_ops = {
+ .s_stream = wpf_s_stream,
+};
+
+static struct v4l2_subdev_pad_ops wpf_pad_ops = {
+ .enum_mbus_code = vsp1_rwpf_enum_mbus_code,
+ .enum_frame_size = vsp1_rwpf_enum_frame_size,
+ .get_fmt = vsp1_rwpf_get_format,
+ .set_fmt = vsp1_rwpf_set_format,
+};
+
+static struct v4l2_subdev_ops wpf_ops = {
+ .video = &wpf_video_ops,
+ .pad = &wpf_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Video Device Operations
+ */
+
+static void wpf_vdev_queue(struct vsp1_video *video,
+ struct vsp1_video_buffer *buf)
+{
+ struct vsp1_rwpf *wpf = container_of(video, struct vsp1_rwpf, video);
+
+ vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_Y, buf->addr[0]);
+ if (buf->buf.num_planes > 1)
+ vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C0, buf->addr[1]);
+ if (buf->buf.num_planes > 2)
+ vsp1_wpf_write(wpf, VI6_WPF_DSTM_ADDR_C1, buf->addr[2]);
+}
+
+static const struct vsp1_video_operations wpf_vdev_ops = {
+ .queue = wpf_vdev_queue,
+};
+
+/* -----------------------------------------------------------------------------
+ * Initialization and Cleanup
+ */
+
+struct vsp1_rwpf *vsp1_wpf_create(struct vsp1_device *vsp1, unsigned int index)
+{
+ struct v4l2_subdev *subdev;
+ struct vsp1_video *video;
+ struct vsp1_rwpf *wpf;
+ unsigned int flags;
+ int ret;
+
+ wpf = devm_kzalloc(vsp1->dev, sizeof(*wpf), GFP_KERNEL);
+ if (wpf == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ wpf->max_width = WPF_MAX_WIDTH;
+ wpf->max_height = WPF_MAX_HEIGHT;
+
+ wpf->entity.type = VSP1_ENTITY_WPF;
+ wpf->entity.index = index;
+ wpf->entity.id = VI6_DPR_NODE_WPF(index);
+
+ ret = vsp1_entity_init(vsp1, &wpf->entity, 2);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ /* Initialize the V4L2 subdev. */
+ subdev = &wpf->entity.subdev;
+ v4l2_subdev_init(subdev, &wpf_ops);
+
+ subdev->entity.ops = &vsp1_media_ops;
+ subdev->internal_ops = &vsp1_subdev_internal_ops;
+ snprintf(subdev->name, sizeof(subdev->name), "%s wpf.%u",
+ dev_name(vsp1->dev), index);
+ v4l2_set_subdevdata(subdev, wpf);
+ subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ vsp1_entity_init_formats(subdev, NULL);
+
+ /* Initialize the video device. */
+ video = &wpf->video;
+
+ video->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ video->vsp1 = vsp1;
+ video->ops = &wpf_vdev_ops;
+
+ ret = vsp1_video_init(video, &wpf->entity);
+ if (ret < 0)
+ goto error_video;
+
+ /* Connect the video device to the WPF. All connections are immutable
+ * except for the WPF0 source link if a LIF is present.
+ */
+ flags = MEDIA_LNK_FL_ENABLED;
+ if (!(vsp1->pdata->features & VSP1_HAS_LIF) || index != 0)
+ flags |= MEDIA_LNK_FL_IMMUTABLE;
+
+ ret = media_entity_create_link(&wpf->entity.subdev.entity,
+ RWPF_PAD_SOURCE,
+ &wpf->video.video.entity, 0, flags);
+ if (ret < 0)
+ goto error_link;
+
+ wpf->entity.sink = &wpf->video.video.entity;
+
+ return wpf;
+
+error_link:
+ vsp1_video_cleanup(video);
+error_video:
+ media_entity_cleanup(&wpf->entity.subdev.entity);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index d529ba788f4..39882ddd259 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -12,6 +12,9 @@ menuconfig RADIO_ADAPTERS
if RADIO_ADAPTERS && VIDEO_V4L2
+config RADIO_TEA575X
+ tristate
+
config RADIO_SI470X
bool "Silicon Labs Si470x FM Radio Receiver support"
depends on VIDEO_V4L2
@@ -61,7 +64,8 @@ config USB_DSBR
config RADIO_MAXIRADIO
tristate "Guillemot MAXI Radio FM 2000 radio"
- depends on VIDEO_V4L2 && PCI && SND
+ depends on VIDEO_V4L2 && PCI
+ select RADIO_TEA575X
---help---
Choose Y here if you have this radio card. This card may also be
found as Gemtek PCI FM.
@@ -76,7 +80,8 @@ config RADIO_MAXIRADIO
config RADIO_SHARK
tristate "Griffin radioSHARK USB radio receiver"
- depends on USB && SND
+ depends on USB
+ select RADIO_TEA575X
---help---
Choose Y here if you have this radio receiver.
@@ -393,7 +398,8 @@ config RADIO_SF16FMI
config RADIO_SF16FMR2
tristate "SF16-FMR2/SF16-FMD2 Radio"
- depends on ISA && VIDEO_V4L2 && SND
+ depends on ISA && VIDEO_V4L2
+ select RADIO_TEA575X
---help---
Choose Y here if you have one of these FM radio cards.
diff --git a/drivers/media/radio/Makefile b/drivers/media/radio/Makefile
index 0dcdb320cfc..3b645601800 100644
--- a/drivers/media/radio/Makefile
+++ b/drivers/media/radio/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_RADIO_TEF6862) += tef6862.o
obj-$(CONFIG_RADIO_TIMBERDALE) += radio-timb.o
obj-$(CONFIG_RADIO_WL1273) += radio-wl1273.o
obj-$(CONFIG_RADIO_WL128X) += wl128x/
+obj-$(CONFIG_RADIO_TEA575X) += tea575x.o
shark2-objs := radio-shark2.o radio-tea5777.o
diff --git a/drivers/media/radio/radio-aztech.c b/drivers/media/radio/radio-aztech.c
index 177bcbd7a7c..705dd6f9162 100644
--- a/drivers/media/radio/radio-aztech.c
+++ b/drivers/media/radio/radio-aztech.c
@@ -26,6 +26,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-ctrls.h>
#include "radio-isa.h"
+#include "lm7000.h"
MODULE_AUTHOR("Russell Kroll, Quay Lu, Donald Song, Jason Lewis, Scott McGrath, William McGrath");
MODULE_DESCRIPTION("A driver for the Aztech radio card.");
@@ -54,18 +55,29 @@ struct aztech {
int curvol;
};
-static void send_0_byte(struct aztech *az)
-{
- udelay(radio_wait_time);
- outb_p(2 + az->curvol, az->isa.io);
- outb_p(64 + 2 + az->curvol, az->isa.io);
-}
+/* bit definitions for register read */
+#define AZTECH_BIT_NOT_TUNED (1 << 0)
+#define AZTECH_BIT_MONO (1 << 1)
+/* bit definitions for register write */
+#define AZTECH_BIT_TUN_CE (1 << 1)
+#define AZTECH_BIT_TUN_CLK (1 << 6)
+#define AZTECH_BIT_TUN_DATA (1 << 7)
+/* bits 0 and 2 are volume control, bits 3..5 are not connected */
-static void send_1_byte(struct aztech *az)
+static void aztech_set_pins(void *handle, u8 pins)
{
- udelay(radio_wait_time);
- outb_p(128 + 2 + az->curvol, az->isa.io);
- outb_p(128 + 64 + 2 + az->curvol, az->isa.io);
+ struct radio_isa_card *isa = handle;
+ struct aztech *az = container_of(isa, struct aztech, isa);
+ u8 bits = az->curvol;
+
+ if (pins & LM7000_DATA)
+ bits |= AZTECH_BIT_TUN_DATA;
+ if (pins & LM7000_CLK)
+ bits |= AZTECH_BIT_TUN_CLK;
+ if (pins & LM7000_CE)
+ bits |= AZTECH_BIT_TUN_CE;
+
+ outb_p(bits, az->isa.io);
}
static struct radio_isa_card *aztech_alloc(void)
@@ -77,58 +89,21 @@ static struct radio_isa_card *aztech_alloc(void)
static int aztech_s_frequency(struct radio_isa_card *isa, u32 freq)
{
- struct aztech *az = container_of(isa, struct aztech, isa);
- int i;
-
- freq += 171200; /* Add 10.7 MHz IF */
- freq /= 800; /* Convert to 50 kHz units */
-
- send_0_byte(az); /* 0: LSB of frequency */
-
- for (i = 0; i < 13; i++) /* : frequency bits (1-13) */
- if (freq & (1 << i))
- send_1_byte(az);
- else
- send_0_byte(az);
-
- send_0_byte(az); /* 14: test bit - always 0 */
- send_0_byte(az); /* 15: test bit - always 0 */
- send_0_byte(az); /* 16: band data 0 - always 0 */
- if (isa->stereo) /* 17: stereo (1 to enable) */
- send_1_byte(az);
- else
- send_0_byte(az);
-
- send_1_byte(az); /* 18: band data 1 - unknown */
- send_0_byte(az); /* 19: time base - always 0 */
- send_0_byte(az); /* 20: spacing (0 = 25 kHz) */
- send_1_byte(az); /* 21: spacing (1 = 25 kHz) */
- send_0_byte(az); /* 22: spacing (0 = 25 kHz) */
- send_1_byte(az); /* 23: AM/FM (FM = 1, always) */
-
- /* latch frequency */
-
- udelay(radio_wait_time);
- outb_p(128 + 64 + az->curvol, az->isa.io);
+ lm7000_set_freq(freq, isa, aztech_set_pins);
return 0;
}
-/* thanks to Michael Dwyer for giving me a dose of clues in
- * the signal strength department..
- *
- * This card has a stereo bit - bit 0 set = mono, not set = stereo
- */
static u32 aztech_g_rxsubchans(struct radio_isa_card *isa)
{
- if (inb(isa->io) & 1)
+ if (inb(isa->io) & AZTECH_BIT_MONO)
return V4L2_TUNER_SUB_MONO;
return V4L2_TUNER_SUB_STEREO;
}
-static int aztech_s_stereo(struct radio_isa_card *isa, bool stereo)
+static u32 aztech_g_signal(struct radio_isa_card *isa)
{
- return aztech_s_frequency(isa, isa->freq);
+ return (inb(isa->io) & AZTECH_BIT_NOT_TUNED) ? 0 : 0xffff;
}
static int aztech_s_mute_volume(struct radio_isa_card *isa, bool mute, int vol)
@@ -146,8 +121,8 @@ static const struct radio_isa_ops aztech_ops = {
.alloc = aztech_alloc,
.s_mute_volume = aztech_s_mute_volume,
.s_frequency = aztech_s_frequency,
- .s_stereo = aztech_s_stereo,
.g_rxsubchans = aztech_g_rxsubchans,
+ .g_signal = aztech_g_signal,
};
static const int aztech_ioports[] = { 0x350, 0x358 };
@@ -165,7 +140,7 @@ static struct radio_isa_driver aztech_driver = {
.radio_nr_params = radio_nr,
.io_ports = aztech_ioports,
.num_of_io_ports = ARRAY_SIZE(aztech_ioports),
- .region_size = 2,
+ .region_size = 8,
.card = "Aztech Radio",
.ops = &aztech_ops,
.has_stereo = true,
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
index bd4d3a7cdad..5236035f0f2 100644
--- a/drivers/media/radio/radio-maxiradio.c
+++ b/drivers/media/radio/radio-maxiradio.c
@@ -42,7 +42,7 @@
#include <linux/videodev2.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <sound/tea575x-tuner.h>
+#include <media/tea575x.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-fh.h>
@@ -200,15 +200,4 @@ static struct pci_driver maxiradio_driver = {
.remove = maxiradio_remove,
};
-static int __init maxiradio_init(void)
-{
- return pci_register_driver(&maxiradio_driver);
-}
-
-static void __exit maxiradio_exit(void)
-{
- pci_unregister_driver(&maxiradio_driver);
-}
-
-module_init(maxiradio_init);
-module_exit(maxiradio_exit);
+module_pci_driver(maxiradio_driver);
diff --git a/drivers/media/radio/radio-sf16fmr2.c b/drivers/media/radio/radio-sf16fmr2.c
index 9c0990457a7..f1e3714b5f1 100644
--- a/drivers/media/radio/radio-sf16fmr2.c
+++ b/drivers/media/radio/radio-sf16fmr2.c
@@ -14,7 +14,7 @@
#include <linux/io.h> /* outb, outb_p */
#include <linux/isa.h>
#include <linux/pnp.h>
-#include <sound/tea575x-tuner.h>
+#include <media/tea575x.h>
MODULE_AUTHOR("Ondrej Zary");
MODULE_DESCRIPTION("MediaForte SF16-FMR2 and SF16-FMD2 FM radio card driver");
diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
index 8fa18ab5b72..b9147721241 100644
--- a/drivers/media/radio/radio-shark.c
+++ b/drivers/media/radio/radio-shark.c
@@ -33,7 +33,7 @@
#include <linux/usb.h>
#include <linux/workqueue.h>
#include <media/v4l2-device.h>
-#include <sound/tea575x-tuner.h>
+#include <media/tea575x.h>
#if defined(CONFIG_LEDS_CLASS) || \
(defined(CONFIG_LEDS_CLASS_MODULE) && defined(CONFIG_RADIO_SHARK_MODULE))
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index 62f3edec39b..d6d4d60261d 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -142,8 +142,6 @@ MODULE_PARM_DESC(max_rds_errors, "RDS maximum block errors: *1*");
/**************************************************************************
* Software/Hardware Versions from Scratch Page
**************************************************************************/
-#define RADIO_SW_VERSION_NOT_BOOTLOADABLE 6
-#define RADIO_SW_VERSION 1
#define RADIO_HW_VERSION 1
@@ -682,15 +680,6 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
}
dev_info(&intf->dev, "software version %d, hardware version %d\n",
radio->software_version, radio->hardware_version);
- if (radio->software_version < RADIO_SW_VERSION) {
- dev_warn(&intf->dev,
- "This driver is known to work with "
- "software version %hu,\n", RADIO_SW_VERSION);
- dev_warn(&intf->dev,
- "but the device has software version %hu.\n",
- radio->software_version);
- version_warning = 1;
- }
if (radio->hardware_version < RADIO_HW_VERSION) {
dev_warn(&intf->dev,
"This driver is known to work with "
diff --git a/sound/i2c/other/tea575x-tuner.c b/drivers/media/radio/tea575x.c
index 8a36a1d9803..cef06981b7c 100644
--- a/sound/i2c/other/tea575x-tuner.c
+++ b/drivers/media/radio/tea575x.c
@@ -31,7 +31,7 @@
#include <media/v4l2-fh.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
-#include <sound/tea575x-tuner.h>
+#include <media/tea575x.h>
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Routines for control of TEA5757/5759 Philips AM/FM radio tuner chips");
@@ -486,13 +486,9 @@ static const struct v4l2_ctrl_ops tea575x_ctrl_ops = {
.s_ctrl = tea575x_s_ctrl,
};
-/*
- * initialize all the tea575x chips
- */
-int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner)
-{
- int retval;
+int snd_tea575x_hw_init(struct snd_tea575x *tea)
+{
tea->mute = true;
/* Not all devices can or know how to read the data back.
@@ -507,6 +503,17 @@ int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner)
tea->freq = 90500 * 16; /* 90.5Mhz default */
snd_tea575x_set_freq(tea);
+ return 0;
+}
+EXPORT_SYMBOL(snd_tea575x_hw_init);
+
+int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner)
+{
+ int retval = snd_tea575x_hw_init(tea);
+
+ if (retval)
+ return retval;
+
tea->vd = tea575x_radio;
video_set_drvdata(&tea->vd, tea);
mutex_init(&tea->mutex);
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index 5a79c333d45..11e84bcc23a 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -223,6 +223,8 @@ config IR_REDRAT3
tristate "RedRat3 IR Transceiver"
depends on USB_ARCH_HAS_HCD
depends on RC_CORE
+ select NEW_LEDS
+ select LEDS_CLASS
select USB
---help---
Say Y here if you want to use a RedRat3 Infrared Transceiver.
@@ -248,7 +250,6 @@ config IR_WINBOND_CIR
depends on RC_CORE
select NEW_LEDS
select LEDS_CLASS
- select LEDS_TRIGGERS
select BITREVERSE
---help---
Say Y here if you want to use the IR remote functionality found
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index ed184f68c17..c1444f84717 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -476,7 +476,7 @@ select_timeout:
}
/* Enable the device for receive */
-static void ene_rx_enable(struct ene_device *dev)
+static void ene_rx_enable_hw(struct ene_device *dev)
{
u8 reg_value;
@@ -504,11 +504,17 @@ static void ene_rx_enable(struct ene_device *dev)
/* enter idle mode */
ir_raw_event_set_idle(dev->rdev, true);
+}
+
+/* Enable the device for receive - wrapper to track the state */
+static void ene_rx_enable(struct ene_device *dev)
+{
+ ene_rx_enable_hw(dev);
dev->rx_enabled = true;
}
/* Disable the device receiver */
-static void ene_rx_disable(struct ene_device *dev)
+static void ene_rx_disable_hw(struct ene_device *dev)
{
/* disable inputs */
ene_rx_enable_cir_engine(dev, false);
@@ -516,8 +522,13 @@ static void ene_rx_disable(struct ene_device *dev)
/* disable hardware IRQ and firmware flag */
ene_clear_reg_mask(dev, ENE_FW1, ENE_FW1_ENABLE | ENE_FW1_IRQ);
-
ir_raw_event_set_idle(dev->rdev, true);
+}
+
+/* Disable the device receiver - wrapper to track the state */
+static void ene_rx_disable(struct ene_device *dev)
+{
+ ene_rx_disable_hw(dev);
dev->rx_enabled = false;
}
@@ -1022,6 +1033,8 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
spin_lock_init(&dev->hw_lock);
dev->hw_io = pnp_port_start(pnp_dev, 0);
+ dev->irq = pnp_irq(pnp_dev, 0);
+
pnp_set_drvdata(pnp_dev, dev);
dev->pnp_dev = pnp_dev;
@@ -1085,7 +1098,6 @@ static int ene_probe(struct pnp_dev *pnp_dev, const struct pnp_device_id *id)
goto exit_unregister_device;
}
- dev->irq = pnp_irq(pnp_dev, 0);
if (request_irq(dev->irq, ene_isr,
IRQF_SHARED, ENE_DRIVER_NAME, (void *)dev)) {
goto exit_release_hw_io;
@@ -1123,9 +1135,8 @@ static void ene_remove(struct pnp_dev *pnp_dev)
}
/* enable wake on IR (wakes on specific button on original remote) */
-static void ene_enable_wake(struct ene_device *dev, int enable)
+static void ene_enable_wake(struct ene_device *dev, bool enable)
{
- enable = enable && device_may_wakeup(&dev->pnp_dev->dev);
dbg("wake on IR %s", enable ? "enabled" : "disabled");
ene_set_clear_reg_mask(dev, ENE_FW1, ENE_FW1_WAKE, enable);
}
@@ -1134,9 +1145,12 @@ static void ene_enable_wake(struct ene_device *dev, int enable)
static int ene_suspend(struct pnp_dev *pnp_dev, pm_message_t state)
{
struct ene_device *dev = pnp_get_drvdata(pnp_dev);
- ene_enable_wake(dev, true);
+ bool wake = device_may_wakeup(&dev->pnp_dev->dev);
+
+ if (!wake && dev->rx_enabled)
+ ene_rx_disable_hw(dev);
- /* TODO: add support for wake pattern */
+ ene_enable_wake(dev, wake);
return 0;
}
diff --git a/drivers/media/rc/ene_ir.h b/drivers/media/rc/ene_ir.h
index 6f978e85db8..a7911e3b9bc 100644
--- a/drivers/media/rc/ene_ir.h
+++ b/drivers/media/rc/ene_ir.h
@@ -185,7 +185,7 @@
#define __dbg(level, format, ...) \
do { \
if (debug >= level) \
- pr_debug(format "\n", ## __VA_ARGS__); \
+ pr_info(format "\n", ## __VA_ARGS__); \
} while (0)
#define dbg(format, ...) __dbg(1, format, ## __VA_ARGS__)
diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
index a4ab2e6b3f8..19632b1c219 100644
--- a/drivers/media/rc/iguanair.c
+++ b/drivers/media/rc/iguanair.c
@@ -364,8 +364,8 @@ static int iguanair_tx(struct rc_dev *dev, unsigned *txbuf, unsigned count)
periods = DIV_ROUND_CLOSEST(txbuf[i] * ir->carrier, 1000000);
bytes = DIV_ROUND_UP(periods, 127);
if (size + bytes > ir->bufsize) {
- count = i;
- break;
+ rc = -EINVAL;
+ goto out;
}
while (periods > 127) {
ir->packet->payload[size++] = 127 | space;
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index e4561264e12..ed2c8a1ed8c 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -140,11 +140,20 @@ static ssize_t ir_lirc_transmit_ir(struct file *file, const char __user *buf,
goto out;
}
+ for (i = 0; i < count; i++) {
+ if (txbuf[i] > IR_MAX_DURATION / 1000 - duration || !txbuf[i]) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ duration += txbuf[i];
+ }
+
ret = dev->tx_ir(dev, txbuf, count);
if (ret < 0)
goto out;
- for (i = 0; i < ret; i++)
+ for (duration = i = 0; i < ret; i++)
duration += txbuf[i];
ret *= sizeof(unsigned int);
@@ -375,6 +384,7 @@ static int ir_lirc_register(struct rc_dev *dev)
drv->code_length = sizeof(struct ir_raw_event) * 8;
drv->fops = &lirc_fops;
drv->dev = &dev->dev;
+ drv->rdev = dev;
drv->owner = THIS_MODULE;
drv->minor = lirc_register_driver(drv);
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 8dc057b273f..dc5cbffcd5a 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -35,6 +35,7 @@
#include <linux/device.h>
#include <linux/cdev.h>
+#include <media/rc-core.h>
#include <media/lirc.h>
#include <media/lirc_dev.h>
@@ -467,6 +468,12 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
goto error;
}
+ if (ir->d.rdev) {
+ retval = rc_open(ir->d.rdev);
+ if (retval)
+ goto error;
+ }
+
cdev = ir->cdev;
if (try_module_get(cdev->owner)) {
ir->open++;
@@ -511,6 +518,9 @@ int lirc_dev_fop_close(struct inode *inode, struct file *file)
WARN_ON(mutex_lock_killable(&lirc_dev_lock));
+ if (ir->d.rdev)
+ rc_close(ir->d.rdev);
+
ir->open--;
if (ir->attached) {
ir->d.set_use_dec(ir->d.data);
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 1cf382a0b27..46da365c9c8 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -16,6 +16,7 @@
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/input.h>
+#include <linux/leds.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/module.h>
@@ -31,6 +32,7 @@
/* Used to keep track of known keymaps */
static LIST_HEAD(rc_map_list);
static DEFINE_SPINLOCK(rc_map_lock);
+static struct led_trigger *led_feedback;
static struct rc_map_list *seek_rc_map(const char *name)
{
@@ -535,6 +537,7 @@ static void ir_do_keyup(struct rc_dev *dev, bool sync)
IR_dprintk(1, "keyup key 0x%04x\n", dev->last_keycode);
input_report_key(dev->input_dev, dev->last_keycode, 0);
+ led_trigger_event(led_feedback, LED_OFF);
if (sync)
input_sync(dev->input_dev);
dev->keypressed = false;
@@ -648,6 +651,7 @@ static void ir_do_keydown(struct rc_dev *dev, int scancode,
input_report_key(dev->input_dev, keycode, 1);
}
+ led_trigger_event(led_feedback, LED_FULL);
input_sync(dev->input_dev);
}
@@ -699,19 +703,50 @@ void rc_keydown_notimeout(struct rc_dev *dev, int scancode, u8 toggle)
}
EXPORT_SYMBOL_GPL(rc_keydown_notimeout);
+int rc_open(struct rc_dev *rdev)
+{
+ int rval = 0;
+
+ if (!rdev)
+ return -EINVAL;
+
+ mutex_lock(&rdev->lock);
+ if (!rdev->users++ && rdev->open != NULL)
+ rval = rdev->open(rdev);
+
+ if (rval)
+ rdev->users--;
+
+ mutex_unlock(&rdev->lock);
+
+ return rval;
+}
+EXPORT_SYMBOL_GPL(rc_open);
+
static int ir_open(struct input_dev *idev)
{
struct rc_dev *rdev = input_get_drvdata(idev);
- return rdev->open(rdev);
+ return rc_open(rdev);
+}
+
+void rc_close(struct rc_dev *rdev)
+{
+ if (rdev) {
+ mutex_lock(&rdev->lock);
+
+ if (!--rdev->users && rdev->close != NULL)
+ rdev->close(rdev);
+
+ mutex_unlock(&rdev->lock);
+ }
}
+EXPORT_SYMBOL_GPL(rc_close);
static void ir_close(struct input_dev *idev)
{
struct rc_dev *rdev = input_get_drvdata(idev);
-
- if (rdev)
- rdev->close(rdev);
+ rc_close(rdev);
}
/* class for /sys/class/rc */
@@ -1076,7 +1111,14 @@ int rc_register_device(struct rc_dev *dev)
memcpy(&dev->input_dev->id, &dev->input_id, sizeof(dev->input_id));
dev->input_dev->phys = dev->input_phys;
dev->input_dev->name = dev->input_name;
+
+ /* input_register_device can call ir_open, so unlock mutex here */
+ mutex_unlock(&dev->lock);
+
rc = input_register_device(dev->input_dev);
+
+ mutex_lock(&dev->lock);
+
if (rc)
goto out_table;
@@ -1184,6 +1226,7 @@ static int __init rc_core_init(void)
return rc;
}
+ led_trigger_register_simple("rc-feedback", &led_feedback);
rc_map_register(&empty_map);
return 0;
@@ -1192,6 +1235,7 @@ static int __init rc_core_init(void)
static void __exit rc_core_exit(void)
{
class_unregister(&rc_class);
+ led_trigger_unregister_simple(led_feedback);
rc_map_unregister(&empty_map);
}
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 12167a6b547..094484fac94 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -47,6 +47,7 @@
#include <asm/unaligned.h>
#include <linux/device.h>
+#include <linux/leds.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
@@ -186,6 +187,13 @@ struct redrat3_dev {
struct rc_dev *rc;
struct device *dev;
+ /* led control */
+ struct led_classdev led;
+ atomic_t flash;
+ struct usb_ctrlrequest flash_control;
+ struct urb *flash_urb;
+ u8 flash_in_buf;
+
/* save off the usb device pointer */
struct usb_device *udev;
@@ -206,8 +214,6 @@ struct redrat3_dev {
struct timer_list rx_timeout;
u32 hw_timeout;
- /* is the detector enabled*/
- bool det_enabled;
/* Is the device currently transmitting?*/
bool transmitting;
@@ -472,40 +478,19 @@ static int redrat3_enable_detector(struct redrat3_dev *rr3)
return -EIO;
}
- rr3->det_enabled = true;
redrat3_issue_async(rr3);
return 0;
}
-/* Disables the rr3 long range detector */
-static void redrat3_disable_detector(struct redrat3_dev *rr3)
-{
- struct device *dev = rr3->dev;
- u8 ret;
-
- rr3_ftr(dev, "Entering %s\n", __func__);
-
- ret = redrat3_send_cmd(RR3_RC_DET_DISABLE, rr3);
- if (ret != 0)
- dev_err(dev, "%s: failure!\n", __func__);
-
- ret = redrat3_send_cmd(RR3_RC_DET_STATUS, rr3);
- if (ret != 0)
- dev_warn(dev, "%s: detector status: %d, should be 0\n",
- __func__, ret);
-
- rr3->det_enabled = false;
-}
-
static inline void redrat3_delete(struct redrat3_dev *rr3,
struct usb_device *udev)
{
rr3_ftr(rr3->dev, "%s cleaning up\n", __func__);
usb_kill_urb(rr3->read_urb);
-
+ usb_kill_urb(rr3->flash_urb);
usb_free_urb(rr3->read_urb);
-
+ usb_free_urb(rr3->flash_urb);
usb_free_coherent(udev, le16_to_cpu(rr3->ep_in->wMaxPacketSize),
rr3->bulk_in_buf, rr3->dma_in);
@@ -686,7 +671,8 @@ static int redrat3_get_ir_data(struct redrat3_dev *rr3, unsigned len)
goto out;
}
- if (rr3->bytes_read < be16_to_cpu(rr3->irdata.header.length))
+ if (rr3->bytes_read < be16_to_cpu(rr3->irdata.header.length) +
+ sizeof(struct redrat3_header))
/* we're still accumulating data */
return 0;
@@ -785,10 +771,10 @@ static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf,
return -EAGAIN;
}
- count = min_t(unsigned, count, RR3_MAX_SIG_SIZE - RR3_TX_TRAILER_LEN);
+ if (count > RR3_MAX_SIG_SIZE - RR3_TX_TRAILER_LEN)
+ return -EINVAL;
/* rr3 will disable rc detector on transmit */
- rr3->det_enabled = false;
rr3->transmitting = true;
sample_lens = kzalloc(sizeof(int) * RR3_DRIVER_MAXLENS, GFP_KERNEL);
@@ -825,8 +811,8 @@ static int redrat3_transmit_ir(struct rc_dev *rcdev, unsigned *txbuf,
&irdata->lens[curlencheck]);
curlencheck++;
} else {
- count = i - 1;
- break;
+ ret = -EINVAL;
+ goto out;
}
}
irdata->sigdata[i] = lencheck;
@@ -868,11 +854,48 @@ out:
rr3->transmitting = false;
/* rr3 re-enables rc detector because it was enabled before */
- rr3->det_enabled = true;
return ret;
}
+static void redrat3_brightness_set(struct led_classdev *led_dev, enum
+ led_brightness brightness)
+{
+ struct redrat3_dev *rr3 = container_of(led_dev, struct redrat3_dev,
+ led);
+
+ if (brightness != LED_OFF && atomic_cmpxchg(&rr3->flash, 0, 1) == 0) {
+ int ret = usb_submit_urb(rr3->flash_urb, GFP_ATOMIC);
+ if (ret != 0) {
+ dev_dbg(rr3->dev, "%s: unexpected ret of %d\n",
+ __func__, ret);
+ atomic_set(&rr3->flash, 0);
+ }
+ }
+}
+
+static void redrat3_led_complete(struct urb *urb)
+{
+ struct redrat3_dev *rr3 = urb->context;
+
+ switch (urb->status) {
+ case 0:
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ usb_unlink_urb(urb);
+ return;
+ case -EPIPE:
+ default:
+ dev_dbg(rr3->dev, "Error: urb status = %d\n", urb->status);
+ break;
+ }
+
+ rr3->led.brightness = LED_OFF;
+ atomic_dec(&rr3->flash);
+}
+
static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3)
{
struct device *dev = rr3->dev;
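redrat3_brightness_set() and redrat3_led_complete() above follow the usual "at most one URB in flight" pattern: the submitter claims an atomic flag with cmpxchg before submitting, and the completion handler releases it. A condensed sketch of just that pattern, with illustrative names rather than the driver's:

static atomic_t example_flash_busy = ATOMIC_INIT(0);

static void example_trigger_flash(struct urb *flash_urb)
{
	/* only the caller that flips 0 -> 1 gets to submit */
	if (atomic_cmpxchg(&example_flash_busy, 0, 1) == 0) {
		if (usb_submit_urb(flash_urb, GFP_ATOMIC) != 0)
			atomic_set(&example_flash_busy, 0); /* submit failed */
	}
}

static void example_flash_complete(struct urb *urb)
{
	/* release the slot so the next keypress can flash the LED again */
	atomic_dec(&example_flash_busy);
}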
@@ -1016,10 +1039,35 @@ static int redrat3_dev_probe(struct usb_interface *intf,
/* default.. will get overridden by any sends with a freq defined */
rr3->carrier = 38000;
+ /* led control */
+ rr3->led.name = "redrat3:red:feedback";
+ rr3->led.default_trigger = "rc-feedback";
+ rr3->led.brightness_set = redrat3_brightness_set;
+ retval = led_classdev_register(&intf->dev, &rr3->led);
+ if (retval)
+ goto error;
+
+ atomic_set(&rr3->flash, 0);
+ rr3->flash_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!rr3->flash_urb) {
+ retval = -ENOMEM;
+ goto led_free_error;
+ }
+
+ /* setup packet is 'c0 b9 0000 0000 0001' */
+ rr3->flash_control.bRequestType = 0xc0;
+ rr3->flash_control.bRequest = RR3_BLINK_LED;
+ rr3->flash_control.wLength = cpu_to_le16(1);
+
+ usb_fill_control_urb(rr3->flash_urb, udev, usb_rcvctrlpipe(udev, 0),
+ (unsigned char *)&rr3->flash_control,
+ &rr3->flash_in_buf, sizeof(rr3->flash_in_buf),
+ redrat3_led_complete, rr3);
+
rr3->rc = redrat3_init_rc_dev(rr3);
if (!rr3->rc) {
retval = -ENOMEM;
- goto error;
+ goto led_free_error;
}
setup_timer(&rr3->rx_timeout, redrat3_rx_timeout, (unsigned long)rr3);
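For reference, the 'c0 b9 0000 0000 0001' comment maps onto the usb_ctrlrequest fields filled in above as follows; RR3_BLINK_LED is taken to be 0xb9 from that comment, and wValue/wIndex are simply left at their zeroed defaults:

static const struct usb_ctrlrequest example_blink_setup = {
	.bRequestType = 0xc0,           /* USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE */
	.bRequest     = 0xb9,           /* RR3_BLINK_LED, per the comment above */
	.wValue       = 0,
	.wIndex       = 0,
	.wLength      = cpu_to_le16(1), /* one status byte read back into flash_in_buf */
};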
@@ -1029,6 +1077,8 @@ static int redrat3_dev_probe(struct usb_interface *intf,
rr3_ftr(dev, "Exiting %s\n", __func__);
return 0;
+led_free_error:
+ led_classdev_unregister(&rr3->led);
error:
redrat3_delete(rr3, rr3->udev);
@@ -1048,10 +1098,9 @@ static void redrat3_dev_disconnect(struct usb_interface *intf)
if (!rr3)
return;
- redrat3_disable_detector(rr3);
-
usb_set_intfdata(intf, NULL);
rc_unregister_device(rr3->rc);
+ led_classdev_unregister(&rr3->led);
del_timer_sync(&rr3->rx_timeout);
redrat3_delete(rr3, udev);
@@ -1062,7 +1111,9 @@ static int redrat3_dev_suspend(struct usb_interface *intf, pm_message_t message)
{
struct redrat3_dev *rr3 = usb_get_intfdata(intf);
rr3_ftr(rr3->dev, "suspend\n");
+ led_classdev_suspend(&rr3->led);
usb_kill_urb(rr3->read_urb);
+ usb_kill_urb(rr3->flash_urb);
return 0;
}
@@ -1072,6 +1123,7 @@ static int redrat3_dev_resume(struct usb_interface *intf)
rr3_ftr(rr3->dev, "resume\n");
if (usb_submit_urb(rr3->read_urb, GFP_ATOMIC))
return -EIO;
+ led_classdev_resume(&rr3->led);
return 0;
}
diff --git a/drivers/media/rc/ttusbir.c b/drivers/media/rc/ttusbir.c
index 891762d167e..d8de2056a4f 100644
--- a/drivers/media/rc/ttusbir.c
+++ b/drivers/media/rc/ttusbir.c
@@ -302,6 +302,7 @@ static int ttusbir_probe(struct usb_interface *intf,
ttusbir_bulk_complete, tt);
tt->led.name = "ttusbir:green:power";
+ tt->led.default_trigger = "rc-feedback";
tt->led.brightness_set = ttusbir_brightness_set;
tt->led.brightness_get = ttusbir_brightness_get;
tt->is_led_on = tt->led_on = true;
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 87af2d3ba60..98bd4960c75 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -213,13 +213,11 @@ struct wbcir_data {
/* RX state */
enum wbcir_rxstate rxstate;
- struct led_trigger *rxtrigger;
int carrier_report_enabled;
u32 pulse_duration;
/* TX state */
enum wbcir_txstate txstate;
- struct led_trigger *txtrigger;
u32 txlen;
u32 txoff;
u32 *txbuf;
@@ -366,14 +364,11 @@ wbcir_idle_rx(struct rc_dev *dev, bool idle)
{
struct wbcir_data *data = dev->priv;
- if (!idle && data->rxstate == WBCIR_RXSTATE_INACTIVE) {
+ if (!idle && data->rxstate == WBCIR_RXSTATE_INACTIVE)
data->rxstate = WBCIR_RXSTATE_ACTIVE;
- led_trigger_event(data->rxtrigger, LED_FULL);
- }
if (idle && data->rxstate != WBCIR_RXSTATE_INACTIVE) {
data->rxstate = WBCIR_RXSTATE_INACTIVE;
- led_trigger_event(data->rxtrigger, LED_OFF);
if (data->carrier_report_enabled)
wbcir_carrier_report(data);
@@ -425,7 +420,6 @@ wbcir_irq_tx(struct wbcir_data *data)
case WBCIR_TXSTATE_INACTIVE:
/* TX FIFO empty */
space = 16;
- led_trigger_event(data->txtrigger, LED_FULL);
break;
case WBCIR_TXSTATE_ACTIVE:
/* TX FIFO low (3 bytes or less) */
@@ -464,7 +458,6 @@ wbcir_irq_tx(struct wbcir_data *data)
/* Clear TX underrun bit */
outb(WBCIR_TX_UNDERRUN, data->sbase + WBCIR_REG_SP3_ASCR);
wbcir_set_irqmask(data, WBCIR_IRQ_RX | WBCIR_IRQ_ERR);
- led_trigger_event(data->txtrigger, LED_OFF);
kfree(data->txbuf);
data->txbuf = NULL;
data->txstate = WBCIR_TXSTATE_INACTIVE;
@@ -878,15 +871,13 @@ finish:
*/
wbcir_set_irqmask(data, WBCIR_IRQ_NONE);
disable_irq(data->irq);
-
- /* Disable LED */
- led_trigger_event(data->rxtrigger, LED_OFF);
- led_trigger_event(data->txtrigger, LED_OFF);
}
static int
wbcir_suspend(struct pnp_dev *device, pm_message_t state)
{
+ struct wbcir_data *data = pnp_get_drvdata(device);
+ led_classdev_suspend(&data->led);
wbcir_shutdown(device);
return 0;
}
@@ -1015,6 +1006,7 @@ wbcir_resume(struct pnp_dev *device)
wbcir_init_hw(data);
enable_irq(data->irq);
+ led_classdev_resume(&data->led);
return 0;
}
@@ -1058,25 +1050,13 @@ wbcir_probe(struct pnp_dev *device, const struct pnp_device_id *dev_id)
"(w: 0x%lX, e: 0x%lX, s: 0x%lX, i: %u)\n",
data->wbase, data->ebase, data->sbase, data->irq);
- led_trigger_register_simple("cir-tx", &data->txtrigger);
- if (!data->txtrigger) {
- err = -ENOMEM;
- goto exit_free_data;
- }
-
- led_trigger_register_simple("cir-rx", &data->rxtrigger);
- if (!data->rxtrigger) {
- err = -ENOMEM;
- goto exit_unregister_txtrigger;
- }
-
data->led.name = "cir::activity";
- data->led.default_trigger = "cir-rx";
+ data->led.default_trigger = "rc-feedback";
data->led.brightness_set = wbcir_led_brightness_set;
data->led.brightness_get = wbcir_led_brightness_get;
err = led_classdev_register(&device->dev, &data->led);
if (err)
- goto exit_unregister_rxtrigger;
+ goto exit_free_data;
data->dev = rc_allocate_device();
if (!data->dev) {
@@ -1156,10 +1136,6 @@ exit_free_rc:
rc_free_device(data->dev);
exit_unregister_led:
led_classdev_unregister(&data->led);
-exit_unregister_rxtrigger:
- led_trigger_unregister_simple(data->rxtrigger);
-exit_unregister_txtrigger:
- led_trigger_unregister_simple(data->txtrigger);
exit_free_data:
kfree(data);
pnp_set_drvdata(device, NULL);
@@ -1187,8 +1163,6 @@ wbcir_remove(struct pnp_dev *device)
rc_unregister_device(data->dev);
- led_trigger_unregister_simple(data->rxtrigger);
- led_trigger_unregister_simple(data->txtrigger);
led_classdev_unregister(&data->led);
/* This is ok since &data->led isn't actually used */
diff --git a/drivers/media/tuners/e4000.c b/drivers/media/tuners/e4000.c
index 1b33ed368ab..ad9309da4a9 100644
--- a/drivers/media/tuners/e4000.c
+++ b/drivers/media/tuners/e4000.c
@@ -41,8 +41,9 @@ static int e4000_wr_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len)
if (ret == 1) {
ret = 0;
} else {
- dev_warn(&priv->i2c->dev, "%s: i2c wr failed=%d reg=%02x " \
- "len=%d\n", KBUILD_MODNAME, ret, reg, len);
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c wr failed=%d reg=%02x len=%d\n",
+ KBUILD_MODNAME, ret, reg, len);
ret = -EREMOTEIO;
}
return ret;
@@ -72,8 +73,9 @@ static int e4000_rd_regs(struct e4000_priv *priv, u8 reg, u8 *val, int len)
memcpy(val, buf, len);
ret = 0;
} else {
- dev_warn(&priv->i2c->dev, "%s: i2c rd failed=%d reg=%02x " \
- "len=%d\n", KBUILD_MODNAME, ret, reg, len);
+ dev_warn(&priv->i2c->dev,
+ "%s: i2c rd failed=%d reg=%02x len=%d\n",
+ KBUILD_MODNAME, ret, reg, len);
ret = -EREMOTEIO;
}
@@ -140,14 +142,12 @@ static int e4000_init(struct dvb_frontend *fe)
if (ret < 0)
goto err;
- /*
- * TODO: Implement DC offset control correctly.
- * DC offsets has quite much effect for received signal quality in case
- * of direct conversion tuners (Zero-IF). Surely we will now lose few
- * decimals or even decibels from SNR...
- */
/* DC offset control */
- ret = e4000_wr_reg(priv, 0x2d, 0x0c);
+ ret = e4000_wr_reg(priv, 0x2d, 0x1f);
+ if (ret < 0)
+ goto err;
+
+ ret = e4000_wr_regs(priv, 0x70, "\x01\x01", 2);
if (ret < 0)
goto err;
@@ -203,12 +203,13 @@ static int e4000_set_params(struct dvb_frontend *fe)
struct e4000_priv *priv = fe->tuner_priv;
struct dtv_frontend_properties *c = &fe->dtv_property_cache;
int ret, i, sigma_delta;
- unsigned int f_VCO;
- u8 buf[5];
+ unsigned int f_vco;
+ u8 buf[5], i_data[4], q_data[4];
- dev_dbg(&priv->i2c->dev, "%s: delivery_system=%d frequency=%d " \
- "bandwidth_hz=%d\n", __func__,
- c->delivery_system, c->frequency, c->bandwidth_hz);
+ dev_dbg(&priv->i2c->dev,
+ "%s: delivery_system=%d frequency=%d bandwidth_hz=%d\n",
+ __func__, c->delivery_system, c->frequency,
+ c->bandwidth_hz);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
@@ -228,19 +229,19 @@ static int e4000_set_params(struct dvb_frontend *fe)
goto err;
/*
- * Note: Currently f_VCO overflows when c->frequency is 1 073 741 824 Hz
+ * Note: Currently f_vco overflows when c->frequency is 1 073 741 824 Hz
* or more.
*/
- f_VCO = c->frequency * e4000_pll_lut[i].mul;
- sigma_delta = 0x10000UL * (f_VCO % priv->cfg->clock) / priv->cfg->clock;
- buf[0] = f_VCO / priv->cfg->clock;
+ f_vco = c->frequency * e4000_pll_lut[i].mul;
+ sigma_delta = 0x10000UL * (f_vco % priv->cfg->clock) / priv->cfg->clock;
+ buf[0] = f_vco / priv->cfg->clock;
buf[1] = (sigma_delta >> 0) & 0xff;
buf[2] = (sigma_delta >> 8) & 0xff;
buf[3] = 0x00;
buf[4] = e4000_pll_lut[i].div;
- dev_dbg(&priv->i2c->dev, "%s: f_VCO=%u pll div=%d sigma_delta=%04x\n",
- __func__, f_VCO, buf[0], sigma_delta);
+ dev_dbg(&priv->i2c->dev, "%s: f_vco=%u pll div=%d sigma_delta=%04x\n",
+ __func__, f_vco, buf[0], sigma_delta);
ret = e4000_wr_regs(priv, 0x09, buf, 5);
if (ret < 0)
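A worked check of the overflow note above, assuming the PLL LUT selects a multiplier of 4 at that frequency (which is what the quoted 1 073 741 824 Hz threshold implies): 1 073 741 824 × 4 = 4 294 967 296 = 2^32, which wraps a 32-bit f_vco to 0. One possible, purely hypothetical widening:

	/* hypothetical overflow-safe variant of the computation above */
	u64 f_vco_wide = (u64)c->frequency * e4000_pll_lut[i].mul;
	/* the later % and / by priv->cfg->clock would then need
	 * div_u64()/do_div() to stay buildable on 32-bit targets */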
@@ -292,6 +293,43 @@ static int e4000_set_params(struct dvb_frontend *fe)
if (ret < 0)
goto err;
+ /* DC offset */
+ for (i = 0; i < 4; i++) {
+ if (i == 0)
+ ret = e4000_wr_regs(priv, 0x15, "\x00\x7e\x24", 3);
+ else if (i == 1)
+ ret = e4000_wr_regs(priv, 0x15, "\x00\x7f", 2);
+ else if (i == 2)
+ ret = e4000_wr_regs(priv, 0x15, "\x01", 1);
+ else
+ ret = e4000_wr_regs(priv, 0x16, "\x7e", 1);
+
+ if (ret < 0)
+ goto err;
+
+ ret = e4000_wr_reg(priv, 0x29, 0x01);
+ if (ret < 0)
+ goto err;
+
+ ret = e4000_rd_regs(priv, 0x2a, buf, 3);
+ if (ret < 0)
+ goto err;
+
+ i_data[i] = (((buf[2] >> 0) & 0x3) << 6) | (buf[0] & 0x3f);
+ q_data[i] = (((buf[2] >> 4) & 0x3) << 6) | (buf[1] & 0x3f);
+ }
+
+ swap(q_data[2], q_data[3]);
+ swap(i_data[2], i_data[3]);
+
+ ret = e4000_wr_regs(priv, 0x50, q_data, 4);
+ if (ret < 0)
+ goto err;
+
+ ret = e4000_wr_regs(priv, 0x60, i_data, 4);
+ if (ret < 0)
+ goto err;
+
/* gain control auto */
ret = e4000_wr_reg(priv, 0x1a, 0x17);
if (ret < 0)
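For readability, the bit packing performed by the calibration loop above combines six LSBs from the offset readback with two MSBs taken from a nibble of buf[2]:

/*
 * buf[0..2] are read back from registers 0x2a..0x2c:
 *   I entry = ((buf[2] & 0x03) << 6)      | (buf[0] & 0x3f)  -> LUT at 0x60
 *   Q entry = (((buf[2] >> 4) & 0x03) << 6) | (buf[1] & 0x3f) -> LUT at 0x50
 * i.e. buf[2]'s low nibble carries the I MSBs and its high nibble the Q MSBs.
 */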
diff --git a/drivers/media/tuners/e4000.h b/drivers/media/tuners/e4000.h
index 3783a0bdf85..25ee7c07abf 100644
--- a/drivers/media/tuners/e4000.h
+++ b/drivers/media/tuners/e4000.h
@@ -44,7 +44,7 @@ extern struct dvb_frontend *e4000_attach(struct dvb_frontend *fe,
static inline struct dvb_frontend *e4000_attach(struct dvb_frontend *fe,
struct i2c_adapter *i2c, const struct e4000_config *cfg)
{
- pr_warn("%s: driver disabled by Kconfig\n", __func__);
+ dev_warn(&i2c->dev, "%s: driver disabled by Kconfig\n", __func__);
return NULL;
}
#endif
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index 27948e1798e..a384f80f595 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -443,6 +443,44 @@ struct cx231xx_board cx231xx_boards[] = {
.gpio = NULL,
} },
},
+ [CX231XX_BOARD_KWORLD_UB445_USB_HYBRID] = {
+ .name = "Kworld UB445 USB Hybrid",
+ .tuner_type = TUNER_NXP_TDA18271,
+ .tuner_addr = 0x60,
+ .decoder = CX231XX_AVDECODER,
+ .output_mode = OUT_MODE_VIP11,
+ .demod_xfer_mode = 0,
+ .ctl_pin_status_mask = 0xFFFFFFC4,
+ .agc_analog_digital_select_gpio = 0x11, /* According to the PV cxPolaris.inf file */
+ .tuner_sif_gpio = -1,
+ .tuner_scl_gpio = -1,
+ .tuner_sda_gpio = -1,
+ .gpio_pin_status_mask = 0x4001000,
+ .tuner_i2c_master = 2,
+ .demod_i2c_master = 1,
+ .ir_i2c_master = 2,
+ .has_dvb = 1,
+ .demod_addr = 0x10,
+ .norm = V4L2_STD_NTSC_M,
+ .input = {{
+ .type = CX231XX_VMUX_TELEVISION,
+ .vmux = CX231XX_VIN_3_1,
+ .amux = CX231XX_AMUX_VIDEO,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_COMPOSITE1,
+ .vmux = CX231XX_VIN_2_1,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ }, {
+ .type = CX231XX_VMUX_SVIDEO,
+ .vmux = CX231XX_VIN_1_1 |
+ (CX231XX_VIN_1_2 << 8) |
+ CX25840_SVIDEO_ON,
+ .amux = CX231XX_AMUX_LINE_IN,
+ .gpio = NULL,
+ } },
+ },
[CX231XX_BOARD_PV_PLAYTV_USB_HYBRID] = {
.name = "Pixelview PlayTV USB Hybrid",
.tuner_type = TUNER_NXP_TDA18271,
@@ -703,6 +741,8 @@ struct usb_device_id cx231xx_id_table[] = {
.driver_info = CX231XX_BOARD_PV_XCAPTURE_USB},
{USB_DEVICE(0x1b80, 0xe424),
.driver_info = CX231XX_BOARD_KWORLD_UB430_USB_HYBRID},
+ {USB_DEVICE(0x1b80, 0xe421),
+ .driver_info = CX231XX_BOARD_KWORLD_UB445_USB_HYBRID},
{USB_DEVICE(0x1f4d, 0x0237),
.driver_info = CX231XX_BOARD_ICONBIT_U100},
{USB_DEVICE(0x0fd9, 0x0037),
diff --git a/drivers/media/usb/cx231xx/cx231xx-dvb.c b/drivers/media/usb/cx231xx/cx231xx-dvb.c
index 14e26106fd7..4504bc6a700 100644
--- a/drivers/media/usb/cx231xx/cx231xx-dvb.c
+++ b/drivers/media/usb/cx231xx/cx231xx-dvb.c
@@ -657,6 +657,7 @@ static int dvb_init(struct cx231xx *dev)
}
break;
case CX231XX_BOARD_CNXT_RDU_253S:
+ case CX231XX_BOARD_KWORLD_UB445_USB_HYBRID:
dev->dvb->frontend = dvb_attach(s5h1411_attach,
&tda18271_s5h1411_config,
diff --git a/drivers/media/usb/cx231xx/cx231xx.h b/drivers/media/usb/cx231xx/cx231xx.h
index e812119ea7a..babca7fb85e 100644
--- a/drivers/media/usb/cx231xx/cx231xx.h
+++ b/drivers/media/usb/cx231xx/cx231xx.h
@@ -72,6 +72,7 @@
#define CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC 15
#define CX231XX_BOARD_ELGATO_VIDEO_CAPTURE_V2 16
#define CX231XX_BOARD_OTG102 17
+#define CX231XX_BOARD_KWORLD_UB445_USB_HYBRID 18
/* Limits minimum and default number of buffers */
#define CX231XX_MIN_BUF 4
diff --git a/drivers/media/usb/dvb-usb-v2/Kconfig b/drivers/media/usb/dvb-usb-v2/Kconfig
index a3c8ecf2207..2059d0c86ad 100644
--- a/drivers/media/usb/dvb-usb-v2/Kconfig
+++ b/drivers/media/usb/dvb-usb-v2/Kconfig
@@ -1,6 +1,6 @@
config DVB_USB_V2
tristate "Support for various USB DVB devices v2"
- depends on DVB_CORE && USB && I2C
+ depends on DVB_CORE && USB && I2C && (RC_CORE || RC_CORE=n)
help
By enabling this you will be able to choose the various supported
USB1.1 and USB2.0 DVB devices.
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb.h b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
index 399916bd588..124b4baa7e9 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb.h
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb.h
@@ -352,9 +352,7 @@ struct dvb_usb_adapter {
* @rc_map: name of rc codes table
* @rc_polling_active: set when RC polling is active
* @udev: pointer to the device's struct usb_device
- * @intf: pointer to the device's usb interface
* @rc: remote controller configuration
- * @probe_work: work to defer .probe()
* @powered: indicates whether the device is powered or not
* @usb_mutex: mutex for usb control messages
* @i2c_mutex: mutex for i2c-transfers
@@ -370,10 +368,7 @@ struct dvb_usb_device {
const char *rc_map;
bool rc_polling_active;
struct usb_device *udev;
- struct usb_interface *intf;
struct dvb_usb_rc rc;
- struct work_struct probe_work;
- pid_t work_pid;
int powered;
/* locking */
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index 19f6737d981..8a054d66e70 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -833,20 +833,44 @@ err:
return ret;
}
-/*
- * udev, which is used for the firmware downloading, requires we cannot
- * block during module_init(). module_init() calls USB probe() which
- * is this routine. Due to that we delay actual operation using workqueue
- * and return always success here.
- */
-static void dvb_usbv2_init_work(struct work_struct *work)
+int dvb_usbv2_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
{
int ret;
- struct dvb_usb_device *d =
- container_of(work, struct dvb_usb_device, probe_work);
+ struct dvb_usb_device *d;
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct dvb_usb_driver_info *driver_info =
+ (struct dvb_usb_driver_info *) id->driver_info;
+
+ dev_dbg(&udev->dev, "%s: bInterfaceNumber=%d\n", __func__,
+ intf->cur_altsetting->desc.bInterfaceNumber);
+
+ if (!id->driver_info) {
+ dev_err(&udev->dev, "%s: driver_info failed\n", KBUILD_MODNAME);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ d = kzalloc(sizeof(struct dvb_usb_device), GFP_KERNEL);
+ if (!d) {
+ dev_err(&udev->dev, "%s: kzalloc() failed\n", KBUILD_MODNAME);
+ ret = -ENOMEM;
+ goto err;
+ }
- d->work_pid = current->pid;
- dev_dbg(&d->udev->dev, "%s: work_pid=%d\n", __func__, d->work_pid);
+ d->name = driver_info->name;
+ d->rc_map = driver_info->rc_map;
+ d->udev = udev;
+ d->props = driver_info->props;
+
+ if (intf->cur_altsetting->desc.bInterfaceNumber !=
+ d->props->bInterfaceNumber) {
+ ret = -ENODEV;
+ goto err_free_all;
+ }
+
+ mutex_init(&d->usb_mutex);
+ mutex_init(&d->i2c_mutex);
if (d->props->size_of_priv) {
d->priv = kzalloc(d->props->size_of_priv, GFP_KERNEL);
@@ -854,7 +878,7 @@ static void dvb_usbv2_init_work(struct work_struct *work)
dev_err(&d->udev->dev, "%s: kzalloc() failed\n",
KBUILD_MODNAME);
ret = -ENOMEM;
- goto err_usb_driver_release_interface;
+ goto err_free_all;
}
}
@@ -884,20 +908,12 @@ static void dvb_usbv2_init_work(struct work_struct *work)
* device. As 'new' device is warm we should
* never go here again.
*/
- return;
+ goto exit;
} else {
- /*
- * Unexpected error. We must unregister driver
- * manually from the device, because device is
- * already register by returning from probe()
- * with success. usb_driver_release_interface()
- * finally calls disconnect() in order to free
- * resources.
- */
- goto err_usb_driver_release_interface;
+ goto err_free_all;
}
} else {
- goto err_usb_driver_release_interface;
+ goto err_free_all;
}
}
@@ -906,73 +922,17 @@ static void dvb_usbv2_init_work(struct work_struct *work)
ret = dvb_usbv2_init(d);
if (ret < 0)
- goto err_usb_driver_release_interface;
+ goto err_free_all;
dev_info(&d->udev->dev,
"%s: '%s' successfully initialized and connected\n",
KBUILD_MODNAME, d->name);
-
- return;
-err_usb_driver_release_interface:
- dev_info(&d->udev->dev, "%s: '%s' error while loading driver (%d)\n",
- KBUILD_MODNAME, d->name, ret);
- usb_driver_release_interface(to_usb_driver(d->intf->dev.driver),
- d->intf);
- dev_dbg(&d->udev->dev, "%s: failed=%d\n", __func__, ret);
- return;
-}
-
-int dvb_usbv2_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- int ret;
- struct dvb_usb_device *d;
- struct usb_device *udev = interface_to_usbdev(intf);
- struct dvb_usb_driver_info *driver_info =
- (struct dvb_usb_driver_info *) id->driver_info;
-
- dev_dbg(&udev->dev, "%s: bInterfaceNumber=%d\n", __func__,
- intf->cur_altsetting->desc.bInterfaceNumber);
-
- if (!id->driver_info) {
- dev_err(&udev->dev, "%s: driver_info failed\n", KBUILD_MODNAME);
- ret = -ENODEV;
- goto err;
- }
-
- d = kzalloc(sizeof(struct dvb_usb_device), GFP_KERNEL);
- if (!d) {
- dev_err(&udev->dev, "%s: kzalloc() failed\n", KBUILD_MODNAME);
- ret = -ENOMEM;
- goto err;
- }
-
- d->name = driver_info->name;
- d->rc_map = driver_info->rc_map;
- d->udev = udev;
- d->intf = intf;
- d->props = driver_info->props;
-
- if (d->intf->cur_altsetting->desc.bInterfaceNumber !=
- d->props->bInterfaceNumber) {
- ret = -ENODEV;
- goto err_kfree;
- }
-
- mutex_init(&d->usb_mutex);
- mutex_init(&d->i2c_mutex);
- INIT_WORK(&d->probe_work, dvb_usbv2_init_work);
+exit:
usb_set_intfdata(intf, d);
- ret = schedule_work(&d->probe_work);
- if (ret < 0) {
- dev_err(&d->udev->dev, "%s: schedule_work() failed\n",
- KBUILD_MODNAME);
- goto err_kfree;
- }
return 0;
-err_kfree:
- kfree(d);
+err_free_all:
+ dvb_usbv2_exit(d);
err:
dev_dbg(&udev->dev, "%s: failed=%d\n", __func__, ret);
return ret;
@@ -984,12 +944,8 @@ void dvb_usbv2_disconnect(struct usb_interface *intf)
struct dvb_usb_device *d = usb_get_intfdata(intf);
const char *name = d->name;
struct device dev = d->udev->dev;
- dev_dbg(&d->udev->dev, "%s: pid=%d work_pid=%d\n", __func__,
- current->pid, d->work_pid);
-
- /* ensure initialization work is finished until release resources */
- if (d->work_pid != current->pid)
- cancel_work_sync(&d->probe_work);
+ dev_dbg(&d->udev->dev, "%s: bInterfaceNumber=%d\n", __func__,
+ intf->cur_altsetting->desc.bInterfaceNumber);
if (d->props->exit)
d->props->exit(d);
diff --git a/drivers/media/usb/dvb-usb-v2/lmedm04.c b/drivers/media/usb/dvb-usb-v2/lmedm04.c
index b3fd0ffa3c3..f674dc024d0 100644
--- a/drivers/media/usb/dvb-usb-v2/lmedm04.c
+++ b/drivers/media/usb/dvb-usb-v2/lmedm04.c
@@ -1225,7 +1225,7 @@ static int lme2510_identify_state(struct dvb_usb_device *d, const char **name)
usb_reset_configuration(d->udev);
usb_set_interface(d->udev,
- d->intf->cur_altsetting->desc.bInterfaceNumber, 1);
+ d->props->bInterfaceNumber, 1);
st->dvb_usb_lme2510_firmware = dvb_usb_lme2510_firmware;
diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
index f08136052f9..829323e42ca 100644
--- a/drivers/media/usb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
@@ -3589,6 +3589,8 @@ struct usb_device_id dib0700_usb_id_table[] = {
{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_TFE7790P) },
{ USB_DEVICE(USB_VID_DIBCOM, USB_PID_DIBCOM_TFE8096P) },
/* 80 */{ USB_DEVICE(USB_VID_ELGATO, USB_PID_ELGATO_EYETV_DTT_2) },
+ { USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_2002E) },
+ { USB_DEVICE(USB_VID_PCTV, USB_PID_PCTV_2002E_SE) },
{ 0 } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, dib0700_usb_id_table);
@@ -3993,12 +3995,20 @@ struct dvb_usb_device_properties dib0700_devices[] = {
}
},
- .num_device_descs = 1,
+ .num_device_descs = 3,
.devices = {
{ "Hauppauge Nova-TD Stick (52009)",
{ &dib0700_usb_id_table[35], NULL },
{ NULL },
},
+ { "PCTV 2002e",
+ { &dib0700_usb_id_table[81], NULL },
+ { NULL },
+ },
+ { "PCTV 2002e SE",
+ { &dib0700_usb_id_table[82], NULL },
+ { NULL },
+ },
},
.rc.core = {
diff --git a/drivers/media/usb/dvb-usb/m920x.c b/drivers/media/usb/dvb-usb/m920x.c
index c2b635d6a17..0306cb778df 100644
--- a/drivers/media/usb/dvb-usb/m920x.c
+++ b/drivers/media/usb/dvb-usb/m920x.c
@@ -1212,7 +1212,7 @@ static struct dvb_usb_device_properties vp7049_properties = {
.rc_interval = 150,
.rc_codes = RC_MAP_TWINHAN_VP1027_DVBS,
.rc_query = m920x_rc_core_query,
- .allowed_protos = RC_TYPE_UNKNOWN,
+ .allowed_protos = RC_BIT_UNKNOWN,
},
.size_of_priv = sizeof(struct m920x_state),
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c
index 4851cc2e4a4..c4ff9739a7a 100644
--- a/drivers/media/usb/em28xx/em28xx-i2c.c
+++ b/drivers/media/usb/em28xx/em28xx-i2c.c
@@ -726,7 +726,7 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus,
*eedata = data;
*eedata_len = len;
- dev_config = (void *)eedata;
+ dev_config = (void *)*eedata;
switch (le16_to_cpu(dev_config->chip_conf) >> 4 & 0x3) {
case 0:
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index 1a577ed8ea0..9d103344f34 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -1008,6 +1008,7 @@ static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
else
f->fmt.pix.field = dev->interlaced ?
V4L2_FIELD_INTERLACED : V4L2_FIELD_TOP;
+ f->fmt.pix.priv = 0;
return 0;
}
diff --git a/drivers/media/usb/gspca/Kconfig b/drivers/media/usb/gspca/Kconfig
index 6345f9331e7..4f0c6d566c8 100644
--- a/drivers/media/usb/gspca/Kconfig
+++ b/drivers/media/usb/gspca/Kconfig
@@ -338,6 +338,15 @@ config USB_GSPCA_STK014
To compile this driver as a module, choose M here: the
module will be called gspca_stk014.
+config USB_GSPCA_STK1135
+ tristate "Syntek STK1135 USB Camera Driver"
+ depends on VIDEO_V4L2 && USB_GSPCA
+ help
+ Say Y here if you want support for cameras based on the STK1135 chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gspca_stk1135.
+
config USB_GSPCA_STV0680
tristate "STV0680 USB Camera Driver"
depends on VIDEO_V4L2 && USB_GSPCA
diff --git a/drivers/media/usb/gspca/Makefile b/drivers/media/usb/gspca/Makefile
index c901da0bd65..5855131ab8b 100644
--- a/drivers/media/usb/gspca/Makefile
+++ b/drivers/media/usb/gspca/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_USB_GSPCA_SQ905C) += gspca_sq905c.o
obj-$(CONFIG_USB_GSPCA_SQ930X) += gspca_sq930x.o
obj-$(CONFIG_USB_GSPCA_SUNPLUS) += gspca_sunplus.o
obj-$(CONFIG_USB_GSPCA_STK014) += gspca_stk014.o
+obj-$(CONFIG_USB_GSPCA_STK1135) += gspca_stk1135.o
obj-$(CONFIG_USB_GSPCA_STV0680) += gspca_stv0680.o
obj-$(CONFIG_USB_GSPCA_T613) += gspca_t613.o
obj-$(CONFIG_USB_GSPCA_TOPRO) += gspca_topro.o
@@ -78,6 +79,7 @@ gspca_sq905-objs := sq905.o
gspca_sq905c-objs := sq905c.o
gspca_sq930x-objs := sq930x.o
gspca_stk014-objs := stk014.o
+gspca_stk1135-objs := stk1135.o
gspca_stv0680-objs := stv0680.o
gspca_sunplus-objs := sunplus.o
gspca_t613-objs := t613.o
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index b7ae8721b84..048507b27bb 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -1266,6 +1266,7 @@ static void gspca_release(struct v4l2_device *v4l2_device)
static int dev_open(struct file *file)
{
struct gspca_dev *gspca_dev = video_drvdata(file);
+ int ret;
PDEBUG(D_STREAM, "[%s] open", current->comm);
@@ -1273,7 +1274,10 @@ static int dev_open(struct file *file)
if (!try_module_get(gspca_dev->module))
return -ENODEV;
- return v4l2_fh_open(file);
+ ret = v4l2_fh_open(file);
+ if (ret)
+ module_put(gspca_dev->module);
+ return ret;
}
static int dev_close(struct file *file)
diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
index a3958ee8681..8937d79fd17 100644
--- a/drivers/media/usb/gspca/ov519.c
+++ b/drivers/media/usb/gspca/ov519.c
@@ -75,6 +75,8 @@ struct sd {
struct v4l2_ctrl *brightness;
};
+ u8 revision;
+
u8 packet_nr;
char bridge;
@@ -3080,8 +3082,8 @@ static void ov518_configure(struct gspca_dev *gspca_dev)
};
/* First 5 bits of custom ID reg are a revision ID on OV518 */
- PDEBUG(D_PROBE, "Device revision %d",
- 0x1f & reg_r(sd, R51x_SYS_CUST_ID));
+ sd->revision = reg_r(sd, R51x_SYS_CUST_ID) & 0x1f;
+ PDEBUG(D_PROBE, "Device revision %d", sd->revision);
write_regvals(sd, init_518, ARRAY_SIZE(init_518));
@@ -3657,7 +3659,11 @@ static void ov518_mode_init_regs(struct sd *sd)
reg_w(sd, 0x2f, 0x80);
/******** Set the framerate ********/
- sd->clockdiv = 1;
+ if (sd->bridge == BRIDGE_OV518PLUS && sd->revision == 0 &&
+ sd->sensor == SEN_OV7620AE)
+ sd->clockdiv = 0;
+ else
+ sd->clockdiv = 1;
/* Mode independent, but framerate dependent, regs */
/* 0x51: Clock divider; Only works on some cams which use 2 crystals */
@@ -3668,12 +3674,24 @@ static void ov518_mode_init_regs(struct sd *sd)
if (sd->bridge == BRIDGE_OV518PLUS) {
switch (sd->sensor) {
case SEN_OV7620AE:
- if (sd->gspca_dev.width == 320) {
- reg_w(sd, 0x20, 0x00);
- reg_w(sd, 0x21, 0x19);
- } else {
+ /*
+ * HdG: 640x480 needs special handling on device
+ * revision 2; we check for device revision > 0 to
+ * avoid regressions, as we don't know the correct
+ * thing to do for revision 1.
+ *
+ * Also this likely means we don't need to
+ * differentiate between the OV7620 and OV7620AE;
+ * earlier testing that hit this same problem was
+ * likely done with revision < 2 cams using an
+ * OV7620 and revision 2 cams using an OV7620AE.
+ */
+ if (sd->revision > 0 && sd->gspca_dev.width == 640) {
reg_w(sd, 0x20, 0x60);
reg_w(sd, 0x21, 0x1f);
+ } else {
+ reg_w(sd, 0x20, 0x00);
+ reg_w(sd, 0x21, 0x19);
}
break;
case SEN_OV7620:
diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
index 2e28c81a03a..03a33c46ca2 100644
--- a/drivers/media/usb/gspca/ov534.c
+++ b/drivers/media/usb/gspca/ov534.c
@@ -1305,8 +1305,7 @@ static int sd_init(struct gspca_dev *gspca_dev)
ov534_set_led(gspca_dev, 1);
sccb_w_array(gspca_dev, sensor_init[sd->sensor].val,
sensor_init[sd->sensor].len);
- if (sd->sensor == SENSOR_OV767x)
- sd_start(gspca_dev);
+
sd_stopN(gspca_dev);
/* set_frame_rate(gspca_dev); */
diff --git a/drivers/media/usb/gspca/stk1135.c b/drivers/media/usb/gspca/stk1135.c
new file mode 100644
index 00000000000..585868835ac
--- /dev/null
+++ b/drivers/media/usb/gspca/stk1135.c
@@ -0,0 +1,685 @@
+/*
+ * Syntek STK1135 subdriver
+ *
+ * Copyright (c) 2013 Ondrej Zary
+ *
+ * Based on Syntekdriver (stk11xx) by Nicolas VIVIEN:
+ * http://syntekdriver.sourceforge.net
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#define MODULE_NAME "stk1135"
+
+#include "gspca.h"
+#include "stk1135.h"
+
+MODULE_AUTHOR("Ondrej Zary");
+MODULE_DESCRIPTION("Syntek STK1135 USB Camera Driver");
+MODULE_LICENSE("GPL");
+
+
+/* specific webcam descriptor */
+struct sd {
+ struct gspca_dev gspca_dev; /* !! must be the first item */
+
+ u8 pkt_seq;
+ u8 sensor_page;
+
+ bool flip_status;
+ u8 flip_debounce;
+
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+};
+
+static const struct v4l2_pix_format stk1135_modes[] = {
+ {160, 120, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 160,
+ .sizeimage = 160 * 120,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {176, 144, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 176,
+ .sizeimage = 176 * 144,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {320, 240, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 320,
+ .sizeimage = 320 * 240,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {352, 288, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 352,
+ .sizeimage = 352 * 288,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {640, 480, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 640,
+ .sizeimage = 640 * 480,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {720, 576, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 720,
+ .sizeimage = 720 * 576,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {800, 600, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 800,
+ .sizeimage = 800 * 600,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {1024, 768, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 1024,
+ .sizeimage = 1024 * 768,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+ {1280, 1024, V4L2_PIX_FMT_SBGGR8, V4L2_FIELD_NONE,
+ .bytesperline = 1280,
+ .sizeimage = 1280 * 1024,
+ .colorspace = V4L2_COLORSPACE_SRGB},
+};
+
+/* -- read a register -- */
+static u8 reg_r(struct gspca_dev *gspca_dev, u16 index)
+{
+ struct usb_device *dev = gspca_dev->dev;
+ int ret;
+
+ if (gspca_dev->usb_err < 0)
+ return 0;
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ 0x00,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ 0x00,
+ index,
+ gspca_dev->usb_buf, 1,
+ 500);
+
+ PDEBUG(D_USBI, "reg_r 0x%x=0x%02x", index, gspca_dev->usb_buf[0]);
+ if (ret < 0) {
+ pr_err("reg_r 0x%x err %d\n", index, ret);
+ gspca_dev->usb_err = ret;
+ return 0;
+ }
+
+ return gspca_dev->usb_buf[0];
+}
+
+/* -- write a register -- */
+static void reg_w(struct gspca_dev *gspca_dev, u16 index, u8 val)
+{
+ int ret;
+ struct usb_device *dev = gspca_dev->dev;
+
+ if (gspca_dev->usb_err < 0)
+ return;
+ ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
+ 0x01,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ val,
+ index,
+ NULL,
+ 0,
+ 500);
+ PDEBUG(D_USBO, "reg_w 0x%x:=0x%02x", index, val);
+ if (ret < 0) {
+ pr_err("reg_w 0x%x err %d\n", index, ret);
+ gspca_dev->usb_err = ret;
+ }
+}
+
+static void reg_w_mask(struct gspca_dev *gspca_dev, u16 index, u8 val, u8 mask)
+{
+ val = (reg_r(gspca_dev, index) & ~mask) | (val & mask);
+ reg_w(gspca_dev, index, val);
+}
+
+/* this function is called at probe time */
+static int sd_config(struct gspca_dev *gspca_dev,
+ const struct usb_device_id *id)
+{
+ gspca_dev->cam.cam_mode = stk1135_modes;
+ gspca_dev->cam.nmodes = ARRAY_SIZE(stk1135_modes);
+ return 0;
+}
+
+static int stk1135_serial_wait_ready(struct gspca_dev *gspca_dev)
+{
+ int i = 0;
+ u8 val;
+
+ do {
+ val = reg_r(gspca_dev, STK1135_REG_SICTL + 1);
+ if (i++ > 500) { /* maximum retry count */
+ pr_err("serial bus timeout: status=0x%02x\n", val);
+ return -1;
+ }
+ /* repeat if BUSY or WRITE/READ not finished */
+ } while ((val & 0x10) || !(val & 0x05));
+
+ return 0;
+}
+
+static u8 sensor_read_8(struct gspca_dev *gspca_dev, u8 addr)
+{
+ reg_w(gspca_dev, STK1135_REG_SBUSR, addr);
+ /* begin read */
+ reg_w(gspca_dev, STK1135_REG_SICTL, 0x20);
+ /* wait until finished */
+ if (stk1135_serial_wait_ready(gspca_dev)) {
+ pr_err("Sensor read failed\n");
+ return 0;
+ }
+
+ return reg_r(gspca_dev, STK1135_REG_SBUSR + 1);
+}
+
+static u16 sensor_read_16(struct gspca_dev *gspca_dev, u8 addr)
+{
+ return (sensor_read_8(gspca_dev, addr) << 8) |
+ sensor_read_8(gspca_dev, 0xf1);
+}
+
+static void sensor_write_8(struct gspca_dev *gspca_dev, u8 addr, u8 data)
+{
+ /* load address and data registers */
+ reg_w(gspca_dev, STK1135_REG_SBUSW, addr);
+ reg_w(gspca_dev, STK1135_REG_SBUSW + 1, data);
+ /* begin write */
+ reg_w(gspca_dev, STK1135_REG_SICTL, 0x01);
+ /* wait until finished */
+ if (stk1135_serial_wait_ready(gspca_dev)) {
+ pr_err("Sensor write failed\n");
+ return;
+ }
+}
+
+static void sensor_write_16(struct gspca_dev *gspca_dev, u8 addr, u16 data)
+{
+ sensor_write_8(gspca_dev, addr, data >> 8);
+ sensor_write_8(gspca_dev, 0xf1, data & 0xff);
+}
+
+static void sensor_set_page(struct gspca_dev *gspca_dev, u8 page)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (page != sd->sensor_page) {
+ sensor_write_16(gspca_dev, 0xf0, page);
+ sd->sensor_page = page;
+ }
+}
+
+static u16 sensor_read(struct gspca_dev *gspca_dev, u16 reg)
+{
+ sensor_set_page(gspca_dev, reg >> 8);
+ return sensor_read_16(gspca_dev, reg & 0xff);
+}
+
+static void sensor_write(struct gspca_dev *gspca_dev, u16 reg, u16 val)
+{
+ sensor_set_page(gspca_dev, reg >> 8);
+ sensor_write_16(gspca_dev, reg & 0xff, val);
+}
+
+static void sensor_write_mask(struct gspca_dev *gspca_dev,
+ u16 reg, u16 val, u16 mask)
+{
+ val = (sensor_read(gspca_dev, reg) & ~mask) | (val & mask);
+ sensor_write(gspca_dev, reg, val);
+}
+
+struct sensor_val {
+ u16 reg;
+ u16 val;
+};
+
+/* configure MT9M112 sensor */
+static void stk1135_configure_mt9m112(struct gspca_dev *gspca_dev)
+{
+ static const struct sensor_val cfg[] = {
+ /* restart&reset, chip enable, reserved */
+ { 0x00d, 0x000b }, { 0x00d, 0x0008 }, { 0x035, 0x0022 },
+ /* mode ctl: AWB on, AE both, clip aper corr, defect corr, AE */
+ { 0x106, 0x700e },
+
+ { 0x2dd, 0x18e0 }, /* B-R thresholds, */
+
+ /* AWB */
+ { 0x21f, 0x0180 }, /* Cb and Cr limits */
+ { 0x220, 0xc814 }, { 0x221, 0x8080 }, /* lum limits, RGB gain */
+ { 0x222, 0xa078 }, { 0x223, 0xa078 }, /* R, B limit */
+ { 0x224, 0x5f20 }, { 0x228, 0xea02 }, /* mtx adj lim, adv ctl */
+ { 0x229, 0x867a }, /* wide gates */
+
+ /* Color correction */
+ /* imager gains base, delta, delta signs */
+ { 0x25e, 0x594c }, { 0x25f, 0x4d51 }, { 0x260, 0x0002 },
+ /* AWB adv ctl 2, gain offs */
+ { 0x2ef, 0x0008 }, { 0x2f2, 0x0000 },
+ /* base matrix signs, scale K1-5, K6-9 */
+ { 0x202, 0x00ee }, { 0x203, 0x3923 }, { 0x204, 0x0724 },
+ /* base matrix coef */
+ { 0x209, 0x00cd }, { 0x20a, 0x0093 }, { 0x20b, 0x0004 },/*K1-3*/
+ { 0x20c, 0x005c }, { 0x20d, 0x00d9 }, { 0x20e, 0x0053 },/*K4-6*/
+ { 0x20f, 0x0008 }, { 0x210, 0x0091 }, { 0x211, 0x00cf },/*K7-9*/
+ { 0x215, 0x0000 }, /* delta mtx signs */
+ /* delta matrix coef */
+ { 0x216, 0x0000 }, { 0x217, 0x0000 }, { 0x218, 0x0000 },/*D1-3*/
+ { 0x219, 0x0000 }, { 0x21a, 0x0000 }, { 0x21b, 0x0000 },/*D4-6*/
+ { 0x21c, 0x0000 }, { 0x21d, 0x0000 }, { 0x21e, 0x0000 },/*D7-9*/
+ /* enable & disable manual WB to apply color corr. settings */
+ { 0x106, 0xf00e }, { 0x106, 0x700e },
+
+ /* Lens shading correction */
+ { 0x180, 0x0007 }, /* control */
+ /* vertical knee 0, 2+1, 4+3 */
+ { 0x181, 0xde13 }, { 0x182, 0xebe2 }, { 0x183, 0x00f6 }, /* R */
+ { 0x184, 0xe114 }, { 0x185, 0xeadd }, { 0x186, 0xfdf6 }, /* G */
+ { 0x187, 0xe511 }, { 0x188, 0xede6 }, { 0x189, 0xfbf7 }, /* B */
+ /* horizontal knee 0, 2+1, 4+3, 5 */
+ { 0x18a, 0xd613 }, { 0x18b, 0xedec }, /* R .. */
+ { 0x18c, 0xf9f2 }, { 0x18d, 0x0000 }, /* .. R */
+ { 0x18e, 0xd815 }, { 0x18f, 0xe9ea }, /* G .. */
+ { 0x190, 0xf9f1 }, { 0x191, 0x0002 }, /* .. G */
+ { 0x192, 0xde10 }, { 0x193, 0xefef }, /* B .. */
+ { 0x194, 0xfbf4 }, { 0x195, 0x0002 }, /* .. B */
+ /* vertical knee 6+5, 8+7 */
+ { 0x1b6, 0x0e06 }, { 0x1b7, 0x2713 }, /* R */
+ { 0x1b8, 0x1106 }, { 0x1b9, 0x2713 }, /* G */
+ { 0x1ba, 0x0c03 }, { 0x1bb, 0x2a0f }, /* B */
+ /* horizontal knee 7+6, 9+8, 10 */
+ { 0x1bc, 0x1208 }, { 0x1bd, 0x1a16 }, { 0x1be, 0x0022 }, /* R */
+ { 0x1bf, 0x150a }, { 0x1c0, 0x1c1a }, { 0x1c1, 0x002d }, /* G */
+ { 0x1c2, 0x1109 }, { 0x1c3, 0x1414 }, { 0x1c4, 0x002a }, /* B */
+ { 0x106, 0x740e }, /* enable lens shading correction */
+
+ /* Gamma correction - context A */
+ { 0x153, 0x0b03 }, { 0x154, 0x4722 }, { 0x155, 0xac82 },
+ { 0x156, 0xdac7 }, { 0x157, 0xf5e9 }, { 0x158, 0xff00 },
+ /* Gamma correction - context B */
+ { 0x1dc, 0x0b03 }, { 0x1dd, 0x4722 }, { 0x1de, 0xac82 },
+ { 0x1df, 0xdac7 }, { 0x1e0, 0xf5e9 }, { 0x1e1, 0xff00 },
+
+ /* output format: RGB, invert output pixclock, output bayer */
+ { 0x13a, 0x4300 }, { 0x19b, 0x4300 }, /* for context A, B */
+ { 0x108, 0x0180 }, /* format control - enable bayer row flip */
+
+ { 0x22f, 0xd100 }, { 0x29c, 0xd100 }, /* AE A, B */
+
+ /* default prg conf, prg ctl - by 0x2d2, prg advance - PA1 */
+ { 0x2d2, 0x0000 }, { 0x2cc, 0x0004 }, { 0x2cb, 0x0001 },
+
+ { 0x22e, 0x0c3c }, { 0x267, 0x1010 }, /* AE tgt ctl, gain lim */
+
+ /* PLL */
+ { 0x065, 0xa000 }, /* clk ctl - enable PLL (clear bit 14) */
+ { 0x066, 0x2003 }, { 0x067, 0x0501 }, /* PLL M=128, N=3, P=1 */
+ { 0x065, 0x2000 }, /* disable PLL bypass (clear bit 15) */
+
+ { 0x005, 0x01b8 }, { 0x007, 0x00d8 }, /* horiz blanking B, A */
+
+ /* AE line size, shutter delay limit */
+ { 0x239, 0x06c0 }, { 0x23b, 0x040e }, /* for context A */
+ { 0x23a, 0x06c0 }, { 0x23c, 0x0564 }, /* for context B */
+ /* shutter width basis 60Hz, 50Hz */
+ { 0x257, 0x0208 }, { 0x258, 0x0271 }, /* for context A */
+ { 0x259, 0x0209 }, { 0x25a, 0x0271 }, /* for context B */
+
+ { 0x25c, 0x120d }, { 0x25d, 0x1712 }, /* flicker 60Hz, 50Hz */
+ { 0x264, 0x5e1c }, /* reserved */
+ /* flicker, AE gain limits, gain zone limits */
+ { 0x25b, 0x0003 }, { 0x236, 0x7810 }, { 0x237, 0x8304 },
+
+ { 0x008, 0x0021 }, /* vert blanking A */
+ };
+ int i;
+ u16 width, height;
+
+ for (i = 0; i < ARRAY_SIZE(cfg); i++)
+ sensor_write(gspca_dev, cfg[i].reg, cfg[i].val);
+
+ /* set output size */
+ width = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].width;
+ height = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].height;
+ if (width <= 640) { /* use context A (half readout speed by default) */
+ sensor_write(gspca_dev, 0x1a7, width);
+ sensor_write(gspca_dev, 0x1aa, height);
+ /* set read mode context A */
+ sensor_write(gspca_dev, 0x0c8, 0x0000);
+ /* set resize, read mode, vblank, hblank context A */
+ sensor_write(gspca_dev, 0x2c8, 0x0000);
+ } else { /* use context B (full readout speed by default) */
+ sensor_write(gspca_dev, 0x1a1, width);
+ sensor_write(gspca_dev, 0x1a4, height);
+ /* set read mode context B */
+ sensor_write(gspca_dev, 0x0c8, 0x0008);
+ /* set resize, read mode, vblank, hblank context B */
+ sensor_write(gspca_dev, 0x2c8, 0x040b);
+ }
+}
+
+static void stk1135_configure_clock(struct gspca_dev *gspca_dev)
+{
+ /* configure SCLKOUT */
+ reg_w(gspca_dev, STK1135_REG_TMGEN, 0x12);
+ /* set 1 clock per pixel */
+ /* and positive edge clocked pulse high when pixel counter = 0 */
+ reg_w(gspca_dev, STK1135_REG_TCP1 + 0, 0x41);
+ reg_w(gspca_dev, STK1135_REG_TCP1 + 1, 0x00);
+ reg_w(gspca_dev, STK1135_REG_TCP1 + 2, 0x00);
+ reg_w(gspca_dev, STK1135_REG_TCP1 + 3, 0x00);
+
+ /* enable CLKOUT for sensor */
+ reg_w(gspca_dev, STK1135_REG_SENSO + 0, 0x10);
+ /* disable STOP clock */
+ reg_w(gspca_dev, STK1135_REG_SENSO + 1, 0x00);
+ /* set lower 8 bits of PLL feedback divider */
+ reg_w(gspca_dev, STK1135_REG_SENSO + 3, 0x07);
+ /* set other PLL parameters */
+ reg_w(gspca_dev, STK1135_REG_PLLFD, 0x06);
+ /* enable timing generator */
+ reg_w(gspca_dev, STK1135_REG_TMGEN, 0x80);
+ /* enable PLL */
+ reg_w(gspca_dev, STK1135_REG_SENSO + 2, 0x04);
+
+ /* set serial interface clock divider (30MHz / (0x1f * 16 + 2) ≈ 60240 Hz, i.e. ~60 kHz) */
+ reg_w(gspca_dev, STK1135_REG_SICTL + 2, 0x1f);
+}
+
+static void stk1135_camera_disable(struct gspca_dev *gspca_dev)
+{
+ /* set capture end Y position to 0 */
+ reg_w(gspca_dev, STK1135_REG_CIEPO + 2, 0x00);
+ reg_w(gspca_dev, STK1135_REG_CIEPO + 3, 0x00);
+ /* disable capture */
+ reg_w_mask(gspca_dev, STK1135_REG_SCTRL, 0x00, 0x80);
+
+ /* enable sensor standby and disable chip enable */
+ sensor_write_mask(gspca_dev, 0x00d, 0x0004, 0x000c);
+
+ /* disable PLL */
+ reg_w_mask(gspca_dev, STK1135_REG_SENSO + 2, 0x00, 0x01);
+ /* disable timing generator */
+ reg_w(gspca_dev, STK1135_REG_TMGEN, 0x00);
+ /* enable STOP clock */
+ reg_w(gspca_dev, STK1135_REG_SENSO + 1, 0x20);
+ /* disable CLKOUT for sensor */
+ reg_w(gspca_dev, STK1135_REG_SENSO, 0x00);
+
+ /* disable sensor (GPIO5) and enable GPIO0,3,6 (?) - sensor standby? */
+ reg_w(gspca_dev, STK1135_REG_GCTRL, 0x49);
+}
+
+/* this function is called at probe and resume time */
+static int sd_init(struct gspca_dev *gspca_dev)
+{
+ u16 sensor_id;
+ char *sensor_name;
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ /* set GPIO3,4,5,6 direction to output */
+ reg_w(gspca_dev, STK1135_REG_GCTRL + 2, 0x78);
+ /* enable sensor (GPIO5) */
+ reg_w(gspca_dev, STK1135_REG_GCTRL, (1 << 5));
+ /* disable ROM interface */
+ reg_w(gspca_dev, STK1135_REG_GCTRL + 3, 0x80);
+ /* enable interrupts from GPIO8 (flip sensor) and GPIO9 (???) */
+ reg_w(gspca_dev, STK1135_REG_ICTRL + 1, 0x00);
+ reg_w(gspca_dev, STK1135_REG_ICTRL + 3, 0x03);
+ /* enable remote wakeup from GPIO9 (???) */
+ reg_w(gspca_dev, STK1135_REG_RMCTL + 1, 0x00);
+ reg_w(gspca_dev, STK1135_REG_RMCTL + 3, 0x02);
+
+ /* reset serial interface */
+ reg_w(gspca_dev, STK1135_REG_SICTL, 0x80);
+ reg_w(gspca_dev, STK1135_REG_SICTL, 0x00);
+ /* set sensor address */
+ reg_w(gspca_dev, STK1135_REG_SICTL + 3, 0xba);
+ /* disable alt 2-wire serial interface */
+ reg_w(gspca_dev, STK1135_REG_ASIC + 3, 0x00);
+
+ stk1135_configure_clock(gspca_dev);
+
+ /* read sensor ID */
+ sd->sensor_page = 0xff;
+ sensor_id = sensor_read(gspca_dev, 0x000);
+
+ switch (sensor_id) {
+ case 0x148c:
+ sensor_name = "MT9M112";
+ break;
+ default:
+ sensor_name = "unknown";
+ }
+ pr_info("Detected sensor type %s (0x%x)\n", sensor_name, sensor_id);
+
+ stk1135_camera_disable(gspca_dev);
+
+ return gspca_dev->usb_err;
+}
+
+/* -- start the camera -- */
+static int sd_start(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ u16 width, height;
+
+ /* enable sensor (GPIO5) */
+ reg_w(gspca_dev, STK1135_REG_GCTRL, (1 << 5));
+
+ stk1135_configure_clock(gspca_dev);
+
+ /* set capture start position X = 0, Y = 0 */
+ reg_w(gspca_dev, STK1135_REG_CISPO + 0, 0x00);
+ reg_w(gspca_dev, STK1135_REG_CISPO + 1, 0x00);
+ reg_w(gspca_dev, STK1135_REG_CISPO + 2, 0x00);
+ reg_w(gspca_dev, STK1135_REG_CISPO + 3, 0x00);
+
+ /* set capture end position */
+ width = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].width;
+ height = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].height;
+ reg_w(gspca_dev, STK1135_REG_CIEPO + 0, width & 0xff);
+ reg_w(gspca_dev, STK1135_REG_CIEPO + 1, width >> 8);
+ reg_w(gspca_dev, STK1135_REG_CIEPO + 2, height & 0xff);
+ reg_w(gspca_dev, STK1135_REG_CIEPO + 3, height >> 8);
+
+ /* set 8-bit mode */
+ reg_w(gspca_dev, STK1135_REG_SCTRL, 0x20);
+
+ stk1135_configure_mt9m112(gspca_dev);
+
+ /* enable capture */
+ reg_w_mask(gspca_dev, STK1135_REG_SCTRL, 0x80, 0x80);
+
+ if (gspca_dev->usb_err >= 0)
+ PDEBUG(D_STREAM, "camera started alt: 0x%02x",
+ gspca_dev->alt);
+
+ sd->pkt_seq = 0;
+
+ return gspca_dev->usb_err;
+}
+
+static void sd_stopN(struct gspca_dev *gspca_dev)
+{
+ struct usb_device *dev = gspca_dev->dev;
+
+ usb_set_interface(dev, gspca_dev->iface, 0);
+
+ stk1135_camera_disable(gspca_dev);
+
+ PDEBUG(D_STREAM, "camera stopped");
+}
+
+static void sd_pkt_scan(struct gspca_dev *gspca_dev,
+ u8 *data, /* isoc packet */
+ int len) /* iso packet length */
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ int skip = sizeof(struct stk1135_pkt_header);
+ bool flip;
+ enum gspca_packet_type pkt_type = INTER_PACKET;
+ struct stk1135_pkt_header *hdr = (void *)data;
+ u8 seq;
+
+ if (len < 4) {
+ PDEBUG(D_PACK, "received short packet (less than 4 bytes)");
+ return;
+ }
+
+ /* GPIO 8 is flip sensor (1 = normal position, 0 = flipped to back) */
+ flip = !(le16_to_cpu(hdr->gpio) & (1 << 8));
+ /* it's a switch, needs software debounce */
+ if (sd->flip_status != flip)
+ sd->flip_debounce++;
+ else
+ sd->flip_debounce = 0;
+
+ /* check sequence number (not present in new frame packets) */
+ if (!(hdr->flags & STK1135_HDR_FRAME_START)) {
+ seq = hdr->seq & STK1135_HDR_SEQ_MASK;
+ if (seq != sd->pkt_seq) {
+ PDEBUG(D_PACK, "received out-of-sequence packet");
+ /* resync sequence and discard packet */
+ sd->pkt_seq = seq;
+ gspca_dev->last_packet_type = DISCARD_PACKET;
+ return;
+ }
+ }
+ sd->pkt_seq++;
+ if (sd->pkt_seq > STK1135_HDR_SEQ_MASK)
+ sd->pkt_seq = 0;
+
+ if (len == sizeof(struct stk1135_pkt_header))
+ return;
+
+ if (hdr->flags & STK1135_HDR_FRAME_START) { /* new frame */
+ skip = 8; /* the header is longer */
+ gspca_frame_add(gspca_dev, LAST_PACKET, data, 0);
+ pkt_type = FIRST_PACKET;
+ }
+ gspca_frame_add(gspca_dev, pkt_type, data + skip, len - skip);
+}
+
+static void sethflip(struct gspca_dev *gspca_dev, s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (sd->flip_status)
+ val = !val;
+ sensor_write_mask(gspca_dev, 0x020, val ? 0x0002 : 0x0000, 0x0002);
+}
+
+static void setvflip(struct gspca_dev *gspca_dev, s32 val)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (sd->flip_status)
+ val = !val;
+ sensor_write_mask(gspca_dev, 0x020, val ? 0x0001 : 0x0000, 0x0001);
+}
+
+static void stk1135_dq_callback(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+
+ if (sd->flip_debounce > 100) {
+ sd->flip_status = !sd->flip_status;
+ sethflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->hflip));
+ setvflip(gspca_dev, v4l2_ctrl_g_ctrl(sd->vflip));
+ }
+}
+
+static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct gspca_dev *gspca_dev =
+ container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
+
+ gspca_dev->usb_err = 0;
+
+ if (!gspca_dev->streaming)
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ sethflip(gspca_dev, ctrl->val);
+ break;
+ case V4L2_CID_VFLIP:
+ setvflip(gspca_dev, ctrl->val);
+ break;
+ }
+
+ return gspca_dev->usb_err;
+}
+
+static const struct v4l2_ctrl_ops sd_ctrl_ops = {
+ .s_ctrl = sd_s_ctrl,
+};
+
+static int sd_init_controls(struct gspca_dev *gspca_dev)
+{
+ struct sd *sd = (struct sd *) gspca_dev;
+ struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler;
+
+ gspca_dev->vdev.ctrl_handler = hdl;
+ v4l2_ctrl_handler_init(hdl, 2);
+ sd->hflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ sd->vflip = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ if (hdl->error) {
+ pr_err("Could not initialize controls\n");
+ return hdl->error;
+ }
+ return 0;
+}
+
+/* sub-driver description */
+static const struct sd_desc sd_desc = {
+ .name = MODULE_NAME,
+ .config = sd_config,
+ .init = sd_init,
+ .init_controls = sd_init_controls,
+ .start = sd_start,
+ .stopN = sd_stopN,
+ .pkt_scan = sd_pkt_scan,
+ .dq_callback = stk1135_dq_callback,
+};
+
+/* -- module initialisation -- */
+static const struct usb_device_id device_table[] = {
+ {USB_DEVICE(0x174f, 0x6a31)}, /* ASUS laptop, MT9M112 sensor */
+ {}
+};
+MODULE_DEVICE_TABLE(usb, device_table);
+
+/* -- device connect -- */
+static int sd_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
+ THIS_MODULE);
+}
+
+static struct usb_driver sd_driver = {
+ .name = MODULE_NAME,
+ .id_table = device_table,
+ .probe = sd_probe,
+ .disconnect = gspca_disconnect,
+#ifdef CONFIG_PM
+ .suspend = gspca_suspend,
+ .resume = gspca_resume,
+ .reset_resume = gspca_resume,
+#endif
+};
+
+module_usb_driver(sd_driver);
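The sensor helpers in this new file layer a paged, 16-bit register protocol over the STK1135 serial bus: two 8-bit transfers per access, with the MT9M112 page register (0xf0) rewritten only when sensor_set_page() sees a different page. In sensor_read()/sensor_write() the upper byte of the 16-bit register argument is the page; a short usage sketch, with values taken from the tables above:

	u16 id;

	sensor_write(gspca_dev, 0x106, 0x700e); /* page 1, reg 0x06: mode control */
	id = sensor_read(gspca_dev, 0x000);     /* page 0, reg 0x00: 0x148c = MT9M112 */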
diff --git a/drivers/media/usb/gspca/stk1135.h b/drivers/media/usb/gspca/stk1135.h
new file mode 100644
index 00000000000..e1dd92ab49b
--- /dev/null
+++ b/drivers/media/usb/gspca/stk1135.h
@@ -0,0 +1,57 @@
+/*
+ * STK1135 registers
+ *
+ * Copyright (c) 2013 Ondrej Zary
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define STK1135_REG_GCTRL 0x000 /* GPIO control */
+#define STK1135_REG_ICTRL 0x004 /* Interrupt control */
+#define STK1135_REG_IDATA 0x008 /* Interrupt data */
+#define STK1135_REG_RMCTL 0x00c /* Remote wakeup control */
+#define STK1135_REG_POSVA 0x010 /* Power-on strapping data */
+
+#define STK1135_REG_SENSO 0x018 /* Sensor select options */
+#define STK1135_REG_PLLFD 0x01c /* PLL frequency divider */
+
+#define STK1135_REG_SCTRL 0x100 /* Sensor control register */
+#define STK1135_REG_DCTRL 0x104 /* Decimation control register */
+#define STK1135_REG_CISPO 0x110 /* Capture image starting position */
+#define STK1135_REG_CIEPO 0x114 /* Capture image ending position */
+#define STK1135_REG_TCTRL 0x120 /* Test data control */
+
+#define STK1135_REG_SICTL 0x200 /* Serial interface control register */
+#define STK1135_REG_SBUSW 0x204 /* Serial bus write */
+#define STK1135_REG_SBUSR 0x208 /* Serial bus read */
+#define STK1135_REG_SCSI 0x20c /* Software control serial interface */
+#define STK1135_REG_GSBWP 0x210 /* General serial bus write port */
+#define STK1135_REG_GSBRP 0x214 /* General serial bus read port */
+#define STK1135_REG_ASIC 0x2fc /* Alternate serial interface control */
+
+#define STK1135_REG_TMGEN 0x300 /* Timing generator */
+#define STK1135_REG_TCP1 0x350 /* Timing control parameter 1 */
+
+struct stk1135_pkt_header {
+ u8 flags;
+ u8 seq;
+ __le16 gpio;
+} __packed;
+
+#define STK1135_HDR_FRAME_START (1 << 7)
+#define STK1135_HDR_ODD (1 << 6)
+#define STK1135_HDR_I2C_VBLANK (1 << 5)
+
+#define STK1135_HDR_SEQ_MASK 0x3f
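A sketch of how sd_pkt_scan() in stk1135.c consumes this header; the helper name is invented, the constants are the ones defined above:

static bool example_parse_hdr(const struct stk1135_pkt_header *hdr,
			      u8 *seq, bool *flipped)
{
	/* GPIO bit 8: 1 = camera in normal position, 0 = flipped to the back */
	*flipped = !(le16_to_cpu(hdr->gpio) & (1 << 8));

	/* frame-start packets carry no sequence number */
	if (hdr->flags & STK1135_HDR_FRAME_START)
		return true;

	*seq = hdr->seq & STK1135_HDR_SEQ_MASK;
	return false;
}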
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index cb694055ba7..6e5070774dc 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -303,6 +303,11 @@ static int hdpvr_probe(struct usb_interface *interface,
dev->workqueue = 0;
+ /* init video transfer queues first of all */
+ /* to prevent oops in hdpvr_delete() on error paths */
+ INIT_LIST_HEAD(&dev->free_buff_list);
+ INIT_LIST_HEAD(&dev->rec_buff_list);
+
/* register v4l2_device early so it can be used for printks */
if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) {
dev_err(&interface->dev, "v4l2_device_register failed\n");
@@ -325,10 +330,6 @@ static int hdpvr_probe(struct usb_interface *interface,
if (!dev->workqueue)
goto error;
- /* init video transfer queues */
- INIT_LIST_HEAD(&dev->free_buff_list);
- INIT_LIST_HEAD(&dev->rec_buff_list);
-
dev->options = hdpvr_default_options;
if (default_video_input < HDPVR_VIDEO_INPUTS)
@@ -405,7 +406,7 @@ static int hdpvr_probe(struct usb_interface *interface,
video_nr[atomic_inc_return(&dev_nr)]);
if (retval < 0) {
v4l2_err(&dev->v4l2_dev, "registering videodev failed\n");
- goto error;
+ goto reg_fail;
}
/* let the user know what node this device is now attached to */
diff --git a/drivers/media/usb/hdpvr/hdpvr-video.c b/drivers/media/usb/hdpvr/hdpvr-video.c
index 4f8567aa99d..0500c4175d5 100644
--- a/drivers/media/usb/hdpvr/hdpvr-video.c
+++ b/drivers/media/usb/hdpvr/hdpvr-video.c
@@ -24,6 +24,7 @@
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-common.h>
+#include <media/v4l2-dv-timings.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
#include "hdpvr.h"
@@ -641,7 +642,7 @@ static int vidioc_s_dv_timings(struct file *file, void *_fh,
if (dev->status != STATUS_IDLE)
return -EBUSY;
for (i = 0; i < ARRAY_SIZE(hdpvr_dv_timings); i++)
- if (v4l_match_dv_timings(timings, hdpvr_dv_timings + i, 0))
+ if (v4l2_match_dv_timings(timings, hdpvr_dv_timings + i, 0))
break;
if (i == ARRAY_SIZE(hdpvr_dv_timings))
return -EINVAL;
@@ -689,10 +690,8 @@ static int vidioc_query_dv_timings(struct file *file, void *_fh,
unsigned vsize;
unsigned fps;
- hsize = bt->hfrontporch + bt->hsync + bt->hbackporch + bt->width;
- vsize = bt->vfrontporch + bt->vsync + bt->vbackporch +
- bt->il_vfrontporch + bt->il_vsync + bt->il_vbackporch +
- bt->height;
+ hsize = V4L2_DV_BT_FRAME_WIDTH(bt);
+ vsize = V4L2_DV_BT_FRAME_HEIGHT(bt);
fps = (unsigned)bt->pixelclock / (hsize * vsize);
if (bt->width != vid_info.width ||
bt->height != vid_info.height ||
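V4L2_DV_BT_FRAME_WIDTH/HEIGHT fold in exactly the porch, sync and (interlaced) field-blanking terms that the removed lines summed by hand. A worked example with standard CEA-861 720p60 timings, used here purely as an illustration of the fps arithmetic:

/*
 * 720p60: hsize = 1280 + 110 + 40 + 220 = 1650
 *         vsize =  720 +   5 +  5 +  20 =  750
 *         fps   = 74 250 000 / (1650 * 750) = 60
 */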
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index ab97e7d0b4f..6bc9b8e19e2 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -1,7 +1,7 @@
/*
* s2255drv.c - a driver for the Sensoray 2255 USB video capture device
*
- * Copyright (C) 2007-2010 by Sensoray Company Inc.
+ * Copyright (C) 2007-2013 by Sensoray Company Inc.
* Dean Anderson
*
* Some video buffer code based on vivi driver:
@@ -52,7 +52,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-event.h>
-#define S2255_VERSION "1.22.1"
+#define S2255_VERSION "1.23.1"
#define FIRMWARE_FILE_NAME "f2255usb.bin"
/* default JPEG quality */
@@ -1303,11 +1303,6 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id i)
int ret = 0;
mutex_lock(&q->vb_lock);
- if (videobuf_queue_is_busy(q)) {
- dprintk(1, "queue busy\n");
- ret = -EBUSY;
- goto out_s_std;
- }
if (res_locked(fh)) {
dprintk(1, "can't change standard after started\n");
ret = -EBUSY;
diff --git a/drivers/media/usb/stk1160/Kconfig b/drivers/media/usb/stk1160/Kconfig
index 1c3a1ec0023..95584c15dc5 100644
--- a/drivers/media/usb/stk1160/Kconfig
+++ b/drivers/media/usb/stk1160/Kconfig
@@ -1,8 +1,6 @@
-config VIDEO_STK1160
+config VIDEO_STK1160_COMMON
tristate "STK1160 USB video capture support"
depends on VIDEO_DEV && I2C
- select VIDEOBUF2_VMALLOC
- select VIDEO_SAA711X
---help---
This is a video4linux driver for STK1160 based video capture devices.
@@ -12,9 +10,15 @@ config VIDEO_STK1160
config VIDEO_STK1160_AC97
bool "STK1160 AC97 codec support"
- depends on VIDEO_STK1160 && SND
- select SND_AC97_CODEC
+ depends on VIDEO_STK1160_COMMON && SND
---help---
Enables AC97 codec support for stk1160 driver.
-.
+
+config VIDEO_STK1160
+ tristate
+ depends on (!VIDEO_STK1160_AC97 || (SND='n') || SND) && VIDEO_STK1160_COMMON
+ default y
+ select VIDEOBUF2_VMALLOC
+ select VIDEO_SAA711X
+ select SND_AC97_CODEC if SND
diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
index 876fc26565e..c45c9881bb5 100644
--- a/drivers/media/usb/stk1160/stk1160-v4l.c
+++ b/drivers/media/usb/stk1160/stk1160-v4l.c
@@ -379,6 +379,9 @@ static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id norm)
struct stk1160 *dev = video_drvdata(file);
struct vb2_queue *q = &dev->vb_vidq;
+ if (dev->norm == norm)
+ return 0;
+
if (vb2_is_busy(q))
return -EBUSY;
@@ -440,9 +443,6 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
struct stk1160 *dev = video_drvdata(file);
- if (vb2_is_busy(&dev->vb_vidq))
- return -EBUSY;
-
if (i > STK1160_MAX_INPUT)
return -EINVAL;
diff --git a/drivers/media/usb/tlg2300/pd-main.c b/drivers/media/usb/tlg2300/pd-main.c
index e07e4c699cc..95f94e5aa66 100644
--- a/drivers/media/usb/tlg2300/pd-main.c
+++ b/drivers/media/usb/tlg2300/pd-main.c
@@ -375,7 +375,7 @@ static inline void set_map_flags(struct poseidon *pd, struct usb_device *udev)
}
#endif
-static int check_firmware(struct usb_device *udev, int *down_firmware)
+static int check_firmware(struct usb_device *udev)
{
void *buf;
int ret;
@@ -395,10 +395,8 @@ static int check_firmware(struct usb_device *udev, int *down_firmware)
USB_CTRL_GET_TIMEOUT);
kfree(buf);
- if (ret < 0) {
- *down_firmware = 1;
+ if (ret < 0)
return firmware_download(udev);
- }
return 0;
}
@@ -411,9 +409,9 @@ static int poseidon_probe(struct usb_interface *interface,
int new_one = 0;
/* download firmware */
- check_firmware(udev, &ret);
+ ret = check_firmware(udev);
if (ret)
- return 0;
+ return ret;
/* Are we recovering from hibernation? */
pd = find_old_poseidon(udev);
@@ -436,12 +434,22 @@ static int poseidon_probe(struct usb_interface *interface,
/* register v4l2 device */
ret = v4l2_device_register(&interface->dev, &pd->v4l2_dev);
+ if (ret)
+ goto err_v4l2;
/* register devices in directory /dev */
ret = pd_video_init(pd);
- poseidon_audio_init(pd);
- poseidon_fm_init(pd);
- pd_dvb_usb_device_init(pd);
+ if (ret)
+ goto err_video;
+ ret = poseidon_audio_init(pd);
+ if (ret)
+ goto err_audio;
+ ret = poseidon_fm_init(pd);
+ if (ret)
+ goto err_fm;
+ ret = pd_dvb_usb_device_init(pd);
+ if (ret)
+ goto err_dvb;
INIT_LIST_HEAD(&pd->device_list);
list_add_tail(&pd->device_list, &pd_device_list);
@@ -459,6 +467,17 @@ static int poseidon_probe(struct usb_interface *interface,
}
#endif
return 0;
+err_dvb:
+ poseidon_fm_exit(pd);
+err_fm:
+ poseidon_audio_free(pd);
+err_audio:
+ pd_video_exit(pd);
+err_video:
+ v4l2_device_unregister(&pd->v4l2_dev);
+err_v4l2:
+ kfree(pd);
+ return ret;
}
static void poseidon_disconnect(struct usb_interface *interface)
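The new error labels above follow the usual goto-unwind idiom: each failing step jumps to a label that tears down everything initialised so far, in reverse order. A minimal self-contained sketch with hypothetical step names:

#include <linux/errno.h>

static int step_a_init(void) { return 0; }
static void step_a_exit(void) { }
static int step_b_init(void) { return 0; }
static void step_b_exit(void) { }
static int step_c_init(void) { return -ENODEV; }	/* pretend this one fails */

static int demo_probe(void)
{
	int ret;

	ret = step_a_init();
	if (ret)
		return ret;
	ret = step_b_init();
	if (ret)
		goto err_a;
	ret = step_c_init();
	if (ret)
		goto err_b;
	return 0;

err_b:
	step_b_exit();		/* unwind in reverse order of initialisation */
err_a:
	step_a_exit();
	return ret;
}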
diff --git a/drivers/media/usb/usbtv/Kconfig b/drivers/media/usb/usbtv/Kconfig
index 8864436464b..7c5b86006ee 100644
--- a/drivers/media/usb/usbtv/Kconfig
+++ b/drivers/media/usb/usbtv/Kconfig
@@ -1,6 +1,6 @@
config VIDEO_USBTV
tristate "USBTV007 video capture support"
- depends on VIDEO_DEV
+ depends on VIDEO_V4L2
select VIDEOBUF2_VMALLOC
---help---
diff --git a/drivers/media/usb/usbtv/usbtv.c b/drivers/media/usb/usbtv/usbtv.c
index bf43f874685..8a505a90d31 100644
--- a/drivers/media/usb/usbtv/usbtv.c
+++ b/drivers/media/usb/usbtv/usbtv.c
@@ -33,7 +33,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
-#include <linux/version.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
@@ -57,7 +56,7 @@
#define USBTV_CHUNK_SIZE 256
#define USBTV_CHUNK 240
#define USBTV_CHUNKS (USBTV_WIDTH * USBTV_HEIGHT \
- / 2 / USBTV_CHUNK)
+ / 4 / USBTV_CHUNK)
/* Chunk header. */
#define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \
@@ -89,18 +88,80 @@ struct usbtv {
/* Number of the currently processed frame, useful to find
* out when a new one begins. */
u32 frame_id;
+ int chunks_done;
+ enum {
+ USBTV_COMPOSITE_INPUT,
+ USBTV_SVIDEO_INPUT,
+ } input;
int iso_size;
unsigned int sequence;
struct urb *isoc_urbs[USBTV_ISOC_TRANSFERS];
};
-static int usbtv_setup_capture(struct usbtv *usbtv)
+static int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size)
{
int ret;
int pipe = usb_rcvctrlpipe(usbtv->udev, 0);
int i;
- static const u16 protoregs[][2] = {
+
+ for (i = 0; i < size; i++) {
+ u16 index = regs[i][0];
+ u16 value = regs[i][1];
+
+ ret = usb_control_msg(usbtv->udev, pipe, USBTV_REQUEST_REG,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+ value, index, NULL, 0, 0);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int usbtv_select_input(struct usbtv *usbtv, int input)
+{
+ int ret;
+
+ static const u16 composite[][2] = {
+ { USBTV_BASE + 0x0105, 0x0060 },
+ { USBTV_BASE + 0x011f, 0x00f2 },
+ { USBTV_BASE + 0x0127, 0x0060 },
+ { USBTV_BASE + 0x00ae, 0x0010 },
+ { USBTV_BASE + 0x0284, 0x00aa },
+ { USBTV_BASE + 0x0239, 0x0060 },
+ };
+
+ static const u16 svideo[][2] = {
+ { USBTV_BASE + 0x0105, 0x0010 },
+ { USBTV_BASE + 0x011f, 0x00ff },
+ { USBTV_BASE + 0x0127, 0x0060 },
+ { USBTV_BASE + 0x00ae, 0x0030 },
+ { USBTV_BASE + 0x0284, 0x0088 },
+ { USBTV_BASE + 0x0239, 0x0060 },
+ };
+
+ switch (input) {
+ case USBTV_COMPOSITE_INPUT:
+ ret = usbtv_set_regs(usbtv, composite, ARRAY_SIZE(composite));
+ break;
+ case USBTV_SVIDEO_INPUT:
+ ret = usbtv_set_regs(usbtv, svideo, ARRAY_SIZE(svideo));
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (!ret)
+ usbtv->input = input;
+
+ return ret;
+}
+
+static int usbtv_setup_capture(struct usbtv *usbtv)
+{
+ int ret;
+ static const u16 setup[][2] = {
/* These seem to enable the device. */
{ USBTV_BASE + 0x0008, 0x0001 },
{ USBTV_BASE + 0x01d0, 0x00ff },
@@ -188,20 +249,37 @@ static int usbtv_setup_capture(struct usbtv *usbtv)
{ USBTV_BASE + 0x024f, 0x0002 },
};
- for (i = 0; i < ARRAY_SIZE(protoregs); i++) {
- u16 index = protoregs[i][0];
- u16 value = protoregs[i][1];
+ ret = usbtv_set_regs(usbtv, setup, ARRAY_SIZE(setup));
+ if (ret)
+ return ret;
- ret = usb_control_msg(usbtv->udev, pipe, USBTV_REQUEST_REG,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- value, index, NULL, 0, 0);
- if (ret < 0)
- return ret;
- }
+ ret = usbtv_select_input(usbtv, usbtv->input);
+ if (ret)
+ return ret;
return 0;
}
+/* Copy data from chunk into a frame buffer, deinterlacing the data
+ * into every second line. Unfortunately, they don't align nicely into
+ * 720 pixel lines, as the chunk is 240 words long, which is 480 pixels.
+ * Therefore, we break down the chunk into two halves before copying,
+ * so that we can interleave a line if needed. */
+static void usbtv_chunk_to_vbuf(u32 *frame, u32 *src, int chunk_no, int odd)
+{
+ int half;
+
+ for (half = 0; half < 2; half++) {
+ int part_no = chunk_no * 2 + half;
+ int line = part_no / 3;
+ int part_index = (line * 2 + !odd) * 3 + (part_no % 3);
+
+ u32 *dst = &frame[part_index * USBTV_CHUNK/2];
+ memcpy(dst, src, USBTV_CHUNK/2 * sizeof(*src));
+ src += USBTV_CHUNK/2;
+ }
+}
+
/* Called for each 256-byte image chunk.
* First word identifies the chunk, followed by 240 words of image
* data and padding. */
@@ -218,17 +296,17 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk)
frame_id = USBTV_FRAME_ID(chunk);
odd = USBTV_ODD(chunk);
chunk_no = USBTV_CHUNK_NO(chunk);
-
- /* Deinterlace. TODO: Use interlaced frame format. */
- chunk_no = (chunk_no - chunk_no % 3) * 2 + chunk_no % 3;
- chunk_no += !odd * 3;
-
if (chunk_no >= USBTV_CHUNKS)
return;
/* Beginning of a frame. */
- if (chunk_no == 0)
+ if (chunk_no == 0) {
usbtv->frame_id = frame_id;
+ usbtv->chunks_done = 0;
+ }
+
+ if (usbtv->frame_id != frame_id)
+ return;
spin_lock_irqsave(&usbtv->buflock, flags);
if (list_empty(&usbtv->bufs)) {
@@ -241,19 +319,23 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk)
buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list);
frame = vb2_plane_vaddr(&buf->vb, 0);
- /* Copy the chunk. */
- memcpy(&frame[chunk_no * USBTV_CHUNK], &chunk[1],
- USBTV_CHUNK * sizeof(chunk[1]));
+ /* Copy the chunk data. */
+ usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd);
+ usbtv->chunks_done++;
/* Last chunk in a frame, signalling an end */
- if (usbtv->frame_id && chunk_no == USBTV_CHUNKS-1) {
+ if (odd && chunk_no == USBTV_CHUNKS-1) {
int size = vb2_plane_size(&buf->vb, 0);
+ enum vb2_buffer_state state = usbtv->chunks_done ==
+ USBTV_CHUNKS ?
+ VB2_BUF_STATE_DONE :
+ VB2_BUF_STATE_ERROR;
buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
buf->vb.v4l2_buf.sequence = usbtv->sequence++;
v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
vb2_set_plane_payload(&buf->vb, 0, size);
- vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
+ vb2_buffer_done(&buf->vb, state);
list_del(&buf->list);
}
@@ -418,10 +500,17 @@ static int usbtv_querycap(struct file *file, void *priv,
static int usbtv_enum_input(struct file *file, void *priv,
struct v4l2_input *i)
{
- if (i->index > 0)
+ switch (i->index) {
+ case USBTV_COMPOSITE_INPUT:
+ strlcpy(i->name, "Composite", sizeof(i->name));
+ break;
+ case USBTV_SVIDEO_INPUT:
+ strlcpy(i->name, "S-Video", sizeof(i->name));
+ break;
+ default:
return -EINVAL;
+ }
- strlcpy(i->name, "Composite", sizeof(i->name));
i->type = V4L2_INPUT_TYPE_CAMERA;
i->std = V4L2_STD_525_60;
return 0;
@@ -461,15 +550,15 @@ static int usbtv_g_std(struct file *file, void *priv, v4l2_std_id *norm)
static int usbtv_g_input(struct file *file, void *priv, unsigned int *i)
{
- *i = 0;
+ struct usbtv *usbtv = video_drvdata(file);
+ *i = usbtv->input;
return 0;
}
static int usbtv_s_input(struct file *file, void *priv, unsigned int i)
{
- if (i > 0)
- return -EINVAL;
- return 0;
+ struct usbtv *usbtv = video_drvdata(file);
+ return usbtv_select_input(usbtv, i);
}
static int usbtv_s_std(struct file *file, void *priv, v4l2_std_id norm)
@@ -518,7 +607,7 @@ static int usbtv_queue_setup(struct vb2_queue *vq,
if (*nbuffers < 2)
*nbuffers = 2;
*nplanes = 1;
- sizes[0] = USBTV_CHUNK * USBTV_CHUNKS * sizeof(u32);
+ sizes[0] = USBTV_WIDTH * USBTV_HEIGHT / 2 * sizeof(u32);
return 0;
}
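The usbtv_chunk_to_vbuf() helper added above splits each 240-word chunk into two 120-word halves (one third of a 720-pixel line each, at two pixels per word) and scatters them into alternating lines of an interlaced frame. A standalone sketch of the same index arithmetic; usbtv_part_offset() is a hypothetical name used only for illustration:

#include <stdio.h>

#define USBTV_CHUNK 240

/* Same mapping as usbtv_chunk_to_vbuf(): returns the destination offset,
 * in 32-bit words, for one half of a chunk within a deinterlaced frame. */
static int usbtv_part_offset(int chunk_no, int half, int odd)
{
	int part_no = chunk_no * 2 + half;
	int line = part_no / 3;
	int part_index = (line * 2 + !odd) * 3 + (part_no % 3);

	return part_index * USBTV_CHUNK / 2;
}

int main(void)
{
	int chunk, half;

	/* The odd field fills lines 0, 2, 4, ... so chunk 1's second half
	 * jumps from word 240 to word 720, skipping the even-field line. */
	for (chunk = 0; chunk < 3; chunk++)
		for (half = 0; half < 2; half++)
			printf("odd chunk %d half %d -> word offset %d\n",
			       chunk, half, usbtv_part_offset(chunk, half, 1));
	return 0;
}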
diff --git a/drivers/media/v4l2-core/Makefile b/drivers/media/v4l2-core/Makefile
index 4c33b8d6520..1a85eee581f 100644
--- a/drivers/media/v4l2-core/Makefile
+++ b/drivers/media/v4l2-core/Makefile
@@ -17,6 +17,7 @@ endif
obj-$(CONFIG_VIDEO_V4L2) += videodev.o
obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o
obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o
+obj-$(CONFIG_VIDEO_V4L2) += v4l2-dv-timings.o
obj-$(CONFIG_VIDEO_TUNER) += tuner.o
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index aae241730ca..c85d69da35b 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -27,7 +27,6 @@ static bool match_i2c(struct device *dev, struct v4l2_async_subdev *asd)
#if IS_ENABLED(CONFIG_I2C)
struct i2c_client *client = i2c_verify_client(dev);
return client &&
- asd->bus_type == V4L2_ASYNC_BUS_I2C &&
asd->match.i2c.adapter_id == client->adapter->nr &&
asd->match.i2c.address == client->addr;
#else
@@ -35,10 +34,14 @@ static bool match_i2c(struct device *dev, struct v4l2_async_subdev *asd)
#endif
}
-static bool match_platform(struct device *dev, struct v4l2_async_subdev *asd)
+static bool match_devname(struct device *dev, struct v4l2_async_subdev *asd)
{
- return asd->bus_type == V4L2_ASYNC_BUS_PLATFORM &&
- !strcmp(asd->match.platform.name, dev_name(dev));
+ return !strcmp(asd->match.device_name.name, dev_name(dev));
+}
+
+static bool match_of(struct device *dev, struct v4l2_async_subdev *asd)
+{
+ return dev->of_node == asd->match.of.node;
}
static LIST_HEAD(subdev_list);
@@ -46,28 +49,29 @@ static LIST_HEAD(notifier_list);
static DEFINE_MUTEX(list_lock);
static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev_list *asdl)
+ struct v4l2_subdev *sd)
{
- struct v4l2_subdev *sd = v4l2_async_to_subdev(asdl);
struct v4l2_async_subdev *asd;
- bool (*match)(struct device *,
- struct v4l2_async_subdev *);
+ bool (*match)(struct device *, struct v4l2_async_subdev *);
list_for_each_entry(asd, &notifier->waiting, list) {
/* bus_type has been verified valid before */
- switch (asd->bus_type) {
- case V4L2_ASYNC_BUS_CUSTOM:
+ switch (asd->match_type) {
+ case V4L2_ASYNC_MATCH_CUSTOM:
match = asd->match.custom.match;
if (!match)
/* Match always */
return asd;
break;
- case V4L2_ASYNC_BUS_PLATFORM:
- match = match_platform;
+ case V4L2_ASYNC_MATCH_DEVNAME:
+ match = match_devname;
break;
- case V4L2_ASYNC_BUS_I2C:
+ case V4L2_ASYNC_MATCH_I2C:
match = match_i2c;
break;
+ case V4L2_ASYNC_MATCH_OF:
+ match = match_of;
+ break;
default:
/* Cannot happen, unless someone breaks us */
WARN_ON(true);
@@ -83,16 +87,15 @@ static struct v4l2_async_subdev *v4l2_async_belongs(struct v4l2_async_notifier *
}
static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev_list *asdl,
+ struct v4l2_subdev *sd,
struct v4l2_async_subdev *asd)
{
- struct v4l2_subdev *sd = v4l2_async_to_subdev(asdl);
int ret;
/* Remove from the waiting list */
list_del(&asd->list);
- asdl->asd = asd;
- asdl->notifier = notifier;
+ sd->asd = asd;
+ sd->notifier = notifier;
if (notifier->bound) {
ret = notifier->bound(notifier, sd, asd);
@@ -100,7 +103,7 @@ static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
return ret;
}
/* Move from the global subdevice list to notifier's done */
- list_move(&asdl->list, &notifier->done);
+ list_move(&sd->async_list, &notifier->done);
ret = v4l2_device_register_subdev(notifier->v4l2_dev, sd);
if (ret < 0) {
@@ -115,21 +118,19 @@ static int v4l2_async_test_notify(struct v4l2_async_notifier *notifier,
return 0;
}
-static void v4l2_async_cleanup(struct v4l2_async_subdev_list *asdl)
+static void v4l2_async_cleanup(struct v4l2_subdev *sd)
{
- struct v4l2_subdev *sd = v4l2_async_to_subdev(asdl);
-
v4l2_device_unregister_subdev(sd);
- /* Subdevice driver will reprobe and put asdl back onto the list */
- list_del_init(&asdl->list);
- asdl->asd = NULL;
+ /* Subdevice driver will reprobe and put the subdev back onto the list */
+ list_del_init(&sd->async_list);
+ sd->asd = NULL;
sd->dev = NULL;
}
int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
struct v4l2_async_notifier *notifier)
{
- struct v4l2_async_subdev_list *asdl, *tmp;
+ struct v4l2_subdev *sd, *tmp;
struct v4l2_async_subdev *asd;
int i;
@@ -141,17 +142,18 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
INIT_LIST_HEAD(&notifier->done);
for (i = 0; i < notifier->num_subdevs; i++) {
- asd = notifier->subdev[i];
+ asd = notifier->subdevs[i];
- switch (asd->bus_type) {
- case V4L2_ASYNC_BUS_CUSTOM:
- case V4L2_ASYNC_BUS_PLATFORM:
- case V4L2_ASYNC_BUS_I2C:
+ switch (asd->match_type) {
+ case V4L2_ASYNC_MATCH_CUSTOM:
+ case V4L2_ASYNC_MATCH_DEVNAME:
+ case V4L2_ASYNC_MATCH_I2C:
+ case V4L2_ASYNC_MATCH_OF:
break;
default:
dev_err(notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL,
- "Invalid bus-type %u on %p\n",
- asd->bus_type, asd);
+ "Invalid match type %u on %p\n",
+ asd->match_type, asd);
return -EINVAL;
}
list_add_tail(&asd->list, &notifier->waiting);
@@ -162,14 +164,14 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
/* Keep also completed notifiers on the list */
list_add(&notifier->list, &notifier_list);
- list_for_each_entry_safe(asdl, tmp, &subdev_list, list) {
+ list_for_each_entry_safe(sd, tmp, &subdev_list, async_list) {
int ret;
- asd = v4l2_async_belongs(notifier, asdl);
+ asd = v4l2_async_belongs(notifier, sd);
if (!asd)
continue;
- ret = v4l2_async_test_notify(notifier, asdl, asd);
+ ret = v4l2_async_test_notify(notifier, sd, asd);
if (ret < 0) {
mutex_unlock(&list_lock);
return ret;
@@ -184,28 +186,29 @@ EXPORT_SYMBOL(v4l2_async_notifier_register);
void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
{
- struct v4l2_async_subdev_list *asdl, *tmp;
+ struct v4l2_subdev *sd, *tmp;
unsigned int notif_n_subdev = notifier->num_subdevs;
unsigned int n_subdev = min(notif_n_subdev, V4L2_MAX_SUBDEVS);
struct device *dev[n_subdev];
int i = 0;
+ if (!notifier->v4l2_dev)
+ return;
+
mutex_lock(&list_lock);
list_del(&notifier->list);
- list_for_each_entry_safe(asdl, tmp, &notifier->done, list) {
- struct v4l2_subdev *sd = v4l2_async_to_subdev(asdl);
-
+ list_for_each_entry_safe(sd, tmp, &notifier->done, async_list) {
dev[i] = get_device(sd->dev);
- v4l2_async_cleanup(asdl);
+ v4l2_async_cleanup(sd);
/* If we handled USB devices, we'd have to lock the parent too */
device_release_driver(dev[i++]);
if (notifier->unbind)
- notifier->unbind(notifier, sd, sd->asdl.asd);
+ notifier->unbind(notifier, sd, sd->asd);
}
mutex_unlock(&list_lock);
@@ -225,6 +228,9 @@ void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
}
put_device(d);
}
+
+ notifier->v4l2_dev = NULL;
+
/*
* Don't care about the waiting list, it is initialised and populated
* upon notifier registration.
@@ -234,24 +240,23 @@ EXPORT_SYMBOL(v4l2_async_notifier_unregister);
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
- struct v4l2_async_subdev_list *asdl = &sd->asdl;
struct v4l2_async_notifier *notifier;
mutex_lock(&list_lock);
- INIT_LIST_HEAD(&asdl->list);
+ INIT_LIST_HEAD(&sd->async_list);
list_for_each_entry(notifier, &notifier_list, list) {
- struct v4l2_async_subdev *asd = v4l2_async_belongs(notifier, asdl);
+ struct v4l2_async_subdev *asd = v4l2_async_belongs(notifier, sd);
if (asd) {
- int ret = v4l2_async_test_notify(notifier, asdl, asd);
+ int ret = v4l2_async_test_notify(notifier, sd, asd);
mutex_unlock(&list_lock);
return ret;
}
}
/* None matched, wait for hot-plugging */
- list_add(&asdl->list, &subdev_list);
+ list_add(&sd->async_list, &subdev_list);
mutex_unlock(&list_lock);
@@ -261,23 +266,22 @@ EXPORT_SYMBOL(v4l2_async_register_subdev);
void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
{
- struct v4l2_async_subdev_list *asdl = &sd->asdl;
- struct v4l2_async_notifier *notifier = asdl->notifier;
+ struct v4l2_async_notifier *notifier = sd->notifier;
- if (!asdl->asd) {
- if (!list_empty(&asdl->list))
- v4l2_async_cleanup(asdl);
+ if (!sd->asd) {
+ if (!list_empty(&sd->async_list))
+ v4l2_async_cleanup(sd);
return;
}
mutex_lock(&list_lock);
- list_add(&asdl->asd->list, &notifier->waiting);
+ list_add(&sd->asd->list, &notifier->waiting);
- v4l2_async_cleanup(asdl);
+ v4l2_async_cleanup(sd);
if (notifier->unbind)
- notifier->unbind(notifier, sd, sd->asdl.asd);
+ notifier->unbind(notifier, sd, sd->asd);
mutex_unlock(&list_lock);
}
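A rough sketch of how a bridge driver might describe a subdevice to the reworked async framework, using the I2C match type and the field names visible in the hunks above. The adapter number, address and callback wiring are placeholders, and error handling is omitted:

#include <linux/kernel.h>
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>

/* Placeholder I2C location of the subdevice the bridge is waiting for. */
static struct v4l2_async_subdev demo_asd = {
	.match_type = V4L2_ASYNC_MATCH_I2C,
	.match.i2c = {
		.adapter_id = 1,
		.address = 0x42,
	},
};

static struct v4l2_async_subdev *demo_asds[] = { &demo_asd };

static struct v4l2_async_notifier demo_notifier = {
	.subdevs = demo_asds,
	.num_subdevs = ARRAY_SIZE(demo_asds),
	/* .bound/.complete/.unbind callbacks would normally be set here */
};

static int demo_register_notifier(struct v4l2_device *v4l2_dev)
{
	return v4l2_async_notifier_register(v4l2_dev, &demo_notifier);
}

The matching subdevice driver simply calls v4l2_async_register_subdev() from its own probe; whichever side shows up last triggers the bound callback.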
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index a95e5e23403..037d7a55aa8 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -495,363 +495,6 @@ void v4l_bound_align_image(u32 *w, unsigned int wmin, unsigned int wmax,
}
EXPORT_SYMBOL_GPL(v4l_bound_align_image);
-/**
- * v4l_match_dv_timings - check if two timings match
- * @t1 - compare this v4l2_dv_timings struct...
- * @t2 - with this struct.
- * @pclock_delta - the allowed pixelclock deviation.
- *
- * Compare t1 with t2 with a given margin of error for the pixelclock.
- */
-bool v4l_match_dv_timings(const struct v4l2_dv_timings *t1,
- const struct v4l2_dv_timings *t2,
- unsigned pclock_delta)
-{
- if (t1->type != t2->type || t1->type != V4L2_DV_BT_656_1120)
- return false;
- if (t1->bt.width == t2->bt.width &&
- t1->bt.height == t2->bt.height &&
- t1->bt.interlaced == t2->bt.interlaced &&
- t1->bt.polarities == t2->bt.polarities &&
- t1->bt.pixelclock >= t2->bt.pixelclock - pclock_delta &&
- t1->bt.pixelclock <= t2->bt.pixelclock + pclock_delta &&
- t1->bt.hfrontporch == t2->bt.hfrontporch &&
- t1->bt.vfrontporch == t2->bt.vfrontporch &&
- t1->bt.vsync == t2->bt.vsync &&
- t1->bt.vbackporch == t2->bt.vbackporch &&
- (!t1->bt.interlaced ||
- (t1->bt.il_vfrontporch == t2->bt.il_vfrontporch &&
- t1->bt.il_vsync == t2->bt.il_vsync &&
- t1->bt.il_vbackporch == t2->bt.il_vbackporch)))
- return true;
- return false;
-}
-EXPORT_SYMBOL_GPL(v4l_match_dv_timings);
-
-/*
- * CVT defines
- * Based on Coordinated Video Timings Standard
- * version 1.1 September 10, 2003
- */
-
-#define CVT_PXL_CLK_GRAN 250000 /* pixel clock granularity */
-
-/* Normal blanking */
-#define CVT_MIN_V_BPORCH 7 /* lines */
-#define CVT_MIN_V_PORCH_RND 3 /* lines */
-#define CVT_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */
-
-/* Normal blanking for CVT uses GTF to calculate horizontal blanking */
-#define CVT_CELL_GRAN 8 /* character cell granularity */
-#define CVT_M 600 /* blanking formula gradient */
-#define CVT_C 40 /* blanking formula offset */
-#define CVT_K 128 /* blanking formula scaling factor */
-#define CVT_J 20 /* blanking formula scaling factor */
-#define CVT_C_PRIME (((CVT_C - CVT_J) * CVT_K / 256) + CVT_J)
-#define CVT_M_PRIME (CVT_K * CVT_M / 256)
-
-/* Reduced Blanking */
-#define CVT_RB_MIN_V_BPORCH 7 /* lines */
-#define CVT_RB_V_FPORCH 3 /* lines */
-#define CVT_RB_MIN_V_BLANK 460 /* us */
-#define CVT_RB_H_SYNC 32 /* pixels */
-#define CVT_RB_H_BPORCH 80 /* pixels */
-#define CVT_RB_H_BLANK 160 /* pixels */
-
-/** v4l2_detect_cvt - detect if the given timings follow the CVT standard
- * @frame_height - the total height of the frame (including blanking) in lines.
- * @hfreq - the horizontal frequency in Hz.
- * @vsync - the height of the vertical sync in lines.
- * @polarities - the horizontal and vertical polarities (same as struct
- * v4l2_bt_timings polarities).
- * @fmt - the resulting timings.
- *
- * This function will attempt to detect if the given values correspond to a
- * valid CVT format. If so, then it will return true, and fmt will be filled
- * in with the found CVT timings.
- */
-bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
- u32 polarities, struct v4l2_dv_timings *fmt)
-{
- int v_fp, v_bp, h_fp, h_bp, hsync;
- int frame_width, image_height, image_width;
- bool reduced_blanking;
- unsigned pix_clk;
-
- if (vsync < 4 || vsync > 7)
- return false;
-
- if (polarities == V4L2_DV_VSYNC_POS_POL)
- reduced_blanking = false;
- else if (polarities == V4L2_DV_HSYNC_POS_POL)
- reduced_blanking = true;
- else
- return false;
-
- /* Vertical */
- if (reduced_blanking) {
- v_fp = CVT_RB_V_FPORCH;
- v_bp = (CVT_RB_MIN_V_BLANK * hfreq + 999999) / 1000000;
- v_bp -= vsync + v_fp;
-
- if (v_bp < CVT_RB_MIN_V_BPORCH)
- v_bp = CVT_RB_MIN_V_BPORCH;
- } else {
- v_fp = CVT_MIN_V_PORCH_RND;
- v_bp = (CVT_MIN_VSYNC_BP * hfreq + 999999) / 1000000 - vsync;
-
- if (v_bp < CVT_MIN_V_BPORCH)
- v_bp = CVT_MIN_V_BPORCH;
- }
- image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;
-
- /* Aspect ratio based on vsync */
- switch (vsync) {
- case 4:
- image_width = (image_height * 4) / 3;
- break;
- case 5:
- image_width = (image_height * 16) / 9;
- break;
- case 6:
- image_width = (image_height * 16) / 10;
- break;
- case 7:
- /* special case */
- if (image_height == 1024)
- image_width = (image_height * 5) / 4;
- else if (image_height == 768)
- image_width = (image_height * 15) / 9;
- else
- return false;
- break;
- default:
- return false;
- }
-
- image_width = image_width & ~7;
-
- /* Horizontal */
- if (reduced_blanking) {
- pix_clk = (image_width + CVT_RB_H_BLANK) * hfreq;
- pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN;
-
- h_bp = CVT_RB_H_BPORCH;
- hsync = CVT_RB_H_SYNC;
- h_fp = CVT_RB_H_BLANK - h_bp - hsync;
-
- frame_width = image_width + CVT_RB_H_BLANK;
- } else {
- int h_blank;
- unsigned ideal_duty_cycle = CVT_C_PRIME - (CVT_M_PRIME * 1000) / hfreq;
-
- h_blank = (image_width * ideal_duty_cycle + (100 - ideal_duty_cycle) / 2) /
- (100 - ideal_duty_cycle);
- h_blank = h_blank - h_blank % (2 * CVT_CELL_GRAN);
-
- if (h_blank * 100 / image_width < 20) {
- h_blank = image_width / 5;
- h_blank = (h_blank + 0x7) & ~0x7;
- }
-
- pix_clk = (image_width + h_blank) * hfreq;
- pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN;
-
- h_bp = h_blank / 2;
- frame_width = image_width + h_blank;
-
- hsync = (frame_width * 8 + 50) / 100;
- hsync = hsync - hsync % CVT_CELL_GRAN;
- h_fp = h_blank - hsync - h_bp;
- }
-
- fmt->bt.polarities = polarities;
- fmt->bt.width = image_width;
- fmt->bt.height = image_height;
- fmt->bt.hfrontporch = h_fp;
- fmt->bt.vfrontporch = v_fp;
- fmt->bt.hsync = hsync;
- fmt->bt.vsync = vsync;
- fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
- fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
- fmt->bt.pixelclock = pix_clk;
- fmt->bt.standards = V4L2_DV_BT_STD_CVT;
- if (reduced_blanking)
- fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
- return true;
-}
-EXPORT_SYMBOL_GPL(v4l2_detect_cvt);
-
-/*
- * GTF defines
- * Based on Generalized Timing Formula Standard
- * Version 1.1 September 2, 1999
- */
-
-#define GTF_PXL_CLK_GRAN 250000 /* pixel clock granularity */
-
-#define GTF_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */
-#define GTF_V_FP 1 /* vertical front porch (lines) */
-#define GTF_CELL_GRAN 8 /* character cell granularity */
-
-/* Default */
-#define GTF_D_M 600 /* blanking formula gradient */
-#define GTF_D_C 40 /* blanking formula offset */
-#define GTF_D_K 128 /* blanking formula scaling factor */
-#define GTF_D_J 20 /* blanking formula scaling factor */
-#define GTF_D_C_PRIME ((((GTF_D_C - GTF_D_J) * GTF_D_K) / 256) + GTF_D_J)
-#define GTF_D_M_PRIME ((GTF_D_K * GTF_D_M) / 256)
-
-/* Secondary */
-#define GTF_S_M 3600 /* blanking formula gradient */
-#define GTF_S_C 40 /* blanking formula offset */
-#define GTF_S_K 128 /* blanking formula scaling factor */
-#define GTF_S_J 35 /* blanking formula scaling factor */
-#define GTF_S_C_PRIME ((((GTF_S_C - GTF_S_J) * GTF_S_K) / 256) + GTF_S_J)
-#define GTF_S_M_PRIME ((GTF_S_K * GTF_S_M) / 256)
-
-/** v4l2_detect_gtf - detect if the given timings follow the GTF standard
- * @frame_height - the total height of the frame (including blanking) in lines.
- * @hfreq - the horizontal frequency in Hz.
- * @vsync - the height of the vertical sync in lines.
- * @polarities - the horizontal and vertical polarities (same as struct
- * v4l2_bt_timings polarities).
- * @aspect - preferred aspect ratio. GTF has no method of determining the
- * aspect ratio in order to derive the image width from the
- * image height, so it has to be passed explicitly. Usually
- * the native screen aspect ratio is used for this. If it
- * is not filled in correctly, then 16:9 will be assumed.
- * @fmt - the resulting timings.
- *
- * This function will attempt to detect if the given values correspond to a
- * valid GTF format. If so, then it will return true, and fmt will be filled
- * in with the found GTF timings.
- */
-bool v4l2_detect_gtf(unsigned frame_height,
- unsigned hfreq,
- unsigned vsync,
- u32 polarities,
- struct v4l2_fract aspect,
- struct v4l2_dv_timings *fmt)
-{
- int pix_clk;
- int v_fp, v_bp, h_fp, hsync;
- int frame_width, image_height, image_width;
- bool default_gtf;
- int h_blank;
-
- if (vsync != 3)
- return false;
-
- if (polarities == V4L2_DV_VSYNC_POS_POL)
- default_gtf = true;
- else if (polarities == V4L2_DV_HSYNC_POS_POL)
- default_gtf = false;
- else
- return false;
-
- /* Vertical */
- v_fp = GTF_V_FP;
- v_bp = (GTF_MIN_VSYNC_BP * hfreq + 999999) / 1000000 - vsync;
- image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;
-
- if (aspect.numerator == 0 || aspect.denominator == 0) {
- aspect.numerator = 16;
- aspect.denominator = 9;
- }
- image_width = ((image_height * aspect.numerator) / aspect.denominator);
-
- /* Horizontal */
- if (default_gtf)
- h_blank = ((image_width * GTF_D_C_PRIME * hfreq) -
- (image_width * GTF_D_M_PRIME * 1000) +
- (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000) / 2) /
- (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000);
- else
- h_blank = ((image_width * GTF_S_C_PRIME * hfreq) -
- (image_width * GTF_S_M_PRIME * 1000) +
- (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000) / 2) /
- (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000);
-
- h_blank = h_blank - h_blank % (2 * GTF_CELL_GRAN);
- frame_width = image_width + h_blank;
-
- pix_clk = (image_width + h_blank) * hfreq;
- pix_clk = pix_clk / GTF_PXL_CLK_GRAN * GTF_PXL_CLK_GRAN;
-
- hsync = (frame_width * 8 + 50) / 100;
- hsync = hsync - hsync % GTF_CELL_GRAN;
-
- h_fp = h_blank / 2 - hsync;
-
- fmt->bt.polarities = polarities;
- fmt->bt.width = image_width;
- fmt->bt.height = image_height;
- fmt->bt.hfrontporch = h_fp;
- fmt->bt.vfrontporch = v_fp;
- fmt->bt.hsync = hsync;
- fmt->bt.vsync = vsync;
- fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
- fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
- fmt->bt.pixelclock = pix_clk;
- fmt->bt.standards = V4L2_DV_BT_STD_GTF;
- if (!default_gtf)
- fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
- return true;
-}
-EXPORT_SYMBOL_GPL(v4l2_detect_gtf);
-
-/** v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
- * 0x15 and 0x16 from the EDID.
- * @hor_landscape - byte 0x15 from the EDID.
- * @vert_portrait - byte 0x16 from the EDID.
- *
- * Determines the aspect ratio from the EDID.
- * See VESA Enhanced EDID standard, release A, rev 2, section 3.6.2:
- * "Horizontal and Vertical Screen Size or Aspect Ratio"
- */
-struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
-{
- struct v4l2_fract aspect = { 16, 9 };
- u32 tmp;
- u8 ratio;
-
- /* Nothing filled in, fallback to 16:9 */
- if (!hor_landscape && !vert_portrait)
- return aspect;
- /* Both filled in, so they are interpreted as the screen size in cm */
- if (hor_landscape && vert_portrait) {
- aspect.numerator = hor_landscape;
- aspect.denominator = vert_portrait;
- return aspect;
- }
- /* Only one is filled in, so interpret them as a ratio:
- (val + 99) / 100 */
- ratio = hor_landscape | vert_portrait;
- /* Change some rounded values into the exact aspect ratio */
- if (ratio == 79) {
- aspect.numerator = 16;
- aspect.denominator = 9;
- } else if (ratio == 34) {
- aspect.numerator = 4;
- aspect.numerator = 3;
- } else if (ratio == 68) {
- aspect.numerator = 15;
- aspect.numerator = 9;
- } else {
- aspect.numerator = hor_landscape + 99;
- aspect.denominator = 100;
- }
- if (hor_landscape)
- return aspect;
- /* The aspect ratio is for portrait, so swap numerator and denominator */
- tmp = aspect.denominator;
- aspect.denominator = aspect.numerator;
- aspect.numerator = tmp;
- return aspect;
-}
-EXPORT_SYMBOL_GPL(v4l2_calc_aspect_ratio);
-
const struct v4l2_frmsize_discrete *v4l2_find_nearest_format(
const struct v4l2_discrete_probe *probe,
s32 width, s32 height)
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index fccd08b66d1..c3f08038868 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -424,6 +424,12 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
NULL,
};
+ static const char * const vpx_golden_frame_sel[] = {
+ "Use Previous Frame",
+ "Use Previous Specific Frame",
+ NULL,
+ };
+
static const char * const flash_led_mode[] = {
"Off",
"Flash",
@@ -538,6 +544,8 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
return mpeg_mpeg4_level;
case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:
return mpeg4_profile;
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
+ return vpx_golden_frame_sel;
case V4L2_CID_JPEG_CHROMA_SUBSAMPLING:
return jpeg_chroma_subsampling;
case V4L2_CID_DV_TX_MODE:
@@ -552,6 +560,33 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
}
EXPORT_SYMBOL(v4l2_ctrl_get_menu);
+#define __v4l2_qmenu_int_len(arr, len) ({ *(len) = ARRAY_SIZE(arr); arr; })
+/*
+ * Returns NULL or an s64 array containing the menu for the given
+ * control ID. The total number of menu items is returned in @len.
+ */
+const s64 *v4l2_ctrl_get_int_menu(u32 id, u32 *len)
+{
+ static const s64 qmenu_int_vpx_num_partitions[] = {
+ 1, 2, 4, 8,
+ };
+
+ static const s64 qmenu_int_vpx_num_ref_frames[] = {
+ 1, 2, 3,
+ };
+
+ switch (id) {
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS:
+ return __v4l2_qmenu_int_len(qmenu_int_vpx_num_partitions, len);
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES:
+ return __v4l2_qmenu_int_len(qmenu_int_vpx_num_ref_frames, len);
+ default:
+ *len = 0;
+ return NULL;
+ };
+}
+EXPORT_SYMBOL(v4l2_ctrl_get_int_menu);
+
/* Return the control name. */
const char *v4l2_ctrl_get_name(u32 id)
{
@@ -600,9 +635,11 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_ALPHA_COMPONENT: return "Alpha Component";
case V4L2_CID_COLORFX_CBCR: return "Color Effects, CbCr";
- /* MPEG controls */
+ /* Codec controls */
+ /* The MPEG controls apply to all codecs;
+ * the 'MPEG' part of the define is historical */
/* Keep the order of the 'case's the same as in videodev2.h! */
- case V4L2_CID_MPEG_CLASS: return "MPEG Encoder Controls";
+ case V4L2_CID_MPEG_CLASS: return "Codec Controls";
case V4L2_CID_MPEG_STREAM_TYPE: return "Stream Type";
case V4L2_CID_MPEG_STREAM_PID_PMT: return "Stream PMT Program ID";
case V4L2_CID_MPEG_STREAM_PID_AUDIO: return "Stream Audio Program ID";
@@ -700,6 +737,15 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_VBV_DELAY: return "Initial Delay for VBV Control";
case V4L2_CID_MPEG_VIDEO_REPEAT_SEQ_HEADER: return "Repeat Sequence Header";
+ /* VPX controls */
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS: return "VPX Number of Partitions";
+ case V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4: return "VPX Intra Mode Decision Disable";
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES: return "VPX No. of Refs for P Frame";
+ case V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL: return "VPX Loop Filter Level Range";
+ case V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS: return "VPX Deblocking Effect Control";
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD: return "VPX Golden Frame Refresh Period";
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL: return "VPX Golden Frame Indicator";
+
/* CAMERA controls */
/* Keep the order of the 'case's the same as in videodev2.h! */
case V4L2_CID_CAMERA_CLASS: return "Camera Controls";
@@ -914,6 +960,7 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_DV_RX_RGB_RANGE:
case V4L2_CID_TEST_PATTERN:
case V4L2_CID_TUNE_DEEMPHASIS:
+ case V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL:
*type = V4L2_CTRL_TYPE_MENU;
break;
case V4L2_CID_LINK_FREQ:
@@ -925,6 +972,8 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
break;
case V4L2_CID_ISO_SENSITIVITY:
case V4L2_CID_AUTO_EXPOSURE_BIAS:
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS:
+ case V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES:
*type = V4L2_CTRL_TYPE_INTEGER_MENU;
break;
case V4L2_CID_USER_CLASS:
@@ -1712,7 +1761,9 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
const struct v4l2_ctrl_ops *ops,
u32 id, s32 max, s32 mask, s32 def)
{
- const char * const *qmenu = v4l2_ctrl_get_menu(id);
+ const char * const *qmenu = NULL;
+ const s64 *qmenu_int = NULL;
+ unsigned int qmenu_int_len = 0;
const char *name;
enum v4l2_ctrl_type type;
s32 min;
@@ -1720,12 +1771,18 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu(struct v4l2_ctrl_handler *hdl,
u32 flags;
v4l2_ctrl_fill(id, &name, &type, &min, &max, &step, &def, &flags);
- if (type != V4L2_CTRL_TYPE_MENU) {
+
+ if (type == V4L2_CTRL_TYPE_MENU)
+ qmenu = v4l2_ctrl_get_menu(id);
+ else if (type == V4L2_CTRL_TYPE_INTEGER_MENU)
+ qmenu_int = v4l2_ctrl_get_int_menu(id, &qmenu_int_len);
+
+ if ((!qmenu && !qmenu_int) || (qmenu_int && max > qmenu_int_len)) {
handler_set_err(hdl, -EINVAL);
return NULL;
}
return v4l2_ctrl_new(hdl, ops, id, name, type,
- 0, max, mask, def, flags, qmenu, NULL, NULL);
+ 0, max, mask, def, flags, qmenu, qmenu_int, NULL);
}
EXPORT_SYMBOL(v4l2_ctrl_new_std_menu);
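A hedged sketch of how an encoder driver could expose one of the new VPX integer menus through the extended v4l2_ctrl_new_std_menu() path; demo_ctrl_ops and the handler are placeholders, and the control ID assumes the accompanying videodev2.h additions referenced by this series:

#include <media/v4l2-ctrls.h>

/* Placeholder ops; a real driver would point s_ctrl at its hardware hook. */
static const struct v4l2_ctrl_ops demo_ctrl_ops;

static int demo_add_vpx_ctrls(struct v4l2_ctrl_handler *hdl)
{
	/*
	 * Integer menu {1, 2, 4, 8}: highest index is 3, no skip mask,
	 * default to index 0 (one partition).
	 */
	v4l2_ctrl_new_std_menu(hdl, &demo_ctrl_ops,
			       V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS,
			       3, 0, 0);
	return hdl->error;
}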
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index c8859d6ff6a..b5aaaac427a 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -38,24 +38,25 @@
* sysfs stuff
*/
-static ssize_t show_index(struct device *cd,
- struct device_attribute *attr, char *buf)
+static ssize_t index_show(struct device *cd,
+ struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(cd);
return sprintf(buf, "%i\n", vdev->index);
}
+static DEVICE_ATTR_RO(index);
-static ssize_t show_debug(struct device *cd,
- struct device_attribute *attr, char *buf)
+static ssize_t debug_show(struct device *cd,
+ struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(cd);
return sprintf(buf, "%i\n", vdev->debug);
}
-static ssize_t set_debug(struct device *cd, struct device_attribute *attr,
- const char *buf, size_t len)
+static ssize_t debug_store(struct device *cd, struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct video_device *vdev = to_video_device(cd);
int res = 0;
@@ -68,21 +69,24 @@ static ssize_t set_debug(struct device *cd, struct device_attribute *attr,
vdev->debug = value;
return len;
}
+static DEVICE_ATTR_RW(debug);
-static ssize_t show_name(struct device *cd,
+static ssize_t name_show(struct device *cd,
struct device_attribute *attr, char *buf)
{
struct video_device *vdev = to_video_device(cd);
return sprintf(buf, "%.*s\n", (int)sizeof(vdev->name), vdev->name);
}
+static DEVICE_ATTR_RO(name);
-static struct device_attribute video_device_attrs[] = {
- __ATTR(name, S_IRUGO, show_name, NULL),
- __ATTR(debug, 0644, show_debug, set_debug),
- __ATTR(index, S_IRUGO, show_index, NULL),
- __ATTR_NULL
+static struct attribute *video_device_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_debug.attr,
+ &dev_attr_index.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(video_device);
/*
* Active devices
@@ -217,7 +221,7 @@ static void v4l2_device_release(struct device *cd)
static struct class video_class = {
.name = VIDEO_NAME,
- .dev_attrs = video_device_attrs,
+ .dev_groups = video_device_groups,
};
struct video_device *video_devdata(struct file *file)
@@ -868,6 +872,7 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
/* Should not happen since we thought this minor was free */
WARN_ON(video_device[vdev->minor] != NULL);
+ video_device[vdev->minor] = vdev;
vdev->index = get_index(vdev);
mutex_unlock(&videodev_lock);
@@ -930,9 +935,6 @@ int __video_register_device(struct video_device *vdev, int type, int nr,
#endif
/* Part 6: Activate this minor. The char device can now be used. */
set_bit(V4L2_FL_REGISTERED, &vdev->flags);
- mutex_lock(&videodev_lock);
- video_device[vdev->minor] = vdev;
- mutex_unlock(&videodev_lock);
return 0;
@@ -940,6 +942,7 @@ cleanup:
mutex_lock(&videodev_lock);
if (vdev->cdev)
cdev_del(vdev->cdev);
+ video_device[vdev->minor] = NULL;
devnode_clear(vdev);
mutex_unlock(&videodev_lock);
/* Mark this video device as never having been registered. */
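The sysfs conversion above is the generic attribute-group pattern the driver core now expects; a minimal sketch of the same idiom for a hypothetical class (all names are illustrative):

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t label_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "demo\n");
}
static DEVICE_ATTR_RO(label);	/* provides dev_attr_label with mode 0444 */

static struct attribute *demo_attrs[] = {
	&dev_attr_label.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo);		/* provides demo_groups from demo_attrs */

static struct class demo_class = {
	.name		= "demo",
	.dev_groups	= demo_groups,
};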
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
new file mode 100644
index 00000000000..ee52b9f4a94
--- /dev/null
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -0,0 +1,609 @@
+/*
+ * v4l2-dv-timings - dv-timings helper functions
+ *
+ * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/videodev2.h>
+#include <linux/v4l2-dv-timings.h>
+#include <media/v4l2-dv-timings.h>
+
+const struct v4l2_dv_timings v4l2_dv_timings_presets[] = {
+ V4L2_DV_BT_CEA_640X480P59_94,
+ V4L2_DV_BT_CEA_720X480I59_94,
+ V4L2_DV_BT_CEA_720X480P59_94,
+ V4L2_DV_BT_CEA_720X576I50,
+ V4L2_DV_BT_CEA_720X576P50,
+ V4L2_DV_BT_CEA_1280X720P24,
+ V4L2_DV_BT_CEA_1280X720P25,
+ V4L2_DV_BT_CEA_1280X720P30,
+ V4L2_DV_BT_CEA_1280X720P50,
+ V4L2_DV_BT_CEA_1280X720P60,
+ V4L2_DV_BT_CEA_1920X1080P24,
+ V4L2_DV_BT_CEA_1920X1080P25,
+ V4L2_DV_BT_CEA_1920X1080P30,
+ V4L2_DV_BT_CEA_1920X1080I50,
+ V4L2_DV_BT_CEA_1920X1080P50,
+ V4L2_DV_BT_CEA_1920X1080I60,
+ V4L2_DV_BT_CEA_1920X1080P60,
+ V4L2_DV_BT_DMT_640X350P85,
+ V4L2_DV_BT_DMT_640X400P85,
+ V4L2_DV_BT_DMT_720X400P85,
+ V4L2_DV_BT_DMT_640X480P72,
+ V4L2_DV_BT_DMT_640X480P75,
+ V4L2_DV_BT_DMT_640X480P85,
+ V4L2_DV_BT_DMT_800X600P56,
+ V4L2_DV_BT_DMT_800X600P60,
+ V4L2_DV_BT_DMT_800X600P72,
+ V4L2_DV_BT_DMT_800X600P75,
+ V4L2_DV_BT_DMT_800X600P85,
+ V4L2_DV_BT_DMT_800X600P120_RB,
+ V4L2_DV_BT_DMT_848X480P60,
+ V4L2_DV_BT_DMT_1024X768I43,
+ V4L2_DV_BT_DMT_1024X768P60,
+ V4L2_DV_BT_DMT_1024X768P70,
+ V4L2_DV_BT_DMT_1024X768P75,
+ V4L2_DV_BT_DMT_1024X768P85,
+ V4L2_DV_BT_DMT_1024X768P120_RB,
+ V4L2_DV_BT_DMT_1152X864P75,
+ V4L2_DV_BT_DMT_1280X768P60_RB,
+ V4L2_DV_BT_DMT_1280X768P60,
+ V4L2_DV_BT_DMT_1280X768P75,
+ V4L2_DV_BT_DMT_1280X768P85,
+ V4L2_DV_BT_DMT_1280X768P120_RB,
+ V4L2_DV_BT_DMT_1280X800P60_RB,
+ V4L2_DV_BT_DMT_1280X800P60,
+ V4L2_DV_BT_DMT_1280X800P75,
+ V4L2_DV_BT_DMT_1280X800P85,
+ V4L2_DV_BT_DMT_1280X800P120_RB,
+ V4L2_DV_BT_DMT_1280X960P60,
+ V4L2_DV_BT_DMT_1280X960P85,
+ V4L2_DV_BT_DMT_1280X960P120_RB,
+ V4L2_DV_BT_DMT_1280X1024P60,
+ V4L2_DV_BT_DMT_1280X1024P75,
+ V4L2_DV_BT_DMT_1280X1024P85,
+ V4L2_DV_BT_DMT_1280X1024P120_RB,
+ V4L2_DV_BT_DMT_1360X768P60,
+ V4L2_DV_BT_DMT_1360X768P120_RB,
+ V4L2_DV_BT_DMT_1366X768P60,
+ V4L2_DV_BT_DMT_1366X768P60_RB,
+ V4L2_DV_BT_DMT_1400X1050P60_RB,
+ V4L2_DV_BT_DMT_1400X1050P60,
+ V4L2_DV_BT_DMT_1400X1050P75,
+ V4L2_DV_BT_DMT_1400X1050P85,
+ V4L2_DV_BT_DMT_1400X1050P120_RB,
+ V4L2_DV_BT_DMT_1440X900P60_RB,
+ V4L2_DV_BT_DMT_1440X900P60,
+ V4L2_DV_BT_DMT_1440X900P75,
+ V4L2_DV_BT_DMT_1440X900P85,
+ V4L2_DV_BT_DMT_1440X900P120_RB,
+ V4L2_DV_BT_DMT_1600X900P60_RB,
+ V4L2_DV_BT_DMT_1600X1200P60,
+ V4L2_DV_BT_DMT_1600X1200P65,
+ V4L2_DV_BT_DMT_1600X1200P70,
+ V4L2_DV_BT_DMT_1600X1200P75,
+ V4L2_DV_BT_DMT_1600X1200P85,
+ V4L2_DV_BT_DMT_1600X1200P120_RB,
+ V4L2_DV_BT_DMT_1680X1050P60_RB,
+ V4L2_DV_BT_DMT_1680X1050P60,
+ V4L2_DV_BT_DMT_1680X1050P75,
+ V4L2_DV_BT_DMT_1680X1050P85,
+ V4L2_DV_BT_DMT_1680X1050P120_RB,
+ V4L2_DV_BT_DMT_1792X1344P60,
+ V4L2_DV_BT_DMT_1792X1344P75,
+ V4L2_DV_BT_DMT_1792X1344P120_RB,
+ V4L2_DV_BT_DMT_1856X1392P60,
+ V4L2_DV_BT_DMT_1856X1392P75,
+ V4L2_DV_BT_DMT_1856X1392P120_RB,
+ V4L2_DV_BT_DMT_1920X1200P60_RB,
+ V4L2_DV_BT_DMT_1920X1200P60,
+ V4L2_DV_BT_DMT_1920X1200P75,
+ V4L2_DV_BT_DMT_1920X1200P85,
+ V4L2_DV_BT_DMT_1920X1200P120_RB,
+ V4L2_DV_BT_DMT_1920X1440P60,
+ V4L2_DV_BT_DMT_1920X1440P75,
+ V4L2_DV_BT_DMT_1920X1440P120_RB,
+ V4L2_DV_BT_DMT_2048X1152P60_RB,
+ V4L2_DV_BT_DMT_2560X1600P60_RB,
+ V4L2_DV_BT_DMT_2560X1600P60,
+ V4L2_DV_BT_DMT_2560X1600P75,
+ V4L2_DV_BT_DMT_2560X1600P85,
+ V4L2_DV_BT_DMT_2560X1600P120_RB,
+ { }
+};
+EXPORT_SYMBOL_GPL(v4l2_dv_timings_presets);
+
+bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
+ const struct v4l2_dv_timings_cap *dvcap,
+ v4l2_check_dv_timings_fnc fnc,
+ void *fnc_handle)
+{
+ const struct v4l2_bt_timings *bt = &t->bt;
+ const struct v4l2_bt_timings_cap *cap = &dvcap->bt;
+ u32 caps = cap->capabilities;
+
+ if (t->type != V4L2_DV_BT_656_1120)
+ return false;
+ if (t->type != dvcap->type ||
+ bt->height < cap->min_height ||
+ bt->height > cap->max_height ||
+ bt->width < cap->min_width ||
+ bt->width > cap->max_width ||
+ bt->pixelclock < cap->min_pixelclock ||
+ bt->pixelclock > cap->max_pixelclock ||
+ (cap->standards && !(bt->standards & cap->standards)) ||
+ (bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) ||
+ (!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE)))
+ return false;
+ return fnc == NULL || fnc(t, fnc_handle);
+}
+EXPORT_SYMBOL_GPL(v4l2_valid_dv_timings);
+
+int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t,
+ const struct v4l2_dv_timings_cap *cap,
+ v4l2_check_dv_timings_fnc fnc,
+ void *fnc_handle)
+{
+ u32 i, idx;
+
+ memset(t->reserved, 0, sizeof(t->reserved));
+ for (i = idx = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
+ if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap,
+ fnc, fnc_handle) &&
+ idx++ == t->index) {
+ t->timings = v4l2_dv_timings_presets[i];
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(v4l2_enum_dv_timings_cap);
+
+bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
+ const struct v4l2_dv_timings_cap *cap,
+ unsigned pclock_delta,
+ v4l2_check_dv_timings_fnc fnc,
+ void *fnc_handle)
+{
+ int i;
+
+ if (!v4l2_valid_dv_timings(t, cap, fnc, fnc_handle))
+ return false;
+
+ for (i = 0; i < v4l2_dv_timings_presets[i].bt.width; i++) {
+ if (v4l2_valid_dv_timings(v4l2_dv_timings_presets + i, cap,
+ fnc, fnc_handle) &&
+ v4l2_match_dv_timings(t, v4l2_dv_timings_presets + i,
+ pclock_delta)) {
+ *t = v4l2_dv_timings_presets[i];
+ return true;
+ }
+ }
+ return false;
+}
+EXPORT_SYMBOL_GPL(v4l2_find_dv_timings_cap);
+
+/**
+ * v4l2_match_dv_timings - check if two timings match
+ * @t1 - compare this v4l2_dv_timings struct...
+ * @t2 - with this struct.
+ * @pclock_delta - the allowed pixelclock deviation.
+ *
+ * Compare t1 with t2 with a given margin of error for the pixelclock.
+ */
+bool v4l2_match_dv_timings(const struct v4l2_dv_timings *t1,
+ const struct v4l2_dv_timings *t2,
+ unsigned pclock_delta)
+{
+ if (t1->type != t2->type || t1->type != V4L2_DV_BT_656_1120)
+ return false;
+ if (t1->bt.width == t2->bt.width &&
+ t1->bt.height == t2->bt.height &&
+ t1->bt.interlaced == t2->bt.interlaced &&
+ t1->bt.polarities == t2->bt.polarities &&
+ t1->bt.pixelclock >= t2->bt.pixelclock - pclock_delta &&
+ t1->bt.pixelclock <= t2->bt.pixelclock + pclock_delta &&
+ t1->bt.hfrontporch == t2->bt.hfrontporch &&
+ t1->bt.vfrontporch == t2->bt.vfrontporch &&
+ t1->bt.vsync == t2->bt.vsync &&
+ t1->bt.vbackporch == t2->bt.vbackporch &&
+ (!t1->bt.interlaced ||
+ (t1->bt.il_vfrontporch == t2->bt.il_vfrontporch &&
+ t1->bt.il_vsync == t2->bt.il_vsync &&
+ t1->bt.il_vbackporch == t2->bt.il_vbackporch)))
+ return true;
+ return false;
+}
+EXPORT_SYMBOL_GPL(v4l2_match_dv_timings);
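A short sketch of how a capture driver typically uses the helper above to map incoming timings onto its supported table, much as the hdpvr change earlier in this series does in its s_dv_timings handler. The table and the 250 kHz pixel-clock slack are illustrative placeholders:

#include <linux/kernel.h>
#include <linux/videodev2.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-dv-timings.h>

static const struct v4l2_dv_timings demo_timings[] = {
	V4L2_DV_BT_CEA_1280X720P60,
	V4L2_DV_BT_CEA_1920X1080P60,
};

/* Return the index of the matching entry, or -1 if none matches. */
static int demo_find_timings(const struct v4l2_dv_timings *t)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(demo_timings); i++)
		if (v4l2_match_dv_timings(t, &demo_timings[i], 250000))
			return i;
	return -1;
}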
+
+void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
+ const struct v4l2_dv_timings *t, bool detailed)
+{
+ const struct v4l2_bt_timings *bt = &t->bt;
+ u32 htot, vtot;
+
+ if (t->type != V4L2_DV_BT_656_1120)
+ return;
+
+ htot = V4L2_DV_BT_FRAME_WIDTH(bt);
+ vtot = V4L2_DV_BT_FRAME_HEIGHT(bt);
+
+ if (prefix == NULL)
+ prefix = "";
+
+ pr_info("%s: %s%ux%u%s%u (%ux%u)\n", dev_prefix, prefix,
+ bt->width, bt->height, bt->interlaced ? "i" : "p",
+ (htot * vtot) > 0 ? ((u32)bt->pixelclock / (htot * vtot)) : 0,
+ htot, vtot);
+
+ if (!detailed)
+ return;
+
+ pr_info("%s: horizontal: fp = %u, %ssync = %u, bp = %u\n",
+ dev_prefix, bt->hfrontporch,
+ (bt->polarities & V4L2_DV_HSYNC_POS_POL) ? "+" : "-",
+ bt->hsync, bt->hbackporch);
+ pr_info("%s: vertical: fp = %u, %ssync = %u, bp = %u\n",
+ dev_prefix, bt->vfrontporch,
+ (bt->polarities & V4L2_DV_VSYNC_POS_POL) ? "+" : "-",
+ bt->vsync, bt->vbackporch);
+ pr_info("%s: pixelclock: %llu\n", dev_prefix, bt->pixelclock);
+ pr_info("%s: flags (0x%x):%s%s%s%s\n", dev_prefix, bt->flags,
+ (bt->flags & V4L2_DV_FL_REDUCED_BLANKING) ?
+ " REDUCED_BLANKING" : "",
+ (bt->flags & V4L2_DV_FL_CAN_REDUCE_FPS) ?
+ " CAN_REDUCE_FPS" : "",
+ (bt->flags & V4L2_DV_FL_REDUCED_FPS) ?
+ " REDUCED_FPS" : "",
+ (bt->flags & V4L2_DV_FL_HALF_LINE) ?
+ " HALF_LINE" : "");
+ pr_info("%s: standards (0x%x):%s%s%s%s\n", dev_prefix, bt->standards,
+ (bt->standards & V4L2_DV_BT_STD_CEA861) ? " CEA" : "",
+ (bt->standards & V4L2_DV_BT_STD_DMT) ? " DMT" : "",
+ (bt->standards & V4L2_DV_BT_STD_CVT) ? " CVT" : "",
+ (bt->standards & V4L2_DV_BT_STD_GTF) ? " GTF" : "");
+}
+EXPORT_SYMBOL_GPL(v4l2_print_dv_timings);
+
+/*
+ * CVT defines
+ * Based on Coordinated Video Timings Standard
+ * version 1.1 September 10, 2003
+ */
+
+#define CVT_PXL_CLK_GRAN 250000 /* pixel clock granularity */
+
+/* Normal blanking */
+#define CVT_MIN_V_BPORCH 7 /* lines */
+#define CVT_MIN_V_PORCH_RND 3 /* lines */
+#define CVT_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */
+
+/* Normal blanking for CVT uses GTF to calculate horizontal blanking */
+#define CVT_CELL_GRAN 8 /* character cell granularity */
+#define CVT_M 600 /* blanking formula gradient */
+#define CVT_C 40 /* blanking formula offset */
+#define CVT_K 128 /* blanking formula scaling factor */
+#define CVT_J 20 /* blanking formula scaling factor */
+#define CVT_C_PRIME (((CVT_C - CVT_J) * CVT_K / 256) + CVT_J)
+#define CVT_M_PRIME (CVT_K * CVT_M / 256)
+
+/* Reduced Blanking */
+#define CVT_RB_MIN_V_BPORCH 7 /* lines */
+#define CVT_RB_V_FPORCH 3 /* lines */
+#define CVT_RB_MIN_V_BLANK 460 /* us */
+#define CVT_RB_H_SYNC 32 /* pixels */
+#define CVT_RB_H_BPORCH 80 /* pixels */
+#define CVT_RB_H_BLANK 160 /* pixels */
+
+/** v4l2_detect_cvt - detect if the given timings follow the CVT standard
+ * @frame_height - the total height of the frame (including blanking) in lines.
+ * @hfreq - the horizontal frequency in Hz.
+ * @vsync - the height of the vertical sync in lines.
+ * @polarities - the horizontal and vertical polarities (same as struct
+ * v4l2_bt_timings polarities).
+ * @fmt - the resulting timings.
+ *
+ * This function will attempt to detect if the given values correspond to a
+ * valid CVT format. If so, then it will return true, and fmt will be filled
+ * in with the found CVT timings.
+ */
+bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
+ u32 polarities, struct v4l2_dv_timings *fmt)
+{
+ int v_fp, v_bp, h_fp, h_bp, hsync;
+ int frame_width, image_height, image_width;
+ bool reduced_blanking;
+ unsigned pix_clk;
+
+ if (vsync < 4 || vsync > 7)
+ return false;
+
+ if (polarities == V4L2_DV_VSYNC_POS_POL)
+ reduced_blanking = false;
+ else if (polarities == V4L2_DV_HSYNC_POS_POL)
+ reduced_blanking = true;
+ else
+ return false;
+
+ /* Vertical */
+ if (reduced_blanking) {
+ v_fp = CVT_RB_V_FPORCH;
+ v_bp = (CVT_RB_MIN_V_BLANK * hfreq + 1999999) / 1000000;
+ v_bp -= vsync + v_fp;
+
+ if (v_bp < CVT_RB_MIN_V_BPORCH)
+ v_bp = CVT_RB_MIN_V_BPORCH;
+ } else {
+ v_fp = CVT_MIN_V_PORCH_RND;
+ v_bp = (CVT_MIN_VSYNC_BP * hfreq + 1999999) / 1000000 - vsync;
+
+ if (v_bp < CVT_MIN_V_BPORCH)
+ v_bp = CVT_MIN_V_BPORCH;
+ }
+ image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;
+
+ /* Aspect ratio based on vsync */
+ switch (vsync) {
+ case 4:
+ image_width = (image_height * 4) / 3;
+ break;
+ case 5:
+ image_width = (image_height * 16) / 9;
+ break;
+ case 6:
+ image_width = (image_height * 16) / 10;
+ break;
+ case 7:
+ /* special case */
+ if (image_height == 1024)
+ image_width = (image_height * 5) / 4;
+ else if (image_height == 768)
+ image_width = (image_height * 15) / 9;
+ else
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ image_width = image_width & ~7;
+
+ /* Horizontal */
+ if (reduced_blanking) {
+ pix_clk = (image_width + CVT_RB_H_BLANK) * hfreq;
+ pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN;
+
+ h_bp = CVT_RB_H_BPORCH;
+ hsync = CVT_RB_H_SYNC;
+ h_fp = CVT_RB_H_BLANK - h_bp - hsync;
+
+ frame_width = image_width + CVT_RB_H_BLANK;
+ } else {
+ unsigned ideal_duty_cycle_per_myriad =
+ 100 * CVT_C_PRIME - (CVT_M_PRIME * 100000) / hfreq;
+ int h_blank;
+
+ if (ideal_duty_cycle_per_myriad < 2000)
+ ideal_duty_cycle_per_myriad = 2000;
+
+ h_blank = image_width * ideal_duty_cycle_per_myriad /
+ (10000 - ideal_duty_cycle_per_myriad);
+ h_blank = (h_blank / (2 * CVT_CELL_GRAN)) * 2 * CVT_CELL_GRAN;
+
+ pix_clk = (image_width + h_blank) * hfreq;
+ pix_clk = (pix_clk / CVT_PXL_CLK_GRAN) * CVT_PXL_CLK_GRAN;
+
+ h_bp = h_blank / 2;
+ frame_width = image_width + h_blank;
+
+ hsync = (frame_width * 8 + 50) / 100;
+ hsync = hsync - hsync % CVT_CELL_GRAN;
+ h_fp = h_blank - hsync - h_bp;
+ }
+
+ fmt->type = V4L2_DV_BT_656_1120;
+ fmt->bt.polarities = polarities;
+ fmt->bt.width = image_width;
+ fmt->bt.height = image_height;
+ fmt->bt.hfrontporch = h_fp;
+ fmt->bt.vfrontporch = v_fp;
+ fmt->bt.hsync = hsync;
+ fmt->bt.vsync = vsync;
+ fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
+ fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
+ fmt->bt.pixelclock = pix_clk;
+ fmt->bt.standards = V4L2_DV_BT_STD_CVT;
+ if (reduced_blanking)
+ fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
+ return true;
+}
+EXPORT_SYMBOL_GPL(v4l2_detect_cvt);
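A sketch of how a receiver driver might feed measured sync counts into the detector above; the inputs would come from hardware registers, and the fallback to GTF with an assumed 16:9 screen mirrors what several HDMI/VGA receivers do. The names and the polarity choice are illustrative:

#include <linux/types.h>
#include <linux/videodev2.h>
#include <media/v4l2-dv-timings.h>

/*
 * frame_height: total lines per frame, hfreq: line rate in Hz,
 * vsync: vertical sync width in lines, as measured by the receiver.
 */
static bool demo_detect(unsigned frame_height, unsigned hfreq, unsigned vsync,
			struct v4l2_dv_timings *t)
{
	/* Positive vsync polarity selects the normal-blanking CVT branch. */
	if (v4l2_detect_cvt(frame_height, hfreq, vsync,
			    V4L2_DV_VSYNC_POS_POL, t))
		return true;
	/* Fall back to default GTF with an assumed 16:9 aspect ratio. */
	return v4l2_detect_gtf(frame_height, hfreq, vsync,
			       V4L2_DV_VSYNC_POS_POL,
			       (struct v4l2_fract){ 16, 9 }, t);
}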
+
+/*
+ * GTF defines
+ * Based on Generalized Timing Formula Standard
+ * Version 1.1 September 2, 1999
+ */
+
+#define GTF_PXL_CLK_GRAN 250000 /* pixel clock granularity */
+
+#define GTF_MIN_VSYNC_BP 550 /* min time of vsync + back porch (us) */
+#define GTF_V_FP 1 /* vertical front porch (lines) */
+#define GTF_CELL_GRAN 8 /* character cell granularity */
+
+/* Default */
+#define GTF_D_M 600 /* blanking formula gradient */
+#define GTF_D_C 40 /* blanking formula offset */
+#define GTF_D_K 128 /* blanking formula scaling factor */
+#define GTF_D_J 20 /* blanking formula scaling factor */
+#define GTF_D_C_PRIME ((((GTF_D_C - GTF_D_J) * GTF_D_K) / 256) + GTF_D_J)
+#define GTF_D_M_PRIME ((GTF_D_K * GTF_D_M) / 256)
+
+/* Secondary */
+#define GTF_S_M 3600 /* blanking formula gradient */
+#define GTF_S_C 40 /* blanking formula offset */
+#define GTF_S_K 128 /* blanking formula scaling factor */
+#define GTF_S_J 35 /* blanking formula scaling factor */
+#define GTF_S_C_PRIME ((((GTF_S_C - GTF_S_J) * GTF_S_K) / 256) + GTF_S_J)
+#define GTF_S_M_PRIME ((GTF_S_K * GTF_S_M) / 256)
+
+/** v4l2_detect_gtf - detect if the given timings follow the GTF standard
+ * @frame_height - the total height of the frame (including blanking) in lines.
+ * @hfreq - the horizontal frequency in Hz.
+ * @vsync - the height of the vertical sync in lines.
+ * @polarities - the horizontal and vertical polarities (same as struct
+ * v4l2_bt_timings polarities).
+ * @aspect - preferred aspect ratio. GTF has no method of determining the
+ * aspect ratio in order to derive the image width from the
+ * image height, so it has to be passed explicitly. Usually
+ * the native screen aspect ratio is used for this. If it
+ * is not filled in correctly, then 16:9 will be assumed.
+ * @fmt - the resulting timings.
+ *
+ * This function will attempt to detect if the given values correspond to a
+ * valid GTF format. If so, then it will return true, and fmt will be filled
+ * in with the found GTF timings.
+ */
+bool v4l2_detect_gtf(unsigned frame_height,
+ unsigned hfreq,
+ unsigned vsync,
+ u32 polarities,
+ struct v4l2_fract aspect,
+ struct v4l2_dv_timings *fmt)
+{
+ int pix_clk;
+ int v_fp, v_bp, h_fp, hsync;
+ int frame_width, image_height, image_width;
+ bool default_gtf;
+ int h_blank;
+
+ if (vsync != 3)
+ return false;
+
+ if (polarities == V4L2_DV_VSYNC_POS_POL)
+ default_gtf = true;
+ else if (polarities == V4L2_DV_HSYNC_POS_POL)
+ default_gtf = false;
+ else
+ return false;
+
+ /* Vertical */
+ v_fp = GTF_V_FP;
+ v_bp = (GTF_MIN_VSYNC_BP * hfreq + 999999) / 1000000 - vsync;
+ image_height = (frame_height - v_fp - vsync - v_bp + 1) & ~0x1;
+
+ if (aspect.numerator == 0 || aspect.denominator == 0) {
+ aspect.numerator = 16;
+ aspect.denominator = 9;
+ }
+ image_width = ((image_height * aspect.numerator) / aspect.denominator);
+
+ /* Horizontal */
+ if (default_gtf)
+ h_blank = ((image_width * GTF_D_C_PRIME * hfreq) -
+ (image_width * GTF_D_M_PRIME * 1000) +
+ (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000) / 2) /
+ (hfreq * (100 - GTF_D_C_PRIME) + GTF_D_M_PRIME * 1000);
+ else
+ h_blank = ((image_width * GTF_S_C_PRIME * hfreq) -
+ (image_width * GTF_S_M_PRIME * 1000) +
+ (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000) / 2) /
+ (hfreq * (100 - GTF_S_C_PRIME) + GTF_S_M_PRIME * 1000);
+
+ h_blank = h_blank - h_blank % (2 * GTF_CELL_GRAN);
+ frame_width = image_width + h_blank;
+
+ pix_clk = (image_width + h_blank) * hfreq;
+ pix_clk = pix_clk / GTF_PXL_CLK_GRAN * GTF_PXL_CLK_GRAN;
+
+ hsync = (frame_width * 8 + 50) / 100;
+ hsync = hsync - hsync % GTF_CELL_GRAN;
+
+ h_fp = h_blank / 2 - hsync;
+
+ fmt->type = V4L2_DV_BT_656_1120;
+ fmt->bt.polarities = polarities;
+ fmt->bt.width = image_width;
+ fmt->bt.height = image_height;
+ fmt->bt.hfrontporch = h_fp;
+ fmt->bt.vfrontporch = v_fp;
+ fmt->bt.hsync = hsync;
+ fmt->bt.vsync = vsync;
+ fmt->bt.hbackporch = frame_width - image_width - h_fp - hsync;
+ fmt->bt.vbackporch = frame_height - image_height - v_fp - vsync;
+ fmt->bt.pixelclock = pix_clk;
+ fmt->bt.standards = V4L2_DV_BT_STD_GTF;
+ if (!default_gtf)
+ fmt->bt.flags |= V4L2_DV_FL_REDUCED_BLANKING;
+ return true;
+}
+EXPORT_SYMBOL_GPL(v4l2_detect_gtf);
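A minimal usage sketch, assuming a hypothetical receiver driver whose measured_*() helpers return the total line count, horizontal frequency, vsync width and sync polarities read back from the hardware (none of these helpers exist in the tree; they only stand in for the driver's own measurement code):

static int my_query_dv_timings(struct v4l2_subdev *sd,
			       struct v4l2_dv_timings *timings)
{
	unsigned total_lines = measured_total_lines(sd);
	unsigned hfreq = measured_hfreq_hz(sd);
	unsigned vsync_lines = measured_vsync_lines(sd);
	u32 pol = measured_sync_polarities(sd);
	struct v4l2_fract aspect = { 16, 9 };	/* native panel aspect */

	if (v4l2_detect_gtf(total_lines, hfreq, vsync_lines, pol,
			    aspect, timings))
		return 0;

	return -ERANGE;	/* no GTF match for the measured timings */
}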
+
+/** v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
+ * 0x15 and 0x16 from the EDID.
+ * @hor_landscape - byte 0x15 from the EDID.
+ * @vert_portrait - byte 0x16 from the EDID.
+ *
+ * Determines the aspect ratio from the EDID.
+ * See VESA Enhanced EDID standard, release A, rev 2, section 3.6.2:
+ * "Horizontal and Vertical Screen Size or Aspect Ratio"
+ */
+struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
+{
+ struct v4l2_fract aspect = { 16, 9 };
+ u32 tmp;
+ u8 ratio;
+
+ /* Nothing filled in, fall back to 16:9 */
+ if (!hor_landscape && !vert_portrait)
+ return aspect;
+ /* Both filled in, so they are interpreted as the screen size in cm */
+ if (hor_landscape && vert_portrait) {
+ aspect.numerator = hor_landscape;
+ aspect.denominator = vert_portrait;
+ return aspect;
+ }
+ /* Only one of the two is filled in, so interpret it as a ratio:
+ (val + 99) / 100 */
+ ratio = hor_landscape | vert_portrait;
+ /* Change some rounded values into the exact aspect ratio */
+ if (ratio == 79) {
+ aspect.numerator = 16;
+ aspect.denominator = 9;
+ } else if (ratio == 34) {
+ aspect.numerator = 4;
+ aspect.denominator = 3;
+ } else if (ratio == 68) {
+ aspect.numerator = 15;
+ aspect.denominator = 9;
+ } else {
+ aspect.numerator = hor_landscape + 99;
+ aspect.denominator = 100;
+ }
+ if (hor_landscape)
+ return aspect;
+ /* The aspect ratio is for portrait, so swap numerator and denominator */
+ tmp = aspect.denominator;
+ aspect.denominator = aspect.numerator;
+ aspect.numerator = tmp;
+ return aspect;
+}
+EXPORT_SYMBOL_GPL(v4l2_calc_aspect_ratio);
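To illustrate the three cases handled above, a few calls and the ratios they produce (aspect_ratio_examples() is illustrative only; in practice the two bytes are taken straight from offsets 0x15 and 0x16 of the EDID base block):

static void aspect_ratio_examples(const u8 *edid)
{
	/* both bytes set: interpreted as the screen size in cm */
	struct v4l2_fract both = v4l2_calc_aspect_ratio(52, 32);  /* 52:32 */

	/* only the landscape byte: 79 encodes ~1.78:1, mapped to exact 16:9 */
	struct v4l2_fract land = v4l2_calc_aspect_ratio(79, 0);   /* 16:9 */

	/* only the portrait byte: same ratio, swapped for portrait */
	struct v4l2_fract port = v4l2_calc_aspect_ratio(0, 79);   /* 9:16 */

	/* the usual source of the two bytes */
	struct v4l2_fract native = v4l2_calc_aspect_ratio(edid[0x15],
							  edid[0x16]);
}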
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index e96497f7c3e..7c437128821 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -196,6 +196,10 @@ static void v4l2_m2m_try_run(struct v4l2_m2m_dev *m2m_dev)
* 2) at least one destination buffer has to be queued,
* 3) streaming has to be on.
*
+ * If a queue is buffered (for example a decoder hardware ringbuffer that has
+ * to be drained before doing streamoff), allow scheduling without v4l2 buffers
+ * on that queue.
+ *
* There may also be additional, custom requirements. In such case the driver
* should supply a custom callback (job_ready in v4l2_m2m_ops) that should
* return 1 if the instance is ready.
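A sketch of how a decoder driver could opt into this, assuming a hypothetical my_decoder_open()/my_ctx pair; it only shows the buffered flag on the bitstream (OUTPUT) queue that the checks below consult, set directly on the m2m context:

static int my_decoder_open(struct file *file)
{
	struct my_ctx *ctx = my_ctx_from_file(file);	/* hypothetical */

	ctx->m2m_ctx = v4l2_m2m_ctx_init(my_m2m_dev, ctx, my_queue_init);
	if (IS_ERR(ctx->m2m_ctx))
		return PTR_ERR(ctx->m2m_ctx);

	/*
	 * The bitstream queue feeds a hardware ring buffer that must be
	 * drained, so allow jobs to be scheduled on it even when no v4l2
	 * buffer is currently queued.
	 */
	ctx->m2m_ctx->out_q_ctx.buffered = true;

	return 0;
}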
@@ -224,7 +228,8 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
}
spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
- if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
+ if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)
+ && !m2m_ctx->out_q_ctx.buffered) {
spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
flags_out);
spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
@@ -232,7 +237,8 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
return;
}
spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
- if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
+ if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)
+ && !m2m_ctx->cap_q_ctx.buffered) {
spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
flags_cap);
spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
@@ -260,6 +266,39 @@ static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
}
/**
+ * v4l2_m2m_cancel_job() - cancel pending jobs for the context
+ *
+ * Called when streamoff or release is invoked on any context:
+ * 1) if the context is currently running, the driver's abort-job callback is
+ * invoked and we wait for the running job to finish,
+ * 2) if the context is queued, it is removed from the job_queue.
+ */
+static void v4l2_m2m_cancel_job(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ struct v4l2_m2m_dev *m2m_dev;
+ unsigned long flags;
+
+ m2m_dev = m2m_ctx->m2m_dev;
+ spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
+ if (m2m_ctx->job_flags & TRANS_RUNNING) {
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
+ dprintk("m2m_ctx %p running, will wait to complete\n", m2m_ctx);
+ wait_event(m2m_ctx->finished,
+ !(m2m_ctx->job_flags & TRANS_RUNNING));
+ } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
+ list_del(&m2m_ctx->queue);
+ m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ dprintk("m2m_ctx: %p had been on queue and was removed\n",
+ m2m_ctx);
+ } else {
+ /* Do nothing, was not on queue/running */
+ spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
+ }
+}
+
+/**
* v4l2_m2m_job_finish() - inform the framework that a job has been finished
* and have it clean up
*
@@ -430,6 +469,9 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
unsigned long flags_job, flags;
int ret;
+ /* wait until the current context is dequeued from job_queue */
+ v4l2_m2m_cancel_job(m2m_ctx);
+
q_ctx = get_queue_ctx(m2m_ctx, type);
ret = vb2_streamoff(&q_ctx->q, type);
if (ret)
@@ -652,27 +694,8 @@ EXPORT_SYMBOL_GPL(v4l2_m2m_ctx_init);
*/
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx)
{
- struct v4l2_m2m_dev *m2m_dev;
- unsigned long flags;
-
- m2m_dev = m2m_ctx->m2m_dev;
-
- spin_lock_irqsave(&m2m_dev->job_spinlock, flags);
- if (m2m_ctx->job_flags & TRANS_RUNNING) {
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
- m2m_dev->m2m_ops->job_abort(m2m_ctx->priv);
- dprintk("m2m_ctx %p running, will wait to complete", m2m_ctx);
- wait_event(m2m_ctx->finished, !(m2m_ctx->job_flags & TRANS_RUNNING));
- } else if (m2m_ctx->job_flags & TRANS_QUEUED) {
- list_del(&m2m_ctx->queue);
- m2m_ctx->job_flags &= ~(TRANS_QUEUED | TRANS_RUNNING);
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
- dprintk("m2m_ctx: %p had been on queue and was removed\n",
- m2m_ctx);
- } else {
- /* Do nothing, was not on queue/running */
- spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags);
- }
+ /* wait until the current context is dequeued from job_queue */
+ v4l2_m2m_cancel_job(m2m_ctx);
vb2_queue_release(&m2m_ctx->cap_q_ctx.q);
vb2_queue_release(&m2m_ctx->out_q_ctx.q);
diff --git a/drivers/media/v4l2-core/v4l2-of.c b/drivers/media/v4l2-core/v4l2-of.c
index aa59639d013..a6478dca0cd 100644
--- a/drivers/media/v4l2-core/v4l2-of.c
+++ b/drivers/media/v4l2-core/v4l2-of.c
@@ -100,6 +100,10 @@ static void v4l2_of_parse_parallel_bus(const struct device_node *node,
if (!of_property_read_u32(node, "data-shift", &v))
bus->data_shift = v;
+ if (!of_property_read_u32(node, "sync-on-green-active", &v))
+ flags |= v ? V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH :
+ V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW;
+
bus->flags = flags;
}
@@ -173,12 +177,8 @@ struct device_node *v4l2_of_get_next_endpoint(const struct device_node *parent,
if (node)
parent = node;
- for_each_child_of_node(parent, node) {
- if (!of_node_cmp(node->name, "port")) {
- port = node;
- break;
- }
- }
+ port = of_get_child_by_name(parent, "port");
+
if (port) {
/* Found a port, get an endpoint. */
endpoint = of_get_next_child(port, NULL);
@@ -190,6 +190,7 @@ struct device_node *v4l2_of_get_next_endpoint(const struct device_node *parent,
if (!endpoint)
pr_err("%s(): no endpoint nodes specified for %s\n",
__func__, parent->full_name);
+ of_node_put(node);
} else {
port = of_get_parent(prev);
if (!port)
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 9fc4bab2da9..594c75eab5a 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -334,6 +334,41 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
}
/**
+ * __verify_length() - Verify that the bytesused value for each plane fits in
+ * the plane length and that the data offset doesn't exceed the bytesused value.
+ */
+static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+ unsigned int length;
+ unsigned int plane;
+
+ if (!V4L2_TYPE_IS_OUTPUT(b->type))
+ return 0;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ length = (b->memory == V4L2_MEMORY_USERPTR)
+ ? b->m.planes[plane].length
+ : vb->v4l2_planes[plane].length;
+
+ if (b->m.planes[plane].bytesused > length)
+ return -EINVAL;
+ if (b->m.planes[plane].data_offset >=
+ b->m.planes[plane].bytesused)
+ return -EINVAL;
+ }
+ } else {
+ length = (b->memory == V4L2_MEMORY_USERPTR)
+ ? b->length : vb->v4l2_planes[0].length;
+
+ if (b->bytesused > length)
+ return -EINVAL;
+ }
+
+ return 0;
+}
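For illustration, the kind of multi-planar OUTPUT buffer this check rejects (fill_bad_output_buffer() is a hypothetical helper, not driver code):

static void fill_bad_output_buffer(struct v4l2_buffer *b,
				   struct v4l2_plane *planes)
{
	b->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
	b->memory = V4L2_MEMORY_MMAP;
	b->m.planes = planes;
	b->length = 1;	/* one plane */

	/* rejected if this exceeds the length of the plane set up by vb2 */
	planes[0].bytesused = 2 * 1024 * 1024;

	/* also rejected: data_offset must stay below bytesused */
	planes[0].data_offset = planes[0].bytesused;
}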
+
+/**
* __buffer_in_use() - return true if the buffer is in use and
* the queue cannot be freed (by the means of REQBUFS(0)) call
*/
@@ -1167,6 +1202,10 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
struct vb2_queue *q = vb->vb2_queue;
int ret;
+ ret = __verify_length(vb, b);
+ if (ret < 0)
+ return ret;
+
switch (q->memory) {
case V4L2_MEMORY_MMAP:
ret = __qbuf_mmap(vb, b);
@@ -1192,108 +1231,31 @@ static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
return ret;
}
-/**
- * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
- * @q: videobuf2 queue
- * @b: buffer structure passed from userspace to vidioc_prepare_buf
- * handler in driver
- *
- * Should be called from vidioc_prepare_buf ioctl handler of a driver.
- * This function:
- * 1) verifies the passed buffer,
- * 2) calls buf_prepare callback in the driver (if provided), in which
- * driver-specific buffer initialization can be performed,
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_prepare_buf handler in driver.
- */
-int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
-{
- struct vb2_buffer *vb;
- int ret;
-
- if (q->fileio) {
- dprintk(1, "%s(): file io in progress\n", __func__);
- return -EBUSY;
- }
-
- if (b->type != q->type) {
- dprintk(1, "%s(): invalid buffer type\n", __func__);
- return -EINVAL;
- }
-
- if (b->index >= q->num_buffers) {
- dprintk(1, "%s(): buffer index out of range\n", __func__);
- return -EINVAL;
- }
-
- vb = q->bufs[b->index];
- if (NULL == vb) {
- /* Should never happen */
- dprintk(1, "%s(): buffer is NULL\n", __func__);
- return -EINVAL;
- }
-
- if (b->memory != q->memory) {
- dprintk(1, "%s(): invalid memory type\n", __func__);
- return -EINVAL;
- }
-
- if (vb->state != VB2_BUF_STATE_DEQUEUED) {
- dprintk(1, "%s(): invalid buffer state %d\n", __func__, vb->state);
- return -EINVAL;
- }
- ret = __verify_planes_array(vb, b);
- if (ret < 0)
- return ret;
- ret = __buf_prepare(vb, b);
- if (ret < 0)
- return ret;
-
- __fill_v4l2_buffer(vb, b);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(vb2_prepare_buf);
-
-/**
- * vb2_qbuf() - Queue a buffer from userspace
- * @q: videobuf2 queue
- * @b: buffer structure passed from userspace to vidioc_qbuf handler
- * in driver
- *
- * Should be called from vidioc_qbuf ioctl handler of a driver.
- * This function:
- * 1) verifies the passed buffer,
- * 2) if necessary, calls buf_prepare callback in the driver (if provided), in
- * which driver-specific buffer initialization can be performed,
- * 3) if streaming is on, queues the buffer in driver by the means of buf_queue
- * callback for processing.
- *
- * The return values from this function are intended to be directly returned
- * from vidioc_qbuf handler in driver.
- */
-int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
+static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
+ const char *opname,
+ int (*handler)(struct vb2_queue *,
+ struct v4l2_buffer *,
+ struct vb2_buffer *))
{
struct rw_semaphore *mmap_sem = NULL;
struct vb2_buffer *vb;
- int ret = 0;
+ int ret;
/*
- * In case of user pointer buffers vb2 allocator needs to get direct
- * access to userspace pages. This requires getting read access on
- * mmap semaphore in the current process structure. The same
- * semaphore is taken before calling mmap operation, while both mmap
- * and qbuf are called by the driver or v4l2 core with driver's lock
- * held. To avoid a AB-BA deadlock (mmap_sem then driver's lock in
- * mmap and driver's lock then mmap_sem in qbuf) the videobuf2 core
- * release driver's lock, takes mmap_sem and then takes again driver's
- * lock.
+ * In case of user pointer buffers vb2 allocators need to get direct
+ * access to userspace pages. This requires getting the mmap semaphore
+ * for read access in the current process structure. The same semaphore
+ * is taken before calling mmap operation, while both qbuf/prepare_buf
+ * and mmap are called by the driver or v4l2 core with the driver's lock
+ * held. To avoid an AB-BA deadlock (mmap_sem then driver's lock in mmap
+ * and driver's lock then mmap_sem in qbuf/prepare_buf) the videobuf2
+ * core releases the driver's lock, takes mmap_sem and then takes the
+ * driver's lock again.
*
- * To avoid race with other vb2 calls, which might be called after
- * releasing driver's lock, this operation is performed at the
- * beggining of qbuf processing. This way the queue status is
- * consistent after getting driver's lock back.
+ * To avoid racing with other vb2 calls, which might be called after
+ * releasing the driver's lock, this operation is performed at the
+ * beginning of qbuf/prepare_buf processing. This way the queue status
+ * is consistent after getting the driver's lock back.
*/
if (q->memory == V4L2_MEMORY_USERPTR) {
mmap_sem = &current->mm->mmap_sem;
@@ -1303,19 +1265,19 @@ int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
}
if (q->fileio) {
- dprintk(1, "qbuf: file io in progress\n");
+ dprintk(1, "%s(): file io in progress\n", opname);
ret = -EBUSY;
goto unlock;
}
if (b->type != q->type) {
- dprintk(1, "qbuf: invalid buffer type\n");
+ dprintk(1, "%s(): invalid buffer type\n", opname);
ret = -EINVAL;
goto unlock;
}
if (b->index >= q->num_buffers) {
- dprintk(1, "qbuf: buffer index out of range\n");
+ dprintk(1, "%s(): buffer index out of range\n", opname);
ret = -EINVAL;
goto unlock;
}
@@ -1323,31 +1285,83 @@ int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
vb = q->bufs[b->index];
if (NULL == vb) {
/* Should never happen */
- dprintk(1, "qbuf: buffer is NULL\n");
+ dprintk(1, "%s(): buffer is NULL\n", opname);
ret = -EINVAL;
goto unlock;
}
if (b->memory != q->memory) {
- dprintk(1, "qbuf: invalid memory type\n");
+ dprintk(1, "%s(): invalid memory type\n", opname);
ret = -EINVAL;
goto unlock;
}
+
ret = __verify_planes_array(vb, b);
if (ret)
goto unlock;
+ ret = handler(q, b, vb);
+ if (ret)
+ goto unlock;
+
+ /* Fill buffer information for the userspace */
+ __fill_v4l2_buffer(vb, b);
+
+ dprintk(1, "%s() of buffer %d succeeded\n", opname, vb->v4l2_buf.index);
+unlock:
+ if (mmap_sem)
+ up_read(mmap_sem);
+ return ret;
+}
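The lock ordering the comment above is about, summarized for reference:

/*
 *   mmap() path                        naive qbuf()/prepare_buf() path
 *   -----------                        -------------------------------
 *   down_read(&mm->mmap_sem)           mutex_lock(driver/queue lock)
 *   mutex_lock(driver/queue lock)      down_read(&mm->mmap_sem)
 *
 * Taken in opposite order on the two paths, the two locks could deadlock.
 * That is why, for USERPTR buffers, the helper above drops the driver's
 * lock, takes mmap_sem first and only then re-takes the driver's lock,
 * so both paths end up with the same mmap_sem -> driver-lock order.
 */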
+
+static int __vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
+ struct vb2_buffer *vb)
+{
+ if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+ dprintk(1, "%s(): invalid buffer state %d\n", __func__,
+ vb->state);
+ return -EINVAL;
+ }
+
+ return __buf_prepare(vb, b);
+}
+
+/**
+ * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
+ * @q: videobuf2 queue
+ * @b: buffer structure passed from userspace to vidioc_prepare_buf
+ * handler in driver
+ *
+ * Should be called from vidioc_prepare_buf ioctl handler of a driver.
+ * This function:
+ * 1) verifies the passed buffer,
+ * 2) calls buf_prepare callback in the driver (if provided), in which
+ * driver-specific buffer initialization can be performed,
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_prepare_buf handler in driver.
+ */
+int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+ return vb2_queue_or_prepare_buf(q, b, "prepare_buf", __vb2_prepare_buf);
+}
+EXPORT_SYMBOL_GPL(vb2_prepare_buf);
+
+static int __vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b,
+ struct vb2_buffer *vb)
+{
+ int ret;
+
switch (vb->state) {
case VB2_BUF_STATE_DEQUEUED:
ret = __buf_prepare(vb, b);
if (ret)
- goto unlock;
+ return ret;
case VB2_BUF_STATE_PREPARED:
break;
default:
dprintk(1, "qbuf: buffer already in use\n");
- ret = -EINVAL;
- goto unlock;
+ return -EINVAL;
}
/*
@@ -1364,14 +1378,29 @@ int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
if (q->streaming)
__enqueue_in_driver(vb);
- /* Fill buffer information for the userspace */
- __fill_v4l2_buffer(vb, b);
+ return 0;
+}
- dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index);
-unlock:
- if (mmap_sem)
- up_read(mmap_sem);
- return ret;
+/**
+ * vb2_qbuf() - Queue a buffer from userspace
+ * @q: videobuf2 queue
+ * @b: buffer structure passed from userspace to vidioc_qbuf handler
+ * in driver
+ *
+ * Should be called from vidioc_qbuf ioctl handler of a driver.
+ * This function:
+ * 1) verifies the passed buffer,
+ * 2) if necessary, calls buf_prepare callback in the driver (if provided), in
+ * which driver-specific buffer initialization can be performed,
+ * 3) if streaming is on, queues the buffer in the driver by means of the
+ * buf_queue callback for processing.
+ *
+ * The return values from this function are intended to be directly returned
+ * from vidioc_qbuf handler in driver.
+ */
+int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+ return vb2_queue_or_prepare_buf(q, b, "qbuf", __vb2_qbuf);
}
EXPORT_SYMBOL_GPL(vb2_qbuf);
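A sketch of the driver-side ioctl handlers that the documentation above refers to (my_dev and its queue field are hypothetical; many drivers simply wire up the generic vb2_ioctl_qbuf()/vb2_ioctl_prepare_buf() helpers instead):

static int my_vidioc_prepare_buf(struct file *file, void *priv,
				 struct v4l2_buffer *b)
{
	struct my_dev *dev = video_drvdata(file);

	return vb2_prepare_buf(&dev->queue, b);	/* return value passed as-is */
}

static int my_vidioc_qbuf(struct file *file, void *priv,
			  struct v4l2_buffer *b)
{
	struct my_dev *dev = video_drvdata(file);

	return vb2_qbuf(&dev->queue, b);
}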
@@ -2578,8 +2607,15 @@ EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
struct video_device *vdev = video_devdata(file);
+ struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+ int err;
- return vb2_mmap(vdev->queue, vma);
+ if (lock && mutex_lock_interruptible(lock))
+ return -ERESTARTSYS;
+ err = vb2_mmap(vdev->queue, vma);
+ if (lock)
+ mutex_unlock(lock);
+ return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_mmap);
@@ -2685,8 +2721,15 @@ unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags)
{
struct video_device *vdev = video_devdata(file);
+ struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+ int ret;
- return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
+ if (lock && mutex_lock_interruptible(lock))
+ return -ERESTARTSYS;
+ ret = vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
+ if (lock)
+ mutex_unlock(lock);
+ return ret;
}
EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
#endif
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 3c157faee64..0d68eb1a5ec 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -3094,6 +3094,10 @@ static struct mfd_cell db8500_prcmu_devs[] = {
.pdata_size = sizeof(db8500_cpufreq_table),
},
{
+ .name = "cpuidle-dbx500",
+ .of_compatible = "stericsson,cpuidle-dbx500",
+ },
+ {
.name = "db8500-thermal",
.num_resources = ARRAY_SIZE(db8500_thsens_resources),
.resources = db8500_thsens_resources,
diff --git a/drivers/mfd/rts5227.c b/drivers/mfd/rts5227.c
index fc831dcb148..164b7faa70c 100644
--- a/drivers/mfd/rts5227.c
+++ b/drivers/mfd/rts5227.c
@@ -44,7 +44,7 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OLT_LED_CTL, 0x0F, 0x02);
/* Configure LTR */
pcie_capability_read_word(pcr->pci, PCI_EXP_DEVCTL2, &cap);
- if (cap & PCI_EXP_LTR_EN)
+ if (cap & PCI_EXP_DEVCTL2_LTR_EN)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, LTR_CTL, 0xFF, 0xA3);
/* Configure OBFF */
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, OBFF_CFG, 0x03, 0x03);
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index f7b90661e32..5be808406ed 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -19,7 +19,6 @@
#include <linux/module.h>
#include <linux/of.h>
-#include <linux/pinctrl/consumer.h>
/* Serialize access to ssc_list and user count */
static DEFINE_SPINLOCK(user_lock);
@@ -66,14 +65,19 @@ EXPORT_SYMBOL(ssc_request);
void ssc_free(struct ssc_device *ssc)
{
+ bool disable_clk = true;
+
spin_lock(&user_lock);
- if (ssc->user) {
+ if (ssc->user)
ssc->user--;
- clk_disable_unprepare(ssc->clk);
- } else {
+ else {
+ disable_clk = false;
dev_dbg(&ssc->pdev->dev, "device already free\n");
}
spin_unlock(&user_lock);
+
+ if (disable_clk)
+ clk_disable_unprepare(ssc->clk);
}
EXPORT_SYMBOL(ssc_free);
@@ -132,13 +136,6 @@ static int ssc_probe(struct platform_device *pdev)
struct resource *regs;
struct ssc_device *ssc;
const struct atmel_ssc_platform_data *plat_dat;
- struct pinctrl *pinctrl;
-
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl)) {
- dev_err(&pdev->dev, "Failed to request pinctrl\n");
- return PTR_ERR(pinctrl);
- }
ssc = devm_kzalloc(&pdev->dev, sizeof(struct ssc_device), GFP_KERNEL);
if (!ssc) {
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index f32550a74bd..464419b3644 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -311,6 +311,7 @@ static ssize_t c2port_show_name(struct device *dev,
return sprintf(buf, "%s\n", c2dev->name);
}
+static DEVICE_ATTR(name, 0444, c2port_show_name, NULL);
static ssize_t c2port_show_flash_blocks_num(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -320,6 +321,7 @@ static ssize_t c2port_show_flash_blocks_num(struct device *dev,
return sprintf(buf, "%d\n", ops->blocks_num);
}
+static DEVICE_ATTR(flash_blocks_num, 0444, c2port_show_flash_blocks_num, NULL);
static ssize_t c2port_show_flash_block_size(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -329,6 +331,7 @@ static ssize_t c2port_show_flash_block_size(struct device *dev,
return sprintf(buf, "%d\n", ops->block_size);
}
+static DEVICE_ATTR(flash_block_size, 0444, c2port_show_flash_block_size, NULL);
static ssize_t c2port_show_flash_size(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -338,18 +341,18 @@ static ssize_t c2port_show_flash_size(struct device *dev,
return sprintf(buf, "%d\n", ops->blocks_num * ops->block_size);
}
+static DEVICE_ATTR(flash_size, 0444, c2port_show_flash_size, NULL);
-static ssize_t c2port_show_access(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t access_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", c2dev->access);
}
-static ssize_t c2port_store_access(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t access_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct c2port_device *c2dev = dev_get_drvdata(dev);
struct c2port_ops *ops = c2dev->ops;
@@ -375,6 +378,7 @@ static ssize_t c2port_store_access(struct device *dev,
return count;
}
+static DEVICE_ATTR_RW(access);
static ssize_t c2port_store_reset(struct device *dev,
struct device_attribute *attr,
@@ -395,6 +399,7 @@ static ssize_t c2port_store_reset(struct device *dev,
return count;
}
+static DEVICE_ATTR(reset, 0200, NULL, c2port_store_reset);
static ssize_t __c2port_show_dev_id(struct c2port_device *dev, char *buf)
{
@@ -431,6 +436,7 @@ static ssize_t c2port_show_dev_id(struct device *dev,
return ret;
}
+static DEVICE_ATTR(dev_id, 0444, c2port_show_dev_id, NULL);
static ssize_t __c2port_show_rev_id(struct c2port_device *dev, char *buf)
{
@@ -467,6 +473,7 @@ static ssize_t c2port_show_rev_id(struct device *dev,
return ret;
}
+static DEVICE_ATTR(rev_id, 0444, c2port_show_rev_id, NULL);
static ssize_t c2port_show_flash_access(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -536,6 +543,8 @@ static ssize_t c2port_store_flash_access(struct device *dev,
return count;
}
+static DEVICE_ATTR(flash_access, 0644, c2port_show_flash_access,
+ c2port_store_flash_access);
static ssize_t __c2port_write_flash_erase(struct c2port_device *dev)
{
@@ -616,6 +625,7 @@ static ssize_t c2port_store_flash_erase(struct device *dev,
return count;
}
+static DEVICE_ATTR(flash_erase, 0200, NULL, c2port_store_flash_erase);
static ssize_t __c2port_read_flash_data(struct c2port_device *dev,
char *buffer, loff_t offset, size_t count)
@@ -846,35 +856,40 @@ static ssize_t c2port_write_flash_data(struct file *filp, struct kobject *kobj,
return ret;
}
+/* size is computed at run-time */
+static BIN_ATTR(flash_data, 0644, c2port_read_flash_data,
+ c2port_write_flash_data, 0);
/*
* Class attributes
*/
+static struct attribute *c2port_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_flash_blocks_num.attr,
+ &dev_attr_flash_block_size.attr,
+ &dev_attr_flash_size.attr,
+ &dev_attr_access.attr,
+ &dev_attr_reset.attr,
+ &dev_attr_dev_id.attr,
+ &dev_attr_rev_id.attr,
+ &dev_attr_flash_access.attr,
+ &dev_attr_flash_erase.attr,
+ NULL,
+};
-static struct device_attribute c2port_attrs[] = {
- __ATTR(name, 0444, c2port_show_name, NULL),
- __ATTR(flash_blocks_num, 0444, c2port_show_flash_blocks_num, NULL),
- __ATTR(flash_block_size, 0444, c2port_show_flash_block_size, NULL),
- __ATTR(flash_size, 0444, c2port_show_flash_size, NULL),
- __ATTR(access, 0644, c2port_show_access, c2port_store_access),
- __ATTR(reset, 0200, NULL, c2port_store_reset),
- __ATTR(dev_id, 0444, c2port_show_dev_id, NULL),
- __ATTR(rev_id, 0444, c2port_show_rev_id, NULL),
-
- __ATTR(flash_access, 0644, c2port_show_flash_access,
- c2port_store_flash_access),
- __ATTR(flash_erase, 0200, NULL, c2port_store_flash_erase),
- __ATTR_NULL,
+static struct bin_attribute *c2port_bin_attrs[] = {
+ &bin_attr_flash_data,
+ NULL,
};
-static struct bin_attribute c2port_bin_attrs = {
- .attr = {
- .name = "flash_data",
- .mode = 0644
- },
- .read = c2port_read_flash_data,
- .write = c2port_write_flash_data,
- /* .size is computed at run-time */
+static const struct attribute_group c2port_group = {
+ .attrs = c2port_attrs,
+ .bin_attrs = c2port_bin_attrs,
+};
+
+static const struct attribute_group *c2port_groups[] = {
+ &c2port_group,
+ NULL,
};
/*
@@ -907,6 +922,8 @@ struct c2port_device *c2port_device_register(char *name,
goto error_idr_alloc;
c2dev->id = ret;
+ bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
+
c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
"c2port%d", c2dev->id);
if (unlikely(IS_ERR(c2dev->dev))) {
@@ -919,12 +936,6 @@ struct c2port_device *c2port_device_register(char *name,
c2dev->ops = ops;
mutex_init(&c2dev->mutex);
- /* Create binary file */
- c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
- ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
- if (unlikely(ret))
- goto error_device_create_bin_file;
-
/* By default C2 port access is off */
c2dev->access = c2dev->flash_access = 0;
ops->access(c2dev, 0);
@@ -937,9 +948,6 @@ struct c2port_device *c2port_device_register(char *name,
return c2dev;
-error_device_create_bin_file:
- device_destroy(c2port_class, 0);
-
error_device_create:
spin_lock_irq(&c2port_idr_lock);
idr_remove(&c2port_idr, c2dev->id);
@@ -959,7 +967,6 @@ void c2port_device_unregister(struct c2port_device *c2dev)
dev_info(c2dev->dev, "C2 port %s removed\n", c2dev->name);
- device_remove_bin_file(c2dev->dev, &c2port_bin_attrs);
spin_lock_irq(&c2port_idr_lock);
idr_remove(&c2port_idr, c2dev->id);
spin_unlock_irq(&c2port_idr_lock);
@@ -984,7 +991,7 @@ static int __init c2port_init(void)
printk(KERN_ERR "c2port: failed to allocate class\n");
return PTR_ERR(c2port_class);
}
- c2port_class->dev_attrs = c2port_attrs;
+ c2port_class->dev_groups = c2port_groups;
return 0;
}
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 00e5fcac8fd..0e8df41aaf1 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -239,7 +239,7 @@ static void enclosure_component_release(struct device *dev)
put_device(dev->parent);
}
-static const struct attribute_group *enclosure_groups[];
+static const struct attribute_group *enclosure_component_groups[];
/**
* enclosure_component_register - add a particular component to an enclosure
@@ -282,7 +282,7 @@ enclosure_component_register(struct enclosure_device *edev,
dev_set_name(cdev, "%u", number);
cdev->release = enclosure_component_release;
- cdev->groups = enclosure_groups;
+ cdev->groups = enclosure_component_groups;
err = device_register(cdev);
if (err) {
@@ -365,25 +365,26 @@ EXPORT_SYMBOL_GPL(enclosure_remove_device);
* sysfs pieces below
*/
-static ssize_t enclosure_show_components(struct device *cdev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t components_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
{
struct enclosure_device *edev = to_enclosure_device(cdev);
return snprintf(buf, 40, "%d\n", edev->components);
}
+static DEVICE_ATTR_RO(components);
-static struct device_attribute enclosure_attrs[] = {
- __ATTR(components, S_IRUGO, enclosure_show_components, NULL),
- __ATTR_NULL
+static struct attribute *enclosure_class_attrs[] = {
+ &dev_attr_components.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(enclosure_class);
static struct class enclosure_class = {
.name = "enclosure",
.owner = THIS_MODULE,
.dev_release = enclosure_release,
- .dev_attrs = enclosure_attrs,
+ .dev_groups = enclosure_class_groups,
};
static const char *const enclosure_status [] = {
@@ -536,15 +537,7 @@ static struct attribute *enclosure_component_attrs[] = {
&dev_attr_type.attr,
NULL
};
-
-static struct attribute_group enclosure_group = {
- .attrs = enclosure_component_attrs,
-};
-
-static const struct attribute_group *enclosure_groups[] = {
- &enclosure_group,
- NULL
-};
+ATTRIBUTE_GROUPS(enclosure_component);
static int __init enclosure_init(void)
{
diff --git a/drivers/misc/hpilo.c b/drivers/misc/hpilo.c
index 621c7a37339..b83e3ca12a4 100644
--- a/drivers/misc/hpilo.c
+++ b/drivers/misc/hpilo.c
@@ -759,7 +759,7 @@ static int ilo_probe(struct pci_dev *pdev,
/* Ignore subsystem_device = 0x1979 (set by BIOS) */
if (pdev->subsystem_device == 0x1979)
- goto out;
+ return 0;
if (max_ccb > MAX_CCB)
max_ccb = MAX_CCB;
@@ -899,7 +899,7 @@ static void __exit ilo_exit(void)
class_destroy(ilo_class);
}
-MODULE_VERSION("1.4");
+MODULE_VERSION("1.4.1");
MODULE_ALIAS(ILO_NAME);
MODULE_DESCRIPTION(ILO_NAME);
MODULE_AUTHOR("David Altobelli <david.altobelli@hp.com>");
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index ce5b75616b4..e8b933111e0 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -149,8 +149,7 @@ static struct inode *ibmasmfs_make_inode(struct super_block *sb, int mode)
return ret;
}
-static struct dentry *ibmasmfs_create_file (struct super_block *sb,
- struct dentry *parent,
+static struct dentry *ibmasmfs_create_file(struct dentry *parent,
const char *name,
const struct file_operations *fops,
void *data,
@@ -163,7 +162,7 @@ static struct dentry *ibmasmfs_create_file (struct super_block *sb,
if (!dentry)
return NULL;
- inode = ibmasmfs_make_inode(sb, S_IFREG | mode);
+ inode = ibmasmfs_make_inode(parent->d_sb, S_IFREG | mode);
if (!inode) {
dput(dentry);
return NULL;
@@ -176,8 +175,7 @@ static struct dentry *ibmasmfs_create_file (struct super_block *sb,
return dentry;
}
-static struct dentry *ibmasmfs_create_dir (struct super_block *sb,
- struct dentry *parent,
+static struct dentry *ibmasmfs_create_dir(struct dentry *parent,
const char *name)
{
struct dentry *dentry;
@@ -187,7 +185,7 @@ static struct dentry *ibmasmfs_create_dir (struct super_block *sb,
if (!dentry)
return NULL;
- inode = ibmasmfs_make_inode(sb, S_IFDIR | 0500);
+ inode = ibmasmfs_make_inode(parent->d_sb, S_IFDIR | 0500);
if (!inode) {
dput(dentry);
return NULL;
@@ -612,20 +610,20 @@ static void ibmasmfs_create_files (struct super_block *sb)
struct dentry *dir;
struct dentry *remote_dir;
sp = list_entry(entry, struct service_processor, node);
- dir = ibmasmfs_create_dir(sb, sb->s_root, sp->dirname);
+ dir = ibmasmfs_create_dir(sb->s_root, sp->dirname);
if (!dir)
continue;
- ibmasmfs_create_file(sb, dir, "command", &command_fops, sp, S_IRUSR|S_IWUSR);
- ibmasmfs_create_file(sb, dir, "event", &event_fops, sp, S_IRUSR|S_IWUSR);
- ibmasmfs_create_file(sb, dir, "reverse_heartbeat", &r_heartbeat_fops, sp, S_IRUSR|S_IWUSR);
+ ibmasmfs_create_file(dir, "command", &command_fops, sp, S_IRUSR|S_IWUSR);
+ ibmasmfs_create_file(dir, "event", &event_fops, sp, S_IRUSR|S_IWUSR);
+ ibmasmfs_create_file(dir, "reverse_heartbeat", &r_heartbeat_fops, sp, S_IRUSR|S_IWUSR);
- remote_dir = ibmasmfs_create_dir(sb, dir, "remote_video");
+ remote_dir = ibmasmfs_create_dir(dir, "remote_video");
if (!remote_dir)
continue;
- ibmasmfs_create_file(sb, remote_dir, "width", &remote_settings_fops, (void *)display_width(sp), S_IRUSR|S_IWUSR);
- ibmasmfs_create_file(sb, remote_dir, "height", &remote_settings_fops, (void *)display_height(sp), S_IRUSR|S_IWUSR);
- ibmasmfs_create_file(sb, remote_dir, "depth", &remote_settings_fops, (void *)display_depth(sp), S_IRUSR|S_IWUSR);
+ ibmasmfs_create_file(remote_dir, "width", &remote_settings_fops, (void *)display_width(sp), S_IRUSR|S_IWUSR);
+ ibmasmfs_create_file(remote_dir, "height", &remote_settings_fops, (void *)display_height(sp), S_IRUSR|S_IWUSR);
+ ibmasmfs_create_file(remote_dir, "depth", &remote_settings_fops, (void *)display_depth(sp), S_IRUSR|S_IWUSR);
}
}
diff --git a/drivers/misc/ics932s401.c b/drivers/misc/ics932s401.c
index 00295367c06..28f51e01fd2 100644
--- a/drivers/misc/ics932s401.c
+++ b/drivers/misc/ics932s401.c
@@ -2,7 +2,7 @@
* A driver for the Integrated Circuits ICS932S401
* Copyright (C) 2008 IBM
*
- * Author: Darrick J. Wong <djwong@us.ibm.com>
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -482,7 +482,7 @@ static int ics932s401_remove(struct i2c_client *client)
module_i2c_driver(ics932s401_driver);
-MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
+MODULE_AUTHOR("Darrick J. Wong <darrick.wong@oracle.com>");
MODULE_DESCRIPTION("ICS932S401 driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 08aad69c8da..2fc0586ce3b 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -43,6 +43,7 @@
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>
#include <linux/debugfs.h>
+#include <linux/vmalloc.h>
#ifdef CONFIG_IDE
#include <linux/ide.h>
@@ -50,6 +51,7 @@
#define DEFAULT_COUNT 10
#define REC_NUM_DEFAULT 10
+#define EXEC_SIZE 64
enum cname {
CN_INVALID,
@@ -68,6 +70,7 @@ enum ctype {
CT_NONE,
CT_PANIC,
CT_BUG,
+ CT_WARNING,
CT_EXCEPTION,
CT_LOOP,
CT_OVERFLOW,
@@ -77,7 +80,12 @@ enum ctype {
CT_WRITE_AFTER_FREE,
CT_SOFTLOCKUP,
CT_HARDLOCKUP,
+ CT_SPINLOCKUP,
CT_HUNG_TASK,
+ CT_EXEC_DATA,
+ CT_EXEC_STACK,
+ CT_EXEC_KMALLOC,
+ CT_EXEC_VMALLOC,
};
static char* cp_name[] = {
@@ -95,6 +103,7 @@ static char* cp_name[] = {
static char* cp_type[] = {
"PANIC",
"BUG",
+ "WARNING",
"EXCEPTION",
"LOOP",
"OVERFLOW",
@@ -104,7 +113,12 @@ static char* cp_type[] = {
"WRITE_AFTER_FREE",
"SOFTLOCKUP",
"HARDLOCKUP",
+ "SPINLOCKUP",
"HUNG_TASK",
+ "EXEC_DATA",
+ "EXEC_STACK",
+ "EXEC_KMALLOC",
+ "EXEC_VMALLOC",
};
static struct jprobe lkdtm;
@@ -121,6 +135,9 @@ static enum cname cpoint = CN_INVALID;
static enum ctype cptype = CT_NONE;
static int count = DEFAULT_COUNT;
static DEFINE_SPINLOCK(count_lock);
+static DEFINE_SPINLOCK(lock_me_up);
+
+static u8 data_area[EXEC_SIZE];
module_param(recur_count, int, 0644);
MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test, "\
@@ -275,6 +292,19 @@ static int recursive_loop(int a)
return recursive_loop(a);
}
+static void do_nothing(void)
+{
+ return;
+}
+
+static void execute_location(void *dst)
+{
+ void (*func)(void) = dst;
+
+ memcpy(dst, do_nothing, EXEC_SIZE);
+ func();
+}
+
static void lkdtm_do_action(enum ctype which)
{
switch (which) {
@@ -284,6 +314,9 @@ static void lkdtm_do_action(enum ctype which)
case CT_BUG:
BUG();
break;
+ case CT_WARNING:
+ WARN_ON(1);
+ break;
case CT_EXCEPTION:
*((int *) 0) = 0;
break;
@@ -295,10 +328,10 @@ static void lkdtm_do_action(enum ctype which)
(void) recursive_loop(0);
break;
case CT_CORRUPT_STACK: {
- volatile u32 data[8];
- volatile u32 *p = data;
+ /* Make sure the compiler creates and uses an 8 char array. */
+ volatile char data[8];
- p[12] = 0x12345678;
+ memset((void *)data, 0, 64);
break;
}
case CT_UNALIGNED_LOAD_STORE_WRITE: {
@@ -340,10 +373,34 @@ static void lkdtm_do_action(enum ctype which)
for (;;)
cpu_relax();
break;
+ case CT_SPINLOCKUP:
+ /* Must be called twice to trigger. */
+ spin_lock(&lock_me_up);
+ break;
case CT_HUNG_TASK:
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
break;
+ case CT_EXEC_DATA:
+ execute_location(data_area);
+ break;
+ case CT_EXEC_STACK: {
+ u8 stack_area[EXEC_SIZE];
+ execute_location(stack_area);
+ break;
+ }
+ case CT_EXEC_KMALLOC: {
+ u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
+ execute_location(kmalloc_area);
+ kfree(kmalloc_area);
+ break;
+ }
+ case CT_EXEC_VMALLOC: {
+ u32 *vmalloc_area = vmalloc(EXEC_SIZE);
+ execute_location(vmalloc_area);
+ vfree(vmalloc_area);
+ break;
+ }
case CT_NONE:
default:
break;
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index 749452f8e2f..d0fdc134068 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -418,15 +418,23 @@ unsigned int mei_amthif_poll(struct mei_device *dev,
struct file *file, poll_table *wait)
{
unsigned int mask = 0;
- mutex_unlock(&dev->device_lock);
+
poll_wait(file, &dev->iamthif_cl.wait, wait);
+
mutex_lock(&dev->device_lock);
- if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
- dev->iamthif_file_object == file) {
+ if (!mei_cl_is_connected(&dev->iamthif_cl)) {
+
+ mask = POLLERR;
+
+ } else if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
+ dev->iamthif_file_object == file) {
+
mask |= (POLLIN | POLLRDNORM);
dev_dbg(&dev->pdev->dev, "run next amthif cb\n");
mei_amthif_run_next_cmd(dev);
}
+ mutex_unlock(&dev->device_lock);
+
return mask;
}
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 9ecd49a7be1..6d0282c08a0 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -47,7 +47,7 @@ static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
id = driver->id_table;
while (id->name[0]) {
- if (!strcmp(dev_name(dev), id->name))
+ if (!strncmp(dev_name(dev), id->name, sizeof(id->name)))
return 1;
id++;
@@ -71,7 +71,7 @@ static int mei_cl_device_probe(struct device *dev)
dev_dbg(dev, "Device probe\n");
- strncpy(id.name, dev_name(dev), MEI_CL_NAME_SIZE);
+ strncpy(id.name, dev_name(dev), sizeof(id.name));
return driver->probe(device, &id);
}
@@ -108,11 +108,13 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
return (len >= PAGE_SIZE) ? (PAGE_SIZE - 1) : len;
}
+static DEVICE_ATTR_RO(modalias);
-static struct device_attribute mei_cl_dev_attrs[] = {
- __ATTR_RO(modalias),
- __ATTR_NULL,
+static struct attribute *mei_cl_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(mei_cl_dev);
static int mei_cl_uevent(struct device *dev, struct kobj_uevent_env *env)
{
@@ -124,7 +126,7 @@ static int mei_cl_uevent(struct device *dev, struct kobj_uevent_env *env)
static struct bus_type mei_cl_bus_type = {
.name = "mei",
- .dev_attrs = mei_cl_dev_attrs,
+ .dev_groups = mei_cl_dev_groups,
.match = mei_cl_device_match,
.probe = mei_cl_device_probe,
.remove = mei_cl_device_remove,
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 21d3f5aa835..e0684b4d9a0 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -635,10 +635,7 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
dev = cl->dev;
- if (cl->state != MEI_FILE_CONNECTED)
- return -ENODEV;
-
- if (dev->dev_state != MEI_DEV_ENABLED)
+ if (!mei_cl_is_connected(cl))
return -ENODEV;
if (cl->read_cb) {
@@ -892,18 +889,22 @@ void mei_cl_all_disconnect(struct mei_device *dev)
/**
- * mei_cl_all_read_wakeup - wake up all readings so they can be interrupted
+ * mei_cl_all_wakeup - wake up all readers and writers so they can be interrupted
*
* @dev - mei device
*/
-void mei_cl_all_read_wakeup(struct mei_device *dev)
+void mei_cl_all_wakeup(struct mei_device *dev)
{
struct mei_cl *cl, *next;
list_for_each_entry_safe(cl, next, &dev->file_list, link) {
if (waitqueue_active(&cl->rx_wait)) {
- dev_dbg(&dev->pdev->dev, "Waking up client!\n");
+ dev_dbg(&dev->pdev->dev, "Waking up reading client!\n");
wake_up_interruptible(&cl->rx_wait);
}
+ if (waitqueue_active(&cl->tx_wait)) {
+ dev_dbg(&dev->pdev->dev, "Waking up writing client!\n");
+ wake_up_interruptible(&cl->tx_wait);
+ }
}
}
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 26b157d8bad..9eb031e9207 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -84,6 +84,13 @@ int mei_cl_flow_ctrl_reduce(struct mei_cl *cl);
/*
* MEI input output function prototype
*/
+static inline bool mei_cl_is_connected(struct mei_cl *cl)
+{
+ return (cl->dev &&
+ cl->dev->dev_state == MEI_DEV_ENABLED &&
+ cl->state == MEI_FILE_CONNECTED);
+}
+
bool mei_cl_is_other_connecting(struct mei_cl *cl);
int mei_cl_disconnect(struct mei_cl *cl);
int mei_cl_connect(struct mei_cl *cl, struct file *file);
@@ -99,7 +106,7 @@ void mei_host_client_init(struct work_struct *work);
void mei_cl_all_disconnect(struct mei_device *dev);
-void mei_cl_all_read_wakeup(struct mei_device *dev);
+void mei_cl_all_wakeup(struct mei_device *dev);
void mei_cl_all_write_clear(struct mei_device *dev);
#endif /* _MEI_CLIENT_H_ */
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index f9296abcf02..6127ab64bb3 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -167,7 +167,7 @@ int mei_hbm_start_req(struct mei_device *dev)
dev->hbm_state = MEI_HBM_IDLE;
if (mei_write_message(dev, mei_hdr, dev->wr_msg.data)) {
- dev_err(&dev->pdev->dev, "version message writet failed\n");
+ dev_err(&dev->pdev->dev, "version message write failed\n");
dev->dev_state = MEI_DEV_RESETTING;
mei_reset(dev, 1);
return -ENODEV;
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index e4f8dec4dc3..3412adcdaeb 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -176,21 +176,18 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr = mei_hcsr_read(hw);
- dev_dbg(&dev->pdev->dev, "before reset HCSR = 0x%08x.\n", hcsr);
-
- hcsr |= (H_RST | H_IG);
+ hcsr |= H_RST | H_IG | H_IS;
if (intr_enable)
hcsr |= H_IE;
else
- hcsr |= ~H_IE;
+ hcsr &= ~H_IE;
- mei_hcsr_set(hw, hcsr);
+ mei_me_reg_write(hw, H_CSR, hcsr);
if (dev->dev_state == MEI_DEV_POWER_DOWN)
mei_me_hw_reset_release(dev);
- dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", mei_hcsr_read(hw));
return 0;
}
@@ -239,14 +236,18 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
if (mei_me_hw_is_ready(dev))
return 0;
+ dev->recvd_hw_ready = false;
mutex_unlock(&dev->device_lock);
err = wait_event_interruptible_timeout(dev->wait_hw_ready,
- dev->recvd_hw_ready, MEI_INTEROP_TIMEOUT);
+ dev->recvd_hw_ready,
+ mei_secs_to_jiffies(MEI_INTEROP_TIMEOUT));
mutex_lock(&dev->device_lock);
if (!err && !dev->recvd_hw_ready) {
+ if (!err)
+ err = -ETIMEDOUT;
dev_err(&dev->pdev->dev,
- "wait hw ready failed. status = 0x%x\n", err);
- return -ETIMEDOUT;
+ "wait hw ready failed. status = %d\n", err);
+ return err;
}
dev->recvd_hw_ready = false;
@@ -483,7 +484,9 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
/* check if ME wants a reset */
if (!mei_hw_is_ready(dev) &&
dev->dev_state != MEI_DEV_RESETTING &&
- dev->dev_state != MEI_DEV_INITIALIZING) {
+ dev->dev_state != MEI_DEV_INITIALIZING &&
+ dev->dev_state != MEI_DEV_POWER_DOWN &&
+ dev->dev_state != MEI_DEV_POWER_UP) {
dev_dbg(&dev->pdev->dev, "FW not ready.\n");
mei_reset(dev, 1);
mutex_unlock(&dev->device_lock);
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index ed1d75203af..92c73118b13 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -148,13 +148,20 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
dev->hbm_state = MEI_HBM_IDLE;
- if (dev->dev_state != MEI_DEV_INITIALIZING) {
+ if (dev->dev_state != MEI_DEV_INITIALIZING &&
+ dev->dev_state != MEI_DEV_POWER_UP) {
if (dev->dev_state != MEI_DEV_DISABLED &&
dev->dev_state != MEI_DEV_POWER_DOWN)
dev->dev_state = MEI_DEV_RESETTING;
+ /* remove all waiting requests */
+ mei_cl_all_write_clear(dev);
+
mei_cl_all_disconnect(dev);
+ /* wake up all readers and writers so they can be interrupted */
+ mei_cl_all_wakeup(dev);
+
/* remove entry if already in list */
dev_dbg(&dev->pdev->dev, "remove iamthif and wd from the file list.\n");
mei_cl_unlink(&dev->wd_cl);
@@ -195,11 +202,6 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
mei_hbm_start_req(dev);
- /* wake up all readings so they can be interrupted */
- mei_cl_all_read_wakeup(dev);
-
- /* remove all waiting requests */
- mei_cl_all_write_clear(dev);
}
EXPORT_SYMBOL_GPL(mei_reset);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 5e11b5b9b65..173ff095be0 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -625,24 +625,32 @@ static unsigned int mei_poll(struct file *file, poll_table *wait)
unsigned int mask = 0;
if (WARN_ON(!cl || !cl->dev))
- return mask;
+ return POLLERR;
dev = cl->dev;
mutex_lock(&dev->device_lock);
- if (dev->dev_state != MEI_DEV_ENABLED)
- goto out;
-
-
- if (cl == &dev->iamthif_cl) {
- mask = mei_amthif_poll(dev, file, wait);
+ if (!mei_cl_is_connected(cl)) {
+ mask = POLLERR;
goto out;
}
mutex_unlock(&dev->device_lock);
+
+
+ if (cl == &dev->iamthif_cl)
+ return mei_amthif_poll(dev, file, wait);
+
poll_wait(file, &cl->tx_wait, wait);
+
mutex_lock(&dev->device_lock);
+
+ if (!mei_cl_is_connected(cl)) {
+ mask = POLLERR;
+ goto out;
+ }
+
if (MEI_WRITE_COMPLETE == cl->writing_state)
mask |= (POLLIN | POLLRDNORM);
diff --git a/drivers/misc/sram.c b/drivers/misc/sram.c
index d87cc91bc01..afe66571ce0 100644
--- a/drivers/misc/sram.c
+++ b/drivers/misc/sram.c
@@ -68,7 +68,8 @@ static int sram_probe(struct platform_device *pdev)
ret = gen_pool_add_virt(sram->pool, (unsigned long)virt_base,
res->start, size, -1);
if (ret < 0) {
- gen_pool_destroy(sram->pool);
+ if (sram->clk)
+ clk_disable_unprepare(sram->clk);
return ret;
}
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index 0a142801635..8d64b681dd9 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -562,7 +562,9 @@ long st_register(struct st_proto_s *new_proto)
if ((st_gdata->protos_registered != ST_EMPTY) &&
(test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
pr_err(" KIM failure complete callback ");
+ spin_lock_irqsave(&st_gdata->lock, flags);
st_reg_complete(st_gdata, err);
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
clear_bit(ST_REG_PENDING, &st_gdata->st_state);
}
return -EINVAL;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index cb56e270da1..2421835d5da 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -133,7 +133,7 @@ MODULE_LICENSE("GPL");
#define VMWARE_BALLOON_CMD(cmd, data, result) \
({ \
unsigned long __stat, __dummy1, __dummy2; \
- __asm__ __volatile__ ("inl (%%dx)" : \
+ __asm__ __volatile__ ("inl %%dx" : \
"=a"(__stat), \
"=c"(__dummy1), \
"=d"(__dummy2), \
diff --git a/drivers/misc/vmw_vmci/vmci_driver.c b/drivers/misc/vmw_vmci/vmci_driver.c
index 7b3fce2da6c..3dee7ae123e 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.c
+++ b/drivers/misc/vmw_vmci/vmci_driver.c
@@ -113,5 +113,5 @@ module_exit(vmci_drv_exit);
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Machine Communication Interface.");
-MODULE_VERSION("1.0.0.0-k");
+MODULE_VERSION("1.1.0.0-k");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h
index f69156a1f30..cee9e977d31 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.h
+++ b/drivers/misc/vmw_vmci/vmci_driver.h
@@ -35,6 +35,13 @@ struct vmci_obj {
enum vmci_obj_type type;
};
+/*
+ * Needed by other components of this module. It's okay to have one global
+ * instance of this because there can only ever be one VMCI device. Our
+ * virtual hardware enforces this.
+ */
+extern struct pci_dev *vmci_pdev;
+
u32 vmci_get_context_id(void);
int vmci_send_datagram(struct vmci_datagram *dg);
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 60c01999f48..b3a2b763ecf 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -65,9 +65,11 @@ struct vmci_guest_device {
void *data_buffer;
void *notification_bitmap;
+ dma_addr_t notification_base;
};
/* vmci_dev singleton device and supporting data*/
+struct pci_dev *vmci_pdev;
static struct vmci_guest_device *vmci_dev_g;
static DEFINE_SPINLOCK(vmci_dev_spinlock);
@@ -528,7 +530,9 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
* well.
*/
if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
- vmci_dev->notification_bitmap = vmalloc(PAGE_SIZE);
+ vmci_dev->notification_bitmap = dma_alloc_coherent(
+ &pdev->dev, PAGE_SIZE, &vmci_dev->notification_base,
+ GFP_KERNEL);
if (!vmci_dev->notification_bitmap) {
dev_warn(&pdev->dev,
"Unable to allocate notification bitmap\n");
@@ -546,6 +550,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
/* Set up global device so that we can start sending datagrams */
spin_lock_irq(&vmci_dev_spinlock);
vmci_dev_g = vmci_dev;
+ vmci_pdev = pdev;
spin_unlock_irq(&vmci_dev_spinlock);
/*
@@ -553,9 +558,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
* used.
*/
if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
- struct page *page =
- vmalloc_to_page(vmci_dev->notification_bitmap);
- unsigned long bitmap_ppn = page_to_pfn(page);
+ unsigned long bitmap_ppn =
+ vmci_dev->notification_base >> PAGE_SHIFT;
if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
dev_warn(&pdev->dev,
"VMCI device unable to register notification bitmap with PPN 0x%x\n",
@@ -665,11 +669,14 @@ err_remove_bitmap:
if (vmci_dev->notification_bitmap) {
iowrite32(VMCI_CONTROL_RESET,
vmci_dev->iobase + VMCI_CONTROL_ADDR);
- vfree(vmci_dev->notification_bitmap);
+ dma_free_coherent(&pdev->dev, PAGE_SIZE,
+ vmci_dev->notification_bitmap,
+ vmci_dev->notification_base);
}
err_remove_vmci_dev_g:
spin_lock_irq(&vmci_dev_spinlock);
+ vmci_pdev = NULL;
vmci_dev_g = NULL;
spin_unlock_irq(&vmci_dev_spinlock);
@@ -699,6 +706,7 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
spin_lock_irq(&vmci_dev_spinlock);
vmci_dev_g = NULL;
+ vmci_pdev = NULL;
spin_unlock_irq(&vmci_dev_spinlock);
dev_dbg(&pdev->dev, "Resetting vmci device\n");
@@ -727,7 +735,9 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
* device, so we can safely free it here.
*/
- vfree(vmci_dev->notification_bitmap);
+ dma_free_coherent(&pdev->dev, PAGE_SIZE,
+ vmci_dev->notification_bitmap,
+ vmci_dev->notification_base);
}
vfree(vmci_dev->data_buffer);
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 8ff2e5ee8fb..a0515a6d6eb 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -21,6 +21,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
+#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uio.h>
@@ -146,14 +147,20 @@ typedef int vmci_memcpy_from_queue_func(void *dest, size_t dest_offset,
/* The Kernel specific component of the struct vmci_queue structure. */
struct vmci_queue_kern_if {
- struct page **page;
- struct page **header_page;
- void *va;
struct mutex __mutex; /* Protects the queue. */
struct mutex *mutex; /* Shared by producer and consumer queues. */
- bool host;
- size_t num_pages;
- bool mapped;
+ size_t num_pages; /* Number of pages incl. header. */
+ bool host; /* Host or guest? */
+ union {
+ struct {
+ dma_addr_t *pas;
+ void **vas;
+ } g; /* Used by the guest. */
+ struct {
+ struct page **page;
+ struct page **header_page;
+ } h; /* Used by the host. */
+ } u;
};
/*
@@ -265,76 +272,65 @@ static void qp_free_queue(void *q, u64 size)
struct vmci_queue *queue = q;
if (queue) {
- u64 i = DIV_ROUND_UP(size, PAGE_SIZE);
+ u64 i;
- if (queue->kernel_if->mapped) {
- vunmap(queue->kernel_if->va);
- queue->kernel_if->va = NULL;
+ /* Given size does not include header, so add in a page here. */
+ for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
+ dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
+ queue->kernel_if->u.g.vas[i],
+ queue->kernel_if->u.g.pas[i]);
}
- while (i)
- __free_page(queue->kernel_if->page[--i]);
-
- vfree(queue->q_header);
+ vfree(queue);
}
}
/*
- * Allocates kernel VA space of specified size, plus space for the
- * queue structure/kernel interface and the queue header. Allocates
- * physical pages for the queue data pages.
- *
- * PAGE m: struct vmci_queue_header (struct vmci_queue->q_header)
- * PAGE m+1: struct vmci_queue
- * PAGE m+1+q: struct vmci_queue_kern_if (struct vmci_queue->kernel_if)
- * PAGE n-size: Data pages (struct vmci_queue->kernel_if->page[])
+ * Allocates kernel queue pages of specified size with IOMMU mappings,
+ * plus space for the queue structure/kernel interface and the queue
+ * header.
*/
static void *qp_alloc_queue(u64 size, u32 flags)
{
u64 i;
struct vmci_queue *queue;
- struct vmci_queue_header *q_header;
- const u64 num_data_pages = DIV_ROUND_UP(size, PAGE_SIZE);
- const uint queue_size =
- PAGE_SIZE +
- sizeof(*queue) + sizeof(*(queue->kernel_if)) +
- num_data_pages * sizeof(*(queue->kernel_if->page));
-
- q_header = vmalloc(queue_size);
- if (!q_header)
+ const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
+ const size_t pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
+ const size_t vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
+ const size_t queue_size =
+ sizeof(*queue) + sizeof(*queue->kernel_if) +
+ pas_size + vas_size;
+
+ queue = vmalloc(queue_size);
+ if (!queue)
return NULL;
- queue = (void *)q_header + PAGE_SIZE;
- queue->q_header = q_header;
+ queue->q_header = NULL;
queue->saved_header = NULL;
queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
- queue->kernel_if->header_page = NULL; /* Unused in guest. */
- queue->kernel_if->page = (struct page **)(queue->kernel_if + 1);
+ queue->kernel_if->mutex = NULL;
+ queue->kernel_if->num_pages = num_pages;
+ queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
+ queue->kernel_if->u.g.vas =
+ (void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
queue->kernel_if->host = false;
- queue->kernel_if->va = NULL;
- queue->kernel_if->mapped = false;
-
- for (i = 0; i < num_data_pages; i++) {
- queue->kernel_if->page[i] = alloc_pages(GFP_KERNEL, 0);
- if (!queue->kernel_if->page[i])
- goto fail;
- }
- if (vmci_qp_pinned(flags)) {
- queue->kernel_if->va =
- vmap(queue->kernel_if->page, num_data_pages, VM_MAP,
- PAGE_KERNEL);
- if (!queue->kernel_if->va)
- goto fail;
-
- queue->kernel_if->mapped = true;
+ for (i = 0; i < num_pages; i++) {
+ queue->kernel_if->u.g.vas[i] =
+ dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
+ &queue->kernel_if->u.g.pas[i],
+ GFP_KERNEL);
+ if (!queue->kernel_if->u.g.vas[i]) {
+ /* Size excl. the header. */
+ qp_free_queue(queue, i * PAGE_SIZE);
+ return NULL;
+ }
}
- return (void *)queue;
+ /* Queue header is the first page. */
+ queue->q_header = queue->kernel_if->u.g.vas[0];
- fail:
- qp_free_queue(queue, i * PAGE_SIZE);
- return NULL;
+ return queue;
}
/*
@@ -353,17 +349,18 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
size_t bytes_copied = 0;
while (bytes_copied < size) {
- u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
- size_t page_offset =
+ const u64 page_index =
+ (queue_offset + bytes_copied) / PAGE_SIZE;
+ const size_t page_offset =
(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
void *va;
size_t to_copy;
- if (!kernel_if->mapped)
- va = kmap(kernel_if->page[page_index]);
+ if (kernel_if->host)
+ va = kmap(kernel_if->u.h.page[page_index]);
else
- va = (void *)((u8 *)kernel_if->va +
- (page_index * PAGE_SIZE));
+ va = kernel_if->u.g.vas[page_index + 1];
+ /* Skip header. */
if (size - bytes_copied > PAGE_SIZE - page_offset)
/* Enough payload to fill up from this page. */
@@ -379,7 +376,8 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
err = memcpy_fromiovec((u8 *)va + page_offset,
iov, to_copy);
if (err != 0) {
- kunmap(kernel_if->page[page_index]);
+ if (kernel_if->host)
+ kunmap(kernel_if->u.h.page[page_index]);
return VMCI_ERROR_INVALID_ARGS;
}
} else {
@@ -388,8 +386,8 @@ static int __qp_memcpy_to_queue(struct vmci_queue *queue,
}
bytes_copied += to_copy;
- if (!kernel_if->mapped)
- kunmap(kernel_if->page[page_index]);
+ if (kernel_if->host)
+ kunmap(kernel_if->u.h.page[page_index]);
}
return VMCI_SUCCESS;
@@ -411,17 +409,18 @@ static int __qp_memcpy_from_queue(void *dest,
size_t bytes_copied = 0;
while (bytes_copied < size) {
- u64 page_index = (queue_offset + bytes_copied) / PAGE_SIZE;
- size_t page_offset =
+ const u64 page_index =
+ (queue_offset + bytes_copied) / PAGE_SIZE;
+ const size_t page_offset =
(queue_offset + bytes_copied) & (PAGE_SIZE - 1);
void *va;
size_t to_copy;
- if (!kernel_if->mapped)
- va = kmap(kernel_if->page[page_index]);
+ if (kernel_if->host)
+ va = kmap(kernel_if->u.h.page[page_index]);
else
- va = (void *)((u8 *)kernel_if->va +
- (page_index * PAGE_SIZE));
+ va = kernel_if->u.g.vas[page_index + 1];
+ /* Skip header. */
if (size - bytes_copied > PAGE_SIZE - page_offset)
/* Enough payload to fill up this page. */
@@ -437,7 +436,8 @@ static int __qp_memcpy_from_queue(void *dest,
err = memcpy_toiovec(iov, (u8 *)va + page_offset,
to_copy);
if (err != 0) {
- kunmap(kernel_if->page[page_index]);
+ if (kernel_if->host)
+ kunmap(kernel_if->u.h.page[page_index]);
return VMCI_ERROR_INVALID_ARGS;
}
} else {
@@ -446,8 +446,8 @@ static int __qp_memcpy_from_queue(void *dest,
}
bytes_copied += to_copy;
- if (!kernel_if->mapped)
- kunmap(kernel_if->page[page_index]);
+ if (kernel_if->host)
+ kunmap(kernel_if->u.h.page[page_index]);
}
return VMCI_SUCCESS;
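Both copy helpers now branch on kernel_if->host: host queues still kmap()/kunmap() each pinned user page per chunk, while guest queues index straight into the always-mapped coherent buffers, with the +1 skipping the header page. The sketch below captures just that branch; the struct layout and names are stand-ins, not the driver's real definitions.

/* Sketch: pick the kernel VA for one queue data page (names illustrative). */
#include <linux/highmem.h>
#include <linux/types.h>

struct example_kernel_if {
	bool host;
	union {
		struct { struct page **page; } h;  /* host: pinned user pages  */
		struct { void **vas; } g;          /* guest: coherent mappings */
	} u;
};

static void *example_queue_page_va(struct example_kernel_if *kif, u64 index)
{
	if (kif->host)
		return kmap(kif->u.h.page[index]);  /* caller must kunmap() */

	/* Guest pages are permanently mapped; slot 0 is the queue header. */
	return kif->u.g.vas[index + 1];
}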
@@ -489,12 +489,11 @@ static int qp_alloc_ppn_set(void *prod_q,
return VMCI_ERROR_NO_MEM;
}
- produce_ppns[0] = page_to_pfn(vmalloc_to_page(produce_q->q_header));
- for (i = 1; i < num_produce_pages; i++) {
+ for (i = 0; i < num_produce_pages; i++) {
unsigned long pfn;
produce_ppns[i] =
- page_to_pfn(produce_q->kernel_if->page[i - 1]);
+ produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
pfn = produce_ppns[i];
/* Fail allocation if PFN isn't supported by hypervisor. */
@@ -503,12 +502,11 @@ static int qp_alloc_ppn_set(void *prod_q,
goto ppn_error;
}
- consume_ppns[0] = page_to_pfn(vmalloc_to_page(consume_q->q_header));
- for (i = 1; i < num_consume_pages; i++) {
+ for (i = 0; i < num_consume_pages; i++) {
unsigned long pfn;
consume_ppns[i] =
- page_to_pfn(consume_q->kernel_if->page[i - 1]);
+ consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;
pfn = consume_ppns[i];
/* Fail allocation if PFN isn't supported by hypervisor. */
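With coherent buffers there is no struct page to feed to page_to_pfn(), so qp_alloc_ppn_set() now derives each PPN by shifting the page offset out of the bus address. The arithmetic, shown as a self-contained user-space demo with an assumed 4 KiB page size:

/* Standalone illustration of the PFN arithmetic above (not driver code). */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT 12	/* assumes 4 KiB pages for the demo */

int main(void)
{
	uint64_t dma_addr = 0x12345000ULL;              /* page-aligned bus address */
	uint64_t ppn = dma_addr >> EXAMPLE_PAGE_SHIFT;  /* physical page number */

	printf("dma 0x%llx -> ppn 0x%llx\n",
	       (unsigned long long)dma_addr, (unsigned long long)ppn);
	return 0;
}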
@@ -619,23 +617,20 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
const size_t num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
const size_t queue_page_size =
- num_pages * sizeof(*queue->kernel_if->page);
+ num_pages * sizeof(*queue->kernel_if->u.h.page);
queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
if (queue) {
queue->q_header = NULL;
queue->saved_header = NULL;
- queue->kernel_if =
- (struct vmci_queue_kern_if *)((u8 *)queue +
- sizeof(*queue));
+ queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
queue->kernel_if->host = true;
queue->kernel_if->mutex = NULL;
queue->kernel_if->num_pages = num_pages;
- queue->kernel_if->header_page =
+ queue->kernel_if->u.h.header_page =
(struct page **)((u8 *)queue + queue_size);
- queue->kernel_if->page = &queue->kernel_if->header_page[1];
- queue->kernel_if->va = NULL;
- queue->kernel_if->mapped = false;
+ queue->kernel_if->u.h.page =
+ &queue->kernel_if->u.h.header_page[1];
}
return queue;
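qp_host_alloc_queue() keeps the queue struct, the kernel interface and the trailing page-pointer array in a single kzalloc() and carves them up with pointer arithmetic; `(queue + 1)` is just the first byte past the struct. A generic sketch of that single-allocation layout, with made-up type names:

/* Sketch: one allocation holding a header struct plus trailing metadata. */
#include <linux/mm_types.h>
#include <linux/slab.h>

struct example_kern_if {
	size_t num_pages;
	struct page **pages;	/* points into the same allocation */
};

struct example_queue {
	struct example_kern_if *kernel_if;
};

static struct example_queue *example_alloc(size_t num_pages)
{
	const size_t size = sizeof(struct example_queue) +
			    sizeof(struct example_kern_if) +
			    num_pages * sizeof(struct page *);
	struct example_queue *q = kzalloc(size, GFP_KERNEL);

	if (!q)
		return NULL;

	q->kernel_if = (struct example_kern_if *)(q + 1);
	q->kernel_if->num_pages = num_pages;
	q->kernel_if->pages = (struct page **)(q->kernel_if + 1);
	return q;
}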
@@ -742,11 +737,12 @@ static int qp_host_get_user_memory(u64 produce_uva,
current->mm,
(uintptr_t) produce_uva,
produce_q->kernel_if->num_pages,
- 1, 0, produce_q->kernel_if->header_page, NULL);
+ 1, 0,
+ produce_q->kernel_if->u.h.header_page, NULL);
if (retval < produce_q->kernel_if->num_pages) {
pr_warn("get_user_pages(produce) failed (retval=%d)", retval);
- qp_release_pages(produce_q->kernel_if->header_page, retval,
- false);
+ qp_release_pages(produce_q->kernel_if->u.h.header_page,
+ retval, false);
err = VMCI_ERROR_NO_MEM;
goto out;
}
@@ -755,12 +751,13 @@ static int qp_host_get_user_memory(u64 produce_uva,
current->mm,
(uintptr_t) consume_uva,
consume_q->kernel_if->num_pages,
- 1, 0, consume_q->kernel_if->header_page, NULL);
+ 1, 0,
+ consume_q->kernel_if->u.h.header_page, NULL);
if (retval < consume_q->kernel_if->num_pages) {
pr_warn("get_user_pages(consume) failed (retval=%d)", retval);
- qp_release_pages(consume_q->kernel_if->header_page, retval,
- false);
- qp_release_pages(produce_q->kernel_if->header_page,
+ qp_release_pages(consume_q->kernel_if->u.h.header_page,
+ retval, false);
+ qp_release_pages(produce_q->kernel_if->u.h.header_page,
produce_q->kernel_if->num_pages, false);
err = VMCI_ERROR_NO_MEM;
}
@@ -803,15 +800,15 @@ static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
struct vmci_queue *consume_q)
{
- qp_release_pages(produce_q->kernel_if->header_page,
+ qp_release_pages(produce_q->kernel_if->u.h.header_page,
produce_q->kernel_if->num_pages, true);
- memset(produce_q->kernel_if->header_page, 0,
- sizeof(*produce_q->kernel_if->header_page) *
+ memset(produce_q->kernel_if->u.h.header_page, 0,
+ sizeof(*produce_q->kernel_if->u.h.header_page) *
produce_q->kernel_if->num_pages);
- qp_release_pages(consume_q->kernel_if->header_page,
+ qp_release_pages(consume_q->kernel_if->u.h.header_page,
consume_q->kernel_if->num_pages, true);
- memset(consume_q->kernel_if->header_page, 0,
- sizeof(*consume_q->kernel_if->header_page) *
+ memset(consume_q->kernel_if->u.h.header_page, 0,
+ sizeof(*consume_q->kernel_if->u.h.header_page) *
consume_q->kernel_if->num_pages);
}
@@ -834,12 +831,12 @@ static int qp_host_map_queues(struct vmci_queue *produce_q,
if (produce_q->q_header != consume_q->q_header)
return VMCI_ERROR_QUEUEPAIR_MISMATCH;
- if (produce_q->kernel_if->header_page == NULL ||
- *produce_q->kernel_if->header_page == NULL)
+ if (produce_q->kernel_if->u.h.header_page == NULL ||
+ *produce_q->kernel_if->u.h.header_page == NULL)
return VMCI_ERROR_UNAVAILABLE;
- headers[0] = *produce_q->kernel_if->header_page;
- headers[1] = *consume_q->kernel_if->header_page;
+ headers[0] = *produce_q->kernel_if->u.h.header_page;
+ headers[1] = *consume_q->kernel_if->u.h.header_page;
produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
if (produce_q->q_header != NULL) {
@@ -1720,21 +1717,6 @@ static int qp_broker_attach(struct qp_broker_entry *entry,
if (result < VMCI_SUCCESS)
return result;
- /*
- * Preemptively load in the headers if non-blocking to
- * prevent blocking later.
- */
- if (entry->qp.flags & VMCI_QPFLAG_NONBLOCK) {
- result = qp_host_map_queues(entry->produce_q,
- entry->consume_q);
- if (result < VMCI_SUCCESS) {
- qp_host_unregister_user_memory(
- entry->produce_q,
- entry->consume_q);
- return result;
- }
- }
-
entry->state = VMCIQPB_ATTACHED_MEM;
} else {
entry->state = VMCIQPB_ATTACHED_NO_MEM;
@@ -1749,24 +1731,6 @@ static int qp_broker_attach(struct qp_broker_entry *entry,
return VMCI_ERROR_UNAVAILABLE;
} else {
- /*
- * For non-blocking queue pairs, we cannot rely on
- * enqueue/dequeue to map in the pages on the
- * host-side, since it may block, so we make an
- * attempt here.
- */
-
- if (flags & VMCI_QPFLAG_NONBLOCK) {
- result =
- qp_host_map_queues(entry->produce_q,
- entry->consume_q);
- if (result < VMCI_SUCCESS)
- return result;
-
- entry->qp.flags |= flags &
- (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED);
- }
-
/* The host side has successfully attached to a queue pair. */
entry->state = VMCIQPB_ATTACHED_MEM;
}
@@ -2543,24 +2507,19 @@ void vmci_qp_guest_endpoints_exit(void)
* Since non-blocking isn't yet implemented on the host personality we
* have no reason to acquire a spin lock. So to avoid the use of an
* unnecessary lock only acquire the mutex if we can block.
- * Note: It is assumed that QPFLAG_PINNED implies QPFLAG_NONBLOCK. Therefore
- * we can use the same locking function for access to both the queue
- * and the queue headers as it is the same logic. Assert this behvior.
*/
static void qp_lock(const struct vmci_qp *qpair)
{
- if (vmci_can_block(qpair->flags))
- qp_acquire_queue_mutex(qpair->produce_q);
+ qp_acquire_queue_mutex(qpair->produce_q);
}
/*
* Helper routine that unlocks the queue pair after calling
- * qp_lock. Respects non-blocking and pinning flags.
+ * qp_lock.
*/
static void qp_unlock(const struct vmci_qp *qpair)
{
- if (vmci_can_block(qpair->flags))
- qp_release_queue_mutex(qpair->produce_q);
+ qp_release_queue_mutex(qpair->produce_q);
}
/*
@@ -2568,17 +2527,12 @@ static void qp_unlock(const struct vmci_qp *qpair)
* currently not mapped, it will be attempted to do so.
*/
static int qp_map_queue_headers(struct vmci_queue *produce_q,
- struct vmci_queue *consume_q,
- bool can_block)
+ struct vmci_queue *consume_q)
{
int result;
if (NULL == produce_q->q_header || NULL == consume_q->q_header) {
- if (can_block)
- result = qp_host_map_queues(produce_q, consume_q);
- else
- result = VMCI_ERROR_QUEUEPAIR_NOT_READY;
-
+ result = qp_host_map_queues(produce_q, consume_q);
if (result < VMCI_SUCCESS)
return (produce_q->saved_header &&
consume_q->saved_header) ?
@@ -2601,8 +2555,7 @@ static int qp_get_queue_headers(const struct vmci_qp *qpair,
{
int result;
- result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q,
- vmci_can_block(qpair->flags));
+ result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
if (result == VMCI_SUCCESS) {
*produce_q_header = qpair->produce_q->q_header;
*consume_q_header = qpair->consume_q->q_header;
@@ -2645,9 +2598,6 @@ static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
{
unsigned int generation;
- if (qpair->flags & VMCI_QPFLAG_NONBLOCK)
- return false;
-
qpair->blocked++;
generation = qpair->generation;
qp_unlock(qpair);
@@ -2674,15 +2624,14 @@ static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
const u64 produce_q_size,
const void *buf,
size_t buf_size,
- vmci_memcpy_to_queue_func memcpy_to_queue,
- bool can_block)
+ vmci_memcpy_to_queue_func memcpy_to_queue)
{
s64 free_space;
u64 tail;
size_t written;
ssize_t result;
- result = qp_map_queue_headers(produce_q, consume_q, can_block);
+ result = qp_map_queue_headers(produce_q, consume_q);
if (unlikely(result != VMCI_SUCCESS))
return result;
@@ -2737,15 +2686,14 @@ static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
void *buf,
size_t buf_size,
vmci_memcpy_from_queue_func memcpy_from_queue,
- bool update_consumer,
- bool can_block)
+ bool update_consumer)
{
s64 buf_ready;
u64 head;
size_t read;
ssize_t result;
- result = qp_map_queue_headers(produce_q, consume_q, can_block);
+ result = qp_map_queue_headers(produce_q, consume_q);
if (unlikely(result != VMCI_SUCCESS))
return result;
@@ -2842,32 +2790,11 @@ int vmci_qpair_alloc(struct vmci_qp **qpair,
route = vmci_guest_code_active() ?
VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
- /* If NONBLOCK or PINNED is set, we better be the guest personality. */
- if ((!vmci_can_block(flags) || vmci_qp_pinned(flags)) &&
- VMCI_ROUTE_AS_GUEST != route) {
- pr_devel("Not guest personality w/ NONBLOCK OR PINNED set");
+ if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
+ pr_devel("NONBLOCK OR PINNED set");
return VMCI_ERROR_INVALID_ARGS;
}
- /*
- * Limit the size of pinned QPs and check sanity.
- *
- * Pinned pages implies non-blocking mode. Mutexes aren't acquired
- * when the NONBLOCK flag is set in qpair code; and also should not be
- * acquired when the PINNED flagged is set. Since pinning pages
- * implies we want speed, it makes no sense not to have NONBLOCK
- * set if PINNED is set. Hence enforce this implication.
- */
- if (vmci_qp_pinned(flags)) {
- if (vmci_can_block(flags)) {
- pr_err("Attempted to enable pinning w/o non-blocking");
- return VMCI_ERROR_INVALID_ARGS;
- }
-
- if (produce_qsize + consume_qsize > VMCI_MAX_PINNED_QP_MEMORY)
- return VMCI_ERROR_NO_RESOURCES;
- }
-
my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
if (!my_qpair)
return VMCI_ERROR_NO_MEM;
@@ -3195,8 +3122,7 @@ ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
qpair->consume_q,
qpair->produce_q_size,
buf, buf_size,
- qp_memcpy_to_queue,
- vmci_can_block(qpair->flags));
+ qp_memcpy_to_queue);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
@@ -3237,8 +3163,7 @@ ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
qpair->consume_q,
qpair->consume_q_size,
buf, buf_size,
- qp_memcpy_from_queue, true,
- vmci_can_block(qpair->flags));
+ qp_memcpy_from_queue, true);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
@@ -3280,8 +3205,7 @@ ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
qpair->consume_q,
qpair->consume_q_size,
buf, buf_size,
- qp_memcpy_from_queue, false,
- vmci_can_block(qpair->flags));
+ qp_memcpy_from_queue, false);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
@@ -3323,8 +3247,7 @@ ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
qpair->consume_q,
qpair->produce_q_size,
iov, iov_size,
- qp_memcpy_to_queue_iov,
- vmci_can_block(qpair->flags));
+ qp_memcpy_to_queue_iov);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
@@ -3367,7 +3290,7 @@ ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
qpair->consume_q_size,
iov, iov_size,
qp_memcpy_from_queue_iov,
- true, vmci_can_block(qpair->flags));
+ true);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
@@ -3411,7 +3334,7 @@ ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
qpair->consume_q_size,
iov, iov_size,
qp_memcpy_from_queue_iov,
- false, vmci_can_block(qpair->flags));
+ false);
if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
!qp_wait_for_ready_queue(qpair))
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h
index 58c6959f6b6..ed177f04ef2 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.h
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h
@@ -146,24 +146,6 @@ VMCI_QP_PAGESTORE_IS_WELLFORMED(struct vmci_qp_page_store *page_store)
return page_store->len >= 2;
}
-/*
- * Helper function to check if the non-blocking flag
- * is set for a given queue pair.
- */
-static inline bool vmci_can_block(u32 flags)
-{
- return !(flags & VMCI_QPFLAG_NONBLOCK);
-}
-
-/*
- * Helper function to check if the queue pair is pinned
- * into memory.
- */
-static inline bool vmci_qp_pinned(u32 flags)
-{
- return flags & VMCI_QPFLAG_PINNED;
-}
-
void vmci_qp_broker_exit(void);
int vmci_qp_broker_alloc(struct vmci_handle handle, u32 peer,
u32 flags, u32 priv_flags,
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 49a5bca418b..5d088551196 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1313,7 +1313,7 @@ int mmc_regulator_get_supply(struct mmc_host *mmc)
supply = devm_regulator_get(dev, "vmmc");
mmc->supply.vmmc = supply;
- mmc->supply.vqmmc = devm_regulator_get(dev, "vqmmc");
+ mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc");
if (IS_ERR(supply))
return PTR_ERR(supply);
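The MMC hunks in this series move from devm_regulator_get() to devm_regulator_get_optional(), which hands back an ERR_PTR instead of a dummy regulator when the supply simply is not described, letting drivers tell "board has no such supply" apart from a real error. A hedged sketch of that calling convention (function and supply names invented):

/* Sketch: probing for an optional supply (names illustrative only). */
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/regulator/consumer.h>

static int example_get_vqmmc(struct device *dev, struct regulator **out)
{
	struct regulator *reg = devm_regulator_get_optional(dev, "vqmmc");

	if (IS_ERR(reg)) {
		if (PTR_ERR(reg) == -EPROBE_DEFER)
			return -EPROBE_DEFER;	/* supply exists but is not ready */
		reg = NULL;			/* board has no vqmmc: carry on */
	}

	*out = reg;
	return 0;
}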
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index ee5f1676f14..542407363dd 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2231,7 +2231,7 @@ int dw_mci_probe(struct dw_mci *host)
}
}
- host->vmmc = devm_regulator_get(host->dev, "vmmc");
+ host->vmmc = devm_regulator_get_optional(host->dev, "vmmc");
if (IS_ERR(host->vmmc)) {
ret = PTR_ERR(host->vmmc);
if (ret == -EPROBE_DEFER)
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 847b1996ce8..1956a3df7cf 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -83,7 +83,7 @@ struct pxamci_host {
static inline void pxamci_init_ocr(struct pxamci_host *host)
{
#ifdef CONFIG_REGULATOR
- host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc");
+ host->vcc = regulator_get_optional(mmc_dev(host->mmc), "vmmc");
if (IS_ERR(host->vcc))
host->vcc = NULL;
@@ -128,7 +128,7 @@ static inline int pxamci_set_power(struct pxamci_host *host,
!!on ^ host->pdata->gpio_power_invert);
}
if (!host->vcc && host->pdata && host->pdata->setpower)
- host->pdata->setpower(mmc_dev(host->mmc), vdd);
+ return host->pdata->setpower(mmc_dev(host->mmc), vdd);
return 0;
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index a78bd4f3aec..dd2c083c434 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2966,7 +2966,7 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->caps |= MMC_CAP_NEEDS_POLL;
/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */
- host->vqmmc = regulator_get(mmc_dev(mmc), "vqmmc");
+ host->vqmmc = regulator_get_optional(mmc_dev(mmc), "vqmmc");
if (IS_ERR_OR_NULL(host->vqmmc)) {
if (PTR_ERR(host->vqmmc) < 0) {
pr_info("%s: no vqmmc regulator found\n",
@@ -3042,7 +3042,7 @@ int sdhci_add_host(struct sdhci_host *host)
ocr_avail = 0;
- host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
+ host->vmmc = regulator_get_optional(mmc_dev(mmc), "vmmc");
if (IS_ERR_OR_NULL(host->vmmc)) {
if (PTR_ERR(host->vmmc) < 0) {
pr_info("%s: no vmmc regulator found\n",
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index cff6f023c03..7f2a032c354 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -996,7 +996,7 @@ static int __init cops_module_init(void)
printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n",
cardname);
cops_dev = cops_probe(-1);
- return PTR_RET(cops_dev);
+ return PTR_ERR_OR_ZERO(cops_dev);
}
static void __exit cops_module_exit(void)
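PTR_RET() here is replaced by the more descriptive PTR_ERR_OR_ZERO(); both collapse an ERR_PTR-style return into 0 on success or the embedded errno on failure. Its behaviour amounts to the small equivalent below (a restatement, not new driver logic):

/* Equivalent of the helper used above; see include/linux/err.h. */
#include <linux/err.h>

static inline int example_ptr_err_or_zero(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);	/* negative errno encoded in the pointer */
	return 0;			/* valid pointer: report success */
}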
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index b5782cdf0bc..01e2ac55c13 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -1243,7 +1243,7 @@ static int __init ltpc_module_init(void)
"ltpc: Autoprobing is not recommended for modules\n");
dev_ltpc = ltpc_probe();
- return PTR_RET(dev_ltpc);
+ return PTR_ERR_OR_ZERO(dev_ltpc);
}
module_init(ltpc_module_init);
#endif
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index a746ba272f0..a956053608f 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -1007,7 +1007,7 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
soft = &pkt.soft.rfc1201;
- lp->hw.copy_from_card(dev, bufnum, 0, &pkt, sizeof(ARC_HDR_SIZE));
+ lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE);
if (pkt.hard.offset[0]) {
ofs = pkt.hard.offset[0];
length = 256 - ofs;
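The arcnet change above fixes a sizeof-on-a-constant slip: copy_from_card() should be given ARC_HDR_SIZE bytes, but sizeof(ARC_HDR_SIZE) yields the size of the constant's type (sizeof(int)), which is only accidentally related to the intended length. A standalone demo of the pitfall, with an invented constant:

/* Standalone demo of the sizeof-on-a-constant pitfall (not driver code). */
#include <stdio.h>

#define EXAMPLE_HDR_SIZE 6	/* an invented header length in bytes */

int main(void)
{
	/* sizeof(EXAMPLE_HDR_SIZE) is sizeof(int), not 6. */
	printf("intended %d bytes, sizeof() gives %zu\n",
	       EXAMPLE_HDR_SIZE, sizeof(EXAMPLE_HDR_SIZE));
	return 0;
}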
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 390061d0969..0d8f427ade9 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -143,10 +143,9 @@ static inline struct bonding *__get_bond_by_port(struct port *port)
*/
static inline struct port *__get_first_port(struct bonding *bond)
{
- if (bond->slave_cnt == 0)
- return NULL;
+ struct slave *first_slave = bond_first_slave(bond);
- return &(SLAVE_AD_INFO(bond->first_slave).port);
+ return first_slave ? &(SLAVE_AD_INFO(first_slave).port) : NULL;
}
/**
@@ -159,13 +158,16 @@ static inline struct port *__get_first_port(struct bonding *bond)
static inline struct port *__get_next_port(struct port *port)
{
struct bonding *bond = __get_bond_by_port(port);
- struct slave *slave = port->slave;
+ struct slave *slave = port->slave, *slave_next;
// If there's no bond for this port, or this is the last slave
- if ((bond == NULL) || (slave->next == bond->first_slave))
+ if (bond == NULL)
+ return NULL;
+ slave_next = bond_next_slave(bond, slave);
+ if (!slave_next || bond_is_first_slave(bond, slave_next))
return NULL;
- return &(SLAVE_AD_INFO(slave->next).port);
+ return &(SLAVE_AD_INFO(slave_next).port);
}
/**
@@ -178,12 +180,14 @@ static inline struct port *__get_next_port(struct port *port)
static inline struct aggregator *__get_first_agg(struct port *port)
{
struct bonding *bond = __get_bond_by_port(port);
+ struct slave *first_slave;
// If there's no bond for this port, or bond has no slaves
- if ((bond == NULL) || (bond->slave_cnt == 0))
+ if (bond == NULL)
return NULL;
+ first_slave = bond_first_slave(bond);
- return &(SLAVE_AD_INFO(bond->first_slave).aggregator);
+ return first_slave ? &(SLAVE_AD_INFO(first_slave).aggregator) : NULL;
}
/**
@@ -195,14 +199,17 @@ static inline struct aggregator *__get_first_agg(struct port *port)
*/
static inline struct aggregator *__get_next_agg(struct aggregator *aggregator)
{
- struct slave *slave = aggregator->slave;
+ struct slave *slave = aggregator->slave, *slave_next;
struct bonding *bond = bond_get_bond_by_slave(slave);
// If there's no bond for this aggregator, or this is the last slave
- if ((bond == NULL) || (slave->next == bond->first_slave))
+ if (bond == NULL)
+ return NULL;
+ slave_next = bond_next_slave(bond, slave);
+ if (!slave_next || bond_is_first_slave(bond, slave_next))
return NULL;
- return &(SLAVE_AD_INFO(slave->next).aggregator);
+ return &(SLAVE_AD_INFO(slave_next).aggregator);
}
/*
@@ -2110,7 +2117,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
read_lock(&bond->lock);
//check if there are any slaves
- if (bond->slave_cnt == 0)
+ if (list_empty(&bond->slave_list))
goto re_arm;
// check if agg_select_timer timer after initialize is timed out
@@ -2336,8 +2343,12 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
int bond_3ad_set_carrier(struct bonding *bond)
{
struct aggregator *active;
+ struct slave *first_slave;
- active = __get_active_agg(&(SLAVE_AD_INFO(bond->first_slave).aggregator));
+ first_slave = bond_first_slave(bond);
+ if (!first_slave)
+ return 0;
+ active = __get_active_agg(&(SLAVE_AD_INFO(first_slave).aggregator));
if (active) {
/* are enough slaves available to consider link up? */
if (active->num_of_ports < bond->params.min_links) {
@@ -2415,6 +2426,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
struct ad_info ad_info;
int res = 1;
+ read_lock(&bond->lock);
if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
dev->name);
@@ -2432,7 +2444,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
slave_agg_no = bond->xmit_hash_policy(skb, slaves_in_agg);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
if (agg && (agg->aggregator_identifier == agg_id)) {
@@ -2464,6 +2476,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
}
out:
+ read_unlock(&bond->lock);
if (res) {
/* no suitable interface, frame not sent */
kfree_skb(skb);
@@ -2501,18 +2514,13 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
*/
void bond_3ad_update_lacp_rate(struct bonding *bond)
{
- int i;
- struct slave *slave;
struct port *port = NULL;
+ struct slave *slave;
int lacp_fast;
- write_lock_bh(&bond->lock);
lacp_fast = bond->params.lacp_fast;
-
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
port = &(SLAVE_AD_INFO(slave).port);
- if (port->slave == NULL)
- continue;
__get_state_machine_lock(port);
if (lacp_fast)
port->actor_oper_port_state |= AD_STATE_LACP_TIMEOUT;
@@ -2520,6 +2528,4 @@ void bond_3ad_update_lacp_rate(struct bonding *bond)
port->actor_oper_port_state &= ~AD_STATE_LACP_TIMEOUT;
__release_state_machine_lock(port);
}
-
- write_unlock_bh(&bond->lock);
}
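These bond_3ad changes ride on bonding's switch from a hand-rolled circular slave ring (first_slave plus next/prev pointers and an index) to a standard struct list_head, walked through helpers such as bond_first_slave() and bond_for_each_slave(). The list_head idiom they presumably wrap looks like the sketch below; the types and helper names are invented for illustration.

/* Sketch of list_head-based slave iteration (illustrative names). */
#include <linux/list.h>

struct example_slave {
	struct list_head list;
};

struct example_bond {
	struct list_head slave_list;
};

static struct example_slave *example_first_slave(struct example_bond *bond)
{
	/* NULL when the list is empty, like the old slave_cnt == 0 check. */
	return list_first_entry_or_null(&bond->slave_list,
					struct example_slave, list);
}

static unsigned int example_count_slaves(struct example_bond *bond)
{
	struct example_slave *slave;
	unsigned int n = 0;

	list_for_each_entry(slave, &bond->slave_list, list)
		n++;	/* no external index needed, unlike the old macro */
	return n;
}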
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 4ea8ed150d4..91f179d5135 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -224,13 +224,12 @@ static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
{
struct slave *slave, *least_loaded;
long long max_gap;
- int i;
least_loaded = NULL;
max_gap = LLONG_MIN;
/* Find the slave with the largest gap */
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
if (SLAVE_IS_OK(slave)) {
long long gap = compute_gap(slave);
@@ -386,11 +385,10 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond)
struct slave *rx_slave, *slave, *start_at;
int i = 0;
- if (bond_info->next_rx_slave) {
+ if (bond_info->next_rx_slave)
start_at = bond_info->next_rx_slave;
- } else {
- start_at = bond->first_slave;
- }
+ else
+ start_at = bond_first_slave(bond);
rx_slave = NULL;
@@ -405,7 +403,8 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond)
}
if (rx_slave) {
- bond_info->next_rx_slave = rx_slave->next;
+ slave = bond_next_slave(bond, rx_slave);
+ bond_info->next_rx_slave = slave;
}
return rx_slave;
@@ -513,7 +512,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
skb->dev = client_info->slave->dev;
- if (client_info->tag) {
+ if (client_info->vlan_id) {
skb = vlan_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id);
if (!skb) {
pr_err("%s: Error: failed to insert VLAN tag\n",
@@ -695,10 +694,8 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
client_info->ntt = 0;
}
- if (bond_vlan_used(bond)) {
- if (!vlan_get_tag(skb, &client_info->vlan_id))
- client_info->tag = 1;
- }
+ if (!vlan_get_tag(skb, &client_info->vlan_id))
+ client_info->vlan_id = 0;
if (!client_info->assigned) {
u32 prev_tbl_head = bond_info->rx_hashtbl_used_head;
@@ -804,7 +801,7 @@ static void rlb_init_table_entry_dst(struct rlb_client_info *entry)
entry->used_prev = RLB_NULL_INDEX;
entry->assigned = 0;
entry->slave = NULL;
- entry->tag = 0;
+ entry->vlan_id = 0;
}
static void rlb_init_table_entry_src(struct rlb_client_info *entry)
{
@@ -961,7 +958,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]);
u32 next_index = bond_info->rx_hashtbl[curr_index].used_next;
- if (curr->tag && (curr->vlan_id == vlan_id))
+ if (curr->vlan_id == vlan_id)
rlb_delete_table_entry(bond, curr_index);
curr_index = next_index;
@@ -972,58 +969,62 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
/*********************** tlb/rlb shared functions *********************/
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
+ u16 vid)
{
- struct bonding *bond = bond_get_bond_by_slave(slave);
struct learning_pkt pkt;
+ struct sk_buff *skb;
int size = sizeof(struct learning_pkt);
- int i;
+ char *data;
memset(&pkt, 0, size);
memcpy(pkt.mac_dst, mac_addr, ETH_ALEN);
memcpy(pkt.mac_src, mac_addr, ETH_ALEN);
pkt.type = cpu_to_be16(ETH_P_LOOP);
- for (i = 0; i < MAX_LP_BURST; i++) {
- struct sk_buff *skb;
- char *data;
+ skb = dev_alloc_skb(size);
+ if (!skb)
+ return;
+
+ data = skb_put(skb, size);
+ memcpy(data, &pkt, size);
+
+ skb_reset_mac_header(skb);
+ skb->network_header = skb->mac_header + ETH_HLEN;
+ skb->protocol = pkt.type;
+ skb->priority = TC_PRIO_CONTROL;
+ skb->dev = slave->dev;
- skb = dev_alloc_skb(size);
+ if (vid) {
+ skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid);
if (!skb) {
+ pr_err("%s: Error: failed to insert VLAN tag\n",
+ slave->bond->dev->name);
return;
}
+ }
- data = skb_put(skb, size);
- memcpy(data, &pkt, size);
-
- skb_reset_mac_header(skb);
- skb->network_header = skb->mac_header + ETH_HLEN;
- skb->protocol = pkt.type;
- skb->priority = TC_PRIO_CONTROL;
- skb->dev = slave->dev;
+ dev_queue_xmit(skb);
+}
- if (bond_vlan_used(bond)) {
- struct vlan_entry *vlan;
- vlan = bond_next_vlan(bond,
- bond->alb_info.current_alb_vlan);
+static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
+{
+ struct bonding *bond = bond_get_bond_by_slave(slave);
+ struct net_device *upper;
+ struct list_head *iter;
- bond->alb_info.current_alb_vlan = vlan;
- if (!vlan) {
- kfree_skb(skb);
- continue;
- }
+ /* send untagged */
+ alb_send_lp_vid(slave, mac_addr, 0);
- skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vlan->vlan_id);
- if (!skb) {
- pr_err("%s: Error: failed to insert VLAN tag\n",
- bond->dev->name);
- continue;
- }
- }
-
- dev_queue_xmit(skb);
+ /* loop through vlans and send one packet for each */
+ rcu_read_lock();
+ netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+ if (upper->priv_flags & IFF_802_1Q_VLAN)
+ alb_send_lp_vid(slave, mac_addr,
+ vlan_dev_vlan_id(upper));
}
+ rcu_read_unlock();
}
static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
@@ -1173,9 +1174,8 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
{
struct slave *tmp_slave1, *free_mac_slave = NULL;
struct slave *has_bond_addr = bond->curr_active_slave;
- int i;
- if (bond->slave_cnt == 0) {
+ if (list_empty(&bond->slave_list)) {
/* this is the first slave */
return 0;
}
@@ -1196,7 +1196,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
/* The slave's address is equal to the address of the bond.
* Search for a spare address in the bond for this slave.
*/
- bond_for_each_slave(bond, tmp_slave1, i) {
+ bond_for_each_slave(bond, tmp_slave1) {
if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
/* no slave has tmp_slave1's perm addr
* as its curr addr
@@ -1246,17 +1246,15 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
*/
static int alb_set_mac_address(struct bonding *bond, void *addr)
{
- struct sockaddr sa;
- struct slave *slave, *stop_at;
char tmp_addr[ETH_ALEN];
+ struct slave *slave;
+ struct sockaddr sa;
int res;
- int i;
- if (bond->alb_info.rlb_enabled) {
+ if (bond->alb_info.rlb_enabled)
return 0;
- }
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
/* save net_device's current hw address */
memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
@@ -1276,8 +1274,7 @@ unwind:
sa.sa_family = bond->dev->type;
/* unwind from head to the slave that failed */
- stop_at = slave;
- bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) {
+ bond_for_each_slave_continue_reverse(bond, slave) {
memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
dev_set_mac_address(slave->dev, &sa);
memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
@@ -1342,6 +1339,7 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
/* make sure that the curr_active_slave do not change during tx
*/
+ read_lock(&bond->lock);
read_lock(&bond->curr_slave_lock);
switch (ntohs(skb->protocol)) {
@@ -1446,11 +1444,12 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
}
read_unlock(&bond->curr_slave_lock);
-
+ read_unlock(&bond->lock);
if (res) {
/* no suitable interface, frame not sent */
kfree_skb(skb);
}
+
return NETDEV_TX_OK;
}
@@ -1460,11 +1459,10 @@ void bond_alb_monitor(struct work_struct *work)
alb_work.work);
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct slave *slave;
- int i;
read_lock(&bond->lock);
- if (bond->slave_cnt == 0) {
+ if (list_empty(&bond->slave_list)) {
bond_info->tx_rebalance_counter = 0;
bond_info->lp_counter = 0;
goto re_arm;
@@ -1482,9 +1480,8 @@ void bond_alb_monitor(struct work_struct *work)
*/
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave)
alb_send_learning_packets(slave, slave->dev->dev_addr);
- }
read_unlock(&bond->curr_slave_lock);
@@ -1496,7 +1493,7 @@ void bond_alb_monitor(struct work_struct *work)
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
tlb_clear_slave(bond, slave, 1);
if (slave == bond->curr_active_slave) {
SLAVE_TLB_INFO(slave).load =
@@ -1602,9 +1599,8 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
*/
void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
{
- if (bond->slave_cnt > 1) {
+ if (!list_empty(&bond->slave_list))
alb_change_hw_addr_on_detach(bond, slave);
- }
tlb_clear_slave(bond, slave, 0);
@@ -1661,9 +1657,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
{
struct slave *swap_slave;
- if (bond->curr_active_slave == new_slave) {
+ if (bond->curr_active_slave == new_slave)
return;
- }
if (bond->curr_active_slave && bond->alb_info.primary_is_promisc) {
dev_set_promiscuity(bond->curr_active_slave->dev, -1);
@@ -1672,11 +1667,10 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
}
swap_slave = bond->curr_active_slave;
- bond->curr_active_slave = new_slave;
+ rcu_assign_pointer(bond->curr_active_slave, new_slave);
- if (!new_slave || (bond->slave_cnt == 0)) {
+ if (!new_slave || list_empty(&bond->slave_list))
return;
- }
/* set the new curr_active_slave to the bonds mac address
* i.e. swap mac addresses of old curr_active_slave and new curr_active_slave
@@ -1689,9 +1683,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
* ignored so we can mess with their MAC addresses without
* fear of interference from transmit activity.
*/
- if (swap_slave) {
+ if (swap_slave)
tlb_clear_slave(bond, swap_slave, 1);
- }
tlb_clear_slave(bond, new_slave, 1);
write_unlock_bh(&bond->curr_slave_lock);
@@ -1768,11 +1761,6 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
{
- if (bond->alb_info.current_alb_vlan &&
- (bond->alb_info.current_alb_vlan->vlan_id == vlan_id)) {
- bond->alb_info.current_alb_vlan = NULL;
- }
-
if (bond->alb_info.rlb_enabled) {
rlb_clear_vlan(bond, vlan_id);
}
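alb_send_learning_packets() now sends one untagged learning frame and then, under RCU, walks the bond's upper devices to send a tagged copy per VLAN, rather than consulting the private vlan_list being removed in this series. A compact sketch of that upper-device walk, with the callback name invented:

/* Sketch: visit every VLAN stacked on a device (callback name invented). */
#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

static void example_for_each_vlan(struct net_device *dev,
				  void (*cb)(struct net_device *vlan, u16 vid))
{
	struct net_device *upper;
	struct list_head *iter;

	rcu_read_lock();
	netdev_for_each_upper_dev_rcu(dev, upper, iter) {
		if (upper->priv_flags & IFF_802_1Q_VLAN)
			cb(upper, vlan_dev_vlan_id(upper));
	}
	rcu_read_unlock();
}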
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index e7a5b8b37ea..28d8e4c7dc0 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -53,7 +53,6 @@ struct slave;
#define TLB_NULL_INDEX 0xffffffff
-#define MAX_LP_BURST 3
/* rlb defs */
#define RLB_HASH_TABLE_SIZE 256
@@ -126,7 +125,6 @@ struct rlb_client_info {
u8 assigned; /* checking whether this entry is assigned */
u8 ntt; /* flag - need to transmit client info */
struct slave *slave; /* the slave assigned to this client */
- u8 tag; /* flag - need to tag skb */
unsigned short vlan_id; /* VLAN tag associated with IP address */
};
@@ -170,7 +168,6 @@ struct alb_bond_info {
* rx traffic should be
* rebalanced
*/
- struct vlan_entry *current_alb_vlan;
};
int bond_alb_initialize(struct bonding *bond, int rlb_enabled);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 07f257d44a1..39e5b1c7ffe 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -77,6 +77,7 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/pkt_sched.h>
+#include <linux/rculist.h>
#include "bonding.h"
#include "bond_3ad.h"
#include "bond_alb.h"
@@ -106,7 +107,7 @@ static char *arp_ip_target[BOND_MAX_ARP_TARGETS];
static char *arp_validate;
static char *arp_all_targets;
static char *fail_over_mac;
-static int all_slaves_active = 0;
+static int all_slaves_active;
static struct bond_params bonding_defaults;
static int resend_igmp = BOND_DEFAULT_RESEND_IGMP;
@@ -273,7 +274,7 @@ const char *bond_mode_name(int mode)
[BOND_MODE_ALB] = "adaptive load balancing",
};
- if (mode < 0 || mode > BOND_MODE_ALB)
+ if (mode < BOND_MODE_ROUNDROBIN || mode > BOND_MODE_ALB)
return "unknown";
return names[mode];
@@ -282,116 +283,6 @@ const char *bond_mode_name(int mode)
/*---------------------------------- VLAN -----------------------------------*/
/**
- * bond_add_vlan - add a new vlan id on bond
- * @bond: bond that got the notification
- * @vlan_id: the vlan id to add
- *
- * Returns -ENOMEM if allocation failed.
- */
-static int bond_add_vlan(struct bonding *bond, unsigned short vlan_id)
-{
- struct vlan_entry *vlan;
-
- pr_debug("bond: %s, vlan id %d\n",
- (bond ? bond->dev->name : "None"), vlan_id);
-
- vlan = kzalloc(sizeof(struct vlan_entry), GFP_KERNEL);
- if (!vlan)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&vlan->vlan_list);
- vlan->vlan_id = vlan_id;
-
- write_lock_bh(&bond->lock);
-
- list_add_tail(&vlan->vlan_list, &bond->vlan_list);
-
- write_unlock_bh(&bond->lock);
-
- pr_debug("added VLAN ID %d on bond %s\n", vlan_id, bond->dev->name);
-
- return 0;
-}
-
-/**
- * bond_del_vlan - delete a vlan id from bond
- * @bond: bond that got the notification
- * @vlan_id: the vlan id to delete
- *
- * returns -ENODEV if @vlan_id was not found in @bond.
- */
-static int bond_del_vlan(struct bonding *bond, unsigned short vlan_id)
-{
- struct vlan_entry *vlan;
- int res = -ENODEV;
-
- pr_debug("bond: %s, vlan id %d\n", bond->dev->name, vlan_id);
-
- block_netpoll_tx();
- write_lock_bh(&bond->lock);
-
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- if (vlan->vlan_id == vlan_id) {
- list_del(&vlan->vlan_list);
-
- if (bond_is_lb(bond))
- bond_alb_clear_vlan(bond, vlan_id);
-
- pr_debug("removed VLAN ID %d from bond %s\n",
- vlan_id, bond->dev->name);
-
- kfree(vlan);
-
- res = 0;
- goto out;
- }
- }
-
- pr_debug("couldn't find VLAN ID %d in bond %s\n",
- vlan_id, bond->dev->name);
-
-out:
- write_unlock_bh(&bond->lock);
- unblock_netpoll_tx();
- return res;
-}
-
-/**
- * bond_next_vlan - safely skip to the next item in the vlans list.
- * @bond: the bond we're working on
- * @curr: item we're advancing from
- *
- * Returns %NULL if list is empty, bond->next_vlan if @curr is %NULL,
- * or @curr->next otherwise (even if it is @curr itself again).
- *
- * Caller must hold bond->lock
- */
-struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
-{
- struct vlan_entry *next, *last;
-
- if (list_empty(&bond->vlan_list))
- return NULL;
-
- if (!curr) {
- next = list_entry(bond->vlan_list.next,
- struct vlan_entry, vlan_list);
- } else {
- last = list_entry(bond->vlan_list.prev,
- struct vlan_entry, vlan_list);
- if (last == curr) {
- next = list_entry(bond->vlan_list.next,
- struct vlan_entry, vlan_list);
- } else {
- next = list_entry(curr->vlan_list.next,
- struct vlan_entry, vlan_list);
- }
- }
-
- return next;
-}
-
-/**
* bond_dev_queue_xmit - Prepare skb for xmit.
*
* @bond: bond device that got this skb for tx.
@@ -441,28 +332,20 @@ static int bond_vlan_rx_add_vid(struct net_device *bond_dev,
__be16 proto, u16 vid)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave, *stop_at;
- int i, res;
+ struct slave *slave;
+ int res;
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
res = vlan_vid_add(slave->dev, proto, vid);
if (res)
goto unwind;
}
- res = bond_add_vlan(bond, vid);
- if (res) {
- pr_err("%s: Error: Failed to add vlan id %d\n",
- bond_dev->name, vid);
- return res;
- }
-
return 0;
unwind:
- /* unwind from head to the slave that failed */
- stop_at = slave;
- bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at)
+ /* unwind from the slave that failed */
+ bond_for_each_slave_continue_reverse(bond, slave)
vlan_vid_del(slave->dev, proto, vid);
return res;
@@ -478,48 +361,16 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
- int i, res;
- bond_for_each_slave(bond, slave, i)
+ bond_for_each_slave(bond, slave)
vlan_vid_del(slave->dev, proto, vid);
- res = bond_del_vlan(bond, vid);
- if (res) {
- pr_err("%s: Error: Failed to remove vlan id %d\n",
- bond_dev->name, vid);
- return res;
- }
+ if (bond_is_lb(bond))
+ bond_alb_clear_vlan(bond, vid);
return 0;
}
-static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *slave_dev)
-{
- struct vlan_entry *vlan;
- int res;
-
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- res = vlan_vid_add(slave_dev, htons(ETH_P_8021Q),
- vlan->vlan_id);
- if (res)
- pr_warning("%s: Failed to add vlan id %d to device %s\n",
- bond->dev->name, vlan->vlan_id,
- slave_dev->name);
- }
-}
-
-static void bond_del_vlans_from_slave(struct bonding *bond,
- struct net_device *slave_dev)
-{
- struct vlan_entry *vlan;
-
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- if (!vlan->vlan_id)
- continue;
- vlan_vid_del(slave_dev, htons(ETH_P_8021Q), vlan->vlan_id);
- }
-}
-
/*------------------------------- Link status -------------------------------*/
/*
@@ -532,15 +383,14 @@ static void bond_del_vlans_from_slave(struct bonding *bond,
static int bond_set_carrier(struct bonding *bond)
{
struct slave *slave;
- int i;
- if (bond->slave_cnt == 0)
+ if (list_empty(&bond->slave_list))
goto down;
if (bond->params.mode == BOND_MODE_8023AD)
return bond_3ad_set_carrier(bond);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
if (slave->link == BOND_LINK_UP) {
if (!netif_carrier_ok(bond->dev)) {
netif_carrier_on(bond->dev);
@@ -681,8 +531,8 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
}
} else {
struct slave *slave;
- int i;
- bond_for_each_slave(bond, slave, i) {
+
+ bond_for_each_slave(bond, slave) {
err = dev_set_promiscuity(slave->dev, inc);
if (err)
return err;
@@ -705,8 +555,8 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
}
} else {
struct slave *slave;
- int i;
- bond_for_each_slave(bond, slave, i) {
+
+ bond_for_each_slave(bond, slave) {
err = dev_set_allmulti(slave->dev, inc);
if (err)
return err;
@@ -715,15 +565,6 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
return err;
}
-static void __bond_resend_igmp_join_requests(struct net_device *dev)
-{
- struct in_device *in_dev;
-
- in_dev = __in_dev_get_rcu(dev);
- if (in_dev)
- ip_mc_rejoin_groups(in_dev);
-}
-
/*
* Retrieve the list of registered multicast addresses for the bonding
* device and retransmit an IGMP JOIN request to the current active
@@ -731,33 +572,12 @@ static void __bond_resend_igmp_join_requests(struct net_device *dev)
*/
static void bond_resend_igmp_join_requests(struct bonding *bond)
{
- struct net_device *bond_dev, *vlan_dev, *upper_dev;
- struct vlan_entry *vlan;
-
- read_lock(&bond->lock);
- rcu_read_lock();
-
- bond_dev = bond->dev;
-
- /* rejoin all groups on bond device */
- __bond_resend_igmp_join_requests(bond_dev);
-
- /*
- * if bond is enslaved to a bridge,
- * then rejoin all groups on its master
- */
- upper_dev = netdev_master_upper_dev_get_rcu(bond_dev);
- if (upper_dev && upper_dev->priv_flags & IFF_EBRIDGE)
- __bond_resend_igmp_join_requests(upper_dev);
-
- /* rejoin all groups on vlan devices */
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- vlan_dev = __vlan_find_dev_deep(bond_dev, htons(ETH_P_8021Q),
- vlan->vlan_id);
- if (vlan_dev)
- __bond_resend_igmp_join_requests(vlan_dev);
+ if (!rtnl_trylock()) {
+ queue_delayed_work(bond->wq, &bond->mcast_work, 1);
+ return;
}
- rcu_read_unlock();
+ call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
+ rtnl_unlock();
/* We use curr_slave_lock to protect against concurrent access to
* igmp_retrans from multiple running instances of this function and
@@ -769,7 +589,6 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
}
write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
}
static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
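bond_resend_igmp_join_requests() now delegates the rejoin to a NETDEV_RESEND_IGMP notifier, which must run under RTNL; since the work item must not sleep waiting for that lock, it try-locks and requeues itself when RTNL is contended. The try-lock-or-requeue shape, with invented names and the same one-jiffy retry:

/* Sketch: run an RTNL-only action from delayed work without blocking. */
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

static void example_notify_under_rtnl(struct workqueue_struct *wq,
				      struct delayed_work *work,
				      struct net_device *dev)
{
	if (!rtnl_trylock()) {
		/* RTNL is busy: retry on the next tick instead of sleeping. */
		queue_delayed_work(wq, work, 1);
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, dev);
	rtnl_unlock();
}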
@@ -808,6 +627,8 @@ static void bond_hw_addr_flush(struct net_device *bond_dev,
static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
struct slave *old_active)
{
+ ASSERT_RTNL();
+
if (old_active) {
if (bond->dev->flags & IFF_PROMISC)
dev_set_promiscuity(old_active->dev, -1);
@@ -966,9 +787,8 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
new_active = bond->curr_active_slave;
if (!new_active) { /* there were no active slaves left */
- if (bond->slave_cnt > 0) /* found one slave */
- new_active = bond->first_slave;
- else
+ new_active = bond_first_slave(bond);
+ if (!new_active)
return NULL; /* still no slave, return NULL */
}
@@ -1008,7 +828,6 @@ static bool bond_should_notify_peers(struct bonding *bond)
test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
return false;
- bond->send_peer_notif--;
return true;
}
@@ -1071,7 +890,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
if (new_active)
bond_set_slave_active_flags(new_active);
} else {
- bond->curr_active_slave = new_active;
+ rcu_assign_pointer(bond->curr_active_slave, new_active);
}
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
@@ -1115,7 +934,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
((USES_PRIMARY(bond->params.mode) && new_active) ||
bond->params.mode == BOND_MODE_ROUNDROBIN)) {
bond->igmp_retrans = bond->params.resend_igmp;
- queue_delayed_work(bond->wq, &bond->mcast_work, 0);
+ queue_delayed_work(bond->wq, &bond->mcast_work, 1);
}
}
@@ -1161,17 +980,7 @@ void bond_select_active_slave(struct bonding *bond)
*/
static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
{
- if (bond->first_slave == NULL) { /* attaching the first slave */
- new_slave->next = new_slave;
- new_slave->prev = new_slave;
- bond->first_slave = new_slave;
- } else {
- new_slave->next = bond->first_slave;
- new_slave->prev = bond->first_slave->prev;
- new_slave->next->prev = new_slave;
- new_slave->prev->next = new_slave;
- }
-
+ list_add_tail_rcu(&new_slave->list, &bond->slave_list);
bond->slave_cnt++;
}
@@ -1187,22 +996,7 @@ static void bond_attach_slave(struct bonding *bond, struct slave *new_slave)
*/
static void bond_detach_slave(struct bonding *bond, struct slave *slave)
{
- if (slave->next)
- slave->next->prev = slave->prev;
-
- if (slave->prev)
- slave->prev->next = slave->next;
-
- if (bond->first_slave == slave) { /* slave is the first slave */
- if (bond->slave_cnt > 1) { /* there are more slave */
- bond->first_slave = slave->next;
- } else {
- bond->first_slave = NULL; /* slave was the last one */
- }
- }
-
- slave->next = NULL;
- slave->prev = NULL;
+ list_del_rcu(&slave->list);
bond->slave_cnt--;
}
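bond_attach_slave()/bond_detach_slave() now use the RCU list primitives, and __bond_release_one() later adds a synchronize_rcu() so that lockless readers walking slave_list drain before the slave memory can be reused. A generic sketch of that publish/retire pattern, with invented types:

/* Sketch: RCU-safe add/remove on a list that lockless readers traverse. */
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_item {
	struct list_head list;
};

static void example_add(struct list_head *head, struct example_item *item)
{
	list_add_tail_rcu(&item->list, head);	/* publish to RCU readers */
}

static void example_remove(struct example_item *item)
{
	list_del_rcu(&item->list);	/* unlink; readers may still see it */
	synchronize_rcu();		/* wait for them before freeing */
	kfree(item);
}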
@@ -1249,47 +1043,31 @@ static void bond_poll_controller(struct net_device *bond_dev)
{
}
-static void __bond_netpoll_cleanup(struct bonding *bond)
+static void bond_netpoll_cleanup(struct net_device *bond_dev)
{
+ struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
- int i;
- bond_for_each_slave(bond, slave, i)
+ bond_for_each_slave(bond, slave)
if (IS_UP(slave->dev))
slave_disable_netpoll(slave);
}
-static void bond_netpoll_cleanup(struct net_device *bond_dev)
-{
- struct bonding *bond = netdev_priv(bond_dev);
-
- read_lock(&bond->lock);
- __bond_netpoll_cleanup(bond);
- read_unlock(&bond->lock);
-}
static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
{
struct bonding *bond = netdev_priv(dev);
struct slave *slave;
- int i, err = 0;
+ int err = 0;
- read_lock(&bond->lock);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
err = slave_enable_netpoll(slave);
if (err) {
- __bond_netpoll_cleanup(bond);
+ bond_netpoll_cleanup(dev);
break;
}
}
- read_unlock(&bond->lock);
return err;
}
-
-static struct netpoll_info *bond_netpoll_info(struct bonding *bond)
-{
- return bond->dev->npinfo;
-}
-
#else
static inline int slave_enable_netpoll(struct slave *slave)
{
@@ -1306,34 +1084,29 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
/*---------------------------------- IOCTL ----------------------------------*/
static netdev_features_t bond_fix_features(struct net_device *dev,
- netdev_features_t features)
+ netdev_features_t features)
{
- struct slave *slave;
struct bonding *bond = netdev_priv(dev);
netdev_features_t mask;
- int i;
-
- read_lock(&bond->lock);
+ struct slave *slave;
- if (!bond->first_slave) {
+ if (list_empty(&bond->slave_list)) {
/* Disable adding VLANs to empty bond. But why? --mq */
features |= NETIF_F_VLAN_CHALLENGED;
- goto out;
+ return features;
}
mask = features;
features &= ~NETIF_F_ONE_FOR_ALL;
features |= NETIF_F_ALL_FOR_ALL;
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
features = netdev_increment_features(features,
slave->dev->features,
mask);
}
features = netdev_add_tso_features(features, mask);
-out:
- read_unlock(&bond->lock);
return features;
}
@@ -1343,21 +1116,18 @@ out:
static void bond_compute_features(struct bonding *bond)
{
- struct slave *slave;
- struct net_device *bond_dev = bond->dev;
+ unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
netdev_features_t vlan_features = BOND_VLAN_FEATURES;
unsigned short max_hard_header_len = ETH_HLEN;
unsigned int gso_max_size = GSO_MAX_SIZE;
+ struct net_device *bond_dev = bond->dev;
u16 gso_max_segs = GSO_MAX_SEGS;
- int i;
- unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
-
- read_lock(&bond->lock);
+ struct slave *slave;
- if (!bond->first_slave)
+ if (list_empty(&bond->slave_list))
goto done;
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
vlan_features = netdev_increment_features(vlan_features,
slave->dev->vlan_features, BOND_VLAN_FEATURES);
@@ -1378,8 +1148,6 @@ done:
flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
bond_dev->priv_flags = flags | dst_release_flag;
- read_unlock(&bond->lock);
-
netdev_change_features(bond_dev);
}
@@ -1545,7 +1313,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* bond ether type mutual exclusion - don't allow slaves of dissimilar
* ether type (eg ARPHRD_ETHER and ARPHRD_INFINIBAND) share the same bond
*/
- if (bond->slave_cnt == 0) {
+ if (list_empty(&bond->slave_list)) {
if (bond_dev->type != slave_dev->type) {
pr_debug("%s: change device type from %d to %d\n",
bond_dev->name,
@@ -1584,7 +1352,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
if (slave_ops->ndo_set_mac_address == NULL) {
- if (bond->slave_cnt == 0) {
+ if (list_empty(&bond->slave_list)) {
pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
bond_dev->name);
bond->params.fail_over_mac = BOND_FOM_ACTIVE;
@@ -1600,7 +1368,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
/* If this is the first slave, then we need to set the master's hardware
* address to be the same as the slave's. */
- if (!bond->slave_cnt && bond->dev->addr_assign_type == NET_ADDR_RANDOM)
+ if (list_empty(&bond->slave_list) &&
+ bond->dev->addr_assign_type == NET_ADDR_RANDOM)
bond_set_dev_addr(bond->dev, slave_dev);
new_slave = kzalloc(sizeof(struct slave), GFP_KERNEL);
@@ -1608,7 +1377,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
res = -ENOMEM;
goto err_undo_flags;
}
-
+ INIT_LIST_HEAD(&new_slave->list);
/*
* Set the new_slave's queue_id to be zero. Queue ID mapping
* is set via sysfs or module option if desired.
@@ -1703,7 +1472,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
dev_mc_add(slave_dev, lacpdu_multicast);
}
- bond_add_vlans_on_slave(bond, slave_dev);
+ res = vlan_vids_add_by_dev(slave_dev, bond_dev);
+ if (res) {
+ pr_err("%s: Error: Couldn't add bond vlan ids to %s\n",
+ bond_dev->name, slave_dev->name);
+ goto err_close;
+ }
write_lock_bh(&bond->lock);
@@ -1794,15 +1568,18 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
*/
bond_set_slave_inactive_flags(new_slave);
/* if this is the first slave */
- if (bond->slave_cnt == 1) {
+ if (bond_first_slave(bond) == new_slave) {
SLAVE_AD_INFO(new_slave).id = 1;
/* Initialize AD with the number of times that the AD timer is called in 1 second
* can be called only after the mac address of the bond is set
*/
bond_3ad_initialize(bond, 1000/AD_TIMER_INTERVAL);
} else {
+ struct slave *prev_slave;
+
+ prev_slave = bond_prev_slave(bond, new_slave);
SLAVE_AD_INFO(new_slave).id =
- SLAVE_AD_INFO(new_slave->prev).id + 1;
+ SLAVE_AD_INFO(prev_slave).id + 1;
}
bond_3ad_bind_slave(new_slave);
@@ -1824,7 +1601,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* so we can change it without calling change_active_interface()
*/
if (!bond->curr_active_slave && new_slave->link == BOND_LINK_UP)
- bond->curr_active_slave = new_slave;
+ rcu_assign_pointer(bond->curr_active_slave, new_slave);
break;
} /* switch(bond_mode) */
@@ -1834,7 +1611,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_set_carrier(bond);
#ifdef CONFIG_NET_POLL_CONTROLLER
- slave_dev->npinfo = bond_netpoll_info(bond);
+ slave_dev->npinfo = bond->dev->npinfo;
if (slave_dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
read_unlock(&bond->lock);
@@ -1876,7 +1653,7 @@ err_detach:
if (!USES_PRIMARY(bond->params.mode))
bond_hw_addr_flush(bond_dev, slave_dev);
- bond_del_vlans_from_slave(bond, slave_dev);
+ vlan_vids_del_by_dev(slave_dev, bond_dev);
write_lock_bh(&bond->lock);
bond_detach_slave(bond, new_slave);
if (bond->primary_slave == new_slave)
@@ -1921,7 +1698,7 @@ err_free:
err_undo_flags:
bond_compute_features(bond);
/* Enslave of first slave has failed and we need to fix master's mac */
- if (bond->slave_cnt == 0 &&
+ if (list_empty(&bond->slave_list) &&
ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
eth_hw_addr_random(bond_dev);
@@ -1977,15 +1754,6 @@ static int __bond_release_one(struct net_device *bond_dev,
netdev_rx_handler_unregister(slave_dev);
write_lock_bh(&bond->lock);
- if (!all && !bond->params.fail_over_mac) {
- if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
- bond->slave_cnt > 1)
- pr_warning("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
- bond_dev->name, slave_dev->name,
- slave->perm_hwaddr,
- bond_dev->name, slave_dev->name);
- }
-
/* Inform AD package of unbinding of slave. */
if (bond->params.mode == BOND_MODE_8023AD) {
/* must be called before the slave is
@@ -2006,6 +1774,15 @@ static int __bond_release_one(struct net_device *bond_dev,
/* release the slave from its bond */
bond_detach_slave(bond, slave);
+ if (!all && !bond->params.fail_over_mac) {
+ if (ether_addr_equal(bond_dev->dev_addr, slave->perm_hwaddr) &&
+ !list_empty(&bond->slave_list))
+ pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
+ bond_dev->name, slave_dev->name,
+ slave->perm_hwaddr,
+ bond_dev->name, slave_dev->name);
+ }
+
if (bond->primary_slave == slave)
bond->primary_slave = NULL;
@@ -2024,7 +1801,7 @@ static int __bond_release_one(struct net_device *bond_dev,
}
if (all) {
- bond->curr_active_slave = NULL;
+ rcu_assign_pointer(bond->curr_active_slave, NULL);
} else if (oldcurrent == slave) {
/*
* Note that we hold RTNL over this sequence, so there
@@ -2042,11 +1819,11 @@ static int __bond_release_one(struct net_device *bond_dev,
write_lock_bh(&bond->lock);
}
- if (bond->slave_cnt == 0) {
+ if (list_empty(&bond->slave_list)) {
bond_set_carrier(bond);
eth_hw_addr_random(bond_dev);
- if (bond_vlan_used(bond)) {
+ if (vlan_uses_dev(bond_dev)) {
pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
bond_dev->name, bond_dev->name);
pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
@@ -2056,8 +1833,9 @@ static int __bond_release_one(struct net_device *bond_dev,
write_unlock_bh(&bond->lock);
unblock_netpoll_tx();
+ synchronize_rcu();
- if (bond->slave_cnt == 0) {
+ if (list_empty(&bond->slave_list)) {
call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
call_netdevice_notifiers(NETDEV_RELEASE, bond->dev);
}
@@ -2071,7 +1849,7 @@ static int __bond_release_one(struct net_device *bond_dev,
/* must do this from outside any spinlocks */
bond_destroy_slave_symlinks(bond_dev, slave_dev);
- bond_del_vlans_from_slave(bond, slave_dev);
+ vlan_vids_del_by_dev(slave_dev, bond_dev);
/* If the mode USES_PRIMARY, then this cases was handled above by
* bond_change_active_slave(..., NULL)
@@ -2128,7 +1906,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
int ret;
ret = bond_release(bond_dev, slave_dev);
- if ((ret == 0) && (bond->slave_cnt == 0)) {
+ if (ret == 0 && list_empty(&bond->slave_list)) {
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
pr_info("%s: destroying bond %s.\n",
bond_dev->name, bond_dev->name);
@@ -2165,23 +1943,19 @@ static int bond_ioctl_change_active(struct net_device *bond_dev, struct net_devi
read_lock(&bond->lock);
- read_lock(&bond->curr_slave_lock);
old_active = bond->curr_active_slave;
- read_unlock(&bond->curr_slave_lock);
-
new_active = bond_get_slave_by_dev(bond, slave_dev);
-
/*
* Changing to the current active: do nothing; return success.
*/
- if (new_active && (new_active == old_active)) {
+ if (new_active && new_active == old_active) {
read_unlock(&bond->lock);
return 0;
}
- if ((new_active) &&
- (old_active) &&
- (new_active->link == BOND_LINK_UP) &&
+ if (new_active &&
+ old_active &&
+ new_active->link == BOND_LINK_UP &&
IS_UP(new_active->dev)) {
block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
@@ -2213,13 +1987,12 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *info)
{
struct bonding *bond = netdev_priv(bond_dev);
+ int i = 0, res = -ENODEV;
struct slave *slave;
- int i, res = -ENODEV;
read_lock(&bond->lock);
-
- bond_for_each_slave(bond, slave, i) {
- if (i == (int)info->slave_id) {
+ bond_for_each_slave(bond, slave) {
+ if (i++ == (int)info->slave_id) {
res = 0;
strcpy(info->slave_name, slave->dev->name);
info->link = slave->link;
@@ -2228,7 +2001,6 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
break;
}
}
-
read_unlock(&bond->lock);
return res;
@@ -2239,13 +2011,13 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
static int bond_miimon_inspect(struct bonding *bond)
{
+ int link_state, commit = 0;
struct slave *slave;
- int i, link_state, commit = 0;
bool ignore_updelay;
ignore_updelay = !bond->curr_active_slave ? true : false;
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
slave->new_link = BOND_LINK_NOCHANGE;
link_state = bond_check_dev_link(bond, slave->dev, 0);
@@ -2340,9 +2112,8 @@ static int bond_miimon_inspect(struct bonding *bond)
static void bond_miimon_commit(struct bonding *bond)
{
struct slave *slave;
- int i;
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
switch (slave->new_link) {
case BOND_LINK_NOCHANGE:
continue;
@@ -2447,7 +2218,7 @@ void bond_mii_monitor(struct work_struct *work)
delay = msecs_to_jiffies(bond->params.miimon);
- if (bond->slave_cnt == 0)
+ if (list_empty(&bond->slave_list))
goto re_arm;
should_notify_peers = bond_should_notify_peers(bond);
@@ -2479,35 +2250,32 @@ re_arm:
read_unlock(&bond->lock);
if (should_notify_peers) {
- if (!rtnl_trylock()) {
- read_lock(&bond->lock);
- bond->send_peer_notif++;
- read_unlock(&bond->lock);
+ if (!rtnl_trylock())
return;
- }
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
rtnl_unlock();
}
}
-static int bond_has_this_ip(struct bonding *bond, __be32 ip)
+static bool bond_has_this_ip(struct bonding *bond, __be32 ip)
{
- struct vlan_entry *vlan;
- struct net_device *vlan_dev;
+ struct net_device *upper;
+ struct list_head *iter;
+ bool ret = false;
if (ip == bond_confirm_addr(bond->dev, 0, ip))
- return 1;
+ return true;
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- rcu_read_lock();
- vlan_dev = __vlan_find_dev_deep(bond->dev, htons(ETH_P_8021Q),
- vlan->vlan_id);
- rcu_read_unlock();
- if (vlan_dev && ip == bond_confirm_addr(vlan_dev, 0, ip))
- return 1;
+ rcu_read_lock();
+ netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+ if (ip == bond_confirm_addr(upper, 0, ip)) {
+ ret = true;
+ break;
+ }
}
+ rcu_read_unlock();
- return 0;
+ return ret;
}
/*
@@ -2542,81 +2310,79 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
{
- int i, vlan_id;
- __be32 *targets = bond->params.arp_targets;
- struct vlan_entry *vlan;
- struct net_device *vlan_dev = NULL;
+ struct net_device *upper, *vlan_upper;
+ struct list_head *iter, *vlan_iter;
struct rtable *rt;
+ __be32 *targets = bond->params.arp_targets, addr;
+ int i, vlan_id;
- for (i = 0; (i < BOND_MAX_ARP_TARGETS); i++) {
- __be32 addr;
- if (!targets[i])
- break;
+ for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
pr_debug("basa: target %pI4\n", &targets[i]);
- if (!bond_vlan_used(bond)) {
- pr_debug("basa: empty vlan: arp_send\n");
- addr = bond_confirm_addr(bond->dev, targets[i], 0);
- bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
- addr, 0);
- continue;
- }
- /*
- * If VLANs are configured, we do a route lookup to
- * determine which VLAN interface would be used, so we
- * can tag the ARP with the proper VLAN tag.
- */
+ /* Find out through which dev the packet should go */
rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
RTO_ONLINK, 0);
if (IS_ERR(rt)) {
- if (net_ratelimit()) {
- pr_warning("%s: no route to arp_ip_target %pI4\n",
- bond->dev->name, &targets[i]);
- }
+ pr_debug("%s: no route to arp_ip_target %pI4\n",
+ bond->dev->name, &targets[i]);
continue;
}
- /*
- * This target is not on a VLAN
+ vlan_id = 0;
+
+ /* bond device itself */
+ if (rt->dst.dev == bond->dev)
+ goto found;
+
+ rcu_read_lock();
+ /* first we search only for vlan devices. for every vlan
+ * found we verify its upper dev list, searching for the
+ * rt->dst.dev. If found we save the tag of the vlan and
+ * proceed to send the packet.
+ *
+ * TODO: QinQ?
*/
- if (rt->dst.dev == bond->dev) {
- ip_rt_put(rt);
- pr_debug("basa: rtdev == bond->dev: arp_send\n");
- addr = bond_confirm_addr(bond->dev, targets[i], 0);
- bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
- addr, 0);
- continue;
+ netdev_for_each_upper_dev_rcu(bond->dev, vlan_upper, vlan_iter) {
+ if (!is_vlan_dev(vlan_upper))
+ continue;
+ netdev_for_each_upper_dev_rcu(vlan_upper, upper, iter) {
+ if (upper == rt->dst.dev) {
+ vlan_id = vlan_dev_vlan_id(vlan_upper);
+ rcu_read_unlock();
+ goto found;
+ }
+ }
}
- vlan_id = 0;
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- rcu_read_lock();
- vlan_dev = __vlan_find_dev_deep(bond->dev,
- htons(ETH_P_8021Q),
- vlan->vlan_id);
- rcu_read_unlock();
- if (vlan_dev == rt->dst.dev) {
- vlan_id = vlan->vlan_id;
- pr_debug("basa: vlan match on %s %d\n",
- vlan_dev->name, vlan_id);
- break;
+ /* if the device we're looking for is not on top of any of
+ * our upper vlans, then just search for any dev that
+ * matches, and in case it's a vlan - save the id
+ */
+ netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
+ if (upper == rt->dst.dev) {
+ /* if it's a vlan - get its VID */
+ if (is_vlan_dev(upper))
+ vlan_id = vlan_dev_vlan_id(upper);
+
+ rcu_read_unlock();
+ goto found;
}
}
+ rcu_read_unlock();
- if (vlan_id && vlan_dev) {
- ip_rt_put(rt);
- addr = bond_confirm_addr(vlan_dev, targets[i], 0);
- bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
- addr, vlan_id);
- continue;
- }
+ /* Not our device - skip */
+ pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
+ bond->dev->name, &targets[i],
+ rt->dst.dev ? rt->dst.dev->name : "NULL");
- if (net_ratelimit()) {
- pr_warning("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
- bond->dev->name, &targets[i],
- rt->dst.dev ? rt->dst.dev->name : "NULL");
- }
ip_rt_put(rt);
+ continue;
+
+found:
+ addr = bond_confirm_addr(rt->dst.dev, targets[i], 0);
+ ip_rt_put(rt);
+ bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
+ addr, vlan_id);
}
}
@@ -2713,6 +2479,20 @@ out_unlock:
return RX_HANDLER_ANOTHER;
}
+/* function to verify whether we're in the arp_interval timeslice; returns true if
+ * (last_act - arp_interval) <= jiffies <= (last_act + mod * arp_interval +
+ * arp_interval/2). The arp_interval/2 is needed for really fast networks.
+ */
+static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
+ int mod)
+{
+ int delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
+
+ return time_in_range(jiffies,
+ last_act - delta_in_ticks,
+ last_act + mod * delta_in_ticks + delta_in_ticks/2);
+}
+
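/* Illustrative sketch (not part of the patch above): the window checked by
 * bond_time_in_interval() written as a small stand-alone C program. The
 * helper name demo_time_in_range() and the sample values are invented for
 * the demo; the comparison mimics the kernel's wrap-safe time_in_range().
 */
#include <stdbool.h>
#include <stdio.h>

static bool demo_time_in_range(unsigned long a, unsigned long b, unsigned long c)
{
	/* wrap-safe "b <= a <= c", same idea as the kernel helpers */
	return (long)(a - b) >= 0 && (long)(c - a) >= 0;
}

int main(void)
{
	unsigned long delta = 100;	/* arp_interval in ticks */
	unsigned long last_act = 5000;	/* last observed activity */
	int mod = 2;

	/* same window as bond_time_in_interval(bond, last_act, mod):
	 * [last_act - delta, last_act + mod * delta + delta / 2]
	 */
	for (unsigned long now = 4850; now <= 5300; now += 75)
		printf("jiffies=%lu in window: %d\n", now,
		       demo_time_in_range(now, last_act - delta,
					  last_act + mod * delta + delta / 2));
	return 0;
}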
/*
* this function is called regularly to monitor each slave's link
* ensuring that traffic is being sent and received when arp monitoring
@@ -2726,21 +2506,13 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
arp_work.work);
struct slave *slave, *oldcurrent;
int do_failover = 0;
- int delta_in_ticks, extra_ticks;
- int i;
read_lock(&bond->lock);
- delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
- extra_ticks = delta_in_ticks / 2;
-
- if (bond->slave_cnt == 0)
+ if (list_empty(&bond->slave_list))
goto re_arm;
- read_lock(&bond->curr_slave_lock);
oldcurrent = bond->curr_active_slave;
- read_unlock(&bond->curr_slave_lock);
-
/* see if any of the previous devices are up now (i.e. they have
* xmt and rcv traffic). the curr_active_slave does not come into
* the picture unless it is null. also, slave->jiffies is not needed
@@ -2749,16 +2521,12 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* TODO: what about up/down delay in arp mode? it wasn't here before
* so it can wait
*/
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
unsigned long trans_start = dev_trans_start(slave->dev);
if (slave->link != BOND_LINK_UP) {
- if (time_in_range(jiffies,
- trans_start - delta_in_ticks,
- trans_start + delta_in_ticks + extra_ticks) &&
- time_in_range(jiffies,
- slave->dev->last_rx - delta_in_ticks,
- slave->dev->last_rx + delta_in_ticks + extra_ticks)) {
+ if (bond_time_in_interval(bond, trans_start, 1) &&
+ bond_time_in_interval(bond, slave->dev->last_rx, 1)) {
slave->link = BOND_LINK_UP;
bond_set_active_slave(slave);
@@ -2786,12 +2554,8 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
* when the source ip is 0, so don't take the link down
* if we don't know our ip yet
*/
- if (!time_in_range(jiffies,
- trans_start - delta_in_ticks,
- trans_start + 2 * delta_in_ticks + extra_ticks) ||
- !time_in_range(jiffies,
- slave->dev->last_rx - delta_in_ticks,
- slave->dev->last_rx + 2 * delta_in_ticks + extra_ticks)) {
+ if (!bond_time_in_interval(bond, trans_start, 2) ||
+ !bond_time_in_interval(bond, slave->dev->last_rx, 2)) {
slave->link = BOND_LINK_DOWN;
bond_set_backup_slave(slave);
@@ -2831,7 +2595,8 @@ void bond_loadbalance_arp_mon(struct work_struct *work)
re_arm:
if (bond->params.arp_interval)
- queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
+ queue_delayed_work(bond->wq, &bond->arp_work,
+ msecs_to_jiffies(bond->params.arp_interval));
read_unlock(&bond->lock);
}
@@ -2844,32 +2609,21 @@ re_arm:
*
* Called with bond->lock held for read.
*/
-static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
+static int bond_ab_arp_inspect(struct bonding *bond)
{
+ unsigned long trans_start, last_rx;
struct slave *slave;
- int i, commit = 0;
- unsigned long trans_start;
- int extra_ticks;
-
- /* All the time comparisons below need some extra time. Otherwise, on
- * fast networks the ARP probe/reply may arrive within the same jiffy
- * as it was sent. Then, the next time the ARP monitor is run, one
- * arp_interval will already have passed in the comparisons.
- */
- extra_ticks = delta_in_ticks / 2;
+ int commit = 0;
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
slave->new_link = BOND_LINK_NOCHANGE;
+ last_rx = slave_last_rx(bond, slave);
if (slave->link != BOND_LINK_UP) {
- if (time_in_range(jiffies,
- slave_last_rx(bond, slave) - delta_in_ticks,
- slave_last_rx(bond, slave) + delta_in_ticks + extra_ticks)) {
-
+ if (bond_time_in_interval(bond, last_rx, 1)) {
slave->new_link = BOND_LINK_UP;
commit++;
}
-
continue;
}
@@ -2878,9 +2632,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
* active. This avoids bouncing, as the last receive
* times need a full ARP monitor cycle to be updated.
*/
- if (time_in_range(jiffies,
- slave->jiffies - delta_in_ticks,
- slave->jiffies + 2 * delta_in_ticks + extra_ticks))
+ if (bond_time_in_interval(bond, slave->jiffies, 2))
continue;
/*
@@ -2898,10 +2650,7 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
*/
if (!bond_is_active_slave(slave) &&
!bond->current_arp_slave &&
- !time_in_range(jiffies,
- slave_last_rx(bond, slave) - delta_in_ticks,
- slave_last_rx(bond, slave) + 3 * delta_in_ticks + extra_ticks)) {
-
+ !bond_time_in_interval(bond, last_rx, 3)) {
slave->new_link = BOND_LINK_DOWN;
commit++;
}
@@ -2914,13 +2663,8 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
*/
trans_start = dev_trans_start(slave->dev);
if (bond_is_active_slave(slave) &&
- (!time_in_range(jiffies,
- trans_start - delta_in_ticks,
- trans_start + 2 * delta_in_ticks + extra_ticks) ||
- !time_in_range(jiffies,
- slave_last_rx(bond, slave) - delta_in_ticks,
- slave_last_rx(bond, slave) + 2 * delta_in_ticks + extra_ticks))) {
-
+ (!bond_time_in_interval(bond, trans_start, 2) ||
+ !bond_time_in_interval(bond, last_rx, 2))) {
slave->new_link = BOND_LINK_DOWN;
commit++;
}
@@ -2935,24 +2679,21 @@ static int bond_ab_arp_inspect(struct bonding *bond, int delta_in_ticks)
*
* Called with RTNL and bond->lock for read.
*/
-static void bond_ab_arp_commit(struct bonding *bond, int delta_in_ticks)
+static void bond_ab_arp_commit(struct bonding *bond)
{
- struct slave *slave;
- int i;
unsigned long trans_start;
+ struct slave *slave;
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
switch (slave->new_link) {
case BOND_LINK_NOCHANGE:
continue;
case BOND_LINK_UP:
trans_start = dev_trans_start(slave->dev);
- if ((!bond->curr_active_slave &&
- time_in_range(jiffies,
- trans_start - delta_in_ticks,
- trans_start + delta_in_ticks + delta_in_ticks / 2)) ||
- bond->curr_active_slave != slave) {
+ if (bond->curr_active_slave != slave ||
+ (!bond->curr_active_slave &&
+ bond_time_in_interval(bond, trans_start, 1))) {
slave->link = BOND_LINK_UP;
if (bond->current_arp_slave) {
bond_set_slave_inactive_flags(
@@ -3014,7 +2755,7 @@ do_failover:
*/
static void bond_ab_arp_probe(struct bonding *bond)
{
- struct slave *slave;
+ struct slave *slave, *next_slave;
int i;
read_lock(&bond->curr_slave_lock);
@@ -3038,7 +2779,7 @@ static void bond_ab_arp_probe(struct bonding *bond)
*/
if (!bond->current_arp_slave) {
- bond->current_arp_slave = bond->first_slave;
+ bond->current_arp_slave = bond_first_slave(bond);
if (!bond->current_arp_slave)
return;
}
@@ -3046,7 +2787,8 @@ static void bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_inactive_flags(bond->current_arp_slave);
/* search for next candidate */
- bond_for_each_slave_from(bond, slave, i, bond->current_arp_slave->next) {
+ next_slave = bond_next_slave(bond, bond->current_arp_slave);
+ bond_for_each_slave_from(bond, slave, i, next_slave) {
if (IS_UP(slave->dev)) {
slave->link = BOND_LINK_BACK;
bond_set_slave_active_flags(slave);
@@ -3087,12 +2829,12 @@ void bond_activebackup_arp_mon(struct work_struct *work)
delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
- if (bond->slave_cnt == 0)
+ if (list_empty(&bond->slave_list))
goto re_arm;
should_notify_peers = bond_should_notify_peers(bond);
- if (bond_ab_arp_inspect(bond, delta_in_ticks)) {
+ if (bond_ab_arp_inspect(bond)) {
read_unlock(&bond->lock);
/* Race avoidance with bond_close flush of workqueue */
@@ -3105,7 +2847,7 @@ void bond_activebackup_arp_mon(struct work_struct *work)
read_lock(&bond->lock);
- bond_ab_arp_commit(bond, delta_in_ticks);
+ bond_ab_arp_commit(bond);
read_unlock(&bond->lock);
rtnl_unlock();
@@ -3121,12 +2863,8 @@ re_arm:
read_unlock(&bond->lock);
if (should_notify_peers) {
- if (!rtnl_trylock()) {
- read_lock(&bond->lock);
- bond->send_peer_notif++;
- read_unlock(&bond->lock);
+ if (!rtnl_trylock())
return;
- }
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
rtnl_unlock();
}
@@ -3161,6 +2899,10 @@ static int bond_master_netdev_event(unsigned long event,
case NETDEV_REGISTER:
bond_create_proc_entry(event_bond);
break;
+ case NETDEV_NOTIFY_PEERS:
+ if (event_bond->send_peer_notif)
+ event_bond->send_peer_notif--;
+ break;
default:
break;
}
@@ -3234,6 +2976,10 @@ static int bond_slave_netdev_event(unsigned long event,
case NETDEV_FEAT_CHANGE:
bond_compute_features(bond);
break;
+ case NETDEV_RESEND_IGMP:
+ /* Propagate to master device */
+ call_netdevice_notifiers(event, slave->bond->dev);
+ break;
default:
break;
}
@@ -3403,13 +3149,12 @@ static int bond_open(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
- int i;
/* reset slave->backup and slave->inactive */
read_lock(&bond->lock);
- if (bond->slave_cnt > 0) {
+ if (!list_empty(&bond->slave_list)) {
read_lock(&bond->curr_slave_lock);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
&& (slave != bond->curr_active_slave)) {
bond_set_slave_inactive_flags(slave);
@@ -3455,17 +3200,10 @@ static int bond_close(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- write_lock_bh(&bond->lock);
- bond->send_peer_notif = 0;
- write_unlock_bh(&bond->lock);
-
bond_work_cancel_all(bond);
- if (bond_is_lb(bond)) {
- /* Must be called only after all
- * slaves have been released
- */
+ bond->send_peer_notif = 0;
+ if (bond_is_lb(bond))
bond_alb_deinitialize(bond);
- }
bond->recv_probe = NULL;
return 0;
@@ -3477,13 +3215,11 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
struct bonding *bond = netdev_priv(bond_dev);
struct rtnl_link_stats64 temp;
struct slave *slave;
- int i;
memset(stats, 0, sizeof(*stats));
read_lock_bh(&bond->lock);
-
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
const struct rtnl_link_stats64 *sstats =
dev_get_stats(slave->dev, &temp);
@@ -3513,7 +3249,6 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
stats->tx_heartbeat_errors += sstats->tx_heartbeat_errors;
stats->tx_window_errors += sstats->tx_window_errors;
}
-
read_unlock_bh(&bond->lock);
return stats;
@@ -3652,41 +3387,35 @@ static void bond_set_rx_mode(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
- int i;
- read_lock(&bond->lock);
+ ASSERT_RTNL();
if (USES_PRIMARY(bond->params.mode)) {
- read_lock(&bond->curr_slave_lock);
- slave = bond->curr_active_slave;
+ slave = rtnl_dereference(bond->curr_active_slave);
if (slave) {
dev_uc_sync(slave->dev, bond_dev);
dev_mc_sync(slave->dev, bond_dev);
}
- read_unlock(&bond->curr_slave_lock);
} else {
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
dev_uc_sync_multiple(slave->dev, bond_dev);
dev_mc_sync_multiple(slave->dev, bond_dev);
}
}
-
- read_unlock(&bond->lock);
}
static int bond_neigh_init(struct neighbour *n)
{
struct bonding *bond = netdev_priv(n->dev);
- struct slave *slave = bond->first_slave;
const struct net_device_ops *slave_ops;
struct neigh_parms parms;
+ struct slave *slave;
int ret;
+ slave = bond_first_slave(bond);
if (!slave)
return 0;
-
slave_ops = slave->dev->netdev_ops;
-
if (!slave_ops->ndo_neigh_setup)
return 0;
@@ -3714,11 +3443,17 @@ static int bond_neigh_init(struct neighbour *n)
* The bonding ndo_neigh_setup is called at init time before any
* slave exists. So we must declare proxy setup function which will
* be used at run time to resolve the actual slave neigh param setup.
+ *
+ * It's also called by master devices (such as vlans) to setup their
+ * underlying devices. In that case - do nothing, we're already set up from
+ * our init.
*/
static int bond_neigh_setup(struct net_device *dev,
struct neigh_parms *parms)
{
- parms->neigh_setup = bond_neigh_init;
+ /* modify only our neigh_parms */
+ if (parms->dev == dev)
+ parms->neigh_setup = bond_neigh_init;
return 0;
}
@@ -3729,9 +3464,8 @@ static int bond_neigh_setup(struct net_device *dev,
static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave, *stop_at;
+ struct slave *slave;
int res = 0;
- int i;
pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
(bond_dev ? bond_dev->name : "None"), new_mtu);
@@ -3751,10 +3485,10 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
* call to the base driver.
*/
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
pr_debug("s %p s->p %p c_m %p\n",
slave,
- slave->prev,
+ bond_prev_slave(bond, slave),
slave->dev->netdev_ops->ndo_change_mtu);
res = dev_set_mtu(slave->dev, new_mtu);
@@ -3779,8 +3513,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
unwind:
/* unwind from head to the slave that failed */
- stop_at = slave;
- bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) {
+ bond_for_each_slave_continue_reverse(bond, slave) {
int tmp_res;
tmp_res = dev_set_mtu(slave->dev, bond_dev->mtu);
@@ -3804,9 +3537,8 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
{
struct bonding *bond = netdev_priv(bond_dev);
struct sockaddr *sa = addr, tmp_sa;
- struct slave *slave, *stop_at;
+ struct slave *slave;
int res = 0;
- int i;
if (bond->params.mode == BOND_MODE_ALB)
return bond_alb_set_mac_address(bond_dev, addr);
@@ -3839,7 +3571,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
* call to the base driver.
*/
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
const struct net_device_ops *slave_ops = slave->dev->netdev_ops;
pr_debug("slave %p %s\n", slave, slave->dev->name);
@@ -3871,8 +3603,7 @@ unwind:
tmp_sa.sa_family = bond_dev->type;
/* unwind from head to the slave that failed */
- stop_at = slave;
- bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at) {
+ bond_for_each_slave_continue_reverse(bond, slave) {
int tmp_res;
tmp_res = dev_set_mac_address(slave->dev, &tmp_sa);
@@ -3885,12 +3616,50 @@ unwind:
return res;
}
+/**
+ * bond_xmit_slave_id - transmit skb through slave with slave_id
+ * @bond: bonding device that is transmitting
+ * @skb: buffer to transmit
+ * @slave_id: slave id up to slave_cnt-1 through which to transmit
+ *
+ * This function tries to transmit through slave with slave_id but in case
+ * it fails, it tries to find the first available slave for transmission.
+ * The skb is consumed in all cases, thus the function is void.
+ */
+void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id)
+{
+ struct slave *slave;
+ int i = slave_id;
+
+ /* Here we start from the slave with slave_id */
+ bond_for_each_slave_rcu(bond, slave) {
+ if (--i < 0) {
+ if (slave_can_tx(slave)) {
+ bond_dev_queue_xmit(bond, skb, slave->dev);
+ return;
+ }
+ }
+ }
+
+ /* Here we start from the first slave up to slave_id */
+ i = slave_id;
+ bond_for_each_slave_rcu(bond, slave) {
+ if (--i < 0)
+ break;
+ if (slave_can_tx(slave)) {
+ bond_dev_queue_xmit(bond, skb, slave->dev);
+ return;
+ }
+ }
+ /* no slave that can tx has been found */
+ kfree_skb(skb);
+}
+
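/* Minimal user-space sketch (not part of the patch) of the wrap-around
 * selection bond_xmit_slave_id() performs: try the slave at slave_id and
 * onwards, then fall back to the slaves before it. pick_slave() and the
 * can_tx[] array are stand-ins invented for the demo.
 */
#include <stdbool.h>
#include <stdio.h>

/* returns the index used for tx, or -1 if no slave can transmit */
static int pick_slave(const bool can_tx[], int n, int slave_id)
{
	int i;

	/* first pass: from slave_id to the end of the list */
	for (i = slave_id; i < n; i++)
		if (can_tx[i])
			return i;
	/* second pass: from the head up to (but not including) slave_id */
	for (i = 0; i < slave_id; i++)
		if (can_tx[i])
			return i;
	return -1;	/* the real code kfree_skb()s here */
}

int main(void)
{
	bool can_tx[] = { false, true, false, true };

	printf("%d\n", pick_slave(can_tx, 4, 2));	/* prints 3 */
	printf("%d\n", pick_slave(can_tx, 4, 0));	/* prints 1 */
	return 0;
}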
static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave, *start_at;
- int i, slave_no, res = 1;
struct iphdr *iph = ip_hdr(skb);
+ struct slave *slave;
/*
* Start with the curr_active_slave that joined the bond as the
@@ -3899,50 +3668,20 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
* send the join/membership reports. The curr_active_slave found
* will send all of this type of traffic.
*/
- if ((iph->protocol == IPPROTO_IGMP) &&
- (skb->protocol == htons(ETH_P_IP))) {
-
- read_lock(&bond->curr_slave_lock);
- slave = bond->curr_active_slave;
- read_unlock(&bond->curr_slave_lock);
-
- if (!slave)
- goto out;
+ if (iph->protocol == IPPROTO_IGMP && skb->protocol == htons(ETH_P_IP)) {
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (slave && slave_can_tx(slave))
+ bond_dev_queue_xmit(bond, skb, slave->dev);
+ else
+ bond_xmit_slave_id(bond, skb, 0);
} else {
- /*
- * Concurrent TX may collide on rr_tx_counter; we accept
- * that as being rare enough not to justify using an
- * atomic op here.
- */
- slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
-
- bond_for_each_slave(bond, slave, i) {
- slave_no--;
- if (slave_no < 0)
- break;
- }
- }
-
- start_at = slave;
- bond_for_each_slave_from(bond, slave, i, start_at) {
- if (IS_UP(slave->dev) &&
- (slave->link == BOND_LINK_UP) &&
- bond_is_active_slave(slave)) {
- res = bond_dev_queue_xmit(bond, skb, slave->dev);
- break;
- }
- }
-
-out:
- if (res) {
- /* no suitable interface, frame not sent */
- kfree_skb(skb);
+ bond_xmit_slave_id(bond, skb,
+ bond->rr_tx_counter++ % bond->slave_cnt);
}
return NETDEV_TX_OK;
}
-
/*
* in active-backup mode, we know that bond->curr_active_slave is always valid if
* the bond has a usable interface.
@@ -3950,18 +3689,12 @@ out:
static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- int res = 1;
-
- read_lock(&bond->curr_slave_lock);
-
- if (bond->curr_active_slave)
- res = bond_dev_queue_xmit(bond, skb,
- bond->curr_active_slave->dev);
-
- read_unlock(&bond->curr_slave_lock);
+ struct slave *slave;
- if (res)
- /* no suitable interface, frame not sent */
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (slave)
+ bond_dev_queue_xmit(bond, skb, slave->dev);
+ else
kfree_skb(skb);
return NETDEV_TX_OK;
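/* Hedged kernel-style fragment (not part of the patch) summarising the RCU
 * pattern the new TX paths rely on: readers dereference curr_active_slave
 * under rcu_read_lock() (taken in bond_start_xmit() below), writers publish
 * with rcu_assign_pointer() and wait with synchronize_rcu(). Not standalone.
 */

/* reader side - runs under rcu_read_lock() */
struct slave *slave = rcu_dereference(bond->curr_active_slave);
if (slave)
	bond_dev_queue_xmit(bond, skb, slave->dev);
else
	kfree_skb(skb);

/* writer side - publish the new pointer, then wait out old readers */
rcu_assign_pointer(bond->curr_active_slave, new_active);
synchronize_rcu();	/* no reader can still see the old slave */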
@@ -3975,87 +3708,39 @@ static int bond_xmit_activebackup(struct sk_buff *skb, struct net_device *bond_d
static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave, *start_at;
- int slave_no;
- int i;
- int res = 1;
- slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt);
-
- bond_for_each_slave(bond, slave, i) {
- slave_no--;
- if (slave_no < 0)
- break;
- }
-
- start_at = slave;
-
- bond_for_each_slave_from(bond, slave, i, start_at) {
- if (IS_UP(slave->dev) &&
- (slave->link == BOND_LINK_UP) &&
- bond_is_active_slave(slave)) {
- res = bond_dev_queue_xmit(bond, skb, slave->dev);
- break;
- }
- }
-
- if (res) {
- /* no suitable interface, frame not sent */
- kfree_skb(skb);
- }
+ bond_xmit_slave_id(bond, skb,
+ bond->xmit_hash_policy(skb, bond->slave_cnt));
return NETDEV_TX_OK;
}
-/*
- * in broadcast mode, we send everything to all usable interfaces.
- */
+/* in broadcast mode, we send everything to all usable interfaces. */
static int bond_xmit_broadcast(struct sk_buff *skb, struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave, *start_at;
- struct net_device *tx_dev = NULL;
- int i;
- int res = 1;
-
- read_lock(&bond->curr_slave_lock);
- start_at = bond->curr_active_slave;
- read_unlock(&bond->curr_slave_lock);
-
- if (!start_at)
- goto out;
+ struct slave *slave = NULL;
- bond_for_each_slave_from(bond, slave, i, start_at) {
- if (IS_UP(slave->dev) &&
- (slave->link == BOND_LINK_UP) &&
- bond_is_active_slave(slave)) {
- if (tx_dev) {
- struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
- if (!skb2) {
- pr_err("%s: Error: bond_xmit_broadcast(): skb_clone() failed\n",
- bond_dev->name);
- continue;
- }
+ bond_for_each_slave_rcu(bond, slave) {
+ if (bond_is_last_slave(bond, slave))
+ break;
+ if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP) {
+ struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
- res = bond_dev_queue_xmit(bond, skb2, tx_dev);
- if (res) {
- kfree_skb(skb2);
- continue;
- }
+ if (!skb2) {
+ pr_err("%s: Error: bond_xmit_broadcast(): skb_clone() failed\n",
+ bond_dev->name);
+ continue;
}
- tx_dev = slave->dev;
+ /* bond_dev_queue_xmit always returns 0 */
+ bond_dev_queue_xmit(bond, skb2, slave->dev);
}
}
-
- if (tx_dev)
- res = bond_dev_queue_xmit(bond, skb, tx_dev);
-
-out:
- if (res)
- /* no suitable interface, frame not sent */
+ if (slave && IS_UP(slave->dev) && slave->link == BOND_LINK_UP)
+ bond_dev_queue_xmit(bond, skb, slave->dev);
+ else
kfree_skb(skb);
- /* frame sent to all suitable interfaces */
return NETDEV_TX_OK;
}
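/* Sketch in plain C (not part of the patch) of the broadcast-mode idea:
 * every slave except the last gets a clone of the buffer, and the original
 * goes out on the last one so it is never copied needlessly. clone_buf()
 * and deliver() are stand-ins for skb_clone() and bond_dev_queue_xmit().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *clone_buf(const char *buf)
{
	char *copy = malloc(strlen(buf) + 1);

	if (copy)
		strcpy(copy, buf);
	return copy;
}

static void deliver(int slave, char *buf)
{
	printf("slave %d gets: %s\n", slave, buf);
	free(buf);
}

int main(void)
{
	char *skb = clone_buf("frame");
	int nslaves = 3;

	if (!skb)
		return 1;
	/* every slave except the last one gets a clone ... */
	for (int i = 0; i < nslaves - 1; i++) {
		char *copy = clone_buf(skb);

		if (!copy)
			continue;	/* clone failed: skip this slave */
		deliver(i, copy);
	}
	/* ... and the original buffer goes out on the last slave */
	deliver(nslaves - 1, skb);
	return 0;
}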
@@ -4083,15 +3768,15 @@ static void bond_set_xmit_hash_policy(struct bonding *bond)
static inline int bond_slave_override(struct bonding *bond,
struct sk_buff *skb)
{
- int i, res = 1;
struct slave *slave = NULL;
struct slave *check_slave;
+ int res = 1;
if (!skb->queue_mapping)
return 1;
/* Find out if any slaves have the same mapping as this skb. */
- bond_for_each_slave(bond, check_slave, i) {
+ bond_for_each_slave_rcu(bond, check_slave) {
if (check_slave->queue_id == skb->queue_mapping) {
slave = check_slave;
break;
@@ -4176,14 +3861,12 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (is_netpoll_tx_blocked(dev))
return NETDEV_TX_BUSY;
- read_lock(&bond->lock);
-
- if (bond->slave_cnt)
+ rcu_read_lock();
+ if (!list_empty(&bond->slave_list))
ret = __bond_start_xmit(skb, dev);
else
kfree_skb(skb);
-
- read_unlock(&bond->lock);
+ rcu_read_unlock();
return ret;
}
@@ -4224,9 +3907,8 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
struct ethtool_cmd *ecmd)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave;
- int i;
unsigned long speed = 0;
+ struct slave *slave;
ecmd->duplex = DUPLEX_UNKNOWN;
ecmd->port = PORT_OTHER;
@@ -4237,7 +3919,7 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
* this is an accurate maximum.
*/
read_lock(&bond->lock);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
if (SLAVE_IS_OK(slave)) {
if (slave->speed != SPEED_UNKNOWN)
speed += slave->speed;
@@ -4248,6 +3930,7 @@ static int bond_ethtool_get_settings(struct net_device *bond_dev,
}
ethtool_cmd_speed_set(ecmd, speed ? : SPEED_UNKNOWN);
read_unlock(&bond->lock);
+
return 0;
}
@@ -4311,12 +3994,11 @@ static void bond_setup(struct net_device *bond_dev)
/* initialize rwlocks */
rwlock_init(&bond->lock);
rwlock_init(&bond->curr_slave_lock);
-
+ INIT_LIST_HEAD(&bond->slave_list);
bond->params = bonding_defaults;
/* Initialize pointers */
bond->dev = bond_dev;
- INIT_LIST_HEAD(&bond->vlan_list);
/* Initialize the device entry points */
ether_setup(bond_dev);
@@ -4368,23 +4050,18 @@ static void bond_setup(struct net_device *bond_dev)
static void bond_uninit(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct vlan_entry *vlan, *tmp;
+ struct slave *slave, *tmp_slave;
bond_netpoll_cleanup(bond_dev);
/* Release the bonded slaves */
- while (bond->first_slave != NULL)
- __bond_release_one(bond_dev, bond->first_slave->dev, true);
+ list_for_each_entry_safe(slave, tmp_slave, &bond->slave_list, list)
+ __bond_release_one(bond_dev, slave->dev, true);
pr_info("%s: released all slaves\n", bond_dev->name);
list_del(&bond->bond_list);
bond_debug_unregister(bond);
-
- list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) {
- list_del(&vlan->vlan_list);
- kfree(vlan);
- }
}
/*------------------------- Module initialization ---------------------------*/
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 4060d41f0ee..20a6ee25bb6 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -12,7 +12,6 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
struct bonding *bond = seq->private;
loff_t off = 0;
struct slave *slave;
- int i;
/* make sure the bond won't be taken away */
rcu_read_lock();
@@ -21,10 +20,9 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
if (*pos == 0)
return SEQ_START_TOKEN;
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave)
if (++off == *pos)
return slave;
- }
return NULL;
}
@@ -36,11 +34,13 @@ static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
++*pos;
if (v == SEQ_START_TOKEN)
- return bond->first_slave;
+ return bond_first_slave(bond);
- slave = slave->next;
+ if (bond_is_last_slave(bond, slave))
+ return NULL;
+ slave = bond_next_slave(bond, slave);
- return (slave == bond->first_slave) ? NULL : slave;
+ return slave;
}
static void bond_info_seq_stop(struct seq_file *seq, void *v)
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index dc36a3d7d9e..ce4677668e2 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -209,12 +209,12 @@ void bond_destroy_slave_symlinks(struct net_device *master,
static ssize_t bonding_show_slaves(struct device *d,
struct device_attribute *attr, char *buf)
{
- struct slave *slave;
- int i, res = 0;
struct bonding *bond = to_bond(d);
+ struct slave *slave;
+ int res = 0;
read_lock(&bond->lock);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
if (res > (PAGE_SIZE - IFNAMSIZ)) {
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
@@ -227,6 +227,7 @@ static ssize_t bonding_show_slaves(struct device *d,
read_unlock(&bond->lock);
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
+
return res;
}
@@ -325,7 +326,7 @@ static ssize_t bonding_store_mode(struct device *d,
goto out;
}
- if (bond->slave_cnt > 0) {
+ if (!list_empty(&bond->slave_list)) {
pr_err("unable to update mode of %s because it has slaves.\n",
bond->dev->name);
ret = -EPERM;
@@ -501,20 +502,25 @@ static ssize_t bonding_store_fail_over_mac(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value;
+ int new_value, ret = count;
struct bonding *bond = to_bond(d);
- if (bond->slave_cnt != 0) {
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ if (!list_empty(&bond->slave_list)) {
pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n",
bond->dev->name);
- return -EPERM;
+ ret = -EPERM;
+ goto out;
}
new_value = bond_parse_parm(buf, fail_over_mac_tbl);
if (new_value < 0) {
pr_err("%s: Ignoring invalid fail_over_mac value %s.\n",
bond->dev->name, buf);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
bond->params.fail_over_mac = new_value;
@@ -522,7 +528,9 @@ static ssize_t bonding_store_fail_over_mac(struct device *d,
bond->dev->name, fail_over_mac_tbl[new_value].modename,
new_value);
- return count;
+out:
+ rtnl_unlock();
+ return ret;
}
static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR,
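/* Hedged sketch (not part of the patch) of the locking idiom the store
 * handlers above switch to: rtnl_trylock() avoids deadlocking against a
 * writer that already holds RTNL, and restart_syscall() makes the write()
 * retry transparently. example_store() is an invented name; fragment only.
 */
static ssize_t example_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret = count;

	if (!rtnl_trylock())
		return restart_syscall();	/* come back when RTNL is free */

	/* ... parse buf and update bonding parameters under RTNL ... */

	rtnl_unlock();
	return ret;
}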
@@ -661,7 +669,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
&newtarget);
/* not to race with bond_arp_rcv */
write_lock_bh(&bond->lock);
- bond_for_each_slave(bond, slave, i)
+ bond_for_each_slave(bond, slave)
slave->target_last_arp_rx[ind] = jiffies;
targets[ind] = newtarget;
write_unlock_bh(&bond->lock);
@@ -687,7 +695,7 @@ static ssize_t bonding_store_arp_targets(struct device *d,
&newtarget);
write_lock_bh(&bond->lock);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
targets_rx = slave->target_last_arp_rx;
j = ind;
for (; (j < BOND_MAX_ARP_TARGETS-1) && targets[j+1]; j++)
@@ -844,8 +852,11 @@ static ssize_t bonding_store_lacp(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int new_value, ret = count;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
if (bond->dev->flags & IFF_UP) {
pr_err("%s: Unable to update LACP rate because interface is up.\n",
@@ -875,6 +886,8 @@ static ssize_t bonding_store_lacp(struct device *d,
ret = -EINVAL;
}
out:
+ rtnl_unlock();
+
return ret;
}
static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR,
@@ -1078,10 +1091,9 @@ static ssize_t bonding_store_primary(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int i;
- struct slave *slave;
struct bonding *bond = to_bond(d);
char ifname[IFNAMSIZ];
+ struct slave *slave;
if (!rtnl_trylock())
return restart_syscall();
@@ -1107,7 +1119,7 @@ static ssize_t bonding_store_primary(struct device *d,
goto out;
}
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
pr_info("%s: Setting %s as primary slave.\n",
bond->dev->name, slave->dev->name);
@@ -1236,16 +1248,16 @@ static ssize_t bonding_show_active_slave(struct device *d,
struct device_attribute *attr,
char *buf)
{
- struct slave *curr;
struct bonding *bond = to_bond(d);
+ struct slave *curr;
int count = 0;
- read_lock(&bond->curr_slave_lock);
- curr = bond->curr_active_slave;
- read_unlock(&bond->curr_slave_lock);
-
+ rcu_read_lock();
+ curr = rcu_dereference(bond->curr_active_slave);
if (USES_PRIMARY(bond->params.mode) && curr)
count = sprintf(buf, "%s\n", curr->dev->name);
+ rcu_read_unlock();
+
return count;
}
@@ -1253,16 +1265,14 @@ static ssize_t bonding_store_active_slave(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int i;
- struct slave *slave;
- struct slave *old_active = NULL;
- struct slave *new_active = NULL;
+ struct slave *slave, *old_active, *new_active;
struct bonding *bond = to_bond(d);
char ifname[IFNAMSIZ];
if (!rtnl_trylock())
return restart_syscall();
+ old_active = new_active = NULL;
block_netpoll_tx();
read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
@@ -1279,12 +1289,12 @@ static ssize_t bonding_store_active_slave(struct device *d,
if (!strlen(ifname) || buf[0] == '\n') {
pr_info("%s: Clearing current active slave.\n",
bond->dev->name);
- bond->curr_active_slave = NULL;
+ rcu_assign_pointer(bond->curr_active_slave, NULL);
bond_select_active_slave(bond);
goto out;
}
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) {
old_active = bond->curr_active_slave;
new_active = slave;
@@ -1295,8 +1305,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
bond->dev->name,
slave->dev->name);
goto out;
- }
- else {
+ } else {
if ((new_active) &&
(old_active) &&
(new_active->link == BOND_LINK_UP) &&
@@ -1307,8 +1316,7 @@ static ssize_t bonding_store_active_slave(struct device *d,
slave->dev->name);
bond_change_active_slave(bond,
new_active);
- }
- else {
+ } else {
pr_info("%s: Could not set %s as"
" active slave; either %s is"
" down or the link is down.\n",
@@ -1344,14 +1352,9 @@ static ssize_t bonding_show_mii_status(struct device *d,
struct device_attribute *attr,
char *buf)
{
- struct slave *curr;
struct bonding *bond = to_bond(d);
- read_lock(&bond->curr_slave_lock);
- curr = bond->curr_active_slave;
- read_unlock(&bond->curr_slave_lock);
-
- return sprintf(buf, "%s\n", curr ? "up" : "down");
+ return sprintf(buf, "%s\n", bond->curr_active_slave ? "up" : "down");
}
static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
@@ -1470,15 +1473,15 @@ static ssize_t bonding_show_queue_id(struct device *d,
struct device_attribute *attr,
char *buf)
{
- struct slave *slave;
- int i, res = 0;
struct bonding *bond = to_bond(d);
+ struct slave *slave;
+ int res = 0;
if (!rtnl_trylock())
return restart_syscall();
read_lock(&bond->lock);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
if (res > (PAGE_SIZE - IFNAMSIZ - 6)) {
/* not enough space for another interface_name:queue_id pair */
if ((PAGE_SIZE - res) > 10)
@@ -1493,6 +1496,7 @@ static ssize_t bonding_show_queue_id(struct device *d,
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
rtnl_unlock();
+
return res;
}
@@ -1507,7 +1511,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
struct slave *slave, *update_slave;
struct bonding *bond = to_bond(d);
u16 qid;
- int i, ret = count;
+ int ret = count;
char *delim;
struct net_device *sdev = NULL;
@@ -1542,7 +1546,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
/* Search for the slave and check for duplicate qids */
update_slave = NULL;
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
if (sdev == slave->dev)
/*
* We don't need to check the matching
@@ -1594,8 +1598,8 @@ static ssize_t bonding_store_slaves_active(struct device *d,
struct device_attribute *attr,
const char *buf, size_t count)
{
- int i, new_value, ret = count;
struct bonding *bond = to_bond(d);
+ int new_value, ret = count;
struct slave *slave;
if (sscanf(buf, "%d", &new_value) != 1) {
@@ -1618,7 +1622,7 @@ static ssize_t bonding_store_slaves_active(struct device *d,
}
read_lock(&bond->lock);
- bond_for_each_slave(bond, slave, i) {
+ bond_for_each_slave(bond, slave) {
if (!bond_is_active_slave(slave)) {
if (new_value)
slave->inactive = 0;
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 42d1c6599cb..f7ab16185f6 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -71,6 +71,28 @@
set_fs(fs); \
res; })
+/* slave list primitives */
+#define bond_to_slave(ptr) list_entry(ptr, struct slave, list)
+
+/* IMPORTANT: bond_first/last_slave can return NULL in case of an empty list */
+#define bond_first_slave(bond) \
+ list_first_entry_or_null(&(bond)->slave_list, struct slave, list)
+#define bond_last_slave(bond) \
+ (list_empty(&(bond)->slave_list) ? NULL : \
+ bond_to_slave((bond)->slave_list.prev))
+
+#define bond_is_first_slave(bond, pos) ((pos)->list.prev == &(bond)->slave_list)
+#define bond_is_last_slave(bond, pos) ((pos)->list.next == &(bond)->slave_list)
+
+/* Since bond_first/last_slave can return NULL, these can return NULL too */
+#define bond_next_slave(bond, pos) \
+ (bond_is_last_slave(bond, pos) ? bond_first_slave(bond) : \
+ bond_to_slave((pos)->list.next))
+
+#define bond_prev_slave(bond, pos) \
+ (bond_is_first_slave(bond, pos) ? bond_last_slave(bond) : \
+ bond_to_slave((pos)->list.prev))
+
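/* Hedged fragment (not part of the patch) showing how the primitives above
 * behave: bond_first_slave() may return NULL on an empty list, and
 * bond_next_slave() wraps from the tail back to the head, so a circular
 * walk has to terminate on its own - this is how bond_ab_arp_probe() picks
 * the next ARP slave. Variables are assumed, not standalone code.
 */
struct slave *start, *pos;

start = bond_first_slave(bond);
if (!start)
	return;				/* empty slave list */

pos = start;
do {
	/* ... inspect pos ... */
	pos = bond_next_slave(bond, pos);	/* wraps tail -> head */
} while (pos != start);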
/**
* bond_for_each_slave_from - iterate the slaves list from a starting point
* @bond: the bond holding this list.
@@ -80,37 +102,33 @@
*
* Caller must hold bond->lock
*/
-#define bond_for_each_slave_from(bond, pos, cnt, start) \
- for (cnt = 0, pos = start; \
- cnt < (bond)->slave_cnt; \
- cnt++, pos = (pos)->next)
+#define bond_for_each_slave_from(bond, pos, cnt, start) \
+ for (cnt = 0, pos = start; pos && cnt < (bond)->slave_cnt; \
+ cnt++, pos = bond_next_slave(bond, pos))
/**
- * bond_for_each_slave_from_to - iterate the slaves list from start point to stop point
- * @bond: the bond holding this list.
- * @pos: current slave.
- * @cnt: counter for number max of moves
- * @start: start point.
- * @stop: stop point.
+ * bond_for_each_slave - iterate over all slaves
+ * @bond: the bond holding this list
+ * @pos: current slave
*
* Caller must hold bond->lock
*/
-#define bond_for_each_slave_from_to(bond, pos, cnt, start, stop) \
- for (cnt = 0, pos = start; \
- ((cnt < (bond)->slave_cnt) && (pos != (stop)->next)); \
- cnt++, pos = (pos)->next)
+#define bond_for_each_slave(bond, pos) \
+ list_for_each_entry(pos, &(bond)->slave_list, list)
+
+/* Caller must have rcu_read_lock */
+#define bond_for_each_slave_rcu(bond, pos) \
+ list_for_each_entry_rcu(pos, &(bond)->slave_list, list)
/**
- * bond_for_each_slave - iterate the slaves list from head
- * @bond: the bond holding this list.
- * @pos: current slave.
- * @cnt: counter for max number of moves
+ * bond_for_each_slave_continue_reverse - iterate in reverse from a given position
+ * @bond: the bond holding this list
+ * @pos: slave to continue from
*
* Caller must hold bond->lock
*/
-#define bond_for_each_slave(bond, pos, cnt) \
- bond_for_each_slave_from(bond, pos, cnt, (bond)->first_slave)
-
+#define bond_for_each_slave_continue_reverse(bond, pos) \
+ list_for_each_entry_continue_reverse(pos, &(bond)->slave_list, list)
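/* Hedged usage sketch (not part of the patch) for the three iterators: the
 * plain variant under bond->lock, the _rcu variant under rcu_read_lock(),
 * and _continue_reverse to unwind the slaves already modified when a loop
 * fails part-way, as bond_change_mtu() does. Fragment only; slave, res,
 * new_mtu and bond_dev are assumed to exist as in that function.
 */
read_lock(&bond->lock);
bond_for_each_slave(bond, slave)
	pr_info("%s\n", slave->dev->name);
read_unlock(&bond->lock);

rcu_read_lock();
bond_for_each_slave_rcu(bond, slave)
	if (slave_can_tx(slave))
		break;
rcu_read_unlock();

/* unwind pattern: on failure, walk backwards over the slaves already set */
bond_for_each_slave(bond, slave) {
	res = dev_set_mtu(slave->dev, new_mtu);
	if (res)
		goto unwind;
}
return 0;

unwind:
bond_for_each_slave_continue_reverse(bond, slave)
	dev_set_mtu(slave->dev, bond_dev->mtu);
return res;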
#ifdef CONFIG_NET_POLL_CONTROLLER
extern atomic_t netpoll_block_tx;
@@ -167,15 +185,9 @@ struct bond_parm_tbl {
#define BOND_MAX_MODENAME_LEN 20
-struct vlan_entry {
- struct list_head vlan_list;
- unsigned short vlan_id;
-};
-
struct slave {
struct net_device *dev; /* first - useful for panic debug */
- struct slave *next;
- struct slave *prev;
+ struct list_head list;
struct bonding *bond; /* our master */
int delay;
unsigned long jiffies;
@@ -215,7 +227,7 @@ struct slave {
*/
struct bonding {
struct net_device *dev; /* first - useful for panic debug */
- struct slave *first_slave;
+ struct list_head slave_list;
struct slave *curr_active_slave;
struct slave *current_arp_slave;
struct slave *primary_slave;
@@ -237,7 +249,6 @@ struct bonding {
struct ad_bond_info ad_info;
struct alb_bond_info alb_info;
struct bond_params params;
- struct list_head vlan_list;
struct workqueue_struct *wq;
struct delayed_work mii_work;
struct delayed_work arp_work;
@@ -250,11 +261,6 @@ struct bonding {
#endif /* CONFIG_DEBUG_FS */
};
-static inline bool bond_vlan_used(struct bonding *bond)
-{
- return !list_empty(&bond->vlan_list);
-}
-
#define bond_slave_get_rcu(dev) \
((struct slave *) rcu_dereference(dev->rx_handler_data))
@@ -270,13 +276,10 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond,
struct net_device *slave_dev)
{
struct slave *slave = NULL;
- int i;
- bond_for_each_slave(bond, slave, i) {
- if (slave->dev == slave_dev) {
+ bond_for_each_slave(bond, slave)
+ if (slave->dev == slave_dev)
return slave;
- }
- }
return NULL;
}
@@ -416,10 +419,20 @@ static inline __be32 bond_confirm_addr(struct net_device *dev, __be32 dst, __be3
return addr;
}
+static inline bool slave_can_tx(struct slave *slave)
+{
+ if (IS_UP(slave->dev) && slave->link == BOND_LINK_UP &&
+ bond_is_active_slave(slave))
+ return true;
+ else
+ return false;
+}
+
struct bond_net;
struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
+void bond_xmit_slave_id(struct bonding *bond, struct sk_buff *skb, int slave_id);
int bond_create(struct net *net, const char *name);
int bond_create_sysfs(struct bond_net *net);
void bond_destroy_sysfs(struct bond_net *net);
@@ -477,10 +490,9 @@ static inline void bond_destroy_proc_dir(struct bond_net *bn)
static inline struct slave *bond_slave_has_mac(struct bonding *bond,
const u8 *mac)
{
- int i = 0;
struct slave *tmp;
- bond_for_each_slave(bond, tmp, i)
+ bond_for_each_slave(bond, tmp)
if (ether_addr_equal_64bits(mac, tmp->dev->dev_addr))
return tmp;
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 34dea95d58d..88a6a5810ec 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -347,7 +347,9 @@ static int ldisc_open(struct tty_struct *tty)
/* release devices to avoid name collision */
ser_release(NULL);
- sprintf(name, "cf%s", tty->name);
+ result = snprintf(name, sizeof(name), "cf%s", tty->name);
+ if (result >= IFNAMSIZ)
+ return -EINVAL;
dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
if (!dev)
return -ENOMEM;
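/* The caif_serial fix above relies on snprintf() returning the length it
 * would have written; a return value >= the buffer size means the name was
 * truncated. A small stand-alone demonstration (not part of the patch),
 * with IFNAMSIZ assumed to be 16 as in the kernel:
 */
#include <stdio.h>

#define IFNAMSIZ 16

int main(void)
{
	char name[IFNAMSIZ];
	const char *tty_name = "ttyS0123456789XYZ";	/* deliberately too long */
	int n;

	n = snprintf(name, sizeof(name), "cf%s", tty_name);
	if (n >= IFNAMSIZ)
		printf("would truncate (%d >= %d), reject with -EINVAL\n",
		       n, IFNAMSIZ);
	else
		printf("interface name: %s\n", name);
	return 0;
}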
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index dbbe97ae121..3b1ff614870 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1355,7 +1355,7 @@ static int at91_can_probe(struct platform_device *pdev)
if (at91_is_sam9263(priv))
dev->sysfs_groups[0] = &at91_sysfs_attr_group;
- dev_set_drvdata(&pdev->dev, dev);
+ platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
err = register_candev(dev);
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index c6f838d922a..294ced3cc22 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -195,7 +195,7 @@ static int c_can_plat_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
+ if (IS_ERR(priv->raminit_ctrlreg) || (int)priv->instance < 0)
dev_info(&pdev->dev, "control memory is not used for raminit\n");
else
priv->raminit = c_can_hw_raminit;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 7b0be0910f4..71c677e651d 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -850,12 +850,17 @@ static int flexcan_open(struct net_device *dev)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
- clk_prepare_enable(priv->clk_ipg);
- clk_prepare_enable(priv->clk_per);
+ err = clk_prepare_enable(priv->clk_ipg);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(priv->clk_per);
+ if (err)
+ goto out_disable_ipg;
err = open_candev(dev);
if (err)
- goto out;
+ goto out_disable_per;
err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev);
if (err)
@@ -875,8 +880,9 @@ static int flexcan_open(struct net_device *dev)
out_close:
close_candev(dev);
- out:
+ out_disable_per:
clk_disable_unprepare(priv->clk_per);
+ out_disable_ipg:
clk_disable_unprepare(priv->clk_ipg);
return err;
@@ -933,8 +939,13 @@ static int register_flexcandev(struct net_device *dev)
struct flexcan_regs __iomem *regs = priv->base;
u32 reg, err;
- clk_prepare_enable(priv->clk_ipg);
- clk_prepare_enable(priv->clk_per);
+ err = clk_prepare_enable(priv->clk_ipg);
+ if (err)
+ return err;
+
+ err = clk_prepare_enable(priv->clk_per);
+ if (err)
+ goto out_disable_ipg;
/* select "bus clock", chip must be disabled */
flexcan_chip_disable(priv);
@@ -959,15 +970,16 @@ static int register_flexcandev(struct net_device *dev)
if (!(reg & FLEXCAN_MCR_FEN)) {
netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
err = -ENODEV;
- goto out;
+ goto out_disable_per;
}
err = register_candev(dev);
- out:
+ out_disable_per:
/* disable core and turn off clocks */
flexcan_chip_disable(priv);
clk_disable_unprepare(priv->clk_per);
+ out_disable_ipg:
clk_disable_unprepare(priv->clk_ipg);
return err;
@@ -1001,7 +1013,6 @@ static int flexcan_probe(struct platform_device *pdev)
struct resource *mem;
struct clk *clk_ipg = NULL, *clk_per = NULL;
void __iomem *base;
- resource_size_t mem_size;
int err, irq;
u32 clock_freq = 0;
@@ -1013,43 +1024,25 @@ static int flexcan_probe(struct platform_device *pdev)
clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(clk_ipg)) {
dev_err(&pdev->dev, "no ipg clock defined\n");
- err = PTR_ERR(clk_ipg);
- goto failed_clock;
+ return PTR_ERR(clk_ipg);
}
clock_freq = clk_get_rate(clk_ipg);
clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(clk_per)) {
dev_err(&pdev->dev, "no per clock defined\n");
- err = PTR_ERR(clk_per);
- goto failed_clock;
+ return PTR_ERR(clk_per);
}
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!mem || irq <= 0) {
- err = -ENODEV;
- goto failed_get;
- }
+ if (irq <= 0)
+ return -ENODEV;
- mem_size = resource_size(mem);
- if (!request_mem_region(mem->start, mem_size, pdev->name)) {
- err = -EBUSY;
- goto failed_get;
- }
-
- base = ioremap(mem->start, mem_size);
- if (!base) {
- err = -ENOMEM;
- goto failed_map;
- }
-
- dev = alloc_candev(sizeof(struct flexcan_priv), 1);
- if (!dev) {
- err = -ENOMEM;
- goto failed_alloc;
- }
+ base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
of_id = of_match_device(flexcan_of_match, &pdev->dev);
if (of_id) {
@@ -1058,10 +1051,13 @@ static int flexcan_probe(struct platform_device *pdev)
devtype_data = (struct flexcan_devtype_data *)
pdev->id_entry->driver_data;
} else {
- err = -ENODEV;
- goto failed_devtype;
+ return -ENODEV;
}
+ dev = alloc_candev(sizeof(struct flexcan_priv), 1);
+ if (!dev)
+ return -ENOMEM;
+
dev->netdev_ops = &flexcan_netdev_ops;
dev->irq = irq;
dev->flags |= IFF_ECHO;
@@ -1087,7 +1083,7 @@ static int flexcan_probe(struct platform_device *pdev)
netif_napi_add(dev, &priv->napi, flexcan_poll, FLEXCAN_NAPI_WEIGHT);
- dev_set_drvdata(&pdev->dev, dev);
+ platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
err = register_flexcandev(dev);
@@ -1104,28 +1100,15 @@ static int flexcan_probe(struct platform_device *pdev)
return 0;
failed_register:
- failed_devtype:
free_candev(dev);
- failed_alloc:
- iounmap(base);
- failed_map:
- release_mem_region(mem->start, mem_size);
- failed_get:
- failed_clock:
return err;
}
static int flexcan_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
- struct flexcan_priv *priv = netdev_priv(dev);
- struct resource *mem;
unregister_flexcandev(dev);
- iounmap(priv->base);
-
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(mem->start, resource_size(mem));
free_candev(dev);
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 8cda23bf061..fe7dd696957 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -37,9 +37,6 @@
*
* static struct mcp251x_platform_data mcp251x_info = {
* .oscillator_frequency = 8000000,
- * .board_specific_setup = &mcp251x_setup,
- * .power_enable = mcp251x_power_enable,
- * .transceiver_enable = NULL,
* };
*
* static struct spi_board_info spi_board_info[] = {
@@ -76,6 +73,7 @@
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/uaccess.h>
+#include <linux/regulator/consumer.h>
/* SPI interface instruction set */
#define INSTRUCTION_WRITE 0x02
@@ -264,6 +262,8 @@ struct mcp251x_priv {
#define AFTER_SUSPEND_POWER 4
#define AFTER_SUSPEND_RESTART 8
int restart_tx;
+ struct regulator *power;
+ struct regulator *transceiver;
};
#define MCP251X_IS(_model) \
@@ -667,16 +667,25 @@ static int mcp251x_hw_probe(struct spi_device *spi)
return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
}
+static int mcp251x_power_enable(struct regulator *reg, int enable)
+{
+ if (IS_ERR(reg))
+ return 0;
+
+ if (enable)
+ return regulator_enable(reg);
+ else
+ return regulator_disable(reg);
+}
+
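/* Hedged fragment (not part of the patch) showing how the optional-regulator
 * helper above is meant to be used at probe time: devm_regulator_get() may
 * hand back an error pointer on boards without the supply, and
 * mcp251x_power_enable() treats that as "nothing to do", so only
 * -EPROBE_DEFER has to be handled specially. Error handling trimmed.
 */
priv->power = devm_regulator_get(&spi->dev, "vdd");
priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
if (PTR_ERR(priv->power) == -EPROBE_DEFER ||
    PTR_ERR(priv->transceiver) == -EPROBE_DEFER)
	return -EPROBE_DEFER;		/* supplies exist but are not ready */

ret = mcp251x_power_enable(priv->power, 1);	/* silently succeeds if absent */
if (ret)
	return ret;

/* open/close and suspend/resume toggle the transceiver the same way */
mcp251x_power_enable(priv->transceiver, 1);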
static void mcp251x_open_clean(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
- struct mcp251x_platform_data *pdata = spi->dev.platform_data;
free_irq(spi->irq, priv);
mcp251x_hw_sleep(spi);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
+ mcp251x_power_enable(priv->transceiver, 0);
close_candev(net);
}
@@ -684,7 +693,6 @@ static int mcp251x_stop(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
- struct mcp251x_platform_data *pdata = spi->dev.platform_data;
close_candev(net);
@@ -704,8 +712,7 @@ static int mcp251x_stop(struct net_device *net)
mcp251x_hw_sleep(spi);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
+ mcp251x_power_enable(priv->transceiver, 0);
priv->can.state = CAN_STATE_STOPPED;
@@ -928,8 +935,7 @@ static int mcp251x_open(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
- struct mcp251x_platform_data *pdata = spi->dev.platform_data;
- unsigned long flags;
+ unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_FALLING;
int ret;
ret = open_candev(net);
@@ -939,25 +945,17 @@ static int mcp251x_open(struct net_device *net)
}
mutex_lock(&priv->mcp_lock);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(1);
+ mcp251x_power_enable(priv->transceiver, 1);
priv->force_quit = 0;
priv->tx_skb = NULL;
priv->tx_len = 0;
- flags = IRQF_ONESHOT;
- if (pdata->irq_flags)
- flags |= pdata->irq_flags;
- else
- flags |= IRQF_TRIGGER_FALLING;
-
ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
flags, DEVICE_NAME, priv);
if (ret) {
dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
+ mcp251x_power_enable(priv->transceiver, 0);
close_candev(net);
goto open_unlock;
}
@@ -1026,6 +1024,19 @@ static int mcp251x_can_probe(struct spi_device *spi)
CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
priv->model = spi_get_device_id(spi)->driver_data;
priv->net = net;
+
+ priv->power = devm_regulator_get(&spi->dev, "vdd");
+ priv->transceiver = devm_regulator_get(&spi->dev, "xceiver");
+ if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
+ (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
+ ret = -EPROBE_DEFER;
+ goto error_power;
+ }
+
+ ret = mcp251x_power_enable(priv->power, 1);
+ if (ret)
+ goto error_power;
+
spi_set_drvdata(spi, priv);
priv->spi = spi;
@@ -1068,30 +1079,24 @@ static int mcp251x_can_probe(struct spi_device *spi)
}
}
- if (pdata->power_enable)
- pdata->power_enable(1);
-
- /* Call out to platform specific setup */
- if (pdata->board_specific_setup)
- pdata->board_specific_setup(spi);
-
SET_NETDEV_DEV(net, &spi->dev);
/* Configure the SPI bus */
- spi->mode = SPI_MODE_0;
+ spi->mode = spi->mode ? : SPI_MODE_0;
+ if (mcp251x_is_2510(spi))
+ spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000;
+ else
+ spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000;
spi->bits_per_word = 8;
spi_setup(spi);
/* Here is OK to not lock the MCP, no one knows about it yet */
if (!mcp251x_hw_probe(spi)) {
- dev_info(&spi->dev, "Probe failed\n");
+ ret = -ENODEV;
goto error_probe;
}
mcp251x_hw_sleep(spi);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
-
ret = register_candev(net);
if (ret)
goto error_probe;
@@ -1109,13 +1114,13 @@ error_rx_buf:
if (!mcp251x_enable_dma)
kfree(priv->spi_tx_buf);
error_tx_buf:
- free_candev(net);
if (mcp251x_enable_dma)
dma_free_coherent(&spi->dev, PAGE_SIZE,
priv->spi_tx_buf, priv->spi_tx_dma);
+ mcp251x_power_enable(priv->power, 0);
+error_power:
+ free_candev(net);
error_alloc:
- if (pdata->power_enable)
- pdata->power_enable(0);
dev_err(&spi->dev, "probe failed\n");
error_out:
return ret;
@@ -1123,12 +1128,10 @@ error_out:
static int mcp251x_can_remove(struct spi_device *spi)
{
- struct mcp251x_platform_data *pdata = spi->dev.platform_data;
struct mcp251x_priv *priv = spi_get_drvdata(spi);
struct net_device *net = priv->net;
unregister_candev(net);
- free_candev(net);
if (mcp251x_enable_dma) {
dma_free_coherent(&spi->dev, PAGE_SIZE,
@@ -1138,8 +1141,9 @@ static int mcp251x_can_remove(struct spi_device *spi)
kfree(priv->spi_rx_buf);
}
- if (pdata->power_enable)
- pdata->power_enable(0);
+ mcp251x_power_enable(priv->power, 0);
+
+ free_candev(net);
return 0;
}
@@ -1149,7 +1153,6 @@ static int mcp251x_can_remove(struct spi_device *spi)
static int mcp251x_can_suspend(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
- struct mcp251x_platform_data *pdata = spi->dev.platform_data;
struct mcp251x_priv *priv = spi_get_drvdata(spi);
struct net_device *net = priv->net;
@@ -1163,15 +1166,14 @@ static int mcp251x_can_suspend(struct device *dev)
netif_device_detach(net);
mcp251x_hw_sleep(spi);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
+ mcp251x_power_enable(priv->transceiver, 0);
priv->after_suspend = AFTER_SUSPEND_UP;
} else {
priv->after_suspend = AFTER_SUSPEND_DOWN;
}
- if (pdata->power_enable) {
- pdata->power_enable(0);
+ if (!IS_ERR(priv->power)) {
+ regulator_disable(priv->power);
priv->after_suspend |= AFTER_SUSPEND_POWER;
}
@@ -1181,16 +1183,14 @@ static int mcp251x_can_suspend(struct device *dev)
static int mcp251x_can_resume(struct device *dev)
{
struct spi_device *spi = to_spi_device(dev);
- struct mcp251x_platform_data *pdata = spi->dev.platform_data;
struct mcp251x_priv *priv = spi_get_drvdata(spi);
if (priv->after_suspend & AFTER_SUSPEND_POWER) {
- pdata->power_enable(1);
+ mcp251x_power_enable(priv->power, 1);
queue_work(priv->wq, &priv->restart_work);
} else {
if (priv->after_suspend & AFTER_SUSPEND_UP) {
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(1);
+ mcp251x_power_enable(priv->transceiver, 1);
queue_work(priv->wq, &priv->restart_work);
} else {
priv->after_suspend = 0;
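
The hunks above replace the platform-data transceiver_enable()/power_enable() callbacks with calls to mcp251x_power_enable() on the "vdd" and "xceiver" regulators, but the helper itself is defined in a part of the patch not shown here. A minimal sketch of the shape such an optional-regulator helper usually takes (an assumption for illustration, not the patch's literal code) is:

#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Sketch: enable/disable an optional regulator; a missing regulator
 * (error pointer returned by devm_regulator_get()) means "nothing to do".
 */
static int mcp251x_power_enable(struct regulator *reg, int enable)
{
	if (IS_ERR_OR_NULL(reg))
		return 0;

	if (enable)
		return regulator_enable(reg);
	else
		return regulator_disable(reg);
}
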
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 5b0ee8ef588..e59b3a392af 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -40,6 +40,7 @@ struct mpc5xxx_can_data {
unsigned int type;
u32 (*get_clock)(struct platform_device *ofdev, const char *clock_name,
int *mscan_clksrc);
+ void (*put_clock)(struct platform_device *ofdev);
};
#ifdef CONFIG_PPC_MPC52xx
@@ -148,7 +149,10 @@ static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
goto exit_put;
}
- /* Determine the MSCAN device index from the physical address */
+ /* Determine the MSCAN device index from the peripheral's
+ * physical address. Register address offsets against the
+ * IMMR base are: 0x1300, 0x1380, 0x2300, 0x2380
+ */
pval = of_get_property(ofdev->dev.of_node, "reg", &plen);
BUG_ON(!pval || plen < sizeof(*pval));
clockidx = (*pval & 0x80) ? 1 : 0;
@@ -177,7 +181,7 @@ static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
clockdiv = 1;
if (!clock_name || !strcmp(clock_name, "sys")) {
- sys_clk = clk_get(&ofdev->dev, "sys_clk");
+ sys_clk = devm_clk_get(&ofdev->dev, "sys_clk");
if (IS_ERR(sys_clk)) {
dev_err(&ofdev->dev, "couldn't get sys_clk\n");
goto exit_unmap;
@@ -200,7 +204,7 @@ static u32 mpc512x_can_get_clock(struct platform_device *ofdev,
}
if (clocksrc < 0) {
- ref_clk = clk_get(&ofdev->dev, "ref_clk");
+ ref_clk = devm_clk_get(&ofdev->dev, "ref_clk");
if (IS_ERR(ref_clk)) {
dev_err(&ofdev->dev, "couldn't get ref_clk\n");
goto exit_unmap;
@@ -277,6 +281,8 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
dev = alloc_mscandev();
if (!dev)
goto exit_dispose_irq;
+ platform_set_drvdata(ofdev, dev);
+ SET_NETDEV_DEV(dev, &ofdev->dev);
priv = netdev_priv(dev);
priv->reg_base = base;
@@ -293,8 +299,6 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
goto exit_free_mscan;
}
- SET_NETDEV_DEV(dev, &ofdev->dev);
-
err = register_mscandev(dev, mscan_clksrc);
if (err) {
dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
@@ -302,8 +306,6 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
goto exit_free_mscan;
}
- platform_set_drvdata(ofdev, dev);
-
dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
priv->reg_base, dev->irq, priv->can.clock.freq);
@@ -321,10 +323,17 @@ exit_unmap_mem:
static int mpc5xxx_can_remove(struct platform_device *ofdev)
{
+ const struct of_device_id *match;
+ const struct mpc5xxx_can_data *data;
struct net_device *dev = platform_get_drvdata(ofdev);
struct mscan_priv *priv = netdev_priv(dev);
+ match = of_match_device(mpc5xxx_can_table, &ofdev->dev);
+ data = match ? match->data : NULL;
+
unregister_mscandev(dev);
+ if (data && data->put_clock)
+ data->put_clock(ofdev);
iounmap(priv->reg_base);
irq_dispose_mapping(dev->irq);
free_candev(dev);
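
mpc5xxx_can_remove() above looks up the per-compatible mpc5xxx_can_data through of_match_device() so an optional put_clock() hook can run on teardown. A sketch of that lookup pattern, with hypothetical table and data names, is:

#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_data {
	void (*put_clock)(struct platform_device *ofdev);
};

static const struct example_data example_mpc5121_data = {
	.put_clock = NULL,		/* the real driver fills this in */
};

static const struct of_device_id example_table[] = {
	{ .compatible = "example,mscan", .data = &example_mpc5121_data },
	{ /* sentinel */ }
};

static void example_teardown(struct platform_device *ofdev)
{
	const struct of_device_id *match;
	const struct example_data *data;

	match = of_match_device(example_table, &ofdev->dev);
	data = match ? match->data : NULL;

	if (data && data->put_clock)	/* hook is optional */
		data->put_clock(ofdev);
}
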
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index e6b40954e20..a955ec8c4b9 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -573,10 +573,21 @@ static int mscan_open(struct net_device *dev)
struct mscan_priv *priv = netdev_priv(dev);
struct mscan_regs __iomem *regs = priv->reg_base;
+ if (priv->clk_ipg) {
+ ret = clk_prepare_enable(priv->clk_ipg);
+ if (ret)
+ goto exit_retcode;
+ }
+ if (priv->clk_can) {
+ ret = clk_prepare_enable(priv->clk_can);
+ if (ret)
+ goto exit_dis_ipg_clock;
+ }
+
/* common open */
ret = open_candev(dev);
if (ret)
- return ret;
+ goto exit_dis_can_clock;
napi_enable(&priv->napi);
@@ -604,6 +615,13 @@ exit_free_irq:
exit_napi_disable:
napi_disable(&priv->napi);
close_candev(dev);
+exit_dis_can_clock:
+ if (priv->clk_can)
+ clk_disable_unprepare(priv->clk_can);
+exit_dis_ipg_clock:
+ if (priv->clk_ipg)
+ clk_disable_unprepare(priv->clk_ipg);
+exit_retcode:
return ret;
}
@@ -621,6 +639,11 @@ static int mscan_close(struct net_device *dev)
close_candev(dev);
free_irq(dev->irq, dev);
+ if (priv->clk_can)
+ clk_disable_unprepare(priv->clk_can);
+ if (priv->clk_ipg)
+ clk_disable_unprepare(priv->clk_ipg);
+
return 0;
}
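
mscan_open() above brings up the register (IPG) clock before the bit-rate (CAN) clock and tears them down in reverse order on any failure. A stripped-down sketch of that prepare/enable ladder, using hypothetical names, is:

#include <linux/clk.h>

/* Sketch: enable two optional clocks and unwind in reverse order on error. */
static int example_enable_clocks(struct clk *clk_ipg, struct clk *clk_can)
{
	int ret;

	if (clk_ipg) {
		ret = clk_prepare_enable(clk_ipg);
		if (ret)
			return ret;
	}
	if (clk_can) {
		ret = clk_prepare_enable(clk_can);
		if (ret)
			goto err_disable_ipg;
	}
	return 0;

err_disable_ipg:
	if (clk_ipg)
		clk_disable_unprepare(clk_ipg);
	return ret;
}
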
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index af2ed8baf0a..9c24d60a23b 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -21,6 +21,7 @@
#ifndef __MSCAN_H__
#define __MSCAN_H__
+#include <linux/clk.h>
#include <linux/types.h>
/* MSCAN control register 0 (CANCTL0) bits */
@@ -283,6 +284,8 @@ struct mscan_priv {
unsigned int type; /* MSCAN type variants */
unsigned long flags;
void __iomem *reg_base; /* ioremap'ed address to registers */
+ struct clk *clk_ipg; /* clock for registers */
+ struct clk *clk_can; /* clock for bitrates */
u8 shadow_statflg;
u8 shadow_canrier;
u8 cur_pri;
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 6aa7b3266c8..ac6177d3bef 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -412,10 +412,20 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
switch (msg->msg.hdr.cmd) {
case CMD_CAN_RX:
+ if (msg->msg.rx.net >= dev->net_count) {
+ dev_err(dev->udev->dev.parent, "format error\n");
+ break;
+ }
+
esd_usb2_rx_can_msg(dev->nets[msg->msg.rx.net], msg);
break;
case CMD_CAN_TX:
+ if (msg->msg.txdone.net >= dev->net_count) {
+ dev_err(dev->udev->dev.parent, "format error\n");
+ break;
+ }
+
esd_usb2_tx_done_msg(dev->nets[msg->msg.txdone.net],
msg);
break;
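
Both CMD_CAN_RX and CMD_CAN_TX above now reject a message whose network index is out of range before it is used to index dev->nets[]. The same defensive pattern, reduced to a hypothetical helper, looks like:

#include <linux/netdevice.h>

/* Sketch: validate an index taken from device data before using it. */
static struct net_device *example_lookup_net(struct net_device **nets,
					     unsigned int net_count,
					     unsigned int idx)
{
	if (idx >= net_count)	/* untrusted value from the USB message */
		return NULL;	/* caller logs a format error and drops it */

	return nets[idx];
}
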
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 25723d8ee20..925ab8ec932 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
if ((mc->ptr + rec_len) > mc->end)
goto decode_failed;
- memcpy(cf->data, mc->ptr, rec_len);
+ memcpy(cf->data, mc->ptr, cf->can_dlc);
mc->ptr += rec_len;
}
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index cbd388eea68..8becd3d838b 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -779,6 +779,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
usb_unanchor_urb(urb);
usb_free_coherent(priv->udev, RX_BUFFER_SIZE, buf,
urb->transfer_dma);
+ usb_free_urb(urb);
break;
}
diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig
index a5f91e1e8fe..becef25fa19 100644
--- a/drivers/net/ethernet/8390/Kconfig
+++ b/drivers/net/ethernet/8390/Kconfig
@@ -148,7 +148,7 @@ config PCMCIA_PCNET
config NE_H8300
tristate "NE2000 compatible support for H8/300"
- depends on H8300
+ depends on H8300H_AKI3068NET || H8300H_H8MAX
---help---
Say Y here if you want to use the NE2000 compatible
controller on the Renesas H8/300 processor.
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index e1d26433d61..f92f001551d 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -707,7 +707,7 @@ static int ax_init_dev(struct net_device *dev)
#ifdef CONFIG_AX88796_93CX6
if (ax->plat->flags & AXFLG_HAS_93CX6) {
- unsigned char mac_addr[6];
+ unsigned char mac_addr[ETH_ALEN];
struct eeprom_93cx6 eeprom;
eeprom.data = ei_local;
@@ -719,7 +719,7 @@ static int ax_init_dev(struct net_device *dev)
(__le16 __force *)mac_addr,
sizeof(mac_addr) >> 1);
- memcpy(dev->dev_addr, mac_addr, 6);
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
}
#endif
if (ax->plat->wordlength == 2) {
@@ -840,7 +840,7 @@ static int ax_probe(struct platform_device *pdev)
ei_local = netdev_priv(dev);
ax = to_ax_dev(dev);
- ax->plat = pdev->dev.platform_data;
+ ax->plat = dev_get_platdata(&pdev->dev);
platform_set_drvdata(pdev, dev);
ei_local->rxcr_base = ax->plat->rcr_val;
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 2037080c504..506b0248c40 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -90,6 +90,7 @@ source "drivers/net/ethernet/marvell/Kconfig"
source "drivers/net/ethernet/mellanox/Kconfig"
source "drivers/net/ethernet/micrel/Kconfig"
source "drivers/net/ethernet/microchip/Kconfig"
+source "drivers/net/ethernet/moxa/Kconfig"
source "drivers/net/ethernet/myricom/Kconfig"
config FEALNX
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 390bd0bfaa2..c0b8789952e 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -42,6 +42,7 @@ obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/
obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/
obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
+obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
obj-$(CONFIG_FEALNX) += fealnx.o
obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index e904b3838dc..e66684a438f 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1647,12 +1647,12 @@ static int bfin_mac_probe(struct platform_device *pdev)
setup_mac_addr(ndev->dev_addr);
- if (!pdev->dev.platform_data) {
+ if (!dev_get_platdata(&pdev->dev)) {
dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n");
rc = -ENODEV;
goto out_err_probe_mac;
}
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
lp->mii_bus = platform_get_drvdata(pd);
if (!lp->mii_bus) {
dev_err(&pdev->dev, "Cannot get mii_bus!\n");
@@ -1660,7 +1660,7 @@ static int bfin_mac_probe(struct platform_device *pdev)
goto out_err_probe_mac;
}
lp->mii_bus->priv = ndev;
- mii_bus_data = pd->dev.platform_data;
+ mii_bus_data = dev_get_platdata(&pd->dev);
rc = mii_probe(ndev, mii_bus_data->phy_mode);
if (rc) {
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 7ff4b30d55e..e0669455514 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1464,18 +1464,18 @@ static int greth_of_probe(struct platform_device *ofdev)
}
/* Allocate TX descriptor ring in coherent memory */
- greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
- &greth->tx_bd_base_phys,
- GFP_KERNEL | __GFP_ZERO);
+ greth->tx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
+ &greth->tx_bd_base_phys,
+ GFP_KERNEL);
if (!greth->tx_bd_base) {
err = -ENOMEM;
goto error3;
}
/* Allocate RX descriptor ring in coherent memory */
- greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
- &greth->rx_bd_base_phys,
- GFP_KERNEL | __GFP_ZERO);
+ greth->rx_bd_base = dma_zalloc_coherent(greth->dev, 1024,
+ &greth->rx_bd_base_phys,
+ GFP_KERNEL);
if (!greth->rx_bd_base) {
err = -ENOMEM;
goto error4;
diff --git a/drivers/net/ethernet/allwinner/Kconfig b/drivers/net/ethernet/allwinner/Kconfig
index 53ad213e865..d8d95d4cd45 100644
--- a/drivers/net/ethernet/allwinner/Kconfig
+++ b/drivers/net/ethernet/allwinner/Kconfig
@@ -3,19 +3,20 @@
#
config NET_VENDOR_ALLWINNER
- bool "Allwinner devices"
- default y
- depends on ARCH_SUNXI
- ---help---
- If you have a network (Ethernet) card belonging to this
- class, say Y and read the Ethernet-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>.
+ bool "Allwinner devices"
+ default y
- Note that the answer to this question doesn't directly
- affect the kernel: saying N will just cause the configurator
- to skip all the questions about Allwinner cards. If you say Y,
- you will be asked for your specific card in the following
- questions.
+ depends on ARCH_SUNXI
+ ---help---
+ If you have a network (Ethernet) card belonging to this
+ class, say Y and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly
+ affect the kernel: saying N will just cause the configurator
+ to skip all the questions about Allwinner cards. If you say Y,
+ you will be asked for your specific card in the following
+ questions.
if NET_VENDOR_ALLWINNER
@@ -26,6 +27,7 @@ config SUN4I_EMAC
select CRC32
select MII
select PHYLIB
+ select MDIO_SUN4I
---help---
Support for Allwinner A10 EMAC ethernet driver.
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index e8d0ef508f4..10ceca523fc 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -1147,7 +1147,7 @@ static struct net_device *atarilance_dev;
static int __init atarilance_module_init(void)
{
atarilance_dev = atarilance_probe(-1);
- return PTR_RET(atarilance_dev);
+ return PTR_ERR_OR_ZERO(atarilance_dev);
}
static void __exit atarilance_module_exit(void)
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index ceb45bc963a..91d52b49584 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1131,7 +1131,7 @@ static int au1000_probe(struct platform_device *pdev)
writel(0, aup->enable);
aup->mac_enabled = 0;
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
if (!pd) {
dev_info(&pdev->dev, "no platform_data passed,"
" PHY search on MAC0\n");
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index a51497c9d2a..e108e911da0 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -188,7 +188,7 @@ static struct net_device *dev_mvme147_lance;
int __init init_module(void)
{
dev_mvme147_lance = mvme147lance_probe(-1);
- return PTR_RET(dev_mvme147_lance);
+ return PTR_ERR_OR_ZERO(dev_mvme147_lance);
}
void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 26fc0ce0faa..1cf33addd15 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -1238,7 +1238,7 @@ MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)");
int __init init_module(void)
{
dev_ni65 = ni65_probe(-1);
- return PTR_RET(dev_ni65);
+ return PTR_ERR_OR_ZERO(dev_ni65);
}
void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index ed213072764..2d8e2881977 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1521,7 +1521,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
char *chipname;
struct net_device *dev;
const struct pcnet32_access *a = NULL;
- u8 promaddr[6];
+ u8 promaddr[ETH_ALEN];
int ret = -ENODEV;
/* reset the chip */
@@ -1665,10 +1665,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
}
/* read PROM address and compare with CSR address */
- for (i = 0; i < 6; i++)
+ for (i = 0; i < ETH_ALEN; i++)
promaddr[i] = inb(ioaddr + i);
- if (memcmp(promaddr, dev->dev_addr, 6) ||
+ if (memcmp(promaddr, dev->dev_addr, ETH_ALEN) ||
!is_valid_ether_addr(dev->dev_addr)) {
if (is_valid_ether_addr(promaddr)) {
if (pcnet32_debug & NETIF_MSG_PROBE) {
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 4375abe61da..d6b20296b8e 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -940,7 +940,7 @@ static struct net_device *sun3lance_dev;
int __init init_module(void)
{
sun3lance_dev = sun3lance_probe(-1);
- return PTR_RET(sun3lance_dev);
+ return PTR_ERR_OR_ZERO(sun3lance_dev);
}
void __exit cleanup_module(void)
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index f1b121ee552..9e160148726 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -149,8 +149,6 @@ static void arc_emac_tx_clean(struct net_device *ndev)
struct sk_buff *skb = tx_buff->skb;
unsigned int info = le32_to_cpu(txbd->info);
- *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
-
if ((info & FOR_EMAC) || !txbd->data)
break;
@@ -180,6 +178,8 @@ static void arc_emac_tx_clean(struct net_device *ndev)
txbd->data = 0;
txbd->info = 0;
+ *txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
+
if (netif_queue_stopped(ndev))
netif_wake_queue(ndev);
}
@@ -199,7 +199,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
struct arc_emac_priv *priv = netdev_priv(ndev);
unsigned int work_done;
- for (work_done = 0; work_done <= budget; work_done++) {
+ for (work_done = 0; work_done < budget; work_done++) {
unsigned int *last_rx_bd = &priv->last_rx_bd;
struct net_device_stats *stats = &priv->stats;
struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
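
The arc_emac RX loop is corrected to process at most `budget` frames (work_done < budget), matching the NAPI contract in which a poll routine that consumes less than its budget may complete NAPI and re-enable interrupts. A hedged sketch of the usual poll shape, with a hypothetical per-frame helper, is:

#include <linux/netdevice.h>

static bool example_rx_one(struct napi_struct *napi);	/* hypothetical */

/* Sketch: canonical NAPI poll loop bounded by the budget. */
static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		if (!example_rx_one(napi))	/* no more frames queued */
			break;
	}

	if (work_done < budget)
		napi_complete(napi);	/* driver re-enables RX interrupts here */

	return work_done;
}
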
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index b2bf324631d..0f0556526ba 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -520,6 +520,9 @@ struct atl1c_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
struct napi_struct napi;
+ struct page *rx_page;
+ unsigned int rx_page_offset;
+ unsigned int rx_frag_size;
struct atl1c_hw hw;
struct atl1c_hw_stats hw_stats;
struct mii_if_info mii; /* MII interface info */
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 786a8748329..a36a760ada2 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -481,10 +481,15 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
struct net_device *dev)
{
+ unsigned int head_size;
int mtu = dev->mtu;
adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
+
+ head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ adapter->rx_frag_size = roundup_pow_of_two(head_size);
}
static netdev_features_t atl1c_fix_features(struct net_device *netdev,
@@ -952,6 +957,10 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
kfree(adapter->tpd_ring[0].buffer_info);
adapter->tpd_ring[0].buffer_info = NULL;
}
+ if (adapter->rx_page) {
+ put_page(adapter->rx_page);
+ adapter->rx_page = NULL;
+ }
}
/**
@@ -1639,6 +1648,35 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
skb_checksum_none_assert(skb);
}
+static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
+{
+ struct sk_buff *skb;
+ struct page *page;
+
+ if (adapter->rx_frag_size > PAGE_SIZE)
+ return netdev_alloc_skb(adapter->netdev,
+ adapter->rx_buffer_len);
+
+ page = adapter->rx_page;
+ if (!page) {
+ adapter->rx_page = page = alloc_page(GFP_ATOMIC);
+ if (unlikely(!page))
+ return NULL;
+ adapter->rx_page_offset = 0;
+ }
+
+ skb = build_skb(page_address(page) + adapter->rx_page_offset,
+ adapter->rx_frag_size);
+ if (likely(skb)) {
+ adapter->rx_page_offset += adapter->rx_frag_size;
+ if (adapter->rx_page_offset >= PAGE_SIZE)
+ adapter->rx_page = NULL;
+ else
+ get_page(page);
+ }
+ return skb;
+}
+
static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
{
struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
@@ -1660,7 +1698,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
while (next_info->flags & ATL1C_BUFFER_FREE) {
rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
- skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len);
+ skb = atl1c_alloc_skb(adapter);
if (unlikely(!skb)) {
if (netif_msg_rx_err(adapter))
dev_warn(&pdev->dev, "alloc rx buffer failed\n");
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 52c96036dcc..2fa5b86f139 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -130,7 +130,7 @@ config BNX2X_SRIOV
config BGMAC
tristate "BCMA bus GBit core support"
- depends on BCMA_HOST_SOC && HAS_DMA
+ depends on BCMA_HOST_SOC && HAS_DMA && BCM47XX
select PHYLIB
---help---
This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index b1bcd4ba474..8ac48fbf8a6 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -948,8 +948,7 @@ static int bcm_enet_open(struct net_device *dev)
/* allocate rx dma ring */
size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma,
- GFP_KERNEL | __GFP_ZERO);
+ p = dma_zalloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
if (!p) {
ret = -ENOMEM;
goto out_freeirq_tx;
@@ -960,8 +959,7 @@ static int bcm_enet_open(struct net_device *dev)
/* allocate tx dma ring */
size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
- p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma,
- GFP_KERNEL | __GFP_ZERO);
+ p = dma_zalloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
if (!p) {
ret = -ENOMEM;
goto out_free_rx_ring;
@@ -1747,11 +1745,10 @@ static int bcm_enet_probe(struct platform_device *pdev)
if (!bcm_enet_shared_base[0])
return -ENODEV;
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
- if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
+ if (!res_irq || !res_irq_rx || !res_irq_tx)
return -ENODEV;
ret = 0;
@@ -1767,9 +1764,10 @@ static int bcm_enet_probe(struct platform_device *pdev)
if (ret)
goto out;
- priv->base = devm_request_and_ioremap(&pdev->dev, res_mem);
- if (priv->base == NULL) {
- ret = -ENOMEM;
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, res_mem);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
goto out;
}
@@ -1800,7 +1798,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
priv->rx_ring_size = BCMENET_DEF_RX_DESC;
priv->tx_ring_size = BCMENET_DEF_TX_DESC;
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
if (pd) {
memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
priv->has_phy = pd->has_phy;
@@ -1964,7 +1962,7 @@ static int bcm_enet_remove(struct platform_device *pdev)
} else {
struct bcm63xx_enet_platform_data *pd;
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
if (pd && pd->mii_config)
pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
bcm_enet_mdio_write_mii);
@@ -2742,7 +2740,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
priv->tx_ring_size = BCMENET_DEF_TX_DESC;
priv->dma_maxburst = BCMENETSW_DMA_MAXBURST;
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
if (pd) {
memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
memcpy(priv->used_ports, pd->used_ports,
@@ -2836,7 +2834,6 @@ static int bcm_enetsw_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
- platform_set_drvdata(pdev, NULL);
free_netdev(dev);
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 6a2de1d79ff..e838a3f74b6 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -1,6 +1,6 @@
/* bnx2.c: Broadcom NX2 network driver.
*
- * Copyright (c) 2004-2011 Broadcom Corporation
+ * Copyright (c) 2004-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -58,8 +58,8 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.2.3"
-#define DRV_MODULE_RELDATE "June 27, 2012"
+#define DRV_MODULE_VERSION "2.2.4"
+#define DRV_MODULE_RELDATE "Aug 05, 2013"
#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
@@ -853,9 +853,8 @@ bnx2_alloc_mem(struct bnx2 *bp)
bp->status_stats_size = status_blk_size +
sizeof(struct statistics_block);
- status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
- &bp->status_blk_mapping,
- GFP_KERNEL | __GFP_ZERO);
+ status_blk = dma_zalloc_coherent(&bp->pdev->dev, bp->status_stats_size,
+ &bp->status_blk_mapping, GFP_KERNEL);
if (status_blk == NULL)
goto alloc_mem_err;
@@ -3908,136 +3907,121 @@ init_cpu_err:
return rc;
}
-static int
-bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
+static void
+bnx2_setup_wol(struct bnx2 *bp)
{
- u16 pmcsr;
+ int i;
+ u32 val, wol_msg;
- pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+ if (bp->wol) {
+ u32 advertising;
+ u8 autoneg;
- switch (state) {
- case PCI_D0: {
- u32 val;
+ autoneg = bp->autoneg;
+ advertising = bp->advertising;
- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
- (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
- PCI_PM_CTRL_PME_STATUS);
+ if (bp->phy_port == PORT_TP) {
+ bp->autoneg = AUTONEG_SPEED;
+ bp->advertising = ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_Autoneg;
+ }
- if (pmcsr & PCI_PM_CTRL_STATE_MASK)
- /* delay required during transition out of D3hot */
- msleep(20);
+ spin_lock_bh(&bp->phy_lock);
+ bnx2_setup_phy(bp, bp->phy_port);
+ spin_unlock_bh(&bp->phy_lock);
- val = BNX2_RD(bp, BNX2_EMAC_MODE);
- val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
- val &= ~BNX2_EMAC_MODE_MPKT;
- BNX2_WR(bp, BNX2_EMAC_MODE, val);
+ bp->autoneg = autoneg;
+ bp->advertising = advertising;
- val = BNX2_RD(bp, BNX2_RPM_CONFIG);
- val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
- BNX2_WR(bp, BNX2_RPM_CONFIG, val);
- break;
- }
- case PCI_D3hot: {
- int i;
- u32 val, wol_msg;
-
- if (bp->wol) {
- u32 advertising;
- u8 autoneg;
-
- autoneg = bp->autoneg;
- advertising = bp->advertising;
-
- if (bp->phy_port == PORT_TP) {
- bp->autoneg = AUTONEG_SPEED;
- bp->advertising = ADVERTISED_10baseT_Half |
- ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half |
- ADVERTISED_100baseT_Full |
- ADVERTISED_Autoneg;
- }
+ bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
- spin_lock_bh(&bp->phy_lock);
- bnx2_setup_phy(bp, bp->phy_port);
- spin_unlock_bh(&bp->phy_lock);
+ val = BNX2_RD(bp, BNX2_EMAC_MODE);
- bp->autoneg = autoneg;
- bp->advertising = advertising;
+ /* Enable port mode. */
+ val &= ~BNX2_EMAC_MODE_PORT;
+ val |= BNX2_EMAC_MODE_MPKT_RCVD |
+ BNX2_EMAC_MODE_ACPI_RCVD |
+ BNX2_EMAC_MODE_MPKT;
+ if (bp->phy_port == PORT_TP) {
+ val |= BNX2_EMAC_MODE_PORT_MII;
+ } else {
+ val |= BNX2_EMAC_MODE_PORT_GMII;
+ if (bp->line_speed == SPEED_2500)
+ val |= BNX2_EMAC_MODE_25G_MODE;
+ }
- bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
+ BNX2_WR(bp, BNX2_EMAC_MODE, val);
- val = BNX2_RD(bp, BNX2_EMAC_MODE);
+ /* receive all multicast */
+ for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
+ BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
+ 0xffffffff);
+ }
+ BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
- /* Enable port mode. */
- val &= ~BNX2_EMAC_MODE_PORT;
- val |= BNX2_EMAC_MODE_MPKT_RCVD |
- BNX2_EMAC_MODE_ACPI_RCVD |
- BNX2_EMAC_MODE_MPKT;
- if (bp->phy_port == PORT_TP)
- val |= BNX2_EMAC_MODE_PORT_MII;
- else {
- val |= BNX2_EMAC_MODE_PORT_GMII;
- if (bp->line_speed == SPEED_2500)
- val |= BNX2_EMAC_MODE_25G_MODE;
- }
+ val = 1 | BNX2_RPM_SORT_USER0_BC_EN | BNX2_RPM_SORT_USER0_MC_EN;
+ BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
+ BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
+ BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
- BNX2_WR(bp, BNX2_EMAC_MODE, val);
+ /* Need to enable EMAC and RPM for WOL. */
+ BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
+ BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
+ BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
+ BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
- /* receive all multicast */
- for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
- BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
- 0xffffffff);
- }
- BNX2_WR(bp, BNX2_EMAC_RX_MODE,
- BNX2_EMAC_RX_MODE_SORT_MODE);
+ val = BNX2_RD(bp, BNX2_RPM_CONFIG);
+ val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
+ BNX2_WR(bp, BNX2_RPM_CONFIG, val);
- val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
- BNX2_RPM_SORT_USER0_MC_EN;
- BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
- BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
- BNX2_WR(bp, BNX2_RPM_SORT_USER0, val |
- BNX2_RPM_SORT_USER0_ENA);
+ wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
+ } else {
+ wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
+ }
- /* Need to enable EMAC and RPM for WOL. */
- BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
- BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
- BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
- BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
+ if (!(bp->flags & BNX2_FLAG_NO_WOL))
+ bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
- val = BNX2_RD(bp, BNX2_RPM_CONFIG);
- val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
- BNX2_WR(bp, BNX2_RPM_CONFIG, val);
+}
- wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
- }
- else {
- wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
- }
+static int
+bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
+{
+ switch (state) {
+ case PCI_D0: {
+ u32 val;
+
+ pci_enable_wake(bp->pdev, PCI_D0, false);
+ pci_set_power_state(bp->pdev, PCI_D0);
- if (!(bp->flags & BNX2_FLAG_NO_WOL))
- bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
- 1, 0);
+ val = BNX2_RD(bp, BNX2_EMAC_MODE);
+ val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
+ val &= ~BNX2_EMAC_MODE_MPKT;
+ BNX2_WR(bp, BNX2_EMAC_MODE, val);
- pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ val = BNX2_RD(bp, BNX2_RPM_CONFIG);
+ val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
+ BNX2_WR(bp, BNX2_RPM_CONFIG, val);
+ break;
+ }
+ case PCI_D3hot: {
+ bnx2_setup_wol(bp);
+ pci_wake_from_d3(bp->pdev, bp->wol);
if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
(BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
if (bp->wol)
- pmcsr |= 3;
- }
- else {
- pmcsr |= 3;
- }
- if (bp->wol) {
- pmcsr |= PCI_PM_CTRL_PME_ENABLE;
+ pci_set_power_state(bp->pdev, PCI_D3hot);
+ } else {
+ pci_set_power_state(bp->pdev, PCI_D3hot);
}
- pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
- pmcsr);
/* No more memory access after this point until
* device is brought back to D0.
*/
- udelay(50);
break;
}
default:
@@ -6317,7 +6301,6 @@ bnx2_open(struct net_device *dev)
netif_carrier_off(dev);
- bnx2_set_power_state(bp, PCI_D0);
bnx2_disable_int(bp);
rc = bnx2_setup_int_mode(bp, disable_msi);
@@ -6724,7 +6707,6 @@ bnx2_close(struct net_device *dev)
bnx2_del_napi(bp);
bp->link_up = 0;
netif_carrier_off(bp->dev);
- bnx2_set_power_state(bp, PCI_D3hot);
return 0;
}
@@ -7081,6 +7063,9 @@ bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
else {
bp->wol = 0;
}
+
+ device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
+
return 0;
}
@@ -7156,9 +7141,6 @@ bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
struct bnx2 *bp = netdev_priv(dev);
int rc;
- if (!netif_running(dev))
- return -EAGAIN;
-
/* parameters already validated in ethtool_get_eeprom */
rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
@@ -7173,9 +7155,6 @@ bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
struct bnx2 *bp = netdev_priv(dev);
int rc;
- if (!netif_running(dev))
- return -EAGAIN;
-
/* parameters already validated in ethtool_set_eeprom */
rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
@@ -7535,8 +7514,6 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
struct bnx2 *bp = netdev_priv(dev);
- bnx2_set_power_state(bp, PCI_D0);
-
memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
if (etest->flags & ETH_TEST_FL_OFFLINE) {
int i;
@@ -7585,8 +7562,6 @@ bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
etest->flags |= ETH_TEST_FL_FAILED;
}
- if (!netif_running(bp->dev))
- bnx2_set_power_state(bp, PCI_D3hot);
}
static void
@@ -7658,8 +7633,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
switch (state) {
case ETHTOOL_ID_ACTIVE:
- bnx2_set_power_state(bp, PCI_D0);
-
bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
return 1; /* cycle on/off once per second */
@@ -7680,9 +7653,6 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
case ETHTOOL_ID_INACTIVE:
BNX2_WR(bp, BNX2_EMAC_LED, 0);
BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
-
- if (!netif_running(dev))
- bnx2_set_power_state(bp, PCI_D3hot);
break;
}
@@ -8130,8 +8100,6 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
goto err_out_release;
}
- bnx2_set_power_state(bp, PCI_D0);
-
/* Configure byte swap and enable write to the reg_window registers.
* Rely on CPU to do target byte swapping on big endian systems
* The chip's target access swapping will not swap all accesses
@@ -8170,13 +8138,13 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
- if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
+ if (pdev->msix_cap)
bp->flags |= BNX2_FLAG_MSIX_CAP;
}
if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
- if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
+ if (pdev->msi_cap)
bp->flags |= BNX2_FLAG_MSI_CAP;
}
@@ -8369,6 +8337,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->wol = 0;
}
+ if (bp->flags & BNX2_FLAG_NO_WOL)
+ device_set_wakeup_capable(&bp->pdev->dev, false);
+ else
+ device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
+
if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
bp->tx_quick_cons_trip_int =
bp->tx_quick_cons_trip;
@@ -8609,46 +8582,52 @@ bnx2_remove_one(struct pci_dev *pdev)
}
static int
-bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
+bnx2_suspend(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2 *bp = netdev_priv(dev);
- /* PCI register 4 needs to be saved whether netif_running() or not.
- * MSI address and data need to be saved if using MSI and
- * netif_running().
- */
- pci_save_state(pdev);
- if (!netif_running(dev))
- return 0;
-
- cancel_work_sync(&bp->reset_task);
- bnx2_netif_stop(bp, true);
- netif_device_detach(dev);
- del_timer_sync(&bp->timer);
- bnx2_shutdown_chip(bp);
- bnx2_free_skbs(bp);
- bnx2_set_power_state(bp, pci_choose_state(pdev, state));
+ if (netif_running(dev)) {
+ cancel_work_sync(&bp->reset_task);
+ bnx2_netif_stop(bp, true);
+ netif_device_detach(dev);
+ del_timer_sync(&bp->timer);
+ bnx2_shutdown_chip(bp);
+ __bnx2_free_irq(bp);
+ bnx2_free_skbs(bp);
+ }
+ bnx2_setup_wol(bp);
return 0;
}
static int
-bnx2_resume(struct pci_dev *pdev)
+bnx2_resume(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2 *bp = netdev_priv(dev);
- pci_restore_state(pdev);
if (!netif_running(dev))
return 0;
bnx2_set_power_state(bp, PCI_D0);
netif_device_attach(dev);
+ bnx2_request_irq(bp);
bnx2_init_nic(bp, 1);
bnx2_netif_start(bp, true);
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static SIMPLE_DEV_PM_OPS(bnx2_pm_ops, bnx2_suspend, bnx2_resume);
+#define BNX2_PM_OPS (&bnx2_pm_ops)
+
+#else
+
+#define BNX2_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
/**
* bnx2_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
@@ -8694,24 +8673,28 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct bnx2 *bp = netdev_priv(dev);
- pci_ers_result_t result;
- int err;
+ pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
+ int err = 0;
rtnl_lock();
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset\n");
- result = PCI_ERS_RESULT_DISCONNECT;
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
- if (netif_running(dev)) {
- bnx2_set_power_state(bp, PCI_D0);
- bnx2_init_nic(bp, 1);
- }
- result = PCI_ERS_RESULT_RECOVERED;
+ if (netif_running(dev))
+ err = bnx2_init_nic(bp, 1);
+
+ if (!err)
+ result = PCI_ERS_RESULT_RECOVERED;
+ }
+
+ if (result != PCI_ERS_RESULT_RECOVERED && netif_running(dev)) {
+ bnx2_napi_enable(bp);
+ dev_close(dev);
}
rtnl_unlock();
@@ -8748,6 +8731,28 @@ static void bnx2_io_resume(struct pci_dev *pdev)
rtnl_unlock();
}
+static void bnx2_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct bnx2 *bp;
+
+ if (!dev)
+ return;
+
+ bp = netdev_priv(dev);
+ if (!bp)
+ return;
+
+ rtnl_lock();
+ if (netif_running(dev))
+ dev_close(bp->dev);
+
+ if (system_state == SYSTEM_POWER_OFF)
+ bnx2_set_power_state(bp, PCI_D3hot);
+
+ rtnl_unlock();
+}
+
static const struct pci_error_handlers bnx2_err_handler = {
.error_detected = bnx2_io_error_detected,
.slot_reset = bnx2_io_slot_reset,
@@ -8759,9 +8764,9 @@ static struct pci_driver bnx2_pci_driver = {
.id_table = bnx2_pci_tbl,
.probe = bnx2_init_one,
.remove = bnx2_remove_one,
- .suspend = bnx2_suspend,
- .resume = bnx2_resume,
+ .driver.pm = BNX2_PM_OPS,
.err_handler = &bnx2_err_handler,
+ .shutdown = bnx2_shutdown,
};
module_pci_driver(bnx2_pci_driver);
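
The bnx2 hunks above retire the legacy struct pci_driver .suspend/.resume callbacks in favour of a dev_pm_ops table built with SIMPLE_DEV_PM_OPS(), leaving PCI state save/restore and power-state selection to the PCI core. A minimal sketch of that conversion for a hypothetical driver is:

#include <linux/pci.h>
#include <linux/pm.h>

static int example_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	/* quiesce the device; the PCI core handles config-space save and D-state */
	dev_dbg(&pdev->dev, "suspending\n");
	return 0;
}

static int example_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	dev_dbg(&pdev->dev, "resuming\n");
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct pci_driver example_driver = {
	.name		= "example",
	.driver.pm	= &example_pm_ops,
	/* .probe, .remove and .id_table omitted in this sketch */
};
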
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index 172efbecfea..18cb2d23e56 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -1,6 +1,6 @@
/* bnx2.h: Broadcom NX2 network driver.
*
- * Copyright (c) 2004-2011 Broadcom Corporation
+ * Copyright (c) 2004-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index dedbd76c033..0c338026ce0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -486,7 +486,7 @@ struct bnx2x_fastpath {
struct napi_struct napi;
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define BNX2X_FP_STATE_IDLE 0
#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
@@ -498,7 +498,7 @@ struct bnx2x_fastpath {
#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
/* protect state */
spinlock_t lock;
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
union host_hc_status_block status_blk;
/* chip independent shortcuts into sb structure */
@@ -572,7 +572,7 @@ struct bnx2x_fastpath {
#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
{
spin_lock_init(&fp->lock);
@@ -680,7 +680,7 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
{
return false;
}
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
/* Use 2500 as a mini-jumbo MTU for FCoE */
#define BNX2X_FCOE_MINI_JUMBO_MTU 2500
@@ -825,15 +825,13 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
#define BD_UNMAP_LEN(bd) (le16_to_cpu((bd)->nbytes))
#define BNX2X_DB_MIN_SHIFT 3 /* 8 bytes */
-#define BNX2X_DB_SHIFT 7 /* 128 bytes*/
+#define BNX2X_DB_SHIFT 3 /* 8 bytes*/
#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
#error "Min DB doorbell stride is 8"
#endif
-#define DPM_TRIGER_TYPE 0x40
#define DOORBELL(bp, cid, val) \
do { \
- writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
- DPM_TRIGER_TYPE); \
+ writel((u32)(val), bp->doorbells + (bp->db_size * (cid))); \
} while (0)
/* TX CSUM helpers */
@@ -1100,13 +1098,27 @@ struct bnx2x_port {
extern struct workqueue_struct *bnx2x_wq;
#define BNX2X_MAX_NUM_OF_VFS 64
-#define BNX2X_VF_CID_WND 0
+#define BNX2X_VF_CID_WND 4 /* log num of queues per VF. HW config. */
#define BNX2X_CIDS_PER_VF (1 << BNX2X_VF_CID_WND)
-#define BNX2X_CLIENTS_PER_VF 1
-#define BNX2X_FIRST_VF_CID 256
+
+/* We need to reserve doorbell addresses for all VF and queue combinations */
#define BNX2X_VF_CIDS (BNX2X_MAX_NUM_OF_VFS * BNX2X_CIDS_PER_VF)
+
+/* The doorbell is configured to have the same number of CIDs for PFs and for
+ * VFs. For this reason the PF CID zone is as large as the VF zone.
+ */
+#define BNX2X_FIRST_VF_CID BNX2X_VF_CIDS
+#define BNX2X_MAX_NUM_VF_QUEUES 64
#define BNX2X_VF_ID_INVALID 0xFF
+/* the number of VF CIDS multiplied by the amount of bytes reserved for each
+ * cid must not exceed the size of the VF doorbell
+ */
+#define BNX2X_VF_BAR_SIZE 512
+#if (BNX2X_VF_BAR_SIZE < BNX2X_CIDS_PER_VF * (1 << BNX2X_DB_SHIFT))
+#error "VF doorbell bar size is 512"
+#endif
+
/*
* The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
* control by the number of fast-path status blocks supported by the
@@ -1331,8 +1343,10 @@ enum {
BNX2X_SP_RTNL_ENABLE_SRIOV,
BNX2X_SP_RTNL_VFPF_MCAST,
BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
- BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
+ BNX2X_SP_RTNL_RX_MODE,
BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ BNX2X_SP_RTNL_TX_STOP,
+ BNX2X_SP_RTNL_TX_RESUME,
};
struct bnx2x_prev_path_list {
@@ -1502,6 +1516,7 @@ struct bnx2x {
#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21)
#define IS_VF_FLAG (1 << 22)
#define INTERRUPTS_ENABLED_FLAG (1 << 23)
+#define BC_SUPPORTS_RMMOD_CMD (1 << 24)
#define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG)
@@ -1647,10 +1662,10 @@ struct bnx2x {
dma_addr_t fw_stats_data_mapping;
int fw_stats_data_sz;
- /* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB
+ /* For max 1024 cids (VF RSS), 32KB ILT page size and 1KB
* context size we need 8 ILT entries.
*/
-#define ILT_MAX_L2_LINES 8
+#define ILT_MAX_L2_LINES 32
struct hw_context context[ILT_MAX_L2_LINES];
struct bnx2x_ilt *ilt;
@@ -1830,6 +1845,8 @@ struct bnx2x {
int fp_array_size;
u32 dump_preset_idx;
+ bool stats_started;
+ struct semaphore stats_sema;
};
/* Tx queues may be less or equal to Rx queues */
@@ -1864,7 +1881,7 @@ extern int num_queues;
#define FUNC_FLG_TPA 0x0008
#define FUNC_FLG_SPQ 0x0010
#define FUNC_FLG_LEADING 0x0020 /* PF only */
-
+#define FUNC_FLG_LEADING_STATS 0x0040
struct bnx2x_func_init_params {
/* dma */
dma_addr_t fw_stat_map; /* valid iff FUNC_FLG_STATS */
@@ -2064,9 +2081,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id,
bool is_pf);
-#define BNX2X_ILT_ZALLOC(x, y, size) \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
- GFP_KERNEL | __GFP_ZERO)
+#define BNX2X_ILT_ZALLOC(x, y, size) \
+ x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL)
#define BNX2X_ILT_FREE(x, y, size) \
do { \
@@ -2451,4 +2467,6 @@ enum bnx2x_pci_bus_speed {
BNX2X_PCI_LINK_SPEED_5000 = 5000,
BNX2X_PCI_LINK_SPEED_8000 = 8000
};
+
+void bnx2x_set_local_cmng(struct bnx2x *bp);
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ee350bde181..2361bf236ce 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -53,6 +53,7 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
int old_max_eth_txqs, new_max_eth_txqs;
int old_txdata_index = 0, new_txdata_index = 0;
+ struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
/* Copy the NAPI object as it has been already initialized */
from_fp->napi = to_fp->napi;
@@ -61,6 +62,11 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
memcpy(to_fp, from_fp, sizeof(*to_fp));
to_fp->index = to;
+ /* Retain the tpa_info of the original `to' version as we don't want
+ * 2 FPs to contain the same tpa_info pointer.
+ */
+ to_fp->tpa_info = old_tpa_info;
+
/* move sp_objs contents as well, as their indices match fp ones */
memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
@@ -1942,7 +1948,7 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
}
}
-static int bnx2x_init_rss_pf(struct bnx2x *bp)
+static int bnx2x_init_rss(struct bnx2x *bp)
{
int i;
u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
@@ -1966,8 +1972,8 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp)
return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
}
-int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
- bool config_hash)
+int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+ bool config_hash, bool enable)
{
struct bnx2x_config_rss_params params = {NULL};
@@ -1982,17 +1988,21 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
- __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
-
- /* RSS configuration */
- __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
- __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
- __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
- __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
- if (rss_obj->udp_rss_v4)
- __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
- if (rss_obj->udp_rss_v6)
- __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
+ if (enable) {
+ __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
+
+ /* RSS configuration */
+ __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
+ __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+ if (rss_obj->udp_rss_v4)
+ __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
+ if (rss_obj->udp_rss_v6)
+ __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
+ } else {
+ __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
+ }
/* Hash bits */
params.rss_result_mask = MULTI_MASK;
@@ -2001,11 +2011,14 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
if (config_hash) {
/* RSS keys */
- prandom_bytes(params.rss_key, sizeof(params.rss_key));
+ prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
}
- return bnx2x_config_rss(bp, &params);
+ if (IS_PF(bp))
+ return bnx2x_config_rss(bp, &params);
+ else
+ return bnx2x_vfpf_config_rss(bp, &params);
}
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
@@ -2060,7 +2073,11 @@ void bnx2x_squeeze_objects(struct bnx2x *bp)
rparam.mcast_obj = &bp->mcast_obj;
__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
- /* Add a DEL command... */
+ /* Add a DEL command... - Since we're doing a driver cleanup only,
+ * we take a lock surrounding both the initial send and the CONTs,
+ * as we don't want a true completion to disrupt us in the middle.
+ */
+ netif_addr_lock_bh(bp->dev);
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
if (rc < 0)
BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
@@ -2072,11 +2089,13 @@ void bnx2x_squeeze_objects(struct bnx2x *bp)
if (rc < 0) {
BNX2X_ERR("Failed to clean multi-cast object: %d\n",
rc);
+ netif_addr_unlock_bh(bp->dev);
return;
}
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
}
+ netif_addr_unlock_bh(bp->dev);
}
#ifndef BNX2X_STOP_ON_ERROR
@@ -2432,9 +2451,7 @@ int bnx2x_load_cnic(struct bnx2x *bp)
}
/* Initialize Rx filter. */
- netif_addr_lock_bh(bp->dev);
- bnx2x_set_rx_mode(bp->dev);
- netif_addr_unlock_bh(bp->dev);
+ bnx2x_set_rx_mode_inner(bp);
/* re-read iscsi info */
bnx2x_get_iscsi_info(bp);
@@ -2641,38 +2658,32 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* initialize FW coalescing state machines in RAM */
bnx2x_update_coalesce(bp);
+ }
- /* setup the leading queue */
- rc = bnx2x_setup_leading(bp);
- if (rc) {
- BNX2X_ERR("Setup leading failed!\n");
- LOAD_ERROR_EXIT(bp, load_error3);
- }
-
- /* set up the rest of the queues */
- for_each_nondefault_eth_queue(bp, i) {
- rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
- if (rc) {
- BNX2X_ERR("Queue setup failed\n");
- LOAD_ERROR_EXIT(bp, load_error3);
- }
- }
+ /* setup the leading queue */
+ rc = bnx2x_setup_leading(bp);
+ if (rc) {
+ BNX2X_ERR("Setup leading failed!\n");
+ LOAD_ERROR_EXIT(bp, load_error3);
+ }
- /* setup rss */
- rc = bnx2x_init_rss_pf(bp);
+ /* set up the rest of the queues */
+ for_each_nondefault_eth_queue(bp, i) {
+ if (IS_PF(bp))
+ rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
+ else /* VF */
+ rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
if (rc) {
- BNX2X_ERR("PF RSS init failed\n");
+ BNX2X_ERR("Queue %d setup failed\n", i);
LOAD_ERROR_EXIT(bp, load_error3);
}
+ }
- } else { /* vf */
- for_each_eth_queue(bp, i) {
- rc = bnx2x_vfpf_setup_q(bp, i);
- if (rc) {
- BNX2X_ERR("Queue setup failed\n");
- LOAD_ERROR_EXIT(bp, load_error3);
- }
- }
+ /* setup rss */
+ rc = bnx2x_init_rss(bp);
+ if (rc) {
+ BNX2X_ERR("PF RSS init failed\n");
+ LOAD_ERROR_EXIT(bp, load_error3);
}
/* Now when Clients are configured we are ready to work */
@@ -2704,9 +2715,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Start fast path */
/* Initialize Rx filter. */
- netif_addr_lock_bh(bp->dev);
- bnx2x_set_rx_mode(bp->dev);
- netif_addr_unlock_bh(bp->dev);
+ bnx2x_set_rx_mode_inner(bp);
/* Start the Tx */
switch (load_mode) {
@@ -2956,8 +2965,9 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
if (IS_PF(bp)) {
if (CNIC_LOADED(bp))
bnx2x_free_mem_cnic(bp);
- bnx2x_free_mem(bp);
}
+ bnx2x_free_mem(bp);
+
bp->state = BNX2X_STATE_CLOSED;
bp->cnic_loaded = false;
@@ -3117,7 +3127,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
return work_done;
}
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
int bnx2x_low_latency_recv(struct napi_struct *napi)
{
@@ -4782,6 +4792,11 @@ int bnx2x_resume(struct pci_dev *pdev)
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
u32 cid)
{
+ if (!cxt) {
+ BNX2X_ERR("bad context pointer %p\n", cxt);
+ return;
+ }
+
/* ustorm cxt validation */
cxt->ustorm_ag_context.cdu_usage =
CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index c07a6d054cf..da8fcaa7449 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -51,8 +51,7 @@ extern int int_mode;
#define BNX2X_PCI_ALLOC(x, y, size) \
do { \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, \
- GFP_KERNEL | __GFP_ZERO); \
+ x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
if (x == NULL) \
goto alloc_mem_err; \
DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
@@ -106,9 +105,10 @@ void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link);
* @rss_obj: RSS object to use
* @ind_table: indirection table to configure
* @config_hash: re-configure RSS hash keys configuration
+ * @enable: enabled or disabled configuration
*/
-int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
- bool config_hash);
+int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
+ bool config_hash, bool enable);
/**
* bnx2x__init_func_obj - init function object
@@ -418,6 +418,7 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
* netif_addr_lock_bh()
*/
void bnx2x_set_rx_mode(struct net_device *dev);
+void bnx2x_set_rx_mode_inner(struct bnx2x *bp);
/**
* bnx2x_set_storm_rx_mode - configure MAC filtering rules in a FW.
@@ -980,7 +981,7 @@ static inline int func_by_vn(struct bnx2x *bp, int vn)
static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash)
{
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash);
+ return bnx2x_rss(bp, &bp->rss_conf_obj, config_hash, true);
}
/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 0c94df47e0e..fcf2761d882 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -30,10 +30,8 @@
#include "bnx2x_dcb.h"
/* forward declarations of dcbx related functions */
-static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
-static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
u32 *set_configuration_ets_pg,
u32 *pri_pg_tbl);
@@ -425,30 +423,52 @@ static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
bnx2x_pfc_clear(bp);
}
-static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
{
struct bnx2x_func_state_params func_params = {NULL};
+ int rc;
func_params.f_obj = &bp->func_obj;
func_params.cmd = BNX2X_F_CMD_TX_STOP;
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n");
- return bnx2x_func_state_change(bp, &func_params);
+
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc) {
+ BNX2X_ERR("Unable to hold traffic for HW configuration\n");
+ bnx2x_panic();
+ }
+
+ return rc;
}
-static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
{
struct bnx2x_func_state_params func_params = {NULL};
struct bnx2x_func_tx_start_params *tx_params =
&func_params.params.tx_start;
+ int rc;
func_params.f_obj = &bp->func_obj;
func_params.cmd = BNX2X_F_CMD_TX_START;
+ __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
+
bnx2x_dcbx_fw_struct(bp, tx_params);
DP(BNX2X_MSG_DCB, "START TRAFFIC\n");
- return bnx2x_func_state_change(bp, &func_params);
+
+ rc = bnx2x_func_state_change(bp, &func_params);
+ if (rc) {
+ BNX2X_ERR("Unable to resume traffic after HW configuration\n");
+ bnx2x_panic();
+ }
+
+ return rc;
}
static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
@@ -744,7 +764,9 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
if (IS_MF(bp))
bnx2x_link_sync_notify(bp);
- bnx2x_dcbx_stop_hw_tx(bp);
+ set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state);
+
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
return;
}
@@ -753,7 +775,13 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
bnx2x_pfc_set_pfc(bp);
bnx2x_dcbx_update_ets_params(bp);
- bnx2x_dcbx_resume_hw_tx(bp);
+
+ /* ets may affect cmng configuration: reinit it in hw */
+ bnx2x_set_local_cmng(bp);
+
+ set_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state);
+
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
return;
case BNX2X_DCBX_STATE_TX_RELEASED:
@@ -2363,21 +2391,24 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
case DCB_FEATCFG_ATTR_PG:
if (bp->dcbx_local_feat.ets.enabled)
*flags |= DCB_FEATCFG_ENABLE;
- if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR)
+ if (bp->dcbx_error & (DCBX_LOCAL_ETS_ERROR |
+ DCBX_REMOTE_MIB_ERROR))
*flags |= DCB_FEATCFG_ERROR;
break;
case DCB_FEATCFG_ATTR_PFC:
if (bp->dcbx_local_feat.pfc.enabled)
*flags |= DCB_FEATCFG_ENABLE;
if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR |
- DCBX_LOCAL_PFC_MISMATCH))
+ DCBX_LOCAL_PFC_MISMATCH |
+ DCBX_REMOTE_MIB_ERROR))
*flags |= DCB_FEATCFG_ERROR;
break;
case DCB_FEATCFG_ATTR_APP:
if (bp->dcbx_local_feat.app.enabled)
*flags |= DCB_FEATCFG_ENABLE;
if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR |
- DCBX_LOCAL_APP_MISMATCH))
+ DCBX_LOCAL_APP_MISMATCH |
+ DCBX_REMOTE_MIB_ERROR))
*flags |= DCB_FEATCFG_ERROR;
break;
default:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 125bd1b6586..804b8f64463 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -199,4 +199,7 @@ extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
#endif /* BCM_DCBNL */
+int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
+int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
+
#endif /* BNX2X_DCB_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index c5f22510168..2612e3c715d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3281,14 +3281,14 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
} else if ((info->flow_type == UDP_V6_FLOW) &&
(bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0);
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
}
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 5018e52ae2a..32767f6aa33 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1300,6 +1300,9 @@ struct drv_func_mb {
#define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000
+ #define DRV_MSG_CODE_RMMOD 0xdb000000
+ #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f
+
#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
#define REQ_BC_VER_4_SET_MF_BW 0x00060202
#define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
@@ -1372,6 +1375,8 @@ struct drv_func_mb {
#define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000
+ #define FW_MSG_CODE_RMMOD_ACK 0xdb100000
+
#define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
#define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 9d64b988ab3..664568420c9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6501,12 +6501,13 @@ static int bnx2x_link_initialize(struct link_params *params,
struct bnx2x_phy *phy = &params->phy[INT_PHY];
if (vars->line_speed == SPEED_AUTO_NEG &&
(CHIP_IS_E1x(bp) ||
- CHIP_IS_E2(bp)))
+ CHIP_IS_E2(bp))) {
bnx2x_set_parallel_detection(phy, params);
if (params->phy[INT_PHY].config_init)
params->phy[INT_PHY].config_init(phy,
params,
vars);
+ }
}
/* Init external phy*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e5da07858a2..634a793c1c4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -2261,6 +2261,23 @@ static void bnx2x_set_requested_fc(struct bnx2x *bp)
bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
}
+static void bnx2x_init_dropless_fc(struct bnx2x *bp)
+{
+ u32 pause_enabled = 0;
+
+ if (!CHIP_IS_E1(bp) && bp->dropless_fc && bp->link_vars.link_up) {
+ if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+ pause_enabled = 1;
+
+ REG_WR(bp, BAR_USTRORM_INTMEM +
+ USTORM_ETH_PAUSE_ENABLED_OFFSET(BP_PORT(bp)),
+ pause_enabled);
+ }
+
+ DP(NETIF_MSG_IFUP | NETIF_MSG_LINK, "dropless_fc is %s\n",
+ pause_enabled ? "enabled" : "disabled");
+}
+
int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
@@ -2294,6 +2311,8 @@ int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
bnx2x_release_phy_lock(bp);
+ bnx2x_init_dropless_fc(bp);
+
bnx2x_calc_fc_adv(bp);
if (bp->link_vars.link_up) {
@@ -2315,6 +2334,8 @@ void bnx2x_link_set(struct bnx2x *bp)
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
bnx2x_release_phy_lock(bp);
+ bnx2x_init_dropless_fc(bp);
+
bnx2x_calc_fc_adv(bp);
} else
BNX2X_ERR("Bootcode is missing - can not set link\n");
@@ -2476,7 +2497,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
input.port_rate = bp->link_vars.line_speed;
- if (cmng_type == CMNG_FNS_MINMAX) {
+ if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) {
int vn;
/* read mf conf from shmem */
@@ -2533,6 +2554,21 @@ static void storm_memset_cmng(struct bnx2x *bp,
}
}
+/* init cmng mode in HW according to local configuration */
+void bnx2x_set_local_cmng(struct bnx2x *bp)
+{
+ int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
+
+ if (cmng_fns != CMNG_FNS_NONE) {
+ bnx2x_cmng_fns_init(bp, false, cmng_fns);
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+ } else {
+ /* rate shaping and fairness are disabled */
+ DP(NETIF_MSG_IFUP,
+ "single function mode without fairness\n");
+ }
+}
+
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
@@ -2541,20 +2577,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
bnx2x_link_update(&bp->link_params, &bp->link_vars);
- if (bp->link_vars.link_up) {
-
- /* dropless flow control */
- if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
- int port = BP_PORT(bp);
- u32 pause_enabled = 0;
-
- if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
- pause_enabled = 1;
+ bnx2x_init_dropless_fc(bp);
- REG_WR(bp, BAR_USTRORM_INTMEM +
- USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
- pause_enabled);
- }
+ if (bp->link_vars.link_up) {
if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
struct host_port_stats *pstats;
@@ -2568,17 +2593,8 @@ static void bnx2x_link_attn(struct bnx2x *bp)
bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
}
- if (bp->link_vars.link_up && bp->link_vars.line_speed) {
- int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
-
- if (cmng_fns != CMNG_FNS_NONE) {
- bnx2x_cmng_fns_init(bp, false, cmng_fns);
- storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
- } else
- /* rate shaping and fairness are disabled */
- DP(NETIF_MSG_IFUP,
- "single function mode without fairness\n");
- }
+ if (bp->link_vars.link_up && bp->link_vars.line_speed)
+ bnx2x_set_local_cmng(bp);
__bnx2x_link_report(bp);
@@ -6877,7 +6893,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
- REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
+
if (!CHIP_REV_IS_SLOW(bp))
/* enable hw interrupt from doorbell Q */
REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
@@ -7839,12 +7855,15 @@ void bnx2x_free_mem(struct bnx2x *bp)
{
int i;
- BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
- sizeof(struct host_sp_status_block));
-
BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+ if (IS_VF(bp))
+ return;
+
+ BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
+ sizeof(struct host_sp_status_block));
+
BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
sizeof(struct bnx2x_slowpath));
@@ -8044,7 +8063,10 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
int bnx2x_setup_leading(struct bnx2x *bp)
{
- return bnx2x_setup_queue(bp, &bp->fp[0], 1);
+ if (IS_PF(bp))
+ return bnx2x_setup_queue(bp, &bp->fp[0], true);
+ else /* VF */
+ return bnx2x_vfpf_setup_q(bp, &bp->fp[0], true);
}
/**
@@ -8058,8 +8080,10 @@ int bnx2x_set_int_mode(struct bnx2x *bp)
{
int rc = 0;
- if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX)
+ if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX) {
+ BNX2X_ERR("VF not loaded since interrupt mode not msix\n");
return -EINVAL;
+ }
switch (int_mode) {
case BNX2X_INT_MODE_MSIX:
@@ -9628,17 +9652,21 @@ sp_rtnl_not_reset:
}
}
- if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
- &bp->sp_rtnl_state)) {
- DP(BNX2X_MSG_SP,
- "sending set storm rx mode vf pf channel message from rtnl sp-task\n");
- bnx2x_vfpf_storm_rx_mode(bp);
+ if (test_and_clear_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state)) {
+ DP(BNX2X_MSG_SP, "Handling Rx Mode setting\n");
+ bnx2x_set_rx_mode_inner(bp);
}
if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
&bp->sp_rtnl_state))
bnx2x_pf_set_vfs_vlan(bp);
+ if (test_and_clear_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state))
+ bnx2x_dcbx_stop_hw_tx(bp);
+
+ if (test_and_clear_bit(BNX2X_SP_RTNL_TX_RESUME, &bp->sp_rtnl_state))
+ bnx2x_dcbx_resume_hw_tx(bp);
+
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
*/
@@ -9935,8 +9963,6 @@ static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
static int bnx2x_do_flr(struct bnx2x *bp)
{
- int i;
- u16 status;
struct pci_dev *dev = bp->pdev;
if (CHIP_IS_E1x(bp)) {
@@ -9951,20 +9977,8 @@ static int bnx2x_do_flr(struct bnx2x *bp)
return -EINVAL;
}
- /* Wait for Transaction Pending bit clean */
- for (i = 0; i < 4; i++) {
- if (i)
- msleep((1 << (i - 1)) * 100);
-
- pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
- if (!(status & PCI_EXP_DEVSTA_TRPND))
- goto clear;
- }
-
- dev_err(&dev->dev,
- "transaction is not cleared; proceeding with reset anyway\n");
-
-clear:
+ if (!pci_wait_for_pending_transaction(dev))
+ dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
BNX2X_DEV_INFO("Initiating FLR\n");
bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
@@ -10362,6 +10376,10 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
+
+ bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ?
+ BC_SUPPORTS_RMMOD_CMD : 0;
+
boot_mode = SHMEM_RD(bp,
dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
@@ -11137,6 +11155,9 @@ static bool bnx2x_get_dropless_info(struct bnx2x *bp)
int tmp;
u32 cfg;
+ if (IS_VF(bp))
+ return 0;
+
if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
/* Take function: tmp = func */
tmp = BP_ABS_FUNC(bp);
@@ -11524,6 +11545,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
mutex_init(&bp->port.phy_mutex);
mutex_init(&bp->fw_mb_mutex);
spin_lock_init(&bp->stats_lock);
+ sema_init(&bp->stats_sema, 1);
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
@@ -11630,9 +11652,11 @@ static int bnx2x_init_bp(struct bnx2x *bp)
* second status block for the L2 queue, and a third status block for
* CNIC if supported.
*/
- if (CNIC_SUPPORT(bp))
+ if (IS_VF(bp))
+ bp->min_msix_vec_cnt = 1;
+ else if (CNIC_SUPPORT(bp))
bp->min_msix_vec_cnt = 3;
- else
+ else /* PF w/o cnic */
bp->min_msix_vec_cnt = 2;
BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
@@ -11849,34 +11873,48 @@ static int bnx2x_set_mc_list(struct bnx2x *bp)
void bnx2x_set_rx_mode(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- u32 rx_mode = BNX2X_RX_MODE_NORMAL;
if (bp->state != BNX2X_STATE_OPEN) {
DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
return;
+ } else {
+ /* Schedule an SP task to handle rest of change */
+ DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n");
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
+}
+
+void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
+{
+ u32 rx_mode = BNX2X_RX_MODE_NORMAL;
DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
- if (dev->flags & IFF_PROMISC)
+ netif_addr_lock_bh(bp->dev);
+
+ if (bp->dev->flags & IFF_PROMISC) {
rx_mode = BNX2X_RX_MODE_PROMISC;
- else if ((dev->flags & IFF_ALLMULTI) ||
- ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
- CHIP_IS_E1(bp)))
+ } else if ((bp->dev->flags & IFF_ALLMULTI) ||
+ ((netdev_mc_count(bp->dev) > BNX2X_MAX_MULTICAST) &&
+ CHIP_IS_E1(bp))) {
rx_mode = BNX2X_RX_MODE_ALLMULTI;
- else {
+ } else {
if (IS_PF(bp)) {
/* some multicasts */
if (bnx2x_set_mc_list(bp) < 0)
rx_mode = BNX2X_RX_MODE_ALLMULTI;
+ /* release bh lock, as bnx2x_set_uc_list might sleep */
+ netif_addr_unlock_bh(bp->dev);
if (bnx2x_set_uc_list(bp) < 0)
rx_mode = BNX2X_RX_MODE_PROMISC;
+ netif_addr_lock_bh(bp->dev);
} else {
/* configuring mcast to a vf involves sleeping (when we
- * wait for the pf's response). Since this function is
- * called from non sleepable context we must schedule
- * a work item for this purpose
+ * wait for the pf's response).
*/
smp_mb__before_clear_bit();
set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
@@ -11894,22 +11932,20 @@ void bnx2x_set_rx_mode(struct net_device *dev)
/* Schedule the rx_mode command */
if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+ netif_addr_unlock_bh(bp->dev);
return;
}
if (IS_PF(bp)) {
bnx2x_set_storm_rx_mode(bp);
+ netif_addr_unlock_bh(bp->dev);
} else {
- /* configuring rx mode to storms in a vf involves sleeping (when
- * we wait for the pf's response). Since this function is
- * called from non sleepable context we must schedule
- * a work item for this purpose
+ /* VF will need to request the PF to make this change, and so
+ * the VF needs to release the bottom-half lock prior to the
+ * request (as it will likely require sleep on the VF side)
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ netif_addr_unlock_bh(bp->dev);
+ bnx2x_vfpf_storm_rx_mode(bp);
}
}
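With this split, .ndo_set_rx_mode (bnx2x_set_rx_mode) only raises BNX2X_SP_RTNL_RX_MODE and schedules sp_rtnl_task, while bnx2x_set_rx_mode_inner() does the work under netif_addr_lock_bh() and drops that lock around the steps that may sleep (bnx2x_set_uc_list() and the VF-to-PF request). A reduced sketch of that "release the BH lock around a sleeping call" shape; names are hypothetical except for the standard net_device lock helpers, and msleep() merely stands in for the sleeping step:

#include <linux/netdevice.h>
#include <linux/delay.h>

static void example_rx_mode_inner(struct net_device *dev)
{
	netif_addr_lock_bh(dev);

	/* ... read dev->flags / the multicast list under the lock ... */

	netif_addr_unlock_bh(dev);	/* must not sleep while held */
	msleep(1);			/* hypothetical sleeping configuration step */
	netif_addr_lock_bh(dev);

	/* ... finish the non-sleeping part of the configuration ... */

	netif_addr_unlock_bh(dev);
}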
@@ -12026,7 +12062,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
#endif
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = bnx2x_low_latency_recv,
#endif
};
@@ -12531,19 +12567,16 @@ static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
* @dev: pci device
*
*/
-static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
- int cnic_cnt, bool is_vf)
+static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
{
- int pos, index;
+ int index;
u16 control = 0;
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
-
/*
* If MSI-X is not supported - return number of SBs needed to support
* one fast path queue: one FP queue + SB for CNIC
*/
- if (!pos) {
+ if (!pdev->msix_cap) {
dev_info(&pdev->dev, "no msix capability found\n");
return 1 + cnic_cnt;
}
@@ -12556,11 +12589,11 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
* without the default SB.
* For VFs there is no default SB, then we return (index+1).
*/
- pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
+ pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control);
index = control & PCI_MSIX_FLAGS_QSIZE;
- return is_vf ? index + 1 : index;
+ return index;
}
static int set_max_cos_est(int chip_id)
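The hunk above drops the explicit pci_find_capability() lookup in favour of the pdev->msix_cap offset the PCI core already caches. As a reminder of the encoding it relies on: the Table Size field of the MSI-X Message Control word stores (number of vectors - 1), which is why the function returns the raw index and the caller adds one back for a VF (which has no default status block). A small, self-contained sketch using only standard PCI helpers (not the driver's exact code):

#include <linux/pci.h>

/* Number of MSI-X vectors a device advertises; QSIZE holds (size - 1). */
static int example_msix_table_size(struct pci_dev *pdev)
{
	u16 control;

	if (!pdev->msix_cap)		/* no MSI-X capability */
		return 0;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
	return (control & PCI_MSIX_FLAGS_QSIZE) + 1;
}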
@@ -12640,10 +12673,13 @@ static int bnx2x_init_one(struct pci_dev *pdev,
is_vf = set_is_vf(ent->driver_data);
cnic_cnt = is_vf ? 0 : 1;
- max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt, is_vf);
+ max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt);
+
+ /* add another SB for VF as it has no default SB */
+ max_non_def_sbs += is_vf ? 1 : 0;
/* Maximum number of RSS queues: one IGU SB goes to CNIC */
- rss_count = is_vf ? 1 : max_non_def_sbs - cnic_cnt;
+ rss_count = max_non_def_sbs - cnic_cnt;
if (rss_count < 1)
return -EINVAL;
@@ -12817,13 +12853,17 @@ static void __bnx2x_remove(struct pci_dev *pdev,
bnx2x_dcbnl_update_applist(bp, true);
#endif
+ if (IS_PF(bp) &&
+ !BP_NOMCP(bp) &&
+ (bp->flags & BC_SUPPORTS_RMMOD_CMD))
+ bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0);
+
/* Close the interface - either directly or implicitly */
if (remove_netdev) {
unregister_netdev(dev);
} else {
rtnl_lock();
- if (netif_running(dev))
- bnx2x_close(dev);
+ dev_close(dev);
rtnl_unlock();
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 8e627b886d7..5ecf267dc4c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -6335,6 +6335,7 @@
#define PCI_ID_VAL2 0x438
#define PCI_ID_VAL3 0x43c
+#define GRC_CONFIG_REG_VF_MSIX_CONTROL 0x61C
#define GRC_CONFIG_REG_PF_INIT_VF 0x624
#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xf
/* First VF_NUM for PF is encoded in this register.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 8f03c984550..9fbeee522d2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -159,16 +159,6 @@ static inline void __bnx2x_exe_queue_reset_pending(
}
}
-static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
- struct bnx2x_exe_queue_obj *o)
-{
- spin_lock_bh(&o->lock);
-
- __bnx2x_exe_queue_reset_pending(bp, o);
-
- spin_unlock_bh(&o->lock);
-}
-
/**
* bnx2x_exe_queue_step - execute one execution chunk atomically
*
@@ -176,7 +166,7 @@ static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
* @o: queue
* @ramrod_flags: flags
*
- * (Atomicity is ensured using the exe_queue->lock).
+ * (Should be called while holding the exe_queue->lock).
*/
static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
struct bnx2x_exe_queue_obj *o,
@@ -187,8 +177,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
memset(&spacer, 0, sizeof(spacer));
- spin_lock_bh(&o->lock);
-
/* Next step should not be performed until the current is finished,
* unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
* properly clear object internals without sending any command to the FW
@@ -200,7 +188,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
__bnx2x_exe_queue_reset_pending(bp, o);
} else {
- spin_unlock_bh(&o->lock);
return 1;
}
}
@@ -228,10 +215,8 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
}
/* Sanity check */
- if (!cur_len) {
- spin_unlock_bh(&o->lock);
+ if (!cur_len)
return 0;
- }
rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
if (rc < 0)
@@ -245,7 +230,6 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
*/
__bnx2x_exe_queue_reset_pending(bp, o);
- spin_unlock_bh(&o->lock);
return rc;
}
@@ -432,12 +416,219 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
return true;
}
+/**
+ * __bnx2x_vlan_mac_h_write_trylock - try getting the vlan mac writer lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Non-blocking implementation; should be called under execution
+ * queue lock.
+ */
+static int __bnx2x_vlan_mac_h_write_trylock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ if (o->head_reader) {
+ DP(BNX2X_MSG_SP, "vlan_mac_lock writer - There are readers; Busy\n");
+ return -EBUSY;
+ }
+
+ DP(BNX2X_MSG_SP, "vlan_mac_lock writer - Taken\n");
+ return 0;
+}
+
+/**
+ * __bnx2x_vlan_mac_h_exec_pending - execute step instead of a previous step
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under execution queue lock; notice it might release
+ * and reclaim it during its run.
+ */
+static void __bnx2x_vlan_mac_h_exec_pending(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ int rc;
+ unsigned long ramrod_flags = o->saved_ramrod_flags;
+
+ DP(BNX2X_MSG_SP, "vlan_mac_lock execute pending command with ramrod flags %lu\n",
+ ramrod_flags);
+ o->head_exe_request = false;
+ o->saved_ramrod_flags = 0;
+ rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags);
+ if (rc != 0) {
+ BNX2X_ERR("execution of pending commands failed with rc %d\n",
+ rc);
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
+ }
+}
+
+/**
+ * __bnx2x_vlan_mac_h_pend - Pend an execution step which couldn't run
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ * @ramrod_flags: ramrod flags of missed execution
+ *
+ * @details Should be called under execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_pend(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ unsigned long ramrod_flags)
+{
+ o->head_exe_request = true;
+ o->saved_ramrod_flags = ramrod_flags;
+ DP(BNX2X_MSG_SP, "Placing pending execution with ramrod flags %lu\n",
+ ramrod_flags);
+}
+
+/**
+ * __bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under execution queue lock. Notice if a pending
+ * execution exists, it would perform it - possibly releasing and
+ * reclaiming the execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ /* It's possible a new pending execution was added since this writer
+ * executed. If so, execute again. [Ad infinitum]
+ */
+ while (o->head_exe_request) {
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - writer release encountered a pending request\n");
+ __bnx2x_vlan_mac_h_exec_pending(bp, o);
+ }
+}
+
+/**
+ * bnx2x_vlan_mac_h_write_unlock - unlock the vlan mac head list writer lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Notice if a pending execution exists, it would perform it -
+ * possibly releasing and reclaiming the execution queue lock.
+ */
+void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ spin_lock_bh(&o->exe_queue.lock);
+ __bnx2x_vlan_mac_h_write_unlock(bp, o);
+ spin_unlock_bh(&o->exe_queue.lock);
+}
+
+/**
+ * __bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under the execution queue lock. May sleep. May
+ * release and reclaim execution queue lock during its run.
+ */
+static int __bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ /* If we got here, we're holding lock --> no WRITER exists */
+ o->head_reader++;
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - locked reader - number %d\n",
+ o->head_reader);
+
+ return 0;
+}
+
+/**
+ * bnx2x_vlan_mac_h_read_lock - lock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details May sleep. Claims and releases execution queue lock during its run.
+ */
+int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ int rc;
+
+ spin_lock_bh(&o->exe_queue.lock);
+ rc = __bnx2x_vlan_mac_h_read_lock(bp, o);
+ spin_unlock_bh(&o->exe_queue.lock);
+
+ return rc;
+}
+
+/**
+ * __bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Should be called under execution queue lock. Notice if a pending
+ * execution exists, it would be performed if this was the last
+ * reader, possibly releasing and reclaiming the execution queue lock.
+ */
+static void __bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ if (!o->head_reader) {
+ BNX2X_ERR("Need to release vlan mac reader lock, but lock isn't taken\n");
+#ifdef BNX2X_STOP_ON_ERROR
+ bnx2x_panic();
+#endif
+ } else {
+ o->head_reader--;
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - decreased readers to %d\n",
+ o->head_reader);
+ }
+
+ /* It's possible a new pending execution was added, and that this reader
+ * was last - if so we need to execute the command.
+ */
+ if (!o->head_reader && o->head_exe_request) {
+ DP(BNX2X_MSG_SP, "vlan_mac_lock - reader release encountered a pending request\n");
+
+ /* Writer release will do the trick */
+ __bnx2x_vlan_mac_h_write_unlock(bp, o);
+ }
+}
+
+/**
+ * bnx2x_vlan_mac_h_read_unlock - unlock the vlan mac head list reader lock
+ *
+ * @bp: device handle
+ * @o: vlan_mac object
+ *
+ * @details Notice if a pending execution exists, it would be performed if this
+ * was the last reader. Claims and releases the execution queue lock
+ * during its run.
+ */
+void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o)
+{
+ spin_lock_bh(&o->exe_queue.lock);
+ __bnx2x_vlan_mac_h_read_unlock(bp, o);
+ spin_unlock_bh(&o->exe_queue.lock);
+}
+
static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
int n, u8 *base, u8 stride, u8 size)
{
struct bnx2x_vlan_mac_registry_elem *pos;
u8 *next = base;
int counter = 0;
+ int read_lock;
+
+ DP(BNX2X_MSG_SP, "get_n_elements - taking vlan_mac_lock (reader)\n");
+ read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
+ if (read_lock != 0)
+ BNX2X_ERR("get_n_elements failed to get vlan mac reader lock; Access without lock\n");
/* traverse list */
list_for_each_entry(pos, &o->head, link) {
@@ -449,6 +640,12 @@ static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
next += stride + size;
}
}
+
+ if (read_lock == 0) {
+ DP(BNX2X_MSG_SP, "get_n_elements - releasing vlan_mac_lock (reader)\n");
+ bnx2x_vlan_mac_h_read_unlock(bp, o);
+ }
+
return counter * ETH_ALEN;
}
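The __bnx2x_vlan_mac_h_* helpers added above implement a small reader/writer discipline over a vlan_mac object's head list: readers bump head_reader under the exe_queue spinlock, a "writer" (an execution step) either runs immediately when no readers are present or records its ramrod flags in saved_ramrod_flags, and the pended step is replayed by the writer unlock or by the last reader to leave. bnx2x_get_n_elements() in this hunk shows the reader side; the shape every walker of o->head is expected to follow is roughly the sketch below (the element handling is illustrative only, the locking calls are the ones introduced here):

static void example_walk_head(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o)
{
	struct bnx2x_vlan_mac_registry_elem *pos;

	if (bnx2x_vlan_mac_h_read_lock(bp, o))
		return;			/* could not register as a reader */

	list_for_each_entry(pos, &o->head, link) {
		/* ... consume the registry element ... */
	}

	/* the last reader out replays any execution step a writer pended */
	bnx2x_vlan_mac_h_read_unlock(bp, o);
}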
@@ -1397,6 +1594,32 @@ static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
return -EBUSY;
}
+static int __bnx2x_vlan_mac_execute_step(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
+ unsigned long *ramrod_flags)
+{
+ int rc = 0;
+
+ spin_lock_bh(&o->exe_queue.lock);
+
+ DP(BNX2X_MSG_SP, "vlan_mac_execute_step - trying to take writer lock\n");
+ rc = __bnx2x_vlan_mac_h_write_trylock(bp, o);
+
+ if (rc != 0) {
+ __bnx2x_vlan_mac_h_pend(bp, o, *ramrod_flags);
+
+ /* Calling function should not differentiate between this case
+ * and the case in which there is already a pending ramrod
+ */
+ rc = 1;
+ } else {
+ rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+ }
+ spin_unlock_bh(&o->exe_queue.lock);
+
+ return rc;
+}
+
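__bnx2x_vlan_mac_execute_step() above is the writer side of the same scheme: it takes the exe_queue lock, tries the writer trylock, and if readers are active it pends the step (saving the ramrod flags) and returns 1, exactly as if a ramrod were already outstanding; the pended step is replayed when the lock is eventually released. A toy, single-threaded model of that "try now, otherwise record and let the unlocker replay" idea (plain C, all names hypothetical; in the driver every access below happens under the exe_queue spinlock):

struct pend_lock {
	int readers;		/* active readers                */
	int pending;		/* a step is waiting to run      */
	unsigned long flags;	/* arguments of the pended step  */
};

static void run_step(unsigned long flags);	/* the real work */

static int step_or_pend(struct pend_lock *l, unsigned long flags)
{
	if (l->readers) {		/* a writer must not run now     */
		l->pending = 1;
		l->flags = flags;	/* remember what to replay       */
		return 1;		/* caller treats it as "pending" */
	}
	run_step(flags);
	return 0;
}

static void reader_put(struct pend_lock *l)
{
	if (--l->readers == 0 && l->pending) {
		l->pending = 0;		/* last reader replays the step  */
		run_step(l->flags);
	}
}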
/**
* bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
*
@@ -1414,19 +1637,27 @@ static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
struct bnx2x_raw_obj *r = &o->raw;
int rc;
+ /* Clearing the pending list & raw state should be made
+ * atomically (as execution flow assumes they represent the same).
+ */
+ spin_lock_bh(&o->exe_queue.lock);
+
/* Reset pending list */
- bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
+ __bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
/* Clear pending */
r->clear_pending(r);
+ spin_unlock_bh(&o->exe_queue.lock);
+
/* If ramrod failed this is most likely a SW bug */
if (cqe->message.error)
return -EINVAL;
/* Run the next bulk of pending commands if requested */
if (test_bit(RAMROD_CONT, ramrod_flags)) {
- rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+ rc = __bnx2x_vlan_mac_execute_step(bp, o, ramrod_flags);
+
if (rc < 0)
return rc;
}
@@ -1719,9 +1950,8 @@ static inline int bnx2x_vlan_mac_push_new_cmd(
* @p:
*
*/
-int bnx2x_config_vlan_mac(
- struct bnx2x *bp,
- struct bnx2x_vlan_mac_ramrod_params *p)
+int bnx2x_config_vlan_mac(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_ramrod_params *p)
{
int rc = 0;
struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
@@ -1752,7 +1982,8 @@ int bnx2x_config_vlan_mac(
/* Execute commands if required */
if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
- rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+ rc = __bnx2x_vlan_mac_execute_step(bp, p->vlan_mac_obj,
+ &p->ramrod_flags);
if (rc < 0)
return rc;
}
@@ -1775,8 +2006,9 @@ int bnx2x_config_vlan_mac(
return rc;
/* Make a next step */
- rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
- ramrod_flags);
+ rc = __bnx2x_vlan_mac_execute_step(bp,
+ p->vlan_mac_obj,
+ &p->ramrod_flags);
if (rc < 0)
return rc;
}
@@ -1806,10 +2038,11 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
unsigned long *ramrod_flags)
{
struct bnx2x_vlan_mac_registry_elem *pos = NULL;
- int rc = 0;
struct bnx2x_vlan_mac_ramrod_params p;
struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+ int read_lock;
+ int rc = 0;
/* Clear pending commands first */
@@ -1844,6 +2077,11 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
__clear_bit(RAMROD_CONT, &p.ramrod_flags);
+ DP(BNX2X_MSG_SP, "vlan_mac_del_all -- taking vlan_mac_lock (reader)\n");
+ read_lock = bnx2x_vlan_mac_h_read_lock(bp, o);
+ if (read_lock != 0)
+ return read_lock;
+
list_for_each_entry(pos, &o->head, link) {
if (pos->vlan_mac_flags == *vlan_mac_flags) {
p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
@@ -1851,11 +2089,15 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
rc = bnx2x_config_vlan_mac(bp, &p);
if (rc < 0) {
BNX2X_ERR("Failed to add a new DEL command\n");
+ bnx2x_vlan_mac_h_read_unlock(bp, o);
return rc;
}
}
}
+ DP(BNX2X_MSG_SP, "vlan_mac_del_all -- releasing vlan_mac_lock (reader)\n");
+ bnx2x_vlan_mac_h_read_unlock(bp, o);
+
p.ramrod_flags = *ramrod_flags;
__set_bit(RAMROD_CONT, &p.ramrod_flags);
@@ -1887,6 +2129,9 @@ static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
struct bnx2x_credit_pool_obj *vlans_pool)
{
INIT_LIST_HEAD(&o->head);
+ o->head_reader = 0;
+ o->head_exe_request = false;
+ o->saved_ramrod_flags = 0;
o->macs_pool = macs_pool;
o->vlans_pool = vlans_pool;
@@ -4171,6 +4416,16 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
rss_obj->config_rss = bnx2x_setup_rss;
}
+int validate_vlan_mac(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_mac)
+{
+ if (!vlan_mac->get_n_elements) {
+ BNX2X_ERR("vlan mac object was not intialized\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
/********************** Queue state object ***********************************/
/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 798dfe99673..658f4e33abf 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -285,6 +285,12 @@ struct bnx2x_vlan_mac_obj {
* entries.
*/
struct list_head head;
+ /* Implement a simple reader/writer lock on the head list.
+ * all these fields should only be accessed under the exe_queue lock
+ */
+ u8 head_reader; /* Num. of readers accessing head list */
+ bool head_exe_request; /* Pending execution request. */
+ unsigned long saved_ramrod_flags; /* Ramrods of pending execution */
/* TODO: Add it's initialization in the init functions */
struct bnx2x_exe_queue_obj exe_queue;
@@ -1302,8 +1308,16 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
struct bnx2x_credit_pool_obj *macs_pool,
struct bnx2x_credit_pool_obj *vlans_pool);
+int bnx2x_vlan_mac_h_read_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o);
+void bnx2x_vlan_mac_h_read_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o);
+int bnx2x_vlan_mac_h_write_lock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o);
+void bnx2x_vlan_mac_h_write_unlock(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o);
int bnx2x_config_vlan_mac(struct bnx2x *bp,
- struct bnx2x_vlan_mac_ramrod_params *p);
+ struct bnx2x_vlan_mac_ramrod_params *p);
int bnx2x_vlan_mac_move(struct bnx2x *bp,
struct bnx2x_vlan_mac_ramrod_params *p,
@@ -1393,4 +1407,6 @@ int bnx2x_config_rss(struct bnx2x *bp,
void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
u8 *ind_table);
+int validate_vlan_mac(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *vlan_mac);
#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 95861efb505..b26eb83069b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -170,6 +170,11 @@ enum bnx2x_vfop_qteardown_state {
BNX2X_VFOP_QTEARDOWN_DONE
};
+enum bnx2x_vfop_rss_state {
+ BNX2X_VFOP_RSS_CONFIG,
+ BNX2X_VFOP_RSS_DONE
+};
+
#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -265,11 +270,6 @@ void bnx2x_vfop_qctor_prep(struct bnx2x *bp,
__set_bit(BNX2X_Q_FLG_TX_SEC, &setup_p->flags);
__set_bit(BNX2X_Q_FLG_ANTI_SPOOF, &setup_p->flags);
- if (vfq_is_leading(q)) {
- __set_bit(BNX2X_Q_FLG_LEADING_RSS, &setup_p->flags);
- __set_bit(BNX2X_Q_FLG_MCAST, &setup_p->flags);
- }
-
/* Setup-op rx parameters */
if (test_bit(BNX2X_Q_TYPE_HAS_RX, &q_type)) {
struct bnx2x_rxq_setup_params *rxq_p = &setup_p->rxq_params;
@@ -398,7 +398,11 @@ static void bnx2x_vfop_qdtor(struct bnx2x *bp, struct bnx2x_virtf *vf)
BNX2X_Q_LOGICAL_STATE_STOPPED) {
DP(BNX2X_MSG_IOV,
"Entered qdtor but queue was already stopped. Aborting gracefully\n");
- goto op_done;
+
+ /* next state */
+ vfop->state = BNX2X_VFOP_QDTOR_DONE;
+
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
}
/* next state */
@@ -432,8 +436,10 @@ op_err:
op_done:
case BNX2X_VFOP_QDTOR_DONE:
/* invalidate the context */
- qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
- qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+ if (qdtor->cxt) {
+ qdtor->cxt->ustorm_ag_context.cdu_usage = 0;
+ qdtor->cxt->xstorm_ag_context.cdu_reserved = 0;
+ }
bnx2x_vfop_end(bp, vf, vfop);
return;
default:
@@ -465,7 +471,8 @@ static int bnx2x_vfop_qdtor_cmd(struct bnx2x *bp,
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdtor,
cmd->block);
}
- DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop.\n", vf->abs_vfid);
+ DP(BNX2X_MSG_IOV, "VF[%d] failed to add a vfop. rc %d\n",
+ vf->abs_vfid, vfop->rc);
return -ENOMEM;
}
@@ -474,10 +481,18 @@ bnx2x_vf_set_igu_info(struct bnx2x *bp, u8 igu_sb_id, u8 abs_vfid)
{
struct bnx2x_virtf *vf = bnx2x_vf_by_abs_fid(bp, abs_vfid);
if (vf) {
+ /* the first igu entry belonging to VFs of this PF */
+ if (!BP_VFDB(bp)->first_vf_igu_entry)
+ BP_VFDB(bp)->first_vf_igu_entry = igu_sb_id;
+
+ /* the first igu entry belonging to this VF */
if (!vf_sb_count(vf))
vf->igu_base_id = igu_sb_id;
+
++vf_sb_count(vf);
+ ++vf->sb_count;
}
+ BP_VFDB(bp)->vf_sbs_pool++;
}
/* VFOP MAC/VLAN helpers */
@@ -491,12 +506,20 @@ static inline void bnx2x_vfop_credit(struct bnx2x *bp,
* and a valid credit counter
*/
if (!vfop->rc && args->credit) {
- int cnt = 0;
struct list_head *pos;
+ int read_lock;
+ int cnt = 0;
+
+ read_lock = bnx2x_vlan_mac_h_read_lock(bp, obj);
+ if (read_lock)
+ DP(BNX2X_MSG_SP, "Failed to take vlan mac read head; continuing anyway\n");
list_for_each(pos, &obj->head)
cnt++;
+ if (!read_lock)
+ bnx2x_vlan_mac_h_read_unlock(bp, obj);
+
atomic_set(args->credit, cnt);
}
}
@@ -522,23 +545,6 @@ static int bnx2x_vfop_set_user_req(struct bnx2x *bp,
return 0;
}
-static int
-bnx2x_vfop_config_vlan0(struct bnx2x *bp,
- struct bnx2x_vlan_mac_ramrod_params *vlan_mac,
- bool add)
-{
- int rc;
-
- vlan_mac->user_req.cmd = add ? BNX2X_VLAN_MAC_ADD :
- BNX2X_VLAN_MAC_DEL;
- vlan_mac->user_req.u.vlan.vlan = 0;
-
- rc = bnx2x_config_vlan_mac(bp, vlan_mac);
- if (rc == -EEXIST)
- rc = 0;
- return rc;
-}
-
static int bnx2x_vfop_config_list(struct bnx2x *bp,
struct bnx2x_vfop_filters *filters,
struct bnx2x_vlan_mac_ramrod_params *vlan_mac)
@@ -643,30 +649,14 @@ static void bnx2x_vfop_vlan_mac(struct bnx2x *bp, struct bnx2x_virtf *vf)
case BNX2X_VFOP_VLAN_CONFIG_LIST:
/* next state */
- vfop->state = BNX2X_VFOP_VLAN_CONFIG_LIST_0;
-
- /* remove vlan0 - could be no-op */
- vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, false);
- if (vfop->rc)
- goto op_err;
+ vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
- /* Do vlan list config. if this operation fails we try to
- * restore vlan0 to keep the queue is working order
- */
+ /* do list config */
vfop->rc = bnx2x_vfop_config_list(bp, filters, vlan_mac);
if (!vfop->rc) {
set_bit(RAMROD_CONT, &vlan_mac->ramrod_flags);
vfop->rc = bnx2x_config_vlan_mac(bp, vlan_mac);
}
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT); /* fall-through */
-
- case BNX2X_VFOP_VLAN_CONFIG_LIST_0:
- /* next state */
- vfop->state = BNX2X_VFOP_VLAN_MAC_CHK_DONE;
-
- if (list_empty(&obj->head))
- /* add vlan0 */
- vfop->rc = bnx2x_vfop_config_vlan0(bp, vlan_mac, true);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
default:
@@ -725,6 +715,7 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
@@ -744,6 +735,9 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
/* set object */
+ rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
+ if (rc)
+ return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
/* set extra args */
@@ -764,6 +758,7 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
@@ -786,6 +781,9 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
/* set object */
+ rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
+ if (rc)
+ return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
/* set extra args */
@@ -806,6 +804,7 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
int qid, u16 vid, bool add)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
@@ -826,6 +825,9 @@ int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
ramrod->user_req.u.vlan.vlan = vid;
/* set object */
+ rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+ if (rc)
+ return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
/* set extra args */
@@ -845,6 +847,7 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
@@ -864,6 +867,9 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
/* set object */
+ rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+ if (rc)
+ return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
/* set extra args */
@@ -884,6 +890,7 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
@@ -903,6 +910,9 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
/* set object */
+ rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
+ if (rc)
+ return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
/* set extra args */
@@ -1013,21 +1023,25 @@ static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
case BNX2X_VFOP_QFLR_CLR_VLAN:
/* vlan-clear-all: driver-only, don't consume credit */
vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
- vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
+ if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj)))
+ vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid,
+ true);
if (vfop->rc)
goto op_err;
- return;
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_QFLR_CLR_MAC:
/* mac-clear-all: driver only consume credit */
vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
- vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
+ if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj)))
+ vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid,
+ true);
DP(BNX2X_MSG_IOV,
"VF[%d] vfop->rc after bnx2x_vfop_mac_delall_cmd was %d",
vf->abs_vfid, vfop->rc);
if (vfop->rc)
goto op_err;
- return;
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
case BNX2X_VFOP_QFLR_TERMINATE:
qstate = &vfop->op_p->qctor.qstate;
@@ -1324,10 +1338,13 @@ int bnx2x_vfop_qdown_cmd(struct bnx2x *bp,
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+ /* for non-leading queues skip directly to qdown state */
if (vfop) {
vfop->args.qx.qid = qid;
- bnx2x_vfop_opset(BNX2X_VFOP_QTEARDOWN_RXMODE,
- bnx2x_vfop_qdown, cmd->done);
+ bnx2x_vfop_opset(qid == LEADING_IDX ?
+ BNX2X_VFOP_QTEARDOWN_RXMODE :
+ BNX2X_VFOP_QTEARDOWN_QDTOR, bnx2x_vfop_qdown,
+ cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qdown,
cmd->block);
}
@@ -1480,15 +1497,16 @@ int bnx2x_vf_flr_clnup_epilog(struct bnx2x *bp, u8 abs_vfid)
* both known
*/
static void
-bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
+bnx2x_iov_static_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
+ struct vf_pf_resc_request *resc = &vf->alloc_resc;
u16 vlan_count = 0;
/* will be set only during VF-ACQUIRE */
resc->num_rxqs = 0;
resc->num_txqs = 0;
- /* no credit calculcis for macs (just yet) */
+ /* no credit calculations for macs (just yet) */
resc->num_mac_filters = 1;
/* divvy up vlan rules */
@@ -1500,13 +1518,14 @@ bnx2x_iov_static_resc(struct bnx2x *bp, struct vf_pf_resc_request *resc)
resc->num_mc_filters = 0;
/* num_sbs already set */
+ resc->num_sbs = vf->sb_count;
}
/* FLR routines: */
static void bnx2x_vf_free_resc(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
/* reset the state variables */
- bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+ bnx2x_iov_static_resc(bp, vf);
vf->state = VF_FREE;
}
@@ -1726,8 +1745,7 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
/* The VF doorbell size 0 - *B, 4 - 128B. We set it here to match
* the Pf doorbell size although the 2 are independent.
*/
- REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST,
- BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT);
+ REG_WR(bp, DORQ_REG_VF_NORM_CID_OFST, 3);
/* No security checks for now -
* configure single rule (out of 16) mask = 0x1, value = 0x0,
@@ -1747,11 +1765,8 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
void bnx2x_iov_init_dmae(struct bnx2x *bp)
{
- DP(BNX2X_MSG_IOV, "SRIOV is %s\n", IS_SRIOV(bp) ? "ON" : "OFF");
- if (!IS_SRIOV(bp))
- return;
-
- REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
+ if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
+ REG_WR(bp, DMAE_REG_BACKWARD_COMP_EN, 0);
}
static int bnx2x_vf_bus(struct bnx2x *bp, int vfid)
@@ -1797,7 +1812,7 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
{
int sb_id;
u32 val;
- u8 fid;
+ u8 fid, current_pf = 0;
/* IGU in normal mode - read CAM */
for (sb_id = 0; sb_id < IGU_REG_MAPPING_MEMORY_SIZE; sb_id++) {
@@ -1805,16 +1820,18 @@ bnx2x_get_vf_igu_cam_info(struct bnx2x *bp)
if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
continue;
fid = GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID);
- if (!(fid & IGU_FID_ENCODE_IS_PF))
+ if (fid & IGU_FID_ENCODE_IS_PF)
+ current_pf = fid & IGU_FID_PF_NUM_MASK;
+ else if (current_pf == BP_ABS_FUNC(bp))
bnx2x_vf_set_igu_info(bp, sb_id,
(fid & IGU_FID_VF_NUM_MASK));
-
DP(BNX2X_MSG_IOV, "%s[%d], igu_sb_id=%d, msix=%d\n",
((fid & IGU_FID_ENCODE_IS_PF) ? "PF" : "VF"),
((fid & IGU_FID_ENCODE_IS_PF) ? (fid & IGU_FID_PF_NUM_MASK) :
(fid & IGU_FID_VF_NUM_MASK)), sb_id,
GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR));
}
+ DP(BNX2X_MSG_IOV, "vf_sbs_pool is %d\n", BP_VFDB(bp)->vf_sbs_pool);
}
static void __bnx2x_iov_free_vfdb(struct bnx2x *bp)
@@ -1880,23 +1897,11 @@ static int bnx2x_sriov_info(struct bnx2x *bp, struct bnx2x_sriov *iov)
return 0;
}
-static u8 bnx2x_iov_get_max_queue_count(struct bnx2x *bp)
-{
- int i;
- u8 queue_count = 0;
-
- if (IS_SRIOV(bp))
- for_each_vf(bp, i)
- queue_count += bnx2x_vf(bp, i, alloc_resc.num_sbs);
-
- return queue_count;
-}
-
/* must be called after PF bars are mapped */
int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
- int num_vfs_param)
+ int num_vfs_param)
{
- int err, i, qcount;
+ int err, i;
struct bnx2x_sriov *iov;
struct pci_dev *dev = bp->pdev;
@@ -1994,12 +1999,13 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
/* re-read the IGU CAM for VFs - index and abs_vfid must be set */
bnx2x_get_vf_igu_cam_info(bp);
- /* get the total queue count and allocate the global queue arrays */
- qcount = bnx2x_iov_get_max_queue_count(bp);
-
/* allocate the queue arrays for all VFs */
- bp->vfdb->vfqs = kzalloc(qcount * sizeof(struct bnx2x_vf_queue),
- GFP_KERNEL);
+ bp->vfdb->vfqs = kzalloc(
+ BNX2X_MAX_NUM_VF_QUEUES * sizeof(struct bnx2x_vf_queue),
+ GFP_KERNEL);
+
+ DP(BNX2X_MSG_IOV, "bp->vfdb->vfqs was %p\n", bp->vfdb->vfqs);
+
if (!bp->vfdb->vfqs) {
BNX2X_ERR("failed to allocate vf queue array\n");
err = -ENOMEM;
@@ -2120,49 +2126,14 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
q_type);
DP(BNX2X_MSG_IOV,
- "initialized vf %d's queue object. func id set to %d\n",
- vf->abs_vfid, q->sp_obj.func_id);
-
- /* mac/vlan objects are per queue, but only those
- * that belong to the leading queue are initialized
- */
- if (vfq_is_leading(q)) {
- /* mac */
- bnx2x_init_mac_obj(bp, &q->mac_obj,
- cl_id, q->cid, func_id,
- bnx2x_vf_sp(bp, vf, mac_rdata),
- bnx2x_vf_sp_map(bp, vf, mac_rdata),
- BNX2X_FILTER_MAC_PENDING,
- &vf->filter_state,
- BNX2X_OBJ_TYPE_RX_TX,
- &bp->macs_pool);
- /* vlan */
- bnx2x_init_vlan_obj(bp, &q->vlan_obj,
- cl_id, q->cid, func_id,
- bnx2x_vf_sp(bp, vf, vlan_rdata),
- bnx2x_vf_sp_map(bp, vf, vlan_rdata),
- BNX2X_FILTER_VLAN_PENDING,
- &vf->filter_state,
- BNX2X_OBJ_TYPE_RX_TX,
- &bp->vlans_pool);
-
- /* mcast */
- bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
- q->cid, func_id, func_id,
- bnx2x_vf_sp(bp, vf, mcast_rdata),
- bnx2x_vf_sp_map(bp, vf, mcast_rdata),
- BNX2X_FILTER_MCAST_PENDING,
- &vf->filter_state,
- BNX2X_OBJ_TYPE_RX_TX);
-
- vf->leading_rss = cl_id;
- }
+ "initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
+ vf->abs_vfid, q->sp_obj.func_id, q->cid);
}
/* called by bnx2x_nic_load */
int bnx2x_iov_nic_init(struct bnx2x *bp)
{
- int vfid, qcount, i;
+ int vfid;
if (!IS_SRIOV(bp)) {
DP(BNX2X_MSG_IOV, "vfdb was not allocated\n");
@@ -2191,7 +2162,7 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
BNX2X_FIRST_VF_CID + base_vf_cid, base_cxt);
/* init statically provisioned resources */
- bnx2x_iov_static_resc(bp, &vf->alloc_resc);
+ bnx2x_iov_static_resc(bp, vf);
/* queues are initialized during VF-ACQUIRE */
@@ -2227,13 +2198,12 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
}
/* Final VF init */
- qcount = 0;
- for_each_vf(bp, i) {
- struct bnx2x_virtf *vf = BP_VF(bp, i);
+ for_each_vf(bp, vfid) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vfid);
/* fill in the BDF and bars */
- vf->bus = bnx2x_vf_bus(bp, i);
- vf->devfn = bnx2x_vf_devfn(bp, i);
+ vf->bus = bnx2x_vf_bus(bp, vfid);
+ vf->devfn = bnx2x_vf_devfn(bp, vfid);
bnx2x_vf_set_bars(bp, vf);
DP(BNX2X_MSG_IOV,
@@ -2242,10 +2212,6 @@ int bnx2x_iov_nic_init(struct bnx2x *bp)
(unsigned)vf->bars[0].bar, vf->bars[0].size,
(unsigned)vf->bars[1].bar, vf->bars[1].size,
(unsigned)vf->bars[2].bar, vf->bars[2].size);
-
- /* set local queue arrays */
- vf->vfqs = &bp->vfdb->vfqs[qcount];
- qcount += bnx2x_vf(bp, i, alloc_resc.num_sbs);
}
return 0;
@@ -2551,6 +2517,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
for_each_vfq(vf, j) {
struct bnx2x_vf_queue *rxq = vfq_get(vf, j);
+ dma_addr_t q_stats_addr =
+ vf->fw_stat_map + j * vf->stats_stride;
+
/* collect stats for active queues only */
if (bnx2x_get_q_logical_state(bp, &rxq->sp_obj) ==
BNX2X_Q_LOGICAL_STATE_STOPPED)
@@ -2558,13 +2527,13 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
/* create stats query entry for this queue */
cur_query_entry->kind = STATS_TYPE_QUEUE;
- cur_query_entry->index = vfq_cl_id(vf, rxq);
+ cur_query_entry->index = vfq_stat_id(vf, rxq);
cur_query_entry->funcID =
cpu_to_le16(FW_VF_HANDLE(vf->abs_vfid));
cur_query_entry->address.hi =
- cpu_to_le32(U64_HI(vf->fw_stat_map));
+ cpu_to_le32(U64_HI(q_stats_addr));
cur_query_entry->address.lo =
- cpu_to_le32(U64_LO(vf->fw_stat_map));
+ cpu_to_le32(U64_LO(q_stats_addr));
DP(BNX2X_MSG_IOV,
"added address %x %x for vf %d queue %d client %d\n",
cur_query_entry->address.hi,
@@ -2573,6 +2542,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
cur_query_entry++;
cur_data_offset += sizeof(struct per_queue_stats);
stats_count++;
+
+ /* all stats are coalesced to the leading queue */
+ if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+ break;
}
}
bp->fw_stats_req->hdr.cmd_num = bp->fw_stats_num + stats_count;
@@ -2591,6 +2564,11 @@ void bnx2x_iov_sp_task(struct bnx2x *bp)
for_each_vf(bp, i) {
struct bnx2x_virtf *vf = BP_VF(bp, i);
+ if (!vf) {
+ BNX2X_ERR("VF was null! skipping...\n");
+ continue;
+ }
+
if (!list_empty(&vf->op_list_head) &&
atomic_read(&vf->op_in_progress)) {
DP(BNX2X_MSG_IOV, "running pending op for vf %d\n", i);
@@ -2738,7 +2716,7 @@ int bnx2x_vf_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_queue *q = vfq_get(vf, i);
if (!q) {
- DP(BNX2X_MSG_IOV, "q number %d was not allocated\n", i);
+ BNX2X_ERR("q number %d was not allocated\n", i);
return -EINVAL;
}
@@ -2822,6 +2800,18 @@ int bnx2x_vf_init(struct bnx2x *bp, struct bnx2x_virtf *vf, dma_addr_t *sb_map)
return 0;
}
+struct set_vf_state_cookie {
+ struct bnx2x_virtf *vf;
+ u8 state;
+};
+
+void bnx2x_set_vf_state(void *cookie)
+{
+ struct set_vf_state_cookie *p = (struct set_vf_state_cookie *)cookie;
+
+ p->vf->state = p->state;
+}
+
/* VFOP close (teardown the queues, delete mcasts and close HW) */
static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
{
@@ -2872,7 +2862,19 @@ static void bnx2x_vfop_close(struct bnx2x *bp, struct bnx2x_virtf *vf)
op_err:
BNX2X_ERR("VF[%d] CLOSE error: rc %d\n", vf->abs_vfid, vfop->rc);
op_done:
- vf->state = VF_ACQUIRED;
+
+ /* need to make sure there are no outstanding stats ramrods which may
+ * cause the device to access the VF's stats buffer which it will free
+ * as soon as we return from the close flow.
+ */
+ {
+ struct set_vf_state_cookie cookie;
+
+ cookie.vf = vf;
+ cookie.state = VF_ACQUIRED;
+ bnx2x_stats_safe_exec(bp, bnx2x_set_vf_state, &cookie);
+ }
+
DP(BNX2X_MSG_IOV, "set state to acquired\n");
bnx2x_vfop_end(bp, vf, vfop);
}
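The op_done path above no longer writes vf->state directly: the device may still be completing a statistics ramrod that references the VF's stats buffer, so the state change is funnelled through bnx2x_stats_safe_exec(), which runs the callback while the statistics machinery is held off (via the bp->stats_sema initialised earlier in this patch). The cookie-plus-callback shape generalises to any small state update that must not race statistics; a hedged sketch reusing the same entry point (the example_* names are hypothetical):

struct example_cookie {
	struct bnx2x_virtf *vf;
	u8 new_state;
};

static void example_apply_state(void *cookie)
{
	struct example_cookie *c = cookie;

	c->vf->state = c->new_state;	/* no stats ramrod can race this */
}

static void example_quiesce_and_set(struct bnx2x *bp,
				    struct bnx2x_virtf *vf, u8 state)
{
	struct example_cookie c = { .vf = vf, .new_state = state };

	bnx2x_stats_safe_exec(bp, example_apply_state, &c);
}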
@@ -2942,6 +2944,43 @@ op_done:
bnx2x_vfop_end(bp, vf, vfop);
}
+static void bnx2x_vfop_rss(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ enum bnx2x_vfop_rss_state state;
+
+ if (!vfop) {
+ BNX2X_ERR("vfop was null\n");
+ return;
+ }
+
+ state = vfop->state;
+ bnx2x_vfop_reset_wq(vf);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d] STATE: %d\n", vf->abs_vfid, state);
+
+ switch (state) {
+ case BNX2X_VFOP_RSS_CONFIG:
+ /* next state */
+ vfop->state = BNX2X_VFOP_RSS_DONE;
+ bnx2x_config_rss(bp, &vfop->op_p->rss);
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+op_err:
+ BNX2X_ERR("RSS error: rc %d\n", vfop->rc);
+op_done:
+ case BNX2X_VFOP_RSS_DONE:
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_pending:
+ return;
+}
+
int bnx2x_vfop_release_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd)
@@ -2956,6 +2995,21 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp,
return -ENOMEM;
}
+int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ bnx2x_vfop_opset(BNX2X_VFOP_RSS_CONFIG, bnx2x_vfop_rss,
+ cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_rss,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
/* VF release ~ VF close + VF release-resources
* Release is the ultimate SW shutdown and is called whenever an
* irrecoverable error is encountered.
@@ -2967,6 +3021,8 @@ void bnx2x_vf_release(struct bnx2x *bp, struct bnx2x_virtf *vf, bool block)
.block = block,
};
int rc;
+
+ DP(BNX2X_MSG_IOV, "PF releasing vf %d\n", vf->abs_vfid);
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_RELEASE_VF);
rc = bnx2x_vfop_release_cmd(bp, vf, &cmd);
@@ -2995,6 +3051,12 @@ static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
enum channel_tlvs tlv)
{
+ /* we don't lock the channel for unsupported tlvs */
+ if (!bnx2x_tlv_supported(tlv)) {
+ BNX2X_ERR("attempting to lock with unsupported tlv. Aborting\n");
+ return;
+ }
+
/* lock the channel */
mutex_lock(&vf->op_mutex);
@@ -3009,19 +3071,32 @@ void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
void bnx2x_unlock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
enum channel_tlvs expected_tlv)
{
+ enum channel_tlvs current_tlv;
+
+ if (!vf) {
+ BNX2X_ERR("VF was %p\n", vf);
+ return;
+ }
+
+ current_tlv = vf->op_current;
+
+ /* we don't unlock the channel for unsupported tlvs */
+ if (!bnx2x_tlv_supported(expected_tlv))
+ return;
+
WARN(expected_tlv != vf->op_current,
"lock mismatch: expected %d found %d", expected_tlv,
vf->op_current);
+ /* record the locking op */
+ vf->op_current = CHANNEL_TLV_NONE;
+
/* lock the channel */
mutex_unlock(&vf->op_mutex);
/* log the unlock */
DP(BNX2X_MSG_IOV, "VF[%d]: vf pf channel unlocked by %d\n",
vf->abs_vfid, vf->op_current);
-
- /* record the locking op */
- vf->op_current = CHANNEL_TLV_NONE;
}
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
@@ -3052,11 +3127,77 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
return bnx2x_enable_sriov(bp);
}
}
+#define IGU_ENTRY_SIZE 4
int bnx2x_enable_sriov(struct bnx2x *bp)
{
int rc = 0, req_vfs = bp->requested_nr_virtfn;
+ int vf_idx, sb_idx, vfq_idx, qcount, first_vf;
+ u32 igu_entry, address;
+ u16 num_vf_queues;
+
+ if (req_vfs == 0)
+ return 0;
+
+ first_vf = bp->vfdb->sriov.first_vf_in_pf;
+
+ /* statically distribute vf sb pool between VFs */
+ num_vf_queues = min_t(u16, BNX2X_VF_MAX_QUEUES,
+ BP_VFDB(bp)->vf_sbs_pool / req_vfs);
+
+ /* zero previous values learned from igu cam */
+ for (vf_idx = 0; vf_idx < req_vfs; vf_idx++) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+ vf->sb_count = 0;
+ vf_sb_count(BP_VF(bp, vf_idx)) = 0;
+ }
+ bp->vfdb->vf_sbs_pool = 0;
+
+ /* prepare IGU cam */
+ sb_idx = BP_VFDB(bp)->first_vf_igu_entry;
+ address = IGU_REG_MAPPING_MEMORY + sb_idx * IGU_ENTRY_SIZE;
+ for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+ for (vfq_idx = 0; vfq_idx < num_vf_queues; vfq_idx++) {
+ igu_entry = vf_idx << IGU_REG_MAPPING_MEMORY_FID_SHIFT |
+ vfq_idx << IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT |
+ IGU_REG_MAPPING_MEMORY_VALID;
+ DP(BNX2X_MSG_IOV, "assigning sb %d to vf %d\n",
+ sb_idx, vf_idx);
+ REG_WR(bp, address, igu_entry);
+ sb_idx++;
+ address += IGU_ENTRY_SIZE;
+ }
+ }
+
+ /* Reinitialize vf database according to igu cam */
+ bnx2x_get_vf_igu_cam_info(bp);
+
+ DP(BNX2X_MSG_IOV, "vf_sbs_pool %d, num_vf_queues %d\n",
+ BP_VFDB(bp)->vf_sbs_pool, num_vf_queues);
+
+ qcount = 0;
+ for_each_vf(bp, vf_idx) {
+ struct bnx2x_virtf *vf = BP_VF(bp, vf_idx);
+
+ /* set local queue arrays */
+ vf->vfqs = &bp->vfdb->vfqs[qcount];
+ qcount += vf_sb_count(vf);
+ }
+ /* prepare msix vectors in VF configuration space */
+ for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
+ bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
+ REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
+ num_vf_queues);
+ }
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+ /* enable sriov. This will probe all the VFs, and consequently cause
+ * the "acquire" messages to appear on the VF PF channel.
+ */
+ DP(BNX2X_MSG_IOV, "about to call enable sriov\n");
+ pci_disable_sriov(bp->pdev);
rc = pci_enable_sriov(bp->pdev, req_vfs);
if (rc) {
BNX2X_ERR("pci_enable_sriov failed with %d\n", rc);
@@ -3084,8 +3225,8 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
pci_disable_sriov(bp->pdev);
}
-static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
- struct bnx2x_virtf *vf)
+int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx, struct bnx2x_virtf **vf,
+ struct pf_vf_bulletin_content **bulletin)
{
if (bp->state != BNX2X_STATE_OPEN) {
BNX2X_ERR("vf ndo called though PF is down\n");
@@ -3103,8 +3244,24 @@ static int bnx2x_vf_ndo_sanity(struct bnx2x *bp, int vfidx,
return -EINVAL;
}
- if (!vf) {
- BNX2X_ERR("vf ndo called but vf was null. vfidx was %d\n",
+ /* init members */
+ *vf = BP_VF(bp, vfidx);
+ *bulletin = BP_VF_BULLETIN(bp, vfidx);
+
+ if (!*vf) {
+ BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n",
+ vfidx);
+ return -EINVAL;
+ }
+
+ if (!(*vf)->vfqs) {
+ BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n",
+ vfidx);
+ return -EINVAL;
+ }
+
+ if (!*bulletin) {
+ BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
vfidx);
return -EINVAL;
}
@@ -3116,17 +3273,19 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
struct ifla_vf_info *ivi)
{
struct bnx2x *bp = netdev_priv(dev);
- struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
- struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
- struct bnx2x_vlan_mac_obj *vlan_obj = &bnx2x_vfq(vf, 0, vlan_obj);
- struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+ struct bnx2x_virtf *vf = NULL;
+ struct pf_vf_bulletin_content *bulletin = NULL;
+ struct bnx2x_vlan_mac_obj *mac_obj;
+ struct bnx2x_vlan_mac_obj *vlan_obj;
int rc;
- /* sanity */
- rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ /* sanity and init */
+ rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
if (rc)
return rc;
- if (!mac_obj || !vlan_obj || !bulletin) {
+ mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
+ vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+ if (!mac_obj || !vlan_obj) {
BNX2X_ERR("VF partially initialized\n");
return -EINVAL;
}
@@ -3137,10 +3296,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
ivi->spoofchk = 1; /*always enabled */
if (vf->state == VF_ENABLED) {
/* mac and vlan are in vlan_mac objects */
- mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
- 0, ETH_ALEN);
- vlan_obj->get_n_elements(bp, vlan_obj, 1, (u8 *)&ivi->vlan,
- 0, VLAN_HLEN);
+ if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)))
+ mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
+ 0, ETH_ALEN);
+ if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))
+ vlan_obj->get_n_elements(bp, vlan_obj, 1,
+ (u8 *)&ivi->vlan, 0,
+ VLAN_HLEN);
} else {
/* mac */
if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
@@ -3183,11 +3345,11 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
{
struct bnx2x *bp = netdev_priv(dev);
int rc, q_logical_state;
- struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
- struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+ struct bnx2x_virtf *vf = NULL;
+ struct pf_vf_bulletin_content *bulletin = NULL;
- /* sanity */
- rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ /* sanity and init */
+ rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
if (rc)
return rc;
if (!is_valid_ether_addr(mac)) {
@@ -3208,14 +3370,18 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
return rc;
}
- /* is vf initialized and queue set up? */
q_logical_state =
- bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+ bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
if (vf->state == VF_ENABLED &&
q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
/* configure the mac in device on this vf's queue */
unsigned long ramrod_flags = 0;
- struct bnx2x_vlan_mac_obj *mac_obj = &bnx2x_vfq(vf, 0, mac_obj);
+ struct bnx2x_vlan_mac_obj *mac_obj =
+ &bnx2x_leading_vfq(vf, mac_obj);
+
+ rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+ if (rc)
+ return rc;
/* must lock vfpf channel to protect against vf flows */
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
@@ -3249,11 +3415,11 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
{
struct bnx2x *bp = netdev_priv(dev);
int rc, q_logical_state;
- struct bnx2x_virtf *vf = BP_VF(bp, vfidx);
- struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vfidx);
+ struct bnx2x_virtf *vf = NULL;
+ struct pf_vf_bulletin_content *bulletin = NULL;
- /* sanity */
- rc = bnx2x_vf_ndo_sanity(bp, vfidx, vf);
+ /* sanity and init */
+ rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
if (rc)
return rc;
@@ -3275,18 +3441,21 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
/* is vf initialized and queue set up? */
q_logical_state =
- bnx2x_get_q_logical_state(bp, &bnx2x_vfq(vf, 0, sp_obj));
+ bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
if (vf->state == VF_ENABLED &&
q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
/* configure the vlan in device on this vf's queue */
unsigned long ramrod_flags = 0;
unsigned long vlan_mac_flags = 0;
struct bnx2x_vlan_mac_obj *vlan_obj =
- &bnx2x_vfq(vf, 0, vlan_obj);
+ &bnx2x_leading_vfq(vf, vlan_obj);
struct bnx2x_vlan_mac_ramrod_params ramrod_param;
struct bnx2x_queue_state_params q_params = {NULL};
struct bnx2x_queue_update_params *update_params;
+ rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+ if (rc)
+ return rc;
memset(&ramrod_param, 0, sizeof(ramrod_param));
/* must lock vfpf channel to protect against vf flows */
@@ -3306,7 +3475,7 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
*/
__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
q_params.cmd = BNX2X_Q_CMD_UPDATE;
- q_params.q_obj = &bnx2x_vfq(vf, 0, sp_obj);
+ q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
update_params = &q_params.params.update;
__set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
&update_params->update_flags);
@@ -3463,7 +3632,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
alloc_mem_err:
BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
sizeof(struct bnx2x_vf_mbx_msg));
- BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping,
+ BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping,
sizeof(union pf_vf_bulletin));
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index d143a7cdbbb..2a8c1dc65d9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -81,6 +81,7 @@ struct bnx2x_vf_queue {
u32 cid;
u16 index;
u16 sb_idx;
+ bool is_leading;
};
/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
@@ -194,6 +195,7 @@ struct bnx2x_virtf {
#define VF_CFG_INT_SIMD 0x0008
#define VF_CACHE_LINE 0x0010
#define VF_CFG_VLAN 0x0020
+#define VF_CFG_STATS_COALESCE 0x0040
u8 state;
#define VF_FREE 0 /* VF ready to be acquired holds no resc */
@@ -213,6 +215,7 @@ struct bnx2x_virtf {
/* dma */
dma_addr_t fw_stat_map; /* valid iff VF_CFG_STATS */
+ u16 stats_stride;
dma_addr_t spq_map;
dma_addr_t bulletin_map;
@@ -239,7 +242,10 @@ struct bnx2x_virtf {
u8 igu_base_id; /* base igu status block id */
struct bnx2x_vf_queue *vfqs;
-#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var)
+#define LEADING_IDX 0
+#define bnx2x_vfq_is_leading(vfq) ((vfq)->index == LEADING_IDX)
+#define bnx2x_vfq(vf, nr, var) ((vf)->vfqs[(nr)].var)
+#define bnx2x_leading_vfq(vf, var) ((vf)->vfqs[LEADING_IDX].var)
u8 index; /* index in the vf array */
u8 abs_vfid;
@@ -358,6 +364,10 @@ struct bnx2x_vf_sp {
struct client_init_ramrod_data init_data;
struct client_update_ramrod_data update_data;
} q_data;
+
+ union {
+ struct eth_rss_update_ramrod_data e2;
+ } rss_rdata;
};
struct hw_dma {
@@ -403,6 +413,10 @@ struct bnx2x_vfdb {
#define FLRD_VFS_DWORDS (BNX2X_MAX_NUM_OF_VFS / 32)
u32 flrd_vfs[FLRD_VFS_DWORDS];
+
+ /* the number of msix vectors belonging to this PF designated for VFs */
+ u16 vf_sbs_pool;
+ u16 first_vf_igu_entry;
};
/* queue access */
@@ -411,11 +425,6 @@ static inline struct bnx2x_vf_queue *vfq_get(struct bnx2x_virtf *vf, u8 index)
return &(vf->vfqs[index]);
}
-static inline bool vfq_is_leading(struct bnx2x_vf_queue *vfq)
-{
- return (vfq->index == 0);
-}
-
/* FW ids */
static inline u8 vf_igu_sb(struct bnx2x_virtf *vf, u16 sb_idx)
{
@@ -434,7 +443,10 @@ static u8 vfq_cl_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
static inline u8 vfq_stat_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
{
- return vfq_cl_id(vf, q);
+ if (vf->cfg_flags & VF_CFG_STATS_COALESCE)
+ return vf->leading_rss;
+ else
+ return vfq_cl_id(vf, q);
}
static inline u8 vfq_qzone_id(struct bnx2x_virtf *vf, struct bnx2x_vf_queue *q)
@@ -691,6 +703,10 @@ int bnx2x_vfop_release_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd);
+int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd);
+
/* VF release ~ VF close + VF release-resources
*
* Release is the ultimate SW shutdown and is called whenever an
@@ -730,9 +746,12 @@ int bnx2x_vfpf_release(struct bnx2x *bp);
int bnx2x_vfpf_release(struct bnx2x *bp);
int bnx2x_vfpf_init(struct bnx2x *bp);
void bnx2x_vfpf_close_vf(struct bnx2x *bp);
-int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx);
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ bool is_leading);
int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx);
int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
+int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *params);
int bnx2x_vfpf_set_mcast(struct net_device *dev);
int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
@@ -758,7 +777,7 @@ int bnx2x_enable_sriov(struct bnx2x *bp);
void bnx2x_disable_sriov(struct bnx2x *bp);
static inline int bnx2x_vf_headroom(struct bnx2x *bp)
{
- return bp->vfdb->sriov.nr_virtfn * BNX2X_CLIENTS_PER_VF;
+ return bp->vfdb->sriov.nr_virtfn * BNX2X_CIDS_PER_VF;
}
void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp);
int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs);
@@ -793,7 +812,7 @@ static inline int bnx2x_vfpf_acquire(struct bnx2x *bp,
static inline int bnx2x_vfpf_release(struct bnx2x *bp) {return 0; }
static inline int bnx2x_vfpf_init(struct bnx2x *bp) {return 0; }
static inline void bnx2x_vfpf_close_vf(struct bnx2x *bp) {}
-static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx) {return 0; }
+static inline int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp, bool is_leading) {return 0; }
static inline int bnx2x_vfpf_teardown_queue(struct bnx2x *bp, int qidx) {return 0; }
static inline int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr,
u8 vf_qid, bool set) {return 0; }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 98366abd02b..86436c77af0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -221,7 +221,8 @@ static int bnx2x_stats_comp(struct bnx2x *bp)
* Statistics service functions
*/
-static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+/* should be called under stats_sema */
+static void __bnx2x_stats_pmf_update(struct bnx2x *bp)
{
struct dmae_command *dmae;
u32 opcode;
@@ -518,29 +519,47 @@ static void bnx2x_func_stats_init(struct bnx2x *bp)
*stats_comp = 0;
}
-static void bnx2x_stats_start(struct bnx2x *bp)
+/* should be called under stats_sema */
+static void __bnx2x_stats_start(struct bnx2x *bp)
{
- /* vfs travel through here as part of the statistics FSM, but no action
- * is required
- */
- if (IS_VF(bp))
- return;
+ if (IS_PF(bp)) {
+ if (bp->port.pmf)
+ bnx2x_port_stats_init(bp);
- if (bp->port.pmf)
- bnx2x_port_stats_init(bp);
+ else if (bp->func_stx)
+ bnx2x_func_stats_init(bp);
- else if (bp->func_stx)
- bnx2x_func_stats_init(bp);
+ bnx2x_hw_stats_post(bp);
+ bnx2x_storm_stats_post(bp);
+ }
- bnx2x_hw_stats_post(bp);
- bnx2x_storm_stats_post(bp);
+ bp->stats_started = true;
+}
+
+static void bnx2x_stats_start(struct bnx2x *bp)
+{
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
+ __bnx2x_stats_start(bp);
+ up(&bp->stats_sema);
}
static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
bnx2x_stats_comp(bp);
- bnx2x_stats_pmf_update(bp);
- bnx2x_stats_start(bp);
+ __bnx2x_stats_pmf_update(bp);
+ __bnx2x_stats_start(bp);
+ up(&bp->stats_sema);
+}
+
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+{
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
+ __bnx2x_stats_pmf_update(bp);
+ up(&bp->stats_sema);
}
static void bnx2x_stats_restart(struct bnx2x *bp)
@@ -550,8 +569,11 @@ static void bnx2x_stats_restart(struct bnx2x *bp)
*/
if (IS_VF(bp))
return;
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
bnx2x_stats_comp(bp);
- bnx2x_stats_start(bp);
+ __bnx2x_stats_start(bp);
+ up(&bp->stats_sema);
}
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
@@ -888,9 +910,7 @@ static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp)
/* Make sure we use the value of the counter
* used for sending the last stats ramrod.
*/
- spin_lock_bh(&bp->stats_lock);
cur_stats_counter = bp->stats_counter - 1;
- spin_unlock_bh(&bp->stats_lock);
/* are storm stats valid? */
if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
@@ -1227,12 +1247,18 @@ static void bnx2x_stats_update(struct bnx2x *bp)
{
u32 *stats_comp = bnx2x_sp(bp, stats_comp);
- if (bnx2x_edebug_stats_stopped(bp))
+ /* we run update from timer context, so give up
+ * if somebody is in the middle of transition
+ */
+ if (down_trylock(&bp->stats_sema))
return;
+ if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started)
+ goto out;
+
if (IS_PF(bp)) {
if (*stats_comp != DMAE_COMP_VAL)
- return;
+ goto out;
if (bp->port.pmf)
bnx2x_hw_stats_update(bp);
@@ -1242,7 +1268,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
BNX2X_ERR("storm stats were not updated for 3 times\n");
bnx2x_panic();
}
- return;
+ goto out;
}
} else {
/* vf doesn't collect HW statistics, and doesn't get completions
@@ -1256,7 +1282,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
/* vf is done */
if (IS_VF(bp))
- return;
+ goto out;
if (netif_msg_timer(bp)) {
struct bnx2x_eth_stats *estats = &bp->eth_stats;
@@ -1267,6 +1293,9 @@ static void bnx2x_stats_update(struct bnx2x *bp)
bnx2x_hw_stats_post(bp);
bnx2x_storm_stats_post(bp);
+
+out:
+ up(&bp->stats_sema);
}
static void bnx2x_port_stats_stop(struct bnx2x *bp)
@@ -1332,6 +1361,11 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
{
int update = 0;
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
+
+ bp->stats_started = false;
+
bnx2x_stats_comp(bp);
if (bp->port.pmf)
@@ -1348,6 +1382,8 @@ static void bnx2x_stats_stop(struct bnx2x *bp)
bnx2x_hw_stats_post(bp);
bnx2x_stats_comp(bp);
}
+
+ up(&bp->stats_sema);
}
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
@@ -1376,15 +1412,17 @@ static const struct {
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
enum bnx2x_stats_state state;
+ void (*action)(struct bnx2x *bp);
if (unlikely(bp->panic))
return;
spin_lock_bh(&bp->stats_lock);
state = bp->stats_state;
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
+ action = bnx2x_stats_stm[state][event].action;
spin_unlock_bh(&bp->stats_lock);
- bnx2x_stats_stm[state][event].action(bp);
+ action(bp);
if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
@@ -1955,3 +1993,14 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats,
estats->mac_discard);
}
}
+
+void bnx2x_stats_safe_exec(struct bnx2x *bp,
+ void (func_to_exec)(void *cookie),
+ void *cookie){
+ if (down_timeout(&bp->stats_sema, HZ/10))
+ BNX2X_ERR("Unable to acquire stats lock\n");
+ bnx2x_stats_comp(bp);
+ func_to_exec(cookie);
+ __bnx2x_stats_start(bp);
+ up(&bp->stats_sema);
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 853824d258e..f35845006cd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -539,6 +539,9 @@ struct bnx2x;
void bnx2x_memset_stats(struct bnx2x *bp);
void bnx2x_stats_init(struct bnx2x *bp);
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
+void bnx2x_stats_safe_exec(struct bnx2x *bp,
+ void (func_to_exec)(void *cookie),
+ void *cookie);
/**
* bnx2x_save_statistics - save statistics when unloading.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 2088063151d..6cfb8873245 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -257,17 +257,23 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
/* humble our request */
req->resc_request.num_txqs =
- bp->acquire_resp.resc.num_txqs;
+ min(req->resc_request.num_txqs,
+ bp->acquire_resp.resc.num_txqs);
req->resc_request.num_rxqs =
- bp->acquire_resp.resc.num_rxqs;
+ min(req->resc_request.num_rxqs,
+ bp->acquire_resp.resc.num_rxqs);
req->resc_request.num_sbs =
- bp->acquire_resp.resc.num_sbs;
+ min(req->resc_request.num_sbs,
+ bp->acquire_resp.resc.num_sbs);
req->resc_request.num_mac_filters =
- bp->acquire_resp.resc.num_mac_filters;
+ min(req->resc_request.num_mac_filters,
+ bp->acquire_resp.resc.num_mac_filters);
req->resc_request.num_vlan_filters =
- bp->acquire_resp.resc.num_vlan_filters;
+ min(req->resc_request.num_vlan_filters,
+ bp->acquire_resp.resc.num_vlan_filters);
req->resc_request.num_mc_filters =
- bp->acquire_resp.resc.num_mc_filters;
+ min(req->resc_request.num_mc_filters,
+ bp->acquire_resp.resc.num_mc_filters);
/* Clear response buffer */
memset(&bp->vf2pf_mbox->resp, 0,
@@ -293,7 +299,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
bp->common.flash_size = 0;
bp->flags |=
NO_WOL_FLAG | NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG | NO_FCOE_FLAG;
- bp->igu_sb_cnt = 1;
+ bp->igu_sb_cnt = bp->acquire_resp.resc.num_sbs;
bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
sizeof(bp->fw_ver));
@@ -373,6 +379,8 @@ int bnx2x_vfpf_init(struct bnx2x *bp)
req->stats_addr = bp->fw_stats_data_mapping +
offsetof(struct bnx2x_fw_stats_data, queue_stats);
+ req->stats_stride = sizeof(struct per_queue_stats);
+
/* add list termination tlv */
bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
sizeof(struct channel_list_end_tlv));
@@ -452,12 +460,60 @@ free_irq:
bnx2x_free_irq(bp);
}
+static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_queue *q)
+{
+ u8 cl_id = vfq_cl_id(vf, q);
+ u8 func_id = FW_VF_HANDLE(vf->abs_vfid);
+
+ /* mac */
+ bnx2x_init_mac_obj(bp, &q->mac_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, mac_rdata),
+ bnx2x_vf_sp_map(bp, vf, mac_rdata),
+ BNX2X_FILTER_MAC_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &bp->macs_pool);
+ /* vlan */
+ bnx2x_init_vlan_obj(bp, &q->vlan_obj,
+ cl_id, q->cid, func_id,
+ bnx2x_vf_sp(bp, vf, vlan_rdata),
+ bnx2x_vf_sp_map(bp, vf, vlan_rdata),
+ BNX2X_FILTER_VLAN_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX,
+ &bp->vlans_pool);
+
+ /* mcast */
+ bnx2x_init_mcast_obj(bp, &vf->mcast_obj, cl_id,
+ q->cid, func_id, func_id,
+ bnx2x_vf_sp(bp, vf, mcast_rdata),
+ bnx2x_vf_sp_map(bp, vf, mcast_rdata),
+ BNX2X_FILTER_MCAST_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ /* rss */
+ bnx2x_init_rss_config_obj(bp, &vf->rss_conf_obj, cl_id, q->cid,
+ func_id, func_id,
+ bnx2x_vf_sp(bp, vf, rss_rdata),
+ bnx2x_vf_sp_map(bp, vf, rss_rdata),
+ BNX2X_FILTER_RSS_CONF_PENDING,
+ &vf->filter_state,
+ BNX2X_OBJ_TYPE_RX_TX);
+
+ vf->leading_rss = cl_id;
+ q->is_leading = true;
+}
+
/* ask the pf to open a queue for the vf */
-int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
+int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ bool is_leading)
{
struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
- struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
+ u8 fp_idx = fp->index;
u16 tpa_agg_size = 0, flags = 0;
int rc;
@@ -473,6 +529,9 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, int fp_idx)
tpa_agg_size = TPA_AGG_SIZE;
}
+ if (is_leading)
+ flags |= VFPF_QUEUE_FLG_LEADING_RSS;
+
/* calculate queue flags */
flags |= VFPF_QUEUE_FLG_STATS;
flags |= VFPF_QUEUE_FLG_CACHE_ALIGN;
@@ -646,6 +705,71 @@ out:
return 0;
}
+/* request pf to config rss table for vf queues */
+int bnx2x_vfpf_config_rss(struct bnx2x *bp,
+ struct bnx2x_config_rss_params *params)
+{
+ struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
+ struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss;
+ int rc = 0;
+
+ /* clear mailbox and prep first tlv */
+ bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS,
+ sizeof(*req));
+
+ /* add list termination tlv */
+ bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+ memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key));
+ req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE;
+ req->rss_key_size = T_ETH_RSS_KEY;
+ req->rss_result_mask = params->rss_result_mask;
+
+ /* flags handled individually for backward/forward compatibility */
+ if (params->rss_flags & (1 << BNX2X_RSS_MODE_DISABLED))
+ req->rss_flags |= VFPF_RSS_MODE_DISABLED;
+ if (params->rss_flags & (1 << BNX2X_RSS_MODE_REGULAR))
+ req->rss_flags |= VFPF_RSS_MODE_REGULAR;
+ if (params->rss_flags & (1 << BNX2X_RSS_SET_SRCH))
+ req->rss_flags |= VFPF_RSS_SET_SRCH;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV4))
+ req->rss_flags |= VFPF_RSS_IPV4;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV4_TCP))
+ req->rss_flags |= VFPF_RSS_IPV4_TCP;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV4_UDP))
+ req->rss_flags |= VFPF_RSS_IPV4_UDP;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV6))
+ req->rss_flags |= VFPF_RSS_IPV6;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV6_TCP))
+ req->rss_flags |= VFPF_RSS_IPV6_TCP;
+ if (params->rss_flags & (1 << BNX2X_RSS_IPV6_UDP))
+ req->rss_flags |= VFPF_RSS_IPV6_UDP;
+
+ DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags);
+
+ /* output tlvs list */
+ bnx2x_dp_tlv_list(bp, req);
+
+ /* send message to pf */
+ rc = bnx2x_send_msg2pf(bp, &resp->hdr.status, bp->vf2pf_mbox_mapping);
+ if (rc) {
+ BNX2X_ERR("failed to send message to pf. rc was %d\n", rc);
+ goto out;
+ }
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ BNX2X_ERR("failed to send rss message to PF over VF-PF channel %d\n",
+ resp->hdr.status);
+ rc = -EINVAL;
+ }
+out:
+ bnx2x_vfpf_finalize(bp, &req->first_tlv);
+
+ return rc;
+}
+
int bnx2x_vfpf_set_mcast(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -948,7 +1072,7 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* fill in pfdev info */
resp->pfdev_info.chip_num = bp->common.chip_id;
- resp->pfdev_info.db_size = (1 << BNX2X_DB_SHIFT);
+ resp->pfdev_info.db_size = bp->db_size;
resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
/* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
@@ -1054,8 +1178,13 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* record ghost addresses from vf message */
vf->spq_map = init->spq_addr;
vf->fw_stat_map = init->stats_addr;
+ vf->stats_stride = init->stats_stride;
vf->op_rc = bnx2x_vf_init(bp, vf, (dma_addr_t *)init->sb_addr);
+ /* set VF multiqueue statistics collection mode */
+ if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
+ vf->cfg_flags |= VF_CFG_STATS_COALESCE;
+
/* response */
bnx2x_vf_mbx_resp(bp, vf);
}
@@ -1080,6 +1209,8 @@ static void bnx2x_vf_mbx_set_q_flags(struct bnx2x *bp, u32 mbx_q_flags,
__set_bit(BNX2X_Q_FLG_HC, sp_q_flags);
if (mbx_q_flags & VFPF_QUEUE_FLG_DHC)
__set_bit(BNX2X_Q_FLG_DHC, sp_q_flags);
+ if (mbx_q_flags & VFPF_QUEUE_FLG_LEADING_RSS)
+ __set_bit(BNX2X_Q_FLG_LEADING_RSS, sp_q_flags);
/* outer vlan removal is set according to PF's multi function mode */
if (IS_MF_SD(bp))
@@ -1113,6 +1244,9 @@ static void bnx2x_vf_mbx_setup_q(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_queue_init_params *init_p;
struct bnx2x_queue_setup_params *setup_p;
+ if (bnx2x_vfq_is_leading(q))
+ bnx2x_leading_vfq_init(bp, vf, q);
+
/* re-init the VF operation context */
memset(&vf->op_params.qctor, 0 , sizeof(vf->op_params.qctor));
setup_p = &vf->op_params.qctor.prep_qsetup;
@@ -1552,6 +1686,68 @@ static void bnx2x_vf_mbx_release_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_vf_mbx_resp(bp, vf);
}
+static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+ struct bnx2x_config_rss_params *vf_op_params = &vf->op_params.rss;
+ struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss;
+
+ if (rss_tlv->ind_table_size != T_ETH_INDIRECTION_TABLE_SIZE ||
+ rss_tlv->rss_key_size != T_ETH_RSS_KEY) {
+ BNX2X_ERR("failing rss configuration of vf %d due to size mismatch\n",
+ vf->index);
+ vf->op_rc = -EINVAL;
+ goto mbx_resp;
+ }
+
+ /* set vfop params according to rss tlv */
+ memcpy(vf_op_params->ind_table, rss_tlv->ind_table,
+ T_ETH_INDIRECTION_TABLE_SIZE);
+ memcpy(vf_op_params->rss_key, rss_tlv->rss_key,
+ sizeof(rss_tlv->rss_key));
+ vf_op_params->rss_obj = &vf->rss_conf_obj;
+ vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
+
+ /* flags handled individually for backward/forward compatibility */
+ if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
+ __set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
+ __set_bit(BNX2X_RSS_MODE_REGULAR, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_SET_SRCH)
+ __set_bit(BNX2X_RSS_SET_SRCH, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV4)
+ __set_bit(BNX2X_RSS_IPV4, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP)
+ __set_bit(BNX2X_RSS_IPV4_TCP, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP)
+ __set_bit(BNX2X_RSS_IPV4_UDP, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV6)
+ __set_bit(BNX2X_RSS_IPV6, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP)
+ __set_bit(BNX2X_RSS_IPV6_TCP, &vf_op_params->rss_flags);
+ if (rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)
+ __set_bit(BNX2X_RSS_IPV6_UDP, &vf_op_params->rss_flags);
+
+ if ((!(rss_tlv->rss_flags & VFPF_RSS_IPV4_TCP) &&
+ rss_tlv->rss_flags & VFPF_RSS_IPV4_UDP) ||
+ (!(rss_tlv->rss_flags & VFPF_RSS_IPV6_TCP) &&
+ rss_tlv->rss_flags & VFPF_RSS_IPV6_UDP)) {
+ BNX2X_ERR("about to hit a FW assert. aborting...\n");
+ vf->op_rc = -EINVAL;
+ goto mbx_resp;
+ }
+
+ vf->op_rc = bnx2x_vfop_rss_cmd(bp, vf, &cmd);
+
+mbx_resp:
+ if (vf->op_rc)
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
/* dispatch request */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
@@ -1588,6 +1784,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
case CHANNEL_TLV_RELEASE:
bnx2x_vf_mbx_release_vf(bp, vf, mbx);
break;
+ case CHANNEL_TLV_UPDATE_RSS:
+ bnx2x_vf_mbx_update_rss(bp, vf, mbx);
+ break;
}
} else {
@@ -1607,7 +1806,7 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
/* test whether we can respond to the VF (do we have an address
* for it?)
*/
- if (vf->state == VF_ACQUIRED) {
+ if (vf->state == VF_ACQUIRED || vf->state == VF_ENABLED) {
/* mbx_resp uses the op_rc of the VF */
vf->op_rc = PFVF_STATUS_NOT_SUPPORTED;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index f3ad174a3a6..1179fe06d0c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -51,6 +51,7 @@ struct hw_sb_info {
#define VFPF_QUEUE_FLG_COS 0x0080
#define VFPF_QUEUE_FLG_HC 0x0100
#define VFPF_QUEUE_FLG_DHC 0x0200
+#define VFPF_QUEUE_FLG_LEADING_RSS 0x0400
#define VFPF_QUEUE_DROP_IP_CS_ERR (1 << 0)
#define VFPF_QUEUE_DROP_TCP_CS_ERR (1 << 1)
@@ -131,6 +132,27 @@ struct vfpf_q_op_tlv {
u8 padding[3];
};
+/* receive side scaling tlv */
+struct vfpf_rss_tlv {
+ struct vfpf_first_tlv first_tlv;
+ u32 rss_flags;
+#define VFPF_RSS_MODE_DISABLED (1 << 0)
+#define VFPF_RSS_MODE_REGULAR (1 << 1)
+#define VFPF_RSS_SET_SRCH (1 << 2)
+#define VFPF_RSS_IPV4 (1 << 3)
+#define VFPF_RSS_IPV4_TCP (1 << 4)
+#define VFPF_RSS_IPV4_UDP (1 << 5)
+#define VFPF_RSS_IPV6 (1 << 6)
+#define VFPF_RSS_IPV6_TCP (1 << 7)
+#define VFPF_RSS_IPV6_UDP (1 << 8)
+ u8 rss_result_mask;
+ u8 ind_table_size;
+ u8 rss_key_size;
+ u8 padding;
+ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ u32 rss_key[T_ETH_RSS_KEY]; /* hash values */
+};
+
/* acquire response tlv - carries the allocated resources */
struct pfvf_acquire_resp_tlv {
struct pfvf_tlv hdr;
@@ -166,12 +188,20 @@ struct pfvf_acquire_resp_tlv {
} resc;
};
+#define VFPF_INIT_FLG_STATS_COALESCE (1 << 0) /* when set, the VF's queue
+ * stats will be coalesced on
+ * the leading RSS queue
+ */
+
/* Init VF */
struct vfpf_init_tlv {
struct vfpf_first_tlv first_tlv;
aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; /* vf_sb based */
aligned_u64 spq_addr;
aligned_u64 stats_addr;
+ u16 stats_stride;
+ u32 flags;
+ u32 padding[2];
};
/* Setup Queue */
@@ -293,13 +323,14 @@ union vfpf_tlvs {
struct vfpf_q_op_tlv q_op;
struct vfpf_setup_q_tlv setup_q;
struct vfpf_set_q_filters_tlv set_q_filters;
- struct vfpf_release_tlv release;
- struct channel_list_end_tlv list_end;
+ struct vfpf_release_tlv release;
+ struct vfpf_rss_tlv update_rss;
+ struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size;
};
union pfvf_tlvs {
- struct pfvf_general_resp_tlv general_resp;
+ struct pfvf_general_resp_tlv general_resp;
struct pfvf_acquire_resp_tlv acquire_resp;
struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size;
@@ -355,14 +386,18 @@ enum channel_tlvs {
CHANNEL_TLV_INIT,
CHANNEL_TLV_SETUP_Q,
CHANNEL_TLV_SET_Q_FILTERS,
+ CHANNEL_TLV_ACTIVATE_Q,
+ CHANNEL_TLV_DEACTIVATE_Q,
CHANNEL_TLV_TEARDOWN_Q,
CHANNEL_TLV_CLOSE,
CHANNEL_TLV_RELEASE,
+ CHANNEL_TLV_UPDATE_RSS_DEPRECATED,
CHANNEL_TLV_PF_RELEASE_VF,
CHANNEL_TLV_LIST_END,
CHANNEL_TLV_FLR,
CHANNEL_TLV_PF_SET_MAC,
CHANNEL_TLV_PF_SET_VLAN,
+ CHANNEL_TLV_UPDATE_RSS,
CHANNEL_TLV_MAX
};
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index d78d4cf140e..8142480d977 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1,6 +1,6 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2012 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1184,6 +1184,7 @@ error:
static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
int ctx_blk_size = cp->ethdev->ctx_blk_size;
int total_mem, blks, i;
@@ -1201,7 +1202,7 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
cp->ctx_blks = blks;
cp->ctx_blk_size = ctx_blk_size;
- if (!BNX2X_CHIP_IS_57710(cp->chip_id))
+ if (!CHIP_IS_E1(bp))
cp->ctx_align = 0;
else
cp->ctx_align = ctx_blk_size;
@@ -1231,6 +1232,7 @@ static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_eth_dev *ethdev = cp->ethdev;
u32 start_cid = ethdev->starting_cid;
int i, j, n, ret, pages;
@@ -1240,7 +1242,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
cp->iscsi_start_cid = start_cid;
cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
cp->max_cid_space += dev->max_fcoe_conn;
cp->fcoe_init_cid = ethdev->fcoe_init_cid;
if (!cp->fcoe_init_cid)
@@ -1288,7 +1290,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
if (ret)
goto error;
- if (CNIC_SUPPORTS_FCOE(cp)) {
+ if (CNIC_SUPPORTS_FCOE(bp)) {
ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
if (ret)
goto error;
@@ -1382,6 +1384,7 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
u32 type, union l5cm_specific_data *l5_data)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct l5cm_spe kwqe;
struct kwqe_16 *kwq[1];
u16 type_16;
@@ -1389,10 +1392,10 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
kwqe.hdr.conn_and_cmd_data =
cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
- BNX2X_HW_CID(cp, cid)));
+ BNX2X_HW_CID(bp, cid)));
type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
- type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
+ type_16 |= (bp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
SPE_HDR_FUNCTION_ID;
kwqe.hdr.type = cpu_to_le16(type_16);
@@ -1427,13 +1430,34 @@ static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
rcu_read_unlock();
}
+static void cnic_bnx2x_set_tcp_options(struct cnic_dev *dev, int time_stamps,
+ int en_tcp_dack)
+{
+ struct bnx2x *bp = netdev_priv(dev->netdev);
+ u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
+ u16 tstorm_flags = 0;
+
+ if (time_stamps) {
+ xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
+ tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
+ }
+ if (en_tcp_dack)
+ tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN;
+
+ CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+ XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), xstorm_flags);
+
+ CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
+ TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(bp->pfid), tstorm_flags);
+}
+
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
int hq_bds, pages;
- u32 pfid = cp->pfid;
+ u32 pfid = bp->pfid;
cp->num_iscsi_tasks = req1->num_tasks_per_conn;
cp->num_ccells = req1->num_ccells_per_conn;
@@ -1506,15 +1530,18 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
hq_bds);
+ cnic_bnx2x_set_tcp_options(dev,
+ req1->flags & ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE,
+ req1->flags & ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE);
+
return 0;
}
static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
- struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
- u32 pfid = cp->pfid;
+ u32 pfid = bp->pfid;
struct iscsi_kcqe kcqe;
struct kcqe *cqes[1];
@@ -1653,6 +1680,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct iscsi_kwqe_conn_offload1 *req1 =
(struct iscsi_kwqe_conn_offload1 *) wqes[0];
struct iscsi_kwqe_conn_offload2 *req2 =
@@ -1661,11 +1689,11 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
struct cnic_iscsi *iscsi = ctx->proto.iscsi;
u32 cid = ctx->cid;
- u32 hw_cid = BNX2X_HW_CID(cp, cid);
+ u32 hw_cid = BNX2X_HW_CID(bp, cid);
struct iscsi_context *ictx;
struct regpair context_addr;
int i, j, n = 2, n_max;
- u8 port = CNIC_PORT(cp);
+ u8 port = BP_PORT(bp);
ctx->ctx_flags = 0;
if (!req2->num_additional_wqes)
@@ -1719,8 +1747,8 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
ETH_P_8021Q;
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
- cp->port_mode == CHIP_2_PORT_MODE) {
+ if (BNX2X_CHIP_IS_E2_PLUS(bp) &&
+ bp->common.chip_port_mode == CHIP_2_PORT_MODE) {
port = 0;
}
@@ -1841,6 +1869,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
struct iscsi_kwqe_conn_offload1 *req1;
struct iscsi_kwqe_conn_offload2 *req2;
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_context *ctx;
struct iscsi_kcqe kcqe;
struct kcqe *cqes[1];
@@ -1894,7 +1923,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
}
kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
- kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);
+ kcqe.iscsi_conn_context_id = BNX2X_HW_CID(bp, cp->ctx_tbl[l5_cid].cid);
done:
cqes[0] = (struct kcqe *) &kcqe;
@@ -1930,6 +1959,7 @@ static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
union l5cm_specific_data l5_data;
int ret;
@@ -1938,7 +1968,7 @@ static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
init_waitqueue_head(&ctx->waitq);
ctx->wait_cond = 0;
memset(&l5_data, 0, sizeof(l5_data));
- hw_cid = BNX2X_HW_CID(cp, ctx->cid);
+ hw_cid = BNX2X_HW_CID(bp, ctx->cid);
ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
hw_cid, NONE_CONNECTION_TYPE, &l5_data);
@@ -2035,9 +2065,6 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
xstorm_buf->pseudo_header_checksum =
swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
- if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
- tstorm_buf->params |=
- L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
if (kwqe3->ka_timeout) {
tstorm_buf->ka_enable = 1;
tstorm_buf->ka_timeout = kwqe3->ka_timeout;
@@ -2049,9 +2076,8 @@ static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
- struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
- u32 pfid = cp->pfid;
+ u32 pfid = bp->pfid;
u8 *mac = dev->mac_addr;
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
@@ -2084,25 +2110,6 @@ static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
mac[0]);
}
-static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
-{
- struct cnic_local *cp = dev->cnic_priv;
- struct bnx2x *bp = netdev_priv(dev->netdev);
- u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
- u16 tstorm_flags = 0;
-
- if (tcp_ts) {
- xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
- tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
- }
-
- CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);
-
- CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
-}
-
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
u32 num, int *work)
{
@@ -2176,10 +2183,7 @@ static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);
-
- cnic_bnx2x_set_tcp_timestamp(dev,
- kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);
+ XSTORM_ISCSI_LOCAL_VLAN_OFFSET(bp->pfid), csk->vlan_id);
ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
@@ -2248,11 +2252,12 @@ static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
struct fcoe_stat_ramrod_params *fcoe_stat;
union l5cm_specific_data l5_data;
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
int ret;
u32 cid;
req = (struct fcoe_kwqe_stat *) kwqe;
- cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+ cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
if (!fcoe_stat)
@@ -2271,6 +2276,7 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
{
int ret;
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
u32 cid;
struct fcoe_init_ramrod_params *fcoe_init;
struct fcoe_kwqe_init1 *req1;
@@ -2315,7 +2321,7 @@ static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
cp->kcq2.sw_prod_idx = 0;
- cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+ cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
FCOE_CONNECTION_TYPE, &l5_data);
*work = 3;
@@ -2328,6 +2334,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
int ret = 0;
u32 cid = -1, l5_cid;
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct fcoe_kwqe_conn_offload1 *req1;
struct fcoe_kwqe_conn_offload2 *req2;
struct fcoe_kwqe_conn_offload3 *req3;
@@ -2370,7 +2377,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
if (fctx) {
- u32 hw_cid = BNX2X_HW_CID(cp, cid);
+ u32 hw_cid = BNX2X_HW_CID(bp, cid);
u32 val;
val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
@@ -2394,7 +2401,7 @@ static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
- cid = BNX2X_HW_CID(cp, cid);
+ cid = BNX2X_HW_CID(bp, cid);
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
FCOE_CONNECTION_TYPE, &l5_data);
if (!ret)
@@ -2552,13 +2559,14 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
struct fcoe_kwqe_destroy *req;
union l5cm_specific_data l5_data;
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
int ret;
u32 cid;
cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
req = (struct fcoe_kwqe_destroy *) kwqe;
- cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+ cid = BNX2X_HW_CID(bp, cp->fcoe_init_cid);
memset(&l5_data, 0, sizeof(l5_data));
ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
@@ -2715,7 +2723,7 @@ static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
struct kwqe *wqes[], u32 num_wqes)
{
- struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
int i, work, ret;
u32 opcode;
struct kwqe *kwqe;
@@ -2723,7 +2731,7 @@ static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
return -EAGAIN; /* bnx2 is down */
- if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+ if (!BNX2X_CHIP_IS_E2_PLUS(bp))
return -EINVAL;
for (i = 0; i < num_wqes; ) {
@@ -3039,8 +3047,8 @@ static irqreturn_t cnic_irq(int irq, void *dev_instance)
static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
u16 index, u8 op, u8 update)
{
- struct cnic_local *cp = dev->cnic_priv;
- u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
+ struct bnx2x *bp = netdev_priv(dev->netdev);
+ u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp) * 32 +
COMMAND_REG_INT_ACK);
struct igu_ack_register igu_ack;
@@ -3603,6 +3611,7 @@ static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
csk1->rcv_buf = DEF_RCV_BUF;
csk1->snd_buf = DEF_SND_BUF;
csk1->seed = DEF_SEED;
+ csk1->tcp_flags = 0;
*csk = csk1;
return 0;
@@ -4020,15 +4029,18 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
cnic_cm_upcall(cp, csk, opcode);
break;
- case L5CM_RAMROD_CMD_ID_CLOSE:
- if (l4kcqe->status != 0) {
- netdev_warn(dev->netdev, "RAMROD CLOSE compl with "
- "status 0x%x\n", l4kcqe->status);
+ case L5CM_RAMROD_CMD_ID_CLOSE: {
+ struct iscsi_kcqe *l5kcqe = (struct iscsi_kcqe *) kcqe;
+
+ if (l4kcqe->status != 0 || l5kcqe->completion_status != 0) {
+ netdev_warn(dev->netdev, "RAMROD CLOSE compl with status 0x%x completion status 0x%x\n",
+ l4kcqe->status, l5kcqe->completion_status);
opcode = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
/* Fall through */
} else {
break;
}
+ }
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
@@ -4213,13 +4225,12 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
- struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
- u32 pfid = cp->pfid;
- u32 port = CNIC_PORT(cp);
+ u32 pfid = bp->pfid;
+ u32 port = BP_PORT(bp);
cnic_init_bnx2x_mac(dev);
- cnic_bnx2x_set_tcp_timestamp(dev, 1);
+ cnic_bnx2x_set_tcp_options(dev, 0, 1);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
@@ -4897,6 +4908,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
struct client_init_ramrod_data *data)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_uio_dev *udev = cp->udev;
union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
dma_addr_t buf_map, ring_map = udev->l2_ring_map;
@@ -4925,7 +4937,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
start_bd->general_data &= ~ETH_TX_START_BD_PARSE_NBDS;
start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+ if (BNX2X_CHIP_IS_E2_PLUS(bp))
pbd_e2->parsing_data = (UNICAST_ADDRESS <<
ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
else
@@ -4962,6 +4974,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
struct client_init_ramrod_data *data)
{
struct cnic_local *cp = dev->cnic_priv;
+ struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_uio_dev *udev = cp->udev;
struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
BNX2_PAGE_SIZE);
@@ -4970,7 +4983,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
int i;
u32 cli = cp->ethdev->iscsi_l2_client_id;
- int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
+ int cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
u32 val;
dma_addr_t ring_map = udev->l2_ring_map;
@@ -4979,7 +4992,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
data->general.activate_flg = 1;
data->general.sp_client_id = cli;
data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
- data->general.func_id = cp->pfid;
+ data->general.func_id = bp->pfid;
for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
dma_addr_t buf_map;
@@ -5029,13 +5042,13 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct bnx2x *bp = netdev_priv(dev->netdev);
- u32 pfid = cp->pfid;
+ u32 pfid = bp->pfid;
cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
cp->kcq1.sw_prod_idx = 0;
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
cp->kcq1.hw_prod_idx_ptr =
@@ -5051,7 +5064,7 @@ static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
&sb->sb.running_index[SM_RX_ID];
}
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
@@ -5073,12 +5086,10 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
u32 pfid;
dev->stats_addr = ethdev->addr_drv_info_to_mcp;
- cp->port_mode = bp->common.chip_port_mode;
- cp->pfid = bp->pfid;
cp->func = bp->pf_num;
func = CNIC_FUNC(cp);
- pfid = cp->pfid;
+ pfid = bp->pfid;
ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
cp->iscsi_start_cid, 0);
@@ -5086,7 +5097,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
if (ret)
return -ENOMEM;
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
cp->fcoe_start_cid, 0);
@@ -5168,12 +5179,12 @@ static void cnic_init_rings(struct cnic_dev *dev)
rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
barrier();
- cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
+ cl_qzone_id = BNX2X_CL_QZONE_ID(bp, cli);
off = BAR_USTRORM_INTMEM +
- (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ?
+ (BNX2X_CHIP_IS_E2_PLUS(bp) ?
USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
- USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
+ USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), cli));
for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
@@ -5271,6 +5282,13 @@ static int cnic_register_netdev(struct cnic_dev *dev)
if (err)
netdev_err(dev->netdev, "register_cnic failed\n");
+ /* Read iSCSI config again. On some bnx2x devices, the iSCSI config
+ * can change after firmware is downloaded.
+ */
+ dev->max_iscsi_conn = ethdev->max_iscsi_conn;
+ if (ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
+ dev->max_iscsi_conn = 0;
+
return err;
}
@@ -5353,7 +5371,7 @@ static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
cnic_free_irq(dev);
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
idx_off = offsetof(struct hc_status_block_e2, index_values) +
(hc_index * sizeof(u16));
@@ -5370,7 +5388,7 @@ static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
*cp->kcq1.hw_prod_idx_ptr = 0;
CNIC_WR(dev, BAR_CSTRORM_INTMEM +
- CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
+ CSTORM_ISCSI_EQ_CONS_OFFSET(bp->pfid, 0), 0);
CNIC_WR16(dev, cp->kcq1.io_addr, 0);
cnic_free_resc(dev);
}
@@ -5544,7 +5562,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
- if (CNIC_SUPPORTS_FCOE(cp)) {
+ if (CNIC_SUPPORTS_FCOE(bp)) {
cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
cdev->max_fcoe_exchanges = ethdev->max_fcoe_exchanges;
}
@@ -5564,7 +5582,7 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
cp->stop_cm = cnic_cm_stop_bnx2x_hw;
cp->enable_int = cnic_enable_bnx2x_int;
cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
- if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+ if (BNX2X_CHIP_IS_E2_PLUS(bp)) {
cp->ack_int = cnic_ack_bnx2x_e2_msix;
cp->arm_int = cnic_arm_bnx2x_e2_msix;
} else {
@@ -5628,7 +5646,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
dev = cnic_from_netdev(netdev);
- if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
+ if (!dev && event == NETDEV_REGISTER) {
/* Check for the hot-plug device */
dev = is_cnic_dev(netdev);
if (dev) {
@@ -5644,7 +5662,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
else if (event == NETDEV_UNREGISTER)
cnic_ulp_exit(dev);
- if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
+ if (event == NETDEV_UP) {
if (cnic_register_netdev(dev) != 0) {
cnic_put(dev);
goto done;
@@ -5693,21 +5711,8 @@ static struct notifier_block cnic_netdev_notifier = {
static void cnic_release(void)
{
- struct cnic_dev *dev;
struct cnic_uio_dev *udev;
- while (!list_empty(&cnic_dev_list)) {
- dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
- if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
- cnic_ulp_stop(dev);
- cnic_stop_hw(dev);
- }
-
- cnic_ulp_exit(dev);
- cnic_unregister_netdev(dev);
- list_del_init(&dev->list);
- cnic_free_dev(dev);
- }
while (!list_empty(&cnic_udev_list)) {
udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
list);
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 62c670619ae..0121a5d5519 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -1,6 +1,6 @@
/* cnic.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2011 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -303,8 +303,6 @@ struct cnic_local {
u32 chip_id;
int func;
- u32 pfid;
- u8 port_mode;
u32 shmem_base;
@@ -364,47 +362,7 @@ struct bnx2x_bd_chain_next {
#define BNX2X_FCOE_L5_CID_BASE MAX_ISCSI_TBL_SZ
-#define BNX2X_CHIP_NUM_57710 0x164e
-#define BNX2X_CHIP_NUM_57711 0x164f
-#define BNX2X_CHIP_NUM_57711E 0x1650
-#define BNX2X_CHIP_NUM_57712 0x1662
-#define BNX2X_CHIP_NUM_57712E 0x1663
-#define BNX2X_CHIP_NUM_57713 0x1651
-#define BNX2X_CHIP_NUM_57713E 0x1652
-#define BNX2X_CHIP_NUM_57800 0x168a
-#define BNX2X_CHIP_NUM_57810 0x168e
-#define BNX2X_CHIP_NUM_57840 0x168d
-
-#define BNX2X_CHIP_NUM(x) (x >> 16)
-#define BNX2X_CHIP_IS_57710(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57710)
-#define BNX2X_CHIP_IS_57711(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711)
-#define BNX2X_CHIP_IS_57711E(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711E)
-#define BNX2X_CHIP_IS_E1H(x) \
- (BNX2X_CHIP_IS_57711(x) || BNX2X_CHIP_IS_57711E(x))
-#define BNX2X_CHIP_IS_57712(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712)
-#define BNX2X_CHIP_IS_57712E(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712E)
-#define BNX2X_CHIP_IS_57713(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713)
-#define BNX2X_CHIP_IS_57713E(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713E)
-#define BNX2X_CHIP_IS_57800(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57800)
-#define BNX2X_CHIP_IS_57810(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57810)
-#define BNX2X_CHIP_IS_57840(x) \
- (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57840)
-#define BNX2X_CHIP_IS_E2(x) \
- (BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \
- BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x))
-#define BNX2X_CHIP_IS_E3(x) \
- (BNX2X_CHIP_IS_57800(x) || BNX2X_CHIP_IS_57810(x) || \
- BNX2X_CHIP_IS_57840(x))
-#define BNX2X_CHIP_IS_E2_PLUS(x) (BNX2X_CHIP_IS_E2(x) || BNX2X_CHIP_IS_E3(x))
+#define BNX2X_CHIP_IS_E2_PLUS(bp) (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
#define BNX2X_RX_DESC_CNT (BNX2_PAGE_SIZE / \
sizeof(struct eth_rx_bd))
@@ -439,31 +397,26 @@ struct bnx2x_bd_chain_next {
#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
#endif
-#define CNIC_PORT(cp) ((cp)->pfid & 1)
#define CNIC_FUNC(cp) ((cp)->func)
-#define CNIC_PATH(cp) (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? \
- 0 : (CNIC_FUNC(cp) & 1))
-#define CNIC_E1HVN(cp) ((cp)->pfid >> 1)
-#define BNX2X_HW_CID(cp, x) ((CNIC_PORT(cp) << 23) | \
- (CNIC_E1HVN(cp) << 17) | (x))
+#define BNX2X_HW_CID(bp, x) ((BP_PORT(bp) << 23) | \
+ (BP_VN(bp) << 17) | (x))
#define BNX2X_SW_CID(x) (x & 0x1ffff)
-#define BNX2X_CL_QZONE_ID(cp, cli) \
- (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? cli : \
- cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H))
+#define BNX2X_CL_QZONE_ID(bp, cli) \
+ (BNX2X_CHIP_IS_E2_PLUS(bp) ? cli : \
+ cli + (BP_PORT(bp) * ETH_MAX_RX_CLIENTS_E1H))
#ifndef MAX_STAT_COUNTER_ID
#define MAX_STAT_COUNTER_ID \
- (BNX2X_CHIP_IS_E1H((cp)->chip_id) ? MAX_STAT_COUNTER_ID_E1H : \
- ((BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id)) ? MAX_STAT_COUNTER_ID_E2 :\
+ (CHIP_IS_E1H(bp) ? MAX_STAT_COUNTER_ID_E1H : \
+ ((BNX2X_CHIP_IS_E2_PLUS(bp)) ? MAX_STAT_COUNTER_ID_E2 : \
MAX_STAT_COUNTER_ID_E1))
#endif
-#define CNIC_SUPPORTS_FCOE(cp) \
- (BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id) && \
- !((cp)->ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+#define CNIC_SUPPORTS_FCOE(cp) \
+ (BNX2X_CHIP_IS_E2_PLUS(bp) && !NO_FCOE(bp))
#define CNIC_RAMROD_TMO (HZ / 4)
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index ede3db35d75..95a8e4b11c9 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -1,7 +1,7 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2012 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -5400,8 +5400,8 @@ struct tstorm_l5cm_tcp_flags {
u16 flags;
#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID (0xFFF<<0)
#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID_SHIFT 0
-#define TSTORM_L5CM_TCP_FLAGS_RSRV0 (0x1<<12)
-#define TSTORM_L5CM_TCP_FLAGS_RSRV0_SHIFT 12
+#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_EN (0x1<<12)
+#define TSTORM_L5CM_TCP_FLAGS_DELAYED_ACK_SHIFT 12
#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<13)
#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 13
#define TSTORM_L5CM_TCP_FLAGS_RSRV1 (0x3<<14)
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index ec9bb9ad4bb..0658b43e148 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -1,6 +1,6 @@
/* cnic_if.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2012 Broadcom Corporation
+ * Copyright (c) 2006-2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -14,8 +14,8 @@
#include "bnx2x/bnx2x_mfw_req.h"
-#define CNIC_MODULE_VERSION "2.5.16"
-#define CNIC_MODULE_RELDATE "Dec 05, 2012"
+#define CNIC_MODULE_VERSION "2.5.18"
+#define CNIC_MODULE_RELDATE "Sept 01, 2013"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -238,8 +238,8 @@ struct cnic_sock {
u16 src_port;
u16 dst_port;
u16 vlan_id;
- unsigned char old_ha[6];
- unsigned char ha[6];
+ unsigned char old_ha[ETH_ALEN];
+ unsigned char ha[ETH_ALEN];
u32 mtu;
u32 cid;
u32 l5_cid;
@@ -308,7 +308,7 @@ struct cnic_dev {
#define CNIC_F_BNX2_CLASS 3
#define CNIC_F_BNX2X_CLASS 4
atomic_t ref_count;
- u8 mac_addr[6];
+ u8 mac_addr[ETH_ALEN];
int max_iscsi_conn;
int max_fcoe_conn;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index d964f302ac9..5701f3d1a16 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 132
+#define TG3_MIN_NUM 133
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "May 21, 2013"
+#define DRV_MODULE_RELDATE "Jul 29, 2013"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -3030,6 +3030,19 @@ static bool tg3_phy_power_bug(struct tg3 *tp)
return false;
}
+static bool tg3_phy_led_bug(struct tg3 *tp)
+{
+ switch (tg3_asic_rev(tp)) {
+ case ASIC_REV_5719:
+ if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+ !tp->pci_fn)
+ return true;
+ return false;
+ }
+
+ return false;
+}
+
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
u32 val;
@@ -3077,8 +3090,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
}
return;
} else if (do_low_power) {
- tg3_writephy(tp, MII_TG3_EXT_CTRL,
- MII_TG3_EXT_CTRL_FORCE_LED_OFF);
+ if (!tg3_phy_led_bug(tp))
+ tg3_writephy(tp, MII_TG3_EXT_CTRL,
+ MII_TG3_EXT_CTRL_FORCE_LED_OFF);
val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
@@ -4226,8 +4240,6 @@ static int tg3_power_down_prepare(struct tg3 *tp)
static void tg3_power_down(struct tg3 *tp)
{
- tg3_power_down_prepare(tp);
-
pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
pci_set_power_state(tp->pdev, PCI_D3hot);
}
@@ -6095,10 +6107,12 @@ static u64 tg3_refclk_read(struct tg3 *tp)
/* tp->lock must be held */
static void tg3_refclk_write(struct tg3 *tp, u64 newval)
{
- tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
+ u32 clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
+
+ tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_STOP);
tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
- tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
+ tw32_f(TG3_EAV_REF_CLCK_CTL, clock_ctl | TG3_EAV_REF_CLCK_CTL_RESUME);
}
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
@@ -6214,6 +6228,59 @@ static int tg3_ptp_settime(struct ptp_clock_info *ptp,
static int tg3_ptp_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
+ struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
+ u32 clock_ctl;
+ int rval = 0;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ if (rq->perout.index != 0)
+ return -EINVAL;
+
+ tg3_full_lock(tp, 0);
+ clock_ctl = tr32(TG3_EAV_REF_CLCK_CTL);
+ clock_ctl &= ~TG3_EAV_CTL_TSYNC_GPIO_MASK;
+
+ if (on) {
+ u64 nsec;
+
+ nsec = rq->perout.start.sec * 1000000000ULL +
+ rq->perout.start.nsec;
+
+ if (rq->perout.period.sec || rq->perout.period.nsec) {
+ netdev_warn(tp->dev,
+ "Device supports only a one-shot timesync output, period must be 0\n");
+ rval = -EINVAL;
+ goto err_out;
+ }
+
+ if (nsec & (1ULL << 63)) {
+ netdev_warn(tp->dev,
+ "Start value (nsec) is over limit. Maximum size of start is only 63 bits\n");
+ rval = -EINVAL;
+ goto err_out;
+ }
+
+ tw32(TG3_EAV_WATCHDOG0_LSB, (nsec & 0xffffffff));
+ tw32(TG3_EAV_WATCHDOG0_MSB,
+ TG3_EAV_WATCHDOG0_EN |
+ ((nsec >> 32) & TG3_EAV_WATCHDOG_MSB_MASK));
+
+ tw32(TG3_EAV_REF_CLCK_CTL,
+ clock_ctl | TG3_EAV_CTL_TSYNC_WDOG0);
+ } else {
+ tw32(TG3_EAV_WATCHDOG0_MSB, 0);
+ tw32(TG3_EAV_REF_CLCK_CTL, clock_ctl);
+ }
+
+err_out:
+ tg3_full_unlock(tp);
+ return rval;
+
+ default:
+ break;
+ }
+
return -EOPNOTSUPP;
}
@@ -6223,7 +6290,7 @@ static const struct ptp_clock_info tg3_ptp_caps = {
.max_adj = 250000000,
.n_alarm = 0,
.n_ext_ts = 0,
- .n_per_out = 0,
+ .n_per_out = 1,
.pps = 0,
.adjfreq = tg3_ptp_adjfreq,
.adjtime = tg3_ptp_adjtime,
@@ -8538,10 +8605,10 @@ static int tg3_mem_rx_acquire(struct tg3 *tp)
if (!i && tg3_flag(tp, ENABLE_RSS))
continue;
- tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
- TG3_RX_RCB_RING_BYTES(tp),
- &tnapi->rx_rcb_mapping,
- GFP_KERNEL | __GFP_ZERO);
+ tnapi->rx_rcb = dma_zalloc_coherent(&tp->pdev->dev,
+ TG3_RX_RCB_RING_BYTES(tp),
+ &tnapi->rx_rcb_mapping,
+ GFP_KERNEL);
if (!tnapi->rx_rcb)
goto err_out;
}
@@ -8590,10 +8657,9 @@ static int tg3_alloc_consistent(struct tg3 *tp)
{
int i;
- tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
- sizeof(struct tg3_hw_stats),
- &tp->stats_mapping,
- GFP_KERNEL | __GFP_ZERO);
+ tp->hw_stats = dma_zalloc_coherent(&tp->pdev->dev,
+ sizeof(struct tg3_hw_stats),
+ &tp->stats_mapping, GFP_KERNEL);
if (!tp->hw_stats)
goto err_out;
@@ -8601,10 +8667,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
struct tg3_napi *tnapi = &tp->napi[i];
struct tg3_hw_status *sblk;
- tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
- TG3_HW_STATUS_SIZE,
- &tnapi->status_mapping,
- GFP_KERNEL | __GFP_ZERO);
+ tnapi->hw_status = dma_zalloc_coherent(&tp->pdev->dev,
+ TG3_HW_STATUS_SIZE,
+ &tnapi->status_mapping,
+ GFP_KERNEL);
if (!tnapi->hw_status)
goto err_out;
@@ -10367,6 +10433,9 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
if (tg3_flag(tp, 5755_PLUS))
tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
+ if (tg3_asic_rev(tp) == ASIC_REV_5762)
+ tp->rx_mode |= RX_MODE_IPV4_FRAG_FIX;
+
if (tg3_flag(tp, ENABLE_RSS))
tp->rx_mode |= RX_MODE_RSS_ENABLE |
RX_MODE_RSS_ITBL_HASH_BITS_7 |
@@ -11502,7 +11571,7 @@ static int tg3_close(struct net_device *dev)
memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
- tg3_power_down(tp);
+ tg3_power_down_prepare(tp);
tg3_carrier_off(tp);
@@ -11724,9 +11793,6 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
if (tg3_flag(tp, NO_NVRAM))
return -EINVAL;
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
- return -EAGAIN;
-
offset = eeprom->offset;
len = eeprom->len;
eeprom->len = 0;
@@ -11784,9 +11850,6 @@ static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
u8 *buf;
__be32 start, end;
- if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
- return -EAGAIN;
-
if (tg3_flag(tp, NO_NVRAM) ||
eeprom->magic != TG3_EEPROM_MAGIC)
return -EINVAL;
@@ -13515,7 +13578,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
tg3_phy_start(tp);
}
if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
- tg3_power_down(tp);
+ tg3_power_down_prepare(tp);
}
@@ -15917,7 +15980,7 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
*/
if (tg3_flag(tp, 5780_CLASS)) {
tg3_flag_set(tp, 40BIT_DMA_BUG);
- tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
+ tp->msi_cap = tp->pdev->msi_cap;
} else {
struct pci_dev *bridge = NULL;
@@ -17547,11 +17610,6 @@ static int tg3_init_one(struct pci_dev *pdev,
tg3_asic_rev(tp) == ASIC_REV_5762)
tg3_flag_set(tp, PTP_CAPABLE);
- if (tg3_flag(tp, 5717_PLUS)) {
- /* Resume a low-power mode */
- tg3_frob_aux_power(tp, false);
- }
-
tg3_timer_init(tp);
tg3_carrier_off(tp);
@@ -17625,7 +17683,8 @@ err_out_free_res:
pci_release_regions(pdev);
err_out_disable_pdev:
- pci_disable_device(pdev);
+ if (pci_is_enabled(pdev))
+ pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
return err;
}
@@ -17754,6 +17813,23 @@ out:
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
+static void tg3_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tg3 *tp = netdev_priv(dev);
+
+ rtnl_lock();
+ netif_device_detach(dev);
+
+ if (netif_running(dev))
+ dev_close(dev);
+
+ if (system_state == SYSTEM_POWER_OFF)
+ tg3_power_down(tp);
+
+ rtnl_unlock();
+}
+
/**
* tg3_io_error_detected - called when PCI error is detected
* @pdev: Pointer to PCI device
@@ -17773,7 +17849,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
- if (!netif_running(netdev))
+ /* We probably don't have netdev yet */
+ if (!netdev || !netif_running(netdev))
goto done;
tg3_phy_stop(tp);
@@ -17794,8 +17871,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
done:
if (state == pci_channel_io_perm_failure) {
- tg3_napi_enable(tp);
- dev_close(netdev);
+ if (netdev) {
+ tg3_napi_enable(tp);
+ dev_close(netdev);
+ }
err = PCI_ERS_RESULT_DISCONNECT;
} else {
pci_disable_device(pdev);
@@ -17825,7 +17904,8 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
rtnl_lock();
if (pci_enable_device(pdev)) {
- netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
+ dev_err(&pdev->dev,
+ "Cannot re-enable PCI device after reset.\n");
goto done;
}
@@ -17833,7 +17913,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
pci_restore_state(pdev);
pci_save_state(pdev);
- if (!netif_running(netdev)) {
+ if (!netdev || !netif_running(netdev)) {
rc = PCI_ERS_RESULT_RECOVERED;
goto done;
}
@@ -17845,7 +17925,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
rc = PCI_ERS_RESULT_RECOVERED;
done:
- if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) {
+ if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) {
tg3_napi_enable(tp);
dev_close(netdev);
}
@@ -17909,6 +17989,7 @@ static struct pci_driver tg3_driver = {
.remove = tg3_remove_one,
.err_handler = &tg3_err_handler,
.driver.pm = &tg3_pm_ops,
+ .shutdown = tg3_shutdown,
};
module_pci_driver(tg3_driver);
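
The tg3 hunks above add a PCI .shutdown handler that detaches and closes the interface and only powers the NIC down on a real poweroff. A minimal sketch of that shape, not part of the patch; the example_* names are hypothetical and only the core kernel calls are real:

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Hypothetical helper; stands in for tg3_power_down() in the patch above. */
static void example_power_down(struct net_device *dev);

static void example_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	rtnl_lock();
	netif_device_detach(dev);	/* stop the stack from queueing new tx */

	if (netif_running(dev))
		dev_close(dev);		/* quiesce the interface */

	if (system_state == SYSTEM_POWER_OFF)
		example_power_down(dev);	/* cut power only on poweroff, not on reboot/kexec */

	rtnl_unlock();
}

/* Wired up through the driver's struct pci_driver: .shutdown = example_shutdown */
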
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index cd63d1189aa..ddb8be1298e 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -532,6 +532,7 @@
#define RX_MODE_RSS_ITBL_HASH_BITS_7 0x00700000
#define RX_MODE_RSS_ENABLE 0x00800000
#define RX_MODE_IPV6_CSUM_ENABLE 0x01000000
+#define RX_MODE_IPV4_FRAG_FIX 0x02000000
#define MAC_RX_STATUS 0x0000046c
#define RX_STATUS_REMOTE_TX_XOFFED 0x00000001
#define RX_STATUS_XOFF_RCVD 0x00000002
@@ -1818,12 +1819,21 @@
#define TG3_EAV_REF_CLCK_CTL 0x00006908
#define TG3_EAV_REF_CLCK_CTL_STOP 0x00000002
#define TG3_EAV_REF_CLCK_CTL_RESUME 0x00000004
+#define TG3_EAV_CTL_TSYNC_GPIO_MASK (0x3 << 16)
+#define TG3_EAV_CTL_TSYNC_WDOG0 (1 << 17)
+
+#define TG3_EAV_WATCHDOG0_LSB 0x00006918
+#define TG3_EAV_WATCHDOG0_MSB 0x0000691c
+#define TG3_EAV_WATCHDOG0_EN (1 << 31)
+#define TG3_EAV_WATCHDOG_MSB_MASK 0x7fffffff
+
#define TG3_EAV_REF_CLK_CORRECT_CTL 0x00006928
#define TG3_EAV_REF_CLK_CORRECT_EN (1 << 31)
#define TG3_EAV_REF_CLK_CORRECT_NEG (1 << 30)
#define TG3_EAV_REF_CLK_CORRECT_MASK 0xffffff
-/* 0x690c --> 0x7000 unused */
+
+/* 0x692c --> 0x7000 unused */
/* NVRAM Control registers */
#define NVRAM_CMD 0x00007000
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 57cd1bff59f..3c07064b2bc 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -1419,7 +1419,7 @@ static void bna_rx_sm_start_wait_entry(struct bna_rx *rx)
bna_bfi_rx_enet_start(rx);
}
-void
+static void
bna_rx_sm_stop_wait_entry(struct bna_rx *rx)
{
}
@@ -1472,7 +1472,7 @@ static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
bna_rxf_start(&rx->rxf);
}
-void
+static void
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
{
}
@@ -1528,7 +1528,7 @@ bna_rx_sm_start_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
}
}
-void
+static void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
struct bna_rxp *rxp;
@@ -1593,12 +1593,12 @@ static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
}
}
-void
+static void
bna_rx_sm_cleanup_wait_entry(struct bna_rx *rx)
{
}
-void
+static void
bna_rx_sm_cleanup_wait(struct bna_rx *rx, enum bna_rx_event event)
{
switch (event) {
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index c37f706d999..43405f654b4 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -37,8 +37,8 @@
extern char bfa_version[];
-#define CNA_FW_FILE_CT "ctfw-3.2.1.0.bin"
-#define CNA_FW_FILE_CT2 "ct2fw-3.2.1.0.bin"
+#define CNA_FW_FILE_CT "ctfw-3.2.1.1.bin"
+#define CNA_FW_FILE_CT2 "ct2fw-3.2.1.1.bin"
#define FC_SYMNAME_MAX 256 /*!< max name server symbolic name size */
#pragma pack(1)
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index bb5d63fb2e6..ce75de9bae9 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -304,7 +304,7 @@ MODULE_DEVICE_TABLE(of, at91ether_dt_ids);
/* Detect MAC & PHY and perform ethernet interface initialization */
static int __init at91ether_probe(struct platform_device *pdev)
{
- struct macb_platform_data *board_data = pdev->dev.platform_data;
+ struct macb_platform_data *board_data = dev_get_platdata(&pdev->dev);
struct resource *regs;
struct net_device *dev;
struct phy_device *phydev;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index e866608d7d9..92578690f6d 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -27,6 +27,7 @@
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/pinctrl/consumer.h>
@@ -124,7 +125,7 @@ void macb_get_hwaddr(struct macb *bp)
u8 addr[6];
int i;
- pdata = bp->pdev->dev.platform_data;
+ pdata = dev_get_platdata(&bp->pdev->dev);
/* Check all 4 address registers for a valid address */
for (i = 0; i < 4; i++) {
@@ -275,7 +276,7 @@ static int macb_mii_probe(struct net_device *dev)
phydev = phy_find_first(bp->mii_bus);
if (!phydev) {
netdev_err(dev, "no PHY found\n");
- return -1;
+ return -ENXIO;
}
pdata = dev_get_platdata(&bp->pdev->dev);
@@ -314,6 +315,7 @@ static int macb_mii_probe(struct net_device *dev)
int macb_mii_init(struct macb *bp)
{
struct macb_platform_data *pdata;
+ struct device_node *np;
int err = -ENXIO, i;
/* Enable management port */
@@ -333,10 +335,7 @@ int macb_mii_init(struct macb *bp)
bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
bp->mii_bus->parent = &bp->dev->dev;
- pdata = bp->pdev->dev.platform_data;
-
- if (pdata)
- bp->mii_bus->phy_mask = pdata->phy_mask;
+ pdata = dev_get_platdata(&bp->pdev->dev);
bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
if (!bp->mii_bus->irq) {
@@ -344,17 +343,45 @@ int macb_mii_init(struct macb *bp)
goto err_out_free_mdiobus;
}
- for (i = 0; i < PHY_MAX_ADDR; i++)
- bp->mii_bus->irq[i] = PHY_POLL;
-
dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
- if (mdiobus_register(bp->mii_bus))
+ np = bp->pdev->dev.of_node;
+ if (np) {
+ /* try dt phy registration */
+ err = of_mdiobus_register(bp->mii_bus, np);
+
+ /* fall back to standard PHY registration if no PHY was
+ found during DT PHY registration */
+ if (!err && !phy_find_first(bp->mii_bus)) {
+ for (i = 0; i < PHY_MAX_ADDR; i++) {
+ struct phy_device *phydev;
+
+ phydev = mdiobus_scan(bp->mii_bus, i);
+ if (IS_ERR(phydev)) {
+ err = PTR_ERR(phydev);
+ break;
+ }
+ }
+
+ if (err)
+ goto err_out_unregister_bus;
+ }
+ } else {
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ bp->mii_bus->irq[i] = PHY_POLL;
+
+ if (pdata)
+ bp->mii_bus->phy_mask = pdata->phy_mask;
+
+ err = mdiobus_register(bp->mii_bus);
+ }
+
+ if (err)
goto err_out_free_mdio_irq;
- if (macb_mii_probe(bp->dev) != 0) {
+ err = macb_mii_probe(bp->dev);
+ if (err)
goto err_out_unregister_bus;
- }
return 0;
@@ -1824,7 +1851,7 @@ static int __init macb_probe(struct platform_device *pdev)
err = of_get_phy_mode(pdev->dev.of_node);
if (err < 0) {
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata && pdata->is_rmii)
bp->phy_interface = PHY_INTERFACE_MODE_RMII;
else
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 7cb148c495c..78d6d6b970e 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -353,11 +353,9 @@ struct xgmac_extra_stats {
/* Receive errors */
unsigned long rx_watchdog;
unsigned long rx_da_filter_fail;
- unsigned long rx_sa_filter_fail;
unsigned long rx_payload_error;
unsigned long rx_ip_header_error;
/* Tx/Rx IRQ errors */
- unsigned long tx_undeflow;
unsigned long tx_process_stopped;
unsigned long rx_buf_unav;
unsigned long rx_process_stopped;
@@ -393,6 +391,7 @@ struct xgmac_priv {
char rx_pause;
char tx_pause;
int wolopts;
+ struct work_struct tx_timeout_work;
};
/* XGMAC Configuration Settings */
@@ -409,6 +408,9 @@ struct xgmac_priv {
#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s)
#define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s)
+#define tx_dma_ring_space(p) \
+ dma_ring_space((p)->tx_head, (p)->tx_tail, DMA_TX_RING_SZ)
+
/* XGMAC Descriptor Access Helpers */
static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
{
@@ -421,7 +423,7 @@ static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
{
- u32 len = cpu_to_le32(p->flags);
+ u32 len = le32_to_cpu(p->buf_size);
return (len & DESC_BUFFER1_SZ_MASK) +
((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
}
@@ -464,11 +466,23 @@ static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
p->flags = cpu_to_le32(tmpflags);
}
+static inline void desc_clear_tx_owner(struct xgmac_dma_desc *p)
+{
+ u32 tmpflags = le32_to_cpu(p->flags);
+ tmpflags &= TXDESC_END_RING;
+ p->flags = cpu_to_le32(tmpflags);
+}
+
static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
{
return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
}
+static inline int desc_get_tx_fs(struct xgmac_dma_desc *p)
+{
+ return le32_to_cpu(p->flags) & TXDESC_FIRST_SEG;
+}
+
static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
{
return le32_to_cpu(p->buf1_addr);
@@ -609,10 +623,15 @@ static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
{
u32 data;
- data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
- writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
- data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
- writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+ if (addr) {
+ data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
+ writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+ } else {
+ writel(0, ioaddr + XGMAC_ADDR_HIGH(num));
+ writel(0, ioaddr + XGMAC_ADDR_LOW(num));
+ }
}
static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
@@ -683,9 +702,14 @@ static void xgmac_rx_refill(struct xgmac_priv *priv)
if (unlikely(skb == NULL))
break;
- priv->rx_skbuff[entry] = skb;
paddr = dma_map_single(priv->device, skb->data,
- bufsz, DMA_FROM_DEVICE);
+ priv->dma_buf_sz - NET_IP_ALIGN,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->device, paddr)) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ priv->rx_skbuff[entry] = skb;
desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
}
@@ -782,20 +806,21 @@ static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
return;
for (i = 0; i < DMA_RX_RING_SZ; i++) {
- if (priv->rx_skbuff[i] == NULL)
+ struct sk_buff *skb = priv->rx_skbuff[i];
+ if (skb == NULL)
continue;
p = priv->dma_rx + i;
dma_unmap_single(priv->device, desc_get_buf_addr(p),
- priv->dma_buf_sz, DMA_FROM_DEVICE);
- dev_kfree_skb_any(priv->rx_skbuff[i]);
+ priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
priv->rx_skbuff[i] = NULL;
}
}
static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
{
- int i, f;
+ int i;
struct xgmac_dma_desc *p;
if (!priv->tx_skbuff)
@@ -806,16 +831,15 @@ static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
continue;
p = priv->dma_tx + i;
- dma_unmap_single(priv->device, desc_get_buf_addr(p),
- desc_get_buf_len(p), DMA_TO_DEVICE);
-
- for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) {
- p = priv->dma_tx + i++;
+ if (desc_get_tx_fs(p))
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+ else
dma_unmap_page(priv->device, desc_get_buf_addr(p),
desc_get_buf_len(p), DMA_TO_DEVICE);
- }
- dev_kfree_skb_any(priv->tx_skbuff[i]);
+ if (desc_get_tx_ls(p))
+ dev_kfree_skb_any(priv->tx_skbuff[i]);
priv->tx_skbuff[i] = NULL;
}
}
@@ -852,8 +876,6 @@ static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
*/
static void xgmac_tx_complete(struct xgmac_priv *priv)
{
- int i;
-
while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
unsigned int entry = priv->tx_tail;
struct sk_buff *skb = priv->tx_skbuff[entry];
@@ -863,55 +885,45 @@ static void xgmac_tx_complete(struct xgmac_priv *priv)
if (desc_get_owner(p))
break;
- /* Verify tx error by looking at the last segment */
- if (desc_get_tx_ls(p))
- desc_get_tx_status(priv, p);
-
netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
priv->tx_head, priv->tx_tail);
- dma_unmap_single(priv->device, desc_get_buf_addr(p),
- desc_get_buf_len(p), DMA_TO_DEVICE);
-
- priv->tx_skbuff[entry] = NULL;
- priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
-
- if (!skb) {
- continue;
- }
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- entry = priv->tx_tail = dma_ring_incr(priv->tx_tail,
- DMA_TX_RING_SZ);
- p = priv->dma_tx + priv->tx_tail;
-
+ if (desc_get_tx_fs(p))
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+ else
dma_unmap_page(priv->device, desc_get_buf_addr(p),
desc_get_buf_len(p), DMA_TO_DEVICE);
+
+ /* Check tx error on the last segment */
+ if (desc_get_tx_ls(p)) {
+ desc_get_tx_status(priv, p);
+ dev_kfree_skb(skb);
}
- dev_kfree_skb(skb);
+ priv->tx_skbuff[entry] = NULL;
+ priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
}
- if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
- MAX_SKB_FRAGS)
+ /* Ensure tx_tail is visible to xgmac_xmit */
+ smp_mb();
+ if (unlikely(netif_queue_stopped(priv->dev) &&
+ (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)))
netif_wake_queue(priv->dev);
}
-/**
- * xgmac_tx_err:
- * @priv: pointer to the private device structure
- * Description: it cleans the descriptors and restarts the transmission
- * in case of errors.
- */
-static void xgmac_tx_err(struct xgmac_priv *priv)
+static void xgmac_tx_timeout_work(struct work_struct *work)
{
- u32 reg, value, inten;
+ u32 reg, value;
+ struct xgmac_priv *priv =
+ container_of(work, struct xgmac_priv, tx_timeout_work);
- netif_stop_queue(priv->dev);
+ napi_disable(&priv->napi);
- inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+ netif_tx_lock(priv->dev);
+
reg = readl(priv->base + XGMAC_DMA_CONTROL);
writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
do {
@@ -927,9 +939,15 @@ static void xgmac_tx_err(struct xgmac_priv *priv)
writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
priv->base + XGMAC_DMA_STATUS);
- writel(inten, priv->base + XGMAC_DMA_INTR_ENA);
+ netif_tx_unlock(priv->dev);
netif_wake_queue(priv->dev);
+
+ napi_enable(&priv->napi);
+
+ /* Enable interrupts */
+ writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_STATUS);
+ writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
}
static int xgmac_hw_init(struct net_device *dev)
@@ -957,9 +975,7 @@ static int xgmac_hw_init(struct net_device *dev)
DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
- /* Enable interrupts */
- writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
- writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+ writel(0, ioaddr + XGMAC_DMA_INTR_ENA);
/* Mask power mgt interrupt */
writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
@@ -1027,6 +1043,10 @@ static int xgmac_open(struct net_device *dev)
napi_enable(&priv->napi);
netif_start_queue(dev);
+ /* Enable interrupts */
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
return 0;
}
@@ -1087,7 +1107,7 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
if (dma_mapping_error(priv->device, paddr)) {
dev_kfree_skb(skb);
- return -EIO;
+ return NETDEV_TX_OK;
}
priv->tx_skbuff[entry] = skb;
desc_set_buf_addr_and_size(desc, paddr, len);
@@ -1099,14 +1119,12 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
paddr = skb_frag_dma_map(priv->device, frag, 0, len,
DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, paddr)) {
- dev_kfree_skb(skb);
- return -EIO;
- }
+ if (dma_mapping_error(priv->device, paddr))
+ goto dma_err;
entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
desc = priv->dma_tx + entry;
- priv->tx_skbuff[entry] = NULL;
+ priv->tx_skbuff[entry] = skb;
desc_set_buf_addr_and_size(desc, paddr, len);
if (i < (nfrags - 1))
@@ -1124,13 +1142,35 @@ static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
wmb();
desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
+ writel(1, priv->base + XGMAC_DMA_TX_POLL);
+
priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
- writel(1, priv->base + XGMAC_DMA_TX_POLL);
- if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
- MAX_SKB_FRAGS)
+ /* Ensure tx_head update is visible to tx completion */
+ smp_mb();
+ if (unlikely(tx_dma_ring_space(priv) <= MAX_SKB_FRAGS)) {
netif_stop_queue(dev);
+ /* Ensure netif_stop_queue is visible to tx completion */
+ smp_mb();
+ if (tx_dma_ring_space(priv) > MAX_SKB_FRAGS)
+ netif_start_queue(dev);
+ }
+ return NETDEV_TX_OK;
+dma_err:
+ entry = priv->tx_head;
+ for ( ; i > 0; i--) {
+ entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
+ desc = priv->dma_tx + entry;
+ priv->tx_skbuff[entry] = NULL;
+ dma_unmap_page(priv->device, desc_get_buf_addr(desc),
+ desc_get_buf_len(desc), DMA_TO_DEVICE);
+ desc_clear_tx_owner(desc);
+ }
+ desc = first;
+ dma_unmap_single(priv->device, desc_get_buf_addr(desc),
+ desc_get_buf_len(desc), DMA_TO_DEVICE);
+ dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1174,7 +1214,7 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
skb_put(skb, frame_len);
dma_unmap_single(priv->device, desc_get_buf_addr(p),
- frame_len, DMA_FROM_DEVICE);
+ priv->dma_buf_sz - NET_IP_ALIGN, DMA_FROM_DEVICE);
skb->protocol = eth_type_trans(skb, priv->dev);
skb->ip_summed = ip_checksum;
@@ -1225,9 +1265,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
static void xgmac_tx_timeout(struct net_device *dev)
{
struct xgmac_priv *priv = netdev_priv(dev);
-
- /* Clear Tx resources and restart transmitting again */
- xgmac_tx_err(priv);
+ schedule_work(&priv->tx_timeout_work);
}
/**
@@ -1286,6 +1324,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
use_hash = true;
value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
+ } else {
+ use_hash = false;
}
netdev_for_each_mc_addr(ha, dev) {
if (use_hash) {
@@ -1302,6 +1342,8 @@ static void xgmac_set_rx_mode(struct net_device *dev)
}
out:
+ for (i = reg; i < XGMAC_MAX_FILTER_ADDR; i++)
+ xgmac_set_mac_addr(ioaddr, NULL, reg);
for (i = 0; i < XGMAC_NUM_HASH; i++)
writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
@@ -1366,7 +1408,6 @@ static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
{
u32 intr_status;
- bool tx_err = false;
struct net_device *dev = (struct net_device *)dev_id;
struct xgmac_priv *priv = netdev_priv(dev);
struct xgmac_extra_stats *x = &priv->xstats;
@@ -1396,16 +1437,12 @@ static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
if (intr_status & DMA_STATUS_TPS) {
netdev_err(priv->dev, "transmit process stopped\n");
x->tx_process_stopped++;
- tx_err = true;
+ schedule_work(&priv->tx_timeout_work);
}
if (intr_status & DMA_STATUS_FBI) {
netdev_err(priv->dev, "fatal bus error\n");
x->fatal_bus_error++;
- tx_err = true;
}
-
- if (tx_err)
- xgmac_tx_err(priv);
}
/* TX/RX NORMAL interrupts */
@@ -1569,7 +1606,6 @@ static const struct xgmac_stats xgmac_gstrings_stats[] = {
XGMAC_STAT(rx_payload_error),
XGMAC_STAT(rx_ip_header_error),
XGMAC_STAT(rx_da_filter_fail),
- XGMAC_STAT(rx_sa_filter_fail),
XGMAC_STAT(fatal_bus_error),
XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
@@ -1708,6 +1744,7 @@ static int xgmac_probe(struct platform_device *pdev)
ndev->netdev_ops = &xgmac_netdev_ops;
SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
spin_lock_init(&priv->stats_lock);
+ INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);
priv->device = &pdev->dev;
priv->dev = ndev;
@@ -1759,7 +1796,7 @@ static int xgmac_probe(struct platform_device *pdev)
if (device_can_wakeup(priv->device))
priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
- ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+ ndev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
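
The xgmac changes above switch to the usual stop/wake queue pairing guarded by memory barriers. A minimal sketch of that pairing, not part of the patch; struct example_priv and ring_space() are hypothetical stand-ins for the driver's private data and tx_dma_ring_space():

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct example_priv;

/* Hypothetical helper returning free tx descriptors. */
static unsigned int ring_space(struct example_priv *priv);

static void example_xmit_tail(struct example_priv *priv, struct net_device *dev)
{
	/* Stop when the ring is nearly full, then re-check after a barrier so
	 * a racing completion cannot leave the queue stopped forever. */
	if (unlikely(ring_space(priv) <= MAX_SKB_FRAGS)) {
		netif_stop_queue(dev);
		smp_mb();	/* order the stop against the space re-check */
		if (ring_space(priv) > MAX_SKB_FRAGS)
			netif_start_queue(dev);
	}
}

static void example_tx_complete_tail(struct example_priv *priv, struct net_device *dev)
{
	smp_mb();	/* publish the advanced tail before testing the queue */
	if (unlikely(netif_queue_stopped(dev) &&
		     ring_space(priv) > MAX_SKB_FRAGS))
		netif_wake_queue(dev);
}
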
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index 687ec4a8bb4..9c89dc8fe10 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -455,11 +455,6 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
q->pg_chunk.offset = 0;
mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
0, q->alloc_size, PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) {
- __free_pages(q->pg_chunk.page, order);
- q->pg_chunk.page = NULL;
- return -EIO;
- }
q->pg_chunk.mapping = mapping;
}
sd->pg_chunk = q->pg_chunk;
@@ -954,75 +949,40 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
return flits_to_desc(flits);
}
-
-/* map_skb - map a packet main body and its page fragments
- * @pdev: the PCI device
- * @skb: the packet
- * @addr: placeholder to save the mapped addresses
- *
- * map the main body of an sk_buff and its page fragments, if any.
- */
-static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb,
- dma_addr_t *addr)
-{
- const skb_frag_t *fp, *end;
- const struct skb_shared_info *si;
-
- *addr = pci_map_single(pdev, skb->data, skb_headlen(skb),
- PCI_DMA_TODEVICE);
- if (pci_dma_mapping_error(pdev, *addr))
- goto out_err;
-
- si = skb_shinfo(skb);
- end = &si->frags[si->nr_frags];
-
- for (fp = si->frags; fp < end; fp++) {
- *++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp),
- DMA_TO_DEVICE);
- if (pci_dma_mapping_error(pdev, *addr))
- goto unwind;
- }
- return 0;
-
-unwind:
- while (fp-- > si->frags)
- dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp),
- DMA_TO_DEVICE);
-
- pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE);
-out_err:
- return -ENOMEM;
-}
-
/**
- * write_sgl - populate a scatter/gather list for a packet
+ * make_sgl - populate a scatter/gather list for a packet
* @skb: the packet
* @sgp: the SGL to populate
* @start: start address of skb main body data to include in the SGL
* @len: length of skb main body data to include in the SGL
- * @addr: the list of the mapped addresses
+ * @pdev: the PCI device
*
- * Copies the scatter/gather list for the buffers that make up a packet
+ * Generates a scatter/gather list for the buffers that make up a packet
* and returns the SGL size in 8-byte words. The caller must size the SGL
* appropriately.
*/
-static inline unsigned int write_sgl(const struct sk_buff *skb,
+static inline unsigned int make_sgl(const struct sk_buff *skb,
struct sg_ent *sgp, unsigned char *start,
- unsigned int len, const dma_addr_t *addr)
+ unsigned int len, struct pci_dev *pdev)
{
- unsigned int i, j = 0, k = 0, nfrags;
+ dma_addr_t mapping;
+ unsigned int i, j = 0, nfrags;
if (len) {
+ mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE);
sgp->len[0] = cpu_to_be32(len);
- sgp->addr[j++] = cpu_to_be64(addr[k++]);
+ sgp->addr[0] = cpu_to_be64(mapping);
+ j = 1;
}
nfrags = skb_shinfo(skb)->nr_frags;
for (i = 0; i < nfrags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
sgp->len[j] = cpu_to_be32(skb_frag_size(frag));
- sgp->addr[j] = cpu_to_be64(addr[k++]);
+ sgp->addr[j] = cpu_to_be64(mapping);
j ^= 1;
if (j == 0)
++sgp;
@@ -1178,7 +1138,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
const struct port_info *pi,
unsigned int pidx, unsigned int gen,
struct sge_txq *q, unsigned int ndesc,
- unsigned int compl, const dma_addr_t *addr)
+ unsigned int compl)
{
unsigned int flits, sgl_flits, cntrl, tso_info;
struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1];
@@ -1236,7 +1196,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
}
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
- sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr);
+ sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev);
write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen,
htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl),
@@ -1267,7 +1227,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
struct netdev_queue *txq;
struct sge_qset *qs;
struct sge_txq *q;
- dma_addr_t addr[MAX_SKB_FRAGS + 1];
/*
* The chip min packet length is 9 octets but play safe and reject
@@ -1296,11 +1255,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) {
- dev_kfree_skb(skb);
- return NETDEV_TX_OK;
- }
-
q->in_use += ndesc;
if (unlikely(credits - ndesc < q->stop_thres)) {
t3_stop_tx_queue(txq, qs, q);
@@ -1358,7 +1312,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(!skb_shared(skb)))
skb_orphan(skb);
- write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr);
+ write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl);
check_ring_tx_db(adap, q);
return NETDEV_TX_OK;
}
@@ -1623,8 +1577,7 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev,
*/
static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
struct sge_txq *q, unsigned int pidx,
- unsigned int gen, unsigned int ndesc,
- const dma_addr_t *addr)
+ unsigned int gen, unsigned int ndesc)
{
unsigned int sgl_flits, flits;
struct work_request_hdr *from;
@@ -1645,9 +1598,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb,
flits = skb_transport_offset(skb) / 8;
sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl;
- sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb),
- skb_tail_pointer(skb) -
- skb_transport_header(skb), addr);
+ sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb),
+ skb->tail - skb->transport_header,
+ adap->pdev);
if (need_skb_unmap()) {
setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits);
skb->destructor = deferred_unmap_destructor;
@@ -1705,11 +1658,6 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
goto again;
}
- if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) {
- spin_unlock(&q->lock);
- return NET_XMIT_SUCCESS;
- }
-
gen = q->gen;
q->in_use += ndesc;
pidx = q->pidx;
@@ -1720,7 +1668,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
}
spin_unlock(&q->lock);
- write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head);
+ write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
check_ring_tx_db(adap, q);
return NET_XMIT_SUCCESS;
}
@@ -1738,7 +1686,6 @@ static void restart_offloadq(unsigned long data)
struct sge_txq *q = &qs->txq[TXQ_OFLD];
const struct port_info *pi = netdev_priv(qs->netdev);
struct adapter *adap = pi->adapter;
- unsigned int written = 0;
spin_lock(&q->lock);
again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
@@ -1758,14 +1705,10 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
break;
}
- if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head))
- break;
-
gen = q->gen;
q->in_use += ndesc;
pidx = q->pidx;
q->pidx += ndesc;
- written += ndesc;
if (q->pidx >= q->size) {
q->pidx -= q->size;
q->gen ^= 1;
@@ -1773,8 +1716,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
__skb_unlink(skb, &q->sendq);
spin_unlock(&q->lock);
- write_ofld_wr(adap, skb, q, pidx, gen, ndesc,
- (dma_addr_t *)skb->head);
+ write_ofld_wr(adap, skb, q, pidx, gen, ndesc);
spin_lock(&q->lock);
}
spin_unlock(&q->lock);
@@ -1784,9 +1726,8 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK);
set_bit(TXQ_LAST_PKT_DB, &q->flags);
#endif
wmb();
- if (likely(written))
- t3_write_reg(adap, A_SG_KDOORBELL,
- F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
+ t3_write_reg(adap, A_SG_KDOORBELL,
+ F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 2aafb809e06..dfd1e36f575 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -576,6 +576,7 @@ struct adapter {
struct l2t_data *l2t;
void *uld_handle[CXGB4_ULD_MAX];
struct list_head list_node;
+ struct list_head rcu_node;
struct tid_info tids;
void **tid_release_head;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 5d5f2685ee2..0d0665ca6f1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -60,6 +60,7 @@
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
+#include <net/addrconf.h>
#include <asm/uaccess.h>
#include "cxgb4.h"
@@ -68,6 +69,11 @@
#include "t4fw_api.h"
#include "l2t.h"
+#include <../drivers/net/bonding/bonding.h>
+
+#ifdef DRV_VERSION
+#undef DRV_VERSION
+#endif
#define DRV_VERSION "2.0.0-ko"
#define DRV_DESC "Chelsio T4/T5 Network Driver"
@@ -400,6 +406,9 @@ static struct dentry *cxgb4_debugfs_root;
static LIST_HEAD(adapter_list);
static DEFINE_MUTEX(uld_mutex);
+/* Adapter list to be accessed from atomic context */
+static LIST_HEAD(adap_rcu_list);
+static DEFINE_SPINLOCK(adap_rcu_lock);
static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
static const char *uld_str[] = { "RDMA", "iSCSI" };
@@ -3227,6 +3236,38 @@ static int tid_init(struct tid_info *t)
return 0;
}
+static int cxgb4_clip_get(const struct net_device *dev,
+ const struct in6_addr *lip)
+{
+ struct adapter *adap;
+ struct fw_clip_cmd c;
+
+ adap = netdev2adap(dev);
+ memset(&c, 0, sizeof(c));
+ c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c));
+ *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
+ *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
+static int cxgb4_clip_release(const struct net_device *dev,
+ const struct in6_addr *lip)
+{
+ struct adapter *adap;
+ struct fw_clip_cmd c;
+
+ adap = netdev2adap(dev);
+ memset(&c, 0, sizeof(c));
+ c.op_to_write = htonl(FW_CMD_OP(FW_CLIP_CMD) |
+ FW_CMD_REQUEST | FW_CMD_READ);
+ c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c));
+ *(__be64 *)&c.ip_hi = *(__be64 *)(lip->s6_addr);
+ *(__be64 *)&c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
+}
+
/**
* cxgb4_create_server - create an IP server
* @dev: the device
@@ -3246,6 +3287,7 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
struct sk_buff *skb;
struct adapter *adap;
struct cpl_pass_open_req *req;
+ int ret;
skb = alloc_skb(sizeof(*req), GFP_KERNEL);
if (!skb)
@@ -3263,10 +3305,78 @@ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
req->opt0 = cpu_to_be64(TX_CHAN(chan));
req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
- return t4_mgmt_tx(adap, skb);
+ ret = t4_mgmt_tx(adap, skb);
+ return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);
+/* cxgb4_create_server6 - create an IPv6 server
+ * @dev: the device
+ * @stid: the server TID
+ * @sip: local IPv6 address to bind server to
+ * @sport: the server's TCP port
+ * @queue: queue to direct messages from this server to
+ *
+ * Create an IPv6 server for the given port and address.
+ * Returns <0 on error and one of the %NET_XMIT_* values on success.
+ */
+int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
+ const struct in6_addr *sip, __be16 sport,
+ unsigned int queue)
+{
+ unsigned int chan;
+ struct sk_buff *skb;
+ struct adapter *adap;
+ struct cpl_pass_open_req6 *req;
+ int ret;
+
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ adap = netdev2adap(dev);
+ req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
+ req->local_port = sport;
+ req->peer_port = htons(0);
+ req->local_ip_hi = *(__be64 *)(sip->s6_addr);
+ req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
+ req->peer_ip_hi = cpu_to_be64(0);
+ req->peer_ip_lo = cpu_to_be64(0);
+ chan = rxq_to_chan(&adap->sge, queue);
+ req->opt0 = cpu_to_be64(TX_CHAN(chan));
+ req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
+ SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+ ret = t4_mgmt_tx(adap, skb);
+ return net_xmit_eval(ret);
+}
+EXPORT_SYMBOL(cxgb4_create_server6);
+
+int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
+ unsigned int queue, bool ipv6)
+{
+ struct sk_buff *skb;
+ struct adapter *adap;
+ struct cpl_close_listsvr_req *req;
+ int ret;
+
+ adap = netdev2adap(dev);
+
+ skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
+ INIT_TP_WR(req, 0);
+ OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
+ req->reply_ctrl = htons(NO_REPLY(0) | (ipv6 ? LISTSVR_IPV6(1) :
+ LISTSVR_IPV6(0)) | QUEUENO(queue));
+ ret = t4_mgmt_tx(adap, skb);
+ return net_xmit_eval(ret);
+}
+EXPORT_SYMBOL(cxgb4_remove_server);
+
/**
* cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
* @mtus: the HW MTU table
@@ -3721,6 +3831,10 @@ static void attach_ulds(struct adapter *adap)
{
unsigned int i;
+ spin_lock(&adap_rcu_lock);
+ list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
+ spin_unlock(&adap_rcu_lock);
+
mutex_lock(&uld_mutex);
list_add_tail(&adap->list_node, &adapter_list);
for (i = 0; i < CXGB4_ULD_MAX; i++)
@@ -3746,6 +3860,10 @@ static void detach_ulds(struct adapter *adap)
netevent_registered = false;
}
mutex_unlock(&uld_mutex);
+
+ spin_lock(&adap_rcu_lock);
+ list_del_rcu(&adap->rcu_node);
+ spin_unlock(&adap_rcu_lock);
}
static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
@@ -3809,6 +3927,168 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
}
EXPORT_SYMBOL(cxgb4_unregister_uld);
+/* Check whether the netdev on which the event occurred belongs to us.
+ * Return success (1) if it does, otherwise failure (0).
+ */
+static int cxgb4_netdev(struct net_device *netdev)
+{
+ struct adapter *adap;
+ int i;
+
+ spin_lock(&adap_rcu_lock);
+ list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
+ for (i = 0; i < MAX_NPORTS; i++)
+ if (adap->port[i] == netdev) {
+ spin_unlock(&adap_rcu_lock);
+ return 1;
+ }
+ spin_unlock(&adap_rcu_lock);
+ return 0;
+}
+
+static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
+ unsigned long event)
+{
+ int ret = NOTIFY_DONE;
+
+ rcu_read_lock();
+ if (cxgb4_netdev(event_dev)) {
+ switch (event) {
+ case NETDEV_UP:
+ ret = cxgb4_clip_get(event_dev,
+ (const struct in6_addr *)ifa->addr.s6_addr);
+ if (ret < 0) {
+ rcu_read_unlock();
+ return ret;
+ }
+ ret = NOTIFY_OK;
+ break;
+ case NETDEV_DOWN:
+ cxgb4_clip_release(event_dev,
+ (const struct in6_addr *)ifa->addr.s6_addr);
+ ret = NOTIFY_OK;
+ break;
+ default:
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+static int cxgb4_inet6addr_handler(struct notifier_block *this,
+ unsigned long event, void *data)
+{
+ struct inet6_ifaddr *ifa = data;
+ struct net_device *event_dev;
+ int ret = NOTIFY_DONE;
+ struct bonding *bond = netdev_priv(ifa->idev->dev);
+ struct slave *slave;
+ struct pci_dev *first_pdev = NULL;
+
+ if (ifa->idev->dev->priv_flags & IFF_802_1Q_VLAN) {
+ event_dev = vlan_dev_real_dev(ifa->idev->dev);
+ ret = clip_add(event_dev, ifa, event);
+ } else if (ifa->idev->dev->flags & IFF_MASTER) {
+ /* Two different adapters may be enslaved in the same bond, so
+ * find each distinct adapter and add the CLIP entry to it
+ * exactly once.
+ */
+ read_lock(&bond->lock);
+ bond_for_each_slave(bond, slave) {
+ if (!first_pdev) {
+ ret = clip_add(slave->dev, ifa, event);
+ /* Initialize first_pdev only if clip_add() succeeded,
+ * since success means the device is ours.
+ */
+ if (ret == NOTIFY_OK)
+ first_pdev = to_pci_dev(
+ slave->dev->dev.parent);
+ } else if (first_pdev !=
+ to_pci_dev(slave->dev->dev.parent))
+ ret = clip_add(slave->dev, ifa, event);
+ }
+ read_unlock(&bond->lock);
+ } else
+ ret = clip_add(ifa->idev->dev, ifa, event);
+
+ return ret;
+}
+
+static struct notifier_block cxgb4_inet6addr_notifier = {
+ .notifier_call = cxgb4_inet6addr_handler
+};
+
+/* Retrieves IPv6 addresses from a root device (bond, vlan) associated with
+ * a physical device.
+ * The physical device reference is needed to send the actual CLIP command.
+ */
+static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
+{
+ struct inet6_dev *idev = NULL;
+ struct inet6_ifaddr *ifa;
+ int ret = 0;
+
+ idev = __in6_dev_get(root_dev);
+ if (!idev)
+ return ret;
+
+ read_lock_bh(&idev->lock);
+ list_for_each_entry(ifa, &idev->addr_list, if_list) {
+ ret = cxgb4_clip_get(dev,
+ (const struct in6_addr *)ifa->addr.s6_addr);
+ if (ret < 0)
+ break;
+ }
+ read_unlock_bh(&idev->lock);
+
+ return ret;
+}
+
+static int update_root_dev_clip(struct net_device *dev)
+{
+ struct net_device *root_dev = NULL;
+ int i, ret = 0;
+
+ /* First populate the real net device's IPv6 addresses */
+ ret = update_dev_clip(dev, dev);
+ if (ret)
+ return ret;
+
+ /* Parse all bond and vlan devices layered on top of the physical dev */
+ for (i = 0; i < VLAN_N_VID; i++) {
+ root_dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), i);
+ if (!root_dev)
+ continue;
+
+ ret = update_dev_clip(root_dev, dev);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static void update_clip(const struct adapter *adap)
+{
+ int i;
+ struct net_device *dev;
+ int ret;
+
+ rcu_read_lock();
+
+ for (i = 0; i < MAX_NPORTS; i++) {
+ dev = adap->port[i];
+ ret = 0;
+
+ if (dev)
+ ret = update_root_dev_clip(dev);
+
+ if (ret < 0)
+ break;
+ }
+ rcu_read_unlock();
+}
+
/**
* cxgb_up - enable the adapter
* @adap: adapter being enabled
@@ -3854,6 +4134,7 @@ static int cxgb_up(struct adapter *adap)
t4_intr_enable(adap);
adap->flags |= FULL_INIT_DONE;
notify_ulds(adap, CXGB4_STATE_UP);
+ update_clip(adap);
out:
return err;
irq_err:
@@ -5870,11 +6151,15 @@ static int __init cxgb4_init_module(void)
ret = pci_register_driver(&cxgb4_driver);
if (ret < 0)
debugfs_remove(cxgb4_debugfs_root);
+
+ register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
+
return ret;
}
static void __exit cxgb4_cleanup_module(void)
{
+ unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
pci_unregister_driver(&cxgb4_driver);
debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
flush_workqueue(workq);
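
The cxgb4 additions above register an inet6addr notifier so IPv6 addresses on our ports (including vlan and bond devices stacked on them) are mirrored into the adapter's CLIP table. A bare-bones sketch of that notifier shape, not part of the patch; the example_* names are made up, only the notifier and NETDEV_* APIs are real:

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <net/if_inet6.h>
#include <net/addrconf.h>

static int example_inet6addr_event(struct notifier_block *nb,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;

	switch (event) {
	case NETDEV_UP:
		/* program ifa->addr into the NIC (the CLIP table in cxgb4) */
		break;
	case NETDEV_DOWN:
		/* release the address again */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_inet6addr_nb = {
	.notifier_call = example_inet6addr_event,
};

/* register_inet6addr_notifier(&example_inet6addr_nb) at module init,
 * unregister_inet6addr_notifier(&example_inet6addr_nb) at module exit. */
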
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 4faf4d067ee..6f21f2451c3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -154,6 +154,11 @@ struct in6_addr;
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
__be32 sip, __be16 sport, __be16 vlan,
unsigned int queue);
+int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
+ const struct in6_addr *sip, __be16 sport,
+ unsigned int queue);
+int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
+ unsigned int queue, bool ipv6);
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
__be32 sip, __be16 sport, __be16 vlan,
unsigned int queue,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 01d48444120..cd6874b571e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -320,6 +320,21 @@ struct cpl_act_open_req6 {
__be32 opt2;
};
+struct cpl_t5_act_open_req6 {
+ WR_HDR;
+ union opcode_tid ot;
+ __be16 local_port;
+ __be16 peer_port;
+ __be64 local_ip_hi;
+ __be64 local_ip_lo;
+ __be64 peer_ip_hi;
+ __be64 peer_ip_lo;
+ __be64 opt0;
+ __be32 rsvd;
+ __be32 opt2;
+ __be64 params;
+};
+
struct cpl_act_open_rpl {
union opcode_tid ot;
__be32 atid_status;
@@ -405,7 +420,7 @@ struct cpl_close_listsvr_req {
WR_HDR;
union opcode_tid ot;
__be16 reply_ctrl;
-#define LISTSVR_IPV6 (1 << 14)
+#define LISTSVR_IPV6(x) ((x) << 14)
__be16 rsvd;
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index d1c755f78aa..6f77ac48774 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -616,6 +616,7 @@ enum fw_cmd_opcodes {
FW_RSS_IND_TBL_CMD = 0x20,
FW_RSS_GLB_CONFIG_CMD = 0x22,
FW_RSS_VI_CONFIG_CMD = 0x23,
+ FW_CLIP_CMD = 0x28,
FW_LASTC2E_CMD = 0x40,
FW_ERROR_CMD = 0x80,
FW_DEBUG_CMD = 0x81,
@@ -2062,6 +2063,28 @@ struct fw_rss_vi_config_cmd {
} u;
};
+struct fw_clip_cmd {
+ __be32 op_to_write;
+ __be32 alloc_to_len16;
+ __be64 ip_hi;
+ __be64 ip_lo;
+ __be32 r4[2];
+};
+
+#define S_FW_CLIP_CMD_ALLOC 31
+#define M_FW_CLIP_CMD_ALLOC 0x1
+#define V_FW_CLIP_CMD_ALLOC(x) ((x) << S_FW_CLIP_CMD_ALLOC)
+#define G_FW_CLIP_CMD_ALLOC(x) \
+ (((x) >> S_FW_CLIP_CMD_ALLOC) & M_FW_CLIP_CMD_ALLOC)
+#define F_FW_CLIP_CMD_ALLOC V_FW_CLIP_CMD_ALLOC(1U)
+
+#define S_FW_CLIP_CMD_FREE 30
+#define M_FW_CLIP_CMD_FREE 0x1
+#define V_FW_CLIP_CMD_FREE(x) ((x) << S_FW_CLIP_CMD_FREE)
+#define G_FW_CLIP_CMD_FREE(x) \
+ (((x) >> S_FW_CLIP_CMD_FREE) & M_FW_CLIP_CMD_FREE)
+#define F_FW_CLIP_CMD_FREE V_FW_CLIP_CMD_FREE(1U)
+
enum fw_error_type {
FW_ERROR_TYPE_EXCEPTION = 0x0,
FW_ERROR_TYPE_HWMODULE = 0x1,
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index e3d4ec836f8..ec88de4ac16 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -814,7 +814,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
if (pdev == NULL)
return -ENODEV;
- data = pdev->dev.platform_data;
+ data = dev_get_platdata(&pdev->dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/net/ethernet/cisco/enic/Makefile b/drivers/net/ethernet/cisco/enic/Makefile
index 9d4974bba24..239e1e46545 100644
--- a/drivers/net/ethernet/cisco/enic/Makefile
+++ b/drivers/net/ethernet/cisco/enic/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_ENIC) := enic.o
enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
- enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o
+ enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
+ enic_ethtool.o enic_api.o
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index afe9b1662b8..e9f7c656ddd 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -32,12 +32,12 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.1.1.39"
-#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
+#define DRV_VERSION "2.1.1.50"
+#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
-#define ENIC_WQ_MAX 1
+#define ENIC_WQ_MAX 8
#define ENIC_RQ_MAX 8
#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
@@ -96,6 +96,7 @@ struct enic {
#ifdef CONFIG_PCI_IOV
u16 num_vfs;
#endif
+ spinlock_t enic_api_lock;
struct enic_port_profile *pp;
/* work queue cache line section */
@@ -127,9 +128,57 @@ static inline struct device *enic_get_dev(struct enic *enic)
return &(enic->pdev->dev);
}
+static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
+{
+ return rq;
+}
+
+static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
+{
+ return enic->rq_count + wq;
+}
+
+static inline unsigned int enic_legacy_io_intr(void)
+{
+ return 0;
+}
+
+static inline unsigned int enic_legacy_err_intr(void)
+{
+ return 1;
+}
+
+static inline unsigned int enic_legacy_notify_intr(void)
+{
+ return 2;
+}
+
+static inline unsigned int enic_msix_rq_intr(struct enic *enic,
+ unsigned int rq)
+{
+ return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
+}
+
+static inline unsigned int enic_msix_wq_intr(struct enic *enic,
+ unsigned int wq)
+{
+ return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
+}
+
+static inline unsigned int enic_msix_err_intr(struct enic *enic)
+{
+ return enic->rq_count + enic->wq_count;
+}
+
+static inline unsigned int enic_msix_notify_intr(struct enic *enic)
+{
+ return enic->rq_count + enic->wq_count + 1;
+}
+
void enic_reset_addr_lists(struct enic *enic);
int enic_sriov_enabled(struct enic *enic);
int enic_is_valid_vf(struct enic *enic, int vf);
int enic_is_dynamic(struct enic *enic);
+void enic_set_ethtool_ops(struct net_device *netdev);
#endif /* _ENIC_H_ */
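
The inline helpers added to enic.h above encode the interrupt layout: RQ completion queues come first, then WQ completion queues, then one error and one notify vector. As a worked illustration with hypothetical counts, with rq_count = 4 and wq_count = 2, enic_cq_rq() maps RQs 0-3 to CQs 0-3 and enic_cq_wq() maps WQs 0-1 to CQs 4-5; enic_msix_rq_intr()/enic_msix_wq_intr() return those CQs' interrupt_offset values, while enic_msix_err_intr() and enic_msix_notify_intr() return the fixed indices 6 and 7. The legacy (INTx) helpers keep the historical split of 0 = I/O, 1 = error, 2 = notify.
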
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.c b/drivers/net/ethernet/cisco/enic/enic_api.c
new file mode 100644
index 00000000000..e13efbdaa2e
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_api.c
@@ -0,0 +1,48 @@
+/**
+ * Copyright 2013 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+
+#include "vnic_dev.h"
+#include "vnic_devcmd.h"
+
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_api.h"
+
+int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
+ enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait)
+{
+ int err;
+ struct enic *enic = netdev_priv(netdev);
+ struct vnic_dev *vdev = enic->vdev;
+
+ spin_lock(&enic->enic_api_lock);
+ spin_lock(&enic->devcmd_lock);
+
+ vnic_dev_cmd_proxy_by_index_start(vdev, vf);
+ err = vnic_dev_cmd(vdev, cmd, a0, a1, wait);
+ vnic_dev_cmd_proxy_end(vdev);
+
+ spin_unlock(&enic->devcmd_lock);
+ spin_unlock(&enic->enic_api_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(enic_api_devcmd_proxy_by_index);
diff --git a/drivers/net/ethernet/cisco/enic/enic_api.h b/drivers/net/ethernet/cisco/enic/enic_api.h
new file mode 100644
index 00000000000..6b9f9255af2
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_api.h
@@ -0,0 +1,30 @@
+/**
+ * Copyright 2013 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __ENIC_API_H__
+#define __ENIC_API_H__
+
+#include <linux/netdevice.h>
+
+#include "vnic_dev.h"
+#include "vnic_devcmd.h"
+
+int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
+ enum vnic_devcmd_cmd cmd, u64 *a0, u64 *a1, int wait);
+
+#endif
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index 08bded051b9..129b14a4efb 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -20,6 +20,7 @@
#define _ENIC_DEV_H_
#include "vnic_dev.h"
+#include "vnic_vic.h"
/*
* Calls the devcmd function given by argument vnicdevcmdfn.
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
new file mode 100644
index 00000000000..47e3562f486
--- /dev/null
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -0,0 +1,257 @@
+/**
+ * Copyright 2013 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_dev.h"
+
+struct enic_stat {
+ char name[ETH_GSTRING_LEN];
+ unsigned int index;
+};
+
+#define ENIC_TX_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \
+}
+
+#define ENIC_RX_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
+}
+
+static const struct enic_stat enic_tx_stats[] = {
+ ENIC_TX_STAT(tx_frames_ok),
+ ENIC_TX_STAT(tx_unicast_frames_ok),
+ ENIC_TX_STAT(tx_multicast_frames_ok),
+ ENIC_TX_STAT(tx_broadcast_frames_ok),
+ ENIC_TX_STAT(tx_bytes_ok),
+ ENIC_TX_STAT(tx_unicast_bytes_ok),
+ ENIC_TX_STAT(tx_multicast_bytes_ok),
+ ENIC_TX_STAT(tx_broadcast_bytes_ok),
+ ENIC_TX_STAT(tx_drops),
+ ENIC_TX_STAT(tx_errors),
+ ENIC_TX_STAT(tx_tso),
+};
+
+static const struct enic_stat enic_rx_stats[] = {
+ ENIC_RX_STAT(rx_frames_ok),
+ ENIC_RX_STAT(rx_frames_total),
+ ENIC_RX_STAT(rx_unicast_frames_ok),
+ ENIC_RX_STAT(rx_multicast_frames_ok),
+ ENIC_RX_STAT(rx_broadcast_frames_ok),
+ ENIC_RX_STAT(rx_bytes_ok),
+ ENIC_RX_STAT(rx_unicast_bytes_ok),
+ ENIC_RX_STAT(rx_multicast_bytes_ok),
+ ENIC_RX_STAT(rx_broadcast_bytes_ok),
+ ENIC_RX_STAT(rx_drop),
+ ENIC_RX_STAT(rx_no_bufs),
+ ENIC_RX_STAT(rx_errors),
+ ENIC_RX_STAT(rx_rss),
+ ENIC_RX_STAT(rx_crc_errors),
+ ENIC_RX_STAT(rx_frames_64),
+ ENIC_RX_STAT(rx_frames_127),
+ ENIC_RX_STAT(rx_frames_255),
+ ENIC_RX_STAT(rx_frames_511),
+ ENIC_RX_STAT(rx_frames_1023),
+ ENIC_RX_STAT(rx_frames_1518),
+ ENIC_RX_STAT(rx_frames_to_max),
+};
+
+static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
+static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
+
+static int enic_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
+ ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
+ ecmd->port = PORT_FIBRE;
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ if (netif_carrier_ok(netdev)) {
+ ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ethtool_cmd_speed_set(ecmd, -1);
+ ecmd->duplex = -1;
+ }
+
+ ecmd->autoneg = AUTONEG_DISABLE;
+
+ return 0;
+}
+
+static void enic_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct enic *enic = netdev_priv(netdev);
+ struct vnic_devcmd_fw_info *fw_info;
+
+ enic_dev_fw_info(enic, &fw_info);
+
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, fw_info->fw_version,
+ sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
+ sizeof(drvinfo->bus_info));
+}
+
+static void enic_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ unsigned int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < enic_n_tx_stats; i++) {
+ memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < enic_n_rx_stats; i++) {
+ memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int enic_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return enic_n_tx_stats + enic_n_rx_stats;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void enic_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct enic *enic = netdev_priv(netdev);
+ struct vnic_stats *vstats;
+ unsigned int i;
+
+ enic_dev_stats_dump(enic, &vstats);
+
+ for (i = 0; i < enic_n_tx_stats; i++)
+ *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
+ for (i = 0; i < enic_n_rx_stats; i++)
+ *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
+}
+
+static u32 enic_get_msglevel(struct net_device *netdev)
+{
+ struct enic *enic = netdev_priv(netdev);
+ return enic->msg_enable;
+}
+
+static void enic_set_msglevel(struct net_device *netdev, u32 value)
+{
+ struct enic *enic = netdev_priv(netdev);
+ enic->msg_enable = value;
+}
+
+static int enic_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
+ ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
+
+ return 0;
+}
+
+static int enic_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct enic *enic = netdev_priv(netdev);
+ u32 tx_coalesce_usecs;
+ u32 rx_coalesce_usecs;
+ unsigned int i, intr;
+
+ tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
+ vnic_dev_get_intr_coal_timer_max(enic->vdev));
+ rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
+ vnic_dev_get_intr_coal_timer_max(enic->vdev));
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ if (tx_coalesce_usecs != rx_coalesce_usecs)
+ return -EINVAL;
+
+ intr = enic_legacy_io_intr();
+ vnic_intr_coalescing_timer_set(&enic->intr[intr],
+ tx_coalesce_usecs);
+ break;
+ case VNIC_DEV_INTR_MODE_MSI:
+ if (tx_coalesce_usecs != rx_coalesce_usecs)
+ return -EINVAL;
+
+ vnic_intr_coalescing_timer_set(&enic->intr[0],
+ tx_coalesce_usecs);
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ for (i = 0; i < enic->wq_count; i++) {
+ intr = enic_msix_wq_intr(enic, i);
+ vnic_intr_coalescing_timer_set(&enic->intr[intr],
+ tx_coalesce_usecs);
+ }
+
+ for (i = 0; i < enic->rq_count; i++) {
+ intr = enic_msix_rq_intr(enic, i);
+ vnic_intr_coalescing_timer_set(&enic->intr[intr],
+ rx_coalesce_usecs);
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ enic->tx_coalesce_usecs = tx_coalesce_usecs;
+ enic->rx_coalesce_usecs = rx_coalesce_usecs;
+
+ return 0;
+}
+
+static const struct ethtool_ops enic_ethtool_ops = {
+ .get_settings = enic_get_settings,
+ .get_drvinfo = enic_get_drvinfo,
+ .get_msglevel = enic_get_msglevel,
+ .set_msglevel = enic_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_strings = enic_get_strings,
+ .get_sset_count = enic_get_sset_count,
+ .get_ethtool_stats = enic_get_ethtool_stats,
+ .get_coalesce = enic_get_coalesce,
+ .set_coalesce = enic_set_coalesce,
+};
+
+void enic_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &enic_ethtool_ops);
+}
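The stat tables at the top of this new enic_ethtool.c encode each counter's position as an index into the hardware stats block viewed as an array of u64 (offsetof divided by sizeof(u64)), which is what enic_get_ethtool_stats uses to walk vstats->tx and vstats->rx. A self-contained sketch of the same indexing trick, using a simplified stand-in for struct vnic_tx_stats (the real layout lives elsewhere in the driver and is not reproduced here):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified stand-in for struct vnic_tx_stats; field names are illustrative. */
struct demo_tx_stats {
	uint64_t tx_frames_ok;
	uint64_t tx_bytes_ok;
	uint64_t tx_drops;
};

#define DEMO_STAT_INDEX(stat) \
	(offsetof(struct demo_tx_stats, stat) / sizeof(uint64_t))

int main(void)
{
	struct demo_tx_stats stats = { 100, 64000, 2 };
	uint64_t *as_array = (uint64_t *)&stats;

	/* index 2 selects tx_drops, exactly as the driver's .index field does */
	printf("tx_drops = %llu\n",
	       (unsigned long long)as_array[DEMO_STAT_INDEX(tx_drops)]);
	return 0;
}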
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 992ec2ee64d..7b756cf9474 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -31,7 +31,6 @@
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
-#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
@@ -73,57 +72,6 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);
-struct enic_stat {
- char name[ETH_GSTRING_LEN];
- unsigned int offset;
-};
-
-#define ENIC_TX_STAT(stat) \
- { .name = #stat, .offset = offsetof(struct vnic_tx_stats, stat) / 8 }
-#define ENIC_RX_STAT(stat) \
- { .name = #stat, .offset = offsetof(struct vnic_rx_stats, stat) / 8 }
-
-static const struct enic_stat enic_tx_stats[] = {
- ENIC_TX_STAT(tx_frames_ok),
- ENIC_TX_STAT(tx_unicast_frames_ok),
- ENIC_TX_STAT(tx_multicast_frames_ok),
- ENIC_TX_STAT(tx_broadcast_frames_ok),
- ENIC_TX_STAT(tx_bytes_ok),
- ENIC_TX_STAT(tx_unicast_bytes_ok),
- ENIC_TX_STAT(tx_multicast_bytes_ok),
- ENIC_TX_STAT(tx_broadcast_bytes_ok),
- ENIC_TX_STAT(tx_drops),
- ENIC_TX_STAT(tx_errors),
- ENIC_TX_STAT(tx_tso),
-};
-
-static const struct enic_stat enic_rx_stats[] = {
- ENIC_RX_STAT(rx_frames_ok),
- ENIC_RX_STAT(rx_frames_total),
- ENIC_RX_STAT(rx_unicast_frames_ok),
- ENIC_RX_STAT(rx_multicast_frames_ok),
- ENIC_RX_STAT(rx_broadcast_frames_ok),
- ENIC_RX_STAT(rx_bytes_ok),
- ENIC_RX_STAT(rx_unicast_bytes_ok),
- ENIC_RX_STAT(rx_multicast_bytes_ok),
- ENIC_RX_STAT(rx_broadcast_bytes_ok),
- ENIC_RX_STAT(rx_drop),
- ENIC_RX_STAT(rx_no_bufs),
- ENIC_RX_STAT(rx_errors),
- ENIC_RX_STAT(rx_rss),
- ENIC_RX_STAT(rx_crc_errors),
- ENIC_RX_STAT(rx_frames_64),
- ENIC_RX_STAT(rx_frames_127),
- ENIC_RX_STAT(rx_frames_255),
- ENIC_RX_STAT(rx_frames_511),
- ENIC_RX_STAT(rx_frames_1023),
- ENIC_RX_STAT(rx_frames_1518),
- ENIC_RX_STAT(rx_frames_to_max),
-};
-
-static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
-static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
-
int enic_is_dynamic(struct enic *enic)
{
return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
@@ -148,222 +96,6 @@ int enic_is_valid_vf(struct enic *enic, int vf)
#endif
}
-static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
-{
- return rq;
-}
-
-static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
-{
- return enic->rq_count + wq;
-}
-
-static inline unsigned int enic_legacy_io_intr(void)
-{
- return 0;
-}
-
-static inline unsigned int enic_legacy_err_intr(void)
-{
- return 1;
-}
-
-static inline unsigned int enic_legacy_notify_intr(void)
-{
- return 2;
-}
-
-static inline unsigned int enic_msix_rq_intr(struct enic *enic, unsigned int rq)
-{
- return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
-}
-
-static inline unsigned int enic_msix_wq_intr(struct enic *enic, unsigned int wq)
-{
- return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
-}
-
-static inline unsigned int enic_msix_err_intr(struct enic *enic)
-{
- return enic->rq_count + enic->wq_count;
-}
-
-static inline unsigned int enic_msix_notify_intr(struct enic *enic)
-{
- return enic->rq_count + enic->wq_count + 1;
-}
-
-static int enic_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
-{
- struct enic *enic = netdev_priv(netdev);
-
- ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL;
-
- if (netif_carrier_ok(netdev)) {
- ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
- ecmd->duplex = DUPLEX_FULL;
- } else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
- }
-
- ecmd->autoneg = AUTONEG_DISABLE;
-
- return 0;
-}
-
-static void enic_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
-{
- struct enic *enic = netdev_priv(netdev);
- struct vnic_devcmd_fw_info *fw_info;
-
- enic_dev_fw_info(enic, &fw_info);
-
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, fw_info->fw_version,
- sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
- sizeof(drvinfo->bus_info));
-}
-
-static void enic_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
-{
- unsigned int i;
-
- switch (stringset) {
- case ETH_SS_STATS:
- for (i = 0; i < enic_n_tx_stats; i++) {
- memcpy(data, enic_tx_stats[i].name, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
- for (i = 0; i < enic_n_rx_stats; i++) {
- memcpy(data, enic_rx_stats[i].name, ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
- break;
- }
-}
-
-static int enic_get_sset_count(struct net_device *netdev, int sset)
-{
- switch (sset) {
- case ETH_SS_STATS:
- return enic_n_tx_stats + enic_n_rx_stats;
- default:
- return -EOPNOTSUPP;
- }
-}
-
-static void enic_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct enic *enic = netdev_priv(netdev);
- struct vnic_stats *vstats;
- unsigned int i;
-
- enic_dev_stats_dump(enic, &vstats);
-
- for (i = 0; i < enic_n_tx_stats; i++)
- *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].offset];
- for (i = 0; i < enic_n_rx_stats; i++)
- *(data++) = ((u64 *)&vstats->rx)[enic_rx_stats[i].offset];
-}
-
-static u32 enic_get_msglevel(struct net_device *netdev)
-{
- struct enic *enic = netdev_priv(netdev);
- return enic->msg_enable;
-}
-
-static void enic_set_msglevel(struct net_device *netdev, u32 value)
-{
- struct enic *enic = netdev_priv(netdev);
- enic->msg_enable = value;
-}
-
-static int enic_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ecmd)
-{
- struct enic *enic = netdev_priv(netdev);
-
- ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
- ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
-
- return 0;
-}
-
-static int enic_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ecmd)
-{
- struct enic *enic = netdev_priv(netdev);
- u32 tx_coalesce_usecs;
- u32 rx_coalesce_usecs;
- unsigned int i, intr;
-
- tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
- vnic_dev_get_intr_coal_timer_max(enic->vdev));
- rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
- vnic_dev_get_intr_coal_timer_max(enic->vdev));
-
- switch (vnic_dev_get_intr_mode(enic->vdev)) {
- case VNIC_DEV_INTR_MODE_INTX:
- if (tx_coalesce_usecs != rx_coalesce_usecs)
- return -EINVAL;
-
- intr = enic_legacy_io_intr();
- vnic_intr_coalescing_timer_set(&enic->intr[intr],
- tx_coalesce_usecs);
- break;
- case VNIC_DEV_INTR_MODE_MSI:
- if (tx_coalesce_usecs != rx_coalesce_usecs)
- return -EINVAL;
-
- vnic_intr_coalescing_timer_set(&enic->intr[0],
- tx_coalesce_usecs);
- break;
- case VNIC_DEV_INTR_MODE_MSIX:
- for (i = 0; i < enic->wq_count; i++) {
- intr = enic_msix_wq_intr(enic, i);
- vnic_intr_coalescing_timer_set(&enic->intr[intr],
- tx_coalesce_usecs);
- }
-
- for (i = 0; i < enic->rq_count; i++) {
- intr = enic_msix_rq_intr(enic, i);
- vnic_intr_coalescing_timer_set(&enic->intr[intr],
- rx_coalesce_usecs);
- }
-
- break;
- default:
- break;
- }
-
- enic->tx_coalesce_usecs = tx_coalesce_usecs;
- enic->rx_coalesce_usecs = rx_coalesce_usecs;
-
- return 0;
-}
-
-static const struct ethtool_ops enic_ethtool_ops = {
- .get_settings = enic_get_settings,
- .get_drvinfo = enic_get_drvinfo,
- .get_msglevel = enic_get_msglevel,
- .set_msglevel = enic_set_msglevel,
- .get_link = ethtool_op_get_link,
- .get_strings = enic_get_strings,
- .get_sset_count = enic_get_sset_count,
- .get_ethtool_stats = enic_get_ethtool_stats,
- .get_coalesce = enic_get_coalesce,
- .set_coalesce = enic_set_coalesce,
-};
-
static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
{
struct enic *enic = vnic_dev_priv(wq->vdev);
@@ -396,10 +128,10 @@ static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
completed_index, enic_wq_free_buf,
opaque);
- if (netif_queue_stopped(enic->netdev) &&
+ if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
vnic_wq_desc_avail(&enic->wq[q_number]) >=
(MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS))
- netif_wake_queue(enic->netdev);
+ netif_wake_subqueue(enic->netdev, q_number);
spin_unlock(&enic->wq_lock[q_number]);
@@ -560,10 +292,15 @@ static irqreturn_t enic_isr_msix_rq(int irq, void *data)
static irqreturn_t enic_isr_msix_wq(int irq, void *data)
{
struct enic *enic = data;
- unsigned int cq = enic_cq_wq(enic, 0);
- unsigned int intr = enic_msix_wq_intr(enic, 0);
+ unsigned int cq;
+ unsigned int intr;
unsigned int wq_work_to_do = -1; /* no limit */
unsigned int wq_work_done;
+ unsigned int wq_irq;
+
+ wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector;
+ cq = enic_cq_wq(enic, wq_irq);
+ intr = enic_msix_wq_intr(enic, wq_irq);
wq_work_done = vnic_cq_service(&enic->cq[cq],
wq_work_to_do, enic_wq_service, NULL);
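With one WQ interrupt per TX queue, enic_isr_msix_wq can no longer assume queue 0; it recovers the queue index from the distance between the firing vector and the first WQ vector in msix_entry[]. A small arithmetic sketch of that mapping, with made-up vector numbers:

#include <stdio.h>

int main(void)
{
	/* Illustrative values only; real ones come from enic->msix_entry[]. */
	unsigned int first_wq_vector = 45;	/* vector of WQ 0's MSI-X entry */
	unsigned int firing_irq = 47;		/* irq passed to the handler    */
	unsigned int wq_irq = firing_irq - first_wq_vector;

	printf("interrupt %u services WQ %u\n", firing_irq, wq_irq);	/* WQ 2 */
	return 0;
}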
@@ -779,14 +516,18 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct enic *enic = netdev_priv(netdev);
- struct vnic_wq *wq = &enic->wq[0];
+ struct vnic_wq *wq;
unsigned long flags;
+ unsigned int txq_map;
if (skb->len <= 0) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
+ txq_map = skb_get_queue_mapping(skb) % enic->wq_count;
+ wq = &enic->wq[txq_map];
+
/* Non-TSO sends must fit within ENIC_NON_TSO_MAX_DESC descs,
* which is very likely. In the off chance it's going to take

 * more than ENIC_NON_TSO_MAX_DESC, linearize the skb.
@@ -799,23 +540,23 @@ static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
- spin_lock_irqsave(&enic->wq_lock[0], flags);
+ spin_lock_irqsave(&enic->wq_lock[txq_map], flags);
if (vnic_wq_desc_avail(wq) <
skb_shinfo(skb)->nr_frags + ENIC_DESC_MAX_SPLITS) {
- netif_stop_queue(netdev);
+ netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
/* This is a hard error, log it */
netdev_err(netdev, "BUG! Tx ring full when queue awake!\n");
- spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+ spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
return NETDEV_TX_BUSY;
}
enic_queue_wq_skb(enic, wq, skb);
if (vnic_wq_desc_avail(wq) < MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)
- netif_stop_queue(netdev);
+ netif_tx_stop_queue(netdev_get_tx_queue(netdev, txq_map));
- spin_unlock_irqrestore(&enic->wq_lock[0], flags);
+ spin_unlock_irqrestore(&enic->wq_lock[txq_map], flags);
return NETDEV_TX_OK;
}
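enic_hard_start_xmit now picks the work queue from the skb's queue mapping, reduced modulo the number of WQs the adapter actually has, and stops or wakes only that subqueue. A minimal sketch of the selection step, with made-up counts:

#include <stdio.h>

static unsigned int pick_wq(unsigned int queue_mapping, unsigned int wq_count)
{
	/* Same reduction as txq_map = skb_get_queue_mapping(skb) % enic->wq_count */
	return queue_mapping % wq_count;
}

int main(void)
{
	/* e.g. the stack chose queue 5 but the adapter only has 4 WQs */
	printf("txq_map = %u\n", pick_wq(5, 4));	/* prints 1 */
	return 0;
}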
@@ -1293,6 +1034,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
skb_put(skb, bytes_written);
skb->protocol = eth_type_trans(skb, netdev);
+ skb_record_rx_queue(skb, q_number);
+ if (netdev->features & NETIF_F_RXHASH) {
+ skb->rxhash = rss_hash;
+ if (rss_type & (NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX |
+ NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 |
+ NIC_CFG_RSS_HASH_TYPE_TCP_IPV4))
+ skb->l4_rxhash = true;
+ }
if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
skb->csum = htons(checksum);
@@ -1637,7 +1386,7 @@ static int enic_open(struct net_device *netdev)
enic_set_rx_mode(netdev);
- netif_wake_queue(netdev);
+ netif_tx_wake_all_queues(netdev);
for (i = 0; i < enic->rq_count; i++)
napi_enable(&enic->napi[i]);
@@ -2001,6 +1750,7 @@ static void enic_reset(struct work_struct *work)
rtnl_lock();
+ spin_lock(&enic->enic_api_lock);
enic_dev_hang_notify(enic);
enic_stop(enic->netdev);
enic_dev_hang_reset(enic);
@@ -2009,6 +1759,8 @@ static void enic_reset(struct work_struct *work)
enic_set_rss_nic_cfg(enic);
enic_dev_set_ig_vlan_rewrite_mode(enic);
enic_open(enic->netdev);
+ spin_unlock(&enic->enic_api_lock);
+ call_netdevice_notifiers(NETDEV_REBOOT, enic->netdev);
rtnl_unlock();
}
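The reset path now takes enic_api_lock around the hang-reset, so proxied devcmds issued through the new enic_api cannot race with it, and it raises NETDEV_REBOOT once the device is back up. A hedged sketch of how a consumer module might watch for that event; the handler body is an illustrative placeholder:

#include <linux/netdevice.h>
#include <linux/notifier.h>

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_REBOOT)
		netdev_info(netdev, "uplink was hang-reset; revalidate state\n");

	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_netdev_event,
};
/* registered elsewhere with register_netdevice_notifier(&example_nb) */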
@@ -2297,7 +2049,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* instance data is initialized to zero.
*/
- netdev = alloc_etherdev(sizeof(struct enic));
+ netdev = alloc_etherdev_mqs(sizeof(struct enic),
+ ENIC_RQ_MAX, ENIC_WQ_MAX);
if (!netdev)
return -ENOMEM;
@@ -2327,11 +2080,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
/* Query PCI controller on system for DMA addressing
- * limitation for the device. Try 40-bit first, and
+ * limitation for the device. Try 64-bit first, and
 * fall back to 32-bit.
*/
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
@@ -2345,10 +2098,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_release_regions;
}
} else {
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_err(dev, "Unable to obtain %u-bit DMA "
- "for consistent allocations, aborting\n", 40);
+ "for consistent allocations, aborting\n", 64);
goto err_out_release_regions;
}
using_dac = 1;
@@ -2421,6 +2174,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
spin_lock_init(&enic->devcmd_lock);
+ spin_lock_init(&enic->enic_api_lock);
/*
* Set ingress vlan rewrite mode before vnic initialization
@@ -2462,6 +2216,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_dev_close;
}
+ netif_set_real_num_tx_queues(netdev, enic->wq_count);
+ netif_set_real_num_rx_queues(netdev, enic->rq_count);
+
/* Setup notification timer, HW reset task, and wq locks
*/
@@ -2496,7 +2253,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &enic_netdev_ops;
netdev->watchdog_timeo = 2 * HZ;
- netdev->ethtool_ops = &enic_ethtool_ops;
+ enic_set_ethtool_ops(netdev);
netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
if (ENIC_SETTING(enic, LOOP)) {
@@ -2510,6 +2267,8 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ENIC_SETTING(enic, TSO))
netdev->hw_features |= NETIF_F_TSO |
NETIF_F_TSO6 | NETIF_F_TSO_ECN;
+ if (ENIC_SETTING(enic, RSS))
+ netdev->hw_features |= NETIF_F_RXHASH;
if (ENIC_SETTING(enic, RXCSUM))
netdev->hw_features |= NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/cisco/enic/enic_res.h b/drivers/net/ethernet/cisco/enic/enic_res.h
index 25be2734c3f..69f60afd657 100644
--- a/drivers/net/ethernet/cisco/enic/enic_res.h
+++ b/drivers/net/ethernet/cisco/enic/enic_res.h
@@ -47,6 +47,9 @@ static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
int offload_mode, int cq_entry, int sop, int eop, int loopback)
{
struct wq_enet_desc *desc = vnic_wq_next_desc(wq);
+ u8 desc_skip_cnt = 1;
+ u8 compressed_send = 0;
+ u64 wrid = 0;
wq_enet_desc_enc(desc,
(u64)dma_addr | VNIC_PADDR_TARGET,
@@ -59,7 +62,8 @@ static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq,
(u16)vlan_tag,
(u8)loopback);
- vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop);
+ vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt,
+ (u8)cq_entry, compressed_send, wrid);
}
static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq,
@@ -120,6 +124,7 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq,
dma_addr_t dma_addr, unsigned int len)
{
struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
+ u64 wrid = 0;
u8 type = os_buf_index ?
RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP;
@@ -127,7 +132,7 @@ static inline void enic_queue_rq_desc(struct vnic_rq *rq,
(u64)dma_addr | VNIC_PADDR_TARGET,
type, (u16)len);
- vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len);
+ vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid);
}
struct enic;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 97455c573db..69dd92598b7 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -175,6 +175,7 @@ unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev,
{
return vdev->res[type].count;
}
+EXPORT_SYMBOL(vnic_dev_get_res_count);
void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
unsigned int index)
@@ -193,6 +194,7 @@ void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type,
return (char __iomem *)vdev->res[type].vaddr;
}
}
+EXPORT_SYMBOL(vnic_dev_get_res);
static unsigned int vnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
unsigned int desc_count, unsigned int desc_size)
@@ -942,6 +944,7 @@ void vnic_dev_unregister(struct vnic_dev *vdev)
kfree(vdev);
}
}
+EXPORT_SYMBOL(vnic_dev_unregister);
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
@@ -969,6 +972,13 @@ err_out:
vnic_dev_unregister(vdev);
return NULL;
}
+EXPORT_SYMBOL(vnic_dev_register);
+
+struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev)
+{
+ return vdev->pdev;
+}
+EXPORT_SYMBOL(vnic_dev_get_pdev);
int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len)
{
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index f3d9b79ba77..e670029862a 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -127,6 +127,7 @@ int vnic_dev_set_ig_vlan_rewrite_mode(struct vnic_dev *vdev,
struct vnic_dev *vnic_dev_register(struct vnic_dev *vdev,
void *priv, struct pci_dev *pdev, struct vnic_dev_bar *bar,
unsigned int num_bars);
+struct pci_dev *vnic_dev_get_pdev(struct vnic_dev *vdev);
int vnic_dev_init_prov2(struct vnic_dev *vdev, u8 *buf, u32 len);
int vnic_dev_enable2(struct vnic_dev *vdev, int active);
int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index 23d555255cf..b9a0d78fd63 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -281,11 +281,25 @@ enum vnic_devcmd_cmd {
* 0 if no VIF-CONFIG-INFO TLV was ever received. */
CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44),
+ /* INT13 API: (u64)a0=paddr to vnic_int13_params struct
+ * (u32)a1=INT13_CMD_xxx
+ */
+ CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45),
+
+ /* Set default vlan:
+ * in: (u16)a0=new default vlan
+ * (u16)a1=zero for overriding vlan with param a0,
+ * non-zero for resetting vlan to the default
+ * out: (u16)a0=old default vlan
+ */
+ CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46),
+
/* init_prov_info2:
* Variant of CMD_INIT_PROV_INFO, where it will not try to enable
* the vnic until CMD_ENABLE2 is issued.
* (u64)a0=paddr of vnic_devcmd_provinfo
- * (u32)a1=sizeof provision info */
+ * (u32)a1=sizeof provision info
+ */
CMD_INIT_PROV_INFO2 = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 47),
/* enable2:
@@ -339,16 +353,57 @@ enum vnic_devcmd_cmd {
CMD_INTR_COAL_CONVERT = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 50),
/*
- * cmd_set_mac_addr
- * set mac address
+ * Set the predefined mac address as default
* in:
* (u48)a0 = mac addr
- *
*/
CMD_SET_MAC_ADDR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 55),
+
+ /* Update the provisioning info of the given VIF
+ * (u64)a0=paddr of vnic_devcmd_provinfo
+ * (u32)a1=sizeof provision info
+ */
+ CMD_PROV_INFO_UPDATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 56),
+
+ /* Add a filter.
+ * in: (u64) a0= filter address
+ * (u32) a1= size of filter
+ * out: (u32) a0=filter identifier
+ */
+ CMD_ADD_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 58),
+
+ /* Delete a filter.
+ * in: (u32) a0=filter identifier
+ */
+ CMD_DEL_FILTER = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 59),
+
+ /* Enable a Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ * (u32) a1= command
+ */
+ CMD_QP_ENABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 60),
+
+ /* Disable a Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ * (u32) a1= command
+ */
+ CMD_QP_DISABLE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 61),
+
+ /* Stats dump Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ * (u64) a1=host buffer addr for status dump
+ * (u32) a2=length of the buffer
+ */
+ CMD_QP_STATS_DUMP = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 62),
+
+ /* Clear stats for Queue Pair in User space NIC
+ * in: (u32) a0=Queue Pair number
+ */
+ CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63),
};
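The comment block for CMD_SET_DEFAULT_VLAN spells out the register usage: a0 carries the new default VLAN in, a1 selects override versus reset, and a0 returns the old default VLAN. A hedged sketch of issuing it with vnic_dev_cmd(), whose call shape appears in the enic_api.c hunk above; the wait value is an illustrative assumption:

#include <linux/kernel.h>
#include "vnic_dev.h"
#include "vnic_devcmd.h"

/* Sketch only: assumes a valid struct vnic_dev owned by the enic driver. */
static int example_set_default_vlan(struct vnic_dev *vdev, u16 new_vlan)
{
	u64 a0 = new_vlan;	/* in:  (u16)a0 = new default vlan */
	u64 a1 = 0;		/* zero => override vlan with a0   */
	int err;

	err = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, 1000);
	if (!err)
		pr_info("old default vlan was %u\n", (u16)a0);	/* out: (u16)a0 */
	return err;
}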
/* CMD_ENABLE2 flags */
+#define CMD_ENABLE2_STANDBY 0x0
#define CMD_ENABLE2_ACTIVE 0x1
/* flags for CMD_OPEN */
@@ -364,6 +419,9 @@ enum vnic_devcmd_cmd {
#define CMD_PFILTER_PROMISCUOUS 0x08
#define CMD_PFILTER_ALL_MULTICAST 0x10
+/* Commands for CMD_QP_ENABLE/CMD_QP_DISABLE */
+#define CMD_QP_RQWQ 0x0
+
/* rewrite modes for CMD_IG_VLAN_REWRITE_MODE */
#define IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK 0
#define IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN 1
@@ -390,6 +448,7 @@ enum vnic_devcmd_error {
ERR_EMAXRES = 10,
ERR_ENOTSUPPORTED = 11,
ERR_EINPROGRESS = 12,
+ ERR_MAX
};
/*
@@ -435,6 +494,115 @@ struct vnic_devcmd_provinfo {
u8 data[0];
};
+/* These are used in flags field of different filters to denote
+ * valid fields used.
+ */
+#define FILTER_FIELD_VALID(fld) (1 << (fld - 1))
+
+#define FILTER_FIELDS_USNIC ( \
+ FILTER_FIELD_VALID(1) | \
+ FILTER_FIELD_VALID(2) | \
+ FILTER_FIELD_VALID(3) | \
+ FILTER_FIELD_VALID(4))
+
+#define FILTER_FIELDS_IPV4_5TUPLE ( \
+ FILTER_FIELD_VALID(1) | \
+ FILTER_FIELD_VALID(2) | \
+ FILTER_FIELD_VALID(3) | \
+ FILTER_FIELD_VALID(4) | \
+ FILTER_FIELD_VALID(5))
+
+#define FILTER_FIELDS_MAC_VLAN ( \
+ FILTER_FIELD_VALID(1) | \
+ FILTER_FIELD_VALID(2))
+
+#define FILTER_FIELD_USNIC_VLAN FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_USNIC_ETHTYPE FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_USNIC_PROTO FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_USNIC_ID FILTER_FIELD_VALID(4)
+
+struct filter_usnic_id {
+ u32 flags;
+ u16 vlan;
+ u16 ethtype;
+ u8 proto_version;
+ u32 usnic_id;
+} __packed;
+
+#define FILTER_FIELD_5TUP_PROTO FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_5TUP_SRC_AD FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_5TUP_DST_AD FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_5TUP_SRC_PT FILTER_FIELD_VALID(4)
+#define FILTER_FIELD_5TUP_DST_PT FILTER_FIELD_VALID(5)
+
+/* Enums for the protocol field. */
+enum protocol_e {
+ PROTO_UDP = 0,
+ PROTO_TCP = 1,
+};
+
+struct filter_ipv4_5tuple {
+ u32 flags;
+ u32 protocol;
+ u32 src_addr;
+ u32 dst_addr;
+ u16 src_port;
+ u16 dst_port;
+} __packed;
+
+#define FILTER_FIELD_VMQ_VLAN FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_VMQ_MAC FILTER_FIELD_VALID(2)
+
+struct filter_mac_vlan {
+ u32 flags;
+ u16 vlan;
+ u8 mac_addr[6];
+} __packed;
+
+/* Specifies the filter_action type. */
+enum {
+ FILTER_ACTION_RQ_STEERING = 0,
+ FILTER_ACTION_MAX
+};
+
+struct filter_action {
+ u32 type;
+ union {
+ u32 rq_idx;
+ } u;
+} __packed;
+
+/* Specifies the filter type. */
+enum filter_type {
+ FILTER_USNIC_ID = 0,
+ FILTER_IPV4_5TUPLE = 1,
+ FILTER_MAC_VLAN = 2,
+ FILTER_MAX
+};
+
+struct filter {
+ u32 type;
+ union {
+ struct filter_usnic_id usnic;
+ struct filter_ipv4_5tuple ipv4;
+ struct filter_mac_vlan mac_vlan;
+ } u;
+} __packed;
+
+enum {
+ CLSF_TLV_FILTER = 0,
+ CLSF_TLV_ACTION = 1,
+};
+
+/* Maximum size of buffer to CMD_ADD_FILTER */
+#define FILTER_MAX_BUF_SIZE 100
+
+struct filter_tlv {
+ u_int32_t type;
+ u_int32_t length;
+ u_int32_t val[0];
+};
+
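Taken together, these definitions describe the buffer CMD_ADD_FILTER expects: a CLSF_TLV_FILTER entry wrapping a struct filter, optionally followed by a CLSF_TLV_ACTION entry wrapping a struct filter_action, with the whole buffer capped at FILTER_MAX_BUF_SIZE. A hedged sketch of packing a MAC/VLAN filter that steers to RQ 0, using only the types defined above; buffer allocation, endianness handling, and the devcmd call itself are omitted:

#include <linux/types.h>
#include <linux/string.h>
#include "vnic_devcmd.h"

/* Sketch: fills 'buf' (at least FILTER_MAX_BUF_SIZE bytes) and returns the
 * number of bytes used, i.e. the (u32)a1 length for CMD_ADD_FILTER.
 */
static u32 example_fill_mac_vlan_filter(void *buf, const u8 *mac, u16 vlan)
{
	struct filter_tlv *tlv = buf;
	struct filter *flt;
	struct filter_action *act;

	tlv->type = CLSF_TLV_FILTER;
	tlv->length = sizeof(struct filter);
	flt = (struct filter *)tlv->val;
	flt->type = FILTER_MAC_VLAN;
	flt->u.mac_vlan.flags = FILTER_FIELDS_MAC_VLAN;
	flt->u.mac_vlan.vlan = vlan;
	memcpy(flt->u.mac_vlan.mac_addr, mac, 6);

	/* next TLV starts right after the filter payload */
	tlv = (struct filter_tlv *)((u8 *)tlv->val + tlv->length);
	tlv->type = CLSF_TLV_ACTION;
	tlv->length = sizeof(struct filter_action);
	act = (struct filter_action *)tlv->val;
	act->type = FILTER_ACTION_RQ_STEERING;
	act->u.rq_idx = 0;

	return 2 * sizeof(struct filter_tlv) + sizeof(struct filter) +
	       sizeof(struct filter_action);
}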
/*
* Writing cmd register causes STAT_BUSY to get set in status register.
* When cmd completes, STAT_BUSY will be cleared.
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.c b/drivers/net/ethernet/cisco/enic/vnic_rq.c
index 7e1488fc8ab..36a2ed606c9 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.c
@@ -30,12 +30,9 @@
static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
{
struct vnic_rq_buf *buf;
- struct vnic_dev *vdev;
unsigned int i, j, count = rq->ring.desc_count;
unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
- vdev = rq->vdev;
-
for (i = 0; i < blks; i++) {
rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
if (!rq->bufs[i])
@@ -141,7 +138,7 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
unsigned int error_interrupt_enable,
unsigned int error_interrupt_offset)
{
- u32 fetch_index;
+ u32 fetch_index = 0;
/* Use current fetch_index as the ring starting point */
fetch_index = ioread32(&rq->ctrl->fetch_index);
diff --git a/drivers/net/ethernet/cisco/enic/vnic_rq.h b/drivers/net/ethernet/cisco/enic/vnic_rq.h
index 2056586f4d4..ee7bc95af27 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_rq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_rq.h
@@ -72,6 +72,7 @@ struct vnic_rq_buf {
unsigned int len;
unsigned int index;
void *desc;
+ uint64_t wr_id;
};
struct vnic_rq {
@@ -110,7 +111,8 @@ static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
static inline void vnic_rq_post(struct vnic_rq *rq,
void *os_buf, unsigned int os_buf_index,
- dma_addr_t dma_addr, unsigned int len)
+ dma_addr_t dma_addr, unsigned int len,
+ uint64_t wrid)
{
struct vnic_rq_buf *buf = rq->to_use;
@@ -118,6 +120,7 @@ static inline void vnic_rq_post(struct vnic_rq *rq,
buf->os_buf_index = os_buf_index;
buf->dma_addr = dma_addr;
buf->len = len;
+ buf->wr_id = wrid;
buf = buf->next;
rq->to_use = buf;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.c b/drivers/net/ethernet/cisco/enic/vnic_wq.c
index 5e0d7a2be9b..3e6b8d54daf 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.c
@@ -30,12 +30,9 @@
static int vnic_wq_alloc_bufs(struct vnic_wq *wq)
{
struct vnic_wq_buf *buf;
- struct vnic_dev *vdev;
unsigned int i, j, count = wq->ring.desc_count;
unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count);
- vdev = wq->vdev;
-
for (i = 0; i < blks; i++) {
wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC);
if (!wq->bufs[i])
diff --git a/drivers/net/ethernet/cisco/enic/vnic_wq.h b/drivers/net/ethernet/cisco/enic/vnic_wq.h
index 7dd937ac11c..2c6c70804a3 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_wq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_wq.h
@@ -58,6 +58,10 @@ struct vnic_wq_buf {
unsigned int index;
int sop;
void *desc;
+ uint64_t wr_id; /* Cookie */
+ uint8_t cq_entry; /* Gets completion event from hw */
+ uint8_t desc_skip_cnt; /* Num descs to occupy */
+ uint8_t compressed_send; /* Both hdr and payload in one desc */
};
/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */
@@ -102,14 +106,20 @@ static inline void *vnic_wq_next_desc(struct vnic_wq *wq)
static inline void vnic_wq_post(struct vnic_wq *wq,
void *os_buf, dma_addr_t dma_addr,
- unsigned int len, int sop, int eop)
+ unsigned int len, int sop, int eop,
+ uint8_t desc_skip_cnt, uint8_t cq_entry,
+ uint8_t compressed_send, uint64_t wrid)
{
struct vnic_wq_buf *buf = wq->to_use;
buf->sop = sop;
+ buf->cq_entry = cq_entry;
+ buf->compressed_send = compressed_send;
+ buf->desc_skip_cnt = desc_skip_cnt;
buf->os_buf = eop ? os_buf : NULL;
buf->dma_addr = dma_addr;
buf->len = len;
+ buf->wr_id = wrid;
buf = buf->next;
if (eop) {
@@ -123,7 +133,7 @@ static inline void vnic_wq_post(struct vnic_wq *wq,
}
wq->to_use = buf;
- wq->ring.desc_avail--;
+ wq->ring.desc_avail -= desc_skip_cnt;
}
static inline void vnic_wq_service(struct vnic_wq *wq,
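vnic_wq_post() now records a caller cookie (wr_id), whether the descriptor should raise a completion event (cq_entry), how many descriptors the post consumes (desc_skip_cnt), and whether header and payload share one descriptor (compressed_send); desc_avail is decremented by desc_skip_cnt instead of one. The enic_res.h hunk earlier shows the ethernet path passing 1/cq_entry/0/wrid; a hedged sketch of the same call shape for a hypothetical caller that wants its cookie back at completion time:

#include <linux/types.h>
#include "vnic_wq.h"

/* Sketch: descriptor encoding (wq_enet_desc_enc) is omitted; 'cookie' is an
 * illustrative value that later surfaces as buf->wr_id in the service path.
 */
static void example_post_one(struct vnic_wq *wq, void *os_buf,
			     dma_addr_t dma_addr, unsigned int len,
			     u64 cookie)
{
	vnic_wq_post(wq, os_buf, dma_addr, len,
		     1 /* sop */, 1 /* eop */,
		     1 /* desc_skip_cnt: one descriptor consumed */,
		     1 /* cq_entry: ask hw for a completion */,
		     0 /* compressed_send */,
		     cookie);
}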
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index a13b312b50f..5f5896e522d 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1384,7 +1384,7 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
static int
dm9000_probe(struct platform_device *pdev)
{
- struct dm9000_plat_data *pdata = pdev->dev.platform_data;
+ struct dm9000_plat_data *pdata = dev_get_platdata(&pdev->dev);
struct board_info *db; /* Point a board information structure */
struct net_device *ndev;
const unsigned char *mac_src;
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 4c830030fb0..2db6c573cec 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -2319,7 +2319,7 @@ static void de4x5_pci_remove(struct pci_dev *pdev)
struct net_device *dev;
u_long iobase;
- dev = dev_get_drvdata(&pdev->dev);
+ dev = pci_get_drvdata(pdev);
iobase = dev->base_addr;
unregister_netdev (dev);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index c94152f1c6b..4e8cfa2ac80 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1304,7 +1304,9 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct tulip_private *tp;
/* See note below on the multiport cards. */
- static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
+ static unsigned char last_phys_addr[ETH_ALEN] = {
+ 0x00, 'L', 'i', 'n', 'u', 'x'
+ };
static int last_irq;
static int multiport_cnt; /* For four-port boards w/one EEPROM */
int i, irq;
@@ -1627,8 +1629,8 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->dev_addr[i] = last_phys_addr[i] + 1;
#if defined(CONFIG_SPARC)
addr = of_get_property(dp, "local-mac-address", &len);
- if (addr && len == 6)
- memcpy(dev->dev_addr, addr, 6);
+ if (addr && len == ETH_ALEN)
+ memcpy(dev->dev_addr, addr, ETH_ALEN);
#endif
#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
if (last_irq)
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 50d9c631593..bf3bf6f22c9 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -469,6 +469,17 @@ static void sundance_reset(struct net_device *dev, unsigned long reset_cmd)
}
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sundance_poll_controller(struct net_device *dev)
+{
+ struct netdev_private *np = netdev_priv(dev);
+
+ disable_irq(np->pci_dev->irq);
+ intr_handler(np->pci_dev->irq, dev);
+ enable_irq(np->pci_dev->irq);
+}
+#endif
+
static const struct net_device_ops netdev_ops = {
.ndo_open = netdev_open,
.ndo_stop = netdev_close,
@@ -480,6 +491,9 @@ static const struct net_device_ops netdev_ops = {
.ndo_change_mtu = change_mtu,
.ndo_set_mac_address = sundance_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = sundance_poll_controller,
+#endif
};
static int sundance_probe1(struct pci_dev *pdev,
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index c827b1b6b1c..ace5050dba3 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -34,7 +34,7 @@
#include "be_hw.h"
#include "be_roce.h"
-#define DRV_VER "4.6.62.0u"
+#define DRV_VER "4.9.134.0u"
#define DRV_NAME "be2net"
#define BE_NAME "Emulex BladeEngine2"
#define BE3_NAME "Emulex BladeEngine3"
@@ -99,14 +99,18 @@ static inline char *nic_name(struct pci_dev *pdev)
#define MCC_Q_LEN 128 /* total size not to exceed 8 pages */
#define MCC_CQ_LEN 256
-#define BE3_MAX_RSS_QS 8
#define BE2_MAX_RSS_QS 4
-#define MAX_RSS_QS BE3_MAX_RSS_QS
-#define MAX_RX_QS (MAX_RSS_QS + 1) /* RSS qs + 1 def Rx */
+#define BE3_MAX_RSS_QS 16
+#define BE3_MAX_TX_QS 16
+#define BE3_MAX_EVT_QS 16
+
+#define MAX_RX_QS 32
+#define MAX_EVT_QS 32
+#define MAX_TX_QS 32
-#define MAX_TX_QS 8
#define MAX_ROCE_EQS 5
-#define MAX_MSIX_VECTORS (MAX_RSS_QS + MAX_ROCE_EQS) /* RSS qs + RoCE */
+#define MAX_MSIX_VECTORS 32
+#define MIN_MSIX_VECTORS 1
#define BE_TX_BUDGET 256
#define BE_NAPI_WEIGHT 64
#define MAX_RX_POST BE_NAPI_WEIGHT /* Frags posted at a time */
@@ -189,6 +193,7 @@ struct be_eq_obj {
u32 cur_eqd; /* in usecs */
u8 idx; /* array index */
+ u8 msix_idx;
u16 tx_budget;
u16 spurious_intr;
struct napi_struct napi;
@@ -352,6 +357,18 @@ struct phy_info {
u32 supported;
};
+struct be_resources {
+ u16 max_vfs; /* Total VFs "really" supported by FW/HW */
+ u16 max_mcast_mac;
+ u16 max_tx_qs;
+ u16 max_rss_qs;
+ u16 max_rx_qs;
+ u16 max_uc_mac; /* Max UC MACs programmable */
+ u16 max_vlans; /* Number of vlans supported */
+ u16 max_evt_qs;
+ u32 if_cap_flags;
+};
+
struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
@@ -369,18 +386,19 @@ struct be_adapter {
spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
- u32 num_msix_vec;
- u32 num_evt_qs;
- struct be_eq_obj eq_obj[MAX_MSIX_VECTORS];
+ u16 cfg_num_qs; /* configured via set-channels */
+ u16 num_evt_qs;
+ u16 num_msix_vec;
+ struct be_eq_obj eq_obj[MAX_EVT_QS];
struct msix_entry msix_entries[MAX_MSIX_VECTORS];
bool isr_registered;
/* TX Rings */
- u32 num_tx_qs;
+ u16 num_tx_qs;
struct be_tx_obj tx_obj[MAX_TX_QS];
/* Rx rings */
- u32 num_rx_qs;
+ u16 num_rx_qs;
struct be_rx_obj rx_obj[MAX_RX_QS];
u32 big_page_size; /* Compounded page size shared by rx wrbs */
@@ -430,8 +448,8 @@ struct be_adapter {
u32 flash_status;
struct completion flash_compl;
- u32 num_vfs; /* Number of VFs provisioned by PF driver */
- u32 dev_num_vfs; /* Number of VFs supported by HW */
+ struct be_resources res; /* resources available for the func */
+ u16 num_vfs; /* Number of VFs provisioned by PF */
u8 virtfn;
struct be_vf_cfg *vf_cfg;
bool be3_native;
@@ -446,21 +464,13 @@ struct be_adapter {
u16 qnq_vid;
u32 msg_enable;
int be_get_temp_freq;
- u16 max_mcast_mac;
- u16 max_tx_queues;
- u16 max_rss_queues;
- u16 max_rx_queues;
- u16 max_pmac_cnt;
- u16 max_vlans;
- u16 max_event_queues;
- u32 if_cap_flags;
u8 pf_number;
u64 rss_flags;
};
#define be_physfn(adapter) (!adapter->virtfn)
#define sriov_enabled(adapter) (adapter->num_vfs > 0)
-#define sriov_want(adapter) (adapter->dev_num_vfs && num_vfs && \
+#define sriov_want(adapter) (be_max_vfs(adapter) && num_vfs && \
be_physfn(adapter))
#define for_all_vfs(adapter, vf_cfg, i) \
for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
@@ -469,6 +479,26 @@ struct be_adapter {
#define ON 1
#define OFF 0
+#define be_max_vlans(adapter) (adapter->res.max_vlans)
+#define be_max_uc(adapter) (adapter->res.max_uc_mac)
+#define be_max_mc(adapter) (adapter->res.max_mcast_mac)
+#define be_max_vfs(adapter) (adapter->res.max_vfs)
+#define be_max_rss(adapter) (adapter->res.max_rss_qs)
+#define be_max_txqs(adapter) (adapter->res.max_tx_qs)
+#define be_max_prio_txqs(adapter) (adapter->res.max_prio_tx_qs)
+#define be_max_rxqs(adapter) (adapter->res.max_rx_qs)
+#define be_max_eqs(adapter) (adapter->res.max_evt_qs)
+#define be_if_cap_flags(adapter) (adapter->res.if_cap_flags)
+
+static inline u16 be_max_qs(struct be_adapter *adapter)
+{
+ /* If no RSS, need at least the one def RXQ */
+ u16 num = max_t(u16, be_max_rss(adapter), 1);
+
+ num = min(num, be_max_eqs(adapter));
+ return min_t(u16, num, num_online_cpus());
+}
+
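be_max_qs() clamps the usable queue count to the smallest of the RSS capability (but at least one default RX queue), the available event queues, and the number of online CPUs. A quick numeric check of that reduction with made-up resource values:

#include <stdio.h>

static unsigned short min3_u16(unsigned short a, unsigned short b,
			       unsigned short c)
{
	unsigned short m = a < b ? a : b;
	return m < c ? m : c;
}

int main(void)
{
	/* Illustrative values: 16 RSS queues, 8 EQs, 12 online CPUs */
	unsigned short max_rss = 16, max_eqs = 8, online_cpus = 12;
	unsigned short num = max_rss > 1 ? max_rss : 1;	/* at least one RXQ */

	printf("be_max_qs would return %u\n",
	       min3_u16(num, max_eqs, online_cpus));	/* prints 8 */
	return 0;
}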
#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
adapter->pdev->device == OC_DEVICE_ID4)
@@ -672,6 +702,8 @@ extern int be_load_fw(struct be_adapter *adapter, u8 *func);
extern bool be_is_wol_supported(struct be_adapter *adapter);
extern bool be_pause_supported(struct be_adapter *adapter);
extern u32 be_get_fw_log_level(struct be_adapter *adapter);
+int be_update_queues(struct be_adapter *adapter);
+int be_poll(struct napi_struct *napi, int budget);
/*
* internal function to initialize-cleanup roce device.
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 6e6e0a117ee..1ab5dab11ef 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -258,7 +258,8 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
(struct be_async_event_grp5_pvid_state *)evt);
break;
default:
- dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
+ dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
+ event_type);
break;
}
}
@@ -279,7 +280,8 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter,
adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
break;
default:
- dev_warn(&adapter->pdev->dev, "Unknown debug event\n");
+ dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
+ event_type);
break;
}
}
@@ -631,6 +633,12 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
return &wrb->payload.sgl[0];
}
+static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
+ unsigned long addr)
+{
+ wrb->tag0 = addr & 0xFFFFFFFF;
+ wrb->tag1 = upper_32_bits(addr);
+}
/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
@@ -639,17 +647,12 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
{
struct be_sge *sge;
- unsigned long addr = (unsigned long)req_hdr;
- u64 req_addr = addr;
req_hdr->opcode = opcode;
req_hdr->subsystem = subsystem;
req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
req_hdr->version = 0;
-
- wrb->tag0 = req_addr & 0xFFFFFFFF;
- wrb->tag1 = upper_32_bits(req_addr);
-
+ fill_wrb_tags(wrb, (ulong) req_hdr);
wrb->payload_length = cmd_len;
if (mem) {
wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
@@ -676,31 +679,6 @@ static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
}
}
-/* Converts interrupt delay in microseconds to multiplier value */
-static u32 eq_delay_to_mult(u32 usec_delay)
-{
-#define MAX_INTR_RATE 651042
- const u32 round = 10;
- u32 multiplier;
-
- if (usec_delay == 0)
- multiplier = 0;
- else {
- u32 interrupt_rate = 1000000 / usec_delay;
- /* Max delay, corresponding to the lowest interrupt rate */
- if (interrupt_rate == 0)
- multiplier = 1023;
- else {
- multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
- multiplier /= interrupt_rate;
- /* Round the multiplier to the closest value.*/
- multiplier = (multiplier + round/2) / round;
- multiplier = min(multiplier, (u32)1023);
- }
- }
- return multiplier;
-}
-
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
@@ -728,6 +706,78 @@ static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
return wrb;
}
+static bool use_mcc(struct be_adapter *adapter)
+{
+ return adapter->mcc_obj.q.created;
+}
+
+/* Must be used only in process context */
+static int be_cmd_lock(struct be_adapter *adapter)
+{
+ if (use_mcc(adapter)) {
+ spin_lock_bh(&adapter->mcc_lock);
+ return 0;
+ } else {
+ return mutex_lock_interruptible(&adapter->mbox_lock);
+ }
+}
+
+/* Must be used only in process context */
+static void be_cmd_unlock(struct be_adapter *adapter)
+{
+ if (use_mcc(adapter))
+ spin_unlock_bh(&adapter->mcc_lock);
+ else
+ return mutex_unlock(&adapter->mbox_lock);
+}
+
+static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
+ struct be_mcc_wrb *wrb)
+{
+ struct be_mcc_wrb *dest_wrb;
+
+ if (use_mcc(adapter)) {
+ dest_wrb = wrb_from_mccq(adapter);
+ if (!dest_wrb)
+ return NULL;
+ } else {
+ dest_wrb = wrb_from_mbox(adapter);
+ }
+
+ memcpy(dest_wrb, wrb, sizeof(*wrb));
+ if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
+ fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));
+
+ return dest_wrb;
+}
+
+/* Must be used only in process context */
+static int be_cmd_notify_wait(struct be_adapter *adapter,
+ struct be_mcc_wrb *wrb)
+{
+ struct be_mcc_wrb *dest_wrb;
+ int status;
+
+ status = be_cmd_lock(adapter);
+ if (status)
+ return status;
+
+ dest_wrb = be_cmd_copy(adapter, wrb);
+ if (!dest_wrb)
+ return -EBUSY;
+
+ if (use_mcc(adapter))
+ status = be_mcc_notify_wait(adapter);
+ else
+ status = be_mbox_notify_wait(adapter);
+
+ if (!status)
+ memcpy(wrb, dest_wrb, sizeof(*wrb));
+
+ be_cmd_unlock(adapter);
+ return status;
+}
+
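be_cmd_notify_wait() lets a command be built in a stack wrb and then routed to the MCC queue when it exists, or to the mailbox during early init, with be_cmd_copy() fixing up the embedded-payload tags after the copy. The reworked be_cmd_txq_create() and be_cmd_if_create() below use exactly this shape; a condensed sketch of the pattern, which would live inside be_cmds.c since the helpers are static (the request, response, and opcode names here are placeholders, not real definitions):

/* Sketch: issue an embedded command that must work both before and after
 * the MCC queue is created.
 */
static int example_issue_cmd(struct be_adapter *adapter)
{
	struct be_mcc_wrb wrb = {0};
	struct be_cmd_req_example *req = embedded_payload(&wrb);	/* placeholder type */
	int status;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_EXAMPLE, sizeof(*req), &wrb, NULL);
	/* ... fill request fields here ... */

	status = be_cmd_notify_wait(adapter, &wrb);	/* MCCQ if created, else mbox */
	if (!status) {
		struct be_cmd_resp_example *resp = embedded_payload(&wrb);
		/* the response was copied back into the stack wrb */
		(void)resp;
	}
	return status;
}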
/* Tell fw we're about to start firing cmds by writing a
* special pattern across the wrb hdr; uses mbox
*/
@@ -788,13 +838,12 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
return status;
}
-int be_cmd_eq_create(struct be_adapter *adapter,
- struct be_queue_info *eq, int eq_delay)
+int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_eq_create *req;
- struct be_dma_mem *q_mem = &eq->dma_mem;
- int status;
+ struct be_dma_mem *q_mem = &eqo->q.dma_mem;
+ int status, ver = 0;
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -805,15 +854,18 @@ int be_cmd_eq_create(struct be_adapter *adapter,
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
+ /* Support for EQ_CREATEv2 available only from SH-R onwards */
+ if (!(BEx_chip(adapter) || lancer_chip(adapter)))
+ ver = 2;
+
+ req->hdr.version = ver;
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
/* 4byte eqe*/
AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
AMAP_SET_BITS(struct amap_eq_context, count, req->context,
- __ilog2_u32(eq->len/256));
- AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
- eq_delay_to_mult(eq_delay));
+ __ilog2_u32(eqo->q.len / 256));
be_dws_cpu_to_le(req->context, sizeof(req->context));
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -821,8 +873,10 @@ int be_cmd_eq_create(struct be_adapter *adapter,
status = be_mbox_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
- eq->id = le16_to_cpu(resp->eq_id);
- eq->created = true;
+ eqo->q.id = le16_to_cpu(resp->eq_id);
+ eqo->msix_idx =
+ (ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
+ eqo->q.created = true;
}
mutex_unlock(&adapter->mbox_lock);
@@ -1010,9 +1064,9 @@ static u32 be_encoded_q_len(int q_len)
return len_encoded;
}
-int be_cmd_mccq_ext_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mcc_ext_create *req;
@@ -1068,9 +1122,9 @@ int be_cmd_mccq_ext_create(struct be_adapter *adapter,
return status;
}
-int be_cmd_mccq_org_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+static int be_cmd_mccq_org_create(struct be_adapter *adapter,
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mcc_create *req;
@@ -1128,25 +1182,16 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
{
- struct be_mcc_wrb *wrb;
+ struct be_mcc_wrb wrb = {0};
struct be_cmd_req_eth_tx_create *req;
struct be_queue_info *txq = &txo->q;
struct be_queue_info *cq = &txo->cq;
struct be_dma_mem *q_mem = &txq->dma_mem;
int status, ver = 0;
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
-
- req = embedded_payload(wrb);
-
+ req = embedded_payload(&wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
if (lancer_chip(adapter)) {
req->hdr.version = 1;
@@ -1164,12 +1209,11 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
req->cq_id = cpu_to_le16(cq->id);
req->queue_size = be_encoded_q_len(txq->len);
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
-
ver = req->hdr.version;
- status = be_mcc_notify_wait(adapter);
+ status = be_cmd_notify_wait(adapter, &wrb);
if (!status) {
- struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
+ struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
txq->id = le16_to_cpu(resp->cid);
if (ver == 2)
txo->db_offset = le32_to_cpu(resp->db_offset);
@@ -1178,9 +1222,6 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
txq->created = true;
}
-err:
- spin_unlock_bh(&adapter->mcc_lock);
-
return status;
}
@@ -1309,40 +1350,32 @@ err:
}
/* Create an rx filtering policy configuration on an i/f
- * Uses MCCQ
+ * Will use MBOX only if MCCQ has not been created.
*/
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
u32 *if_handle, u32 domain)
{
- struct be_mcc_wrb *wrb;
+ struct be_mcc_wrb wrb = {0};
struct be_cmd_req_if_create *req;
int status;
- spin_lock_bh(&adapter->mcc_lock);
-
- wrb = wrb_from_mccq(adapter);
- if (!wrb) {
- status = -EBUSY;
- goto err;
- }
- req = embedded_payload(wrb);
-
+ req = embedded_payload(&wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL);
req->hdr.domain = domain;
req->capability_flags = cpu_to_le32(cap_flags);
req->enable_flags = cpu_to_le32(en_flags);
-
req->pmac_invalid = true;
- status = be_mcc_notify_wait(adapter);
+ status = be_cmd_notify_wait(adapter, &wrb);
if (!status) {
- struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
+ struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
*if_handle = le32_to_cpu(resp->interface_id);
- }
-err:
- spin_unlock_bh(&adapter->mcc_lock);
+ /* Hack to retrieve VF's pmac-id on BE3 */
+ if (BE3_chip(adapter) && !be_physfn(adapter))
+ adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
+ }
return status;
}
@@ -1460,6 +1493,12 @@ static int be_mac_to_link_speed(int mac_speed)
return 1000;
case PHY_LINK_SPEED_10GBPS:
return 10000;
+ case PHY_LINK_SPEED_20GBPS:
+ return 20000;
+ case PHY_LINK_SPEED_25GBPS:
+ return 25000;
+ case PHY_LINK_SPEED_40GBPS:
+ return 40000;
}
return 0;
}
@@ -1520,7 +1559,7 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_cntl_addnl_attribs *req;
- int status;
+ int status = 0;
spin_lock_bh(&adapter->mcc_lock);
@@ -1785,8 +1824,7 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
*/
req->if_flags_mask |=
cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
- adapter->if_cap_flags);
-
+ be_if_cap_flags(adapter));
req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
netdev_for_each_mc_addr(ha, adapter->netdev)
memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
@@ -2444,6 +2482,12 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
le16_to_cpu(resp_phy_info->fixed_speeds_supported);
adapter->phy.misc_params =
le32_to_cpu(resp_phy_info->misc_params);
+
+ if (BE2_chip(adapter)) {
+ adapter->phy.fixed_speeds_supported =
+ BE_SUPPORTED_SPEED_10GBPS |
+ BE_SUPPORTED_SPEED_1GBPS;
+ }
}
pci_free_consistent(adapter->pdev, cmd.size,
cmd.va, cmd.dma);
@@ -2606,9 +2650,44 @@ err:
return status;
}
-/* Uses synchronous MCCQ */
+/* Set privilege(s) for a function */
+int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
+ u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_fn_privileges *req;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = embedded_payload(wrb);
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
+ wrb, NULL);
+ req->hdr.domain = domain;
+ if (lancer_chip(adapter))
+ req->privileges_lancer = cpu_to_le32(privileges);
+ else
+ req->privileges = cpu_to_le32(privileges);
+
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
+/* pmac_id_valid: true => pmac_id is supplied and MAC address is requested.
+ * pmac_id_valid: false => pmac_id or MAC address is requested.
+ * If pmac_id is returned, pmac_id_valid is returned as true
+ */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
- bool *pmac_id_active, u32 *pmac_id, u8 domain)
+ bool *pmac_id_valid, u32 *pmac_id, u8 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_mac_list *req;
@@ -2644,12 +2723,25 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
req->hdr.domain = domain;
req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
- req->perm_override = 1;
+ if (*pmac_id_valid) {
+ req->mac_id = cpu_to_le32(*pmac_id);
+ req->iface_id = cpu_to_le16(adapter->if_handle);
+ req->perm_override = 0;
+ } else {
+ req->perm_override = 1;
+ }
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_get_mac_list *resp =
get_mac_list_cmd.va;
+
+ if (*pmac_id_valid) {
+ memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
+ ETH_ALEN);
+ goto out;
+ }
+
mac_count = resp->true_mac_count + resp->pseudo_mac_count;
/* Mac list returned could contain one or more active mac_ids
 * or one or more true or pseudo permanent mac addresses.
@@ -2667,14 +2759,14 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
* is 6 bytes
*/
if (mac_addr_size == sizeof(u32)) {
- *pmac_id_active = true;
+ *pmac_id_valid = true;
mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
*pmac_id = le32_to_cpu(mac_id);
goto out;
}
}
/* If no active mac_id found, return first mac addr */
- *pmac_id_active = false;
+ *pmac_id_valid = false;
memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
ETH_ALEN);
}
@@ -2686,6 +2778,41 @@ out:
return status;
}
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac)
+{
+ bool active = true;
+
+ if (BEx_chip(adapter))
+ return be_cmd_mac_addr_query(adapter, mac, false,
+ adapter->if_handle, curr_pmac_id);
+ else
+ /* Fetch the MAC address using pmac_id */
+ return be_cmd_get_mac_from_list(adapter, mac, &active,
+ &curr_pmac_id, 0);
+}
+
+int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
+{
+ int status;
+ bool pmac_valid = false;
+
+ memset(mac, 0, ETH_ALEN);
+
+ if (BEx_chip(adapter)) {
+ if (be_physfn(adapter))
+ status = be_cmd_mac_addr_query(adapter, mac, true, 0,
+ 0);
+ else
+ status = be_cmd_mac_addr_query(adapter, mac, false,
+ adapter->if_handle, 0);
+ } else {
+ status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
+ NULL, 0);
+ }
+
+ return status;
+}
+
/* Uses synchronous MCCQ */
int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
u8 mac_count, u32 domain)
@@ -2729,8 +2856,27 @@ err:
return status;
}
+/* Wrapper to delete any active MACs and provision the new MAC.
+ * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
+ * current list are active.
+ */
+int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
+{
+ bool active_mac = false;
+ u8 old_mac[ETH_ALEN];
+ u32 pmac_id;
+ int status;
+
+ status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
+ &pmac_id, dom);
+ if (!status && active_mac)
+ be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
+
+ return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
+}
+
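Note that calling be_cmd_set_mac() with a NULL mac passes mac_count 0 to
be_cmd_set_mac_list(), i.e. it clears the provisioned MAC list; this is how
be_vf_clear() in be_main.c uses it below to tear down VF MACs on non-BEx chips.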
int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
- u32 domain, u16 intf_id)
+ u32 domain, u16 intf_id, u16 hsw_mode)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_set_hsw_config *req;
@@ -2757,6 +2903,13 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
}
+ if (!BEx_chip(adapter) && hsw_mode) {
+ AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
+ ctxt, adapter->hba_port_num);
+ AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
+ AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
+ ctxt, hsw_mode);
+ }
be_dws_cpu_to_le(req->context, sizeof(req->context));
status = be_mcc_notify_wait(adapter);
@@ -2768,7 +2921,7 @@ err:
/* Get Hyper switch config */
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
- u32 domain, u16 intf_id)
+ u32 domain, u16 intf_id, u8 *mode)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_hsw_config *req;
@@ -2791,9 +2944,15 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
req->hdr.domain = domain;
- AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
- intf_id);
+ AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
+ ctxt, intf_id);
AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
+
+ if (!BEx_chip(adapter)) {
+ AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
+ ctxt, adapter->hba_port_num);
+ AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
+ }
be_dws_cpu_to_le(req->context, sizeof(req->context));
status = be_mcc_notify_wait(adapter);
@@ -2804,7 +2963,11 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
sizeof(resp->context));
vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
pvid, &resp->context);
- *pvid = le16_to_cpu(vid);
+ if (pvid)
+ *pvid = le16_to_cpu(vid);
+ if (mode)
+ *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
+ port_fwd_type, &resp->context);
}
err:
@@ -2967,30 +3130,63 @@ err:
return status;
}
-static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
- u32 max_buf_size)
+static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count)
{
- struct be_nic_resource_desc *desc = (struct be_nic_resource_desc *)buf;
+ struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
int i;
for (i = 0; i < desc_count; i++) {
- desc->desc_len = desc->desc_len ? : RESOURCE_DESC_SIZE;
- if (((void *)desc + desc->desc_len) >
- (void *)(buf + max_buf_size))
- return NULL;
+ if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
+ hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
+ return (struct be_nic_res_desc *)hdr;
- if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
- desc->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
- return desc;
-
- desc = (void *)desc + desc->desc_len;
+ hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
+ hdr = (void *)hdr + hdr->desc_len;
}
+ return NULL;
+}
+static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
+ u32 desc_count)
+{
+ struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
+ struct be_pcie_res_desc *pcie;
+ int i;
+
+ for (i = 0; i < desc_count; i++) {
+ if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
+ hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
+ pcie = (struct be_pcie_res_desc *)hdr;
+ if (pcie->pf_num == devfn)
+ return pcie;
+ }
+
+ hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
+ hdr = (void *)hdr + hdr->desc_len;
+ }
return NULL;
}
+static void be_copy_nic_desc(struct be_resources *res,
+ struct be_nic_res_desc *desc)
+{
+ res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
+ res->max_vlans = le16_to_cpu(desc->vlan_count);
+ res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
+ res->max_tx_qs = le16_to_cpu(desc->txq_count);
+ res->max_rss_qs = le16_to_cpu(desc->rssq_count);
+ res->max_rx_qs = le16_to_cpu(desc->rq_count);
+ res->max_evt_qs = le16_to_cpu(desc->eq_count);
+ /* Clear flags that driver is not interested in */
+ res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
+ BE_IF_CAP_FLAGS_WANT;
+ /* Need 1 RXQ as the default RXQ */
+ if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs)
+ res->max_rss_qs -= 1;
+}
+
/* Uses Mbox */
-int be_cmd_get_func_config(struct be_adapter *adapter)
+int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_func_config *req;
@@ -3029,25 +3225,16 @@ int be_cmd_get_func_config(struct be_adapter *adapter)
if (!status) {
struct be_cmd_resp_get_func_config *resp = cmd.va;
u32 desc_count = le32_to_cpu(resp->desc_count);
- struct be_nic_resource_desc *desc;
+ struct be_nic_res_desc *desc;
- desc = be_get_nic_desc(resp->func_param, desc_count,
- sizeof(resp->func_param));
+ desc = be_get_nic_desc(resp->func_param, desc_count);
if (!desc) {
status = -EINVAL;
goto err;
}
adapter->pf_number = desc->pf_num;
- adapter->max_pmac_cnt = le16_to_cpu(desc->unicast_mac_count);
- adapter->max_vlans = le16_to_cpu(desc->vlan_count);
- adapter->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
- adapter->max_tx_queues = le16_to_cpu(desc->txq_count);
- adapter->max_rss_queues = le16_to_cpu(desc->rssq_count);
- adapter->max_rx_queues = le16_to_cpu(desc->rq_count);
-
- adapter->max_event_queues = le16_to_cpu(desc->eq_count);
- adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
+ be_copy_nic_desc(res, desc);
}
err:
mutex_unlock(&adapter->mbox_lock);
@@ -3057,8 +3244,8 @@ err:
}
/* Uses mbox */
-int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
- u8 domain, struct be_dma_mem *cmd)
+static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
+ u8 domain, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_profile_config *req;
@@ -3085,8 +3272,8 @@ int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
}
/* Uses sync mcc */
-int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
- u8 domain, struct be_dma_mem *cmd)
+static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
+ u8 domain, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_profile_config *req;
@@ -3118,54 +3305,51 @@ err:
}
/* Uses sync mcc, if MCCQ is already created otherwise mbox */
-int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
- u16 *txq_count, u8 domain)
+int be_cmd_get_profile_config(struct be_adapter *adapter,
+ struct be_resources *res, u8 domain)
{
+ struct be_cmd_resp_get_profile_config *resp;
+ struct be_pcie_res_desc *pcie;
+ struct be_nic_res_desc *nic;
struct be_queue_info *mccq = &adapter->mcc_obj.q;
struct be_dma_mem cmd;
+ u32 desc_count;
int status;
memset(&cmd, 0, sizeof(struct be_dma_mem));
- if (!lancer_chip(adapter))
- cmd.size = sizeof(struct be_cmd_resp_get_profile_config_v1);
- else
- cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
- &cmd.dma);
- if (!cmd.va) {
- dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+ if (!cmd.va)
return -ENOMEM;
- }
if (!mccq->created)
status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd);
else
status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd);
- if (!status) {
- struct be_cmd_resp_get_profile_config *resp = cmd.va;
- u32 desc_count = le32_to_cpu(resp->desc_count);
- struct be_nic_resource_desc *desc;
+ if (status)
+ goto err;
- desc = be_get_nic_desc(resp->func_param, desc_count,
- sizeof(resp->func_param));
+ resp = cmd.va;
+ desc_count = le32_to_cpu(resp->desc_count);
+
+ pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
+ desc_count);
+ if (pcie)
+ res->max_vfs = le16_to_cpu(pcie->num_vfs);
+
+ nic = be_get_nic_desc(resp->func_param, desc_count);
+ if (nic)
+ be_copy_nic_desc(res, nic);
- if (!desc) {
- status = -EINVAL;
- goto err;
- }
- if (cap_flags)
- *cap_flags = le32_to_cpu(desc->cap_flags);
- if (txq_count)
- *txq_count = le32_to_cpu(desc->txq_count);
- }
err:
if (cmd.va)
- pci_free_consistent(adapter->pdev, cmd.size,
- cmd.va, cmd.dma);
+ pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
return status;
}
-/* Uses sync mcc */
+/* Currently only Lancer uses this command and it supports version 0 only.
+ * Uses sync mcc.
+ */
int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
u8 domain)
{
@@ -3186,12 +3370,10 @@ int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
wrb, NULL);
-
req->hdr.domain = domain;
req->desc_count = cpu_to_le32(1);
-
- req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
- req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
+ req->nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
+ req->nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
req->nic_desc.pf_num = adapter->pf_number;
req->nic_desc.vf_num = domain;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 5228d88c5a0..d026226db88 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -202,6 +202,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
#define OPCODE_COMMON_GET_PORT_NAME 77
#define OPCODE_COMMON_SET_INTERRUPT_ENABLE 89
+#define OPCODE_COMMON_SET_FN_PRIVILEGES 100
#define OPCODE_COMMON_GET_PHY_DETAILS 102
#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
@@ -306,7 +307,7 @@ struct be_cmd_req_eq_create {
struct be_cmd_resp_eq_create {
struct be_cmd_resp_hdr resp_hdr;
u16 eq_id; /* sword */
- u16 rsvd0; /* sword */
+ u16 msix_idx; /* available only in v2 */
} __packed;
/******************** Mac query ***************************/
@@ -563,6 +564,12 @@ enum be_if_flags {
BE_IF_FLAGS_MULTICAST = 0x1000
};
+#define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\
+ BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\
+ BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\
+ BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\
+ BE_IF_FLAGS_UNTAGGED)
+
/* An RX interface is an object with one or more MAC addresses and
* filtering capabilities. */
struct be_cmd_req_if_create {
@@ -959,7 +966,10 @@ enum {
PHY_LINK_SPEED_10MBPS = 0x1,
PHY_LINK_SPEED_100MBPS = 0x2,
PHY_LINK_SPEED_1GBPS = 0x3,
- PHY_LINK_SPEED_10GBPS = 0x4
+ PHY_LINK_SPEED_10GBPS = 0x4,
+ PHY_LINK_SPEED_20GBPS = 0x5,
+ PHY_LINK_SPEED_25GBPS = 0x6,
+ PHY_LINK_SPEED_40GBPS = 0x7
};
struct be_cmd_resp_link_status {
@@ -1474,6 +1484,11 @@ struct be_cmd_resp_get_fn_privileges {
u32 privilege_mask;
};
+struct be_cmd_req_set_fn_privileges {
+ struct be_cmd_req_hdr hdr;
+ u32 privileges; /* Used by BE3, SH-R */
+ u32 privileges_lancer; /* Used by Lancer */
+};
/******************** GET/SET_MACLIST **************************/
#define BE_MAX_MAC 64
@@ -1518,12 +1533,17 @@ struct be_cmd_req_set_mac_list {
} __packed;
/*********************** HSW Config ***********************/
+#define PORT_FWD_TYPE_VEPA 0x3
+#define PORT_FWD_TYPE_VEB 0x2
+
struct amap_set_hsw_context {
u8 interface_id[16];
u8 rsvd0[14];
u8 pvid_valid;
- u8 rsvd1;
- u8 rsvd2[16];
+ u8 pport;
+ u8 rsvd1[6];
+ u8 port_fwd_type[3];
+ u8 rsvd2[7];
u8 pvid[16];
u8 rsvd3[32];
u8 rsvd4[32];
@@ -1548,7 +1568,9 @@ struct amap_get_hsw_req_context {
} __packed;
struct amap_get_hsw_resp_context {
- u8 rsvd1[16];
+ u8 rsvd0[6];
+ u8 port_fwd_type[3];
+ u8 rsvd1[7];
u8 pvid[16];
u8 rsvd2[32];
u8 rsvd3[32];
@@ -1703,11 +1725,13 @@ struct be_cmd_req_set_ext_fat_caps {
struct be_fat_conf_params set_params;
};
-#define RESOURCE_DESC_SIZE 88
+#define RESOURCE_DESC_SIZE_V0 72
+#define RESOURCE_DESC_SIZE_V1 88
+#define PCIE_RESOURCE_DESC_TYPE_V0 0x40
#define NIC_RESOURCE_DESC_TYPE_V0 0x41
+#define PCIE_RESOURCE_DESC_TYPE_V1 0x50
#define NIC_RESOURCE_DESC_TYPE_V1 0x51
-#define MAX_RESOURCE_DESC 4
-#define MAX_RESOURCE_DESC_V1 32
+#define MAX_RESOURCE_DESC 264
/* QOS unit number */
#define QUN 4
@@ -1716,9 +1740,30 @@ struct be_cmd_req_set_ext_fat_caps {
/* No save */
#define NOSV 7
-struct be_nic_resource_desc {
+struct be_res_desc_hdr {
u8 desc_type;
u8 desc_len;
+} __packed;
+
+struct be_pcie_res_desc {
+ struct be_res_desc_hdr hdr;
+ u8 rsvd0;
+ u8 flags;
+ u16 rsvd1;
+ u8 pf_num;
+ u8 rsvd2;
+ u32 rsvd3;
+ u8 sriov_state;
+ u8 pf_state;
+ u8 pf_type;
+ u8 rsvd4;
+ u16 num_vfs;
+ u16 rsvd5;
+ u32 rsvd6[17];
+} __packed;
+
+struct be_nic_res_desc {
+ struct be_res_desc_hdr hdr;
u8 rsvd1;
u8 flags;
u8 vf_num;
@@ -1747,7 +1792,7 @@ struct be_nic_resource_desc {
u8 wol_param;
u16 rsvd7;
u32 rsvd8[3];
-};
+} __packed;
struct be_cmd_req_get_func_config {
struct be_cmd_req_hdr hdr;
@@ -1756,7 +1801,7 @@ struct be_cmd_req_get_func_config {
struct be_cmd_resp_get_func_config {
struct be_cmd_resp_hdr hdr;
u32 desc_count;
- u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
+ u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1];
};
#define ACTIVE_PROFILE_TYPE 0x2
@@ -1768,26 +1813,20 @@ struct be_cmd_req_get_profile_config {
};
struct be_cmd_resp_get_profile_config {
- struct be_cmd_req_hdr hdr;
- u32 desc_count;
- u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE];
-};
-
-struct be_cmd_resp_get_profile_config_v1 {
- struct be_cmd_req_hdr hdr;
+ struct be_cmd_resp_hdr hdr;
u32 desc_count;
- u8 func_param[MAX_RESOURCE_DESC_V1 * RESOURCE_DESC_SIZE];
+ u8 func_param[MAX_RESOURCE_DESC * RESOURCE_DESC_SIZE_V1];
};
struct be_cmd_req_set_profile_config {
struct be_cmd_req_hdr hdr;
u32 rsvd;
u32 desc_count;
- struct be_nic_resource_desc nic_desc;
+ struct be_nic_res_desc nic_desc;
};
struct be_cmd_resp_set_profile_config {
- struct be_cmd_req_hdr hdr;
+ struct be_cmd_resp_hdr hdr;
};
struct be_cmd_enable_disable_vf {
@@ -1836,8 +1875,7 @@ extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
u32 en_flags, u32 *if_handle, u32 domain);
extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
u32 domain);
-extern int be_cmd_eq_create(struct be_adapter *adapter,
- struct be_queue_info *eq, int eq_delay);
+extern int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo);
extern int be_cmd_cq_create(struct be_adapter *adapter,
struct be_queue_info *cq, struct be_queue_info *eq,
bool no_delay, int num_cqe_dma_coalesce);
@@ -1921,15 +1959,22 @@ extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
extern int be_cmd_get_fn_privileges(struct be_adapter *adapter,
u32 *privilege, u32 domain);
+extern int be_cmd_set_fn_privileges(struct be_adapter *adapter,
+ u32 privileges, u32 vf_num);
extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
bool *pmac_id_active, u32 *pmac_id,
u8 domain);
+extern int be_cmd_get_active_mac(struct be_adapter *adapter, u32 pmac_id,
+ u8 *mac);
+extern int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac);
extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
u8 mac_count, u32 domain);
+extern int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id,
+ u32 dom);
extern int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
- u32 domain, u16 intf_id);
+ u32 domain, u16 intf_id, u16 hsw_mode);
extern int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
- u32 domain, u16 intf_id);
+ u32 domain, u16 intf_id, u8 *mode);
extern int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter);
extern int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
struct be_dma_mem *cmd);
@@ -1942,10 +1987,10 @@ extern int lancer_initiate_dump(struct be_adapter *adapter);
extern bool dump_present(struct be_adapter *adapter);
extern int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
extern int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
-extern int be_cmd_get_func_config(struct be_adapter *adapter);
-extern int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
- u16 *txq_count, u8 domain);
-
+int be_cmd_get_func_config(struct be_adapter *adapter,
+ struct be_resources *res);
+int be_cmd_get_profile_config(struct be_adapter *adapter,
+ struct be_resources *res, u8 domain);
extern int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
u8 domain);
extern int be_cmd_get_if_id(struct be_adapter *adapter,
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 4f8c941217c..b440a1fac77 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1119,6 +1119,29 @@ static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
return status;
}
+static void be_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ ch->combined_count = adapter->num_evt_qs;
+ ch->max_combined = be_max_qs(adapter);
+}
+
+static int be_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+
+ if (ch->rx_count || ch->tx_count || ch->other_count ||
+ !ch->combined_count || ch->combined_count > be_max_qs(adapter))
+ return -EINVAL;
+
+ adapter->cfg_num_qs = ch->combined_count;
+
+ return be_update_queues(adapter);
+}
+
const struct ethtool_ops be_ethtool_ops = {
.get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
@@ -1145,4 +1168,6 @@ const struct ethtool_ops be_ethtool_ops = {
.self_test = be_self_test,
.get_rxnfc = be_get_rxnfc,
.set_rxnfc = be_set_rxnfc,
+ .get_channels = be_get_channels,
+ .set_channels = be_set_channels
};
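With get_channels/set_channels wired up, the queue count becomes tunable at
runtime through the standard ethtool channels interface, e.g. "ethtool -l eth0"
to read the current and maximum combined counts and "ethtool -L eth0 combined 4"
to resize (interface name and count are illustrative): set_channels stores the
request in cfg_num_qs and rebuilds the queues via be_update_queues().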
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 181edb52245..3224d28cdad 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -21,6 +21,7 @@
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>
+#include <linux/if_bridge.h>
MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -145,8 +146,8 @@ static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
q->len = len;
q->entry_size = entry_size;
mem->size = len * entry_size;
- mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
- GFP_KERNEL | __GFP_ZERO);
+ mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
+ GFP_KERNEL);
if (!mem->va)
return -ENOMEM;
return 0;
@@ -247,54 +248,54 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->pdev->dev;
struct sockaddr *addr = p;
- int status = 0;
- u8 current_mac[ETH_ALEN];
- u32 pmac_id = adapter->pmac_id[0];
- bool active_mac = true;
+ int status;
+ u8 mac[ETH_ALEN];
+ u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- /* For BE VF, MAC address is already activated by PF.
- * Hence only operation left is updating netdev->devaddr.
- * Update it if user is passing the same MAC which was used
- * during configuring VF MAC from PF(Hypervisor).
+ /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
+ * privilege or if PF did not provision the new MAC address.
+ * On BE3, this cmd will always fail if the VF doesn't have the
+ * FILTMGMT privilege. This failure is OK only if the PF has
+ * programmed the MAC for the VF.
*/
- if (!lancer_chip(adapter) && !be_physfn(adapter)) {
- status = be_cmd_mac_addr_query(adapter, current_mac,
- false, adapter->if_handle, 0);
- if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
- goto done;
- else
- goto err;
- }
+ status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
+ adapter->if_handle, &adapter->pmac_id[0], 0);
+ if (!status) {
+ curr_pmac_id = adapter->pmac_id[0];
- if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
- goto done;
+ /* Delete the old programmed MAC. This call may fail if the
+ * old MAC was already deleted by the PF driver.
+ */
+ if (adapter->pmac_id[0] != old_pmac_id)
+ be_cmd_pmac_del(adapter, adapter->if_handle,
+ old_pmac_id, 0);
+ }
- /* For Lancer check if any MAC is active.
- * If active, get its mac id.
+ /* Decide if the new MAC is successfully activated only after
+ * querying the FW
*/
- if (lancer_chip(adapter) && !be_physfn(adapter))
- be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
- &pmac_id, 0);
-
- status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
- adapter->if_handle,
- &adapter->pmac_id[0], 0);
-
+ status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac);
if (status)
goto err;
- if (active_mac)
- be_cmd_pmac_del(adapter, adapter->if_handle,
- pmac_id, 0);
-done:
+ /* The MAC change did not happen, either due to lack of privilege
+ * or because the PF did not pre-provision the new MAC.
+ */
+ if (memcmp(addr->sa_data, mac, ETH_ALEN)) {
+ status = -EPERM;
+ goto err;
+ }
+
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ dev_info(dev, "MAC address changed to %pM\n", mac);
return 0;
err:
- dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
+ dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
return status;
}
@@ -472,7 +473,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
ACCESS_ONCE(*acc) = newacc;
}
-void populate_erx_stats(struct be_adapter *adapter,
+static void populate_erx_stats(struct be_adapter *adapter,
struct be_rx_obj *rxo,
u32 erx_stat)
{
@@ -1001,7 +1002,7 @@ static int be_vid_config(struct be_adapter *adapter)
if (adapter->promiscuous)
return 0;
- if (adapter->vlans_added > adapter->max_vlans)
+ if (adapter->vlans_added > be_max_vlans(adapter))
goto set_vlan_promisc;
/* Construct VLAN Table to give to HW */
@@ -1042,7 +1043,7 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
goto ret;
adapter->vlan_tag[vid] = 1;
- if (adapter->vlans_added <= (adapter->max_vlans + 1))
+ if (adapter->vlans_added <= (be_max_vlans(adapter) + 1))
status = be_vid_config(adapter);
if (!status)
@@ -1068,7 +1069,7 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
goto ret;
adapter->vlan_tag[vid] = 0;
- if (adapter->vlans_added <= adapter->max_vlans)
+ if (adapter->vlans_added <= be_max_vlans(adapter))
status = be_vid_config(adapter);
if (!status)
@@ -1101,7 +1102,7 @@ static void be_set_rx_mode(struct net_device *netdev)
/* Enable multicast promisc if num configured exceeds what we support */
if (netdev->flags & IFF_ALLMULTI ||
- netdev_mc_count(netdev) > adapter->max_mcast_mac) {
+ netdev_mc_count(netdev) > be_max_mc(adapter)) {
be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
goto done;
}
@@ -1115,7 +1116,7 @@ static void be_set_rx_mode(struct net_device *netdev)
adapter->pmac_id[i], 0);
}
- if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
+ if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
adapter->promiscuous = true;
goto done;
@@ -1146,9 +1147,6 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
int status;
- bool active_mac = false;
- u32 pmac_id;
- u8 old_mac[ETH_ALEN];
if (!sriov_enabled(adapter))
return -EPERM;
@@ -1156,20 +1154,15 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
return -EINVAL;
- if (lancer_chip(adapter)) {
- status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
- &pmac_id, vf + 1);
- if (!status && active_mac)
- be_cmd_pmac_del(adapter, vf_cfg->if_handle,
- pmac_id, vf + 1);
-
- status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
- } else {
- status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
- vf_cfg->pmac_id, vf + 1);
+ if (BEx_chip(adapter)) {
+ be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
+ vf + 1);
status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
&vf_cfg->pmac_id, vf + 1);
+ } else {
+ status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
+ vf + 1);
}
if (status)
@@ -1220,14 +1213,14 @@ static int be_set_vf_vlan(struct net_device *netdev,
adapter->vf_cfg[vf].vlan_tag = vlan;
status = be_cmd_set_hsw_config(adapter, vlan,
- vf + 1, adapter->vf_cfg[vf].if_handle);
+ vf + 1, adapter->vf_cfg[vf].if_handle, 0);
}
} else {
/* Reset Transparent Vlan Tagging. */
adapter->vf_cfg[vf].vlan_tag = 0;
vlan = adapter->vf_cfg[vf].def_vid;
status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
- adapter->vf_cfg[vf].if_handle);
+ adapter->vf_cfg[vf].if_handle, 0);
}
@@ -1490,8 +1483,9 @@ static void be_rx_compl_process(struct be_rx_obj *rxo,
}
/* Process the RX completion indicated by rxcp when GRO is enabled */
-void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
- struct be_rx_compl_info *rxcp)
+static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
+ struct napi_struct *napi,
+ struct be_rx_compl_info *rxcp)
{
struct be_adapter *adapter = rxo->adapter;
struct be_rx_page_info *page_info;
@@ -1920,6 +1914,7 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
if (eqo->q.created) {
be_eq_clean(eqo);
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
+ netif_napi_del(&eqo->napi);
}
be_queue_free(adapter, &eqo->q);
}
@@ -1931,9 +1926,12 @@ static int be_evt_queues_create(struct be_adapter *adapter)
struct be_eq_obj *eqo;
int i, rc;
- adapter->num_evt_qs = num_irqs(adapter);
+ adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
+ adapter->cfg_num_qs);
for_all_evt_queues(adapter, eqo, i) {
+ netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
+ BE_NAPI_WEIGHT);
eqo->adapter = adapter;
eqo->tx_budget = BE_TX_BUDGET;
eqo->idx = i;
@@ -1946,7 +1944,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
if (rc)
return rc;
- rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
+ rc = be_cmd_eq_create(adapter, eqo);
if (rc)
return rc;
}
@@ -2020,31 +2018,13 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
}
}
-static int be_num_txqs_want(struct be_adapter *adapter)
-{
- if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
- be_is_mc(adapter) ||
- (!lancer_chip(adapter) && !be_physfn(adapter)) ||
- BE2_chip(adapter))
- return 1;
- else
- return adapter->max_tx_queues;
-}
-
-static int be_tx_cqs_create(struct be_adapter *adapter)
+static int be_tx_qs_create(struct be_adapter *adapter)
{
struct be_queue_info *cq, *eq;
- int status;
struct be_tx_obj *txo;
- u8 i;
+ int status, i;
- adapter->num_tx_qs = be_num_txqs_want(adapter);
- if (adapter->num_tx_qs != MAX_TX_QS) {
- rtnl_lock();
- netif_set_real_num_tx_queues(adapter->netdev,
- adapter->num_tx_qs);
- rtnl_unlock();
- }
+ adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
for_all_tx_queues(adapter, txo, i) {
cq = &txo->cq;
@@ -2060,16 +2040,7 @@ static int be_tx_cqs_create(struct be_adapter *adapter)
status = be_cmd_cq_create(adapter, cq, eq, false, 3);
if (status)
return status;
- }
- return 0;
-}
-static int be_tx_qs_create(struct be_adapter *adapter)
-{
- struct be_tx_obj *txo;
- int i, status;
-
- for_all_tx_queues(adapter, txo, i) {
status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
sizeof(struct be_eth_wrb));
if (status)
@@ -2105,17 +2076,14 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
struct be_rx_obj *rxo;
int rc, i;
- /* We'll create as many RSS rings as there are irqs.
- * But when there's only one irq there's no use creating RSS rings
+ /* We can create as many RSS rings as there are EQs. */
+ adapter->num_rx_qs = adapter->num_evt_qs;
+
+ /* We'll use RSS only if at least 2 RSS rings are supported.
+ * When RSS is used, we'll need a default RXQ for non-IP traffic.
*/
- adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
- num_irqs(adapter) + 1 : 1;
- if (adapter->num_rx_qs != MAX_RX_QS) {
- rtnl_lock();
- netif_set_real_num_rx_queues(adapter->netdev,
- adapter->num_rx_qs);
- rtnl_unlock();
- }
+ if (adapter->num_rx_qs > 1)
+ adapter->num_rx_qs++;
adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
for_all_rx_queues(adapter, rxo, i) {
@@ -2379,38 +2347,24 @@ static void be_msix_disable(struct be_adapter *adapter)
if (msix_enabled(adapter)) {
pci_disable_msix(adapter->pdev);
adapter->num_msix_vec = 0;
+ adapter->num_msix_roce_vec = 0;
}
}
-static uint be_num_rss_want(struct be_adapter *adapter)
-{
- u32 num = 0;
-
- if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
- (lancer_chip(adapter) ||
- (!sriov_want(adapter) && be_physfn(adapter)))) {
- num = adapter->max_rss_queues;
- num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
- }
- return num;
-}
-
static int be_msix_enable(struct be_adapter *adapter)
{
-#define BE_MIN_MSIX_VECTORS 1
- int i, status, num_vec, num_roce_vec = 0;
+ int i, status, num_vec;
struct device *dev = &adapter->pdev->dev;
- /* If RSS queues are not used, need a vec for default RX Q */
- num_vec = min(be_num_rss_want(adapter), num_online_cpus());
- if (be_roce_supported(adapter)) {
- num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
- (num_online_cpus() + 1));
- num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
- num_vec += num_roce_vec;
- num_vec = min(num_vec, MAX_MSIX_VECTORS);
- }
- num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
+ /* If RoCE is supported, program the max number of NIC vectors that
+ * may be configured via set-channels, along with vectors needed for
+ * RoCE. Else, just program the number we'll use initially.
+ */
+ if (be_roce_supported(adapter))
+ num_vec = min_t(int, 2 * be_max_eqs(adapter),
+ 2 * num_online_cpus());
+ else
+ num_vec = adapter->cfg_num_qs;
for (i = 0; i < num_vec; i++)
adapter->msix_entries[i].entry = i;
@@ -2418,7 +2372,7 @@ static int be_msix_enable(struct be_adapter *adapter)
status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
if (status == 0) {
goto done;
- } else if (status >= BE_MIN_MSIX_VECTORS) {
+ } else if (status >= MIN_MSIX_VECTORS) {
num_vec = status;
status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
num_vec);
@@ -2427,30 +2381,29 @@ static int be_msix_enable(struct be_adapter *adapter)
}
dev_warn(dev, "MSIx enable failed\n");
+
/* INTx is not supported in VFs, so fail probe if enable_msix fails */
if (!be_physfn(adapter))
return status;
return 0;
done:
- if (be_roce_supported(adapter)) {
- if (num_vec > num_roce_vec) {
- adapter->num_msix_vec = num_vec - num_roce_vec;
- adapter->num_msix_roce_vec =
- num_vec - adapter->num_msix_vec;
- } else {
- adapter->num_msix_vec = num_vec;
- adapter->num_msix_roce_vec = 0;
- }
- } else
- adapter->num_msix_vec = num_vec;
- dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
+ if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
+ adapter->num_msix_roce_vec = num_vec / 2;
+ dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
+ adapter->num_msix_roce_vec);
+ }
+
+ adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
+
+ dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
+ adapter->num_msix_vec);
return 0;
}
static inline int be_msix_vec_get(struct be_adapter *adapter,
struct be_eq_obj *eqo)
{
- return adapter->msix_entries[eqo->idx].vector;
+ return adapter->msix_entries[eqo->msix_idx].vector;
}
static int be_msix_register(struct be_adapter *adapter)
@@ -2563,8 +2516,8 @@ static int be_close(struct net_device *netdev)
/* Wait for all pending tx completions to arrive so that
* all tx skbs are freed.
*/
- be_tx_compl_clean(adapter);
netif_tx_disable(netdev);
+ be_tx_compl_clean(adapter);
be_rx_qs_destroy(adapter);
@@ -2690,8 +2643,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
memset(mac, 0, ETH_ALEN);
cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
- cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
- GFP_KERNEL | __GFP_ZERO);
+ cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+ GFP_KERNEL);
if (cmd.va == NULL)
return -1;
@@ -2735,13 +2688,13 @@ static int be_vf_eth_addr_config(struct be_adapter *adapter)
be_vf_eth_addr_generate(adapter, mac);
for_all_vfs(adapter, vf_cfg, vf) {
- if (lancer_chip(adapter)) {
- status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
- } else {
+ if (BEx_chip(adapter))
status = be_cmd_pmac_add(adapter, mac,
vf_cfg->if_handle,
&vf_cfg->pmac_id, vf + 1);
- }
+ else
+ status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
+ vf + 1);
if (status)
dev_err(&adapter->pdev->dev,
@@ -2759,7 +2712,7 @@ static int be_vfs_mac_query(struct be_adapter *adapter)
int status, vf;
u8 mac[ETH_ALEN];
struct be_vf_cfg *vf_cfg;
- bool active;
+ bool active = false;
for_all_vfs(adapter, vf_cfg, vf) {
be_cmd_get_mac_from_list(adapter, mac, &active,
@@ -2788,11 +2741,12 @@ static void be_vf_clear(struct be_adapter *adapter)
pci_disable_sriov(adapter->pdev);
for_all_vfs(adapter, vf_cfg, vf) {
- if (lancer_chip(adapter))
- be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
- else
+ if (BEx_chip(adapter))
be_cmd_pmac_del(adapter, vf_cfg->if_handle,
vf_cfg->pmac_id, vf + 1);
+ else
+ be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
+ vf + 1);
be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
}
@@ -2801,28 +2755,40 @@ done:
adapter->num_vfs = 0;
}
-static int be_clear(struct be_adapter *adapter)
+static void be_clear_queues(struct be_adapter *adapter)
{
- int i = 1;
+ be_mcc_queues_destroy(adapter);
+ be_rx_cqs_destroy(adapter);
+ be_tx_queues_destroy(adapter);
+ be_evt_queues_destroy(adapter);
+}
+static void be_cancel_worker(struct be_adapter *adapter)
+{
if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
cancel_delayed_work_sync(&adapter->work);
adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
}
+}
+
+static int be_clear(struct be_adapter *adapter)
+{
+ int i;
+
+ be_cancel_worker(adapter);
if (sriov_enabled(adapter))
be_vf_clear(adapter);
- for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
+ /* Delete the primary MAC along with the uc-mac list */
+ for (i = 0; i < (adapter->uc_macs + 1); i++)
be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[i], 0);
+ adapter->pmac_id[i], 0);
+ adapter->uc_macs = 0;
be_cmd_if_destroy(adapter, adapter->if_handle, 0);
- be_mcc_queues_destroy(adapter);
- be_rx_cqs_destroy(adapter);
- be_tx_queues_destroy(adapter);
- be_evt_queues_destroy(adapter);
+ be_clear_queues(adapter);
kfree(adapter->pmac_id);
adapter->pmac_id = NULL;
@@ -2833,6 +2799,7 @@ static int be_clear(struct be_adapter *adapter)
static int be_vfs_if_create(struct be_adapter *adapter)
{
+ struct be_resources res = {0};
struct be_vf_cfg *vf_cfg;
u32 cap_flags, en_flags, vf;
int status;
@@ -2841,9 +2808,12 @@ static int be_vfs_if_create(struct be_adapter *adapter)
BE_IF_FLAGS_MULTICAST;
for_all_vfs(adapter, vf_cfg, vf) {
- if (!BE3_chip(adapter))
- be_cmd_get_profile_config(adapter, &cap_flags,
- NULL, vf + 1);
+ if (!BE3_chip(adapter)) {
+ status = be_cmd_get_profile_config(adapter, &res,
+ vf + 1);
+ if (!status)
+ cap_flags = res.if_cap_flags;
+ }
/* If a FW profile exists, then cap_flags are updated */
en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
@@ -2880,6 +2850,7 @@ static int be_vf_setup(struct be_adapter *adapter)
u16 def_vlan, lnk_speed;
int status, old_vfs, vf;
struct device *dev = &adapter->pdev->dev;
+ u32 privileges;
old_vfs = pci_num_vf(adapter->pdev);
if (old_vfs) {
@@ -2888,10 +2859,10 @@ static int be_vf_setup(struct be_adapter *adapter)
dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
adapter->num_vfs = old_vfs;
} else {
- if (num_vfs > adapter->dev_num_vfs)
+ if (num_vfs > be_max_vfs(adapter))
dev_info(dev, "Device supports %d VFs and not %d\n",
- adapter->dev_num_vfs, num_vfs);
- adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
+ be_max_vfs(adapter), num_vfs);
+ adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
if (!adapter->num_vfs)
return 0;
}
@@ -2923,6 +2894,18 @@ static int be_vf_setup(struct be_adapter *adapter)
}
for_all_vfs(adapter, vf_cfg, vf) {
+ /* Allow VFs to program MAC/VLAN filters */
+ status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
+ if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
+ status = be_cmd_set_fn_privileges(adapter,
+ privileges |
+ BE_PRIV_FILTMGMT,
+ vf + 1);
+ if (!status)
+ dev_info(dev, "VF%d has FILTMGMT privilege\n",
+ vf);
+ }
+
/* BE3 FW, by default, caps VF TX-rate to 100mbps.
* Allow full available bandwidth
*/
@@ -2935,7 +2918,7 @@ static int be_vf_setup(struct be_adapter *adapter)
vf_cfg->tx_rate = lnk_speed;
status = be_cmd_get_hsw_config(adapter, &def_vlan,
- vf + 1, vf_cfg->if_handle);
+ vf + 1, vf_cfg->if_handle, NULL);
if (status)
goto err;
vf_cfg->def_vid = def_vlan;
@@ -2958,6 +2941,51 @@ err:
return status;
}
+/* On BE2/BE3, the FW does not report the supported resource limits */
+static void BEx_get_resources(struct be_adapter *adapter,
+ struct be_resources *res)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ bool use_sriov = false;
+
+ if (BE3_chip(adapter) && be_physfn(adapter)) {
+ int max_vfs;
+
+ max_vfs = pci_sriov_get_totalvfs(pdev);
+ res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
+ use_sriov = res->max_vfs && num_vfs;
+ }
+
+ if (be_physfn(adapter))
+ res->max_uc_mac = BE_UC_PMAC_COUNT;
+ else
+ res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
+
+ if (adapter->function_mode & FLEX10_MODE)
+ res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
+ else
+ res->max_vlans = BE_NUM_VLANS_SUPPORTED;
+ res->max_mcast_mac = BE_MAX_MC;
+
+ if (BE2_chip(adapter) || use_sriov || be_is_mc(adapter) ||
+ !be_physfn(adapter))
+ res->max_tx_qs = 1;
+ else
+ res->max_tx_qs = BE3_MAX_TX_QS;
+
+ if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
+ !use_sriov && be_physfn(adapter))
+ res->max_rss_qs = (adapter->be3_native) ?
+ BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
+ res->max_rx_qs = res->max_rss_qs + 1;
+
+ res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
+
+ res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
+ if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
+ res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
+}
+
static void be_setup_init(struct be_adapter *adapter)
{
adapter->vlan_prio_bmap = 0xff;
@@ -2971,118 +2999,56 @@ static void be_setup_init(struct be_adapter *adapter)
adapter->cmd_privileges = MIN_PRIVILEGES;
}
-static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
- bool *active_mac, u32 *pmac_id)
+static int be_get_resources(struct be_adapter *adapter)
{
- int status = 0;
-
- if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
- memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
- if (!lancer_chip(adapter) && !be_physfn(adapter))
- *active_mac = true;
- else
- *active_mac = false;
+ struct device *dev = &adapter->pdev->dev;
+ struct be_resources res = {0};
+ int status;
- return status;
+ if (BEx_chip(adapter)) {
+ BEx_get_resources(adapter, &res);
+ adapter->res = res;
}
- if (lancer_chip(adapter)) {
- status = be_cmd_get_mac_from_list(adapter, mac,
- active_mac, pmac_id, 0);
- if (*active_mac) {
- status = be_cmd_mac_addr_query(adapter, mac, false,
- if_handle, *pmac_id);
- }
- } else if (be_physfn(adapter)) {
- /* For BE3, for PF get permanent MAC */
- status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
- *active_mac = false;
- } else {
- /* For BE3, for VF get soft MAC assigned by PF*/
- status = be_cmd_mac_addr_query(adapter, mac, false,
- if_handle, 0);
- *active_mac = true;
+ /* For BE3, only check if FW suggests a different max-txqs value */
+ if (BE3_chip(adapter)) {
+ status = be_cmd_get_profile_config(adapter, &res, 0);
+ if (!status && res.max_tx_qs)
+ adapter->res.max_tx_qs =
+ min(adapter->res.max_tx_qs, res.max_tx_qs);
}
- return status;
-}
-
-static void be_get_resources(struct be_adapter *adapter)
-{
- u16 dev_num_vfs;
- int pos, status;
- bool profile_present = false;
- u16 txq_count = 0;
+ /* For Lancer, SH etc. read per-function resource limits from FW.
+ * GET_FUNC_CONFIG returns per-function guaranteed limits.
+ * GET_PROFILE_CONFIG returns PCI-E related limits and PF-pool limits.
+ */
if (!BEx_chip(adapter)) {
- status = be_cmd_get_func_config(adapter);
- if (!status)
- profile_present = true;
- } else if (BE3_chip(adapter) && be_physfn(adapter)) {
- be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
- }
-
- if (profile_present) {
- /* Sanity fixes for Lancer */
- adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
- BE_UC_PMAC_COUNT);
- adapter->max_vlans = min_t(u16, adapter->max_vlans,
- BE_NUM_VLANS_SUPPORTED);
- adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
- BE_MAX_MC);
- adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
- MAX_TX_QS);
- adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
- BE3_MAX_RSS_QS);
- adapter->max_event_queues = min_t(u16,
- adapter->max_event_queues,
- BE3_MAX_RSS_QS);
-
- if (adapter->max_rss_queues &&
- adapter->max_rss_queues == adapter->max_rx_queues)
- adapter->max_rss_queues -= 1;
-
- if (adapter->max_event_queues < adapter->max_rss_queues)
- adapter->max_rss_queues = adapter->max_event_queues;
-
- } else {
- if (be_physfn(adapter))
- adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
- else
- adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
-
- if (adapter->function_mode & FLEX10_MODE)
- adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
- else
- adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
+ status = be_cmd_get_func_config(adapter, &res);
+ if (status)
+ return status;
- adapter->max_mcast_mac = BE_MAX_MC;
- adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
- adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
- MAX_TX_QS);
- adapter->max_rss_queues = (adapter->be3_native) ?
- BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
- adapter->max_event_queues = BE3_MAX_RSS_QS;
+ /* If RoCE may be enabled, stash away half the EQs for RoCE */
+ if (be_roce_supported(adapter))
+ res.max_evt_qs /= 2;
+ adapter->res = res;
- adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST |
- BE_IF_FLAGS_PASS_L3L4_ERRORS |
- BE_IF_FLAGS_MCAST_PROMISCUOUS |
- BE_IF_FLAGS_VLAN_PROMISCUOUS |
- BE_IF_FLAGS_PROMISCUOUS;
+ if (be_physfn(adapter)) {
+ status = be_cmd_get_profile_config(adapter, &res, 0);
+ if (status)
+ return status;
+ adapter->res.max_vfs = res.max_vfs;
+ }
- if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
- adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
+ dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
+ be_max_txqs(adapter), be_max_rxqs(adapter),
+ be_max_rss(adapter), be_max_eqs(adapter),
+ be_max_vfs(adapter));
+ dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
+ be_max_uc(adapter), be_max_mc(adapter),
+ be_max_vlans(adapter));
}
- pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
- if (pos) {
- pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
- &dev_num_vfs);
- if (BE3_chip(adapter))
- dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
- adapter->dev_num_vfs = dev_num_vfs;
- }
+ return 0;
}
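The resulting resource-discovery flow: BEx chips fill struct be_resources from
driver constants in BEx_get_resources(), BE3 functions additionally cap
max_tx_qs from GET_PROFILE_CONFIG, and non-BEx chips (Lancer, Skyhawk) read
GET_FUNC_CONFIG for per-function guaranteed limits, with their PFs also reading
GET_PROFILE_CONFIG for max_vfs and half the EQs stashed away when RoCE may be
enabled.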
/* Routine to query per function resource limits */
@@ -3095,100 +3061,171 @@ static int be_get_config(struct be_adapter *adapter)
&adapter->function_caps,
&adapter->asic_rev);
if (status)
- goto err;
+ return status;
- be_get_resources(adapter);
+ status = be_get_resources(adapter);
+ if (status)
+ return status;
/* primary mac needs 1 pmac entry */
- adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
- sizeof(u32), GFP_KERNEL);
- if (!adapter->pmac_id) {
- status = -ENOMEM;
- goto err;
- }
+ adapter->pmac_id = kcalloc(be_max_uc(adapter) + 1, sizeof(u32),
+ GFP_KERNEL);
+ if (!adapter->pmac_id)
+ return -ENOMEM;
-err:
- return status;
+ /* Sanitize cfg_num_qs based on HW and platform limits */
+ adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
+
+ return 0;
}
-static int be_setup(struct be_adapter *adapter)
+static int be_mac_setup(struct be_adapter *adapter)
{
- struct device *dev = &adapter->pdev->dev;
- u32 en_flags;
- u32 tx_fc, rx_fc;
- int status;
u8 mac[ETH_ALEN];
- bool active_mac;
+ int status;
- be_setup_init(adapter);
+ if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
+ status = be_cmd_get_perm_mac(adapter, mac);
+ if (status)
+ return status;
- if (!lancer_chip(adapter))
- be_cmd_req_native_mode(adapter);
+ memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ } else {
+ /* Maybe the HW was reset; dev_addr must be re-programmed */
+ memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
+ }
- status = be_get_config(adapter);
+ /* On BE3 VFs this cmd may fail due to lack of privilege.
+ * Ignore the failure; in that case the pmac_id is fetched
+ * via the IFACE_CREATE cmd.
+ */
+ be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+ &adapter->pmac_id[0], 0);
+ return 0;
+}
+
+static void be_schedule_worker(struct be_adapter *adapter)
+{
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+ adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
+}
+
+static int be_setup_queues(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int status;
+
+ status = be_evt_queues_create(adapter);
if (status)
goto err;
- status = be_msix_enable(adapter);
+ status = be_tx_qs_create(adapter);
if (status)
goto err;
- status = be_evt_queues_create(adapter);
+ status = be_rx_cqs_create(adapter);
if (status)
goto err;
- status = be_tx_cqs_create(adapter);
+ status = be_mcc_queues_create(adapter);
if (status)
goto err;
- status = be_rx_cqs_create(adapter);
+ status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
if (status)
goto err;
- status = be_mcc_queues_create(adapter);
+ status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
if (status)
goto err;
- be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
- /* In UMC mode FW does not return right privileges.
- * Override with correct privilege equivalent to PF.
+ return 0;
+err:
+ dev_err(&adapter->pdev->dev, "queue_setup failed\n");
+ return status;
+}
+
+int be_update_queues(struct be_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int status;
+
+ if (netif_running(netdev))
+ be_close(netdev);
+
+ be_cancel_worker(adapter);
+
+ /* If any vectors have been shared with RoCE, we cannot re-program
+ * the MSIx table.
*/
- if (be_is_mc(adapter))
- adapter->cmd_privileges = MAX_PRIVILEGES;
+ if (!adapter->num_msix_roce_vec)
+ be_msix_disable(adapter);
- en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
- BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ be_clear_queues(adapter);
- if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
- en_flags |= BE_IF_FLAGS_RSS;
+ if (!msix_enabled(adapter)) {
+ status = be_msix_enable(adapter);
+ if (status)
+ return status;
+ }
- en_flags = en_flags & adapter->if_cap_flags;
+ status = be_setup_queues(adapter);
+ if (status)
+ return status;
- status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
- &adapter->if_handle, 0);
- if (status != 0)
+ be_schedule_worker(adapter);
+
+ if (netif_running(netdev))
+ status = be_open(netdev);
+
+ return status;
+}
+
+static int be_setup(struct be_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ u32 tx_fc, rx_fc, en_flags;
+ int status;
+
+ be_setup_init(adapter);
+
+ if (!lancer_chip(adapter))
+ be_cmd_req_native_mode(adapter);
+
+ status = be_get_config(adapter);
+ if (status)
goto err;
- memset(mac, 0, ETH_ALEN);
- active_mac = false;
- status = be_get_mac_addr(adapter, mac, adapter->if_handle,
- &active_mac, &adapter->pmac_id[0]);
- if (status != 0)
+ status = be_msix_enable(adapter);
+ if (status)
goto err;
- if (!active_mac) {
- status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
- &adapter->pmac_id[0], 0);
- if (status != 0)
- goto err;
- }
+ en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
+ if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
+ en_flags |= BE_IF_FLAGS_RSS;
+ en_flags = en_flags & be_if_cap_flags(adapter);
+ status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
+ &adapter->if_handle, 0);
+ if (status)
+ goto err;
- if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
- memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
- }
+ /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
+ rtnl_lock();
+ status = be_setup_queues(adapter);
+ rtnl_unlock();
+ if (status)
+ goto err;
- status = be_tx_qs_create(adapter);
+ be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
+ /* In UMC mode FW does not return right privileges.
+ * Override with correct privilege equivalent to PF.
+ */
+ if (be_is_mc(adapter))
+ adapter->cmd_privileges = MAX_PRIVILEGES;
+
+ status = be_mac_setup(adapter);
if (status)
goto err;
@@ -3205,8 +3242,8 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
- if (be_physfn(adapter)) {
- if (adapter->dev_num_vfs)
+ if (be_physfn(adapter) && num_vfs) {
+ if (be_max_vfs(adapter))
be_vf_setup(adapter);
else
dev_warn(dev, "device doesn't support SRIOV\n");
@@ -3216,8 +3253,7 @@ static int be_setup(struct be_adapter *adapter)
if (!status && be_pause_supported(adapter))
adapter->phy.fc_autoneg = 1;
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
- adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
+ be_schedule_worker(adapter);
return 0;
err:
be_clear(adapter);
@@ -3241,7 +3277,7 @@ static void be_netpoll(struct net_device *netdev)
#endif
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
-char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
+static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
static bool be_flash_redboot(struct be_adapter *adapter,
const u8 *p, u32 img_start, int image_size,
@@ -3298,7 +3334,7 @@ static bool is_comp_in_ufi(struct be_adapter *adapter,
}
-struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
+static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
int header_size,
const struct firmware *fw)
{
@@ -3760,6 +3796,74 @@ fw_exit:
return status;
}
+static int be_ndo_bridge_setlink(struct net_device *dev,
+ struct nlmsghdr *nlh)
+{
+ struct be_adapter *adapter = netdev_priv(dev);
+ struct nlattr *attr, *br_spec;
+ int rem;
+ int status = 0;
+ u16 mode = 0;
+
+ if (!sriov_enabled(adapter))
+ return -EOPNOTSUPP;
+
+ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+
+ nla_for_each_nested(attr, br_spec, rem) {
+ if (nla_type(attr) != IFLA_BRIDGE_MODE)
+ continue;
+
+ mode = nla_get_u16(attr);
+ if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
+ return -EINVAL;
+
+ status = be_cmd_set_hsw_config(adapter, 0, 0,
+ adapter->if_handle,
+ mode == BRIDGE_MODE_VEPA ?
+ PORT_FWD_TYPE_VEPA :
+ PORT_FWD_TYPE_VEB);
+ if (status)
+ goto err;
+
+ dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
+ mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+
+ return status;
+ }
+err:
+ dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
+ mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
+
+ return status;
+}
+
+static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *dev,
+ u32 filter_mask)
+{
+ struct be_adapter *adapter = netdev_priv(dev);
+ int status = 0;
+ u8 hsw_mode;
+
+ if (!sriov_enabled(adapter))
+ return 0;
+
+ /* BE and Lancer chips support VEB mode only */
+ if (BEx_chip(adapter) || lancer_chip(adapter)) {
+ hsw_mode = PORT_FWD_TYPE_VEB;
+ } else {
+ status = be_cmd_get_hsw_config(adapter, NULL, 0,
+ adapter->if_handle, &hsw_mode);
+ if (status)
+ return 0;
+ }
+
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
+ hsw_mode == PORT_FWD_TYPE_VEPA ?
+ BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
+}
+
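These ndo hooks expose the e-switch port forwarding mode (VEB vs. VEPA) through
the standard rtnetlink bridge interface; on chips that can switch the mode (the
non-BEx, non-Lancer path) this is typically exercised from userspace with
iproute2, e.g. "bridge link set dev eth0 hwmode vepa" (interface name
illustrative). BEx and Lancer report VEB unconditionally since that is the only
mode they support.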
static const struct net_device_ops be_netdev_ops = {
.ndo_open = be_open,
.ndo_stop = be_close,
@@ -3778,13 +3882,13 @@ static const struct net_device_ops be_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = be_netpoll,
#endif
+ .ndo_bridge_setlink = be_ndo_bridge_setlink,
+ .ndo_bridge_getlink = be_ndo_bridge_getlink,
};
static void be_netdev_init(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
- struct be_eq_obj *eqo;
- int i;
netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
@@ -3807,9 +3911,6 @@ static void be_netdev_init(struct net_device *netdev)
netdev->netdev_ops = &be_netdev_ops;
SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
-
- for_all_evt_queues(adapter, eqo, i)
- netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -3916,9 +4017,9 @@ static int be_ctrl_init(struct be_adapter *adapter)
memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
- rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
- &rx_filter->dma,
- GFP_KERNEL | __GFP_ZERO);
+ rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
+ rx_filter->size, &rx_filter->dma,
+ GFP_KERNEL);
if (rx_filter->va == NULL) {
status = -ENOMEM;
goto free_mbox;
@@ -3964,8 +4065,8 @@ static int be_stats_init(struct be_adapter *adapter)
/* BE3 and Skyhawk */
cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
- cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
- GFP_KERNEL | __GFP_ZERO);
+ cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
+ GFP_KERNEL);
if (cmd->va == NULL)
return -1;
return 0;
@@ -4072,6 +4173,7 @@ static int be_get_initial_config(struct be_adapter *adapter)
level = be_get_fw_log_level(adapter);
adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
+ adapter->cfg_num_qs = netif_get_num_default_rss_queues();
return 0;
}
@@ -4164,7 +4266,8 @@ static void be_worker(struct work_struct *work)
be_cmd_get_stats(adapter, &adapter->stats_cmd);
}
- if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
+ if (be_physfn(adapter) &&
+ MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
be_cmd_get_die_temperature(adapter);
for_all_rx_queues(adapter, rxo, i) {
@@ -4253,7 +4356,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
status = pci_enable_pcie_error_reporting(pdev);
if (status)
- dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
+ dev_info(&pdev->dev, "Could not use PCIe error reporting\n");
status = be_ctrl_init(adapter);
if (status)
@@ -4373,6 +4476,10 @@ static int be_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
+ status = be_fw_wait_ready(adapter);
+ if (status)
+ return status;
+
/* tell fw we're ready to fire cmds */
status = be_cmd_fw_init(adapter);
if (status)
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index f3d126dcc10..9cd5415fe01 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -60,7 +60,7 @@ static void _be_roce_dev_add(struct be_adapter *adapter)
*/
num_vec = adapter->num_msix_vec + adapter->num_msix_roce_vec;
dev_info.intr_mode = BE_INTERRUPT_MODE_MSIX;
- dev_info.msix.num_vectors = min(num_vec, MAX_ROCE_MSIX_VECTORS);
+ dev_info.msix.num_vectors = min(num_vec, MAX_MSIX_VECTORS);
/* provide start index of the vector,
* so in case of linear usage,
* it can use the base as starting point.
@@ -93,7 +93,7 @@ void be_roce_dev_add(struct be_adapter *adapter)
}
}
-void _be_roce_dev_remove(struct be_adapter *adapter)
+static void _be_roce_dev_remove(struct be_adapter *adapter)
{
if (ocrdma_drv && ocrdma_drv->remove && adapter->ocrdma_dev)
ocrdma_drv->remove(adapter->ocrdma_dev);
@@ -110,7 +110,7 @@ void be_roce_dev_remove(struct be_adapter *adapter)
}
}
-void _be_roce_dev_open(struct be_adapter *adapter)
+static void _be_roce_dev_open(struct be_adapter *adapter)
{
if (ocrdma_drv && adapter->ocrdma_dev &&
ocrdma_drv->state_change_handler)
@@ -126,7 +126,7 @@ void be_roce_dev_open(struct be_adapter *adapter)
}
}
-void _be_roce_dev_close(struct be_adapter *adapter)
+static void _be_roce_dev_close(struct be_adapter *adapter)
{
if (ocrdma_drv && adapter->ocrdma_dev &&
ocrdma_drv->state_change_handler)
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index 27657299846..2cd1129e19a 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -29,7 +29,7 @@ enum be_interrupt_mode {
BE_INTERRUPT_MODE_MSI = 2,
};
-#define MAX_ROCE_MSIX_VECTORS 16
+#define MAX_MSIX_VECTORS 32
struct be_dev_info {
u8 __iomem *db;
u64 unmapped_db;
@@ -45,7 +45,7 @@ struct be_dev_info {
struct {
int num_vectors;
int start_vector;
- u32 vector_list[MAX_ROCE_MSIX_VECTORS];
+ u32 vector_list[MAX_MSIX_VECTORS];
} msix;
};
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index cf579fb39bc..4de8cfd149c 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1030,8 +1030,8 @@ static int ethoc_probe(struct platform_device *pdev)
}
/* Allow the platform setup code to pass in a MAC address. */
- if (pdev->dev.platform_data) {
- struct ethoc_platform_data *pdata = pdev->dev.platform_data;
+ if (dev_get_platdata(&pdev->dev)) {
+ struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
priv->phy_id = pdata->phy_id;
} else {
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 934e1ae279f..212f44b3a77 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -778,10 +778,9 @@ static int ftgmac100_alloc_buffers(struct ftgmac100 *priv)
{
int i;
- priv->descs = dma_alloc_coherent(priv->dev,
- sizeof(struct ftgmac100_descs),
- &priv->descs_dma_addr,
- GFP_KERNEL | __GFP_ZERO);
+ priv->descs = dma_zalloc_coherent(priv->dev,
+ sizeof(struct ftgmac100_descs),
+ &priv->descs_dma_addr, GFP_KERNEL);
if (!priv->descs)
return -ENOMEM;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 4658f4cc196..8be5b40c0a1 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -732,10 +732,10 @@ static int ftmac100_alloc_buffers(struct ftmac100 *priv)
{
int i;
- priv->descs = dma_alloc_coherent(priv->dev,
- sizeof(struct ftmac100_descs),
- &priv->descs_dma_addr,
- GFP_KERNEL | __GFP_ZERO);
+ priv->descs = dma_zalloc_coherent(priv->dev,
+ sizeof(struct ftmac100_descs),
+ &priv->descs_dma_addr,
+ GFP_KERNEL);
if (!priv->descs)
return -ENOMEM;
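
The benet, ftgmac100 and ftmac100 hunks above all make the same mechanical substitution: a GFP_KERNEL | __GFP_ZERO allocation becomes dma_zalloc_coherent() with plain GFP_KERNEL. A minimal sketch of the resulting pattern, using an invented helper and field names (none of them come from these drivers):

#include <linux/dma-mapping.h>

/* Allocate a zeroed, DMA-coherent descriptor area for a ring. */
static int demo_alloc_ring(struct device *dev, size_t size,
			   void **va, dma_addr_t *dma)
{
	/* dma_zalloc_coherent() == dma_alloc_coherent() + __GFP_ZERO */
	*va = dma_zalloc_coherent(dev, size, dma, GFP_KERNEL);
	if (!*va)
		return -ENOMEM;

	return 0;
}
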
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 2b0a0ea4f8e..0120217a16d 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -259,6 +259,7 @@ struct bufdesc_ex {
struct fec_enet_delayed_work {
struct delayed_work delay_work;
bool timeout;
+ bool trig_tx;
};
/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
@@ -295,6 +296,9 @@ struct fec_enet_private {
/* The ring entries to be free()ed */
struct bufdesc *dirty_tx;
+ unsigned short tx_ring_size;
+ unsigned short rx_ring_size;
+
struct platform_device *pdev;
int opened;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d3ad5ea711d..f9aacf5d852 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -69,7 +69,6 @@ static void set_multicast_list(struct net_device *ndev);
#endif
#define DRIVER_NAME "fec"
-#define FEC_NAPI_WEIGHT 64
/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE (1 << 5)
@@ -93,6 +92,20 @@ static void set_multicast_list(struct net_device *ndev);
#define FEC_QUIRK_HAS_CSUM (1 << 5)
/* Controller has hardware vlan support */
#define FEC_QUIRK_HAS_VLAN (1 << 6)
+/* ENET IP errata ERR006358
+ *
+ * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
+ * detected as not set during a prior frame transmission, then the
+ * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
+ * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
+ * frames not being transmitted until there is a 0-to-1 transition on
+ * ENET_TDAR[TDAR].
+ */
+#define FEC_QUIRK_ERR006358 (1 << 7)
static struct platform_device_id fec_devtype[] = {
{
@@ -112,7 +125,7 @@ static struct platform_device_id fec_devtype[] = {
.name = "imx6q-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
- FEC_QUIRK_HAS_VLAN,
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358,
}, {
.name = "mvf600-fec",
.driver_data = FEC_QUIRK_ENET_MAC,
@@ -225,22 +238,57 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
static int mii_cnt;
-static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, int is_ex)
+static inline
+struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
{
- struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
- if (is_ex)
- return (struct bufdesc *)(ex + 1);
+ struct bufdesc *new_bd = bdp + 1;
+ struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1;
+ struct bufdesc_ex *ex_base;
+ struct bufdesc *base;
+ int ring_size;
+
+ if (bdp >= fep->tx_bd_base) {
+ base = fep->tx_bd_base;
+ ring_size = fep->tx_ring_size;
+ ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+ } else {
+ base = fep->rx_bd_base;
+ ring_size = fep->rx_ring_size;
+ ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+ }
+
+ if (fep->bufdesc_ex)
+ return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ?
+ ex_base : ex_new_bd);
else
- return bdp + 1;
+ return (new_bd >= (base + ring_size)) ?
+ base : new_bd;
}
-static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, int is_ex)
+static inline
+struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep)
{
- struct bufdesc_ex *ex = (struct bufdesc_ex *)bdp;
- if (is_ex)
- return (struct bufdesc *)(ex - 1);
+ struct bufdesc *new_bd = bdp - 1;
+ struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1;
+ struct bufdesc_ex *ex_base;
+ struct bufdesc *base;
+ int ring_size;
+
+ if (bdp >= fep->tx_bd_base) {
+ base = fep->tx_bd_base;
+ ring_size = fep->tx_ring_size;
+ ex_base = (struct bufdesc_ex *)fep->tx_bd_base;
+ } else {
+ base = fep->rx_bd_base;
+ ring_size = fep->rx_ring_size;
+ ex_base = (struct bufdesc_ex *)fep->rx_bd_base;
+ }
+
+ if (fep->bufdesc_ex)
+ return (struct bufdesc *)((ex_new_bd < ex_base) ?
+ (ex_new_bd + ring_size) : ex_new_bd);
else
- return bdp - 1;
+ return (new_bd < base) ? (new_bd + ring_size) : new_bd;
}
static void *swap_buffer(void *bufaddr, int len)
@@ -275,16 +323,11 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
- struct bufdesc *bdp;
+ struct bufdesc *bdp, *bdp_pre;
void *bufaddr;
unsigned short status;
unsigned int index;
- if (!fep->link) {
- /* Link is down or auto-negotiation is in progress. */
- return NETDEV_TX_BUSY;
- }
-
/* Fill in a Tx ring entry */
bdp = fep->cur_tx;
@@ -370,11 +413,17 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ebdp->cbd_esc |= BD_ENET_TX_PINS;
}
}
+
+ bdp_pre = fec_enet_get_prevdesc(bdp, fep);
+ if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
+ !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
+ fep->delay_work.trig_tx = true;
+ schedule_delayed_work(&(fep->delay_work.delay_work),
+ msecs_to_jiffies(1));
+ }
+
/* If this was the last BD in the ring, start at the beginning again. */
- if (status & BD_ENET_TX_WRAP)
- bdp = fep->tx_bd_base;
- else
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
fep->cur_tx = bdp;
@@ -399,18 +448,18 @@ static void fec_enet_bd_init(struct net_device *dev)
/* Initialize the receive buffer descriptors. */
bdp = fep->rx_bd_base;
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < fep->rx_ring_size; i++) {
/* Initialize the BD for every fragment in the page. */
if (bdp->cbd_bufaddr)
bdp->cbd_sc = BD_ENET_RX_EMPTY;
else
bdp->cbd_sc = 0;
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
}
/* Set the last buffer to wrap */
- bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_prevdesc(bdp, fep);
bdp->cbd_sc |= BD_SC_WRAP;
fep->cur_rx = fep->rx_bd_base;
@@ -418,7 +467,7 @@ static void fec_enet_bd_init(struct net_device *dev)
/* ...and the same for transmit */
bdp = fep->tx_bd_base;
fep->cur_tx = bdp;
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < fep->tx_ring_size; i++) {
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0;
@@ -427,11 +476,11 @@ static void fec_enet_bd_init(struct net_device *dev)
fep->tx_skbuff[i] = NULL;
}
bdp->cbd_bufaddr = 0;
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
}
/* Set the last buffer to wrap */
- bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_prevdesc(bdp, fep);
bdp->cbd_sc |= BD_SC_WRAP;
fep->dirty_tx = bdp;
}
@@ -492,10 +541,10 @@ fec_restart(struct net_device *ndev, int duplex)
writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
if (fep->bufdesc_ex)
writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex)
- * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+ * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
else
writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
- * RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+ * fep->rx_ring_size, fep->hwp + FEC_X_DES_START);
for (i = 0; i <= TX_RING_MOD_MASK; i++) {
@@ -689,6 +738,11 @@ static void fec_enet_work(struct work_struct *work)
fec_restart(fep->netdev, fep->full_duplex);
netif_wake_queue(fep->netdev);
}
+
+ if (fep->delay_work.trig_tx) {
+ fep->delay_work.trig_tx = false;
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+ }
}
static void
@@ -704,10 +758,7 @@ fec_enet_tx(struct net_device *ndev)
bdp = fep->dirty_tx;
/* get next bdp of dirty_tx */
- if (bdp->cbd_sc & BD_ENET_TX_WRAP)
- bdp = fep->tx_bd_base;
- else
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
@@ -777,10 +828,7 @@ fec_enet_tx(struct net_device *ndev)
fep->dirty_tx = bdp;
/* Update pointer to next buffer descriptor to be transmitted */
- if (status & BD_ENET_TX_WRAP)
- bdp = fep->tx_bd_base;
- else
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
/* Since we have freed up a buffer, the ring is no longer full
*/
@@ -948,8 +996,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
htons(ETH_P_8021Q),
vlan_tag);
- if (!skb_defer_rx_timestamp(skb))
- napi_gro_receive(&fep->napi, skb);
+ napi_gro_receive(&fep->napi, skb);
}
bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
@@ -971,10 +1018,8 @@ rx_processing_done:
}
/* Update BD pointer to next entry */
- if (status & BD_ENET_RX_WRAP)
- bdp = fep->rx_bd_base;
- else
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
+
/* Doing this here will keep the FEC running while we process
* incoming frames. On a heavily loaded network, we should be
* able to keep up at the expense of system resources.
@@ -1037,7 +1082,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
static void fec_get_mac(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
+ struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
unsigned char *iap, tmpaddr[ETH_ALEN];
/*
@@ -1077,10 +1122,10 @@ static void fec_get_mac(struct net_device *ndev)
* 4) FEC mac registers set by bootloader
*/
if (!is_valid_ether_addr(iap)) {
- *((unsigned long *) &tmpaddr[0]) =
- be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW));
- *((unsigned short *) &tmpaddr[4]) =
- be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+ *((__be32 *) &tmpaddr[0]) =
+ cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
+ *((__be16 *) &tmpaddr[4]) =
+ cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
iap = &tmpaddr[0];
}
@@ -1640,7 +1685,7 @@ static void fec_enet_free_buffers(struct net_device *ndev)
struct bufdesc *bdp;
bdp = fep->rx_bd_base;
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < fep->rx_ring_size; i++) {
skb = fep->rx_skbuff[i];
if (bdp->cbd_bufaddr)
@@ -1648,11 +1693,11 @@ static void fec_enet_free_buffers(struct net_device *ndev)
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
if (skb)
dev_kfree_skb(skb);
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
}
bdp = fep->tx_bd_base;
- for (i = 0; i < TX_RING_SIZE; i++)
+ for (i = 0; i < fep->tx_ring_size; i++)
kfree(fep->tx_bounce[i]);
}
@@ -1664,7 +1709,7 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
struct bufdesc *bdp;
bdp = fep->rx_bd_base;
- for (i = 0; i < RX_RING_SIZE; i++) {
+ for (i = 0; i < fep->rx_ring_size; i++) {
skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
if (!skb) {
fec_enet_free_buffers(ndev);
@@ -1681,15 +1726,15 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
ebdp->cbd_esc = BD_ENET_RX_INT;
}
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
}
/* Set the last buffer to wrap. */
- bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_prevdesc(bdp, fep);
bdp->cbd_sc |= BD_SC_WRAP;
bdp = fep->tx_bd_base;
- for (i = 0; i < TX_RING_SIZE; i++) {
+ for (i = 0; i < fep->tx_ring_size; i++) {
fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
bdp->cbd_sc = 0;
@@ -1700,11 +1745,11 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
ebdp->cbd_esc = BD_ENET_TX_INT;
}
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
}
/* Set the last buffer to wrap. */
- bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+ bdp = fec_enet_get_prevdesc(bdp, fep);
bdp->cbd_sc |= BD_SC_WRAP;
return 0;
@@ -1944,13 +1989,17 @@ static int fec_enet_init(struct net_device *ndev)
/* Get the Ethernet address */
fec_get_mac(ndev);
+ /* init the tx & rx ring size */
+ fep->tx_ring_size = TX_RING_SIZE;
+ fep->rx_ring_size = RX_RING_SIZE;
+
/* Set receive and transmit descriptor base. */
fep->rx_bd_base = cbd_base;
if (fep->bufdesc_ex)
fep->tx_bd_base = (struct bufdesc *)
- (((struct bufdesc_ex *)cbd_base) + RX_RING_SIZE);
+ (((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size);
else
- fep->tx_bd_base = cbd_base + RX_RING_SIZE;
+ fep->tx_bd_base = cbd_base + fep->rx_ring_size;
/* The FEC Ethernet specific entries in the device structure */
ndev->watchdog_timeo = TX_TIMEOUT;
@@ -1958,7 +2007,7 @@ static int fec_enet_init(struct net_device *ndev)
ndev->ethtool_ops = &fec_enet_ethtool_ops;
writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
- netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
+ netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) {
/* enable hw VLAN support */
@@ -2033,10 +2082,6 @@ fec_probe(struct platform_device *pdev)
if (of_id)
pdev->id_entry = of_id->data;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENXIO;
-
/* Init network device */
ndev = alloc_etherdev(sizeof(struct fec_enet_private));
if (!ndev)
@@ -2054,6 +2099,7 @@ fec_probe(struct platform_device *pdev)
fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fep->hwp = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(fep->hwp)) {
ret = PTR_ERR(fep->hwp);
@@ -2069,7 +2115,7 @@ fec_probe(struct platform_device *pdev)
ret = of_get_phy_mode(pdev->dev.of_node);
if (ret < 0) {
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata)
fep->phy_interface = pdata->phy;
else
@@ -2103,10 +2149,25 @@ fec_probe(struct platform_device *pdev)
fep->bufdesc_ex = 0;
}
- clk_prepare_enable(fep->clk_ahb);
- clk_prepare_enable(fep->clk_ipg);
- clk_prepare_enable(fep->clk_enet_out);
- clk_prepare_enable(fep->clk_ptp);
+ ret = clk_prepare_enable(fep->clk_ahb);
+ if (ret)
+ goto failed_clk;
+
+ ret = clk_prepare_enable(fep->clk_ipg);
+ if (ret)
+ goto failed_clk_ipg;
+
+ if (fep->clk_enet_out) {
+ ret = clk_prepare_enable(fep->clk_enet_out);
+ if (ret)
+ goto failed_clk_enet_out;
+ }
+
+ if (fep->clk_ptp) {
+ ret = clk_prepare_enable(fep->clk_ptp);
+ if (ret)
+ goto failed_clk_ptp;
+ }
fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
if (!IS_ERR(fep->reg_phy)) {
@@ -2137,14 +2198,10 @@ fec_probe(struct platform_device *pdev)
ret = irq;
goto failed_irq;
}
- ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
- if (ret) {
- while (--i >= 0) {
- irq = platform_get_irq(pdev, i);
- free_irq(irq, ndev);
- }
+ ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
+ IRQF_DISABLED, pdev->name, ndev);
+ if (ret)
goto failed_irq;
- }
}
ret = fec_enet_mii_init(pdev);
@@ -2168,19 +2225,19 @@ failed_register:
fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
- for (i = 0; i < FEC_IRQ_NUM; i++) {
- irq = platform_get_irq(pdev, i);
- if (irq > 0)
- free_irq(irq, ndev);
- }
failed_init:
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
failed_regulator:
- clk_disable_unprepare(fep->clk_ahb);
+ if (fep->clk_ptp)
+ clk_disable_unprepare(fep->clk_ptp);
+failed_clk_ptp:
+ if (fep->clk_enet_out)
+ clk_disable_unprepare(fep->clk_enet_out);
+failed_clk_enet_out:
clk_disable_unprepare(fep->clk_ipg);
- clk_disable_unprepare(fep->clk_enet_out);
- clk_disable_unprepare(fep->clk_ptp);
+failed_clk_ipg:
+ clk_disable_unprepare(fep->clk_ahb);
failed_clk:
failed_ioremap:
free_netdev(ndev);
@@ -2193,25 +2250,21 @@ fec_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
- int i;
cancel_delayed_work_sync(&(fep->delay_work.delay_work));
unregister_netdev(ndev);
fec_enet_mii_remove(fep);
del_timer_sync(&fep->time_keep);
- for (i = 0; i < FEC_IRQ_NUM; i++) {
- int irq = platform_get_irq(pdev, i);
- if (irq > 0)
- free_irq(irq, ndev);
- }
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
- clk_disable_unprepare(fep->clk_ptp);
+ if (fep->clk_ptp)
+ clk_disable_unprepare(fep->clk_ptp);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
- clk_disable_unprepare(fep->clk_enet_out);
- clk_disable_unprepare(fep->clk_ahb);
+ if (fep->clk_enet_out)
+ clk_disable_unprepare(fep->clk_enet_out);
clk_disable_unprepare(fep->clk_ipg);
+ clk_disable_unprepare(fep->clk_ahb);
free_netdev(ndev);
return 0;
@@ -2228,9 +2281,12 @@ fec_suspend(struct device *dev)
fec_stop(ndev);
netif_device_detach(ndev);
}
- clk_disable_unprepare(fep->clk_enet_out);
- clk_disable_unprepare(fep->clk_ahb);
+ if (fep->clk_ptp)
+ clk_disable_unprepare(fep->clk_ptp);
+ if (fep->clk_enet_out)
+ clk_disable_unprepare(fep->clk_enet_out);
clk_disable_unprepare(fep->clk_ipg);
+ clk_disable_unprepare(fep->clk_ahb);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
@@ -2251,15 +2307,44 @@ fec_resume(struct device *dev)
return ret;
}
- clk_prepare_enable(fep->clk_enet_out);
- clk_prepare_enable(fep->clk_ahb);
- clk_prepare_enable(fep->clk_ipg);
+ ret = clk_prepare_enable(fep->clk_ahb);
+ if (ret)
+ goto failed_clk_ahb;
+
+ ret = clk_prepare_enable(fep->clk_ipg);
+ if (ret)
+ goto failed_clk_ipg;
+
+ if (fep->clk_enet_out) {
+ ret = clk_prepare_enable(fep->clk_enet_out);
+ if (ret)
+ goto failed_clk_enet_out;
+ }
+
+ if (fep->clk_ptp) {
+ ret = clk_prepare_enable(fep->clk_ptp);
+ if (ret)
+ goto failed_clk_ptp;
+ }
+
if (netif_running(ndev)) {
fec_restart(ndev, fep->full_duplex);
netif_device_attach(ndev);
}
return 0;
+
+failed_clk_ptp:
+ if (fep->clk_enet_out)
+ clk_disable_unprepare(fep->clk_enet_out);
+failed_clk_enet_out:
+ clk_disable_unprepare(fep->clk_ipg);
+failed_clk_ipg:
+ clk_disable_unprepare(fep->clk_ahb);
+failed_clk_ahb:
+ if (fep->reg_phy)
+ regulator_disable(fep->reg_phy);
+ return ret;
}
#endif /* CONFIG_PM_SLEEP */
@@ -2279,4 +2364,5 @@ static struct platform_driver fec_driver = {
module_platform_driver(fec_driver);
+MODULE_ALIAS("platform:"DRIVER_NAME);
MODULE_LICENSE("GPL");
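
The reworked fec_enet_get_nextdesc()/fec_enet_get_prevdesc() above stop relying on the BD_ENET_*_WRAP status bit and instead wrap by comparing against the ring base plus the new tx_ring_size/rx_ring_size fields. A reduced sketch of that wrap arithmetic for a plain (non-extended) descriptor ring; next_desc()/prev_desc() and their parameters are illustrative names only:

/* Advance one entry in a circular descriptor ring, wrapping at the end. */
static struct bufdesc *next_desc(struct bufdesc *desc,
				 struct bufdesc *base, int ring_size)
{
	struct bufdesc *next = desc + 1;

	return (next >= base + ring_size) ? base : next;
}

/* Step back one entry, wrapping around to the last descriptor. */
static struct bufdesc *prev_desc(struct bufdesc *desc,
				 struct bufdesc *base, int ring_size)
{
	struct bufdesc *prev = desc - 1;

	return (prev < base) ? prev + ring_size : prev;
}
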
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
index 360a578c2bb..e0528900db0 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c
@@ -123,12 +123,10 @@ static int mpc52xx_fec_mdio_probe(struct platform_device *of)
static int mpc52xx_fec_mdio_remove(struct platform_device *of)
{
- struct device *dev = &of->dev;
- struct mii_bus *bus = dev_get_drvdata(dev);
+ struct mii_bus *bus = platform_get_drvdata(of);
struct mpc52xx_fec_mdio_priv *priv = bus->priv;
mdiobus_unregister(bus);
- dev_set_drvdata(dev, NULL);
iounmap(priv->regs);
kfree(priv);
mdiobus_free(bus);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 8de53a14a6f..6b60582ce8c 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -583,7 +583,6 @@ static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
struct sk_buff *skb)
{
struct sk_buff *new_skb;
- struct fs_enet_private *fep = netdev_priv(dev);
/* Alloc new skb */
new_skb = netdev_alloc_skb(dev, skb->len + 4);
@@ -1000,6 +999,8 @@ static int fs_enet_probe(struct platform_device *ofdev)
struct fs_enet_private *fep;
struct fs_platform_info *fpi;
const u32 *data;
+ struct clk *clk;
+ int err;
const u8 *mac_addr;
const char *phy_connection_type;
int privsize, len, ret = -ENODEV;
@@ -1037,6 +1038,20 @@ static int fs_enet_probe(struct platform_device *ofdev)
fpi->use_rmii = 1;
}
+ /* make clock lookup non-fatal (the driver is shared among platforms),
+ * but require enable to succeed when a clock was specified/found,
+ * keep a reference to the clock upon successful acquisition
+ */
+ clk = devm_clk_get(&ofdev->dev, "per");
+ if (!IS_ERR(clk)) {
+ err = clk_prepare_enable(clk);
+ if (err) {
+ ret = err;
+ goto out_free_fpi;
+ }
+ fpi->clk_per = clk;
+ }
+
privsize = sizeof(*fep) +
sizeof(struct sk_buff **) *
(fpi->rx_ring + fpi->tx_ring);
@@ -1108,6 +1123,8 @@ out_free_dev:
free_netdev(ndev);
out_put:
of_node_put(fpi->phy_node);
+ if (fpi->clk_per)
+ clk_disable_unprepare(fpi->clk_per);
out_free_fpi:
kfree(fpi);
return ret;
@@ -1124,6 +1141,8 @@ static int fs_enet_remove(struct platform_device *ofdev)
fep->ops->cleanup_data(ndev);
dev_set_drvdata(fep->dev, NULL);
of_node_put(fep->fpi->phy_node);
+ if (fep->fpi->clk_per)
+ clk_disable_unprepare(fep->fpi->clk_per);
free_netdev(ndev);
return 0;
}
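
The fs_enet probe change above treats the "per" clock as optional: a missing clock is not an error, but once a clock is found, clk_prepare_enable() must succeed. A condensed sketch of that policy with an invented helper name, and without the deferral handling a production driver might add:

#include <linux/clk.h>

static int demo_get_optional_clk(struct device *dev, struct clk **out)
{
	struct clk *clk;
	int err;

	*out = NULL;

	/* lookup is non-fatal: many platforms simply have no such clock */
	clk = devm_clk_get(dev, "per");
	if (IS_ERR(clk))
		return 0;

	/* but if a clock was specified, failing to enable it is fatal */
	err = clk_prepare_enable(clk);
	if (err)
		return err;

	*out = clk;
	return 0;
}
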
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index c93a05654b4..c4f65067cf7 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -409,7 +409,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
priv->regs = priv->map + data->mii_offset;
new_bus->parent = &pdev->dev;
- dev_set_drvdata(&pdev->dev, new_bus);
+ platform_set_drvdata(pdev, new_bus);
if (data->get_tbipa) {
for_each_child_of_node(np, tbi) {
@@ -468,8 +468,6 @@ static int fsl_pq_mdio_remove(struct platform_device *pdev)
mdiobus_unregister(bus);
- dev_set_drvdata(device, NULL);
-
iounmap(priv->map);
mdiobus_free(bus);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 8d2db7b808b..c4eaadeb572 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -593,7 +593,6 @@ static int gfar_parse_group(struct device_node *np,
return -EINVAL;
}
- grp->grp_id = priv->num_grps;
grp->priv = priv;
spin_lock_init(&grp->grplock);
if (priv->mode == MQ_MG_MODE) {
@@ -1017,7 +1016,14 @@ static int gfar_probe(struct platform_device *ofdev)
/* We need to delay at least 3 TX clocks */
udelay(2);
- tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+ tempval = 0;
+ if (!priv->pause_aneg_en && priv->tx_pause_en)
+ tempval |= MACCFG1_TX_FLOW;
+ if (!priv->pause_aneg_en && priv->rx_pause_en)
+ tempval |= MACCFG1_RX_FLOW;
+ /* the soft reset bit is not self-resetting, so we need to
+ * clear it before resuming normal operation
+ */
gfar_write(&regs->maccfg1, tempval);
/* Initialize MACCFG2. */
@@ -1461,7 +1467,7 @@ static int init_phy(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
uint gigabit_support =
priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
- SUPPORTED_1000baseT_Full : 0;
+ GFAR_SUPPORTED_GBIT : 0;
phy_interface_t interface;
priv->oldlink = 0;
@@ -2052,6 +2058,24 @@ static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
return skip_txbd(bdp, 1, base, ring_size);
}
+/* eTSEC12: csum generation not supported for some fcb offsets */
+static inline bool gfar_csum_errata_12(struct gfar_private *priv,
+ unsigned long fcb_addr)
+{
+ return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
+ (fcb_addr % 0x20) > 0x18);
+}
+
+/* eTSEC76: csum generation for frames larger than 2500 may
+ * cause excess delays before start of transmission
+ */
+static inline bool gfar_csum_errata_76(struct gfar_private *priv,
+ unsigned int len)
+{
+ return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
+ (len > 2500));
+}
+
/* This is called by the kernel when a frame is ready for transmission.
* It is pointed to by the dev->hard_start_xmit function pointer
*/
@@ -2064,23 +2088,11 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct txfcb *fcb = NULL;
struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
u32 lstatus;
- int i, rq = 0, do_tstamp = 0;
+ int i, rq = 0;
+ int do_tstamp, do_csum, do_vlan;
u32 bufaddr;
unsigned long flags;
- unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
-
- /* TOE=1 frames larger than 2500 bytes may see excess delays
- * before start of transmission.
- */
- if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_76) &&
- skb->ip_summed == CHECKSUM_PARTIAL &&
- skb->len > 2500)) {
- int ret;
-
- ret = skb_checksum_help(skb);
- if (ret)
- return ret;
- }
+ unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;
rq = skb->queue_mapping;
tx_queue = priv->tx_queue[rq];
@@ -2088,21 +2100,23 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
base = tx_queue->tx_bd_base;
regs = tx_queue->grp->regs;
+ do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
+ do_vlan = vlan_tx_tag_present(skb);
+ do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en;
+
+ if (do_csum || do_vlan)
+ fcb_len = GMAC_FCB_LEN;
+
/* check if time stamp should be generated */
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- priv->hwts_tx_en)) {
- do_tstamp = 1;
- fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
- }
+ if (unlikely(do_tstamp))
+ fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
/* make space for additional header when fcb is needed */
- if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
- vlan_tx_tag_present(skb) ||
- unlikely(do_tstamp)) &&
- (skb_headroom(skb) < fcb_length)) {
+ if (fcb_len && unlikely(skb_headroom(skb) < fcb_len)) {
struct sk_buff *skb_new;
- skb_new = skb_realloc_headroom(skb, fcb_length);
+ skb_new = skb_realloc_headroom(skb, fcb_len);
if (!skb_new) {
dev->stats.tx_errors++;
kfree_skb(skb);
@@ -2133,7 +2147,10 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
/* Update transmit stats */
- tx_queue->stats.tx_bytes += skb->len;
+ bytes_sent = skb->len;
+ tx_queue->stats.tx_bytes += bytes_sent;
+ /* keep Tx bytes on wire for BQL accounting */
+ GFAR_CB(skb)->bytes_sent = bytes_sent;
tx_queue->stats.tx_packets++;
txbdp = txbdp_start = tx_queue->cur_tx;
@@ -2153,12 +2170,13 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
/* Place the fragment addresses and lengths into the TxBDs */
for (i = 0; i < nr_frags; i++) {
+ unsigned int frag_len;
/* Point at the next BD, wrapping as needed */
txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
- length = skb_shinfo(skb)->frags[i].size;
+ frag_len = skb_shinfo(skb)->frags[i].size;
- lstatus = txbdp->lstatus | length |
+ lstatus = txbdp->lstatus | frag_len |
BD_LFLAG(TXBD_READY);
/* Handle the last BD specially */
@@ -2168,7 +2186,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
bufaddr = skb_frag_dma_map(priv->dev,
&skb_shinfo(skb)->frags[i],
0,
- length,
+ frag_len,
DMA_TO_DEVICE);
/* set the TxBD length and buffer pointer */
@@ -2185,36 +2203,38 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
memset(skb->data, 0, GMAC_TXPAL_LEN);
}
- /* Set up checksumming */
- if (CHECKSUM_PARTIAL == skb->ip_summed) {
+ /* Add TxFCB if required */
+ if (fcb_len) {
fcb = gfar_add_fcb(skb);
- /* as specified by errata */
- if (unlikely(gfar_has_errata(priv, GFAR_ERRATA_12) &&
- ((unsigned long)fcb % 0x20) > 0x18)) {
+ lstatus |= BD_LFLAG(TXBD_TOE);
+ }
+
+ /* Set up checksumming */
+ if (do_csum) {
+ gfar_tx_checksum(skb, fcb, fcb_len);
+
+ if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
+ unlikely(gfar_csum_errata_76(priv, skb->len))) {
__skb_pull(skb, GMAC_FCB_LEN);
skb_checksum_help(skb);
- } else {
- lstatus |= BD_LFLAG(TXBD_TOE);
- gfar_tx_checksum(skb, fcb, fcb_length);
+ if (do_vlan || do_tstamp) {
+ /* put back a new fcb for vlan/tstamp TOE */
+ fcb = gfar_add_fcb(skb);
+ } else {
+ /* Tx TOE not used */
+ lstatus &= ~(BD_LFLAG(TXBD_TOE));
+ fcb = NULL;
+ }
}
}
- if (vlan_tx_tag_present(skb)) {
- if (unlikely(NULL == fcb)) {
- fcb = gfar_add_fcb(skb);
- lstatus |= BD_LFLAG(TXBD_TOE);
- }
-
+ if (do_vlan)
gfar_tx_vlan(skb, fcb);
- }
/* Setup tx hardware time stamping if requested */
if (unlikely(do_tstamp)) {
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- if (fcb == NULL)
- fcb = gfar_add_fcb(skb);
fcb->ptp = 1;
- lstatus |= BD_LFLAG(TXBD_TOE);
}
txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
@@ -2226,15 +2246,15 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
* the full frame length.
*/
if (unlikely(do_tstamp)) {
- txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
+ txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_len;
txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
- (skb_headlen(skb) - fcb_length);
+ (skb_headlen(skb) - fcb_len);
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
} else {
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
}
- netdev_tx_sent_queue(txq, skb->len);
+ netdev_tx_sent_queue(txq, bytes_sent);
/* We can work in parallel with gfar_clean_tx_ring(), except
* when modifying num_txbdfree. Note that we didn't grab the lock
@@ -2554,7 +2574,7 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
bdp = next_txbd(bdp, base, tx_ring_size);
}
- bytes_sent += skb->len;
+ bytes_sent += GFAR_CB(skb)->bytes_sent;
dev_kfree_skb_any(skb);
@@ -3014,6 +3034,41 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
return IRQ_HANDLED;
}
+static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
+{
+ struct phy_device *phydev = priv->phydev;
+ u32 val = 0;
+
+ if (!phydev->duplex)
+ return val;
+
+ if (!priv->pause_aneg_en) {
+ if (priv->tx_pause_en)
+ val |= MACCFG1_TX_FLOW;
+ if (priv->rx_pause_en)
+ val |= MACCFG1_RX_FLOW;
+ } else {
+ u16 lcl_adv, rmt_adv;
+ u8 flowctrl;
+ /* get link partner capabilities */
+ rmt_adv = 0;
+ if (phydev->pause)
+ rmt_adv = LPA_PAUSE_CAP;
+ if (phydev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+
+ lcl_adv = mii_advertise_flowctrl(phydev->advertising);
+
+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+ if (flowctrl & FLOW_CTRL_TX)
+ val |= MACCFG1_TX_FLOW;
+ if (flowctrl & FLOW_CTRL_RX)
+ val |= MACCFG1_RX_FLOW;
+ }
+
+ return val;
+}
+
/* Called every time the controller might need to be made
* aware of new link state. The PHY code conveys this
* information through variables in the phydev structure, and this
@@ -3032,6 +3087,7 @@ static void adjust_link(struct net_device *dev)
lock_tx_qs(priv);
if (phydev->link) {
+ u32 tempval1 = gfar_read(&regs->maccfg1);
u32 tempval = gfar_read(&regs->maccfg2);
u32 ecntrl = gfar_read(&regs->ecntrl);
@@ -3080,6 +3136,10 @@ static void adjust_link(struct net_device *dev)
priv->oldspeed = phydev->speed;
}
+ tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+ tempval1 |= gfar_get_flowctrl_cfg(priv);
+
+ gfar_write(&regs->maccfg1, tempval1);
gfar_write(&regs->maccfg2, tempval);
gfar_write(&regs->ecntrl, ecntrl);
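
gfar_get_flowctrl_cfg() above resolves the pause configuration against the link partner with the generic MII helpers. A standalone sketch of that resolution step, using the helpers as documented in include/linux/mii.h; the resolve_pause() wrapper and its parameters are invented for illustration:

#include <linux/types.h>
#include <linux/mii.h>

/* Resolve 802.3x pause for a full-duplex link: combine what we want
 * locally (rx/tx pause) with the partner's LPA_PAUSE_* advertisement.
 */
static void resolve_pause(bool want_rx, bool want_tx, u16 rmt_adv,
			  bool *tx_en, bool *rx_en)
{
	u16 lcl_adv;
	u8 cap;

	/* encode our wishes as ADVERTISE_PAUSE_CAP/ASYM bits */
	lcl_adv = mii_advertise_flowctrl((want_rx ? FLOW_CTRL_RX : 0) |
					 (want_tx ? FLOW_CTRL_TX : 0));

	/* standard symmetric/asymmetric pause resolution */
	cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);

	*tx_en = !!(cap & FLOW_CTRL_TX);
	*rx_en = !!(cap & FLOW_CTRL_RX);
}
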
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 04b552cd419..04112b98ff5 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -146,6 +146,10 @@ extern const char gfar_driver_version[];
| SUPPORTED_Autoneg \
| SUPPORTED_MII)
+#define GFAR_SUPPORTED_GBIT (SUPPORTED_1000baseT_Full \
+ | SUPPORTED_Pause \
+ | SUPPORTED_Asym_Pause)
+
/* TBI register addresses */
#define MII_TBICON 0x11
@@ -571,7 +575,7 @@ struct rxfcb {
};
struct gianfar_skb_cb {
- int alignamount;
+ unsigned int bytes_sent; /* bytes-on-wire (i.e. no FCB) */
};
#define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb))
@@ -1009,7 +1013,6 @@ struct gfar_irqinfo {
* @napi: the napi poll function
* @priv: back pointer to the priv structure
* @regs: the ioremapped register space for this group
- * @grp_id: group id for this group
* @irqinfo: TX/RX/ER irq data for this group
*/
@@ -1018,11 +1021,10 @@ struct gfar_priv_grp {
struct napi_struct napi;
struct gfar_private *priv;
struct gfar __iomem *regs;
- unsigned int grp_id;
+ unsigned int rstat;
unsigned long num_rx_queues;
unsigned long rx_bit_map;
/* cacheline 3 */
- unsigned int rstat;
unsigned int tstat;
unsigned long num_tx_queues;
unsigned long tx_bit_map;
@@ -1102,7 +1104,11 @@ struct gfar_private {
/* Wake-on-LAN enabled */
wol_en:1,
/* Enable priority based Tx scheduling in Hw */
- prio_sched_en:1;
+ prio_sched_en:1,
+ /* Flow control flags */
+ pause_aneg_en:1,
+ tx_pause_en:1,
+ rx_pause_en:1;
/* The total tx and rx ring size for the enabled queues */
unsigned int total_tx_ring_size;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 21cd88124ca..d3d7ede27ef 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -535,6 +535,78 @@ static int gfar_sringparam(struct net_device *dev,
return err;
}
+static void gfar_gpauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+
+ epause->autoneg = !!priv->pause_aneg_en;
+ epause->rx_pause = !!priv->rx_pause_en;
+ epause->tx_pause = !!priv->tx_pause_en;
+}
+
+static int gfar_spauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 oldadv, newadv;
+
+ if (!(phydev->supported & SUPPORTED_Pause) ||
+ (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+ (epause->rx_pause != epause->tx_pause)))
+ return -EINVAL;
+
+ priv->rx_pause_en = priv->tx_pause_en = 0;
+ if (epause->rx_pause) {
+ priv->rx_pause_en = 1;
+
+ if (epause->tx_pause) {
+ priv->tx_pause_en = 1;
+ /* FLOW_CTRL_RX & TX */
+ newadv = ADVERTISED_Pause;
+ } else /* FLOW_CTRL_RX */
+ newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+ } else if (epause->tx_pause) {
+ priv->tx_pause_en = 1;
+ /* FLOW_CTRL_TX */
+ newadv = ADVERTISED_Asym_Pause;
+ } else
+ newadv = 0;
+
+ if (epause->autoneg)
+ priv->pause_aneg_en = 1;
+ else
+ priv->pause_aneg_en = 0;
+
+ oldadv = phydev->advertising &
+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ if (oldadv != newadv) {
+ phydev->advertising &=
+ ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ phydev->advertising |= newadv;
+ if (phydev->autoneg)
+ /* inform link partner of our
+ * new flow ctrl settings
+ */
+ return phy_start_aneg(phydev);
+
+ if (!epause->autoneg) {
+ u32 tempval;
+ tempval = gfar_read(&regs->maccfg1);
+ tempval &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
+ if (priv->tx_pause_en)
+ tempval |= MACCFG1_TX_FLOW;
+ if (priv->rx_pause_en)
+ tempval |= MACCFG1_RX_FLOW;
+ gfar_write(&regs->maccfg1, tempval);
+ }
+ }
+
+ return 0;
+}
+
int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
struct gfar_private *priv = netdev_priv(dev);
@@ -1806,6 +1878,8 @@ const struct ethtool_ops gfar_ethtool_ops = {
.set_coalesce = gfar_scoalesce,
.get_ringparam = gfar_gringparam,
.set_ringparam = gfar_sringparam,
+ .get_pauseparam = gfar_gpauseparam,
+ .set_pauseparam = gfar_spauseparam,
.get_strings = gfar_gstrings,
.get_sset_count = gfar_sset_count,
.get_ethtool_stats = gfar_fill_stats,
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 3c43dac894e..5930c39672d 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3911,14 +3911,12 @@ static int ucc_geth_probe(struct platform_device* ofdev)
static int ucc_geth_remove(struct platform_device* ofdev)
{
- struct device *device = &ofdev->dev;
- struct net_device *dev = dev_get_drvdata(device);
+ struct net_device *dev = platform_get_drvdata(ofdev);
struct ucc_geth_private *ugeth = netdev_priv(dev);
unregister_netdev(dev);
free_netdev(dev);
ucc_geth_memclean(ugeth);
- dev_set_drvdata(device, NULL);
return 0;
}
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.h b/drivers/net/ethernet/i825xx/sun3_82586.h
index 93346f00486..79aef681ac8 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.h
+++ b/drivers/net/ethernet/i825xx/sun3_82586.h
@@ -133,8 +133,8 @@ struct rfd_struct
unsigned char last; /* Bit15,Last Frame on List / Bit14,suspend */
unsigned short next; /* linkoffset to next RFD */
unsigned short rbd_offset; /* pointeroffset to RBD-buffer */
- unsigned char dest[6]; /* ethernet-address, destination */
- unsigned char source[6]; /* ethernet-address, source */
+ unsigned char dest[ETH_ALEN]; /* ethernet-address, destination */
+ unsigned char source[ETH_ALEN]; /* ethernet-address, source */
unsigned short length; /* 802.3 frame-length */
unsigned short zero_dummy; /* dummy */
};
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index d300a0c0eaf..6b5c7222342 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2312,7 +2312,7 @@ static int emac_check_deps(struct emac_instance *dev,
if (deps[i].ofdev == NULL)
continue;
if (deps[i].drvdata == NULL)
- deps[i].drvdata = dev_get_drvdata(&deps[i].ofdev->dev);
+ deps[i].drvdata = platform_get_drvdata(deps[i].ofdev);
if (deps[i].drvdata != NULL)
there++;
}
@@ -2799,9 +2799,9 @@ static int emac_probe(struct platform_device *ofdev)
/* display more info about what's missing ? */
goto err_reg_unmap;
}
- dev->mal = dev_get_drvdata(&dev->mal_dev->dev);
+ dev->mal = platform_get_drvdata(dev->mal_dev);
if (dev->mdio_dev != NULL)
- dev->mdio_instance = dev_get_drvdata(&dev->mdio_dev->dev);
+ dev->mdio_instance = platform_get_drvdata(dev->mdio_dev);
/* Register with MAL */
dev->commac.ops = &emac_commac_ops;
@@ -2892,7 +2892,7 @@ static int emac_probe(struct platform_device *ofdev)
* fully initialized
*/
wmb();
- dev_set_drvdata(&ofdev->dev, dev);
+ platform_set_drvdata(ofdev, dev);
/* There's a new kid in town ! Let's tell everybody */
wake_up_all(&emac_probe_wait);
@@ -2951,12 +2951,10 @@ static int emac_probe(struct platform_device *ofdev)
static int emac_remove(struct platform_device *ofdev)
{
- struct emac_instance *dev = dev_get_drvdata(&ofdev->dev);
+ struct emac_instance *dev = platform_get_drvdata(ofdev);
DBG(dev, "remove" NL);
- dev_set_drvdata(&ofdev->dev, NULL);
-
unregister_netdev(dev->ndev);
cancel_work_sync(&dev->reset_work);
diff --git a/drivers/net/ethernet/ibm/emac/mal.c b/drivers/net/ethernet/ibm/emac/mal.c
index 856ea66c922..dac564c2544 100644
--- a/drivers/net/ethernet/ibm/emac/mal.c
+++ b/drivers/net/ethernet/ibm/emac/mal.c
@@ -637,8 +637,8 @@ static int mal_probe(struct platform_device *ofdev)
bd_size = sizeof(struct mal_descriptor) *
(NUM_TX_BUFF * mal->num_tx_chans +
NUM_RX_BUFF * mal->num_rx_chans);
- mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
- GFP_KERNEL | __GFP_ZERO);
+ mal->bd_virt = dma_zalloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
+ GFP_KERNEL);
if (mal->bd_virt == NULL) {
err = -ENOMEM;
goto fail_unmap;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 70fd5596884..5d41aee69d1 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -106,7 +106,7 @@ struct ibmveth_stat ibmveth_stats[] = {
/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
- return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
+ return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
}
static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
@@ -132,7 +132,7 @@ static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
- return adapter->rx_queue.queue_addr[adapter->rx_queue.index].length;
+ return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}
static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 43a794fab9f..84066bafe05 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -164,14 +164,26 @@ struct ibmveth_adapter {
u64 tx_send_failed;
};
+/*
+ * We pass struct ibmveth_buf_desc_fields to the hypervisor in registers,
+ * so we don't need to byteswap the two elements. However since we use
+ * a union (ibmveth_buf_desc) to convert from the struct to a u64 we
+ * do end up with endian specific ordering of the elements and that
+ * needs correcting.
+ */
struct ibmveth_buf_desc_fields {
+#ifdef __BIG_ENDIAN
+ u32 flags_len;
+ u32 address;
+#else
+ u32 address;
u32 flags_len;
+#endif
#define IBMVETH_BUF_VALID 0x80000000
#define IBMVETH_BUF_TOGGLE 0x40000000
#define IBMVETH_BUF_NO_CSUM 0x02000000
#define IBMVETH_BUF_CSUM_GOOD 0x01000000
#define IBMVETH_BUF_LEN_MASK 0x00FFFFFF
- u32 address;
};
union ibmveth_buf_desc {
@@ -180,7 +192,7 @@ union ibmveth_buf_desc {
};
struct ibmveth_rx_q_entry {
- u32 flags_off;
+ __be32 flags_off;
#define IBMVETH_RXQ_TOGGLE 0x80000000
#define IBMVETH_RXQ_TOGGLE_SHIFT 31
#define IBMVETH_RXQ_VALID 0x40000000
@@ -188,7 +200,8 @@ struct ibmveth_rx_q_entry {
#define IBMVETH_RXQ_CSUM_GOOD 0x01000000
#define IBMVETH_RXQ_OFF_MASK 0x0000FFFF
- u32 length;
+ __be32 length;
+ /* correlator is only used by the OS, no need to byte swap */
u64 correlator;
};
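
The #ifdef __BIG_ENDIAN reordering above matters because ibmveth only ever hands these two 32-bit fields to the hypervisor as the single u64 of union ibmveth_buf_desc, so each field must land in the same half of that u64 regardless of host endianness. A small union in the same spirit, with invented names, showing why swapping the member order keeps the register image stable:

#include <linux/types.h>
#include <asm/byteorder.h>

union demo_desc {
	u64 reg;			/* the value passed in a register */
	struct {
#ifdef __BIG_ENDIAN
		u32 flags_len;		/* offset 0: high 32 bits of reg */
		u32 address;		/* offset 4: low 32 bits of reg */
#else
		u32 address;		/* offset 0: low 32 bits of reg */
		u32 flags_len;		/* offset 4: high 32 bits of reg */
#endif
	} fields;
};
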
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 1fde90b9668..bdf5023724e 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -1004,7 +1004,7 @@ static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
/* Check to see if the NIC has been initialized via nic_open,
* before trying to read statistic registers.
*/
- if (!test_bit(__LINK_STATE_START, &dev->state))
+ if (!netif_running(dev))
return &sp->stats;
sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 5115ae76a5d..ada6e210279 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1175,15 +1175,12 @@ static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
config->rx_discard_short_frames = 0x0; /* 1=discard, 0=save */
}
- netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
- "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
- netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
- "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
- netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
- "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
- c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[00-07]=%8ph\n",
+ c + 0);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[08-15]=%8ph\n",
+ c + 8);
+ netif_printk(nic, hw, KERN_DEBUG, nic->netdev, "[16-23]=%8ph\n",
+ c + 16);
return 0;
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 82a967c9559..73a8aeefb92 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1019,8 +1019,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
- GFP_KERNEL | __GFP_ZERO);
+ txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
ret_val = 2;
goto err_nomem;
@@ -1077,8 +1077,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
}
rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
- rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
- GFP_KERNEL | __GFP_ZERO);
+ rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+ GFP_KERNEL);
if (!rxdr->desc) {
ret_val = 6;
goto err_nomem;
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 4c303e2a7cb..8fed74e3fa5 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1011,6 +1011,11 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
/* Must release MDIO ownership and mutex after MAC reset. */
switch (hw->mac.type) {
+ case e1000_82573:
+ /* Release mutex only if the hw semaphore is acquired */
+ if (!ret_val)
+ e1000_put_hw_semaphore_82573(hw);
+ break;
case e1000_82574:
case e1000_82583:
/* Release mutex only if the hw semaphore is acquired */
@@ -2057,6 +2062,7 @@ const struct e1000_info e1000_82583_info = {
| FLAG_HAS_JUMBO_FRAMES
| FLAG_HAS_CTRLEXT_ON_LOAD,
.flags2 = FLAG2_DISABLE_ASPM_L0S
+ | FLAG2_DISABLE_ASPM_L1
| FLAG2_NO_DISABLE_RX,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ffbc08f56c4..ad0edd11015 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -90,9 +90,6 @@ struct e1000_info;
#define E1000_MNG_VLAN_NONE (-1)
-/* Number of packet split data buffers (not including the header buffer) */
-#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
-
#define DEFAULT_JUMBO 9234
/* Time to wait before putting the device into D3 if there's no link (in ms). */
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 59c22bf1870..a8633b8f0ac 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -173,7 +173,7 @@ static int e1000_get_settings(struct net_device *netdev,
speed = adapter->link_speed;
ecmd->duplex = adapter->link_duplex - 1;
}
- } else {
+ } else if (!pm_runtime_suspended(netdev->dev.parent)) {
u32 status = er32(STATUS);
if (status & E1000_STATUS_LU) {
if (status & E1000_STATUS_SPEED_1000)
@@ -264,6 +264,9 @@ static int e1000_set_settings(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ int ret_val = 0;
+
+ pm_runtime_get_sync(netdev->dev.parent);
/* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed
@@ -271,7 +274,8 @@ static int e1000_set_settings(struct net_device *netdev,
if (hw->phy.ops.check_reset_block &&
hw->phy.ops.check_reset_block(hw)) {
e_err("Cannot change link characteristics when SoL/IDER is active.\n");
- return -EINVAL;
+ ret_val = -EINVAL;
+ goto out;
}
/* MDI setting is only allowed when autoneg enabled because
@@ -279,13 +283,16 @@ static int e1000_set_settings(struct net_device *netdev,
* duplex is forced.
*/
if (ecmd->eth_tp_mdix_ctrl) {
- if (hw->phy.media_type != e1000_media_type_copper)
- return -EOPNOTSUPP;
+ if (hw->phy.media_type != e1000_media_type_copper) {
+ ret_val = -EOPNOTSUPP;
+ goto out;
+ }
if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
(ecmd->autoneg != AUTONEG_ENABLE)) {
e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
- return -EINVAL;
+ ret_val = -EINVAL;
+ goto out;
}
}
@@ -307,8 +314,8 @@ static int e1000_set_settings(struct net_device *netdev,
u32 speed = ethtool_cmd_speed(ecmd);
/* calling this overrides forced MDI setting */
if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) {
- clear_bit(__E1000_RESETTING, &adapter->state);
- return -EINVAL;
+ ret_val = -EINVAL;
+ goto out;
}
}
@@ -331,8 +338,10 @@ static int e1000_set_settings(struct net_device *netdev,
e1000e_reset(adapter);
}
+out:
+ pm_runtime_put_sync(netdev->dev.parent);
clear_bit(__E1000_RESETTING, &adapter->state);
- return 0;
+ return ret_val;
}
static void e1000_get_pauseparam(struct net_device *netdev,
@@ -366,6 +375,8 @@ static int e1000_set_pauseparam(struct net_device *netdev,
while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
usleep_range(1000, 2000);
+ pm_runtime_get_sync(netdev->dev.parent);
+
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
hw->fc.requested_mode = e1000_fc_default;
if (netif_running(adapter->netdev)) {
@@ -398,6 +409,7 @@ static int e1000_set_pauseparam(struct net_device *netdev,
}
out:
+ pm_runtime_put_sync(netdev->dev.parent);
clear_bit(__E1000_RESETTING, &adapter->state);
return retval;
}
@@ -428,6 +440,8 @@ static void e1000_get_regs(struct net_device *netdev,
u32 *regs_buff = p;
u16 phy_data;
+ pm_runtime_get_sync(netdev->dev.parent);
+
memset(p, 0, E1000_REGS_LEN * sizeof(u32));
regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
@@ -472,6 +486,8 @@ static void e1000_get_regs(struct net_device *netdev,
e1e_rphy(hw, MII_STAT1000, &phy_data);
regs_buff[24] = (u32)phy_data; /* phy local receiver status */
regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
+
+ pm_runtime_put_sync(netdev->dev.parent);
}
static int e1000_get_eeprom_len(struct net_device *netdev)
@@ -504,6 +520,8 @@ static int e1000_get_eeprom(struct net_device *netdev,
if (!eeprom_buff)
return -ENOMEM;
+ pm_runtime_get_sync(netdev->dev.parent);
+
if (hw->nvm.type == e1000_nvm_eeprom_spi) {
ret_val = e1000_read_nvm(hw, first_word,
last_word - first_word + 1,
@@ -517,6 +535,8 @@ static int e1000_get_eeprom(struct net_device *netdev,
}
}
+ pm_runtime_put_sync(netdev->dev.parent);
+
if (ret_val) {
/* a read error occurred, throw away the result */
memset(eeprom_buff, 0xff, sizeof(u16) *
@@ -566,6 +586,8 @@ static int e1000_set_eeprom(struct net_device *netdev,
ptr = (void *)eeprom_buff;
+ pm_runtime_get_sync(netdev->dev.parent);
+
if (eeprom->offset & 1) {
/* need read/modify/write of first changed EEPROM word */
/* only the second byte of the word is being modified */
@@ -606,6 +628,7 @@ static int e1000_set_eeprom(struct net_device *netdev,
ret_val = e1000e_update_nvm_checksum(hw);
out:
+ pm_runtime_put_sync(netdev->dev.parent);
kfree(eeprom_buff);
return ret_val;
}
@@ -701,6 +724,8 @@ static int e1000_set_ringparam(struct net_device *netdev,
}
}
+ pm_runtime_get_sync(netdev->dev.parent);
+
e1000e_down(adapter);
/* We can't just free everything and then setup again, because the
@@ -739,6 +764,7 @@ err_setup_rx:
e1000e_free_tx_resources(temp_tx);
err_setup:
e1000e_up(adapter);
+ pm_runtime_put_sync(netdev->dev.parent);
free_temp:
vfree(temp_tx);
vfree(temp_rx);
@@ -1639,7 +1665,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
ret_val = 13; /* ret_val is the same as mis-compare */
break;
}
- if (jiffies >= (time + 20)) {
+ if (time_after(jiffies, time + 20)) {
ret_val = 14; /* error code for time out error */
break;
}
@@ -1732,6 +1758,8 @@ static void e1000_diag_test(struct net_device *netdev,
u8 autoneg;
bool if_running = netif_running(netdev);
+ pm_runtime_get_sync(netdev->dev.parent);
+
set_bit(__E1000_TESTING, &adapter->state);
if (!if_running) {
@@ -1817,6 +1845,8 @@ static void e1000_diag_test(struct net_device *netdev,
}
msleep_interruptible(4 * 1000);
+
+ pm_runtime_put_sync(netdev->dev.parent);
}
static void e1000_get_wol(struct net_device *netdev,
@@ -1891,6 +1921,8 @@ static int e1000_set_phys_id(struct net_device *netdev,
switch (state) {
case ETHTOOL_ID_ACTIVE:
+ pm_runtime_get_sync(netdev->dev.parent);
+
if (!hw->mac.ops.blink_led)
return 2; /* cycle on/off twice per second */
@@ -1902,6 +1934,7 @@ static int e1000_set_phys_id(struct net_device *netdev,
e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
hw->mac.ops.led_off(hw);
hw->mac.ops.cleanup_led(hw);
+ pm_runtime_put_sync(netdev->dev.parent);
break;
case ETHTOOL_ID_ON:
@@ -1912,6 +1945,7 @@ static int e1000_set_phys_id(struct net_device *netdev,
hw->mac.ops.led_off(hw);
break;
}
+
return 0;
}
@@ -1950,11 +1984,15 @@ static int e1000_set_coalesce(struct net_device *netdev,
adapter->itr_setting = adapter->itr & ~3;
}
+ pm_runtime_get_sync(netdev->dev.parent);
+
if (adapter->itr_setting != 0)
e1000e_write_itr(adapter, adapter->itr);
else
e1000e_write_itr(adapter, 0);
+ pm_runtime_put_sync(netdev->dev.parent);
+
return 0;
}
@@ -1968,7 +2006,9 @@ static int e1000_nway_reset(struct net_device *netdev)
if (!adapter->hw.mac.autoneg)
return -EINVAL;
+ pm_runtime_get_sync(netdev->dev.parent);
e1000e_reinit_locked(adapter);
+ pm_runtime_put_sync(netdev->dev.parent);
return 0;
}
@@ -1982,7 +2022,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
int i;
char *p = NULL;
+ pm_runtime_get_sync(netdev->dev.parent);
+
e1000e_get_stats64(netdev, &net_stats);
+
+ pm_runtime_put_sync(netdev->dev.parent);
+
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
switch (e1000_gstrings_stats[i].type) {
case NETDEV_STATS:
@@ -2033,7 +2078,11 @@ static int e1000_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXFH: {
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 mrqc = er32(MRQC);
+ u32 mrqc;
+
+ pm_runtime_get_sync(netdev->dev.parent);
+ mrqc = er32(MRQC);
+ pm_runtime_put_sync(netdev->dev.parent);
if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK))
return 0;
@@ -2096,9 +2145,13 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
return -EOPNOTSUPP;
}
+ pm_runtime_get_sync(netdev->dev.parent);
+
ret_val = hw->phy.ops.acquire(hw);
- if (ret_val)
+ if (ret_val) {
+ pm_runtime_put_sync(netdev->dev.parent);
return -EBUSY;
+ }
/* EEE Capability */
ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data);
@@ -2117,14 +2170,11 @@ static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
/* EEE PCS Status */
ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data);
+ if (ret_val)
+ goto release;
if (hw->phy.type == e1000_phy_82579)
phy_data <<= 8;
-release:
- hw->phy.ops.release(hw);
- if (ret_val)
- return -ENODATA;
-
/* Result of the EEE auto negotiation - there is no register that
* has the status of the EEE negotiation so do a best-guess based
* on whether Tx or Rx LPI indications have been received.
@@ -2136,7 +2186,14 @@ release:
edata->tx_lpi_enabled = true;
edata->tx_lpi_timer = er32(LPIC) >> E1000_LPIC_LPIET_SHIFT;
- return 0;
+release:
+ hw->phy.ops.release(hw);
+ if (ret_val)
+ ret_val = -ENODATA;
+
+ pm_runtime_put_sync(netdev->dev.parent);
+
+ return ret_val;
}
static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
@@ -2169,12 +2226,16 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled;
+ pm_runtime_get_sync(netdev->dev.parent);
+
/* reset the link */
if (netif_running(netdev))
e1000e_reinit_locked(adapter);
else
e1000e_reset(adapter);
+ pm_runtime_put_sync(netdev->dev.parent);
+
return 0;
}
@@ -2212,19 +2273,7 @@ static int e1000e_get_ts_info(struct net_device *netdev,
return 0;
}
-static int e1000e_ethtool_begin(struct net_device *netdev)
-{
- return pm_runtime_get_sync(netdev->dev.parent);
-}
-
-static void e1000e_ethtool_complete(struct net_device *netdev)
-{
- pm_runtime_put_sync(netdev->dev.parent);
-}
-
static const struct ethtool_ops e1000_ethtool_ops = {
- .begin = e1000e_ethtool_begin,
- .complete = e1000e_ethtool_complete,
.get_settings = e1000_get_settings,
.set_settings = e1000_set_settings,
.get_drvinfo = e1000_get_drvinfo,
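The ethtool.c hunks above replace the driver-wide .begin/.complete runtime-PM hooks with explicit pm_runtime_get_sync()/pm_runtime_put_sync() calls around each individual register access, so the device is only resumed for operations that actually touch hardware. A minimal standalone sketch of that bracket (not part of the patch; read_hw_register() is a hypothetical MMIO helper):

#include <linux/netdevice.h>
#include <linux/pm_runtime.h>

static int example_read_counter(struct net_device *netdev, u32 *val)
{
	struct device *dev = netdev->dev.parent;
	int err;

	/* Resume the device before touching its registers. */
	err = pm_runtime_get_sync(dev);
	if (err < 0) {
		pm_runtime_put_noidle(dev);	/* keep the usage count balanced */
		return err;
	}

	*val = read_hw_register(netdev);	/* hypothetical register read */

	/* Let the device runtime-suspend again once we are done. */
	pm_runtime_put_sync(dev);
	return 0;
}

Bracketing each operation keeps the get/put pairs balanced even on early exits, which is why the EEE paths above now drop the PM reference on every error branch.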
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index a6f903a9b77..b7f38435d1f 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -90,6 +90,10 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B
#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A
#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559
+#define E1000_DEV_ID_PCH_I218_LM2 0x15A0
+#define E1000_DEV_ID_PCH_I218_V2 0x15A1
+#define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */
+#define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */
#define E1000_REVISION_4 4
@@ -227,6 +231,10 @@ union e1000_rx_desc_extended {
};
#define MAX_PS_BUFFERS 4
+
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+
/* Receive Descriptor - Packet Split */
union e1000_rx_desc_packet_split {
struct {
@@ -251,7 +259,8 @@ union e1000_rx_desc_packet_split {
} middle;
struct {
__le16 header_status;
- __le16 length[3]; /* length of buffers 1-3 */
+ /* length of buffers 1-3 */
+ __le16 length[PS_PAGE_BUFFERS];
} upper;
__le64 reserved;
} wb; /* writeback */
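The hw.h hunk above derives PS_PAGE_BUFFERS from MAX_PS_BUFFERS so the writeback length[] array of the packet-split descriptor and any loop over the split data buffers share a single definition. A rough sketch of how such a loop uses the constant (simplified: the real descriptor fields are __le16 and would go through le16_to_cpu()):

#include <linux/types.h>

#define MAX_PS_BUFFERS 4
/* Data buffers only; the header buffer is not counted here. */
#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)

/* Sum the reported lengths of the split data buffers of one descriptor. */
static u32 ps_data_bytes(const u16 length[PS_PAGE_BUFFERS])
{
	u32 total = 0;
	int i;

	for (i = 0; i < PS_PAGE_BUFFERS; i++)
		total += length[i];

	return total;
}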
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 9dde390f7e7..af08188d7e6 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -185,6 +185,7 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
u32 phy_id = 0;
s32 ret_val;
u16 retry_count;
+ u32 mac_reg = 0;
for (retry_count = 0; retry_count < 2; retry_count++) {
ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg);
@@ -203,11 +204,11 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
if (hw->phy.id) {
if (hw->phy.id == phy_id)
- return true;
+ goto out;
} else if (phy_id) {
hw->phy.id = phy_id;
hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK);
- return true;
+ goto out;
}
/* In case the PHY needs to be in mdio slow mode,
@@ -219,7 +220,22 @@ static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw)
ret_val = e1000e_get_phy_id(hw);
hw->phy.ops.acquire(hw);
- return !ret_val;
+ if (ret_val)
+ return false;
+out:
+ if (hw->mac.type == e1000_pch_lpt) {
+ /* Unforce SMBus mode in PHY */
+ e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
+ phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
+ e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
+
+ /* Unforce SMBus mode in MAC */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+ }
+
+ return true;
}
/**
@@ -233,7 +249,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
{
u32 mac_reg, fwsm = er32(FWSM);
s32 ret_val;
- u16 phy_reg;
/* Gate automatic PHY configuration by hardware on managed and
* non-managed 82579 and newer adapters.
@@ -262,22 +277,16 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS;
ew32(CTRL_EXT, mac_reg);
+ /* Wait 50 milliseconds for MAC to finish any retries
+ * that it might be trying to perform from previous
+ * attempts to acknowledge any phy read requests.
+ */
+ msleep(50);
+
/* fall-through */
case e1000_pch2lan:
- if (e1000_phy_is_accessible_pchlan(hw)) {
- if (hw->mac.type == e1000_pch_lpt) {
- /* Unforce SMBus mode in PHY */
- e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg);
- phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS;
- e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg);
-
- /* Unforce SMBus mode in MAC */
- mac_reg = er32(CTRL_EXT);
- mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
- ew32(CTRL_EXT, mac_reg);
- }
+ if (e1000_phy_is_accessible_pchlan(hw))
break;
- }
/* fall-through */
case e1000_pchlan:
@@ -287,6 +296,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
if (hw->phy.ops.check_reset_block(hw)) {
e_dbg("Required LANPHYPC toggle blocked by ME\n");
+ ret_val = -E1000_ERR_PHY;
break;
}
@@ -298,15 +308,6 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC;
ew32(FEXTNVM3, mac_reg);
- if (hw->mac.type == e1000_pch_lpt) {
- /* Toggling LANPHYPC brings the PHY out of SMBus mode
- * So ensure that the MAC is also out of SMBus mode
- */
- mac_reg = er32(CTRL_EXT);
- mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
- ew32(CTRL_EXT, mac_reg);
- }
-
/* Toggle LANPHYPC Value bit */
mac_reg = er32(CTRL);
mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE;
@@ -325,6 +326,21 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
usleep_range(5000, 10000);
} while (!(er32(CTRL_EXT) &
E1000_CTRL_EXT_LPCD) && count--);
+ usleep_range(30000, 60000);
+ if (e1000_phy_is_accessible_pchlan(hw))
+ break;
+
+ /* Toggling LANPHYPC brings the PHY out of SMBus mode
+ * so ensure that the MAC is also out of SMBus mode
+ */
+ mac_reg = er32(CTRL_EXT);
+ mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS;
+ ew32(CTRL_EXT, mac_reg);
+
+ if (e1000_phy_is_accessible_pchlan(hw))
+ break;
+
+ ret_val = -E1000_ERR_PHY;
}
break;
default:
@@ -332,13 +348,14 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
}
hw->phy.ops.release(hw);
-
- /* Reset the PHY before any access to it. Doing so, ensures
- * that the PHY is in a known good state before we read/write
- * PHY registers. The generic reset is sufficient here,
- * because we haven't determined the PHY type yet.
- */
- ret_val = e1000e_phy_hw_reset_generic(hw);
+ if (!ret_val) {
+ /* Reset the PHY before any access to it. Doing so, ensures
+ * that the PHY is in a known good state before we read/write
+ * PHY registers. The generic reset is sufficient here,
+ * because we haven't determined the PHY type yet.
+ */
+ ret_val = e1000e_phy_hw_reset_generic(hw);
+ }
out:
/* Ungate automatic PHY configuration on non-managed 82579 */
@@ -793,29 +810,31 @@ release:
* When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications
* preventing further DMA write requests. Workaround the issue by disabling
 * the de-assertion of the clock request when in 1Gbps mode.
+ * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link
+ * speeds in order to avoid Tx hangs.
**/
static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
{
u32 fextnvm6 = er32(FEXTNVM6);
+ u32 status = er32(STATUS);
s32 ret_val = 0;
+ u16 reg;
- if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) {
- u16 kmrn_reg;
-
+ if (link && (status & E1000_STATUS_SPEED_1000)) {
ret_val = hw->phy.ops.acquire(hw);
if (ret_val)
return ret_val;
ret_val =
e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG,
- &kmrn_reg);
+ &reg);
if (ret_val)
goto release;
ret_val =
e1000e_write_kmrn_reg_locked(hw,
E1000_KMRNCTRLSTA_K1_CONFIG,
- kmrn_reg &
+ reg &
~E1000_KMRNCTRLSTA_K1_ENABLE);
if (ret_val)
goto release;
@@ -827,12 +846,45 @@ static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link)
ret_val =
e1000e_write_kmrn_reg_locked(hw,
E1000_KMRNCTRLSTA_K1_CONFIG,
- kmrn_reg);
+ reg);
release:
hw->phy.ops.release(hw);
} else {
/* clear FEXTNVM6 bit 8 on link down or 10/100 */
- ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+ fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK;
+
+ if (!link || ((status & E1000_STATUS_SPEED_100) &&
+ (status & E1000_STATUS_FD)))
+ goto update_fextnvm6;
+
+ ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg);
+ if (ret_val)
+ return ret_val;
+
+ /* Clear link status transmit timeout */
+ reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
+
+ if (status & E1000_STATUS_SPEED_100) {
+ /* Set inband Tx timeout to 5x10us for 100Half */
+ reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+ /* Do not extend the K1 entry latency for 100Half */
+ fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+ } else {
+ /* Set inband Tx timeout to 50x10us for 10Full/Half */
+ reg |= 50 <<
+ I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
+
+ /* Extend the K1 entry latency for 10 Mbps */
+ fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION;
+ }
+
+ ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg);
+ if (ret_val)
+ return ret_val;
+
+update_fextnvm6:
+ ew32(FEXTNVM6, fextnvm6);
}
return ret_val;
@@ -993,7 +1045,9 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
/* Work-around I218 hang issue */
if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
- (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) ||
+ (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) {
ret_val = e1000_k1_workaround_lpt_lp(hw, link);
if (ret_val)
return ret_val;
@@ -4168,7 +4222,9 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
u16 phy_reg, device_id = hw->adapter->pdev->device;
if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
- (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) {
+ (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
+ (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
+ (device_id == E1000_DEV_ID_PCH_I218_V3)) {
u32 fextnvm6 = er32(FEXTNVM6);
ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 80034a2b297..59865695b28 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -93,6 +93,7 @@
#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3
#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100
+#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
@@ -197,6 +198,11 @@
#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */
+/* Inband Control */
+#define I217_INBAND_CTRL PHY_REG(770, 18)
+#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK 0x3F00
+#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT 8
+
/* PHY Low Power Idle Control */
#define I82579_LPI_CTRL PHY_REG(772, 20)
#define I82579_LPI_CTRL_100_ENABLE 0x2000
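The I217_INBAND_CTRL mask/shift pair added above is what the K1 workaround earlier in this patch uses as a read-modify-write: clear the timeout field with the mask, then OR in the new count shifted into position (5 x 10us for 100Half, 50 x 10us for 10 Mbps). A standalone sketch of just that field update on a 16-bit register value (the helper name is made up for illustration):

#include <linux/types.h>

#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK	0x3F00
#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT	8

/* Return @reg with the link-status Tx timeout field set to @timeout_10us. */
static u16 set_inband_tx_timeout(u16 reg, u16 timeout_10us)
{
	reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK;
	reg |= timeout_10us << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT;
	return reg;
}

For example, set_inband_tx_timeout(reg, 5) for 100Half and set_inband_tx_timeout(reg, 50) for 10 Mbps match the values written above.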
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 77f81cbb601..e87e9b01f40 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -64,8 +64,6 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
-
static const struct e1000_info *e1000_info_tbl[] = {
[board_82571] = &e1000_82571_info,
[board_82572] = &e1000_82572_info,
@@ -2979,17 +2977,10 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
u32 pages = 0;
/* Workaround Si errata on PCHx - configure jumbo frame flow */
- if (hw->mac.type >= e1000_pch2lan) {
- s32 ret_val;
-
- if (adapter->netdev->mtu > ETH_DATA_LEN)
- ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
- else
- ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
-
- if (ret_val)
- e_dbg("failed to enable jumbo frame workaround mode\n");
- }
+ if ((hw->mac.type >= e1000_pch2lan) &&
+ (adapter->netdev->mtu > ETH_DATA_LEN) &&
+ e1000_lv_jumbo_workaround_ich8lan(hw, true))
+ e_dbg("failed to enable jumbo frame workaround mode\n");
/* Program MC offset vector base */
rctl = er32(RCTL);
@@ -3826,6 +3817,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
break;
}
+ pba = 14;
+ ew32(PBA, pba);
fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH;
fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL;
break;
@@ -4034,6 +4027,12 @@ void e1000e_down(struct e1000_adapter *adapter)
adapter->link_speed = 0;
adapter->link_duplex = 0;
+ /* Disable Si errata workaround on PCHx for jumbo frame flow */
+ if ((hw->mac.type >= e1000_pch2lan) &&
+ (adapter->netdev->mtu > ETH_DATA_LEN) &&
+ e1000_lv_jumbo_workaround_ich8lan(hw, false))
+ e_dbg("failed to disable jumbo frame workaround mode\n");
+
if (!pci_channel_offline(adapter->pdev))
e1000e_reset(adapter);
@@ -4683,11 +4682,11 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct e1000_phy_regs *phy = &adapter->phy_regs;
- if ((er32(STATUS) & E1000_STATUS_LU) &&
+ if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) &&
+ (er32(STATUS) & E1000_STATUS_LU) &&
(adapter->hw.phy.media_type == e1000_media_type_copper)) {
int ret_val;
- pm_runtime_get_sync(&adapter->pdev->dev);
ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
@@ -4698,7 +4697,6 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
if (ret_val)
e_warn("Error reading PHY register\n");
- pm_runtime_put_sync(&adapter->pdev->dev);
} else {
/* Do not read PHY registers if link is not up
* Set values to typical power-on defaults
@@ -5995,15 +5993,24 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
*/
e1000e_release_hw_control(adapter);
+ pci_clear_master(pdev);
+
/* The pci-e switch on some quad port adapters will report a
* correctable error when the MAC transitions from D0 to D3. To
* prevent this we need to mask off the correctable errors on the
* downstream port of the pci-e switch.
+ *
+ * The upstream bridge may be missing when the PCI device has been
+ * assigned to a guest (for example, with KVM on Power), so check
+ * for it before use.
*/
if (adapter->flags & FLAG_IS_QUAD_PORT) {
struct pci_dev *us_dev = pdev->bus->self;
u16 devctl;
+ if (!us_dev)
+ return 0;
+
pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
(devctl & ~PCI_EXP_DEVCTL_CERE));
@@ -6017,38 +6024,73 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
return 0;
}
-#ifdef CONFIG_PCIEASPM
-static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+/**
+ * e1000e_disable_aspm - Disable ASPM states
+ * @pdev: pointer to PCI device struct
+ * @state: bit-mask of ASPM states to disable
+ *
+ * Some devices *must* have certain ASPM states disabled per hardware errata.
+ **/
+static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
{
+ struct pci_dev *parent = pdev->bus->self;
+ u16 aspm_dis_mask = 0;
+ u16 pdev_aspmc, parent_aspmc;
+
+ switch (state) {
+ case PCIE_LINK_STATE_L0S:
+ case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1:
+ aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S;
+ /* fall-through - can't have L1 without L0s */
+ case PCIE_LINK_STATE_L1:
+ aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1;
+ break;
+ default:
+ return;
+ }
+
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
+ pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
+
+ if (parent) {
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
+ &parent_aspmc);
+ parent_aspmc &= PCI_EXP_LNKCTL_ASPMC;
+ }
+
+ /* Nothing to do if the ASPM states to be disabled already are */
+ if (!(pdev_aspmc & aspm_dis_mask) &&
+ (!parent || !(parent_aspmc & aspm_dis_mask)))
+ return;
+
+ dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
+ (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ?
+ "L0s" : "",
+ (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ?
+ "L1" : "");
+
+#ifdef CONFIG_PCIEASPM
pci_disable_link_state_locked(pdev, state);
-}
-#else
-static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
-{
- u16 aspm_ctl = 0;
- if (state & PCIE_LINK_STATE_L0S)
- aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L0S;
- if (state & PCIE_LINK_STATE_L1)
- aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L1;
+ /* Double-check ASPM control. If not disabled by the above, the
+ * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
+ * not enabled); override by writing PCI config space directly.
+ */
+ pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
+ pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
+
+ if (!(aspm_dis_mask & pdev_aspmc))
+ return;
+#endif
/* Both device and parent should have the same ASPM setting.
* Disable ASPM in downstream component first and then upstream.
*/
- pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_ctl);
+ pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask);
- if (pdev->bus->self)
- pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL,
- aspm_ctl);
-}
-#endif
-static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
-{
- dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
- (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
- (state & PCIE_LINK_STATE_L1) ? "L1" : "");
-
- __e1000e_disable_aspm(pdev, state);
+ if (parent)
+ pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
+ aspm_dis_mask);
}
#ifdef CONFIG_PM
@@ -6723,10 +6765,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->hw.fc.current_mode = e1000_fc_default;
adapter->hw.phy.autoneg_advertised = 0x2f;
- /* ring size defaults */
- adapter->rx_ring->count = E1000_DEFAULT_RXD;
- adapter->tx_ring->count = E1000_DEFAULT_TXD;
-
/* Initial Wake on LAN setting - If APM wake is enabled in
* the EEPROM, enable the ACPI Magic Packet filter
*/
@@ -6976,6 +7014,10 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
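The reworked e1000e_disable_aspm() above first asks the PCIe core to disable the requested states and only falls back to clearing the LNKCTL ASPM control bits directly, downstream device first and then the upstream parent, when the core (or BIOS) did not do it. A reduced sketch of that config-space fallback, using only the standard helpers already present in the hunk:

#include <linux/pci.h>

/* Clear the requested ASPM control bits on a device and its parent bridge. */
static void example_force_aspm_off(struct pci_dev *pdev, u16 aspm_dis_mask)
{
	struct pci_dev *parent = pdev->bus->self;
	u16 lnkctl;

	pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &lnkctl);
	if (!(lnkctl & aspm_dis_mask))
		return;	/* nothing left to disable on the endpoint */

	/* Disable in the downstream component first, then upstream. */
	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask);
	if (parent)
		pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
					   aspm_dis_mask);
}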
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index f21a91a299a..79b58353d84 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -176,7 +176,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
/* Verify phy id and set remaining function pointers */
switch (phy->id) {
- case M88E1545_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
case M88E1111_I_PHY_ID:
@@ -238,6 +238,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
+
/* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
@@ -250,86 +251,52 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
size = 15;
nvm->word_size = 1 << size;
- if (hw->mac.type < e1000_i210) {
- nvm->opcode_bits = 8;
- nvm->delay_usec = 1;
-
- switch (nvm->override) {
- case e1000_nvm_override_spi_large:
- nvm->page_size = 32;
- nvm->address_bits = 16;
- break;
- case e1000_nvm_override_spi_small:
- nvm->page_size = 8;
- nvm->address_bits = 8;
- break;
- default:
- nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
- nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
- 16 : 8;
- break;
- }
- if (nvm->word_size == (1 << 15))
- nvm->page_size = 128;
+ nvm->opcode_bits = 8;
+ nvm->delay_usec = 1;
- nvm->type = e1000_nvm_eeprom_spi;
- } else {
- nvm->type = e1000_nvm_flash_hw;
+ switch (nvm->override) {
+ case e1000_nvm_override_spi_large:
+ nvm->page_size = 32;
+ nvm->address_bits = 16;
+ break;
+ case e1000_nvm_override_spi_small:
+ nvm->page_size = 8;
+ nvm->address_bits = 8;
+ break;
+ default:
+ nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+ nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
+ 16 : 8;
+ break;
}
+ if (nvm->word_size == (1 << 15))
+ nvm->page_size = 128;
+
+ nvm->type = e1000_nvm_eeprom_spi;
/* NVM Function Pointers */
+ nvm->ops.acquire = igb_acquire_nvm_82575;
+ nvm->ops.release = igb_release_nvm_82575;
+ nvm->ops.write = igb_write_nvm_spi;
+ nvm->ops.validate = igb_validate_nvm_checksum;
+ nvm->ops.update = igb_update_nvm_checksum;
+ if (nvm->word_size < (1 << 15))
+ nvm->ops.read = igb_read_nvm_eerd;
+ else
+ nvm->ops.read = igb_read_nvm_spi;
+
+ /* override generic family function pointers for specific descendants */
switch (hw->mac.type) {
case e1000_82580:
nvm->ops.validate = igb_validate_nvm_checksum_82580;
nvm->ops.update = igb_update_nvm_checksum_82580;
- nvm->ops.acquire = igb_acquire_nvm_82575;
- nvm->ops.release = igb_release_nvm_82575;
- if (nvm->word_size < (1 << 15))
- nvm->ops.read = igb_read_nvm_eerd;
- else
- nvm->ops.read = igb_read_nvm_spi;
- nvm->ops.write = igb_write_nvm_spi;
break;
case e1000_i354:
case e1000_i350:
nvm->ops.validate = igb_validate_nvm_checksum_i350;
nvm->ops.update = igb_update_nvm_checksum_i350;
- nvm->ops.acquire = igb_acquire_nvm_82575;
- nvm->ops.release = igb_release_nvm_82575;
- if (nvm->word_size < (1 << 15))
- nvm->ops.read = igb_read_nvm_eerd;
- else
- nvm->ops.read = igb_read_nvm_spi;
- nvm->ops.write = igb_write_nvm_spi;
- break;
- case e1000_i210:
- nvm->ops.validate = igb_validate_nvm_checksum_i210;
- nvm->ops.update = igb_update_nvm_checksum_i210;
- nvm->ops.acquire = igb_acquire_nvm_i210;
- nvm->ops.release = igb_release_nvm_i210;
- nvm->ops.read = igb_read_nvm_srrd_i210;
- nvm->ops.write = igb_write_nvm_srwr_i210;
- nvm->ops.valid_led_default = igb_valid_led_default_i210;
- break;
- case e1000_i211:
- nvm->ops.acquire = igb_acquire_nvm_i210;
- nvm->ops.release = igb_release_nvm_i210;
- nvm->ops.read = igb_read_nvm_i211;
- nvm->ops.valid_led_default = igb_valid_led_default_i210;
- nvm->ops.validate = NULL;
- nvm->ops.update = NULL;
- nvm->ops.write = NULL;
break;
default:
- nvm->ops.validate = igb_validate_nvm_checksum;
- nvm->ops.update = igb_update_nvm_checksum;
- nvm->ops.acquire = igb_acquire_nvm_82575;
- nvm->ops.release = igb_release_nvm_82575;
- if (nvm->word_size < (1 << 15))
- nvm->ops.read = igb_read_nvm_eerd;
- else
- nvm->ops.read = igb_read_nvm_spi;
- nvm->ops.write = igb_write_nvm_spi;
break;
}
@@ -516,6 +483,8 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
case E1000_DEV_ID_I210_FIBER:
case E1000_DEV_ID_I210_SERDES:
case E1000_DEV_ID_I210_SGMII:
+ case E1000_DEV_ID_I210_COPPER_FLASHLESS:
+ case E1000_DEV_ID_I210_SERDES_FLASHLESS:
mac->type = e1000_i210;
break;
case E1000_DEV_ID_I211_COPPER:
@@ -601,6 +570,15 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
/* NVM initialization */
ret_val = igb_init_nvm_params_82575(hw);
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ ret_val = igb_init_nvm_params_i210(hw);
+ break;
+ default:
+ break;
+ }
+
if (ret_val)
goto out;
@@ -1163,6 +1141,31 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
}
/**
+ * igb_get_link_up_info_82575 - Get link speed/duplex info
+ * @hw: pointer to the HW structure
+ * @speed: stores the current speed
+ * @duplex: stores the current duplex
+ *
+ * This is a wrapper function, if using the serial gigabit media independent
+ * interface, use PCS to retrieve the link speed and duplex information.
+ * Otherwise, use the generic function to get the link speed and duplex info.
+ **/
+static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+ u16 *duplex)
+{
+ s32 ret_val;
+
+ if (hw->phy.media_type != e1000_media_type_copper)
+ ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed,
+ duplex);
+ else
+ ret_val = igb_get_speed_and_duplex_copper(hw, speed,
+ duplex);
+
+ return ret_val;
+}
+
+/**
* igb_check_for_link_82575 - Check for link
* @hw: pointer to the HW structure
*
@@ -1239,7 +1242,7 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
u16 *duplex)
{
struct e1000_mac_info *mac = &hw->mac;
- u32 pcs;
+ u32 pcs, status;
/* Set up defaults for the return values of this function */
mac->serdes_has_link = false;
@@ -1260,20 +1263,31 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
mac->serdes_has_link = true;
/* Detect and store PCS speed */
- if (pcs & E1000_PCS_LSTS_SPEED_1000) {
+ if (pcs & E1000_PCS_LSTS_SPEED_1000)
*speed = SPEED_1000;
- } else if (pcs & E1000_PCS_LSTS_SPEED_100) {
+ else if (pcs & E1000_PCS_LSTS_SPEED_100)
*speed = SPEED_100;
- } else {
+ else
*speed = SPEED_10;
- }
/* Detect and store PCS duplex */
- if (pcs & E1000_PCS_LSTS_DUPLEX_FULL) {
+ if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
*duplex = FULL_DUPLEX;
- } else {
+ else
*duplex = HALF_DUPLEX;
+
+ /* Check if it is an I354 2.5Gb backplane connection. */
+ if (mac->type == e1000_i354) {
+ status = rd32(E1000_STATUS);
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ *speed = SPEED_2500;
+ *duplex = FULL_DUPLEX;
+ hw_dbg("2500 Mbs, ");
+ hw_dbg("Full Duplex\n");
+ }
}
+
}
return 0;
@@ -1320,7 +1334,7 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
**/
static s32 igb_reset_hw_82575(struct e1000_hw *hw)
{
- u32 ctrl, icr;
+ u32 ctrl;
s32 ret_val;
/* Prevent the PCI-E bus from sticking if there is no TLP connection
@@ -1365,7 +1379,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
/* Clear any pending interrupt events. */
wr32(E1000_IMC, 0xffffffff);
- icr = rd32(E1000_ICR);
+ rd32(E1000_ICR);
/* Install any alternate MAC address into RAR0 */
ret_val = igb_check_alt_mac_addr(hw);
@@ -1443,11 +1457,18 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
wr32(E1000_CTRL, ctrl);
- /* Clear Go Link Disconnect bit */
- if (hw->mac.type >= e1000_82580) {
+ /* Clear Go Link Disconnect bit on supported devices */
+ switch (hw->mac.type) {
+ case e1000_82580:
+ case e1000_i350:
+ case e1000_i210:
+ case e1000_i211:
phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
phpm_reg &= ~E1000_82580_PM_GO_LINKD;
wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
+ break;
+ default:
+ break;
}
ret_val = igb_setup_serdes_link_82575(hw);
@@ -1470,7 +1491,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
switch (hw->phy.id) {
case I347AT4_E_PHY_ID:
case M88E1112_E_PHY_ID:
- case M88E1545_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
case I210_I_PHY_ID:
ret_val = igb_copper_link_setup_m88_gen2(hw);
break;
@@ -2103,10 +2124,9 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
s32 ret_val = 0;
/* BH SW mailbox bit in SW_FW_SYNC */
u16 swmbsw_mask = E1000_SW_SYNCH_MB;
- u32 ctrl, icr;
+ u32 ctrl;
bool global_device_reset = hw->dev_spec._82575.global_device_reset;
-
hw->dev_spec._82575.global_device_reset = false;
/* due to hw errata, global device reset doesn't always
@@ -2165,7 +2185,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
/* Clear any pending interrupt events. */
wr32(E1000_IMC, 0xffffffff);
- icr = rd32(E1000_ICR);
+ rd32(E1000_ICR);
ret_val = igb_reset_mdicnfg_82580(hw);
if (ret_val)
@@ -2500,28 +2520,28 @@ s32 igb_set_eee_i354(struct e1000_hw *hw)
u16 phy_data;
if ((hw->phy.media_type != e1000_media_type_copper) ||
- (phy->id != M88E1545_E_PHY_ID))
+ (phy->id != M88E1543_E_PHY_ID))
goto out;
if (!hw->dev_spec._82575.eee_disable) {
/* Switch to PHY page 18. */
- ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 18);
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
if (ret_val)
goto out;
- ret_val = phy->ops.read_reg(hw, E1000_M88E1545_EEE_CTRL_1,
+ ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
&phy_data);
if (ret_val)
goto out;
- phy_data |= E1000_M88E1545_EEE_CTRL_1_MS;
- ret_val = phy->ops.write_reg(hw, E1000_M88E1545_EEE_CTRL_1,
+ phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
phy_data);
if (ret_val)
goto out;
/* Return the PHY to page 0. */
- ret_val = phy->ops.write_reg(hw, E1000_M88E1545_PAGE_ADDR, 0);
+ ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
if (ret_val)
goto out;
@@ -2572,7 +2592,7 @@ s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
/* Check if EEE is supported on this device. */
if ((hw->phy.media_type != e1000_media_type_copper) ||
- (phy->id != M88E1545_E_PHY_ID))
+ (phy->id != M88E1543_E_PHY_ID))
goto out;
ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
@@ -2728,7 +2748,7 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
.check_for_link = igb_check_for_link_82575,
.rar_set = igb_rar_set,
.read_mac_addr = igb_read_mac_addr_82575,
- .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
+ .get_speed_and_duplex = igb_get_link_up_info_82575,
#ifdef CONFIG_IGB_HWMON
.get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
.init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
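With igb_get_link_up_info_82575() added above, the I354 2.5Gb backplane special case moves into the PCS speed/duplex path; the decision is just a decode of two STATUS bits. A condensed sketch of that decode on a raw status word (the two bit values below are placeholders for illustration, not taken from the patch):

#include <linux/types.h>

#define EX_STATUS_2P5_SKU	0x00001000	/* placeholder bit value */
#define EX_STATUS_2P5_SKU_OVER	0x00002000	/* placeholder bit value */

/* Return true and report 2500 Mb/s full duplex when the 2.5G SKU bit is set
 * and the override bit is clear, mirroring the check added above.
 */
static bool is_2p5g_backplane(u32 status, u16 *speed, u16 *duplex)
{
	if ((status & EX_STATUS_2P5_SKU) &&
	    !(status & EX_STATUS_2P5_SKU_OVER)) {
		*speed = 2500;
		*duplex = 1;	/* FULL_DUPLEX */
		return true;
	}
	return false;
}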
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index aa201abb8ad..978eca31ced 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -620,6 +620,7 @@
#define E1000_EECD_SIZE_EX_SHIFT 11
#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/
+#define E1000_EECD_FLASH_DETECTED_I210 0x00080000 /* FLASH detected */
#define E1000_FLUDONE_ATTEMPTS 20000
#define E1000_EERD_EEWR_MAX_COUNT 512 /* buffered EEPROM words rw */
#define E1000_I210_FIFO_SEL_RX 0x00
@@ -627,6 +628,11 @@
#define E1000_I210_FIFO_SEL_TX_LEGACY E1000_I210_FIFO_SEL_TX_QAV(0)
#define E1000_I210_FIFO_SEL_BMC2OS_TX 0x06
#define E1000_I210_FIFO_SEL_BMC2OS_RX 0x01
+#define E1000_I210_FLASH_SECTOR_SIZE 0x1000 /* 4KB FLASH sector unit size */
+/* Secure FLASH mode requires removing MSb */
+#define E1000_I210_FW_PTR_MASK 0x7FFF
+/* Firmware code revision field word offset*/
+#define E1000_I210_FW_VER_OFFSET 328
#define E1000_EECD_FLUPD_I210 0x00800000 /* Update FLASH */
#define E1000_EECD_FLUDONE_I210 0x04000000 /* Update FLASH done*/
#define E1000_FLUDONE_ATTEMPTS 20000
@@ -665,20 +671,26 @@
#define NVM_INIT_CTRL_4 0x0013
#define NVM_LED_1_CFG 0x001C
#define NVM_LED_0_2_CFG 0x001F
-
-/* NVM version defines */
#define NVM_ETRACK_WORD 0x0042
+#define NVM_ETRACK_HIWORD 0x0043
#define NVM_COMB_VER_OFF 0x0083
#define NVM_COMB_VER_PTR 0x003d
-#define NVM_MAJOR_MASK 0xF000
-#define NVM_MINOR_MASK 0x0FF0
-#define NVM_BUILD_MASK 0x000F
-#define NVM_COMB_VER_MASK 0x00FF
-#define NVM_MAJOR_SHIFT 12
-#define NVM_MINOR_SHIFT 4
-#define NVM_COMB_VER_SHFT 8
-#define NVM_VER_INVALID 0xFFFF
-#define NVM_ETRACK_SHIFT 16
+
+/* NVM version defines */
+#define NVM_MAJOR_MASK 0xF000
+#define NVM_MINOR_MASK 0x0FF0
+#define NVM_IMAGE_ID_MASK 0x000F
+#define NVM_COMB_VER_MASK 0x00FF
+#define NVM_MAJOR_SHIFT 12
+#define NVM_MINOR_SHIFT 4
+#define NVM_COMB_VER_SHFT 8
+#define NVM_VER_INVALID 0xFFFF
+#define NVM_ETRACK_SHIFT 16
+#define NVM_ETRACK_VALID 0x8000
+#define NVM_NEW_DEC_MASK 0x0F00
+#define NVM_HEX_CONV 16
+#define NVM_HEX_TENS 10
+
#define NVM_ETS_CFG 0x003E
#define NVM_ETS_LTHRES_DELTA_MASK 0x07C0
#define NVM_ETS_LTHRES_DELTA_SHIFT 6
@@ -775,7 +787,7 @@
#define I350_I_PHY_ID 0x015403B0
#define M88_VENDOR 0x0141
#define I210_I_PHY_ID 0x01410C00
-#define M88E1545_E_PHY_ID 0x01410EA0
+#define M88E1543_E_PHY_ID 0x01410EA0
/* M88E1000 Specific Registers */
#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */
@@ -897,9 +909,9 @@
#define E1000_EEE_LP_ADV_DEV_I210 7 /* EEE LP Adv Device */
#define E1000_EEE_LP_ADV_ADDR_I210 61 /* EEE LP Adv Register */
#define E1000_MMDAC_FUNC_DATA 0x4000 /* Data, no post increment */
-#define E1000_M88E1545_PAGE_ADDR 0x16 /* Page Offset Register */
-#define E1000_M88E1545_EEE_CTRL_1 0x0
-#define E1000_M88E1545_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
+#define E1000_M88E1543_PAGE_ADDR 0x16 /* Page Offset Register */
+#define E1000_M88E1543_EEE_CTRL_1 0x0
+#define E1000_M88E1543_EEE_CTRL_1_MS 0x0001 /* EEE Master/Slave */
#define E1000_EEE_ADV_DEV_I354 7
#define E1000_EEE_ADV_ADDR_I354 60
#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
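The regrouped NVM version defines above are consumed by igb_get_fw_version() in the e1000_nvm.c hunks later in this patch: the EEPROM major number sits in the top four bits, the minor in the middle eight, and the image id (build) in the low four. A small sketch of that decode on a raw 16-bit NVM_VERSION word:

#define NVM_MAJOR_MASK		0xF000
#define NVM_MINOR_MASK		0x0FF0
#define NVM_IMAGE_ID_MASK	0x000F
#define NVM_MAJOR_SHIFT		12
#define NVM_MINOR_SHIFT		4

struct eep_version {
	unsigned int major;
	unsigned int minor;
	unsigned int build;
};

/* e.g. 0x1234 decodes to major 1, minor 0x23, build 4. */
static struct eep_version decode_nvm_version(unsigned int fw_version)
{
	struct eep_version v = {
		.major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT,
		.minor = (fw_version & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT,
		.build = fw_version & NVM_IMAGE_ID_MASK,
	};
	return v;
}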
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 94d7866b9c2..37a9c06a6c6 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -67,6 +67,8 @@ struct e1000_hw;
#define E1000_DEV_ID_I210_FIBER 0x1536
#define E1000_DEV_ID_I210_SERDES 0x1537
#define E1000_DEV_ID_I210_SGMII 0x1538
+#define E1000_DEV_ID_I210_COPPER_FLASHLESS 0x157B
+#define E1000_DEV_ID_I210_SERDES_FLASHLESS 0x157C
#define E1000_DEV_ID_I211_COPPER 0x1539
#define E1000_DEV_ID_I354_BACKPLANE_1GBPS 0x1F40
#define E1000_DEV_ID_I354_SGMII 0x1F41
@@ -110,6 +112,7 @@ enum e1000_nvm_type {
e1000_nvm_none,
e1000_nvm_eeprom_spi,
e1000_nvm_flash_hw,
+ e1000_nvm_invm,
e1000_nvm_flash_sw
};
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index ddb3cf51b9b..0c0393316a3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -335,57 +335,101 @@ s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
}
/**
- * igb_read_nvm_i211 - Read NVM wrapper function for I211
+ * igb_read_invm_word_i210 - Reads OTP
+ * @hw: pointer to the HW structure
+ * @address: the word address (aka eeprom offset) to read
+ * @data: pointer to the data read
+ *
+ * Reads 16-bit words from the OTP. Return error when the word is not
+ * stored in OTP.
+ **/
+static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
+{
+ s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ u32 invm_dword;
+ u16 i;
+ u8 record_type, word_address;
+
+ for (i = 0; i < E1000_INVM_SIZE; i++) {
+ invm_dword = rd32(E1000_INVM_DATA_REG(i));
+ /* Get record type */
+ record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
+ if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
+ break;
+ if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
+ i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
+ if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
+ i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
+ if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
+ word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
+ if (word_address == address) {
+ *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
+ hw_dbg("Read INVM Word 0x%02x = %x",
+ address, *data);
+ status = E1000_SUCCESS;
+ break;
+ }
+ }
+ }
+ if (status != E1000_SUCCESS)
+ hw_dbg("Requested word 0x%02x not found in OTP\n", address);
+ return status;
+}
+
+/**
+ * igb_read_invm_i210 - Read invm wrapper function for I210/I211
* @hw: pointer to the HW structure
* @words: number of words to read
* @data: pointer to the data read
*
* Wrapper function to return data formerly found in the NVM.
**/
-s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
+static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
+ u16 words __always_unused, u16 *data)
{
s32 ret_val = E1000_SUCCESS;
/* Only the MAC addr is required to be present in the iNVM */
switch (offset) {
case NVM_MAC_ADDR:
- ret_val = igb_read_invm_i211(hw, offset, &data[0]);
- ret_val |= igb_read_invm_i211(hw, offset+1, &data[1]);
- ret_val |= igb_read_invm_i211(hw, offset+2, &data[2]);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
+ ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1,
+ &data[1]);
+ ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
+ &data[2]);
if (ret_val != E1000_SUCCESS)
hw_dbg("MAC Addr not found in iNVM\n");
break;
case NVM_INIT_CTRL_2:
- ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_INIT_CTRL_2_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_INIT_CTRL_4:
- ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_INIT_CTRL_4_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_LED_1_CFG:
- ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_LED_1_CFG_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_LED_0_2_CFG:
- igb_read_invm_i211(hw, offset, data);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = NVM_LED_0_2_CFG_DEFAULT_I211;
ret_val = E1000_SUCCESS;
}
break;
case NVM_ID_LED_SETTINGS:
- ret_val = igb_read_invm_i211(hw, (u8)offset, data);
+ ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
if (ret_val != E1000_SUCCESS) {
*data = ID_LED_RESERVED_FFFF;
ret_val = E1000_SUCCESS;
@@ -411,48 +455,6 @@ s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
}
/**
- * igb_read_invm_i211 - Reads OTP
- * @hw: pointer to the HW structure
- * @address: the word address (aka eeprom offset) to read
- * @data: pointer to the data read
- *
- * Reads 16-bit words from the OTP. Return error when the word is not
- * stored in OTP.
- **/
-s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data)
-{
- s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
- u32 invm_dword;
- u16 i;
- u8 record_type, word_address;
-
- for (i = 0; i < E1000_INVM_SIZE; i++) {
- invm_dword = rd32(E1000_INVM_DATA_REG(i));
- /* Get record type */
- record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
- if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
- break;
- if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
- i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
- if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
- i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
- if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
- word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
- if (word_address == (u8)address) {
- *data = INVM_DWORD_TO_WORD_DATA(invm_dword);
- hw_dbg("Read INVM Word 0x%02x = %x",
- address, *data);
- status = E1000_SUCCESS;
- break;
- }
- }
- }
- if (status != E1000_SUCCESS)
- hw_dbg("Requested word 0x%02x not found in OTP\n", address);
- return status;
-}
-
-/**
* igb_read_invm_version - Reads iNVM version and image type
* @hw: pointer to the HW structure
* @invm_ver: version structure for the version read
@@ -661,6 +663,23 @@ static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
}
/**
+ * igb_get_flash_presence_i210 - Check if flash device is detected.
+ * @hw: pointer to the HW structure
+ *
+ **/
+bool igb_get_flash_presence_i210(struct e1000_hw *hw)
+{
+ u32 eec = 0;
+ bool ret_val = false;
+
+ eec = rd32(E1000_EECD);
+ if (eec & E1000_EECD_FLASH_DETECTED_I210)
+ ret_val = true;
+
+ return ret_val;
+}
+
+/**
* igb_update_flash_i210 - Commit EEPROM to the flash
* @hw: pointer to the HW structure
*
@@ -786,3 +805,33 @@ s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
{
return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
}
+
+/**
+ * igb_init_nvm_params_i210 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ **/
+s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ struct e1000_nvm_info *nvm = &hw->nvm;
+
+ nvm->ops.acquire = igb_acquire_nvm_i210;
+ nvm->ops.release = igb_release_nvm_i210;
+ nvm->ops.valid_led_default = igb_valid_led_default_i210;
+
+ /* NVM Function Pointers */
+ if (igb_get_flash_presence_i210(hw)) {
+ hw->nvm.type = e1000_nvm_flash_hw;
+ nvm->ops.read = igb_read_nvm_srrd_i210;
+ nvm->ops.write = igb_write_nvm_srwr_i210;
+ nvm->ops.validate = igb_validate_nvm_checksum_i210;
+ nvm->ops.update = igb_update_nvm_checksum_i210;
+ } else {
+ hw->nvm.type = e1000_nvm_invm;
+ nvm->ops.read = igb_read_invm_i210;
+ nvm->ops.write = NULL;
+ nvm->ops.validate = NULL;
+ nvm->ops.update = NULL;
+ }
+ return ret_val;
+}
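igb_init_nvm_params_i210() above hides the flash vs. iNVM split behind the ops table: callers keep using nvm->ops.read() and only the backend differs, depending on igb_get_flash_presence_i210(). A usage-style sketch with a simplified ops struct (names here are illustrative, not the driver's):

#include <linux/types.h>

struct example_nvm_ops {
	s32 (*read)(void *hw, u16 offset, u16 words, u16 *data);
};

#define EXAMPLE_NVM_MAC_ADDR 0	/* MAC address starts at word offset 0 */

/* Read the three MAC-address words through whichever reader was installed,
 * the shadow-RAM/flash one or the iNVM (OTP) one.
 */
static s32 example_read_mac_words(void *hw, const struct example_nvm_ops *ops,
				  u16 mac[3])
{
	return ops->read(hw, EXAMPLE_NVM_MAC_ADDR, 3, mac);
}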
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 5caa332e755..dde3c4b7ea9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -35,20 +35,19 @@ extern s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
extern s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset,
u16 words, u16 *data);
-extern s32 igb_read_invm_i211(struct e1000_hw *hw, u16 address, u16 *data);
extern s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
extern void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
extern s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
extern void igb_release_nvm_i210(struct e1000_hw *hw);
extern s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
-extern s32 igb_read_nvm_i211(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data);
extern s32 igb_read_invm_version(struct e1000_hw *hw,
struct e1000_fw_version *invm_ver);
extern s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
u16 *data);
extern s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr,
u16 data);
+extern s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
+extern bool igb_get_flash_presence_i210(struct e1000_hw *hw);
#define E1000_STM_OPCODE 0xDB00
#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index bab556a47fc..f0dfd41dd4b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1171,17 +1171,6 @@ s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
hw_dbg("Half Duplex\n");
}
- /* Check if it is an I354 2.5Gb backplane connection. */
- if (hw->mac.type == e1000_i354) {
- if ((status & E1000_STATUS_2P5_SKU) &&
- !(status & E1000_STATUS_2P5_SKU_OVER)) {
- *speed = SPEED_2500;
- *duplex = FULL_DUPLEX;
- hw_dbg("2500 Mbs, ");
- hw_dbg("Full Duplex\n");
- }
- }
-
return 0;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 7f9cd7cbd35..a7db7f3db91 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -709,11 +709,16 @@ out:
**/
void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
{
- u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset;
- u16 fw_version;
+ u16 eeprom_verh, eeprom_verl, etrack_test, fw_version;
+ u8 q, hval, rem, result;
+ u16 comb_verh, comb_verl, comb_offset;
memset(fw_vers, 0, sizeof(struct e1000_fw_version));
+ /* basic eeprom version numbers and bits used vary by part and by tool
+ * used to create the nvm images. Check which data format we have.
+ */
+ hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
switch (hw->mac.type) {
case e1000_i211:
igb_read_invm_version(hw, fw_vers);
@@ -721,30 +726,30 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
case e1000_82575:
case e1000_82576:
case e1000_82580:
- case e1000_i354:
- case e1000_i350:
- case e1000_i210:
+ /* Use this format, unless EETRACK ID exists,
+ * then use alternate format
+ */
+ if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+ >> NVM_MAJOR_SHIFT;
+ fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
+ >> NVM_MINOR_SHIFT;
+ fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
+ goto etrack_id;
+ }
break;
- default:
- return;
- }
- /* basic eeprom version numbers */
- hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
- fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
- fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK);
-
- /* etrack id */
- hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
- hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
- fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT) | eeprom_verl;
-
- switch (hw->mac.type) {
case e1000_i210:
- case e1000_i354:
+ if (!(igb_get_flash_presence_i210(hw))) {
+ igb_read_invm_version(hw, fw_vers);
+ return;
+ }
+ /* fall through */
case e1000_i350:
/* find combo image version */
hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
- if ((comb_offset != 0x0) && (comb_offset != NVM_VER_INVALID)) {
+ if ((comb_offset != 0x0) &&
+ (comb_offset != NVM_VER_INVALID)) {
hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+ 1), 1, &comb_verh);
@@ -760,15 +765,42 @@ void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
fw_vers->or_major =
comb_verl >> NVM_COMB_VER_SHFT;
fw_vers->or_build =
- ((comb_verl << NVM_COMB_VER_SHFT)
- | (comb_verh >> NVM_COMB_VER_SHFT));
+ (comb_verl << NVM_COMB_VER_SHFT)
+ | (comb_verh >> NVM_COMB_VER_SHFT);
fw_vers->or_patch =
comb_verh & NVM_COMB_VER_MASK;
}
}
break;
default:
- break;
+ return;
+ }
+ hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+ fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+ >> NVM_MAJOR_SHIFT;
+
+ /* check for old style version format in newer images*/
+ if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
+ eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
+ } else {
+ eeprom_verl = (fw_version & NVM_MINOR_MASK)
+ >> NVM_MINOR_SHIFT;
+ }
+ /* Convert minor value to hex before assigning to output struct
+ * Val to be converted will not be higher than 99, per tool output
+ */
+ q = eeprom_verl / NVM_HEX_CONV;
+ hval = q * NVM_HEX_TENS;
+ rem = eeprom_verl % NVM_HEX_CONV;
+ result = hval + rem;
+ fw_vers->eep_minor = result;
+
+etrack_id:
+ if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) {
+ hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
+ hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
+ fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
+ | eeprom_verl;
}
return;
}
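The minor-version conversion above ("Convert minor value to hex before assigning") effectively reads the stored hex digits as decimal ones: for a stored minor of 0x23, q = 0x23 / 16 = 2, hval = 2 * 10 = 20, rem = 0x23 % 16 = 3, so eep_minor ends up as 23 and the version prints as major.23. A compact sketch of just that conversion:

/* Reinterpret a BCD-style minor value (e.g. 0x23) as the decimal number 23.
 * Per the comment in the patch, the value to convert stays at or below 99.
 */
static unsigned int nvm_minor_to_decimal(unsigned int eeprom_verl)
{
	unsigned int tens  = (eeprom_verl / 16) * 10;	/* high hex digit */
	unsigned int units = eeprom_verl % 16;		/* low hex digit  */

	return tens + units;
}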
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 6bfc0c43aac..433b7419cb9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -44,6 +44,7 @@ struct e1000_fw_version {
u32 etrack_id;
u16 eep_major;
u16 eep_minor;
+ u16 eep_build;
u8 invm_major;
u8 invm_minor;
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 60461946f98..e7266759a10 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -731,15 +731,13 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
- if (phy->reset_disable) {
- ret_val = 0;
- goto out;
- }
+ if (phy->reset_disable)
+ return 0;
/* Enable CRS on Tx. This must be set for half-duplex operation. */
ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
- goto out;
+ return ret_val;
/* Options:
* MDI/MDI-X = 0 (default)
@@ -780,23 +778,36 @@ s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
/* Enable downshift and setting it to X6 */
+ if (phy->id == M88E1543_E_PHY_ID) {
+ phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE;
+ ret_val =
+ phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = igb_phy_sw_reset(hw);
+ if (ret_val) {
+ hw_dbg("Error committing the PHY changes\n");
+ return ret_val;
+ }
+ }
+
phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
if (ret_val)
- goto out;
+ return ret_val;
/* Commit the changes. */
ret_val = igb_phy_sw_reset(hw);
if (ret_val) {
hw_dbg("Error committing the PHY changes\n");
- goto out;
+ return ret_val;
}
-out:
- return ret_val;
+ return 0;
}
/**
@@ -1806,7 +1817,7 @@ s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
phy->cable_length = phy_data / (is_cm ? 100 : 1);
break;
- case M88E1545_E_PHY_ID:
+ case M88E1543_E_PHY_ID:
case I347AT4_E_PHY_ID:
/* Remember the original page select and set it to 7 */
ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 15ea8dc9dad..6807b098eda 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -343,6 +343,8 @@ struct hwmon_buff {
};
#endif
+#define IGB_RETA_SIZE 128
+
/* board specific private data structure */
struct igb_adapter {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
@@ -444,6 +446,10 @@ struct igb_adapter {
struct i2c_algo_bit_data i2c_algo;
struct i2c_adapter i2c_adap;
struct i2c_client *i2c_client;
+ u32 rss_indir_tbl_init;
+ u8 rss_indir_tbl[IGB_RETA_SIZE];
+
+ unsigned long link_check_timeout;
};
#define IGB_FLAG_HAS_MSI (1 << 0)
@@ -455,6 +461,7 @@ struct igb_adapter {
#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6)
#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7)
#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
+#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9)
/* DMA Coalescing defines */
#define IGB_MIN_TXPBSIZE 20408
@@ -480,6 +487,7 @@ extern int igb_up(struct igb_adapter *);
extern void igb_down(struct igb_adapter *);
extern void igb_reinit_locked(struct igb_adapter *);
extern void igb_reset(struct igb_adapter *);
+extern void igb_write_rss_indir_tbl(struct igb_adapter *);
extern int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
extern int igb_setup_tx_resources(struct igb_ring *);
extern int igb_setup_rx_resources(struct igb_ring *);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 85fe7b52f43..48cbc833b05 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -172,10 +172,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
SUPPORTED_Autoneg |
SUPPORTED_Pause);
ecmd->advertising = ADVERTISED_FIBRE;
- if (hw->mac.type == e1000_i354) {
- ecmd->supported |= SUPPORTED_2500baseX_Full;
- ecmd->advertising |= ADVERTISED_2500baseX_Full;
- }
+
if ((eth_flags->e1000_base_lx) || (eth_flags->e1000_base_sx)) {
ecmd->supported |= SUPPORTED_1000baseT_Full;
ecmd->advertising |= ADVERTISED_1000baseT_Full;
@@ -209,16 +206,23 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
status = rd32(E1000_STATUS);
if (status & E1000_STATUS_LU) {
- if ((hw->mac.type == e1000_i354) &&
- (status & E1000_STATUS_2P5_SKU) &&
- !(status & E1000_STATUS_2P5_SKU_OVER))
- ecmd->speed = SPEED_2500;
- else if (status & E1000_STATUS_SPEED_1000)
+ if (hw->mac.type == e1000_i354) {
+ if ((status & E1000_STATUS_2P5_SKU) &&
+ !(status & E1000_STATUS_2P5_SKU_OVER)) {
+ ecmd->supported = SUPPORTED_2500baseX_Full;
+ ecmd->advertising = ADVERTISED_2500baseX_Full;
+ ecmd->speed = SPEED_2500;
+ } else {
+ ecmd->supported = SUPPORTED_1000baseT_Full;
+ ecmd->advertising = ADVERTISED_1000baseT_Full;
+ }
+ } else if (status & E1000_STATUS_SPEED_1000) {
ecmd->speed = SPEED_1000;
- else if (status & E1000_STATUS_SPEED_100)
+ } else if (status & E1000_STATUS_SPEED_100) {
ecmd->speed = SPEED_100;
- else
+ } else {
ecmd->speed = SPEED_10;
+ }
if ((status & E1000_STATUS_FD) ||
hw->phy.media_type != e1000_media_type_copper)
ecmd->duplex = DUPLEX_FULL;
@@ -1335,12 +1339,23 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
{
+ struct e1000_hw *hw = &adapter->hw;
+
*data = 0;
- /* Validate eeprom on all parts but i211 */
- if (adapter->hw.mac.type != e1000_i211) {
+ /* Validate eeprom on all parts but flashless */
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ if (igb_get_flash_presence_i210(hw)) {
+ if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
+ *data = 2;
+ }
+ break;
+ default:
if (adapter->hw.nvm.ops.validate(&adapter->hw) < 0)
*data = 2;
+ break;
}
return *data;
@@ -2672,7 +2687,9 @@ static int igb_set_eee(struct net_device *netdev,
igb_set_eee_i350(hw);
/* reset link */
- if (!netif_running(netdev))
+ if (netif_running(netdev))
+ igb_reinit_locked(adapter);
+ else
igb_reset(adapter);
}
@@ -2771,6 +2788,90 @@ static void igb_ethtool_complete(struct net_device *netdev)
pm_runtime_put(&adapter->pdev->dev);
}
+static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return IGB_RETA_SIZE;
+}
+
+static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ for (i = 0; i < IGB_RETA_SIZE; i++)
+ indir[i] = adapter->rss_indir_tbl[i];
+
+ return 0;
+}
+
+void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ u32 reg = E1000_RETA(0);
+ u32 shift = 0;
+ int i = 0;
+
+ switch (hw->mac.type) {
+ case e1000_82575:
+ shift = 6;
+ break;
+ case e1000_82576:
+ /* 82576 supports 2 RSS queues for SR-IOV */
+ if (adapter->vfs_allocated_count)
+ shift = 3;
+ break;
+ default:
+ break;
+ }
+
+ while (i < IGB_RETA_SIZE) {
+ u32 val = 0;
+ int j;
+
+ for (j = 3; j >= 0; j--) {
+ val <<= 8;
+ val |= adapter->rss_indir_tbl[i + j];
+ }
+
+ wr32(reg, val << shift);
+ reg += 4;
+ i += 4;
+ }
+}
+
+static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ int i;
+ u32 num_queues;
+
+ num_queues = adapter->rss_queues;
+
+ switch (hw->mac.type) {
+ case e1000_82576:
+ /* 82576 supports 2 RSS queues for SR-IOV */
+ if (adapter->vfs_allocated_count)
+ num_queues = 2;
+ break;
+ default:
+ break;
+ }
+
+ /* Verify user input. */
+ for (i = 0; i < IGB_RETA_SIZE; i++)
+ if (indir[i] >= num_queues)
+ return -EINVAL;
+
+
+ for (i = 0; i < IGB_RETA_SIZE; i++)
+ adapter->rss_indir_tbl[i] = indir[i];
+
+ igb_write_rss_indir_tbl(adapter);
+
+ return 0;
+}
+
static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
@@ -2804,6 +2905,9 @@ static const struct ethtool_ops igb_ethtool_ops = {
.set_eee = igb_set_eee,
.get_module_info = igb_get_module_info,
.get_module_eeprom = igb_get_module_eeprom,
+ .get_rxfh_indir_size = igb_get_rxfh_indir_size,
+ .get_rxfh_indir = igb_get_rxfh_indir,
+ .set_rxfh_indir = igb_set_rxfh_indir,
.begin = igb_ethtool_begin,
.complete = igb_ethtool_complete,
};
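igb_write_rss_indir_tbl() above packs four 8-bit indirection entries into each 32-bit RETA register, low byte first, and applies the per-MAC shift (6 on 82575, 3 on 82576 with SR-IOV); igb_setup_mrqc() later fills the default table by spreading queue numbers evenly with (j * num_queues) / IGB_RETA_SIZE. A host-side sketch of just the packing step, with no register writes:

#include <linux/types.h>

#define IGB_RETA_SIZE 128

/* Pack tbl[i..i+3] into one 32-bit RETA word, low byte first, then shift. */
static u32 reta_pack4(const u8 *tbl, unsigned int i, unsigned int shift)
{
	u32 val = 0;
	int j;

	for (j = 3; j >= 0; j--) {
		val <<= 8;
		val |= tbl[i + j];
	}

	return val << shift;
}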
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 6a0c1b66ce5..8cf44f2a8cc 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -62,7 +62,7 @@
#define MAJ 5
#define MIN 0
-#define BUILD 3
+#define BUILD 5
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
char igb_driver_name[] = "igb";
@@ -85,6 +85,8 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
@@ -1013,7 +1015,7 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
adapter->q_vector[v_idx] = NULL;
netif_napi_del(&q_vector->napi);
- /* ixgbe_get_stats64() might access the rings on this vector,
+ /* igb_get_stats64() might access the rings on this vector,
* we must wait a grace period before freeing it.
*/
kfree_rcu(q_vector, rcu);
@@ -1669,6 +1671,8 @@ void igb_down(struct igb_adapter *adapter)
igb_irq_disable(adapter);
+ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+
for (i = 0; i < adapter->num_q_vectors; i++) {
napi_synchronize(&(adapter->q_vector[i]->napi));
napi_disable(&(adapter->q_vector[i]->napi));
@@ -1929,12 +1933,17 @@ void igb_set_fw_version(struct igb_adapter *adapter)
igb_get_fw_version(hw, &fw);
switch (hw->mac.type) {
+ case e1000_i210:
case e1000_i211:
- snprintf(adapter->fw_version, sizeof(adapter->fw_version),
- "%2d.%2d-%d",
- fw.invm_major, fw.invm_minor, fw.invm_img_type);
- break;
-
+ if (!(igb_get_flash_presence_i210(hw))) {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%2d.%2d-%d",
+ fw.invm_major, fw.invm_minor,
+ fw.invm_img_type);
+ break;
+ }
+ /* fall through */
default:
/* if option is rom valid, display its version too */
if (fw.or_valid) {
@@ -1944,11 +1953,16 @@ void igb_set_fw_version(struct igb_adapter *adapter)
fw.eep_major, fw.eep_minor, fw.etrack_id,
fw.or_major, fw.or_build, fw.or_patch);
/* no option rom */
- } else {
+ } else if (fw.etrack_id != 0X0000) {
snprintf(adapter->fw_version,
- sizeof(adapter->fw_version),
- "%d.%d, 0x%08x",
- fw.eep_major, fw.eep_minor, fw.etrack_id);
+ sizeof(adapter->fw_version),
+ "%d.%d, 0x%08x",
+ fw.eep_major, fw.eep_minor, fw.etrack_id);
+ } else {
+ snprintf(adapter->fw_version,
+ sizeof(adapter->fw_version),
+ "%d.%d.%d",
+ fw.eep_major, fw.eep_minor, fw.eep_build);
}
break;
}
@@ -2166,15 +2180,28 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
hw->mac.ops.reset_hw(hw);
- /* make sure the NVM is good , i211 parts have special NVM that
- * doesn't contain a checksum
+ /* make sure the NVM is good, i211/i210 parts can have special NVM
+ * that doesn't contain a checksum
*/
- if (hw->mac.type != e1000_i211) {
+ switch (hw->mac.type) {
+ case e1000_i210:
+ case e1000_i211:
+ if (igb_get_flash_presence_i210(hw)) {
+ if (hw->nvm.ops.validate(hw) < 0) {
+ dev_err(&pdev->dev,
+ "The NVM Checksum Is Not Valid\n");
+ err = -EIO;
+ goto err_eeprom;
+ }
+ }
+ break;
+ default:
if (hw->nvm.ops.validate(hw) < 0) {
dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
+ break;
}
/* copy the MAC address out of the NVM */
@@ -2342,7 +2369,14 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
"Width x1" : "unknown"), netdev->dev_addr);
}
- ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
+ if ((hw->mac.type >= e1000_i210 ||
+ igb_get_flash_presence_i210(hw))) {
+ ret_val = igb_read_part_string(hw, part_str,
+ E1000_PBANUM_LENGTH);
+ } else {
+ ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+ }
+
if (ret_val)
strcpy(part_str, "Unknown");
dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
@@ -2436,6 +2470,11 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
int err = 0;
int i;
+ if (!adapter->msix_entries) {
+ err = -EPERM;
+ goto out;
+ }
+
if (!num_vfs)
goto out;
else if (old_vfs && old_vfs == num_vfs)
@@ -3096,7 +3135,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
u32 mrqc, rxcsum;
- u32 j, num_rx_queues, shift = 0;
+ u32 j, num_rx_queues;
static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
0xA32DCB77, 0x0CF23080, 0x3BB7426A,
@@ -3109,35 +3148,21 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
num_rx_queues = adapter->rss_queues;
switch (hw->mac.type) {
- case e1000_82575:
- shift = 6;
- break;
case e1000_82576:
/* 82576 supports 2 RSS queues for SR-IOV */
- if (adapter->vfs_allocated_count) {
- shift = 3;
+ if (adapter->vfs_allocated_count)
num_rx_queues = 2;
- }
break;
default:
break;
}
- /* Populate the indirection table 4 entries at a time. To do this
- * we are generating the results for n and n+2 and then interleaving
- * those with the results with n+1 and n+3.
- */
- for (j = 0; j < 32; j++) {
- /* first pass generates n and n+2 */
- u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
- u32 reta = (base & 0x07800780) >> (7 - shift);
-
- /* second pass generates n+1 and n+3 */
- base += 0x00010001 * num_rx_queues;
- reta |= (base & 0x07800780) << (1 + shift);
-
- wr32(E1000_RETA(j), reta);
+ if (adapter->rss_indir_tbl_init != num_rx_queues) {
+ for (j = 0; j < IGB_RETA_SIZE; j++)
+ adapter->rss_indir_tbl[j] = (j * num_rx_queues) / IGB_RETA_SIZE;
+ adapter->rss_indir_tbl_init = num_rx_queues;
}
+ igb_write_rss_indir_tbl(adapter);
/* Disable raw packet checksumming so that RSS hash is placed in
* descriptor on writeback. No need to enable TCP/UDP/IP checksum
@@ -3739,9 +3764,8 @@ static void igb_set_rx_mode(struct net_device *netdev)
rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
if (netdev->flags & IFF_PROMISC) {
- u32 mrqc = rd32(E1000_MRQC);
/* retain VLAN HW filtering if in VT mode */
- if (mrqc & E1000_MRQC_ENABLE_VMDQ)
+ if (adapter->vfs_allocated_count)
rctl |= E1000_RCTL_VFE;
rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
@@ -3845,7 +3869,6 @@ bool igb_has_link(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
bool link_active = false;
- s32 ret_val = 0;
/* get_link_status is set on LSC (link status) interrupt or
* rx sequence error interrupt. get_link_status will stay
@@ -3854,22 +3877,28 @@ bool igb_has_link(struct igb_adapter *adapter)
*/
switch (hw->phy.media_type) {
case e1000_media_type_copper:
- if (hw->mac.get_link_status) {
- ret_val = hw->mac.ops.check_for_link(hw);
- link_active = !hw->mac.get_link_status;
- } else {
- link_active = true;
- }
- break;
+ if (!hw->mac.get_link_status)
+ return true;
case e1000_media_type_internal_serdes:
- ret_val = hw->mac.ops.check_for_link(hw);
- link_active = hw->mac.serdes_has_link;
+ hw->mac.ops.check_for_link(hw);
+ link_active = !hw->mac.get_link_status;
break;
default:
case e1000_media_type_unknown:
break;
}
+ if (((hw->mac.type == e1000_i210) ||
+ (hw->mac.type == e1000_i211)) &&
+ (hw->phy.id == I210_I_PHY_ID)) {
+ if (!netif_carrier_ok(adapter->netdev)) {
+ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+ } else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
+ adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ }
+ }
+
return link_active;
}
@@ -3914,6 +3943,14 @@ static void igb_watchdog_task(struct work_struct *work)
int i;
link = igb_has_link(adapter);
+
+ if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
+ if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
+ adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+ else
+ link = false;
+ }
+
if (link) {
/* Cancel scheduled suspend requests. */
pm_runtime_resume(netdev->dev.parent);
@@ -4038,9 +4075,14 @@ static void igb_watchdog_task(struct work_struct *work)
igb_ptp_rx_hang(adapter);
/* Reset the timer */
- if (!test_bit(__IGB_DOWN, &adapter->state))
- mod_timer(&adapter->watchdog_timer,
- round_jiffies(jiffies + 2 * HZ));
+ if (!test_bit(__IGB_DOWN, &adapter->state)) {
+ if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + HZ));
+ else
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 2 * HZ));
+ }
}
enum latency_range {
@@ -4815,6 +4857,10 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
return -EINVAL;
}
+ /* adjust max frame to be at least the size of a standard frame */
+ if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+ max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
+
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
msleep(1);
@@ -4866,6 +4912,8 @@ void igb_update_stats(struct igb_adapter *adapter,
bytes = 0;
packets = 0;
+
+ rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) {
u32 rqdpc = rd32(E1000_RQDPC(i));
struct igb_ring *ring = adapter->rx_ring[i];
@@ -4901,6 +4949,7 @@ void igb_update_stats(struct igb_adapter *adapter,
}
net_stats->tx_bytes = bytes;
net_stats->tx_packets = packets;
+ rcu_read_unlock();
/* read stats registers */
adapter->stats.crcerrs += rd32(E1000_CRCERRS);
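
For reference, the even spread that igb_setup_mrqc() now programs into the RETA (and that the new ethtool get/set_rxfh_indir hooks expose through adapter->rss_indir_tbl) can be reproduced in isolation. The sketch below is illustrative only: IGB_RETA_SIZE (128 entries) is taken from the patch, while the four-queue demo harness around it is assumed.

	#include <stdio.h>

	#define IGB_RETA_SIZE 128	/* indirection table entries, per the patch */

	int main(void)
	{
		unsigned char rss_indir_tbl[IGB_RETA_SIZE];
		unsigned int num_rx_queues = 4;	/* assumed queue count for the demo */
		unsigned int j;

		/* Same even spread as the new igb_setup_mrqc() default:
		 * with 4 queues, entries 0..31 map to queue 0, 32..63 to queue 1, ...
		 */
		for (j = 0; j < IGB_RETA_SIZE; j++)
			rss_indir_tbl[j] = (j * num_rx_queues) / IGB_RETA_SIZE;

		for (j = 0; j < IGB_RETA_SIZE; j += 32)
			printf("RETA[%3u] -> queue %u\n", j, (unsigned)rss_indir_tbl[j]);

		return 0;
	}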
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 7e8c477b0ab..5a54e3dc535 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -97,14 +97,14 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
{
struct igb_adapter *igb = container_of(cc, struct igb_adapter, cc);
struct e1000_hw *hw = &igb->hw;
+ u32 lo, hi;
u64 val;
- u32 lo, hi, jk;
/* The timestamp latches on lowest register read. For the 82580
* the lowest register is SYSTIMR instead of SYSTIML. However we only
* need to provide nanosecond resolution, so we just ignore it.
*/
- jk = rd32(E1000_SYSTIMR);
+ rd32(E1000_SYSTIMR);
lo = rd32(E1000_SYSTIML);
hi = rd32(E1000_SYSTIMH);
@@ -118,13 +118,13 @@ static cycle_t igb_ptp_read_82580(const struct cyclecounter *cc)
static void igb_ptp_read_i210(struct igb_adapter *adapter, struct timespec *ts)
{
struct e1000_hw *hw = &adapter->hw;
- u32 sec, nsec, jk;
+ u32 sec, nsec;
/* The timestamp latches on lowest register read. For I210/I211, the
* lowest register is SYSTIMR. Since we only need to provide nanosecond
* resolution, we can ignore it.
*/
- jk = rd32(E1000_SYSTIMR);
+ rd32(E1000_SYSTIMR);
nsec = rd32(E1000_SYSTIML);
sec = rd32(E1000_SYSTIMH);
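
Both igb_ptp hunks above rely on the same latching behaviour: reading the lowest timestamp register (SYSTIMR) latches SYSTIML/SYSTIMH, so its sub-nanosecond value is fetched and deliberately discarded. A rough standalone sketch of that access order follows; rd32() and the register tokens here are stand-ins, not the driver's real accessors.

	#include <stdint.h>

	/* Illustrative stubs; the driver reads these via rd32() on the mapped BAR. */
	enum { SYSTIMR_STUB, SYSTIML_STUB, SYSTIMH_STUB };

	static uint32_t rd32(int reg) { (void)reg; return 0; }

	/* Return a consistent 64-bit SYSTIM value: the SYSTIMR read only
	 * triggers the latch, so its result is intentionally ignored.
	 */
	static uint64_t read_systim(void)
	{
		uint32_t lo, hi;

		rd32(SYSTIMR_STUB);	/* latch SYSTIML/SYSTIMH */
		lo = rd32(SYSTIML_STUB);
		hi = rd32(SYSTIMH_STUB);

		return ((uint64_t)hi << 32) | lo;
	}

	int main(void)
	{
		(void)read_systim();
		return 0;
	}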
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index fce3e92f9d1..9f6b236828e 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -718,8 +718,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
txdr->size = txdr->count * sizeof(struct ixgb_tx_desc);
txdr->size = ALIGN(txdr->size, 4096);
- txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
- GFP_KERNEL | __GFP_ZERO);
+ txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+ GFP_KERNEL);
if (!txdr->desc) {
vfree(txdr->buffer_info);
return -ENOMEM;
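
The ixgb hunk above replaces dma_alloc_coherent(..., GFP_KERNEL | __GFP_ZERO) with dma_zalloc_coherent(..., GFP_KERNEL). The two are meant to be equivalent; the kernel-context sketch below approximates what the zalloc helper of that era expands to, and is not a verbatim copy of the header.

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	/* Approximate shape of dma_zalloc_coherent(): coherent allocation plus
	 * zeroing in a single call (see include/linux/dma-mapping.h).
	 */
	static inline void *dma_zalloc_coherent_sketch(struct device *dev,
						       size_t size,
						       dma_addr_t *dma_handle,
						       gfp_t flag)
	{
		return dma_alloc_coherent(dev, size, dma_handle,
					  flag | __GFP_ZERO);
	}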
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 7be725cdfea..0ac6b11c6e4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -54,7 +54,7 @@
#include <net/busy_poll.h>
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
#define LL_EXTENDED_STATS
#endif
/* common prefix used by pr_<> macros */
@@ -366,7 +366,7 @@ struct ixgbe_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define IXGBE_QV_STATE_IDLE 0
#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */
@@ -377,12 +377,12 @@ struct ixgbe_q_vector {
#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
spinlock_t lock;
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
/* for dynamic allocation of rings associated with this q_vector */
struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
};
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{
@@ -462,7 +462,7 @@ static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
return q_vector->state & IXGBE_QV_USER_PEND;
}
-#else /* CONFIG_NET_LL_RX_POLL */
+#else /* CONFIG_NET_RX_BUSY_POLL */
static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{
}
@@ -491,7 +491,7 @@ static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
{
return false;
}
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_IXGBE_HWMON
@@ -618,9 +618,8 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
-#define IXGBE_FLAG2_PTP_ENABLED (u32)(1 << 10)
-#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11)
-#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 12)
+#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
+#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 11)
/* Tx fast path data */
int num_tx_queues;
@@ -754,7 +753,7 @@ enum ixgbe_state_t {
__IXGBE_DOWN,
__IXGBE_SERVICE_SCHED,
__IXGBE_IN_SFP_INIT,
- __IXGBE_READ_I2C,
+ __IXGBE_PTP_RUNNING,
};
struct ixgbe_cb {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 4a5bfb6b3af..a26f3fee4f3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1018,8 +1018,17 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
u16 sfp_addr = 0;
u16 sfp_data = 0;
u16 sfp_stat = 0;
+ u16 gssr;
u32 i;
+ if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
+ gssr = IXGBE_GSSR_PHY1_SM;
+ else
+ gssr = IXGBE_GSSR_PHY0_SM;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
+ return IXGBE_ERR_SWFW_SYNC;
+
if (hw->phy.type == ixgbe_phy_nl) {
/*
* phy SDA/SCL registers are at addresses 0xC30A to
@@ -1028,17 +1037,17 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
*/
sfp_addr = (dev_addr << 8) + byte_offset;
sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK);
- hw->phy.ops.write_reg(hw,
- IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
- MDIO_MMD_PMAPMD,
- sfp_addr);
+ hw->phy.ops.write_reg_mdi(hw,
+ IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR,
+ MDIO_MMD_PMAPMD,
+ sfp_addr);
/* Poll status */
for (i = 0; i < 100; i++) {
- hw->phy.ops.read_reg(hw,
- IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
- MDIO_MMD_PMAPMD,
- &sfp_stat);
+ hw->phy.ops.read_reg_mdi(hw,
+ IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT,
+ MDIO_MMD_PMAPMD,
+ &sfp_stat);
sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK;
if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS)
break;
@@ -1052,8 +1061,8 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
}
/* Read data */
- hw->phy.ops.read_reg(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
- MDIO_MMD_PMAPMD, &sfp_data);
+ hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA,
+ MDIO_MMD_PMAPMD, &sfp_data);
*eeprom_data = (u8)(sfp_data >> 8);
} else {
@@ -1061,6 +1070,7 @@ static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr,
}
out:
+ hw->mac.ops.release_swfw_sync(hw, gssr);
return status;
}
@@ -1321,11 +1331,13 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
static struct ixgbe_phy_operations phy_ops_82598 = {
.identify = &ixgbe_identify_phy_generic,
- .identify_sfp = &ixgbe_identify_sfp_module_generic,
+ .identify_sfp = &ixgbe_identify_module_generic,
.init = &ixgbe_init_phy_ops_82598,
.reset = &ixgbe_reset_phy_generic,
.read_reg = &ixgbe_read_phy_reg_generic,
.write_reg = &ixgbe_write_phy_reg_generic,
+ .read_reg_mdi = &ixgbe_read_phy_reg_mdi,
+ .write_reg_mdi = &ixgbe_write_phy_reg_mdi,
.setup_link = &ixgbe_setup_phy_link_generic,
.setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598,
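
The 82598 hunk above brackets the SFP MDI access with the software/firmware semaphore, selecting the per-port GSSR bit from the STATUS register. A condensed kernel-context sketch of that bracket is shown below; do_mdi_access() is a placeholder for the real register work, while the register and mask names are the ones used in the patch.

	#include "ixgbe_common.h"	/* assumes the driver's in-tree headers */

	static s32 do_mdi_access(struct ixgbe_hw *hw) { (void)hw; return 0; }	/* placeholder */

	static s32 ixgbe_i2c_phy_access_sketch(struct ixgbe_hw *hw)
	{
		u16 gssr;
		s32 status;

		/* Pick the PHY semaphore that matches this port (LAN0 vs LAN1) */
		if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
			gssr = IXGBE_GSSR_PHY1_SM;
		else
			gssr = IXGBE_GSSR_PHY0_SM;

		if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
			return IXGBE_ERR_SWFW_SYNC;

		status = do_mdi_access(hw);

		/* Always drop the semaphore, even when the access failed */
		hw->mac.ops.release_swfw_sync(hw, gssr);
		return status;
	}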
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 0b82d38bc97..007a0083a63 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -49,6 +49,7 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
+static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
@@ -58,6 +59,10 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
+static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data);
+static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data);
static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
{
@@ -137,11 +142,13 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
goto setup_sfp_out;
}
- hw->eeprom.ops.read(hw, ++data_offset, &data_value);
+ if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
+ goto setup_sfp_err;
while (data_value != 0xffff) {
IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value);
IXGBE_WRITE_FLUSH(hw);
- hw->eeprom.ops.read(hw, ++data_offset, &data_value);
+ if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
+ goto setup_sfp_err;
}
/* Release the semaphore */
@@ -187,6 +194,17 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
setup_sfp_out:
return ret_val;
+
+setup_sfp_err:
+ /* Release the semaphore */
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ /* Delay obtaining semaphore again to allow FW access,
+ * semaphore_delay is in ms; usleep_range needs us.
+ */
+ usleep_range(hw->eeprom.semaphore_delay * 1000,
+ hw->eeprom.semaphore_delay * 2000);
+ hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
+ return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
}
static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
@@ -219,6 +237,25 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
struct ixgbe_mac_info *mac = &hw->mac;
struct ixgbe_phy_info *phy = &hw->phy;
s32 ret_val = 0;
+ u32 esdp;
+
+ if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
+ /* Store flag indicating I2C bus access control unit. */
+ hw->phy.qsfp_shared_i2c_bus = true;
+
+ /* Initialize access to QSFP+ I2C bus */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= IXGBE_ESDP_SDP0_DIR;
+ esdp &= ~IXGBE_ESDP_SDP1_DIR;
+ esdp &= ~IXGBE_ESDP_SDP0;
+ esdp &= ~IXGBE_ESDP_SDP0_NATIVE;
+ esdp &= ~IXGBE_ESDP_SDP1_NATIVE;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599;
+ phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599;
+ }
/* Identify the PHY or SFP module */
ret_val = phy->ops.identify(hw);
@@ -342,8 +379,13 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
if (hw->phy.multispeed_fiber) {
*speed |= IXGBE_LINK_SPEED_10GB_FULL |
- IXGBE_LINK_SPEED_1GB_FULL;
- *autoneg = true;
+ IXGBE_LINK_SPEED_1GB_FULL;
+
+ /* QSFP must not enable auto-negotiation */
+ if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp)
+ *autoneg = false;
+ else
+ *autoneg = true;
}
out:
@@ -397,6 +439,9 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_LS:
media_type = ixgbe_media_type_fiber_lco;
break;
+ case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+ media_type = ixgbe_media_type_fiber_qsfp;
+ break;
default:
media_type = ixgbe_media_type_unknown;
break;
@@ -406,6 +451,24 @@ out:
}
/**
+ * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3
+ * @hw: pointer to hardware structure
+ *
+ * Disables link; should be called during the D3 power-down sequence.
+ *
+ */
+static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
+{
+ u32 autoc2_reg;
+
+ if (!hw->mng_fw_enabled && !hw->wol_enabled) {
+ autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+ autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2_reg);
+ }
+}
+
+/**
* ixgbe_start_mac_link_82599 - Setup MAC link settings
* @hw: pointer to hardware structure
* @autoneg_wait_to_complete: true when waiting for completion is needed
@@ -527,6 +590,75 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_set_fiber_fixed_speed - Set module link speed for fixed fiber
+ * @hw: pointer to hardware structure
+ * @speed: link speed to set
+ *
+ * We set the module speed differently for fixed fiber. Unlike other
+ * multi-speed devices, we have no error value to return here, so if we
+ * detect an error we just log it and exit.
+ */
+static void ixgbe_set_fiber_fixed_speed(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed)
+{
+ s32 status;
+ u8 rs, eeprom_data;
+
+ switch (speed) {
+ case IXGBE_LINK_SPEED_10GB_FULL:
+ /* one bit mask same as setting on */
+ rs = IXGBE_SFF_SOFT_RS_SELECT_10G;
+ break;
+ case IXGBE_LINK_SPEED_1GB_FULL:
+ rs = IXGBE_SFF_SOFT_RS_SELECT_1G;
+ break;
+ default:
+ hw_dbg(hw, "Invalid fixed module speed\n");
+ return;
+ }
+
+ /* Set RS0 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ hw_dbg(hw, "Failed to read Rx Rate Select RS0\n");
+ goto out;
+ }
+
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ hw_dbg(hw, "Failed to write Rx Rate Select RS0\n");
+ goto out;
+ }
+
+ /* Set RS1 */
+ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ &eeprom_data);
+ if (status) {
+ hw_dbg(hw, "Failed to read Rx Rate Select RS1\n");
+ goto out;
+ }
+
+ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) & rs;
+
+ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB,
+ IXGBE_I2C_EEPROM_DEV_ADDR2,
+ eeprom_data);
+ if (status) {
+ hw_dbg(hw, "Failed to write Rx Rate Select RS1\n");
+ goto out;
+ }
+out:
+ return;
+}
+
+/**
* ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed
* @hw: pointer to hardware structure
* @speed: new link speed
@@ -573,9 +705,19 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
goto out;
/* Set the module link speed */
- esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber:
+ esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ break;
+ case ixgbe_media_type_fiber_qsfp:
+ /* QSFP module automatically detects MAC link speed */
+ break;
+ default:
+ hw_dbg(hw, "Unexpected media type.\n");
+ break;
+ }
/* Allow module to change analog characteristics (1G->10G) */
msleep(40);
@@ -625,10 +767,24 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
goto out;
/* Set the module link speed */
- esdp_reg &= ~IXGBE_ESDP_SDP5;
- esdp_reg |= IXGBE_ESDP_SDP5_DIR;
- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
- IXGBE_WRITE_FLUSH(hw);
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_fixed:
+ ixgbe_set_fiber_fixed_speed(hw,
+ IXGBE_LINK_SPEED_1GB_FULL);
+ break;
+ case ixgbe_media_type_fiber:
+ esdp_reg &= ~IXGBE_ESDP_SDP5;
+ esdp_reg |= IXGBE_ESDP_SDP5_DIR;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
+ IXGBE_WRITE_FLUSH(hw);
+ break;
+ case ixgbe_media_type_fiber_qsfp:
+ /* QSFP module automatically detects MAC link speed */
+ break;
+ default:
+ hw_dbg(hw, "Unexpected media type.\n");
+ break;
+ }
/* Allow module to change analog characteristics (10G->1G) */
msleep(40);
@@ -1872,7 +2028,7 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
goto out;
else
- status = ixgbe_identify_sfp_module_generic(hw);
+ status = ixgbe_identify_module_generic(hw);
}
/* Set PHY type none if no PHY detected */
@@ -1978,10 +2134,12 @@ sfp_check:
switch (hw->phy.type) {
case ixgbe_phy_sfp_passive_tyco:
case ixgbe_phy_sfp_passive_unknown:
+ case ixgbe_phy_qsfp_passive_unknown:
physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
break;
case ixgbe_phy_sfp_ftl_active:
case ixgbe_phy_sfp_active_unknown:
+ case ixgbe_phy_qsfp_active_unknown:
physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
break;
case ixgbe_phy_sfp_avago:
@@ -1999,6 +2157,15 @@ sfp_check:
else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
break;
+ case ixgbe_phy_qsfp_intel:
+ case ixgbe_phy_qsfp_unknown:
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g);
+ if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+ else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+ break;
default:
break;
}
@@ -2045,6 +2212,7 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
{
s32 status = IXGBE_ERR_EEPROM_VERSION;
u16 fw_offset, fw_ptp_cfg_offset;
+ u16 offset;
u16 fw_version = 0;
/* firmware check is only necessary for SFI devices */
@@ -2054,29 +2222,35 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
}
/* get the offset to the Firmware Module block */
- hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
+ offset = IXGBE_FW_PTR;
+ if (hw->eeprom.ops.read(hw, offset, &fw_offset))
+ goto fw_version_err;
if ((fw_offset == 0) || (fw_offset == 0xFFFF))
goto fw_version_out;
/* get the offset to the Pass Through Patch Configuration block */
- hw->eeprom.ops.read(hw, (fw_offset +
- IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR),
- &fw_ptp_cfg_offset);
+ offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR;
+ if (hw->eeprom.ops.read(hw, offset, &fw_ptp_cfg_offset))
+ goto fw_version_err;
if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
goto fw_version_out;
/* get the firmware version */
- hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset +
- IXGBE_FW_PATCH_VERSION_4),
- &fw_version);
+ offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4;
+ if (hw->eeprom.ops.read(hw, offset, &fw_version))
+ goto fw_version_err;
if (fw_version > 0x5)
status = 0;
fw_version_out:
return status;
+
+fw_version_err:
+ hw_err(hw, "eeprom read at offset %d failed\n", offset);
+ return IXGBE_ERR_EEPROM_VERSION;
}
/**
@@ -2236,6 +2410,112 @@ reset_pipeline_out:
return ret_val;
}
+/**
+ * ixgbe_read_i2c_byte_82599 - Reads 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to read
+ * @dev_addr: address of the I2C device to read from
+ * @data: value read
+ *
+ * Performs byte read operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 *data)
+{
+ u32 esdp;
+ s32 status;
+ s32 timeout = 200;
+
+ if (hw->phy.qsfp_shared_i2c_bus == true) {
+ /* Acquire I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ while (timeout) {
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ break;
+
+ usleep_range(5000, 10000);
+ timeout--;
+ }
+
+ if (!timeout) {
+ hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
+ status = IXGBE_ERR_I2C;
+ goto release_i2c_access;
+ }
+ }
+
+ status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+ if (hw->phy.qsfp_shared_i2c_bus == true) {
+ /* Release I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp &= ~IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_write_i2c_byte_82599 - Writes 8 bit word over I2C
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset to write
+ * @dev_addr: address of the I2C device to write to
+ * @data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface at
+ * a specified device address.
+ **/
+static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 dev_addr, u8 data)
+{
+ u32 esdp;
+ s32 status;
+ s32 timeout = 200;
+
+ if (hw->phy.qsfp_shared_i2c_bus == true) {
+ /* Acquire I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp |= IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+
+ while (timeout) {
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ if (esdp & IXGBE_ESDP_SDP1)
+ break;
+
+ usleep_range(5000, 10000);
+ timeout--;
+ }
+
+ if (!timeout) {
+ hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n");
+ status = IXGBE_ERR_I2C;
+ goto release_i2c_access;
+ }
+ }
+
+ status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data);
+
+release_i2c_access:
+ if (hw->phy.qsfp_shared_i2c_bus == true) {
+ /* Release I2C bus ownership. */
+ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ esdp &= ~IXGBE_ESDP_SDP0;
+ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
+ IXGBE_WRITE_FLUSH(hw);
+ }
+
+ return status;
+}
+
static struct ixgbe_mac_operations mac_ops_82599 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82599,
@@ -2255,6 +2535,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
.read_analog_reg8 = &ixgbe_read_analog_reg8_82599,
.write_analog_reg8 = &ixgbe_write_analog_reg8_82599,
+ .stop_link_on_d3 = &ixgbe_stop_mac_link_on_d3_82599,
.setup_link = &ixgbe_setup_mac_link_82599,
.set_rxpba = &ixgbe_set_rxpba_generic,
.check_link = &ixgbe_check_mac_link_generic,
@@ -2300,7 +2581,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
static struct ixgbe_phy_operations phy_ops_82599 = {
.identify = &ixgbe_identify_phy_82599,
- .identify_sfp = &ixgbe_identify_sfp_module_generic,
+ .identify_sfp = &ixgbe_identify_module_generic,
.init = &ixgbe_init_phy_ops_82599,
.reset = &ixgbe_reset_phy_generic,
.read_reg = &ixgbe_read_phy_reg_generic,
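
Among the 82599 changes above, the QSFP+ device gets a shared I2C bus that must be arbitrated through SDP pins: the driver requests ownership by driving SDP0, waits for the grant to appear on SDP1, and always releases afterwards. The kernel-context sketch below isolates that handshake; the timeout and sleep range follow the patch, the function names are illustrative.

	#include <linux/delay.h>
	#include "ixgbe_common.h"	/* assumes the driver's in-tree headers */

	static s32 ixgbe_qsfp_i2c_claim_sketch(struct ixgbe_hw *hw)
	{
		u32 esdp;
		int timeout = 200;

		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
		esdp |= IXGBE_ESDP_SDP0;		/* request the bus */
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);

		while (timeout--) {
			esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);
			if (esdp & IXGBE_ESDP_SDP1)	/* grant observed */
				return 0;
			usleep_range(5000, 10000);
		}
		return IXGBE_ERR_I2C;
	}

	static void ixgbe_qsfp_i2c_release_sketch(struct ixgbe_hw *hw)
	{
		u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);

		esdp &= ~IXGBE_ESDP_SDP0;		/* give the bus back */
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_FLUSH(hw);
	}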
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 9bcdeb89af5..b5c434b617b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -65,17 +65,42 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw);
* function check the device id to see if the associated phy supports
* autoneg flow control.
**/
-s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
+bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
{
+ bool supported = false;
+ ixgbe_link_speed speed;
+ bool link_up;
- switch (hw->device_id) {
- case IXGBE_DEV_ID_X540T:
- case IXGBE_DEV_ID_X540T1:
- case IXGBE_DEV_ID_82599_T3_LOM:
- return 0;
+ switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_fixed:
+ case ixgbe_media_type_fiber:
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ /* if link is down, assume supported */
+ if (link_up)
+ supported = speed == IXGBE_LINK_SPEED_1GB_FULL ?
+ true : false;
+ else
+ supported = true;
+ break;
+ case ixgbe_media_type_backplane:
+ supported = true;
+ break;
+ case ixgbe_media_type_copper:
+ /* only some copper devices support flow control autoneg */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_T3_LOM:
+ case IXGBE_DEV_ID_X540T:
+ case IXGBE_DEV_ID_X540T1:
+ supported = true;
+ break;
+ default:
+ break;
+ }
default:
- return IXGBE_ERR_FC_NOT_SUPPORTED;
+ break;
}
+
+ return supported;
}
/**
@@ -114,6 +139,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
* we link at 10G, the 1G advertisement is harmless and vice versa.
*/
switch (hw->phy.media_type) {
+ case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
case ixgbe_media_type_backplane:
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
@@ -234,7 +260,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
IXGBE_GSSR_MAC_CSR_SM);
} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
- (ixgbe_device_supports_autoneg_fc(hw) == 0)) {
+ ixgbe_device_supports_autoneg_fc(hw)) {
hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
MDIO_MMD_AN, reg_cu);
}
@@ -2380,6 +2406,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
switch (hw->phy.media_type) {
/* Autoneg flow control on fiber adapters */
+ case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
if (speed == IXGBE_LINK_SPEED_1GB_FULL)
ret_val = ixgbe_fc_autoneg_fiber(hw);
@@ -2392,7 +2419,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw)
/* Autoneg flow control on copper adapters */
case ixgbe_media_type_copper:
- if (ixgbe_device_supports_autoneg_fc(hw) == 0)
+ if (ixgbe_device_supports_autoneg_fc(hw))
ret_val = ixgbe_fc_autoneg_copper(hw);
break;
@@ -2479,42 +2506,39 @@ out:
**/
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask)
{
- u32 gssr;
+ u32 gssr = 0;
u32 swmask = mask;
u32 fwmask = mask << 5;
- s32 timeout = 200;
+ u32 timeout = 200;
+ u32 i;
- while (timeout) {
+ for (i = 0; i < timeout; i++) {
/*
- * SW EEPROM semaphore bit is used for access to all
- * SW_FW_SYNC/GSSR bits (not just EEPROM)
+ * SW NVM semaphore bit is used for access to all
+ * SW_FW_SYNC bits (not just NVM)
*/
if (ixgbe_get_eeprom_semaphore(hw))
return IXGBE_ERR_SWFW_SYNC;
gssr = IXGBE_READ_REG(hw, IXGBE_GSSR);
- if (!(gssr & (fwmask | swmask)))
- break;
-
- /*
- * Firmware currently using resource (fwmask) or other software
- * thread currently using resource (swmask)
- */
- ixgbe_release_eeprom_semaphore(hw);
- usleep_range(5000, 10000);
- timeout--;
- }
-
- if (!timeout) {
- hw_dbg(hw, "Driver can't access resource, SW_FW_SYNC timeout.\n");
- return IXGBE_ERR_SWFW_SYNC;
+ if (!(gssr & (fwmask | swmask))) {
+ gssr |= swmask;
+ IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+ ixgbe_release_eeprom_semaphore(hw);
+ return 0;
+ } else {
+ /* Resource is currently in use by FW or SW */
+ ixgbe_release_eeprom_semaphore(hw);
+ usleep_range(5000, 10000);
+ }
}
- gssr |= swmask;
- IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr);
+ /* If time expired clear the bits holding the lock and retry */
+ if (gssr & (fwmask | swmask))
+ ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask));
- ixgbe_release_eeprom_semaphore(hw);
- return 0;
+ usleep_range(5000, 10000);
+ return IXGBE_ERR_SWFW_SYNC;
}
/**
@@ -2716,13 +2740,19 @@ out:
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
u16 *san_mac_offset)
{
+ s32 ret_val;
+
/*
* First read the EEPROM pointer to see if the MAC addresses are
* available.
*/
- hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);
+ ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR,
+ san_mac_offset);
+ if (ret_val)
+ hw_err(hw, "eeprom read at offset %d failed\n",
+ IXGBE_SAN_MAC_ADDR_PTR);
- return 0;
+ return ret_val;
}
/**
@@ -2739,23 +2769,16 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
{
u16 san_mac_data, san_mac_offset;
u8 i;
+ s32 ret_val;
/*
* First read the EEPROM pointer to see if the MAC addresses are
* available. If they're not, no point in calling set_lan_id() here.
*/
- ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+ ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset);
+ if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF)
- if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
- /*
- * No addresses available in this EEPROM. It's not an
- * error though, so just wipe the local address and return.
- */
- for (i = 0; i < 6; i++)
- san_mac_addr[i] = 0xFF;
-
- goto san_mac_addr_out;
- }
+ goto san_mac_addr_clr;
/* make sure we know which port we need to program */
hw->mac.ops.set_lan_id(hw);
@@ -2763,14 +2786,26 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
(san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
for (i = 0; i < 3; i++) {
- hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
+ ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
+ &san_mac_data);
+ if (ret_val) {
+ hw_err(hw, "eeprom read at offset %d failed\n",
+ san_mac_offset);
+ goto san_mac_addr_clr;
+ }
san_mac_addr[i * 2] = (u8)(san_mac_data);
san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
san_mac_offset++;
}
-
-san_mac_addr_out:
return 0;
+
+san_mac_addr_clr:
+ /* No addresses available in this EEPROM. It's not necessarily an
+ * error though, so just wipe the local address and return.
+ */
+ for (i = 0; i < 6; i++)
+ san_mac_addr[i] = 0xFF;
+ return ret_val;
}
/**
@@ -3219,8 +3254,9 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
*wwpn_prefix = 0xFFFF;
/* check if alternative SAN MAC is supported */
- hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
- &alt_san_mac_blk_offset);
+ offset = IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR;
+ if (hw->eeprom.ops.read(hw, offset, &alt_san_mac_blk_offset))
+ goto wwn_prefix_err;
if ((alt_san_mac_blk_offset == 0) ||
(alt_san_mac_blk_offset == 0xFFFF))
@@ -3228,19 +3264,26 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
/* check capability in alternative san mac address block */
offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
- hw->eeprom.ops.read(hw, offset, &caps);
+ if (hw->eeprom.ops.read(hw, offset, &caps))
+ goto wwn_prefix_err;
if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
goto wwn_prefix_out;
/* get the corresponding prefix for WWNN/WWPN */
offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
- hw->eeprom.ops.read(hw, offset, wwnn_prefix);
+ if (hw->eeprom.ops.read(hw, offset, wwnn_prefix))
+ hw_err(hw, "eeprom read at offset %d failed\n", offset);
offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
- hw->eeprom.ops.read(hw, offset, wwpn_prefix);
+ if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
+ goto wwn_prefix_err;
wwn_prefix_out:
return 0;
+
+wwn_prefix_err:
+ hw_err(hw, "eeprom read at offset %d failed\n", offset);
+ return 0;
}
/**
@@ -3754,7 +3797,11 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
u8 sensor_index;
u8 sensor_location;
- hw->eeprom.ops.read(hw, (ets_offset + 1 + i), &ets_sensor);
+ if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) {
+ hw_err(hw, "eeprom read at offset %d failed\n",
+ ets_offset + 1 + i);
+ continue;
+ }
sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
IXGBE_ETS_DATA_INDEX_SHIFT);
sensor_location = ((ets_sensor & IXGBE_ETS_DATA_LOC_MASK) >>
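
The rewritten ixgbe_acquire_swfw_sync() above turns the open-ended retry into a bounded loop and, if the timeout expires, clears whichever bits are still holding the lock before giving up. The standalone sketch below shows the generic shape of that pattern; try_take(), force_release() and short_sleep() are placeholders, not driver functions.

	#include <stdbool.h>
	#include <stdio.h>

	/* Placeholders standing in for the hardware lock primitives. */
	static bool try_take(unsigned int mask) { (void)mask; return false; }
	static void force_release(unsigned int mask) { (void)mask; }
	static void short_sleep(void) { }

	/* Bounded acquire: retry up to 'timeout' times, then clear any stale
	 * ownership so a later attempt has a chance to succeed.
	 */
	static int acquire_with_timeout(unsigned int mask, unsigned int timeout)
	{
		unsigned int i;

		for (i = 0; i < timeout; i++) {
			if (try_take(mask))
				return 0;
			short_sleep();
		}

		force_release(mask);
		return -1;
	}

	int main(void)
	{
		printf("acquire: %d\n", acquire_with_timeout(0x2, 5));
		return 0;
	}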
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 22eee38868f..d259dc76604 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -80,7 +80,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
-s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
+bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask);
@@ -143,8 +143,12 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS)
+#define ixgbe_hw_to_netdev(hw) (((struct ixgbe_adapter *)(hw)->back)->netdev)
+
#define hw_dbg(hw, format, arg...) \
- netdev_dbg(((struct ixgbe_adapter *)(hw->back))->netdev, format, ##arg)
+ netdev_dbg(ixgbe_hw_to_netdev(hw), format, ## arg)
+#define hw_err(hw, format, arg...) \
+ netdev_err(ixgbe_hw_to_netdev(hw), format, ## arg)
#define e_dev_info(format, arg...) \
dev_info(&adapter->pdev->dev, format, ## arg)
#define e_dev_warn(format, arg...) \
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index ac780770863..7a77f37a7cb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -108,9 +108,8 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw,
/* Enable arbiter */
reg &= ~IXGBE_DPMCS_ARBDIS;
- /* Enable DFP and Recycle mode */
- reg |= (IXGBE_DPMCS_TDPAC | IXGBE_DPMCS_TRM);
reg |= IXGBE_DPMCS_TSOEF;
+
/* Configure Max TSO packet size 34KB including payload and headers */
reg |= (0x4 << IXGBE_DPMCS_MTSOS_SHIFT);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 24e2e7aafda..0e1b973659b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -311,9 +311,6 @@ static int ixgbe_set_settings(struct net_device *netdev,
* this function does not support duplex forcing, but can
* limit the advertising of the adapter to the specified speed
*/
- if (ecmd->autoneg == AUTONEG_DISABLE)
- return -EINVAL;
-
if (ecmd->advertising & ~ecmd->supported)
return -EINVAL;
@@ -355,10 +352,11 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
- if (hw->fc.disable_fc_autoneg)
- pause->autoneg = 0;
- else
+ if (ixgbe_device_supports_autoneg_fc(hw) &&
+ !hw->fc.disable_fc_autoneg)
pause->autoneg = 1;
+ else
+ pause->autoneg = 0;
if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
pause->rx_pause = 1;
@@ -384,7 +382,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
/* some devices do not support autoneg of link flow control */
if ((pause->autoneg == AUTONEG_ENABLE) &&
- (ixgbe_device_supports_autoneg_fc(hw) != 0))
+ !ixgbe_device_supports_autoneg_fc(hw))
return -EINVAL;
fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);
@@ -1048,7 +1046,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
+ for (j = 0; j < netdev->num_tx_queues; j++) {
ring = adapter->tx_ring[j];
if (!ring) {
data[i] = 0;
@@ -1140,11 +1138,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
#ifdef LL_EXTENDED_STATS
- sprintf(p, "tx_q_%u_napi_yield", i);
+ sprintf(p, "tx_queue_%u_ll_napi_yield", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "tx_q_%u_misses", i);
+ sprintf(p, "tx_queue_%u_ll_misses", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "tx_q_%u_cleaned", i);
+ sprintf(p, "tx_queue_%u_ll_cleaned", i);
p += ETH_GSTRING_LEN;
#endif /* LL_EXTENDED_STATS */
}
@@ -1154,11 +1152,11 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
#ifdef LL_EXTENDED_STATS
- sprintf(p, "rx_q_%u_ll_poll_yield", i);
+ sprintf(p, "rx_queue_%u_ll_poll_yield", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "rx_q_%u_misses", i);
+ sprintf(p, "rx_queue_%u_ll_misses", i);
p += ETH_GSTRING_LEN;
- sprintf(p, "rx_q_%u_cleaned", i);
+ sprintf(p, "rx_queue_%u_ll_cleaned", i);
p += ETH_GSTRING_LEN;
#endif /* LL_EXTENDED_STATS */
}
@@ -1884,11 +1882,12 @@ static void ixgbe_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
bool if_running = netif_running(netdev);
set_bit(__IXGBE_TESTING, &adapter->state);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ struct ixgbe_hw *hw = &adapter->hw;
+
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
int i;
for (i = 0; i < adapter->num_vfs; i++) {
@@ -1912,21 +1911,18 @@ static void ixgbe_diag_test(struct net_device *netdev,
/* Offline tests */
e_info(hw, "offline testing starting\n");
- if (if_running)
- /* indicate we're in test mode */
- dev_close(netdev);
-
- /* bringing adapter down disables SFP+ optics */
- if (hw->mac.ops.enable_tx_laser)
- hw->mac.ops.enable_tx_laser(hw);
-
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result
*/
if (ixgbe_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
- ixgbe_reset(adapter);
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+ else
+ ixgbe_reset(adapter);
+
e_info(hw, "register testing starting\n");
if (ixgbe_reg_test(adapter, &data[0]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1963,13 +1959,11 @@ skip_loopback:
clear_bit(__IXGBE_TESTING, &adapter->state);
if (if_running)
dev_open(netdev);
+ else if (hw->mac.ops.disable_tx_laser)
+ hw->mac.ops.disable_tx_laser(hw);
} else {
e_info(hw, "online testing starting\n");
- /* if adapter is down, SFP+ optics will be disabled */
- if (!if_running && hw->mac.ops.enable_tx_laser)
- hw->mac.ops.enable_tx_laser(hw);
-
/* Online tests */
if (ixgbe_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1983,9 +1977,6 @@ skip_loopback:
clear_bit(__IXGBE_TESTING, &adapter->state);
}
- /* if adapter was down, ensure SFP+ optics are disabled again */
- if (!if_running && hw->mac.ops.disable_tx_laser)
- hw->mac.ops.disable_tx_laser(hw);
skip_ol_tests:
msleep_interruptible(4 * 1000);
}
@@ -2909,33 +2900,21 @@ static int ixgbe_get_module_info(struct net_device *dev,
struct ixgbe_hw *hw = &adapter->hw;
u32 status;
u8 sff8472_rev, addr_mode;
- int ret_val = 0;
bool page_swap = false;
- /* avoid concurent i2c reads */
- while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
- msleep(100);
-
- /* used by the service task */
- set_bit(__IXGBE_READ_I2C, &adapter->state);
-
/* Check whether we support SFF-8472 or not */
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_SFF_8472_COMP,
&sff8472_rev);
- if (status != 0) {
- ret_val = -EIO;
- goto err_out;
- }
+ if (status != 0)
+ return -EIO;
/* addressing mode is not supported */
status = hw->phy.ops.read_i2c_eeprom(hw,
IXGBE_SFF_SFF_8472_SWAP,
&addr_mode);
- if (status != 0) {
- ret_val = -EIO;
- goto err_out;
- }
+ if (status != 0)
+ return -EIO;
if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
@@ -2952,9 +2931,7 @@ static int ixgbe_get_module_info(struct net_device *dev,
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
}
-err_out:
- clear_bit(__IXGBE_READ_I2C, &adapter->state);
- return ret_val;
+ return 0;
}
static int ixgbe_get_module_eeprom(struct net_device *dev,
@@ -2966,51 +2943,27 @@ static int ixgbe_get_module_eeprom(struct net_device *dev,
u32 status = IXGBE_ERR_PHY_ADDR_INVALID;
u8 databyte = 0xFF;
int i = 0;
- int ret_val = 0;
- /* ixgbe_get_module_info is called before this function in all
- * cases, so we do not need any checks we already do above,
- * and can trust ee->len to be a known value.
- */
+ if (ee->len == 0)
+ return -EINVAL;
- while (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
- msleep(100);
- set_bit(__IXGBE_READ_I2C, &adapter->state);
-
- /* Read the first block, SFF-8079 */
- for (i = 0; i < ETH_MODULE_SFF_8079_LEN; i++) {
- status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
- if (status != 0) {
- /* Error occured while reading module */
- ret_val = -EIO;
- goto err_out;
- }
- data[i] = databyte;
- }
+ for (i = ee->offset; i < ee->offset + ee->len; i++) {
+ /* I2C reads can take a long time */
+ if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
+ return -EBUSY;
- /* If the second block is requested, check if SFF-8472 is supported. */
- if (ee->len == ETH_MODULE_SFF_8472_LEN) {
- if (data[IXGBE_SFF_SFF_8472_COMP] == IXGBE_SFF_SFF_8472_UNSUP)
- return -EOPNOTSUPP;
-
- /* Read the second block, SFF-8472 */
- for (i = ETH_MODULE_SFF_8079_LEN;
- i < ETH_MODULE_SFF_8472_LEN; i++) {
- status = hw->phy.ops.read_i2c_sff8472(hw,
- i - ETH_MODULE_SFF_8079_LEN, &databyte);
- if (status != 0) {
- /* Error occured while reading module */
- ret_val = -EIO;
- goto err_out;
- }
- data[i] = databyte;
- }
- }
+ if (i < ETH_MODULE_SFF_8079_LEN)
+ status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
+ else
+ status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);
-err_out:
- clear_bit(__IXGBE_READ_I2C, &adapter->state);
+ if (status != 0)
+ return -EIO;
- return ret_val;
+ data[i - ee->offset] = databyte;
+ }
+
+ return 0;
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
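
The pauseparam changes above make the reported autoneg depend on ixgbe_device_supports_autoneg_fc() as well as the existing disable flag. The tiny standalone demo below just restates that condition as a truth table; it is illustrative only and uses no driver code.

	#include <stdbool.h>
	#include <stdio.h>

	/* pause->autoneg as reported after the patch: only when the device
	 * supports flow-control autoneg and it has not been disabled.
	 */
	static int reported_autoneg(bool supports, bool disabled)
	{
		return (supports && !disabled) ? 1 : 0;
	}

	int main(void)
	{
		int i;

		for (i = 0; i < 4; i++) {
			bool supports = i & 1;
			bool disabled = i & 2;

			printf("supports=%d disabled=%d -> autoneg=%d\n",
			       (int)supports, (int)disabled,
			       reported_autoneg(supports, disabled));
		}
		return 0;
	}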
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index bad8f14b194..7aba452833e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -63,7 +63,7 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "3.13.10-k"
+#define DRV_VERSION "3.15.1-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
"Copyright (c) 1999-2013 Intel Corporation.";
@@ -109,6 +109,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
@@ -195,6 +196,86 @@ static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter)
return 0;
}
+/**
+ * ixgbe_pcie_from_parent - Determine whether PCIe info should come from parent
+ * @hw: hw specific details
+ *
+ * This function is used by probe to determine whether a device's PCI-Express
+ * bandwidth details should be gathered from the parent bus instead of from the
+ * device. Used to ensure that various locations all have the correct device ID
+ * checks.
+ */
+static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw)
+{
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_SFP_SF_QP:
+ case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter,
+ int expected_gts)
+{
+ int max_gts = 0;
+ enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
+ enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
+ struct pci_dev *pdev;
+
+ /* determine whether to use the parent device
+ */
+ if (ixgbe_pcie_from_parent(&adapter->hw))
+ pdev = adapter->pdev->bus->parent->self;
+ else
+ pdev = adapter->pdev;
+
+ if (pcie_get_minimum_link(pdev, &speed, &width) ||
+ speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) {
+ e_dev_warn("Unable to determine PCI Express bandwidth.\n");
+ return;
+ }
+
+ switch (speed) {
+ case PCIE_SPEED_2_5GT:
+ /* 8b/10b encoding reduces max throughput by 20% */
+ max_gts = 2 * width;
+ break;
+ case PCIE_SPEED_5_0GT:
+ /* 8b/10b encoding reduces max throughput by 20% */
+ max_gts = 4 * width;
+ break;
+ case PCIE_SPEED_8_0GT:
+ /* 128b/130b encoding only reduces throughput by 1% */
+ max_gts = 8 * width;
+ break;
+ default:
+ e_dev_warn("Unable to determine PCI Express bandwidth.\n");
+ return;
+ }
+
+ e_dev_info("PCI Express bandwidth of %dGT/s available\n",
+ max_gts);
+ e_dev_info("(Speed:%s, Width: x%d, Encoding Loss:%s)\n",
+ (speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
+ speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
+ speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
+ "Unknown"),
+ width,
+ (speed == PCIE_SPEED_2_5GT ? "20%" :
+ speed == PCIE_SPEED_5_0GT ? "20%" :
+ speed == PCIE_SPEED_8_0GT ? "N/a" :
+ "Unknown"));
+
+ if (max_gts < expected_gts) {
+ e_dev_warn("This is not sufficient for optimal performance of this card.\n");
+ e_dev_warn("For optimal performance, at least %dGT/s of bandwidth is required.\n",
+ expected_gts);
+ e_dev_warn("A slot with more lanes and/or higher speed is suggested.\n");
+ }
+}
+
static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter)
{
if (!test_bit(__IXGBE_DOWN, &adapter->state) &&
@@ -1998,7 +2079,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
return total_rx_packets;
}
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int ixgbe_low_latency_recv(struct napi_struct *napi)
{
@@ -2030,7 +2111,7 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi)
return found;
}
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
/**
* ixgbe_configure_msix - Configure MSI-X hardware
@@ -3724,8 +3805,15 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
hw->addr_ctrl.user_set_promisc = true;
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
- /* don't hardware filter vlans in promisc mode */
- ixgbe_vlan_filter_disable(adapter);
+ /* Only disable hardware VLAN filtering in promiscuous mode
+ * if SR-IOV and VMDQ are disabled - otherwise ensure
+ * that hardware VLAN filters remain enabled.
+ */
+ if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
+ IXGBE_FLAG_SRIOV_ENABLED)))
+ ixgbe_vlan_filter_disable(adapter);
+ else
+ ixgbe_vlan_filter_enable(adapter);
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
@@ -4087,6 +4175,10 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
case ixgbe_phy_sfp_passive_unknown:
case ixgbe_phy_sfp_active_unknown:
case ixgbe_phy_sfp_ftl_active:
+ case ixgbe_phy_qsfp_passive_unknown:
+ case ixgbe_phy_qsfp_active_unknown:
+ case ixgbe_phy_qsfp_intel:
+ case ixgbe_phy_qsfp_unknown:
return true;
case ixgbe_phy_nl:
if (hw->mac.type == ixgbe_mac_82598EB)
@@ -4352,7 +4444,7 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
if (hw->mac.san_mac_rar_index)
hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0));
- if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+ if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
ixgbe_ptp_reset(adapter);
}
@@ -4714,8 +4806,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
ixgbe_pbthresh_setup(adapter);
hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
hw->fc.send_xon = true;
- hw->fc.disable_fc_autoneg =
- (ixgbe_device_supports_autoneg_fc(hw) == 0) ? false : true;
+ hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw);
#ifdef CONFIG_PCI_IOV
/* assign number of SR-IOV VFs */
@@ -5205,6 +5296,9 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
return retval;
#endif
+ if (hw->mac.ops.stop_link_on_d3)
+ hw->mac.ops.stop_link_on_d3(hw);
+
if (wufc) {
ixgbe_set_rx_mode(netdev);
@@ -5681,7 +5775,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
adapter->last_rx_ptp_check = jiffies;
- if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+ if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
ixgbe_ptp_start_cyclecounter(adapter);
e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
@@ -5727,7 +5821,7 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
if (ixgbe_is_sfp(hw) && hw->mac.type == ixgbe_mac_82598EB)
adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP;
- if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
+ if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state))
ixgbe_ptp_start_cyclecounter(adapter);
e_info(drv, "NIC Link is Down\n");
@@ -5826,10 +5920,6 @@ static void ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter)
!(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET))
return;
- /* concurent i2c reads are not supported */
- if (test_bit(__IXGBE_READ_I2C, &adapter->state))
- return;
-
/* someone else is in init, wait until next service event */
if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
return;
@@ -6038,7 +6128,7 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_fdir_reinit_subtask(adapter);
ixgbe_check_hang_subtask(adapter);
- if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) {
+ if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
ixgbe_ptp_overflow_check(adapter);
ixgbe_ptp_rx_hang(adapter);
}
@@ -7227,7 +7317,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbe_netpoll,
#endif
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = ixgbe_low_latency_recv,
#endif
#ifdef IXGBE_FCOE
@@ -7247,6 +7337,42 @@ static const struct net_device_ops ixgbe_netdev_ops = {
};
/**
+ * ixgbe_enumerate_functions - Get the number of ports this device has
+ * @adapter: adapter structure
+ *
+ * This function enumerates the physical functions co-located on a single slot,
+ * in order to determine how many ports a device has. This is most useful in
+ * determining the required GT/s of PCIe bandwidth necessary for optimal
+ * performance.
+ **/
+static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct list_head *entry;
+ int physfns = 0;
+
+ /* Some cards cannot use the generic method of counting PCIe functions,
+ * and so must be hardcoded to the correct value.
+ */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_SFP_SF_QP:
+ case IXGBE_DEV_ID_82599_QSFP_SF_QP:
+ physfns = 4;
+ break;
+ default:
+ list_for_each(entry, &adapter->pdev->bus_list) {
+ struct pci_dev *pdev =
+ list_entry(entry, struct pci_dev, bus_list);
+ /* don't count virtual functions */
+ if (!pdev->is_virtfn)
+ physfns++;
+ }
+ }
+
+ return physfns;
+}
+
+/**
* ixgbe_wol_supported - Check whether device supports WoL
* @hw: hw specific details
* @device_id: the device ID
@@ -7328,7 +7454,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgbe_hw *hw;
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
static int cards_found;
- int i, err, pci_using_dac;
+ int i, err, pci_using_dac, expected_gts;
unsigned int indices = MAX_TX_QUEUES;
u8 part_str[IXGBE_PBANUM_LENGTH];
#ifdef IXGBE_FCOE
@@ -7483,10 +7609,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->mac.type == ixgbe_mac_82598EB) {
err = 0;
} else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- e_dev_err("failed to load because an unsupported SFP+ "
- "module type was detected.\n");
- e_dev_err("Reload the driver after installing a supported "
- "module.\n");
+ e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n");
+ e_dev_err("Reload the driver after installing a supported module.\n");
goto err_sw_init;
} else if (err) {
e_dev_err("HW Init failed: %d\n", err);
@@ -7617,7 +7741,7 @@ skip_sriov:
/* pick up the PCI bus settings for reporting later */
hw->mac.ops.get_bus_info(hw);
- if (hw->device_id == IXGBE_DEV_ID_82599_SFP_SF_QP)
+ if (ixgbe_pcie_from_parent(hw))
ixgbe_get_parent_bus_info(adapter);
/* print bus type/speed/width info */
@@ -7643,12 +7767,20 @@ skip_sriov:
e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, part_str);
- if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
- e_dev_warn("PCI-Express bandwidth available for this card is "
- "not sufficient for optimal performance.\n");
- e_dev_warn("For optimal performance a x8 PCI-Express slot "
- "is required.\n");
+ /* calculate the expected PCIe bandwidth required for optimal
+ * performance. Note that some older parts will never have enough
+ * bandwidth due to being older generation PCIe parts. We clamp these
+ * parts to ensure no warning is displayed if it can't be fixed.
+ */
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16);
+ break;
+ default:
+ expected_gts = ixgbe_enumerate_functions(adapter) * 10;
+ break;
}
+ ixgbe_check_minimum_link(adapter, expected_gts);
/* reset the hardware with the new settings */
err = hw->mac.ops.start_hw(hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index e5691ccbce9..e4c676006be 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -204,7 +204,83 @@ out:
}
/**
+ * ixgbe_read_phy_reg_mdi - Reads a value from a specified PHY register without
+ * the SWFW lock
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
+ * @phy_data: Pointer to read data from PHY register
+ **/
+s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
+ u16 *phy_data)
+{
+ u32 i, data, command;
+
+ /* Setup and write the address cycle command */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /* Check every 10 usec to see if the address cycle completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY address command did not complete.\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ /* Address cycle complete, setup and write the read
+ * command
+ */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
+
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+
+ /* Check every 10 usec to see if the address cycle
+ * completed. The MDI Command bit will clear when the
+ * operation is complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
+
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY read command didn't complete\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ /* Read operation is complete. Get the data
+ * from MSRWD
+ */
+ data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
+ data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
+ *phy_data = (u16)(data);
+
+ return 0;
+}
+
+/**
* ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register
+ * using the SWFW lock - this function is needed in most cases
* @hw: pointer to hardware structure
* @reg_addr: 32 bit address of PHY register to read
* @phy_data: Pointer to read data from PHY register
@@ -212,10 +288,7 @@ out:
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data)
{
- u32 command;
- u32 i;
- u32 data;
- s32 status = 0;
+ s32 status;
u16 gssr;
if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
@@ -223,86 +296,93 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
else
gssr = IXGBE_GSSR_PHY0_SM;
- if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
+ status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type,
+ phy_data);
+ hw->mac.ops.release_swfw_sync(hw, gssr);
+ } else {
status = IXGBE_ERR_SWFW_SYNC;
+ }
- if (status == 0) {
- /* Setup and write the address cycle command */
- command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
- (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
+ return status;
+}
- IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
+/**
+ * ixgbe_write_phy_reg_mdi - Writes a value to specified PHY register
+ * without SWFW lock
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ **/
+s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ u32 i, command;
- /*
- * Check every 10 usec to see if the address cycle completed.
- * The MDI Command bit will clear when the operation is
- * complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- udelay(10);
+ /* Put the data in the MDI single read and write data register */
+ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
- command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ /* Setup and write the address cycle command */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
- if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
- break;
- }
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
- if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
- hw_dbg(hw, "PHY address command did not complete.\n");
- status = IXGBE_ERR_PHY;
- }
+ /*
+ * Check every 10 usec to see if the address cycle completed.
+ * The MDI Command bit will clear when the operation is
+ * complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
- if (status == 0) {
- /*
- * Address cycle complete, setup and write the read
- * command
- */
- command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.mdio.prtad <<
- IXGBE_MSCA_PHY_ADDR_SHIFT) |
- (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND));
-
- IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
- /*
- * Check every 10 usec to see if the address cycle
- * completed. The MDI Command bit will clear when the
- * operation is complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- udelay(10);
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
+ }
- command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY address cmd didn't complete\n");
+ return IXGBE_ERR_PHY;
+ }
- if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
- break;
- }
+ /*
+ * Address cycle complete, setup and write the write
+ * command
+ */
+ command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
+ (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
+ (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
+ (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
- if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
- hw_dbg(hw, "PHY read command didn't complete\n");
- status = IXGBE_ERR_PHY;
- } else {
- /*
- * Read operation is complete. Get the data
- * from MSRWD
- */
- data = IXGBE_READ_REG(hw, IXGBE_MSRWD);
- data >>= IXGBE_MSRWD_READ_DATA_SHIFT;
- *phy_data = (u16)(data);
- }
- }
+ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
- hw->mac.ops.release_swfw_sync(hw, gssr);
+ /* Check every 10 usec to see if the address cycle
+ * completed. The MDI Command bit will clear when the
+ * operation is complete
+ */
+ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
+ udelay(10);
+
+ command = IXGBE_READ_REG(hw, IXGBE_MSCA);
+ if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
+ break;
}
- return status;
+ if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
+ hw_dbg(hw, "PHY write cmd didn't complete\n");
+ return IXGBE_ERR_PHY;
+ }
+
+ return 0;
}
/**
* ixgbe_write_phy_reg_generic - Writes a value to specified PHY register
+ * using the SWFW lock - this function is needed in most cases
* @hw: pointer to hardware structure
* @reg_addr: 32 bit PHY register to write
* @device_type: 5 bit device type
@@ -311,9 +391,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data)
{
- u32 command;
- u32 i;
- s32 status = 0;
+ s32 status;
u16 gssr;
if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
@@ -321,74 +399,12 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
else
gssr = IXGBE_GSSR_PHY0_SM;
- if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0)
- status = IXGBE_ERR_SWFW_SYNC;
-
- if (status == 0) {
- /* Put the data in the MDI single read and write data register*/
- IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data);
-
- /* Setup and write the address cycle command */
- command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) |
- (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND));
-
- IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
- /*
- * Check every 10 usec to see if the address cycle completed.
- * The MDI Command bit will clear when the operation is
- * complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- udelay(10);
-
- command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-
- if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
- break;
- }
-
- if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
- hw_dbg(hw, "PHY address cmd didn't complete\n");
- status = IXGBE_ERR_PHY;
- }
-
- if (status == 0) {
- /*
- * Address cycle complete, setup and write the write
- * command
- */
- command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) |
- (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) |
- (hw->phy.mdio.prtad <<
- IXGBE_MSCA_PHY_ADDR_SHIFT) |
- (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND));
-
- IXGBE_WRITE_REG(hw, IXGBE_MSCA, command);
-
- /*
- * Check every 10 usec to see if the address cycle
- * completed. The MDI Command bit will clear when the
- * operation is complete
- */
- for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) {
- udelay(10);
-
- command = IXGBE_READ_REG(hw, IXGBE_MSCA);
-
- if ((command & IXGBE_MSCA_MDI_COMMAND) == 0)
- break;
- }
-
- if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) {
- hw_dbg(hw, "PHY address cmd didn't complete\n");
- status = IXGBE_ERR_PHY;
- }
- }
-
+ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) {
+ status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type,
+ phy_data);
hw->mac.ops.release_swfw_sync(hw, gssr);
+ } else {
+ status = IXGBE_ERR_SWFW_SYNC;
}
return status;
@@ -775,6 +791,8 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
* Read control word from PHY init contents offset
*/
ret_val = hw->eeprom.ops.read(hw, data_offset, &eword);
+ if (ret_val)
+ goto err_eeprom;
control = (eword & IXGBE_CONTROL_MASK_NL) >>
IXGBE_CONTROL_SHIFT_NL;
edata = eword & IXGBE_DATA_MASK_NL;
@@ -787,10 +805,15 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
case IXGBE_DATA_NL:
hw_dbg(hw, "DATA:\n");
data_offset++;
- hw->eeprom.ops.read(hw, data_offset++,
- &phy_offset);
+ ret_val = hw->eeprom.ops.read(hw, data_offset++,
+ &phy_offset);
+ if (ret_val)
+ goto err_eeprom;
for (i = 0; i < edata; i++) {
- hw->eeprom.ops.read(hw, data_offset, &eword);
+ ret_val = hw->eeprom.ops.read(hw, data_offset,
+ &eword);
+ if (ret_val)
+ goto err_eeprom;
hw->phy.ops.write_reg(hw, phy_offset,
MDIO_MMD_PMAPMD, eword);
hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
@@ -822,12 +845,42 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
out:
return ret_val;
+
+err_eeprom:
+ hw_err(hw, "eeprom read at offset %d failed\n", data_offset);
+ return IXGBE_ERR_PHY;
}
/**
- * ixgbe_identify_sfp_module_generic - Identifies SFP modules
+ * ixgbe_identify_module_generic - Identifies module type
* @hw: pointer to hardware structure
*
+ * Determines HW type and calls appropriate function.
+ **/
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
+{
+ s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
+
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ status = ixgbe_identify_sfp_module_generic(hw);
+ break;
+ case ixgbe_media_type_fiber_qsfp:
+ status = ixgbe_identify_qsfp_module_generic(hw);
+ break;
+ default:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_identify_sfp_module_generic - Identifies SFP modules
+ * @hw: pointer to hardware structure
+ *
* Searches for and identifies the SFP module and assigns appropriate PHY type.
**/
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
@@ -1106,6 +1159,197 @@ err_read_i2c_eeprom:
}
/**
+ * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules
+ * @hw: pointer to hardware structure
+ *
+ * Searches for and identifies the QSFP module and assigns appropriate PHY type
+ **/
+s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+ u32 vendor_oui = 0;
+ enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
+ u8 identifier = 0;
+ u8 comp_codes_1g = 0;
+ u8 comp_codes_10g = 0;
+ u8 oui_bytes[3] = {0, 0, 0};
+ u16 enforce_sfp = 0;
+ u8 connector = 0;
+ u8 cable_length = 0;
+ u8 device_tech = 0;
+ bool active_cable = false;
+
+ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ status = IXGBE_ERR_SFP_NOT_PRESENT;
+ goto out;
+ }
+
+ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
+ &identifier);
+
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+
+ if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+
+ hw->phy.id = identifier;
+
+ /* LAN ID is needed for sfp_type determination */
+ hw->mac.ops.set_lan_id(hw);
+
+ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP,
+ &comp_codes_10g);
+
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP,
+ &comp_codes_1g);
+
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+
+ if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) {
+ hw->phy.type = ixgbe_phy_qsfp_passive_unknown;
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core0;
+ else
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu_core1;
+ } else if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
+ IXGBE_SFF_10GBASELR_CAPABLE)) {
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type = ixgbe_sfp_type_srlr_core0;
+ else
+ hw->phy.sfp_type = ixgbe_sfp_type_srlr_core1;
+ } else {
+ if (comp_codes_10g & IXGBE_SFF_QSFP_DA_ACTIVE_CABLE)
+ active_cable = true;
+
+ if (!active_cable) {
+ /* check for active DA cables that pre-date
+ * SFF-8436 v3.6
+ */
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_CONNECTOR,
+ &connector);
+
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_CABLE_LENGTH,
+ &cable_length);
+
+ hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_DEVICE_TECH,
+ &device_tech);
+
+ if ((connector ==
+ IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE) &&
+ (cable_length > 0) &&
+ ((device_tech >> 4) ==
+ IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL))
+ active_cable = true;
+ }
+
+ if (active_cable) {
+ hw->phy.type = ixgbe_phy_qsfp_active_unknown;
+ if (hw->bus.lan_id == 0)
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core0;
+ else
+ hw->phy.sfp_type =
+ ixgbe_sfp_type_da_act_lmt_core1;
+ } else {
+ /* unsupported module type */
+ hw->phy.type = ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ goto out;
+ }
+ }
+
+ if (hw->phy.sfp_type != stored_sfp_type)
+ hw->phy.sfp_setup_needed = true;
+
+ /* Determine if the QSFP+ PHY is dual speed or not. */
+ hw->phy.multispeed_fiber = false;
+ if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
+ ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
+ (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
+ hw->phy.multispeed_fiber = true;
+
+ /* Determine PHY vendor for optical modules */
+ if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE |
+ IXGBE_SFF_10GBASELR_CAPABLE)) {
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0,
+ &oui_bytes[0]);
+
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1,
+ &oui_bytes[1]);
+
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+
+ status = hw->phy.ops.read_i2c_eeprom(hw,
+ IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2,
+ &oui_bytes[2]);
+
+ if (status != 0)
+ goto err_read_i2c_eeprom;
+
+ vendor_oui =
+ ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
+ (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
+ (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
+
+ if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL)
+ hw->phy.type = ixgbe_phy_qsfp_intel;
+ else
+ hw->phy.type = ixgbe_phy_qsfp_unknown;
+
+ hw->mac.ops.get_device_caps(hw, &enforce_sfp);
+ if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
+ /* Make sure we're a supported PHY type */
+ if (hw->phy.type == ixgbe_phy_qsfp_intel) {
+ status = 0;
+ } else {
+ if (hw->allow_unsupported_sfp) {
+ e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
+ status = 0;
+ } else {
+ hw_dbg(hw,
+ "QSFP module not supported\n");
+ hw->phy.type =
+ ixgbe_phy_sfp_unsupported;
+ status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+ }
+ }
+ } else {
+ status = 0;
+ }
+ }
+
+out:
+ return status;
+
+err_read_i2c_eeprom:
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ hw->phy.id = 0;
+ hw->phy.type = ixgbe_phy_unknown;
+
+ return IXGBE_ERR_SFP_NOT_PRESENT;
+}
+
+/**
* ixgbe_get_sfp_init_sequence_offsets - Provides offset of PHY init sequence
* @hw: pointer to hardware structure
* @list_offset: offset to the SFP ID list
@@ -1147,7 +1391,11 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
sfp_type = ixgbe_sfp_type_srlr_core1;
/* Read offset to PHY init contents */
- hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset);
+ if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) {
+ hw_err(hw, "eeprom read at %d failed\n",
+ IXGBE_PHY_INIT_OFFSET_NL);
+ return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
+ }
if ((!*list_offset) || (*list_offset == 0xFFFF))
return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT;
@@ -1159,12 +1407,14 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
* Find the matching SFP ID in the EEPROM
* and program the init sequence
*/
- hw->eeprom.ops.read(hw, *list_offset, &sfp_id);
+ if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
+ goto err_phy;
while (sfp_id != IXGBE_PHY_INIT_END_NL) {
if (sfp_id == sfp_type) {
(*list_offset)++;
- hw->eeprom.ops.read(hw, *list_offset, data_offset);
+ if (hw->eeprom.ops.read(hw, *list_offset, data_offset))
+ goto err_phy;
if ((!*data_offset) || (*data_offset == 0xFFFF)) {
hw_dbg(hw, "SFP+ module not supported\n");
return IXGBE_ERR_SFP_NOT_SUPPORTED;
@@ -1174,7 +1424,7 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
} else {
(*list_offset) += 2;
if (hw->eeprom.ops.read(hw, *list_offset, &sfp_id))
- return IXGBE_ERR_PHY;
+ goto err_phy;
}
}
@@ -1184,6 +1434,10 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
}
return 0;
+
+err_phy:
+ hw_err(hw, "eeprom read at offset %d failed\n", *list_offset);
+ return IXGBE_ERR_PHY;
}
/**
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 886a3431cf5..24af12e3719 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -33,17 +33,28 @@
#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2
/* EEPROM byte offsets */
-#define IXGBE_SFF_IDENTIFIER 0x0
-#define IXGBE_SFF_IDENTIFIER_SFP 0x3
-#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
-#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
-#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
-#define IXGBE_SFF_1GBE_COMP_CODES 0x6
-#define IXGBE_SFF_10GBE_COMP_CODES 0x3
-#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
-#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
-#define IXGBE_SFF_SFF_8472_SWAP 0x5C
-#define IXGBE_SFF_SFF_8472_COMP 0x5E
+#define IXGBE_SFF_IDENTIFIER 0x0
+#define IXGBE_SFF_IDENTIFIER_SFP 0x3
+#define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25
+#define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26
+#define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27
+#define IXGBE_SFF_1GBE_COMP_CODES 0x6
+#define IXGBE_SFF_10GBE_COMP_CODES 0x3
+#define IXGBE_SFF_CABLE_TECHNOLOGY 0x8
+#define IXGBE_SFF_CABLE_SPEC_COMP 0x3C
+#define IXGBE_SFF_SFF_8472_SWAP 0x5C
+#define IXGBE_SFF_SFF_8472_COMP 0x5E
+#define IXGBE_SFF_SFF_8472_OSCB 0x6E
+#define IXGBE_SFF_SFF_8472_ESCB 0x76
+#define IXGBE_SFF_IDENTIFIER_QSFP_PLUS 0xD
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0 0xA5
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1 0xA6
+#define IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2 0xA7
+#define IXGBE_SFF_QSFP_CONNECTOR 0x82
+#define IXGBE_SFF_QSFP_10GBE_COMP 0x83
+#define IXGBE_SFF_QSFP_1GBE_COMP 0x86
+#define IXGBE_SFF_QSFP_CABLE_LENGTH 0x92
+#define IXGBE_SFF_QSFP_DEVICE_TECH 0x93
/* Bitmasks */
#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4
@@ -54,7 +65,14 @@
#define IXGBE_SFF_1GBASET_CAPABLE 0x8
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10
#define IXGBE_SFF_10GBASELR_CAPABLE 0x20
+#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8
+#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0
#define IXGBE_SFF_ADDRESSING_MODE 0x4
+#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1
+#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8
+#define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23
+#define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0
#define IXGBE_I2C_EEPROM_READ_MASK 0x100
#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3
#define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0
@@ -102,6 +120,10 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
+s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data);
+s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data);
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
@@ -121,7 +143,9 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
+s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
+s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
u16 *list_offset,
u16 *data_offset);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 331987d6815..5184e2a1a7d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -885,8 +885,8 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
ixgbe_ptp_reset(adapter);
- /* set the flag that PTP has been enabled */
- adapter->flags2 |= IXGBE_FLAG2_PTP_ENABLED;
+ /* enter the IXGBE_PTP_RUNNING state */
+ set_bit(__IXGBE_PTP_RUNNING, &adapter->state);
return;
}
@@ -899,10 +899,12 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
*/
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
{
- /* stop the overflow check task */
- adapter->flags2 &= ~(IXGBE_FLAG2_PTP_ENABLED |
- IXGBE_FLAG2_PTP_PPS_ENABLED);
+ /* Leave the IXGBE_PTP_RUNNING state. */
+ if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state))
+ return;
+ /* stop the PPS signal */
+ adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED;
ixgbe_ptp_setup_sdp(adapter);
cancel_work_sync(&adapter->ptp_tx_work);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 1e7d587c4e5..276d7b13533 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -173,39 +173,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
ixgbe_disable_sriov(adapter);
}
-static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter)
-{
- struct pci_dev *pdev = adapter->pdev;
- struct pci_dev *vfdev;
- int dev_id;
-
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- dev_id = IXGBE_DEV_ID_82599_VF;
- break;
- case ixgbe_mac_X540:
- dev_id = IXGBE_DEV_ID_X540_VF;
- break;
- default:
- return false;
- }
-
- /* loop through all the VFs to see if we own any that are assigned */
- vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
- while (vfdev) {
- /* if we don't own it we don't care */
- if (vfdev->is_virtfn && vfdev->physfn == pdev) {
- /* if it is assigned we cannot release it */
- if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
- return true;
- }
-
- vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
- }
-
- return false;
-}
-
#endif /* #ifdef CONFIG_PCI_IOV */
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
@@ -235,7 +202,7 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
* without causing issues, so just leave the hardware
* available but disabled
*/
- if (ixgbe_vfs_are_assigned(adapter)) {
+ if (pci_vfs_assigned(adapter->pdev)) {
e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
return -EPERM;
}
@@ -672,8 +639,8 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
- u32 reg, msgbuf[4];
- u32 reg_offset, vf_shift;
+ u32 reg, reg_offset, vf_shift;
+ u32 msgbuf[4] = {0, 0, 0, 0};
u8 *addr = (u8 *)(&msgbuf[1]);
e_info(probe, "VF Reset msg received from vf %d\n", vf);
@@ -768,6 +735,29 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0;
}
+static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan)
+{
+ u32 vlvf;
+ s32 regindex;
+
+ /* short cut the special case */
+ if (vlan == 0)
+ return 0;
+
+ /* Search for the vlan id in the VLVF entries */
+ for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
+ vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
+ if ((vlvf & VLAN_VID_MASK) == vlan)
+ break;
+ }
+
+ /* Return a negative value if not found */
+ if (regindex >= IXGBE_VLVF_ENTRIES)
+ regindex = -1;
+
+ return regindex;
+}
+
static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
u32 *msgbuf, u32 vf)
{
@@ -775,6 +765,9 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
int err;
+ s32 reg_ndx;
+ u32 vlvf;
+ u32 bits;
u8 tcs = netdev_get_num_tc(adapter->netdev);
if (adapter->vfinfo[vf].pf_vlan || tcs) {
@@ -790,10 +783,50 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
else if (adapter->vfinfo[vf].vlan_count)
adapter->vfinfo[vf].vlan_count--;
+ /* In promiscuous mode, any VLAN filter set for a VF must
+ * also have the PF pool added to it.
+ */
+ if (add && adapter->netdev->flags & IFF_PROMISC)
+ err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
+
err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
if (!err && adapter->vfinfo[vf].spoofchk_enabled)
hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
+ /* Go through all the checks to see if the VLAN filter should
+ * be wiped completely.
+ */
+ if (!add && adapter->netdev->flags & IFF_PROMISC) {
+ reg_ndx = ixgbe_find_vlvf_entry(hw, vid);
+ if (reg_ndx < 0)
+ goto out;
+ vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx));
+ /* See if any other pools are set for this VLAN filter
+ * entry other than the PF.
+ */
+ if (VMDQ_P(0) < 32) {
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2));
+ bits &= ~(1 << VMDQ_P(0));
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(reg_ndx * 2) + 1);
+ } else {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(reg_ndx * 2) + 1);
+ bits &= ~(1 << (VMDQ_P(0) - 32));
+ bits |= IXGBE_READ_REG(hw, IXGBE_VLVFB(reg_ndx * 2));
+ }
+
+ /* If the filter was removed then ensure PF pool bit
+ * is cleared if the PF only added itself to the pool
+ * because the PF is in promiscuous mode.
+ */
+ if ((vlvf & VLAN_VID_MASK) == vid &&
+ !test_bit(vid, adapter->active_vlans) && !bits)
+ ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
+ }
+
+out:
return err;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 70c6aa3d3f9..6442cf8f9dc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -69,6 +69,7 @@
#define IXGBE_DEV_ID_82599_LS 0x154F
#define IXGBE_DEV_ID_X540T 0x1528
#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
+#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558
#define IXGBE_DEV_ID_X540T1 0x1560
/* VF Device IDs */
@@ -1520,9 +1521,11 @@ enum {
#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */
#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */
#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */
+#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */
#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */
#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */
#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 Native Function */
+#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */
/* LEDCTL Bit Masks */
#define IXGBE_LED_IVRT_BASE 0x00000040
@@ -1593,6 +1596,7 @@ enum {
#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000
#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000
#define IXGBE_MACC_FLU 0x00000001
@@ -2582,6 +2586,10 @@ enum ixgbe_phy_type {
ixgbe_phy_sfp_ftl_active,
ixgbe_phy_sfp_unknown,
ixgbe_phy_sfp_intel,
+ ixgbe_phy_qsfp_passive_unknown,
+ ixgbe_phy_qsfp_active_unknown,
+ ixgbe_phy_qsfp_intel,
+ ixgbe_phy_qsfp_unknown,
ixgbe_phy_sfp_unsupported,
ixgbe_phy_generic
};
@@ -2622,6 +2630,8 @@ enum ixgbe_sfp_type {
enum ixgbe_media_type {
ixgbe_media_type_unknown = 0,
ixgbe_media_type_fiber,
+ ixgbe_media_type_fiber_fixed,
+ ixgbe_media_type_fiber_qsfp,
ixgbe_media_type_fiber_lco,
ixgbe_media_type_copper,
ixgbe_media_type_backplane,
@@ -2838,6 +2848,7 @@ struct ixgbe_mac_operations {
void (*disable_tx_laser)(struct ixgbe_hw *);
void (*enable_tx_laser)(struct ixgbe_hw *);
void (*flap_tx_laser)(struct ixgbe_hw *);
+ void (*stop_link_on_d3)(struct ixgbe_hw *);
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
@@ -2885,6 +2896,8 @@ struct ixgbe_phy_operations {
s32 (*reset)(struct ixgbe_hw *);
s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *);
s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16);
+ s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *);
+ s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16);
s32 (*setup_link)(struct ixgbe_hw *);
s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *);
@@ -2953,6 +2966,7 @@ struct ixgbe_phy_info {
bool smart_speed_active;
bool multispeed_fiber;
bool reset_if_overtemp;
+ bool qsfp_shared_i2c_bus;
};
#include "ixgbe_mbx.h"
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 1f5166ad6bb..59a62bbfb37 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -488,8 +488,8 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
* source pruning.
*/
if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
- !(compare_ether_addr(adapter->netdev->dev_addr,
- eth_hdr(skb)->h_source))) {
+ ether_addr_equal(adapter->netdev->dev_addr,
+ eth_hdr(skb)->h_source)) {
dev_kfree_skb_irq(skb);
goto next_desc;
}
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 7fbe6abf605..23de82a9da8 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -3069,7 +3069,7 @@ jme_init_one(struct pci_dev *pdev,
jwrite32(jme, JME_APMC, apmc);
}
- NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, jme->rx_ring_size >> 2)
+ NETIF_NAPI_SET(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT)
spin_lock_init(&jme->phy_lock);
spin_lock_init(&jme->macaddr_lock);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index c35db735958..7fb5677451f 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2641,7 +2641,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
ret = mv643xx_eth_shared_of_probe(pdev);
if (ret)
return ret;
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
pd->tx_csum_limit : 9 * 1024;
@@ -2833,7 +2833,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
struct resource *res;
int err;
- pd = pdev->dev.platform_data;
+ pd = dev_get_platdata(&pdev->dev);
if (pd == NULL) {
dev_err(&pdev->dev, "no mv643xx_eth_platform_data\n");
return -ENODEV;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 712779fb12b..e35bac7cfdf 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -79,15 +79,17 @@
#define MVNETA_MAC_ADDR_HIGH 0x2418
#define MVNETA_SDMA_CONFIG 0x241c
#define MVNETA_SDMA_BRST_SIZE_16 4
-#define MVNETA_NO_DESC_SWAP 0x0
#define MVNETA_RX_BRST_SZ_MASK(burst) ((burst) << 1)
#define MVNETA_RX_NO_DATA_SWAP BIT(4)
#define MVNETA_TX_NO_DATA_SWAP BIT(5)
+#define MVNETA_DESC_SWAP BIT(6)
#define MVNETA_TX_BRST_SZ_MASK(burst) ((burst) << 22)
#define MVNETA_PORT_STATUS 0x2444
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
+#define MVNETA_SGMII_SERDES_CFG 0x24A0
+#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
@@ -136,7 +138,9 @@
#define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
#define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
#define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
+#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
#define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
+#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
#define MVNETA_MIB_COUNTERS_BASE 0x3080
#define MVNETA_MIB_LATE_COLLISION 0x7c
#define MVNETA_DA_FILT_SPEC_MCAST 0x3400
@@ -262,8 +266,7 @@ struct mvneta_port {
* layout of the transmit and reception DMA descriptors, and their
* layout is therefore defined by the hardware design
*/
-struct mvneta_tx_desc {
- u32 command; /* Options used by HW for packet transmitting.*/
+
#define MVNETA_TX_L3_OFF_SHIFT 0
#define MVNETA_TX_IP_HLEN_SHIFT 8
#define MVNETA_TX_L4_UDP BIT(16)
@@ -278,15 +281,6 @@ struct mvneta_tx_desc {
#define MVNETA_TX_L4_CSUM_FULL BIT(30)
#define MVNETA_TX_L4_CSUM_NOT BIT(31)
- u16 reserverd1; /* csum_l4 (for future use) */
- u16 data_size; /* Data size of transmitted packet in bytes */
- u32 buf_phys_addr; /* Physical addr of transmitted buffer */
- u32 reserved2; /* hw_cmd - (for future use, PMT) */
- u32 reserved3[4]; /* Reserved - (for future use) */
-};
-
-struct mvneta_rx_desc {
- u32 status; /* Info about received packet */
#define MVNETA_RXD_ERR_CRC 0x0
#define MVNETA_RXD_ERR_SUMMARY BIT(16)
#define MVNETA_RXD_ERR_OVERRUN BIT(17)
@@ -297,16 +291,57 @@ struct mvneta_rx_desc {
#define MVNETA_RXD_FIRST_LAST_DESC (BIT(26) | BIT(27))
#define MVNETA_RXD_L4_CSUM_OK BIT(30)
+#if defined(__LITTLE_ENDIAN)
+struct mvneta_tx_desc {
+ u32 command; /* Options used by HW for packet transmitting.*/
+ u16 reserverd1; /* csum_l4 (for future use) */
+ u16 data_size; /* Data size of transmitted packet in bytes */
+ u32 buf_phys_addr; /* Physical addr of transmitted buffer */
+ u32 reserved2; /* hw_cmd - (for future use, PMT) */
+ u32 reserved3[4]; /* Reserved - (for future use) */
+};
+
+struct mvneta_rx_desc {
+ u32 status; /* Info about received packet */
u16 reserved1; /* pnc_info - (for future use, PnC) */
u16 data_size; /* Size of received packet in bytes */
+
u32 buf_phys_addr; /* Physical address of the buffer */
u32 reserved2; /* pnc_flow_id (for future use, PnC) */
+
u32 buf_cookie; /* cookie for access to RX buffer in rx path */
u16 reserved3; /* prefetch_cmd, for future use */
u16 reserved4; /* csum_l4 - (for future use, PnC) */
+
+ u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
+ u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
+};
+#else
+struct mvneta_tx_desc {
+ u16 data_size; /* Data size of transmitted packet in bytes */
+ u16 reserverd1; /* csum_l4 (for future use) */
+ u32 command; /* Options used by HW for packet transmitting.*/
+ u32 reserved2; /* hw_cmd - (for future use, PMT) */
+ u32 buf_phys_addr; /* Physical addr of transmitted buffer */
+ u32 reserved3[4]; /* Reserved - (for future use) */
+};
+
+struct mvneta_rx_desc {
+ u16 data_size; /* Size of received packet in bytes */
+ u16 reserved1; /* pnc_info - (for future use, PnC) */
+ u32 status; /* Info about received packet */
+
+ u32 reserved2; /* pnc_flow_id (for future use, PnC) */
+ u32 buf_phys_addr; /* Physical address of the buffer */
+
+ u16 reserved4; /* csum_l4 - (for future use, PnC) */
+ u16 reserved3; /* prefetch_cmd, for future use */
+ u32 buf_cookie; /* cookie for access to RX buffer in rx path */
+
u32 reserved5; /* pnc_extra PnC (for future use, PnC) */
u32 reserved6; /* hw_cmd (for future use, PnC and HWF) */
};
+#endif
struct mvneta_tx_queue {
/* Number of this TX queue, in the range 0-7 */
@@ -655,6 +690,8 @@ static void mvneta_port_sgmii_config(struct mvneta_port *pp)
val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
val |= MVNETA_GMAC2_PSC_ENABLE;
mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
+
+ mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
}
/* Start the Ethernet port RX and TX activity */
@@ -904,13 +941,22 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
/* Default burst size */
val |= MVNETA_TX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
val |= MVNETA_RX_BRST_SZ_MASK(MVNETA_SDMA_BRST_SIZE_16);
+ val |= MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP;
- val |= (MVNETA_RX_NO_DATA_SWAP | MVNETA_TX_NO_DATA_SWAP |
- MVNETA_NO_DESC_SWAP);
+#if defined(__BIG_ENDIAN)
+ val |= MVNETA_DESC_SWAP;
+#endif
/* Assign port SDMA configuration */
mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
+ /* Disable PHY polling in hardware, since we're using the
+ * kernel phylib to do this.
+ */
+ val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
+ val &= ~MVNETA_PHY_POLLING_ENABLE;
+ mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
+
mvneta_set_ucast_table(pp, -1);
mvneta_set_special_mcast_table(pp, -1);
mvneta_set_other_mcast_table(pp, -1);
@@ -2303,7 +2349,9 @@ static void mvneta_adjust_link(struct net_device *ndev)
val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
MVNETA_GMAC_CONFIG_GMII_SPEED |
- MVNETA_GMAC_CONFIG_FULL_DUPLEX);
+ MVNETA_GMAC_CONFIG_FULL_DUPLEX |
+ MVNETA_GMAC_AN_SPEED_EN |
+ MVNETA_GMAC_AN_DUPLEX_EN);
if (phydev->duplex)
val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
@@ -2436,6 +2484,21 @@ static int mvneta_stop(struct net_device *dev)
return 0;
}
+static int mvneta_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mvneta_port *pp = netdev_priv(dev);
+ int ret;
+
+ if (!pp->phy_dev)
+ return -ENOTSUPP;
+
+ ret = phy_mii_ioctl(pp->phy_dev, ifr, cmd);
+ if (!ret)
+ mvneta_adjust_link(dev);
+
+ return ret;
+}
+
/* Ethtool methods */
/* Get settings (phy address, speed) for ethtools */
@@ -2554,6 +2617,7 @@ static const struct net_device_ops mvneta_netdev_ops = {
.ndo_change_mtu = mvneta_change_mtu,
.ndo_tx_timeout = mvneta_tx_timeout,
.ndo_get_stats64 = mvneta_get_stats64,
+ .ndo_do_ioctl = mvneta_ioctl,
};
const struct ethtool_ops mvneta_eth_tool_ops = {
@@ -2728,28 +2792,24 @@ static int mvneta_probe(struct platform_device *pdev)
pp = netdev_priv(dev);
- pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
- init_timer(&pp->tx_done_timer);
- clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
-
pp->weight = MVNETA_RX_POLL_WEIGHT;
pp->phy_node = phy_node;
pp->phy_interface = phy_mode;
- pp->base = of_iomap(dn, 0);
- if (pp->base == NULL) {
- err = -ENOMEM;
- goto err_free_irq;
- }
-
pp->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pp->clk)) {
err = PTR_ERR(pp->clk);
- goto err_unmap;
+ goto err_free_irq;
}
clk_prepare_enable(pp->clk);
+ pp->base = of_iomap(dn, 0);
+ if (pp->base == NULL) {
+ err = -ENOMEM;
+ goto err_clk;
+ }
+
dt_mac_addr = of_get_mac_address(dn);
if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
mac_from = "device tree";
@@ -2766,6 +2826,9 @@ static int mvneta_probe(struct platform_device *pdev)
}
pp->tx_done_timer.data = (unsigned long)dev;
+ pp->tx_done_timer.function = mvneta_tx_done_timer_callback;
+ init_timer(&pp->tx_done_timer);
+ clear_bit(MVNETA_F_TX_DONE_TIMER_BIT, &pp->flags);
pp->tx_ring_size = MVNETA_MAX_TXD;
pp->rx_ring_size = MVNETA_MAX_RXD;
@@ -2776,7 +2839,7 @@ static int mvneta_probe(struct platform_device *pdev)
err = mvneta_init(pp, phy_addr);
if (err < 0) {
dev_err(&pdev->dev, "can't init eth hal\n");
- goto err_clk;
+ goto err_unmap;
}
mvneta_port_power_up(pp, phy_mode);
@@ -2806,10 +2869,10 @@ static int mvneta_probe(struct platform_device *pdev)
err_deinit:
mvneta_deinit(pp);
-err_clk:
- clk_disable_unprepare(pp->clk);
err_unmap:
iounmap(pp->base);
+err_clk:
+ clk_disable_unprepare(pp->clk);
err_free_irq:
irq_dispose_mapping(dev->irq);
err_free_netdev:
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index db481477bcc..4ae0c742601 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -583,10 +583,9 @@ static int init_hash_table(struct pxa168_eth_private *pep)
* table is full.
*/
if (pep->htpr == NULL) {
- pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
- HASH_ADDR_TABLE_SIZE,
- &pep->htpr_dma,
- GFP_KERNEL | __GFP_ZERO);
+ pep->htpr = dma_zalloc_coherent(pep->dev->dev.parent,
+ HASH_ADDR_TABLE_SIZE,
+ &pep->htpr_dma, GFP_KERNEL);
if (pep->htpr == NULL)
return -ENOMEM;
} else {
@@ -1024,9 +1023,9 @@ static int rxq_init(struct net_device *dev)
pep->rx_desc_count = 0;
size = pep->rx_ring_size * sizeof(struct rx_desc);
pep->rx_desc_area_size = size;
- pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
- &pep->rx_desc_dma,
- GFP_KERNEL | __GFP_ZERO);
+ pep->p_rx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
+ &pep->rx_desc_dma,
+ GFP_KERNEL);
if (!pep->p_rx_desc_area)
goto out;
@@ -1085,9 +1084,9 @@ static int txq_init(struct net_device *dev)
pep->tx_desc_count = 0;
size = pep->tx_ring_size * sizeof(struct tx_desc);
pep->tx_desc_area_size = size;
- pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
- &pep->tx_desc_dma,
- GFP_KERNEL | __GFP_ZERO);
+ pep->p_tx_desc_area = dma_zalloc_coherent(pep->dev->dev.parent, size,
+ &pep->tx_desc_dma,
+ GFP_KERNEL);
if (!pep->p_tx_desc_area)
goto out;
/* Initialize the next_desc_ptr links in the Tx descriptors ring */
@@ -1517,7 +1516,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
printk(KERN_INFO "%s:Using random mac address\n", DRIVER_NAME);
eth_hw_addr_random(dev);
- pep->pd = pdev->dev.platform_data;
+ pep->pd = dev_get_platdata(&pdev->dev);
pep->rx_ring_size = NUM_RX_DESCS;
if (pep->pd->rx_queue_size)
pep->rx_ring_size = pep->pd->rx_queue_size;
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index c896079728e..ef94a591f9e 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
}
/* Allocate and setup a new buffer for receiving */
-static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
- struct sk_buff *skb, unsigned int bufsize)
+static int skge_rx_setup(struct skge_port *skge, struct skge_element *e,
+ struct sk_buff *skb, unsigned int bufsize)
{
struct skge_rx_desc *rd = e->desc;
- u64 map;
+ dma_addr_t map;
map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
PCI_DMA_FROMDEVICE);
- rd->dma_lo = map;
- rd->dma_hi = map >> 32;
+ if (pci_dma_mapping_error(skge->hw->pdev, map))
+ return -1;
+
+ rd->dma_lo = lower_32_bits(map);
+ rd->dma_hi = upper_32_bits(map);
e->skb = skb;
rd->csum1_start = ETH_HLEN;
rd->csum2_start = ETH_HLEN;
@@ -953,6 +956,7 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
dma_unmap_addr_set(e, mapaddr, map);
dma_unmap_len_set(e, maplen, bufsize);
+ return 0;
}
/* Resume receiving using existing skb,
@@ -1014,7 +1018,10 @@ static int skge_rx_fill(struct net_device *dev)
return -ENOMEM;
skb_reserve(skb, NET_IP_ALIGN);
- skge_rx_setup(skge, e, skb, skge->rx_buf_size);
+ if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) {
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
} while ((e = e->next) != ring->start);
ring->to_clean = ring->start;
@@ -2544,7 +2551,7 @@ static int skge_up(struct net_device *dev)
BUG_ON(skge->dma & 7);
- if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) {
+ if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) {
dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n");
err = -EINVAL;
goto free_pci_mem;
@@ -2729,7 +2736,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
struct skge_tx_desc *td;
int i;
u32 control, len;
- u64 map;
+ dma_addr_t map;
if (skb_padto(skb, ETH_ZLEN))
return NETDEV_TX_OK;
@@ -2743,11 +2750,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
e->skb = skb;
len = skb_headlen(skb);
map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(hw->pdev, map))
+ goto mapping_error;
+
dma_unmap_addr_set(e, mapaddr, map);
dma_unmap_len_set(e, maplen, len);
- td->dma_lo = map;
- td->dma_hi = map >> 32;
+ td->dma_lo = lower_32_bits(map);
+ td->dma_hi = upper_32_bits(map);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
const int offset = skb_checksum_start_offset(skb);
@@ -2778,14 +2788,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
+ if (dma_mapping_error(&hw->pdev->dev, map))
+ goto mapping_unwind;
e = e->next;
e->skb = skb;
tf = e->desc;
BUG_ON(tf->control & BMU_OWN);
- tf->dma_lo = map;
- tf->dma_hi = (u64) map >> 32;
+ tf->dma_lo = lower_32_bits(map);
+ tf->dma_hi = upper_32_bits(map);
dma_unmap_addr_set(e, mapaddr, map);
dma_unmap_len_set(e, maplen, skb_frag_size(frag));
@@ -2815,6 +2827,26 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
}
return NETDEV_TX_OK;
+
+mapping_unwind:
+ e = skge->tx_ring.to_use;
+ pci_unmap_single(hw->pdev,
+ dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
+ PCI_DMA_TODEVICE);
+ while (i-- > 0) {
+ e = e->next;
+ pci_unmap_page(hw->pdev,
+ dma_unmap_addr(e, mapaddr),
+ dma_unmap_len(e, maplen),
+ PCI_DMA_TODEVICE);
+ }
+
+mapping_error:
+ if (net_ratelimit())
+ dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
}
@@ -3045,11 +3077,13 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
pci_dma_sync_single_for_cpu(skge->hw->pdev,
dma_unmap_addr(e, mapaddr),
- len, PCI_DMA_FROMDEVICE);
+ dma_unmap_len(e, maplen),
+ PCI_DMA_FROMDEVICE);
skb_copy_from_linear_data(e->skb, skb->data, len);
pci_dma_sync_single_for_device(skge->hw->pdev,
dma_unmap_addr(e, mapaddr),
- len, PCI_DMA_FROMDEVICE);
+ dma_unmap_len(e, maplen),
+ PCI_DMA_FROMDEVICE);
skge_rx_reuse(e, skge->rx_buf_size);
} else {
struct sk_buff *nskb;
@@ -3058,13 +3092,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
if (!nskb)
goto resubmit;
+ if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
+ dev_kfree_skb(nskb);
+ goto resubmit;
+ }
+
pci_unmap_single(skge->hw->pdev,
dma_unmap_addr(e, mapaddr),
dma_unmap_len(e, maplen),
PCI_DMA_FROMDEVICE);
skb = e->skb;
prefetch(skb->data);
- skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
}
skb_put(skb, len);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 299d0184f98..ea20182c696 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -800,7 +800,16 @@ static int mlx4_MAD_IFC_wrapper(struct mlx4_dev *dev, int slave,
vhcr->op, MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
}
-int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+static int MLX4_CMD_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ return -EPERM;
+}
+
+static int MLX4_CMD_GET_OP_REQ_wrapper(struct mlx4_dev *dev, int slave,
struct mlx4_vhcr *vhcr,
struct mlx4_cmd_mailbox *inbox,
struct mlx4_cmd_mailbox *outbox,
@@ -1252,6 +1261,15 @@ static struct mlx4_cmd_info cmd_info[] = {
.wrapper = MLX4_CMD_UPDATE_QP_wrapper
},
{
+ .opcode = MLX4_CMD_GET_OP_REQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = MLX4_CMD_GET_OP_REQ_wrapper,
+ },
+ {
.opcode = MLX4_CMD_CONF_SPECIAL_QP,
.has_inbox = false,
.has_outbox = false,
@@ -1526,7 +1544,7 @@ static int calculate_transition(u16 oper_vlan, u16 admin_vlan)
return (2 * (oper_vlan == MLX4_VGT) + (admin_vlan == MLX4_VGT));
}
-int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
+static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
int slave, int port)
{
struct mlx4_vport_oper_state *vp_oper;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 9d4a1ea030d..b4881b68615 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -160,6 +160,7 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_port_profile *prof = priv->prof;
struct mlx4_en_dev *mdev = priv->mdev;
int err;
@@ -169,15 +170,17 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
pfc->mbc,
pfc->delay);
- priv->prof->rx_pause = priv->prof->tx_pause = !!pfc->pfc_en;
- priv->prof->rx_ppp = priv->prof->tx_ppp = pfc->pfc_en;
+ prof->rx_pause = !pfc->pfc_en;
+ prof->tx_pause = !pfc->pfc_en;
+ prof->rx_ppp = pfc->pfc_en;
+ prof->tx_ppp = pfc->pfc_en;
err = mlx4_SET_PORT_general(mdev->dev, priv->port,
priv->rx_skb_size + ETH_FCS_LEN,
- priv->prof->tx_pause,
- priv->prof->tx_ppp,
- priv->prof->rx_pause,
- priv->prof->rx_ppp);
+ prof->tx_pause,
+ prof->tx_ppp,
+ prof->rx_pause,
+ prof->rx_ppp);
if (err)
en_err(priv, "Failed setting pause params\n");
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 727874f575c..a28cd801a23 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -223,7 +223,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
case ETH_SS_STATS:
return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) +
(priv->tx_ring_num * 2) +
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
(priv->rx_ring_num * 5);
#else
(priv->rx_ring_num * 2);
@@ -276,7 +276,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < priv->rx_ring_num; i++) {
data[index++] = priv->rx_ring[i].packets;
data[index++] = priv->rx_ring[i].bytes;
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
data[index++] = priv->rx_ring[i].yields;
data[index++] = priv->rx_ring[i].misses;
data[index++] = priv->rx_ring[i].cleaned;
@@ -344,7 +344,7 @@ static void mlx4_en_get_strings(struct net_device *dev,
"rx%d_packets", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_bytes", i);
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_napi_yield", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 5eac871399d..fa37b7a6121 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -68,7 +68,7 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
return 0;
}
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{
@@ -94,7 +94,7 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi)
return done;
}
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_RFS_ACCEL
@@ -2140,7 +2140,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = mlx4_en_low_latency_recv,
#endif
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 7c492382da0..0698c82d6ff 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -191,6 +191,39 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
}
+static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring, int index,
+ u8 owner)
+{
+ __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
+ struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
+ struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
+ void *end = ring->buf + ring->buf_size;
+ __be32 *ptr = (__be32 *)tx_desc;
+ int i;
+
+ /* Optimize the common case when there are no wraparounds */
+ if (likely((void *)tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
+ /* Stamp the freed descriptor */
+ for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE;
+ i += STAMP_STRIDE) {
+ *ptr = stamp;
+ ptr += STAMP_DWORDS;
+ }
+ } else {
+ /* Stamp the freed descriptor */
+ for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE;
+ i += STAMP_STRIDE) {
+ *ptr = stamp;
+ ptr += STAMP_DWORDS;
+ if ((void *)ptr >= end) {
+ ptr = ring->buf;
+ stamp ^= cpu_to_be32(0x80000000);
+ }
+ }
+ }
+}
+
static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring,
@@ -205,8 +238,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
void *end = ring->buf + ring->buf_size;
int frags = skb_shinfo(skb)->nr_frags;
int i;
- __be32 *ptr = (__be32 *)tx_desc;
- __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
struct skb_shared_hwtstamps hwts;
if (timestamp) {
@@ -232,12 +263,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
skb_frag_size(frag), PCI_DMA_TODEVICE);
}
}
- /* Stamp the freed descriptor */
- for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
- *ptr = stamp;
- ptr += STAMP_DWORDS;
- }
-
} else {
if (!tx_info->inl) {
if ((void *) data >= end) {
@@ -263,16 +288,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
++data;
}
}
- /* Stamp the freed descriptor */
- for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
- *ptr = stamp;
- ptr += STAMP_DWORDS;
- if ((void *) ptr >= end) {
- ptr = ring->buf;
- stamp ^= cpu_to_be32(0x80000000);
- }
- }
-
}
dev_kfree_skb_any(skb);
return tx_info->nr_txbb;
@@ -318,8 +333,9 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
struct mlx4_cqe *cqe;
u16 index;
- u16 new_index, ring_index;
+ u16 new_index, ring_index, stamp_index;
u32 txbbs_skipped = 0;
+ u32 txbbs_stamp = 0;
u32 cons_index = mcq->cons_index;
int size = cq->size;
u32 size_mask = ring->size_mask;
@@ -335,6 +351,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
index = cons_index & size_mask;
cqe = &buf[(index << factor) + factor];
ring_index = ring->cons & size_mask;
+ stamp_index = ring_index;
/* Process all completed CQEs */
while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
@@ -345,6 +362,15 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
*/
rmb();
+ if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
+ MLX4_CQE_OPCODE_ERROR)) {
+ struct mlx4_err_cqe *cqe_err = (struct mlx4_err_cqe *)cqe;
+
+ en_err(priv, "CQE error - vendor syndrome: 0x%x syndrome: 0x%x\n",
+ cqe_err->vendor_err_syndrome,
+ cqe_err->syndrome);
+ }
+
/* Skip over last polled CQE */
new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
@@ -359,6 +385,12 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
priv, ring, ring_index,
!!((ring->cons + txbbs_skipped) &
ring->size), timestamp);
+
+ mlx4_en_stamp_wqe(priv, ring, stamp_index,
+ !!((ring->cons + txbbs_stamp) &
+ ring->size));
+ stamp_index = ring_index;
+ txbbs_stamp = txbbs_skipped;
packets++;
bytes += ring->tx_info[ring_index].nr_bytes;
} while (ring_index != new_index);
@@ -556,17 +588,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct device *ddev = priv->ddev;
struct mlx4_en_tx_ring *ring;
struct mlx4_en_tx_desc *tx_desc;
struct mlx4_wqe_data_seg *data;
- struct skb_frag_struct *frag;
struct mlx4_en_tx_info *tx_info;
- struct ethhdr *ethh;
int tx_ind = 0;
int nr_txbb;
int desc_size;
int real_size;
- dma_addr_t dma;
u32 index, bf_index;
__be32 op_own;
u16 vlan_tag = 0;
@@ -642,6 +672,61 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info->skb = skb;
tx_info->nr_txbb = nr_txbb;
+ if (lso_header_size)
+ data = ((void *)&tx_desc->lso + ALIGN(lso_header_size + 4,
+ DS_SIZE));
+ else
+ data = &tx_desc->data;
+
+ /* valid only for non-inline segments */
+ tx_info->data_offset = (void *)data - (void *)tx_desc;
+
+ tx_info->linear = (lso_header_size < skb_headlen(skb) &&
+ !is_inline(skb, NULL)) ? 1 : 0;
+
+ data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
+
+ if (is_inline(skb, &fragptr)) {
+ tx_info->inl = 1;
+ } else {
+ /* Map fragments */
+ for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
+ struct skb_frag_struct *frag;
+ dma_addr_t dma;
+
+ frag = &skb_shinfo(skb)->frags[i];
+ dma = skb_frag_dma_map(ddev, frag,
+ 0, skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(ddev, dma))
+ goto tx_drop_unmap;
+
+ data->addr = cpu_to_be64(dma);
+ data->lkey = cpu_to_be32(mdev->mr.key);
+ wmb();
+ data->byte_count = cpu_to_be32(skb_frag_size(frag));
+ --data;
+ }
+
+ /* Map linear part */
+ if (tx_info->linear) {
+ u32 byte_count = skb_headlen(skb) - lso_header_size;
+ dma_addr_t dma;
+
+ dma = dma_map_single(ddev, skb->data +
+ lso_header_size, byte_count,
+ PCI_DMA_TODEVICE);
+ if (dma_mapping_error(ddev, dma))
+ goto tx_drop_unmap;
+
+ data->addr = cpu_to_be64(dma);
+ data->lkey = cpu_to_be32(mdev->mr.key);
+ wmb();
+ data->byte_count = cpu_to_be32(byte_count);
+ }
+ tx_info->inl = 0;
+ }
+
/*
* For timestamping add flag to skb_shinfo and
* set flag for further reference
@@ -666,6 +751,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (priv->flags & MLX4_EN_FLAG_ENABLE_HW_LOOPBACK) {
+ struct ethhdr *ethh;
+
/* Copy dst mac address to wqe. This allows loopback in eSwitch,
* so that VFs and PF can communicate with each other
*/
@@ -688,8 +775,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
/* Copy headers;
* note that we already verified that it is linear */
memcpy(tx_desc->lso.header, skb->data, lso_header_size);
- data = ((void *) &tx_desc->lso +
- ALIGN(lso_header_size + 4, DS_SIZE));
priv->port_stats.tso_packets++;
i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
@@ -701,7 +786,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
((ring->prod & ring->size) ?
cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
- data = &tx_desc->data;
tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
ring->packets++;
@@ -710,38 +794,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
-
- /* valid only for none inline segments */
- tx_info->data_offset = (void *) data - (void *) tx_desc;
-
- tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
- data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
-
- if (!is_inline(skb, &fragptr)) {
- /* Map fragments */
- for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
- frag = &skb_shinfo(skb)->frags[i];
- dma = skb_frag_dma_map(priv->ddev, frag,
- 0, skb_frag_size(frag),
- DMA_TO_DEVICE);
- data->addr = cpu_to_be64(dma);
- data->lkey = cpu_to_be32(mdev->mr.key);
- wmb();
- data->byte_count = cpu_to_be32(skb_frag_size(frag));
- --data;
- }
-
- /* Map linear part */
- if (tx_info->linear) {
- dma = dma_map_single(priv->ddev, skb->data + lso_header_size,
- skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
- data->addr = cpu_to_be64(dma);
- data->lkey = cpu_to_be32(mdev->mr.key);
- wmb();
- data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
- }
- tx_info->inl = 0;
- } else {
+ if (tx_info->inl) {
build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
tx_info->inl = 1;
}
@@ -781,6 +834,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
+tx_drop_unmap:
+ en_err(priv, "DMA mapping error\n");
+
+ for (i++; i < skb_shinfo(skb)->nr_frags; i++) {
+ data++;
+ dma_unmap_page(ddev, (dma_addr_t) be64_to_cpu(data->addr),
+ be32_to_cpu(data->byte_count),
+ PCI_DMA_TODEVICE);
+ }
+
tx_drop:
dev_kfree_skb_any(skb);
priv->stats.tx_dropped++;
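The new tx_drop_unmap unwind above walks forward from the failing fragment because the mapping loop fills the WQE data segments in reverse (last fragment first), so everything already mapped sits after the current slot. A minimal sketch of the same map-in-reverse / unwind-forward pattern, using a hypothetical demo_seg array in place of the hardware descriptor (illustrative only, not the driver's code):

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

/* Illustrative sketch only; demo_seg stands in for the WQE data segments. */
struct demo_seg {
	dma_addr_t addr;
	u32 len;
};

/* 'seg' points at the slot for the LAST fragment; earlier fragments go below it. */
static int demo_map_frags(struct device *dev, struct sk_buff *skb,
			  struct demo_seg *seg)
{
	int i;

	for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t dma = skb_frag_dma_map(dev, frag, 0,
						  skb_frag_size(frag),
						  DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma))
			goto unmap;
		seg->addr = dma;
		seg->len = skb_frag_size(frag);
		--seg;			/* walk backwards through the segments */
	}
	return 0;

unmap:
	/* fragments i+1 .. nr_frags-1 were mapped into the slots after 'seg' */
	for (i++; i < skb_shinfo(skb)->nr_frags; i++) {
		++seg;
		dma_unmap_page(dev, seg->addr, seg->len, DMA_TO_DEVICE);
	}
	return -ENOMEM;
}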
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 7e042869ef0..0416c5b3b35 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -79,6 +79,7 @@ enum {
(1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
(1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
(1ull << MLX4_EVENT_TYPE_CMD) | \
+ (1ull << MLX4_EVENT_TYPE_OP_REQUIRED) | \
(1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \
(1ull << MLX4_EVENT_TYPE_FLR_EVENT) | \
(1ull << MLX4_EVENT_TYPE_FATAL_WARNING))
@@ -629,6 +630,14 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
break;
+ case MLX4_EVENT_TYPE_OP_REQUIRED:
+ atomic_inc(&priv->opreq_count);
+ /* FW commands can't be executed from interrupt context;
+ * defer the work to process context
+ */
+ queue_work(mlx4_wq, &priv->opreq_task);
+ break;
+
case MLX4_EVENT_TYPE_COMM_CHANNEL:
if (!mlx4_is_master(dev)) {
mlx4_warn(dev, "Received comm channel event "
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 8873d6802c8..0d63daa2f42 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -845,16 +845,7 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
MLX4_CMD_NATIVE);
if (!err && dev->caps.function != slave) {
- /* if config MAC in DB use it */
- if (priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac)
- def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
- else {
- /* set slave default_mac address */
- MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
- def_mac += slave << 8;
- priv->mfunc.master.vf_admin[slave].vport[vhcr->in_modifier].mac = def_mac;
- }
-
+ def_mac = priv->mfunc.master.vf_oper[slave].vport[vhcr->in_modifier].state.mac;
MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
/* get port type - currently only eth is enabled */
@@ -1705,3 +1696,107 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_write);
+
+enum {
+ ADD_TO_MCG = 0x26,
+};
+
+
+void mlx4_opreq_action(struct work_struct *work)
+{
+ struct mlx4_priv *priv = container_of(work, struct mlx4_priv,
+ opreq_task);
+ struct mlx4_dev *dev = &priv->dev;
+ int num_tasks = atomic_read(&priv->opreq_count);
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_mgm *mgm;
+ u32 *outbox;
+ u32 modifier;
+ u16 token;
+ u16 type_m;
+ u16 type;
+ int err;
+ u32 num_qps;
+ struct mlx4_qp qp;
+ int i;
+ u8 rem_mcg;
+ u8 prot;
+
+#define GET_OP_REQ_MODIFIER_OFFSET 0x08
+#define GET_OP_REQ_TOKEN_OFFSET 0x14
+#define GET_OP_REQ_TYPE_OFFSET 0x1a
+#define GET_OP_REQ_DATA_OFFSET 0x20
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ mlx4_err(dev, "Failed to allocate mailbox for GET_OP_REQ\n");
+ return;
+ }
+ outbox = mailbox->buf;
+
+ while (num_tasks) {
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
+ MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err) {
+ mlx4_err(dev, "Failed to retreive required operation: %d\n",
+ err);
+ return;
+ }
+ MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET);
+ MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
+ MLX4_GET(type, outbox, GET_OP_REQ_TYPE_OFFSET);
+ type_m = type >> 12;
+ type &= 0xfff;
+
+ switch (type) {
+ case ADD_TO_MCG:
+ if (dev->caps.steering_mode ==
+ MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ mlx4_warn(dev, "ADD MCG operation is not supported in DEVICE_MANAGED steering mode\n");
+ err = EPERM;
+ break;
+ }
+ mgm = (struct mlx4_mgm *)((u8 *)(outbox) +
+ GET_OP_REQ_DATA_OFFSET);
+ num_qps = be32_to_cpu(mgm->members_count) &
+ MGM_QPN_MASK;
+ rem_mcg = ((u8 *)(&mgm->members_count))[0] & 1;
+ prot = ((u8 *)(&mgm->members_count))[0] >> 6;
+
+ for (i = 0; i < num_qps; i++) {
+ qp.qpn = be32_to_cpu(mgm->qp[i]);
+ if (rem_mcg)
+ err = mlx4_multicast_detach(dev, &qp,
+ mgm->gid,
+ prot, 0);
+ else
+ err = mlx4_multicast_attach(dev, &qp,
+ mgm->gid,
+ mgm->gid[5]
+ , 0, prot,
+ NULL);
+ if (err)
+ break;
+ }
+ break;
+ default:
+ mlx4_warn(dev, "Bad type for required operation\n");
+ err = EINVAL;
+ break;
+ }
+ err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16),
+ 1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err) {
+ mlx4_err(dev, "Failed to acknowledge required request: %d\n",
+ err);
+ goto out;
+ }
+ memset(outbox, 0, 0xffc);
+ num_tasks = atomic_dec_return(&priv->opreq_count);
+ }
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+}
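In the ADD_TO_MCG case above, the control bits ride in the top byte of the big-endian members_count word: bit 24 selects detach versus attach and bits 30-31 carry the protocol, while the low 24 bits hold the QP count. A small decoding sketch under that layout assumption (demo_* names are hypothetical):

#include <linux/types.h>
#include <asm/byteorder.h>

#define DEMO_MGM_QPN_MASK 0x00FFFFFF	/* same mask as MGM_QPN_MASK */

/* Hypothetical decode of a members_count word laid out as in struct mlx4_mgm. */
static void demo_decode_members(__be32 members_count, u32 *num_qps,
				u8 *rem_mcg, u8 *prot)
{
	u32 host = be32_to_cpu(members_count);
	u8 top = host >> 24;			/* the byte the driver reads as [0] */

	*num_qps = host & DEMO_MGM_QPN_MASK;	/* low 24 bits: number of QPs */
	*rem_mcg = top & 1;			/* bit 24: remove (1) or add (0) */
	*prot    = top >> 6;			/* bits 30-31: protocol */
}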
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index fdf41665a05..a0a368b7c93 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -220,5 +220,6 @@ int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);
int mlx4_NOP(struct mlx4_dev *dev);
int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg);
+void mlx4_opreq_action(struct work_struct *work);
#endif /* MLX4_FW_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index e85af922dcd..60c9f4f103f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -371,7 +371,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.sqp_demux = (mlx4_is_master(dev)) ? MLX4_MAX_NUM_SLAVES : 0;
- if (!enable_64b_cqe_eqe) {
+ if (!enable_64b_cqe_eqe && !mlx4_is_slave(dev)) {
if (dev_cap->flags &
(MLX4_DEV_CAP_FLAG_64B_CQE | MLX4_DEV_CAP_FLAG_64B_EQE)) {
mlx4_warn(dev, "64B EQEs/CQEs supported by the device but not enabled\n");
@@ -1692,11 +1692,19 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_xrcd_table_free;
}
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_init_mcg_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n");
+ goto err_mr_table_free;
+ }
+ }
+
err = mlx4_init_eq_table(dev);
if (err) {
mlx4_err(dev, "Failed to initialize "
"event queue table, aborting.\n");
- goto err_mr_table_free;
+ goto err_mcg_table_free;
}
err = mlx4_cmd_use_events(dev);
@@ -1746,19 +1754,10 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_srq_table_free;
}
- if (!mlx4_is_slave(dev)) {
- err = mlx4_init_mcg_table(dev);
- if (err) {
- mlx4_err(dev, "Failed to initialize "
- "multicast group table, aborting.\n");
- goto err_qp_table_free;
- }
- }
-
err = mlx4_init_counters_table(dev);
if (err && err != -ENOENT) {
mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
- goto err_mcg_table_free;
+ goto err_qp_table_free;
}
if (!mlx4_is_slave(dev)) {
@@ -1803,9 +1802,6 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
err_counters_table_free:
mlx4_cleanup_counters_table(dev);
-err_mcg_table_free:
- mlx4_cleanup_mcg_table(dev);
-
err_qp_table_free:
mlx4_cleanup_qp_table(dev);
@@ -1821,6 +1817,10 @@ err_cmd_poll:
err_eq_table_free:
mlx4_cleanup_eq_table(dev);
+err_mcg_table_free:
+ if (!mlx4_is_slave(dev))
+ mlx4_cleanup_mcg_table(dev);
+
err_mr_table_free:
mlx4_cleanup_mr_table(dev);
@@ -2197,6 +2197,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
}
}
+ atomic_set(&priv->opreq_count, 0);
+ INIT_WORK(&priv->opreq_task, mlx4_opreq_action);
+
/*
* Now reset the HCA before we touch the PCI capabilities or
* attempt a firmware command, since a boot ROM may have left
@@ -2315,12 +2318,12 @@ err_port:
mlx4_cleanup_port_info(&priv->port[port]);
mlx4_cleanup_counters_table(dev);
- mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
mlx4_cleanup_cq_table(dev);
mlx4_cmd_use_polling(dev);
mlx4_cleanup_eq_table(dev);
+ mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_mr_table(dev);
mlx4_cleanup_xrcd_table(dev);
mlx4_cleanup_pd_table(dev);
@@ -2403,12 +2406,12 @@ static void mlx4_remove_one(struct pci_dev *pdev)
RES_TR_FREE_SLAVES_ONLY);
mlx4_cleanup_counters_table(dev);
- mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
mlx4_cleanup_cq_table(dev);
mlx4_cmd_use_polling(dev);
mlx4_cleanup_eq_table(dev);
+ mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_mr_table(dev);
mlx4_cleanup_xrcd_table(dev);
mlx4_cleanup_pd_table(dev);
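Moving mlx4_init_mcg_table() ahead of the EQ setup forces the error labels to move with it, since the unwind path must mirror the init order exactly. A generic sketch of that convention with hypothetical demo_* steps (error-handling shape only, not the driver's full sequence):

struct demo_dev;			/* hypothetical device type */
int demo_init_mr(struct demo_dev *dev);
int demo_init_mcg(struct demo_dev *dev);
int demo_init_eq(struct demo_dev *dev);
void demo_cleanup_mcg(struct demo_dev *dev);
void demo_cleanup_mr(struct demo_dev *dev);

static int demo_setup(struct demo_dev *dev)
{
	int err;

	err = demo_init_mr(dev);
	if (err)
		return err;
	err = demo_init_mcg(dev);	/* now runs before the EQs */
	if (err)
		goto err_mr;
	err = demo_init_eq(dev);
	if (err)
		goto err_mcg;
	return 0;

err_mcg:
	demo_cleanup_mcg(dev);		/* undo only what already succeeded */
err_mr:
	demo_cleanup_mr(dev);
	return err;
}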
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index f3e804f2a35..55f6245efb6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -39,19 +39,8 @@
#include "mlx4.h"
-#define MGM_QPN_MASK 0x00FFFFFF
-#define MGM_BLCK_LB_BIT 30
-
static const u8 zero_gid[16]; /* automatically initialized to 0 */
-struct mlx4_mgm {
- __be32 next_gid_index;
- __be32 members_count;
- u32 reserved[2];
- u8 gid[16];
- __be32 qp[MLX4_MAX_QP_PER_MGM];
-};
-
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
return 1 << dev->oper_log_mgm_entry_size;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 17d9277e33e..348bb8c7d9a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -554,6 +554,17 @@ struct mlx4_mfunc {
struct mlx4_mfunc_master_ctx master;
};
+#define MGM_QPN_MASK 0x00FFFFFF
+#define MGM_BLCK_LB_BIT 30
+
+struct mlx4_mgm {
+ __be32 next_gid_index;
+ __be32 members_count;
+ u32 reserved[2];
+ u8 gid[16];
+ __be32 qp[MLX4_MAX_QP_PER_MGM];
+};
+
struct mlx4_cmd {
struct pci_pool *pool;
void __iomem *hcr;
@@ -802,6 +813,8 @@ struct mlx4_priv {
u8 virt2phys_pkey[MLX4_MFUNC_MAX][MLX4_MAX_PORTS][MLX4_MAX_PORT_PKEYS];
__be64 slave_node_guids[MLX4_MFUNC_MAX];
+ atomic_t opreq_count;
+ struct work_struct opreq_task;
};
static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 35fb60e2320..5e0aa569306 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -292,7 +292,7 @@ struct mlx4_en_rx_ring {
void *rx_info;
unsigned long bytes;
unsigned long packets;
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned long yields;
unsigned long misses;
unsigned long cleaned;
@@ -318,7 +318,7 @@ struct mlx4_en_cq {
struct mlx4_cqe *buf;
#define MLX4_EN_OPCODE_ERROR 0x1e
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define MLX4_EN_CQ_STATE_IDLE 0
#define MLX4_EN_CQ_STATE_NAPI 1 /* NAPI owns this CQ */
@@ -329,7 +329,7 @@ struct mlx4_en_cq {
#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD)
#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
spinlock_t poll_lock; /* protects from LLS/napi conflicts */
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
};
struct mlx4_en_port_profile {
@@ -580,7 +580,7 @@ struct mlx4_mac_entry {
struct rcu_head rcu;
};
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
{
spin_lock_init(&cq->poll_lock);
@@ -687,7 +687,7 @@ static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
{
return false;
}
-#endif /* CONFIG_NET_LL_RX_POLL */
+#endif /* CONFIG_NET_RX_BUSY_POLL */
#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index f984a89c27d..dd687632111 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1909,7 +1909,8 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
int log_rq_stride = qpc->rq_size_stride & 7;
int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
- int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
+ u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
+ int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
int sq_size;
int rq_size;
int total_pages;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 205753a04cf..5472cbd3402 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -46,7 +46,7 @@
#include "mlx5_core.h"
enum {
- CMD_IF_REV = 3,
+ CMD_IF_REV = 5,
};
enum {
@@ -282,6 +282,12 @@ const char *mlx5_command_str(int command)
case MLX5_CMD_OP_TEARDOWN_HCA:
return "TEARDOWN_HCA";
+ case MLX5_CMD_OP_ENABLE_HCA:
+ return "MLX5_CMD_OP_ENABLE_HCA";
+
+ case MLX5_CMD_OP_DISABLE_HCA:
+ return "MLX5_CMD_OP_DISABLE_HCA";
+
case MLX5_CMD_OP_QUERY_PAGES:
return "QUERY_PAGES";
@@ -1113,7 +1119,13 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
for (i = 0; i < (1 << cmd->log_sz); i++) {
if (test_bit(i, &vector)) {
+ struct semaphore *sem;
+
ent = cmd->ent_arr[i];
+ if (ent->page_queue)
+ sem = &cmd->pages_sem;
+ else
+ sem = &cmd->sem;
ktime_get_ts(&ent->ts2);
memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
dump_command(dev, ent, 0);
@@ -1136,10 +1148,7 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, unsigned long vector)
} else {
complete(&ent->done);
}
- if (ent->page_queue)
- up(&cmd->pages_sem);
- else
- up(&cmd->sem);
+ up(sem);
}
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index c02cbcfd0fb..443cc4d7b02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -268,7 +268,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
case MLX5_EVENT_TYPE_PAGE_REQUEST:
{
u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
- s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages);
+ s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n", func_id, npages);
mlx5_core_req_pages_handler(dev, func_id, npages);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 72a5222447f..f012658b6a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -113,7 +113,7 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f;
caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f;
caps->log_max_mcg = out->hca_cap.log_max_mcg;
- caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg);
+ caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff;
caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f);
caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f);
caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 748f10a155c..3e6670c4a7c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -55,33 +55,9 @@ enum {
};
static DEFINE_SPINLOCK(health_lock);
-
static LIST_HEAD(health_list);
static struct work_struct health_work;
-static health_handler_t reg_handler;
-int mlx5_register_health_report_handler(health_handler_t handler)
-{
- spin_lock_irq(&health_lock);
- if (reg_handler) {
- spin_unlock_irq(&health_lock);
- return -EEXIST;
- }
- reg_handler = handler;
- spin_unlock_irq(&health_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(mlx5_register_health_report_handler);
-
-void mlx5_unregister_health_report_handler(void)
-{
- spin_lock_irq(&health_lock);
- reg_handler = NULL;
- spin_unlock_irq(&health_lock);
-}
-EXPORT_SYMBOL(mlx5_unregister_health_report_handler);
-
static void health_care(struct work_struct *work)
{
struct mlx5_core_health *health, *n;
@@ -98,11 +74,8 @@ static void health_care(struct work_struct *work)
priv = container_of(health, struct mlx5_priv, health);
dev = container_of(priv, struct mlx5_core_dev, priv);
mlx5_core_warn(dev, "handling bad device here\n");
+ /* nothing yet */
spin_lock_irq(&health_lock);
- if (reg_handler)
- reg_handler(dev->pdev, health->health,
- sizeof(health->health));
-
list_del_init(&health->list);
spin_unlock_irq(&health_lock);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 12242de2b0e..b47739b0b5f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -249,6 +249,44 @@ static int set_hca_ctrl(struct mlx5_core_dev *dev)
return err;
}
+static int mlx5_core_enable_hca(struct mlx5_core_dev *dev)
+{
+ int err;
+ struct mlx5_enable_hca_mbox_in in;
+ struct mlx5_enable_hca_mbox_out out;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ if (err)
+ return err;
+
+ if (out.hdr.status)
+ return mlx5_cmd_status_to_err(&out.hdr);
+
+ return 0;
+}
+
+static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
+{
+ int err;
+ struct mlx5_disable_hca_mbox_in in;
+ struct mlx5_disable_hca_mbox_out out;
+
+ memset(&in, 0, sizeof(in));
+ memset(&out, 0, sizeof(out));
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DISABLE_HCA);
+ err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
+ if (err)
+ return err;
+
+ if (out.hdr.status)
+ return mlx5_cmd_status_to_err(&out.hdr);
+
+ return 0;
+}
+
int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
struct mlx5_priv *priv = &dev->priv;
@@ -304,28 +342,41 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
}
mlx5_pagealloc_init(dev);
+
+ err = mlx5_core_enable_hca(dev);
+ if (err) {
+ dev_err(&pdev->dev, "enable hca failed\n");
+ goto err_pagealloc_cleanup;
+ }
+
+ err = mlx5_satisfy_startup_pages(dev, 1);
+ if (err) {
+ dev_err(&pdev->dev, "failed to allocate boot pages\n");
+ goto err_disable_hca;
+ }
+
err = set_hca_ctrl(dev);
if (err) {
dev_err(&pdev->dev, "set_hca_ctrl failed\n");
- goto err_pagealloc_cleanup;
+ goto reclaim_boot_pages;
}
err = handle_hca_cap(dev);
if (err) {
dev_err(&pdev->dev, "handle_hca_cap failed\n");
- goto err_pagealloc_cleanup;
+ goto reclaim_boot_pages;
}
- err = mlx5_satisfy_startup_pages(dev);
+ err = mlx5_satisfy_startup_pages(dev, 0);
if (err) {
- dev_err(&pdev->dev, "failed to allocate startup pages\n");
- goto err_pagealloc_cleanup;
+ dev_err(&pdev->dev, "failed to allocate init pages\n");
+ goto reclaim_boot_pages;
}
err = mlx5_pagealloc_start(dev);
if (err) {
dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
- goto err_reclaim_pages;
+ goto reclaim_boot_pages;
}
err = mlx5_cmd_init_hca(dev);
@@ -396,9 +447,12 @@ err_stop_poll:
err_pagealloc_stop:
mlx5_pagealloc_stop(dev);
-err_reclaim_pages:
+reclaim_boot_pages:
mlx5_reclaim_startup_pages(dev);
+err_disable_hca:
+ mlx5_core_disable_hca(dev);
+
err_pagealloc_cleanup:
mlx5_pagealloc_cleanup(dev);
mlx5_cmd_cleanup(dev);
@@ -434,6 +488,7 @@ void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
mlx5_cmd_teardown_hca(dev);
mlx5_pagealloc_stop(dev);
mlx5_reclaim_startup_pages(dev);
+ mlx5_core_disable_hca(dev);
mlx5_pagealloc_cleanup(dev);
mlx5_cmd_cleanup(dev);
iounmap(dev->iseg);
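Together with the pagealloc changes below, the bring-up in mlx5_dev_init now hands pages to the device in two stages: boot pages right after ENABLE_HCA, then init pages once the capabilities are settled, with DISABLE_HCA added to the teardown. A condensed sketch of that order (error handling trimmed, names as in the patch; not a drop-in replacement, and the prototypes are repeated here only so the fragment stands alone even though several helpers are static to main.c):

struct mlx5_core_dev;
int mlx5_core_enable_hca(struct mlx5_core_dev *dev);
int mlx5_core_disable_hca(struct mlx5_core_dev *dev);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
int set_hca_ctrl(struct mlx5_core_dev *dev);
int handle_hca_cap(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);

static int demo_bringup(struct mlx5_core_dev *dev)
{
	int err;

	err = mlx5_core_enable_hca(dev);		/* ENABLE_HCA first */
	if (err)
		return err;
	err = mlx5_satisfy_startup_pages(dev, 1);	/* boot pages (opmod = boot) */
	if (err)
		goto disable;
	err = set_hca_ctrl(dev);
	if (err)
		goto reclaim;
	err = handle_hca_cap(dev);
	if (err)
		goto reclaim;
	err = mlx5_satisfy_startup_pages(dev, 0);	/* init pages (opmod = init) */
	if (err)
		goto reclaim;
	return mlx5_pagealloc_start(dev);		/* then INIT_HCA and the rest */

reclaim:
	mlx5_reclaim_startup_pages(dev);
disable:
	mlx5_core_disable_hca(dev);
	return err;
}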
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index f0bf46339b2..3a2408d4482 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -43,10 +43,16 @@ enum {
MLX5_PAGES_TAKE = 2
};
+enum {
+ MLX5_BOOT_PAGES = 1,
+ MLX5_INIT_PAGES = 2,
+ MLX5_POST_INIT_PAGES = 3
+};
+
struct mlx5_pages_req {
struct mlx5_core_dev *dev;
u32 func_id;
- s16 npages;
+ s32 npages;
struct work_struct work;
};
@@ -64,27 +70,23 @@ struct mlx5_query_pages_inbox {
struct mlx5_query_pages_outbox {
struct mlx5_outbox_hdr hdr;
- u8 reserved[2];
+ __be16 rsvd;
__be16 func_id;
- __be16 init_pages;
- __be16 num_pages;
+ __be32 num_pages;
};
struct mlx5_manage_pages_inbox {
struct mlx5_inbox_hdr hdr;
- __be16 rsvd0;
+ __be16 rsvd;
__be16 func_id;
- __be16 rsvd1;
- __be16 num_entries;
- u8 rsvd2[16];
+ __be32 num_entries;
__be64 pas[0];
};
struct mlx5_manage_pages_outbox {
struct mlx5_outbox_hdr hdr;
- u8 rsvd0[2];
- __be16 num_entries;
- u8 rsvd1[20];
+ __be32 num_entries;
+ u8 rsvd[4];
__be64 pas[0];
};
@@ -146,7 +148,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr)
}
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
- s16 *pages, s16 *init_pages)
+ s32 *npages, int boot)
{
struct mlx5_query_pages_inbox in;
struct mlx5_query_pages_outbox out;
@@ -155,6 +157,8 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES);
+ in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES);
+
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
if (err)
return err;
@@ -162,10 +166,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
if (out.hdr.status)
return mlx5_cmd_status_to_err(&out.hdr);
- if (pages)
- *pages = be16_to_cpu(out.num_pages);
- if (init_pages)
- *init_pages = be16_to_cpu(out.init_pages);
+ *npages = be32_to_cpu(out.num_pages);
*func_id = be16_to_cpu(out.func_id);
return err;
@@ -219,7 +220,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE);
in->func_id = cpu_to_be16(func_id);
- in->num_entries = cpu_to_be16(npages);
+ in->num_entries = cpu_to_be32(npages);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
mlx5_core_dbg(dev, "err %d\n", err);
if (err) {
@@ -287,7 +288,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES);
in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE);
in.func_id = cpu_to_be16(func_id);
- in.num_entries = cpu_to_be16(npages);
+ in.num_entries = cpu_to_be32(npages);
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
if (err) {
@@ -301,7 +302,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
goto out_free;
}
- num_claimed = be16_to_cpu(out->num_entries);
+ num_claimed = be32_to_cpu(out->num_entries);
if (nclaimed)
*nclaimed = num_claimed;
@@ -340,7 +341,7 @@ static void pages_work_handler(struct work_struct *work)
}
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
- s16 npages)
+ s32 npages)
{
struct mlx5_pages_req *req;
@@ -357,19 +358,20 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
queue_work(dev->priv.pg_wq, &req->work);
}
-int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev)
+int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
- s16 uninitialized_var(init_pages);
u16 uninitialized_var(func_id);
+ s32 uninitialized_var(npages);
int err;
- err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages);
+ err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
if (err)
return err;
- mlx5_core_dbg(dev, "requested %d init pages for func_id 0x%x\n", init_pages, func_id);
+ mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
+ npages, boot ? "boot" : "init", func_id);
- return give_pages(dev, func_id, init_pages, 0);
+ return give_pages(dev, func_id, npages, 0);
}
static int optimal_reclaimed_pages(void)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 71d4a393720..68f5d9c77c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -164,6 +164,7 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
uuari->uars[i].map = ioremap(addr, PAGE_SIZE);
if (!uuari->uars[i].map) {
mlx5_cmd_free_uar(dev, uuari->uars[i].index);
+ err = -ENOMEM;
goto out_count;
}
mlx5_core_dbg(dev, "allocated uar index 0x%x, mmaped at %p\n",
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index e393d998be8..0951f7aca1e 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -705,7 +705,8 @@ static void ks8842_rx_frame(struct net_device *netdev,
ks8842_enable_bits(adapter, 0, 1 << 12, REG_QRFCR);
}
-void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
+static void ks8842_handle_rx(struct net_device *netdev,
+ struct ks8842_adapter *adapter)
{
u16 rx_data = ks8842_read16(adapter, 16, REG_RXMIR) & 0x1fff;
netdev_dbg(netdev, "%s Entry - rx_data: %d\n", __func__, rx_data);
@@ -715,7 +716,8 @@ void ks8842_handle_rx(struct net_device *netdev, struct ks8842_adapter *adapter)
}
}
-void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
+static void ks8842_handle_tx(struct net_device *netdev,
+ struct ks8842_adapter *adapter)
{
u16 sr = ks8842_read16(adapter, 16, REG_TXSR);
netdev_dbg(netdev, "%s - entry, sr: %x\n", __func__, sr);
@@ -724,7 +726,7 @@ void ks8842_handle_tx(struct net_device *netdev, struct ks8842_adapter *adapter)
netif_wake_queue(netdev);
}
-void ks8842_handle_rx_overrun(struct net_device *netdev,
+static void ks8842_handle_rx_overrun(struct net_device *netdev,
struct ks8842_adapter *adapter)
{
netdev_dbg(netdev, "%s: entry\n", __func__);
@@ -732,7 +734,7 @@ void ks8842_handle_rx_overrun(struct net_device *netdev,
netdev->stats.rx_fifo_errors++;
}
-void ks8842_tasklet(unsigned long arg)
+static void ks8842_tasklet(unsigned long arg)
{
struct net_device *netdev = (struct net_device *)arg;
struct ks8842_adapter *adapter = netdev_priv(netdev);
@@ -1146,7 +1148,7 @@ static int ks8842_probe(struct platform_device *pdev)
struct resource *iomem;
struct net_device *netdev;
struct ks8842_adapter *adapter;
- struct ks8842_platform_data *pdata = pdev->dev.platform_data;
+ struct ks8842_platform_data *pdata = dev_get_platdata(&pdev->dev);
u16 id;
unsigned i;
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index ac20098b542..0fba1532d32 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -688,7 +688,7 @@ static void ks_soft_reset(struct ks_net *ks, unsigned op)
}
-void ks_enable_qmu(struct ks_net *ks)
+static void ks_enable_qmu(struct ks_net *ks)
{
u16 w;
@@ -1636,7 +1636,7 @@ static int ks8851_probe(struct platform_device *pdev)
} else {
struct ks8851_mll_platform_data *pdata;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
netdev_err(netdev, "No platform data\n");
err = -ENODEV;
diff --git a/drivers/net/ethernet/moxa/Kconfig b/drivers/net/ethernet/moxa/Kconfig
new file mode 100644
index 00000000000..1731e050fa2
--- /dev/null
+++ b/drivers/net/ethernet/moxa/Kconfig
@@ -0,0 +1,30 @@
+#
+# MOXART device configuration
+#
+
+config NET_VENDOR_MOXART
+ bool "MOXA ART devices"
+ default y
+ depends on (ARM && ARCH_MOXART)
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about MOXA ART devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_MOXART
+
+config ARM_MOXART_ETHER
+ tristate "MOXART Ethernet support"
+ depends on ARM && ARCH_MOXART
+ select NET_CORE
+ ---help---
+ If you wish to compile a kernel for hardware with a MOXA ART SoC and
+ want to use its internal Ethernet controller, answer Y here.
+
+
+endif # NET_VENDOR_MOXART
diff --git a/drivers/net/ethernet/moxa/Makefile b/drivers/net/ethernet/moxa/Makefile
new file mode 100644
index 00000000000..aa3c73e9e95
--- /dev/null
+++ b/drivers/net/ethernet/moxa/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the MOXART network device drivers.
+#
+
+obj-$(CONFIG_ARM_MOXART_ETHER) += moxart_ether.o
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
new file mode 100644
index 00000000000..83c2091c9c2
--- /dev/null
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -0,0 +1,559 @@
+/* MOXA ART Ethernet (RTL8201CP) driver.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * Based on code from
+ * Moxa Technology Co., Ltd. <www.moxa.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/ethtool.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/crc32.h>
+#include <linux/crc32c.h>
+
+#include "moxart_ether.h"
+
+static inline void moxart_emac_write(struct net_device *ndev,
+ unsigned int reg, unsigned long value)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ writel(value, priv->base + reg);
+}
+
+static void moxart_update_mac_address(struct net_device *ndev)
+{
+ moxart_emac_write(ndev, REG_MAC_MS_ADDRESS,
+ ((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])));
+ moxart_emac_write(ndev, REG_MAC_MS_ADDRESS + 4,
+ ((ndev->dev_addr[2] << 24) |
+ (ndev->dev_addr[3] << 16) |
+ (ndev->dev_addr[4] << 8) |
+ (ndev->dev_addr[5])));
+}
+
+static int moxart_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct sockaddr *address = addr;
+
+ if (!is_valid_ether_addr(address->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
+ moxart_update_mac_address(ndev);
+
+ return 0;
+}
+
+static void moxart_mac_free_memory(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ int i;
+
+ for (i = 0; i < RX_DESC_NUM; i++)
+ dma_unmap_single(&ndev->dev, priv->rx_mapping[i],
+ priv->rx_buf_size, DMA_FROM_DEVICE);
+
+ if (priv->tx_desc_base)
+ dma_free_coherent(NULL, TX_REG_DESC_SIZE * TX_DESC_NUM,
+ priv->tx_desc_base, priv->tx_base);
+
+ if (priv->rx_desc_base)
+ dma_free_coherent(NULL, RX_REG_DESC_SIZE * RX_DESC_NUM,
+ priv->rx_desc_base, priv->rx_base);
+
+ kfree(priv->tx_buf_base);
+ kfree(priv->rx_buf_base);
+}
+
+static void moxart_mac_reset(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ writel(SW_RST, priv->base + REG_MAC_CTRL);
+ while (readl(priv->base + REG_MAC_CTRL) & SW_RST)
+ mdelay(10);
+
+ writel(0, priv->base + REG_INTERRUPT_MASK);
+
+ priv->reg_maccr = RX_BROADPKT | FULLDUP | CRC_APD | RX_FTL;
+}
+
+static void moxart_mac_enable(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ writel(0x00001010, priv->base + REG_INT_TIMER_CTRL);
+ writel(0x00000001, priv->base + REG_APOLL_TIMER_CTRL);
+ writel(0x00000390, priv->base + REG_DMA_BLEN_CTRL);
+
+ priv->reg_imr |= (RPKT_FINISH_M | XPKT_FINISH_M);
+ writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
+
+ priv->reg_maccr |= (RCV_EN | XMT_EN | RDMA_EN | XDMA_EN);
+ writel(priv->reg_maccr, priv->base + REG_MAC_CTRL);
+}
+
+static void moxart_mac_setup_desc_ring(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ void __iomem *desc;
+ int i;
+
+ for (i = 0; i < TX_DESC_NUM; i++) {
+ desc = priv->tx_desc_base + i * TX_REG_DESC_SIZE;
+ memset(desc, 0, TX_REG_DESC_SIZE);
+
+ priv->tx_buf[i] = priv->tx_buf_base + priv->tx_buf_size * i;
+ }
+ writel(TX_DESC1_END, desc + TX_REG_OFFSET_DESC1);
+
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ desc = priv->rx_desc_base + i * RX_REG_DESC_SIZE;
+ memset(desc, 0, RX_REG_DESC_SIZE);
+ writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
+ writel(RX_BUF_SIZE & RX_DESC1_BUF_SIZE_MASK,
+ desc + RX_REG_OFFSET_DESC1);
+
+ priv->rx_buf[i] = priv->rx_buf_base + priv->rx_buf_size * i;
+ priv->rx_mapping[i] = dma_map_single(&ndev->dev,
+ priv->rx_buf[i],
+ priv->rx_buf_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ndev->dev, priv->rx_mapping[i]))
+ netdev_err(ndev, "DMA mapping error\n");
+
+ writel(priv->rx_mapping[i],
+ desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_PHYS);
+ writel(priv->rx_buf[i],
+ desc + RX_REG_OFFSET_DESC2 + RX_DESC2_ADDRESS_VIRT);
+ }
+ writel(RX_DESC1_END, desc + RX_REG_OFFSET_DESC1);
+
+ priv->rx_head = 0;
+
+ /* reset the MAC controller TX/RX descriptor base address */
+ writel(priv->tx_base, priv->base + REG_TXR_BASE_ADDRESS);
+ writel(priv->rx_base, priv->base + REG_RXR_BASE_ADDRESS);
+}
+
+static int moxart_mac_open(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ napi_enable(&priv->napi);
+
+ moxart_mac_reset(ndev);
+ moxart_update_mac_address(ndev);
+ moxart_mac_setup_desc_ring(ndev);
+ moxart_mac_enable(ndev);
+ netif_start_queue(ndev);
+
+ netdev_dbg(ndev, "%s: IMR=0x%x, MACCR=0x%x\n",
+ __func__, readl(priv->base + REG_INTERRUPT_MASK),
+ readl(priv->base + REG_MAC_CTRL));
+
+ return 0;
+}
+
+static int moxart_mac_stop(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ napi_disable(&priv->napi);
+
+ netif_stop_queue(ndev);
+
+ /* disable all interrupts */
+ writel(0, priv->base + REG_INTERRUPT_MASK);
+
+ /* disable all functions */
+ writel(0, priv->base + REG_MAC_CTRL);
+
+ return 0;
+}
+
+static int moxart_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct moxart_mac_priv_t *priv = container_of(napi,
+ struct moxart_mac_priv_t,
+ napi);
+ struct net_device *ndev = priv->ndev;
+ struct sk_buff *skb;
+ void __iomem *desc;
+ unsigned int desc0, len;
+ int rx_head = priv->rx_head;
+ int rx = 0;
+
+ while (1) {
+ desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head);
+ desc0 = readl(desc + RX_REG_OFFSET_DESC0);
+
+ if (desc0 & RX_DESC0_DMA_OWN)
+ break;
+
+ if (desc0 & (RX_DESC0_ERR | RX_DESC0_CRC_ERR | RX_DESC0_FTL |
+ RX_DESC0_RUNT | RX_DESC0_ODD_NB)) {
+ net_dbg_ratelimited("packet error\n");
+ priv->stats.rx_dropped++;
+ priv->stats.rx_errors++;
+ goto rx_next;
+ }
+
+ len = desc0 & RX_DESC0_FRAME_LEN_MASK;
+
+ if (len > RX_BUF_SIZE)
+ len = RX_BUF_SIZE;
+
+ skb = build_skb(priv->rx_buf[rx_head], priv->rx_buf_size);
+ if (unlikely(!skb)) {
+ net_dbg_ratelimited("build_skb failed\n");
+ priv->stats.rx_dropped++;
+ priv->stats.rx_errors++;
+ goto rx_next;
+ }
+
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&priv->napi, skb);
+ rx++;
+
+ ndev->last_rx = jiffies;
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += len;
+ if (desc0 & RX_DESC0_MULTICAST)
+ priv->stats.multicast++;
+
+rx_next:
+ writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0);
+
+ rx_head = RX_NEXT(rx_head);
+ priv->rx_head = rx_head;
+
+ if (rx >= budget)
+ break;
+ }
+
+ if (rx < budget) {
+ napi_gro_flush(napi, false);
+ __napi_complete(napi);
+ }
+
+ priv->reg_imr |= RPKT_FINISH_M;
+ writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
+
+ return rx;
+}
+
+static void moxart_tx_finished(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ unsigned tx_head = priv->tx_head;
+ unsigned tx_tail = priv->tx_tail;
+
+ while (tx_tail != tx_head) {
+ dma_unmap_single(&ndev->dev, priv->tx_mapping[tx_tail],
+ priv->tx_len[tx_tail], DMA_TO_DEVICE);
+
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += priv->tx_skb[tx_tail]->len;
+
+ dev_kfree_skb_irq(priv->tx_skb[tx_tail]);
+ priv->tx_skb[tx_tail] = NULL;
+
+ tx_tail = TX_NEXT(tx_tail);
+ }
+ priv->tx_tail = tx_tail;
+}
+
+static irqreturn_t moxart_mac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *) dev_id;
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ unsigned int ists = readl(priv->base + REG_INTERRUPT_STATUS);
+
+ if (ists & XPKT_OK_INT_STS)
+ moxart_tx_finished(ndev);
+
+ if (ists & RPKT_FINISH) {
+ if (napi_schedule_prep(&priv->napi)) {
+ priv->reg_imr &= ~RPKT_FINISH_M;
+ writel(priv->reg_imr, priv->base + REG_INTERRUPT_MASK);
+ __napi_schedule(&priv->napi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ void __iomem *desc;
+ unsigned int len;
+ unsigned int tx_head = priv->tx_head;
+ u32 txdes1;
+ int ret = NETDEV_TX_BUSY;
+
+ desc = priv->tx_desc_base + (TX_REG_DESC_SIZE * tx_head);
+
+ spin_lock_irq(&priv->txlock);
+ if (readl(desc + TX_REG_OFFSET_DESC0) & TX_DESC0_DMA_OWN) {
+ net_dbg_ratelimited("no TX space for packet\n");
+ priv->stats.tx_dropped++;
+ goto out_unlock;
+ }
+
+ len = skb->len > TX_BUF_SIZE ? TX_BUF_SIZE : skb->len;
+
+ priv->tx_mapping[tx_head] = dma_map_single(&ndev->dev, skb->data,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&ndev->dev, priv->tx_mapping[tx_head])) {
+ netdev_err(ndev, "DMA mapping error\n");
+ goto out_unlock;
+ }
+
+ priv->tx_len[tx_head] = len;
+ priv->tx_skb[tx_head] = skb;
+
+ writel(priv->tx_mapping[tx_head],
+ desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_PHYS);
+ writel(skb->data,
+ desc + TX_REG_OFFSET_DESC2 + TX_DESC2_ADDRESS_VIRT);
+
+ if (skb->len < ETH_ZLEN) {
+ memset(&skb->data[skb->len],
+ 0, ETH_ZLEN - skb->len);
+ len = ETH_ZLEN;
+ }
+
+ txdes1 = readl(desc + TX_REG_OFFSET_DESC1);
+ txdes1 |= TX_DESC1_LTS | TX_DESC1_FTS;
+ txdes1 &= ~(TX_DESC1_FIFO_COMPLETE | TX_DESC1_INTR_COMPLETE);
+ txdes1 |= (len & TX_DESC1_BUF_SIZE_MASK);
+ writel(txdes1, desc + TX_REG_OFFSET_DESC1);
+ writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0);
+
+ /* start to send packet */
+ writel(0xffffffff, priv->base + REG_TX_POLL_DEMAND);
+
+ priv->tx_head = TX_NEXT(tx_head);
+
+ ndev->trans_start = jiffies;
+ ret = NETDEV_TX_OK;
+out_unlock:
+ spin_unlock_irq(&priv->txlock);
+
+ return ret;
+}
+
+static struct net_device_stats *moxart_mac_get_stats(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ return &priv->stats;
+}
+
+static void moxart_mac_setmulticast(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+ struct netdev_hw_addr *ha;
+ int crc_val;
+
+ netdev_for_each_mc_addr(ha, ndev) {
+ crc_val = crc32_le(~0, ha->addr, ETH_ALEN);
+ crc_val = (crc_val >> 26) & 0x3f;
+ if (crc_val >= 32) {
+ writel(readl(priv->base + REG_MCAST_HASH_TABLE1) |
+ (1UL << (crc_val - 32)),
+ priv->base + REG_MCAST_HASH_TABLE1);
+ } else {
+ writel(readl(priv->base + REG_MCAST_HASH_TABLE0) |
+ (1UL << crc_val),
+ priv->base + REG_MCAST_HASH_TABLE0);
+ }
+ }
+}
+
+static void moxart_mac_set_rx_mode(struct net_device *ndev)
+{
+ struct moxart_mac_priv_t *priv = netdev_priv(ndev);
+
+ spin_lock_irq(&priv->txlock);
+
+ if (ndev->flags & IFF_PROMISC)
+ priv->reg_maccr |= RCV_ALL;
+ else
+ priv->reg_maccr &= ~RCV_ALL;
+
+ if (ndev->flags & IFF_ALLMULTI)
+ priv->reg_maccr |= RX_MULTIPKT;
+ else
+ priv->reg_maccr &= ~RX_MULTIPKT;
+
+ if ((ndev->flags & IFF_MULTICAST) && netdev_mc_count(ndev)) {
+ priv->reg_maccr |= HT_MULTI_EN;
+ moxart_mac_setmulticast(ndev);
+ } else {
+ priv->reg_maccr &= ~HT_MULTI_EN;
+ }
+
+ writel(priv->reg_maccr, priv->base + REG_MAC_CTRL);
+
+ spin_unlock_irq(&priv->txlock);
+}
+
+static struct net_device_ops moxart_netdev_ops = {
+ .ndo_open = moxart_mac_open,
+ .ndo_stop = moxart_mac_stop,
+ .ndo_start_xmit = moxart_mac_start_xmit,
+ .ndo_get_stats = moxart_mac_get_stats,
+ .ndo_set_rx_mode = moxart_mac_set_rx_mode,
+ .ndo_set_mac_address = moxart_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static int moxart_mac_probe(struct platform_device *pdev)
+{
+ struct device *p_dev = &pdev->dev;
+ struct device_node *node = p_dev->of_node;
+ struct net_device *ndev;
+ struct moxart_mac_priv_t *priv;
+ struct resource *res;
+ unsigned int irq;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(struct moxart_mac_priv_t));
+ if (!ndev)
+ return -ENOMEM;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0) {
+ netdev_err(ndev, "irq_of_parse_and_map failed\n");
+ return -EINVAL;
+ }
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ndev->base_addr = res->start;
+ priv->base = devm_ioremap_resource(p_dev, res);
+ if (IS_ERR(priv->base)) {
+ dev_err(p_dev, "devm_ioremap_resource failed\n");
+ ret = PTR_ERR(priv->base);
+ goto init_fail;
+ }
+
+ spin_lock_init(&priv->txlock);
+
+ priv->tx_buf_size = TX_BUF_SIZE;
+ priv->rx_buf_size = RX_BUF_SIZE +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE *
+ TX_DESC_NUM, &priv->tx_base,
+ GFP_DMA | GFP_KERNEL);
+ if (priv->tx_desc_base == NULL)
+ goto init_fail;
+
+ priv->rx_desc_base = dma_alloc_coherent(NULL, RX_REG_DESC_SIZE *
+ RX_DESC_NUM, &priv->rx_base,
+ GFP_DMA | GFP_KERNEL);
+ if (priv->rx_desc_base == NULL)
+ goto init_fail;
+
+ priv->tx_buf_base = kmalloc(priv->tx_buf_size * TX_DESC_NUM,
+ GFP_ATOMIC);
+ if (!priv->tx_buf_base)
+ goto init_fail;
+
+ priv->rx_buf_base = kmalloc(priv->rx_buf_size * RX_DESC_NUM,
+ GFP_ATOMIC);
+ if (!priv->rx_buf_base)
+ goto init_fail;
+
+ platform_set_drvdata(pdev, ndev);
+
+ ret = devm_request_irq(p_dev, irq, moxart_mac_interrupt, 0,
+ pdev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "devm_request_irq failed\n");
+ goto init_fail;
+ }
+
+ ether_setup(ndev);
+ ndev->netdev_ops = &moxart_netdev_ops;
+ netif_napi_add(ndev, &priv->napi, moxart_rx_poll, RX_DESC_NUM);
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->irq = irq;
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ free_netdev(ndev);
+ goto init_fail;
+ }
+
+ netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n",
+ __func__, ndev->irq, ndev->dev_addr);
+
+ return 0;
+
+init_fail:
+ netdev_err(ndev, "init failed\n");
+ moxart_mac_free_memory(ndev);
+
+ return ret;
+}
+
+static int moxart_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+
+ unregister_netdev(ndev);
+ free_irq(ndev->irq, ndev);
+ moxart_mac_free_memory(ndev);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static const struct of_device_id moxart_mac_match[] = {
+ { .compatible = "moxa,moxart-mac" },
+ { }
+};
+
+static struct platform_driver moxart_mac_driver = {
+ .probe = moxart_mac_probe,
+ .remove = moxart_remove,
+ .driver = {
+ .name = "moxart-ethernet",
+ .owner = THIS_MODULE,
+ .of_match_table = moxart_mac_match,
+ },
+};
+module_platform_driver(moxart_mac_driver);
+
+MODULE_DESCRIPTION("MOXART RTL8201CP Ethernet driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>");
diff --git a/drivers/net/ethernet/moxa/moxart_ether.h b/drivers/net/ethernet/moxa/moxart_ether.h
new file mode 100644
index 00000000000..2be9280d608
--- /dev/null
+++ b/drivers/net/ethernet/moxa/moxart_ether.h
@@ -0,0 +1,330 @@
+/* MOXA ART Ethernet (RTL8201CP) driver.
+ *
+ * Copyright (C) 2013 Jonas Jensen
+ *
+ * Jonas Jensen <jonas.jensen@gmail.com>
+ *
+ * Based on code from
+ * Moxa Technology Co., Ltd. <www.moxa.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef _MOXART_ETHERNET_H
+#define _MOXART_ETHERNET_H
+
+#define TX_REG_OFFSET_DESC0 0
+#define TX_REG_OFFSET_DESC1 4
+#define TX_REG_OFFSET_DESC2 8
+#define TX_REG_DESC_SIZE 16
+
+#define RX_REG_OFFSET_DESC0 0
+#define RX_REG_OFFSET_DESC1 4
+#define RX_REG_OFFSET_DESC2 8
+#define RX_REG_DESC_SIZE 16
+
+#define TX_DESC0_PKT_LATE_COL 0x1 /* abort, late collision */
+#define TX_DESC0_RX_PKT_EXS_COL 0x2 /* abort, >16 collisions */
+#define TX_DESC0_DMA_OWN 0x80000000 /* owned by controller */
+#define TX_DESC1_BUF_SIZE_MASK 0x7ff
+#define TX_DESC1_LTS 0x8000000 /* last TX packet */
+#define TX_DESC1_FTS 0x10000000 /* first TX packet */
+#define TX_DESC1_FIFO_COMPLETE 0x20000000
+#define TX_DESC1_INTR_COMPLETE 0x40000000
+#define TX_DESC1_END 0x80000000
+#define TX_DESC2_ADDRESS_PHYS 0
+#define TX_DESC2_ADDRESS_VIRT 4
+
+#define RX_DESC0_FRAME_LEN 0
+#define RX_DESC0_FRAME_LEN_MASK 0x7FF
+#define RX_DESC0_MULTICAST 0x10000
+#define RX_DESC0_BROADCAST 0x20000
+#define RX_DESC0_ERR 0x40000
+#define RX_DESC0_CRC_ERR 0x80000
+#define RX_DESC0_FTL 0x100000
+#define RX_DESC0_RUNT 0x200000 /* packet less than 64 bytes */
+#define RX_DESC0_ODD_NB 0x400000 /* receive odd nibbles */
+#define RX_DESC0_LRS 0x10000000 /* last receive segment */
+#define RX_DESC0_FRS 0x20000000 /* first receive segment */
+#define RX_DESC0_DMA_OWN 0x80000000
+#define RX_DESC1_BUF_SIZE_MASK 0x7FF
+#define RX_DESC1_END 0x80000000
+#define RX_DESC2_ADDRESS_PHYS 0
+#define RX_DESC2_ADDRESS_VIRT 4
+
+#define TX_DESC_NUM 64
+#define TX_DESC_NUM_MASK (TX_DESC_NUM-1)
+#define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM_MASK))
+#define TX_BUF_SIZE 1600
+#define TX_BUF_SIZE_MAX (TX_DESC1_BUF_SIZE_MASK+1)
+
+#define RX_DESC_NUM 64
+#define RX_DESC_NUM_MASK (RX_DESC_NUM-1)
+#define RX_NEXT(N) (((N) + 1) & (RX_DESC_NUM_MASK))
+#define RX_BUF_SIZE 1600
+#define RX_BUF_SIZE_MAX (RX_DESC1_BUF_SIZE_MASK+1)
+
+#define REG_INTERRUPT_STATUS 0
+#define REG_INTERRUPT_MASK 4
+#define REG_MAC_MS_ADDRESS 8
+#define REG_MAC_LS_ADDRESS 12
+#define REG_MCAST_HASH_TABLE0 16
+#define REG_MCAST_HASH_TABLE1 20
+#define REG_TX_POLL_DEMAND 24
+#define REG_RX_POLL_DEMAND 28
+#define REG_TXR_BASE_ADDRESS 32
+#define REG_RXR_BASE_ADDRESS 36
+#define REG_INT_TIMER_CTRL 40
+#define REG_APOLL_TIMER_CTRL 44
+#define REG_DMA_BLEN_CTRL 48
+#define REG_RESERVED1 52
+#define REG_MAC_CTRL 136
+#define REG_MAC_STATUS 140
+#define REG_PHY_CTRL 144
+#define REG_PHY_WRITE_DATA 148
+#define REG_FLOW_CTRL 152
+#define REG_BACK_PRESSURE 156
+#define REG_RESERVED2 160
+#define REG_TEST_SEED 196
+#define REG_DMA_FIFO_STATE 200
+#define REG_TEST_MODE 204
+#define REG_RESERVED3 208
+#define REG_TX_COL_COUNTER 212
+#define REG_RPF_AEP_COUNTER 216
+#define REG_XM_PG_COUNTER 220
+#define REG_RUNT_TLC_COUNTER 224
+#define REG_CRC_FTL_COUNTER 228
+#define REG_RLC_RCC_COUNTER 232
+#define REG_BROC_COUNTER 236
+#define REG_MULCA_COUNTER 240
+#define REG_RP_COUNTER 244
+#define REG_XP_COUNTER 248
+
+#define REG_PHY_CTRL_OFFSET 0x0
+#define REG_PHY_STATUS 0x1
+#define REG_PHY_ID1 0x2
+#define REG_PHY_ID2 0x3
+#define REG_PHY_ANA 0x4
+#define REG_PHY_ANLPAR 0x5
+#define REG_PHY_ANE 0x6
+#define REG_PHY_ECTRL1 0x10
+#define REG_PHY_QPDS 0x11
+#define REG_PHY_10BOP 0x12
+#define REG_PHY_ECTRL2 0x13
+#define REG_PHY_FTMAC100_WRITE 0x8000000
+#define REG_PHY_FTMAC100_READ 0x4000000
+
+/* REG_INTERRUPT_STATUS */
+#define RPKT_FINISH BIT(0) /* DMA data received */
+#define NORXBUF BIT(1) /* receive buffer unavailable */
+#define XPKT_FINISH BIT(2) /* DMA moved data to TX FIFO */
+#define NOTXBUF BIT(3) /* transmit buffer unavailable */
+#define XPKT_OK_INT_STS BIT(4) /* transmit to ethernet success */
+#define XPKT_LOST_INT_STS BIT(5) /* transmit ethernet lost (collision) */
+#define RPKT_SAV BIT(6) /* FIFO receive success */
+#define RPKT_LOST_INT_STS BIT(7) /* FIFO full, receive failed */
+#define AHB_ERR BIT(8) /* AHB error */
+#define PHYSTS_CHG BIT(9) /* PHY link status change */
+
+/* REG_INTERRUPT_MASK */
+#define RPKT_FINISH_M BIT(0)
+#define NORXBUF_M BIT(1)
+#define XPKT_FINISH_M BIT(2)
+#define NOTXBUF_M BIT(3)
+#define XPKT_OK_M BIT(4)
+#define XPKT_LOST_M BIT(5)
+#define RPKT_SAV_M BIT(6)
+#define RPKT_LOST_M BIT(7)
+#define AHB_ERR_M BIT(8)
+#define PHYSTS_CHG_M BIT(9)
+
+/* REG_MAC_MS_ADDRESS */
+#define MAC_MADR_MASK 0xffff /* 2 MSB MAC address */
+
+/* REG_INT_TIMER_CTRL */
+#define TXINT_TIME_SEL BIT(15) /* TX cycle time period */
+#define TXINT_THR_MASK 0x7000
+#define TXINT_CNT_MASK 0xf00
+#define RXINT_TIME_SEL BIT(7) /* RX cycle time period */
+#define RXINT_THR_MASK 0x70
+#define RXINT_CNT_MASK 0xF
+
+/* REG_APOLL_TIMER_CTRL */
+#define TXPOLL_TIME_SEL BIT(12) /* TX poll time period */
+#define TXPOLL_CNT_MASK 0xf00
+#define TXPOLL_CNT_SHIFT_BIT 8
+#define RXPOLL_TIME_SEL BIT(4) /* RX poll time period */
+#define RXPOLL_CNT_MASK 0xF
+#define RXPOLL_CNT_SHIFT_BIT 0
+
+/* REG_DMA_BLEN_CTRL */
+#define RX_THR_EN BIT(9) /* RX FIFO threshold arbitration */
+#define RXFIFO_HTHR_MASK 0x1c0
+#define RXFIFO_LTHR_MASK 0x38
+#define INCR16_EN BIT(2) /* AHB bus INCR16 burst command */
+#define INCR8_EN BIT(1) /* AHB bus INCR8 burst command */
+#define INCR4_EN BIT(0) /* AHB bus INCR4 burst command */
+
+/* REG_MAC_CTRL */
+#define RX_BROADPKT BIT(17) /* receive broadcast packets */
+#define RX_MULTIPKT BIT(16) /* receive all multicast packets */
+#define FULLDUP BIT(15) /* full duplex */
+#define CRC_APD BIT(14) /* append CRC to transmitted packet */
+#define RCV_ALL BIT(12) /* ignore incoming packet destination */
+#define RX_FTL BIT(11) /* accept packets larger than 1518 B */
+#define RX_RUNT BIT(10) /* accept packets smaller than 64 B */
+#define HT_MULTI_EN BIT(9) /* accept on hash and mcast pass */
+#define RCV_EN BIT(8) /* receiver enable */
+#define ENRX_IN_HALFTX BIT(6) /* enable receive in half duplex mode */
+#define XMT_EN BIT(5) /* transmit enable */
+#define CRC_DIS BIT(4) /* disable CRC check when receiving */
+#define LOOP_EN BIT(3) /* internal loop-back */
+#define SW_RST BIT(2) /* software reset, last 64 AHB clocks */
+#define RDMA_EN BIT(1) /* enable receive DMA chan */
+#define XDMA_EN BIT(0) /* enable transmit DMA chan */
+
+/* REG_MAC_STATUS */
+#define COL_EXCEED BIT(11) /* more than 16 collisions */
+#define LATE_COL BIT(10) /* transmit late collision detected */
+#define XPKT_LOST BIT(9) /* transmit to ethernet lost */
+#define XPKT_OK BIT(8) /* transmit to ethernet success */
+#define RUNT_MAC_STS BIT(7) /* receive runt detected */
+#define FTL_MAC_STS BIT(6) /* receive frame too long detected */
+#define CRC_ERR_MAC_STS BIT(5)
+#define RPKT_LOST BIT(4) /* RX FIFO full, receive failed */
+#define RPKT_SAVE BIT(3) /* RX FIFO receive success */
+#define COL BIT(2) /* collision, incoming packet dropped */
+#define MCPU_BROADCAST BIT(1)
+#define MCPU_MULTICAST BIT(0)
+
+/* REG_PHY_CTRL */
+#define MIIWR BIT(27) /* init write sequence (auto cleared) */
+#define MIIRD BIT(26)
+#define REGAD_MASK 0x3e00000
+#define PHYAD_MASK 0x1f0000
+#define MIIRDATA_MASK 0xffff
+
+/* REG_PHY_WRITE_DATA */
+#define MIIWDATA_MASK 0xffff
+
+/* REG_FLOW_CTRL */
+#define PAUSE_TIME_MASK 0xffff0000
+#define FC_HIGH_MASK 0xf000
+#define FC_LOW_MASK 0xf00
+#define RX_PAUSE BIT(4) /* receive pause frame */
+#define TX_PAUSED BIT(3) /* transmit pause due to receive */
+#define FCTHR_EN BIT(2) /* enable threshold mode. */
+#define TX_PAUSE BIT(1) /* transmit pause frame */
+#define FC_EN BIT(0) /* flow control mode enable */
+
+/* REG_BACK_PRESSURE */
+#define BACKP_LOW_MASK 0xf00
+#define BACKP_JAM_LEN_MASK 0xf0
+#define BACKP_MODE BIT(1) /* address mode */
+#define BACKP_ENABLE BIT(0)
+
+/* REG_TEST_SEED */
+#define TEST_SEED_MASK 0x3fff
+
+/* REG_DMA_FIFO_STATE */
+#define TX_DMA_REQUEST BIT(31)
+#define RX_DMA_REQUEST BIT(30)
+#define TX_DMA_GRANT BIT(29)
+#define RX_DMA_GRANT BIT(28)
+#define TX_FIFO_EMPTY BIT(27)
+#define RX_FIFO_EMPTY BIT(26)
+#define TX_DMA2_SM_MASK 0x7000
+#define TX_DMA1_SM_MASK 0xf00
+#define RX_DMA2_SM_MASK 0x70
+#define RX_DMA1_SM_MASK 0xF
+
+/* REG_TEST_MODE */
+#define SINGLE_PKT BIT(26) /* single packet mode */
+#define PTIMER_TEST BIT(25) /* automatic polling timer test mode */
+#define ITIMER_TEST BIT(24) /* interrupt timer test mode */
+#define TEST_SEED_SELECT BIT(22)
+#define SEED_SELECT BIT(21)
+#define TEST_MODE BIT(20)
+#define TEST_TIME_MASK 0xffc00
+#define TEST_EXCEL_MASK 0x3e0
+
+/* REG_TX_COL_COUNTER */
+#define TX_MCOL_MASK 0xffff0000
+#define TX_MCOL_SHIFT_BIT 16
+#define TX_SCOL_MASK 0xffff
+#define TX_SCOL_SHIFT_BIT 0
+
+/* REG_RPF_AEP_COUNTER */
+#define RPF_MASK 0xffff0000
+#define RPF_SHIFT_BIT 16
+#define AEP_MASK 0xffff
+#define AEP_SHIFT_BIT 0
+
+/* REG_XM_PG_COUNTER */
+#define XM_MASK 0xffff0000
+#define XM_SHIFT_BIT 16
+#define PG_MASK 0xffff
+#define PG_SHIFT_BIT 0
+
+/* REG_RUNT_TLC_COUNTER */
+#define RUNT_CNT_MASK 0xffff0000
+#define RUNT_CNT_SHIFT_BIT 16
+#define TLCC_MASK 0xffff
+#define TLCC_SHIFT_BIT 0
+
+/* REG_CRC_FTL_COUNTER */
+#define CRCER_CNT_MASK 0xffff0000
+#define CRCER_CNT_SHIFT_BIT 16
+#define FTL_CNT_MASK 0xffff
+#define FTL_CNT_SHIFT_BIT 0
+
+/* REG_RLC_RCC_COUNTER */
+#define RLC_MASK 0xffff0000
+#define RLC_SHIFT_BIT 16
+#define RCC_MASK 0xffff
+#define RCC_SHIFT_BIT 0
+
+/* REG_PHY_STATUS */
+#define AN_COMPLETE 0x20
+#define LINK_STATUS 0x4
+
+struct moxart_mac_priv_t {
+ void __iomem *base;
+ struct net_device_stats stats;
+ unsigned int reg_maccr;
+ unsigned int reg_imr;
+ struct napi_struct napi;
+ struct net_device *ndev;
+
+ dma_addr_t rx_base;
+ dma_addr_t rx_mapping[RX_DESC_NUM];
+ void __iomem *rx_desc_base;
+ unsigned char *rx_buf_base;
+ unsigned char *rx_buf[RX_DESC_NUM];
+ unsigned int rx_head;
+ unsigned int rx_buf_size;
+
+ dma_addr_t tx_base;
+ dma_addr_t tx_mapping[TX_DESC_NUM];
+ void __iomem *tx_desc_base;
+ unsigned char *tx_buf_base;
+ unsigned char *tx_buf[RX_DESC_NUM];
+ unsigned int tx_head;
+ unsigned int tx_buf_size;
+
+ spinlock_t txlock;
+ unsigned int tx_len[TX_DESC_NUM];
+ struct sk_buff *tx_skb[TX_DESC_NUM];
+ unsigned int tx_tail;
+};
+
+#if TX_BUF_SIZE >= TX_BUF_SIZE_MAX
+#error MOXA ART Ethernet device driver TX buffer is too large!
+#endif
+#if RX_BUF_SIZE >= RX_BUF_SIZE_MAX
+#error MOXA ART Ethernet device driver RX buffer is too large!
+#endif
+
+#endif
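As an aside on the register map above: the *_MASK / *_SHIFT_BIT pairs are meant to be combined to pull individual counter fields out of a 32-bit register value. The helper below is a minimal sketch and is not part of the patch; the function name is invented for illustration.

/* Illustrative only: extract the multiple-collision count from a
 * REG_TX_COL_COUNTER value using the mask/shift constants defined above.
 */
static inline unsigned int moxart_tx_multi_collisions(u32 tx_col_counter)
{
	return (tx_col_counter & TX_MCOL_MASK) >> TX_MCOL_SHIFT_BIT;
}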
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 967bae8b85c..149355b52ad 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -74,6 +74,7 @@
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
+#include <net/busy_poll.h>
#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"
@@ -194,6 +195,21 @@ struct myri10ge_slice_state {
int cpu;
__be32 __iomem *dca_tag;
#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned int state;
+#define SLICE_STATE_IDLE 0
+#define SLICE_STATE_NAPI 1 /* NAPI owns this slice */
+#define SLICE_STATE_POLL 2 /* poll owns this slice */
+#define SLICE_LOCKED (SLICE_STATE_NAPI | SLICE_STATE_POLL)
+#define SLICE_STATE_NAPI_YIELD 4 /* NAPI yielded this slice */
+#define SLICE_STATE_POLL_YIELD 8 /* poll yielded this slice */
+#define SLICE_USER_PEND (SLICE_STATE_POLL | SLICE_STATE_POLL_YIELD)
+ spinlock_t lock;
+ unsigned long lock_napi_yield;
+ unsigned long lock_poll_yield;
+ unsigned long busy_poll_miss;
+ unsigned long busy_poll_cnt;
+#endif /* CONFIG_NET_RX_BUSY_POLL */
char irq_desc[32];
};
@@ -244,7 +260,7 @@ struct myri10ge_priv {
int fw_ver_minor;
int fw_ver_tiny;
int adopted_rx_filter_bug;
- u8 mac_addr[6]; /* eeprom mac address */
+ u8 mac_addr[ETH_ALEN]; /* eeprom mac address */
unsigned long serial_number;
int vendor_specific_offset;
int fw_multicast_support;
@@ -909,6 +925,92 @@ abort:
return status;
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
+{
+ spin_lock_init(&ss->lock);
+ ss->state = SLICE_STATE_IDLE;
+}
+
+static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
+{
+ int rc = true;
+ spin_lock(&ss->lock);
+ if ((ss->state & SLICE_LOCKED)) {
+ WARN_ON((ss->state & SLICE_STATE_NAPI));
+ ss->state |= SLICE_STATE_NAPI_YIELD;
+ rc = false;
+ ss->lock_napi_yield++;
+ } else
+ ss->state = SLICE_STATE_NAPI;
+ spin_unlock(&ss->lock);
+ return rc;
+}
+
+static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
+{
+ spin_lock(&ss->lock);
+ WARN_ON((ss->state & (SLICE_STATE_POLL | SLICE_STATE_NAPI_YIELD)));
+ ss->state = SLICE_STATE_IDLE;
+ spin_unlock(&ss->lock);
+}
+
+static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
+{
+ int rc = true;
+ spin_lock_bh(&ss->lock);
+ if ((ss->state & SLICE_LOCKED)) {
+ ss->state |= SLICE_STATE_POLL_YIELD;
+ rc = false;
+ ss->lock_poll_yield++;
+ } else
+ ss->state |= SLICE_STATE_POLL;
+ spin_unlock_bh(&ss->lock);
+ return rc;
+}
+
+static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
+{
+ spin_lock_bh(&ss->lock);
+ WARN_ON((ss->state & SLICE_STATE_NAPI));
+ ss->state = SLICE_STATE_IDLE;
+ spin_unlock_bh(&ss->lock);
+}
+
+static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
+{
+ WARN_ON(!(ss->state & SLICE_LOCKED));
+ return (ss->state & SLICE_USER_PEND);
+}
+#else /* CONFIG_NET_RX_BUSY_POLL */
+static inline void myri10ge_ss_init_lock(struct myri10ge_slice_state *ss)
+{
+}
+
+static inline bool myri10ge_ss_lock_napi(struct myri10ge_slice_state *ss)
+{
+ return false;
+}
+
+static inline void myri10ge_ss_unlock_napi(struct myri10ge_slice_state *ss)
+{
+}
+
+static inline bool myri10ge_ss_lock_poll(struct myri10ge_slice_state *ss)
+{
+ return false;
+}
+
+static inline void myri10ge_ss_unlock_poll(struct myri10ge_slice_state *ss)
+{
+}
+
+static inline bool myri10ge_ss_busy_polling(struct myri10ge_slice_state *ss)
+{
+ return false;
+}
+#endif
+
static int myri10ge_reset(struct myri10ge_priv *mgp)
{
struct myri10ge_cmd cmd;
@@ -1300,6 +1402,8 @@ myri10ge_vlan_rx(struct net_device *dev, void *addr, struct sk_buff *skb)
}
}
+#define MYRI10GE_HLEN 64 /* Bytes to copy from page to skb linear memory */
+
static inline int
myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
{
@@ -1311,6 +1415,7 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
struct pci_dev *pdev = mgp->pdev;
struct net_device *dev = mgp->dev;
u8 *va;
+ bool polling;
if (len <= mgp->small_bytes) {
rx = &ss->rx_small;
@@ -1325,7 +1430,15 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
va = page_address(rx->info[idx].page) + rx->info[idx].page_offset;
prefetch(va);
- skb = napi_get_frags(&ss->napi);
+ /* When busy polling in user context, allocate skb and copy headers to
+ * skb's linear memory ourselves. When not busy polling, use the napi
+ * gro api.
+ */
+ polling = myri10ge_ss_busy_polling(ss);
+ if (polling)
+ skb = netdev_alloc_skb(dev, MYRI10GE_HLEN + 16);
+ else
+ skb = napi_get_frags(&ss->napi);
if (unlikely(skb == NULL)) {
ss->stats.rx_dropped++;
for (i = 0, remainder = len; remainder > 0; i++) {
@@ -1364,8 +1477,29 @@ myri10ge_rx_done(struct myri10ge_slice_state *ss, int len, __wsum csum)
}
myri10ge_vlan_rx(mgp->dev, va, skb);
skb_record_rx_queue(skb, ss - &mgp->ss[0]);
+ skb_mark_napi_id(skb, &ss->napi);
+
+ if (polling) {
+ int hlen;
+
+ /* myri10ge_vlan_rx might have moved the header, so compute
+ * length and address again.
+ */
+ hlen = MYRI10GE_HLEN > skb->len ? skb->len : MYRI10GE_HLEN;
+ va = page_address(skb_frag_page(&rx_frags[0])) +
+ rx_frags[0].page_offset;
+ /* Copy header into the skb linear memory */
+ skb_copy_to_linear_data(skb, va, hlen);
+ rx_frags[0].page_offset += hlen;
+ rx_frags[0].size -= hlen;
+ skb->data_len -= hlen;
+ skb->tail += hlen;
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_receive_skb(skb);
+ }
+ else
+ napi_gro_frags(&ss->napi);
- napi_gro_frags(&ss->napi);
return 1;
}
@@ -1524,10 +1658,14 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
if (ss->mgp->dca_enabled)
myri10ge_update_dca(ss);
#endif
+ /* Try later if the busy_poll handler is running. */
+ if (!myri10ge_ss_lock_napi(ss))
+ return budget;
/* process as many rx events as NAPI will allow */
work_done = myri10ge_clean_rx_done(ss, budget);
+ myri10ge_ss_unlock_napi(ss);
if (work_done < budget) {
napi_complete(napi);
put_be32(htonl(3), ss->irq_claim);
@@ -1535,6 +1673,34 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
return work_done;
}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static int myri10ge_busy_poll(struct napi_struct *napi)
+{
+ struct myri10ge_slice_state *ss =
+ container_of(napi, struct myri10ge_slice_state, napi);
+ struct myri10ge_priv *mgp = ss->mgp;
+ int work_done;
+
+ /* Poll only when the link is up */
+ if (mgp->link_state != MXGEFW_LINK_UP)
+ return LL_FLUSH_FAILED;
+
+ if (!myri10ge_ss_lock_poll(ss))
+ return LL_FLUSH_BUSY;
+
+ /* Process a small number of packets */
+ work_done = myri10ge_clean_rx_done(ss, 4);
+ if (work_done)
+ ss->busy_poll_cnt += work_done;
+ else
+ ss->busy_poll_miss++;
+
+ myri10ge_ss_unlock_poll(ss);
+
+ return work_done;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
static irqreturn_t myri10ge_intr(int irq, void *arg)
{
struct myri10ge_slice_state *ss = arg;
@@ -1742,6 +1908,10 @@ static const char myri10ge_gstrings_slice_stats[][ETH_GSTRING_LEN] = {
"tx_pkt_start", "tx_pkt_done", "tx_req", "tx_done",
"rx_small_cnt", "rx_big_cnt",
"wake_queue", "stop_queue", "tx_linearized",
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ "rx_lock_napi_yield", "rx_lock_poll_yield", "rx_busy_poll_miss",
+ "rx_busy_poll_cnt",
+#endif
};
#define MYRI10GE_NET_STATS_LEN 21
@@ -1842,6 +2012,12 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
data[i++] = (unsigned int)ss->tx.wake_queue;
data[i++] = (unsigned int)ss->tx.stop_queue;
data[i++] = (unsigned int)ss->tx.linearized;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ data[i++] = ss->lock_napi_yield;
+ data[i++] = ss->lock_poll_yield;
+ data[i++] = ss->busy_poll_miss;
+ data[i++] = ss->busy_poll_cnt;
+#endif
}
}
@@ -2405,6 +2581,9 @@ static int myri10ge_open(struct net_device *dev)
goto abort_with_rings;
}
+ /* Initialize the slice spinlock and state used for polling */
+ myri10ge_ss_init_lock(ss);
+
/* must happen prior to any irq */
napi_enable(&(ss)->napi);
}
@@ -2481,9 +2660,19 @@ static int myri10ge_close(struct net_device *dev)
del_timer_sync(&mgp->watchdog_timer);
mgp->running = MYRI10GE_ETH_STOPPING;
+ local_bh_disable(); /* myri10ge_ss_lock_napi needs bh disabled */
for (i = 0; i < mgp->num_slices; i++) {
napi_disable(&mgp->ss[i].napi);
+ /* Lock the slice to prevent the busy_poll handler from
+ * accessing it. Later when we bring the NIC up, myri10ge_open
+ * resets the slice including this lock.
+ */
+ while (!myri10ge_ss_lock_napi(&mgp->ss[i])) {
+ pr_info("Slice %d locked\n", i);
+ mdelay(1);
+ }
}
+ local_bh_enable();
netif_carrier_off(dev);
netif_tx_stop_all_queues(dev);
@@ -3569,8 +3758,11 @@ static void myri10ge_free_slices(struct myri10ge_priv *mgp)
ss->fw_stats, ss->fw_stats_bus);
ss->fw_stats = NULL;
}
+ napi_hash_del(&ss->napi);
netif_napi_del(&ss->napi);
}
+ /* Wait till napi structs are no longer used, and then free ss. */
+ synchronize_rcu();
kfree(mgp->ss);
mgp->ss = NULL;
}
@@ -3591,9 +3783,9 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
for (i = 0; i < mgp->num_slices; i++) {
ss = &mgp->ss[i];
bytes = mgp->max_intr_slots * sizeof(*ss->rx_done.entry);
- ss->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
- &ss->rx_done.bus,
- GFP_KERNEL | __GFP_ZERO);
+ ss->rx_done.entry = dma_zalloc_coherent(&pdev->dev, bytes,
+ &ss->rx_done.bus,
+ GFP_KERNEL);
if (ss->rx_done.entry == NULL)
goto abort;
bytes = sizeof(*ss->fw_stats);
@@ -3606,6 +3798,7 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
ss->dev = mgp->dev;
netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
myri10ge_napi_weight);
+ napi_hash_add(&ss->napi);
}
return 0;
abort:
@@ -3625,13 +3818,12 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
struct pci_dev *pdev = mgp->pdev;
char *old_fw;
bool old_allocated;
- int i, status, ncpus, msix_cap;
+ int i, status, ncpus;
mgp->num_slices = 1;
- msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
ncpus = netif_get_num_default_rss_queues();
- if (myri10ge_max_slices == 1 || msix_cap == 0 ||
+ if (myri10ge_max_slices == 1 || !pdev->msix_cap ||
(myri10ge_max_slices == -1 && ncpus < 2))
return;
@@ -3749,6 +3941,9 @@ static const struct net_device_ops myri10ge_netdev_ops = {
.ndo_change_mtu = myri10ge_change_mtu,
.ndo_set_rx_mode = myri10ge_set_multicast_list,
.ndo_set_mac_address = myri10ge_set_mac_address,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ .ndo_busy_poll = myri10ge_busy_poll,
+#endif
};
static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
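The busy-poll support added above is only exercised when an application opts in. Below is a minimal user-space sketch, assuming a kernel built with CONFIG_NET_RX_BUSY_POLL and headers that expose SO_BUSY_POLL; the 50 microsecond budget is an arbitrary example, and the net.core.busy_read / net.core.busy_poll sysctls offer a system-wide alternative.

/* Illustrative only: request up to 50 us of busy polling on one socket. */
#include <sys/socket.h>

static int enable_busy_poll(int sockfd)
{
	unsigned int usecs = 50;

	return setsockopt(sockfd, SOL_SOCKET, SO_BUSY_POLL,
			  &usecs, sizeof(usecs));
}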
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index dc2c6f561e9..e6f0a4366f9 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -390,7 +390,7 @@ static int netx_eth_drv_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
- pdata = (struct netxeth_platform_data *)pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
priv->xc = request_xc(pdata->xcno, &pdev->dev);
if (!priv->xc) {
dev_err(&pdev->dev, "unable to request xc engine\n");
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index e88bdb1aa66..79645f74b3a 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -922,7 +922,7 @@ static void __init get_mac_address(struct net_device *dev)
{
struct w90p910_ether *ether = netdev_priv(dev);
struct platform_device *pdev;
- char addr[6];
+ char addr[ETH_ALEN];
pdev = ether->pdev;
@@ -934,7 +934,7 @@ static void __init get_mac_address(struct net_device *dev)
addr[5] = 0xa8;
if (is_valid_ether_addr(addr))
- memcpy(dev->dev_addr, &addr, 0x06);
+ memcpy(dev->dev_addr, &addr, ETH_ALEN);
else
dev_err(&pdev->dev, "invalid mac address\n");
}
@@ -1014,7 +1014,7 @@ static int w90p910_ether_probe(struct platform_device *pdev)
if (ether->rxirq < 0) {
dev_err(&pdev->dev, "failed to get ether rx irq\n");
error = -ENXIO;
- goto failed_free_txirq;
+ goto failed_free_io;
}
platform_set_drvdata(pdev, dev);
@@ -1023,7 +1023,7 @@ static int w90p910_ether_probe(struct platform_device *pdev)
if (IS_ERR(ether->clk)) {
dev_err(&pdev->dev, "failed to get ether clock\n");
error = PTR_ERR(ether->clk);
- goto failed_free_rxirq;
+ goto failed_free_io;
}
ether->rmiiclk = clk_get(&pdev->dev, "RMII");
@@ -1049,10 +1049,6 @@ failed_put_rmiiclk:
clk_put(ether->rmiiclk);
failed_put_clk:
clk_put(ether->clk);
-failed_free_rxirq:
- free_irq(ether->rxirq, pdev);
-failed_free_txirq:
- free_irq(ether->txirq, pdev);
failed_free_io:
iounmap(ether->reg);
failed_free_mem:
@@ -1075,9 +1071,6 @@ static int w90p910_ether_remove(struct platform_device *pdev)
iounmap(ether->reg);
release_mem_region(ether->res->start, resource_size(ether->res));
- free_irq(ether->txirq, dev);
- free_irq(ether->rxirq, dev);
-
del_timer_sync(&ether->check_timer);
free_netdev(dev);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index cb22341a14a..a588ffde970 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -4,7 +4,7 @@
config PCH_GBE
tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
- depends on PCI
+ depends on PCI && (X86 || COMPILE_TEST)
select MII
select PTP_1588_CLOCK_PCH
---help---
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 7779036690c..6797b107587 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -582,6 +582,19 @@ struct pch_gbe_hw_stats {
};
/**
+ * struct pch_gbe_privdata - PCI Device ID driver data
+ * @phy_tx_clk_delay: Bool, configure the PHY TX delay in software
+ * @phy_disable_hibernate: Bool, disable PHY hibernation
+ * @platform_init: Platform initialization callback, called from
+ * probe, prior to PHY initialization.
+ */
+struct pch_gbe_privdata {
+ bool phy_tx_clk_delay;
+ bool phy_disable_hibernate;
+ int (*platform_init)(struct pci_dev *pdev);
+};
+
+/**
* struct pch_gbe_adapter - board specific private data structure
* @stats_lock: Spinlock structure for status
* @ethtool_lock: Spinlock structure for ethtool
@@ -604,6 +617,7 @@ struct pch_gbe_hw_stats {
* @rx_buffer_len: Receive buffer length
* @tx_queue_len: Transmit queue length
* @have_msi: PCI MSI mode flag
+ * @pdata:			PCI Device ID driver_data
*/
struct pch_gbe_adapter {
@@ -631,6 +645,7 @@ struct pch_gbe_adapter {
int hwts_tx_en;
int hwts_rx_en;
struct pci_dev *ptp_pdev;
+ struct pch_gbe_privdata *pdata;
};
#define pch_gbe_hw_to_adapter(hw) container_of(hw, struct pch_gbe_adapter, hw)
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 1129db0cdf8..f0ceb89af93 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -118,6 +118,7 @@ static int pch_gbe_set_settings(struct net_device *netdev,
* filled by get_settings() on a down link, speed is -1: */
if (speed == UINT_MAX) {
speed = SPEED_1000;
+ ethtool_cmd_speed_set(ecmd, speed);
ecmd->duplex = DUPLEX_FULL;
}
ret = mii_ethtool_sset(&adapter->mii, ecmd);
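For context on the one-line fix above: a forced speed has to be written back with ethtool_cmd_speed_set() because the value is carried in the speed/speed_hi pair of struct ethtool_cmd, and mii_ethtool_sset() reads it back through ethtool_cmd_speed(). A minimal sketch, assuming <linux/ethtool.h>; the function name is illustrative.

/* Illustrative only: round-trip a forced speed through the helpers. */
static void example_force_gigabit(struct ethtool_cmd *ecmd)
{
	ethtool_cmd_speed_set(ecmd, SPEED_1000);	/* fills speed + speed_hi */
	WARN_ON(ethtool_cmd_speed(ecmd) != SPEED_1000);
}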
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index ab1039a95bf..5a0f04c2c81 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_classify.h>
+#include <linux/gpio.h>
#define DRV_VERSION "1.01"
const char pch_driver_version[] = DRV_VERSION;
@@ -111,6 +112,8 @@ const char pch_driver_version[] = DRV_VERSION;
#define PTP_L4_MULTICAST_SA "01:00:5e:00:01:81"
#define PTP_L2_MULTICAST_SA "01:1b:19:00:00:00"
+#define MINNOW_PHY_RESET_GPIO 13
+
static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
@@ -682,7 +685,7 @@ static int pch_gbe_init_phy(struct pch_gbe_adapter *adapter)
}
adapter->hw.phy.addr = adapter->mii.phy_id;
netdev_dbg(netdev, "phy_addr = %d\n", adapter->mii.phy_id);
- if (addr == 32)
+ if (addr == PCH_GBE_PHY_REGS_LEN)
return -EAGAIN;
/* Selected the phy and isolate the rest */
for (addr = 0; addr < PCH_GBE_PHY_REGS_LEN; addr++) {
@@ -1488,9 +1491,9 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
bufsz = adapter->rx_buffer_len;
size = rx_ring->count * bufsz + PCH_GBE_RESERVE_MEMORY;
- rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
- &rx_ring->rx_buff_pool_logic,
- GFP_KERNEL | __GFP_ZERO);
+ rx_ring->rx_buff_pool =
+ dma_zalloc_coherent(&pdev->dev, size,
+ &rx_ring->rx_buff_pool_logic, GFP_KERNEL);
if (!rx_ring->rx_buff_pool)
return -ENOMEM;
@@ -1804,9 +1807,8 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
- tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
- &tx_ring->dma,
- GFP_KERNEL | __GFP_ZERO);
+ tx_ring->desc = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
+ &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc) {
vfree(tx_ring->buffer_info);
return -ENOMEM;
@@ -1849,9 +1851,8 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
return -ENOMEM;
rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
- rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
- &rx_ring->dma,
- GFP_KERNEL | __GFP_ZERO);
+ rx_ring->desc = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
+ &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) {
vfree(rx_ring->buffer_info);
return -ENOMEM;
@@ -2635,6 +2636,9 @@ static int pch_gbe_probe(struct pci_dev *pdev,
adapter->pdev = pdev;
adapter->hw.back = adapter;
adapter->hw.reg = pcim_iomap_table(pdev)[PCH_GBE_PCI_BAR];
+ adapter->pdata = (struct pch_gbe_privdata *)pci_id->driver_data;
+ if (adapter->pdata && adapter->pdata->platform_init)
+ adapter->pdata->platform_init(pdev);
adapter->ptp_pdev = pci_get_bus_and_slot(adapter->pdev->bus->number,
PCI_DEVFN(12, 4));
@@ -2710,6 +2714,10 @@ static int pch_gbe_probe(struct pci_dev *pdev,
dev_dbg(&pdev->dev, "PCH Network Connection\n");
+ /* Disable hibernation on certain platforms */
+ if (adapter->pdata && adapter->pdata->phy_disable_hibernate)
+ pch_gbe_phy_disable_hibernate(&adapter->hw);
+
device_set_wakeup_enable(&pdev->dev, 1);
return 0;
@@ -2720,9 +2728,48 @@ err_free_netdev:
return ret;
}
+/* The AR803X PHY on the MinnowBoard requires a physical pin to be toggled to
+ * ensure it is awake for probe and init. Request the line and reset the PHY.
+ */
+static int pch_gbe_minnow_platform_init(struct pci_dev *pdev)
+{
+ unsigned long flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH | GPIOF_EXPORT;
+ unsigned gpio = MINNOW_PHY_RESET_GPIO;
+ int ret;
+
+ ret = devm_gpio_request_one(&pdev->dev, gpio, flags,
+ "minnow_phy_reset");
+ if (ret) {
+ dev_err(&pdev->dev,
+ "ERR: Can't request PHY reset GPIO line '%d'\n", gpio);
+ return ret;
+ }
+
+ gpio_set_value(gpio, 0);
+ usleep_range(1250, 1500);
+ gpio_set_value(gpio, 1);
+ usleep_range(1250, 1500);
+
+ return ret;
+}
+
+static struct pch_gbe_privdata pch_gbe_minnow_privdata = {
+ .phy_tx_clk_delay = true,
+ .phy_disable_hibernate = true,
+ .platform_init = pch_gbe_minnow_platform_init,
+};
+
static DEFINE_PCI_DEVICE_TABLE(pch_gbe_pcidev_id) = {
{.vendor = PCI_VENDOR_ID_INTEL,
.device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
+ .subvendor = PCI_VENDOR_ID_CIRCUITCO,
+ .subdevice = PCI_SUBSYSTEM_ID_CIRCUITCO_MINNOWBOARD,
+ .class = (PCI_CLASS_NETWORK_ETHERNET << 8),
+ .class_mask = (0xFFFF00),
+ .driver_data = (kernel_ulong_t)&pch_gbe_minnow_privdata
+ },
+ {.vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_IOH1_GBE,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.class = (PCI_CLASS_NETWORK_ETHERNET << 8),
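On the repeated allocator conversion in this file (also seen in myri10ge and pasemi above): dma_zalloc_coherent() is simply dma_alloc_coherent() with __GFP_ZERO folded in, so the change is cosmetic. A minimal sketch; dev, size and dma_handle are placeholders.

/* Illustrative only: the two calls below return equivalent zeroed buffers. */
void *buf;

buf = dma_zalloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
buf = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL | __GFP_ZERO);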
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
index da079073a6c..8b7ff75fc8e 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c
@@ -74,6 +74,15 @@
#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
+/* AR8031 PHY Debug Registers */
+#define PHY_AR803X_ID 0x00001374
+#define PHY_AR8031_DBG_OFF 0x1D
+#define PHY_AR8031_DBG_DAT 0x1E
+#define PHY_AR8031_SERDES 0x05
+#define PHY_AR8031_HIBERNATE 0x0B
+#define PHY_AR8031_SERDES_TX_CLK_DLY 0x0100 /* TX clock delay of 2.0ns */
+#define PHY_AR8031_PS_HIB_EN 0x8000 /* Hibernate enable */
+
/* Phy Id Register (word 2) */
#define PHY_REVISION_MASK 0x000F
@@ -249,6 +258,51 @@ void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw)
}
/**
+ * pch_gbe_phy_tx_clk_delay - Setup TX clock delay via the PHY
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successful.
+ * -EINVAL: Invalid argument.
+ */
+static int pch_gbe_phy_tx_clk_delay(struct pch_gbe_hw *hw)
+{
+ /* The RGMII interface requires a ~2ns TX clock delay. This is typically
+ * done in layout with a longer trace or via PHY strapping, but can also
+ * be done via PHY configuration registers.
+ */
+ struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
+ u16 mii_reg;
+ int ret = 0;
+
+ switch (hw->phy.id) {
+ case PHY_AR803X_ID:
+ netdev_dbg(adapter->netdev,
+ "Configuring AR803X PHY for 2ns TX clock delay\n");
+ pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_OFF, &mii_reg);
+ ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF,
+ PHY_AR8031_SERDES);
+ if (ret)
+ break;
+
+ pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &mii_reg);
+ mii_reg |= PHY_AR8031_SERDES_TX_CLK_DLY;
+ ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT,
+ mii_reg);
+ break;
+ default:
+ netdev_err(adapter->netdev,
+ "Unknown PHY (%x), could not set TX clock delay\n",
+ hw->phy.id);
+ return -EINVAL;
+ }
+
+ if (ret)
+ netdev_err(adapter->netdev,
+ "Could not configure tx clock delay for PHY\n");
+ return ret;
+}
+
+/**
* pch_gbe_phy_init_setting - PHY initial setting
* @hw: Pointer to the HW structure
*/
@@ -277,4 +331,48 @@ void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw)
pch_gbe_phy_read_reg_miic(hw, PHY_PHYSP_CONTROL, &mii_reg);
mii_reg |= PHYSP_CTRL_ASSERT_CRS_TX;
pch_gbe_phy_write_reg_miic(hw, PHY_PHYSP_CONTROL, mii_reg);
+
+ /* Setup a TX clock delay on certain platforms */
+ if (adapter->pdata && adapter->pdata->phy_tx_clk_delay)
+ pch_gbe_phy_tx_clk_delay(hw);
+}
+
+/**
+ * pch_gbe_phy_disable_hibernate - Disable the PHY low power state
+ * @hw: Pointer to the HW structure
+ * Returns
+ * 0: Successful.
+ * -EINVAL: Invalid argument.
+ */
+int pch_gbe_phy_disable_hibernate(struct pch_gbe_hw *hw)
+{
+ struct pch_gbe_adapter *adapter = pch_gbe_hw_to_adapter(hw);
+ u16 mii_reg;
+ int ret = 0;
+
+ switch (hw->phy.id) {
+ case PHY_AR803X_ID:
+ netdev_dbg(adapter->netdev,
+ "Disabling hibernation for AR803X PHY\n");
+ ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF,
+ PHY_AR8031_HIBERNATE);
+ if (ret)
+ break;
+
+ pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, &mii_reg);
+ mii_reg &= ~PHY_AR8031_PS_HIB_EN;
+ ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_DAT,
+ mii_reg);
+ break;
+ default:
+ netdev_err(adapter->netdev,
+ "Unknown PHY (%x), could not disable hibernation\n",
+ hw->phy.id);
+ return -EINVAL;
+ }
+
+ if (ret)
+ netdev_err(adapter->netdev,
+ "Could not disable PHY hibernation\n");
+ return ret;
}
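Both new functions above use the same indirect access pattern for the AR803x debug registers: write the debug offset to PHY_AR8031_DBG_OFF (0x1D), then read or modify the payload through PHY_AR8031_DBG_DAT (0x1E). Below is a minimal sketch of that pattern, assuming the register accessors shown in the diff; the helper name is invented.

/* Illustrative only: read one AR803x debug register indirectly. */
static int ar803x_dbg_read(struct pch_gbe_hw *hw, u16 dbg_reg, u16 *val)
{
	int ret;

	ret = pch_gbe_phy_write_reg_miic(hw, PHY_AR8031_DBG_OFF, dbg_reg);
	if (ret)
		return ret;

	pch_gbe_phy_read_reg_miic(hw, PHY_AR8031_DBG_DAT, val);
	return 0;
}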
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
index 03264dc7b5e..0cbe69206e0 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.h
@@ -33,5 +33,6 @@ void pch_gbe_phy_power_up(struct pch_gbe_hw *hw);
void pch_gbe_phy_power_down(struct pch_gbe_hw *hw);
void pch_gbe_phy_set_rgmii(struct pch_gbe_hw *hw);
void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw);
+int pch_gbe_phy_disable_hibernate(struct pch_gbe_hw *hw);
#endif /* _PCH_GBE_PHY_H_ */
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index a5f0b5da614..c498181a9aa 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -191,7 +191,7 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
struct device_node *dn = pci_device_to_OF_node(pdev);
int len;
const u8 *maddr;
- u8 addr[6];
+ u8 addr[ETH_ALEN];
if (!dn) {
dev_dbg(&pdev->dev,
@@ -201,8 +201,8 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
maddr = of_get_property(dn, "local-mac-address", &len);
- if (maddr && len == 6) {
- memcpy(mac->mac_addr, maddr, 6);
+ if (maddr && len == ETH_ALEN) {
+ memcpy(mac->mac_addr, maddr, ETH_ALEN);
return 0;
}
@@ -219,14 +219,15 @@ static int pasemi_get_mac_addr(struct pasemi_mac *mac)
return -ENOENT;
}
- if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
- &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
+ if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
+ &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
+ != ETH_ALEN) {
dev_warn(&pdev->dev,
"can't parse mac address, not configuring\n");
return -EINVAL;
}
- memcpy(mac->mac_addr, addr, 6);
+ memcpy(mac->mac_addr, addr, ETH_ALEN);
return 0;
}
@@ -439,10 +440,9 @@ static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))
goto out_ring_desc;
- ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
- RX_RING_SIZE * sizeof(u64),
- &ring->buf_dma,
- GFP_KERNEL | __GFP_ZERO);
+ ring->buffers = dma_zalloc_coherent(&mac->dma_pdev->dev,
+ RX_RING_SIZE * sizeof(u64),
+ &ring->buf_dma, GFP_KERNEL);
if (!ring->buffers)
goto out_ring_desc;
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h
index e2f4efa8ad4..f2749d46c12 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.h
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.h
@@ -83,7 +83,7 @@ struct pasemi_mac {
#define MAC_TYPE_GMAC 1
#define MAC_TYPE_XAUI 2
- u8 mac_addr[6];
+ u8 mac_addr[ETH_ALEN];
struct net_lro_mgr lro_mgr;
struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index 0e1797295a4..f59e6be4a66 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -45,6 +45,17 @@ config QLCNIC_SRIOV
This allows for virtual function acceleration in virtualized
environments.
+config QLCNIC_DCB
+ bool "QLOGIC QLCNIC 82XX and 83XX family DCB Support"
+ depends on QLCNIC && DCB
+ default y
+ ---help---
+ This configuration parameter enables DCB support in QLE83XX
+ and QLE82XX Converged Ethernet devices. This allows for DCB
+	  and QLE82XX Converged Ethernet devices. It allows DCB get
+	  operations through the rtnetlink interface. Only the CEE
+	  mode of DCB is supported. PG and PFC values apply to Tx
+	  only.
config QLGE
tristate "QLogic QLGE 10Gb Ethernet Driver Support"
depends on PCI
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
index 3fe09ab2d7c..32675e16021 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h
@@ -1171,7 +1171,6 @@ typedef struct {
#define NETXEN_DB_MAPSIZE_BYTES 0x1000
-#define NETXEN_NETDEV_WEIGHT 128
#define NETXEN_ADAPTER_UP_MAGIC 777
#define NETXEN_NIC_PEG_TUNE 0
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
index 9fbb1cdbfa4..8375cbde996 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
@@ -536,10 +536,10 @@ static void netxen_p2_nic_set_multi(struct net_device *netdev)
{
struct netxen_adapter *adapter = netdev_priv(netdev);
struct netdev_hw_addr *ha;
- u8 null_addr[6];
+ u8 null_addr[ETH_ALEN];
int i;
- memset(null_addr, 0, 6);
+ memset(null_addr, 0, ETH_ALEN);
if (netdev->flags & IFF_PROMISC) {
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index c401b0b4353..cbd75f97ffb 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -197,7 +197,7 @@ netxen_napi_add(struct netxen_adapter *adapter, struct net_device *netdev)
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
netif_napi_add(netdev, &sds_ring->napi,
- netxen_nic_poll, NETXEN_NETDEV_WEIGHT);
+ netxen_nic_poll, NAPI_POLL_WEIGHT);
}
return 0;
@@ -459,16 +459,14 @@ static void netxen_pcie_strap_init(struct netxen_adapter *adapter)
static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
{
u32 control;
- int pos;
- pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
- if (pos) {
- pci_read_config_dword(pdev, pos, &control);
+ if (pdev->msix_cap) {
+ pci_read_config_dword(pdev, pdev->msix_cap, &control);
if (enable)
control |= PCI_MSIX_FLAGS_ENABLE;
else
control = 0;
- pci_write_config_dword(pdev, pos, control);
+ pci_write_config_dword(pdev, pdev->msix_cap, control);
}
}
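The msix_cap conversion above relies on the PCI core caching the MSI-X capability offset in struct pci_dev during enumeration, so the explicit lookup becomes redundant. A minimal sketch of the equivalence being assumed:

/* Illustrative only: both expressions yield the MSI-X capability offset
 * (or 0 when the capability is absent).
 */
int pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
WARN_ON(pos != pdev->msix_cap);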
diff --git a/drivers/net/ethernet/qlogic/qlcnic/Makefile b/drivers/net/ethernet/qlogic/qlcnic/Makefile
index 4b1fb3faa3b..a848d297972 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/Makefile
+++ b/drivers/net/ethernet/qlogic/qlcnic/Makefile
@@ -11,3 +11,5 @@ qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
qlcnic_minidump.o qlcnic_sriov_common.o
qlcnic-$(CONFIG_QLCNIC_SRIOV) += qlcnic_sriov_pf.o
+
+qlcnic-$(CONFIG_QLCNIC_DCB) += qlcnic_dcb.o
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index b00cf5665ea..88349b8fa39 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -20,7 +20,6 @@
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/firmware.h>
-
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/timer.h>
@@ -35,11 +34,12 @@
#include "qlcnic_hdr.h"
#include "qlcnic_hw.h"
#include "qlcnic_83xx_hw.h"
+#include "qlcnic_dcb.h"
#define _QLCNIC_LINUX_MAJOR 5
-#define _QLCNIC_LINUX_MINOR 2
-#define _QLCNIC_LINUX_SUBVERSION 44
-#define QLCNIC_LINUX_VERSIONID "5.2.44"
+#define _QLCNIC_LINUX_MINOR 3
+#define _QLCNIC_LINUX_SUBVERSION 50
+#define QLCNIC_LINUX_VERSIONID "5.3.50"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -98,6 +98,9 @@
#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+ MGMT_CMD_DESC_RESV)
#define QLCNIC_MAX_TX_TIMEOUTS 2
+#define QLCNIC_MAX_TX_RINGS 8
+#define QLCNIC_MAX_SDS_RINGS 8
+
/*
* Following are the states of the Phantom. Phantom will set them and
* Host will read to check if the fields are correct.
@@ -389,7 +392,7 @@ struct qlcnic_dump_template_hdr {
struct qlcnic_fw_dump {
u8 clr; /* flag to indicate if dump is cleared */
- u8 enable; /* enable/disable dump */
+ bool enable; /* enable/disable dump */
u32 size; /* total size of the dump */
void *data; /* dump data area */
struct qlcnic_dump_template_hdr *tmpl_hdr;
@@ -460,14 +463,16 @@ struct qlcnic_hardware_context {
struct qlcnic_fdt fdt;
struct qlc_83xx_reset reset;
struct qlc_83xx_idc idc;
- struct qlc_83xx_fw_info fw_info;
+ struct qlc_83xx_fw_info *fw_info;
struct qlcnic_intrpt_config *intr_tbl;
struct qlcnic_sriov *sriov;
u32 *reg_tbl;
u32 *ext_reg_tbl;
u32 mbox_aen[QLC_83XX_MBX_AEN_CNT];
u32 mbox_reg[4];
- spinlock_t mbx_lock;
+ struct qlcnic_mailbox *mailbox;
+ u8 extend_lb_time;
+ u8 phys_port_id[ETH_ALEN];
};
struct qlcnic_adapter_stats {
@@ -515,6 +520,7 @@ struct qlcnic_host_sds_ring {
u32 num_desc;
void __iomem *crb_sts_consumer;
+ struct qlcnic_host_tx_ring *tx_ring;
struct status_desc *desc_head;
struct qlcnic_adapter *adapter;
struct napi_struct napi;
@@ -532,9 +538,17 @@ struct qlcnic_host_tx_ring {
void __iomem *crb_intr_mask;
char name[IFNAMSIZ + 12];
u16 ctx_id;
+
+ u32 state;
u32 producer;
u32 sw_consumer;
u32 num_desc;
+
+ u64 xmit_on;
+ u64 xmit_off;
+ u64 xmit_called;
+ u64 xmit_finished;
+
void __iomem *crb_cmd_producer;
struct cmd_desc_type0 *desc_head;
struct qlcnic_adapter *adapter;
@@ -559,7 +573,6 @@ struct qlcnic_recv_context {
u32 state;
u16 context_id;
u16 virt_port;
-
};
/* HW context creation */
@@ -604,6 +617,7 @@ struct qlcnic_recv_context {
#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
#define QLCNIC_CAP0_VALIDOFF (1 << 11)
#define QLCNIC_CAP0_LRO_MSS (1 << 21)
+#define QLCNIC_CAP0_TX_MULTI (1 << 22)
/*
* Context state
@@ -631,7 +645,7 @@ struct qlcnic_hostrq_rds_ring {
struct qlcnic_hostrq_rx_ctx {
__le64 host_rsp_dma_addr; /* Response dma'd here */
- __le32 capabilities[4]; /* Flag bit vector */
+ __le32 capabilities[4]; /* Flag bit vector */
__le32 host_int_crb_mode; /* Interrupt crb usage */
__le32 host_rds_crb_mode; /* RDS crb usage */
/* These ring offsets are relative to data[0] below */
@@ -802,6 +816,7 @@ struct qlcnic_mac_list_s {
#define QLCNIC_C2H_OPCODE_CONFIG_LOOPBACK 0x8f
#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 0x8D
+#define QLCNIC_C2H_OPCODE_GET_DCB_AEN 0x90
#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
@@ -814,6 +829,7 @@ struct qlcnic_mac_list_s {
#define QLCNIC_FW_CAPABILITY_BDG BIT_8
#define QLCNIC_FW_CAPABILITY_FVLANTX BIT_9
#define QLCNIC_FW_CAPABILITY_HW_LRO BIT_10
+#define QLCNIC_FW_CAPABILITY_2_MULTI_TX BIT_4
#define QLCNIC_FW_CAPABILITY_MULTI_LOOPBACK BIT_27
#define QLCNIC_FW_CAPABILITY_MORE_CAPS BIT_31
@@ -821,6 +837,7 @@ struct qlcnic_mac_list_s {
#define QLCNIC_FW_CAP2_HW_LRO_IPV6 BIT_3
#define QLCNIC_FW_CAPABILITY_SET_DRV_VER BIT_5
#define QLCNIC_FW_CAPABILITY_2_BEACON BIT_7
+#define QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG BIT_8
/* module types */
#define LINKEVENT_MODULE_NOT_PRESENT 1
@@ -913,6 +930,8 @@ struct qlcnic_ipaddr {
#define QLCNIC_FW_LRO_MSS_CAP 0x8000
#define QLCNIC_TX_INTR_SHARED 0x10000
#define QLCNIC_APP_CHANGED_FLAGS 0x20000
+#define QLCNIC_HAS_PHYS_PORT_ID 0x40000
+
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
#define QLCNIC_IS_TSO_CAPABLE(adapter) \
@@ -922,11 +941,11 @@ struct qlcnic_ipaddr {
#define QLCNIC_BEACON_DISABLE 0xD
#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4
+#define QLCNIC_DEF_NUM_TX_RINGS 4
#define QLCNIC_MSIX_TBL_SPACE 8192
#define QLCNIC_PCI_REG_MSIX_TBL 0x44
#define QLCNIC_MSIX_TBL_PGSIZE 4096
-#define QLCNIC_NETDEV_WEIGHT 128
#define QLCNIC_ADAPTER_UP_MAGIC 777
#define __QLCNIC_FW_ATTACHED 0
@@ -937,10 +956,13 @@ struct qlcnic_ipaddr {
#define __QLCNIC_DIAG_RES_ALLOC 6
#define __QLCNIC_LED_ENABLE 7
#define __QLCNIC_ELB_INPROGRESS 8
+#define __QLCNIC_MULTI_TX_UNIQUE 9
#define __QLCNIC_SRIOV_ENABLE 10
#define __QLCNIC_SRIOV_CAPABLE 11
#define __QLCNIC_MBX_POLL_ENABLE 12
#define __QLCNIC_DIAG_MODE 13
+#define __QLCNIC_DCB_STATE 14
+#define __QLCNIC_DCB_IN_AEN 15
#define QLCNIC_INTERRUPT_TEST 1
#define QLCNIC_LOOPBACK_TEST 2
@@ -950,12 +972,6 @@ struct qlcnic_ipaddr {
#define QLCNIC_READD_AGE 20
#define QLCNIC_LB_MAX_FILTERS 64
#define QLCNIC_LB_BUCKET_SIZE 32
-
-/* QLCNIC Driver Error Code */
-#define QLCNIC_FW_NOT_RESPOND 51
-#define QLCNIC_TEST_IN_PROGRESS 52
-#define QLCNIC_UNDEFINED_ERROR 53
-#define QLCNIC_LB_CABLE_NOT_CONN 54
#define QLCNIC_ILB_MAX_RCV_LOOP 10
struct qlcnic_filter {
@@ -972,6 +988,21 @@ struct qlcnic_filter_hash {
u16 fbucket_size;
};
+/* Mailbox specific data structures */
+struct qlcnic_mailbox {
+ struct workqueue_struct *work_q;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_mbx_ops *ops;
+ struct work_struct work;
+ struct completion completion;
+ struct list_head cmd_q;
+ unsigned long status;
+ spinlock_t queue_lock; /* Mailbox queue lock */
+ spinlock_t aen_lock; /* Mailbox response/AEN lock */
+ atomic_t rsp_status;
+ u32 num_cmds;
+};
+
struct qlcnic_adapter {
struct qlcnic_hardware_context *ahw;
struct qlcnic_recv_context *recv_ctx;
@@ -1035,6 +1066,7 @@ struct qlcnic_adapter {
struct delayed_work fw_work;
struct delayed_work idc_aen_work;
struct delayed_work mbx_poll_work;
+ struct qlcnic_dcb *dcb;
struct qlcnic_filter_hash fhash;
struct qlcnic_filter_hash rx_fhash;
@@ -1152,6 +1184,7 @@ struct qlcnic_pci_info {
};
struct qlcnic_npar_info {
+ bool eswitch_status;
u16 pvid;
u16 min_bw;
u16 max_bw;
@@ -1371,7 +1404,6 @@ struct qlcnic_esw_statistics {
struct __qlcnic_esw_statistics tx;
};
-#define QLCNIC_DUMP_MASK_DEF 0x1f
#define QLCNIC_FORCE_FW_DUMP_KEY 0xdeadfeed
#define QLCNIC_ENABLE_FW_DUMP 0xaddfeed
#define QLCNIC_DISABLE_FW_DUMP 0xbadfeed
@@ -1385,9 +1417,20 @@ struct _cdrp_cmd {
};
struct qlcnic_cmd_args {
- struct _cdrp_cmd req;
- struct _cdrp_cmd rsp;
- int op_type;
+ struct completion completion;
+ struct list_head list;
+ struct _cdrp_cmd req;
+ struct _cdrp_cmd rsp;
+ atomic_t rsp_status;
+ int pay_size;
+ u32 rsp_opcode;
+ u32 total_cmds;
+ u32 op_type;
+ u32 type;
+ u32 cmd_op;
+ u32 *hdr; /* Back channel message header */
+ u32 *pay; /* Back channel message payload */
+ u8 func_num;
};
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter);
@@ -1400,8 +1443,8 @@ void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
#define ADDR_IN_RANGE(addr, low, high) \
(((addr) < (high)) && ((addr) >= (low)))
-#define QLCRD32(adapter, off) \
- (adapter->ahw->hw_ops->read_reg)(adapter, off)
+#define QLCRD32(adapter, off, err) \
+ (adapter->ahw->hw_ops->read_reg)(adapter, off, err)
#define QLCWR32(adapter, off, val) \
adapter->ahw->hw_ops->write_reg(adapter, off, val)
@@ -1435,6 +1478,12 @@ int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter);
void qlcnic_delete_lb_filters(struct qlcnic_adapter *adapter);
int qlcnic_dump_fw(struct qlcnic_adapter *);
+int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *);
+bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *);
+pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *);
+void qlcnic_82xx_io_resume(struct pci_dev *);
/* Functions from qlcnic_init.c */
void qlcnic_schedule_work(struct qlcnic_adapter *, work_func_t, int);
@@ -1462,7 +1511,8 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter);
void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter);
void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
-void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *,
+ struct qlcnic_host_tx_ring *);
int qlcnic_check_fw_status(struct qlcnic_adapter *adapter);
void qlcnic_watchdog_task(struct work_struct *work);
@@ -1474,6 +1524,7 @@ void __qlcnic_set_multi(struct net_device *, u16);
int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16);
int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *);
void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter);
+int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *);
int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *, u32);
@@ -1495,8 +1546,9 @@ int qlcnic_reset_context(struct qlcnic_adapter *);
void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, size_t);
+int qlcnic_set_max_rss(struct qlcnic_adapter *, u8, int);
int qlcnic_validate_max_rss(struct qlcnic_adapter *, __u32);
+int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *, u32 txq);
void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
int qlcnic_enable_msix(struct qlcnic_adapter *, u32);
@@ -1523,6 +1575,7 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *);
void qlcnic_advert_link_change(struct qlcnic_adapter *, int);
void qlcnic_free_tx_rings(struct qlcnic_adapter *);
int qlcnic_alloc_tx_rings(struct qlcnic_adapter *, struct net_device *);
+void qlcnic_dump_mbx(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
@@ -1585,6 +1638,26 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
tx_ring->producer;
}
+static inline int qlcnic_set_real_num_queues(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int err, tx_q;
+
+ tx_q = adapter->max_drv_tx_rings;
+
+ netdev->num_tx_queues = tx_q;
+ netdev->real_num_tx_queues = tx_q;
+
+ err = netif_set_real_num_tx_queues(netdev, tx_q);
+ if (err)
+ dev_err(&adapter->pdev->dev, "failed to set %d Tx queues\n",
+ tx_q);
+ else
+ dev_info(&adapter->pdev->dev, "set %d Tx queues\n", tx_q);
+
+ return err;
+}
+
struct qlcnic_nic_template {
int (*config_bridged_mode) (struct qlcnic_adapter *, u32);
int (*config_led) (struct qlcnic_adapter *, u32, u32);
@@ -1600,15 +1673,29 @@ struct qlcnic_nic_template {
int (*resume)(struct qlcnic_adapter *);
};
+struct qlcnic_mbx_ops {
+ int (*enqueue_cmd) (struct qlcnic_adapter *,
+ struct qlcnic_cmd_args *, unsigned long *);
+ void (*dequeue_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*decode_resp) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*encode_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+ void (*nofity_fw) (struct qlcnic_adapter *, u8);
+};
+
+int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *);
+void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *);
+void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx);
+void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx);
+
/* Adapter hardware abstraction */
struct qlcnic_hardware_ops {
void (*read_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
void (*write_crb) (struct qlcnic_adapter *, char *, loff_t, size_t);
- int (*read_reg) (struct qlcnic_adapter *, ulong);
+ int (*read_reg) (struct qlcnic_adapter *, ulong, int *);
int (*write_reg) (struct qlcnic_adapter *, ulong, u32);
void (*get_ocm_win) (struct qlcnic_hardware_context *);
- int (*get_mac_address) (struct qlcnic_adapter *, u8 *);
- int (*setup_intr) (struct qlcnic_adapter *, u8);
+ int (*get_mac_address) (struct qlcnic_adapter *, u8 *, u8);
+ int (*setup_intr) (struct qlcnic_adapter *, u8, int);
int (*alloc_mbx_args)(struct qlcnic_cmd_args *,
struct qlcnic_adapter *, u32);
int (*mbx_cmd) (struct qlcnic_adapter *, struct qlcnic_cmd_args *);
@@ -1641,6 +1728,11 @@ struct qlcnic_hardware_ops {
int (*get_board_info) (struct qlcnic_adapter *);
void (*set_mac_filter_count) (struct qlcnic_adapter *);
void (*free_mac_list) (struct qlcnic_adapter *);
+ int (*read_phys_port_id) (struct qlcnic_adapter *);
+ pci_ers_result_t (*io_error_detected) (struct pci_dev *,
+ pci_channel_state_t);
+ pci_ers_result_t (*io_slot_reset) (struct pci_dev *);
+ void (*io_resume) (struct pci_dev *);
};
extern struct qlcnic_nic_template qlcnic_vf_ops;
@@ -1662,12 +1754,6 @@ static inline void qlcnic_write_crb(struct qlcnic_adapter *adapter, char *buf,
adapter->ahw->hw_ops->write_crb(adapter, buf, offset, size);
}
-static inline int qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter,
- ulong off)
-{
- return adapter->ahw->hw_ops->read_reg(adapter, off);
-}
-
static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter,
ulong off, u32 data)
{
@@ -1675,14 +1761,15 @@ static inline int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter,
}
static inline int qlcnic_get_mac_address(struct qlcnic_adapter *adapter,
- u8 *mac)
+ u8 *mac, u8 function)
{
- return adapter->ahw->hw_ops->get_mac_address(adapter, mac);
+ return adapter->ahw->hw_ops->get_mac_address(adapter, mac, function);
}
-static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+static inline int qlcnic_setup_intr(struct qlcnic_adapter *adapter,
+ u8 num_intr, int txq)
{
- return adapter->ahw->hw_ops->setup_intr(adapter, num_intr);
+ return adapter->ahw->hw_ops->setup_intr(adapter, num_intr, txq);
}
static inline int qlcnic_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
@@ -1869,7 +1956,14 @@ static inline void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
static inline void qlcnic_set_mac_filter_count(struct qlcnic_adapter *adapter)
{
- adapter->ahw->hw_ops->set_mac_filter_count(adapter);
+ if (adapter->ahw->hw_ops->set_mac_filter_count)
+ adapter->ahw->hw_ops->set_mac_filter_count(adapter);
+}
+
+static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw->hw_ops->read_phys_port_id)
+ adapter->ahw->hw_ops->read_phys_port_id(adapter);
}
static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
@@ -1903,16 +1997,45 @@ static inline void qlcnic_config_ipaddr(struct qlcnic_adapter *adapter,
adapter->nic_ops->config_ipaddr(adapter, ip, cmd);
}
+static inline bool qlcnic_check_multi_tx(struct qlcnic_adapter *adapter)
+{
+ return test_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+}
+
+static inline void qlcnic_disable_multi_tx(struct qlcnic_adapter *adapter)
+{
+ test_and_clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+ adapter->max_drv_tx_rings = 1;
+}
+
+/* When operating in multi Tx mode, the driver needs to write 0x1
+ * to the src register, instead of 0x0, to disable receiving interrupts.
+ */
static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
{
- writel(0, sds_ring->crb_intr_mask);
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))
+ writel(0x1, sds_ring->crb_intr_mask);
+ else
+ writel(0, sds_ring->crb_intr_mask);
}
+/* When operating in multi Tx mode, the driver needs to write 0x0
+ * to the src register, instead of 0x1, to enable receiving interrupts.
+ */
static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
{
struct qlcnic_adapter *adapter = sds_ring->adapter;
- writel(0x1, sds_ring->crb_intr_mask);
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED))
+ writel(0, sds_ring->crb_intr_mask);
+ else
+ writel(0x1, sds_ring->crb_intr_mask);
if (!QLCNIC_IS_MSI_FAMILY(adapter))
writel(0xfbff, adapter->tgt_mask_reg);
@@ -1944,9 +2067,11 @@ extern const struct ethtool_ops qlcnic_ethtool_failed_ops;
__func__, ##_args); \
} while (0)
-#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
+#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
+#define PCI_DEVICE_ID_QLOGIC_QLE834X 0x8030
#define PCI_DEVICE_ID_QLOGIC_VF_QLE834X 0x8430
-#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
+#define PCI_DEVICE_ID_QLOGIC_QLE844X 0x8040
+#define PCI_DEVICE_ID_QLOGIC_VF_QLE844X 0x8440
static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
{
@@ -1954,12 +2079,22 @@ static inline bool qlcnic_82xx_check(struct qlcnic_adapter *adapter)
return (device == PCI_DEVICE_ID_QLOGIC_QLE824X) ? true : false;
}
+static inline bool qlcnic_84xx_check(struct qlcnic_adapter *adapter)
+{
+ unsigned short device = adapter->pdev->device;
+
+ return ((device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
+}
+
static inline bool qlcnic_83xx_check(struct qlcnic_adapter *adapter)
{
unsigned short device = adapter->pdev->device;
bool status;
status = ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_QLE844X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X) ||
(device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X)) ? true : false;
return status;
@@ -1973,7 +2108,105 @@ static inline bool qlcnic_sriov_pf_check(struct qlcnic_adapter *adapter)
static inline bool qlcnic_sriov_vf_check(struct qlcnic_adapter *adapter)
{
unsigned short device = adapter->pdev->device;
+ bool status;
+
+ status = ((device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ||
+ (device == PCI_DEVICE_ID_QLOGIC_VF_QLE844X)) ? true : false;
+
+ return status;
+}
+
+static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->get_hw_capability)
+ return dcb->ops->get_hw_capability(adapter);
+
+ return 0;
+}
+
+static inline void qlcnic_dcb_free(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->free)
+ dcb->ops->free(adapter);
+}
+
+static inline int qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->attach)
+ return dcb->ops->attach(adapter);
+
+ return 0;
+}
+
+static inline int
+qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter, char *buf)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->query_hw_capability)
+ return dcb->ops->query_hw_capability(adapter, buf);
+
+ return 0;
+}
+
+static inline void qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->get_info)
+ dcb->ops->get_info(adapter);
+}
+
+static inline int
+qlcnic_dcb_query_cee_param(struct qlcnic_adapter *adapter, char *buf, u8 type)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->query_cee_param)
+ return dcb->ops->query_cee_param(adapter, buf, type);
+
+ return 0;
+}
+
+static inline int qlcnic_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->get_cee_cfg)
+ return dcb->ops->get_cee_cfg(adapter);
+
+ return 0;
+}
+
+static inline void
+qlcnic_dcb_register_aen(struct qlcnic_adapter *adapter, u8 flag)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->register_aen)
+ dcb->ops->register_aen(adapter, flag);
+}
+
+static inline void qlcnic_dcb_handle_aen(struct qlcnic_adapter *adapter,
+ void *msg)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (dcb && dcb->ops->handle_aen)
+ dcb->ops->handle_aen(adapter, msg);
+}
+
+static inline void qlcnic_dcb_init_dcbnl_ops(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
- return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
+ if (dcb && dcb->ops->init_dcbnl_ops)
+ dcb->ops->init_dcbnl_ops(adapter);
}
#endif /* __QLCNIC_H_ */
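The QLCRD32() change above shifts error reporting into an out-parameter, so a failed indirect read no longer has to be inferred from the returned register value. A minimal caller sketch; reg_offset stands in for any CRB register offset.

/* Illustrative only: callers now check the out-parameter, not the value. */
int err = 0;
u32 val;

val = QLCRD32(adapter, reg_offset, &err);
if (err == -EIO)
	dev_err(&adapter->pdev->dev, "indirect register read failed\n");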
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 3c51b774a65..3ca00e05f23 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -11,6 +11,7 @@
#include <linux/ipv6.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
+#include <linux/aer.h>
#define QLCNIC_MAX_TX_QUEUES 1
#define RSS_HASHTYPE_IP_TCP 0x3
@@ -67,6 +68,8 @@ static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_ADD_RCV_RINGS, 130, 26},
{QLCNIC_CMD_CONFIG_VPORT, 4, 4},
{QLCNIC_CMD_BC_EVENT_SETUP, 2, 1},
+ {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
+ {QLCNIC_CMD_DCB_QUERY_PARAM, 2, 50},
};
const u32 qlcnic_83xx_ext_reg_tbl[] = {
@@ -149,7 +152,7 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
.get_mac_address = qlcnic_83xx_get_mac_address,
.setup_intr = qlcnic_83xx_setup_intr,
.alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
- .mbx_cmd = qlcnic_83xx_mbx_op,
+ .mbx_cmd = qlcnic_83xx_issue_cmd,
.get_func_no = qlcnic_83xx_get_func_no,
.api_lock = qlcnic_83xx_cam_lock,
.api_unlock = qlcnic_83xx_cam_unlock,
@@ -175,6 +178,10 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
.get_board_info = qlcnic_83xx_get_port_info,
.set_mac_filter_count = qlcnic_83xx_set_mac_filter_count,
.free_mac_list = qlcnic_82xx_free_mac_list,
+ .io_error_detected = qlcnic_83xx_io_error_detected,
+ .io_slot_reset = qlcnic_83xx_io_slot_reset,
+ .io_resume = qlcnic_83xx_io_resume,
+
};
static struct qlcnic_nic_template qlcnic_83xx_ops = {
@@ -228,17 +235,17 @@ static int __qlcnic_set_win_base(struct qlcnic_adapter *adapter, u32 addr)
return 0;
}
-int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr)
+int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
+ int *err)
{
- int ret;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- ret = __qlcnic_set_win_base(adapter, (u32) addr);
- if (!ret) {
+ *err = __qlcnic_set_win_base(adapter, (u32) addr);
+ if (!*err) {
return QLCRDX(ahw, QLCNIC_WILDCARD);
} else {
dev_err(&adapter->pdev->dev,
- "%s failed, addr = 0x%x\n", __func__, (int)addr);
+ "%s failed, addr = 0x%lx\n", __func__, addr);
return -EIO;
}
}
@@ -261,7 +268,7 @@ int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *adapter, ulong addr,
}
}
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
{
int err, i, num_msix;
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -362,6 +369,10 @@ static inline void qlcnic_83xx_get_mbx_data(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
int i;
+
+ if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
+ return;
+
for (i = 0; i < cmd->rsp.num; i++)
cmd->rsp.arg[i] = readl(QLCNIC_MBX_FW(adapter->ahw, i));
}
@@ -398,24 +409,33 @@ irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *adapter)
return IRQ_HANDLED;
}
+static inline void qlcnic_83xx_notify_mbx_response(struct qlcnic_mailbox *mbx)
+{
+ atomic_set(&mbx->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+ complete(&mbx->completion);
+}
+
static void qlcnic_83xx_poll_process_aen(struct qlcnic_adapter *adapter)
{
- u32 resp, event;
+ u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
unsigned long flags;
- spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
-
+ spin_lock_irqsave(&mbx->aen_lock, flags);
resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
if (!(resp & QLCNIC_SET_OWNER))
goto out;
event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
- if (event & QLCNIC_MBX_ASYNC_EVENT)
+ if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
-
+ } else {
+ if (atomic_read(&mbx->rsp_status) != rsp_status)
+ qlcnic_83xx_notify_mbx_response(mbx);
+ }
out:
qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
- spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
}
irqreturn_t qlcnic_83xx_intr(int irq, void *data)
@@ -515,7 +535,7 @@ int qlcnic_83xx_setup_mbx_intr(struct qlcnic_adapter *adapter)
}
/* Enable mailbox interrupt */
- qlcnic_83xx_enable_mbx_intrpt(adapter);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
return err;
}
@@ -561,7 +581,7 @@ void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *adapter)
void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
loff_t offset, size_t size)
{
- int ret;
+ int ret = 0;
u32 data;
if (qlcnic_api_lock(adapter)) {
@@ -571,7 +591,7 @@ void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
return;
}
- ret = qlcnic_83xx_rd_reg_indirect(adapter, (u32) offset);
+ data = QLCRD32(adapter, (u32) offset, &ret);
qlcnic_api_unlock(adapter);
if (ret == -EIO) {
@@ -580,7 +600,6 @@ void qlcnic_83xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
__func__, (u32)offset);
return;
}
- data = ret;
memcpy(buf, &data, size);
}
@@ -629,7 +648,7 @@ void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *adapter)
ahw->max_uc_count = count;
}
-void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *adapter)
+void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *adapter)
{
u32 val;
@@ -683,11 +702,14 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
u32 data[]);
-static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
- struct qlcnic_cmd_args *cmd)
+void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
{
int i;
+ if (cmd->op_type == QLC_83XX_MBX_POST_BC_OP)
+ return;
+
dev_info(&adapter->pdev->dev,
"Host MBX regs(%d)\n", cmd->req.num);
for (i = 0; i < cmd->req.num; i++) {
@@ -706,120 +728,73 @@ static void qlcnic_dump_mbx(struct qlcnic_adapter *adapter,
pr_info("\n");
}
-/* Mailbox response for mac rcode */
-u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
+static void qlcnic_83xx_poll_for_mbx_completion(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
{
- u32 fw_data;
- u8 mac_cmd_rcode;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int opcode = LSW(cmd->req.arg[0]);
+ unsigned long max_loops;
- fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
- mac_cmd_rcode = (u8)fw_data;
- if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
- mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
- mac_cmd_rcode == QLC_83XX_MAC_ABSENT)
- return QLCNIC_RCODE_SUCCESS;
- return 1;
-}
+ max_loops = cmd->total_cmds * QLC_83XX_MBX_CMD_LOOP;
-u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time)
-{
- u32 data;
- struct qlcnic_hardware_context *ahw = adapter->ahw;
- /* wait for mailbox completion */
- do {
- data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
- if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) {
- data = QLCNIC_RCODE_TIMEOUT;
- break;
- }
- mdelay(1);
- } while (!data);
- return data;
+ for (; max_loops; max_loops--) {
+ if (atomic_read(&cmd->rsp_status) ==
+ QLC_83XX_MBX_RESPONSE_ARRIVED)
+ return;
+
+ udelay(1);
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd->type, ahw->pci_func, ahw->op_mode);
+ flush_workqueue(ahw->mailbox->work_q);
+ return;
}
-int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
- struct qlcnic_cmd_args *cmd)
+int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
{
- int i;
- u16 opcode;
- u8 mbx_err_code;
- unsigned long flags;
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0;
+ int cmd_type, err, opcode;
+ unsigned long timeout;
opcode = LSW(cmd->req.arg[0]);
- if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
- dev_info(&adapter->pdev->dev,
- "Mailbox cmd attempted, 0x%x\n", opcode);
- dev_info(&adapter->pdev->dev, "Mailbox detached\n");
- return 0;
+ cmd_type = cmd->type;
+ err = mbx->ops->enqueue_cmd(adapter, cmd, &timeout);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox not available, cmd_op=0x%x, cmd_context=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd->type, ahw->pci_func,
+ ahw->op_mode);
+ return err;
}
- spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
- mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
-
- if (mbx_val) {
- QLCDB(adapter, DRV,
- "Mailbox cmd attempted, 0x%x\n", opcode);
- QLCDB(adapter, DRV,
- "Mailbox not available, 0x%x, collect FW dump\n",
- mbx_val);
- cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
- spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
- return cmd->rsp.arg[0];
- }
-
- /* Fill in mailbox registers */
- mbx_cmd = cmd->req.arg[0];
- writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
- for (i = 1; i < cmd->req.num; i++)
- writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
-
- /* Signal FW about the impending command */
- QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
-poll:
- rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
- if (rsp != QLCNIC_RCODE_TIMEOUT) {
- /* Get the FW response data */
- fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
- if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
- __qlcnic_83xx_process_aen(adapter);
- goto poll;
- }
- mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
- rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
- opcode = QLCNIC_MBX_RSP(fw_data);
- qlcnic_83xx_get_mbx_data(adapter, cmd);
-
- switch (mbx_err_code) {
- case QLCNIC_MBX_RSP_OK:
- case QLCNIC_MBX_PORT_RSP_OK:
- rsp = QLCNIC_RCODE_SUCCESS;
- break;
- default:
- if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
- rsp = qlcnic_83xx_mac_rcode(adapter);
- if (!rsp)
- goto out;
- }
+ switch (cmd_type) {
+ case QLC_83XX_MBX_CMD_WAIT:
+ if (!wait_for_completion_timeout(&cmd->completion, timeout)) {
dev_err(&adapter->pdev->dev,
- "MBX command 0x%x failed with err:0x%x\n",
- opcode, mbx_err_code);
- rsp = mbx_err_code;
- qlcnic_dump_mbx(adapter, cmd);
- break;
+ "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd_type, ahw->pci_func,
+ ahw->op_mode);
+ flush_workqueue(mbx->work_q);
}
- goto out;
+ break;
+ case QLC_83XX_MBX_CMD_NO_WAIT:
+ return 0;
+ case QLC_83XX_MBX_CMD_BUSY_WAIT:
+ qlcnic_83xx_poll_for_mbx_completion(adapter, cmd);
+ break;
+ default:
+ dev_err(&adapter->pdev->dev,
+ "%s: Invalid mailbox command, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, opcode, cmd_type, ahw->pci_func,
+ ahw->op_mode);
+ qlcnic_83xx_detach_mailbox_work(adapter);
}
- dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
- QLCNIC_MBX_RSP(mbx_cmd));
- rsp = QLCNIC_RCODE_TIMEOUT;
-out:
- /* clear fw mbx control register */
- QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
- spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
- return rsp;
+ return cmd->rsp_opcode;
}
int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
@@ -829,6 +804,7 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
u32 temp;
const struct qlcnic_mailbox_metadata *mbx_tbl;
+ memset(mbx, 0, sizeof(struct qlcnic_cmd_args));
mbx_tbl = qlcnic_83xx_mbx_tbl;
size = ARRAY_SIZE(qlcnic_83xx_mbx_tbl);
for (i = 0; i < size; i++) {
@@ -851,6 +827,7 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
temp = adapter->ahw->fw_hal_version << 29;
mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
+ mbx->cmd_op = type;
return 0;
}
}
@@ -889,9 +866,9 @@ static void qlcnic_83xx_handle_idc_comp_aen(struct qlcnic_adapter *adapter,
void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 event[QLC_83XX_MBX_AEN_CNT];
int i;
- struct qlcnic_hardware_context *ahw = adapter->ahw;
for (i = 0; i < QLC_83XX_MBX_AEN_CNT; i++)
event[i] = readl(QLCNIC_MBX_FW(ahw, i));
@@ -911,6 +888,7 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
&adapter->idc_aen_work, 0);
break;
case QLCNIC_MBX_TIME_EXTEND_EVENT:
+ ahw->extend_lb_time = event[1] >> 8 & 0xf;
break;
case QLCNIC_MBX_BC_EVENT:
qlcnic_sriov_handle_bc_event(adapter, event[1]);
@@ -923,6 +901,9 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
dev_info(&adapter->pdev->dev, "SFP Removed AEN:0x%x.\n",
QLCNIC_MBX_RSP(event[0]));
break;
+ case QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT:
+ qlcnic_dcb_handle_aen(adapter, (void *)&event[1]);
+ break;
default:
dev_dbg(&adapter->pdev->dev, "Unsupported AEN:0x%x.\n",
QLCNIC_MBX_RSP(event[0]));
@@ -934,20 +915,23 @@ void __qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
static void qlcnic_83xx_process_aen(struct qlcnic_adapter *adapter)
{
+ u32 resp, event, rsp_status = QLC_83XX_MBX_RESPONSE_ARRIVED;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- u32 resp, event;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
unsigned long flags;
- spin_lock_irqsave(&ahw->mbx_lock, flags);
-
+ spin_lock_irqsave(&mbx->aen_lock, flags);
resp = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
if (resp & QLCNIC_SET_OWNER) {
event = readl(QLCNIC_MBX_FW(ahw, 0));
- if (event & QLCNIC_MBX_ASYNC_EVENT)
+ if (event & QLCNIC_MBX_ASYNC_EVENT) {
__qlcnic_83xx_process_aen(adapter);
+ } else {
+ if (atomic_read(&mbx->rsp_status) != rsp_status)
+ qlcnic_83xx_notify_mbx_response(mbx);
+ }
}
-
- spin_unlock_irqrestore(&ahw->mbx_lock, flags);
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
}
static void qlcnic_83xx_mbx_poll_work(struct work_struct *work)
@@ -970,6 +954,7 @@ void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *adapter)
return;
INIT_DELAYED_WORK(&adapter->mbx_poll_work, qlcnic_83xx_mbx_poll_work);
+ queue_delayed_work(adapter->qlcnic_wq, &adapter->mbx_poll_work, 0);
}
void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *adapter)
@@ -1356,8 +1341,10 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
/* disable and free mailbox interrupt */
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ qlcnic_83xx_enable_mbx_poll(adapter);
qlcnic_83xx_free_mbx_intr(adapter);
+ }
adapter->ahw->loopback_state = 0;
adapter->ahw->hw_ops->setup_link_event(adapter, 1);
}
@@ -1378,6 +1365,8 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
qlcnic_83xx_disable_intr(adapter, sds_ring);
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ qlcnic_83xx_enable_mbx_poll(adapter);
}
}
@@ -1387,6 +1376,7 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
err = qlcnic_83xx_setup_mbx_intr(adapter);
+ qlcnic_83xx_disable_mbx_poll(adapter);
if (err) {
dev_err(&adapter->pdev->dev,
"%s: failed to setup mbx interrupt\n",
@@ -1403,6 +1393,10 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
if (netif_running(netdev))
__qlcnic_up(adapter, netdev);
+
+ if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST &&
+ !(adapter->flags & QLCNIC_MSIX_ENABLED))
+ qlcnic_83xx_disable_mbx_poll(adapter);
out:
netif_device_attach(netdev);
}
@@ -1620,26 +1614,33 @@ static void qlcnic_83xx_set_interface_id_promisc(struct qlcnic_adapter *adapter,
int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
{
- int err;
+ struct qlcnic_cmd_args *cmd = NULL;
u32 temp = 0;
- struct qlcnic_cmd_args cmd;
+ int err;
if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
return -EIO;
- err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ err = qlcnic_alloc_mbx_args(cmd, adapter,
QLCNIC_CMD_CONFIGURE_MAC_RX_MODE);
if (err)
- return err;
+ goto out;
+ cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
qlcnic_83xx_set_interface_id_promisc(adapter, &temp);
- cmd.req.arg[1] = (mode ? 1 : 0) | temp;
- err = qlcnic_issue_cmd(adapter, &cmd);
- if (err)
- dev_info(&adapter->pdev->dev,
- "Promiscuous mode config failed\n");
+ cmd->req.arg[1] = (mode ? 1 : 0) | temp;
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (!err)
+ return err;
- qlcnic_free_mbx_args(&cmd);
+ qlcnic_free_mbx_args(cmd);
+
+out:
+ kfree(cmd);
return err;
}
@@ -1652,7 +1653,7 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
if (ahw->op_mode == QLCNIC_NON_PRIV_FUNC) {
netdev_warn(netdev,
"Loopback test not supported in non privileged mode\n");
- return ret;
+ return -ENOTSUPP;
}
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
@@ -1680,19 +1681,17 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
/* Poll for link up event before running traffic */
do {
msleep(QLC_83XX_LB_MSLEEP_COUNT);
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
- qlcnic_83xx_process_aen(adapter);
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
netdev_info(netdev,
"Device is resetting, free LB test resources\n");
- ret = -EIO;
+ ret = -EBUSY;
goto free_diag_res;
}
if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
netdev_info(netdev,
"Firmware didn't sent link up event to loopback request\n");
- ret = -QLCNIC_FW_NOT_RESPOND;
+ ret = -ETIMEDOUT;
qlcnic_83xx_clear_lb_mode(adapter, mode);
goto free_diag_res;
}
@@ -1701,7 +1700,7 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
/* Make sure carrier is off and queue is stopped during loopback */
if (netif_running(netdev)) {
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
}
ret = qlcnic_do_lb_test(adapter, mode);
@@ -1717,18 +1716,42 @@ fail_diag_alloc:
return ret;
}
+static void qlcnic_extend_lb_idc_cmpltn_wait(struct qlcnic_adapter *adapter,
+ u32 *max_wait_count)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int temp;
+
+ netdev_info(adapter->netdev, "Recieved loopback IDC time extend event for 0x%x seconds\n",
+ ahw->extend_lb_time);
+ temp = ahw->extend_lb_time * 1000;
+ *max_wait_count += temp / QLC_83XX_LB_MSLEEP_COUNT;
+ ahw->extend_lb_time = 0;
+}
+
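The helper above turns a firmware-requested extension, reported in seconds by the time-extend AEN, into extra iterations of the msleep()-based wait loops in the set/clear loopback paths. A minimal standalone sketch of that arithmetic follows; the constant values are assumptions for illustration only, not taken from the driver headers.

/* Model of qlcnic_extend_lb_idc_cmpltn_wait(): each poll iteration
 * sleeps QLC_83XX_LB_MSLEEP_COUNT milliseconds, so an extension of
 * N seconds adds (N * 1000) / QLC_83XX_LB_MSLEEP_COUNT iterations.
 */
#include <stdio.h>

#define QLC_83XX_LB_MSLEEP_COUNT 500	/* assumed per-iteration sleep, ms */
#define QLC_83XX_LB_WAIT_COUNT   250	/* assumed initial iteration budget */

static unsigned int extend_wait(unsigned int max_wait_count,
				unsigned int extend_lb_time_sec)
{
	return max_wait_count +
	       (extend_lb_time_sec * 1000) / QLC_83XX_LB_MSLEEP_COUNT;
}

int main(void)
{
	/* firmware asks for 8 more seconds: 250 -> 266 iterations */
	printf("%u\n", extend_wait(QLC_83XX_LB_WAIT_COUNT, 8));
	return 0;
}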
int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct net_device *netdev = adapter->netdev;
+ u32 config, max_wait_count;
int status = 0, loop = 0;
- u32 config;
+ ahw->extend_lb_time = 0;
+ max_wait_count = QLC_83XX_LB_WAIT_COUNT;
status = qlcnic_83xx_get_port_config(adapter);
if (status)
return status;
config = ahw->port_config;
+
+ /* Check if port is already in loopback mode */
+ if ((config & QLC_83XX_CFG_LOOPBACK_HSS) ||
+ (config & QLC_83XX_CFG_LOOPBACK_EXT)) {
+ netdev_err(netdev,
+ "Port already in Loopback mode.\n");
+ return -EINPROGRESS;
+ }
+
set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
if (mode == QLCNIC_ILB_MODE)
@@ -1749,21 +1772,24 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
/* Wait for Link and IDC Completion AEN */
do {
msleep(QLC_83XX_LB_MSLEEP_COUNT);
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
- qlcnic_83xx_process_aen(adapter);
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
netdev_info(netdev,
"Device is resetting, free LB test resources\n");
clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
- return -EIO;
+ return -EBUSY;
}
- if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
- netdev_err(netdev,
- "Did not receive IDC completion AEN\n");
+
+ if (ahw->extend_lb_time)
+ qlcnic_extend_lb_idc_cmpltn_wait(adapter,
+ &max_wait_count);
+
+ if (loop++ > max_wait_count) {
+ netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
+ __func__);
clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
qlcnic_83xx_clear_lb_mode(adapter, mode);
- return -EIO;
+ return -ETIMEDOUT;
}
} while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
@@ -1775,10 +1801,12 @@ int qlcnic_83xx_set_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 config = ahw->port_config, max_wait_count;
struct net_device *netdev = adapter->netdev;
int status = 0, loop = 0;
- u32 config = ahw->port_config;
+ ahw->extend_lb_time = 0;
+ max_wait_count = QLC_83XX_LB_WAIT_COUNT;
set_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
if (mode == QLCNIC_ILB_MODE)
ahw->port_config &= ~QLC_83XX_CFG_LOOPBACK_HSS;
@@ -1798,21 +1826,23 @@ int qlcnic_83xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
/* Wait for Link and IDC Completion AEN */
do {
msleep(QLC_83XX_LB_MSLEEP_COUNT);
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
- qlcnic_83xx_process_aen(adapter);
if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
netdev_info(netdev,
"Device is resetting, free LB test resources\n");
clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
- return -EIO;
+ return -EBUSY;
}
- if (loop++ > QLC_83XX_LB_WAIT_COUNT) {
- netdev_err(netdev,
- "Did not receive IDC completion AEN\n");
+ if (ahw->extend_lb_time)
+ qlcnic_extend_lb_idc_cmpltn_wait(adapter,
+ &max_wait_count);
+
+ if (loop++ > max_wait_count) {
+ netdev_err(netdev, "%s: Did not receive loopback IDC completion AEN\n",
+ __func__);
clear_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status);
- return -EIO;
+ return -ETIMEDOUT;
}
} while (test_bit(QLC_83XX_IDC_COMP_AEN, &ahw->idc.status));
@@ -1951,25 +1981,31 @@ static void qlcnic_83xx_set_interface_id_macaddr(struct qlcnic_adapter *adapter,
int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
u16 vlan_id, u8 op)
{
- int err;
- u32 *buf, temp = 0;
- struct qlcnic_cmd_args cmd;
+ struct qlcnic_cmd_args *cmd = NULL;
struct qlcnic_macvlan_mbx mv;
+ u32 *buf, temp = 0;
+ int err;
if (adapter->recv_ctx->state == QLCNIC_HOST_CTX_STATE_FREED)
return -EIO;
- err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
+ return -ENOMEM;
+
+ err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
if (err)
- return err;
+ goto out;
+
+ cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
if (vlan_id)
op = (op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL;
- cmd.req.arg[1] = op | (1 << 8);
+ cmd->req.arg[1] = op | (1 << 8);
qlcnic_83xx_set_interface_id_macaddr(adapter, &temp);
- cmd.req.arg[1] |= temp;
+ cmd->req.arg[1] |= temp;
mv.vlan = vlan_id;
mv.mac_addr0 = addr[0];
mv.mac_addr1 = addr[1];
@@ -1977,14 +2013,15 @@ int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
mv.mac_addr3 = addr[3];
mv.mac_addr4 = addr[4];
mv.mac_addr5 = addr[5];
- buf = &cmd.req.arg[2];
+ buf = &cmd->req.arg[2];
memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
- err = qlcnic_issue_cmd(adapter, &cmd);
- if (err)
- dev_err(&adapter->pdev->dev,
- "MAC-VLAN %s to CAM failed, err=%d.\n",
- ((op == 1) ? "add " : "delete "), err);
- qlcnic_free_mbx_args(&cmd);
+ err = qlcnic_issue_cmd(adapter, cmd);
+ if (!err)
+ return err;
+
+ qlcnic_free_mbx_args(cmd);
+out:
+ kfree(cmd);
return err;
}
@@ -2009,12 +2046,14 @@ void qlcnic_83xx_configure_mac(struct qlcnic_adapter *adapter, u8 *mac,
cmd->req.arg[1] = type;
}
-int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 function)
{
int err, i;
struct qlcnic_cmd_args cmd;
u32 mac_low, mac_high;
+ function = 0;
err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
if (err)
return err;
@@ -2075,28 +2114,37 @@ void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *adapter)
static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
u32 data[])
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
u8 link_status, duplex;
/* link speed */
link_status = LSB(data[3]) & 1;
- adapter->ahw->link_speed = MSW(data[2]);
- adapter->ahw->link_autoneg = MSB(MSW(data[3]));
- adapter->ahw->module_type = MSB(LSW(data[3]));
- duplex = LSB(MSW(data[3]));
- if (duplex)
- adapter->ahw->link_duplex = DUPLEX_FULL;
- else
- adapter->ahw->link_duplex = DUPLEX_HALF;
- adapter->ahw->has_link_events = 1;
+ if (link_status) {
+ ahw->link_speed = MSW(data[2]);
+ duplex = LSB(MSW(data[3]));
+ if (duplex)
+ ahw->link_duplex = DUPLEX_FULL;
+ else
+ ahw->link_duplex = DUPLEX_HALF;
+ } else {
+ ahw->link_speed = SPEED_UNKNOWN;
+ ahw->link_duplex = DUPLEX_UNKNOWN;
+ }
+
+ ahw->link_autoneg = MSB(MSW(data[3]));
+ ahw->module_type = MSB(LSW(data[3]));
+ ahw->has_link_events = 1;
qlcnic_advert_link_change(adapter, link_status);
}
irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
{
struct qlcnic_adapter *adapter = data;
- unsigned long flags;
+ struct qlcnic_mailbox *mbx;
u32 mask, resp, event;
+ unsigned long flags;
- spin_lock_irqsave(&adapter->ahw->mbx_lock, flags);
+ mbx = adapter->ahw->mailbox;
+ spin_lock_irqsave(&mbx->aen_lock, flags);
resp = QLCRDX(adapter->ahw, QLCNIC_FW_MBX_CTRL);
if (!(resp & QLCNIC_SET_OWNER))
goto out;
@@ -2104,11 +2152,13 @@ irqreturn_t qlcnic_83xx_handle_aen(int irq, void *data)
event = readl(QLCNIC_MBX_FW(adapter->ahw, 0));
if (event & QLCNIC_MBX_ASYNC_EVENT)
__qlcnic_83xx_process_aen(adapter);
+ else
+ qlcnic_83xx_notify_mbx_response(mbx);
+
out:
mask = QLCRDX(adapter->ahw, QLCNIC_DEF_INT_MASK);
writel(0, adapter->ahw->pci_base0 + mask);
- spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
-
+ spin_unlock_irqrestore(&mbx->aen_lock, flags);
return IRQ_HANDLED;
}
@@ -2281,7 +2331,7 @@ int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *adapter,
pci_info->tx_max_bw, pci_info->mac);
}
if (ahw->op_mode == QLCNIC_MGMT_FUNC)
- dev_info(dev, "Max vNIC functions = %d, active vNIC functions = %d\n",
+ dev_info(dev, "Max functions = %d, active functions = %d\n",
ahw->max_pci_func, ahw->act_pci_func);
} else {
@@ -2384,9 +2434,9 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
u32 flash_addr, u8 *p_data,
int count)
{
- int i, ret;
- u32 word, range, flash_offset, addr = flash_addr;
+ u32 word, range, flash_offset, addr = flash_addr, ret;
ulong indirect_add, direct_window;
+ int i, err = 0;
flash_offset = addr & (QLCNIC_FLASH_SECTOR_SIZE - 1);
if (addr & 0x3) {
@@ -2404,10 +2454,9 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
/* Multi sector read */
for (i = 0; i < count; i++) {
indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
- ret = qlcnic_83xx_rd_reg_indirect(adapter,
- indirect_add);
- if (ret == -EIO)
- return -EIO;
+ ret = QLCRD32(adapter, indirect_add, &err);
+ if (err == -EIO)
+ return err;
word = ret;
*(u32 *)p_data = word;
@@ -2428,10 +2477,9 @@ int qlcnic_83xx_lockless_flash_read32(struct qlcnic_adapter *adapter,
/* Single sector read */
for (i = 0; i < count; i++) {
indirect_add = QLC_83XX_FLASH_DIRECT_DATA(addr);
- ret = qlcnic_83xx_rd_reg_indirect(adapter,
- indirect_add);
- if (ret == -EIO)
- return -EIO;
+ ret = QLCRD32(adapter, indirect_add, &err);
+ if (err == -EIO)
+ return err;
word = ret;
*(u32 *)p_data = word;
@@ -2447,10 +2495,13 @@ static int qlcnic_83xx_poll_flash_status_reg(struct qlcnic_adapter *adapter)
{
u32 status;
int retries = QLC_83XX_FLASH_READ_RETRY_COUNT;
+ int err = 0;
do {
- status = qlcnic_83xx_rd_reg_indirect(adapter,
- QLC_83XX_FLASH_STATUS);
+ status = QLCRD32(adapter, QLC_83XX_FLASH_STATUS, &err);
+ if (err == -EIO)
+ return err;
+
if ((status & QLC_83XX_FLASH_STATUS_READY) ==
QLC_83XX_FLASH_STATUS_READY)
break;
@@ -2502,7 +2553,8 @@ int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *adapter)
int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter)
{
- int ret, mfg_id;
+ int ret, err = 0;
+ u32 mfg_id;
if (qlcnic_83xx_lock_flash(adapter))
return -EIO;
@@ -2517,9 +2569,11 @@ int qlcnic_83xx_read_flash_mfg_id(struct qlcnic_adapter *adapter)
return -EIO;
}
- mfg_id = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA);
- if (mfg_id == -EIO)
- return -EIO;
+ mfg_id = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
+ if (err == -EIO) {
+ qlcnic_83xx_unlock_flash(adapter);
+ return err;
+ }
adapter->flash_mfg_id = (mfg_id & 0xFF);
qlcnic_83xx_unlock_flash(adapter);
@@ -2636,7 +2690,7 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
u32 *p_data, int count)
{
u32 temp;
- int ret = -EIO;
+ int ret = -EIO, err = 0;
if ((count < QLC_83XX_FLASH_WRITE_MIN) ||
(count > QLC_83XX_FLASH_WRITE_MAX)) {
@@ -2645,8 +2699,10 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
return -EIO;
}
- temp = qlcnic_83xx_rd_reg_indirect(adapter,
- QLC_83XX_FLASH_SPI_CONTROL);
+ temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
+ if (err == -EIO)
+ return err;
+
qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_SPI_CONTROL,
(temp | QLC_83XX_FLASH_SPI_CTRL));
qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
@@ -2695,13 +2751,18 @@ int qlcnic_83xx_flash_bulk_write(struct qlcnic_adapter *adapter, u32 addr,
return -EIO;
}
- ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_SPI_STATUS);
+ ret = QLCRD32(adapter, QLC_83XX_FLASH_SPI_STATUS, &err);
+ if (err == -EIO)
+ return err;
+
if ((ret & QLC_83XX_FLASH_SPI_CTRL) == QLC_83XX_FLASH_SPI_CTRL) {
dev_err(&adapter->pdev->dev, "%s: failed at %d\n",
__func__, __LINE__);
/* Operation failed, clear error bit */
- temp = qlcnic_83xx_rd_reg_indirect(adapter,
- QLC_83XX_FLASH_SPI_CONTROL);
+ temp = QLCRD32(adapter, QLC_83XX_FLASH_SPI_CONTROL, &err);
+ if (err == -EIO)
+ return err;
+
qlcnic_83xx_wrt_reg_indirect(adapter,
QLC_83XX_FLASH_SPI_CONTROL,
(temp | QLC_83XX_FLASH_SPI_CTRL));
@@ -2823,6 +2884,7 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
{
int i, j, ret = 0;
u32 temp;
+ int err = 0;
/* Check alignment */
if (addr & 0xF)
@@ -2855,8 +2917,12 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
QLCNIC_TA_WRITE_START);
for (j = 0; j < MAX_CTL_CHECK; j++) {
- temp = qlcnic_83xx_rd_reg_indirect(adapter,
- QLCNIC_MS_CTRL);
+ temp = QLCRD32(adapter, QLCNIC_MS_CTRL, &err);
+ if (err == -EIO) {
+ mutex_unlock(&adapter->ahw->mem_lock);
+ return err;
+ }
+
if ((temp & TA_CTL_BUSY) == 0)
break;
}
@@ -2878,9 +2944,9 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
u8 *p_data, int count)
{
- int i, ret;
- u32 word, addr = flash_addr;
+ u32 word, addr = flash_addr, ret;
ulong indirect_addr;
+ int i, err = 0;
if (qlcnic_83xx_lock_flash(adapter) != 0)
return -EIO;
@@ -2900,10 +2966,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
}
indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
- ret = qlcnic_83xx_rd_reg_indirect(adapter,
- indirect_addr);
- if (ret == -EIO)
- return -EIO;
+ ret = QLCRD32(adapter, indirect_addr, &err);
+ if (err == -EIO)
+ return err;
+
word = ret;
*(u32 *)p_data = word;
p_data = p_data + 4;
@@ -3014,8 +3080,8 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
}
if (ahw->port_type == QLCNIC_XGBE) {
- ecmd->supported = SUPPORTED_1000baseT_Full;
- ecmd->advertising = ADVERTISED_1000baseT_Full;
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
} else {
ecmd->supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
@@ -3244,6 +3310,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
u8 val;
int ret, max_sds_rings = adapter->max_sds_rings;
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state)) {
+ netdev_info(netdev, "Device is resetting\n");
+ return -EBUSY;
+ }
+
if (qlcnic_get_diag_lock(adapter)) {
netdev_info(netdev, "Device in diagnostics mode\n");
return -EBUSY;
@@ -3369,7 +3440,8 @@ int qlcnic_83xx_set_pauseparam(struct qlcnic_adapter *adapter,
static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter)
{
- int ret;
+ int ret, err = 0;
+ u32 temp;
qlcnic_83xx_wrt_reg_indirect(adapter, QLC_83XX_FLASH_ADDR,
QLC_83XX_FLASH_OEM_READ_SIG);
@@ -3379,8 +3451,11 @@ static int qlcnic_83xx_read_flash_status_reg(struct qlcnic_adapter *adapter)
if (ret)
return -EIO;
- ret = qlcnic_83xx_rd_reg_indirect(adapter, QLC_83XX_FLASH_RDDATA);
- return ret & 0xFF;
+ temp = QLCRD32(adapter, QLC_83XX_FLASH_RDDATA, &err);
+ if (err == -EIO)
+ return err;
+
+ return temp & 0xFF;
}
int qlcnic_83xx_flash_test(struct qlcnic_adapter *adapter)
@@ -3446,3 +3521,360 @@ int qlcnic_83xx_resume(struct qlcnic_adapter *adapter)
idc->delay);
return err;
}
+
+void qlcnic_83xx_reinit_mbx_work(struct qlcnic_mailbox *mbx)
+{
+ INIT_COMPLETION(mbx->completion);
+ set_bit(QLC_83XX_MBX_READY, &mbx->status);
+}
+
+void qlcnic_83xx_free_mailbox(struct qlcnic_mailbox *mbx)
+{
+ destroy_workqueue(mbx->work_q);
+ kfree(mbx);
+}
+
+static inline void
+qlcnic_83xx_notify_cmd_completion(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_ARRIVED);
+
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+ qlcnic_free_mbx_args(cmd);
+ kfree(cmd);
+ return;
+ }
+ complete(&cmd->completion);
+}
+
+static void qlcnic_83xx_flush_mbx_queue(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+ struct list_head *head = &mbx->cmd_q;
+ struct qlcnic_cmd_args *cmd = NULL;
+
+ spin_lock(&mbx->queue_lock);
+
+ while (!list_empty(head)) {
+ cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+ dev_info(&adapter->pdev->dev, "%s: Mailbox command 0x%x\n",
+ __func__, cmd->cmd_op);
+ list_del(&cmd->list);
+ mbx->num_cmds--;
+ qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+ }
+
+ spin_unlock(&mbx->queue_lock);
+}
+
+static int qlcnic_83xx_check_mbx_status(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ u32 host_mbx_ctrl;
+
+ if (!test_bit(QLC_83XX_MBX_READY, &mbx->status))
+ return -EBUSY;
+
+ host_mbx_ctrl = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
+ if (host_mbx_ctrl) {
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ ahw->idc.collect_dump = 1;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static inline void qlcnic_83xx_signal_mbx_cmd(struct qlcnic_adapter *adapter,
+ u8 issue_cmd)
+{
+ if (issue_cmd)
+ QLCWRX(adapter->ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
+ else
+ QLCWRX(adapter->ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
+}
+
+static void qlcnic_83xx_dequeue_mbx_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+ spin_lock(&mbx->queue_lock);
+
+ list_del(&cmd->list);
+ mbx->num_cmds--;
+
+ spin_unlock(&mbx->queue_lock);
+
+ qlcnic_83xx_notify_cmd_completion(adapter, cmd);
+}
+
+static void qlcnic_83xx_encode_mbx_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ u32 mbx_cmd, fw_hal_version, hdr_size, total_size, tmp;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int i, j;
+
+ if (cmd->op_type != QLC_83XX_MBX_POST_BC_OP) {
+ mbx_cmd = cmd->req.arg[0];
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+ for (i = 1; i < cmd->req.num; i++)
+ writel(cmd->req.arg[i], QLCNIC_MBX_HOST(ahw, i));
+ } else {
+ fw_hal_version = ahw->fw_hal_version;
+ hdr_size = sizeof(struct qlcnic_bc_hdr) / sizeof(u32);
+ total_size = cmd->pay_size + hdr_size;
+ tmp = QLCNIC_CMD_BC_EVENT_SETUP | total_size << 16;
+ mbx_cmd = tmp | fw_hal_version << 29;
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
+
+ /* Back channel specific operations bits */
+ mbx_cmd = 0x1 | 1 << 4;
+
+ if (qlcnic_sriov_pf_check(adapter))
+ mbx_cmd |= cmd->func_num << 5;
+
+ writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
+
+ for (i = 2, j = 0; j < hdr_size; i++, j++)
+ writel(*(cmd->hdr++), QLCNIC_MBX_HOST(ahw, i));
+ for (j = 0; j < cmd->pay_size; j++, i++)
+ writel(*(cmd->pay++), QLCNIC_MBX_HOST(ahw, i));
+ }
+}
+
+void qlcnic_83xx_detach_mailbox_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ complete(&mbx->completion);
+ cancel_work_sync(&mbx->work);
+ flush_workqueue(mbx->work_q);
+ qlcnic_83xx_flush_mbx_queue(adapter);
+}
+
+static int qlcnic_83xx_enqueue_mbx_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd,
+ unsigned long *timeout)
+{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
+
+ if (test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
+ atomic_set(&cmd->rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+ init_completion(&cmd->completion);
+ cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_UNKNOWN;
+
+ spin_lock(&mbx->queue_lock);
+
+ list_add_tail(&cmd->list, &mbx->cmd_q);
+ mbx->num_cmds++;
+ cmd->total_cmds = mbx->num_cmds;
+ *timeout = cmd->total_cmds * QLC_83XX_MBX_TIMEOUT;
+ queue_work(mbx->work_q, &mbx->work);
+
+ spin_unlock(&mbx->queue_lock);
+
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+static int qlcnic_83xx_check_mac_rcode(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ u8 mac_cmd_rcode;
+ u32 fw_data;
+
+ if (cmd->cmd_op == QLCNIC_CMD_CONFIG_MAC_VLAN) {
+ fw_data = readl(QLCNIC_MBX_FW(adapter->ahw, 2));
+ mac_cmd_rcode = (u8)fw_data;
+ if (mac_cmd_rcode == QLC_83XX_NO_NIC_RESOURCE ||
+ mac_cmd_rcode == QLC_83XX_MAC_PRESENT ||
+ mac_cmd_rcode == QLC_83XX_MAC_ABSENT) {
+ cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
+ return QLCNIC_RCODE_SUCCESS;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct device *dev = &adapter->pdev->dev;
+ u8 mbx_err_code;
+ u32 fw_data;
+
+ fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
+ mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
+ qlcnic_83xx_get_mbx_data(adapter, cmd);
+
+ switch (mbx_err_code) {
+ case QLCNIC_MBX_RSP_OK:
+ case QLCNIC_MBX_PORT_RSP_OK:
+ cmd->rsp_opcode = QLCNIC_RCODE_SUCCESS;
+ break;
+ default:
+ if (!qlcnic_83xx_check_mac_rcode(adapter, cmd))
+ break;
+
+ dev_err(dev, "%s: Mailbox command failed, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x, error=0x%x\n",
+ __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
+ ahw->op_mode, mbx_err_code);
+ cmd->rsp_opcode = QLC_83XX_MBX_RESPONSE_FAILED;
+ qlcnic_dump_mbx(adapter, cmd);
+ }
+
+ return;
+}
+
+static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
+{
+ struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
+ work);
+ struct qlcnic_adapter *adapter = mbx->adapter;
+ struct qlcnic_mbx_ops *mbx_ops = mbx->ops;
+ struct device *dev = &adapter->pdev->dev;
+ atomic_t *rsp_status = &mbx->rsp_status;
+ struct list_head *head = &mbx->cmd_q;
+ struct qlcnic_hardware_context *ahw;
+ struct qlcnic_cmd_args *cmd = NULL;
+
+ ahw = adapter->ahw;
+
+ while (true) {
+ if (qlcnic_83xx_check_mbx_status(adapter)) {
+ qlcnic_83xx_flush_mbx_queue(adapter);
+ return;
+ }
+
+ atomic_set(rsp_status, QLC_83XX_MBX_RESPONSE_WAIT);
+
+ spin_lock(&mbx->queue_lock);
+
+ if (list_empty(head)) {
+ spin_unlock(&mbx->queue_lock);
+ return;
+ }
+ cmd = list_entry(head->next, struct qlcnic_cmd_args, list);
+
+ spin_unlock(&mbx->queue_lock);
+
+ mbx_ops->encode_cmd(adapter, cmd);
+ mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_REQUEST);
+
+ if (wait_for_completion_timeout(&mbx->completion,
+ QLC_83XX_MBX_TIMEOUT)) {
+ mbx_ops->decode_resp(adapter, cmd);
+ mbx_ops->nofity_fw(adapter, QLC_83XX_MBX_COMPLETION);
+ } else {
+ dev_err(dev, "%s: Mailbox command timeout, opcode=0x%x, cmd_type=0x%x, func=0x%x, op_mode=0x%x\n",
+ __func__, cmd->cmd_op, cmd->type, ahw->pci_func,
+ ahw->op_mode);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ qlcnic_dump_mbx(adapter, cmd);
+ qlcnic_83xx_idc_request_reset(adapter,
+ QLCNIC_FORCE_FW_DUMP_KEY);
+ cmd->rsp_opcode = QLCNIC_RCODE_TIMEOUT;
+ }
+ mbx_ops->dequeue_cmd(adapter, cmd);
+ }
+}
+
+static struct qlcnic_mbx_ops qlcnic_83xx_mbx_ops = {
+ .enqueue_cmd = qlcnic_83xx_enqueue_mbx_cmd,
+ .dequeue_cmd = qlcnic_83xx_dequeue_mbx_cmd,
+ .decode_resp = qlcnic_83xx_decode_mbx_rsp,
+ .encode_cmd = qlcnic_83xx_encode_mbx_cmd,
+ .nofity_fw = qlcnic_83xx_signal_mbx_cmd,
+};
+
+int qlcnic_83xx_init_mailbox_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx;
+
+ ahw->mailbox = kzalloc(sizeof(*mbx), GFP_KERNEL);
+ if (!ahw->mailbox)
+ return -ENOMEM;
+
+ mbx = ahw->mailbox;
+ mbx->ops = &qlcnic_83xx_mbx_ops;
+ mbx->adapter = adapter;
+
+ spin_lock_init(&mbx->queue_lock);
+ spin_lock_init(&mbx->aen_lock);
+ INIT_LIST_HEAD(&mbx->cmd_q);
+ init_completion(&mbx->completion);
+
+ mbx->work_q = create_singlethread_workqueue("qlcnic_mailbox");
+ if (mbx->work_q == NULL) {
+ kfree(mbx);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&mbx->work, qlcnic_83xx_mailbox_worker);
+ set_bit(QLC_83XX_MBX_READY, &mbx->status);
+ return 0;
+}
+
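Taken together, the new mailbox path serializes all firmware commands through one queue and one worker: qlcnic_83xx_enqueue_mbx_cmd() appends the command to mbx->cmd_q under queue_lock and schedules mbx->work on the single-threaded "qlcnic_mailbox" workqueue; the worker encodes one request at a time into the mailbox registers, signals the firmware, and sleeps on mbx->completion, which the interrupt and poll paths complete through qlcnic_83xx_notify_mbx_response(); it then decodes the response, dequeues the command, and completes the submitter's cmd->completion. The sketch below is a simplified userspace pthread model of that producer/worker hand-off (build with -pthread), not the driver's code; all names in it are illustrative.

/* Simplified model: submitters enqueue a command and block until a
 * single worker has "issued" it and marked it complete, mirroring the
 * enqueue_cmd/mailbox_worker/wait_for_completion split above.
 */
#include <pthread.h>
#include <stdio.h>

struct cmd {
	int opcode;
	int done;
	struct cmd *next;
};

static struct cmd *head, *tail;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;		/* queue not empty */
static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;	/* cmd finished */

static void enqueue_cmd(struct cmd *c)		/* ~ mbx->ops->enqueue_cmd */
{
	pthread_mutex_lock(&lock);
	c->next = NULL;
	if (tail)
		tail->next = c;
	else
		head = c;
	tail = c;
	pthread_cond_signal(&kick);		/* ~ queue_work(mbx->work_q) */
	pthread_mutex_unlock(&lock);
}

static void *mailbox_worker(void *arg)		/* ~ qlcnic_83xx_mailbox_worker */
{
	(void)arg;
	for (;;) {
		struct cmd *c;

		pthread_mutex_lock(&lock);
		while (!head)
			pthread_cond_wait(&kick, &lock);
		c = head;
		head = c->next;
		if (!head)
			tail = NULL;
		pthread_mutex_unlock(&lock);

		printf("issuing opcode 0x%x\n", c->opcode);	/* encode + notify fw */

		pthread_mutex_lock(&lock);
		c->done = 1;			/* ~ decode_resp + complete() */
		pthread_cond_broadcast(&done_cv);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	struct cmd c = { .opcode = 0x45 };
	pthread_t t;

	pthread_create(&t, NULL, mailbox_worker, NULL);
	enqueue_cmd(&c);

	pthread_mutex_lock(&lock);
	while (!c.done)				/* ~ wait_for_completion_timeout */
		pthread_cond_wait(&done_cv, &lock);
	pthread_mutex_unlock(&lock);
	puts("command completed");
	return 0;
}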
+pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (state == pci_channel_io_normal)
+ return PCI_ERS_RESULT_RECOVERED;
+
+ set_bit(__QLCNIC_AER, &adapter->state);
+ set_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ qlcnic_83xx_aer_stop_poll_work(adapter);
+
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ int err = 0;
+
+ pdev->error_state = pci_channel_io_normal;
+ err = pci_enable_device(pdev);
+ if (err)
+ goto disconnect;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ err = qlcnic_83xx_aer_reset(adapter);
+ if (err == 0)
+ return PCI_ERS_RESULT_RECOVERED;
+disconnect:
+ clear_bit(__QLCNIC_AER, &adapter->state);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+void qlcnic_83xx_io_resume(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ if (test_and_clear_bit(__QLCNIC_AER, &adapter->state))
+ qlcnic_83xx_aer_start_poll_work(adapter);
+}
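These three callbacks implement the standard PCI AER recovery sequence for the 83xx parts: io_error_detected() quiesces the adapter and tells the PCI core whether a slot reset is worth attempting, io_slot_reset() re-enables the device and re-runs the adapter reset, and io_resume() clears the uncorrectable error status and restarts the IDC poll work. Below is a small standalone sketch of the decision made in io_error_detected(); the enum and return names only mimic the PCI AER API and are local stand-ins, not kernel definitions.

/* A permanently failed link cannot be recovered, a normal link needs
 * no reset, and a frozen link asks the core for a slot reset.
 */
#include <stdio.h>

enum channel_state { IO_NORMAL, IO_FROZEN, IO_PERM_FAILURE };
enum ers_result { ERS_RECOVERED, ERS_NEED_RESET, ERS_DISCONNECT };

static enum ers_result error_detected(enum channel_state state)
{
	if (state == IO_PERM_FAILURE)
		return ERS_DISCONNECT;
	if (state == IO_NORMAL)
		return ERS_RECOVERED;
	/* frozen: save PCI state, disable the device, request a reset */
	return ERS_NEED_RESET;
}

int main(void)
{
	printf("%d %d %d\n", error_detected(IO_NORMAL),
	       error_detected(IO_FROZEN), error_detected(IO_PERM_FAILURE));
	return 0;
}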
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 2548d1403d7..533e150503a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -84,11 +84,20 @@
/* Firmware image definitions */
#define QLC_83XX_BOOTLOADER_FLASH_ADDR 0x10000
#define QLC_83XX_FW_FILE_NAME "83xx_fw.bin"
+#define QLC_84XX_FW_FILE_NAME "84xx_fw.bin"
#define QLC_83XX_BOOT_FROM_FLASH 0
#define QLC_83XX_BOOT_FROM_FILE 0x12345678
+#define QLC_FW_FILE_NAME_LEN 20
#define QLC_83XX_MAX_RESET_SEQ_ENTRIES 16
+#define QLC_83XX_MBX_POST_BC_OP 0x1
+#define QLC_83XX_MBX_COMPLETION 0x0
+#define QLC_83XX_MBX_REQUEST 0x1
+
+#define QLC_83XX_MBX_TIMEOUT (5 * HZ)
+#define QLC_83XX_MBX_CMD_LOOP 5000000
+
/* status descriptor mailbox data
* @phy_addr_{low|high}: physical address of buffer
* @sds_ring_size: buffer size
@@ -265,11 +274,7 @@ struct qlcnic_macvlan_mbx {
struct qlc_83xx_fw_info {
const struct firmware *fw;
- u16 major_fw_version;
- u8 minor_fw_version;
- u8 sub_fw_version;
- u8 fw_build_num;
- u8 load_from_file;
+ char fw_file_name[QLC_FW_FILE_NAME_LEN];
};
struct qlc_83xx_reset {
@@ -288,6 +293,7 @@ struct qlc_83xx_reset {
#define QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY 0x1
#define QLC_83XX_IDC_GRACEFULL_RESET 0x2
+#define QLC_83XX_IDC_DISABLE_FW_DUMP 0x4
#define QLC_83XX_IDC_TIMESTAMP 0
#define QLC_83XX_IDC_DURATION 1
#define QLC_83XX_IDC_INIT_TIMEOUT_SECS 30
@@ -397,6 +403,7 @@ enum qlcnic_83xx_states {
#define QLC_83XX_MAX_MC_COUNT 38
#define QLC_83XX_MAX_UC_COUNT 4096
+#define QLC_83XX_PVID_STRIP_CAPABILITY BIT_22
#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000)
#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20)
#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40)
@@ -404,6 +411,7 @@ enum qlcnic_83xx_states {
#define QLC_83XX_GET_HW_LRO_CAPABILITY(val) (val & 0x400)
#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000)
#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
+#define QLC_83XX_ESWITCH_CAPABILITY BIT_23
#define QLC_83XX_VIRTUAL_NIC_MODE 0xFF
#define QLC_83XX_DEFAULT_MODE 0x0
#define QLC_83XX_SRIOV_MODE 0x1
@@ -449,6 +457,20 @@ enum qlcnic_83xx_states {
#define QLC_83xx_FLASH_MAX_WAIT_USEC 100
#define QLC_83XX_FLASH_LOCK_TIMEOUT 10000
+enum qlc_83xx_mbx_cmd_type {
+ QLC_83XX_MBX_CMD_WAIT = 0,
+ QLC_83XX_MBX_CMD_NO_WAIT,
+ QLC_83XX_MBX_CMD_BUSY_WAIT,
+};
+
+enum qlc_83xx_mbx_response_states {
+ QLC_83XX_MBX_RESPONSE_WAIT = 0,
+ QLC_83XX_MBX_RESPONSE_ARRIVED,
+};
+
+#define QLC_83XX_MBX_RESPONSE_FAILED 0x2
+#define QLC_83XX_MBX_RESPONSE_UNKNOWN 0x3
+
/* Additional registers in 83xx */
enum qlc_83xx_ext_regs {
QLCNIC_GLOBAL_RESET = 0,
@@ -498,8 +520,8 @@ enum qlc_83xx_ext_regs {
/* 83xx funcitons */
int qlcnic_83xx_get_fw_version(struct qlcnic_adapter *);
-int qlcnic_83xx_mbx_op(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
-int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8);
+int qlcnic_83xx_issue_cmd(struct qlcnic_adapter *, struct qlcnic_cmd_args *);
+int qlcnic_83xx_setup_intr(struct qlcnic_adapter *, u8, int);
void qlcnic_83xx_get_func_no(struct qlcnic_adapter *);
int qlcnic_83xx_cam_lock(struct qlcnic_adapter *);
void qlcnic_83xx_cam_unlock(struct qlcnic_adapter *);
@@ -508,7 +530,7 @@ void qlcnic_83xx_add_sysfs(struct qlcnic_adapter *);
void qlcnic_83xx_remove_sysfs(struct qlcnic_adapter *);
void qlcnic_83xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_83xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
-int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong);
+int qlcnic_83xx_rd_reg_indirect(struct qlcnic_adapter *, ulong, int *);
int qlcnic_83xx_wrt_reg_indirect(struct qlcnic_adapter *, ulong, u32);
void qlcnic_83xx_process_rcv_diag(struct qlcnic_adapter *, int, u64 []);
int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
@@ -540,7 +562,7 @@ int qlcnic_83xx_setup_link_event(struct qlcnic_adapter *, int);
void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *);
int qlcnic_83xx_config_intrpt(struct qlcnic_adapter *, bool);
int qlcnic_83xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
-int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *);
+int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *, u8 *, u8);
void qlcnic_83xx_configure_mac(struct qlcnic_adapter *, u8 *, u8,
struct qlcnic_cmd_args *);
int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *,
@@ -551,7 +573,7 @@ void qlcnic_set_npar_data(struct qlcnic_adapter *, const struct qlcnic_info *,
void qlcnic_83xx_config_intr_coal(struct qlcnic_adapter *);
irqreturn_t qlcnic_83xx_handle_aen(int, void *);
int qlcnic_83xx_get_port_info(struct qlcnic_adapter *);
-void qlcnic_83xx_enable_mbx_intrpt(struct qlcnic_adapter *);
+void qlcnic_83xx_enable_mbx_interrupt(struct qlcnic_adapter *);
void qlcnic_83xx_disable_mbx_intr(struct qlcnic_adapter *);
irqreturn_t qlcnic_83xx_clear_legacy_intr(struct qlcnic_adapter *);
irqreturn_t qlcnic_83xx_intr(int, void *);
@@ -604,6 +626,7 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
int qlcnic_83xx_get_vnic_vport_info(struct qlcnic_adapter *,
struct qlcnic_info *, u8);
int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
+int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *, int);
void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
@@ -623,8 +646,6 @@ int qlcnic_83xx_set_led(struct net_device *, enum ethtool_phys_id_state);
int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
-u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *);
-u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *);
void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
void qlcnic_83xx_set_mac_filter_count(struct qlcnic_adapter *);
@@ -634,4 +655,11 @@ int qlcnic_83xx_idc_init(struct qlcnic_adapter *);
int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *);
int qlcnic_83xx_set_vnic_opmode(struct qlcnic_adapter *);
int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *);
+void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *);
+int qlcnic_83xx_aer_reset(struct qlcnic_adapter *);
+void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *);
+pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
+ pci_channel_state_t);
+pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
+void qlcnic_83xx_io_resume(struct pci_dev *);
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index f41dfab1e9a..f09e787af0b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -399,6 +399,7 @@ static void qlcnic_83xx_idc_detach_driver(struct qlcnic_adapter *adapter)
struct net_device *netdev = adapter->netdev;
netif_device_detach(netdev);
+ qlcnic_83xx_detach_mailbox_work(adapter);
/* Disable mailbox interrupt */
qlcnic_83xx_disable_mbx_intr(adapter);
@@ -610,6 +611,9 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
{
int err;
+ qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
+
/* register for NIC IDC AEN Events */
qlcnic_83xx_register_nic_idc_func(adapter, 1);
@@ -617,7 +621,7 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
if (err)
return err;
- qlcnic_83xx_enable_mbx_intrpt(adapter);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
if (qlcnic_83xx_configure_opmode(adapter)) {
qlcnic_83xx_idc_enter_failed_state(adapter, 1);
@@ -629,7 +633,10 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
return -EIO;
}
- qlcnic_set_drv_version(adapter);
+ if (adapter->portnum == 0)
+ qlcnic_set_drv_version(adapter);
+
+ qlcnic_dcb_get_info(adapter);
qlcnic_83xx_idc_attach_driver(adapter);
return 0;
@@ -640,7 +647,6 @@ static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter)
struct qlcnic_hardware_context *ahw = adapter->ahw;
qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1);
- set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
@@ -791,7 +797,6 @@ static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter)
ret = qlcnic_83xx_idc_restart_hw(adapter, 1);
} else {
ret = qlcnic_83xx_idc_check_timeout(adapter, timeout);
- return ret;
}
return ret;
@@ -810,9 +815,10 @@ static int qlcnic_83xx_idc_init_state(struct qlcnic_adapter *adapter)
**/
static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
{
- u32 val;
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
int ret = 0;
+ u32 val;
/* Perform NIC configuration based ready state entry actions */
if (ahw->idc.state_entry(adapter))
@@ -824,7 +830,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
dev_err(&adapter->pdev->dev,
"Error: device temperature %d above limits\n",
adapter->ahw->temp);
- clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
set_bit(__QLCNIC_RESETTING, &adapter->state);
qlcnic_83xx_idc_detach_driver(adapter);
qlcnic_83xx_idc_enter_failed_state(adapter, 1);
@@ -837,7 +843,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
if (ret) {
adapter->flags |= QLCNIC_FW_HANG;
if (!(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
- clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
set_bit(__QLCNIC_RESETTING, &adapter->state);
qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
}
@@ -845,6 +851,8 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
}
if ((val & QLC_83XX_IDC_GRACEFULL_RESET) || ahw->idc.collect_dump) {
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+
/* Move to need reset state and prepare for reset */
qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
return ret;
@@ -882,12 +890,13 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
**/
static int qlcnic_83xx_idc_need_reset_state(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
int ret = 0;
if (adapter->ahw->idc.prev_state != QLC_83XX_IDC_DEV_NEED_RESET) {
qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
set_bit(__QLCNIC_RESETTING, &adapter->state);
- clear_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
if (adapter->ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
qlcnic_83xx_disable_vnic_mode(adapter, 1);
@@ -1079,7 +1088,6 @@ static void qlcnic_83xx_setup_idc_parameters(struct qlcnic_adapter *adapter)
adapter->ahw->idc.name = (char **)qlc_83xx_idc_states;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
- set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
/* Check if reset recovery is disabled */
@@ -1190,6 +1198,9 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
{
u32 val;
+ if (qlcnic_sriov_vf_check(adapter))
+ return;
+
if (qlcnic_83xx_lock_driver(adapter)) {
dev_err(&adapter->pdev->dev,
"%s:failed, please retry\n", __func__);
@@ -1256,31 +1267,33 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
{
+ struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
+ const struct firmware *fw = fw_info->fw;
u32 dest, *p_cache;
- u64 addr;
+ int i, ret = -EIO;
u8 data[16];
size_t size;
- int i, ret = -EIO;
+ u64 addr;
dest = QLCRDX(adapter->ahw, QLCNIC_FW_IMAGE_ADDR);
- size = (adapter->ahw->fw_info.fw->size & ~0xF);
- p_cache = (u32 *)adapter->ahw->fw_info.fw->data;
+ size = (fw->size & ~0xF);
+ p_cache = (u32 *)fw->data;
addr = (u64)dest;
ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
(u32 *)p_cache, size / 16);
if (ret) {
dev_err(&adapter->pdev->dev, "MS memory write failed\n");
- release_firmware(adapter->ahw->fw_info.fw);
- adapter->ahw->fw_info.fw = NULL;
+ release_firmware(fw);
+ fw_info->fw = NULL;
return -EIO;
}
/* alignment check */
- if (adapter->ahw->fw_info.fw->size & 0xF) {
+ if (fw->size & 0xF) {
addr = dest + size;
- for (i = 0; i < (adapter->ahw->fw_info.fw->size & 0xF); i++)
- data[i] = adapter->ahw->fw_info.fw->data[size + i];
+ for (i = 0; i < (fw->size & 0xF); i++)
+ data[i] = fw->data[size + i];
for (; i < 16; i++)
data[i] = 0;
ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
@@ -1288,13 +1301,13 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
if (ret) {
dev_err(&adapter->pdev->dev,
"MS memory write failed\n");
- release_firmware(adapter->ahw->fw_info.fw);
- adapter->ahw->fw_info.fw = NULL;
+ release_firmware(fw);
+ fw_info->fw = NULL;
return -EIO;
}
}
- release_firmware(adapter->ahw->fw_info.fw);
- adapter->ahw->fw_info.fw = NULL;
+ release_firmware(fw);
+ fw_info->fw = NULL;
return 0;
}
@@ -1303,8 +1316,11 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
{
int i, j;
u32 val = 0, val1 = 0, reg = 0;
+ int err = 0;
- val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG);
+ val = QLCRD32(adapter, QLC_83XX_SRE_SHIM_REG, &err);
+ if (err == -EIO)
+ return;
dev_info(&adapter->pdev->dev, "SRE-Shim Ctrl:0x%x\n", val);
for (j = 0; j < 2; j++) {
@@ -1318,7 +1334,9 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
reg = QLC_83XX_PORT1_THRESHOLD;
}
for (i = 0; i < 8; i++) {
- val = QLCRD32(adapter, reg + (i * 0x4));
+ val = QLCRD32(adapter, reg + (i * 0x4), &err);
+ if (err == -EIO)
+ return;
dev_info(&adapter->pdev->dev, "0x%x ", val);
}
dev_info(&adapter->pdev->dev, "\n");
@@ -1335,8 +1353,10 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
reg = QLC_83XX_PORT1_TC_MC_REG;
}
for (i = 0; i < 4; i++) {
- val = QLCRD32(adapter, reg + (i * 0x4));
- dev_info(&adapter->pdev->dev, "0x%x ", val);
+ val = QLCRD32(adapter, reg + (i * 0x4), &err);
+ if (err == -EIO)
+ return;
+ dev_info(&adapter->pdev->dev, "0x%x ", val);
}
dev_info(&adapter->pdev->dev, "\n");
}
@@ -1352,17 +1372,25 @@ static void qlcnic_83xx_dump_pause_control_regs(struct qlcnic_adapter *adapter)
reg = QLC_83XX_PORT1_TC_STATS;
}
for (i = 7; i >= 0; i--) {
- val = QLCRD32(adapter, reg);
+ val = QLCRD32(adapter, reg, &err);
+ if (err == -EIO)
+ return;
val &= ~(0x7 << 29); /* Reset bits 29 to 31 */
QLCWR32(adapter, reg, (val | (i << 29)));
- val = QLCRD32(adapter, reg);
+ val = QLCRD32(adapter, reg, &err);
+ if (err == -EIO)
+ return;
dev_info(&adapter->pdev->dev, "0x%x ", val);
}
dev_info(&adapter->pdev->dev, "\n");
}
- val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD);
- val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD);
+ val = QLCRD32(adapter, QLC_83XX_PORT2_IFB_THRESHOLD, &err);
+ if (err == -EIO)
+ return;
+ val1 = QLCRD32(adapter, QLC_83XX_PORT3_IFB_THRESHOLD, &err);
+ if (err == -EIO)
+ return;
dev_info(&adapter->pdev->dev,
"IFB-Pause Thresholds: Port 2:0x%x, Port 3:0x%x\n",
val, val1);
@@ -1425,7 +1453,7 @@ static void qlcnic_83xx_take_eport_out_of_reset(struct qlcnic_adapter *adapter)
static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
{
u32 heartbeat, peg_status;
- int retries, ret = -EIO;
+ int retries, ret = -EIO, err = 0;
retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
p_dev->heartbeat = QLC_SHARED_REG_RD32(p_dev,
@@ -1453,11 +1481,11 @@ static int qlcnic_83xx_check_heartbeat(struct qlcnic_adapter *p_dev)
"PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
"PEG_NET_4_PC: 0x%x\n", peg_status,
QLC_SHARED_REG_RD32(p_dev, QLCNIC_PEG_HALT_STATUS2),
- QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0),
- QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1),
- QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2),
- QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3),
- QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4));
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_0, &err),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_1, &err),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_2, &err),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_3, &err),
+ QLCRD32(p_dev, QLC_83XX_CRB_PEG_NET_4, &err));
if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
dev_err(&p_dev->pdev->dev,
@@ -1501,18 +1529,22 @@ int qlcnic_83xx_check_hw_status(struct qlcnic_adapter *p_dev)
static int qlcnic_83xx_poll_reg(struct qlcnic_adapter *p_dev, u32 addr,
int duration, u32 mask, u32 status)
{
+ int timeout_error, err = 0;
u32 value;
- int timeout_error;
u8 retries;
- value = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
+ value = QLCRD32(p_dev, addr, &err);
+ if (err == -EIO)
+ return err;
retries = duration / 10;
do {
if ((value & mask) != status) {
timeout_error = 1;
msleep(duration / 10);
- value = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
+ value = QLCRD32(p_dev, addr, &err);
+ if (err == -EIO)
+ return err;
} else {
timeout_error = 0;
break;
@@ -1606,9 +1638,12 @@ int qlcnic_83xx_get_reset_instruction_template(struct qlcnic_adapter *p_dev)
static void qlcnic_83xx_read_write_crb_reg(struct qlcnic_adapter *p_dev,
u32 raddr, u32 waddr)
{
- int value;
+ int err = 0;
+ u32 value;
- value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr);
+ value = QLCRD32(p_dev, raddr, &err);
+ if (err == -EIO)
+ return;
qlcnic_83xx_wrt_reg_indirect(p_dev, waddr, value);
}
@@ -1617,12 +1652,16 @@ static void qlcnic_83xx_rmw_crb_reg(struct qlcnic_adapter *p_dev,
u32 raddr, u32 waddr,
struct qlc_83xx_rmw *p_rmw_hdr)
{
- int value;
+ int err = 0;
+ u32 value;
- if (p_rmw_hdr->index_a)
+ if (p_rmw_hdr->index_a) {
value = p_dev->ahw->reset.array[p_rmw_hdr->index_a];
- else
- value = qlcnic_83xx_rd_reg_indirect(p_dev, raddr);
+ } else {
+ value = QLCRD32(p_dev, raddr, &err);
+ if (err == -EIO)
+ return;
+ }
value &= p_rmw_hdr->mask;
value <<= p_rmw_hdr->shl;
@@ -1675,7 +1714,7 @@ static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev,
long delay;
struct qlc_83xx_entry *entry;
struct qlc_83xx_poll *poll;
- int i;
+ int i, err = 0;
unsigned long arg1, arg2;
poll = (struct qlc_83xx_poll *)((char *)p_hdr +
@@ -1699,10 +1738,12 @@ static void qlcnic_83xx_poll_list(struct qlcnic_adapter *p_dev,
arg1, delay,
poll->mask,
poll->status)){
- qlcnic_83xx_rd_reg_indirect(p_dev,
- arg1);
- qlcnic_83xx_rd_reg_indirect(p_dev,
- arg2);
+ QLCRD32(p_dev, arg1, &err);
+ if (err == -EIO)
+ return;
+ QLCRD32(p_dev, arg2, &err);
+ if (err == -EIO)
+ return;
}
}
}
@@ -1768,7 +1809,7 @@ static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev,
struct qlc_83xx_entry_hdr *p_hdr)
{
long delay;
- int index, i, j;
+ int index, i, j, err;
struct qlc_83xx_quad_entry *entry;
struct qlc_83xx_poll *poll;
unsigned long addr;
@@ -1788,7 +1829,10 @@ static void qlcnic_83xx_poll_read_list(struct qlcnic_adapter *p_dev,
poll->mask, poll->status)){
index = p_dev->ahw->reset.array_index;
addr = entry->dr_addr;
- j = qlcnic_83xx_rd_reg_indirect(p_dev, addr);
+ j = QLCRD32(p_dev, addr, &err);
+ if (err == -EIO)
+ return;
+
p_dev->ahw->reset.array[index++] = j;
if (index == QLC_83XX_MAX_RESET_SEQ_ENTRIES)
@@ -1909,10 +1953,11 @@ static void qlcnic_83xx_init_hw(struct qlcnic_adapter *p_dev)
static int qlcnic_83xx_load_fw_image_from_host(struct qlcnic_adapter *adapter)
{
+ struct qlc_83xx_fw_info *fw_info = adapter->ahw->fw_info;
int err = -EIO;
- if (request_firmware(&adapter->ahw->fw_info.fw,
- QLC_83XX_FW_FILE_NAME, &(adapter->pdev->dev))) {
+ if (request_firmware(&fw_info->fw, fw_info->fw_file_name,
+ &(adapter->pdev->dev))) {
dev_err(&adapter->pdev->dev,
"No file FW image, loading flash FW image.\n");
QLC_SHARED_REG_WR32(adapter, QLCNIC_FW_IMG_VALID,
@@ -1958,36 +2003,6 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
return 0;
}
-/**
-* qlcnic_83xx_config_default_opmode
-*
-* @adapter: adapter structure
-*
-* Configure default driver operating mode
-*
-* Returns: Error code or Success(0)
-* */
-int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
-{
- u32 op_mode;
- struct qlcnic_hardware_context *ahw = adapter->ahw;
-
- qlcnic_get_func_no(adapter);
- op_mode = QLCRDX(ahw, QLC_83XX_DRV_OP_MODE);
-
- if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state))
- op_mode = QLC_83XX_DEFAULT_OPMODE;
-
- if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
- ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
- } else {
- return -EIO;
- }
-
- return 0;
-}
-
int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
{
int err;
@@ -2007,26 +2022,26 @@ int qlcnic_83xx_get_nic_configuration(struct qlcnic_adapter *adapter)
ahw->max_mac_filters = nic_info.max_mac_filters;
ahw->max_mtu = nic_info.max_mtu;
- /* VNIC mode is detected by BIT_23 in capabilities. This bit is also
- * set in case device is SRIOV capable. VNIC and SRIOV are mutually
- * exclusive. So in case of sriov capable device load driver in
- * default mode
+ /* eSwitch capability indicates vNIC mode.
+ * vNIC and SR-IOV are mutually exclusive operational modes.
+ * If SR-IOV capability is detected, the SR-IOV physical function
+ * is initialized in default mode.
+ * SR-IOV virtual function initialization follows a
+ * different code path and opmode.
+ * SR-IOV mode has precedence over vNIC mode.
*/
- if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state)) {
- ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
- return ahw->nic_mode;
- }
+ if (test_bit(__QLCNIC_SRIOV_CAPABLE, &adapter->state))
+ return QLC_83XX_DEFAULT_OPMODE;
- if (ahw->capabilities & BIT_23)
- ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE;
- else
- ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
+ if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ return QLC_83XX_VIRTUAL_NIC_MODE;
- return ahw->nic_mode;
+ return QLC_83XX_DEFAULT_OPMODE;
}
int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
int ret;
ret = qlcnic_83xx_get_nic_configuration(adapter);
@@ -2034,11 +2049,16 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
return -EIO;
if (ret == QLC_83XX_VIRTUAL_NIC_MODE) {
+ ahw->nic_mode = QLC_83XX_VIRTUAL_NIC_MODE;
if (qlcnic_83xx_config_vnic_opmode(adapter))
return -EIO;
- } else if (ret == QLC_83XX_DEFAULT_MODE) {
- if (qlcnic_83xx_config_default_opmode(adapter))
- return -EIO;
+
+ } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
+ ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
+ adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
+ ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
+ } else {
+ return -EIO;
}
return 0;
@@ -2107,43 +2127,173 @@ static void qlcnic_83xx_clear_function_resources(struct qlcnic_adapter *adapter)
}
}
-int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
+static int qlcnic_83xx_get_fw_info(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct pci_dev *pdev = adapter->pdev;
+ struct qlc_83xx_fw_info *fw_info;
+ int err = 0;
- if (qlcnic_sriov_vf_check(adapter))
- return qlcnic_sriov_vf_init(adapter, pci_using_dac);
+ ahw->fw_info = kzalloc(sizeof(*fw_info), GFP_KERNEL);
+ if (!ahw->fw_info) {
+ err = -ENOMEM;
+ } else {
+ fw_info = ahw->fw_info;
+ switch (pdev->device) {
+ case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ strncpy(fw_info->fw_file_name, QLC_83XX_FW_FILE_NAME,
+ QLC_FW_FILE_NAME_LEN);
+ break;
+ case PCI_DEVICE_ID_QLOGIC_QLE844X:
+ strncpy(fw_info->fw_file_name, QLC_84XX_FW_FILE_NAME,
+ QLC_FW_FILE_NAME_LEN);
+ break;
+ default:
+ dev_err(&pdev->dev, "%s: Invalid device id\n",
+ __func__);
+ err = -EINVAL;
+ break;
+ }
+ }
- if (qlcnic_83xx_check_hw_status(adapter))
- return -EIO;
+ return err;
+}
- /* Initilaize 83xx mailbox spinlock */
- spin_lock_init(&ahw->mbx_lock);
- set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
- qlcnic_83xx_clear_function_resources(adapter);
+int qlcnic_83xx_init(struct qlcnic_adapter *adapter, int pci_using_dac)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err = 0;
- /* register for NIC IDC AEN Events */
- qlcnic_83xx_register_nic_idc_func(adapter, 1);
+ ahw->msix_supported = !!qlcnic_use_msi_x;
+ err = qlcnic_83xx_init_mailbox_work(adapter);
+ if (err)
+ goto exit;
+
+ if (qlcnic_sriov_vf_check(adapter)) {
+ err = qlcnic_sriov_vf_init(adapter, pci_using_dac);
+ if (err)
+ goto detach_mbx;
+ else
+ return err;
+ }
+
+ err = qlcnic_83xx_check_hw_status(adapter);
+ if (err)
+ goto detach_mbx;
if (!qlcnic_83xx_read_flash_descriptor_table(adapter))
qlcnic_83xx_read_flash_mfg_id(adapter);
- if (qlcnic_83xx_idc_init(adapter))
- return -EIO;
+ err = qlcnic_83xx_get_fw_info(adapter);
+ if (err)
+ goto detach_mbx;
+
+ err = qlcnic_83xx_idc_init(adapter);
+ if (err)
+ goto clear_fw_info;
+
+ err = qlcnic_setup_intr(adapter, 0, 0);
+ if (err) {
+ dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
+ goto disable_intr;
+ }
+
+ err = qlcnic_83xx_setup_mbx_intr(adapter);
+ if (err)
+ goto disable_mbx_intr;
+
+ qlcnic_83xx_clear_function_resources(adapter);
+
+ INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
+
+ /* register for NIC IDC AEN Events */
+ qlcnic_83xx_register_nic_idc_func(adapter, 1);
/* Configure default, SR-IOV or Virtual NIC mode of operation */
- if (qlcnic_83xx_configure_opmode(adapter))
- return -EIO;
+ err = qlcnic_83xx_configure_opmode(adapter);
+ if (err)
+ goto disable_mbx_intr;
/* Perform operating mode specific initialization */
- if (adapter->nic_ops->init_driver(adapter))
- return -EIO;
+ err = adapter->nic_ops->init_driver(adapter);
+ if (err)
+ goto disable_mbx_intr;
- INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
+ if (adapter->dcb && qlcnic_dcb_attach(adapter))
+ qlcnic_clear_dcb_ops(adapter);
/* Periodically monitor device status */
qlcnic_83xx_idc_poll_dev_state(&adapter->fw_work.work);
+ return 0;
+
+disable_mbx_intr:
+ qlcnic_83xx_free_mbx_intr(adapter);
+
+disable_intr:
+ qlcnic_teardown_intr(adapter);
+
+clear_fw_info:
+ kfree(ahw->fw_info);
+
+detach_mbx:
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ qlcnic_83xx_free_mailbox(ahw->mailbox);
+exit:
+ return err;
+}
+
+void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+
+ clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ cancel_delayed_work_sync(&adapter->fw_work);
+
+ if (ahw->nic_mode == QLC_83XX_VIRTUAL_NIC_MODE)
+ qlcnic_83xx_disable_vnic_mode(adapter, 1);
+
+ qlcnic_83xx_idc_detach_driver(adapter);
+ qlcnic_83xx_register_nic_idc_func(adapter, 0);
+
+ cancel_delayed_work_sync(&adapter->idc_aen_work);
+}
+
+int qlcnic_83xx_aer_reset(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ int ret = 0;
+ u32 owner;
+
+ /* Mark the previous IDC state as NEED_RESET so
+ * that state_entry() will perform the reattachment
+ * and bring up the device
+ */
+ idc->prev_state = QLC_83XX_IDC_DEV_NEED_RESET;
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (ahw->pci_func == owner) {
+ ret = qlcnic_83xx_restart_hw(adapter);
+ if (ret < 0)
+ return ret;
+ qlcnic_83xx_idc_clear_registers(adapter, 0);
+ }
+
+ ret = idc->state_entry(adapter);
+ return ret;
+}
+
+void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlc_83xx_idc *idc = &ahw->idc;
+ u32 owner;
+
+ idc->prev_state = QLC_83XX_IDC_DEV_READY;
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (ahw->pci_func == owner)
+ qlcnic_83xx_idc_enter_ready_state(adapter, 0);
- return adapter->ahw->idc.err_code;
+ qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state, 0);
}
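
The recurring change in the hunks above is that raw qlcnic_83xx_rd_reg_indirect() calls are replaced by QLCRD32(adapter, addr, &err) and the caller bails out when err is -EIO, so a failed indirect read can no longer be consumed as if it were valid register data. As a rough illustration of that read-then-check pattern, here is a small standalone C sketch; mock_rd32() and its failure injection are made-up stand-ins, not driver APIs.

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* Illustrative stand-in for an indirect register read that can fail.
 * On failure it reports -EIO through *err and returns a junk value,
 * which is why callers must check *err before trusting the data. */
static uint32_t mock_rd32(uint32_t addr, int *err)
{
        if (addr == 0xdeadbeef) {       /* pretend this window is unmapped */
                *err = -EIO;
                return 0xffffffff;
        }
        *err = 0;
        return addr ^ 0xa5a5a5a5;       /* fake register contents */
}

/* Poll until (value & mask) == status, propagating read errors. */
static int poll_reg(uint32_t addr, int retries, uint32_t mask, uint32_t status)
{
        int err = 0;
        uint32_t value;

        do {
                value = mock_rd32(addr, &err);
                if (err == -EIO)
                        return err;     /* never interpret junk as data */
                if ((value & mask) == status)
                        return 0;
        } while (--retries > 0);

        return -ETIMEDOUT;
}

int main(void)
{
        printf("good addr: %d\n", poll_reg(0x1000, 4, 0xff, 0xa5));
        printf("bad addr:  %d\n", poll_reg(0xdeadbeef, 4, 0xff, 0x00));
        return 0;
}

The same shape repeats in qlcnic_83xx_poll_reg() and the poll/read list helpers: read, check the error out-parameter, and only then act on the value.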
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index 599d1fda52f..0248a4c2f5d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -208,7 +208,7 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
return -EIO;
}
- if (ahw->capabilities & BIT_23)
+ if (ahw->capabilities & QLC_83XX_ESWITCH_CAPABILITY)
adapter->flags |= QLCNIC_ESWITCH_ENABLED;
else
adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
@@ -239,3 +239,41 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
return 0;
}
+
+static int qlcnic_83xx_get_eswitch_port_info(struct qlcnic_adapter *adapter,
+ int func, int *port_id)
+{
+ struct qlcnic_info nic_info;
+ int err = 0;
+
+ memset(&nic_info, 0, sizeof(struct qlcnic_info));
+
+ err = qlcnic_get_nic_info(adapter, &nic_info, func);
+ if (err)
+ return err;
+
+ if (nic_info.capabilities & QLC_83XX_ESWITCH_CAPABILITY)
+ *port_id = nic_info.phys_port;
+ else
+ err = -EIO;
+
+ return err;
+}
+
+int qlcnic_83xx_enable_port_eswitch(struct qlcnic_adapter *adapter, int func)
+{
+ int id, err = 0;
+
+ err = qlcnic_83xx_get_eswitch_port_info(adapter, func, &id);
+ if (err)
+ return err;
+
+ if (!(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
+ if (!qlcnic_enable_eswitch(adapter, id, 1))
+ adapter->eswitch[id].flags |= QLCNIC_SWITCH_ENABLE;
+ else
+ err = -EIO;
+ }
+
+ return err;
+}
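
qlcnic_83xx_enable_port_eswitch() above follows a query-then-enable-once shape: fetch the physical port for the function, confirm the eSwitch capability bit, and only program the hardware if the per-port QLCNIC_SWITCH_ENABLE flag is not already set. A hedged, self-contained sketch of that idempotent-enable guard, with invented eswitch/flag names, looks like this:

#include <stdio.h>

#define SWITCH_ENABLE 0x1               /* illustrative flag bit */

struct eswitch {
        unsigned int flags;
};

/* Hypothetical enable hook; returns 0 on success like the firmware call. */
static int hw_enable_eswitch(int port_id)
{
        printf("enabling eswitch on port %d\n", port_id);
        return 0;
}

/* Enable the eswitch for a port only once; later calls are no-ops. */
static int enable_port_eswitch(struct eswitch *sw, int port_id)
{
        if (sw[port_id].flags & SWITCH_ENABLE)
                return 0;               /* already enabled, nothing to do */

        if (hw_enable_eswitch(port_id))
                return -1;              /* leave the flag clear on failure */

        sw[port_id].flags |= SWITCH_ENABLE;
        return 0;
}

int main(void)
{
        struct eswitch sw[2] = { {0}, {0} };

        enable_port_eswitch(sw, 0);     /* programs the hardware */
        enable_port_eswitch(sw, 0);     /* no-op the second time */
        return 0;
}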
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 0581a484ceb..86850dd633a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -38,6 +38,9 @@ static const struct qlcnic_mailbox_metadata qlcnic_mbx_tbl[] = {
{QLCNIC_CMD_GET_TEMP_HDR, 4, 1},
{QLCNIC_CMD_82XX_SET_DRV_VER, 4, 1},
{QLCNIC_CMD_GET_LED_STATUS, 4, 2},
+ {QLCNIC_CMD_MQ_TX_CONFIG_INTR, 2, 3},
+ {QLCNIC_CMD_DCB_QUERY_CAP, 1, 2},
+ {QLCNIC_CMD_DCB_QUERY_PARAM, 4, 1},
};
static inline u32 qlcnic_get_cmd_signature(struct qlcnic_hardware_context *ahw)
@@ -104,7 +107,7 @@ static u32
qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
{
u32 rsp;
- int timeout = 0;
+ int timeout = 0, err = 0;
do {
/* give at least 1ms for firmware to respond */
@@ -113,7 +116,7 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
return QLCNIC_CDRP_RSP_TIMEOUT;
- rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
+ rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET, &err);
} while (!QLCNIC_CDRP_IS_RSP(rsp));
return rsp;
@@ -122,7 +125,7 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
- int i;
+ int i, err = 0;
u32 rsp;
u32 signature;
struct pci_dev *pdev = adapter->pdev;
@@ -148,7 +151,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
dev_err(&pdev->dev, "card response timeout.\n");
cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
- cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1));
+ cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1), &err);
switch (cmd->rsp.arg[0]) {
case QLCNIC_RCODE_INVALID_ARGS:
fmt = "CDRP invalid args: [%d]\n";
@@ -171,11 +174,12 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
break;
}
dev_err(&pdev->dev, fmt, cmd->rsp.arg[0]);
+ qlcnic_dump_mbx(adapter, cmd);
} else if (rsp == QLCNIC_CDRP_RSP_OK)
cmd->rsp.arg[0] = QLCNIC_RCODE_SUCCESS;
for (i = 1; i < cmd->rsp.num; i++)
- cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i));
+ cmd->rsp.arg[i] = QLCRD32(adapter, QLCNIC_CDRP_ARG(i), &err);
/* Release semaphore */
qlcnic_api_unlock(adapter);
@@ -210,10 +214,10 @@ int qlcnic_fw_cmd_set_drv_version(struct qlcnic_adapter *adapter, u32 fw_cmd)
if (err) {
dev_info(&adapter->pdev->dev,
"Failed to set driver version in firmware\n");
- return -EIO;
+ err = -EIO;
}
-
- return 0;
+ qlcnic_free_mbx_args(&cmd);
+ return err;
}
int
@@ -243,40 +247,38 @@ qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
{
- void *addr;
- struct qlcnic_hostrq_rx_ctx *prq;
- struct qlcnic_cardrsp_rx_ctx *prsp;
- struct qlcnic_hostrq_rds_ring *prq_rds;
- struct qlcnic_hostrq_sds_ring *prq_sds;
+ struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+ struct net_device *netdev = adapter->netdev;
+ u32 temp_intr_crb_mode, temp_rds_crb_mode;
struct qlcnic_cardrsp_rds_ring *prsp_rds;
struct qlcnic_cardrsp_sds_ring *prsp_sds;
+ struct qlcnic_hostrq_rds_ring *prq_rds;
+ struct qlcnic_hostrq_sds_ring *prq_sds;
struct qlcnic_host_rds_ring *rds_ring;
struct qlcnic_host_sds_ring *sds_ring;
- struct qlcnic_cmd_args cmd;
-
- dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
- u64 phys_addr;
-
+ struct qlcnic_cardrsp_rx_ctx *prsp;
+ struct qlcnic_hostrq_rx_ctx *prq;
u8 i, nrds_rings, nsds_rings;
- u16 temp_u16;
+ struct qlcnic_cmd_args cmd;
size_t rq_size, rsp_size;
u32 cap, reg, val, reg2;
+ u64 phys_addr;
+ u16 temp_u16;
+ void *addr;
int err;
- struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
-
nrds_rings = adapter->max_rds_rings;
nsds_rings = adapter->max_sds_rings;
- rq_size =
- SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
- nsds_rings);
- rsp_size =
- SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
- nsds_rings);
+ rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
+ nsds_rings);
+ rsp_size = SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
+ nsds_rings);
addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
- &hostrq_phys_addr, GFP_KERNEL);
+ &hostrq_phys_addr, GFP_KERNEL);
if (addr == NULL)
return -ENOMEM;
prq = addr;
@@ -295,15 +297,20 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
| QLCNIC_CAP0_VALIDOFF);
cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
- temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
- prq->valid_field_offset = cpu_to_le16(temp_u16);
- prq->txrx_sds_binding = nsds_rings - 1;
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test) {
+ cap |= QLCNIC_CAP0_TX_MULTI;
+ } else {
+ temp_u16 = offsetof(struct qlcnic_hostrq_rx_ctx, msix_handler);
+ prq->valid_field_offset = cpu_to_le16(temp_u16);
+ prq->txrx_sds_binding = nsds_rings - 1;
+ temp_intr_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
+ prq->host_int_crb_mode = cpu_to_le32(temp_intr_crb_mode);
+ temp_rds_crb_mode = QLCNIC_HOST_RDS_CRB_MODE_UNIQUE;
+ prq->host_rds_crb_mode = cpu_to_le32(temp_rds_crb_mode);
+ }
prq->capabilities[0] = cpu_to_le32(cap);
- prq->host_int_crb_mode =
- cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
- prq->host_rds_crb_mode =
- cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);
prq->num_rds_rings = cpu_to_le16(nrds_rings);
prq->num_sds_rings = cpu_to_le16(nsds_rings);
@@ -317,10 +324,8 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
le32_to_cpu(prq->rds_ring_offset));
for (i = 0; i < nrds_rings; i++) {
-
rds_ring = &recv_ctx->rds_rings[i];
rds_ring->producer = 0;
-
prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
prq_rds[i].ring_kind = cpu_to_le32(i);
@@ -331,14 +336,16 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
le32_to_cpu(prq->sds_ring_offset));
for (i = 0; i < nsds_rings; i++) {
-
sds_ring = &recv_ctx->sds_rings[i];
sds_ring->consumer = 0;
memset(sds_ring->desc_head, 0, STATUS_DESC_RINGSIZE(sds_ring));
-
prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
- prq_sds[i].msi_index = cpu_to_le16(i);
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ prq_sds[i].msi_index = cpu_to_le16(ahw->intr_tbl[i].id);
+ else
+ prq_sds[i].msi_index = cpu_to_le16(i);
}
phys_addr = hostrq_phys_addr;
@@ -361,9 +368,8 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
rds_ring = &recv_ctx->rds_rings[i];
-
reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
- rds_ring->crb_rcv_producer = adapter->ahw->pci_base0 + reg;
+ rds_ring->crb_rcv_producer = ahw->pci_base0 + reg;
}
prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
@@ -371,24 +377,30 @@ int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
sds_ring = &recv_ctx->sds_rings[i];
-
reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
- reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
+ reg2 = ahw->intr_tbl[i].src;
+ else
+ reg2 = le32_to_cpu(prsp_sds[i].interrupt_crb);
- sds_ring->crb_sts_consumer = adapter->ahw->pci_base0 + reg;
- sds_ring->crb_intr_mask = adapter->ahw->pci_base0 + reg2;
+ sds_ring->crb_intr_mask = ahw->pci_base0 + reg2;
+ sds_ring->crb_sts_consumer = ahw->pci_base0 + reg;
}
recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
recv_ctx->context_id = le16_to_cpu(prsp->context_id);
recv_ctx->virt_port = prsp->virt_port;
+ netdev_info(netdev, "Rx Context[%d] Created, state 0x%x\n",
+ recv_ctx->context_id, recv_ctx->state);
qlcnic_free_mbx_args(&cmd);
+
out_free_rsp:
dma_free_coherent(&adapter->pdev->dev, rsp_size, prsp,
cardrsp_phys_addr);
out_free_rq:
dma_free_coherent(&adapter->pdev->dev, rq_size, prq, hostrq_phys_addr);
+
return err;
}
@@ -416,16 +428,19 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring,
int ring)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct net_device *netdev = adapter->netdev;
struct qlcnic_hostrq_tx_ctx *prq;
struct qlcnic_hostrq_cds_ring *prq_cds;
struct qlcnic_cardrsp_tx_ctx *prsp;
- void *rq_addr, *rsp_addr;
- size_t rq_size, rsp_size;
- u32 temp;
struct qlcnic_cmd_args cmd;
- int err;
- u64 phys_addr;
- dma_addr_t rq_phys_addr, rsp_phys_addr;
+ u32 temp, intr_mask, temp_int_crb_mode;
+ dma_addr_t rq_phys_addr, rsp_phys_addr;
+ int temp_nsds_rings, index, err;
+ void *rq_addr, *rsp_addr;
+ size_t rq_size, rsp_size;
+ u64 phys_addr;
+ u16 msix_id;
/* reset host resources */
tx_ring->producer = 0;
@@ -433,32 +448,42 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
*(tx_ring->hw_consumer) = 0;
rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
- rq_addr = dma_alloc_coherent(&adapter->pdev->dev, rq_size,
- &rq_phys_addr, GFP_KERNEL | __GFP_ZERO);
+ rq_addr = dma_zalloc_coherent(&adapter->pdev->dev, rq_size,
+ &rq_phys_addr, GFP_KERNEL);
if (!rq_addr)
return -ENOMEM;
rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
- rsp_addr = dma_alloc_coherent(&adapter->pdev->dev, rsp_size,
- &rsp_phys_addr, GFP_KERNEL | __GFP_ZERO);
+ rsp_addr = dma_zalloc_coherent(&adapter->pdev->dev, rsp_size,
+ &rsp_phys_addr, GFP_KERNEL);
if (!rsp_addr) {
err = -ENOMEM;
goto out_free_rq;
}
prq = rq_addr;
-
prsp = rsp_addr;
prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
- QLCNIC_CAP0_LSO);
+ QLCNIC_CAP0_LSO);
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test)
+ temp |= QLCNIC_CAP0_TX_MULTI;
+
prq->capabilities[0] = cpu_to_le32(temp);
- prq->host_int_crb_mode =
- cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
- prq->msi_index = 0;
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test) {
+ temp_nsds_rings = adapter->max_sds_rings;
+ index = temp_nsds_rings + ring;
+ msix_id = ahw->intr_tbl[index].id;
+ prq->msi_index = cpu_to_le16(msix_id);
+ } else {
+ temp_int_crb_mode = QLCNIC_HOST_INT_CRB_MODE_SHARED;
+ prq->host_int_crb_mode = cpu_to_le32(temp_int_crb_mode);
+ prq->msi_index = 0;
+ }
prq->interrupt_ctl = 0;
prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
@@ -480,15 +505,25 @@ int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter,
err = qlcnic_issue_cmd(adapter, &cmd);
if (err == QLCNIC_RCODE_SUCCESS) {
+ tx_ring->state = le32_to_cpu(prsp->host_ctx_state);
temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
tx_ring->crb_cmd_producer = adapter->ahw->pci_base0 + temp;
tx_ring->ctx_id = le16_to_cpu(prsp->context_id);
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ index = adapter->max_sds_rings + ring;
+ intr_mask = ahw->intr_tbl[index].src;
+ tx_ring->crb_intr_mask = ahw->pci_base0 + intr_mask;
+ }
+
+ netdev_info(netdev, "Tx Context[0x%x] Created, state 0x%x\n",
+ tx_ring->ctx_id, tx_ring->state);
} else {
- dev_err(&adapter->pdev->dev,
- "Failed to create tx ctx in firmware%d\n", err);
+ netdev_err(netdev, "Failed to create tx ctx in firmware%d\n",
+ err);
err = -EIO;
}
-
qlcnic_free_mbx_args(&cmd);
out_free_rsp:
@@ -618,6 +653,13 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
}
}
+ if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
+ qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test) {
+ err = qlcnic_82xx_mq_intrpt(dev, 1);
+ if (err)
+ return err;
+ }
+
err = qlcnic_fw_cmd_create_rx_ctx(dev);
if (err)
goto err_out;
@@ -639,13 +681,19 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *dev)
}
set_bit(__QLCNIC_FW_ATTACHED, &dev->state);
+
return 0;
err_out:
+ if (qlcnic_82xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED) &&
+ qlcnic_check_multi_tx(dev) && !dev->ahw->diag_test)
+ qlcnic_82xx_config_intrpt(dev, 0);
+
if (qlcnic_83xx_check(dev) && (dev->flags & QLCNIC_MSIX_ENABLED)) {
if (dev->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
qlcnic_83xx_config_intrpt(dev, 0);
}
+
return err;
}
@@ -659,6 +707,12 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
qlcnic_fw_cmd_del_tx_ctx(adapter,
&adapter->tx_ring[ring]);
+ if (qlcnic_82xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ qlcnic_82xx_config_intrpt(adapter, 0);
+
if (qlcnic_83xx_check(adapter) &&
(adapter->flags & QLCNIC_MSIX_ENABLED)) {
if (adapter->ahw->diag_test != QLCNIC_LOOPBACK_TEST)
@@ -723,8 +777,54 @@ void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
}
}
+int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_cmd_args cmd;
+ u32 type, val;
+ int i, err = 0;
+
+ for (i = 0; i < ahw->num_msix; i++) {
+ qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_MQ_TX_CONFIG_INTR);
+ type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
+ val = type | (ahw->intr_tbl[i].type << 4);
+ if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
+ val |= (ahw->intr_tbl[i].id << 16);
+ cmd.req.arg[1] = val;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ netdev_err(netdev, "Failed to %s interrupts %d\n",
+ op_type == QLCNIC_INTRPT_ADD ? "Add" :
+ "Delete", err);
+ qlcnic_free_mbx_args(&cmd);
+ return err;
+ }
+ val = cmd.rsp.arg[1];
+ if (LSB(val)) {
+ netdev_info(netdev,
+ "failed to configure interrupt for %d\n",
+ ahw->intr_tbl[i].id);
+ continue;
+ }
+ if (op_type) {
+ ahw->intr_tbl[i].id = MSW(val);
+ ahw->intr_tbl[i].enabled = 1;
+ ahw->intr_tbl[i].src = cmd.rsp.arg[2];
+ } else {
+ ahw->intr_tbl[i].id = i;
+ ahw->intr_tbl[i].enabled = 0;
+ ahw->intr_tbl[i].src = 0;
+ }
+ qlcnic_free_mbx_args(&cmd);
+ }
+
+ return err;
+}
-int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
+ u8 function)
{
int err, i;
struct qlcnic_cmd_args cmd;
@@ -734,7 +834,7 @@ int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac)
if (err)
return err;
- cmd.req.arg[1] = adapter->ahw->pci_func | BIT_8;
+ cmd.req.arg[1] = function | BIT_8;
err = qlcnic_issue_cmd(adapter, &cmd);
if (err == QLCNIC_RCODE_SUCCESS) {
@@ -765,8 +865,8 @@ int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args cmd;
size_t nic_size = sizeof(struct qlcnic_info_le);
- nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
- &nic_dma_t, GFP_KERNEL | __GFP_ZERO);
+ nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
if (!nic_info_addr)
return -ENOMEM;
@@ -819,8 +919,8 @@ int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *adapter,
if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
return err;
- nic_info_addr = dma_alloc_coherent(&adapter->pdev->dev, nic_size,
- &nic_dma_t, GFP_KERNEL | __GFP_ZERO);
+ nic_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, nic_size,
+ &nic_dma_t, GFP_KERNEL);
if (!nic_info_addr)
return -ENOMEM;
@@ -872,9 +972,8 @@ int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *adapter,
size_t npar_size = sizeof(struct qlcnic_pci_info_le);
size_t pci_size = npar_size * QLCNIC_MAX_PCI_FUNC;
- pci_info_addr = dma_alloc_coherent(&adapter->pdev->dev, pci_size,
- &pci_info_dma_t,
- GFP_KERNEL | __GFP_ZERO);
+ pci_info_addr = dma_zalloc_coherent(&adapter->pdev->dev, pci_size,
+ &pci_info_dma_t, GFP_KERNEL);
if (!pci_info_addr)
return -ENOMEM;
@@ -974,8 +1073,8 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
return -EIO;
}
- stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
- &stats_dma_t, GFP_KERNEL | __GFP_ZERO);
+ stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
+ &stats_dma_t, GFP_KERNEL);
if (!stats_addr)
return -ENOMEM;
@@ -1030,8 +1129,8 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
if (mac_stats == NULL)
return -ENOMEM;
- stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
- &stats_dma_t, GFP_KERNEL | __GFP_ZERO);
+ stats_addr = dma_zalloc_coherent(&adapter->pdev->dev, stats_size,
+ &stats_dma_t, GFP_KERNEL);
if (!stats_addr)
return -ENOMEM;
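
In qlcnic_82xx_config_intrpt() above, each interrupt is described to firmware by packing the operation, the interrupt type and the MSI-X vector id into a single mailbox word (val = type | (intr_type << 4), plus id << 16 for MSI-X). The standalone sketch below just demonstrates that packing and how the fields unpack again; the field widths in the comments are inferred from the shifts for illustration, not a documented register layout.

#include <stdio.h>
#include <stdint.h>

/* Sketch-only names: op in the low nibble, interrupt type in bits 4-7,
 * MSI-X vector id in the upper half-word. */
#define OP_ADD          0x1
#define OP_DEL          0x2
#define TYPE_MSIX       0x3

static uint32_t pack_intr_req(uint32_t op, uint32_t type, uint32_t id)
{
        uint32_t val = op | (type << 4);

        if (type == TYPE_MSIX)
                val |= id << 16;        /* only MSI-X carries a vector id */
        return val;
}

int main(void)
{
        uint32_t val = pack_intr_req(OP_ADD, TYPE_MSIX, 5);

        printf("req word 0x%08x: op %u type %u id %u\n", val,
               val & 0xf, (val >> 4) & 0xf, val >> 16);
        return 0;
}

The driver then reads the firmware reply the other way round: the low byte of the response word signals per-vector failure, and the upper half-word carries the id it should store back into the interrupt table.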
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
new file mode 100644
index 00000000000..d62d5ce432e
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -0,0 +1,1179 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#include <linux/types.h>
+#include "qlcnic.h"
+
+#define QLC_DCB_NUM_PARAM 3
+#define QLC_DCB_LOCAL_IDX 0
+#define QLC_DCB_OPER_IDX 1
+#define QLC_DCB_PEER_IDX 2
+
+#define QLC_DCB_GET_MAP(V) (1 << V)
+
+#define QLC_DCB_AEN_BIT 0x2
+#define QLC_DCB_FW_VER 0x2
+#define QLC_DCB_MAX_TC 0x8
+#define QLC_DCB_MAX_APP 0x8
+#define QLC_DCB_MAX_PRIO QLC_DCB_MAX_TC
+#define QLC_DCB_MAX_PG QLC_DCB_MAX_TC
+
+#define QLC_DCB_TSA_SUPPORT(V) (V & 0x1)
+#define QLC_DCB_ETS_SUPPORT(V) ((V >> 1) & 0x1)
+#define QLC_DCB_VERSION_SUPPORT(V) ((V >> 2) & 0xf)
+#define QLC_DCB_MAX_NUM_TC(V) ((V >> 20) & 0xf)
+#define QLC_DCB_MAX_NUM_ETS_TC(V) ((V >> 24) & 0xf)
+#define QLC_DCB_MAX_NUM_PFC_TC(V) ((V >> 28) & 0xf)
+#define QLC_DCB_GET_TC_PRIO(X, P) ((X >> (P * 3)) & 0x7)
+#define QLC_DCB_GET_PGID_PRIO(X, P) ((X >> (P * 8)) & 0xff)
+#define QLC_DCB_GET_BWPER_PG(X, P) ((X >> (P * 8)) & 0xff)
+#define QLC_DCB_GET_TSA_PG(X, P) ((X >> (P * 8)) & 0xff)
+#define QLC_DCB_GET_PFC_PRIO(X, P) (((X >> 24) >> P) & 0x1)
+#define QLC_DCB_GET_PROTO_ID_APP(X) ((X >> 8) & 0xffff)
+#define QLC_DCB_GET_SELECTOR_APP(X) (X & 0xff)
+
+#define QLC_DCB_LOCAL_PARAM_FWID 0x3
+#define QLC_DCB_OPER_PARAM_FWID 0x1
+#define QLC_DCB_PEER_PARAM_FWID 0x2
+
+#define QLC_83XX_DCB_GET_NUMAPP(X) ((X >> 2) & 0xf)
+#define QLC_83XX_DCB_TSA_VALID(X) (X & 0x1)
+#define QLC_83XX_DCB_PFC_VALID(X) ((X >> 1) & 0x1)
+#define QLC_83XX_DCB_GET_PRIOMAP_APP(X) (X >> 24)
+
+#define QLC_82XX_DCB_GET_NUMAPP(X) ((X >> 12) & 0xf)
+#define QLC_82XX_DCB_TSA_VALID(X) ((X >> 4) & 0x1)
+#define QLC_82XX_DCB_PFC_VALID(X) ((X >> 5) & 0x1)
+#define QLC_82XX_DCB_GET_PRIOVAL_APP(X) ((X >> 24) & 0x7)
+#define QLC_82XX_DCB_GET_PRIOMAP_APP(X) (1 << X)
+#define QLC_82XX_DCB_PRIO_TC_MAP (0x76543210)
+
+static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops;
+
+static void qlcnic_dcb_aen_work(struct work_struct *);
+static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *);
+
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *);
+static void __qlcnic_dcb_free(struct qlcnic_adapter *);
+static int __qlcnic_dcb_attach(struct qlcnic_adapter *);
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *, char *);
+static void __qlcnic_dcb_get_info(struct qlcnic_adapter *);
+
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *);
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
+static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
+
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *);
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *, char *, u8);
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *);
+static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *, bool);
+static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *, void *);
+
+struct qlcnic_dcb_capability {
+ bool tsa_capability;
+ bool ets_capability;
+ u8 max_num_tc;
+ u8 max_ets_tc;
+ u8 max_pfc_tc;
+ u8 dcb_capability;
+};
+
+struct qlcnic_dcb_param {
+ u32 hdr_prio_pfc_map[2];
+ u32 prio_pg_map[2];
+ u32 pg_bw_map[2];
+ u32 pg_tsa_map[2];
+ u32 app[QLC_DCB_MAX_APP];
+};
+
+struct qlcnic_dcb_mbx_params {
+ /* 1st local, 2nd operational, 3rd remote */
+ struct qlcnic_dcb_param type[3];
+ u32 prio_tc_map;
+};
+
+struct qlcnic_82xx_dcb_param_mbx_le {
+ __le32 hdr_prio_pfc_map[2];
+ __le32 prio_pg_map[2];
+ __le32 pg_bw_map[2];
+ __le32 pg_tsa_map[2];
+ __le32 app[QLC_DCB_MAX_APP];
+};
+
+enum qlcnic_dcb_selector {
+ QLC_SELECTOR_DEF = 0x0,
+ QLC_SELECTOR_ETHER,
+ QLC_SELECTOR_TCP,
+ QLC_SELECTOR_UDP,
+};
+
+enum qlcnic_dcb_prio_type {
+ QLC_PRIO_NONE = 0,
+ QLC_PRIO_GROUP,
+ QLC_PRIO_LINK,
+};
+
+enum qlcnic_dcb_pfc_type {
+ QLC_PFC_DISABLED = 0,
+ QLC_PFC_FULL,
+ QLC_PFC_TX,
+ QLC_PFC_RX
+};
+
+struct qlcnic_dcb_prio_cfg {
+ bool valid;
+ enum qlcnic_dcb_pfc_type pfc_type;
+};
+
+struct qlcnic_dcb_pg_cfg {
+ bool valid;
+ u8 total_bw_percent; /* % of link/port BW */
+ u8 prio_count;
+ u8 tsa_type;
+};
+
+struct qlcnic_dcb_tc_cfg {
+ bool valid;
+ struct qlcnic_dcb_prio_cfg prio_cfg[QLC_DCB_MAX_PRIO];
+ enum qlcnic_dcb_prio_type prio_type; /* always prio_link */
+ u8 link_percent; /* % of link bandwidth */
+ u8 bwg_percent; /* % of BWG's bandwidth */
+ u8 up_tc_map;
+ u8 pgid;
+};
+
+struct qlcnic_dcb_app {
+ bool valid;
+ enum qlcnic_dcb_selector selector;
+ u16 protocol;
+ u8 priority;
+};
+
+struct qlcnic_dcb_cee {
+ struct qlcnic_dcb_tc_cfg tc_cfg[QLC_DCB_MAX_TC];
+ struct qlcnic_dcb_pg_cfg pg_cfg[QLC_DCB_MAX_PG];
+ struct qlcnic_dcb_app app[QLC_DCB_MAX_APP];
+ bool tc_param_valid;
+ bool pfc_mode_enable;
+};
+
+struct qlcnic_dcb_cfg {
+ /* 0 - local, 1 - operational, 2 - remote */
+ struct qlcnic_dcb_cee type[QLC_DCB_NUM_PARAM];
+ struct qlcnic_dcb_capability capability;
+ u32 version;
+};
+
+static struct qlcnic_dcb_ops qlcnic_83xx_dcb_ops = {
+ .init_dcbnl_ops = __qlcnic_init_dcbnl_ops,
+ .free = __qlcnic_dcb_free,
+ .attach = __qlcnic_dcb_attach,
+ .query_hw_capability = __qlcnic_dcb_query_hw_capability,
+ .get_info = __qlcnic_dcb_get_info,
+
+ .get_hw_capability = qlcnic_83xx_dcb_get_hw_capability,
+ .query_cee_param = qlcnic_83xx_dcb_query_cee_param,
+ .get_cee_cfg = qlcnic_83xx_dcb_get_cee_cfg,
+ .register_aen = qlcnic_83xx_dcb_register_aen,
+ .handle_aen = qlcnic_83xx_dcb_handle_aen,
+};
+
+static struct qlcnic_dcb_ops qlcnic_82xx_dcb_ops = {
+ .init_dcbnl_ops = __qlcnic_init_dcbnl_ops,
+ .free = __qlcnic_dcb_free,
+ .attach = __qlcnic_dcb_attach,
+ .query_hw_capability = __qlcnic_dcb_query_hw_capability,
+ .get_info = __qlcnic_dcb_get_info,
+
+ .get_hw_capability = qlcnic_82xx_dcb_get_hw_capability,
+ .query_cee_param = qlcnic_82xx_dcb_query_cee_param,
+ .get_cee_cfg = qlcnic_82xx_dcb_get_cee_cfg,
+ .handle_aen = qlcnic_82xx_dcb_handle_aen,
+};
+
+static u8 qlcnic_dcb_get_num_app(struct qlcnic_adapter *adapter, u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_GET_NUMAPP(val);
+ else
+ return QLC_83XX_DCB_GET_NUMAPP(val);
+}
+
+static inline u8 qlcnic_dcb_pfc_hdr_valid(struct qlcnic_adapter *adapter,
+ u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_PFC_VALID(val);
+ else
+ return QLC_83XX_DCB_PFC_VALID(val);
+}
+
+static inline u8 qlcnic_dcb_tsa_hdr_valid(struct qlcnic_adapter *adapter,
+ u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_TSA_VALID(val);
+ else
+ return QLC_83XX_DCB_TSA_VALID(val);
+}
+
+static inline u8 qlcnic_dcb_get_prio_map_app(struct qlcnic_adapter *adapter,
+ u32 val)
+{
+ if (qlcnic_82xx_check(adapter))
+ return QLC_82XX_DCB_GET_PRIOMAP_APP(val);
+ else
+ return QLC_83XX_DCB_GET_PRIOMAP_APP(val);
+}
+
+static int qlcnic_dcb_prio_count(u8 up_tc_map)
+{
+ int j;
+
+ for (j = 0; j < QLC_DCB_MAX_TC; j++)
+ if (up_tc_map & QLC_DCB_GET_MAP(j))
+ break;
+
+ return j;
+}
+
+static inline void __qlcnic_init_dcbnl_ops(struct qlcnic_adapter *adapter)
+{
+ if (test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ adapter->netdev->dcbnl_ops = &qlcnic_dcbnl_ops;
+}
+
+static void qlcnic_set_dcb_ops(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_82xx_check(adapter))
+ adapter->dcb->ops = &qlcnic_82xx_dcb_ops;
+ else if (qlcnic_83xx_check(adapter))
+ adapter->dcb->ops = &qlcnic_83xx_dcb_ops;
+}
+
+int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb;
+
+ dcb = kzalloc(sizeof(struct qlcnic_dcb), GFP_ATOMIC);
+ if (!dcb)
+ return -ENOMEM;
+
+ adapter->dcb = dcb;
+ dcb->adapter = adapter;
+ qlcnic_set_dcb_ops(adapter);
+
+ return 0;
+}
+
+static void __qlcnic_dcb_free(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (!dcb)
+ return;
+
+ qlcnic_dcb_register_aen(adapter, 0);
+
+ while (test_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+ usleep_range(10000, 11000);
+
+ cancel_delayed_work_sync(&dcb->aen_work);
+
+ if (dcb->wq) {
+ destroy_workqueue(dcb->wq);
+ dcb->wq = NULL;
+ }
+
+ kfree(dcb->cfg);
+ dcb->cfg = NULL;
+ kfree(dcb->param);
+ dcb->param = NULL;
+ kfree(dcb);
+ adapter->dcb = NULL;
+}
+
+static void __qlcnic_dcb_get_info(struct qlcnic_adapter *adapter)
+{
+ qlcnic_dcb_get_hw_capability(adapter);
+ qlcnic_dcb_get_cee_cfg(adapter);
+ qlcnic_dcb_register_aen(adapter, 1);
+}
+
+static int __qlcnic_dcb_attach(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+ int err = 0;
+
+ INIT_DELAYED_WORK(&dcb->aen_work, qlcnic_dcb_aen_work);
+
+ dcb->wq = create_singlethread_workqueue("qlcnic-dcb");
+ if (!dcb->wq) {
+ dev_err(&adapter->pdev->dev,
+ "DCB workqueue allocation failed. DCB will be disabled\n");
+ return -1;
+ }
+
+ dcb->cfg = kzalloc(sizeof(struct qlcnic_dcb_cfg), GFP_ATOMIC);
+ if (!dcb->cfg) {
+ err = -ENOMEM;
+ goto out_free_wq;
+ }
+
+ dcb->param = kzalloc(sizeof(struct qlcnic_dcb_mbx_params), GFP_ATOMIC);
+ if (!dcb->param) {
+ err = -ENOMEM;
+ goto out_free_cfg;
+ }
+
+ qlcnic_dcb_get_info(adapter);
+
+ return 0;
+out_free_cfg:
+ kfree(dcb->cfg);
+ dcb->cfg = NULL;
+
+out_free_wq:
+ destroy_workqueue(dcb->wq);
+ dcb->wq = NULL;
+
+ return err;
+}
+
+static int __qlcnic_dcb_query_hw_capability(struct qlcnic_adapter *adapter,
+ char *buf)
+{
+ struct qlcnic_cmd_args cmd;
+ u32 mbx_out;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_CAP);
+ if (err)
+ return err;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to query DCBX capability, err %d\n", err);
+ } else {
+ mbx_out = cmd.rsp.arg[1];
+ if (buf)
+ memcpy(buf, &mbx_out, sizeof(u32));
+ }
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static int __qlcnic_dcb_get_capability(struct qlcnic_adapter *adapter, u32 *val)
+{
+ struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
+ u32 mbx_out;
+ int err;
+
+ memset(cap, 0, sizeof(struct qlcnic_dcb_capability));
+
+ err = qlcnic_dcb_query_hw_capability(adapter, (char *)val);
+ if (err)
+ return err;
+
+ mbx_out = *val;
+ if (QLC_DCB_TSA_SUPPORT(mbx_out))
+ cap->tsa_capability = true;
+
+ if (QLC_DCB_ETS_SUPPORT(mbx_out))
+ cap->ets_capability = true;
+
+ cap->max_num_tc = QLC_DCB_MAX_NUM_TC(mbx_out);
+ cap->max_ets_tc = QLC_DCB_MAX_NUM_ETS_TC(mbx_out);
+ cap->max_pfc_tc = QLC_DCB_MAX_NUM_PFC_TC(mbx_out);
+
+ if (cap->max_num_tc > QLC_DCB_MAX_TC ||
+ cap->max_ets_tc > cap->max_num_tc ||
+ cap->max_pfc_tc > cap->max_num_tc) {
+ dev_err(&adapter->pdev->dev, "Invalid DCB configuration\n");
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static int qlcnic_82xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+ struct qlcnic_dcb_capability *cap;
+ u32 mbx_out;
+ int err;
+
+ err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
+ if (err)
+ return err;
+
+ cap = &cfg->capability;
+ cap->dcb_capability = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_LLD_MANAGED;
+
+ if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
+ set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+
+ return err;
+}
+
+static int qlcnic_82xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
+ char *buf, u8 type)
+{
+ u16 size = sizeof(struct qlcnic_82xx_dcb_param_mbx_le);
+ struct qlcnic_82xx_dcb_param_mbx_le *prsp_le;
+ struct device *dev = &adapter->pdev->dev;
+ dma_addr_t cardrsp_phys_addr;
+ struct qlcnic_dcb_param rsp;
+ struct qlcnic_cmd_args cmd;
+ u64 phys_addr;
+ void *addr;
+ int err, i;
+
+ switch (type) {
+ case QLC_DCB_LOCAL_PARAM_FWID:
+ case QLC_DCB_OPER_PARAM_FWID:
+ case QLC_DCB_PEER_PARAM_FWID:
+ break;
+ default:
+ dev_err(dev, "Invalid parameter type %d\n", type);
+ return -EINVAL;
+ }
+
+ addr = dma_alloc_coherent(&adapter->pdev->dev, size, &cardrsp_phys_addr,
+ GFP_KERNEL);
+ if (addr == NULL)
+ return -ENOMEM;
+
+ prsp_le = addr;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM);
+ if (err)
+ goto out_free_rsp;
+
+ phys_addr = cardrsp_phys_addr;
+ cmd.req.arg[1] = size | (type << 16);
+ cmd.req.arg[2] = MSD(phys_addr);
+ cmd.req.arg[3] = LSD(phys_addr);
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(dev, "Failed to query DCBX parameter, err %d\n", err);
+ goto out;
+ }
+
+ memset(&rsp, 0, sizeof(struct qlcnic_dcb_param));
+ rsp.hdr_prio_pfc_map[0] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[0]);
+ rsp.hdr_prio_pfc_map[1] = le32_to_cpu(prsp_le->hdr_prio_pfc_map[1]);
+ rsp.prio_pg_map[0] = le32_to_cpu(prsp_le->prio_pg_map[0]);
+ rsp.prio_pg_map[1] = le32_to_cpu(prsp_le->prio_pg_map[1]);
+ rsp.pg_bw_map[0] = le32_to_cpu(prsp_le->pg_bw_map[0]);
+ rsp.pg_bw_map[1] = le32_to_cpu(prsp_le->pg_bw_map[1]);
+ rsp.pg_tsa_map[0] = le32_to_cpu(prsp_le->pg_tsa_map[0]);
+ rsp.pg_tsa_map[1] = le32_to_cpu(prsp_le->pg_tsa_map[1]);
+
+ for (i = 0; i < QLC_DCB_MAX_APP; i++)
+ rsp.app[i] = le32_to_cpu(prsp_le->app[i]);
+
+ if (buf)
+ memcpy(buf, &rsp, size);
+out:
+ qlcnic_free_mbx_args(&cmd);
+
+out_free_rsp:
+ dma_free_coherent(&adapter->pdev->dev, size, addr, cardrsp_phys_addr);
+
+ return err;
+}
+
+static int qlcnic_82xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb_mbx_params *mbx;
+ int err;
+
+ mbx = adapter->dcb->param;
+ if (!mbx)
+ return 0;
+
+ err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[0],
+ QLC_DCB_LOCAL_PARAM_FWID);
+ if (err)
+ return err;
+
+ err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[1],
+ QLC_DCB_OPER_PARAM_FWID);
+ if (err)
+ return err;
+
+ err = qlcnic_dcb_query_cee_param(adapter, (char *)&mbx->type[2],
+ QLC_DCB_PEER_PARAM_FWID);
+ if (err)
+ return err;
+
+ mbx->prio_tc_map = QLC_82XX_DCB_PRIO_TC_MAP;
+
+ qlcnic_dcb_data_cee_param_map(adapter);
+
+ return err;
+}
+
+static void qlcnic_dcb_aen_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_dcb *dcb;
+
+ dcb = container_of(work, struct qlcnic_dcb, aen_work.work);
+ adapter = dcb->adapter;
+
+ qlcnic_dcb_get_cee_cfg(adapter);
+ clear_bit(__QLCNIC_DCB_IN_AEN, &adapter->state);
+}
+
+static void qlcnic_82xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
+ void *data)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+ return;
+
+ queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
+}
+
+static int qlcnic_83xx_dcb_get_hw_capability(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb_capability *cap = &adapter->dcb->cfg->capability;
+ u32 mbx_out;
+ int err;
+
+ err = __qlcnic_dcb_get_capability(adapter, &mbx_out);
+ if (err)
+ return err;
+
+ if (mbx_out & BIT_2)
+ cap->dcb_capability = DCB_CAP_DCBX_VER_CEE;
+ if (mbx_out & BIT_3)
+ cap->dcb_capability |= DCB_CAP_DCBX_VER_IEEE;
+ if (cap->dcb_capability)
+ cap->dcb_capability |= DCB_CAP_DCBX_LLD_MANAGED;
+
+ if (cap->dcb_capability && cap->tsa_capability && cap->ets_capability)
+ set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+
+ return err;
+}
+
+static int qlcnic_83xx_dcb_query_cee_param(struct qlcnic_adapter *adapter,
+ char *buf, u8 idx)
+{
+ struct qlcnic_dcb_mbx_params mbx_out;
+ int err, i, j, k, max_app, size;
+ struct qlcnic_dcb_param *each;
+ struct qlcnic_cmd_args cmd;
+ u32 val;
+ char *p;
+
+ size = 0;
+ memset(&mbx_out, 0, sizeof(struct qlcnic_dcb_mbx_params));
+ memset(buf, 0, sizeof(struct qlcnic_dcb_mbx_params));
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_DCB_QUERY_PARAM);
+ if (err)
+ return err;
+
+ cmd.req.arg[0] |= QLC_DCB_FW_VER << 29;
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to query DCBX param, err %d\n", err);
+ goto out;
+ }
+
+ mbx_out.prio_tc_map = cmd.rsp.arg[1];
+ p = memcpy(buf, &mbx_out, sizeof(u32));
+ k = 2;
+ p += sizeof(u32);
+
+ for (j = 0; j < QLC_DCB_NUM_PARAM; j++) {
+ each = &mbx_out.type[j];
+
+ each->hdr_prio_pfc_map[0] = cmd.rsp.arg[k++];
+ each->hdr_prio_pfc_map[1] = cmd.rsp.arg[k++];
+ each->prio_pg_map[0] = cmd.rsp.arg[k++];
+ each->prio_pg_map[1] = cmd.rsp.arg[k++];
+ each->pg_bw_map[0] = cmd.rsp.arg[k++];
+ each->pg_bw_map[1] = cmd.rsp.arg[k++];
+ each->pg_tsa_map[0] = cmd.rsp.arg[k++];
+ each->pg_tsa_map[1] = cmd.rsp.arg[k++];
+ val = each->hdr_prio_pfc_map[0];
+
+ max_app = qlcnic_dcb_get_num_app(adapter, val);
+ for (i = 0; i < max_app; i++)
+ each->app[i] = cmd.rsp.arg[i + k];
+
+ size = 16 * sizeof(u32);
+ memcpy(p, &each->hdr_prio_pfc_map[0], size);
+ p += size;
+ if (j == 0)
+ k = 18;
+ else
+ k = 34;
+ }
+out:
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static int qlcnic_83xx_dcb_get_cee_cfg(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+ int err;
+
+ err = qlcnic_dcb_query_cee_param(adapter, (char *)dcb->param, 0);
+ if (err)
+ return err;
+
+ qlcnic_dcb_data_cee_param_map(adapter);
+
+ return err;
+}
+
+static int qlcnic_83xx_dcb_register_aen(struct qlcnic_adapter *adapter,
+ bool flag)
+{
+ u8 val = (flag ? QLCNIC_CMD_INIT_NIC_FUNC : QLCNIC_CMD_STOP_NIC_FUNC);
+ struct qlcnic_cmd_args cmd;
+ int err;
+
+ err = qlcnic_alloc_mbx_args(&cmd, adapter, val);
+ if (err)
+ return err;
+
+ cmd.req.arg[1] = QLC_DCB_AEN_BIT;
+
+ err = qlcnic_issue_cmd(adapter, &cmd);
+ if (err)
+ dev_err(&adapter->pdev->dev, "Failed to %s DCBX AEN, err %d\n",
+ (flag ? "register" : "unregister"), err);
+
+ qlcnic_free_mbx_args(&cmd);
+
+ return err;
+}
+
+static void qlcnic_83xx_dcb_handle_aen(struct qlcnic_adapter *adapter,
+ void *data)
+{
+ struct qlcnic_dcb *dcb = adapter->dcb;
+ u32 *val = data;
+
+ if (test_and_set_bit(__QLCNIC_DCB_IN_AEN, &adapter->state))
+ return;
+
+ if (*val & BIT_8)
+ set_bit(__QLCNIC_DCB_STATE, &adapter->state);
+ else
+ clear_bit(__QLCNIC_DCB_STATE, &adapter->state);
+
+ queue_delayed_work(dcb->wq, &dcb->aen_work, 0);
+}
+
+static void qlcnic_dcb_fill_cee_tc_params(struct qlcnic_dcb_mbx_params *mbx,
+ struct qlcnic_dcb_param *each,
+ struct qlcnic_dcb_cee *type)
+{
+ struct qlcnic_dcb_tc_cfg *tc_cfg;
+ u8 i, tc, pgid;
+
+ for (i = 0; i < QLC_DCB_MAX_PRIO; i++) {
+ tc = QLC_DCB_GET_TC_PRIO(mbx->prio_tc_map, i);
+ tc_cfg = &type->tc_cfg[tc];
+ tc_cfg->valid = true;
+ tc_cfg->up_tc_map |= QLC_DCB_GET_MAP(i);
+
+ if (QLC_DCB_GET_PFC_PRIO(each->hdr_prio_pfc_map[1], i) &&
+ type->pfc_mode_enable) {
+ tc_cfg->prio_cfg[i].valid = true;
+ tc_cfg->prio_cfg[i].pfc_type = QLC_PFC_FULL;
+ }
+
+ if (i < 4)
+ pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[0], i);
+ else
+ pgid = QLC_DCB_GET_PGID_PRIO(each->prio_pg_map[1], i);
+
+ tc_cfg->pgid = pgid;
+
+ tc_cfg->prio_type = QLC_PRIO_LINK;
+ type->pg_cfg[tc_cfg->pgid].prio_count++;
+ }
+}
+
+static void qlcnic_dcb_fill_cee_pg_params(struct qlcnic_dcb_param *each,
+ struct qlcnic_dcb_cee *type)
+{
+ struct qlcnic_dcb_pg_cfg *pg_cfg;
+ u8 i, tsa, bw_per;
+
+ for (i = 0; i < QLC_DCB_MAX_PG; i++) {
+ pg_cfg = &type->pg_cfg[i];
+ pg_cfg->valid = true;
+
+ if (i < 4) {
+ bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[0], i);
+ tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[0], i);
+ } else {
+ bw_per = QLC_DCB_GET_BWPER_PG(each->pg_bw_map[1], i);
+ tsa = QLC_DCB_GET_TSA_PG(each->pg_tsa_map[1], i);
+ }
+
+ pg_cfg->total_bw_percent = bw_per;
+ pg_cfg->tsa_type = tsa;
+ }
+}
+
+static void
+qlcnic_dcb_fill_cee_app_params(struct qlcnic_adapter *adapter, u8 idx,
+ struct qlcnic_dcb_param *each,
+ struct qlcnic_dcb_cee *type)
+{
+ struct qlcnic_dcb_app *app;
+ u8 i, num_app, map, cnt;
+ struct dcb_app new_app;
+
+ num_app = qlcnic_dcb_get_num_app(adapter, each->hdr_prio_pfc_map[0]);
+ for (i = 0; i < num_app; i++) {
+ app = &type->app[i];
+ app->valid = true;
+
+ /* Only for CEE (-1) */
+ app->selector = QLC_DCB_GET_SELECTOR_APP(each->app[i]) - 1;
+ new_app.selector = app->selector;
+ app->protocol = QLC_DCB_GET_PROTO_ID_APP(each->app[i]);
+ new_app.protocol = app->protocol;
+ map = qlcnic_dcb_get_prio_map_app(adapter, each->app[i]);
+ cnt = qlcnic_dcb_prio_count(map);
+
+ if (cnt >= QLC_DCB_MAX_TC)
+ cnt = 0;
+
+ app->priority = cnt;
+ new_app.priority = cnt;
+
+ if (idx == QLC_DCB_OPER_IDX && adapter->netdev->dcbnl_ops)
+ dcb_setapp(adapter->netdev, &new_app);
+ }
+}
+
+static void qlcnic_dcb_map_cee_params(struct qlcnic_adapter *adapter, u8 idx)
+{
+ struct qlcnic_dcb_mbx_params *mbx = adapter->dcb->param;
+ struct qlcnic_dcb_param *each = &mbx->type[idx];
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+ struct qlcnic_dcb_cee *type = &cfg->type[idx];
+
+ type->tc_param_valid = false;
+ type->pfc_mode_enable = false;
+ memset(type->tc_cfg, 0,
+ sizeof(struct qlcnic_dcb_tc_cfg) * QLC_DCB_MAX_TC);
+ memset(type->pg_cfg, 0,
+ sizeof(struct qlcnic_dcb_pg_cfg) * QLC_DCB_MAX_TC);
+
+ if (qlcnic_dcb_pfc_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) &&
+ cfg->capability.max_pfc_tc)
+ type->pfc_mode_enable = true;
+
+ if (qlcnic_dcb_tsa_hdr_valid(adapter, each->hdr_prio_pfc_map[0]) &&
+ cfg->capability.max_ets_tc)
+ type->tc_param_valid = true;
+
+ qlcnic_dcb_fill_cee_tc_params(mbx, each, type);
+ qlcnic_dcb_fill_cee_pg_params(each, type);
+ qlcnic_dcb_fill_cee_app_params(adapter, idx, each, type);
+}
+
+static void qlcnic_dcb_data_cee_param_map(struct qlcnic_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < QLC_DCB_NUM_PARAM; i++)
+ qlcnic_dcb_map_cee_params(adapter, i);
+
+ dcbnl_cee_notify(adapter->netdev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+}
+
+static u8 qlcnic_dcb_get_state(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ return test_bit(__QLCNIC_DCB_STATE, &adapter->state);
+}
+
+static void qlcnic_dcb_get_perm_hw_addr(struct net_device *netdev, u8 *addr)
+{
+ memcpy(addr, netdev->dev_addr, netdev->addr_len);
+}
+
+static void
+qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio,
+ u8 *pgid, u8 *bw_per, u8 *up_tc_map)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_tc_cfg *tc_cfg, *temp;
+ struct qlcnic_dcb_cee *type;
+ u8 i, cnt, pg;
+
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+ *prio = *pgid = *bw_per = *up_tc_map = 0;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+ !type->tc_param_valid)
+ return;
+
+ if (tc < 0 || (tc > QLC_DCB_MAX_TC))
+ return;
+
+ tc_cfg = &type->tc_cfg[tc];
+ if (!tc_cfg->valid)
+ return;
+
+ *pgid = tc_cfg->pgid;
+ *prio = tc_cfg->prio_type;
+ *up_tc_map = tc_cfg->up_tc_map;
+ pg = *pgid;
+
+ for (i = 0, cnt = 0; i < QLC_DCB_MAX_TC; i++) {
+ temp = &type->tc_cfg[i];
+ if (temp->valid && (pg == temp->pgid))
+ cnt++;
+ }
+
+ tc_cfg->bwg_percent = (100 / cnt);
+ *bw_per = tc_cfg->bwg_percent;
+}
+
+static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
+ u8 *bw_pct)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_pg_cfg *pgcfg;
+ struct qlcnic_dcb_cee *type;
+
+ *bw_pct = 0;
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+ !type->tc_param_valid)
+ return;
+
+ if (pgid < 0 || pgid > QLC_DCB_MAX_PG)
+ return;
+
+ pgcfg = &type->pg_cfg[pgid];
+ if (!pgcfg->valid)
+ return;
+
+ *bw_pct = pgcfg->total_bw_percent;
+}
+
+static void qlcnic_dcb_get_pfc_cfg(struct net_device *netdev, int prio,
+ u8 *setting)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_tc_cfg *tc_cfg;
+ u8 val = QLC_DCB_GET_MAP(prio);
+ struct qlcnic_dcb_cee *type;
+ u8 i;
+
+ *setting = 0;
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state) ||
+ !type->pfc_mode_enable)
+ return;
+
+ for (i = 0; i < QLC_DCB_MAX_TC; i++) {
+ tc_cfg = &type->tc_cfg[i];
+ if (!tc_cfg->valid)
+ continue;
+
+ if ((val & tc_cfg->up_tc_map) && (tc_cfg->prio_cfg[prio].valid))
+ *setting = tc_cfg->prio_cfg[prio].pfc_type;
+ }
+}
+
+static u8 qlcnic_dcb_get_capability(struct net_device *netdev, int capid,
+ u8 *cap)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ case DCB_CAP_ATTR_UP2TC:
+ case DCB_CAP_ATTR_PFC:
+ case DCB_CAP_ATTR_GSP:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 0x80; /* 8 priorities for PGs */
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = adapter->dcb->cfg->capability.dcb_capability;
+ break;
+ default:
+ *cap = false;
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return -EINVAL;
+
+ switch (attr) {
+ case DCB_NUMTCS_ATTR_PG:
+ *num = cfg->capability.max_ets_tc;
+ return 0;
+ case DCB_NUMTCS_ATTR_PFC:
+ *num = cfg->capability.max_pfc_tc;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static u8 qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct dcb_app app = {
+ .selector = idtype,
+ .protocol = id,
+ };
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ return dcb_getapp(netdev, &app);
+}
+
+static u8 qlcnic_dcb_get_pfc_state(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb *dcb = adapter->dcb;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ return dcb->cfg->type[QLC_DCB_OPER_IDX].pfc_mode_enable;
+}
+
+static u8 qlcnic_dcb_get_dcbx(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ return cfg->capability.dcb_capability;
+}
+
+static u8 qlcnic_dcb_get_feat_cfg(struct net_device *netdev, int fid, u8 *flag)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *type;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 1;
+
+ type = &adapter->dcb->cfg->type[QLC_DCB_OPER_IDX];
+ *flag = 0;
+
+ switch (fid) {
+ case DCB_FEATCFG_ATTR_PG:
+ if (type->tc_param_valid)
+ *flag |= DCB_FEATCFG_ENABLE;
+ else
+ *flag |= DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ if (type->pfc_mode_enable) {
+ if (type->tc_cfg[0].prio_cfg[0].pfc_type)
+ *flag |= DCB_FEATCFG_ENABLE;
+ } else {
+ *flag |= DCB_FEATCFG_ERROR;
+ }
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ *flag |= DCB_FEATCFG_ENABLE;
+ break;
+ default:
+ netdev_err(netdev, "Invalid Feature ID %d\n", fid);
+ return 1;
+ }
+
+ return 0;
+}
+
+static inline void
+qlcnic_dcb_get_pg_tc_cfg_rx(struct net_device *netdev, int prio, u8 *prio_type,
+ u8 *pgid, u8 *bw_pct, u8 *up_map)
+{
+ *prio_type = *pgid = *bw_pct = *up_map = 0;
+}
+
+static inline void
+qlcnic_dcb_get_pg_bwg_cfg_rx(struct net_device *netdev, int pgid, u8 *bw_pct)
+{
+ *bw_pct = 0;
+}
+
+static int qlcnic_dcb_peer_app_info(struct net_device *netdev,
+ struct dcb_peer_app_info *info,
+ u16 *app_count)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *peer;
+ int i;
+
+ *app_count = 0;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0; i < QLC_DCB_MAX_APP; i++) {
+ if (peer->app[i].valid)
+ (*app_count)++;
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_peer_app_table(struct net_device *netdev,
+ struct dcb_app *table)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *peer;
+ struct qlcnic_dcb_app *app;
+ int i, j;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0, j = 0; i < QLC_DCB_MAX_APP; i++) {
+ app = &peer->app[i];
+ if (!app->valid)
+ continue;
+
+ table[j].selector = app->selector;
+ table[j].priority = app->priority;
+ table[j++].protocol = app->protocol;
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_cee_peer_get_pg(struct net_device *netdev,
+ struct cee_pg *pg)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cee *peer;
+ u8 i, j, k, map;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0, j = 0; i < QLC_DCB_MAX_PG; i++) {
+ if (!peer->pg_cfg[i].valid)
+ continue;
+
+ pg->pg_bw[j] = peer->pg_cfg[i].total_bw_percent;
+
+ for (k = 0; k < QLC_DCB_MAX_TC; k++) {
+ if (peer->tc_cfg[i].valid &&
+ (peer->tc_cfg[i].pgid == i)) {
+ map = peer->tc_cfg[i].up_tc_map;
+ pg->prio_pg[j++] = map;
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int qlcnic_dcb_cee_peer_get_pfc(struct net_device *netdev,
+ struct cee_pfc *pfc)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_dcb_cfg *cfg = adapter->dcb->cfg;
+ struct qlcnic_dcb_tc_cfg *tc;
+ struct qlcnic_dcb_cee *peer;
+ u8 i, setting, prio;
+
+ pfc->pfc_en = 0;
+
+ if (!test_bit(__QLCNIC_DCB_STATE, &adapter->state))
+ return 0;
+
+ peer = &cfg->type[QLC_DCB_PEER_IDX];
+
+ for (i = 0; i < QLC_DCB_MAX_TC; i++) {
+ tc = &peer->tc_cfg[i];
+ prio = qlcnic_dcb_prio_count(tc->up_tc_map);
+
+ setting = 0;
+ qlcnic_dcb_get_pfc_cfg(netdev, prio, &setting);
+ if (setting)
+ pfc->pfc_en |= QLC_DCB_GET_MAP(i);
+ }
+
+ pfc->tcs_supported = cfg->capability.max_pfc_tc;
+
+ return 0;
+}
+
+static const struct dcbnl_rtnl_ops qlcnic_dcbnl_ops = {
+ .getstate = qlcnic_dcb_get_state,
+ .getpermhwaddr = qlcnic_dcb_get_perm_hw_addr,
+ .getpgtccfgtx = qlcnic_dcb_get_pg_tc_cfg_tx,
+ .getpgbwgcfgtx = qlcnic_dcb_get_pg_bwg_cfg_tx,
+ .getpfccfg = qlcnic_dcb_get_pfc_cfg,
+ .getcap = qlcnic_dcb_get_capability,
+ .getnumtcs = qlcnic_dcb_get_num_tcs,
+ .getapp = qlcnic_dcb_get_app,
+ .getpfcstate = qlcnic_dcb_get_pfc_state,
+ .getdcbx = qlcnic_dcb_get_dcbx,
+ .getfeatcfg = qlcnic_dcb_get_feat_cfg,
+
+ .getpgtccfgrx = qlcnic_dcb_get_pg_tc_cfg_rx,
+ .getpgbwgcfgrx = qlcnic_dcb_get_pg_bwg_cfg_rx,
+
+ .peer_getappinfo = qlcnic_dcb_peer_app_info,
+ .peer_getapptable = qlcnic_dcb_peer_app_table,
+ .cee_peer_getpg = qlcnic_dcb_cee_peer_get_pg,
+ .cee_peer_getpfc = qlcnic_dcb_cee_peer_get_pfc,
+};
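
Illustration only (not part of the patch): the hunk above fills in a read-only struct dcbnl_rtnl_ops table for the CEE peer queries. The sketch below shows how such a table is typically attached to a net_device; the example_ names and the callback body are hypothetical, while netdev->dcbnl_ops and the getstate signature follow the in-kernel dcbnl API.

#include <linux/netdevice.h>
#include <net/dcbnl.h>

static u8 example_dcb_getstate(struct net_device *netdev)
{
	return 1;			/* report DCB as enabled */
}

static const struct dcbnl_rtnl_ops example_dcbnl_ops = {
	.getstate = example_dcb_getstate,
	/* remaining callbacks are filled in the same way */
};

static void example_init_dcbnl_ops(struct net_device *netdev)
{
	netdev->dcbnl_ops = &example_dcbnl_ops;	/* consumed by net/dcb/dcbnl.c */
}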
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
new file mode 100644
index 00000000000..b87ce9fb503
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -0,0 +1,41 @@
+/*
+ * QLogic qlcnic NIC Driver
+ * Copyright (c) 2009-2013 QLogic Corporation
+ *
+ * See LICENSE.qlcnic for copyright and licensing details.
+ */
+
+#ifndef __QLCNIC_DCBX_H
+#define __QLCNIC_DCBX_H
+
+void qlcnic_clear_dcb_ops(struct qlcnic_adapter *);
+
+#ifdef CONFIG_QLCNIC_DCB
+int __qlcnic_register_dcb(struct qlcnic_adapter *);
+#else
+static inline int __qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+{ return 0; }
+#endif
+
+struct qlcnic_dcb_ops {
+ void (*init_dcbnl_ops) (struct qlcnic_adapter *);
+ void (*free) (struct qlcnic_adapter *);
+ int (*attach) (struct qlcnic_adapter *);
+ int (*query_hw_capability) (struct qlcnic_adapter *, char *);
+ int (*get_hw_capability) (struct qlcnic_adapter *);
+ void (*get_info) (struct qlcnic_adapter *);
+ int (*query_cee_param) (struct qlcnic_adapter *, char *, u8);
+ int (*get_cee_cfg) (struct qlcnic_adapter *);
+ int (*register_aen) (struct qlcnic_adapter *, bool);
+ void (*handle_aen) (struct qlcnic_adapter *, void *);
+};
+
+struct qlcnic_dcb {
+ struct qlcnic_dcb_mbx_params *param;
+ struct qlcnic_adapter *adapter;
+ struct delayed_work aen_work;
+ struct workqueue_struct *wq;
+ struct qlcnic_dcb_ops *ops;
+ struct qlcnic_dcb_cfg *cfg;
+};
+#endif
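
The new header keeps the 82xx/83xx DCB implementations behind a small ops vtable. Purely as an illustration, callers would typically dispatch through that indirection as sketched below; the wrapper name is hypothetical, the fields are the ones declared above.

static inline int example_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
{
	if (dcb && dcb->ops && dcb->ops->get_hw_capability)
		return dcb->ops->get_hw_capability(dcb->adapter);

	return 0;			/* no DCB implementation attached */
}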
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 700a46324d0..4d7ad0074d1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -125,6 +125,14 @@ static const char qlcnic_83xx_mac_stats_strings[][ETH_GSTRING_LEN] = {
};
#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
+
+static const char qlcnic_tx_ring_stats_strings[][ETH_GSTRING_LEN] = {
+ "xmit_on",
+ "xmit_off",
+ "xmit_called",
+ "xmit_finished",
+};
+
static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
"ctx_rx_bytes",
"ctx_rx_pkts",
@@ -150,6 +158,7 @@ static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
"Link_Test_on_offline",
"Interrupt_Test_offline",
"Internal_Loopback_offline",
+ "External_Loopback_offline",
"EEPROM_Test_offline"
};
@@ -266,7 +275,7 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
u32 speed, reg;
- int check_sfp_module = 0;
+ int check_sfp_module = 0, err = 0;
u16 pcifn = ahw->pci_func;
/* read which mode */
@@ -289,7 +298,7 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
} else if (adapter->ahw->port_type == QLCNIC_XGBE) {
u32 val = 0;
- val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
+ val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR, &err);
if (val == QLCNIC_PORT_MODE_802_3_AP) {
ecmd->supported = SUPPORTED_1000baseT_Full;
@@ -300,9 +309,13 @@ int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
}
if (netif_running(adapter->netdev) && ahw->has_link_events) {
- reg = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn));
- speed = P3P_LINK_SPEED_VAL(pcifn, reg);
- ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
+ if (ahw->linkup) {
+ reg = QLCRD32(adapter,
+ P3P_LINK_SPEED_REG(pcifn), &err);
+ speed = P3P_LINK_SPEED_VAL(pcifn, reg);
+ ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
+ }
+
ethtool_cmd_speed_set(ecmd, ahw->link_speed);
ecmd->autoneg = ahw->link_autoneg;
ecmd->duplex = ahw->link_duplex;
@@ -463,13 +476,14 @@ static int qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
static int qlcnic_82xx_get_registers(struct qlcnic_adapter *adapter,
u32 *regs_buff)
{
- int i, j = 0;
+ int i, j = 0, err = 0;
for (i = QLCNIC_DEV_INFO_SIZE + 1; diag_registers[j] != -1; j++, i++)
regs_buff[i] = QLC_SHARED_REG_RD32(adapter, diag_registers[j]);
j = 0;
while (ext_diag_registers[j] != -1)
- regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++]);
+ regs_buff[i++] = QLCRD32(adapter, ext_diag_registers[j++],
+ &err);
return i;
}
@@ -519,13 +533,16 @@ qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
static u32 qlcnic_test_link(struct net_device *dev)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int err = 0;
u32 val;
if (qlcnic_83xx_check(adapter)) {
val = qlcnic_83xx_test_link(adapter);
return (val & 1) ? 0 : 1;
}
- val = QLCRD32(adapter, CRB_XG_STATE_P3P);
+ val = QLCRD32(adapter, CRB_XG_STATE_P3P, &err);
+ if (err == -EIO)
+ return err;
val = XG_LINK_STATE_P3P(adapter->ahw->pci_func, val);
return (val == XG_LINK_UP_P3P) ? 0 : 1;
}
@@ -621,15 +638,15 @@ qlcnic_set_ringparam(struct net_device *dev,
static void qlcnic_get_channels(struct net_device *dev,
struct ethtool_channels *channel)
{
- int min;
struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int min;
min = min_t(int, adapter->ahw->max_rx_ques, num_online_cpus());
channel->max_rx = rounddown_pow_of_two(min);
- channel->max_tx = adapter->ahw->max_tx_ques;
+ channel->max_tx = min_t(int, QLCNIC_MAX_TX_RINGS, num_online_cpus());
channel->rx_count = adapter->max_sds_rings;
- channel->tx_count = adapter->ahw->max_tx_ques;
+ channel->tx_count = adapter->max_drv_tx_rings;
}
static int qlcnic_set_channels(struct net_device *dev,
@@ -637,18 +654,27 @@ static int qlcnic_set_channels(struct net_device *dev,
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
int err;
+ int txq = 0;
- if (channel->other_count || channel->combined_count ||
- channel->tx_count != channel->max_tx)
+ if (channel->other_count || channel->combined_count)
return -EINVAL;
- err = qlcnic_validate_max_rss(adapter, channel->rx_count);
- if (err)
- return err;
+ if (channel->rx_count) {
+ err = qlcnic_validate_max_rss(adapter, channel->rx_count);
+ if (err)
+ return err;
+ }
- err = qlcnic_set_max_rss(adapter, channel->rx_count, 0);
- netdev_info(dev, "allocated 0x%x sds rings\n",
- adapter->max_sds_rings);
+ if (channel->tx_count) {
+ err = qlcnic_validate_max_tx_rings(adapter, channel->tx_count);
+ if (err)
+ return err;
+ txq = channel->tx_count;
+ }
+
+ err = qlcnic_set_max_rss(adapter, channel->rx_count, txq);
+ netdev_info(dev, "allocated 0x%x sds rings and 0x%x tx rings\n",
+ adapter->max_sds_rings, adapter->max_drv_tx_rings);
return err;
}
@@ -658,6 +684,7 @@ qlcnic_get_pauseparam(struct net_device *netdev,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int port = adapter->ahw->physical_port;
+ int err = 0;
__u32 val;
if (qlcnic_83xx_check(adapter)) {
@@ -668,9 +695,13 @@ qlcnic_get_pauseparam(struct net_device *netdev,
if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
return;
/* get flow control settings */
- val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err);
+ if (err == -EIO)
+ return;
pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
- val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err);
+ if (err == -EIO)
+ return;
switch (port) {
case 0:
pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
@@ -690,7 +721,9 @@ qlcnic_get_pauseparam(struct net_device *netdev,
if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
return;
pause->rx_pause = 1;
- val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+ val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err);
+ if (err == -EIO)
+ return;
if (port == 0)
pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
else
@@ -707,6 +740,7 @@ qlcnic_set_pauseparam(struct net_device *netdev,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
int port = adapter->ahw->physical_port;
+ int err = 0;
__u32 val;
if (qlcnic_83xx_check(adapter))
@@ -717,7 +751,9 @@ qlcnic_set_pauseparam(struct net_device *netdev,
if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
return -EIO;
/* set flow control */
- val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), &err);
+ if (err == -EIO)
+ return err;
if (pause->rx_pause)
qlcnic_gb_rx_flowctl(val);
@@ -728,7 +764,9 @@ qlcnic_set_pauseparam(struct net_device *netdev,
val);
QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), val);
/* set autoneg */
- val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, &err);
+ if (err == -EIO)
+ return err;
switch (port) {
case 0:
if (pause->tx_pause)
@@ -764,7 +802,9 @@ qlcnic_set_pauseparam(struct net_device *netdev,
if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
return -EIO;
- val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+ val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, &err);
+ if (err == -EIO)
+ return err;
if (port == 0) {
if (pause->tx_pause)
qlcnic_xg_unset_xg0_mask(val);
@@ -788,11 +828,14 @@ static int qlcnic_reg_test(struct net_device *dev)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 data_read;
+ int err = 0;
if (qlcnic_83xx_check(adapter))
return qlcnic_83xx_reg_test(adapter);
- data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
+ data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0), &err);
+ if (err == -EIO)
+ return err;
if ((data_read & 0xffff) != adapter->pdev->vendor)
return 1;
@@ -867,6 +910,7 @@ free_diag_res:
clear_diag_irq:
adapter->max_sds_rings = max_sds_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
return ret;
}
@@ -940,6 +984,7 @@ int qlcnic_do_lb_test(struct qlcnic_adapter *adapter, u8 mode)
int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int max_drv_tx_rings = adapter->max_drv_tx_rings;
int max_sds_rings = adapter->max_sds_rings;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -980,9 +1025,9 @@ int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
msleep(500);
qlcnic_process_rcv_ring_diag(sds_ring);
if (loop++ > QLCNIC_ILB_MAX_RCV_LOOP) {
- netdev_info(netdev, "firmware didnt respond to loopback"
- " configure request\n");
- ret = -QLCNIC_FW_NOT_RESPOND;
+ netdev_info(netdev,
+ "Firmware didn't sent link up event to loopback request\n");
+ ret = -ETIMEDOUT;
goto free_res;
} else if (adapter->ahw->diag_cnt) {
ret = adapter->ahw->diag_cnt;
@@ -999,6 +1044,7 @@ int qlcnic_loopback_test(struct net_device *netdev, u8 mode)
clear_it:
adapter->max_sds_rings = max_sds_rings;
+ adapter->max_drv_tx_rings = max_drv_tx_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
}
@@ -1026,8 +1072,15 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
if (data[3])
eth_test->flags |= ETH_TEST_FL_FAILED;
- data[4] = qlcnic_eeprom_test(dev);
- if (data[4])
+ if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ data[4] = qlcnic_loopback_test(dev, QLCNIC_ELB_MODE);
+ if (data[4])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+ }
+
+ data[5] = qlcnic_eeprom_test(dev);
+ if (data[5])
eth_test->flags |= ETH_TEST_FL_FAILED;
}
}
@@ -1044,11 +1097,21 @@ qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
+ num_stats = ARRAY_SIZE(qlcnic_tx_ring_stats_strings);
+ for (i = 0; i < adapter->max_drv_tx_rings; i++) {
+ for (index = 0; index < num_stats; index++) {
+ sprintf(data, "tx_ring_%d %s", i,
+ qlcnic_tx_ring_stats_strings[index]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
for (index = 0; index < QLCNIC_STATS_LEN; index++) {
memcpy(data + index * ETH_GSTRING_LEN,
qlcnic_gstrings_stats[index].stat_string,
ETH_GSTRING_LEN);
}
+
if (qlcnic_83xx_check(adapter)) {
num_stats = ARRAY_SIZE(qlcnic_83xx_tx_stats_strings);
for (i = 0; i < num_stats; i++, index++)
@@ -1140,11 +1203,22 @@ static void qlcnic_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_esw_statistics port_stats;
struct qlcnic_mac_statistics mac_stats;
- int index, ret, length, size;
+ int index, ret, length, size, ring;
char *p;
+ memset(data, 0, adapter->max_drv_tx_rings * 4 * sizeof(u64));
+ for (ring = 0, index = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
+ tx_ring = &adapter->tx_ring[ring];
+ *data++ = tx_ring->xmit_on;
+ *data++ = tx_ring->xmit_off;
+ *data++ = tx_ring->xmit_called;
+ *data++ = tx_ring->xmit_finished;
+ }
+ }
memset(data, 0, stats->n_stats * sizeof(u64));
length = QLCNIC_STATS_LEN;
for (index = 0; index < length; index++) {
@@ -1257,17 +1331,20 @@ qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 wol_cfg;
+ int err = 0;
if (qlcnic_83xx_check(adapter))
return;
wol->supported = 0;
wol->wolopts = 0;
- wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
+ if (err == -EIO)
+ return;
if (wol_cfg & (1UL << adapter->portnum))
wol->supported |= WAKE_MAGIC;
- wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
if (wol_cfg & (1UL << adapter->portnum))
wol->wolopts |= WAKE_MAGIC;
}
@@ -1277,17 +1354,22 @@ qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct qlcnic_adapter *adapter = netdev_priv(dev);
u32 wol_cfg;
+ int err = 0;
if (qlcnic_83xx_check(adapter))
return -EOPNOTSUPP;
if (wol->wolopts & ~WAKE_MAGIC)
return -EINVAL;
- wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
+ if (err == -EIO)
+ return err;
if (!(wol_cfg & (1 << adapter->portnum)))
return -EOPNOTSUPP;
- wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
+ if (err == -EIO)
+ return err;
if (wol->wolopts & WAKE_MAGIC)
wol_cfg |= 1UL << adapter->portnum;
else
@@ -1427,6 +1509,68 @@ static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
adapter->ahw->msg_enable = msglvl;
}
+int qlcnic_enable_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ u32 val;
+
+ if (qlcnic_84xx_check(adapter)) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val &= ~QLC_83XX_IDC_DISABLE_FW_DUMP;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+ qlcnic_83xx_unlock_driver(adapter);
+ } else {
+ fw_dump->enable = true;
+ }
+
+ dev_info(&adapter->pdev->dev, "FW dump enabled\n");
+
+ return 0;
+}
+
+static int qlcnic_disable_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ u32 val;
+
+ if (qlcnic_84xx_check(adapter)) {
+ if (qlcnic_83xx_lock_driver(adapter))
+ return -EBUSY;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ val |= QLC_83XX_IDC_DISABLE_FW_DUMP;
+ QLCWRX(adapter->ahw, QLC_83XX_IDC_CTRL, val);
+
+ qlcnic_83xx_unlock_driver(adapter);
+ } else {
+ fw_dump->enable = false;
+ }
+
+ dev_info(&adapter->pdev->dev, "FW dump disabled\n");
+
+ return 0;
+}
+
+bool qlcnic_check_fw_dump_state(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ bool state;
+ u32 val;
+
+ if (qlcnic_84xx_check(adapter)) {
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ state = (val & QLC_83XX_IDC_DISABLE_FW_DUMP) ? false : true;
+ } else {
+ state = fw_dump->enable;
+ }
+
+ return state;
+}
+
static int
qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
{
@@ -1443,7 +1587,7 @@ qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
else
dump->len = 0;
- if (!fw_dump->enable)
+ if (!qlcnic_check_fw_dump_state(adapter))
dump->flag = ETH_FW_DUMP_DISABLE;
else
dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
@@ -1491,77 +1635,111 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
return 0;
}
+static int qlcnic_set_dump_mask(struct qlcnic_adapter *adapter, u32 mask)
+{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ struct net_device *netdev = adapter->netdev;
+
+ if (!qlcnic_check_fw_dump_state(adapter)) {
+ netdev_info(netdev,
+ "Can not change driver mask to 0x%x. FW dump not enabled\n",
+ mask);
+ return -EOPNOTSUPP;
+ }
+
+ fw_dump->tmpl_hdr->drv_cap_mask = mask;
+ netdev_info(netdev, "Driver mask changed to: 0x%x\n", mask);
+ return 0;
+}
+
static int
qlcnic_set_dump(struct net_device *netdev, struct ethtool_dump *val)
{
- int i;
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+ bool valid_mask = false;
+ int i, ret = 0;
u32 state;
switch (val->flag) {
case QLCNIC_FORCE_FW_DUMP_KEY:
if (!fw_dump->tmpl_hdr) {
netdev_err(netdev, "FW dump not supported\n");
- return -ENOTSUPP;
+ ret = -EOPNOTSUPP;
+ break;
}
- if (!fw_dump->enable) {
+
+ if (!qlcnic_check_fw_dump_state(adapter)) {
netdev_info(netdev, "FW dump not enabled\n");
- return 0;
+ ret = -EOPNOTSUPP;
+ break;
}
+
if (fw_dump->clr) {
netdev_info(netdev,
- "Previous dump not cleared, not forcing dump\n");
- return 0;
+ "Previous dump not cleared, not forcing dump\n");
+ break;
}
+
netdev_info(netdev, "Forcing a FW dump\n");
qlcnic_dev_request_reset(adapter, val->flag);
break;
case QLCNIC_DISABLE_FW_DUMP:
- if (fw_dump->enable && fw_dump->tmpl_hdr) {
- netdev_info(netdev, "Disabling FW dump\n");
- fw_dump->enable = 0;
+ if (!fw_dump->tmpl_hdr) {
+ netdev_err(netdev, "FW dump not supported\n");
+ ret = -EOPNOTSUPP;
+ break;
}
- return 0;
+
+ ret = qlcnic_disable_fw_dump_state(adapter);
+ break;
+
case QLCNIC_ENABLE_FW_DUMP:
if (!fw_dump->tmpl_hdr) {
netdev_err(netdev, "FW dump not supported\n");
- return -ENOTSUPP;
- }
- if (!fw_dump->enable) {
- netdev_info(netdev, "Enabling FW dump\n");
- fw_dump->enable = 1;
+ ret = -EOPNOTSUPP;
+ break;
}
- return 0;
+
+ ret = qlcnic_enable_fw_dump_state(adapter);
+ break;
+
case QLCNIC_FORCE_FW_RESET:
netdev_info(netdev, "Forcing a FW reset\n");
qlcnic_dev_request_reset(adapter, val->flag);
adapter->flags &= ~QLCNIC_FW_RESET_OWNER;
- return 0;
+ break;
+
case QLCNIC_SET_QUIESCENT:
case QLCNIC_RESET_QUIESCENT:
- state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ state = QLC_SHARED_REG_RD32(adapter, QLCNIC_CRB_DEV_STATE);
if (state == QLCNIC_DEV_FAILED || (state == QLCNIC_DEV_BADBAD))
netdev_info(netdev, "Device in FAILED state\n");
- return 0;
+ break;
+
default:
if (!fw_dump->tmpl_hdr) {
netdev_err(netdev, "FW dump not supported\n");
- return -ENOTSUPP;
+ ret = -EOPNOTSUPP;
+ break;
}
+
for (i = 0; i < ARRAY_SIZE(qlcnic_fw_dump_level); i++) {
if (val->flag == qlcnic_fw_dump_level[i]) {
- fw_dump->tmpl_hdr->drv_cap_mask =
- val->flag;
- netdev_info(netdev, "Driver mask changed to: 0x%x\n",
- fw_dump->tmpl_hdr->drv_cap_mask);
- return 0;
+ valid_mask = true;
+ break;
}
}
- netdev_info(netdev, "Invalid dump level: 0x%x\n", val->flag);
- return -EINVAL;
+
+ if (valid_mask) {
+ ret = qlcnic_set_dump_mask(adapter, val->flag);
+ } else {
+ netdev_info(netdev, "Invalid dump level: 0x%x\n",
+ val->flag);
+ ret = -EINVAL;
+ }
}
- return 0;
+ return ret;
}
const struct ethtool_ops qlcnic_ethtool_ops = {
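
The ethtool changes above add per-TX-ring counters; the strings emitted by get_strings must line up one-to-one with the u64 values written by get_ethtool_stats. A minimal sketch of that pairing follows, with hypothetical ex_ names and the same ETH_GSTRING_LEN slot layout the patch uses.

#include <linux/ethtool.h>

#define EX_NUM_TX_STATS 4

static const char ex_tx_stat_names[EX_NUM_TX_STATS][ETH_GSTRING_LEN] = {
	"xmit_on", "xmit_off", "xmit_called", "xmit_finished",
};

static void ex_fill_tx_ring_strings(u8 *data, int num_tx_rings)
{
	int ring, i;

	for (ring = 0; ring < num_tx_rings; ring++) {
		for (i = 0; i < EX_NUM_TX_STATS; i++) {
			sprintf(data, "tx_ring_%d %s", ring, ex_tx_stat_names[i]);
			data += ETH_GSTRING_LEN;	/* fixed-width slots */
		}
	}
}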
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 5b5d2edf125..f8adc7b01f1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -317,16 +317,20 @@ static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data)
int
qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
{
- int done = 0, timeout = 0;
+ int timeout = 0;
+ int err = 0;
+ u32 done = 0;
while (!done) {
- done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
+ done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)),
+ &err);
if (done == 1)
break;
if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
dev_err(&adapter->pdev->dev,
"Failed to acquire sem=%d lock; holdby=%d\n",
- sem, id_reg ? QLCRD32(adapter, id_reg) : -1);
+ sem,
+ id_reg ? QLCRD32(adapter, id_reg, &err) : -1);
return -EIO;
}
msleep(1);
@@ -341,19 +345,22 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
void
qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
{
- QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
+ int err = 0;
+
+ QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)), &err);
}
int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
{
+ int err = 0;
u32 data;
if (qlcnic_82xx_check(adapter))
qlcnic_read_window_reg(addr, adapter->ahw->pci_base0, &data);
else {
- data = qlcnic_83xx_rd_reg_indirect(adapter, addr);
- if (data == -EIO)
- return -EIO;
+ data = QLCRD32(adapter, addr, &err);
+ if (err == -EIO)
+ return err;
}
return data;
}
@@ -380,7 +387,7 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return -EIO;
- tx_ring = adapter->tx_ring;
+ tx_ring = &adapter->tx_ring[0];
__netif_tx_lock_bh(tx_ring->txq);
producer = tx_ring->producer;
@@ -516,20 +523,18 @@ void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
if (netdev->flags & IFF_PROMISC) {
if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
mode = VPORT_MISS_MODE_ACCEPT_ALL;
- } else if (netdev->flags & IFF_ALLMULTI) {
- if (netdev_mc_count(netdev) > ahw->max_mc_count) {
- mode = VPORT_MISS_MODE_ACCEPT_MULTI;
- } else if (!netdev_mc_empty(netdev) &&
- !qlcnic_sriov_vf_check(adapter)) {
- netdev_for_each_mc_addr(ha, netdev)
- qlcnic_nic_add_mac(adapter, ha->addr,
- vlan);
- }
- if (mode != VPORT_MISS_MODE_ACCEPT_MULTI &&
- qlcnic_sriov_vf_check(adapter))
- qlcnic_vf_add_mc_list(netdev, vlan);
+ } else if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(netdev) > ahw->max_mc_count)) {
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ } else if (!netdev_mc_empty(netdev) &&
+ !qlcnic_sriov_vf_check(adapter)) {
+ netdev_for_each_mc_addr(ha, netdev)
+ qlcnic_nic_add_mac(adapter, ha->addr, vlan);
}
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_vf_add_mc_list(netdev, vlan);
+
/* configure unicast MAC address, if there is not sufficient space
* to store all the unicast addresses then enable promiscuous mode
*/
@@ -735,6 +740,22 @@ int qlcnic_82xx_clear_lb_mode(struct qlcnic_adapter *adapter, u8 mode)
return 0;
}
+int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *adapter)
+{
+ u8 mac[ETH_ALEN];
+ int ret;
+
+ ret = qlcnic_get_mac_address(adapter, mac,
+ adapter->ahw->physical_port);
+ if (ret)
+ return ret;
+
+ memcpy(adapter->ahw->phys_port_id, mac, ETH_ALEN);
+ adapter->flags |= QLCNIC_HAS_PHYS_PORT_ID;
+
+ return 0;
+}
+
/*
* Send the interrupt coalescing parameter set by ethtool to the card.
*/
@@ -1161,7 +1182,8 @@ int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off,
return -EIO;
}
-int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
+int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off,
+ int *err)
{
unsigned long flags;
int rv;
@@ -1417,7 +1439,7 @@ int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter)
{
- int offset, board_type, magic;
+ int offset, board_type, magic, err = 0;
struct pci_dev *pdev = adapter->pdev;
offset = QLCNIC_FW_MAGIC_OFFSET;
@@ -1437,7 +1459,9 @@ int qlcnic_82xx_get_board_info(struct qlcnic_adapter *adapter)
adapter->ahw->board_type = board_type;
if (board_type == QLCNIC_BRDTYPE_P3P_4_GB_MM) {
- u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
+ u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I, &err);
+ if (err == -EIO)
+ return err;
if ((gpio & 0x8000) == 0)
board_type = QLCNIC_BRDTYPE_P3P_10G_TP;
}
@@ -1477,10 +1501,13 @@ int
qlcnic_wol_supported(struct qlcnic_adapter *adapter)
{
u32 wol_cfg;
+ int err = 0;
- wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV, &err);
if (wol_cfg & (1UL << adapter->portnum)) {
- wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG, &err);
+ if (err == -EIO)
+ return err;
if (wol_cfg & (1 << adapter->portnum))
return 1;
}
@@ -1541,6 +1568,7 @@ void qlcnic_82xx_get_func_no(struct qlcnic_adapter *adapter)
void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
loff_t offset, size_t size)
{
+ int err = 0;
u32 data;
u64 qmdata;
@@ -1548,7 +1576,7 @@ void qlcnic_82xx_read_crb(struct qlcnic_adapter *adapter, char *buf,
qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
memcpy(buf, &qmdata, size);
} else {
- data = QLCRD32(adapter, offset);
+ data = QLCRD32(adapter, offset, &err);
memcpy(buf, &data, size);
}
}
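
Most of the qlcnic_hw.c churn above is mechanical: QLCRD32() now takes an int *err out-parameter so a failed PCI read can be told apart from a register that legitimately reads as all-ones. A hedged sketch of the calling convention the patch converts to; the helper name is hypothetical, QLCRD32 and the -EIO convention are the driver's own.

static int example_read_reg(struct qlcnic_adapter *adapter, ulong off, u32 *val)
{
	int err = 0;

	*val = QLCRD32(adapter, off, &err);	/* value plus error status */
	if (err == -EIO)
		return err;			/* propagate the bus error */

	return 0;
}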
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 2c22504f57a..272c356cf9b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -85,8 +85,11 @@ enum qlcnic_regs {
#define QLCNIC_CMD_GET_TEMP_HDR 0x30
#define QLCNIC_CMD_BC_EVENT_SETUP 0x31
#define QLCNIC_CMD_CONFIG_VPORT 0x32
+#define QLCNIC_CMD_DCB_QUERY_CAP 0x34
+#define QLCNIC_CMD_DCB_QUERY_PARAM 0x35
#define QLCNIC_CMD_GET_MAC_STATS 0x37
#define QLCNIC_CMD_82XX_SET_DRV_VER 0x38
+#define QLCNIC_CMD_MQ_TX_CONFIG_INTR 0x39
#define QLCNIC_CMD_GET_LED_STATUS 0x3C
#define QLCNIC_CMD_CONFIGURE_RSS 0x41
#define QLCNIC_CMD_CONFIG_INTR_COAL 0x43
@@ -122,6 +125,7 @@ enum qlcnic_regs {
#define QLCNIC_MBX_COMP_EVENT 0x8100
#define QLCNIC_MBX_REQUEST_EVENT 0x8101
#define QLCNIC_MBX_TIME_EXTEND_EVENT 0x8102
+#define QLCNIC_MBX_DCBX_CONFIG_CHANGE_EVENT 0x8110
#define QLCNIC_MBX_SFP_INSERT_EVENT 0x8130
#define QLCNIC_MBX_SFP_REMOVE_EVENT 0x8131
@@ -149,12 +153,11 @@ struct ethtool_stats;
struct pci_device_id;
struct qlcnic_host_sds_ring;
struct qlcnic_host_tx_ring;
-struct qlcnic_host_tx_ring;
struct qlcnic_hardware_context;
struct qlcnic_adapter;
int qlcnic_82xx_start_firmware(struct qlcnic_adapter *);
-int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong);
+int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *);
int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
int qlcnic_82xx_config_hw_lro(struct qlcnic_adapter *adapter, int);
int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
@@ -173,10 +176,12 @@ int qlcnic_82xx_set_lb_mode(struct qlcnic_adapter *, u8);
void qlcnic_82xx_write_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_82xx_read_crb(struct qlcnic_adapter *, char *, loff_t, size_t);
void qlcnic_82xx_dev_request_reset(struct qlcnic_adapter *, u32);
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8);
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *, u8, int);
irqreturn_t qlcnic_82xx_clear_legacy_intr(struct qlcnic_adapter *);
int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *);
+int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *, int);
+int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *, u8);
int qlcnic_82xx_fw_cmd_create_rx_ctx(struct qlcnic_adapter *);
int qlcnic_82xx_fw_cmd_create_tx_ctx(struct qlcnic_adapter *,
struct qlcnic_host_tx_ring *tx_ring, int);
@@ -184,7 +189,7 @@ void qlcnic_82xx_fw_cmd_del_rx_ctx(struct qlcnic_adapter *);
void qlcnic_82xx_fw_cmd_del_tx_ctx(struct qlcnic_adapter *,
struct qlcnic_host_tx_ring *);
int qlcnic_82xx_sre_macaddr_change(struct qlcnic_adapter *, u8 *, u16, u8);
-int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*);
+int qlcnic_82xx_get_mac_address(struct qlcnic_adapter *, u8*, u8);
int qlcnic_82xx_get_nic_info(struct qlcnic_adapter *, struct qlcnic_info *, u8);
int qlcnic_82xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
int qlcnic_82xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info*);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index d28336fc65a..66c26cf7a2b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -127,12 +127,12 @@ void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
}
}
-void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
{
struct qlcnic_cmd_buffer *cmd_buf;
struct qlcnic_skb_frag *buffrag;
int i, j;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
cmd_buf = tx_ring->cmd_buf_arr;
for (i = 0; i < tx_ring->num_desc; i++) {
@@ -142,7 +142,7 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
buffrag->length, PCI_DMA_TODEVICE);
buffrag->dma = 0ULL;
}
- for (j = 0; j < cmd_buf->frag_count; j++) {
+ for (j = 1; j < cmd_buf->frag_count; j++) {
buffrag++;
if (buffrag->dma) {
pci_unmap_page(adapter->pdev, buffrag->dma,
@@ -241,7 +241,13 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
sds_ring->irq = adapter->msix_entries[ring].vector;
sds_ring->adapter = adapter;
sds_ring->num_desc = adapter->num_rxd;
-
+ if (qlcnic_82xx_check(adapter)) {
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ sds_ring->tx_ring = &adapter->tx_ring[ring];
+ else
+ sds_ring->tx_ring = &adapter->tx_ring[0];
+ }
for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
INIT_LIST_HEAD(&sds_ring->free_list[i]);
}
@@ -286,10 +292,11 @@ static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
{
long timeout = 0;
long done = 0;
+ int err = 0;
cond_resched();
while (done == 0) {
- done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
+ done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS, &err);
done &= 2;
if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
dev_err(&adapter->pdev->dev,
@@ -304,6 +311,8 @@ static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
static int do_rom_fast_read(struct qlcnic_adapter *adapter,
u32 addr, u32 *valp)
{
+ int err = 0;
+
QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
@@ -317,7 +326,9 @@ static int do_rom_fast_read(struct qlcnic_adapter *adapter,
udelay(10);
QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
- *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA);
+ *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA, &err);
+ if (err == -EIO)
+ return err;
return 0;
}
@@ -369,11 +380,11 @@ int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, u32 addr, u32 *valp)
int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
{
- int addr, val;
+ int addr, err = 0;
int i, n, init_delay;
struct crb_addr_pair *buf;
unsigned offset;
- u32 off;
+ u32 off, val;
struct pci_dev *pdev = adapter->pdev;
QLC_SHARED_REG_WR32(adapter, QLCNIC_CMDPEG_STATE, 0);
@@ -402,7 +413,9 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
QLCWR32(adapter, QLCNIC_CRB_NIU + 0xb0000, 0x00);
/* halt sre */
- val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000);
+ val = QLCRD32(adapter, QLCNIC_CRB_SRE + 0x1000, &err);
+ if (err == -EIO)
+ return err;
QLCWR32(adapter, QLCNIC_CRB_SRE + 0x1000, val & (~(0x1)));
/* halt epg */
@@ -719,10 +732,12 @@ qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
static int
qlcnic_has_mn(struct qlcnic_adapter *adapter)
{
- u32 capability;
- capability = 0;
+ u32 capability = 0;
+ int err = 0;
- capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
+ capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY, &err);
+ if (err == -EIO)
+ return err;
if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
return 1;
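
The j = 1 change in qlcnic_release_tx_buffers() above reflects that fragment 0 of each command buffer is the single-mapped header fragment and is already unmapped just before the loop, so only the page-mapped fragments remain. A sketch of the resulting unmap split, assuming the driver's frag_array/frag_count layout; the helper name is hypothetical.

static void ex_unmap_cmd_buf(struct pci_dev *pdev,
			     struct qlcnic_cmd_buffer *cmd_buf)
{
	struct qlcnic_skb_frag *frag = cmd_buf->frag_array;
	int j;

	if (frag->dma) {
		pci_unmap_single(pdev, frag->dma, frag->length,
				 PCI_DMA_TODEVICE);	/* header fragment */
		frag->dma = 0ULL;
	}

	for (j = 1; j < cmd_buf->frag_count; j++) {	/* page fragments only */
		frag++;
		if (frag->dma) {
			pci_unmap_page(pdev, frag->dma, frag->length,
				       PCI_DMA_TODEVICE);
			frag->dma = 0ULL;
		}
	}
}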
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 2fe15c591b3..11b4bb83b93 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -127,6 +127,23 @@
struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *,
struct qlcnic_host_rds_ring *, u16, u16);
+inline void qlcnic_enable_tx_intr(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ writel(0x0, tx_ring->crb_intr_mask);
+}
+
+
+static inline void qlcnic_disable_tx_int(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test)
+ writel(1, tx_ring->crb_intr_mask);
+}
+
inline void qlcnic_83xx_enable_tx_intr(struct qlcnic_adapter *adapter,
struct qlcnic_host_tx_ring *tx_ring)
{
@@ -147,10 +164,7 @@ static inline u8 qlcnic_mac_hash(u64 mac)
static inline u32 qlcnic_get_ref_handle(struct qlcnic_adapter *adapter,
u16 handle, u8 ring_id)
{
- unsigned short device = adapter->pdev->device;
-
- if ((device == PCI_DEVICE_ID_QLOGIC_QLE834X) ||
- (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X))
+ if (qlcnic_83xx_check(adapter))
return handle | (ring_id << 15);
else
return handle;
@@ -161,36 +175,68 @@ static inline int qlcnic_82xx_is_lb_pkt(u64 sts_data)
return (qlcnic_get_sts_status(sts_data) == STATUS_CKSUM_LOOP) ? 1 : 0;
}
+static void qlcnic_delete_rx_list_mac(struct qlcnic_adapter *adapter,
+ struct qlcnic_filter *fil,
+ void *addr, u16 vlan_id)
+{
+ int ret;
+ u8 op;
+
+ op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
+ ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
+ if (ret)
+ return;
+
+ op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
+ ret = qlcnic_sre_macaddr_change(adapter, addr, vlan_id, op);
+ if (!ret) {
+ hlist_del(&fil->fnode);
+ adapter->rx_fhash.fnum--;
+ }
+}
+
+static struct qlcnic_filter *qlcnic_find_mac_filter(struct hlist_head *head,
+ void *addr, u16 vlan_id)
+{
+ struct qlcnic_filter *tmp_fil = NULL;
+ struct hlist_node *n;
+
+ hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
+ if (!memcmp(tmp_fil->faddr, addr, ETH_ALEN) &&
+ tmp_fil->vlan_id == vlan_id)
+ return tmp_fil;
+ }
+
+ return NULL;
+}
+
void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
int loopback_pkt, u16 vlan_id)
{
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
struct qlcnic_filter *fil, *tmp_fil;
- struct hlist_node *n;
struct hlist_head *head;
unsigned long time;
u64 src_addr = 0;
- u8 hindex, found = 0, op;
+ u8 hindex, op;
int ret;
memcpy(&src_addr, phdr->h_source, ETH_ALEN);
+ hindex = qlcnic_mac_hash(src_addr) &
+ (adapter->fhash.fbucket_size - 1);
if (loopback_pkt) {
if (adapter->rx_fhash.fnum >= adapter->rx_fhash.fmax)
return;
- hindex = qlcnic_mac_hash(src_addr) &
- (adapter->fhash.fbucket_size - 1);
head = &(adapter->rx_fhash.fhead[hindex]);
- hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
- if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
- tmp_fil->vlan_id == vlan_id) {
- time = tmp_fil->ftime;
- if (jiffies > (QLCNIC_READD_AGE * HZ + time))
- tmp_fil->ftime = jiffies;
- return;
- }
+ tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+ if (tmp_fil) {
+ time = tmp_fil->ftime;
+ if (time_after(jiffies, QLCNIC_READD_AGE * HZ + time))
+ tmp_fil->ftime = jiffies;
+ return;
}
fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
@@ -205,36 +251,37 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *adapter, struct sk_buff *skb,
adapter->rx_fhash.fnum++;
spin_unlock(&adapter->rx_mac_learn_lock);
} else {
- hindex = qlcnic_mac_hash(src_addr) &
- (adapter->fhash.fbucket_size - 1);
- head = &(adapter->rx_fhash.fhead[hindex]);
- spin_lock(&adapter->rx_mac_learn_lock);
- hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
- if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
- tmp_fil->vlan_id == vlan_id) {
- found = 1;
- break;
- }
- }
+ head = &adapter->fhash.fhead[hindex];
- if (!found) {
- spin_unlock(&adapter->rx_mac_learn_lock);
- return;
- }
+ spin_lock(&adapter->mac_learn_lock);
- op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
- ret = qlcnic_sre_macaddr_change(adapter, (u8 *)&src_addr,
- vlan_id, op);
- if (!ret) {
+ tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+ if (tmp_fil) {
op = vlan_id ? QLCNIC_MAC_VLAN_DEL : QLCNIC_MAC_DEL;
ret = qlcnic_sre_macaddr_change(adapter,
(u8 *)&src_addr,
vlan_id, op);
if (!ret) {
- hlist_del(&(tmp_fil->fnode));
- adapter->rx_fhash.fnum--;
+ hlist_del(&tmp_fil->fnode);
+ adapter->fhash.fnum--;
}
+
+ spin_unlock(&adapter->mac_learn_lock);
+
+ return;
}
+
+ spin_unlock(&adapter->mac_learn_lock);
+
+ head = &adapter->rx_fhash.fhead[hindex];
+
+ spin_lock(&adapter->rx_mac_learn_lock);
+
+ tmp_fil = qlcnic_find_mac_filter(head, &src_addr, vlan_id);
+ if (tmp_fil)
+ qlcnic_delete_rx_list_mac(adapter, tmp_fil, &src_addr,
+ vlan_id);
+
spin_unlock(&adapter->rx_mac_learn_lock);
}
}
@@ -262,7 +309,7 @@ void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
- memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
+ memcpy(mac_req->mac_addr, uaddr, ETH_ALEN);
vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
vlan_req->vlan_id = cpu_to_le16(vlan_id);
@@ -324,14 +371,14 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
}
static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
- struct cmd_desc_type0 *first_desc, struct sk_buff *skb)
+ struct cmd_desc_type0 *first_desc, struct sk_buff *skb,
+ struct qlcnic_host_tx_ring *tx_ring)
{
u8 l4proto, opcode = 0, hdr_len = 0;
u16 flags = 0, vlan_tci = 0;
int copied, offset, copy_len, size;
struct cmd_desc_type0 *hwdesc;
struct vlan_ethhdr *vh;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
u16 protocol = ntohs(skb->protocol);
u32 producer = tx_ring->producer;
@@ -514,7 +561,7 @@ static inline void qlcnic_clear_cmddesc(u64 *desc)
netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_cmd_buffer *pbuf;
struct qlcnic_skb_frag *buffrag;
struct cmd_desc_type0 *hwdesc, *first_desc;
@@ -523,10 +570,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
int i, k, frag_count, delta = 0;
u32 producer, num_txd;
- num_txd = tx_ring->num_desc;
-
if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
return NETDEV_TX_BUSY;
}
@@ -536,7 +581,14 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
goto drop_packet;
}
+ if (qlcnic_check_multi_tx(adapter))
+ tx_ring = &adapter->tx_ring[skb_get_queue_mapping(skb)];
+ else
+ tx_ring = &adapter->tx_ring[0];
+ num_txd = tx_ring->num_desc;
+
frag_count = skb_shinfo(skb)->nr_frags + 1;
+
/* 14 frags supported for normal packet and
* 32 frags supported for TSO packet
*/
@@ -551,11 +603,12 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
- netif_stop_queue(netdev);
+ netif_tx_stop_queue(tx_ring->txq);
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
- netif_start_queue(netdev);
+ netif_tx_start_queue(tx_ring->txq);
} else {
adapter->stats.xmit_off++;
+ tx_ring->xmit_off++;
return NETDEV_TX_BUSY;
}
}
@@ -610,7 +663,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_ring->producer = get_next_index(producer, num_txd);
smp_mb();
- if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb)))
+ if (unlikely(qlcnic_tx_pkt(adapter, first_desc, skb, tx_ring)))
goto unwind_buff;
if (adapter->drv_mac_learn)
@@ -618,6 +671,7 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
adapter->stats.txbytes += skb->len;
adapter->stats.xmitcalled++;
+ tx_ring->xmit_called++;
qlcnic_update_cmd_producer(tx_ring);
@@ -640,7 +694,7 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
adapter->ahw->linkup = 0;
if (netif_running(netdev)) {
netif_carrier_off(netdev);
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
}
} else if (!adapter->ahw->linkup && linkup) {
netdev_info(netdev, "NIC Link is up\n");
@@ -735,9 +789,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
struct net_device *netdev = adapter->netdev;
struct qlcnic_skb_frag *frag;
- if (!spin_trylock(&adapter->tx_clean_lock))
- return 1;
-
sw_consumer = tx_ring->sw_consumer;
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
@@ -755,6 +806,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
frag->dma = 0ULL;
}
adapter->stats.xmitfinished++;
+ tx_ring->xmit_finished++;
dev_kfree_skb_any(buffer->skb);
buffer->skb = NULL;
}
@@ -767,10 +819,12 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
if (count && netif_running(netdev)) {
tx_ring->sw_consumer = sw_consumer;
smp_mb();
- if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
+ if (netif_tx_queue_stopped(tx_ring->txq) &&
+ netif_carrier_ok(netdev)) {
if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
- netif_wake_queue(netdev);
+ netif_tx_wake_queue(tx_ring->txq);
adapter->stats.xmit_on++;
+ tx_ring->xmit_on++;
}
}
adapter->tx_timeo_cnt = 0;
@@ -790,7 +844,6 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
*/
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
done = (sw_consumer == hw_consumer);
- spin_unlock(&adapter->tx_clean_lock);
return done;
}
@@ -800,16 +853,40 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
int tx_complete, work_done;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_adapter *adapter;
+ struct qlcnic_host_tx_ring *tx_ring;
sds_ring = container_of(napi, struct qlcnic_host_sds_ring, napi);
adapter = sds_ring->adapter;
- tx_complete = qlcnic_process_cmd_ring(adapter, adapter->tx_ring,
+ tx_ring = sds_ring->tx_ring;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter, tx_ring,
budget);
work_done = qlcnic_process_rcv_ring(sds_ring, budget);
if ((work_done < budget) && tx_complete) {
napi_complete(&sds_ring->napi);
- if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
qlcnic_enable_int(sds_ring);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ }
+ }
+
+ return work_done;
+}
+
+static int qlcnic_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_adapter *adapter;
+ int work_done;
+
+ tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
+ adapter = tx_ring->adapter;
+
+ work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
+ if (work_done) {
+ napi_complete(&tx_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_tx_intr(adapter, tx_ring);
}
return work_done;
@@ -919,20 +996,23 @@ static void qlcnic_handle_fw_message(int desc_cnt, int index,
break;
case 1:
dev_info(dev, "loopback already in progress\n");
- adapter->ahw->diag_cnt = -QLCNIC_TEST_IN_PROGRESS;
+ adapter->ahw->diag_cnt = -EINPROGRESS;
break;
case 2:
dev_info(dev, "loopback cable is not connected\n");
- adapter->ahw->diag_cnt = -QLCNIC_LB_CABLE_NOT_CONN;
+ adapter->ahw->diag_cnt = -ENODEV;
break;
default:
dev_info(dev,
"loopback configure request failed, err %x\n",
ret);
- adapter->ahw->diag_cnt = -QLCNIC_UNDEFINED_ERROR;
+ adapter->ahw->diag_cnt = -EIO;
break;
}
break;
+ case QLCNIC_C2H_OPCODE_GET_DCB_AEN:
+ qlcnic_dcb_handle_aen(adapter, (void *)&msg);
+ break;
default:
break;
}
@@ -1378,23 +1458,31 @@ void qlcnic_82xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
struct net_device *netdev)
{
- int ring, max_sds_rings;
+ int ring;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
return -ENOMEM;
- max_sds_rings = adapter->max_sds_rings;
-
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
- if (ring == adapter->max_sds_rings - 1)
- netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
- QLCNIC_NETDEV_WEIGHT / max_sds_rings);
- else
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->max_drv_tx_rings > 1)) {
netif_napi_add(netdev, &sds_ring->napi, qlcnic_rx_poll,
- QLCNIC_NETDEV_WEIGHT*2);
+ NAPI_POLL_WEIGHT);
+ } else {
+ if (ring == (adapter->max_sds_rings - 1))
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_poll,
+ NAPI_POLL_WEIGHT);
+ else
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_rx_poll,
+ NAPI_POLL_WEIGHT);
+ }
}
if (qlcnic_alloc_tx_rings(adapter, netdev)) {
@@ -1402,6 +1490,14 @@ int qlcnic_82xx_napi_add(struct qlcnic_adapter *adapter,
return -ENOMEM;
}
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_add(netdev, &tx_ring->napi, qlcnic_tx_poll,
+ NAPI_POLL_WEIGHT);
+ }
+ }
+
return 0;
}
@@ -1410,6 +1506,7 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
int ring;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
@@ -1417,6 +1514,14 @@ void qlcnic_82xx_napi_del(struct qlcnic_adapter *adapter)
}
qlcnic_free_sds_rings(adapter->recv_ctx);
+
+ if (qlcnic_check_multi_tx(adapter) && !adapter->ahw->diag_test) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ netif_napi_del(&tx_ring->napi);
+ }
+ }
+
qlcnic_free_tx_rings(adapter);
}
@@ -1424,6 +1529,7 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1434,12 +1540,24 @@ void qlcnic_82xx_napi_enable(struct qlcnic_adapter *adapter)
napi_enable(&sds_ring->napi);
qlcnic_enable_int(sds_ring);
}
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !adapter->ahw->diag_test &&
+ (adapter->max_drv_tx_rings > 1)) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ napi_enable(&tx_ring->napi);
+ qlcnic_enable_tx_intr(adapter, tx_ring);
+ }
+ }
}
void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
{
int ring;
struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
@@ -1451,6 +1569,17 @@ void qlcnic_82xx_napi_disable(struct qlcnic_adapter *adapter)
napi_synchronize(&sds_ring->napi);
napi_disable(&sds_ring->napi);
}
+
+ if ((adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !adapter->ahw->diag_test &&
+ qlcnic_check_multi_tx(adapter)) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ qlcnic_disable_tx_int(adapter, tx_ring);
+ napi_synchronize(&tx_ring->napi);
+ napi_disable(&tx_ring->napi);
+ }
+ }
}
#define QLC_83XX_NORMAL_LB_PKT (1ULL << 36)
@@ -1831,7 +1960,7 @@ void qlcnic_83xx_napi_disable(struct qlcnic_adapter *adapter)
int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
struct net_device *netdev)
{
- int ring, max_sds_rings, temp;
+ int ring;
struct qlcnic_host_sds_ring *sds_ring;
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
@@ -1839,25 +1968,22 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
return -ENOMEM;
- max_sds_rings = adapter->max_sds_rings;
for (ring = 0; ring < adapter->max_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (adapter->flags & QLCNIC_MSIX_ENABLED) {
- if (!(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ if (!(adapter->flags & QLCNIC_TX_INTR_SHARED))
netif_napi_add(netdev, &sds_ring->napi,
qlcnic_83xx_rx_poll,
- QLCNIC_NETDEV_WEIGHT * 2);
- } else {
- temp = QLCNIC_NETDEV_WEIGHT / max_sds_rings;
+ NAPI_POLL_WEIGHT);
+ else
netif_napi_add(netdev, &sds_ring->napi,
qlcnic_83xx_msix_sriov_vf_poll,
- temp);
- }
+ NAPI_POLL_WEIGHT);
} else {
netif_napi_add(netdev, &sds_ring->napi,
qlcnic_83xx_poll,
- QLCNIC_NETDEV_WEIGHT / max_sds_rings);
+ NAPI_POLL_WEIGHT);
}
}
@@ -1872,7 +1998,7 @@ int qlcnic_83xx_napi_add(struct qlcnic_adapter *adapter,
tx_ring = &adapter->tx_ring[ring];
netif_napi_add(netdev, &tx_ring->napi,
qlcnic_83xx_msix_tx_poll,
- QLCNIC_NETDEV_WEIGHT);
+ NAPI_POLL_WEIGHT);
}
}
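
The qlcnic_io.c hunks above switch the transmit path from the single-queue netif_stop_queue()/netif_wake_queue() calls to per-subqueue control, so one exhausted TX ring no longer stalls the others. A minimal sketch of that flow-control shape; the ex_ helpers are hypothetical, while qlcnic_tx_avail, TX_STOP_THRESH and tx_ring->txq are the driver's own.

static void ex_maybe_stop_queue(struct qlcnic_host_tx_ring *tx_ring)
{
	if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)
		netif_tx_stop_queue(tx_ring->txq);	/* stop only this ring */
}

static void ex_maybe_wake_queue(struct qlcnic_host_tx_ring *tx_ring)
{
	if (netif_tx_queue_stopped(tx_ring->txq) &&
	    qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
		netif_tx_wake_queue(tx_ring->txq);	/* from the completion path */
}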
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 4528f8ec333..c4c5023e1fd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -100,6 +100,8 @@ static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
ENTRY(PCI_DEVICE_ID_QLOGIC_QLE834X),
ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE834X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE844X),
+ ENTRY(PCI_DEVICE_ID_QLOGIC_VF_QLE844X),
{0,}
};
@@ -146,6 +148,11 @@ static const u32 qlcnic_reg_tbl[] = {
static const struct qlcnic_board_info qlcnic_boards[] = {
{ PCI_VENDOR_ID_QLOGIC,
+ PCI_DEVICE_ID_QLOGIC_QLE844X,
+ 0x0,
+ 0x0,
+ "8400 series 10GbE Converged Network Adapter (TCP/IP Networking)" },
+ { PCI_VENDOR_ID_QLOGIC,
PCI_DEVICE_ID_QLOGIC_QLE834X,
PCI_VENDOR_ID_QLOGIC,
0x24e,
@@ -254,7 +261,6 @@ static const struct qlcnic_board_info qlcnic_boards[] = {
};
#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
-#define QLC_MAX_SDS_RINGS 8
static const
struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
@@ -278,12 +284,15 @@ void qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
{
- u8 mac_addr[ETH_ALEN];
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
+ u8 mac_addr[ETH_ALEN];
+ int ret;
- if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
- return -EIO;
+ ret = qlcnic_get_mac_address(adapter, mac_addr,
+ adapter->ahw->pci_func);
+ if (ret)
+ return ret;
memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
@@ -425,6 +434,21 @@ static void qlcnic_82xx_cancel_idc_work(struct qlcnic_adapter *adapter)
cancel_delayed_work_sync(&adapter->fw_work);
}
+static int qlcnic_get_phys_port_id(struct net_device *netdev,
+ struct netdev_phys_port_id *ppid)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+
+ if (!(adapter->flags & QLCNIC_HAS_PHYS_PORT_ID))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = sizeof(ahw->phys_port_id);
+ memcpy(ppid->id, ahw->phys_port_id, ppid->id_len);
+
+ return 0;
+}
+
static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_open = qlcnic_open,
.ndo_stop = qlcnic_close,
@@ -442,6 +466,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_fdb_add = qlcnic_fdb_add,
.ndo_fdb_del = qlcnic_fdb_del,
.ndo_fdb_dump = qlcnic_fdb_dump,
+ .ndo_get_phys_port_id = qlcnic_get_phys_port_id,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
#endif
@@ -514,13 +539,36 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
.get_board_info = qlcnic_82xx_get_board_info,
.set_mac_filter_count = qlcnic_82xx_set_mac_filter_count,
.free_mac_list = qlcnic_82xx_free_mac_list,
+ .read_phys_port_id = qlcnic_82xx_read_phys_port_id,
+ .io_error_detected = qlcnic_82xx_io_error_detected,
+ .io_slot_reset = qlcnic_82xx_io_slot_reset,
+ .io_resume = qlcnic_82xx_io_resume,
};
+static void qlcnic_get_multiq_capability(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int num_tx_q;
+
+ if (ahw->msix_supported &&
+ (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_MULTI_TX)) {
+ num_tx_q = min_t(int, QLCNIC_DEF_NUM_TX_RINGS,
+ num_online_cpus());
+ if (num_tx_q > 1) {
+ test_and_set_bit(__QLCNIC_MULTI_TX_UNIQUE,
+ &adapter->state);
+ adapter->max_drv_tx_rings = num_tx_q;
+ }
+ } else {
+ adapter->max_drv_tx_rings = 1;
+ }
+}
+
int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
struct pci_dev *pdev = adapter->pdev;
+ int max_tx_rings, max_sds_rings, tx_vector;
int err = -1, i;
- int max_tx_rings, tx_vector;
if (adapter->flags & QLCNIC_TX_INTR_SHARED) {
max_tx_rings = 0;
@@ -554,7 +602,15 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
adapter->max_sds_rings = num_msix -
max_tx_rings - 1;
} else {
- adapter->max_sds_rings = num_msix;
+ adapter->ahw->num_msix = num_msix;
+ if (qlcnic_check_multi_tx(adapter) &&
+ !adapter->ahw->diag_test &&
+ (adapter->max_drv_tx_rings > 1))
+ max_sds_rings = num_msix - max_tx_rings;
+ else
+ max_sds_rings = num_msix;
+
+ adapter->max_sds_rings = max_sds_rings;
}
dev_info(&pdev->dev, "using msi-x interrupts\n");
return err;
@@ -570,6 +626,8 @@ int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
num_msix += (max_tx_rings + 1);
} else {
num_msix = rounddown_pow_of_two(err);
+ if (qlcnic_check_multi_tx(adapter))
+ num_msix += max_tx_rings;
}
if (num_msix) {
@@ -605,6 +663,7 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
adapter->msix_entries[0].vector = pdev->irq;
return err;
}
+
if (qlcnic_use_msi || qlcnic_use_msi_x)
return -EOPNOTSUPP;
@@ -621,28 +680,69 @@ static int qlcnic_enable_msi_legacy(struct qlcnic_adapter *adapter)
return err;
}
-int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr)
+int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter, u8 num_intr, int txq)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
int num_msix, err = 0;
if (!num_intr)
num_intr = QLCNIC_DEF_NUM_STS_DESC_RINGS;
- if (adapter->ahw->msix_supported)
+ if (ahw->msix_supported) {
num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
num_intr));
- else
+ if (qlcnic_check_multi_tx(adapter)) {
+ if (txq)
+ adapter->max_drv_tx_rings = txq;
+ num_msix += adapter->max_drv_tx_rings;
+ }
+ } else {
num_msix = 1;
+ }
err = qlcnic_enable_msix(adapter, num_msix);
- if (err == -ENOMEM || !err)
+ if (err == -ENOMEM)
return err;
- err = qlcnic_enable_msi_legacy(adapter);
- if (!err)
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ qlcnic_disable_multi_tx(adapter);
+
+ err = qlcnic_enable_msi_legacy(adapter);
+ if (!err)
+ return err;
+ }
+
+ return 0;
+}
+
+int qlcnic_82xx_mq_intrpt(struct qlcnic_adapter *adapter, int op_type)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ int err, i;
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ !ahw->diag_test &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ ahw->intr_tbl = vzalloc(ahw->num_msix *
+ sizeof(struct qlcnic_intrpt_config));
+ if (!ahw->intr_tbl)
+ return -ENOMEM;
+
+ for (i = 0; i < ahw->num_msix; i++) {
+ ahw->intr_tbl[i].type = QLCNIC_INTRPT_MSIX;
+ ahw->intr_tbl[i].id = i;
+ ahw->intr_tbl[i].src = 0;
+ }
+
+ err = qlcnic_82xx_config_intrpt(adapter, 1);
+ if (err)
+ dev_err(&adapter->pdev->dev,
+ "Failed to configure Interrupt for %d vector\n",
+ ahw->num_msix);
return err;
+ }
- return -EIO;
+ return 0;
}
void qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
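
For 82xx multi-TX, the setup_intr/enable_msix changes above budget one MSI-X vector per TX ring on top of the power-of-two status-ring count. A sketch of that arithmetic only; the helper is hypothetical, the min/rounddown pattern mirrors the hunk.

static int ex_msix_vector_count(struct qlcnic_adapter *adapter,
				int num_sts_rings)
{
	int num_msix = rounddown_pow_of_two(min_t(int, num_online_cpus(),
						  num_sts_rings));

	if (qlcnic_check_multi_tx(adapter))
		num_msix += adapter->max_drv_tx_rings;	/* one vector per TX ring */

	return num_msix;
}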
@@ -696,6 +796,23 @@ static int qlcnic_get_act_pci_func(struct qlcnic_adapter *adapter)
return ret;
}
+static bool qlcnic_port_eswitch_cfg_capability(struct qlcnic_adapter *adapter)
+{
+ bool ret = false;
+
+ if (qlcnic_84xx_check(adapter)) {
+ ret = true;
+ } else if (qlcnic_83xx_check(adapter)) {
+ if (adapter->ahw->extra_capability[0] &
+ QLCNIC_FW_CAPABILITY_2_PER_PORT_ESWITCH_CFG)
+ ret = true;
+ else
+ ret = false;
+ }
+
+ return ret;
+}
+
int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
struct qlcnic_pci_info *pci_info;
@@ -739,18 +856,30 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
(pci_info[i].type != QLCNIC_TYPE_NIC))
continue;
+ if (qlcnic_port_eswitch_cfg_capability(adapter)) {
+ if (!qlcnic_83xx_enable_port_eswitch(adapter, pfn))
+ adapter->npars[j].eswitch_status = true;
+ else
+ continue;
+ } else {
+ adapter->npars[j].eswitch_status = true;
+ }
+
adapter->npars[j].pci_func = pfn;
adapter->npars[j].active = (u8)pci_info[i].active;
adapter->npars[j].type = (u8)pci_info[i].type;
adapter->npars[j].phy_port = (u8)pci_info[i].default_port;
adapter->npars[j].min_bw = pci_info[i].tx_min_bw;
adapter->npars[j].max_bw = pci_info[i].tx_max_bw;
+
j++;
}
- for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++) {
- adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
- if (qlcnic_83xx_check(adapter))
+ if (qlcnic_82xx_check(adapter)) {
+ for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
+ adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
+ } else if (!qlcnic_port_eswitch_cfg_capability(adapter)) {
+ for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
qlcnic_enable_eswitch(adapter, i, 1);
}
@@ -829,7 +958,9 @@ static void qlcnic_get_bar_length(u32 dev_id, ulong *bar)
*bar = QLCNIC_82XX_BAR0_LENGTH;
break;
case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_QLE844X:
case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
*bar = QLCNIC_83XX_BAR0_LENGTH;
break;
default:
@@ -870,8 +1001,8 @@ static int qlcnic_setup_pci_map(struct pci_dev *pdev,
return 0;
}
-static inline bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter,
- int index)
+static bool qlcnic_validate_subsystem_id(struct qlcnic_adapter *adapter,
+ int index)
{
struct pci_dev *pdev = adapter->pdev;
unsigned short subsystem_vendor;
@@ -977,8 +1108,8 @@ qlcnic_check_options(struct qlcnic_adapter *adapter)
static int
qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
{
- int err;
struct qlcnic_info nic_info;
+ int err = 0;
memset(&nic_info, 0, sizeof(struct qlcnic_info));
err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw->pci_func);
@@ -993,7 +1124,9 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
if (adapter->ahw->capabilities & QLCNIC_FW_CAPABILITY_MORE_CAPS) {
u32 temp;
- temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2);
+ temp = QLCRD32(adapter, CRB_FW_CAPABILITIES_2, &err);
+ if (err == -EIO)
+ return err;
adapter->ahw->extra_capability[0] = temp;
}
adapter->ahw->max_mac_filters = nic_info.max_mac_filters;
@@ -1171,6 +1304,9 @@ int qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
return 0;
for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
esw_cfg.pci_func = adapter->npars[i].pci_func;
esw_cfg.mac_override = BIT_0;
@@ -1233,6 +1369,9 @@ int qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
for (i = 0; i < adapter->ahw->act_pci_func; i++) {
npar = &adapter->npars[i];
pci_func = npar->pci_func;
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
memset(&nic_info, 0, sizeof(struct qlcnic_info));
err = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
if (err)
@@ -1383,6 +1522,8 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
if (qlcnic_82xx_check(adapter))
handler = qlcnic_tmp_intr;
+ else
+ handler = qlcnic_83xx_tmp_intr;
if (!QLCNIC_IS_MSI_FAMILY(adapter))
flags |= IRQF_SHARED;
@@ -1409,6 +1550,7 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
for (ring = 0; ring < num_sds_rings; ring++) {
sds_ring = &recv_ctx->sds_rings[ring];
if (qlcnic_82xx_check(adapter) &&
+ !qlcnic_check_multi_tx(adapter) &&
(ring == (num_sds_rings - 1))) {
if (!(adapter->flags &
QLCNIC_MSIX_ENABLED))
@@ -1432,9 +1574,11 @@ qlcnic_request_irq(struct qlcnic_adapter *adapter)
return err;
}
}
- if (qlcnic_83xx_check(adapter) &&
- (adapter->flags & QLCNIC_MSIX_ENABLED) &&
- !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ if ((qlcnic_82xx_check(adapter) &&
+ qlcnic_check_multi_tx(adapter)) ||
+ (qlcnic_83xx_check(adapter) &&
+ (adapter->flags & QLCNIC_MSIX_ENABLED) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED))) {
handler = qlcnic_msix_tx_intr;
for (ring = 0; ring < adapter->max_drv_tx_rings;
ring++) {
@@ -1469,8 +1613,10 @@ qlcnic_free_irq(struct qlcnic_adapter *adapter)
free_irq(sds_ring->irq, sds_ring);
}
}
- if (qlcnic_83xx_check(adapter) &&
- !(adapter->flags & QLCNIC_TX_INTR_SHARED)) {
+ if ((qlcnic_83xx_check(adapter) &&
+ !(adapter->flags & QLCNIC_TX_INTR_SHARED)) ||
+ (qlcnic_82xx_check(adapter) &&
+ qlcnic_check_multi_tx(adapter))) {
for (ring = 0; ring < adapter->max_drv_tx_rings;
ring++) {
tx_ring = &adapter->tx_ring[ring];
@@ -1506,8 +1652,10 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
return 0;
+
if (qlcnic_set_eswitch_port_config(adapter))
return -EIO;
+
qlcnic_get_lro_mss_capability(adapter);
if (qlcnic_fw_create_ctx(adapter))
@@ -1531,12 +1679,12 @@ int __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (netdev->features & NETIF_F_LRO)
qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
qlcnic_napi_enable(adapter);
qlcnic_linkevent_request(adapter, 1);
adapter->ahw->reset_context = 0;
- set_bit(__QLCNIC_DEV_UP, &adapter->state);
return 0;
}
@@ -1554,6 +1702,8 @@ int qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
+ int ring;
+
if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
return;
@@ -1563,7 +1713,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (qlcnic_sriov_vf_check(adapter))
qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
smp_mb();
- spin_lock(&adapter->tx_clean_lock);
netif_carrier_off(netdev);
adapter->ahw->linkup = 0;
netif_tx_disable(netdev);
@@ -1581,8 +1730,9 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
adapter->flags &= ~QLCNIC_FW_LRO_MSS_CAP;
qlcnic_reset_rx_buffers_list(adapter);
- qlcnic_release_tx_buffers(adapter);
- spin_unlock(&adapter->tx_clean_lock);
+
+ for (ring = 0; ring < adapter->max_drv_tx_rings; ring++)
+ qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
}
/* Usage: During suspend and firmware recovery module */
@@ -1662,6 +1812,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
+ int max_tx_rings = adapter->max_drv_tx_rings;
int ring;
clear_bit(__QLCNIC_DEV_UP, &adapter->state);
@@ -1678,6 +1829,7 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
adapter->ahw->diag_test = 0;
adapter->max_sds_rings = max_sds_rings;
+ adapter->max_drv_tx_rings = max_tx_rings;
if (qlcnic_attach(adapter))
goto out;
@@ -1746,6 +1898,7 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
adapter->max_sds_rings = 1;
adapter->ahw->diag_test = test;
adapter->ahw->linkup = 0;
+ adapter->max_drv_tx_rings = 1;
ret = qlcnic_attach(adapter);
if (ret) {
@@ -1903,12 +2056,18 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->irq = adapter->msix_entries[0].vector;
+ err = qlcnic_set_real_num_queues(adapter, netdev);
+ if (err)
+ return err;
+
err = register_netdev(netdev);
if (err) {
dev_err(&pdev->dev, "failed to register net device\n");
return err;
}
+ qlcnic_dcb_init_dcbnl_ops(adapter);
+
return 0;
}
@@ -1971,7 +2130,8 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
tx_ring->cmd_buf_arr = cmd_buf_arr;
}
- if (qlcnic_83xx_check(adapter)) {
+ if (qlcnic_83xx_check(adapter) ||
+ (qlcnic_82xx_check(adapter) && qlcnic_check_multi_tx(adapter))) {
for (ring = 0; ring < adapter->max_drv_tx_rings; ring++) {
tx_ring = &adapter->tx_ring[ring];
tx_ring->adapter = adapter;
@@ -1982,6 +2142,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
}
}
}
+
return 0;
}
@@ -2000,6 +2161,17 @@ void qlcnic_set_drv_version(struct qlcnic_adapter *adapter)
qlcnic_fw_cmd_set_drv_version(adapter, fw_cmd);
}
+static int qlcnic_register_dcb(struct qlcnic_adapter *adapter)
+{
+ return __qlcnic_register_dcb(adapter);
+}
+
+void qlcnic_clear_dcb_ops(struct qlcnic_adapter *adapter)
+{
+ kfree(adapter->dcb);
+ adapter->dcb = NULL;
+}
+
static int
qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -2044,9 +2216,11 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ahw->reg_tbl = (u32 *) qlcnic_reg_tbl;
break;
case PCI_DEVICE_ID_QLOGIC_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_QLE844X:
qlcnic_83xx_register_map(ahw);
break;
case PCI_DEVICE_ID_QLOGIC_VF_QLE834X:
+ case PCI_DEVICE_ID_QLOGIC_VF_QLE844X:
qlcnic_sriov_vf_register_map(ahw);
break;
default:
@@ -2057,7 +2231,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_out_free_hw_res;
- netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
+ netdev = alloc_etherdev_mq(sizeof(struct qlcnic_adapter),
+ QLCNIC_MAX_TX_RINGS);
if (!netdev) {
err = -ENOMEM;
goto err_out_iounmap;
@@ -2087,14 +2262,14 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->fdb_mac_learn = true;
else if (qlcnic_mac_learn == DRV_MAC_LEARN)
adapter->drv_mac_learn = true;
- adapter->max_drv_tx_rings = 1;
rwlock_init(&adapter->ahw->crb_lock);
mutex_init(&adapter->ahw->mem_lock);
- spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
+ qlcnic_register_dcb(adapter);
+
if (qlcnic_82xx_check(adapter)) {
qlcnic_check_vf(adapter, ent);
adapter->portnum = adapter->ahw->pci_func;
@@ -2104,12 +2279,31 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_hw;
}
+ qlcnic_get_multiq_capability(adapter);
+
+ if ((adapter->ahw->act_pci_func > 2) &&
+ qlcnic_check_multi_tx(adapter)) {
+ adapter->max_drv_tx_rings = QLCNIC_DEF_NUM_TX_RINGS;
+ dev_info(&adapter->pdev->dev,
+ "vNIC mode enabled, Set max TX rings = %d\n",
+ adapter->max_drv_tx_rings);
+ }
+
+ if (!qlcnic_check_multi_tx(adapter)) {
+ clear_bit(__QLCNIC_MULTI_TX_UNIQUE, &adapter->state);
+ adapter->max_drv_tx_rings = 1;
+ }
err = qlcnic_setup_idc_param(adapter);
if (err)
goto err_out_free_hw;
adapter->flags |= QLCNIC_NEED_FLR;
+
+ if (adapter->dcb && qlcnic_dcb_attach(adapter))
+ qlcnic_clear_dcb_ops(adapter);
+
} else if (qlcnic_83xx_check(adapter)) {
+ adapter->max_drv_tx_rings = 1;
qlcnic_83xx_check_vf(adapter, ent);
adapter->portnum = adapter->ahw->pci_func;
err = qlcnic_83xx_init(adapter, pci_using_dac);
@@ -2128,6 +2322,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (qlcnic_read_mac_addr(adapter))
dev_warn(&pdev->dev, "failed to read mac addr\n");
+ qlcnic_read_phys_port_id(adapter);
+
if (adapter->portnum == 0) {
qlcnic_get_board_name(adapter, board_name);
@@ -2139,18 +2335,14 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (qlcnic_83xx_check(adapter) && !qlcnic_use_msi_x &&
!!qlcnic_use_msi)
dev_warn(&pdev->dev,
- "83xx adapter do not support MSI interrupts\n");
-
- err = qlcnic_setup_intr(adapter, 0);
- if (err) {
- dev_err(&pdev->dev, "Failed to setup interrupt\n");
- goto err_out_disable_msi;
- }
+ "Device does not support MSI interrupts\n");
- if (qlcnic_83xx_check(adapter)) {
- err = qlcnic_83xx_setup_mbx_intr(adapter);
- if (err)
+ if (qlcnic_82xx_check(adapter)) {
+ err = qlcnic_setup_intr(adapter, 0, 0);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to setup interrupt\n");
goto err_out_disable_msi;
+ }
}
err = qlcnic_get_act_pci_func(adapter);
@@ -2161,7 +2353,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_out_disable_mbx_intr;
- qlcnic_set_drv_version(adapter);
+ if (adapter->portnum == 0)
+ qlcnic_set_drv_version(adapter);
pci_set_drvdata(pdev, adapter);
@@ -2233,13 +2426,18 @@ static void qlcnic_remove(struct pci_dev *pdev)
qlcnic_cancel_idc_work(adapter);
ahw = adapter->ahw;
+ qlcnic_dcb_free(adapter);
+
unregister_netdev(netdev);
qlcnic_sriov_cleanup(adapter);
if (qlcnic_83xx_check(adapter)) {
- qlcnic_83xx_free_mbx_intr(adapter);
qlcnic_83xx_register_nic_idc_func(adapter, 0);
cancel_delayed_work_sync(&adapter->idc_aen_work);
+ qlcnic_83xx_free_mbx_intr(adapter);
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ qlcnic_83xx_free_mailbox(ahw->mailbox);
+ kfree(ahw->fw_info);
}
qlcnic_detach(adapter);
@@ -2273,6 +2471,7 @@ static void qlcnic_remove(struct pci_dev *pdev)
destroy_workqueue(adapter->qlcnic_wq);
adapter->qlcnic_wq = NULL;
}
+
qlcnic_free_adapter_resources(adapter);
kfree(ahw);
free_netdev(netdev);
@@ -2331,7 +2530,7 @@ static int qlcnic_open(struct net_device *netdev)
if (err)
goto err_out;
- netif_start_queue(netdev);
+ netif_tx_start_all_queues(netdev);
return 0;
@@ -2463,6 +2662,8 @@ int qlcnic_check_temp(struct qlcnic_adapter *adapter)
static void qlcnic_tx_timeout(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return;
@@ -2476,6 +2677,25 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
QLCNIC_FORCE_FW_DUMP_KEY);
} else {
netdev_info(netdev, "Tx timeout, reset adapter context.\n");
+ if (qlcnic_82xx_check(adapter)) {
+ for (ring = 0; ring < adapter->max_drv_tx_rings;
+ ring++) {
+ tx_ring = &adapter->tx_ring[ring];
+ dev_info(&netdev->dev, "ring=%d\n", ring);
+ dev_info(&netdev->dev, "crb_intr_mask=%d\n",
+ readl(tx_ring->crb_intr_mask));
+ dev_info(&netdev->dev, "producer=%d\n",
+ readl(tx_ring->crb_cmd_producer));
+ dev_info(&netdev->dev, "sw_consumer = %d\n",
+ tx_ring->sw_consumer);
+ dev_info(&netdev->dev, "hw_consumer = %d\n",
+ le32_to_cpu(*(tx_ring->hw_consumer)));
+ dev_info(&netdev->dev, "xmit-on=%llu\n",
+ tx_ring->xmit_on);
+ dev_info(&netdev->dev, "xmit-off=%llu\n",
+ tx_ring->xmit_off);
+ }
+ }
adapter->ahw->reset_context = 1;
}
}
@@ -2864,7 +3084,7 @@ skip_ack_check:
qlcnic_api_unlock(adapter);
rtnl_lock();
- if (adapter->ahw->fw_dump.enable &&
+ if (qlcnic_check_fw_dump_state(adapter) &&
(adapter->flags & QLCNIC_FW_RESET_OWNER)) {
QLCDB(adapter, DRV, "Take FW dump\n");
qlcnic_dump_fw(adapter);
@@ -3069,6 +3289,8 @@ qlcnic_attach_work(struct work_struct *work)
return;
}
attach:
+ qlcnic_dcb_get_info(adapter);
+
if (netif_running(netdev)) {
if (qlcnic_up(adapter, netdev))
goto done;
@@ -3081,7 +3303,8 @@ done:
adapter->fw_fail_cnt = 0;
adapter->flags &= ~QLCNIC_FW_HANG;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
- qlcnic_set_drv_version(adapter);
+ if (adapter->portnum == 0)
+ qlcnic_set_drv_version(adapter);
if (!qlcnic_clr_drv_state(adapter))
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
@@ -3093,6 +3316,7 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
{
u32 state = 0, heartbeat;
u32 peg_status;
+ int err = 0;
if (qlcnic_check_temp(adapter))
goto detach;
@@ -3139,11 +3363,11 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
"PEG_NET_4_PC: 0x%x\n",
peg_status,
QLC_SHARED_REG_RD32(adapter, QLCNIC_PEG_HALT_STATUS2),
- QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c),
- QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c),
- QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c),
- QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c),
- QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c));
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x3c, &err),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x3c, &err),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x3c, &err),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x3c, &err),
+ QLCRD32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x3c, &err));
if (QLCNIC_FWERROR_CODE(peg_status) == 0x67)
dev_err(&adapter->pdev->dev,
"Firmware aborted with error code 0x00006700. "
@@ -3238,7 +3462,7 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
qlcnic_clr_drv_state(adapter);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
- err = qlcnic_setup_intr(adapter, 0);
+ err = qlcnic_setup_intr(adapter, 0, 0);
if (err) {
kfree(adapter->msix_entries);
@@ -3246,19 +3470,6 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
return err;
}
- if (qlcnic_83xx_check(adapter)) {
- /* register for NIC IDC AEN Events */
- qlcnic_83xx_register_nic_idc_func(adapter, 1);
- err = qlcnic_83xx_setup_mbx_intr(adapter);
- if (err) {
- dev_err(&adapter->pdev->dev,
- "failed to setup mbx interrupt\n");
- qlcnic_clr_all_drv_state(adapter, 1);
- clear_bit(__QLCNIC_AER, &adapter->state);
- goto done;
- }
- }
-
if (netif_running(netdev)) {
err = qlcnic_attach(adapter);
if (err) {
@@ -3279,8 +3490,8 @@ static int qlcnic_attach_func(struct pci_dev *pdev)
return err;
}
-static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
+pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
{
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
@@ -3299,12 +3510,6 @@ static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
qlcnic_down(adapter, netdev);
- if (qlcnic_83xx_check(adapter)) {
- qlcnic_83xx_free_mbx_intr(adapter);
- qlcnic_83xx_register_nic_idc_func(adapter, 0);
- cancel_delayed_work_sync(&adapter->idc_aen_work);
- }
-
qlcnic_detach(adapter);
qlcnic_teardown_intr(adapter);
@@ -3316,13 +3521,13 @@ static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_NEED_RESET;
}
-static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
+pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
{
return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
PCI_ERS_RESULT_RECOVERED;
}
-static void qlcnic_io_resume(struct pci_dev *pdev)
+void qlcnic_82xx_io_resume(struct pci_dev *pdev)
{
u32 state;
struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
@@ -3332,9 +3537,48 @@ static void qlcnic_io_resume(struct pci_dev *pdev)
if (state == QLCNIC_DEV_READY && test_and_clear_bit(__QLCNIC_AER,
&adapter->state))
qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
- FW_POLL_DELAY);
+ FW_POLL_DELAY);
}
+static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+ if (hw_ops->io_error_detected) {
+ return hw_ops->io_error_detected(pdev, state);
+ } else {
+ dev_err(&pdev->dev, "AER error_detected handler not registered.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+}
+
+static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+ if (hw_ops->io_slot_reset) {
+ return hw_ops->io_slot_reset(pdev);
+ } else {
+ dev_err(&pdev->dev, "AER slot_reset handler not registered.\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+}
+
+static void qlcnic_io_resume(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct qlcnic_hardware_ops *hw_ops = adapter->ahw->hw_ops;
+
+ if (hw_ops->io_resume)
+ hw_ops->io_resume(pdev);
+ else
+ dev_err(&pdev->dev, "AER resume handler not registered.\n");
+}
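Aside (not part of the patch): the three wrappers above turn the PCI AER callbacks into dispatches through the adapter's hw_ops table, so 82xx and other parts can register different recovery handlers. A minimal stand-alone sketch of that ops-table-with-NULL-guard shape (all names below are hypothetical):

#include <stdio.h>

struct err_ops {
        int (*slot_reset)(void *dev);   /* may be left NULL */
};

static int generic_slot_reset(void *dev)
{
        (void)dev;
        return 0;                       /* "recovered" */
}

static int dispatch_slot_reset(const struct err_ops *ops, void *dev)
{
        if (ops && ops->slot_reset)
                return ops->slot_reset(dev);

        fprintf(stderr, "slot_reset handler not registered\n");
        return -1;                      /* analogue of PCI_ERS_RESULT_DISCONNECT */
}

int main(void)
{
        struct err_ops ops = { .slot_reset = generic_slot_reset };

        printf("%d\n", dispatch_slot_reset(&ops, NULL));
        return 0;
}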
+
+
static int
qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
{
@@ -3363,16 +3607,65 @@ qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
return err;
}
+int qlcnic_validate_max_tx_rings(struct qlcnic_adapter *adapter, u32 txq)
+{
+ struct net_device *netdev = adapter->netdev;
+ u8 max_hw = QLCNIC_MAX_TX_RINGS;
+ u32 max_allowed;
+
+ if (!qlcnic_82xx_check(adapter)) {
+ netdev_err(netdev, "No Multi TX-Q support\n");
+ return -EINVAL;
+ }
+
+ if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
+ netdev_err(netdev, "No Multi TX-Q support in INT-x mode\n");
+ return -EINVAL;
+ }
+
+ if (!qlcnic_check_multi_tx(adapter)) {
+ netdev_err(netdev, "No Multi TX-Q support\n");
+ return -EINVAL;
+ }
+
+ if (txq > QLCNIC_MAX_TX_RINGS) {
+ netdev_err(netdev, "Invalid ring count\n");
+ return -EINVAL;
+ }
+
+ max_allowed = rounddown_pow_of_two(min_t(int, max_hw,
+ num_online_cpus()));
+ if ((txq > max_allowed) || !is_power_of_2(txq)) {
+ if (!is_power_of_2(txq))
+ netdev_err(netdev,
+ "TX queue should be a power of 2\n");
+ if (txq > num_online_cpus())
+ netdev_err(netdev,
+ "Tx queue should not be higher than [%u], number of online CPUs in the system\n",
+ num_online_cpus());
+ netdev_err(netdev, "Unable to configure %u Tx rings\n", txq);
+ return -EINVAL;
+ }
+
+ return 0;
+}
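Aside (not part of the patch): qlcnic_validate_max_tx_rings() above accepts a ring count only if it is a power of two and no larger than min(QLCNIC_MAX_TX_RINGS, online CPUs) rounded down to a power of two. The power-of-two test it leans on is the usual bit trick; a stand-alone sketch:

#include <stdbool.h>
#include <stdio.h>

/* user-space stand-in for the kernel's is_power_of_2() */
static bool is_power_of_2(unsigned long n)
{
        return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
        unsigned long txq;

        for (txq = 1; txq <= 8; txq++)
                printf("%lu -> %s\n", txq, is_power_of_2(txq) ? "ok" : "reject");
        return 0;
}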
+
int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
- __u32 val)
+ __u32 val)
{
struct net_device *netdev = adapter->netdev;
u8 max_hw = adapter->ahw->max_rx_ques;
u32 max_allowed;
- if (val > QLC_MAX_SDS_RINGS) {
+ if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
+ !qlcnic_use_msi) {
+ netdev_err(netdev, "No RSS support in INT-x mode\n");
+ return -EINVAL;
+ }
+
+ if (val > QLCNIC_MAX_SDS_RINGS) {
netdev_err(netdev, "RSS value should not be higher than %u\n",
- QLC_MAX_SDS_RINGS);
+ QLCNIC_MAX_SDS_RINGS);
return -EINVAL;
}
@@ -3402,27 +3695,48 @@ int qlcnic_validate_max_rss(struct qlcnic_adapter *adapter,
return 0;
}
-int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
+int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, int txq)
{
int err;
struct net_device *netdev = adapter->netdev;
+ int num_msix;
if (test_bit(__QLCNIC_RESETTING, &adapter->state))
return -EBUSY;
+ if (qlcnic_82xx_check(adapter) && !qlcnic_use_msi_x &&
+ !qlcnic_use_msi) {
+ netdev_err(netdev, "No RSS support in INT-x mode\n");
+ return -EINVAL;
+ }
+
netif_device_detach(netdev);
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
qlcnic_detach(adapter);
+ if (qlcnic_82xx_check(adapter)) {
+ if (txq != 0)
+ adapter->max_drv_tx_rings = txq;
+
+ if (qlcnic_check_multi_tx(adapter) &&
+ (txq > adapter->max_drv_tx_rings))
+ num_msix = adapter->max_drv_tx_rings;
+ else
+ num_msix = data;
+ }
+
if (qlcnic_83xx_check(adapter)) {
qlcnic_83xx_free_mbx_intr(adapter);
qlcnic_83xx_enable_mbx_poll(adapter);
}
+ netif_set_real_num_tx_queues(netdev, adapter->max_drv_tx_rings);
+
qlcnic_teardown_intr(adapter);
- err = qlcnic_setup_intr(adapter, data);
+
+ err = qlcnic_setup_intr(adapter, data, txq);
if (err) {
kfree(adapter->msix_entries);
netdev_err(netdev, "failed to setup interrupt\n");
@@ -3450,8 +3764,7 @@ int qlcnic_set_max_rss(struct qlcnic_adapter *adapter, u8 data, size_t len)
goto done;
qlcnic_restore_indev_addr(netdev, NETDEV_UP);
}
- err = len;
- done:
+done:
netif_device_attach(netdev);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return err;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index ab8a6744d40..15513608d48 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -1082,14 +1082,17 @@ flash_temp:
}
tmpl_hdr = ahw->fw_dump.tmpl_hdr;
- tmpl_hdr->drv_cap_mask = QLCNIC_DUMP_MASK_DEF;
+ tmpl_hdr->drv_cap_mask = tmpl_hdr->cap_mask;
+ dev_info(&adapter->pdev->dev,
+ "Default minidump capture mask 0x%x\n",
+ tmpl_hdr->cap_mask);
- if ((tmpl_hdr->version & 0xffffff) >= 0x20001)
+ if ((tmpl_hdr->version & 0xfffff) >= 0x20001)
ahw->fw_dump.use_pex_dma = true;
else
ahw->fw_dump.use_pex_dma = false;
- ahw->fw_dump.enable = 1;
+ qlcnic_enable_fw_dump_state(adapter);
return 0;
}
@@ -1112,7 +1115,11 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
ahw = adapter->ahw;
- if (!fw_dump->enable) {
+ /* Return if we don't have firmware dump template header */
+ if (!tmpl_hdr)
+ return -EIO;
+
+ if (!qlcnic_check_fw_dump_state(adapter)) {
dev_info(&adapter->pdev->dev, "Dump not enabled\n");
return -EIO;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 62380ce8990..652cc13c502 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -33,7 +33,7 @@ static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
-static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *,
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
struct qlcnic_cmd_args *);
static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
@@ -45,7 +45,7 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
.get_mac_address = qlcnic_83xx_get_mac_address,
.setup_intr = qlcnic_83xx_setup_intr,
.alloc_mbx_args = qlcnic_83xx_alloc_mbx_args,
- .mbx_cmd = qlcnic_sriov_vf_mbx_op,
+ .mbx_cmd = qlcnic_sriov_issue_cmd,
.get_func_no = qlcnic_83xx_get_func_no,
.api_lock = qlcnic_83xx_cam_lock,
.api_unlock = qlcnic_83xx_cam_unlock,
@@ -286,96 +286,38 @@ void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
u32 *pay, u8 pci_func, u8 size)
{
- u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0;
struct qlcnic_hardware_context *ahw = adapter->ahw;
- unsigned long flags;
- u16 opcode;
- u8 mbx_err_code;
- int i, j;
-
- opcode = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
-
- if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
- dev_info(&adapter->pdev->dev,
- "Mailbox cmd attempted, 0x%x\n", opcode);
- dev_info(&adapter->pdev->dev, "Mailbox detached\n");
- return 0;
- }
-
- spin_lock_irqsave(&ahw->mbx_lock, flags);
-
- mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL);
- if (mbx_val) {
- QLCDB(adapter, DRV, "Mailbox cmd attempted, 0x%x\n", opcode);
- spin_unlock_irqrestore(&ahw->mbx_lock, flags);
- return QLCNIC_RCODE_TIMEOUT;
- }
- /* Fill in mailbox registers */
- val = size + (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
- mbx_cmd = 0x31 | (val << 16) | (adapter->ahw->fw_hal_version << 29);
-
- writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 0));
- mbx_cmd = 0x1 | (1 << 4);
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
+ struct qlcnic_cmd_args cmd;
+ unsigned long timeout;
+ int err;
- if (qlcnic_sriov_pf_check(adapter))
- mbx_cmd |= (pci_func << 5);
+ memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
+ cmd.hdr = hdr;
+ cmd.pay = pay;
+ cmd.pay_size = size;
+ cmd.func_num = pci_func;
+ cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
+ cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
- writel(mbx_cmd, QLCNIC_MBX_HOST(ahw, 1));
- for (i = 2, j = 0; j < (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
- i++, j++) {
- writel(*(hdr++), QLCNIC_MBX_HOST(ahw, i));
+ err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
+ ahw->op_mode);
+ return err;
}
- for (j = 0; j < size; j++, i++)
- writel(*(pay++), QLCNIC_MBX_HOST(ahw, i));
- /* Signal FW about the impending command */
- QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
-
- /* Waiting for the mailbox cmd to complete and while waiting here
- * some AEN might arrive. If more than 5 seconds expire we can
- * assume something is wrong.
- */
-poll:
- rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
- if (rsp != QLCNIC_RCODE_TIMEOUT) {
- /* Get the FW response data */
- fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
- if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
- __qlcnic_83xx_process_aen(adapter);
- goto poll;
- }
- mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
- rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
- opcode = QLCNIC_MBX_RSP(fw_data);
-
- switch (mbx_err_code) {
- case QLCNIC_MBX_RSP_OK:
- case QLCNIC_MBX_PORT_RSP_OK:
- rsp = QLCNIC_RCODE_SUCCESS;
- break;
- default:
- if (opcode == QLCNIC_CMD_CONFIG_MAC_VLAN) {
- rsp = qlcnic_83xx_mac_rcode(adapter);
- if (!rsp)
- goto out;
- }
- dev_err(&adapter->pdev->dev,
- "MBX command 0x%x failed with err:0x%x\n",
- opcode, mbx_err_code);
- rsp = mbx_err_code;
- break;
- }
- goto out;
+ if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
+ dev_err(&adapter->pdev->dev,
+ "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
+ __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
+ ahw->op_mode);
+ flush_workqueue(mbx->work_q);
}
- dev_err(&adapter->pdev->dev, "MBX command 0x%x timed out\n",
- QLCNIC_MBX_RSP(mbx_cmd));
- rsp = QLCNIC_RCODE_TIMEOUT;
-out:
- /* clear fw mbx control register */
- QLCWRX(ahw, QLCNIC_FW_MBX_CTRL, QLCNIC_CLR_OWNER);
- spin_unlock_irqrestore(&adapter->ahw->mbx_lock, flags);
- return rsp;
+ return cmd.rsp_opcode;
}
static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
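Aside (not part of the patch): the rewritten qlcnic_sriov_post_bc_msg() above no longer writes the mailbox registers itself; it fills a command descriptor, queues it to the mailbox worker, and blocks on a completion with a timeout. A stand-alone analogue of that enqueue-and-wait shape using POSIX threads (all names hypothetical):

#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct mbx_cmd {
        int rsp;                        /* filled in by the worker */
        int done;
        pthread_mutex_t lock;
        pthread_cond_t completion;
};

static void *mbx_worker(void *arg)
{
        struct mbx_cmd *cmd = arg;

        /* pretend to talk to firmware, then signal completion */
        pthread_mutex_lock(&cmd->lock);
        cmd->rsp = 0;
        cmd->done = 1;
        pthread_cond_signal(&cmd->completion);
        pthread_mutex_unlock(&cmd->lock);
        return NULL;
}

static int post_cmd_and_wait(struct mbx_cmd *cmd, int timeout_sec)
{
        struct timespec deadline;
        pthread_t worker;
        int err = 0;

        pthread_create(&worker, NULL, mbx_worker, cmd);

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += timeout_sec;

        pthread_mutex_lock(&cmd->lock);
        while (!cmd->done && !err)
                err = pthread_cond_timedwait(&cmd->completion, &cmd->lock,
                                             &deadline);
        pthread_mutex_unlock(&cmd->lock);

        pthread_join(worker, NULL);
        return err ? -1 : cmd->rsp;     /* -1 stands in for the timeout path */
}

int main(void)
{
        struct mbx_cmd cmd = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .completion = PTHREAD_COND_INITIALIZER,
        };

        printf("rsp = %d\n", post_cmd_and_wait(&cmd, 5));
        return 0;
}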
@@ -458,7 +400,7 @@ int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
- adapter->rx_pvid = (cmd->rsp.arg[1] >> 16) & 0xffff;
+ adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff;
adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
return 0;
}
@@ -490,11 +432,12 @@ static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
return 0;
}
-static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
+static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter,
+ struct qlcnic_info *info)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
struct qlcnic_cmd_args cmd;
- int ret;
+ int ret = 0;
ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
if (ret)
@@ -522,8 +465,8 @@ static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
{
- struct qlcnic_info nic_info;
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_info nic_info;
int err;
err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
@@ -534,7 +477,7 @@ static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
if (err)
return -EIO;
- err = qlcnic_sriov_get_vf_acl(adapter);
+ err = qlcnic_sriov_get_vf_acl(adapter, &nic_info);
if (err)
return err;
@@ -562,9 +505,9 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
INIT_LIST_HEAD(&adapter->vf_mc_list);
if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
dev_warn(&adapter->pdev->dev,
- "83xx adapter do not support MSI interrupts\n");
+ "Device does not support MSI interrupts\n");
- err = qlcnic_setup_intr(adapter, 1);
+ err = qlcnic_setup_intr(adapter, 1, 0);
if (err) {
dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
goto err_out_disable_msi;
@@ -590,6 +533,9 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
if (err)
goto err_out_send_channel_term;
+ if (adapter->dcb && qlcnic_dcb_attach(adapter))
+ qlcnic_clear_dcb_ops(adapter);
+
err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
if (err)
goto err_out_send_channel_term;
@@ -597,6 +543,7 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
pci_set_drvdata(adapter->pdev, adapter);
dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
adapter->netdev->name);
+
qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
adapter->ahw->idc.delay);
return 0;
@@ -637,8 +584,6 @@ int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
struct qlcnic_hardware_context *ahw = adapter->ahw;
int err;
- spin_lock_init(&ahw->mbx_lock);
- set_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
ahw->reset_context = 0;
@@ -762,6 +707,7 @@ static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
mbx->req.arg[0] = (type | (mbx->req.num << 16) |
(3 << 29));
+ mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
return 0;
}
}
@@ -813,6 +759,7 @@ static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
cmd->req.num = trans->req_pay_size / 4;
cmd->rsp.num = trans->rsp_pay_size / 4;
hdr = trans->rsp_hdr;
+ cmd->op_type = trans->req_hdr->op_type;
}
trans->trans_id = seq;
@@ -1083,6 +1030,7 @@ static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
if (test_bit(QLC_BC_VF_FLR, &vf->state))
return;
+ memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
trans = list_first_entry(&vf->rcv_act.wait_list,
struct qlcnic_bc_trans, list);
adapter = vf->adapter;
@@ -1232,6 +1180,7 @@ static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
return;
}
+ memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
cmd_op = hdr->cmd_op;
if (qlcnic_sriov_alloc_bc_trans(&trans))
return;
@@ -1357,7 +1306,7 @@ int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
if (enable)
cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
- err = qlcnic_83xx_mbx_op(adapter, &cmd);
+ err = qlcnic_83xx_issue_cmd(adapter, &cmd);
if (err != QLCNIC_RCODE_SUCCESS) {
dev_err(&adapter->pdev->dev,
@@ -1389,10 +1338,11 @@ static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
return -EIO;
}
-static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter,
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
struct device *dev = &adapter->pdev->dev;
struct qlcnic_bc_trans *trans;
int err;
@@ -1409,7 +1359,7 @@ static int qlcnic_sriov_vf_mbx_op(struct qlcnic_adapter *adapter,
goto cleanup_transaction;
retry:
- if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
+ if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
rsp = -EIO;
QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
@@ -1452,7 +1402,7 @@ err_out:
if (rsp == QLCNIC_RCODE_TIMEOUT) {
ahw->reset_context = 1;
adapter->need_fw_reset = 1;
- clear_bit(QLC_83XX_MBX_READY, &ahw->idc.status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
}
cleanup_transaction:
@@ -1611,8 +1561,8 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
{
int err;
- set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
- qlcnic_83xx_enable_mbx_intrpt(adapter);
+ qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
if (err)
@@ -1626,6 +1576,8 @@ static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
if (err)
goto err_out_term_channel;
+ qlcnic_dcb_get_info(adapter);
+
return 0;
err_out_term_channel:
@@ -1655,8 +1607,10 @@ static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
struct net_device *netdev = adapter->netdev;
u8 i, max_ints = ahw->num_msix - 1;
- qlcnic_83xx_disable_mbx_intr(adapter);
netif_device_detach(netdev);
+ qlcnic_83xx_detach_mailbox_work(adapter);
+ qlcnic_83xx_disable_mbx_intr(adapter);
+
if (netif_running(netdev))
qlcnic_down(adapter, netdev);
@@ -1700,6 +1654,7 @@ static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ struct qlcnic_mailbox *mbx = ahw->mailbox;
struct device *dev = &adapter->pdev->dev;
struct qlc_83xx_idc *idc = &ahw->idc;
u8 func = ahw->pci_func;
@@ -1710,7 +1665,7 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
/* Skip the context reset and check if FW is hung */
if (adapter->reset_ctx_cnt < 3) {
adapter->need_fw_reset = 1;
- clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
dev_info(dev,
"Resetting context, wait here to check if FW is in failed state\n");
return 0;
@@ -1735,7 +1690,7 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
__func__, adapter->reset_ctx_cnt, func);
set_bit(__QLCNIC_RESETTING, &adapter->state);
adapter->need_fw_reset = 1;
- clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
qlcnic_sriov_vf_detach(adapter);
adapter->need_fw_reset = 0;
@@ -1785,6 +1740,7 @@ static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
static int
qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
struct qlc_83xx_idc *idc = &adapter->ahw->idc;
dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
@@ -1792,7 +1748,7 @@ qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
set_bit(__QLCNIC_RESETTING, &adapter->state);
adapter->tx_timeo_cnt = 0;
adapter->reset_ctx_cnt = 0;
- clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
qlcnic_sriov_vf_detach(adapter);
}
@@ -1801,6 +1757,7 @@ qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
struct qlc_83xx_idc *idc = &adapter->ahw->idc;
u8 func = adapter->ahw->pci_func;
@@ -1810,7 +1767,7 @@ static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
set_bit(__QLCNIC_RESETTING, &adapter->state);
adapter->tx_timeo_cnt = 0;
adapter->reset_ctx_cnt = 0;
- clear_bit(QLC_83XX_MBX_READY, &idc->status);
+ clear_bit(QLC_83XX_MBX_READY, &mbx->status);
qlcnic_sriov_vf_detach(adapter);
}
return 0;
@@ -1988,7 +1945,7 @@ int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
int err;
set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
- qlcnic_83xx_enable_mbx_intrpt(adapter);
+ qlcnic_83xx_enable_mbx_interrupt(adapter);
err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
if (err)
return err;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index ee0c1d30796..330d9a8774a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -635,12 +635,12 @@ static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_vf_info *vf = trans->vf;
- struct qlcnic_adapter *adapter = vf->adapter;
- int err;
+ struct qlcnic_vport *vp = vf->vp;
+ struct qlcnic_adapter *adapter;
u16 func = vf->pci_func;
+ int err;
- cmd->rsp.arg[0] = trans->req_hdr->cmd_op;
- cmd->rsp.arg[0] |= (1 << 16);
+ adapter = vf->adapter;
if (trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) {
err = qlcnic_sriov_pf_config_vport(adapter, 1, func);
@@ -650,6 +650,8 @@ static int qlcnic_sriov_pf_channel_cfg_cmd(struct qlcnic_bc_trans *trans,
qlcnic_sriov_pf_config_vport(adapter, 0, func);
}
} else {
+ if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
+ vp->vlan = 0;
err = qlcnic_sriov_pf_config_vport(adapter, 0, func);
}
@@ -1181,9 +1183,18 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans,
struct qlcnic_vf_info *vf = trans->vf;
struct qlcnic_vport *vp = vf->vp;
u8 cmd_op, mode = vp->vlan_mode;
+ struct qlcnic_adapter *adapter;
+
+ adapter = vf->adapter;
cmd_op = trans->req_hdr->cmd_op;
- cmd->rsp.arg[0] = (cmd_op & 0xffff) | 14 << 16 | 1 << 25;
+ cmd->rsp.arg[0] |= 1 << 25;
+
+ /* For an 84xx adapter in PVID mode, the PF driver (PFD) should send the
+ * VLAN mode to the VF driver (VFD) as QLC_NO_VLAN_MODE, which is zero in
+ * the mailbox response.
+ */
+ if (qlcnic_84xx_check(adapter) && mode == QLC_PVID_MODE)
+ return 0;
switch (mode) {
case QLC_GUEST_VLAN_MODE:
@@ -1282,6 +1293,10 @@ static const int qlcnic_pf_passthru_supp_cmds[] = {
QLCNIC_CMD_GET_STATISTICS,
QLCNIC_CMD_GET_PORT_CONFIG,
QLCNIC_CMD_GET_LINK_STATUS,
+ QLCNIC_CMD_DCB_QUERY_CAP,
+ QLCNIC_CMD_DCB_QUERY_PARAM,
+ QLCNIC_CMD_INIT_NIC_FUNC,
+ QLCNIC_CMD_STOP_NIC_FUNC,
};
static const struct qlcnic_sriov_cmd_handler qlcnic_pf_bc_cmd_hdlr[] = {
@@ -1561,6 +1576,7 @@ void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
struct qlcnic_vf_info *vf)
{
struct net_device *dev = vf->adapter->netdev;
+ struct qlcnic_vport *vp = vf->vp;
if (!test_and_clear_bit(QLC_BC_VF_STATE, &vf->state)) {
clear_bit(QLC_BC_VF_FLR, &vf->state);
@@ -1573,6 +1589,9 @@ void qlcnic_sriov_pf_handle_flr(struct qlcnic_sriov *sriov,
return;
}
+ if (vp->vlan_mode == QLC_GUEST_VLAN_MODE)
+ vp->vlan = 0;
+
qlcnic_sriov_schedule_flr(sriov, vf, qlcnic_sriov_pf_process_flr);
netdev_info(dev, "FLR received for PCI func %d\n", vf->pci_func);
}
@@ -1621,24 +1640,26 @@ int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
- int i, num_vfs = sriov->num_vfs;
+ int i, num_vfs;
struct qlcnic_vf_info *vf_info;
u8 *curr_mac;
if (!qlcnic_sriov_pf_check(adapter))
return -EOPNOTSUPP;
+ num_vfs = sriov->num_vfs;
+
if (!is_valid_ether_addr(mac) || vf >= num_vfs)
return -EINVAL;
- if (!compare_ether_addr(adapter->mac_addr, mac)) {
+ if (ether_addr_equal(adapter->mac_addr, mac)) {
netdev_err(netdev, "MAC address is already in use by the PF\n");
return -EINVAL;
}
for (i = 0; i < num_vfs; i++) {
vf_info = &sriov->vf_info[i];
- if (!compare_ether_addr(vf_info->vp->mac, mac)) {
+ if (ether_addr_equal(vf_info->vp->mac, mac)) {
netdev_err(netdev,
"MAC address is already in use by VF %d\n",
i);
@@ -1741,6 +1762,7 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
switch (vlan) {
case 4095:
+ vp->vlan = 0;
vp->vlan_mode = QLC_GUEST_VLAN_MODE;
break;
case 0:
@@ -1759,6 +1781,29 @@ int qlcnic_sriov_set_vf_vlan(struct net_device *netdev, int vf,
return 0;
}
+static __u32 qlcnic_sriov_get_vf_vlan(struct qlcnic_adapter *adapter,
+ struct qlcnic_vport *vp, int vf)
+{
+ __u32 vlan = 0;
+
+ switch (vp->vlan_mode) {
+ case QLC_PVID_MODE:
+ vlan = vp->vlan;
+ break;
+ case QLC_GUEST_VLAN_MODE:
+ vlan = MAX_VLAN_ID;
+ break;
+ case QLC_NO_VLAN_MODE:
+ vlan = 0;
+ break;
+ default:
+ netdev_info(adapter->netdev, "Invalid VLAN mode = %d for VF %d\n",
+ vp->vlan_mode, vf);
+ }
+
+ return vlan;
+}
+
int qlcnic_sriov_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi)
{
@@ -1774,7 +1819,7 @@ int qlcnic_sriov_get_vf_config(struct net_device *netdev,
vp = sriov->vf_info[vf].vp;
memcpy(&ivi->mac, vp->mac, ETH_ALEN);
- ivi->vlan = vp->vlan;
+ ivi->vlan = qlcnic_sriov_get_vf_vlan(adapter, vp, vf);
ivi->qos = vp->qos;
ivi->spoofchk = vp->spoofchk;
if (vp->max_tx_bw == MAX_BW)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 10ed82b3bac..c6165d05cc1 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -170,9 +170,9 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) {
err = qlcnic_get_beacon_state(adapter, &h_beacon_state);
- if (!err) {
- dev_info(&adapter->pdev->dev,
- "Failed to get current beacon state\n");
+ if (err) {
+ netdev_err(adapter->netdev,
+ "Failed to get current beacon state\n");
} else {
if (h_beacon_state == QLCNIC_BEACON_DISABLE)
ahw->beacon_state = 0;
@@ -465,8 +465,14 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
memset(&pm_cfg, 0,
sizeof(struct qlcnic_pm_func_cfg) * QLCNIC_MAX_PCI_FUNC);
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
pci_func = adapter->npars[i].pci_func;
+ if (!adapter->npars[i].active)
+ continue;
+
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
pm_cfg[pci_func].action = adapter->npars[i].enable_pm;
pm_cfg[pci_func].dest_npar = 0;
pm_cfg[pci_func].pci_func = i;
@@ -632,8 +638,14 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
memset(&esw_cfg, 0,
sizeof(struct qlcnic_esw_func_cfg) * QLCNIC_MAX_PCI_FUNC);
- for (i = 0; i < adapter->ahw->act_pci_func; i++) {
+ for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
pci_func = adapter->npars[i].pci_func;
+ if (!adapter->npars[i].active)
+ continue;
+
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
esw_cfg[pci_func].pci_func = pci_func;
if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
return QL_STATUS_INVALID_PARAM;
@@ -732,6 +744,9 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
if (ret)
return ret;
+ if (!adapter->npars[i].eswitch_status)
+ continue;
+
np_cfg[i].pci_func = i;
np_cfg[i].op_mode = (u8)nic_info.op_mode;
np_cfg[i].port_num = nic_info.phys_port;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 7e8d6826396..89943377846 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -2149,7 +2149,7 @@ struct ql_adapter {
struct timer_list timer;
atomic_t lb_count;
/* Keep local copy of current mac address. */
- char current_mac_addr[6];
+ char current_mac_addr[ETH_ALEN];
};
/*
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index e6acb9fa576..d2e591955bd 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -478,7 +478,7 @@ rx_status_loop:
while (1) {
u32 status, len;
- dma_addr_t mapping;
+ dma_addr_t mapping, new_mapping;
struct sk_buff *skb, *new_skb;
struct cp_desc *desc;
const unsigned buflen = cp->rx_buf_sz;
@@ -520,6 +520,14 @@ rx_status_loop:
goto rx_next;
}
+ new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
+ PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&cp->pdev->dev, new_mapping)) {
+ dev->stats.rx_dropped++;
+ kfree_skb(new_skb);
+ goto rx_next;
+ }
+
dma_unmap_single(&cp->pdev->dev, mapping,
buflen, PCI_DMA_FROMDEVICE);
@@ -531,12 +539,11 @@ rx_status_loop:
skb_put(skb, len);
- mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
- PCI_DMA_FROMDEVICE);
cp->rx_skb[rx_tail] = new_skb;
cp_rx_skb(cp, skb, desc);
rx++;
+ mapping = new_mapping;
rx_next:
cp->rx_ring[rx_tail].opts2 = 0;
@@ -716,6 +723,22 @@ static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
}
+static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb,
+ int first, int entry_last)
+{
+ int frag, index;
+ struct cp_desc *txd;
+ skb_frag_t *this_frag;
+ for (frag = 0; frag+first < entry_last; frag++) {
+ index = first+frag;
+ cp->tx_skb[index] = NULL;
+ txd = &cp->tx_ring[index];
+ this_frag = &skb_shinfo(skb)->frags[frag];
+ dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
+ skb_frag_size(this_frag), PCI_DMA_TODEVICE);
+ }
+}
+
static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
struct net_device *dev)
{
@@ -749,6 +772,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
len = skb->len;
mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&cp->pdev->dev, mapping))
+ goto out_dma_error;
+
txd->opts2 = opts2;
txd->addr = cpu_to_le64(mapping);
wmb();
@@ -786,6 +812,9 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
first_len = skb_headlen(skb);
first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
first_len, PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&cp->pdev->dev, first_mapping))
+ goto out_dma_error;
+
cp->tx_skb[entry] = skb;
entry = NEXT_TX(entry);
@@ -799,6 +828,11 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
mapping = dma_map_single(&cp->pdev->dev,
skb_frag_address(this_frag),
len, PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&cp->pdev->dev, mapping)) {
+ unwind_tx_frag_mapping(cp, skb, first_entry, entry);
+ goto out_dma_error;
+ }
+
eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
ctrl = eor | len | DescOwn;
@@ -859,11 +893,16 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
netif_stop_queue(dev);
+out_unlock:
spin_unlock_irqrestore(&cp->lock, intr_flags);
cpw8(TxPoll, NormalTxPoll);
return NETDEV_TX_OK;
+out_dma_error:
+ kfree_skb(skb);
+ cp->dev->stats.tx_dropped++;
+ goto out_unlock;
}
/* Set or clear the multicast filter for this adaptor.
@@ -1054,6 +1093,10 @@ static int cp_refill_rx(struct cp_private *cp)
mapping = dma_map_single(&cp->pdev->dev, skb->data,
cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ if (dma_mapping_error(&cp->pdev->dev, mapping)) {
+ kfree_skb(skb);
+ goto err_out;
+ }
cp->rx_skb[i] = skb;
cp->rx_ring[i].opts2 = 0;
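Aside (not part of the patch): the 8139cp changes above follow one rule: map the replacement buffer first and check dma_mapping_error(), and only then unmap and hand up the old one, so a mapping failure simply drops the packet and leaves the ring entry usable. The same swap-only-on-success shape with plain heap buffers standing in for DMA mappings (names hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct ring_slot {
        void *buf;
        size_t len;
};

/*
 * On success the old buffer is handed to the caller via *out and a fresh
 * buffer takes its place in the slot.  On failure the slot is untouched,
 * mirroring the "goto rx_next" path after a dma_mapping_error().
 */
static int refill_slot(struct ring_slot *slot, void **out)
{
        void *fresh = malloc(slot->len);        /* stand-in for dma_map_single() */

        if (!fresh)
                return -1;

        *out = slot->buf;                       /* old buffer goes up the stack */
        slot->buf = fresh;                      /* ring keeps a valid buffer */
        return 0;
}

int main(void)
{
        struct ring_slot slot = { .buf = malloc(64), .len = 64 };
        void *rx = NULL;

        if (refill_slot(&slot, &rx) == 0)
                printf("delivered old buffer %p, slot refilled\n", rx);

        free(rx);
        free(slot.buf);
        return 0;
}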
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 4106a743ca7..6f87f2cde64 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -1897,12 +1897,13 @@ static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *p)
{
struct rtl8169_private *tp = netdev_priv(dev);
-
- if (regs->len > R8169_REGS_SIZE)
- regs->len = R8169_REGS_SIZE;
+ u32 __iomem *data = tp->mmio_addr;
+ u32 *dw = p;
+ int i;
rtl_lock_work(tp);
- memcpy_fromio(p, tp->mmio_addr, regs->len);
+ for (i = 0; i < R8169_REGS_SIZE; i += 4)
+ memcpy_fromio(dw++, data++, 4);
rtl_unlock_work(tp);
}
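Aside (not part of the patch): rtl8169_get_regs() above replaces one bulk memcpy_fromio() with a loop of 4-byte copies, presumably to keep every MMIO access dword-sized. A stand-alone sketch of copying a register window in fixed 32-bit reads, with a volatile pointer standing in for the ioremapped BAR (values hypothetical):

#include <stdint.h>
#include <stdio.h>

#define REGS_SIZE 16    /* hypothetical register window, in bytes */

static void copy_regs_dword(uint32_t *dst, const volatile uint32_t *src,
                            unsigned int bytes)
{
        unsigned int i;

        for (i = 0; i < bytes / 4; i++)
                dst[i] = src[i];        /* one 32-bit read per register */
}

int main(void)
{
        static const uint32_t fake_bar[REGS_SIZE / 4] = { 0xdeadbeef, 1, 2, 3 };
        uint32_t snapshot[REGS_SIZE / 4];

        copy_regs_dword(snapshot, fake_bar, REGS_SIZE);
        printf("0x%08x\n", snapshot[0]);
        return 0;
}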
@@ -3689,7 +3690,7 @@ static void rtl_phy_work(struct rtl8169_private *tp)
if (tp->link_ok(ioaddr))
return;
- netif_warn(tp, link, tp->dev, "PHY reset until link up\n");
+ netif_dbg(tp, link, tp->dev, "PHY reset until link up\n");
tp->phy_reset_enable(tp);
@@ -6468,6 +6469,8 @@ static int rtl8169_close(struct net_device *dev)
rtl8169_down(dev);
rtl_unlock_work(tp);
+ cancel_work_sync(&tp->wk.work);
+
free_irq(pdev->irq, dev);
dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
@@ -6793,8 +6796,6 @@ static void rtl_remove_one(struct pci_dev *pdev)
rtl8168_driver_stop(tp);
}
- cancel_work_sync(&tp->wk.work);
-
netif_napi_del(&tp->napi);
unregister_netdev(dev);
@@ -7088,7 +7089,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
RTL_W8(Cfg9346, Cfg9346_Unlock);
RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
- RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
+ RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus));
if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
tp->features |= RTL_FEATURE_WOL;
if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 19a8a045e07..a30c4395b23 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -13,4 +13,4 @@ config SH_ETH
Renesas SuperH Ethernet device driver.
This driver supporting CPUs are:
- SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757,
- R8A7740 and R8A7779.
+ R8A7740, R8A777x and R8A7790.
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index a753928bab9..5cd831ebfa8 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -189,6 +189,7 @@ static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
[RMCR] = 0x0258,
[TFUCR] = 0x0264,
[RFOCR] = 0x0268,
+ [RMIIMODE] = 0x026c,
[FCFTR] = 0x0270,
[TRIMD] = 0x027c,
};
@@ -377,6 +378,8 @@ static struct sh_eth_cpu_data r8a777x_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_r8a777x,
+ .register_type = SH_ETH_REG_FAST_RCAR,
+
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
.eesipr_value = 0x01ff009f,
@@ -392,6 +395,30 @@ static struct sh_eth_cpu_data r8a777x_data = {
.hw_swap = 1,
};
+/* R8A7790 */
+static struct sh_eth_cpu_data r8a7790_data = {
+ .set_duplex = sh_eth_set_duplex,
+ .set_rate = sh_eth_set_rate_r8a777x,
+
+ .register_type = SH_ETH_REG_FAST_RCAR,
+
+ .ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
+ .ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
+ .eesipr_value = 0x01ff009f,
+
+ .tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
+ .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+ EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
+ EESR_ECI,
+
+ .apr = 1,
+ .mpr = 1,
+ .tpauser = 1,
+ .hw_swap = 1,
+ .rmiimode = 1,
+ .shift_rd0 = 1,
+};
+
static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -413,6 +440,8 @@ static struct sh_eth_cpu_data sh7724_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_sh7724,
+ .register_type = SH_ETH_REG_FAST_SH4,
+
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
.eesipr_value = 0x01ff009f,
@@ -451,6 +480,8 @@ static struct sh_eth_cpu_data sh7757_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_sh7757,
+ .register_type = SH_ETH_REG_FAST_SH4,
+
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.rmcr_value = 0x00000001,
@@ -519,6 +550,8 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_giga,
+ .register_type = SH_ETH_REG_GIGABIT,
+
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -577,6 +610,8 @@ static struct sh_eth_cpu_data sh7734_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_gether,
+ .register_type = SH_ETH_REG_GIGABIT,
+
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -604,6 +639,8 @@ static struct sh_eth_cpu_data sh7763_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_gether,
+ .register_type = SH_ETH_REG_GIGABIT,
+
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -641,6 +678,8 @@ static struct sh_eth_cpu_data r8a7740_data = {
.set_duplex = sh_eth_set_duplex,
.set_rate = sh_eth_set_rate_gether,
+ .register_type = SH_ETH_REG_GIGABIT,
+
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
@@ -663,6 +702,8 @@ static struct sh_eth_cpu_data r8a7740_data = {
};
static struct sh_eth_cpu_data sh7619_data = {
+ .register_type = SH_ETH_REG_FAST_SH3_SH2,
+
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.apr = 1,
@@ -672,6 +713,8 @@ static struct sh_eth_cpu_data sh7619_data = {
};
static struct sh_eth_cpu_data sh771x_data = {
+ .register_type = SH_ETH_REG_FAST_SH3_SH2,
+
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
.tsu = 1,
};
@@ -1124,6 +1167,9 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
if (ret)
goto out;
+ if (mdp->cd->rmiimode)
+ sh_eth_write(ndev, 0x1, RMIIMODE);
+
/* Descriptor format */
sh_eth_ring_format(ndev);
if (mdp->cd->rpadir)
@@ -1297,9 +1343,12 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
mdp->rx_skbuff[entry] = NULL;
if (mdp->cd->rpadir)
skb_reserve(skb, NET_IP_ALIGN);
+ dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
+ mdp->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
- netif_rx(skb);
+ netif_receive_skb(skb);
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += pkt_len;
}
@@ -1857,11 +1906,13 @@ static int sh_eth_open(struct net_device *ndev)
pm_runtime_get_sync(&mdp->pdev->dev);
+ napi_enable(&mdp->napi);
+
ret = request_irq(ndev->irq, sh_eth_interrupt,
mdp->cd->irq_flags, ndev->name, ndev);
if (ret) {
dev_err(&ndev->dev, "Can not assign IRQ number\n");
- return ret;
+ goto out_napi_off;
}
/* Descriptor set */
@@ -1879,12 +1930,12 @@ static int sh_eth_open(struct net_device *ndev)
if (ret)
goto out_free_irq;
- napi_enable(&mdp->napi);
-
return ret;
out_free_irq:
free_irq(ndev->irq, ndev);
+out_napi_off:
+ napi_disable(&mdp->napi);
pm_runtime_put_sync(&mdp->pdev->dev);
return ret;
}
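Aside (not part of the patch): the sh_eth_open()/sh_eth_close() reordering above makes teardown the mirror image of bring-up: NAPI is now enabled before the IRQ is requested and disabled only after it is freed, and the new out_napi_off label unwinds a request_irq() failure. A stand-alone sketch of that acquire-in-order / release-in-reverse goto pattern (resource names hypothetical):

#include <stdio.h>

static int napi_on(void)   { puts("napi on");       return 0; }
static void napi_off(void) { puts("napi off"); }
static int irq_get(void)   { puts("irq requested"); return -1; /* simulate failure */ }

static int dev_open(void)
{
        int ret;

        ret = napi_on();
        if (ret)
                return ret;

        ret = irq_get();
        if (ret)
                goto out_napi_off;      /* unwind in reverse order */

        return 0;

out_napi_off:
        napi_off();
        return ret;
}

int main(void)
{
        printf("open -> %d\n", dev_open());
        return 0;
}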
@@ -1976,8 +2027,6 @@ static int sh_eth_close(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- napi_disable(&mdp->napi);
-
netif_stop_queue(ndev);
/* Disable interrupts by clearing the interrupt mask. */
@@ -1995,6 +2044,8 @@ static int sh_eth_close(struct net_device *ndev)
free_irq(ndev->irq, ndev);
+ napi_disable(&mdp->napi);
+
/* Free all the skbuffs in the Rx queue. */
sh_eth_ring_free(ndev);
@@ -2561,7 +2612,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
struct resource *res;
struct net_device *ndev = NULL;
struct sh_eth_private *mdp = NULL;
- struct sh_eth_plat_data *pd = pdev->dev.platform_data;
+ struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
const struct platform_device_id *id = platform_get_device_id(pdev);
/* get base addr */
@@ -2594,9 +2645,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
SET_NETDEV_DEV(ndev, &pdev->dev);
- /* Fill in the fields of the device structure with ethernet values. */
- ether_setup(ndev);
-
mdp = netdev_priv(ndev);
mdp->num_tx_ring = TX_RING_SIZE;
mdp->num_rx_ring = RX_RING_SIZE;
@@ -2618,10 +2666,10 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
mdp->edmac_endian = pd->edmac_endian;
mdp->no_ether_link = pd->no_ether_link;
mdp->ether_link_active_low = pd->ether_link_active_low;
- mdp->reg_offset = sh_eth_get_register_offset(pd->register_type);
/* set cpu data */
mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
+ mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
sh_eth_set_default_cpu_data(mdp->cd);
/* set function */
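Aside (not part of the patch): the probe change above stops taking the register layout from platform data and instead reads register_type from the per-SoC sh_eth_cpu_data descriptor before looking up the matching offset table. A stand-alone sketch of selecting an offset table from an enum carried in a chip descriptor (all values hypothetical):

#include <stdio.h>

enum reg_layout { REG_GIGABIT, REG_FAST_RCAR, REG_LAYOUT_MAX };

struct chip_data {
        const char *name;
        enum reg_layout register_type;
};

static const unsigned short offsets_gigabit[]   = { 0x000, 0x008, 0x010 };
static const unsigned short offsets_fast_rcar[] = { 0x000, 0x004, 0x008 };

static const unsigned short *get_register_offset(enum reg_layout type)
{
        static const unsigned short *tables[REG_LAYOUT_MAX] = {
                [REG_GIGABIT]   = offsets_gigabit,
                [REG_FAST_RCAR] = offsets_fast_rcar,
        };

        return type < REG_LAYOUT_MAX ? tables[type] : NULL;
}

int main(void)
{
        struct chip_data r8a7790 = { "r8a7790-ether", REG_FAST_RCAR };
        const unsigned short *regs = get_register_offset(r8a7790.register_type);

        printf("%s: second register at offset 0x%03x\n", r8a7790.name, regs[1]);
        return 0;
}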
@@ -2749,6 +2797,7 @@ static struct platform_device_id sh_eth_id_table[] = {
{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
+ { "r8a7790-ether", (kernel_ulong_t)&r8a7790_data },
{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 99995bf38c4..a0db02c63b1 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -60,6 +60,7 @@ enum {
EDOCR,
TFUCR,
RFOCR,
+ RMIIMODE,
FCFTR,
RPADIR,
TRIMD,
@@ -156,6 +157,13 @@ enum {
SH_ETH_MAX_REGISTER_OFFSET,
};
+enum {
+ SH_ETH_REG_GIGABIT,
+ SH_ETH_REG_FAST_RCAR,
+ SH_ETH_REG_FAST_SH4,
+ SH_ETH_REG_FAST_SH3_SH2
+};
+
/* Driver's parameters */
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
#define SH4_SKB_RX_ALIGN 32
@@ -453,6 +461,7 @@ struct sh_eth_cpu_data {
void (*set_rate)(struct net_device *ndev);
/* mandatory initialize value */
+ int register_type;
unsigned long eesipr_value;
/* optional initialize value */
@@ -482,6 +491,7 @@ struct sh_eth_cpu_data {
unsigned hw_crc:1; /* E-DMAC have CSMR */
unsigned select_mii:1; /* EtherC have RMII_MII (MII select register) */
unsigned shift_rd0:1; /* shift Rx descriptor word 0 right by 16 */
+ unsigned rmiimode:1; /* EtherC has RMIIMODE register */
};
struct sh_eth_private {
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 856e523ac93..c7657188601 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -721,7 +721,7 @@ static const struct net_device_ops sgiseeq_netdev_ops = {
static int sgiseeq_probe(struct platform_device *pdev)
{
- struct sgiseeq_platform_data *pd = pdev->dev.platform_data;
+ struct sgiseeq_platform_data *pd = dev_get_platdata(&pdev->dev);
struct hpc3_regs *hpcregs = pd->hpc;
struct sgiseeq_init_block *sr;
unsigned int irq = pd->irq;
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 4136ccc4a95..8b7152565c5 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -1,5 +1,5 @@
config SFC
- tristate "Solarflare SFC4000/SFC9000-family support"
+ tristate "Solarflare SFC4000/SFC9000/SFC9100-family support"
depends on PCI
select MDIO
select CRC32
@@ -8,12 +8,13 @@ config SFC
select PTP_1588_CLOCK
---help---
This driver supports 10-gigabit Ethernet cards based on
- the Solarflare SFC4000 and SFC9000-family controllers.
+ the Solarflare SFC4000, SFC9000-family and SFC9100-family
+ controllers.
To compile this driver as a module, choose M here. The module
will be called sfc.
config SFC_MTD
- bool "Solarflare SFC4000/SFC9000-family MTD support"
+ bool "Solarflare SFC4000/SFC9000/SFC9100-family MTD support"
depends on SFC && MTD && !(SFC=y && MTD=m)
default y
---help---
@@ -21,7 +22,7 @@ config SFC_MTD
(e.g. /dev/mtd1). This is required to update the firmware or
the boot configuration under Linux.
config SFC_MCDI_MON
- bool "Solarflare SFC9000-family hwmon support"
+ bool "Solarflare SFC9000/SFC9100-family hwmon support"
depends on SFC && HWMON && !(SFC=y && HWMON=m)
default y
---help---
diff --git a/drivers/net/ethernet/sfc/Makefile b/drivers/net/ethernet/sfc/Makefile
index 945bf06e69e..3a83c0dca8e 100644
--- a/drivers/net/ethernet/sfc/Makefile
+++ b/drivers/net/ethernet/sfc/Makefile
@@ -1,8 +1,7 @@
-sfc-y += efx.o nic.o falcon.o siena.o tx.o rx.o filter.o \
- falcon_xmac.o mcdi_mac.o \
- selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
+sfc-y += efx.o nic.o farch.o falcon.o siena.o ef10.o tx.o \
+ rx.o selftest.o ethtool.o qt202x_phy.o mdio_10g.o \
tenxpress.o txc43128_phy.o falcon_boards.o \
- mcdi.o mcdi_phy.o mcdi_mon.o ptp.o
+ mcdi.o mcdi_port.o mcdi_mon.o ptp.o
sfc-$(CONFIG_SFC_MTD) += mtd.o
sfc-$(CONFIG_SFC_SRIOV) += siena_sriov.o
diff --git a/drivers/net/ethernet/sfc/bitfield.h b/drivers/net/ethernet/sfc/bitfield.h
index 5400a33f254..17d83f37fbf 100644
--- a/drivers/net/ethernet/sfc/bitfield.h
+++ b/drivers/net/ethernet/sfc/bitfield.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2009 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -29,6 +29,10 @@
/* Lowest bit numbers and widths */
#define EFX_DUMMY_FIELD_LBN 0
#define EFX_DUMMY_FIELD_WIDTH 0
+#define EFX_WORD_0_LBN 0
+#define EFX_WORD_0_WIDTH 16
+#define EFX_WORD_1_LBN 16
+#define EFX_WORD_1_WIDTH 16
#define EFX_DWORD_0_LBN 0
#define EFX_DWORD_0_WIDTH 32
#define EFX_DWORD_1_LBN 32
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
new file mode 100644
index 00000000000..5f42313b496
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -0,0 +1,3043 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2012-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "ef10_regs.h"
+#include "io.h"
+#include "mcdi.h"
+#include "mcdi_pcol.h"
+#include "nic.h"
+#include "workarounds.h"
+#include <linux/in.h>
+#include <linux/jhash.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+/* Hardware control for EF10 architecture including 'Huntington'. */
+
+#define EFX_EF10_DRVGEN_EV 7
+enum {
+ EFX_EF10_TEST = 1,
+ EFX_EF10_REFILL,
+};
+
+/* The reserved RSS context value */
+#define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff
+
+/* The filter table(s) are managed by firmware and we have write-only
+ * access. When removing filters we must identify them to the
+ * firmware by a 64-bit handle, but this is too wide for Linux kernel
+ * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to
+ * be able to tell in advance whether a requested insertion will
+ * replace an existing filter. Therefore we maintain a software hash
+ * table, which should be at least as large as the hardware hash
+ * table.
+ *
+ * Huntington has a single 8K filter table shared between all filter
+ * types and both ports.
+ */
+#define HUNT_FILTER_TBL_ROWS 8192
+
+struct efx_ef10_filter_table {
+/* The RX match field masks supported by this fw & hw, in order of priority */
+ enum efx_filter_match_flags rx_match_flags[
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
+ unsigned int rx_match_count;
+
+ struct {
+ unsigned long spec; /* pointer to spec plus flag bits */
+/* BUSY flag indicates that an update is in progress. STACK_OLD is
+ * used to mark and sweep stack-owned MAC filters.
+ */
+#define EFX_EF10_FILTER_FLAG_BUSY 1UL
+#define EFX_EF10_FILTER_FLAG_STACK_OLD 2UL
+#define EFX_EF10_FILTER_FLAGS 3UL
+ u64 handle; /* firmware handle */
+ } *entry;
+ wait_queue_head_t waitq;
+/* Shadow of net_device address lists, guarded by mac_lock */
+#define EFX_EF10_FILTER_STACK_UC_MAX 32
+#define EFX_EF10_FILTER_STACK_MC_MAX 256
+ struct {
+ u8 addr[ETH_ALEN];
+ u16 id;
+ } stack_uc_list[EFX_EF10_FILTER_STACK_UC_MAX],
+ stack_mc_list[EFX_EF10_FILTER_STACK_MC_MAX];
+ int stack_uc_count; /* negative for PROMISC */
+ int stack_mc_count; /* negative for PROMISC/ALLMULTI */
+};
+
+/* An arbitrary search limit for the software hash table */
+#define EFX_EF10_FILTER_SEARCH_LIMIT 200
+
+static void efx_ef10_rx_push_indir_table(struct efx_nic *efx);
+static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
+static void efx_ef10_filter_table_remove(struct efx_nic *efx);
+
+static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
+{
+ efx_dword_t reg;
+
+ efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
+ return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
+ EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
+}
+
+static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
+{
+ return resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]);
+}
+
+static int efx_ef10_init_capabilities(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (outlen >= sizeof(outbuf)) {
+ nic_data->datapath_caps =
+ MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
+ netif_err(efx, drv, efx->net_dev,
+ "Capabilities don't indicate TSO support.\n");
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return rc;
+ rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
+ return rc > 0 ? rc : -ERANGE;
+}
+
+static int efx_ef10_get_mac_address(struct efx_nic *efx, u8 *mac_address)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
+ return -EIO;
+
+ memcpy(mac_address,
+ MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE), ETH_ALEN);
+ return 0;
+}
+
+static int efx_ef10_probe(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data;
+ int i, rc;
+
+ /* We can have one VI for each 8K region. However we need
+ * multiple TX queues per channel.
+ */
+ efx->max_channels =
+ min_t(unsigned int,
+ EFX_MAX_CHANNELS,
+ resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
+ (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
+ BUG_ON(efx->max_channels == 0);
+
+ nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
+ if (!nic_data)
+ return -ENOMEM;
+ efx->nic_data = nic_data;
+
+ rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
+ 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
+ if (rc)
+ goto fail1;
+
+ /* Get the MC's warm boot count. In case it's rebooting right
+ * now, be prepared to retry.
+ */
+ i = 0;
+ for (;;) {
+ rc = efx_ef10_get_warm_boot_count(efx);
+ if (rc >= 0)
+ break;
+ if (++i == 5)
+ goto fail2;
+ ssleep(1);
+ }
+ nic_data->warm_boot_count = rc;
+
+ nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+
+ /* In case we're recovering from a crash (kexec), we want to
+ * cancel any outstanding request by the previous user of this
+ * function. We send a special message using the least
+ * significant bits of the 'high' (doorbell) register.
+ */
+ _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);
+
+ rc = efx_mcdi_init(efx);
+ if (rc)
+ goto fail2;
+
+ /* Reset (most) configuration for this function */
+ rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
+ if (rc)
+ goto fail3;
+
+ /* Enable event logging */
+ rc = efx_mcdi_log_ctrl(efx, true, false, 0);
+ if (rc)
+ goto fail3;
+
+ rc = efx_ef10_init_capabilities(efx);
+ if (rc < 0)
+ goto fail3;
+
+ efx->rx_packet_len_offset =
+ ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
+
+ if (!(nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
+ netif_err(efx, probe, efx->net_dev,
+ "current firmware does not support an RX prefix\n");
+ rc = -ENODEV;
+ goto fail3;
+ }
+
+ rc = efx_mcdi_port_get_number(efx);
+ if (rc < 0)
+ goto fail3;
+ efx->port_num = rc;
+
+ rc = efx_ef10_get_mac_address(efx, efx->net_dev->perm_addr);
+ if (rc)
+ goto fail3;
+
+ rc = efx_ef10_get_sysclk_freq(efx);
+ if (rc < 0)
+ goto fail3;
+ efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
+
+ /* Check whether firmware supports bug 35388 workaround */
+ rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
+ if (rc == 0)
+ nic_data->workaround_35388 = true;
+ else if (rc != -ENOSYS && rc != -ENOENT)
+ goto fail3;
+ netif_dbg(efx, probe, efx->net_dev,
+ "workaround for bug 35388 is %sabled\n",
+ nic_data->workaround_35388 ? "en" : "dis");
+
+ rc = efx_mcdi_mon_probe(efx);
+ if (rc)
+ goto fail3;
+
+ efx_ptp_probe(efx);
+
+ return 0;
+
+fail3:
+ efx_mcdi_fini(efx);
+fail2:
+ efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
+fail1:
+ kfree(nic_data);
+ efx->nic_data = NULL;
+ return rc;
+}
+
+static int efx_ef10_free_vis(struct efx_nic *efx)
+{
+ int rc = efx_mcdi_rpc(efx, MC_CMD_FREE_VIS, NULL, 0, NULL, 0, NULL);
+
+ /* -EALREADY means nothing to free, so ignore */
+ if (rc == -EALREADY)
+ rc = 0;
+ return rc;
+}
+
+static void efx_ef10_remove(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ efx_mcdi_mon_remove(efx);
+
+ /* This needs to be after efx_ptp_remove_channel() with no filters */
+ efx_ef10_rx_free_indir_table(efx);
+
+ rc = efx_ef10_free_vis(efx);
+ WARN_ON(rc != 0);
+
+ efx_mcdi_fini(efx);
+ efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
+ kfree(nic_data);
+}
+
+static int efx_ef10_alloc_vis(struct efx_nic *efx,
+ unsigned int min_vis, unsigned int max_vis)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
+ MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
+ rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
+ return -EIO;
+
+ netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
+ MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
+
+ nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
+ nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
+ return 0;
+}
+
+static int efx_ef10_dimension_resources(struct efx_nic *efx)
+{
+ unsigned int n_vis =
+ max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+
+ return efx_ef10_alloc_vis(efx, n_vis, n_vis);
+}
+
+static int efx_ef10_init_nic(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ if (nic_data->must_realloc_vis) {
+ /* We cannot let the number of VIs change now */
+ rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
+ nic_data->n_allocated_vis);
+ if (rc)
+ return rc;
+ nic_data->must_realloc_vis = false;
+ }
+
+ efx_ef10_rx_push_indir_table(efx);
+ return 0;
+}
+
+static int efx_ef10_map_reset_flags(u32 *flags)
+{
+ enum {
+ EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
+ ETH_RESET_SHARED_SHIFT),
+ EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
+ ETH_RESET_OFFLOAD | ETH_RESET_MAC |
+ ETH_RESET_PHY | ETH_RESET_MGMT) <<
+ ETH_RESET_SHARED_SHIFT)
+ };
+
+ /* We assume for now that our PCI function is permitted to
+ * reset everything.
+ */
+
+ if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
+ *flags &= ~EF10_RESET_MC;
+ return RESET_TYPE_WORLD;
+ }
+
+ if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
+ *flags &= ~EF10_RESET_PORT;
+ return RESET_TYPE_ALL;
+ }
+
+ /* no invisible reset implemented */
+
+ return -EINVAL;
+}
+
+#define EF10_DMA_STAT(ext_name, mcdi_name) \
+ [EF10_STAT_ ## ext_name] = \
+ { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
+#define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \
+ [EF10_STAT_ ## int_name] = \
+ { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
+#define EF10_OTHER_STAT(ext_name) \
+ [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+
+static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
+ EF10_DMA_STAT(tx_bytes, TX_BYTES),
+ EF10_DMA_STAT(tx_packets, TX_PKTS),
+ EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
+ EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
+ EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
+ EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
+ EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
+ EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
+ EF10_DMA_STAT(tx_64, TX_64_PKTS),
+ EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
+ EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
+ EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
+ EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
+ EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
+ EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
+ EF10_DMA_STAT(rx_bytes, RX_BYTES),
+ EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
+ EF10_OTHER_STAT(rx_good_bytes),
+ EF10_OTHER_STAT(rx_bad_bytes),
+ EF10_DMA_STAT(rx_packets, RX_PKTS),
+ EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
+ EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
+ EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
+ EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
+ EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
+ EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
+ EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
+ EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
+ EF10_DMA_STAT(rx_64, RX_64_PKTS),
+ EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
+ EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
+ EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
+ EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
+ EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+ EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+ EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
+ EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
+ EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
+ EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
+ EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
+ EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+};
+
+#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
+ (1ULL << EF10_STAT_tx_packets) | \
+ (1ULL << EF10_STAT_tx_pause) | \
+ (1ULL << EF10_STAT_tx_unicast) | \
+ (1ULL << EF10_STAT_tx_multicast) | \
+ (1ULL << EF10_STAT_tx_broadcast) | \
+ (1ULL << EF10_STAT_rx_bytes) | \
+ (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
+ (1ULL << EF10_STAT_rx_good_bytes) | \
+ (1ULL << EF10_STAT_rx_bad_bytes) | \
+ (1ULL << EF10_STAT_rx_packets) | \
+ (1ULL << EF10_STAT_rx_good) | \
+ (1ULL << EF10_STAT_rx_bad) | \
+ (1ULL << EF10_STAT_rx_pause) | \
+ (1ULL << EF10_STAT_rx_control) | \
+ (1ULL << EF10_STAT_rx_unicast) | \
+ (1ULL << EF10_STAT_rx_multicast) | \
+ (1ULL << EF10_STAT_rx_broadcast) | \
+ (1ULL << EF10_STAT_rx_lt64) | \
+ (1ULL << EF10_STAT_rx_64) | \
+ (1ULL << EF10_STAT_rx_65_to_127) | \
+ (1ULL << EF10_STAT_rx_128_to_255) | \
+ (1ULL << EF10_STAT_rx_256_to_511) | \
+ (1ULL << EF10_STAT_rx_512_to_1023) | \
+ (1ULL << EF10_STAT_rx_1024_to_15xx) | \
+ (1ULL << EF10_STAT_rx_15xx_to_jumbo) | \
+ (1ULL << EF10_STAT_rx_gtjumbo) | \
+ (1ULL << EF10_STAT_rx_bad_gtjumbo) | \
+ (1ULL << EF10_STAT_rx_overflow) | \
+ (1ULL << EF10_STAT_rx_nodesc_drops))
+
+/* These statistics are only provided by the 10G MAC. For a 10G/40G
+ * switchable port we do not expose these because they might not
+ * include all the packets they should.
+ */
+#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) | \
+ (1ULL << EF10_STAT_tx_lt64) | \
+ (1ULL << EF10_STAT_tx_64) | \
+ (1ULL << EF10_STAT_tx_65_to_127) | \
+ (1ULL << EF10_STAT_tx_128_to_255) | \
+ (1ULL << EF10_STAT_tx_256_to_511) | \
+ (1ULL << EF10_STAT_tx_512_to_1023) | \
+ (1ULL << EF10_STAT_tx_1024_to_15xx) | \
+ (1ULL << EF10_STAT_tx_15xx_to_jumbo))
+
+/* These statistics are only provided by the 40G MAC. For a 10G/40G
+ * switchable port we do expose these because the errors will otherwise
+ * be silent.
+ */
+#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
+ (1ULL << EF10_STAT_rx_length_error))
+
+#if BITS_PER_LONG == 64
+#define STAT_MASK_BITMAP(bits) (bits)
+#else
+#define STAT_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32
+#endif
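
STAT_MASK_BITMAP bridges 64-bit mask constants and the unsigned-long bitmaps that for_each_set_bit() expects: on a 32-bit build one 64-bit constant becomes two array initialisers. The same trick in isolation, with a hypothetical mask value (example_* names are invented):

#include <linux/bitops.h>
#include <linux/kernel.h>

#define EXAMPLE_MASK	((1ULL << 3) | (1ULL << 35))	/* hypothetical 64-bit mask */

#if BITS_PER_LONG == 64
#define EXAMPLE_MASK_BITMAP(bits) (bits)
#else
/* On 32-bit, one 64-bit constant becomes two unsigned long initialisers. */
#define EXAMPLE_MASK_BITMAP(bits) (bits) & 0xffffffff, (bits) >> 32
#endif

static const unsigned long example_mask[] = { EXAMPLE_MASK_BITMAP(EXAMPLE_MASK) };

static void example_walk_mask(void)
{
	unsigned int bit;

	/* Bits 3 and 35 are visited on both 32- and 64-bit kernels. */
	for_each_set_bit(bit, example_mask, 64)
		pr_info("stat index %u is enabled\n", bit);
}
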
+
+static const unsigned long *efx_ef10_stat_mask(struct efx_nic *efx)
+{
+ static const unsigned long hunt_40g_stat_mask[] = {
+ STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
+ HUNT_40G_EXTRA_STAT_MASK)
+ };
+ static const unsigned long hunt_10g_only_stat_mask[] = {
+ STAT_MASK_BITMAP(HUNT_COMMON_STAT_MASK |
+ HUNT_10G_ONLY_STAT_MASK)
+ };
+ u32 port_caps = efx_mcdi_phy_get_caps(efx);
+
+ if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ return hunt_40g_stat_mask;
+ else
+ return hunt_10g_only_stat_mask;
+}
+
+static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
+{
+ return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
+ efx_ef10_stat_mask(efx), names);
+}
+
+static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ const unsigned long *stats_mask = efx_ef10_stat_mask(efx);
+ __le64 generation_start, generation_end;
+ u64 *stats = nic_data->stats;
+ __le64 *dma_stats;
+
+ dma_stats = efx->stats_buffer.addr;
+ nic_data = efx->nic_data;
+
+ generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
+ return 0;
+ rmb();
+ efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, stats_mask,
+ stats, efx->stats_buffer.addr, false);
+ generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+ if (generation_end != generation_start)
+ return -EAGAIN;
+
+ /* Update derived statistics */
+ stats[EF10_STAT_rx_good_bytes] =
+ stats[EF10_STAT_rx_bytes] -
+ stats[EF10_STAT_rx_bytes_minus_good_bytes];
+ efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
+ stats[EF10_STAT_rx_bytes_minus_good_bytes]);
+
+ return 0;
+}
+
+
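
efx_ef10_try_update_nic_stats() trusts the MC-DMAed statistics block only if generation_end (read first, before an rmb()) still matches generation_start (read last); otherwise the caller retries, which is what efx_ef10_update_stats() below does with its bounded udelay() loop. A sketch of that generation-count snapshot read on a hypothetical buffer layout (example_* names are invented; the driver's real layout is the MCDI MAC stats format):

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct example_dma_stats {		/* hypothetical device-written block */
	u64 generation_start;		/* device writes this first ... */
	u64 counters[4];
	u64 generation_end;		/* ... and this last */
};

/* Copy a consistent snapshot of @src into @out.  Returns 0 on success or
 * -EAGAIN if the device updated the block while we were reading it.
 */
static int example_read_snapshot(const volatile struct example_dma_stats *src,
				 u64 *out)
{
	u64 gen_end, gen_start;
	int i;

	gen_end = src->generation_end;
	rmb();				/* counters must be read after generation_end */
	for (i = 0; i < 4; i++)
		out[i] = src->counters[i];
	rmb();				/* generation_start must be read after counters */
	gen_start = src->generation_start;

	return gen_end == gen_start ? 0 : -EAGAIN;
}
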
+static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ const unsigned long *mask = efx_ef10_stat_mask(efx);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
+ size_t stats_count = 0, index;
+ int retry;
+
+ /* If we're unlucky enough to read statistics during the DMA, wait
+ * up to 10ms for it to finish (typically takes <500us)
+ */
+ for (retry = 0; retry < 100; ++retry) {
+ if (efx_ef10_try_update_nic_stats(efx) == 0)
+ break;
+ udelay(100);
+ }
+
+ if (full_stats) {
+ for_each_set_bit(index, mask, EF10_STAT_COUNT) {
+ if (efx_ef10_stat_desc[index].name) {
+ *full_stats++ = stats[index];
+ ++stats_count;
+ }
+ }
+ }
+
+ if (core_stats) {
+ core_stats->rx_packets = stats[EF10_STAT_rx_packets];
+ core_stats->tx_packets = stats[EF10_STAT_tx_packets];
+ core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
+ core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
+ core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops];
+ core_stats->multicast = stats[EF10_STAT_rx_multicast];
+ core_stats->rx_length_errors =
+ stats[EF10_STAT_rx_gtjumbo] +
+ stats[EF10_STAT_rx_length_error];
+ core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
+ core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
+ core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
+ core_stats->rx_errors = (core_stats->rx_length_errors +
+ core_stats->rx_crc_errors +
+ core_stats->rx_frame_errors);
+ }
+
+ return stats_count;
+}
+
+static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned int mode, value;
+ efx_dword_t timer_cmd;
+
+ if (channel->irq_moderation) {
+ mode = 3;
+ value = channel->irq_moderation - 1;
+ } else {
+ mode = 0;
+ value = 0;
+ }
+
+ if (EFX_EF10_WORKAROUND_35388(efx)) {
+ EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
+ EFE_DD_EVQ_IND_TIMER_FLAGS,
+ ERF_DD_EVQ_IND_TIMER_MODE, mode,
+ ERF_DD_EVQ_IND_TIMER_VAL, value);
+ efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
+ channel->channel);
+ } else {
+ EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
+ ERF_DZ_TC_TIMER_VAL, value);
+ efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
+ channel->channel);
+ }
+}
+
+static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
+{
+ if (type != 0)
+ return -EINVAL;
+ return 0;
+}
+
+static void efx_ef10_mcdi_request(struct efx_nic *efx,
+ const efx_dword_t *hdr, size_t hdr_len,
+ const efx_dword_t *sdu, size_t sdu_len)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u8 *pdu = nic_data->mcdi_buf.addr;
+
+ memcpy(pdu, hdr, hdr_len);
+ memcpy(pdu + hdr_len, sdu, sdu_len);
+ wmb();
+
+ /* The hardware provides 'low' and 'high' (doorbell) registers
+ * for passing the 64-bit address of an MCDI request to
+ * firmware. However the dwords are swapped by firmware. The
+ * least significant bits of the doorbell are then 0 for all
+ * MCDI requests due to alignment.
+ */
+ _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
+ ER_DZ_MC_DB_LWRD);
+ _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
+ ER_DZ_MC_DB_HWRD);
+}
+
+static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;
+
+ rmb();
+ return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
+}
+
+static void
+efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
+ size_t offset, size_t outlen)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ const u8 *pdu = nic_data->mcdi_buf.addr;
+
+ memcpy(outbuf, pdu + offset, outlen);
+}
+
+static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = efx_ef10_get_warm_boot_count(efx);
+ if (rc < 0) {
+ /* The firmware is presumably in the process of
+ * rebooting. However, we are supposed to report each
+ * reboot just once, so we must only do that once we
+ * can read and store the updated warm boot count.
+ */
+ return 0;
+ }
+
+ if (rc == nic_data->warm_boot_count)
+ return 0;
+
+ nic_data->warm_boot_count = rc;
+
+ /* All our allocations have been reset */
+ nic_data->must_realloc_vis = true;
+ nic_data->must_restore_filters = true;
+ nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+
+ return -EIO;
+}
+
+/* Handle an MSI interrupt
+ *
+ * Handle an MSI hardware interrupt. This routine schedules event
+ * queue processing. No interrupt acknowledgement cycle is necessary.
+ * Also, we never need to check that the interrupt is for us, since
+ * MSI interrupts cannot be shared.
+ */
+static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
+{
+ struct efx_msi_context *context = dev_id;
+ struct efx_nic *efx = context->efx;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
+
+ if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
+ /* Note test interrupts */
+ if (context->index == efx->irq_level)
+ efx->last_irq_cpu = raw_smp_processor_id();
+
+ /* Schedule processing of the channel */
+ efx_schedule_channel_irq(efx->channel[context->index]);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
+{
+ struct efx_nic *efx = dev_id;
+ bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ struct efx_channel *channel;
+ efx_dword_t reg;
+ u32 queues;
+
+ /* Read the ISR which also ACKs the interrupts */
+ efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
+ queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);
+
+ if (queues == 0)
+ return IRQ_NONE;
+
+ if (likely(soft_enabled)) {
+ /* Note test interrupts */
+ if (queues & (1U << efx->irq_level))
+ efx->last_irq_cpu = raw_smp_processor_id();
+
+ efx_for_each_channel(channel, efx) {
+ if (queues & 1)
+ efx_schedule_channel_irq(channel);
+ queues >>= 1;
+ }
+ }
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+
+ return IRQ_HANDLED;
+}
+
+static void efx_ef10_irq_test_generate(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
+
+ MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
+ (void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+}
+
+static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
+{
+ return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
+ (tx_queue->ptr_mask + 1) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+/* This writes to the TX_DESC_WPTR and also pushes data */
+static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
+ const efx_qword_t *txd)
+{
+ unsigned int write_ptr;
+ efx_oword_t reg;
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
+ reg.qword[0] = *txd;
+ efx_writeo_page(tx_queue->efx, &reg,
+ ER_DZ_TX_DESC_UPD, tx_queue->queue);
+}
+
+static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_TXQ_OUT_LEN);
+ bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
+ size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
+ struct efx_channel *channel = tx_queue->channel;
+ struct efx_nic *efx = tx_queue->efx;
+ size_t inlen, outlen;
+ dma_addr_t dma_addr;
+ efx_qword_t *txd;
+ int rc;
+ int i;
+
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
+ MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
+ INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
+ INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
+ MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ dma_addr = tx_queue->txd.buf.dma_addr;
+
+ netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
+ tx_queue->queue, entries, (u64)dma_addr);
+
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ /* A previous user of this TX queue might have set us up the
+ * bomb by writing a descriptor to the TX push collector but
+ * not the doorbell. (Each collector belongs to a port, not a
+ * queue or function, so cannot easily be reset.) We must
+ * attempt to push a no-op descriptor in its place.
+ */
+ tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
+ tx_queue->insert_count = 1;
+ txd = efx_tx_desc(tx_queue, 0);
+ EFX_POPULATE_QWORD_4(*txd,
+ ESF_DZ_TX_DESC_IS_OPT, true,
+ ESF_DZ_TX_OPTION_TYPE,
+ ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
+ ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
+ ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
+ tx_queue->write_count = 1;
+ wmb();
+ efx_ef10_push_tx_desc(tx_queue, txd);
+
+ return;
+
+fail:
+ WARN_ON(true);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_TXQ_OUT_LEN);
+ struct efx_nic *efx = tx_queue->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
+ tx_queue->queue);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
+{
+ efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
+}
+
+/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
+static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
+{
+ unsigned int write_ptr;
+ efx_dword_t reg;
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
+ efx_writed_page(tx_queue->efx, &reg,
+ ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
+}
+
+static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
+{
+ unsigned int old_write_count = tx_queue->write_count;
+ struct efx_tx_buffer *buffer;
+ unsigned int write_ptr;
+ efx_qword_t *txd;
+
+ BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+
+ do {
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ buffer = &tx_queue->buffer[write_ptr];
+ txd = efx_tx_desc(tx_queue, write_ptr);
+ ++tx_queue->write_count;
+
+ /* Create TX descriptor ring entry */
+ if (buffer->flags & EFX_TX_BUF_OPTION) {
+ *txd = buffer->option;
+ } else {
+ BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
+ EFX_POPULATE_QWORD_3(
+ *txd,
+ ESF_DZ_TX_KER_CONT,
+ buffer->flags & EFX_TX_BUF_CONT,
+ ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
+ ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
+ }
+ } while (tx_queue->write_count != tx_queue->insert_count);
+
+ wmb(); /* Ensure descriptors are written before they are fetched */
+
+ if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
+ txd = efx_tx_desc(tx_queue,
+ old_write_count & tx_queue->ptr_mask);
+ efx_ef10_push_tx_desc(tx_queue, txd);
+ ++tx_queue->pushes;
+ } else {
+ efx_ef10_notify_tx_desc(tx_queue);
+ }
+}
+
+static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
+ EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE,
+ MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE);
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES,
+ EFX_MAX_CHANNELS);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
+ return -EIO;
+
+ *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
+
+ return 0;
+}
+
+static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
+ context);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ WARN_ON(rc != 0);
+}
+
+static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context)
+{
+ MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
+ MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
+ int i, rc;
+
+ MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
+ context);
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+ MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
+
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
+ MCDI_PTR(tablebuf,
+ RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
+ (u8) efx->rx_indir_table[i];
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
+ sizeof(tablebuf), NULL, 0, NULL);
+ if (rc != 0)
+ return rc;
+
+ MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
+ context);
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
+ MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+ for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
+ MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
+ efx->rx_hash_key[i];
+
+ return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
+ sizeof(keybuf), NULL, 0, NULL);
+}
+
+static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
+ efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
+ nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
+}
+
+static void efx_ef10_rx_push_indir_table(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ netif_dbg(efx, drv, efx->net_dev, "pushing RX indirection table\n");
+
+ if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID) {
+ rc = efx_ef10_alloc_rss_context(efx, &nic_data->rx_rss_context);
+ if (rc != 0)
+ goto fail;
+ }
+
+ rc = efx_ef10_populate_rss_table(efx, nic_data->rx_rss_context);
+ if (rc != 0)
+ goto fail;
+
+ return;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
+{
+ return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
+ (rx_queue->ptr_mask + 1) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_RXQ_OUT_LEN);
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
+ struct efx_nic *efx = rx_queue->efx;
+ size_t inlen, outlen;
+ dma_addr_t dma_addr;
+ int rc;
+ int i;
+
+ rx_queue->scatter_n = 0;
+ rx_queue->scatter_len = 0;
+
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
+ efx_rx_queue_index(rx_queue));
+ MCDI_POPULATE_DWORD_1(inbuf, INIT_RXQ_IN_FLAGS,
+ INIT_RXQ_IN_FLAG_PREFIX, 1);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
+ MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ dma_addr = rx_queue->rxd.buf.dma_addr;
+
+ netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
+ efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
+
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ return;
+
+fail:
+ WARN_ON(true);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_RXQ_OUT_LEN);
+ struct efx_nic *efx = rx_queue->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
+ efx_rx_queue_index(rx_queue));
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
+{
+ efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
+}
+
+/* This creates an entry in the RX descriptor queue */
+static inline void
+efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
+{
+ struct efx_rx_buffer *rx_buf;
+ efx_qword_t *rxd;
+
+ rxd = efx_rx_desc(rx_queue, index);
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ EFX_POPULATE_QWORD_2(*rxd,
+ ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
+ ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
+}
+
+static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned int write_count;
+ efx_dword_t reg;
+
+ /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
+ write_count = rx_queue->added_count & ~7;
+ if (rx_queue->notified_count == write_count)
+ return;
+
+ do
+ efx_ef10_build_rx_desc(
+ rx_queue,
+ rx_queue->notified_count & rx_queue->ptr_mask);
+ while (++rx_queue->notified_count != write_count);
+
+ wmb();
+ EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
+ write_count & rx_queue->ptr_mask);
+ efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
+ efx_rx_queue_index(rx_queue));
+}
+
+static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
+
+static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
+{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
+ efx_qword_t event;
+
+ EFX_POPULATE_QWORD_2(event,
+ ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
+ ESF_DZ_EV_DATA, EFX_EF10_REFILL);
+
+ MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
+
+ /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
+ * already swapped the data to little-endian order.
+ */
+ memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
+ sizeof(efx_qword_t));
+
+ efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
+ inbuf, sizeof(inbuf), 0,
+ efx_ef10_rx_defer_refill_complete, 0);
+}
+
+static void
+efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
+ int rc, efx_dword_t *outbuf,
+ size_t outlen_actual)
+{
+ /* nothing to do */
+}
+
+static int efx_ef10_ev_probe(struct efx_channel *channel)
+{
+ return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
+ (channel->eventq_mask + 1) *
+ sizeof(efx_qword_t),
+ GFP_KERNEL);
+}
+
+static int efx_ef10_ev_init(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
+ EFX_BUF_SIZE));
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
+ size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
+ struct efx_nic *efx = channel->efx;
+ struct efx_ef10_nic_data *nic_data;
+ bool supports_rx_merge;
+ size_t inlen, outlen;
+ dma_addr_t dma_addr;
+ int rc;
+ int i;
+
+ nic_data = efx->nic_data;
+ supports_rx_merge =
+ !!(nic_data->datapath_caps &
+ 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
+
+ /* Fill event queue with all ones (i.e. empty events) */
+ memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
+
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
+ /* INIT_EVQ expects index in vector table, not absolute */
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
+ MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
+ INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
+ INIT_EVQ_IN_FLAG_RX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_TX_MERGE, 1,
+ INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
+ MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
+ MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
+ MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
+
+ dma_addr = channel->eventq.buf.dma_addr;
+ for (i = 0; i < entries; ++i) {
+ MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
+ dma_addr += EFX_BUF_SIZE;
+ }
+
+ inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ /* IRQ return is ignored */
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static void efx_ef10_ev_fini(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FINI_EVQ_OUT_LEN);
+ struct efx_nic *efx = channel->efx;
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+
+ if (rc && rc != -EALREADY)
+ goto fail;
+
+ return;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+static void efx_ef10_ev_remove(struct efx_channel *channel)
+{
+ efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
+}
+
+static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
+ unsigned int rx_queue_label)
+{
+ struct efx_nic *efx = rx_queue->efx;
+
+ netif_info(efx, hw, efx->net_dev,
+ "rx event arrived on queue %d labeled as queue %u\n",
+ efx_rx_queue_index(rx_queue), rx_queue_label);
+
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+}
+
+static void
+efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
+ unsigned int actual, unsigned int expected)
+{
+ unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
+ struct efx_nic *efx = rx_queue->efx;
+
+ netif_info(efx, hw, efx->net_dev,
+ "dropped %d events (index=%d expected=%d)\n",
+ dropped, actual, expected);
+
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+}
+
+/* partially received RX was aborted. clean up. */
+static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
+{
+ unsigned int rx_desc_ptr;
+
+ WARN_ON(rx_queue->scatter_n == 0);
+
+ netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
+ "scattered RX aborted (dropping %u buffers)\n",
+ rx_queue->scatter_n);
+
+ rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
+
+ efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
+ 0, EFX_RX_PKT_DISCARD);
+
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
+ rx_queue->scatter_len = 0;
+ ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
+}
+
+static int efx_ef10_handle_rx_event(struct efx_channel *channel,
+ const efx_qword_t *event)
+{
+ unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
+ unsigned int n_descs, n_packets, i;
+ struct efx_nic *efx = channel->efx;
+ struct efx_rx_queue *rx_queue;
+ bool rx_cont;
+ u16 flags = 0;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return 0;
+
+ /* Basic packet information */
+ rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
+ next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
+ rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
+ rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
+ rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
+
+ WARN_ON(EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT));
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
+ efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
+
+ n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
+ ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
+
+ if (n_descs != rx_queue->scatter_n + 1) {
+ /* detect rx abort */
+ if (unlikely(n_descs == rx_queue->scatter_n)) {
+ WARN_ON(rx_bytes != 0);
+ efx_ef10_handle_rx_abort(rx_queue);
+ return 0;
+ }
+
+ if (unlikely(rx_queue->scatter_n != 0)) {
+ /* Scattered packet completions cannot be
+ * merged, so something has gone wrong.
+ */
+ efx_ef10_handle_rx_bad_lbits(
+ rx_queue, next_ptr_lbits,
+ (rx_queue->removed_count +
+ rx_queue->scatter_n + 1) &
+ ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
+ return 0;
+ }
+
+ /* Merged completion for multiple non-scattered packets */
+ rx_queue->scatter_n = 1;
+ rx_queue->scatter_len = 0;
+ n_packets = n_descs;
+ ++channel->n_rx_merge_events;
+ channel->n_rx_merge_packets += n_packets;
+ flags |= EFX_RX_PKT_PREFIX_LEN;
+ } else {
+ ++rx_queue->scatter_n;
+ rx_queue->scatter_len += rx_bytes;
+ if (rx_cont)
+ return 0;
+ n_packets = 1;
+ }
+
+ if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
+ flags |= EFX_RX_PKT_DISCARD;
+
+ if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
+ channel->n_rx_ip_hdr_chksum_err += n_packets;
+ } else if (unlikely(EFX_QWORD_FIELD(*event,
+ ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
+ channel->n_rx_tcp_udp_chksum_err += n_packets;
+ } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
+ rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
+ flags |= EFX_RX_PKT_CSUMMED;
+ }
+
+ if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
+ flags |= EFX_RX_PKT_TCP;
+
+ channel->irq_mod_score += 2 * n_packets;
+
+ /* Handle received packet(s) */
+ for (i = 0; i < n_packets; i++) {
+ efx_rx_packet(rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, rx_queue->scatter_len,
+ flags);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ }
+
+ rx_queue->scatter_n = 0;
+ rx_queue->scatter_len = 0;
+
+ return n_packets;
+}
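
The descriptor accounting above depends on wrap-safe ring arithmetic: ESF_DZ_RX_DSC_PTR_LBITS carries only the low bits of the hardware pointer, so the number of descriptors consumed is (next_ptr_lbits - removed_count) masked to the field width, and efx_ef10_handle_rx_bad_lbits() computes its dropped count the same way. A tiny worked sketch, assuming a hypothetical 4-bit field:

#define EXAMPLE_PTR_LBITS_WIDTH	4	/* hypothetical width of the hardware field */

/* Number of ring entries consumed since @removed_count, given only the low
 * EXAMPLE_PTR_LBITS_WIDTH bits of the hardware pointer.  Unsigned subtraction
 * plus the mask keeps the result correct across wrap-around.
 */
static unsigned int example_ring_delta(unsigned int hw_ptr_lbits,
				       unsigned int removed_count)
{
	return (hw_ptr_lbits - removed_count) &
	       ((1 << EXAMPLE_PTR_LBITS_WIDTH) - 1);
}

/* e.g. hw_ptr_lbits = 2 just after wrapping, removed_count low bits = 14:
 * (2 - 14) & 15 = 4, i.e. four descriptors were consumed.
 */
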
+
+static int
+efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_tx_queue *tx_queue;
+ unsigned int tx_ev_desc_ptr;
+ unsigned int tx_ev_q_label;
+ int tx_descs = 0;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return 0;
+
+ if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
+ return 0;
+
+ /* Transmit completion */
+ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
+ tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
+ tx_queue = efx_channel_get_tx_queue(channel,
+ tx_ev_q_label % EFX_TXQ_TYPES);
+ tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
+ tx_queue->ptr_mask);
+ efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
+
+ return tx_descs;
+}
+
+static void
+efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ int subcode;
+
+ subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
+
+ switch (subcode) {
+ case ESE_DZ_DRV_TIMER_EV:
+ case ESE_DZ_DRV_WAKE_UP_EV:
+ break;
+ case ESE_DZ_DRV_START_UP_EV:
+ /* event queue init complete. ok. */
+ break;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "channel %d unknown driver event type %d"
+ " (data " EFX_QWORD_FMT ")\n",
+ channel->channel, subcode,
+ EFX_QWORD_VAL(*event));
+
+ }
+}
+
+static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
+ efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ u32 subcode;
+
+ subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
+
+ switch (subcode) {
+ case EFX_EF10_TEST:
+ channel->event_test_cpu = raw_smp_processor_id();
+ break;
+ case EFX_EF10_REFILL:
+ /* The queue must be empty, so we won't receive any rx
+ * events, so efx_process_channel() won't refill the
+ * queue. Refill it here
+ */
+ efx_fast_push_rx_descriptors(&channel->rx_queue);
+ break;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "channel %d unknown driver event type %u"
+ " (data " EFX_QWORD_FMT ")\n",
+ channel->channel, (unsigned) subcode,
+ EFX_QWORD_VAL(*event));
+ }
+}
+
+static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
+{
+ struct efx_nic *efx = channel->efx;
+ efx_qword_t event, *p_event;
+ unsigned int read_ptr;
+ int ev_code;
+ int tx_descs = 0;
+ int spent = 0;
+
+ read_ptr = channel->eventq_read_ptr;
+
+ for (;;) {
+ p_event = efx_event(channel, read_ptr);
+ event = *p_event;
+
+ if (!efx_event_present(&event))
+ break;
+
+ EFX_SET_QWORD(*p_event);
+
+ ++read_ptr;
+
+ ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
+
+ netif_vdbg(efx, drv, efx->net_dev,
+ "processing event on %d " EFX_QWORD_FMT "\n",
+ channel->channel, EFX_QWORD_VAL(event));
+
+ switch (ev_code) {
+ case ESE_DZ_EV_CODE_MCDI_EV:
+ efx_mcdi_process_event(channel, &event);
+ break;
+ case ESE_DZ_EV_CODE_RX_EV:
+ spent += efx_ef10_handle_rx_event(channel, &event);
+ if (spent >= quota) {
+ /* XXX can we split a merged event to
+ * avoid going over-quota?
+ */
+ spent = quota;
+ goto out;
+ }
+ break;
+ case ESE_DZ_EV_CODE_TX_EV:
+ tx_descs += efx_ef10_handle_tx_event(channel, &event);
+ if (tx_descs > efx->txq_entries) {
+ spent = quota;
+ goto out;
+ } else if (++spent == quota) {
+ goto out;
+ }
+ break;
+ case ESE_DZ_EV_CODE_DRIVER_EV:
+ efx_ef10_handle_driver_event(channel, &event);
+ if (++spent == quota)
+ goto out;
+ break;
+ case EFX_EF10_DRVGEN_EV:
+ efx_ef10_handle_driver_generated_event(channel, &event);
+ break;
+ default:
+ netif_err(efx, hw, efx->net_dev,
+ "channel %d unknown event type %d"
+ " (data " EFX_QWORD_FMT ")\n",
+ channel->channel, ev_code,
+ EFX_QWORD_VAL(event));
+ }
+ }
+
+out:
+ channel->eventq_read_ptr = read_ptr;
+ return spent;
+}
+
+static void efx_ef10_ev_read_ack(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ efx_dword_t rptr;
+
+ if (EFX_EF10_WORKAROUND_35388(efx)) {
+ BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
+ (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
+ BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
+ (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
+
+ EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
+ EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
+ ERF_DD_EVQ_IND_RPTR,
+ (channel->eventq_read_ptr &
+ channel->eventq_mask) >>
+ ERF_DD_EVQ_IND_RPTR_WIDTH);
+ efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
+ channel->channel);
+ EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
+ EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
+ ERF_DD_EVQ_IND_RPTR,
+ channel->eventq_read_ptr &
+ ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
+ efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
+ channel->channel);
+ } else {
+ EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
+ channel->eventq_read_ptr &
+ channel->eventq_mask);
+ efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
+ }
+}
+
+static void efx_ef10_ev_test_generate(struct efx_channel *channel)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
+ struct efx_nic *efx = channel->efx;
+ efx_qword_t event;
+ int rc;
+
+ EFX_POPULATE_QWORD_2(event,
+ ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
+ ESF_DZ_EV_DATA, EFX_EF10_TEST);
+
+ MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
+
+ /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
+ * already swapped the data to little-endian order.
+ */
+ memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
+ sizeof(efx_qword_t));
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc != 0)
+ goto fail;
+
+ return;
+
+fail:
+ WARN_ON(true);
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+}
+
+void efx_ef10_handle_drain_event(struct efx_nic *efx)
+{
+ if (atomic_dec_and_test(&efx->active_queues))
+ wake_up(&efx->flush_wq);
+
+ WARN_ON(atomic_read(&efx->active_queues) < 0);
+}
+
+static int efx_ef10_fini_dmaq(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int pending;
+
+ /* If the MC has just rebooted, the TX/RX queues will have already been
+ * torn down, but efx->active_queues needs to be set to zero.
+ */
+ if (nic_data->must_realloc_vis) {
+ atomic_set(&efx->active_queues, 0);
+ return 0;
+ }
+
+ /* Do not attempt to write to the NIC during EEH recovery */
+ if (efx->state != STATE_RECOVERY) {
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_ef10_rx_fini(rx_queue);
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_ef10_tx_fini(tx_queue);
+ }
+
+ wait_event_timeout(efx->flush_wq,
+ atomic_read(&efx->active_queues) == 0,
+ msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
+ pending = atomic_read(&efx->active_queues);
+ if (pending) {
+ netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
+ pending);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return 0;
+}
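
efx_ef10_fini_dmaq() pairs with efx_ef10_handle_drain_event() further up: each queue that finishes draining decrements efx->active_queues and wakes efx->flush_wq, and teardown waits for the counter to reach zero, bounded by EFX_MAX_FLUSH_TIME. The counter-plus-waitqueue handshake on its own, with hypothetical example_* names:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

struct example_flush {			/* hypothetical flush bookkeeping */
	atomic_t active_queues;
	wait_queue_head_t flush_wq;
};

/* Event side: one queue has finished draining. */
static void example_handle_drain_event(struct example_flush *f)
{
	if (atomic_dec_and_test(&f->active_queues))
		wake_up(&f->flush_wq);
}

/* Teardown side: wait for every queue to drain, but never forever. */
static int example_wait_for_flush(struct example_flush *f,
				  unsigned int timeout_ms)
{
	wait_event_timeout(f->flush_wq,
			   atomic_read(&f->active_queues) == 0,
			   msecs_to_jiffies(timeout_ms));

	return atomic_read(&f->active_queues) ? -ETIMEDOUT : 0;
}
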
+
+static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
+ const struct efx_filter_spec *right)
+{
+ if ((left->match_flags ^ right->match_flags) |
+ ((left->flags ^ right->flags) &
+ (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
+ return false;
+
+ return memcmp(&left->outer_vid, &right->outer_vid,
+ sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) == 0;
+}
+
+static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
+{
+ BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
+ return jhash2((const u32 *)&spec->outer_vid,
+ (sizeof(struct efx_filter_spec) -
+ offsetof(struct efx_filter_spec, outer_vid)) / 4,
+ 0);
+ /* XXX should we randomise the initval? */
+}
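
efx_ef10_filter_equal() and efx_ef10_filter_hash() treat everything in struct efx_filter_spec from outer_vid onwards as a single 32-bit-aligned blob, so one memcmp()/jhash2() covers every match field and the two routines can never disagree about what constitutes the key. The offsetof-tail idiom on a hypothetical struct (example_* names are invented):

#include <linux/bug.h>
#include <linux/jhash.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_spec {			/* hypothetical filter spec */
	u32 flags;			/* not part of the match key */
	/* everything from here on is the match key, kept 32-bit aligned */
	u16 outer_vid;
	u16 loc_port;
	u32 loc_host;
};

#define EXAMPLE_KEY_OFFSET	offsetof(struct example_spec, outer_vid)
#define EXAMPLE_KEY_BYTES	(sizeof(struct example_spec) - EXAMPLE_KEY_OFFSET)

static u32 example_spec_hash(const struct example_spec *spec)
{
	BUILD_BUG_ON(EXAMPLE_KEY_OFFSET & 3);	/* jhash2 works on u32 words */
	return jhash2((const u32 *)&spec->outer_vid, EXAMPLE_KEY_BYTES / 4, 0);
}

static bool example_spec_equal(const struct example_spec *a,
			       const struct example_spec *b)
{
	return memcmp(&a->outer_vid, &b->outer_vid, EXAMPLE_KEY_BYTES) == 0;
}
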
+
+/* Decide whether a filter should be exclusive or else should allow
+ * delivery to additional recipients. Currently we decide that
+ * filters for specific local unicast MAC and IP addresses are
+ * exclusive.
+ */
+static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
+{
+ if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
+ !is_multicast_ether_addr(spec->loc_mac))
+ return true;
+
+ if ((spec->match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if (spec->ether_type == htons(ETH_P_IP) &&
+ !ipv4_is_multicast(spec->loc_host[0]))
+ return true;
+ if (spec->ether_type == htons(ETH_P_IPV6) &&
+ ((const u8 *)spec->loc_host)[0] != 0xff)
+ return true;
+ }
+
+ return false;
+}
+
+static struct efx_filter_spec *
+efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
+ unsigned int filter_idx)
+{
+ return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
+ ~EFX_EF10_FILTER_FLAGS);
+}
+
+static unsigned int
+efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
+ unsigned int filter_idx)
+{
+ return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
+}
+
+static void
+efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
+ unsigned int filter_idx,
+ const struct efx_filter_spec *spec,
+ unsigned int flags)
+{
+ table->entry[filter_idx].spec = (unsigned long)spec | flags;
+}
+
+static void efx_ef10_filter_push_prep(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ efx_dword_t *inbuf, u64 handle,
+ bool replacing)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
+
+ if (replacing) {
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_REPLACE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
+ } else {
+ u32 match_fields = 0;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_ef10_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_INSERT :
+ MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
+
+ /* Convert match flags and values. Unlike almost
+ * everything else in MCDI, these fields are in
+ * network byte order.
+ */
+ if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
+ match_fields |=
+ is_multicast_ether_addr(spec->loc_mac) ?
+ 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
+ 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
+#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
+ if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
+ match_fields |= \
+ 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
+ mcdi_field ## _LBN; \
+ BUILD_BUG_ON( \
+ MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
+ sizeof(spec->gen_field)); \
+ memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
+ &spec->gen_field, sizeof(spec->gen_field)); \
+ }
+ COPY_FIELD(REM_HOST, rem_host, SRC_IP);
+ COPY_FIELD(LOC_HOST, loc_host, DST_IP);
+ COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
+ COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
+ COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
+ COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
+ COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
+ COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
+ COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
+ COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
+#undef COPY_FIELD
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
+ match_fields);
+ }
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
+ MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
+ MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
+ MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE, spec->dmaq_id);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
+ (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
+ MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
+ MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
+ if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
+ spec->rss_context !=
+ EFX_FILTER_RSS_CONTEXT_DEFAULT ?
+ spec->rss_context : nic_data->rx_rss_context);
+}
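+/* For reference, a single COPY_FIELD() use above, e.g.
+ * COPY_FIELD(LOC_PORT, loc_port, DST_PORT), expands to roughly:
+ *
+ *      if (spec->match_flags & EFX_FILTER_MATCH_LOC_PORT) {
+ *              match_fields |=
+ *                      1 << MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN;
+ *              memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_DST_PORT),
+ *                     &spec->loc_port, sizeof(spec->loc_port));
+ *      }
+ *
+ * i.e. the match value is copied into the MCDI request exactly as held
+ * in the filter spec, in network byte order.
+ */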
+
+static int efx_ef10_filter_push(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ u64 *handle, bool replacing)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
+ int rc;
+
+ efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc == 0)
+ *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
+ return rc;
+}
+
+static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table,
+ enum efx_filter_match_flags match_flags)
+{
+ unsigned int match_pri;
+
+ for (match_pri = 0;
+ match_pri < table->rx_match_count;
+ match_pri++)
+ if (table->rx_match_flags[match_pri] == match_flags)
+ return match_pri;
+
+ return -EPROTONOSUPPORT;
+}
+
+static s32 efx_ef10_filter_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec,
+ bool replace_equal)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
+ struct efx_filter_spec *saved_spec;
+ unsigned int match_pri, hash;
+ unsigned int priv_flags;
+ bool replacing = false;
+ int ins_index = -1;
+ DEFINE_WAIT(wait);
+ bool is_mc_recip;
+ s32 rc;
+
+ /* For now, only support RX filters */
+ if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
+ EFX_FILTER_FLAG_RX)
+ return -EINVAL;
+
+ rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags);
+ if (rc < 0)
+ return rc;
+ match_pri = rc;
+
+ hash = efx_ef10_filter_hash(spec);
+ is_mc_recip = efx_filter_is_mc_recipient(spec);
+ if (is_mc_recip)
+ bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
+
+ /* Find any existing filters with the same match tuple or
+ * else a free slot to insert at. If any of them are busy,
+ * we have to wait and retry.
+ */
+ for (;;) {
+ unsigned int depth = 1;
+ unsigned int i;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (;;) {
+ i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_ef10_filter_entry_spec(table, i);
+
+ if (!saved_spec) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_ef10_filter_equal(spec, saved_spec)) {
+ if (table->entry[i].spec &
+ EFX_EF10_FILTER_FLAG_BUSY)
+ break;
+ if (spec->priority < saved_spec->priority &&
+ !(saved_spec->priority ==
+ EFX_FILTER_PRI_REQUIRED &&
+ saved_spec->flags &
+ EFX_FILTER_FLAG_RX_STACK)) {
+ rc = -EPERM;
+ goto out_unlock;
+ }
+ if (!is_mc_recip) {
+ /* This is the only one */
+ if (spec->priority ==
+ saved_spec->priority &&
+ !replace_equal) {
+ rc = -EEXIST;
+ goto out_unlock;
+ }
+ ins_index = i;
+ goto found;
+ } else if (spec->priority >
+ saved_spec->priority ||
+ (spec->priority ==
+ saved_spec->priority &&
+ replace_equal)) {
+ if (ins_index < 0)
+ ins_index = i;
+ else
+ __set_bit(depth, mc_rem_map);
+ }
+ }
+
+ /* Once we reach the maximum search depth, use
+ * the first suitable slot or return -EBUSY if
+ * there was none
+ */
+ if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto out_unlock;
+ }
+ goto found;
+ }
+
+ ++depth;
+ }
+
+ prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock_bh(&efx->filter_lock);
+ schedule();
+ }
+
+found:
+ /* Create a software table entry if necessary, and mark it
+ * busy. We might yet fail to insert, but any attempt to
+ * insert a conflicting filter while we're waiting for the
+ * firmware must find the busy entry.
+ */
+ saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
+ if (saved_spec) {
+ if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
+ /* Just make sure it won't be removed */
+ saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
+ table->entry[ins_index].spec &=
+ ~EFX_EF10_FILTER_FLAG_STACK_OLD;
+ rc = ins_index;
+ goto out_unlock;
+ }
+ replacing = true;
+ priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
+ } else {
+ saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
+ if (!saved_spec) {
+ rc = -ENOMEM;
+ goto out_unlock;
+ }
+ *saved_spec = *spec;
+ priv_flags = 0;
+ }
+ efx_ef10_filter_set_entry(table, ins_index, saved_spec,
+ priv_flags | EFX_EF10_FILTER_FLAG_BUSY);
+
+ /* Mark lower-priority multicast recipients busy prior to removal */
+ if (is_mc_recip) {
+ unsigned int depth, i;
+
+ for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
+ i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+ if (test_bit(depth, mc_rem_map))
+ table->entry[i].spec |=
+ EFX_EF10_FILTER_FLAG_BUSY;
+ }
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
+ replacing);
+
+ /* Finalise the software table entry */
+ spin_lock_bh(&efx->filter_lock);
+ if (rc == 0) {
+ if (replacing) {
+ /* Update the fields that may differ */
+ saved_spec->priority = spec->priority;
+ saved_spec->flags &= EFX_FILTER_FLAG_RX_STACK;
+ saved_spec->flags |= spec->flags;
+ saved_spec->rss_context = spec->rss_context;
+ saved_spec->dmaq_id = spec->dmaq_id;
+ }
+ } else if (!replacing) {
+ kfree(saved_spec);
+ saved_spec = NULL;
+ }
+ efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
+
+ /* Remove and finalise entries for lower-priority multicast
+ * recipients
+ */
+ if (is_mc_recip) {
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ unsigned int depth, i;
+
+ memset(inbuf, 0, sizeof(inbuf));
+
+ for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
+ if (!test_bit(depth, mc_rem_map))
+ continue;
+
+ i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_ef10_filter_entry_spec(table, i);
+ priv_flags = efx_ef10_filter_entry_flags(table, i);
+
+ if (rc == 0) {
+ spin_unlock_bh(&efx->filter_lock);
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[i].handle);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
+ inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ spin_lock_bh(&efx->filter_lock);
+ }
+
+ if (rc == 0) {
+ kfree(saved_spec);
+ saved_spec = NULL;
+ priv_flags = 0;
+ } else {
+ priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
+ }
+ efx_ef10_filter_set_entry(table, i, saved_spec,
+ priv_flags);
+ }
+ }
+
+ /* If successful, return the inserted filter ID */
+ if (rc == 0)
+ rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;
+
+ wake_up_all(&table->waitq);
+out_unlock:
+ spin_unlock_bh(&efx->filter_lock);
+ finish_wait(&table->waitq, &wait);
+ return rc;
+}
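+/* The ID returned above encodes both the match priority and the table
+ * row:
+ *
+ *      filter_id = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;
+ *      ins_index = filter_id % HUNT_FILTER_TBL_ROWS;
+ *      match_pri = filter_id / HUNT_FILTER_TBL_ROWS;
+ *
+ * which is how efx_ef10_filter_remove_internal() and
+ * efx_ef10_filter_get_safe() decode their filter_id argument.
+ */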
+
+void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
+{
+ /* no need to do anything here on EF10 */
+}
+
+/* Remove a filter.
+ * If !stack_requested, remove by ID; the ID encodes the match priority
+ * and the table index, may come from userland and so must be
+ * range-checked.
+ * If stack_requested, remove directly by table index.
+ */
+static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, bool stack_requested)
+{
+ unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_FILTER_OP_IN_HANDLE_OFST +
+ MC_CMD_FILTER_OP_IN_HANDLE_LEN);
+ struct efx_filter_spec *spec;
+ DEFINE_WAIT(wait);
+ int rc;
+
+ /* Find the software table entry and mark it busy. Don't
+ * remove it yet; any attempt to update while we're waiting
+ * for the firmware must find the busy entry.
+ */
+ for (;;) {
+ spin_lock_bh(&efx->filter_lock);
+ if (!(table->entry[filter_idx].spec &
+ EFX_EF10_FILTER_FLAG_BUSY))
+ break;
+ prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
+ spin_unlock_bh(&efx->filter_lock);
+ schedule();
+ }
+ spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (!spec || spec->priority > priority ||
+ (!stack_requested &&
+ efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
+ filter_id / HUNT_FILTER_TBL_ROWS)) {
+ rc = -ENOENT;
+ goto out_unlock;
+ }
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
+ spin_unlock_bh(&efx->filter_lock);
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_STACK && !stack_requested) {
+ /* Reset steering of a stack-owned filter */
+
+ struct efx_filter_spec new_spec = *spec;
+
+ new_spec.priority = EFX_FILTER_PRI_REQUIRED;
+ new_spec.flags = (EFX_FILTER_FLAG_RX |
+ EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_STACK);
+ new_spec.dmaq_id = 0;
+ new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
+ rc = efx_ef10_filter_push(efx, &new_spec,
+ &table->entry[filter_idx].handle,
+ true);
+
+ spin_lock_bh(&efx->filter_lock);
+ if (rc == 0)
+ *spec = new_spec;
+ } else {
+ /* Really remove the filter */
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_ef10_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_REMOVE :
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[filter_idx].handle);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
+
+ spin_lock_bh(&efx->filter_lock);
+ if (rc == 0) {
+ kfree(spec);
+ efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
+ }
+ }
+ table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
+ wake_up_all(&table->waitq);
+out_unlock:
+ spin_unlock_bh(&efx->filter_lock);
+ finish_wait(&table->waitq, &wait);
+ return rc;
+}
+
+static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ return efx_ef10_filter_remove_internal(efx, priority, filter_id, false);
+}
+
+static int efx_ef10_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec)
+{
+ unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ const struct efx_filter_spec *saved_spec;
+ int rc;
+
+ spin_lock_bh(&efx->filter_lock);
+ saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (saved_spec && saved_spec->priority == priority &&
+ efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) ==
+ filter_id / HUNT_FILTER_TBL_ROWS) {
+ *spec = *saved_spec;
+ rc = 0;
+ } else {
+ rc = -ENOENT;
+ }
+ spin_unlock_bh(&efx->filter_lock);
+ return rc;
+}
+
+static void efx_ef10_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ /* TODO */
+}
+
+static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+ for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
+ if (table->entry[filter_idx].spec &&
+ efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
+ priority)
+ ++count;
+ }
+ spin_unlock_bh(&efx->filter_lock);
+ return count;
+}
+
+static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+
+ return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
+}
+
+static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+ for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (spec && spec->priority == priority) {
+ if (count == size) {
+ count = -EMSGSIZE;
+ break;
+ }
+ buf[count++] = (efx_ef10_filter_rx_match_pri(
+ table, spec->match_flags) *
+ HUNT_FILTER_TBL_ROWS +
+ filter_idx);
+ }
+ }
+ spin_unlock_bh(&efx->filter_lock);
+ return count;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+
+static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;
+
+static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ struct efx_filter_spec *saved_spec;
+ unsigned int hash, i, depth = 1;
+ bool replacing = false;
+ int ins_index = -1;
+ u64 cookie;
+ s32 rc;
+
+ /* Must be an RX filter without RSS and not for a multicast
+ * destination address (RFS only works for connected sockets).
+ * These restrictions allow us to pass only a tiny amount of
+ * data through to the completion function.
+ */
+ EFX_WARN_ON_PARANOID(spec->flags !=
+ (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
+ EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
+ EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));
+
+ hash = efx_ef10_filter_hash(spec);
+
+ spin_lock_bh(&efx->filter_lock);
+
+ /* Find any existing filter with the same match tuple or else
+ * a free slot to insert at. If an existing filter is busy,
+ * we have to give up.
+ */
+ for (;;) {
+ i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
+ saved_spec = efx_ef10_filter_entry_spec(table, i);
+
+ if (!saved_spec) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_ef10_filter_equal(spec, saved_spec)) {
+ if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
+ rc = -EBUSY;
+ goto fail_unlock;
+ }
+ EFX_WARN_ON_PARANOID(saved_spec->flags &
+ EFX_FILTER_FLAG_RX_STACK);
+ if (spec->priority < saved_spec->priority) {
+ rc = -EPERM;
+ goto fail_unlock;
+ }
+ ins_index = i;
+ break;
+ }
+
+ /* Once we reach the maximum search depth, use the
+ * first suitable slot or return -EBUSY if there was
+ * none
+ */
+ if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto fail_unlock;
+ }
+ break;
+ }
+
+ ++depth;
+ }
+
+ /* Create a software table entry if necessary, and mark it
+ * busy. We might yet fail to insert, but any attempt to
+ * insert a conflicting filter while we're waiting for the
+ * firmware must find the busy entry.
+ */
+ saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
+ if (saved_spec) {
+ replacing = true;
+ } else {
+ saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
+ if (!saved_spec) {
+ rc = -ENOMEM;
+ goto fail_unlock;
+ }
+ *saved_spec = *spec;
+ }
+ efx_ef10_filter_set_entry(table, ins_index, saved_spec,
+ EFX_EF10_FILTER_FLAG_BUSY);
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ /* Pack up the variables needed on completion */
+ cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;
+
+ efx_ef10_filter_push_prep(efx, spec, inbuf,
+ table->entry[ins_index].handle, replacing);
+ efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+ MC_CMD_FILTER_OP_OUT_LEN,
+ efx_ef10_filter_rfs_insert_complete, cookie);
+
+ return ins_index;
+
+fail_unlock:
+ spin_unlock_bh(&efx->filter_lock);
+ return rc;
+}
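+/* Layout of the completion cookie packed above:
+ *
+ *      bit  31     - replacing
+ *      bits 30..16 - ins_index (table row)
+ *      bits 15..0  - dmaq_id
+ *
+ * efx_ef10_filter_rfs_insert_complete() unpacks it with the matching
+ * shifts and masks.
+ */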
+
+static void
+efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
+ int rc, efx_dword_t *outbuf,
+ size_t outlen_actual)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ unsigned int ins_index, dmaq_id;
+ struct efx_filter_spec *spec;
+ bool replacing;
+
+ /* Unpack the cookie */
+ replacing = cookie >> 31;
+ ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
+ dmaq_id = cookie & 0xffff;
+
+ spin_lock_bh(&efx->filter_lock);
+ spec = efx_ef10_filter_entry_spec(table, ins_index);
+ if (rc == 0) {
+ table->entry[ins_index].handle =
+ MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
+ if (replacing)
+ spec->dmaq_id = dmaq_id;
+ } else if (!replacing) {
+ kfree(spec);
+ spec = NULL;
+ }
+ efx_ef10_filter_set_entry(table, ins_index, spec, 0);
+ spin_unlock_bh(&efx->filter_lock);
+
+ wake_up_all(&table->waitq);
+}
+
+static void
+efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
+ unsigned long filter_idx,
+ int rc, efx_dword_t *outbuf,
+ size_t outlen_actual);
+
+static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int filter_idx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_filter_spec *spec =
+ efx_ef10_filter_entry_spec(table, filter_idx);
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_FILTER_OP_IN_HANDLE_OFST +
+ MC_CMD_FILTER_OP_IN_HANDLE_LEN);
+
+ if (!spec ||
+ (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
+ spec->priority != EFX_FILTER_PRI_HINT ||
+ !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
+ flow_id, filter_idx))
+ return false;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ MC_CMD_FILTER_OP_IN_OP_REMOVE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[filter_idx].handle);
+ if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
+ efx_ef10_filter_rfs_expire_complete, filter_idx))
+ return false;
+
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
+ return true;
+}
+
+static void
+efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
+ unsigned long filter_idx,
+ int rc, efx_dword_t *outbuf,
+ size_t outlen_actual)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_filter_spec *spec =
+ efx_ef10_filter_entry_spec(table, filter_idx);
+
+ spin_lock_bh(&efx->filter_lock);
+ if (rc == 0) {
+ kfree(spec);
+ efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
+ }
+ table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
+ wake_up_all(&table->waitq);
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
+{
+ int match_flags = 0;
+
+#define MAP_FLAG(gen_flag, mcdi_field) { \
+ u32 old_mcdi_flags = mcdi_flags; \
+ mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
+ mcdi_field ## _LBN); \
+ if (mcdi_flags != old_mcdi_flags) \
+ match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
+ }
+ MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
+ MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
+ MAP_FLAG(REM_HOST, SRC_IP);
+ MAP_FLAG(LOC_HOST, DST_IP);
+ MAP_FLAG(REM_MAC, SRC_MAC);
+ MAP_FLAG(REM_PORT, SRC_PORT);
+ MAP_FLAG(LOC_MAC, DST_MAC);
+ MAP_FLAG(LOC_PORT, DST_PORT);
+ MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
+ MAP_FLAG(INNER_VID, INNER_VLAN);
+ MAP_FLAG(OUTER_VID, OUTER_VLAN);
+ MAP_FLAG(IP_PROTO, IP_PROTO);
+#undef MAP_FLAG
+
+ /* Did we map them all? */
+ if (mcdi_flags)
+ return -EINVAL;
+
+ return match_flags;
+}
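+/* Each MAP_FLAG() above clears one MCDI match bit and, if it was set,
+ * records the corresponding generic flag; MAP_FLAG(LOC_PORT, DST_PORT)
+ * is therefore roughly equivalent to:
+ *
+ *      if (mcdi_flags & (1 << MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN)) {
+ *              mcdi_flags &=
+ *                      ~(1 << MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN);
+ *              match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+ *      }
+ *
+ * Any bit left set in mcdi_flags afterwards is a match type the driver
+ * does not understand, hence the -EINVAL.
+ */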
+
+static int efx_ef10_filter_table_probe(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
+ unsigned int pd_match_pri, pd_match_count;
+ struct efx_ef10_filter_table *table;
+ size_t outlen;
+ int rc;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ /* Find out which RX filter types are supported, and their priorities */
+ MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
+ inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
+ &outlen);
+ if (rc)
+ goto fail;
+ pd_match_count = MCDI_VAR_ARRAY_LEN(
+ outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
+ table->rx_match_count = 0;
+
+ for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
+ u32 mcdi_flags =
+ MCDI_ARRAY_DWORD(
+ outbuf,
+ GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
+ pd_match_pri);
+ rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
+ if (rc < 0) {
+ netif_dbg(efx, probe, efx->net_dev,
+ "%s: fw flags %#x pri %u not supported in driver\n",
+ __func__, mcdi_flags, pd_match_pri);
+ } else {
+ netif_dbg(efx, probe, efx->net_dev,
+ "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
+ __func__, mcdi_flags, pd_match_pri,
+ rc, table->rx_match_count);
+ table->rx_match_flags[table->rx_match_count++] = rc;
+ }
+ }
+
+ table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
+ if (!table->entry) {
+ rc = -ENOMEM;
+ goto fail;
+ }
+
+ efx->filter_state = table;
+ init_waitqueue_head(&table->waitq);
+ return 0;
+
+fail:
+ kfree(table);
+ return rc;
+}
+
+static void efx_ef10_filter_table_restore(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ bool failed = false;
+ int rc;
+
+ if (!nic_data->must_restore_filters)
+ return;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (!spec)
+ continue;
+
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
+ spin_unlock_bh(&efx->filter_lock);
+
+ rc = efx_ef10_filter_push(efx, spec,
+ &table->entry[filter_idx].handle,
+ false);
+ if (rc)
+ failed = true;
+
+ spin_lock_bh(&efx->filter_lock);
+ if (rc) {
+ kfree(spec);
+ efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
+ } else {
+ table->entry[filter_idx].spec &=
+ ~EFX_EF10_FILTER_FLAG_BUSY;
+ }
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ if (failed)
+ netif_err(efx, hw, efx->net_dev,
+ "unable to restore all filters\n");
+ else
+ nic_data->must_restore_filters = false;
+}
+
+static void efx_ef10_filter_table_remove(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ int rc;
+
+ for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
+ spec = efx_ef10_filter_entry_spec(table, filter_idx);
+ if (!spec)
+ continue;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_ef10_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_REMOVE :
+ MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
+ MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
+ table->entry[filter_idx].handle);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+
+ WARN_ON(rc != 0);
+ kfree(spec);
+ }
+
+ vfree(table->entry);
+ kfree(table);
+}
+
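+/* In efx_ef10_filter_sync_rx_mode() below, a stack_uc_count or
+ * stack_mc_count of -1 means "promiscuous for this address class": the
+ * per-address filters are replaced by a single default-match filter
+ * (efx_filter_set_uc_def()/efx_filter_set_mc_def()) whose ID is kept in
+ * slot 0 of the corresponding list.
+ */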
+static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_filter_spec spec;
+ bool remove_failed = false;
+ struct netdev_hw_addr *uc;
+ struct netdev_hw_addr *mc;
+ unsigned int filter_idx;
+ int i, n, rc;
+
+ if (!efx_dev_registered(efx))
+ return;
+
+ /* Mark old filters that may need to be removed */
+ spin_lock_bh(&efx->filter_lock);
+ n = table->stack_uc_count < 0 ? 1 : table->stack_uc_count;
+ for (i = 0; i < n; i++) {
+ filter_idx = table->stack_uc_list[i].id % HUNT_FILTER_TBL_ROWS;
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+ }
+ n = table->stack_mc_count < 0 ? 1 : table->stack_mc_count;
+ for (i = 0; i < n; i++) {
+ filter_idx = table->stack_mc_list[i].id % HUNT_FILTER_TBL_ROWS;
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_STACK_OLD;
+ }
+ spin_unlock_bh(&efx->filter_lock);
+
+ /* Copy/convert the address lists; add the primary station
+ * address and broadcast address
+ */
+ netif_addr_lock_bh(net_dev);
+ if (net_dev->flags & IFF_PROMISC ||
+ netdev_uc_count(net_dev) >= EFX_EF10_FILTER_STACK_UC_MAX) {
+ table->stack_uc_count = -1;
+ } else {
+ table->stack_uc_count = 1 + netdev_uc_count(net_dev);
+ memcpy(table->stack_uc_list[0].addr, net_dev->dev_addr,
+ ETH_ALEN);
+ i = 1;
+ netdev_for_each_uc_addr(uc, net_dev) {
+ memcpy(table->stack_uc_list[i].addr,
+ uc->addr, ETH_ALEN);
+ i++;
+ }
+ }
+ if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI) ||
+ netdev_mc_count(net_dev) >= EFX_EF10_FILTER_STACK_MC_MAX) {
+ table->stack_mc_count = -1;
+ } else {
+ table->stack_mc_count = 1 + netdev_mc_count(net_dev);
+ eth_broadcast_addr(table->stack_mc_list[0].addr);
+ i = 1;
+ netdev_for_each_mc_addr(mc, net_dev) {
+ memcpy(table->stack_mc_list[i].addr,
+ mc->addr, ETH_ALEN);
+ i++;
+ }
+ }
+ netif_addr_unlock_bh(net_dev);
+
+ /* Insert/renew unicast filters */
+ if (table->stack_uc_count >= 0) {
+ for (i = 0; i < table->stack_uc_count; i++) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
+ EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_STACK,
+ 0);
+ efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
+ table->stack_uc_list[i].addr);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ /* Fall back to unicast-promisc */
+ while (i--)
+ efx_ef10_filter_remove_safe(
+ efx, EFX_FILTER_PRI_REQUIRED,
+ table->stack_uc_list[i].id);
+ table->stack_uc_count = -1;
+ break;
+ }
+ table->stack_uc_list[i].id = rc;
+ }
+ }
+ if (table->stack_uc_count < 0) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
+ EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_STACK,
+ 0);
+ efx_filter_set_uc_def(&spec);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ WARN_ON(1);
+ table->stack_uc_count = 0;
+ } else {
+ table->stack_uc_list[0].id = rc;
+ }
+ }
+
+ /* Insert/renew multicast filters */
+ if (table->stack_mc_count >= 0) {
+ for (i = 0; i < table->stack_mc_count; i++) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
+ EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_STACK,
+ 0);
+ efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
+ table->stack_mc_list[i].addr);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ /* Fall back to multicast-promisc */
+ while (i--)
+ efx_ef10_filter_remove_safe(
+ efx, EFX_FILTER_PRI_REQUIRED,
+ table->stack_mc_list[i].id);
+ table->stack_mc_count = -1;
+ break;
+ }
+ table->stack_mc_list[i].id = rc;
+ }
+ }
+ if (table->stack_mc_count < 0) {
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_REQUIRED,
+ EFX_FILTER_FLAG_RX_RSS |
+ EFX_FILTER_FLAG_RX_STACK,
+ 0);
+ efx_filter_set_mc_def(&spec);
+ rc = efx_ef10_filter_insert(efx, &spec, true);
+ if (rc < 0) {
+ WARN_ON(1);
+ table->stack_mc_count = 0;
+ } else {
+ table->stack_mc_list[0].id = rc;
+ }
+ }
+
+ /* Remove filters that weren't renewed. Since nothing else
+ * changes the STACK_OLD flag or removes these filters, we
+ * don't need to hold the filter_lock while scanning for
+ * these filters.
+ */
+ for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
+ if (ACCESS_ONCE(table->entry[i].spec) &
+ EFX_EF10_FILTER_FLAG_STACK_OLD) {
+ if (efx_ef10_filter_remove_internal(efx,
+ EFX_FILTER_PRI_REQUIRED,
+ i, true) < 0)
+ remove_failed = true;
+ }
+ }
+ WARN_ON(remove_failed);
+}
+
+static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
+{
+ efx_ef10_filter_sync_rx_mode(efx);
+
+ return efx_mcdi_set_mac(efx);
+}
+
+#ifdef CONFIG_SFC_MTD
+
+struct efx_ef10_nvram_type_info {
+ u16 type, type_mask;
+ u8 port;
+ const char *name;
+};
+
+static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
+ { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" },
+ { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" },
+ { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" },
+ { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" },
+ { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" },
+ { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
+ { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
+};
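+/* A type_mask of 0 requires an exact partition type match, while the
+ * PHY entry's mask of 0xff matches any type differing from
+ * NVRAM_PARTITION_TYPE_PHY_MIN only in its low 8 bits; the probe below
+ * checks
+ *
+ *      (type & ~info->type_mask) == info->type
+ *
+ * against each entry in turn.
+ */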
+
+static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
+ struct efx_mcdi_mtd_partition *part,
+ unsigned int type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
+ const struct efx_ef10_nvram_type_info *info;
+ size_t size, erase_size, outlen;
+ bool protected;
+ int rc;
+
+ for (info = efx_ef10_nvram_types; ; info++) {
+ if (info ==
+ efx_ef10_nvram_types + ARRAY_SIZE(efx_ef10_nvram_types))
+ return -ENODEV;
+ if ((type & ~info->type_mask) == info->type)
+ break;
+ }
+ if (info->port != efx_port_num(efx))
+ return -ENODEV;
+
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
+ if (rc)
+ return rc;
+ if (protected)
+ return -ENODEV; /* hide it */
+
+ part->nvram_type = type;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
+ return -EIO;
+ if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
+ (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
+ part->fw_subtype = MCDI_DWORD(outbuf,
+ NVRAM_METADATA_OUT_SUBTYPE);
+
+ part->common.dev_type_name = "EF10 NVRAM manager";
+ part->common.type_name = info->name;
+
+ part->common.mtd.type = MTD_NORFLASH;
+ part->common.mtd.flags = MTD_CAP_NORFLASH;
+ part->common.mtd.size = size;
+ part->common.mtd.erasesize = erase_size;
+
+ return 0;
+}
+
+static int efx_ef10_mtd_probe(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
+ struct efx_mcdi_mtd_partition *parts;
+ size_t outlen, n_parts_total, i, n_parts;
+ unsigned int type;
+ int rc;
+
+ ASSERT_RTNL();
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
+ return -EIO;
+
+ n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
+ if (n_parts_total >
+ MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
+ return -EIO;
+
+ parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ n_parts = 0;
+ for (i = 0; i < n_parts_total; i++) {
+ type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
+ i);
+ rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type);
+ if (rc == 0)
+ n_parts++;
+ else if (rc != -ENODEV)
+ goto fail;
+ }
+
+ rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
+fail:
+ if (rc)
+ kfree(parts);
+ return rc;
+}
+
+#endif /* CONFIG_SFC_MTD */
+
+static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
+{
+ _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
+}
+
+const struct efx_nic_type efx_hunt_a0_nic_type = {
+ .mem_map_size = efx_ef10_mem_map_size,
+ .probe = efx_ef10_probe,
+ .remove = efx_ef10_remove,
+ .dimension_resources = efx_ef10_dimension_resources,
+ .init = efx_ef10_init_nic,
+ .fini = efx_port_dummy_op_void,
+ .map_reset_reason = efx_mcdi_map_reset_reason,
+ .map_reset_flags = efx_ef10_map_reset_flags,
+ .reset = efx_mcdi_reset,
+ .probe_port = efx_mcdi_port_probe,
+ .remove_port = efx_mcdi_port_remove,
+ .fini_dmaq = efx_ef10_fini_dmaq,
+ .describe_stats = efx_ef10_describe_stats,
+ .update_stats = efx_ef10_update_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
+ .set_id_led = efx_mcdi_set_id_led,
+ .push_irq_moderation = efx_ef10_push_irq_moderation,
+ .reconfigure_mac = efx_ef10_mac_reconfigure,
+ .check_mac_fault = efx_mcdi_mac_check_fault,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
+ .get_wol = efx_ef10_get_wol,
+ .set_wol = efx_ef10_set_wol,
+ .resume_wol = efx_port_dummy_op_void,
+ /* TODO: test_chip */
+ .test_nvram = efx_mcdi_nvram_test_all,
+ .mcdi_request = efx_ef10_mcdi_request,
+ .mcdi_poll_response = efx_ef10_mcdi_poll_response,
+ .mcdi_read_response = efx_ef10_mcdi_read_response,
+ .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
+ .irq_enable_master = efx_port_dummy_op_void,
+ .irq_test_generate = efx_ef10_irq_test_generate,
+ .irq_disable_non_ev = efx_port_dummy_op_void,
+ .irq_handle_msi = efx_ef10_msi_interrupt,
+ .irq_handle_legacy = efx_ef10_legacy_interrupt,
+ .tx_probe = efx_ef10_tx_probe,
+ .tx_init = efx_ef10_tx_init,
+ .tx_remove = efx_ef10_tx_remove,
+ .tx_write = efx_ef10_tx_write,
+ .rx_push_indir_table = efx_ef10_rx_push_indir_table,
+ .rx_probe = efx_ef10_rx_probe,
+ .rx_init = efx_ef10_rx_init,
+ .rx_remove = efx_ef10_rx_remove,
+ .rx_write = efx_ef10_rx_write,
+ .rx_defer_refill = efx_ef10_rx_defer_refill,
+ .ev_probe = efx_ef10_ev_probe,
+ .ev_init = efx_ef10_ev_init,
+ .ev_fini = efx_ef10_ev_fini,
+ .ev_remove = efx_ef10_ev_remove,
+ .ev_process = efx_ef10_ev_process,
+ .ev_read_ack = efx_ef10_ev_read_ack,
+ .ev_test_generate = efx_ef10_ev_test_generate,
+ .filter_table_probe = efx_ef10_filter_table_probe,
+ .filter_table_restore = efx_ef10_filter_table_restore,
+ .filter_table_remove = efx_ef10_filter_table_remove,
+ .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
+ .filter_insert = efx_ef10_filter_insert,
+ .filter_remove_safe = efx_ef10_filter_remove_safe,
+ .filter_get_safe = efx_ef10_filter_get_safe,
+ .filter_clear_rx = efx_ef10_filter_clear_rx,
+ .filter_count_rx_used = efx_ef10_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_insert = efx_ef10_filter_rfs_insert,
+ .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = efx_ef10_mtd_probe,
+ .mtd_rename = efx_mcdi_mtd_rename,
+ .mtd_read = efx_mcdi_mtd_read,
+ .mtd_erase = efx_mcdi_mtd_erase,
+ .mtd_write = efx_mcdi_mtd_write,
+ .mtd_sync = efx_mcdi_mtd_sync,
+#endif
+ .ptp_write_host_time = efx_ef10_ptp_write_host_time,
+
+ .revision = EFX_REV_HUNT_A0,
+ .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
+ .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
+ .can_rx_scatter = true,
+ .always_rx_scatter = true,
+ .max_interrupt_mode = EFX_INT_MODE_MSIX,
+ .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
+ .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXHASH | NETIF_F_NTUPLE),
+ .mcdi_max_ver = 2,
+ .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
+};
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
new file mode 100644
index 00000000000..b3f4e3755fd
--- /dev/null
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -0,0 +1,415 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2012-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_EF10_REGS_H
+#define EFX_EF10_REGS_H
+
+/* EF10 hardware architecture definitions have a name prefix following
+ * the format:
+ *
+ * E<type>_<min-rev><max-rev>_
+ *
+ * The following <type> strings are used:
+ *
+ *                 MMIO register  Host memory structure
+ *  -------------------------------------------------------------
+ *  Address        R
+ *  Bitfield       RF             SF
+ *  Enumerator     FE             SE
+ *
+ * <min-rev> is the first revision to which the definition applies:
+ *
+ * D: Huntington A0
+ *
+ * If the definition has been changed or removed in later revisions
+ * then <max-rev> is the last revision to which the definition applies;
+ * otherwise it is "Z".
+ */
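+/* Worked examples: ER_DZ_EVQ_RPTR is an MMIO register address ("R"),
+ * ERF_DZ_EVQ_RPTR is a bitfield within it ("RF"), and
+ * ESE_DZ_EV_CODE_RX_EV is an enumerator used in a host memory structure
+ * ("SE"); all are valid from Huntington A0 ("D") onwards ("Z").
+ */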
+
+/**************************************************************************
+ *
+ * EF10 registers and descriptors
+ *
+ **************************************************************************
+ */
+
+/* BIU_HW_REV_ID_REG: */
+#define ER_DZ_BIU_HW_REV_ID 0x00000000
+#define ERF_DZ_HW_REV_ID_LBN 0
+#define ERF_DZ_HW_REV_ID_WIDTH 32
+
+/* BIU_MC_SFT_STATUS_REG: */
+#define ER_DZ_BIU_MC_SFT_STATUS 0x00000010
+#define ER_DZ_BIU_MC_SFT_STATUS_STEP 4
+#define ER_DZ_BIU_MC_SFT_STATUS_ROWS 8
+#define ERF_DZ_MC_SFT_STATUS_LBN 0
+#define ERF_DZ_MC_SFT_STATUS_WIDTH 32
+
+/* BIU_INT_ISR_REG: */
+#define ER_DZ_BIU_INT_ISR 0x00000090
+#define ERF_DZ_ISR_REG_LBN 0
+#define ERF_DZ_ISR_REG_WIDTH 32
+
+/* MC_DB_LWRD_REG: */
+#define ER_DZ_MC_DB_LWRD 0x00000200
+#define ERF_DZ_MC_DOORBELL_L_LBN 0
+#define ERF_DZ_MC_DOORBELL_L_WIDTH 32
+
+/* MC_DB_HWRD_REG: */
+#define ER_DZ_MC_DB_HWRD 0x00000204
+#define ERF_DZ_MC_DOORBELL_H_LBN 0
+#define ERF_DZ_MC_DOORBELL_H_WIDTH 32
+
+/* EVQ_RPTR_REG: */
+#define ER_DZ_EVQ_RPTR 0x00000400
+#define ER_DZ_EVQ_RPTR_STEP 8192
+#define ER_DZ_EVQ_RPTR_ROWS 2048
+#define ERF_DZ_EVQ_RPTR_VLD_LBN 15
+#define ERF_DZ_EVQ_RPTR_VLD_WIDTH 1
+#define ERF_DZ_EVQ_RPTR_LBN 0
+#define ERF_DZ_EVQ_RPTR_WIDTH 15
+
+/* EVQ_TMR_REG: */
+#define ER_DZ_EVQ_TMR 0x00000420
+#define ER_DZ_EVQ_TMR_STEP 8192
+#define ER_DZ_EVQ_TMR_ROWS 2048
+#define ERF_DZ_TC_TIMER_MODE_LBN 14
+#define ERF_DZ_TC_TIMER_MODE_WIDTH 2
+#define ERF_DZ_TC_TIMER_VAL_LBN 0
+#define ERF_DZ_TC_TIMER_VAL_WIDTH 14
+
+/* RX_DESC_UPD_REG: */
+#define ER_DZ_RX_DESC_UPD 0x00000830
+#define ER_DZ_RX_DESC_UPD_STEP 8192
+#define ER_DZ_RX_DESC_UPD_ROWS 2048
+#define ERF_DZ_RX_DESC_WPTR_LBN 0
+#define ERF_DZ_RX_DESC_WPTR_WIDTH 12
+
+/* TX_DESC_UPD_REG: */
+#define ER_DZ_TX_DESC_UPD 0x00000a10
+#define ER_DZ_TX_DESC_UPD_STEP 8192
+#define ER_DZ_TX_DESC_UPD_ROWS 2048
+#define ERF_DZ_RSVD_LBN 76
+#define ERF_DZ_RSVD_WIDTH 20
+#define ERF_DZ_TX_DESC_WPTR_LBN 64
+#define ERF_DZ_TX_DESC_WPTR_WIDTH 12
+#define ERF_DZ_TX_DESC_HWORD_LBN 32
+#define ERF_DZ_TX_DESC_HWORD_WIDTH 32
+#define ERF_DZ_TX_DESC_LWORD_LBN 0
+#define ERF_DZ_TX_DESC_LWORD_WIDTH 32
+
+/* DRIVER_EV */
+#define ESF_DZ_DRV_CODE_LBN 60
+#define ESF_DZ_DRV_CODE_WIDTH 4
+#define ESF_DZ_DRV_SUB_CODE_LBN 56
+#define ESF_DZ_DRV_SUB_CODE_WIDTH 4
+#define ESE_DZ_DRV_TIMER_EV 3
+#define ESE_DZ_DRV_START_UP_EV 2
+#define ESE_DZ_DRV_WAKE_UP_EV 1
+#define ESF_DZ_DRV_SUB_DATA_LBN 0
+#define ESF_DZ_DRV_SUB_DATA_WIDTH 56
+#define ESF_DZ_DRV_EVQ_ID_LBN 0
+#define ESF_DZ_DRV_EVQ_ID_WIDTH 14
+#define ESF_DZ_DRV_TMR_ID_LBN 0
+#define ESF_DZ_DRV_TMR_ID_WIDTH 14
+
+/* EVENT_ENTRY */
+#define ESF_DZ_EV_CODE_LBN 60
+#define ESF_DZ_EV_CODE_WIDTH 4
+#define ESE_DZ_EV_CODE_MCDI_EV 12
+#define ESE_DZ_EV_CODE_DRIVER_EV 5
+#define ESE_DZ_EV_CODE_TX_EV 2
+#define ESE_DZ_EV_CODE_RX_EV 0
+#define ESE_DZ_OTHER other
+#define ESF_DZ_EV_DATA_LBN 0
+#define ESF_DZ_EV_DATA_WIDTH 60
+
+/* MC_EVENT */
+#define ESF_DZ_MC_CODE_LBN 60
+#define ESF_DZ_MC_CODE_WIDTH 4
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_MC_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_MC_DROP_EVENT_LBN 58
+#define ESF_DZ_MC_DROP_EVENT_WIDTH 1
+#define ESF_DZ_MC_SOFT_LBN 0
+#define ESF_DZ_MC_SOFT_WIDTH 58
+
+/* RX_EVENT */
+#define ESF_DZ_RX_CODE_LBN 60
+#define ESF_DZ_RX_CODE_WIDTH 4
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_RX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_RX_DROP_EVENT_LBN 58
+#define ESF_DZ_RX_DROP_EVENT_WIDTH 1
+#define ESF_DZ_RX_EV_RSVD2_LBN 54
+#define ESF_DZ_RX_EV_RSVD2_WIDTH 4
+#define ESF_DZ_RX_EV_SOFT2_LBN 52
+#define ESF_DZ_RX_EV_SOFT2_WIDTH 2
+#define ESF_DZ_RX_DSC_PTR_LBITS_LBN 48
+#define ESF_DZ_RX_DSC_PTR_LBITS_WIDTH 4
+#define ESF_DZ_RX_L4_CLASS_LBN 45
+#define ESF_DZ_RX_L4_CLASS_WIDTH 3
+#define ESE_DZ_L4_CLASS_RSVD7 7
+#define ESE_DZ_L4_CLASS_RSVD6 6
+#define ESE_DZ_L4_CLASS_RSVD5 5
+#define ESE_DZ_L4_CLASS_RSVD4 4
+#define ESE_DZ_L4_CLASS_RSVD3 3
+#define ESE_DZ_L4_CLASS_UDP 2
+#define ESE_DZ_L4_CLASS_TCP 1
+#define ESE_DZ_L4_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_L3_CLASS_LBN 42
+#define ESF_DZ_RX_L3_CLASS_WIDTH 3
+#define ESE_DZ_L3_CLASS_RSVD7 7
+#define ESE_DZ_L3_CLASS_IP6_FRAG 6
+#define ESE_DZ_L3_CLASS_ARP 5
+#define ESE_DZ_L3_CLASS_IP4_FRAG 4
+#define ESE_DZ_L3_CLASS_FCOE 3
+#define ESE_DZ_L3_CLASS_IP6 2
+#define ESE_DZ_L3_CLASS_IP4 1
+#define ESE_DZ_L3_CLASS_UNKNOWN 0
+#define ESF_DZ_RX_ETH_TAG_CLASS_LBN 39
+#define ESF_DZ_RX_ETH_TAG_CLASS_WIDTH 3
+#define ESE_DZ_ETH_TAG_CLASS_RSVD7 7
+#define ESE_DZ_ETH_TAG_CLASS_RSVD6 6
+#define ESE_DZ_ETH_TAG_CLASS_RSVD5 5
+#define ESE_DZ_ETH_TAG_CLASS_RSVD4 4
+#define ESE_DZ_ETH_TAG_CLASS_RSVD3 3
+#define ESE_DZ_ETH_TAG_CLASS_VLAN2 2
+#define ESE_DZ_ETH_TAG_CLASS_VLAN1 1
+#define ESE_DZ_ETH_TAG_CLASS_NONE 0
+#define ESF_DZ_RX_ETH_BASE_CLASS_LBN 36
+#define ESF_DZ_RX_ETH_BASE_CLASS_WIDTH 3
+#define ESE_DZ_ETH_BASE_CLASS_LLC_SNAP 2
+#define ESE_DZ_ETH_BASE_CLASS_LLC 1
+#define ESE_DZ_ETH_BASE_CLASS_ETH2 0
+#define ESF_DZ_RX_MAC_CLASS_LBN 35
+#define ESF_DZ_RX_MAC_CLASS_WIDTH 1
+#define ESE_DZ_MAC_CLASS_MCAST 1
+#define ESE_DZ_MAC_CLASS_UCAST 0
+#define ESF_DZ_RX_EV_SOFT1_LBN 32
+#define ESF_DZ_RX_EV_SOFT1_WIDTH 3
+#define ESF_DZ_RX_EV_RSVD1_LBN 31
+#define ESF_DZ_RX_EV_RSVD1_WIDTH 1
+#define ESF_DZ_RX_ABORT_LBN 30
+#define ESF_DZ_RX_ABORT_WIDTH 1
+#define ESF_DZ_RX_ECC_ERR_LBN 29
+#define ESF_DZ_RX_ECC_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC1_ERR_LBN 28
+#define ESF_DZ_RX_CRC1_ERR_WIDTH 1
+#define ESF_DZ_RX_CRC0_ERR_LBN 27
+#define ESF_DZ_RX_CRC0_ERR_WIDTH 1
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_LBN 26
+#define ESF_DZ_RX_TCPUDP_CKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_IPCKSUM_ERR_LBN 25
+#define ESF_DZ_RX_IPCKSUM_ERR_WIDTH 1
+#define ESF_DZ_RX_ECRC_ERR_LBN 24
+#define ESF_DZ_RX_ECRC_ERR_WIDTH 1
+#define ESF_DZ_RX_QLABEL_LBN 16
+#define ESF_DZ_RX_QLABEL_WIDTH 5
+#define ESF_DZ_RX_PARSE_INCOMPLETE_LBN 15
+#define ESF_DZ_RX_PARSE_INCOMPLETE_WIDTH 1
+#define ESF_DZ_RX_CONT_LBN 14
+#define ESF_DZ_RX_CONT_WIDTH 1
+#define ESF_DZ_RX_BYTES_LBN 0
+#define ESF_DZ_RX_BYTES_WIDTH 14
+
+/* RX_KER_DESC */
+#define ESF_DZ_RX_KER_RESERVED_LBN 62
+#define ESF_DZ_RX_KER_RESERVED_WIDTH 2
+#define ESF_DZ_RX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_RX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
+
+/* RX_USER_DESC */
+#define ESF_DZ_RX_USR_RESERVED_LBN 62
+#define ESF_DZ_RX_USR_RESERVED_WIDTH 2
+#define ESF_DZ_RX_USR_BYTE_CNT_LBN 48
+#define ESF_DZ_RX_USR_BYTE_CNT_WIDTH 14
+#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_LBN 44
+#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_WIDTH 4
+#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
+#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
+#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
+#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
+#define ESF_DZ_RX_USR_BUF_ID_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_BUF_ID_OFFSET_WIDTH 44
+#define ESF_DZ_RX_USR_4KBPS_BUF_ID_LBN 12
+#define ESF_DZ_RX_USR_4KBPS_BUF_ID_WIDTH 32
+#define ESF_DZ_RX_USR_64KBPS_BUF_ID_LBN 16
+#define ESF_DZ_RX_USR_64KBPS_BUF_ID_WIDTH 28
+#define ESF_DZ_RX_USR_1MBPS_BUF_ID_LBN 20
+#define ESF_DZ_RX_USR_1MBPS_BUF_ID_WIDTH 24
+#define ESF_DZ_RX_USR_4MBPS_BUF_ID_LBN 22
+#define ESF_DZ_RX_USR_4MBPS_BUF_ID_WIDTH 22
+#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
+#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
+#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
+#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
+
+/* TX_CSUM_TSTAMP_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TIMESTAMP_LBN 5
+#define ESF_DZ_TX_TIMESTAMP_WIDTH 1
+#define ESF_DZ_TX_OPTION_CRC_MODE_LBN 2
+#define ESF_DZ_TX_OPTION_CRC_MODE_WIDTH 3
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_MPA 5
+#define ESE_DZ_TX_OPTION_CRC_FCOIP_FCOE 4
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR_AND_PYLD 3
+#define ESE_DZ_TX_OPTION_CRC_ISCSI_HDR 2
+#define ESE_DZ_TX_OPTION_CRC_FCOE 1
+#define ESE_DZ_TX_OPTION_CRC_OFF 0
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_LBN 1
+#define ESF_DZ_TX_OPTION_UDP_TCP_CSUM_WIDTH 1
+#define ESF_DZ_TX_OPTION_IP_CSUM_LBN 0
+#define ESF_DZ_TX_OPTION_IP_CSUM_WIDTH 1
+
+/* TX_EVENT */
+#define ESF_DZ_TX_CODE_LBN 60
+#define ESF_DZ_TX_CODE_WIDTH 4
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_LBN 59
+#define ESF_DZ_TX_OVERRIDE_HOLDOFF_WIDTH 1
+#define ESF_DZ_TX_DROP_EVENT_LBN 58
+#define ESF_DZ_TX_DROP_EVENT_WIDTH 1
+#define ESF_DZ_TX_EV_RSVD_LBN 48
+#define ESF_DZ_TX_EV_RSVD_WIDTH 10
+#define ESF_DZ_TX_SOFT2_LBN 32
+#define ESF_DZ_TX_SOFT2_WIDTH 16
+#define ESF_DZ_TX_CAN_MERGE_LBN 31
+#define ESF_DZ_TX_CAN_MERGE_WIDTH 1
+#define ESF_DZ_TX_SOFT1_LBN 24
+#define ESF_DZ_TX_SOFT1_WIDTH 7
+#define ESF_DZ_TX_QLABEL_LBN 16
+#define ESF_DZ_TX_QLABEL_WIDTH 5
+#define ESF_DZ_TX_DESCR_INDX_LBN 0
+#define ESF_DZ_TX_DESCR_INDX_WIDTH 16
+
+/* TX_KER_DESC */
+#define ESF_DZ_TX_KER_TYPE_LBN 63
+#define ESF_DZ_TX_KER_TYPE_WIDTH 1
+#define ESF_DZ_TX_KER_CONT_LBN 62
+#define ESF_DZ_TX_KER_CONT_WIDTH 1
+#define ESF_DZ_TX_KER_BYTE_CNT_LBN 48
+#define ESF_DZ_TX_KER_BYTE_CNT_WIDTH 14
+#define ESF_DZ_TX_KER_BUF_ADDR_LBN 0
+#define ESF_DZ_TX_KER_BUF_ADDR_WIDTH 48
+
+/* TX_PIO_DESC */
+#define ESF_DZ_TX_PIO_TYPE_LBN 63
+#define ESF_DZ_TX_PIO_TYPE_WIDTH 1
+#define ESF_DZ_TX_PIO_OPT_LBN 60
+#define ESF_DZ_TX_PIO_OPT_WIDTH 3
+#define ESF_DZ_TX_PIO_CONT_LBN 59
+#define ESF_DZ_TX_PIO_CONT_WIDTH 1
+#define ESF_DZ_TX_PIO_BYTE_CNT_LBN 32
+#define ESF_DZ_TX_PIO_BYTE_CNT_WIDTH 12
+#define ESF_DZ_TX_PIO_BUF_ADDR_LBN 0
+#define ESF_DZ_TX_PIO_BUF_ADDR_WIDTH 12
+
+/* TX_TSO_DESC */
+#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
+#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
+#define ESF_DZ_TX_OPTION_TYPE_LBN 60
+#define ESF_DZ_TX_OPTION_TYPE_WIDTH 3
+#define ESE_DZ_TX_OPTION_DESC_TSO 7
+#define ESE_DZ_TX_OPTION_DESC_VLAN 6
+#define ESE_DZ_TX_OPTION_DESC_CRC_CSUM 0
+#define ESF_DZ_TX_TSO_TCP_FLAGS_LBN 48
+#define ESF_DZ_TX_TSO_TCP_FLAGS_WIDTH 8
+#define ESF_DZ_TX_TSO_IP_ID_LBN 32
+#define ESF_DZ_TX_TSO_IP_ID_WIDTH 16
+#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
+#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
+
+/* TX_USER_DESC */
+#define ESF_DZ_TX_USR_TYPE_LBN 63
+#define ESF_DZ_TX_USR_TYPE_WIDTH 1
+#define ESF_DZ_TX_USR_CONT_LBN 62
+#define ESF_DZ_TX_USR_CONT_WIDTH 1
+#define ESF_DZ_TX_USR_BYTE_CNT_LBN 48
+#define ESF_DZ_TX_USR_BYTE_CNT_WIDTH 14
+#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_LBN 44
+#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_WIDTH 4
+#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
+#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
+#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
+#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
+#define ESF_DZ_TX_USR_BUF_ID_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_BUF_ID_OFFSET_WIDTH 44
+#define ESF_DZ_TX_USR_4KBPS_BUF_ID_LBN 12
+#define ESF_DZ_TX_USR_4KBPS_BUF_ID_WIDTH 32
+#define ESF_DZ_TX_USR_64KBPS_BUF_ID_LBN 16
+#define ESF_DZ_TX_USR_64KBPS_BUF_ID_WIDTH 28
+#define ESF_DZ_TX_USR_1MBPS_BUF_ID_LBN 20
+#define ESF_DZ_TX_USR_1MBPS_BUF_ID_WIDTH 24
+#define ESF_DZ_TX_USR_4MBPS_BUF_ID_LBN 22
+#define ESF_DZ_TX_USR_4MBPS_BUF_ID_WIDTH 22
+#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
+#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
+#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
+#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_LBN 0
+#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
+/*************************************************************************/
+
+/* TX_DESC_UPD_REG: Transmit descriptor update register.
+ * We may write just the dword containing the descriptor write pointer
+ * (the third dword of this register); hence the _DWORD definitions
+ * below.
+ */
+#define ER_DZ_TX_DESC_UPD_DWORD (ER_DZ_TX_DESC_UPD + 2 * 4)
+#define ERF_DZ_TX_DESC_WPTR_DWORD_LBN (ERF_DZ_TX_DESC_WPTR_LBN - 2 * 32)
+#define ERF_DZ_TX_DESC_WPTR_DWORD_WIDTH ERF_DZ_TX_DESC_WPTR_WIDTH
+
+/* The workaround for bug 35388 requires multiplexing writes through
+ * the TX_DESC_UPD_DWORD address.
+ * TX_DESC_UPD: 0ppppppppppp (bit 11 lost)
+ * EVQ_RPTR: 1000hhhhhhhh, 1001llllllll (split into high and low bits)
+ * EVQ_TMR: 11mmvvvvvvvv (bits 8:13 of value lost)
+ */
+#define ER_DD_EVQ_INDIRECT ER_DZ_TX_DESC_UPD_DWORD
+#define ERF_DD_EVQ_IND_RPTR_FLAGS_LBN 8
+#define ERF_DD_EVQ_IND_RPTR_FLAGS_WIDTH 4
+#define EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH 8
+#define EFE_DD_EVQ_IND_RPTR_FLAGS_LOW 9
+#define ERF_DD_EVQ_IND_RPTR_LBN 0
+#define ERF_DD_EVQ_IND_RPTR_WIDTH 8
+#define ERF_DD_EVQ_IND_TIMER_FLAGS_LBN 10
+#define ERF_DD_EVQ_IND_TIMER_FLAGS_WIDTH 2
+#define EFE_DD_EVQ_IND_TIMER_FLAGS 3
+#define ERF_DD_EVQ_IND_TIMER_MODE_LBN 8
+#define ERF_DD_EVQ_IND_TIMER_MODE_WIDTH 2
+#define ERF_DD_EVQ_IND_TIMER_VAL_LBN 0
+#define ERF_DD_EVQ_IND_TIMER_VAL_WIDTH 8
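+/* Illustrative only: under this workaround an event queue read pointer
+ * update becomes two dword writes to ER_DD_EVQ_INDIRECT for the queue's
+ * page, high byte first then low byte, e.g. with placeholder names
+ * "reg" and "rptr" (the masked read pointer):
+ *
+ *      EFX_POPULATE_DWORD_2(reg, ERF_DD_EVQ_IND_RPTR_FLAGS,
+ *                           EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
+ *                           ERF_DD_EVQ_IND_RPTR, rptr >> 8);
+ *      EFX_POPULATE_DWORD_2(reg, ERF_DD_EVQ_IND_RPTR_FLAGS,
+ *                           EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
+ *                           ERF_DD_EVQ_IND_RPTR, rptr & 0xff);
+ */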
+
+/* TX_PIOBUF
+ * PIO buffer aperture (paged)
+ */
+#define ER_DZ_TX_PIOBUF 4096
+#define ER_DZ_TX_PIOBUF_SIZE 2048
+
+/* RX packet prefix */
+#define ES_DZ_RX_PREFIX_HASH_OFST 0
+#define ES_DZ_RX_PREFIX_VLAN1_OFST 4
+#define ES_DZ_RX_PREFIX_VLAN2_OFST 6
+#define ES_DZ_RX_PREFIX_PKTLEN_OFST 8
+#define ES_DZ_RX_PREFIX_TSTAMP_OFST 10
+#define ES_DZ_RX_PREFIX_SIZE 14
+
+#endif /* EFX_EF10_REGS_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index c72968840f1..07c9bc4c61b 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2011 Solarflare Communications Inc.
+ * Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -17,7 +17,6 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>
-#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/topology.h>
#include <linux/gfp.h>
@@ -81,8 +80,7 @@ const char *const efx_reset_type_names[] = {
[RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
[RESET_TYPE_INT_ERROR] = "INT_ERROR",
[RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
- [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
- [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
+ [RESET_TYPE_DMA_ERROR] = "DMA_ERROR",
[RESET_TYPE_TX_SKIP] = "TX_SKIP",
[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
};
@@ -191,8 +189,8 @@ MODULE_PARM_DESC(debug, "Bitmapped debugging message enable value");
*
*************************************************************************/
-static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq);
-static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq);
+static int efx_soft_enable_interrupts(struct efx_nic *efx);
+static void efx_soft_disable_interrupts(struct efx_nic *efx);
static void efx_remove_channel(struct efx_channel *channel);
static void efx_remove_channels(struct efx_nic *efx);
static const struct efx_channel_type efx_default_channel_type;
@@ -248,30 +246,12 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
efx_channel_get_rx_queue(channel);
efx_rx_flush_packet(channel);
- if (rx_queue->enabled)
- efx_fast_push_rx_descriptors(rx_queue);
+ efx_fast_push_rx_descriptors(rx_queue);
}
return spent;
}
-/* Mark channel as finished processing
- *
- * Note that since we will not receive further interrupts for this
- * channel before we finish processing and call the eventq_read_ack()
- * method, there is no need to use the interrupt hold-off timers.
- */
-static inline void efx_channel_processed(struct efx_channel *channel)
-{
- /* The interrupt handler for this channel may set work_pending
- * as soon as we acknowledge the events we've seen. Make sure
- * it's cleared before then. */
- channel->work_pending = false;
- smp_wmb();
-
- efx_nic_eventq_read_ack(channel);
-}
-
/* NAPI poll handler
*
* NAPI guarantees serialisation of polls of the same device, which
@@ -316,58 +296,16 @@ static int efx_poll(struct napi_struct *napi, int budget)
/* There is no race here; although napi_disable() will
* only wait for napi_complete(), this isn't a problem
- * since efx_channel_processed() will have no effect if
+ * since efx_nic_eventq_read_ack() will have no effect if
* interrupts have already been disabled.
*/
napi_complete(napi);
- efx_channel_processed(channel);
+ efx_nic_eventq_read_ack(channel);
}
return spent;
}
-/* Process the eventq of the specified channel immediately on this CPU
- *
- * Disable hardware generated interrupts, wait for any existing
- * processing to finish, then directly poll (and ack ) the eventq.
- * Finally reenable NAPI and interrupts.
- *
- * This is for use only during a loopback self-test. It must not
- * deliver any packets up the stack as this can result in deadlock.
- */
-void efx_process_channel_now(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
-
- BUG_ON(channel->channel >= efx->n_channels);
- BUG_ON(!channel->enabled);
- BUG_ON(!efx->loopback_selftest);
-
- /* Disable interrupts and wait for ISRs to complete */
- efx_nic_disable_interrupts(efx);
- if (efx->legacy_irq) {
- synchronize_irq(efx->legacy_irq);
- efx->legacy_irq_enabled = false;
- }
- if (channel->irq)
- synchronize_irq(channel->irq);
-
- /* Wait for any NAPI processing to complete */
- napi_disable(&channel->napi_str);
-
- /* Poll the channel */
- efx_process_channel(channel, channel->eventq_mask + 1);
-
- /* Ack the eventq. This may cause an interrupt to be generated
- * when they are reenabled */
- efx_channel_processed(channel);
-
- napi_enable(&channel->napi_str);
- if (efx->legacy_irq)
- efx->legacy_irq_enabled = true;
- efx_nic_enable_interrupts(efx);
-}
-
/* Create event queue
* Event queue memory allocations are done only once. If the channel
* is reset, the memory buffer will be reused; this guards against
@@ -391,14 +329,23 @@ static int efx_probe_eventq(struct efx_channel *channel)
}
/* Prepare channel's event queue */
-static void efx_init_eventq(struct efx_channel *channel)
+static int efx_init_eventq(struct efx_channel *channel)
{
- netif_dbg(channel->efx, drv, channel->efx->net_dev,
- "chan %d init event queue\n", channel->channel);
+ struct efx_nic *efx = channel->efx;
+ int rc;
- channel->eventq_read_ptr = 0;
+ EFX_WARN_ON_PARANOID(channel->eventq_init);
- efx_nic_init_eventq(channel);
+ netif_dbg(efx, drv, efx->net_dev,
+ "chan %d init event queue\n", channel->channel);
+
+ rc = efx_nic_init_eventq(channel);
+ if (rc == 0) {
+ efx->type->push_irq_moderation(channel);
+ channel->eventq_read_ptr = 0;
+ channel->eventq_init = true;
+ }
+ return rc;
}
/* Enable event queue processing and NAPI */
@@ -407,11 +354,7 @@ static void efx_start_eventq(struct efx_channel *channel)
netif_dbg(channel->efx, ifup, channel->efx->net_dev,
"chan %d start event queue\n", channel->channel);
- /* The interrupt handler for this channel may set work_pending
- * as soon as we enable it. Make sure it's cleared before
- * then. Similarly, make sure it sees the enabled flag set.
- */
- channel->work_pending = false;
+ /* Make sure the NAPI handler sees the enabled flag set */
channel->enabled = true;
smp_wmb();
@@ -431,10 +374,14 @@ static void efx_stop_eventq(struct efx_channel *channel)
static void efx_fini_eventq(struct efx_channel *channel)
{
+ if (!channel->eventq_init)
+ return;
+
netif_dbg(channel->efx, drv, channel->efx->net_dev,
"chan %d fini event queue\n", channel->channel);
efx_nic_fini_eventq(channel);
+ channel->eventq_init = false;
}
static void efx_remove_eventq(struct efx_channel *channel)
@@ -583,8 +530,8 @@ static void efx_set_channel_names(struct efx_nic *efx)
efx_for_each_channel(channel, efx)
channel->type->get_name(channel,
- efx->channel_name[channel->channel],
- sizeof(efx->channel_name[0]));
+ efx->msi_context[channel->channel].name,
+ sizeof(efx->msi_context[0].name));
}
static int efx_probe_channels(struct efx_nic *efx)
@@ -634,13 +581,13 @@ static void efx_start_datapath(struct efx_nic *efx)
* support the current MTU, including padding for header
* alignment and overruns.
*/
- efx->rx_dma_len = (efx->type->rx_buffer_hash_size +
+ efx->rx_dma_len = (efx->rx_prefix_size +
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_padding);
rx_buf_len = (sizeof(struct efx_rx_page_state) +
NET_IP_ALIGN + efx->rx_dma_len);
if (rx_buf_len <= PAGE_SIZE) {
- efx->rx_scatter = false;
+ efx->rx_scatter = efx->type->always_rx_scatter;
efx->rx_buffer_order = 0;
} else if (efx->type->can_rx_scatter) {
BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
@@ -668,9 +615,9 @@ static void efx_start_datapath(struct efx_nic *efx)
efx->rx_dma_len, efx->rx_page_buf_step,
efx->rx_bufs_per_page, efx->rx_pages_per_batch);
- /* RX filters also have scatter-enabled flags */
+ /* RX filters may also have scatter-enabled flags */
if (efx->rx_scatter != old_rx_scatter)
- efx_filter_update_rx_scatter(efx);
+ efx->type->filter_update_rx_scatter(efx);
/* We must keep at least one descriptor in a TX ring empty.
* We could avoid this when the queue size does not exactly
@@ -684,11 +631,14 @@ static void efx_start_datapath(struct efx_nic *efx)
/* Initialise the channels */
efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
efx_init_tx_queue(tx_queue);
+ atomic_inc(&efx->active_queues);
+ }
efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue);
+ atomic_inc(&efx->active_queues);
efx_nic_generate_fill_event(rx_queue);
}
@@ -704,30 +654,15 @@ static void efx_stop_datapath(struct efx_nic *efx)
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
- struct pci_dev *dev = efx->pci_dev;
int rc;
EFX_ASSERT_RESET_SERIALISED(efx);
BUG_ON(efx->port_enabled);
- /* Only perform flush if dma is enabled */
- if (dev->is_busmaster && efx->state != STATE_RECOVERY) {
- rc = efx_nic_flush_queues(efx);
-
- if (rc && EFX_WORKAROUND_7803(efx)) {
- /* Schedule a reset to recover from the flush failure. The
- * descriptor caches reference memory we're about to free,
- * but falcon_reconfigure_mac_wrapper() won't reconnect
- * the MACs because of the pending reset. */
- netif_err(efx, drv, efx->net_dev,
- "Resetting to recover from flush failure\n");
- efx_schedule_reset(efx, RESET_TYPE_ALL);
- } else if (rc) {
- netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
- } else {
- netif_dbg(efx, drv, efx->net_dev,
- "successfully flushed all queues\n");
- }
+ /* Stop RX refill */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ rx_queue->refill_enabled = false;
}
efx_for_each_channel(channel, efx) {
@@ -741,7 +676,26 @@ static void efx_stop_datapath(struct efx_nic *efx)
efx_stop_eventq(channel);
efx_start_eventq(channel);
}
+ }
+
+ rc = efx->type->fini_dmaq(efx);
+ if (rc && EFX_WORKAROUND_7803(efx)) {
+ /* Schedule a reset to recover from the flush failure. The
+ * descriptor caches reference memory we're about to free,
+ * but falcon_reconfigure_mac_wrapper() won't reconnect
+ * the MACs because of the pending reset.
+ */
+ netif_err(efx, drv, efx->net_dev,
+ "Resetting to recover from flush failure\n");
+ efx_schedule_reset(efx, RESET_TYPE_ALL);
+ } else if (rc) {
+ netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
+ } else {
+ netif_dbg(efx, drv, efx->net_dev,
+ "successfully flushed all queues\n");
+ }
+ efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel)
efx_fini_rx_queue(rx_queue);
efx_for_each_possible_channel_tx_queue(tx_queue, channel)
@@ -779,7 +733,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
u32 old_rxq_entries, old_txq_entries;
unsigned i, next_buffer_table = 0;
- int rc;
+ int rc, rc2;
rc = efx_check_disabled(efx);
if (rc)
@@ -809,7 +763,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
efx_device_detach_sync(efx);
efx_stop_all(efx);
- efx_stop_interrupts(efx, true);
+ efx_soft_disable_interrupts(efx);
/* Clone channels (where possible) */
memset(other_channel, 0, sizeof(other_channel));
@@ -859,9 +813,16 @@ out:
}
}
- efx_start_interrupts(efx, true);
- efx_start_all(efx);
- netif_device_attach(efx->net_dev);
+ rc2 = efx_soft_enable_interrupts(efx);
+ if (rc2) {
+ rc = rc ? rc : rc2;
+ netif_err(efx, drv, efx->net_dev,
+ "unable to restart interrupts on channel reallocation\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ } else {
+ efx_start_all(efx);
+ netif_device_attach(efx->net_dev);
+ }
return rc;
rollback:
@@ -931,10 +892,9 @@ void efx_link_status_changed(struct efx_nic *efx)
/* Status message for kernel log */
if (link_state->up)
netif_info(efx, link, efx->net_dev,
- "link up at %uMbps %s-duplex (MTU %d)%s\n",
+ "link up at %uMbps %s-duplex (MTU %d)\n",
link_state->speed, link_state->fd ? "full" : "half",
- efx->net_dev->mtu,
- (efx->promiscuous ? " [PROMISC]" : ""));
+ efx->net_dev->mtu);
else
netif_info(efx, link, efx->net_dev, "link down\n");
}
@@ -983,10 +943,6 @@ int __efx_reconfigure_port(struct efx_nic *efx)
WARN_ON(!mutex_is_locked(&efx->mac_lock));
- /* Serialise the promiscuous flag with efx_set_rx_mode. */
- netif_addr_lock_bh(efx->net_dev);
- netif_addr_unlock_bh(efx->net_dev);
-
/* Disable PHY transmit in mac level loopbacks */
phy_mode = efx->phy_mode;
if (LOOPBACK_INTERNAL(efx))
@@ -1144,6 +1100,7 @@ static int efx_init_io(struct efx_nic *efx)
{
struct pci_dev *pci_dev = efx->pci_dev;
dma_addr_t dma_mask = efx->type->max_dma_mask;
+ unsigned int mem_map_size = efx->type->mem_map_size(efx);
int rc;
netif_dbg(efx, probe, efx->net_dev, "initialising I/O\n");
@@ -1196,20 +1153,18 @@ static int efx_init_io(struct efx_nic *efx)
rc = -EIO;
goto fail3;
}
- efx->membase = ioremap_nocache(efx->membase_phys,
- efx->type->mem_map_size);
+ efx->membase = ioremap_nocache(efx->membase_phys, mem_map_size);
if (!efx->membase) {
netif_err(efx, probe, efx->net_dev,
"could not map memory BAR at %llx+%x\n",
- (unsigned long long)efx->membase_phys,
- efx->type->mem_map_size);
+ (unsigned long long)efx->membase_phys, mem_map_size);
rc = -ENOMEM;
goto fail4;
}
netif_dbg(efx, probe, efx->net_dev,
"memory BAR at %llx+%x (virtual %p)\n",
- (unsigned long long)efx->membase_phys,
- efx->type->mem_map_size, efx->membase);
+ (unsigned long long)efx->membase_phys, mem_map_size,
+ efx->membase);
return 0;
@@ -1288,8 +1243,6 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
*/
static int efx_probe_interrupts(struct efx_nic *efx)
{
- unsigned int max_channels =
- min(efx->type->phys_addr_channels, EFX_MAX_CHANNELS);
unsigned int extra_channels = 0;
unsigned int i, j;
int rc;
@@ -1306,7 +1259,7 @@ static int efx_probe_interrupts(struct efx_nic *efx)
if (separate_tx_channels)
n_channels *= 2;
n_channels += extra_channels;
- n_channels = min(n_channels, max_channels);
+ n_channels = min(n_channels, efx->max_channels);
for (i = 0; i < n_channels; i++)
xentries[i].entry = i;
@@ -1392,31 +1345,42 @@ static int efx_probe_interrupts(struct efx_nic *efx)
return 0;
}
-/* Enable interrupts, then probe and start the event queues */
-static void efx_start_interrupts(struct efx_nic *efx, bool may_keep_eventq)
+static int efx_soft_enable_interrupts(struct efx_nic *efx)
{
- struct efx_channel *channel;
+ struct efx_channel *channel, *end_channel;
+ int rc;
BUG_ON(efx->state == STATE_DISABLED);
- if (efx->eeh_disabled_legacy_irq) {
- enable_irq(efx->legacy_irq);
- efx->eeh_disabled_legacy_irq = false;
- }
- if (efx->legacy_irq)
- efx->legacy_irq_enabled = true;
- efx_nic_enable_interrupts(efx);
+ efx->irq_soft_enabled = true;
+ smp_wmb();
efx_for_each_channel(channel, efx) {
- if (!channel->type->keep_eventq || !may_keep_eventq)
- efx_init_eventq(channel);
+ if (!channel->type->keep_eventq) {
+ rc = efx_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
efx_start_eventq(channel);
}
efx_mcdi_mode_event(efx);
+
+ return 0;
+fail:
+ end_channel = channel;
+ efx_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ efx_stop_eventq(channel);
+ if (!channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ return rc;
}
-static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
+static void efx_soft_disable_interrupts(struct efx_nic *efx)
{
struct efx_channel *channel;
@@ -1425,20 +1389,79 @@ static void efx_stop_interrupts(struct efx_nic *efx, bool may_keep_eventq)
efx_mcdi_mode_poll(efx);
- efx_nic_disable_interrupts(efx);
- if (efx->legacy_irq) {
+ efx->irq_soft_enabled = false;
+ smp_wmb();
+
+ if (efx->legacy_irq)
synchronize_irq(efx->legacy_irq);
- efx->legacy_irq_enabled = false;
- }
efx_for_each_channel(channel, efx) {
if (channel->irq)
synchronize_irq(channel->irq);
efx_stop_eventq(channel);
- if (!channel->type->keep_eventq || !may_keep_eventq)
+ if (!channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ /* Flush the asynchronous MCDI request queue */
+ efx_mcdi_flush_async(efx);
+}
+
+static int efx_enable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel, *end_channel;
+ int rc;
+
+ BUG_ON(efx->state == STATE_DISABLED);
+
+ if (efx->eeh_disabled_legacy_irq) {
+ enable_irq(efx->legacy_irq);
+ efx->eeh_disabled_legacy_irq = false;
+ }
+
+ efx->type->irq_enable_master(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq) {
+ rc = efx_init_eventq(channel);
+ if (rc)
+ goto fail;
+ }
+ }
+
+ rc = efx_soft_enable_interrupts(efx);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ end_channel = channel;
+ efx_for_each_channel(channel, efx) {
+ if (channel == end_channel)
+ break;
+ if (channel->type->keep_eventq)
efx_fini_eventq(channel);
}
+
+ efx->type->irq_disable_non_ev(efx);
+
+ return rc;
+}
+
+static void efx_disable_interrupts(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+
+ efx_soft_disable_interrupts(efx);
+
+ efx_for_each_channel(channel, efx) {
+ if (channel->type->keep_eventq)
+ efx_fini_eventq(channel);
+ }
+
+ efx->type->irq_disable_non_ev(efx);
}
static void efx_remove_interrupts(struct efx_nic *efx)
@@ -1495,9 +1518,11 @@ static int efx_probe_nic(struct efx_nic *efx)
* in MSI-X interrupts. */
rc = efx_probe_interrupts(efx);
if (rc)
- goto fail;
+ goto fail1;
- efx->type->dimension_resources(efx);
+ rc = efx->type->dimension_resources(efx);
+ if (rc)
+ goto fail2;
if (efx->n_channels > 1)
get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
@@ -1515,7 +1540,9 @@ static int efx_probe_nic(struct efx_nic *efx)
return 0;
-fail:
+fail2:
+ efx_remove_interrupts(efx);
+fail1:
efx->type->remove(efx);
return rc;
}
@@ -1528,6 +1555,44 @@ static void efx_remove_nic(struct efx_nic *efx)
efx->type->remove(efx);
}
+static int efx_probe_filters(struct efx_nic *efx)
+{
+ int rc;
+
+ spin_lock_init(&efx->filter_lock);
+
+ rc = efx->type->filter_table_probe(efx);
+ if (rc)
+ return rc;
+
+#ifdef CONFIG_RFS_ACCEL
+ if (efx->type->offload_features & NETIF_F_NTUPLE) {
+ efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
+ sizeof(*efx->rps_flow_id),
+ GFP_KERNEL);
+ if (!efx->rps_flow_id) {
+ efx->type->filter_table_remove(efx);
+ return -ENOMEM;
+ }
+ }
+#endif
+
+ return 0;
+}
+
+static void efx_remove_filters(struct efx_nic *efx)
+{
+#ifdef CONFIG_RFS_ACCEL
+ kfree(efx->rps_flow_id);
+#endif
+ efx->type->filter_table_remove(efx);
+}
+
+static void efx_restore_filters(struct efx_nic *efx)
+{
+ efx->type->filter_table_restore(efx);
+}
+
/**************************************************************************
*
* NIC startup/shutdown
@@ -1917,34 +1982,9 @@ static struct rtnl_link_stats64 *efx_net_stats(struct net_device *net_dev,
struct rtnl_link_stats64 *stats)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_mac_stats *mac_stats = &efx->mac_stats;
spin_lock_bh(&efx->stats_lock);
-
- efx->type->update_stats(efx);
-
- stats->rx_packets = mac_stats->rx_packets;
- stats->tx_packets = mac_stats->tx_packets;
- stats->rx_bytes = mac_stats->rx_bytes;
- stats->tx_bytes = mac_stats->tx_bytes;
- stats->rx_dropped = efx->n_rx_nodesc_drop_cnt;
- stats->multicast = mac_stats->rx_multicast;
- stats->collisions = mac_stats->tx_collision;
- stats->rx_length_errors = (mac_stats->rx_gtjumbo +
- mac_stats->rx_length_error);
- stats->rx_crc_errors = mac_stats->rx_bad;
- stats->rx_frame_errors = mac_stats->rx_align_error;
- stats->rx_fifo_errors = mac_stats->rx_overflow;
- stats->rx_missed_errors = mac_stats->rx_missed;
- stats->tx_window_errors = mac_stats->tx_late_collision;
-
- stats->rx_errors = (stats->rx_length_errors +
- stats->rx_crc_errors +
- stats->rx_frame_errors +
- mac_stats->rx_symbol_error);
- stats->tx_errors = (stats->tx_window_errors +
- mac_stats->tx_bad);
-
+ efx->type->update_stats(efx, NULL, stats);
spin_unlock_bh(&efx->stats_lock);
return stats;
@@ -2018,30 +2058,6 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
static void efx_set_rx_mode(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct netdev_hw_addr *ha;
- union efx_multicast_hash *mc_hash = &efx->multicast_hash;
- u32 crc;
- int bit;
-
- efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
-
- /* Build multicast hash table */
- if (efx->promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
- memset(mc_hash, 0xff, sizeof(*mc_hash));
- } else {
- memset(mc_hash, 0x00, sizeof(*mc_hash));
- netdev_for_each_mc_addr(ha, net_dev) {
- crc = ether_crc_le(ETH_ALEN, ha->addr);
- bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
- __set_bit_le(bit, mc_hash);
- }
-
- /* Broadcast packets go through the multicast hash filter.
- * ether_crc_le() of the broadcast address is 0xbe2612ff
- * so we always add bit 0xff to the mask.
- */
- __set_bit_le(0xff, mc_hash);
- }
if (efx->port_enabled)
queue_work(efx->workqueue, &efx->mac_work);
@@ -2059,7 +2075,7 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
return 0;
}
-static const struct net_device_ops efx_netdev_ops = {
+static const struct net_device_ops efx_farch_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
.ndo_get_stats64 = efx_net_stats,
@@ -2086,6 +2102,26 @@ static const struct net_device_ops efx_netdev_ops = {
#endif
};
+static const struct net_device_ops efx_ef10_netdev_ops = {
+ .ndo_open = efx_net_open,
+ .ndo_stop = efx_net_stop,
+ .ndo_get_stats64 = efx_net_stats,
+ .ndo_tx_timeout = efx_watchdog,
+ .ndo_start_xmit = efx_hard_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = efx_ioctl,
+ .ndo_change_mtu = efx_change_mtu,
+ .ndo_set_mac_address = efx_set_mac_address,
+ .ndo_set_rx_mode = efx_set_rx_mode,
+ .ndo_set_features = efx_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = efx_netpoll,
+#endif
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = efx_filter_rfs,
+#endif
+};
+
static void efx_update_name(struct efx_nic *efx)
{
strcpy(efx->name, efx->net_dev->name);
@@ -2098,7 +2134,8 @@ static int efx_netdev_event(struct notifier_block *this,
{
struct net_device *net_dev = netdev_notifier_info_to_dev(ptr);
- if (net_dev->netdev_ops == &efx_netdev_ops &&
+ if ((net_dev->netdev_ops == &efx_farch_netdev_ops ||
+ net_dev->netdev_ops == &efx_ef10_netdev_ops) &&
event == NETDEV_CHANGENAME)
efx_update_name(netdev_priv(net_dev));
@@ -2125,7 +2162,12 @@ static int efx_register_netdev(struct efx_nic *efx)
net_dev->watchdog_timeo = 5 * HZ;
net_dev->irq = efx->pci_dev->irq;
- net_dev->netdev_ops = &efx_netdev_ops;
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
+ net_dev->netdev_ops = &efx_ef10_netdev_ops;
+ net_dev->priv_flags |= IFF_UNICAST_FLT;
+ } else {
+ net_dev->netdev_ops = &efx_farch_netdev_ops;
+ }
SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
@@ -2185,22 +2227,11 @@ fail_locked:
static void efx_unregister_netdev(struct efx_nic *efx)
{
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
-
if (!efx->net_dev)
return;
BUG_ON(netdev_priv(efx->net_dev) != efx);
- /* Free up any skbs still remaining. This has to happen before
- * we try to unregister the netdev as running their destructors
- * may be needed to get the device ref. count to 0. */
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel)
- efx_release_tx_buffers(tx_queue);
- }
-
strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
@@ -2223,7 +2254,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
EFX_ASSERT_RESET_SERIALISED(efx);
efx_stop_all(efx);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
mutex_lock(&efx->mac_lock);
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
@@ -2260,9 +2291,9 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
"could not restore PHY settings\n");
}
- efx->type->reconfigure_mac(efx);
-
- efx_start_interrupts(efx, false);
+ rc = efx_enable_interrupts(efx);
+ if (rc)
+ goto fail;
efx_restore_filters(efx);
efx_sriov_reset(efx);
@@ -2458,6 +2489,8 @@ static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
.driver_data = (unsigned long) &siena_a0_nic_type},
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */
.driver_data = (unsigned long) &siena_a0_nic_type},
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903), /* SFC9120 PF */
+ .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
{0} /* end of list */
};
@@ -2516,6 +2549,9 @@ static int efx_init_struct(struct efx_nic *efx,
strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
efx->net_dev = net_dev;
+ efx->rx_prefix_size = efx->type->rx_prefix_size;
+ efx->rx_packet_hash_offset =
+ efx->type->rx_hash_offset - efx->type->rx_prefix_size;
spin_lock_init(&efx->stats_lock);
mutex_init(&efx->mac_lock);
efx->phy_op = &efx_dummy_phy_operations;
@@ -2527,10 +2563,10 @@ static int efx_init_struct(struct efx_nic *efx,
efx->channel[i] = efx_alloc_channel(efx, i, NULL);
if (!efx->channel[i])
goto fail;
+ efx->msi_context[i].efx = efx;
+ efx->msi_context[i].index = i;
}
- EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
-
/* Higher numbered interrupt modes are less capable! */
efx->interrupt_mode = max(efx->type->max_interrupt_mode,
interrupt_mode);
@@ -2579,7 +2615,7 @@ static void efx_pci_remove_main(struct efx_nic *efx)
BUG_ON(efx->state == STATE_READY);
cancel_work_sync(&efx->reset_work);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
efx_nic_fini_interrupt(efx);
efx_fini_port(efx);
efx->type->fini(efx);
@@ -2601,7 +2637,7 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
/* Mark the NIC as fini, then stop the interface */
rtnl_lock();
dev_close(efx->net_dev);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
rtnl_unlock();
efx_sriov_fini(efx);
@@ -2703,10 +2739,14 @@ static int efx_pci_probe_main(struct efx_nic *efx)
rc = efx_nic_init_interrupt(efx);
if (rc)
goto fail5;
- efx_start_interrupts(efx, false);
+ rc = efx_enable_interrupts(efx);
+ if (rc)
+ goto fail6;
return 0;
+ fail6:
+ efx_nic_fini_interrupt(efx);
fail5:
efx_fini_port(efx);
fail4:
@@ -2824,7 +2864,7 @@ static int efx_pm_freeze(struct device *dev)
efx_device_detach_sync(efx);
efx_stop_all(efx);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
}
rtnl_unlock();
@@ -2834,12 +2874,15 @@ static int efx_pm_freeze(struct device *dev)
static int efx_pm_thaw(struct device *dev)
{
+ int rc;
struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
rtnl_lock();
if (efx->state != STATE_DISABLED) {
- efx_start_interrupts(efx, false);
+ rc = efx_enable_interrupts(efx);
+ if (rc)
+ goto fail;
mutex_lock(&efx->mac_lock);
efx->phy_op->reconfigure(efx);
@@ -2860,6 +2903,11 @@ static int efx_pm_thaw(struct device *dev)
queue_work(reset_workqueue, &efx->reset_work);
return 0;
+
+fail:
+ rtnl_unlock();
+
+ return rc;
}
static int efx_pm_poweroff(struct device *dev)
@@ -2896,8 +2944,8 @@ static int efx_pm_resume(struct device *dev)
rc = efx->type->init(efx);
if (rc)
return rc;
- efx_pm_thaw(dev);
- return 0;
+ rc = efx_pm_thaw(dev);
+ return rc;
}
static int efx_pm_suspend(struct device *dev)
@@ -2942,7 +2990,7 @@ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
efx_device_detach_sync(efx);
efx_stop_all(efx);
- efx_stop_interrupts(efx, false);
+ efx_disable_interrupts(efx);
status = PCI_ERS_RESULT_NEED_RESET;
} else {
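The efx.c changes above split interrupt handling into soft and hard enable/disable paths, and both efx_soft_enable_interrupts() and efx_enable_interrupts() gain an unwind path that tears down only the channels brought up before the first failure (the end_channel loops). Below is a minimal stand-alone sketch of that partial-initialisation rollback idiom; init_unit(), fini_unit() and N_UNITS are hypothetical names used for illustration only, not driver APIs.

/* Illustrative only: the "initialise each unit, unwind on first failure"
 * idiom used by efx_soft_enable_interrupts() above, in isolation.
 */
#include <stdio.h>

#define N_UNITS 4

static int init_unit(int i)
{
	/* pretend unit 2 fails to initialise */
	return (i == 2) ? -1 : 0;
}

static void fini_unit(int i)
{
	printf("fini unit %d\n", i);
}

static int enable_all(void)
{
	int i, rc;

	for (i = 0; i < N_UNITS; i++) {
		rc = init_unit(i);
		if (rc)
			goto fail;
	}
	return 0;

fail:
	/* Unwind only the units that came up before the failure,
	 * mirroring the end_channel loop in the driver.
	 */
	while (i-- > 0)
		fini_unit(i);
	return rc;
}

int main(void)
{
	return enable_all() ? 1 : 0;
}

With the sketch's fault injected at unit 2, only units 1 and 0 are finalised, which is exactly the property the driver relies on when an event queue fails to initialise part-way through.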
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index bdb30bbb0c9..34d00f5771f 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -23,7 +23,6 @@ extern void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_init_tx_queue(struct efx_tx_queue *tx_queue);
extern void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
extern void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
-extern void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
extern netdev_tx_t
efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
extern netdev_tx_t
@@ -69,27 +68,99 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx))
/* Filters */
-extern int efx_probe_filters(struct efx_nic *efx);
-extern void efx_restore_filters(struct efx_nic *efx);
-extern void efx_remove_filters(struct efx_nic *efx);
-extern void efx_filter_update_rx_scatter(struct efx_nic *efx);
-extern s32 efx_filter_insert_filter(struct efx_nic *efx,
- struct efx_filter_spec *spec,
- bool replace);
-extern int efx_filter_remove_id_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id);
-extern int efx_filter_get_filter_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id, struct efx_filter_spec *);
-extern void efx_filter_clear_rx(struct efx_nic *efx,
- enum efx_filter_priority priority);
-extern u32 efx_filter_count_rx_used(struct efx_nic *efx,
- enum efx_filter_priority priority);
-extern u32 efx_filter_get_rx_id_limit(struct efx_nic *efx);
-extern s32 efx_filter_get_rx_ids(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 *buf, u32 size);
+
+/**
+ * efx_filter_insert_filter - add or replace a filter
+ * @efx: NIC in which to insert the filter
+ * @spec: Specification for the filter
+ * @replace_equal: Flag for whether the specified filter may replace an
+ * existing filter with equal priority
+ *
+ * On success, return the filter ID.
+ * On failure, return a negative error code.
+ *
+ * If existing filters have equal match values to the new filter spec,
+ * then the new filter might replace them or the function might fail,
+ * as follows.
+ *
+ * 1. If the existing filters have lower priority, or @replace_equal
+ * is set and they have equal priority, replace them.
+ *
+ * 2. If the existing filters have higher priority, return -%EPERM.
+ *
+ * 3. If !efx_filter_is_mc_recipient(@spec), or the NIC does not
+ * support delivery to multiple recipients, return -%EEXIST.
+ *
+ * This implies that filters for multiple multicast recipients must
+ * all be inserted with the same priority and @replace_equal = %false.
+ */
+static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
+ struct efx_filter_spec *spec,
+ bool replace_equal)
+{
+ return efx->type->filter_insert(efx, spec, replace_equal);
+}
+
+/**
+ * efx_filter_remove_id_safe - remove a filter by ID, carefully
+ * @efx: NIC from which to remove the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ return efx->type->filter_remove_safe(efx, priority, filter_id);
+}
+
+/**
+ * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
+ * @efx: NIC from which to retrieve the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ * @spec: Buffer in which to store filter specification
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+static inline int
+efx_filter_get_filter_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec)
+{
+ return efx->type->filter_get_safe(efx, priority, filter_id, spec);
+}
+
+/**
+ * efx_filter_clear_rx - remove RX filters by priority
+ * @efx: NIC from which to remove the filters
+ * @priority: Maximum priority to remove
+ */
+static inline void efx_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ return efx->type->filter_clear_rx(efx, priority);
+}
+
+static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ return efx->type->filter_count_rx_used(efx, priority);
+}
+static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ return efx->type->filter_get_rx_id_limit(efx);
+}
+static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ return efx->type->filter_get_rx_ids(efx, priority, buf, size);
+}
#ifdef CONFIG_RFS_ACCEL
extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
@@ -105,11 +176,11 @@ static inline void efx_filter_rfs_expire(struct efx_channel *channel)
static inline void efx_filter_rfs_expire(struct efx_channel *channel) {}
#define efx_filter_rfs_enabled() 0
#endif
+extern bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec);
/* Channels */
extern int efx_channel_dummy_op_int(struct efx_channel *channel);
extern void efx_channel_dummy_op_void(struct efx_channel *channel);
-extern void efx_process_channel_now(struct efx_channel *channel);
extern int
efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries);
@@ -141,7 +212,12 @@ extern void efx_port_dummy_op_void(struct efx_nic *efx);
/* MTD */
#ifdef CONFIG_SFC_MTD
-extern int efx_mtd_probe(struct efx_nic *efx);
+extern int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+ size_t n_parts, size_t sizeof_part);
+static inline int efx_mtd_probe(struct efx_nic *efx)
+{
+ return efx->type->mtd_probe(efx);
+}
extern void efx_mtd_rename(struct efx_nic *efx);
extern void efx_mtd_remove(struct efx_nic *efx);
#else
@@ -155,7 +231,6 @@ static inline void efx_schedule_channel(struct efx_channel *channel)
netif_vdbg(channel->efx, intr, channel->efx->net_dev,
"channel %d scheduling NAPI poll on CPU%d\n",
channel->channel, raw_smp_processor_id());
- channel->work_pending = true;
napi_schedule(&channel->napi_str);
}
diff --git a/drivers/net/ethernet/sfc/enum.h b/drivers/net/ethernet/sfc/enum.h
index ab8fb5889e5..7fdfee01909 100644
--- a/drivers/net/ethernet/sfc/enum.h
+++ b/drivers/net/ethernet/sfc/enum.h
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2009 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2007-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -147,8 +147,7 @@ enum efx_loopback_mode {
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
* @RESET_TYPE_INT_ERROR: reset due to internal error
* @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
- * @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch
- * @RESET_TYPE_TX_DESC_FETCH: pcie error during tx descriptor fetch
+ * @RESET_TYPE_DMA_ERROR: DMA error
* @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
* @RESET_TYPE_MC_FAILURE: MC reboot/assertion
*/
@@ -163,8 +162,7 @@ enum reset_type {
RESET_TYPE_TX_WATCHDOG,
RESET_TYPE_INT_ERROR,
RESET_TYPE_RX_RECOVERY,
- RESET_TYPE_RX_DESC_FETCH,
- RESET_TYPE_TX_DESC_FETCH,
+ RESET_TYPE_DMA_ERROR,
RESET_TYPE_TX_SKIP,
RESET_TYPE_MC_FAILURE,
RESET_TYPE_MAX,
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 1fc21458413..5b471cf5c32 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -19,14 +19,9 @@
#include "filter.h"
#include "nic.h"
-struct ethtool_string {
- char name[ETH_GSTRING_LEN];
-};
-
-struct efx_ethtool_stat {
+struct efx_sw_stat_desc {
const char *name;
enum {
- EFX_ETHTOOL_STAT_SOURCE_mac_stats,
EFX_ETHTOOL_STAT_SOURCE_nic,
EFX_ETHTOOL_STAT_SOURCE_channel,
EFX_ETHTOOL_STAT_SOURCE_tx_queue
@@ -35,7 +30,7 @@ struct efx_ethtool_stat {
u64(*get_stat) (void *field); /* Reader function */
};
-/* Initialiser for a struct #efx_ethtool_stat with type-checking */
+/* Initialiser for a struct efx_sw_stat_desc with type-checking */
#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
get_stat_function) { \
.name = #stat_name, \
@@ -52,24 +47,11 @@ static u64 efx_get_uint_stat(void *field)
return *(unsigned int *)field;
}
-static u64 efx_get_u64_stat(void *field)
-{
- return *(u64 *) field;
-}
-
static u64 efx_get_atomic_stat(void *field)
{
return atomic_read((atomic_t *) field);
}
-#define EFX_ETHTOOL_U64_MAC_STAT(field) \
- EFX_ETHTOOL_STAT(field, mac_stats, field, \
- u64, efx_get_u64_stat)
-
-#define EFX_ETHTOOL_UINT_NIC_STAT(name) \
- EFX_ETHTOOL_STAT(name, nic, n_##name, \
- unsigned int, efx_get_uint_stat)
-
#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \
EFX_ETHTOOL_STAT(field, nic, field, \
atomic_t, efx_get_atomic_stat)
@@ -82,72 +64,12 @@ static u64 efx_get_atomic_stat(void *field)
EFX_ETHTOOL_STAT(tx_##field, tx_queue, field, \
unsigned int, efx_get_uint_stat)
-static const struct efx_ethtool_stat efx_ethtool_stats[] = {
- EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(tx_packets),
- EFX_ETHTOOL_U64_MAC_STAT(tx_bad),
- EFX_ETHTOOL_U64_MAC_STAT(tx_pause),
- EFX_ETHTOOL_U64_MAC_STAT(tx_control),
- EFX_ETHTOOL_U64_MAC_STAT(tx_unicast),
- EFX_ETHTOOL_U64_MAC_STAT(tx_multicast),
- EFX_ETHTOOL_U64_MAC_STAT(tx_broadcast),
- EFX_ETHTOOL_U64_MAC_STAT(tx_lt64),
- EFX_ETHTOOL_U64_MAC_STAT(tx_64),
- EFX_ETHTOOL_U64_MAC_STAT(tx_65_to_127),
- EFX_ETHTOOL_U64_MAC_STAT(tx_128_to_255),
- EFX_ETHTOOL_U64_MAC_STAT(tx_256_to_511),
- EFX_ETHTOOL_U64_MAC_STAT(tx_512_to_1023),
- EFX_ETHTOOL_U64_MAC_STAT(tx_1024_to_15xx),
- EFX_ETHTOOL_U64_MAC_STAT(tx_15xx_to_jumbo),
- EFX_ETHTOOL_U64_MAC_STAT(tx_gtjumbo),
- EFX_ETHTOOL_U64_MAC_STAT(tx_collision),
- EFX_ETHTOOL_U64_MAC_STAT(tx_single_collision),
- EFX_ETHTOOL_U64_MAC_STAT(tx_multiple_collision),
- EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_collision),
- EFX_ETHTOOL_U64_MAC_STAT(tx_deferred),
- EFX_ETHTOOL_U64_MAC_STAT(tx_late_collision),
- EFX_ETHTOOL_U64_MAC_STAT(tx_excessive_deferred),
- EFX_ETHTOOL_U64_MAC_STAT(tx_non_tcpudp),
- EFX_ETHTOOL_U64_MAC_STAT(tx_mac_src_error),
- EFX_ETHTOOL_U64_MAC_STAT(tx_ip_src_error),
+static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
+ EFX_ETHTOOL_UINT_TXQ_STAT(merge_events),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_bursts),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_long_headers),
EFX_ETHTOOL_UINT_TXQ_STAT(tso_packets),
EFX_ETHTOOL_UINT_TXQ_STAT(pushes),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
- EFX_ETHTOOL_U64_MAC_STAT(rx_packets),
- EFX_ETHTOOL_U64_MAC_STAT(rx_good),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad),
- EFX_ETHTOOL_U64_MAC_STAT(rx_pause),
- EFX_ETHTOOL_U64_MAC_STAT(rx_control),
- EFX_ETHTOOL_U64_MAC_STAT(rx_unicast),
- EFX_ETHTOOL_U64_MAC_STAT(rx_multicast),
- EFX_ETHTOOL_U64_MAC_STAT(rx_broadcast),
- EFX_ETHTOOL_U64_MAC_STAT(rx_lt64),
- EFX_ETHTOOL_U64_MAC_STAT(rx_64),
- EFX_ETHTOOL_U64_MAC_STAT(rx_65_to_127),
- EFX_ETHTOOL_U64_MAC_STAT(rx_128_to_255),
- EFX_ETHTOOL_U64_MAC_STAT(rx_256_to_511),
- EFX_ETHTOOL_U64_MAC_STAT(rx_512_to_1023),
- EFX_ETHTOOL_U64_MAC_STAT(rx_1024_to_15xx),
- EFX_ETHTOOL_U64_MAC_STAT(rx_15xx_to_jumbo),
- EFX_ETHTOOL_U64_MAC_STAT(rx_gtjumbo),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad_lt64),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad_64_to_15xx),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad_15xx_to_jumbo),
- EFX_ETHTOOL_U64_MAC_STAT(rx_bad_gtjumbo),
- EFX_ETHTOOL_U64_MAC_STAT(rx_overflow),
- EFX_ETHTOOL_U64_MAC_STAT(rx_missed),
- EFX_ETHTOOL_U64_MAC_STAT(rx_false_carrier),
- EFX_ETHTOOL_U64_MAC_STAT(rx_symbol_error),
- EFX_ETHTOOL_U64_MAC_STAT(rx_align_error),
- EFX_ETHTOOL_U64_MAC_STAT(rx_length_error),
- EFX_ETHTOOL_U64_MAC_STAT(rx_internal_error),
- EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt),
EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
@@ -155,10 +77,11 @@ static const struct efx_ethtool_stat efx_ethtool_stats[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
+ EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
};
-/* Number of ethtool statistics */
-#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
+#define EFX_ETHTOOL_SW_STAT_COUNT ARRAY_SIZE(efx_sw_stat_desc)
#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
@@ -205,8 +128,6 @@ static int efx_ethtool_get_settings(struct net_device *net_dev,
efx->phy_op->get_settings(efx, ecmd);
mutex_unlock(&efx->mac_lock);
- /* GMAC does not support 1000Mbps HD */
- ecmd->supported &= ~SUPPORTED_1000baseT_Half;
/* Both MACs support pause frames (bidirectional and respond-only) */
ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
@@ -291,12 +212,11 @@ static void efx_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable)
*
* Fill in an individual self-test entry.
*/
-static void efx_fill_test(unsigned int test_index,
- struct ethtool_string *strings, u64 *data,
+static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
int *test, const char *unit_format, int unit_id,
const char *test_format, const char *test_id)
{
- struct ethtool_string unit_str, test_str;
+ char unit_str[ETH_GSTRING_LEN], test_str[ETH_GSTRING_LEN];
/* Fill data value, if applicable */
if (data)
@@ -305,15 +225,14 @@ static void efx_fill_test(unsigned int test_index,
/* Fill string, if applicable */
if (strings) {
if (strchr(unit_format, '%'))
- snprintf(unit_str.name, sizeof(unit_str.name),
+ snprintf(unit_str, sizeof(unit_str),
unit_format, unit_id);
else
- strcpy(unit_str.name, unit_format);
- snprintf(test_str.name, sizeof(test_str.name),
- test_format, test_id);
- snprintf(strings[test_index].name,
- sizeof(strings[test_index].name),
- "%-6s %-24s", unit_str.name, test_str.name);
+ strcpy(unit_str, unit_format);
+ snprintf(test_str, sizeof(test_str), test_format, test_id);
+ snprintf(strings + test_index * ETH_GSTRING_LEN,
+ ETH_GSTRING_LEN,
+ "%-6s %-24s", unit_str, test_str);
}
}
@@ -336,7 +255,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
struct efx_loopback_self_tests *lb_tests,
enum efx_loopback_mode mode,
unsigned int test_index,
- struct ethtool_string *strings, u64 *data)
+ u8 *strings, u64 *data)
{
struct efx_channel *channel =
efx_get_channel(efx, efx->tx_channel_offset);
@@ -373,8 +292,7 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
*/
static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
struct efx_self_tests *tests,
- struct ethtool_string *strings,
- u64 *data)
+ u8 *strings, u64 *data)
{
struct efx_channel *channel;
unsigned int n = 0, i;
@@ -433,12 +351,14 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
static int efx_ethtool_get_sset_count(struct net_device *net_dev,
int string_set)
{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
switch (string_set) {
case ETH_SS_STATS:
- return EFX_ETHTOOL_NUM_STATS;
+ return efx->type->describe_stats(efx, NULL) +
+ EFX_ETHTOOL_SW_STAT_COUNT;
case ETH_SS_TEST:
- return efx_ethtool_fill_self_tests(netdev_priv(net_dev),
- NULL, NULL, NULL);
+ return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
default:
return -EINVAL;
}
@@ -448,20 +368,18 @@ static void efx_ethtool_get_strings(struct net_device *net_dev,
u32 string_set, u8 *strings)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct ethtool_string *ethtool_strings =
- (struct ethtool_string *)strings;
int i;
switch (string_set) {
case ETH_SS_STATS:
- for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
- strlcpy(ethtool_strings[i].name,
- efx_ethtool_stats[i].name,
- sizeof(ethtool_strings[i].name));
+ strings += (efx->type->describe_stats(efx, strings) *
+ ETH_GSTRING_LEN);
+ for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++)
+ strlcpy(strings + i * ETH_GSTRING_LEN,
+ efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
break;
case ETH_SS_TEST:
- efx_ethtool_fill_self_tests(efx, NULL,
- ethtool_strings, NULL);
+ efx_ethtool_fill_self_tests(efx, NULL, strings, NULL);
break;
default:
/* No other string sets */
@@ -474,27 +392,20 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
u64 *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_mac_stats *mac_stats = &efx->mac_stats;
- const struct efx_ethtool_stat *stat;
+ const struct efx_sw_stat_desc *stat;
struct efx_channel *channel;
struct efx_tx_queue *tx_queue;
int i;
- EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
-
spin_lock_bh(&efx->stats_lock);
- /* Update MAC and NIC statistics */
- efx->type->update_stats(efx);
+ /* Get NIC statistics */
+ data += efx->type->update_stats(efx, data, NULL);
- /* Fill detailed statistics buffer */
- for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
- stat = &efx_ethtool_stats[i];
+ /* Get software statistics */
+ for (i = 0; i < EFX_ETHTOOL_SW_STAT_COUNT; i++) {
+ stat = &efx_sw_stat_desc[i];
switch (stat->source) {
- case EFX_ETHTOOL_STAT_SOURCE_mac_stats:
- data[i] = stat->get_stat((void *)mac_stats +
- stat->offset);
- break;
case EFX_ETHTOOL_STAT_SOURCE_nic:
data[i] = stat->get_stat((void *)efx + stat->offset);
break;
@@ -709,7 +620,6 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
u8 wanted_fc, old_fc;
u32 old_adv;
- bool reset;
int rc = 0;
mutex_lock(&efx->mac_lock);
@@ -732,24 +642,10 @@ static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
goto out;
}
- /* TX flow control may automatically turn itself off if the
- * link partner (intermittently) stops responding to pause
- * frames. There isn't any indication that this has happened,
- * so the best we do is leave it up to the user to spot this
- * and fix it be cycling transmit flow control on this end. */
- reset = (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX);
- if (EFX_WORKAROUND_11482(efx) && reset) {
- if (efx_nic_rev(efx) == EFX_REV_FALCON_B0) {
- /* Recover by resetting the EM block */
- falcon_stop_nic_stats(efx);
- falcon_drain_tx_fifo(efx);
- falcon_reconfigure_xmac(efx);
- falcon_start_nic_stats(efx);
- } else {
- /* Schedule a reset to recover */
- efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
- }
- }
+ /* Hook for Falcon bug 11482 workaround */
+ if (efx->type->prepare_enable_fc_tx &&
+ (wanted_fc & EFX_FC_TX) && !(efx->wanted_fc & EFX_FC_TX))
+ efx->type->prepare_enable_fc_tx(efx);
old_adv = efx->link_advertising;
old_fc = efx->wanted_fc;
@@ -814,11 +710,12 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
return efx_reset(efx, rc);
}
-/* MAC address mask including only MC flag */
-static const u8 mac_addr_mc_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
+/* MAC address mask including only I/G bit */
+static const u8 mac_addr_ig_mask[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 };
#define IP4_ADDR_FULL_MASK ((__force __be32)~0)
#define PORT_FULL_MASK ((__force __be16)~0)
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
static int efx_ethtool_get_class_rule(struct efx_nic *efx,
struct ethtool_rx_flow_spec *rule)
@@ -828,8 +725,6 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
struct ethhdr *mac_entry = &rule->h_u.ether_spec;
struct ethhdr *mac_mask = &rule->m_u.ether_spec;
struct efx_filter_spec spec;
- u16 vid;
- u8 proto;
int rc;
rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
@@ -837,44 +732,72 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx,
if (rc)
return rc;
- if (spec.dmaq_id == 0xfff)
+ if (spec.dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
rule->ring_cookie = RX_CLS_FLOW_DISC;
else
rule->ring_cookie = spec.dmaq_id;
- if (spec.type == EFX_FILTER_MC_DEF || spec.type == EFX_FILTER_UC_DEF) {
- rule->flow_type = ETHER_FLOW;
- memcpy(mac_mask->h_dest, mac_addr_mc_mask, ETH_ALEN);
- if (spec.type == EFX_FILTER_MC_DEF)
- memcpy(mac_entry->h_dest, mac_addr_mc_mask, ETH_ALEN);
- return 0;
- }
-
- rc = efx_filter_get_eth_local(&spec, &vid, mac_entry->h_dest);
- if (rc == 0) {
+ if ((spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) &&
+ spec.ether_type == htons(ETH_P_IP) &&
+ (spec.match_flags & EFX_FILTER_MATCH_IP_PROTO) &&
+ (spec.ip_proto == IPPROTO_TCP || spec.ip_proto == IPPROTO_UDP) &&
+ !(spec.match_flags &
+ ~(EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_OUTER_VID |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_REM_HOST |
+ EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_PORT | EFX_FILTER_MATCH_REM_PORT))) {
+ rule->flow_type = ((spec.ip_proto == IPPROTO_TCP) ?
+ TCP_V4_FLOW : UDP_V4_FLOW);
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_HOST) {
+ ip_entry->ip4dst = spec.loc_host[0];
+ ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_HOST) {
+ ip_entry->ip4src = spec.rem_host[0];
+ ip_mask->ip4src = IP4_ADDR_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_PORT) {
+ ip_entry->pdst = spec.loc_port;
+ ip_mask->pdst = PORT_FULL_MASK;
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_PORT) {
+ ip_entry->psrc = spec.rem_port;
+ ip_mask->psrc = PORT_FULL_MASK;
+ }
+ } else if (!(spec.match_flags &
+ ~(EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG |
+ EFX_FILTER_MATCH_REM_MAC | EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_OUTER_VID))) {
rule->flow_type = ETHER_FLOW;
- memset(mac_mask->h_dest, ~0, ETH_ALEN);
- if (vid != EFX_FILTER_VID_UNSPEC) {
- rule->flow_type |= FLOW_EXT;
- rule->h_ext.vlan_tci = htons(vid);
- rule->m_ext.vlan_tci = htons(0xfff);
+ if (spec.match_flags &
+ (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG)) {
+ memcpy(mac_entry->h_dest, spec.loc_mac, ETH_ALEN);
+ if (spec.match_flags & EFX_FILTER_MATCH_LOC_MAC)
+ memset(mac_mask->h_dest, ~0, ETH_ALEN);
+ else
+ memcpy(mac_mask->h_dest, mac_addr_ig_mask,
+ ETH_ALEN);
}
- return 0;
+ if (spec.match_flags & EFX_FILTER_MATCH_REM_MAC) {
+ memcpy(mac_entry->h_source, spec.rem_mac, ETH_ALEN);
+ memset(mac_mask->h_source, ~0, ETH_ALEN);
+ }
+ if (spec.match_flags & EFX_FILTER_MATCH_ETHER_TYPE) {
+ mac_entry->h_proto = spec.ether_type;
+ mac_mask->h_proto = ETHER_TYPE_FULL_MASK;
+ }
+ } else {
+ /* The above should handle all filters that we insert */
+ WARN_ON(1);
+ return -EINVAL;
}
- rc = efx_filter_get_ipv4_local(&spec, &proto,
- &ip_entry->ip4dst, &ip_entry->pdst);
- if (rc != 0) {
- rc = efx_filter_get_ipv4_full(
- &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst,
- &ip_entry->ip4src, &ip_entry->psrc);
- EFX_WARN_ON_PARANOID(rc);
- ip_mask->ip4src = IP4_ADDR_FULL_MASK;
- ip_mask->psrc = PORT_FULL_MASK;
+ if (spec.match_flags & EFX_FILTER_MATCH_OUTER_VID) {
+ rule->flow_type |= FLOW_EXT;
+ rule->h_ext.vlan_tci = spec.outer_vid;
+ rule->m_ext.vlan_tci = htons(0xfff);
}
- rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW;
- ip_mask->ip4dst = IP4_ADDR_FULL_MASK;
- ip_mask->pdst = PORT_FULL_MASK;
+
return rc;
}
@@ -982,82 +905,80 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
(rule->ring_cookie == RX_CLS_FLOW_DISC) ?
- 0xfff : rule->ring_cookie);
+ EFX_FILTER_RX_DMAQ_ID_DROP : rule->ring_cookie);
- switch (rule->flow_type) {
+ switch (rule->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
- case UDP_V4_FLOW: {
- u8 proto = (rule->flow_type == TCP_V4_FLOW ?
- IPPROTO_TCP : IPPROTO_UDP);
-
- /* Must match all of destination, */
- if (!(ip_mask->ip4dst == IP4_ADDR_FULL_MASK &&
- ip_mask->pdst == PORT_FULL_MASK))
- return -EINVAL;
- /* all or none of source, */
- if ((ip_mask->ip4src || ip_mask->psrc) &&
- !(ip_mask->ip4src == IP4_ADDR_FULL_MASK &&
- ip_mask->psrc == PORT_FULL_MASK))
- return -EINVAL;
- /* and nothing else */
- if (ip_mask->tos || rule->m_ext.vlan_tci)
+ case UDP_V4_FLOW:
+ spec.match_flags = (EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_IP_PROTO);
+ spec.ether_type = htons(ETH_P_IP);
+ spec.ip_proto = ((rule->flow_type & ~FLOW_EXT) == TCP_V4_FLOW ?
+ IPPROTO_TCP : IPPROTO_UDP);
+ if (ip_mask->ip4dst) {
+ if (ip_mask->ip4dst != IP4_ADDR_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_HOST;
+ spec.loc_host[0] = ip_entry->ip4dst;
+ }
+ if (ip_mask->ip4src) {
+ if (ip_mask->ip4src != IP4_ADDR_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_HOST;
+ spec.rem_host[0] = ip_entry->ip4src;
+ }
+ if (ip_mask->pdst) {
+ if (ip_mask->pdst != PORT_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_PORT;
+ spec.loc_port = ip_entry->pdst;
+ }
+ if (ip_mask->psrc) {
+ if (ip_mask->psrc != PORT_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_PORT;
+ spec.rem_port = ip_entry->psrc;
+ }
+ if (ip_mask->tos)
return -EINVAL;
-
- if (ip_mask->ip4src)
- rc = efx_filter_set_ipv4_full(&spec, proto,
- ip_entry->ip4dst,
- ip_entry->pdst,
- ip_entry->ip4src,
- ip_entry->psrc);
- else
- rc = efx_filter_set_ipv4_local(&spec, proto,
- ip_entry->ip4dst,
- ip_entry->pdst);
- if (rc)
- return rc;
break;
- }
-
- case ETHER_FLOW | FLOW_EXT:
- case ETHER_FLOW: {
- u16 vlan_tag_mask = (rule->flow_type & FLOW_EXT ?
- ntohs(rule->m_ext.vlan_tci) : 0);
-
- /* Must not match on source address or Ethertype */
- if (!is_zero_ether_addr(mac_mask->h_source) ||
- mac_mask->h_proto)
- return -EINVAL;
- /* Is it a default UC or MC filter? */
- if (ether_addr_equal(mac_mask->h_dest, mac_addr_mc_mask) &&
- vlan_tag_mask == 0) {
- if (is_multicast_ether_addr(mac_entry->h_dest))
- rc = efx_filter_set_mc_def(&spec);
+ case ETHER_FLOW:
+ if (!is_zero_ether_addr(mac_mask->h_dest)) {
+ if (ether_addr_equal(mac_mask->h_dest,
+ mac_addr_ig_mask))
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+ else if (is_broadcast_ether_addr(mac_mask->h_dest))
+ spec.match_flags |= EFX_FILTER_MATCH_LOC_MAC;
else
- rc = efx_filter_set_uc_def(&spec);
+ return -EINVAL;
+ memcpy(spec.loc_mac, mac_entry->h_dest, ETH_ALEN);
}
- /* Otherwise, it must match all of destination and all
- * or none of VID.
- */
- else if (is_broadcast_ether_addr(mac_mask->h_dest) &&
- (vlan_tag_mask == 0xfff || vlan_tag_mask == 0)) {
- rc = efx_filter_set_eth_local(
- &spec,
- vlan_tag_mask ?
- ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC,
- mac_entry->h_dest);
- } else {
- rc = -EINVAL;
+ if (!is_zero_ether_addr(mac_mask->h_source)) {
+ if (!is_broadcast_ether_addr(mac_mask->h_source))
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_REM_MAC;
+ memcpy(spec.rem_mac, mac_entry->h_source, ETH_ALEN);
+ }
+ if (mac_mask->h_proto) {
+ if (mac_mask->h_proto != ETHER_TYPE_FULL_MASK)
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_ETHER_TYPE;
+ spec.ether_type = mac_entry->h_proto;
}
- if (rc)
- return rc;
break;
- }
default:
return -EINVAL;
}
+ if ((rule->flow_type & FLOW_EXT) && rule->m_ext.vlan_tci) {
+ if (rule->m_ext.vlan_tci != htons(0xfff))
+ return -EINVAL;
+ spec.match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ spec.outer_vid = rule->h_ext.vlan_tci;
+ }
+
rc = efx_filter_insert_filter(efx, &spec, true);
if (rc < 0)
return rc;
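The ethtool changes above drop the struct ethtool_string wrapper and write statistic and self-test names straight into the flat u8 buffer supplied by the ethtool core, one ETH_GSTRING_LEN-sized slot per entry (see the reworked efx_fill_test() and efx_ethtool_get_strings()). A minimal stand-alone sketch of that stride-based layout, assuming the usual ETH_GSTRING_LEN value of 32:

/* Illustrative only: filling names at fixed ETH_GSTRING_LEN strides.
 * Built stand-alone, so ETH_GSTRING_LEN is defined locally.
 */
#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN 32

int main(void)
{
	static const char *names[] = { "tx_merge_events", "tso_bursts" };
	unsigned char strings[2][ETH_GSTRING_LEN];
	unsigned char *p = &strings[0][0];
	size_t i;

	for (i = 0; i < 2; i++) {
		/* Each entry occupies one fixed ETH_GSTRING_LEN slot,
		 * formatted like efx_fill_test()'s "%-6s %-24s".
		 */
		snprintf((char *)(p + i * ETH_GSTRING_LEN), ETH_GSTRING_LEN,
			 "%-6s %-24s", "sw", names[i]);
	}

	for (i = 0; i < 2; i++)
		printf("%zu: %s\n", i, (char *)(p + i * ETH_GSTRING_LEN));
	return 0;
}

The "%-6s %-24s" format plus the terminating NUL fits exactly in a 32-byte slot, which is why the driver can drop its own string struct and index the raw buffer directly.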
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 71998e7995d..ff5d322b9b4 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -19,17 +19,284 @@
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
-#include "spi.h"
#include "nic.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "io.h"
#include "phy.h"
#include "workarounds.h"
#include "selftest.h"
+#include "mdio_10g.h"
/* Hardware control for SFC4000 (aka Falcon). */
+/**************************************************************************
+ *
+ * NIC stats
+ *
+ **************************************************************************
+ */
+
+#define FALCON_MAC_STATS_SIZE 0x100
+
+#define XgRxOctets_offset 0x0
+#define XgRxOctets_WIDTH 48
+#define XgRxOctetsOK_offset 0x8
+#define XgRxOctetsOK_WIDTH 48
+#define XgRxPkts_offset 0x10
+#define XgRxPkts_WIDTH 32
+#define XgRxPktsOK_offset 0x14
+#define XgRxPktsOK_WIDTH 32
+#define XgRxBroadcastPkts_offset 0x18
+#define XgRxBroadcastPkts_WIDTH 32
+#define XgRxMulticastPkts_offset 0x1C
+#define XgRxMulticastPkts_WIDTH 32
+#define XgRxUnicastPkts_offset 0x20
+#define XgRxUnicastPkts_WIDTH 32
+#define XgRxUndersizePkts_offset 0x24
+#define XgRxUndersizePkts_WIDTH 32
+#define XgRxOversizePkts_offset 0x28
+#define XgRxOversizePkts_WIDTH 32
+#define XgRxJabberPkts_offset 0x2C
+#define XgRxJabberPkts_WIDTH 32
+#define XgRxUndersizeFCSerrorPkts_offset 0x30
+#define XgRxUndersizeFCSerrorPkts_WIDTH 32
+#define XgRxDropEvents_offset 0x34
+#define XgRxDropEvents_WIDTH 32
+#define XgRxFCSerrorPkts_offset 0x38
+#define XgRxFCSerrorPkts_WIDTH 32
+#define XgRxAlignError_offset 0x3C
+#define XgRxAlignError_WIDTH 32
+#define XgRxSymbolError_offset 0x40
+#define XgRxSymbolError_WIDTH 32
+#define XgRxInternalMACError_offset 0x44
+#define XgRxInternalMACError_WIDTH 32
+#define XgRxControlPkts_offset 0x48
+#define XgRxControlPkts_WIDTH 32
+#define XgRxPausePkts_offset 0x4C
+#define XgRxPausePkts_WIDTH 32
+#define XgRxPkts64Octets_offset 0x50
+#define XgRxPkts64Octets_WIDTH 32
+#define XgRxPkts65to127Octets_offset 0x54
+#define XgRxPkts65to127Octets_WIDTH 32
+#define XgRxPkts128to255Octets_offset 0x58
+#define XgRxPkts128to255Octets_WIDTH 32
+#define XgRxPkts256to511Octets_offset 0x5C
+#define XgRxPkts256to511Octets_WIDTH 32
+#define XgRxPkts512to1023Octets_offset 0x60
+#define XgRxPkts512to1023Octets_WIDTH 32
+#define XgRxPkts1024to15xxOctets_offset 0x64
+#define XgRxPkts1024to15xxOctets_WIDTH 32
+#define XgRxPkts15xxtoMaxOctets_offset 0x68
+#define XgRxPkts15xxtoMaxOctets_WIDTH 32
+#define XgRxLengthError_offset 0x6C
+#define XgRxLengthError_WIDTH 32
+#define XgTxPkts_offset 0x80
+#define XgTxPkts_WIDTH 32
+#define XgTxOctets_offset 0x88
+#define XgTxOctets_WIDTH 48
+#define XgTxMulticastPkts_offset 0x90
+#define XgTxMulticastPkts_WIDTH 32
+#define XgTxBroadcastPkts_offset 0x94
+#define XgTxBroadcastPkts_WIDTH 32
+#define XgTxUnicastPkts_offset 0x98
+#define XgTxUnicastPkts_WIDTH 32
+#define XgTxControlPkts_offset 0x9C
+#define XgTxControlPkts_WIDTH 32
+#define XgTxPausePkts_offset 0xA0
+#define XgTxPausePkts_WIDTH 32
+#define XgTxPkts64Octets_offset 0xA4
+#define XgTxPkts64Octets_WIDTH 32
+#define XgTxPkts65to127Octets_offset 0xA8
+#define XgTxPkts65to127Octets_WIDTH 32
+#define XgTxPkts128to255Octets_offset 0xAC
+#define XgTxPkts128to255Octets_WIDTH 32
+#define XgTxPkts256to511Octets_offset 0xB0
+#define XgTxPkts256to511Octets_WIDTH 32
+#define XgTxPkts512to1023Octets_offset 0xB4
+#define XgTxPkts512to1023Octets_WIDTH 32
+#define XgTxPkts1024to15xxOctets_offset 0xB8
+#define XgTxPkts1024to15xxOctets_WIDTH 32
+#define XgTxPkts1519toMaxOctets_offset 0xBC
+#define XgTxPkts1519toMaxOctets_WIDTH 32
+#define XgTxUndersizePkts_offset 0xC0
+#define XgTxUndersizePkts_WIDTH 32
+#define XgTxOversizePkts_offset 0xC4
+#define XgTxOversizePkts_WIDTH 32
+#define XgTxNonTcpUdpPkt_offset 0xC8
+#define XgTxNonTcpUdpPkt_WIDTH 16
+#define XgTxMacSrcErrPkt_offset 0xCC
+#define XgTxMacSrcErrPkt_WIDTH 16
+#define XgTxIpSrcErrPkt_offset 0xD0
+#define XgTxIpSrcErrPkt_WIDTH 16
+#define XgDmaDone_offset 0xD4
+#define XgDmaDone_WIDTH 32
+
+#define FALCON_XMAC_STATS_DMA_FLAG(efx) \
+ (*(u32 *)((efx)->stats_buffer.addr + XgDmaDone_offset))
+
+#define FALCON_DMA_STAT(ext_name, hw_name) \
+ [FALCON_STAT_ ## ext_name] = \
+ { #ext_name, \
+ /* 48-bit stats are zero-padded to 64 on DMA */ \
+ hw_name ## _ ## WIDTH == 48 ? 64 : hw_name ## _ ## WIDTH, \
+ hw_name ## _ ## offset }
+#define FALCON_OTHER_STAT(ext_name) \
+ [FALCON_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+
+static const struct efx_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = {
+ FALCON_DMA_STAT(tx_bytes, XgTxOctets),
+ FALCON_DMA_STAT(tx_packets, XgTxPkts),
+ FALCON_DMA_STAT(tx_pause, XgTxPausePkts),
+ FALCON_DMA_STAT(tx_control, XgTxControlPkts),
+ FALCON_DMA_STAT(tx_unicast, XgTxUnicastPkts),
+ FALCON_DMA_STAT(tx_multicast, XgTxMulticastPkts),
+ FALCON_DMA_STAT(tx_broadcast, XgTxBroadcastPkts),
+ FALCON_DMA_STAT(tx_lt64, XgTxUndersizePkts),
+ FALCON_DMA_STAT(tx_64, XgTxPkts64Octets),
+ FALCON_DMA_STAT(tx_65_to_127, XgTxPkts65to127Octets),
+ FALCON_DMA_STAT(tx_128_to_255, XgTxPkts128to255Octets),
+ FALCON_DMA_STAT(tx_256_to_511, XgTxPkts256to511Octets),
+ FALCON_DMA_STAT(tx_512_to_1023, XgTxPkts512to1023Octets),
+ FALCON_DMA_STAT(tx_1024_to_15xx, XgTxPkts1024to15xxOctets),
+ FALCON_DMA_STAT(tx_15xx_to_jumbo, XgTxPkts1519toMaxOctets),
+ FALCON_DMA_STAT(tx_gtjumbo, XgTxOversizePkts),
+ FALCON_DMA_STAT(tx_non_tcpudp, XgTxNonTcpUdpPkt),
+ FALCON_DMA_STAT(tx_mac_src_error, XgTxMacSrcErrPkt),
+ FALCON_DMA_STAT(tx_ip_src_error, XgTxIpSrcErrPkt),
+ FALCON_DMA_STAT(rx_bytes, XgRxOctets),
+ FALCON_DMA_STAT(rx_good_bytes, XgRxOctetsOK),
+ FALCON_OTHER_STAT(rx_bad_bytes),
+ FALCON_DMA_STAT(rx_packets, XgRxPkts),
+ FALCON_DMA_STAT(rx_good, XgRxPktsOK),
+ FALCON_DMA_STAT(rx_bad, XgRxFCSerrorPkts),
+ FALCON_DMA_STAT(rx_pause, XgRxPausePkts),
+ FALCON_DMA_STAT(rx_control, XgRxControlPkts),
+ FALCON_DMA_STAT(rx_unicast, XgRxUnicastPkts),
+ FALCON_DMA_STAT(rx_multicast, XgRxMulticastPkts),
+ FALCON_DMA_STAT(rx_broadcast, XgRxBroadcastPkts),
+ FALCON_DMA_STAT(rx_lt64, XgRxUndersizePkts),
+ FALCON_DMA_STAT(rx_64, XgRxPkts64Octets),
+ FALCON_DMA_STAT(rx_65_to_127, XgRxPkts65to127Octets),
+ FALCON_DMA_STAT(rx_128_to_255, XgRxPkts128to255Octets),
+ FALCON_DMA_STAT(rx_256_to_511, XgRxPkts256to511Octets),
+ FALCON_DMA_STAT(rx_512_to_1023, XgRxPkts512to1023Octets),
+ FALCON_DMA_STAT(rx_1024_to_15xx, XgRxPkts1024to15xxOctets),
+ FALCON_DMA_STAT(rx_15xx_to_jumbo, XgRxPkts15xxtoMaxOctets),
+ FALCON_DMA_STAT(rx_gtjumbo, XgRxOversizePkts),
+ FALCON_DMA_STAT(rx_bad_lt64, XgRxUndersizeFCSerrorPkts),
+ FALCON_DMA_STAT(rx_bad_gtjumbo, XgRxJabberPkts),
+ FALCON_DMA_STAT(rx_overflow, XgRxDropEvents),
+ FALCON_DMA_STAT(rx_symbol_error, XgRxSymbolError),
+ FALCON_DMA_STAT(rx_align_error, XgRxAlignError),
+ FALCON_DMA_STAT(rx_length_error, XgRxLengthError),
+ FALCON_DMA_STAT(rx_internal_error, XgRxInternalMACError),
+ FALCON_OTHER_STAT(rx_nodesc_drop_cnt),
+};
+static const unsigned long falcon_stat_mask[] = {
+ [0 ... BITS_TO_LONGS(FALCON_STAT_COUNT) - 1] = ~0UL,
+};
+
+/**************************************************************************
+ *
+ * Basic SPI command set and bit definitions
+ *
+ *************************************************************************/
+
+#define SPI_WRSR 0x01 /* Write status register */
+#define SPI_WRITE 0x02 /* Write data to memory array */
+#define SPI_READ 0x03 /* Read data from memory array */
+#define SPI_WRDI 0x04 /* Reset write enable latch */
+#define SPI_RDSR 0x05 /* Read status register */
+#define SPI_WREN 0x06 /* Set write enable latch */
+#define SPI_SST_EWSR 0x50 /* SST: Enable write to status register */
+
+#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */
+#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */
+#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */
+#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */
+#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */
+#define SPI_STATUS_NRDY 0x01 /* Device busy flag */
+
+/**************************************************************************
+ *
+ * Non-volatile memory layout
+ *
+ **************************************************************************
+ */
+
+/* SFC4000 flash is partitioned into:
+ * 0-0x400 chip and board config (see struct falcon_nvconfig)
+ * 0x400-0x8000 unused (or may contain VPD if EEPROM not present)
+ * 0x8000-end boot code (mapped to PCI expansion ROM)
+ * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
+ * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
+ * 0-0x400 chip and board config
+ * configurable VPD
+ * 0x800-0x1800 boot config
+ * Aside from the chip and board config, all of these are optional and may
+ * be absent or truncated depending on the devices used.
+ */
+#define FALCON_NVCONFIG_END 0x400U
+#define FALCON_FLASH_BOOTCODE_START 0x8000U
+#define FALCON_EEPROM_BOOTCONFIG_START 0x800U
+#define FALCON_EEPROM_BOOTCONFIG_END 0x1800U
+
+/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
+struct falcon_nvconfig_board_v2 {
+ __le16 nports;
+ u8 port0_phy_addr;
+ u8 port0_phy_type;
+ u8 port1_phy_addr;
+ u8 port1_phy_type;
+ __le16 asic_sub_revision;
+ __le16 board_revision;
+} __packed;
+
+/* Board configuration v3 extra information */
+struct falcon_nvconfig_board_v3 {
+ __le32 spi_device_type[2];
+} __packed;
+
+/* Bit numbers for spi_device_type */
+#define SPI_DEV_TYPE_SIZE_LBN 0
+#define SPI_DEV_TYPE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
+#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
+#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
+#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
+#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
+#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
+#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
+#define SPI_DEV_TYPE_FIELD(type, field) \
+ (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
+
+#define FALCON_NVCONFIG_OFFSET 0x300
+
+#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
+struct falcon_nvconfig {
+ efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
+ u8 mac_address[2][8]; /* 0x310 */
+ efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
+ efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
+ efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
+ efx_oword_t hw_init_reg; /* 0x350 */
+ efx_oword_t nic_stat_reg; /* 0x360 */
+ efx_oword_t glb_ctl_reg; /* 0x370 */
+ efx_oword_t srm_cfg_reg; /* 0x380 */
+ efx_oword_t spare_reg; /* 0x390 */
+ __le16 board_magic_num; /* 0x3A0 */
+ __le16 board_struct_ver;
+ __le16 board_checksum;
+ struct falcon_nvconfig_board_v2 board_v2;
+ efx_oword_t ee_base_page_reg; /* 0x3B0 */
+ struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */
+} __packed;
+
+/*************************************************************************/
+
static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
+static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
static const unsigned int
/* "Large" EEPROM device: Atmel AT25640 or similar
@@ -146,7 +413,7 @@ static void falcon_prepare_flush(struct efx_nic *efx)
*
* NB most hardware supports MSI interrupts
*/
-inline void falcon_irq_ack_a1(struct efx_nic *efx)
+static inline void falcon_irq_ack_a1(struct efx_nic *efx)
{
efx_dword_t reg;
@@ -156,7 +423,7 @@ inline void falcon_irq_ack_a1(struct efx_nic *efx)
}
-irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
+static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
efx_oword_t *int_ker = efx->irq_status.addr;
@@ -177,10 +444,13 @@ irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
"IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+ if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
+ return IRQ_HANDLED;
+
/* Check to see if we have a serious error condition */
syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
+ return efx_farch_fatal_interrupt(efx);
/* Determine interrupting queues, clear interrupt status
* register and acknowledge the device interrupt.
@@ -241,9 +511,10 @@ static int falcon_spi_wait(struct efx_nic *efx)
}
}
-int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
- unsigned int command, int address,
- const void *in, void *out, size_t len)
+static int
+falcon_spi_cmd(struct efx_nic *efx, const struct falcon_spi_device *spi,
+ unsigned int command, int address,
+ const void *in, void *out, size_t len)
{
bool addressed = (address >= 0);
bool reading = (out != NULL);
@@ -297,48 +568,16 @@ int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
return 0;
}
-static size_t
-falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start)
-{
- return min(FALCON_SPI_MAX_LEN,
- (spi->block_size - (start & (spi->block_size - 1))));
-}
-
static inline u8
-efx_spi_munge_command(const struct efx_spi_device *spi,
- const u8 command, const unsigned int address)
+falcon_spi_munge_command(const struct falcon_spi_device *spi,
+ const u8 command, const unsigned int address)
{
return command | (((address >> 8) & spi->munge_address) << 3);
}
-/* Wait up to 10 ms for buffered write completion */
-int
-falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi)
-{
- unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
- u8 status;
- int rc;
-
- for (;;) {
- rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
- &status, sizeof(status));
- if (rc)
- return rc;
- if (!(status & SPI_STATUS_NRDY))
- return 0;
- if (time_after_eq(jiffies, timeout)) {
- netif_err(efx, hw, efx->net_dev,
- "SPI write timeout on device %d"
- " last status=0x%02x\n",
- spi->device_id, status);
- return -ETIMEDOUT;
- }
- schedule_timeout_uninterruptible(1);
- }
-}
-
-int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
- loff_t start, size_t len, size_t *retlen, u8 *buffer)
+static int
+falcon_spi_read(struct efx_nic *efx, const struct falcon_spi_device *spi,
+ loff_t start, size_t len, size_t *retlen, u8 *buffer)
{
size_t block_len, pos = 0;
unsigned int command;
@@ -347,7 +586,7 @@ int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
while (pos < len) {
block_len = min(len - pos, FALCON_SPI_MAX_LEN);
- command = efx_spi_munge_command(spi, SPI_READ, start + pos);
+ command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL,
buffer + pos, block_len);
if (rc)
@@ -367,8 +606,52 @@ int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi,
return rc;
}
-int
-falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
+#ifdef CONFIG_SFC_MTD
+
+struct falcon_mtd_partition {
+ struct efx_mtd_partition common;
+ const struct falcon_spi_device *spi;
+ size_t offset;
+};
+
+#define to_falcon_mtd_partition(mtd) \
+ container_of(mtd, struct falcon_mtd_partition, common.mtd)
+
+static size_t
+falcon_spi_write_limit(const struct falcon_spi_device *spi, size_t start)
+{
+ return min(FALCON_SPI_MAX_LEN,
+ (spi->block_size - (start & (spi->block_size - 1))));
+}
+
+/* Wait up to 10 ms for buffered write completion */
+static int
+falcon_spi_wait_write(struct efx_nic *efx, const struct falcon_spi_device *spi)
+{
+ unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100);
+ u8 status;
+ int rc;
+
+ for (;;) {
+ rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+ &status, sizeof(status));
+ if (rc)
+ return rc;
+ if (!(status & SPI_STATUS_NRDY))
+ return 0;
+ if (time_after_eq(jiffies, timeout)) {
+ netif_err(efx, hw, efx->net_dev,
+ "SPI write timeout on device %d"
+ " last status=0x%02x\n",
+ spi->device_id, status);
+ return -ETIMEDOUT;
+ }
+ schedule_timeout_uninterruptible(1);
+ }
+}
+
+static int
+falcon_spi_write(struct efx_nic *efx, const struct falcon_spi_device *spi,
loff_t start, size_t len, size_t *retlen, const u8 *buffer)
{
u8 verify_buffer[FALCON_SPI_MAX_LEN];
@@ -383,7 +666,7 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
block_len = min(len - pos,
falcon_spi_write_limit(spi, start + pos));
- command = efx_spi_munge_command(spi, SPI_WRITE, start + pos);
+ command = falcon_spi_munge_command(spi, SPI_WRITE, start + pos);
rc = falcon_spi_cmd(efx, spi, command, start + pos,
buffer + pos, NULL, block_len);
if (rc)
@@ -393,7 +676,7 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
if (rc)
break;
- command = efx_spi_munge_command(spi, SPI_READ, start + pos);
+ command = falcon_spi_munge_command(spi, SPI_READ, start + pos);
rc = falcon_spi_cmd(efx, spi, command, start + pos,
NULL, verify_buffer, block_len);
if (memcmp(verify_buffer, buffer + pos, block_len)) {
@@ -416,6 +699,520 @@ falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi,
return rc;
}
+static int
+falcon_spi_slow_wait(struct falcon_mtd_partition *part, bool uninterruptible)
+{
+ const struct falcon_spi_device *spi = part->spi;
+ struct efx_nic *efx = part->common.mtd.priv;
+ u8 status;
+ int rc, i;
+
+ /* Wait up to 4s for flash/EEPROM to finish a slow operation. */
+ for (i = 0; i < 40; i++) {
+ __set_current_state(uninterruptible ?
+ TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ / 10);
+ rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+ &status, sizeof(status));
+ if (rc)
+ return rc;
+ if (!(status & SPI_STATUS_NRDY))
+ return 0;
+ if (signal_pending(current))
+ return -EINTR;
+ }
+ pr_err("%s: timed out waiting for %s\n",
+ part->common.name, part->common.dev_type_name);
+ return -ETIMEDOUT;
+}
+
+static int
+falcon_spi_unlock(struct efx_nic *efx, const struct falcon_spi_device *spi)
+{
+ const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
+ SPI_STATUS_BP0);
+ u8 status;
+ int rc;
+
+ rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
+ &status, sizeof(status));
+ if (rc)
+ return rc;
+
+ if (!(status & unlock_mask))
+ return 0; /* already unlocked */
+
+ rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
+ if (rc)
+ return rc;
+ rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
+ if (rc)
+ return rc;
+
+ status &= ~unlock_mask;
+ rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
+ NULL, sizeof(status));
+ if (rc)
+ return rc;
+ rc = falcon_spi_wait_write(efx, spi);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+#define FALCON_SPI_VERIFY_BUF_LEN 16
+
+static int
+falcon_spi_erase(struct falcon_mtd_partition *part, loff_t start, size_t len)
+{
+ const struct falcon_spi_device *spi = part->spi;
+ struct efx_nic *efx = part->common.mtd.priv;
+ unsigned pos, block_len;
+ u8 empty[FALCON_SPI_VERIFY_BUF_LEN];
+ u8 buffer[FALCON_SPI_VERIFY_BUF_LEN];
+ int rc;
+
+ if (len != spi->erase_size)
+ return -EINVAL;
+
+ if (spi->erase_command == 0)
+ return -EOPNOTSUPP;
+
+ rc = falcon_spi_unlock(efx, spi);
+ if (rc)
+ return rc;
+ rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
+ if (rc)
+ return rc;
+ rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
+ NULL, 0);
+ if (rc)
+ return rc;
+ rc = falcon_spi_slow_wait(part, false);
+
+ /* Verify the entire region has been wiped */
+ memset(empty, 0xff, sizeof(empty));
+ for (pos = 0; pos < len; pos += block_len) {
+ block_len = min(len - pos, sizeof(buffer));
+ rc = falcon_spi_read(efx, spi, start + pos, block_len,
+ NULL, buffer);
+ if (rc)
+ return rc;
+ if (memcmp(empty, buffer, block_len))
+ return -EIO;
+
+ /* Avoid locking up the system */
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
+ }
+
+ return rc;
+}
+
+static void falcon_mtd_rename(struct efx_mtd_partition *part)
+{
+ struct efx_nic *efx = part->mtd.priv;
+
+ snprintf(part->name, sizeof(part->name), "%s %s",
+ efx->name, part->type_name);
+}
+
+static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, u8 *buffer)
+{
+ struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
+ if (rc)
+ return rc;
+ rc = falcon_spi_read(efx, part->spi, part->offset + start,
+ len, retlen, buffer);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
+{
+ struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
+ if (rc)
+ return rc;
+ rc = falcon_spi_erase(part, part->offset + start, len);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, const u8 *buffer)
+{
+ struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ rc = mutex_lock_interruptible(&nic_data->spi_lock);
+ if (rc)
+ return rc;
+ rc = falcon_spi_write(efx, part->spi, part->offset + start,
+ len, retlen, buffer);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_sync(struct mtd_info *mtd)
+{
+ struct falcon_mtd_partition *part = to_falcon_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ int rc;
+
+ mutex_lock(&nic_data->spi_lock);
+ rc = falcon_spi_slow_wait(part, true);
+ mutex_unlock(&nic_data->spi_lock);
+ return rc;
+}
+
+static int falcon_mtd_probe(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ struct falcon_mtd_partition *parts;
+ struct falcon_spi_device *spi;
+ size_t n_parts;
+ int rc = -ENODEV;
+
+ ASSERT_RTNL();
+
+ /* Allocate space for maximum number of partitions */
+ parts = kcalloc(2, sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+ n_parts = 0;
+
+ spi = &nic_data->spi_flash;
+ if (falcon_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
+ parts[n_parts].spi = spi;
+ parts[n_parts].offset = FALCON_FLASH_BOOTCODE_START;
+ parts[n_parts].common.dev_type_name = "flash";
+ parts[n_parts].common.type_name = "sfc_flash_bootrom";
+ parts[n_parts].common.mtd.type = MTD_NORFLASH;
+ parts[n_parts].common.mtd.flags = MTD_CAP_NORFLASH;
+ parts[n_parts].common.mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
+ parts[n_parts].common.mtd.erasesize = spi->erase_size;
+ n_parts++;
+ }
+
+ spi = &nic_data->spi_eeprom;
+ if (falcon_spi_present(spi) && spi->size > FALCON_EEPROM_BOOTCONFIG_START) {
+ parts[n_parts].spi = spi;
+ parts[n_parts].offset = FALCON_EEPROM_BOOTCONFIG_START;
+ parts[n_parts].common.dev_type_name = "EEPROM";
+ parts[n_parts].common.type_name = "sfc_bootconfig";
+ parts[n_parts].common.mtd.type = MTD_RAM;
+ parts[n_parts].common.mtd.flags = MTD_CAP_RAM;
+ parts[n_parts].common.mtd.size =
+ min(spi->size, FALCON_EEPROM_BOOTCONFIG_END) -
+ FALCON_EEPROM_BOOTCONFIG_START;
+ parts[n_parts].common.mtd.erasesize = spi->erase_size;
+ n_parts++;
+ }
+
+ rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
+ if (rc)
+ kfree(parts);
+ return rc;
+}
+
+#endif /* CONFIG_SFC_MTD */
+
+/**************************************************************************
+ *
+ * XMAC operations
+ *
+ **************************************************************************
+ */
+
+/* Configure the XAUI driver that is an output from Falcon */
+static void falcon_setup_xaui(struct efx_nic *efx)
+{
+ efx_oword_t sdctl, txdrv;
+
+ /* Move the XAUI into low power, unless there is no PHY, in
+ * which case the XAUI will have to drive a cable. */
+ if (efx->phy_type == PHY_TYPE_NONE)
+ return;
+
+ efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
+ EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
+ efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
+
+ EFX_POPULATE_OWORD_8(txdrv,
+ FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
+ FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
+ FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
+ FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
+ FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
+ efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
+}
+
+int falcon_reset_xaui(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t reg;
+ int count;
+
+ /* Don't fetch MAC statistics over an XMAC reset */
+ WARN_ON(nic_data->stats_disable_count == 0);
+
+ /* Start reset sequence */
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
+ efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
+
+ /* Wait up to 10 ms for completion, then reinitialise */
+ for (count = 0; count < 1000; count++) {
+ efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
+ if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
+ EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
+ falcon_setup_xaui(efx);
+ return 0;
+ }
+ udelay(10);
+ }
+ netif_err(efx, hw, efx->net_dev,
+ "timed out waiting for XAUI/XGXS reset\n");
+ return -ETIMEDOUT;
+}
+
+static void falcon_ack_status_intr(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t reg;
+
+ if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
+ return;
+
+ /* We expect xgmii faults if the wireside link is down */
+ if (!efx->link_state.up)
+ return;
+
+ /* We can only use this interrupt to signal the negative edge of
+ * xaui_align [we have to poll the positive edge]. */
+ if (nic_data->xmac_poll_required)
+ return;
+
+ efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
+}
+
+static bool falcon_xgxs_link_ok(struct efx_nic *efx)
+{
+ efx_oword_t reg;
+ bool align_done, link_ok = false;
+ int sync_status;
+
+ /* Read link status */
+ efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+
+ align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
+ sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
+ if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
+ link_ok = true;
+
+ /* Clear link status ready for next read */
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
+ efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
+
+ return link_ok;
+}
+
+static bool falcon_xmac_link_ok(struct efx_nic *efx)
+{
+ /*
+ * Check MAC's XGXS link status except when using XGMII loopback
+ * which bypasses the XGXS block.
+ * If possible, check PHY's XGXS link status except when using
+ * MAC loopback.
+ */
+ return (efx->loopback_mode == LOOPBACK_XGMII ||
+ falcon_xgxs_link_ok(efx)) &&
+ (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
+ LOOPBACK_INTERNAL(efx) ||
+ efx_mdio_phyxgxs_lane_sync(efx));
+}
+
+static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
+{
+ unsigned int max_frame_len;
+ efx_oword_t reg;
+ bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
+ bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
+
+ /* Configure MAC - cut-thru mode is hard wired on */
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_AB_XM_RX_JUMBO_MODE, 1,
+ FRF_AB_XM_TX_STAT_EN, 1,
+ FRF_AB_XM_RX_STAT_EN, 1);
+ efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
+
+ /* Configure TX */
+ EFX_POPULATE_OWORD_6(reg,
+ FRF_AB_XM_TXEN, 1,
+ FRF_AB_XM_TX_PRMBL, 1,
+ FRF_AB_XM_AUTO_PAD, 1,
+ FRF_AB_XM_TXCRC, 1,
+ FRF_AB_XM_FCNTL, tx_fc,
+ FRF_AB_XM_IPG, 0x3);
+ efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
+
+ /* Configure RX */
+ EFX_POPULATE_OWORD_5(reg,
+ FRF_AB_XM_RXEN, 1,
+ FRF_AB_XM_AUTO_DEPAD, 0,
+ FRF_AB_XM_ACPT_ALL_MCAST, 1,
+ FRF_AB_XM_ACPT_ALL_UCAST, !efx->unicast_filter,
+ FRF_AB_XM_PASS_CRC_ERR, 1);
+ efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
+
+ /* Set frame length */
+ max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
+ EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
+ efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
+ FRF_AB_XM_TX_JUMBO_MODE, 1);
+ efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
+
+ EFX_POPULATE_OWORD_2(reg,
+ FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
+ FRF_AB_XM_DIS_FCNTL, !rx_fc);
+ efx_writeo(efx, &reg, FR_AB_XM_FC);
+
+ /* Set MAC address */
+ memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
+ efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
+ memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
+ efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
+}
+
+static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
+{
+ efx_oword_t reg;
+ bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
+ bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
+ bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
+ bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
+
+ /* XGXS block is flaky and will need to be reset if moving
+ * into or out of XGMII, XGXS or XAUI loopbacks. */
+ efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+ old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
+ old_xgmii_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
+
+ efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
+ old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
+
+ /* The PHY driver may have turned XAUI off */
+ if ((xgxs_loopback != old_xgxs_loopback) ||
+ (xaui_loopback != old_xaui_loopback) ||
+ (xgmii_loopback != old_xgmii_loopback))
+ falcon_reset_xaui(efx);
+
+ efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
+ (xgxs_loopback || xaui_loopback) ?
+ FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
+ efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
+
+ efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
+ EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
+ efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
+}
+
+
+/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
+static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
+{
+ bool mac_up = falcon_xmac_link_ok(efx);
+
+ if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
+ efx_phy_mode_disabled(efx->phy_mode))
+ /* XAUI link is expected to be down */
+ return mac_up;
+
+ falcon_stop_nic_stats(efx);
+
+ while (!mac_up && tries) {
+ netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
+ falcon_reset_xaui(efx);
+ udelay(200);
+
+ mac_up = falcon_xmac_link_ok(efx);
+ --tries;
+ }
+
+ falcon_start_nic_stats(efx);
+
+ return mac_up;
+}
+
+static bool falcon_xmac_check_fault(struct efx_nic *efx)
+{
+ return !falcon_xmac_link_ok_retry(efx, 5);
+}
+
+static int falcon_reconfigure_xmac(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ efx_farch_filter_sync_rx_mode(efx);
+
+ falcon_reconfigure_xgxs_core(efx);
+ falcon_reconfigure_xmac_core(efx);
+
+ falcon_reconfigure_mac_wrapper(efx);
+
+ nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
+ falcon_ack_status_intr(efx);
+
+ return 0;
+}
+
+static void falcon_poll_xmac(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+
+ /* We expect xgmii faults if the wireside link is down */
+ if (!efx->link_state.up || !nic_data->xmac_poll_required)
+ return;
+
+ nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
+ falcon_ack_status_intr(efx);
+}
+
/**************************************************************************
*
* MAC wrapper
@@ -497,7 +1294,7 @@ static void falcon_reset_macs(struct efx_nic *efx)
falcon_setup_xaui(efx);
}
-void falcon_drain_tx_fifo(struct efx_nic *efx)
+static void falcon_drain_tx_fifo(struct efx_nic *efx)
{
efx_oword_t reg;
@@ -529,7 +1326,7 @@ static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
falcon_drain_tx_fifo(efx);
}
-void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
+static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
{
struct efx_link_state *link_state = &efx->link_state;
efx_oword_t reg;
@@ -550,7 +1347,7 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
EFX_POPULATE_OWORD_5(reg,
FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
FRF_AB_MAC_BCAD_ACPT, 1,
- FRF_AB_MAC_UC_PROM, efx->promiscuous,
+ FRF_AB_MAC_UC_PROM, !efx->unicast_filter,
FRF_AB_MAC_LINK_STATUS, 1, /* always set */
FRF_AB_MAC_SPEED, link_speed);
/* On B0, MAC backpressure can be disabled and packets get
@@ -583,10 +1380,7 @@ static void falcon_stats_request(struct efx_nic *efx)
WARN_ON(nic_data->stats_pending);
WARN_ON(nic_data->stats_disable_count);
- if (nic_data->stats_dma_done == NULL)
- return; /* no mac selected */
-
- *nic_data->stats_dma_done = FALCON_STATS_NOT_DONE;
+ FALCON_XMAC_STATS_DMA_FLAG(efx) = 0;
nic_data->stats_pending = true;
wmb(); /* ensure done flag is clear */
@@ -608,9 +1402,11 @@ static void falcon_stats_complete(struct efx_nic *efx)
return;
nic_data->stats_pending = false;
- if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
+ if (FALCON_XMAC_STATS_DMA_FLAG(efx)) {
rmb(); /* read the done flag before the stats */
- falcon_update_stats_xmac(efx);
+ efx_nic_update_stats(falcon_stat_desc, FALCON_STAT_COUNT,
+ falcon_stat_mask, nic_data->stats,
+ efx->stats_buffer.addr, true);
} else {
netif_err(efx, hw, efx->net_dev,
"timed out waiting for statistics\n");
@@ -678,6 +1474,28 @@ static int falcon_reconfigure_port(struct efx_nic *efx)
return 0;
}
+/* TX flow control may automatically turn itself off if the link
+ * partner (intermittently) stops responding to pause frames. There
+ * isn't any indication that this has happened, so the best we can do is
+ * leave it up to the user to spot this and fix it by cycling transmit
+ * flow control on this end.
+ */
+
+static void falcon_a1_prepare_enable_fc_tx(struct efx_nic *efx)
+{
+ /* Schedule a reset to recover */
+ efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
+}
+
+static void falcon_b0_prepare_enable_fc_tx(struct efx_nic *efx)
+{
+ /* Recover by resetting the EM block */
+ falcon_stop_nic_stats(efx);
+ falcon_drain_tx_fifo(efx);
+ falcon_reconfigure_xmac(efx);
+ falcon_start_nic_stats(efx);
+}
+
/**************************************************************************
*
* PHY access via GMII
@@ -861,7 +1679,7 @@ static int falcon_probe_port(struct efx_nic *efx)
/* Allocate buffer for stats */
rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
- FALCON_MAC_STATS_SIZE);
+ FALCON_MAC_STATS_SIZE, GFP_KERNEL);
if (rc)
return rc;
netif_dbg(efx, probe, efx->net_dev,
@@ -869,7 +1687,6 @@ static int falcon_probe_port(struct efx_nic *efx)
(u64)efx->stats_buffer.dma_addr,
efx->stats_buffer.addr,
(u64)virt_to_phys(efx->stats_buffer.addr));
- nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset;
return 0;
}
@@ -926,15 +1743,15 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
{
struct falcon_nic_data *nic_data = efx->nic_data;
struct falcon_nvconfig *nvconfig;
- struct efx_spi_device *spi;
+ struct falcon_spi_device *spi;
void *region;
int rc, magic_num, struct_ver;
__le16 *word, *limit;
u32 csum;
- if (efx_spi_present(&nic_data->spi_flash))
+ if (falcon_spi_present(&nic_data->spi_flash))
spi = &nic_data->spi_flash;
- else if (efx_spi_present(&nic_data->spi_eeprom))
+ else if (falcon_spi_present(&nic_data->spi_eeprom))
spi = &nic_data->spi_eeprom;
else
return -EINVAL;
@@ -949,7 +1766,7 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
mutex_unlock(&nic_data->spi_lock);
if (rc) {
netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
- efx_spi_present(&nic_data->spi_flash) ?
+ falcon_spi_present(&nic_data->spi_flash) ?
"flash" : "EEPROM");
rc = -EIO;
goto out;
@@ -998,7 +1815,7 @@ static int falcon_test_nvram(struct efx_nic *efx)
return falcon_read_nvram(efx, NULL);
}
-static const struct efx_nic_register_test falcon_b0_register_tests[] = {
+static const struct efx_farch_register_test falcon_b0_register_tests[] = {
{ FR_AZ_ADR_REGION,
EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
{ FR_AZ_RX_CFG,
@@ -1058,8 +1875,8 @@ falcon_b0_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
efx_reset_down(efx, reset_method);
tests->registers =
- efx_nic_test_registers(efx, falcon_b0_register_tests,
- ARRAY_SIZE(falcon_b0_register_tests))
+ efx_farch_test_registers(efx, falcon_b0_register_tests,
+ ARRAY_SIZE(falcon_b0_register_tests))
? -1 : 1;
rc = falcon_reset_hw(efx, reset_method);
@@ -1078,8 +1895,7 @@ static enum reset_type falcon_map_reset_reason(enum reset_type reason)
{
switch (reason) {
case RESET_TYPE_RX_RECOVERY:
- case RESET_TYPE_RX_DESC_FETCH:
- case RESET_TYPE_TX_DESC_FETCH:
+ case RESET_TYPE_DMA_ERROR:
case RESET_TYPE_TX_SKIP:
/* These can occasionally occur due to hardware bugs.
* We try to reset without disrupting the link.
@@ -1294,7 +2110,7 @@ static int falcon_reset_sram(struct efx_nic *efx)
}
static void falcon_spi_device_init(struct efx_nic *efx,
- struct efx_spi_device *spi_device,
+ struct falcon_spi_device *spi_device,
unsigned int device_id, u32 device_type)
{
if (device_type != 0) {
@@ -1360,10 +2176,11 @@ out:
return rc;
}
-static void falcon_dimension_resources(struct efx_nic *efx)
+static int falcon_dimension_resources(struct efx_nic *efx)
{
efx->rx_dc_base = 0x20000;
efx->tx_dc_base = 0x26000;
+ return 0;
}
/* Probe all SPI devices on the NIC */
@@ -1410,6 +2227,20 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
large_eeprom_type);
}
+static unsigned int falcon_a1_mem_map_size(struct efx_nic *efx)
+{
+ return 0x20000;
+}
+
+static unsigned int falcon_b0_mem_map_size(struct efx_nic *efx)
+{
+ /* Map everything up to and including the RSS indirection table.
+ * The PCI core takes care of mapping the MSI-X tables.
+ */
+ return FR_BZ_RX_INDIRECTION_TBL +
+ FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS;
+}
+
static int falcon_probe_nic(struct efx_nic *efx)
{
struct falcon_nic_data *nic_data;
@@ -1424,7 +2255,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
rc = -ENODEV;
- if (efx_nic_fpga_ver(efx) != 0) {
+ if (efx_farch_fpga_ver(efx) != 0) {
netif_err(efx, probe, efx->net_dev,
"Falcon FPGA not supported\n");
goto fail1;
@@ -1478,7 +2309,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
}
/* Allocate memory for INT_KER */
- rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
+ rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
+ GFP_KERNEL);
if (rc)
goto fail4;
BUG_ON(efx->irq_status.dma_addr & 0x0f);
@@ -1499,6 +2331,8 @@ static int falcon_probe_nic(struct efx_nic *efx)
goto fail5;
}
+ efx->max_channels = (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 4 :
+ EFX_MAX_CHANNELS);
efx->timer_quantum_ns = 4968; /* 621 cycles */
/* Initialise I2C adapter */
@@ -1657,7 +2491,7 @@ static int falcon_init_nic(struct efx_nic *efx)
efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
}
- efx_nic_init_common(efx);
+ efx_farch_init_common(efx);
return 0;
}
@@ -1688,24 +2522,65 @@ static void falcon_remove_nic(struct efx_nic *efx)
efx->nic_data = NULL;
}
-static void falcon_update_nic_stats(struct efx_nic *efx)
+static size_t falcon_describe_nic_stats(struct efx_nic *efx, u8 *names)
+{
+ return efx_nic_describe_stats(falcon_stat_desc, FALCON_STAT_COUNT,
+ falcon_stat_mask, names);
+}
+
+static size_t falcon_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
{
struct falcon_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
efx_oword_t cnt;
- if (nic_data->stats_disable_count)
- return;
+ if (!nic_data->stats_disable_count) {
+ efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
+ stats[FALCON_STAT_rx_nodesc_drop_cnt] +=
+ EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
+
+ if (nic_data->stats_pending &&
+ FALCON_XMAC_STATS_DMA_FLAG(efx)) {
+ nic_data->stats_pending = false;
+ rmb(); /* read the done flag before the stats */
+ efx_nic_update_stats(
+ falcon_stat_desc, FALCON_STAT_COUNT,
+ falcon_stat_mask,
+ stats, efx->stats_buffer.addr, true);
+ }
- efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
- efx->n_rx_nodesc_drop_cnt +=
- EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
+ /* Update derived statistic */
+ efx_update_diff_stat(&stats[FALCON_STAT_rx_bad_bytes],
+ stats[FALCON_STAT_rx_bytes] -
+ stats[FALCON_STAT_rx_good_bytes] -
+ stats[FALCON_STAT_rx_control] * 64);
+ }
- if (nic_data->stats_pending &&
- *nic_data->stats_dma_done == FALCON_STATS_DONE) {
- nic_data->stats_pending = false;
- rmb(); /* read the done flag before the stats */
- falcon_update_stats_xmac(efx);
+ if (full_stats)
+ memcpy(full_stats, stats, sizeof(u64) * FALCON_STAT_COUNT);
+
+ if (core_stats) {
+ core_stats->rx_packets = stats[FALCON_STAT_rx_packets];
+ core_stats->tx_packets = stats[FALCON_STAT_tx_packets];
+ core_stats->rx_bytes = stats[FALCON_STAT_rx_bytes];
+ core_stats->tx_bytes = stats[FALCON_STAT_tx_bytes];
+ core_stats->rx_dropped = stats[FALCON_STAT_rx_nodesc_drop_cnt];
+ core_stats->multicast = stats[FALCON_STAT_rx_multicast];
+ core_stats->rx_length_errors =
+ stats[FALCON_STAT_rx_gtjumbo] +
+ stats[FALCON_STAT_rx_length_error];
+ core_stats->rx_crc_errors = stats[FALCON_STAT_rx_bad];
+ core_stats->rx_frame_errors = stats[FALCON_STAT_rx_align_error];
+ core_stats->rx_fifo_errors = stats[FALCON_STAT_rx_overflow];
+
+ core_stats->rx_errors = (core_stats->rx_length_errors +
+ core_stats->rx_crc_errors +
+ core_stats->rx_frame_errors +
+ stats[FALCON_STAT_rx_symbol_error]);
}
+
+ return FALCON_STAT_COUNT;
}
void falcon_start_nic_stats(struct efx_nic *efx)
@@ -1734,7 +2609,7 @@ void falcon_stop_nic_stats(struct efx_nic *efx)
/* Wait enough time for the most recent transfer to
* complete. */
for (i = 0; i < 4 && nic_data->stats_pending; i++) {
- if (*nic_data->stats_dma_done == FALCON_STATS_DONE)
+ if (FALCON_XMAC_STATS_DMA_FLAG(efx))
break;
msleep(1);
}
@@ -1778,11 +2653,12 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
*/
const struct efx_nic_type falcon_a1_nic_type = {
+ .mem_map_size = falcon_a1_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
.init = falcon_init_nic,
.dimension_resources = falcon_dimension_resources,
- .fini = efx_port_dummy_op_void,
+ .fini = falcon_irq_ack_a1,
.monitor = falcon_monitor,
.map_reset_reason = falcon_map_reset_reason,
.map_reset_flags = falcon_map_reset_flags,
@@ -1790,23 +2666,71 @@ const struct efx_nic_type falcon_a1_nic_type = {
.probe_port = falcon_probe_port,
.remove_port = falcon_remove_port,
.handle_global_event = falcon_handle_global_event,
+ .fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = falcon_prepare_flush,
.finish_flush = efx_port_dummy_op_void,
+ .describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
.stop_stats = falcon_stop_nic_stats,
.set_id_led = falcon_set_id_led,
.push_irq_moderation = falcon_push_irq_moderation,
.reconfigure_port = falcon_reconfigure_port,
+ .prepare_enable_fc_tx = falcon_a1_prepare_enable_fc_tx,
.reconfigure_mac = falcon_reconfigure_xmac,
.check_mac_fault = falcon_xmac_check_fault,
.get_wol = falcon_get_wol,
.set_wol = falcon_set_wol,
.resume_wol = efx_port_dummy_op_void,
.test_nvram = falcon_test_nvram,
+ .irq_enable_master = efx_farch_irq_enable_master,
+ .irq_test_generate = efx_farch_irq_test_generate,
+ .irq_disable_non_ev = efx_farch_irq_disable_master,
+ .irq_handle_msi = efx_farch_msi_interrupt,
+ .irq_handle_legacy = falcon_legacy_interrupt_a1,
+ .tx_probe = efx_farch_tx_probe,
+ .tx_init = efx_farch_tx_init,
+ .tx_remove = efx_farch_tx_remove,
+ .tx_write = efx_farch_tx_write,
+ .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_probe = efx_farch_rx_probe,
+ .rx_init = efx_farch_rx_init,
+ .rx_remove = efx_farch_rx_remove,
+ .rx_write = efx_farch_rx_write,
+ .rx_defer_refill = efx_farch_rx_defer_refill,
+ .ev_probe = efx_farch_ev_probe,
+ .ev_init = efx_farch_ev_init,
+ .ev_fini = efx_farch_ev_fini,
+ .ev_remove = efx_farch_ev_remove,
+ .ev_process = efx_farch_ev_process,
+ .ev_read_ack = efx_farch_ev_read_ack,
+ .ev_test_generate = efx_farch_ev_test_generate,
+
+ /* We don't expose the filter table on Falcon A1 as it is not
+ * mapped into function 0, but these implementations still
+ * work with a degenerate case of all tables set to size 0.
+ */
+ .filter_table_probe = efx_farch_filter_table_probe,
+ .filter_table_restore = efx_farch_filter_table_restore,
+ .filter_table_remove = efx_farch_filter_table_remove,
+ .filter_insert = efx_farch_filter_insert,
+ .filter_remove_safe = efx_farch_filter_remove_safe,
+ .filter_get_safe = efx_farch_filter_get_safe,
+ .filter_clear_rx = efx_farch_filter_clear_rx,
+ .filter_count_rx_used = efx_farch_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = falcon_mtd_probe,
+ .mtd_rename = falcon_mtd_rename,
+ .mtd_read = falcon_mtd_read,
+ .mtd_erase = falcon_mtd_erase,
+ .mtd_write = falcon_mtd_write,
+ .mtd_sync = falcon_mtd_sync,
+#endif
.revision = EFX_REV_FALCON_A1,
- .mem_map_size = 0x20000,
.txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
.rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
.buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
@@ -1816,12 +2740,13 @@ const struct efx_nic_type falcon_a1_nic_type = {
.rx_buffer_padding = 0x24,
.can_rx_scatter = false,
.max_interrupt_mode = EFX_INT_MODE_MSI,
- .phys_addr_channels = 4,
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
.offload_features = NETIF_F_IP_CSUM,
+ .mcdi_max_ver = -1,
};
const struct efx_nic_type falcon_b0_nic_type = {
+ .mem_map_size = falcon_b0_mem_map_size,
.probe = falcon_probe_nic,
.remove = falcon_remove_nic,
.init = falcon_init_nic,
@@ -1834,14 +2759,17 @@ const struct efx_nic_type falcon_b0_nic_type = {
.probe_port = falcon_probe_port,
.remove_port = falcon_remove_port,
.handle_global_event = falcon_handle_global_event,
+ .fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = falcon_prepare_flush,
.finish_flush = efx_port_dummy_op_void,
+ .describe_stats = falcon_describe_nic_stats,
.update_stats = falcon_update_nic_stats,
.start_stats = falcon_start_nic_stats,
.stop_stats = falcon_stop_nic_stats,
.set_id_led = falcon_set_id_led,
.push_irq_moderation = falcon_push_irq_moderation,
.reconfigure_port = falcon_reconfigure_port,
+ .prepare_enable_fc_tx = falcon_b0_prepare_enable_fc_tx,
.reconfigure_mac = falcon_reconfigure_xmac,
.check_mac_fault = falcon_xmac_check_fault,
.get_wol = falcon_get_wol,
@@ -1849,28 +2777,67 @@ const struct efx_nic_type falcon_b0_nic_type = {
.resume_wol = efx_port_dummy_op_void,
.test_chip = falcon_b0_test_chip,
.test_nvram = falcon_test_nvram,
+ .irq_enable_master = efx_farch_irq_enable_master,
+ .irq_test_generate = efx_farch_irq_test_generate,
+ .irq_disable_non_ev = efx_farch_irq_disable_master,
+ .irq_handle_msi = efx_farch_msi_interrupt,
+ .irq_handle_legacy = efx_farch_legacy_interrupt,
+ .tx_probe = efx_farch_tx_probe,
+ .tx_init = efx_farch_tx_init,
+ .tx_remove = efx_farch_tx_remove,
+ .tx_write = efx_farch_tx_write,
+ .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_probe = efx_farch_rx_probe,
+ .rx_init = efx_farch_rx_init,
+ .rx_remove = efx_farch_rx_remove,
+ .rx_write = efx_farch_rx_write,
+ .rx_defer_refill = efx_farch_rx_defer_refill,
+ .ev_probe = efx_farch_ev_probe,
+ .ev_init = efx_farch_ev_init,
+ .ev_fini = efx_farch_ev_fini,
+ .ev_remove = efx_farch_ev_remove,
+ .ev_process = efx_farch_ev_process,
+ .ev_read_ack = efx_farch_ev_read_ack,
+ .ev_test_generate = efx_farch_ev_test_generate,
+ .filter_table_probe = efx_farch_filter_table_probe,
+ .filter_table_restore = efx_farch_filter_table_restore,
+ .filter_table_remove = efx_farch_filter_table_remove,
+ .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
+ .filter_insert = efx_farch_filter_insert,
+ .filter_remove_safe = efx_farch_filter_remove_safe,
+ .filter_get_safe = efx_farch_filter_get_safe,
+ .filter_clear_rx = efx_farch_filter_clear_rx,
+ .filter_count_rx_used = efx_farch_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_insert = efx_farch_filter_rfs_insert,
+ .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = falcon_mtd_probe,
+ .mtd_rename = falcon_mtd_rename,
+ .mtd_read = falcon_mtd_read,
+ .mtd_erase = falcon_mtd_erase,
+ .mtd_write = falcon_mtd_write,
+ .mtd_sync = falcon_mtd_sync,
+#endif
.revision = EFX_REV_FALCON_B0,
- /* Map everything up to and including the RSS indirection
- * table. Don't map MSI-X table, MSI-X PBA since Linux
- * requires that they not be mapped. */
- .mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
- FR_BZ_RX_INDIRECTION_TBL_STEP *
- FR_BZ_RX_INDIRECTION_TBL_ROWS),
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
- .rx_buffer_hash_size = 0x10,
+ .rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
.rx_buffer_padding = 0,
.can_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
- .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
- * interrupt handler only supports 32
- * channels */
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
+ .mcdi_max_ver = -1,
+ .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};
diff --git a/drivers/net/ethernet/sfc/falcon_boards.c b/drivers/net/ethernet/sfc/falcon_boards.c
index ec1e99d0dca..1736f4b806a 100644
--- a/drivers/net/ethernet/sfc/falcon_boards.c
+++ b/drivers/net/ethernet/sfc/falcon_boards.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2007-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2007-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/falcon_xmac.c b/drivers/net/ethernet/sfc/falcon_xmac.c
deleted file mode 100644
index 8333865d4c9..00000000000
--- a/drivers/net/ethernet/sfc/falcon_xmac.c
+++ /dev/null
@@ -1,362 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#include <linux/delay.h>
-#include "net_driver.h"
-#include "efx.h"
-#include "nic.h"
-#include "regs.h"
-#include "io.h"
-#include "mdio_10g.h"
-#include "workarounds.h"
-
-/**************************************************************************
- *
- * MAC operations
- *
- *************************************************************************/
-
-/* Configure the XAUI driver that is an output from Falcon */
-void falcon_setup_xaui(struct efx_nic *efx)
-{
- efx_oword_t sdctl, txdrv;
-
- /* Move the XAUI into low power, unless there is no PHY, in
- * which case the XAUI will have to drive a cable. */
- if (efx->phy_type == PHY_TYPE_NONE)
- return;
-
- efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
- EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
- efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
-
- EFX_POPULATE_OWORD_8(txdrv,
- FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
- FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
- FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
- FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
- FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
- FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
- FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
- FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
- efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
-}
-
-int falcon_reset_xaui(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
- efx_oword_t reg;
- int count;
-
- /* Don't fetch MAC statistics over an XMAC reset */
- WARN_ON(nic_data->stats_disable_count == 0);
-
- /* Start reset sequence */
- EFX_POPULATE_OWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
- efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
-
- /* Wait up to 10 ms for completion, then reinitialise */
- for (count = 0; count < 1000; count++) {
- efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
- if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
- EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
- falcon_setup_xaui(efx);
- return 0;
- }
- udelay(10);
- }
- netif_err(efx, hw, efx->net_dev,
- "timed out waiting for XAUI/XGXS reset\n");
- return -ETIMEDOUT;
-}
-
-static void falcon_ack_status_intr(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
- efx_oword_t reg;
-
- if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
- return;
-
- /* We expect xgmii faults if the wireside link is down */
- if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up)
- return;
-
- /* We can only use this interrupt to signal the negative edge of
- * xaui_align [we have to poll the positive edge]. */
- if (nic_data->xmac_poll_required)
- return;
-
- efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
-}
-
-static bool falcon_xgxs_link_ok(struct efx_nic *efx)
-{
- efx_oword_t reg;
- bool align_done, link_ok = false;
- int sync_status;
-
- /* Read link status */
- efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
-
- align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
- sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
- if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
- link_ok = true;
-
- /* Clear link status ready for next read */
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
- efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
-
- return link_ok;
-}
-
-static bool falcon_xmac_link_ok(struct efx_nic *efx)
-{
- /*
- * Check MAC's XGXS link status except when using XGMII loopback
- * which bypasses the XGXS block.
- * If possible, check PHY's XGXS link status except when using
- * MAC loopback.
- */
- return (efx->loopback_mode == LOOPBACK_XGMII ||
- falcon_xgxs_link_ok(efx)) &&
- (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) ||
- LOOPBACK_INTERNAL(efx) ||
- efx_mdio_phyxgxs_lane_sync(efx));
-}
-
-static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
-{
- unsigned int max_frame_len;
- efx_oword_t reg;
- bool rx_fc = !!(efx->link_state.fc & EFX_FC_RX);
- bool tx_fc = !!(efx->link_state.fc & EFX_FC_TX);
-
- /* Configure MAC - cut-thru mode is hard wired on */
- EFX_POPULATE_OWORD_3(reg,
- FRF_AB_XM_RX_JUMBO_MODE, 1,
- FRF_AB_XM_TX_STAT_EN, 1,
- FRF_AB_XM_RX_STAT_EN, 1);
- efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
-
- /* Configure TX */
- EFX_POPULATE_OWORD_6(reg,
- FRF_AB_XM_TXEN, 1,
- FRF_AB_XM_TX_PRMBL, 1,
- FRF_AB_XM_AUTO_PAD, 1,
- FRF_AB_XM_TXCRC, 1,
- FRF_AB_XM_FCNTL, tx_fc,
- FRF_AB_XM_IPG, 0x3);
- efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
-
- /* Configure RX */
- EFX_POPULATE_OWORD_5(reg,
- FRF_AB_XM_RXEN, 1,
- FRF_AB_XM_AUTO_DEPAD, 0,
- FRF_AB_XM_ACPT_ALL_MCAST, 1,
- FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
- FRF_AB_XM_PASS_CRC_ERR, 1);
- efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
-
- /* Set frame length */
- max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
- EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
- efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
- EFX_POPULATE_OWORD_2(reg,
- FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
- FRF_AB_XM_TX_JUMBO_MODE, 1);
- efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
-
- EFX_POPULATE_OWORD_2(reg,
- FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
- FRF_AB_XM_DIS_FCNTL, !rx_fc);
- efx_writeo(efx, &reg, FR_AB_XM_FC);
-
- /* Set MAC address */
- memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
- efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
- memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
- efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
-}
-
-static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
-{
- efx_oword_t reg;
- bool xgxs_loopback = (efx->loopback_mode == LOOPBACK_XGXS);
- bool xaui_loopback = (efx->loopback_mode == LOOPBACK_XAUI);
- bool xgmii_loopback = (efx->loopback_mode == LOOPBACK_XGMII);
-
- /* XGXS block is flaky and will need to be reset if moving
- * into our out of XGMII, XGXS or XAUI loopbacks. */
- if (EFX_WORKAROUND_5147(efx)) {
- bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
- bool reset_xgxs;
-
- efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
- old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
- old_xgmii_loopback =
- EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
-
- efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
- old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
-
- /* The PHY driver may have turned XAUI off */
- reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
- (xaui_loopback != old_xaui_loopback) ||
- (xgmii_loopback != old_xgmii_loopback));
-
- if (reset_xgxs)
- falcon_reset_xaui(efx);
- }
-
- efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
- (xgxs_loopback || xaui_loopback) ?
- FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
- efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
-
- efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
- EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
- efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
-}
-
-
-/* Try to bring up the Falcon side of the Falcon-Phy XAUI link */
-static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries)
-{
- bool mac_up = falcon_xmac_link_ok(efx);
-
- if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS ||
- efx_phy_mode_disabled(efx->phy_mode))
- /* XAUI link is expected to be down */
- return mac_up;
-
- falcon_stop_nic_stats(efx);
-
- while (!mac_up && tries) {
- netif_dbg(efx, hw, efx->net_dev, "bashing xaui\n");
- falcon_reset_xaui(efx);
- udelay(200);
-
- mac_up = falcon_xmac_link_ok(efx);
- --tries;
- }
-
- falcon_start_nic_stats(efx);
-
- return mac_up;
-}
-
-bool falcon_xmac_check_fault(struct efx_nic *efx)
-{
- return !falcon_xmac_link_ok_retry(efx, 5);
-}
-
-int falcon_reconfigure_xmac(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
-
- falcon_reconfigure_xgxs_core(efx);
- falcon_reconfigure_xmac_core(efx);
-
- falcon_reconfigure_mac_wrapper(efx);
-
- nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
- falcon_ack_status_intr(efx);
-
- return 0;
-}
-
-void falcon_update_stats_xmac(struct efx_nic *efx)
-{
- struct efx_mac_stats *mac_stats = &efx->mac_stats;
-
- /* Update MAC stats from DMAed values */
- FALCON_STAT(efx, XgRxOctets, rx_bytes);
- FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes);
- FALCON_STAT(efx, XgRxPkts, rx_packets);
- FALCON_STAT(efx, XgRxPktsOK, rx_good);
- FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast);
- FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast);
- FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast);
- FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64);
- FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo);
- FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo);
- FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64);
- FALCON_STAT(efx, XgRxDropEvents, rx_overflow);
- FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad);
- FALCON_STAT(efx, XgRxAlignError, rx_align_error);
- FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error);
- FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error);
- FALCON_STAT(efx, XgRxControlPkts, rx_control);
- FALCON_STAT(efx, XgRxPausePkts, rx_pause);
- FALCON_STAT(efx, XgRxPkts64Octets, rx_64);
- FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127);
- FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255);
- FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511);
- FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023);
- FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx);
- FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo);
- FALCON_STAT(efx, XgRxLengthError, rx_length_error);
- FALCON_STAT(efx, XgTxPkts, tx_packets);
- FALCON_STAT(efx, XgTxOctets, tx_bytes);
- FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast);
- FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast);
- FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast);
- FALCON_STAT(efx, XgTxControlPkts, tx_control);
- FALCON_STAT(efx, XgTxPausePkts, tx_pause);
- FALCON_STAT(efx, XgTxPkts64Octets, tx_64);
- FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127);
- FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255);
- FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511);
- FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023);
- FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx);
- FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo);
- FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64);
- FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo);
- FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp);
- FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error);
- FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);
-
- /* Update derived statistics */
- efx_update_diff_stat(&mac_stats->tx_good_bytes,
- mac_stats->tx_bytes - mac_stats->tx_bad_bytes -
- mac_stats->tx_control * 64);
- efx_update_diff_stat(&mac_stats->rx_bad_bytes,
- mac_stats->rx_bytes - mac_stats->rx_good_bytes -
- mac_stats->rx_control * 64);
-}
-
-void falcon_poll_xmac(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
-
- if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
- !nic_data->xmac_poll_required)
- return;
-
- nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
- falcon_ack_status_intr(efx);
-}
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
new file mode 100644
index 00000000000..c0907d884d7
--- /dev/null
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -0,0 +1,2942 @@
+/****************************************************************************
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2013 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/crc32.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "nic.h"
+#include "farch_regs.h"
+#include "io.h"
+#include "workarounds.h"
+
+/* Falcon-architecture (SFC4000 and SFC9000-family) support */
+
+/**************************************************************************
+ *
+ * Configurable values
+ *
+ **************************************************************************
+ */
+
+/* This is set to 16 for a good reason. In summary, if larger than
+ * 16, the descriptor cache holds more than a default socket
+ * buffer's worth of packets (for UDP we can only have at most one
+ * socket buffer's worth outstanding). This combined with the fact
+ * that we only get 1 TX event per descriptor cache means the NIC
+ * goes idle.
+ */
+#define TX_DC_ENTRIES 16
+#define TX_DC_ENTRIES_ORDER 1
+
+#define RX_DC_ENTRIES 64
+#define RX_DC_ENTRIES_ORDER 3
+
+/* If EFX_MAX_INT_ERRORS internal errors occur within
+ * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
+ * disable it.
+ */
+#define EFX_INT_ERROR_EXPIRE 3600
+#define EFX_MAX_INT_ERRORS 5
+
+/* Depth of RX flush request fifo */
+#define EFX_RX_FLUSH_COUNT 4
+
+/* Driver generated events */
+#define _EFX_CHANNEL_MAGIC_TEST 0x000101
+#define _EFX_CHANNEL_MAGIC_FILL 0x000102
+#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
+#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
+
+#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
+#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
+
+#define EFX_CHANNEL_MAGIC_TEST(_channel) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
+#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
+ efx_rx_queue_index(_rx_queue))
+#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
+ efx_rx_queue_index(_rx_queue))
+#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
+ _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
+ (_tx_queue)->queue)
+
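+/* Worked example (assuming the channel/queue number fits in the low 8 bits,
+ * as the shifts above imply): for channel 3, EFX_CHANNEL_MAGIC_TEST()
+ * evaluates to (0x000101 << 8) | 3 == 0x00010103, and
+ * _EFX_CHANNEL_MAGIC_CODE() recovers 0x000101.
+ */
+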
+static void efx_farch_magic_event(struct efx_channel *channel, u32 magic);
+
+/**************************************************************************
+ *
+ * Hardware access
+ *
+ **************************************************************************/
+
+static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
+ unsigned int index)
+{
+ efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
+ value, index);
+}
+
+static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
+ const efx_oword_t *mask)
+{
+ return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
+ ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
+}
+
+int efx_farch_test_registers(struct efx_nic *efx,
+ const struct efx_farch_register_test *regs,
+ size_t n_regs)
+{
+ unsigned address = 0, i, j;
+ efx_oword_t mask, imask, original, reg, buf;
+
+ for (i = 0; i < n_regs; ++i) {
+ address = regs[i].address;
+ mask = imask = regs[i].mask;
+ EFX_INVERT_OWORD(imask);
+
+ efx_reado(efx, &original, address);
+
+ /* bit sweep on and off */
+ for (j = 0; j < 128; j++) {
+ if (!EFX_EXTRACT_OWORD32(mask, j, j))
+ continue;
+
+ /* Test this testable bit can be set in isolation */
+ EFX_AND_OWORD(reg, original, mask);
+ EFX_SET_OWORD32(reg, j, j, 1);
+
+ efx_writeo(efx, &reg, address);
+ efx_reado(efx, &buf, address);
+
+ if (efx_masked_compare_oword(&reg, &buf, &mask))
+ goto fail;
+
+ /* Test this testable bit can be cleared in isolation */
+ EFX_OR_OWORD(reg, original, mask);
+ EFX_SET_OWORD32(reg, j, j, 0);
+
+ efx_writeo(efx, &reg, address);
+ efx_reado(efx, &buf, address);
+
+ if (efx_masked_compare_oword(&reg, &buf, &mask))
+ goto fail;
+ }
+
+ efx_writeo(efx, &original, address);
+ }
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev,
+ "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
+ " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
+ EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
+ return -EIO;
+}
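+
+/* A sketch of how the register table above is consumed (only .address and
+ * .mask are read): each testable bit is written as 1 and then as 0 in
+ * isolation and read back; any mismatch under the mask fails the whole test
+ * with -EIO, and the original register value is restored afterwards.
+ */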
+
+/**************************************************************************
+ *
+ * Special buffer handling
+ * Special buffers are used for event queues and the TX and RX
+ * descriptor rings.
+ *
+ *************************************************************************/
+
+/*
+ * Initialise a special buffer
+ *
+ * This will define a buffer (previously allocated via
+ * efx_alloc_special_buffer()) in the buffer table, allowing
+ * it to be used for event queues, descriptor rings etc.
+ */
+static void
+efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ efx_qword_t buf_desc;
+ unsigned int index;
+ dma_addr_t dma_addr;
+ int i;
+
+ EFX_BUG_ON_PARANOID(!buffer->buf.addr);
+
+ /* Write buffer descriptors to NIC */
+ for (i = 0; i < buffer->entries; i++) {
+ index = buffer->index + i;
+ dma_addr = buffer->buf.dma_addr + (i * EFX_BUF_SIZE);
+ netif_dbg(efx, probe, efx->net_dev,
+ "mapping special buffer %d at %llx\n",
+ index, (unsigned long long)dma_addr);
+ EFX_POPULATE_QWORD_3(buf_desc,
+ FRF_AZ_BUF_ADR_REGION, 0,
+ FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
+ FRF_AZ_BUF_OWNER_ID_FBUF, 0);
+ efx_write_buf_tbl(efx, &buf_desc, index);
+ }
+}
+
+/* Unmaps a buffer and clears the buffer table entries */
+static void
+efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ efx_oword_t buf_tbl_upd;
+ unsigned int start = buffer->index;
+ unsigned int end = (buffer->index + buffer->entries - 1);
+
+ if (!buffer->entries)
+ return;
+
+ netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
+ buffer->index, buffer->index + buffer->entries - 1);
+
+ EFX_POPULATE_OWORD_4(buf_tbl_upd,
+ FRF_AZ_BUF_UPD_CMD, 0,
+ FRF_AZ_BUF_CLR_CMD, 1,
+ FRF_AZ_BUF_CLR_END_ID, end,
+ FRF_AZ_BUF_CLR_START_ID, start);
+ efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
+}
+
+/*
+ * Allocate a new special buffer
+ *
+ * This allocates memory for a new buffer, clears it and allocates a
+ * new buffer ID range. It does not write into the buffer table.
+ *
+ * This call will allocate 4KB buffers, since 8KB buffers can't be
+ * used for event queues and descriptor rings.
+ */
+static int efx_alloc_special_buffer(struct efx_nic *efx,
+ struct efx_special_buffer *buffer,
+ unsigned int len)
+{
+ len = ALIGN(len, EFX_BUF_SIZE);
+
+ if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
+ return -ENOMEM;
+ buffer->entries = len / EFX_BUF_SIZE;
+ BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));
+
+ /* Select new buffer ID */
+ buffer->index = efx->next_buffer_table;
+ efx->next_buffer_table += buffer->entries;
+#ifdef CONFIG_SFC_SRIOV
+ BUG_ON(efx_sriov_enabled(efx) &&
+ efx->vf_buftbl_base < efx->next_buffer_table);
+#endif
+
+ netif_dbg(efx, probe, efx->net_dev,
+ "allocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->buf.dma_addr, len,
+ buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
+
+ return 0;
+}
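+
+/* Illustrative sizing (assuming EFX_BUF_SIZE is the 4KB unit described
+ * above): a 512-entry descriptor ring of 8-byte efx_qword_t entries needs
+ * 4096 bytes, i.e. one buffer and one buffer-table entry; a 1024-entry ring
+ * needs two.
+ */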
+
+static void
+efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
+{
+ if (!buffer->buf.addr)
+ return;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "deallocating special buffers %d-%d at %llx+%x "
+ "(virt %p phys %llx)\n", buffer->index,
+ buffer->index + buffer->entries - 1,
+ (u64)buffer->buf.dma_addr, buffer->buf.len,
+ buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
+
+ efx_nic_free_buffer(efx, &buffer->buf);
+ buffer->entries = 0;
+}
+
+/**************************************************************************
+ *
+ * TX path
+ *
+ **************************************************************************/
+
+/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
+static inline void efx_farch_notify_tx_desc(struct efx_tx_queue *tx_queue)
+{
+ unsigned write_ptr;
+ efx_dword_t reg;
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
+ efx_writed_page(tx_queue->efx, &reg,
+ FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
+}
+
+/* Write pointer and first descriptor for TX descriptor ring */
+static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
+ const efx_qword_t *txd)
+{
+ unsigned write_ptr;
+ efx_oword_t reg;
+
+ BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
+ BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
+
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
+ FRF_AZ_TX_DESC_WPTR, write_ptr);
+ reg.qword[0] = *txd;
+ efx_writeo_page(tx_queue->efx, &reg,
+ FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
+}
+
+
+/* For each entry inserted into the software descriptor ring, create a
+ * descriptor in the hardware TX descriptor ring (in host memory), and
+ * write a doorbell.
+ */
+void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
+{
+

+ struct efx_tx_buffer *buffer;
+ efx_qword_t *txd;
+ unsigned write_ptr;
+ unsigned old_write_count = tx_queue->write_count;
+
+ BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+
+ do {
+ write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+ buffer = &tx_queue->buffer[write_ptr];
+ txd = efx_tx_desc(tx_queue, write_ptr);
+ ++tx_queue->write_count;
+
+ EFX_BUG_ON_PARANOID(buffer->flags & EFX_TX_BUF_OPTION);
+
+ /* Create TX descriptor ring entry */
+ BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
+ EFX_POPULATE_QWORD_4(*txd,
+ FSF_AZ_TX_KER_CONT,
+ buffer->flags & EFX_TX_BUF_CONT,
+ FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
+ FSF_AZ_TX_KER_BUF_REGION, 0,
+ FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
+ } while (tx_queue->write_count != tx_queue->insert_count);
+
+ wmb(); /* Ensure descriptors are written before they are fetched */
+
+ if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
+ txd = efx_tx_desc(tx_queue,
+ old_write_count & tx_queue->ptr_mask);
+ efx_farch_push_tx_desc(tx_queue, txd);
+ ++tx_queue->pushes;
+ } else {
+ efx_farch_notify_tx_desc(tx_queue);
+ }
+}
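+
+/* Ring-index arithmetic used above, as a worked example: with a 512-entry
+ * ring, ptr_mask == 511, so a running write_count of 515 maps to hardware
+ * descriptor slot 515 & 511 == 3. insert_count and write_count keep
+ * incrementing; only the masked value indexes the ring.
+ */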
+
+/* Allocate hardware resources for a TX queue */
+int efx_farch_tx_probe(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ unsigned entries;
+
+ entries = tx_queue->ptr_mask + 1;
+ return efx_alloc_special_buffer(efx, &tx_queue->txd,
+ entries * sizeof(efx_qword_t));
+}
+
+void efx_farch_tx_init(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t reg;
+
+ /* Pin TX descriptor ring */
+ efx_init_special_buffer(efx, &tx_queue->txd);
+
+ /* Push TX descriptor ring to card */
+ EFX_POPULATE_OWORD_10(reg,
+ FRF_AZ_TX_DESCQ_EN, 1,
+ FRF_AZ_TX_ISCSI_DDIG_EN, 0,
+ FRF_AZ_TX_ISCSI_HDIG_EN, 0,
+ FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
+ FRF_AZ_TX_DESCQ_EVQ_ID,
+ tx_queue->channel->channel,
+ FRF_AZ_TX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
+ FRF_AZ_TX_DESCQ_SIZE,
+ __ffs(tx_queue->txd.entries),
+ FRF_AZ_TX_DESCQ_TYPE, 0,
+ FRF_BZ_TX_NON_IP_DROP_DIS, 1);
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
+ EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
+ !csum);
+ }
+
+ efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
+ tx_queue->queue);
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
+ /* Only 128 bits in this register */
+ BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
+
+ efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
+ if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
+ __clear_bit_le(tx_queue->queue, &reg);
+ else
+ __set_bit_le(tx_queue->queue, &reg);
+ efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
+ }
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_POPULATE_OWORD_1(reg,
+ FRF_BZ_TX_PACE,
+ (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
+ FFE_BZ_TX_PACE_OFF :
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
+ tx_queue->queue);
+ }
+}
+
+static void efx_farch_flush_tx_queue(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t tx_flush_descq;
+
+ WARN_ON(atomic_read(&tx_queue->flush_outstanding));
+ atomic_set(&tx_queue->flush_outstanding, 1);
+
+ EFX_POPULATE_OWORD_2(tx_flush_descq,
+ FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
+ efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
+}
+
+void efx_farch_tx_fini(struct efx_tx_queue *tx_queue)
+{
+ struct efx_nic *efx = tx_queue->efx;
+ efx_oword_t tx_desc_ptr;
+
+ /* Remove TX descriptor ring from card */
+ EFX_ZERO_OWORD(tx_desc_ptr);
+ efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+ tx_queue->queue);
+
+ /* Unpin TX descriptor ring */
+ efx_fini_special_buffer(efx, &tx_queue->txd);
+}
+
+/* Free buffers backing TX queue */
+void efx_farch_tx_remove(struct efx_tx_queue *tx_queue)
+{
+ efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
+}
+
+/**************************************************************************
+ *
+ * RX path
+ *
+ **************************************************************************/
+
+/* This creates an entry in the RX descriptor queue */
+static inline void
+efx_farch_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
+{
+ struct efx_rx_buffer *rx_buf;
+ efx_qword_t *rxd;
+
+ rxd = efx_rx_desc(rx_queue, index);
+ rx_buf = efx_rx_buffer(rx_queue, index);
+ EFX_POPULATE_QWORD_3(*rxd,
+ FSF_AZ_RX_KER_BUF_SIZE,
+ rx_buf->len -
+ rx_queue->efx->type->rx_buffer_padding,
+ FSF_AZ_RX_KER_BUF_REGION, 0,
+ FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
+}
+
+/* This writes to the RX_DESC_WPTR register for the specified receive
+ * descriptor ring.
+ */
+void efx_farch_rx_write(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ efx_dword_t reg;
+ unsigned write_ptr;
+
+ while (rx_queue->notified_count != rx_queue->added_count) {
+ efx_farch_build_rx_desc(
+ rx_queue,
+ rx_queue->notified_count & rx_queue->ptr_mask);
+ ++rx_queue->notified_count;
+ }
+
+ wmb();
+ write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
+ efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
+ efx_rx_queue_index(rx_queue));
+}
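+
+/* Worked example of the loop above: with notified_count == 100 and
+ * added_count == 104, descriptors are built for ring slots 100..103 (masked
+ * by ptr_mask), and the doorbell write then exposes a write pointer of
+ * 104 & ptr_mask to the hardware.
+ */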
+
+int efx_farch_rx_probe(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned entries;
+
+ entries = rx_queue->ptr_mask + 1;
+ return efx_alloc_special_buffer(efx, &rx_queue->rxd,
+ entries * sizeof(efx_qword_t));
+}
+
+void efx_farch_rx_init(struct efx_rx_queue *rx_queue)
+{
+ efx_oword_t rx_desc_ptr;
+ struct efx_nic *efx = rx_queue->efx;
+ bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
+ bool iscsi_digest_en = is_b0;
+ bool jumbo_en;
+
+ /* For kernel-mode queues in Falcon A1, the JUMBO flag enables
+ * DMA to continue after a PCIe page boundary (and scattering
+ * is not possible). In Falcon B0 and Siena, it enables
+ * scatter.
+ */
+ jumbo_en = !is_b0 || efx->rx_scatter;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "RX queue %d ring in special buffers %d-%d\n",
+ efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
+ rx_queue->rxd.index + rx_queue->rxd.entries - 1);
+
+ rx_queue->scatter_n = 0;
+
+ /* Pin RX descriptor ring */
+ efx_init_special_buffer(efx, &rx_queue->rxd);
+
+ /* Push RX descriptor ring to card */
+ EFX_POPULATE_OWORD_10(rx_desc_ptr,
+ FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
+ FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
+ FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
+ FRF_AZ_RX_DESCQ_EVQ_ID,
+ efx_rx_queue_channel(rx_queue)->channel,
+ FRF_AZ_RX_DESCQ_OWNER_ID, 0,
+ FRF_AZ_RX_DESCQ_LABEL,
+ efx_rx_queue_index(rx_queue),
+ FRF_AZ_RX_DESCQ_SIZE,
+ __ffs(rx_queue->rxd.entries),
+ FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
+ FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
+ FRF_AZ_RX_DESCQ_EN, 1);
+ efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+ efx_rx_queue_index(rx_queue));
+}
+
+static void efx_farch_flush_rx_queue(struct efx_rx_queue *rx_queue)
+{
+ struct efx_nic *efx = rx_queue->efx;
+ efx_oword_t rx_flush_descq;
+
+ EFX_POPULATE_OWORD_2(rx_flush_descq,
+ FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
+ FRF_AZ_RX_FLUSH_DESCQ,
+ efx_rx_queue_index(rx_queue));
+ efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
+}
+
+void efx_farch_rx_fini(struct efx_rx_queue *rx_queue)
+{
+ efx_oword_t rx_desc_ptr;
+ struct efx_nic *efx = rx_queue->efx;
+
+ /* Remove RX descriptor ring from card */
+ EFX_ZERO_OWORD(rx_desc_ptr);
+ efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+ efx_rx_queue_index(rx_queue));
+
+ /* Unpin RX descriptor ring */
+ efx_fini_special_buffer(efx, &rx_queue->rxd);
+}
+
+/* Free buffers backing RX queue */
+void efx_farch_rx_remove(struct efx_rx_queue *rx_queue)
+{
+ efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
+}
+
+/**************************************************************************
+ *
+ * Flush handling
+ *
+ **************************************************************************/
+
+/* efx_farch_do_flush() must be woken up when all flushes are completed,
+ * or more RX flushes can be kicked off.
+ */
+static bool efx_farch_flush_wake(struct efx_nic *efx)
+{
+ /* Ensure that all updates are visible to efx_farch_do_flush() */
+ smp_mb();
+
+ return (atomic_read(&efx->active_queues) == 0 ||
+ (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
+ && atomic_read(&efx->rxq_flush_pending) > 0));
+}
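+
+/* In other words (with EFX_RX_FLUSH_COUNT == 4): the waiter is woken either
+ * when every active queue has drained, or when fewer than four RX flushes
+ * are outstanding while at least one more is still pending and can be
+ * kicked off.
+ */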
+
+static bool efx_check_tx_flush_complete(struct efx_nic *efx)
+{
+ bool i = true;
+ efx_oword_t txd_ptr_tbl;
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_reado_table(efx, &txd_ptr_tbl,
+ FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
+ if (EFX_OWORD_FIELD(txd_ptr_tbl,
+ FRF_AZ_TX_DESCQ_FLUSH) ||
+ EFX_OWORD_FIELD(txd_ptr_tbl,
+ FRF_AZ_TX_DESCQ_EN)) {
+ netif_dbg(efx, hw, efx->net_dev,
+ "flush did not complete on TXQ %d\n",
+ tx_queue->queue);
+ i = false;
+ } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
+ 1, 0)) {
+ /* The flush is complete, but we didn't
+ * receive a flush completion event
+ */
+ netif_dbg(efx, hw, efx->net_dev,
+ "flush complete on TXQ %d, so drain "
+ "the queue\n", tx_queue->queue);
+ /* Don't need to increment active_queues as it
+ * has already been incremented for the queues
+ * which did not drain
+ */
+ efx_farch_magic_event(channel,
+ EFX_CHANNEL_MAGIC_TX_DRAIN(
+ tx_queue));
+ }
+ }
+ }
+
+ return i;
+}
+
+/* Flush all the transmit queues, and continue flushing receive queues until
+ * they're all flushed. Wait for the DRAIN events to be received so that there
+ * are no more RX and TX events left on any channel. */
+static int efx_farch_do_flush(struct efx_nic *efx)
+{
+ unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ struct efx_tx_queue *tx_queue;
+ int rc = 0;
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ efx_farch_flush_tx_queue(tx_queue);
+ }
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ rx_queue->flush_pending = true;
+ atomic_inc(&efx->rxq_flush_pending);
+ }
+ }
+
+ while (timeout && atomic_read(&efx->active_queues) > 0) {
+ /* If SRIOV is enabled, then offload receive queue flushing to
+ * the firmware (though we will still have to poll for
+ * completion). If that fails, fall back to the old scheme.
+ */
+ if (efx_sriov_enabled(efx)) {
+ rc = efx_mcdi_flush_rxqs(efx);
+ if (!rc)
+ goto wait;
+ }
+
+ /* The hardware supports four concurrent rx flushes, each of
+ * which may need to be retried if there is an outstanding
+ * descriptor fetch
+ */
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel) {
+ if (atomic_read(&efx->rxq_flush_outstanding) >=
+ EFX_RX_FLUSH_COUNT)
+ break;
+
+ if (rx_queue->flush_pending) {
+ rx_queue->flush_pending = false;
+ atomic_dec(&efx->rxq_flush_pending);
+ atomic_inc(&efx->rxq_flush_outstanding);
+ efx_farch_flush_rx_queue(rx_queue);
+ }
+ }
+ }
+
+ wait:
+ timeout = wait_event_timeout(efx->flush_wq,
+ efx_farch_flush_wake(efx),
+ timeout);
+ }
+
+ if (atomic_read(&efx->active_queues) &&
+ !efx_check_tx_flush_complete(efx)) {
+ netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
+ "(rx %d+%d)\n", atomic_read(&efx->active_queues),
+ atomic_read(&efx->rxq_flush_outstanding),
+ atomic_read(&efx->rxq_flush_pending));
+ rc = -ETIMEDOUT;
+
+ atomic_set(&efx->active_queues, 0);
+ atomic_set(&efx->rxq_flush_pending, 0);
+ atomic_set(&efx->rxq_flush_outstanding, 0);
+ }
+
+ return rc;
+}
+
+int efx_farch_fini_dmaq(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+ struct efx_rx_queue *rx_queue;
+ int rc = 0;
+
+ /* Do not attempt to write to the NIC during EEH recovery */
+ if (efx->state != STATE_RECOVERY) {
+ /* Only perform flush if DMA is enabled */
+ if (efx->pci_dev->is_busmaster) {
+ efx->type->prepare_flush(efx);
+ rc = efx_farch_do_flush(efx);
+ efx->type->finish_flush(efx);
+ }
+
+ efx_for_each_channel(channel, efx) {
+ efx_for_each_channel_rx_queue(rx_queue, channel)
+ efx_farch_rx_fini(rx_queue);
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ efx_farch_tx_fini(tx_queue);
+ }
+ }
+
+ return rc;
+}
+
+/**************************************************************************
+ *
+ * Event queue processing
+ * Event queues are processed by per-channel tasklets.
+ *
+ **************************************************************************/
+
+/* Update a channel's event queue's read pointer (RPTR) register
+ *
+ * This writes the EVQ_RPTR_REG register for the specified channel's
+ * event queue.
+ */
+void efx_farch_ev_read_ack(struct efx_channel *channel)
+{
+ efx_dword_t reg;
+ struct efx_nic *efx = channel->efx;
+
+ EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
+ channel->eventq_read_ptr & channel->eventq_mask);
+
+ /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
+ * of 4 bytes, but it is really 16 bytes just like later revisions.
+ */
+ efx_writed(efx, &reg,
+ efx->type->evq_rptr_tbl_base +
+ FR_BZ_EVQ_RPTR_STEP * channel->channel);
+}
+
+/* Use HW to insert a SW defined event */
+void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
+ efx_qword_t *event)
+{
+ efx_oword_t drv_ev_reg;
+
+ BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
+ FRF_AZ_DRV_EV_DATA_WIDTH != 64);
+ drv_ev_reg.u32[0] = event->u32[0];
+ drv_ev_reg.u32[1] = event->u32[1];
+ drv_ev_reg.u32[2] = 0;
+ drv_ev_reg.u32[3] = 0;
+ EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
+ efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
+}
+
+static void efx_farch_magic_event(struct efx_channel *channel, u32 magic)
+{
+ efx_qword_t event;
+
+ EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
+ FSE_AZ_EV_CODE_DRV_GEN_EV,
+ FSF_AZ_DRV_GEN_EV_MAGIC, magic);
+ efx_farch_generate_event(channel->efx, channel->channel, &event);
+}
+
+/* Handle a transmit completion event
+ *
+ * The NIC batches TX completion events; the message we receive is of
+ * the form "complete all TX events up to this index".
+ */
+static int
+efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ unsigned int tx_ev_desc_ptr;
+ unsigned int tx_ev_q_label;
+ struct efx_tx_queue *tx_queue;
+ struct efx_nic *efx = channel->efx;
+ int tx_packets = 0;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return 0;
+
+ if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
+ /* Transmit completion */
+ tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
+ tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+ tx_queue = efx_channel_get_tx_queue(
+ channel, tx_ev_q_label % EFX_TXQ_TYPES);
+ tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
+ tx_queue->ptr_mask);
+ efx_xmit_done(tx_queue, tx_ev_desc_ptr);
+ } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
+ /* Rewrite the FIFO write pointer */
+ tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
+ tx_queue = efx_channel_get_tx_queue(
+ channel, tx_ev_q_label % EFX_TXQ_TYPES);
+
+ netif_tx_lock(efx->net_dev);
+ efx_farch_notify_tx_desc(tx_queue);
+ netif_tx_unlock(efx->net_dev);
+ } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
+ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ } else {
+ netif_err(efx, tx_err, efx->net_dev,
+ "channel %d unexpected TX event "
+ EFX_QWORD_FMT"\n", channel->channel,
+ EFX_QWORD_VAL(*event));
+ }
+
+ return tx_packets;
+}
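+
+/* Worked example of the completion batching above: if read_count is 10 and
+ * the event carries tx_ev_desc_ptr == 14 on a ring with ptr_mask == 511,
+ * then tx_packets == (14 - 10) & 511 == 4, i.e. four descriptors were
+ * completed by this single event.
+ */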
+
+/* Decode the reason(s) why an RX event was flagged as not OK (rx_ev_pkt_ok clear). */
+static u16 efx_farch_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
+ const efx_qword_t *event)
+{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ struct efx_nic *efx = rx_queue->efx;
+ bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
+ bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
+ bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
+ bool rx_ev_other_err, rx_ev_pause_frm;
+ bool rx_ev_hdr_type, rx_ev_mcast_pkt;
+ unsigned rx_ev_pkt_type;
+
+ rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
+ rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
+ rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
+ rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
+ rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
+ rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
+ rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
+ FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
+ rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
+ rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
+ rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
+ 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
+ rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
+
+ /* Every error apart from tobe_disc and pause_frm */
+ rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
+ rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
+ rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
+
+ /* Count errors that are not in MAC stats. Ignore expected
+ * checksum errors during self-test. */
+ if (rx_ev_frm_trunc)
+ ++channel->n_rx_frm_trunc;
+ else if (rx_ev_tobe_disc)
+ ++channel->n_rx_tobe_disc;
+ else if (!efx->loopback_selftest) {
+ if (rx_ev_ip_hdr_chksum_err)
+ ++channel->n_rx_ip_hdr_chksum_err;
+ else if (rx_ev_tcp_udp_chksum_err)
+ ++channel->n_rx_tcp_udp_chksum_err;
+ }
+
+ /* TOBE_DISC is expected on unicast mismatches; don't print out an
+ * error message. FRM_TRUNC indicates RXDP dropped the packet due
+ * to a FIFO overflow.
+ */
+#ifdef DEBUG
+ if (rx_ev_other_err && net_ratelimit()) {
+ netif_dbg(efx, rx_err, efx->net_dev,
+ " RX queue %d unexpected RX event "
+ EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
+ efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
+ rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
+ rx_ev_ip_hdr_chksum_err ?
+ " [IP_HDR_CHKSUM_ERR]" : "",
+ rx_ev_tcp_udp_chksum_err ?
+ " [TCP_UDP_CHKSUM_ERR]" : "",
+ rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
+ rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
+ rx_ev_drib_nib ? " [DRIB_NIB]" : "",
+ rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
+ rx_ev_pause_frm ? " [PAUSE]" : "");
+ }
+#endif
+
+ /* The frame must be discarded if any of these are true. */
+ return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
+ rx_ev_tobe_disc | rx_ev_pause_frm) ?
+ EFX_RX_PKT_DISCARD : 0;
+}
+
+/* Handle receive events that are not in-order. Return true if this
+ * can be handled as a partial packet discard, false if it's more
+ * serious.
+ */
+static bool
+efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
+{
+ struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
+ struct efx_nic *efx = rx_queue->efx;
+ unsigned expected, dropped;
+
+ if (rx_queue->scatter_n &&
+ index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
+ rx_queue->ptr_mask)) {
+ ++channel->n_rx_nodesc_trunc;
+ return true;
+ }
+
+ expected = rx_queue->removed_count & rx_queue->ptr_mask;
+ dropped = (index - expected) & rx_queue->ptr_mask;
+ netif_info(efx, rx_err, efx->net_dev,
+ "dropped %d events (index=%d expected=%d)\n",
+ dropped, index, expected);
+
+ efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
+ RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+ return false;
+}
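+
+/* Worked example of the drop accounting above: with removed_count & ptr_mask
+ * == 20 and an event index of 23, dropped == (23 - 20) & ptr_mask == 3, so
+ * the driver reports three dropped events and schedules a reset.
+ */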
+
+/* Handle a packet received event
+ *
+ * The NIC gives a "discard" flag if it's a unicast packet with the
+ * wrong destination address
+ * Also "is multicast" and "matches multicast filter" flags can be used to
+ * discard non-matching multicast packets.
+ */
+static void
+efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
+{
+ unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
+ unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
+ unsigned expected_ptr;
+ bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
+ u16 flags;
+ struct efx_rx_queue *rx_queue;
+ struct efx_nic *efx = channel->efx;
+
+ if (unlikely(ACCESS_ONCE(efx->reset_pending)))
+ return;
+
+ rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
+ rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
+ WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
+ channel->channel);
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
+ expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
+ rx_queue->ptr_mask);
+
+ /* Check for partial drops and other errors */
+ if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
+ unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
+ if (rx_ev_desc_ptr != expected_ptr &&
+ !efx_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
+ return;
+
+ /* Discard all pending fragments */
+ if (rx_queue->scatter_n) {
+ efx_rx_packet(
+ rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
+ }
+
+ /* Return if there is no new fragment */
+ if (rx_ev_desc_ptr != expected_ptr)
+ return;
+
+ /* Discard new fragment if not SOP */
+ if (!rx_ev_sop) {
+ efx_rx_packet(
+ rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ 1, 0, EFX_RX_PKT_DISCARD);
+ ++rx_queue->removed_count;
+ return;
+ }
+ }
+
+ ++rx_queue->scatter_n;
+ if (rx_ev_cont)
+ return;
+
+ rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
+ rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
+ rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
+
+ if (likely(rx_ev_pkt_ok)) {
+ /* If packet is marked as OK then we can rely on the
+ * hardware checksum and classification.
+ */
+ flags = 0;
+ switch (rx_ev_hdr_type) {
+ case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
+ flags |= EFX_RX_PKT_TCP;
+ /* fall through */
+ case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
+ flags |= EFX_RX_PKT_CSUMMED;
+ /* fall through */
+ case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
+ case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
+ break;
+ }
+ } else {
+ flags = efx_farch_handle_rx_not_ok(rx_queue, event);
+ }
+
+ /* Detect multicast packets that didn't match the filter */
+ rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
+ if (rx_ev_mcast_pkt) {
+ unsigned int rx_ev_mcast_hash_match =
+ EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
+
+ if (unlikely(!rx_ev_mcast_hash_match)) {
+ ++channel->n_rx_mcast_mismatch;
+ flags |= EFX_RX_PKT_DISCARD;
+ }
+ }
+
+ channel->irq_mod_score += 2;
+
+ /* Handle received packet */
+ efx_rx_packet(rx_queue,
+ rx_queue->removed_count & rx_queue->ptr_mask,
+ rx_queue->scatter_n, rx_ev_byte_cnt, flags);
+ rx_queue->removed_count += rx_queue->scatter_n;
+ rx_queue->scatter_n = 0;
+}
+
+/* If this flush done event corresponds to a &struct efx_tx_queue, then
+ * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
+ * of all transmit completions.
+ */
+static void
+efx_farch_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+{
+ struct efx_tx_queue *tx_queue;
+ int qid;
+
+ qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+ if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
+ tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
+ qid % EFX_TXQ_TYPES);
+ if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
+ efx_farch_magic_event(tx_queue->channel,
+ EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
+ }
+ }
+}
+
+/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
+ * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
+ * the RX queue back to the mask of RX queues in need of flushing.
+ */
+static void
+efx_farch_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
+{
+ struct efx_channel *channel;
+ struct efx_rx_queue *rx_queue;
+ int qid;
+ bool failed;
+
+ qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
+ failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
+ if (qid >= efx->n_channels)
+ return;
+ channel = efx_get_channel(efx, qid);
+ if (!efx_channel_has_rx_queue(channel))
+ return;
+ rx_queue = efx_channel_get_rx_queue(channel);
+
+ if (failed) {
+ netif_info(efx, hw, efx->net_dev,
+ "RXQ %d flush retry\n", qid);
+ rx_queue->flush_pending = true;
+ atomic_inc(&efx->rxq_flush_pending);
+ } else {
+ efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
+ EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
+ }
+ atomic_dec(&efx->rxq_flush_outstanding);
+ if (efx_farch_flush_wake(efx))
+ wake_up(&efx->flush_wq);
+}
+
+static void
+efx_farch_handle_drain_event(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+
+ WARN_ON(atomic_read(&efx->active_queues) == 0);
+ atomic_dec(&efx->active_queues);
+ if (efx_farch_flush_wake(efx))
+ wake_up(&efx->flush_wq);
+}
+
+static void efx_farch_handle_generated_event(struct efx_channel *channel,
+ efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ struct efx_rx_queue *rx_queue =
+ efx_channel_has_rx_queue(channel) ?
+ efx_channel_get_rx_queue(channel) : NULL;
+ unsigned magic, code;
+
+ magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
+ code = _EFX_CHANNEL_MAGIC_CODE(magic);
+
+ if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
+ channel->event_test_cpu = raw_smp_processor_id();
+ } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
+ /* The queue must be empty, so we won't receive any RX
+ * events and efx_process_channel() won't refill the
+ * queue; refill it here.
+ */
+ efx_fast_push_rx_descriptors(rx_queue);
+ } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
+ efx_farch_handle_drain_event(channel);
+ } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
+ efx_farch_handle_drain_event(channel);
+ } else {
+ netif_dbg(efx, hw, efx->net_dev, "channel %d received "
+ "generated event "EFX_QWORD_FMT"\n",
+ channel->channel, EFX_QWORD_VAL(*event));
+ }
+}
+
+static void
+efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned int ev_sub_code;
+ unsigned int ev_sub_data;
+
+ ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
+ ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
+
+ switch (ev_sub_code) {
+ case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
+ channel->channel, ev_sub_data);
+ efx_farch_handle_tx_flush_done(efx, event);
+ efx_sriov_tx_flush_done(efx, event);
+ break;
+ case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
+ channel->channel, ev_sub_data);
+ efx_farch_handle_rx_flush_done(efx, event);
+ efx_sriov_rx_flush_done(efx, event);
+ break;
+ case FSE_AZ_EVQ_INIT_DONE_EV:
+ netif_dbg(efx, hw, efx->net_dev,
+ "channel %d EVQ %d initialised\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AZ_SRM_UPD_DONE_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d SRAM update done\n", channel->channel);
+ break;
+ case FSE_AZ_WAKE_UP_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d RXQ %d wakeup event\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AZ_TIMER_EV:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d RX queue %d timer expired\n",
+ channel->channel, ev_sub_data);
+ break;
+ case FSE_AA_RX_RECOVER_EV:
+ netif_err(efx, rx_err, efx->net_dev,
+ "channel %d seen DRIVER RX_RESET event. "
+ "Resetting.\n", channel->channel);
+ atomic_inc(&efx->rx_reset);
+ efx_schedule_reset(efx,
+ EFX_WORKAROUND_6555(efx) ?
+ RESET_TYPE_RX_RECOVERY :
+ RESET_TYPE_DISABLE);
+ break;
+ case FSE_BZ_RX_DSC_ERROR_EV:
+ if (ev_sub_data < EFX_VI_BASE) {
+ netif_err(efx, rx_err, efx->net_dev,
+ "RX DMA Q %d reports descriptor fetch error."
+ " RX Q %d is disabled.\n", ev_sub_data,
+ ev_sub_data);
+ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ } else
+ efx_sriov_desc_fetch_err(efx, ev_sub_data);
+ break;
+ case FSE_BZ_TX_DSC_ERROR_EV:
+ if (ev_sub_data < EFX_VI_BASE) {
+ netif_err(efx, tx_err, efx->net_dev,
+ "TX DMA Q %d reports descriptor fetch error."
+ " TX Q %d is disabled.\n", ev_sub_data,
+ ev_sub_data);
+ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ } else
+ efx_sriov_desc_fetch_err(efx, ev_sub_data);
+ break;
+ default:
+ netif_vdbg(efx, hw, efx->net_dev,
+ "channel %d unknown driver event code %d "
+ "data %04x\n", channel->channel, ev_sub_code,
+ ev_sub_data);
+ break;
+ }
+}
+
+int efx_farch_ev_process(struct efx_channel *channel, int budget)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned int read_ptr;
+ efx_qword_t event, *p_event;
+ int ev_code;
+ int tx_packets = 0;
+ int spent = 0;
+
+ read_ptr = channel->eventq_read_ptr;
+
+ for (;;) {
+ p_event = efx_event(channel, read_ptr);
+ event = *p_event;
+
+ if (!efx_event_present(&event))
+ /* End of events */
+ break;
+
+ netif_vdbg(channel->efx, intr, channel->efx->net_dev,
+ "channel %d event is "EFX_QWORD_FMT"\n",
+ channel->channel, EFX_QWORD_VAL(event));
+
+ /* Clear this event by marking it all ones */
+ EFX_SET_QWORD(*p_event);
+
+ ++read_ptr;
+
+ ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
+
+ switch (ev_code) {
+ case FSE_AZ_EV_CODE_RX_EV:
+ efx_farch_handle_rx_event(channel, &event);
+ if (++spent == budget)
+ goto out;
+ break;
+ case FSE_AZ_EV_CODE_TX_EV:
+ tx_packets += efx_farch_handle_tx_event(channel,
+ &event);
+ if (tx_packets > efx->txq_entries) {
+ spent = budget;
+ goto out;
+ }
+ break;
+ case FSE_AZ_EV_CODE_DRV_GEN_EV:
+ efx_farch_handle_generated_event(channel, &event);
+ break;
+ case FSE_AZ_EV_CODE_DRIVER_EV:
+ efx_farch_handle_driver_event(channel, &event);
+ break;
+ case FSE_CZ_EV_CODE_USER_EV:
+ efx_sriov_event(channel, &event);
+ break;
+ case FSE_CZ_EV_CODE_MCDI_EV:
+ efx_mcdi_process_event(channel, &event);
+ break;
+ case FSE_AZ_EV_CODE_GLOBAL_EV:
+ if (efx->type->handle_global_event &&
+ efx->type->handle_global_event(channel, &event))
+ break;
+ /* else fall through */
+ default:
+ netif_err(channel->efx, hw, channel->efx->net_dev,
+ "channel %d unknown event type %d (data "
+ EFX_QWORD_FMT ")\n", channel->channel,
+ ev_code, EFX_QWORD_VAL(event));
+ }
+ }
+
+out:
+ channel->eventq_read_ptr = read_ptr;
+ return spent;
+}
+
+/* Allocate buffer table entries for event queue */
+int efx_farch_ev_probe(struct efx_channel *channel)
+{
+ struct efx_nic *efx = channel->efx;
+ unsigned entries;
+
+ entries = channel->eventq_mask + 1;
+ return efx_alloc_special_buffer(efx, &channel->eventq,
+ entries * sizeof(efx_qword_t));
+}
+
+int efx_farch_ev_init(struct efx_channel *channel)
+{
+ efx_oword_t reg;
+ struct efx_nic *efx = channel->efx;
+
+ netif_dbg(efx, hw, efx->net_dev,
+ "channel %d event queue in special buffers %d-%d\n",
+ channel->channel, channel->eventq.index,
+ channel->eventq.index + channel->eventq.entries - 1);
+
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_CZ_TIMER_Q_EN, 1,
+ FRF_CZ_HOST_NOTIFY_MODE, 0,
+ FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
+ }
+
+ /* Pin event queue buffer */
+ efx_init_special_buffer(efx, &channel->eventq);
+
+ /* Fill event queue with all ones (i.e. empty events) */
+ memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
+
+ /* Push event queue to card */
+ EFX_POPULATE_OWORD_3(reg,
+ FRF_AZ_EVQ_EN, 1,
+ FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
+ FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
+ efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
+ channel->channel);
+
+ return 0;
+}
+
+void efx_farch_ev_fini(struct efx_channel *channel)
+{
+ efx_oword_t reg;
+ struct efx_nic *efx = channel->efx;
+
+ /* Remove event queue from card */
+ EFX_ZERO_OWORD(reg);
+ efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
+ channel->channel);
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
+
+ /* Unpin event queue */
+ efx_fini_special_buffer(efx, &channel->eventq);
+}
+
+/* Free buffers backing event queue */
+void efx_farch_ev_remove(struct efx_channel *channel)
+{
+ efx_free_special_buffer(channel->efx, &channel->eventq);
+}
+
+
+void efx_farch_ev_test_generate(struct efx_channel *channel)
+{
+ efx_farch_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
+}
+
+void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue)
+{
+ efx_farch_magic_event(efx_rx_queue_channel(rx_queue),
+ EFX_CHANNEL_MAGIC_FILL(rx_queue));
+}
+
+/**************************************************************************
+ *
+ * Hardware interrupts
+ * The hardware interrupt handler does very little work; all the event
+ * queue processing is carried out by per-channel tasklets.
+ *
+ **************************************************************************/
+
+/* Enable/disable/generate interrupts */
+static inline void efx_farch_interrupts(struct efx_nic *efx,
+ bool enabled, bool force)
+{
+ efx_oword_t int_en_reg_ker;
+
+ EFX_POPULATE_OWORD_3(int_en_reg_ker,
+ FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
+ FRF_AZ_KER_INT_KER, force,
+ FRF_AZ_DRV_INT_EN_KER, enabled);
+ efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
+}
+
+void efx_farch_irq_enable_master(struct efx_nic *efx)
+{
+ EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
+ wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
+
+ efx_farch_interrupts(efx, true, false);
+}
+
+void efx_farch_irq_disable_master(struct efx_nic *efx)
+{
+ /* Disable interrupts */
+ efx_farch_interrupts(efx, false, false);
+}
+
+/* Generate a test interrupt
+ * Interrupt must already have been enabled, otherwise nasty things
+ * may happen.
+ */
+void efx_farch_irq_test_generate(struct efx_nic *efx)
+{
+ efx_farch_interrupts(efx, true, true);
+}
+
+/* Process a fatal interrupt
+ * Disable bus mastering ASAP and schedule a reset
+ */
+irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
+{
+ struct falcon_nic_data *nic_data = efx->nic_data;
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ efx_oword_t fatal_intr;
+ int error, mem_perr;
+
+ efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
+ error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
+
+ netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
+ EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
+ EFX_OWORD_VAL(fatal_intr),
+ error ? "disabling bus mastering" : "no recognised error");
+
+ /* If this is a memory parity error, dump which blocks are offending */
+ mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
+ EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
+ if (mem_perr) {
+ efx_oword_t reg;
+ efx_reado(efx, &reg, FR_AZ_MEM_STAT);
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
+ EFX_OWORD_VAL(reg));
+ }
+
+ /* Disable both devices */
+ pci_clear_master(efx->pci_dev);
+ if (efx_nic_is_dual_func(efx))
+ pci_clear_master(nic_data->pci_dev2);
+ efx_farch_irq_disable_master(efx);
+
+ /* Count errors and reset or disable the NIC accordingly */
+ if (efx->int_error_count == 0 ||
+ time_after(jiffies, efx->int_error_expire)) {
+ efx->int_error_count = 0;
+ efx->int_error_expire =
+ jiffies + EFX_INT_ERROR_EXPIRE * HZ;
+ }
+ if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR - reset scheduled\n");
+ efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
+ } else {
+ netif_err(efx, hw, efx->net_dev,
+ "SYSTEM ERROR - max number of errors seen."
+ "NIC will be disabled\n");
+ efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+ }
+
+ return IRQ_HANDLED;
+}
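+
+/* With the defaults above (EFX_INT_ERROR_EXPIRE == 3600 seconds,
+ * EFX_MAX_INT_ERRORS == 5), the first four fatal interrupts within the
+ * hour-long window each schedule RESET_TYPE_INT_ERROR; the fifth schedules
+ * RESET_TYPE_DISABLE and the NIC stays down.
+ */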
+
+/* Handle a legacy interrupt
+ * Acknowledges the interrupt and schedules event queue processing.
+ */
+irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
+{
+ struct efx_nic *efx = dev_id;
+ bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ irqreturn_t result = IRQ_NONE;
+ struct efx_channel *channel;
+ efx_dword_t reg;
+ u32 queues;
+ int syserr;
+
+ /* Read the ISR which also ACKs the interrupts */
+ efx_readd(efx, &reg, FR_BZ_INT_ISR0);
+ queues = EFX_EXTRACT_DWORD(reg, 0, 31);
+
+ /* Legacy interrupts are disabled too late by the EEH kernel
+ * code. Disable them earlier.
+ * If an EEH error occurred, the read will have returned all ones.
+ */
+ if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
+ !efx->eeh_disabled_legacy_irq) {
+ disable_irq_nosync(efx->legacy_irq);
+ efx->eeh_disabled_legacy_irq = true;
+ }
+
+ /* Handle non-event-queue sources */
+ if (queues & (1U << efx->irq_level) && soft_enabled) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_farch_fatal_interrupt(efx);
+ efx->last_irq_cpu = raw_smp_processor_id();
+ }
+
+ if (queues != 0) {
+ efx->irq_zero_count = 0;
+
+ /* Schedule processing of any interrupting queues */
+ if (likely(soft_enabled)) {
+ efx_for_each_channel(channel, efx) {
+ if (queues & 1)
+ efx_schedule_channel_irq(channel);
+ queues >>= 1;
+ }
+ }
+ result = IRQ_HANDLED;
+
+ } else {
+ efx_qword_t *event;
+
+ /* Legacy ISR read can return zero once (SF bug 15783) */
+
+ /* We can't return IRQ_HANDLED more than once on seeing ISR=0
+ * because this might be a shared interrupt. */
+ if (efx->irq_zero_count++ == 0)
+ result = IRQ_HANDLED;
+
+ /* Ensure we schedule or rearm all event queues */
+ if (likely(soft_enabled)) {
+ efx_for_each_channel(channel, efx) {
+ event = efx_event(channel,
+ channel->eventq_read_ptr);
+ if (efx_event_present(event))
+ efx_schedule_channel_irq(channel);
+ else
+ efx_farch_ev_read_ack(channel);
+ }
+ }
+ }
+
+ if (result == IRQ_HANDLED)
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+
+ return result;
+}
+
+/* Handle an MSI interrupt
+ *
+ * Handle an MSI hardware interrupt. This routine schedules event
+ * queue processing. No interrupt acknowledgement cycle is necessary.
+ * Also, we never need to check that the interrupt is for us, since
+ * MSI interrupts cannot be shared.
+ */
+irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
+{
+ struct efx_msi_context *context = dev_id;
+ struct efx_nic *efx = context->efx;
+ efx_oword_t *int_ker = efx->irq_status.addr;
+ int syserr;
+
+ netif_vdbg(efx, intr, efx->net_dev,
+ "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
+ irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+
+ if (unlikely(!ACCESS_ONCE(efx->irq_soft_enabled)))
+ return IRQ_HANDLED;
+
+ /* Handle non-event-queue sources */
+ if (context->index == efx->irq_level) {
+ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
+ if (unlikely(syserr))
+ return efx_farch_fatal_interrupt(efx);
+ efx->last_irq_cpu = raw_smp_processor_id();
+ }
+
+ /* Schedule processing of the channel */
+ efx_schedule_channel_irq(efx->channel[context->index]);
+
+ return IRQ_HANDLED;
+}
+
+
+/* Set up the RSS indirection table.
+ * This maps from the packet's hash value to an RX queue.
+ */
+void efx_farch_rx_push_indir_table(struct efx_nic *efx)
+{
+ size_t i = 0;
+ efx_dword_t dword;
+
+ if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+ return;
+
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+ FR_BZ_RX_INDIRECTION_TBL_ROWS);
+
+ for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
+ EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
+ efx->rx_indir_table[i]);
+ efx_writed(efx, &dword,
+ FR_BZ_RX_INDIRECTION_TBL +
+ FR_BZ_RX_INDIRECTION_TBL_STEP * i);
+ }
+}
+
+/* Looks at available SRAM resources and works out how many queues we
+ * can support, and where things like descriptor caches should live.
+ *
+ * SRAM is split up as follows:
+ * 0 buftbl entries for channels
+ * efx->vf_buftbl_base buftbl entries for SR-IOV
+ * efx->rx_dc_base RX descriptor caches
+ * efx->tx_dc_base TX descriptor caches
+ */
+void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
+{
+ unsigned vi_count, buftbl_min;
+
+ /* Account for the buffer table entries backing the datapath channels
+ * and the descriptor caches for those channels.
+ */
+ buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
+ efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
+ efx->n_channels * EFX_MAX_EVQ_SIZE)
+ * sizeof(efx_qword_t) / EFX_BUF_SIZE);
+ vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
+
+#ifdef CONFIG_SFC_SRIOV
+ if (efx_sriov_wanted(efx)) {
+ unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
+
+ efx->vf_buftbl_base = buftbl_min;
+
+ vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
+ vi_count = max(vi_count, EFX_VI_BASE);
+ buftbl_free = (sram_lim_qw - buftbl_min -
+ vi_count * vi_dc_entries);
+
+ entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
+ efx_vf_size(efx));
+ vf_limit = min(buftbl_free / entries_per_vf,
+ (1024U - EFX_VI_BASE) >> efx->vi_scale);
+
+ if (efx->vf_count > vf_limit) {
+ netif_err(efx, probe, efx->net_dev,
+ "Reducing VF count from from %d to %d\n",
+ efx->vf_count, vf_limit);
+ efx->vf_count = vf_limit;
+ }
+ vi_count += efx->vf_count * efx_vf_size(efx);
+ }
+#endif
+
+ efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
+ efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
+}
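+
+/* Illustrative layout (hypothetical numbers, no SR-IOV): with vi_count == 32,
+ * tx_dc_base ends up 32 * 16 == 512 below sram_lim_qw and rx_dc_base a
+ * further 32 * 64 == 2048 below that; everything beneath rx_dc_base is left
+ * for buffer table entries.
+ */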
+
+u32 efx_farch_fpga_ver(struct efx_nic *efx)
+{
+ efx_oword_t altera_build;
+ efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
+ return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
+}
+
+void efx_farch_init_common(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+
+ /* Set positions of descriptor caches in SRAM. */
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
+ efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
+ efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
+
+ /* Set TX descriptor cache size. */
+ BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
+ efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
+
+ /* Set RX descriptor cache size. Set low watermark to size-8, as
+ * this allows most efficient prefetching.
+ */
+ BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
+ efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
+ EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
+ efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
+
+ /* Program INT_KER address */
+ EFX_POPULATE_OWORD_2(temp,
+ FRF_AZ_NORM_INT_VEC_DIS_KER,
+ EFX_INT_MODE_USE_MSI(efx),
+ FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
+ efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
+
+ if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
+ /* Use an interrupt level unused by event queues */
+ efx->irq_level = 0x1f;
+ else
+ /* Use a valid MSI-X vector */
+ efx->irq_level = 0;
+
+ /* Enable all the genuinely fatal interrupts. (They are still
+ * masked by the overall interrupt mask, controlled by
+ * efx_farch_interrupts()).
+ *
+ * Note: All other fatal interrupts are enabled
+ */
+ EFX_POPULATE_OWORD_3(temp,
+ FRF_AZ_ILL_ADR_INT_KER_EN, 1,
+ FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
+ FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
+ EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
+ EFX_INVERT_OWORD(temp);
+ efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
+
+ efx_farch_rx_push_indir_table(efx);
+
+ /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
+ * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
+ */
+ efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
+ /* Enable SW_EV to inherit in char driver - assume harmless here */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
+ /* Prefetch threshold 2 => fetch when descriptor cache half empty */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
+ /* Disable hardware watchdog which can misfire */
+ EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
+ /* Squash TX of packets of 16 bytes or less */
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
+ EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+ efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ EFX_POPULATE_OWORD_4(temp,
+ /* Default values */
+ FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
+ FRF_BZ_TX_PACE_SB_AF, 0xb,
+ FRF_BZ_TX_PACE_FB_BASE, 0,
+ /* Allow large pace values in the
+ * fast bin. */
+ FRF_BZ_TX_PACE_BIN_TH,
+ FFE_BZ_TX_PACE_RESERVED);
+ efx_writeo(efx, &temp, FR_BZ_TX_PACE);
+ }
+}
+
+/**************************************************************************
+ *
+ * Filter tables
+ *
+ **************************************************************************
+ */
+
+/* "Fudge factors" - difference between programmed value and actual depth.
+ * Due to the pipelined implementation we need to program the H/W with a value that
+ * is larger than the hop limit we want.
+ */
+#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
+#define EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
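+
+/* For example, an effective wildcard search depth of 5 is programmed into
+ * the hardware as 5 + EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD == 8 (see
+ * efx_farch_filter_push_rx_config() below).
+ */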
+
+/* Hard maximum search limit. Hardware will time-out beyond 200-something.
+ * We also need to avoid infinite loops in efx_farch_filter_search() when the
+ * table is full.
+ */
+#define EFX_FARCH_FILTER_CTL_SRCH_MAX 200
+
+/* Don't try very hard to find space for performance hints, as this is
+ * counter-productive. */
+#define EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
+
+enum efx_farch_filter_type {
+ EFX_FARCH_FILTER_TCP_FULL = 0,
+ EFX_FARCH_FILTER_TCP_WILD,
+ EFX_FARCH_FILTER_UDP_FULL,
+ EFX_FARCH_FILTER_UDP_WILD,
+ EFX_FARCH_FILTER_MAC_FULL = 4,
+ EFX_FARCH_FILTER_MAC_WILD,
+ EFX_FARCH_FILTER_UC_DEF = 8,
+ EFX_FARCH_FILTER_MC_DEF,
+ EFX_FARCH_FILTER_TYPE_COUNT, /* number of specific types */
+};
+
+enum efx_farch_filter_table_id {
+ EFX_FARCH_FILTER_TABLE_RX_IP = 0,
+ EFX_FARCH_FILTER_TABLE_RX_MAC,
+ EFX_FARCH_FILTER_TABLE_RX_DEF,
+ EFX_FARCH_FILTER_TABLE_TX_MAC,
+ EFX_FARCH_FILTER_TABLE_COUNT,
+};
+
+enum efx_farch_filter_index {
+ EFX_FARCH_FILTER_INDEX_UC_DEF,
+ EFX_FARCH_FILTER_INDEX_MC_DEF,
+ EFX_FARCH_FILTER_SIZE_RX_DEF,
+};
+
+struct efx_farch_filter_spec {
+ u8 type:4;
+ u8 priority:4;
+ u8 flags;
+ u16 dmaq_id;
+ u32 data[3];
+};
+
+struct efx_farch_filter_table {
+ enum efx_farch_filter_table_id id;
+ u32 offset; /* address of table relative to BAR */
+ unsigned size; /* number of entries */
+ unsigned step; /* step between entries */
+ unsigned used; /* number currently used */
+ unsigned long *used_bitmap;
+ struct efx_farch_filter_spec *spec;
+ unsigned search_limit[EFX_FARCH_FILTER_TYPE_COUNT];
+};
+
+struct efx_farch_filter_state {
+ struct efx_farch_filter_table table[EFX_FARCH_FILTER_TABLE_COUNT];
+};
+
+static void
+efx_farch_filter_table_clear_entry(struct efx_nic *efx,
+ struct efx_farch_filter_table *table,
+ unsigned int filter_idx);
+
+/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
+ * key derived from the n-tuple. The initial LFSR state is 0xffff. */
+static u16 efx_farch_filter_hash(u32 key)
+{
+ u16 tmp;
+
+ /* First 16 rounds */
+ tmp = 0x1fff ^ key >> 16;
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ tmp = tmp ^ tmp >> 9;
+ /* Last 16 rounds */
+ tmp = tmp ^ tmp << 13 ^ key;
+ tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
+ return tmp ^ tmp >> 9;
+}
+
+/* To allow for hash collisions, filter search continues at these
+ * increments from the first possible entry selected by the hash. */
+static u16 efx_farch_filter_increment(u32 key)
+{
+ return key * 2 - 1;
+}
+
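The following standalone sketch (not part of this commit) reruns the same hash and increment arithmetic to show how a key turns into a probe sequence. The 8192-entry table size is only an assumption for the example; the increment is always odd, so with a power-of-two table every slot is reachable.

#include <stdint.h>
#include <stdio.h>

static uint16_t toy_hash(uint32_t key)
{
	uint16_t tmp;

	/* Same LFSR folding as efx_farch_filter_hash() above */
	tmp = 0x1fff ^ key >> 16;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	tmp = tmp ^ tmp >> 9;
	tmp = tmp ^ tmp << 13 ^ key;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	return tmp ^ tmp >> 9;
}

int main(void)
{
	uint32_t key = 0x12345678;	/* arbitrary example key */
	unsigned int size = 8192;	/* assumed table size (a power of two) */
	unsigned int incr = (uint16_t)(key * 2 - 1);	/* always odd */
	unsigned int i = toy_hash(key) & (size - 1);
	unsigned int depth;

	for (depth = 1; depth <= 5; depth++) {
		printf("probe %u -> index %u\n", depth, i);
		i = (i + incr) & (size - 1);
	}
	return 0;
}
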
+static enum efx_farch_filter_table_id
+efx_farch_filter_spec_table_id(const struct efx_farch_filter_spec *spec)
+{
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
+ (EFX_FARCH_FILTER_TCP_FULL >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
+ (EFX_FARCH_FILTER_TCP_WILD >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
+ (EFX_FARCH_FILTER_UDP_FULL >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_IP !=
+ (EFX_FARCH_FILTER_UDP_WILD >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
+ (EFX_FARCH_FILTER_MAC_FULL >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_RX_MAC !=
+ (EFX_FARCH_FILTER_MAC_WILD >> 2));
+ BUILD_BUG_ON(EFX_FARCH_FILTER_TABLE_TX_MAC !=
+ EFX_FARCH_FILTER_TABLE_RX_MAC + 2);
+ return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
+}
+
+static void efx_farch_filter_push_rx_config(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table;
+ efx_oword_t filter_ctl;
+
+ efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_TCP_FULL] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_TCP_WILD] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_UDP_FULL] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_UDP_WILD] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
+ if (table->size) {
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
+ table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
+ if (table->size) {
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
+ table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
+ !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
+ EFX_FILTER_FLAG_RX_RSS));
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
+ table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
+ !!(table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
+ EFX_FILTER_FLAG_RX_RSS));
+
+ /* There is a single bit to enable RX scatter for all
+ * unmatched packets. Only set it if scatter is
+ * enabled in both filter specs.
+ */
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
+ !!(table->spec[EFX_FARCH_FILTER_INDEX_UC_DEF].flags &
+ table->spec[EFX_FARCH_FILTER_INDEX_MC_DEF].flags &
+ EFX_FILTER_FLAG_RX_SCATTER));
+ } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ /* We don't expose 'default' filters because unmatched
+ * packets always go to the queue number found in the
+ * RSS table. But we still need to set the RX scatter
+ * bit here.
+ */
+ EFX_SET_OWORD_FIELD(
+ filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
+ efx->rx_scatter);
+ }
+
+ efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
+}
+
+static void efx_farch_filter_push_tx_limits(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table;
+ efx_oword_t tx_cfg;
+
+ efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
+ if (table->size) {
+ EFX_SET_OWORD_FIELD(
+ tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
+ table->search_limit[EFX_FARCH_FILTER_MAC_FULL] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
+ EFX_SET_OWORD_FIELD(
+ tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
+ table->search_limit[EFX_FARCH_FILTER_MAC_WILD] +
+ EFX_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
+ }
+
+ efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
+}
+
+static int
+efx_farch_filter_from_gen_spec(struct efx_farch_filter_spec *spec,
+ const struct efx_filter_spec *gen_spec)
+{
+ bool is_full = false;
+
+ if ((gen_spec->flags & EFX_FILTER_FLAG_RX_RSS) &&
+ gen_spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT)
+ return -EINVAL;
+
+ spec->priority = gen_spec->priority;
+ spec->flags = gen_spec->flags;
+ spec->dmaq_id = gen_spec->dmaq_id;
+
+ switch (gen_spec->match_flags) {
+ case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT):
+ is_full = true;
+ /* fall through */
+ case (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT): {
+ __be32 rhost, host1, host2;
+ __be16 rport, port1, port2;
+
+ EFX_BUG_ON_PARANOID(!(gen_spec->flags & EFX_FILTER_FLAG_RX));
+
+ if (gen_spec->ether_type != htons(ETH_P_IP))
+ return -EPROTONOSUPPORT;
+ if (gen_spec->loc_port == 0 ||
+ (is_full && gen_spec->rem_port == 0))
+ return -EADDRNOTAVAIL;
+ switch (gen_spec->ip_proto) {
+ case IPPROTO_TCP:
+ spec->type = (is_full ? EFX_FARCH_FILTER_TCP_FULL :
+ EFX_FARCH_FILTER_TCP_WILD);
+ break;
+ case IPPROTO_UDP:
+ spec->type = (is_full ? EFX_FARCH_FILTER_UDP_FULL :
+ EFX_FARCH_FILTER_UDP_WILD);
+ break;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ /* Filter is constructed in terms of source and destination,
+ * with the odd wrinkle that the ports are swapped in a UDP
+ * wildcard filter. We need to convert from local and remote
+ * (= zero for wildcard) addresses.
+ */
+ rhost = is_full ? gen_spec->rem_host[0] : 0;
+ rport = is_full ? gen_spec->rem_port : 0;
+ host1 = rhost;
+ host2 = gen_spec->loc_host[0];
+ if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
+ port1 = gen_spec->loc_port;
+ port2 = rport;
+ } else {
+ port1 = rport;
+ port2 = gen_spec->loc_port;
+ }
+ spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
+ spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
+ spec->data[2] = ntohl(host2);
+
+ break;
+ }
+
+ case EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_OUTER_VID:
+ is_full = true;
+ /* fall through */
+ case EFX_FILTER_MATCH_LOC_MAC:
+ spec->type = (is_full ? EFX_FARCH_FILTER_MAC_FULL :
+ EFX_FARCH_FILTER_MAC_WILD);
+ spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
+ spec->data[1] = (gen_spec->loc_mac[2] << 24 |
+ gen_spec->loc_mac[3] << 16 |
+ gen_spec->loc_mac[4] << 8 |
+ gen_spec->loc_mac[5]);
+ spec->data[2] = (gen_spec->loc_mac[0] << 8 |
+ gen_spec->loc_mac[1]);
+ break;
+
+ case EFX_FILTER_MATCH_LOC_MAC_IG:
+ spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
+ EFX_FARCH_FILTER_MC_DEF :
+ EFX_FARCH_FILTER_UC_DEF);
+ memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
+ break;
+
+ default:
+ return -EPROTONOSUPPORT;
+ }
+
+ return 0;
+}
+
+static void
+efx_farch_filter_to_gen_spec(struct efx_filter_spec *gen_spec,
+ const struct efx_farch_filter_spec *spec)
+{
+ bool is_full = false;
+
+ /* *gen_spec should be completely initialised, to be consistent
+ * with efx_filter_init_{rx,tx}() and in case we want to copy
+ * it back to userland.
+ */
+ memset(gen_spec, 0, sizeof(*gen_spec));
+
+ gen_spec->priority = spec->priority;
+ gen_spec->flags = spec->flags;
+ gen_spec->dmaq_id = spec->dmaq_id;
+
+ switch (spec->type) {
+ case EFX_FARCH_FILTER_TCP_FULL:
+ case EFX_FARCH_FILTER_UDP_FULL:
+ is_full = true;
+ /* fall through */
+ case EFX_FARCH_FILTER_TCP_WILD:
+ case EFX_FARCH_FILTER_UDP_WILD: {
+ __be32 host1, host2;
+ __be16 port1, port2;
+
+ gen_spec->match_flags =
+ EFX_FILTER_MATCH_ETHER_TYPE |
+ EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+ if (is_full)
+ gen_spec->match_flags |= (EFX_FILTER_MATCH_REM_HOST |
+ EFX_FILTER_MATCH_REM_PORT);
+ gen_spec->ether_type = htons(ETH_P_IP);
+ gen_spec->ip_proto =
+ (spec->type == EFX_FARCH_FILTER_TCP_FULL ||
+ spec->type == EFX_FARCH_FILTER_TCP_WILD) ?
+ IPPROTO_TCP : IPPROTO_UDP;
+
+ host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
+ port1 = htons(spec->data[0]);
+ host2 = htonl(spec->data[2]);
+ port2 = htons(spec->data[1] >> 16);
+ if (spec->flags & EFX_FILTER_FLAG_TX) {
+ gen_spec->loc_host[0] = host1;
+ gen_spec->rem_host[0] = host2;
+ } else {
+ gen_spec->loc_host[0] = host2;
+ gen_spec->rem_host[0] = host1;
+ }
+ if (!!(gen_spec->flags & EFX_FILTER_FLAG_TX) ^
+ (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
+ gen_spec->loc_port = port1;
+ gen_spec->rem_port = port2;
+ } else {
+ gen_spec->loc_port = port2;
+ gen_spec->rem_port = port1;
+ }
+
+ break;
+ }
+
+ case EFX_FARCH_FILTER_MAC_FULL:
+ is_full = true;
+ /* fall through */
+ case EFX_FARCH_FILTER_MAC_WILD:
+ gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC;
+ if (is_full)
+ gen_spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ gen_spec->loc_mac[0] = spec->data[2] >> 8;
+ gen_spec->loc_mac[1] = spec->data[2];
+ gen_spec->loc_mac[2] = spec->data[1] >> 24;
+ gen_spec->loc_mac[3] = spec->data[1] >> 16;
+ gen_spec->loc_mac[4] = spec->data[1] >> 8;
+ gen_spec->loc_mac[5] = spec->data[1];
+ gen_spec->outer_vid = htons(spec->data[0]);
+ break;
+
+ case EFX_FARCH_FILTER_UC_DEF:
+ case EFX_FARCH_FILTER_MC_DEF:
+ gen_spec->match_flags = EFX_FILTER_MATCH_LOC_MAC_IG;
+ gen_spec->loc_mac[0] = spec->type == EFX_FARCH_FILTER_MC_DEF;
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
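For the full-match TCP/UDP case, the data[] packing in efx_farch_filter_from_gen_spec() and the unpacking in efx_farch_filter_to_gen_spec() are exact inverses. A standalone sketch (not part of this commit, with arbitrary sample endpoints) checking that round trip:

#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Arbitrary sample endpoints: remote 192.168.0.1:12345, local 10.0.0.1:80 */
	uint32_t rem_host = htonl(0xc0a80001), loc_host = htonl(0x0a000001);
	uint16_t rem_port = htons(12345), loc_port = htons(80);
	uint32_t data[3];

	/* Pack as in the full-match TCP case of efx_farch_filter_from_gen_spec() */
	data[0] = ntohl(rem_host) << 16 | ntohs(rem_port);
	data[1] = (uint32_t)ntohs(loc_port) << 16 | ntohl(rem_host) >> 16;
	data[2] = ntohl(loc_host);

	/* Unpack as in efx_farch_filter_to_gen_spec() and check the round trip */
	assert(htonl(data[0] >> 16 | data[1] << 16) == rem_host);
	assert(htons((uint16_t)data[0]) == rem_port);
	assert(htonl(data[2]) == loc_host);
	assert(htons((uint16_t)(data[1] >> 16)) == loc_port);
	return 0;
}
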
+static void
+efx_farch_filter_init_rx_for_stack(struct efx_nic *efx,
+ struct efx_farch_filter_spec *spec)
+{
+ /* If there's only one channel then disable RSS for non-VF
+ * traffic, thereby allowing VFs to use RSS when the PF can't.
+ */
+ spec->priority = EFX_FILTER_PRI_REQUIRED;
+ spec->flags = (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_STACK |
+ (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
+ (efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
+ spec->dmaq_id = 0;
+}
+
+/* Build a filter entry and return its n-tuple key. */
+static u32 efx_farch_filter_build(efx_oword_t *filter,
+ struct efx_farch_filter_spec *spec)
+{
+ u32 data3;
+
+ switch (efx_farch_filter_spec_table_id(spec)) {
+ case EFX_FARCH_FILTER_TABLE_RX_IP: {
+ bool is_udp = (spec->type == EFX_FARCH_FILTER_UDP_FULL ||
+ spec->type == EFX_FARCH_FILTER_UDP_WILD);
+ EFX_POPULATE_OWORD_7(
+ *filter,
+ FRF_BZ_RSS_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
+ FRF_BZ_SCATTER_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
+ FRF_BZ_TCP_UDP, is_udp,
+ FRF_BZ_RXQ_ID, spec->dmaq_id,
+ EFX_DWORD_2, spec->data[2],
+ EFX_DWORD_1, spec->data[1],
+ EFX_DWORD_0, spec->data[0]);
+ data3 = is_udp;
+ break;
+ }
+
+ case EFX_FARCH_FILTER_TABLE_RX_MAC: {
+ bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
+ EFX_POPULATE_OWORD_7(
+ *filter,
+ FRF_CZ_RMFT_RSS_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
+ FRF_CZ_RMFT_SCATTER_EN,
+ !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
+ FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
+ FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
+ FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
+ FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
+ data3 = is_wild;
+ break;
+ }
+
+ case EFX_FARCH_FILTER_TABLE_TX_MAC: {
+ bool is_wild = spec->type == EFX_FARCH_FILTER_MAC_WILD;
+ EFX_POPULATE_OWORD_5(*filter,
+ FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
+ FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
+ FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
+ FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
+ FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
+ data3 = is_wild | spec->dmaq_id << 1;
+ break;
+ }
+
+ default:
+ BUG();
+ }
+
+ return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
+}
+
+static bool efx_farch_filter_equal(const struct efx_farch_filter_spec *left,
+ const struct efx_farch_filter_spec *right)
+{
+ if (left->type != right->type ||
+ memcmp(left->data, right->data, sizeof(left->data)))
+ return false;
+
+ if (left->flags & EFX_FILTER_FLAG_TX &&
+ left->dmaq_id != right->dmaq_id)
+ return false;
+
+ return true;
+}
+
+/*
+ * Construct/deconstruct external filter IDs. At least the RX filter
+ * IDs must be ordered by matching priority, for RX NFC semantics.
+ *
+ * Deconstruction needs to be robust against invalid IDs so that
+ * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
+ * accept user-provided IDs.
+ */
+
+#define EFX_FARCH_FILTER_MATCH_PRI_COUNT 5
+
+static const u8 efx_farch_filter_type_match_pri[EFX_FARCH_FILTER_TYPE_COUNT] = {
+ [EFX_FARCH_FILTER_TCP_FULL] = 0,
+ [EFX_FARCH_FILTER_UDP_FULL] = 0,
+ [EFX_FARCH_FILTER_TCP_WILD] = 1,
+ [EFX_FARCH_FILTER_UDP_WILD] = 1,
+ [EFX_FARCH_FILTER_MAC_FULL] = 2,
+ [EFX_FARCH_FILTER_MAC_WILD] = 3,
+ [EFX_FARCH_FILTER_UC_DEF] = 4,
+ [EFX_FARCH_FILTER_MC_DEF] = 4,
+};
+
+static const enum efx_farch_filter_table_id efx_farch_filter_range_table[] = {
+ EFX_FARCH_FILTER_TABLE_RX_IP, /* RX match pri 0 */
+ EFX_FARCH_FILTER_TABLE_RX_IP,
+ EFX_FARCH_FILTER_TABLE_RX_MAC,
+ EFX_FARCH_FILTER_TABLE_RX_MAC,
+ EFX_FARCH_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
+ EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 0 */
+ EFX_FARCH_FILTER_TABLE_TX_MAC, /* TX match pri 1 */
+};
+
+#define EFX_FARCH_FILTER_INDEX_WIDTH 13
+#define EFX_FARCH_FILTER_INDEX_MASK ((1 << EFX_FARCH_FILTER_INDEX_WIDTH) - 1)
+
+static inline u32
+efx_farch_filter_make_id(const struct efx_farch_filter_spec *spec,
+ unsigned int index)
+{
+ unsigned int range;
+
+ range = efx_farch_filter_type_match_pri[spec->type];
+ if (!(spec->flags & EFX_FILTER_FLAG_RX))
+ range += EFX_FARCH_FILTER_MATCH_PRI_COUNT;
+
+ return range << EFX_FARCH_FILTER_INDEX_WIDTH | index;
+}
+
+static inline enum efx_farch_filter_table_id
+efx_farch_filter_id_table_id(u32 id)
+{
+ unsigned int range = id >> EFX_FARCH_FILTER_INDEX_WIDTH;
+
+ if (range < ARRAY_SIZE(efx_farch_filter_range_table))
+ return efx_farch_filter_range_table[range];
+ else
+ return EFX_FARCH_FILTER_TABLE_COUNT; /* invalid */
+}
+
+static inline unsigned int efx_farch_filter_id_index(u32 id)
+{
+ return id & EFX_FARCH_FILTER_INDEX_MASK;
+}
+
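A standalone sketch (not part of this commit) of the ID layout these helpers implement: the match-priority range in the upper bits and the table index in the low 13 bits.

#include <assert.h>
#include <stdio.h>

#define EXAMPLE_INDEX_WIDTH 13
#define EXAMPLE_INDEX_MASK ((1u << EXAMPLE_INDEX_WIDTH) - 1)

int main(void)
{
	unsigned int range = 1;		/* TCP_WILD/UDP_WILD map to RX match pri 1 */
	unsigned int index = 42;	/* arbitrary slot in the RX_IP table */
	unsigned int id = range << EXAMPLE_INDEX_WIDTH | index;

	/* Decode exactly as efx_farch_filter_id_table_id()/_id_index() do */
	assert(id >> EXAMPLE_INDEX_WIDTH == 1);	/* efx_farch_filter_range_table[1], i.e. RX_IP */
	assert((id & EXAMPLE_INDEX_MASK) == 42);
	printf("id=%#x\n", id);			/* 0x202a */
	return 0;
}
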
+u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ unsigned int range = EFX_FARCH_FILTER_MATCH_PRI_COUNT - 1;
+ enum efx_farch_filter_table_id table_id;
+
+ do {
+ table_id = efx_farch_filter_range_table[range];
+ if (state->table[table_id].size != 0)
+ return range << EFX_FARCH_FILTER_INDEX_WIDTH |
+ state->table[table_id].size;
+ } while (range--);
+
+ return 0;
+}
+
+s32 efx_farch_filter_insert(struct efx_nic *efx,
+ struct efx_filter_spec *gen_spec,
+ bool replace_equal)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table;
+ struct efx_farch_filter_spec spec;
+ efx_oword_t filter;
+ int rep_index, ins_index;
+ unsigned int depth = 0;
+ int rc;
+
+ rc = efx_farch_filter_from_gen_spec(&spec, gen_spec);
+ if (rc)
+ return rc;
+
+ table = &state->table[efx_farch_filter_spec_table_id(&spec)];
+ if (table->size == 0)
+ return -EINVAL;
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "%s: type %d search_limit=%d", __func__, spec.type,
+ table->search_limit[spec.type]);
+
+ if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
+ /* One filter spec per type */
+ BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_UC_DEF != 0);
+ BUILD_BUG_ON(EFX_FARCH_FILTER_INDEX_MC_DEF !=
+ EFX_FARCH_FILTER_MC_DEF - EFX_FARCH_FILTER_UC_DEF);
+ rep_index = spec.type - EFX_FARCH_FILTER_UC_DEF;
+ ins_index = rep_index;
+
+ spin_lock_bh(&efx->filter_lock);
+ } else {
+ /* Search concurrently for
+ * (1) a filter to be replaced (rep_index): any filter
+ * with the same match values, up to the current
+ * search depth for this type, and
+ * (2) the insertion point (ins_index): (1) or any
+ * free slot before it or up to the maximum search
+ * depth for this priority
+ * We fail if we cannot find (2).
+ *
+ * We can stop once either
+ * (a) we find (1), in which case we have definitely
+ * found (2) as well; or
+ * (b) we have searched exhaustively for (1), and have
+ * either found (2) or searched exhaustively for it
+ */
+ u32 key = efx_farch_filter_build(&filter, &spec);
+ unsigned int hash = efx_farch_filter_hash(key);
+ unsigned int incr = efx_farch_filter_increment(key);
+ unsigned int max_rep_depth = table->search_limit[spec.type];
+ unsigned int max_ins_depth =
+ spec.priority <= EFX_FILTER_PRI_HINT ?
+ EFX_FARCH_FILTER_CTL_SRCH_HINT_MAX :
+ EFX_FARCH_FILTER_CTL_SRCH_MAX;
+ unsigned int i = hash & (table->size - 1);
+
+ ins_index = -1;
+ depth = 1;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (;;) {
+ if (!test_bit(i, table->used_bitmap)) {
+ if (ins_index < 0)
+ ins_index = i;
+ } else if (efx_farch_filter_equal(&spec,
+ &table->spec[i])) {
+ /* Case (a) */
+ if (ins_index < 0)
+ ins_index = i;
+ rep_index = i;
+ break;
+ }
+
+ if (depth >= max_rep_depth &&
+ (ins_index >= 0 || depth >= max_ins_depth)) {
+ /* Case (b) */
+ if (ins_index < 0) {
+ rc = -EBUSY;
+ goto out;
+ }
+ rep_index = -1;
+ break;
+ }
+
+ i = (i + incr) & (table->size - 1);
+ ++depth;
+ }
+ }
+
+ /* If we found a filter to be replaced, check whether we
+ * should do so
+ */
+ if (rep_index >= 0) {
+ struct efx_farch_filter_spec *saved_spec =
+ &table->spec[rep_index];
+
+ if (spec.priority == saved_spec->priority && !replace_equal) {
+ rc = -EEXIST;
+ goto out;
+ }
+ if (spec.priority < saved_spec->priority &&
+ !(saved_spec->priority == EFX_FILTER_PRI_REQUIRED &&
+ saved_spec->flags & EFX_FILTER_FLAG_RX_STACK)) {
+ rc = -EPERM;
+ goto out;
+ }
+ if (spec.flags & EFX_FILTER_FLAG_RX_STACK) {
+ /* Just make sure it won't be removed */
+ saved_spec->flags |= EFX_FILTER_FLAG_RX_STACK;
+ rc = 0;
+ goto out;
+ }
+ /* Retain the RX_STACK flag */
+ spec.flags |= saved_spec->flags & EFX_FILTER_FLAG_RX_STACK;
+ }
+
+ /* Insert the filter */
+ if (ins_index != rep_index) {
+ __set_bit(ins_index, table->used_bitmap);
+ ++table->used;
+ }
+ table->spec[ins_index] = spec;
+
+ if (table->id == EFX_FARCH_FILTER_TABLE_RX_DEF) {
+ efx_farch_filter_push_rx_config(efx);
+ } else {
+ if (table->search_limit[spec.type] < depth) {
+ table->search_limit[spec.type] = depth;
+ if (spec.flags & EFX_FILTER_FLAG_TX)
+ efx_farch_filter_push_tx_limits(efx);
+ else
+ efx_farch_filter_push_rx_config(efx);
+ }
+
+ efx_writeo(efx, &filter,
+ table->offset + table->step * ins_index);
+
+ /* If we were able to replace a filter by inserting
+ * at a lower depth, clear the replaced filter
+ */
+ if (ins_index != rep_index && rep_index >= 0)
+ efx_farch_filter_table_clear_entry(efx, table,
+ rep_index);
+ }
+
+ netif_vdbg(efx, hw, efx->net_dev,
+ "%s: filter type %d index %d rxq %u set",
+ __func__, spec.type, ins_index, spec.dmaq_id);
+ rc = efx_farch_filter_make_id(&spec, ins_index);
+
+out:
+ spin_unlock_bh(&efx->filter_lock);
+ return rc;
+}
+
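A standalone toy version (not part of this commit) of the concurrent search described in the comment inside efx_farch_filter_insert(): it walks the probe sequence tracking both the first free slot (ins) and any matching entry (rep), using a plain int array in place of the filter table.

#include <stdio.h>

#define TOY_SIZE 8	/* power of two, like the real tables */

/* Returns the insertion slot (or -1 if no free slot within max_ins_depth);
 * *rep is the index of an equal entry, or -1 if none found within
 * max_rep_depth.  A zero entry stands for a free slot.
 */
static int toy_search(const int *table, int key, unsigned int hash,
		      unsigned int incr, unsigned int max_rep_depth,
		      unsigned int max_ins_depth, int *rep)
{
	unsigned int i = hash & (TOY_SIZE - 1);
	unsigned int depth = 1;
	int ins = -1;

	for (;;) {
		if (table[i] == 0) {
			if (ins < 0)
				ins = i;
		} else if (table[i] == key) {
			*rep = i;			/* case (a): match found */
			return ins < 0 ? (int)i : ins;
		}
		if (depth >= max_rep_depth &&
		    (ins >= 0 || depth >= max_ins_depth)) {
			*rep = -1;			/* case (b): no match */
			return ins;
		}
		i = (i + incr) & (TOY_SIZE - 1);
		++depth;
	}
}

int main(void)
{
	int table[TOY_SIZE] = { 0, 5, 0, 7, 0, 0, 0, 0 };
	int rep;
	int ins = toy_search(table, 7, 1, 1, 4, 5, &rep);

	/* Prints "ins=2 rep=3": the match sits deeper than the first free
	 * slot, mirroring the "replace by inserting at a lower depth" case.
	 */
	printf("ins=%d rep=%d\n", ins, rep);
	return 0;
}
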
+static void
+efx_farch_filter_table_clear_entry(struct efx_nic *efx,
+ struct efx_farch_filter_table *table,
+ unsigned int filter_idx)
+{
+ static efx_oword_t filter;
+
+ EFX_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
+ BUG_ON(table->offset == 0); /* can't clear MAC default filters */
+
+ __clear_bit(filter_idx, table->used_bitmap);
+ --table->used;
+ memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
+
+ efx_writeo(efx, &filter, table->offset + table->step * filter_idx);
+
+ /* If this filter required a greater search depth than
+ * any other, the search limit for its type can now be
+ * decreased. However, it is hard to determine that
+ * unless the table has become completely empty - in
+ * which case, all its search limits can be set to 0.
+ */
+ if (unlikely(table->used == 0)) {
+ memset(table->search_limit, 0, sizeof(table->search_limit));
+ if (table->id == EFX_FARCH_FILTER_TABLE_TX_MAC)
+ efx_farch_filter_push_tx_limits(efx);
+ else
+ efx_farch_filter_push_rx_config(efx);
+ }
+}
+
+static int efx_farch_filter_remove(struct efx_nic *efx,
+ struct efx_farch_filter_table *table,
+ unsigned int filter_idx,
+ enum efx_filter_priority priority)
+{
+ struct efx_farch_filter_spec *spec = &table->spec[filter_idx];
+
+ if (!test_bit(filter_idx, table->used_bitmap) ||
+ spec->priority > priority)
+ return -ENOENT;
+
+ if (spec->flags & EFX_FILTER_FLAG_RX_STACK) {
+ efx_farch_filter_init_rx_for_stack(efx, spec);
+ efx_farch_filter_push_rx_config(efx);
+ } else {
+ efx_farch_filter_table_clear_entry(efx, table, filter_idx);
+ }
+
+ return 0;
+}
+
+int efx_farch_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ unsigned int filter_idx;
+ struct efx_farch_filter_spec *spec;
+ int rc;
+
+ table_id = efx_farch_filter_id_table_id(filter_id);
+ if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
+ return -ENOENT;
+ table = &state->table[table_id];
+
+ filter_idx = efx_farch_filter_id_index(filter_id);
+ if (filter_idx >= table->size)
+ return -ENOENT;
+ spec = &table->spec[filter_idx];
+
+ spin_lock_bh(&efx->filter_lock);
+ rc = efx_farch_filter_remove(efx, table, filter_idx, priority);
+ spin_unlock_bh(&efx->filter_lock);
+
+ return rc;
+}
+
+int efx_farch_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec_buf)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ struct efx_farch_filter_spec *spec;
+ unsigned int filter_idx;
+ int rc;
+
+ table_id = efx_farch_filter_id_table_id(filter_id);
+ if ((unsigned int)table_id >= EFX_FARCH_FILTER_TABLE_COUNT)
+ return -ENOENT;
+ table = &state->table[table_id];
+
+ filter_idx = efx_farch_filter_id_index(filter_id);
+ if (filter_idx >= table->size)
+ return -ENOENT;
+ spec = &table->spec[filter_idx];
+
+ spin_lock_bh(&efx->filter_lock);
+
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ spec->priority == priority) {
+ efx_farch_filter_to_gen_spec(spec_buf, spec);
+ rc = 0;
+ } else {
+ rc = -ENOENT;
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ return rc;
+}
+
+static void
+efx_farch_filter_table_clear(struct efx_nic *efx,
+ enum efx_farch_filter_table_id table_id,
+ enum efx_filter_priority priority)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table = &state->table[table_id];
+ unsigned int filter_idx;
+
+ spin_lock_bh(&efx->filter_lock);
+ for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
+ efx_farch_filter_remove(efx, table, filter_idx, priority);
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+void efx_farch_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_IP,
+ priority);
+ efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_MAC,
+ priority);
+ efx_farch_filter_table_clear(efx, EFX_FARCH_FILTER_TABLE_RX_DEF,
+ priority);
+}
+
+u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ unsigned int filter_idx;
+ u32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ table->spec[filter_idx].priority == priority)
+ ++count;
+ }
+ }
+
+ spin_unlock_bh(&efx->filter_lock);
+
+ return count;
+}
+
+s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ table->spec[filter_idx].priority == priority) {
+ if (count == size) {
+ count = -EMSGSIZE;
+ goto out;
+ }
+ buf[count++] = efx_farch_filter_make_id(
+ &table->spec[filter_idx], filter_idx);
+ }
+ }
+ }
+out:
+ spin_unlock_bh(&efx->filter_lock);
+
+ return count;
+}
+
+/* Restore filter state after reset */
+void efx_farch_filter_table_restore(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ efx_oword_t filter;
+ unsigned int filter_idx;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
+ table = &state->table[table_id];
+
+ /* Check whether this is a regular register table */
+ if (table->step == 0)
+ continue;
+
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (!test_bit(filter_idx, table->used_bitmap))
+ continue;
+ efx_farch_filter_build(&filter, &table->spec[filter_idx]);
+ efx_writeo(efx, &filter,
+ table->offset + table->step * filter_idx);
+ }
+ }
+
+ efx_farch_filter_push_rx_config(efx);
+ efx_farch_filter_push_tx_limits(efx);
+
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+void efx_farch_filter_table_remove(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+
+ for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
+ kfree(state->table[table_id].used_bitmap);
+ vfree(state->table[table_id].spec);
+ }
+ kfree(state);
+}
+
+int efx_farch_filter_table_probe(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state;
+ struct efx_farch_filter_table *table;
+ unsigned table_id;
+
+ state = kzalloc(sizeof(struct efx_farch_filter_state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+ efx->filter_state = state;
+
+ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
+ table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table->offset = FR_BZ_RX_FILTER_TBL0;
+ table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
+ table->step = FR_BZ_RX_FILTER_TBL0_STEP;
+ }
+
+ if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_MAC];
+ table->id = EFX_FARCH_FILTER_TABLE_RX_MAC;
+ table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
+ table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
+ table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
+ table->id = EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table->size = EFX_FARCH_FILTER_SIZE_RX_DEF;
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_TX_MAC];
+ table->id = EFX_FARCH_FILTER_TABLE_TX_MAC;
+ table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
+ table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
+ table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
+ }
+
+ for (table_id = 0; table_id < EFX_FARCH_FILTER_TABLE_COUNT; table_id++) {
+ table = &state->table[table_id];
+ if (table->size == 0)
+ continue;
+ table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
+ sizeof(unsigned long),
+ GFP_KERNEL);
+ if (!table->used_bitmap)
+ goto fail;
+ table->spec = vzalloc(table->size * sizeof(*table->spec));
+ if (!table->spec)
+ goto fail;
+ }
+
+ table = &state->table[EFX_FARCH_FILTER_TABLE_RX_DEF];
+ if (table->size) {
+ /* RX default filters must always exist */
+ struct efx_farch_filter_spec *spec;
+ unsigned i;
+
+ for (i = 0; i < EFX_FARCH_FILTER_SIZE_RX_DEF; i++) {
+ spec = &table->spec[i];
+ spec->type = EFX_FARCH_FILTER_UC_DEF + i;
+ efx_farch_filter_init_rx_for_stack(efx, spec);
+ __set_bit(i, table->used_bitmap);
+ }
+ }
+
+ efx_farch_filter_push_rx_config(efx);
+
+ return 0;
+
+fail:
+ efx_farch_filter_table_remove(efx);
+ return -ENOMEM;
+}
+
+/* Update scatter enable flags for filters pointing to our own RX queues */
+void efx_farch_filter_update_rx_scatter(struct efx_nic *efx)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ enum efx_farch_filter_table_id table_id;
+ struct efx_farch_filter_table *table;
+ efx_oword_t filter;
+ unsigned int filter_idx;
+
+ spin_lock_bh(&efx->filter_lock);
+
+ for (table_id = EFX_FARCH_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FARCH_FILTER_TABLE_RX_DEF;
+ table_id++) {
+ table = &state->table[table_id];
+
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (!test_bit(filter_idx, table->used_bitmap) ||
+ table->spec[filter_idx].dmaq_id >=
+ efx->n_rx_channels)
+ continue;
+
+ if (efx->rx_scatter)
+ table->spec[filter_idx].flags |=
+ EFX_FILTER_FLAG_RX_SCATTER;
+ else
+ table->spec[filter_idx].flags &=
+ ~EFX_FILTER_FLAG_RX_SCATTER;
+
+ if (table_id == EFX_FARCH_FILTER_TABLE_RX_DEF)
+ /* Pushed by efx_farch_filter_push_rx_config() */
+ continue;
+
+ efx_farch_filter_build(&filter, &table->spec[filter_idx]);
+ efx_writeo(efx, &filter,
+ table->offset + table->step * filter_idx);
+ }
+ }
+
+ efx_farch_filter_push_rx_config(efx);
+
+ spin_unlock_bh(&efx->filter_lock);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+
+s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
+ struct efx_filter_spec *gen_spec)
+{
+ return efx_farch_filter_insert(efx, gen_spec, true);
+}
+
+bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int index)
+{
+ struct efx_farch_filter_state *state = efx->filter_state;
+ struct efx_farch_filter_table *table =
+ &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
+
+ if (test_bit(index, table->used_bitmap) &&
+ table->spec[index].priority == EFX_FILTER_PRI_HINT &&
+ rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
+ flow_id, index)) {
+ efx_farch_filter_table_clear_entry(efx, table, index);
+ return true;
+ }
+
+ return false;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+void efx_farch_filter_sync_rx_mode(struct efx_nic *efx)
+{
+ struct net_device *net_dev = efx->net_dev;
+ struct netdev_hw_addr *ha;
+ union efx_multicast_hash *mc_hash = &efx->multicast_hash;
+ u32 crc;
+ int bit;
+
+ netif_addr_lock_bh(net_dev);
+
+ efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);
+
+ /* Build multicast hash table */
+ if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
+ memset(mc_hash, 0xff, sizeof(*mc_hash));
+ } else {
+ memset(mc_hash, 0x00, sizeof(*mc_hash));
+ netdev_for_each_mc_addr(ha, net_dev) {
+ crc = ether_crc_le(ETH_ALEN, ha->addr);
+ bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
+ __set_bit_le(bit, mc_hash);
+ }
+
+ /* Broadcast packets go through the multicast hash filter.
+ * ether_crc_le() of the broadcast address is 0xbe2612ff
+ * so we always add bit 0xff to the mask.
+ */
+ __set_bit_le(0xff, mc_hash);
+ }
+
+ netif_addr_unlock_bh(net_dev);
+}
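
A standalone check (not part of this commit) of the broadcast-CRC claim in the comment above, using a bit-by-bit reimplementation of the little-endian CRC-32 that ether_crc_le() is expected to compute; the 0xbe2612ff value is taken from the driver comment, not independently verified here.

#include <stdint.h>
#include <stdio.h>

static uint32_t toy_crc_le(const uint8_t *data, int len)
{
	uint32_t crc = 0xffffffff;	/* initial value, as for ether_crc_le() */
	int i, bit;

	for (i = 0; i < len; i++) {
		uint8_t octet = data[i];
		for (bit = 0; bit < 8; bit++, octet >>= 1) {
			if ((crc ^ octet) & 1)
				crc = crc >> 1 ^ 0xedb88320;
			else
				crc >>= 1;
		}
	}
	return crc;
}

int main(void)
{
	static const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	uint32_t crc = toy_crc_le(bcast, 6);

	/* Expect crc=0xbe2612ff, bit=0xff per the driver comment */
	printf("crc=%#x bit=%#x\n", (unsigned)crc, (unsigned)(crc & 0xff));
	return 0;
}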
diff --git a/drivers/net/ethernet/sfc/regs.h b/drivers/net/ethernet/sfc/farch_regs.h
index ade4c4dc56c..7019a712e79 100644
--- a/drivers/net/ethernet/sfc/regs.h
+++ b/drivers/net/ethernet/sfc/farch_regs.h
@@ -1,15 +1,15 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
-#ifndef EFX_REGS_H
-#define EFX_REGS_H
+#ifndef EFX_FARCH_REGS_H
+#define EFX_FARCH_REGS_H
/*
* Falcon hardware architecture definitions have a name prefix following
@@ -2925,264 +2925,8 @@
#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
-/**************************************************************************
- *
- * Falcon MAC stats
- *
- **************************************************************************
- *
- */
-
-#define GRxGoodOct_offset 0x0
-#define GRxGoodOct_WIDTH 48
-#define GRxBadOct_offset 0x8
-#define GRxBadOct_WIDTH 48
-#define GRxMissPkt_offset 0x10
-#define GRxMissPkt_WIDTH 32
-#define GRxFalseCRS_offset 0x14
-#define GRxFalseCRS_WIDTH 32
-#define GRxPausePkt_offset 0x18
-#define GRxPausePkt_WIDTH 32
-#define GRxBadPkt_offset 0x1C
-#define GRxBadPkt_WIDTH 32
-#define GRxUcastPkt_offset 0x20
-#define GRxUcastPkt_WIDTH 32
-#define GRxMcastPkt_offset 0x24
-#define GRxMcastPkt_WIDTH 32
-#define GRxBcastPkt_offset 0x28
-#define GRxBcastPkt_WIDTH 32
-#define GRxGoodLt64Pkt_offset 0x2C
-#define GRxGoodLt64Pkt_WIDTH 32
-#define GRxBadLt64Pkt_offset 0x30
-#define GRxBadLt64Pkt_WIDTH 32
-#define GRx64Pkt_offset 0x34
-#define GRx64Pkt_WIDTH 32
-#define GRx65to127Pkt_offset 0x38
-#define GRx65to127Pkt_WIDTH 32
-#define GRx128to255Pkt_offset 0x3C
-#define GRx128to255Pkt_WIDTH 32
-#define GRx256to511Pkt_offset 0x40
-#define GRx256to511Pkt_WIDTH 32
-#define GRx512to1023Pkt_offset 0x44
-#define GRx512to1023Pkt_WIDTH 32
-#define GRx1024to15xxPkt_offset 0x48
-#define GRx1024to15xxPkt_WIDTH 32
-#define GRx15xxtoJumboPkt_offset 0x4C
-#define GRx15xxtoJumboPkt_WIDTH 32
-#define GRxGtJumboPkt_offset 0x50
-#define GRxGtJumboPkt_WIDTH 32
-#define GRxFcsErr64to15xxPkt_offset 0x54
-#define GRxFcsErr64to15xxPkt_WIDTH 32
-#define GRxFcsErr15xxtoJumboPkt_offset 0x58
-#define GRxFcsErr15xxtoJumboPkt_WIDTH 32
-#define GRxFcsErrGtJumboPkt_offset 0x5C
-#define GRxFcsErrGtJumboPkt_WIDTH 32
-#define GTxGoodBadOct_offset 0x80
-#define GTxGoodBadOct_WIDTH 48
-#define GTxGoodOct_offset 0x88
-#define GTxGoodOct_WIDTH 48
-#define GTxSglColPkt_offset 0x90
-#define GTxSglColPkt_WIDTH 32
-#define GTxMultColPkt_offset 0x94
-#define GTxMultColPkt_WIDTH 32
-#define GTxExColPkt_offset 0x98
-#define GTxExColPkt_WIDTH 32
-#define GTxDefPkt_offset 0x9C
-#define GTxDefPkt_WIDTH 32
-#define GTxLateCol_offset 0xA0
-#define GTxLateCol_WIDTH 32
-#define GTxExDefPkt_offset 0xA4
-#define GTxExDefPkt_WIDTH 32
-#define GTxPausePkt_offset 0xA8
-#define GTxPausePkt_WIDTH 32
-#define GTxBadPkt_offset 0xAC
-#define GTxBadPkt_WIDTH 32
-#define GTxUcastPkt_offset 0xB0
-#define GTxUcastPkt_WIDTH 32
-#define GTxMcastPkt_offset 0xB4
-#define GTxMcastPkt_WIDTH 32
-#define GTxBcastPkt_offset 0xB8
-#define GTxBcastPkt_WIDTH 32
-#define GTxLt64Pkt_offset 0xBC
-#define GTxLt64Pkt_WIDTH 32
-#define GTx64Pkt_offset 0xC0
-#define GTx64Pkt_WIDTH 32
-#define GTx65to127Pkt_offset 0xC4
-#define GTx65to127Pkt_WIDTH 32
-#define GTx128to255Pkt_offset 0xC8
-#define GTx128to255Pkt_WIDTH 32
-#define GTx256to511Pkt_offset 0xCC
-#define GTx256to511Pkt_WIDTH 32
-#define GTx512to1023Pkt_offset 0xD0
-#define GTx512to1023Pkt_WIDTH 32
-#define GTx1024to15xxPkt_offset 0xD4
-#define GTx1024to15xxPkt_WIDTH 32
-#define GTx15xxtoJumboPkt_offset 0xD8
-#define GTx15xxtoJumboPkt_WIDTH 32
-#define GTxGtJumboPkt_offset 0xDC
-#define GTxGtJumboPkt_WIDTH 32
-#define GTxNonTcpUdpPkt_offset 0xE0
-#define GTxNonTcpUdpPkt_WIDTH 16
-#define GTxMacSrcErrPkt_offset 0xE4
-#define GTxMacSrcErrPkt_WIDTH 16
-#define GTxIpSrcErrPkt_offset 0xE8
-#define GTxIpSrcErrPkt_WIDTH 16
-#define GDmaDone_offset 0xEC
-#define GDmaDone_WIDTH 32
-
-#define XgRxOctets_offset 0x0
-#define XgRxOctets_WIDTH 48
-#define XgRxOctetsOK_offset 0x8
-#define XgRxOctetsOK_WIDTH 48
-#define XgRxPkts_offset 0x10
-#define XgRxPkts_WIDTH 32
-#define XgRxPktsOK_offset 0x14
-#define XgRxPktsOK_WIDTH 32
-#define XgRxBroadcastPkts_offset 0x18
-#define XgRxBroadcastPkts_WIDTH 32
-#define XgRxMulticastPkts_offset 0x1C
-#define XgRxMulticastPkts_WIDTH 32
-#define XgRxUnicastPkts_offset 0x20
-#define XgRxUnicastPkts_WIDTH 32
-#define XgRxUndersizePkts_offset 0x24
-#define XgRxUndersizePkts_WIDTH 32
-#define XgRxOversizePkts_offset 0x28
-#define XgRxOversizePkts_WIDTH 32
-#define XgRxJabberPkts_offset 0x2C
-#define XgRxJabberPkts_WIDTH 32
-#define XgRxUndersizeFCSerrorPkts_offset 0x30
-#define XgRxUndersizeFCSerrorPkts_WIDTH 32
-#define XgRxDropEvents_offset 0x34
-#define XgRxDropEvents_WIDTH 32
-#define XgRxFCSerrorPkts_offset 0x38
-#define XgRxFCSerrorPkts_WIDTH 32
-#define XgRxAlignError_offset 0x3C
-#define XgRxAlignError_WIDTH 32
-#define XgRxSymbolError_offset 0x40
-#define XgRxSymbolError_WIDTH 32
-#define XgRxInternalMACError_offset 0x44
-#define XgRxInternalMACError_WIDTH 32
-#define XgRxControlPkts_offset 0x48
-#define XgRxControlPkts_WIDTH 32
-#define XgRxPausePkts_offset 0x4C
-#define XgRxPausePkts_WIDTH 32
-#define XgRxPkts64Octets_offset 0x50
-#define XgRxPkts64Octets_WIDTH 32
-#define XgRxPkts65to127Octets_offset 0x54
-#define XgRxPkts65to127Octets_WIDTH 32
-#define XgRxPkts128to255Octets_offset 0x58
-#define XgRxPkts128to255Octets_WIDTH 32
-#define XgRxPkts256to511Octets_offset 0x5C
-#define XgRxPkts256to511Octets_WIDTH 32
-#define XgRxPkts512to1023Octets_offset 0x60
-#define XgRxPkts512to1023Octets_WIDTH 32
-#define XgRxPkts1024to15xxOctets_offset 0x64
-#define XgRxPkts1024to15xxOctets_WIDTH 32
-#define XgRxPkts15xxtoMaxOctets_offset 0x68
-#define XgRxPkts15xxtoMaxOctets_WIDTH 32
-#define XgRxLengthError_offset 0x6C
-#define XgRxLengthError_WIDTH 32
-#define XgTxPkts_offset 0x80
-#define XgTxPkts_WIDTH 32
-#define XgTxOctets_offset 0x88
-#define XgTxOctets_WIDTH 48
-#define XgTxMulticastPkts_offset 0x90
-#define XgTxMulticastPkts_WIDTH 32
-#define XgTxBroadcastPkts_offset 0x94
-#define XgTxBroadcastPkts_WIDTH 32
-#define XgTxUnicastPkts_offset 0x98
-#define XgTxUnicastPkts_WIDTH 32
-#define XgTxControlPkts_offset 0x9C
-#define XgTxControlPkts_WIDTH 32
-#define XgTxPausePkts_offset 0xA0
-#define XgTxPausePkts_WIDTH 32
-#define XgTxPkts64Octets_offset 0xA4
-#define XgTxPkts64Octets_WIDTH 32
-#define XgTxPkts65to127Octets_offset 0xA8
-#define XgTxPkts65to127Octets_WIDTH 32
-#define XgTxPkts128to255Octets_offset 0xAC
-#define XgTxPkts128to255Octets_WIDTH 32
-#define XgTxPkts256to511Octets_offset 0xB0
-#define XgTxPkts256to511Octets_WIDTH 32
-#define XgTxPkts512to1023Octets_offset 0xB4
-#define XgTxPkts512to1023Octets_WIDTH 32
-#define XgTxPkts1024to15xxOctets_offset 0xB8
-#define XgTxPkts1024to15xxOctets_WIDTH 32
-#define XgTxPkts1519toMaxOctets_offset 0xBC
-#define XgTxPkts1519toMaxOctets_WIDTH 32
-#define XgTxUndersizePkts_offset 0xC0
-#define XgTxUndersizePkts_WIDTH 32
-#define XgTxOversizePkts_offset 0xC4
-#define XgTxOversizePkts_WIDTH 32
-#define XgTxNonTcpUdpPkt_offset 0xC8
-#define XgTxNonTcpUdpPkt_WIDTH 16
-#define XgTxMacSrcErrPkt_offset 0xCC
-#define XgTxMacSrcErrPkt_WIDTH 16
-#define XgTxIpSrcErrPkt_offset 0xD0
-#define XgTxIpSrcErrPkt_WIDTH 16
-#define XgDmaDone_offset 0xD4
-#define XgDmaDone_WIDTH 32
-
-#define FALCON_STATS_NOT_DONE 0x00000000
-#define FALCON_STATS_DONE 0xffffffff
-
-/**************************************************************************
- *
- * Falcon non-volatile configuration
- *
- **************************************************************************
- */
+/* RX packet prefix */
+#define FS_BZ_RX_PREFIX_HASH_OFST 12
+#define FS_BZ_RX_PREFIX_SIZE 16
-/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
-struct falcon_nvconfig_board_v2 {
- __le16 nports;
- u8 port0_phy_addr;
- u8 port0_phy_type;
- u8 port1_phy_addr;
- u8 port1_phy_type;
- __le16 asic_sub_revision;
- __le16 board_revision;
-} __packed;
-
-/* Board configuration v3 extra information */
-struct falcon_nvconfig_board_v3 {
- __le32 spi_device_type[2];
-} __packed;
-
-/* Bit numbers for spi_device_type */
-#define SPI_DEV_TYPE_SIZE_LBN 0
-#define SPI_DEV_TYPE_SIZE_WIDTH 5
-#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
-#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
-#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
-#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
-#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
-#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
-#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
-#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
-#define SPI_DEV_TYPE_FIELD(type, field) \
- (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
-
-#define FALCON_NVCONFIG_OFFSET 0x300
-
-#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
-struct falcon_nvconfig {
- efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
- u8 mac_address[2][8]; /* 0x310 */
- efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
- efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
- efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
- efx_oword_t hw_init_reg; /* 0x350 */
- efx_oword_t nic_stat_reg; /* 0x360 */
- efx_oword_t glb_ctl_reg; /* 0x370 */
- efx_oword_t srm_cfg_reg; /* 0x380 */
- efx_oword_t spare_reg; /* 0x390 */
- __le16 board_magic_num; /* 0x3A0 */
- __le16 board_struct_ver;
- __le16 board_checksum;
- struct falcon_nvconfig_board_v2 board_v2;
- efx_oword_t ee_base_page_reg; /* 0x3B0 */
- struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */
-} __packed;
-
-#endif /* EFX_REGS_H */
+#endif /* EFX_FARCH_REGS_H */
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
deleted file mode 100644
index b74a60ab9ac..00000000000
--- a/drivers/net/ethernet/sfc/filter.c
+++ /dev/null
@@ -1,1272 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2010 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#include <linux/in.h>
-#include <net/ip.h>
-#include "efx.h"
-#include "filter.h"
-#include "io.h"
-#include "nic.h"
-#include "regs.h"
-
-/* "Fudge factors" - difference between programmed value and actual depth.
- * Due to pipelined implementation we need to program H/W with a value that
- * is larger than the hop limit we want.
- */
-#define FILTER_CTL_SRCH_FUDGE_WILD 3
-#define FILTER_CTL_SRCH_FUDGE_FULL 1
-
-/* Hard maximum hop limit. Hardware will time-out beyond 200-something.
- * We also need to avoid infinite loops in efx_filter_search() when the
- * table is full.
- */
-#define FILTER_CTL_SRCH_MAX 200
-
-/* Don't try very hard to find space for performance hints, as this is
- * counter-productive. */
-#define FILTER_CTL_SRCH_HINT_MAX 5
-
-enum efx_filter_table_id {
- EFX_FILTER_TABLE_RX_IP = 0,
- EFX_FILTER_TABLE_RX_MAC,
- EFX_FILTER_TABLE_RX_DEF,
- EFX_FILTER_TABLE_TX_MAC,
- EFX_FILTER_TABLE_COUNT,
-};
-
-enum efx_filter_index {
- EFX_FILTER_INDEX_UC_DEF,
- EFX_FILTER_INDEX_MC_DEF,
- EFX_FILTER_SIZE_RX_DEF,
-};
-
-struct efx_filter_table {
- enum efx_filter_table_id id;
- u32 offset; /* address of table relative to BAR */
- unsigned size; /* number of entries */
- unsigned step; /* step between entries */
- unsigned used; /* number currently used */
- unsigned long *used_bitmap;
- struct efx_filter_spec *spec;
- unsigned search_depth[EFX_FILTER_TYPE_COUNT];
-};
-
-struct efx_filter_state {
- spinlock_t lock;
- struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
-#ifdef CONFIG_RFS_ACCEL
- u32 *rps_flow_id;
- unsigned rps_expire_index;
-#endif
-};
-
-static void efx_filter_table_clear_entry(struct efx_nic *efx,
- struct efx_filter_table *table,
- unsigned int filter_idx);
-
-/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
- * key derived from the n-tuple. The initial LFSR state is 0xffff. */
-static u16 efx_filter_hash(u32 key)
-{
- u16 tmp;
-
- /* First 16 rounds */
- tmp = 0x1fff ^ key >> 16;
- tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
- tmp = tmp ^ tmp >> 9;
- /* Last 16 rounds */
- tmp = tmp ^ tmp << 13 ^ key;
- tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
- return tmp ^ tmp >> 9;
-}
-
-/* To allow for hash collisions, filter search continues at these
- * increments from the first possible entry selected by the hash. */
-static u16 efx_filter_increment(u32 key)
-{
- return key * 2 - 1;
-}
-
-static enum efx_filter_table_id
-efx_filter_spec_table_id(const struct efx_filter_spec *spec)
-{
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
- BUILD_BUG_ON(EFX_FILTER_TABLE_TX_MAC != EFX_FILTER_TABLE_RX_MAC + 2);
- EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
- return (spec->type >> 2) + ((spec->flags & EFX_FILTER_FLAG_TX) ? 2 : 0);
-}
-
-static struct efx_filter_table *
-efx_filter_spec_table(struct efx_filter_state *state,
- const struct efx_filter_spec *spec)
-{
- if (spec->type == EFX_FILTER_UNSPEC)
- return NULL;
- else
- return &state->table[efx_filter_spec_table_id(spec)];
-}
-
-static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
-{
- memset(table->search_depth, 0, sizeof(table->search_depth));
-}
-
-static void efx_filter_push_rx_config(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table;
- efx_oword_t filter_ctl;
-
- efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
-
- table = &state->table[EFX_FILTER_TABLE_RX_IP];
- EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
- table->search_depth[EFX_FILTER_TCP_FULL] +
- FILTER_CTL_SRCH_FUDGE_FULL);
- EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
- table->search_depth[EFX_FILTER_TCP_WILD] +
- FILTER_CTL_SRCH_FUDGE_WILD);
- EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
- table->search_depth[EFX_FILTER_UDP_FULL] +
- FILTER_CTL_SRCH_FUDGE_FULL);
- EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
- table->search_depth[EFX_FILTER_UDP_WILD] +
- FILTER_CTL_SRCH_FUDGE_WILD);
-
- table = &state->table[EFX_FILTER_TABLE_RX_MAC];
- if (table->size) {
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
- table->search_depth[EFX_FILTER_MAC_FULL] +
- FILTER_CTL_SRCH_FUDGE_FULL);
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
- table->search_depth[EFX_FILTER_MAC_WILD] +
- FILTER_CTL_SRCH_FUDGE_WILD);
- }
-
- table = &state->table[EFX_FILTER_TABLE_RX_DEF];
- if (table->size) {
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
- table->spec[EFX_FILTER_INDEX_UC_DEF].dmaq_id);
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
- !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
- EFX_FILTER_FLAG_RX_RSS));
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
- table->spec[EFX_FILTER_INDEX_MC_DEF].dmaq_id);
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
- !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
- EFX_FILTER_FLAG_RX_RSS));
-
- /* There is a single bit to enable RX scatter for all
- * unmatched packets. Only set it if scatter is
- * enabled in both filter specs.
- */
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
- !!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
- table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
- EFX_FILTER_FLAG_RX_SCATTER));
- } else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- /* We don't expose 'default' filters because unmatched
- * packets always go to the queue number found in the
- * RSS table. But we still need to set the RX scatter
- * bit here.
- */
- EFX_SET_OWORD_FIELD(
- filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
- efx->rx_scatter);
- }
-
- efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
-}
-
-static void efx_filter_push_tx_limits(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table;
- efx_oword_t tx_cfg;
-
- efx_reado(efx, &tx_cfg, FR_AZ_TX_CFG);
-
- table = &state->table[EFX_FILTER_TABLE_TX_MAC];
- if (table->size) {
- EFX_SET_OWORD_FIELD(
- tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
- table->search_depth[EFX_FILTER_MAC_FULL] +
- FILTER_CTL_SRCH_FUDGE_FULL);
- EFX_SET_OWORD_FIELD(
- tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
- table->search_depth[EFX_FILTER_MAC_WILD] +
- FILTER_CTL_SRCH_FUDGE_WILD);
- }
-
- efx_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
-}
-
-static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
- __be32 host1, __be16 port1,
- __be32 host2, __be16 port2)
-{
- spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
- spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
- spec->data[2] = ntohl(host2);
-}
-
-static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec,
- __be32 *host1, __be16 *port1,
- __be32 *host2, __be16 *port2)
-{
- *host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
- *port1 = htons(spec->data[0]);
- *host2 = htonl(spec->data[2]);
- *port2 = htons(spec->data[1] >> 16);
-}
-
-/**
- * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
- * @spec: Specification to initialise
- * @proto: Transport layer protocol number
- * @host: Local host address (network byte order)
- * @port: Local port (network byte order)
- */
-int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
- __be32 host, __be16 port)
-{
- __be32 host1;
- __be16 port1;
-
- EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
-
- /* This cannot currently be combined with other filtering */
- if (spec->type != EFX_FILTER_UNSPEC)
- return -EPROTONOSUPPORT;
-
- if (port == 0)
- return -EINVAL;
-
- switch (proto) {
- case IPPROTO_TCP:
- spec->type = EFX_FILTER_TCP_WILD;
- break;
- case IPPROTO_UDP:
- spec->type = EFX_FILTER_UDP_WILD;
- break;
- default:
- return -EPROTONOSUPPORT;
- }
-
- /* Filter is constructed in terms of source and destination,
- * with the odd wrinkle that the ports are swapped in a UDP
- * wildcard filter. We need to convert from local and remote
- * (= zero for wildcard) addresses.
- */
- host1 = 0;
- if (proto != IPPROTO_UDP) {
- port1 = 0;
- } else {
- port1 = port;
- port = 0;
- }
-
- __efx_filter_set_ipv4(spec, host1, port1, host, port);
- return 0;
-}
-
-int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
- u8 *proto, __be32 *host, __be16 *port)
-{
- __be32 host1;
- __be16 port1;
-
- switch (spec->type) {
- case EFX_FILTER_TCP_WILD:
- *proto = IPPROTO_TCP;
- __efx_filter_get_ipv4(spec, &host1, &port1, host, port);
- return 0;
- case EFX_FILTER_UDP_WILD:
- *proto = IPPROTO_UDP;
- __efx_filter_get_ipv4(spec, &host1, port, host, &port1);
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-/**
- * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
- * @spec: Specification to initialise
- * @proto: Transport layer protocol number
- * @host: Local host address (network byte order)
- * @port: Local port (network byte order)
- * @rhost: Remote host address (network byte order)
- * @rport: Remote port (network byte order)
- */
-int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
- __be32 host, __be16 port,
- __be32 rhost, __be16 rport)
-{
- EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
-
- /* This cannot currently be combined with other filtering */
- if (spec->type != EFX_FILTER_UNSPEC)
- return -EPROTONOSUPPORT;
-
- if (port == 0 || rport == 0)
- return -EINVAL;
-
- switch (proto) {
- case IPPROTO_TCP:
- spec->type = EFX_FILTER_TCP_FULL;
- break;
- case IPPROTO_UDP:
- spec->type = EFX_FILTER_UDP_FULL;
- break;
- default:
- return -EPROTONOSUPPORT;
- }
-
- __efx_filter_set_ipv4(spec, rhost, rport, host, port);
- return 0;
-}
-
-int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
- u8 *proto, __be32 *host, __be16 *port,
- __be32 *rhost, __be16 *rport)
-{
- switch (spec->type) {
- case EFX_FILTER_TCP_FULL:
- *proto = IPPROTO_TCP;
- break;
- case EFX_FILTER_UDP_FULL:
- *proto = IPPROTO_UDP;
- break;
- default:
- return -EINVAL;
- }
-
- __efx_filter_get_ipv4(spec, rhost, rport, host, port);
- return 0;
-}
-
-/**
- * efx_filter_set_eth_local - specify local Ethernet address and optional VID
- * @spec: Specification to initialise
- * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
- * @addr: Local Ethernet MAC address
- */
-int efx_filter_set_eth_local(struct efx_filter_spec *spec,
- u16 vid, const u8 *addr)
-{
- EFX_BUG_ON_PARANOID(!(spec->flags &
- (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
-
- /* This cannot currently be combined with other filtering */
- if (spec->type != EFX_FILTER_UNSPEC)
- return -EPROTONOSUPPORT;
-
- if (vid == EFX_FILTER_VID_UNSPEC) {
- spec->type = EFX_FILTER_MAC_WILD;
- spec->data[0] = 0;
- } else {
- spec->type = EFX_FILTER_MAC_FULL;
- spec->data[0] = vid;
- }
-
- spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
- spec->data[2] = addr[0] << 8 | addr[1];
- return 0;
-}
-
-/**
- * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
- * @spec: Specification to initialise
- */
-int efx_filter_set_uc_def(struct efx_filter_spec *spec)
-{
- EFX_BUG_ON_PARANOID(!(spec->flags &
- (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
-
- if (spec->type != EFX_FILTER_UNSPEC)
- return -EINVAL;
-
- spec->type = EFX_FILTER_UC_DEF;
- memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
- return 0;
-}
-
-/**
- * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
- * @spec: Specification to initialise
- */
-int efx_filter_set_mc_def(struct efx_filter_spec *spec)
-{
- EFX_BUG_ON_PARANOID(!(spec->flags &
- (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)));
-
- if (spec->type != EFX_FILTER_UNSPEC)
- return -EINVAL;
-
- spec->type = EFX_FILTER_MC_DEF;
- memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
- return 0;
-}
-
-static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF];
- struct efx_filter_spec *spec = &table->spec[filter_idx];
- enum efx_filter_flags flags = 0;
-
- /* If there's only one channel then disable RSS for non VF
- * traffic, thereby allowing VFs to use RSS when the PF can't.
- */
- if (efx->n_rx_channels > 1)
- flags |= EFX_FILTER_FLAG_RX_RSS;
-
- if (efx->rx_scatter)
- flags |= EFX_FILTER_FLAG_RX_SCATTER;
-
- efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, flags, 0);
- spec->type = EFX_FILTER_UC_DEF + filter_idx;
- table->used_bitmap[0] |= 1 << filter_idx;
-}
-
-int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
- u16 *vid, u8 *addr)
-{
- switch (spec->type) {
- case EFX_FILTER_MAC_WILD:
- *vid = EFX_FILTER_VID_UNSPEC;
- break;
- case EFX_FILTER_MAC_FULL:
- *vid = spec->data[0];
- break;
- default:
- return -EINVAL;
- }
-
- addr[0] = spec->data[2] >> 8;
- addr[1] = spec->data[2];
- addr[2] = spec->data[1] >> 24;
- addr[3] = spec->data[1] >> 16;
- addr[4] = spec->data[1] >> 8;
- addr[5] = spec->data[1];
- return 0;
-}
-
-/* Build a filter entry and return its n-tuple key. */
-static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
-{
- u32 data3;
-
- switch (efx_filter_spec_table_id(spec)) {
- case EFX_FILTER_TABLE_RX_IP: {
- bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
- spec->type == EFX_FILTER_UDP_WILD);
- EFX_POPULATE_OWORD_7(
- *filter,
- FRF_BZ_RSS_EN,
- !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
- FRF_BZ_SCATTER_EN,
- !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
- FRF_BZ_TCP_UDP, is_udp,
- FRF_BZ_RXQ_ID, spec->dmaq_id,
- EFX_DWORD_2, spec->data[2],
- EFX_DWORD_1, spec->data[1],
- EFX_DWORD_0, spec->data[0]);
- data3 = is_udp;
- break;
- }
-
- case EFX_FILTER_TABLE_RX_MAC: {
- bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
- EFX_POPULATE_OWORD_7(
- *filter,
- FRF_CZ_RMFT_RSS_EN,
- !!(spec->flags & EFX_FILTER_FLAG_RX_RSS),
- FRF_CZ_RMFT_SCATTER_EN,
- !!(spec->flags & EFX_FILTER_FLAG_RX_SCATTER),
- FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
- FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
- FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
- FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
- FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
- data3 = is_wild;
- break;
- }
-
- case EFX_FILTER_TABLE_TX_MAC: {
- bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
- EFX_POPULATE_OWORD_5(*filter,
- FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
- FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
- FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
- FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
- FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
- data3 = is_wild | spec->dmaq_id << 1;
- break;
- }
-
- default:
- BUG();
- }
-
- return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
-}
-
-static bool efx_filter_equal(const struct efx_filter_spec *left,
- const struct efx_filter_spec *right)
-{
- if (left->type != right->type ||
- memcmp(left->data, right->data, sizeof(left->data)))
- return false;
-
- if (left->flags & EFX_FILTER_FLAG_TX &&
- left->dmaq_id != right->dmaq_id)
- return false;
-
- return true;
-}
-
-/*
- * Construct/deconstruct external filter IDs. At least the RX filter
- * IDs must be ordered by matching priority, for RX NFC semantics.
- *
- * Deconstruction needs to be robust against invalid IDs so that
- * efx_filter_remove_id_safe() and efx_filter_get_filter_safe() can
- * accept user-provided IDs.
- */
-
-#define EFX_FILTER_MATCH_PRI_COUNT 5
-
-static const u8 efx_filter_type_match_pri[EFX_FILTER_TYPE_COUNT] = {
- [EFX_FILTER_TCP_FULL] = 0,
- [EFX_FILTER_UDP_FULL] = 0,
- [EFX_FILTER_TCP_WILD] = 1,
- [EFX_FILTER_UDP_WILD] = 1,
- [EFX_FILTER_MAC_FULL] = 2,
- [EFX_FILTER_MAC_WILD] = 3,
- [EFX_FILTER_UC_DEF] = 4,
- [EFX_FILTER_MC_DEF] = 4,
-};
-
-static const enum efx_filter_table_id efx_filter_range_table[] = {
- EFX_FILTER_TABLE_RX_IP, /* RX match pri 0 */
- EFX_FILTER_TABLE_RX_IP,
- EFX_FILTER_TABLE_RX_MAC,
- EFX_FILTER_TABLE_RX_MAC,
- EFX_FILTER_TABLE_RX_DEF, /* RX match pri 4 */
- EFX_FILTER_TABLE_COUNT, /* TX match pri 0; invalid */
- EFX_FILTER_TABLE_COUNT, /* invalid */
- EFX_FILTER_TABLE_TX_MAC,
- EFX_FILTER_TABLE_TX_MAC, /* TX match pri 3 */
-};
-
-#define EFX_FILTER_INDEX_WIDTH 13
-#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
-
-static inline u32
-efx_filter_make_id(const struct efx_filter_spec *spec, unsigned int index)
-{
- unsigned int range;
-
- range = efx_filter_type_match_pri[spec->type];
- if (!(spec->flags & EFX_FILTER_FLAG_RX))
- range += EFX_FILTER_MATCH_PRI_COUNT;
-
- return range << EFX_FILTER_INDEX_WIDTH | index;
-}
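/* Illustrative worked example (not part of the original patch), using only
 * the tables and macros shown in this hunk:
 *
 *   RX EFX_FILTER_MAC_WILD at table index 5:
 *     range = efx_filter_type_match_pri[EFX_FILTER_MAC_WILD] = 3
 *     id    = 3 << EFX_FILTER_INDEX_WIDTH | 5 = 0x6005
 *     decode: efx_filter_id_table_id(0x6005) == EFX_FILTER_TABLE_RX_MAC
 *             efx_filter_id_index(0x6005)    == 5
 *             efx_filter_id_flags(0x6005)    == EFX_FILTER_FLAG_RX
 *
 *   The equivalent TX filter adds EFX_FILTER_MATCH_PRI_COUNT (5) to the
 *   range, giving id = 8 << 13 | 5 = 0x10005, which decodes to
 *   EFX_FILTER_TABLE_TX_MAC with EFX_FILTER_FLAG_TX.
 */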
-
-static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
-{
- unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
-
- if (range < ARRAY_SIZE(efx_filter_range_table))
- return efx_filter_range_table[range];
- else
- return EFX_FILTER_TABLE_COUNT; /* invalid */
-}
-
-static inline unsigned int efx_filter_id_index(u32 id)
-{
- return id & EFX_FILTER_INDEX_MASK;
-}
-
-static inline u8 efx_filter_id_flags(u32 id)
-{
- unsigned int range = id >> EFX_FILTER_INDEX_WIDTH;
-
- if (range < EFX_FILTER_MATCH_PRI_COUNT)
- return EFX_FILTER_FLAG_RX;
- else
- return EFX_FILTER_FLAG_TX;
-}
-
-u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- unsigned int range = EFX_FILTER_MATCH_PRI_COUNT - 1;
- enum efx_filter_table_id table_id;
-
- do {
- table_id = efx_filter_range_table[range];
- if (state->table[table_id].size != 0)
- return range << EFX_FILTER_INDEX_WIDTH |
- state->table[table_id].size;
- } while (range--);
-
- return 0;
-}
-
-/**
- * efx_filter_insert_filter - add or replace a filter
- * @efx: NIC in which to insert the filter
- * @spec: Specification for the filter
- * @replace_equal: Flag for whether the specified filter may replace an
- * existing filter with equal priority
- *
- * On success, return the filter ID.
- * On failure, return a negative error code.
- *
- * If an existing filter has equal match values to the new filter
- * spec, then the new filter might replace it, depending on the
- * relative priorities. If the existing filter has lower priority, or
- * if @replace_equal is set and it has equal priority, then it is
- * replaced. Otherwise the function fails, returning -%EPERM if
- * the existing filter has higher priority or -%EEXIST if it has
- * equal priority.
- */
-s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
- bool replace_equal)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table = efx_filter_spec_table(state, spec);
- efx_oword_t filter;
- int rep_index, ins_index;
- unsigned int depth = 0;
- int rc;
-
- if (!table || table->size == 0)
- return -EINVAL;
-
- netif_vdbg(efx, hw, efx->net_dev,
- "%s: type %d search_depth=%d", __func__, spec->type,
- table->search_depth[spec->type]);
-
- if (table->id == EFX_FILTER_TABLE_RX_DEF) {
- /* One filter spec per type */
- BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
- BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
- EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
- rep_index = spec->type - EFX_FILTER_INDEX_UC_DEF;
- ins_index = rep_index;
-
- spin_lock_bh(&state->lock);
- } else {
- /* Search concurrently for
- * (1) a filter to be replaced (rep_index): any filter
- * with the same match values, up to the current
- * search depth for this type, and
- * (2) the insertion point (ins_index): (1) or any
- * free slot before it or up to the maximum search
- * depth for this priority
- * We fail if we cannot find (2).
- *
- * We can stop once either
- * (a) we find (1), in which case we have definitely
- * found (2) as well; or
- * (b) we have searched exhaustively for (1), and have
- * either found (2) or searched exhaustively for it
- */
- u32 key = efx_filter_build(&filter, spec);
- unsigned int hash = efx_filter_hash(key);
- unsigned int incr = efx_filter_increment(key);
- unsigned int max_rep_depth = table->search_depth[spec->type];
- unsigned int max_ins_depth =
- spec->priority <= EFX_FILTER_PRI_HINT ?
- FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX;
- unsigned int i = hash & (table->size - 1);
-
- ins_index = -1;
- depth = 1;
-
- spin_lock_bh(&state->lock);
-
- for (;;) {
- if (!test_bit(i, table->used_bitmap)) {
- if (ins_index < 0)
- ins_index = i;
- } else if (efx_filter_equal(spec, &table->spec[i])) {
- /* Case (a) */
- if (ins_index < 0)
- ins_index = i;
- rep_index = i;
- break;
- }
-
- if (depth >= max_rep_depth &&
- (ins_index >= 0 || depth >= max_ins_depth)) {
- /* Case (b) */
- if (ins_index < 0) {
- rc = -EBUSY;
- goto out;
- }
- rep_index = -1;
- break;
- }
-
- i = (i + incr) & (table->size - 1);
- ++depth;
- }
- }
-
- /* If we found a filter to be replaced, check whether we
- * should do so
- */
- if (rep_index >= 0) {
- struct efx_filter_spec *saved_spec = &table->spec[rep_index];
-
- if (spec->priority == saved_spec->priority && !replace_equal) {
- rc = -EEXIST;
- goto out;
- }
- if (spec->priority < saved_spec->priority) {
- rc = -EPERM;
- goto out;
- }
- }
-
- /* Insert the filter */
- if (ins_index != rep_index) {
- __set_bit(ins_index, table->used_bitmap);
- ++table->used;
- }
- table->spec[ins_index] = *spec;
-
- if (table->id == EFX_FILTER_TABLE_RX_DEF) {
- efx_filter_push_rx_config(efx);
- } else {
- if (table->search_depth[spec->type] < depth) {
- table->search_depth[spec->type] = depth;
- if (spec->flags & EFX_FILTER_FLAG_TX)
- efx_filter_push_tx_limits(efx);
- else
- efx_filter_push_rx_config(efx);
- }
-
- efx_writeo(efx, &filter,
- table->offset + table->step * ins_index);
-
- /* If we were able to replace a filter by inserting
- * at a lower depth, clear the replaced filter
- */
- if (ins_index != rep_index && rep_index >= 0)
- efx_filter_table_clear_entry(efx, table, rep_index);
- }
-
- netif_vdbg(efx, hw, efx->net_dev,
- "%s: filter type %d index %d rxq %u set",
- __func__, spec->type, ins_index, spec->dmaq_id);
- rc = efx_filter_make_id(spec, ins_index);
-
-out:
- spin_unlock_bh(&state->lock);
- return rc;
-}
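/* Minimal usage sketch (illustrative only, not part of the patch): insert a
 * TCP/IPv4 4-tuple steering filter through the driver-internal API removed
 * here, handling the replacement semantics documented above. The queue,
 * addresses and ports are arbitrary placeholders.
 */
static int example_insert_tcp_filter(struct efx_nic *efx, unsigned int rxq)
{
	struct efx_filter_spec spec;
	s32 rc;

	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, rxq);
	rc = efx_filter_set_ipv4_full(&spec, IPPROTO_TCP,
				      htonl(0xc0a80001), htons(80),     /* local 192.168.0.1:80 */
				      htonl(0xc0a80002), htons(12345)); /* remote 192.168.0.2:12345 */
	if (rc)
		return rc;

	/* replace_equal=false: do not displace an equal-priority filter */
	rc = efx_filter_insert_filter(efx, &spec, false);
	if (rc == -EEXIST || rc == -EPERM)
		return rc;	/* equal or higher-priority filter already present */
	return rc < 0 ? rc : 0;	/* rc >= 0 is the external filter ID */
}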
-
-static void efx_filter_table_clear_entry(struct efx_nic *efx,
- struct efx_filter_table *table,
- unsigned int filter_idx)
-{
- static efx_oword_t filter;
-
- if (table->id == EFX_FILTER_TABLE_RX_DEF) {
- /* RX default filters must always exist */
- efx_filter_reset_rx_def(efx, filter_idx);
- efx_filter_push_rx_config(efx);
- } else if (test_bit(filter_idx, table->used_bitmap)) {
- __clear_bit(filter_idx, table->used_bitmap);
- --table->used;
- memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));
-
- efx_writeo(efx, &filter,
- table->offset + table->step * filter_idx);
- }
-}
-
-/**
- * efx_filter_remove_id_safe - remove a filter by ID, carefully
- * @efx: NIC from which to remove the filter
- * @priority: Priority of filter, as passed to @efx_filter_insert_filter
- * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
- *
- * This function will range-check @filter_id, so it is safe to call
- * with a value passed from userland.
- */
-int efx_filter_remove_id_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- unsigned int filter_idx;
- struct efx_filter_spec *spec;
- u8 filter_flags;
- int rc;
-
- table_id = efx_filter_id_table_id(filter_id);
- if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
- return -ENOENT;
- table = &state->table[table_id];
-
- filter_idx = efx_filter_id_index(filter_id);
- if (filter_idx >= table->size)
- return -ENOENT;
- spec = &table->spec[filter_idx];
-
- filter_flags = efx_filter_id_flags(filter_id);
-
- spin_lock_bh(&state->lock);
-
- if (test_bit(filter_idx, table->used_bitmap) &&
- spec->priority == priority) {
- efx_filter_table_clear_entry(efx, table, filter_idx);
- if (table->used == 0)
- efx_filter_table_reset_search_depth(table);
- rc = 0;
- } else {
- rc = -ENOENT;
- }
-
- spin_unlock_bh(&state->lock);
-
- return rc;
-}
-
-/**
- * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
- * @efx: NIC from which to remove the filter
- * @priority: Priority of filter, as passed to @efx_filter_insert_filter
- * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
- * @spec: Buffer in which to store filter specification
- *
- * This function will range-check @filter_id, so it is safe to call
- * with a value passed from userland.
- */
-int efx_filter_get_filter_safe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id, struct efx_filter_spec *spec_buf)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- struct efx_filter_spec *spec;
- unsigned int filter_idx;
- u8 filter_flags;
- int rc;
-
- table_id = efx_filter_id_table_id(filter_id);
- if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
- return -ENOENT;
- table = &state->table[table_id];
-
- filter_idx = efx_filter_id_index(filter_id);
- if (filter_idx >= table->size)
- return -ENOENT;
- spec = &table->spec[filter_idx];
-
- filter_flags = efx_filter_id_flags(filter_id);
-
- spin_lock_bh(&state->lock);
-
- if (test_bit(filter_idx, table->used_bitmap) &&
- spec->priority == priority) {
- *spec_buf = *spec;
- rc = 0;
- } else {
- rc = -ENOENT;
- }
-
- spin_unlock_bh(&state->lock);
-
- return rc;
-}
-
-static void efx_filter_table_clear(struct efx_nic *efx,
- enum efx_filter_table_id table_id,
- enum efx_filter_priority priority)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table = &state->table[table_id];
- unsigned int filter_idx;
-
- spin_lock_bh(&state->lock);
-
- for (filter_idx = 0; filter_idx < table->size; ++filter_idx)
- if (table->spec[filter_idx].priority <= priority)
- efx_filter_table_clear_entry(efx, table, filter_idx);
- if (table->used == 0)
- efx_filter_table_reset_search_depth(table);
-
- spin_unlock_bh(&state->lock);
-}
-
-/**
- * efx_filter_clear_rx - remove RX filters by priority
- * @efx: NIC from which to remove the filters
- * @priority: Maximum priority to remove
- */
-void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
-{
- efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority);
- efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
-}
-
-u32 efx_filter_count_rx_used(struct efx_nic *efx,
- enum efx_filter_priority priority)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- unsigned int filter_idx;
- u32 count = 0;
-
- spin_lock_bh(&state->lock);
-
- for (table_id = EFX_FILTER_TABLE_RX_IP;
- table_id <= EFX_FILTER_TABLE_RX_DEF;
- table_id++) {
- table = &state->table[table_id];
- for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
- if (test_bit(filter_idx, table->used_bitmap) &&
- table->spec[filter_idx].priority == priority)
- ++count;
- }
- }
-
- spin_unlock_bh(&state->lock);
-
- return count;
-}
-
-s32 efx_filter_get_rx_ids(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 *buf, u32 size)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- unsigned int filter_idx;
- s32 count = 0;
-
- spin_lock_bh(&state->lock);
-
- for (table_id = EFX_FILTER_TABLE_RX_IP;
- table_id <= EFX_FILTER_TABLE_RX_DEF;
- table_id++) {
- table = &state->table[table_id];
- for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
- if (test_bit(filter_idx, table->used_bitmap) &&
- table->spec[filter_idx].priority == priority) {
- if (count == size) {
- count = -EMSGSIZE;
- goto out;
- }
- buf[count++] = efx_filter_make_id(
- &table->spec[filter_idx], filter_idx);
- }
- }
- }
-out:
- spin_unlock_bh(&state->lock);
-
- return count;
-}
-
-/* Restore filter state after reset */
-void efx_restore_filters(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- efx_oword_t filter;
- unsigned int filter_idx;
-
- spin_lock_bh(&state->lock);
-
- for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
- table = &state->table[table_id];
-
- /* Check whether this is a regular register table */
- if (table->step == 0)
- continue;
-
- for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
- if (!test_bit(filter_idx, table->used_bitmap))
- continue;
- efx_filter_build(&filter, &table->spec[filter_idx]);
- efx_writeo(efx, &filter,
- table->offset + table->step * filter_idx);
- }
- }
-
- efx_filter_push_rx_config(efx);
- efx_filter_push_tx_limits(efx);
-
- spin_unlock_bh(&state->lock);
-}
-
-int efx_probe_filters(struct efx_nic *efx)
-{
- struct efx_filter_state *state;
- struct efx_filter_table *table;
- unsigned table_id;
-
- state = kzalloc(sizeof(*efx->filter_state), GFP_KERNEL);
- if (!state)
- return -ENOMEM;
- efx->filter_state = state;
-
- spin_lock_init(&state->lock);
-
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
-#ifdef CONFIG_RFS_ACCEL
- state->rps_flow_id = kcalloc(FR_BZ_RX_FILTER_TBL0_ROWS,
- sizeof(*state->rps_flow_id),
- GFP_KERNEL);
- if (!state->rps_flow_id)
- goto fail;
-#endif
- table = &state->table[EFX_FILTER_TABLE_RX_IP];
- table->id = EFX_FILTER_TABLE_RX_IP;
- table->offset = FR_BZ_RX_FILTER_TBL0;
- table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
- table->step = FR_BZ_RX_FILTER_TBL0_STEP;
- }
-
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
- table = &state->table[EFX_FILTER_TABLE_RX_MAC];
- table->id = EFX_FILTER_TABLE_RX_MAC;
- table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
- table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
- table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
-
- table = &state->table[EFX_FILTER_TABLE_RX_DEF];
- table->id = EFX_FILTER_TABLE_RX_DEF;
- table->size = EFX_FILTER_SIZE_RX_DEF;
-
- table = &state->table[EFX_FILTER_TABLE_TX_MAC];
- table->id = EFX_FILTER_TABLE_TX_MAC;
- table->offset = FR_CZ_TX_MAC_FILTER_TBL0;
- table->size = FR_CZ_TX_MAC_FILTER_TBL0_ROWS;
- table->step = FR_CZ_TX_MAC_FILTER_TBL0_STEP;
- }
-
- for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
- table = &state->table[table_id];
- if (table->size == 0)
- continue;
- table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
- sizeof(unsigned long),
- GFP_KERNEL);
- if (!table->used_bitmap)
- goto fail;
- table->spec = vzalloc(table->size * sizeof(*table->spec));
- if (!table->spec)
- goto fail;
- }
-
- if (state->table[EFX_FILTER_TABLE_RX_DEF].size) {
- /* RX default filters must always exist */
- unsigned i;
- for (i = 0; i < EFX_FILTER_SIZE_RX_DEF; i++)
- efx_filter_reset_rx_def(efx, i);
- }
-
- efx_filter_push_rx_config(efx);
-
- return 0;
-
-fail:
- efx_remove_filters(efx);
- return -ENOMEM;
-}
-
-void efx_remove_filters(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
-
- for (table_id = 0; table_id < EFX_FILTER_TABLE_COUNT; table_id++) {
- kfree(state->table[table_id].used_bitmap);
- vfree(state->table[table_id].spec);
- }
-#ifdef CONFIG_RFS_ACCEL
- kfree(state->rps_flow_id);
-#endif
- kfree(state);
-}
-
-/* Update scatter enable flags for filters pointing to our own RX queues */
-void efx_filter_update_rx_scatter(struct efx_nic *efx)
-{
- struct efx_filter_state *state = efx->filter_state;
- enum efx_filter_table_id table_id;
- struct efx_filter_table *table;
- efx_oword_t filter;
- unsigned int filter_idx;
-
- spin_lock_bh(&state->lock);
-
- for (table_id = EFX_FILTER_TABLE_RX_IP;
- table_id <= EFX_FILTER_TABLE_RX_DEF;
- table_id++) {
- table = &state->table[table_id];
-
- for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
- if (!test_bit(filter_idx, table->used_bitmap) ||
- table->spec[filter_idx].dmaq_id >=
- efx->n_rx_channels)
- continue;
-
- if (efx->rx_scatter)
- table->spec[filter_idx].flags |=
- EFX_FILTER_FLAG_RX_SCATTER;
- else
- table->spec[filter_idx].flags &=
- ~EFX_FILTER_FLAG_RX_SCATTER;
-
- if (table_id == EFX_FILTER_TABLE_RX_DEF)
- /* Pushed by efx_filter_push_rx_config() */
- continue;
-
- efx_filter_build(&filter, &table->spec[filter_idx]);
- efx_writeo(efx, &filter,
- table->offset + table->step * filter_idx);
- }
- }
-
- efx_filter_push_rx_config(efx);
-
- spin_unlock_bh(&state->lock);
-}
-
-#ifdef CONFIG_RFS_ACCEL
-
-int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
- u16 rxq_index, u32 flow_id)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct efx_channel *channel;
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_spec spec;
- const struct iphdr *ip;
- const __be16 *ports;
- int nhoff;
- int rc;
-
- nhoff = skb_network_offset(skb);
-
- if (skb->protocol == htons(ETH_P_8021Q)) {
- EFX_BUG_ON_PARANOID(skb_headlen(skb) <
- nhoff + sizeof(struct vlan_hdr));
- if (((const struct vlan_hdr *)skb->data + nhoff)->
- h_vlan_encapsulated_proto != htons(ETH_P_IP))
- return -EPROTONOSUPPORT;
-
- /* This is IP over 802.1q VLAN. We can't filter on the
- * IP 5-tuple and the vlan together, so just strip the
- * vlan header and filter on the IP part.
- */
- nhoff += sizeof(struct vlan_hdr);
- } else if (skb->protocol != htons(ETH_P_IP)) {
- return -EPROTONOSUPPORT;
- }
-
- /* RFS must validate the IP header length before calling us */
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
- ip = (const struct iphdr *)(skb->data + nhoff);
- if (ip_is_fragment(ip))
- return -EPROTONOSUPPORT;
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
- ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
-
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT, 0, rxq_index);
- rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
- ip->daddr, ports[1], ip->saddr, ports[0]);
- if (rc)
- return rc;
-
- rc = efx_filter_insert_filter(efx, &spec, true);
- if (rc < 0)
- return rc;
-
- /* Remember this so we can check whether to expire the filter later */
- state->rps_flow_id[rc] = flow_id;
- channel = efx_get_channel(efx, skb_get_rx_queue(skb));
- ++channel->rfs_filters_added;
-
- netif_info(efx, rx_status, efx->net_dev,
- "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
- (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
- &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
- rxq_index, flow_id, rc);
-
- return rc;
-}
-
-bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned quota)
-{
- struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_IP];
- unsigned mask = table->size - 1;
- unsigned index;
- unsigned stop;
-
- if (!spin_trylock_bh(&state->lock))
- return false;
-
- index = state->rps_expire_index;
- stop = (index + quota) & mask;
-
- while (index != stop) {
- if (test_bit(index, table->used_bitmap) &&
- table->spec[index].priority == EFX_FILTER_PRI_HINT &&
- rps_may_expire_flow(efx->net_dev,
- table->spec[index].dmaq_id,
- state->rps_flow_id[index], index)) {
- netif_info(efx, rx_status, efx->net_dev,
- "expiring filter %d [flow %u]\n",
- index, state->rps_flow_id[index]);
- efx_filter_table_clear_entry(efx, table, index);
- }
- index = (index + 1) & mask;
- }
-
- state->rps_expire_index = stop;
- if (table->used == 0)
- efx_filter_table_reset_search_depth(table);
-
- spin_unlock_bh(&state->lock);
- return true;
-}
-
-#endif /* CONFIG_RFS_ACCEL */
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 5cb54723b82..63c77a55717 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -11,32 +11,49 @@
#define EFX_FILTER_H
#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <asm/byteorder.h>
/**
- * enum efx_filter_type - type of hardware filter
- * @EFX_FILTER_TCP_FULL: Matching TCP/IPv4 4-tuple
- * @EFX_FILTER_TCP_WILD: Matching TCP/IPv4 destination (host, port)
- * @EFX_FILTER_UDP_FULL: Matching UDP/IPv4 4-tuple
- * @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port)
- * @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID
- * @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address
- * @EFX_FILTER_UC_DEF: Matching all otherwise unmatched unicast
- * @EFX_FILTER_MC_DEF: Matching all otherwise unmatched multicast
- * @EFX_FILTER_UNSPEC: Match type is unspecified
+ * enum efx_filter_match_flags - Flags for hardware filter match type
+ * @EFX_FILTER_MATCH_REM_HOST: Match by remote IP host address
+ * @EFX_FILTER_MATCH_LOC_HOST: Match by local IP host address
+ * @EFX_FILTER_MATCH_REM_MAC: Match by remote MAC address
+ * @EFX_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port
+ * @EFX_FILTER_MATCH_LOC_MAC: Match by local MAC address
+ * @EFX_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port
+ * @EFX_FILTER_MATCH_ETHER_TYPE: Match by Ether-type
+ * @EFX_FILTER_MATCH_INNER_VID: Match by inner VLAN ID
+ * @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID
+ * @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol
+ * @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit.
+ * Used for RX default unicast and multicast/broadcast filters.
*
- * Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types.
+ * Only some combinations are supported, depending on NIC type:
+ *
+ * - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or
+ * local 2-tuple (only implemented for Falcon B0)
+ *
+ * - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple
+ * or local 2-tuple, or local MAC with or without outer VID, and RX
+ * default filters
+ *
+ * - Huntington supports filter matching controlled by firmware, potentially
+ * using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit,
+ * with or without outer and inner VID
*/
-enum efx_filter_type {
- EFX_FILTER_TCP_FULL = 0,
- EFX_FILTER_TCP_WILD,
- EFX_FILTER_UDP_FULL,
- EFX_FILTER_UDP_WILD,
- EFX_FILTER_MAC_FULL = 4,
- EFX_FILTER_MAC_WILD,
- EFX_FILTER_UC_DEF = 8,
- EFX_FILTER_MC_DEF,
- EFX_FILTER_TYPE_COUNT, /* number of specific types */
- EFX_FILTER_UNSPEC = 0xf,
+enum efx_filter_match_flags {
+ EFX_FILTER_MATCH_REM_HOST = 0x0001,
+ EFX_FILTER_MATCH_LOC_HOST = 0x0002,
+ EFX_FILTER_MATCH_REM_MAC = 0x0004,
+ EFX_FILTER_MATCH_REM_PORT = 0x0008,
+ EFX_FILTER_MATCH_LOC_MAC = 0x0010,
+ EFX_FILTER_MATCH_LOC_PORT = 0x0020,
+ EFX_FILTER_MATCH_ETHER_TYPE = 0x0040,
+ EFX_FILTER_MATCH_INNER_VID = 0x0080,
+ EFX_FILTER_MATCH_OUTER_VID = 0x0100,
+ EFX_FILTER_MATCH_IP_PROTO = 0x0200,
+ EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400,
};
/**
@@ -61,37 +78,75 @@ enum efx_filter_priority {
* according to the indirection table.
* @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
* queue.
+ * @EFX_FILTER_FLAG_RX_STACK: Indicates a filter inserted for the
+ * network stack. The filter must have a priority of
+ * %EFX_FILTER_PRI_REQUIRED. It can be steered by a replacement
+ * request with priority %EFX_FILTER_PRI_MANUAL, and a removal
+ * request with priority %EFX_FILTER_PRI_MANUAL will reset the
+ * steering (but not remove the filter).
* @EFX_FILTER_FLAG_RX: Filter is for RX
* @EFX_FILTER_FLAG_TX: Filter is for TX
*/
enum efx_filter_flags {
EFX_FILTER_FLAG_RX_RSS = 0x01,
EFX_FILTER_FLAG_RX_SCATTER = 0x02,
+ EFX_FILTER_FLAG_RX_STACK = 0x04,
EFX_FILTER_FLAG_RX = 0x08,
EFX_FILTER_FLAG_TX = 0x10,
};
/**
* struct efx_filter_spec - specification for a hardware filter
- * @type: Type of match to be performed, from &enum efx_filter_type
+ * @match_flags: Match type flags, from &enum efx_filter_match_flags
* @priority: Priority of the filter, from &enum efx_filter_priority
* @flags: Miscellaneous flags, from &enum efx_filter_flags
- * @dmaq_id: Source/target queue index
- * @data: Match data (type-dependent)
+ * @rss_context: RSS context to use, if %EFX_FILTER_FLAG_RX_RSS is set
+ * @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
+ * an RX drop filter
+ * @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
+ * @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
+ * @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
+ * %EFX_FILTER_MATCH_LOC_MAC_IG is set
+ * @rem_mac: Remote MAC address to match, if %EFX_FILTER_MATCH_REM_MAC is set
+ * @ether_type: Ether-type to match, if %EFX_FILTER_MATCH_ETHER_TYPE is set
+ * @ip_proto: IP transport protocol to match, if %EFX_FILTER_MATCH_IP_PROTO
+ * is set
+ * @loc_host: Local IP host to match, if %EFX_FILTER_MATCH_LOC_HOST is set
+ * @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set
+ * @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set
+ * @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set
*
- * Use the efx_filter_set_*() functions to initialise the @type and
- * @data fields.
+ * The efx_filter_init_rx() or efx_filter_init_tx() function *must* be
+ * used to initialise the structure. The efx_filter_set_*() functions
+ * may then be used to set @rss_context, @match_flags and related
+ * fields.
*
* The @priority field is used by software to determine whether a new
* filter may replace an old one. The hardware priority of a filter
- * depends on the filter type.
+ * depends on which fields are matched.
*/
struct efx_filter_spec {
- u8 type:4;
- u8 priority:4;
- u8 flags;
- u16 dmaq_id;
- u32 data[3];
+ u32 match_flags:12;
+ u32 priority:2;
+ u32 flags:6;
+ u32 dmaq_id:12;
+ u32 rss_context;
+ __be16 outer_vid __aligned(4); /* allow jhash2() of match values */
+ __be16 inner_vid;
+ u8 loc_mac[ETH_ALEN];
+ u8 rem_mac[ETH_ALEN];
+ __be16 ether_type;
+ u8 ip_proto;
+ __be32 loc_host[4];
+ __be32 rem_host[4];
+ __be16 loc_port;
+ __be16 rem_port;
+ /* total 64 bytes */
+};
+
+enum {
+ EFX_FILTER_RSS_CONTEXT_DEFAULT = 0xffffffff,
+ EFX_FILTER_RX_DMAQ_ID_DROP = 0xfff
};
static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
@@ -99,39 +154,116 @@ static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
enum efx_filter_flags flags,
unsigned rxq_id)
{
- spec->type = EFX_FILTER_UNSPEC;
+ memset(spec, 0, sizeof(*spec));
spec->priority = priority;
spec->flags = EFX_FILTER_FLAG_RX | flags;
+ spec->rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
spec->dmaq_id = rxq_id;
}
static inline void efx_filter_init_tx(struct efx_filter_spec *spec,
unsigned txq_id)
{
- spec->type = EFX_FILTER_UNSPEC;
+ memset(spec, 0, sizeof(*spec));
spec->priority = EFX_FILTER_PRI_REQUIRED;
spec->flags = EFX_FILTER_FLAG_TX;
spec->dmaq_id = txq_id;
}
-extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
- __be32 host, __be16 port);
-extern int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
- u8 *proto, __be32 *host, __be16 *port);
-extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
- __be32 host, __be16 port,
- __be32 rhost, __be16 rport);
-extern int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
- u8 *proto, __be32 *host, __be16 *port,
- __be32 *rhost, __be16 *rport);
-extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
- u16 vid, const u8 *addr);
-extern int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
- u16 *vid, u8 *addr);
-extern int efx_filter_set_uc_def(struct efx_filter_spec *spec);
-extern int efx_filter_set_mc_def(struct efx_filter_spec *spec);
+/**
+ * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @host: Local host address (network byte order)
+ * @port: Local port (network byte order)
+ */
+static inline int
+efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
+ __be32 host, __be16 port)
+{
+ spec->match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
+ spec->ether_type = htons(ETH_P_IP);
+ spec->ip_proto = proto;
+ spec->loc_host[0] = host;
+ spec->loc_port = port;
+ return 0;
+}
+
+/**
+ * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @lhost: Local host address (network byte order)
+ * @lport: Local port (network byte order)
+ * @rhost: Remote host address (network byte order)
+ * @rport: Remote port (network byte order)
+ */
+static inline int
+efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
+ __be32 lhost, __be16 lport,
+ __be32 rhost, __be16 rport)
+{
+ spec->match_flags |=
+ EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
+ EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
+ EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
+ spec->ether_type = htons(ETH_P_IP);
+ spec->ip_proto = proto;
+ spec->loc_host[0] = lhost;
+ spec->loc_port = lport;
+ spec->rem_host[0] = rhost;
+ spec->rem_port = rport;
+ return 0;
+}
+
enum {
EFX_FILTER_VID_UNSPEC = 0xffff,
};
+/**
+ * efx_filter_set_eth_local - specify local Ethernet address and/or VID
+ * @spec: Specification to initialise
+ * @vid: Outer VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
+ * @addr: Local Ethernet MAC address, or %NULL
+ */
+static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec,
+ u16 vid, const u8 *addr)
+{
+ if (vid == EFX_FILTER_VID_UNSPEC && addr == NULL)
+ return -EINVAL;
+
+ if (vid != EFX_FILTER_VID_UNSPEC) {
+ spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
+ spec->outer_vid = htons(vid);
+ }
+ if (addr != NULL) {
+ spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC;
+ memcpy(spec->loc_mac, addr, ETH_ALEN);
+ }
+ return 0;
+}
+
+/**
+ * efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
+ * @spec: Specification to initialise
+ */
+static inline int efx_filter_set_uc_def(struct efx_filter_spec *spec)
+{
+ spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+ return 0;
+}
+
+/**
+ * efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
+ * @spec: Specification to initialise
+ */
+static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
+{
+ spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
+ spec->loc_mac[0] = 1;
+ return 0;
+}
+
#endif /* EFX_FILTER_H */
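/* Illustrative sketch (not part of the patch) of the new match-flag based
 * API declared above: steer a TCP/IPv4 4-tuple to RX queue 0 with RSS, or
 * drop it by targeting EFX_FILTER_RX_DMAQ_ID_DROP instead. Assumes the
 * usual kernel headers (e.g. <linux/in.h> for IPPROTO_TCP); addresses and
 * ports are arbitrary placeholders.
 */
static inline void example_build_spec(struct efx_filter_spec *spec, bool drop)
{
	efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL,
			   drop ? 0 : EFX_FILTER_FLAG_RX_RSS,
			   drop ? EFX_FILTER_RX_DMAQ_ID_DROP : 0);
	efx_filter_set_ipv4_full(spec, IPPROTO_TCP,
				 htonl(0x0a000001), htons(443),    /* local 10.0.0.1:443 */
				 htonl(0x0a000002), htons(40000)); /* remote 10.0.0.2:40000 */
}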
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 96759aee1c6..96ce507d860 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -20,7 +20,7 @@
*
**************************************************************************
*
- * Notes on locking strategy:
+ * Notes on locking strategy for the Falcon architecture:
*
* Many CSRs are very wide and cannot be read or written atomically.
* Writes from the host are buffered by the Bus Interface Unit (BIU)
@@ -54,6 +54,12 @@
* register while the collector already holds values for some other
* register, the write is discarded and the collector maintains its
* current state.
+ *
+ * The EF10 architecture exposes very few registers to the host and
+ * most of them are only 32 bits wide. The only exceptions are the MC
+ * doorbell register pair, which has its own latching, and
+ * TX_DESC_UPD, which works in a similar way to the Falcon
+ * architecture.
*/
#if BITS_PER_LONG == 64
@@ -83,7 +89,7 @@ static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
}
/* Write a normal 128-bit CSR, locking as appropriate. */
-static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
+static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
unsigned int reg)
{
unsigned long flags __attribute__ ((unused));
@@ -108,7 +114,7 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
- efx_qword_t *value, unsigned int index)
+ const efx_qword_t *value, unsigned int index)
{
unsigned int addr = index * sizeof(*value);
unsigned long flags __attribute__ ((unused));
@@ -129,7 +135,7 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
}
/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
-static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
+static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value,
unsigned int reg)
{
netif_vdbg(efx, hw, efx->net_dev,
@@ -190,8 +196,9 @@ static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
}
/* Write a 128-bit CSR forming part of a table */
-static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value,
- unsigned int reg, unsigned int index)
+static inline void
+efx_writeo_table(struct efx_nic *efx, const efx_oword_t *value,
+ unsigned int reg, unsigned int index)
{
efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
}
@@ -203,12 +210,12 @@ static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
}
-/* Page-mapped register block size */
-#define EFX_PAGE_BLOCK_SIZE 0x2000
+/* Page size used as step between per-VI registers */
+#define EFX_VI_PAGE_SIZE 0x2000
-/* Calculate offset to page-mapped register block */
+/* Calculate offset to page-mapped register */
#define EFX_PAGED_REG(page, reg) \
- ((page) * EFX_PAGE_BLOCK_SIZE + (reg))
+ ((page) * EFX_VI_PAGE_SIZE + (reg))
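/* For example (illustrative, not part of the patch): with EFX_VI_PAGE_SIZE
 * of 0x2000, the copy of a register at offset 0x400 belonging to VI/page 3
 * is at EFX_PAGED_REG(3, 0x400) == 3 * 0x2000 + 0x400 == 0x6400.
 */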
/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
@@ -236,19 +243,24 @@ static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
page)
-/* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of
- * RX_DESC_UPD or TX_DESC_UPD)
+/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the
+ * high bits of RX_DESC_UPD or TX_DESC_UPD)
*/
-static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
- unsigned int reg, unsigned int page)
+static inline void
+_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
+ unsigned int reg, unsigned int page)
{
efx_writed(efx, value, EFX_PAGED_REG(page, reg));
}
#define efx_writed_page(efx, value, reg, page) \
_efx_writed_page(efx, value, \
reg + \
- BUILD_BUG_ON_ZERO((reg) != 0x400 && (reg) != 0x83c \
- && (reg) != 0xa1c), \
+ BUILD_BUG_ON_ZERO((reg) != 0x400 && \
+ (reg) != 0x420 && \
+ (reg) != 0x830 && \
+ (reg) != 0x83c && \
+ (reg) != 0xa18 && \
+ (reg) != 0xa1c), \
page)
/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
@@ -256,7 +268,7 @@ static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
* collector register.
*/
static inline void _efx_writed_page_locked(struct efx_nic *efx,
- efx_dword_t *value,
+ const efx_dword_t *value,
unsigned int reg,
unsigned int page)
{
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 97dd8f18c00..128d7cdf9eb 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2008-2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -8,10 +8,11 @@
*/
#include <linux/delay.h>
+#include <asm/cmpxchg.h>
#include "net_driver.h"
#include "nic.h"
#include "io.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "mcdi_pcol.h"
#include "phy.h"
@@ -24,112 +25,235 @@
#define MCDI_RPC_TIMEOUT (10 * HZ)
-#define MCDI_PDU(efx) \
- (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
-#define MCDI_DOORBELL(efx) \
- (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
-#define MCDI_STATUS(efx) \
- (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)
-
/* A reboot/assertion causes the MCDI status word to be set after the
* command word is set or a REBOOT event is sent. If we notice a reboot
- * via these mechanisms then wait 10ms for the status word to be set. */
+ * via these mechanisms then wait 20ms for the status word to be set.
+ */
#define MCDI_STATUS_DELAY_US 100
-#define MCDI_STATUS_DELAY_COUNT 100
+#define MCDI_STATUS_DELAY_COUNT 200
#define MCDI_STATUS_SLEEP_MS \
(MCDI_STATUS_DELAY_US * MCDI_STATUS_DELAY_COUNT / 1000)
#define SEQ_MASK \
EFX_MASK32(EFX_WIDTH(MCDI_HEADER_SEQ))
+struct efx_mcdi_async_param {
+ struct list_head list;
+ unsigned int cmd;
+ size_t inlen;
+ size_t outlen;
+ efx_mcdi_async_completer *complete;
+ unsigned long cookie;
+ /* followed by request/response buffer */
+};
+
+static void efx_mcdi_timeout_async(unsigned long context);
+static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
+ bool *was_attached_out);
+
static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{
- struct siena_nic_data *nic_data;
- EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
- nic_data = efx->nic_data;
- return &nic_data->mcdi;
+ EFX_BUG_ON_PARANOID(!efx->mcdi);
+ return &efx->mcdi->iface;
}
-void efx_mcdi_init(struct efx_nic *efx)
+int efx_mcdi_init(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
+ bool already_attached;
+ int rc;
- if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
- return;
+ efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
+ if (!efx->mcdi)
+ return -ENOMEM;
mcdi = efx_mcdi(efx);
+ mcdi->efx = efx;
init_waitqueue_head(&mcdi->wq);
spin_lock_init(&mcdi->iface_lock);
- atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
+ mcdi->state = MCDI_STATE_QUIESCENT;
mcdi->mode = MCDI_MODE_POLL;
+ spin_lock_init(&mcdi->async_lock);
+ INIT_LIST_HEAD(&mcdi->async_list);
+ setup_timer(&mcdi->async_timer, efx_mcdi_timeout_async,
+ (unsigned long)mcdi);
(void) efx_mcdi_poll_reboot(efx);
+ mcdi->new_epoch = true;
+
+ /* Recover from a failed assertion before probing */
+ rc = efx_mcdi_handle_assertion(efx);
+ if (rc)
+ return rc;
+
+ /* Let the MC (and BMC, if this is a LOM) know that the driver
+ * is loaded. We should do this before we reset the NIC.
+ */
+ rc = efx_mcdi_drv_attach(efx, true, &already_attached);
+ if (rc) {
+ netif_err(efx, probe, efx->net_dev,
+ "Unable to register driver with MCPU\n");
+ return rc;
+ }
+ if (already_attached)
+ /* Not a fatal error */
+ netif_err(efx, probe, efx->net_dev,
+ "Host already registered with MCPU\n");
+
+ return 0;
}
-static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
- const u8 *inbuf, size_t inlen)
+void efx_mcdi_fini(struct efx_nic *efx)
+{
+ if (!efx->mcdi)
+ return;
+
+ BUG_ON(efx->mcdi->iface.state != MCDI_STATE_QUIESCENT);
+
+ /* Relinquish the device (back to the BMC, if this is a LOM) */
+ efx_mcdi_drv_attach(efx, false, NULL);
+
+ kfree(efx->mcdi);
+}
+
+static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
- unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
- unsigned int i;
- efx_dword_t hdr;
+ efx_dword_t hdr[2];
+ size_t hdr_len;
u32 xflags, seqno;
- BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
- BUG_ON(inlen & 3 || inlen >= MC_SMEM_PDU_LEN);
+ BUG_ON(mcdi->state == MCDI_STATE_QUIESCENT);
+
+ /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
+ spin_lock_bh(&mcdi->iface_lock);
+ ++mcdi->seqno;
+ spin_unlock_bh(&mcdi->iface_lock);
seqno = mcdi->seqno & SEQ_MASK;
xflags = 0;
if (mcdi->mode == MCDI_MODE_EVENTS)
xflags |= MCDI_HEADER_XFLAGS_EVREQ;
- EFX_POPULATE_DWORD_6(hdr,
- MCDI_HEADER_RESPONSE, 0,
- MCDI_HEADER_RESYNC, 1,
- MCDI_HEADER_CODE, cmd,
- MCDI_HEADER_DATALEN, inlen,
- MCDI_HEADER_SEQ, seqno,
- MCDI_HEADER_XFLAGS, xflags);
-
- efx_writed(efx, &hdr, pdu);
+ if (efx->type->mcdi_max_ver == 1) {
+ /* MCDI v1 */
+ EFX_POPULATE_DWORD_7(hdr[0],
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_CODE, cmd,
+ MCDI_HEADER_DATALEN, inlen,
+ MCDI_HEADER_SEQ, seqno,
+ MCDI_HEADER_XFLAGS, xflags,
+ MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
+ hdr_len = 4;
+ } else {
+ /* MCDI v2 */
+ BUG_ON(inlen > MCDI_CTL_SDU_LEN_MAX_V2);
+ EFX_POPULATE_DWORD_7(hdr[0],
+ MCDI_HEADER_RESPONSE, 0,
+ MCDI_HEADER_RESYNC, 1,
+ MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
+ MCDI_HEADER_DATALEN, 0,
+ MCDI_HEADER_SEQ, seqno,
+ MCDI_HEADER_XFLAGS, xflags,
+ MCDI_HEADER_NOT_EPOCH, !mcdi->new_epoch);
+ EFX_POPULATE_DWORD_2(hdr[1],
+ MC_CMD_V2_EXTN_IN_EXTENDED_CMD, cmd,
+ MC_CMD_V2_EXTN_IN_ACTUAL_LEN, inlen);
+ hdr_len = 8;
+ }
- for (i = 0; i < inlen; i += 4)
- _efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
+ efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
- /* Ensure the payload is written out before the header */
- wmb();
+ mcdi->new_epoch = false;
+}
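/* Illustrative summary (not part of the patch) of the two header formats
 * built above:
 *
 *   MCDI v1: one dword -- MCDI_HEADER_CODE carries the real command number
 *            and MCDI_HEADER_DATALEN the real payload length, so both are
 *            limited to the v1 field widths (inlen <= MCDI_CTL_SDU_LEN_MAX_V1).
 *
 *   MCDI v2: two dwords -- dword 0 carries CODE = MC_CMD_V2_EXTN with
 *            DATALEN = 0, and dword 1 carries the actual command in
 *            MC_CMD_V2_EXTN_IN_EXTENDED_CMD and the payload length in
 *            MC_CMD_V2_EXTN_IN_ACTUAL_LEN (inlen <= MCDI_CTL_SDU_LEN_MAX_V2).
 */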
- /* ring the doorbell with a distinctive value */
- _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
+static int efx_mcdi_errno(unsigned int mcdi_err)
+{
+ switch (mcdi_err) {
+ case 0:
+ return 0;
+#define TRANSLATE_ERROR(name) \
+ case MC_CMD_ERR_ ## name: \
+ return -name;
+ TRANSLATE_ERROR(EPERM);
+ TRANSLATE_ERROR(ENOENT);
+ TRANSLATE_ERROR(EINTR);
+ TRANSLATE_ERROR(EAGAIN);
+ TRANSLATE_ERROR(EACCES);
+ TRANSLATE_ERROR(EBUSY);
+ TRANSLATE_ERROR(EINVAL);
+ TRANSLATE_ERROR(EDEADLK);
+ TRANSLATE_ERROR(ENOSYS);
+ TRANSLATE_ERROR(ETIME);
+ TRANSLATE_ERROR(EALREADY);
+ TRANSLATE_ERROR(ENOSPC);
+#undef TRANSLATE_ERROR
+ case MC_CMD_ERR_ALLOC_FAIL:
+ return -ENOBUFS;
+ case MC_CMD_ERR_MAC_EXIST:
+ return -EADDRINUSE;
+ default:
+ return -EPROTO;
+ }
}
-static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
+static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
- int i;
+ unsigned int respseq, respcmd, error;
+ efx_dword_t hdr;
+
+ efx->type->mcdi_read_response(efx, &hdr, 0, 4);
+ respseq = EFX_DWORD_FIELD(hdr, MCDI_HEADER_SEQ);
+ respcmd = EFX_DWORD_FIELD(hdr, MCDI_HEADER_CODE);
+ error = EFX_DWORD_FIELD(hdr, MCDI_HEADER_ERROR);
- BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
- BUG_ON(outlen & 3 || outlen >= MC_SMEM_PDU_LEN);
+ if (respcmd != MC_CMD_V2_EXTN) {
+ mcdi->resp_hdr_len = 4;
+ mcdi->resp_data_len = EFX_DWORD_FIELD(hdr, MCDI_HEADER_DATALEN);
+ } else {
+ efx->type->mcdi_read_response(efx, &hdr, 4, 4);
+ mcdi->resp_hdr_len = 8;
+ mcdi->resp_data_len =
+ EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
+ }
- for (i = 0; i < outlen; i += 4)
- *((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
+ if (error && mcdi->resp_data_len == 0) {
+ netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
+ mcdi->resprc = -EIO;
+ } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
+ netif_err(efx, hw, efx->net_dev,
+ "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
+ respseq, mcdi->seqno);
+ mcdi->resprc = -EIO;
+ } else if (error) {
+ efx->type->mcdi_read_response(efx, &hdr, mcdi->resp_hdr_len, 4);
+ mcdi->resprc =
+ efx_mcdi_errno(EFX_DWORD_FIELD(hdr, EFX_DWORD_0));
+ } else {
+ mcdi->resprc = 0;
+ }
}
static int efx_mcdi_poll(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned long time, finish;
- unsigned int respseq, respcmd, error;
- unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
- unsigned int rc, spins;
- efx_dword_t reg;
+ unsigned int spins;
+ int rc;
/* Check for a reboot atomically with respect to efx_mcdi_copyout() */
- rc = -efx_mcdi_poll_reboot(efx);
- if (rc)
- goto out;
+ rc = efx_mcdi_poll_reboot(efx);
+ if (rc) {
+ spin_lock_bh(&mcdi->iface_lock);
+ mcdi->resprc = rc;
+ mcdi->resp_hdr_len = 0;
+ mcdi->resp_data_len = 0;
+ spin_unlock_bh(&mcdi->iface_lock);
+ return 0;
+ }
/* Poll for completion. Poll quickly (once a us) for the 1st jiffy,
* because generally mcdi responses are fast. After that, back off
@@ -149,59 +273,16 @@ static int efx_mcdi_poll(struct efx_nic *efx)
time = jiffies;
rmb();
- efx_readd(efx, &reg, pdu);
-
- /* All 1's indicates that shared memory is in reset (and is
- * not a valid header). Wait for it to come out reset before
- * completing the command */
- if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) != 0xffffffff &&
- EFX_DWORD_FIELD(reg, MCDI_HEADER_RESPONSE))
+ if (efx->type->mcdi_poll_response(efx))
break;
if (time_after(time, finish))
return -ETIMEDOUT;
}
- mcdi->resplen = EFX_DWORD_FIELD(reg, MCDI_HEADER_DATALEN);
- respseq = EFX_DWORD_FIELD(reg, MCDI_HEADER_SEQ);
- respcmd = EFX_DWORD_FIELD(reg, MCDI_HEADER_CODE);
- error = EFX_DWORD_FIELD(reg, MCDI_HEADER_ERROR);
-
- if (error && mcdi->resplen == 0) {
- netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
- rc = EIO;
- } else if ((respseq ^ mcdi->seqno) & SEQ_MASK) {
- netif_err(efx, hw, efx->net_dev,
- "MC response mismatch tx seq 0x%x rx seq 0x%x\n",
- respseq, mcdi->seqno);
- rc = EIO;
- } else if (error) {
- efx_readd(efx, &reg, pdu + 4);
- switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
-#define TRANSLATE_ERROR(name) \
- case MC_CMD_ERR_ ## name: \
- rc = name; \
- break
- TRANSLATE_ERROR(ENOENT);
- TRANSLATE_ERROR(EINTR);
- TRANSLATE_ERROR(EACCES);
- TRANSLATE_ERROR(EBUSY);
- TRANSLATE_ERROR(EINVAL);
- TRANSLATE_ERROR(EDEADLK);
- TRANSLATE_ERROR(ENOSYS);
- TRANSLATE_ERROR(ETIME);
-#undef TRANSLATE_ERROR
- default:
- rc = EIO;
- break;
- }
- } else
- rc = 0;
-
-out:
- mcdi->resprc = rc;
- if (rc)
- mcdi->resplen = 0;
+ spin_lock_bh(&mcdi->iface_lock);
+ efx_mcdi_read_response_header(efx);
+ spin_unlock_bh(&mcdi->iface_lock);
/* Return rc=0 like wait_event_timeout() */
return 0;
@@ -212,52 +293,36 @@ out:
*/
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
- unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
- efx_dword_t reg;
- uint32_t value;
-
- if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
- return false;
-
- efx_readd(efx, &reg, addr);
- value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
-
- if (value == 0)
+ if (!efx->mcdi)
return 0;
- /* MAC statistics have been cleared on the NIC; clear our copy
- * so that efx_update_diff_stat() can continue to work.
- */
- memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
-
- EFX_ZERO_DWORD(reg);
- efx_writed(efx, &reg, addr);
+ return efx->type->mcdi_poll_reboot(efx);
+}
- if (value == MC_STATUS_DWORD_ASSERT)
- return -EINTR;
- else
- return -EIO;
+static bool efx_mcdi_acquire_async(struct efx_mcdi_iface *mcdi)
+{
+ return cmpxchg(&mcdi->state,
+ MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_ASYNC) ==
+ MCDI_STATE_QUIESCENT;
}
-static void efx_mcdi_acquire(struct efx_mcdi_iface *mcdi)
+static void efx_mcdi_acquire_sync(struct efx_mcdi_iface *mcdi)
{
/* Wait until the interface becomes QUIESCENT and we win the race
- * to mark it RUNNING. */
+ * to mark it RUNNING_SYNC.
+ */
wait_event(mcdi->wq,
- atomic_cmpxchg(&mcdi->state,
- MCDI_STATE_QUIESCENT,
- MCDI_STATE_RUNNING)
- == MCDI_STATE_QUIESCENT);
+ cmpxchg(&mcdi->state,
+ MCDI_STATE_QUIESCENT, MCDI_STATE_RUNNING_SYNC) ==
+ MCDI_STATE_QUIESCENT);
}
static int efx_mcdi_await_completion(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
- if (wait_event_timeout(
- mcdi->wq,
- atomic_read(&mcdi->state) == MCDI_STATE_COMPLETED,
- MCDI_RPC_TIMEOUT) == 0)
+ if (wait_event_timeout(mcdi->wq, mcdi->state == MCDI_STATE_COMPLETED,
+ MCDI_RPC_TIMEOUT) == 0)
return -ETIMEDOUT;
/* Check if efx_mcdi_set_mode() switched us back to polled completions.
@@ -274,17 +339,14 @@ static int efx_mcdi_await_completion(struct efx_nic *efx)
return 0;
}
-static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
+/* If the interface is RUNNING_SYNC, switch to COMPLETED and wake the
+ * requester. Return whether this was done. Does not take any locks.
+ */
+static bool efx_mcdi_complete_sync(struct efx_mcdi_iface *mcdi)
{
- /* If the interface is RUNNING, then move to COMPLETED and wake any
- * waiters. If the interface isn't in RUNNING then we've received a
- * duplicate completion after we've already transitioned back to
- * QUIESCENT. [A subsequent invocation would increment seqno, so would
- * have failed the seqno check].
- */
- if (atomic_cmpxchg(&mcdi->state,
- MCDI_STATE_RUNNING,
- MCDI_STATE_COMPLETED) == MCDI_STATE_RUNNING) {
+ if (cmpxchg(&mcdi->state,
+ MCDI_STATE_RUNNING_SYNC, MCDI_STATE_COMPLETED) ==
+ MCDI_STATE_RUNNING_SYNC) {
wake_up(&mcdi->wq);
return true;
}
@@ -294,12 +356,93 @@ static bool efx_mcdi_complete(struct efx_mcdi_iface *mcdi)
static void efx_mcdi_release(struct efx_mcdi_iface *mcdi)
{
- atomic_set(&mcdi->state, MCDI_STATE_QUIESCENT);
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ struct efx_mcdi_async_param *async;
+ struct efx_nic *efx = mcdi->efx;
+
+ /* Process the asynchronous request queue */
+ spin_lock_bh(&mcdi->async_lock);
+ async = list_first_entry_or_null(
+ &mcdi->async_list, struct efx_mcdi_async_param, list);
+ if (async) {
+ mcdi->state = MCDI_STATE_RUNNING_ASYNC;
+ efx_mcdi_send_request(efx, async->cmd,
+ (const efx_dword_t *)(async + 1),
+ async->inlen);
+ mod_timer(&mcdi->async_timer,
+ jiffies + MCDI_RPC_TIMEOUT);
+ }
+ spin_unlock_bh(&mcdi->async_lock);
+
+ if (async)
+ return;
+ }
+
+ mcdi->state = MCDI_STATE_QUIESCENT;
wake_up(&mcdi->wq);
}
+/* If the interface is RUNNING_ASYNC, switch to COMPLETED, call the
+ * asynchronous completion function, and release the interface.
+ * Return whether this was done. Must be called in bh-disabled
+ * context. Will take iface_lock and async_lock.
+ */
+static bool efx_mcdi_complete_async(struct efx_mcdi_iface *mcdi, bool timeout)
+{
+ struct efx_nic *efx = mcdi->efx;
+ struct efx_mcdi_async_param *async;
+ size_t hdr_len, data_len;
+ efx_dword_t *outbuf;
+ int rc;
+
+ if (cmpxchg(&mcdi->state,
+ MCDI_STATE_RUNNING_ASYNC, MCDI_STATE_COMPLETED) !=
+ MCDI_STATE_RUNNING_ASYNC)
+ return false;
+
+ spin_lock(&mcdi->iface_lock);
+ if (timeout) {
+ /* Ensure that if the completion event arrives later,
+ * the seqno check in efx_mcdi_ev_cpl() will fail
+ */
+ ++mcdi->seqno;
+ ++mcdi->credits;
+ rc = -ETIMEDOUT;
+ hdr_len = 0;
+ data_len = 0;
+ } else {
+ rc = mcdi->resprc;
+ hdr_len = mcdi->resp_hdr_len;
+ data_len = mcdi->resp_data_len;
+ }
+ spin_unlock(&mcdi->iface_lock);
+
+ /* Stop the timer. In case the timer function is running, we
+ * must wait for it to return so that there is no possibility
+ * of it aborting the next request.
+ */
+ if (!timeout)
+ del_timer_sync(&mcdi->async_timer);
+
+ spin_lock(&mcdi->async_lock);
+ async = list_first_entry(&mcdi->async_list,
+ struct efx_mcdi_async_param, list);
+ list_del(&async->list);
+ spin_unlock(&mcdi->async_lock);
+
+ outbuf = (efx_dword_t *)(async + 1);
+ efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+ min(async->outlen, data_len));
+ async->complete(efx, async->cookie, rc, outbuf, data_len);
+ kfree(async);
+
+ efx_mcdi_release(mcdi);
+
+ return true;
+}
+
static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
- unsigned int datalen, unsigned int errno)
+ unsigned int datalen, unsigned int mcdi_err)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
bool wake = false;
@@ -315,52 +458,161 @@ static void efx_mcdi_ev_cpl(struct efx_nic *efx, unsigned int seqno,
"MC response mismatch tx seq 0x%x rx "
"seq 0x%x\n", seqno, mcdi->seqno);
} else {
- mcdi->resprc = errno;
- mcdi->resplen = datalen;
+ if (efx->type->mcdi_max_ver >= 2) {
+ /* MCDI v2 responses don't fit in an event */
+ efx_mcdi_read_response_header(efx);
+ } else {
+ mcdi->resprc = efx_mcdi_errno(mcdi_err);
+ mcdi->resp_hdr_len = 4;
+ mcdi->resp_data_len = datalen;
+ }
wake = true;
}
spin_unlock(&mcdi->iface_lock);
- if (wake)
- efx_mcdi_complete(mcdi);
+ if (wake) {
+ if (!efx_mcdi_complete_async(mcdi, false))
+ (void) efx_mcdi_complete_sync(mcdi);
+
+ /* If the interface isn't RUNNING_ASYNC or
+ * RUNNING_SYNC then we've received a duplicate
+ * completion after we've already transitioned back to
+ * QUIESCENT. [A subsequent invocation would increment
+ * seqno, so would have failed the seqno check].
+ */
+ }
+}
+
+static void efx_mcdi_timeout_async(unsigned long context)
+{
+ struct efx_mcdi_iface *mcdi = (struct efx_mcdi_iface *)context;
+
+ efx_mcdi_complete_async(mcdi, true);
+}
+
+static int
+efx_mcdi_check_supported(struct efx_nic *efx, unsigned int cmd, size_t inlen)
+{
+ if (efx->type->mcdi_max_ver < 0 ||
+ (efx->type->mcdi_max_ver < 2 &&
+ cmd > MC_CMD_CMD_SPACE_ESCAPE_7))
+ return -EINVAL;
+
+ if (inlen > MCDI_CTL_SDU_LEN_MAX_V2 ||
+ (efx->type->mcdi_max_ver < 2 &&
+ inlen > MCDI_CTL_SDU_LEN_MAX_V1))
+ return -EMSGSIZE;
+
+ return 0;
}
int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
- const u8 *inbuf, size_t inlen, u8 *outbuf, size_t outlen,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual)
{
- efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+ int rc;
+
+ rc = efx_mcdi_rpc_start(efx, cmd, inbuf, inlen);
+ if (rc)
+ return rc;
return efx_mcdi_rpc_finish(efx, cmd, inlen,
outbuf, outlen, outlen_actual);
}
-void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
- size_t inlen)
+int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ int rc;
- BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
+ rc = efx_mcdi_check_supported(efx, cmd, inlen);
+ if (rc)
+ return rc;
- efx_mcdi_acquire(mcdi);
+ efx_mcdi_acquire_sync(mcdi);
+ efx_mcdi_send_request(efx, cmd, inbuf, inlen);
+ return 0;
+}
- /* Serialise with efx_mcdi_ev_cpl() and efx_mcdi_ev_death() */
- spin_lock_bh(&mcdi->iface_lock);
- ++mcdi->seqno;
- spin_unlock_bh(&mcdi->iface_lock);
+/**
+ * efx_mcdi_rpc_async - Schedule an MCDI command to run asynchronously
+ * @efx: NIC through which to issue the command
+ * @cmd: Command type number
+ * @inbuf: Command parameters
+ * @inlen: Length of command parameters, in bytes
+ * @outlen: Length to allocate for response buffer, in bytes
+ * @complete: Function to be called on completion or cancellation.
+ * @cookie: Arbitrary value to be passed to @complete.
+ *
+ * This function does not sleep and therefore may be called in atomic
+ * context. It will fail if event queues are disabled or if MCDI
+ * event completions have been disabled due to an error.
+ *
+ * If it succeeds, the @complete function will be called exactly once
+ * in atomic context, when one of the following occurs:
+ * (a) the completion event is received (in NAPI context)
+ * (b) event queues are disabled (in the process that disables them)
+ * (c) the request times-out (in timer context)
+ */
+int
+efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen, size_t outlen,
+ efx_mcdi_async_completer *complete, unsigned long cookie)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ struct efx_mcdi_async_param *async;
+ int rc;
+
+ rc = efx_mcdi_check_supported(efx, cmd, inlen);
+ if (rc)
+ return rc;
+
+ async = kmalloc(sizeof(*async) + ALIGN(max(inlen, outlen), 4),
+ GFP_ATOMIC);
+ if (!async)
+ return -ENOMEM;
+
+ async->cmd = cmd;
+ async->inlen = inlen;
+ async->outlen = outlen;
+ async->complete = complete;
+ async->cookie = cookie;
+ memcpy(async + 1, inbuf, inlen);
+
+ spin_lock_bh(&mcdi->async_lock);
+
+ if (mcdi->mode == MCDI_MODE_EVENTS) {
+ list_add_tail(&async->list, &mcdi->async_list);
+
+ /* If this is at the front of the queue, try to start it
+ * immediately
+ */
+ if (mcdi->async_list.next == &async->list &&
+ efx_mcdi_acquire_async(mcdi)) {
+ efx_mcdi_send_request(efx, cmd, inbuf, inlen);
+ mod_timer(&mcdi->async_timer,
+ jiffies + MCDI_RPC_TIMEOUT);
+ }
+ } else {
+ kfree(async);
+ rc = -ENETDOWN;
+ }
+
+ spin_unlock_bh(&mcdi->async_lock);
- efx_mcdi_copyin(efx, cmd, inbuf, inlen);
+ return rc;
}
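Illustrative sketch (editor's addition, not part of the patch): a minimal example of how a caller might use the new asynchronous API. The completer and helper names are hypothetical, and MC_CMD_GET_VERSION is chosen only as a convenient zero-length request; the signatures follow the efx_mcdi_async_completer typedef added to mcdi.h further down.

static void example_mcdi_version_complete(struct efx_nic *efx,
					  unsigned long cookie, int rc,
					  efx_dword_t *outbuf,
					  size_t outlen_actual)
{
	/* Called exactly once, in atomic context: on the completion
	 * event, on timeout, or when event queues are disabled.
	 */
	if (rc)
		netif_warn(efx, hw, efx->net_dev,
			   "async MCDI request failed rc=%d\n", rc);
}

static int example_mcdi_version_async(struct efx_nic *efx)
{
	BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
	/* Zero-length request, so no input buffer; the cookie is unused */
	return efx_mcdi_rpc_async(efx, MC_CMD_GET_VERSION, NULL, 0,
				  MC_CMD_GET_VERSION_OUT_LEN,
				  example_mcdi_version_complete, 0);
}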
int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
- u8 *outbuf, size_t outlen, size_t *outlen_actual)
+ efx_dword_t *outbuf, size_t outlen,
+ size_t *outlen_actual)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
int rc;
- BUG_ON(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
-
if (mcdi->mode == MCDI_MODE_POLL)
rc = efx_mcdi_poll(efx);
else
@@ -380,22 +632,25 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
"MC command 0x%x inlen %d mode %d timed out\n",
cmd, (int)inlen, mcdi->mode);
} else {
- size_t resplen;
+ size_t hdr_len, data_len;
/* At the very least we need a memory barrier here to ensure
* we pick up changes from efx_mcdi_ev_cpl(). Protect against
* a spurious efx_mcdi_ev_cpl() running concurrently by
* acquiring the iface_lock. */
spin_lock_bh(&mcdi->iface_lock);
- rc = -mcdi->resprc;
- resplen = mcdi->resplen;
+ rc = mcdi->resprc;
+ hdr_len = mcdi->resp_hdr_len;
+ data_len = mcdi->resp_data_len;
spin_unlock_bh(&mcdi->iface_lock);
+ BUG_ON(rc > 0);
+
if (rc == 0) {
- efx_mcdi_copyout(efx, outbuf,
- min(outlen, mcdi->resplen + 3) & ~0x3);
+ efx->type->mcdi_read_response(efx, outbuf, hdr_len,
+ min(outlen, data_len));
if (outlen_actual != NULL)
- *outlen_actual = resplen;
+ *outlen_actual = data_len;
} else if (cmd == MC_CMD_REBOOT && rc == -EIO)
; /* Don't reset if MC_CMD_REBOOT returns EIO */
else if (rc == -EIO || rc == -EINTR) {
@@ -410,6 +665,7 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
if (rc == -EIO || rc == -EINTR) {
msleep(MCDI_STATUS_SLEEP_MS);
efx_mcdi_poll_reboot(efx);
+ mcdi->new_epoch = true;
}
}
@@ -417,11 +673,15 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
return rc;
}
+/* Switch to polled MCDI completions. This can be called in various
+ * error conditions with various locks held, so it must be lockless.
+ * Caller is responsible for flushing asynchronous requests later.
+ */
void efx_mcdi_mode_poll(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
- if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+ if (!efx->mcdi)
return;
mcdi = efx_mcdi(efx);
@@ -434,18 +694,57 @@ void efx_mcdi_mode_poll(struct efx_nic *efx)
* efx_mcdi_await_completion() will then call efx_mcdi_poll().
*
* We need an smp_wmb() to synchronise with efx_mcdi_await_completion(),
- * which efx_mcdi_complete() provides for us.
+ * which efx_mcdi_complete_sync() provides for us.
*/
mcdi->mode = MCDI_MODE_POLL;
- efx_mcdi_complete(mcdi);
+ efx_mcdi_complete_sync(mcdi);
+}
+
+/* Flush any running or queued asynchronous requests, after event processing
+ * is stopped
+ */
+void efx_mcdi_flush_async(struct efx_nic *efx)
+{
+ struct efx_mcdi_async_param *async, *next;
+ struct efx_mcdi_iface *mcdi;
+
+ if (!efx->mcdi)
+ return;
+
+ mcdi = efx_mcdi(efx);
+
+ /* We must be in polling mode so no more requests can be queued */
+ BUG_ON(mcdi->mode != MCDI_MODE_POLL);
+
+ del_timer_sync(&mcdi->async_timer);
+
+ /* If a request is still running, make sure we give the MC
+ * time to complete it so that the response won't overwrite our
+ * next request.
+ */
+ if (mcdi->state == MCDI_STATE_RUNNING_ASYNC) {
+ efx_mcdi_poll(efx);
+ mcdi->state = MCDI_STATE_QUIESCENT;
+ }
+
+ /* Nothing else will access the async list now, so it is safe
+ * to walk it without holding async_lock. If we hold it while
+ * calling a completer then lockdep may warn that we have
+ * acquired locks in the wrong order.
+ */
+ list_for_each_entry_safe(async, next, &mcdi->async_list, list) {
+ async->complete(efx, async->cookie, -ENETDOWN, NULL, 0);
+ list_del(&async->list);
+ kfree(async);
+ }
}
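Illustrative sketch (editor's addition, not part of the patch): the ordering contract described in the comments above, expressed as a hypothetical teardown helper; efx_mcdi_flush_async() itself asserts this ordering via its BUG_ON on the mode.

static void example_mcdi_quiesce(struct efx_nic *efx)
{
	/* Stop relying on event completions first; this is lockless and
	 * safe to call from error paths.
	 */
	efx_mcdi_mode_poll(efx);

	/* Then, once event processing has been stopped, cancel any
	 * asynchronous requests still queued or running.
	 */
	efx_mcdi_flush_async(efx);
}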
void efx_mcdi_mode_event(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
- if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
+ if (!efx->mcdi)
return;
mcdi = efx_mcdi(efx);
@@ -460,7 +759,7 @@ void efx_mcdi_mode_event(struct efx_nic *efx)
* write memory barrier ensures that efx_mcdi_rpc() sees it, which
* efx_mcdi_acquire() provides.
*/
- efx_mcdi_acquire(mcdi);
+ efx_mcdi_acquire_sync(mcdi);
mcdi->mode = MCDI_MODE_EVENTS;
efx_mcdi_release(mcdi);
}
@@ -477,19 +776,25 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
* are sent to the same queue, we can't be racing with
* efx_mcdi_ev_cpl()]
*
- * There's a race here with efx_mcdi_rpc(), because we might receive
- * a REBOOT event *before* the request has been copied out. In polled
- * mode (during startup) this is irrelevant, because efx_mcdi_complete()
- * is ignored. In event mode, this condition is just an edge-case of
- * receiving a REBOOT event after posting the MCDI request. Did the mc
- * reboot before or after the copyout? The best we can do always is
- * just return failure.
+ * If there is an outstanding asynchronous request, we can't
+ * complete it now (efx_mcdi_complete() would deadlock). The
+ * reset process will take care of this.
+ *
+ * There's a race here with efx_mcdi_send_request(), because
+ * we might receive a REBOOT event *before* the request has
+ * been copied out. In polled mode (during startup) this is
+ * irrelevant, because efx_mcdi_complete_sync() is ignored. In
+ * event mode, this condition is just an edge-case of
+ * receiving a REBOOT event after posting the MCDI
+ * request. Did the mc reboot before or after the copyout? The
+ * best we can do always is just return failure.
*/
spin_lock(&mcdi->iface_lock);
- if (efx_mcdi_complete(mcdi)) {
+ if (efx_mcdi_complete_sync(mcdi)) {
if (mcdi->mode == MCDI_MODE_EVENTS) {
mcdi->resprc = rc;
- mcdi->resplen = 0;
+ mcdi->resp_hdr_len = 0;
+ mcdi->resp_data_len = 0;
++mcdi->credits;
}
} else {
@@ -504,41 +809,12 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
break;
udelay(MCDI_STATUS_DELAY_US);
}
+ mcdi->new_epoch = true;
}
spin_unlock(&mcdi->iface_lock);
}
-static unsigned int efx_mcdi_event_link_speed[] = {
- [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
- [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
- [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
-};
-
-
-static void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
-{
- u32 flags, fcntl, speed, lpa;
-
- speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
- EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
- speed = efx_mcdi_event_link_speed[speed];
-
- flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
- fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
- lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
-
- /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
- * which is only run after flushing the event queues. Therefore, it
- * is safe to modify the link state outside of the mac_lock here.
- */
- efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
-
- efx_mcdi_phy_check_fcntl(efx, lpa);
-
- efx_link_status_changed(efx);
-}
-
/* Called from falcon_process_eventq for MCDI events */
void efx_mcdi_process_event(struct efx_channel *channel,
efx_qword_t *event)
@@ -551,7 +827,7 @@ void efx_mcdi_process_event(struct efx_channel *channel,
case MCDI_EVENT_CODE_BADSSERT:
netif_err(efx, hw, efx->net_dev,
"MC watchdog or assertion failure at 0x%x\n", data);
- efx_mcdi_ev_death(efx, EINTR);
+ efx_mcdi_ev_death(efx, -EINTR);
break;
case MCDI_EVENT_CODE_PMNOTICE:
@@ -576,8 +852,9 @@ void efx_mcdi_process_event(struct efx_channel *channel,
"MC Scheduler error address=0x%x\n", data);
break;
case MCDI_EVENT_CODE_REBOOT:
+ case MCDI_EVENT_CODE_MC_REBOOT:
netif_info(efx, hw, efx->net_dev, "MC Reboot\n");
- efx_mcdi_ev_death(efx, EIO);
+ efx_mcdi_ev_death(efx, -EIO);
break;
case MCDI_EVENT_CODE_MAC_STATS_DMA:
/* MAC stats are gathered lazily. We can ignore this. */
@@ -590,7 +867,27 @@ void efx_mcdi_process_event(struct efx_channel *channel,
case MCDI_EVENT_CODE_PTP_PPS:
efx_ptp_event(efx, event);
break;
-
+ case MCDI_EVENT_CODE_TX_FLUSH:
+ case MCDI_EVENT_CODE_RX_FLUSH:
+ /* Two flush events will be sent: one to the same event
+ * queue as completions, and one to event queue 0.
+ * In the latter case the {RX,TX}_FLUSH_TO_DRIVER
+ * flag will be set, and we should ignore the event
+ * because we want to wait for all completions.
+ */
+ BUILD_BUG_ON(MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN !=
+ MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN);
+ if (!MCDI_EVENT_FIELD(*event, TX_FLUSH_TO_DRIVER))
+ efx_ef10_handle_drain_event(efx);
+ break;
+ case MCDI_EVENT_CODE_TX_ERR:
+ case MCDI_EVENT_CODE_RX_ERR:
+ netif_err(efx, hw, efx->net_dev,
+ "%s DMA error (event: "EFX_QWORD_FMT")\n",
+ code == MCDI_EVENT_CODE_TX_ERR ? "TX" : "RX",
+ EFX_QWORD_VAL(*event));
+ efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
+ break;
default:
netif_err(efx, hw, efx->net_dev, "Unknown MCDI event 0x%x\n",
code);
@@ -606,27 +903,55 @@ void efx_mcdi_process_event(struct efx_channel *channel,
void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len)
{
- u8 outbuf[ALIGN(MC_CMD_GET_VERSION_OUT_LEN, 4)];
+ MCDI_DECLARE_BUF(outbuf,
+ max(MC_CMD_GET_VERSION_OUT_LEN,
+ MC_CMD_GET_CAPABILITIES_OUT_LEN));
size_t outlength;
const __le16 *ver_words;
+ size_t offset;
int rc;
BUILD_BUG_ON(MC_CMD_GET_VERSION_IN_LEN != 0);
-
rc = efx_mcdi_rpc(efx, MC_CMD_GET_VERSION, NULL, 0,
outbuf, sizeof(outbuf), &outlength);
if (rc)
goto fail;
-
if (outlength < MC_CMD_GET_VERSION_OUT_LEN) {
rc = -EIO;
goto fail;
}
ver_words = (__le16 *)MCDI_PTR(outbuf, GET_VERSION_OUT_VERSION);
- snprintf(buf, len, "%u.%u.%u.%u",
- le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
- le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
+ offset = snprintf(buf, len, "%u.%u.%u.%u",
+ le16_to_cpu(ver_words[0]), le16_to_cpu(ver_words[1]),
+ le16_to_cpu(ver_words[2]), le16_to_cpu(ver_words[3]));
+
+ /* EF10 may have multiple datapath firmware variants within a
+ * single version. Report which variants are running.
+ */
+ if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0) {
+ BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
+ outbuf, sizeof(outbuf), &outlength);
+ if (rc || outlength < MC_CMD_GET_CAPABILITIES_OUT_LEN)
+ offset += snprintf(
+ buf + offset, len - offset, " rx? tx?");
+ else
+ offset += snprintf(
+ buf + offset, len - offset, " rx%x tx%x",
+ MCDI_WORD(outbuf,
+ GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID),
+ MCDI_WORD(outbuf,
+ GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID));
+
+ /* It's theoretically possible for the string to exceed 31
+ * characters, though in practice the first three version
+ * components are short enough that this doesn't happen.
+ */
+ if (WARN_ON(offset >= len))
+ buf[0] = 0;
+ }
+
return;
fail:
@@ -634,17 +959,18 @@ fail:
buf[0] = 0;
}
-int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
- bool *was_attached)
+static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
+ bool *was_attached)
{
- u8 inbuf[MC_CMD_DRV_ATTACH_IN_LEN];
- u8 outbuf[MC_CMD_DRV_ATTACH_OUT_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_DRV_ATTACH_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_DRV_ATTACH_OUT_LEN);
size_t outlen;
int rc;
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_NEW_STATE,
driver_operating ? 1 : 0);
MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_UPDATE, 1);
+ MCDI_SET_DWORD(inbuf, DRV_ATTACH_IN_FIRMWARE_ID, MC_CMD_FW_LOW_LATENCY);
rc = efx_mcdi_rpc(efx, MC_CMD_DRV_ATTACH, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), &outlen);
@@ -667,8 +993,8 @@ fail:
int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
u16 *fw_subtype_list, u32 *capabilities)
{
- uint8_t outbuf[MC_CMD_GET_BOARD_CFG_OUT_LENMAX];
- size_t outlen, offset, i;
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_BOARD_CFG_OUT_LENMAX);
+ size_t outlen, i;
int port_num = efx_port_num(efx);
int rc;
@@ -684,22 +1010,21 @@ int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
goto fail;
}
- offset = (port_num)
- ? MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST
- : MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST;
if (mac_address)
- memcpy(mac_address, outbuf + offset, ETH_ALEN);
+ memcpy(mac_address,
+ port_num ?
+ MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1) :
+ MCDI_PTR(outbuf, GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0),
+ ETH_ALEN);
if (fw_subtype_list) {
- /* Byte-swap and truncate or zero-pad as necessary */
- offset = MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST;
for (i = 0;
- i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM;
- i++) {
- fw_subtype_list[i] =
- (offset + 2 <= outlen) ?
- le16_to_cpup((__le16 *)(outbuf + offset)) : 0;
- offset += 2;
- }
+ i < MCDI_VAR_ARRAY_LEN(outlen,
+ GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST);
+ i++)
+ fw_subtype_list[i] = MCDI_ARRAY_WORD(
+ outbuf, GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST, i);
+ for (; i < MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM; i++)
+ fw_subtype_list[i] = 0;
}
if (capabilities) {
if (port_num)
@@ -721,7 +1046,7 @@ fail:
int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart, u32 dest_evq)
{
- u8 inbuf[MC_CMD_LOG_CTRL_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_LOG_CTRL_IN_LEN);
u32 dest = 0;
int rc;
@@ -749,7 +1074,7 @@ fail:
int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out)
{
- u8 outbuf[MC_CMD_NVRAM_TYPES_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TYPES_OUT_LEN);
size_t outlen;
int rc;
@@ -777,8 +1102,8 @@ int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
bool *protected_out)
{
- u8 inbuf[MC_CMD_NVRAM_INFO_IN_LEN];
- u8 outbuf[MC_CMD_NVRAM_INFO_OUT_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
size_t outlen;
int rc;
@@ -804,127 +1129,10 @@ fail:
return rc;
}
-int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
-{
- u8 inbuf[MC_CMD_NVRAM_UPDATE_START_IN_LEN];
- int rc;
-
- MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
-
- BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
- loff_t offset, u8 *buffer, size_t length)
-{
- u8 inbuf[MC_CMD_NVRAM_READ_IN_LEN];
- u8 outbuf[MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
- size_t outlen;
- int rc;
-
- MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
- MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
- MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- goto fail;
-
- memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
- loff_t offset, const u8 *buffer, size_t length)
-{
- u8 inbuf[MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX)];
- int rc;
-
- MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
- MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
- MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
- memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
-
- BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
- ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
- NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
- loff_t offset, size_t length)
-{
- u8 inbuf[MC_CMD_NVRAM_ERASE_IN_LEN];
- int rc;
-
- MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
- MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
- MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
-
- BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
-int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
-{
- u8 inbuf[MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN];
- int rc;
-
- MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
-
- BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
- return rc;
-}
-
static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
{
- u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN];
- u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_TEST_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_TEST_OUT_LEN);
int rc;
MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
@@ -976,9 +1184,9 @@ fail1:
static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
- u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN];
- u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN];
- unsigned int flags, index, ofst;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_ASSERTS_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_ASSERTS_OUT_LEN);
+ unsigned int flags, index;
const char *reason;
size_t outlen;
int retry;
@@ -1020,19 +1228,20 @@ static int efx_mcdi_read_assertion(struct efx_nic *efx)
MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
/* Print out the registers */
- ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
- for (index = 1; index < 32; index++) {
- netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n", index,
- MCDI_DWORD2(outbuf, ofst));
- ofst += sizeof(efx_dword_t);
- }
+ for (index = 0;
+ index < MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
+ index++)
+ netif_err(efx, hw, efx->net_dev, "R%.2d (?): 0x%.8x\n",
+ 1 + index,
+ MCDI_ARRAY_DWORD(outbuf, GET_ASSERTS_OUT_GP_REGS_OFFS,
+ index));
return 0;
}
static void efx_mcdi_exit_assertion(struct efx_nic *efx)
{
- u8 inbuf[MC_CMD_REBOOT_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
/* If the MC is running debug firmware, it might now be
* waiting for a debugger to attach, but we just want it to
@@ -1062,7 +1271,7 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
- u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_ID_LED_IN_LEN);
int rc;
BUILD_BUG_ON(EFX_LED_OFF != MC_CMD_LED_OFF);
@@ -1080,7 +1289,7 @@ void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
__func__, rc);
}
-int efx_mcdi_reset_port(struct efx_nic *efx)
+static int efx_mcdi_reset_port(struct efx_nic *efx)
{
int rc = efx_mcdi_rpc(efx, MC_CMD_ENTITY_RESET, NULL, 0, NULL, 0, NULL);
if (rc)
@@ -1089,9 +1298,9 @@ int efx_mcdi_reset_port(struct efx_nic *efx)
return rc;
}
-int efx_mcdi_reset_mc(struct efx_nic *efx)
+static int efx_mcdi_reset_mc(struct efx_nic *efx)
{
- u8 inbuf[MC_CMD_REBOOT_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_REBOOT_IN_LEN);
int rc;
BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
@@ -1107,11 +1316,31 @@ int efx_mcdi_reset_mc(struct efx_nic *efx)
return rc;
}
+enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason)
+{
+ return RESET_TYPE_RECOVER_OR_ALL;
+}
+
+int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method)
+{
+ int rc;
+
+ /* Recover from a failed assertion pre-reset */
+ rc = efx_mcdi_handle_assertion(efx);
+ if (rc)
+ return rc;
+
+ if (method == RESET_TYPE_WORLD)
+ return efx_mcdi_reset_mc(efx);
+ else
+ return efx_mcdi_reset_port(efx);
+}
+
static int efx_mcdi_wol_filter_set(struct efx_nic *efx, u32 type,
const u8 *mac, int *id_out)
{
- u8 inbuf[MC_CMD_WOL_FILTER_SET_IN_LEN];
- u8 outbuf[MC_CMD_WOL_FILTER_SET_OUT_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_SET_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_SET_OUT_LEN);
size_t outlen;
int rc;
@@ -1151,7 +1380,7 @@ efx_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac, int *id_out)
int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out)
{
- u8 outbuf[MC_CMD_WOL_FILTER_GET_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_WOL_FILTER_GET_OUT_LEN);
size_t outlen;
int rc;
@@ -1178,7 +1407,7 @@ fail:
int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id)
{
- u8 inbuf[MC_CMD_WOL_FILTER_REMOVE_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_WOL_FILTER_REMOVE_IN_LEN);
int rc;
MCDI_SET_DWORD(inbuf, WOL_FILTER_REMOVE_IN_FILTER_ID, (u32)id);
@@ -1199,34 +1428,31 @@ int efx_mcdi_flush_rxqs(struct efx_nic *efx)
{
struct efx_channel *channel;
struct efx_rx_queue *rx_queue;
- __le32 *qid;
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_FLUSH_RX_QUEUES_IN_LEN(EFX_MAX_CHANNELS));
int rc, count;
BUILD_BUG_ON(EFX_MAX_CHANNELS >
MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
- qid = kmalloc(EFX_MAX_CHANNELS * sizeof(*qid), GFP_KERNEL);
- if (qid == NULL)
- return -ENOMEM;
-
count = 0;
efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel) {
if (rx_queue->flush_pending) {
rx_queue->flush_pending = false;
atomic_dec(&efx->rxq_flush_pending);
- qid[count++] = cpu_to_le32(
- efx_rx_queue_index(rx_queue));
+ MCDI_SET_ARRAY_DWORD(
+ inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+ count, efx_rx_queue_index(rx_queue));
+ count++;
}
}
}
- rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)qid,
- count * sizeof(*qid), NULL, 0, NULL);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
+ MC_CMD_FLUSH_RX_QUEUES_IN_LEN(count), NULL, 0, NULL);
WARN_ON(rc < 0);
- kfree(qid);
-
return rc;
}
@@ -1245,3 +1471,247 @@ fail:
return rc;
}
+int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_WORKAROUND_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_WORKAROUND_OUT_LEN != 0);
+ MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, enabled);
+ return efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+}
+
+#ifdef CONFIG_SFC_MTD
+
+#define EFX_MCDI_NVRAM_LEN_MAX 128
+
+static int efx_mcdi_nvram_update_start(struct efx_nic *efx, unsigned int type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_START_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_START_IN_TYPE, type);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_START_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_START, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
+ loff_t offset, u8 *buffer, size_t length)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_READ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf,
+ MC_CMD_NVRAM_READ_OUT_LEN(EFX_MCDI_NVRAM_LEN_MAX));
+ size_t outlen;
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_READ_IN_LENGTH, length);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_READ, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ goto fail;
+
+ memcpy(buffer, MCDI_PTR(outbuf, NVRAM_READ_OUT_READ_BUFFER), length);
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
+ loff_t offset, const u8 *buffer, size_t length)
+{
+ MCDI_DECLARE_BUF(inbuf,
+ MC_CMD_NVRAM_WRITE_IN_LEN(EFX_MCDI_NVRAM_LEN_MAX));
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_WRITE_IN_LENGTH, length);
+ memcpy(MCDI_PTR(inbuf, NVRAM_WRITE_IN_WRITE_BUFFER), buffer, length);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_WRITE_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_WRITE, inbuf,
+ ALIGN(MC_CMD_NVRAM_WRITE_IN_LEN(length), 4),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
+ loff_t offset, size_t length)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_ERASE_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_TYPE, type);
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_OFFSET, offset);
+ MCDI_SET_DWORD(inbuf, NVRAM_ERASE_IN_LENGTH, length);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_ERASE_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_ERASE, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+static int efx_mcdi_nvram_update_finish(struct efx_nic *efx, unsigned int type)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN);
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_UPDATE_FINISH_IN_TYPE, type);
+
+ BUILD_BUG_ON(MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_UPDATE_FINISH, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ return rc;
+}
+
+int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, u8 *buffer)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ loff_t offset = start;
+ loff_t end = min_t(loff_t, start + len, mtd->size);
+ size_t chunk;
+ int rc = 0;
+
+ while (offset < end) {
+ chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
+ rc = efx_mcdi_nvram_read(efx, part->nvram_type, offset,
+ buffer, chunk);
+ if (rc)
+ goto out;
+ offset += chunk;
+ buffer += chunk;
+ }
+out:
+ *retlen = offset - start;
+ return rc;
+}
+
+int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
+ loff_t end = min_t(loff_t, start + len, mtd->size);
+ size_t chunk = part->common.mtd.erasesize;
+ int rc = 0;
+
+ if (!part->updating) {
+ rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
+ if (rc)
+ goto out;
+ part->updating = true;
+ }
+
+ /* The MCDI interface can in fact do multiple erase blocks at once;
+ * but erasing may be slow, so we make multiple calls here to avoid
+ * tripping the MCDI RPC timeout. */
+ while (offset < end) {
+ rc = efx_mcdi_nvram_erase(efx, part->nvram_type, offset,
+ chunk);
+ if (rc)
+ goto out;
+ offset += chunk;
+ }
+out:
+ return rc;
+}
+
+int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, const u8 *buffer)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ loff_t offset = start;
+ loff_t end = min_t(loff_t, start + len, mtd->size);
+ size_t chunk;
+ int rc = 0;
+
+ if (!part->updating) {
+ rc = efx_mcdi_nvram_update_start(efx, part->nvram_type);
+ if (rc)
+ goto out;
+ part->updating = true;
+ }
+
+ while (offset < end) {
+ chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
+ rc = efx_mcdi_nvram_write(efx, part->nvram_type, offset,
+ buffer, chunk);
+ if (rc)
+ goto out;
+ offset += chunk;
+ buffer += chunk;
+ }
+out:
+ *retlen = offset - start;
+ return rc;
+}
+
+int efx_mcdi_mtd_sync(struct mtd_info *mtd)
+{
+ struct efx_mcdi_mtd_partition *part = to_efx_mcdi_mtd_partition(mtd);
+ struct efx_nic *efx = mtd->priv;
+ int rc = 0;
+
+ if (part->updating) {
+ part->updating = false;
+ rc = efx_mcdi_nvram_update_finish(efx, part->nvram_type);
+ }
+
+ return rc;
+}
+
+void efx_mcdi_mtd_rename(struct efx_mtd_partition *part)
+{
+ struct efx_mcdi_mtd_partition *mcdi_part =
+ container_of(part, struct efx_mcdi_mtd_partition, common);
+ struct efx_nic *efx = part->mtd.priv;
+
+ snprintf(part->name, sizeof(part->name), "%s %s:%02x",
+ efx->name, part->type_name, mcdi_part->fw_subtype);
+}
+
+#endif /* CONFIG_SFC_MTD */
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 3ba2e5b5a9c..c34d0d4e10e 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2008-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2008-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -11,18 +11,20 @@
#define EFX_MCDI_H
/**
- * enum efx_mcdi_state
+ * enum efx_mcdi_state - MCDI request handling state
* @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
- * mcdi_lock then they are able to move to MCDI_STATE_RUNNING
- * @MCDI_STATE_RUNNING: There is an MCDI request pending. Only the thread that
- * moved into this state is allowed to move out of it.
+ * mcdi @iface_lock then they are able to move to %MCDI_STATE_RUNNING
+ * @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending.
+ * Only the thread that moved into this state is allowed to move out of it.
+ * @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending.
* @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
* has not yet consumed the result. For all other threads, equivalent to
- * MCDI_STATE_RUNNING.
+ * %MCDI_STATE_RUNNING.
*/
enum efx_mcdi_state {
MCDI_STATE_QUIESCENT,
- MCDI_STATE_RUNNING,
+ MCDI_STATE_RUNNING_SYNC,
+ MCDI_STATE_RUNNING_ASYNC,
MCDI_STATE_COMPLETED,
};
@@ -32,28 +34,39 @@ enum efx_mcdi_mode {
};
/**
- * struct efx_mcdi_iface
- * @state: Interface state. Waited for by mcdi_wq.
- * @wq: Wait queue for threads waiting for state != STATE_RUNNING
- * @iface_lock: Protects @credits, @seqno, @resprc, @resplen
+ * struct efx_mcdi_iface - MCDI protocol context
+ * @efx: The associated NIC.
+ * @state: Request handling state. Waited for by @wq.
* @mode: Poll for mcdi completion, or wait for an mcdi_event.
- * Serialised by @lock
+ * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING
+ * @new_epoch: Indicates start of day or start of MC reboot recovery
+ * @iface_lock: Serialises access to @seqno, @credits and response metadata
* @seqno: The next sequence number to use for mcdi requests.
- * Serialised by @lock
* @credits: Number of spurious MCDI completion events allowed before we
- * trigger a fatal error. Protected by @lock
- * @resprc: Returned MCDI completion
- * @resplen: Returned payload length
+ * trigger a fatal error
+ * @resprc: Response error/success code (Linux numbering)
+ * @resp_hdr_len: Response header length
+ * @resp_data_len: Response data (SDU or error) length
+ * @async_lock: Serialises access to @async_list while event processing is
+ * enabled
+ * @async_list: Queue of asynchronous requests
+ * @async_timer: Timer for asynchronous request timeout
*/
struct efx_mcdi_iface {
- atomic_t state;
+ struct efx_nic *efx;
+ enum efx_mcdi_state state;
+ enum efx_mcdi_mode mode;
wait_queue_head_t wq;
spinlock_t iface_lock;
- enum efx_mcdi_mode mode;
+ bool new_epoch;
unsigned int credits;
unsigned int seqno;
- unsigned int resprc;
- size_t resplen;
+ int resprc;
+ size_t resp_hdr_len;
+ size_t resp_data_len;
+ spinlock_t async_lock;
+ struct list_head async_list;
+ struct timer_list async_timer;
};
struct efx_mcdi_mon {
@@ -65,65 +78,204 @@ struct efx_mcdi_mon {
unsigned int n_attrs;
};
-extern void efx_mcdi_init(struct efx_nic *efx);
+struct efx_mcdi_mtd_partition {
+ struct efx_mtd_partition common;
+ bool updating;
+ u16 nvram_type;
+ u16 fw_subtype;
+};
+
+#define to_efx_mcdi_mtd_partition(mtd) \
+ container_of(mtd, struct efx_mcdi_mtd_partition, common.mtd)
+
+/**
+ * struct efx_mcdi_data - extra state for NICs that implement MCDI
+ * @iface: Interface/protocol state
+ * @hwmon: Hardware monitor state
+ */
+struct efx_mcdi_data {
+ struct efx_mcdi_iface iface;
+#ifdef CONFIG_SFC_MCDI_MON
+ struct efx_mcdi_mon hwmon;
+#endif
+};
+
+#ifdef CONFIG_SFC_MCDI_MON
+static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
+{
+ EFX_BUG_ON_PARANOID(!efx->mcdi);
+ return &efx->mcdi->hwmon;
+}
+#endif
+
+extern int efx_mcdi_init(struct efx_nic *efx);
+extern void efx_mcdi_fini(struct efx_nic *efx);
-extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd, const u8 *inbuf,
- size_t inlen, u8 *outbuf, size_t outlen,
+extern int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
-extern void efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
- const u8 *inbuf, size_t inlen);
+extern int efx_mcdi_rpc_start(struct efx_nic *efx, unsigned cmd,
+ const efx_dword_t *inbuf, size_t inlen);
extern int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
- u8 *outbuf, size_t outlen,
+ efx_dword_t *outbuf, size_t outlen,
size_t *outlen_actual);
+typedef void efx_mcdi_async_completer(struct efx_nic *efx,
+ unsigned long cookie, int rc,
+ efx_dword_t *outbuf,
+ size_t outlen_actual);
+extern int efx_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
+ const efx_dword_t *inbuf, size_t inlen,
+ size_t outlen,
+ efx_mcdi_async_completer *complete,
+ unsigned long cookie);
+
extern int efx_mcdi_poll_reboot(struct efx_nic *efx);
extern void efx_mcdi_mode_poll(struct efx_nic *efx);
extern void efx_mcdi_mode_event(struct efx_nic *efx);
+extern void efx_mcdi_flush_async(struct efx_nic *efx);
extern void efx_mcdi_process_event(struct efx_channel *channel,
efx_qword_t *event);
extern void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
-#define MCDI_PTR2(_buf, _ofst) \
- (((u8 *)_buf) + _ofst)
-#define MCDI_SET_DWORD2(_buf, _ofst, _value) \
- EFX_POPULATE_DWORD_1(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \
- EFX_DWORD_0, _value)
-#define MCDI_DWORD2(_buf, _ofst) \
- EFX_DWORD_FIELD(*((efx_dword_t *)MCDI_PTR2(_buf, _ofst)), \
- EFX_DWORD_0)
-#define MCDI_QWORD2(_buf, _ofst) \
- EFX_QWORD_FIELD64(*((efx_qword_t *)MCDI_PTR2(_buf, _ofst)), \
- EFX_QWORD_0)
-
-#define MCDI_PTR(_buf, _ofst) \
- MCDI_PTR2(_buf, MC_CMD_ ## _ofst ## _OFST)
-#define MCDI_ARRAY_PTR(_buf, _field, _type, _index) \
- MCDI_PTR2(_buf, \
- MC_CMD_ ## _field ## _OFST + \
- (_index) * MC_CMD_ ## _type ## _TYPEDEF_LEN)
-#define MCDI_SET_DWORD(_buf, _ofst, _value) \
- MCDI_SET_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST, _value)
-#define MCDI_DWORD(_buf, _ofst) \
- MCDI_DWORD2(_buf, MC_CMD_ ## _ofst ## _OFST)
-#define MCDI_QWORD(_buf, _ofst) \
- MCDI_QWORD2(_buf, MC_CMD_ ## _ofst ## _OFST)
+/* We expect that 16- and 32-bit fields in MCDI requests and responses
+ * are appropriately aligned, but 64-bit fields are only
+ * 32-bit-aligned. Also, on Siena we must copy to the MC shared
+ * memory strictly 32 bits at a time, so add any necessary padding.
+ */
+#define MCDI_DECLARE_BUF(_name, _len) \
+ efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
+#define _MCDI_PTR(_buf, _offset) \
+ ((u8 *)(_buf) + (_offset))
+#define MCDI_PTR(_buf, _field) \
+ _MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
+#define _MCDI_CHECK_ALIGN(_ofst, _align) \
+ ((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
+#define _MCDI_DWORD(_buf, _field) \
+ ((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
+
+#define MCDI_WORD(_buf, _field) \
+ ((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
+ le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
+#define MCDI_SET_DWORD(_buf, _field, _value) \
+ EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
+#define MCDI_DWORD(_buf, _field) \
+ EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
+#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
+ EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1)
+#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1, \
+ _name2, _value2) \
+ EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2)
+#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3) \
+ EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3)
+#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4) \
+ EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4)
+#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4, _name5, _value5) \
+ EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4, \
+ MC_CMD_ ## _name5, _value5)
+#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4, _name5, _value5, \
+ _name6, _value6) \
+ EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4, \
+ MC_CMD_ ## _name5, _value5, \
+ MC_CMD_ ## _name6, _value6)
+#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1, \
+ _name2, _value2, _name3, _value3, \
+ _name4, _value4, _name5, _value5, \
+ _name6, _value6, _name7, _value7) \
+ EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field), \
+ MC_CMD_ ## _name1, _value1, \
+ MC_CMD_ ## _name2, _value2, \
+ MC_CMD_ ## _name3, _value3, \
+ MC_CMD_ ## _name4, _value4, \
+ MC_CMD_ ## _name5, _value5, \
+ MC_CMD_ ## _name6, _value6, \
+ MC_CMD_ ## _name7, _value7)
+#define MCDI_SET_QWORD(_buf, _field, _value) \
+ do { \
+ EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
+ EFX_DWORD_0, (u32)(_value)); \
+ EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
+ EFX_DWORD_0, (u64)(_value) >> 32); \
+ } while (0)
+#define MCDI_QWORD(_buf, _field) \
+ (EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], EFX_DWORD_0) | \
+ (u64)EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], EFX_DWORD_0) << 32)
+#define MCDI_FIELD(_ptr, _type, _field) \
+ EFX_EXTRACT_DWORD( \
+ *(efx_dword_t *) \
+ _MCDI_PTR(_ptr, MC_CMD_ ## _type ## _ ## _field ## _OFST & ~3),\
+ MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f, \
+ (MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f) + \
+ MC_CMD_ ## _type ## _ ## _field ## _WIDTH - 1)
+
+#define _MCDI_ARRAY_PTR(_buf, _field, _index, _align) \
+ (_MCDI_PTR(_buf, _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, _align))\
+ + (_index) * _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _LEN, _align))
+#define MCDI_DECLARE_STRUCT_PTR(_name) \
+ efx_dword_t *_name
+#define MCDI_ARRAY_STRUCT_PTR(_buf, _field, _index) \
+ ((efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
+#define MCDI_VAR_ARRAY_LEN(_len, _field) \
+ min_t(size_t, MC_CMD_ ## _field ## _MAXNUM, \
+ ((_len) - MC_CMD_ ## _field ## _OFST) / MC_CMD_ ## _field ## _LEN)
+#define MCDI_ARRAY_WORD(_buf, _field, _index) \
+ (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
+ le16_to_cpu(*(__force const __le16 *) \
+ _MCDI_ARRAY_PTR(_buf, _field, _index, 2)))
+#define _MCDI_ARRAY_DWORD(_buf, _field, _index) \
+ (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 4) + \
+ (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
+#define MCDI_SET_ARRAY_DWORD(_buf, _field, _index, _value) \
+ EFX_SET_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), \
+ EFX_DWORD_0, _value)
+#define MCDI_ARRAY_DWORD(_buf, _field, _index) \
+ EFX_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), EFX_DWORD_0)
+#define _MCDI_ARRAY_QWORD(_buf, _field, _index) \
+ (BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 8) + \
+ (efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
+#define MCDI_SET_ARRAY_QWORD(_buf, _field, _index, _value) \
+ do { \
+ EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[0],\
+ EFX_DWORD_0, (u32)(_value)); \
+ EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[1],\
+ EFX_DWORD_0, (u64)(_value) >> 32); \
+ } while (0)
+#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
+ MCDI_FIELD(MCDI_ARRAY_STRUCT_PTR(_buf, _field1, _index), \
+ _type ## _TYPEDEF, _field2)
#define MCDI_EVENT_FIELD(_ev, _field) \
EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
-#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
- EFX_EXTRACT_DWORD( \
- *((efx_dword_t *) \
- (MCDI_ARRAY_PTR(_buf, _field1, _type, _index) + \
- (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _OFST & ~3))), \
- MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f, \
- (MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _LBN & 0x1f) + \
- MC_CMD_ ## _type ## _TYPEDEF_ ## _field2 ## _WIDTH - 1)
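Illustrative sketch (editor's addition, not part of the patch): how the dword-aligned buffer macros above are typically combined in a caller, mirroring the converted callers in mcdi.c (e.g. efx_mcdi_nvram_info). The helper name is hypothetical and the NVRAM_INFO field names are assumed from mcdi_pcol.h.

static int example_nvram_size(struct efx_nic *efx, unsigned int type,
			      size_t *size_out)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_INFO_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_INFO_OUT_LEN);
	size_t outlen;
	int rc;

	/* Build the request in a properly aligned efx_dword_t buffer */
	MCDI_SET_DWORD(inbuf, NVRAM_INFO_IN_TYPE, type);

	rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_INFO, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_NVRAM_INFO_OUT_LEN)
		return -EIO;

	/* Extract a 32-bit field from the response */
	*size_out = MCDI_DWORD(outbuf, NVRAM_INFO_OUT_SIZE);
	return 0;
}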
extern void efx_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
-extern int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
- bool *was_attached_out);
extern int efx_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
u16 *fw_subtype_list, u32 *capabilities);
extern int efx_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
@@ -132,34 +284,29 @@ extern int efx_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
extern int efx_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
size_t *size_out, size_t *erase_size_out,
bool *protected_out);
-extern int efx_mcdi_nvram_update_start(struct efx_nic *efx,
- unsigned int type);
-extern int efx_mcdi_nvram_read(struct efx_nic *efx, unsigned int type,
- loff_t offset, u8 *buffer, size_t length);
-extern int efx_mcdi_nvram_write(struct efx_nic *efx, unsigned int type,
- loff_t offset, const u8 *buffer,
- size_t length);
-#define EFX_MCDI_NVRAM_LEN_MAX 128
-extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
- loff_t offset, size_t length);
-extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
- unsigned int type);
extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
-extern int efx_mcdi_reset_port(struct efx_nic *efx);
-extern int efx_mcdi_reset_mc(struct efx_nic *efx);
extern int efx_mcdi_wol_filter_set_magic(struct efx_nic *efx,
const u8 *mac, int *id_out);
extern int efx_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
extern int efx_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
extern int efx_mcdi_wol_filter_reset(struct efx_nic *efx);
extern int efx_mcdi_flush_rxqs(struct efx_nic *efx);
+extern int efx_mcdi_port_probe(struct efx_nic *efx);
+extern void efx_mcdi_port_remove(struct efx_nic *efx);
+extern int efx_mcdi_port_reconfigure(struct efx_nic *efx);
+extern int efx_mcdi_port_get_number(struct efx_nic *efx);
+extern u32 efx_mcdi_phy_get_caps(struct efx_nic *efx);
+extern void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
extern int efx_mcdi_set_mac(struct efx_nic *efx);
-extern int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
- u32 dma_len, int enable, int clear);
-extern int efx_mcdi_mac_reconfigure(struct efx_nic *efx);
+#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
+extern void efx_mcdi_mac_start_stats(struct efx_nic *efx);
+extern void efx_mcdi_mac_stop_stats(struct efx_nic *efx);
extern bool efx_mcdi_mac_check_fault(struct efx_nic *efx);
+extern enum reset_type efx_mcdi_map_reset_reason(enum reset_type reason);
+extern int efx_mcdi_reset(struct efx_nic *efx, enum reset_type method);
+extern int efx_mcdi_set_workaround(struct efx_nic *efx, u32 type, bool enabled);
#ifdef CONFIG_SFC_MCDI_MON
extern int efx_mcdi_mon_probe(struct efx_nic *efx);
@@ -169,4 +316,14 @@ static inline int efx_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
static inline void efx_mcdi_mon_remove(struct efx_nic *efx) {}
#endif
+#ifdef CONFIG_SFC_MTD
+extern int efx_mcdi_mtd_read(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, u8 *buffer);
+extern int efx_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
+extern int efx_mcdi_mtd_write(struct mtd_info *mtd, loff_t start,
+ size_t len, size_t *retlen, const u8 *buffer);
+extern int efx_mcdi_mtd_sync(struct mtd_info *mtd);
+extern void efx_mcdi_mtd_rename(struct efx_mtd_partition *part);
+#endif
+
#endif /* EFX_MCDI_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_mac.c b/drivers/net/ethernet/sfc/mcdi_mac.c
deleted file mode 100644
index 1003f309cba..00000000000
--- a/drivers/net/ethernet/sfc/mcdi_mac.c
+++ /dev/null
@@ -1,130 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009-2010 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#include "net_driver.h"
-#include "efx.h"
-#include "mcdi.h"
-#include "mcdi_pcol.h"
-
-int efx_mcdi_set_mac(struct efx_nic *efx)
-{
- u32 reject, fcntl;
- u8 cmdbytes[MC_CMD_SET_MAC_IN_LEN];
-
- memcpy(cmdbytes + MC_CMD_SET_MAC_IN_ADDR_OFST,
- efx->net_dev->dev_addr, ETH_ALEN);
-
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
- EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
-
- /* The MCDI command provides for controlling accept/reject
- * of broadcast packets too, but the driver doesn't currently
- * expose this. */
- reject = (efx->promiscuous) ? 0 :
- (1 << MC_CMD_SET_MAC_IN_REJECT_UNCST_LBN);
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_REJECT, reject);
-
- switch (efx->wanted_fc) {
- case EFX_FC_RX | EFX_FC_TX:
- fcntl = MC_CMD_FCNTL_BIDIR;
- break;
- case EFX_FC_RX:
- fcntl = MC_CMD_FCNTL_RESPOND;
- break;
- default:
- fcntl = MC_CMD_FCNTL_OFF;
- break;
- }
- if (efx->wanted_fc & EFX_FC_AUTO)
- fcntl = MC_CMD_FCNTL_AUTO;
- if (efx->fc_disable)
- fcntl = MC_CMD_FCNTL_OFF;
-
- MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
-
- return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
- NULL, 0, NULL);
-}
-
-bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
-{
- u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
- size_t outlength;
- int rc;
-
- BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
- outbuf, sizeof(outbuf), &outlength);
- if (rc) {
- netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
- __func__, rc);
- return true;
- }
-
- return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
-}
-
-int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
- u32 dma_len, int enable, int clear)
-{
- u8 inbuf[MC_CMD_MAC_STATS_IN_LEN];
- int rc;
- efx_dword_t *cmd_ptr;
- int period = enable ? 1000 : 0;
- u32 addr_hi;
- u32 addr_lo;
-
- BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
-
- addr_lo = ((u64)dma_addr) >> 0;
- addr_hi = ((u64)dma_addr) >> 32;
-
- MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_LO, addr_lo);
- MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_ADDR_HI, addr_hi);
- cmd_ptr = (efx_dword_t *)MCDI_PTR(inbuf, MAC_STATS_IN_CMD);
- EFX_POPULATE_DWORD_7(*cmd_ptr,
- MC_CMD_MAC_STATS_IN_DMA, !!enable,
- MC_CMD_MAC_STATS_IN_CLEAR, clear,
- MC_CMD_MAC_STATS_IN_PERIODIC_CHANGE, 1,
- MC_CMD_MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
- MC_CMD_MAC_STATS_IN_PERIODIC_CLEAR, 0,
- MC_CMD_MAC_STATS_IN_PERIODIC_NOEVENT, 1,
- MC_CMD_MAC_STATS_IN_PERIOD_MS, period);
- MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
- if (rc)
- goto fail;
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
- __func__, enable ? "enable" : "disable", rc);
- return rc;
-}
-
-int efx_mcdi_mac_reconfigure(struct efx_nic *efx)
-{
- int rc;
-
- WARN_ON(!mutex_is_locked(&efx->mac_lock));
-
- rc = efx_mcdi_set_mac(efx);
- if (rc != 0)
- return rc;
-
- return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
- efx->multicast_hash.byte,
- sizeof(efx->multicast_hash),
- NULL, 0, NULL);
-}
diff --git a/drivers/net/ethernet/sfc/mcdi_mon.c b/drivers/net/ethernet/sfc/mcdi_mon.c
index 1d552f0664d..4cc5d95b2a5 100644
--- a/drivers/net/ethernet/sfc/mcdi_mon.c
+++ b/drivers/net/ethernet/sfc/mcdi_mon.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2011-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -21,31 +21,62 @@ enum efx_hwmon_type {
EFX_HWMON_UNKNOWN,
EFX_HWMON_TEMP, /* temperature */
EFX_HWMON_COOL, /* cooling device, probably a heatsink */
- EFX_HWMON_IN /* input voltage */
+ EFX_HWMON_IN, /* voltage */
+ EFX_HWMON_CURR, /* current */
+ EFX_HWMON_POWER, /* power */
};
static const struct {
const char *label;
enum efx_hwmon_type hwmon_type;
int port;
-} efx_mcdi_sensor_type[MC_CMD_SENSOR_ENTRY_MAXNUM] = {
-#define SENSOR(name, label, hwmon_type, port) \
- [MC_CMD_SENSOR_##name] = { label, hwmon_type, port }
- SENSOR(CONTROLLER_TEMP, "Controller temp.", EFX_HWMON_TEMP, -1),
- SENSOR(PHY_COMMON_TEMP, "PHY temp.", EFX_HWMON_TEMP, -1),
- SENSOR(CONTROLLER_COOLING, "Controller cooling", EFX_HWMON_COOL, -1),
- SENSOR(PHY0_TEMP, "PHY temp.", EFX_HWMON_TEMP, 0),
- SENSOR(PHY0_COOLING, "PHY cooling", EFX_HWMON_COOL, 0),
- SENSOR(PHY1_TEMP, "PHY temp.", EFX_HWMON_TEMP, 1),
- SENSOR(PHY1_COOLING, "PHY cooling", EFX_HWMON_COOL, 1),
- SENSOR(IN_1V0, "1.0V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_1V2, "1.2V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_1V8, "1.8V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_2V5, "2.5V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_3V3, "3.3V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_12V0, "12.0V supply", EFX_HWMON_IN, -1),
- SENSOR(IN_1V2A, "1.2V analogue supply", EFX_HWMON_IN, -1),
- SENSOR(IN_VREF, "ref. voltage", EFX_HWMON_IN, -1),
+} efx_mcdi_sensor_type[] = {
+#define SENSOR(name, label, hwmon_type, port) \
+ [MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port }
+ SENSOR(CONTROLLER_TEMP, "Controller ext. temp.", TEMP, -1),
+ SENSOR(PHY_COMMON_TEMP, "PHY temp.", TEMP, -1),
+ SENSOR(CONTROLLER_COOLING, "Controller cooling", COOL, -1),
+ SENSOR(PHY0_TEMP, "PHY temp.", TEMP, 0),
+ SENSOR(PHY0_COOLING, "PHY cooling", COOL, 0),
+ SENSOR(PHY1_TEMP, "PHY temp.", TEMP, 1),
+ SENSOR(PHY1_COOLING, "PHY cooling", COOL, 1),
+ SENSOR(IN_1V0, "1.0V supply", IN, -1),
+ SENSOR(IN_1V2, "1.2V supply", IN, -1),
+ SENSOR(IN_1V8, "1.8V supply", IN, -1),
+ SENSOR(IN_2V5, "2.5V supply", IN, -1),
+ SENSOR(IN_3V3, "3.3V supply", IN, -1),
+ SENSOR(IN_12V0, "12.0V supply", IN, -1),
+ SENSOR(IN_1V2A, "1.2V analogue supply", IN, -1),
+ SENSOR(IN_VREF, "ref. voltage", IN, -1),
+ SENSOR(OUT_VAOE, "AOE power supply", IN, -1),
+ SENSOR(AOE_TEMP, "AOE temp.", TEMP, -1),
+ SENSOR(PSU_AOE_TEMP, "AOE PSU temp.", TEMP, -1),
+ SENSOR(PSU_TEMP, "Controller PSU temp.", TEMP, -1),
+ SENSOR(FAN_0, NULL, COOL, -1),
+ SENSOR(FAN_1, NULL, COOL, -1),
+ SENSOR(FAN_2, NULL, COOL, -1),
+ SENSOR(FAN_3, NULL, COOL, -1),
+ SENSOR(FAN_4, NULL, COOL, -1),
+ SENSOR(IN_VAOE, "AOE input supply", IN, -1),
+ SENSOR(OUT_IAOE, "AOE output current", CURR, -1),
+ SENSOR(IN_IAOE, "AOE input current", CURR, -1),
+ SENSOR(NIC_POWER, "Board power use", POWER, -1),
+ SENSOR(IN_0V9, "0.9V supply", IN, -1),
+ SENSOR(IN_I0V9, "0.9V input current", CURR, -1),
+ SENSOR(IN_I1V2, "1.2V input current", CURR, -1),
+ SENSOR(IN_0V9_ADC, "0.9V supply (at ADC)", IN, -1),
+ SENSOR(CONTROLLER_2_TEMP, "Controller ext. temp. 2", TEMP, -1),
+ SENSOR(VREG_INTERNAL_TEMP, "Voltage regulator temp.", TEMP, -1),
+ SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1),
+ SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1),
+ SENSOR(CONTROLLER_VPTAT, "Controller int. temp. raw", IN, -1),
+ SENSOR(CONTROLLER_INTERNAL_TEMP, "Controller int. temp.", TEMP, -1),
+ SENSOR(CONTROLLER_VPTAT_EXTADC,
+ "Controller int. temp. raw (at ADC)", IN, -1),
+ SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC,
+ "Controller int. temp. (via ADC)", TEMP, -1),
+ SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1),
+ SENSOR(AIRFLOW, "Air flow raw", IN, -1),
#undef SENSOR
};
@@ -54,6 +85,7 @@ static const char *const sensor_status_names[] = {
[MC_CMD_SENSOR_STATE_WARNING] = "Warning",
[MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
[MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
+ [MC_CMD_SENSOR_STATE_NO_READING] = "No reading",
};
void efx_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
@@ -85,6 +117,7 @@ struct efx_mcdi_mon_attribute {
struct device_attribute dev_attr;
unsigned int index;
unsigned int type;
+ enum efx_hwmon_type hwmon_type;
unsigned int limit_value;
char name[12];
};
@@ -92,13 +125,12 @@ struct efx_mcdi_mon_attribute {
static int efx_mcdi_mon_update(struct efx_nic *efx)
{
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
- u8 inbuf[MC_CMD_READ_SENSORS_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN);
int rc;
- MCDI_SET_DWORD(inbuf, READ_SENSORS_IN_DMA_ADDR_LO,
- hwmon->dma_buf.dma_addr & 0xffffffff);
- MCDI_SET_DWORD(inbuf, READ_SENSORS_IN_DMA_ADDR_HI,
- (u64)hwmon->dma_buf.dma_addr >> 32);
+ MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR,
+ hwmon->dma_buf.dma_addr);
+ MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, hwmon->dma_buf.len);
rc = efx_mcdi_rpc(efx, MC_CMD_READ_SENSORS,
inbuf, sizeof(inbuf), NULL, 0, NULL);
@@ -146,18 +178,32 @@ static ssize_t efx_mcdi_mon_show_value(struct device *dev,
struct efx_mcdi_mon_attribute *mon_attr =
container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
efx_dword_t entry;
- unsigned int value;
+ unsigned int value, state;
int rc;
rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
if (rc)
return rc;
+ state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
+ if (state == MC_CMD_SENSOR_STATE_NO_READING)
+ return -EBUSY;
+
value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
- /* Convert temperature from degrees to milli-degrees Celsius */
- if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP)
+ switch (mon_attr->hwmon_type) {
+ case EFX_HWMON_TEMP:
+ /* Convert temperature from degrees to milli-degrees Celsius */
value *= 1000;
+ break;
+ case EFX_HWMON_POWER:
+ /* Convert power from watts to microwatts */
+ value *= 1000000;
+ break;
+ default:
+ /* No conversion needed */
+ break;
+ }
return sprintf(buf, "%u\n", value);
}
@@ -172,9 +218,19 @@ static ssize_t efx_mcdi_mon_show_limit(struct device *dev,
value = mon_attr->limit_value;
- /* Convert temperature from degrees to milli-degrees Celsius */
- if (efx_mcdi_sensor_type[mon_attr->type].hwmon_type == EFX_HWMON_TEMP)
+ switch (mon_attr->hwmon_type) {
+ case EFX_HWMON_TEMP:
+ /* Convert temperature from degrees to milli-degrees Celsius */
value *= 1000;
+ break;
+ case EFX_HWMON_POWER:
+ /* Convert power from watts to microwatts */
+ value *= 1000000;
+ break;
+ default:
+ /* No conversion needed */
+ break;
+ }
return sprintf(buf, "%u\n", value);
}
@@ -221,6 +277,10 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
strlcpy(attr->name, name, sizeof(attr->name));
attr->index = index;
attr->type = type;
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
+ attr->hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+ else
+ attr->hwmon_type = EFX_HWMON_UNKNOWN;
attr->limit_value = limit_value;
sysfs_attr_init(&attr->dev_attr.attr);
attr->dev_attr.attr.name = attr->name;
@@ -234,36 +294,43 @@ efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
int efx_mcdi_mon_probe(struct efx_nic *efx)
{
+ unsigned int n_temp = 0, n_cool = 0, n_in = 0, n_curr = 0, n_power = 0;
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
- unsigned int n_attrs, n_temp = 0, n_cool = 0, n_in = 0;
- u8 outbuf[MC_CMD_SENSOR_INFO_OUT_LENMAX];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX);
+ unsigned int n_pages, n_sensors, n_attrs, page;
size_t outlen;
char name[12];
u32 mask;
- int rc, i, type;
+ int rc, i, j, type;
- BUILD_BUG_ON(MC_CMD_SENSOR_INFO_IN_LEN != 0);
+ /* Find out how many sensors are present */
+ n_sensors = 0;
+ page = 0;
+ do {
+ MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page);
- rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, NULL, 0,
- outbuf, sizeof(outbuf), &outlen);
- if (rc)
- return rc;
- if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
- return -EIO;
-
- /* Find out which sensors are present. Don't create a device
- * if there are none.
- */
- mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
- if (mask == 0)
+ rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+ if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
+ return -EIO;
+
+ mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
+ n_sensors += hweight32(mask & ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
+ ++page;
+ } while (mask & (1 << MC_CMD_SENSOR_PAGE0_NEXT));
+ n_pages = page;
+
+ /* Don't create a device if there are none */
+ if (n_sensors == 0)
return 0;
- /* Check again for short response */
- if (outlen < MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask)))
- return -EIO;
-
- rc = efx_nic_alloc_buffer(efx, &hwmon->dma_buf,
- 4 * MC_CMD_SENSOR_ENTRY_MAXNUM);
+ rc = efx_nic_alloc_buffer(
+ efx, &hwmon->dma_buf,
+ n_sensors * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN,
+ GFP_KERNEL);
if (rc)
return rc;
@@ -274,7 +341,7 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
* attributes for this set of sensors: name of the driver plus
* value, min, max, crit, alarm and label for each sensor.
*/
- n_attrs = 1 + 6 * hweight32(mask);
+ n_attrs = 1 + 6 * n_sensors;
hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
if (!hwmon->attrs) {
rc = -ENOMEM;
@@ -291,26 +358,63 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
if (rc)
goto fail;
- for (i = 0, type = -1; ; i++) {
+ for (i = 0, j = -1, type = -1; ; i++) {
+ enum efx_hwmon_type hwmon_type;
const char *hwmon_prefix;
unsigned hwmon_index;
u16 min1, max1, min2, max2;
/* Find next sensor type or exit if there is none */
- type++;
- while (!(mask & (1 << type))) {
+ do {
type++;
- if (type == 32)
- return 0;
- }
- /* Skip sensors specific to a different port */
- if (efx_mcdi_sensor_type[type].hwmon_type != EFX_HWMON_UNKNOWN &&
- efx_mcdi_sensor_type[type].port >= 0 &&
- efx_mcdi_sensor_type[type].port != efx_port_num(efx))
- continue;
+ if ((type % 32) == 0) {
+ page = type / 32;
+ j = -1;
+ if (page == n_pages)
+ return 0;
+
+ MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
+ page);
+ rc = efx_mcdi_rpc(efx, MC_CMD_SENSOR_INFO,
+ inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf),
+ &outlen);
+ if (rc)
+ goto fail;
+ if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) {
+ rc = -EIO;
+ goto fail;
+ }
+
+ mask = (MCDI_DWORD(outbuf,
+ SENSOR_INFO_OUT_MASK) &
+ ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
- switch (efx_mcdi_sensor_type[type].hwmon_type) {
+ /* Check again for short response */
+ if (outlen <
+ MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask))) {
+ rc = -EIO;
+ goto fail;
+ }
+ }
+ } while (!(mask & (1 << type % 32)));
+ j++;
+
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
+ hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
+
+ /* Skip sensors specific to a different port */
+ if (hwmon_type != EFX_HWMON_UNKNOWN &&
+ efx_mcdi_sensor_type[type].port >= 0 &&
+ efx_mcdi_sensor_type[type].port !=
+ efx_port_num(efx))
+ continue;
+ } else {
+ hwmon_type = EFX_HWMON_UNKNOWN;
+ }
+
+ switch (hwmon_type) {
case EFX_HWMON_TEMP:
hwmon_prefix = "temp";
hwmon_index = ++n_temp; /* 1-based */
@@ -327,16 +431,24 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
hwmon_prefix = "in";
hwmon_index = n_in++; /* 0-based */
break;
+ case EFX_HWMON_CURR:
+ hwmon_prefix = "curr";
+ hwmon_index = ++n_curr; /* 1-based */
+ break;
+ case EFX_HWMON_POWER:
+ hwmon_prefix = "power";
+ hwmon_index = ++n_power; /* 1-based */
+ break;
}
min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
- SENSOR_INFO_ENTRY, i, MIN1);
+ SENSOR_INFO_ENTRY, j, MIN1);
max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
- SENSOR_INFO_ENTRY, i, MAX1);
+ SENSOR_INFO_ENTRY, j, MAX1);
min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
- SENSOR_INFO_ENTRY, i, MIN2);
+ SENSOR_INFO_ENTRY, j, MIN2);
max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
- SENSOR_INFO_ENTRY, i, MAX2);
+ SENSOR_INFO_ENTRY, j, MAX2);
if (min1 != max1) {
snprintf(name, sizeof(name), "%s%u_input",
@@ -346,13 +458,15 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
if (rc)
goto fail;
- snprintf(name, sizeof(name), "%s%u_min",
- hwmon_prefix, hwmon_index);
- rc = efx_mcdi_mon_add_attr(
- efx, name, efx_mcdi_mon_show_limit,
- i, type, min1);
- if (rc)
- goto fail;
+ if (hwmon_type != EFX_HWMON_POWER) {
+ snprintf(name, sizeof(name), "%s%u_min",
+ hwmon_prefix, hwmon_index);
+ rc = efx_mcdi_mon_add_attr(
+ efx, name, efx_mcdi_mon_show_limit,
+ i, type, min1);
+ if (rc)
+ goto fail;
+ }
snprintf(name, sizeof(name), "%s%u_max",
hwmon_prefix, hwmon_index);
@@ -383,7 +497,8 @@ int efx_mcdi_mon_probe(struct efx_nic *efx)
if (rc)
goto fail;
- if (efx_mcdi_sensor_type[type].label) {
+ if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
+ efx_mcdi_sensor_type[type].label) {
snprintf(name, sizeof(name), "%s%u_label",
hwmon_prefix, hwmon_index);
rc = efx_mcdi_mon_add_attr(
@@ -400,8 +515,7 @@ fail:
void efx_mcdi_mon_remove(struct efx_nic *efx)
{
- struct siena_nic_data *nic_data = efx->nic_data;
- struct efx_mcdi_mon *hwmon = &nic_data->hwmon;
+ struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
unsigned int i;
for (i = 0; i < hwmon->n_attrs; i++)
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index c5c9747861b..b5cf62492f8 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009-2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2009-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -21,6 +21,13 @@
#define MC_FW_STATE_BOOTING (4)
/* The Scheduler has started. */
#define MC_FW_STATE_SCHED (8)
+/* If this is set in MC_RESET_STATE_REG then it should be
+ * possible to jump into IMEM without loading code from flash.
+ * Unlike a warm boot, assume DMEM has been reloaded, so that
+ * the MC persistent data must be reinitialised. */
+#define MC_FW_TEPID_BOOT_OK (16)
+/* BIST state has been initialized */
+#define MC_FW_BIST_INIT_OK (128)
/* Siena MC shared memmory offsets */
/* The 'doorbell' addresses are hard-wired to alert the MC when written */
@@ -39,18 +46,21 @@
#define MC_STATUS_DWORD_REBOOT (0xb007b007)
#define MC_STATUS_DWORD_ASSERT (0xdeaddead)
+/* Check whether an mcfw version (in host order) belongs to a bootloader */
+#define MC_FW_VERSION_IS_BOOTLOADER(_v) (((_v) >> 16) == 0xb007)
+
/* The current version of the MCDI protocol.
*
* Note that the ROM burnt into the card only talks V0, so at the very
* least every driver must support version 0 and MCDI_PCOL_VERSION
*/
-#define MCDI_PCOL_VERSION 1
+#define MCDI_PCOL_VERSION 2
/* Unused commands: 0x23, 0x27, 0x30, 0x31 */
/* MCDI version 1
*
- * Each MCDI request starts with an MCDI_HEADER, which is a 32byte
+ * Each MCDI request starts with an MCDI_HEADER, which is a 32bit
* structure, filled in by the client.
*
* 0 7 8 16 20 22 23 24 31
@@ -87,9 +97,11 @@
#define MCDI_HEADER_DATALEN_LBN 8
#define MCDI_HEADER_DATALEN_WIDTH 8
#define MCDI_HEADER_SEQ_LBN 16
-#define MCDI_HEADER_RSVD_LBN 20
-#define MCDI_HEADER_RSVD_WIDTH 2
#define MCDI_HEADER_SEQ_WIDTH 4
+#define MCDI_HEADER_RSVD_LBN 20
+#define MCDI_HEADER_RSVD_WIDTH 1
+#define MCDI_HEADER_NOT_EPOCH_LBN 21
+#define MCDI_HEADER_NOT_EPOCH_WIDTH 1
#define MCDI_HEADER_ERROR_LBN 22
#define MCDI_HEADER_ERROR_WIDTH 1
#define MCDI_HEADER_RESPONSE_LBN 23
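/* Illustrative sketch (not part of the patch hunks): packing an MCDI header
 * dword from the LBN/WIDTH values above.  Assumes the MCDI_HEADER_*
 * definitions in this header are in scope; the CODE field is assumed to sit
 * at bits 0..6 as defined elsewhere in the header, and mcdi_field() is a
 * stand-in for the driver's EFX_POPULATE_DWORD macros.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t mcdi_field(unsigned int lbn, unsigned int width, uint32_t val)
{
	return (val & ((1u << width) - 1u)) << lbn;
}

int main(void)
{
	/* Request: MC_CMD_GET_BOOT_STATUS (0x5), no payload, sequence 3,
	 * NOT_EPOCH left clear. */
	uint32_t hdr = mcdi_field(0, 7, 0x5)	/* CODE (assumed at bits 0..6) */
		     | mcdi_field(MCDI_HEADER_DATALEN_LBN,
				  MCDI_HEADER_DATALEN_WIDTH, 0)
		     | mcdi_field(MCDI_HEADER_SEQ_LBN,
				  MCDI_HEADER_SEQ_WIDTH, 3)
		     | mcdi_field(MCDI_HEADER_NOT_EPOCH_LBN,
				  MCDI_HEADER_NOT_EPOCH_WIDTH, 0);

	printf("MCDI header: 0x%08" PRIx32 "\n", hdr);
	return 0;
}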
@@ -100,7 +112,11 @@
#define MCDI_HEADER_XFLAGS_EVREQ 0x01
/* Maximum number of payload bytes */
-#define MCDI_CTL_SDU_LEN_MAX 0xfc
+#define MCDI_CTL_SDU_LEN_MAX_V1 0xfc
+#define MCDI_CTL_SDU_LEN_MAX_V2 0x400
+
+#define MCDI_CTL_SDU_LEN_MAX MCDI_CTL_SDU_LEN_MAX_V2
+
/* The MC can generate events for two reasons:
* - To complete a shared memory request if XFLAGS_EVREQ was set
@@ -145,22 +161,69 @@
#define FSE_AZ_EV_CODE_MCDI_EVRESPONSE 0xc
+/* Operation not permitted. */
+#define MC_CMD_ERR_EPERM 1
/* Non-existent command target */
#define MC_CMD_ERR_ENOENT 2
/* assert() has killed the MC */
#define MC_CMD_ERR_EINTR 4
+/* I/O failure */
+#define MC_CMD_ERR_EIO 5
+/* Try again */
+#define MC_CMD_ERR_EAGAIN 11
+/* Out of memory */
+#define MC_CMD_ERR_ENOMEM 12
/* Caller does not hold required locks */
#define MC_CMD_ERR_EACCES 13
/* Resource is currently unavailable (e.g. lock contention) */
#define MC_CMD_ERR_EBUSY 16
+/* No such device */
+#define MC_CMD_ERR_ENODEV 19
/* Invalid argument to target */
#define MC_CMD_ERR_EINVAL 22
+/* Out of range */
+#define MC_CMD_ERR_ERANGE 34
/* Non-recursive resource is already acquired */
#define MC_CMD_ERR_EDEADLK 35
/* Operation not implemented */
#define MC_CMD_ERR_ENOSYS 38
/* Operation timed out */
#define MC_CMD_ERR_ETIME 62
+/* Link has been severed */
+#define MC_CMD_ERR_ENOLINK 67
+/* Protocol error */
+#define MC_CMD_ERR_EPROTO 71
+/* Operation not supported */
+#define MC_CMD_ERR_ENOTSUP 95
+/* Address not available */
+#define MC_CMD_ERR_EADDRNOTAVAIL 99
+/* Not connected */
+#define MC_CMD_ERR_ENOTCONN 107
+/* Operation already in progress */
+#define MC_CMD_ERR_EALREADY 114
+
+/* Resource allocation failed. */
+#define MC_CMD_ERR_ALLOC_FAIL 0x1000
+/* V-adaptor not found. */
+#define MC_CMD_ERR_NO_VADAPTOR 0x1001
+/* EVB port not found. */
+#define MC_CMD_ERR_NO_EVB_PORT 0x1002
+/* V-switch not found. */
+#define MC_CMD_ERR_NO_VSWITCH 0x1003
+/* Too many VLAN tags. */
+#define MC_CMD_ERR_VLAN_LIMIT 0x1004
+/* Bad PCI function number. */
+#define MC_CMD_ERR_BAD_PCI_FUNC 0x1005
+/* Invalid VLAN mode. */
+#define MC_CMD_ERR_BAD_VLAN_MODE 0x1006
+/* Invalid v-switch type. */
+#define MC_CMD_ERR_BAD_VSWITCH_TYPE 0x1007
+/* Invalid v-port type. */
+#define MC_CMD_ERR_BAD_VPORT_TYPE 0x1008
+/* MAC address exists. */
+#define MC_CMD_ERR_MAC_EXIST 0x1009
+/* Slave core not present */
+#define MC_CMD_ERR_SLAVE_NOT_PRESENT 0x100a
#define MC_CMD_ERR_CODE_OFST 0
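/* Illustrative sketch (not part of the patch hunks): mapping the MCDI error
 * space above onto negative Linux errno values, in the spirit of the
 * driver's existing lookup.  Assumes the MC_CMD_ERR_* definitions from this
 * header are in scope; the function name is local to the sketch.
 */
#include <errno.h>

static int sketch_mcdi_to_errno(unsigned int mcdi_err)
{
	switch (mcdi_err) {
	case 0:			  return 0;
	case MC_CMD_ERR_EPERM:	  return -EPERM;
	case MC_CMD_ERR_ENOENT:	  return -ENOENT;
	case MC_CMD_ERR_EINTR:	  return -EINTR;
	case MC_CMD_ERR_EIO:	  return -EIO;
	case MC_CMD_ERR_EAGAIN:	  return -EAGAIN;
	case MC_CMD_ERR_ENOMEM:	  return -ENOMEM;
	case MC_CMD_ERR_EACCES:	  return -EACCES;
	case MC_CMD_ERR_EBUSY:	  return -EBUSY;
	case MC_CMD_ERR_EINVAL:	  return -EINVAL;
	case MC_CMD_ERR_EDEADLK:  return -EDEADLK;
	case MC_CMD_ERR_ENOSYS:	  return -ENOSYS;
	case MC_CMD_ERR_ETIME:	  return -ETIME;
	case MC_CMD_ERR_EALREADY: return -EALREADY;
	default:
		return -EPROTO;	/* unknown or vendor-specific code */
	}
}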
@@ -178,9 +241,11 @@
/* Vectors in the boot ROM */
/* Point to the copycode entry point. */
-#define MC_BOOTROM_COPYCODE_VEC (0x7f4)
+#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
+#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
/* Points to the recovery mode entry point. */
-#define MC_BOOTROM_NOFLASH_VEC (0x7f8)
+#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
+#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
/* The command set exported by the boot ROM (MCDI v0) */
#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
@@ -209,16 +274,29 @@
(n) * MC_CMD_DBIWROP_TYPEDEF_LEN)
+/* Version 2 adds an optional argument to error returns: the errno value
+ * may be followed by the (0-based) number of the first argument that
+ * could not be processed.
+ */
+#define MC_CMD_ERR_ARG_OFST 4
+
+/* No space */
+#define MC_CMD_ERR_ENOSPC 28
+
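/* Illustrative sketch (not part of the patch hunks): reading the optional v2
 * "first bad argument" index at MC_CMD_ERR_ARG_OFST from an error response.
 * `resp`/`resp_len` describe the response payload; byte-order conversion is
 * omitted for brevity (a real caller would use le32_to_cpu()).
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static int sketch_mcdi_err_arg(const uint8_t *resp, size_t resp_len)
{
	uint32_t arg;

	if (resp_len < MC_CMD_ERR_ARG_OFST + 4)
		return -1;		/* index not supplied */
	memcpy(&arg, resp + MC_CMD_ERR_ARG_OFST, sizeof(arg));
	return (int)arg;		/* 0-based argument number */
}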
/* MCDI_EVENT structuredef */
#define MCDI_EVENT_LEN 8
#define MCDI_EVENT_CONT_LBN 32
#define MCDI_EVENT_CONT_WIDTH 1
#define MCDI_EVENT_LEVEL_LBN 33
#define MCDI_EVENT_LEVEL_WIDTH 3
-#define MCDI_EVENT_LEVEL_INFO 0x0 /* enum */
-#define MCDI_EVENT_LEVEL_WARN 0x1 /* enum */
-#define MCDI_EVENT_LEVEL_ERR 0x2 /* enum */
-#define MCDI_EVENT_LEVEL_FATAL 0x3 /* enum */
+/* enum: Info. */
+#define MCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define MCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define MCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define MCDI_EVENT_LEVEL_FATAL 0x3
#define MCDI_EVENT_DATA_OFST 0
#define MCDI_EVENT_CMDDONE_SEQ_LBN 0
#define MCDI_EVENT_CMDDONE_SEQ_WIDTH 8
@@ -230,9 +308,14 @@
#define MCDI_EVENT_LINKCHANGE_LP_CAP_WIDTH 16
#define MCDI_EVENT_LINKCHANGE_SPEED_LBN 16
#define MCDI_EVENT_LINKCHANGE_SPEED_WIDTH 4
-#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1 /* enum */
-#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2 /* enum */
-#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3 /* enum */
+/* enum: 100Mb/s */
+#define MCDI_EVENT_LINKCHANGE_SPEED_100M 0x1
+/* enum: 1Gb/s */
+#define MCDI_EVENT_LINKCHANGE_SPEED_1G 0x2
+/* enum: 10Gb/s */
+#define MCDI_EVENT_LINKCHANGE_SPEED_10G 0x3
+/* enum: 40Gb/s */
+#define MCDI_EVENT_LINKCHANGE_SPEED_40G 0x4
#define MCDI_EVENT_LINKCHANGE_FCNTL_LBN 20
#define MCDI_EVENT_LINKCHANGE_FCNTL_WIDTH 4
#define MCDI_EVENT_LINKCHANGE_LINK_FLAGS_LBN 24
@@ -247,26 +330,80 @@
#define MCDI_EVENT_FWALERT_DATA_WIDTH 24
#define MCDI_EVENT_FWALERT_REASON_LBN 0
#define MCDI_EVENT_FWALERT_REASON_WIDTH 8
-#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1 /* enum */
+/* enum: SRAM Access. */
+#define MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS 0x1
#define MCDI_EVENT_FLR_VF_LBN 0
#define MCDI_EVENT_FLR_VF_WIDTH 8
#define MCDI_EVENT_TX_ERR_TXQ_LBN 0
#define MCDI_EVENT_TX_ERR_TXQ_WIDTH 12
#define MCDI_EVENT_TX_ERR_TYPE_LBN 12
#define MCDI_EVENT_TX_ERR_TYPE_WIDTH 4
-#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1 /* enum */
-#define MCDI_EVENT_TX_ERR_NO_EOP 0x2 /* enum */
-#define MCDI_EVENT_TX_ERR_2BIG 0x3 /* enum */
+/* enum: Descriptor loader reported failure */
+#define MCDI_EVENT_TX_ERR_DL_FAIL 0x1
+/* enum: Descriptor ring empty and no EOP seen for packet */
+#define MCDI_EVENT_TX_ERR_NO_EOP 0x2
+/* enum: Overlength packet */
+#define MCDI_EVENT_TX_ERR_2BIG 0x3
+/* enum: Malformed option descriptor */
+#define MCDI_EVENT_TX_BAD_OPTDESC 0x5
+/* enum: Option descriptor part way through a packet */
+#define MCDI_EVENT_TX_OPT_IN_PKT 0x8
+/* enum: DMA or PIO data access error */
+#define MCDI_EVENT_TX_ERR_BAD_DMA_OR_PIO 0x9
#define MCDI_EVENT_TX_ERR_INFO_LBN 16
#define MCDI_EVENT_TX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_TX_FLUSH_TO_DRIVER_WIDTH 1
#define MCDI_EVENT_TX_FLUSH_TXQ_LBN 0
#define MCDI_EVENT_TX_FLUSH_TXQ_WIDTH 12
#define MCDI_EVENT_PTP_ERR_TYPE_LBN 0
#define MCDI_EVENT_PTP_ERR_TYPE_WIDTH 8
-#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1 /* enum */
-#define MCDI_EVENT_PTP_ERR_FILTER 0x2 /* enum */
-#define MCDI_EVENT_PTP_ERR_FIFO 0x3 /* enum */
-#define MCDI_EVENT_PTP_ERR_QUEUE 0x4 /* enum */
+/* enum: PLL lost lock */
+#define MCDI_EVENT_PTP_ERR_PLL_LOST 0x1
+/* enum: Filter overflow (PDMA) */
+#define MCDI_EVENT_PTP_ERR_FILTER 0x2
+/* enum: FIFO overflow (FPGA) */
+#define MCDI_EVENT_PTP_ERR_FIFO 0x3
+/* enum: Merge queue overflow */
+#define MCDI_EVENT_PTP_ERR_QUEUE 0x4
+#define MCDI_EVENT_AOE_ERR_TYPE_LBN 0
+#define MCDI_EVENT_AOE_ERR_TYPE_WIDTH 8
+/* enum: AOE failed to load - no valid image? */
+#define MCDI_EVENT_AOE_NO_LOAD 0x1
+/* enum: AOE FC reported an exception */
+#define MCDI_EVENT_AOE_FC_ASSERT 0x2
+/* enum: AOE FC watchdogged */
+#define MCDI_EVENT_AOE_FC_WATCHDOG 0x3
+/* enum: AOE FC failed to start */
+#define MCDI_EVENT_AOE_FC_NO_START 0x4
+/* enum: Generic AOE fault - likely to have been reported via other means too
+ * but intended for use by aoex driver.
+ */
+#define MCDI_EVENT_AOE_FAULT 0x5
+/* enum: Results of reprogramming the CPLD (status in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_CPLD_REPROGRAMMED 0x6
+/* enum: AOE loaded successfully */
+#define MCDI_EVENT_AOE_LOAD 0x7
+/* enum: AOE DMA operation completed (LSB of HOST_HANDLE in AOE_ERR_DATA) */
+#define MCDI_EVENT_AOE_DMA 0x8
+/* enum: AOE byteblaster connected/disconnected (Connection status in
+ * AOE_ERR_DATA)
+ */
+#define MCDI_EVENT_AOE_BYTEBLASTER 0x9
+#define MCDI_EVENT_AOE_ERR_DATA_LBN 8
+#define MCDI_EVENT_AOE_ERR_DATA_WIDTH 8
+#define MCDI_EVENT_RX_ERR_RXQ_LBN 0
+#define MCDI_EVENT_RX_ERR_RXQ_WIDTH 12
+#define MCDI_EVENT_RX_ERR_TYPE_LBN 12
+#define MCDI_EVENT_RX_ERR_TYPE_WIDTH 4
+#define MCDI_EVENT_RX_ERR_INFO_LBN 16
+#define MCDI_EVENT_RX_ERR_INFO_WIDTH 16
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_LBN 12
+#define MCDI_EVENT_RX_FLUSH_TO_DRIVER_WIDTH 1
+#define MCDI_EVENT_RX_FLUSH_RXQ_LBN 0
+#define MCDI_EVENT_RX_FLUSH_RXQ_WIDTH 12
+#define MCDI_EVENT_MC_REBOOT_COUNT_LBN 0
+#define MCDI_EVENT_MC_REBOOT_COUNT_WIDTH 16
#define MCDI_EVENT_DATA_LBN 0
#define MCDI_EVENT_DATA_WIDTH 32
#define MCDI_EVENT_SRC_LBN 36
@@ -275,21 +412,60 @@
#define MCDI_EVENT_EV_CODE_WIDTH 4
#define MCDI_EVENT_CODE_LBN 44
#define MCDI_EVENT_CODE_WIDTH 8
-#define MCDI_EVENT_CODE_BADSSERT 0x1 /* enum */
-#define MCDI_EVENT_CODE_PMNOTICE 0x2 /* enum */
-#define MCDI_EVENT_CODE_CMDDONE 0x3 /* enum */
-#define MCDI_EVENT_CODE_LINKCHANGE 0x4 /* enum */
-#define MCDI_EVENT_CODE_SENSOREVT 0x5 /* enum */
-#define MCDI_EVENT_CODE_SCHEDERR 0x6 /* enum */
-#define MCDI_EVENT_CODE_REBOOT 0x7 /* enum */
-#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8 /* enum */
-#define MCDI_EVENT_CODE_FWALERT 0x9 /* enum */
-#define MCDI_EVENT_CODE_FLR 0xa /* enum */
-#define MCDI_EVENT_CODE_TX_ERR 0xb /* enum */
-#define MCDI_EVENT_CODE_TX_FLUSH 0xc /* enum */
-#define MCDI_EVENT_CODE_PTP_RX 0xd /* enum */
-#define MCDI_EVENT_CODE_PTP_FAULT 0xe /* enum */
-#define MCDI_EVENT_CODE_PTP_PPS 0xf /* enum */
+/* enum: Bad assert. */
+#define MCDI_EVENT_CODE_BADSSERT 0x1
+/* enum: PM Notice. */
+#define MCDI_EVENT_CODE_PMNOTICE 0x2
+/* enum: Command done. */
+#define MCDI_EVENT_CODE_CMDDONE 0x3
+/* enum: Link change. */
+#define MCDI_EVENT_CODE_LINKCHANGE 0x4
+/* enum: Sensor Event. */
+#define MCDI_EVENT_CODE_SENSOREVT 0x5
+/* enum: Schedule error. */
+#define MCDI_EVENT_CODE_SCHEDERR 0x6
+/* enum: Reboot. */
+#define MCDI_EVENT_CODE_REBOOT 0x7
+/* enum: Mac stats DMA. */
+#define MCDI_EVENT_CODE_MAC_STATS_DMA 0x8
+/* enum: Firmware alert. */
+#define MCDI_EVENT_CODE_FWALERT 0x9
+/* enum: Function level reset. */
+#define MCDI_EVENT_CODE_FLR 0xa
+/* enum: Transmit error */
+#define MCDI_EVENT_CODE_TX_ERR 0xb
+/* enum: Tx flush has completed */
+#define MCDI_EVENT_CODE_TX_FLUSH 0xc
+/* enum: PTP packet received timestamp */
+#define MCDI_EVENT_CODE_PTP_RX 0xd
+/* enum: PTP NIC failure */
+#define MCDI_EVENT_CODE_PTP_FAULT 0xe
+/* enum: PTP PPS event */
+#define MCDI_EVENT_CODE_PTP_PPS 0xf
+/* enum: Rx flush has completed */
+#define MCDI_EVENT_CODE_RX_FLUSH 0x10
+/* enum: Receive error */
+#define MCDI_EVENT_CODE_RX_ERR 0x11
+/* enum: AOE fault */
+#define MCDI_EVENT_CODE_AOE 0x12
+/* enum: Network port calibration failed (VCAL). */
+#define MCDI_EVENT_CODE_VCAL_FAIL 0x13
+/* enum: HW PPS event */
+#define MCDI_EVENT_CODE_HW_PPS 0x14
+/* enum: The MC has rebooted (huntington and later, siena uses CODE_REBOOT and
+ * a different format)
+ */
+#define MCDI_EVENT_CODE_MC_REBOOT 0x15
+/* enum: the MC has detected a parity error */
+#define MCDI_EVENT_CODE_PAR_ERR 0x16
+/* enum: the MC has detected a correctable error */
+#define MCDI_EVENT_CODE_ECC_CORR_ERR 0x17
+/* enum: the MC has detected an uncorrectable error */
+#define MCDI_EVENT_CODE_ECC_FATAL_ERR 0x18
+/* enum: Artificial event generated by host and posted via MC for test
+ * purposes.
+ */
+#define MCDI_EVENT_CODE_TESTGEN 0xfa
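/* Illustrative sketch (not part of the patch hunks): a minimal dispatch over
 * a few of the event codes above.  Function and parameter names are local to
 * the sketch; real handling lives in the driver's event path.
 */
static void sketch_handle_mcdi_event(unsigned int code)
{
	switch (code) {
	case MCDI_EVENT_CODE_LINKCHANGE:
		/* DATA carries the LP_CAP/SPEED/FCNTL fields defined above */
		break;
	case MCDI_EVENT_CODE_SENSOREVT:
		/* decoded by efx_mcdi_sensor_event() in mcdi_mon.c */
		break;
	case MCDI_EVENT_CODE_MC_REBOOT:
		/* Huntington and later; Siena signals CODE_REBOOT instead */
		break;
	default:
		break;		/* codes not of interest are ignored here */
	}
}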
#define MCDI_EVENT_CMDDONE_DATA_OFST 0
#define MCDI_EVENT_CMDDONE_DATA_LBN 0
#define MCDI_EVENT_CMDDONE_DATA_WIDTH 32
@@ -305,15 +481,114 @@
#define MCDI_EVENT_TX_ERR_DATA_OFST 0
#define MCDI_EVENT_TX_ERR_DATA_LBN 0
#define MCDI_EVENT_TX_ERR_DATA_WIDTH 32
+/* Seconds field of timestamp */
#define MCDI_EVENT_PTP_SECONDS_OFST 0
#define MCDI_EVENT_PTP_SECONDS_LBN 0
#define MCDI_EVENT_PTP_SECONDS_WIDTH 32
+/* Nanoseconds field of timestamp */
#define MCDI_EVENT_PTP_NANOSECONDS_OFST 0
#define MCDI_EVENT_PTP_NANOSECONDS_LBN 0
#define MCDI_EVENT_PTP_NANOSECONDS_WIDTH 32
+/* Lowest four bytes of sourceUUID from PTP packet */
#define MCDI_EVENT_PTP_UUID_OFST 0
#define MCDI_EVENT_PTP_UUID_LBN 0
#define MCDI_EVENT_PTP_UUID_WIDTH 32
+#define MCDI_EVENT_RX_ERR_DATA_OFST 0
+#define MCDI_EVENT_RX_ERR_DATA_LBN 0
+#define MCDI_EVENT_RX_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_PAR_ERR_DATA_OFST 0
+#define MCDI_EVENT_PAR_ERR_DATA_LBN 0
+#define MCDI_EVENT_PAR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_CORR_ERR_DATA_WIDTH 32
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_OFST 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_LBN 0
+#define MCDI_EVENT_ECC_FATAL_ERR_DATA_WIDTH 32
+
+/* FCDI_EVENT structuredef */
+#define FCDI_EVENT_LEN 8
+#define FCDI_EVENT_CONT_LBN 32
+#define FCDI_EVENT_CONT_WIDTH 1
+#define FCDI_EVENT_LEVEL_LBN 33
+#define FCDI_EVENT_LEVEL_WIDTH 3
+/* enum: Info. */
+#define FCDI_EVENT_LEVEL_INFO 0x0
+/* enum: Warning. */
+#define FCDI_EVENT_LEVEL_WARN 0x1
+/* enum: Error. */
+#define FCDI_EVENT_LEVEL_ERR 0x2
+/* enum: Fatal. */
+#define FCDI_EVENT_LEVEL_FATAL 0x3
+#define FCDI_EVENT_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_STATUS_LBN 0
+#define FCDI_EVENT_LINK_STATE_STATUS_WIDTH 1
+#define FCDI_EVENT_LINK_DOWN 0x0 /* enum */
+#define FCDI_EVENT_LINK_UP 0x1 /* enum */
+#define FCDI_EVENT_DATA_LBN 0
+#define FCDI_EVENT_DATA_WIDTH 32
+#define FCDI_EVENT_SRC_LBN 36
+#define FCDI_EVENT_SRC_WIDTH 8
+#define FCDI_EVENT_EV_CODE_LBN 60
+#define FCDI_EVENT_EV_CODE_WIDTH 4
+#define FCDI_EVENT_CODE_LBN 44
+#define FCDI_EVENT_CODE_WIDTH 8
+/* enum: The FC was rebooted. */
+#define FCDI_EVENT_CODE_REBOOT 0x1
+/* enum: Bad assert. */
+#define FCDI_EVENT_CODE_ASSERT 0x2
+/* enum: DDR3 test result. */
+#define FCDI_EVENT_CODE_DDR_TEST_RESULT 0x3
+/* enum: Link status. */
+#define FCDI_EVENT_CODE_LINK_STATE 0x4
+/* enum: A timed read is ready to be serviced. */
+#define FCDI_EVENT_CODE_TIMED_READ 0x5
+/* enum: One or more PPS IN events */
+#define FCDI_EVENT_CODE_PPS_IN 0x6
+/* enum: One or more PPS OUT events */
+#define FCDI_EVENT_CODE_PPS_OUT 0x7
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
+#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
+#define FCDI_EVENT_ASSERT_TYPE_LBN 36
+#define FCDI_EVENT_ASSERT_TYPE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_LBN 36
+#define FCDI_EVENT_DDR_TEST_RESULT_STATUS_CODE_WIDTH 8
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_OFST 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_LBN 0
+#define FCDI_EVENT_DDR_TEST_RESULT_RESULT_WIDTH 32
+#define FCDI_EVENT_LINK_STATE_DATA_OFST 0
+#define FCDI_EVENT_LINK_STATE_DATA_LBN 0
+#define FCDI_EVENT_LINK_STATE_DATA_WIDTH 32
+#define FCDI_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EVENT_PPS_COUNT_LBN 0
+#define FCDI_EVENT_PPS_COUNT_WIDTH 32
+
+/* FCDI_EXTENDED_EVENT structuredef */
+#define FCDI_EXTENDED_EVENT_LENMIN 16
+#define FCDI_EXTENDED_EVENT_LENMAX 248
+#define FCDI_EXTENDED_EVENT_LEN(num) (8+8*(num))
+/* Number of timestamps following */
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_OFST 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_LBN 0
+#define FCDI_EXTENDED_EVENT_PPS_COUNT_WIDTH 32
+/* Seconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_SECONDS_WIDTH 32
+/* Nanoseconds field of a timestamp record */
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_LBN 96
+#define FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_WIDTH 32
+/* Timestamp records comprising the event */
+#define FCDI_EXTENDED_EVENT_PPS_TIME_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIME_LEN 8
+#define FCDI_EXTENDED_EVENT_PPS_TIME_LO_OFST 8
+#define FCDI_EXTENDED_EVENT_PPS_TIME_HI_OFST 12
+#define FCDI_EXTENDED_EVENT_PPS_TIME_MINNUM 1
+#define FCDI_EXTENDED_EVENT_PPS_TIME_MAXNUM 30
+#define FCDI_EXTENDED_EVENT_PPS_TIME_LBN 64
+#define FCDI_EXTENDED_EVENT_PPS_TIME_WIDTH 64
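/* Illustrative sketch (not part of the patch hunks): walking the timestamp
 * records of an FCDI_EXTENDED_EVENT using the offsets above.  `ev` points at
 * the event payload and `ev_len` is its length in bytes; byte-order
 * conversion is omitted for brevity.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void sketch_walk_pps_records(const uint8_t *ev, size_t ev_len)
{
	uint32_t count, sec, nsec;
	size_t i;

	if (ev_len < FCDI_EXTENDED_EVENT_LENMIN)
		return;
	memcpy(&count, ev + FCDI_EXTENDED_EVENT_PPS_COUNT_OFST, 4);
	for (i = 0; i < count && FCDI_EXTENDED_EVENT_LEN(i + 1) <= ev_len; i++) {
		memcpy(&sec, ev + FCDI_EXTENDED_EVENT_PPS_SECONDS_OFST +
		       8 * i, 4);
		memcpy(&nsec, ev + FCDI_EXTENDED_EVENT_PPS_NANOSECONDS_OFST +
		       8 * i, 4);
		/* (sec, nsec) is one PPS timestamp record */
	}
}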
/***********************************/
@@ -365,11 +640,27 @@
/* MC_CMD_COPYCODE_IN msgrequest */
#define MC_CMD_COPYCODE_IN_LEN 16
+/* Source address */
#define MC_CMD_COPYCODE_IN_SRC_ADDR_OFST 0
+/* enum: Entering the main image via a copy of a single word from and to this
+ * address indicates that it should not attempt to start the datapath CPUs.
+ * This is useful for certain soft rebooting scenarios. (Huntington only)
+ */
+#define MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR 0x1d0d0
+/* enum: Entering the main image via a copy of a single word from and to this
+ * address indicates that it should not attempt to parse any configuration from
+ * flash. (In addition, the datapath CPUs will not be started, as for
+ * MC_CMD_COPYCODE_HUNT_NO_DATAPATH_MAGIC_ADDR above.) This is useful for
+ * certain soft rebooting scenarios. (Huntington only)
+ */
+#define MC_CMD_COPYCODE_HUNT_IGNORE_CONFIG_MAGIC_ADDR 0x1badc
+/* Destination address */
#define MC_CMD_COPYCODE_IN_DEST_ADDR_OFST 4
#define MC_CMD_COPYCODE_IN_NUMWORDS_OFST 8
+/* Address of where to jump after copy. */
#define MC_CMD_COPYCODE_IN_JUMP_OFST 12
-#define MC_CMD_COPYCODE_JUMP_NONE 0x1 /* enum */
+/* enum: Control should return to the caller rather than jumping */
+#define MC_CMD_COPYCODE_JUMP_NONE 0x1
/* MC_CMD_COPYCODE_OUT msgresponse */
#define MC_CMD_COPYCODE_OUT_LEN 0
@@ -377,11 +668,13 @@
/***********************************/
/* MC_CMD_SET_FUNC
+ * Select function for function-specific commands.
*/
#define MC_CMD_SET_FUNC 0x4
/* MC_CMD_SET_FUNC_IN msgrequest */
#define MC_CMD_SET_FUNC_IN_LEN 4
+/* Set function */
#define MC_CMD_SET_FUNC_IN_FUNC_OFST 0
/* MC_CMD_SET_FUNC_OUT msgresponse */
@@ -390,6 +683,7 @@
/***********************************/
/* MC_CMD_GET_BOOT_STATUS
+ * Get the instruction address from which the MC booted.
*/
#define MC_CMD_GET_BOOT_STATUS 0x5
@@ -398,7 +692,10 @@
/* MC_CMD_GET_BOOT_STATUS_OUT msgresponse */
#define MC_CMD_GET_BOOT_STATUS_OUT_LEN 8
+/* ?? */
#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_OFST 0
+/* enum: indicates that the MC wasn't flash booted */
+#define MC_CMD_GET_BOOT_STATUS_OUT_BOOT_OFFSET_NULL 0xdeadbeef
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_OFST 4
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_LBN 0
#define MC_CMD_GET_BOOT_STATUS_OUT_FLAGS_WATCHDOG_WIDTH 1
@@ -410,25 +707,38 @@
/***********************************/
/* MC_CMD_GET_ASSERTS
- * Get and clear any assertion status.
+ * Get (and optionally clear) the current assertion status. Only
+ * OUT.GLOBAL_FLAGS is guaranteed to exist in the completion payload. The other
+ * fields will only be present if OUT.GLOBAL_FLAGS != NO_FAILS
*/
#define MC_CMD_GET_ASSERTS 0x6
/* MC_CMD_GET_ASSERTS_IN msgrequest */
#define MC_CMD_GET_ASSERTS_IN_LEN 4
+/* Set to clear assertion */
#define MC_CMD_GET_ASSERTS_IN_CLEAR_OFST 0
/* MC_CMD_GET_ASSERTS_OUT msgresponse */
#define MC_CMD_GET_ASSERTS_OUT_LEN 140
+/* Assertion status flag. */
#define MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST 0
-#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1 /* enum */
-#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2 /* enum */
-#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3 /* enum */
-#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4 /* enum */
+/* enum: No assertions have failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS 0x1
+/* enum: A system-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL 0x2
+/* enum: A thread-level assertion has failed. */
+#define MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL 0x3
+/* enum: The system was reset by the watchdog. */
+#define MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED 0x4
+/* enum: An illegal address trap stopped the system (huntington and later) */
+#define MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP 0x5
+/* Failing PC value */
#define MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST 4
+/* Saved GP regs */
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST 8
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_LEN 4
#define MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM 31
+/* Failing thread address */
#define MC_CMD_GET_ASSERTS_OUT_THREAD_OFFS_OFST 132
#define MC_CMD_GET_ASSERTS_OUT_RESERVED_OFST 136
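/* Illustrative sketch (not part of the patch hunks): per the comment above,
 * only OUT.GLOBAL_FLAGS is guaranteed, so check it before trusting the saved
 * PC / register fields.  `outbuf`/`outlen` hold a completed GET_ASSERTS
 * response; byte-order conversion is omitted for brevity.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static int sketch_mc_asserted(const uint8_t *outbuf, size_t outlen)
{
	uint32_t flags;

	if (outlen < MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST + 4)
		return -1;			/* malformed response */
	memcpy(&flags, outbuf + MC_CMD_GET_ASSERTS_OUT_GLOBAL_FLAGS_OFST, 4);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return 0;			/* nothing to report */
	if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
		return -1;			/* truncated failure record */
	/* Details start at MC_CMD_GET_ASSERTS_OUT_SAVED_PC_OFFS_OFST */
	return 1;
}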
@@ -441,9 +751,12 @@
/* MC_CMD_LOG_CTRL_IN msgrequest */
#define MC_CMD_LOG_CTRL_IN_LEN 8
+/* Log destination */
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_OFST 0
-#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1 /* enum */
-#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2 /* enum */
+/* enum: UART. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_UART 0x1
+/* enum: Event queue. */
+#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ 0x2
#define MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ_OFST 4
/* MC_CMD_LOG_CTRL_OUT msgresponse */
@@ -459,11 +772,20 @@
/* MC_CMD_GET_VERSION_IN msgrequest */
#define MC_CMD_GET_VERSION_IN_LEN 0
-/* MC_CMD_GET_VERSION_V0_OUT msgresponse */
+/* MC_CMD_GET_VERSION_EXT_IN msgrequest: Asks for the extended version */
+#define MC_CMD_GET_VERSION_EXT_IN_LEN 4
+/* placeholder, set to 0 */
+#define MC_CMD_GET_VERSION_EXT_IN_EXT_FLAGS_OFST 0
+
+/* MC_CMD_GET_VERSION_V0_OUT msgresponse: deprecated version format */
#define MC_CMD_GET_VERSION_V0_OUT_LEN 4
#define MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0
-#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff /* enum */
-#define MC_CMD_GET_VERSION_OUT_FIRMWARE_BOOTROM 0xb0070000 /* enum */
+/* enum: Reserved version number to indicate "any" version. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_ANY 0xffffffff
+/* enum: Bootrom version value for Siena. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_SIENA_BOOTROM 0xb0070000
+/* enum: Bootrom version value for Huntington. */
+#define MC_CMD_GET_VERSION_OUT_FIRMWARE_HUNT_BOOTROM 0xb0070001
/* MC_CMD_GET_VERSION_OUT msgresponse */
#define MC_CMD_GET_VERSION_OUT_LEN 32
@@ -471,6 +793,7 @@
/* Enum values, see field(s): */
/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
#define MC_CMD_GET_VERSION_OUT_PCOL_OFST 4
+/* 128bit mask of functions supported by the current firmware */
#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_OFST 8
#define MC_CMD_GET_VERSION_OUT_SUPPORTED_FUNCS_LEN 16
#define MC_CMD_GET_VERSION_OUT_VERSION_OFST 24
@@ -478,46 +801,22 @@
#define MC_CMD_GET_VERSION_OUT_VERSION_LO_OFST 24
#define MC_CMD_GET_VERSION_OUT_VERSION_HI_OFST 28
-
-/***********************************/
-/* MC_CMD_GET_FPGAREG
- * Read multiple bytes from PTP FPGA.
- */
-#define MC_CMD_GET_FPGAREG 0x9
-
-/* MC_CMD_GET_FPGAREG_IN msgrequest */
-#define MC_CMD_GET_FPGAREG_IN_LEN 8
-#define MC_CMD_GET_FPGAREG_IN_ADDR_OFST 0
-#define MC_CMD_GET_FPGAREG_IN_NUMBYTES_OFST 4
-
-/* MC_CMD_GET_FPGAREG_OUT msgresponse */
-#define MC_CMD_GET_FPGAREG_OUT_LENMIN 1
-#define MC_CMD_GET_FPGAREG_OUT_LENMAX 252
-#define MC_CMD_GET_FPGAREG_OUT_LEN(num) (0+1*(num))
-#define MC_CMD_GET_FPGAREG_OUT_BUFFER_OFST 0
-#define MC_CMD_GET_FPGAREG_OUT_BUFFER_LEN 1
-#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MINNUM 1
-#define MC_CMD_GET_FPGAREG_OUT_BUFFER_MAXNUM 252
-
-
-/***********************************/
-/* MC_CMD_PUT_FPGAREG
- * Write multiple bytes to PTP FPGA.
- */
-#define MC_CMD_PUT_FPGAREG 0xa
-
-/* MC_CMD_PUT_FPGAREG_IN msgrequest */
-#define MC_CMD_PUT_FPGAREG_IN_LENMIN 5
-#define MC_CMD_PUT_FPGAREG_IN_LENMAX 252
-#define MC_CMD_PUT_FPGAREG_IN_LEN(num) (4+1*(num))
-#define MC_CMD_PUT_FPGAREG_IN_ADDR_OFST 0
-#define MC_CMD_PUT_FPGAREG_IN_BUFFER_OFST 4
-#define MC_CMD_PUT_FPGAREG_IN_BUFFER_LEN 1
-#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MINNUM 1
-#define MC_CMD_PUT_FPGAREG_IN_BUFFER_MAXNUM 248
-
-/* MC_CMD_PUT_FPGAREG_OUT msgresponse */
-#define MC_CMD_PUT_FPGAREG_OUT_LEN 0
+/* MC_CMD_GET_VERSION_EXT_OUT msgresponse */
+#define MC_CMD_GET_VERSION_EXT_OUT_LEN 48
+/* MC_CMD_GET_VERSION_OUT_FIRMWARE_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_GET_VERSION_V0_OUT/MC_CMD_GET_VERSION_OUT_FIRMWARE */
+#define MC_CMD_GET_VERSION_EXT_OUT_PCOL_OFST 4
+/* 128bit mask of functions supported by the current firmware */
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_OFST 8
+#define MC_CMD_GET_VERSION_EXT_OUT_SUPPORTED_FUNCS_LEN 16
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LEN 8
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_LO_OFST 24
+#define MC_CMD_GET_VERSION_EXT_OUT_VERSION_HI_OFST 28
+/* extra info */
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_OFST 32
+#define MC_CMD_GET_VERSION_EXT_OUT_EXTRA_LEN 16
/***********************************/
@@ -528,32 +827,74 @@
/* MC_CMD_PTP_IN msgrequest */
#define MC_CMD_PTP_IN_LEN 1
+/* PTP operation code */
#define MC_CMD_PTP_IN_OP_OFST 0
#define MC_CMD_PTP_IN_OP_LEN 1
-#define MC_CMD_PTP_OP_ENABLE 0x1 /* enum */
-#define MC_CMD_PTP_OP_DISABLE 0x2 /* enum */
-#define MC_CMD_PTP_OP_TRANSMIT 0x3 /* enum */
-#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4 /* enum */
-#define MC_CMD_PTP_OP_STATUS 0x5 /* enum */
-#define MC_CMD_PTP_OP_ADJUST 0x6 /* enum */
-#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7 /* enum */
-#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8 /* enum */
-#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9 /* enum */
-#define MC_CMD_PTP_OP_RESET_STATS 0xa /* enum */
-#define MC_CMD_PTP_OP_DEBUG 0xb /* enum */
-#define MC_CMD_PTP_OP_MAX 0xc /* enum */
+/* enum: Enable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_ENABLE 0x1
+/* enum: Disable PTP packet timestamping operation. */
+#define MC_CMD_PTP_OP_DISABLE 0x2
+/* enum: Send a PTP packet. */
+#define MC_CMD_PTP_OP_TRANSMIT 0x3
+/* enum: Read the current NIC time. */
+#define MC_CMD_PTP_OP_READ_NIC_TIME 0x4
+/* enum: Get the current PTP status. */
+#define MC_CMD_PTP_OP_STATUS 0x5
+/* enum: Adjust the PTP NIC's time. */
+#define MC_CMD_PTP_OP_ADJUST 0x6
+/* enum: Synchronize host and NIC time. */
+#define MC_CMD_PTP_OP_SYNCHRONIZE 0x7
+/* enum: Basic manufacturing tests. */
+#define MC_CMD_PTP_OP_MANFTEST_BASIC 0x8
+/* enum: Packet based manufacturing tests. */
+#define MC_CMD_PTP_OP_MANFTEST_PACKET 0x9
+/* enum: Reset some of the PTP related statistics */
+#define MC_CMD_PTP_OP_RESET_STATS 0xa
+/* enum: Debug operations to MC. */
+#define MC_CMD_PTP_OP_DEBUG 0xb
+/* enum: Read an FPGA register */
+#define MC_CMD_PTP_OP_FPGAREAD 0xc
+/* enum: Write an FPGA register */
+#define MC_CMD_PTP_OP_FPGAWRITE 0xd
+/* enum: Apply an offset to the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_OFFSET_ADJUST 0xe
+/* enum: Adjust the frequency of the NIC clock */
+#define MC_CMD_PTP_OP_CLOCK_FREQ_ADJUST 0xf
+/* enum: Set the MC packet filter VLAN tags for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_VLAN_FILTER 0x10
+/* enum: Set the MC packet filter UUID for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_UUID_FILTER 0x11
+/* enum: Set the MC packet filter Domain for received PTP packets */
+#define MC_CMD_PTP_OP_RX_SET_DOMAIN_FILTER 0x12
+/* enum: Set the clock source */
+#define MC_CMD_PTP_OP_SET_CLK_SRC 0x13
+/* enum: Reset value of Timer Reg. */
+#define MC_CMD_PTP_OP_RST_CLK 0x14
+/* enum: Enable the forwarding of PPS events to the host */
+#define MC_CMD_PTP_OP_PPS_ENABLE 0x15
+/* enum: Above this for future use. */
+#define MC_CMD_PTP_OP_MAX 0x16
/* MC_CMD_PTP_IN_ENABLE msgrequest */
#define MC_CMD_PTP_IN_ENABLE_LEN 16
#define MC_CMD_PTP_IN_CMD_OFST 0
#define MC_CMD_PTP_IN_PERIPH_ID_OFST 4
+/* Event queue for PTP events */
#define MC_CMD_PTP_IN_ENABLE_QUEUE_OFST 8
+/* PTP timestamping mode */
#define MC_CMD_PTP_IN_ENABLE_MODE_OFST 12
-#define MC_CMD_PTP_MODE_V1 0x0 /* enum */
-#define MC_CMD_PTP_MODE_V1_VLAN 0x1 /* enum */
-#define MC_CMD_PTP_MODE_V2 0x2 /* enum */
-#define MC_CMD_PTP_MODE_V2_VLAN 0x3 /* enum */
-#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4 /* enum */
+/* enum: PTP, version 1 */
+#define MC_CMD_PTP_MODE_V1 0x0
+/* enum: PTP, version 1, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V1_VLAN 0x1
+/* enum: PTP, version 2 */
+#define MC_CMD_PTP_MODE_V2 0x2
+/* enum: PTP, version 2, with VLAN headers - deprecated */
+#define MC_CMD_PTP_MODE_V2_VLAN 0x3
+/* enum: PTP, version 2, with improved UUID filtering */
+#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4
+/* enum: FCoE (seconds and microseconds) */
+#define MC_CMD_PTP_MODE_FCOE 0x5
/* MC_CMD_PTP_IN_DISABLE msgrequest */
#define MC_CMD_PTP_IN_DISABLE_LEN 8
@@ -566,7 +907,9 @@
#define MC_CMD_PTP_IN_TRANSMIT_LEN(num) (12+1*(num))
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Transmit packet length */
#define MC_CMD_PTP_IN_TRANSMIT_LENGTH_OFST 8
+/* Transmit packet data */
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST 12
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_LEN 1
#define MC_CMD_PTP_IN_TRANSMIT_PACKET_MINNUM 1
@@ -586,19 +929,27 @@
#define MC_CMD_PTP_IN_ADJUST_LEN 24
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Frequency adjustment 40 bit fixed point ns */
#define MC_CMD_PTP_IN_ADJUST_FREQ_OFST 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_LEN 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_LO_OFST 8
#define MC_CMD_PTP_IN_ADJUST_FREQ_HI_OFST 12
-#define MC_CMD_PTP_IN_ADJUST_BITS 0x28 /* enum */
+/* enum: Number of fractional bits in frequency adjustment */
+#define MC_CMD_PTP_IN_ADJUST_BITS 0x28
+/* Time adjustment in seconds */
#define MC_CMD_PTP_IN_ADJUST_SECONDS_OFST 16
+/* Time adjustment in nanoseconds */
#define MC_CMD_PTP_IN_ADJUST_NANOSECONDS_OFST 20
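/* Illustrative sketch (not part of the patch hunks): encoding a frequency
 * adjustment as the 40-fractional-bit fixed-point value carried in FREQ
 * above (MC_CMD_PTP_IN_ADJUST_BITS == 0x28 == 40).  Treating the input as a
 * plain ratio in nanosecond units is an assumption made for the example.
 */
#include <stdint.h>

static int64_t sketch_freq_to_fixed40(double adjustment)
{
	double scaled = adjustment * (double)(1ULL << 40);

	/* round to the nearest representable fixed-point value */
	return (int64_t)(scaled + (scaled >= 0 ? 0.5 : -0.5));
}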
/* MC_CMD_PTP_IN_SYNCHRONIZE msgrequest */
#define MC_CMD_PTP_IN_SYNCHRONIZE_LEN 20
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Number of time readings to capture */
#define MC_CMD_PTP_IN_SYNCHRONIZE_NUMTIMESETS_OFST 8
+/* Host address in which to write "synchronization started" indication (64
+ * bits)
+ */
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_OFST 12
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LEN 8
#define MC_CMD_PTP_IN_SYNCHRONIZE_START_ADDR_LO_OFST 12
@@ -613,86 +964,240 @@
#define MC_CMD_PTP_IN_MANFTEST_PACKET_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Enable or disable packet testing */
#define MC_CMD_PTP_IN_MANFTEST_PACKET_TEST_ENABLE_OFST 8
/* MC_CMD_PTP_IN_RESET_STATS msgrequest */
#define MC_CMD_PTP_IN_RESET_STATS_LEN 8
/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Reset PTP statistics */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
/* MC_CMD_PTP_IN_DEBUG msgrequest */
#define MC_CMD_PTP_IN_DEBUG_LEN 12
/* MC_CMD_PTP_IN_CMD_OFST 0 */
/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Debug operations */
#define MC_CMD_PTP_IN_DEBUG_DEBUG_PARAM_OFST 8
+/* MC_CMD_PTP_IN_FPGAREAD msgrequest */
+#define MC_CMD_PTP_IN_FPGAREAD_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_FPGAREAD_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAREAD_NUMBYTES_OFST 12
+
+/* MC_CMD_PTP_IN_FPGAWRITE msgrequest */
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMIN 13
+#define MC_CMD_PTP_IN_FPGAWRITE_LENMAX 252
+#define MC_CMD_PTP_IN_FPGAWRITE_LEN(num) (12+1*(num))
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+#define MC_CMD_PTP_IN_FPGAWRITE_ADDR_OFST 8
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_OFST 12
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_LEN 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MINNUM 1
+#define MC_CMD_PTP_IN_FPGAWRITE_BUFFER_MAXNUM 240
+
+/* MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Time adjustment in seconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_SECONDS_OFST 8
+/* Time adjustment in nanoseconds */
+#define MC_CMD_PTP_IN_CLOCK_OFFSET_ADJUST_NANOSECONDS_OFST 12
+
+/* MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST msgrequest */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Frequency adjustment 40 bit fixed point ns */
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LEN 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_LO_OFST 8
+#define MC_CMD_PTP_IN_CLOCK_FREQ_ADJUST_FREQ_HI_OFST 12
+/* enum: Number of fractional bits in frequency adjustment */
+/* MC_CMD_PTP_IN_ADJUST_BITS 0x28 */
+
+/* MC_CMD_PTP_IN_RX_SET_VLAN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_LEN 24
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Number of VLAN tags, 0 if not VLAN */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_NUM_VLAN_TAGS_OFST 8
+/* Set of VLAN tags to filter against */
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_LEN 4
+#define MC_CMD_PTP_IN_RX_SET_VLAN_FILTER_VLAN_TAG_NUM 3
+
+/* MC_CMD_PTP_IN_RX_SET_UUID_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_LEN 20
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable UUID filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_ENABLE_OFST 8
+/* UUID to filter against */
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LEN 8
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_LO_OFST 12
+#define MC_CMD_PTP_IN_RX_SET_UUID_FILTER_UUID_HI_OFST 16
+
+/* MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER msgrequest */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_LEN 16
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* 1 to enable Domain filtering, 0 to disable */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_ENABLE_OFST 8
+/* Domain number to filter against */
+#define MC_CMD_PTP_IN_RX_SET_DOMAIN_FILTER_DOMAIN_OFST 12
+
+/* MC_CMD_PTP_IN_SET_CLK_SRC msgrequest */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+/* Set the clock source. */
+#define MC_CMD_PTP_IN_SET_CLK_SRC_CLK_OFST 8
+/* enum: Internal. */
+#define MC_CMD_PTP_CLK_SRC_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_PTP_CLK_SRC_EXTERNAL 0x1
+
+/* MC_CMD_PTP_IN_RST_CLK msgrequest */
+#define MC_CMD_PTP_IN_RST_CLK_LEN 8
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Reset value of Timer Reg. */
+/* MC_CMD_PTP_IN_PERIPH_ID_OFST 4 */
+
+/* MC_CMD_PTP_IN_PPS_ENABLE msgrequest */
+#define MC_CMD_PTP_IN_PPS_ENABLE_LEN 12
+/* MC_CMD_PTP_IN_CMD_OFST 0 */
+/* Enable or disable */
+#define MC_CMD_PTP_IN_PPS_ENABLE_OP_OFST 4
+/* enum: Enable */
+#define MC_CMD_PTP_ENABLE_PPS 0x0
+/* enum: Disable */
+#define MC_CMD_PTP_DISABLE_PPS 0x1
+/* Queue ID to send events back to */
+#define MC_CMD_PTP_IN_PPS_ENABLE_QUEUE_ID_OFST 8
+
/* MC_CMD_PTP_OUT msgresponse */
#define MC_CMD_PTP_OUT_LEN 0
/* MC_CMD_PTP_OUT_TRANSMIT msgresponse */
#define MC_CMD_PTP_OUT_TRANSMIT_LEN 8
+/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_SECONDS_OFST 0
+/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_TRANSMIT_NANOSECONDS_OFST 4
/* MC_CMD_PTP_OUT_READ_NIC_TIME msgresponse */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_LEN 8
+/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_SECONDS_OFST 0
+/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_READ_NIC_TIME_NANOSECONDS_OFST 4
/* MC_CMD_PTP_OUT_STATUS msgresponse */
#define MC_CMD_PTP_OUT_STATUS_LEN 64
+/* Frequency of NIC's hardware clock */
#define MC_CMD_PTP_OUT_STATUS_CLOCK_FREQ_OFST 0
+/* Number of packets transmitted and timestamped */
#define MC_CMD_PTP_OUT_STATUS_STATS_TX_OFST 4
+/* Number of packets received and timestamped */
#define MC_CMD_PTP_OUT_STATUS_STATS_RX_OFST 8
+/* Number of packets timestamped by the FPGA */
#define MC_CMD_PTP_OUT_STATUS_STATS_TS_OFST 12
+/* Number of packets filter matched */
#define MC_CMD_PTP_OUT_STATUS_STATS_FM_OFST 16
+/* Number of packets not filter matched */
#define MC_CMD_PTP_OUT_STATUS_STATS_NFM_OFST 20
+/* Number of PPS overflows (noise on input?) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFLOW_OFST 24
+/* Number of PPS bad periods */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_BAD_OFST 28
+/* Minimum period of PPS pulse */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MIN_OFST 32
+/* Maximum period of PPS pulse */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MAX_OFST 36
+/* Last period of PPS pulse */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_LAST_OFST 40
+/* Mean period of PPS pulse */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_PER_MEAN_OFST 44
+/* Minimum offset of PPS pulse (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MIN_OFST 48
+/* Maximum offset of PPS pulse (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MAX_OFST 52
+/* Last offset of PPS pulse (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_LAST_OFST 56
+/* Mean offset of PPS pulse (signed) */
#define MC_CMD_PTP_OUT_STATUS_STATS_PPS_OFF_MEAN_OFST 60
/* MC_CMD_PTP_OUT_SYNCHRONIZE msgresponse */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMIN 20
#define MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX 240
#define MC_CMD_PTP_OUT_SYNCHRONIZE_LEN(num) (0+20*(num))
+/* A set of host and NIC times */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_OFST 0
#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN 20
#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MINNUM 1
#define MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM 12
+/* Host time immediately before NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTSTART_OFST 0
+/* Value of seconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_SECONDS_OFST 4
+/* Value of nanoseconds timestamp */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_NANOSECONDS_OFST 8
+/* Host time immediately after NIC's hardware clock read */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_HOSTEND_OFST 12
+/* Number of nanoseconds waited after reading NIC's hardware clock */
#define MC_CMD_PTP_OUT_SYNCHRONIZE_WAITNS_OFST 16
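/* Illustrative sketch (not part of the patch hunks): one way to use the
 * TIMESET records above is to prefer the sample with the narrowest
 * HOSTSTART..HOSTEND window, since that window bounds the uncertainty of
 * the NIC clock reading.  The struct mirrors the 20-byte TIMESET layout;
 * the selection policy itself is an assumption for the example.
 */
#include <stdint.h>

struct sketch_timeset {
	uint32_t hoststart;	/* host time before NIC clock read */
	uint32_t seconds;	/* NIC seconds timestamp */
	uint32_t nanoseconds;	/* NIC nanoseconds timestamp */
	uint32_t hostend;	/* host time after NIC clock read */
	uint32_t waitns;	/* ns waited after reading NIC clock */
};

static unsigned int sketch_best_timeset(const struct sketch_timeset *ts,
					unsigned int n)
{
	unsigned int i, best = 0;
	uint32_t best_window = ts[0].hostend - ts[0].hoststart;

	for (i = 1; i < n; i++) {	/* caller guarantees n >= 1 */
		uint32_t window = ts[i].hostend - ts[i].hoststart;

		if (window < best_window) {
			best_window = window;
			best = i;
		}
	}
	return best;
}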
/* MC_CMD_PTP_OUT_MANFTEST_BASIC msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_LEN 8
+/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_RESULT_OFST 0
-#define MC_CMD_PTP_MANF_SUCCESS 0x0 /* enum */
-#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1 /* enum */
-#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2 /* enum */
-#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3 /* enum */
-#define MC_CMD_PTP_MANF_OSCILLATOR 0x4 /* enum */
-#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5 /* enum */
-#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6 /* enum */
-#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7 /* enum */
-#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8 /* enum */
-#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9 /* enum */
+/* enum: Successful test */
+#define MC_CMD_PTP_MANF_SUCCESS 0x0
+/* enum: FPGA load failed */
+#define MC_CMD_PTP_MANF_FPGA_LOAD 0x1
+/* enum: FPGA version invalid */
+#define MC_CMD_PTP_MANF_FPGA_VERSION 0x2
+/* enum: FPGA registers incorrect */
+#define MC_CMD_PTP_MANF_FPGA_REGISTERS 0x3
+/* enum: Oscillator possibly not working? */
+#define MC_CMD_PTP_MANF_OSCILLATOR 0x4
+/* enum: Timestamps not increasing */
+#define MC_CMD_PTP_MANF_TIMESTAMPS 0x5
+/* enum: Mismatched packet count */
+#define MC_CMD_PTP_MANF_PACKET_COUNT 0x6
+/* enum: Mismatched packet count (Siena filter and FPGA) */
+#define MC_CMD_PTP_MANF_FILTER_COUNT 0x7
+/* enum: Not enough packets to perform timestamp check */
+#define MC_CMD_PTP_MANF_PACKET_ENOUGH 0x8
+/* enum: Timestamp trigger GPIO not working */
+#define MC_CMD_PTP_MANF_GPIO_TRIGGER 0x9
+/* Presence of external oscillator */
#define MC_CMD_PTP_OUT_MANFTEST_BASIC_TEST_EXTOSC_OFST 4
/* MC_CMD_PTP_OUT_MANFTEST_PACKET msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_LEN 12
+/* Results of testing */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_RESULT_OFST 0
+/* Number of packets received by FPGA */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FPGACOUNT_OFST 4
+/* Number of packets received by Siena filters */
#define MC_CMD_PTP_OUT_MANFTEST_PACKET_TEST_FILTERCOUNT_OFST 8
+/* MC_CMD_PTP_OUT_FPGAREAD msgresponse */
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMIN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_LENMAX 252
+#define MC_CMD_PTP_OUT_FPGAREAD_LEN(num) (0+1*(num))
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_OFST 0
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_LEN 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MINNUM 1
+#define MC_CMD_PTP_OUT_FPGAREAD_BUFFER_MAXNUM 252
+
/***********************************/
/* MC_CMD_CSR_READ32
@@ -702,6 +1207,7 @@
/* MC_CMD_CSR_READ32_IN msgrequest */
#define MC_CMD_CSR_READ32_IN_LEN 12
+/* Address */
#define MC_CMD_CSR_READ32_IN_ADDR_OFST 0
#define MC_CMD_CSR_READ32_IN_STEP_OFST 4
#define MC_CMD_CSR_READ32_IN_NUMWORDS_OFST 8
@@ -710,6 +1216,7 @@
#define MC_CMD_CSR_READ32_OUT_LENMIN 4
#define MC_CMD_CSR_READ32_OUT_LENMAX 252
#define MC_CMD_CSR_READ32_OUT_LEN(num) (0+4*(num))
+/* The last dword is the status, not a value read */
#define MC_CMD_CSR_READ32_OUT_BUFFER_OFST 0
#define MC_CMD_CSR_READ32_OUT_BUFFER_LEN 4
#define MC_CMD_CSR_READ32_OUT_BUFFER_MINNUM 1
@@ -726,6 +1233,7 @@
#define MC_CMD_CSR_WRITE32_IN_LENMIN 12
#define MC_CMD_CSR_WRITE32_IN_LENMAX 252
#define MC_CMD_CSR_WRITE32_IN_LEN(num) (8+4*(num))
+/* Address */
#define MC_CMD_CSR_WRITE32_IN_ADDR_OFST 0
#define MC_CMD_CSR_WRITE32_IN_STEP_OFST 4
#define MC_CMD_CSR_WRITE32_IN_BUFFER_OFST 8
@@ -739,6 +1247,48 @@
/***********************************/
+/* MC_CMD_HP
+ * These commands are used for HP related features. They are grouped under one
+ * MCDI command to avoid creating too many MCDI commands.
+ */
+#define MC_CMD_HP 0x54
+
+/* MC_CMD_HP_IN msgrequest */
+#define MC_CMD_HP_IN_LEN 16
+/* HP OCSD sub-command. When address is not NULL, request activation of OCSD at
+ * the specified address with the specified interval. When address is NULL,
+ * INTERVAL is interpreted as a command: 0: stop OCSD / 1: Report OCSD current
+ * state / 2: (debug) Show temperature reported by one of the supported
+ * sensors.
+ */
+#define MC_CMD_HP_IN_SUBCMD_OFST 0
+/* enum: OCSD (Option Card Sensor Data) sub-command. */
+#define MC_CMD_HP_IN_OCSD_SUBCMD 0x0
+/* enum: Last known valid HP sub-command. */
+#define MC_CMD_HP_IN_LAST_SUBCMD 0x0
+/* The address to the array of sensor fields. (Or NULL to use a sub-command.)
+ */
+#define MC_CMD_HP_IN_OCSD_ADDR_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_LEN 8
+#define MC_CMD_HP_IN_OCSD_ADDR_LO_OFST 4
+#define MC_CMD_HP_IN_OCSD_ADDR_HI_OFST 8
+/* The requested update interval, in seconds. (Or the sub-command if ADDR is
+ * NULL.)
+ */
+#define MC_CMD_HP_IN_OCSD_INTERVAL_OFST 12
+
+/* MC_CMD_HP_OUT msgresponse */
+#define MC_CMD_HP_OUT_LEN 4
+#define MC_CMD_HP_OUT_OCSD_STATUS_OFST 0
+/* enum: OCSD stopped for this card. */
+#define MC_CMD_HP_OUT_OCSD_STOPPED 0x1
+/* enum: OCSD was successfully started with the address provided. */
+#define MC_CMD_HP_OUT_OCSD_STARTED 0x2
+/* enum: OCSD was already started for this card. */
+#define MC_CMD_HP_OUT_OCSD_ALREADY_STARTED 0x3
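/* Illustrative sketch (not part of the patch hunks): filling in an MC_CMD_HP
 * request according to the comment above.  A zero ADDR makes INTERVAL act as
 * the sub-command selector (0 stop / 1 report / 2 debug); a non-zero ADDR
 * starts OCSD reporting at that address.  Plain memcpy stands in for the
 * driver's MCDI_SET_* helpers, and byte-order conversion is omitted.
 */
#include <stdint.h>
#include <string.h>

static void sketch_fill_hp_request(uint8_t *buf, uint64_t addr,
				   uint32_t interval_or_cmd)
{
	uint32_t subcmd = MC_CMD_HP_IN_OCSD_SUBCMD;
	uint32_t addr_lo = (uint32_t)addr;
	uint32_t addr_hi = (uint32_t)(addr >> 32);

	memset(buf, 0, MC_CMD_HP_IN_LEN);
	memcpy(buf + MC_CMD_HP_IN_SUBCMD_OFST, &subcmd, 4);
	memcpy(buf + MC_CMD_HP_IN_OCSD_ADDR_LO_OFST, &addr_lo, 4);
	memcpy(buf + MC_CMD_HP_IN_OCSD_ADDR_HI_OFST, &addr_hi, 4);
	memcpy(buf + MC_CMD_HP_IN_OCSD_INTERVAL_OFST, &interval_or_cmd, 4);
}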
+
+
+/***********************************/
/* MC_CMD_STACKINFO
* Get stack information.
*/
@@ -751,6 +1301,7 @@
#define MC_CMD_STACKINFO_OUT_LENMIN 12
#define MC_CMD_STACKINFO_OUT_LENMAX 252
#define MC_CMD_STACKINFO_OUT_LEN(num) (0+12*(num))
+/* (thread ptr, stack size, free space) for each thread in system */
#define MC_CMD_STACKINFO_OUT_THREAD_INFO_OFST 0
#define MC_CMD_STACKINFO_OUT_THREAD_INFO_LEN 12
#define MC_CMD_STACKINFO_OUT_THREAD_INFO_MINNUM 1
@@ -765,19 +1316,35 @@
/* MC_CMD_MDIO_READ_IN msgrequest */
#define MC_CMD_MDIO_READ_IN_LEN 16
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
#define MC_CMD_MDIO_READ_IN_BUS_OFST 0
-#define MC_CMD_MDIO_BUS_INTERNAL 0x0 /* enum */
-#define MC_CMD_MDIO_BUS_EXTERNAL 0x1 /* enum */
+/* enum: Internal. */
+#define MC_CMD_MDIO_BUS_INTERNAL 0x0
+/* enum: External. */
+#define MC_CMD_MDIO_BUS_EXTERNAL 0x1
+/* Port address */
#define MC_CMD_MDIO_READ_IN_PRTAD_OFST 4
+/* Device Address or clause 22. */
#define MC_CMD_MDIO_READ_IN_DEVAD_OFST 8
-#define MC_CMD_MDIO_CLAUSE22 0x20 /* enum */
+/* enum: By default all MCDI MDIO operations use clause 45 mode. To use
+ * clause 22, set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
+#define MC_CMD_MDIO_CLAUSE22 0x20
+/* Address */
#define MC_CMD_MDIO_READ_IN_ADDR_OFST 12
/* MC_CMD_MDIO_READ_OUT msgresponse */
#define MC_CMD_MDIO_READ_OUT_LEN 8
+/* Value */
#define MC_CMD_MDIO_READ_OUT_VALUE_OFST 0
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
#define MC_CMD_MDIO_READ_OUT_STATUS_OFST 4
-#define MC_CMD_MDIO_STATUS_GOOD 0x8 /* enum */
+/* enum: Good. */
+#define MC_CMD_MDIO_STATUS_GOOD 0x8
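/* Illustrative sketch (not part of the patch hunks): per the comment above,
 * a "good" transaction has only the DONE bit set, which is exactly what
 * MC_CMD_MDIO_STATUS_GOOD encodes, so a straight comparison suffices.
 */
#include <stdint.h>

static int sketch_mdio_transaction_ok(uint32_t status)
{
	return status == MC_CMD_MDIO_STATUS_GOOD;
}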
/***********************************/
@@ -788,18 +1355,34 @@
/* MC_CMD_MDIO_WRITE_IN msgrequest */
#define MC_CMD_MDIO_WRITE_IN_LEN 20
+/* Bus number; there are two MDIO buses: one for the internal PHY, and one for
+ * external devices.
+ */
#define MC_CMD_MDIO_WRITE_IN_BUS_OFST 0
+/* enum: Internal. */
/* MC_CMD_MDIO_BUS_INTERNAL 0x0 */
+/* enum: External. */
/* MC_CMD_MDIO_BUS_EXTERNAL 0x1 */
+/* Port address */
#define MC_CMD_MDIO_WRITE_IN_PRTAD_OFST 4
+/* Device Address or clause 22. */
#define MC_CMD_MDIO_WRITE_IN_DEVAD_OFST 8
+/* enum: By default all MCDI MDIO operations use clause 45 mode. To use
+ * clause 22, set DEVAD = MC_CMD_MDIO_CLAUSE22.
+ */
/* MC_CMD_MDIO_CLAUSE22 0x20 */
+/* Address */
#define MC_CMD_MDIO_WRITE_IN_ADDR_OFST 12
+/* Value */
#define MC_CMD_MDIO_WRITE_IN_VALUE_OFST 16
/* MC_CMD_MDIO_WRITE_OUT msgresponse */
#define MC_CMD_MDIO_WRITE_OUT_LEN 4
+/* Status; the MDIO commands return the raw status bits from the MDIO block. A
+ * "good" transaction should have the DONE bit set and all other bits clear.
+ */
#define MC_CMD_MDIO_WRITE_OUT_STATUS_OFST 0
+/* enum: Good. */
/* MC_CMD_MDIO_STATUS_GOOD 0x8 */
@@ -813,6 +1396,9 @@
#define MC_CMD_DBI_WRITE_IN_LENMIN 12
#define MC_CMD_DBI_WRITE_IN_LENMAX 252
#define MC_CMD_DBI_WRITE_IN_LEN(num) (0+12*(num))
+/* Each write op consists of an address (offset 0), byte enable/VF/CS2 (offset
+ * 32) and value (offset 64). See MC_CMD_DBIWROP_TYPEDEF.
+ */
#define MC_CMD_DBI_WRITE_IN_DBIWROP_OFST 0
#define MC_CMD_DBI_WRITE_IN_DBIWROP_LEN 12
#define MC_CMD_DBI_WRITE_IN_DBIWROP_MINNUM 1
@@ -826,9 +1412,15 @@
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_OFST 0
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_LBN 0
#define MC_CMD_DBIWROP_TYPEDEF_ADDRESS_WIDTH 32
-#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_OFST 4
-#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_LBN 32
-#define MC_CMD_DBIWROP_TYPEDEF_BYTE_MASK_WIDTH 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIWROP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIWROP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIWROP_TYPEDEF_PARMS_WIDTH 32
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_OFST 8
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_LBN 64
#define MC_CMD_DBIWROP_TYPEDEF_VALUE_WIDTH 32
@@ -836,69 +1428,111 @@
/***********************************/
/* MC_CMD_PORT_READ32
- * Read a 32-bit register from the indirect port register map.
+ * Read a 32-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
*/
#define MC_CMD_PORT_READ32 0x14
/* MC_CMD_PORT_READ32_IN msgrequest */
#define MC_CMD_PORT_READ32_IN_LEN 4
+/* Address */
#define MC_CMD_PORT_READ32_IN_ADDR_OFST 0
/* MC_CMD_PORT_READ32_OUT msgresponse */
#define MC_CMD_PORT_READ32_OUT_LEN 8
+/* Value */
#define MC_CMD_PORT_READ32_OUT_VALUE_OFST 0
+/* Status */
#define MC_CMD_PORT_READ32_OUT_STATUS_OFST 4
/***********************************/
/* MC_CMD_PORT_WRITE32
- * Write a 32-bit register to the indirect port register map.
+ * Write a 32-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
*/
#define MC_CMD_PORT_WRITE32 0x15
/* MC_CMD_PORT_WRITE32_IN msgrequest */
#define MC_CMD_PORT_WRITE32_IN_LEN 8
+/* Address */
#define MC_CMD_PORT_WRITE32_IN_ADDR_OFST 0
+/* Value */
#define MC_CMD_PORT_WRITE32_IN_VALUE_OFST 4
/* MC_CMD_PORT_WRITE32_OUT msgresponse */
#define MC_CMD_PORT_WRITE32_OUT_LEN 4
+/* Status */
#define MC_CMD_PORT_WRITE32_OUT_STATUS_OFST 0
/***********************************/
/* MC_CMD_PORT_READ128
- * Read a 128-bit register from the indirect port register map.
+ * Read a 128-bit register from the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
*/
#define MC_CMD_PORT_READ128 0x16
/* MC_CMD_PORT_READ128_IN msgrequest */
#define MC_CMD_PORT_READ128_IN_LEN 4
+/* Address */
#define MC_CMD_PORT_READ128_IN_ADDR_OFST 0
/* MC_CMD_PORT_READ128_OUT msgresponse */
#define MC_CMD_PORT_READ128_OUT_LEN 20
+/* Value */
#define MC_CMD_PORT_READ128_OUT_VALUE_OFST 0
#define MC_CMD_PORT_READ128_OUT_VALUE_LEN 16
+/* Status */
#define MC_CMD_PORT_READ128_OUT_STATUS_OFST 16
/***********************************/
/* MC_CMD_PORT_WRITE128
- * Write a 128-bit register to the indirect port register map.
+ * Write a 128-bit register to the indirect port register map. The port to
+ * access is implied by the Shared memory channel used.
*/
#define MC_CMD_PORT_WRITE128 0x17
/* MC_CMD_PORT_WRITE128_IN msgrequest */
#define MC_CMD_PORT_WRITE128_IN_LEN 20
+/* Address */
#define MC_CMD_PORT_WRITE128_IN_ADDR_OFST 0
+/* Value */
#define MC_CMD_PORT_WRITE128_IN_VALUE_OFST 4
#define MC_CMD_PORT_WRITE128_IN_VALUE_LEN 16
/* MC_CMD_PORT_WRITE128_OUT msgresponse */
#define MC_CMD_PORT_WRITE128_OUT_LEN 4
+/* Status */
#define MC_CMD_PORT_WRITE128_OUT_STATUS_OFST 0
+/* MC_CMD_CAPABILITIES structuredef */
+#define MC_CMD_CAPABILITIES_LEN 4
+/* Small buf table. */
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0
+#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 1
+/* Turbo mode (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_LBN 1
+#define MC_CMD_CAPABILITIES_TURBO_WIDTH 1
+/* Turbo mode active (for Maranello). */
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 2
+#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 1
+/* PTP offload. */
+#define MC_CMD_CAPABILITIES_PTP_LBN 3
+#define MC_CMD_CAPABILITIES_PTP_WIDTH 1
+/* AOE mode. */
+#define MC_CMD_CAPABILITIES_AOE_LBN 4
+#define MC_CMD_CAPABILITIES_AOE_WIDTH 1
+/* AOE mode active. */
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_LBN 5
+#define MC_CMD_CAPABILITIES_AOE_ACTIVE_WIDTH 1
+/* AOE FC mode active. */
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_LBN 6
+#define MC_CMD_CAPABILITIES_FC_ACTIVE_WIDTH 1
+#define MC_CMD_CAPABILITIES_RESERVED_LBN 7
+#define MC_CMD_CAPABILITIES_RESERVED_WIDTH 25
+
/***********************************/
/* MC_CMD_GET_BOARD_CFG
@@ -916,18 +1550,10 @@
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_TYPE_OFST 0
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_OFST 4
#define MC_CMD_GET_BOARD_CFG_OUT_BOARD_NAME_LEN 32
+/* See MC_CMD_CAPABILITIES */
#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT0_OFST 36
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_LBN 0x0 /* enum */
-#define MC_CMD_CAPABILITIES_SMALL_BUF_TBL_WIDTH 0x1 /* enum */
-#define MC_CMD_CAPABILITIES_TURBO_LBN 0x1 /* enum */
-#define MC_CMD_CAPABILITIES_TURBO_WIDTH 0x1 /* enum */
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN 0x2 /* enum */
-#define MC_CMD_CAPABILITIES_TURBO_ACTIVE_WIDTH 0x1 /* enum */
-#define MC_CMD_CAPABILITIES_PTP_LBN 0x3 /* enum */
-#define MC_CMD_CAPABILITIES_PTP_WIDTH 0x1 /* enum */
+/* See MC_CMD_CAPABILITIES */
#define MC_CMD_GET_BOARD_CFG_OUT_CAPABILITIES_PORT1_OFST 40
-/* Enum values, see field(s): */
-/* CAPABILITIES_PORT0 */
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_OFST 44
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0_LEN 6
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1_OFST 50
@@ -936,6 +1562,11 @@
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_COUNT_PORT1_OFST 60
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT0_OFST 64
#define MC_CMD_GET_BOARD_CFG_OUT_MAC_STRIDE_PORT1_OFST 68
+/* This field contains a 16-bit value for each of the types of NVRAM area. The
+ * values are defined in the firmware/mc/platform/.c file for a specific board
+ * type, but otherwise have no meaning to the MC; they are used by the driver
+ * to manage selection of appropriate firmware updates.
+ */
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_OFST 72
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_LEN 2
#define MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MINNUM 12
@@ -944,7 +1575,7 @@
/***********************************/
/* MC_CMD_DBI_READX
- * Read DBI register(s).
+ * Read DBI register(s) -- extended functionality
*/
#define MC_CMD_DBI_READX 0x19
@@ -952,6 +1583,7 @@
#define MC_CMD_DBI_READX_IN_LENMIN 8
#define MC_CMD_DBI_READX_IN_LENMAX 248
#define MC_CMD_DBI_READX_IN_LEN(num) (0+8*(num))
+/* Each Read op consists of an address (offset 0) and parameters (offset 4: VF/CS2). */
#define MC_CMD_DBI_READX_IN_DBIRDOP_OFST 0
#define MC_CMD_DBI_READX_IN_DBIRDOP_LEN 8
#define MC_CMD_DBI_READX_IN_DBIRDOP_LO_OFST 0
@@ -963,11 +1595,27 @@
#define MC_CMD_DBI_READX_OUT_LENMIN 4
#define MC_CMD_DBI_READX_OUT_LENMAX 252
#define MC_CMD_DBI_READX_OUT_LEN(num) (0+4*(num))
+/* Value */
#define MC_CMD_DBI_READX_OUT_VALUE_OFST 0
#define MC_CMD_DBI_READX_OUT_VALUE_LEN 4
#define MC_CMD_DBI_READX_OUT_VALUE_MINNUM 1
#define MC_CMD_DBI_READX_OUT_VALUE_MAXNUM 63
+/* MC_CMD_DBIRDOP_TYPEDEF structuredef */
+#define MC_CMD_DBIRDOP_TYPEDEF_LEN 8
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_OFST 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_LBN 0
+#define MC_CMD_DBIRDOP_TYPEDEF_ADDRESS_WIDTH 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_OFST 4
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_LBN 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_NUM_WIDTH 16
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_LBN 15
+#define MC_CMD_DBIRDOP_TYPEDEF_VF_ACTIVE_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_LBN 14
+#define MC_CMD_DBIRDOP_TYPEDEF_CS2_WIDTH 1
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_LBN 32
+#define MC_CMD_DBIRDOP_TYPEDEF_PARMS_WIDTH 32
+
/***********************************/
/* MC_CMD_SET_RAND_SEED
@@ -977,6 +1625,7 @@
/* MC_CMD_SET_RAND_SEED_IN msgrequest */
#define MC_CMD_SET_RAND_SEED_IN_LEN 16
+/* Seed value. */
#define MC_CMD_SET_RAND_SEED_IN_SEED_OFST 0
#define MC_CMD_SET_RAND_SEED_IN_SEED_LEN 16
@@ -986,7 +1635,7 @@
/***********************************/
/* MC_CMD_LTSSM_HIST
- * Retrieve the history of the PCIE LTSSM.
+ * Retrieve the history of the LTSSM, if the build supports it.
*/
#define MC_CMD_LTSSM_HIST 0x1b
@@ -997,6 +1646,7 @@
#define MC_CMD_LTSSM_HIST_OUT_LENMIN 0
#define MC_CMD_LTSSM_HIST_OUT_LENMAX 252
#define MC_CMD_LTSSM_HIST_OUT_LEN(num) (0+4*(num))
+/* variable number of LTSSM values, as bytes. The history is read-to-clear. */
#define MC_CMD_LTSSM_HIST_OUT_DATA_OFST 0
#define MC_CMD_LTSSM_HIST_OUT_DATA_LEN 4
#define MC_CMD_LTSSM_HIST_OUT_DATA_MINNUM 0
@@ -1005,41 +1655,47 @@
/***********************************/
/* MC_CMD_DRV_ATTACH
- * Inform MCPU that this port is managed on the host.
+ * Inform MCPU that this port is managed on the host (i.e. driver active). For
+ * Huntington, also request the preferred datapath firmware to use if possible
+ * (it may not be possible for this request to be fulfilled; the driver must
+ * issue a subsequent MC_CMD_GET_CAPABILITIES command to determine which
+ * features are actually available). The FIRMWARE_ID field is ignored by older
+ * platforms.
*/
#define MC_CMD_DRV_ATTACH 0x1c
/* MC_CMD_DRV_ATTACH_IN msgrequest */
-#define MC_CMD_DRV_ATTACH_IN_LEN 8
+#define MC_CMD_DRV_ATTACH_IN_LEN 12
+/* new state (0=detached, 1=attached) to set if UPDATE=1 */
#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+/* 1 to set new state, or 0 to just report the existing state */
#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
+/* preferred datapath firmware (for Huntington; ignored for Siena) */
+#define MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST 8
+/* enum: Prefer to use full featured firmware */
+#define MC_CMD_FW_FULL_FEATURED 0x0
+/* enum: Prefer to use firmware with fewer features but lower latency */
+#define MC_CMD_FW_LOW_LATENCY 0x1
/* MC_CMD_DRV_ATTACH_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_OUT_LEN 4
+/* previous or existing state (0=detached, 1=attached) */
#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
-
-/***********************************/
-/* MC_CMD_NCSI_PROD
- * Trigger an NC-SI event.
+/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
+/* previous or existing state (0=detached, 1=attached) */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
+/* Flags associated with this function */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
+/* enum: Labels the lowest-numbered function visible to the OS */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY 0x0
+/* enum: The function can control the link state of the physical port it is
+ * bound to.
*/
-#define MC_CMD_NCSI_PROD 0x1d
-
-/* MC_CMD_NCSI_PROD_IN msgrequest */
-#define MC_CMD_NCSI_PROD_IN_LEN 4
-#define MC_CMD_NCSI_PROD_IN_EVENTS_OFST 0
-#define MC_CMD_NCSI_PROD_LINKCHANGE 0x0 /* enum */
-#define MC_CMD_NCSI_PROD_RESET 0x1 /* enum */
-#define MC_CMD_NCSI_PROD_DRVATTACH 0x2 /* enum */
-#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_LBN 0
-#define MC_CMD_NCSI_PROD_IN_LINKCHANGE_WIDTH 1
-#define MC_CMD_NCSI_PROD_IN_RESET_LBN 1
-#define MC_CMD_NCSI_PROD_IN_RESET_WIDTH 1
-#define MC_CMD_NCSI_PROD_IN_DRVATTACH_LBN 2
-#define MC_CMD_NCSI_PROD_IN_DRVATTACH_WIDTH 1
-
-/* MC_CMD_NCSI_PROD_OUT msgresponse */
-#define MC_CMD_NCSI_PROD_OUT_LEN 0
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1
+/* enum: The function can perform privileged operations */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2
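
As a rough illustration of how a driver might consume the MC_CMD_DRV_ATTACH definitions above, the sketch below builds a request that attaches the function and asks (as a preference only) for the low-latency datapath firmware, then reads back either the basic or the extended response. The mcdi_rpc() transport hook and the little-endian byte accessors are assumptions made for a self-contained example; they stand in for whatever MCDI plumbing the driver actually uses.

#include <stdint.h>
#include <string.h>

/* Hypothetical MCDI transport hook: sends command 'cmd' with the given request
 * buffer and returns the response length written into 'resp' (or <0 on error).
 */
extern int mcdi_rpc(unsigned int cmd, const uint8_t *req, size_t req_len,
                    uint8_t *resp, size_t resp_space);

/* MCDI payloads are little-endian dwords at fixed byte offsets (the *_OFST
 * values), so plain byte accessors are enough for a sketch.
 */
static void put_le32(uint8_t *buf, unsigned int ofst, uint32_t v)
{
        buf[ofst + 0] = (uint8_t)(v & 0xff);
        buf[ofst + 1] = (uint8_t)((v >> 8) & 0xff);
        buf[ofst + 2] = (uint8_t)((v >> 16) & 0xff);
        buf[ofst + 3] = (uint8_t)((v >> 24) & 0xff);
}

static uint32_t get_le32(const uint8_t *buf, unsigned int ofst)
{
        return (uint32_t)buf[ofst] |
               ((uint32_t)buf[ofst + 1] << 8) |
               ((uint32_t)buf[ofst + 2] << 16) |
               ((uint32_t)buf[ofst + 3] << 24);
}

/* Attach this function and request (but do not require) low-latency firmware.
 * Returns the extended FUNC_FLAGS word when the firmware provides the EXT
 * response, otherwise the previous attach state, or <0 on transport error.
 */
int drv_attach_prefer_low_latency(void)
{
        uint8_t req[MC_CMD_DRV_ATTACH_IN_LEN];
        uint8_t resp[MC_CMD_DRV_ATTACH_EXT_OUT_LEN];
        int rc;

        memset(req, 0, sizeof(req));
        put_le32(req, MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST, 1);  /* attached */
        put_le32(req, MC_CMD_DRV_ATTACH_IN_UPDATE_OFST, 1);     /* set, not query */
        put_le32(req, MC_CMD_DRV_ATTACH_IN_FIRMWARE_ID_OFST,
                 MC_CMD_FW_LOW_LATENCY);                         /* preference only */

        rc = mcdi_rpc(MC_CMD_DRV_ATTACH, req, sizeof(req), resp, sizeof(resp));
        if (rc < 0)
                return rc;
        if (rc >= MC_CMD_DRV_ATTACH_EXT_OUT_LEN)
                return (int)get_le32(resp, MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST);
        return (int)get_le32(resp, MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST);
}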
/***********************************/
@@ -1050,6 +1706,7 @@
/* MC_CMD_SHMUART_IN msgrequest */
#define MC_CMD_SHMUART_IN_LEN 4
+/* ??? */
#define MC_CMD_SHMUART_IN_FLAG_OFST 0
/* MC_CMD_SHMUART_OUT msgresponse */
@@ -1057,13 +1714,33 @@
/***********************************/
+/* MC_CMD_PORT_RESET
+ * Generic per-port reset. There is no equivalent for per-board reset. Locks
+ * required: None; Return code: 0, ETIME. NOTE: This command is deprecated -
+ * use MC_CMD_ENTITY_RESET instead.
+ */
+#define MC_CMD_PORT_RESET 0x20
+
+/* MC_CMD_PORT_RESET_IN msgrequest */
+#define MC_CMD_PORT_RESET_IN_LEN 0
+
+/* MC_CMD_PORT_RESET_OUT msgresponse */
+#define MC_CMD_PORT_RESET_OUT_LEN 0
+
+
+/***********************************/
/* MC_CMD_ENTITY_RESET
- * Generic per-port reset.
+ * Generic per-resource reset. There is no equivalent for per-board reset.
+ * Locks required: None; Return code: 0, ETIME. NOTE: This command is an
+ * extended version of the deprecated MC_CMD_PORT_RESET with added fields.
*/
#define MC_CMD_ENTITY_RESET 0x20
/* MC_CMD_ENTITY_RESET_IN msgrequest */
#define MC_CMD_ENTITY_RESET_IN_LEN 4
+/* Optional flags field. Omitting this will perform a "legacy" reset action
+ * (TBD).
+ */
#define MC_CMD_ENTITY_RESET_IN_FLAG_OFST 0
#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_LBN 0
#define MC_CMD_ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET_WIDTH 1
@@ -1080,7 +1757,9 @@
/* MC_CMD_PCIE_CREDITS_IN msgrequest */
#define MC_CMD_PCIE_CREDITS_IN_LEN 8
+/* poll period. 0 is disabled */
#define MC_CMD_PCIE_CREDITS_IN_POLL_PERIOD_OFST 0
+/* wipe statistics */
#define MC_CMD_PCIE_CREDITS_IN_WIPE_OFST 4
/* MC_CMD_PCIE_CREDITS_OUT msgresponse */
@@ -1141,7 +1820,7 @@
/***********************************/
/* MC_CMD_PUTS
- * puts(3) implementation over MCDI
+ * Copy the given ASCII string out onto UART and/or out of the network port.
*/
#define MC_CMD_PUTS 0x23
@@ -1167,7 +1846,8 @@
/***********************************/
/* MC_CMD_GET_PHY_CFG
- * Report PHY configuration.
+ * Report PHY configuration. This guarantees to succeed even if the PHY is in a
+ * 'zombie' state. Locks required: None
*/
#define MC_CMD_GET_PHY_CFG 0x24
@@ -1176,6 +1856,7 @@
/* MC_CMD_GET_PHY_CFG_OUT msgresponse */
#define MC_CMD_GET_PHY_CFG_OUT_LEN 72
+/* flags */
#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_LBN 0
#define MC_CMD_GET_PHY_CFG_OUT_PRESENT_WIDTH 1
@@ -1191,7 +1872,9 @@
#define MC_CMD_GET_PHY_CFG_OUT_TXDIS_WIDTH 1
#define MC_CMD_GET_PHY_CFG_OUT_BIST_LBN 6
#define MC_CMD_GET_PHY_CFG_OUT_BIST_WIDTH 1
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
+/* Bitmask of supported capabilities */
#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
#define MC_CMD_PHY_CAP_10HDX_LBN 1
#define MC_CMD_PHY_CAP_10HDX_WIDTH 1
@@ -1213,20 +1896,36 @@
#define MC_CMD_PHY_CAP_ASYM_WIDTH 1
#define MC_CMD_PHY_CAP_AN_LBN 10
#define MC_CMD_PHY_CAP_AN_WIDTH 1
+#define MC_CMD_PHY_CAP_40000FDX_LBN 11
+#define MC_CMD_PHY_CAP_40000FDX_WIDTH 1
+#define MC_CMD_PHY_CAP_DDM_LBN 12
+#define MC_CMD_PHY_CAP_DDM_WIDTH 1
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_CHANNEL_OFST 12
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_PRT_OFST 16
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_STATS_MASK_OFST 20
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_NAME_OFST 24
#define MC_CMD_GET_PHY_CFG_OUT_NAME_LEN 20
+/* ?? */
#define MC_CMD_GET_PHY_CFG_OUT_MEDIA_TYPE_OFST 44
-#define MC_CMD_MEDIA_XAUI 0x1 /* enum */
-#define MC_CMD_MEDIA_CX4 0x2 /* enum */
-#define MC_CMD_MEDIA_KX4 0x3 /* enum */
-#define MC_CMD_MEDIA_XFP 0x4 /* enum */
-#define MC_CMD_MEDIA_SFP_PLUS 0x5 /* enum */
-#define MC_CMD_MEDIA_BASE_T 0x6 /* enum */
+/* enum: Xaui. */
+#define MC_CMD_MEDIA_XAUI 0x1
+/* enum: CX4. */
+#define MC_CMD_MEDIA_CX4 0x2
+/* enum: KX4. */
+#define MC_CMD_MEDIA_KX4 0x3
+/* enum: XFP Far. */
+#define MC_CMD_MEDIA_XFP 0x4
+/* enum: SFP+. */
+#define MC_CMD_MEDIA_SFP_PLUS 0x5
+/* enum: 10GBaseT. */
+#define MC_CMD_MEDIA_BASE_T 0x6
#define MC_CMD_GET_PHY_CFG_OUT_MMD_MASK_OFST 48
-#define MC_CMD_MMD_CLAUSE22 0x0 /* enum */
+/* enum: Native clause 22 */
+#define MC_CMD_MMD_CLAUSE22 0x0
#define MC_CMD_MMD_CLAUSE45_PMAPMD 0x1 /* enum */
#define MC_CMD_MMD_CLAUSE45_WIS 0x2 /* enum */
#define MC_CMD_MMD_CLAUSE45_PCS 0x3 /* enum */
@@ -1234,7 +1933,8 @@
#define MC_CMD_MMD_CLAUSE45_DTEXS 0x5 /* enum */
#define MC_CMD_MMD_CLAUSE45_TC 0x6 /* enum */
#define MC_CMD_MMD_CLAUSE45_AN 0x7 /* enum */
-#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d /* enum */
+/* enum: Clause22 proxied over clause45 by PHY. */
+#define MC_CMD_MMD_CLAUSE45_C22EXT 0x1d
#define MC_CMD_MMD_CLAUSE45_VEND1 0x1e /* enum */
#define MC_CMD_MMD_CLAUSE45_VEND2 0x1f /* enum */
#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
@@ -1243,18 +1943,31 @@
/***********************************/
/* MC_CMD_START_BIST
- * Start a BIST test on the PHY.
+ * Start a BIST test on the PHY. Locks required: PHY_LOCK if doing a PHY BIST
+ * Return code: 0, EINVAL, EACCES (if PHY_LOCK is not held)
*/
#define MC_CMD_START_BIST 0x25
/* MC_CMD_START_BIST_IN msgrequest */
#define MC_CMD_START_BIST_IN_LEN 4
+/* Type of test. */
#define MC_CMD_START_BIST_IN_TYPE_OFST 0
-#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1 /* enum */
-#define MC_CMD_PHY_BIST_CABLE_LONG 0x2 /* enum */
-#define MC_CMD_BPX_SERDES_BIST 0x3 /* enum */
-#define MC_CMD_MC_LOOPBACK_BIST 0x4 /* enum */
-#define MC_CMD_PHY_BIST 0x5 /* enum */
+/* enum: Run the PHY's short cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_SHORT 0x1
+/* enum: Run the PHY's long cable BIST. */
+#define MC_CMD_PHY_BIST_CABLE_LONG 0x2
+/* enum: Run BIST on the currently selected BPX Serdes (XAUI or XFI). */
+#define MC_CMD_BPX_SERDES_BIST 0x3
+/* enum: Run the MC loopback tests. */
+#define MC_CMD_MC_LOOPBACK_BIST 0x4
+/* enum: Run the PHY's standard BIST. */
+#define MC_CMD_PHY_BIST 0x5
+/* enum: Run MC RAM test. */
+#define MC_CMD_MC_MEM_BIST 0x6
+/* enum: Run Port RAM test. */
+#define MC_CMD_PORT_MEM_BIST 0x7
+/* enum: Run register test. */
+#define MC_CMD_REG_BIST 0x8
/* MC_CMD_START_BIST_OUT msgresponse */
#define MC_CMD_START_BIST_OUT_LEN 0
@@ -1262,7 +1975,12 @@
/***********************************/
/* MC_CMD_POLL_BIST
- * Poll for BIST completion.
+ * Poll for BIST completion. Returns a single status code, and optionally some
+ * PHY specific bist output. The driver should only consume the BIST output
+ * after validating OUTLEN and MC_CMD_GET_PHY_CFG.TYPE. If a driver can't
+ * successfully parse the BIST output, it should still respect the pass/fail in
+ * OUT.RESULT. Locks required: PHY_LOCK if doing a PHY BIST. Return code: 0,
+ * EACCES (if PHY_LOCK is not held).
*/
#define MC_CMD_POLL_BIST 0x26
@@ -1271,15 +1989,21 @@
/* MC_CMD_POLL_BIST_OUT msgresponse */
#define MC_CMD_POLL_BIST_OUT_LEN 8
+/* result */
#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
-#define MC_CMD_POLL_BIST_RUNNING 0x1 /* enum */
-#define MC_CMD_POLL_BIST_PASSED 0x2 /* enum */
-#define MC_CMD_POLL_BIST_FAILED 0x3 /* enum */
-#define MC_CMD_POLL_BIST_TIMEOUT 0x4 /* enum */
+/* enum: Running. */
+#define MC_CMD_POLL_BIST_RUNNING 0x1
+/* enum: Passed. */
+#define MC_CMD_POLL_BIST_PASSED 0x2
+/* enum: Failed. */
+#define MC_CMD_POLL_BIST_FAILED 0x3
+/* enum: Timed-out. */
+#define MC_CMD_POLL_BIST_TIMEOUT 0x4
#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
/* MC_CMD_POLL_BIST_OUT_SFT9001 msgresponse */
#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 36
+/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
@@ -1287,42 +2011,116 @@
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 8
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 12
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 16
+/* Status of each channel A */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 20
-#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1 /* enum */
-#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2 /* enum */
-#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3 /* enum */
-#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4 /* enum */
-#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9 /* enum */
+/* enum: Ok. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 0x1
+/* enum: Open. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 0x2
+/* enum: Intra-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 0x3
+/* enum: Inter-pair short. */
+#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 0x4
+/* enum: Busy. */
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 0x9
+/* Status of each channel B */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 24
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
+/* Status of each channel C */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 28
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
+/* Status of each channel D */
#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 32
/* Enum values, see field(s): */
/* CABLE_STATUS_A */
/* MC_CMD_POLL_BIST_OUT_MRSFP msgresponse */
#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
+/* result */
/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
/* Enum values, see field(s): */
/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
-#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7 /* enum */
-#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8 /* enum */
+/* enum: Complete. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0x0
+/* enum: Bus switch off I2C write. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 0x1
+/* enum: Bus switch off I2C no access IO exp. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 0x2
+/* enum: Bus switch off I2C no access module. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 0x3
+/* enum: IO exp I2C configure. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 0x4
+/* enum: Bus switch I2C no cross talk. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 0x5
+/* enum: Module presence. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 0x6
+/* enum: Module ID I2C access. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 0x7
+/* enum: Module ID sane value. */
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 0x8
+
+/* MC_CMD_POLL_BIST_OUT_MEM msgresponse */
+#define MC_CMD_POLL_BIST_OUT_MEM_LEN 36
+/* result */
+/* MC_CMD_POLL_BIST_OUT_RESULT_OFST 0 */
+/* Enum values, see field(s): */
+/* MC_CMD_POLL_BIST_OUT/MC_CMD_POLL_BIST_OUT_RESULT */
+#define MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST 4
+/* enum: Test has completed. */
+#define MC_CMD_POLL_BIST_MEM_COMPLETE 0x0
+/* enum: RAM test - walk ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ONES 0x1
+/* enum: RAM test - walk zeros. */
+#define MC_CMD_POLL_BIST_MEM_MEM_WALK_ZEROS 0x2
+/* enum: RAM test - walking inversions zeros/ones. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_ZERO_ONE 0x3
+/* enum: RAM test - walking inversions checkerboard. */
+#define MC_CMD_POLL_BIST_MEM_MEM_INV_CHKBOARD 0x4
+/* enum: Register test - set / clear individual bits. */
+#define MC_CMD_POLL_BIST_MEM_REG 0x5
+/* enum: ECC error detected. */
+#define MC_CMD_POLL_BIST_MEM_ECC 0x6
+/* Failure address, only valid if result is POLL_BIST_FAILED */
+#define MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST 8
+/* Bus or address space to which the failure address corresponds */
+#define MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST 12
+/* enum: MC MIPS bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_MC 0x0
+/* enum: CSR IREG bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_CSR 0x1
+/* enum: RX DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_RX 0x2
+/* enum: TX0 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX0 0x3
+/* enum: TX1 DPCPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DPCPU_TX1 0x4
+/* enum: RX DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_RX 0x5
+/* enum: TX DICPU bus. */
+#define MC_CMD_POLL_BIST_MEM_BUS_DICPU_TX 0x6
+/* Pattern written to RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST 16
+/* Actual value read from RAM / register */
+#define MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST 20
+/* ECC error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_OFST 24
+/* ECC parity error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_PARITY_OFST 28
+/* ECC fatal error mask */
+#define MC_CMD_POLL_BIST_OUT_MEM_ECC_FATAL_OFST 32
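
Tying the MC_CMD_POLL_BIST_OUT_MEM layout together, here is a small decode helper. It only assumes the get_le32() byte accessor from the MC_CMD_DRV_ATTACH sketch earlier; the failure-detail fields are read only when RESULT is POLL_BIST_FAILED, matching the comment on MC_CMD_POLL_BIST_OUT_MEM_ADDR above.

/* Collected failure details from an MC_CMD_POLL_BIST_OUT_MEM response. */
struct mem_bist_fail {
        uint32_t test;    /* MC_CMD_POLL_BIST_MEM_* sub-test */
        uint32_t bus;     /* MC_CMD_POLL_BIST_MEM_BUS_* */
        uint32_t addr;
        uint32_t expect;
        uint32_t actual;
};

/* Returns 0 on pass, 1 while still running, -1 on failure or timeout.
 * 'resp' must hold at least MC_CMD_POLL_BIST_OUT_MEM_LEN bytes.
 */
static int poll_bist_mem_decode(const uint8_t *resp, struct mem_bist_fail *out)
{
        uint32_t result = get_le32(resp, MC_CMD_POLL_BIST_OUT_RESULT_OFST);

        if (result == MC_CMD_POLL_BIST_PASSED)
                return 0;
        if (result == MC_CMD_POLL_BIST_RUNNING)
                return 1;
        /* Failure address/bus/pattern are only valid for POLL_BIST_FAILED. */
        if (result == MC_CMD_POLL_BIST_FAILED && out) {
                out->test   = get_le32(resp, MC_CMD_POLL_BIST_OUT_MEM_TEST_OFST);
                out->bus    = get_le32(resp, MC_CMD_POLL_BIST_OUT_MEM_BUS_OFST);
                out->addr   = get_le32(resp, MC_CMD_POLL_BIST_OUT_MEM_ADDR_OFST);
                out->expect = get_le32(resp, MC_CMD_POLL_BIST_OUT_MEM_EXPECT_OFST);
                out->actual = get_le32(resp, MC_CMD_POLL_BIST_OUT_MEM_ACTUAL_OFST);
        }
        return -1;
}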
/***********************************/
/* MC_CMD_FLUSH_RX_QUEUES
- * Flush receive queue(s).
+ * Flush receive queue(s). If SRIOV is enabled (via MC_CMD_SRIOV), then RXQ
+ * flushes should be initiated via this MCDI operation, rather than by
+ * directly writing FLUSH_CMD.
+ *
+ * The flush is completed (either done/fail) asynchronously (after this command
+ * returns). The driver must still wait for flush done/failure events as usual.
*/
#define MC_CMD_FLUSH_RX_QUEUES 0x27
@@ -1341,7 +2139,7 @@
/***********************************/
/* MC_CMD_GET_LOOPBACK_MODES
- * Get port's loopback modes.
+ * Returns a bitmask of loopback modes available at each speed.
*/
#define MC_CMD_GET_LOOPBACK_MODES 0x28
@@ -1349,61 +2147,116 @@
#define MC_CMD_GET_LOOPBACK_MODES_IN_LEN 0
/* MC_CMD_GET_LOOPBACK_MODES_OUT msgresponse */
-#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_LEN 40
+/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_OFST 0
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_LO_OFST 0
#define MC_CMD_GET_LOOPBACK_MODES_OUT_100M_HI_OFST 4
-#define MC_CMD_LOOPBACK_NONE 0x0 /* enum */
-#define MC_CMD_LOOPBACK_DATA 0x1 /* enum */
-#define MC_CMD_LOOPBACK_GMAC 0x2 /* enum */
-#define MC_CMD_LOOPBACK_XGMII 0x3 /* enum */
-#define MC_CMD_LOOPBACK_XGXS 0x4 /* enum */
-#define MC_CMD_LOOPBACK_XAUI 0x5 /* enum */
-#define MC_CMD_LOOPBACK_GMII 0x6 /* enum */
-#define MC_CMD_LOOPBACK_SGMII 0x7 /* enum */
-#define MC_CMD_LOOPBACK_XGBR 0x8 /* enum */
-#define MC_CMD_LOOPBACK_XFI 0x9 /* enum */
-#define MC_CMD_LOOPBACK_XAUI_FAR 0xa /* enum */
-#define MC_CMD_LOOPBACK_GMII_FAR 0xb /* enum */
-#define MC_CMD_LOOPBACK_SGMII_FAR 0xc /* enum */
-#define MC_CMD_LOOPBACK_XFI_FAR 0xd /* enum */
-#define MC_CMD_LOOPBACK_GPHY 0xe /* enum */
-#define MC_CMD_LOOPBACK_PHYXS 0xf /* enum */
-#define MC_CMD_LOOPBACK_PCS 0x10 /* enum */
-#define MC_CMD_LOOPBACK_PMAPMD 0x11 /* enum */
-#define MC_CMD_LOOPBACK_XPORT 0x12 /* enum */
-#define MC_CMD_LOOPBACK_XGMII_WS 0x13 /* enum */
-#define MC_CMD_LOOPBACK_XAUI_WS 0x14 /* enum */
-#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15 /* enum */
-#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16 /* enum */
-#define MC_CMD_LOOPBACK_GMII_WS 0x17 /* enum */
-#define MC_CMD_LOOPBACK_XFI_WS 0x18 /* enum */
-#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19 /* enum */
-#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a /* enum */
+/* enum: None. */
+#define MC_CMD_LOOPBACK_NONE 0x0
+/* enum: Data. */
+#define MC_CMD_LOOPBACK_DATA 0x1
+/* enum: GMAC. */
+#define MC_CMD_LOOPBACK_GMAC 0x2
+/* enum: XGMII. */
+#define MC_CMD_LOOPBACK_XGMII 0x3
+/* enum: XGXS. */
+#define MC_CMD_LOOPBACK_XGXS 0x4
+/* enum: XAUI. */
+#define MC_CMD_LOOPBACK_XAUI 0x5
+/* enum: GMII. */
+#define MC_CMD_LOOPBACK_GMII 0x6
+/* enum: SGMII. */
+#define MC_CMD_LOOPBACK_SGMII 0x7
+/* enum: XGBR. */
+#define MC_CMD_LOOPBACK_XGBR 0x8
+/* enum: XFI. */
+#define MC_CMD_LOOPBACK_XFI 0x9
+/* enum: XAUI Far. */
+#define MC_CMD_LOOPBACK_XAUI_FAR 0xa
+/* enum: GMII Far. */
+#define MC_CMD_LOOPBACK_GMII_FAR 0xb
+/* enum: SGMII Far. */
+#define MC_CMD_LOOPBACK_SGMII_FAR 0xc
+/* enum: XFI Far. */
+#define MC_CMD_LOOPBACK_XFI_FAR 0xd
+/* enum: GPhy. */
+#define MC_CMD_LOOPBACK_GPHY 0xe
+/* enum: PhyXS. */
+#define MC_CMD_LOOPBACK_PHYXS 0xf
+/* enum: PCS. */
+#define MC_CMD_LOOPBACK_PCS 0x10
+/* enum: PMA-PMD. */
+#define MC_CMD_LOOPBACK_PMAPMD 0x11
+/* enum: Cross-Port. */
+#define MC_CMD_LOOPBACK_XPORT 0x12
+/* enum: XGMII-Wireside. */
+#define MC_CMD_LOOPBACK_XGMII_WS 0x13
+/* enum: XAUI Wireside. */
+#define MC_CMD_LOOPBACK_XAUI_WS 0x14
+/* enum: XAUI Wireside Far. */
+#define MC_CMD_LOOPBACK_XAUI_WS_FAR 0x15
+/* enum: XAUI Wireside near. */
+#define MC_CMD_LOOPBACK_XAUI_WS_NEAR 0x16
+/* enum: GMII Wireside. */
+#define MC_CMD_LOOPBACK_GMII_WS 0x17
+/* enum: XFI Wireside. */
+#define MC_CMD_LOOPBACK_XFI_WS 0x18
+/* enum: XFI Wireside Far. */
+#define MC_CMD_LOOPBACK_XFI_WS_FAR 0x19
+/* enum: PhyXS Wireside. */
+#define MC_CMD_LOOPBACK_PHYXS_WS 0x1a
+/* enum: PMA lanes MAC-Serdes. */
+#define MC_CMD_LOOPBACK_PMA_INT 0x1b
+/* enum: KR Serdes Parallel (Encoder). */
+#define MC_CMD_LOOPBACK_SD_NEAR 0x1c
+/* enum: KR Serdes Serial. */
+#define MC_CMD_LOOPBACK_SD_FAR 0x1d
+/* enum: PMA lanes MAC-Serdes Wireside. */
+#define MC_CMD_LOOPBACK_PMA_INT_WS 0x1e
+/* enum: KR Serdes Parallel Wireside (Full PCS). */
+#define MC_CMD_LOOPBACK_SD_FEP2_WS 0x1f
+/* enum: KR Serdes Parallel Wireside (Sym Aligner to TX). */
+#define MC_CMD_LOOPBACK_SD_FEP1_5_WS 0x20
+/* enum: KR Serdes Parallel Wireside (Deserializer to Serializer). */
+#define MC_CMD_LOOPBACK_SD_FEP_WS 0x21
+/* enum: KR Serdes Serial Wireside. */
+#define MC_CMD_LOOPBACK_SD_FES_WS 0x22
+/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_LO_OFST 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_1G_HI_OFST 12
/* Enum values, see field(s): */
/* 100M */
+/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_OFST 16
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST 16
#define MC_CMD_GET_LOOPBACK_MODES_OUT_10G_HI_OFST 20
/* Enum values, see field(s): */
/* 100M */
+/* Supported loopbacks. */
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST 24
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN 8
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LO_OFST 24
#define MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_HI_OFST 28
/* Enum values, see field(s): */
/* 100M */
+/* Supported loopbacks. */
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN 8
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LO_OFST 32
+#define MC_CMD_GET_LOOPBACK_MODES_OUT_40G_HI_OFST 36
+/* Enum values, see field(s): */
+/* 100M */
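
To make the LO/HI split of these 8-byte masks concrete, the helper below reassembles one per-speed mask and tests a mode. It assumes (as the "bitmask of loopback modes" description reads) that each MC_CMD_LOOPBACK_* value is a bit position within that 64-bit mask, and it reuses get_le32() from the earlier MC_CMD_DRV_ATTACH sketch.

/* Reassemble the 64-bit loopback mask whose low dword sits at 'lo_ofst' in the
 * MC_CMD_GET_LOOPBACK_MODES response, and test one MC_CMD_LOOPBACK_* mode.
 */
static int loopback_supported(const uint8_t *resp, unsigned int lo_ofst,
                              unsigned int mode)
{
        uint64_t mask = (uint64_t)get_le32(resp, lo_ofst) |
                        ((uint64_t)get_le32(resp, lo_ofst + 4) << 32);

        return (int)((mask >> mode) & 1);
}

/* e.g. does the 10G entry offer XFI loopback?
 *   loopback_supported(resp, MC_CMD_GET_LOOPBACK_MODES_OUT_10G_LO_OFST,
 *                      MC_CMD_LOOPBACK_XFI);
 */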
/***********************************/
/* MC_CMD_GET_LINK
- * Read the unified MAC/PHY link state.
+ * Read the unified MAC/PHY link state. Locks required: None Return code: 0,
+ * ETIME.
*/
#define MC_CMD_GET_LINK 0x29
@@ -1412,9 +2265,15 @@
/* MC_CMD_GET_LINK_OUT msgresponse */
#define MC_CMD_GET_LINK_OUT_LEN 28
+/* near-side advertised capabilities */
#define MC_CMD_GET_LINK_OUT_CAP_OFST 0
+/* link-partner advertised capabilities */
#define MC_CMD_GET_LINK_OUT_LP_CAP_OFST 4
+/* Autonegotiated speed in mbit/s. The link may still be down even if this
+ * reads non-zero.
+ */
#define MC_CMD_GET_LINK_OUT_LINK_SPEED_OFST 8
+/* Current loopback setting. */
#define MC_CMD_GET_LINK_OUT_LOOPBACK_MODE_OFST 12
/* Enum values, see field(s): */
/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
@@ -1427,10 +2286,14 @@
#define MC_CMD_GET_LINK_OUT_BPX_LINK_WIDTH 1
#define MC_CMD_GET_LINK_OUT_PHY_LINK_LBN 3
#define MC_CMD_GET_LINK_OUT_PHY_LINK_WIDTH 1
+/* This returns the negotiated flow control value. */
#define MC_CMD_GET_LINK_OUT_FCNTL_OFST 20
-#define MC_CMD_FCNTL_OFF 0x0 /* enum */
-#define MC_CMD_FCNTL_RESPOND 0x1 /* enum */
-#define MC_CMD_FCNTL_BIDIR 0x2 /* enum */
+/* enum: Flow control is off. */
+#define MC_CMD_FCNTL_OFF 0x0
+/* enum: Respond to flow control. */
+#define MC_CMD_FCNTL_RESPOND 0x1
+/* enum: Respond to and Issue flow control. */
+#define MC_CMD_FCNTL_BIDIR 0x2
#define MC_CMD_GET_LINK_OUT_MAC_FAULT_OFST 24
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_LBN 0
#define MC_CMD_MAC_FAULT_XGMII_LOCAL_WIDTH 1
@@ -1444,13 +2307,16 @@
/***********************************/
/* MC_CMD_SET_LINK
- * Write the unified MAC/PHY link configuration.
+ * Write the unified MAC/PHY link configuration. Locks required: None. Return
+ * code: 0, EINVAL, ETIME
*/
#define MC_CMD_SET_LINK 0x2a
/* MC_CMD_SET_LINK_IN msgrequest */
#define MC_CMD_SET_LINK_IN_LEN 16
+/* ??? */
#define MC_CMD_SET_LINK_IN_CAP_OFST 0
+/* Flags */
#define MC_CMD_SET_LINK_IN_FLAGS_OFST 4
#define MC_CMD_SET_LINK_IN_LOWPOWER_LBN 0
#define MC_CMD_SET_LINK_IN_LOWPOWER_WIDTH 1
@@ -1458,9 +2324,13 @@
#define MC_CMD_SET_LINK_IN_POWEROFF_WIDTH 1
#define MC_CMD_SET_LINK_IN_TXDIS_LBN 2
#define MC_CMD_SET_LINK_IN_TXDIS_WIDTH 1
+/* Loopback mode. */
#define MC_CMD_SET_LINK_IN_LOOPBACK_MODE_OFST 8
/* Enum values, see field(s): */
/* MC_CMD_GET_LOOPBACK_MODES/MC_CMD_GET_LOOPBACK_MODES_OUT/100M */
+/* A loopback speed of "0" is supported, and means "choose any available
+ * speed".
+ */
#define MC_CMD_SET_LINK_IN_LOOPBACK_SPEED_OFST 12
/* MC_CMD_SET_LINK_OUT msgresponse */
@@ -1469,12 +2339,13 @@
/***********************************/
/* MC_CMD_SET_ID_LED
- * Set indentification LED state.
+ * Set identification LED state. Locks required: None. Return code: 0, EINVAL
*/
#define MC_CMD_SET_ID_LED 0x2b
/* MC_CMD_SET_ID_LED_IN msgrequest */
#define MC_CMD_SET_ID_LED_IN_LEN 4
+/* Set LED state. */
#define MC_CMD_SET_ID_LED_IN_STATE_OFST 0
#define MC_CMD_LED_OFF 0x0 /* enum */
#define MC_CMD_LED_ON 0x1 /* enum */
@@ -1486,12 +2357,15 @@
/***********************************/
/* MC_CMD_SET_MAC
- * Set MAC configuration.
+ * Set MAC configuration. Locks required: None. Return code: 0, EINVAL
*/
#define MC_CMD_SET_MAC 0x2c
/* MC_CMD_SET_MAC_IN msgrequest */
#define MC_CMD_SET_MAC_IN_LEN 24
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
#define MC_CMD_SET_MAC_IN_MTU_OFST 0
#define MC_CMD_SET_MAC_IN_DRAIN_OFST 4
#define MC_CMD_SET_MAC_IN_ADDR_OFST 8
@@ -1504,10 +2378,14 @@
#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_LBN 1
#define MC_CMD_SET_MAC_IN_REJECT_BRDCST_WIDTH 1
#define MC_CMD_SET_MAC_IN_FCNTL_OFST 20
+/* enum: Flow control is off. */
/* MC_CMD_FCNTL_OFF 0x0 */
+/* enum: Respond to flow control. */
/* MC_CMD_FCNTL_RESPOND 0x1 */
+/* enum: Respond to and Issue flow control. */
/* MC_CMD_FCNTL_BIDIR 0x2 */
-#define MC_CMD_FCNTL_AUTO 0x3 /* enum */
+/* enum: Auto neg flow control. */
+#define MC_CMD_FCNTL_AUTO 0x3
/* MC_CMD_SET_MAC_OUT msgresponse */
#define MC_CMD_SET_MAC_OUT_LEN 0
@@ -1515,12 +2393,18 @@
/***********************************/
/* MC_CMD_PHY_STATS
- * Get generic PHY statistics.
+ * Get generic PHY statistics. This call returns the statistics for a generic
+ * PHY in a sparse array (indexed by the enumeration). Each value is represented
+ * by a 32bit number. If the DMA_ADDR is 0, then no DMA is performed, and the
+ * statistics may be read from the message response. If DMA_ADDR != 0, then the
+ * statistics are DMAed to that (page-aligned) location. Locks required: None.
+ * Returns: 0, ETIME
*/
#define MC_CMD_PHY_STATS 0x2d
/* MC_CMD_PHY_STATS_IN msgrequest */
#define MC_CMD_PHY_STATS_IN_LEN 8
+/* ??? */
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_OFST 0
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LEN 8
#define MC_CMD_PHY_STATS_IN_DMA_ADDR_LO_OFST 0
@@ -1534,40 +2418,71 @@
#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_OFST 0
#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_LEN 4
#define MC_CMD_PHY_STATS_OUT_NO_DMA_STATISTICS_NUM MC_CMD_PHY_NSTATS
-#define MC_CMD_OUI 0x0 /* enum */
-#define MC_CMD_PMA_PMD_LINK_UP 0x1 /* enum */
-#define MC_CMD_PMA_PMD_RX_FAULT 0x2 /* enum */
-#define MC_CMD_PMA_PMD_TX_FAULT 0x3 /* enum */
-#define MC_CMD_PMA_PMD_SIGNAL 0x4 /* enum */
-#define MC_CMD_PMA_PMD_SNR_A 0x5 /* enum */
-#define MC_CMD_PMA_PMD_SNR_B 0x6 /* enum */
-#define MC_CMD_PMA_PMD_SNR_C 0x7 /* enum */
-#define MC_CMD_PMA_PMD_SNR_D 0x8 /* enum */
-#define MC_CMD_PCS_LINK_UP 0x9 /* enum */
-#define MC_CMD_PCS_RX_FAULT 0xa /* enum */
-#define MC_CMD_PCS_TX_FAULT 0xb /* enum */
-#define MC_CMD_PCS_BER 0xc /* enum */
-#define MC_CMD_PCS_BLOCK_ERRORS 0xd /* enum */
-#define MC_CMD_PHYXS_LINK_UP 0xe /* enum */
-#define MC_CMD_PHYXS_RX_FAULT 0xf /* enum */
-#define MC_CMD_PHYXS_TX_FAULT 0x10 /* enum */
-#define MC_CMD_PHYXS_ALIGN 0x11 /* enum */
-#define MC_CMD_PHYXS_SYNC 0x12 /* enum */
-#define MC_CMD_AN_LINK_UP 0x13 /* enum */
-#define MC_CMD_AN_COMPLETE 0x14 /* enum */
-#define MC_CMD_AN_10GBT_STATUS 0x15 /* enum */
-#define MC_CMD_CL22_LINK_UP 0x16 /* enum */
-#define MC_CMD_PHY_NSTATS 0x17 /* enum */
+/* enum: OUI. */
+#define MC_CMD_OUI 0x0
+/* enum: PMA-PMD Link Up. */
+#define MC_CMD_PMA_PMD_LINK_UP 0x1
+/* enum: PMA-PMD RX Fault. */
+#define MC_CMD_PMA_PMD_RX_FAULT 0x2
+/* enum: PMA-PMD TX Fault. */
+#define MC_CMD_PMA_PMD_TX_FAULT 0x3
+/* enum: PMA-PMD Signal */
+#define MC_CMD_PMA_PMD_SIGNAL 0x4
+/* enum: PMA-PMD SNR A. */
+#define MC_CMD_PMA_PMD_SNR_A 0x5
+/* enum: PMA-PMD SNR B. */
+#define MC_CMD_PMA_PMD_SNR_B 0x6
+/* enum: PMA-PMD SNR C. */
+#define MC_CMD_PMA_PMD_SNR_C 0x7
+/* enum: PMA-PMD SNR D. */
+#define MC_CMD_PMA_PMD_SNR_D 0x8
+/* enum: PCS Link Up. */
+#define MC_CMD_PCS_LINK_UP 0x9
+/* enum: PCS RX Fault. */
+#define MC_CMD_PCS_RX_FAULT 0xa
+/* enum: PCS TX Fault. */
+#define MC_CMD_PCS_TX_FAULT 0xb
+/* enum: PCS BER. */
+#define MC_CMD_PCS_BER 0xc
+/* enum: PCS Block Errors. */
+#define MC_CMD_PCS_BLOCK_ERRORS 0xd
+/* enum: PhyXS Link Up. */
+#define MC_CMD_PHYXS_LINK_UP 0xe
+/* enum: PhyXS RX Fault. */
+#define MC_CMD_PHYXS_RX_FAULT 0xf
+/* enum: PhyXS TX Fault. */
+#define MC_CMD_PHYXS_TX_FAULT 0x10
+/* enum: PhyXS Align. */
+#define MC_CMD_PHYXS_ALIGN 0x11
+/* enum: PhyXS Sync. */
+#define MC_CMD_PHYXS_SYNC 0x12
+/* enum: AN link-up. */
+#define MC_CMD_AN_LINK_UP 0x13
+/* enum: AN Complete. */
+#define MC_CMD_AN_COMPLETE 0x14
+/* enum: AN 10GBaseT Status. */
+#define MC_CMD_AN_10GBT_STATUS 0x15
+/* enum: Clause 22 Link-Up. */
+#define MC_CMD_CL22_LINK_UP 0x16
+/* enum: (Last entry) */
+#define MC_CMD_PHY_NSTATS 0x17
/***********************************/
/* MC_CMD_MAC_STATS
- * Get generic MAC statistics.
+ * Get generic MAC statistics. This call returns unified statistics maintained
+ * by the MC as it switches between the GMAC and XMAC. The MC will write out
+ * all supported stats. The driver should zero initialise the buffer to
+ * guarantee consistent results. If the DMA_ADDR is 0, then no DMA is
+ * performed, and the statistics may be read from the message response. If
+ * DMA_ADDR != 0, then the statistics are DMAed to that (page-aligned) location.
+ * Locks required: None. Returns: 0, ETIME
*/
#define MC_CMD_MAC_STATS 0x2e
/* MC_CMD_MAC_STATS_IN msgrequest */
#define MC_CMD_MAC_STATS_IN_LEN 16
+/* ??? */
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LO_OFST 0
@@ -1684,6 +2599,7 @@
/* MC_CMD_MEMCPY_RECORD_TYPEDEF structuredef */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_LEN 32
+/* this is only used for the first record */
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST 0
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_LBN 0
#define MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_WIDTH 32
@@ -1713,7 +2629,23 @@
/***********************************/
/* MC_CMD_MEMCPY
- * Perform memory copy operation.
+ * DMA write data into (Rid,Addr), either by DMA reading (Rid,Addr), or by data
+ * embedded directly in the command.
+ *
+ * A common pattern is for a client to use generation counts to signal a DMA
+ * update of a data structure. To facilitate this, this MCDI operation can
+ * contain multiple requests which are executed in strict order. Requests take
+ * the form of duplicating the entire MCDI request continuously (including the
+ * record count, which is ignored in all but the first structure).
+ *
+ * The source data can either come from a DMA from the host, or it can be
+ * embedded within the request directly, thereby eliminating a DMA read. To
+ * indicate this, the client sets FROM_RID=%RID_INLINE, ADDR_HI=0, and
+ * ADDR_LO=offset, and inserts the data at %offset from the start of the
+ * payload. It's the caller's responsibility to ensure that the embedded data
+ * doesn't overlap the records.
+ *
+ * Returns: 0, EINVAL (invalid RID)
*/
#define MC_CMD_MEMCPY 0x31
@@ -1721,6 +2653,7 @@
#define MC_CMD_MEMCPY_IN_LENMIN 32
#define MC_CMD_MEMCPY_IN_LENMAX 224
#define MC_CMD_MEMCPY_IN_LEN(num) (0+32*(num))
+/* see MC_CMD_MEMCPY_RECORD_TYPEDEF */
#define MC_CMD_MEMCPY_IN_RECORD_OFST 0
#define MC_CMD_MEMCPY_IN_RECORD_LEN 32
#define MC_CMD_MEMCPY_IN_RECORD_MINNUM 1
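
Since MC_CMD_MEMCPY requests are just a run of 32-byte records, a short sketch of sizing one and stamping the record count into record zero may help. It relies on the put_le32() helper from the MC_CMD_DRV_ATTACH sketch; the per-record RID/address/length fields (defined in the rest of MC_CMD_MEMCPY_RECORD_TYPEDEF) would be filled in the same way afterwards.

/* Initialise an MC_CMD_MEMCPY request for 'n' records. Only the first record's
 * NUM_RECORDS field is consumed by the MC (see the typedef comment above); the
 * caller then fills each 32-byte record's remaining fields. Returns the total
 * request length, or 0 if 'n' does not fit in 'req_space'.
 */
static size_t memcpy_req_init(uint8_t *req, size_t req_space, unsigned int n)
{
        size_t len = MC_CMD_MEMCPY_IN_LEN(n);   /* 32 bytes per record */

        if (n < MC_CMD_MEMCPY_IN_RECORD_MINNUM || len > req_space)
                return 0;
        memset(req, 0, len);
        put_le32(req, MC_CMD_MEMCPY_IN_RECORD_OFST +
                      MC_CMD_MEMCPY_RECORD_TYPEDEF_NUM_RECORDS_OFST, n);
        return len;
}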
@@ -1741,14 +2674,22 @@
#define MC_CMD_WOL_FILTER_SET_IN_FILTER_MODE_OFST 0
#define MC_CMD_FILTER_MODE_SIMPLE 0x0 /* enum */
#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff /* enum */
+/* A type value of 1 is unused. */
#define MC_CMD_WOL_FILTER_SET_IN_WOL_TYPE_OFST 4
-#define MC_CMD_WOL_TYPE_MAGIC 0x0 /* enum */
-#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2 /* enum */
-#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3 /* enum */
-#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4 /* enum */
-#define MC_CMD_WOL_TYPE_BITMAP 0x5 /* enum */
-#define MC_CMD_WOL_TYPE_LINK 0x6 /* enum */
-#define MC_CMD_WOL_TYPE_MAX 0x7 /* enum */
+/* enum: Magic */
+#define MC_CMD_WOL_TYPE_MAGIC 0x0
+/* enum: MS Windows Magic */
+#define MC_CMD_WOL_TYPE_WIN_MAGIC 0x2
+/* enum: IPv4 Syn */
+#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
+/* enum: IPv6 Syn */
+#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
+/* enum: Bitmap */
+#define MC_CMD_WOL_TYPE_BITMAP 0x5
+/* enum: Link */
+#define MC_CMD_WOL_TYPE_LINK 0x6
+/* enum: (Above this for future use) */
+#define MC_CMD_WOL_TYPE_MAX 0x7
#define MC_CMD_WOL_FILTER_SET_IN_DATA_OFST 8
#define MC_CMD_WOL_FILTER_SET_IN_DATA_LEN 4
#define MC_CMD_WOL_FILTER_SET_IN_DATA_NUM 46
@@ -1818,7 +2759,7 @@
/***********************************/
/* MC_CMD_WOL_FILTER_REMOVE
- * Remove a WoL filter.
+ * Remove a WoL filter. Locks required: None. Returns: 0, EINVAL, ENOSYS
*/
#define MC_CMD_WOL_FILTER_REMOVE 0x33
@@ -1832,7 +2773,8 @@
/***********************************/
/* MC_CMD_WOL_FILTER_RESET
- * Reset (i.e. remove all) WoL filters.
+ * Reset (i.e. remove all) WoL filters. Locks required: None. Returns: 0,
+ * ENOSYS
*/
#define MC_CMD_WOL_FILTER_RESET 0x34
@@ -1848,7 +2790,7 @@
/***********************************/
/* MC_CMD_SET_MCAST_HASH
- * Set the MCASH hash value.
+ * Set the MCAST hash value without otherwise reconfiguring the MAC
*/
#define MC_CMD_SET_MCAST_HASH 0x35
@@ -1865,7 +2807,8 @@
/***********************************/
/* MC_CMD_NVRAM_TYPES
- * Get virtual NVRAM partitions information.
+ * Return bitfield indicating available types of virtual NVRAM partitions.
+ * Locks required: none. Returns: 0
*/
#define MC_CMD_NVRAM_TYPES 0x36
@@ -1874,26 +2817,54 @@
/* MC_CMD_NVRAM_TYPES_OUT msgresponse */
#define MC_CMD_NVRAM_TYPES_OUT_LEN 4
+/* Bit mask of supported types. */
#define MC_CMD_NVRAM_TYPES_OUT_TYPES_OFST 0
-#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0 /* enum */
-#define MC_CMD_NVRAM_TYPE_MC_FW 0x1 /* enum */
-#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2 /* enum */
-#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3 /* enum */
-#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4 /* enum */
-#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5 /* enum */
-#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6 /* enum */
-#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7 /* enum */
-#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8 /* enum */
-#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9 /* enum */
-#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa /* enum */
-#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb /* enum */
-#define MC_CMD_NVRAM_TYPE_LOG 0xc /* enum */
-#define MC_CMD_NVRAM_TYPE_FPGA 0xd /* enum */
+/* enum: Disabled callisto. */
+#define MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO 0x0
+/* enum: MC firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW 0x1
+/* enum: MC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_MC_FW_BACKUP 0x2
+/* enum: Static configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0 0x3
+/* enum: Static configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1 0x4
+/* enum: Dynamic configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0 0x5
+/* enum: Dynamic configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1 0x6
+/* enum: Expansion Rom. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM 0x7
+/* enum: Expansion Rom Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0 0x8
+/* enum: Expansion Rom Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1 0x9
+/* enum: Phy Configuration Port0. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT0 0xa
+/* enum: Phy Configuration Port1. */
+#define MC_CMD_NVRAM_TYPE_PHY_PORT1 0xb
+/* enum: Log. */
+#define MC_CMD_NVRAM_TYPE_LOG 0xc
+/* enum: FPGA image. */
+#define MC_CMD_NVRAM_TYPE_FPGA 0xd
+/* enum: FPGA backup image */
+#define MC_CMD_NVRAM_TYPE_FPGA_BACKUP 0xe
+/* enum: FC firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW 0xf
+/* enum: FC backup firmware. */
+#define MC_CMD_NVRAM_TYPE_FC_FW_BACKUP 0x10
+/* enum: CPLD image. */
+#define MC_CMD_NVRAM_TYPE_CPLD 0x11
+/* enum: Licensing information. */
+#define MC_CMD_NVRAM_TYPE_LICENSE 0x12
+/* enum: FC Log. */
+#define MC_CMD_NVRAM_TYPE_FC_LOG 0x13
/***********************************/
/* MC_CMD_NVRAM_INFO
- * Read info about a virtual NVRAM partition.
+ * Read info about a virtual NVRAM partition. Locks required: none. Returns: 0,
+ * EINVAL (bad type).
*/
#define MC_CMD_NVRAM_INFO 0x37
@@ -1913,13 +2884,19 @@
#define MC_CMD_NVRAM_INFO_OUT_FLAGS_OFST 12
#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_LBN 0
#define MC_CMD_NVRAM_INFO_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_OUT_A_B_WIDTH 1
#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
/***********************************/
/* MC_CMD_NVRAM_UPDATE_START
- * Start a group of update operations on a virtual NVRAM partition.
+ * Start a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad type), EACCES (if
+ * PHY_LOCK required and not held).
*/
#define MC_CMD_NVRAM_UPDATE_START 0x38
@@ -1935,7 +2912,9 @@
/***********************************/
/* MC_CMD_NVRAM_READ
- * Read data from a virtual NVRAM partition.
+ * Read data from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
*/
#define MC_CMD_NVRAM_READ 0x39
@@ -1945,6 +2924,7 @@
/* Enum values, see field(s): */
/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
#define MC_CMD_NVRAM_READ_IN_OFFSET_OFST 4
+/* amount to read in bytes */
#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
/* MC_CMD_NVRAM_READ_OUT msgresponse */
@@ -1959,7 +2939,9 @@
/***********************************/
/* MC_CMD_NVRAM_WRITE
- * Write data to a virtual NVRAM partition.
+ * Write data to a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
*/
#define MC_CMD_NVRAM_WRITE 0x3a
@@ -1983,7 +2965,9 @@
/***********************************/
/* MC_CMD_NVRAM_ERASE
- * Erase sector(s) from a virtual NVRAM partition.
+ * Erase sector(s) from a virtual NVRAM partition. Locks required: PHY_LOCK if
+ * type==*PHY*. Returns: 0, EINVAL (bad type/offset/length), EACCES (if
+ * PHY_LOCK required and not held)
*/
#define MC_CMD_NVRAM_ERASE 0x3b
@@ -2001,7 +2985,9 @@
/***********************************/
/* MC_CMD_NVRAM_UPDATE_FINISH
- * Finish a group of update operations on a virtual NVRAM partition.
+ * Finish a group of update operations on a virtual NVRAM partition. Locks
+ * required: PHY_LOCK if type==*PHY*. Returns: 0, EINVAL (bad
+ * type/offset/length), EACCES (if PHY_LOCK required and not held)
*/
#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
@@ -2019,6 +3005,20 @@
/***********************************/
/* MC_CMD_REBOOT
* Reboot the MC.
+ *
+ * The AFTER_ASSERTION flag is intended to be used when the driver notices an
+ * assertion failure (at which point it is expected to perform a complete tear
+ * down and reinitialise), to allow both ports to reset the MC once in an
+ * atomic fashion.
+ *
+ * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1,
+ * which means that they will automatically reboot out of the assertion
+ * handler, so this is in practice an optional operation. It is still
+ * recommended that drivers execute this to support custom firmwares with
+ * REBOOT_ON_ASSERT=0.
+ *
+ * Locks required: NONE Returns: Nothing. You get back a response with ERR=1,
+ * DATALEN=0
*/
#define MC_CMD_REBOOT 0x3d
@@ -2033,7 +3033,9 @@
/***********************************/
/* MC_CMD_SCHEDINFO
- * Request scheduler info.
+ * Request scheduler info. Locks required: NONE. Returns: An array of
+ * (timeslice,maximum overrun), one for each thread, in ascending order of
+ * thread address.
*/
#define MC_CMD_SCHEDINFO 0x3e
@@ -2052,14 +3054,24 @@
/***********************************/
/* MC_CMD_REBOOT_MODE
+ * Set the mode for the next MC reboot. Locks required: NONE. Sets the reboot
+ * mode to the specified value. Returns the old mode.
*/
#define MC_CMD_REBOOT_MODE 0x3f
/* MC_CMD_REBOOT_MODE_IN msgrequest */
#define MC_CMD_REBOOT_MODE_IN_LEN 4
#define MC_CMD_REBOOT_MODE_IN_VALUE_OFST 0
-#define MC_CMD_REBOOT_MODE_NORMAL 0x0 /* enum */
-#define MC_CMD_REBOOT_MODE_SNAPPER 0x3 /* enum */
+/* enum: Normal. */
+#define MC_CMD_REBOOT_MODE_NORMAL 0x0
+/* enum: Power-on Reset. */
+#define MC_CMD_REBOOT_MODE_POR 0x2
+/* enum: Snapper. */
+#define MC_CMD_REBOOT_MODE_SNAPPER 0x3
+/* enum: snapper fake POR */
+#define MC_CMD_REBOOT_MODE_SNAPPER_POR 0x4
+#define MC_CMD_REBOOT_MODE_IN_FAKE_LBN 7
+#define MC_CMD_REBOOT_MODE_IN_FAKE_WIDTH 1
/* MC_CMD_REBOOT_MODE_OUT msgresponse */
#define MC_CMD_REBOOT_MODE_OUT_LEN 4
@@ -2069,32 +3081,145 @@
/***********************************/
/* MC_CMD_SENSOR_INFO
* Returns information about every available sensor.
+ *
+ * Each sensor has a single (16bit) value, and a corresponding state. The
+ * mapping between value and state is nominally determined by the MC, but may
+ * be implemented using up to 2 ranges per sensor.
+ *
+ * This call returns a mask (32bit) of the sensors that are supported by this
+ * platform, then an array of sensor information structures, in order of sensor
+ * type (but without gaps for unimplemented sensors). Each structure defines
+ * the ranges for the corresponding sensor. An unused range is indicated by
+ * equal limit values. If one range is used, a value outside that range results
+ * in STATE_FATAL. If two ranges are used, a value outside the second range
+ * results in STATE_FATAL while a value outside the first and inside the second
+ * range results in STATE_WARNING.
+ *
+ * Sensor masks and sensor information arrays are organised into pages. For
+ * backward compatibility, older host software can only use sensors in page 0.
+ * Bit 31 in the sensor mask was previously unused, and is now reserved for use
+ * as the next page flag.
+ *
+ * If the request does not contain a PAGE value then firmware will only return
+ * page 0 of sensor information, with bit 31 in the sensor mask cleared.
+ *
+ * If the request contains a PAGE value then firmware responds with the sensor
+ * mask and sensor information array for that page of sensors. In this case bit
+ * 31 in the mask is set if another page exists.
+ *
+ * Locks required: None Returns: 0
*/
#define MC_CMD_SENSOR_INFO 0x41
/* MC_CMD_SENSOR_INFO_IN msgrequest */
#define MC_CMD_SENSOR_INFO_IN_LEN 0
+/* MC_CMD_SENSOR_INFO_EXT_IN msgrequest */
+#define MC_CMD_SENSOR_INFO_EXT_IN_LEN 4
+/* Which page of sensors to report.
+ *
+ * Page 0 contains sensors 0 to 30 (sensor 31 is the next page bit).
+ *
+ * Page 1 contains sensors 32 to 62 (sensor 63 is the next page bit), and so on.
+ */
+#define MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST 0
+
/* MC_CMD_SENSOR_INFO_OUT msgresponse */
#define MC_CMD_SENSOR_INFO_OUT_LENMIN 12
#define MC_CMD_SENSOR_INFO_OUT_LENMAX 252
#define MC_CMD_SENSOR_INFO_OUT_LEN(num) (4+8*(num))
#define MC_CMD_SENSOR_INFO_OUT_MASK_OFST 0
-#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0 /* enum */
-#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1 /* enum */
-#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2 /* enum */
-#define MC_CMD_SENSOR_PHY0_TEMP 0x3 /* enum */
-#define MC_CMD_SENSOR_PHY0_COOLING 0x4 /* enum */
-#define MC_CMD_SENSOR_PHY1_TEMP 0x5 /* enum */
-#define MC_CMD_SENSOR_PHY1_COOLING 0x6 /* enum */
-#define MC_CMD_SENSOR_IN_1V0 0x7 /* enum */
-#define MC_CMD_SENSOR_IN_1V2 0x8 /* enum */
-#define MC_CMD_SENSOR_IN_1V8 0x9 /* enum */
-#define MC_CMD_SENSOR_IN_2V5 0xa /* enum */
-#define MC_CMD_SENSOR_IN_3V3 0xb /* enum */
-#define MC_CMD_SENSOR_IN_12V0 0xc /* enum */
-#define MC_CMD_SENSOR_IN_1V2A 0xd /* enum */
-#define MC_CMD_SENSOR_IN_VREF 0xe /* enum */
+/* enum: Controller temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_TEMP 0x0
+/* enum: Phy common temperature: degC */
+#define MC_CMD_SENSOR_PHY_COMMON_TEMP 0x1
+/* enum: Controller cooling: bool */
+#define MC_CMD_SENSOR_CONTROLLER_COOLING 0x2
+/* enum: Phy 0 temperature: degC */
+#define MC_CMD_SENSOR_PHY0_TEMP 0x3
+/* enum: Phy 0 cooling: bool */
+#define MC_CMD_SENSOR_PHY0_COOLING 0x4
+/* enum: Phy 1 temperature: degC */
+#define MC_CMD_SENSOR_PHY1_TEMP 0x5
+/* enum: Phy 1 cooling: bool */
+#define MC_CMD_SENSOR_PHY1_COOLING 0x6
+/* enum: 1.0v power: mV */
+#define MC_CMD_SENSOR_IN_1V0 0x7
+/* enum: 1.2v power: mV */
+#define MC_CMD_SENSOR_IN_1V2 0x8
+/* enum: 1.8v power: mV */
+#define MC_CMD_SENSOR_IN_1V8 0x9
+/* enum: 2.5v power: mV */
+#define MC_CMD_SENSOR_IN_2V5 0xa
+/* enum: 3.3v power: mV */
+#define MC_CMD_SENSOR_IN_3V3 0xb
+/* enum: 12v power: mV */
+#define MC_CMD_SENSOR_IN_12V0 0xc
+/* enum: 1.2v analogue power: mV */
+#define MC_CMD_SENSOR_IN_1V2A 0xd
+/* enum: reference voltage: mV */
+#define MC_CMD_SENSOR_IN_VREF 0xe
+/* enum: AOE FPGA power: mV */
+#define MC_CMD_SENSOR_OUT_VAOE 0xf
+/* enum: AOE FPGA temperature: degC */
+#define MC_CMD_SENSOR_AOE_TEMP 0x10
+/* enum: AOE FPGA PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_AOE_TEMP 0x11
+/* enum: AOE PSU temperature: degC */
+#define MC_CMD_SENSOR_PSU_TEMP 0x12
+/* enum: Fan 0 speed: RPM */
+#define MC_CMD_SENSOR_FAN_0 0x13
+/* enum: Fan 1 speed: RPM */
+#define MC_CMD_SENSOR_FAN_1 0x14
+/* enum: Fan 2 speed: RPM */
+#define MC_CMD_SENSOR_FAN_2 0x15
+/* enum: Fan 3 speed: RPM */
+#define MC_CMD_SENSOR_FAN_3 0x16
+/* enum: Fan 4 speed: RPM */
+#define MC_CMD_SENSOR_FAN_4 0x17
+/* enum: AOE FPGA input power: mV */
+#define MC_CMD_SENSOR_IN_VAOE 0x18
+/* enum: AOE FPGA current: mA */
+#define MC_CMD_SENSOR_OUT_IAOE 0x19
+/* enum: AOE FPGA input current: mA */
+#define MC_CMD_SENSOR_IN_IAOE 0x1a
+/* enum: NIC power consumption: W */
+#define MC_CMD_SENSOR_NIC_POWER 0x1b
+/* enum: 0.9v power voltage: mV */
+#define MC_CMD_SENSOR_IN_0V9 0x1c
+/* enum: 0.9v power current: mA */
+#define MC_CMD_SENSOR_IN_I0V9 0x1d
+/* enum: 1.2v power current: mA */
+#define MC_CMD_SENSOR_IN_I1V2 0x1e
+/* enum: Not a sensor: reserved for the next page flag */
+#define MC_CMD_SENSOR_PAGE0_NEXT 0x1f
+/* enum: 0.9v power voltage (at ADC): mV */
+#define MC_CMD_SENSOR_IN_0V9_ADC 0x20
+/* enum: Controller temperature 2: degC */
+#define MC_CMD_SENSOR_CONTROLLER_2_TEMP 0x21
+/* enum: Voltage regulator internal temperature: degC */
+#define MC_CMD_SENSOR_VREG_INTERNAL_TEMP 0x22
+/* enum: 0.9V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_0V9_TEMP 0x23
+/* enum: 1.2V voltage regulator temperature: degC */
+#define MC_CMD_SENSOR_VREG_1V2_TEMP 0x24
+/* enum: controller internal temperature sensor voltage (internal ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT 0x25
+/* enum: controller internal temperature (internal ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP 0x26
+/* enum: controller internal temperature sensor voltage (external ADC): mV */
+#define MC_CMD_SENSOR_CONTROLLER_VPTAT_EXTADC 0x27
+/* enum: controller internal temperature (external ADC): degC */
+#define MC_CMD_SENSOR_CONTROLLER_INTERNAL_TEMP_EXTADC 0x28
+/* enum: ambient temperature: degC */
+#define MC_CMD_SENSOR_AMBIENT_TEMP 0x29
+/* enum: air flow: bool */
+#define MC_CMD_SENSOR_AIRFLOW 0x2a
+/* enum: voltage between VDD08D and VSS08D at CSR: mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR 0x2b
+/* enum: voltage between VDD08D and VSS08D at CSR (external ADC): mV */
+#define MC_CMD_SENSOR_VDD08D_VSS08D_CSR_EXTADC 0x2c
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
#define MC_CMD_SENSOR_ENTRY_LO_OFST 4
@@ -2102,6 +3227,23 @@
#define MC_CMD_SENSOR_ENTRY_MINNUM 1
#define MC_CMD_SENSOR_ENTRY_MAXNUM 31
+/* MC_CMD_SENSOR_INFO_EXT_OUT msgresponse */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN 12
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX 252
+#define MC_CMD_SENSOR_INFO_EXT_OUT_LEN(num) (4+8*(num))
+#define MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO_OUT */
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN 31
+#define MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_WIDTH 1
+/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
+/* MC_CMD_SENSOR_ENTRY_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_LEN 8 */
+/* MC_CMD_SENSOR_ENTRY_LO_OFST 4 */
+/* MC_CMD_SENSOR_ENTRY_HI_OFST 8 */
+/* MC_CMD_SENSOR_ENTRY_MINNUM 1 */
+/* MC_CMD_SENSOR_ENTRY_MAXNUM 31 */
+
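As a sketch of the paging scheme described above: request page 0, then keep asking for the next page while the NEXT_PAGE bit (bit 31) of the returned mask is set. The mcdi_rpc() hook and the byte accessors are the same hypothetical helpers used in the MC_CMD_DRV_ATTACH sketch; parsing of the per-sensor entry array is left out.

static int walk_sensor_pages(void)
{
        uint8_t req[MC_CMD_SENSOR_INFO_EXT_IN_LEN];
        uint8_t resp[MC_CMD_SENSOR_INFO_EXT_OUT_LENMAX];
        uint32_t page = 0;
        uint32_t mask;
        int rc;

        do {
                put_le32(req, MC_CMD_SENSOR_INFO_EXT_IN_PAGE_OFST, page);
                rc = mcdi_rpc(MC_CMD_SENSOR_INFO, req, sizeof(req),
                              resp, sizeof(resp));
                if (rc < (int)MC_CMD_SENSOR_INFO_EXT_OUT_LENMIN)
                        return rc < 0 ? rc : -1;

                mask = get_le32(resp, MC_CMD_SENSOR_INFO_EXT_OUT_MASK_OFST);
                /* Bits 0..30 of 'mask' flag sensors 32*page .. 32*page + 30;
                 * one MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF follows per set bit.
                 */
                page++;
        } while (mask & (1u << MC_CMD_SENSOR_INFO_EXT_OUT_NEXT_PAGE_LBN));

        return 0;
}
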
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF structuredef */
#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_LEN 8
#define MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF_MIN1_OFST 0
@@ -2124,39 +3266,80 @@
/***********************************/
/* MC_CMD_READ_SENSORS
- * Returns the current reading from each sensor.
+ * Returns the current reading from each sensor. DMAs an array of sensor
+ * readings, in order of sensor type (but without gaps for unimplemented
+ * sensors), into host memory. Each array element is a
+ * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF dword.
+ *
+ * If the request does not contain the LENGTH field then only sensors 0 to 30
+ * are reported, to avoid DMA buffer overflow in older host software. If the
+ * sensor readings require more space than the LENGTH allows, then EINVAL is
+ * returned.
+ *
+ * The MC will send a SENSOREVT event every time any sensor changes state. The
+ * driver is responsible for ensuring that it doesn't miss any events. The
+ * board will function normally if all sensors are in STATE_OK or
+ * STATE_WARNING. Otherwise the board should not be expected to function.
*/
#define MC_CMD_READ_SENSORS 0x42
/* MC_CMD_READ_SENSORS_IN msgrequest */
#define MC_CMD_READ_SENSORS_IN_LEN 8
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_OFST 0
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LEN 8
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_LO_OFST 0
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
+/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
+#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
+/* DMA address of host buffer for sensor readings */
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
+#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_HI_OFST 4
+/* Size in bytes of host buffer. */
+#define MC_CMD_READ_SENSORS_EXT_IN_LENGTH_OFST 8
+
/* MC_CMD_READ_SENSORS_OUT msgresponse */
#define MC_CMD_READ_SENSORS_OUT_LEN 0
+/* MC_CMD_READ_SENSORS_EXT_OUT msgresponse */
+#define MC_CMD_READ_SENSORS_EXT_OUT_LEN 0
+
/* MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF structuredef */
-#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 3
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN 4
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_OFST 0
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LEN 2
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_LBN 0
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE_WIDTH 16
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_OFST 2
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LEN 1
-#define MC_CMD_SENSOR_STATE_OK 0x0 /* enum */
-#define MC_CMD_SENSOR_STATE_WARNING 0x1 /* enum */
-#define MC_CMD_SENSOR_STATE_FATAL 0x2 /* enum */
-#define MC_CMD_SENSOR_STATE_BROKEN 0x3 /* enum */
+/* enum: Ok. */
+#define MC_CMD_SENSOR_STATE_OK 0x0
+/* enum: Breached warning threshold. */
+#define MC_CMD_SENSOR_STATE_WARNING 0x1
+/* enum: Breached fatal threshold. */
+#define MC_CMD_SENSOR_STATE_FATAL 0x2
+/* enum: Fault with sensor. */
+#define MC_CMD_SENSOR_STATE_BROKEN 0x3
+/* enum: Sensor is working but does not currently have a reading. */
+#define MC_CMD_SENSOR_STATE_NO_READING 0x4
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_LBN 16
#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE_WIDTH 8
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_OFST 3
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LEN 1
+/* Enum values, see field(s): */
+/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_LBN 24
+#define MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_TYPE_WIDTH 8
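+
+/* A minimal decoding sketch (hypothetical struct and helper names; <stdint.h>
+ * fixed-width types and little-endian MCDI encoding assumed): each
+ * MC_CMD_READ_SENSORS array element is one dword laid out as VALUE (16 bits),
+ * STATE (8 bits) and TYPE (8 bits), per the LBN/WIDTH values above.
+ */
+struct hypothetical_sensor_reading {
+	uint16_t value;	/* raw reading, units depend on the sensor type */
+	uint8_t state;	/* one of MC_CMD_SENSOR_STATE_* */
+	uint8_t type;	/* sensor type code, as in MC_CMD_SENSOR_INFO_OUT */
+};
+
+static inline struct hypothetical_sensor_reading
+hypothetical_decode_sensor_entry(uint32_t dword)
+{
+	struct hypothetical_sensor_reading r;
+
+	r.value = dword & 0xffff;        /* VALUE_LBN 0, VALUE_WIDTH 16 */
+	r.state = (dword >> 16) & 0xff;  /* STATE_LBN 16, STATE_WIDTH 8 */
+	r.type = (dword >> 24) & 0xff;   /* TYPE_LBN 24, TYPE_WIDTH 8 */
+	return r;
+}
+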
/***********************************/
/* MC_CMD_GET_PHY_STATE
- * Report current state of PHY.
+ * Report current state of PHY. A 'zombie' PHY is a PHY that has failed to boot
+ * (e.g. due to missing or corrupted firmware). Locks required: None. Return
+ * code: 0
*/
#define MC_CMD_GET_PHY_STATE 0x43
@@ -2166,13 +3349,16 @@
/* MC_CMD_GET_PHY_STATE_OUT msgresponse */
#define MC_CMD_GET_PHY_STATE_OUT_LEN 4
#define MC_CMD_GET_PHY_STATE_OUT_STATE_OFST 0
-#define MC_CMD_PHY_STATE_OK 0x1 /* enum */
-#define MC_CMD_PHY_STATE_ZOMBIE 0x2 /* enum */
+/* enum: Ok. */
+#define MC_CMD_PHY_STATE_OK 0x1
+/* enum: Faulty. */
+#define MC_CMD_PHY_STATE_ZOMBIE 0x2
/***********************************/
/* MC_CMD_SETUP_8021QBB
- * 802.1Qbb control.
+ * 802.1Qbb control. 8 Tx queues that map to priorities 0 - 7. Use all 1s to
+ * disable 802.1Qbb for a given priority.
*/
#define MC_CMD_SETUP_8021QBB 0x44
@@ -2187,7 +3373,7 @@
/***********************************/
/* MC_CMD_WOL_FILTER_GET
- * Retrieve ID of any WoL filters.
+ * Retrieve ID of any WoL filters. Locks required: None. Returns: 0, ENOSYS
*/
#define MC_CMD_WOL_FILTER_GET 0x45
@@ -2201,7 +3387,8 @@
/***********************************/
/* MC_CMD_ADD_LIGHTSOUT_OFFLOAD
- * Add a protocol offload to NIC for lights-out state.
+ * Add a protocol offload to NIC for lights-out state. Locks required: None.
+ * Returns: 0, ENOSYS
*/
#define MC_CMD_ADD_LIGHTSOUT_OFFLOAD 0x46
@@ -2241,7 +3428,8 @@
/***********************************/
/* MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD
- * Remove a protocol offload from NIC for lights-out state.
+ * Remove a protocol offload from NIC for lights-out state. Locks required:
+ * None. Returns: 0, ENOSYS
*/
#define MC_CMD_REMOVE_LIGHTSOUT_OFFLOAD 0x47
@@ -2256,7 +3444,7 @@
/***********************************/
/* MC_CMD_MAC_RESET_RESTORE
- * Restore MAC after block reset.
+ * Restore MAC after block reset. Locks required: None. Returns: 0.
*/
#define MC_CMD_MAC_RESET_RESTORE 0x48
@@ -2269,6 +3457,9 @@
/***********************************/
/* MC_CMD_TESTASSERT
+ * Deliberately trigger an assert-detonation in the firmware for testing
+ * purposes (i.e. to allow testing that the driver copes gracefully). Locks
+ * required: None. Returns: 0
*/
#define MC_CMD_TESTASSERT 0x49
@@ -2281,14 +3472,23 @@
/***********************************/
/* MC_CMD_WORKAROUND
- * Enable/Disable a given workaround.
+ * Enable/Disable a given workaround. The mcfw will return EINVAL if it doesn't
+ * understand the given workaround number - which should not be treated as a
+ * hard error by client code. This op does not imply any semantics about each
+ * workaround, that's between the driver and the mcfw on a per-workaround
+ * basis. Locks required: None. Returns: 0, EINVAL.
*/
#define MC_CMD_WORKAROUND 0x4a
/* MC_CMD_WORKAROUND_IN msgrequest */
#define MC_CMD_WORKAROUND_IN_LEN 8
#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
-#define MC_CMD_WORKAROUND_BUG17230 0x1 /* enum */
+/* enum: Bug 17230 work around. */
+#define MC_CMD_WORKAROUND_BUG17230 0x1
+/* enum: Bug 35388 work around (unsafe EVQ writes). */
+#define MC_CMD_WORKAROUND_BUG35388 0x2
+/* enum: Bug 35017 workaround (A64 tables must be identity map) */
+#define MC_CMD_WORKAROUND_BUG35017 0x3
#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
/* MC_CMD_WORKAROUND_OUT msgresponse */
@@ -2297,7 +3497,12 @@
/***********************************/
/* MC_CMD_GET_PHY_MEDIA_INFO
- * Read media-specific data from PHY.
+ * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
+ * SFP+ PHYs). The 'media type' can be found via GET_PHY_CFG
+ * (GET_PHY_CFG_OUT_MEDIA_TYPE); the valid 'page number' input values, and the
+ * output data, are interpreted on a per-type basis. For SFP+: PAGE=0 or 1
+ * returns a 128-byte block read from module I2C address 0xA0 offset 0 or 0x80.
+ * Anything else: currently undefined. Locks required: None. Return code: 0.
*/
#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
@@ -2309,6 +3514,7 @@
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN 5
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX 252
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(num) (4+1*(num))
+/* in bytes */
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_LEN 1
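+
+/* A minimal response-parsing sketch (hypothetical helper name; little-endian
+ * host and <stdint.h>/<string.h> assumed): copy the returned module-ID bytes
+ * out of a MC_CMD_GET_PHY_MEDIA_INFO response. For SFP+, PAGE 0 corresponds
+ * to the 128 bytes at module I2C address 0xA0, offset 0.
+ */
+static inline size_t
+hypothetical_copy_media_info(const uint8_t *resp, size_t resp_len,
+			     uint8_t *out, size_t out_len)
+{
+	uint32_t datalen;
+
+	if (resp_len < MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMIN)
+		return 0;
+	memcpy(&datalen, resp + MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST, 4);
+	if (datalen > out_len ||
+	    datalen > resp_len - MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST)
+		return 0;
+	memcpy(out, resp + MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST, datalen);
+	return datalen;
+}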
@@ -2318,7 +3524,8 @@
/***********************************/
/* MC_CMD_NVRAM_TEST
- * Test a particular NVRAM partition.
+ * Test a particular NVRAM partition for valid contents (where "valid" depends
+ * on the type of partition).
*/
#define MC_CMD_NVRAM_TEST 0x4c
@@ -2331,22 +3538,31 @@
/* MC_CMD_NVRAM_TEST_OUT msgresponse */
#define MC_CMD_NVRAM_TEST_OUT_LEN 4
#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
-#define MC_CMD_NVRAM_TEST_PASS 0x0 /* enum */
-#define MC_CMD_NVRAM_TEST_FAIL 0x1 /* enum */
-#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2 /* enum */
+/* enum: Passed. */
+#define MC_CMD_NVRAM_TEST_PASS 0x0
+/* enum: Failed. */
+#define MC_CMD_NVRAM_TEST_FAIL 0x1
+/* enum: Not supported. */
+#define MC_CMD_NVRAM_TEST_NOTSUPP 0x2
/***********************************/
/* MC_CMD_MRSFP_TWEAK
- * Read status and/or set parameters for the 'mrsfp' driver.
+ * Read status and/or set parameters for the 'mrsfp' driver in mr_rusty builds.
+ * I2C I/O expander bits are always read; if equaliser parameters are supplied,
+ * they are configured first. Locks required: None. Return code: 0, EINVAL.
*/
#define MC_CMD_MRSFP_TWEAK 0x4d
/* MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG msgrequest */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_LEN 16
+/* 0-6 low->high de-emph. */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_LEVEL_OFST 0
+/* 0-8 low->high ref.V */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_TXEQ_DT_CFG_OFST 4
+/* 0-8 low->high boost */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_BOOST_OFST 8
+/* 0-8 low->high ref.V */
#define MC_CMD_MRSFP_TWEAK_IN_EQ_CONFIG_RXEQ_DT_CFG_OFST 12
/* MC_CMD_MRSFP_TWEAK_IN_READ_ONLY msgrequest */
@@ -2354,16 +3570,23 @@
/* MC_CMD_MRSFP_TWEAK_OUT msgresponse */
#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+/* input bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0
+/* output bits */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4
+/* direction */
#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0 /* enum */
-#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1 /* enum */
+/* enum: Out. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OUT 0x0
+/* enum: In. */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_IN 0x1
/***********************************/
/* MC_CMD_SENSOR_SET_LIMS
- * Adjusts the sensor limits.
+ * Adjusts the sensor limits. This is a warranty-voiding operation. Returns:
+ * ENOENT if the sensor specified does not exist, EINVAL if the limits are out
+ * of range.
*/
#define MC_CMD_SENSOR_SET_LIMS 0x4e
@@ -2372,9 +3595,13 @@
#define MC_CMD_SENSOR_SET_LIMS_IN_SENSOR_OFST 0
/* Enum values, see field(s): */
/* MC_CMD_SENSOR_INFO/MC_CMD_SENSOR_INFO_OUT/MASK */
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW0_OFST 4
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI0_OFST 8
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_LOW1_OFST 12
+/* interpretation is sensor-specific. */
#define MC_CMD_SENSOR_SET_LIMS_IN_HI1_OFST 16
/* MC_CMD_SENSOR_SET_LIMS_OUT msgresponse */
@@ -2396,9 +3623,3640 @@
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_RXQ_OFST 8
#define MC_CMD_GET_RESOURCE_LIMITS_OUT_TXQ_OFST 12
+
+/***********************************/
+/* MC_CMD_NVRAM_PARTITIONS
+ * Reads the list of available virtual NVRAM partition types. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_PARTITIONS 0x51
+
+/* MC_CMD_NVRAM_PARTITIONS_IN msgrequest */
+#define MC_CMD_NVRAM_PARTITIONS_IN_LEN 0
+
+/* MC_CMD_NVRAM_PARTITIONS_OUT msgresponse */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX 252
+#define MC_CMD_NVRAM_PARTITIONS_OUT_LEN(num) (4+4*(num))
+/* total number of partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST 0
+/* type ID code for each of NUM_PARTITIONS partitions */
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_LEN 4
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MINNUM 0
+#define MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM 62
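+
+/* A minimal response-walk sketch (hypothetical helper name; little-endian
+ * host assumed): NUM_PARTITIONS is a dword at offset 0, followed by that many
+ * dword partition type ID codes starting at offset 4.
+ */
+static inline unsigned int
+hypothetical_count_partitions_of_type(const uint32_t *resp_dwords,
+				      uint32_t wanted_type)
+{
+	uint32_t num = resp_dwords[MC_CMD_NVRAM_PARTITIONS_OUT_NUM_PARTITIONS_OFST / 4];
+	unsigned int i, matches = 0;
+
+	for (i = 0; i < num && i < MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_MAXNUM; i++)
+		if (resp_dwords[MC_CMD_NVRAM_PARTITIONS_OUT_TYPE_ID_OFST / 4 + i] ==
+		    wanted_type)
+			matches++;
+	return matches;
+}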
+
+
+/***********************************/
+/* MC_CMD_NVRAM_METADATA
+ * Reads soft metadata for a virtual NVRAM partition type. Locks required:
+ * none. Returns: 0, EINVAL (bad type).
+ */
+#define MC_CMD_NVRAM_METADATA 0x52
+
+/* MC_CMD_NVRAM_METADATA_IN msgrequest */
+#define MC_CMD_NVRAM_METADATA_IN_LEN 4
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_IN_TYPE_OFST 0
+
+/* MC_CMD_NVRAM_METADATA_OUT msgresponse */
+#define MC_CMD_NVRAM_METADATA_OUT_LENMIN 20
+#define MC_CMD_NVRAM_METADATA_OUT_LENMAX 252
+#define MC_CMD_NVRAM_METADATA_OUT_LEN(num) (20+1*(num))
+/* Partition type ID code */
+#define MC_CMD_NVRAM_METADATA_OUT_TYPE_OFST 0
+#define MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST 4
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN 0
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN 1
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_WIDTH 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_LBN 2
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_VALID_WIDTH 1
+/* Subtype ID code for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_OFST 8
+/* 1st component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_OFST 12
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_W_LEN 2
+/* 2nd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_OFST 14
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_X_LEN 2
+/* 3rd component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_OFST 16
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Y_LEN 2
+/* 4th component of W.X.Y.Z version number for content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_OFST 18
+#define MC_CMD_NVRAM_METADATA_OUT_VERSION_Z_LEN 2
+/* Zero-terminated string describing the content of this partition */
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_OFST 20
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_LEN 1
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MINNUM 0
+#define MC_CMD_NVRAM_METADATA_OUT_DESCRIPTION_MAXNUM 232
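+
+/* A minimal validity-check sketch (hypothetical helper name; little-endian
+ * host assumed): the SUBTYPE, VERSION_* and DESCRIPTION fields above are only
+ * meaningful when the corresponding *_VALID bit is set in FLAGS.
+ */
+static inline int hypothetical_nvram_version_valid(const uint8_t *resp)
+{
+	uint32_t flags;
+
+	memcpy(&flags, resp + MC_CMD_NVRAM_METADATA_OUT_FLAGS_OFST, 4);
+	return (flags >> MC_CMD_NVRAM_METADATA_OUT_VERSION_VALID_LBN) & 1;
+}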
+
+
+/***********************************/
+/* MC_CMD_GET_MAC_ADDRESSES
+ * Returns the base MAC, count and stride for the requesting function
+ */
+#define MC_CMD_GET_MAC_ADDRESSES 0x55
+
+/* MC_CMD_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_GET_MAC_ADDRESSES_IN_LEN 0
+
+/* MC_CMD_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_LEN 16
+/* Base MAC address */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_OFST 0
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE_LEN 6
+/* Padding */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_OFST 6
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_RESERVED_LEN 2
+/* Number of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_COUNT_OFST 8
+/* Spacing of allocated MAC addresses */
+#define MC_CMD_GET_MAC_ADDRESSES_OUT_MAC_STRIDE_OFST 12
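+
+/* A minimal address-derivation sketch (hypothetical helper name; an assumed
+ * interpretation of MAC_COUNT and MAC_STRIDE): treat the 6-byte base MAC as a
+ * 48-bit big-endian integer and derive allocated address n, for
+ * n < MAC_COUNT, as base + n * MAC_STRIDE.
+ */
+static inline void hypothetical_nth_mac(const uint8_t base[6], uint32_t stride,
+					uint32_t n, uint8_t out[6])
+{
+	uint64_t v = 0;
+	int i;
+
+	for (i = 0; i < 6; i++)
+		v = (v << 8) | base[i];
+	v += (uint64_t)n * stride;
+	for (i = 5; i >= 0; i--) {
+		out[i] = (uint8_t)(v & 0xff);
+		v >>= 8;
+	}
+}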
+
/* MC_CMD_RESOURCE_SPECIFIER enum */
-#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff /* enum */
-#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe /* enum */
+/* enum: Any */
+#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
+/* enum: None */
+#define MC_CMD_RESOURCE_INSTANCE_NONE 0xfffffffe
+
+/* EVB_PORT_ID structuredef */
+#define EVB_PORT_ID_LEN 4
+#define EVB_PORT_ID_PORT_ID_OFST 0
+/* enum: An invalid port handle. */
+#define EVB_PORT_ID_NULL 0x0
+/* enum: The port assigned to this function. */
+#define EVB_PORT_ID_ASSIGNED 0x1000000
+/* enum: External network port 0 */
+#define EVB_PORT_ID_MAC0 0x2000000
+/* enum: External network port 1 */
+#define EVB_PORT_ID_MAC1 0x2000001
+/* enum: External network port 2 */
+#define EVB_PORT_ID_MAC2 0x2000002
+/* enum: External network port 3 */
+#define EVB_PORT_ID_MAC3 0x2000003
+#define EVB_PORT_ID_PORT_ID_LBN 0
+#define EVB_PORT_ID_PORT_ID_WIDTH 32
+
+/* EVB_VLAN_TAG structuredef */
+#define EVB_VLAN_TAG_LEN 2
+/* The VLAN tag value */
+#define EVB_VLAN_TAG_VLAN_ID_LBN 0
+#define EVB_VLAN_TAG_VLAN_ID_WIDTH 12
+#define EVB_VLAN_TAG_MODE_LBN 12
+#define EVB_VLAN_TAG_MODE_WIDTH 4
+/* enum: Insert the VLAN. */
+#define EVB_VLAN_TAG_INSERT 0x0
+/* enum: Replace the VLAN if already present. */
+#define EVB_VLAN_TAG_REPLACE 0x1
+
+/* BUFTBL_ENTRY structuredef */
+#define BUFTBL_ENTRY_LEN 12
+/* the owner ID */
+#define BUFTBL_ENTRY_OID_OFST 0
+#define BUFTBL_ENTRY_OID_LEN 2
+#define BUFTBL_ENTRY_OID_LBN 0
+#define BUFTBL_ENTRY_OID_WIDTH 16
+/* the page parameter as one of ESE_DZ_SMC_PAGE_SIZE_ */
+#define BUFTBL_ENTRY_PGSZ_OFST 2
+#define BUFTBL_ENTRY_PGSZ_LEN 2
+#define BUFTBL_ENTRY_PGSZ_LBN 16
+#define BUFTBL_ENTRY_PGSZ_WIDTH 16
+/* the raw 64-bit address field from the SMC, not adjusted for page size */
+#define BUFTBL_ENTRY_RAWADDR_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_LEN 8
+#define BUFTBL_ENTRY_RAWADDR_LO_OFST 4
+#define BUFTBL_ENTRY_RAWADDR_HI_OFST 8
+#define BUFTBL_ENTRY_RAWADDR_LBN 32
+#define BUFTBL_ENTRY_RAWADDR_WIDTH 64
+
+/* NVRAM_PARTITION_TYPE structuredef */
+#define NVRAM_PARTITION_TYPE_LEN 2
+#define NVRAM_PARTITION_TYPE_ID_OFST 0
+#define NVRAM_PARTITION_TYPE_ID_LEN 2
+/* enum: Primary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE 0x100
+/* enum: Secondary MC firmware partition */
+#define NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP 0x200
+/* enum: Expansion ROM partition */
+#define NVRAM_PARTITION_TYPE_EXPANSION_ROM 0x300
+/* enum: Static configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_STATIC_CONFIG 0x400
+/* enum: Dynamic configuration TLV partition */
+#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
+/* enum: Expansion ROM configuration data for port 0 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
+/* enum: Expansion ROM configuration data for port 1 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
+/* enum: Expansion ROM configuration data for port 2 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2 0x602
+/* enum: Expansion ROM configuration data for port 3 */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
+/* enum: Non-volatile log output partition */
+#define NVRAM_PARTITION_TYPE_LOG 0x700
+/* enum: Device state dump output partition */
+#define NVRAM_PARTITION_TYPE_DUMP 0x800
+/* enum: Application license key storage partition */
+#define NVRAM_PARTITION_TYPE_LICENSE 0x900
+/* enum: Start of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MIN 0xa00
+/* enum: End of range used for PHY partitions (low 8 bits are the PHY ID) */
+#define NVRAM_PARTITION_TYPE_PHY_MAX 0xaff
+/* enum: Start of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
+/* enum: End of reserved value range (firmware may use for any purpose) */
+#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MAX 0xfffd
+/* enum: Recovery partition map (provided if real map is missing or corrupt) */
+#define NVRAM_PARTITION_TYPE_RECOVERY_MAP 0xfffe
+/* enum: Partition map (real map as stored in flash) */
+#define NVRAM_PARTITION_TYPE_PARTITION_MAP 0xffff
+#define NVRAM_PARTITION_TYPE_ID_LBN 0
+#define NVRAM_PARTITION_TYPE_ID_WIDTH 16
+
+
+/***********************************/
+/* MC_CMD_READ_REGS
+ * Get a dump of the MCPU registers
+ */
+#define MC_CMD_READ_REGS 0x50
+
+/* MC_CMD_READ_REGS_IN msgrequest */
+#define MC_CMD_READ_REGS_IN_LEN 0
+
+/* MC_CMD_READ_REGS_OUT msgresponse */
+#define MC_CMD_READ_REGS_OUT_LEN 308
+/* Whether the corresponding register entry contains a valid value */
+#define MC_CMD_READ_REGS_OUT_MASK_OFST 0
+#define MC_CMD_READ_REGS_OUT_MASK_LEN 16
+/* Same order as MIPS GDB (r0-r31, sr, lo, hi, bad, cause, 32 x float, fsr,
+ * fir, fp)
+ */
+#define MC_CMD_READ_REGS_OUT_REGS_OFST 16
+#define MC_CMD_READ_REGS_OUT_REGS_LEN 4
+#define MC_CMD_READ_REGS_OUT_REGS_NUM 73
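+
+/* A minimal dump-walk sketch (hypothetical helper name; little-endian host
+ * and LSB-first bit numbering within each MASK byte are assumed): the 16-byte
+ * MASK holds one validity bit per register slot, in the same order as the 73
+ * dword register entries that follow it.
+ */
+static inline int hypothetical_read_mcpu_reg(const uint8_t *resp,
+					     unsigned int idx, uint32_t *val)
+{
+	if (idx >= MC_CMD_READ_REGS_OUT_REGS_NUM)
+		return 0;
+	if (!(resp[MC_CMD_READ_REGS_OUT_MASK_OFST + idx / 8] & (1 << (idx % 8))))
+		return 0;	/* slot not populated */
+	memcpy(val, resp + MC_CMD_READ_REGS_OUT_REGS_OFST +
+		    idx * MC_CMD_READ_REGS_OUT_REGS_LEN, 4);
+	return 1;
+}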
+
+
+/***********************************/
+/* MC_CMD_INIT_EVQ
+ * Set up an event queue according to the supplied parameters. The IN arguments
+ * end with an address for each 4k of host memory required to back the EVQ.
+ */
+#define MC_CMD_INIT_EVQ 0x80
+
+/* MC_CMD_INIT_EVQ_IN msgrequest */
+#define MC_CMD_INIT_EVQ_IN_LENMIN 44
+#define MC_CMD_INIT_EVQ_IN_LENMAX 548
+#define MC_CMD_INIT_EVQ_IN_LEN(num) (36+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_EVQ_IN_SIZE_OFST 0
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_EVQ_IN_INSTANCE_OFST 4
+/* The initial timer value. The load value is ignored if the timer mode is DIS.
+ */
+#define MC_CMD_INIT_EVQ_IN_TMR_LOAD_OFST 8
+/* The reload value is ignored in one-shot modes */
+#define MC_CMD_INIT_EVQ_IN_TMR_RELOAD_OFST 12
+/* tbd */
+#define MC_CMD_INIT_EVQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_LBN 0
+#define MC_CMD_INIT_EVQ_IN_FLAG_INTERRUPTING_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_LBN 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RPTR_DOS_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_LBN 2
+#define MC_CMD_INIT_EVQ_IN_FLAG_INT_ARMD_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_LBN 3
+#define MC_CMD_INIT_EVQ_IN_FLAG_CUT_THRU_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_LBN 4
+#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
+#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
+/* enum: Immediate */
+#define MC_CMD_INIT_EVQ_IN_TMR_IMMED_START 0x1
+/* enum: Triggered */
+#define MC_CMD_INIT_EVQ_IN_TMR_TRIG_START 0x2
+/* enum: Hold-off */
+#define MC_CMD_INIT_EVQ_IN_TMR_INT_HLDOFF 0x3
+/* Target EVQ for wakeups if in wakeup mode. */
+#define MC_CMD_INIT_EVQ_IN_TARGET_EVQ_OFST 24
+/* Target interrupt if in interrupting mode (note union with target EVQ). Use
+ * MC_CMD_RESOURCE_INSTANCE_ANY unless a specific one is required for test
+ * purposes.
+ */
+#define MC_CMD_INIT_EVQ_IN_IRQ_NUM_OFST 24
+/* Event Counter Mode. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_OFST 28
+/* enum: Disabled */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS 0x0
+/* enum: Count only RX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RX 0x1
+/* enum: Count only TX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_TX 0x2
+/* enum: Count both RX and TX events */
+#define MC_CMD_INIT_EVQ_IN_COUNT_MODE_RXTX 0x3
+/* Event queue packet count threshold. */
+#define MC_CMD_INIT_EVQ_IN_COUNT_THRSHLD_OFST 32
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_LO_OFST 36
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_HI_OFST 40
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM 64
+
+/* MC_CMD_INIT_EVQ_OUT msgresponse */
+#define MC_CMD_INIT_EVQ_OUT_LEN 4
+/* Only valid if INTRFLAG was true */
+#define MC_CMD_INIT_EVQ_OUT_IRQ_OFST 0
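+
+/* A minimal sizing sketch (hypothetical helper name): an INIT_EVQ request
+ * carries one 64-bit DMA address per 4 KiB of host memory backing the queue,
+ * so the request length follows MC_CMD_INIT_EVQ_IN_LEN(num_pages). The same
+ * pattern applies to the INIT_RXQ and INIT_TXQ requests below.
+ */
+static inline size_t hypothetical_init_evq_len(size_t evq_bytes)
+{
+	size_t pages = (evq_bytes + 4095) / 4096;
+
+	if (pages < MC_CMD_INIT_EVQ_IN_DMA_ADDR_MINNUM ||
+	    pages > MC_CMD_INIT_EVQ_IN_DMA_ADDR_MAXNUM)
+		return 0;	/* unsupported backing size */
+	return MC_CMD_INIT_EVQ_IN_LEN(pages);
+}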
+
+/* QUEUE_CRC_MODE structuredef */
+#define QUEUE_CRC_MODE_LEN 1
+#define QUEUE_CRC_MODE_MODE_LBN 0
+#define QUEUE_CRC_MODE_MODE_WIDTH 4
+/* enum: No CRC. */
+#define QUEUE_CRC_MODE_NONE 0x0
+/* enum: CRC Fibre Channel over Ethernet. */
+#define QUEUE_CRC_MODE_FCOE 0x1
+/* enum: CRC (digest) iSCSI header only. */
+#define QUEUE_CRC_MODE_ISCSI_HDR 0x2
+/* enum: CRC (digest) iSCSI header and payload. */
+#define QUEUE_CRC_MODE_ISCSI 0x3
+/* enum: CRC Fibre Channel over IP over Ethernet. */
+#define QUEUE_CRC_MODE_FCOIPOE 0x4
+/* enum: CRC MPA. */
+#define QUEUE_CRC_MODE_MPA 0x5
+#define QUEUE_CRC_MODE_SPARE_LBN 4
+#define QUEUE_CRC_MODE_SPARE_WIDTH 4
+
+
+/***********************************/
+/* MC_CMD_INIT_RXQ
+ * Set up a receive queue according to the supplied parameters. The IN
+ * arguments end with an address for each 4k of host memory required to back
+ * the RXQ.
+ */
+#define MC_CMD_INIT_RXQ 0x81
+
+/* MC_CMD_INIT_RXQ_IN msgrequest */
+#define MC_CMD_INIT_RXQ_IN_LENMIN 36
+#define MC_CMD_INIT_RXQ_IN_LENMAX 252
+#define MC_CMD_INIT_RXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_RXQ_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to INIT_EVQ
+ */
+#define MC_CMD_INIT_RXQ_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_RXQ_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_RXQ_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_RXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_RXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_LBN 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_HDR_SPLIT_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_LBN 2
+#define MC_CMD_INIT_RXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_LBN 3
+#define MC_CMD_INIT_RXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_LBN 7
+#define MC_CMD_INIT_RXQ_IN_FLAG_CHAIN_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_LBN 8
+#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_RXQ_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_RXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_RXQ_OUT msgresponse */
+#define MC_CMD_INIT_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_INIT_TXQ
+ * Set up a transmit queue according to the supplied parameters. The IN
+ * arguments end with an address for each 4k of host memory required to back
+ * the TXQ.
+ */
+#define MC_CMD_INIT_TXQ 0x82
+
+/* MC_CMD_INIT_TXQ_IN msgrequest */
+#define MC_CMD_INIT_TXQ_IN_LENMIN 36
+#define MC_CMD_INIT_TXQ_IN_LENMAX 252
+#define MC_CMD_INIT_TXQ_IN_LEN(num) (28+8*(num))
+/* Size, in entries */
+#define MC_CMD_INIT_TXQ_IN_SIZE_OFST 0
+/* The EVQ to send events to. This is an index originally specified to
+ * INIT_EVQ.
+ */
+#define MC_CMD_INIT_TXQ_IN_TARGET_EVQ_OFST 4
+/* The value to put in the event data. Check hardware spec. for valid range. */
+#define MC_CMD_INIT_TXQ_IN_LABEL_OFST 8
+/* Desired instance. Must be set to a specific instance, which is a function
+ * local queue index.
+ */
+#define MC_CMD_INIT_TXQ_IN_INSTANCE_OFST 12
+/* There will be more flags here. */
+#define MC_CMD_INIT_TXQ_IN_FLAGS_OFST 16
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_LBN 0
+#define MC_CMD_INIT_TXQ_IN_FLAG_BUFF_MODE_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_LBN 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_IP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_LBN 2
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_CSUM_DIS_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_LBN 3
+#define MC_CMD_INIT_TXQ_IN_FLAG_TCP_UDP_ONLY_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_LBN 4
+#define MC_CMD_INIT_TXQ_IN_CRC_MODE_WIDTH 4
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_LBN 8
+#define MC_CMD_INIT_TXQ_IN_FLAG_TIMESTAMP_WIDTH 1
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_LBN 9
+#define MC_CMD_INIT_TXQ_IN_FLAG_PACER_BYPASS_WIDTH 1
+/* Owner ID to use if in buffer mode (zero if physical) */
+#define MC_CMD_INIT_TXQ_IN_OWNER_ID_OFST 20
+/* The port ID associated with the v-adaptor which should contain this DMAQ. */
+#define MC_CMD_INIT_TXQ_IN_PORT_ID_OFST 24
+/* 64-bit address of 4k of 4k-aligned host memory buffer */
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LEN 8
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_LO_OFST 28
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_HI_OFST 32
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MINNUM 1
+#define MC_CMD_INIT_TXQ_IN_DMA_ADDR_MAXNUM 28
+
+/* MC_CMD_INIT_TXQ_OUT msgresponse */
+#define MC_CMD_INIT_TXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_EVQ
+ * Teardown an EVQ.
+ *
+ * All DMAQs or EVQs that point to the EVQ to tear down must be torn down first
+ * or the operation will fail with EBUSY.
+ */
+#define MC_CMD_FINI_EVQ 0x83
+
+/* MC_CMD_FINI_EVQ_IN msgrequest */
+#define MC_CMD_FINI_EVQ_IN_LEN 4
+/* Instance of EVQ to destroy. Should be the same instance as that previously
+ * passed to INIT_EVQ
+ */
+#define MC_CMD_FINI_EVQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_EVQ_OUT msgresponse */
+#define MC_CMD_FINI_EVQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_RXQ
+ * Teardown a RXQ.
+ */
+#define MC_CMD_FINI_RXQ 0x84
+
+/* MC_CMD_FINI_RXQ_IN msgrequest */
+#define MC_CMD_FINI_RXQ_IN_LEN 4
+/* Instance of RXQ to destroy */
+#define MC_CMD_FINI_RXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_RXQ_OUT msgresponse */
+#define MC_CMD_FINI_RXQ_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FINI_TXQ
+ * Teardown a TXQ.
+ */
+#define MC_CMD_FINI_TXQ 0x85
+
+/* MC_CMD_FINI_TXQ_IN msgrequest */
+#define MC_CMD_FINI_TXQ_IN_LEN 4
+/* Instance of TXQ to destroy */
+#define MC_CMD_FINI_TXQ_IN_INSTANCE_OFST 0
+
+/* MC_CMD_FINI_TXQ_OUT msgresponse */
+#define MC_CMD_FINI_TXQ_OUT_LEN 0
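+
+/* A minimal teardown-ordering sketch (hypothetical mcdi_rpc() callback that
+ * issues a command with a single dword argument): DMA queues bound to an EVQ
+ * must be finalised before the EVQ itself, otherwise FINI_EVQ fails with
+ * EBUSY.
+ */
+static inline int
+hypothetical_teardown_channel(int (*mcdi_rpc)(unsigned int cmd, uint32_t arg),
+			      uint32_t txq, uint32_t rxq, uint32_t evq)
+{
+	int rc;
+
+	rc = mcdi_rpc(MC_CMD_FINI_TXQ, txq);	/* tear down DMA queues first */
+	if (rc)
+		return rc;
+	rc = mcdi_rpc(MC_CMD_FINI_RXQ, rxq);
+	if (rc)
+		return rc;
+	return mcdi_rpc(MC_CMD_FINI_EVQ, evq);	/* last, else EBUSY */
+}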
+
+
+/***********************************/
+/* MC_CMD_DRIVER_EVENT
+ * Generate an event on an EVQ belonging to the function issuing the command.
+ */
+#define MC_CMD_DRIVER_EVENT 0x86
+
+/* MC_CMD_DRIVER_EVENT_IN msgrequest */
+#define MC_CMD_DRIVER_EVENT_IN_LEN 12
+/* Handle of target EVQ */
+#define MC_CMD_DRIVER_EVENT_IN_EVQ_OFST 0
+/* Bits 0 - 63 of event */
+#define MC_CMD_DRIVER_EVENT_IN_DATA_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LEN 8
+#define MC_CMD_DRIVER_EVENT_IN_DATA_LO_OFST 4
+#define MC_CMD_DRIVER_EVENT_IN_DATA_HI_OFST 8
+
+/* MC_CMD_DRIVER_EVENT_OUT msgresponse */
+#define MC_CMD_DRIVER_EVENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PROXY_CMD
+ * Execute an arbitrary MCDI command on behalf of a different function, subject
+ * to security restrictions. The command to be proxied follows immediately
+ * afterward in the host buffer (or on the UART). This command supersedes
+ * MC_CMD_SET_FUNC, which remains available for Siena but is now deprecated.
+ */
+#define MC_CMD_PROXY_CMD 0x5b
+
+/* MC_CMD_PROXY_CMD_IN msgrequest */
+#define MC_CMD_PROXY_CMD_IN_LEN 4
+/* The handle of the target function. */
+#define MC_CMD_PROXY_CMD_IN_TARGET_OFST 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_LBN 0
+#define MC_CMD_PROXY_CMD_IN_TARGET_PF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_LBN 16
+#define MC_CMD_PROXY_CMD_IN_TARGET_VF_WIDTH 16
+#define MC_CMD_PROXY_CMD_IN_VF_NULL 0xffff /* enum */
+
+
+/***********************************/
+/* MC_CMD_ALLOC_BUFTBL_CHUNK
+ * Allocate a set of buffer table entries using the specified owner ID. This
+ * operation allocates the required buffer table entries (and fails if it
+ * cannot do so). The buffer table entries will initially be zeroed.
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK 0x87
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_LEN 8
+/* Owner ID to use */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_OWNER_OFST 0
+/* Size of buffer table pages to use, in bytes (note that only a few values are
+ * legal on any specific hardware).
+ */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_IN_PAGE_SIZE_OFST 4
+
+/* MC_CMD_ALLOC_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_LEN 12
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_HANDLE_OFST 0
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_NUMENTRIES_OFST 4
+/* Buffer table IDs for use in DMA descriptors. */
+#define MC_CMD_ALLOC_BUFTBL_CHUNK_OUT_ID_OFST 8
+
+
+/***********************************/
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES
+ * Reprogram a set of buffer table entries in the specified chunk.
+ */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES 0x88
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMIN 20
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LENMAX 252
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(num) (12+8*(num))
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST 0
+/* ID */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST 4
+/* Num entries */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 8
+/* Buffer table entry address */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LEN 8
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_LO_OFST 12
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_HI_OFST 16
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM 1
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM 30
+
+/* MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_PROGRAM_BUFTBL_ENTRIES_OUT_LEN 0
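+
+/* A minimal request-building sketch (hypothetical helper name; little-endian
+ * host assumed): after MC_CMD_ALLOC_BUFTBL_CHUNK returns a handle and base
+ * ID, the chunk is populated with MC_CMD_PROGRAM_BUFTBL_ENTRIES, up to
+ * ENTRY_MAXNUM 64-bit buffer addresses per call. Returns the request length,
+ * or 0 if the entry count is out of range.
+ */
+static inline size_t
+hypothetical_program_buftbl(uint8_t *buf, uint32_t handle, uint32_t first_id,
+			    const uint64_t *addrs, uint32_t n)
+{
+	uint32_t i;
+
+	if (n < MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MINNUM ||
+	    n > MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_MAXNUM)
+		return 0;
+	memcpy(buf + MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_HANDLE_OFST, &handle, 4);
+	memcpy(buf + MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_FIRSTID_OFST, &first_id, 4);
+	memcpy(buf + MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST, &n, 4);
+	for (i = 0; i < n; i++)
+		memcpy(buf + MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_ENTRY_OFST + 8 * i,
+		       &addrs[i], 8);
+	return MC_CMD_PROGRAM_BUFTBL_ENTRIES_IN_LEN(n);
+}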
+
+
+/***********************************/
+/* MC_CMD_FREE_BUFTBL_CHUNK
+ */
+#define MC_CMD_FREE_BUFTBL_CHUNK 0x89
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_IN msgrequest */
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_LEN 4
+#define MC_CMD_FREE_BUFTBL_CHUNK_IN_HANDLE_OFST 0
+
+/* MC_CMD_FREE_BUFTBL_CHUNK_OUT msgresponse */
+#define MC_CMD_FREE_BUFTBL_CHUNK_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_FILTER_OP
+ * Multiplexed MCDI call for filter operations
+ */
+#define MC_CMD_FILTER_OP 0x8a
+
+/* MC_CMD_FILTER_OP_IN msgrequest */
+#define MC_CMD_FILTER_OP_IN_LEN 108
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_IN_OP_OFST 0
+/* enum: single-recipient filter insert */
+#define MC_CMD_FILTER_OP_IN_OP_INSERT 0x0
+/* enum: single-recipient filter remove */
+#define MC_CMD_FILTER_OP_IN_OP_REMOVE 0x1
+/* enum: multi-recipient filter subscribe */
+#define MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE 0x2
+/* enum: multi-recipient filter unsubscribe */
+#define MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE 0x3
+/* enum: replace one recipient with another (warning - the filter handle may
+ * change)
+ */
+#define MC_CMD_FILTER_OP_IN_OP_REPLACE 0x4
+/* filter handle (for remove / unsubscribe operations) */
+#define MC_CMD_FILTER_OP_IN_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_IN_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_IN_HANDLE_HI_OFST 8
+/* The port ID associated with the v-adaptor which should contain this filter.
+ */
+#define MC_CMD_FILTER_OP_IN_PORT_ID_OFST 12
+/* fields to include in match criteria */
+#define MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST 16
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN 0
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_LBN 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_IP_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_LBN 2
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_LBN 3
+#define MC_CMD_FILTER_OP_IN_MATCH_SRC_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN 4
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_LBN 5
+#define MC_CMD_FILTER_OP_IN_MATCH_DST_PORT_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_LBN 6
+#define MC_CMD_FILTER_OP_IN_MATCH_ETHER_TYPE_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_LBN 7
+#define MC_CMD_FILTER_OP_IN_MATCH_INNER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_LBN 8
+#define MC_CMD_FILTER_OP_IN_MATCH_OUTER_VLAN_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_LBN 9
+#define MC_CMD_FILTER_OP_IN_MATCH_IP_PROTO_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_LBN 10
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF0_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_LBN 11
+#define MC_CMD_FILTER_OP_IN_MATCH_FWDEF1_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN 30
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN 31
+#define MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_WIDTH 1
+/* receive destination */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_OFST 20
+/* enum: drop packets */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_DROP 0x0
+/* enum: receive to host */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_HOST 0x1
+/* enum: receive to MC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_MC 0x2
+/* enum: loop back to port 0 TX MAC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX0 0x3
+/* enum: loop back to port 1 TX MAC */
+#define MC_CMD_FILTER_OP_IN_RX_DEST_TX1 0x4
+/* receive queue handle (for multiple queue modes, this is the base queue) */
+#define MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST 24
+/* receive mode */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_OFST 28
+/* enum: receive to just the specified queue */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE 0x0
+/* enum: receive to multiple queues using RSS context */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_RSS 0x1
+/* enum: receive to multiple queues using .1p mapping */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_DOT1P_MAPPING 0x2
+/* enum: install a filter entry that will never match; for test purposes only
+ */
+#define MC_CMD_FILTER_OP_IN_RX_MODE_TEST_NEVER_MATCH 0x80000000
+/* RSS context (for RX_MODE_RSS) or .1p mapping handle (for
+ * RX_MODE_DOT1P_MAPPING), as returned by MC_CMD_RSS_CONTEXT_ALLOC or
+ * MC_CMD_DOT1P_MAPPING_ALLOC. Note that these handles should be considered
+ * opaque to the host, although a value of 0xFFFFFFFF is guaranteed never to be
+ * a valid handle.
+ */
+#define MC_CMD_FILTER_OP_IN_RX_CONTEXT_OFST 32
+/* transmit domain (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_TX_DOMAIN_OFST 36
+/* transmit destination (either set the MAC and/or PM bits for explicit
+ * control, or set this field to TX_DEST_DEFAULT for sensible default
+ * behaviour)
+ */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_OFST 40
+/* enum: request default behaviour (based on filter type) */
+#define MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT 0xffffffff
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_LBN 0
+#define MC_CMD_FILTER_OP_IN_TX_DEST_MAC_WIDTH 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_LBN 1
+#define MC_CMD_FILTER_OP_IN_TX_DEST_PM_WIDTH 1
+/* source MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_OFST 44
+#define MC_CMD_FILTER_OP_IN_SRC_MAC_LEN 6
+/* source port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_OFST 50
+#define MC_CMD_FILTER_OP_IN_SRC_PORT_LEN 2
+/* destination MAC address to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_MAC_OFST 52
+#define MC_CMD_FILTER_OP_IN_DST_MAC_LEN 6
+/* destination port to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_DST_PORT_OFST 58
+#define MC_CMD_FILTER_OP_IN_DST_PORT_LEN 2
+/* Ethernet type to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_OFST 60
+#define MC_CMD_FILTER_OP_IN_ETHER_TYPE_LEN 2
+/* Inner VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_OFST 62
+#define MC_CMD_FILTER_OP_IN_INNER_VLAN_LEN 2
+/* Outer VLAN tag to match (as bytes in network order) */
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_OFST 64
+#define MC_CMD_FILTER_OP_IN_OUTER_VLAN_LEN 2
+/* IP protocol to match (in low byte; set high byte to 0) */
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_OFST 66
+#define MC_CMD_FILTER_OP_IN_IP_PROTO_LEN 2
+/* Firmware defined register 0 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF0_OFST 68
+/* Firmware defined register 1 to match (reserved; set to 0) */
+#define MC_CMD_FILTER_OP_IN_FWDEF1_OFST 72
+/* source IP address to match (as bytes in network order; set last 12 bytes to
+ * 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_SRC_IP_OFST 76
+#define MC_CMD_FILTER_OP_IN_SRC_IP_LEN 16
+/* destination IP address to match (as bytes in network order; set last 12
+ * bytes to 0 for IPv4 address)
+ */
+#define MC_CMD_FILTER_OP_IN_DST_IP_OFST 92
+#define MC_CMD_FILTER_OP_IN_DST_IP_LEN 16
+
+/* MC_CMD_FILTER_OP_OUT msgresponse */
+#define MC_CMD_FILTER_OP_OUT_LEN 12
+/* identifies the type of operation requested */
+#define MC_CMD_FILTER_OP_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_FILTER_OP_IN/OP */
+/* Returned filter handle (for insert / subscribe operations). Note that these
+ * handles should be considered opaque to the host, although a value of
+ * 0xFFFFFFFF_FFFFFFFF is guaranteed never to be a valid handle.
+ */
+#define MC_CMD_FILTER_OP_OUT_HANDLE_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LEN 8
+#define MC_CMD_FILTER_OP_OUT_HANDLE_LO_OFST 4
+#define MC_CMD_FILTER_OP_OUT_HANDLE_HI_OFST 8
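+
+/* A minimal request-building sketch (hypothetical helper name; little-endian
+ * host assumed): fill a 108-byte MC_CMD_FILTER_OP_IN buffer to insert a
+ * filter that matches one destination MAC address and delivers matching
+ * packets to a single host receive queue on the port assigned to this
+ * function. Reserved fields are left zero by the memset().
+ */
+static inline void
+hypothetical_build_dst_mac_filter(uint8_t buf[MC_CMD_FILTER_OP_IN_LEN],
+				  const uint8_t dst_mac[6], uint32_t rxq)
+{
+	uint32_t v;
+
+	memset(buf, 0, MC_CMD_FILTER_OP_IN_LEN);
+	v = MC_CMD_FILTER_OP_IN_OP_INSERT;
+	memcpy(buf + MC_CMD_FILTER_OP_IN_OP_OFST, &v, 4);
+	v = EVB_PORT_ID_ASSIGNED;
+	memcpy(buf + MC_CMD_FILTER_OP_IN_PORT_ID_OFST, &v, 4);
+	v = 1u << MC_CMD_FILTER_OP_IN_MATCH_DST_MAC_LBN;
+	memcpy(buf + MC_CMD_FILTER_OP_IN_MATCH_FIELDS_OFST, &v, 4);
+	v = MC_CMD_FILTER_OP_IN_RX_DEST_HOST;
+	memcpy(buf + MC_CMD_FILTER_OP_IN_RX_DEST_OFST, &v, 4);
+	memcpy(buf + MC_CMD_FILTER_OP_IN_RX_QUEUE_OFST, &rxq, 4);
+	v = MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE;
+	memcpy(buf + MC_CMD_FILTER_OP_IN_RX_MODE_OFST, &v, 4);
+	v = MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT;
+	memcpy(buf + MC_CMD_FILTER_OP_IN_TX_DEST_OFST, &v, 4);
+	memcpy(buf + MC_CMD_FILTER_OP_IN_DST_MAC_OFST, dst_mac, 6);
+}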
+
+
+/***********************************/
+/* MC_CMD_GET_PARSER_DISP_INFO
+ * Get information related to the parser-dispatcher subsystem
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO 0xe4
+
+/* MC_CMD_GET_PARSER_DISP_INFO_IN msgrequest */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_OFST 0
+/* enum: read the list of supported RX filter matches */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES 0x1
+
+/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX 252
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LEN(num) (8+4*(num))
+/* identifies the type of operation requested */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_OP_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_PARSER_DISP_INFO_IN/OP */
+/* number of supported match types */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_NUM_SUPPORTED_MATCHES_OFST 4
+/* array of supported match types (valid MATCH_FIELDS values for
+ * MC_CMD_FILTER_OP) sorted in decreasing priority order
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_OFST 8
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_LEN 4
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MINNUM 0
+#define MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM 61
+
+
+/***********************************/
+/* MC_CMD_PARSER_DISP_RW
+ * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging
+ */
+#define MC_CMD_PARSER_DISP_RW 0xe5
+
+/* MC_CMD_PARSER_DISP_RW_IN msgrequest */
+#define MC_CMD_PARSER_DISP_RW_IN_LEN 32
+/* identifies the target of the operation */
+#define MC_CMD_PARSER_DISP_RW_IN_TARGET_OFST 0
+/* enum: RX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_RX_DICPU 0x0
+/* enum: TX dispatcher CPU */
+#define MC_CMD_PARSER_DISP_RW_IN_TX_DICPU 0x1
+/* enum: Lookup engine */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
+/* identifies the type of operation requested */
+#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
+/* enum: read a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_READ 0x0
+/* enum: write a word of DICPU DMEM or a LUE entry */
+#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
+/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
+#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
+/* data memory address or LUE index */
+#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+/* value to write (for DMEM writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
+/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_XOR_VALUE_OFST 12
+/* AND mask (for DMEM read-modify-writes: new = (old & mask) ^ value) */
+#define MC_CMD_PARSER_DISP_RW_IN_DMEM_RMW_AND_MASK_OFST 16
+/* value to write (for LUE writes) */
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_OFST 12
+#define MC_CMD_PARSER_DISP_RW_IN_LUE_WRITE_VALUE_LEN 20
+
+/* MC_CMD_PARSER_DISP_RW_OUT msgresponse */
+#define MC_CMD_PARSER_DISP_RW_OUT_LEN 52
+/* value read (for DMEM reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_DMEM_READ_VALUE_OFST 0
+/* value read (for LUE reads) */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_READ_VALUE_LEN 20
+/* up to 8 32-bit words of additional soft state from the LUE manager (the
+ * exact content is firmware-dependent and intended only for debug use)
+ */
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
+#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
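+
+/* A minimal semantics sketch (hypothetical helper name): the DMEM
+ * read-modify-write documented above computes new = (old & AND_MASK) ^
+ * XOR_VALUE, so a bit can be cleared (mask bit 0, xor bit 0), set (mask bit
+ * 0, xor bit 1) or toggled (mask bit 1, xor bit 1) in a single operation.
+ */
+static inline uint32_t hypothetical_dmem_rmw(uint32_t old, uint32_t and_mask,
+					     uint32_t xor_value)
+{
+	return (old & and_mask) ^ xor_value;
+}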
+
+
+/***********************************/
+/* MC_CMD_GET_PF_COUNT
+ * Get number of PFs on the device.
+ */
+#define MC_CMD_GET_PF_COUNT 0xb6
+
+/* MC_CMD_GET_PF_COUNT_IN msgrequest */
+#define MC_CMD_GET_PF_COUNT_IN_LEN 0
+
+/* MC_CMD_GET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_GET_PF_COUNT_OUT_LEN 1
+/* Identifies the number of PFs on the device. */
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST 0
+#define MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_LEN 1
+
+
+/***********************************/
+/* MC_CMD_SET_PF_COUNT
+ * Set number of PFs on the device.
+ */
+#define MC_CMD_SET_PF_COUNT 0xb7
+
+/* MC_CMD_SET_PF_COUNT_IN msgrequest */
+#define MC_CMD_SET_PF_COUNT_IN_LEN 4
+/* New number of PFs on the device. */
+#define MC_CMD_SET_PF_COUNT_IN_PF_COUNT_OFST 0
+
+/* MC_CMD_SET_PF_COUNT_OUT msgresponse */
+#define MC_CMD_SET_PF_COUNT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_PORT_ASSIGNMENT
+ * Get port assignment for current PCI function.
+ */
+#define MC_CMD_GET_PORT_ASSIGNMENT 0xb8
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN 0
+
+/* MC_CMD_GET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_GET_PORT_ASSIGNMENT_OUT_PORT_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SET_PORT_ASSIGNMENT
+ * Set port assignment for current PCI function.
+ */
+#define MC_CMD_SET_PORT_ASSIGNMENT 0xb9
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_IN msgrequest */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_LEN 4
+/* Identifies the port assignment for this function. */
+#define MC_CMD_SET_PORT_ASSIGNMENT_IN_PORT_OFST 0
+
+/* MC_CMD_SET_PORT_ASSIGNMENT_OUT msgresponse */
+#define MC_CMD_SET_PORT_ASSIGNMENT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_ALLOC_VIS
+ * Allocate VIs for current PCI function.
+ */
+#define MC_CMD_ALLOC_VIS 0x8b
+
+/* MC_CMD_ALLOC_VIS_IN msgrequest */
+#define MC_CMD_ALLOC_VIS_IN_LEN 8
+/* The minimum number of VIs that is acceptable */
+#define MC_CMD_ALLOC_VIS_IN_MIN_VI_COUNT_OFST 0
+/* The maximum number of VIs that would be useful */
+#define MC_CMD_ALLOC_VIS_IN_MAX_VI_COUNT_OFST 4
+
+/* MC_CMD_ALLOC_VIS_OUT msgresponse */
+#define MC_CMD_ALLOC_VIS_OUT_LEN 8
+/* The number of VIs allocated on this function */
+#define MC_CMD_ALLOC_VIS_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_ALLOC_VIS_OUT_VI_BASE_OFST 4
+
+
+/***********************************/
+/* MC_CMD_FREE_VIS
+ * Free VIs for current PCI function. Any linked PIO buffers will be unlinked,
+ * but not freed.
+ */
+#define MC_CMD_FREE_VIS 0x8c
+
+/* MC_CMD_FREE_VIS_IN msgrequest */
+#define MC_CMD_FREE_VIS_IN_LEN 0
+
+/* MC_CMD_FREE_VIS_OUT msgresponse */
+#define MC_CMD_FREE_VIS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_SRIOV_CFG
+ * Get SRIOV config for this PF.
+ */
+#define MC_CMD_GET_SRIOV_CFG 0xba
+
+/* MC_CMD_GET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_GET_SRIOV_CFG_IN_LEN 0
+
+/* MC_CMD_GET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_GET_SRIOV_CFG_OUT_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_CURRENT_OFST 0
+/* Max number of VFs before SRIOV stride and offset may need to be changed. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_MAX_OFST 4
+#define MC_CMD_GET_SRIOV_CFG_OUT_FLAGS_OFST 8
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_LBN 0
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_OFFSET_OFST 12
+/* RID offset of each subsequent VF from the previous. */
+#define MC_CMD_GET_SRIOV_CFG_OUT_VF_STRIDE_OFST 16
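+
+/* A minimal RID-arithmetic sketch (hypothetical helper name; the usual SR-IOV
+ * interpretation of offset and stride is assumed): the PCIe routing ID of
+ * VF n, relative to its parent PF, is pf_rid + VF_OFFSET + n * VF_STRIDE.
+ */
+static inline uint16_t hypothetical_vf_rid(uint16_t pf_rid, uint32_t vf_offset,
+					   uint32_t vf_stride, uint32_t n)
+{
+	return (uint16_t)(pf_rid + vf_offset + n * vf_stride);
+}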
+
+
+/***********************************/
+/* MC_CMD_SET_SRIOV_CFG
+ * Set SRIOV config for this PF.
+ */
+#define MC_CMD_SET_SRIOV_CFG 0xbb
+
+/* MC_CMD_SET_SRIOV_CFG_IN msgrequest */
+#define MC_CMD_SET_SRIOV_CFG_IN_LEN 20
+/* Number of VFs currently enabled. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_CURRENT_OFST 0
+/* Max number of VFs before sriov stride and offset may need to be changed. */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_MAX_OFST 4
+#define MC_CMD_SET_SRIOV_CFG_IN_FLAGS_OFST 8
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_LBN 0
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_ENABLED_WIDTH 1
+/* RID offset of first VF from PF, or 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate an offset.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_OFFSET_OFST 12
+/* RID offset of each subsequent VF from the previous, 0 for no change, or
+ * MC_CMD_RESOURCE_INSTANCE_ANY to allow the system to allocate a stride.
+ */
+#define MC_CMD_SET_SRIOV_CFG_IN_VF_STRIDE_OFST 16
+
+/* MC_CMD_SET_SRIOV_CFG_OUT msgresponse */
+#define MC_CMD_SET_SRIOV_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_ALLOC_INFO
+ * Get information about the number of VIs and the base VI number allocated
+ * to this function.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO 0x8d
+
+/* MC_CMD_GET_VI_ALLOC_INFO_IN msgrequest */
+#define MC_CMD_GET_VI_ALLOC_INFO_IN_LEN 0
+
+/* MC_CMD_GET_VI_ALLOC_INFO_OUT msgresponse */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_LEN 8
+/* The number of VIs allocated on this function */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_COUNT_OFST 0
+/* The base absolute VI number allocated to this function. Required to
+ * correctly interpret wakeup events.
+ */
+#define MC_CMD_GET_VI_ALLOC_INFO_OUT_VI_BASE_OFST 4
+
+
+/***********************************/
+/* MC_CMD_DUMP_VI_STATE
+ * For CmdClient use. Dump pertinent information on a specific absolute VI.
+ */
+#define MC_CMD_DUMP_VI_STATE 0x8e
+
+/* MC_CMD_DUMP_VI_STATE_IN msgrequest */
+#define MC_CMD_DUMP_VI_STATE_IN_LEN 4
+/* The VI number to query. */
+#define MC_CMD_DUMP_VI_STATE_IN_VI_NUMBER_OFST 0
+
+/* MC_CMD_DUMP_VI_STATE_OUT msgresponse */
+#define MC_CMD_DUMP_VI_STATE_OUT_LEN 96
+/* The PF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_OFST 0
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_PF_LEN 2
+/* The VF part of the function owning this VI. */
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_OFST 2
+#define MC_CMD_DUMP_VI_STATE_OUT_OWNER_VF_LEN 2
+/* Base of VIs allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_OFST 4
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_BASE_LEN 2
+/* Count of VIs allocated to the owner function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_OFST 6
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VI_COUNT_LEN 2
+/* Base interrupt vector allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_OFST 8
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_BASE_LEN 2
+/* Number of interrupt vectors allocated to this function. */
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_OFST 10
+#define MC_CMD_DUMP_VI_STATE_OUT_FUNC_VECTOR_COUNT_LEN 2
+/* Raw evq ptr table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_LO_OFST 12
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EVQ_PTR_RAW_HI_OFST 16
+/* Raw evq timer table data. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_LO_OFST 20
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_TIMER_RAW_HI_OFST 24
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_OFST 28
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_EV_META_WKUP_REF_WIDTH 8
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_LO_OFST 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_0_HI_OFST 36
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_LO_OFST 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_1_HI_OFST 44
+/* TXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_LO_OFST 48
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_RAW_TBL_2_HI_OFST 52
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_LO_OFST 56
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_HI_OFST 60
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_TX_META_WAITCOUNT_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_LBN 40
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_PADDING_WIDTH 24
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_LO_OFST 64
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_0_HI_OFST 68
+/* RXDPCPU raw table data for queue. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_LO_OFST 72
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_1_HI_OFST 76
+/* Reserved, currently 0. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_LO_OFST 80
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_RAW_TBL_2_HI_OFST 84
+/* Combined metadata field. */
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LEN 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_LO_OFST 88
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_HI_OFST 92
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_LBN 0
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_BASE_WIDTH 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_LBN 16
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_BUFS_NPAGES_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_LBN 24
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_QSTATE_WIDTH 8
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_LBN 32
+#define MC_CMD_DUMP_VI_STATE_OUT_VI_RX_META_WAITCOUNT_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_ALLOC_PIOBUF
+ * Allocate a push I/O buffer for later use with a tx queue.
+ */
+#define MC_CMD_ALLOC_PIOBUF 0x8f
+
+/* MC_CMD_ALLOC_PIOBUF_IN msgrequest */
+#define MC_CMD_ALLOC_PIOBUF_IN_LEN 0
+
+/* MC_CMD_ALLOC_PIOBUF_OUT msgresponse */
+#define MC_CMD_ALLOC_PIOBUF_OUT_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_ALLOC_PIOBUF_OUT_PIOBUF_HANDLE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_FREE_PIOBUF
+ * Free a push I/O buffer.
+ */
+#define MC_CMD_FREE_PIOBUF 0x90
+
+/* MC_CMD_FREE_PIOBUF_IN msgrequest */
+#define MC_CMD_FREE_PIOBUF_IN_LEN 4
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_FREE_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+
+/* MC_CMD_FREE_PIOBUF_OUT msgresponse */
+#define MC_CMD_FREE_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_VI_TLP_PROCESSING
+ * Get TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_GET_VI_TLP_PROCESSING 0xb0
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_LEN 4
+/* VI number to get information for. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+
+/* MC_CMD_GET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_LEN 4
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_OFST 0
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_OFST 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_LBN 16
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_LBN 17
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_ID_BASED_ORDERING_WIDTH 1
+/* Set no snoop bit for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_LBN 18
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_LBN 19
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_TPH_ON_WIDTH 1
+#define MC_CMD_GET_VI_TLP_PROCESSING_OUT_DATA_OFST 0
+
+
+/***********************************/
+/* MC_CMD_SET_VI_TLP_PROCESSING
+ * Set TLP steering and ordering information for a VI.
+ */
+#define MC_CMD_SET_VI_TLP_PROCESSING 0xb1
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_IN msgrequest */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_LEN 8
+/* VI number to set information for. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_INSTANCE_OFST 0
+/* Transaction processing steering hint 1 for use with the Rx Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_OFST 4
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG1_RX_LEN 1
+/* Transaction processing steering hint 2 for use with the Ev Queue. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_OFST 5
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_TAG2_EV_LEN 1
+/* Use Relaxed ordering model for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_LBN 48
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_RELAXED_ORDERING_WIDTH 1
+/* Use ID based ordering for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_LBN 49
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_ID_BASED_ORDERING_WIDTH 1
+/* Set the no snoop bit for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_LBN 50
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_NO_SNOOP_WIDTH 1
+/* Enable TPH for TLPs on this VI. */
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_LBN 51
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_TPH_ON_WIDTH 1
+#define MC_CMD_SET_VI_TLP_PROCESSING_IN_DATA_OFST 4
+
+/* MC_CMD_SET_VI_TLP_PROCESSING_OUT msgresponse */
+#define MC_CMD_SET_VI_TLP_PROCESSING_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS
+ * Get global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS 0xbc
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_LEN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+/* enum: MISC. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_MISC 0x0
+/* enum: IDO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_IDO 0x1
+/* enum: RO. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_RO 0x2
+/* enum: TPH Type. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_TPH_TYPE 0x3
+
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_LEN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_GLOBAL_CATEGORY_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_WORD_OFST 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_MISC_SPARE_WIDTH 31
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_IDO_SPARE_WIDTH 28
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_LBN 3
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_RO_SPARE_WIDTH 29
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_LBN 9
+#define MC_CMD_GET_TLP_PROCESSING_GLOBALS_OUT_TLP_INFO_TLP_TYPE_SPARE_WIDTH 23
+
+
+/***********************************/
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS
+ * Set global PCIe steering and transaction processing configuration.
+ */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS 0xbd
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN msgrequest */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_LEN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_GLOBAL_CATEGORY_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_GET_TLP_PROCESSING_GLOBALS/MC_CMD_GET_TLP_PROCESSING_GLOBALS_IN/TLP_GLOBAL_CATEGORY */
+/* Amalgamated TLP info word. */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_WORD_OFST 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_MISC_WTAG_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_TX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_EV_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_LBN 3
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_IDO_RX_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_RXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_LBN 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_TXDMA_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_RO_DL_EN_WIDTH 1
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_LBN 0
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_MSIX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_LBN 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_DL_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_LBN 4
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_TX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_LBN 6
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_EV_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_LBN 8
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_TPH_TYPE_RX_WIDTH 2
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_LBN 10
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_IN_TLP_INFO_SPARE_WIDTH 22
+
+/* MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT msgresponse */
+#define MC_CMD_SET_TLP_PROCESSING_GLOBALS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_SATELLITE_DOWNLOAD
+ * Download a new set of images to the satellite CPUs from the host.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD 0x91
+
+/* MC_CMD_SATELLITE_DOWNLOAD_IN msgrequest: The reset requirements for the CPUs
+ * are subtle, and so downloads must proceed in a number of phases.
+ *
+ * 1) PHASE_RESET with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * 2) PHASE_IMEMS for each of the IMEM targets (target IDs 0-11). Each download
+ * may consist of multiple chunks. The final chunk (with CHUNK_ID_LAST) should
+ * be a checksum (a simple 32-bit sum) of the transferred data. An individual
+ * download may be aborted using CHUNK_ID_ABORT.
+ *
+ * 3) PHASE_VECTORS for each of the vector table targets (target IDs 12-15),
+ * similar to PHASE_IMEMS.
+ *
+ * 4) PHASE_READY with a target of TARGET_ALL and chunk ID/length of 0.
+ *
+ * After any error (a requested abort is not considered to be an error) the
+ * sequence must be restarted from PHASE_RESET.
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMIN 20
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LENMAX 252
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_LEN(num) (16+4*(num))
+/* Download phase. (Note: the IDLE phase is used internally and is never valid
+ * in a command from the host.)
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_OFST 0
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IDLE 0x0 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_RESET 0x1 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_IMEMS 0x2 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_VECTORS 0x3 /* enum */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_PHASE_READY 0x4 /* enum */
+/* Target for download. (These match the blob numbers defined in
+ * mc_flash_layout.h.)
+ */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_OFST 4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_TEXT 0x0
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_TEXT 0x1
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDP_TEXT 0x2
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDP_TEXT 0x3
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT 0x4
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_LUT_CFG 0x5
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT 0x6
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_LUT_CFG 0x7
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_HR_PGM 0x8
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXHRSL_SL_PGM 0x9
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_HR_PGM 0xa
+/* enum: Valid in phase 2 (PHASE_IMEMS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXHRSL_SL_PGM 0xb
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL0 0xc
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL0 0xd
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_RXDI_VTBL1 0xe
+/* enum: Valid in phase 3 (PHASE_VECTORS) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_TXDI_VTBL1 0xf
+/* enum: Valid in phases 1 (PHASE_RESET) and 4 (PHASE_READY) only */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_TARGET_ALL 0xffffffff
+/* Chunk ID, or CHUNK_ID_LAST or CHUNK_ID_ABORT */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_OFST 8
+/* enum: Last chunk, containing checksum rather than data */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_LAST 0xffffffff
+/* enum: Abort download of this item */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_ID_ABORT 0xfffffffe
+/* Length of this chunk in bytes */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_LEN_OFST 12
+/* Data for this chunk */
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_OFST 16
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_LEN 4
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MINNUM 1
+#define MC_CMD_SATELLITE_DOWNLOAD_IN_CHUNK_DATA_MAXNUM 59
+
+/* MC_CMD_SATELLITE_DOWNLOAD_OUT msgresponse */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_LEN 8
+/* Same as MC_CMD_ERR field, but included as 0 in success cases */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_RESULT_OFST 0
+/* Extra status information */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_INFO_OFST 4
+/* enum: Code download OK, completed. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_COMPLETE 0x0
+/* enum: Code download aborted as requested. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_ABORTED 0x1
+/* enum: Code download OK so far, send next chunk. */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_OK_NEXT_CHUNK 0x2
+/* enum: Download phases out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_PHASE 0x100
+/* enum: Bad target for this phase */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_TARGET 0x101
+/* enum: Chunk ID out of sequence */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_ID 0x200
+/* enum: Chunk length zero or too large */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHUNK_LEN 0x201
+/* enum: Checksum was incorrect */
+#define MC_CMD_SATELLITE_DOWNLOAD_OUT_ERR_BAD_CHECKSUM 0x300
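The phased sequence described in the MC_CMD_SATELLITE_DOWNLOAD_IN comment above lends itself to a small driver loop; the following sketch shows one plausible shape. mcdi_rpc() is again a hypothetical transport, and details such as the exact length of the CHUNK_ID_LAST request are assumptions rather than anything this header guarantees.

#include <stdint.h>
#include <stddef.h>

int mcdi_rpc(unsigned int cmd, const uint8_t *req, size_t req_len,
             uint8_t *resp, size_t resp_len);   /* assumed transport */

static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
        buf[ofst + 0] = val & 0xff;
        buf[ofst + 1] = (val >> 8) & 0xff;
        buf[ofst + 2] = (val >> 16) & 0xff;
        buf[ofst + 3] = (val >> 24) & 0xff;
}

/* Send one MC_CMD_SATELLITE_DOWNLOAD request: phase, target, chunk id,
 * chunk length in bytes, then up to CHUNK_DATA_MAXNUM (59) data dwords. */
static int sat_download(uint32_t phase, uint32_t target, uint32_t chunk_id,
                        const uint32_t *data, size_t ndwords)
{
        uint8_t req[16 + 4 * 59];
        size_t i;

        mcdi_put_dword(req, 0, phase);
        mcdi_put_dword(req, 4, target);
        mcdi_put_dword(req, 8, chunk_id);
        mcdi_put_dword(req, 12, (uint32_t)(4 * ndwords));
        for (i = 0; i < ndwords; i++)
                mcdi_put_dword(req, 16 + 4 * i, data[i]);
        return mcdi_rpc(0x91, req, 16 + 4 * ndwords, NULL, 0);
}

/* Download one image in the order prescribed above: numbered data chunks
 * followed by a CHUNK_ID_LAST chunk carrying the simple 32-bit sum. The
 * whole download is bracketed by PHASE_RESET and PHASE_READY requests with
 * TARGET_ALL and zero chunk ID/length (not shown). */
static int sat_download_image(uint32_t target, const uint32_t *img,
                              size_t ndwords)
{
        uint32_t sum = 0, chunk = 0;
        size_t off = 0, i;
        int rc;

        for (i = 0; i < ndwords; i++)
                sum += img[i];
        while (off < ndwords) {
                size_t n = (ndwords - off > 59) ? 59 : ndwords - off;

                rc = sat_download(0x2 /* PHASE_IMEMS */, target, chunk++,
                                  img + off, n);
                if (rc)
                        return rc;
                off += n;
        }
        return sat_download(0x2, target, 0xffffffff /* CHUNK_ID_LAST */,
                            &sum, 1);
}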
+
+
+/***********************************/
+/* MC_CMD_GET_CAPABILITIES
+ * Get device capabilities.
+ *
+ * This is supplementary to the MC_CMD_GET_BOARD_CFG command, and intended to
+ * reference inherent device capabilities as opposed to current NVRAM config.
+ */
+#define MC_CMD_GET_CAPABILITIES 0xbe
+
+/* MC_CMD_GET_CAPABILITIES_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_VSWITCH 0x3 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_VSWITCH 0x3 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4 /* enum */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
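The FLAGS1 word above is decoded with the usual LBN/WIDTH convention (bit numbers within the containing dword). A minimal sketch of extracting a couple of capability bits and the 16-bit RX_DPCPU_FW_ID field follows; the helper names are illustrative only.

#include <stdint.h>

/* Extract a WIDTH-bit field at bit LBN of the little-endian dword at OFST. */
static uint32_t mcdi_field(const uint8_t *buf, unsigned int ofst,
                           unsigned int lbn, unsigned int width)
{
        uint32_t dword = buf[ofst] | buf[ofst + 1] << 8 |
                         buf[ofst + 2] << 16 | (uint32_t)buf[ofst + 3] << 24;

        return (dword >> lbn) & ((1u << width) - 1);
}

/* Decode a 20-byte MC_CMD_GET_CAPABILITIES response. */
static void decode_capabilities(const uint8_t resp[20])
{
        int tx_tso      = mcdi_field(resp, 0, 21, 1);   /* TX_TSO */
        int rx_batching = mcdi_field(resp, 0, 25, 1);   /* RX_BATCHING */
        uint16_t rx_fw  = resp[4] | resp[5] << 8;       /* RX_DPCPU_FW_ID */

        (void)tx_tso; (void)rx_batching; (void)rx_fw;
}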
+
+
+/***********************************/
+/* MC_CMD_V2_EXTN
+ * Encapsulation for a v2 extended command
+ */
+#define MC_CMD_V2_EXTN 0x7f
+
+/* MC_CMD_V2_EXTN_IN msgrequest */
+#define MC_CMD_V2_EXTN_IN_LEN 4
+/* the extended command number */
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_LBN 0
+#define MC_CMD_V2_EXTN_IN_EXTENDED_CMD_WIDTH 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_LBN 15
+#define MC_CMD_V2_EXTN_IN_UNUSED_WIDTH 1
+/* the actual length of the encapsulated command (which is not in the v1
+ * header)
+ */
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_LBN 16
+#define MC_CMD_V2_EXTN_IN_ACTUAL_LEN_WIDTH 10
+#define MC_CMD_V2_EXTN_IN_UNUSED2_LBN 26
+#define MC_CMD_V2_EXTN_IN_UNUSED2_WIDTH 6
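For reference, the encapsulation dword is just the two bit fields defined above packed together: the extended command number in bits 0-14 and the actual encapsulated length in bits 16-25. A one-function sketch, illustrative only:

#include <stdint.h>

/* Build the MC_CMD_V2_EXTN_IN dword from the extended command number and
 * the actual length in bytes of the encapsulated command. */
static uint32_t mcdi_v2_extn_dword(unsigned int extended_cmd,
                                   unsigned int actual_len)
{
        return ((uint32_t)(extended_cmd & 0x7fff) << 0) |  /* EXTENDED_CMD, width 15 */
               ((uint32_t)(actual_len & 0x3ff) << 16);     /* ACTUAL_LEN, width 10 */
}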
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_ALLOC
+ * Allocate a pacer bucket (for qau rp or a snapper test)
+ */
+#define MC_CMD_TCM_BUCKET_ALLOC 0xb2
+
+/* MC_CMD_TCM_BUCKET_ALLOC_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_ALLOC_IN_LEN 0
+
+/* MC_CMD_TCM_BUCKET_ALLOC_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_LEN 4
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_ALLOC_OUT_BUCKET_OFST 0
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_FREE
+ * Free a pacer bucket
+ */
+#define MC_CMD_TCM_BUCKET_FREE 0xb3
+
+/* MC_CMD_TCM_BUCKET_FREE_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_FREE_IN_LEN 4
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_FREE_IN_BUCKET_OFST 0
+
+/* MC_CMD_TCM_BUCKET_FREE_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_BUCKET_INIT
+ * Initialise pacer bucket with a given rate
+ */
+#define MC_CMD_TCM_BUCKET_INIT 0xb4
+
+/* MC_CMD_TCM_BUCKET_INIT_IN msgrequest */
+#define MC_CMD_TCM_BUCKET_INIT_IN_LEN 8
+/* the bucket id */
+#define MC_CMD_TCM_BUCKET_INIT_IN_BUCKET_OFST 0
+/* the rate in mbps */
+#define MC_CMD_TCM_BUCKET_INIT_IN_RATE_OFST 4
+
+/* MC_CMD_TCM_BUCKET_INIT_OUT msgresponse */
+#define MC_CMD_TCM_BUCKET_INIT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_TCM_TXQ_INIT
+ * Initialise a txq in the pacer with the given options, or update its options
+ */
+#define MC_CMD_TCM_TXQ_INIT 0xb5
+
+/* MC_CMD_TCM_TXQ_INIT_IN msgrequest */
+#define MC_CMD_TCM_TXQ_INIT_IN_LEN 28
+/* the txq id */
+#define MC_CMD_TCM_TXQ_INIT_IN_QID_OFST 0
+/* the static priority associated with the txq */
+#define MC_CMD_TCM_TXQ_INIT_IN_LABEL_OFST 4
+/* bitmask of the priority queues this txq is inserted into */
+#define MC_CMD_TCM_TXQ_INIT_IN_PQ_FLAGS_OFST 8
+/* the reaction point (RP) bucket */
+#define MC_CMD_TCM_TXQ_INIT_IN_RP_BKT_OFST 12
+/* an already reserved bucket (typically set to bucket associated with outer
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT1_OFST 16
+/* an already reserved bucket (typically set to bucket associated with inner
+ * vswitch)
+ */
+#define MC_CMD_TCM_TXQ_INIT_IN_MAX_BKT2_OFST 20
+/* the min bucket (typically for ETS/minimum bandwidth) */
+#define MC_CMD_TCM_TXQ_INIT_IN_MIN_BKT_OFST 24
+
+/* MC_CMD_TCM_TXQ_INIT_OUT msgresponse */
+#define MC_CMD_TCM_TXQ_INIT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_LINK_PIOBUF
+ * Link a push I/O buffer to a TxQ
+ */
+#define MC_CMD_LINK_PIOBUF 0x92
+
+/* MC_CMD_LINK_PIOBUF_IN msgrequest */
+#define MC_CMD_LINK_PIOBUF_IN_LEN 8
+/* Handle for allocated push I/O buffer. */
+#define MC_CMD_LINK_PIOBUF_IN_PIOBUF_HANDLE_OFST 0
+/* Function Local Instance (VI) number. */
+#define MC_CMD_LINK_PIOBUF_IN_TXQ_INSTANCE_OFST 4
+
+/* MC_CMD_LINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_LINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_UNLINK_PIOBUF
+ * Unlink a push I/O buffer from a TxQ
+ */
+#define MC_CMD_UNLINK_PIOBUF 0x93
+
+/* MC_CMD_UNLINK_PIOBUF_IN msgrequest */
+#define MC_CMD_UNLINK_PIOBUF_IN_LEN 4
+/* Function Local Instance (VI) number. */
+#define MC_CMD_UNLINK_PIOBUF_IN_TXQ_INSTANCE_OFST 0
+
+/* MC_CMD_UNLINK_PIOBUF_OUT msgresponse */
+#define MC_CMD_UNLINK_PIOBUF_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_ALLOC
+ * allocate and initialise a v-switch.
+ */
+#define MC_CMD_VSWITCH_ALLOC 0x94
+
+/* MC_CMD_VSWITCH_ALLOC_IN msgrequest */
+#define MC_CMD_VSWITCH_ALLOC_IN_LEN 16
+/* The port to connect to the v-switch's upstream port. */
+#define MC_CMD_VSWITCH_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of v-switch to create. */
+#define MC_CMD_VSWITCH_ALLOC_IN_TYPE_OFST 4
+/* enum: VLAN */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VLAN 0x1
+/* enum: VEB */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEB 0x2
+/* enum: VEPA */
+#define MC_CMD_VSWITCH_ALLOC_IN_VSWITCH_TYPE_VEPA 0x3
+/* Flags controlling v-port creation */
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VSWITCH_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to support. */
+#define MC_CMD_VSWITCH_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+
+/* MC_CMD_VSWITCH_ALLOC_OUT msgresponse */
+#define MC_CMD_VSWITCH_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VSWITCH_FREE
+ * de-allocate a v-switch.
+ */
+#define MC_CMD_VSWITCH_FREE 0x95
+
+/* MC_CMD_VSWITCH_FREE_IN msgrequest */
+#define MC_CMD_VSWITCH_FREE_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VSWITCH_FREE_OUT msgresponse */
+#define MC_CMD_VSWITCH_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_ALLOC
+ * allocate a v-port.
+ */
+#define MC_CMD_VPORT_ALLOC 0x96
+
+/* MC_CMD_VPORT_ALLOC_IN msgrequest */
+#define MC_CMD_VPORT_ALLOC_IN_LEN 20
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VPORT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of the new v-port. */
+#define MC_CMD_VPORT_ALLOC_IN_TYPE_OFST 4
+/* enum: VLAN (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VLAN 0x1
+/* enum: VEB (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEB 0x2
+/* enum: VEPA (obsolete) */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_VEPA 0x3
+/* enum: A normal v-port receives packets which match a specified MAC and/or
+ * VLAN.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_NORMAL 0x4
+/* enum: An expansion v-port receives traffic which doesn't match any other
+ * v-port.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_EXPANSION 0x5
+/* enum: A test v-port receives packets which match any filters installed by
+ * its downstream components.
+ */
+#define MC_CMD_VPORT_ALLOC_IN_VPORT_TYPE_TEST 0x6
+/* Flags controlling v-port creation */
+#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+/* The number of VLAN tags to insert/remove. */
+#define MC_CMD_VPORT_ALLOC_IN_NUM_VLAN_TAGS_OFST 12
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAGS_OFST 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_ALLOC_IN_VLAN_TAG_1_WIDTH 16
+
+/* MC_CMD_VPORT_ALLOC_OUT msgresponse */
+#define MC_CMD_VPORT_ALLOC_OUT_LEN 4
+/* The handle of the new v-port */
+#define MC_CMD_VPORT_ALLOC_OUT_VPORT_ID_OFST 0
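To show how the flags, tag count and packed VLAN_TAG_0/VLAN_TAG_1 fields fit into the 20-byte request, here is a minimal sketch of allocating a NORMAL v-port. mcdi_rpc() and the helper are hypothetical stand-ins, not part of this header.

#include <stdint.h>
#include <stddef.h>

int mcdi_rpc(unsigned int cmd, const uint8_t *req, size_t req_len,
             uint8_t *resp, size_t resp_len);   /* assumed transport */

static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
        buf[ofst + 0] = val & 0xff;
        buf[ofst + 1] = (val >> 8) & 0xff;
        buf[ofst + 2] = (val >> 16) & 0xff;
        buf[ofst + 3] = (val >> 24) & 0xff;
}

/* Allocate a NORMAL v-port with 0, 1 or 2 VLAN tags; on success the new
 * v-port handle is returned through *vport_id. */
static int vport_alloc_normal(uint32_t upstream_port, uint32_t num_tags,
                              uint16_t tag0, uint16_t tag1, uint32_t *vport_id)
{
        uint8_t req[20] = { 0 }, resp[4];
        int rc;

        mcdi_put_dword(req, 0, upstream_port);            /* UPSTREAM_PORT_ID */
        mcdi_put_dword(req, 4, 0x4);                      /* VPORT_TYPE_NORMAL */
        mcdi_put_dword(req, 8, 0);                        /* FLAGS: AUTO_PORT clear */
        mcdi_put_dword(req, 12, num_tags);                /* NUM_VLAN_TAGS */
        mcdi_put_dword(req, 16, tag0 | (uint32_t)tag1 << 16); /* TAG_0 / TAG_1 */
        rc = mcdi_rpc(0x96 /* MC_CMD_VPORT_ALLOC */, req, sizeof(req),
                      resp, sizeof(resp));
        if (rc == 0)
                *vport_id = resp[0] | resp[1] << 8 | resp[2] << 16 |
                            (uint32_t)resp[3] << 24;
        return rc;
}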
+
+
+/***********************************/
+/* MC_CMD_VPORT_FREE
+ * de-allocate a v-port.
+ */
+#define MC_CMD_VPORT_FREE 0x97
+
+/* MC_CMD_VPORT_FREE_IN msgrequest */
+#define MC_CMD_VPORT_FREE_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_FREE_IN_VPORT_ID_OFST 0
+
+/* MC_CMD_VPORT_FREE_OUT msgresponse */
+#define MC_CMD_VPORT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_ALLOC
+ * allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_ALLOC 0x98
+
+/* MC_CMD_VADAPTOR_ALLOC_IN msgrequest */
+#define MC_CMD_VADAPTOR_ALLOC_IN_LEN 16
+/* The port to connect to the v-adaptor's port. */
+#define MC_CMD_VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* Flags controlling v-adaptor creation */
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
+/* The number of VLAN tags to strip on receive */
+#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
+
+/* MC_CMD_VADAPTOR_ALLOC_OUT msgresponse */
+#define MC_CMD_VADAPTOR_ALLOC_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VADAPTOR_FREE
+ * de-allocate a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_FREE 0x99
+
+/* MC_CMD_VADAPTOR_FREE_IN msgrequest */
+#define MC_CMD_VADAPTOR_FREE_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_FREE_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_FREE_OUT msgresponse */
+#define MC_CMD_VADAPTOR_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_ASSIGN
+ * assign a port to a PCI function.
+ */
+#define MC_CMD_EVB_PORT_ASSIGN 0x9a
+
+/* MC_CMD_EVB_PORT_ASSIGN_IN msgrequest */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_LEN 8
+/* The port to assign. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PORT_ID_OFST 0
+/* The target function to modify. */
+#define MC_CMD_EVB_PORT_ASSIGN_IN_FUNCTION_OFST 4
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_LBN 0
+#define MC_CMD_EVB_PORT_ASSIGN_IN_PF_WIDTH 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_LBN 16
+#define MC_CMD_EVB_PORT_ASSIGN_IN_VF_WIDTH 16
+
+/* MC_CMD_EVB_PORT_ASSIGN_OUT msgresponse */
+#define MC_CMD_EVB_PORT_ASSIGN_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RDWR_A64_REGIONS
+ * Assign the 64 bit region addresses.
+ */
+#define MC_CMD_RDWR_A64_REGIONS 0x9b
+
+/* MC_CMD_RDWR_A64_REGIONS_IN msgrequest */
+#define MC_CMD_RDWR_A64_REGIONS_IN_LEN 17
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_IN_REGION3_OFST 12
+/* Write enable bits 0-3, set to write, clear to read. */
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_LBN 128
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_WIDTH 4
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_OFST 16
+#define MC_CMD_RDWR_A64_REGIONS_IN_WRITE_MASK_BYTE_LEN 1
+
+/* MC_CMD_RDWR_A64_REGIONS_OUT msgresponse: This data is always included,
+ * regardless of the state of the write bits in the request.
+ */
+#define MC_CMD_RDWR_A64_REGIONS_OUT_LEN 16
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION0_OFST 0
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION1_OFST 4
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION2_OFST 8
+#define MC_CMD_RDWR_A64_REGIONS_OUT_REGION3_OFST 12
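A short sketch of how the 17-byte MC_CMD_RDWR_A64_REGIONS request is laid out: four region dwords followed by the single write-mask byte (WRITE_MASK_LBN 128 is simply bit 0 of the byte at offset 16). Helper names are illustrative.

#include <stdint.h>

static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
        buf[ofst + 0] = val & 0xff;
        buf[ofst + 1] = (val >> 8) & 0xff;
        buf[ofst + 2] = (val >> 16) & 0xff;
        buf[ofst + 3] = (val >> 24) & 0xff;
}

/* Pack the request: bit n of write_mask set => write REGIONn, clear => the
 * current value is merely read back in the response. */
static void rdwr_a64_regions_req(uint8_t req[17], const uint32_t region[4],
                                 uint8_t write_mask)
{
        unsigned int i;

        for (i = 0; i < 4; i++)
                mcdi_put_dword(req, 4 * i, region[i]);
        req[16] = write_mask & 0xf;     /* WRITE_MASK byte at OFST 16 */
}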
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_ALLOC
+ * Allocate an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_ALLOC 0x9c
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_LEN 4
+/* The handle of the owning upstream port */
+#define MC_CMD_ONLOAD_STACK_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_ONLOAD_STACK_ALLOC_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_LEN 4
+/* The handle of the new Onload stack */
+#define MC_CMD_ONLOAD_STACK_ALLOC_OUT_ONLOAD_STACK_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_ONLOAD_STACK_FREE
+ * Free an Onload stack ID.
+ */
+#define MC_CMD_ONLOAD_STACK_FREE 0x9d
+
+/* MC_CMD_ONLOAD_STACK_FREE_IN msgrequest */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_LEN 4
+/* The handle of the Onload stack */
+#define MC_CMD_ONLOAD_STACK_FREE_IN_ONLOAD_STACK_ID_OFST 0
+
+/* MC_CMD_ONLOAD_STACK_FREE_OUT msgresponse */
+#define MC_CMD_ONLOAD_STACK_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_ALLOC
+ * Allocate an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC 0x9e
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN 12
+/* The handle of the owning upstream port */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* The type of context to allocate */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_OFST 4
+/* enum: Allocate a context for exclusive use. The key and indirection table
+ * must be explicitly configured.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE 0x0
+/* enum: Allocate a context for shared use; this will spread across a range of
+ * queues, but the key and indirection table are pre-configured and may not be
+ * changed. For this mode, NUM_QUEUES must be 2, 4, 8, 16, 32 or 64.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED 0x1
+/* Number of queues spanned by this context, in the range 1-64; valid offsets
+ * in the indirection table will be in the range 0 to NUM_QUEUES-1.
+ */
+#define MC_CMD_RSS_CONTEXT_ALLOC_IN_NUM_QUEUES_OFST 8
+
+/* MC_CMD_RSS_CONTEXT_ALLOC_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN 4
+/* The handle of the new RSS context */
+#define MC_CMD_RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_FREE
+ * Free an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_FREE 0x9f
+
+/* MC_CMD_RSS_CONTEXT_FREE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_FREE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_KEY
+ * Set the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_KEY 0xa0
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN 44
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN 40
+
+/* MC_CMD_RSS_CONTEXT_SET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_KEY_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_KEY
+ * Get the Toeplitz hash key for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_KEY 0xa1
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_KEY_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN 44
+/* The 40-byte Toeplitz hash key (TBD endianness issues?) */
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY_LEN 40
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_TABLE
+ * Set the indirection table for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE 0xa2
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN 132
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN 128
+
+/* MC_CMD_RSS_CONTEXT_SET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_TABLE_OUT_LEN 0
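The commands above (allocate a context, set its key, set its indirection table) are normally used as a sequence. The sketch below allocates an EXCLUSIVE context and spreads the 128-entry table round-robin over the context's queues; mcdi_rpc() is a hypothetical transport and the 40-byte Toeplitz key is caller-supplied.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

int mcdi_rpc(unsigned int cmd, const uint8_t *req, size_t req_len,
             uint8_t *resp, size_t resp_len);   /* assumed transport */

static void mcdi_put_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
        buf[ofst + 0] = val & 0xff;
        buf[ofst + 1] = (val >> 8) & 0xff;
        buf[ofst + 2] = (val >> 16) & 0xff;
        buf[ofst + 3] = (val >> 24) & 0xff;
}

/* Allocate an EXCLUSIVE RSS context spanning num_queues queues (1-64),
 * program the Toeplitz key, then fill the 128 one-byte indirection table
 * entries with offsets in the range 0..num_queues-1. */
static int rss_setup(uint32_t upstream_port, uint32_t num_queues,
                     const uint8_t key[40], uint32_t *ctx_out)
{
        uint8_t req[132], resp[4];
        uint32_t ctx;
        unsigned int i;
        int rc;

        /* MC_CMD_RSS_CONTEXT_ALLOC */
        memset(req, 0, 12);
        mcdi_put_dword(req, 0, upstream_port);
        mcdi_put_dword(req, 4, 0x0 /* TYPE_EXCLUSIVE */);
        mcdi_put_dword(req, 8, num_queues);
        rc = mcdi_rpc(0x9e, req, 12, resp, 4);
        if (rc)
                return rc;
        ctx = resp[0] | resp[1] << 8 | resp[2] << 16 | (uint32_t)resp[3] << 24;

        /* MC_CMD_RSS_CONTEXT_SET_KEY: 40-byte key at offset 4 */
        mcdi_put_dword(req, 0, ctx);
        memcpy(req + 4, key, 40);
        rc = mcdi_rpc(0xa0, req, 44, NULL, 0);
        if (rc)
                return rc;

        /* MC_CMD_RSS_CONTEXT_SET_TABLE: 128 one-byte entries at offset 4 */
        mcdi_put_dword(req, 0, ctx);
        for (i = 0; i < 128; i++)
                req[4 + i] = (uint8_t)(i % num_queues);
        rc = mcdi_rpc(0xa2, req, 132, NULL, 0);
        if (rc == 0)
                *ctx_out = ctx;
        return rc;
}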
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_TABLE
+ * Get the indirection table for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE 0xa3
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_TABLE_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN 132
+/* The 128-byte indirection table (1 byte per entry) */
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN 128
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS
+ * Set various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS 0xe1
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+/* Hash control flags */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_TCPV6_EN_WIDTH 1
+
+/* MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_SET_FLAGS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS
+ * Get various control flags for an RSS context.
+ */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS 0xe2
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_IN msgrequest */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_LEN 4
+/* The handle of the RSS context */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
+
+/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
+/* Hash control flags */
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_LBN 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV4_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_LBN 2
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV6_EN_WIDTH 1
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_LBN 3
+#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_TCPV6_EN_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_ALLOC
+ * Allocate a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC 0xa4
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_LEN 8
+/* The handle of the owning upstream port */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_UPSTREAM_PORT_ID_OFST 0
+/* Number of queues spanned by this mapping, in the range 1-64; valid fixed
+ * offsets in the mapping table will be in the range 0 to NUM_QUEUES-1, and
+ * referenced RSS contexts must span no more than this number.
+ */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_IN_NUM_QUEUES_OFST 4
+
+/* MC_CMD_DOT1P_MAPPING_ALLOC_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_LEN 4
+/* The handle of the new .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_ALLOC_OUT_DOT1P_MAPPING_ID_OFST 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_FREE
+ * Free a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_FREE 0xa5
+
+/* MC_CMD_DOT1P_MAPPING_FREE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_FREE_IN_DOT1P_MAPPING_ID_OFST 0
+
+/* MC_CMD_DOT1P_MAPPING_FREE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_FREE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE
+ * Set the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE 0xa6
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_LEN 36
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_IN_MAPPING_TABLE_LEN 32
+
+/* MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_SET_TABLE_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE
+ * Get the mapping table for a .1p mapping.
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE 0xa7
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_IN msgrequest */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_LEN 4
+/* The handle of the .1p mapping */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_IN_DOT1P_MAPPING_ID_OFST 0
+
+/* MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT msgresponse */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_LEN 36
+/* Per-priority mappings (1 32-bit word per entry - an offset or RSS context
+ * handle)
+ */
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_OFST 4
+#define MC_CMD_DOT1P_MAPPING_GET_TABLE_OUT_MAPPING_TABLE_LEN 32
+
+
+/***********************************/
+/* MC_CMD_GET_VECTOR_CFG
+ * Get Interrupt Vector config for this PF.
+ */
+#define MC_CMD_GET_VECTOR_CFG 0xbf
+
+/* MC_CMD_GET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_GET_VECTOR_CFG_IN_LEN 0
+
+/* MC_CMD_GET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_GET_VECTOR_CFG_OUT_LEN 12
+/* Base absolute interrupt vector number. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VEC_BASE_OFST 0
+/* Number of interrupt vectors allocated to this PF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_PF_OFST 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_GET_VECTOR_CFG_OUT_VECS_PER_VF_OFST 8
+
+
+/***********************************/
+/* MC_CMD_SET_VECTOR_CFG
+ * Set Interrupt Vector config for this PF.
+ */
+#define MC_CMD_SET_VECTOR_CFG 0xc0
+
+/* MC_CMD_SET_VECTOR_CFG_IN msgrequest */
+#define MC_CMD_SET_VECTOR_CFG_IN_LEN 12
+/* Base absolute interrupt vector number, or MC_CMD_RESOURCE_INSTANCE_ANY to
+ * let the system find a suitable base.
+ */
+#define MC_CMD_SET_VECTOR_CFG_IN_VEC_BASE_OFST 0
+/* Number of interrupt vectors to allocate to this PF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_PF_OFST 4
+/* Number of interrupt vectors to allocate per VF. */
+#define MC_CMD_SET_VECTOR_CFG_IN_VECS_PER_VF_OFST 8
+
+/* MC_CMD_SET_VECTOR_CFG_OUT msgresponse */
+#define MC_CMD_SET_VECTOR_CFG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_CLASS_STATS
+ * Retrieve rmon rx class statistics
+ */
+#define MC_CMD_RMON_RX_CLASS_STATS 0xc3
+
+/* MC_CMD_RMON_RX_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_RX_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_CLASS_STATS
+ * Retrieve rmon tx class statistics
+ */
+#define MC_CMD_RMON_TX_CLASS_STATS 0xc4
+
+/* MC_CMD_RMON_TX_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_TX_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_SUPER_CLASS_STATS
+ * Retrieve rmon rx super_class statistics
+ */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS 0xc5
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_LBN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_SUPER_CLASS_STATS
+ * Retrieve rmon tx super_class statistics
+ */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS 0xc6
+
+/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_LBN 4
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_SUPER_CLASS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS
+ * Add qid to class for statistics collection
+ */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS 0xc7
+
+/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN msgrequest */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_LEN 12
+/* class */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
+/* qid */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_QID_OFST 4
+/* flags */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
+
+/* MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_RX_ADD_QID_TO_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS
+ * Add qid to class for statistics collection
+ */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS 0xc8
+
+/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN msgrequest */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_LEN 12
+/* class */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
+/* qid */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_QID_OFST 4
+/* flags */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_LBN 8
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
+
+/* MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_TX_ADD_QID_TO_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS
+ * Add qid to class for statistics collection
+ */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS 0xc9
+
+/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN msgrequest */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_LEN 12
+/* class */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_CLASS_OFST 0
+/* qid */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_QID_OFST 4
+/* flags */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_FLAGS_OFST 8
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_LBN 4
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_PE_DELTA_WIDTH 4
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_LBN 8
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_IN_MTU_WIDTH 14
+
+/* MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_MC_ADD_QID_TO_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_ALLOC_CLASS
+ * Allocate an rmon class
+ */
+#define MC_CMD_RMON_ALLOC_CLASS 0xca
+
+/* MC_CMD_RMON_ALLOC_CLASS_IN msgrequest */
+#define MC_CMD_RMON_ALLOC_CLASS_IN_LEN 0
+
+/* MC_CMD_RMON_ALLOC_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_ALLOC_CLASS_OUT_LEN 4
+/* class */
+#define MC_CMD_RMON_ALLOC_CLASS_OUT_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_RMON_DEALLOC_CLASS
+ * Deallocate an rmon class
+ */
+#define MC_CMD_RMON_DEALLOC_CLASS 0xcb
+
+/* MC_CMD_RMON_DEALLOC_CLASS_IN msgrequest */
+#define MC_CMD_RMON_DEALLOC_CLASS_IN_LEN 4
+/* class */
+#define MC_CMD_RMON_DEALLOC_CLASS_IN_CLASS_OFST 0
+
+/* MC_CMD_RMON_DEALLOC_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_DEALLOC_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_ALLOC_SUPER_CLASS
+ * Allocate an rmon super_class
+ */
+#define MC_CMD_RMON_ALLOC_SUPER_CLASS 0xcc
+
+/* MC_CMD_RMON_ALLOC_SUPER_CLASS_IN msgrequest */
+#define MC_CMD_RMON_ALLOC_SUPER_CLASS_IN_LEN 0
+
+/* MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_LEN 4
+/* super_class */
+#define MC_CMD_RMON_ALLOC_SUPER_CLASS_OUT_SUPER_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_RMON_DEALLOC_SUPER_CLASS
+ * Deallocate an rmon super_class
+ */
+#define MC_CMD_RMON_DEALLOC_SUPER_CLASS 0xcd
+
+/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN msgrequest */
+#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_LEN 4
+/* super_class */
+#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_IN_SUPER_CLASS_OFST 0
+
+/* MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT msgresponse */
+#define MC_CMD_RMON_DEALLOC_SUPER_CLASS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_UP_CONV_STATS
+ * Retrieve up converter statistics
+ */
+#define MC_CMD_RMON_RX_UP_CONV_STATS 0xce
+
+/* MC_CMD_RMON_RX_UP_CONV_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_RX_UP_CONV_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_UP_CONV_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_UP_CONV_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPI_STATS
+ * Retrieve rx ipi stats
+ */
+#define MC_CMD_RMON_RX_IPI_STATS 0xcf
+
+/* MC_CMD_RMON_RX_IPI_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPI_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPI_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_LBN 0
+#define MC_CMD_RMON_RX_IPI_STATS_IN_VFIFO_WIDTH 5
+#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_LBN 5
+#define MC_CMD_RMON_RX_IPI_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPI_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPI_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS
+ * Retrieve rx ipsec cntxt_ptr indexed stats
+ */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS 0xd0
+
+/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPSEC_PORT_STATS
+ * Retrieve rx ipsec port indexed stats
+ */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS 0xd1
+
+/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS
+ * Retrieve rx ipsec overflow
+ */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS 0xd2
+
+/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS
+ * Add a MAC address to a v-port
+ */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS 0xa8
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to add */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_ADD_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS
+ * Delete a MAC address from a v-port
+ */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS 0xa9
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_IN msgrequest */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN 10
+/* The handle of the v-port */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID_OFST 0
+/* MAC address to delete */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_OFST 4
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_MACADDR_LEN 6
+
+/* MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT msgresponse */
+#define MC_CMD_VPORT_DEL_MAC_ADDRESS_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES
+ * Get the MAC addresses assigned to a v-port
+ */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES 0xaa
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_IN msgrequest */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID_OFST 0
+
+/* MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT msgresponse */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX 250
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LEN(num) (4+6*(num))
+/* The number of MAC addresses returned */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT_OFST 0
+/* Array of MAC addresses */
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_OFST 4
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_LEN 6
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MINNUM 0
+#define MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_MAXNUM 41
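Since the response is variable length (4 + 6*num bytes, at most 41 addresses), a caller has to walk it by hand. A minimal parsing sketch, with no claims about any particular driver's helpers:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Walk an MC_CMD_VPORT_GET_MAC_ADDRESSES response: the address count sits at
 * offset 0 and the 6-byte addresses are packed from offset 4. Returns the
 * number of addresses actually copied. */
static size_t parse_vport_macs(const uint8_t *resp, size_t resp_len,
                               uint8_t macs[][6], size_t max_macs)
{
        uint32_t count;
        size_t i, n;

        if (resp_len < 4)
                return 0;
        count = resp[0] | resp[1] << 8 | resp[2] << 16 |
                (uint32_t)resp[3] << 24;
        n = count < max_macs ? count : max_macs;
        for (i = 0; i < n && 4 + 6 * (i + 1) <= resp_len; i++)
                memcpy(macs[i], resp + 4 + 6 * i, 6);
        return i;
}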
+
+
+/***********************************/
+/* MC_CMD_DUMP_BUFTBL_ENTRIES
+ * Dump buffer table entries, mainly for command client debug use. Dumps
+ * absolute entries, and does not use chunk handles. All entries must be in
+ * range and used for q page mapping, although the latter restriction may be
+ * lifted in future.
+ */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES 0xab
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_IN msgrequest */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_LEN 8
+/* Index of the first buffer table entry. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_FIRSTID_OFST 0
+/* Number of buffer table entries to dump. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_IN_NUMENTRIES_OFST 4
+
+/* MC_CMD_DUMP_BUFTBL_ENTRIES_OUT msgresponse */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMIN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LENMAX 252
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_LEN(num) (0+12*(num))
+/* Raw buffer table entries, laid out as BUFTBL_ENTRY. */
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_OFST 0
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_LEN 12
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MINNUM 1
+#define MC_CMD_DUMP_BUFTBL_ENTRIES_OUT_ENTRY_MAXNUM 21
+
+
+/***********************************/
+/* MC_CMD_SET_RXDP_CONFIG
+ * Set global RXDP configuration settings
+ */
+#define MC_CMD_SET_RXDP_CONFIG 0xc1
+
+/* MC_CMD_SET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_SET_RXDP_CONFIG_IN_LEN 4
+#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
+
+/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_RXDP_CONFIG
+ * Get global RXDP configuration settings
+ */
+#define MC_CMD_GET_RXDP_CONFIG 0xc2
+
+/* MC_CMD_GET_RXDP_CONFIG_IN msgrequest */
+#define MC_CMD_GET_RXDP_CONFIG_IN_LEN 0
+
+/* MC_CMD_GET_RXDP_CONFIG_OUT msgresponse */
+#define MC_CMD_GET_RXDP_CONFIG_OUT_LEN 4
+#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_CLASS_DROPS_STATS
+ * Retrieve rx class drop stats
+ */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS 0xd3
+
+/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS
+ * Retrieve rx super class drop stats
+ */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS 0xd4
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_LBN 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_SUPER_CLASS_WIDTH 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_LBN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_SUPER_CLASS_DROPS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_ERRORS_STATS
+ * Retrieve rxdp errors
+ */
+#define MC_CMD_RMON_RX_ERRORS_STATS 0xd5
+
+/* MC_CMD_RMON_RX_ERRORS_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_LBN 0
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_QID_WIDTH 11
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_LBN 11
+#define MC_CMD_RMON_RX_ERRORS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_ERRORS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_RX_OVERFLOW_STATS
+ * Retrieve rxdp overflow
+ */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS 0xd6
+
+/* MC_CMD_RMON_RX_OVERFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_RX_OVERFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_RX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_IPI_STATS
+ * Retrieve tx ipi stats
+ */
+#define MC_CMD_RMON_TX_IPI_STATS 0xd7
+
+/* MC_CMD_RMON_TX_IPI_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_IPI_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_IPI_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_LBN 0
+#define MC_CMD_RMON_TX_IPI_STATS_IN_VFIFO_WIDTH 5
+#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_LBN 5
+#define MC_CMD_RMON_TX_IPI_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_IPI_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_IPI_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS
+ * Retrieve tx ipsec counters by cntxt_ptr
+ */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS 0xd8
+
+/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_LBN 0
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_CNTXT_PTR_WIDTH 9
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_LBN 9
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_IPSEC_CNTXT_PTR_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_IPSEC_PORT_STATS
+ * Retrieve tx ipsec counters by port
+ */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS 0xd9
+
+/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_IPSEC_PORT_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS
+ * Retrieve tx ipsec overflow
+ */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS 0xda
+
+/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_LBN 0
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_PORT_WIDTH 2
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_LBN 2
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_IPSEC_OFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_NOWHERE_STATS
+ * Retrieve tx nowhere stats
+ */
+#define MC_CMD_RMON_TX_NOWHERE_STATS 0xdb
+
+/* MC_CMD_RMON_TX_NOWHERE_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_TX_NOWHERE_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_NOWHERE_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_NOWHERE_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS
+ * Retrieve tx nowhere qbb stats
+ */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS 0xdc
+
+/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_LBN 0
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_PRIORITY_WIDTH 3
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_LBN 3
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_NOWHERE_QBB_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_ERRORS_STATS
+ * Retrieve txdp errors
+ */
+#define MC_CMD_RMON_TX_ERRORS_STATS 0xdd
+
+/* MC_CMD_RMON_TX_ERRORS_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_LBN 0
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_QID_WIDTH 11
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_LBN 11
+#define MC_CMD_RMON_TX_ERRORS_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_ERRORS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_ERRORS_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_TX_OVERFLOW_STATS
+ * Retrieve txdp overflow
+ */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS 0xde
+
+/* MC_CMD_RMON_TX_OVERFLOW_STATS_IN msgrequest */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_LEN 4
+/* flags */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_FLAGS_OFST 0
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_LBN 0
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_CLASS_WIDTH 8
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_LBN 8
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_IN_RST_WIDTH 1
+
+/* MC_CMD_RMON_TX_OVERFLOW_STATS_OUT msgresponse */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMIN 4
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LENMAX 252
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_LEN(num) (0+4*(num))
+/* Array of stats */
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_OFST 0
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_LEN 4
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MINNUM 1
+#define MC_CMD_RMON_TX_OVERFLOW_STATS_OUT_BUFFER_MAXNUM 63
+
+
+/***********************************/
+/* MC_CMD_RMON_COLLECT_CLASS_STATS
+ * Explicitly collect class stats at the specified evb port
+ */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS 0xdf
+
+/* MC_CMD_RMON_COLLECT_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_LEN 4
+/* The port id associated with the vport/pport at which to collect class stats
+ */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_IN_PORT_ID_OFST 0
+
+/* MC_CMD_RMON_COLLECT_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_LEN 4
+/* class */
+#define MC_CMD_RMON_COLLECT_CLASS_STATS_OUT_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS
+ * Explicitly collect super class stats at the specified evb port
+ */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS 0xe0
+
+/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN msgrequest */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_LEN 4
+/* The port id associated with the vport/pport at which to collect class stats
+ */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_IN_PORT_ID_OFST 0
+
+/* MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT msgresponse */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_LEN 4
+/* super_class */
+#define MC_CMD_RMON_COLLECT_SUPER_CLASS_STATS_OUT_SUPER_CLASS_OFST 0
+
+
+/***********************************/
+/* MC_CMD_GET_CLOCK
+ * Return the system and DPCPU clock frequencies.
+ */
+#define MC_CMD_GET_CLOCK 0xac
+
+/* MC_CMD_GET_CLOCK_IN msgrequest */
+#define MC_CMD_GET_CLOCK_IN_LEN 0
+
+/* MC_CMD_GET_CLOCK_OUT msgresponse */
+#define MC_CMD_GET_CLOCK_OUT_LEN 8
+/* System frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* DPCPU frequency, MHz */
+#define MC_CMD_GET_CLOCK_OUT_DPCPU_FREQ_OFST 4
+
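/* Illustrative sketch (not part of this patch): reading the clock
 * frequencies reported by MC_CMD_GET_CLOCK, following the same
 * request/response pattern as the MCDI code later in this patch.
 * The function name and output parameters are hypothetical.
 */
static int example_get_clock(struct efx_nic *efx, u32 *sys_mhz, u32 *dpcpu_mhz)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_CLOCK_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_CLOCK_OUT_LEN)
		return -EIO;

	*sys_mhz = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
	*dpcpu_mhz = MCDI_DWORD(outbuf, GET_CLOCK_OUT_DPCPU_FREQ);
	return 0;
}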
+
+/***********************************/
+/* MC_CMD_SET_CLOCK
+ * Control the system and DPCPU clock frequencies. Changes are lost on reboot.
+ */
+#define MC_CMD_SET_CLOCK 0xad
+
+/* MC_CMD_SET_CLOCK_IN msgrequest */
+#define MC_CMD_SET_CLOCK_IN_LEN 12
+/* Requested system frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_SYS_FREQ_OFST 0
+/* Requested inter-core frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_ICORE_FREQ_OFST 4
+/* Requested DPCPU frequency in MHz; 0 leaves unchanged. */
+#define MC_CMD_SET_CLOCK_IN_DPCPU_FREQ_OFST 8
+
+/* MC_CMD_SET_CLOCK_OUT msgresponse */
+#define MC_CMD_SET_CLOCK_OUT_LEN 12
+/* Resulting system frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_SYS_FREQ_OFST 0
+/* Resulting inter-core frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_ICORE_FREQ_OFST 4
+/* Resulting DPCPU frequency in MHz */
+#define MC_CMD_SET_CLOCK_OUT_DPCPU_FREQ_OFST 8
+
+
+/***********************************/
+/* MC_CMD_DPCPU_RPC
+ * Send an arbitrary DPCPU message.
+ */
+#define MC_CMD_DPCPU_RPC 0xae
+
+/* MC_CMD_DPCPU_RPC_IN msgrequest */
+#define MC_CMD_DPCPU_RPC_IN_LEN 36
+#define MC_CMD_DPCPU_RPC_IN_CPU_OFST 0
+/* enum: RxDPCPU */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_RX 0x0
+/* enum: TxDPCPU0 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX0 0x1
+/* enum: TxDPCPU1 */
+#define MC_CMD_DPCPU_RPC_IN_DPCPU_TX1 0x2
+/* First 8 bits [39:32] of DATA are consumed by MC-DPCPU protocol and must be
+ * initialised to zero
+ */
+#define MC_CMD_DPCPU_RPC_IN_DATA_OFST 4
+#define MC_CMD_DPCPU_RPC_IN_DATA_LEN 32
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_LBN 8
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_CMDNUM_WIDTH 8
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_READ 0x6 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_WRITE 0x7 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_SELF_TEST 0xc /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_TXDPCPU_CSR_ACCESS 0xe /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_READ 0x46 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_WRITE 0x47 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SELF_TEST 0x4a /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_CSR_ACCESS 0x4c /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CMDNUM_RXDPCPU_SET_MC_REPLAY_CNTXT 0x4d /* enum */
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_OBJID_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_ADDR_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_LBN 48
+#define MC_CMD_DPCPU_RPC_IN_HDR_CMD_REQ_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_INFO_WIDTH 240
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_STOP_RETURN_RESULT 0x0 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_READ 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_WRITE_READ 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_CMD_START_PIPELINED_READ 0x4 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_LBN 48
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_START_DELAY_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_LBN 64
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_RPT_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_LBN 80
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_GAP_DELAY_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_LBN 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_CUT_THROUGH 0x1 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD 0x2 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_MODE_STORE_FORWARD_FIRST 0x3 /* enum */
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_LBN 64
+#define MC_CMD_DPCPU_RPC_IN_MC_REPLAY_CNTXT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_IN_WDATA_OFST 12
+#define MC_CMD_DPCPU_RPC_IN_WDATA_LEN 24
+/* Register data to write. Only valid in write/write-read. */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_DATA_OFST 16
+/* Register address. */
+#define MC_CMD_DPCPU_RPC_IN_CSR_ACCESS_ADDRESS_OFST 20
+
+/* MC_CMD_DPCPU_RPC_OUT msgresponse */
+#define MC_CMD_DPCPU_RPC_OUT_LEN 36
+#define MC_CMD_DPCPU_RPC_OUT_RC_OFST 0
+/* DATA */
+#define MC_CMD_DPCPU_RPC_OUT_DATA_OFST 4
+#define MC_CMD_DPCPU_RPC_OUT_DATA_LEN 32
+#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_LBN 32
+#define MC_CMD_DPCPU_RPC_OUT_HDR_CMD_RESP_ERRCODE_WIDTH 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_LBN 48
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_COUNT_WIDTH 16
+#define MC_CMD_DPCPU_RPC_OUT_RDATA_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_RDATA_LEN 24
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_1_OFST 12
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_2_OFST 16
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_3_OFST 20
+#define MC_CMD_DPCPU_RPC_OUT_CSR_ACCESS_READ_VAL_4_OFST 24
+
+
+/***********************************/
+/* MC_CMD_TRIGGER_INTERRUPT
+ * Trigger an interrupt by prodding the BIU.
+ */
+#define MC_CMD_TRIGGER_INTERRUPT 0xe3
+
+/* MC_CMD_TRIGGER_INTERRUPT_IN msgrequest */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_LEN 4
+/* Interrupt level relative to base for function. */
+#define MC_CMD_TRIGGER_INTERRUPT_IN_INTR_LEVEL_OFST 0
+
+/* MC_CMD_TRIGGER_INTERRUPT_OUT msgresponse */
+#define MC_CMD_TRIGGER_INTERRUPT_OUT_LEN 0
+
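/* Illustrative sketch (not part of this patch): requesting a test interrupt
 * at a given level with MC_CMD_TRIGGER_INTERRUPT. The function name is
 * hypothetical; the MCDI helpers are those used elsewhere in this patch.
 */
static int example_trigger_interrupt(struct efx_nic *efx, u32 intr_level)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);

	BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, intr_level);
	return efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
			    inbuf, sizeof(inbuf), NULL, 0, NULL);
}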
+
+/***********************************/
+/* MC_CMD_DUMP_DO
+ * Take a dump of the DUT state
+ */
+#define MC_CMD_DUMP_DO 0xe8
+
+/* MC_CMD_DUMP_DO_IN msgrequest */
+#define MC_CMD_DUMP_DO_IN_LEN 52
+#define MC_CMD_DUMP_DO_IN_PADDING_OFST 0
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_OFST 4
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_DEFAULT 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_NVRAM 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_HOST_MEMORY_MLI 0x3 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMP_LOCATION_UART 0x4 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_PAGE_SIZE 0x1000 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_DO_IN_HOST_MEMORY_MLI_MAX_DEPTH 0x2 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_DO_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_OFST 28
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM 0x0 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_NVRAM_DUMP_PARTITION 0x1 /* enum */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_DO_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+
+/* MC_CMD_DUMP_DO_OUT msgresponse */
+#define MC_CMD_DUMP_DO_OUT_LEN 4
+#define MC_CMD_DUMP_DO_OUT_DUMPFILE_SIZE_OFST 0
+
+
+/***********************************/
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED
+ * Configure unsolicited dumps
+ */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED 0xe9
+
+/* MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN msgrequest */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_LEN 52
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_ENABLE_OFST 0
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_OFST 4
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_TYPE_OFST 8
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_NVRAM_OFFSET_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 16
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 20
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_UART_PORT_OFST 12
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPSPEC_SRC_CUSTOM_SIZE_OFST 24
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_OFST 28
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPFILE_DST */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_TYPE_OFST 32
+/* Enum values, see field(s): */
+/* MC_CMD_DUMP_DO/MC_CMD_DUMP_DO_IN/DUMPSPEC_SRC_CUSTOM_TYPE */
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_PARTITION_TYPE_ID_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_NVRAM_OFFSET_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_LO_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_ROOT_ADDR_HI_OFST 40
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_HOST_MEMORY_MLI_DEPTH_OFST 44
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_UART_PORT_OFST 36
+#define MC_CMD_DUMP_CONFIGURE_UNSOLICITED_IN_DUMPFILE_DST_CUSTOM_SIZE_OFST 48
+
+
+/***********************************/
+/* MC_CMD_SET_PSU
+ * Adjusts power supply parameters. This is a warranty-voiding operation.
+ * Returns: ENOENT if the parameter or rail specified does not exist, EINVAL if
+ * the parameter is out of range.
+ */
+#define MC_CMD_SET_PSU 0xea
+
+/* MC_CMD_SET_PSU_IN msgrequest */
+#define MC_CMD_SET_PSU_IN_LEN 12
+#define MC_CMD_SET_PSU_IN_PARAM_OFST 0
+#define MC_CMD_SET_PSU_IN_PARAM_SUPPLY_VOLTAGE 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_OFST 4
+#define MC_CMD_SET_PSU_IN_RAIL_0V9 0x0 /* enum */
+#define MC_CMD_SET_PSU_IN_RAIL_1V2 0x1 /* enum */
+/* desired value, e.g. voltage in mV */
+#define MC_CMD_SET_PSU_IN_VALUE_OFST 8
+
+/* MC_CMD_SET_PSU_OUT msgresponse */
+#define MC_CMD_SET_PSU_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_GET_FUNCTION_INFO
+ * Get function information: PF and VF number.
+ */
+#define MC_CMD_GET_FUNCTION_INFO 0xec
+
+/* MC_CMD_GET_FUNCTION_INFO_IN msgrequest */
+#define MC_CMD_GET_FUNCTION_INFO_IN_LEN 0
+
+/* MC_CMD_GET_FUNCTION_INFO_OUT msgresponse */
+#define MC_CMD_GET_FUNCTION_INFO_OUT_LEN 8
+#define MC_CMD_GET_FUNCTION_INFO_OUT_PF_OFST 0
+#define MC_CMD_GET_FUNCTION_INFO_OUT_VF_OFST 4
+
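/* Illustrative sketch (not part of this patch): retrieving the PF/VF numbers
 * with MC_CMD_GET_FUNCTION_INFO, mirroring efx_mcdi_port_get_number() later
 * in this patch. The function name and output parameters are hypothetical.
 */
static int example_get_function_info(struct efx_nic *efx, u32 *pf, u32 *vf)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0,
			  outbuf, sizeof(outbuf), NULL);
	if (rc)
		return rc;

	*pf = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
	*vf = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
	return 0;
}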
+
+/***********************************/
+/* MC_CMD_ENABLE_OFFLINE_BIST
+ * Enters offline BIST mode. All queues are torn down, chip enters quiescent
+ * mode, calling function gets exclusive MCDI ownership. The only way out is
+ * reboot.
+ */
+#define MC_CMD_ENABLE_OFFLINE_BIST 0xed
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_IN msgrequest */
+#define MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN 0
+
+/* MC_CMD_ENABLE_OFFLINE_BIST_OUT msgresponse */
+#define MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_START_KR_EYE_PLOT
+ * Start KR Serdes Eye diagram plot on a given lane. Lane must have valid
+ * signal.
+ */
+#define MC_CMD_START_KR_EYE_PLOT 0xee
+
+/* MC_CMD_START_KR_EYE_PLOT_IN msgrequest */
+#define MC_CMD_START_KR_EYE_PLOT_IN_LEN 4
+#define MC_CMD_START_KR_EYE_PLOT_IN_LANE_OFST 0
+
+/* MC_CMD_START_KR_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_START_KR_EYE_PLOT_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_POLL_KR_EYE_PLOT
+ * Poll KR Serdes Eye diagram plot. Returns one row of BER data. The caller
+ * should call this command repeatedly after starting eye plot, until no more
+ * data is returned.
+ */
+#define MC_CMD_POLL_KR_EYE_PLOT 0xef
+
+/* MC_CMD_POLL_KR_EYE_PLOT_IN msgrequest */
+#define MC_CMD_POLL_KR_EYE_PLOT_IN_LEN 0
+
+/* MC_CMD_POLL_KR_EYE_PLOT_OUT msgresponse */
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMIN 0
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LENMAX 252
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_LEN(num) (0+2*(num))
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_OFST 0
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_LEN 2
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MINNUM 0
+#define MC_CMD_POLL_KR_EYE_PLOT_OUT_SAMPLES_MAXNUM 126
+
+
+/***********************************/
+/* MC_CMD_READ_FUSES
+ * Read data programmed into the device One-Time-Programmable (OTP) Fuses
+ */
+#define MC_CMD_READ_FUSES 0xf0
+
+/* MC_CMD_READ_FUSES_IN msgrequest */
+#define MC_CMD_READ_FUSES_IN_LEN 8
+/* Offset in OTP to read */
+#define MC_CMD_READ_FUSES_IN_OFFSET_OFST 0
+/* Length of data to read in bytes */
+#define MC_CMD_READ_FUSES_IN_LENGTH_OFST 4
+
+/* MC_CMD_READ_FUSES_OUT msgresponse */
+#define MC_CMD_READ_FUSES_OUT_LENMIN 4
+#define MC_CMD_READ_FUSES_OUT_LENMAX 252
+#define MC_CMD_READ_FUSES_OUT_LEN(num) (4+1*(num))
+/* Length of returned OTP data in bytes */
+#define MC_CMD_READ_FUSES_OUT_LENGTH_OFST 0
+/* Returned data */
+#define MC_CMD_READ_FUSES_OUT_DATA_OFST 4
+#define MC_CMD_READ_FUSES_OUT_DATA_LEN 1
+#define MC_CMD_READ_FUSES_OUT_DATA_MINNUM 0
+#define MC_CMD_READ_FUSES_OUT_DATA_MAXNUM 248
+
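/* Illustrative sketch (not part of this patch): reading a small block of OTP
 * fuse data with MC_CMD_READ_FUSES. At most 248 bytes fit in one response,
 * so larger reads would need to loop. The function name is hypothetical.
 */
static int example_read_fuses(struct efx_nic *efx, u32 offset,
			      u8 *data, size_t len)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_FUSES_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_READ_FUSES_OUT_LENMAX);
	size_t outlen;
	int rc;

	if (len > MC_CMD_READ_FUSES_OUT_DATA_MAXNUM)
		return -EINVAL;

	MCDI_SET_DWORD(inbuf, READ_FUSES_IN_OFFSET, offset);
	MCDI_SET_DWORD(inbuf, READ_FUSES_IN_LENGTH, len);
	rc = efx_mcdi_rpc(efx, MC_CMD_READ_FUSES, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (MCDI_DWORD(outbuf, READ_FUSES_OUT_LENGTH) < len ||
	    outlen < MC_CMD_READ_FUSES_OUT_LEN(len))
		return -EIO;

	memcpy(data, MCDI_PTR(outbuf, READ_FUSES_OUT_DATA), len);
	return 0;
}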
+
+/***********************************/
+/* MC_CMD_KR_TUNE
+ * Get or set KR Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_KR_TUNE 0xf1
+
+/* MC_CMD_KR_TUNE_IN msgrequest */
+#define MC_CMD_KR_TUNE_IN_LENMIN 4
+#define MC_CMD_KR_TUNE_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define MC_CMD_KR_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define MC_CMD_KR_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define MC_CMD_KR_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define MC_CMD_KR_TUNE_IN_TXEQ_SET 0x3
+/* enum: Force KR Serdes reset / recalibration */
+#define MC_CMD_KR_TUNE_IN_RECAL 0x4
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_OFST 4
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_LEN 4
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MINNUM 0
+#define MC_CMD_KR_TUNE_IN_KR_TUNE_ARGS_MAXNUM 62
+
+/* MC_CMD_KR_TUNE_OUT msgresponse */
+#define MC_CMD_KR_TUNE_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_IN msgrequest */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
+/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
+/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
+/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
+/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_ALL 0x4 /* enum */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 11
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 4
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_IN msgrequest */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMIN 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LENMAX 252
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_KR_TUNE_RSVD_LEN 3
+/* RXEQ Parameter */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 3
+/* Enum values, see field(s): */
+/* MC_CMD_KR_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 11
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_LBN 12
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 4
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_KR_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_KR_TUNE_RXEQ_SET_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RXEQ_SET_OUT_LEN 0
+
+/* MC_CMD_KR_TUNE_RECAL_IN msgrequest */
+#define MC_CMD_KR_TUNE_RECAL_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_OFST 0
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_OFST 1
+#define MC_CMD_KR_TUNE_RECAL_IN_KR_TUNE_RSVD_LEN 3
+
+/* MC_CMD_KR_TUNE_RECAL_OUT msgresponse */
+#define MC_CMD_KR_TUNE_RECAL_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PCIE_TUNE
+ * Get or set PCIE Serdes RXEQ and TX Driver settings
+ */
+#define MC_CMD_PCIE_TUNE 0xf2
+
+/* MC_CMD_PCIE_TUNE_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_IN_LENMIN 4
+#define MC_CMD_PCIE_TUNE_IN_LENMAX 252
+#define MC_CMD_PCIE_TUNE_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_OP_LEN 1
+/* enum: Get current RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_GET 0x0
+/* enum: Override RXEQ settings */
+#define MC_CMD_PCIE_TUNE_IN_RXEQ_SET 0x1
+/* enum: Get current TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_GET 0x2
+/* enum: Override TX Driver settings */
+#define MC_CMD_PCIE_TUNE_IN_TXEQ_SET 0x3
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_RSVD_LEN 3
+/* Arguments specific to the operation */
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_OFST 4
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_LEN 4
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MINNUM 0
+#define MC_CMD_PCIE_TUNE_IN_PCIE_TUNE_ARGS_MAXNUM 62
+
+/* MC_CMD_PCIE_TUNE_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_OUT_LEN 0
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* RXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: Attenuation (0-15) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_ATT 0x0
+/* enum: CTLE Boost (0-15) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_BOOST 0x1
+/* enum: DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP1 0x2
+/* enum: DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP2 0x3
+/* enum: DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP3 0x4
+/* enum: DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
+/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_3 0x3 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_4 0x4 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x8 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 12
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_PCIE_TUNE_RSVD_LEN 3
+
+/* MC_CMD_PCIE_TUNE_TXEQ_GET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMIN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LENMAX 252
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_LEN(num) (0+4*(num))
+/* TXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_OFST 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
+/* enum: TxMargin (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXMARGIN 0x0
+/* enum: TxSwing (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_TXSWING 0x1
+/* enum: De-emphasis coefficient C(-1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CM1 0x2
+/* enum: De-emphasis coefficient C(0) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_C0 0x3
+/* enum: De-emphasis coefficient C(+1) (PIPE) */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_CP1 0x4
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 4
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_LBN 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_RESERVED_WIDTH 12
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_LBN 24
+#define MC_CMD_PCIE_TUNE_TXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+
+
+/***********************************/
+/* MC_CMD_LICENSING
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ */
+#define MC_CMD_LICENSING 0xf3
+
+/* MC_CMD_LICENSING_IN msgrequest */
+#define MC_CMD_LICENSING_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_IN_OP_OFST 0
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses */
+#define MC_CMD_LICENSING_IN_OP_GET_KEY_STATS 0x1
+
+/* MC_CMD_LICENSING_OUT msgresponse */
+#define MC_CMD_LICENSING_OUT_LEN 28
+/* count of application keys which are valid */
+#define MC_CMD_LICENSING_OUT_VALID_APP_KEYS_OFST 0
+/* sum of UNVERIFIABLE_APP_KEYS + WRONG_NODE_APP_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_OUT_INVALID_APP_KEYS_OFST 4
+/* count of application keys which are invalid due to being blacklisted */
+#define MC_CMD_LICENSING_OUT_BLACKLISTED_APP_KEYS_OFST 8
+/* count of application keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_OUT_UNVERIFIABLE_APP_KEYS_OFST 12
+/* count of application keys which are invalid due to being for the wrong node
+ */
+#define MC_CMD_LICENSING_OUT_WRONG_NODE_APP_KEYS_OFST 16
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_OUT_LICENSING_STATE_OFST 20
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_OUT_LICENSING_SELF_TEST_OFST 24
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_OUT_SELF_TEST_PASS 0x1
+
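/* Illustrative sketch (not part of this patch): querying license key counts
 * with the GET_KEY_STATS operation of MC_CMD_LICENSING. The function name is
 * hypothetical; the MCDI helpers are those used later in this patch.
 */
static int example_licensing_key_stats(struct efx_nic *efx, u32 *valid_keys)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, LICENSING_IN_OP,
		       MC_CMD_LICENSING_IN_OP_GET_KEY_STATS);
	rc = efx_mcdi_rpc(efx, MC_CMD_LICENSING, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_LICENSING_OUT_LEN)
		return -EIO;

	*valid_keys = MCDI_DWORD(outbuf, LICENSING_OUT_VALID_APP_KEYS);
	return 0;
}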
+
+/***********************************/
+/* MC_CMD_MC2MC_PROXY
+ * Execute an arbitrary MCDI command on the slave MC of a dual-core device.
+ * This will fail on a single-core system.
+ */
+#define MC_CMD_MC2MC_PROXY 0xf4
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_phy.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 13cb40fe90c..8d33da6697f 100644
--- a/drivers/net/ethernet/sfc/mcdi_phy.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2009-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2009-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -36,7 +36,7 @@ struct efx_mcdi_phy_data {
static int
efx_mcdi_get_phy_cfg(struct efx_nic *efx, struct efx_mcdi_phy_data *cfg)
{
- u8 outbuf[MC_CMD_GET_PHY_CFG_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_CFG_OUT_LEN);
size_t outlen;
int rc;
@@ -78,7 +78,7 @@ static int efx_mcdi_set_link(struct efx_nic *efx, u32 capabilities,
u32 flags, u32 loopback_mode,
u32 loopback_speed)
{
- u8 inbuf[MC_CMD_SET_LINK_IN_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_LINK_IN_LEN);
int rc;
BUILD_BUG_ON(MC_CMD_SET_LINK_OUT_LEN != 0);
@@ -102,7 +102,7 @@ fail:
static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
{
- u8 outbuf[MC_CMD_GET_LOOPBACK_MODES_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LOOPBACK_MODES_OUT_LEN);
size_t outlen;
int rc;
@@ -111,7 +111,8 @@ static int efx_mcdi_loopback_modes(struct efx_nic *efx, u64 *loopback_modes)
if (rc)
goto fail;
- if (outlen < MC_CMD_GET_LOOPBACK_MODES_OUT_LEN) {
+ if (outlen < (MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
+ MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN)) {
rc = -EIO;
goto fail;
}
@@ -125,16 +126,16 @@ fail:
return rc;
}
-int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
- unsigned int prtad, unsigned int devad, u16 addr,
- u16 *value_out, u32 *status_out)
+static int efx_mcdi_mdio_read(struct net_device *net_dev,
+ int prtad, int devad, u16 addr)
{
- u8 inbuf[MC_CMD_MDIO_READ_IN_LEN];
- u8 outbuf[MC_CMD_MDIO_READ_OUT_LEN];
+ struct efx_nic *efx = netdev_priv(net_dev);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
size_t outlen;
int rc;
- MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, bus);
+ MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus);
MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);
@@ -144,25 +145,27 @@ int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
if (rc)
goto fail;
- *value_out = (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
- *status_out = MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS);
- return 0;
+ if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) !=
+ MC_CMD_MDIO_STATUS_GOOD)
+ return -EIO;
+
+ return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
fail:
netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
return rc;
}
-int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
- unsigned int prtad, unsigned int devad, u16 addr,
- u16 value, u32 *status_out)
+static int efx_mcdi_mdio_write(struct net_device *net_dev,
+ int prtad, int devad, u16 addr, u16 value)
{
- u8 inbuf[MC_CMD_MDIO_WRITE_IN_LEN];
- u8 outbuf[MC_CMD_MDIO_WRITE_OUT_LEN];
+ struct efx_nic *efx = netdev_priv(net_dev);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
size_t outlen;
int rc;
- MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, bus);
+ MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus);
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
@@ -173,7 +176,10 @@ int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
if (rc)
goto fail;
- *status_out = MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS);
+ if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
+ MC_CMD_MDIO_STATUS_GOOD)
+ return -EIO;
+
return 0;
fail:
@@ -304,10 +310,37 @@ static u32 mcdi_to_ethtool_media(u32 media)
}
}
+static void efx_mcdi_phy_decode_link(struct efx_nic *efx,
+ struct efx_link_state *link_state,
+ u32 speed, u32 flags, u32 fcntl)
+{
+ switch (fcntl) {
+ case MC_CMD_FCNTL_AUTO:
+ WARN_ON(1); /* This is not a link mode */
+ link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
+ break;
+ case MC_CMD_FCNTL_BIDIR:
+ link_state->fc = EFX_FC_TX | EFX_FC_RX;
+ break;
+ case MC_CMD_FCNTL_RESPOND:
+ link_state->fc = EFX_FC_RX;
+ break;
+ default:
+ WARN_ON(1);
+ case MC_CMD_FCNTL_OFF:
+ link_state->fc = 0;
+ break;
+ }
+
+ link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
+ link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
+ link_state->speed = speed;
+}
+
static int efx_mcdi_phy_probe(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_data;
- u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
u32 caps;
int rc;
@@ -403,7 +436,7 @@ fail:
return rc;
}
-int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
+int efx_mcdi_port_reconfigure(struct efx_nic *efx)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 caps = (efx->link_advertising ?
@@ -414,37 +447,10 @@ int efx_mcdi_phy_reconfigure(struct efx_nic *efx)
efx->loopback_mode, 0);
}
-void efx_mcdi_phy_decode_link(struct efx_nic *efx,
- struct efx_link_state *link_state,
- u32 speed, u32 flags, u32 fcntl)
-{
- switch (fcntl) {
- case MC_CMD_FCNTL_AUTO:
- WARN_ON(1); /* This is not a link mode */
- link_state->fc = EFX_FC_AUTO | EFX_FC_TX | EFX_FC_RX;
- break;
- case MC_CMD_FCNTL_BIDIR:
- link_state->fc = EFX_FC_TX | EFX_FC_RX;
- break;
- case MC_CMD_FCNTL_RESPOND:
- link_state->fc = EFX_FC_RX;
- break;
- default:
- WARN_ON(1);
- case MC_CMD_FCNTL_OFF:
- link_state->fc = 0;
- break;
- }
-
- link_state->up = !!(flags & (1 << MC_CMD_GET_LINK_OUT_LINK_UP_LBN));
- link_state->fd = !!(flags & (1 << MC_CMD_GET_LINK_OUT_FULL_DUPLEX_LBN));
- link_state->speed = speed;
-}
-
/* Verify that the forced flow control settings (!EFX_FC_AUTO) are
* supported by the link partner. Warn the user if this isn't the case
*/
-void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
+static void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
u32 rmtadv;
@@ -472,7 +478,7 @@ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
static bool efx_mcdi_phy_poll(struct efx_nic *efx)
{
struct efx_link_state old_state = efx->link_state;
- u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
int rc;
WARN_ON(!mutex_is_locked(&efx->mac_lock));
@@ -507,7 +513,7 @@ static void efx_mcdi_phy_remove(struct efx_nic *efx)
static void efx_mcdi_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
{
struct efx_mcdi_phy_data *phy_cfg = efx->phy_data;
- u8 outbuf[MC_CMD_GET_LINK_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
int rc;
ecmd->supported =
@@ -579,7 +585,7 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
{
- u8 outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_STATE_OUT_LEN);
size_t outlen;
int rc;
@@ -615,17 +621,15 @@ static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
unsigned int retry, i, count = 0;
size_t outlen;
u32 status;
- u8 *buf, *ptr;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_SFT9001_LEN);
+ u8 *ptr;
int rc;
- buf = kzalloc(0x100, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
BUILD_BUG_ON(MC_CMD_START_BIST_OUT_LEN != 0);
- MCDI_SET_DWORD(buf, START_BIST_IN_TYPE, bist_mode);
- rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST, buf, MC_CMD_START_BIST_IN_LEN,
- NULL, 0, NULL);
+ MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_mode);
+ rc = efx_mcdi_rpc(efx, MC_CMD_START_BIST,
+ inbuf, MC_CMD_START_BIST_IN_LEN, NULL, 0, NULL);
if (rc)
goto out;
@@ -633,11 +637,11 @@ static int efx_mcdi_bist(struct efx_nic *efx, unsigned int bist_mode,
for (retry = 0; retry < 100; ++retry) {
BUILD_BUG_ON(MC_CMD_POLL_BIST_IN_LEN != 0);
rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
- buf, 0x100, &outlen);
+ outbuf, sizeof(outbuf), &outlen);
if (rc)
goto out;
- status = MCDI_DWORD(buf, POLL_BIST_OUT_RESULT);
+ status = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
if (status != MC_CMD_POLL_BIST_RUNNING)
goto finished;
@@ -654,7 +658,7 @@ finished:
if (efx->phy_type == PHY_TYPE_SFT9001B &&
(bist_mode == MC_CMD_PHY_BIST_CABLE_SHORT ||
bist_mode == MC_CMD_PHY_BIST_CABLE_LONG)) {
- ptr = MCDI_PTR(buf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
+ ptr = MCDI_PTR(outbuf, POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A);
if (status == MC_CMD_POLL_BIST_PASSED &&
outlen >= MC_CMD_POLL_BIST_OUT_SFT9001_LEN) {
for (i = 0; i < 8; i++) {
@@ -668,8 +672,6 @@ finished:
rc = count;
out:
- kfree(buf);
-
return rc;
}
@@ -744,8 +746,8 @@ static const char *efx_mcdi_phy_test_name(struct efx_nic *efx,
static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
struct ethtool_eeprom *ee, u8 *data)
{
- u8 outbuf[MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX];
- u8 inbuf[MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN];
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PHY_MEDIA_INFO_OUT_LENMAX);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN);
size_t outlen;
int rc;
unsigned int payload_len;
@@ -785,8 +787,7 @@ static int efx_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
space_remaining : payload_len;
memcpy(user_data,
- outbuf + page_off +
- MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST,
+ MCDI_PTR(outbuf, GET_PHY_MEDIA_INFO_OUT_DATA) + page_off,
to_copy);
space_remaining -= to_copy;
@@ -813,10 +814,10 @@ static int efx_mcdi_phy_get_module_info(struct efx_nic *efx,
}
}
-const struct efx_phy_operations efx_mcdi_phy_ops = {
+static const struct efx_phy_operations efx_mcdi_phy_ops = {
.probe = efx_mcdi_phy_probe,
.init = efx_port_dummy_op_int,
- .reconfigure = efx_mcdi_phy_reconfigure,
+ .reconfigure = efx_mcdi_port_reconfigure,
.poll = efx_mcdi_phy_poll,
.fini = efx_port_dummy_op_void,
.remove = efx_mcdi_phy_remove,
@@ -828,3 +829,199 @@ const struct efx_phy_operations efx_mcdi_phy_ops = {
.get_module_eeprom = efx_mcdi_phy_get_module_eeprom,
.get_module_info = efx_mcdi_phy_get_module_info,
};
+
+u32 efx_mcdi_phy_get_caps(struct efx_nic *efx)
+{
+ struct efx_mcdi_phy_data *phy_data = efx->phy_data;
+
+ return phy_data->supported_cap;
+}
+
+static unsigned int efx_mcdi_event_link_speed[] = {
+ [MCDI_EVENT_LINKCHANGE_SPEED_100M] = 100,
+ [MCDI_EVENT_LINKCHANGE_SPEED_1G] = 1000,
+ [MCDI_EVENT_LINKCHANGE_SPEED_10G] = 10000,
+};
+
+void efx_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev)
+{
+ u32 flags, fcntl, speed, lpa;
+
+ speed = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_SPEED);
+ EFX_BUG_ON_PARANOID(speed >= ARRAY_SIZE(efx_mcdi_event_link_speed));
+ speed = efx_mcdi_event_link_speed[speed];
+
+ flags = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LINK_FLAGS);
+ fcntl = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_FCNTL);
+ lpa = EFX_QWORD_FIELD(*ev, MCDI_EVENT_LINKCHANGE_LP_CAP);
+
+ /* efx->link_state is only modified by efx_mcdi_phy_get_link(),
+ * which is only run after flushing the event queues. Therefore, it
+ * is safe to modify the link state outside of the mac_lock here.
+ */
+ efx_mcdi_phy_decode_link(efx, &efx->link_state, speed, flags, fcntl);
+
+ efx_mcdi_phy_check_fcntl(efx, lpa);
+
+ efx_link_status_changed(efx);
+}
+
+int efx_mcdi_set_mac(struct efx_nic *efx)
+{
+ u32 fcntl;
+ MCDI_DECLARE_BUF(cmdbytes, MC_CMD_SET_MAC_IN_LEN);
+
+ BUILD_BUG_ON(MC_CMD_SET_MAC_OUT_LEN != 0);
+
+ memcpy(MCDI_PTR(cmdbytes, SET_MAC_IN_ADDR),
+ efx->net_dev->dev_addr, ETH_ALEN);
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_MTU,
+ EFX_MAX_FRAME_LEN(efx->net_dev->mtu));
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_DRAIN, 0);
+
+ /* Set simple MAC filter for Siena */
+ MCDI_POPULATE_DWORD_1(cmdbytes, SET_MAC_IN_REJECT,
+ SET_MAC_IN_REJECT_UNCST, efx->unicast_filter);
+
+ switch (efx->wanted_fc) {
+ case EFX_FC_RX | EFX_FC_TX:
+ fcntl = MC_CMD_FCNTL_BIDIR;
+ break;
+ case EFX_FC_RX:
+ fcntl = MC_CMD_FCNTL_RESPOND;
+ break;
+ default:
+ fcntl = MC_CMD_FCNTL_OFF;
+ break;
+ }
+ if (efx->wanted_fc & EFX_FC_AUTO)
+ fcntl = MC_CMD_FCNTL_AUTO;
+ if (efx->fc_disable)
+ fcntl = MC_CMD_FCNTL_OFF;
+
+ MCDI_SET_DWORD(cmdbytes, SET_MAC_IN_FCNTL, fcntl);
+
+ return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, cmdbytes, sizeof(cmdbytes),
+ NULL, 0, NULL);
+}
+
+bool efx_mcdi_mac_check_fault(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
+ size_t outlength;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
+ outbuf, sizeof(outbuf), &outlength);
+ if (rc) {
+ netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n",
+ __func__, rc);
+ return true;
+ }
+
+ return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
+}
+
+static int efx_mcdi_mac_stats(struct efx_nic *efx, dma_addr_t dma_addr,
+ u32 dma_len, int enable, int clear)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
+ int rc;
+ int period = enable ? 1000 : 0;
+
+ BUILD_BUG_ON(MC_CMD_MAC_STATS_OUT_DMA_LEN != 0);
+
+ MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, dma_addr);
+ MCDI_POPULATE_DWORD_7(inbuf, MAC_STATS_IN_CMD,
+ MAC_STATS_IN_DMA, !!enable,
+ MAC_STATS_IN_CLEAR, clear,
+ MAC_STATS_IN_PERIODIC_CHANGE, 1,
+ MAC_STATS_IN_PERIODIC_ENABLE, !!enable,
+ MAC_STATS_IN_PERIODIC_CLEAR, 0,
+ MAC_STATS_IN_PERIODIC_NOEVENT, 1,
+ MAC_STATS_IN_PERIOD_MS, period);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ netif_err(efx, hw, efx->net_dev, "%s: %s failed rc=%d\n",
+ __func__, enable ? "enable" : "disable", rc);
+ return rc;
+}
+
+void efx_mcdi_mac_start_stats(struct efx_nic *efx)
+{
+ __le64 *dma_stats = efx->stats_buffer.addr;
+
+ dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+
+ efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
+ MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
+}
+
+void efx_mcdi_mac_stop_stats(struct efx_nic *efx)
+{
+ efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
+}
+
+int efx_mcdi_port_probe(struct efx_nic *efx)
+{
+ int rc;
+
+ /* Hook in PHY operations table */
+ efx->phy_op = &efx_mcdi_phy_ops;
+
+ /* Set up MDIO structure for PHY */
+ efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+ efx->mdio.mdio_read = efx_mcdi_mdio_read;
+ efx->mdio.mdio_write = efx_mcdi_mdio_write;
+
+ /* Fill out MDIO structure, loopback modes, and initial link state */
+ rc = efx->phy_op->probe(efx);
+ if (rc != 0)
+ return rc;
+
+ /* Allocate buffer for stats */
+ rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
+ MC_CMD_MAC_NSTATS * sizeof(u64), GFP_KERNEL);
+ if (rc)
+ return rc;
+ netif_dbg(efx, probe, efx->net_dev,
+ "stats buffer at %llx (virt %p phys %llx)\n",
+ (u64)efx->stats_buffer.dma_addr,
+ efx->stats_buffer.addr,
+ (u64)virt_to_phys(efx->stats_buffer.addr));
+
+ efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
+
+ return 0;
+}
+
+void efx_mcdi_port_remove(struct efx_nic *efx)
+{
+ efx->phy_op->remove(efx);
+ efx_nic_free_buffer(efx, &efx->stats_buffer);
+}
+
+/* Get physical port number (EF10 only; on Siena it is same as PF number) */
+int efx_mcdi_port_get_number(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
+ int rc;
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PORT_ASSIGNMENT, NULL, 0,
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return rc;
+
+ return MCDI_DWORD(outbuf, GET_PORT_ASSIGNMENT_OUT_PORT);
+}
diff --git a/drivers/net/ethernet/sfc/mdio_10g.c b/drivers/net/ethernet/sfc/mdio_10g.c
index 9acfd6696ff..8ff954c59ef 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.c
+++ b/drivers/net/ethernet/sfc/mdio_10g.c
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/mdio_10g.h b/drivers/net/ethernet/sfc/mdio_10g.h
index a97dbbd2de9..16824fecc5e 100644
--- a/drivers/net/ethernet/sfc/mdio_10g.h
+++ b/drivers/net/ethernet/sfc/mdio_10g.h
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index 08f825b71ac..a77a8bd2dd7 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -1,194 +1,32 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
-#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
-#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include "net_driver.h"
-#include "spi.h"
#include "efx.h"
-#include "nic.h"
-#include "mcdi.h"
-#include "mcdi_pcol.h"
-
-#define EFX_SPI_VERIFY_BUF_LEN 16
-
-struct efx_mtd_partition {
- struct mtd_info mtd;
- union {
- struct {
- bool updating;
- u8 nvram_type;
- u16 fw_subtype;
- } mcdi;
- size_t offset;
- };
- const char *type_name;
- char name[IFNAMSIZ + 20];
-};
-
-struct efx_mtd_ops {
- int (*read)(struct mtd_info *mtd, loff_t start, size_t len,
- size_t *retlen, u8 *buffer);
- int (*erase)(struct mtd_info *mtd, loff_t start, size_t len);
- int (*write)(struct mtd_info *mtd, loff_t start, size_t len,
- size_t *retlen, const u8 *buffer);
- int (*sync)(struct mtd_info *mtd);
-};
-
-struct efx_mtd {
- struct list_head node;
- struct efx_nic *efx;
- const struct efx_spi_device *spi;
- const char *name;
- const struct efx_mtd_ops *ops;
- size_t n_parts;
- struct efx_mtd_partition part[0];
-};
-
-#define efx_for_each_partition(part, efx_mtd) \
- for ((part) = &(efx_mtd)->part[0]; \
- (part) != &(efx_mtd)->part[(efx_mtd)->n_parts]; \
- (part)++)
#define to_efx_mtd_partition(mtd) \
container_of(mtd, struct efx_mtd_partition, mtd)
-static int falcon_mtd_probe(struct efx_nic *efx);
-static int siena_mtd_probe(struct efx_nic *efx);
-
-/* SPI utilities */
-
-static int
-efx_spi_slow_wait(struct efx_mtd_partition *part, bool uninterruptible)
-{
- struct efx_mtd *efx_mtd = part->mtd.priv;
- const struct efx_spi_device *spi = efx_mtd->spi;
- struct efx_nic *efx = efx_mtd->efx;
- u8 status;
- int rc, i;
-
- /* Wait up to 4s for flash/EEPROM to finish a slow operation. */
- for (i = 0; i < 40; i++) {
- __set_current_state(uninterruptible ?
- TASK_UNINTERRUPTIBLE : TASK_INTERRUPTIBLE);
- schedule_timeout(HZ / 10);
- rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
- &status, sizeof(status));
- if (rc)
- return rc;
- if (!(status & SPI_STATUS_NRDY))
- return 0;
- if (signal_pending(current))
- return -EINTR;
- }
- pr_err("%s: timed out waiting for %s\n", part->name, efx_mtd->name);
- return -ETIMEDOUT;
-}
-
-static int
-efx_spi_unlock(struct efx_nic *efx, const struct efx_spi_device *spi)
-{
- const u8 unlock_mask = (SPI_STATUS_BP2 | SPI_STATUS_BP1 |
- SPI_STATUS_BP0);
- u8 status;
- int rc;
-
- rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL,
- &status, sizeof(status));
- if (rc)
- return rc;
-
- if (!(status & unlock_mask))
- return 0; /* already unlocked */
-
- rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
- if (rc)
- return rc;
- rc = falcon_spi_cmd(efx, spi, SPI_SST_EWSR, -1, NULL, NULL, 0);
- if (rc)
- return rc;
-
- status &= ~unlock_mask;
- rc = falcon_spi_cmd(efx, spi, SPI_WRSR, -1, &status,
- NULL, sizeof(status));
- if (rc)
- return rc;
- rc = falcon_spi_wait_write(efx, spi);
- if (rc)
- return rc;
-
- return 0;
-}
-
-static int
-efx_spi_erase(struct efx_mtd_partition *part, loff_t start, size_t len)
-{
- struct efx_mtd *efx_mtd = part->mtd.priv;
- const struct efx_spi_device *spi = efx_mtd->spi;
- struct efx_nic *efx = efx_mtd->efx;
- unsigned pos, block_len;
- u8 empty[EFX_SPI_VERIFY_BUF_LEN];
- u8 buffer[EFX_SPI_VERIFY_BUF_LEN];
- int rc;
-
- if (len != spi->erase_size)
- return -EINVAL;
-
- if (spi->erase_command == 0)
- return -EOPNOTSUPP;
-
- rc = efx_spi_unlock(efx, spi);
- if (rc)
- return rc;
- rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0);
- if (rc)
- return rc;
- rc = falcon_spi_cmd(efx, spi, spi->erase_command, start, NULL,
- NULL, 0);
- if (rc)
- return rc;
- rc = efx_spi_slow_wait(part, false);
-
- /* Verify the entire region has been wiped */
- memset(empty, 0xff, sizeof(empty));
- for (pos = 0; pos < len; pos += block_len) {
- block_len = min(len - pos, sizeof(buffer));
- rc = falcon_spi_read(efx, spi, start + pos, block_len,
- NULL, buffer);
- if (rc)
- return rc;
- if (memcmp(empty, buffer, block_len))
- return -EIO;
-
- /* Avoid locking up the system */
- cond_resched();
- if (signal_pending(current))
- return -EINTR;
- }
-
- return rc;
-}
-
/* MTD interface */
static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
{
- struct efx_mtd *efx_mtd = mtd->priv;
+ struct efx_nic *efx = mtd->priv;
int rc;
- rc = efx_mtd->ops->erase(mtd, erase->addr, erase->len);
+ rc = efx->type->mtd_erase(mtd, erase->addr, erase->len);
if (rc == 0) {
erase->state = MTD_ERASE_DONE;
} else {
@@ -202,13 +40,13 @@ static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
static void efx_mtd_sync(struct mtd_info *mtd)
{
struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
+ struct efx_nic *efx = mtd->priv;
int rc;
- rc = efx_mtd->ops->sync(mtd);
+ rc = efx->type->mtd_sync(mtd);
if (rc)
pr_err("%s: %s sync failed (%d)\n",
- part->name, efx_mtd->name, rc);
+ part->name, part->dev_type_name, rc);
}
static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
@@ -222,62 +60,44 @@ static void efx_mtd_remove_partition(struct efx_mtd_partition *part)
ssleep(1);
}
WARN_ON(rc);
+ list_del(&part->node);
}
-static void efx_mtd_remove_device(struct efx_mtd *efx_mtd)
-{
- struct efx_mtd_partition *part;
-
- efx_for_each_partition(part, efx_mtd)
- efx_mtd_remove_partition(part);
- list_del(&efx_mtd->node);
- kfree(efx_mtd);
-}
-
-static void efx_mtd_rename_device(struct efx_mtd *efx_mtd)
-{
- struct efx_mtd_partition *part;
-
- efx_for_each_partition(part, efx_mtd)
- if (efx_nic_rev(efx_mtd->efx) >= EFX_REV_SIENA_A0)
- snprintf(part->name, sizeof(part->name),
- "%s %s:%02x", efx_mtd->efx->name,
- part->type_name, part->mcdi.fw_subtype);
- else
- snprintf(part->name, sizeof(part->name),
- "%s %s", efx_mtd->efx->name,
- part->type_name);
-}
-
-static int efx_mtd_probe_device(struct efx_nic *efx, struct efx_mtd *efx_mtd)
+int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
+ size_t n_parts, size_t sizeof_part)
{
struct efx_mtd_partition *part;
+ size_t i;
- efx_mtd->efx = efx;
+ for (i = 0; i < n_parts; i++) {
+ part = (struct efx_mtd_partition *)((char *)parts +
+ i * sizeof_part);
- efx_mtd_rename_device(efx_mtd);
-
- efx_for_each_partition(part, efx_mtd) {
part->mtd.writesize = 1;
part->mtd.owner = THIS_MODULE;
- part->mtd.priv = efx_mtd;
+ part->mtd.priv = efx;
part->mtd.name = part->name;
part->mtd._erase = efx_mtd_erase;
- part->mtd._read = efx_mtd->ops->read;
- part->mtd._write = efx_mtd->ops->write;
+ part->mtd._read = efx->type->mtd_read;
+ part->mtd._write = efx->type->mtd_write;
part->mtd._sync = efx_mtd_sync;
+ efx->type->mtd_rename(part);
+
if (mtd_device_register(&part->mtd, NULL, 0))
goto fail;
+
+ /* Add to list in order - efx_mtd_remove() depends on this */
+ list_add_tail(&part->node, &efx->mtd_list);
}
- list_add(&efx_mtd->node, &efx->mtd_list);
return 0;
fail:
- while (part != &efx_mtd->part[0]) {
- --part;
+ while (i--) {
+ part = (struct efx_mtd_partition *)((char *)parts +
+ i * sizeof_part);
efx_mtd_remove_partition(part);
}
/* Failure is unlikely here, but probably means we're out of memory */
@@ -286,410 +106,28 @@ fail:
void efx_mtd_remove(struct efx_nic *efx)
{
- struct efx_mtd *efx_mtd, *next;
+ struct efx_mtd_partition *parts, *part, *next;
WARN_ON(efx_dev_registered(efx));
- list_for_each_entry_safe(efx_mtd, next, &efx->mtd_list, node)
- efx_mtd_remove_device(efx_mtd);
-}
-
-void efx_mtd_rename(struct efx_nic *efx)
-{
- struct efx_mtd *efx_mtd;
-
- ASSERT_RTNL();
-
- list_for_each_entry(efx_mtd, &efx->mtd_list, node)
- efx_mtd_rename_device(efx_mtd);
-}
-
-int efx_mtd_probe(struct efx_nic *efx)
-{
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
- return siena_mtd_probe(efx);
- else
- return falcon_mtd_probe(efx);
-}
-
-/* Implementation of MTD operations for Falcon */
-
-static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, u8 *buffer)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- const struct efx_spi_device *spi = efx_mtd->spi;
- struct efx_nic *efx = efx_mtd->efx;
- struct falcon_nic_data *nic_data = efx->nic_data;
- int rc;
-
- rc = mutex_lock_interruptible(&nic_data->spi_lock);
- if (rc)
- return rc;
- rc = falcon_spi_read(efx, spi, part->offset + start, len,
- retlen, buffer);
- mutex_unlock(&nic_data->spi_lock);
- return rc;
-}
-
-static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- struct falcon_nic_data *nic_data = efx->nic_data;
- int rc;
-
- rc = mutex_lock_interruptible(&nic_data->spi_lock);
- if (rc)
- return rc;
- rc = efx_spi_erase(part, part->offset + start, len);
- mutex_unlock(&nic_data->spi_lock);
- return rc;
-}
-
-static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, const u8 *buffer)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- const struct efx_spi_device *spi = efx_mtd->spi;
- struct efx_nic *efx = efx_mtd->efx;
- struct falcon_nic_data *nic_data = efx->nic_data;
- int rc;
-
- rc = mutex_lock_interruptible(&nic_data->spi_lock);
- if (rc)
- return rc;
- rc = falcon_spi_write(efx, spi, part->offset + start, len,
- retlen, buffer);
- mutex_unlock(&nic_data->spi_lock);
- return rc;
-}
-
-static int falcon_mtd_sync(struct mtd_info *mtd)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- struct falcon_nic_data *nic_data = efx->nic_data;
- int rc;
-
- mutex_lock(&nic_data->spi_lock);
- rc = efx_spi_slow_wait(part, true);
- mutex_unlock(&nic_data->spi_lock);
- return rc;
-}
-
-static const struct efx_mtd_ops falcon_mtd_ops = {
- .read = falcon_mtd_read,
- .erase = falcon_mtd_erase,
- .write = falcon_mtd_write,
- .sync = falcon_mtd_sync,
-};
-
-static int falcon_mtd_probe(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
- struct efx_spi_device *spi;
- struct efx_mtd *efx_mtd;
- int rc = -ENODEV;
-
- ASSERT_RTNL();
-
- spi = &nic_data->spi_flash;
- if (efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
- efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
- GFP_KERNEL);
- if (!efx_mtd)
- return -ENOMEM;
-
- efx_mtd->spi = spi;
- efx_mtd->name = "flash";
- efx_mtd->ops = &falcon_mtd_ops;
-
- efx_mtd->n_parts = 1;
- efx_mtd->part[0].mtd.type = MTD_NORFLASH;
- efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
- efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
- efx_mtd->part[0].mtd.erasesize = spi->erase_size;
- efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
- efx_mtd->part[0].type_name = "sfc_flash_bootrom";
-
- rc = efx_mtd_probe_device(efx, efx_mtd);
- if (rc) {
- kfree(efx_mtd);
- return rc;
- }
- }
-
- spi = &nic_data->spi_eeprom;
- if (efx_spi_present(spi) && spi->size > EFX_EEPROM_BOOTCONFIG_START) {
- efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
- GFP_KERNEL);
- if (!efx_mtd)
- return -ENOMEM;
-
- efx_mtd->spi = spi;
- efx_mtd->name = "EEPROM";
- efx_mtd->ops = &falcon_mtd_ops;
-
- efx_mtd->n_parts = 1;
- efx_mtd->part[0].mtd.type = MTD_RAM;
- efx_mtd->part[0].mtd.flags = MTD_CAP_RAM;
- efx_mtd->part[0].mtd.size =
- min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
- EFX_EEPROM_BOOTCONFIG_START;
- efx_mtd->part[0].mtd.erasesize = spi->erase_size;
- efx_mtd->part[0].offset = EFX_EEPROM_BOOTCONFIG_START;
- efx_mtd->part[0].type_name = "sfc_bootconfig";
-
- rc = efx_mtd_probe_device(efx, efx_mtd);
- if (rc) {
- kfree(efx_mtd);
- return rc;
- }
- }
-
- return rc;
-}
-
-/* Implementation of MTD operations for Siena */
-
-static int siena_mtd_read(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, u8 *buffer)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- loff_t offset = start;
- loff_t end = min_t(loff_t, start + len, mtd->size);
- size_t chunk;
- int rc = 0;
-
- while (offset < end) {
- chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
- rc = efx_mcdi_nvram_read(efx, part->mcdi.nvram_type, offset,
- buffer, chunk);
- if (rc)
- goto out;
- offset += chunk;
- buffer += chunk;
- }
-out:
- *retlen = offset - start;
- return rc;
-}
-
-static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- loff_t offset = start & ~((loff_t)(mtd->erasesize - 1));
- loff_t end = min_t(loff_t, start + len, mtd->size);
- size_t chunk = part->mtd.erasesize;
- int rc = 0;
-
- if (!part->mcdi.updating) {
- rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
- if (rc)
- goto out;
- part->mcdi.updating = true;
- }
-
- /* The MCDI interface can in fact do multiple erase blocks at once;
- * but erasing may be slow, so we make multiple calls here to avoid
- * tripping the MCDI RPC timeout. */
- while (offset < end) {
- rc = efx_mcdi_nvram_erase(efx, part->mcdi.nvram_type, offset,
- chunk);
- if (rc)
- goto out;
- offset += chunk;
- }
-out:
- return rc;
-}
-
-static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
- size_t len, size_t *retlen, const u8 *buffer)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- loff_t offset = start;
- loff_t end = min_t(loff_t, start + len, mtd->size);
- size_t chunk;
- int rc = 0;
-
- if (!part->mcdi.updating) {
- rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
- if (rc)
- goto out;
- part->mcdi.updating = true;
- }
-
- while (offset < end) {
- chunk = min_t(size_t, end - offset, EFX_MCDI_NVRAM_LEN_MAX);
- rc = efx_mcdi_nvram_write(efx, part->mcdi.nvram_type, offset,
- buffer, chunk);
- if (rc)
- goto out;
- offset += chunk;
- buffer += chunk;
- }
-out:
- *retlen = offset - start;
- return rc;
-}
-
-static int siena_mtd_sync(struct mtd_info *mtd)
-{
- struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
- struct efx_mtd *efx_mtd = mtd->priv;
- struct efx_nic *efx = efx_mtd->efx;
- int rc = 0;
-
- if (part->mcdi.updating) {
- part->mcdi.updating = false;
- rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
- }
-
- return rc;
-}
-
-static const struct efx_mtd_ops siena_mtd_ops = {
- .read = siena_mtd_read,
- .erase = siena_mtd_erase,
- .write = siena_mtd_write,
- .sync = siena_mtd_sync,
-};
-
-struct siena_nvram_type_info {
- int port;
- const char *name;
-};
+ if (list_empty(&efx->mtd_list))
+ return;
-static const struct siena_nvram_type_info siena_nvram_types[] = {
- [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
- [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
- [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
- [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" },
- [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" },
- [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" },
- [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" },
- [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" },
- [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" },
- [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
- [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
- [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
- [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" },
-};
+ parts = list_first_entry(&efx->mtd_list, struct efx_mtd_partition,
+ node);
-static int siena_mtd_probe_partition(struct efx_nic *efx,
- struct efx_mtd *efx_mtd,
- unsigned int part_id,
- unsigned int type)
-{
- struct efx_mtd_partition *part = &efx_mtd->part[part_id];
- const struct siena_nvram_type_info *info;
- size_t size, erase_size;
- bool protected;
- int rc;
-
- if (type >= ARRAY_SIZE(siena_nvram_types) ||
- siena_nvram_types[type].name == NULL)
- return -ENODEV;
-
- info = &siena_nvram_types[type];
-
- if (info->port != efx_port_num(efx))
- return -ENODEV;
-
- rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
- if (rc)
- return rc;
- if (protected)
- return -ENODEV; /* hide it */
-
- part->mcdi.nvram_type = type;
- part->type_name = info->name;
-
- part->mtd.type = MTD_NORFLASH;
- part->mtd.flags = MTD_CAP_NORFLASH;
- part->mtd.size = size;
- part->mtd.erasesize = erase_size;
+ list_for_each_entry_safe(part, next, &efx->mtd_list, node)
+ efx_mtd_remove_partition(part);
- return 0;
+ kfree(parts);
}
-static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
- struct efx_mtd *efx_mtd)
+void efx_mtd_rename(struct efx_nic *efx)
{
struct efx_mtd_partition *part;
- uint16_t fw_subtype_list[
- MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
- int rc;
-
- rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
- if (rc)
- return rc;
-
- efx_for_each_partition(part, efx_mtd)
- part->mcdi.fw_subtype = fw_subtype_list[part->mcdi.nvram_type];
-
- return 0;
-}
-
-static int siena_mtd_probe(struct efx_nic *efx)
-{
- struct efx_mtd *efx_mtd;
- int rc = -ENODEV;
- u32 nvram_types;
- unsigned int type;
ASSERT_RTNL();
- rc = efx_mcdi_nvram_types(efx, &nvram_types);
- if (rc)
- return rc;
-
- efx_mtd = kzalloc(sizeof(*efx_mtd) +
- hweight32(nvram_types) * sizeof(efx_mtd->part[0]),
- GFP_KERNEL);
- if (!efx_mtd)
- return -ENOMEM;
-
- efx_mtd->name = "Siena NVRAM manager";
-
- efx_mtd->ops = &siena_mtd_ops;
-
- type = 0;
- efx_mtd->n_parts = 0;
-
- while (nvram_types != 0) {
- if (nvram_types & 1) {
- rc = siena_mtd_probe_partition(efx, efx_mtd,
- efx_mtd->n_parts, type);
- if (rc == 0)
- efx_mtd->n_parts++;
- else if (rc != -ENODEV)
- goto fail;
- }
- type++;
- nvram_types >>= 1;
- }
-
- rc = siena_mtd_get_fw_subtypes(efx, efx_mtd);
- if (rc)
- goto fail;
-
- rc = efx_mtd_probe_device(efx, efx_mtd);
-fail:
- if (rc)
- kfree(efx_mtd);
- return rc;
+ list_for_each_entry(part, &efx->mtd_list, node)
+ efx->type->mtd_rename(part);
}
-
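The reworked efx_mtd_add() above takes an array of caller-defined partition structures that embed struct efx_mtd_partition as their first member; sizeof_part is the size of the caller's element type, which is why the loop advances with byte arithmetic instead of ordinary indexing. A hypothetical caller illustrating that convention (the siena_mtd_partition wrapper and example_mtd_probe() are invented names for illustration; the real probe implementations live in the per-NIC files):

/* Hypothetical wrapper: the generic partition state must be the first
 * member so that &parts[i].common and the caller's element coincide.
 */
struct siena_mtd_partition {
	struct efx_mtd_partition common;
	unsigned int nvram_type;	/* NIC-specific extra state */
};

static int example_mtd_probe(struct efx_nic *efx)
{
	struct siena_mtd_partition *parts;
	size_t n_parts = 4;		/* illustrative count only */
	int rc;

	parts = kcalloc(n_parts, sizeof(*parts), GFP_KERNEL);
	if (!parts)
		return -ENOMEM;

	/* ... fill in parts[i].common.mtd fields and parts[i].nvram_type ... */

	/* efx_mtd_add() steps through the array in units of sizeof(*parts),
	 * so each iteration lands on &parts[i].common.
	 */
	rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
	if (rc)
		kfree(parts);	/* on success the array is freed later by efx_mtd_remove() */
	return rc;
}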
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index f4c7e6b6774..b172ed13305 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2011 Solarflare Communications Inc.
+ * Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -27,9 +27,11 @@
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/i2c.h>
+#include <linux/mtd/mtd.h>
#include "enum.h"
#include "bitfield.h"
+#include "filter.h"
/**************************************************************************
*
@@ -37,7 +39,7 @@
*
**************************************************************************/
-#define EFX_DRIVER_VERSION "3.2"
+#define EFX_DRIVER_VERSION "4.0"
#ifdef DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
@@ -93,21 +95,36 @@ struct efx_ptp_data;
struct efx_self_tests;
/**
- * struct efx_special_buffer - An Efx special buffer
- * @addr: CPU base address of the buffer
+ * struct efx_buffer - A general-purpose DMA buffer
+ * @addr: host base address of the buffer
* @dma_addr: DMA base address of the buffer
* @len: Buffer length, in bytes
- * @index: Buffer index within controller;s buffer table
- * @entries: Number of buffer table entries
*
- * Special buffers are used for the event queues and the TX and RX
- * descriptor queues for each channel. They are *not* used for the
- * actual transmit and receive buffers.
+ * The NIC uses these buffers for its interrupt status registers and
+ * MAC stats dumps.
*/
-struct efx_special_buffer {
+struct efx_buffer {
void *addr;
dma_addr_t dma_addr;
unsigned int len;
+};
+
+/**
+ * struct efx_special_buffer - DMA buffer entered into buffer table
+ * @buf: Standard &struct efx_buffer
+ * @index: Buffer index within controller's buffer table
+ * @entries: Number of buffer table entries
+ *
+ * The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE.
+ * Event and descriptor rings are addressed via one or more buffer
+ * table entries (and so can be physically non-contiguous, although we
+ * currently do not take advantage of that). On Falcon and Siena we
+ * have to take care of allocating and initialising the entries
+ * ourselves. On later hardware this is managed by the firmware and
+ * @index and @entries are left as 0.
+ */
+struct efx_special_buffer {
+ struct efx_buffer buf;
unsigned int index;
unsigned int entries;
};
@@ -118,6 +135,7 @@ struct efx_special_buffer {
* freed when descriptor completes
* @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
* freed when descriptor completes.
+ * @option: When @flags & %EFX_TX_BUF_OPTION, a NIC-specific option descriptor.
* @dma_addr: DMA address of the fragment.
* @flags: Flags for allocation and DMA mapping type
* @len: Length of this fragment.
@@ -129,7 +147,10 @@ struct efx_tx_buffer {
const struct sk_buff *skb;
void *heap_buf;
};
- dma_addr_t dma_addr;
+ union {
+ efx_qword_t option;
+ dma_addr_t dma_addr;
+ };
unsigned short flags;
unsigned short len;
unsigned short unmap_len;
@@ -138,6 +159,7 @@ struct efx_tx_buffer {
#define EFX_TX_BUF_SKB 2 /* buffer is last part of skb */
#define EFX_TX_BUF_HEAP 4 /* buffer was allocated with kmalloc() */
#define EFX_TX_BUF_MAP_SINGLE 8 /* buffer was mapped with dma_map_single() */
+#define EFX_TX_BUF_OPTION 0x10 /* empty buffer for option descriptor */
/**
* struct efx_tx_queue - An Efx TX queue
@@ -169,6 +191,7 @@ struct efx_tx_buffer {
* variable indicates that the queue is empty. This is to
* avoid cache-line ping-pong between the xmit path and the
* completion path.
+ * @merge_events: Number of TX merged completion events
* @insert_count: Current insert pointer
* This is the number of buffers that have been added to the
* software ring.
@@ -205,6 +228,7 @@ struct efx_tx_queue {
/* Members used mainly on the completion path */
unsigned int read_count ____cacheline_aligned_in_smp;
unsigned int old_write_count;
+ unsigned int merge_events;
/* Members used only on the xmit path */
unsigned int insert_count ____cacheline_aligned_in_smp;
@@ -244,6 +268,7 @@ struct efx_rx_buffer {
#define EFX_RX_PKT_CSUMMED 0x0002
#define EFX_RX_PKT_DISCARD 0x0004
#define EFX_RX_PKT_TCP 0x0040
+#define EFX_RX_PKT_PREFIX_LEN 0x0080 /* length is in prefix only */
/**
* struct efx_rx_page_state - Page-based rx buffer state
@@ -271,13 +296,14 @@ struct efx_rx_page_state {
* @buffer: The software buffer ring
* @rxd: The hardware descriptor ring
* @ptr_mask: The size of the ring minus 1.
- * @enabled: Receive queue enabled indicator.
+ * @refill_enabled: Enable refill whenever fill level is low
* @flush_pending: Set when a RX flush is pending. Has the same lifetime as
* @rxq_flush_pending.
* @added_count: Number of buffers added to the receive queue.
* @notified_count: Number of buffers given to NIC (<= @added_count).
* @removed_count: Number of buffers removed from the receive queue.
- * @scatter_n: Number of buffers used by current packet
+ * @scatter_n: Used by NIC specific receive code.
+ * @scatter_len: Used by NIC specific receive code.
* @page_ring: The ring to store DMA mapped pages for reuse.
* @page_add: Counter to calculate the write pointer for the recycle ring.
* @page_remove: Counter to calculate the read pointer for the recycle ring.
@@ -302,13 +328,14 @@ struct efx_rx_queue {
struct efx_rx_buffer *buffer;
struct efx_special_buffer rxd;
unsigned int ptr_mask;
- bool enabled;
+ bool refill_enabled;
bool flush_pending;
unsigned int added_count;
unsigned int notified_count;
unsigned int removed_count;
unsigned int scatter_n;
+ unsigned int scatter_len;
struct page **page_ring;
unsigned int page_add;
unsigned int page_remove;
@@ -325,22 +352,6 @@ struct efx_rx_queue {
unsigned int slow_fill_count;
};
-/**
- * struct efx_buffer - An Efx general-purpose buffer
- * @addr: host base address of the buffer
- * @dma_addr: DMA base address of the buffer
- * @len: Buffer length, in bytes
- *
- * The NIC uses these buffers for its interrupt status registers and
- * MAC stats dumps.
- */
-struct efx_buffer {
- void *addr;
- dma_addr_t dma_addr;
- unsigned int len;
-};
-
-
enum efx_rx_alloc_method {
RX_ALLOC_METHOD_AUTO = 0,
RX_ALLOC_METHOD_SKB = 1,
@@ -357,12 +368,12 @@ enum efx_rx_alloc_method {
* @efx: Associated Efx NIC
* @channel: Channel instance number
* @type: Channel type definition
+ * @eventq_init: Event queue initialised flag
* @enabled: Channel enabled indicator
* @irq: IRQ number (MSI and MSI-X only)
* @irq_moderation: IRQ moderation value (in hardware ticks)
* @napi_dev: Net device used with NAPI
* @napi_str: NAPI control structure
- * @work_pending: Is work pending via NAPI?
* @eventq: Event queue buffer
* @eventq_mask: Event queue pointer mask
* @eventq_read_ptr: Event queue read pointer
@@ -378,6 +389,8 @@ enum efx_rx_alloc_method {
* @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
* @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
* lack of descriptors
+ * @n_rx_merge_events: Number of RX merged completion events
+ * @n_rx_merge_packets: Number of RX packets completed by merged events
* @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
* __efx_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered
@@ -389,12 +402,12 @@ struct efx_channel {
struct efx_nic *efx;
int channel;
const struct efx_channel_type *type;
+ bool eventq_init;
bool enabled;
int irq;
unsigned int irq_moderation;
struct net_device *napi_dev;
struct napi_struct napi_str;
- bool work_pending;
struct efx_special_buffer eventq;
unsigned int eventq_mask;
unsigned int eventq_read_ptr;
@@ -414,6 +427,8 @@ struct efx_channel {
unsigned n_rx_overlength;
unsigned n_skbuff_leaks;
unsigned int n_rx_nodesc_trunc;
+ unsigned int n_rx_merge_events;
+ unsigned int n_rx_merge_packets;
unsigned int rx_pkt_n_frags;
unsigned int rx_pkt_index;
@@ -423,6 +438,21 @@ struct efx_channel {
};
/**
+ * struct efx_msi_context - Context for each MSI
+ * @efx: The associated NIC
+ * @index: Index of the channel/IRQ
+ * @name: Name of the channel/IRQ
+ *
+ * Unlike &struct efx_channel, this is never reallocated and is always
+ * safe for the IRQ handler to access.
+ */
+struct efx_msi_context {
+ struct efx_nic *efx;
+ unsigned int index;
+ char name[IFNAMSIZ + 6];
+};
+
+/**
* struct efx_channel_type - distinguishes traffic and extra channels
* @handle_no_channel: Handle failure to allocate an extra channel
* @pre_probe: Set up extra state prior to initialisation
@@ -579,75 +609,17 @@ static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
return !!(mode & ~PHY_MODE_TX_DISABLED);
}
-/*
- * Efx extended statistics
- *
- * Not all statistics are provided by all supported MACs. The purpose
- * is this structure is to contain the raw statistics provided by each
- * MAC.
+/**
+ * struct efx_hw_stat_desc - Description of a hardware statistic
+ * @name: Name of the statistic as visible through ethtool, or %NULL if
+ * it should not be exposed
+ * @dma_width: Width in bits (0 for non-DMA statistics)
+ * @offset: Offset within stats (ignored for non-DMA statistics)
*/
-struct efx_mac_stats {
- u64 tx_bytes;
- u64 tx_good_bytes;
- u64 tx_bad_bytes;
- u64 tx_packets;
- u64 tx_bad;
- u64 tx_pause;
- u64 tx_control;
- u64 tx_unicast;
- u64 tx_multicast;
- u64 tx_broadcast;
- u64 tx_lt64;
- u64 tx_64;
- u64 tx_65_to_127;
- u64 tx_128_to_255;
- u64 tx_256_to_511;
- u64 tx_512_to_1023;
- u64 tx_1024_to_15xx;
- u64 tx_15xx_to_jumbo;
- u64 tx_gtjumbo;
- u64 tx_collision;
- u64 tx_single_collision;
- u64 tx_multiple_collision;
- u64 tx_excessive_collision;
- u64 tx_deferred;
- u64 tx_late_collision;
- u64 tx_excessive_deferred;
- u64 tx_non_tcpudp;
- u64 tx_mac_src_error;
- u64 tx_ip_src_error;
- u64 rx_bytes;
- u64 rx_good_bytes;
- u64 rx_bad_bytes;
- u64 rx_packets;
- u64 rx_good;
- u64 rx_bad;
- u64 rx_pause;
- u64 rx_control;
- u64 rx_unicast;
- u64 rx_multicast;
- u64 rx_broadcast;
- u64 rx_lt64;
- u64 rx_64;
- u64 rx_65_to_127;
- u64 rx_128_to_255;
- u64 rx_256_to_511;
- u64 rx_512_to_1023;
- u64 rx_1024_to_15xx;
- u64 rx_15xx_to_jumbo;
- u64 rx_gtjumbo;
- u64 rx_bad_lt64;
- u64 rx_bad_64_to_15xx;
- u64 rx_bad_15xx_to_jumbo;
- u64 rx_bad_gtjumbo;
- u64 rx_overflow;
- u64 rx_missed;
- u64 rx_false_carrier;
- u64 rx_symbol_error;
- u64 rx_align_error;
- u64 rx_length_error;
- u64 rx_internal_error;
- u64 rx_good_lt64;
+struct efx_hw_stat_desc {
+ const char *name;
+ u16 dma_width;
+ u16 offset;
};
/* Number of bits used in a multicast filter hash address */
@@ -662,7 +634,6 @@ union efx_multicast_hash {
efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};
-struct efx_filter_state;
struct efx_vf;
struct vfdi_status;
@@ -672,7 +643,6 @@ struct vfdi_status;
* @pci_dev: The PCI device
* @type: Controller type attributes
* @legacy_irq: IRQ number
- * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
* @workqueue: Workqueue for port reconfigures and the HW monitor.
* Work items do not hold and must not acquire RTNL.
* @workqueue_name: Name of workqueue
@@ -689,7 +659,7 @@ struct vfdi_status;
* @tx_queue: TX DMA queues
* @rx_queue: RX DMA queues
* @channel: Channels
- * @channel_name: Names for channels and their IRQs
+ * @msi_context: Context for each MSI
* @extra_channel_types: Types of extra (non-traffic) channels that
* should be allocated for this NIC
* @rxq_entries: Size of receive queues requested by user.
@@ -707,17 +677,25 @@ struct vfdi_status;
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @rx_buffer_truesize: Amortised allocation size of an RX buffer,
* for use in sk_buff::truesize
+ * @rx_prefix_size: Size of RX prefix before packet data
+ * @rx_packet_hash_offset: Offset of RX flow hash from start of packet data
+ * (valid only if @rx_prefix_size != 0; always negative)
+ * @rx_packet_len_offset: Offset of RX packet length from start of packet data
+ * (valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative)
* @rx_hash_key: Toeplitz hash key for RSS
* @rx_indir_table: Indirection table for RSS
* @rx_scatter: Scatter mode enabled for receives
* @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired
+ * @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will
+ * acknowledge but do nothing else.
* @irq_status: Interrupt status buffer
* @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
* @irq_level: IRQ level/index for IRQs not triggered by an event queue
* @selftest_work: Work item for asynchronous self-test
* @mtd_list: List of MTDs attached to the NIC
* @nic_data: Hardware dependent state
+ * @mcdi: Management-Controller-to-Driver Interface state
* @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
* efx_monitor() and efx_reconfigure_port()
* @port_enabled: Port enabled indicator.
@@ -737,8 +715,10 @@ struct vfdi_status;
* @link_advertising: Autonegotiation advertising flags
* @link_state: Current state of the link
* @n_link_state_changes: Number of times the link has changed state
- * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
- * @multicast_hash: Multicast hash table
+ * @unicast_filter: Flag for Falcon-arch simple unicast filter.
+ * Protected by @mac_lock.
+ * @multicast_hash: Multicast hash table for Falcon-arch.
+ * Protected by @mac_lock.
* @wanted_fc: Wanted flow control flags
* @fc_disable: When non-zero flow control is disabled. Typically used to
* ensure that network back pressure doesn't delay dma queue flushes.
@@ -747,7 +727,12 @@ struct vfdi_status;
* @loopback_mode: Loopback status
* @loopback_modes: Supported loopback mode bitmask
* @loopback_selftest: Offline self-test private state
- * @drain_pending: Count of RX and TX queues that haven't been flushed and drained.
+ * @filter_lock: Filter table lock
+ * @filter_state: Architecture-dependent filter table state
+ * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
+ * indexed by filter ID
+ * @rps_expire_index: Next index to check for expiry in @rps_flow_id
+ * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
* @rxq_flush_pending: Count of number of receive queues that need to be flushed.
* Decremented when the efx_flush_rx_queue() is called.
* @rxq_flush_outstanding: Count of number of RX flushes started but not yet
@@ -771,12 +756,8 @@ struct vfdi_status;
* @last_irq_cpu: Last CPU to handle a possible test interrupt. This
* field is used by efx_test_interrupts() to verify that an
* interrupt has occurred.
- * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
- * @mac_stats: MAC statistics. These include all statistics the MACs
- * can provide. Generic code converts these into a standard
- * &struct net_device_stats.
- * @stats_lock: Statistics update lock. Serialises statistics fetches
- * and access to @mac_stats.
+ * @stats_lock: Statistics update lock. Must be held when calling
+ * efx_nic_type::{update,start,stop}_stats.
*
* This is stored in the private area of the &struct net_device.
*/
@@ -788,7 +769,6 @@ struct efx_nic {
unsigned int port_num;
const struct efx_nic_type *type;
int legacy_irq;
- bool legacy_irq_enabled;
bool eeh_disabled_legacy_irq;
struct workqueue_struct *workqueue;
char workqueue_name[16];
@@ -806,7 +786,7 @@ struct efx_nic {
unsigned long reset_pending;
struct efx_channel *channel[EFX_MAX_CHANNELS];
- char channel_name[EFX_MAX_CHANNELS][IFNAMSIZ + 6];
+ struct efx_msi_context msi_context[EFX_MAX_CHANNELS];
const struct efx_channel_type *
extra_channel_type[EFX_MAX_EXTRA_CHANNELS];
@@ -819,6 +799,8 @@ struct efx_nic {
unsigned rx_dc_base;
unsigned sram_lim_qw;
unsigned next_buffer_table;
+
+ unsigned int max_channels;
unsigned n_channels;
unsigned n_rx_channels;
unsigned rss_spread;
@@ -830,6 +812,9 @@ struct efx_nic {
unsigned int rx_page_buf_step;
unsigned int rx_bufs_per_page;
unsigned int rx_pages_per_batch;
+ unsigned int rx_prefix_size;
+ int rx_packet_hash_offset;
+ int rx_packet_len_offset;
u8 rx_hash_key[40];
u32 rx_indir_table[128];
bool rx_scatter;
@@ -837,6 +822,7 @@ struct efx_nic {
unsigned int_error_count;
unsigned long int_error_expire;
+ bool irq_soft_enabled;
struct efx_buffer irq_status;
unsigned irq_zero_count;
unsigned irq_level;
@@ -847,6 +833,7 @@ struct efx_nic {
#endif
void *nic_data;
+ struct efx_mcdi_data *mcdi;
struct mutex mac_lock;
struct work_struct mac_work;
@@ -868,7 +855,7 @@ struct efx_nic {
struct efx_link_state link_state;
unsigned int n_link_state_changes;
- bool promiscuous;
+ bool unicast_filter;
union efx_multicast_hash multicast_hash;
u8 wanted_fc;
unsigned fc_disable;
@@ -879,9 +866,14 @@ struct efx_nic {
void *loopback_selftest;
- struct efx_filter_state *filter_state;
+ spinlock_t filter_lock;
+ void *filter_state;
+#ifdef CONFIG_RFS_ACCEL
+ u32 *rps_flow_id;
+ unsigned int rps_expire_index;
+#endif
- atomic_t drain_pending;
+ atomic_t active_queues;
atomic_t rxq_flush_pending;
atomic_t rxq_flush_outstanding;
wait_queue_head_t flush_wq;
@@ -907,8 +899,6 @@ struct efx_nic {
struct delayed_work monitor_work ____cacheline_aligned_in_smp;
spinlock_t biu_lock;
int last_irq_cpu;
- unsigned n_rx_nodesc_drop_cnt;
- struct efx_mac_stats mac_stats;
spinlock_t stats_lock;
};
@@ -922,8 +912,17 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
return efx->port_num;
}
+struct efx_mtd_partition {
+ struct list_head node;
+ struct mtd_info mtd;
+ const char *dev_type_name;
+ const char *type_name;
+ char name[IFNAMSIZ + 20];
+};
+
/**
* struct efx_nic_type - Efx device type definition
+ * @mem_map_size: Get memory BAR mapped size
* @probe: Probe the controller
* @remove: Free resources allocated by probe()
* @init: Initialise the controller
@@ -938,47 +937,118 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @probe_port: Probe the MAC and PHY
* @remove_port: Free resources allocated by probe_port()
* @handle_global_event: Handle a "global" event (may be %NULL)
+ * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues)
* @prepare_flush: Prepare the hardware for flushing the DMA queues
- * @finish_flush: Clean up after flushing the DMA queues
- * @update_stats: Update statistics not provided by event handling
+ * (for Falcon architecture)
+ * @finish_flush: Clean up after flushing the DMA queues (for Falcon
+ * architecture)
+ * @describe_stats: Describe statistics for ethtool
+ * @update_stats: Update statistics not provided by event handling.
+ * Either argument may be %NULL.
* @start_stats: Start the regular fetching of statistics
* @stop_stats: Stop the regular fetching of statistics
* @set_id_led: Set state of identifying LED or revert to automatic function
* @push_irq_moderation: Apply interrupt moderation value
* @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
+ * @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL)
* @reconfigure_mac: Push MAC address, MTU, flow control and filter settings
* to the hardware. Serialised by the mac_lock.
* @check_mac_fault: Check MAC fault state. True if fault present.
* @get_wol: Get WoL configuration from driver state
* @set_wol: Push WoL configuration to the NIC
* @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
- * @test_chip: Test registers. Should use efx_nic_test_registers(), and is
+ * @test_chip: Test registers. May use efx_farch_test_registers(), and is
* expected to reset the NIC.
* @test_nvram: Test validity of NVRAM contents
+ * @mcdi_request: Send an MCDI request with the given header and SDU.
+ * The SDU length may be any value from 0 up to the protocol-
+ * defined maximum, but its buffer will be padded to a multiple
+ * of 4 bytes.
+ * @mcdi_poll_response: Test whether an MCDI response is available.
+ * @mcdi_read_response: Read the MCDI response PDU. The offset will
+ * be a multiple of 4. The length may not be, but the buffer
+ * will be padded so it is safe to round up.
+ * @mcdi_poll_reboot: Test whether the MCDI has rebooted. If so,
+ * return an appropriate error code for aborting any current
+ * request; otherwise return 0.
+ * @irq_enable_master: Enable IRQs on the NIC. Each event queue must
+ * be separately enabled after this.
+ * @irq_test_generate: Generate a test IRQ
+ * @irq_disable_non_ev: Disable non-event IRQs on the NIC. Each event
+ * queue must be separately disabled before this.
+ * @irq_handle_msi: Handle MSI for a channel. The @dev_id argument is
+ * a pointer to the &struct efx_msi_context for the channel.
+ * @irq_handle_legacy: Handle legacy interrupt. The @dev_id argument
+ * is a pointer to the &struct efx_nic.
+ * @tx_probe: Allocate resources for TX queue
+ * @tx_init: Initialise TX queue on the NIC
+ * @tx_remove: Free resources for TX queue
+ * @tx_write: Write TX descriptors and doorbell
+ * @rx_push_indir_table: Write RSS indirection table to the NIC
+ * @rx_probe: Allocate resources for RX queue
+ * @rx_init: Initialise RX queue on the NIC
+ * @rx_remove: Free resources for RX queue
+ * @rx_write: Write RX descriptors and doorbell
+ * @rx_defer_refill: Generate a refill reminder event
+ * @ev_probe: Allocate resources for event queue
+ * @ev_init: Initialise event queue on the NIC
+ * @ev_fini: Deinitialise event queue on the NIC
+ * @ev_remove: Free resources for event queue
+ * @ev_process: Process events for a queue, up to the given NAPI quota
+ * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ
+ * @ev_test_generate: Generate a test event
+ * @filter_table_probe: Probe filter capabilities and set up filter software state
+ * @filter_table_restore: Restore filters removed from hardware
+ * @filter_table_remove: Remove filters from hardware and tear down software state
+ * @filter_update_rx_scatter: Update filters after change to rx scatter setting
+ * @filter_insert: add or replace a filter
+ * @filter_remove_safe: remove a filter by ID, carefully
+ * @filter_get_safe: retrieve a filter by ID, carefully
+ * @filter_clear_rx: remove RX filters by priority
+ * @filter_count_rx_used: Get the number of filters in use at a given priority
+ * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
+ * @filter_get_rx_ids: Get list of RX filters at a given priority
+ * @filter_rfs_insert: Add or replace a filter for RFS. This must be
+ * atomic. The hardware change may be asynchronous but should
+ * not be delayed for long. It may fail if this can't be done
+ * atomically.
+ * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS.
+ * This must check whether the specified table entry is used by RFS
+ * and that rps_may_expire_flow() returns true for it.
+ * @mtd_probe: Probe and add MTD partitions associated with this net device,
+ * using efx_mtd_add()
+ * @mtd_rename: Set an MTD partition name using the net device name
+ * @mtd_read: Read from an MTD partition
+ * @mtd_erase: Erase part of an MTD partition
+ * @mtd_write: Write to an MTD partition
+ * @mtd_sync: Wait for write-back to complete on MTD partition. This
+ * also notifies the driver that a writer has finished using this
+ * partition.
* @revision: Hardware architecture revision
- * @mem_map_size: Memory BAR mapped size
* @txd_ptr_tbl_base: TX descriptor ring base address
* @rxd_ptr_tbl_base: RX descriptor ring base address
* @buf_tbl_base: Buffer table base address
* @evq_ptr_tbl_base: Event queue pointer table base address
* @evq_rptr_tbl_base: Event queue read-pointer table base address
* @max_dma_mask: Maximum possible DMA mask
- * @rx_buffer_hash_size: Size of hash at start of RX packet
+ * @rx_prefix_size: Size of RX prefix before packet data
+ * @rx_hash_offset: Offset of RX flow hash within prefix
* @rx_buffer_padding: Size of padding at end of RX packet
- * @can_rx_scatter: NIC is able to scatter packet to multiple buffers
+ * @can_rx_scatter: NIC is able to scatter packets to multiple buffers
+ * @always_rx_scatter: NIC will always scatter packets to multiple buffers
* @max_interrupt_mode: Highest capability interrupt mode supported
* from &enum efx_init_mode.
- * @phys_addr_channels: Number of channels with physically addressed
- * descriptors
* @timer_period_max: Maximum period of interrupt timer (in ticks)
* @offload_features: net_device feature flags for protocol offload
* features implemented in hardware
+ * @mcdi_max_ver: Maximum MCDI version supported
*/
struct efx_nic_type {
+ unsigned int (*mem_map_size)(struct efx_nic *efx);
int (*probe)(struct efx_nic *efx);
void (*remove)(struct efx_nic *efx);
int (*init)(struct efx_nic *efx);
- void (*dimension_resources)(struct efx_nic *efx);
+ int (*dimension_resources)(struct efx_nic *efx);
void (*fini)(struct efx_nic *efx);
void (*monitor)(struct efx_nic *efx);
enum reset_type (*map_reset_reason)(enum reset_type reason);
@@ -987,14 +1057,18 @@ struct efx_nic_type {
int (*probe_port)(struct efx_nic *efx);
void (*remove_port)(struct efx_nic *efx);
bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
+ int (*fini_dmaq)(struct efx_nic *efx);
void (*prepare_flush)(struct efx_nic *efx);
void (*finish_flush)(struct efx_nic *efx);
- void (*update_stats)(struct efx_nic *efx);
+ size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
+ size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats);
void (*start_stats)(struct efx_nic *efx);
void (*stop_stats)(struct efx_nic *efx);
void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
void (*push_irq_moderation)(struct efx_channel *channel);
int (*reconfigure_port)(struct efx_nic *efx);
+ void (*prepare_enable_fc_tx)(struct efx_nic *efx);
int (*reconfigure_mac)(struct efx_nic *efx);
bool (*check_mac_fault)(struct efx_nic *efx);
void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
@@ -1002,22 +1076,90 @@ struct efx_nic_type {
void (*resume_wol)(struct efx_nic *efx);
int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
int (*test_nvram)(struct efx_nic *efx);
+ void (*mcdi_request)(struct efx_nic *efx,
+ const efx_dword_t *hdr, size_t hdr_len,
+ const efx_dword_t *sdu, size_t sdu_len);
+ bool (*mcdi_poll_response)(struct efx_nic *efx);
+ void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu,
+ size_t pdu_offset, size_t pdu_len);
+ int (*mcdi_poll_reboot)(struct efx_nic *efx);
+ void (*irq_enable_master)(struct efx_nic *efx);
+ void (*irq_test_generate)(struct efx_nic *efx);
+ void (*irq_disable_non_ev)(struct efx_nic *efx);
+ irqreturn_t (*irq_handle_msi)(int irq, void *dev_id);
+ irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id);
+ int (*tx_probe)(struct efx_tx_queue *tx_queue);
+ void (*tx_init)(struct efx_tx_queue *tx_queue);
+ void (*tx_remove)(struct efx_tx_queue *tx_queue);
+ void (*tx_write)(struct efx_tx_queue *tx_queue);
+ void (*rx_push_indir_table)(struct efx_nic *efx);
+ int (*rx_probe)(struct efx_rx_queue *rx_queue);
+ void (*rx_init)(struct efx_rx_queue *rx_queue);
+ void (*rx_remove)(struct efx_rx_queue *rx_queue);
+ void (*rx_write)(struct efx_rx_queue *rx_queue);
+ void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
+ int (*ev_probe)(struct efx_channel *channel);
+ int (*ev_init)(struct efx_channel *channel);
+ void (*ev_fini)(struct efx_channel *channel);
+ void (*ev_remove)(struct efx_channel *channel);
+ int (*ev_process)(struct efx_channel *channel, int quota);
+ void (*ev_read_ack)(struct efx_channel *channel);
+ void (*ev_test_generate)(struct efx_channel *channel);
+ int (*filter_table_probe)(struct efx_nic *efx);
+ void (*filter_table_restore)(struct efx_nic *efx);
+ void (*filter_table_remove)(struct efx_nic *efx);
+ void (*filter_update_rx_scatter)(struct efx_nic *efx);
+ s32 (*filter_insert)(struct efx_nic *efx,
+ struct efx_filter_spec *spec, bool replace);
+ int (*filter_remove_safe)(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id);
+ int (*filter_get_safe)(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *);
+ void (*filter_clear_rx)(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+ u32 (*filter_count_rx_used)(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+ u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
+ s32 (*filter_get_rx_ids)(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size);
+#ifdef CONFIG_RFS_ACCEL
+ s32 (*filter_rfs_insert)(struct efx_nic *efx,
+ struct efx_filter_spec *spec);
+ bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id,
+ unsigned int index);
+#endif
+#ifdef CONFIG_SFC_MTD
+ int (*mtd_probe)(struct efx_nic *efx);
+ void (*mtd_rename)(struct efx_mtd_partition *part);
+ int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, u8 *buffer);
+ int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len);
+ int (*mtd_write)(struct mtd_info *mtd, loff_t start, size_t len,
+ size_t *retlen, const u8 *buffer);
+ int (*mtd_sync)(struct mtd_info *mtd);
+#endif
+ void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
int revision;
- unsigned int mem_map_size;
unsigned int txd_ptr_tbl_base;
unsigned int rxd_ptr_tbl_base;
unsigned int buf_tbl_base;
unsigned int evq_ptr_tbl_base;
unsigned int evq_rptr_tbl_base;
u64 max_dma_mask;
- unsigned int rx_buffer_hash_size;
+ unsigned int rx_prefix_size;
+ unsigned int rx_hash_offset;
unsigned int rx_buffer_padding;
bool can_rx_scatter;
+ bool always_rx_scatter;
unsigned int max_interrupt_mode;
- unsigned int phys_addr_channels;
unsigned int timer_period_max;
netdev_features_t offload_features;
+ int mcdi_max_ver;
+ unsigned int max_rx_ip_filters;
};
/**************************************************************************
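The struct efx_hw_stat_desc introduced above replaces the fixed struct efx_mac_stats: each per-NIC statistics table now records a counter's ethtool name, its width and its offset within the DMA stats buffer. One plausible way such a table could be consumed, shown purely for illustration with an invented table and helper (the driver's real tables and update paths are defined per NIC type):

/* Invented two-entry table over a hypothetical DMA stats layout. */
static const struct efx_hw_stat_desc example_stat_desc[] = {
	{ .name = "tx_bytes",   .dma_width = 64, .offset = 0 },
	{ .name = "rx_packets", .dma_width = 64, .offset = 8 },
};

/* Accumulate each described counter out of the raw DMA buffer. Only
 * 64-bit counters are handled here; the driver also deals with other
 * widths and with statistics that have no DMA source (dma_width == 0).
 */
static void example_update_stats(const void *dma_buf, u64 *stats)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(example_stat_desc); i++) {
		const struct efx_hw_stat_desc *desc = &example_stat_desc[i];
		const __le64 *src;

		if (!desc->name || desc->dma_width != 64)
			continue;	/* hidden or not a 64-bit DMA counter */
		src = (const __le64 *)((const u8 *)dma_buf + desc->offset);
		stats[i] += le64_to_cpu(*src);
	}
}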
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 56ed3bc71e0..e7dbd2dd202 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2011 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -19,295 +19,22 @@
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
/**************************************************************************
*
- * Configurable values
- *
- **************************************************************************
- */
-
-/* This is set to 16 for a good reason. In summary, if larger than
- * 16, the descriptor cache holds more than a default socket
- * buffer's worth of packets (for UDP we can only have at most one
- * socket buffer's worth outstanding). This combined with the fact
- * that we only get 1 TX event per descriptor cache means the NIC
- * goes idle.
- */
-#define TX_DC_ENTRIES 16
-#define TX_DC_ENTRIES_ORDER 1
-
-#define RX_DC_ENTRIES 64
-#define RX_DC_ENTRIES_ORDER 3
-
-/* If EFX_MAX_INT_ERRORS internal errors occur within
- * EFX_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
- * disable it.
- */
-#define EFX_INT_ERROR_EXPIRE 3600
-#define EFX_MAX_INT_ERRORS 5
-
-/* Depth of RX flush request fifo */
-#define EFX_RX_FLUSH_COUNT 4
-
-/* Driver generated events */
-#define _EFX_CHANNEL_MAGIC_TEST 0x000101
-#define _EFX_CHANNEL_MAGIC_FILL 0x000102
-#define _EFX_CHANNEL_MAGIC_RX_DRAIN 0x000103
-#define _EFX_CHANNEL_MAGIC_TX_DRAIN 0x000104
-
-#define _EFX_CHANNEL_MAGIC(_code, _data) ((_code) << 8 | (_data))
-#define _EFX_CHANNEL_MAGIC_CODE(_magic) ((_magic) >> 8)
-
-#define EFX_CHANNEL_MAGIC_TEST(_channel) \
- _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TEST, (_channel)->channel)
-#define EFX_CHANNEL_MAGIC_FILL(_rx_queue) \
- _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_FILL, \
- efx_rx_queue_index(_rx_queue))
-#define EFX_CHANNEL_MAGIC_RX_DRAIN(_rx_queue) \
- _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_RX_DRAIN, \
- efx_rx_queue_index(_rx_queue))
-#define EFX_CHANNEL_MAGIC_TX_DRAIN(_tx_queue) \
- _EFX_CHANNEL_MAGIC(_EFX_CHANNEL_MAGIC_TX_DRAIN, \
- (_tx_queue)->queue)
-
-static void efx_magic_event(struct efx_channel *channel, u32 magic);
-
-/**************************************************************************
- *
- * Solarstorm hardware access
- *
- **************************************************************************/
-
-static inline void efx_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
- unsigned int index)
-{
- efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
- value, index);
-}
-
-/* Read the current event from the event queue */
-static inline efx_qword_t *efx_event(struct efx_channel *channel,
- unsigned int index)
-{
- return ((efx_qword_t *) (channel->eventq.addr)) +
- (index & channel->eventq_mask);
-}
-
-/* See if an event is present
- *
- * We check both the high and low dword of the event for all ones. We
- * wrote all ones when we cleared the event, and no valid event can
- * have all ones in either its high or low dwords. This approach is
- * robust against reordering.
- *
- * Note that using a single 64-bit comparison is incorrect; even
- * though the CPU read will be atomic, the DMA write may not be.
- */
-static inline int efx_event_present(efx_qword_t *event)
-{
- return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
- EFX_DWORD_IS_ALL_ONES(event->dword[1]));
-}
-
-static bool efx_masked_compare_oword(const efx_oword_t *a, const efx_oword_t *b,
- const efx_oword_t *mask)
-{
- return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
- ((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
-}
-
-int efx_nic_test_registers(struct efx_nic *efx,
- const struct efx_nic_register_test *regs,
- size_t n_regs)
-{
- unsigned address = 0, i, j;
- efx_oword_t mask, imask, original, reg, buf;
-
- for (i = 0; i < n_regs; ++i) {
- address = regs[i].address;
- mask = imask = regs[i].mask;
- EFX_INVERT_OWORD(imask);
-
- efx_reado(efx, &original, address);
-
- /* bit sweep on and off */
- for (j = 0; j < 128; j++) {
- if (!EFX_EXTRACT_OWORD32(mask, j, j))
- continue;
-
- /* Test this testable bit can be set in isolation */
- EFX_AND_OWORD(reg, original, mask);
- EFX_SET_OWORD32(reg, j, j, 1);
-
- efx_writeo(efx, &reg, address);
- efx_reado(efx, &buf, address);
-
- if (efx_masked_compare_oword(&reg, &buf, &mask))
- goto fail;
-
- /* Test this testable bit can be cleared in isolation */
- EFX_OR_OWORD(reg, original, mask);
- EFX_SET_OWORD32(reg, j, j, 0);
-
- efx_writeo(efx, &reg, address);
- efx_reado(efx, &buf, address);
-
- if (efx_masked_compare_oword(&reg, &buf, &mask))
- goto fail;
- }
-
- efx_writeo(efx, &original, address);
- }
-
- return 0;
-
-fail:
- netif_err(efx, hw, efx->net_dev,
- "wrote "EFX_OWORD_FMT" read "EFX_OWORD_FMT
- " at address 0x%x mask "EFX_OWORD_FMT"\n", EFX_OWORD_VAL(reg),
- EFX_OWORD_VAL(buf), address, EFX_OWORD_VAL(mask));
- return -EIO;
-}
-
-/**************************************************************************
- *
- * Special buffer handling
- * Special buffers are used for event queues and the TX and RX
- * descriptor rings.
- *
- *************************************************************************/
-
-/*
- * Initialise a special buffer
- *
- * This will define a buffer (previously allocated via
- * efx_alloc_special_buffer()) in the buffer table, allowing
- * it to be used for event queues, descriptor rings etc.
- */
-static void
-efx_init_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
-{
- efx_qword_t buf_desc;
- unsigned int index;
- dma_addr_t dma_addr;
- int i;
-
- EFX_BUG_ON_PARANOID(!buffer->addr);
-
- /* Write buffer descriptors to NIC */
- for (i = 0; i < buffer->entries; i++) {
- index = buffer->index + i;
- dma_addr = buffer->dma_addr + (i * EFX_BUF_SIZE);
- netif_dbg(efx, probe, efx->net_dev,
- "mapping special buffer %d at %llx\n",
- index, (unsigned long long)dma_addr);
- EFX_POPULATE_QWORD_3(buf_desc,
- FRF_AZ_BUF_ADR_REGION, 0,
- FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
- FRF_AZ_BUF_OWNER_ID_FBUF, 0);
- efx_write_buf_tbl(efx, &buf_desc, index);
- }
-}
-
-/* Unmaps a buffer and clears the buffer table entries */
-static void
-efx_fini_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
-{
- efx_oword_t buf_tbl_upd;
- unsigned int start = buffer->index;
- unsigned int end = (buffer->index + buffer->entries - 1);
-
- if (!buffer->entries)
- return;
-
- netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
- buffer->index, buffer->index + buffer->entries - 1);
-
- EFX_POPULATE_OWORD_4(buf_tbl_upd,
- FRF_AZ_BUF_UPD_CMD, 0,
- FRF_AZ_BUF_CLR_CMD, 1,
- FRF_AZ_BUF_CLR_END_ID, end,
- FRF_AZ_BUF_CLR_START_ID, start);
- efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
-}
-
-/*
- * Allocate a new special buffer
- *
- * This allocates memory for a new buffer, clears it and allocates a
- * new buffer ID range. It does not write into the buffer table.
- *
- * This call will allocate 4KB buffers, since 8KB buffers can't be
- * used for event queues and descriptor rings.
- */
-static int efx_alloc_special_buffer(struct efx_nic *efx,
- struct efx_special_buffer *buffer,
- unsigned int len)
-{
- len = ALIGN(len, EFX_BUF_SIZE);
-
- buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
- &buffer->dma_addr, GFP_KERNEL);
- if (!buffer->addr)
- return -ENOMEM;
- buffer->len = len;
- buffer->entries = len / EFX_BUF_SIZE;
- BUG_ON(buffer->dma_addr & (EFX_BUF_SIZE - 1));
-
- /* Select new buffer ID */
- buffer->index = efx->next_buffer_table;
- efx->next_buffer_table += buffer->entries;
-#ifdef CONFIG_SFC_SRIOV
- BUG_ON(efx_sriov_enabled(efx) &&
- efx->vf_buftbl_base < efx->next_buffer_table);
-#endif
-
- netif_dbg(efx, probe, efx->net_dev,
- "allocating special buffers %d-%d at %llx+%x "
- "(virt %p phys %llx)\n", buffer->index,
- buffer->index + buffer->entries - 1,
- (u64)buffer->dma_addr, len,
- buffer->addr, (u64)virt_to_phys(buffer->addr));
-
- return 0;
-}
-
-static void
-efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
-{
- if (!buffer->addr)
- return;
-
- netif_dbg(efx, hw, efx->net_dev,
- "deallocating special buffers %d-%d at %llx+%x "
- "(virt %p phys %llx)\n", buffer->index,
- buffer->index + buffer->entries - 1,
- (u64)buffer->dma_addr, buffer->len,
- buffer->addr, (u64)virt_to_phys(buffer->addr));
-
- dma_free_coherent(&efx->pci_dev->dev, buffer->len, buffer->addr,
- buffer->dma_addr);
- buffer->addr = NULL;
- buffer->entries = 0;
-}
-
-/**************************************************************************
- *
* Generic buffer handling
* These buffers are used for interrupt status, MAC stats, etc.
*
**************************************************************************/
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
- unsigned int len)
+ unsigned int len, gfp_t gfp_flags)
{
- buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
- &buffer->dma_addr,
- GFP_ATOMIC | __GFP_ZERO);
+ buffer->addr = dma_zalloc_coherent(&efx->pci_dev->dev, len,
+ &buffer->dma_addr, gfp_flags);
if (!buffer->addr)
return -ENOMEM;
buffer->len = len;
@@ -323,1057 +50,6 @@ void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
}
}
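/* Usage sketch for the reworked allocator (the buffer name and length are
 * illustrative, not taken from this patch): callers running in process
 * context can now request a sleeping allocation instead of the old implicit
 * GFP_ATOMIC, e.g.
 *
 *	struct efx_buffer stats_buf;
 *	int rc;
 *
 *	rc = efx_nic_alloc_buffer(efx, &stats_buf, 1024, GFP_KERNEL);
 *	if (rc)
 *		return rc;
 *	...
 *	efx_nic_free_buffer(efx, &stats_buf);
 */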
-/**************************************************************************
- *
- * TX path
- *
- **************************************************************************/
-
-/* Returns a pointer to the specified transmit descriptor in the TX
- * descriptor queue belonging to the specified channel.
- */
-static inline efx_qword_t *
-efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
-{
- return ((efx_qword_t *) (tx_queue->txd.addr)) + index;
-}
-
-/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
-static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
-{
- unsigned write_ptr;
- efx_dword_t reg;
-
- write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
- EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
- efx_writed_page(tx_queue->efx, &reg,
- FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
-}
-
-/* Write pointer and first descriptor for TX descriptor ring */
-static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
- const efx_qword_t *txd)
-{
- unsigned write_ptr;
- efx_oword_t reg;
-
- BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
- BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
-
- write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
- EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
- FRF_AZ_TX_DESC_WPTR, write_ptr);
- reg.qword[0] = *txd;
- efx_writeo_page(tx_queue->efx, &reg,
- FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
-}
-
-static inline bool
-efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
-{
- unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
-
- if (empty_read_count == 0)
- return false;
-
- tx_queue->empty_read_count = 0;
- return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
- && tx_queue->write_count - write_count == 1;
-}
-
-/* For each entry inserted into the software descriptor ring, create a
- * descriptor in the hardware TX descriptor ring (in host memory), and
- * write a doorbell.
- */
-void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
-{
-
- struct efx_tx_buffer *buffer;
- efx_qword_t *txd;
- unsigned write_ptr;
- unsigned old_write_count = tx_queue->write_count;
-
- BUG_ON(tx_queue->write_count == tx_queue->insert_count);
-
- do {
- write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
- buffer = &tx_queue->buffer[write_ptr];
- txd = efx_tx_desc(tx_queue, write_ptr);
- ++tx_queue->write_count;
-
- /* Create TX descriptor ring entry */
- BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
- EFX_POPULATE_QWORD_4(*txd,
- FSF_AZ_TX_KER_CONT,
- buffer->flags & EFX_TX_BUF_CONT,
- FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
- FSF_AZ_TX_KER_BUF_REGION, 0,
- FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
- } while (tx_queue->write_count != tx_queue->insert_count);
-
- wmb(); /* Ensure descriptors are written before they are fetched */
-
- if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
- txd = efx_tx_desc(tx_queue,
- old_write_count & tx_queue->ptr_mask);
- efx_push_tx_desc(tx_queue, txd);
- ++tx_queue->pushes;
- } else {
- efx_notify_tx_desc(tx_queue);
- }
-}
-
-/* Allocate hardware resources for a TX queue */
-int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
- unsigned entries;
-
- entries = tx_queue->ptr_mask + 1;
- return efx_alloc_special_buffer(efx, &tx_queue->txd,
- entries * sizeof(efx_qword_t));
-}
-
-void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
- efx_oword_t reg;
-
- /* Pin TX descriptor ring */
- efx_init_special_buffer(efx, &tx_queue->txd);
-
- /* Push TX descriptor ring to card */
- EFX_POPULATE_OWORD_10(reg,
- FRF_AZ_TX_DESCQ_EN, 1,
- FRF_AZ_TX_ISCSI_DDIG_EN, 0,
- FRF_AZ_TX_ISCSI_HDIG_EN, 0,
- FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
- FRF_AZ_TX_DESCQ_EVQ_ID,
- tx_queue->channel->channel,
- FRF_AZ_TX_DESCQ_OWNER_ID, 0,
- FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
- FRF_AZ_TX_DESCQ_SIZE,
- __ffs(tx_queue->txd.entries),
- FRF_AZ_TX_DESCQ_TYPE, 0,
- FRF_BZ_TX_NON_IP_DROP_DIS, 1);
-
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- int csum = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
- EFX_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
- !csum);
- }
-
- efx_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
- tx_queue->queue);
-
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) {
- /* Only 128 bits in this register */
- BUILD_BUG_ON(EFX_MAX_TX_QUEUES > 128);
-
- efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
- if (tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD)
- __clear_bit_le(tx_queue->queue, &reg);
- else
- __set_bit_le(tx_queue->queue, &reg);
- efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
- }
-
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- EFX_POPULATE_OWORD_1(reg,
- FRF_BZ_TX_PACE,
- (tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ?
- FFE_BZ_TX_PACE_OFF :
- FFE_BZ_TX_PACE_RESERVED);
- efx_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
- tx_queue->queue);
- }
-}
-
-static void efx_flush_tx_queue(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
- efx_oword_t tx_flush_descq;
-
- WARN_ON(atomic_read(&tx_queue->flush_outstanding));
- atomic_set(&tx_queue->flush_outstanding, 1);
-
- EFX_POPULATE_OWORD_2(tx_flush_descq,
- FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
- FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
- efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
-}
-
-void efx_nic_fini_tx(struct efx_tx_queue *tx_queue)
-{
- struct efx_nic *efx = tx_queue->efx;
- efx_oword_t tx_desc_ptr;
-
- /* Remove TX descriptor ring from card */
- EFX_ZERO_OWORD(tx_desc_ptr);
- efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
- tx_queue->queue);
-
- /* Unpin TX descriptor ring */
- efx_fini_special_buffer(efx, &tx_queue->txd);
-}
-
-/* Free buffers backing TX queue */
-void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
-{
- efx_free_special_buffer(tx_queue->efx, &tx_queue->txd);
-}
-
-/**************************************************************************
- *
- * RX path
- *
- **************************************************************************/
-
-/* Returns a pointer to the specified descriptor in the RX descriptor queue */
-static inline efx_qword_t *
-efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
-{
- return ((efx_qword_t *) (rx_queue->rxd.addr)) + index;
-}
-
-/* This creates an entry in the RX descriptor queue */
-static inline void
-efx_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned index)
-{
- struct efx_rx_buffer *rx_buf;
- efx_qword_t *rxd;
-
- rxd = efx_rx_desc(rx_queue, index);
- rx_buf = efx_rx_buffer(rx_queue, index);
- EFX_POPULATE_QWORD_3(*rxd,
- FSF_AZ_RX_KER_BUF_SIZE,
- rx_buf->len -
- rx_queue->efx->type->rx_buffer_padding,
- FSF_AZ_RX_KER_BUF_REGION, 0,
- FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
-}
-
-/* This writes to the RX_DESC_WPTR register for the specified receive
- * descriptor ring.
- */
-void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- efx_dword_t reg;
- unsigned write_ptr;
-
- while (rx_queue->notified_count != rx_queue->added_count) {
- efx_build_rx_desc(
- rx_queue,
- rx_queue->notified_count & rx_queue->ptr_mask);
- ++rx_queue->notified_count;
- }
-
- wmb();
- write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
- EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
- efx_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
- efx_rx_queue_index(rx_queue));
-}
-
-int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- unsigned entries;
-
- entries = rx_queue->ptr_mask + 1;
- return efx_alloc_special_buffer(efx, &rx_queue->rxd,
- entries * sizeof(efx_qword_t));
-}
-
-void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
-{
- efx_oword_t rx_desc_ptr;
- struct efx_nic *efx = rx_queue->efx;
- bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
- bool iscsi_digest_en = is_b0;
- bool jumbo_en;
-
- /* For kernel-mode queues in Falcon A1, the JUMBO flag enables
- * DMA to continue after a PCIe page boundary (and scattering
- * is not possible). In Falcon B0 and Siena, it enables
- * scatter.
- */
- jumbo_en = !is_b0 || efx->rx_scatter;
-
- netif_dbg(efx, hw, efx->net_dev,
- "RX queue %d ring in special buffers %d-%d\n",
- efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
- rx_queue->rxd.index + rx_queue->rxd.entries - 1);
-
- rx_queue->scatter_n = 0;
-
- /* Pin RX descriptor ring */
- efx_init_special_buffer(efx, &rx_queue->rxd);
-
- /* Push RX descriptor ring to card */
- EFX_POPULATE_OWORD_10(rx_desc_ptr,
- FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
- FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
- FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
- FRF_AZ_RX_DESCQ_EVQ_ID,
- efx_rx_queue_channel(rx_queue)->channel,
- FRF_AZ_RX_DESCQ_OWNER_ID, 0,
- FRF_AZ_RX_DESCQ_LABEL,
- efx_rx_queue_index(rx_queue),
- FRF_AZ_RX_DESCQ_SIZE,
- __ffs(rx_queue->rxd.entries),
- FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
- FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
- FRF_AZ_RX_DESCQ_EN, 1);
- efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
- efx_rx_queue_index(rx_queue));
-}
-
-static void efx_flush_rx_queue(struct efx_rx_queue *rx_queue)
-{
- struct efx_nic *efx = rx_queue->efx;
- efx_oword_t rx_flush_descq;
-
- EFX_POPULATE_OWORD_2(rx_flush_descq,
- FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
- FRF_AZ_RX_FLUSH_DESCQ,
- efx_rx_queue_index(rx_queue));
- efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
-}
-
-void efx_nic_fini_rx(struct efx_rx_queue *rx_queue)
-{
- efx_oword_t rx_desc_ptr;
- struct efx_nic *efx = rx_queue->efx;
-
- /* Remove RX descriptor ring from card */
- EFX_ZERO_OWORD(rx_desc_ptr);
- efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
- efx_rx_queue_index(rx_queue));
-
- /* Unpin RX descriptor ring */
- efx_fini_special_buffer(efx, &rx_queue->rxd);
-}
-
-/* Free buffers backing RX queue */
-void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
-{
- efx_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
-}
-
-/**************************************************************************
- *
- * Flush handling
- *
- **************************************************************************/
-
-/* efx_nic_flush_queues() must be woken up when all flushes are completed,
- * or more RX flushes can be kicked off.
- */
-static bool efx_flush_wake(struct efx_nic *efx)
-{
- /* Ensure that all updates are visible to efx_nic_flush_queues() */
- smp_mb();
-
- return (atomic_read(&efx->drain_pending) == 0 ||
- (atomic_read(&efx->rxq_flush_outstanding) < EFX_RX_FLUSH_COUNT
- && atomic_read(&efx->rxq_flush_pending) > 0));
-}
-
-static bool efx_check_tx_flush_complete(struct efx_nic *efx)
-{
- bool i = true;
- efx_oword_t txd_ptr_tbl;
- struct efx_channel *channel;
- struct efx_tx_queue *tx_queue;
-
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- efx_reado_table(efx, &txd_ptr_tbl,
- FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
- if (EFX_OWORD_FIELD(txd_ptr_tbl,
- FRF_AZ_TX_DESCQ_FLUSH) ||
- EFX_OWORD_FIELD(txd_ptr_tbl,
- FRF_AZ_TX_DESCQ_EN)) {
- netif_dbg(efx, hw, efx->net_dev,
- "flush did not complete on TXQ %d\n",
- tx_queue->queue);
- i = false;
- } else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
- 1, 0)) {
- /* The flush is complete, but we didn't
- * receive a flush completion event
- */
- netif_dbg(efx, hw, efx->net_dev,
- "flush complete on TXQ %d, so drain "
- "the queue\n", tx_queue->queue);
- /* Don't need to increment drain_pending as it
- * has already been incremented for the queues
- * which did not drain
- */
- efx_magic_event(channel,
- EFX_CHANNEL_MAGIC_TX_DRAIN(
- tx_queue));
- }
- }
- }
-
- return i;
-}
-
-/* Flush all the transmit queues, and continue flushing receive queues until
- * they're all flushed. Wait for the DRAIN events to be received so that there
- * are no more RX and TX events left on any channel. */
-int efx_nic_flush_queues(struct efx_nic *efx)
-{
- unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- struct efx_tx_queue *tx_queue;
- int rc = 0;
-
- efx->type->prepare_flush(efx);
-
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- atomic_inc(&efx->drain_pending);
- efx_flush_tx_queue(tx_queue);
- }
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- atomic_inc(&efx->drain_pending);
- rx_queue->flush_pending = true;
- atomic_inc(&efx->rxq_flush_pending);
- }
- }
-
- while (timeout && atomic_read(&efx->drain_pending) > 0) {
- /* If SRIOV is enabled, then offload receive queue flushing to
- * the firmware (though we will still have to poll for
- * completion). If that fails, fall back to the old scheme.
- */
- if (efx_sriov_enabled(efx)) {
- rc = efx_mcdi_flush_rxqs(efx);
- if (!rc)
- goto wait;
- }
-
- /* The hardware supports four concurrent rx flushes, each of
- * which may need to be retried if there is an outstanding
- * descriptor fetch
- */
- efx_for_each_channel(channel, efx) {
- efx_for_each_channel_rx_queue(rx_queue, channel) {
- if (atomic_read(&efx->rxq_flush_outstanding) >=
- EFX_RX_FLUSH_COUNT)
- break;
-
- if (rx_queue->flush_pending) {
- rx_queue->flush_pending = false;
- atomic_dec(&efx->rxq_flush_pending);
- atomic_inc(&efx->rxq_flush_outstanding);
- efx_flush_rx_queue(rx_queue);
- }
- }
- }
-
- wait:
- timeout = wait_event_timeout(efx->flush_wq, efx_flush_wake(efx),
- timeout);
- }
-
- if (atomic_read(&efx->drain_pending) &&
- !efx_check_tx_flush_complete(efx)) {
- netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
- "(rx %d+%d)\n", atomic_read(&efx->drain_pending),
- atomic_read(&efx->rxq_flush_outstanding),
- atomic_read(&efx->rxq_flush_pending));
- rc = -ETIMEDOUT;
-
- atomic_set(&efx->drain_pending, 0);
- atomic_set(&efx->rxq_flush_pending, 0);
- atomic_set(&efx->rxq_flush_outstanding, 0);
- }
-
- efx->type->finish_flush(efx);
-
- return rc;
-}
-
-/**************************************************************************
- *
- * Event queue processing
- * Event queues are processed by per-channel tasklets.
- *
- **************************************************************************/
-
-/* Update a channel's event queue's read pointer (RPTR) register
- *
- * This writes the EVQ_RPTR_REG register for the specified channel's
- * event queue.
- */
-void efx_nic_eventq_read_ack(struct efx_channel *channel)
-{
- efx_dword_t reg;
- struct efx_nic *efx = channel->efx;
-
- EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
- channel->eventq_read_ptr & channel->eventq_mask);
-
- /* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
- * of 4 bytes, but it is really 16 bytes just like later revisions.
- */
- efx_writed(efx, &reg,
- efx->type->evq_rptr_tbl_base +
- FR_BZ_EVQ_RPTR_STEP * channel->channel);
-}
-
-/* Use HW to insert a SW defined event */
-void efx_generate_event(struct efx_nic *efx, unsigned int evq,
- efx_qword_t *event)
-{
- efx_oword_t drv_ev_reg;
-
- BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
- FRF_AZ_DRV_EV_DATA_WIDTH != 64);
- drv_ev_reg.u32[0] = event->u32[0];
- drv_ev_reg.u32[1] = event->u32[1];
- drv_ev_reg.u32[2] = 0;
- drv_ev_reg.u32[3] = 0;
- EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
- efx_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
-}
-
-static void efx_magic_event(struct efx_channel *channel, u32 magic)
-{
- efx_qword_t event;
-
- EFX_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
- FSE_AZ_EV_CODE_DRV_GEN_EV,
- FSF_AZ_DRV_GEN_EV_MAGIC, magic);
- efx_generate_event(channel->efx, channel->channel, &event);
-}
-
-/* Handle a transmit completion event
- *
- * The NIC batches TX completion events; the message we receive is of
- * the form "complete all TX events up to this index".
- */
-static int
-efx_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
-{
- unsigned int tx_ev_desc_ptr;
- unsigned int tx_ev_q_label;
- struct efx_tx_queue *tx_queue;
- struct efx_nic *efx = channel->efx;
- int tx_packets = 0;
-
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
- return 0;
-
- if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
- /* Transmit completion */
- tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
- tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
- tx_queue = efx_channel_get_tx_queue(
- channel, tx_ev_q_label % EFX_TXQ_TYPES);
- tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
- tx_queue->ptr_mask);
- efx_xmit_done(tx_queue, tx_ev_desc_ptr);
- } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
- /* Rewrite the FIFO write pointer */
- tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
- tx_queue = efx_channel_get_tx_queue(
- channel, tx_ev_q_label % EFX_TXQ_TYPES);
-
- netif_tx_lock(efx->net_dev);
- efx_notify_tx_desc(tx_queue);
- netif_tx_unlock(efx->net_dev);
- } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
- EFX_WORKAROUND_10727(efx)) {
- efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
- } else {
- netif_err(efx, tx_err, efx->net_dev,
- "channel %d unexpected TX event "
- EFX_QWORD_FMT"\n", channel->channel,
- EFX_QWORD_VAL(*event));
- }
-
- return tx_packets;
-}
-
-/* Detect errors included in the rx_evt_pkt_ok bit. */
-static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
- const efx_qword_t *event)
-{
- struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
- struct efx_nic *efx = rx_queue->efx;
- bool rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
- bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
- bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
- bool rx_ev_other_err, rx_ev_pause_frm;
- bool rx_ev_hdr_type, rx_ev_mcast_pkt;
- unsigned rx_ev_pkt_type;
-
- rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
- rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
- rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
- rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
- rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
- FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
- rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
- FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
- rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
- FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
- rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
- rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
- rx_ev_drib_nib = ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) ?
- 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
- rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
-
- /* Every error apart from tobe_disc and pause_frm */
- rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
- rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
- rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
-
- /* Count errors that are not in MAC stats. Ignore expected
- * checksum errors during self-test. */
- if (rx_ev_frm_trunc)
- ++channel->n_rx_frm_trunc;
- else if (rx_ev_tobe_disc)
- ++channel->n_rx_tobe_disc;
- else if (!efx->loopback_selftest) {
- if (rx_ev_ip_hdr_chksum_err)
- ++channel->n_rx_ip_hdr_chksum_err;
- else if (rx_ev_tcp_udp_chksum_err)
- ++channel->n_rx_tcp_udp_chksum_err;
- }
-
- /* TOBE_DISC is expected on unicast mismatches; don't print out an
- * error message. FRM_TRUNC indicates RXDP dropped the packet due
- * to a FIFO overflow.
- */
-#ifdef DEBUG
- if (rx_ev_other_err && net_ratelimit()) {
- netif_dbg(efx, rx_err, efx->net_dev,
- " RX queue %d unexpected RX event "
- EFX_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
- efx_rx_queue_index(rx_queue), EFX_QWORD_VAL(*event),
- rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
- rx_ev_ip_hdr_chksum_err ?
- " [IP_HDR_CHKSUM_ERR]" : "",
- rx_ev_tcp_udp_chksum_err ?
- " [TCP_UDP_CHKSUM_ERR]" : "",
- rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
- rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
- rx_ev_drib_nib ? " [DRIB_NIB]" : "",
- rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
- rx_ev_pause_frm ? " [PAUSE]" : "");
- }
-#endif
-
- /* The frame must be discarded if any of these are true. */
- return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
- rx_ev_tobe_disc | rx_ev_pause_frm) ?
- EFX_RX_PKT_DISCARD : 0;
-}
-
-/* Handle receive events that are not in-order. Return true if this
- * can be handled as a partial packet discard, false if it's more
- * serious.
- */
-static bool
-efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
-{
- struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
- struct efx_nic *efx = rx_queue->efx;
- unsigned expected, dropped;
-
- if (rx_queue->scatter_n &&
- index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
- rx_queue->ptr_mask)) {
- ++channel->n_rx_nodesc_trunc;
- return true;
- }
-
- expected = rx_queue->removed_count & rx_queue->ptr_mask;
- dropped = (index - expected) & rx_queue->ptr_mask;
- netif_info(efx, rx_err, efx->net_dev,
- "dropped %d events (index=%d expected=%d)\n",
- dropped, index, expected);
-
- efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
- RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
- return false;
-}
-
-/* Handle a packet received event
- *
- * The NIC gives a "discard" flag if it's a unicast packet with the
- * wrong destination address
- * Also "is multicast" and "matches multicast filter" flags can be used to
- * discard non-matching multicast packets.
- */
-static void
-efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
-{
- unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
- unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
- unsigned expected_ptr;
- bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
- u16 flags;
- struct efx_rx_queue *rx_queue;
- struct efx_nic *efx = channel->efx;
-
- if (unlikely(ACCESS_ONCE(efx->reset_pending)))
- return;
-
- rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
- rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
- WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
- channel->channel);
-
- rx_queue = efx_channel_get_rx_queue(channel);
-
- rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
- expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
- rx_queue->ptr_mask);
-
- /* Check for partial drops and other errors */
- if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
- unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
- if (rx_ev_desc_ptr != expected_ptr &&
- !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
- return;
-
- /* Discard all pending fragments */
- if (rx_queue->scatter_n) {
- efx_rx_packet(
- rx_queue,
- rx_queue->removed_count & rx_queue->ptr_mask,
- rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
- rx_queue->removed_count += rx_queue->scatter_n;
- rx_queue->scatter_n = 0;
- }
-
- /* Return if there is no new fragment */
- if (rx_ev_desc_ptr != expected_ptr)
- return;
-
- /* Discard new fragment if not SOP */
- if (!rx_ev_sop) {
- efx_rx_packet(
- rx_queue,
- rx_queue->removed_count & rx_queue->ptr_mask,
- 1, 0, EFX_RX_PKT_DISCARD);
- ++rx_queue->removed_count;
- return;
- }
- }
-
- ++rx_queue->scatter_n;
- if (rx_ev_cont)
- return;
-
- rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
- rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
- rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
-
- if (likely(rx_ev_pkt_ok)) {
- /* If packet is marked as OK then we can rely on the
- * hardware checksum and classification.
- */
- flags = 0;
- switch (rx_ev_hdr_type) {
- case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
- flags |= EFX_RX_PKT_TCP;
- /* fall through */
- case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
- flags |= EFX_RX_PKT_CSUMMED;
- /* fall through */
- case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
- case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
- break;
- }
- } else {
- flags = efx_handle_rx_not_ok(rx_queue, event);
- }
-
- /* Detect multicast packets that didn't match the filter */
- rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
- if (rx_ev_mcast_pkt) {
- unsigned int rx_ev_mcast_hash_match =
- EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
-
- if (unlikely(!rx_ev_mcast_hash_match)) {
- ++channel->n_rx_mcast_mismatch;
- flags |= EFX_RX_PKT_DISCARD;
- }
- }
-
- channel->irq_mod_score += 2;
-
- /* Handle received packet */
- efx_rx_packet(rx_queue,
- rx_queue->removed_count & rx_queue->ptr_mask,
- rx_queue->scatter_n, rx_ev_byte_cnt, flags);
- rx_queue->removed_count += rx_queue->scatter_n;
- rx_queue->scatter_n = 0;
-}
-
-/* If this flush done event corresponds to a &struct efx_tx_queue, then
- * send an %EFX_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
- * of all transmit completions.
- */
-static void
-efx_handle_tx_flush_done(struct efx_nic *efx, efx_qword_t *event)
-{
- struct efx_tx_queue *tx_queue;
- int qid;
-
- qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
- if (qid < EFX_TXQ_TYPES * efx->n_tx_channels) {
- tx_queue = efx_get_tx_queue(efx, qid / EFX_TXQ_TYPES,
- qid % EFX_TXQ_TYPES);
- if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
- efx_magic_event(tx_queue->channel,
- EFX_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
- }
- }
-}
-
-/* If this flush done event corresponds to a &struct efx_rx_queue: If the flush
- * was successful then send an %EFX_CHANNEL_MAGIC_RX_DRAIN, otherwise add
- * the RX queue back to the mask of RX queues in need of flushing.
- */
-static void
-efx_handle_rx_flush_done(struct efx_nic *efx, efx_qword_t *event)
-{
- struct efx_channel *channel;
- struct efx_rx_queue *rx_queue;
- int qid;
- bool failed;
-
- qid = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
- failed = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
- if (qid >= efx->n_channels)
- return;
- channel = efx_get_channel(efx, qid);
- if (!efx_channel_has_rx_queue(channel))
- return;
- rx_queue = efx_channel_get_rx_queue(channel);
-
- if (failed) {
- netif_info(efx, hw, efx->net_dev,
- "RXQ %d flush retry\n", qid);
- rx_queue->flush_pending = true;
- atomic_inc(&efx->rxq_flush_pending);
- } else {
- efx_magic_event(efx_rx_queue_channel(rx_queue),
- EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
- }
- atomic_dec(&efx->rxq_flush_outstanding);
- if (efx_flush_wake(efx))
- wake_up(&efx->flush_wq);
-}
-
-static void
-efx_handle_drain_event(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
-
- WARN_ON(atomic_read(&efx->drain_pending) == 0);
- atomic_dec(&efx->drain_pending);
- if (efx_flush_wake(efx))
- wake_up(&efx->flush_wq);
-}
-
-static void
-efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
-{
- struct efx_nic *efx = channel->efx;
- struct efx_rx_queue *rx_queue =
- efx_channel_has_rx_queue(channel) ?
- efx_channel_get_rx_queue(channel) : NULL;
- unsigned magic, code;
-
- magic = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
- code = _EFX_CHANNEL_MAGIC_CODE(magic);
-
- if (magic == EFX_CHANNEL_MAGIC_TEST(channel)) {
- channel->event_test_cpu = raw_smp_processor_id();
- } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_FILL(rx_queue)) {
- /* The queue must be empty, so we won't receive any rx
- * events, so efx_process_channel() won't refill the
- * queue. Refill it here */
- efx_fast_push_rx_descriptors(rx_queue);
- } else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
- rx_queue->enabled = false;
- efx_handle_drain_event(channel);
- } else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
- efx_handle_drain_event(channel);
- } else {
- netif_dbg(efx, hw, efx->net_dev, "channel %d received "
- "generated event "EFX_QWORD_FMT"\n",
- channel->channel, EFX_QWORD_VAL(*event));
- }
-}
-
-static void
-efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
-{
- struct efx_nic *efx = channel->efx;
- unsigned int ev_sub_code;
- unsigned int ev_sub_data;
-
- ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
- ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
-
- switch (ev_sub_code) {
- case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
- netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
- channel->channel, ev_sub_data);
- efx_handle_tx_flush_done(efx, event);
- efx_sriov_tx_flush_done(efx, event);
- break;
- case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
- netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
- channel->channel, ev_sub_data);
- efx_handle_rx_flush_done(efx, event);
- efx_sriov_rx_flush_done(efx, event);
- break;
- case FSE_AZ_EVQ_INIT_DONE_EV:
- netif_dbg(efx, hw, efx->net_dev,
- "channel %d EVQ %d initialised\n",
- channel->channel, ev_sub_data);
- break;
- case FSE_AZ_SRM_UPD_DONE_EV:
- netif_vdbg(efx, hw, efx->net_dev,
- "channel %d SRAM update done\n", channel->channel);
- break;
- case FSE_AZ_WAKE_UP_EV:
- netif_vdbg(efx, hw, efx->net_dev,
- "channel %d RXQ %d wakeup event\n",
- channel->channel, ev_sub_data);
- break;
- case FSE_AZ_TIMER_EV:
- netif_vdbg(efx, hw, efx->net_dev,
- "channel %d RX queue %d timer expired\n",
- channel->channel, ev_sub_data);
- break;
- case FSE_AA_RX_RECOVER_EV:
- netif_err(efx, rx_err, efx->net_dev,
- "channel %d seen DRIVER RX_RESET event. "
- "Resetting.\n", channel->channel);
- atomic_inc(&efx->rx_reset);
- efx_schedule_reset(efx,
- EFX_WORKAROUND_6555(efx) ?
- RESET_TYPE_RX_RECOVERY :
- RESET_TYPE_DISABLE);
- break;
- case FSE_BZ_RX_DSC_ERROR_EV:
- if (ev_sub_data < EFX_VI_BASE) {
- netif_err(efx, rx_err, efx->net_dev,
- "RX DMA Q %d reports descriptor fetch error."
- " RX Q %d is disabled.\n", ev_sub_data,
- ev_sub_data);
- efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
- } else
- efx_sriov_desc_fetch_err(efx, ev_sub_data);
- break;
- case FSE_BZ_TX_DSC_ERROR_EV:
- if (ev_sub_data < EFX_VI_BASE) {
- netif_err(efx, tx_err, efx->net_dev,
- "TX DMA Q %d reports descriptor fetch error."
- " TX Q %d is disabled.\n", ev_sub_data,
- ev_sub_data);
- efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
- } else
- efx_sriov_desc_fetch_err(efx, ev_sub_data);
- break;
- default:
- netif_vdbg(efx, hw, efx->net_dev,
- "channel %d unknown driver event code %d "
- "data %04x\n", channel->channel, ev_sub_code,
- ev_sub_data);
- break;
- }
-}
-
-int efx_nic_process_eventq(struct efx_channel *channel, int budget)
-{
- struct efx_nic *efx = channel->efx;
- unsigned int read_ptr;
- efx_qword_t event, *p_event;
- int ev_code;
- int tx_packets = 0;
- int spent = 0;
-
- read_ptr = channel->eventq_read_ptr;
-
- for (;;) {
- p_event = efx_event(channel, read_ptr);
- event = *p_event;
-
- if (!efx_event_present(&event))
- /* End of events */
- break;
-
- netif_vdbg(channel->efx, intr, channel->efx->net_dev,
- "channel %d event is "EFX_QWORD_FMT"\n",
- channel->channel, EFX_QWORD_VAL(event));
-
- /* Clear this event by marking it all ones */
- EFX_SET_QWORD(*p_event);
-
- ++read_ptr;
-
- ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
-
- switch (ev_code) {
- case FSE_AZ_EV_CODE_RX_EV:
- efx_handle_rx_event(channel, &event);
- if (++spent == budget)
- goto out;
- break;
- case FSE_AZ_EV_CODE_TX_EV:
- tx_packets += efx_handle_tx_event(channel, &event);
- if (tx_packets > efx->txq_entries) {
- spent = budget;
- goto out;
- }
- break;
- case FSE_AZ_EV_CODE_DRV_GEN_EV:
- efx_handle_generated_event(channel, &event);
- break;
- case FSE_AZ_EV_CODE_DRIVER_EV:
- efx_handle_driver_event(channel, &event);
- break;
- case FSE_CZ_EV_CODE_USER_EV:
- efx_sriov_event(channel, &event);
- break;
- case FSE_CZ_EV_CODE_MCDI_EV:
- efx_mcdi_process_event(channel, &event);
- break;
- case FSE_AZ_EV_CODE_GLOBAL_EV:
- if (efx->type->handle_global_event &&
- efx->type->handle_global_event(channel, &event))
- break;
- /* else fall through */
- default:
- netif_err(channel->efx, hw, channel->efx->net_dev,
- "channel %d unknown event type %d (data "
- EFX_QWORD_FMT ")\n", channel->channel,
- ev_code, EFX_QWORD_VAL(event));
- }
- }
-
-out:
- channel->eventq_read_ptr = read_ptr;
- return spent;
-}
-
/* Check whether an event is present in the eventq at the current
* read pointer. Only useful for self-test.
*/
@@ -1382,323 +58,18 @@ bool efx_nic_event_present(struct efx_channel *channel)
return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}
-/* Allocate buffer table entries for event queue */
-int efx_nic_probe_eventq(struct efx_channel *channel)
-{
- struct efx_nic *efx = channel->efx;
- unsigned entries;
-
- entries = channel->eventq_mask + 1;
- return efx_alloc_special_buffer(efx, &channel->eventq,
- entries * sizeof(efx_qword_t));
-}
-
-void efx_nic_init_eventq(struct efx_channel *channel)
-{
- efx_oword_t reg;
- struct efx_nic *efx = channel->efx;
-
- netif_dbg(efx, hw, efx->net_dev,
- "channel %d event queue in special buffers %d-%d\n",
- channel->channel, channel->eventq.index,
- channel->eventq.index + channel->eventq.entries - 1);
-
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
- EFX_POPULATE_OWORD_3(reg,
- FRF_CZ_TIMER_Q_EN, 1,
- FRF_CZ_HOST_NOTIFY_MODE, 0,
- FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
- efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
- }
-
- /* Pin event queue buffer */
- efx_init_special_buffer(efx, &channel->eventq);
-
- /* Fill event queue with all ones (i.e. empty events) */
- memset(channel->eventq.addr, 0xff, channel->eventq.len);
-
- /* Push event queue to card */
- EFX_POPULATE_OWORD_3(reg,
- FRF_AZ_EVQ_EN, 1,
- FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
- FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
- efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
- channel->channel);
-
- efx->type->push_irq_moderation(channel);
-}
-
-void efx_nic_fini_eventq(struct efx_channel *channel)
-{
- efx_oword_t reg;
- struct efx_nic *efx = channel->efx;
-
- /* Remove event queue from card */
- EFX_ZERO_OWORD(reg);
- efx_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
- channel->channel);
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
- efx_writeo_table(efx, &reg, FR_BZ_TIMER_TBL, channel->channel);
-
- /* Unpin event queue */
- efx_fini_special_buffer(efx, &channel->eventq);
-}
-
-/* Free buffers backing event queue */
-void efx_nic_remove_eventq(struct efx_channel *channel)
-{
- efx_free_special_buffer(channel->efx, &channel->eventq);
-}
-
-
void efx_nic_event_test_start(struct efx_channel *channel)
{
channel->event_test_cpu = -1;
smp_wmb();
- efx_magic_event(channel, EFX_CHANNEL_MAGIC_TEST(channel));
+ channel->efx->type->ev_test_generate(channel);
}
-void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
-{
- efx_magic_event(efx_rx_queue_channel(rx_queue),
- EFX_CHANNEL_MAGIC_FILL(rx_queue));
-}
-
-/**************************************************************************
- *
- * Hardware interrupts
- * The hardware interrupt handler does very little work; all the event
- * queue processing is carried out by per-channel tasklets.
- *
- **************************************************************************/
-
-/* Enable/disable/generate interrupts */
-static inline void efx_nic_interrupts(struct efx_nic *efx,
- bool enabled, bool force)
-{
- efx_oword_t int_en_reg_ker;
-
- EFX_POPULATE_OWORD_3(int_en_reg_ker,
- FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
- FRF_AZ_KER_INT_KER, force,
- FRF_AZ_DRV_INT_EN_KER, enabled);
- efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
-}
-
-void efx_nic_enable_interrupts(struct efx_nic *efx)
-{
- EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
- wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
-
- efx_nic_interrupts(efx, true, false);
-}
-
-void efx_nic_disable_interrupts(struct efx_nic *efx)
-{
- /* Disable interrupts */
- efx_nic_interrupts(efx, false, false);
-}
-
-/* Generate a test interrupt
- * Interrupt must already have been enabled, otherwise nasty things
- * may happen.
- */
void efx_nic_irq_test_start(struct efx_nic *efx)
{
efx->last_irq_cpu = -1;
smp_wmb();
- efx_nic_interrupts(efx, true, true);
-}
-
-/* Process a fatal interrupt
- * Disable bus mastering ASAP and schedule a reset
- */
-irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx)
-{
- struct falcon_nic_data *nic_data = efx->nic_data;
- efx_oword_t *int_ker = efx->irq_status.addr;
- efx_oword_t fatal_intr;
- int error, mem_perr;
-
- efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
- error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
-
- netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EFX_OWORD_FMT" status "
- EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
- EFX_OWORD_VAL(fatal_intr),
- error ? "disabling bus mastering" : "no recognised error");
-
- /* If this is a memory parity error dump which blocks are offending */
- mem_perr = (EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
- EFX_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
- if (mem_perr) {
- efx_oword_t reg;
- efx_reado(efx, &reg, FR_AZ_MEM_STAT);
- netif_err(efx, hw, efx->net_dev,
- "SYSTEM ERROR: memory parity error "EFX_OWORD_FMT"\n",
- EFX_OWORD_VAL(reg));
- }
-
- /* Disable both devices */
- pci_clear_master(efx->pci_dev);
- if (efx_nic_is_dual_func(efx))
- pci_clear_master(nic_data->pci_dev2);
- efx_nic_disable_interrupts(efx);
-
- /* Count errors and reset or disable the NIC accordingly */
- if (efx->int_error_count == 0 ||
- time_after(jiffies, efx->int_error_expire)) {
- efx->int_error_count = 0;
- efx->int_error_expire =
- jiffies + EFX_INT_ERROR_EXPIRE * HZ;
- }
- if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
- netif_err(efx, hw, efx->net_dev,
- "SYSTEM ERROR - reset scheduled\n");
- efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
- } else {
- netif_err(efx, hw, efx->net_dev,
- "SYSTEM ERROR - max number of errors seen."
- "NIC will be disabled\n");
- efx_schedule_reset(efx, RESET_TYPE_DISABLE);
- }
-
- return IRQ_HANDLED;
-}
-
-/* Handle a legacy interrupt
- * Acknowledges the interrupt and schedules event queue processing.
- */
-static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
-{
- struct efx_nic *efx = dev_id;
- efx_oword_t *int_ker = efx->irq_status.addr;
- irqreturn_t result = IRQ_NONE;
- struct efx_channel *channel;
- efx_dword_t reg;
- u32 queues;
- int syserr;
-
- /* Could this be ours? If interrupts are disabled then the
- * channel state may not be valid.
- */
- if (!efx->legacy_irq_enabled)
- return result;
-
- /* Read the ISR which also ACKs the interrupts */
- efx_readd(efx, &reg, FR_BZ_INT_ISR0);
- queues = EFX_EXTRACT_DWORD(reg, 0, 31);
-
- /* Legacy interrupts are disabled too late by the EEH kernel
- * code. Disable them earlier.
- * If an EEH error occurred, the read will have returned all ones.
- */
- if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
- !efx->eeh_disabled_legacy_irq) {
- disable_irq_nosync(efx->legacy_irq);
- efx->eeh_disabled_legacy_irq = true;
- }
-
- /* Handle non-event-queue sources */
- if (queues & (1U << efx->irq_level)) {
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
- if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
- efx->last_irq_cpu = raw_smp_processor_id();
- }
-
- if (queues != 0) {
- if (EFX_WORKAROUND_15783(efx))
- efx->irq_zero_count = 0;
-
- /* Schedule processing of any interrupting queues */
- efx_for_each_channel(channel, efx) {
- if (queues & 1)
- efx_schedule_channel_irq(channel);
- queues >>= 1;
- }
- result = IRQ_HANDLED;
-
- } else if (EFX_WORKAROUND_15783(efx)) {
- efx_qword_t *event;
-
- /* We can't return IRQ_HANDLED more than once on seeing ISR=0
- * because this might be a shared interrupt. */
- if (efx->irq_zero_count++ == 0)
- result = IRQ_HANDLED;
-
- /* Ensure we schedule or rearm all event queues */
- efx_for_each_channel(channel, efx) {
- event = efx_event(channel, channel->eventq_read_ptr);
- if (efx_event_present(event))
- efx_schedule_channel_irq(channel);
- else
- efx_nic_eventq_read_ack(channel);
- }
- }
-
- if (result == IRQ_HANDLED)
- netif_vdbg(efx, intr, efx->net_dev,
- "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
- irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
-
- return result;
-}
-
-/* Handle an MSI interrupt
- *
- * Handle an MSI hardware interrupt. This routine schedules event
- * queue processing. No interrupt acknowledgement cycle is necessary.
- * Also, we never need to check that the interrupt is for us, since
- * MSI interrupts cannot be shared.
- */
-static irqreturn_t efx_msi_interrupt(int irq, void *dev_id)
-{
- struct efx_channel *channel = *(struct efx_channel **)dev_id;
- struct efx_nic *efx = channel->efx;
- efx_oword_t *int_ker = efx->irq_status.addr;
- int syserr;
-
- netif_vdbg(efx, intr, efx->net_dev,
- "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
- irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
-
- /* Handle non-event-queue sources */
- if (channel->channel == efx->irq_level) {
- syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
- if (unlikely(syserr))
- return efx_nic_fatal_interrupt(efx);
- efx->last_irq_cpu = raw_smp_processor_id();
- }
-
- /* Schedule processing of the channel */
- efx_schedule_channel_irq(channel);
-
- return IRQ_HANDLED;
-}
-
-
-/* Setup RSS indirection table.
- * This maps from the hash value of the packet to RXQ
- */
-void efx_nic_push_rx_indir_table(struct efx_nic *efx)
-{
- size_t i = 0;
- efx_dword_t dword;
-
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
- return;
-
- BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
- FR_BZ_RX_INDIRECTION_TBL_ROWS);
-
- for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
- EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
- efx->rx_indir_table[i]);
- efx_writed(efx, &dword,
- FR_BZ_RX_INDIRECTION_TBL +
- FR_BZ_RX_INDIRECTION_TBL_STEP * i);
- }
+ efx->type->irq_test_generate(efx);
}
/* Hook interrupt handler(s)
@@ -1711,13 +82,8 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
int rc;
if (!EFX_INT_MODE_USE_MSI(efx)) {
- irq_handler_t handler;
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
- handler = efx_legacy_interrupt;
- else
- handler = falcon_legacy_interrupt_a1;
-
- rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
+ rc = request_irq(efx->legacy_irq,
+ efx->type->irq_handle_legacy, IRQF_SHARED,
efx->name, efx);
if (rc) {
netif_err(efx, drv, efx->net_dev,
@@ -1742,10 +108,10 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
/* Hook MSI or MSI-X interrupt */
n_irqs = 0;
efx_for_each_channel(channel, efx) {
- rc = request_irq(channel->irq, efx_msi_interrupt,
+ rc = request_irq(channel->irq, efx->type->irq_handle_msi,
IRQF_PROBE_SHARED, /* Not shared */
- efx->channel_name[channel->channel],
- &efx->channel[channel->channel]);
+ efx->msi_context[channel->channel].name,
+ &efx->msi_context[channel->channel]);
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed to hook IRQ %d\n", channel->irq);
@@ -1774,7 +140,7 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
efx_for_each_channel(channel, efx) {
if (n_irqs-- == 0)
break;
- free_irq(channel->irq, &efx->channel[channel->channel]);
+ free_irq(channel->irq, &efx->msi_context[channel->channel]);
}
fail1:
return rc;
@@ -1783,7 +149,6 @@ int efx_nic_init_interrupt(struct efx_nic *efx)
void efx_nic_fini_interrupt(struct efx_nic *efx)
{
struct efx_channel *channel;
- efx_oword_t reg;
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
@@ -1792,167 +157,13 @@ void efx_nic_fini_interrupt(struct efx_nic *efx)
/* Disable MSI/MSI-X interrupts */
efx_for_each_channel(channel, efx)
- free_irq(channel->irq, &efx->channel[channel->channel]);
-
- /* ACK legacy interrupt */
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
- efx_reado(efx, &reg, FR_BZ_INT_ISR0);
- else
- falcon_irq_ack_a1(efx);
+ free_irq(channel->irq, &efx->msi_context[channel->channel]);
/* Disable legacy interrupt */
if (efx->legacy_irq)
free_irq(efx->legacy_irq, efx);
}
-/* Looks at available SRAM resources and works out how many queues we
- * can support, and where things like descriptor caches should live.
- *
- * SRAM is split up as follows:
- * 0 buftbl entries for channels
- * efx->vf_buftbl_base buftbl entries for SR-IOV
- * efx->rx_dc_base RX descriptor caches
- * efx->tx_dc_base TX descriptor caches
- */
-void efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw)
-{
- unsigned vi_count, buftbl_min;
-
- /* Account for the buffer table entries backing the datapath channels
- * and the descriptor caches for those channels.
- */
- buftbl_min = ((efx->n_rx_channels * EFX_MAX_DMAQ_SIZE +
- efx->n_tx_channels * EFX_TXQ_TYPES * EFX_MAX_DMAQ_SIZE +
- efx->n_channels * EFX_MAX_EVQ_SIZE)
- * sizeof(efx_qword_t) / EFX_BUF_SIZE);
- vi_count = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
-
-#ifdef CONFIG_SFC_SRIOV
- if (efx_sriov_wanted(efx)) {
- unsigned vi_dc_entries, buftbl_free, entries_per_vf, vf_limit;
-
- efx->vf_buftbl_base = buftbl_min;
-
- vi_dc_entries = RX_DC_ENTRIES + TX_DC_ENTRIES;
- vi_count = max(vi_count, EFX_VI_BASE);
- buftbl_free = (sram_lim_qw - buftbl_min -
- vi_count * vi_dc_entries);
-
- entries_per_vf = ((vi_dc_entries + EFX_VF_BUFTBL_PER_VI) *
- efx_vf_size(efx));
- vf_limit = min(buftbl_free / entries_per_vf,
- (1024U - EFX_VI_BASE) >> efx->vi_scale);
-
- if (efx->vf_count > vf_limit) {
- netif_err(efx, probe, efx->net_dev,
- "Reducing VF count from from %d to %d\n",
- efx->vf_count, vf_limit);
- efx->vf_count = vf_limit;
- }
- vi_count += efx->vf_count * efx_vf_size(efx);
- }
-#endif
-
- efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
- efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
-}
-
-u32 efx_nic_fpga_ver(struct efx_nic *efx)
-{
- efx_oword_t altera_build;
- efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
- return EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
-}
-
-void efx_nic_init_common(struct efx_nic *efx)
-{
- efx_oword_t temp;
-
- /* Set positions of descriptor caches in SRAM. */
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
- efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
- efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
-
- /* Set TX descriptor cache size. */
- BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
- efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
-
- /* Set RX descriptor cache size. Set low watermark to size-8, as
- * this allows most efficient prefetching.
- */
- BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
- efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
- EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
- efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
-
- /* Program INT_KER address */
- EFX_POPULATE_OWORD_2(temp,
- FRF_AZ_NORM_INT_VEC_DIS_KER,
- EFX_INT_MODE_USE_MSI(efx),
- FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
- efx_writeo(efx, &temp, FR_AZ_INT_ADR_KER);
-
- if (EFX_WORKAROUND_17213(efx) && !EFX_INT_MODE_USE_MSI(efx))
- /* Use an interrupt level unused by event queues */
- efx->irq_level = 0x1f;
- else
- /* Use a valid MSI-X vector */
- efx->irq_level = 0;
-
- /* Enable all the genuinely fatal interrupts. (They are still
- * masked by the overall interrupt mask, controlled by
- * falcon_interrupts()).
- *
- * Note: All other fatal interrupts are enabled
- */
- EFX_POPULATE_OWORD_3(temp,
- FRF_AZ_ILL_ADR_INT_KER_EN, 1,
- FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
- FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
- if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0)
- EFX_SET_OWORD_FIELD(temp, FRF_CZ_SRAM_PERR_INT_P_KER_EN, 1);
- EFX_INVERT_OWORD(temp);
- efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
-
- efx_nic_push_rx_indir_table(efx);
-
- /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
- * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
- */
- efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
- /* Enable SW_EV to inherit in char driver - assume harmless here */
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
- /* Prefetch threshold 2 => fetch when descriptor cache half empty */
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
- /* Disable hardware watchdog which can misfire */
- EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
- /* Squash TX of packets of 16 bytes or less */
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0)
- EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
- efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
-
- if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
- EFX_POPULATE_OWORD_4(temp,
- /* Default values */
- FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
- FRF_BZ_TX_PACE_SB_AF, 0xb,
- FRF_BZ_TX_PACE_FB_BASE, 0,
- /* Allow large pace values in the
- * fast bin. */
- FRF_BZ_TX_PACE_BIN_TH,
- FFE_BZ_TX_PACE_RESERVED);
- efx_writeo(efx, &temp, FR_BZ_TX_PACE);
- }
-}
-
/* Register dump */
#define REGISTER_REVISION_A 1
@@ -2217,3 +428,86 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)
}
}
}
+
+/**
+ * efx_nic_describe_stats - Describe supported statistics for ethtool
+ * @desc: Array of &struct efx_hw_stat_desc describing the statistics
+ * @count: Length of the @desc array
+ * @mask: Bitmask of which elements of @desc are enabled
+ * @names: Buffer to copy names to, or %NULL. The names are copied
+ * starting at intervals of %ETH_GSTRING_LEN bytes.
+ *
+ * Returns the number of visible statistics, i.e. the number of set
+ * bits in the first @count bits of @mask for which a name is defined.
+ */
+size_t efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u8 *names)
+{
+ size_t visible = 0;
+ size_t index;
+
+ for_each_set_bit(index, mask, count) {
+ if (desc[index].name) {
+ if (names) {
+ strlcpy(names, desc[index].name,
+ ETH_GSTRING_LEN);
+ names += ETH_GSTRING_LEN;
+ }
+ ++visible;
+ }
+ }
+
+ return visible;
+}
+
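+/* A usage sketch (the descriptor array, mask and string buffer here are
+ * hypothetical, not defined by this patch): an ethtool .get_strings hook can
+ * hand the visible names straight to userspace, e.g.
+ *
+ *	static const struct efx_hw_stat_desc my_stat_desc[MY_STAT_COUNT] = { ... };
+ *	static const unsigned long my_stat_mask[BITS_TO_LONGS(MY_STAT_COUNT)] = { ... };
+ *
+ *	n_stats = efx_nic_describe_stats(my_stat_desc, MY_STAT_COUNT,
+ *					 my_stat_mask, strings);
+ */
+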
+/**
+ * efx_nic_update_stats - Convert statistics DMA buffer to array of u64
+ * @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer
+ * layout. DMA widths of 0, 16, 32 and 64 are supported; where
+ * the width is specified as 0 the corresponding element of
+ * @stats is not updated.
+ * @count: Length of the @desc array
+ * @mask: Bitmask of which elements of @desc are enabled
+ * @stats: Buffer to update with the converted statistics. The length
+ * of this array must be at least the number of set bits in the
+ * first @count bits of @mask.
+ * @dma_buf: DMA buffer containing hardware statistics
+ * @accumulate: If set, the converted values will be added rather than
+ * directly stored to the corresponding elements of @stats
+ */
+void efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask,
+ u64 *stats, const void *dma_buf, bool accumulate)
+{
+ size_t index;
+
+ for_each_set_bit(index, mask, count) {
+ if (desc[index].dma_width) {
+ const void *addr = dma_buf + desc[index].offset;
+ u64 val;
+
+ switch (desc[index].dma_width) {
+ case 16:
+ val = le16_to_cpup((__le16 *)addr);
+ break;
+ case 32:
+ val = le32_to_cpup((__le32 *)addr);
+ break;
+ case 64:
+ val = le64_to_cpup((__le64 *)addr);
+ break;
+ default:
+ WARN_ON(1);
+ val = 0;
+ break;
+ }
+
+ if (accumulate)
+ *stats += val;
+ else
+ *stats = val;
+ }
+
+ ++stats;
+ }
+}
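+
+/* A companion sketch for the conversion helper (my_stat_desc, my_stat_mask
+ * and dma_buf are the same hypothetical objects as above): once the NIC has
+ * DMAed its counters into dma_buf, the caller converts them into u64s, e.g.
+ *
+ *	u64 stats[MY_STAT_COUNT];
+ *
+ *	efx_nic_update_stats(my_stat_desc, MY_STAT_COUNT, my_stat_mask,
+ *			     stats, dma_buf, false);
+ */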
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index d63c2991a75..4b1e188f7a2 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2011 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -16,17 +16,13 @@
#include "net_driver.h"
#include "efx.h"
#include "mcdi.h"
-#include "spi.h"
-
-/*
- * Falcon hardware control
- */
enum {
EFX_REV_FALCON_A0 = 0,
EFX_REV_FALCON_A1 = 1,
EFX_REV_FALCON_B0 = 2,
EFX_REV_SIENA_A0 = 3,
+ EFX_REV_HUNT_A0 = 4,
};
static inline int efx_nic_rev(struct efx_nic *efx)
@@ -34,7 +30,7 @@ static inline int efx_nic_rev(struct efx_nic *efx)
return efx->type->revision;
}
-extern u32 efx_nic_fpga_ver(struct efx_nic *efx);
+extern u32 efx_farch_fpga_ver(struct efx_nic *efx);
/* NIC has two interlinked PCI functions for the same port. */
static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
@@ -42,6 +38,65 @@ static inline bool efx_nic_is_dual_func(struct efx_nic *efx)
return efx_nic_rev(efx) < EFX_REV_FALCON_B0;
}
+/* Read the current event from the event queue */
+static inline efx_qword_t *efx_event(struct efx_channel *channel,
+ unsigned int index)
+{
+ return ((efx_qword_t *) (channel->eventq.buf.addr)) +
+ (index & channel->eventq_mask);
+}
+
+/* See if an event is present
+ *
+ * We check both the high and low dword of the event for all ones. We
+ * wrote all ones when we cleared the event, and no valid event can
+ * have all ones in either its high or low dwords. This approach is
+ * robust against reordering.
+ *
+ * Note that using a single 64-bit comparison is incorrect; even
+ * though the CPU read will be atomic, the DMA write may not be.
+ */
+static inline int efx_event_present(efx_qword_t *event)
+{
+ return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
+ EFX_DWORD_IS_ALL_ONES(event->dword[1]));
+}
+
+/* Returns a pointer to the specified transmit descriptor in the TX
+ * descriptor queue belonging to the specified channel.
+ */
+static inline efx_qword_t *
+efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+ return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
+}
+
+/* Decide whether to push a TX descriptor to the NIC vs merely writing
+ * the doorbell. This can reduce latency when we are adding a single
+ * descriptor to an empty queue, but is otherwise pointless. Further,
+ * Falcon and Siena have hardware bugs (SF bug 33851) that may be
+ * triggered if we don't check this.
+ */
+static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
+ unsigned int write_count)
+{
+ unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+
+ if (empty_read_count == 0)
+ return false;
+
+ tx_queue->empty_read_count = 0;
+ return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0
+ && tx_queue->write_count - write_count == 1;
+}
+
+/* Returns a pointer to the specified descriptor in the RX descriptor queue */
+static inline efx_qword_t *
+efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
+{
+ return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
+}
+
enum {
PHY_TYPE_NONE = 0,
PHY_TYPE_TXC43128 = 1,
@@ -59,9 +114,6 @@ enum {
(1 << LOOPBACK_XGXS) | \
(1 << LOOPBACK_XAUI))
-#define FALCON_GMAC_LOOPBACKS \
- (1 << LOOPBACK_GMAC)
-
/* Alignment of PCIe DMA boundaries (4KB) */
#define EFX_PAGE_SIZE 4096
/* Size and alignment of buffer table entries (same) */
@@ -105,13 +157,96 @@ struct falcon_board {
};
/**
+ * struct falcon_spi_device - a Falcon SPI (Serial Peripheral Interface) device
+ * @device_id: Controller's id for the device
+ * @size: Size (in bytes)
+ * @addr_len: Number of address bytes in read/write commands
+ * @munge_address: Flag whether addresses should be munged.
+ * Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
+ * use bit 3 of the command byte as address bit A8, rather
+ * than having a two-byte address. If this flag is set, then
+ * commands should be munged in this way.
+ * @erase_command: Erase command (or 0 if sector erase not needed).
+ * @erase_size: Erase sector size (in bytes)
+ * Erase commands affect sectors with this size and alignment.
+ * This must be a power of two.
+ * @block_size: Write block size (in bytes).
+ * Write commands are limited to blocks with this size and alignment.
+ */
+struct falcon_spi_device {
+ int device_id;
+ unsigned int size;
+ unsigned int addr_len;
+ unsigned int munge_address:1;
+ u8 erase_command;
+ unsigned int erase_size;
+ unsigned int block_size;
+};
+
+static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
+{
+ return spi->size != 0;
+}
+
+enum {
+ FALCON_STAT_tx_bytes,
+ FALCON_STAT_tx_packets,
+ FALCON_STAT_tx_pause,
+ FALCON_STAT_tx_control,
+ FALCON_STAT_tx_unicast,
+ FALCON_STAT_tx_multicast,
+ FALCON_STAT_tx_broadcast,
+ FALCON_STAT_tx_lt64,
+ FALCON_STAT_tx_64,
+ FALCON_STAT_tx_65_to_127,
+ FALCON_STAT_tx_128_to_255,
+ FALCON_STAT_tx_256_to_511,
+ FALCON_STAT_tx_512_to_1023,
+ FALCON_STAT_tx_1024_to_15xx,
+ FALCON_STAT_tx_15xx_to_jumbo,
+ FALCON_STAT_tx_gtjumbo,
+ FALCON_STAT_tx_non_tcpudp,
+ FALCON_STAT_tx_mac_src_error,
+ FALCON_STAT_tx_ip_src_error,
+ FALCON_STAT_rx_bytes,
+ FALCON_STAT_rx_good_bytes,
+ FALCON_STAT_rx_bad_bytes,
+ FALCON_STAT_rx_packets,
+ FALCON_STAT_rx_good,
+ FALCON_STAT_rx_bad,
+ FALCON_STAT_rx_pause,
+ FALCON_STAT_rx_control,
+ FALCON_STAT_rx_unicast,
+ FALCON_STAT_rx_multicast,
+ FALCON_STAT_rx_broadcast,
+ FALCON_STAT_rx_lt64,
+ FALCON_STAT_rx_64,
+ FALCON_STAT_rx_65_to_127,
+ FALCON_STAT_rx_128_to_255,
+ FALCON_STAT_rx_256_to_511,
+ FALCON_STAT_rx_512_to_1023,
+ FALCON_STAT_rx_1024_to_15xx,
+ FALCON_STAT_rx_15xx_to_jumbo,
+ FALCON_STAT_rx_gtjumbo,
+ FALCON_STAT_rx_bad_lt64,
+ FALCON_STAT_rx_bad_gtjumbo,
+ FALCON_STAT_rx_overflow,
+ FALCON_STAT_rx_symbol_error,
+ FALCON_STAT_rx_align_error,
+ FALCON_STAT_rx_length_error,
+ FALCON_STAT_rx_internal_error,
+ FALCON_STAT_rx_nodesc_drop_cnt,
+ FALCON_STAT_COUNT
+};
+
+/**
* struct falcon_nic_data - Falcon NIC state
* @pci_dev2: Secondary function of Falcon A
* @board: Board state and functions
+ * @stats: Hardware statistics
* @stats_disable_count: Nest count for disabling statistics fetches
* @stats_pending: Is there a pending DMA of MAC statistics.
* @stats_timer: A timer for regularly fetching MAC statistics.
- * @stats_dma_done: Pointer to the flag which indicates DMA completion.
* @spi_flash: SPI flash device
* @spi_eeprom: SPI EEPROM device
* @spi_lock: SPI bus lock
@@ -121,12 +256,12 @@ struct falcon_board {
struct falcon_nic_data {
struct pci_dev *pci_dev2;
struct falcon_board board;
+ u64 stats[FALCON_STAT_COUNT];
unsigned int stats_disable_count;
bool stats_pending;
struct timer_list stats_timer;
- u32 *stats_dma_done;
- struct efx_spi_device spi_flash;
- struct efx_spi_device spi_eeprom;
+ struct falcon_spi_device spi_flash;
+ struct falcon_spi_device spi_eeprom;
struct mutex spi_lock;
struct mutex mdio_lock;
bool xmac_poll_required;
@@ -138,29 +273,148 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
return &data->board;
}
+enum {
+ SIENA_STAT_tx_bytes,
+ SIENA_STAT_tx_good_bytes,
+ SIENA_STAT_tx_bad_bytes,
+ SIENA_STAT_tx_packets,
+ SIENA_STAT_tx_bad,
+ SIENA_STAT_tx_pause,
+ SIENA_STAT_tx_control,
+ SIENA_STAT_tx_unicast,
+ SIENA_STAT_tx_multicast,
+ SIENA_STAT_tx_broadcast,
+ SIENA_STAT_tx_lt64,
+ SIENA_STAT_tx_64,
+ SIENA_STAT_tx_65_to_127,
+ SIENA_STAT_tx_128_to_255,
+ SIENA_STAT_tx_256_to_511,
+ SIENA_STAT_tx_512_to_1023,
+ SIENA_STAT_tx_1024_to_15xx,
+ SIENA_STAT_tx_15xx_to_jumbo,
+ SIENA_STAT_tx_gtjumbo,
+ SIENA_STAT_tx_collision,
+ SIENA_STAT_tx_single_collision,
+ SIENA_STAT_tx_multiple_collision,
+ SIENA_STAT_tx_excessive_collision,
+ SIENA_STAT_tx_deferred,
+ SIENA_STAT_tx_late_collision,
+ SIENA_STAT_tx_excessive_deferred,
+ SIENA_STAT_tx_non_tcpudp,
+ SIENA_STAT_tx_mac_src_error,
+ SIENA_STAT_tx_ip_src_error,
+ SIENA_STAT_rx_bytes,
+ SIENA_STAT_rx_good_bytes,
+ SIENA_STAT_rx_bad_bytes,
+ SIENA_STAT_rx_packets,
+ SIENA_STAT_rx_good,
+ SIENA_STAT_rx_bad,
+ SIENA_STAT_rx_pause,
+ SIENA_STAT_rx_control,
+ SIENA_STAT_rx_unicast,
+ SIENA_STAT_rx_multicast,
+ SIENA_STAT_rx_broadcast,
+ SIENA_STAT_rx_lt64,
+ SIENA_STAT_rx_64,
+ SIENA_STAT_rx_65_to_127,
+ SIENA_STAT_rx_128_to_255,
+ SIENA_STAT_rx_256_to_511,
+ SIENA_STAT_rx_512_to_1023,
+ SIENA_STAT_rx_1024_to_15xx,
+ SIENA_STAT_rx_15xx_to_jumbo,
+ SIENA_STAT_rx_gtjumbo,
+ SIENA_STAT_rx_bad_gtjumbo,
+ SIENA_STAT_rx_overflow,
+ SIENA_STAT_rx_false_carrier,
+ SIENA_STAT_rx_symbol_error,
+ SIENA_STAT_rx_align_error,
+ SIENA_STAT_rx_length_error,
+ SIENA_STAT_rx_internal_error,
+ SIENA_STAT_rx_nodesc_drop_cnt,
+ SIENA_STAT_COUNT
+};
+
/**
* struct siena_nic_data - Siena NIC state
- * @mcdi: Management-Controller-to-Driver Interface
* @wol_filter_id: Wake-on-LAN packet filter id
- * @hwmon: Hardware monitor state
+ * @stats: Hardware statistics
*/
struct siena_nic_data {
- struct efx_mcdi_iface mcdi;
int wol_filter_id;
-#ifdef CONFIG_SFC_MCDI_MON
- struct efx_mcdi_mon hwmon;
-#endif
+ u64 stats[SIENA_STAT_COUNT];
};
-#ifdef CONFIG_SFC_MCDI_MON
-static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
-{
- struct siena_nic_data *nic_data;
- EFX_BUG_ON_PARANOID(efx_nic_rev(efx) < EFX_REV_SIENA_A0);
- nic_data = efx->nic_data;
- return &nic_data->hwmon;
-}
-#endif
+enum {
+ EF10_STAT_tx_bytes,
+ EF10_STAT_tx_packets,
+ EF10_STAT_tx_pause,
+ EF10_STAT_tx_control,
+ EF10_STAT_tx_unicast,
+ EF10_STAT_tx_multicast,
+ EF10_STAT_tx_broadcast,
+ EF10_STAT_tx_lt64,
+ EF10_STAT_tx_64,
+ EF10_STAT_tx_65_to_127,
+ EF10_STAT_tx_128_to_255,
+ EF10_STAT_tx_256_to_511,
+ EF10_STAT_tx_512_to_1023,
+ EF10_STAT_tx_1024_to_15xx,
+ EF10_STAT_tx_15xx_to_jumbo,
+ EF10_STAT_rx_bytes,
+ EF10_STAT_rx_bytes_minus_good_bytes,
+ EF10_STAT_rx_good_bytes,
+ EF10_STAT_rx_bad_bytes,
+ EF10_STAT_rx_packets,
+ EF10_STAT_rx_good,
+ EF10_STAT_rx_bad,
+ EF10_STAT_rx_pause,
+ EF10_STAT_rx_control,
+ EF10_STAT_rx_unicast,
+ EF10_STAT_rx_multicast,
+ EF10_STAT_rx_broadcast,
+ EF10_STAT_rx_lt64,
+ EF10_STAT_rx_64,
+ EF10_STAT_rx_65_to_127,
+ EF10_STAT_rx_128_to_255,
+ EF10_STAT_rx_256_to_511,
+ EF10_STAT_rx_512_to_1023,
+ EF10_STAT_rx_1024_to_15xx,
+ EF10_STAT_rx_15xx_to_jumbo,
+ EF10_STAT_rx_gtjumbo,
+ EF10_STAT_rx_bad_gtjumbo,
+ EF10_STAT_rx_overflow,
+ EF10_STAT_rx_align_error,
+ EF10_STAT_rx_length_error,
+ EF10_STAT_rx_nodesc_drops,
+ EF10_STAT_COUNT
+};
+
+/**
+ * struct efx_ef10_nic_data - EF10 architecture NIC state
+ * @mcdi_buf: DMA buffer for MCDI
+ * @warm_boot_count: Last seen MC warm boot count
+ * @vi_base: Absolute index of first VI in this function
+ * @n_allocated_vis: Number of VIs allocated to this function
+ * @must_realloc_vis: Flag: VIs have yet to be reallocated after MC reboot
+ * @must_restore_filters: Flag: filters have yet to be restored after MC reboot
+ * @rx_rss_context: Firmware handle for our RSS context
+ * @stats: Hardware statistics
+ * @workaround_35388: Flag: firmware supports workaround for bug 35388
+ * @datapath_caps: Capabilities of datapath firmware (FLAGS1 field of
+ * %MC_CMD_GET_CAPABILITIES response)
+ */
+struct efx_ef10_nic_data {
+ struct efx_buffer mcdi_buf;
+ u16 warm_boot_count;
+ unsigned int vi_base;
+ unsigned int n_allocated_vis;
+ bool must_realloc_vis;
+ bool must_restore_filters;
+ u32 rx_rss_context;
+ u64 stats[EF10_STAT_COUNT];
+ bool workaround_35388;
+ u32 datapath_caps;
+};
/*
* On the SFC9000 family each port is associated with 1 PCI physical
@@ -263,6 +517,7 @@ extern void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
extern const struct efx_nic_type siena_a0_nic_type;
+extern const struct efx_nic_type efx_hunt_a0_nic_type;
/**************************************************************************
*
@@ -274,35 +529,123 @@ extern const struct efx_nic_type siena_a0_nic_type;
extern int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
/* TX data path */
-extern int efx_nic_probe_tx(struct efx_tx_queue *tx_queue);
-extern void efx_nic_init_tx(struct efx_tx_queue *tx_queue);
-extern void efx_nic_fini_tx(struct efx_tx_queue *tx_queue);
-extern void efx_nic_remove_tx(struct efx_tx_queue *tx_queue);
-extern void efx_nic_push_buffers(struct efx_tx_queue *tx_queue);
+static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
+{
+ return tx_queue->efx->type->tx_probe(tx_queue);
+}
+static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
+{
+ tx_queue->efx->type->tx_init(tx_queue);
+}
+static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
+{
+ tx_queue->efx->type->tx_remove(tx_queue);
+}
+static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
+{
+ tx_queue->efx->type->tx_write(tx_queue);
+}
/* RX data path */
-extern int efx_nic_probe_rx(struct efx_rx_queue *rx_queue);
-extern void efx_nic_init_rx(struct efx_rx_queue *rx_queue);
-extern void efx_nic_fini_rx(struct efx_rx_queue *rx_queue);
-extern void efx_nic_remove_rx(struct efx_rx_queue *rx_queue);
-extern void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue);
-extern void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue);
+static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
+{
+ return rx_queue->efx->type->rx_probe(rx_queue);
+}
+static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_init(rx_queue);
+}
+static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_remove(rx_queue);
+}
+static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_write(rx_queue);
+}
+static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
+{
+ rx_queue->efx->type->rx_defer_refill(rx_queue);
+}
/* Event data path */
-extern int efx_nic_probe_eventq(struct efx_channel *channel);
-extern void efx_nic_init_eventq(struct efx_channel *channel);
-extern void efx_nic_fini_eventq(struct efx_channel *channel);
-extern void efx_nic_remove_eventq(struct efx_channel *channel);
-extern int efx_nic_process_eventq(struct efx_channel *channel, int rx_quota);
-extern void efx_nic_eventq_read_ack(struct efx_channel *channel);
-extern bool efx_nic_event_present(struct efx_channel *channel);
+static inline int efx_nic_probe_eventq(struct efx_channel *channel)
+{
+ return channel->efx->type->ev_probe(channel);
+}
+static inline int efx_nic_init_eventq(struct efx_channel *channel)
+{
+ return channel->efx->type->ev_init(channel);
+}
+static inline void efx_nic_fini_eventq(struct efx_channel *channel)
+{
+ channel->efx->type->ev_fini(channel);
+}
+static inline void efx_nic_remove_eventq(struct efx_channel *channel)
+{
+ channel->efx->type->ev_remove(channel);
+}
+static inline int
+efx_nic_process_eventq(struct efx_channel *channel, int quota)
+{
+ return channel->efx->type->ev_process(channel, quota);
+}
+static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
+{
+ channel->efx->type->ev_read_ack(channel);
+}
+extern void efx_nic_event_test_start(struct efx_channel *channel);
-/* MAC/PHY */
-extern void falcon_drain_tx_fifo(struct efx_nic *efx);
-extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
-extern bool falcon_xmac_check_fault(struct efx_nic *efx);
-extern int falcon_reconfigure_xmac(struct efx_nic *efx);
-extern void falcon_update_stats_xmac(struct efx_nic *efx);
+/* Falcon/Siena queue operations */
+extern int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
+extern void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
+extern void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
+extern void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
+extern void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
+extern int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
+extern void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
+extern void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
+extern void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
+extern void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
+extern void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
+extern int efx_farch_ev_probe(struct efx_channel *channel);
+extern int efx_farch_ev_init(struct efx_channel *channel);
+extern void efx_farch_ev_fini(struct efx_channel *channel);
+extern void efx_farch_ev_remove(struct efx_channel *channel);
+extern int efx_farch_ev_process(struct efx_channel *channel, int quota);
+extern void efx_farch_ev_read_ack(struct efx_channel *channel);
+extern void efx_farch_ev_test_generate(struct efx_channel *channel);
+
+/* Falcon/Siena filter operations */
+extern int efx_farch_filter_table_probe(struct efx_nic *efx);
+extern void efx_farch_filter_table_restore(struct efx_nic *efx);
+extern void efx_farch_filter_table_remove(struct efx_nic *efx);
+extern void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
+extern s32 efx_farch_filter_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec, bool replace);
+extern int efx_farch_filter_remove_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id);
+extern int efx_farch_filter_get_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *);
+extern void efx_farch_filter_clear_rx(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+extern u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+extern u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
+extern s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size);
+#ifdef CONFIG_RFS_ACCEL
+extern s32 efx_farch_filter_rfs_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec);
+extern bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
+ unsigned int index);
+#endif
+extern void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
+
+extern bool efx_nic_event_present(struct efx_channel *channel);
/* Some statistics are computed as A - B where A and B each increase
* linearly with some hardware counter(s) and the counters are read
@@ -322,16 +665,18 @@ static inline void efx_update_diff_stat(u64 *stat, u64 diff)
*stat = diff;
}
-/* Interrupts and test events */
+/* Interrupts */
extern int efx_nic_init_interrupt(struct efx_nic *efx);
-extern void efx_nic_enable_interrupts(struct efx_nic *efx);
-extern void efx_nic_event_test_start(struct efx_channel *channel);
extern void efx_nic_irq_test_start(struct efx_nic *efx);
-extern void efx_nic_disable_interrupts(struct efx_nic *efx);
extern void efx_nic_fini_interrupt(struct efx_nic *efx);
-extern irqreturn_t efx_nic_fatal_interrupt(struct efx_nic *efx);
-extern irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id);
-extern void falcon_irq_ack_a1(struct efx_nic *efx);
+
+/* Falcon/Siena interrupts */
+extern void efx_farch_irq_enable_master(struct efx_nic *efx);
+extern void efx_farch_irq_test_generate(struct efx_nic *efx);
+extern void efx_farch_irq_disable_master(struct efx_nic *efx);
+extern irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
+extern irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
+extern irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
{
@@ -345,69 +690,47 @@ static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
/* Global Resources */
extern int efx_nic_flush_queues(struct efx_nic *efx);
extern void siena_prepare_flush(struct efx_nic *efx);
+extern int efx_farch_fini_dmaq(struct efx_nic *efx);
extern void siena_finish_flush(struct efx_nic *efx);
extern void falcon_start_nic_stats(struct efx_nic *efx);
extern void falcon_stop_nic_stats(struct efx_nic *efx);
-extern void falcon_setup_xaui(struct efx_nic *efx);
extern int falcon_reset_xaui(struct efx_nic *efx);
-extern void
-efx_nic_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
-extern void efx_nic_init_common(struct efx_nic *efx);
-extern void efx_nic_push_rx_indir_table(struct efx_nic *efx);
+extern void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
+extern void efx_farch_init_common(struct efx_nic *efx);
+extern void efx_ef10_handle_drain_event(struct efx_nic *efx);
+static inline void efx_nic_push_rx_indir_table(struct efx_nic *efx)
+{
+ efx->type->rx_push_indir_table(efx);
+}
+extern void efx_farch_rx_push_indir_table(struct efx_nic *efx);
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
- unsigned int len);
+ unsigned int len, gfp_t gfp_flags);
void efx_nic_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
/* Tests */
-struct efx_nic_register_test {
+struct efx_farch_register_test {
unsigned address;
efx_oword_t mask;
};
-extern int efx_nic_test_registers(struct efx_nic *efx,
- const struct efx_nic_register_test *regs,
- size_t n_regs);
+extern int efx_farch_test_registers(struct efx_nic *efx,
+ const struct efx_farch_register_test *regs,
+ size_t n_regs);
extern size_t efx_nic_get_regs_len(struct efx_nic *efx);
extern void efx_nic_get_regs(struct efx_nic *efx, void *buf);
-/**************************************************************************
- *
- * Falcon MAC stats
- *
- **************************************************************************
- */
+extern size_t
+efx_nic_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask, u8 *names);
+extern void
+efx_nic_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
+ const unsigned long *mask,
+ u64 *stats, const void *dma_buf, bool accumulate);
+
+#define EFX_MAX_FLUSH_TIME 5000
-#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
-#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)
-
-/* Retrieve statistic from statistics block */
-#define FALCON_STAT(efx, falcon_stat, efx_stat) do { \
- if (FALCON_STAT_WIDTH(falcon_stat) == 16) \
- (efx)->mac_stats.efx_stat += le16_to_cpu( \
- *((__force __le16 *) \
- (efx->stats_buffer.addr + \
- FALCON_STAT_OFFSET(falcon_stat)))); \
- else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \
- (efx)->mac_stats.efx_stat += le32_to_cpu( \
- *((__force __le32 *) \
- (efx->stats_buffer.addr + \
- FALCON_STAT_OFFSET(falcon_stat)))); \
- else \
- (efx)->mac_stats.efx_stat += le64_to_cpu( \
- *((__force __le64 *) \
- (efx->stats_buffer.addr + \
- FALCON_STAT_OFFSET(falcon_stat)))); \
- } while (0)
-
-#define FALCON_MAC_STATS_SIZE 0x100
-
-#define MAC_DATA_LBN 0
-#define MAC_DATA_WIDTH 32
-
-extern void efx_generate_event(struct efx_nic *efx, unsigned int evq,
- efx_qword_t *event);
-
-extern void falcon_poll_xmac(struct efx_nic *efx);
+extern void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
+ efx_qword_t *event);
#endif /* EFX_NIC_H */
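
The header changes above replace the direct efx_nic_* externs with static inline wrappers that dispatch through the per-NIC-type method table (efx->type->tx_probe and friends), so Falcon/Siena keep the efx_farch_* implementations while the new EF10/Hunt type can plug in its own. A minimal stand-alone C sketch of that dispatch pattern, with hypothetical names standing in for the real ops:

#include <stddef.h>
#include <stdio.h>

/* Cut-down analogue of the per-NIC-type method table. */
struct demo_nic_type {
	const char *name;
	int (*tx_probe)(void *queue);
};

struct demo_nic {
	const struct demo_nic_type *type;
};

/* Thin wrapper, equivalent in spirit to the new efx_nic_probe_tx(). */
static inline int demo_nic_probe_tx(struct demo_nic *nic, void *queue)
{
	return nic->type->tx_probe(queue);
}

static int farch_tx_probe(void *queue) { (void)queue; return 0; }
static int ef10_tx_probe(void *queue)  { (void)queue; return 0; }

static const struct demo_nic_type farch_type = { "farch", farch_tx_probe };
static const struct demo_nic_type ef10_type  = { "ef10",  ef10_tx_probe };

int main(void)
{
	struct demo_nic nics[] = { { &farch_type }, { &ef10_type } };
	size_t i;

	for (i = 0; i < 2; i++)
		printf("%s: tx_probe -> %d\n", nics[i].type->name,
		       demo_nic_probe_tx(&nics[i], NULL));
	return 0;
}
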
diff --git a/drivers/net/ethernet/sfc/phy.h b/drivers/net/ethernet/sfc/phy.h
index 11d148cd844..45eeb707515 100644
--- a/drivers/net/ethernet/sfc/phy.h
+++ b/drivers/net/ethernet/sfc/phy.h
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2007-2010 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
@@ -47,21 +47,4 @@ extern const struct efx_phy_operations falcon_txc_phy_ops;
extern void falcon_txc_set_gpio_dir(struct efx_nic *efx, int pin, int dir);
extern void falcon_txc_set_gpio_val(struct efx_nic *efx, int pin, int val);
-/****************************************************************************
- * Siena managed PHYs
- */
-extern const struct efx_phy_operations efx_mcdi_phy_ops;
-
-extern int efx_mcdi_mdio_read(struct efx_nic *efx, unsigned int bus,
- unsigned int prtad, unsigned int devad,
- u16 addr, u16 *value_out, u32 *status_out);
-extern int efx_mcdi_mdio_write(struct efx_nic *efx, unsigned int bus,
- unsigned int prtad, unsigned int devad,
- u16 addr, u16 value, u32 *status_out);
-extern void efx_mcdi_phy_decode_link(struct efx_nic *efx,
- struct efx_link_state *link_state,
- u32 speed, u32 flags, u32 fcntl);
-extern int efx_mcdi_phy_reconfigure(struct efx_nic *efx);
-extern void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa);
-
#endif
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index b495394a6df..03acf57df04 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2011-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -46,7 +46,7 @@
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "io.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "nic.h"
/* Maximum number of events expected to make up a PTP event */
@@ -294,8 +294,7 @@ struct efx_ptp_data {
struct work_struct pps_work;
struct workqueue_struct *pps_workwq;
bool nic_ts_enabled;
- u8 txbuf[ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(
- MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM), 4)];
+ MCDI_DECLARE_BUF(txbuf, MC_CMD_PTP_IN_TRANSMIT_LENMAX);
struct efx_ptp_timeset
timeset[MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_MAXNUM];
};
@@ -311,9 +310,10 @@ static int efx_phc_enable(struct ptp_clock_info *ptp,
/* Enable MCDI PTP support. */
static int efx_ptp_enable(struct efx_nic *efx)
{
- u8 inbuf[MC_CMD_PTP_IN_ENABLE_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ENABLE_LEN);
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ENABLE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_QUEUE,
efx->ptp_data->channel->channel);
MCDI_SET_DWORD(inbuf, PTP_IN_ENABLE_MODE, efx->ptp_data->mode);
@@ -329,9 +329,10 @@ static int efx_ptp_enable(struct efx_nic *efx)
*/
static int efx_ptp_disable(struct efx_nic *efx)
{
- u8 inbuf[MC_CMD_PTP_IN_DISABLE_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_DISABLE_LEN);
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_DISABLE);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
NULL, 0, NULL);
}
@@ -389,14 +390,14 @@ static void efx_ptp_send_times(struct efx_nic *efx,
host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS |
now.ts_real.tv_nsec);
/* Update host time in NIC memory */
- _efx_writed(efx, cpu_to_le32(host_time),
- FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
+ efx->type->ptp_write_host_time(efx, host_time);
}
*last_time = now;
}
/* Read a timeset from the MC's results and partial process. */
-static void efx_ptp_read_timeset(u8 *data, struct efx_ptp_timeset *timeset)
+static void efx_ptp_read_timeset(MCDI_DECLARE_STRUCT_PTR(data),
+ struct efx_ptp_timeset *timeset)
{
unsigned start_ns, end_ns;
@@ -425,12 +426,14 @@ static void efx_ptp_read_timeset(u8 *data, struct efx_ptp_timeset *timeset)
* busy. A number of readings are taken so that, hopefully, at least one good
* synchronisation will be seen in the results.
*/
-static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
- size_t response_length,
- const struct pps_event_time *last_time)
+static int
+efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
+ size_t response_length,
+ const struct pps_event_time *last_time)
{
- unsigned number_readings = (response_length /
- MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN);
+ unsigned number_readings =
+ MCDI_VAR_ARRAY_LEN(response_length,
+ PTP_OUT_SYNCHRONIZE_TIMESET);
unsigned i;
unsigned total;
unsigned ngood = 0;
@@ -447,8 +450,10 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
* appear to be erroneous.
*/
for (i = 0; i < number_readings; i++) {
- efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]);
- synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN;
+ efx_ptp_read_timeset(
+ MCDI_ARRAY_STRUCT_PTR(synch_buf,
+ PTP_OUT_SYNCHRONIZE_TIMESET, i),
+ &ptp->timeset[i]);
}
/* Find the last good host-MC synchronization result. The MC times
@@ -518,7 +523,7 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
{
struct efx_ptp_data *ptp = efx->ptp_data;
- u8 synch_buf[MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX];
+ MCDI_DECLARE_BUF(synch_buf, MC_CMD_PTP_OUT_SYNCHRONIZE_LENMAX);
size_t response_length;
int rc;
unsigned long timeout;
@@ -527,17 +532,17 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
int *start = ptp->start.addr;
MCDI_SET_DWORD(synch_buf, PTP_IN_OP, MC_CMD_PTP_OP_SYNCHRONIZE);
+ MCDI_SET_DWORD(synch_buf, PTP_IN_PERIPH_ID, 0);
MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_NUMTIMESETS,
num_readings);
- MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_LO,
- (u32)ptp->start.dma_addr);
- MCDI_SET_DWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR_HI,
- (u32)((u64)ptp->start.dma_addr >> 32));
+ MCDI_SET_QWORD(synch_buf, PTP_IN_SYNCHRONIZE_START_ADDR,
+ ptp->start.dma_addr);
/* Clear flag that signals MC ready */
ACCESS_ONCE(*start) = 0;
- efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
- MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
+ rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
+ MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
+ EFX_BUG_ON_PARANOID(rc);
/* Wait for start from MCDI (or timeout) */
timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS);
@@ -564,15 +569,15 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
/* Transmit a PTP packet, via the MCDI interface, to the wire. */
static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
{
- u8 *txbuf = efx->ptp_data->txbuf;
+ struct efx_ptp_data *ptp_data = efx->ptp_data;
struct skb_shared_hwtstamps timestamps;
int rc = -EIO;
- /* MCDI driver requires word aligned lengths */
- size_t len = ALIGN(MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len), 4);
- u8 txtime[MC_CMD_PTP_OUT_TRANSMIT_LEN];
+ MCDI_DECLARE_BUF(txtime, MC_CMD_PTP_OUT_TRANSMIT_LEN);
+ size_t len;
- MCDI_SET_DWORD(txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
- MCDI_SET_DWORD(txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
+ MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_OP, MC_CMD_PTP_OP_TRANSMIT);
+ MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_DWORD(ptp_data->txbuf, PTP_IN_TRANSMIT_LENGTH, skb->len);
if (skb_shinfo(skb)->nr_frags != 0) {
rc = skb_linearize(skb);
if (rc != 0)
@@ -585,10 +590,12 @@ static int efx_ptp_xmit_skb(struct efx_nic *efx, struct sk_buff *skb)
goto fail;
}
skb_copy_from_linear_data(skb,
- &txbuf[MC_CMD_PTP_IN_TRANSMIT_PACKET_OFST],
- len);
- rc = efx_mcdi_rpc(efx, MC_CMD_PTP, txbuf, len, txtime,
- sizeof(txtime), &len);
+ MCDI_PTR(ptp_data->txbuf,
+ PTP_IN_TRANSMIT_PACKET),
+ skb->len);
+ rc = efx_mcdi_rpc(efx, MC_CMD_PTP,
+ ptp_data->txbuf, MC_CMD_PTP_IN_TRANSMIT_LEN(skb->len),
+ txtime, sizeof(txtime), &len);
if (rc != 0)
goto fail;
@@ -872,7 +879,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
if (!efx->ptp_data)
return -ENOMEM;
- rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int));
+ rc = efx_nic_alloc_buffer(efx, &ptp->start, sizeof(int), GFP_KERNEL);
if (rc != 0)
goto fail1;
@@ -1359,7 +1366,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
struct efx_ptp_data,
phc_clock_info);
struct efx_nic *efx = ptp_data->channel->efx;
- u8 inadj[MC_CMD_PTP_IN_ADJUST_LEN];
+ MCDI_DECLARE_BUF(inadj, MC_CMD_PTP_IN_ADJUST_LEN);
s64 adjustment_ns;
int rc;
@@ -1373,9 +1380,8 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
(PPB_EXTRA_BITS + MAX_PPB_BITS));
MCDI_SET_DWORD(inadj, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
- MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_LO, (u32)adjustment_ns);
- MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_FREQ_HI,
- (u32)(adjustment_ns >> 32));
+ MCDI_SET_DWORD(inadj, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_QWORD(inadj, PTP_IN_ADJUST_FREQ, adjustment_ns);
MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_SECONDS, 0);
MCDI_SET_DWORD(inadj, PTP_IN_ADJUST_NANOSECONDS, 0);
rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inadj, sizeof(inadj),
@@ -1394,11 +1400,11 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
phc_clock_info);
struct efx_nic *efx = ptp_data->channel->efx;
struct timespec delta_ts = ns_to_timespec(delta);
- u8 inbuf[MC_CMD_PTP_IN_ADJUST_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_ADJUST_LEN);
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
- MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_LO, 0);
- MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_FREQ_HI, 0);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
+ MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0);
MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
@@ -1411,11 +1417,12 @@ static int efx_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
struct efx_ptp_data,
phc_clock_info);
struct efx_nic *efx = ptp_data->channel->efx;
- u8 inbuf[MC_CMD_PTP_IN_READ_NIC_TIME_LEN];
- u8 outbuf[MC_CMD_PTP_OUT_READ_NIC_TIME_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_READ_NIC_TIME_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_PTP_OUT_READ_NIC_TIME_LEN);
int rc;
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_READ_NIC_TIME);
+ MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
rc = efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
outbuf, sizeof(outbuf), NULL);
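
The PTP changes above swap raw u8 arrays and split _LO/_HI dword writes for MCDI_DECLARE_BUF() and MCDI_SET_DWORD()/MCDI_SET_QWORD(): each request is a little-endian buffer in which named fields live at fixed offsets. A stand-alone C sketch of that idea is below; the field names, offsets and op code are hypothetical and do not reflect the real MC_CMD_PTP layout.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_PTP_IN_OP_OFST        0	/* 32-bit operation code */
#define DEMO_PTP_IN_PERIPH_ID_OFST 4	/* 32-bit peripheral id */
#define DEMO_PTP_IN_FREQ_OFST      8	/* 64-bit adjustment, two dwords */

/* Store a 32-bit field little-endian at a fixed byte offset. */
static void demo_set_dword(uint8_t *buf, unsigned int ofst, uint32_t val)
{
	unsigned int i;

	for (i = 0; i < 4; i++)
		buf[ofst + i] = (uint8_t)(val >> (8 * i));
}

/* A 64-bit field is simply the low dword followed by the high dword. */
static void demo_set_qword(uint8_t *buf, unsigned int ofst, uint64_t val)
{
	demo_set_dword(buf, ofst, (uint32_t)val);
	demo_set_dword(buf, ofst + 4, (uint32_t)(val >> 32));
}

int main(void)
{
	uint8_t inbuf[16];
	unsigned int i;

	memset(inbuf, 0, sizeof(inbuf));
	demo_set_dword(inbuf, DEMO_PTP_IN_OP_OFST, 0x0b);	/* hypothetical op */
	demo_set_dword(inbuf, DEMO_PTP_IN_PERIPH_ID_OFST, 0);
	demo_set_qword(inbuf, DEMO_PTP_IN_FREQ_OFST, 0x123456789abcULL);

	for (i = 0; i < sizeof(inbuf); i++)
		printf("%02x%s", inbuf[i], (i % 8 == 7) ? "\n" : " ");
	return 0;
}
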
diff --git a/drivers/net/ethernet/sfc/qt202x_phy.c b/drivers/net/ethernet/sfc/qt202x_phy.c
index 326a28637f3..efa3612affc 100644
--- a/drivers/net/ethernet/sfc/qt202x_phy.c
+++ b/drivers/net/ethernet/sfc/qt202x_phy.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2006-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 6af9cfda50f..4a596725023 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2011 Solarflare Communications Inc.
+ * Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -21,6 +21,7 @@
#include <net/checksum.h>
#include "net_driver.h"
#include "efx.h"
+#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
@@ -60,13 +61,12 @@ static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
return page_address(buf->page) + buf->page_offset;
}
-static inline u32 efx_rx_buf_hash(const u8 *eh)
+static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
- /* The ethernet header is always directly after any hash. */
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
- return __le32_to_cpup((const __le32 *)(eh - 4));
+#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+ return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
- const u8 *data = eh - 4;
+ const u8 *data = eh + efx->rx_packet_hash_offset;
return (u32)data[0] |
(u32)data[1] << 8 |
(u32)data[2] << 16 |
@@ -326,6 +326,9 @@ void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
unsigned int fill_level, batch_size;
int space, rc = 0;
+ if (!rx_queue->refill_enabled)
+ return;
+
/* Calculate current fill level, and exit if we don't need to fill */
fill_level = (rx_queue->added_count - rx_queue->removed_count);
EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
@@ -435,7 +438,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
}
if (efx->net_dev->features & NETIF_F_RXHASH)
- skb->rxhash = efx_rx_buf_hash(eh);
+ skb->rxhash = efx_rx_buf_hash(efx, eh);
skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
@@ -523,10 +526,11 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
/* Validate the number of fragments and completed length */
if (n_frags == 1) {
- efx_rx_packet__check_len(rx_queue, rx_buf, len);
+ if (!(flags & EFX_RX_PKT_PREFIX_LEN))
+ efx_rx_packet__check_len(rx_queue, rx_buf, len);
} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
- unlikely(len <= (n_frags - 1) * EFX_RX_USR_BUF_SIZE) ||
- unlikely(len > n_frags * EFX_RX_USR_BUF_SIZE) ||
+ unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
+ unlikely(len > n_frags * efx->rx_dma_len) ||
unlikely(!efx->rx_scatter)) {
/* If this isn't an explicit discard request, either
* the hardware or the driver is broken.
@@ -551,7 +555,7 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
return;
}
- if (n_frags == 1)
+ if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
rx_buf->len = len;
/* Release and/or sync the DMA mapping - assumes all RX buffers
@@ -564,8 +568,8 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
*/
prefetch(efx_rx_buf_va(rx_buf));
- rx_buf->page_offset += efx->type->rx_buffer_hash_size;
- rx_buf->len -= efx->type->rx_buffer_hash_size;
+ rx_buf->page_offset += efx->rx_prefix_size;
+ rx_buf->len -= efx->rx_prefix_size;
if (n_frags > 1) {
/* Release/sync DMA mapping for additional fragments.
@@ -577,9 +581,9 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
if (--tail_frags == 0)
break;
- efx_sync_rx_buffer(efx, rx_buf, EFX_RX_USR_BUF_SIZE);
+ efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
}
- rx_buf->len = len - (n_frags - 1) * EFX_RX_USR_BUF_SIZE;
+ rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
}
@@ -630,6 +634,13 @@ void __efx_rx_packet(struct efx_channel *channel)
efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
u8 *eh = efx_rx_buf_va(rx_buf);
+ /* Read length from the prefix if necessary. This already
+ * excludes the length of the prefix itself.
+ */
+ if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
+ rx_buf->len = le16_to_cpup((__le16 *)
+ (eh + efx->rx_packet_len_offset));
+
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
*/
@@ -738,9 +749,9 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->max_fill = max_fill;
rx_queue->fast_fill_trigger = trigger;
+ rx_queue->refill_enabled = true;
/* Set up RX descriptor ring */
- rx_queue->enabled = true;
efx_nic_init_rx(rx_queue);
}
@@ -753,11 +764,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
"shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
- /* A flush failure might have left rx_queue->enabled */
- rx_queue->enabled = false;
-
del_timer_sync(&rx_queue->slow_fill);
- efx_nic_fini_rx(rx_queue);
/* Release RX buffers from the current read ptr to the write ptr */
if (rx_queue->buffer) {
@@ -803,3 +810,130 @@ module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
"RX descriptor ring refill threshold (%)");
+#ifdef CONFIG_RFS_ACCEL
+
+int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+ struct efx_channel *channel;
+ struct efx_filter_spec spec;
+ const struct iphdr *ip;
+ const __be16 *ports;
+ int nhoff;
+ int rc;
+
+ nhoff = skb_network_offset(skb);
+
+ if (skb->protocol == htons(ETH_P_8021Q)) {
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) <
+ nhoff + sizeof(struct vlan_hdr));
+ if (((const struct vlan_hdr *)skb->data + nhoff)->
+ h_vlan_encapsulated_proto != htons(ETH_P_IP))
+ return -EPROTONOSUPPORT;
+
+ /* This is IP over 802.1q VLAN. We can't filter on the
+ * IP 5-tuple and the vlan together, so just strip the
+ * vlan header and filter on the IP part.
+ */
+ nhoff += sizeof(struct vlan_hdr);
+ } else if (skb->protocol != htons(ETH_P_IP)) {
+ return -EPROTONOSUPPORT;
+ }
+
+ /* RFS must validate the IP header length before calling us */
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
+ ip = (const struct iphdr *)(skb->data + nhoff);
+ if (ip_is_fragment(ip))
+ return -EPROTONOSUPPORT;
+ EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
+ ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
+ efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
+ rxq_index);
+ rc = efx_filter_set_ipv4_full(&spec, ip->protocol,
+ ip->daddr, ports[1], ip->saddr, ports[0]);
+ if (rc)
+ return rc;
+
+ rc = efx->type->filter_rfs_insert(efx, &spec);
+ if (rc < 0)
+ return rc;
+
+ /* Remember this so we can check whether to expire the filter later */
+ efx->rps_flow_id[rc] = flow_id;
+ channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+ ++channel->rfs_filters_added;
+
+ netif_info(efx, rx_status, efx->net_dev,
+ "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
+ (ip->protocol == IPPROTO_TCP) ? "TCP" : "UDP",
+ &ip->saddr, ntohs(ports[0]), &ip->daddr, ntohs(ports[1]),
+ rxq_index, flow_id, rc);
+
+ return rc;
+}
+
+bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
+{
+ bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
+ unsigned int index, size;
+ u32 flow_id;
+
+ if (!spin_trylock_bh(&efx->filter_lock))
+ return false;
+
+ expire_one = efx->type->filter_rfs_expire_one;
+ index = efx->rps_expire_index;
+ size = efx->type->max_rx_ip_filters;
+ while (quota--) {
+ flow_id = efx->rps_flow_id[index];
+ if (expire_one(efx, flow_id, index))
+ netif_info(efx, rx_status, efx->net_dev,
+ "expired filter %d [flow %u]\n",
+ index, flow_id);
+ if (++index == size)
+ index = 0;
+ }
+ efx->rps_expire_index = index;
+
+ spin_unlock_bh(&efx->filter_lock);
+ return true;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+/**
+ * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
+ * @spec: Specification to test
+ *
+ * Return: %true if the specification is a non-drop RX filter that
+ * matches a local MAC address I/G bit value of 1 or matches a local
+ * IPv4 or IPv6 address value in the respective multicast address
+ * range. Otherwise %false.
+ */
+bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
+{
+ if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
+ spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
+ return false;
+
+ if (spec->match_flags &
+ (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
+ is_multicast_ether_addr(spec->loc_mac))
+ return true;
+
+ if ((spec->match_flags &
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
+ (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
+ if (spec->ether_type == htons(ETH_P_IP) &&
+ ipv4_is_multicast(spec->loc_host[0]))
+ return true;
+ if (spec->ether_type == htons(ETH_P_IPV6) &&
+ ((const u8 *)spec->loc_host)[0] == 0xff)
+ return true;
+ }
+
+ return false;
+}
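
efx_filter_is_mc_recipient() above classifies a filter as a multicast recipient by the destination MAC's I/G bit, the IPv4 224.0.0.0/4 range, or an IPv6 ff00::/8 address. A minimal stand-alone C sketch of those three checks on raw addresses; the helper names here are hypothetical, not the kernel's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* I/G bit: the least significant bit of the first MAC octet. */
static bool mac_is_multicast(const uint8_t mac[6])
{
	return mac[0] & 0x01;
}

/* IPv4 multicast is 224.0.0.0/4, i.e. the top nibble is 0xe. */
static bool ip4_is_multicast(const uint8_t addr[4])
{
	return (addr[0] & 0xf0) == 0xe0;
}

/* IPv6 multicast is ff00::/8, i.e. the first byte is 0xff. */
static bool ip6_is_multicast(const uint8_t addr[16])
{
	return addr[0] == 0xff;
}

int main(void)
{
	const uint8_t mcast_mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	const uint8_t v4[4] = { 224, 0, 0, 251 };
	const uint8_t v6[16] = { 0xff, 0x02 };	/* ff02::..., rest zero */

	printf("mac %d  ipv4 %d  ipv6 %d\n",
	       mac_is_multicast(mcast_mac),
	       ip4_is_multicast(v4),
	       ip6_is_multicast(v6));
	return 0;
}
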
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 2069f51b2aa..144bbff5a4a 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -447,14 +447,7 @@ static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
static int efx_poll_loopback(struct efx_nic *efx)
{
struct efx_loopback_state *state = efx->loopback_selftest;
- struct efx_channel *channel;
- /* NAPI polling is not enabled, so process channels
- * synchronously */
- efx_for_each_channel(channel, efx) {
- if (channel->work_pending)
- efx_process_channel_now(channel);
- }
return atomic_read(&state->rx_good) == state->packet_count;
}
@@ -586,10 +579,6 @@ static int efx_wait_for_link(struct efx_nic *efx)
mutex_lock(&efx->mac_lock);
efx->type->monitor(efx);
mutex_unlock(&efx->mac_lock);
- } else {
- struct efx_channel *channel = efx_get_channel(efx, 0);
- if (channel->work_pending)
- efx_process_channel_now(channel);
}
mutex_lock(&efx->mac_lock);
diff --git a/drivers/net/ethernet/sfc/selftest.h b/drivers/net/ethernet/sfc/selftest.h
index aed24b73605..87698ae0bf7 100644
--- a/drivers/net/ethernet/sfc/selftest.h
+++ b/drivers/net/ethernet/sfc/selftest.h
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 8c91775e3c5..d034bcd124e 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -18,8 +18,7 @@
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
-#include "spi.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "io.h"
#include "phy.h"
#include "workarounds.h"
@@ -30,7 +29,6 @@
/* Hardware control for SFC9000 family including SFL9021 (aka Siena). */
static void siena_init_wol(struct efx_nic *efx);
-static int siena_reset_hw(struct efx_nic *efx, enum reset_type method);
static void siena_push_irq_moderation(struct efx_channel *channel)
@@ -52,81 +50,6 @@ static void siena_push_irq_moderation(struct efx_channel *channel)
channel->channel);
}
-static int siena_mdio_write(struct net_device *net_dev,
- int prtad, int devad, u16 addr, u16 value)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- uint32_t status;
- int rc;
-
- rc = efx_mcdi_mdio_write(efx, efx->mdio_bus, prtad, devad,
- addr, value, &status);
- if (rc)
- return rc;
- if (status != MC_CMD_MDIO_STATUS_GOOD)
- return -EIO;
-
- return 0;
-}
-
-static int siena_mdio_read(struct net_device *net_dev,
- int prtad, int devad, u16 addr)
-{
- struct efx_nic *efx = netdev_priv(net_dev);
- uint16_t value;
- uint32_t status;
- int rc;
-
- rc = efx_mcdi_mdio_read(efx, efx->mdio_bus, prtad, devad,
- addr, &value, &status);
- if (rc)
- return rc;
- if (status != MC_CMD_MDIO_STATUS_GOOD)
- return -EIO;
-
- return (int)value;
-}
-
-/* This call is responsible for hooking in the MAC and PHY operations */
-static int siena_probe_port(struct efx_nic *efx)
-{
- int rc;
-
- /* Hook in PHY operations table */
- efx->phy_op = &efx_mcdi_phy_ops;
-
- /* Set up MDIO structure for PHY */
- efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
- efx->mdio.mdio_read = siena_mdio_read;
- efx->mdio.mdio_write = siena_mdio_write;
-
- /* Fill out MDIO structure, loopback modes, and initial link state */
- rc = efx->phy_op->probe(efx);
- if (rc != 0)
- return rc;
-
- /* Allocate buffer for stats */
- rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
- MC_CMD_MAC_NSTATS * sizeof(u64));
- if (rc)
- return rc;
- netif_dbg(efx, probe, efx->net_dev,
- "stats buffer at %llx (virt %p phys %llx)\n",
- (u64)efx->stats_buffer.dma_addr,
- efx->stats_buffer.addr,
- (u64)virt_to_phys(efx->stats_buffer.addr));
-
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 1);
-
- return 0;
-}
-
-static void siena_remove_port(struct efx_nic *efx)
-{
- efx->phy_op->remove(efx);
- efx_nic_free_buffer(efx, &efx->stats_buffer);
-}
-
void siena_prepare_flush(struct efx_nic *efx)
{
if (efx->fc_disable++ == 0)
@@ -139,7 +62,7 @@ void siena_finish_flush(struct efx_nic *efx)
efx_mcdi_set_mac(efx);
}
-static const struct efx_nic_register_test siena_register_tests[] = {
+static const struct efx_farch_register_test siena_register_tests[] = {
{ FR_AZ_ADR_REGION,
EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
{ FR_CZ_USR_EV_CFG,
@@ -178,16 +101,16 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
/* Reset the chip immediately so that it is completely
* quiescent regardless of what any VF driver does.
*/
- rc = siena_reset_hw(efx, reset_method);
+ rc = efx_mcdi_reset(efx, reset_method);
if (rc)
goto out;
tests->registers =
- efx_nic_test_registers(efx, siena_register_tests,
- ARRAY_SIZE(siena_register_tests))
+ efx_farch_test_registers(efx, siena_register_tests,
+ ARRAY_SIZE(siena_register_tests))
? -1 : 1;
- rc = siena_reset_hw(efx, reset_method);
+ rc = efx_mcdi_reset(efx, reset_method);
out:
rc2 = efx_reset_up(efx, reset_method, rc == 0);
return rc ? rc : rc2;
@@ -200,11 +123,6 @@ out:
**************************************************************************
*/
-static enum reset_type siena_map_reset_reason(enum reset_type reason)
-{
- return RESET_TYPE_RECOVER_OR_ALL;
-}
-
static int siena_map_reset_flags(u32 *flags)
{
enum {
@@ -230,21 +148,6 @@ static int siena_map_reset_flags(u32 *flags)
return -EINVAL;
}
-static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
-{
- int rc;
-
- /* Recover from a failed assertion pre-reset */
- rc = efx_mcdi_handle_assertion(efx);
- if (rc)
- return rc;
-
- if (method == RESET_TYPE_WORLD)
- return efx_mcdi_reset_mc(efx);
- else
- return efx_mcdi_reset_port(efx);
-}
-
#ifdef CONFIG_EEH
/* When a PCI device is isolated from the bus, a subsequent MMIO read is
* required for the kernel EEH mechanisms to notice. As the Solarflare driver
@@ -274,19 +177,25 @@ static int siena_probe_nvconfig(struct efx_nic *efx)
return rc;
}
-static void siena_dimension_resources(struct efx_nic *efx)
+static int siena_dimension_resources(struct efx_nic *efx)
{
/* Each port has a small block of internal SRAM dedicated to
* the buffer table and descriptor caches. In theory we can
* map both blocks to one port, but we don't.
*/
- efx_nic_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
+ efx_farch_dimension_resources(efx, FR_CZ_BUF_FULL_TBL_ROWS / 2);
+ return 0;
+}
+
+static unsigned int siena_mem_map_size(struct efx_nic *efx)
+{
+ return FR_CZ_MC_TREG_SMEM +
+ FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS;
}
static int siena_probe_nic(struct efx_nic *efx)
{
struct siena_nic_data *nic_data;
- bool already_attached = false;
efx_oword_t reg;
int rc;
@@ -296,38 +205,24 @@ static int siena_probe_nic(struct efx_nic *efx)
return -ENOMEM;
efx->nic_data = nic_data;
- if (efx_nic_fpga_ver(efx) != 0) {
+ if (efx_farch_fpga_ver(efx) != 0) {
netif_err(efx, probe, efx->net_dev,
"Siena FPGA not supported\n");
rc = -ENODEV;
goto fail1;
}
+ efx->max_channels = EFX_MAX_CHANNELS;
+
efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
- efx_mcdi_init(efx);
-
- /* Recover from a failed assertion before probing */
- rc = efx_mcdi_handle_assertion(efx);
+ rc = efx_mcdi_init(efx);
if (rc)
goto fail1;
- /* Let the BMC know that the driver is now in charge of link and
- * filter settings. We must do this before we reset the NIC */
- rc = efx_mcdi_drv_attach(efx, true, &already_attached);
- if (rc) {
- netif_err(efx, probe, efx->net_dev,
- "Unable to register driver with MCPU\n");
- goto fail2;
- }
- if (already_attached)
- /* Not a fatal error */
- netif_err(efx, probe, efx->net_dev,
- "Host already registered with MCPU\n");
-
/* Now we can reset the NIC */
- rc = siena_reset_hw(efx, RESET_TYPE_ALL);
+ rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
if (rc) {
netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
goto fail3;
@@ -336,7 +231,8 @@ static int siena_probe_nic(struct efx_nic *efx)
siena_init_wol(efx);
/* Allocate memory for INT_KER */
- rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
+ rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
+ GFP_KERNEL);
if (rc)
goto fail4;
BUG_ON(efx->irq_status.dma_addr & 0x0f);
@@ -371,8 +267,7 @@ fail5:
efx_nic_free_buffer(efx, &efx->irq_status);
fail4:
fail3:
- efx_mcdi_drv_attach(efx, false, NULL);
-fail2:
+ efx_mcdi_fini(efx);
fail1:
kfree(efx->nic_data);
return rc;
@@ -448,7 +343,7 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_POPULATE_OWORD_1(temp, FRF_CZ_USREV_DIS, 1);
efx_writeo(efx, &temp, FR_CZ_USR_EV_CFG);
- efx_nic_init_common(efx);
+ efx_farch_init_common(efx);
return 0;
}
@@ -458,144 +353,192 @@ static void siena_remove_nic(struct efx_nic *efx)
efx_nic_free_buffer(efx, &efx->irq_status);
- siena_reset_hw(efx, RESET_TYPE_ALL);
+ efx_mcdi_reset(efx, RESET_TYPE_ALL);
- /* Relinquish the device back to the BMC */
- efx_mcdi_drv_attach(efx, false, NULL);
+ efx_mcdi_fini(efx);
/* Tear down the private nic state */
kfree(efx->nic_data);
efx->nic_data = NULL;
}
-#define STATS_GENERATION_INVALID ((__force __le64)(-1))
+#define SIENA_DMA_STAT(ext_name, mcdi_name) \
+ [SIENA_STAT_ ## ext_name] = \
+ { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
+#define SIENA_OTHER_STAT(ext_name) \
+ [SIENA_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+
+static const struct efx_hw_stat_desc siena_stat_desc[SIENA_STAT_COUNT] = {
+ SIENA_DMA_STAT(tx_bytes, TX_BYTES),
+ SIENA_OTHER_STAT(tx_good_bytes),
+ SIENA_DMA_STAT(tx_bad_bytes, TX_BAD_BYTES),
+ SIENA_DMA_STAT(tx_packets, TX_PKTS),
+ SIENA_DMA_STAT(tx_bad, TX_BAD_FCS_PKTS),
+ SIENA_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
+ SIENA_DMA_STAT(tx_control, TX_CONTROL_PKTS),
+ SIENA_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
+ SIENA_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
+ SIENA_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
+ SIENA_DMA_STAT(tx_lt64, TX_LT64_PKTS),
+ SIENA_DMA_STAT(tx_64, TX_64_PKTS),
+ SIENA_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
+ SIENA_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
+ SIENA_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
+ SIENA_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
+ SIENA_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
+ SIENA_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
+ SIENA_DMA_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS),
+ SIENA_OTHER_STAT(tx_collision),
+ SIENA_DMA_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS),
+ SIENA_DMA_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS),
+ SIENA_DMA_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS),
+ SIENA_DMA_STAT(tx_deferred, TX_DEFERRED_PKTS),
+ SIENA_DMA_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS),
+ SIENA_DMA_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS),
+ SIENA_DMA_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS),
+ SIENA_DMA_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS),
+ SIENA_DMA_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS),
+ SIENA_DMA_STAT(rx_bytes, RX_BYTES),
+ SIENA_OTHER_STAT(rx_good_bytes),
+ SIENA_DMA_STAT(rx_bad_bytes, RX_BAD_BYTES),
+ SIENA_DMA_STAT(rx_packets, RX_PKTS),
+ SIENA_DMA_STAT(rx_good, RX_GOOD_PKTS),
+ SIENA_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
+ SIENA_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
+ SIENA_DMA_STAT(rx_control, RX_CONTROL_PKTS),
+ SIENA_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
+ SIENA_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
+ SIENA_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
+ SIENA_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
+ SIENA_DMA_STAT(rx_64, RX_64_PKTS),
+ SIENA_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
+ SIENA_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
+ SIENA_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
+ SIENA_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
+ SIENA_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+ SIENA_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+ SIENA_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
+ SIENA_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
+ SIENA_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
+ SIENA_DMA_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS),
+ SIENA_DMA_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS),
+ SIENA_DMA_STAT(rx_nodesc_drop_cnt, RX_NODESC_DROPS),
+};
+static const unsigned long siena_stat_mask[] = {
+ [0 ... BITS_TO_LONGS(SIENA_STAT_COUNT) - 1] = ~0UL,
+};
+
+static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 *names)
+{
+ return efx_nic_describe_stats(siena_stat_desc, SIENA_STAT_COUNT,
+ siena_stat_mask, names);
+}
static int siena_try_update_nic_stats(struct efx_nic *efx)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
__le64 *dma_stats;
- struct efx_mac_stats *mac_stats;
__le64 generation_start, generation_end;
- mac_stats = &efx->mac_stats;
dma_stats = efx->stats_buffer.addr;
generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
- if (generation_end == STATS_GENERATION_INVALID)
+ if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
return 0;
rmb();
-
-#define MAC_STAT(M, D) \
- mac_stats->M = le64_to_cpu(dma_stats[MC_CMD_MAC_ ## D])
-
- MAC_STAT(tx_bytes, TX_BYTES);
- MAC_STAT(tx_bad_bytes, TX_BAD_BYTES);
- efx_update_diff_stat(&mac_stats->tx_good_bytes,
- mac_stats->tx_bytes - mac_stats->tx_bad_bytes);
- MAC_STAT(tx_packets, TX_PKTS);
- MAC_STAT(tx_bad, TX_BAD_FCS_PKTS);
- MAC_STAT(tx_pause, TX_PAUSE_PKTS);
- MAC_STAT(tx_control, TX_CONTROL_PKTS);
- MAC_STAT(tx_unicast, TX_UNICAST_PKTS);
- MAC_STAT(tx_multicast, TX_MULTICAST_PKTS);
- MAC_STAT(tx_broadcast, TX_BROADCAST_PKTS);
- MAC_STAT(tx_lt64, TX_LT64_PKTS);
- MAC_STAT(tx_64, TX_64_PKTS);
- MAC_STAT(tx_65_to_127, TX_65_TO_127_PKTS);
- MAC_STAT(tx_128_to_255, TX_128_TO_255_PKTS);
- MAC_STAT(tx_256_to_511, TX_256_TO_511_PKTS);
- MAC_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS);
- MAC_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS);
- MAC_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS);
- MAC_STAT(tx_gtjumbo, TX_GTJUMBO_PKTS);
- mac_stats->tx_collision = 0;
- MAC_STAT(tx_single_collision, TX_SINGLE_COLLISION_PKTS);
- MAC_STAT(tx_multiple_collision, TX_MULTIPLE_COLLISION_PKTS);
- MAC_STAT(tx_excessive_collision, TX_EXCESSIVE_COLLISION_PKTS);
- MAC_STAT(tx_deferred, TX_DEFERRED_PKTS);
- MAC_STAT(tx_late_collision, TX_LATE_COLLISION_PKTS);
- mac_stats->tx_collision = (mac_stats->tx_single_collision +
- mac_stats->tx_multiple_collision +
- mac_stats->tx_excessive_collision +
- mac_stats->tx_late_collision);
- MAC_STAT(tx_excessive_deferred, TX_EXCESSIVE_DEFERRED_PKTS);
- MAC_STAT(tx_non_tcpudp, TX_NON_TCPUDP_PKTS);
- MAC_STAT(tx_mac_src_error, TX_MAC_SRC_ERR_PKTS);
- MAC_STAT(tx_ip_src_error, TX_IP_SRC_ERR_PKTS);
- MAC_STAT(rx_bytes, RX_BYTES);
- MAC_STAT(rx_bad_bytes, RX_BAD_BYTES);
- efx_update_diff_stat(&mac_stats->rx_good_bytes,
- mac_stats->rx_bytes - mac_stats->rx_bad_bytes);
- MAC_STAT(rx_packets, RX_PKTS);
- MAC_STAT(rx_good, RX_GOOD_PKTS);
- MAC_STAT(rx_bad, RX_BAD_FCS_PKTS);
- MAC_STAT(rx_pause, RX_PAUSE_PKTS);
- MAC_STAT(rx_control, RX_CONTROL_PKTS);
- MAC_STAT(rx_unicast, RX_UNICAST_PKTS);
- MAC_STAT(rx_multicast, RX_MULTICAST_PKTS);
- MAC_STAT(rx_broadcast, RX_BROADCAST_PKTS);
- MAC_STAT(rx_lt64, RX_UNDERSIZE_PKTS);
- MAC_STAT(rx_64, RX_64_PKTS);
- MAC_STAT(rx_65_to_127, RX_65_TO_127_PKTS);
- MAC_STAT(rx_128_to_255, RX_128_TO_255_PKTS);
- MAC_STAT(rx_256_to_511, RX_256_TO_511_PKTS);
- MAC_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS);
- MAC_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS);
- MAC_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS);
- MAC_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS);
- mac_stats->rx_bad_lt64 = 0;
- mac_stats->rx_bad_64_to_15xx = 0;
- mac_stats->rx_bad_15xx_to_jumbo = 0;
- MAC_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS);
- MAC_STAT(rx_overflow, RX_OVERFLOW_PKTS);
- mac_stats->rx_missed = 0;
- MAC_STAT(rx_false_carrier, RX_FALSE_CARRIER_PKTS);
- MAC_STAT(rx_symbol_error, RX_SYMBOL_ERROR_PKTS);
- MAC_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS);
- MAC_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS);
- MAC_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS);
- mac_stats->rx_good_lt64 = 0;
-
- efx->n_rx_nodesc_drop_cnt =
- le64_to_cpu(dma_stats[MC_CMD_MAC_RX_NODESC_DROPS]);
-
-#undef MAC_STAT
-
+ efx_nic_update_stats(siena_stat_desc, SIENA_STAT_COUNT, siena_stat_mask,
+ stats, efx->stats_buffer.addr, false);
rmb();
generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
if (generation_end != generation_start)
return -EAGAIN;
+ /* Update derived statistics */
+ efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes],
+ stats[SIENA_STAT_tx_bytes] -
+ stats[SIENA_STAT_tx_bad_bytes]);
+ stats[SIENA_STAT_tx_collision] =
+ stats[SIENA_STAT_tx_single_collision] +
+ stats[SIENA_STAT_tx_multiple_collision] +
+ stats[SIENA_STAT_tx_excessive_collision] +
+ stats[SIENA_STAT_tx_late_collision];
+ efx_update_diff_stat(&stats[SIENA_STAT_rx_good_bytes],
+ stats[SIENA_STAT_rx_bytes] -
+ stats[SIENA_STAT_rx_bad_bytes]);
return 0;
}
-static void siena_update_nic_stats(struct efx_nic *efx)
+static size_t siena_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
int retry;
/* If we're unlucky enough to read statistics during the DMA, wait
 * up to 10ms for it to finish (typically takes <500us) */
for (retry = 0; retry < 100; ++retry) {
if (siena_try_update_nic_stats(efx) == 0)
- return;
+ break;
udelay(100);
}
- /* Use the old values instead */
+ if (full_stats)
+ memcpy(full_stats, stats, sizeof(u64) * SIENA_STAT_COUNT);
+
+ if (core_stats) {
+ core_stats->rx_packets = stats[SIENA_STAT_rx_packets];
+ core_stats->tx_packets = stats[SIENA_STAT_tx_packets];
+ core_stats->rx_bytes = stats[SIENA_STAT_rx_bytes];
+ core_stats->tx_bytes = stats[SIENA_STAT_tx_bytes];
+ core_stats->rx_dropped = stats[SIENA_STAT_rx_nodesc_drop_cnt];
+ core_stats->multicast = stats[SIENA_STAT_rx_multicast];
+ core_stats->collisions = stats[SIENA_STAT_tx_collision];
+ core_stats->rx_length_errors =
+ stats[SIENA_STAT_rx_gtjumbo] +
+ stats[SIENA_STAT_rx_length_error];
+ core_stats->rx_crc_errors = stats[SIENA_STAT_rx_bad];
+ core_stats->rx_frame_errors = stats[SIENA_STAT_rx_align_error];
+ core_stats->rx_fifo_errors = stats[SIENA_STAT_rx_overflow];
+ core_stats->tx_window_errors =
+ stats[SIENA_STAT_tx_late_collision];
+
+ core_stats->rx_errors = (core_stats->rx_length_errors +
+ core_stats->rx_crc_errors +
+ core_stats->rx_frame_errors +
+ stats[SIENA_STAT_rx_symbol_error]);
+ core_stats->tx_errors = (core_stats->tx_window_errors +
+ stats[SIENA_STAT_tx_bad]);
+ }
+
+ return SIENA_STAT_COUNT;
}
-static void siena_start_nic_stats(struct efx_nic *efx)
+static int siena_mac_reconfigure(struct efx_nic *efx)
{
- __le64 *dma_stats = efx->stats_buffer.addr;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MCAST_HASH_IN_LEN);
+ int rc;
- dma_stats[MC_CMD_MAC_GENERATION_END] = STATS_GENERATION_INVALID;
+ BUILD_BUG_ON(MC_CMD_SET_MCAST_HASH_IN_LEN !=
+ MC_CMD_SET_MCAST_HASH_IN_HASH0_OFST +
+ sizeof(efx->multicast_hash));
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr,
- MC_CMD_MAC_NSTATS * sizeof(u64), 1, 0);
-}
+ efx_farch_filter_sync_rx_mode(efx);
-static void siena_stop_nic_stats(struct efx_nic *efx)
-{
- efx_mcdi_mac_stats(efx, efx->stats_buffer.dma_addr, 0, 0, 0);
+ WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+ rc = efx_mcdi_set_mac(efx);
+ if (rc != 0)
+ return rc;
+
+ memcpy(MCDI_PTR(inbuf, SET_MCAST_HASH_IN_HASH0),
+ efx->multicast_hash.byte, sizeof(efx->multicast_hash));
+ return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
+ inbuf, sizeof(inbuf), NULL, 0, NULL);
}
/**************************************************************************
@@ -669,6 +612,241 @@ static void siena_init_wol(struct efx_nic *efx)
}
}
+/**************************************************************************
+ *
+ * MCDI
+ *
+ **************************************************************************
+ */
+
+#define MCDI_PDU(efx) \
+ (efx_port_num(efx) ? MC_SMEM_P1_PDU_OFST : MC_SMEM_P0_PDU_OFST)
+#define MCDI_DOORBELL(efx) \
+ (efx_port_num(efx) ? MC_SMEM_P1_DOORBELL_OFST : MC_SMEM_P0_DOORBELL_OFST)
+#define MCDI_STATUS(efx) \
+ (efx_port_num(efx) ? MC_SMEM_P1_STATUS_OFST : MC_SMEM_P0_STATUS_OFST)
+
+static void siena_mcdi_request(struct efx_nic *efx,
+ const efx_dword_t *hdr, size_t hdr_len,
+ const efx_dword_t *sdu, size_t sdu_len)
+{
+ unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
+ unsigned int i;
+ unsigned int inlen_dw = DIV_ROUND_UP(sdu_len, 4);
+
+ EFX_BUG_ON_PARANOID(hdr_len != 4);
+
+ efx_writed(efx, hdr, pdu);
+
+ for (i = 0; i < inlen_dw; i++)
+ efx_writed(efx, &sdu[i], pdu + hdr_len + 4 * i);
+
+ /* Ensure the request is written out before the doorbell */
+ wmb();
+
+ /* ring the doorbell with a distinctive value */
+ _efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
+}
+
+static bool siena_mcdi_poll_response(struct efx_nic *efx)
+{
+ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ efx_dword_t hdr;
+
+ efx_readd(efx, &hdr, pdu);
+
+ /* All 1's indicates that shared memory is in reset (and is
+ * not a valid hdr). Wait for it to come out of reset before
+ * completing the command
+ */
+ return EFX_DWORD_FIELD(hdr, EFX_DWORD_0) != 0xffffffff &&
+ EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
+}
+
+static void siena_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
+ size_t offset, size_t outlen)
+{
+ unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
+ unsigned int outlen_dw = DIV_ROUND_UP(outlen, 4);
+ int i;
+
+ for (i = 0; i < outlen_dw; i++)
+ efx_readd(efx, &outbuf[i], pdu + offset + 4 * i);
+}
+
+static int siena_mcdi_poll_reboot(struct efx_nic *efx)
+{
+ struct siena_nic_data *nic_data = efx->nic_data;
+ unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_STATUS(efx);
+ efx_dword_t reg;
+ u32 value;
+
+ efx_readd(efx, &reg, addr);
+ value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
+
+ if (value == 0)
+ return 0;
+
+ EFX_ZERO_DWORD(reg);
+ efx_writed(efx, &reg, addr);
+
+ /* MAC statistics have been cleared on the NIC; clear the local
+ * copies that we update with efx_update_diff_stat().
+ */
+ nic_data->stats[SIENA_STAT_tx_good_bytes] = 0;
+ nic_data->stats[SIENA_STAT_rx_good_bytes] = 0;
+
+ if (value == MC_STATUS_DWORD_ASSERT)
+ return -EINTR;
+ else
+ return -EIO;
+}
+
+/**************************************************************************
+ *
+ * MTD
+ *
+ **************************************************************************
+ */
+
+#ifdef CONFIG_SFC_MTD
+
+struct siena_nvram_type_info {
+ int port;
+ const char *name;
+};
+
+static const struct siena_nvram_type_info siena_nvram_types[] = {
+ [MC_CMD_NVRAM_TYPE_DISABLED_CALLISTO] = { 0, "sfc_dummy_phy" },
+ [MC_CMD_NVRAM_TYPE_MC_FW] = { 0, "sfc_mcfw" },
+ [MC_CMD_NVRAM_TYPE_MC_FW_BACKUP] = { 0, "sfc_mcfw_backup" },
+ [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT0] = { 0, "sfc_static_cfg" },
+ [MC_CMD_NVRAM_TYPE_STATIC_CFG_PORT1] = { 1, "sfc_static_cfg" },
+ [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT0] = { 0, "sfc_dynamic_cfg" },
+ [MC_CMD_NVRAM_TYPE_DYNAMIC_CFG_PORT1] = { 1, "sfc_dynamic_cfg" },
+ [MC_CMD_NVRAM_TYPE_EXP_ROM] = { 0, "sfc_exp_rom" },
+ [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT0] = { 0, "sfc_exp_rom_cfg" },
+ [MC_CMD_NVRAM_TYPE_EXP_ROM_CFG_PORT1] = { 1, "sfc_exp_rom_cfg" },
+ [MC_CMD_NVRAM_TYPE_PHY_PORT0] = { 0, "sfc_phy_fw" },
+ [MC_CMD_NVRAM_TYPE_PHY_PORT1] = { 1, "sfc_phy_fw" },
+ [MC_CMD_NVRAM_TYPE_FPGA] = { 0, "sfc_fpga" },
+};
+
+static int siena_mtd_probe_partition(struct efx_nic *efx,
+ struct efx_mcdi_mtd_partition *part,
+ unsigned int type)
+{
+ const struct siena_nvram_type_info *info;
+ size_t size, erase_size;
+ bool protected;
+ int rc;
+
+ if (type >= ARRAY_SIZE(siena_nvram_types) ||
+ siena_nvram_types[type].name == NULL)
+ return -ENODEV;
+
+ info = &siena_nvram_types[type];
+
+ if (info->port != efx_port_num(efx))
+ return -ENODEV;
+
+ rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
+ if (rc)
+ return rc;
+ if (protected)
+ return -ENODEV; /* hide it */
+
+ part->nvram_type = type;
+ part->common.dev_type_name = "Siena NVRAM manager";
+ part->common.type_name = info->name;
+
+ part->common.mtd.type = MTD_NORFLASH;
+ part->common.mtd.flags = MTD_CAP_NORFLASH;
+ part->common.mtd.size = size;
+ part->common.mtd.erasesize = erase_size;
+
+ return 0;
+}
+
+static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
+ struct efx_mcdi_mtd_partition *parts,
+ size_t n_parts)
+{
+ uint16_t fw_subtype_list[
+ MC_CMD_GET_BOARD_CFG_OUT_FW_SUBTYPE_LIST_MAXNUM];
+ size_t i;
+ int rc;
+
+ rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < n_parts; i++)
+ parts[i].fw_subtype = fw_subtype_list[parts[i].nvram_type];
+
+ return 0;
+}
+
+static int siena_mtd_probe(struct efx_nic *efx)
+{
+ struct efx_mcdi_mtd_partition *parts;
+ u32 nvram_types;
+ unsigned int type;
+ size_t n_parts;
+ int rc;
+
+ ASSERT_RTNL();
+
+ rc = efx_mcdi_nvram_types(efx, &nvram_types);
+ if (rc)
+ return rc;
+
+ parts = kcalloc(hweight32(nvram_types), sizeof(*parts), GFP_KERNEL);
+ if (!parts)
+ return -ENOMEM;
+
+ type = 0;
+ n_parts = 0;
+
+ while (nvram_types != 0) {
+ if (nvram_types & 1) {
+ rc = siena_mtd_probe_partition(efx, &parts[n_parts],
+ type);
+ if (rc == 0)
+ n_parts++;
+ else if (rc != -ENODEV)
+ goto fail;
+ }
+ type++;
+ nvram_types >>= 1;
+ }
+
+ rc = siena_mtd_get_fw_subtypes(efx, parts, n_parts);
+ if (rc)
+ goto fail;
+
+ rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
+fail:
+ if (rc)
+ kfree(parts);
+ return rc;
+}
+
+#endif /* CONFIG_SFC_MTD */
+
+/**************************************************************************
+ *
+ * PTP
+ *
+ **************************************************************************
+ */
+
+static void siena_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
+{
+ _efx_writed(efx, cpu_to_le32(host_time),
+ FR_CZ_MC_TREG_SMEM + MC_SMEM_P0_PTP_TIME_OFST);
+}
/**************************************************************************
*
@@ -678,6 +856,7 @@ static void siena_init_wol(struct efx_nic *efx)
*/
const struct efx_nic_type siena_a0_nic_type = {
+ .mem_map_size = siena_mem_map_size,
.probe = siena_probe_nic,
.remove = siena_remove_nic,
.init = siena_init_nic,
@@ -688,44 +867,94 @@ const struct efx_nic_type siena_a0_nic_type = {
#else
.monitor = NULL,
#endif
- .map_reset_reason = siena_map_reset_reason,
+ .map_reset_reason = efx_mcdi_map_reset_reason,
.map_reset_flags = siena_map_reset_flags,
- .reset = siena_reset_hw,
- .probe_port = siena_probe_port,
- .remove_port = siena_remove_port,
+ .reset = efx_mcdi_reset,
+ .probe_port = efx_mcdi_port_probe,
+ .remove_port = efx_mcdi_port_remove,
+ .fini_dmaq = efx_farch_fini_dmaq,
.prepare_flush = siena_prepare_flush,
.finish_flush = siena_finish_flush,
+ .describe_stats = siena_describe_nic_stats,
.update_stats = siena_update_nic_stats,
- .start_stats = siena_start_nic_stats,
- .stop_stats = siena_stop_nic_stats,
+ .start_stats = efx_mcdi_mac_start_stats,
+ .stop_stats = efx_mcdi_mac_stop_stats,
.set_id_led = efx_mcdi_set_id_led,
.push_irq_moderation = siena_push_irq_moderation,
- .reconfigure_mac = efx_mcdi_mac_reconfigure,
+ .reconfigure_mac = siena_mac_reconfigure,
.check_mac_fault = efx_mcdi_mac_check_fault,
- .reconfigure_port = efx_mcdi_phy_reconfigure,
+ .reconfigure_port = efx_mcdi_port_reconfigure,
.get_wol = siena_get_wol,
.set_wol = siena_set_wol,
.resume_wol = siena_init_wol,
.test_chip = siena_test_chip,
.test_nvram = efx_mcdi_nvram_test_all,
+ .mcdi_request = siena_mcdi_request,
+ .mcdi_poll_response = siena_mcdi_poll_response,
+ .mcdi_read_response = siena_mcdi_read_response,
+ .mcdi_poll_reboot = siena_mcdi_poll_reboot,
+ .irq_enable_master = efx_farch_irq_enable_master,
+ .irq_test_generate = efx_farch_irq_test_generate,
+ .irq_disable_non_ev = efx_farch_irq_disable_master,
+ .irq_handle_msi = efx_farch_msi_interrupt,
+ .irq_handle_legacy = efx_farch_legacy_interrupt,
+ .tx_probe = efx_farch_tx_probe,
+ .tx_init = efx_farch_tx_init,
+ .tx_remove = efx_farch_tx_remove,
+ .tx_write = efx_farch_tx_write,
+ .rx_push_indir_table = efx_farch_rx_push_indir_table,
+ .rx_probe = efx_farch_rx_probe,
+ .rx_init = efx_farch_rx_init,
+ .rx_remove = efx_farch_rx_remove,
+ .rx_write = efx_farch_rx_write,
+ .rx_defer_refill = efx_farch_rx_defer_refill,
+ .ev_probe = efx_farch_ev_probe,
+ .ev_init = efx_farch_ev_init,
+ .ev_fini = efx_farch_ev_fini,
+ .ev_remove = efx_farch_ev_remove,
+ .ev_process = efx_farch_ev_process,
+ .ev_read_ack = efx_farch_ev_read_ack,
+ .ev_test_generate = efx_farch_ev_test_generate,
+ .filter_table_probe = efx_farch_filter_table_probe,
+ .filter_table_restore = efx_farch_filter_table_restore,
+ .filter_table_remove = efx_farch_filter_table_remove,
+ .filter_update_rx_scatter = efx_farch_filter_update_rx_scatter,
+ .filter_insert = efx_farch_filter_insert,
+ .filter_remove_safe = efx_farch_filter_remove_safe,
+ .filter_get_safe = efx_farch_filter_get_safe,
+ .filter_clear_rx = efx_farch_filter_clear_rx,
+ .filter_count_rx_used = efx_farch_filter_count_rx_used,
+ .filter_get_rx_id_limit = efx_farch_filter_get_rx_id_limit,
+ .filter_get_rx_ids = efx_farch_filter_get_rx_ids,
+#ifdef CONFIG_RFS_ACCEL
+ .filter_rfs_insert = efx_farch_filter_rfs_insert,
+ .filter_rfs_expire_one = efx_farch_filter_rfs_expire_one,
+#endif
+#ifdef CONFIG_SFC_MTD
+ .mtd_probe = siena_mtd_probe,
+ .mtd_rename = efx_mcdi_mtd_rename,
+ .mtd_read = efx_mcdi_mtd_read,
+ .mtd_erase = efx_mcdi_mtd_erase,
+ .mtd_write = efx_mcdi_mtd_write,
+ .mtd_sync = efx_mcdi_mtd_sync,
+#endif
+ .ptp_write_host_time = siena_ptp_write_host_time,
.revision = EFX_REV_SIENA_A0,
- .mem_map_size = (FR_CZ_MC_TREG_SMEM +
- FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
.buf_tbl_base = FR_BZ_BUF_FULL_TBL,
.evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
.evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
- .rx_buffer_hash_size = 0x10,
+ .rx_prefix_size = FS_BZ_RX_PREFIX_SIZE,
+ .rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
.rx_buffer_padding = 0,
.can_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
- .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
- * interrupt handler only supports 32
- * channels */
.timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXHASH | NETIF_F_NTUPLE),
+ .mcdi_max_ver = 1,
+ .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 90f8d1604f5..0c38f926871 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2010-2011 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2010-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -15,7 +15,7 @@
#include "mcdi.h"
#include "filter.h"
#include "mcdi_pcol.h"
-#include "regs.h"
+#include "farch_regs.h"
#include "vfdi.h"
/* Number of longs required to track all the VIs in a VF */
@@ -197,8 +197,8 @@ static unsigned abs_index(struct efx_vf *vf, unsigned index)
static int efx_sriov_cmd(struct efx_nic *efx, bool enable,
unsigned *vi_scale_out, unsigned *vf_total_out)
{
- u8 inbuf[MC_CMD_SRIOV_IN_LEN];
- u8 outbuf[MC_CMD_SRIOV_OUT_LEN];
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_SRIOV_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_SRIOV_OUT_LEN);
unsigned vi_scale, vf_total;
size_t outlen;
int rc;
@@ -240,64 +240,55 @@ static void efx_sriov_usrev(struct efx_nic *efx, bool enabled)
static int efx_sriov_memcpy(struct efx_nic *efx, struct efx_memcpy_req *req,
unsigned int count)
{
- u8 *inbuf, *record;
- unsigned int used;
- u32 from_rid, from_hi, from_lo;
+ MCDI_DECLARE_BUF(inbuf, MCDI_CTL_SDU_LEN_MAX_V1);
+ MCDI_DECLARE_STRUCT_PTR(record);
+ unsigned int index, used;
+ u64 from_addr;
+ u32 from_rid;
int rc;
mb(); /* Finish writing source/reading dest before DMA starts */
- used = MC_CMD_MEMCPY_IN_LEN(count);
- if (WARN_ON(used > MCDI_CTL_SDU_LEN_MAX))
+ if (WARN_ON(count > MC_CMD_MEMCPY_IN_RECORD_MAXNUM))
return -ENOBUFS;
+ used = MC_CMD_MEMCPY_IN_LEN(count);
- /* Allocate room for the largest request */
- inbuf = kzalloc(MCDI_CTL_SDU_LEN_MAX, GFP_KERNEL);
- if (inbuf == NULL)
- return -ENOMEM;
-
- record = inbuf;
- MCDI_SET_DWORD(record, MEMCPY_IN_RECORD, count);
- while (count-- > 0) {
+ for (index = 0; index < count; index++) {
+ record = MCDI_ARRAY_STRUCT_PTR(inbuf, MEMCPY_IN_RECORD, index);
+ MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_NUM_RECORDS,
+ count);
MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_RID,
req->to_rid);
- MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_LO,
- (u32)req->to_addr);
- MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR_HI,
- (u32)(req->to_addr >> 32));
+ MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_TO_ADDR,
+ req->to_addr);
if (req->from_buf == NULL) {
from_rid = req->from_rid;
- from_lo = (u32)req->from_addr;
- from_hi = (u32)(req->from_addr >> 32);
+ from_addr = req->from_addr;
} else {
- if (WARN_ON(used + req->length > MCDI_CTL_SDU_LEN_MAX)) {
+ if (WARN_ON(used + req->length >
+ MCDI_CTL_SDU_LEN_MAX_V1)) {
rc = -ENOBUFS;
goto out;
}
from_rid = MC_CMD_MEMCPY_RECORD_TYPEDEF_RID_INLINE;
- from_lo = used;
- from_hi = 0;
- memcpy(inbuf + used, req->from_buf, req->length);
+ from_addr = used;
+ memcpy(_MCDI_PTR(inbuf, used), req->from_buf,
+ req->length);
used += req->length;
}
MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_RID, from_rid);
- MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_LO,
- from_lo);
- MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR_HI,
- from_hi);
+ MCDI_SET_QWORD(record, MEMCPY_RECORD_TYPEDEF_FROM_ADDR,
+ from_addr);
MCDI_SET_DWORD(record, MEMCPY_RECORD_TYPEDEF_LENGTH,
req->length);
++req;
- record += MC_CMD_MEMCPY_IN_RECORD_LEN;
}
rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL);
out:
- kfree(inbuf);
-
mb(); /* Don't write source/read dest before DMA is complete */
return rc;
@@ -473,8 +464,9 @@ static void __efx_sriov_push_vf_status(struct efx_vf *vf)
VFDI_EV_SEQ, (vf->msg_seqno & 0xff),
VFDI_EV_TYPE, VFDI_EV_TYPE_STATUS);
++vf->msg_seqno;
- efx_generate_event(efx, EFX_VI_BASE + vf->index * efx_vf_size(efx),
- &event);
+ efx_farch_generate_event(efx,
+ EFX_VI_BASE + vf->index * efx_vf_size(efx),
+ &event);
}
static void efx_sriov_bufs(struct efx_nic *efx, unsigned offset,
@@ -684,16 +676,12 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
unsigned vf_offset = EFX_VI_BASE + vf->index * efx_vf_size(efx);
unsigned timeout = HZ;
unsigned index, rxqs_count;
- __le32 *rxqs;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FLUSH_RX_QUEUES_IN_LENMAX);
int rc;
BUILD_BUG_ON(VF_MAX_RX_QUEUES >
MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
- rxqs = kmalloc(count * sizeof(*rxqs), GFP_KERNEL);
- if (rxqs == NULL)
- return VFDI_RC_ENOMEM;
-
rtnl_lock();
siena_prepare_flush(efx);
rtnl_unlock();
@@ -708,14 +696,19 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
vf_offset + index);
efx_writeo(efx, &reg, FR_AZ_TX_FLUSH_DESCQ);
}
- if (test_bit(index, vf->rxq_mask))
- rxqs[rxqs_count++] = cpu_to_le32(vf_offset + index);
+ if (test_bit(index, vf->rxq_mask)) {
+ MCDI_SET_ARRAY_DWORD(
+ inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+ rxqs_count, vf_offset + index);
+ rxqs_count++;
+ }
}
atomic_set(&vf->rxq_retry_count, 0);
while (timeout && (vf->rxq_count || vf->txq_count)) {
- rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, (u8 *)rxqs,
- rxqs_count * sizeof(*rxqs), NULL, 0, NULL);
+ rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
+ MC_CMD_FLUSH_RX_QUEUES_IN_LEN(rxqs_count),
+ NULL, 0, NULL);
WARN_ON(rc < 0);
timeout = wait_event_timeout(vf->flush_waitq,
@@ -725,8 +718,10 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
for (index = 0; index < count; ++index) {
if (test_and_clear_bit(index, vf->rxq_retry_mask)) {
atomic_dec(&vf->rxq_retry_count);
- rxqs[rxqs_count++] =
- cpu_to_le32(vf_offset + index);
+ MCDI_SET_ARRAY_DWORD(
+ inbuf, FLUSH_RX_QUEUES_IN_QID_OFST,
+ rxqs_count, vf_offset + index);
+ rxqs_count++;
}
}
}
@@ -749,7 +744,6 @@ static int efx_vfdi_fini_all_queues(struct efx_vf *vf)
}
efx_sriov_bufs(efx, vf->buftbl_base, NULL,
EFX_VF_BUFTBL_PER_VI * efx_vf_size(efx));
- kfree(rxqs);
efx_vfdi_flush_clear(vf);
vf->evq0_count = 0;
@@ -1004,7 +998,7 @@ static void efx_sriov_reset_vf_work(struct work_struct *work)
struct efx_nic *efx = vf->efx;
struct efx_buffer buf;
- if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE)) {
+ if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) {
efx_sriov_reset_vf(vf, &buf);
efx_nic_free_buffer(efx, &buf);
}
@@ -1248,7 +1242,8 @@ static int efx_sriov_vfs_init(struct efx_nic *efx)
pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
PCI_SLOT(devfn), PCI_FUNC(devfn));
- rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE);
+ rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE,
+ GFP_KERNEL);
if (rc)
goto fail;
@@ -1280,7 +1275,8 @@ int efx_sriov_init(struct efx_nic *efx)
if (rc)
goto fail_cmd;
- rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status));
+ rc = efx_nic_alloc_buffer(efx, &efx->vfdi_status, sizeof(*vfdi_status),
+ GFP_KERNEL);
if (rc)
goto fail_status;
vfdi_status = efx->vfdi_status.addr;
@@ -1535,7 +1531,7 @@ void efx_sriov_reset(struct efx_nic *efx)
efx_sriov_usrev(efx, true);
(void)efx_sriov_cmd(efx, true, NULL, NULL);
- if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE))
+ if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO))
return;
for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
diff --git a/drivers/net/ethernet/sfc/spi.h b/drivers/net/ethernet/sfc/spi.h
deleted file mode 100644
index 5431a1bbff5..00000000000
--- a/drivers/net/ethernet/sfc/spi.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2005 Fen Systems Ltd.
- * Copyright 2006-2010 Solarflare Communications Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation, incorporated herein by reference.
- */
-
-#ifndef EFX_SPI_H
-#define EFX_SPI_H
-
-#include "net_driver.h"
-
-/**************************************************************************
- *
- * Basic SPI command set and bit definitions
- *
- *************************************************************************/
-
-#define SPI_WRSR 0x01 /* Write status register */
-#define SPI_WRITE 0x02 /* Write data to memory array */
-#define SPI_READ 0x03 /* Read data from memory array */
-#define SPI_WRDI 0x04 /* Reset write enable latch */
-#define SPI_RDSR 0x05 /* Read status register */
-#define SPI_WREN 0x06 /* Set write enable latch */
-#define SPI_SST_EWSR 0x50 /* SST: Enable write to status register */
-
-#define SPI_STATUS_WPEN 0x80 /* Write-protect pin enabled */
-#define SPI_STATUS_BP2 0x10 /* Block protection bit 2 */
-#define SPI_STATUS_BP1 0x08 /* Block protection bit 1 */
-#define SPI_STATUS_BP0 0x04 /* Block protection bit 0 */
-#define SPI_STATUS_WEN 0x02 /* State of the write enable latch */
-#define SPI_STATUS_NRDY 0x01 /* Device busy flag */
-
-/**
- * struct efx_spi_device - an Efx SPI (Serial Peripheral Interface) device
- * @device_id: Controller's id for the device
- * @size: Size (in bytes)
- * @addr_len: Number of address bytes in read/write commands
- * @munge_address: Flag whether addresses should be munged.
- * Some devices with 9-bit addresses (e.g. AT25040A EEPROM)
- * use bit 3 of the command byte as address bit A8, rather
- * than having a two-byte address. If this flag is set, then
- * commands should be munged in this way.
- * @erase_command: Erase command (or 0 if sector erase not needed).
- * @erase_size: Erase sector size (in bytes)
- * Erase commands affect sectors with this size and alignment.
- * This must be a power of two.
- * @block_size: Write block size (in bytes).
- * Write commands are limited to blocks with this size and alignment.
- */
-struct efx_spi_device {
- int device_id;
- unsigned int size;
- unsigned int addr_len;
- unsigned int munge_address:1;
- u8 erase_command;
- unsigned int erase_size;
- unsigned int block_size;
-};
-
-static inline bool efx_spi_present(const struct efx_spi_device *spi)
-{
- return spi->size != 0;
-}
-
-int falcon_spi_cmd(struct efx_nic *efx,
- const struct efx_spi_device *spi, unsigned int command,
- int address, const void *in, void *out, size_t len);
-int falcon_spi_wait_write(struct efx_nic *efx,
- const struct efx_spi_device *spi);
-int falcon_spi_read(struct efx_nic *efx,
- const struct efx_spi_device *spi, loff_t start,
- size_t len, size_t *retlen, u8 *buffer);
-int falcon_spi_write(struct efx_nic *efx,
- const struct efx_spi_device *spi, loff_t start,
- size_t len, size_t *retlen, const u8 *buffer);
-
-/*
- * SFC4000 flash is partitioned into:
- * 0-0x400 chip and board config (see falcon_hwdefs.h)
- * 0x400-0x8000 unused (or may contain VPD if EEPROM not present)
- * 0x8000-end boot code (mapped to PCI expansion ROM)
- * SFC4000 small EEPROM (size < 0x400) is used for VPD only.
- * SFC4000 large EEPROM (size >= 0x400) is partitioned into:
- * 0-0x400 chip and board config
- * configurable VPD
- * 0x800-0x1800 boot config
- * Aside from the chip and board config, all of these are optional and may
- * be absent or truncated depending on the devices used.
- */
-#define FALCON_NVCONFIG_END 0x400U
-#define FALCON_FLASH_BOOTCODE_START 0x8000U
-#define EFX_EEPROM_BOOTCONFIG_START 0x800U
-#define EFX_EEPROM_BOOTCONFIG_END 0x1800U
-
-#endif /* EFX_SPI_H */
diff --git a/drivers/net/ethernet/sfc/tenxpress.c b/drivers/net/ethernet/sfc/tenxpress.c
index d37cb501712..2c90e6b3157 100644
--- a/drivers/net/ethernet/sfc/tenxpress.c
+++ b/drivers/net/ethernet/sfc/tenxpress.c
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2007-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 5e090e54298..2ac91c5b5ee 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -1,7 +1,7 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2005-2006 Fen Systems Ltd.
- * Copyright 2005-2010 Solarflare Communications Inc.
+ * Copyright 2005-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -306,7 +306,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
while (read_ptr != stop_index) {
struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
- if (unlikely(buffer->len == 0)) {
+
+ if (!(buffer->flags & EFX_TX_BUF_OPTION) &&
+ unlikely(buffer->len == 0)) {
netif_err(efx, tx_err, efx->net_dev,
"TX queue %d spurious TX completion id %x\n",
tx_queue->queue, read_ptr);
@@ -437,6 +439,9 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
+ if (pkts_compl > 1)
+ ++tx_queue->merge_events;
+
/* See if we need to restart the netif queue. This memory
* barrier ensures that we write read_count (inside
* efx_dequeue_buffers()) before reading the queue status.
@@ -543,10 +548,13 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->initialised = true;
}
-void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
{
struct efx_tx_buffer *buffer;
+ netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
+ "shutting down TX queue %d\n", tx_queue->queue);
+
if (!tx_queue->buffer)
return;
@@ -561,22 +569,6 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
netdev_tx_reset_queue(tx_queue->core_txq);
}
-void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
-{
- if (!tx_queue->initialised)
- return;
-
- netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
- "shutting down TX queue %d\n", tx_queue->queue);
-
- tx_queue->initialised = false;
-
- /* Flush TX queue, remove descriptor ring */
- efx_nic_fini_tx(tx_queue);
-
- efx_release_tx_buffers(tx_queue);
-}
-
void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
{
int i;
@@ -708,7 +700,8 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
if (unlikely(!page_buf->addr) &&
- efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE))
+ efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
+ GFP_ATOMIC))
return NULL;
result = (u8 *)page_buf->addr + offset;
diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c
index 29bb3f9941c..3d5ee325988 100644
--- a/drivers/net/ethernet/sfc/txc43128_phy.c
+++ b/drivers/net/ethernet/sfc/txc43128_phy.c
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2006-2011 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/vfdi.h b/drivers/net/ethernet/sfc/vfdi.h
index 225557caaf5..ae044f44936 100644
--- a/drivers/net/ethernet/sfc/vfdi.h
+++ b/drivers/net/ethernet/sfc/vfdi.h
@@ -1,5 +1,5 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
+ * Driver for Solarflare network controllers and boards
* Copyright 2010-2012 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/net/ethernet/sfc/workarounds.h b/drivers/net/ethernet/sfc/workarounds.h
index e4dd3a7f304..2310b75d4ec 100644
--- a/drivers/net/ethernet/sfc/workarounds.h
+++ b/drivers/net/ethernet/sfc/workarounds.h
@@ -1,6 +1,6 @@
/****************************************************************************
- * Driver for Solarflare Solarstorm network controllers and boards
- * Copyright 2006-2010 Solarflare Communications Inc.
+ * Driver for Solarflare network controllers and boards
+ * Copyright 2006-2013 Solarflare Communications Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
@@ -15,27 +15,15 @@
* Bug numbers are from Solarflare's Bugzilla.
*/
-#define EFX_WORKAROUND_ALWAYS(efx) 1
#define EFX_WORKAROUND_FALCON_A(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_A1)
#define EFX_WORKAROUND_FALCON_AB(efx) (efx_nic_rev(efx) <= EFX_REV_FALCON_B0)
#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
#define EFX_WORKAROUND_10G(efx) 1
-/* XAUI resets if link not detected */
-#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
-/* RX PCIe double split performance issue */
-#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
/* Bit-bashed I2C reads cause performance drop */
#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
-/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
- * or a PCIe error (bug 11028) */
-#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
-/* Transmit flow control may get disabled */
-#define EFX_WORKAROUND_11482 EFX_WORKAROUND_FALCON_AB
/* Truncated IPv4 packets can confuse the TX packet parser */
#define EFX_WORKAROUND_15592 EFX_WORKAROUND_FALCON_AB
-/* Legacy ISR read can return zero once */
-#define EFX_WORKAROUND_15783 EFX_WORKAROUND_ALWAYS
/* Legacy interrupt storm when interrupt fifo fills */
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
@@ -56,4 +44,10 @@
/* Leak overlength packets rather than free */
#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
+/* Lockup when writing event block registers at gen2/gen3 */
+#define EFX_EF10_WORKAROUND_35388(efx) \
+ (((struct efx_ef10_nic_data *)efx->nic_data)->workaround_35388)
+#define EFX_WORKAROUND_35388(efx) \
+ (efx_nic_rev(efx) == EFX_REV_HUNT_A0 && EFX_EF10_WORKAROUND_35388(efx))
+
#endif /* EFX_WORKAROUNDS_H */
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 9f5f35e041a..770036bc2d8 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -212,9 +212,8 @@ static void meth_check_link(struct net_device *dev)
static int meth_init_tx_ring(struct meth_private *priv)
{
/* Init TX ring */
- priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
- &priv->tx_ring_dma,
- GFP_ATOMIC | __GFP_ZERO);
+ priv->tx_ring = dma_zalloc_coherent(NULL, TX_RING_BUFFER_SIZE,
+ &priv->tx_ring_dma, GFP_ATOMIC);
if (!priv->tx_ring)
return -ENOMEM;
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 02df0894690..ee18e6f7b4f 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1770,9 +1770,6 @@ static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
struct sis190_private *tp = netdev_priv(dev);
unsigned long flags;
- if (regs->len > SIS190_REGS_SIZE)
- regs->len = SIS190_REGS_SIZE;
-
spin_lock_irqsave(&tp->lock, flags);
memcpy_fromio(p, tp->mmio_addr, regs->len);
spin_unlock_irqrestore(&tp->lock, flags);
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 6c1e34cd8ae..975dc2d8e54 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1309,23 +1309,9 @@ static void sis900_timer(unsigned long data)
struct sis900_private *sis_priv = netdev_priv(net_dev);
struct mii_phy *mii_phy = sis_priv->mii;
static const int next_tick = 5*HZ;
+ int speed = 0, duplex = 0;
u16 status;
- if (!sis_priv->autong_complete){
- int uninitialized_var(speed), duplex = 0;
-
- sis900_read_mode(net_dev, &speed, &duplex);
- if (duplex){
- sis900_set_mode(sis_priv, speed, duplex);
- sis630_set_eq(net_dev, sis_priv->chipset_rev);
- netif_start_queue(net_dev);
- }
-
- sis_priv->timer.expires = jiffies + HZ;
- add_timer(&sis_priv->timer);
- return;
- }
-
status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
status = mdio_read(net_dev, sis_priv->cur_phy, MII_STATUS);
@@ -1336,9 +1322,15 @@ static void sis900_timer(unsigned long data)
status = sis900_default_phy(net_dev);
mii_phy = sis_priv->mii;
- if (status & MII_STAT_LINK){
- sis900_check_mode(net_dev, mii_phy);
- netif_carrier_on(net_dev);
+ if (status & MII_STAT_LINK) {
+ WARN_ON(!(status & MII_STAT_AUTO_DONE));
+
+ sis900_read_mode(net_dev, &speed, &duplex);
+ if (duplex) {
+ sis900_set_mode(sis_priv, speed, duplex);
+ sis630_set_eq(net_dev, sis_priv->chipset_rev);
+ netif_carrier_on(net_dev);
+ }
}
} else {
/* Link ON -> OFF */
@@ -1612,12 +1604,6 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
unsigned int index_cur_tx, index_dirty_tx;
unsigned int count_dirty_tx;
- /* Don't transmit data before the complete of auto-negotiation */
- if(!sis_priv->autong_complete){
- netif_stop_queue(net_dev);
- return NETDEV_TX_BUSY;
- }
-
spin_lock_irqsave(&sis_priv->lock, flags);
/* Calculate the next Tx descriptor entry. */
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 345558fe736..afe01c4088a 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -2067,7 +2067,7 @@ static int smc911x_drv_probe(struct platform_device *pdev)
lp->netdev = ndev;
#ifdef SMC_DYNAMIC_BUS_CONFIG
{
- struct smc911x_platdata *pd = pdev->dev.platform_data;
+ struct smc911x_platdata *pd = dev_get_platdata(&pdev->dev);
if (!pd) {
ret = -EINVAL;
goto release_both;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index cde13be7c7d..73be7f3982e 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2202,7 +2202,7 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device *
*/
static int smc_drv_probe(struct platform_device *pdev)
{
- struct smc91x_platdata *pd = pdev->dev.platform_data;
+ struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev);
struct smc_local *lp;
struct net_device *ndev;
struct resource *res, *ires;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index a1419211585..5fdbc2686eb 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2374,7 +2374,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct net_device *dev;
struct smsc911x_data *pdata;
- struct smsc911x_platform_config *config = pdev->dev.platform_data;
+ struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
struct resource *res, *irq_res;
unsigned int intcfg = 0;
int res_size, irq_flags;
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index c9d942a5c33..1ef9d8a555a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -33,10 +33,15 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
struct stmmac_priv *priv = (struct stmmac_priv *)p;
unsigned int txsize = priv->dma_tx_size;
unsigned int entry = priv->cur_tx % txsize;
- struct dma_desc *desc = priv->dma_tx + entry;
+ struct dma_desc *desc;
unsigned int nopaged_len = skb_headlen(skb);
unsigned int bmax, len;
+ if (priv->extend_desc)
+ desc = (struct dma_desc *)(priv->dma_etx + entry);
+ else
+ desc = priv->dma_tx + entry;
+
if (priv->plat->enh_desc)
bmax = BUF_SIZE_8KiB;
else
@@ -54,7 +59,11 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
STMMAC_RING_MODE);
wmb();
entry = (++priv->cur_tx) % txsize;
- desc = priv->dma_tx + entry;
+
+ if (priv->extend_desc)
+ desc = (struct dma_desc *)(priv->dma_etx + entry);
+ else
+ desc = priv->dma_tx + entry;
desc->des2 = dma_map_single(priv->device, skb->data + bmax,
len, DMA_TO_DEVICE);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index c922fde929a..f16a9bdf45b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -70,7 +70,6 @@ struct stmmac_priv {
struct net_device *dev;
struct device *device;
struct mac_device_info *hw;
- int no_csum_insertion;
spinlock_t lock;
struct phy_device *phydev ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
index def7e75e1d5..76ad214b403 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -45,8 +45,8 @@ static void stmmac_config_sub_second_increment(void __iomem *ioaddr)
data = (1000000000ULL / 50000000);
/* 0.465ns accuracy */
- if (value & PTP_TCR_TSCTRLSSR)
- data = (data * 100) / 465;
+ if (!(value & PTP_TCR_TSCTRLSSR))
+ data = (data * 1000) / 465;
writel(data, ioaddr + PTP_SSIR);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index f2ccb36e868..8d4ccd35a01 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -939,15 +939,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
GFP_KERNEL);
- if (unlikely(skb == NULL)) {
+ if (!skb) {
pr_err("%s: Rx init fails; skb is NULL\n", __func__);
- return 1;
+ return -ENOMEM;
}
skb_reserve(skb, NET_IP_ALIGN);
priv->rx_skbuff[i] = skb;
priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
priv->dma_buf_sz,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
+ pr_err("%s: DMA mapping error\n", __func__);
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
p->des2 = priv->rx_skbuff_dma[i];
@@ -958,6 +963,16 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
return 0;
}
+static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
+{
+ if (priv->rx_skbuff[i]) {
+ dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(priv->rx_skbuff[i]);
+ }
+ priv->rx_skbuff[i] = NULL;
+}
+
/**
* init_dma_desc_rings - init the RX/TX descriptor rings
* @dev: net device structure
@@ -965,13 +980,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
* and allocates the socket buffers. It supports the chained and ring
* modes.
*/
-static void init_dma_desc_rings(struct net_device *dev)
+static int init_dma_desc_rings(struct net_device *dev)
{
int i;
struct stmmac_priv *priv = netdev_priv(dev);
unsigned int txsize = priv->dma_tx_size;
unsigned int rxsize = priv->dma_rx_size;
unsigned int bfsize = 0;
+ int ret = -ENOMEM;
/* Set the max buffer size according to the DESC mode
* and the MTU. Note that RING mode allows 16KiB bsize.
@@ -992,34 +1008,60 @@ static void init_dma_desc_rings(struct net_device *dev)
dma_extended_desc),
&priv->dma_rx_phy,
GFP_KERNEL);
+ if (!priv->dma_erx)
+ goto err_dma;
+
priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
sizeof(struct
dma_extended_desc),
&priv->dma_tx_phy,
GFP_KERNEL);
- if ((!priv->dma_erx) || (!priv->dma_etx))
- return;
+ if (!priv->dma_etx) {
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
+ priv->dma_erx, priv->dma_rx_phy);
+ goto err_dma;
+ }
} else {
priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
sizeof(struct dma_desc),
&priv->dma_rx_phy,
GFP_KERNEL);
+ if (!priv->dma_rx)
+ goto err_dma;
+
priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
sizeof(struct dma_desc),
&priv->dma_tx_phy,
GFP_KERNEL);
- if ((!priv->dma_rx) || (!priv->dma_tx))
- return;
+ if (!priv->dma_tx) {
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+ goto err_dma;
+ }
}
priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
GFP_KERNEL);
+ if (!priv->rx_skbuff_dma)
+ goto err_rx_skbuff_dma;
+
priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
GFP_KERNEL);
+ if (!priv->rx_skbuff)
+ goto err_rx_skbuff;
+
priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
GFP_KERNEL);
+ if (!priv->tx_skbuff_dma)
+ goto err_tx_skbuff_dma;
+
priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
GFP_KERNEL);
+ if (!priv->tx_skbuff)
+ goto err_tx_skbuff;
+
if (netif_msg_probe(priv)) {
pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
(u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
@@ -1034,8 +1076,9 @@ static void init_dma_desc_rings(struct net_device *dev)
else
p = priv->dma_rx + i;
- if (stmmac_init_rx_buffers(priv, p, i))
- break;
+ ret = stmmac_init_rx_buffers(priv, p, i);
+ if (ret)
+ goto err_init_rx_buffers;
if (netif_msg_probe(priv))
pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
@@ -1081,20 +1124,44 @@ static void init_dma_desc_rings(struct net_device *dev)
if (netif_msg_hw(priv))
stmmac_display_rings(priv);
+
+ return 0;
+err_init_rx_buffers:
+ while (--i >= 0)
+ stmmac_free_rx_buffers(priv, i);
+ kfree(priv->tx_skbuff);
+err_tx_skbuff:
+ kfree(priv->tx_skbuff_dma);
+err_tx_skbuff_dma:
+ kfree(priv->rx_skbuff);
+err_rx_skbuff:
+ kfree(priv->rx_skbuff_dma);
+err_rx_skbuff_dma:
+ if (priv->extend_desc) {
+ dma_free_coherent(priv->device, priv->dma_tx_size *
+ sizeof(struct dma_extended_desc),
+ priv->dma_etx, priv->dma_tx_phy);
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
+ priv->dma_erx, priv->dma_rx_phy);
+ } else {
+ dma_free_coherent(priv->device,
+ priv->dma_tx_size * sizeof(struct dma_desc),
+ priv->dma_tx, priv->dma_tx_phy);
+ dma_free_coherent(priv->device,
+ priv->dma_rx_size * sizeof(struct dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+ }
+err_dma:
+ return ret;
}
static void dma_free_rx_skbufs(struct stmmac_priv *priv)
{
int i;
- for (i = 0; i < priv->dma_rx_size; i++) {
- if (priv->rx_skbuff[i]) {
- dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
- priv->dma_buf_sz, DMA_FROM_DEVICE);
- dev_kfree_skb_any(priv->rx_skbuff[i]);
- }
- priv->rx_skbuff[i] = NULL;
- }
+ for (i = 0; i < priv->dma_rx_size; i++)
+ stmmac_free_rx_buffers(priv, i);
}
static void dma_free_tx_skbufs(struct stmmac_priv *priv)
@@ -1157,8 +1224,9 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
*/
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
- if (likely(priv->plat->force_sf_dma_mode ||
- ((priv->plat->tx_coe) && (!priv->no_csum_insertion)))) {
+ if (priv->plat->force_thresh_dma_mode)
+ priv->hw->dma->dma_mode(priv->ioaddr, tc, tc);
+ else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
/*
* In case of GMAC, SF mode can be enabled
* to perform the TX COE in HW. This depends on:
@@ -1560,12 +1628,17 @@ static int stmmac_open(struct net_device *dev)
priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
- init_dma_desc_rings(dev);
+
+ ret = init_dma_desc_rings(dev);
+ if (ret < 0) {
+ pr_err("%s: DMA descriptors initialization failed\n", __func__);
+ goto dma_desc_error;
+ }
/* DMA initialization and SW reset */
ret = stmmac_init_dma_engine(priv);
if (ret < 0) {
- pr_err("%s: DMA initialization failed\n", __func__);
+ pr_err("%s: DMA engine initialization failed\n", __func__);
goto init_error;
}
@@ -1672,6 +1745,7 @@ wolirq_error:
init_error:
free_dma_desc_resources(priv);
+dma_desc_error:
if (priv->phydev)
phy_disconnect(priv->phydev);
phy_error:
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 03de76c7a17..51c9069ef40 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -71,14 +71,23 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
plat->force_sf_dma_mode = 1;
}
- dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg), GFP_KERNEL);
- if (!dma_cfg)
- return -ENOMEM;
-
- plat->dma_cfg = dma_cfg;
- of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
- dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
- dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
+ if (of_find_property(np, "snps,pbl", NULL)) {
+ dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
+ GFP_KERNEL);
+ if (!dma_cfg)
+ return -ENOMEM;
+ plat->dma_cfg = dma_cfg;
+ of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
+ dma_cfg->fixed_burst =
+ of_property_read_bool(np, "snps,fixed-burst");
+ dma_cfg->mixed_burst =
+ of_property_read_bool(np, "snps,mixed-burst");
+ }
+ plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
+ if (plat->force_thresh_dma_mode) {
+ plat->force_sf_dma_mode = 0;
+ pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
+ }
return 0;
}
@@ -109,14 +118,11 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
const char *mac = NULL;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
addr = devm_ioremap_resource(dev, res);
if (IS_ERR(addr))
return PTR_ERR(addr);
- plat_dat = pdev->dev.platform_data;
+ plat_dat = dev_get_platdata(&pdev->dev);
if (pdev->dev.of_node) {
if (!plat_dat)
plat_dat = devm_kzalloc(&pdev->dev,
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index fa322409bff..f28460ce24a 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9360,7 +9360,7 @@ static ssize_t show_port_phy(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *plat_dev = to_platform_device(dev);
- struct niu_parent *p = plat_dev->dev.platform_data;
+ struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
u32 port_phy = p->port_phy;
char *orig_buf = buf;
int i;
@@ -9390,7 +9390,7 @@ static ssize_t show_plat_type(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *plat_dev = to_platform_device(dev);
- struct niu_parent *p = plat_dev->dev.platform_data;
+ struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
const char *type_str;
switch (p->plat_type) {
@@ -9419,7 +9419,7 @@ static ssize_t __show_chan_per_port(struct device *dev,
int rx)
{
struct platform_device *plat_dev = to_platform_device(dev);
- struct niu_parent *p = plat_dev->dev.platform_data;
+ struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
char *orig_buf = buf;
u8 *arr;
int i;
@@ -9452,7 +9452,7 @@ static ssize_t show_num_ports(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct platform_device *plat_dev = to_platform_device(dev);
- struct niu_parent *p = plat_dev->dev.platform_data;
+ struct niu_parent *p = dev_get_platdata(&plat_dev->dev);
return sprintf(buf, "%d\n", p->num_ports);
}
@@ -9478,7 +9478,7 @@ static struct niu_parent *niu_new_parent(struct niu *np,
if (IS_ERR(plat_dev))
return NULL;
- for (i = 0; attr_name(niu_parent_attributes[i]); i++) {
+ for (i = 0; niu_parent_attributes[i].attr.name; i++) {
int err = device_create_file(&plat_dev->dev,
&niu_parent_attributes[i]);
if (err)
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 0d43fa9ff98..7217ee5d627 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1239,7 +1239,7 @@ static int bigmac_sbus_probe(struct platform_device *op)
static int bigmac_sbus_remove(struct platform_device *op)
{
- struct bigmac *bp = dev_get_drvdata(&op->dev);
+ struct bigmac *bp = platform_get_drvdata(op);
struct device *parent = op->dev.parent;
struct net_device *net_dev = bp->dev;
struct platform_device *qec_op;
@@ -1259,8 +1259,6 @@ static int bigmac_sbus_remove(struct platform_device *op)
free_netdev(net_dev);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 171f5b0809c..e37b587b386 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2798,7 +2798,7 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
goto err_out_free_coherent;
}
- dev_set_drvdata(&op->dev, hp);
+ platform_set_drvdata(op, hp);
if (qfe_slot != -1)
printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ",
@@ -3111,7 +3111,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
goto err_out_iounmap;
}
- dev_set_drvdata(&pdev->dev, hp);
+ pci_set_drvdata(pdev, hp);
if (!qfe_slot) {
struct pci_dev *qpdev = qp->quattro_dev;
@@ -3159,7 +3159,7 @@ err_out:
static void happy_meal_pci_remove(struct pci_dev *pdev)
{
- struct happy_meal *hp = dev_get_drvdata(&pdev->dev);
+ struct happy_meal *hp = pci_get_drvdata(pdev);
struct net_device *net_dev = hp->dev;
unregister_netdev(net_dev);
@@ -3171,7 +3171,7 @@ static void happy_meal_pci_remove(struct pci_dev *pdev)
free_netdev(net_dev);
- dev_set_drvdata(&pdev->dev, NULL);
+ pci_set_drvdata(pdev, NULL);
}
static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
@@ -3231,7 +3231,7 @@ static int hme_sbus_probe(struct platform_device *op)
static int hme_sbus_remove(struct platform_device *op)
{
- struct happy_meal *hp = dev_get_drvdata(&op->dev);
+ struct happy_meal *hp = platform_get_drvdata(op);
struct net_device *net_dev = hp->dev;
unregister_netdev(net_dev);
@@ -3250,8 +3250,6 @@ static int hme_sbus_remove(struct platform_device *op)
free_netdev(net_dev);
- dev_set_drvdata(&op->dev, NULL);
-
return 0;
}
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 31bbbca341a..2dc16b6efaf 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -636,7 +636,7 @@ static void cpmac_hw_stop(struct net_device *dev)
{
int i;
struct cpmac_priv *priv = netdev_priv(dev);
- struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;
+ struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);
ar7_device_reset(pdata->reset_bit);
cpmac_write(priv->regs, CPMAC_RX_CONTROL,
@@ -659,7 +659,7 @@ static void cpmac_hw_start(struct net_device *dev)
{
int i;
struct cpmac_priv *priv = netdev_priv(dev);
- struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;
+ struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);
ar7_device_reset(pdata->reset_bit);
for (i = 0; i < 8; i++) {
@@ -1118,7 +1118,7 @@ static int cpmac_probe(struct platform_device *pdev)
struct net_device *dev;
struct plat_cpmac_data *pdata;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (external_switch || dumb_switch) {
strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 05a1674e204..79974e31187 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -34,9 +34,9 @@
#include <linux/of_device.h>
#include <linux/if_vlan.h>
-#include <linux/platform_data/cpsw.h>
#include <linux/pinctrl/consumer.h>
+#include "cpsw.h"
#include "cpsw_ale.h"
#include "cpts.h"
#include "davinci_cpdma.h"
@@ -82,6 +82,8 @@ do { \
#define CPSW_VERSION_1 0x19010a
#define CPSW_VERSION_2 0x19010c
+#define CPSW_VERSION_3 0x19010f
+#define CPSW_VERSION_4 0x190112
#define HOST_PORT_NUM 0
#define SLIVER_SIZE 0x40
@@ -91,6 +93,7 @@ do { \
#define CPSW1_SLAVE_SIZE 0x040
#define CPSW1_CPDMA_OFFSET 0x100
#define CPSW1_STATERAM_OFFSET 0x200
+#define CPSW1_HW_STATS 0x400
#define CPSW1_CPTS_OFFSET 0x500
#define CPSW1_ALE_OFFSET 0x600
#define CPSW1_SLIVER_OFFSET 0x700
@@ -99,6 +102,7 @@ do { \
#define CPSW2_SLAVE_OFFSET 0x200
#define CPSW2_SLAVE_SIZE 0x100
#define CPSW2_CPDMA_OFFSET 0x800
+#define CPSW2_HW_STATS 0x900
#define CPSW2_STATERAM_OFFSET 0xa00
#define CPSW2_CPTS_OFFSET 0xc00
#define CPSW2_ALE_OFFSET 0xd00
@@ -299,6 +303,44 @@ struct cpsw_sliver_regs {
u32 rx_pri_map;
};
+struct cpsw_hw_stats {
+ u32 rxgoodframes;
+ u32 rxbroadcastframes;
+ u32 rxmulticastframes;
+ u32 rxpauseframes;
+ u32 rxcrcerrors;
+ u32 rxaligncodeerrors;
+ u32 rxoversizedframes;
+ u32 rxjabberframes;
+ u32 rxundersizedframes;
+ u32 rxfragments;
+ u32 __pad_0[2];
+ u32 rxoctets;
+ u32 txgoodframes;
+ u32 txbroadcastframes;
+ u32 txmulticastframes;
+ u32 txpauseframes;
+ u32 txdeferredframes;
+ u32 txcollisionframes;
+ u32 txsinglecollframes;
+ u32 txmultcollframes;
+ u32 txexcessivecollisions;
+ u32 txlatecollisions;
+ u32 txunderrun;
+ u32 txcarriersenseerrors;
+ u32 txoctets;
+ u32 octetframes64;
+ u32 octetframes65t127;
+ u32 octetframes128t255;
+ u32 octetframes256t511;
+ u32 octetframes512t1023;
+ u32 octetframes1024tup;
+ u32 netoctets;
+ u32 rxsofoverruns;
+ u32 rxmofoverruns;
+ u32 rxdmaoverruns;
+};
+
struct cpsw_slave {
void __iomem *regs;
struct cpsw_sliver_regs __iomem *sliver;
@@ -332,6 +374,7 @@ struct cpsw_priv {
struct cpsw_platform_data data;
struct cpsw_ss_regs __iomem *regs;
struct cpsw_wr_regs __iomem *wr_regs;
+ u8 __iomem *hw_stats;
struct cpsw_host_regs __iomem *host_port_regs;
u32 msg_enable;
u32 version;
@@ -354,6 +397,94 @@ struct cpsw_priv {
u32 emac_port;
};
+struct cpsw_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int type;
+ int sizeof_stat;
+ int stat_offset;
+};
+
+enum {
+ CPSW_STATS,
+ CPDMA_RX_STATS,
+ CPDMA_TX_STATS,
+};
+
+#define CPSW_STAT(m) CPSW_STATS, \
+ sizeof(((struct cpsw_hw_stats *)0)->m), \
+ offsetof(struct cpsw_hw_stats, m)
+#define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \
+ sizeof(((struct cpdma_chan_stats *)0)->m), \
+ offsetof(struct cpdma_chan_stats, m)
+#define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \
+ sizeof(((struct cpdma_chan_stats *)0)->m), \
+ offsetof(struct cpdma_chan_stats, m)
+
+static const struct cpsw_stats cpsw_gstrings_stats[] = {
+ { "Good Rx Frames", CPSW_STAT(rxgoodframes) },
+ { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
+ { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
+ { "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
+ { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
+ { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
+ { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
+ { "Rx Jabbers", CPSW_STAT(rxjabberframes) },
+ { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
+ { "Rx Fragments", CPSW_STAT(rxfragments) },
+ { "Rx Octets", CPSW_STAT(rxoctets) },
+ { "Good Tx Frames", CPSW_STAT(txgoodframes) },
+ { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
+ { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
+ { "Pause Tx Frames", CPSW_STAT(txpauseframes) },
+ { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
+ { "Collisions", CPSW_STAT(txcollisionframes) },
+ { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
+ { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
+ { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
+ { "Late Collisions", CPSW_STAT(txlatecollisions) },
+ { "Tx Underrun", CPSW_STAT(txunderrun) },
+ { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
+ { "Tx Octets", CPSW_STAT(txoctets) },
+ { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
+ { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
+ { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
+ { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
+ { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
+ { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
+ { "Net Octets", CPSW_STAT(netoctets) },
+ { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
+ { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
+ { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
+ { "Rx DMA chan: head_enqueue", CPDMA_RX_STAT(head_enqueue) },
+ { "Rx DMA chan: tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
+ { "Rx DMA chan: pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
+ { "Rx DMA chan: misqueued", CPDMA_RX_STAT(misqueued) },
+ { "Rx DMA chan: desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
+ { "Rx DMA chan: pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
+ { "Rx DMA chan: runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
+ { "Rx DMA chan: runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
+ { "Rx DMA chan: empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
+ { "Rx DMA chan: busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
+ { "Rx DMA chan: good_dequeue", CPDMA_RX_STAT(good_dequeue) },
+ { "Rx DMA chan: requeue", CPDMA_RX_STAT(requeue) },
+ { "Rx DMA chan: teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
+ { "Tx DMA chan: head_enqueue", CPDMA_TX_STAT(head_enqueue) },
+ { "Tx DMA chan: tail_enqueue", CPDMA_TX_STAT(tail_enqueue) },
+ { "Tx DMA chan: pad_enqueue", CPDMA_TX_STAT(pad_enqueue) },
+ { "Tx DMA chan: misqueued", CPDMA_TX_STAT(misqueued) },
+ { "Tx DMA chan: desc_alloc_fail", CPDMA_TX_STAT(desc_alloc_fail) },
+ { "Tx DMA chan: pad_alloc_fail", CPDMA_TX_STAT(pad_alloc_fail) },
+ { "Tx DMA chan: runt_receive_buf", CPDMA_TX_STAT(runt_receive_buff) },
+ { "Tx DMA chan: runt_transmit_buf", CPDMA_TX_STAT(runt_transmit_buff) },
+ { "Tx DMA chan: empty_dequeue", CPDMA_TX_STAT(empty_dequeue) },
+ { "Tx DMA chan: busy_dequeue", CPDMA_TX_STAT(busy_dequeue) },
+ { "Tx DMA chan: good_dequeue", CPDMA_TX_STAT(good_dequeue) },
+ { "Tx DMA chan: requeue", CPDMA_TX_STAT(requeue) },
+ { "Tx DMA chan: teardown_dequeue", CPDMA_TX_STAT(teardown_dequeue) },
+};
+
+#define CPSW_STATS_LEN ARRAY_SIZE(cpsw_gstrings_stats)
+
#define napi_to_priv(napi) container_of(napi, struct cpsw_priv, napi)
#define for_each_slave(priv, func, arg...) \
do { \
@@ -723,6 +854,69 @@ static int cpsw_set_coalesce(struct net_device *ndev,
return 0;
}
+static int cpsw_get_sset_count(struct net_device *ndev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return CPSW_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < CPSW_STATS_LEN; i++) {
+ memcpy(p, cpsw_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void cpsw_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpdma_chan_stats rx_stats;
+ struct cpdma_chan_stats tx_stats;
+ u32 val;
+ u8 *p;
+ int i;
+
+ /* Collect Davinci CPDMA stats for Rx and Tx Channel */
+ cpdma_chan_get_stats(priv->rxch, &rx_stats);
+ cpdma_chan_get_stats(priv->txch, &tx_stats);
+
+ for (i = 0; i < CPSW_STATS_LEN; i++) {
+ switch (cpsw_gstrings_stats[i].type) {
+ case CPSW_STATS:
+ val = readl(priv->hw_stats +
+ cpsw_gstrings_stats[i].stat_offset);
+ data[i] = val;
+ break;
+
+ case CPDMA_RX_STATS:
+ p = (u8 *)&rx_stats +
+ cpsw_gstrings_stats[i].stat_offset;
+ data[i] = *(u32 *)p;
+ break;
+
+ case CPDMA_TX_STATS:
+ p = (u8 *)&tx_stats +
+ cpsw_gstrings_stats[i].stat_offset;
+ data[i] = *(u32 *)p;
+ break;
+ }
+ }
+}
+
static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
{
static char *leader = "........................................";
@@ -799,6 +993,8 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
break;
case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
+ case CPSW_VERSION_4:
slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
break;
}
@@ -1232,6 +1428,33 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
}
+static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct sockaddr *addr = (struct sockaddr *)p;
+ int flags = 0;
+ u16 vid = 0;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (priv->data.dual_emac) {
+ vid = priv->slaves[priv->emac_port].port_vlan;
+ flags = ALE_VLAN;
+ }
+
+ cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port,
+ flags, vid);
+ cpsw_ale_add_ucast(priv->ale, addr->sa_data, priv->host_port,
+ flags, vid);
+
+ memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
+ memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
+ for_each_slave(priv, cpsw_set_slave_mac, priv);
+
+ return 0;
+}
+
static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
@@ -1326,6 +1549,7 @@ static const struct net_device_ops cpsw_netdev_ops = {
.ndo_stop = cpsw_ndo_stop,
.ndo_start_xmit = cpsw_ndo_start_xmit,
.ndo_change_rx_flags = cpsw_ndo_change_rx_flags,
+ .ndo_set_mac_address = cpsw_ndo_set_mac_address,
.ndo_do_ioctl = cpsw_ndo_ioctl,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = eth_change_mtu,
@@ -1416,6 +1640,29 @@ static int cpsw_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
return -EOPNOTSUPP;
}
+static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int slave_no = cpsw_slave_index(priv);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (priv->slaves[slave_no].phy)
+ phy_ethtool_get_wol(priv->slaves[slave_no].phy, wol);
+}
+
+static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int slave_no = cpsw_slave_index(priv);
+
+ if (priv->slaves[slave_no].phy)
+ return phy_ethtool_set_wol(priv->slaves[slave_no].phy, wol);
+ else
+ return -EOPNOTSUPP;
+}
+
static const struct ethtool_ops cpsw_ethtool_ops = {
.get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel,
@@ -1426,6 +1673,11 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.set_settings = cpsw_set_settings,
.get_coalesce = cpsw_get_coalesce,
.set_coalesce = cpsw_set_coalesce,
+ .get_sset_count = cpsw_get_sset_count,
+ .get_strings = cpsw_get_strings,
+ .get_ethtool_stats = cpsw_get_ethtool_stats,
+ .get_wol = cpsw_get_wol,
+ .set_wol = cpsw_set_wol,
};
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
@@ -1623,6 +1875,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
priv_sl2->host_port = priv->host_port;
priv_sl2->host_port_regs = priv->host_port_regs;
priv_sl2->wr_regs = priv->wr_regs;
+ priv_sl2->hw_stats = priv->hw_stats;
priv_sl2->dma = priv->dma;
priv_sl2->txch = priv->txch;
priv_sl2->rxch = priv->rxch;
@@ -1780,7 +2033,8 @@ static int cpsw_probe(struct platform_device *pdev)
switch (priv->version) {
case CPSW_VERSION_1:
priv->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
- priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET;
+ priv->cpts->reg = ss_regs + CPSW1_CPTS_OFFSET;
+ priv->hw_stats = ss_regs + CPSW1_HW_STATS;
dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
@@ -1790,8 +2044,11 @@ static int cpsw_probe(struct platform_device *pdev)
dma_params.desc_mem_phys = 0;
break;
case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
+ case CPSW_VERSION_4:
priv->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
- priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET;
+ priv->cpts->reg = ss_regs + CPSW2_CPTS_OFFSET;
+ priv->hw_stats = ss_regs + CPSW2_HW_STATS;
dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
@@ -1867,7 +2124,7 @@ static int cpsw_probe(struct platform_device *pdev)
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
for (i = res->start; i <= res->end; i++) {
- if (request_irq(i, cpsw_interrupt, IRQF_DISABLED,
+ if (request_irq(i, cpsw_interrupt, 0,
dev_name(&pdev->dev), priv)) {
dev_err(priv->dev, "error attaching irq\n");
goto clean_ale_ret;
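
The ethtool statistics support added to cpsw above follows a common table-driven pattern: a string table consumed by `ethtool -S`, plus a type/size/offset triple so .get_ethtool_stats can pull each counter either from the hardware statistics block or from a cpdma_chan_stats snapshot. A stripped-down, hedged sketch of the same offsetof/sizeof trick, with hypothetical names:

#include <linux/ethtool.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct foo_hw_stats {				/* hypothetical counter block */
	u32 rx_frames;
	u32 tx_frames;
};

struct foo_stat {
	char name[ETH_GSTRING_LEN];
	int size;
	int offset;
};

#define FOO_STAT(m) { #m, sizeof(((struct foo_hw_stats *)0)->m), \
		      offsetof(struct foo_hw_stats, m) }

static const struct foo_stat foo_stats[] = {
	FOO_STAT(rx_frames),
	FOO_STAT(tx_frames),
};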
diff --git a/include/linux/platform_data/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index bb3cd58d71e..eb3e101ec04 100644
--- a/include/linux/platform_data/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -1,11 +1,10 @@
-/*
- * Texas Instruments Ethernet Switch Driver
+/* Texas Instruments Ethernet Switch Driver
*
- * Copyright (C) 2012 Texas Instruments
+ * Copyright (C) 2013 Texas Instruments
*
* This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
@@ -22,14 +21,13 @@ struct cpsw_slave_data {
int phy_if;
u8 mac_addr[ETH_ALEN];
u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
-
};
struct cpsw_platform_data {
+ struct cpsw_slave_data *slave_data;
u32 ss_reg_ofs; /* Subsystem control register offset */
u32 channels; /* number of cpdma channels (symmetric) */
u32 slaves; /* number of slave cpgmac ports */
- struct cpsw_slave_data *slave_data;
u32 active_slave; /* time stamping, ethtool and SIOCGMIIPHY slave */
u32 cpts_clock_mult; /* convert input clock ticks to nanoseconds */
u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 031ebc81b50..90a79462c86 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -591,6 +591,7 @@ int cpdma_chan_get_stats(struct cpdma_chan *chan,
spin_unlock_irqrestore(&chan->lock, flags);
return 0;
}
+EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);
int cpdma_chan_dump(struct cpdma_chan *chan)
{
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 07b176bcf92..67df09ea9d0 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1568,8 +1568,7 @@ static int emac_dev_open(struct net_device *ndev)
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
for (i = res->start; i <= res->end; i++) {
if (devm_request_irq(&priv->pdev->dev, i, emac_irq,
- IRQF_DISABLED,
- ndev->name, ndev))
+ 0, ndev->name, ndev))
goto rollback;
}
k++;
@@ -1762,7 +1761,7 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
const u8 *mac_addr;
if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
- return pdev->dev.platform_data;
+ return dev_get_platdata(&pdev->dev);
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
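
Dropping IRQF_DISABLED here (and in cpsw above) is safe because the flag had long been a no-op: genirq runs handlers with interrupts disabled regardless, so flags == 0 is equivalent. A hedged sketch of the resulting request, with a hypothetical handler:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t foo_irq(int irq, void *dev_id)	/* hypothetical */
{
	return IRQ_HANDLED;
}

static int foo_request_irq(struct platform_device *pdev, int irq, void *priv)
{
	/* flags == 0: IRQF_DISABLED no longer changes anything */
	return devm_request_irq(&pdev->dev, irq, foo_irq, 0,
				dev_name(&pdev->dev), priv);
}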
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 16ddfc34806..4ec92659a10 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -314,7 +314,7 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
static int davinci_mdio_probe(struct platform_device *pdev)
{
- struct mdio_platform_data *pdata = pdev->dev.platform_data;
+ struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device *dev = &pdev->dev;
struct davinci_mdio_data *data;
struct resource *res;
@@ -421,8 +421,7 @@ bail_out:
static int davinci_mdio_remove(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct davinci_mdio_data *data = dev_get_drvdata(dev);
+ struct davinci_mdio_data *data = platform_get_drvdata(pdev);
if (data->bus) {
mdiobus_unregister(data->bus);
@@ -434,8 +433,6 @@ static int davinci_mdio_remove(struct platform_device *pdev)
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- dev_set_drvdata(dev, NULL);
-
kfree(data);
return 0;
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
index 098b1c42b39..4083ba8839e 100644
--- a/drivers/net/ethernet/tile/Kconfig
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -15,3 +15,14 @@ config TILE_NET
To compile this driver as a module, choose M here: the module
will be called tile_net.
+
+config PTP_1588_CLOCK_TILEGX
+ tristate "Tilera TILE-Gx mPIPE as PTP clock"
+ select PTP_1588_CLOCK
+ depends on TILE_NET
+ depends on TILEGX
+ ---help---
+ This driver adds support for using the mPIPE as a PTP
+ clock. This clock is only useful if your PTP programs are
+ getting hardware time stamps on the PTP Ethernet packets
+ using the SO_TIMESTAMPING API.
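
The SO_TIMESTAMPING API mentioned in the help text is how userspace asks for the hardware timestamps this clock makes meaningful. A hedged userspace sketch, not part of the patch (on older libcs the SO_TIMESTAMPING constant may need to come from <asm/socket.h>):

#include <sys/socket.h>
#include <linux/net_tstamp.h>

static int request_hw_rx_timestamps(int sock)
{
	int flags = SOF_TIMESTAMPING_RX_HARDWARE |
		    SOF_TIMESTAMPING_RAW_HARDWARE;

	return setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
			  &flags, sizeof(flags));
}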
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index f3c2d034b32..949076f4e6a 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -36,7 +36,10 @@
#include <linux/io.h>
#include <linux/ctype.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <linux/tcp.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
#include <asm/checksum.h>
#include <asm/homecache.h>
@@ -76,6 +79,9 @@
#define MAX_FRAGS (MAX_SKB_FRAGS + 1)
+/* The "kinds" of buffer stacks (small/large/jumbo). */
+#define MAX_KINDS 3
+
/* Size of completions data to allocate.
* ISSUE: Probably more than needed since we don't use all the channels.
*/
@@ -130,29 +136,31 @@ struct tile_net_tx_wake {
/* Info for a specific cpu. */
struct tile_net_info {
- /* The NAPI struct. */
- struct napi_struct napi;
- /* Packet queue. */
- gxio_mpipe_iqueue_t iqueue;
/* Our cpu. */
int my_cpu;
- /* True if iqueue is valid. */
- bool has_iqueue;
- /* NAPI flags. */
- bool napi_added;
- bool napi_enabled;
- /* Number of small sk_buffs which must still be provided. */
- unsigned int num_needed_small_buffers;
- /* Number of large sk_buffs which must still be provided. */
- unsigned int num_needed_large_buffers;
/* A timer for handling egress completions. */
struct hrtimer egress_timer;
/* True if "egress_timer" is scheduled. */
bool egress_timer_scheduled;
- /* Comps for each egress channel. */
- struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
- /* Transmit wake timer for each egress channel. */
- struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
+ struct info_mpipe {
+ /* Packet queue. */
+ gxio_mpipe_iqueue_t iqueue;
+ /* The NAPI struct. */
+ struct napi_struct napi;
+ /* Number of buffers (by kind) which must still be provided. */
+ unsigned int num_needed_buffers[MAX_KINDS];
+ /* instance id. */
+ int instance;
+ /* True if iqueue is valid. */
+ bool has_iqueue;
+ /* NAPI flags. */
+ bool napi_added;
+ bool napi_enabled;
+ /* Comps for each egress channel. */
+ struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
+ /* Transmit wake timer for each egress channel. */
+ struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
+ } mpipe[NR_MPIPE_MAX];
};
/* Info for egress on a particular egress channel. */
@@ -177,19 +185,67 @@ struct tile_net_priv {
int loopify_channel;
/* The egress channel (channel or loopify_channel). */
int echannel;
- /* Total stats. */
- struct net_device_stats stats;
+ /* mPIPE instance, 0 or 1. */
+ int instance;
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+ /* The timestamp config. */
+ struct hwtstamp_config stamp_cfg;
+#endif
};
-/* Egress info, indexed by "priv->echannel" (lazily created as needed). */
-static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS];
+static struct mpipe_data {
+ /* The ingress irq. */
+ int ingress_irq;
-/* Devices currently associated with each channel.
- * NOTE: The array entry can become NULL after ifconfig down, but
- * we do not free the underlying net_device structures, so it is
- * safe to use a pointer after reading it from this array.
- */
-static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS];
+ /* The "context" for all devices. */
+ gxio_mpipe_context_t context;
+
+ /* Egress info, indexed by "priv->echannel"
+ * (lazily created as needed).
+ */
+ struct tile_net_egress
+ egress_for_echannel[TILE_NET_CHANNELS];
+
+ /* Devices currently associated with each channel.
+ * NOTE: The array entry can become NULL after ifconfig down, but
+ * we do not free the underlying net_device structures, so it is
+ * safe to use a pointer after reading it from this array.
+ */
+ struct net_device
+ *tile_net_devs_for_channel[TILE_NET_CHANNELS];
+
+ /* The actual memory allocated for the buffer stacks. */
+ void *buffer_stack_vas[MAX_KINDS];
+
+ /* The amount of memory allocated for each buffer stack. */
+ size_t buffer_stack_bytes[MAX_KINDS];
+
+ /* The first buffer stack index
+ * (small = +0, large = +1, jumbo = +2).
+ */
+ int first_buffer_stack;
+
+ /* The buckets. */
+ int first_bucket;
+ int num_buckets;
+
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+ /* PTP-specific data. */
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info caps;
+
+ /* Lock for ptp accessors. */
+ struct mutex ptp_lock;
+#endif
+
+} mpipe_data[NR_MPIPE_MAX] = {
+ [0 ... (NR_MPIPE_MAX - 1)] {
+ .ingress_irq = -1,
+ .first_buffer_stack = -1,
+ .first_bucket = -1,
+ .num_buckets = 1
+ }
+};
/* A mutex for "tile_net_devs_for_channel". */
static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
@@ -197,34 +253,17 @@ static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
/* The per-cpu info. */
static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
-/* The "context" for all devices. */
-static gxio_mpipe_context_t context;
-/* Buffer sizes and mpipe enum codes for buffer stacks.
+/* The buffer size enums for each buffer stack.
* See arch/tile/include/gxio/mpipe.h for the set of possible values.
+ * We avoid the "10368" size because it can induce "false chaining"
+ * on "cut-through" jumbo packets.
*/
-#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128
-#define BUFFER_SIZE_SMALL 128
-#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664
-#define BUFFER_SIZE_LARGE 1664
-
-/* The small/large "buffer stacks". */
-static int small_buffer_stack = -1;
-static int large_buffer_stack = -1;
-
-/* Amount of memory allocated for each buffer stack. */
-static size_t buffer_stack_size;
-
-/* The actual memory allocated for the buffer stacks. */
-static void *small_buffer_stack_va;
-static void *large_buffer_stack_va;
-
-/* The buckets. */
-static int first_bucket = -1;
-static int num_buckets = 1;
-
-/* The ingress irq. */
-static int ingress_irq = -1;
+static gxio_mpipe_buffer_size_enum_t buffer_size_enums[MAX_KINDS] = {
+ GXIO_MPIPE_BUFFER_SIZE_128,
+ GXIO_MPIPE_BUFFER_SIZE_1664,
+ GXIO_MPIPE_BUFFER_SIZE_16384
+};
/* Text value of tile_net.cpus if passed as a module parameter. */
static char *network_cpus_string;
@@ -232,11 +271,21 @@ static char *network_cpus_string;
/* The actual cpus in "network_cpus". */
static struct cpumask network_cpus_map;
-/* If "loopify=LINK" was specified, this is "LINK". */
+/* If "tile_net.loopify=LINK" was specified, this is "LINK". */
static char *loopify_link_name;
-/* If "tile_net.custom" was specified, this is non-NULL. */
-static char *custom_str;
+/* If "tile_net.custom" was specified, this is true. */
+static bool custom_flag;
+
+/* If "tile_net.jumbo=NUM" was specified, this is "NUM". */
+static uint jumbo_num;
+
+/* Obtain mpipe instance from struct tile_net_priv given struct net_device. */
+static inline int mpipe_instance(struct net_device *dev)
+{
+ struct tile_net_priv *priv = netdev_priv(dev);
+ return priv->instance;
+}
/* The "tile_net.cpus" argument specifies the cpus that are dedicated
* to handle ingress packets.
@@ -289,9 +338,15 @@ MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");
/* The "tile_net.custom" argument causes us to ignore the "conventional"
* classifier metadata, in particular, the "l2_offset".
*/
-module_param_named(custom, custom_str, charp, 0444);
+module_param_named(custom, custom_flag, bool, 0444);
MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");
+/* The "tile_net.jumbo" argument causes us to support "jumbo" packets,
+ * and to allocate the given number of "jumbo" buffers.
+ */
+module_param_named(jumbo, jumbo_num, uint, 0444);
+MODULE_PARM_DESC(jumbo, "the number of buffers to support jumbo packets");
+
/* Atomically update a statistics field.
* Note that on TILE-Gx, this operation is fire-and-forget on the
* issuing core (single-cycle dispatch) and takes only a few cycles
@@ -305,15 +360,16 @@ static void tile_net_stats_add(unsigned long value, unsigned long *field)
}
/* Allocate and push a buffer. */
-static bool tile_net_provide_buffer(bool small)
+static bool tile_net_provide_buffer(int instance, int kind)
{
- int stack = small ? small_buffer_stack : large_buffer_stack;
+ struct mpipe_data *md = &mpipe_data[instance];
+ gxio_mpipe_buffer_size_enum_t bse = buffer_size_enums[kind];
+ size_t bs = gxio_mpipe_buffer_size_enum_to_buffer_size(bse);
const unsigned long buffer_alignment = 128;
struct sk_buff *skb;
int len;
- len = sizeof(struct sk_buff **) + buffer_alignment;
- len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE);
+ len = sizeof(struct sk_buff **) + buffer_alignment + bs;
skb = dev_alloc_skb(len);
if (skb == NULL)
return false;
@@ -328,7 +384,7 @@ static bool tile_net_provide_buffer(bool small)
/* Make sure "skb" and the back-pointer have been flushed. */
wmb();
- gxio_mpipe_push_buffer(&context, stack,
+ gxio_mpipe_push_buffer(&md->context, md->first_buffer_stack + kind,
(void *)va_to_tile_io_addr(skb->data));
return true;
@@ -354,11 +410,14 @@ static struct sk_buff *mpipe_buf_to_skb(void *va)
return skb;
}
-static void tile_net_pop_all_buffers(int stack)
+static void tile_net_pop_all_buffers(int instance, int stack)
{
+ struct mpipe_data *md = &mpipe_data[instance];
+
for (;;) {
tile_io_addr_t addr =
- (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack);
+ (tile_io_addr_t)gxio_mpipe_pop_buffer(&md->context,
+ stack);
if (addr == 0)
break;
dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
@@ -369,24 +428,111 @@ static void tile_net_pop_all_buffers(int stack)
static void tile_net_provide_needed_buffers(void)
{
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ int instance, kind;
+ for (instance = 0; instance < NR_MPIPE_MAX &&
+ info->mpipe[instance].has_iqueue; instance++) {
+ for (kind = 0; kind < MAX_KINDS; kind++) {
+ while (info->mpipe[instance].num_needed_buffers[kind]
+ != 0) {
+ if (!tile_net_provide_buffer(instance, kind)) {
+ pr_notice("Tile %d still needs"
+ " some buffers\n",
+ info->my_cpu);
+ return;
+ }
+ info->mpipe[instance].
+ num_needed_buffers[kind]--;
+ }
+ }
+ }
+}
- while (info->num_needed_small_buffers != 0) {
- if (!tile_net_provide_buffer(true))
- goto oops;
- info->num_needed_small_buffers--;
+/* Get RX timestamp, and store it in the skb. */
+static void tile_rx_timestamp(struct tile_net_priv *priv, struct sk_buff *skb,
+ gxio_mpipe_idesc_t *idesc)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+ if (unlikely(priv->stamp_cfg.rx_filter != HWTSTAMP_FILTER_NONE)) {
+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
+ memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+ shhwtstamps->hwtstamp = ktime_set(idesc->time_stamp_sec,
+ idesc->time_stamp_ns);
}
+#endif
+}
- while (info->num_needed_large_buffers != 0) {
- if (!tile_net_provide_buffer(false))
- goto oops;
- info->num_needed_large_buffers--;
+/* Get TX timestamp, and store it in the skb. */
+static void tile_tx_timestamp(struct sk_buff *skb, int instance)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+ struct skb_shared_info *shtx = skb_shinfo(skb);
+ if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
+ struct mpipe_data *md = &mpipe_data[instance];
+ struct skb_shared_hwtstamps shhwtstamps;
+ struct timespec ts;
+
+ shtx->tx_flags |= SKBTX_IN_PROGRESS;
+ gxio_mpipe_get_timestamp(&md->context, &ts);
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ shhwtstamps.hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec);
+ skb_tstamp_tx(skb, &shhwtstamps);
}
+#endif
+}
- return;
+/* Use ioctl() to enable or disable TX or RX timestamping. */
+static int tile_hwtstamp_ioctl(struct net_device *dev, struct ifreq *rq,
+ int cmd)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+ struct hwtstamp_config config;
+ struct tile_net_priv *priv = netdev_priv(dev);
-oops:
- /* Add a description to the page allocation failure dump. */
- pr_notice("Tile %d still needs some buffers\n", info->my_cpu);
+ if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ if (config.flags) /* reserved for future extensions */
+ return -EINVAL;
+
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
+ return -EFAULT;
+
+ priv->stamp_cfg = config;
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
}
static inline bool filter_packet(struct net_device *dev, void *buf)
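
The tile_hwtstamp_ioctl() handler added above services SIOCSHWTSTAMP; userspace passes a struct hwtstamp_config through the ifreq data pointer. A hedged sketch of that side (interface name and error handling are illustrative):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

static int enable_rx_hwtstamps(int sock, const char *ifname)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_OFF,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}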
@@ -398,7 +544,7 @@ static inline bool filter_packet(struct net_device *dev, void *buf)
/* Filter out packets that aren't for us. */
if (!(dev->flags & IFF_PROMISC) &&
!is_multicast_ether_addr(buf) &&
- compare_ether_addr(dev->dev_addr, buf) != 0)
+ !ether_addr_equal(dev->dev_addr, buf))
return true;
return false;
@@ -409,6 +555,7 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
{
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
struct tile_net_priv *priv = netdev_priv(dev);
+ int instance = priv->instance;
/* Encode the actual packet length. */
skb_put(skb, len);
@@ -419,47 +566,52 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_receive_skb(skb);
+ /* Get RX timestamp from idesc. */
+ tile_rx_timestamp(priv, skb, idesc);
+
+ napi_gro_receive(&info->mpipe[instance].napi, skb);
/* Update stats. */
- tile_net_stats_add(1, &priv->stats.rx_packets);
- tile_net_stats_add(len, &priv->stats.rx_bytes);
+ tile_net_stats_add(1, &dev->stats.rx_packets);
+ tile_net_stats_add(len, &dev->stats.rx_bytes);
/* Need a new buffer. */
- if (idesc->size == BUFFER_SIZE_SMALL_ENUM)
- info->num_needed_small_buffers++;
+ if (idesc->size == buffer_size_enums[0])
+ info->mpipe[instance].num_needed_buffers[0]++;
+ else if (idesc->size == buffer_size_enums[1])
+ info->mpipe[instance].num_needed_buffers[1]++;
else
- info->num_needed_large_buffers++;
+ info->mpipe[instance].num_needed_buffers[2]++;
}
/* Handle a packet. Return true if "processed", false if "filtered". */
-static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
+static bool tile_net_handle_packet(int instance, gxio_mpipe_idesc_t *idesc)
{
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
- struct net_device *dev = tile_net_devs_for_channel[idesc->channel];
+ struct mpipe_data *md = &mpipe_data[instance];
+ struct net_device *dev = md->tile_net_devs_for_channel[idesc->channel];
uint8_t l2_offset;
void *va;
void *buf;
unsigned long len;
bool filter;
- /* Drop packets for which no buffer was available.
- * NOTE: This happens under heavy load.
+ /* Drop packets for which no buffer was available (which can
+ * happen under heavy load), or for which the me/tr/ce flags
+ * are set (which can happen for jumbo cut-through packets,
+ * or with a customized classifier).
*/
- if (idesc->be) {
- struct tile_net_priv *priv = netdev_priv(dev);
- tile_net_stats_add(1, &priv->stats.rx_dropped);
- gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
- if (net_ratelimit())
- pr_info("Dropping packet (insufficient buffers).\n");
- return false;
+ if (idesc->be || idesc->me || idesc->tr || idesc->ce) {
+ if (dev)
+ tile_net_stats_add(1, &dev->stats.rx_errors);
+ goto drop;
}
/* Get the "l2_offset", if allowed. */
- l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
+ l2_offset = custom_flag ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
- /* Get the raw buffer VA (includes "headroom"). */
- va = tile_io_addr_to_va((unsigned long)(long)idesc->va);
+ /* Get the VA (including NET_IP_ALIGN bytes of "headroom"). */
+ va = tile_io_addr_to_va((unsigned long)idesc->va);
/* Get the actual packet start/length. */
buf = va + l2_offset;
@@ -470,7 +622,10 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
filter = filter_packet(dev, buf);
if (filter) {
- gxio_mpipe_iqueue_drop(&info->iqueue, idesc);
+ if (dev)
+ tile_net_stats_add(1, &dev->stats.rx_dropped);
+drop:
+ gxio_mpipe_iqueue_drop(&info->mpipe[instance].iqueue, idesc);
} else {
struct sk_buff *skb = mpipe_buf_to_skb(va);
@@ -480,7 +635,7 @@ static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
tile_net_receive_skb(dev, skb, idesc, len);
}
- gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
+ gxio_mpipe_iqueue_consume(&info->mpipe[instance].iqueue, idesc);
return !filter;
}
@@ -501,14 +656,20 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
unsigned int work = 0;
gxio_mpipe_idesc_t *idesc;
- int i, n;
-
- /* Process packets. */
- while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) {
+ int instance, i, n;
+ struct mpipe_data *md;
+ struct info_mpipe *info_mpipe =
+ container_of(napi, struct info_mpipe, napi);
+
+ instance = info_mpipe->instance;
+ while ((n = gxio_mpipe_iqueue_try_peek(
+ &info_mpipe->iqueue,
+ &idesc)) > 0) {
for (i = 0; i < n; i++) {
if (i == TILE_NET_BATCH)
goto done;
- if (tile_net_handle_packet(idesc + i)) {
+ if (tile_net_handle_packet(instance,
+ idesc + i)) {
if (++work >= budget)
goto done;
}
@@ -516,14 +677,16 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
}
/* There are no packets left. */
- napi_complete(&info->napi);
+ napi_complete(&info_mpipe->napi);
+ md = &mpipe_data[instance];
/* Re-enable hypervisor interrupts. */
- gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring);
+ gxio_mpipe_enable_notif_ring_interrupt(
+ &md->context, info->mpipe[instance].iqueue.ring);
/* HACK: Avoid the "rotting packet" problem. */
- if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0)
- napi_schedule(&info->napi);
+ if (gxio_mpipe_iqueue_try_peek(&info_mpipe->iqueue, &idesc) > 0)
+ napi_schedule(&info_mpipe->napi);
/* ISSUE: Handle completions? */
@@ -533,11 +696,11 @@ done:
return work;
}
-/* Handle an ingress interrupt on the current cpu. */
-static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused)
+/* Handle an ingress interrupt from an instance on the current cpu. */
+static irqreturn_t tile_net_handle_ingress_irq(int irq, void *id)
{
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
- napi_schedule(&info->napi);
+ napi_schedule(&info->mpipe[(uint64_t)id].napi);
return IRQ_HANDLED;
}
@@ -579,7 +742,9 @@ static void tile_net_schedule_tx_wake_timer(struct net_device *dev,
{
struct tile_net_info *info = &per_cpu(per_cpu_info, tx_queue_idx);
struct tile_net_priv *priv = netdev_priv(dev);
- struct tile_net_tx_wake *tx_wake = &info->tx_wake[priv->echannel];
+ int instance = priv->instance;
+ struct tile_net_tx_wake *tx_wake =
+ &info->mpipe[instance].tx_wake[priv->echannel];
hrtimer_start(&tx_wake->timer,
ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
@@ -617,7 +782,7 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
unsigned long irqflags;
bool pending = false;
- int i;
+ int i, instance;
local_irq_save(irqflags);
@@ -625,13 +790,19 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
info->egress_timer_scheduled = false;
/* Free all possible comps for this tile. */
- for (i = 0; i < TILE_NET_CHANNELS; i++) {
- struct tile_net_egress *egress = &egress_for_echannel[i];
- struct tile_net_comps *comps = info->comps_for_echannel[i];
- if (comps->comp_last >= comps->comp_next)
- continue;
- tile_net_free_comps(egress->equeue, comps, -1, true);
- pending = pending || (comps->comp_last < comps->comp_next);
+ for (instance = 0; instance < NR_MPIPE_MAX &&
+ info->mpipe[instance].has_iqueue; instance++) {
+ for (i = 0; i < TILE_NET_CHANNELS; i++) {
+ struct tile_net_egress *egress =
+ &mpipe_data[instance].egress_for_echannel[i];
+ struct tile_net_comps *comps =
+ info->mpipe[instance].comps_for_echannel[i];
+ if (!egress || comps->comp_last >= comps->comp_next)
+ continue;
+ tile_net_free_comps(egress->equeue, comps, -1, true);
+ pending = pending ||
+ (comps->comp_last < comps->comp_next);
+ }
}
/* Reschedule timer if needed. */
@@ -643,37 +814,112 @@ static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
return HRTIMER_NORESTART;
}
-/* Helper function for "tile_net_update()".
- * "dev" (i.e. arg) is the device being brought up or down,
- * or NULL if all devices are now down.
- */
-static void tile_net_update_cpu(void *arg)
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+
+/* PTP clock operations. */
+
+static int ptp_mpipe_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
- struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
- struct net_device *dev = arg;
+ int ret = 0;
+ struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
+ mutex_lock(&md->ptp_lock);
+ if (gxio_mpipe_adjust_timestamp_freq(&md->context, ppb))
+ ret = -EINVAL;
+ mutex_unlock(&md->ptp_lock);
+ return ret;
+}
- if (!info->has_iqueue)
- return;
+static int ptp_mpipe_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ int ret = 0;
+ struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
+ mutex_lock(&md->ptp_lock);
+ if (gxio_mpipe_adjust_timestamp(&md->context, delta))
+ ret = -EBUSY;
+ mutex_unlock(&md->ptp_lock);
+ return ret;
+}
- if (dev != NULL) {
- if (!info->napi_added) {
- netif_napi_add(dev, &info->napi,
- tile_net_poll, TILE_NET_WEIGHT);
- info->napi_added = true;
- }
- if (!info->napi_enabled) {
- napi_enable(&info->napi);
- info->napi_enabled = true;
- }
- enable_percpu_irq(ingress_irq, 0);
- } else {
- disable_percpu_irq(ingress_irq);
- if (info->napi_enabled) {
- napi_disable(&info->napi);
- info->napi_enabled = false;
- }
- /* FIXME: Drain the iqueue. */
- }
+static int ptp_mpipe_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
+{
+ int ret = 0;
+ struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
+ mutex_lock(&md->ptp_lock);
+ if (gxio_mpipe_get_timestamp(&md->context, ts))
+ ret = -EBUSY;
+ mutex_unlock(&md->ptp_lock);
+ return ret;
+}
+
+static int ptp_mpipe_settime(struct ptp_clock_info *ptp,
+ const struct timespec *ts)
+{
+ int ret = 0;
+ struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
+ mutex_lock(&md->ptp_lock);
+ if (gxio_mpipe_set_timestamp(&md->context, ts))
+ ret = -EBUSY;
+ mutex_unlock(&md->ptp_lock);
+ return ret;
+}
+
+static int ptp_mpipe_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *request, int on)
+{
+ return -EOPNOTSUPP;
+}
+
+static struct ptp_clock_info ptp_mpipe_caps = {
+ .owner = THIS_MODULE,
+ .name = "mPIPE clock",
+ .max_adj = 999999999,
+ .n_ext_ts = 0,
+ .pps = 0,
+ .adjfreq = ptp_mpipe_adjfreq,
+ .adjtime = ptp_mpipe_adjtime,
+ .gettime = ptp_mpipe_gettime,
+ .settime = ptp_mpipe_settime,
+ .enable = ptp_mpipe_enable,
+};
+
+#endif /* CONFIG_PTP_1588_CLOCK_TILEGX */
+
+/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
+static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+ struct timespec ts;
+
+ getnstimeofday(&ts);
+ gxio_mpipe_set_timestamp(&md->context, &ts);
+
+ mutex_init(&md->ptp_lock);
+ md->caps = ptp_mpipe_caps;
+ md->ptp_clock = ptp_clock_register(&md->caps, NULL);
+ if (IS_ERR(md->ptp_clock))
+ netdev_err(dev, "ptp_clock_register failed %ld\n",
+ PTR_ERR(md->ptp_clock));
+#endif
+}
+
+/* Initialize PTP fields in a new device. */
+static void init_ptp_dev(struct tile_net_priv *priv)
+{
+#ifdef CONFIG_PTP_1588_CLOCK_TILEGX
+ priv->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
+ priv->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
+#endif
+}
+
+/* Helper functions for "tile_net_update()". */
+static void enable_ingress_irq(void *irq)
+{
+ enable_percpu_irq((long)irq, 0);
+}
+
+static void disable_ingress_irq(void *irq)
+{
+ disable_percpu_irq((long)irq);
}
/* Helper function for tile_net_open() and tile_net_stop().
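
Once register_ptp_clock() succeeds, the mPIPE clock shows up as a /dev/ptpN character device whose time can be read as a dynamic POSIX clock. A hedged userspace sketch; the device path is illustrative and the fd-to-clockid encoding follows the kernel's own testptp example:

#include <fcntl.h>
#include <stdio.h>
#include <time.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	int fd = open("/dev/ptp0", O_RDONLY);	/* illustrative path */
	struct timespec ts;

	if (fd < 0)
		return 1;
	if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
		printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}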
@@ -683,19 +929,22 @@ static int tile_net_update(struct net_device *dev)
{
static gxio_mpipe_rules_t rules; /* too big to fit on the stack */
bool saw_channel = false;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
int channel;
int rc;
int cpu;
- gxio_mpipe_rules_init(&rules, &context);
+ saw_channel = false;
+ gxio_mpipe_rules_init(&rules, &md->context);
for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
- if (tile_net_devs_for_channel[channel] == NULL)
+ if (md->tile_net_devs_for_channel[channel] == NULL)
continue;
if (!saw_channel) {
saw_channel = true;
- gxio_mpipe_rules_begin(&rules, first_bucket,
- num_buckets, NULL);
+ gxio_mpipe_rules_begin(&rules, md->first_bucket,
+ md->num_buckets, NULL);
gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
}
gxio_mpipe_rules_add_channel(&rules, channel);
@@ -706,102 +955,150 @@ static int tile_net_update(struct net_device *dev)
*/
rc = gxio_mpipe_rules_commit(&rules);
if (rc != 0) {
- netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc);
+ netdev_warn(dev, "gxio_mpipe_rules_commit: mpipe[%d] %d\n",
+ instance, rc);
return -EIO;
}
- /* Update all cpus, sequentially (to protect "netif_napi_add()"). */
- for_each_online_cpu(cpu)
- smp_call_function_single(cpu, tile_net_update_cpu,
- (saw_channel ? dev : NULL), 1);
+ /* Update all cpus, sequentially (to protect "netif_napi_add()").
+ * We use on_each_cpu to handle the IPI mask or unmask.
+ */
+ if (!saw_channel)
+ on_each_cpu(disable_ingress_irq,
+ (void *)(long)(md->ingress_irq), 1);
+ for_each_online_cpu(cpu) {
+ struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
+
+ if (!info->mpipe[instance].has_iqueue)
+ continue;
+ if (saw_channel) {
+ if (!info->mpipe[instance].napi_added) {
+ netif_napi_add(dev, &info->mpipe[instance].napi,
+ tile_net_poll, TILE_NET_WEIGHT);
+ info->mpipe[instance].napi_added = true;
+ }
+ if (!info->mpipe[instance].napi_enabled) {
+ napi_enable(&info->mpipe[instance].napi);
+ info->mpipe[instance].napi_enabled = true;
+ }
+ } else {
+ if (info->mpipe[instance].napi_enabled) {
+ napi_disable(&info->mpipe[instance].napi);
+ info->mpipe[instance].napi_enabled = false;
+ }
+ /* FIXME: Drain the iqueue. */
+ }
+ }
+ if (saw_channel)
+ on_each_cpu(enable_ingress_irq,
+ (void *)(long)(md->ingress_irq), 1);
/* HACK: Allow packets to flow in the simulator. */
if (saw_channel)
- sim_enable_mpipe_links(0, -1);
+ sim_enable_mpipe_links(instance, -1);
return 0;
}
-/* Allocate and initialize mpipe buffer stacks, and register them in
- * the mPIPE TLBs, for both small and large packet sizes.
- * This routine supports tile_net_init_mpipe(), below.
- */
-static int init_buffer_stacks(struct net_device *dev, int num_buffers)
+/* Initialize a buffer stack. */
+static int create_buffer_stack(struct net_device *dev,
+ int kind, size_t num_buffers)
{
pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
- int rc;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
+ size_t needed = gxio_mpipe_calc_buffer_stack_bytes(num_buffers);
+ int stack_idx = md->first_buffer_stack + kind;
+ void *va;
+ int i, rc;
- /* Compute stack bytes; we round up to 64KB and then use
- * alloc_pages() so we get the required 64KB alignment as well.
+ /* Round up to 64KB and then use alloc_pages() so we get the
+ * required 64KB alignment.
*/
- buffer_stack_size =
- ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers),
- 64 * 1024);
+ md->buffer_stack_bytes[kind] =
+ ALIGN(needed, 64 * 1024);
- /* Allocate two buffer stack indices. */
- rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0);
- if (rc < 0) {
- netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n",
- rc);
- return rc;
- }
- small_buffer_stack = rc;
- large_buffer_stack = rc + 1;
-
- /* Allocate the small memory stack. */
- small_buffer_stack_va =
- alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
- if (small_buffer_stack_va == NULL) {
+ va = alloc_pages_exact(md->buffer_stack_bytes[kind], GFP_KERNEL);
+ if (va == NULL) {
netdev_err(dev,
- "Could not alloc %zd bytes for buffer stacks\n",
- buffer_stack_size);
+ "Could not alloc %zd bytes for buffer stack %d\n",
+ md->buffer_stack_bytes[kind], kind);
return -ENOMEM;
}
- rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack,
- BUFFER_SIZE_SMALL_ENUM,
- small_buffer_stack_va,
- buffer_stack_size, 0);
+
+ /* Initialize the buffer stack. */
+ rc = gxio_mpipe_init_buffer_stack(&md->context, stack_idx,
+ buffer_size_enums[kind], va,
+ md->buffer_stack_bytes[kind], 0);
if (rc != 0) {
- netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc);
+ netdev_err(dev, "gxio_mpipe_init_buffer_stack: mpipe[%d] %d\n",
+ instance, rc);
+ free_pages_exact(va, md->buffer_stack_bytes[kind]);
return rc;
}
- rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack,
+
+ md->buffer_stack_vas[kind] = va;
+
+ rc = gxio_mpipe_register_client_memory(&md->context, stack_idx,
hash_pte, 0);
if (rc != 0) {
netdev_err(dev,
- "gxio_mpipe_register_buffer_memory failed: %d\n",
- rc);
+ "gxio_mpipe_register_client_memory: mpipe[%d] %d\n",
+ instance, rc);
return rc;
}
- /* Allocate the large buffer stack. */
- large_buffer_stack_va =
- alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
- if (large_buffer_stack_va == NULL) {
- netdev_err(dev,
- "Could not alloc %zd bytes for buffer stacks\n",
- buffer_stack_size);
- return -ENOMEM;
- }
- rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack,
- BUFFER_SIZE_LARGE_ENUM,
- large_buffer_stack_va,
- buffer_stack_size, 0);
- if (rc != 0) {
- netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n",
- rc);
- return rc;
+ /* Provide initial buffers. */
+ for (i = 0; i < num_buffers; i++) {
+ if (!tile_net_provide_buffer(instance, kind)) {
+ netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
+ return -ENOMEM;
+ }
}
- rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack,
- hash_pte, 0);
- if (rc != 0) {
+
+ return 0;
+}
+
+/* Allocate and initialize mpipe buffer stacks, and register them in
+ * the mPIPE TLBs, for small, large, and (possibly) jumbo packet sizes.
+ * This routine supports tile_net_init_mpipe(), below.
+ */
+static int init_buffer_stacks(struct net_device *dev,
+ int network_cpus_count)
+{
+ int num_kinds = MAX_KINDS - (jumbo_num == 0);
+ size_t num_buffers;
+ int rc;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
+
+ /* Allocate the buffer stacks. */
+ rc = gxio_mpipe_alloc_buffer_stacks(&md->context, num_kinds, 0, 0);
+ if (rc < 0) {
netdev_err(dev,
- "gxio_mpipe_register_buffer_memory failed: %d\n",
- rc);
+ "gxio_mpipe_alloc_buffer_stacks: mpipe[%d] %d\n",
+ instance, rc);
return rc;
}
+ md->first_buffer_stack = rc;
- return 0;
+ /* Enough small/large buffers to (normally) avoid buffer errors. */
+ num_buffers =
+ network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
+
+ /* Allocate the small memory stack. */
+ if (rc >= 0)
+ rc = create_buffer_stack(dev, 0, num_buffers);
+
+ /* Allocate the large buffer stack. */
+ if (rc >= 0)
+ rc = create_buffer_stack(dev, 1, num_buffers);
+
+ /* Allocate the jumbo buffer stack if needed. */
+ if (rc >= 0 && jumbo_num != 0)
+ rc = create_buffer_stack(dev, 2, jumbo_num);
+
+ return rc;
}
/* Allocate per-cpu resources (memory for completions and idescs).
@@ -812,6 +1109,8 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
{
struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
int order, i, rc;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
struct page *page;
void *addr;
@@ -826,7 +1125,7 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
addr = pfn_to_kaddr(page_to_pfn(page));
memset(addr, 0, COMPS_SIZE);
for (i = 0; i < TILE_NET_CHANNELS; i++)
- info->comps_for_echannel[i] =
+ info->mpipe[instance].comps_for_echannel[i] =
addr + i * sizeof(struct tile_net_comps);
/* If this is a network cpu, create an iqueue. */
@@ -840,14 +1139,15 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
return -ENOMEM;
}
addr = pfn_to_kaddr(page_to_pfn(page));
- rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++,
- addr, NOTIF_RING_SIZE, 0);
+ rc = gxio_mpipe_iqueue_init(&info->mpipe[instance].iqueue,
+ &md->context, ring++, addr,
+ NOTIF_RING_SIZE, 0);
if (rc < 0) {
netdev_err(dev,
"gxio_mpipe_iqueue_init failed: %d\n", rc);
return rc;
}
- info->has_iqueue = true;
+ info->mpipe[instance].has_iqueue = true;
}
return ring;
@@ -860,40 +1160,41 @@ static int init_notif_group_and_buckets(struct net_device *dev,
int ring, int network_cpus_count)
{
int group, rc;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
/* Allocate one NotifGroup. */
- rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0);
+ rc = gxio_mpipe_alloc_notif_groups(&md->context, 1, 0, 0);
if (rc < 0) {
- netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n",
- rc);
+ netdev_err(dev, "gxio_mpipe_alloc_notif_groups: mpipe[%d] %d\n",
+ instance, rc);
return rc;
}
group = rc;
/* Initialize global num_buckets value. */
if (network_cpus_count > 4)
- num_buckets = 256;
+ md->num_buckets = 256;
else if (network_cpus_count > 1)
- num_buckets = 16;
+ md->num_buckets = 16;
/* Allocate some buckets, and set global first_bucket value. */
- rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0);
+ rc = gxio_mpipe_alloc_buckets(&md->context, md->num_buckets, 0, 0);
if (rc < 0) {
- netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc);
+ netdev_err(dev, "gxio_mpipe_alloc_buckets: mpipe[%d] %d\n",
+ instance, rc);
return rc;
}
- first_bucket = rc;
+ md->first_bucket = rc;
/* Init group and buckets. */
rc = gxio_mpipe_init_notif_group_and_buckets(
- &context, group, ring, network_cpus_count,
- first_bucket, num_buckets,
+ &md->context, group, ring, network_cpus_count,
+ md->first_bucket, md->num_buckets,
GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
if (rc != 0) {
- netdev_err(
- dev,
- "gxio_mpipe_init_notif_group_and_buckets failed: %d\n",
- rc);
+ netdev_err(dev, "gxio_mpipe_init_notif_group_and_buckets: "
+ "mpipe[%d] %d\n", instance, rc);
return rc;
}
@@ -907,30 +1208,39 @@ static int init_notif_group_and_buckets(struct net_device *dev,
*/
static int tile_net_setup_interrupts(struct net_device *dev)
{
- int cpu, rc;
+ int cpu, rc, irq;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
+
+ irq = md->ingress_irq;
+ if (irq < 0) {
+ irq = create_irq();
+ if (irq < 0) {
+ netdev_err(dev,
+ "create_irq failed: mpipe[%d] %d\n",
+ instance, irq);
+ return irq;
+ }
+ tile_irq_activate(irq, TILE_IRQ_PERCPU);
- rc = create_irq();
- if (rc < 0) {
- netdev_err(dev, "create_irq failed: %d\n", rc);
- return rc;
- }
- ingress_irq = rc;
- tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
- rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
- 0, "tile_net", NULL);
- if (rc != 0) {
- netdev_err(dev, "request_irq failed: %d\n", rc);
- destroy_irq(ingress_irq);
- ingress_irq = -1;
- return rc;
+ rc = request_irq(irq, tile_net_handle_ingress_irq,
+ 0, "tile_net", (void *)((uint64_t)instance));
+
+ if (rc != 0) {
+ netdev_err(dev, "request_irq failed: mpipe[%d] %d\n",
+ instance, rc);
+ destroy_irq(irq);
+ return rc;
+ }
+ md->ingress_irq = irq;
}
for_each_online_cpu(cpu) {
struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
- if (info->has_iqueue) {
- gxio_mpipe_request_notif_ring_interrupt(
- &context, cpu_x(cpu), cpu_y(cpu),
- KERNEL_PL, ingress_irq, info->iqueue.ring);
+ if (info->mpipe[instance].has_iqueue) {
+ gxio_mpipe_request_notif_ring_interrupt(&md->context,
+ cpu_x(cpu), cpu_y(cpu), KERNEL_PL, irq,
+ info->mpipe[instance].iqueue.ring);
}
}
@@ -938,39 +1248,45 @@ static int tile_net_setup_interrupts(struct net_device *dev)
}
/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
-static void tile_net_init_mpipe_fail(void)
+static void tile_net_init_mpipe_fail(int instance)
{
- int cpu;
+ int kind, cpu;
+ struct mpipe_data *md = &mpipe_data[instance];
/* Do cleanups that require the mpipe context first. */
- if (small_buffer_stack >= 0)
- tile_net_pop_all_buffers(small_buffer_stack);
- if (large_buffer_stack >= 0)
- tile_net_pop_all_buffers(large_buffer_stack);
+ for (kind = 0; kind < MAX_KINDS; kind++) {
+ if (md->buffer_stack_vas[kind] != NULL) {
+ tile_net_pop_all_buffers(instance,
+ md->first_buffer_stack +
+ kind);
+ }
+ }
/* Destroy mpipe context so the hardware no longer owns any memory. */
- gxio_mpipe_destroy(&context);
+ gxio_mpipe_destroy(&md->context);
for_each_online_cpu(cpu) {
struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
- free_pages((unsigned long)(info->comps_for_echannel[0]),
- get_order(COMPS_SIZE));
- info->comps_for_echannel[0] = NULL;
- free_pages((unsigned long)(info->iqueue.idescs),
+ free_pages(
+ (unsigned long)(
+ info->mpipe[instance].comps_for_echannel[0]),
+ get_order(COMPS_SIZE));
+ info->mpipe[instance].comps_for_echannel[0] = NULL;
+ free_pages((unsigned long)(info->mpipe[instance].iqueue.idescs),
get_order(NOTIF_RING_SIZE));
- info->iqueue.idescs = NULL;
+ info->mpipe[instance].iqueue.idescs = NULL;
}
- if (small_buffer_stack_va)
- free_pages_exact(small_buffer_stack_va, buffer_stack_size);
- if (large_buffer_stack_va)
- free_pages_exact(large_buffer_stack_va, buffer_stack_size);
+ for (kind = 0; kind < MAX_KINDS; kind++) {
+ if (md->buffer_stack_vas[kind] != NULL) {
+ free_pages_exact(md->buffer_stack_vas[kind],
+ md->buffer_stack_bytes[kind]);
+ md->buffer_stack_vas[kind] = NULL;
+ }
+ }
- small_buffer_stack_va = NULL;
- large_buffer_stack_va = NULL;
- large_buffer_stack = -1;
- small_buffer_stack = -1;
- first_bucket = -1;
+ md->first_buffer_stack = -1;
+ md->first_bucket = -1;
}
/* The first time any tilegx network device is opened, we initialize
@@ -984,9 +1300,11 @@ static void tile_net_init_mpipe_fail(void)
*/
static int tile_net_init_mpipe(struct net_device *dev)
{
- int i, num_buffers, rc;
+ int rc;
int cpu;
int first_ring, ring;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
int network_cpus_count = cpus_weight(network_cpus_map);
if (!hash_default) {
@@ -994,36 +1312,21 @@ static int tile_net_init_mpipe(struct net_device *dev)
return -EIO;
}
- rc = gxio_mpipe_init(&context, 0);
+ rc = gxio_mpipe_init(&md->context, instance);
if (rc != 0) {
- netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc);
+ netdev_err(dev, "gxio_mpipe_init: mpipe[%d] %d\n",
+ instance, rc);
return -EIO;
}
/* Set up the buffer stacks. */
- num_buffers =
- network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
- rc = init_buffer_stacks(dev, num_buffers);
+ rc = init_buffer_stacks(dev, network_cpus_count);
if (rc != 0)
goto fail;
- /* Provide initial buffers. */
- rc = -ENOMEM;
- for (i = 0; i < num_buffers; i++) {
- if (!tile_net_provide_buffer(true)) {
- netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
- goto fail;
- }
- }
- for (i = 0; i < num_buffers; i++) {
- if (!tile_net_provide_buffer(false)) {
- netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
- goto fail;
- }
- }
-
/* Allocate one NotifRing for each network cpu. */
- rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0);
+ rc = gxio_mpipe_alloc_notif_rings(&md->context,
+ network_cpus_count, 0, 0);
if (rc < 0) {
netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
rc);
@@ -1050,10 +1353,13 @@ static int tile_net_init_mpipe(struct net_device *dev)
if (rc != 0)
goto fail;
+ /* Register PTP clock and set mPIPE timestamp, if configured. */
+ register_ptp_clock(dev, md);
+
return 0;
fail:
- tile_net_init_mpipe_fail();
+ tile_net_init_mpipe_fail(instance);
return rc;
}
@@ -1063,17 +1369,19 @@ fail:
*/
static int tile_net_init_egress(struct net_device *dev, int echannel)
{
+ static int ering = -1;
struct page *headers_page, *edescs_page, *equeue_page;
gxio_mpipe_edesc_t *edescs;
gxio_mpipe_equeue_t *equeue;
unsigned char *headers;
int headers_order, edescs_order, equeue_order;
size_t edescs_size;
- int edma;
int rc = -ENOMEM;
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
/* Only initialize once. */
- if (egress_for_echannel[echannel].equeue != NULL)
+ if (md->egress_for_echannel[echannel].equeue != NULL)
return 0;
/* Allocate memory for the "headers". */
@@ -1110,28 +1418,41 @@ static int tile_net_init_egress(struct net_device *dev, int echannel)
}
equeue = pfn_to_kaddr(page_to_pfn(equeue_page));
- /* Allocate an edma ring. Note that in practice this can't
- * fail, which is good, because we will leak an edma ring if so.
- */
- rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
- if (rc < 0) {
- netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n",
- rc);
- goto fail_equeue;
+ /* Allocate an edma ring (using a one entry "free list"). */
+ if (ering < 0) {
+ rc = gxio_mpipe_alloc_edma_rings(&md->context, 1, 0, 0);
+ if (rc < 0) {
+ netdev_warn(dev, "gxio_mpipe_alloc_edma_rings: "
+ "mpipe[%d] %d\n", instance, rc);
+ goto fail_equeue;
+ }
+ ering = rc;
}
- edma = rc;
/* Initialize the equeue. */
- rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel,
+ rc = gxio_mpipe_equeue_init(equeue, &md->context, ering, echannel,
edescs, edescs_size, 0);
if (rc != 0) {
- netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc);
+ netdev_err(dev, "gxio_mpipe_equeue_init: mpipe[%d] %d\n",
+ instance, rc);
goto fail_equeue;
}
+ /* Don't reuse the ering later. */
+ ering = -1;
+
+ if (jumbo_num != 0) {
+ /* Make sure "jumbo" packets can be egressed safely. */
+ if (gxio_mpipe_equeue_set_snf_size(equeue, 10368) < 0) {
+ /* ISSUE: There is no "gxio_mpipe_equeue_destroy()". */
+ netdev_warn(dev, "Jumbo packets may not be egressed"
+ " properly on channel %d\n", echannel);
+ }
+ }
+
/* Done. */
- egress_for_echannel[echannel].equeue = equeue;
- egress_for_echannel[echannel].headers = headers;
+ md->egress_for_echannel[echannel].equeue = equeue;
+ md->egress_for_echannel[echannel].headers = headers;
return 0;
fail_equeue:
@@ -1151,11 +1472,25 @@ fail:
static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
const char *link_name)
{
- int rc = gxio_mpipe_link_open(link, &context, link_name, 0);
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
+ int rc = gxio_mpipe_link_open(link, &md->context, link_name, 0);
if (rc < 0) {
- netdev_err(dev, "Failed to open '%s'\n", link_name);
+ netdev_err(dev, "Failed to open '%s', mpipe[%d], %d\n",
+ link_name, instance, rc);
return rc;
}
+ if (jumbo_num != 0) {
+ u32 attr = GXIO_MPIPE_LINK_RECEIVE_JUMBO;
+ rc = gxio_mpipe_link_set_attr(link, attr, 1);
+ if (rc != 0) {
+ netdev_err(dev,
+ "Cannot receive jumbo packets on '%s'\n",
+ link_name);
+ gxio_mpipe_link_close(link);
+ return rc;
+ }
+ }
rc = gxio_mpipe_link_channel(link);
if (rc < 0 || rc >= TILE_NET_CHANNELS) {
netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
@@ -1169,12 +1504,23 @@ static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
static int tile_net_open(struct net_device *dev)
{
struct tile_net_priv *priv = netdev_priv(dev);
- int cpu, rc;
+ int cpu, rc, instance;
mutex_lock(&tile_net_devs_for_channel_mutex);
- /* Do one-time initialization the first time any device is opened. */
- if (ingress_irq < 0) {
+ /* Get the instance info. */
+ rc = gxio_mpipe_link_instance(dev->name);
+ if (rc < 0 || rc >= NR_MPIPE_MAX) {
+ mutex_unlock(&tile_net_devs_for_channel_mutex);
+ return -EIO;
+ }
+
+ priv->instance = rc;
+ instance = rc;
+ if (!mpipe_data[rc].context.mmio_fast_base) {
+ /* Do one-time initialization per instance the first time
+ * any device is opened.
+ */
rc = tile_net_init_mpipe(dev);
if (rc != 0)
goto fail;
@@ -1205,7 +1551,7 @@ static int tile_net_open(struct net_device *dev)
if (rc != 0)
goto fail;
- tile_net_devs_for_channel[priv->channel] = dev;
+ mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = dev;
rc = tile_net_update(dev);
if (rc != 0)
@@ -1217,7 +1563,7 @@ static int tile_net_open(struct net_device *dev)
for_each_online_cpu(cpu) {
struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
struct tile_net_tx_wake *tx_wake =
- &info->tx_wake[priv->echannel];
+ &info->mpipe[instance].tx_wake[priv->echannel];
hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
HRTIMER_MODE_REL);
@@ -1243,7 +1589,7 @@ fail:
priv->channel = -1;
}
priv->echannel = -1;
- tile_net_devs_for_channel[priv->channel] = NULL;
+ mpipe_data[instance].tile_net_devs_for_channel[priv->channel] = NULL;
mutex_unlock(&tile_net_devs_for_channel_mutex);
/* Don't return raw gxio error codes to generic Linux. */
@@ -1255,18 +1601,20 @@ static int tile_net_stop(struct net_device *dev)
{
struct tile_net_priv *priv = netdev_priv(dev);
int cpu;
+ int instance = priv->instance;
+ struct mpipe_data *md = &mpipe_data[instance];
for_each_online_cpu(cpu) {
struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
struct tile_net_tx_wake *tx_wake =
- &info->tx_wake[priv->echannel];
+ &info->mpipe[instance].tx_wake[priv->echannel];
hrtimer_cancel(&tx_wake->timer);
netif_stop_subqueue(dev, cpu);
}
mutex_lock(&tile_net_devs_for_channel_mutex);
- tile_net_devs_for_channel[priv->channel] = NULL;
+ md->tile_net_devs_for_channel[priv->channel] = NULL;
(void)tile_net_update(dev);
if (priv->loopify_channel >= 0) {
if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
@@ -1374,20 +1722,20 @@ static int tso_count_edescs(struct sk_buff *skb)
return num_edescs;
}
-/* Prepare modified copies of the skbuff headers.
- * FIXME: add support for IPv6.
- */
+/* Prepare modified copies of the skbuff headers. */
static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
s64 slot)
{
struct skb_shared_info *sh = skb_shinfo(skb);
struct iphdr *ih;
+ struct ipv6hdr *ih6;
struct tcphdr *th;
unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
unsigned int data_len = skb->len - sh_len;
unsigned char *data = skb->data;
unsigned int ih_off, th_off, p_len;
unsigned int isum_seed, tsum_seed, id, seq;
+ int is_ipv6;
long f_id = -1; /* id of the current fragment */
long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
long f_used = 0; /* bytes used from the current fragment */
@@ -1395,18 +1743,24 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
int segment;
/* Locate original headers and compute various lengths. */
- ih = ip_hdr(skb);
+ is_ipv6 = skb_is_gso_v6(skb);
+ if (is_ipv6) {
+ ih6 = ipv6_hdr(skb);
+ ih_off = skb_network_offset(skb);
+ } else {
+ ih = ip_hdr(skb);
+ ih_off = skb_network_offset(skb);
+ isum_seed = ((0xFFFF - ih->check) +
+ (0xFFFF - ih->tot_len) +
+ (0xFFFF - ih->id));
+ id = ntohs(ih->id);
+ }
+
th = tcp_hdr(skb);
- ih_off = skb_network_offset(skb);
th_off = skb_transport_offset(skb);
p_len = sh->gso_size;
- /* Set up seed values for IP and TCP csum and initialize id and seq. */
- isum_seed = ((0xFFFF - ih->check) +
- (0xFFFF - ih->tot_len) +
- (0xFFFF - ih->id));
tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
- id = ntohs(ih->id);
seq = ntohl(th->seq);
/* Prepare all the headers. */
@@ -1420,11 +1774,17 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
memcpy(buf, data, sh_len);
/* Update copied ip header. */
- ih = (struct iphdr *)(buf + ih_off);
- ih->tot_len = htons(sh_len + p_len - ih_off);
- ih->id = htons(id);
- ih->check = csum_long(isum_seed + ih->tot_len +
- ih->id) ^ 0xffff;
+ if (is_ipv6) {
+ ih6 = (struct ipv6hdr *)(buf + ih_off);
+ ih6->payload_len = htons(sh_len + p_len - ih_off -
+ sizeof(*ih6));
+ } else {
+ ih = (struct iphdr *)(buf + ih_off);
+ ih->tot_len = htons(sh_len + p_len - ih_off);
+ ih->id = htons(id);
+ ih->check = csum_long(isum_seed + ih->tot_len +
+ ih->id) ^ 0xffff;
+ }
/* Update copied tcp header. */
th = (struct tcphdr *)(buf + th_off);
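A minimal sketch (not the driver's code; helper and parameter names are invented) of the per-segment L3 fixup the hunk above performs: IPv4 needs tot_len, id and the header checksum refreshed for every segment, while IPv6 only carries payload_len and has no header checksum or fragment id to patch.

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/checksum.h>

/* Sketch: patch the copied L3 header of one software-TSO segment.
 * 'buf' points at the copied headers, 'ih_off' is the network header
 * offset, 'seg_end' is the offset one past this segment's payload.
 */
static void example_fixup_l3(u8 *buf, int ih_off, bool is_ipv6,
                             unsigned int seg_end, u16 *next_id)
{
        if (is_ipv6) {
                struct ipv6hdr *ih6 = (struct ipv6hdr *)(buf + ih_off);

                /* payload_len excludes the fixed IPv6 header itself. */
                ih6->payload_len = htons(seg_end - ih_off - sizeof(*ih6));
        } else {
                struct iphdr *ih = (struct iphdr *)(buf + ih_off);

                ih->tot_len = htons(seg_end - ih_off);
                ih->id = htons((*next_id)++);
                /* Recompute rather than seed-adjust, for clarity. */
                ih->check = 0;
                ih->check = ip_fast_csum((unsigned char *)ih, ih->ihl);
        }
}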
@@ -1475,8 +1835,9 @@ static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
struct sk_buff *skb, unsigned char *headers, s64 slot)
{
- struct tile_net_priv *priv = netdev_priv(dev);
struct skb_shared_info *sh = skb_shinfo(skb);
+ int instance = mpipe_instance(dev);
+ struct mpipe_data *md = &mpipe_data[instance];
unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
unsigned int data_len = skb->len - sh_len;
unsigned int p_len = sh->gso_size;
@@ -1499,8 +1860,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
edesc_head.xfer_size = sh_len;
/* This is only used to specify the TLB. */
- edesc_head.stack_idx = large_buffer_stack;
- edesc_body.stack_idx = large_buffer_stack;
+ edesc_head.stack_idx = md->first_buffer_stack;
+ edesc_body.stack_idx = md->first_buffer_stack;
/* Egress all the edescs. */
for (segment = 0; segment < sh->gso_segs; segment++) {
@@ -1553,8 +1914,8 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
}
/* Update stats. */
- tile_net_stats_add(tx_packets, &priv->stats.tx_packets);
- tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes);
+ tile_net_stats_add(tx_packets, &dev->stats.tx_packets);
+ tile_net_stats_add(tx_bytes, &dev->stats.tx_bytes);
}
/* Do "TSO" handling for egress.
@@ -1575,8 +1936,11 @@ static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
struct tile_net_priv *priv = netdev_priv(dev);
int channel = priv->echannel;
- struct tile_net_egress *egress = &egress_for_echannel[channel];
- struct tile_net_comps *comps = info->comps_for_echannel[channel];
+ int instance = priv->instance;
+ struct mpipe_data *md = &mpipe_data[instance];
+ struct tile_net_egress *egress = &md->egress_for_echannel[channel];
+ struct tile_net_comps *comps =
+ info->mpipe[instance].comps_for_echannel[channel];
gxio_mpipe_equeue_t *equeue = egress->equeue;
unsigned long irqflags;
int num_edescs;
@@ -1640,10 +2004,13 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
{
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
struct tile_net_priv *priv = netdev_priv(dev);
- struct tile_net_egress *egress = &egress_for_echannel[priv->echannel];
+ int instance = priv->instance;
+ struct mpipe_data *md = &mpipe_data[instance];
+ struct tile_net_egress *egress =
+ &md->egress_for_echannel[priv->echannel];
gxio_mpipe_equeue_t *equeue = egress->equeue;
struct tile_net_comps *comps =
- info->comps_for_echannel[priv->echannel];
+ info->mpipe[instance].comps_for_echannel[priv->echannel];
unsigned int len = skb->len;
unsigned char *data = skb->data;
unsigned int num_edescs;
@@ -1660,7 +2027,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
/* This is only used to specify the TLB. */
- edesc.stack_idx = large_buffer_stack;
+ edesc.stack_idx = md->first_buffer_stack;
/* Prepare the edescs. */
for (i = 0; i < num_edescs; i++) {
@@ -1693,13 +2060,16 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
for (i = 0; i < num_edescs; i++)
gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);
+ /* Store TX timestamp if needed. */
+ tile_tx_timestamp(skb, instance);
+
/* Add a completion record. */
add_comp(equeue, comps, slot - 1, skb);
/* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */
- tile_net_stats_add(1, &priv->stats.tx_packets);
+ tile_net_stats_add(1, &dev->stats.tx_packets);
tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
- &priv->stats.tx_bytes);
+ &dev->stats.tx_bytes);
local_irq_restore(irqflags);
@@ -1727,20 +2097,18 @@ static void tile_net_tx_timeout(struct net_device *dev)
/* Ioctl commands. */
static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- return -EOPNOTSUPP;
-}
+ if (cmd == SIOCSHWTSTAMP)
+ return tile_hwtstamp_ioctl(dev, rq, cmd);
-/* Get system network statistics for device. */
-static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
-{
- struct tile_net_priv *priv = netdev_priv(dev);
- return &priv->stats;
+ return -EOPNOTSUPP;
}
/* Change the MTU. */
static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
{
- if ((new_mtu < 68) || (new_mtu > 1500))
+ if (new_mtu < 68)
+ return -EINVAL;
+ if (new_mtu > ((jumbo_num != 0) ? 9000 : 1500))
return -EINVAL;
dev->mtu = new_mtu;
return 0;
@@ -1772,9 +2140,13 @@ static int tile_net_set_mac_address(struct net_device *dev, void *p)
*/
static void tile_net_netpoll(struct net_device *dev)
{
- disable_percpu_irq(ingress_irq);
- tile_net_handle_ingress_irq(ingress_irq, NULL);
- enable_percpu_irq(ingress_irq, 0);
+ int instance = mpipe_instance(dev);
+ struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct mpipe_data *md = &mpipe_data[instance];
+
+ disable_percpu_irq(md->ingress_irq);
+ napi_schedule(&info->mpipe[instance].napi);
+ enable_percpu_irq(md->ingress_irq, 0);
}
#endif
@@ -1784,7 +2156,6 @@ static const struct net_device_ops tile_net_ops = {
.ndo_start_xmit = tile_net_tx,
.ndo_select_queue = tile_net_select_queue,
.ndo_do_ioctl = tile_net_ioctl,
- .ndo_get_stats = tile_net_get_stats,
.ndo_change_mtu = tile_net_change_mtu,
.ndo_tx_timeout = tile_net_tx_timeout,
.ndo_set_mac_address = tile_net_set_mac_address,
@@ -1800,14 +2171,21 @@ static const struct net_device_ops tile_net_ops = {
*/
static void tile_net_setup(struct net_device *dev)
{
+ netdev_features_t features = 0;
+
ether_setup(dev);
dev->netdev_ops = &tile_net_ops;
dev->watchdog_timeo = TILE_NET_TIMEOUT;
- dev->features |= NETIF_F_LLTX;
- dev->features |= NETIF_F_HW_CSUM;
- dev->features |= NETIF_F_SG;
- dev->features |= NETIF_F_TSO;
dev->mtu = 1500;
+
+ features |= NETIF_F_HW_CSUM;
+ features |= NETIF_F_SG;
+ features |= NETIF_F_TSO;
+ features |= NETIF_F_TSO6;
+
+ dev->hw_features |= features;
+ dev->vlan_features |= features;
+ dev->features |= features;
}
/* Allocate the device structure, register the device, and obtain the
@@ -1842,6 +2220,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
priv->channel = -1;
priv->loopify_channel = -1;
priv->echannel = -1;
+ init_ptp_dev(priv);
/* Get the MAC address and set it in the device struct; this must
* be done before the device is opened. If the MAC is all zeroes,
@@ -1871,9 +2250,12 @@ static void tile_net_init_module_percpu(void *unused)
{
struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
int my_cpu = smp_processor_id();
+ int instance;
- info->has_iqueue = false;
-
+ for (instance = 0; instance < NR_MPIPE_MAX; instance++) {
+ info->mpipe[instance].has_iqueue = false;
+ info->mpipe[instance].instance = instance;
+ }
info->my_cpu = my_cpu;
/* Initialize the egress timer. */
@@ -1890,6 +2272,8 @@ static int __init tile_net_init_module(void)
pr_info("Tilera Network Driver\n");
+ BUILD_BUG_ON(NR_MPIPE_MAX != 2);
+
mutex_init(&tile_net_devs_for_channel_mutex);
/* Initialize each CPU. */
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 36435499814..106be47716e 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -31,6 +31,7 @@
#include <linux/in6.h>
#include <linux/timer.h>
#include <linux/io.h>
+#include <linux/u64_stats_sync.h>
#include <asm/checksum.h>
#include <asm/homecache.h>
@@ -88,13 +89,6 @@
/* ISSUE: This has not been thoroughly tested (except at 1500). */
#define TILE_NET_MTU 1500
-/* HACK: Define to support GSO. */
-/* ISSUE: This may actually hurt performance of the TCP blaster. */
-/* #define TILE_NET_GSO */
-
-/* Define this to collapse "duplicate" acks. */
-/* #define IGNORE_DUP_ACKS */
-
/* HACK: Define this to verify incoming packets. */
/* #define TILE_NET_VERIFY_INGRESS */
@@ -156,10 +150,13 @@ struct tile_netio_queue {
* Statistics counters for a specific cpu and device.
*/
struct tile_net_stats_t {
- u32 rx_packets;
- u32 rx_bytes;
- u32 tx_packets;
- u32 tx_bytes;
+ struct u64_stats_sync syncp;
+ u64 rx_packets; /* total packets received */
+ u64 tx_packets; /* total packets transmitted */
+ u64 rx_bytes; /* total bytes received */
+ u64 tx_bytes; /* total bytes transmitted */
+ u64 rx_errors; /* packets truncated or marked bad by hw */
+ u64 rx_dropped; /* packets not for us or intf not up */
};
@@ -218,8 +215,6 @@ struct tile_net_priv {
int network_cpus_count;
/* Credits per network cpu. */
int network_cpus_credits;
- /* Network stats. */
- struct net_device_stats stats;
/* For NetIO bringup retries. */
struct delayed_work retry_work;
/* Quick access to per cpu data. */
@@ -627,79 +622,6 @@ static void tile_net_handle_egress_timer(unsigned long arg)
}
-#ifdef IGNORE_DUP_ACKS
-
-/*
- * Help detect "duplicate" ACKs. These are sequential packets (for a
- * given flow) which are exactly 66 bytes long, sharing everything but
- * ID=2@0x12, Hsum=2@0x18, Ack=4@0x2a, WinSize=2@0x30, Csum=2@0x32,
- * Tstamps=10@0x38. The ID's are +1, the Hsum's are -1, the Ack's are
- * +N, and the Tstamps are usually identical.
- *
- * NOTE: Apparently truly duplicate acks (with identical "ack" values),
- * should not be collapsed, as they are used for some kind of flow control.
- */
-static bool is_dup_ack(char *s1, char *s2, unsigned int len)
-{
- int i;
-
- unsigned long long ignorable = 0;
-
- /* Identification. */
- ignorable |= (1ULL << 0x12);
- ignorable |= (1ULL << 0x13);
-
- /* Header checksum. */
- ignorable |= (1ULL << 0x18);
- ignorable |= (1ULL << 0x19);
-
- /* ACK. */
- ignorable |= (1ULL << 0x2a);
- ignorable |= (1ULL << 0x2b);
- ignorable |= (1ULL << 0x2c);
- ignorable |= (1ULL << 0x2d);
-
- /* WinSize. */
- ignorable |= (1ULL << 0x30);
- ignorable |= (1ULL << 0x31);
-
- /* Checksum. */
- ignorable |= (1ULL << 0x32);
- ignorable |= (1ULL << 0x33);
-
- for (i = 0; i < len; i++, ignorable >>= 1) {
-
- if ((ignorable & 1) || (s1[i] == s2[i]))
- continue;
-
-#ifdef TILE_NET_DEBUG
- /* HACK: Mention non-timestamp diffs. */
- if (i < 0x38 && i != 0x2f &&
- net_ratelimit())
- pr_info("Diff at 0x%x\n", i);
-#endif
-
- return false;
- }
-
-#ifdef TILE_NET_NO_SUPPRESS_DUP_ACKS
- /* HACK: Do not suppress truly duplicate ACKs. */
- /* ISSUE: Is this actually necessary or helpful? */
- if (s1[0x2a] == s2[0x2a] &&
- s1[0x2b] == s2[0x2b] &&
- s1[0x2c] == s2[0x2c] &&
- s1[0x2d] == s2[0x2d]) {
- return false;
- }
-#endif
-
- return true;
-}
-
-#endif
-
-
-
static void tile_net_discard_aux(struct tile_net_cpu *info, int index)
{
struct tile_netio_queue *queue = &info->queue;
@@ -774,6 +696,7 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);
netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);
+ netio_pkt_status_t pkt_status = NETIO_PKT_STATUS_M(metadata, pkt);
/* Extract the packet size. FIXME: Shouldn't the second line */
/* get subtracted? Mostly moot, since it should be "zero". */
@@ -806,40 +729,25 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
#endif /* TILE_NET_DUMP_PACKETS */
#ifdef TILE_NET_VERIFY_INGRESS
- if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) &&
- NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) {
- /* Bug 6624: Includes UDP packets with a "zero" checksum. */
- pr_warning("Bad L4 checksum on %d byte packet.\n", len);
- }
- if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) &&
- NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) {
+ if (pkt_status == NETIO_PKT_STATUS_OVERSIZE && len >= 64) {
dump_packet(buf, len, "rx");
- panic("Bad L3 checksum.");
- }
- switch (NETIO_PKT_STATUS_M(metadata, pkt)) {
- case NETIO_PKT_STATUS_OVERSIZE:
- if (len >= 64) {
- dump_packet(buf, len, "rx");
- panic("Unexpected OVERSIZE.");
- }
- break;
- case NETIO_PKT_STATUS_BAD:
- pr_warning("Unexpected BAD %ld byte packet.\n", len);
+ panic("Unexpected OVERSIZE.");
}
#endif
filter = 0;
- /* ISSUE: Filter TCP packets with "bad" checksums? */
-
- if (!(dev->flags & IFF_UP)) {
+ if (pkt_status == NETIO_PKT_STATUS_BAD) {
+ /* Handle CRC error and hardware truncation. */
+ filter = 2;
+ } else if (!(dev->flags & IFF_UP)) {
/* Filter packets received before we're up. */
filter = 1;
- } else if (NETIO_PKT_STATUS_M(metadata, pkt) == NETIO_PKT_STATUS_BAD) {
+ } else if (NETIO_PKT_ETHERTYPE_RECOGNIZED_M(metadata, pkt) &&
+ pkt_status == NETIO_PKT_STATUS_UNDERSIZE) {
/* Filter "truncated" packets. */
- filter = 1;
+ filter = 2;
} else if (!(dev->flags & IFF_PROMISC)) {
- /* FIXME: Implement HW multicast filter. */
if (!is_multicast_ether_addr(buf)) {
/* Filter packets not for our address. */
const u8 *mine = dev->dev_addr;
@@ -847,9 +755,14 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
}
}
- if (filter) {
+ u64_stats_update_begin(&stats->syncp);
- /* ISSUE: Update "drop" statistics? */
+ if (filter != 0) {
+
+ if (filter == 1)
+ stats->rx_dropped++;
+ else
+ stats->rx_errors++;
tile_net_provide_linux_buffer(info, va, small);
@@ -881,6 +794,8 @@ static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
stats->rx_bytes += len;
}
+ u64_stats_update_end(&stats->syncp);
+
/* ISSUE: It would be nice to defer this until the packet has */
/* actually been processed. */
tile_net_return_credit(info);
@@ -1907,8 +1822,10 @@ busy:
kfree_skb(olds[i]);
/* Update stats. */
+ u64_stats_update_begin(&stats->syncp);
stats->tx_packets += num_segs;
stats->tx_bytes += (num_segs * sh_len) + d_len;
+ u64_stats_update_end(&stats->syncp);
/* Make sure the egress timer is scheduled. */
tile_net_schedule_egress_timer(info);
@@ -1936,7 +1853,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
unsigned int csum_start = skb_checksum_start_offset(skb);
- lepp_frag_t frags[LEPP_MAX_FRAGS];
+ lepp_frag_t frags[1 + MAX_SKB_FRAGS];
unsigned int num_frags;
@@ -1951,7 +1868,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
unsigned int cmd_head, cmd_tail, cmd_next;
unsigned int comp_tail;
- lepp_cmd_t cmds[LEPP_MAX_FRAGS];
+ lepp_cmd_t cmds[1 + MAX_SKB_FRAGS];
/*
@@ -2089,8 +2006,10 @@ busy:
kfree_skb(olds[i]);
/* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */
+ u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN);
+ u64_stats_update_end(&stats->syncp);
/* Make sure the egress timer is scheduled. */
tile_net_schedule_egress_timer(info);
@@ -2127,30 +2046,51 @@ static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
*
* Returns the address of the device statistics structure.
*/
-static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *tile_net_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
{
struct tile_net_priv *priv = netdev_priv(dev);
- u32 rx_packets = 0;
- u32 tx_packets = 0;
- u32 rx_bytes = 0;
- u32 tx_bytes = 0;
+ u64 rx_packets = 0, tx_packets = 0;
+ u64 rx_bytes = 0, tx_bytes = 0;
+ u64 rx_errors = 0, rx_dropped = 0;
int i;
for_each_online_cpu(i) {
- if (priv->cpu[i]) {
- rx_packets += priv->cpu[i]->stats.rx_packets;
- rx_bytes += priv->cpu[i]->stats.rx_bytes;
- tx_packets += priv->cpu[i]->stats.tx_packets;
- tx_bytes += priv->cpu[i]->stats.tx_bytes;
- }
+ struct tile_net_stats_t *cpu_stats;
+ u64 trx_packets, ttx_packets, trx_bytes, ttx_bytes;
+ u64 trx_errors, trx_dropped;
+ unsigned int start;
+
+ if (priv->cpu[i] == NULL)
+ continue;
+ cpu_stats = &priv->cpu[i]->stats;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&cpu_stats->syncp);
+ trx_packets = cpu_stats->rx_packets;
+ ttx_packets = cpu_stats->tx_packets;
+ trx_bytes = cpu_stats->rx_bytes;
+ ttx_bytes = cpu_stats->tx_bytes;
+ trx_errors = cpu_stats->rx_errors;
+ trx_dropped = cpu_stats->rx_dropped;
+ } while (u64_stats_fetch_retry_bh(&cpu_stats->syncp, start));
+
+ rx_packets += trx_packets;
+ tx_packets += ttx_packets;
+ rx_bytes += trx_bytes;
+ tx_bytes += ttx_bytes;
+ rx_errors += trx_errors;
+ rx_dropped += trx_dropped;
}
- priv->stats.rx_packets = rx_packets;
- priv->stats.rx_bytes = rx_bytes;
- priv->stats.tx_packets = tx_packets;
- priv->stats.tx_bytes = tx_bytes;
+ stats->rx_packets = rx_packets;
+ stats->tx_packets = tx_packets;
+ stats->rx_bytes = rx_bytes;
+ stats->tx_bytes = tx_bytes;
+ stats->rx_errors = rx_errors;
+ stats->rx_dropped = rx_dropped;
- return &priv->stats;
+ return stats;
}
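A minimal sketch, with no driver-specific assumptions, of the u64_stats_sync pattern the new stats code relies on: writers bracket their updates, readers loop until they see a consistent snapshot, which keeps 64-bit counters safe on 32-bit SMP without a lock in the fast path.

#include <linux/u64_stats_sync.h>

struct example_cpu_stats {
        struct u64_stats_sync syncp;
        u64 rx_packets;
        u64 rx_bytes;
};

/* Writer side: called from the per-cpu RX path. */
static void example_rx_account(struct example_cpu_stats *s, unsigned int len)
{
        u64_stats_update_begin(&s->syncp);
        s->rx_packets++;
        s->rx_bytes += len;
        u64_stats_update_end(&s->syncp);
}

/* Reader side: retry until the snapshot is consistent. */
static void example_read(const struct example_cpu_stats *s,
                         u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_bh(&s->syncp);
                *packets = s->rx_packets;
                *bytes = s->rx_bytes;
        } while (u64_stats_fetch_retry_bh(&s->syncp, start));
}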
@@ -2287,7 +2227,7 @@ static const struct net_device_ops tile_net_ops = {
.ndo_stop = tile_net_stop,
.ndo_start_xmit = tile_net_tx,
.ndo_do_ioctl = tile_net_ioctl,
- .ndo_get_stats = tile_net_get_stats,
+ .ndo_get_stats64 = tile_net_get_stats64,
.ndo_change_mtu = tile_net_change_mtu,
.ndo_tx_timeout = tile_net_tx_timeout,
.ndo_set_mac_address = tile_net_set_mac_address,
@@ -2305,39 +2245,30 @@ static const struct net_device_ops tile_net_ops = {
*/
static void tile_net_setup(struct net_device *dev)
{
- PDEBUG("tile_net_setup()\n");
+ netdev_features_t features = 0;
ether_setup(dev);
-
dev->netdev_ops = &tile_net_ops;
-
dev->watchdog_timeo = TILE_NET_TIMEOUT;
+ dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
+ dev->mtu = TILE_NET_MTU;
- /* We want lockless xmit. */
- dev->features |= NETIF_F_LLTX;
-
- /* We support hardware tx checksums. */
- dev->features |= NETIF_F_HW_CSUM;
-
- /* We support scatter/gather. */
- dev->features |= NETIF_F_SG;
-
- /* We support TSO. */
- dev->features |= NETIF_F_TSO;
+ features |= NETIF_F_HW_CSUM;
+ features |= NETIF_F_SG;
-#ifdef TILE_NET_GSO
- /* We support GSO. */
- dev->features |= NETIF_F_GSO;
-#endif
+ /* We support TSO iff the HV supports sufficient frags. */
+ if (LEPP_MAX_FRAGS >= 1 + MAX_SKB_FRAGS)
+ features |= NETIF_F_TSO;
+ /* We can't support HIGHDMA without hash_default, since we need
+ * to be able to finv() with a VA if we don't have hash_default.
+ */
if (hash_default)
- dev->features |= NETIF_F_HIGHDMA;
-
- /* ISSUE: We should support NETIF_F_UFO. */
+ features |= NETIF_F_HIGHDMA;
- dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
-
- dev->mtu = TILE_NET_MTU;
+ dev->hw_features |= features;
+ dev->vlan_features |= features;
+ dev->features |= features;
}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index ad32af67e61..9c805e0c0ca 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1466,8 +1466,7 @@ static void gelic_ether_setup_netdev_ops(struct net_device *netdev,
{
netdev->watchdog_timeo = GELIC_NET_WATCHDOG_TIMEOUT;
/* NAPI */
- netif_napi_add(netdev, napi,
- gelic_net_poll, GELIC_NET_NAPI_WEIGHT);
+ netif_napi_add(netdev, napi, gelic_net_poll, NAPI_POLL_WEIGHT);
netdev->ethtool_ops = &gelic_ether_ethtool_ops;
netdev->netdev_ops = &gelic_netdevice_ops;
}
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.h b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
index a93df6ac190..309abb472aa 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.h
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.h
@@ -37,7 +37,6 @@
#define GELIC_NET_RXBUF_ALIGN 128
#define GELIC_CARD_RX_CSUM_DEFAULT 1 /* hw chksum */
#define GELIC_NET_WATCHDOG_TIMEOUT 5*HZ
-#define GELIC_NET_NAPI_WEIGHT (GELIC_NET_RX_DESCRIPTORS)
#define GELIC_NET_BROADCAST_ADDR 0xffffffffffffL
#define GELIC_NET_MC_COUNT_MAX 32 /* multicast address list */
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 01bdc6ca075..c4dbf981804 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1308,13 +1308,13 @@ static int tsi108_open(struct net_device *dev)
data->id, dev->irq, dev->name);
}
- data->rxring = dma_alloc_coherent(NULL, rxring_size, &data->rxdma,
- GFP_KERNEL | __GFP_ZERO);
+ data->rxring = dma_zalloc_coherent(NULL, rxring_size, &data->rxdma,
+ GFP_KERNEL);
if (!data->rxring)
return -ENOMEM;
- data->txring = dma_alloc_coherent(NULL, txring_size, &data->txdma,
- GFP_KERNEL | __GFP_ZERO);
+ data->txring = dma_zalloc_coherent(NULL, txring_size, &data->txdma,
+ GFP_KERNEL);
if (!data->txring) {
pci_free_consistent(0, rxring_size, data->rxring, data->rxdma);
return -ENOMEM;
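For reference (a sketch, not part of the patch): dma_zalloc_coherent() is simply the zeroing wrapper around dma_alloc_coherent(), so dropping __GFP_ZERO in favour of it is a mechanical, behaviour-preserving change.

#include <linux/dma-mapping.h>

/* Sketch: allocate a zeroed, coherent descriptor ring. */
static void *example_alloc_ring(struct device *dev, size_t size,
                                dma_addr_t *handle)
{
        return dma_zalloc_coherent(dev, size, handle, GFP_KERNEL);
}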
@@ -1558,7 +1558,7 @@ tsi108_init_one(struct platform_device *pdev)
hw_info *einfo;
int err = 0;
- einfo = pdev->dev.platform_data;
+ einfo = dev_get_platdata(&pdev->dev);
if (NULL == einfo) {
printk(KERN_ERR "tsi-eth %d: Missing additional data!\n",
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index b75eb9e0e86..c8f088ab5fd 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -2407,7 +2407,7 @@ static struct pci_driver rhine_driver = {
.driver.pm = RHINE_PM_OPS,
};
-static struct dmi_system_id __initdata rhine_dmi_table[] = {
+static struct dmi_system_id rhine_dmi_table[] __initdata = {
{
.ident = "EPIA-M",
.matches = {
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 1d6dc41f755..d022bf93657 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2100,7 +2100,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
}
- netif_rx(skb);
+ netif_receive_skb(skb);
stats->rx_bytes += pkt_len;
stats->rx_packets++;
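A hedged aside on the netif_rx() to netif_receive_skb() switch above: velocity's receive path runs from its NAPI poll handler, where frames can be handed to the stack directly instead of being queued to the per-cpu backlog. A minimal sketch of that context (the skb-fetch helper is hypothetical):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *example_next_rx_skb(struct napi_struct *napi); /* hypothetical */

/* Sketch: inside a NAPI poll handler, deliver frames synchronously. */
static int example_poll(struct napi_struct *napi, int budget)
{
        int done = 0;

        while (done < budget) {
                struct sk_buff *skb = example_next_rx_skb(napi);

                if (!skb)
                        break;
                netif_receive_skb(skb); /* direct delivery, no backlog hop */
                done++;
        }
        if (done < budget)
                napi_complete(napi);
        return done;
}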
@@ -2376,6 +2376,23 @@ out_0:
return ret;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * velocity_poll_controller - Velocity Poll controller function
+ * @dev: network device
+ *
+ *
+ * Used by NETCONSOLE and other diagnostic tools to allow network I/O
+ * with interrupts disabled.
+ */
+static void velocity_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ velocity_intr(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
/**
* velocity_mii_ioctl - MII ioctl handler
* @dev: network device
@@ -2641,6 +2658,9 @@ static const struct net_device_ops velocity_netdev_ops = {
.ndo_do_ioctl = velocity_ioctl,
.ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = velocity_poll_controller,
+#endif
};
/**
@@ -2884,6 +2904,7 @@ out:
return ret;
err_iounmap:
+ netif_napi_del(&vptr->napi);
iounmap(regs);
err_free_dev:
free_netdev(netdev);
@@ -2904,6 +2925,7 @@ static int velocity_remove(struct device *dev)
struct velocity_info *vptr = netdev_priv(netdev);
unregister_netdev(netdev);
+ netif_napi_del(&vptr->napi);
iounmap(vptr->mac_regs);
free_netdev(netdev);
velocity_nics--;
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 30fed08d167..0df36c6ec7f 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -622,7 +622,7 @@ static const struct net_device_ops w5100_netdev_ops = {
static int w5100_hw_probe(struct platform_device *pdev)
{
- struct wiznet_platform_data *data = pdev->dev.platform_data;
+ struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev);
struct net_device *ndev = platform_get_drvdata(pdev);
struct w5100_priv *priv = netdev_priv(ndev);
const char *name = netdev_name(ndev);
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index e92884564e1..71c27b3292f 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -542,7 +542,7 @@ static const struct net_device_ops w5300_netdev_ops = {
static int w5300_hw_probe(struct platform_device *pdev)
{
- struct wiznet_platform_data *data = pdev->dev.platform_data;
+ struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev);
struct net_device *ndev = platform_get_drvdata(pdev);
struct w5300_priv *priv = netdev_priv(ndev);
const char *name = netdev_name(ndev);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 58eb4488bef..b88121f240c 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -243,15 +243,15 @@ static int temac_dma_bd_init(struct net_device *ndev)
/* allocate the tx and rx ring buffer descriptors. */
/* returns a virtual address and a physical address. */
- lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- &lp->tx_bd_p, GFP_KERNEL | __GFP_ZERO);
+ lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ &lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
goto out;
- lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- &lp->rx_bd_p, GFP_KERNEL | __GFP_ZERO);
+ lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ &lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
goto out;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index fb7d1c28a2e..b2ff038d6d2 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -201,17 +201,15 @@ static int axienet_dma_bd_init(struct net_device *ndev)
/*
* Allocate the Tx and Rx buffer descriptors.
*/
- lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->tx_bd_v) * TX_BD_NUM,
- &lp->tx_bd_p,
- GFP_KERNEL | __GFP_ZERO);
+ lp->tx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+ sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+ &lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v)
goto out;
- lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
- sizeof(*lp->rx_bd_v) * RX_BD_NUM,
- &lp->rx_bd_p,
- GFP_KERNEL | __GFP_ZERO);
+ lp->rx_bd_v = dma_zalloc_coherent(ndev->dev.parent,
+ sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+ &lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v)
goto out;
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index e90e1f46121..64b4639f43b 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -175,6 +175,7 @@ int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np)
printk(KERN_WARNING "Setting MDIO clock divisor to "
"default %d\n", DEFAULT_CLOCK_DIVISOR);
clk_div = DEFAULT_CLOCK_DIVISOR;
+ of_node_put(np1);
goto issue;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index fd4dbdae533..4c619ea5189 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1230,8 +1230,7 @@ error:
*/
static int xemaclite_of_remove(struct platform_device *of_dev)
{
- struct device *dev = &of_dev->dev;
- struct net_device *ndev = dev_get_drvdata(dev);
+ struct net_device *ndev = platform_get_drvdata(of_dev);
struct net_local *lp = netdev_priv(ndev);
@@ -1250,7 +1249,6 @@ static int xemaclite_of_remove(struct platform_device *of_dev)
lp->phy_node = NULL;
xemaclite_remove_ndev(ndev, of_dev);
- dev_set_drvdata(dev, NULL);
return 0;
}
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 3d689fcb791..e78802e75ea 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1384,7 +1384,7 @@ static int eth_init_one(struct platform_device *pdev)
{
struct port *port;
struct net_device *dev;
- struct eth_plat_info *plat = pdev->dev.platform_data;
+ struct eth_plat_info *plat = dev_get_platdata(&pdev->dev);
u32 regs_phys;
char phy_id[MII_BUS_ID_SIZE + 3];
int err;
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 4c8ddc944d5..0b40e1c46f0 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -1068,9 +1068,9 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
#endif
sizeof(PI_CONSUMER_BLOCK) +
(PI_ALIGN_K_DESC_BLK - 1);
- bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
- &bp->kmalloced_dma,
- GFP_ATOMIC | __GFP_ZERO);
+ bp->kmalloced = top_v = dma_zalloc_coherent(bp->bus_dev, alloc_size,
+ &bp->kmalloced_dma,
+ GFP_ATOMIC);
if (top_v == NULL)
return DFX_K_FAILURE;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 23a0fff0df5..524f713f601 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -306,7 +306,6 @@ static void netvsc_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->version, HV_DRV_VERSION, sizeof(info->version));
strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}
@@ -529,7 +528,6 @@ static int __init netvsc_drv_init(void)
}
MODULE_LICENSE("GPL");
-MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
module_init(netvsc_drv_init);
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 3adb43ce138..7bbd318bc93 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -351,16 +351,16 @@ static int ali_ircc_open(int i, chipio_t *info)
/* Allocate memory if needed */
self->rx_buff.head =
- dma_alloc_coherent(NULL, self->rx_buff.truesize,
- &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ dma_zalloc_coherent(NULL, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto err_out2;
}
self->tx_buff.head =
- dma_alloc_coherent(NULL, self->tx_buff.truesize,
- &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ dma_zalloc_coherent(NULL, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto err_out3;
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index a4126719783..177441afeb9 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -123,14 +123,14 @@ static int irtty_change_speed(struct sir_dev *dev, unsigned speed)
tty = priv->tty;
- mutex_lock(&tty->termios_mutex);
+ down_write(&tty->termios_rwsem);
old_termios = tty->termios;
cflag = tty->termios.c_cflag;
tty_encode_baud_rate(tty, speed, speed);
if (tty->ops->set_termios)
tty->ops->set_termios(tty, &old_termios);
priv->io.speed = speed;
- mutex_unlock(&tty->termios_mutex);
+ up_write(&tty->termios_rwsem);
return 0;
}
@@ -280,7 +280,7 @@ static inline void irtty_stop_receiver(struct tty_struct *tty, int stop)
struct ktermios old_termios;
int cflag;
- mutex_lock(&tty->termios_mutex);
+ down_write(&tty->termios_rwsem);
old_termios = tty->termios;
cflag = tty->termios.c_cflag;
@@ -292,7 +292,7 @@ static inline void irtty_stop_receiver(struct tty_struct *tty, int stop)
tty->termios.c_cflag = cflag;
if (tty->ops->set_termios)
tty->ops->set_termios(tty, &old_termios);
- mutex_unlock(&tty->termios_mutex);
+ up_write(&tty->termios_rwsem);
}
/*****************************************************************/
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index 9cf836b57c4..ceeb53737f8 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -430,8 +430,8 @@ static int __init nsc_ircc_open(chipio_t *info)
/* Allocate memory if needed */
self->rx_buff.head =
- dma_alloc_coherent(NULL, self->rx_buff.truesize,
- &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ dma_zalloc_coherent(NULL, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto out2;
@@ -439,8 +439,8 @@ static int __init nsc_ircc_open(chipio_t *info)
}
self->tx_buff.head =
- dma_alloc_coherent(NULL, self->tx_buff.truesize,
- &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ dma_zalloc_coherent(NULL, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto out3;
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 964b116a0ab..3eeaaf80049 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -915,7 +915,7 @@ static int pxa_irda_probe(struct platform_device *pdev)
err = register_netdev(dev);
if (err == 0)
- dev_set_drvdata(&pdev->dev, dev);
+ platform_set_drvdata(pdev, dev);
if (err) {
if (si->pdata->shutdown)
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index aa05dad7533..0dcdf1592f6 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -562,14 +562,14 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
self->tx_buff.truesize = SMSC_IRCC2_TX_BUFF_TRUESIZE;
self->rx_buff.head =
- dma_alloc_coherent(NULL, self->rx_buff.truesize,
- &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ dma_zalloc_coherent(NULL, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
if (self->rx_buff.head == NULL)
goto err_out2;
self->tx_buff.head =
- dma_alloc_coherent(NULL, self->tx_buff.truesize,
- &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ dma_zalloc_coherent(NULL, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
if (self->tx_buff.head == NULL)
goto err_out3;
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 51f2bc37610..9abaec27f96 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -210,8 +210,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
pci_write_config_byte(pcidev,0x42,(bTmp | 0xf0));
pci_write_config_byte(pcidev,0x5a,0xc0);
WriteLPCReg(0x28, 0x70 );
- if (via_ircc_open(pcidev, &info, 0x3076) == 0)
- rc=0;
+ rc = via_ircc_open(pcidev, &info, 0x3076);
} else
rc = -ENODEV; //IR not turn on
} else { //Not VT1211
@@ -249,8 +248,7 @@ static int via_init_one(struct pci_dev *pcidev, const struct pci_device_id *id)
info.irq=FirIRQ;
info.dma=FirDRQ1;
info.dma2=FirDRQ0;
- if (via_ircc_open(pcidev, &info, 0x3096) == 0)
- rc=0;
+ rc = via_ircc_open(pcidev, &info, 0x3096);
} else
rc = -ENODEV; //IR not turn on !!!!!
}//Not VT1211
@@ -363,16 +361,16 @@ static int via_ircc_open(struct pci_dev *pdev, chipio_t *info, unsigned int id)
/* Allocate memory if needed */
self->rx_buff.head =
- dma_alloc_coherent(&pdev->dev, self->rx_buff.truesize,
- &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ dma_zalloc_coherent(&pdev->dev, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto err_out2;
}
self->tx_buff.head =
- dma_alloc_coherent(&pdev->dev, self->tx_buff.truesize,
- &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ dma_zalloc_coherent(&pdev->dev, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto err_out3;
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index bb8857a158a..e641bb24036 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -215,16 +215,16 @@ static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
/* Allocate memory if needed */
self->rx_buff.head =
- dma_alloc_coherent(NULL, self->rx_buff.truesize,
- &self->rx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ dma_zalloc_coherent(NULL, self->rx_buff.truesize,
+ &self->rx_buff_dma, GFP_KERNEL);
if (self->rx_buff.head == NULL) {
err = -ENOMEM;
goto err_out1;
}
self->tx_buff.head =
- dma_alloc_coherent(NULL, self->tx_buff.truesize,
- &self->tx_buff_dma, GFP_KERNEL | __GFP_ZERO);
+ dma_zalloc_coherent(NULL, self->tx_buff.truesize,
+ &self->tx_buff_dma, GFP_KERNEL);
if (self->tx_buff.head == NULL) {
err = -ENOMEM;
goto err_out2;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 18373b6ae37..64dfaa303dc 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -337,8 +337,11 @@ static int macvlan_open(struct net_device *dev)
int err;
if (vlan->port->passthru) {
- if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC))
- dev_set_promiscuity(lowerdev, 1);
+ if (!(vlan->flags & MACVLAN_FLAG_NOPROMISC)) {
+ err = dev_set_promiscuity(lowerdev, 1);
+ if (err < 0)
+ goto out;
+ }
goto hash_add;
}
@@ -597,6 +600,9 @@ static int macvlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
if (!vlan->port->passthru)
return -EOPNOTSUPP;
+ if (flags & NLM_F_REPLACE)
+ return -EOPNOTSUPP;
+
if (is_unicast_ether_addr(addr))
err = dev_uc_add_excl(dev, addr);
else if (is_multicast_ether_addr(addr))
@@ -680,7 +686,7 @@ void macvlan_common_setup(struct net_device *dev)
dev->priv_flags |= IFF_UNICAST_FLT;
dev->netdev_ops = &macvlan_netdev_ops;
dev->destructor = free_netdev;
- dev->header_ops = &macvlan_hard_header_ops,
+ dev->header_ops = &macvlan_hard_header_ops;
dev->ethtool_ops = &macvlan_ethtool_ops;
}
EXPORT_SYMBOL_GPL(macvlan_common_setup);
@@ -736,6 +742,10 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
return -EADDRNOTAVAIL;
}
+ if (data && data[IFLA_MACVLAN_FLAGS] &&
+ nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC)
+ return -EINVAL;
+
if (data && data[IFLA_MACVLAN_MODE]) {
switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) {
case MACVLAN_MODE_PRIVATE:
@@ -813,7 +823,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
if (port->count)
return -EINVAL;
port->passthru = true;
- memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN);
+ eth_hw_addr_inherit(dev, lowerdev);
}
err = netdev_upper_dev_link(lowerdev, dev);
@@ -863,6 +873,18 @@ static int macvlan_changelink(struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct macvlan_dev *vlan = netdev_priv(dev);
+ enum macvlan_mode mode;
+ bool set_mode = false;
+
+ /* Validate mode, but don't set yet: setting flags may fail. */
+ if (data && data[IFLA_MACVLAN_MODE]) {
+ set_mode = true;
+ mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
+ /* Passthrough mode can't be set or cleared dynamically */
+ if ((mode == MACVLAN_MODE_PASSTHRU) !=
+ (vlan->mode == MACVLAN_MODE_PASSTHRU))
+ return -EINVAL;
+ }
if (data && data[IFLA_MACVLAN_FLAGS]) {
__u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
@@ -879,8 +901,8 @@ static int macvlan_changelink(struct net_device *dev,
}
vlan->flags = flags;
}
- if (data && data[IFLA_MACVLAN_MODE])
- vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
+ if (set_mode)
+ vlan->mode = mode;
return 0;
}
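A minimal sketch, with invented names, of the validate-then-commit shape the changelink fix above adopts: every check that can fail runs before any state is written, so a later failure cannot leave a half-applied change behind.

#include <linux/types.h>
#include <linux/errno.h>

#define EXAMPLE_VALID_FLAGS 0x1 /* assumed flag mask */

struct example_req {
        bool has_mode, has_flags;
        u32 mode;
        u16 flags;
};

struct example_state {
        u32 mode;
        u16 flags;
};

static int example_changelink(struct example_state *st,
                              const struct example_req *req)
{
        /* 1. Validate everything up front. */
        if (req->has_flags && (req->flags & ~EXAMPLE_VALID_FLAGS))
                return -EINVAL;
        if (req->has_mode && req->mode > 3) /* assumed mode range */
                return -EINVAL;

        /* 2. Only now mutate state. */
        if (req->has_flags)
                st->flags = req->flags;
        if (req->has_mode)
                st->mode = req->mode;
        return 0;
}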
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index a98fb0ed6ae..9dccb1edfd2 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -68,6 +68,8 @@ static const struct proto_ops macvtap_socket_ops;
#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
NETIF_F_TSO6 | NETIF_F_UFO)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
+#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
+
/*
* RCU usage:
* The macvtap_queue and the macvlan_dev are loosely coupled, the
@@ -278,7 +280,8 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct macvtap_queue *q = macvtap_get_queue(dev, skb);
- netdev_features_t features;
+ netdev_features_t features = TAP_FEATURES;
+
if (!q)
goto drop;
@@ -287,9 +290,11 @@ static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
skb->dev = dev;
/* Apply the forward feature mask so that we perform segmentation
- * according to users wishes.
+ * according to the user's wishes. This only works if VNET_HDR is
+ * enabled.
*/
- features = netif_skb_features(skb) & vlan->tap_features;
+ if (q->flags & IFF_VNET_HDR)
+ features |= vlan->tap_features;
if (netif_needs_gso(skb, features)) {
struct sk_buff *segs = __skb_gso_segment(skb, features, false);
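A minimal sketch (names assumed) of the gating logic introduced above: the user-requested offloads are only honoured when the queue negotiated a vnet header; otherwise the baseline mask forces software segmentation before delivery.

#include <linux/netdevice.h>

/* Sketch: choose the feature mask used to decide whether to segment. */
static netdev_features_t example_forward_features(bool has_vnet_hdr,
                                                  netdev_features_t tap_features)
{
        netdev_features_t features = NETIF_F_GSO | NETIF_F_SG; /* baseline */

        if (has_vnet_hdr)
                features |= tap_features;
        return features;
}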
@@ -524,7 +529,7 @@ static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
linear = len;
skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
- err);
+ err, 0);
if (!skb)
return NULL;
@@ -536,86 +541,6 @@ static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
return skb;
}
-/* set skb frags from iovec, this can move to core network code for reuse */
-static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
- int offset, size_t count)
-{
- int len = iov_length(from, count) - offset;
- int copy = skb_headlen(skb);
- int size, offset1 = 0;
- int i = 0;
-
- /* Skip over from offset */
- while (count && (offset >= from->iov_len)) {
- offset -= from->iov_len;
- ++from;
- --count;
- }
-
- /* copy up to skb headlen */
- while (count && (copy > 0)) {
- size = min_t(unsigned int, copy, from->iov_len - offset);
- if (copy_from_user(skb->data + offset1, from->iov_base + offset,
- size))
- return -EFAULT;
- if (copy > size) {
- ++from;
- --count;
- offset = 0;
- } else
- offset += size;
- copy -= size;
- offset1 += size;
- }
-
- if (len == offset1)
- return 0;
-
- while (count--) {
- struct page *page[MAX_SKB_FRAGS];
- int num_pages;
- unsigned long base;
- unsigned long truesize;
-
- len = from->iov_len - offset;
- if (!len) {
- offset = 0;
- ++from;
- continue;
- }
- base = (unsigned long)from->iov_base + offset;
- size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
- if (i + size > MAX_SKB_FRAGS)
- return -EMSGSIZE;
- num_pages = get_user_pages_fast(base, size, 0, &page[i]);
- if (num_pages != size) {
- int j;
-
- for (j = 0; j < num_pages; j++)
- put_page(page[i + j]);
- return -EFAULT;
- }
- truesize = size * PAGE_SIZE;
- skb->data_len += len;
- skb->len += len;
- skb->truesize += truesize;
- atomic_add(truesize, &skb->sk->sk_wmem_alloc);
- while (len) {
- int off = base & ~PAGE_MASK;
- int size = min_t(int, len, PAGE_SIZE - off);
- __skb_fill_page_desc(skb, i, page[i], off, size);
- skb_shinfo(skb)->nr_frags++;
- /* increase sk_wmem_alloc */
- base += size;
- len -= size;
- i++;
- }
- offset = 0;
- ++from;
- }
- return 0;
-}
-
/*
* macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
* be shared with the tun/tap driver.
@@ -698,29 +623,6 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
return 0;
}
-static unsigned long iov_pages(const struct iovec *iv, int offset,
- unsigned long nr_segs)
-{
- unsigned long seg, base;
- int pages = 0, len, size;
-
- while (nr_segs && (offset >= iv->iov_len)) {
- offset -= iv->iov_len;
- ++iv;
- --nr_segs;
- }
-
- for (seg = 0; seg < nr_segs; seg++) {
- base = (unsigned long)iv[seg].iov_base + offset;
- len = iv[seg].iov_len - offset;
- size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
- pages += size;
- offset = 0;
- }
-
- return pages;
-}
-
/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
const struct iovec *iv, unsigned long total_len,
@@ -818,10 +720,13 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
}
- if (vlan)
+ if (vlan) {
+ local_bh_disable();
macvlan_start_xmit(skb, vlan->dev);
- else
+ local_bh_enable();
+ } else {
kfree_skb(skb);
+ }
rcu_read_unlock();
return total_len;
@@ -912,8 +817,11 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
done:
rcu_read_lock();
vlan = rcu_dereference(q->vlan);
- if (vlan)
+ if (vlan) {
+ preempt_disable();
macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0);
+ preempt_enable();
+ }
rcu_read_unlock();
return ret ? ret : copied;
@@ -1058,8 +966,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
/* tap_features are the same as features on tun/tap and
* reflect user expectations.
*/
- vlan->tap_features = vlan->dev->features &
- (feature_mask | ~TUN_OFFLOADS);
+ vlan->tap_features = feature_mask;
vlan->set_features = features;
netdev_update_features(vlan->dev);
@@ -1155,10 +1062,6 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
TUN_F_TSO_ECN | TUN_F_UFO))
return -EINVAL;
- /* TODO: only accept frames with the features that
- got enabled for forwarded frames */
- if (!(q->flags & IFF_VNET_HDR))
- return -EINVAL;
rtnl_lock();
ret = set_offload(q, arg);
rtnl_unlock();
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 4822aafe638..dcb21347c67 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -102,6 +102,7 @@ struct netconsole_target {
struct config_item item;
#endif
int enabled;
+ struct mutex mutex;
struct netpoll np;
};
@@ -181,6 +182,7 @@ static struct netconsole_target *alloc_param_target(char *target_config)
strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ);
nt->np.local_port = 6665;
nt->np.remote_port = 6666;
+ mutex_init(&nt->mutex);
memset(nt->np.remote_mac, 0xff, ETH_ALEN);
/* Parse parameters and setup netpoll */
@@ -322,6 +324,7 @@ static ssize_t store_enabled(struct netconsole_target *nt,
return -EINVAL;
}
+ mutex_lock(&nt->mutex);
if (enabled) { /* 1 */
/*
@@ -331,8 +334,10 @@ static ssize_t store_enabled(struct netconsole_target *nt,
netpoll_print_options(&nt->np);
err = netpoll_setup(&nt->np);
- if (err)
+ if (err) {
+ mutex_unlock(&nt->mutex);
return err;
+ }
printk(KERN_INFO "netconsole: network logging started\n");
@@ -341,6 +346,7 @@ static ssize_t store_enabled(struct netconsole_target *nt,
}
nt->enabled = enabled;
+ mutex_unlock(&nt->mutex);
return strnlen(buf, count);
}
@@ -597,6 +603,7 @@ static struct config_item *make_netconsole_target(struct config_group *group,
strlcpy(nt->np.dev_name, "eth0", IFNAMSIZ);
nt->np.local_port = 6665;
nt->np.remote_port = 6666;
+ mutex_init(&nt->mutex);
memset(nt->np.remote_mac, 0xff, ETH_ALEN);
/* Initialize the config_item member */
@@ -682,7 +689,11 @@ restart:
* we might sleep in __netpoll_cleanup()
*/
spin_unlock_irqrestore(&target_list_lock, flags);
+
+ mutex_lock(&nt->mutex);
__netpoll_cleanup(&nt->np);
+ mutex_unlock(&nt->mutex);
+
spin_lock_irqsave(&target_list_lock, flags);
dev_put(nt->np.dev);
nt->np.dev = NULL;
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index a47f9236d96..8004acbef2c 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -191,7 +191,7 @@ static int mdio_gpio_probe(struct platform_device *pdev)
pdata = mdio_gpio_of_get_data(pdev);
bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
} else {
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
bus_id = pdev->id;
}
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index e91d7d736ae..d2dd9e473e2 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -106,7 +106,7 @@ err:
static int mdio_mux_gpio_remove(struct platform_device *pdev)
{
- struct mdio_mux_gpio_state *s = pdev->dev.platform_data;
+ struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev);
mdio_mux_uninit(s->mux_handle);
return 0;
}
diff --git a/drivers/net/phy/mdio-mux-mmioreg.c b/drivers/net/phy/mdio-mux-mmioreg.c
index 9733bd239a8..f8e305d8da7 100644
--- a/drivers/net/phy/mdio-mux-mmioreg.c
+++ b/drivers/net/phy/mdio-mux-mmioreg.c
@@ -48,7 +48,7 @@ static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child,
struct mdio_mux_mmioreg_state *s = data;
if (current_child ^ desired_child) {
- void *p = ioremap(s->phys, 1);
+ void __iomem *p = ioremap(s->phys, 1);
uint8_t x, y;
if (!p)
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index b51fa1f469b..6aee02ed97a 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -222,7 +222,7 @@ static int octeon_mdiobus_probe(struct platform_device *pdev)
bus->mii_bus->read = octeon_mdiobus_read;
bus->mii_bus->write = octeon_mdiobus_write;
- dev_set_drvdata(&pdev->dev, bus);
+ platform_set_drvdata(pdev, bus);
err = of_mdiobus_register(bus->mii_bus, pdev->dev.of_node);
if (err)
@@ -244,7 +244,7 @@ static int octeon_mdiobus_remove(struct platform_device *pdev)
struct octeon_mdiobus *bus;
union cvmx_smix_en smi_en;
- bus = dev_get_drvdata(&pdev->dev);
+ bus = platform_get_drvdata(pdev);
mdiobus_unregister(bus->mii_bus);
mdiobus_free(bus->mii_bus);
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index 61d3f4ebf52..18969b3ad8b 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -40,7 +40,7 @@ struct sun4i_mdio_data {
static int sun4i_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
struct sun4i_mdio_data *data = bus->priv;
- unsigned long start_jiffies;
+ unsigned long timeout_jiffies;
int value;
/* issue the phy address and reg */
@@ -49,10 +49,9 @@ static int sun4i_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
writel(0x1, data->membase + EMAC_MAC_MCMD_REG);
/* Wait read complete */
- start_jiffies = jiffies;
+ timeout_jiffies = jiffies + MDIO_TIMEOUT;
while (readl(data->membase + EMAC_MAC_MIND_REG) & 0x1) {
- if (time_after(start_jiffies,
- start_jiffies + MDIO_TIMEOUT))
+ if (time_is_before_jiffies(timeout_jiffies))
return -ETIMEDOUT;
msleep(1);
}
@@ -69,7 +68,7 @@ static int sun4i_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
u16 value)
{
struct sun4i_mdio_data *data = bus->priv;
- unsigned long start_jiffies;
+ unsigned long timeout_jiffies;
/* issue the phy address and reg */
writel((mii_id << 8) | regnum, data->membase + EMAC_MAC_MADR_REG);
@@ -77,10 +76,9 @@ static int sun4i_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
writel(0x1, data->membase + EMAC_MAC_MCMD_REG);
/* Wait read complete */
- start_jiffies = jiffies;
+ timeout_jiffies = jiffies + MDIO_TIMEOUT;
while (readl(data->membase + EMAC_MAC_MIND_REG) & 0x1) {
- if (time_after(start_jiffies,
- start_jiffies + MDIO_TIMEOUT))
+ if (time_is_before_jiffies(timeout_jiffies))
return -ETIMEDOUT;
msleep(1);
}
@@ -103,6 +101,7 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
struct mii_bus *bus;
struct sun4i_mdio_data *data;
+ struct resource *res;
int ret, i;
bus = mdiobus_alloc_size(sizeof(*data));
@@ -116,7 +115,8 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
bus->parent = &pdev->dev;
- bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ bus->irq = devm_kzalloc(&pdev->dev, sizeof(int) * PHY_MAX_ADDR,
+ GFP_KERNEL);
if (!bus->irq) {
ret = -ENOMEM;
goto err_out_free_mdiobus;
@@ -126,10 +126,11 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
bus->irq[i] = PHY_POLL;
data = bus->priv;
- data->membase = of_iomap(np, 0);
- if (!data->membase) {
- ret = -ENOMEM;
- goto err_out_free_mdio_irq;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ data->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(data->membase)) {
+ ret = PTR_ERR(data->membase);
+ goto err_out_free_mdiobus;
}
data->regulator = devm_regulator_get(&pdev->dev, "phy");
@@ -141,7 +142,7 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
} else {
ret = regulator_enable(data->regulator);
if (ret)
- goto err_out_free_mdio_irq;
+ goto err_out_free_mdiobus;
}
ret = of_mdiobus_register(bus, np);
@@ -154,8 +155,6 @@ static int sun4i_mdio_probe(struct platform_device *pdev)
err_out_disable_regulator:
regulator_disable(data->regulator);
-err_out_free_mdio_irq:
- kfree(bus->irq);
err_out_free_mdiobus:
mdiobus_free(bus);
return ret;
@@ -166,7 +165,6 @@ static int sun4i_mdio_remove(struct platform_device *pdev)
struct mii_bus *bus = platform_get_drvdata(pdev);
mdiobus_unregister(bus);
- kfree(bus->irq);
mdiobus_free(bus);
return 0;
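
The sun4i MDIO fix above replaces a timeout test of the form time_after(start, start + MDIO_TIMEOUT), which can never fire, with a precomputed deadline checked via time_is_before_jiffies(). A minimal sketch of the deadline idiom, assuming a hypothetical busy bit and a 100 ms budget:

#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/errno.h>

#define MY_TIMEOUT	msecs_to_jiffies(100)	/* assumption: 100 ms budget */

static int my_wait_ready(void __iomem *status_reg)
{
	unsigned long timeout_jiffies = jiffies + MY_TIMEOUT;

	while (readl(status_reg) & 0x1) {		/* busy bit still set */
		if (time_is_before_jiffies(timeout_jiffies))
			return -ETIMEDOUT;		/* deadline passed */
		usleep_range(1000, 2000);
	}
	return 0;
}

Computing the deadline once up front also keeps the loop correct across jiffies wrap, since the time_is_* helpers use wrap-safe comparisons.
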
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 2510435f34e..c31aad0004c 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/micrel_phy.h>
+#include <linux/of.h>
/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
@@ -53,6 +54,20 @@
#define KS8737_CTRL_INT_ACTIVE_HIGH (1 << 14)
#define KSZ8051_RMII_50MHZ_CLK (1 << 7)
+/* Write/read to/from extended registers */
+#define MII_KSZPHY_EXTREG 0x0b
+#define KSZPHY_EXTREG_WRITE 0x8000
+
+#define MII_KSZPHY_EXTREG_WRITE 0x0c
+#define MII_KSZPHY_EXTREG_READ 0x0d
+
+/* Extended registers */
+#define MII_KSZPHY_CLK_CONTROL_PAD_SKEW 0x104
+#define MII_KSZPHY_RX_DATA_PAD_SKEW 0x105
+#define MII_KSZPHY_TX_DATA_PAD_SKEW 0x106
+
+#define PS_TO_REG 200
+
static int ksz_config_flags(struct phy_device *phydev)
{
int regval;
@@ -65,6 +80,20 @@ static int ksz_config_flags(struct phy_device *phydev)
return 0;
}
+static int kszphy_extended_write(struct phy_device *phydev,
+ u32 regnum, u16 val)
+{
+ phy_write(phydev, MII_KSZPHY_EXTREG, KSZPHY_EXTREG_WRITE | regnum);
+ return phy_write(phydev, MII_KSZPHY_EXTREG_WRITE, val);
+}
+
+static int kszphy_extended_read(struct phy_device *phydev,
+ u32 regnum)
+{
+ phy_write(phydev, MII_KSZPHY_EXTREG, regnum);
+ return phy_read(phydev, MII_KSZPHY_EXTREG_READ);
+}
+
static int kszphy_ack_interrupt(struct phy_device *phydev)
{
/* bit[7..0] int status, which is a read and clear register. */
@@ -141,10 +170,82 @@ static int ks8051_config_init(struct phy_device *phydev)
return rc < 0 ? rc : 0;
}
+static int ksz9021_load_values_from_of(struct phy_device *phydev,
+ struct device_node *of_node, u16 reg,
+ char *field1, char *field2,
+ char *field3, char *field4)
+{
+ int val1 = -1;
+ int val2 = -2;
+ int val3 = -3;
+ int val4 = -4;
+ int newval;
+ int matches = 0;
+
+ if (!of_property_read_u32(of_node, field1, &val1))
+ matches++;
+
+ if (!of_property_read_u32(of_node, field2, &val2))
+ matches++;
+
+ if (!of_property_read_u32(of_node, field3, &val3))
+ matches++;
+
+ if (!of_property_read_u32(of_node, field4, &val4))
+ matches++;
+
+ if (!matches)
+ return 0;
+
+ if (matches < 4)
+ newval = kszphy_extended_read(phydev, reg);
+ else
+ newval = 0;
+
+ if (val1 != -1)
+ newval = ((newval & 0xfff0) | ((val1 / PS_TO_REG) & 0xf) << 0);
+
+ if (val2 != -1)
+ newval = ((newval & 0xff0f) | ((val2 / PS_TO_REG) & 0xf) << 4);
+
+ if (val3 != -1)
+ newval = ((newval & 0xf0ff) | ((val3 / PS_TO_REG) & 0xf) << 8);
+
+ if (val4 != -1)
+ newval = ((newval & 0x0fff) | ((val4 / PS_TO_REG) & 0xf) << 12);
+
+ return kszphy_extended_write(phydev, reg, newval);
+}
+
+static int ksz9021_config_init(struct phy_device *phydev)
+{
+ struct device *dev = &phydev->dev;
+ struct device_node *of_node = dev->of_node;
+
+ if (!of_node && dev->parent->of_node)
+ of_node = dev->parent->of_node;
+
+ if (of_node) {
+ ksz9021_load_values_from_of(phydev, of_node,
+ MII_KSZPHY_CLK_CONTROL_PAD_SKEW,
+ "txen-skew-ps", "txc-skew-ps",
+ "rxdv-skew-ps", "rxc-skew-ps");
+ ksz9021_load_values_from_of(phydev, of_node,
+ MII_KSZPHY_RX_DATA_PAD_SKEW,
+ "rxd0-skew-ps", "rxd1-skew-ps",
+ "rxd2-skew-ps", "rxd3-skew-ps");
+ ksz9021_load_values_from_of(phydev, of_node,
+ MII_KSZPHY_TX_DATA_PAD_SKEW,
+ "txd0-skew-ps", "txd1-skew-ps",
+ "txd2-skew-ps", "txd3-skew-ps");
+ }
+ return 0;
+}
+
#define KSZ8873MLL_GLOBAL_CONTROL_4 0x06
#define KSZ8873MLL_GLOBAL_CONTROL_4_DUPLEX (1 << 6)
#define KSZ8873MLL_GLOBAL_CONTROL_4_SPEED (1 << 4)
-int ksz8873mll_read_status(struct phy_device *phydev)
+static int ksz8873mll_read_status(struct phy_device *phydev)
{
int regval;
@@ -281,7 +382,7 @@ static struct phy_driver ksphy_driver[] = {
.name = "Micrel KSZ9021 Gigabit PHY",
.features = (PHY_GBIT_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init,
+ .config_init = ksz9021_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
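
In the ksz9021 hunks above, each *-skew-ps property is read from the device tree in picoseconds and packed into one 4-bit nibble of a 16-bit extended register at 200 ps per step (PS_TO_REG). A minimal illustration of that packing with hypothetical names; only the arithmetic mirrors the driver:

#include <linux/types.h>

#define PS_PER_STEP	200	/* one register step = 200 ps */

/* Pack a picosecond skew into nibble 'pos' (0..3) of a 16-bit register value. */
static u16 pack_skew_ps(u16 regval, unsigned int pos, u32 skew_ps)
{
	u16 mask = 0xf << (pos * 4);

	return (regval & ~mask) | (((skew_ps / PS_PER_STEP) & 0xf) << (pos * 4));
}

For example, a 1400 ps skew becomes 1400 / 200 = 7 in its nibble; nibbles not specified in the device tree keep the value read back from the register, which is why the driver only does a read-modify-write when fewer than four properties match.
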
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 8e7af835434..138de837977 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -23,7 +23,7 @@
#define RTL821x_INER_INIT 0x6400
#define RTL821x_INSR 0x13
-#define RTL8211E_INER_LINK_STAT 0x10
+#define RTL8211E_INER_LINK_STATUS 0x400
MODULE_DESCRIPTION("Realtek PHY driver");
MODULE_AUTHOR("Johnson Leung");
@@ -57,7 +57,7 @@ static int rtl8211e_config_intr(struct phy_device *phydev)
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, RTL821x_INER,
- RTL8211E_INER_LINK_STAT);
+ RTL8211E_INER_LINK_STATUS);
else
err = phy_write(phydev, RTL821x_INER, 0);
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 162464fe86b..6fa5ae00039 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -47,7 +47,7 @@
#define MAX_CALLID 65535
static DECLARE_BITMAP(callid_bitmap, MAX_CALLID + 1);
-static struct pppox_sock **callid_sock;
+static struct pppox_sock __rcu **callid_sock;
static DEFINE_SPINLOCK(chan_lock);
@@ -83,11 +83,11 @@ static const struct proto_ops pptp_ops;
struct pptp_gre_header {
u8 flags;
u8 ver;
- u16 protocol;
- u16 payload_len;
- u16 call_id;
- u32 seq;
- u32 ack;
+ __be16 protocol;
+ __be16 payload_len;
+ __be16 call_id;
+ __be32 seq;
+ __be32 ack;
} __packed;
static struct pppox_sock *lookup_chan(u16 call_id, __be32 s_addr)
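
The pptp hunk above re-types the on-the-wire GRE header fields as __be16/__be32 so sparse can flag missing byte-order conversions. A minimal sketch of filling such an endianness-annotated header; the struct and the protocol constant here are illustrative only:

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>

struct my_gre_header {
	u8	flags;
	u8	ver;
	__be16	protocol;
	__be16	payload_len;
	__be16	call_id;
	__be32	seq;
	__be32	ack;
} __packed;

static void my_fill_gre(struct my_gre_header *h, u16 call_id, u32 seq, u16 len)
{
	h->protocol = cpu_to_be16(0x880b);	/* example: PPP-over-GRE, network order */
	h->payload_len = cpu_to_be16(len);
	h->call_id = cpu_to_be16(call_id);	/* convert from host order */
	h->seq = cpu_to_be32(seq);
	h->ack = cpu_to_be32(0);
}
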
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index bff7e0b0b4e..50e43e64d51 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -622,6 +622,86 @@ static int team_change_mode(struct team *team, const char *kind)
}
+/*********************
+ * Peers notification
+ *********************/
+
+static void team_notify_peers_work(struct work_struct *work)
+{
+ struct team *team;
+
+ team = container_of(work, struct team, notify_peers.dw.work);
+
+ if (!rtnl_trylock()) {
+ schedule_delayed_work(&team->notify_peers.dw, 0);
+ return;
+ }
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
+ rtnl_unlock();
+ if (!atomic_dec_and_test(&team->notify_peers.count_pending))
+ schedule_delayed_work(&team->notify_peers.dw,
+ msecs_to_jiffies(team->notify_peers.interval));
+}
+
+static void team_notify_peers(struct team *team)
+{
+ if (!team->notify_peers.count || !netif_running(team->dev))
+ return;
+ atomic_set(&team->notify_peers.count_pending, team->notify_peers.count);
+ schedule_delayed_work(&team->notify_peers.dw, 0);
+}
+
+static void team_notify_peers_init(struct team *team)
+{
+ INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
+}
+
+static void team_notify_peers_fini(struct team *team)
+{
+ cancel_delayed_work_sync(&team->notify_peers.dw);
+}
+
+
+/*******************************
+ * Send multicast group rejoins
+ *******************************/
+
+static void team_mcast_rejoin_work(struct work_struct *work)
+{
+ struct team *team;
+
+ team = container_of(work, struct team, mcast_rejoin.dw.work);
+
+ if (!rtnl_trylock()) {
+ schedule_delayed_work(&team->mcast_rejoin.dw, 0);
+ return;
+ }
+ call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
+ rtnl_unlock();
+ if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
+ schedule_delayed_work(&team->mcast_rejoin.dw,
+ msecs_to_jiffies(team->mcast_rejoin.interval));
+}
+
+static void team_mcast_rejoin(struct team *team)
+{
+ if (!team->mcast_rejoin.count || !netif_running(team->dev))
+ return;
+ atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count);
+ schedule_delayed_work(&team->mcast_rejoin.dw, 0);
+}
+
+static void team_mcast_rejoin_init(struct team *team)
+{
+ INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
+}
+
+static void team_mcast_rejoin_fini(struct team *team)
+{
+ cancel_delayed_work_sync(&team->mcast_rejoin.dw);
+}
+
+
/************************
* Rx path frame handler
************************/
@@ -846,6 +926,8 @@ static void team_port_enable(struct team *team,
team_queue_override_port_add(team, port);
if (team->ops.port_enabled)
team->ops.port_enabled(team, port);
+ team_notify_peers(team);
+ team_mcast_rejoin(team);
}
static void __reconstruct_port_hlist(struct team *team, int rm_index)
@@ -875,6 +957,8 @@ static void team_port_disable(struct team *team,
team->en_port_count--;
team_queue_override_port_del(team, port);
team_adjust_ops(team);
+ team_notify_peers(team);
+ team_mcast_rejoin(team);
}
#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
@@ -953,6 +1037,9 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port,
struct netpoll *np;
int err;
+ if (!team->dev->npinfo)
+ return 0;
+
np = kzalloc(sizeof(*np), gfp);
if (!np)
return -ENOMEM;
@@ -979,12 +1066,6 @@ static void team_port_disable_netpoll(struct team_port *port)
__netpoll_cleanup(np);
kfree(np);
}
-
-static struct netpoll_info *team_netpoll_info(struct team *team)
-{
- return team->dev->npinfo;
-}
-
#else
static int team_port_enable_netpoll(struct team *team, struct team_port *port,
gfp_t gfp)
@@ -994,10 +1075,6 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port,
static void team_port_disable_netpoll(struct team_port *port)
{
}
-static struct netpoll_info *team_netpoll_info(struct team *team)
-{
- return NULL;
-}
#endif
static void __team_port_change_port_added(struct team_port *port, bool linkup);
@@ -1079,13 +1156,11 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_vids_add;
}
- if (team_netpoll_info(team)) {
- err = team_port_enable_netpoll(team, port, GFP_KERNEL);
- if (err) {
- netdev_err(dev, "Failed to enable netpoll on device %s\n",
- portname);
- goto err_enable_netpoll;
- }
+ err = team_port_enable_netpoll(team, port, GFP_KERNEL);
+ if (err) {
+ netdev_err(dev, "Failed to enable netpoll on device %s\n",
+ portname);
+ goto err_enable_netpoll;
}
err = netdev_master_upper_dev_link(port_dev, dev);
@@ -1205,6 +1280,62 @@ static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
return team_change_mode(team, ctx->data.str_val);
}
+static int team_notify_peers_count_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ ctx->data.u32_val = team->notify_peers.count;
+ return 0;
+}
+
+static int team_notify_peers_count_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ team->notify_peers.count = ctx->data.u32_val;
+ return 0;
+}
+
+static int team_notify_peers_interval_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ ctx->data.u32_val = team->notify_peers.interval;
+ return 0;
+}
+
+static int team_notify_peers_interval_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ team->notify_peers.interval = ctx->data.u32_val;
+ return 0;
+}
+
+static int team_mcast_rejoin_count_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ ctx->data.u32_val = team->mcast_rejoin.count;
+ return 0;
+}
+
+static int team_mcast_rejoin_count_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ team->mcast_rejoin.count = ctx->data.u32_val;
+ return 0;
+}
+
+static int team_mcast_rejoin_interval_get(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ ctx->data.u32_val = team->mcast_rejoin.interval;
+ return 0;
+}
+
+static int team_mcast_rejoin_interval_set(struct team *team,
+ struct team_gsetter_ctx *ctx)
+{
+ team->mcast_rejoin.interval = ctx->data.u32_val;
+ return 0;
+}
+
static int team_port_en_option_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
@@ -1317,6 +1448,30 @@ static const struct team_option team_options[] = {
.setter = team_mode_option_set,
},
{
+ .name = "notify_peers_count",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = team_notify_peers_count_get,
+ .setter = team_notify_peers_count_set,
+ },
+ {
+ .name = "notify_peers_interval",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = team_notify_peers_interval_get,
+ .setter = team_notify_peers_interval_set,
+ },
+ {
+ .name = "mcast_rejoin_count",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = team_mcast_rejoin_count_get,
+ .setter = team_mcast_rejoin_count_set,
+ },
+ {
+ .name = "mcast_rejoin_interval",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = team_mcast_rejoin_interval_get,
+ .setter = team_mcast_rejoin_interval_set,
+ },
+ {
.name = "enabled",
.type = TEAM_OPTION_TYPE_BOOL,
.per_port = true,
@@ -1396,6 +1551,10 @@ static int team_init(struct net_device *dev)
INIT_LIST_HEAD(&team->option_list);
INIT_LIST_HEAD(&team->option_inst_list);
+
+ team_notify_peers_init(team);
+ team_mcast_rejoin_init(team);
+
err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
if (err)
goto err_options_register;
@@ -1406,6 +1565,8 @@ static int team_init(struct net_device *dev)
return 0;
err_options_register:
+ team_mcast_rejoin_fini(team);
+ team_notify_peers_fini(team);
team_queue_override_fini(team);
err_team_queue_override_init:
free_percpu(team->pcpu_stats);
@@ -1425,6 +1586,8 @@ static void team_uninit(struct net_device *dev)
__team_change_mode(team, NULL); /* cleanup */
__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+ team_mcast_rejoin_fini(team);
+ team_notify_peers_fini(team);
team_queue_override_fini(team);
mutex_unlock(&team->lock);
}
@@ -1811,7 +1974,7 @@ static void team_setup_by_port(struct net_device *dev,
dev->addr_len = port_dev->addr_len;
dev->mtu = port_dev->mtu;
memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
- memcpy(dev->dev_addr, port_dev->dev_addr, port_dev->addr_len);
+ eth_hw_addr_inherit(dev, port_dev);
}
static int team_dev_type_check_change(struct net_device *dev,
@@ -2698,6 +2861,10 @@ static int team_device_event(struct notifier_block *unused,
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid changing the type of the underlying device */
return NOTIFY_BAD;
+ case NETDEV_RESEND_IGMP:
+ /* Propagate to master device */
+ call_netdevice_notifiers(event, port->team->dev);
+ break;
}
return NOTIFY_DONE;
}
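
team_notify_peers_work() and team_mcast_rejoin_work() above need the RTNL lock but run from a workqueue, so they use rtnl_trylock() and reschedule themselves immediately when the lock is contended, then re-arm at the configured interval until count_pending drains. A minimal sketch of that retry pattern with a hypothetical my_notify structure:

#include <linux/workqueue.h>
#include <linux/rtnetlink.h>
#include <linux/netdevice.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>

struct my_notify {
	struct delayed_work dw;
	atomic_t count_pending;
	unsigned int interval_ms;
	struct net_device *dev;
};

static void my_notify_work(struct work_struct *work)
{
	struct my_notify *n = container_of(work, struct my_notify, dw.work);

	if (!rtnl_trylock()) {
		/* RTNL is busy: retry as soon as possible instead of blocking */
		schedule_delayed_work(&n->dw, 0);
		return;
	}
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, n->dev);
	rtnl_unlock();

	if (!atomic_dec_and_test(&n->count_pending))
		schedule_delayed_work(&n->dw, msecs_to_jiffies(n->interval_ms));
}
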
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index db690a37226..a639de8401f 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -60,6 +60,7 @@
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
+#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
@@ -137,7 +138,10 @@ struct tun_file {
struct fasync_struct *fasync;
/* only used for fasync */
unsigned int flags;
- u16 queue_index;
+ union {
+ u16 queue_index;
+ unsigned int ifindex;
+ };
struct list_head next;
struct tun_struct *detached;
};
@@ -405,6 +409,12 @@ static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
return tun;
}
+static void tun_queue_purge(struct tun_file *tfile)
+{
+ skb_queue_purge(&tfile->sk.sk_receive_queue);
+ skb_queue_purge(&tfile->sk.sk_error_queue);
+}
+
static void __tun_detach(struct tun_file *tfile, bool clean)
{
struct tun_file *ntfile;
@@ -431,7 +441,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
synchronize_net();
tun_flow_delete_by_queue(tun, tun->numqueues + 1);
/* Drop read queue */
- skb_queue_purge(&tfile->sk.sk_receive_queue);
+ tun_queue_purge(tfile);
tun_set_real_num_queues(tun);
} else if (tfile->detached && clean) {
tun = tun_enable_queue(tfile);
@@ -483,12 +493,12 @@ static void tun_detach_all(struct net_device *dev)
for (i = 0; i < n; i++) {
tfile = rtnl_dereference(tun->tfiles[i]);
/* Drop read queue */
- skb_queue_purge(&tfile->sk.sk_receive_queue);
+ tun_queue_purge(tfile);
sock_put(&tfile->sk);
}
list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
tun_enable_queue(tfile);
- skb_queue_purge(&tfile->sk.sk_receive_queue);
+ tun_queue_purge(tfile);
sock_put(&tfile->sk);
}
BUG_ON(tun->numdisabled != 0);
@@ -497,7 +507,7 @@ static void tun_detach_all(struct net_device *dev)
module_put(THIS_MODULE);
}
-static int tun_attach(struct tun_struct *tun, struct file *file)
+static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
{
struct tun_file *tfile = file->private_data;
int err;
@@ -522,7 +532,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
err = 0;
/* Re-attach the filter to the persistent device */
- if (tun->filter_attached == true) {
+ if (!skip_filter && (tun->filter_attached == true)) {
err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
if (!err)
goto out;
@@ -739,10 +749,17 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
>= dev->tx_queue_len / tun->numqueues)
goto drop;
- /* Orphan the skb - required as we might hang on to it
- * for indefinite time. */
if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
goto drop;
+
+ if (skb->sk) {
+ sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags);
+ sw_tx_timestamp(skb);
+ }
+
+ /* Orphan the skb - required as we might hang on to it
+ * for indefinite time.
+ */
skb_orphan(skb);
nf_reset(skb);
@@ -943,7 +960,7 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
linear = len;
skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
- &err);
+ &err, 0);
if (!skb)
return ERR_PTR(err);
@@ -955,109 +972,6 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
return skb;
}
-/* set skb frags from iovec, this can move to core network code for reuse */
-static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
- int offset, size_t count)
-{
- int len = iov_length(from, count) - offset;
- int copy = skb_headlen(skb);
- int size, offset1 = 0;
- int i = 0;
-
- /* Skip over from offset */
- while (count && (offset >= from->iov_len)) {
- offset -= from->iov_len;
- ++from;
- --count;
- }
-
- /* copy up to skb headlen */
- while (count && (copy > 0)) {
- size = min_t(unsigned int, copy, from->iov_len - offset);
- if (copy_from_user(skb->data + offset1, from->iov_base + offset,
- size))
- return -EFAULT;
- if (copy > size) {
- ++from;
- --count;
- offset = 0;
- } else
- offset += size;
- copy -= size;
- offset1 += size;
- }
-
- if (len == offset1)
- return 0;
-
- while (count--) {
- struct page *page[MAX_SKB_FRAGS];
- int num_pages;
- unsigned long base;
- unsigned long truesize;
-
- len = from->iov_len - offset;
- if (!len) {
- offset = 0;
- ++from;
- continue;
- }
- base = (unsigned long)from->iov_base + offset;
- size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
- if (i + size > MAX_SKB_FRAGS)
- return -EMSGSIZE;
- num_pages = get_user_pages_fast(base, size, 0, &page[i]);
- if (num_pages != size) {
- int j;
-
- for (j = 0; j < num_pages; j++)
- put_page(page[i + j]);
- return -EFAULT;
- }
- truesize = size * PAGE_SIZE;
- skb->data_len += len;
- skb->len += len;
- skb->truesize += truesize;
- atomic_add(truesize, &skb->sk->sk_wmem_alloc);
- while (len) {
- int off = base & ~PAGE_MASK;
- int size = min_t(int, len, PAGE_SIZE - off);
- __skb_fill_page_desc(skb, i, page[i], off, size);
- skb_shinfo(skb)->nr_frags++;
- /* increase sk_wmem_alloc */
- base += size;
- len -= size;
- i++;
- }
- offset = 0;
- ++from;
- }
- return 0;
-}
-
-static unsigned long iov_pages(const struct iovec *iv, int offset,
- unsigned long nr_segs)
-{
- unsigned long seg, base;
- int pages = 0, len, size;
-
- while (nr_segs && (offset >= iv->iov_len)) {
- offset -= iv->iov_len;
- ++iv;
- --nr_segs;
- }
-
- for (seg = 0; seg < nr_segs; seg++) {
- base = (unsigned long)iv[seg].iov_base + offset;
- len = iv[seg].iov_len - offset;
- size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
- pages += size;
- offset = 0;
- }
-
- return pages;
-}
-
/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
void *msg_control, const struct iovec *iv,
@@ -1074,8 +988,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
u32 rxhash;
if (!(tun->flags & TUN_NO_PI)) {
- if ((len -= sizeof(pi)) > total_len)
+ if (len < sizeof(pi))
return -EINVAL;
+ len -= sizeof(pi);
if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
return -EFAULT;
@@ -1083,8 +998,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
}
if (tun->flags & TUN_VNET_HDR) {
- if ((len -= tun->vnet_hdr_sz) > total_len)
+ if (len < tun->vnet_hdr_sz)
return -EINVAL;
+ len -= tun->vnet_hdr_sz;
if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
return -EFAULT;
@@ -1260,6 +1176,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
{
struct tun_pi pi = { 0, skb->protocol };
ssize_t total = 0;
+ int vlan_offset = 0;
if (!(tun->flags & TUN_NO_PI)) {
if ((len -= sizeof(pi)) < 0)
@@ -1323,11 +1240,40 @@ static ssize_t tun_put_user(struct tun_struct *tun,
total += tun->vnet_hdr_sz;
}
- len = min_t(int, skb->len, len);
+ if (!vlan_tx_tag_present(skb)) {
+ len = min_t(int, skb->len, len);
+ } else {
+ int copy, ret;
+ struct {
+ __be16 h_vlan_proto;
+ __be16 h_vlan_TCI;
+ } veth;
+
+ veth.h_vlan_proto = skb->vlan_proto;
+ veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
+
+ vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
+ len = min_t(int, skb->len + VLAN_HLEN, len);
+
+ copy = min_t(int, vlan_offset, len);
+ ret = skb_copy_datagram_const_iovec(skb, 0, iv, total, copy);
+ len -= copy;
+ total += copy;
+ if (ret || !len)
+ goto done;
+
+ copy = min_t(int, sizeof(veth), len);
+ ret = memcpy_toiovecend(iv, (void *)&veth, total, copy);
+ len -= copy;
+ total += copy;
+ if (ret || !len)
+ goto done;
+ }
- skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
- total += skb->len;
+ skb_copy_datagram_const_iovec(skb, vlan_offset, iv, total, len);
+ total += len;
+done:
tun->dev->stats.tx_packets++;
tun->dev->stats.tx_bytes += len;
@@ -1476,7 +1422,6 @@ static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
return ret;
}
-
static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *m, size_t total_len,
int flags)
@@ -1488,10 +1433,15 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
if (!tun)
return -EBADFD;
- if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) {
+ if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
ret = -EINVAL;
goto out;
}
+ if (flags & MSG_ERRQUEUE) {
+ ret = sock_recv_errqueue(sock->sk, m, total_len,
+ SOL_PACKET, TUN_TX_TIMESTAMP);
+ goto out;
+ }
ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
flags & MSG_DONTWAIT);
if (ret > total_len) {
@@ -1615,7 +1565,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (err < 0)
return err;
- err = tun_attach(tun, file);
+ err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER);
if (err < 0)
return err;
@@ -1662,6 +1612,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
dev_net_set(dev, net);
dev->rtnl_link_ops = &tun_link_ops;
+ dev->ifindex = tfile->ifindex;
tun = netdev_priv(dev);
tun->dev = dev;
@@ -1682,12 +1633,13 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun_flow_init(tun);
dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
- TUN_USER_FEATURES;
+ TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
dev->features = dev->hw_features;
dev->vlan_features = dev->features;
INIT_LIST_HEAD(&tun->disabled);
- err = tun_attach(tun, file);
+ err = tun_attach(tun, file, false);
if (err < 0)
goto err_free_dev;
@@ -1851,7 +1803,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
ret = security_tun_dev_attach_queue(tun->security);
if (ret < 0)
goto unlock;
- ret = tun_attach(tun, file);
+ ret = tun_attach(tun, file, false);
} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
tun = rtnl_dereference(tfile->tun);
if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
@@ -1877,6 +1829,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
kgid_t group;
int sndbuf;
int vnet_hdr_sz;
+ unsigned int ifindex;
int ret;
if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
@@ -1911,6 +1864,19 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
ret = -EFAULT;
goto unlock;
}
+ if (cmd == TUNSETIFINDEX) {
+ ret = -EPERM;
+ if (tun)
+ goto unlock;
+
+ ret = -EFAULT;
+ if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
+ goto unlock;
+
+ ret = 0;
+ tfile->ifindex = ifindex;
+ goto unlock;
+ }
ret = -EBADFD;
if (!tun)
@@ -1923,6 +1889,11 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
case TUNGETIFF:
tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
+ if (tfile->detached)
+ ifr.ifr_flags |= IFF_DETACH_QUEUE;
+ if (!tfile->socket.sk->sk_filter)
+ ifr.ifr_flags |= IFF_NOFILTER;
+
if (copy_to_user(argp, &ifr, ifreq_len))
ret = -EFAULT;
break;
@@ -2079,6 +2050,16 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
tun_detach_filter(tun, tun->numqueues);
break;
+ case TUNGETFILTER:
+ ret = -EINVAL;
+ if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
+ break;
+ ret = -EFAULT;
+ if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
+ break;
+ ret = 0;
+ break;
+
default:
ret = -EINVAL;
break;
@@ -2159,6 +2140,7 @@ static int tun_chr_open(struct inode *inode, struct file * file)
rcu_assign_pointer(tfile->tun, NULL);
tfile->net = get_net(current->nsproxy->net_ns);
tfile->flags = 0;
+ tfile->ifindex = 0;
rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
init_waitqueue_head(&tfile->wq.wait);
@@ -2274,6 +2256,7 @@ static const struct ethtool_ops tun_ethtool_ops = {
.get_msglevel = tun_get_msglevel,
.set_msglevel = tun_set_msglevel,
.get_link = ethtool_op_get_link,
+ .get_ts_info = ethtool_op_get_ts_info,
};
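
The tun_put_user() hunk above splices a synthetic 4-byte 802.1Q header (TPID + TCI) back into the frame copied to userspace when the skb carries an offloaded VLAN tag. A minimal sketch of building those 4 bytes; my_vlan_hdr mirrors the anonymous struct in the hunk:

#include <linux/if_vlan.h>
#include <linux/skbuff.h>

struct my_vlan_hdr {
	__be16 h_vlan_proto;	/* TPID, e.g. 0x8100 */
	__be16 h_vlan_TCI;	/* priority / CFI / VID */
};

/* Build the 4 bytes that sit right after the source MAC in the frame. */
static void my_build_vlan_hdr(const struct sk_buff *skb, struct my_vlan_hdr *veth)
{
	veth->h_vlan_proto = skb->vlan_proto;			/* already big endian */
	veth->h_vlan_TCI = htons(vlan_tx_tag_get(skb));		/* tag is host order */
}

The header is inserted at offsetof(struct vlan_ethhdr, h_vlan_proto), i.e. right after the destination and source MAC addresses, which is why the copy in the hunk is split around vlan_offset.
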
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index d84bfd4109a..40db3123331 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -268,6 +268,14 @@ config USB_NET_DM9601
This option adds support for Davicom DM9601 based USB 1.1
10/100 Ethernet adapters.
+config USB_NET_SR9700
+ tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices"
+ depends on USB_USBNET
+ select CRC32
+ help
+ This option adds support for CoreChip-sz SR9700 based USB 1.1
+ 10/100 Ethernet adapters.
+
config USB_NET_SMSC75XX
tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices"
depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index e8171784529..8b342cf992f 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o
obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r815x.o
obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
+obj-$(CONFIG_USB_NET_SR9700) += sr9700.o
obj-$(CONFIG_USB_NET_SMSC75XX) += smsc75xx.o
obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o
obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
diff --git a/drivers/net/usb/asix.h b/drivers/net/usb/asix.h
index 346c032aa79..bdaa12d07a1 100644
--- a/drivers/net/usb/asix.h
+++ b/drivers/net/usb/asix.h
@@ -178,6 +178,8 @@ struct asix_common_private {
struct asix_rx_fixup_info rx_fixup_info;
};
+extern const struct driver_info ax88172a_info;
+
/* ASIX specific flags */
#define FLAG_EEPROM_MAC (1UL << 0) /* init device MAC from eeprom */
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index ad5d1e4384d..386a3df5367 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -778,6 +778,9 @@ static int ax88178_change_mtu(struct net_device *net, int new_mtu)
dev->hard_mtu = net->mtu + net->hard_header_len;
ax88178_set_mfb(dev);
+ /* max qlen depends on hard_mtu and rx_urb_size */
+ usbnet_update_max_qlen(dev);
+
return 0;
}
@@ -943,8 +946,6 @@ static const struct driver_info hg20f9_info = {
.data = FLAG_EEPROM_MAC,
};
-extern const struct driver_info ax88172a_info;
-
static const struct usb_device_id products [] = {
{
// Linksys USB200M
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index d012203b0f2..723b3879ecc 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -161,7 +161,8 @@ static const struct net_device_ops ax88172a_netdev_ops = {
.ndo_set_rx_mode = asix_set_multicast,
};
-int ax88172a_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
+static int ax88172a_get_settings(struct net_device *net,
+ struct ethtool_cmd *cmd)
{
if (!net->phydev)
return -ENODEV;
@@ -169,7 +170,8 @@ int ax88172a_get_settings(struct net_device *net, struct ethtool_cmd *cmd)
return phy_ethtool_gset(net->phydev, cmd);
}
-int ax88172a_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
+static int ax88172a_set_settings(struct net_device *net,
+ struct ethtool_cmd *cmd)
{
if (!net->phydev)
return -ENODEV;
@@ -177,7 +179,7 @@ int ax88172a_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
return phy_ethtool_sset(net->phydev, cmd);
}
-int ax88172a_nway_reset(struct net_device *net)
+static int ax88172a_nway_reset(struct net_device *net)
{
if (!net->phydev)
return -ENODEV;
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index 1e3c302d94f..3569293df87 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -688,6 +688,9 @@ static int ax88179_change_mtu(struct net_device *net, int new_mtu)
2, 2, &tmp16);
}
+ /* max qlen depends on hard_mtu and rx_urb_size */
+ usbnet_update_max_qlen(dev);
+
return 0;
}
@@ -1028,11 +1031,19 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
dev->mii.phy_id = 0x03;
dev->mii.supports_gmii = 1;
+ if (usb_device_no_sg_constraint(dev->udev))
+ dev->can_dma_sg = 1;
+
dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+ NETIF_F_RXCSUM;
dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+ NETIF_F_RXCSUM;
+
+ if (dev->can_dma_sg) {
+ dev->net->features |= NETIF_F_SG | NETIF_F_TSO;
+ dev->net->hw_features |= NETIF_F_SG | NETIF_F_TSO;
+ }
/* Enable checksum offload */
*tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
@@ -1166,32 +1177,18 @@ ax88179_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
int frame_size = dev->maxpacket;
int mss = skb_shinfo(skb)->gso_size;
int headroom;
- int tailroom;
tx_hdr1 = skb->len;
tx_hdr2 = mss;
if (((skb->len + 8) % frame_size) == 0)
tx_hdr2 |= 0x80008000; /* Enable padding */
- skb_linearize(skb);
- headroom = skb_headroom(skb);
- tailroom = skb_tailroom(skb);
-
- if (!skb_header_cloned(skb) &&
- !skb_cloned(skb) &&
- (headroom + tailroom) >= 8) {
- if (headroom < 8) {
- skb->data = memmove(skb->head + 8, skb->data, skb->len);
- skb_set_tail_pointer(skb, skb->len);
- }
- } else {
- struct sk_buff *skb2;
+ headroom = skb_headroom(skb) - 8;
- skb2 = skb_copy_expand(skb, 8, 0, flags);
+ if ((skb_header_cloned(skb) || headroom < 0) &&
+ pskb_expand_head(skb, headroom < 0 ? 8 : 0, 0, GFP_ATOMIC)) {
dev_kfree_skb_any(skb);
- skb = skb2;
- if (!skb)
- return NULL;
+ return NULL;
}
skb_push(skb, 4);
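
The ax88179 tx_fixup rewrite above drops the unconditional skb_linearize()/copy and instead guarantees 8 bytes of headroom for the TX header, reallocating the skb head only when it is cloned or short on headroom. A minimal sketch of that check with a hypothetical header length:

#include <linux/skbuff.h>
#include <linux/errno.h>

#define MY_TX_HDR_LEN	8	/* bytes to prepend */

static int my_ensure_headroom(struct sk_buff *skb)
{
	int headroom = skb_headroom(skb) - MY_TX_HDR_LEN;

	/* Reallocate the header only if it is shared or too small. */
	if ((skb_header_cloned(skb) || headroom < 0) &&
	    pskb_expand_head(skb, headroom < 0 ? MY_TX_HDR_LEN : 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	skb_push(skb, MY_TX_HDR_LEN);	/* now safe: room for the device header */
	return 0;
}
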
@@ -1317,10 +1314,10 @@ static int ax88179_reset(struct usbnet *dev)
1, 1, tmp);
dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+ NETIF_F_RXCSUM;
dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO;
+ NETIF_F_RXCSUM;
/* Enable checksum offload */
*tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index 872819851ae..25ba7eca9a1 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -400,6 +400,10 @@ static const struct usb_device_id mbim_devs[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68a2, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_zlp,
},
+ /* HP hs2434 Mobile Broadband Module needs ZLPs */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x3f0, 0x4b1d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info_zlp,
+ },
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info,
},
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index cba1d46e672..86292e6aaf4 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2816,13 +2816,16 @@ exit:
static int hso_get_config_data(struct usb_interface *interface)
{
struct usb_device *usbdev = interface_to_usbdev(interface);
- u8 config_data[17];
+ u8 *config_data = kmalloc(17, GFP_KERNEL);
u32 if_num = interface->altsetting->desc.bInterfaceNumber;
s32 result;
+ if (!config_data)
+ return -ENOMEM;
if (usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
0x86, 0xC0, 0, 0, config_data, 17,
USB_CTRL_SET_TIMEOUT) != 0x11) {
+ kfree(config_data);
return -EIO;
}
@@ -2873,6 +2876,7 @@ static int hso_get_config_data(struct usb_interface *interface)
if (config_data[16] & 0x1)
result |= HSO_INFO_CRC_BUG;
+ kfree(config_data);
return result;
}
@@ -2886,6 +2890,11 @@ static int hso_probe(struct usb_interface *interface,
struct hso_shared_int *shared_int;
struct hso_device *tmp_dev = NULL;
+ if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) {
+ dev_err(&interface->dev, "Not our interface\n");
+ return -ENODEV;
+ }
+
if_num = interface->altsetting->desc.bInterfaceNumber;
/* Get the interface/port specification from either driver_info or from
@@ -2895,10 +2904,6 @@ static int hso_probe(struct usb_interface *interface,
else
port_spec = hso_get_config_data(interface);
- if (interface->cur_altsetting->desc.bInterfaceClass != 0xFF) {
- dev_err(&interface->dev, "Not our interface\n");
- return -ENODEV;
- }
/* Check if we need to switch to alt interfaces prior to port
* configuration */
if (interface->num_altsetting > 1)
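
hso_get_config_data() above moves the 17-byte buffer from the stack to kmalloc() because usb_control_msg() data must be DMA-able and stack memory is not. A minimal sketch of the bounce-buffer pattern; the request values come from the hunk, the wrapper name is hypothetical:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>
#include <linux/errno.h>

static int my_read_config(struct usb_device *udev, u8 *out, int len)
{
	u8 *buf = kmalloc(len, GFP_KERNEL);	/* heap memory is DMA-safe */
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x86, 0xC0, 0, 0, buf, len,
			      USB_CTRL_SET_TIMEOUT);
	if (ret == len)
		memcpy(out, buf, len);

	kfree(buf);
	return ret == len ? 0 : -EIO;
}
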
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 606eba2872b..3a8131582e7 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -323,7 +323,7 @@ next_desc:
/* Never use the same address on both ends of the link, even
* if the buggy firmware told us to.
*/
- if (!compare_ether_addr(dev->net->dev_addr, default_modem_addr))
+ if (ether_addr_equal(dev->net->dev_addr, default_modem_addr))
eth_hw_addr_random(dev->net);
/* make MAC addr easily distinguishable from an IP header */
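
Note the inverted sense in the qmi_wwan hunk: compare_ether_addr() returned 0 for equal addresses, while ether_addr_equal() returns true, so the leading ! is dropped. A minimal sketch of the resulting check; my_fix_mac is a hypothetical wrapper:

#include <linux/etherdevice.h>

/* Randomize the MAC if the firmware handed us its own (buggy) address. */
static void my_fix_mac(struct net_device *net, const u8 *modem_addr)
{
	if (ether_addr_equal(net->dev_addr, modem_addr))
		eth_hw_addr_random(net);
}
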
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index ee13f9eb740..f3fce412c0c 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -19,9 +19,12 @@
#include <linux/crc32.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
/* Version Information */
-#define DRIVER_VERSION "v1.0.0 (2013/05/03)"
+#define DRIVER_VERSION "v1.01.0 (2013/08/12)"
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define DRIVER_DESC "Realtek RTL8152 Based USB 2.0 Ethernet Adapters"
#define MODULENAME "r8152"
@@ -267,6 +270,12 @@ enum rtl_register_content {
FULL_DUP = 0x01,
};
+#define RTL8152_MAX_TX 10
+#define RTL8152_MAX_RX 10
+#define INTBUFSIZE 2
+
+#define INTR_LINK 0x0004
+
#define RTL8152_REQT_READ 0xc0
#define RTL8152_REQT_WRITE 0x40
#define RTL8152_REQ_GET_REGS 0x05
@@ -285,9 +294,9 @@ enum rtl_register_content {
/* rtl8152 flags */
enum rtl8152_flags {
RTL8152_UNPLUG = 0,
- RX_URB_FAIL,
RTL8152_SET_RX_MODE,
- WORK_ENABLE
+ WORK_ENABLE,
+ RTL8152_LINK_CHG,
};
/* Define these values to match your device */
@@ -311,21 +320,53 @@ struct tx_desc {
u32 opts1;
#define TX_FS (1 << 31) /* First segment of a packet */
#define TX_LS (1 << 30) /* Final segment of a packet */
-#define TX_LEN_MASK 0xffff
+#define TX_LEN_MASK 0x3ffff
+
u32 opts2;
+#define UDP_CS (1 << 31) /* Calculate UDP/IP checksum */
+#define TCP_CS (1 << 30) /* Calculate TCP/IP checksum */
+#define IPV4_CS (1 << 29) /* Calculate IPv4 checksum */
+#define IPV6_CS (1 << 28) /* Calculate IPv6 checksum */
+};
+
+struct r8152;
+
+struct rx_agg {
+ struct list_head list;
+ struct urb *urb;
+ struct r8152 *context;
+ void *buffer;
+ void *head;
+};
+
+struct tx_agg {
+ struct list_head list;
+ struct urb *urb;
+ struct r8152 *context;
+ void *buffer;
+ void *head;
+ u32 skb_num;
+ u32 skb_len;
};
struct r8152 {
unsigned long flags;
struct usb_device *udev;
struct tasklet_struct tl;
+ struct usb_interface *intf;
struct net_device *netdev;
- struct urb *rx_urb, *tx_urb;
- struct sk_buff *tx_skb, *rx_skb;
+ struct urb *intr_urb;
+ struct tx_agg tx_info[RTL8152_MAX_TX];
+ struct rx_agg rx_info[RTL8152_MAX_RX];
+ struct list_head rx_done, tx_free;
+ struct sk_buff_head tx_queue;
+ spinlock_t rx_lock, tx_lock;
struct delayed_work schedule;
struct mii_if_info mii;
+ int intr_interval;
u32 msg_enable;
u16 ocp_base;
+ u8 *intr_buff;
u8 version;
u8 speed;
};
@@ -340,21 +381,46 @@ enum rtl_version {
* The RTL chips use a 64 element hash table based on the Ethernet CRC.
*/
static const int multicast_filter_limit = 32;
+static unsigned int rx_buf_sz = 16384;
static
int get_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
{
- return usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
+ int ret;
+ void *tmp;
+
+ tmp = kmalloc(size, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ ret = usb_control_msg(tp->udev, usb_rcvctrlpipe(tp->udev, 0),
RTL8152_REQ_GET_REGS, RTL8152_REQT_READ,
- value, index, data, size, 500);
+ value, index, tmp, size, 500);
+
+ memcpy(data, tmp, size);
+ kfree(tmp);
+
+ return ret;
}
static
int set_registers(struct r8152 *tp, u16 value, u16 index, u16 size, void *data)
{
- return usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0),
+ int ret;
+ void *tmp;
+
+ tmp = kmalloc(size, GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
+ memcpy(tmp, data, size);
+
+ ret = usb_control_msg(tp->udev, usb_sndctrlpipe(tp->udev, 0),
RTL8152_REQ_SET_REGS, RTL8152_REQT_WRITE,
- value, index, data, size, 500);
+ value, index, tmp, size, 500);
+
+ kfree(tmp);
+ return ret;
}
static int generic_ocp_read(struct r8152 *tp, u16 index, u16 size,
@@ -490,37 +556,31 @@ int usb_ocp_write(struct r8152 *tp, u16 index, u16 byteen, u16 size, void *data)
static u32 ocp_read_dword(struct r8152 *tp, u16 type, u16 index)
{
- u32 data;
+ __le32 data;
- if (type == MCU_TYPE_PLA)
- pla_ocp_read(tp, index, sizeof(data), &data);
- else
- usb_ocp_read(tp, index, sizeof(data), &data);
+ generic_ocp_read(tp, index, sizeof(data), &data, type);
return __le32_to_cpu(data);
}
static void ocp_write_dword(struct r8152 *tp, u16 type, u16 index, u32 data)
{
- if (type == MCU_TYPE_PLA)
- pla_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(data), &data);
- else
- usb_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(data), &data);
+ __le32 tmp = __cpu_to_le32(data);
+
+ generic_ocp_write(tp, index, BYTE_EN_DWORD, sizeof(tmp), &tmp, type);
}
static u16 ocp_read_word(struct r8152 *tp, u16 type, u16 index)
{
u32 data;
+ __le32 tmp;
u8 shift = index & 2;
index &= ~3;
- if (type == MCU_TYPE_PLA)
- pla_ocp_read(tp, index, sizeof(data), &data);
- else
- usb_ocp_read(tp, index, sizeof(data), &data);
+ generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
- data = __le32_to_cpu(data);
+ data = __le32_to_cpu(tmp);
data >>= (shift * 8);
data &= 0xffff;
@@ -529,7 +589,8 @@ static u16 ocp_read_word(struct r8152 *tp, u16 type, u16 index)
static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data)
{
- u32 tmp, mask = 0xffff;
+ u32 mask = 0xffff;
+ __le32 tmp;
u16 byen = BYTE_EN_WORD;
u8 shift = index & 2;
@@ -542,34 +603,25 @@ static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data)
index &= ~3;
}
- if (type == MCU_TYPE_PLA)
- pla_ocp_read(tp, index, sizeof(tmp), &tmp);
- else
- usb_ocp_read(tp, index, sizeof(tmp), &tmp);
+ generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
- tmp = __le32_to_cpu(tmp) & ~mask;
- tmp |= data;
- tmp = __cpu_to_le32(tmp);
+ data |= __le32_to_cpu(tmp) & ~mask;
+ tmp = __cpu_to_le32(data);
- if (type == MCU_TYPE_PLA)
- pla_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
- else
- usb_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
+ generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
}
static u8 ocp_read_byte(struct r8152 *tp, u16 type, u16 index)
{
u32 data;
+ __le32 tmp;
u8 shift = index & 3;
index &= ~3;
- if (type == MCU_TYPE_PLA)
- pla_ocp_read(tp, index, sizeof(data), &data);
- else
- usb_ocp_read(tp, index, sizeof(data), &data);
+ generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
- data = __le32_to_cpu(data);
+ data = __le32_to_cpu(tmp);
data >>= (shift * 8);
data &= 0xff;
@@ -578,7 +630,8 @@ static u8 ocp_read_byte(struct r8152 *tp, u16 type, u16 index)
static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
{
- u32 tmp, mask = 0xff;
+ u32 mask = 0xff;
+ __le32 tmp;
u16 byen = BYTE_EN_BYTE;
u8 shift = index & 3;
@@ -591,19 +644,12 @@ static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
index &= ~3;
}
- if (type == MCU_TYPE_PLA)
- pla_ocp_read(tp, index, sizeof(tmp), &tmp);
- else
- usb_ocp_read(tp, index, sizeof(tmp), &tmp);
+ generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
- tmp = __le32_to_cpu(tmp) & ~mask;
- tmp |= data;
- tmp = __cpu_to_le32(tmp);
+ data |= __le32_to_cpu(tmp) & ~mask;
+ tmp = __cpu_to_le32(data);
- if (type == MCU_TYPE_PLA)
- pla_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
- else
- usb_ocp_write(tp, index, byen, sizeof(tmp), &tmp);
+ generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
}
static void r8152_mdio_write(struct r8152 *tp, u32 reg_addr, u32 value)
@@ -682,24 +728,20 @@ static void ocp_reg_write(struct r8152 *tp, u16 addr, u16 data)
ocp_write_word(tp, MCU_TYPE_PLA, ocp_index, data);
}
+static
+int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags);
+
static inline void set_ethernet_addr(struct r8152 *tp)
{
struct net_device *dev = tp->netdev;
- u8 *node_id;
-
- node_id = kmalloc(sizeof(u8) * 8, GFP_KERNEL);
- if (!node_id) {
- netif_err(tp, probe, dev, "out of memory");
- return;
- }
+ u8 node_id[8] = {0};
- if (pla_ocp_read(tp, PLA_IDR, sizeof(u8) * 8, node_id) < 0)
+ if (pla_ocp_read(tp, PLA_IDR, sizeof(node_id), node_id) < 0)
netif_notice(tp, probe, dev, "inet addr fail\n");
else {
memcpy(dev->dev_addr, node_id, dev->addr_len);
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
}
- kfree(node_id);
}
static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
@@ -719,26 +761,6 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p)
return 0;
}
-static int alloc_all_urbs(struct r8152 *tp)
-{
- tp->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!tp->rx_urb)
- return 0;
- tp->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!tp->tx_urb) {
- usb_free_urb(tp->rx_urb);
- return 0;
- }
-
- return 1;
-}
-
-static void free_all_urbs(struct r8152 *tp)
-{
- usb_free_urb(tp->rx_urb);
- usb_free_urb(tp->tx_urb);
-}
-
static struct net_device_stats *rtl8152_get_stats(struct net_device *dev)
{
return &dev->stats;
@@ -746,151 +768,583 @@ static struct net_device_stats *rtl8152_get_stats(struct net_device *dev)
static void read_bulk_callback(struct urb *urb)
{
- struct r8152 *tp;
- unsigned pkt_len;
- struct sk_buff *skb;
struct net_device *netdev;
- struct net_device_stats *stats;
+ unsigned long flags;
int status = urb->status;
+ struct rx_agg *agg;
+ struct r8152 *tp;
int result;
- struct rx_desc *rx_desc;
- tp = urb->context;
+ agg = urb->context;
+ if (!agg)
+ return;
+
+ tp = agg->context;
if (!tp)
return;
+
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
+
+ if (!test_bit(WORK_ENABLE, &tp->flags))
+ return;
+
netdev = tp->netdev;
- if (!netif_device_present(netdev))
+
+ /* When the link is down, the driver cancels all bulk URBs. */
+ /* This avoids re-submitting the bulk URB. */
+ if (!netif_carrier_ok(netdev))
return;
- stats = rtl8152_get_stats(netdev);
switch (status) {
case 0:
- break;
+ if (urb->actual_length < ETH_ZLEN)
+ break;
+
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_add_tail(&agg->list, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+ tasklet_schedule(&tp->tl);
+ return;
case -ESHUTDOWN:
set_bit(RTL8152_UNPLUG, &tp->flags);
netif_device_detach(tp->netdev);
+ return;
case -ENOENT:
return; /* the urb is in unlink state */
case -ETIME:
pr_warn_ratelimited("may be reset is needed?..\n");
- goto goon;
+ break;
default:
pr_warn_ratelimited("Rx status %d\n", status);
- goto goon;
+ break;
}
- /* protect against short packets (tell me why we got some?!?) */
- if (urb->actual_length < sizeof(*rx_desc))
- goto goon;
-
-
- rx_desc = (struct rx_desc *)urb->transfer_buffer;
- pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
- if (urb->actual_length < sizeof(struct rx_desc) + pkt_len)
- goto goon;
-
- skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
- if (!skb)
- goto goon;
-
- memcpy(skb->data, tp->rx_skb->data + sizeof(struct rx_desc), pkt_len);
- skb_put(skb, pkt_len);
- skb->protocol = eth_type_trans(skb, netdev);
- netif_rx(skb);
- stats->rx_packets++;
- stats->rx_bytes += pkt_len;
-goon:
- usb_fill_bulk_urb(tp->rx_urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
- tp->rx_skb->data, RTL8152_RMS + sizeof(struct rx_desc),
- (usb_complete_t)read_bulk_callback, tp);
- result = usb_submit_urb(tp->rx_urb, GFP_ATOMIC);
+ result = r8152_submit_rx(tp, agg, GFP_ATOMIC);
if (result == -ENODEV) {
netif_device_detach(tp->netdev);
} else if (result) {
- set_bit(RX_URB_FAIL, &tp->flags);
- goto resched;
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_add_tail(&agg->list, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+ tasklet_schedule(&tp->tl);
+ }
+}
+
+static void write_bulk_callback(struct urb *urb)
+{
+ struct net_device_stats *stats;
+ unsigned long flags;
+ struct tx_agg *agg;
+ struct r8152 *tp;
+ int status = urb->status;
+
+ agg = urb->context;
+ if (!agg)
+ return;
+
+ tp = agg->context;
+ if (!tp)
+ return;
+
+ stats = rtl8152_get_stats(tp->netdev);
+ if (status) {
+ pr_warn_ratelimited("Tx status %d\n", status);
+ stats->tx_errors += agg->skb_num;
} else {
- clear_bit(RX_URB_FAIL, &tp->flags);
+ stats->tx_packets += agg->skb_num;
+ stats->tx_bytes += agg->skb_len;
}
- return;
-resched:
- tasklet_schedule(&tp->tl);
+ spin_lock_irqsave(&tp->tx_lock, flags);
+ list_add_tail(&agg->list, &tp->tx_free);
+ spin_unlock_irqrestore(&tp->tx_lock, flags);
+
+ if (!netif_carrier_ok(tp->netdev))
+ return;
+
+ if (!test_bit(WORK_ENABLE, &tp->flags))
+ return;
+
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
+ if (!skb_queue_empty(&tp->tx_queue))
+ tasklet_schedule(&tp->tl);
}
-static void rx_fixup(unsigned long data)
+static void intr_callback(struct urb *urb)
{
struct r8152 *tp;
- int status;
+ __u16 *d;
+ int status = urb->status;
+ int res;
+
+ tp = urb->context;
+ if (!tp)
+ return;
- tp = (struct r8152 *)data;
if (!test_bit(WORK_ENABLE, &tp->flags))
return;
- status = usb_submit_urb(tp->rx_urb, GFP_ATOMIC);
- if (status == -ENODEV) {
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
+ return;
+
+ switch (status) {
+ case 0: /* success */
+ break;
+ case -ECONNRESET: /* unlink */
+ case -ESHUTDOWN:
netif_device_detach(tp->netdev);
- } else if (status) {
- set_bit(RX_URB_FAIL, &tp->flags);
- goto tlsched;
+ case -ENOENT:
+ return;
+ case -EOVERFLOW:
+ netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n");
+ goto resubmit;
+ /* -EPIPE: should clear the halt */
+ default:
+ netif_info(tp, intr, tp->netdev, "intr status %d\n", status);
+ goto resubmit;
+ }
+
+ d = urb->transfer_buffer;
+ if (INTR_LINK & __le16_to_cpu(d[0])) {
+ if (!(tp->speed & LINK_STATUS)) {
+ set_bit(RTL8152_LINK_CHG, &tp->flags);
+ schedule_delayed_work(&tp->schedule, 0);
+ }
} else {
- clear_bit(RX_URB_FAIL, &tp->flags);
+ if (tp->speed & LINK_STATUS) {
+ set_bit(RTL8152_LINK_CHG, &tp->flags);
+ schedule_delayed_work(&tp->schedule, 0);
+ }
}
- return;
-tlsched:
- tasklet_schedule(&tp->tl);
+resubmit:
+ res = usb_submit_urb(urb, GFP_ATOMIC);
+ if (res == -ENODEV)
+ netif_device_detach(tp->netdev);
+ else if (res)
+ netif_err(tp, intr, tp->netdev,
+ "can't resubmit intr, status %d\n", res);
}
-static void write_bulk_callback(struct urb *urb)
+static inline void *rx_agg_align(void *data)
+{
+ return (void *)ALIGN((uintptr_t)data, 8);
+}
+
+static inline void *tx_agg_align(void *data)
+{
+ return (void *)ALIGN((uintptr_t)data, 4);
+}
+
+static void free_all_mem(struct r8152 *tp)
+{
+ int i;
+
+ for (i = 0; i < RTL8152_MAX_RX; i++) {
+ if (tp->rx_info[i].urb) {
+ usb_free_urb(tp->rx_info[i].urb);
+ tp->rx_info[i].urb = NULL;
+ }
+
+ if (tp->rx_info[i].buffer) {
+ kfree(tp->rx_info[i].buffer);
+ tp->rx_info[i].buffer = NULL;
+ tp->rx_info[i].head = NULL;
+ }
+ }
+
+ for (i = 0; i < RTL8152_MAX_TX; i++) {
+ if (tp->tx_info[i].urb) {
+ usb_free_urb(tp->tx_info[i].urb);
+ tp->tx_info[i].urb = NULL;
+ }
+
+ if (tp->tx_info[i].buffer) {
+ kfree(tp->tx_info[i].buffer);
+ tp->tx_info[i].buffer = NULL;
+ tp->tx_info[i].head = NULL;
+ }
+ }
+
+ if (tp->intr_urb) {
+ usb_free_urb(tp->intr_urb);
+ tp->intr_urb = NULL;
+ }
+
+ if (tp->intr_buff) {
+ kfree(tp->intr_buff);
+ tp->intr_buff = NULL;
+ }
+}
+
+static int alloc_all_mem(struct r8152 *tp)
+{
+ struct net_device *netdev = tp->netdev;
+ struct usb_interface *intf = tp->intf;
+ struct usb_host_interface *alt = intf->cur_altsetting;
+ struct usb_host_endpoint *ep_intr = alt->endpoint + 2;
+ struct urb *urb;
+ int node, i;
+ u8 *buf;
+
+ node = netdev->dev.parent ? dev_to_node(netdev->dev.parent) : -1;
+
+ spin_lock_init(&tp->rx_lock);
+ spin_lock_init(&tp->tx_lock);
+ INIT_LIST_HEAD(&tp->rx_done);
+ INIT_LIST_HEAD(&tp->tx_free);
+ skb_queue_head_init(&tp->tx_queue);
+
+ for (i = 0; i < RTL8152_MAX_RX; i++) {
+ buf = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
+ if (!buf)
+ goto err1;
+
+ if (buf != rx_agg_align(buf)) {
+ kfree(buf);
+ buf = kmalloc_node(rx_buf_sz + 8, GFP_KERNEL, node);
+ if (!buf)
+ goto err1;
+ }
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ kfree(buf);
+ goto err1;
+ }
+
+ INIT_LIST_HEAD(&tp->rx_info[i].list);
+ tp->rx_info[i].context = tp;
+ tp->rx_info[i].urb = urb;
+ tp->rx_info[i].buffer = buf;
+ tp->rx_info[i].head = rx_agg_align(buf);
+ }
+
+ for (i = 0; i < RTL8152_MAX_TX; i++) {
+ buf = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
+ if (!buf)
+ goto err1;
+
+ if (buf != tx_agg_align(buf)) {
+ kfree(buf);
+ buf = kmalloc_node(rx_buf_sz + 4, GFP_KERNEL, node);
+ if (!buf)
+ goto err1;
+ }
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ kfree(buf);
+ goto err1;
+ }
+
+ INIT_LIST_HEAD(&tp->tx_info[i].list);
+ tp->tx_info[i].context = tp;
+ tp->tx_info[i].urb = urb;
+ tp->tx_info[i].buffer = buf;
+ tp->tx_info[i].head = tx_agg_align(buf);
+
+ list_add_tail(&tp->tx_info[i].list, &tp->tx_free);
+ }
+
+ tp->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!tp->intr_urb)
+ goto err1;
+
+ tp->intr_buff = kmalloc(INTBUFSIZE, GFP_KERNEL);
+ if (!tp->intr_buff)
+ goto err1;
+
+ tp->intr_interval = (int)ep_intr->desc.bInterval;
+ usb_fill_int_urb(tp->intr_urb, tp->udev, usb_rcvintpipe(tp->udev, 3),
+ tp->intr_buff, INTBUFSIZE, intr_callback,
+ tp, tp->intr_interval);
+
+ return 0;
+
+err1:
+ free_all_mem(tp);
+ return -ENOMEM;
+}
+
+static struct tx_agg *r8152_get_tx_agg(struct r8152 *tp)
+{
+ struct tx_agg *agg = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tp->tx_lock, flags);
+ if (!list_empty(&tp->tx_free)) {
+ struct list_head *cursor;
+
+ cursor = tp->tx_free.next;
+ list_del_init(cursor);
+ agg = list_entry(cursor, struct tx_agg, list);
+ }
+ spin_unlock_irqrestore(&tp->tx_lock, flags);
+
+ return agg;
+}
+
+static void
+r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb)
+{
+ memset(desc, 0, sizeof(*desc));
+
+ desc->opts1 = cpu_to_le32((skb->len & TX_LEN_MASK) | TX_FS | TX_LS);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ __be16 protocol;
+ u8 ip_protocol;
+ u32 opts2 = 0;
+
+ if (skb->protocol == htons(ETH_P_8021Q))
+ protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+ else
+ protocol = skb->protocol;
+
+ switch (protocol) {
+ case htons(ETH_P_IP):
+ opts2 |= IPV4_CS;
+ ip_protocol = ip_hdr(skb)->protocol;
+ break;
+
+ case htons(ETH_P_IPV6):
+ opts2 |= IPV6_CS;
+ ip_protocol = ipv6_hdr(skb)->nexthdr;
+ break;
+
+ default:
+ ip_protocol = IPPROTO_RAW;
+ break;
+ }
+
+ if (ip_protocol == IPPROTO_TCP) {
+ opts2 |= TCP_CS;
+ opts2 |= (skb_transport_offset(skb) & 0x7fff) << 17;
+ } else if (ip_protocol == IPPROTO_UDP) {
+ opts2 |= UDP_CS;
+ } else {
+ WARN_ON_ONCE(1);
+ }
+
+ desc->opts2 = cpu_to_le32(opts2);
+ }
+}
+
+static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
+{
+ u32 remain;
+ u8 *tx_data;
+
+ tx_data = agg->head;
+ agg->skb_num = agg->skb_len = 0;
+ remain = rx_buf_sz - sizeof(struct tx_desc);
+
+ while (remain >= ETH_ZLEN) {
+ struct tx_desc *tx_desc;
+ struct sk_buff *skb;
+ unsigned int len;
+
+ skb = skb_dequeue(&tp->tx_queue);
+ if (!skb)
+ break;
+
+ len = skb->len;
+ if (remain < len) {
+ skb_queue_head(&tp->tx_queue, skb);
+ break;
+ }
+
+ tx_desc = (struct tx_desc *)tx_data;
+ tx_data += sizeof(*tx_desc);
+
+ r8152_tx_csum(tp, tx_desc, skb);
+ memcpy(tx_data, skb->data, len);
+ agg->skb_num++;
+ agg->skb_len += len;
+ dev_kfree_skb_any(skb);
+
+ tx_data = tx_agg_align(tx_data + len);
+ remain = rx_buf_sz - sizeof(*tx_desc) -
+ (u32)((void *)tx_data - agg->head);
+ }
+
+ usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
+ agg->head, (int)(tx_data - (u8 *)agg->head),
+ (usb_complete_t)write_bulk_callback, agg);
+
+ return usb_submit_urb(agg->urb, GFP_ATOMIC);
+}
+
+static void rx_bottom(struct r8152 *tp)
+{
+ unsigned long flags;
+ struct list_head *cursor, *next;
+
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_for_each_safe(cursor, next, &tp->rx_done) {
+ struct rx_desc *rx_desc;
+ struct rx_agg *agg;
+ unsigned pkt_len;
+ int len_used = 0;
+ struct urb *urb;
+ u8 *rx_data;
+ int ret;
+
+ list_del_init(cursor);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+
+ agg = list_entry(cursor, struct rx_agg, list);
+ urb = agg->urb;
+ if (urb->actual_length < ETH_ZLEN)
+ goto submit;
+
+ rx_desc = agg->head;
+ rx_data = agg->head;
+ pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
+ len_used += sizeof(struct rx_desc) + pkt_len;
+
+ while (urb->actual_length >= len_used) {
+ struct net_device *netdev = tp->netdev;
+ struct net_device_stats *stats;
+ struct sk_buff *skb;
+
+ if (pkt_len < ETH_ZLEN)
+ break;
+
+ stats = rtl8152_get_stats(netdev);
+
+ pkt_len -= 4; /* CRC */
+ rx_data += sizeof(struct rx_desc);
+
+ skb = netdev_alloc_skb_ip_align(netdev, pkt_len);
+ if (!skb) {
+ stats->rx_dropped++;
+ break;
+ }
+ memcpy(skb->data, rx_data, pkt_len);
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, netdev);
+ netif_rx(skb);
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+
+ rx_data = rx_agg_align(rx_data + pkt_len + 4);
+ rx_desc = (struct rx_desc *)rx_data;
+ pkt_len = le32_to_cpu(rx_desc->opts1) & RX_LEN_MASK;
+ len_used = (int)(rx_data - (u8 *)agg->head);
+ len_used += sizeof(struct rx_desc) + pkt_len;
+ }
+
+submit:
+ ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ if (ret && ret != -ENODEV) {
+ list_add_tail(&agg->list, next);
+ tasklet_schedule(&tp->tl);
+ }
+ }
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+}
+
+static void tx_bottom(struct r8152 *tp)
+{
+ int res;
+
+ do {
+ struct tx_agg *agg;
+
+ if (skb_queue_empty(&tp->tx_queue))
+ break;
+
+ agg = r8152_get_tx_agg(tp);
+ if (!agg)
+ break;
+
+ res = r8152_tx_agg_fill(tp, agg);
+ if (res) {
+ struct net_device_stats *stats;
+ struct net_device *netdev;
+ unsigned long flags;
+
+ netdev = tp->netdev;
+ stats = rtl8152_get_stats(netdev);
+
+ if (res == -ENODEV) {
+ netif_device_detach(netdev);
+ } else {
+ netif_warn(tp, tx_err, netdev,
+ "failed tx_urb %d\n", res);
+ stats->tx_dropped += agg->skb_num;
+ spin_lock_irqsave(&tp->tx_lock, flags);
+ list_add_tail(&agg->list, &tp->tx_free);
+ spin_unlock_irqrestore(&tp->tx_lock, flags);
+ }
+ }
+ } while (res == 0);
+}
+
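+/* Tasklet handler: process completed rx and tx work unless the device is
+ * unplugged, the work is disabled, or the link is down.
+ */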
+static void bottom_half(unsigned long data)
{
struct r8152 *tp;
- int status = urb->status;
- tp = urb->context;
- if (!tp)
+ tp = (struct r8152 *)data;
+
+ if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
- dev_kfree_skb_irq(tp->tx_skb);
- if (!netif_device_present(tp->netdev))
+
+ if (!test_bit(WORK_ENABLE, &tp->flags))
+ return;
+
+ /* When the link is down, the driver cancels all bulk transfers. */
+ /* This avoids re-submitting the bulk URBs. */
+ if (!netif_carrier_ok(tp->netdev))
return;
- if (status)
- dev_info(&urb->dev->dev, "%s: Tx status %d\n",
- tp->netdev->name, status);
- tp->netdev->trans_start = jiffies;
- netif_wake_queue(tp->netdev);
+
+ rx_bottom(tp);
+ tx_bottom(tp);
+}
+
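+/* (Re)arm one rx aggregation buffer as a bulk-in URB on endpoint 1. */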
+static
+int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
+{
+ usb_fill_bulk_urb(agg->urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
+ agg->head, rx_buf_sz,
+ (usb_complete_t)read_bulk_callback, agg);
+
+ return usb_submit_urb(agg->urb, mem_flags);
}
static void rtl8152_tx_timeout(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
- struct net_device_stats *stats = rtl8152_get_stats(netdev);
+ int i;
+
netif_warn(tp, tx_err, netdev, "Tx timeout.\n");
- usb_unlink_urb(tp->tx_urb);
- stats->tx_errors++;
+ for (i = 0; i < RTL8152_MAX_TX; i++)
+ usb_unlink_urb(tp->tx_info[i].urb);
}
static void rtl8152_set_rx_mode(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
- if (tp->speed & LINK_STATUS)
+ if (tp->speed & LINK_STATUS) {
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+ schedule_delayed_work(&tp->schedule, 0);
+ }
}
static void _rtl8152_set_rx_mode(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
- u32 tmp, *mc_filter; /* Multicast hash filter */
+ u32 mc_filter[2]; /* Multicast hash filter */
+ __le32 tmp[2];
u32 ocp_data;
- mc_filter = kmalloc(sizeof(u32) * 2, GFP_KERNEL);
- if (!mc_filter) {
- netif_err(tp, link, netdev, "out of memory");
- return;
- }
-
clear_bit(RTL8152_SET_RX_MODE, &tp->flags);
netif_stop_queue(netdev);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
@@ -918,14 +1372,12 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
}
}
- tmp = mc_filter[0];
- mc_filter[0] = __cpu_to_le32(swab32(mc_filter[1]));
- mc_filter[1] = __cpu_to_le32(swab32(tmp));
+ tmp[0] = __cpu_to_le32(swab32(mc_filter[1]));
+ tmp[1] = __cpu_to_le32(swab32(mc_filter[0]));
- pla_ocp_write(tp, PLA_MAR, BYTE_EN_DWORD, sizeof(u32) * 2, mc_filter);
+ pla_ocp_write(tp, PLA_MAR, BYTE_EN_DWORD, sizeof(tmp), tmp);
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
netif_wake_queue(netdev);
- kfree(mc_filter);
}
static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
@@ -933,33 +1385,39 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
{
struct r8152 *tp = netdev_priv(netdev);
struct net_device_stats *stats = rtl8152_get_stats(netdev);
+ unsigned long flags;
+ struct tx_agg *agg = NULL;
struct tx_desc *tx_desc;
unsigned int len;
+ u8 *tx_data;
int res;
- netif_stop_queue(netdev);
- len = skb->len;
- if (skb_header_cloned(skb) || skb_headroom(skb) < sizeof(*tx_desc)) {
- struct sk_buff *tx_skb;
+ skb_tx_timestamp(skb);
- tx_skb = skb_copy_expand(skb, sizeof(*tx_desc), 0, GFP_ATOMIC);
- dev_kfree_skb_any(skb);
- if (!tx_skb) {
- stats->tx_dropped++;
- netif_wake_queue(netdev);
- return NETDEV_TX_OK;
- }
- skb = tx_skb;
+ /* If tx_queue is not empty, at least one previous packet is still */
+ /* waiting to be sent. Don't send the current one before it. */
+ if (skb_queue_empty(&tp->tx_queue))
+ agg = r8152_get_tx_agg(tp);
+
+ if (!agg) {
+ skb_queue_tail(&tp->tx_queue, skb);
+ return NETDEV_TX_OK;
}
- tx_desc = (struct tx_desc *)skb_push(skb, sizeof(*tx_desc));
- memset(tx_desc, 0, sizeof(*tx_desc));
- tx_desc->opts1 = cpu_to_le32((len & TX_LEN_MASK) | TX_FS | TX_LS);
- tp->tx_skb = skb;
- skb_tx_timestamp(skb);
- usb_fill_bulk_urb(tp->tx_urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
- skb->data, skb->len,
- (usb_complete_t)write_bulk_callback, tp);
- res = usb_submit_urb(tp->tx_urb, GFP_ATOMIC);
+
+ tx_desc = (struct tx_desc *)agg->head;
+ tx_data = agg->head + sizeof(*tx_desc);
+ agg->skb_num = agg->skb_len = 0;
+
+ len = skb->len;
+ r8152_tx_csum(tp, tx_desc, skb);
+ memcpy(tx_data, skb->data, len);
+ dev_kfree_skb_any(skb);
+ agg->skb_num++;
+ agg->skb_len += len;
+ usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
+ agg->head, len + sizeof(*tx_desc),
+ (usb_complete_t)write_bulk_callback, agg);
+ res = usb_submit_urb(agg->urb, GFP_ATOMIC);
if (res) {
/* Can we get/handle EPIPE here? */
if (res == -ENODEV) {
@@ -967,12 +1425,11 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
} else {
netif_warn(tp, tx_err, netdev,
"failed tx_urb %d\n", res);
- stats->tx_errors++;
- netif_start_queue(netdev);
+ stats->tx_dropped++;
+ spin_lock_irqsave(&tp->tx_lock, flags);
+ list_add_tail(&agg->list, &tp->tx_free);
+ spin_unlock_irqrestore(&tp->tx_lock, flags);
}
- } else {
- stats->tx_packets++;
- stats->tx_bytes += skb->len;
}
return NETDEV_TX_OK;
@@ -1009,17 +1466,18 @@ static inline u8 rtl8152_get_speed(struct r8152 *tp)
static int rtl8152_enable(struct r8152 *tp)
{
- u32 ocp_data;
+ u32 ocp_data;
+ int i, ret;
u8 speed;
speed = rtl8152_get_speed(tp);
- if (speed & _100bps) {
+ if (speed & _10bps) {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
- ocp_data &= ~EEEP_CR_EEEP_TX;
+ ocp_data |= EEEP_CR_EEEP_TX;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
} else {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR);
- ocp_data |= EEEP_CR_EEEP_TX;
+ ocp_data &= ~EEEP_CR_EEEP_TX;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEEP_CR, ocp_data);
}
@@ -1033,23 +1491,34 @@ static int rtl8152_enable(struct r8152 *tp)
ocp_data &= ~RXDY_GATED_EN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
- usb_fill_bulk_urb(tp->rx_urb, tp->udev, usb_rcvbulkpipe(tp->udev, 1),
- tp->rx_skb->data, RTL8152_RMS + sizeof(struct rx_desc),
- (usb_complete_t)read_bulk_callback, tp);
+ INIT_LIST_HEAD(&tp->rx_done);
+ ret = 0;
+ for (i = 0; i < RTL8152_MAX_RX; i++) {
+ INIT_LIST_HEAD(&tp->rx_info[i].list);
+ ret |= r8152_submit_rx(tp, &tp->rx_info[i], GFP_KERNEL);
+ }
- return usb_submit_urb(tp->rx_urb, GFP_KERNEL);
+ return ret;
}
static void rtl8152_disable(struct r8152 *tp)
{
- u32 ocp_data;
- int i;
+ struct net_device_stats *stats = rtl8152_get_stats(tp->netdev);
+ struct sk_buff *skb;
+ u32 ocp_data;
+ int i;
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
- usb_kill_urb(tp->tx_urb);
+ while ((skb = skb_dequeue(&tp->tx_queue))) {
+ dev_kfree_skb(skb);
+ stats->tx_dropped++;
+ }
+
+ for (i = 0; i < RTL8152_MAX_TX; i++)
+ usb_kill_urb(tp->tx_info[i].urb);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
ocp_data |= RXDY_GATED_EN;
@@ -1068,7 +1537,8 @@ static void rtl8152_disable(struct r8152 *tp)
mdelay(1);
}
- usb_kill_urb(tp->rx_urb);
+ for (i = 0; i < RTL8152_MAX_RX; i++)
+ usb_kill_urb(tp->rx_info[i].urb);
rtl8152_nic_reset(tp);
}
@@ -1279,7 +1749,6 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
r8152_mdio_write(tp, MII_BMCR, bmcr);
out:
- schedule_delayed_work(&tp->schedule, 5 * HZ);
return ret;
}
@@ -1302,6 +1771,7 @@ static void set_carrier(struct r8152 *tp)
struct net_device *netdev = tp->netdev;
u8 speed;
+ clear_bit(RTL8152_LINK_CHG, &tp->flags);
speed = rtl8152_get_speed(tp);
if (speed & LINK_STATUS) {
@@ -1313,7 +1783,9 @@ static void set_carrier(struct r8152 *tp)
} else {
if (tp->speed & LINK_STATUS) {
netif_carrier_off(netdev);
+ tasklet_disable(&tp->tl);
rtl8152_disable(tp);
+ tasklet_enable(&tp->tl);
}
}
tp->speed = speed;
@@ -1329,13 +1801,12 @@ static void rtl_work_func_t(struct work_struct *work)
if (test_bit(RTL8152_UNPLUG, &tp->flags))
goto out1;
- set_carrier(tp);
+ if (test_bit(RTL8152_LINK_CHG, &tp->flags))
+ set_carrier(tp);
if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
_rtl8152_set_rx_mode(tp->netdev);
- schedule_delayed_work(&tp->schedule, HZ);
-
out1:
return;
}
@@ -1345,28 +1816,20 @@ static int rtl8152_open(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
- tp->speed = rtl8152_get_speed(tp);
- if (tp->speed & LINK_STATUS) {
- res = rtl8152_enable(tp);
- if (res) {
- if (res == -ENODEV)
- netif_device_detach(tp->netdev);
-
- netif_err(tp, ifup, netdev,
- "rtl8152_open failed: %d\n", res);
- return res;
- }
-
- netif_carrier_on(netdev);
- } else {
- netif_stop_queue(netdev);
- netif_carrier_off(netdev);
+ res = usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+ if (res) {
+ if (res == -ENODEV)
+ netif_device_detach(tp->netdev);
+ netif_warn(tp, ifup, netdev,
+ "intr_urb submit failed: %d\n", res);
+ return res;
}
rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
+ tp->speed = 0;
+ netif_carrier_off(netdev);
netif_start_queue(netdev);
set_bit(WORK_ENABLE, &tp->flags);
- schedule_delayed_work(&tp->schedule, 0);
return res;
}
@@ -1376,10 +1839,13 @@ static int rtl8152_close(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
+ usb_kill_urb(tp->intr_urb);
clear_bit(WORK_ENABLE, &tp->flags);
cancel_delayed_work_sync(&tp->schedule);
netif_stop_queue(netdev);
+ tasklet_disable(&tp->tl);
rtl8152_disable(tp);
+ tasklet_enable(&tp->tl);
return res;
}
@@ -1439,8 +1905,8 @@ static void r8152b_hw_phy_cfg(struct r8152 *tp)
static void r8152b_init(struct r8152 *tp)
{
- u32 ocp_data;
- int i;
+ u32 ocp_data;
+ int i;
rtl_clear_bp(tp);
@@ -1485,9 +1951,9 @@ static void r8152b_init(struct r8152 *tp)
break;
}
- /* disable rx aggregation */
+ /* enable rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
- ocp_data |= RX_AGG_DISABLE;
+ ocp_data &= ~RX_AGG_DISABLE;
ocp_write_word(tp, MCU_TYPE_USB, USB_USB_CTRL, ocp_data);
}
@@ -1499,7 +1965,9 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
if (netif_running(tp->netdev)) {
clear_bit(WORK_ENABLE, &tp->flags);
+ usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
+ tasklet_disable(&tp->tl);
}
rtl8152_down(tp);
@@ -1514,10 +1982,12 @@ static int rtl8152_resume(struct usb_interface *intf)
r8152b_init(tp);
netif_device_attach(tp->netdev);
if (netif_running(tp->netdev)) {
- rtl8152_enable(tp);
+ rtl8152_set_speed(tp, AUTONEG_ENABLE, SPEED_100, DUPLEX_FULL);
+ tp->speed = 0;
+ netif_carrier_off(tp->netdev);
set_bit(WORK_ENABLE, &tp->flags);
- set_bit(RTL8152_SET_RX_MODE, &tp->flags);
- schedule_delayed_work(&tp->schedule, 0);
+ usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+ tasklet_enable(&tp->tl);
}
return 0;
@@ -1629,6 +2099,7 @@ static int rtl8152_probe(struct usb_interface *intf,
struct usb_device *udev = interface_to_usbdev(intf);
struct r8152 *tp;
struct net_device *netdev;
+ int ret;
if (udev->actconfig->desc.bConfigurationValue != 1) {
usb_driver_set_configuration(udev, 1);
@@ -1641,19 +2112,22 @@ static int rtl8152_probe(struct usb_interface *intf,
return -ENOMEM;
}
+ SET_NETDEV_DEV(netdev, &intf->dev);
tp = netdev_priv(netdev);
tp->msg_enable = 0x7FFF;
- tasklet_init(&tp->tl, rx_fixup, (unsigned long)tp);
+ tasklet_init(&tp->tl, bottom_half, (unsigned long)tp);
INIT_DELAYED_WORK(&tp->schedule, rtl_work_func_t);
tp->udev = udev;
tp->netdev = netdev;
+ tp->intf = intf;
netdev->netdev_ops = &rtl8152_netdev_ops;
netdev->watchdog_timeo = RTL8152_TX_TIMEOUT;
- netdev->features &= ~NETIF_F_IP_CSUM;
+
+ netdev->features |= NETIF_F_IP_CSUM;
+ netdev->hw_features = NETIF_F_IP_CSUM;
SET_ETHTOOL_OPS(netdev, &ops);
- tp->speed = 0;
tp->mii.dev = netdev;
tp->mii.mdio_read = read_mii_word;
@@ -1667,37 +2141,27 @@ static int rtl8152_probe(struct usb_interface *intf,
r8152b_init(tp);
set_ethernet_addr(tp);
- if (!alloc_all_urbs(tp)) {
- netif_err(tp, probe, netdev, "out of memory");
+ ret = alloc_all_mem(tp);
+ if (ret)
goto out;
- }
-
- tp->rx_skb = netdev_alloc_skb(netdev,
- RTL8152_RMS + sizeof(struct rx_desc));
- if (!tp->rx_skb)
- goto out1;
usb_set_intfdata(intf, tp);
- SET_NETDEV_DEV(netdev, &intf->dev);
-
- if (register_netdev(netdev) != 0) {
+ ret = register_netdev(netdev);
+ if (ret != 0) {
netif_err(tp, probe, netdev, "couldn't register the device");
- goto out2;
+ goto out1;
}
netif_info(tp, probe, netdev, "%s", DRIVER_VERSION);
return 0;
-out2:
- usb_set_intfdata(intf, NULL);
- dev_kfree_skb(tp->rx_skb);
out1:
- free_all_urbs(tp);
+ usb_set_intfdata(intf, NULL);
out:
free_netdev(netdev);
- return -EIO;
+ return ret;
}
static void rtl8152_unload(struct r8152 *tp)
@@ -1725,9 +2189,7 @@ static void rtl8152_disconnect(struct usb_interface *intf)
tasklet_kill(&tp->tl);
unregister_netdev(tp->netdev);
rtl8152_unload(tp);
- free_all_urbs(tp);
- if (tp->rx_skb)
- dev_kfree_skb(tp->rx_skb);
+ free_all_mem(tp);
free_netdev(tp->netdev);
}
}
@@ -1742,11 +2204,12 @@ MODULE_DEVICE_TABLE(usb, rtl8152_table);
static struct usb_driver rtl8152_driver = {
.name = MODULENAME,
+ .id_table = rtl8152_table,
.probe = rtl8152_probe,
.disconnect = rtl8152_disconnect,
- .id_table = rtl8152_table,
.suspend = rtl8152_suspend,
- .resume = rtl8152_resume
+ .resume = rtl8152_resume,
+ .reset_resume = rtl8152_resume,
};
module_usb_driver(rtl8152_driver);
diff --git a/drivers/net/usb/r815x.c b/drivers/net/usb/r815x.c
index 85239226971..2df2f4fb42a 100644
--- a/drivers/net/usb/r815x.c
+++ b/drivers/net/usb/r815x.c
@@ -24,34 +24,43 @@
static int pla_read_word(struct usb_device *udev, u16 index)
{
- int data, ret;
+ int ret;
u8 shift = index & 2;
- __le32 ocp_data;
+ __le32 *tmp;
+
+ tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
index &= ~3;
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
- index, MCU_TYPE_PLA, &ocp_data, sizeof(ocp_data),
- 500);
+ index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
if (ret < 0)
- return ret;
+ goto out2;
- data = __le32_to_cpu(ocp_data);
- data >>= (shift * 8);
- data &= 0xffff;
+ ret = __le32_to_cpu(*tmp);
+ ret >>= (shift * 8);
+ ret &= 0xffff;
- return data;
+out2:
+ kfree(tmp);
+ return ret;
}
static int pla_write_word(struct usb_device *udev, u16 index, u32 data)
{
- __le32 ocp_data;
+ __le32 *tmp;
u32 mask = 0xffff;
u16 byen = BYTE_EN_WORD;
u8 shift = index & 2;
int ret;
+ tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
data &= mask;
if (shift) {
@@ -63,19 +72,20 @@ static int pla_write_word(struct usb_device *udev, u16 index, u32 data)
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
- index, MCU_TYPE_PLA, &ocp_data, sizeof(ocp_data),
- 500);
+ index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
if (ret < 0)
- return ret;
+ goto out3;
- data |= __le32_to_cpu(ocp_data) & ~mask;
- ocp_data = __cpu_to_le32(data);
+ data |= __le32_to_cpu(*tmp) & ~mask;
+ *tmp = __cpu_to_le32(data);
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
RTL815x_REQ_SET_REGS, RTL815x_REQT_WRITE,
- index, MCU_TYPE_PLA | byen, &ocp_data,
- sizeof(ocp_data), 500);
+ index, MCU_TYPE_PLA | byen, tmp, sizeof(*tmp),
+ 500);
+out3:
+ kfree(tmp);
return ret;
}
@@ -116,11 +126,18 @@ out1:
static int r815x_mdio_read(struct net_device *netdev, int phy_id, int reg)
{
struct usbnet *dev = netdev_priv(netdev);
+ int ret;
if (phy_id != R815x_PHY_ID)
return -EINVAL;
- return ocp_reg_read(dev, BASE_MII + reg * 2);
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return -ENODEV;
+
+ ret = ocp_reg_read(dev, BASE_MII + reg * 2);
+
+ usb_autopm_put_interface(dev->intf);
+ return ret;
}
static
@@ -131,7 +148,12 @@ void r815x_mdio_write(struct net_device *netdev, int phy_id, int reg, int val)
if (phy_id != R815x_PHY_ID)
return;
+ if (usb_autopm_get_interface(dev->intf) < 0)
+ return;
+
ocp_reg_write(dev, BASE_MII + reg * 2, val);
+
+ usb_autopm_put_interface(dev->intf);
}
static int r8153_bind(struct usbnet *dev, struct usb_interface *intf)
@@ -150,7 +172,7 @@ static int r8153_bind(struct usbnet *dev, struct usb_interface *intf)
dev->mii.phy_id = R815x_PHY_ID;
dev->mii.supports_gmii = 1;
- return 0;
+ return status;
}
static int r8152_bind(struct usbnet *dev, struct usb_interface *intf)
@@ -169,7 +191,7 @@ static int r8152_bind(struct usbnet *dev, struct usb_interface *intf)
dev->mii.phy_id = R815x_PHY_ID;
dev->mii.supports_gmii = 0;
- return 0;
+ return status;
}
static const struct driver_info r8152_info = {
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 75409748c77..66ebbacf066 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -45,7 +45,6 @@
#define EEPROM_MAC_OFFSET (0x01)
#define DEFAULT_TX_CSUM_ENABLE (true)
#define DEFAULT_RX_CSUM_ENABLE (true)
-#define DEFAULT_TSO_ENABLE (true)
#define SMSC75XX_INTERNAL_PHY_ID (1)
#define SMSC75XX_TX_OVERHEAD (8)
#define MAX_RX_FIFO_SIZE (20 * 1024)
@@ -1410,17 +1409,14 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write);
- if (DEFAULT_TX_CSUM_ENABLE) {
+ if (DEFAULT_TX_CSUM_ENABLE)
dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- if (DEFAULT_TSO_ENABLE)
- dev->net->features |= NETIF_F_SG |
- NETIF_F_TSO | NETIF_F_TSO6;
- }
+
if (DEFAULT_RX_CSUM_ENABLE)
dev->net->features |= NETIF_F_RXCSUM;
dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM;
ret = smsc75xx_wait_ready(dev, 0);
if (ret < 0) {
@@ -2200,8 +2196,6 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev,
{
u32 tx_cmd_a, tx_cmd_b;
- skb_linearize(skb);
-
if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) {
struct sk_buff *skb2 =
skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags);
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
new file mode 100644
index 00000000000..7ec3e0ee078
--- /dev/null
+++ b/drivers/net/usb/sr9700.c
@@ -0,0 +1,560 @@
+/*
+ * CoreChip-sz SR9700 one chip USB 1.1 Ethernet Devices
+ *
+ * Author : Liu Junliang <liujunliang_ljl@163.com>
+ *
+ * Based on dm9601.c
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/usb/usbnet.h>
+
+#include "sr9700.h"
+
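+/* Vendor register accessors: issue control transfers against the SR9700
+ * register file and treat short transfers as -EINVAL.
+ */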
+static int sr_read(struct usbnet *dev, u8 reg, u16 length, void *data)
+{
+ int err;
+
+ err = usbnet_read_cmd(dev, SR_RD_REGS, SR_REQ_RD_REG, 0, reg, data,
+ length);
+ if ((err != length) && (err >= 0))
+ err = -EINVAL;
+ return err;
+}
+
+static int sr_write(struct usbnet *dev, u8 reg, u16 length, void *data)
+{
+ int err;
+
+ err = usbnet_write_cmd(dev, SR_WR_REGS, SR_REQ_WR_REG, 0, reg, data,
+ length);
+ if ((err >= 0) && (err < length))
+ err = -EINVAL;
+ return err;
+}
+
+static int sr_read_reg(struct usbnet *dev, u8 reg, u8 *value)
+{
+ return sr_read(dev, reg, 1, value);
+}
+
+static int sr_write_reg(struct usbnet *dev, u8 reg, u8 value)
+{
+ return usbnet_write_cmd(dev, SR_WR_REGS, SR_REQ_WR_REG,
+ value, reg, NULL, 0);
+}
+
+static void sr_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
+{
+ usbnet_write_cmd_async(dev, SR_WR_REGS, SR_REQ_WR_REG,
+ 0, reg, data, length);
+}
+
+static void sr_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
+{
+ usbnet_write_cmd_async(dev, SR_WR_REGS, SR_REQ_WR_REG,
+ value, reg, NULL, 0);
+}
+
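+/* Poll EPCR until the shared EEPROM/PHY access completes, or give up after
+ * SR_SHARE_TIMEOUT iterations.
+ */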
+static int wait_phy_eeprom_ready(struct usbnet *dev, int phy)
+{
+ int i;
+
+ for (i = 0; i < SR_SHARE_TIMEOUT; i++) {
+ u8 tmp = 0;
+ int ret;
+
+ udelay(1);
+ ret = sr_read_reg(dev, EPCR, &tmp);
+ if (ret < 0)
+ return ret;
+
+ /* ready */
+ if (!(tmp & EPCR_ERRE))
+ return 0;
+ }
+
+ netdev_err(dev->net, "%s write timed out!\n", phy ? "phy" : "eeprom");
+
+ return -EIO;
+}
+
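+/* Read a 16-bit word through the shared EEPROM/PHY interface: program EPAR
+ * with the register (or PHY) address, trigger the read via EPCR, wait for
+ * the controller to finish, then fetch the result from EPDR.
+ */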
+static int sr_share_read_word(struct usbnet *dev, int phy, u8 reg,
+ __le16 *value)
+{
+ int ret;
+
+ mutex_lock(&dev->phy_mutex);
+
+ sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
+ sr_write_reg(dev, EPCR, phy ? (EPCR_EPOS | EPCR_ERPRR) : EPCR_ERPRR);
+
+ ret = wait_phy_eeprom_ready(dev, phy);
+ if (ret < 0)
+ goto out_unlock;
+
+ sr_write_reg(dev, EPCR, 0x0);
+ ret = sr_read(dev, EPDR, 2, value);
+
+ netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n",
+ phy, reg, *value, ret);
+
+out_unlock:
+ mutex_unlock(&dev->phy_mutex);
+ return ret;
+}
+
+static int sr_share_write_word(struct usbnet *dev, int phy, u8 reg,
+ __le16 value)
+{
+ int ret;
+
+ mutex_lock(&dev->phy_mutex);
+
+ ret = sr_write(dev, EPDR, 2, &value);
+ if (ret < 0)
+ goto out_unlock;
+
+ sr_write_reg(dev, EPAR, phy ? (reg | EPAR_PHY_ADR) : reg);
+ sr_write_reg(dev, EPCR, phy ? (EPCR_WEP | EPCR_EPOS | EPCR_ERPRW) :
+ (EPCR_WEP | EPCR_ERPRW));
+
+ ret = wait_phy_eeprom_ready(dev, phy);
+ if (ret < 0)
+ goto out_unlock;
+
+ sr_write_reg(dev, EPCR, 0x0);
+
+out_unlock:
+ mutex_unlock(&dev->phy_mutex);
+ return ret;
+}
+
+static int sr_read_eeprom_word(struct usbnet *dev, u8 offset, void *value)
+{
+ return sr_share_read_word(dev, 0, offset, value);
+}
+
+static int sr9700_get_eeprom_len(struct net_device *netdev)
+{
+ return SR_EEPROM_LEN;
+}
+
+static int sr9700_get_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ __le16 *buf = (__le16 *)data;
+ int ret = 0;
+ int i;
+
+ /* accesses are 16-bit */
+ if ((eeprom->offset & 0x01) || (eeprom->len & 0x01))
+ return -EINVAL;
+
+ for (i = 0; i < eeprom->len / 2; i++) {
+ ret = sr_read_eeprom_word(dev, eeprom->offset / 2 + i, buf + i);
+ if (ret < 0)
+ break;
+ }
+
+ return ret;
+}
+
+static int sr_mdio_read(struct net_device *netdev, int phy_id, int loc)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ __le16 res;
+ int rc = 0;
+
+ if (phy_id) {
+ netdev_dbg(netdev, "Only internal phy supported\n");
+ return 0;
+ }
+
+ /* Access NSR_LINKST bit for link status instead of MII_BMSR */
+ if (loc == MII_BMSR) {
+ u8 value;
+
+ sr_read_reg(dev, NSR, &value);
+ if (value & NSR_LINKST)
+ rc = 1;
+ }
+ sr_share_read_word(dev, 1, loc, &res);
+ if (rc == 1)
+ res = le16_to_cpu(res) | BMSR_LSTATUS;
+ else
+ res = le16_to_cpu(res) & ~BMSR_LSTATUS;
+
+ netdev_dbg(netdev, "sr_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
+ phy_id, loc, res);
+
+ return res;
+}
+
+static void sr_mdio_write(struct net_device *netdev, int phy_id, int loc,
+ int val)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ __le16 res = cpu_to_le16(val);
+
+ if (phy_id) {
+ netdev_dbg(netdev, "Only internal phy supported\n");
+ return;
+ }
+
+ netdev_dbg(netdev, "sr_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
+ phy_id, loc, val);
+
+ sr_share_write_word(dev, 1, loc, res);
+}
+
+static u32 sr9700_get_link(struct net_device *netdev)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ u8 value = 0;
+ int rc = 0;
+
+ /* Get the Link Status directly */
+ sr_read_reg(dev, NSR, &value);
+ if (value & NSR_LINKST)
+ rc = 1;
+
+ return rc;
+}
+
+static int sr9700_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+
+ return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+}
+
+static const struct ethtool_ops sr9700_ethtool_ops = {
+ .get_drvinfo = usbnet_get_drvinfo,
+ .get_link = sr9700_get_link,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_eeprom_len = sr9700_get_eeprom_len,
+ .get_eeprom = sr9700_get_eeprom,
+ .get_settings = usbnet_get_settings,
+ .set_settings = usbnet_set_settings,
+ .nway_reset = usbnet_nway_reset,
+};
+
+static void sr9700_set_multicast(struct net_device *netdev)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ /* We use the 20-byte dev->data for our 8-byte filter buffer
+ * to avoid allocating memory that is tricky to free later
+ */
+ u8 *hashes = (u8 *)&dev->data;
+ /* rx_ctl setting: enable, disable_long, disable_crc */
+ u8 rx_ctl = RCR_RXEN | RCR_DIS_CRC | RCR_DIS_LONG;
+
+ memset(hashes, 0x00, SR_MCAST_SIZE);
+ /* broadcast address */
+ hashes[SR_MCAST_SIZE - 1] |= SR_MCAST_ADDR_FLAG;
+ if (netdev->flags & IFF_PROMISC) {
+ rx_ctl |= RCR_PRMSC;
+ } else if (netdev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(netdev) > SR_MCAST_MAX) {
+ rx_ctl |= RCR_RUNT;
+ } else if (!netdev_mc_empty(netdev)) {
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_mc_addr(ha, netdev) {
+ u32 crc = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ hashes[crc >> 3] |= 1 << (crc & 0x7);
+ }
+ }
+
+ sr_write_async(dev, MAR, SR_MCAST_SIZE, hashes);
+ sr_write_reg_async(dev, RCR, rx_ctl);
+}
+
+static int sr9700_set_mac_address(struct net_device *netdev, void *p)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data)) {
+ netdev_err(netdev, "not setting invalid mac address %pM\n",
+ addr->sa_data);
+ return -EINVAL;
+ }
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ sr_write_async(dev, PAR, 6, netdev->dev_addr);
+
+ return 0;
+}
+
+static const struct net_device_ops sr9700_netdev_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_change_mtu = usbnet_change_mtu,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = sr9700_ioctl,
+ .ndo_set_rx_mode = sr9700_set_multicast,
+ .ndo_set_mac_address = sr9700_set_mac_address,
+};
+
+static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct net_device *netdev;
+ struct mii_if_info *mii;
+ int ret;
+
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ goto out;
+
+ netdev = dev->net;
+
+ netdev->netdev_ops = &sr9700_netdev_ops;
+ netdev->ethtool_ops = &sr9700_ethtool_ops;
+ netdev->hard_header_len += SR_TX_OVERHEAD;
+ dev->hard_mtu = netdev->mtu + netdev->hard_header_len;
+ /* the bulk-in buffer should preferably be at least 3K */
+ dev->rx_urb_size = 3072;
+
+ mii = &dev->mii;
+ mii->dev = netdev;
+ mii->mdio_read = sr_mdio_read;
+ mii->mdio_write = sr_mdio_write;
+ mii->phy_id_mask = 0x1f;
+ mii->reg_num_mask = 0x1f;
+
+ sr_write_reg(dev, NCR, NCR_RST);
+ udelay(20);
+
+ /* Read the MAC address.
+ * After power-on, the chip automatically reloads the MAC address from
+ * EEPROM into PAR. If no external EEPROM is present, a default MAC
+ * address is stored in PAR so the chip still works properly.
+ */
+ if (sr_read(dev, PAR, ETH_ALEN, netdev->dev_addr) < 0) {
+ netdev_err(netdev, "Error reading MAC address\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ /* power up and reset phy */
+ sr_write_reg(dev, PRR, PRR_PHY_RST);
+ /* at least 10ms; use 20ms to be safe */
+ mdelay(20);
+ sr_write_reg(dev, PRR, 0);
+ /* at least 1ms; use 2ms so the registers read back correctly */
+ udelay(2 * 1000);
+
+ /* receive broadcast packets */
+ sr9700_set_multicast(netdev);
+
+ sr_mdio_write(netdev, mii->phy_id, MII_BMCR, BMCR_RESET);
+ sr_mdio_write(netdev, mii->phy_id, MII_ADVERTISE, ADVERTISE_ALL |
+ ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
+ mii_nway_restart(mii);
+
+out:
+ return ret;
+}
+
+static int sr9700_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ struct sk_buff *sr_skb;
+ int len;
+
+ /* skb content (packets) format :
+ * p0 p1 p2 ...... pm
+ * / \
+ * / \
+ * / \
+ * / \
+ * p0b0 p0b1 p0b2 p0b3 ...... p0b(n-4) p0b(n-3)...p0bn
+ *
+ * p0 : packet 0
+ * p0b0 : packet 0 byte 0
+ *
+ * b0: rx status
+ * b1: packet length (incl crc) low
+ * b2: packet length (incl crc) high
+ * b3..n-4: packet data
+ * bn-3..bn: ethernet packet crc
+ */
+ if (unlikely(skb->len < SR_RX_OVERHEAD)) {
+ netdev_err(dev->net, "unexpected tiny rx frame\n");
+ return 0;
+ }
+
+ /* one skb may contain multiple packets */
+ while (skb->len > SR_RX_OVERHEAD) {
+ if (skb->data[0] != 0x40)
+ return 0;
+
+ /* ignore the CRC length */
+ len = (skb->data[1] | (skb->data[2] << 8)) - 4;
+
+ if (len > ETH_FRAME_LEN)
+ return 0;
+
+ /* the last packet of current skb */
+ if (skb->len == (len + SR_RX_OVERHEAD)) {
+ skb_pull(skb, 3);
+ skb->len = len;
+ skb_set_tail_pointer(skb, len);
+ skb->truesize = len + sizeof(struct sk_buff);
+ return 2;
+ }
+
+ /* skb_clone is used for address alignment */
+ sr_skb = skb_clone(skb, GFP_ATOMIC);
+ if (!sr_skb)
+ return 0;
+
+ sr_skb->len = len;
+ sr_skb->data = skb->data + 3;
+ skb_set_tail_pointer(sr_skb, len);
+ sr_skb->truesize = len + sizeof(struct sk_buff);
+ usbnet_skb_return(dev, sr_skb);
+
+ skb_pull(skb, len + SR_RX_OVERHEAD);
+ };
+
+ return 0;
+}
+
+static struct sk_buff *sr9700_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+{
+ int len;
+
+ /* SR9700 can only send out one ethernet packet at once.
+ *
+ * b0 b1 b2 ...... bn
+ *
+ * b0: packet length low
+ * b1: packet length high
+ * b2..bn: packet data
+ */
+
+ len = skb->len;
+
+ if (skb_headroom(skb) < SR_TX_OVERHEAD) {
+ struct sk_buff *skb2;
+
+ skb2 = skb_copy_expand(skb, SR_TX_OVERHEAD, 0, flags);
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+ if (!skb)
+ return NULL;
+ }
+
+ __skb_push(skb, SR_TX_OVERHEAD);
+
+ /* usbnet adds a pad byte if the length is a multiple of the USB
+ * packet size; if so, adjust the length value in the header accordingly
+ */
+ if ((skb->len % dev->maxpacket) == 0)
+ len++;
+
+ skb->data[0] = len;
+ skb->data[1] = len >> 8;
+
+ return skb;
+}
+
+static void sr9700_status(struct usbnet *dev, struct urb *urb)
+{
+ int link;
+ u8 *buf;
+
+ /* format:
+ * b0: net status
+ * b1: tx status 1
+ * b2: tx status 2
+ * b3: rx status
+ * b4: rx overflow
+ * b5: rx count
+ * b6: tx count
+ * b7: gpr
+ */
+
+ if (urb->actual_length < 8)
+ return;
+
+ buf = urb->transfer_buffer;
+
+ link = !!(buf[0] & 0x40);
+ if (netif_carrier_ok(dev->net) != link) {
+ usbnet_link_change(dev, link, 1);
+ netdev_dbg(dev->net, "Link Status is: %d\n", link);
+ }
+}
+
+static int sr9700_link_reset(struct usbnet *dev)
+{
+ struct ethtool_cmd ecmd;
+
+ mii_check_media(&dev->mii, 1, 1);
+ mii_ethtool_gset(&dev->mii, &ecmd);
+
+ netdev_dbg(dev->net, "link_reset() speed: %d duplex: %d\n",
+ ecmd.speed, ecmd.duplex);
+
+ return 0;
+}
+
+static const struct driver_info sr9700_driver_info = {
+ .description = "CoreChip SR9700 USB Ethernet",
+ .flags = FLAG_ETHER,
+ .bind = sr9700_bind,
+ .rx_fixup = sr9700_rx_fixup,
+ .tx_fixup = sr9700_tx_fixup,
+ .status = sr9700_status,
+ .link_reset = sr9700_link_reset,
+ .reset = sr9700_link_reset,
+};
+
+static const struct usb_device_id products[] = {
+ {
+ USB_DEVICE(0x0fe6, 0x9700), /* SR9700 device */
+ .driver_info = (unsigned long)&sr9700_driver_info,
+ },
+ {}, /* END */
+};
+
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver sr9700_usb_driver = {
+ .name = "sr9700",
+ .id_table = products,
+ .probe = usbnet_probe,
+ .disconnect = usbnet_disconnect,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+ .disable_hub_initiated_lpm = 1,
+};
+
+module_usb_driver(sr9700_usb_driver);
+
+MODULE_AUTHOR("liujl <liujunliang_ljl@163.com>");
+MODULE_DESCRIPTION("SR9700 one chip USB 1.1 USB to Ethernet device from http://www.corechip-sz.com/");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/sr9700.h b/drivers/net/usb/sr9700.h
new file mode 100644
index 00000000000..fd687c575e7
--- /dev/null
+++ b/drivers/net/usb/sr9700.h
@@ -0,0 +1,173 @@
+/*
+ * CoreChip-sz SR9700 one chip USB 1.1 Ethernet Devices
+ *
+ * Author : Liu Junliang <liujunliang_ljl@163.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#ifndef _SR9700_H
+#define _SR9700_H
+
+/* sr9700 spec. register table on Linux platform */
+
+/* Network Control Reg */
+#define NCR 0x00
+#define NCR_RST (1 << 0)
+#define NCR_LBK (3 << 1)
+#define NCR_FDX (1 << 3)
+#define NCR_WAKEEN (1 << 6)
+/* Network Status Reg */
+#define NSR 0x01
+#define NSR_RXRDY (1 << 0)
+#define NSR_RXOV (1 << 1)
+#define NSR_TX1END (1 << 2)
+#define NSR_TX2END (1 << 3)
+#define NSR_TXFULL (1 << 4)
+#define NSR_WAKEST (1 << 5)
+#define NSR_LINKST (1 << 6)
+#define NSR_SPEED (1 << 7)
+/* Tx Control Reg */
+#define TCR 0x02
+#define TCR_CRC_DIS (1 << 1)
+#define TCR_PAD_DIS (1 << 2)
+#define TCR_LC_CARE (1 << 3)
+#define TCR_CRS_CARE (1 << 4)
+#define TCR_EXCECM (1 << 5)
+#define TCR_LF_EN (1 << 6)
+/* Tx Status Reg for Packet Index 1 */
+#define TSR1 0x03
+#define TSR1_EC (1 << 2)
+#define TSR1_COL (1 << 3)
+#define TSR1_LC (1 << 4)
+#define TSR1_NC (1 << 5)
+#define TSR1_LOC (1 << 6)
+#define TSR1_TLF (1 << 7)
+/* Tx Status Reg for Packet Index 2 */
+#define TSR2 0x04
+#define TSR2_EC (1 << 2)
+#define TSR2_COL (1 << 3)
+#define TSR2_LC (1 << 4)
+#define TSR2_NC (1 << 5)
+#define TSR2_LOC (1 << 6)
+#define TSR2_TLF (1 << 7)
+/* Rx Control Reg*/
+#define RCR 0x05
+#define RCR_RXEN (1 << 0)
+#define RCR_PRMSC (1 << 1)
+#define RCR_RUNT (1 << 2)
+#define RCR_ALL (1 << 3)
+#define RCR_DIS_CRC (1 << 4)
+#define RCR_DIS_LONG (1 << 5)
+/* Rx Status Reg */
+#define RSR 0x06
+#define RSR_AE (1 << 2)
+#define RSR_MF (1 << 6)
+#define RSR_RF (1 << 7)
+/* Rx Overflow Counter Reg */
+#define ROCR 0x07
+#define ROCR_ROC (0x7F << 0)
+#define ROCR_RXFU (1 << 7)
+/* Back Pressure Threshold Reg */
+#define BPTR 0x08
+#define BPTR_JPT (0x0F << 0)
+#define BPTR_BPHW (0x0F << 4)
+/* Flow Control Threshold Reg */
+#define FCTR 0x09
+#define FCTR_LWOT (0x0F << 0)
+#define FCTR_HWOT (0x0F << 4)
+/* rx/tx Flow Control Reg */
+#define FCR 0x0A
+#define FCR_FLCE (1 << 0)
+#define FCR_BKPA (1 << 4)
+#define FCR_TXPEN (1 << 5)
+#define FCR_TXPF (1 << 6)
+#define FCR_TXP0 (1 << 7)
+/* Eeprom & Phy Control Reg */
+#define EPCR 0x0B
+#define EPCR_ERRE (1 << 0)
+#define EPCR_ERPRW (1 << 1)
+#define EPCR_ERPRR (1 << 2)
+#define EPCR_EPOS (1 << 3)
+#define EPCR_WEP (1 << 4)
+/* Eeprom & Phy Address Reg */
+#define EPAR 0x0C
+#define EPAR_EROA (0x3F << 0)
+#define EPAR_PHY_ADR_MASK (0x03 << 6)
+#define EPAR_PHY_ADR (0x01 << 6)
+/* Eeprom & Phy Data Reg */
+#define EPDR 0x0D /* 0x0D ~ 0x0E for Data Reg Low & High */
+/* Wakeup Control Reg */
+#define WCR 0x0F
+#define WCR_MAGICST (1 << 0)
+#define WCR_LINKST (1 << 2)
+#define WCR_MAGICEN (1 << 3)
+#define WCR_LINKEN (1 << 5)
+/* Physical Address Reg */
+#define PAR 0x10 /* 0x10 ~ 0x15 6 bytes for PAR */
+/* Multicast Address Reg */
+#define MAR 0x16 /* 0x16 ~ 0x1D 8 bytes for MAR */
+/* 0x1e unused */
+/* Phy Reset Reg */
+#define PRR 0x1F
+#define PRR_PHY_RST (1 << 0)
+/* Tx sdram Write Pointer Address Low */
+#define TWPAL 0x20
+/* Tx sdram Write Pointer Address High */
+#define TWPAH 0x21
+/* Tx sdram Read Pointer Address Low */
+#define TRPAL 0x22
+/* Tx sdram Read Pointer Address High */
+#define TRPAH 0x23
+/* Rx sdram Write Pointer Address Low */
+#define RWPAL 0x24
+/* Rx sdram Write Pointer Address High */
+#define RWPAH 0x25
+/* Rx sdram Read Pointer Address Low */
+#define RRPAL 0x26
+/* Rx sdram Read Pointer Address High */
+#define RRPAH 0x27
+/* Vendor ID register */
+#define VID 0x28 /* 0x28 ~ 0x29 2 bytes for VID */
+/* Product ID register */
+#define PID 0x2A /* 0x2A ~ 0x2B 2 bytes for PID */
+/* CHIP Revision register */
+#define CHIPR 0x2C
+/* 0x2D --> 0xEF unused */
+/* USB Device Address */
+#define USBDA 0xF0
+#define USBDA_USBFA (0x7F << 0)
+/* RX packet Counter Reg */
+#define RXC 0xF1
+/* Tx packet Counter & USB Status Reg */
+#define TXC_USBS 0xF2
+#define TXC_USBS_TXC0 (1 << 0)
+#define TXC_USBS_TXC1 (1 << 1)
+#define TXC_USBS_TXC2 (1 << 2)
+#define TXC_USBS_EP1RDY (1 << 5)
+#define TXC_USBS_SUSFLAG (1 << 6)
+#define TXC_USBS_RXFAULT (1 << 7)
+/* USB Control register */
+#define USBC 0xF4
+#define USBC_EP3NAK (1 << 4)
+#define USBC_EP3ACK (1 << 5)
+
+/* Register access commands and flags */
+#define SR_RD_REGS 0x00
+#define SR_WR_REGS 0x01
+#define SR_WR_REG 0x03
+#define SR_REQ_RD_REG (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
+#define SR_REQ_WR_REG (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
+
+/* parameters */
+#define SR_SHARE_TIMEOUT 1000
+#define SR_EEPROM_LEN 256
+#define SR_MCAST_SIZE 8
+#define SR_MCAST_ADDR_FLAG 0x80
+#define SR_MCAST_MAX 64
+#define SR_TX_OVERHEAD 2 /* 2-byte header */
+#define SR_RX_OVERHEAD 7 /* 3-byte header + 4-byte CRC tail */
+
+#endif /* _SR9700_H */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 06ee82f557d..7b331e613e0 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -59,15 +59,13 @@
* For high speed, each frame comfortably fits almost 36 max size
* Ethernet packets (so queues should be bigger).
*
- * REVISIT qlens should be members of 'struct usbnet'; the goal is to
- * let the USB host controller be busy for 5msec or more before an irq
- * is required, under load. Jumbograms change the equation.
+ * The goal is to let the USB host controller be busy for 5msec or
+ * more before an irq is required, under load. Jumbograms change
+ * the equation.
*/
-#define RX_MAX_QUEUE_MEMORY (60 * 1518)
-#define RX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
- (RX_MAX_QUEUE_MEMORY/(dev)->rx_urb_size) : 4)
-#define TX_QLEN(dev) (((dev)->udev->speed == USB_SPEED_HIGH) ? \
- (RX_MAX_QUEUE_MEMORY/(dev)->hard_mtu) : 4)
+#define MAX_QUEUE_MEMORY (60 * 1518)
+#define RX_QLEN(dev) ((dev)->rx_qlen)
+#define TX_QLEN(dev) ((dev)->tx_qlen)
// reawaken network queue this soon after stopping; else watchdog barks
#define TX_TIMEOUT_JIFFIES (5*HZ)
@@ -347,6 +345,31 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
}
EXPORT_SYMBOL_GPL(usbnet_skb_return);
+/* must be called if hard_mtu or rx_urb_size changed */
+void usbnet_update_max_qlen(struct usbnet *dev)
+{
+ enum usb_device_speed speed = dev->udev->speed;
+
+ switch (speed) {
+ case USB_SPEED_HIGH:
+ dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size;
+ dev->tx_qlen = MAX_QUEUE_MEMORY / dev->hard_mtu;
+ break;
+ case USB_SPEED_SUPER:
+ /*
+ * Don't take the default 5ms qlen for super-speed HCs, to
+ * save memory; iperf tests show a 2.5ms qlen works well.
+ */
+ dev->rx_qlen = 5 * MAX_QUEUE_MEMORY / dev->rx_urb_size;
+ dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu;
+ break;
+ default:
+ dev->rx_qlen = dev->tx_qlen = 4;
+ }
+}
+EXPORT_SYMBOL_GPL(usbnet_update_max_qlen);
+
/*-------------------------------------------------------------------------
*
@@ -375,6 +398,9 @@ int usbnet_change_mtu (struct net_device *net, int new_mtu)
usbnet_unlink_rx_urbs(dev);
}
+ /* max qlen depends on hard_mtu and rx_urb_size */
+ usbnet_update_max_qlen(dev);
+
return 0;
}
EXPORT_SYMBOL_GPL(usbnet_change_mtu);
@@ -843,6 +869,9 @@ int usbnet_open (struct net_device *net)
goto done;
}
+ /* hard_mtu or rx_urb_size may change in reset() */
+ usbnet_update_max_qlen(dev);
+
// insist peer be connected
if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval);
@@ -927,6 +956,9 @@ int usbnet_set_settings (struct net_device *net, struct ethtool_cmd *cmd)
if (dev->driver_info->link_reset)
dev->driver_info->link_reset(dev);
+ /* hard_mtu or rx_urb_size may change in link_reset() */
+ usbnet_update_max_qlen(dev);
+
return retval;
}
@@ -1020,6 +1052,9 @@ static void __handle_link_change(struct usbnet *dev)
tasklet_schedule(&dev->bh);
}
+ /* hard_mtu or rx_urb_size may change during link change */
+ usbnet_update_max_qlen(dev);
+
clear_bit(EVENT_LINK_CHANGE, &dev->flags);
}
@@ -1197,6 +1232,37 @@ EXPORT_SYMBOL_GPL(usbnet_tx_timeout);
/*-------------------------------------------------------------------------*/
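+/* Map a multi-fragment skb onto the URB's scatter-gather list so it can be
+ * transmitted without linearizing: returns 0 when the skb is linear and no
+ * sg list is needed, 1 on success, or -ENOMEM on allocation failure.
+ */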
+static int build_dma_sg(const struct sk_buff *skb, struct urb *urb)
+{
+ unsigned num_sgs, total_len = 0;
+ int i, s = 0;
+
+ num_sgs = skb_shinfo(skb)->nr_frags + 1;
+ if (num_sgs == 1)
+ return 0;
+
+ urb->sg = kmalloc(num_sgs * sizeof(struct scatterlist), GFP_ATOMIC);
+ if (!urb->sg)
+ return -ENOMEM;
+
+ urb->num_sgs = num_sgs;
+ sg_init_table(urb->sg, urb->num_sgs);
+
+ sg_set_buf(&urb->sg[s++], skb->data, skb_headlen(skb));
+ total_len += skb_headlen(skb);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ struct skb_frag_struct *f = &skb_shinfo(skb)->frags[i];
+
+ total_len += skb_frag_size(f);
+ sg_set_page(&urb->sg[i + s], f->page.p, f->size,
+ f->page_offset);
+ }
+ urb->transfer_buffer_length = total_len;
+
+ return 1;
+}
+
netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
struct net_device *net)
{
@@ -1223,7 +1289,6 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
goto drop;
}
}
- length = skb->len;
if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
netif_dbg(dev, tx_err, dev->net, "no urb\n");
@@ -1233,10 +1298,14 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
entry = (struct skb_data *) skb->cb;
entry->urb = urb;
entry->dev = dev;
- entry->length = length;
usb_fill_bulk_urb (urb, dev->udev, dev->out,
skb->data, skb->len, tx_complete, skb);
+ if (dev->can_dma_sg) {
+ if (build_dma_sg(skb, urb) < 0)
+ goto drop;
+ }
+ entry->length = length = urb->transfer_buffer_length;
/* don't assume the hardware handles USB_ZERO_PACKET
* NOTE: strictly conforming cdc-ether devices should expect
@@ -1305,7 +1374,10 @@ drop:
not_drop:
if (skb)
dev_kfree_skb_any (skb);
- usb_free_urb (urb);
+ if (urb) {
+ kfree(urb->sg);
+ usb_free_urb(urb);
+ }
} else
netif_dbg(dev, tx_queued, dev->net,
"> tx, len %d, type 0x%x\n", length, skb->protocol);
@@ -1356,6 +1428,7 @@ static void usbnet_bh (unsigned long param)
rx_process (dev, skb);
continue;
case tx_done:
+ kfree(entry->urb->sg);
case rx_cleanup:
usb_free_urb (entry->urb);
dev_kfree_skb (skb);
@@ -1594,11 +1667,18 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->rx_urb_size = dev->hard_mtu;
dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
+ /* let userspace know we have a random address */
+ if (ether_addr_equal(net->dev_addr, node_id))
+ net->addr_assign_type = NET_ADDR_RANDOM;
+
if ((dev->driver_info->flags & FLAG_WLAN) != 0)
SET_NETDEV_DEVTYPE(net, &wlan_type);
if ((dev->driver_info->flags & FLAG_WWAN) != 0)
SET_NETDEV_DEVTYPE(net, &wwan_type);
+ /* initialize max rx_qlen and tx_qlen */
+ usbnet_update_max_qlen(dev);
+
status = register_netdev (net);
if (status)
goto out4;
@@ -1689,6 +1769,7 @@ int usbnet_resume (struct usb_interface *intf)
retval = usb_submit_urb(res, GFP_ATOMIC);
if (retval < 0) {
dev_kfree_skb_any(skb);
+ kfree(res->sg);
usb_free_urb(res);
usb_autopm_put_interface_async(dev->intf);
} else {
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index da866523cf2..eee1f19ef1e 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -269,6 +269,7 @@ static void veth_setup(struct net_device *dev)
dev->ethtool_ops = &veth_ethtool_ops;
dev->features |= NETIF_F_LLTX;
dev->features |= VETH_FEATURES;
+ dev->vlan_features = dev->features;
dev->destructor = veth_dev_free;
dev->hw_features = VETH_FEATURES;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3d2a90a6264..defec2b3c5a 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -106,6 +106,9 @@ struct virtnet_info {
/* Has control virtqueue */
bool has_cvq;
+ /* Host can handle any s/g split between our header and packet data */
+ bool any_header_sg;
+
/* enable config space updates */
bool config_enable;
@@ -669,12 +672,28 @@ static void free_old_xmit_skbs(struct send_queue *sq)
static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
{
- struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
+ struct skb_vnet_hdr *hdr;
const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
struct virtnet_info *vi = sq->vq->vdev->priv;
unsigned num_sg;
+ unsigned hdr_len;
+ bool can_push;
pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
+ if (vi->mergeable_rx_bufs)
+ hdr_len = sizeof hdr->mhdr;
+ else
+ hdr_len = sizeof hdr->hdr;
+
+ can_push = vi->any_header_sg &&
+ !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
+ !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
+ /* Even if we can, don't push here yet, as that would skew the
+ * csum_start offset below. */
+ if (can_push)
+ hdr = (struct skb_vnet_hdr *)(skb->data - hdr_len);
+ else
+ hdr = skb_vnet_hdr(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
@@ -703,15 +722,18 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
}
- hdr->mhdr.num_buffers = 0;
-
- /* Encode metadata header at front. */
if (vi->mergeable_rx_bufs)
- sg_set_buf(sq->sg, &hdr->mhdr, sizeof hdr->mhdr);
- else
- sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr);
+ hdr->mhdr.num_buffers = 0;
- num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
+ if (can_push) {
+ __skb_push(skb, hdr_len);
+ num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
+ /* Pull header back to avoid skew in tx bytes calculations. */
+ __skb_pull(skb, hdr_len);
+ } else {
+ sg_set_buf(sq->sg, hdr, hdr_len);
+ num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
+ }
return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
}
@@ -1516,6 +1538,8 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
/* (!csum && gso) case will be fixed by register_netdev() */
}
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
+ dev->features |= NETIF_F_RXCSUM;
dev->vlan_features = dev->features;
@@ -1552,6 +1576,9 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
+ if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT))
+ vi->any_header_sg = true;
+
if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
vi->has_cvq = true;
@@ -1727,6 +1754,7 @@ static unsigned int features[] = {
VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
VIRTIO_NET_F_CTRL_MAC_ADDR,
+ VIRTIO_F_ANY_LAYOUT,
};
static struct virtio_driver virtio_net_driver = {
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 55a62cae2cb..7e2788c488e 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -313,10 +313,10 @@ vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
struct pci_dev *pdev)
{
if (tbi->map_type == VMXNET3_MAP_SINGLE)
- pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
+ dma_unmap_single(&pdev->dev, tbi->dma_addr, tbi->len,
PCI_DMA_TODEVICE);
else if (tbi->map_type == VMXNET3_MAP_PAGE)
- pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
+ dma_unmap_page(&pdev->dev, tbi->dma_addr, tbi->len,
PCI_DMA_TODEVICE);
else
BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);
@@ -429,25 +429,29 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
if (tq->tx_ring.base) {
- pci_free_consistent(adapter->pdev, tq->tx_ring.size *
- sizeof(struct Vmxnet3_TxDesc),
- tq->tx_ring.base, tq->tx_ring.basePA);
+ dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
+ sizeof(struct Vmxnet3_TxDesc),
+ tq->tx_ring.base, tq->tx_ring.basePA);
tq->tx_ring.base = NULL;
}
if (tq->data_ring.base) {
- pci_free_consistent(adapter->pdev, tq->data_ring.size *
- sizeof(struct Vmxnet3_TxDataDesc),
- tq->data_ring.base, tq->data_ring.basePA);
+ dma_free_coherent(&adapter->pdev->dev, tq->data_ring.size *
+ sizeof(struct Vmxnet3_TxDataDesc),
+ tq->data_ring.base, tq->data_ring.basePA);
tq->data_ring.base = NULL;
}
if (tq->comp_ring.base) {
- pci_free_consistent(adapter->pdev, tq->comp_ring.size *
- sizeof(struct Vmxnet3_TxCompDesc),
- tq->comp_ring.base, tq->comp_ring.basePA);
+ dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
+ sizeof(struct Vmxnet3_TxCompDesc),
+ tq->comp_ring.base, tq->comp_ring.basePA);
tq->comp_ring.base = NULL;
}
- kfree(tq->buf_info);
- tq->buf_info = NULL;
+ if (tq->buf_info) {
+ dma_free_coherent(&adapter->pdev->dev,
+ tq->tx_ring.size * sizeof(tq->buf_info[0]),
+ tq->buf_info, tq->buf_info_pa);
+ tq->buf_info = NULL;
+ }
}
@@ -496,37 +500,38 @@ static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
struct vmxnet3_adapter *adapter)
{
+ size_t sz;
+
BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
tq->comp_ring.base || tq->buf_info);
- tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
- * sizeof(struct Vmxnet3_TxDesc),
- &tq->tx_ring.basePA);
+ tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
+ tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc),
+ &tq->tx_ring.basePA, GFP_KERNEL);
if (!tq->tx_ring.base) {
netdev_err(adapter->netdev, "failed to allocate tx ring\n");
goto err;
}
- tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
- tq->data_ring.size *
- sizeof(struct Vmxnet3_TxDataDesc),
- &tq->data_ring.basePA);
+ tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
+ tq->data_ring.size * sizeof(struct Vmxnet3_TxDataDesc),
+ &tq->data_ring.basePA, GFP_KERNEL);
if (!tq->data_ring.base) {
netdev_err(adapter->netdev, "failed to allocate data ring\n");
goto err;
}
- tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
- tq->comp_ring.size *
- sizeof(struct Vmxnet3_TxCompDesc),
- &tq->comp_ring.basePA);
+ tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
+ tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc),
+ &tq->comp_ring.basePA, GFP_KERNEL);
if (!tq->comp_ring.base) {
netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
goto err;
}
- tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
- GFP_KERNEL);
+ sz = tq->tx_ring.size * sizeof(tq->buf_info[0]);
+ tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz,
+ &tq->buf_info_pa, GFP_KERNEL);
if (!tq->buf_info)
goto err;
@@ -578,7 +583,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
break;
}
- rbi->dma_addr = pci_map_single(adapter->pdev,
+ rbi->dma_addr = dma_map_single(
+ &adapter->pdev->dev,
rbi->skb->data, rbi->len,
PCI_DMA_FROMDEVICE);
} else {
@@ -595,7 +601,8 @@ vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
rq->stats.rx_buf_alloc_failure++;
break;
}
- rbi->dma_addr = pci_map_page(adapter->pdev,
+ rbi->dma_addr = dma_map_page(
+ &adapter->pdev->dev,
rbi->page, 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
} else {
@@ -705,7 +712,7 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
tbi = tq->buf_info + tq->tx_ring.next2fill;
tbi->map_type = VMXNET3_MAP_SINGLE;
- tbi->dma_addr = pci_map_single(adapter->pdev,
+ tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
skb->data + buf_offset, buf_size,
PCI_DMA_TODEVICE);
@@ -1221,7 +1228,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
goto rcd_done;
}
- pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
+ dma_unmap_single(&adapter->pdev->dev, rbi->dma_addr,
+ rbi->len,
PCI_DMA_FROMDEVICE);
#ifdef VMXNET3_RSS
@@ -1233,7 +1241,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
/* Immediate refill */
rbi->skb = new_skb;
- rbi->dma_addr = pci_map_single(adapter->pdev,
+ rbi->dma_addr = dma_map_single(&adapter->pdev->dev,
rbi->skb->data, rbi->len,
PCI_DMA_FROMDEVICE);
rxd->addr = cpu_to_le64(rbi->dma_addr);
@@ -1267,7 +1275,7 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
}
if (rcd->len) {
- pci_unmap_page(adapter->pdev,
+ dma_unmap_page(&adapter->pdev->dev,
rbi->dma_addr, rbi->len,
PCI_DMA_FROMDEVICE);
@@ -1276,7 +1284,8 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
/* Immediate refill */
rbi->page = new_page;
- rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
+ rbi->dma_addr = dma_map_page(&adapter->pdev->dev,
+ rbi->page,
0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
rxd->addr = cpu_to_le64(rbi->dma_addr);
@@ -1352,13 +1361,13 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
rq->buf_info[ring_idx][i].skb) {
- pci_unmap_single(adapter->pdev, rxd->addr,
+ dma_unmap_single(&adapter->pdev->dev, rxd->addr,
rxd->len, PCI_DMA_FROMDEVICE);
dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
rq->buf_info[ring_idx][i].skb = NULL;
} else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
rq->buf_info[ring_idx][i].page) {
- pci_unmap_page(adapter->pdev, rxd->addr,
+ dma_unmap_page(&adapter->pdev->dev, rxd->addr,
rxd->len, PCI_DMA_FROMDEVICE);
put_page(rq->buf_info[ring_idx][i].page);
rq->buf_info[ring_idx][i].page = NULL;
@@ -1400,25 +1409,31 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
}
- kfree(rq->buf_info[0]);
-
for (i = 0; i < 2; i++) {
if (rq->rx_ring[i].base) {
- pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
- * sizeof(struct Vmxnet3_RxDesc),
- rq->rx_ring[i].base,
- rq->rx_ring[i].basePA);
+ dma_free_coherent(&adapter->pdev->dev,
+ rq->rx_ring[i].size
+ * sizeof(struct Vmxnet3_RxDesc),
+ rq->rx_ring[i].base,
+ rq->rx_ring[i].basePA);
rq->rx_ring[i].base = NULL;
}
rq->buf_info[i] = NULL;
}
if (rq->comp_ring.base) {
- pci_free_consistent(adapter->pdev, rq->comp_ring.size *
- sizeof(struct Vmxnet3_RxCompDesc),
- rq->comp_ring.base, rq->comp_ring.basePA);
+ dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
+ * sizeof(struct Vmxnet3_RxCompDesc),
+ rq->comp_ring.base, rq->comp_ring.basePA);
rq->comp_ring.base = NULL;
}
+
+ if (rq->buf_info[0]) {
+ size_t sz = sizeof(struct vmxnet3_rx_buf_info) *
+ (rq->rx_ring[0].size + rq->rx_ring[1].size);
+ dma_free_coherent(&adapter->pdev->dev, sz, rq->buf_info[0],
+ rq->buf_info_pa);
+ }
}
@@ -1503,8 +1518,10 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
for (i = 0; i < 2; i++) {
sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
- rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
- &rq->rx_ring[i].basePA);
+ rq->rx_ring[i].base = dma_alloc_coherent(
+ &adapter->pdev->dev, sz,
+ &rq->rx_ring[i].basePA,
+ GFP_KERNEL);
if (!rq->rx_ring[i].base) {
netdev_err(adapter->netdev,
"failed to allocate rx ring %d\n", i);
@@ -1513,8 +1530,9 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
}
sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
- rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
- &rq->comp_ring.basePA);
+ rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
+ &rq->comp_ring.basePA,
+ GFP_KERNEL);
if (!rq->comp_ring.base) {
netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
goto err;
@@ -1522,7 +1540,8 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
rq->rx_ring[1].size);
- bi = kzalloc(sz, GFP_KERNEL);
+ bi = dma_zalloc_coherent(&adapter->pdev->dev, sz, &rq->buf_info_pa,
+ GFP_KERNEL);
if (!bi)
goto err;
@@ -2005,6 +2024,7 @@ vmxnet3_set_mc(struct net_device *netdev)
struct Vmxnet3_RxFilterConf *rxConf =
&adapter->shared->devRead.rxFilterConf;
u8 *new_table = NULL;
+ dma_addr_t new_table_pa = 0;
u32 new_mode = VMXNET3_RXM_UCAST;
if (netdev->flags & IFF_PROMISC) {
@@ -2028,8 +2048,12 @@ vmxnet3_set_mc(struct net_device *netdev)
new_mode |= VMXNET3_RXM_MCAST;
rxConf->mfTableLen = cpu_to_le16(
netdev_mc_count(netdev) * ETH_ALEN);
- rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
- new_table));
+ new_table_pa = dma_map_single(
+ &adapter->pdev->dev,
+ new_table,
+ rxConf->mfTableLen,
+ PCI_DMA_TODEVICE);
+ rxConf->mfTablePA = cpu_to_le64(new_table_pa);
} else {
netdev_info(netdev, "failed to copy mcast list"
", setting ALL_MULTI\n");
@@ -2056,7 +2080,11 @@ vmxnet3_set_mc(struct net_device *netdev)
VMXNET3_CMD_UPDATE_MAC_FILTERS);
spin_unlock_irqrestore(&adapter->cmd_lock, flags);
- kfree(new_table);
+ if (new_table) {
+ dma_unmap_single(&adapter->pdev->dev, new_table_pa,
+ rxConf->mfTableLen, PCI_DMA_TODEVICE);
+ kfree(new_table);
+ }
}
void
@@ -2096,7 +2124,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
- devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
+ devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
/* set up feature flags */
@@ -2125,7 +2153,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
- tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info));
+ tqc->ddPA = cpu_to_le64(tq->buf_info_pa);
tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
@@ -2143,8 +2171,7 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
- rqc->ddPA = cpu_to_le64(virt_to_phys(
- rq->buf_info));
+ rqc->ddPA = cpu_to_le64(rq->buf_info_pa);
rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
@@ -2184,8 +2211,9 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
i, adapter->num_rx_queues);
devRead->rssConfDesc.confVer = 1;
- devRead->rssConfDesc.confLen = sizeof(*rssConf);
- devRead->rssConfDesc.confPA = virt_to_phys(rssConf);
+ devRead->rssConfDesc.confLen = cpu_to_le32(sizeof(*rssConf));
+ devRead->rssConfDesc.confPA =
+ cpu_to_le64(adapter->rss_conf_pa);
}
#endif /* VMXNET3_RSS */
@@ -2948,9 +2976,13 @@ vmxnet3_probe_device(struct pci_dev *pdev,
adapter->pdev = pdev;
spin_lock_init(&adapter->cmd_lock);
- adapter->shared = pci_alloc_consistent(adapter->pdev,
- sizeof(struct Vmxnet3_DriverShared),
- &adapter->shared_pa);
+ adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
+ sizeof(struct vmxnet3_adapter),
+ PCI_DMA_TODEVICE);
+ adapter->shared = dma_alloc_coherent(
+ &adapter->pdev->dev,
+ sizeof(struct Vmxnet3_DriverShared),
+ &adapter->shared_pa, GFP_KERNEL);
if (!adapter->shared) {
dev_err(&pdev->dev, "Failed to allocate memory\n");
err = -ENOMEM;
@@ -2963,8 +2995,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
- adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
- &adapter->queue_desc_pa);
+ adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
+ &adapter->queue_desc_pa,
+ GFP_KERNEL);
if (!adapter->tqd_start) {
dev_err(&pdev->dev, "Failed to allocate memory\n");
@@ -2974,7 +3007,10 @@ vmxnet3_probe_device(struct pci_dev *pdev,
adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
adapter->num_tx_queues);
- adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
+ adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
+ sizeof(struct Vmxnet3_PMConf),
+ &adapter->pm_conf_pa,
+ GFP_KERNEL);
if (adapter->pm_conf == NULL) {
err = -ENOMEM;
goto err_alloc_pm;
@@ -2982,7 +3018,10 @@ vmxnet3_probe_device(struct pci_dev *pdev,
#ifdef VMXNET3_RSS
- adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
+ adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
+ sizeof(struct UPT1_RSSConf),
+ &adapter->rss_conf_pa,
+ GFP_KERNEL);
if (adapter->rss_conf == NULL) {
err = -ENOMEM;
goto err_alloc_rss;
@@ -3077,17 +3116,22 @@ err_ver:
vmxnet3_free_pci_resources(adapter);
err_alloc_pci:
#ifdef VMXNET3_RSS
- kfree(adapter->rss_conf);
+ dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
+ adapter->rss_conf, adapter->rss_conf_pa);
err_alloc_rss:
#endif
- kfree(adapter->pm_conf);
+ dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
+ adapter->pm_conf, adapter->pm_conf_pa);
err_alloc_pm:
- pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
- adapter->queue_desc_pa);
+ dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
+ adapter->queue_desc_pa);
err_alloc_queue_desc:
- pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
- adapter->shared, adapter->shared_pa);
+ dma_free_coherent(&adapter->pdev->dev,
+ sizeof(struct Vmxnet3_DriverShared),
+ adapter->shared, adapter->shared_pa);
err_alloc_shared:
+ dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
+ sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
return err;
@@ -3118,16 +3162,21 @@ vmxnet3_remove_device(struct pci_dev *pdev)
vmxnet3_free_intr_resources(adapter);
vmxnet3_free_pci_resources(adapter);
#ifdef VMXNET3_RSS
- kfree(adapter->rss_conf);
+ dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
+ adapter->rss_conf, adapter->rss_conf_pa);
#endif
- kfree(adapter->pm_conf);
+ dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
+ adapter->pm_conf, adapter->pm_conf_pa);
size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
- pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
- adapter->queue_desc_pa);
- pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
- adapter->shared, adapter->shared_pa);
+ dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
+ adapter->queue_desc_pa);
+ dma_free_coherent(&adapter->pdev->dev,
+ sizeof(struct Vmxnet3_DriverShared),
+ adapter->shared, adapter->shared_pa);
+ dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
+ sizeof(struct vmxnet3_adapter), PCI_DMA_TODEVICE);
free_netdev(netdev);
}
@@ -3227,8 +3276,8 @@ skip_arp:
adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
*pmConf));
- adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
- pmConf));
+ adapter->shared->devRead.pmConfDesc.confPA =
+ cpu_to_le64(adapter->pm_conf_pa);
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -3265,8 +3314,8 @@ vmxnet3_resume(struct device *device)
adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
*pmConf));
- adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
- pmConf));
+ adapter->shared->devRead.pmConfDesc.confPA =
+ cpu_to_le64(adapter->pm_conf_pa);
netif_device_attach(netdev);
pci_set_power_state(pdev, PCI_D0);
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 35418146fa1..a03f358fd58 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -70,10 +70,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.1.30.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.2.0.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01011E00
+#define VMXNET3_DRIVER_VERSION_NUM 0x01020000
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
@@ -229,6 +229,7 @@ struct vmxnet3_tx_queue {
spinlock_t tx_lock;
struct vmxnet3_cmd_ring tx_ring;
struct vmxnet3_tx_buf_info *buf_info;
+ dma_addr_t buf_info_pa;
struct vmxnet3_tx_data_ring data_ring;
struct vmxnet3_comp_ring comp_ring;
struct Vmxnet3_TxQueueCtrl *shared;
@@ -277,6 +278,7 @@ struct vmxnet3_rx_queue {
u32 qid; /* rqID in RCD for buffer from 1st ring */
u32 qid2; /* rqID in RCD for buffer from 2nd ring */
struct vmxnet3_rx_buf_info *buf_info[2];
+ dma_addr_t buf_info_pa;
struct Vmxnet3_RxQueueCtrl *shared;
struct vmxnet3_rq_driver_stats stats;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
@@ -353,6 +355,10 @@ struct vmxnet3_adapter {
unsigned long state; /* VMXNET3_STATE_BIT_xxx */
int share_intr;
+
+ dma_addr_t adapter_pa;
+ dma_addr_t pm_conf_pa;
+ dma_addr_t rss_conf_pa;
};
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index a5ba8dd7e6b..bf64b4191dc 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -6,9 +6,6 @@
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
- *
- * TODO
- * - IPv6 (not in RFC)
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -27,6 +24,7 @@
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
#include <linux/hash.h>
#include <linux/ethtool.h>
#include <net/arp.h>
@@ -41,6 +39,13 @@
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <net/vxlan.h>
+#if IS_ENABLED(CONFIG_IPV6)
+#include <net/ipv6.h>
+#include <net/addrconf.h>
+#include <net/ip6_tunnel.h>
+#include <net/ip6_checksum.h>
+#endif
#define VXLAN_VERSION "0.1"
@@ -57,6 +62,9 @@
#define VXLAN_VID_MASK (VXLAN_N_VID - 1)
/* IP header + UDP + VXLAN + Ethernet header */
#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
+/* IPv6 header + UDP + VXLAN + Ethernet header */
+#define VXLAN6_HEADROOM (40 + 8 + 8 + 14)
+#define VXLAN_HLEN (sizeof(struct udphdr) + sizeof(struct vxlanhdr))
#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
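
The new headroom constants are plain sums of the fixed outer headers the tunnel prepends: IPv4 (20) or IPv6 (40), UDP (8), VXLAN (8) and the inner Ethernet header (14). A standalone compile-time check of that arithmetic, with the sizes written out numerically instead of taken from kernel structs:

/* Standalone sketch; 20/40/8/8/14 are the usual fixed header sizes for
 * IPv4, IPv6, UDP, VXLAN and Ethernet respectively. */
#define DEMO_VXLAN_HEADROOM   (20 + 8 + 8 + 14)	/* 50 bytes */
#define DEMO_VXLAN6_HEADROOM  (40 + 8 + 8 + 14)	/* 70 bytes */

_Static_assert(DEMO_VXLAN_HEADROOM == 50, "IPv4 VXLAN headroom");
_Static_assert(DEMO_VXLAN6_HEADROOM == 70, "IPv6 VXLAN headroom");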
@@ -82,16 +90,6 @@ static int vxlan_net_id;
static const u8 all_zeros_mac[ETH_ALEN];
-/* per UDP socket information */
-struct vxlan_sock {
- struct hlist_node hlist;
- struct rcu_head rcu;
- struct work_struct del_work;
- atomic_t refcnt;
- struct socket *sock;
- struct hlist_head vni_list[VNI_HASH_SIZE];
-};
-
/* per-network namespace private data for this module */
struct vxlan_net {
struct list_head vxlan_list;
@@ -99,8 +97,14 @@ struct vxlan_net {
spinlock_t sock_lock;
};
+union vxlan_addr {
+ struct sockaddr_in sin;
+ struct sockaddr_in6 sin6;
+ struct sockaddr sa;
+};
+
struct vxlan_rdst {
- __be32 remote_ip;
+ union vxlan_addr remote_ip;
__be16 remote_port;
u32 remote_vni;
u32 remote_ifindex;
@@ -127,7 +131,7 @@ struct vxlan_dev {
struct vxlan_sock *vn_sock; /* listening socket */
struct net_device *dev;
struct vxlan_rdst default_dst; /* default destination */
- __be32 saddr; /* source address */
+ union vxlan_addr saddr; /* source address */
__be16 dst_port;
__u16 port_min; /* source port range */
__u16 port_max;
@@ -136,7 +140,8 @@ struct vxlan_dev {
u32 flags; /* VXLAN_F_* below */
struct work_struct sock_work;
- struct work_struct igmp_work;
+ struct work_struct igmp_join;
+ struct work_struct igmp_leave;
unsigned long age_interval;
struct timer_list age_timer;
@@ -152,6 +157,7 @@ struct vxlan_dev {
#define VXLAN_F_RSC 0x04
#define VXLAN_F_L2MISS 0x08
#define VXLAN_F_L3MISS 0x10
+#define VXLAN_F_IPV6 0x20 /* internal flag */
/* salt for hash table */
static u32 vxlan_salt __read_mostly;
@@ -159,6 +165,96 @@ static struct workqueue_struct *vxlan_wq;
static void vxlan_sock_work(struct work_struct *work);
+#if IS_ENABLED(CONFIG_IPV6)
+static inline
+bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
+{
+ if (a->sa.sa_family != b->sa.sa_family)
+ return false;
+ if (a->sa.sa_family == AF_INET6)
+ return ipv6_addr_equal(&a->sin6.sin6_addr, &b->sin6.sin6_addr);
+ else
+ return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
+}
+
+static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
+{
+ if (ipa->sa.sa_family == AF_INET6)
+ return ipv6_addr_any(&ipa->sin6.sin6_addr);
+ else
+ return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
+}
+
+static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
+{
+ if (ipa->sa.sa_family == AF_INET6)
+ return ipv6_addr_is_multicast(&ipa->sin6.sin6_addr);
+ else
+ return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
+}
+
+static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
+{
+ if (nla_len(nla) >= sizeof(struct in6_addr)) {
+ nla_memcpy(&ip->sin6.sin6_addr, nla, sizeof(struct in6_addr));
+ ip->sa.sa_family = AF_INET6;
+ return 0;
+ } else if (nla_len(nla) >= sizeof(__be32)) {
+ ip->sin.sin_addr.s_addr = nla_get_be32(nla);
+ ip->sa.sa_family = AF_INET;
+ return 0;
+ } else {
+ return -EAFNOSUPPORT;
+ }
+}
+
+static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
+ const union vxlan_addr *ip)
+{
+ if (ip->sa.sa_family == AF_INET6)
+ return nla_put(skb, attr, sizeof(struct in6_addr), &ip->sin6.sin6_addr);
+ else
+ return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
+}
+
+#else /* !CONFIG_IPV6 */
+
+static inline
+bool vxlan_addr_equal(const union vxlan_addr *a, const union vxlan_addr *b)
+{
+ return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
+}
+
+static inline bool vxlan_addr_any(const union vxlan_addr *ipa)
+{
+ return ipa->sin.sin_addr.s_addr == htonl(INADDR_ANY);
+}
+
+static inline bool vxlan_addr_multicast(const union vxlan_addr *ipa)
+{
+ return IN_MULTICAST(ntohl(ipa->sin.sin_addr.s_addr));
+}
+
+static int vxlan_nla_get_addr(union vxlan_addr *ip, struct nlattr *nla)
+{
+ if (nla_len(nla) >= sizeof(struct in6_addr)) {
+ return -EAFNOSUPPORT;
+ } else if (nla_len(nla) >= sizeof(__be32)) {
+ ip->sin.sin_addr.s_addr = nla_get_be32(nla);
+ ip->sa.sa_family = AF_INET;
+ return 0;
+ } else {
+ return -EAFNOSUPPORT;
+ }
+}
+
+static int vxlan_nla_put_addr(struct sk_buff *skb, int attr,
+ const union vxlan_addr *ip)
+{
+ return nla_put_be32(skb, attr, ip->sin.sin_addr.s_addr);
+}
+#endif
+
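
From here on every address travels as a union vxlan_addr and is dispatched on sa.sa_family, with the !CONFIG_IPV6 build collapsing each helper to its IPv4 branch. A small userspace sketch of the same discriminated-union pattern (illustrative demo_* names and standard socket headers, not the kernel helpers themselves):

#include <stdbool.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

union demo_addr {
	struct sockaddr_in	sin;
	struct sockaddr_in6	sin6;
	struct sockaddr		sa;
};

static bool demo_addr_equal(const union demo_addr *a, const union demo_addr *b)
{
	if (a->sa.sa_family != b->sa.sa_family)
		return false;
	if (a->sa.sa_family == AF_INET6)
		return memcmp(&a->sin6.sin6_addr, &b->sin6.sin6_addr,
			      sizeof(struct in6_addr)) == 0;
	return a->sin.sin_addr.s_addr == b->sin.sin_addr.s_addr;
}

static bool demo_addr_any(const union demo_addr *a)
{
	if (a->sa.sa_family == AF_INET6)
		return IN6_IS_ADDR_UNSPECIFIED(&a->sin6.sin6_addr);
	return a->sin.sin_addr.s_addr == htonl(INADDR_ANY);
}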
/* Virtual Network hash table head */
static inline struct hlist_head *vni_head(struct vxlan_sock *vs, u32 id)
{
@@ -176,13 +272,18 @@ static inline struct hlist_head *vs_head(struct net *net, __be16 port)
/* First remote destination for a forwarding entry.
* Guaranteed to be non-NULL because remotes are never deleted.
*/
-static inline struct vxlan_rdst *first_remote(struct vxlan_fdb *fdb)
+static inline struct vxlan_rdst *first_remote_rcu(struct vxlan_fdb *fdb)
{
- return list_first_or_null_rcu(&fdb->remotes, struct vxlan_rdst, list);
+ return list_entry_rcu(fdb->remotes.next, struct vxlan_rdst, list);
+}
+
+static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
+{
+ return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
}
/* Find VXLAN socket based on network namespace and UDP port */
-static struct vxlan_sock *vxlan_find_port(struct net *net, __be16 port)
+static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port)
{
struct vxlan_sock *vs;
@@ -193,16 +294,10 @@ static struct vxlan_sock *vxlan_find_port(struct net *net, __be16 port)
return NULL;
}
-/* Look up VNI in a per net namespace table */
-static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
+static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
{
- struct vxlan_sock *vs;
struct vxlan_dev *vxlan;
- vs = vxlan_find_port(net, port);
- if (!vs)
- return NULL;
-
hlist_for_each_entry_rcu(vxlan, vni_head(vs, id), hlist) {
if (vxlan->default_dst.remote_vni == id)
return vxlan;
@@ -211,6 +306,18 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
return NULL;
}
+/* Look up VNI in a per net namespace table */
+static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port)
+{
+ struct vxlan_sock *vs;
+
+ vs = vxlan_find_sock(net, port);
+ if (!vs)
+ return NULL;
+
+ return vxlan_vs_find_vni(vs, id);
+}
+
/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
const struct vxlan_fdb *fdb,
@@ -234,7 +341,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
if (type == RTM_GETNEIGH) {
ndm->ndm_family = AF_INET;
- send_ip = rdst->remote_ip != htonl(INADDR_ANY);
+ send_ip = !vxlan_addr_any(&rdst->remote_ip);
send_eth = !is_zero_ether_addr(fdb->eth_addr);
} else
ndm->ndm_family = AF_BRIDGE;
@@ -246,7 +353,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
goto nla_put_failure;
- if (send_ip && nla_put_be32(skb, NDA_DST, rdst->remote_ip))
+ if (send_ip && vxlan_nla_put_addr(skb, NDA_DST, &rdst->remote_ip))
goto nla_put_failure;
if (rdst->remote_port && rdst->remote_port != vxlan->dst_port &&
@@ -278,7 +385,7 @@ static inline size_t vxlan_nlmsg_size(void)
{
return NLMSG_ALIGN(sizeof(struct ndmsg))
+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
- + nla_total_size(sizeof(__be32)) /* NDA_DST */
+ + nla_total_size(sizeof(struct in6_addr)) /* NDA_DST */
+ nla_total_size(sizeof(__be16)) /* NDA_PORT */
+ nla_total_size(sizeof(__be32)) /* NDA_VNI */
+ nla_total_size(sizeof(__u32)) /* NDA_IFINDEX */
@@ -296,7 +403,8 @@ static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
if (skb == NULL)
goto errout;
- err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0, first_remote(fdb));
+ err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0,
+ first_remote_rtnl(fdb));
if (err < 0) {
/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
@@ -311,14 +419,14 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
-static void vxlan_ip_miss(struct net_device *dev, __be32 ipa)
+static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb f = {
.state = NUD_STALE,
};
struct vxlan_rdst remote = {
- .remote_ip = ipa, /* goes to NDA_DST */
+ .remote_ip = *ipa, /* goes to NDA_DST */
.remote_vni = VXLAN_N_VID,
};
@@ -370,7 +478,7 @@ static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
struct vxlan_fdb *f;
hlist_for_each_entry_rcu(f, head, hlist) {
- if (compare_ether_addr(mac, f->eth_addr) == 0)
+ if (ether_addr_equal(mac, f->eth_addr))
return f;
}
@@ -391,13 +499,13 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
/* caller should hold vxlan->hash_lock */
static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
- __be32 ip, __be16 port,
+ union vxlan_addr *ip, __be16 port,
__u32 vni, __u32 ifindex)
{
struct vxlan_rdst *rd;
list_for_each_entry(rd, &f->remotes, list) {
- if (rd->remote_ip == ip &&
+ if (vxlan_addr_equal(&rd->remote_ip, ip) &&
rd->remote_port == port &&
rd->remote_vni == vni &&
rd->remote_ifindex == ifindex)
@@ -407,9 +515,29 @@ static struct vxlan_rdst *vxlan_fdb_find_rdst(struct vxlan_fdb *f,
return NULL;
}
+/* Replace destination of unicast mac */
+static int vxlan_fdb_replace(struct vxlan_fdb *f,
+ union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
+{
+ struct vxlan_rdst *rd;
+
+ rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
+ if (rd)
+ return 0;
+
+ rd = list_first_entry_or_null(&f->remotes, struct vxlan_rdst, list);
+ if (!rd)
+ return 0;
+ rd->remote_ip = *ip;
+ rd->remote_port = port;
+ rd->remote_vni = vni;
+ rd->remote_ifindex = ifindex;
+ return 1;
+}
+
/* Add/update destinations for multicast */
static int vxlan_fdb_append(struct vxlan_fdb *f,
- __be32 ip, __be16 port, __u32 vni, __u32 ifindex)
+ union vxlan_addr *ip, __be16 port, __u32 vni, __u32 ifindex)
{
struct vxlan_rdst *rd;
@@ -420,7 +548,7 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
rd = kmalloc(sizeof(*rd), GFP_ATOMIC);
if (rd == NULL)
return -ENOBUFS;
- rd->remote_ip = ip;
+ rd->remote_ip = *ip;
rd->remote_port = port;
rd->remote_vni = vni;
rd->remote_ifindex = ifindex;
@@ -430,9 +558,43 @@ static int vxlan_fdb_append(struct vxlan_fdb *f,
return 1;
}
+/* Notify netdevs that UDP port started listening */
+static void vxlan_notify_add_rx_port(struct sock *sk)
+{
+ struct net_device *dev;
+ struct net *net = sock_net(sk);
+ sa_family_t sa_family = sk->sk_family;
+ u16 port = htons(inet_sk(sk)->inet_sport);
+
+ rcu_read_lock();
+ for_each_netdev_rcu(net, dev) {
+ if (dev->netdev_ops->ndo_add_vxlan_port)
+ dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
+ port);
+ }
+ rcu_read_unlock();
+}
+
+/* Notify netdevs that UDP port is no longer listening */
+static void vxlan_notify_del_rx_port(struct sock *sk)
+{
+ struct net_device *dev;
+ struct net *net = sock_net(sk);
+ sa_family_t sa_family = sk->sk_family;
+ u16 port = htons(inet_sk(sk)->inet_sport);
+
+ rcu_read_lock();
+ for_each_netdev_rcu(net, dev) {
+ if (dev->netdev_ops->ndo_del_vxlan_port)
+ dev->netdev_ops->ndo_del_vxlan_port(dev, sa_family,
+ port);
+ }
+ rcu_read_unlock();
+}
+
/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
- const u8 *mac, __be32 ip,
+ const u8 *mac, union vxlan_addr *ip,
__u16 state, __u16 flags,
__be16 port, __u32 vni, __u32 ifindex,
__u8 ndm_flags)
@@ -457,6 +619,19 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
f->updated = jiffies;
notify = 1;
}
+ if ((flags & NLM_F_REPLACE)) {
+ /* Only change unicasts */
+ if (!(is_multicast_ether_addr(f->eth_addr) ||
+ is_zero_ether_addr(f->eth_addr))) {
+ int rc = vxlan_fdb_replace(f, ip, port, vni,
+ ifindex);
+
+ if (rc < 0)
+ return rc;
+ notify |= rc;
+ } else
+ return -EOPNOTSUPP;
+ }
if ((flags & NLM_F_APPEND) &&
(is_multicast_ether_addr(f->eth_addr) ||
is_zero_ether_addr(f->eth_addr))) {
@@ -473,7 +648,12 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
return -ENOSPC;
- netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
+ /* Disallow replace to add a multicast entry */
+ if ((flags & NLM_F_REPLACE) &&
+ (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
+ return -EOPNOTSUPP;
+
+ netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
f = kmalloc(sizeof(*f), GFP_ATOMIC);
if (!f)
return -ENOMEM;
@@ -498,12 +678,6 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
return 0;
}
-static void vxlan_fdb_free_rdst(struct rcu_head *head)
-{
- struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu);
- kfree(rd);
-}
-
static void vxlan_fdb_free(struct rcu_head *head)
{
struct vxlan_fdb *f = container_of(head, struct vxlan_fdb, rcu);
@@ -527,17 +701,26 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
}
static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan,
- __be32 *ip, __be16 *port, u32 *vni, u32 *ifindex)
+ union vxlan_addr *ip, __be16 *port, u32 *vni, u32 *ifindex)
{
struct net *net = dev_net(vxlan->dev);
+ int err;
if (tb[NDA_DST]) {
- if (nla_len(tb[NDA_DST]) != sizeof(__be32))
- return -EAFNOSUPPORT;
-
- *ip = nla_get_be32(tb[NDA_DST]);
+ err = vxlan_nla_get_addr(ip, tb[NDA_DST]);
+ if (err)
+ return err;
} else {
- *ip = htonl(INADDR_ANY);
+ union vxlan_addr *remote = &vxlan->default_dst.remote_ip;
+ if (remote->sa.sa_family == AF_INET) {
+ ip->sin.sin_addr.s_addr = htonl(INADDR_ANY);
+ ip->sa.sa_family = AF_INET;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ ip->sin6.sin6_addr = in6addr_any;
+ ip->sa.sa_family = AF_INET6;
+#endif
+ }
}
if (tb[NDA_PORT]) {
@@ -580,7 +763,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
{
struct vxlan_dev *vxlan = netdev_priv(dev);
/* struct net *net = dev_net(vxlan->dev); */
- __be32 ip;
+ union vxlan_addr ip;
__be16 port;
u32 vni, ifindex;
int err;
@@ -599,7 +782,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return err;
spin_lock_bh(&vxlan->hash_lock);
- err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags,
+ err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
port, vni, ifindex, ndm->ndm_flags);
spin_unlock_bh(&vxlan->hash_lock);
@@ -614,7 +797,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
struct vxlan_rdst *rd = NULL;
- __be32 ip;
+ union vxlan_addr ip;
__be16 port;
u32 vni, ifindex;
int err;
@@ -630,8 +813,8 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
if (!f)
goto out;
- if (ip != htonl(INADDR_ANY)) {
- rd = vxlan_fdb_find_rdst(f, ip, port, vni, ifindex);
+ if (!vxlan_addr_any(&ip)) {
+ rd = vxlan_fdb_find_rdst(f, &ip, port, vni, ifindex);
if (!rd)
goto out;
}
@@ -643,7 +826,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
*/
if (rd && !list_is_singular(&f->remotes)) {
list_del_rcu(&rd->list);
- call_rcu(&rd->rcu, vxlan_fdb_free_rdst);
+ kfree_rcu(rd, rcu);
goto out;
}
@@ -694,16 +877,16 @@ out:
* Return true if packet is bogus and should be dropped.
*/
static bool vxlan_snoop(struct net_device *dev,
- __be32 src_ip, const u8 *src_mac)
+ union vxlan_addr *src_ip, const u8 *src_mac)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_fdb *f;
f = vxlan_find_mac(vxlan, src_mac);
if (likely(f)) {
- struct vxlan_rdst *rdst = first_remote(f);
+ struct vxlan_rdst *rdst = first_remote_rcu(f);
- if (likely(rdst->remote_ip == src_ip))
+ if (likely(vxlan_addr_equal(&rdst->remote_ip, src_ip)))
return false;
/* Don't migrate static entries, drop packets */
@@ -712,10 +895,10 @@ static bool vxlan_snoop(struct net_device *dev,
if (net_ratelimit())
netdev_info(dev,
- "%pM migrated from %pI4 to %pI4\n",
+ "%pM migrated from %pIS to %pIS\n",
-	src_mac, &rdst->remote_ip, &src_ip);
+	src_mac, &rdst->remote_ip, src_ip);
- rdst->remote_ip = src_ip;
+ rdst->remote_ip = *src_ip;
f->updated = jiffies;
vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);
} else {
@@ -736,9 +919,8 @@ static bool vxlan_snoop(struct net_device *dev,
return false;
}
-
/* See if multicast group is already in use by another ID */
-static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip)
+static bool vxlan_group_used(struct vxlan_net *vn, union vxlan_addr *remote_ip)
{
struct vxlan_dev *vxlan;
@@ -746,7 +928,8 @@ static bool vxlan_group_used(struct vxlan_net *vn, __be32 remote_ip)
if (!netif_running(vxlan->dev))
continue;
- if (vxlan->default_dst.remote_ip == remote_ip)
+ if (vxlan_addr_equal(&vxlan->default_dst.remote_ip,
+ remote_ip))
return true;
}
@@ -758,63 +941,101 @@ static void vxlan_sock_hold(struct vxlan_sock *vs)
atomic_inc(&vs->refcnt);
}
-static void vxlan_sock_release(struct vxlan_net *vn, struct vxlan_sock *vs)
+void vxlan_sock_release(struct vxlan_sock *vs)
{
+ struct sock *sk = vs->sock->sk;
+ struct net *net = sock_net(sk);
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+
if (!atomic_dec_and_test(&vs->refcnt))
return;
spin_lock(&vn->sock_lock);
hlist_del_rcu(&vs->hlist);
+ smp_wmb();
+ vs->sock->sk->sk_user_data = NULL;
+ vxlan_notify_del_rx_port(sk);
spin_unlock(&vn->sock_lock);
queue_work(vxlan_wq, &vs->del_work);
}
+EXPORT_SYMBOL_GPL(vxlan_sock_release);
-/* Callback to update multicast group membership.
- * Scheduled when vxlan goes up/down.
+/* Callback to update multicast group membership when first VNI on
+ * multicast address is brought up.
+ * Done as workqueue because ip_mc_join_group acquires RTNL.
*/
-static void vxlan_igmp_work(struct work_struct *work)
+static void vxlan_igmp_join(struct work_struct *work)
{
- struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_work);
- struct vxlan_net *vn = net_generic(dev_net(vxlan->dev), vxlan_net_id);
+ struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_join);
struct vxlan_sock *vs = vxlan->vn_sock;
struct sock *sk = vs->sock->sk;
- struct ip_mreqn mreq = {
- .imr_multiaddr.s_addr = vxlan->default_dst.remote_ip,
- .imr_ifindex = vxlan->default_dst.remote_ifindex,
- };
+ union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
+ int ifindex = vxlan->default_dst.remote_ifindex;
lock_sock(sk);
- if (vxlan_group_used(vn, vxlan->default_dst.remote_ip))
+ if (ip->sa.sa_family == AF_INET) {
+ struct ip_mreqn mreq = {
+ .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
+ .imr_ifindex = ifindex,
+ };
+
ip_mc_join_group(sk, &mreq);
- else
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ ipv6_stub->ipv6_sock_mc_join(sk, ifindex,
+ &ip->sin6.sin6_addr);
+#endif
+ }
+ release_sock(sk);
+
+ vxlan_sock_release(vs);
+ dev_put(vxlan->dev);
+}
+
+/* Inverse of vxlan_igmp_join when last VNI is brought down */
+static void vxlan_igmp_leave(struct work_struct *work)
+{
+ struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, igmp_leave);
+ struct vxlan_sock *vs = vxlan->vn_sock;
+ struct sock *sk = vs->sock->sk;
+ union vxlan_addr *ip = &vxlan->default_dst.remote_ip;
+ int ifindex = vxlan->default_dst.remote_ifindex;
+
+ lock_sock(sk);
+ if (ip->sa.sa_family == AF_INET) {
+ struct ip_mreqn mreq = {
+ .imr_multiaddr.s_addr = ip->sin.sin_addr.s_addr,
+ .imr_ifindex = ifindex,
+ };
+
ip_mc_leave_group(sk, &mreq);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ ipv6_stub->ipv6_sock_mc_drop(sk, ifindex,
+ &ip->sin6.sin6_addr);
+#endif
+ }
+
release_sock(sk);
- vxlan_sock_release(vn, vs);
+ vxlan_sock_release(vs);
dev_put(vxlan->dev);
}
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
- struct iphdr *oip;
+ struct vxlan_sock *vs;
struct vxlanhdr *vxh;
- struct vxlan_dev *vxlan;
- struct pcpu_tstats *stats;
__be16 port;
- __u32 vni;
- int err;
-
- /* pop off outer UDP header */
- __skb_pull(skb, sizeof(struct udphdr));
/* Need Vxlan and inner Ethernet header to be present */
- if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))
+ if (!pskb_may_pull(skb, VXLAN_HLEN))
goto error;
- /* Drop packets with reserved bits set */
- vxh = (struct vxlanhdr *) skb->data;
+ /* Return packets with reserved bits set */
+ vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
(vxh->vx_vni & htonl(0xff))) {
netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
@@ -822,40 +1043,72 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
goto error;
}
- __skb_pull(skb, sizeof(struct vxlanhdr));
+ if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
+ goto drop;
- /* Is this VNI defined? */
- vni = ntohl(vxh->vx_vni) >> 8;
port = inet_sk(sk)->inet_sport;
- vxlan = vxlan_find_vni(sock_net(sk), vni, port);
- if (!vxlan) {
- netdev_dbg(skb->dev, "unknown vni %d port %u\n",
- vni, ntohs(port));
+
+ smp_read_barrier_depends();
+ vs = (struct vxlan_sock *)sk->sk_user_data;
+ if (!vs)
goto drop;
- }
- if (!pskb_may_pull(skb, ETH_HLEN)) {
- vxlan->dev->stats.rx_length_errors++;
- vxlan->dev->stats.rx_errors++;
+ vs->rcv(vs, skb, vxh->vx_vni);
+ return 0;
+
+drop:
+ /* Consume bad packet */
+ kfree_skb(skb);
+ return 0;
+
+error:
+ /* Return non vxlan pkt */
+ return 1;
+}
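
The receive hook now finds the vxlan_sock through sk->sk_user_data, published with smp_wmb() after the structure is fully initialised and read behind smp_read_barrier_depends(), so the rcv callback and data fields are guaranteed to be visible before the pointer is. As an analogy only (C11 release/acquire atomics in userspace, not the kernel barrier primitives):

#include <stdatomic.h>
#include <stddef.h>

struct demo_sock {
	void (*rcv)(void *data);	/* handler installed before publication */
	void *data;
};

static _Atomic(struct demo_sock *) demo_user_data;

/* Publisher: fill in the structure, then release-store the pointer. */
static void demo_publish(struct demo_sock *vs, void (*rcv)(void *), void *data)
{
	vs->rcv = rcv;
	vs->data = data;
	atomic_store_explicit(&demo_user_data, vs, memory_order_release);
}

/* Consumer: acquire-load the pointer; if it is non-NULL its fields are valid. */
static int demo_consume(void)
{
	struct demo_sock *vs = atomic_load_explicit(&demo_user_data,
						    memory_order_acquire);
	if (!vs)
		return 0;	/* not ours; let the normal path handle it */
	vs->rcv(vs->data);
	return 1;
}

The kernel pairing is weaker (and cheaper) than a full acquire on most architectures because the read side only relies on the data dependency through the pointer.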
+
+static void vxlan_rcv(struct vxlan_sock *vs,
+ struct sk_buff *skb, __be32 vx_vni)
+{
+ struct iphdr *oip = NULL;
+ struct ipv6hdr *oip6 = NULL;
+ struct vxlan_dev *vxlan;
+ struct pcpu_tstats *stats;
+ union vxlan_addr saddr;
+ __u32 vni;
+ int err = 0;
+ union vxlan_addr *remote_ip;
+
+ vni = ntohl(vx_vni) >> 8;
+ /* Is this VNI defined? */
+ vxlan = vxlan_vs_find_vni(vs, vni);
+ if (!vxlan)
goto drop;
- }
+ remote_ip = &vxlan->default_dst.remote_ip;
skb_reset_mac_header(skb);
-
- /* Re-examine inner Ethernet packet */
- oip = ip_hdr(skb);
skb->protocol = eth_type_trans(skb, vxlan->dev);
/* Ignore packet loops (and multicast echo) */
- if (compare_ether_addr(eth_hdr(skb)->h_source,
- vxlan->dev->dev_addr) == 0)
+ if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
goto drop;
+ /* Re-examine inner Ethernet packet */
+ if (remote_ip->sa.sa_family == AF_INET) {
+ oip = ip_hdr(skb);
+ saddr.sin.sin_addr.s_addr = oip->saddr;
+ saddr.sa.sa_family = AF_INET;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ oip6 = ipv6_hdr(skb);
+ saddr.sin6.sin6_addr = oip6->saddr;
+ saddr.sa.sa_family = AF_INET6;
+#endif
+ }
+
if ((vxlan->flags & VXLAN_F_LEARN) &&
- vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source))
+ vxlan_snoop(skb->dev, &saddr, eth_hdr(skb)->h_source))
goto drop;
- __skb_tunnel_rx(skb, vxlan->dev);
skb_reset_network_header(skb);
/* If the NIC driver gave us an encapsulated packet with
@@ -869,11 +1122,20 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
skb->encapsulation = 0;
- err = IP_ECN_decapsulate(oip, skb);
+ if (oip6)
+ err = IP6_ECN_decapsulate(oip6, skb);
+ if (oip)
+ err = IP_ECN_decapsulate(oip, skb);
+
if (unlikely(err)) {
- if (log_ecn_error)
- net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
- &oip->saddr, oip->tos);
+ if (log_ecn_error) {
+ if (oip6)
+ net_info_ratelimited("non-ECT from %pI6\n",
+ &oip6->saddr);
+ if (oip)
+ net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
+ &oip->saddr, oip->tos);
+ }
if (err > 1) {
++vxlan->dev->stats.rx_frame_errors;
++vxlan->dev->stats.rx_errors;
@@ -889,16 +1151,10 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
netif_rx(skb);
- return 0;
-error:
- /* Put UDP header back */
- __skb_push(skb, sizeof(struct udphdr));
-
- return 1;
+ return;
drop:
/* Consume bad packet */
kfree_skb(skb);
- return 0;
}
static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
@@ -949,7 +1205,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
}
f = vxlan_find_mac(vxlan, n->ha);
- if (f && first_remote(f)->remote_ip == htonl(INADDR_ANY)) {
+ if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
/* bridge-local neighbor */
neigh_release(n);
goto out;
@@ -967,18 +1223,87 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
if (netif_rx_ni(reply) == NET_RX_DROP)
dev->stats.rx_dropped++;
- } else if (vxlan->flags & VXLAN_F_L3MISS)
- vxlan_ip_miss(dev, tip);
+ } else if (vxlan->flags & VXLAN_F_L3MISS) {
+ union vxlan_addr ipa = {
+ .sin.sin_addr.s_addr = tip,
+ .sa.sa_family = AF_INET,
+ };
+
+ vxlan_ip_miss(dev, &ipa);
+ }
out:
consume_skb(skb);
return NETDEV_TX_OK;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
+{
+ struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct neighbour *n;
+ union vxlan_addr ipa;
+ const struct ipv6hdr *iphdr;
+ const struct in6_addr *saddr, *daddr;
+ struct nd_msg *msg;
+ struct inet6_dev *in6_dev = NULL;
+
+ in6_dev = __in6_dev_get(dev);
+ if (!in6_dev)
+ goto out;
+
+ if (!pskb_may_pull(skb, skb->len))
+ goto out;
+
+ iphdr = ipv6_hdr(skb);
+ saddr = &iphdr->saddr;
+ daddr = &iphdr->daddr;
+
+ if (ipv6_addr_loopback(daddr) ||
+ ipv6_addr_is_multicast(daddr))
+ goto out;
+
+ msg = (struct nd_msg *)skb_transport_header(skb);
+ if (msg->icmph.icmp6_code != 0 ||
+ msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
+ goto out;
+
+ n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev);
+
+ if (n) {
+ struct vxlan_fdb *f;
+
+ if (!(n->nud_state & NUD_CONNECTED)) {
+ neigh_release(n);
+ goto out;
+ }
+
+ f = vxlan_find_mac(vxlan, n->ha);
+ if (f && vxlan_addr_any(&(first_remote_rcu(f)->remote_ip))) {
+ /* bridge-local neighbor */
+ neigh_release(n);
+ goto out;
+ }
+
+ ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target,
+ !!in6_dev->cnf.forwarding,
+ true, false, false);
+ neigh_release(n);
+ } else if (vxlan->flags & VXLAN_F_L3MISS) {
+ ipa.sin6.sin6_addr = *daddr;
+ ipa.sa.sa_family = AF_INET6;
+ vxlan_ip_miss(dev, &ipa);
+ }
+
+out:
+ consume_skb(skb);
+ return NETDEV_TX_OK;
+}
+#endif
+
static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct neighbour *n;
- struct iphdr *pip;
if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
return false;
@@ -986,11 +1311,47 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
n = NULL;
switch (ntohs(eth_hdr(skb)->h_proto)) {
case ETH_P_IP:
+ {
+ struct iphdr *pip;
+
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
return false;
pip = ip_hdr(skb);
n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
+ if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
+ union vxlan_addr ipa = {
+ .sin.sin_addr.s_addr = pip->daddr,
+ .sa.sa_family = AF_INET,
+ };
+
+ vxlan_ip_miss(dev, &ipa);
+ return false;
+ }
+
break;
+ }
+#if IS_ENABLED(CONFIG_IPV6)
+ case ETH_P_IPV6:
+ {
+ struct ipv6hdr *pip6;
+
+ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+ return false;
+ pip6 = ipv6_hdr(skb);
+ n = neigh_lookup(ipv6_stub->nd_tbl, &pip6->daddr, dev);
+ if (!n && (vxlan->flags & VXLAN_F_L3MISS)) {
+ union vxlan_addr ipa = {
+ .sin6.sin6_addr = pip6->daddr,
+ .sa.sa_family = AF_INET6,
+ };
+
+ vxlan_ip_miss(dev, &ipa);
+ return false;
+ }
+
+ break;
+ }
+#endif
default:
return false;
}
@@ -998,7 +1359,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
if (n) {
bool diff;
- diff = compare_ether_addr(eth_hdr(skb)->h_dest, n->ha) != 0;
+ diff = !ether_addr_equal(eth_hdr(skb)->h_dest, n->ha);
if (diff) {
memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
dev->addr_len);
@@ -1006,8 +1367,8 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
}
neigh_release(n);
return diff;
- } else if (vxlan->flags & VXLAN_F_L3MISS)
- vxlan_ip_miss(dev, pip->daddr);
+ }
+
return false;
}
@@ -1017,11 +1378,8 @@ static void vxlan_sock_put(struct sk_buff *skb)
}
/* On transmit, associate with the tunnel socket */
-static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
+static void vxlan_set_owner(struct sock *sk, struct sk_buff *skb)
{
- struct vxlan_dev *vxlan = netdev_priv(dev);
- struct sock *sk = vxlan->vn_sock->sock->sk;
-
skb_orphan(skb);
sock_hold(sk);
skb->sk = sk;
@@ -1033,9 +1391,9 @@ static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
* better and maybe available from hardware
* secondary choice is to use jhash on the Ethernet header
*/
-static __be16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
+__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
{
- unsigned int range = (vxlan->port_max - vxlan->port_min) + 1;
+ unsigned int range = (port_max - port_min) + 1;
u32 hash;
hash = skb_get_rxhash(skb);
@@ -1043,8 +1401,9 @@ static __be16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
hash = jhash(skb->data, 2 * ETH_ALEN,
(__force u32) skb->protocol);
- return htons((((u64) hash * range) >> 32) + vxlan->port_min);
+ return htons((((u64) hash * range) >> 32) + port_min);
}
+EXPORT_SYMBOL_GPL(vxlan_src_port);
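
vxlan_src_port() spreads flows across the configured source-port range with a fixed-point multiply: (hash * range) >> 32 scales a 32-bit hash into [0, range) without a division or modulo. A standalone sketch of just that arithmetic, with an illustrative port range:

#include <stdint.h>
#include <stdio.h>

/* Scale a 32-bit hash into [port_min, port_min + range) with a
 * fixed-point multiply instead of a modulo. */
static uint16_t demo_src_port(uint16_t port_min, uint16_t port_max, uint32_t hash)
{
	uint32_t range = (uint32_t)(port_max - port_min) + 1;

	return (uint16_t)((((uint64_t)hash * range) >> 32) + port_min);
}

int main(void)
{
	/* e.g. a typical ephemeral source-port range */
	printf("%u\n", demo_src_port(32768, 61000, 0xdeadbeefu));
	return 0;
}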
static int handle_offloads(struct sk_buff *skb)
{
@@ -1060,21 +1419,187 @@ static int handle_offloads(struct sk_buff *skb)
return 0;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static int vxlan6_xmit_skb(struct vxlan_sock *vs,
+ struct dst_entry *dst, struct sk_buff *skb,
+ struct net_device *dev, struct in6_addr *saddr,
+ struct in6_addr *daddr, __u8 prio, __u8 ttl,
+ __be16 src_port, __be16 dst_port, __be32 vni)
+{
+ struct ipv6hdr *ip6h;
+ struct vxlanhdr *vxh;
+ struct udphdr *uh;
+ int min_headroom;
+ int err;
+
+ if (!skb->encapsulation) {
+ skb_reset_inner_headers(skb);
+ skb->encapsulation = 1;
+ }
+
+ skb_scrub_packet(skb, false);
+
+ min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len
+ + VXLAN_HLEN + sizeof(struct ipv6hdr)
+ + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+
+ /* Need space for new headers (invalidates iph ptr) */
+ err = skb_cow_head(skb, min_headroom);
+ if (unlikely(err))
+ return err;
+
+ if (vlan_tx_tag_present(skb)) {
+ if (WARN_ON(!__vlan_put_tag(skb,
+ skb->vlan_proto,
+ vlan_tx_tag_get(skb))))
+ return -ENOMEM;
+
+ skb->vlan_tci = 0;
+ }
+
+ vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
+ vxh->vx_flags = htonl(VXLAN_FLAGS);
+ vxh->vx_vni = vni;
+
+ __skb_push(skb, sizeof(*uh));
+ skb_reset_transport_header(skb);
+ uh = udp_hdr(skb);
+
+ uh->dest = dst_port;
+ uh->source = src_port;
+
+ uh->len = htons(skb->len);
+ uh->check = 0;
+
+ memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
+ IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
+ IPSKB_REROUTED);
+ skb_dst_set(skb, dst);
+
+ if (!skb_is_gso(skb) && !(dst->dev->features & NETIF_F_IPV6_CSUM)) {
+ __wsum csum = skb_checksum(skb, 0, skb->len, 0);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ uh->check = csum_ipv6_magic(saddr, daddr, skb->len,
+ IPPROTO_UDP, csum);
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+ } else {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+ uh->check = ~csum_ipv6_magic(saddr, daddr,
+ skb->len, IPPROTO_UDP, 0);
+ }
+
+ __skb_push(skb, sizeof(*ip6h));
+ skb_reset_network_header(skb);
+ ip6h = ipv6_hdr(skb);
+ ip6h->version = 6;
+ ip6h->priority = prio;
+ ip6h->flow_lbl[0] = 0;
+ ip6h->flow_lbl[1] = 0;
+ ip6h->flow_lbl[2] = 0;
+ ip6h->payload_len = htons(skb->len);
+ ip6h->nexthdr = IPPROTO_UDP;
+ ip6h->hop_limit = ttl;
+ ip6h->daddr = *daddr;
+ ip6h->saddr = *saddr;
+
+ vxlan_set_owner(vs->sock->sk, skb);
+
+ err = handle_offloads(skb);
+ if (err)
+ return err;
+
+ ip6tunnel_xmit(skb, dev);
+ return 0;
+}
+#endif
+
+int vxlan_xmit_skb(struct vxlan_sock *vs,
+ struct rtable *rt, struct sk_buff *skb,
+ __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
+ __be16 src_port, __be16 dst_port, __be32 vni)
+{
+ struct vxlanhdr *vxh;
+ struct udphdr *uh;
+ int min_headroom;
+ int err;
+
+ if (!skb->encapsulation) {
+ skb_reset_inner_headers(skb);
+ skb->encapsulation = 1;
+ }
+
+ min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ + VXLAN_HLEN + sizeof(struct iphdr)
+ + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
+
+ /* Need space for new headers (invalidates iph ptr) */
+ err = skb_cow_head(skb, min_headroom);
+ if (unlikely(err))
+ return err;
+
+ if (vlan_tx_tag_present(skb)) {
+ if (WARN_ON(!__vlan_put_tag(skb,
+ skb->vlan_proto,
+ vlan_tx_tag_get(skb))))
+ return -ENOMEM;
+
+ skb->vlan_tci = 0;
+ }
+
+ vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
+ vxh->vx_flags = htonl(VXLAN_FLAGS);
+ vxh->vx_vni = vni;
+
+ __skb_push(skb, sizeof(*uh));
+ skb_reset_transport_header(skb);
+ uh = udp_hdr(skb);
+
+ uh->dest = dst_port;
+ uh->source = src_port;
+
+ uh->len = htons(skb->len);
+ uh->check = 0;
+
+ vxlan_set_owner(vs->sock->sk, skb);
+
+ err = handle_offloads(skb);
+ if (err)
+ return err;
+
+ return iptunnel_xmit(rt, skb, src, dst, IPPROTO_UDP, tos, ttl, df,
+ false);
+}
+EXPORT_SYMBOL_GPL(vxlan_xmit_skb);
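
The 24-bit VNI sits in the top three bytes of the 32-bit vx_vni field, which is why the transmit paths encode it as htonl(vni << 8) and the receive path recovers it as ntohl(vx_vni) >> 8. A tiny standalone round-trip check of that encoding:

#include <stdint.h>
#include <assert.h>
#include <arpa/inet.h>

/* The VNI occupies bits 8..31 of the host-order vx_vni word. */
static uint32_t demo_vni_encode(uint32_t vni)
{
	return htonl(vni << 8);
}

static uint32_t demo_vni_decode(uint32_t vx_vni)
{
	return ntohl(vx_vni) >> 8;
}

int main(void)
{
	assert(demo_vni_decode(demo_vni_encode(42)) == 42);
	return 0;
}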
+
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
struct vxlan_dev *dst_vxlan)
{
struct pcpu_tstats *tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
struct pcpu_tstats *rx_stats = this_cpu_ptr(dst_vxlan->dev->tstats);
+ union vxlan_addr loopback;
+ union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
skb->pkt_type = PACKET_HOST;
skb->encapsulation = 0;
skb->dev = dst_vxlan->dev;
__skb_pull(skb, skb_network_offset(skb));
+ if (remote_ip->sa.sa_family == AF_INET) {
+ loopback.sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+ loopback.sa.sa_family = AF_INET;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ loopback.sin6.sin6_addr = in6addr_loopback;
+ loopback.sa.sa_family = AF_INET6;
+#endif
+ }
+
if (dst_vxlan->flags & VXLAN_F_LEARN)
- vxlan_snoop(skb->dev, htonl(INADDR_LOOPBACK),
- eth_hdr(skb)->h_source);
+ vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source);
u64_stats_update_begin(&tx_stats->syncp);
tx_stats->tx_packets++;
@@ -1095,13 +1620,11 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
struct vxlan_rdst *rdst, bool did_rsc)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct rtable *rt;
+ struct rtable *rt = NULL;
const struct iphdr *old_iph;
- struct vxlanhdr *vxh;
- struct udphdr *uh;
struct flowi4 fl4;
- __be32 dst;
- __be16 src_port, dst_port;
+ union vxlan_addr *dst;
+ __be16 src_port = 0, dst_port;
u32 vni;
__be16 df = 0;
__u8 tos, ttl;
@@ -1109,9 +1632,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
dst_port = rdst->remote_port ? rdst->remote_port : vxlan->dst_port;
vni = rdst->remote_vni;
- dst = rdst->remote_ip;
+ dst = &rdst->remote_ip;
- if (!dst) {
+ if (vxlan_addr_any(dst)) {
if (did_rsc) {
/* short-circuited back to local bridge */
vxlan_encap_bypass(skb, vxlan, vxlan);
@@ -1120,84 +1643,113 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
goto drop;
}
- if (!skb->encapsulation) {
- skb_reset_inner_headers(skb);
- skb->encapsulation = 1;
- }
-
- /* Need space for new headers (invalidates iph ptr) */
- if (skb_cow_head(skb, VXLAN_HEADROOM))
- goto drop;
-
old_iph = ip_hdr(skb);
ttl = vxlan->ttl;
- if (!ttl && IN_MULTICAST(ntohl(dst)))
+ if (!ttl && vxlan_addr_multicast(dst))
ttl = 1;
tos = vxlan->tos;
if (tos == 1)
tos = ip_tunnel_get_dsfield(old_iph, skb);
- src_port = vxlan_src_port(vxlan, skb);
+ src_port = vxlan_src_port(vxlan->port_min, vxlan->port_max, skb);
- memset(&fl4, 0, sizeof(fl4));
- fl4.flowi4_oif = rdst->remote_ifindex;
- fl4.flowi4_tos = RT_TOS(tos);
- fl4.daddr = dst;
- fl4.saddr = vxlan->saddr;
+ if (dst->sa.sa_family == AF_INET) {
+ memset(&fl4, 0, sizeof(fl4));
+ fl4.flowi4_oif = rdst->remote_ifindex;
+ fl4.flowi4_tos = RT_TOS(tos);
+ fl4.daddr = dst->sin.sin_addr.s_addr;
+ fl4.saddr = vxlan->saddr.sin.sin_addr.s_addr;
- rt = ip_route_output_key(dev_net(dev), &fl4);
- if (IS_ERR(rt)) {
- netdev_dbg(dev, "no route to %pI4\n", &dst);
- dev->stats.tx_carrier_errors++;
- goto tx_error;
- }
+ rt = ip_route_output_key(dev_net(dev), &fl4);
+ if (IS_ERR(rt)) {
+ netdev_dbg(dev, "no route to %pI4\n",
+ &dst->sin.sin_addr.s_addr);
+ dev->stats.tx_carrier_errors++;
+ goto tx_error;
+ }
- if (rt->dst.dev == dev) {
- netdev_dbg(dev, "circular route to %pI4\n", &dst);
- ip_rt_put(rt);
- dev->stats.collisions++;
- goto tx_error;
- }
+ if (rt->dst.dev == dev) {
+ netdev_dbg(dev, "circular route to %pI4\n",
+ &dst->sin.sin_addr.s_addr);
+ dev->stats.collisions++;
+ goto tx_error;
+ }
- /* Bypass encapsulation if the destination is local */
- if (rt->rt_flags & RTCF_LOCAL &&
- !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
- struct vxlan_dev *dst_vxlan;
+ /* Bypass encapsulation if the destination is local */
+ if (rt->rt_flags & RTCF_LOCAL &&
+ !(rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
+ struct vxlan_dev *dst_vxlan;
- ip_rt_put(rt);
- dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
- if (!dst_vxlan)
- goto tx_error;
- vxlan_encap_bypass(skb, vxlan, dst_vxlan);
- return;
- }
- vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
- vxh->vx_flags = htonl(VXLAN_FLAGS);
- vxh->vx_vni = htonl(vni << 8);
+ ip_rt_put(rt);
+ dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
+ if (!dst_vxlan)
+ goto tx_error;
+ vxlan_encap_bypass(skb, vxlan, dst_vxlan);
+ return;
+ }
- __skb_push(skb, sizeof(*uh));
- skb_reset_transport_header(skb);
- uh = udp_hdr(skb);
+ tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
+ ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
- uh->dest = dst_port;
- uh->source = src_port;
+ err = vxlan_xmit_skb(vxlan->vn_sock, rt, skb,
+ fl4.saddr, dst->sin.sin_addr.s_addr,
+ tos, ttl, df, src_port, dst_port,
+ htonl(vni << 8));
- uh->len = htons(skb->len);
- uh->check = 0;
+ if (err < 0)
+ goto rt_tx_error;
+ iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ struct sock *sk = vxlan->vn_sock->sock->sk;
+ struct dst_entry *ndst;
+ struct flowi6 fl6;
+ u32 flags;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_oif = rdst->remote_ifindex;
+ fl6.daddr = dst->sin6.sin6_addr;
+ fl6.saddr = vxlan->saddr.sin6.sin6_addr;
+ fl6.flowi6_proto = IPPROTO_UDP;
+
+ if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6)) {
+ netdev_dbg(dev, "no route to %pI6\n",
+ &dst->sin6.sin6_addr);
+ dev->stats.tx_carrier_errors++;
+ goto tx_error;
+ }
- vxlan_set_owner(dev, skb);
+ if (ndst->dev == dev) {
+ netdev_dbg(dev, "circular route to %pI6\n",
+ &dst->sin6.sin6_addr);
+ dst_release(ndst);
+ dev->stats.collisions++;
+ goto tx_error;
+ }
- if (handle_offloads(skb))
- goto drop;
+ /* Bypass encapsulation if the destination is local */
+ flags = ((struct rt6_info *)ndst)->rt6i_flags;
+ if (flags & RTF_LOCAL &&
+ !(flags & (RTCF_BROADCAST | RTCF_MULTICAST))) {
+ struct vxlan_dev *dst_vxlan;
+
+ dst_release(ndst);
+ dst_vxlan = vxlan_find_vni(dev_net(dev), vni, dst_port);
+ if (!dst_vxlan)
+ goto tx_error;
+ vxlan_encap_bypass(skb, vxlan, dst_vxlan);
+ return;
+ }
- tos = ip_tunnel_ecn_encap(tos, old_iph, skb);
- ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
+ ttl = ttl ? : ip6_dst_hoplimit(ndst);
- err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, dst,
- IPPROTO_UDP, tos, ttl, df);
- iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
+ err = vxlan6_xmit_skb(vxlan->vn_sock, ndst, skb,
+ dev, &fl6.saddr, &fl6.daddr, 0, ttl,
+ src_port, dst_port, htonl(vni << 8));
+#endif
+ }
return;
@@ -1205,6 +1757,8 @@ drop:
dev->stats.tx_dropped++;
goto tx_free;
+rt_tx_error:
+ ip_rt_put(rt);
tx_error:
dev->stats.tx_errors++;
tx_free:
@@ -1228,14 +1782,29 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
skb_reset_mac_header(skb);
eth = eth_hdr(skb);
- if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
- return arp_reduce(dev, skb);
+ if ((vxlan->flags & VXLAN_F_PROXY)) {
+ if (ntohs(eth->h_proto) == ETH_P_ARP)
+ return arp_reduce(dev, skb);
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (ntohs(eth->h_proto) == ETH_P_IPV6 &&
+ skb->len >= sizeof(struct ipv6hdr) + sizeof(struct nd_msg) &&
+ ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
+ struct nd_msg *msg;
+
+ msg = (struct nd_msg *)skb_transport_header(skb);
+ if (msg->icmph.icmp6_code == 0 &&
+ msg->icmph.icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
+ return neigh_reduce(dev, skb);
+ }
+#endif
+ }
f = vxlan_find_mac(vxlan, eth->h_dest);
did_rsc = false;
if (f && (f->flags & NTF_ROUTER) && (vxlan->flags & VXLAN_F_RSC) &&
- ntohs(eth->h_proto) == ETH_P_IP) {
+ (ntohs(eth->h_proto) == ETH_P_IP ||
+ ntohs(eth->h_proto) == ETH_P_IPV6)) {
did_rsc = route_shortcircuit(dev, skb);
if (did_rsc)
f = vxlan_find_mac(vxlan, eth->h_dest);
@@ -1303,25 +1872,31 @@ static void vxlan_cleanup(unsigned long arg)
mod_timer(&vxlan->age_timer, next_timer);
}
+static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
+{
+ __u32 vni = vxlan->default_dst.remote_vni;
+
+ vxlan->vn_sock = vs;
+ hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
+}
+
/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_sock *vs;
- __u32 vni = vxlan->default_dst.remote_vni;
dev->tstats = alloc_percpu(struct pcpu_tstats);
if (!dev->tstats)
return -ENOMEM;
spin_lock(&vn->sock_lock);
- vs = vxlan_find_port(dev_net(dev), vxlan->dst_port);
+ vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
if (vs) {
/* If we have a socket with same port already, reuse it */
atomic_inc(&vs->refcnt);
- vxlan->vn_sock = vs;
- hlist_add_head_rcu(&vxlan->hlist, vni_head(vs, vni));
+ vxlan_vs_add_dev(vs, vxlan);
} else {
/* otherwise make new socket outside of RTNL */
dev_hold(dev);
@@ -1346,19 +1921,19 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan)
static void vxlan_uninit(struct net_device *dev)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_sock *vs = vxlan->vn_sock;
vxlan_fdb_delete_default(vxlan);
if (vs)
- vxlan_sock_release(vn, vs);
+ vxlan_sock_release(vs);
free_percpu(dev->tstats);
}
/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
+ struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_sock *vs = vxlan->vn_sock;
@@ -1366,10 +1941,11 @@ static int vxlan_open(struct net_device *dev)
if (!vs)
return -ENOTCONN;
- if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) {
+ if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
+ vxlan_group_used(vn, &vxlan->default_dst.remote_ip)) {
vxlan_sock_hold(vs);
dev_hold(dev);
- queue_work(vxlan_wq, &vxlan->igmp_work);
+ queue_work(vxlan_wq, &vxlan->igmp_join);
}
if (vxlan->age_interval)
@@ -1400,13 +1976,15 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
+ struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_sock *vs = vxlan->vn_sock;
- if (vs && IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip))) {
+ if (vs && vxlan_addr_multicast(&vxlan->default_dst.remote_ip) &&
+ ! vxlan_group_used(vn, &vxlan->default_dst.remote_ip)) {
vxlan_sock_hold(vs);
dev_hold(dev);
- queue_work(vxlan_wq, &vxlan->igmp_work);
+ queue_work(vxlan_wq, &vxlan->igmp_leave);
}
del_timer_sync(&vxlan->age_timer);
@@ -1442,6 +2020,34 @@ static struct device_type vxlan_type = {
.name = "vxlan",
};
+/* Calls the ndo_add_vxlan_port of the caller in order to
+ * supply the listening VXLAN udp ports.
+ */
+void vxlan_get_rx_port(struct net_device *dev)
+{
+ struct vxlan_sock *vs;
+ struct net *net = dev_net(dev);
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ sa_family_t sa_family;
+ u16 port;
+ int i;
+
+ if (!dev || !dev->netdev_ops || !dev->netdev_ops->ndo_add_vxlan_port)
+ return;
+
+ spin_lock(&vn->sock_lock);
+ for (i = 0; i < PORT_HASH_SIZE; ++i) {
+ hlist_for_each_entry_rcu(vs, vs_head(net, i), hlist) {
+ port = htons(inet_sk(vs->sock->sk)->inet_sport);
+ sa_family = vs->sock->sk->sk_family;
+ dev->netdev_ops->ndo_add_vxlan_port(dev, sa_family,
+ port);
+ }
+ }
+ spin_unlock(&vn->sock_lock);
+}
+EXPORT_SYMBOL_GPL(vxlan_get_rx_port);
+
/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
@@ -1451,7 +2057,10 @@ static void vxlan_setup(struct net_device *dev)
eth_hw_addr_random(dev);
ether_setup(dev);
- dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;
+ if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
+ dev->hard_header_len = ETH_HLEN + VXLAN6_HEADROOM;
+ else
+ dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;
dev->netdev_ops = &vxlan_netdev_ops;
dev->destructor = free_netdev;
@@ -1464,14 +2073,18 @@ static void vxlan_setup(struct net_device *dev)
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_GSO_SOFTWARE;
+ dev->vlan_features = dev->features;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
INIT_LIST_HEAD(&vxlan->next);
spin_lock_init(&vxlan->hash_lock);
- INIT_WORK(&vxlan->igmp_work, vxlan_igmp_work);
+ INIT_WORK(&vxlan->igmp_join, vxlan_igmp_join);
+ INIT_WORK(&vxlan->igmp_leave, vxlan_igmp_leave);
INIT_WORK(&vxlan->sock_work, vxlan_sock_work);
init_timer_deferrable(&vxlan->age_timer);
@@ -1492,8 +2105,10 @@ static void vxlan_setup(struct net_device *dev)
static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
[IFLA_VXLAN_ID] = { .type = NLA_U32 },
[IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+ [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) },
[IFLA_VXLAN_LINK] = { .type = NLA_U32 },
[IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) },
+ [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) },
[IFLA_VXLAN_TOS] = { .type = NLA_U8 },
[IFLA_VXLAN_TTL] = { .type = NLA_U8 },
[IFLA_VXLAN_LEARNING] = { .type = NLA_U8 },
@@ -1564,99 +2179,199 @@ static void vxlan_del_work(struct work_struct *work)
kfree_rcu(vs, rcu);
}
-static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port)
+#if IS_ENABLED(CONFIG_IPV6)
+/* Create UDP socket for encapsulation receive. AF_INET6 socket
+ * could be used for both IPv4 and IPv6 communications, but
+ * users may set bindv6only=1.
+ */
+static int create_v6_sock(struct net *net, __be16 port, struct socket **psock)
+{
+ struct sock *sk;
+ struct socket *sock;
+ struct sockaddr_in6 vxlan_addr = {
+ .sin6_family = AF_INET6,
+ .sin6_port = port,
+ };
+ int rc, val = 1;
+
+ rc = sock_create_kern(AF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock);
+ if (rc < 0) {
+ pr_debug("UDPv6 socket create failed\n");
+ return rc;
+ }
+
+ /* Put in proper namespace */
+ sk = sock->sk;
+ sk_change_net(sk, net);
+
+ kernel_setsockopt(sock, SOL_IPV6, IPV6_V6ONLY,
+ (char *)&val, sizeof(val));
+ rc = kernel_bind(sock, (struct sockaddr *)&vxlan_addr,
+ sizeof(struct sockaddr_in6));
+ if (rc < 0) {
+ pr_debug("bind for UDPv6 socket %pI6:%u (%d)\n",
+ &vxlan_addr.sin6_addr, ntohs(vxlan_addr.sin6_port), rc);
+ sk_release_kernel(sk);
+ return rc;
+ }
+ /* At this point, IPv6 module should have been loaded in
+ * sock_create_kern().
+ */
+ BUG_ON(!ipv6_stub);
+
+ *psock = sock;
+ /* Disable multicast loopback */
+ inet_sk(sk)->mc_loop = 0;
+ return 0;
+}
+
+#else
+
+static int create_v6_sock(struct net *net, __be16 port, struct socket **psock)
+{
+ return -EPFNOSUPPORT;
+}
+#endif
+
+static int create_v4_sock(struct net *net, __be16 port, struct socket **psock)
{
- struct vxlan_sock *vs;
struct sock *sk;
+ struct socket *sock;
struct sockaddr_in vxlan_addr = {
.sin_family = AF_INET,
.sin_addr.s_addr = htonl(INADDR_ANY),
.sin_port = port,
};
int rc;
- unsigned int h;
-
- vs = kmalloc(sizeof(*vs), GFP_KERNEL);
- if (!vs)
- return ERR_PTR(-ENOMEM);
-
- for (h = 0; h < VNI_HASH_SIZE; ++h)
- INIT_HLIST_HEAD(&vs->vni_list[h]);
-
- INIT_WORK(&vs->del_work, vxlan_del_work);
/* Create UDP socket for encapsulation receive. */
- rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vs->sock);
+ rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
if (rc < 0) {
pr_debug("UDP socket create failed\n");
- kfree(vs);
- return ERR_PTR(rc);
+ return rc;
}
/* Put in proper namespace */
- sk = vs->sock->sk;
+ sk = sock->sk;
sk_change_net(sk, net);
- rc = kernel_bind(vs->sock, (struct sockaddr *) &vxlan_addr,
+ rc = kernel_bind(sock, (struct sockaddr *) &vxlan_addr,
sizeof(vxlan_addr));
if (rc < 0) {
pr_debug("bind for UDP socket %pI4:%u (%d)\n",
&vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
sk_release_kernel(sk);
- kfree(vs);
- return ERR_PTR(rc);
+ return rc;
}
+ *psock = sock;
/* Disable multicast loopback */
inet_sk(sk)->mc_loop = 0;
+ return 0;
+}
+
+/* Create new listen socket if needed */
+static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
+ vxlan_rcv_t *rcv, void *data, bool ipv6)
+{
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ struct vxlan_sock *vs;
+ struct socket *sock;
+ struct sock *sk;
+ int rc = 0;
+ unsigned int h;
+
+ vs = kmalloc(sizeof(*vs), GFP_KERNEL);
+ if (!vs)
+ return ERR_PTR(-ENOMEM);
+
+ for (h = 0; h < VNI_HASH_SIZE; ++h)
+ INIT_HLIST_HEAD(&vs->vni_list[h]);
+
+ INIT_WORK(&vs->del_work, vxlan_del_work);
+
+ if (ipv6)
+ rc = create_v6_sock(net, port, &sock);
+ else
+ rc = create_v4_sock(net, port, &sock);
+ if (rc < 0) {
+ kfree(vs);
+ return ERR_PTR(rc);
+ }
+
+ vs->sock = sock;
+ sk = sock->sk;
+ atomic_set(&vs->refcnt, 1);
+ vs->rcv = rcv;
+ vs->data = data;
+ smp_wmb();
+ vs->sock->sk->sk_user_data = vs;
+
+ spin_lock(&vn->sock_lock);
+ hlist_add_head_rcu(&vs->hlist, vs_head(net, port));
+ vxlan_notify_add_rx_port(sk);
+ spin_unlock(&vn->sock_lock);
/* Mark socket as an encapsulation socket. */
udp_sk(sk)->encap_type = 1;
udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
- udp_encap_enable();
- atomic_set(&vs->refcnt, 1);
+#if IS_ENABLED(CONFIG_IPV6)
+ if (ipv6)
+ ipv6_stub->udpv6_encap_enable();
+ else
+#endif
+ udp_encap_enable();
return vs;
}
-/* Scheduled at device creation to bind to a socket */
-static void vxlan_sock_work(struct work_struct *work)
+struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+ vxlan_rcv_t *rcv, void *data,
+ bool no_share, bool ipv6)
{
- struct vxlan_dev *vxlan
- = container_of(work, struct vxlan_dev, sock_work);
- struct net_device *dev = vxlan->dev;
- struct net *net = dev_net(dev);
- __u32 vni = vxlan->default_dst.remote_vni;
- __be16 port = vxlan->dst_port;
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
- struct vxlan_sock *nvs, *ovs;
+ struct vxlan_sock *vs;
- nvs = vxlan_socket_create(net, port);
- if (IS_ERR(nvs)) {
- netdev_err(vxlan->dev, "Can not create UDP socket, %ld\n",
- PTR_ERR(nvs));
- goto out;
- }
+ vs = vxlan_socket_create(net, port, rcv, data, ipv6);
+ if (!IS_ERR(vs))
+ return vs;
+
+ if (no_share) /* Return error if sharing is not allowed. */
+ return vs;
spin_lock(&vn->sock_lock);
- /* Look again to see if can reuse socket */
- ovs = vxlan_find_port(net, port);
- if (ovs) {
- atomic_inc(&ovs->refcnt);
- vxlan->vn_sock = ovs;
- hlist_add_head_rcu(&vxlan->hlist, vni_head(ovs, vni));
- spin_unlock(&vn->sock_lock);
-
- sk_release_kernel(nvs->sock->sk);
- kfree(nvs);
- } else {
- vxlan->vn_sock = nvs;
- hlist_add_head_rcu(&nvs->hlist, vs_head(net, port));
- hlist_add_head_rcu(&vxlan->hlist, vni_head(nvs, vni));
- spin_unlock(&vn->sock_lock);
+ vs = vxlan_find_sock(net, port);
+ if (vs) {
+ if (vs->rcv == rcv)
+ atomic_inc(&vs->refcnt);
+ else
+ vs = ERR_PTR(-EBUSY);
}
-out:
- dev_put(dev);
+ spin_unlock(&vn->sock_lock);
+
+ if (!vs)
+ vs = ERR_PTR(-EINVAL);
+
+ return vs;
+}
+EXPORT_SYMBOL_GPL(vxlan_sock_add);
+
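vxlan_sock_add() above first tries to create a fresh per-port socket and, when that fails and sharing is allowed, falls back to an existing one, taking a reference only if the receive callback matches (otherwise -EBUSY). A stand-alone sketch of that lookup-and-share step, with purely illustrative names and a pthread mutex standing in for the driver's spinlock:

#include <pthread.h>
#include <stdlib.h>

struct shared_sock {
	int port;
	int refcnt;
	void (*rcv)(void *data);
	struct shared_sock *next;
};

static struct shared_sock *sock_list;
static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;

/* Return an existing entry for @port when its callback matches, otherwise
 * allocate a new one. NULL for a matching port with a different callback
 * plays the role of -EBUSY in the driver. */
static struct shared_sock *sock_add(int port, void (*rcv)(void *data))
{
	struct shared_sock *s;

	pthread_mutex_lock(&sock_lock);
	for (s = sock_list; s; s = s->next) {
		if (s->port != port)
			continue;
		if (s->rcv == rcv)
			s->refcnt++;
		else
			s = NULL;
		pthread_mutex_unlock(&sock_lock);
		return s;
	}

	s = calloc(1, sizeof(*s));
	if (s) {
		s->port = port;
		s->refcnt = 1;
		s->rcv = rcv;
		s->next = sock_list;
		sock_list = s;
	}
	pthread_mutex_unlock(&sock_lock);
	return s;
}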
+/* Scheduled at device creation to bind to a socket */
+static void vxlan_sock_work(struct work_struct *work)
+{
+ struct vxlan_dev *vxlan = container_of(work, struct vxlan_dev, sock_work);
+ struct net *net = dev_net(vxlan->dev);
+ struct vxlan_net *vn = net_generic(net, vxlan_net_id);
+ __be16 port = vxlan->dst_port;
+ struct vxlan_sock *nvs;
+
+ nvs = vxlan_sock_add(net, port, vxlan_rcv, NULL, false, vxlan->flags & VXLAN_F_IPV6);
+ spin_lock(&vn->sock_lock);
+ if (!IS_ERR(nvs))
+ vxlan_vs_add_dev(nvs, vxlan);
+ spin_unlock(&vn->sock_lock);
+
+ dev_put(vxlan->dev);
}
static int vxlan_newlink(struct net *net, struct net_device *dev,
@@ -1667,6 +2382,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
struct vxlan_rdst *dst = &vxlan->default_dst;
__u32 vni;
int err;
+ bool use_ipv6 = false;
if (!data[IFLA_VXLAN_ID])
return -EINVAL;
@@ -1674,11 +2390,32 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
vni = nla_get_u32(data[IFLA_VXLAN_ID]);
dst->remote_vni = vni;
- if (data[IFLA_VXLAN_GROUP])
- dst->remote_ip = nla_get_be32(data[IFLA_VXLAN_GROUP]);
+ if (data[IFLA_VXLAN_GROUP]) {
+ dst->remote_ip.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
+ dst->remote_ip.sa.sa_family = AF_INET;
+ } else if (data[IFLA_VXLAN_GROUP6]) {
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return -EPFNOSUPPORT;
+
+ nla_memcpy(&dst->remote_ip.sin6.sin6_addr, data[IFLA_VXLAN_GROUP6],
+ sizeof(struct in6_addr));
+ dst->remote_ip.sa.sa_family = AF_INET6;
+ use_ipv6 = true;
+ }
- if (data[IFLA_VXLAN_LOCAL])
- vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
+ if (data[IFLA_VXLAN_LOCAL]) {
+ vxlan->saddr.sin.sin_addr.s_addr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);
+ vxlan->saddr.sa.sa_family = AF_INET;
+ } else if (data[IFLA_VXLAN_LOCAL6]) {
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return -EPFNOSUPPORT;
+
+ /* TODO: respect scope id */
+ nla_memcpy(&vxlan->saddr.sin6.sin6_addr, data[IFLA_VXLAN_LOCAL6],
+ sizeof(struct in6_addr));
+ vxlan->saddr.sa.sa_family = AF_INET6;
+ use_ipv6 = true;
+ }
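The .sin/.sin6/.sa accessors used above rely on a small address union that the patch introduces earlier in the file, outside this excerpt. A sketch of the shape those accessors imply (member names are taken from the hunk; treat the rest as an assumption):

union vxlan_addr {
	struct sockaddr_in  sin;	/* IPv4: sin.sin_addr.s_addr */
	struct sockaddr_in6 sin6;	/* IPv6: sin6.sin6_addr */
	struct sockaddr     sa;		/* sa.sa_family selects which view is valid */
};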
if (data[IFLA_VXLAN_LINK] &&
(dst->remote_ifindex = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
@@ -1690,12 +2427,23 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
return -ENODEV;
}
+#if IS_ENABLED(CONFIG_IPV6)
+ if (use_ipv6) {
+ struct inet6_dev *idev = __in6_dev_get(lowerdev);
+ if (idev && idev->cnf.disable_ipv6) {
+ pr_info("IPv6 is disabled via sysctl\n");
+ return -EPERM;
+ }
+ vxlan->flags |= VXLAN_F_IPV6;
+ }
+#endif
+
if (!tb[IFLA_MTU])
- dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
+ dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
/* update header length based on lower device */
dev->hard_header_len = lowerdev->hard_header_len +
- VXLAN_HEADROOM;
+ (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
}
if (data[IFLA_VXLAN_TOS])
@@ -1746,7 +2494,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
/* create an fdb entry for default destination */
err = vxlan_fdb_create(vxlan, all_zeros_mac,
- vxlan->default_dst.remote_ip,
+ &vxlan->default_dst.remote_ip,
NUD_REACHABLE|NUD_PERMANENT,
NLM_F_EXCL|NLM_F_CREATE,
vxlan->dst_port, vxlan->default_dst.remote_vni,
@@ -1770,10 +2518,9 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
- flush_workqueue(vxlan_wq);
-
spin_lock(&vn->sock_lock);
- hlist_del_rcu(&vxlan->hlist);
+ if (!hlist_unhashed(&vxlan->hlist))
+ hlist_del_rcu(&vxlan->hlist);
spin_unlock(&vn->sock_lock);
list_del(&vxlan->next);
@@ -1784,9 +2531,9 @@ static size_t vxlan_get_size(const struct net_device *dev)
{
return nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_ID */
- nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */
+ nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_GROUP{6} */
nla_total_size(sizeof(__u32)) + /* IFLA_VXLAN_LINK */
- nla_total_size(sizeof(__be32))+ /* IFLA_VXLAN_LOCAL */
+ nla_total_size(sizeof(struct in6_addr)) + /* IFLA_VXLAN_LOCAL{6} */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */
@@ -1813,14 +2560,36 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
if (nla_put_u32(skb, IFLA_VXLAN_ID, dst->remote_vni))
goto nla_put_failure;
- if (dst->remote_ip && nla_put_be32(skb, IFLA_VXLAN_GROUP, dst->remote_ip))
- goto nla_put_failure;
+ if (!vxlan_addr_any(&dst->remote_ip)) {
+ if (dst->remote_ip.sa.sa_family == AF_INET) {
+ if (nla_put_be32(skb, IFLA_VXLAN_GROUP,
+ dst->remote_ip.sin.sin_addr.s_addr))
+ goto nla_put_failure;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ if (nla_put(skb, IFLA_VXLAN_GROUP6, sizeof(struct in6_addr),
+ &dst->remote_ip.sin6.sin6_addr))
+ goto nla_put_failure;
+#endif
+ }
+ }
if (dst->remote_ifindex && nla_put_u32(skb, IFLA_VXLAN_LINK, dst->remote_ifindex))
goto nla_put_failure;
- if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
- goto nla_put_failure;
+ if (!vxlan_addr_any(&vxlan->saddr)) {
+ if (vxlan->saddr.sa.sa_family == AF_INET) {
+ if (nla_put_be32(skb, IFLA_VXLAN_LOCAL,
+ vxlan->saddr.sin.sin_addr.s_addr))
+ goto nla_put_failure;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ if (nla_put(skb, IFLA_VXLAN_LOCAL6, sizeof(struct in6_addr),
+ &vxlan->saddr.sin6.sin6_addr))
+ goto nla_put_failure;
+#endif
+ }
+ }
if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
@@ -1878,10 +2647,12 @@ static __net_exit void vxlan_exit_net(struct net *net)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan;
+ LIST_HEAD(list);
rtnl_lock();
list_for_each_entry(vxlan, &vn->vxlan_list, next)
- dev_close(vxlan->dev);
+ unregister_netdevice_queue(vxlan->dev, &list);
+ unregister_netdevice_many(&list);
rtnl_unlock();
}
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index d43f4efd3e0..5bbcb5e3ee0 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -176,7 +176,7 @@ static u32 mac[ SBNI_MAX_NUM_CARDS ] __initdata;
#ifndef MODULE
typedef u32 iarr[];
-static iarr __initdata *dest[5] = { &io, &irq, &baud, &rxl, &mac };
+static iarr *dest[5] __initdata = { &io, &irq, &baud, &rxl, &mac };
#endif
/* A zero-terminated list of I/O addresses to be probed on ISA bus */
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index d0adbaf8618..7fe19648f10 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2693,7 +2693,7 @@ static struct net_device *init_wifidev(struct airo_info *ai,
dev->base_addr = ethdev->base_addr;
dev->wireless_data = ethdev->wireless_data;
SET_NETDEV_DEV(dev, ethdev->dev.parent);
- memcpy(dev->dev_addr, ethdev->dev_addr, dev->addr_len);
+ eth_hw_addr_inherit(dev, ethdev);
err = register_netdev(dev);
if (err<0) {
free_netdev(dev);
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index daeafeff186..e0ba7cd1425 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -159,7 +159,7 @@ struct ath_common {
bool btcoex_enabled;
bool disable_ani;
- bool antenna_diversity;
+ bool bt_ant_diversity;
};
struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig
index cde58fe9625..82e8088ca9b 100644
--- a/drivers/net/wireless/ath/ath10k/Kconfig
+++ b/drivers/net/wireless/ath/ath10k/Kconfig
@@ -1,6 +1,6 @@
config ATH10K
tristate "Atheros 802.11ac wireless cards support"
- depends on MAC80211
+ depends on MAC80211 && HAS_DMA
select ATH_COMMON
---help---
This module adds support for wireless adapters based on
diff --git a/drivers/net/wireless/ath/ath10k/bmi.c b/drivers/net/wireless/ath/ath10k/bmi.c
index 1a2ef51b69d..744da6d1c40 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.c
+++ b/drivers/net/wireless/ath/ath10k/bmi.c
@@ -20,6 +20,12 @@
#include "debug.h"
#include "htc.h"
+void ath10k_bmi_start(struct ath10k *ar)
+{
+ ath10k_dbg(ATH10K_DBG_CORE, "BMI started\n");
+ ar->bmi.done_sent = false;
+}
+
int ath10k_bmi_done(struct ath10k *ar)
{
struct bmi_cmd cmd;
@@ -105,7 +111,8 @@ int ath10k_bmi_read_memory(struct ath10k *ar,
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
&resp, &rxlen);
if (ret) {
- ath10k_warn("unable to read from the device\n");
+ ath10k_warn("unable to read from the device (%d)\n",
+ ret);
return ret;
}
@@ -149,7 +156,8 @@ int ath10k_bmi_write_memory(struct ath10k *ar,
ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + txlen,
NULL, NULL);
if (ret) {
- ath10k_warn("unable to write to the device\n");
+ ath10k_warn("unable to write to the device (%d)\n",
+ ret);
return ret;
}
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index 32c56aa33a5..8d81ce1cec2 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -184,6 +184,7 @@ struct bmi_target_info {
#define BMI_CE_NUM_TO_TARG 0
#define BMI_CE_NUM_TO_HOST 1
+void ath10k_bmi_start(struct ath10k *ar);
int ath10k_bmi_done(struct ath10k *ar);
int ath10k_bmi_get_target_info(struct ath10k *ar,
struct bmi_target_info *target_info);
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 61a8ac70d3c..f8b969f518f 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -79,7 +79,7 @@ static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
void __iomem *indicator_addr;
- if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) {
+ if (!test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
return;
}
@@ -637,6 +637,7 @@ static int ath10k_ce_completed_send_next_nolock(struct ce_state *ce_state,
ath10k_pci_wake(ar);
src_ring->hw_index =
ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ src_ring->hw_index &= nentries_mask;
ath10k_pci_sleep(ar);
}
read_index = src_ring->hw_index;
@@ -950,10 +951,12 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_pci_wake(ar);
src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ src_ring->sw_index &= src_ring->nentries_mask;
src_ring->hw_index = src_ring->sw_index;
src_ring->write_index =
ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
+ src_ring->write_index &= src_ring->nentries_mask;
ath10k_pci_sleep(ar);
src_ring->per_transfer_context = (void **)ptr;
@@ -1035,8 +1038,10 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
ath10k_pci_wake(ar);
dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+ dest_ring->sw_index &= dest_ring->nentries_mask;
dest_ring->write_index =
ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+ dest_ring->write_index &= dest_ring->nentries_mask;
ath10k_pci_sleep(ar);
dest_ring->per_transfer_context = (void **)ptr;
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 2b3426b1ff3..7226c23b956 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -100,7 +100,7 @@ static int ath10k_init_connect_htc(struct ath10k *ar)
goto conn_fail;
/* Start HTC */
- status = ath10k_htc_start(ar->htc);
+ status = ath10k_htc_start(&ar->htc);
if (status)
goto conn_fail;
@@ -116,7 +116,7 @@ static int ath10k_init_connect_htc(struct ath10k *ar)
return 0;
timeout:
- ath10k_htc_stop(ar->htc);
+ ath10k_htc_stop(&ar->htc);
conn_fail:
return status;
}
@@ -247,19 +247,11 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
static int ath10k_download_board_data(struct ath10k *ar)
{
+ const struct firmware *fw = ar->board_data;
u32 board_data_size = QCA988X_BOARD_DATA_SZ;
u32 address;
- const struct firmware *fw;
int ret;
- fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
- ar->hw_params.fw.board);
- if (IS_ERR(fw)) {
- ath10k_err("could not fetch board data fw file (%ld)\n",
- PTR_ERR(fw));
- return PTR_ERR(fw);
- }
-
ret = ath10k_push_board_ext_data(ar, fw);
if (ret) {
ath10k_err("could not push board ext data (%d)\n", ret);
@@ -286,32 +278,20 @@ static int ath10k_download_board_data(struct ath10k *ar)
}
exit:
- release_firmware(fw);
return ret;
}
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
- const struct firmware *fw;
- u32 address;
+ const struct firmware *fw = ar->otp;
+ u32 address = ar->hw_params.patch_load_addr;
u32 exec_param;
int ret;
/* OTP is optional */
- if (ar->hw_params.fw.otp == NULL) {
- ath10k_info("otp file not defined\n");
- return 0;
- }
-
- address = ar->hw_params.patch_load_addr;
-
- fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
- ar->hw_params.fw.otp);
- if (IS_ERR(fw)) {
- ath10k_warn("could not fetch otp (%ld)\n", PTR_ERR(fw));
+ if (!ar->otp)
return 0;
- }
ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
if (ret) {
@@ -327,28 +307,17 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
}
exit:
- release_firmware(fw);
return ret;
}
static int ath10k_download_fw(struct ath10k *ar)
{
- const struct firmware *fw;
+ const struct firmware *fw = ar->firmware;
u32 address;
int ret;
- if (ar->hw_params.fw.fw == NULL)
- return -EINVAL;
-
address = ar->hw_params.patch_load_addr;
- fw = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir,
- ar->hw_params.fw.fw);
- if (IS_ERR(fw)) {
- ath10k_err("could not fetch fw (%ld)\n", PTR_ERR(fw));
- return PTR_ERR(fw);
- }
-
ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
if (ret) {
ath10k_err("could not write fw (%d)\n", ret);
@@ -356,7 +325,74 @@ static int ath10k_download_fw(struct ath10k *ar)
}
exit:
- release_firmware(fw);
+ return ret;
+}
+
+static void ath10k_core_free_firmware_files(struct ath10k *ar)
+{
+ if (ar->board_data && !IS_ERR(ar->board_data))
+ release_firmware(ar->board_data);
+
+ if (ar->otp && !IS_ERR(ar->otp))
+ release_firmware(ar->otp);
+
+ if (ar->firmware && !IS_ERR(ar->firmware))
+ release_firmware(ar->firmware);
+
+ ar->board_data = NULL;
+ ar->otp = NULL;
+ ar->firmware = NULL;
+}
+
+static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
+{
+ int ret = 0;
+
+ if (ar->hw_params.fw.fw == NULL) {
+ ath10k_err("firmware file not defined\n");
+ return -EINVAL;
+ }
+
+ if (ar->hw_params.fw.board == NULL) {
+ ath10k_err("board data file not defined");
+ return -EINVAL;
+ }
+
+ ar->board_data = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.board);
+ if (IS_ERR(ar->board_data)) {
+ ret = PTR_ERR(ar->board_data);
+ ath10k_err("could not fetch board data (%d)\n", ret);
+ goto err;
+ }
+
+ ar->firmware = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.fw);
+ if (IS_ERR(ar->firmware)) {
+ ret = PTR_ERR(ar->firmware);
+ ath10k_err("could not fetch firmware (%d)\n", ret);
+ goto err;
+ }
+
+ /* OTP may be undefined. If so, don't fetch it at all */
+ if (ar->hw_params.fw.otp == NULL)
+ return 0;
+
+ ar->otp = ath10k_fetch_fw_file(ar,
+ ar->hw_params.fw.dir,
+ ar->hw_params.fw.otp);
+ if (IS_ERR(ar->otp)) {
+ ret = PTR_ERR(ar->otp);
+ ath10k_err("could not fetch otp (%d)\n", ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ ath10k_core_free_firmware_files(ar);
return ret;
}
@@ -440,8 +476,35 @@ static int ath10k_init_hw_params(struct ath10k *ar)
return 0;
}
+static void ath10k_core_restart(struct work_struct *work)
+{
+ struct ath10k *ar = container_of(work, struct ath10k, restart_work);
+
+ mutex_lock(&ar->conf_mutex);
+
+ switch (ar->state) {
+ case ATH10K_STATE_ON:
+ ath10k_halt(ar);
+ ar->state = ATH10K_STATE_RESTARTING;
+ ieee80211_restart_hw(ar->hw);
+ break;
+ case ATH10K_STATE_OFF:
+ /* this can happen if driver is being unloaded */
+ ath10k_warn("cannot restart a device that hasn't been started\n");
+ break;
+ case ATH10K_STATE_RESTARTING:
+ case ATH10K_STATE_RESTARTED:
+ ar->state = ATH10K_STATE_WEDGED;
+ /* fall through */
+ case ATH10K_STATE_WEDGED:
+ ath10k_warn("device is wedged, will not restart\n");
+ break;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
- enum ath10k_bus bus,
const struct ath10k_hif_ops *hif_ops)
{
struct ath10k *ar;
@@ -458,9 +521,6 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
ar->hif.priv = hif_priv;
ar->hif.ops = hif_ops;
- ar->hif.bus = bus;
-
- ar->free_vdev_map = 0xFF; /* 8 vdevs */
init_completion(&ar->scan.started);
init_completion(&ar->scan.completed);
@@ -487,6 +547,8 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
init_waitqueue_head(&ar->event_queue);
+ INIT_WORK(&ar->restart_work, ath10k_core_restart);
+
return ar;
err_wq:
@@ -504,24 +566,11 @@ void ath10k_core_destroy(struct ath10k *ar)
}
EXPORT_SYMBOL(ath10k_core_destroy);
-
-int ath10k_core_register(struct ath10k *ar)
+int ath10k_core_start(struct ath10k *ar)
{
- struct ath10k_htc_ops htc_ops;
- struct bmi_target_info target_info;
int status;
- memset(&target_info, 0, sizeof(target_info));
- status = ath10k_bmi_get_target_info(ar, &target_info);
- if (status)
- goto err;
-
- ar->target_version = target_info.version;
- ar->hw->wiphy->hw_version = target_info.version;
-
- status = ath10k_init_hw_params(ar);
- if (status)
- goto err;
+ ath10k_bmi_start(ar);
if (ath10k_init_configure_target(ar)) {
status = -EINVAL;
@@ -536,32 +585,32 @@ int ath10k_core_register(struct ath10k *ar)
if (status)
goto err;
- htc_ops.target_send_suspend_complete = ath10k_send_suspend_complete;
+ ar->htc.htc_ops.target_send_suspend_complete =
+ ath10k_send_suspend_complete;
- ar->htc = ath10k_htc_create(ar, &htc_ops);
- if (IS_ERR(ar->htc)) {
- status = PTR_ERR(ar->htc);
- ath10k_err("could not create HTC (%d)\n", status);
+ status = ath10k_htc_init(ar);
+ if (status) {
+ ath10k_err("could not init HTC (%d)\n", status);
goto err;
}
status = ath10k_bmi_done(ar);
if (status)
- goto err_htc_destroy;
+ goto err;
status = ath10k_wmi_attach(ar);
if (status) {
ath10k_err("WMI attach failed: %d\n", status);
- goto err_htc_destroy;
+ goto err;
}
- status = ath10k_htc_wait_target(ar->htc);
+ status = ath10k_htc_wait_target(&ar->htc);
if (status)
goto err_wmi_detach;
- ar->htt = ath10k_htt_attach(ar);
- if (!ar->htt) {
- status = -ENOMEM;
+ status = ath10k_htt_attach(ar);
+ if (status) {
+ ath10k_err("could not attach htt (%d)\n", status);
goto err_wmi_detach;
}
@@ -588,77 +637,127 @@ int ath10k_core_register(struct ath10k *ar)
goto err_disconnect_htc;
}
- status = ath10k_htt_attach_target(ar->htt);
- if (status)
- goto err_disconnect_htc;
-
- status = ath10k_mac_register(ar);
+ status = ath10k_htt_attach_target(&ar->htt);
if (status)
goto err_disconnect_htc;
- status = ath10k_debug_create(ar);
- if (status) {
- ath10k_err("unable to initialize debugfs\n");
- goto err_unregister_mac;
- }
+ ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
return 0;
-err_unregister_mac:
- ath10k_mac_unregister(ar);
err_disconnect_htc:
- ath10k_htc_stop(ar->htc);
+ ath10k_htc_stop(&ar->htc);
err_htt_detach:
- ath10k_htt_detach(ar->htt);
+ ath10k_htt_detach(&ar->htt);
err_wmi_detach:
ath10k_wmi_detach(ar);
-err_htc_destroy:
- ath10k_htc_destroy(ar->htc);
err:
return status;
}
-EXPORT_SYMBOL(ath10k_core_register);
+EXPORT_SYMBOL(ath10k_core_start);
-void ath10k_core_unregister(struct ath10k *ar)
+void ath10k_core_stop(struct ath10k *ar)
{
- /* We must unregister from mac80211 before we stop HTC and HIF.
- * Otherwise we will fail to submit commands to FW and mac80211 will be
- * unhappy about callback failures. */
- ath10k_mac_unregister(ar);
- ath10k_htc_stop(ar->htc);
- ath10k_htt_detach(ar->htt);
+ ath10k_htc_stop(&ar->htc);
+ ath10k_htt_detach(&ar->htt);
ath10k_wmi_detach(ar);
- ath10k_htc_destroy(ar->htc);
}
-EXPORT_SYMBOL(ath10k_core_unregister);
+EXPORT_SYMBOL(ath10k_core_stop);
-int ath10k_core_target_suspend(struct ath10k *ar)
+/* mac80211 manages fw/hw initialization through start/stop hooks. However in
+ * order to know what hw capabilities should be advertised to mac80211 it is
+ * necessary to load the firmware (and tear it down immediately since start
+ * hook will try to init it again) before registering */
+static int ath10k_core_probe_fw(struct ath10k *ar)
{
- int ret;
+ struct bmi_target_info target_info;
+ int ret = 0;
+
+ ret = ath10k_hif_power_up(ar);
+ if (ret) {
+ ath10k_err("could not start pci hif (%d)\n", ret);
+ return ret;
+ }
- ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__);
+ memset(&target_info, 0, sizeof(target_info));
+ ret = ath10k_bmi_get_target_info(ar, &target_info);
+ if (ret) {
+ ath10k_err("could not get target info (%d)\n", ret);
+ ath10k_hif_power_down(ar);
+ return ret;
+ }
- ret = ath10k_wmi_pdev_suspend_target(ar);
- if (ret)
- ath10k_warn("could not suspend target (%d)\n", ret);
+ ar->target_version = target_info.version;
+ ar->hw->wiphy->hw_version = target_info.version;
- return ret;
+ ret = ath10k_init_hw_params(ar);
+ if (ret) {
+ ath10k_err("could not get hw params (%d)\n", ret);
+ ath10k_hif_power_down(ar);
+ return ret;
+ }
+
+ ret = ath10k_core_fetch_firmware_files(ar);
+ if (ret) {
+ ath10k_err("could not fetch firmware files (%d)\n", ret);
+ ath10k_hif_power_down(ar);
+ return ret;
+ }
+
+ ret = ath10k_core_start(ar);
+ if (ret) {
+ ath10k_err("could not init core (%d)\n", ret);
+ ath10k_core_free_firmware_files(ar);
+ ath10k_hif_power_down(ar);
+ return ret;
+ }
+
+ ath10k_core_stop(ar);
+ ath10k_hif_power_down(ar);
+ return 0;
}
-EXPORT_SYMBOL(ath10k_core_target_suspend);
-int ath10k_core_target_resume(struct ath10k *ar)
+int ath10k_core_register(struct ath10k *ar)
{
- int ret;
+ int status;
- ath10k_dbg(ATH10K_DBG_CORE, "%s: called", __func__);
+ status = ath10k_core_probe_fw(ar);
+ if (status) {
+ ath10k_err("could not probe fw (%d)\n", status);
+ return status;
+ }
- ret = ath10k_wmi_pdev_resume_target(ar);
- if (ret)
- ath10k_warn("could not resume target (%d)\n", ret);
+ status = ath10k_mac_register(ar);
+ if (status) {
+ ath10k_err("could not register to mac80211 (%d)\n", status);
+ goto err_release_fw;
+ }
- return ret;
+ status = ath10k_debug_create(ar);
+ if (status) {
+ ath10k_err("unable to initialize debugfs\n");
+ goto err_unregister_mac;
+ }
+
+ return 0;
+
+err_unregister_mac:
+ ath10k_mac_unregister(ar);
+err_release_fw:
+ ath10k_core_free_firmware_files(ar);
+ return status;
+}
+EXPORT_SYMBOL(ath10k_core_register);
+
+void ath10k_core_unregister(struct ath10k *ar)
+{
+ /* We must unregister from mac80211 before we stop HTC and HIF.
+ * Otherwise we will fail to submit commands to FW and mac80211 will be
+ * unhappy about callback failures. */
+ ath10k_mac_unregister(ar);
+ ath10k_core_free_firmware_files(ar);
}
-EXPORT_SYMBOL(ath10k_core_target_resume);
+EXPORT_SYMBOL(ath10k_core_unregister);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Core module for QCA988X PCIe devices.");
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index 539336d1be4..e4bba563ed4 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/pci.h>
+#include "htt.h"
#include "htc.h"
#include "hw.h"
#include "targaddrs.h"
@@ -37,16 +38,13 @@
#define ATH10K_SCAN_ID 0
#define WMI_READY_TIMEOUT (5 * HZ)
#define ATH10K_FLUSH_TIMEOUT_HZ (5*HZ)
+#define ATH10K_NUM_CHANS 38
/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
struct ath10k;
-enum ath10k_bus {
- ATH10K_BUS_PCI,
-};
-
struct ath10k_skb_cb {
dma_addr_t paddr;
bool is_mapped;
@@ -250,6 +248,28 @@ struct ath10k_debug {
struct completion event_stats_compl;
};
+enum ath10k_state {
+ ATH10K_STATE_OFF = 0,
+ ATH10K_STATE_ON,
+
+ /* When doing firmware recovery the device is first powered down.
+ * mac80211 is supposed to call in to start() hook later on. It is
+ * however possible that driver unloading and firmware crash overlap.
+ * mac80211 can wait on conf_mutex in stop() while the device is
+ * stopped in ath10k_core_restart() work holding conf_mutex. The state
+ * RESTARTED means that the device is up and mac80211 has started hw
+ * reconfiguration. Once mac80211 is done with the reconfiguration we
+ * set the state to STATE_ON in restart_complete(). */
+ ATH10K_STATE_RESTARTING,
+ ATH10K_STATE_RESTARTED,
+
+ /* The device has crashed while restarting hw. This state is like ON
+ * but commands are blocked in HTC and -ECOMM response is given. This
+ * prevents completion timeouts and makes the driver more responsive to
+ * userspace commands. This also prevents recursive recovery. */
+ ATH10K_STATE_WEDGED,
+};
+
struct ath10k {
struct ath_common ath_common;
struct ieee80211_hw *hw;
@@ -266,6 +286,7 @@ struct ath10k {
u32 hw_max_tx_power;
u32 ht_cap_info;
u32 vht_cap_info;
+ u32 num_rf_chains;
struct targetdef *targetdef;
struct hostdef *hostdef;
@@ -274,19 +295,16 @@ struct ath10k {
struct {
void *priv;
- enum ath10k_bus bus;
const struct ath10k_hif_ops *ops;
} hif;
- struct ath10k_wmi wmi;
-
wait_queue_head_t event_queue;
bool is_target_paused;
struct ath10k_bmi bmi;
-
- struct ath10k_htc *htc;
- struct ath10k_htt *htt;
+ struct ath10k_wmi wmi;
+ struct ath10k_htc htc;
+ struct ath10k_htt htt;
struct ath10k_hw_params {
u32 id;
@@ -301,6 +319,10 @@ struct ath10k {
} fw;
} hw_params;
+ const struct firmware *board_data;
+ const struct firmware *otp;
+ const struct firmware *firmware;
+
struct {
struct completion started;
struct completion completed;
@@ -350,20 +372,28 @@ struct ath10k {
struct completion offchan_tx_completed;
struct sk_buff *offchan_tx_skb;
+ enum ath10k_state state;
+
+ struct work_struct restart_work;
+
+ /* cycle count is reported twice for each visited channel during scan.
+ * access protected by data_lock */
+ u32 survey_last_rx_clear_count;
+ u32 survey_last_cycle_count;
+ struct survey_info survey[ATH10K_NUM_CHANS];
+
#ifdef CONFIG_ATH10K_DEBUGFS
struct ath10k_debug debug;
#endif
};
struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
- enum ath10k_bus bus,
const struct ath10k_hif_ops *hif_ops);
void ath10k_core_destroy(struct ath10k *ar);
+int ath10k_core_start(struct ath10k *ar);
+void ath10k_core_stop(struct ath10k *ar);
int ath10k_core_register(struct ath10k *ar);
void ath10k_core_unregister(struct ath10k *ar);
-int ath10k_core_target_suspend(struct ath10k *ar);
-int ath10k_core_target_resume(struct ath10k *ar);
-
#endif /* _CORE_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 499034b873d..3d65594fa09 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -161,7 +161,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_pdev_stats *ps;
int i;
- mutex_lock(&ar->conf_mutex);
+ spin_lock_bh(&ar->data_lock);
stats = &ar->debug.target_stats;
@@ -259,6 +259,7 @@ void ath10k_debug_read_target_stats(struct ath10k *ar,
}
}
+ spin_unlock_bh(&ar->data_lock);
mutex_unlock(&ar->conf_mutex);
complete(&ar->debug.event_stats_compl);
}
@@ -268,35 +269,35 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
{
struct ath10k *ar = file->private_data;
struct ath10k_target_stats *fw_stats;
- char *buf;
+ char *buf = NULL;
unsigned int len = 0, buf_len = 2500;
- ssize_t ret_cnt;
+ ssize_t ret_cnt = 0;
long left;
int i;
int ret;
fw_stats = &ar->debug.target_stats;
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_ON)
+ goto exit;
+
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
- return -ENOMEM;
+ goto exit;
ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
if (ret) {
ath10k_warn("could not request stats (%d)\n", ret);
- kfree(buf);
- return -EIO;
+ goto exit;
}
left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ);
+ if (left <= 0)
+ goto exit;
- if (left <= 0) {
- kfree(buf);
- return -ETIMEDOUT;
- }
-
- mutex_lock(&ar->conf_mutex);
-
+ spin_lock_bh(&ar->data_lock);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
"ath10k PDEV stats");
@@ -424,14 +425,15 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
fw_stats->peer_stat[i].peer_tx_rate);
len += scnprintf(buf + len, buf_len - len, "\n");
}
+ spin_unlock_bh(&ar->data_lock);
if (len > buf_len)
len = buf_len;
ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+exit:
mutex_unlock(&ar->conf_mutex);
-
kfree(buf);
return ret_cnt;
}
@@ -443,6 +445,60 @@ static const struct file_operations fops_fw_stats = {
.llseek = default_llseek,
};
+static ssize_t ath10k_read_simulate_fw_crash(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ const char buf[] = "To simulate firmware crash write the keyword"
+ " `crash` to this file.\nThis will force firmware"
+ " to report a crash to the host system.\n";
+ return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+static ssize_t ath10k_write_simulate_fw_crash(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath10k *ar = file->private_data;
+ char buf[32] = {};
+ int ret;
+
+ mutex_lock(&ar->conf_mutex);
+
+ simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+ if (strcmp(buf, "crash") && strcmp(buf, "crash\n")) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (ar->state != ATH10K_STATE_ON &&
+ ar->state != ATH10K_STATE_RESTARTED) {
+ ret = -ENETDOWN;
+ goto exit;
+ }
+
+ ath10k_info("simulating firmware crash\n");
+
+ ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
+ if (ret)
+ ath10k_warn("failed to force fw hang (%d)\n", ret);
+
+ if (ret == 0)
+ ret = count;
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
+static const struct file_operations fops_simulate_fw_crash = {
+ .read = ath10k_read_simulate_fw_crash,
+ .write = ath10k_write_simulate_fw_crash,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
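The new debugfs entry is driven by writing the literal keyword that the read handler advertises. A small user-space sketch that triggers it; the path is an assumption and depends on where debugfs is mounted and on the phy name:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical location; adjust for the local debugfs mount and phy. */
	const char *path =
		"/sys/kernel/debug/ieee80211/phy0/ath10k/simulate_fw_crash";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open simulate_fw_crash");
		return 1;
	}
	if (write(fd, "crash\n", strlen("crash\n")) < 0)
		perror("write");
	close(fd);
	return 0;
}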
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.debugfs_phy = debugfs_create_dir("ath10k",
@@ -459,6 +515,9 @@ int ath10k_debug_create(struct ath10k *ar)
debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
&fops_wmi_services);
+ debugfs_create_file("simulate_fw_crash", S_IRUSR, ar->debug.debugfs_phy,
+ ar, &fops_simulate_fw_crash);
+
return 0;
}
#endif /* CONFIG_ATH10K_DEBUGFS */
diff --git a/drivers/net/wireless/ath/ath10k/hif.h b/drivers/net/wireless/ath/ath10k/hif.h
index 73a24d44d1b..dcdea68bcc0 100644
--- a/drivers/net/wireless/ath/ath10k/hif.h
+++ b/drivers/net/wireless/ath/ath10k/hif.h
@@ -46,8 +46,11 @@ struct ath10k_hif_ops {
void *request, u32 request_len,
void *response, u32 *response_len);
+ /* Post BMI phase, after FW is loaded. Starts regular operation */
int (*start)(struct ath10k *ar);
+ /* Clean up what start() did. This does not revert to BMI phase. If
+ * so desired, call power_down() and power_up() */
void (*stop)(struct ath10k *ar);
int (*map_service_to_pipe)(struct ath10k *ar, u16 service_id,
@@ -66,10 +69,20 @@ struct ath10k_hif_ops {
*/
void (*send_complete_check)(struct ath10k *ar, u8 pipe_id, int force);
- void (*init)(struct ath10k *ar,
- struct ath10k_hif_cb *callbacks);
+ void (*set_callbacks)(struct ath10k *ar,
+ struct ath10k_hif_cb *callbacks);
u16 (*get_free_queue_number)(struct ath10k *ar, u8 pipe_id);
+
+ /* Power up the device and enter BMI transfer mode for FW download */
+ int (*power_up)(struct ath10k *ar);
+
+ /* Power down the device and free up resources. stop() must be called
+ * before this if start() was called earlier */
+ void (*power_down)(struct ath10k *ar);
+
+ int (*suspend)(struct ath10k *ar);
+ int (*resume)(struct ath10k *ar);
};
@@ -122,10 +135,10 @@ static inline void ath10k_hif_send_complete_check(struct ath10k *ar,
ar->hif.ops->send_complete_check(ar, pipe_id, force);
}
-static inline void ath10k_hif_init(struct ath10k *ar,
- struct ath10k_hif_cb *callbacks)
+static inline void ath10k_hif_set_callbacks(struct ath10k *ar,
+ struct ath10k_hif_cb *callbacks)
{
- ar->hif.ops->init(ar, callbacks);
+ ar->hif.ops->set_callbacks(ar, callbacks);
}
static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
@@ -134,4 +147,30 @@ static inline u16 ath10k_hif_get_free_queue_number(struct ath10k *ar,
return ar->hif.ops->get_free_queue_number(ar, pipe_id);
}
+static inline int ath10k_hif_power_up(struct ath10k *ar)
+{
+ return ar->hif.ops->power_up(ar);
+}
+
+static inline void ath10k_hif_power_down(struct ath10k *ar)
+{
+ ar->hif.ops->power_down(ar);
+}
+
+static inline int ath10k_hif_suspend(struct ath10k *ar)
+{
+ if (!ar->hif.ops->suspend)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->suspend(ar);
+}
+
+static inline int ath10k_hif_resume(struct ath10k *ar)
+{
+ if (!ar->hif.ops->resume)
+ return -EOPNOTSUPP;
+
+ return ar->hif.ops->resume(ar);
+}
+
#endif /* _HIF_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
index 74363c94939..ef3329ef52f 100644
--- a/drivers/net/wireless/ath/ath10k/htc.c
+++ b/drivers/net/wireless/ath/ath10k/htc.c
@@ -246,15 +246,22 @@ int ath10k_htc_send(struct ath10k_htc *htc,
{
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
+ if (htc->ar->state == ATH10K_STATE_WEDGED)
+ return -ECOMM;
+
if (eid >= ATH10K_HTC_EP_COUNT) {
ath10k_warn("Invalid endpoint id: %d\n", eid);
return -ENOENT;
}
- skb_push(skb, sizeof(struct ath10k_htc_hdr));
-
spin_lock_bh(&htc->tx_lock);
+ if (htc->stopped) {
+ spin_unlock_bh(&htc->tx_lock);
+ return -ESHUTDOWN;
+ }
+
__skb_queue_tail(&ep->tx_queue, skb);
+ skb_push(skb, sizeof(struct ath10k_htc_hdr));
spin_unlock_bh(&htc->tx_lock);
queue_work(htc->ar->workqueue, &ep->send_work);
@@ -265,25 +272,19 @@ static int ath10k_htc_tx_completion_handler(struct ath10k *ar,
struct sk_buff *skb,
unsigned int eid)
{
- struct ath10k_htc *htc = ar->htc;
+ struct ath10k_htc *htc = &ar->htc;
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
- bool stopping;
ath10k_htc_notify_tx_completion(ep, skb);
/* the skb now belongs to the completion handler */
+ /* note: when using TX credit flow, the re-checking of queues happens
+ * when credits flow back from the target. in the non-TX credit case,
+ * we recheck after the packet completes */
spin_lock_bh(&htc->tx_lock);
- stopping = htc->stopping;
- spin_unlock_bh(&htc->tx_lock);
-
- if (!ep->tx_credit_flow_enabled && !stopping)
- /*
- * note: when using TX credit flow, the re-checking of
- * queues happens when credits flow back from the target.
- * in the non-TX credit case, we recheck after the packet
- * completes
- */
+ if (!ep->tx_credit_flow_enabled && !htc->stopped)
queue_work(ar->workqueue, &ep->send_work);
+ spin_unlock_bh(&htc->tx_lock);
return 0;
}
@@ -414,7 +415,7 @@ static int ath10k_htc_rx_completion_handler(struct ath10k *ar,
u8 pipe_id)
{
int status = 0;
- struct ath10k_htc *htc = ar->htc;
+ struct ath10k_htc *htc = &ar->htc;
struct ath10k_htc_hdr *hdr;
struct ath10k_htc_ep *ep;
u16 payload_len;
@@ -751,8 +752,9 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
tx_alloc = ath10k_htc_get_credit_allocation(htc,
conn_req->service_id);
if (!tx_alloc)
- ath10k_warn("HTC Service %s does not allocate target credits\n",
- htc_service_name(conn_req->service_id));
+ ath10k_dbg(ATH10K_DBG_HTC,
+ "HTC Service %s does not allocate target credits\n",
+ htc_service_name(conn_req->service_id));
skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
if (!skb) {
@@ -947,7 +949,7 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
struct ath10k_htc_ep *ep;
spin_lock_bh(&htc->tx_lock);
- htc->stopping = true;
+ htc->stopped = true;
spin_unlock_bh(&htc->tx_lock);
for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
@@ -956,26 +958,18 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
}
ath10k_hif_stop(htc->ar);
- ath10k_htc_reset_endpoint_states(htc);
}
/* registered target arrival callback from the HIF layer */
-struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
- struct ath10k_htc_ops *htc_ops)
+int ath10k_htc_init(struct ath10k *ar)
{
struct ath10k_hif_cb htc_callbacks;
struct ath10k_htc_ep *ep = NULL;
- struct ath10k_htc *htc = NULL;
-
- /* FIXME: use struct ath10k instead */
- htc = kzalloc(sizeof(struct ath10k_htc), GFP_KERNEL);
- if (!htc)
- return ERR_PTR(-ENOMEM);
+ struct ath10k_htc *htc = &ar->htc;
spin_lock_init(&htc->tx_lock);
- memcpy(&htc->htc_ops, htc_ops, sizeof(struct ath10k_htc_ops));
-
+ htc->stopped = false;
ath10k_htc_reset_endpoint_states(htc);
/* setup HIF layer callbacks */
@@ -986,15 +980,10 @@ struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
/* Get HIF default pipe for HTC message exchange */
ep = &htc->endpoint[ATH10K_HTC_EP_0];
- ath10k_hif_init(ar, &htc_callbacks);
+ ath10k_hif_set_callbacks(ar, &htc_callbacks);
ath10k_hif_get_default_pipe(ar, &ep->ul_pipe_id, &ep->dl_pipe_id);
init_completion(&htc->ctl_resp);
- return htc;
-}
-
-void ath10k_htc_destroy(struct ath10k_htc *htc)
-{
- kfree(htc);
+ return 0;
}
diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
index fa45844b59f..e1dd8c76185 100644
--- a/drivers/net/wireless/ath/ath10k/htc.h
+++ b/drivers/net/wireless/ath/ath10k/htc.h
@@ -335,7 +335,7 @@ struct ath10k_htc {
struct ath10k *ar;
struct ath10k_htc_ep endpoint[ATH10K_HTC_EP_COUNT];
- /* protects endpoint and stopping fields */
+ /* protects endpoint and stopped fields */
spinlock_t tx_lock;
struct ath10k_htc_ops htc_ops;
@@ -349,11 +349,10 @@ struct ath10k_htc {
struct ath10k_htc_svc_tx_credits service_tx_alloc[ATH10K_HTC_EP_COUNT];
int target_credit_size;
- bool stopping;
+ bool stopped;
};
-struct ath10k_htc *ath10k_htc_create(struct ath10k *ar,
- struct ath10k_htc_ops *htc_ops);
+int ath10k_htc_init(struct ath10k *ar);
int ath10k_htc_wait_target(struct ath10k_htc *htc);
int ath10k_htc_start(struct ath10k_htc *htc);
int ath10k_htc_connect_service(struct ath10k_htc *htc,
@@ -362,7 +361,6 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
int ath10k_htc_send(struct ath10k_htc *htc, enum ath10k_htc_ep_id eid,
struct sk_buff *packet);
void ath10k_htc_stop(struct ath10k_htc *htc);
-void ath10k_htc_destroy(struct ath10k_htc *htc);
struct sk_buff *ath10k_htc_alloc_skb(int size);
#endif
diff --git a/drivers/net/wireless/ath/ath10k/htt.c b/drivers/net/wireless/ath/ath10k/htt.c
index 185a5468a2f..39342c5cfcb 100644
--- a/drivers/net/wireless/ath/ath10k/htt.c
+++ b/drivers/net/wireless/ath/ath10k/htt.c
@@ -16,6 +16,7 @@
*/
#include <linux/slab.h>
+#include <linux/if_ether.h>
#include "htt.h"
#include "core.h"
@@ -36,7 +37,7 @@ static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_DATA_MSG;
- status = ath10k_htc_connect_service(htt->ar->htc, &conn_req,
+ status = ath10k_htc_connect_service(&htt->ar->htc, &conn_req,
&conn_resp);
if (status)
@@ -47,15 +48,11 @@ static int ath10k_htt_htc_attach(struct ath10k_htt *htt)
return 0;
}
-struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
+int ath10k_htt_attach(struct ath10k *ar)
{
- struct ath10k_htt *htt;
+ struct ath10k_htt *htt = &ar->htt;
int ret;
- htt = kzalloc(sizeof(*htt), GFP_KERNEL);
- if (!htt)
- return NULL;
-
htt->ar = ar;
htt->max_throughput_mbps = 800;
@@ -65,8 +62,11 @@ struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
* since ath10k_htt_rx_attach involves sending a rx ring configure
* message to the target.
*/
- if (ath10k_htt_htc_attach(htt))
+ ret = ath10k_htt_htc_attach(htt);
+ if (ret) {
+ ath10k_err("could not attach htt htc (%d)\n", ret);
goto err_htc_attach;
+ }
ret = ath10k_htt_tx_attach(htt);
if (ret) {
@@ -74,8 +74,11 @@ struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
goto err_htc_attach;
}
- if (ath10k_htt_rx_attach(htt))
+ ret = ath10k_htt_rx_attach(htt);
+ if (ret) {
+ ath10k_err("could not attach htt rx (%d)\n", ret);
goto err_rx_attach;
+ }
/*
* Prefetch enough data to satisfy target
@@ -89,13 +92,12 @@ struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar)
8 + /* llc snap */
2; /* ip4 dscp or ip6 priority */
- return htt;
+ return 0;
err_rx_attach:
ath10k_htt_tx_detach(htt);
err_htc_attach:
- kfree(htt);
- return NULL;
+ return ret;
}
#define HTT_TARGET_VERSION_TIMEOUT_HZ (3*HZ)
@@ -148,5 +150,4 @@ void ath10k_htt_detach(struct ath10k_htt *htt)
{
ath10k_htt_rx_detach(htt);
ath10k_htt_tx_detach(htt);
- kfree(htt);
}
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index a7a7aa04053..318be4629cd 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -20,7 +20,6 @@
#include <linux/bug.h>
-#include "core.h"
#include "htc.h"
#include "rx_desc.h"
@@ -1317,7 +1316,7 @@ struct htt_rx_desc {
#define HTT_LOG2_MAX_CACHE_LINE_SIZE 7 /* 2^7 = 128 */
#define HTT_MAX_CACHE_LINE_SIZE_MASK ((1 << HTT_LOG2_MAX_CACHE_LINE_SIZE) - 1)
-struct ath10k_htt *ath10k_htt_attach(struct ath10k *ar);
+int ath10k_htt_attach(struct ath10k *ar);
int ath10k_htt_attach_target(struct ath10k_htt *htt);
void ath10k_htt_detach(struct ath10k_htt *htt);
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index de058d7adca..e784c40b904 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -15,6 +15,7 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
@@ -803,6 +804,37 @@ static bool ath10k_htt_rx_has_fcs_err(struct sk_buff *skb)
return false;
}
+static int ath10k_htt_rx_get_csum_state(struct sk_buff *skb)
+{
+ struct htt_rx_desc *rxd;
+ u32 flags, info;
+ bool is_ip4, is_ip6;
+ bool is_tcp, is_udp;
+ bool ip_csum_ok, tcpudp_csum_ok;
+
+ rxd = (void *)skb->data - sizeof(*rxd);
+ flags = __le32_to_cpu(rxd->attention.flags);
+ info = __le32_to_cpu(rxd->msdu_start.info1);
+
+ is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
+ is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
+ is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
+ is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
+ ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
+ tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
+
+ if (!is_ip4 && !is_ip6)
+ return CHECKSUM_NONE;
+ if (!is_tcp && !is_udp)
+ return CHECKSUM_NONE;
+ if (!ip_csum_ok)
+ return CHECKSUM_NONE;
+ if (!tcpudp_csum_ok)
+ return CHECKSUM_NONE;
+
+ return CHECKSUM_UNNECESSARY;
+}
+
static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
struct htt_rx_indication *rx)
{
@@ -814,6 +846,7 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
u8 *fw_desc;
int i, j;
int ret;
+ int ip_summed;
memset(&info, 0, sizeof(info));
@@ -888,6 +921,11 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
continue;
}
+ /* The skb is not yet processed and it may be
+ * reallocated. Since the offload is in the original
+ * skb, extract the checksum now and assign it later */
+ ip_summed = ath10k_htt_rx_get_csum_state(msdu_head);
+
info.skb = msdu_head;
info.fcs_err = ath10k_htt_rx_has_fcs_err(msdu_head);
info.signal = ATH10K_DEFAULT_NOISE_FLOOR;
@@ -913,6 +951,8 @@ static void ath10k_htt_rx_handler(struct ath10k_htt *htt,
if (ath10k_htt_rx_hdr_is_amsdu((void *)info.skb->data))
ath10k_dbg(ATH10K_DBG_HTT, "htt mpdu is amsdu\n");
+ info.skb->ip_summed = ip_summed;
+
ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "htt mpdu: ",
info.skb->data, info.skb->len);
ath10k_process_rx(htt->ar, &info);
@@ -979,6 +1019,7 @@ static void ath10k_htt_rx_frag_handler(struct ath10k_htt *htt,
info.status = HTT_RX_IND_MPDU_STATUS_OK;
info.encrypt_type = MS(__le32_to_cpu(rxd->mpdu_start.info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
+ info.skb->ip_summed = ath10k_htt_rx_get_csum_state(info.skb);
if (tkip_mic_err) {
ath10k_warn("tkip mic error\n");
@@ -1036,7 +1077,7 @@ end:
void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
- struct ath10k_htt *htt = ar->htt;
+ struct ath10k_htt *htt = &ar->htt;
struct htt_resp *resp = (struct htt_resp *)skb->data;
/* confirm alignment */
diff --git a/drivers/net/wireless/ath/ath10k/htt_tx.c b/drivers/net/wireless/ath/ath10k/htt_tx.c
index ef79106db24..656c2546b29 100644
--- a/drivers/net/wireless/ath/ath10k/htt_tx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_tx.c
@@ -92,7 +92,7 @@ int ath10k_htt_tx_attach(struct ath10k_htt *htt)
/* At the beginning free queue number should hint us the maximum
* queue length */
- pipe = htt->ar->htc->endpoint[htt->eid].ul_pipe_id;
+ pipe = htt->ar->htc.endpoint[htt->eid].ul_pipe_id;
htt->max_num_pending_tx = ath10k_hif_get_free_queue_number(htt->ar,
pipe);
@@ -153,7 +153,7 @@ void ath10k_htt_tx_detach(struct ath10k_htt *htt)
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
- struct ath10k_htt *htt = ar->htt;
+ struct ath10k_htt *htt = &ar->htt;
if (skb_cb->htt.is_conf) {
dev_kfree_skb_any(skb);
@@ -194,7 +194,7 @@ int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
ATH10K_SKB_CB(skb)->htt.is_conf = true;
- ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
@@ -281,7 +281,7 @@ int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
ATH10K_SKB_CB(skb)->htt.is_conf = true;
- ret = ath10k_htc_send(htt->ar->htc, htt->eid, skb);
+ ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
@@ -346,7 +346,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
skb_cb->htt.refcount = 2;
skb_cb->htt.msdu = msdu;
- res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
+ res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
goto err;
@@ -465,6 +465,8 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
flags1 = 0;
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
+ flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
frags_paddr = ATH10K_SKB_CB(txfrag)->paddr;
@@ -486,7 +488,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
skb_cb->htt.txfrag = txfrag;
skb_cb->htt.msdu = msdu;
- res = ath10k_htc_send(htt->ar->htc, htt->eid, txdesc);
+ res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
if (res)
goto err;
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index da5c333d0d4..cf2ba4d850c 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -20,6 +20,7 @@
#include <net/mac80211.h>
#include <linux/etherdevice.h>
+#include "hif.h"
#include "core.h"
#include "debug.h"
#include "wmi.h"
@@ -43,6 +44,8 @@ static int ath10k_send_key(struct ath10k_vif *arvif,
.macaddr = macaddr,
};
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
arg.key_flags = WMI_KEY_PAIRWISE;
else
@@ -87,6 +90,8 @@ static int ath10k_install_key(struct ath10k_vif *arvif,
struct ath10k *ar = arvif->ar;
int ret;
+ lockdep_assert_held(&ar->conf_mutex);
+
INIT_COMPLETION(ar->install_key_done);
ret = ath10k_send_key(arvif, key, cmd, macaddr);
@@ -327,6 +332,29 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
return 0;
}
+static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
+{
+ if (value != 0xFFFFFFFF)
+ value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold,
+ ATH10K_RTS_MAX);
+
+ return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_RTS_THRESHOLD,
+ value);
+}
+
+static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
+{
+ if (value != 0xFFFFFFFF)
+ value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
+ ATH10K_FRAGMT_THRESHOLD_MIN,
+ ATH10K_FRAGMT_THRESHOLD_MAX);
+
+ return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
+ value);
+}
+
static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
{
int ret;
@@ -364,6 +392,20 @@ static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
spin_unlock_bh(&ar->data_lock);
}
+static void ath10k_peer_cleanup_all(struct ath10k *ar)
+{
+ struct ath10k_peer *peer, *tmp;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ spin_lock_bh(&ar->data_lock);
+ list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
+ list_del(&peer->list);
+ kfree(peer);
+ }
+ spin_unlock_bh(&ar->data_lock);
+}
+
/************************/
/* Interface management */
/************************/
@@ -372,6 +414,8 @@ static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
{
int ret;
+ lockdep_assert_held(&ar->conf_mutex);
+
ret = wait_for_completion_timeout(&ar->vdev_setup_done,
ATH10K_VDEV_SETUP_TIMEOUT_HZ);
if (ret == 0)
@@ -605,6 +649,8 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
{
int ret = 0;
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
if (!info->enable_beacon) {
ath10k_vdev_stop(arvif);
return;
@@ -631,6 +677,8 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
{
int ret = 0;
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
if (!info->ibss_joined) {
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, self_peer);
if (ret)
@@ -680,6 +728,8 @@ static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
enum wmi_sta_ps_mode psmode;
int ret;
+ lockdep_assert_held(&arvif->ar->conf_mutex);
+
if (vif->type != NL80211_IFTYPE_STATION)
return;
@@ -722,6 +772,8 @@ static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
struct ieee80211_bss_conf *bss_conf,
struct wmi_peer_assoc_complete_arg *arg)
{
+ lockdep_assert_held(&ar->conf_mutex);
+
memcpy(arg->addr, sta->addr, ETH_ALEN);
arg->vdev_id = arvif->vdev_id;
arg->peer_aid = sta->aid;
@@ -764,6 +816,8 @@ static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
const u8 *rsnie = NULL;
const u8 *wpaie = NULL;
+ lockdep_assert_held(&ar->conf_mutex);
+
bss = cfg80211_get_bss(ar->hw->wiphy, ar->hw->conf.chandef.chan,
info->bssid, NULL, 0, 0, 0);
if (bss) {
@@ -804,6 +858,8 @@ static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
u32 ratemask;
int i;
+ lockdep_assert_held(&ar->conf_mutex);
+
sband = ar->hw->wiphy->bands[ar->hw->conf.chandef.chan->band];
ratemask = sta->supp_rates[ar->hw->conf.chandef.chan->band];
rates = sband->bitrates;
@@ -827,6 +883,8 @@ static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
int smps;
int i, n;
+ lockdep_assert_held(&ar->conf_mutex);
+
if (!ht_cap->ht_supported)
return;
@@ -905,6 +963,8 @@ static void ath10k_peer_assoc_h_qos_ap(struct ath10k *ar,
u32 uapsd = 0;
u32 max_sp = 0;
+ lockdep_assert_held(&ar->conf_mutex);
+
if (sta->wme)
arg->peer_flags |= WMI_PEER_QOS;
@@ -1056,6 +1116,8 @@ static int ath10k_peer_assoc(struct ath10k *ar,
{
struct wmi_peer_assoc_complete_arg arg;
+ lockdep_assert_held(&ar->conf_mutex);
+
memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg));
ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg);
@@ -1079,6 +1141,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
struct ieee80211_sta *ap_sta;
int ret;
+ lockdep_assert_held(&ar->conf_mutex);
+
rcu_read_lock();
ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
@@ -1119,6 +1183,8 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
int ret;
+ lockdep_assert_held(&ar->conf_mutex);
+
/*
* For some reason, calling VDEV-DOWN before VDEV-STOP
* makes the FW to send frames via HTT after disassociation.
@@ -1152,6 +1218,8 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
{
int ret = 0;
+ lockdep_assert_held(&ar->conf_mutex);
+
ret = ath10k_peer_assoc(ar, arvif, sta, NULL);
if (ret) {
ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr);
@@ -1172,6 +1240,8 @@ static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
{
int ret = 0;
+ lockdep_assert_held(&ar->conf_mutex);
+
ret = ath10k_clear_peer_keys(arvif, sta->addr);
if (ret) {
ath10k_warn("could not clear all peer wep keys (%d)\n", ret);
@@ -1198,6 +1268,8 @@ static int ath10k_update_channel_list(struct ath10k *ar)
int ret;
int i;
+ lockdep_assert_held(&ar->conf_mutex);
+
bands = hw->wiphy->bands;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
if (!bands[band])
@@ -1276,21 +1348,19 @@ static int ath10k_update_channel_list(struct ath10k *ar)
return ret;
}
-static void ath10k_reg_notifier(struct wiphy *wiphy,
- struct regulatory_request *request)
+static void ath10k_regd_update(struct ath10k *ar)
{
- struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct reg_dmn_pair_mapping *regpair;
- struct ath10k *ar = hw->priv;
int ret;
- ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
+ lockdep_assert_held(&ar->conf_mutex);
ret = ath10k_update_channel_list(ar);
if (ret)
ath10k_warn("could not update channel list (%d)\n", ret);
regpair = ar->ath_common.regulatory.regpair;
+
/* Target allows setting up per-band regdomain but ath_common provides
* a combined one only */
ret = ath10k_wmi_pdev_set_regdomain(ar,
@@ -1303,6 +1373,20 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
ath10k_warn("could not set pdev regdomain (%d)\n", ret);
}
+static void ath10k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ath10k *ar = hw->priv;
+
+ ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
+
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state == ATH10K_STATE_ON)
+ ath10k_regd_update(ar);
+ mutex_unlock(&ar->conf_mutex);
+}
+
/***************/
/* TX handlers */
/***************/
@@ -1322,9 +1406,9 @@ static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
return;
qos_ctl = ieee80211_get_qos_ctl(hdr);
- memmove(qos_ctl, qos_ctl + IEEE80211_QOS_CTL_LEN,
- skb->len - ieee80211_hdrlen(hdr->frame_control));
- skb_trim(skb, skb->len - IEEE80211_QOS_CTL_LEN);
+ memmove(skb->data + IEEE80211_QOS_CTL_LEN,
+ skb->data, (void *)qos_ctl - (void *)skb->data);
+ skb_pull(skb, IEEE80211_QOS_CTL_LEN);
}
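
The workaround above strips the 2-byte QoS control field by sliding the bytes that precede it forward and then advancing the buffer start, instead of moving the payload back and trimming the tail as the old code did. A minimal standalone sketch of that pattern, using a plain byte buffer in place of the skb and a made-up 3-byte header:

/* Illustrative sketch (not driver code): remove a 2-byte field from the
 * middle of a header by sliding the bytes in front of it forward and then
 * advancing the start pointer, the same idea as the memmove + skb_pull
 * pair above, with a plain byte buffer standing in for the skb. */
#include <stdio.h>
#include <string.h>

#define FIELD_LEN 2	/* stands in for IEEE80211_QOS_CTL_LEN */

int main(void)
{
	unsigned char buf[] = { 'H', 'D', 'R', 'q', 'c', 'P', 'A', 'Y' };
	unsigned char *data = buf;		/* skb->data */
	size_t hdr_before_field = 3;		/* bytes preceding the field */

	/* slide the leading header bytes over the field ... */
	memmove(data + FIELD_LEN, data, hdr_before_field);
	/* ... and drop the now-unused front of the buffer (skb_pull) */
	data += FIELD_LEN;

	/* prints "HDRPAY" */
	printf("%.*s\n", (int)(sizeof(buf) - FIELD_LEN), (const char *)data);
	return 0;
}
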
static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
@@ -1397,15 +1481,15 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
int ret;
if (ieee80211_is_mgmt(hdr->frame_control))
- ret = ath10k_htt_mgmt_tx(ar->htt, skb);
+ ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
else if (ieee80211_is_nullfunc(hdr->frame_control))
/* FW does not report tx status properly for NullFunc frames
* unless they are sent through mgmt tx path. mac80211 sends
* those frames when it detects link/beacon loss and depends on
* the tx status to be correct. */
- ret = ath10k_htt_mgmt_tx(ar->htt, skb);
+ ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
else
- ret = ath10k_htt_tx(ar->htt, skb);
+ ret = ath10k_htt_tx(&ar->htt, skb);
if (ret) {
ath10k_warn("tx failed (%d). dropping packet.\n", ret);
@@ -1552,6 +1636,10 @@ static int ath10k_abort_scan(struct ath10k *ar)
ret = ath10k_wmi_stop_scan(ar, &arg);
if (ret) {
ath10k_warn("could not submit wmi stop scan (%d)\n", ret);
+ spin_lock_bh(&ar->data_lock);
+ ar->scan.in_progress = false;
+ ath10k_offchan_tx_purge(ar);
+ spin_unlock_bh(&ar->data_lock);
return -EIO;
}
@@ -1645,10 +1733,14 @@ static void ath10k_tx(struct ieee80211_hw *hw,
tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
}
- ath10k_tx_h_qos_workaround(hw, control, skb);
- ath10k_tx_h_update_wep_key(skb);
- ath10k_tx_h_add_p2p_noa_ie(ar, skb);
- ath10k_tx_h_seq_no(skb);
+ /* it makes no sense to process injected frames like that */
+ if (info->control.vif &&
+ info->control.vif->type != NL80211_IFTYPE_MONITOR) {
+ ath10k_tx_h_qos_workaround(hw, control, skb);
+ ath10k_tx_h_update_wep_key(skb);
+ ath10k_tx_h_add_p2p_noa_ie(ar, skb);
+ ath10k_tx_h_seq_no(skb);
+ }
memset(ATH10K_SKB_CB(skb), 0, sizeof(*ATH10K_SKB_CB(skb)));
ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id;
@@ -1673,10 +1765,57 @@ static void ath10k_tx(struct ieee80211_hw *hw,
/*
* Initialize various parameters with default values.
*/
+void ath10k_halt(struct ath10k *ar)
+{
+ lockdep_assert_held(&ar->conf_mutex);
+
+ del_timer_sync(&ar->scan.timeout);
+ ath10k_offchan_tx_purge(ar);
+ ath10k_peer_cleanup_all(ar);
+ ath10k_core_stop(ar);
+ ath10k_hif_power_down(ar);
+
+ spin_lock_bh(&ar->data_lock);
+ if (ar->scan.in_progress) {
+ del_timer(&ar->scan.timeout);
+ ar->scan.in_progress = false;
+ ieee80211_scan_completed(ar->hw, true);
+ }
+ spin_unlock_bh(&ar->data_lock);
+}
+
static int ath10k_start(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
- int ret;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state != ATH10K_STATE_OFF &&
+ ar->state != ATH10K_STATE_RESTARTING) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ ret = ath10k_hif_power_up(ar);
+ if (ret) {
+ ath10k_err("could not init hif (%d)\n", ret);
+ ar->state = ATH10K_STATE_OFF;
+ goto exit;
+ }
+
+ ret = ath10k_core_start(ar);
+ if (ret) {
+ ath10k_err("could not init core (%d)\n", ret);
+ ath10k_hif_power_down(ar);
+ ar->state = ATH10K_STATE_OFF;
+ goto exit;
+ }
+
+ if (ar->state == ATH10K_STATE_OFF)
+ ar->state = ATH10K_STATE_ON;
+ else if (ar->state == ATH10K_STATE_RESTARTING)
+ ar->state = ATH10K_STATE_RESTARTED;
ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1);
if (ret)
@@ -1688,6 +1827,10 @@ static int ath10k_start(struct ieee80211_hw *hw)
ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
ret);
+ ath10k_regd_update(ar);
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
return 0;
}
@@ -1695,18 +1838,48 @@ static void ath10k_stop(struct ieee80211_hw *hw)
{
struct ath10k *ar = hw->priv;
- /* avoid leaks in case FW never confirms scan for offchannel */
+ mutex_lock(&ar->conf_mutex);
+ if (ar->state == ATH10K_STATE_ON ||
+ ar->state == ATH10K_STATE_RESTARTED ||
+ ar->state == ATH10K_STATE_WEDGED)
+ ath10k_halt(ar);
+
+ ar->state = ATH10K_STATE_OFF;
+ mutex_unlock(&ar->conf_mutex);
+
cancel_work_sync(&ar->offchan_tx_work);
- ath10k_offchan_tx_purge(ar);
+ cancel_work_sync(&ar->restart_work);
}
-static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
+static void ath10k_config_ps(struct ath10k *ar)
{
struct ath10k_generic_iter ar_iter;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ /* During HW reconfiguration mac80211 reports all interfaces that were
+ * running until reconfiguration was started. Since FW doesn't have any
+ * vdevs at this point we must not iterate over this interface list.
+ * This setting will be updated upon add_interface(). */
+ if (ar->state == ATH10K_STATE_RESTARTED)
+ return;
+
+ memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
+ ar_iter.ar = ar;
+
+ ieee80211_iterate_active_interfaces_atomic(
+ ar->hw, IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_ps_iter, &ar_iter);
+
+ if (ar_iter.ret)
+ ath10k_warn("failed to set ps config (%d)\n", ar_iter.ret);
+}
+
+static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
+{
struct ath10k *ar = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
int ret = 0;
- u32 flags;
mutex_lock(&ar->conf_mutex);
@@ -1718,18 +1891,8 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
spin_unlock_bh(&ar->data_lock);
}
- if (changed & IEEE80211_CONF_CHANGE_PS) {
- memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
- ar_iter.ar = ar;
- flags = IEEE80211_IFACE_ITER_RESUME_ALL;
-
- ieee80211_iterate_active_interfaces_atomic(hw,
- flags,
- ath10k_ps_iter,
- &ar_iter);
-
- ret = ar_iter.ret;
- }
+ if (changed & IEEE80211_CONF_CHANGE_PS)
+ ath10k_config_ps(ar);
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
if (conf->flags & IEEE80211_CONF_MONITOR)
@@ -1738,6 +1901,7 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
ret = ath10k_monitor_destroy(ar);
}
+ ath10k_wmi_flush_tx(ar);
mutex_unlock(&ar->conf_mutex);
return ret;
}
@@ -1761,6 +1925,8 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
+ memset(arvif, 0, sizeof(*arvif));
+
arvif->ar = ar;
arvif->vif = vif;
@@ -1859,6 +2025,16 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ath10k_warn("Failed to set PSPOLL count: %d\n", ret);
}
+ ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
+ if (ret)
+ ath10k_warn("failed to set rts threshold for vdev %d (%d)\n",
+ arvif->vdev_id, ret);
+
+ ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
+ if (ret)
+ ath10k_warn("failed to set frag threshold for vdev %d (%d)\n",
+ arvif->vdev_id, ret);
+
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
ar->monitor_present = true;
@@ -2164,6 +2340,8 @@ static int ath10k_hw_scan(struct ieee80211_hw *hw,
arg.ssids[i].len = req->ssids[i].ssid_len;
arg.ssids[i].ssid = req->ssids[i].ssid;
}
+ } else {
+ arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
}
if (req->n_channels) {
@@ -2363,6 +2541,8 @@ static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
u32 value = 0;
int ret = 0;
+ lockdep_assert_held(&ar->conf_mutex);
+
if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
return 0;
@@ -2558,11 +2738,16 @@ static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
u32 rts = ar_iter->ar->hw->wiphy->rts_threshold;
- rts = min_t(u32, rts, ATH10K_RTS_MAX);
+ lockdep_assert_held(&arvif->ar->conf_mutex);
- ar_iter->ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id,
- WMI_VDEV_PARAM_RTS_THRESHOLD,
- rts);
+ /* During HW reconfiguration mac80211 reports all interfaces that were
+ * running until reconfiguration was started. Since FW doesn't have any
+ * vdevs at this point we must not iterate over this interface list.
+ * This setting will be updated upon add_interface(). */
+ if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
+ return;
+
+ ar_iter->ret = ath10k_mac_set_rts(arvif, rts);
if (ar_iter->ret)
ath10k_warn("Failed to set RTS threshold for VDEV: %d\n",
arvif->vdev_id);
@@ -2581,8 +2766,9 @@ static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
ar_iter.ar = ar;
mutex_lock(&ar->conf_mutex);
- ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
- ath10k_set_rts_iter, &ar_iter);
+ ieee80211_iterate_active_interfaces_atomic(
+ hw, IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_set_rts_iter, &ar_iter);
mutex_unlock(&ar->conf_mutex);
return ar_iter.ret;
@@ -2593,17 +2779,17 @@ static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
struct ath10k_generic_iter *ar_iter = data;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
u32 frag = ar_iter->ar->hw->wiphy->frag_threshold;
- int ret;
- frag = clamp_t(u32, frag,
- ATH10K_FRAGMT_THRESHOLD_MIN,
- ATH10K_FRAGMT_THRESHOLD_MAX);
+ lockdep_assert_held(&arvif->ar->conf_mutex);
- ret = ath10k_wmi_vdev_set_param(ar_iter->ar, arvif->vdev_id,
- WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
- frag);
+ /* During HW reconfiguration mac80211 reports all interfaces that were
+ * running until reconfiguration was started. Since FW doesn't have any
+ * vdevs at this point we must not iterate over this interface list.
+ * This setting will be updated upon add_interface(). */
+ if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
+ return;
- ar_iter->ret = ret;
+ ar_iter->ret = ath10k_mac_set_frag(arvif, frag);
if (ar_iter->ret)
ath10k_warn("Failed to set frag threshold for VDEV: %d\n",
arvif->vdev_id);
@@ -2622,8 +2808,9 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
ar_iter.ar = ar;
mutex_lock(&ar->conf_mutex);
- ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
- ath10k_set_frag_iter, &ar_iter);
+ ieee80211_iterate_active_interfaces_atomic(
+ hw, IEEE80211_IFACE_ITER_NORMAL,
+ ath10k_set_frag_iter, &ar_iter);
mutex_unlock(&ar->conf_mutex);
return ar_iter.ret;
@@ -2632,6 +2819,7 @@ static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
{
struct ath10k *ar = hw->priv;
+ bool skip;
int ret;
/* mac80211 doesn't care if we really xmit queued frames or not
@@ -2639,16 +2827,29 @@ static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
if (drop)
return;
- ret = wait_event_timeout(ar->htt->empty_tx_wq, ({
+ mutex_lock(&ar->conf_mutex);
+
+ if (ar->state == ATH10K_STATE_WEDGED)
+ goto skip;
+
+ ret = wait_event_timeout(ar->htt.empty_tx_wq, ({
bool empty;
- spin_lock_bh(&ar->htt->tx_lock);
- empty = bitmap_empty(ar->htt->used_msdu_ids,
- ar->htt->max_num_pending_tx);
- spin_unlock_bh(&ar->htt->tx_lock);
- (empty);
+
+ spin_lock_bh(&ar->htt.tx_lock);
+ empty = bitmap_empty(ar->htt.used_msdu_ids,
+ ar->htt.max_num_pending_tx);
+ spin_unlock_bh(&ar->htt.tx_lock);
+
+ skip = (ar->state == ATH10K_STATE_WEDGED);
+
+ (empty || skip);
}), ATH10K_FLUSH_TIMEOUT_HZ);
- if (ret <= 0)
+
+ if (ret <= 0 || skip)
ath10k_warn("tx not flushed\n");
+
+skip:
+ mutex_unlock(&ar->conf_mutex);
}
/* TODO: Implement this function properly
@@ -2660,6 +2861,118 @@ static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
return 1;
}
+#ifdef CONFIG_PM
+static int ath10k_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ ar->is_target_paused = false;
+
+ ret = ath10k_wmi_pdev_suspend_target(ar);
+ if (ret) {
+ ath10k_warn("could not suspend target (%d)\n", ret);
+ return 1;
+ }
+
+ ret = wait_event_interruptible_timeout(ar->event_queue,
+ ar->is_target_paused == true,
+ 1 * HZ);
+ if (ret < 0) {
+ ath10k_warn("suspend interrupted (%d)\n", ret);
+ goto resume;
+ } else if (ret == 0) {
+ ath10k_warn("suspend timed out - target pause event never came\n");
+ goto resume;
+ }
+
+ ret = ath10k_hif_suspend(ar);
+ if (ret) {
+ ath10k_warn("could not suspend hif (%d)\n", ret);
+ goto resume;
+ }
+
+ return 0;
+resume:
+ ret = ath10k_wmi_pdev_resume_target(ar);
+ if (ret)
+ ath10k_warn("could not resume target (%d)\n", ret);
+ return 1;
+}
+
+static int ath10k_resume(struct ieee80211_hw *hw)
+{
+ struct ath10k *ar = hw->priv;
+ int ret;
+
+ ret = ath10k_hif_resume(ar);
+ if (ret) {
+ ath10k_warn("could not resume hif (%d)\n", ret);
+ return 1;
+ }
+
+ ret = ath10k_wmi_pdev_resume_target(ar);
+ if (ret) {
+ ath10k_warn("could not resume target (%d)\n", ret);
+ return 1;
+ }
+
+ return 0;
+}
+#endif
+
+static void ath10k_restart_complete(struct ieee80211_hw *hw)
+{
+ struct ath10k *ar = hw->priv;
+
+ mutex_lock(&ar->conf_mutex);
+
+ /* If device failed to restart it will be in a different state, e.g.
+ * ATH10K_STATE_WEDGED */
+ if (ar->state == ATH10K_STATE_RESTARTED) {
+ ath10k_info("device successfully recovered\n");
+ ar->state = ATH10K_STATE_ON;
+ }
+
+ mutex_unlock(&ar->conf_mutex);
+}
+
+static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
+ struct survey_info *survey)
+{
+ struct ath10k *ar = hw->priv;
+ struct ieee80211_supported_band *sband;
+ struct survey_info *ar_survey = &ar->survey[idx];
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+
+ if (!sband)
+ sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ];
+
+ if (!sband || idx >= sband->n_channels) {
+ ret = -ENOENT;
+ goto exit;
+ }
+
+ spin_lock_bh(&ar->data_lock);
+ memcpy(survey, ar_survey, sizeof(*survey));
+ spin_unlock_bh(&ar->data_lock);
+
+ survey->channel = &sband->channels[idx];
+
+exit:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
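
ath10k_get_survey() resolves mac80211's flat survey index into a band and channel: indices 0..n-1 cover the 2 GHz channels and the 5 GHz channels follow. freq_to_idx() in wmi.c (later in this patch) produces the same ordering so that chan-info events land in the matching ar->survey slot. A small standalone sketch of the convention, with hypothetical channel counts:

/* Sketch of the flat survey-index convention used by ath10k_get_survey()
 * above and freq_to_idx() in wmi.c: 2 GHz channels occupy indices
 * 0..n2-1, 5 GHz channels follow. Channel counts here are made up. */
#include <stdio.h>

int main(void)
{
	int n_2ghz = 14, n_5ghz = 25;	/* hypothetical channel counts */
	int idx = 16;			/* index requested by mac80211 */

	if (idx < n_2ghz)
		printf("2 GHz channel %d\n", idx);
	else if (idx - n_2ghz < n_5ghz)
		printf("5 GHz channel %d\n", idx - n_2ghz);
	else
		printf("no such channel (-ENOENT)\n");
	return 0;
}
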
+
static const struct ieee80211_ops ath10k_ops = {
.tx = ath10k_tx,
.start = ath10k_start,
@@ -2680,6 +2993,12 @@ static const struct ieee80211_ops ath10k_ops = {
.set_frag_threshold = ath10k_set_frag_threshold,
.flush = ath10k_flush,
.tx_last_beacon = ath10k_tx_last_beacon,
+ .restart_complete = ath10k_restart_complete,
+ .get_survey = ath10k_get_survey,
+#ifdef CONFIG_PM
+ .suspend = ath10k_suspend,
+ .resume = ath10k_resume,
+#endif
};
#define RATETAB_ENT(_rate, _rateid, _flags) { \
@@ -2797,9 +3116,15 @@ static const struct ieee80211_iface_limit ath10k_if_limits[] = {
.max = 8,
.types = BIT(NL80211_IFTYPE_STATION)
| BIT(NL80211_IFTYPE_P2P_CLIENT)
- | BIT(NL80211_IFTYPE_P2P_GO)
- | BIT(NL80211_IFTYPE_AP)
- }
+ },
+ {
+ .max = 3,
+ .types = BIT(NL80211_IFTYPE_P2P_GO)
+ },
+ {
+ .max = 7,
+ .types = BIT(NL80211_IFTYPE_AP)
+ },
};
static const struct ieee80211_iface_combination ath10k_if_comb = {
@@ -2814,19 +3139,18 @@ static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
{
struct ieee80211_sta_vht_cap vht_cap = {0};
u16 mcs_map;
+ int i;
vht_cap.vht_supported = 1;
vht_cap.cap = ar->vht_cap_info;
- /* FIXME: check dynamically how many streams board supports */
- mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 |
- IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 |
- IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
- IEEE80211_VHT_MCS_NOT_SUPPORTED << 14;
+ mcs_map = 0;
+ for (i = 0; i < 8; i++) {
+ if (i < ar->num_rf_chains)
+ mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i*2);
+ else
+ mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2);
+ }
vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
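
The loop above packs two bits per spatial stream into the 16-bit MCS map, marking streams beyond num_rf_chains as not supported. A standalone sketch using the mac80211 encodings SUPPORT_0_9 = 2 and NOT_SUPPORTED = 3 (the chain count here is just an example); with two RF chains it yields 0xfffa:

/* Standalone sketch of the MCS-map construction above: two bits per
 * spatial stream, streams beyond num_rf_chains marked "not supported". */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const uint16_t support_0_9 = 2, not_supported = 3;
	int num_rf_chains = 2;	/* e.g. a 2x2 card */
	uint16_t mcs_map = 0;
	int i;

	for (i = 0; i < 8; i++)
		mcs_map |= (i < num_rf_chains ? support_0_9 : not_supported)
			   << (i * 2);

	printf("0x%04x\n", mcs_map);	/* prints 0xfffa */
	return 0;
}
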
@@ -2889,7 +3213,7 @@ static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
- for (i = 0; i < WMI_MAX_SPATIAL_STREAM; i++)
+ for (i = 0; i < ar->num_rf_chains; i++)
ht_cap.mcs.rx_mask[i] = 0xFF;
ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
@@ -2948,8 +3272,10 @@ int ath10k_mac_register(struct ath10k *ar)
channels = kmemdup(ath10k_2ghz_channels,
sizeof(ath10k_2ghz_channels),
GFP_KERNEL);
- if (!channels)
- return -ENOMEM;
+ if (!channels) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
@@ -2968,11 +3294,8 @@ int ath10k_mac_register(struct ath10k *ar)
sizeof(ath10k_5ghz_channels),
GFP_KERNEL);
if (!channels) {
- if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
- band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
- kfree(band->channels);
- }
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_free;
}
band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
@@ -3032,29 +3355,36 @@ int ath10k_mac_register(struct ath10k *ar)
ar->hw->wiphy->iface_combinations = &ath10k_if_comb;
ar->hw->wiphy->n_iface_combinations = 1;
+ ar->hw->netdev_features = NETIF_F_HW_CSUM;
+
ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
ath10k_reg_notifier);
if (ret) {
ath10k_err("Regulatory initialization failed\n");
- return ret;
+ goto err_free;
}
ret = ieee80211_register_hw(ar->hw);
if (ret) {
ath10k_err("ieee80211 registration failed: %d\n", ret);
- return ret;
+ goto err_free;
}
if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
ret = regulatory_hint(ar->hw->wiphy,
ar->ath_common.regulatory.alpha2);
if (ret)
- goto exit;
+ goto err_unregister;
}
return 0;
-exit:
+
+err_unregister:
ieee80211_unregister_hw(ar->hw);
+err_free:
+ kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels);
+ kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels);
+
return ret;
}
diff --git a/drivers/net/wireless/ath/ath10k/mac.h b/drivers/net/wireless/ath/ath10k/mac.h
index 27fc92e5882..6fce9bfb19a 100644
--- a/drivers/net/wireless/ath/ath10k/mac.h
+++ b/drivers/net/wireless/ath/ath10k/mac.h
@@ -34,6 +34,7 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
void ath10k_reset_scan(unsigned long ptr);
void ath10k_offchan_tx_purge(struct ath10k *ar);
void ath10k_offchan_tx_work(struct work_struct *work);
+void ath10k_halt(struct ath10k *ar);
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
{
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 33af4672c90..e2f9ef50b1b 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -32,7 +32,7 @@
#include "ce.h"
#include "pci.h"
-unsigned int ath10k_target_ps;
+static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
@@ -54,6 +54,10 @@ static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
int num);
static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
+static void ath10k_pci_device_reset(struct ath10k *ar);
+static int ath10k_pci_reset_target(struct ath10k *ar);
+static int ath10k_pci_start_intr(struct ath10k *ar);
+static void ath10k_pci_stop_intr(struct ath10k *ar);
static const struct ce_attr host_ce_config_wlan[] = {
/* host->target HTC control and raw streams */
@@ -718,6 +722,8 @@ static void ath10k_pci_hif_dump_area(struct ath10k *ar)
reg_dump_values[i + 1],
reg_dump_values[i + 2],
reg_dump_values[i + 3]);
+
+ ieee80211_queue_work(ar->hw, &ar->restart_work);
}
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
@@ -744,8 +750,8 @@ static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
ath10k_ce_per_engine_service(ar, pipe);
}
-static void ath10k_pci_hif_post_init(struct ath10k *ar,
- struct ath10k_hif_cb *callbacks)
+static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
+ struct ath10k_hif_cb *callbacks)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -1250,10 +1256,25 @@ static void ath10k_pci_ce_deinit(struct ath10k *ar)
}
}
+static void ath10k_pci_disable_irqs(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int i;
+
+ for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
+ disable_irq(ar_pci->pdev->irq + i);
+}
+
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
+ /* Irqs are never explicitly re-enabled. They are implicitly re-enabled
+ * by ath10k_pci_start_intr(). */
+ ath10k_pci_disable_irqs(ar);
+
ath10k_pci_stop_ce(ar);
/* At this point, asynchronous threads are stopped, the target should
@@ -1263,7 +1284,8 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_process_ce(ar);
ath10k_pci_cleanup_ce(ar);
ath10k_pci_buffer_cleanup(ar);
- ath10k_pci_ce_deinit(ar);
+
+ ar_pci->started = 0;
}
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
@@ -1735,6 +1757,124 @@ static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
ath10k_pci_sleep(ar);
}
+static int ath10k_pci_hif_power_up(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
+
+ ret = ath10k_pci_start_intr(ar);
+ if (ret) {
+ ath10k_err("could not start interrupt handling (%d)\n", ret);
+ goto err;
+ }
+
+ /*
+ * Bring the target up cleanly.
+ *
+ * The target may be in an undefined state with an AUX-powered Target
+ * and a Host in WoW mode. If the Host crashes, loses power, or is
+ * restarted (without unloading the driver) then the Target is left
+ * (aux) powered and running. On a subsequent driver load, the Target
+ * is in an unexpected state. We try to catch that here in order to
+ * reset the Target and retry the probe.
+ */
+ ath10k_pci_device_reset(ar);
+
+ ret = ath10k_pci_reset_target(ar);
+ if (ret)
+ goto err_irq;
+
+ if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
+ /* Force AWAKE forever */
+ ath10k_do_pci_wake(ar);
+
+ ret = ath10k_pci_ce_init(ar);
+ if (ret)
+ goto err_ps;
+
+ ret = ath10k_pci_init_config(ar);
+ if (ret)
+ goto err_ce;
+
+ ret = ath10k_pci_wake_target_cpu(ar);
+ if (ret) {
+ ath10k_err("could not wake up target CPU (%d)\n", ret);
+ goto err_ce;
+ }
+
+ return 0;
+
+err_ce:
+ ath10k_pci_ce_deinit(ar);
+err_ps:
+ if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
+ ath10k_do_pci_sleep(ar);
+err_irq:
+ ath10k_pci_stop_intr(ar);
+err:
+ return ret;
+}
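
ath10k_pci_hif_power_up() uses the usual goto-based unwind: each acquired resource gets a label, and a failure at any step jumps to the label that releases everything obtained so far, in reverse order. A generic, runnable sketch of the idiom with placeholder step functions (none of these names exist in the driver):

/* Generic sketch of the goto-unwind idiom used above: undo completed
 * steps in reverse order when a later step fails. */
#include <stdio.h>

static int step_a(void) { puts("a up"); return 0; }
static void undo_a(void) { puts("a down"); }
static int step_b(void) { puts("b up"); return 0; }
static void undo_b(void) { puts("b down"); }
static int step_c(void) { puts("c up"); return -1; /* simulate failure */ }

int main(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto err;
	ret = step_b();
	if (ret)
		goto err_a;
	ret = step_c();
	if (ret)
		goto err_b;
	return 0;

err_b:
	undo_b();
err_a:
	undo_a();
err:
	return ret;
}
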
+
+static void ath10k_pci_hif_power_down(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ ath10k_pci_stop_intr(ar);
+
+ ath10k_pci_ce_deinit(ar);
+ if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
+ ath10k_do_pci_sleep(ar);
+}
+
+#ifdef CONFIG_PM
+
+#define ATH10K_PCI_PM_CONTROL 0x44
+
+static int ath10k_pci_hif_suspend(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct pci_dev *pdev = ar_pci->pdev;
+ u32 val;
+
+ pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
+
+ if ((val & 0x000000ff) != 0x3) {
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
+ (val & 0xffffff00) | 0x03);
+ }
+
+ return 0;
+}
+
+static int ath10k_pci_hif_resume(struct ath10k *ar)
+{
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct pci_dev *pdev = ar_pci->pdev;
+ u32 val;
+
+ pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
+
+ if ((val & 0x000000ff) != 0) {
+ pci_restore_state(pdev);
+ pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
+ val & 0xffffff00);
+ /*
+ * Suspend/Resume resets the PCI configuration space,
+ * so we have to re-disable the RETRY_TIMEOUT register (0x41)
+ * to keep PCI Tx retries from interfering with C3 CPU state
+ */
+ pci_read_config_dword(pdev, 0x40, &val);
+
+ if ((val & 0x0000ff00) != 0)
+ pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+ }
+
+ return 0;
+}
+#endif
+
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
.send_head = ath10k_pci_hif_send_head,
.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
@@ -1743,8 +1883,14 @@ static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
.map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
.get_default_pipe = ath10k_pci_hif_get_default_pipe,
.send_complete_check = ath10k_pci_hif_send_complete_check,
- .init = ath10k_pci_hif_post_init,
+ .set_callbacks = ath10k_pci_hif_set_callbacks,
.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
+ .power_up = ath10k_pci_hif_power_up,
+ .power_down = ath10k_pci_hif_power_down,
+#ifdef CONFIG_PM
+ .suspend = ath10k_pci_hif_suspend,
+ .resume = ath10k_pci_hif_resume,
+#endif
};
static void ath10k_pci_ce_tasklet(unsigned long ptr)
@@ -1872,8 +2018,13 @@ static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
ath10k_pci_msi_fw_handler,
IRQF_SHARED, "ath10k_pci", ar);
- if (ret)
+ if (ret) {
+ ath10k_warn("request_irq(%d) failed %d\n",
+ ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
+
+ pci_disable_msi(ar_pci->pdev);
return ret;
+ }
for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
ret = request_irq(ar_pci->pdev->irq + i,
@@ -2059,9 +2210,9 @@ static int ath10k_pci_reset_target(struct ath10k *ar)
return 0;
}
-static void ath10k_pci_device_reset(struct ath10k_pci *ar_pci)
+static void ath10k_pci_device_reset(struct ath10k *ar)
{
- struct ath10k *ar = ar_pci->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
void __iomem *mem = ar_pci->mem;
int i;
u32 val;
@@ -2118,9 +2269,12 @@ static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
case ATH10K_PCI_FEATURE_MSI_X:
ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
break;
- case ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND:
+ case ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND:
ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
break;
+ case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
+ ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
+ break;
}
}
}
@@ -2145,7 +2299,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
switch (pci_dev->device) {
case QCA988X_1_0_DEVICE_ID:
- set_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features);
+ set_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features);
break;
case QCA988X_2_0_DEVICE_ID:
set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
@@ -2156,10 +2310,12 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_ar_pci;
}
+ if (ath10k_target_ps)
+ set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
+
ath10k_pci_dump_features(ar_pci);
- ar = ath10k_core_create(ar_pci, ar_pci->dev, ATH10K_BUS_PCI,
- &ath10k_pci_hif_ops);
+ ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
if (!ar) {
ath10k_err("ath10k_core_create failed!\n");
ret = -EINVAL;
@@ -2167,7 +2323,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
}
/* Enable QCA988X_1.0 HW workarounds */
- if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features))
+ if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features))
spin_lock_init(&ar_pci->hw_v1_workaround_lock);
ar_pci->ar = ar;
@@ -2241,62 +2397,14 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
ar_pci->cacheline_sz = dma_get_cache_alignment();
- ret = ath10k_pci_start_intr(ar);
- if (ret) {
- ath10k_err("could not start interrupt handling (%d)\n", ret);
- goto err_iomap;
- }
-
- /*
- * Bring the target up cleanly.
- *
- * The target may be in an undefined state with an AUX-powered Target
- * and a Host in WoW mode. If the Host crashes, loses power, or is
- * restarted (without unloading the driver) then the Target is left
- * (aux) powered and running. On a subsequent driver load, the Target
- * is in an unexpected state. We try to catch that here in order to
- * reset the Target and retry the probe.
- */
- ath10k_pci_device_reset(ar_pci);
-
- ret = ath10k_pci_reset_target(ar);
- if (ret)
- goto err_intr;
-
- if (ath10k_target_ps) {
- ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n");
- } else {
- /* Force AWAKE forever */
- ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n");
- ath10k_do_pci_wake(ar);
- }
-
- ret = ath10k_pci_ce_init(ar);
- if (ret)
- goto err_intr;
-
- ret = ath10k_pci_init_config(ar);
- if (ret)
- goto err_ce;
-
- ret = ath10k_pci_wake_target_cpu(ar);
- if (ret) {
- ath10k_err("could not wake up target CPU (%d)\n", ret);
- goto err_ce;
- }
-
ret = ath10k_core_register(ar);
if (ret) {
ath10k_err("could not register driver core (%d)\n", ret);
- goto err_ce;
+ goto err_iomap;
}
return 0;
-err_ce:
- ath10k_pci_ce_deinit(ar);
-err_intr:
- ath10k_pci_stop_intr(ar);
err_iomap:
pci_iounmap(pdev, mem);
err_master:
@@ -2333,7 +2441,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
tasklet_kill(&ar_pci->msi_fw_err);
ath10k_core_unregister(ar);
- ath10k_pci_stop_intr(ar);
pci_set_drvdata(pdev, NULL);
pci_iounmap(pdev, ar_pci->mem);
@@ -2345,128 +2452,6 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
kfree(ar_pci);
}
-#if defined(CONFIG_PM_SLEEP)
-
-#define ATH10K_PCI_PM_CONTROL 0x44
-
-static int ath10k_pci_suspend(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct ath10k *ar = pci_get_drvdata(pdev);
- struct ath10k_pci *ar_pci;
- u32 val;
- int ret, retval;
-
- ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
-
- if (!ar)
- return -ENODEV;
-
- ar_pci = ath10k_pci_priv(ar);
- if (!ar_pci)
- return -ENODEV;
-
- if (ath10k_core_target_suspend(ar))
- return -EBUSY;
-
- ret = wait_event_interruptible_timeout(ar->event_queue,
- ar->is_target_paused == true,
- 1 * HZ);
- if (ret < 0) {
- ath10k_warn("suspend interrupted (%d)\n", ret);
- retval = ret;
- goto resume;
- } else if (ret == 0) {
- ath10k_warn("suspend timed out - target pause event never came\n");
- retval = EIO;
- goto resume;
- }
-
- /*
- * reset is_target_paused and host can check that in next time,
- * or it will always be TRUE and host just skip the waiting
- * condition, it causes target assert due to host already
- * suspend
- */
- ar->is_target_paused = false;
-
- pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
-
- if ((val & 0x000000ff) != 0x3) {
- pci_save_state(pdev);
- pci_disable_device(pdev);
- pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
- (val & 0xffffff00) | 0x03);
- }
-
- return 0;
-resume:
- ret = ath10k_core_target_resume(ar);
- if (ret)
- ath10k_warn("could not resume (%d)\n", ret);
-
- return retval;
-}
-
-static int ath10k_pci_resume(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct ath10k *ar = pci_get_drvdata(pdev);
- struct ath10k_pci *ar_pci;
- int ret;
- u32 val;
-
- ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
-
- if (!ar)
- return -ENODEV;
- ar_pci = ath10k_pci_priv(ar);
-
- if (!ar_pci)
- return -ENODEV;
-
- ret = pci_enable_device(pdev);
- if (ret) {
- ath10k_warn("cannot enable PCI device: %d\n", ret);
- return ret;
- }
-
- pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
-
- if ((val & 0x000000ff) != 0) {
- pci_restore_state(pdev);
- pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
- val & 0xffffff00);
- /*
- * Suspend/Resume resets the PCI configuration space,
- * so we have to re-disable the RETRY_TIMEOUT register (0x41)
- * to keep PCI Tx retries from interfering with C3 CPU state
- */
- pci_read_config_dword(pdev, 0x40, &val);
-
- if ((val & 0x0000ff00) != 0)
- pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
- }
-
- ret = ath10k_core_target_resume(ar);
- if (ret)
- ath10k_warn("target resume failed: %d\n", ret);
-
- return ret;
-}
-
-static SIMPLE_DEV_PM_OPS(ath10k_dev_pm_ops,
- ath10k_pci_suspend,
- ath10k_pci_resume);
-
-#define ATH10K_PCI_PM_OPS (&ath10k_dev_pm_ops)
-
-#else
-
-#define ATH10K_PCI_PM_OPS NULL
-
-#endif /* CONFIG_PM_SLEEP */
-
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
static struct pci_driver ath10k_pci_driver = {
@@ -2474,7 +2459,6 @@ static struct pci_driver ath10k_pci_driver = {
.id_table = ath10k_pci_id_table,
.probe = ath10k_pci_probe,
.remove = ath10k_pci_remove,
- .driver.pm = ATH10K_PCI_PM_OPS,
};
static int __init ath10k_pci_init(void)
diff --git a/drivers/net/wireless/ath/ath10k/pci.h b/drivers/net/wireless/ath/ath10k/pci.h
index d2a055a07dc..871bb339d56 100644
--- a/drivers/net/wireless/ath/ath10k/pci.h
+++ b/drivers/net/wireless/ath/ath10k/pci.h
@@ -152,7 +152,8 @@ struct service_to_pipe {
enum ath10k_pci_features {
ATH10K_PCI_FEATURE_MSI_X = 0,
- ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND = 1,
+ ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND = 1,
+ ATH10K_PCI_FEATURE_SOC_POWER_SAVE = 2,
/* keep last */
ATH10K_PCI_FEATURE_COUNT
@@ -311,7 +312,7 @@ static inline void ath10k_pci_write32(struct ath10k *ar, u32 offset,
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
void __iomem *addr = ar_pci->mem;
- if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features)) {
+ if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WORKAROUND, ar_pci->features)) {
unsigned long irq_flags;
spin_lock_irqsave(&ar_pci->hw_v1_workaround_lock, irq_flags);
@@ -335,20 +336,22 @@ static inline u32 ath10k_pci_read32(struct ath10k *ar, u32 offset)
return ioread32(ar_pci->mem + offset);
}
-extern unsigned int ath10k_target_ps;
-
void ath10k_do_pci_wake(struct ath10k *ar);
void ath10k_do_pci_sleep(struct ath10k *ar);
static inline void ath10k_pci_wake(struct ath10k *ar)
{
- if (ath10k_target_ps)
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
ath10k_do_pci_wake(ar);
}
static inline void ath10k_pci_sleep(struct ath10k *ar)
{
- if (ath10k_target_ps)
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+
+ if (test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
ath10k_do_pci_sleep(ar);
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index 7d4b7987422..55f90c76186 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -27,6 +27,13 @@ void ath10k_wmi_flush_tx(struct ath10k *ar)
{
int ret;
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (ar->state == ATH10K_STATE_WEDGED) {
+ ath10k_warn("wmi flush skipped - device is wedged anyway\n");
+ return;
+ }
+
ret = wait_event_timeout(ar->wmi.wq,
atomic_read(&ar->wmi.pending_tx_count) == 0,
5*HZ);
@@ -111,7 +118,7 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
trace_ath10k_wmi_cmd(cmd_id, skb->data, skb->len);
- status = ath10k_htc_send(ar->htc, ar->wmi.eid, skb);
+ status = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
if (status) {
dev_kfree_skb_any(skb);
atomic_dec(&ar->wmi.pending_tx_count);
@@ -383,9 +390,82 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
+static int freq_to_idx(struct ath10k *ar, int freq)
+{
+ struct ieee80211_supported_band *sband;
+ int band, ch, idx = 0;
+
+ for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
+ sband = ar->hw->wiphy->bands[band];
+ if (!sband)
+ continue;
+
+ for (ch = 0; ch < sband->n_channels; ch++, idx++)
+ if (sband->channels[ch].center_freq == freq)
+ goto exit;
+ }
+
+exit:
+ return idx;
+}
+
static void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb)
{
- ath10k_dbg(ATH10K_DBG_WMI, "WMI_CHAN_INFO_EVENTID\n");
+ struct wmi_chan_info_event *ev;
+ struct survey_info *survey;
+ u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count;
+ int idx;
+
+ ev = (struct wmi_chan_info_event *)skb->data;
+
+ err_code = __le32_to_cpu(ev->err_code);
+ freq = __le32_to_cpu(ev->freq);
+ cmd_flags = __le32_to_cpu(ev->cmd_flags);
+ noise_floor = __le32_to_cpu(ev->noise_floor);
+ rx_clear_count = __le32_to_cpu(ev->rx_clear_count);
+ cycle_count = __le32_to_cpu(ev->cycle_count);
+
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n",
+ err_code, freq, cmd_flags, noise_floor, rx_clear_count,
+ cycle_count);
+
+ spin_lock_bh(&ar->data_lock);
+
+ if (!ar->scan.in_progress) {
+ ath10k_warn("chan info event without a scan request?\n");
+ goto exit;
+ }
+
+ idx = freq_to_idx(ar, freq);
+ if (idx >= ARRAY_SIZE(ar->survey)) {
+ ath10k_warn("chan info: invalid frequency %d (idx %d out of bounds)\n",
+ freq, idx);
+ goto exit;
+ }
+
+ if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) {
+ /* During scanning chan info is reported twice for each
+ * visited channel. The reported cycle count is global
+ * and per-channel cycle count must be calculated */
+
+ cycle_count -= ar->survey_last_cycle_count;
+ rx_clear_count -= ar->survey_last_rx_clear_count;
+
+ survey = &ar->survey[idx];
+ survey->channel_time = WMI_CHAN_INFO_MSEC(cycle_count);
+ survey->channel_time_rx = WMI_CHAN_INFO_MSEC(rx_clear_count);
+ survey->noise = noise_floor;
+ survey->filled = SURVEY_INFO_CHANNEL_TIME |
+ SURVEY_INFO_CHANNEL_TIME_RX |
+ SURVEY_INFO_NOISE_DBM;
+ }
+
+ ar->survey_last_rx_clear_count = rx_clear_count;
+ ar->survey_last_cycle_count = cycle_count;
+
+exit:
+ spin_unlock_bh(&ar->data_lock);
}
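
As the comment in the handler notes, the firmware's cycle and rx-clear counters are global running totals, so the per-channel figures are taken as deltas against the values recorded when the previous channel finished; the WMI_CHAN_INFO_MSEC() divisor added in wmi.h (76595, empirically extrapolated) then converts a delta to milliseconds. A standalone sketch of that bookkeeping, with made-up counter values:

/* Sketch of the snapshot/delta bookkeeping done in the handler above. */
#include <stdio.h>
#include <stdint.h>

struct survey_state {
	uint32_t last_cycle_count;
	uint32_t last_rx_clear_count;
};

static void channel_complete(struct survey_state *s,
			     uint32_t cycle_count, uint32_t rx_clear_count)
{
	uint32_t active = cycle_count - s->last_cycle_count;
	uint32_t rx_clear = rx_clear_count - s->last_rx_clear_count;

	/* mirrors WMI_CHAN_INFO_MSEC(x) = (x) / 76595 */
	printf("channel time %u ms, rx time %u ms\n",
	       (unsigned)(active / 76595), (unsigned)(rx_clear / 76595));

	s->last_cycle_count = cycle_count;
	s->last_rx_clear_count = rx_clear_count;
}

int main(void)
{
	struct survey_state s = { 0, 0 };

	channel_complete(&s, 7659500, 765950);		/* ~100 ms, ~10 ms */
	channel_complete(&s, 15319000, 1531900);	/* ~100 ms, ~10 ms */
	return 0;
}
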
static void ath10k_wmi_event_echo(struct ath10k *ar, struct sk_buff *skb)
@@ -501,8 +581,8 @@ static void ath10k_wmi_update_tim(struct ath10k *ar,
ie = (u8 *)cfg80211_find_ie(WLAN_EID_TIM, ies,
(u8 *)skb_tail_pointer(bcn) - ies);
if (!ie) {
- /* highly unlikely for mac80211 */
- ath10k_warn("no tim ie found;\n");
+ if (arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
+ ath10k_warn("no tim ie found;\n");
return;
}
@@ -861,6 +941,13 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
(__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16;
ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff);
ar->phy_capability = __le32_to_cpu(ev->phy_capability);
+ ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
+
+ if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
+ ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
+ ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
+ ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
+ }
ar->ath_common.regulatory.current_rd =
__le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
@@ -885,7 +972,7 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
}
ath10k_dbg(ATH10K_DBG_WMI,
- "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u\n",
+ "wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
__le32_to_cpu(ev->sw_version),
__le32_to_cpu(ev->sw_version_1),
__le32_to_cpu(ev->abi_version),
@@ -894,7 +981,8 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
__le32_to_cpu(ev->vht_cap_info),
__le32_to_cpu(ev->vht_supp_mcs),
__le32_to_cpu(ev->sys_cap_info),
- __le32_to_cpu(ev->num_mem_reqs));
+ __le32_to_cpu(ev->num_mem_reqs),
+ __le32_to_cpu(ev->num_rf_chains));
complete(&ar->wmi.service_ready);
}
@@ -1114,7 +1202,7 @@ int ath10k_wmi_connect_htc_service(struct ath10k *ar)
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_WMI_CONTROL;
- status = ath10k_htc_connect_service(ar->htc, &conn_req, &conn_resp);
+ status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
if (status) {
ath10k_warn("failed to connect to WMI CONTROL service status: %d\n",
status);
@@ -1748,6 +1836,9 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar,
if (arg->key_data)
memcpy(cmd->key_data, arg->key_data, arg->key_len);
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi vdev install key idx %d cipher %d len %d\n",
+ arg->key_idx, arg->key_cipher, arg->key_len);
return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID);
}
@@ -2011,6 +2102,9 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
cmd->peer_vht_rates.tx_mcs_set =
__cpu_to_le32(arg->peer_vht_rates.tx_mcs_set);
+ ath10k_dbg(ATH10K_DBG_WMI,
+ "wmi peer assoc vdev %d addr %pM\n",
+ arg->vdev_id, arg->addr);
return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
}
@@ -2079,3 +2173,22 @@ int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID);
}
+
+int ath10k_wmi_force_fw_hang(struct ath10k *ar,
+ enum wmi_force_fw_hang_type type, u32 delay_ms)
+{
+ struct wmi_force_fw_hang_cmd *cmd;
+ struct sk_buff *skb;
+
+ skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_force_fw_hang_cmd *)skb->data;
+ cmd->type = __cpu_to_le32(type);
+ cmd->delay_ms = __cpu_to_le32(delay_ms);
+
+ ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
+ type, delay_ms);
+ return ath10k_wmi_cmd_send(ar, skb, WMI_FORCE_FW_HANG_CMDID);
+}
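
A sketch of how the new command might be driven from a debug hook to exercise the recovery path introduced elsewhere in this patch; the function name and calling context here are hypothetical, only ath10k_wmi_force_fw_hang() and WMI_FORCE_FW_HANG_ASSERT come from the patch itself:

/* Hypothetical debug hook (not part of this patch): ask the firmware to
 * assert so the restart/recovery machinery can be tested. */
static int ath10k_debug_simulate_fw_crash(struct ath10k *ar)
{
	int ret;

	mutex_lock(&ar->conf_mutex);

	/* delay of 0: assert as soon as the firmware processes the command */
	ret = ath10k_wmi_force_fw_hang(ar, WMI_FORCE_FW_HANG_ASSERT, 0);
	if (ret)
		ath10k_warn("could not force fw hang (%d)\n", ret);

	mutex_unlock(&ar->conf_mutex);
	return ret;
}
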
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 9555f5a0e04..2c5a4f8daf2 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -416,6 +416,7 @@ enum wmi_cmd_id {
WMI_PDEV_FTM_INTG_CMDID,
WMI_VDEV_SET_KEEPALIVE_CMDID,
WMI_VDEV_GET_KEEPALIVE_CMDID,
+ WMI_FORCE_FW_HANG_CMDID,
/* GPIO Configuration */
WMI_GPIO_CONFIG_CMDID = WMI_CMD_GRP(WMI_GRP_GPIO),
@@ -2930,6 +2931,11 @@ struct wmi_chan_info_event {
__le32 cycle_count;
} __packed;
+#define WMI_CHAN_INFO_FLAG_COMPLETE BIT(0)
+
+/* FIXME: empirically extrapolated */
+#define WMI_CHAN_INFO_MSEC(x) ((x) / 76595)
+
/* Beacon filter wmi command info */
#define BCN_FLT_MAX_SUPPORTED_IES 256
#define BCN_FLT_MAX_ELEMS_IE_LIST (BCN_FLT_MAX_SUPPORTED_IES / 32)
@@ -2972,6 +2978,22 @@ struct wmi_sta_keepalive_cmd {
struct wmi_sta_keepalive_arp_resp arp_resp;
} __packed;
+enum wmi_force_fw_hang_type {
+ WMI_FORCE_FW_HANG_ASSERT = 1,
+ WMI_FORCE_FW_HANG_NO_DETECT,
+ WMI_FORCE_FW_HANG_CTRL_EP_FULL,
+ WMI_FORCE_FW_HANG_EMPTY_POINT,
+ WMI_FORCE_FW_HANG_STACK_OVERFLOW,
+ WMI_FORCE_FW_HANG_INFINITE_LOOP,
+};
+
+#define WMI_FORCE_FW_HANG_RANDOM_TIME 0xFFFFFFFF
+
+struct wmi_force_fw_hang_cmd {
+ __le32 type;
+ __le32 delay_ms;
+} __packed;
+
#define ATH10K_RTS_MAX 2347
#define ATH10K_FRAGMT_THRESHOLD_MIN 540
#define ATH10K_FRAGMT_THRESHOLD_MAX 2346
@@ -3048,5 +3070,7 @@ int ath10k_wmi_beacon_send(struct ath10k *ar, const struct wmi_bcn_tx_arg *arg);
int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
const struct wmi_pdev_set_wmm_params_arg *arg);
int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
+int ath10k_wmi_force_fw_hang(struct ath10k *ar,
+ enum wmi_force_fw_hang_type type, u32 delay_ms);
#endif /* _WMI_H_ */
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 2d691b8b95b..74bd54d6ace 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -29,6 +29,7 @@
#include <linux/average.h>
#include <linux/leds.h>
#include <net/mac80211.h>
+#include <net/cfg80211.h>
/* RX/TX descriptor hw structs
* TODO: Driver part should only see sw structs */
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index ce67ab791ea..48161edec8d 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -56,6 +56,7 @@
#include <linux/etherdevice.h>
#include <linux/nl80211.h>
+#include <net/cfg80211.h>
#include <net/ieee80211_radiotap.h>
#include <asm/unaligned.h>
@@ -165,28 +166,36 @@ static const struct ieee80211_rate ath5k_rates[] = {
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 60,
.hw_value = ATH5K_RATE_CODE_6M,
- .flags = 0 },
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 90,
.hw_value = ATH5K_RATE_CODE_9M,
- .flags = 0 },
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 120,
.hw_value = ATH5K_RATE_CODE_12M,
- .flags = 0 },
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 180,
.hw_value = ATH5K_RATE_CODE_18M,
- .flags = 0 },
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 240,
.hw_value = ATH5K_RATE_CODE_24M,
- .flags = 0 },
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 360,
.hw_value = ATH5K_RATE_CODE_36M,
- .flags = 0 },
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 480,
.hw_value = ATH5K_RATE_CODE_48M,
- .flags = 0 },
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 540,
.hw_value = ATH5K_RATE_CODE_54M,
- .flags = 0 },
+ .flags = IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ },
};
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
@@ -435,11 +444,27 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
* Called with ah->lock.
*/
int
-ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan)
+ath5k_chan_set(struct ath5k_hw *ah, struct cfg80211_chan_def *chandef)
{
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"channel set, resetting (%u -> %u MHz)\n",
- ah->curchan->center_freq, chan->center_freq);
+ ah->curchan->center_freq, chandef->chan->center_freq);
+
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ ah->ah_bwmode = AR5K_BWMODE_5MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ ah->ah_bwmode = AR5K_BWMODE_10MHZ;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
/*
* To switch channels clear any pending DMA operations;
@@ -447,7 +472,7 @@ ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan)
* hardware at the new frequency, and then re-enable
* the relevant bits of the h/w.
*/
- return ath5k_reset(ah, chan, true);
+ return ath5k_reset(ah, chandef->chan, true);
}
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
@@ -1400,6 +1425,16 @@ ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
rxs->rate_idx = ath5k_hw_to_driver_rix(ah, rs->rs_rate);
rxs->flag |= ath5k_rx_decrypted(ah, skb, rs);
+ switch (ah->ah_bwmode) {
+ case AR5K_BWMODE_5MHZ:
+ rxs->flag |= RX_FLAG_5MHZ;
+ break;
+ case AR5K_BWMODE_10MHZ:
+ rxs->flag |= RX_FLAG_10MHZ;
+ break;
+ default:
+ break;
+ }
if (rxs->rate_idx >= 0 && rs->rs_rate ==
ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
@@ -2507,6 +2542,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
/* SW support for IBSS_RSN is provided by mac80211 */
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;
+
/* both antennas can be configured as RX or TX */
hw->wiphy->available_antennas_tx = 0x3;
hw->wiphy->available_antennas_rx = 0x3;
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index ca9a83ceeee..97469d0fbad 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -101,7 +101,7 @@ void ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable);
void ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
struct ieee80211_vif *vif);
-int ath5k_chan_set(struct ath5k_hw *ah, struct ieee80211_channel *chan);
+int ath5k_chan_set(struct ath5k_hw *ah, struct cfg80211_chan_def *chandef);
void ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
void ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf);
void ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 9d00dab666a..b8d031ae63c 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -245,9 +245,11 @@ static ssize_t write_file_beacon(struct file *file,
struct ath5k_hw *ah = file->private_data;
char buf[20];
- if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, userbuf, count))
return -EFAULT;
+ buf[count] = '\0';
if (strncmp(buf, "disable", 7) == 0) {
AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
pr_info("debugfs disable beacons\n");
@@ -345,9 +347,11 @@ static ssize_t write_file_debug(struct file *file,
unsigned int i;
char buf[20];
- if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, userbuf, count))
return -EFAULT;
+ buf[count] = '\0';
for (i = 0; i < ARRAY_SIZE(dbg_info); i++) {
if (strncmp(buf, dbg_info[i].name,
strlen(dbg_info[i].name)) == 0) {
@@ -448,9 +452,11 @@ static ssize_t write_file_antenna(struct file *file,
unsigned int i;
char buf[20];
- if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, userbuf, count))
return -EFAULT;
+ buf[count] = '\0';
if (strncmp(buf, "diversity", 9) == 0) {
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
pr_info("debug: enable diversity\n");
@@ -619,9 +625,11 @@ static ssize_t write_file_frameerrors(struct file *file,
struct ath5k_statistics *st = &ah->stats;
char buf[20];
- if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, userbuf, count))
return -EFAULT;
+ buf[count] = '\0';
if (strncmp(buf, "clear", 5) == 0) {
st->rxerr_crc = 0;
st->rxerr_phy = 0;
@@ -766,9 +774,11 @@ static ssize_t write_file_ani(struct file *file,
struct ath5k_hw *ah = file->private_data;
char buf[20];
- if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, userbuf, count))
return -EFAULT;
+ buf[count] = '\0';
if (strncmp(buf, "sens-low", 8) == 0) {
ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_HIGH);
} else if (strncmp(buf, "sens-high", 9) == 0) {
@@ -862,9 +872,11 @@ static ssize_t write_file_queue(struct file *file,
struct ath5k_hw *ah = file->private_data;
char buf[20];
- if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
+ count = min_t(size_t, count, sizeof(buf) - 1);
+ if (copy_from_user(buf, userbuf, count))
return -EFAULT;
+ buf[count] = '\0';
if (strncmp(buf, "start", 5) == 0)
ieee80211_wake_queues(ah->hw);
else if (strncmp(buf, "stop", 4) == 0)
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
index 81b686c6a37..4ee01f65423 100644
--- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -202,7 +202,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&ah->lock);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
- ret = ath5k_chan_set(ah, conf->chandef.chan);
+ ret = ath5k_chan_set(ah, &conf->chandef);
if (ret < 0)
goto unlock;
}
@@ -325,7 +325,7 @@ ath5k_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr *ha;
mfilt[0] = 0;
- mfilt[1] = 1;
+ mfilt[1] = 0;
netdev_hw_addr_list_for_each(ha, mc_list) {
/* calculate XOR of eight 6-bit values */
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 1f16b4227d8..c60d36aa13e 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -144,11 +144,13 @@ ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum ieee80211_band band,
sifs = AR5K_INIT_SIFS_HALF_RATE;
preamble *= 2;
sym_time *= 2;
+ bitrate = DIV_ROUND_UP(bitrate, 2);
break;
case AR5K_BWMODE_5MHZ:
sifs = AR5K_INIT_SIFS_QUARTER_RATE;
preamble *= 4;
sym_time *= 4;
+ bitrate = DIV_ROUND_UP(bitrate, 4);
break;
default:
sifs = AR5K_INIT_SIFS_DEFAULT_BG;
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 65fe929529a..0583c69d26d 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -566,9 +566,11 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
enum ieee80211_band band;
+ struct ieee80211_supported_band *sband;
struct ieee80211_rate *rate;
u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
+ u32 rate_flags, i;
if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
return -EINVAL;
@@ -605,7 +607,28 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
else
band = IEEE80211_BAND_2GHZ;
- rate = &ah->sbands[band].bitrates[0];
+ switch (ah->ah_bwmode) {
+ case AR5K_BWMODE_5MHZ:
+ rate_flags = IEEE80211_RATE_SUPPORTS_5MHZ;
+ break;
+ case AR5K_BWMODE_10MHZ:
+ rate_flags = IEEE80211_RATE_SUPPORTS_10MHZ;
+ break;
+ default:
+ rate_flags = 0;
+ break;
+ }
+ sband = &ah->sbands[band];
+ rate = NULL;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+ rate = &sband->bitrates[i];
+ break;
+ }
+ if (WARN_ON(!rate))
+ return -EINVAL;
+
ack_tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, false);
/* ack_tx_time includes an SIFS already */
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index 6a67881f94d..4f316bdcbab 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -1836,6 +1836,9 @@ void ath6kl_stop_txrx(struct ath6kl *ar)
clear_bit(WMI_READY, &ar->flag);
+ if (ar->fw_recovery.enable)
+ del_timer_sync(&ar->fw_recovery.hb_timer);
+
/*
* After wmi_shutdown all WMI events will be dropped. We
* need to cleanup the buffers allocated in AP mode and
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index d4fcfcad57d..5839fc23bdc 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -29,6 +29,9 @@ struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr)
struct ath6kl_sta *conn = NULL;
u8 i, max_conn;
+ if (is_zero_ether_addr(node_addr))
+ return NULL;
+
max_conn = (vif->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
for (i = 0; i < max_conn; i++) {
diff --git a/drivers/net/wireless/ath/ath6kl/testmode.c b/drivers/net/wireless/ath/ath6kl/testmode.c
index acc9aa832f7..d67170ea103 100644
--- a/drivers/net/wireless/ath/ath6kl/testmode.c
+++ b/drivers/net/wireless/ath/ath6kl/testmode.c
@@ -66,7 +66,8 @@ nla_put_failure:
ath6kl_warn("nla_put failed on testmode rx skb!\n");
}
-int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len)
+int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
+ void *data, int len)
{
struct ath6kl *ar = wiphy_priv(wiphy);
struct nlattr *tb[ATH6KL_TM_ATTR_MAX + 1];
diff --git a/drivers/net/wireless/ath/ath6kl/testmode.h b/drivers/net/wireless/ath/ath6kl/testmode.h
index fe651d6707d..9fbcdec3e20 100644
--- a/drivers/net/wireless/ath/ath6kl/testmode.h
+++ b/drivers/net/wireless/ath/ath6kl/testmode.h
@@ -20,7 +20,8 @@
#ifdef CONFIG_NL80211_TESTMODE
void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf, size_t buf_len);
-int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len);
+int ath6kl_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
+ void *data, int len);
#else
@@ -29,7 +30,9 @@ static inline void ath6kl_tm_rx_event(struct ath6kl *ar, void *buf,
{
}
-static inline int ath6kl_tm_cmd(struct wiphy *wiphy, void *data, int len)
+static inline int ath6kl_tm_cmd(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ void *data, int len)
{
return 0;
}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index 87aefb4c4c2..546d5da0b89 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -568,8 +568,8 @@ static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len,
dlen, freq, vif->probe_req_report);
if (vif->probe_req_report || vif->nw_type == AP_NETWORK)
- cfg80211_rx_mgmt(&vif->wdev, freq, 0,
- ev->data, dlen, GFP_ATOMIC);
+ cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0,
+ GFP_ATOMIC);
return 0;
}
@@ -608,8 +608,7 @@ static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len,
return -EINVAL;
}
ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq);
- cfg80211_rx_mgmt(&vif->wdev, freq, 0,
- ev->data, dlen, GFP_ATOMIC);
+ cfg80211_rx_mgmt(&vif->wdev, freq, 0, ev->data, dlen, 0, GFP_ATOMIC);
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index d491a317898..7944c25c9a4 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -56,7 +56,7 @@ config ATH9K_AHB
config ATH9K_DEBUGFS
bool "Atheros ath9k debugging"
- depends on ATH9K
+ depends on ATH9K && DEBUG_FS
select MAC80211_DEBUGFS
select RELAY
---help---
@@ -96,6 +96,16 @@ config ATH9K_LEGACY_RATE_CONTROL
has to be passed to mac80211 using the module parameter,
ieee80211_default_rc_algo.
+config ATH9K_RFKILL
+ bool "Atheros ath9k rfkill support" if EXPERT
+ depends on ATH9K
+ depends on RFKILL=y || RFKILL=ATH9K
+ default y
+ help
+ Say Y to have ath9k poll the RF-Kill GPIO every couple of
+ seconds. Turn off to save power, but enable it if you have
+ a platform that can toggle the RF-Kill GPIO.
+
config ATH9K_HTC
tristate "Atheros HTC based wireless cards support"
depends on USB && MAC80211
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 4994bea809e..be466b0ef7a 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -319,9 +319,6 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
ah->ani_function = 0;
}
- /* always allow mode (on/off) to be controlled */
- ah->ani_function |= ATH9K_ANI_MODE;
-
ofdm_nil = max_t(int, ATH9K_ANI_OFDM_DEF_LEVEL,
aniState->ofdmNoiseImmunityLevel);
cck_nil = max_t(int, ATH9K_ANI_CCK_DEF_LEVEL,
diff --git a/drivers/net/wireless/ath/ath9k/ani.h b/drivers/net/wireless/ath/ath9k/ani.h
index b54a3fb0188..21e7b83c3f6 100644
--- a/drivers/net/wireless/ath/ath9k/ani.h
+++ b/drivers/net/wireless/ath/ath9k/ani.h
@@ -48,15 +48,10 @@
/* values here are relative to the INI */
enum ath9k_ani_cmd {
- ATH9K_ANI_PRESENT = 0x1,
- ATH9K_ANI_NOISE_IMMUNITY_LEVEL = 0x2,
- ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x4,
- ATH9K_ANI_CCK_WEAK_SIGNAL_THR = 0x8,
- ATH9K_ANI_FIRSTEP_LEVEL = 0x10,
- ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x20,
- ATH9K_ANI_MODE = 0x40,
- ATH9K_ANI_PHYERR_RESET = 0x80,
- ATH9K_ANI_MRC_CCK = 0x100,
+ ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x1,
+ ATH9K_ANI_FIRSTEP_LEVEL = 0x2,
+ ATH9K_ANI_SPUR_IMMUNITY_LEVEL = 0x4,
+ ATH9K_ANI_MRC_CCK = 0x8,
ATH9K_ANI_ALL = 0xfff
};
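
After the cleanup, ath9k_ani_cmd is a plain bitmask of the four adjustments that are still driven at runtime, and ATH9K_ANI_ALL simply selects all of them. A standalone sketch of how such a mask is consumed (the dispatch bodies are illustrative; the flag values are the ones from the hunk):

#include <stdio.h>

enum ani_cmd_model {
	ANI_OFDM_WEAK_SIGNAL_DETECTION = 0x1,
	ANI_FIRSTEP_LEVEL              = 0x2,
	ANI_SPUR_IMMUNITY_LEVEL        = 0x4,
	ANI_MRC_CCK                    = 0x8,
	ANI_ALL                        = 0xfff,
};

static void ani_control_model(unsigned int cmd)
{
	if (cmd & ANI_OFDM_WEAK_SIGNAL_DETECTION)
		printf("adjust OFDM weak-signal detection\n");
	if (cmd & ANI_FIRSTEP_LEVEL)
		printf("adjust firstep level\n");
	if (cmd & ANI_SPUR_IMMUNITY_LEVEL)
		printf("adjust spur immunity level\n");
	if (cmd & ANI_MRC_CCK)
		printf("toggle MRC CCK\n");
}

int main(void)
{
	ani_control_model(ANI_ALL);   /* requests every remaining adjustment */
	return 0;
}
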
diff --git a/drivers/net/wireless/ath/ath9k/antenna.c b/drivers/net/wireless/ath/ath9k/antenna.c
index 664844c5d3d..dd1cc73d794 100644
--- a/drivers/net/wireless/ath/ath9k/antenna.c
+++ b/drivers/net/wireless/ath/ath9k/antenna.c
@@ -16,37 +16,119 @@
#include "ath9k.h"
-static inline bool ath_is_alt_ant_ratio_better(int alt_ratio, int maxdelta,
+/*
+ * AR9285
+ * ======
+ *
+ * EEPROM has 2 4-bit fields containing the card configuration.
+ *
+ * antdiv_ctl1:
+ * ------------
+ * bb_enable_ant_div_lnadiv : 1
+ * bb_ant_div_alt_gaintb : 1
+ * bb_ant_div_main_gaintb : 1
+ * bb_enable_ant_fast_div : 1
+ *
+ * antdiv_ctl2:
+ * -----------
+ * bb_ant_div_alt_lnaconf : 2
+ * bb_ant_div_main_lnaconf : 2
+ *
+ * The EEPROM bits are used as follows:
+ * ------------------------------------
+ *
+ * bb_enable_ant_div_lnadiv - Enable LNA path rx antenna diversity/combining.
+ * Set in AR_PHY_MULTICHAIN_GAIN_CTL.
+ *
+ * bb_ant_div_[alt/main]_gaintb - 0 -> Antenna config Alt/Main uses gaintable 0
+ * 1 -> Antenna config Alt/Main uses gaintable 1
+ * Set in AR_PHY_MULTICHAIN_GAIN_CTL.
+ *
+ * bb_enable_ant_fast_div - Enable fast antenna diversity.
+ * Set in AR_PHY_CCK_DETECT.
+ *
+ * bb_ant_div_[alt/main]_lnaconf - Alt/Main LNA diversity/combining input config.
+ * Set in AR_PHY_MULTICHAIN_GAIN_CTL.
+ * 10=LNA1
+ * 01=LNA2
+ * 11=LNA1+LNA2
+ * 00=LNA1-LNA2
+ *
+ * AR9485 / AR9565 / AR9331
+ * ========================
+ *
+ * The same bits are present in the EEPROM, but the location in the
+ * EEPROM is different (ant_div_control in ar9300_BaseExtension_1).
+ *
+ * ant_div_alt_lnaconf ==> bit 0~1
+ * ant_div_main_lnaconf ==> bit 2~3
+ * ant_div_alt_gaintb ==> bit 4
+ * ant_div_main_gaintb ==> bit 5
+ * enable_ant_div_lnadiv ==> bit 6
+ * enable_ant_fast_div ==> bit 7
+ */
+
+static inline bool ath_is_alt_ant_ratio_better(struct ath_ant_comb *antcomb,
+ int alt_ratio, int maxdelta,
int mindelta, int main_rssi_avg,
int alt_rssi_avg, int pkt_count)
{
- return (((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
- (alt_rssi_avg > main_rssi_avg + maxdelta)) ||
- (alt_rssi_avg > main_rssi_avg + mindelta)) && (pkt_count > 50);
+ if (pkt_count <= 50)
+ return false;
+
+ if (alt_rssi_avg > main_rssi_avg + mindelta)
+ return true;
+
+ if (alt_ratio >= antcomb->ant_ratio2 &&
+ alt_rssi_avg >= antcomb->low_rssi_thresh &&
+ (alt_rssi_avg > main_rssi_avg + maxdelta))
+ return true;
+
+ return false;
}
-static inline bool ath_ant_div_comb_alt_check(u8 div_group, int alt_ratio,
- int curr_main_set, int curr_alt_set,
- int alt_rssi_avg, int main_rssi_avg)
+static inline bool ath_ant_div_comb_alt_check(struct ath_hw_antcomb_conf *conf,
+ struct ath_ant_comb *antcomb,
+ int alt_ratio, int alt_rssi_avg,
+ int main_rssi_avg)
{
- bool result = false;
- switch (div_group) {
+ bool result, set1, set2;
+
+ result = set1 = set2 = false;
+
+ if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2 &&
+ conf->alt_lna_conf == ATH_ANT_DIV_COMB_LNA1)
+ set1 = true;
+
+ if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA1 &&
+ conf->alt_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+ set2 = true;
+
+ switch (conf->div_group) {
case 0:
if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
result = true;
break;
case 1:
case 2:
- if ((((curr_main_set == ATH_ANT_DIV_COMB_LNA2) &&
- (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) &&
- (alt_rssi_avg >= (main_rssi_avg - 5))) ||
- ((curr_main_set == ATH_ANT_DIV_COMB_LNA1) &&
- (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) &&
- (alt_rssi_avg >= (main_rssi_avg - 2)))) &&
- (alt_rssi_avg >= 4))
+ if (alt_rssi_avg < 4 || alt_rssi_avg < antcomb->low_rssi_thresh)
+ break;
+
+ if ((set1 && (alt_rssi_avg >= (main_rssi_avg - 5))) ||
+ (set2 && (alt_rssi_avg >= (main_rssi_avg - 2))) ||
+ (alt_ratio > antcomb->ant_ratio))
result = true;
- else
- result = false;
+
+ break;
+ case 3:
+ if (alt_rssi_avg < 4 || alt_rssi_avg < antcomb->low_rssi_thresh)
+ break;
+
+ if ((set1 && (alt_rssi_avg >= (main_rssi_avg - 3))) ||
+ (set2 && (alt_rssi_avg >= (main_rssi_avg + 3))) ||
+ (alt_ratio > antcomb->ant_ratio))
+ result = true;
+
break;
}
@@ -108,6 +190,74 @@ static void ath_lnaconf_alt_good_scan(struct ath_ant_comb *antcomb,
}
}
+static void ath_ant_set_alt_ratio(struct ath_ant_comb *antcomb,
+ struct ath_hw_antcomb_conf *conf)
+{
+	/* set alt to the conf with maximum ratio */
+ if (antcomb->first_ratio && antcomb->second_ratio) {
+ if (antcomb->rssi_second > antcomb->rssi_third) {
+ /* first alt*/
+ if ((antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2))
+ /* Set alt LNA1 or LNA2*/
+ if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ else
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ else
+ /* Set alt to A+B or A-B */
+ conf->alt_lna_conf =
+ antcomb->first_quick_scan_conf;
+ } else if ((antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2)) {
+ /* Set alt LNA1 or LNA2 */
+ if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ else
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ } else {
+ /* Set alt to A+B or A-B */
+ conf->alt_lna_conf = antcomb->second_quick_scan_conf;
+ }
+ } else if (antcomb->first_ratio) {
+ /* first alt */
+ if ((antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->first_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2))
+ /* Set alt LNA1 or LNA2 */
+ if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ else
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ else
+ /* Set alt to A+B or A-B */
+ conf->alt_lna_conf = antcomb->first_quick_scan_conf;
+ } else if (antcomb->second_ratio) {
+ /* second alt */
+ if ((antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA2))
+ /* Set alt LNA1 or LNA2 */
+ if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ else
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ else
+ /* Set alt to A+B or A-B */
+ conf->alt_lna_conf = antcomb->second_quick_scan_conf;
+ } else {
+ /* main is largest */
+ if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
+ (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
+ /* Set alt LNA1 or LNA2 */
+ if (conf->main_lna_conf == ATH_ANT_DIV_COMB_LNA2)
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ else
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ else
+ /* Set alt to A+B or A-B */
+ conf->alt_lna_conf = antcomb->main_conf;
+ }
+}
+
static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
struct ath_hw_antcomb_conf *div_ant_conf,
int main_rssi_avg, int alt_rssi_avg,
@@ -129,7 +279,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
/* main is LNA1 */
- if (ath_is_alt_ant_ratio_better(alt_ratio,
+ if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
main_rssi_avg, alt_rssi_avg,
@@ -138,7 +288,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
else
antcomb->first_ratio = false;
} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
- if (ath_is_alt_ant_ratio_better(alt_ratio,
+ if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
main_rssi_avg, alt_rssi_avg,
@@ -147,11 +297,11 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
else
antcomb->first_ratio = false;
} else {
- if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
- (alt_rssi_avg > main_rssi_avg +
- ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
- (alt_rssi_avg > main_rssi_avg)) &&
- (antcomb->total_pkt_count > 50))
+ if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
+ 0,
+ main_rssi_avg, alt_rssi_avg,
+ antcomb->total_pkt_count))
antcomb->first_ratio = true;
else
antcomb->first_ratio = false;
@@ -164,17 +314,21 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
antcomb->rssi_first = main_rssi_avg;
antcomb->rssi_third = alt_rssi_avg;
- if (antcomb->second_quick_scan_conf == ATH_ANT_DIV_COMB_LNA1)
+		switch (antcomb->second_quick_scan_conf) {
+ case ATH_ANT_DIV_COMB_LNA1:
antcomb->rssi_lna1 = alt_rssi_avg;
- else if (antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA2)
+ break;
+ case ATH_ANT_DIV_COMB_LNA2:
antcomb->rssi_lna2 = alt_rssi_avg;
- else if (antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2) {
+ break;
+ case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2)
antcomb->rssi_lna2 = main_rssi_avg;
else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1)
antcomb->rssi_lna1 = main_rssi_avg;
+ break;
+ default:
+ break;
}
if (antcomb->rssi_lna2 > antcomb->rssi_lna1 +
@@ -184,7 +338,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) {
- if (ath_is_alt_ant_ratio_better(alt_ratio,
+ if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
main_rssi_avg, alt_rssi_avg,
@@ -193,7 +347,7 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
else
antcomb->second_ratio = false;
} else if (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2) {
- if (ath_is_alt_ant_ratio_better(alt_ratio,
+ if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
ATH_ANT_DIV_COMB_LNA1_DELTA_MID,
ATH_ANT_DIV_COMB_LNA1_DELTA_LOW,
main_rssi_avg, alt_rssi_avg,
@@ -202,105 +356,18 @@ static void ath_select_ant_div_from_quick_scan(struct ath_ant_comb *antcomb,
else
antcomb->second_ratio = false;
} else {
- if ((((alt_ratio >= ATH_ANT_DIV_COMB_ALT_ANT_RATIO2) &&
- (alt_rssi_avg > main_rssi_avg +
- ATH_ANT_DIV_COMB_LNA1_DELTA_HI)) ||
- (alt_rssi_avg > main_rssi_avg)) &&
- (antcomb->total_pkt_count > 50))
+ if (ath_is_alt_ant_ratio_better(antcomb, alt_ratio,
+ ATH_ANT_DIV_COMB_LNA1_DELTA_HI,
+ 0,
+ main_rssi_avg, alt_rssi_avg,
+ antcomb->total_pkt_count))
antcomb->second_ratio = true;
else
antcomb->second_ratio = false;
}
- /* set alt to the conf with maximun ratio */
- if (antcomb->first_ratio && antcomb->second_ratio) {
- if (antcomb->rssi_second > antcomb->rssi_third) {
- /* first alt*/
- if ((antcomb->first_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA1) ||
- (antcomb->first_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA2))
- /* Set alt LNA1 or LNA2*/
- if (div_ant_conf->main_lna_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- else
- /* Set alt to A+B or A-B */
- div_ant_conf->alt_lna_conf =
- antcomb->first_quick_scan_conf;
- } else if ((antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA1) ||
- (antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA2)) {
- /* Set alt LNA1 or LNA2 */
- if (div_ant_conf->main_lna_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- } else {
- /* Set alt to A+B or A-B */
- div_ant_conf->alt_lna_conf =
- antcomb->second_quick_scan_conf;
- }
- } else if (antcomb->first_ratio) {
- /* first alt */
- if ((antcomb->first_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA1) ||
- (antcomb->first_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA2))
- /* Set alt LNA1 or LNA2 */
- if (div_ant_conf->main_lna_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- else
- /* Set alt to A+B or A-B */
- div_ant_conf->alt_lna_conf =
- antcomb->first_quick_scan_conf;
- } else if (antcomb->second_ratio) {
- /* second alt */
- if ((antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA1) ||
- (antcomb->second_quick_scan_conf ==
- ATH_ANT_DIV_COMB_LNA2))
- /* Set alt LNA1 or LNA2 */
- if (div_ant_conf->main_lna_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- else
- /* Set alt to A+B or A-B */
- div_ant_conf->alt_lna_conf =
- antcomb->second_quick_scan_conf;
- } else {
- /* main is largest */
- if ((antcomb->main_conf == ATH_ANT_DIV_COMB_LNA1) ||
- (antcomb->main_conf == ATH_ANT_DIV_COMB_LNA2))
- /* Set alt LNA1 or LNA2 */
- if (div_ant_conf->main_lna_conf ==
- ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else
- div_ant_conf->alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- else
- /* Set alt to A+B or A-B */
- div_ant_conf->alt_lna_conf = antcomb->main_conf;
- }
+ ath_ant_set_alt_ratio(antcomb, div_ant_conf);
+
break;
default:
break;
@@ -430,8 +497,7 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->fast_div_bias = 0x1;
break;
case 0x10: /* LNA2 A-B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
ant_conf->fast_div_bias = 0x1;
else
ant_conf->fast_div_bias = 0x2;
@@ -440,15 +506,13 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->fast_div_bias = 0x1;
break;
case 0x13: /* LNA2 A+B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
ant_conf->fast_div_bias = 0x1;
else
ant_conf->fast_div_bias = 0x2;
break;
case 0x20: /* LNA1 A-B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
ant_conf->fast_div_bias = 0x1;
else
ant_conf->fast_div_bias = 0x2;
@@ -457,8 +521,7 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
ant_conf->fast_div_bias = 0x1;
break;
case 0x23: /* LNA1 A+B */
- if (!(antcomb->scan) &&
- (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO))
+ if (!antcomb->scan && (alt_ratio > antcomb->ant_ratio))
ant_conf->fast_div_bias = 0x1;
else
ant_conf->fast_div_bias = 0x2;
@@ -475,6 +538,9 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
default:
break;
}
+
+ if (antcomb->fast_div_bias)
+ ant_conf->fast_div_bias = antcomb->fast_div_bias;
} else if (ant_conf->div_group == 3) {
switch ((ant_conf->main_lna_conf << 4) |
ant_conf->alt_lna_conf) {
@@ -540,6 +606,138 @@ static void ath_ant_div_conf_fast_divbias(struct ath_hw_antcomb_conf *ant_conf,
}
}
+static void ath_ant_try_scan(struct ath_ant_comb *antcomb,
+ struct ath_hw_antcomb_conf *conf,
+ int curr_alt_set, int alt_rssi_avg,
+ int main_rssi_avg)
+{
+ switch (curr_alt_set) {
+ case ATH_ANT_DIV_COMB_LNA2:
+ antcomb->rssi_lna2 = alt_rssi_avg;
+ antcomb->rssi_lna1 = main_rssi_avg;
+ antcomb->scan = true;
+ /* set to A+B */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ break;
+ case ATH_ANT_DIV_COMB_LNA1:
+ antcomb->rssi_lna1 = alt_rssi_avg;
+ antcomb->rssi_lna2 = main_rssi_avg;
+ antcomb->scan = true;
+ /* set to A+B */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ break;
+ case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
+ antcomb->rssi_add = alt_rssi_avg;
+ antcomb->scan = true;
+ /* set to A-B */
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ break;
+ case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
+ antcomb->rssi_sub = alt_rssi_avg;
+ antcomb->scan = false;
+ if (antcomb->rssi_lna2 >
+ (antcomb->rssi_lna1 + ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
+ /* use LNA2 as main LNA */
+ if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
+ (antcomb->rssi_add > antcomb->rssi_sub)) {
+ /* set to A+B */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ } else if (antcomb->rssi_sub >
+ antcomb->rssi_lna1) {
+ /* set to A-B */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ } else {
+ /* set to LNA1 */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ }
+ } else {
+ /* use LNA1 as main LNA */
+ if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
+ (antcomb->rssi_add > antcomb->rssi_sub)) {
+ /* set to A+B */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
+ } else if (antcomb->rssi_sub >
+ antcomb->rssi_lna1) {
+ /* set to A-B */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
+ } else {
+ /* set to LNA2 */
+ conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static bool ath_ant_try_switch(struct ath_hw_antcomb_conf *div_ant_conf,
+ struct ath_ant_comb *antcomb,
+ int alt_ratio, int alt_rssi_avg,
+ int main_rssi_avg, int curr_main_set,
+ int curr_alt_set)
+{
+ bool ret = false;
+
+ if (ath_ant_div_comb_alt_check(div_ant_conf, antcomb, alt_ratio,
+ alt_rssi_avg, main_rssi_avg)) {
+ if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
+ /*
+ * Switch main and alt LNA.
+ */
+ div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
+ div_ant_conf->main_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+ }
+
+ ret = true;
+ } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
+ (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
+ /*
+		 * Set alt to another LNA.
+ */
+ if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
+ div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA1;
+ else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
+ div_ant_conf->alt_lna_conf = ATH_ANT_DIV_COMB_LNA2;
+
+ ret = true;
+ }
+
+ return ret;
+}
+
+static bool ath_ant_short_scan_check(struct ath_ant_comb *antcomb)
+{
+ int alt_ratio;
+
+ if (!antcomb->scan || !antcomb->alt_good)
+ return false;
+
+ if (time_after(jiffies, antcomb->scan_start_time +
+ msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
+ return true;
+
+ if (antcomb->total_pkt_count == ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
+ alt_ratio = ((antcomb->alt_recv_cnt * 100) /
+ antcomb->total_pkt_count);
+ if (alt_ratio < antcomb->ant_ratio)
+ return true;
+ }
+
+ return false;
+}
+
void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
{
struct ath_hw_antcomb_conf div_ant_conf;
@@ -549,41 +747,46 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
int main_rssi = rs->rs_rssi_ctl0;
int alt_rssi = rs->rs_rssi_ctl1;
int rx_ant_conf, main_ant_conf;
- bool short_scan = false;
+ bool short_scan = false, ret;
rx_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_CURRENT_SHIFT) &
ATH_ANT_RX_MASK;
main_ant_conf = (rs->rs_rssi_ctl2 >> ATH_ANT_RX_MAIN_SHIFT) &
ATH_ANT_RX_MASK;
+ if (alt_rssi >= antcomb->low_rssi_thresh) {
+ antcomb->ant_ratio = ATH_ANT_DIV_COMB_ALT_ANT_RATIO;
+ antcomb->ant_ratio2 = ATH_ANT_DIV_COMB_ALT_ANT_RATIO2;
+ } else {
+ antcomb->ant_ratio = ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI;
+ antcomb->ant_ratio2 = ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI;
+ }
+
/* Record packet only when both main_rssi and alt_rssi is positive */
if (main_rssi > 0 && alt_rssi > 0) {
antcomb->total_pkt_count++;
antcomb->main_total_rssi += main_rssi;
antcomb->alt_total_rssi += alt_rssi;
+
if (main_ant_conf == rx_ant_conf)
antcomb->main_recv_cnt++;
else
antcomb->alt_recv_cnt++;
}
- /* Short scan check */
- if (antcomb->scan && antcomb->alt_good) {
- if (time_after(jiffies, antcomb->scan_start_time +
- msecs_to_jiffies(ATH_ANT_DIV_COMB_SHORT_SCAN_INTR)))
- short_scan = true;
- else
- if (antcomb->total_pkt_count ==
- ATH_ANT_DIV_COMB_SHORT_SCAN_PKTCOUNT) {
- alt_ratio = ((antcomb->alt_recv_cnt * 100) /
- antcomb->total_pkt_count);
- if (alt_ratio < ATH_ANT_DIV_COMB_ALT_ANT_RATIO)
- short_scan = true;
- }
+ if (main_ant_conf == rx_ant_conf) {
+ ANT_STAT_INC(ANT_MAIN, recv_cnt);
+ ANT_LNA_INC(ANT_MAIN, rx_ant_conf);
+ } else {
+ ANT_STAT_INC(ANT_ALT, recv_cnt);
+ ANT_LNA_INC(ANT_ALT, rx_ant_conf);
}
+ /* Short scan check */
+ short_scan = ath_ant_short_scan_check(antcomb);
+
if (((antcomb->total_pkt_count < ATH_ANT_DIV_COMB_MAX_PKTCOUNT) ||
- rs->rs_moreaggr) && !short_scan)
+ rs->rs_moreaggr) && !short_scan)
return;
if (antcomb->total_pkt_count) {
@@ -595,15 +798,13 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
antcomb->total_pkt_count);
}
-
ath9k_hw_antdiv_comb_conf_get(sc->sc_ah, &div_ant_conf);
curr_alt_set = div_ant_conf.alt_lna_conf;
curr_main_set = div_ant_conf.main_lna_conf;
-
antcomb->count++;
if (antcomb->count == ATH_ANT_DIV_COMB_MAX_COUNT) {
- if (alt_ratio > ATH_ANT_DIV_COMB_ALT_ANT_RATIO) {
+ if (alt_ratio > antcomb->ant_ratio) {
ath_lnaconf_alt_good_scan(antcomb, div_ant_conf,
main_rssi_avg);
antcomb->alt_good = true;
@@ -617,153 +818,47 @@ void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs)
}
if (!antcomb->scan) {
- if (ath_ant_div_comb_alt_check(div_ant_conf.div_group,
- alt_ratio, curr_main_set, curr_alt_set,
- alt_rssi_avg, main_rssi_avg)) {
- if (curr_alt_set == ATH_ANT_DIV_COMB_LNA2) {
- /* Switch main and alt LNA */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- } else if (curr_alt_set == ATH_ANT_DIV_COMB_LNA1) {
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- }
-
- goto div_comb_done;
- } else if ((curr_alt_set != ATH_ANT_DIV_COMB_LNA1) &&
- (curr_alt_set != ATH_ANT_DIV_COMB_LNA2)) {
- /* Set alt to another LNA */
- if (curr_main_set == ATH_ANT_DIV_COMB_LNA2)
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1)
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
-
- goto div_comb_done;
- }
-
- if ((alt_rssi_avg < (main_rssi_avg +
- div_ant_conf.lna1_lna2_delta)))
+ ret = ath_ant_try_switch(&div_ant_conf, antcomb, alt_ratio,
+ alt_rssi_avg, main_rssi_avg,
+ curr_main_set, curr_alt_set);
+ if (ret)
goto div_comb_done;
}
+ if (!antcomb->scan &&
+ (alt_rssi_avg < (main_rssi_avg + div_ant_conf.lna1_lna2_delta)))
+ goto div_comb_done;
+
if (!antcomb->scan_not_start) {
- switch (curr_alt_set) {
- case ATH_ANT_DIV_COMB_LNA2:
- antcomb->rssi_lna2 = alt_rssi_avg;
- antcomb->rssi_lna1 = main_rssi_avg;
- antcomb->scan = true;
- /* set to A+B */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- break;
- case ATH_ANT_DIV_COMB_LNA1:
- antcomb->rssi_lna1 = alt_rssi_avg;
- antcomb->rssi_lna2 = main_rssi_avg;
- antcomb->scan = true;
- /* set to A+B */
- div_ant_conf.main_lna_conf = ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- break;
- case ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2:
- antcomb->rssi_add = alt_rssi_avg;
- antcomb->scan = true;
- /* set to A-B */
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- break;
- case ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2:
- antcomb->rssi_sub = alt_rssi_avg;
- antcomb->scan = false;
- if (antcomb->rssi_lna2 >
- (antcomb->rssi_lna1 +
- ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA)) {
- /* use LNA2 as main LNA */
- if ((antcomb->rssi_add > antcomb->rssi_lna1) &&
- (antcomb->rssi_add > antcomb->rssi_sub)) {
- /* set to A+B */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- } else if (antcomb->rssi_sub >
- antcomb->rssi_lna1) {
- /* set to A-B */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- } else {
- /* set to LNA1 */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- }
- } else {
- /* use LNA1 as main LNA */
- if ((antcomb->rssi_add > antcomb->rssi_lna2) &&
- (antcomb->rssi_add > antcomb->rssi_sub)) {
- /* set to A+B */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2;
- } else if (antcomb->rssi_sub >
- antcomb->rssi_lna1) {
- /* set to A-B */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2;
- } else {
- /* set to LNA2 */
- div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
- div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
- }
- }
- break;
- default:
- break;
- }
+ ath_ant_try_scan(antcomb, &div_ant_conf, curr_alt_set,
+ alt_rssi_avg, main_rssi_avg);
} else {
if (!antcomb->alt_good) {
antcomb->scan_not_start = false;
/* Set alt to another LNA */
if (curr_main_set == ATH_ANT_DIV_COMB_LNA2) {
div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
+ ATH_ANT_DIV_COMB_LNA2;
div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
+ ATH_ANT_DIV_COMB_LNA1;
} else if (curr_main_set == ATH_ANT_DIV_COMB_LNA1) {
div_ant_conf.main_lna_conf =
- ATH_ANT_DIV_COMB_LNA1;
+ ATH_ANT_DIV_COMB_LNA1;
div_ant_conf.alt_lna_conf =
- ATH_ANT_DIV_COMB_LNA2;
+ ATH_ANT_DIV_COMB_LNA2;
}
goto div_comb_done;
}
+ ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
+ main_rssi_avg, alt_rssi_avg,
+ alt_ratio);
+ antcomb->quick_scan_cnt++;
}
- ath_select_ant_div_from_quick_scan(antcomb, &div_ant_conf,
- main_rssi_avg, alt_rssi_avg,
- alt_ratio);
-
- antcomb->quick_scan_cnt++;
-
div_comb_done:
ath_ant_div_conf_fast_divbias(&div_ant_conf, antcomb, alt_ratio);
ath9k_hw_antdiv_comb_conf_set(sc->sc_ah, &div_ant_conf);
+ ath9k_debug_stat_ant(sc, &div_ant_conf, main_rssi_avg, alt_rssi_avg);
antcomb->scan_start_time = jiffies;
antcomb->total_pkt_count = 0;
@@ -772,26 +867,3 @@ div_comb_done:
antcomb->main_recv_cnt = 0;
antcomb->alt_recv_cnt = 0;
}
-
-void ath_ant_comb_update(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_hw_antcomb_conf div_ant_conf;
- u8 lna_conf;
-
- ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
-
- if (sc->ant_rx == 1)
- lna_conf = ATH_ANT_DIV_COMB_LNA1;
- else
- lna_conf = ATH_ANT_DIV_COMB_LNA2;
-
- div_ant_conf.main_lna_conf = lna_conf;
- div_ant_conf.alt_lna_conf = lna_conf;
-
- ath9k_hw_antdiv_comb_conf_set(ah, &div_ant_conf);
-
- if (common->antenna_diversity)
- ath9k_hw_antctrl_shared_chain_lnadiv(ah, true);
-}
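
The antenna.c refactor above centralises the "is the alternate antenna doing better?" rule in ath_is_alt_ant_ratio_better(), with the ratio thresholds now picked per-packet from the RSSI level (antcomb->ant_ratio / ant_ratio2). The rule can be checked in isolation; this user-space model copies the conditions from the hunk, while the numbers used in main() are just sample values, not driver defaults:

#include <stdbool.h>
#include <stdio.h>

struct antcomb_model {
	int ant_ratio2;        /* e.g. ATH_ANT_DIV_COMB_ALT_ANT_RATIO2 */
	int low_rssi_thresh;   /* card-specific low-RSSI cutoff */
};

static bool alt_ant_ratio_better(const struct antcomb_model *c,
				 int alt_ratio, int maxdelta, int mindelta,
				 int main_rssi_avg, int alt_rssi_avg,
				 int pkt_count)
{
	if (pkt_count <= 50)                        /* not enough samples yet */
		return false;

	if (alt_rssi_avg > main_rssi_avg + mindelta)
		return true;

	if (alt_ratio >= c->ant_ratio2 &&
	    alt_rssi_avg >= c->low_rssi_thresh &&
	    alt_rssi_avg > main_rssi_avg + maxdelta)
		return true;

	return false;
}

int main(void)
{
	struct antcomb_model c = { .ant_ratio2 = 20, .low_rssi_thresh = 20 };

	/* alt antenna clearly stronger than main: switching is worthwhile */
	printf("%d\n", alt_ant_ratio_better(&c, 30, -4, 2, 10, 15, 100));
	return 0;
}
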
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index d1acfe98918..08656473c63 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -610,7 +610,15 @@ static void ar5008_hw_override_ini(struct ath_hw *ah,
REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
if (AR_SREV_9280_20_OR_LATER(ah)) {
- val = REG_READ(ah, AR_PCU_MISC_MODE2);
+ /*
+ * For AR9280 and above, there is a new feature that allows
+ * Multicast search based on both MAC Address and Key ID.
+ * By default, this feature is enabled. But since the driver
+ * is not using this feature, we switch it off; otherwise
+ * multicast search based on MAC addr only will fail.
+ */
+ val = REG_READ(ah, AR_PCU_MISC_MODE2) &
+ (~AR_ADHOC_MCAST_KEYID_ENABLE);
if (!AR_SREV_9271(ah))
val &= ~AR_PCU_MISC_MODE2_HWWAR1;
@@ -1152,8 +1160,6 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
*/
WARN_ON(1);
break;
- case ATH9K_ANI_PRESENT:
- break;
default:
ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
return false;
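
The AR_PCU_MISC_MODE2 hunk is a classic read-modify-write: read the register, clear the multicast-search-by-KeyID enable bit, then apply the remaining per-chip masking before the value is written back further down in the function. The shape of that operation, reduced to plain C (the register accessors and the bit name are placeholders; a real driver would use its MMIO helpers):

#include <stdint.h>

#define FEATURE_ENABLE_BIT  (1u << 5)   /* placeholder for AR_ADHOC_MCAST_KEYID_ENABLE */

static uint32_t reg_read(volatile uint32_t *reg)            { return *reg; }
static void     reg_write(volatile uint32_t *reg, uint32_t v) { *reg = v; }

static void disable_unused_feature(volatile uint32_t *reg)
{
	uint32_t val = reg_read(reg) & ~FEATURE_ENABLE_BIT;  /* clear on read */
	/* ...further chip-specific masking would happen here... */
	reg_write(reg, val);
}
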
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 8dc2d089cde..fb61b081d17 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -269,13 +269,12 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
if (ah->config.pcie_waen & AR_WA_D3_L1_DISABLE)
val |= AR_WA_D3_L1_DISABLE;
} else {
- if (((AR_SREV_9285(ah) ||
- AR_SREV_9271(ah) ||
- AR_SREV_9287(ah)) &&
- (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)) ||
- (AR_SREV_9280(ah) &&
- (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE))) {
- val |= AR_WA_D3_L1_DISABLE;
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah) || AR_SREV_9287(ah)) {
+ if (AR9285_WA_DEFAULT & AR_WA_D3_L1_DISABLE)
+ val |= AR_WA_D3_L1_DISABLE;
+ } else if (AR_SREV_9280(ah)) {
+ if (AR9280_WA_DEFAULT & AR_WA_D3_L1_DISABLE)
+ val |= AR_WA_D3_L1_DISABLE;
}
}
@@ -297,24 +296,18 @@ static void ar9002_hw_configpcipowersave(struct ath_hw *ah,
} else {
if (ah->config.pcie_waen) {
val = ah->config.pcie_waen;
- if (!power_off)
- val &= (~AR_WA_D3_L1_DISABLE);
+ val &= (~AR_WA_D3_L1_DISABLE);
} else {
- if (AR_SREV_9285(ah) ||
- AR_SREV_9271(ah) ||
- AR_SREV_9287(ah)) {
+ if (AR_SREV_9285(ah) || AR_SREV_9271(ah) || AR_SREV_9287(ah)) {
val = AR9285_WA_DEFAULT;
- if (!power_off)
- val &= (~AR_WA_D3_L1_DISABLE);
- }
- else if (AR_SREV_9280(ah)) {
+ val &= (~AR_WA_D3_L1_DISABLE);
+ } else if (AR_SREV_9280(ah)) {
/*
* For AR9280 chips, bit 22 of 0x4004
* needs to be set.
*/
val = AR9280_WA_DEFAULT;
- if (!power_off)
- val &= (~AR_WA_D3_L1_DISABLE);
+ val &= (~AR_WA_D3_L1_DISABLE);
} else {
val = AR_WA_DEFAULT;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index f4003512d8d..1fc1fa955d4 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -555,6 +555,69 @@ static void ar9002_hw_antdiv_comb_conf_set(struct ath_hw *ah,
REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval);
}
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
+static void ar9002_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
+{
+ struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
+ u8 antdiv_ctrl1, antdiv_ctrl2;
+ u32 regval;
+
+ if (enable) {
+ antdiv_ctrl1 = ATH_BT_COEX_ANTDIV_CONTROL1_ENABLE;
+ antdiv_ctrl2 = ATH_BT_COEX_ANTDIV_CONTROL2_ENABLE;
+
+ /*
+ * Don't disable BT ant to allow BB to control SWCOM.
+ */
+ btcoex->bt_coex_mode2 &= (~(AR_BT_DISABLE_BT_ANT));
+ REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
+
+ REG_WRITE(ah, AR_PHY_SWITCH_COM, ATH_BT_COEX_ANT_DIV_SWITCH_COM);
+ REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0, 0, 0xf0000000);
+ } else {
+ /*
+ * Disable antenna diversity, use LNA1 only.
+ */
+ antdiv_ctrl1 = ATH_BT_COEX_ANTDIV_CONTROL1_FIXED_A;
+ antdiv_ctrl2 = ATH_BT_COEX_ANTDIV_CONTROL2_FIXED_A;
+
+ /*
+ * Disable BT Ant. to allow concurrent BT and WLAN receive.
+ */
+ btcoex->bt_coex_mode2 |= AR_BT_DISABLE_BT_ANT;
+ REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
+
+ /*
+ * Program SWCOM table to make sure RF switch always parks
+ * at BT side.
+ */
+ REG_WRITE(ah, AR_PHY_SWITCH_COM, 0);
+ REG_RMW(ah, AR_PHY_SWITCH_CHAIN_0, 0, 0xf0000000);
+ }
+
+ regval = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
+ regval &= (~(AR_PHY_9285_ANT_DIV_CTL_ALL));
+ /*
+ * Clear ant_fast_div_bias [14:9] since for WB195,
+ * the main LNA is always LNA1.
+ */
+ regval &= (~(AR_PHY_9285_FAST_DIV_BIAS));
+ regval |= SM(antdiv_ctrl1, AR_PHY_9285_ANT_DIV_CTL);
+ regval |= SM(antdiv_ctrl2, AR_PHY_9285_ANT_DIV_ALT_LNACONF);
+ regval |= SM((antdiv_ctrl2 >> 2), AR_PHY_9285_ANT_DIV_MAIN_LNACONF);
+ regval |= SM((antdiv_ctrl1 >> 1), AR_PHY_9285_ANT_DIV_ALT_GAINTB);
+ regval |= SM((antdiv_ctrl1 >> 2), AR_PHY_9285_ANT_DIV_MAIN_GAINTB);
+ REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regval);
+
+ regval = REG_READ(ah, AR_PHY_CCK_DETECT);
+ regval &= (~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
+ regval |= SM((antdiv_ctrl1 >> 3), AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
+ REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
+}
+
+#endif
+
static void ar9002_hw_spectral_scan_config(struct ath_hw *ah,
struct ath_spec_scan *param)
{
@@ -634,5 +697,9 @@ void ar9002_hw_attach_phy_ops(struct ath_hw *ah)
ops->spectral_scan_trigger = ar9002_hw_spectral_scan_trigger;
ops->spectral_scan_wait = ar9002_hw_spectral_scan_wait;
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ ops->set_bt_ant_diversity = ar9002_hw_set_bt_ant_diversity;
+#endif
+
ar9002_hw_set_nf_limits(ah);
}
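
ar9002_hw_set_bt_ant_diversity() packs the two EEPROM control nibbles (antdiv_ctrl1/antdiv_ctrl2) into the multichain gain control register with shift-and-mask operations, which is what the driver's SM() macro does. A reduced model of that packing (the field masks and shifts below are invented for illustration; the real offsets live in ar9002_phy.h):

#include <stdint.h>

/* Simplified stand-in for the driver's SM() helper: place a value into a
 * masked field, given the mask and its shift. */
#define FIELD_SET(val, mask, shift)  (((uint32_t)(val) << (shift)) & (mask))

#define ANT_DIV_CTL_MASK   0x01000000u
#define ANT_DIV_CTL_SHIFT  24
#define ALT_LNACONF_MASK   0x06000000u
#define ALT_LNACONF_SHIFT  25
#define MAIN_LNACONF_MASK  0x18000000u
#define MAIN_LNACONF_SHIFT 27

static uint32_t pack_antdiv(uint32_t regval, uint8_t ctrl1, uint8_t ctrl2)
{
	/* clear the fields, then merge the EEPROM-derived control bits */
	regval &= ~(ANT_DIV_CTL_MASK | ALT_LNACONF_MASK | MAIN_LNACONF_MASK);
	regval |= FIELD_SET(ctrl1 & 0x1, ANT_DIV_CTL_MASK, ANT_DIV_CTL_SHIFT);
	regval |= FIELD_SET(ctrl2 & 0x3, ALT_LNACONF_MASK, ALT_LNACONF_SHIFT);
	regval |= FIELD_SET((ctrl2 >> 2) & 0x3, MAIN_LNACONF_MASK, MAIN_LNACONF_SHIFT);
	return regval;
}
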
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.h b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
index f9eb2c35716..6314ae2e93e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.h
@@ -317,13 +317,15 @@
#define AR_PHY_9285_ANT_DIV_ALT_GAINTB_S 29
#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB 0x40000000
#define AR_PHY_9285_ANT_DIV_MAIN_GAINTB_S 30
-#define AR_PHY_9285_ANT_DIV_LNA1 2
-#define AR_PHY_9285_ANT_DIV_LNA2 1
-#define AR_PHY_9285_ANT_DIV_LNA1_PLUS_LNA2 3
-#define AR_PHY_9285_ANT_DIV_LNA1_MINUS_LNA2 0
#define AR_PHY_9285_ANT_DIV_GAINTB_0 0
#define AR_PHY_9285_ANT_DIV_GAINTB_1 1
+#define ATH_BT_COEX_ANTDIV_CONTROL1_ENABLE 0x0b
+#define ATH_BT_COEX_ANTDIV_CONTROL2_ENABLE 0x09
+#define ATH_BT_COEX_ANTDIV_CONTROL1_FIXED_A 0x04
+#define ATH_BT_COEX_ANTDIV_CONTROL2_FIXED_A 0x09
+#define ATH_BT_COEX_ANT_DIV_SWITCH_COM 0x66666666
+
#define AR_PHY_EXT_CCA0 0x99b8
#define AR_PHY_EXT_CCA0_THRESH62 0x000000FF
#define AR_PHY_EXT_CCA0_THRESH62_S 0
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index d105e43d22e..f4864807e15 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3541,13 +3541,12 @@ static u16 ar9003_switch_com_spdt_get(struct ath_hw *ah, bool is2ghz)
return le16_to_cpu(ar9003_modal_header(ah, is2ghz)->switchcomspdt);
}
-
-static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
+u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
{
return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->antCtrlCommon);
}
-static u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz)
+u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz)
{
return le32_to_cpu(ar9003_modal_header(ah, is2ghz)->antCtrlCommon2);
}
@@ -3561,6 +3560,7 @@ static u16 ar9003_hw_ant_ctrl_chain_get(struct ath_hw *ah, int chain,
static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
{
+ struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_capabilities *pCap = &ah->caps;
int chain;
u32 regval, value, gpio;
@@ -3614,6 +3614,11 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
}
value = ar9003_hw_ant_ctrl_common_2_get(ah, is2ghz);
+ if (AR_SREV_9485(ah) && common->bt_ant_diversity) {
+ value &= ~AR_SWITCH_TABLE_COM2_ALL;
+ value |= ah->config.ant_ctrl_comm2g_switch_enable;
+
+ }
REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2, AR_SWITCH_TABLE_COM2_ALL, value);
if ((AR_SREV_9462(ah)) && (ah->rxchainmask == 0x2)) {
@@ -3645,8 +3650,11 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
regval &= (~AR_PHY_ANT_DIV_LNADIV);
regval |= ((value >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
+ if (AR_SREV_9485(ah) && common->bt_ant_diversity)
+ regval |= AR_ANT_DIV_ENABLE;
+
if (AR_SREV_9565(ah)) {
- if (ah->shared_chain_lnadiv) {
+ if (common->bt_ant_diversity) {
regval |= (1 << AR_PHY_ANT_SW_RX_PROT_S);
} else {
regval &= ~(1 << AR_PHY_ANT_DIV_LNADIV_S);
@@ -3656,10 +3664,14 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
- /*enable fast_div */
+ /* enable fast_div */
regval = REG_READ(ah, AR_PHY_CCK_DETECT);
regval &= (~AR_FAST_DIV_ENABLE);
regval |= ((value >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
+
+ if (AR_SREV_9485(ah) && common->bt_ant_diversity)
+ regval |= AR_FAST_DIV_ENABLE;
+
REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
@@ -3673,9 +3685,9 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
AR_PHY_ANT_DIV_ALT_GAINTB |
AR_PHY_ANT_DIV_MAIN_GAINTB));
/* by default use LNA1 for the main antenna */
- regval |= (AR_PHY_ANT_DIV_LNA1 <<
+ regval |= (ATH_ANT_DIV_COMB_LNA1 <<
AR_PHY_ANT_DIV_MAIN_LNACONF_S);
- regval |= (AR_PHY_ANT_DIV_LNA2 <<
+ regval |= (ATH_ANT_DIV_COMB_LNA2 <<
AR_PHY_ANT_DIV_ALT_LNACONF_S);
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
}
@@ -3813,6 +3825,11 @@ static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
else
value = ar9003_hw_atten_chain_get_margin(ah, i, chan);
+ if (ah->config.alt_mingainidx)
+ REG_RMW_FIELD(ah, AR_PHY_EXT_ATTEN_CTL_0,
+ AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN,
+ value);
+
REG_RMW_FIELD(ah, ext_atten_reg[i],
AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN,
value);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 874f6570bd1..75d4fb41962 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -334,6 +334,8 @@ struct ar9300_eeprom {
s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah);
s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah);
+u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz);
+u32 ar9003_hw_ant_ctrl_common_2_get(struct ath_hw *ah, bool is2ghz);
u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index d402cb32283..608bb4824e2 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -153,7 +153,7 @@ static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
if (!ah->is_clk_25mhz)
INIT_INI_ARRAY(&ah->iniAdditional,
ar9340_1p0_radio_core_40M);
- } else if (AR_SREV_9485_11(ah)) {
+ } else if (AR_SREV_9485_11_OR_LATER(ah)) {
/* mac */
INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
ar9485_1_1_mac_core);
@@ -424,7 +424,7 @@ static void ar9003_tx_gain_table_mode0(struct ath_hw *ah)
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9340Modes_lowest_ob_db_tx_gain_table_1p0);
- else if (AR_SREV_9485_11(ah))
+ else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485_modes_lowest_ob_db_tx_gain_1_1);
else if (AR_SREV_9550(ah))
@@ -458,7 +458,7 @@ static void ar9003_tx_gain_table_mode1(struct ath_hw *ah)
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9340Modes_high_ob_db_tx_gain_table_1p0);
- else if (AR_SREV_9485_11(ah))
+ else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_high_ob_db_tx_gain_1_1);
else if (AR_SREV_9580(ah))
@@ -492,7 +492,7 @@ static void ar9003_tx_gain_table_mode2(struct ath_hw *ah)
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9340Modes_low_ob_db_tx_gain_table_1p0);
- else if (AR_SREV_9485_11(ah))
+ else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_low_ob_db_tx_gain_1_1);
else if (AR_SREV_9580(ah))
@@ -517,7 +517,7 @@ static void ar9003_tx_gain_table_mode3(struct ath_hw *ah)
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9340Modes_high_power_tx_gain_table_1p0);
- else if (AR_SREV_9485_11(ah))
+ else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_high_power_tx_gain_1_1);
else if (AR_SREV_9580(ah))
@@ -552,7 +552,7 @@ static void ar9003_tx_gain_table_mode4(struct ath_hw *ah)
static void ar9003_tx_gain_table_mode5(struct ath_hw *ah)
{
- if (AR_SREV_9485_11(ah))
+ if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_green_ob_db_tx_gain_1_1);
else if (AR_SREV_9340(ah))
@@ -571,7 +571,7 @@ static void ar9003_tx_gain_table_mode6(struct ath_hw *ah)
if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9340Modes_low_ob_db_and_spur_tx_gain_table_1p0);
- else if (AR_SREV_9485_11(ah))
+ else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesTxGain,
ar9485Modes_green_spur_ob_db_tx_gain_1_1);
else if (AR_SREV_9580(ah))
@@ -611,7 +611,7 @@ static void ar9003_rx_gain_table_mode0(struct ath_hw *ah)
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9340Common_rx_gain_table_1p0);
- else if (AR_SREV_9485_11(ah))
+ else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9485_common_rx_gain_1_1);
else if (AR_SREV_9550(ah)) {
@@ -644,7 +644,7 @@ static void ar9003_rx_gain_table_mode1(struct ath_hw *ah)
else if (AR_SREV_9340(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9340Common_wo_xlna_rx_gain_table_1p0);
- else if (AR_SREV_9485_11(ah))
+ else if (AR_SREV_9485_11_OR_LATER(ah))
INIT_INI_ARRAY(&ah->iniModesRxGain,
ar9485Common_wo_xlna_rx_gain_1_1);
else if (AR_SREV_9462_21(ah))
@@ -745,16 +745,25 @@ static void ar9003_hw_init_mode_gain_regs(struct ath_hw *ah)
static void ar9003_hw_configpcipowersave(struct ath_hw *ah,
bool power_off)
{
+ /*
+ * Increase L1 Entry Latency. Some WB222 boards don't have
+ * this change in eeprom/OTP.
+ *
+ */
+ if (AR_SREV_9462(ah)) {
+ u32 val = ah->config.aspm_l1_fix;
+ if ((val & 0xff000000) == 0x17000000) {
+ val &= 0x00ffffff;
+ val |= 0x27000000;
+ REG_WRITE(ah, 0x570c, val);
+ }
+ }
+
/* Nothing to do on restore for 11N */
if (!power_off /* !restore */) {
/* set bit 19 to allow forcing of pcie core into L1 state */
REG_SET_BIT(ah, AR_PCIE_PM_CTRL, AR_PCIE_PM_CTRL_ENA);
-
- /* Several PCIe massages to ensure proper behaviour */
- if (ah->config.pcie_waen)
- REG_WRITE(ah, AR_WA, ah->config.pcie_waen);
- else
- REG_WRITE(ah, AR_WA, ah->WARegVal);
+ REG_WRITE(ah, AR_WA, ah->WARegVal);
}
/*
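
The new AR9462 quirk in ar9003_hw_configpcipowersave() checks whether the top byte of the cached ASPM value is 0x17 and, if so, rewrites it to 0x27 before programming register 0x570c, raising the L1 entry latency for boards whose EEPROM/OTP lacks the fix. The byte-patching step on its own:

#include <stdint.h>
#include <assert.h>

/* Bump the top byte of the ASPM config from 0x17 to 0x27, leaving the
 * lower 24 bits untouched, which is the transform the hunk applies. */
static uint32_t fix_l1_entry_latency(uint32_t val)
{
	if ((val & 0xff000000u) == 0x17000000u) {
		val &= 0x00ffffffu;
		val |= 0x27000000u;
	}
	return val;
}

int main(void)
{
	assert(fix_l1_entry_latency(0x17001234u) == 0x27001234u);
	assert(fix_l1_entry_latency(0x27001234u) == 0x27001234u);  /* left alone */
	return 0;
}
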
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 5163abd3937..f6c5c1b5047 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -491,6 +491,7 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
rxs->rs_rate = MS(rxsp->status1, AR_RxRate);
rxs->rs_more = (rxsp->status2 & AR_RxMore) ? 1 : 0;
+ rxs->rs_firstaggr = (rxsp->status11 & AR_RxFirstAggr) ? 1 : 0;
rxs->rs_isaggr = (rxsp->status11 & AR_RxAggr) ? 1 : 0;
rxs->rs_moreaggr = (rxsp->status11 & AR_RxMoreAggr) ? 1 : 0;
rxs->rs_antenna = (MS(rxsp->status4, AR_RxAntenna) & 0x7);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 1f694ab3cc7..e897648d323 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -632,6 +632,22 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
REG_SET_BIT(ah, AR_PHY_CCK_DETECT,
AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
+
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
+ REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
+ AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
+
+ if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
+ ah->enabled_cals |= TX_IQ_CAL;
+ else
+ ah->enabled_cals &= ~TX_IQ_CAL;
+
+ if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE)
+ ah->enabled_cals |= TX_CL_CAL;
+ else
+ ah->enabled_cals &= ~TX_CL_CAL;
+ }
}
static void ar9003_hw_prog_ini(struct ath_hw *ah,
@@ -814,29 +830,12 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
if (chan->channel == 2484)
ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1);
- if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
- REG_WRITE(ah, AR_GLB_SWREG_DISCONT_MODE,
- AR_GLB_SWREG_DISCONT_EN_BT_WLAN);
-
ah->modes_index = modesIndex;
ar9003_hw_override_ini(ah);
ar9003_hw_set_channel_regs(ah, chan);
ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
ath9k_hw_apply_txpower(ah, chan, false);
- if (AR_SREV_9462(ah) || AR_SREV_9565(ah)) {
- if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
- AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL))
- ah->enabled_cals |= TX_IQ_CAL;
- else
- ah->enabled_cals &= ~TX_IQ_CAL;
-
- if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE)
- ah->enabled_cals |= TX_CL_CAL;
- else
- ah->enabled_cals &= ~TX_CL_CAL;
- }
-
return 0;
}
@@ -1173,6 +1172,10 @@ skip_ws_det:
* is_on == 0 means MRC CCK is OFF (more noise imm)
*/
bool is_on = param ? 1 : 0;
+
+ if (ah->caps.rx_chainmask == 1)
+ break;
+
REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
AR_PHY_MRC_CCK_ENABLE, is_on);
REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
@@ -1190,8 +1193,6 @@ skip_ws_det:
}
break;
}
- case ATH9K_ANI_PRESENT:
- break;
default:
ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
return false;
@@ -1413,65 +1414,111 @@ static void ar9003_hw_antdiv_comb_conf_set(struct ath_hw *ah,
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
}
-static void ar9003_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah,
- bool enable)
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
+static void ar9003_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
{
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
u8 ant_div_ctl1;
u32 regval;
- if (!AR_SREV_9565(ah))
+ if (!AR_SREV_9485(ah) && !AR_SREV_9565(ah))
return;
- ah->shared_chain_lnadiv = enable;
+ if (AR_SREV_9485(ah)) {
+ regval = ar9003_hw_ant_ctrl_common_2_get(ah,
+ IS_CHAN_2GHZ(ah->curchan));
+ if (enable) {
+ regval &= ~AR_SWITCH_TABLE_COM2_ALL;
+ regval |= ah->config.ant_ctrl_comm2g_switch_enable;
+ }
+ REG_RMW_FIELD(ah, AR_PHY_SWITCH_COM_2,
+ AR_SWITCH_TABLE_COM2_ALL, regval);
+ }
+
ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
+ /*
+ * Set MAIN/ALT LNA conf.
+ * Set MAIN/ALT gain_tb.
+ */
regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
regval &= (~AR_ANT_DIV_CTRL_ALL);
regval |= (ant_div_ctl1 & 0x3f) << AR_ANT_DIV_CTRL_ALL_S;
- regval &= ~AR_PHY_ANT_DIV_LNADIV;
- regval |= ((ant_div_ctl1 >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
-
- if (enable)
- regval |= AR_ANT_DIV_ENABLE;
-
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
- regval = REG_READ(ah, AR_PHY_CCK_DETECT);
- regval &= ~AR_FAST_DIV_ENABLE;
- regval |= ((ant_div_ctl1 >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
-
- if (enable)
- regval |= AR_FAST_DIV_ENABLE;
-
- REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
-
- if (enable) {
- REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
- (1 << AR_PHY_ANT_SW_RX_PROT_S));
- if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
- REG_SET_BIT(ah, AR_PHY_RESTART,
- AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
- REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
- AR_BTCOEX_WL_LNADIV_FORCE_ON);
- } else {
- REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
- REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
- (1 << AR_PHY_ANT_SW_RX_PROT_S));
- REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
- REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
- AR_BTCOEX_WL_LNADIV_FORCE_ON);
-
+ if (AR_SREV_9485_11_OR_LATER(ah)) {
+ /*
+ * Enable LNA diversity.
+ */
regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
- regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF |
- AR_PHY_ANT_DIV_ALT_LNACONF |
- AR_PHY_ANT_DIV_MAIN_GAINTB |
- AR_PHY_ANT_DIV_ALT_GAINTB);
- regval |= (AR_PHY_ANT_DIV_LNA1 << AR_PHY_ANT_DIV_MAIN_LNACONF_S);
- regval |= (AR_PHY_ANT_DIV_LNA2 << AR_PHY_ANT_DIV_ALT_LNACONF_S);
+ regval &= ~AR_PHY_ANT_DIV_LNADIV;
+ regval |= ((ant_div_ctl1 >> 6) & 0x1) << AR_PHY_ANT_DIV_LNADIV_S;
+ if (enable)
+ regval |= AR_ANT_DIV_ENABLE;
+
REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+
+ /*
+ * Enable fast antenna diversity.
+ */
+ regval = REG_READ(ah, AR_PHY_CCK_DETECT);
+ regval &= ~AR_FAST_DIV_ENABLE;
+ regval |= ((ant_div_ctl1 >> 7) & 0x1) << AR_FAST_DIV_ENABLE_S;
+ if (enable)
+ regval |= AR_FAST_DIV_ENABLE;
+
+ REG_WRITE(ah, AR_PHY_CCK_DETECT, regval);
+
+ if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
+ regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+ regval &= (~(AR_PHY_ANT_DIV_MAIN_LNACONF |
+ AR_PHY_ANT_DIV_ALT_LNACONF |
+ AR_PHY_ANT_DIV_ALT_GAINTB |
+ AR_PHY_ANT_DIV_MAIN_GAINTB));
+ /*
+ * Set MAIN to LNA1 and ALT to LNA2 at the
+ * beginning.
+ */
+ regval |= (ATH_ANT_DIV_COMB_LNA1 <<
+ AR_PHY_ANT_DIV_MAIN_LNACONF_S);
+ regval |= (ATH_ANT_DIV_COMB_LNA2 <<
+ AR_PHY_ANT_DIV_ALT_LNACONF_S);
+ REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+ }
+ } else if (AR_SREV_9565(ah)) {
+ if (enable) {
+ REG_SET_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ (1 << AR_PHY_ANT_SW_RX_PROT_S));
+ if (ah->curchan && IS_CHAN_2GHZ(ah->curchan))
+ REG_SET_BIT(ah, AR_PHY_RESTART,
+ AR_PHY_RESTART_ENABLE_DIV_M2FLAG);
+ REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV,
+ AR_BTCOEX_WL_LNADIV_FORCE_ON);
+ } else {
+ REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE);
+ REG_CLR_BIT(ah, AR_PHY_MC_GAIN_CTRL,
+ (1 << AR_PHY_ANT_SW_RX_PROT_S));
+ REG_CLR_BIT(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE);
+ REG_CLR_BIT(ah, AR_BTCOEX_WL_LNADIV,
+ AR_BTCOEX_WL_LNADIV_FORCE_ON);
+
+ regval = REG_READ(ah, AR_PHY_MC_GAIN_CTRL);
+ regval &= ~(AR_PHY_ANT_DIV_MAIN_LNACONF |
+ AR_PHY_ANT_DIV_ALT_LNACONF |
+ AR_PHY_ANT_DIV_MAIN_GAINTB |
+ AR_PHY_ANT_DIV_ALT_GAINTB);
+ regval |= (ATH_ANT_DIV_COMB_LNA1 <<
+ AR_PHY_ANT_DIV_MAIN_LNACONF_S);
+ regval |= (ATH_ANT_DIV_COMB_LNA2 <<
+ AR_PHY_ANT_DIV_ALT_LNACONF_S);
+ REG_WRITE(ah, AR_PHY_MC_GAIN_CTRL, regval);
+ }
}
}
+#endif
+
static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
struct ath9k_channel *chan,
u8 *ini_reloaded)
@@ -1518,6 +1565,18 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
+ if (AR_SREV_9462_20_OR_LATER(ah)) {
+ /*
+ * CUS217 mix LNA mode.
+ */
+ if (ar9003_hw_get_rx_gain_idx(ah) == 2) {
+ REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_core,
+ 1, regWrites);
+ REG_WRITE_ARRAY(&ah->ini_modes_rxgain_bb_postamble,
+ modesIndex, regWrites);
+ }
+ }
+
/*
* For 5GHz channels requiring Fast Clock, apply
* different modal values.
@@ -1528,7 +1587,11 @@ static int ar9003_hw_fast_chan_change(struct ath_hw *ah,
if (AR_SREV_9565(ah))
REG_WRITE_ARRAY(&ah->iniModesFastClock, 1, regWrites);
- REG_WRITE_ARRAY(&ah->iniAdditional, 1, regWrites);
+ /*
+ * JAPAN regulatory.
+ */
+ if (chan->channel == 2484)
+ ar9003_hw_prog_ini(ah, &ah->iniCckfirJapan2484, 1);
ah->modes_index = modesIndex;
*ini_reloaded = true;
@@ -1631,11 +1694,14 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get;
ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set;
- ops->antctrl_shared_chain_lnadiv = ar9003_hw_antctrl_shared_chain_lnadiv;
ops->spectral_scan_config = ar9003_hw_spectral_scan_config;
ops->spectral_scan_trigger = ar9003_hw_spectral_scan_trigger;
ops->spectral_scan_wait = ar9003_hw_spectral_scan_wait;
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ ops->set_bt_ant_diversity = ar9003_hw_set_bt_ant_diversity;
+#endif
+
ar9003_hw_set_nf_limits(ah);
ar9003_hw_set_radar_conf(ah);
memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs));
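
With the ar9003_phy.c change, ar9003_hw_override_ini() derives enabled_cals from what the INI actually programmed: each calibration flag is set when its hardware enable field reads back non-zero, and cleared otherwise. The flag bookkeeping alone looks like this (register reads are replaced by booleans, and the flag values here are illustrative, not the driver's definitions):

#include <stdbool.h>
#include <stdint.h>

#define TX_IQ_CAL_FLAG  (1u << 0)
#define TX_CL_CAL_FLAG  (1u << 1)

/* Mirror a pair of hardware enable bits into a software capability mask. */
static uint32_t update_enabled_cals(uint32_t enabled_cals,
				    bool txiq_cal_on, bool cl_cal_on)
{
	if (txiq_cal_on)
		enabled_cals |= TX_IQ_CAL_FLAG;
	else
		enabled_cals &= ~TX_IQ_CAL_FLAG;

	if (cl_cal_on)
		enabled_cals |= TX_CL_CAL_FLAG;
	else
		enabled_cals &= ~TX_CL_CAL_FLAG;

	return enabled_cals;
}
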
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index d4d39f305a0..6fd752321e3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -148,6 +148,8 @@
#define AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S 28
#define AR_PHY_EXT_CCA_THRESH62 0x007F0000
#define AR_PHY_EXT_CCA_THRESH62_S 16
+#define AR_PHY_EXTCHN_PWRTHR1_ANT_DIV_ALT_ANT_MINGAINIDX 0x0000FF00
+#define AR_PHY_EXTCHN_PWRTHR1_ANT_DIV_ALT_ANT_MINGAINIDX_S 8
#define AR_PHY_EXT_MINCCA_PWR 0x01FF0000
#define AR_PHY_EXT_MINCCA_PWR_S 16
#define AR_PHY_EXT_CYCPWR_THR1 0x0000FE00L
@@ -296,11 +298,6 @@
#define AR_PHY_ANT_DIV_MAIN_GAINTB 0x40000000
#define AR_PHY_ANT_DIV_MAIN_GAINTB_S 30
-#define AR_PHY_ANT_DIV_LNA1_MINUS_LNA2 0x0
-#define AR_PHY_ANT_DIV_LNA2 0x1
-#define AR_PHY_ANT_DIV_LNA1 0x2
-#define AR_PHY_ANT_DIV_LNA1_PLUS_LNA2 0x3
-
#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c)
#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30)
#define AR_PHY_20_40_DET_THR (AR_AGC_BASE + 0x34)
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index c1224b5a257..2ee35f677c0 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -72,17 +72,12 @@ struct ath_config {
/*************************/
#define ATH_TXBUF_RESET(_bf) do { \
- (_bf)->bf_stale = false; \
(_bf)->bf_lastbf = NULL; \
(_bf)->bf_next = NULL; \
memset(&((_bf)->bf_state), 0, \
sizeof(struct ath_buf_state)); \
} while (0)
-#define ATH_RXBUF_RESET(_bf) do { \
- (_bf)->bf_stale = false; \
- } while (0)
-
/**
* enum buffer_type - Buffer type flags
*
@@ -137,7 +132,8 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define ATH_AGGR_ENCRYPTDELIM 10
/* minimum h/w qdepth to be sustained to maximize aggregation */
#define ATH_AGGR_MIN_QDEPTH 2
-#define ATH_AMPDU_SUBFRAME_DEFAULT 32
+/* minimum h/w qdepth for non-aggregated traffic */
+#define ATH_NON_AGGR_MIN_QDEPTH 8
#define IEEE80211_SEQ_SEQ_SHIFT 4
#define IEEE80211_SEQ_MAX 4096
@@ -174,12 +170,6 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define ATH_TX_COMPLETE_POLL_INT 1000
-enum ATH_AGGR_STATUS {
- ATH_AGGR_DONE,
- ATH_AGGR_BAW_CLOSED,
- ATH_AGGR_LIMITED,
-};
-
#define ATH_TXFIFO_DEPTH 8
struct ath_txq {
int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
@@ -201,10 +191,10 @@ struct ath_txq {
struct ath_atx_ac {
struct ath_txq *txq;
- int sched;
struct list_head list;
struct list_head tid_q;
bool clear_ps_filter;
+ bool sched;
};
struct ath_frame_info {
@@ -212,14 +202,16 @@ struct ath_frame_info {
int framelen;
enum ath9k_key_type keytype;
u8 keyix;
- u8 retries;
u8 rtscts_rate;
+ u8 retries : 7;
+ u8 baw_tracked : 1;
};
struct ath_buf_state {
u8 bf_type;
u8 bfs_paprd;
u8 ndelim;
+ bool stale;
u16 seqno;
unsigned long bfs_paprd_timestamp;
};
@@ -233,7 +225,6 @@ struct ath_buf {
void *bf_desc; /* virtual addr of desc */
dma_addr_t bf_daddr; /* physical addr of desc */
dma_addr_t bf_buf_addr; /* physical addr of data buffer, for DMA */
- bool bf_stale;
struct ieee80211_tx_rate rates[4];
struct ath_buf_state bf_state;
};
@@ -241,16 +232,18 @@ struct ath_buf {
struct ath_atx_tid {
struct list_head list;
struct sk_buff_head buf_q;
+ struct sk_buff_head retry_q;
struct ath_node *an;
struct ath_atx_ac *ac;
unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
- int bar_index;
u16 seq_start;
u16 seq_next;
u16 baw_size;
- int tidno;
+ u8 tidno;
int baw_head; /* first un-acked tx buffer */
int baw_tail; /* next unused tx buffer slot */
+
+ s8 bar_index;
bool sched;
bool paused;
bool active;
@@ -262,16 +255,13 @@ struct ath_node {
struct ieee80211_vif *vif; /* interface with which we're associated */
struct ath_atx_tid tid[IEEE80211_NUM_TIDS];
struct ath_atx_ac ac[IEEE80211_NUM_ACS];
- int ps_key;
u16 maxampdu;
u8 mpdudensity;
+ s8 ps_key;
bool sleeping;
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
- struct dentry *node_stat;
-#endif
+ bool no_ps_filter;
};
struct ath_tx_control {
@@ -317,6 +307,7 @@ struct ath_rx {
struct ath_descdma rxdma;
struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
+ struct ath_buf *buf_hold;
struct sk_buff *frag;
u32 ampdu_ref;
@@ -367,6 +358,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
/********/
struct ath_vif {
+ struct ath_node mcast_node;
int av_bslot;
bool primary_sta_vif;
__le64 tsf_adjust; /* TSF adjustment for staggered beacons */
@@ -428,6 +420,7 @@ void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_set_beacon(struct ath_softc *sc);
+bool ath9k_csa_is_finished(struct ath_softc *sc);
/*******************/
/* Link Monitoring */
@@ -585,19 +578,14 @@ static inline void ath_fill_led_pin(struct ath_softc *sc)
#define ATH_ANT_DIV_COMB_MAX_COUNT 100
#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO 30
#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2 20
+#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO_LOW_RSSI 50
+#define ATH_ANT_DIV_COMB_ALT_ANT_RATIO2_LOW_RSSI 50
#define ATH_ANT_DIV_COMB_LNA1_LNA2_SWITCH_DELTA -1
#define ATH_ANT_DIV_COMB_LNA1_DELTA_HI -4
#define ATH_ANT_DIV_COMB_LNA1_DELTA_MID -2
#define ATH_ANT_DIV_COMB_LNA1_DELTA_LOW 2
-enum ath9k_ant_div_comb_lna_conf {
- ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2,
- ATH_ANT_DIV_COMB_LNA2,
- ATH_ANT_DIV_COMB_LNA1,
- ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2,
-};
-
struct ath_ant_comb {
u16 count;
u16 total_pkt_count;
@@ -614,27 +602,36 @@ struct ath_ant_comb {
int rssi_first;
int rssi_second;
int rssi_third;
+ int ant_ratio;
+ int ant_ratio2;
bool alt_good;
int quick_scan_cnt;
- int main_conf;
+ enum ath9k_ant_div_comb_lna_conf main_conf;
enum ath9k_ant_div_comb_lna_conf first_quick_scan_conf;
enum ath9k_ant_div_comb_lna_conf second_quick_scan_conf;
bool first_ratio;
bool second_ratio;
unsigned long scan_start_time;
+
+ /*
+ * Card-specific config values.
+ */
+ int low_rssi_thresh;
+ int fast_div_bias;
};
void ath_ant_comb_scan(struct ath_softc *sc, struct ath_rx_status *rs);
-void ath_ant_comb_update(struct ath_softc *sc);
/********************/
/* Main driver core */
/********************/
-#define ATH9K_PCI_CUS198 0x0001
-#define ATH9K_PCI_CUS230 0x0002
-#define ATH9K_PCI_CUS217 0x0004
-#define ATH9K_PCI_WOW 0x0008
+#define ATH9K_PCI_CUS198 0x0001
+#define ATH9K_PCI_CUS230 0x0002
+#define ATH9K_PCI_CUS217 0x0004
+#define ATH9K_PCI_WOW 0x0008
+#define ATH9K_PCI_BT_ANT_DIV 0x0010
+#define ATH9K_PCI_D3_L1_WAR 0x0020
/*
* Default cache line size, in bytes.
@@ -761,6 +758,7 @@ struct ath_softc {
#endif
struct ath_descdma txsdma;
+ struct ieee80211_vif *csa_vif;
struct ath_ant_comb ant_comb;
u8 ant_tx, ant_rx;
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 1a17732bb08..b5c16b3a37b 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -291,6 +291,23 @@ void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
(unsigned long long)tsfadjust, avp->av_bslot);
}
+bool ath9k_csa_is_finished(struct ath_softc *sc)
+{
+ struct ieee80211_vif *vif;
+
+ vif = sc->csa_vif;
+ if (!vif || !vif->csa_active)
+ return false;
+
+ if (!ieee80211_csa_is_complete(vif))
+ return false;
+
+ ieee80211_csa_finish(vif);
+
+ sc->csa_vif = NULL;
+ return true;
+}
+
void ath9k_beacon_tasklet(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
@@ -336,6 +353,10 @@ void ath9k_beacon_tasklet(unsigned long data)
return;
}
+ /* EDMA devices check that in the tx completion function. */
+ if (!edma && ath9k_csa_is_finished(sc))
+ return;
+
slot = ath9k_beacon_choose_slot(sc);
vif = sc->beacon.bslot[slot];
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 344fdde1d7a..d3063c21e16 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -49,37 +49,40 @@ int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
}
EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype);
-static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type)
+static u32 ath9k_get_extchanmode(struct cfg80211_chan_def *chandef)
{
u32 chanmode = 0;
- switch (chan->band) {
+ switch (chandef->chan->band) {
case IEEE80211_BAND_2GHZ:
- switch (channel_type) {
- case NL80211_CHAN_NO_HT:
- case NL80211_CHAN_HT20:
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
chanmode = CHANNEL_G_HT20;
break;
- case NL80211_CHAN_HT40PLUS:
- chanmode = CHANNEL_G_HT40PLUS;
+ case NL80211_CHAN_WIDTH_40:
+ if (chandef->center_freq1 > chandef->chan->center_freq)
+ chanmode = CHANNEL_G_HT40PLUS;
+ else
+ chanmode = CHANNEL_G_HT40MINUS;
break;
- case NL80211_CHAN_HT40MINUS:
- chanmode = CHANNEL_G_HT40MINUS;
+ default:
break;
}
break;
case IEEE80211_BAND_5GHZ:
- switch (channel_type) {
- case NL80211_CHAN_NO_HT:
- case NL80211_CHAN_HT20:
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
chanmode = CHANNEL_A_HT20;
break;
- case NL80211_CHAN_HT40PLUS:
- chanmode = CHANNEL_A_HT40PLUS;
+ case NL80211_CHAN_WIDTH_40:
+ if (chandef->center_freq1 > chandef->chan->center_freq)
+ chanmode = CHANNEL_A_HT40PLUS;
+ else
+ chanmode = CHANNEL_A_HT40MINUS;
break;
- case NL80211_CHAN_HT40MINUS:
- chanmode = CHANNEL_A_HT40MINUS;
+ default:
break;
}
break;
@@ -94,13 +97,12 @@ static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan,
* Update internal channel flags.
*/
void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type)
+ struct cfg80211_chan_def *chandef)
{
- ichan->channel = chan->center_freq;
- ichan->chan = chan;
+ ichan->channel = chandef->chan->center_freq;
+ ichan->chan = chandef->chan;
- if (chan->band == IEEE80211_BAND_2GHZ) {
+ if (chandef->chan->band == IEEE80211_BAND_2GHZ) {
ichan->chanmode = CHANNEL_G;
ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM;
} else {
@@ -108,8 +110,22 @@ void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
}
- if (channel_type != NL80211_CHAN_NO_HT)
- ichan->chanmode = ath9k_get_extchanmode(chan, channel_type);
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_5:
+ ichan->channelFlags |= CHANNEL_QUARTER;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ ichan->channelFlags |= CHANNEL_HALF;
+ break;
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ break;
+ case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_40:
+ ichan->chanmode = ath9k_get_extchanmode(chandef);
+ break;
+ default:
+ WARN_ON(1);
+ }
}
EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
@@ -125,8 +141,7 @@ struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
chan_idx = curchan->hw_value;
channel = &ah->channels[chan_idx];
- ath9k_cmn_update_ichannel(channel, curchan,
- cfg80211_get_chandef_type(&hw->conf.chandef));
+ ath9k_cmn_update_ichannel(channel, &hw->conf.chandef);
return channel;
}
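
The chandef conversion above derives the HT40 direction from center_freq1 instead of an nl80211 channel type: when the 40 MHz segment centre lies above the control channel's frequency, the "PLUS" mode is selected. A minimal standalone sketch of that comparison, with illustrative frequencies (the helper name is not part of the patch):

/* Hedged sketch: HT40 secondary-channel direction from a chandef.
 * Channel 36 (5180 MHz) with the secondary above has
 * center_freq1 = (5180 + 5200) / 2 = 5190 > 5180  ->  HT40PLUS.
 * Channel 40 (5200 MHz) with the secondary below has
 * center_freq1 = 5190 < 5200                      ->  HT40MINUS.
 */
static int ht40_is_plus(int center_freq1_mhz, int ctl_freq_mhz)
{
	return center_freq1_mhz > ctl_freq_mhz;
}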
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 207d06995b1..e039bcbfbd7 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -44,8 +44,7 @@
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
- struct ieee80211_channel *chan,
- enum nl80211_channel_type channel_type);
+ struct cfg80211_chan_def *chandef);
struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
struct ath_hw *ah);
int ath9k_cmn_count_streams(unsigned int chainmask, int max);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 87454f6c7b4..c088744a6bf 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -88,90 +88,6 @@ static const struct file_operations fops_debug = {
#define DMA_BUF_LEN 1024
-static ssize_t read_file_tx_chainmask(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- struct ath_hw *ah = sc->sc_ah;
- char buf[32];
- unsigned int len;
-
- len = sprintf(buf, "0x%08x\n", ah->txchainmask);
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t write_file_tx_chainmask(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- struct ath_hw *ah = sc->sc_ah;
- unsigned long mask;
- char buf[32];
- ssize_t len;
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
-
- buf[len] = '\0';
- if (kstrtoul(buf, 0, &mask))
- return -EINVAL;
-
- ah->txchainmask = mask;
- ah->caps.tx_chainmask = mask;
- return count;
-}
-
-static const struct file_operations fops_tx_chainmask = {
- .read = read_file_tx_chainmask,
- .write = write_file_tx_chainmask,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
-
-
-static ssize_t read_file_rx_chainmask(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- struct ath_hw *ah = sc->sc_ah;
- char buf[32];
- unsigned int len;
-
- len = sprintf(buf, "0x%08x\n", ah->rxchainmask);
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t write_file_rx_chainmask(struct file *file, const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_softc *sc = file->private_data;
- struct ath_hw *ah = sc->sc_ah;
- unsigned long mask;
- char buf[32];
- ssize_t len;
-
- len = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, len))
- return -EFAULT;
-
- buf[len] = '\0';
- if (kstrtoul(buf, 0, &mask))
- return -EINVAL;
-
- ah->rxchainmask = mask;
- ah->caps.rx_chainmask = mask;
- return count;
-}
-
-static const struct file_operations fops_rx_chainmask = {
- .read = read_file_rx_chainmask,
- .write = write_file_rx_chainmask,
- .open = simple_open,
- .owner = THIS_MODULE,
- .llseek = default_llseek,
-};
static ssize_t read_file_ani(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -270,25 +186,29 @@ static const struct file_operations fops_ani = {
.llseek = default_llseek,
};
-static ssize_t read_file_ant_diversity(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
+static ssize_t read_file_bt_ant_diversity(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
char buf[32];
unsigned int len;
- len = sprintf(buf, "%d\n", common->antenna_diversity);
+ len = sprintf(buf, "%d\n", common->bt_ant_diversity);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
-static ssize_t write_file_ant_diversity(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
+static ssize_t write_file_bt_ant_diversity(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- unsigned long antenna_diversity;
+ struct ath9k_hw_capabilities *pCap = &sc->sc_ah->caps;
+ unsigned long bt_ant_diversity;
char buf[32];
ssize_t len;
@@ -296,26 +216,147 @@ static ssize_t write_file_ant_diversity(struct file *file,
if (copy_from_user(buf, user_buf, len))
return -EFAULT;
- if (!AR_SREV_9565(sc->sc_ah))
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV))
goto exit;
buf[len] = '\0';
- if (kstrtoul(buf, 0, &antenna_diversity))
+ if (kstrtoul(buf, 0, &bt_ant_diversity))
return -EINVAL;
- common->antenna_diversity = !!antenna_diversity;
+ common->bt_ant_diversity = !!bt_ant_diversity;
ath9k_ps_wakeup(sc);
- ath_ant_comb_update(sc);
- ath_dbg(common, CONFIG, "Antenna diversity: %d\n",
- common->antenna_diversity);
+ ath9k_hw_set_bt_ant_diversity(sc->sc_ah, common->bt_ant_diversity);
+ ath_dbg(common, CONFIG, "Enable WLAN/BT RX Antenna diversity: %d\n",
+ common->bt_ant_diversity);
ath9k_ps_restore(sc);
exit:
return count;
}
-static const struct file_operations fops_ant_diversity = {
- .read = read_file_ant_diversity,
- .write = write_file_ant_diversity,
+static const struct file_operations fops_bt_ant_diversity = {
+ .read = read_file_bt_ant_diversity,
+ .write = write_file_bt_ant_diversity,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+#endif
+
+void ath9k_debug_stat_ant(struct ath_softc *sc,
+ struct ath_hw_antcomb_conf *div_ant_conf,
+ int main_rssi_avg, int alt_rssi_avg)
+{
+ struct ath_antenna_stats *as_main = &sc->debug.stats.ant_stats[ANT_MAIN];
+ struct ath_antenna_stats *as_alt = &sc->debug.stats.ant_stats[ANT_ALT];
+
+ as_main->lna_attempt_cnt[div_ant_conf->main_lna_conf]++;
+ as_alt->lna_attempt_cnt[div_ant_conf->alt_lna_conf]++;
+
+ as_main->rssi_avg = main_rssi_avg;
+ as_alt->rssi_avg = alt_rssi_avg;
+}
+
+static ssize_t read_file_antenna_diversity(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ struct ath_antenna_stats *as_main = &sc->debug.stats.ant_stats[ANT_MAIN];
+ struct ath_antenna_stats *as_alt = &sc->debug.stats.ant_stats[ANT_ALT];
+ struct ath_hw_antcomb_conf div_ant_conf;
+ unsigned int len = 0, size = 1024;
+ ssize_t retval = 0;
+ char *buf;
+ char *lna_conf_str[4] = {"LNA1_MINUS_LNA2",
+ "LNA2",
+ "LNA1",
+ "LNA1_PLUS_LNA2"};
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)) {
+ len += snprintf(buf + len, size - len, "%s\n",
+ "Antenna Diversity Combining is disabled");
+ goto exit;
+ }
+
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_antdiv_comb_conf_get(ah, &div_ant_conf);
+ len += snprintf(buf + len, size - len, "Current MAIN config : %s\n",
+ lna_conf_str[div_ant_conf.main_lna_conf]);
+ len += snprintf(buf + len, size - len, "Current ALT config : %s\n",
+ lna_conf_str[div_ant_conf.alt_lna_conf]);
+ len += snprintf(buf + len, size - len, "Average MAIN RSSI : %d\n",
+ as_main->rssi_avg);
+ len += snprintf(buf + len, size - len, "Average ALT RSSI : %d\n\n",
+ as_alt->rssi_avg);
+ ath9k_ps_restore(sc);
+
+ len += snprintf(buf + len, size - len, "Packet Receive Cnt:\n");
+ len += snprintf(buf + len, size - len, "-------------------\n");
+
+ len += snprintf(buf + len, size - len, "%30s%15s\n",
+ "MAIN", "ALT");
+ len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "TOTAL COUNT",
+ as_main->recv_cnt,
+ as_alt->recv_cnt);
+ len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1]);
+ len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA2",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA2]);
+ len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 + LNA2",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
+ len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 - LNA2",
+ as_main->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
+ as_alt->lna_recv_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
+
+ len += snprintf(buf + len, size - len, "\nLNA Config Attempts:\n");
+ len += snprintf(buf + len, size - len, "--------------------\n");
+
+ len += snprintf(buf + len, size - len, "%30s%15s\n",
+ "MAIN", "ALT");
+ len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1]);
+ len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA2",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA2]);
+ len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 + LNA2",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2]);
+ len += snprintf(buf + len, size - len, "%-14s:%15d%15d\n",
+ "LNA1 - LNA2",
+ as_main->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2],
+ as_alt->lna_attempt_cnt[ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2]);
+
+exit:
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static const struct file_operations fops_antenna_diversity = {
+ .read = read_file_antenna_diversity,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
@@ -607,6 +648,28 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
return retval;
}
+static ssize_t print_queue(struct ath_softc *sc, struct ath_txq *txq,
+ char *buf, ssize_t size)
+{
+ ssize_t len = 0;
+
+ ath_txq_lock(sc, txq);
+
+ len += snprintf(buf + len, size - len, "%s: %d ",
+ "qnum", txq->axq_qnum);
+ len += snprintf(buf + len, size - len, "%s: %2d ",
+ "qdepth", txq->axq_depth);
+ len += snprintf(buf + len, size - len, "%s: %2d ",
+ "ampdu-depth", txq->axq_ampdu_depth);
+ len += snprintf(buf + len, size - len, "%s: %3d ",
+ "pending", txq->pending_frames);
+ len += snprintf(buf + len, size - len, "%s: %d\n",
+ "stopped", txq->stopped);
+
+ ath_txq_unlock(sc, txq);
+ return len;
+}
+
static ssize_t read_file_queues(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -624,24 +687,13 @@ static ssize_t read_file_queues(struct file *file, char __user *user_buf,
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
txq = sc->tx.txq_map[i];
- len += snprintf(buf + len, size - len, "(%s): ", qname[i]);
-
- ath_txq_lock(sc, txq);
-
- len += snprintf(buf + len, size - len, "%s: %d ",
- "qnum", txq->axq_qnum);
- len += snprintf(buf + len, size - len, "%s: %2d ",
- "qdepth", txq->axq_depth);
- len += snprintf(buf + len, size - len, "%s: %2d ",
- "ampdu-depth", txq->axq_ampdu_depth);
- len += snprintf(buf + len, size - len, "%s: %3d ",
- "pending", txq->pending_frames);
- len += snprintf(buf + len, size - len, "%s: %d\n",
- "stopped", txq->stopped);
-
- ath_txq_unlock(sc, txq);
+ len += snprintf(buf + len, size - len, "(%s): ", qname[i]);
+ len += print_queue(sc, txq, buf + len, size - len);
}
+ len += snprintf(buf + len, size - len, "(CAB): ");
+ len += print_queue(sc, sc->beacon.cabq, buf + len, size - len);
+
if (len > size)
len = size;
@@ -1589,17 +1641,7 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
struct dentry *dir)
{
struct ath_node *an = (struct ath_node *)sta->drv_priv;
- an->node_stat = debugfs_create_file("node_stat", S_IRUGO,
- dir, an, &fops_node_stat);
-}
-
-void ath9k_sta_remove_debugfs(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct dentry *dir)
-{
- struct ath_node *an = (struct ath_node *)sta->drv_priv;
- debugfs_remove(an->node_stat);
+ debugfs_create_file("node_stat", S_IRUGO, dir, an, &fops_node_stat);
}
/* Ethtool support for get-stats */
@@ -1770,10 +1812,10 @@ int ath9k_init_debug(struct ath_hw *ah)
&fops_reset);
debugfs_create_file("recv", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_recv);
- debugfs_create_file("rx_chainmask", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc, &fops_rx_chainmask);
- debugfs_create_file("tx_chainmask", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc, &fops_tx_chainmask);
+ debugfs_create_u8("rx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
+ &ah->rxchainmask);
+ debugfs_create_u8("tx_chainmask", S_IRUSR, sc->debug.debugfs_phy,
+ &ah->txchainmask);
debugfs_create_file("ani", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, sc, &fops_ani);
debugfs_create_bool("paprd", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
@@ -1814,9 +1856,11 @@ int ath9k_init_debug(struct ath_hw *ah)
sc->debug.debugfs_phy, &sc->sc_ah->gpio_mask);
debugfs_create_u32("gpio_val", S_IRUSR | S_IWUSR,
sc->debug.debugfs_phy, &sc->sc_ah->gpio_val);
- debugfs_create_file("diversity", S_IRUSR | S_IWUSR,
- sc->debug.debugfs_phy, sc, &fops_ant_diversity);
+ debugfs_create_file("antenna_diversity", S_IRUSR,
+ sc->debug.debugfs_phy, sc, &fops_antenna_diversity);
#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ debugfs_create_file("bt_ant_diversity", S_IRUSR | S_IWUSR,
+ sc->debug.debugfs_phy, sc, &fops_bt_ant_diversity);
debugfs_create_file("btcoex", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_btcoex);
#endif
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index fc679198a0f..6e1556fa2f3 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -28,9 +28,13 @@ struct fft_sample_tlv;
#ifdef CONFIG_ATH9K_DEBUGFS
#define TX_STAT_INC(q, c) sc->debug.stats.txstats[q].c++
#define RESET_STAT_INC(sc, type) sc->debug.stats.reset[type]++
+#define ANT_STAT_INC(i, c) sc->debug.stats.ant_stats[i].c++
+#define ANT_LNA_INC(i, c) sc->debug.stats.ant_stats[i].lna_recv_cnt[c]++;
#else
#define TX_STAT_INC(q, c) do { } while (0)
#define RESET_STAT_INC(sc, type) do { } while (0)
+#define ANT_STAT_INC(i, c) do { } while (0)
+#define ANT_LNA_INC(i, c) do { } while (0)
#endif
enum ath_reset_type {
@@ -243,11 +247,22 @@ struct ath_rx_stats {
u32 rx_spectral;
};
+#define ANT_MAIN 0
+#define ANT_ALT 1
+
+struct ath_antenna_stats {
+ u32 recv_cnt;
+ u32 rssi_avg;
+ u32 lna_recv_cnt[4];
+ u32 lna_attempt_cnt[4];
+};
+
struct ath_stats {
struct ath_interrupt_stats istats;
struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
struct ath_rx_stats rxstats;
struct ath_dfs_stats dfs_stats;
+ struct ath_antenna_stats ant_stats[2];
u32 reset[__RESET_TYPE_MAX];
};
@@ -277,14 +292,11 @@ void ath9k_sta_add_debugfs(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct dentry *dir);
-void ath9k_sta_remove_debugfs(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct dentry *dir);
-
void ath_debug_send_fft_sample(struct ath_softc *sc,
struct fft_sample_tlv *fft_sample);
-
+void ath9k_debug_stat_ant(struct ath_softc *sc,
+ struct ath_hw_antcomb_conf *div_ant_conf,
+ int main_rssi_avg, int alt_rssi_avg);
#else
#define RX_STAT_INC(c) /* NOP */
@@ -297,12 +309,10 @@ static inline int ath9k_init_debug(struct ath_hw *ah)
static inline void ath9k_deinit_debug(struct ath_softc *sc)
{
}
-
static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
enum ath9k_int status)
{
}
-
static inline void ath_debug_stat_tx(struct ath_softc *sc,
struct ath_buf *bf,
struct ath_tx_status *ts,
@@ -310,11 +320,16 @@ static inline void ath_debug_stat_tx(struct ath_softc *sc,
unsigned int flags)
{
}
-
static inline void ath_debug_stat_rx(struct ath_softc *sc,
struct ath_rx_status *rs)
{
}
+static inline void ath9k_debug_stat_ant(struct ath_softc *sc,
+ struct ath_hw_antcomb_conf *div_ant_conf,
+ int main_rssi_avg, int alt_rssi_avg)
+{
+
+}
#endif /* CONFIG_ATH9K_DEBUGFS */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index c2bfd748eed..9ea8e4b779c 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -812,6 +812,7 @@ static void ath9k_hw_4k_set_gain(struct ath_hw *ah,
static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
struct ath9k_channel *chan)
{
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
struct modal_eep_4k_header *pModal;
struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
struct base_eep_header_4k *pBase = &eep->baseEepHeader;
@@ -858,6 +859,24 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
REG_WRITE(ah, AR_PHY_CCK_DETECT, regVal);
regVal = REG_READ(ah, AR_PHY_CCK_DETECT);
+
+ if (pCap->hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
+ /*
+ * If diversity combining is enabled,
+ * set MAIN to LNA1 and ALT to LNA2 initially.
+ */
+ regVal = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
+ regVal &= (~(AR_PHY_9285_ANT_DIV_MAIN_LNACONF |
+ AR_PHY_9285_ANT_DIV_ALT_LNACONF));
+
+ regVal |= (ATH_ANT_DIV_COMB_LNA1 <<
+ AR_PHY_9285_ANT_DIV_MAIN_LNACONF_S);
+ regVal |= (ATH_ANT_DIV_COMB_LNA2 <<
+ AR_PHY_9285_ANT_DIV_ALT_LNACONF_S);
+ regVal &= (~(AR_PHY_9285_FAST_DIV_BIAS));
+ regVal |= (0 << AR_PHY_9285_FAST_DIV_BIAS_S);
+ REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regVal);
+ }
}
if (pModal->version >= 2) {
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 9e582e14da7..6d5d716adc1 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -115,10 +115,10 @@ static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
cmd->skb = skb;
cmd->hif_dev = hif_dev;
- usb_fill_bulk_urb(urb, hif_dev->udev,
- usb_sndbulkpipe(hif_dev->udev, USB_REG_OUT_PIPE),
+ usb_fill_int_urb(urb, hif_dev->udev,
+ usb_sndintpipe(hif_dev->udev, USB_REG_OUT_PIPE),
skb->data, skb->len,
- hif_usb_regout_cb, cmd);
+ hif_usb_regout_cb, cmd, 1);
usb_anchor_urb(urb, &hif_dev->regout_submitted);
ret = usb_submit_urb(urb, GFP_KERNEL);
@@ -723,11 +723,11 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
return;
}
- usb_fill_bulk_urb(urb, hif_dev->udev,
- usb_rcvbulkpipe(hif_dev->udev,
+ usb_fill_int_urb(urb, hif_dev->udev,
+ usb_rcvintpipe(hif_dev->udev,
USB_REG_IN_PIPE),
nskb->data, MAX_REG_IN_BUF_SIZE,
- ath9k_hif_usb_reg_in_cb, nskb);
+ ath9k_hif_usb_reg_in_cb, nskb, 1);
}
resubmit:
@@ -909,11 +909,11 @@ static int ath9k_hif_usb_alloc_reg_in_urbs(struct hif_device_usb *hif_dev)
goto err_skb;
}
- usb_fill_bulk_urb(urb, hif_dev->udev,
- usb_rcvbulkpipe(hif_dev->udev,
+ usb_fill_int_urb(urb, hif_dev->udev,
+ usb_rcvintpipe(hif_dev->udev,
USB_REG_IN_PIPE),
skb->data, MAX_REG_IN_BUF_SIZE,
- ath9k_hif_usb_reg_in_cb, skb);
+ ath9k_hif_usb_reg_in_cb, skb, 1);
/* Anchor URB */
usb_anchor_urb(urb, &hif_dev->reg_in_submitted);
@@ -1031,9 +1031,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
{
- struct usb_host_interface *alt = &hif_dev->interface->altsetting[0];
- struct usb_endpoint_descriptor *endp;
- int ret, idx;
+ int ret;
ret = ath9k_hif_usb_download_fw(hif_dev);
if (ret) {
@@ -1043,20 +1041,6 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
return ret;
}
- /* On downloading the firmware to the target, the USB descriptor of EP4
- * is 'patched' to change the type of the endpoint to Bulk. This will
- * bring down CPU usage during the scan period.
- */
- for (idx = 0; idx < alt->desc.bNumEndpoints; idx++) {
- endp = &alt->endpoint[idx].desc;
- if ((endp->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
- == USB_ENDPOINT_XFER_INT) {
- endp->bmAttributes &= ~USB_ENDPOINT_XFERTYPE_MASK;
- endp->bmAttributes |= USB_ENDPOINT_XFER_BULK;
- endp->bInterval = 0;
- }
- }
-
/* Alloc URBs */
ret = ath9k_hif_usb_alloc_urbs(hif_dev);
if (ret) {
@@ -1082,7 +1066,7 @@ static void ath9k_hif_usb_firmware_fail(struct hif_device_usb *hif_dev)
struct device *dev = &hif_dev->udev->dev;
struct device *parent = dev->parent;
- complete(&hif_dev->fw_done);
+ complete_all(&hif_dev->fw_done);
if (parent)
device_lock(parent);
@@ -1131,7 +1115,7 @@ static void ath9k_hif_usb_firmware_cb(const struct firmware *fw, void *context)
release_firmware(fw);
hif_dev->flags |= HIF_USB_READY;
- complete(&hif_dev->fw_done);
+ complete_all(&hif_dev->fw_done);
return;
@@ -1268,7 +1252,7 @@ static void ath9k_hif_usb_reboot(struct usb_device *udev)
if (!buf)
return;
- ret = usb_bulk_msg(udev, usb_sndbulkpipe(udev, USB_REG_OUT_PIPE),
+ ret = usb_interrupt_msg(udev, usb_sndintpipe(udev, USB_REG_OUT_PIPE),
buf, 4, NULL, HZ);
if (ret)
dev_err(&udev->dev, "ath9k_htc: USB reboot failed\n");
@@ -1295,7 +1279,9 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
usb_set_intfdata(interface, NULL);
- if (!unplugged && (hif_dev->flags & HIF_USB_START))
+ /* If firmware was loaded we should drop it and
+ * go back to the first stage bootloader. */
+ if (!unplugged && (hif_dev->flags & HIF_USB_READY))
ath9k_hif_usb_reboot(udev);
kfree(hif_dev);
@@ -1316,7 +1302,10 @@ static int ath9k_hif_usb_suspend(struct usb_interface *interface,
if (!(hif_dev->flags & HIF_USB_START))
ath9k_htc_suspend(hif_dev->htc_handle);
- ath9k_hif_usb_dealloc_urbs(hif_dev);
+ wait_for_completion(&hif_dev->fw_done);
+
+ if (hif_dev->flags & HIF_USB_READY)
+ ath9k_hif_usb_dealloc_urbs(hif_dev);
return 0;
}
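
The hif_usb.c hunks above switch the register endpoints from bulk to interrupt URBs; usb_fill_int_urb() takes the same arguments as usb_fill_bulk_urb() plus a trailing polling interval, which the patch sets to 1. A small self-contained sketch of the call shape, using placeholder buffer and callback names (not symbols from the patch):

#include <linux/usb.h>
#include <linux/slab.h>

/* Hedged sketch: interrupt-IN URB setup mirroring the change above. */
static void demo_reg_in_cb(struct urb *urb)
{
	/* completion handling would go here */
}

static int demo_submit_reg_in(struct usb_device *udev, struct urb *urb,
			      void *buf, int len, unsigned int ep)
{
	usb_fill_int_urb(urb, udev,
			 usb_rcvintpipe(udev, ep),	/* was usb_rcvbulkpipe() */
			 buf, len,
			 demo_reg_in_cb, NULL,
			 1);				/* polling interval, as in the patch */
	return usb_submit_urb(urb, GFP_KERNEL);
}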
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 71a183ffc77..c3676bf1d6c 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -861,6 +861,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
if (error != 0)
goto err_rx;
+ ath9k_hw_disable(priv->ah);
#ifdef CONFIG_MAC80211_LEDS
/* must be initialized before ieee80211_register_hw */
priv->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(priv->hw,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 5c1bec18c9e..d44258172c0 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1203,16 +1203,13 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || chip_reset) {
struct ieee80211_channel *curchan = hw->conf.chandef.chan;
- enum nl80211_channel_type channel_type =
- cfg80211_get_chandef_type(&hw->conf.chandef);
int pos = curchan->hw_value;
ath_dbg(common, CONFIG, "Set channel: %d MHz\n",
curchan->center_freq);
ath9k_cmn_update_ichannel(&priv->ah->channels[pos],
- hw->conf.chandef.chan,
- channel_type);
+ &hw->conf.chandef);
if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
ath_err(common, "Unable to set channel\n");
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index e602c951970..c028df76b56 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -448,6 +448,7 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv,
struct ieee80211_conf *cur_conf = &priv->hw->conf;
bool txok;
int slot;
+ int hdrlen, padsize;
slot = strip_drv_header(priv, skb);
if (slot < 0) {
@@ -504,6 +505,15 @@ send_mac80211:
ath9k_htc_tx_clear_slot(priv, slot);
+ /* Remove padding before handing frame back to mac80211 */
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+
+ padsize = hdrlen & 3;
+ if (padsize && skb->len > hdrlen + padsize) {
+ memmove(skb->data + padsize, skb->data, hdrlen);
+ skb_pull(skb, padsize);
+ }
+
/* Send status to mac80211 */
ieee80211_tx_status(priv->hw, skb);
}
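
The padding removal above undoes the 4-byte alignment applied on the TX side: the driver pads the gap between the 802.11 header and the payload by hdrlen & 3 bytes, so on completion the header is moved next to the payload and the pad is pulled off before ieee80211_tx_status(). A tiny worked example (values illustrative):

/* Hedged sketch of the alignment math used above.
 * A 26-byte QoS data header gives padsize = 26 & 3 = 2:
 *   [hdr 26][pad 2][payload]
 * memmove() shifts the header forward by 2 bytes and
 * skb_pull(skb, 2) drops the leading pad, leaving
 *   [hdr 26][payload]
 * which is what mac80211 expects back.
 */
static unsigned int demo_htc_padsize(unsigned int hdrlen)
{
	return hdrlen & 3;	/* header lengths are even, so this equals the pad */
}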
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 14b701140b4..83f4927aeac 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -78,13 +78,16 @@ static inline void ath9k_hw_antdiv_comb_conf_set(struct ath_hw *ah,
ath9k_hw_ops(ah)->antdiv_comb_conf_set(ah, antconf);
}
-static inline void ath9k_hw_antctrl_shared_chain_lnadiv(struct ath_hw *ah,
- bool enable)
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+
+static inline void ath9k_hw_set_bt_ant_diversity(struct ath_hw *ah, bool enable)
{
- if (ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv)
- ath9k_hw_ops(ah)->antctrl_shared_chain_lnadiv(ah, enable);
+ if (ath9k_hw_ops(ah)->set_bt_ant_diversity)
+ ath9k_hw_ops(ah)->set_bt_ant_diversity(ah, enable);
}
+#endif
+
/* Private hardware call ops */
/* PHY ops */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 4ca0cb06010..ecc6ec4a1ed 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -450,7 +450,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.ack_6mb = 0x0;
ah->config.cwm_ignore_extcca = 0;
ah->config.pcie_clock_req = 0;
- ah->config.pcie_waen = 0;
ah->config.analog_shiftreg = 1;
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
@@ -575,18 +574,17 @@ static int __ath9k_hw_init(struct ath_hw *ah)
* We need to do this to avoid RMW of this register. We cannot
* read the reg when chip is asleep.
*/
- ah->WARegVal = REG_READ(ah, AR_WA);
- ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
- AR_WA_ASPM_TIMER_BASED_DISABLE);
+ if (AR_SREV_9300_20_OR_LATER(ah)) {
+ ah->WARegVal = REG_READ(ah, AR_WA);
+ ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
+ AR_WA_ASPM_TIMER_BASED_DISABLE);
+ }
if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
ath_err(common, "Couldn't reset chip\n");
return -EIO;
}
- if (AR_SREV_9462(ah))
- ah->WARegVal &= ~AR_WA_D3_L1_DISABLE;
-
if (AR_SREV_9565(ah)) {
ah->WARegVal |= AR_WA_BIT22;
REG_WRITE(ah, AR_WA, ah->WARegVal);
@@ -656,8 +654,6 @@ static int __ath9k_hw_init(struct ath_hw *ah)
ath9k_hw_init_cal_settings(ah);
ah->ani_function = ATH9K_ANI_ALL;
- if (AR_SREV_9280_20_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah))
- ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
if (!AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
@@ -1069,7 +1065,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
tx_lat += 11;
- sifstime *= 2;
+ sifstime = 32;
ack_offset = 16;
slottime = 13;
} else if (IS_CHAN_QUARTER_RATE(chan)) {
@@ -1079,7 +1075,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
tx_lat += 22;
- sifstime *= 4;
+ sifstime = 64;
ack_offset = 32;
slottime = 21;
} else {
@@ -1116,7 +1112,6 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
ctstimeout += 48 - sifstime - ah->slottime;
}
-
ath9k_hw_set_sifs_time(ah, sifstime);
ath9k_hw_setslottime(ah, slottime);
ath9k_hw_set_ack_timeout(ah, acktimeout);
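
The sifstime change above pins the half- and quarter-rate values instead of scaling a previously computed one: 10 MHz and 5 MHz channels stretch the OFDM timing by 2x and 4x, so with the usual 16 us full-rate SIFS the results are 32 us and 64 us. A minimal sketch of the intended constants (assuming that 16 us baseline):

/* Hedged sketch: SIFS for 20/10/5 MHz OFDM operation, matching the
 * 32 us and 64 us values written above (baseline assumed 16 us). */
static int demo_ofdm_sifs_us(int chan_width_mhz)
{
	switch (chan_width_mhz) {
	case 5:
		return 64;	/* quarter rate: 16 us * 4 */
	case 10:
		return 32;	/* half rate: 16 us * 2 */
	default:
		return 16;	/* full rate */
	}
}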
@@ -1496,16 +1491,18 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ bool band_switch = false, mode_diff = false;
+ u8 ini_reloaded = 0;
u32 qnum;
int r;
- bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
- bool band_switch, mode_diff;
- u8 ini_reloaded;
- band_switch = (chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ)) !=
- (ah->curchan->channelFlags & (CHANNEL_2GHZ |
- CHANNEL_5GHZ));
- mode_diff = (chan->chanmode != ah->curchan->chanmode);
+ if (pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH) {
+ u32 cur = ah->curchan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
+ u32 new = chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ);
+ band_switch = (cur != new);
+ mode_diff = (chan->chanmode != ah->curchan->chanmode);
+ }
for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
if (ath9k_hw_numtxpending(ah, qnum)) {
@@ -1520,11 +1517,12 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
return false;
}
- if (edma && (band_switch || mode_diff)) {
+ if (band_switch || mode_diff) {
ath9k_hw_mark_phy_inactive(ah);
udelay(5);
- ath9k_hw_init_pll(ah, NULL);
+ if (band_switch)
+ ath9k_hw_init_pll(ah, chan);
if (ath9k_hw_fast_chan_change(ah, chan, &ini_reloaded)) {
ath_err(common, "Failed to do fast channel change\n");
@@ -1541,22 +1539,21 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
}
ath9k_hw_set_clockrate(ah);
ath9k_hw_apply_txpower(ah, chan, false);
- ath9k_hw_rfbus_done(ah);
if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
ath9k_hw_set_delta_slope(ah, chan);
ath9k_hw_spur_mitigate_freq(ah, chan);
- if (edma && (band_switch || mode_diff)) {
- ah->ah_flags |= AH_FASTCC;
- if (band_switch || ini_reloaded)
- ah->eep_ops->set_board_values(ah, chan);
+ if (band_switch || ini_reloaded)
+ ah->eep_ops->set_board_values(ah, chan);
- ath9k_hw_init_bb(ah, chan);
+ ath9k_hw_init_bb(ah, chan);
+ ath9k_hw_rfbus_done(ah);
- if (band_switch || ini_reloaded)
- ath9k_hw_init_cal(ah, chan);
+ if (band_switch || ini_reloaded) {
+ ah->ah_flags |= AH_FASTCC;
+ ath9k_hw_init_cal(ah, chan);
ah->ah_flags &= ~AH_FASTCC;
}
@@ -1778,16 +1775,11 @@ static void ath9k_hw_init_desc(struct ath_hw *ah)
/*
* Fast channel change:
* (Change synthesizer based on channel freq without resetting chip)
- *
- * Don't do FCC when
- * - Flag is not set
- * - Chip is just coming out of full sleep
- * - Channel to be set is same as current channel
- * - Channel flags are different, (eg.,moving from 2GHz to 5GHz channel)
*/
static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
int ret;
if (AR_SREV_9280(ah) && common->bus_ops->ath_bus_type == ATH_PCI)
@@ -1806,9 +1798,21 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
(CHANNEL_HALF | CHANNEL_QUARTER))
goto fail;
- if ((chan->channelFlags & CHANNEL_ALL) !=
- (ah->curchan->channelFlags & CHANNEL_ALL))
- goto fail;
+ /*
+ * If cross-band fcc is not supported, bail out if
+ * either channelFlags or chanmode differ.
+ *
+ * chanmode will be different if the HT operating mode
+ * changes because of CSA.
+ */
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_FCC_BAND_SWITCH)) {
+ if ((chan->channelFlags & CHANNEL_ALL) !=
+ (ah->curchan->channelFlags & CHANNEL_ALL))
+ goto fail;
+
+ if (chan->chanmode != ah->curchan->chanmode)
+ goto fail;
+ }
if (!ath9k_hw_check_alive(ah))
goto fail;
@@ -2047,7 +2051,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_apply_gpio_override(ah);
- if (AR_SREV_9565(ah) && ah->shared_chain_lnadiv)
+ if (AR_SREV_9565(ah) && common->bt_ant_diversity)
REG_SET_BIT(ah, AR_BTCOEX_WL_LNADIV, AR_BTCOEX_WL_LNADIV_FORCE_ON);
return 0;
@@ -2504,7 +2508,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
else
pCap->rts_aggr_limit = (8 * 1024);
-#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
+#ifdef CONFIG_ATH9K_RFKILL
ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
if (ah->rfsilent & EEP_RFSILENT_ENABLED) {
ah->rfkill_gpio =
@@ -2550,34 +2554,28 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah))
pCap->hw_caps |= ATH9K_HW_CAP_SGI_20;
- if (AR_SREV_9285(ah))
+ if (AR_SREV_9285(ah)) {
if (ah->eep_ops->get_eeprom(ah, EEP_MODAL_VER) >= 3) {
ant_div_ctl1 =
ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
- if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1))
+ if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1)) {
pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
+ ath_info(common, "Enable LNA combining\n");
+ }
}
+ }
+
if (AR_SREV_9300_20_OR_LATER(ah)) {
if (ah->eep_ops->get_eeprom(ah, EEP_CHAIN_MASK_REDUCE))
pCap->hw_caps |= ATH9K_HW_CAP_APM;
}
-
if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah)) {
ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
- /*
- * enable the diversity-combining algorithm only when
- * both enable_lna_div and enable_fast_div are set
- * Table for Diversity
- * ant_div_alt_lnaconf bit 0-1
- * ant_div_main_lnaconf bit 2-3
- * ant_div_alt_gaintb bit 4
- * ant_div_main_gaintb bit 5
- * enable_ant_div_lnadiv bit 6
- * enable_ant_fast_div bit 7
- */
- if ((ant_div_ctl1 >> 0x6) == 0x3)
+ if ((ant_div_ctl1 >> 0x6) == 0x3) {
pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
+ ath_info(common, "Enable LNA combining\n");
+ }
}
if (ath9k_hw_dfs_tested(ah))
@@ -2610,6 +2608,13 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
+ /*
+ * Fast channel change across bands is available
+ * only for AR9462 and AR9565.
+ */
+ if (AR_SREV_9462(ah) || AR_SREV_9565(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_FCC_BAND_SWITCH;
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index cd74b3afef7..69a907b55a7 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -247,6 +247,8 @@ enum ath9k_hw_caps {
ATH9K_HW_CAP_DFS = BIT(16),
ATH9K_HW_WOW_DEVICE_CAPABLE = BIT(17),
ATH9K_HW_CAP_PAPRD = BIT(18),
+ ATH9K_HW_CAP_FCC_BAND_SWITCH = BIT(19),
+ ATH9K_HW_CAP_BT_ANT_DIV = BIT(20),
};
/*
@@ -309,8 +311,11 @@ struct ath9k_ops_config {
u16 ani_poll_interval; /* ANI poll interval in ms */
/* Platform specific config */
+ u32 aspm_l1_fix;
u32 xlna_gpio;
+ u32 ant_ctrl_comm2g_switch_enable;
bool xatten_margin_cfg;
+ bool alt_mingainidx;
};
enum ath9k_int {
@@ -716,11 +721,14 @@ struct ath_hw_ops {
struct ath_hw_antcomb_conf *antconf);
void (*antdiv_comb_conf_set)(struct ath_hw *ah,
struct ath_hw_antcomb_conf *antconf);
- void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
void (*spectral_scan_config)(struct ath_hw *ah,
struct ath_spec_scan *param);
void (*spectral_scan_trigger)(struct ath_hw *ah);
void (*spectral_scan_wait)(struct ath_hw *ah);
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
+#endif
};
struct ath_nf_limits {
@@ -765,7 +773,6 @@ struct ath_hw {
bool aspm_enabled;
bool is_monitoring;
bool need_an_top2_fixup;
- bool shared_chain_lnadiv;
u16 tx_trig_level;
u32 nf_regs[6];
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 16f8b201642..9a1f349f926 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -53,9 +53,9 @@ static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
-static int ath9k_enable_diversity;
-module_param_named(enable_diversity, ath9k_enable_diversity, int, 0444);
-MODULE_PARM_DESC(enable_diversity, "Enable Antenna diversity for AR9565");
+static int ath9k_bt_ant_diversity;
+module_param_named(bt_ant_diversity, ath9k_bt_ant_diversity, int, 0444);
+MODULE_PARM_DESC(bt_ant_diversity, "Enable WLAN/BT RX antenna diversity");
bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */
@@ -146,14 +146,22 @@ static struct ieee80211_rate ath9k_legacy_rates[] = {
RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(60, 0x0b, 0),
- RATE(90, 0x0f, 0),
- RATE(120, 0x0a, 0),
- RATE(180, 0x0e, 0),
- RATE(240, 0x09, 0),
- RATE(360, 0x0d, 0),
- RATE(480, 0x08, 0),
- RATE(540, 0x0c, 0),
+ RATE(60, 0x0b, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(90, 0x0f, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(120, 0x0a, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(180, 0x0e, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(240, 0x09, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(360, 0x0d, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(480, 0x08, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
+ RATE(540, 0x0c, (IEEE80211_RATE_SUPPORTS_5MHZ |
+ IEEE80211_RATE_SUPPORTS_10MHZ)),
};
#ifdef CONFIG_MAC80211_LEDS
@@ -516,6 +524,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
static void ath9k_init_platform(struct ath_softc *sc)
{
struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_common *common = ath9k_hw_common(ah);
if (common->bus_ops->ath_bus_type != ATH_PCI)
@@ -525,12 +534,27 @@ static void ath9k_init_platform(struct ath_softc *sc)
ATH9K_PCI_CUS230)) {
ah->config.xlna_gpio = 9;
ah->config.xatten_margin_cfg = true;
+ ah->config.alt_mingainidx = true;
+ ah->config.ant_ctrl_comm2g_switch_enable = 0x000BBB88;
+ sc->ant_comb.low_rssi_thresh = 20;
+ sc->ant_comb.fast_div_bias = 3;
ath_info(common, "Set parameters for %s\n",
(sc->driver_data & ATH9K_PCI_CUS198) ?
"CUS198" : "CUS230");
- } else if (sc->driver_data & ATH9K_PCI_CUS217) {
+ }
+
+ if (sc->driver_data & ATH9K_PCI_CUS217)
ath_info(common, "CUS217 card detected\n");
+
+ if (sc->driver_data & ATH9K_PCI_BT_ANT_DIV) {
+ pCap->hw_caps |= ATH9K_HW_CAP_BT_ANT_DIV;
+ ath_info(common, "Set BT/WLAN RX diversity capability\n");
+ }
+
+ if (sc->driver_data & ATH9K_PCI_D3_L1_WAR) {
+ ah->config.pcie_waen = 0x0040473b;
+ ath_info(common, "Enable WAR for ASPM D3/L1\n");
}
}
@@ -584,6 +608,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
{
struct ath9k_platform_data *pdata = sc->dev->platform_data;
struct ath_hw *ah = NULL;
+ struct ath9k_hw_capabilities *pCap;
struct ath_common *common;
int ret = 0, i;
int csz = 0;
@@ -600,6 +625,7 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ah->reg_ops.rmw = ath9k_reg_rmw;
atomic_set(&ah->intr_ref_cnt, -1);
sc->sc_ah = ah;
+ pCap = &ah->caps;
sc->dfs_detector = dfs_pattern_detector_init(ah, NL80211_DFS_UNSET);
@@ -631,11 +657,15 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
ath9k_init_platform(sc);
/*
- * Enable Antenna diversity only when BTCOEX is disabled
- * and the user manually requests the feature.
+ * Enable WLAN/BT RX Antenna diversity only when:
+ *
+ * - BTCOEX is disabled.
+ * - the user manually requests the feature.
+ * - the HW cap is set using the platform data.
*/
- if (!common->btcoex_enabled && ath9k_enable_diversity)
- common->antenna_diversity = 1;
+ if (!common->btcoex_enabled && ath9k_bt_ant_diversity &&
+ (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV))
+ common->bt_ant_diversity = 1;
spin_lock_init(&common->cc_lock);
@@ -710,13 +740,15 @@ static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
struct ieee80211_supported_band *sband;
struct ieee80211_channel *chan;
struct ath_hw *ah = sc->sc_ah;
+ struct cfg80211_chan_def chandef;
int i;
sband = &sc->sbands[band];
for (i = 0; i < sband->n_channels; i++) {
chan = &sband->channels[i];
ah->curchan = &ah->channels[chan->hw_value];
- ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
+ cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
+ ath9k_cmn_update_ichannel(ah->curchan, &chandef);
ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
}
}
@@ -802,7 +834,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
- IEEE80211_HW_SUPPORTS_RC_TABLE;
+ IEEE80211_HW_SUPPORTS_RC_TABLE |
+ IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
@@ -834,6 +867,8 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;
+ hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
#ifdef CONFIG_PM_SLEEP
if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
diff --git a/drivers/net/wireless/ath/ath9k/link.c b/drivers/net/wireless/ath/ath9k/link.c
index fff5d3ccc66..2f831db396a 100644
--- a/drivers/net/wireless/ath/ath9k/link.c
+++ b/drivers/net/wireless/ath/ath9k/link.c
@@ -41,7 +41,7 @@ void ath_tx_complete_poll_work(struct work_struct *work)
txq->axq_tx_inprogress = true;
}
}
- ath_txq_unlock_complete(sc, txq);
+ ath_txq_unlock(sc, txq);
}
if (needreset) {
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 2ef05ebffbc..a3eff0986a3 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -583,9 +583,9 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
rs->rs_rate = MS(ads.ds_rxstatus0, AR_RxRate);
rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
+ rs->rs_firstaggr = (ads.ds_rxstatus8 & AR_RxFirstAggr) ? 1 : 0;
rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
- rs->rs_moreaggr =
- (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
+ rs->rs_moreaggr = (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
/* directly mapped flags for ieee80211_rx_status */
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index b02dfce964b..bfccaceed44 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -140,6 +140,7 @@ struct ath_rx_status {
int8_t rs_rssi_ext1;
int8_t rs_rssi_ext2;
u8 rs_isaggr;
+ u8 rs_firstaggr;
u8 rs_moreaggr;
u8 rs_num_delims;
u8 rs_flags;
@@ -569,6 +570,7 @@ struct ar5416_desc {
#define AR_RxAggr 0x00020000
#define AR_PostDelimCRCErr 0x00040000
#define AR_RxStatusRsvd71 0x3ff80000
+#define AR_RxFirstAggr 0x20000000
#define AR_DecryptBusyErr 0x40000000
#define AR_KeyMiss 0x80000000
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 1737a3e3368..e4f65900132 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -173,8 +173,7 @@ static void ath_restart_work(struct ath_softc *sc)
{
ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
- if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9485(sc->sc_ah) ||
- AR_SREV_9550(sc->sc_ah))
+ if (AR_SREV_9340(sc->sc_ah) || AR_SREV_9330(sc->sc_ah))
ieee80211_queue_delayed_work(sc->hw, &sc->hw_pll_work,
msecs_to_jiffies(ATH_PLL_WORK_INTERVAL));
@@ -238,9 +237,6 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
ath_restart_work(sc);
}
- if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3)
- ath_ant_comb_update(sc);
-
ieee80211_wake_queues(sc->hw);
return true;
@@ -966,6 +962,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_vif *avp = (void *)vif->drv_priv;
+ struct ath_node *an = &avp->mcast_node;
mutex_lock(&sc->mutex);
@@ -979,6 +977,12 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_assign_slot(sc, vif);
+ an->sc = sc;
+ an->sta = NULL;
+ an->vif = vif;
+ an->no_ps_filter = true;
+ ath_tx_node_init(sc, an);
+
mutex_unlock(&sc->mutex);
return 0;
}
@@ -1016,6 +1020,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
{
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_vif *avp = (void *)vif->drv_priv;
ath_dbg(common, CONFIG, "Detach Interface\n");
@@ -1026,10 +1031,15 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_remove_slot(sc, vif);
+ if (sc->csa_vif == vif)
+ sc->csa_vif = NULL;
+
ath9k_ps_wakeup(sc);
ath9k_calculate_summary_state(hw, NULL);
ath9k_ps_restore(sc);
+ ath_tx_node_cleanup(sc, &avp->mcast_node);
+
mutex_unlock(&sc->mutex);
}
@@ -1193,8 +1203,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) || reset_channel) {
struct ieee80211_channel *curchan = hw->conf.chandef.chan;
- enum nl80211_channel_type channel_type =
- cfg80211_get_chandef_type(&conf->chandef);
int pos = curchan->hw_value;
int old_pos = -1;
unsigned long flags;
@@ -1202,8 +1210,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (ah->curchan)
old_pos = ah->curchan - &ah->channels[0];
- ath_dbg(common, CONFIG, "Set channel: %d MHz type: %d\n",
- curchan->center_freq, channel_type);
+ ath_dbg(common, CONFIG, "Set channel: %d MHz width: %d\n",
+ curchan->center_freq, hw->conf.chandef.width);
/* update survey stats for the old channel before switching */
spin_lock_irqsave(&common->cc_lock, flags);
@@ -1211,7 +1219,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
spin_unlock_irqrestore(&common->cc_lock, flags);
ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
- curchan, channel_type);
+ &conf->chandef);
/*
* If the operating channel changes, change the survey in-use flags
@@ -1374,9 +1382,6 @@ static void ath9k_sta_notify(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
struct ath_node *an = (struct ath_node *) sta->drv_priv;
- if (!sta->ht_cap.ht_supported)
- return;
-
switch (cmd) {
case STA_NOTIFY_SLEEP:
an->sleeping = true;
@@ -2094,7 +2099,7 @@ static void ath9k_wow_add_pattern(struct ath_softc *sc,
{
struct ath_hw *ah = sc->sc_ah;
struct ath9k_wow_pattern *wow_pattern = NULL;
- struct cfg80211_wowlan_trig_pkt_pattern *patterns = wowlan->patterns;
+ struct cfg80211_pkt_pattern *patterns = wowlan->patterns;
int mask_len;
s8 i = 0;
@@ -2315,6 +2320,19 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
clear_bit(SC_OP_SCANNING, &sc->sc_flags);
}
+static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ath_softc *sc = hw->priv;
+
+ /* mac80211 does not support CSA in multi-if cases (yet) */
+ if (WARN_ON(sc->csa_vif))
+ return;
+
+ sc->csa_vif = vif;
+}
+
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
.start = ath9k_start,
@@ -2359,8 +2377,8 @@ struct ieee80211_ops ath9k_ops = {
#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
.sta_add_debugfs = ath9k_sta_add_debugfs,
- .sta_remove_debugfs = ath9k_sta_remove_debugfs,
#endif
.sw_scan_start = ath9k_sw_scan_start,
.sw_scan_complete = ath9k_sw_scan_complete,
+ .channel_switch_beacon = ath9k_channel_switch_beacon,
};
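
The main.c hunks above, together with the beacon.c change earlier in this patch, form the CSA path: mac80211 invokes the new channel_switch_beacon op, the driver records the vif, and beacon processing later completes the switch. A condensed sketch of that hand-off, written as a comment (no new driver symbols):

/* Hedged summary of the CSA flow added by this patch:
 *
 * 1. mac80211 calls .channel_switch_beacon(hw, vif, chandef);
 *    ath9k stores the vif in sc->csa_vif (one vif at a time,
 *    hence the WARN_ON above).
 * 2. The beacon tasklet (non-EDMA) or the TX completion path
 *    (EDMA) calls ath9k_csa_is_finished(sc).
 * 3. Once vif->csa_active and ieee80211_csa_is_complete(vif)
 *    are true, ieee80211_csa_finish(vif) is called and
 *    sc->csa_vif is cleared.
 */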
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index c585c9b3597..d089a7cf01c 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -29,6 +29,60 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
+
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x1C71),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ PCI_VENDOR_ID_FOXCONN,
+ 0xE01F),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x11AD, /* LITEON */
+ 0x6632),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x11AD, /* LITEON */
+ 0x6642),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ PCI_VENDOR_ID_QMI,
+ 0x0306),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x185F, /* WNC */
+ 0x309D),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x10CF, /* Fujitsu */
+ 0x147C),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x10CF, /* Fujitsu */
+ 0x147D),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002A,
+ 0x10CF, /* Fujitsu */
+ 0x1536),
+ .driver_data = ATH9K_PCI_D3_L1_WAR },
+
+ /* AR9285 card for Asus */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x002B,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x2C37),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+
{ PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */
{ PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
{ PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
@@ -40,29 +94,106 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
0x0032,
PCI_VENDOR_ID_AZWAVE,
0x2086),
- .driver_data = ATH9K_PCI_CUS198 },
+ .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0032,
PCI_VENDOR_ID_AZWAVE,
0x1237),
- .driver_data = ATH9K_PCI_CUS198 },
+ .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0032,
PCI_VENDOR_ID_AZWAVE,
0x2126),
- .driver_data = ATH9K_PCI_CUS198 },
+ .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_AZWAVE,
+ 0x126A),
+ .driver_data = ATH9K_PCI_CUS198 | ATH9K_PCI_BT_ANT_DIV },
/* PCI-E CUS230 */
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0032,
PCI_VENDOR_ID_AZWAVE,
0x2152),
- .driver_data = ATH9K_PCI_CUS230 },
+ .driver_data = ATH9K_PCI_CUS230 | ATH9K_PCI_BT_ANT_DIV },
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
0x0032,
PCI_VENDOR_ID_FOXCONN,
0xE075),
- .driver_data = ATH9K_PCI_CUS230 },
+ .driver_data = ATH9K_PCI_CUS230 | ATH9K_PCI_BT_ANT_DIV },
+
+ /* WB225 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3119),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_ATHEROS,
+ 0x3122),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x185F, /* WNC */
+ 0x3119),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ 0x185F, /* WNC */
+ 0x3027),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x4105),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x4106),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x410D),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x410E),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0x410F),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0xC706),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0xC680),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_SAMSUNG,
+ 0xC708),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_LENOVO,
+ 0x3218),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ATHEROS,
+ 0x0032,
+ PCI_VENDOR_ID_LENOVO,
+ 0x3219),
+ .driver_data = ATH9K_PCI_BT_ANT_DIV },
{ PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
{ PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */
@@ -229,6 +360,22 @@ static void ath_pci_aspm_init(struct ath_common *common)
return;
}
+ /*
+ * 0x70c - Ack Frequency Register.
+ *
+ * Bits 27:29 - DEFAULT_L1_ENTRANCE_LATENCY.
+ *
+ * 000 : 1 us
+ * 001 : 2 us
+ * 010 : 4 us
+ * 011 : 8 us
+ * 100 : 16 us
+ * 101 : 32 us
+ * 110/111 : 64 us
+ */
+ if (AR_SREV_9462(ah))
+ pci_read_config_dword(pdev, 0x70c, &ah->config.aspm_l1_fix);
+
pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &aspm);
if (aspm & (PCI_EXP_LNKCTL_ASPM_L0S | PCI_EXP_LNKCTL_ASPM_L1)) {
ah->aspm_enabled = true;
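A minimal sketch (not part of the patch above) of how the 0x70c value saved into ah->config.aspm_l1_fix might be decoded, assuming the latency table given in the patch comment and that bit 27 is the least-significant bit of the 27:29 field; the helper name is illustrative only:

static unsigned int ar9462_l1_entrance_latency_us(u32 ack_freq_reg)
{
	/* table from the 0x70c comment above: 000 = 1 us ... 110/111 = 64 us */
	static const unsigned int latency_us[8] = {
		1, 2, 4, 8, 16, 32, 64, 64
	};

	return latency_us[(ack_freq_reg >> 27) & 0x7];
}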
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index 8b380305b0f..4a1b99238ec 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -48,4 +48,11 @@
#define AR_PHY_PLL_CONTROL 0x16180
#define AR_PHY_PLL_MODE 0x16184
+enum ath9k_ant_div_comb_lna_conf {
+ ATH_ANT_DIV_COMB_LNA1_MINUS_LNA2,
+ ATH_ANT_DIV_COMB_LNA2,
+ ATH_ANT_DIV_COMB_LNA1,
+ ATH_ANT_DIV_COMB_LNA1_PLUS_LNA2,
+};
+
#endif
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 7eb1f4b458e..d3d7c51fa6c 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1275,15 +1275,21 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
}
static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta)
{
struct ath_softc *sc = priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ath_rate_priv *ath_rc_priv = priv_sta;
int i, j = 0;
+ u32 rate_flags = ieee80211_chandef_rate_flags(&sc->hw->conf.chandef);
for (i = 0; i < sband->n_bitrates; i++) {
if (sta->supp_rates[sband->band] & BIT(i)) {
+ if ((rate_flags & sband->bitrates[i].flags)
+ != rate_flags)
+ continue;
+
ath_rc_priv->neg_rates.rs_rates[j]
= (sband->bitrates[i].bitrate * 2) / 10;
j++;
@@ -1313,6 +1319,7 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
}
static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta,
u32 changed)
{
@@ -1324,8 +1331,8 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
ath_rc_init(sc, priv_sta);
ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
- "Operating HT Bandwidth changed to: %d\n",
- cfg80211_get_chandef_type(&sc->hw->conf.chandef));
+ "Operating Bandwidth changed to: %d\n",
+ sc->hw->conf.chandef.width);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 865e043e8aa..4ee472a5a4e 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -42,8 +42,6 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
struct ath_desc *ds;
struct sk_buff *skb;
- ATH_RXBUF_RESET(bf);
-
ds = bf->bf_desc;
ds->ds_link = 0; /* link to null */
ds->ds_data = bf->bf_buf_addr;
@@ -70,6 +68,14 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
sc->rx.rxlink = &ds->ds_link;
}
+static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
+{
+ if (sc->rx.buf_hold)
+ ath_rx_buf_link(sc, sc->rx.buf_hold);
+
+ sc->rx.buf_hold = bf;
+}
+
static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
{
/* XXX block beacon interrupts */
@@ -117,7 +123,6 @@ static bool ath_rx_edma_buf_link(struct ath_softc *sc,
skb = bf->bf_mpdu;
- ATH_RXBUF_RESET(bf);
memset(skb->data, 0, ah->caps.rx_status_len);
dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
ah->caps.rx_status_len, DMA_TO_DEVICE);
@@ -185,7 +190,7 @@ static void ath_rx_edma_cleanup(struct ath_softc *sc)
static void ath_rx_edma_init_queue(struct ath_rx_edma *rx_edma, int size)
{
- skb_queue_head_init(&rx_edma->rx_fifo);
+ __skb_queue_head_init(&rx_edma->rx_fifo);
rx_edma->rx_fifo_hwsize = size;
}
@@ -432,6 +437,7 @@ int ath_startrecv(struct ath_softc *sc)
if (list_empty(&sc->rx.rxbuf))
goto start_recv;
+ sc->rx.buf_hold = NULL;
sc->rx.rxlink = NULL;
list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
ath_rx_buf_link(sc, bf);
@@ -677,6 +683,9 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
}
bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
+ if (bf == sc->rx.buf_hold)
+ return NULL;
+
ds = bf->bf_desc;
/*
@@ -755,7 +764,6 @@ static bool ath9k_rx_accept(struct ath_common *common,
bool is_mc, is_valid_tkip, strip_mic, mic_error;
struct ath_hw *ah = common->ah;
__le16 fc;
- u8 rx_status_len = ah->caps.rx_status_len;
fc = hdr->frame_control;
@@ -777,25 +785,6 @@ static bool ath9k_rx_accept(struct ath_common *common,
!test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
- if (!rx_stats->rs_datalen) {
- RX_STAT_INC(rx_len_err);
- return false;
- }
-
- /*
- * rs_status follows rs_datalen so if rs_datalen is too large
- * we can take a hint that hardware corrupted it, so ignore
- * those frames.
- */
- if (rx_stats->rs_datalen > (common->rx_bufsize - rx_status_len)) {
- RX_STAT_INC(rx_len_err);
- return false;
- }
-
- /* Only use error bits from the last fragment */
- if (rx_stats->rs_more)
- return true;
-
mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
!ieee80211_has_morefrags(fc) &&
!(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
@@ -814,8 +803,6 @@ static bool ath9k_rx_accept(struct ath_common *common,
rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
mic_error = false;
}
- if (rx_stats->rs_status & ATH9K_RXERR_PHY)
- return false;
if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
(!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
@@ -865,6 +852,17 @@ static int ath9k_process_rate(struct ath_common *common,
band = hw->conf.chandef.chan->band;
sband = hw->wiphy->bands[band];
+ switch (hw->conf.chandef.width) {
+ case NL80211_CHAN_WIDTH_5:
+ rxs->flag |= RX_FLAG_5MHZ;
+ break;
+ case NL80211_CHAN_WIDTH_10:
+ rxs->flag |= RX_FLAG_10MHZ;
+ break;
+ default:
+ break;
+ }
+
if (rx_stats->rs_rate & 0x80) {
/* HT rate */
rxs->flag |= RX_FLAG_HT;
@@ -898,129 +896,65 @@ static int ath9k_process_rate(struct ath_common *common,
static void ath9k_process_rssi(struct ath_common *common,
struct ieee80211_hw *hw,
- struct ieee80211_hdr *hdr,
- struct ath_rx_status *rx_stats)
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs)
{
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = common->ah;
int last_rssi;
int rssi = rx_stats->rs_rssi;
- if (!rx_stats->is_mybeacon ||
- ((ah->opmode != NL80211_IFTYPE_STATION) &&
- (ah->opmode != NL80211_IFTYPE_ADHOC)))
+ /*
+ * RSSI is not available for subframes in an A-MPDU.
+ */
+ if (rx_stats->rs_moreaggr) {
+ rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
return;
-
- if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
- ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
-
- last_rssi = sc->last_rssi;
- if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
- rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
- if (rssi < 0)
- rssi = 0;
-
- /* Update Beacon RSSI, this is used by ANI. */
- ah->stats.avgbrssi = rssi;
-}
-
-/*
- * For Decrypt or Demic errors, we only mark packet status here and always push
- * up the frame up to let mac80211 handle the actual error case, be it no
- * decryption key or real decryption error. This let us keep statistics there.
- */
-static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
- struct ieee80211_hdr *hdr,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rx_status,
- bool *decrypt_error)
-{
- struct ieee80211_hw *hw = sc->hw;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- bool discard_current = sc->rx.discard_next;
-
- sc->rx.discard_next = rx_stats->rs_more;
- if (discard_current)
- return -EINVAL;
+ }
/*
- * everything but the rate is checked here, the rate check is done
- * separately to avoid doing two lookups for a rate for each frame.
+ * Check if the RSSI for the last subframe in an A-MPDU
+ * or an unaggregated frame is valid.
*/
- if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
- return -EINVAL;
-
- /* Only use status info from the last fragment */
- if (rx_stats->rs_more)
- return 0;
+ if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) {
+ rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
+ return;
+ }
- if (ath9k_process_rate(common, hw, rx_stats, rx_status))
- return -EINVAL;
+ /*
+	 * Update Beacon RSSI; this is used by ANI.
+ */
+ if (rx_stats->is_mybeacon &&
+ ((ah->opmode == NL80211_IFTYPE_STATION) ||
+ (ah->opmode == NL80211_IFTYPE_ADHOC))) {
+ ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
+ last_rssi = sc->last_rssi;
- ath9k_process_rssi(common, hw, hdr, rx_stats);
+ if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+ rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
+ if (rssi < 0)
+ rssi = 0;
- rx_status->band = hw->conf.chandef.chan->band;
- rx_status->freq = hw->conf.chandef.chan->center_freq;
- rx_status->signal = ah->noise + rx_stats->rs_rssi;
- rx_status->antenna = rx_stats->rs_antenna;
- rx_status->flag |= RX_FLAG_MACTIME_END;
- if (rx_stats->rs_moreaggr)
- rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+ ah->stats.avgbrssi = rssi;
+ }
- sc->rx.discard_next = false;
- return 0;
+ rxs->signal = ah->noise + rx_stats->rs_rssi;
}
-static void ath9k_rx_skb_postprocess(struct ath_common *common,
- struct sk_buff *skb,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rxs,
- bool decrypt_error)
+static void ath9k_process_tsf(struct ath_rx_status *rs,
+ struct ieee80211_rx_status *rxs,
+ u64 tsf)
{
- struct ath_hw *ah = common->ah;
- struct ieee80211_hdr *hdr;
- int hdrlen, padpos, padsize;
- u8 keyix;
- __le16 fc;
-
- /* see if any padding is done by the hw and remove it */
- hdr = (struct ieee80211_hdr *) skb->data;
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- fc = hdr->frame_control;
- padpos = ieee80211_hdrlen(fc);
-
- /* The MAC header is padded to have 32-bit boundary if the
- * packet payload is non-zero. The general calculation for
- * padsize would take into account odd header lengths:
- * padsize = (4 - padpos % 4) % 4; However, since only
- * even-length headers are used, padding can only be 0 or 2
- * bytes and we can optimize this a bit. In addition, we must
- * not try to remove padding from short control frames that do
- * not have payload. */
- padsize = padpos & 3;
- if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
- memmove(skb->data + padsize, skb->data, padpos);
- skb_pull(skb, padsize);
- }
+ u32 tsf_lower = tsf & 0xffffffff;
- keyix = rx_stats->rs_keyix;
+ rxs->mactime = (tsf & ~0xffffffffULL) | rs->rs_tstamp;
+ if (rs->rs_tstamp > tsf_lower &&
+ unlikely(rs->rs_tstamp - tsf_lower > 0x10000000))
+ rxs->mactime -= 0x100000000ULL;
- if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
- ieee80211_has_protected(fc)) {
- rxs->flag |= RX_FLAG_DECRYPTED;
- } else if (ieee80211_has_protected(fc)
- && !decrypt_error && skb->len >= hdrlen + 4) {
- keyix = skb->data[hdrlen + 3] >> 6;
-
- if (test_bit(keyix, common->keymap))
- rxs->flag |= RX_FLAG_DECRYPTED;
- }
- if (ah->sw_mgmt_crypto &&
- (rxs->flag & RX_FLAG_DECRYPTED) &&
- ieee80211_is_mgmt(fc))
- /* Use software decrypt for management frames. */
- rxs->flag &= ~RX_FLAG_DECRYPTED;
+ if (rs->rs_tstamp < tsf_lower &&
+ unlikely(tsf_lower - rs->rs_tstamp > 0x10000000))
+ rxs->mactime += 0x100000000ULL;
}
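/*
 * Editorial sketch, not part of the patch: ath9k_process_tsf() above
 * rebuilds a 64-bit mactime from the 32-bit RX timestamp and the current
 * 64-bit TSF. If the two 32-bit values differ by more than 0x10000000
 * ticks, the TSF is assumed to have wrapped between the frame and the
 * TSF read, so one 2^32 period is subtracted or added. The helper name
 * below is made up; the logic mirrors the function above.
 */
static u64 rx_rebuild_mactime(u64 tsf, u32 rs_tstamp)
{
	u32 tsf_lower = tsf & 0xffffffff;
	u64 mactime = (tsf & ~0xffffffffULL) | rs_tstamp;

	if (rs_tstamp > tsf_lower && rs_tstamp - tsf_lower > 0x10000000)
		mactime -= 0x100000000ULL;
	if (rs_tstamp < tsf_lower && tsf_lower - rs_tstamp > 0x10000000)
		mactime += 0x100000000ULL;

	return mactime;
}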
#ifdef CONFIG_ATH9K_DEBUGFS
@@ -1133,6 +1067,234 @@ static int ath_process_fft(struct ath_softc *sc, struct ieee80211_hdr *hdr,
#endif
}
+static bool ath9k_is_mybeacon(struct ath_softc *sc, struct ieee80211_hdr *hdr)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (ieee80211_is_beacon(hdr->frame_control)) {
+ RX_STAT_INC(rx_beacons);
+ if (!is_zero_ether_addr(common->curbssid) &&
+ ether_addr_equal(hdr->addr3, common->curbssid))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * For Decrypt or Demic errors, we only mark the packet status here and always
+ * push the frame up to let mac80211 handle the actual error case, be it a
+ * missing decryption key or a real decryption error. This lets us keep
+ * statistics there.
+ */
+static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
+ struct sk_buff *skb,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rx_status,
+ bool *decrypt_error, u64 tsf)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ieee80211_hdr *hdr;
+ bool discard_current = sc->rx.discard_next;
+ int ret = 0;
+
+ /*
+ * Discard corrupt descriptors which are marked in
+ * ath_get_next_rx_buf().
+ */
+ sc->rx.discard_next = rx_stats->rs_more;
+ if (discard_current)
+ return -EINVAL;
+
+ /*
+ * Discard zero-length packets.
+ */
+ if (!rx_stats->rs_datalen) {
+ RX_STAT_INC(rx_len_err);
+ return -EINVAL;
+ }
+
+ /*
+	 * rs_status follows rs_datalen, so if rs_datalen is too large we can
+	 * take it as a hint that the hardware corrupted it and ignore those
+	 * frames.
+ */
+ if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
+ RX_STAT_INC(rx_len_err);
+ return -EINVAL;
+ }
+
+ /* Only use status info from the last fragment */
+ if (rx_stats->rs_more)
+ return 0;
+
+ /*
+ * Return immediately if the RX descriptor has been marked
+ * as corrupt based on the various error bits.
+ *
+ * This is different from the other corrupt descriptor
+ * condition handled above.
+ */
+ if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);
+
+ ath9k_process_tsf(rx_stats, rx_status, tsf);
+ ath_debug_stat_rx(sc, rx_stats);
+
+ /*
+ * Process PHY errors and return so that the packet
+ * can be dropped.
+ */
+ if (rx_stats->rs_status & ATH9K_RXERR_PHY) {
+ ath9k_dfs_process_phyerr(sc, hdr, rx_stats, rx_status->mactime);
+ if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime))
+ RX_STAT_INC(rx_spectral);
+
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ /*
+	 * Everything but the rate is checked here; the rate check is done
+	 * separately to avoid doing two lookups for a rate for each frame.
+ */
+ if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ rx_stats->is_mybeacon = ath9k_is_mybeacon(sc, hdr);
+ if (rx_stats->is_mybeacon) {
+ sc->hw_busy_count = 0;
+ ath_start_rx_poll(sc, 3);
+ }
+
+ if (ath9k_process_rate(common, hw, rx_stats, rx_status)) {
+		ret = -EINVAL;
+ goto exit;
+ }
+
+ ath9k_process_rssi(common, hw, rx_stats, rx_status);
+
+ rx_status->band = hw->conf.chandef.chan->band;
+ rx_status->freq = hw->conf.chandef.chan->center_freq;
+ rx_status->antenna = rx_stats->rs_antenna;
+ rx_status->flag |= RX_FLAG_MACTIME_END;
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ if (ieee80211_is_data_present(hdr->frame_control) &&
+ !ieee80211_is_qos_nullfunc(hdr->frame_control))
+ sc->rx.num_pkts++;
+#endif
+
+exit:
+ sc->rx.discard_next = false;
+ return ret;
+}
+
+static void ath9k_rx_skb_postprocess(struct ath_common *common,
+ struct sk_buff *skb,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs,
+ bool decrypt_error)
+{
+ struct ath_hw *ah = common->ah;
+ struct ieee80211_hdr *hdr;
+ int hdrlen, padpos, padsize;
+ u8 keyix;
+ __le16 fc;
+
+ /* see if any padding is done by the hw and remove it */
+ hdr = (struct ieee80211_hdr *) skb->data;
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ fc = hdr->frame_control;
+ padpos = ieee80211_hdrlen(fc);
+
+	/* The MAC header is padded to a 32-bit boundary if the
+ * packet payload is non-zero. The general calculation for
+ * padsize would take into account odd header lengths:
+ * padsize = (4 - padpos % 4) % 4; However, since only
+ * even-length headers are used, padding can only be 0 or 2
+ * bytes and we can optimize this a bit. In addition, we must
+ * not try to remove padding from short control frames that do
+ * not have payload. */
+ padsize = padpos & 3;
+	if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
+ memmove(skb->data + padsize, skb->data, padpos);
+ skb_pull(skb, padsize);
+ }
+
+ keyix = rx_stats->rs_keyix;
+
+ if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
+ ieee80211_has_protected(fc)) {
+ rxs->flag |= RX_FLAG_DECRYPTED;
+ } else if (ieee80211_has_protected(fc)
+ && !decrypt_error && skb->len >= hdrlen + 4) {
+ keyix = skb->data[hdrlen + 3] >> 6;
+
+ if (test_bit(keyix, common->keymap))
+ rxs->flag |= RX_FLAG_DECRYPTED;
+ }
+ if (ah->sw_mgmt_crypto &&
+ (rxs->flag & RX_FLAG_DECRYPTED) &&
+ ieee80211_is_mgmt(fc))
+ /* Use software decrypt for management frames. */
+ rxs->flag &= ~RX_FLAG_DECRYPTED;
+}
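/*
 * Editorial sketch, not part of the patch: the padding math used in
 * ath9k_rx_skb_postprocess() above. 802.11 MAC headers always have an
 * even length, so padpos is even and padsize = padpos & 3 can only be
 * 0 or 2. The helper name below is made up.
 */
static unsigned int rx_hw_pad_bytes(unsigned int padpos)
{
	/* e.g. a 26-byte QoS data header: 26 & 3 = 2 pad bytes to strip */
	return padpos & 3;
}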
+
+/*
+ * Run the LNA combining algorithm only in these cases:
+ *
+ * Standalone WLAN cards with both LNA/Antenna diversity
+ * enabled in the EEPROM.
+ *
+ * WLAN+BT cards that are in the supported card list
+ * in ath_pci_id_table and for which the driver has been
+ * loaded with "bt_ant_diversity" set to true.
+ */
+static void ath9k_antenna_check(struct ath_softc *sc,
+ struct ath_rx_status *rs)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath9k_hw_capabilities *pCap = &ah->caps;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB))
+ return;
+
+ /*
+ * All MPDUs in an aggregate will use the same LNA
+ * as the first MPDU.
+ */
+ if (rs->rs_isaggr && !rs->rs_firstaggr)
+ return;
+
+ /*
+ * Change the default rx antenna if rx diversity
+ * chooses the other antenna 3 times in a row.
+ */
+ if (sc->rx.defant != rs->rs_antenna) {
+ if (++sc->rx.rxotherant >= 3)
+ ath_setdefantenna(sc, rs->rs_antenna);
+ } else {
+ sc->rx.rxotherant = 0;
+ }
+
+ if (pCap->hw_caps & ATH9K_HW_CAP_BT_ANT_DIV) {
+ if (common->bt_ant_diversity)
+ ath_ant_comb_scan(sc, rs);
+ } else {
+ ath_ant_comb_scan(sc, rs);
+ }
+}
+
static void ath9k_apply_ampdu_details(struct ath_softc *sc,
struct ath_rx_status *rs, struct ieee80211_rx_status *rxs)
{
@@ -1159,15 +1321,12 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_hw *hw = sc->hw;
- struct ieee80211_hdr *hdr;
int retval;
struct ath_rx_status rs;
enum ath9k_rx_qtype qtype;
bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
int dma_type;
- u8 rx_status_len = ah->caps.rx_status_len;
u64 tsf = 0;
- u32 tsf_lower = 0;
unsigned long flags;
dma_addr_t new_buf_addr;
@@ -1179,7 +1338,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
qtype = hp ? ATH9K_RX_QUEUE_HP : ATH9K_RX_QUEUE_LP;
tsf = ath9k_hw_gettsf64(ah);
- tsf_lower = tsf & 0xffffffff;
do {
bool decrypt_error = false;
@@ -1206,55 +1364,14 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
else
hdr_skb = skb;
- hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
rxs = IEEE80211_SKB_RXCB(hdr_skb);
- if (ieee80211_is_beacon(hdr->frame_control)) {
- RX_STAT_INC(rx_beacons);
- if (!is_zero_ether_addr(common->curbssid) &&
- ether_addr_equal(hdr->addr3, common->curbssid))
- rs.is_mybeacon = true;
- else
- rs.is_mybeacon = false;
- }
- else
- rs.is_mybeacon = false;
-
- if (ieee80211_is_data_present(hdr->frame_control) &&
- !ieee80211_is_qos_nullfunc(hdr->frame_control))
- sc->rx.num_pkts++;
-
- ath_debug_stat_rx(sc, &rs);
-
memset(rxs, 0, sizeof(struct ieee80211_rx_status));
- rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
- if (rs.rs_tstamp > tsf_lower &&
- unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
- rxs->mactime -= 0x100000000ULL;
-
- if (rs.rs_tstamp < tsf_lower &&
- unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
- rxs->mactime += 0x100000000ULL;
-
- if (rs.rs_phyerr == ATH9K_PHYERR_RADAR)
- ath9k_dfs_process_phyerr(sc, hdr, &rs, rxs->mactime);
-
- if (rs.rs_status & ATH9K_RXERR_PHY) {
- if (ath_process_fft(sc, hdr, &rs, rxs->mactime)) {
- RX_STAT_INC(rx_spectral);
- goto requeue_drop_frag;
- }
- }
-
- retval = ath9k_rx_skb_preprocess(sc, hdr, &rs, rxs,
- &decrypt_error);
+ retval = ath9k_rx_skb_preprocess(sc, hdr_skb, &rs, rxs,
+ &decrypt_error, tsf);
if (retval)
goto requeue_drop_frag;
- if (rs.is_mybeacon) {
- sc->hw_busy_count = 0;
- ath_start_rx_poll(sc, 3);
- }
/* Ensure we always have an skb to requeue once we are done
* processing the current buffer's skb */
requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
@@ -1308,8 +1425,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
sc->rx.frag = skb;
goto requeue;
}
- if (rs.rs_status & ATH9K_RXERR_CORRUPT_DESC)
- goto requeue_drop_frag;
if (sc->rx.frag) {
int space = skb->len - skb_tailroom(hdr_skb);
@@ -1328,22 +1443,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
skb = hdr_skb;
}
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
-
- /*
- * change the default rx antenna if rx diversity
- * chooses the other antenna 3 times in a row.
- */
- if (sc->rx.defant != rs.rs_antenna) {
- if (++sc->rx.rxotherant >= 3)
- ath_setdefantenna(sc, rs.rs_antenna);
- } else {
- sc->rx.rxotherant = 0;
- }
-
- }
-
if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
skb_trim(skb, skb->len - 8);
@@ -1355,8 +1454,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
ath_rx_ps(sc, skb, rs.is_mybeacon);
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
- if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx == 3)
- ath_ant_comb_scan(sc, &rs);
+ ath9k_antenna_check(sc, &rs);
ath9k_apply_ampdu_details(sc, &rs, rxs);
@@ -1375,7 +1473,7 @@ requeue:
if (edma) {
ath_rx_edma_buf_link(sc, qtype);
} else {
- ath_rx_buf_link(sc, bf);
+ ath_rx_buf_relink(sc, bf);
ath9k_hw_rxena(ah);
}
} while (1);
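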
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 5af97442ac3..a13b2d143d9 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -893,9 +893,9 @@
#define AR_SREV_9485(_ah) \
(((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485))
-#define AR_SREV_9485_11(_ah) \
- (AR_SREV_9485(_ah) && \
- ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_11))
+#define AR_SREV_9485_11_OR_LATER(_ah) \
+ (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485) && \
+ ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9485_11))
#define AR_SREV_9485_OR_LATER(_ah) \
(((_ah)->hw_version.macVersion >= AR_SREV_VERSION_9485))
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index c59ae43b9b3..35b515fe3ff 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -135,6 +135,9 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
{
+ if (!tid->an->sta)
+ return;
+
ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
seqno << IEEE80211_SEQ_SEQ_SHIFT);
}
@@ -146,6 +149,93 @@ static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
ARRAY_SIZE(bf->rates));
}
+static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
+ struct sk_buff *skb)
+{
+ int q;
+
+ q = skb_get_queue_mapping(skb);
+ if (txq == sc->tx.uapsdq)
+ txq = sc->tx.txq_map[q];
+
+ if (txq != sc->tx.txq_map[q])
+ return;
+
+ if (WARN_ON(--txq->pending_frames < 0))
+ txq->pending_frames = 0;
+
+ if (txq->stopped &&
+ txq->pending_frames < sc->tx.txq_max_pending[q]) {
+ ieee80211_wake_queue(sc->hw, q);
+ txq->stopped = false;
+ }
+}
+
+static struct ath_atx_tid *
+ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ u8 tidno = 0;
+
+ hdr = (struct ieee80211_hdr *) skb->data;
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ tidno = ieee80211_get_qos_ctl(hdr)[0];
+
+ tidno &= IEEE80211_QOS_CTL_TID_MASK;
+ return ATH_AN_2_TID(an, tidno);
+}
+
+static bool ath_tid_has_buffered(struct ath_atx_tid *tid)
+{
+ return !skb_queue_empty(&tid->buf_q) || !skb_queue_empty(&tid->retry_q);
+}
+
+static struct sk_buff *ath_tid_dequeue(struct ath_atx_tid *tid)
+{
+ struct sk_buff *skb;
+
+ skb = __skb_dequeue(&tid->retry_q);
+ if (!skb)
+ skb = __skb_dequeue(&tid->buf_q);
+
+ return skb;
+}
+
+/*
+ * ath_tx_tid_change_state:
+ * - clears the A-MPDU flag of the previous session
+ * - forces sequence number allocation to fix the next BlockAck Window
+ */
+static void
+ath_tx_tid_change_state(struct ath_softc *sc, struct ath_atx_tid *tid)
+{
+ struct ath_txq *txq = tid->ac->txq;
+ struct ieee80211_tx_info *tx_info;
+ struct sk_buff *skb, *tskb;
+ struct ath_buf *bf;
+ struct ath_frame_info *fi;
+
+ skb_queue_walk_safe(&tid->buf_q, skb, tskb) {
+ fi = get_frame_info(skb);
+ bf = fi->bf;
+
+ tx_info = IEEE80211_SKB_CB(skb);
+ tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
+ if (bf)
+ continue;
+
+ bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+ if (!bf) {
+ __skb_unlink(skb, &tid->buf_q);
+ ath_txq_skb_done(sc, txq, skb);
+ ieee80211_free_txskb(sc->hw, skb);
+ continue;
+ }
+	}
+}
+
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
struct ath_txq *txq = tid->ac->txq;
@@ -160,27 +250,22 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
memset(&ts, 0, sizeof(ts));
- while ((skb = __skb_dequeue(&tid->buf_q))) {
+ while ((skb = __skb_dequeue(&tid->retry_q))) {
fi = get_frame_info(skb);
bf = fi->bf;
-
if (!bf) {
- bf = ath_tx_setup_buffer(sc, txq, tid, skb);
- if (!bf) {
- ieee80211_free_txskb(sc->hw, skb);
- continue;
- }
+ ath_txq_skb_done(sc, txq, skb);
+ ieee80211_free_txskb(sc->hw, skb);
+ continue;
}
- if (fi->retries) {
- list_add_tail(&bf->list, &bf_head);
+ if (fi->baw_tracked) {
ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
sendbar = true;
- } else {
- ath_set_rates(tid->an->vif, tid->an->sta, bf);
- ath_tx_send_normal(sc, txq, NULL, skb);
}
+
+ list_add_tail(&bf->list, &bf_head);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
}
if (sendbar) {
@@ -209,13 +294,16 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
}
static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
- u16 seqno)
+ struct ath_buf *bf)
{
+ struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
+ u16 seqno = bf->bf_state.seqno;
int index, cindex;
index = ATH_BA_INDEX(tid->seq_start, seqno);
cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
__set_bit(cindex, tid->tx_buf);
+ fi->baw_tracked = 1;
if (index >= ((tid->baw_tail - tid->baw_head) &
(ATH_TID_MAX_BUFS - 1))) {
@@ -224,12 +312,6 @@ static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
}
}
-/*
- * TODO: For frame(s) that are in the retry state, we will reuse the
- * sequence number(s) without setting the retry bit. The
- * alternative is to give up on these and BAR the receiver's window
- * forward.
- */
static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid)
@@ -243,7 +325,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
memset(&ts, 0, sizeof(ts));
INIT_LIST_HEAD(&bf_head);
- while ((skb = __skb_dequeue(&tid->buf_q))) {
+ while ((skb = ath_tid_dequeue(tid))) {
fi = get_frame_info(skb);
bf = fi->bf;
@@ -253,14 +335,8 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
}
list_add_tail(&bf->list, &bf_head);
-
- ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
}
-
- tid->seq_next = tid->seq_start;
- tid->baw_tail = tid->baw_head;
- tid->bar_index = -1;
}
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
@@ -380,7 +456,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ieee80211_tx_rate rates[4];
struct ath_frame_info *fi;
int nframes;
- u8 tidno;
bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
int i, retries;
int bar_index = -1;
@@ -406,7 +481,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
while (bf) {
bf_next = bf->bf_next;
- if (!bf->bf_stale || bf_next != NULL)
+ if (!bf->bf_state.stale || bf_next != NULL)
list_move_tail(&bf->list, &bf_head);
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
@@ -417,8 +492,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
}
an = (struct ath_node *)sta->drv_priv;
- tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
- tid = ATH_AN_2_TID(an, tidno);
+ tid = ath_get_skb_tid(sc, an, skb);
seq_first = tid->seq_start;
isba = ts->ts_flags & ATH9K_TX_BA;
@@ -430,7 +504,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* Only BlockAcks have a TID and therefore normal Acks cannot be
* checked
*/
- if (isba && tidno != ts->tid)
+ if (isba && tid->tidno != ts->tid)
txok = false;
isaggr = bf_isaggr(bf);
@@ -466,7 +540,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
tx_info = IEEE80211_SKB_CB(skb);
fi = get_frame_info(skb);
- if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
+ if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno) ||
+ !tid->active) {
/*
* Outside of the current BlockAck window,
* maybe part of a previous session
@@ -499,7 +574,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* not a holding desc.
*/
INIT_LIST_HEAD(&bf_head);
- if (bf_next != NULL || !bf_last->bf_stale)
+ if (bf_next != NULL || !bf_last->bf_state.stale)
list_move_tail(&bf->list, &bf_head);
if (!txpending) {
@@ -523,7 +598,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
ieee80211_sta_eosp(sta);
}
/* retry the un-acked ones */
- if (bf->bf_next == NULL && bf_last->bf_stale) {
+ if (bf->bf_next == NULL && bf_last->bf_state.stale) {
struct ath_buf *tbf;
tbf = ath_clone_txbuf(sc, bf_last);
@@ -560,7 +635,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (an->sleeping)
ieee80211_sta_set_buffered(sta, tid->tidno, true);
- skb_queue_splice(&bf_pending, &tid->buf_q);
+ skb_queue_splice_tail(&bf_pending, &tid->retry_q);
if (!an->sleeping) {
ath_tx_queue_tid(txq, tid);
@@ -618,7 +693,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
} else
ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);
- if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) && !flush)
+ if (!flush)
ath_txq_schedule(sc, txq);
}
@@ -792,15 +867,20 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
static struct ath_buf *
ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_atx_tid *tid)
+ struct ath_atx_tid *tid, struct sk_buff_head **q)
{
+ struct ieee80211_tx_info *tx_info;
struct ath_frame_info *fi;
struct sk_buff *skb;
struct ath_buf *bf;
u16 seqno;
while (1) {
- skb = skb_peek(&tid->buf_q);
+ *q = &tid->retry_q;
+ if (skb_queue_empty(*q))
+ *q = &tid->buf_q;
+
+ skb = skb_peek(*q);
if (!skb)
break;
@@ -808,13 +888,26 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
bf = fi->bf;
if (!fi->bf)
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
+ else
+ bf->bf_state.stale = false;
if (!bf) {
- __skb_unlink(skb, &tid->buf_q);
+ __skb_unlink(skb, *q);
+ ath_txq_skb_done(sc, txq, skb);
ieee80211_free_txskb(sc->hw, skb);
continue;
}
+ bf->bf_next = NULL;
+ bf->bf_lastbf = bf;
+
+ tx_info = IEEE80211_SKB_CB(skb);
+ tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
+ if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
+ bf->bf_state.bf_type = 0;
+ return bf;
+ }
+
bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
seqno = bf->bf_state.seqno;
@@ -828,73 +921,52 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
INIT_LIST_HEAD(&bf_head);
list_add(&bf->list, &bf_head);
- __skb_unlink(skb, &tid->buf_q);
+ __skb_unlink(skb, *q);
ath_tx_update_baw(sc, tid, seqno);
ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
continue;
}
- bf->bf_next = NULL;
- bf->bf_lastbf = bf;
return bf;
}
return NULL;
}
-static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
- struct ath_txq *txq,
- struct ath_atx_tid *tid,
- struct list_head *bf_q,
- int *aggr_len)
+static bool
+ath_tx_form_aggr(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_atx_tid *tid, struct list_head *bf_q,
+ struct ath_buf *bf_first, struct sk_buff_head *tid_q,
+ int *aggr_len)
{
#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
- struct ath_buf *bf, *bf_first = NULL, *bf_prev = NULL;
- int rl = 0, nframes = 0, ndelim, prev_al = 0;
+ struct ath_buf *bf = bf_first, *bf_prev = NULL;
+ int nframes = 0, ndelim;
u16 aggr_limit = 0, al = 0, bpad = 0,
- al_delta, h_baw = tid->baw_size / 2;
- enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
+ al_delta, h_baw = tid->baw_size / 2;
struct ieee80211_tx_info *tx_info;
struct ath_frame_info *fi;
struct sk_buff *skb;
+ bool closed = false;
- do {
- bf = ath_tx_get_tid_subframe(sc, txq, tid);
- if (!bf) {
- status = ATH_AGGR_BAW_CLOSED;
- break;
- }
+ bf = bf_first;
+ aggr_limit = ath_lookup_rate(sc, bf, tid);
+ do {
skb = bf->bf_mpdu;
fi = get_frame_info(skb);
- if (!bf_first)
- bf_first = bf;
-
- if (!rl) {
- ath_set_rates(tid->an->vif, tid->an->sta, bf);
- aggr_limit = ath_lookup_rate(sc, bf, tid);
- rl = 1;
- }
-
/* do not exceed aggregation limit */
al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
+ if (nframes) {
+ if (aggr_limit < al + bpad + al_delta ||
+ ath_lookup_legacy(bf) || nframes >= h_baw)
+ break;
- if (nframes &&
- ((aggr_limit < (al + bpad + al_delta + prev_al)) ||
- ath_lookup_legacy(bf))) {
- status = ATH_AGGR_LIMITED;
- break;
- }
-
- tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
- if (nframes && (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
- break;
-
- /* do not exceed subframe limit */
- if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
- status = ATH_AGGR_LIMITED;
- break;
+ tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
+ if ((tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ||
+ !(tx_info->flags & IEEE80211_TX_CTL_AMPDU))
+ break;
}
/* add padding for previous frame to aggregation length */
@@ -912,22 +984,37 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
bf->bf_next = NULL;
/* link buffers of this frame to the aggregate */
- if (!fi->retries)
- ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
+ if (!fi->baw_tracked)
+ ath_tx_addto_baw(sc, tid, bf);
bf->bf_state.ndelim = ndelim;
- __skb_unlink(skb, &tid->buf_q);
+ __skb_unlink(skb, tid_q);
list_add_tail(&bf->list, bf_q);
if (bf_prev)
bf_prev->bf_next = bf;
bf_prev = bf;
- } while (!skb_queue_empty(&tid->buf_q));
+ bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
+ if (!bf) {
+ closed = true;
+ break;
+ }
+ } while (ath_tid_has_buffered(tid));
+
+ bf = bf_first;
+ bf->bf_lastbf = bf_prev;
+
+ if (bf == bf_prev) {
+ al = get_frame_info(bf->bf_mpdu)->framelen;
+ bf->bf_state.bf_type = BUF_AMPDU;
+ } else {
+ TX_STAT_INC(txq->axq_qnum, a_aggr);
+ }
*aggr_len = al;
- return status;
+ return closed;
#undef PADBYTES
}
@@ -999,7 +1086,7 @@ void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop)
}
static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
- struct ath_tx_info *info, int len)
+ struct ath_tx_info *info, int len, bool rts)
{
struct ath_hw *ah = sc->sc_ah;
struct sk_buff *skb;
@@ -1008,6 +1095,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
const struct ieee80211_rate *rate;
struct ieee80211_hdr *hdr;
struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
+ u32 rts_thresh = sc->hw->wiphy->rts_threshold;
int i;
u8 rix = 0;
@@ -1030,7 +1118,17 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
rix = rates[i].idx;
info->rates[i].Tries = rates[i].count;
- if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+ /*
+ * Handle RTS threshold for unaggregated HT frames.
+ */
+ if (bf_isampdu(bf) && !bf_isaggr(bf) &&
+ (rates[i].flags & IEEE80211_TX_RC_MCS) &&
+ unlikely(rts_thresh != (u32) -1)) {
+ if (!rts_thresh || (len > rts_thresh))
+ rts = true;
+ }
+
+ if (rts || rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
info->rates[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
info->flags |= ATH9K_TXDESC_RTSENA;
} else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
@@ -1123,6 +1221,8 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
struct ath_hw *ah = sc->sc_ah;
struct ath_buf *bf_first = NULL;
struct ath_tx_info info;
+ u32 rts_thresh = sc->hw->wiphy->rts_threshold;
+ bool rts = false;
memset(&info, 0, sizeof(info));
info.is_first = true;
@@ -1159,7 +1259,22 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
info.flags |= (u32) bf->bf_state.bfs_paprd <<
ATH9K_TXDESC_PAPRD_S;
- ath_buf_set_rate(sc, bf, &info, len);
+ /*
+ * mac80211 doesn't handle RTS threshold for HT because
+ * the decision has to be taken based on AMPDU length
+ * and aggregation is done entirely inside ath9k.
+ * Set the RTS/CTS flag for the first subframe based
+ * on the threshold.
+ */
+ if (aggr && (bf == bf_first) &&
+ unlikely(rts_thresh != (u32) -1)) {
+ /*
+ * "len" is the size of the entire AMPDU.
+ */
+ if (!rts_thresh || (len > rts_thresh))
+ rts = true;
+ }
+ ath_buf_set_rate(sc, bf, &info, len, rts);
}
info.buf_addr[0] = bf->bf_buf_addr;
@@ -1188,53 +1303,86 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
}
}
-static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_atx_tid *tid)
+static void
+ath_tx_form_burst(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_atx_tid *tid, struct list_head *bf_q,
+ struct ath_buf *bf_first, struct sk_buff_head *tid_q)
{
- struct ath_buf *bf;
- enum ATH_AGGR_STATUS status;
- struct ieee80211_tx_info *tx_info;
- struct list_head bf_q;
- int aggr_len;
+ struct ath_buf *bf = bf_first, *bf_prev = NULL;
+ struct sk_buff *skb;
+ int nframes = 0;
do {
- if (skb_queue_empty(&tid->buf_q))
- return;
+ struct ieee80211_tx_info *tx_info;
+ skb = bf->bf_mpdu;
- INIT_LIST_HEAD(&bf_q);
+ nframes++;
+ __skb_unlink(skb, tid_q);
+ list_add_tail(&bf->list, bf_q);
+ if (bf_prev)
+ bf_prev->bf_next = bf;
+ bf_prev = bf;
- status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
+ if (nframes >= 2)
+ break;
- /*
- * no frames picked up to be aggregated;
- * block-ack window is not open.
- */
- if (list_empty(&bf_q))
+ bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
+ if (!bf)
break;
- bf = list_first_entry(&bf_q, struct ath_buf, list);
- bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
+ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
+ break;
- if (tid->ac->clear_ps_filter) {
- tid->ac->clear_ps_filter = false;
- tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
- } else {
- tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
- }
+ ath_set_rates(tid->an->vif, tid->an->sta, bf);
+ } while (1);
+}
- /* if only one frame, send as non-aggregate */
- if (bf == bf->bf_lastbf) {
- aggr_len = get_frame_info(bf->bf_mpdu)->framelen;
- bf->bf_state.bf_type = BUF_AMPDU;
- } else {
- TX_STAT_INC(txq->axq_qnum, a_aggr);
- }
+static bool ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
+ struct ath_atx_tid *tid, bool *stop)
+{
+ struct ath_buf *bf;
+ struct ieee80211_tx_info *tx_info;
+ struct sk_buff_head *tid_q;
+ struct list_head bf_q;
+ int aggr_len = 0;
+ bool aggr, last = true;
+
+ if (!ath_tid_has_buffered(tid))
+ return false;
+
+ INIT_LIST_HEAD(&bf_q);
+
+ bf = ath_tx_get_tid_subframe(sc, txq, tid, &tid_q);
+ if (!bf)
+ return false;
+
+ tx_info = IEEE80211_SKB_CB(bf->bf_mpdu);
+ aggr = !!(tx_info->flags & IEEE80211_TX_CTL_AMPDU);
+ if ((aggr && txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) ||
+ (!aggr && txq->axq_depth >= ATH_NON_AGGR_MIN_QDEPTH)) {
+ *stop = true;
+ return false;
+ }
+
+ ath_set_rates(tid->an->vif, tid->an->sta, bf);
+ if (aggr)
+ last = ath_tx_form_aggr(sc, txq, tid, &bf_q, bf,
+ tid_q, &aggr_len);
+ else
+ ath_tx_form_burst(sc, txq, tid, &bf_q, bf, tid_q);
+
+ if (list_empty(&bf_q))
+ return false;
- ath_tx_fill_desc(sc, bf, txq, aggr_len);
- ath_tx_txqaddbuf(sc, txq, &bf_q, false);
- } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
- status != ATH_AGGR_BAW_CLOSED);
+ if (tid->ac->clear_ps_filter || tid->an->no_ps_filter) {
+ tid->ac->clear_ps_filter = false;
+ tx_info->flags |= IEEE80211_TX_CTL_CLEAR_PS_FILT;
+ }
+
+ ath_tx_fill_desc(sc, bf, txq, aggr_len);
+ ath_tx_txqaddbuf(sc, txq, &bf_q, false);
+ return true;
}
int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -1258,6 +1406,9 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
an->mpdudensity = density;
}
+ /* force sequence number allocation for pending frames */
+ ath_tx_tid_change_state(sc, txtid);
+
txtid->active = true;
txtid->paused = true;
*ssn = txtid->seq_start = txtid->seq_next;
@@ -1277,8 +1428,9 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
ath_txq_lock(sc, txq);
txtid->active = false;
- txtid->paused = true;
+ txtid->paused = false;
ath_tx_flush_tid(sc, txtid);
+ ath_tx_tid_change_state(sc, txtid);
ath_txq_unlock_complete(sc, txq);
}
@@ -1302,7 +1454,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
ath_txq_lock(sc, txq);
- buffered = !skb_queue_empty(&tid->buf_q);
+ buffered = ath_tid_has_buffered(tid);
tid->sched = false;
list_del(&tid->list);
@@ -1334,7 +1486,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
ath_txq_lock(sc, txq);
ac->clear_ps_filter = true;
- if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
+ if (!tid->paused && ath_tid_has_buffered(tid)) {
ath_tx_queue_tid(txq, tid);
ath_txq_schedule(sc, txq);
}
@@ -1359,7 +1511,7 @@ void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta,
tid->baw_size = IEEE80211_MIN_AMPDU_BUF << sta->ht_cap.ampdu_factor;
tid->paused = false;
- if (!skb_queue_empty(&tid->buf_q)) {
+ if (ath_tid_has_buffered(tid)) {
ath_tx_queue_tid(txq, tid);
ath_txq_schedule(sc, txq);
}
@@ -1379,6 +1531,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info;
struct list_head bf_q;
struct ath_buf *bf_tail = NULL, *bf;
+ struct sk_buff_head *tid_q;
int sent = 0;
int i;
@@ -1394,15 +1547,15 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
continue;
ath_txq_lock(sc, tid->ac->txq);
- while (!skb_queue_empty(&tid->buf_q) && nframes > 0) {
- bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid);
+ while (nframes > 0) {
+ bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q);
if (!bf)
break;
- __skb_unlink(bf->bf_mpdu, &tid->buf_q);
+ __skb_unlink(bf->bf_mpdu, tid_q);
list_add_tail(&bf->list, &bf_q);
ath_set_rates(tid->an->vif, tid->an->sta, bf);
- ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
+ ath_tx_addto_baw(sc, tid, bf);
bf->bf_state.bf_type &= ~BUF_AGGR;
if (bf_tail)
bf_tail->bf_next = bf;
@@ -1412,7 +1565,7 @@ void ath9k_release_buffered_frames(struct ieee80211_hw *hw,
sent++;
TX_STAT_INC(txq->axq_qnum, a_queued_hw);
- if (skb_queue_empty(&tid->buf_q))
+ if (an->sta && !ath_tid_has_buffered(tid))
ieee80211_sta_set_buffered(an->sta, i, false);
}
ath_txq_unlock_complete(sc, tid->ac->txq);
@@ -1571,7 +1724,7 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
while (!list_empty(list)) {
bf = list_first_entry(list, struct ath_buf, list);
- if (bf->bf_stale) {
+ if (bf->bf_state.stale) {
list_del(&bf->list);
ath_tx_return_buffer(sc, bf);
@@ -1665,25 +1818,27 @@ void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq)
*/
void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
{
- struct ath_atx_ac *ac, *ac_tmp, *last_ac;
+ struct ath_atx_ac *ac, *last_ac;
struct ath_atx_tid *tid, *last_tid;
+ bool sent = false;
if (test_bit(SC_OP_HW_RESET, &sc->sc_flags) ||
- list_empty(&txq->axq_acq) ||
- txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
+ list_empty(&txq->axq_acq))
return;
rcu_read_lock();
- ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
+ while (!list_empty(&txq->axq_acq)) {
+ bool stop = false;
- list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq, list) {
+ ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
last_tid = list_entry(ac->tid_q.prev, struct ath_atx_tid, list);
list_del(&ac->list);
ac->sched = false;
while (!list_empty(&ac->tid_q)) {
+
tid = list_first_entry(&ac->tid_q, struct ath_atx_tid,
list);
list_del(&tid->list);
@@ -1692,17 +1847,17 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
if (tid->paused)
continue;
- ath_tx_sched_aggr(sc, txq, tid);
+ if (ath_tx_sched_aggr(sc, txq, tid, &stop))
+ sent = true;
/*
* add tid to round-robin queue if more frames
* are pending for the tid
*/
- if (!skb_queue_empty(&tid->buf_q))
+ if (ath_tid_has_buffered(tid))
ath_tx_queue_tid(txq, tid);
- if (tid == last_tid ||
- txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
+ if (stop || tid == last_tid)
break;
}
@@ -1711,9 +1866,17 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
list_add_tail(&ac->list, &txq->axq_acq);
}
- if (ac == last_ac ||
- txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
+ if (stop)
break;
+
+ if (ac == last_ac) {
+ if (!sent)
+ break;
+
+ sent = false;
+ last_ac = list_entry(txq->axq_acq.prev,
+ struct ath_atx_ac, list);
+ }
}
rcu_read_unlock();
@@ -1792,57 +1955,6 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
}
}
-static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_txq *txq,
- struct ath_atx_tid *tid, struct sk_buff *skb,
- struct ath_tx_control *txctl)
-{
- struct ath_frame_info *fi = get_frame_info(skb);
- struct list_head bf_head;
- struct ath_buf *bf;
-
- /*
- * Do not queue to h/w when any of the following conditions is true:
- * - there are pending frames in software queue
- * - the TID is currently paused for ADDBA/BAR request
- * - seqno is not within block-ack window
- * - h/w queue depth exceeds low water mark
- */
- if ((!skb_queue_empty(&tid->buf_q) || tid->paused ||
- !BAW_WITHIN(tid->seq_start, tid->baw_size, tid->seq_next) ||
- txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) &&
- txq != sc->tx.uapsdq) {
- /*
- * Add this frame to software queue for scheduling later
- * for aggregation.
- */
- TX_STAT_INC(txq->axq_qnum, a_queued_sw);
- __skb_queue_tail(&tid->buf_q, skb);
- if (!txctl->an || !txctl->an->sleeping)
- ath_tx_queue_tid(txq, tid);
- return;
- }
-
- bf = ath_tx_setup_buffer(sc, txq, tid, skb);
- if (!bf) {
- ieee80211_free_txskb(sc->hw, skb);
- return;
- }
-
- ath_set_rates(tid->an->vif, tid->an->sta, bf);
- bf->bf_state.bf_type = BUF_AMPDU;
- INIT_LIST_HEAD(&bf_head);
- list_add(&bf->list, &bf_head);
-
- /* Add sub-frame to BAW */
- ath_tx_addto_baw(sc, tid, bf->bf_state.seqno);
-
- /* Queue to h/w without aggregation */
- TX_STAT_INC(txq->axq_qnum, a_queued_hw);
- bf->bf_lastbf = bf;
- ath_tx_fill_desc(sc, bf, txq, fi->framelen);
- ath_tx_txqaddbuf(sc, txq, &bf_head, false);
-}
-
static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
struct ath_atx_tid *tid, struct sk_buff *skb)
{
@@ -1985,6 +2097,7 @@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_sta *sta = txctl->sta;
struct ieee80211_vif *vif = info->control.vif;
+ struct ath_vif *avp;
struct ath_softc *sc = hw->priv;
int frmlen = skb->len + FCS_LEN;
int padpos, padsize;
@@ -1992,6 +2105,10 @@ static int ath_tx_prepare(struct ieee80211_hw *hw, struct sk_buff *skb,
/* NOTE: sta can be NULL according to net/mac80211.h */
if (sta)
txctl->an = (struct ath_node *)sta->drv_priv;
+ else if (vif && ieee80211_is_data(hdr->frame_control)) {
+ avp = (void *)vif->drv_priv;
+ txctl->an = &avp->mcast_node;
+ }
if (info->control.hw_key)
frmlen += info->control.hw_key->icv_len;
@@ -2041,7 +2158,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath_txq *txq = txctl->txq;
struct ath_atx_tid *tid = NULL;
struct ath_buf *bf;
- u8 tidno;
int q;
int ret;
@@ -2069,27 +2185,31 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
ath_txq_unlock(sc, txq);
txq = sc->tx.uapsdq;
ath_txq_lock(sc, txq);
- }
-
- if (txctl->an && ieee80211_is_data_qos(hdr->frame_control)) {
- tidno = ieee80211_get_qos_ctl(hdr)[0] &
- IEEE80211_QOS_CTL_TID_MASK;
- tid = ATH_AN_2_TID(txctl->an, tidno);
+ } else if (txctl->an &&
+ ieee80211_is_data_present(hdr->frame_control)) {
+ tid = ath_get_skb_tid(sc, txctl->an, skb);
WARN_ON(tid->ac->txq != txctl->txq);
- }
- if ((info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
+ if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
+ tid->ac->clear_ps_filter = true;
+
/*
- * Try aggregation if it's a unicast data frame
- * and the destination is HT capable.
+		 * Add this frame to the software queue so it can be
+		 * scheduled later for aggregation.
*/
- ath_tx_send_ampdu(sc, txq, tid, skb, txctl);
+ TX_STAT_INC(txq->axq_qnum, a_queued_sw);
+ __skb_queue_tail(&tid->buf_q, skb);
+ if (!txctl->an->sleeping)
+ ath_tx_queue_tid(txq, tid);
+
+ ath_txq_schedule(sc, txq);
goto out;
}
bf = ath_tx_setup_buffer(sc, txq, tid, skb);
if (!bf) {
+ ath_txq_skb_done(sc, txq, skb);
if (txctl->paprd)
dev_kfree_skb_any(skb);
else
@@ -2142,7 +2262,7 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
bf->bf_lastbf = bf;
ath_set_rates(vif, NULL, bf);
- ath_buf_set_rate(sc, bf, &info, fi->framelen);
+ ath_buf_set_rate(sc, bf, &info, fi->framelen, false);
duration += info.rates[0].PktDuration;
if (bf_tail)
bf_tail->bf_next = bf;
@@ -2189,7 +2309,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
- int q, padpos, padsize;
+ int padpos, padsize;
unsigned long flags;
ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
@@ -2225,21 +2345,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
__skb_queue_tail(&txq->complete_q, skb);
-
- q = skb_get_queue_mapping(skb);
- if (txq == sc->tx.uapsdq)
- txq = sc->tx.txq_map[q];
-
- if (txq == sc->tx.txq_map[q]) {
- if (WARN_ON(--txq->pending_frames < 0))
- txq->pending_frames = 0;
-
- if (txq->stopped &&
- txq->pending_frames < sc->tx.txq_max_pending[q]) {
- ieee80211_wake_queue(sc->hw, q);
- txq->stopped = false;
- }
- }
+ ath_txq_skb_done(sc, txq, skb);
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
@@ -2360,8 +2466,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
if (list_empty(&txq->axq_q)) {
txq->axq_link = NULL;
- if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
- ath_txq_schedule(sc, txq);
+ ath_txq_schedule(sc, txq);
break;
}
bf = list_first_entry(&txq->axq_q, struct ath_buf, list);
@@ -2375,7 +2480,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
* it with the STALE flag.
*/
bf_held = NULL;
- if (bf->bf_stale) {
+ if (bf->bf_state.stale) {
bf_held = bf;
if (list_is_last(&bf_held->list, &txq->axq_q))
break;
@@ -2399,7 +2504,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
* however leave the last descriptor back as the holding
* descriptor for hw.
*/
- lastbf->bf_stale = true;
+ lastbf->bf_state.stale = true;
INIT_LIST_HEAD(&bf_head);
if (!list_is_singular(&lastbf->list))
list_cut_position(&bf_head,
@@ -2454,6 +2559,8 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
if (ts.qid == sc->beacon.beaconq) {
sc->beacon.tx_processed = true;
sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
+
+ ath9k_csa_is_finished(sc);
continue;
}
@@ -2470,7 +2577,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
}
bf = list_first_entry(fifo_list, struct ath_buf, list);
- if (bf->bf_stale) {
+ if (bf->bf_state.stale) {
list_del(&bf->list);
ath_tx_return_buffer(sc, bf);
bf = list_first_entry(fifo_list, struct ath_buf, list);
@@ -2492,7 +2599,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
ath_tx_txqaddbuf(sc, txq, &bf_q, true);
}
} else {
- lastbf->bf_stale = true;
+ lastbf->bf_state.stale = true;
if (bf != lastbf)
list_cut_position(&bf_head, fifo_list,
lastbf->list.prev);
@@ -2583,6 +2690,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
tid->paused = false;
tid->active = false;
__skb_queue_head_init(&tid->buf_q);
+ __skb_queue_head_init(&tid->retry_q);
acno = TID_TO_WME_AC(tidno);
tid->ac = &an->ac[acno];
}
@@ -2590,6 +2698,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
for (acno = 0, ac = &an->ac[acno];
acno < IEEE80211_NUM_ACS; acno++, ac++) {
ac->sched = false;
+ ac->clear_ps_filter = true;
ac->txq = sc->tx.txq_map[acno];
INIT_LIST_HEAD(&ac->tid_q);
}
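A minimal sketch (not part of the patch) of the RTS decision the xmit.c changes above apply to HT traffic: mac80211 leaves the RTS threshold to the driver for aggregates because only the driver knows the final A-MPDU length, so ath9k compares that length (or a single MPDU's frame length) against wiphy->rts_threshold itself. The helper name below is made up; the logic mirrors ath_tx_fill_desc() and ath_buf_set_rate() above.

static bool ath_want_rts(u32 rts_thresh, u32 len)
{
	/* rts_thresh == (u32) -1 means the RTS threshold is disabled */
	if (rts_thresh == (u32) -1)
		return false;

	/* a threshold of 0 forces RTS; otherwise compare against the length */
	return !rts_thresh || len > rts_thresh;
}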
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 4a33c6e39ca..349fa22a921 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -1860,7 +1860,8 @@ void *carl9170_alloc(size_t priv_size)
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
IEEE80211_HW_SUPPORTS_RC_TABLE |
- IEEE80211_HW_SIGNAL_DBM;
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
if (!modparam_noht) {
/*
diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c
index 4684dd98949..e935f61c7fa 100644
--- a/drivers/net/wireless/ath/carl9170/rx.c
+++ b/drivers/net/wireless/ath/carl9170/rx.c
@@ -602,8 +602,8 @@ static void carl9170_ba_check(struct ar9170 *ar, void *data, unsigned int len)
if (bar->start_seq_num == entry_bar->start_seq_num &&
TID_CHECK(bar->control, entry_bar->control) &&
- compare_ether_addr(bar->ra, entry_bar->ta) == 0 &&
- compare_ether_addr(bar->ta, entry_bar->ra) == 0) {
+ ether_addr_equal(bar->ra, entry_bar->ta) &&
+ ether_addr_equal(bar->ta, entry_bar->ra)) {
struct ieee80211_tx_info *tx_info;
tx_info = IEEE80211_SKB_CB(entry_skb);
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index f891d514d88..990dd42ae79 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -11,9 +11,6 @@ wil6210-y += txrx.o
wil6210-y += debug.o
wil6210-$(CONFIG_WIL6210_TRACING) += trace.o
-ifeq (, $(findstring -W,$(EXTRA_CFLAGS)))
- subdir-ccflags-y += -Werror
-endif
# for tracing framework to find trace.h
CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index e8308ec3097..1caa31992a7 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -51,7 +51,7 @@ static void wil_print_vring(struct seq_file *s, struct wil6210_priv *wil,
if ((i % 64) == 0 && (i != 0))
seq_printf(s, "\n");
seq_printf(s, "%s", (d->dma.status & BIT(0)) ?
- "S" : (vring->ctx[i] ? "H" : "h"));
+ "S" : (vring->ctx[i].skb ? "H" : "h"));
}
seq_printf(s, "\n");
}
@@ -145,7 +145,7 @@ static void wil_print_ring(struct seq_file *s, const char *prefix,
le16_to_cpu(hdr.type), hdr.flags);
if (len <= MAX_MBOXITEM_SIZE) {
int n = 0;
- unsigned char printbuf[16 * 3 + 2];
+ char printbuf[16 * 3 + 2];
unsigned char databuf[MAX_MBOXITEM_SIZE];
void __iomem *src = wmi_buffer(wil, d.addr) +
sizeof(struct wil6210_mbox_hdr);
@@ -406,7 +406,7 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
volatile struct vring_tx_desc *d =
&(vring->va[dbg_txdesc_index].tx);
volatile u32 *u = (volatile u32 *)d;
- struct sk_buff *skb = vring->ctx[dbg_txdesc_index];
+ struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb;
seq_printf(s, "Tx[%3d] = {\n", dbg_txdesc_index);
seq_printf(s, " MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
@@ -416,7 +416,7 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
seq_printf(s, " SKB = %p\n", skb);
if (skb) {
- unsigned char printbuf[16 * 3 + 2];
+ char printbuf[16 * 3 + 2];
int i = 0;
int len = le16_to_cpu(d->dma.length);
void *p = skb->data;
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 29dd1e58cb1..717178f09aa 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -127,6 +127,8 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr)
ndev->netdev_ops = &wil_netdev_ops;
ndev->ieee80211_ptr = wdev;
+ ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ ndev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy));
wdev->netdev = ndev;
diff --git a/drivers/net/wireless/ath/wil6210/trace.h b/drivers/net/wireless/ath/wil6210/trace.h
index eff1239be53..e59239d22b9 100644
--- a/drivers/net/wireless/ath/wil6210/trace.h
+++ b/drivers/net/wireless/ath/wil6210/trace.h
@@ -37,36 +37,40 @@ static inline void trace_ ## name(proto) {}
#endif /* !CONFIG_WIL6210_TRACING || defined(__CHECKER__) */
DECLARE_EVENT_CLASS(wil6210_wmi,
- TP_PROTO(u16 id, void *buf, u16 buf_len),
+ TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
- TP_ARGS(id, buf, buf_len),
+ TP_ARGS(wmi, buf, buf_len),
TP_STRUCT__entry(
+ __field(u8, mid)
__field(u16, id)
+ __field(u32, timestamp)
__field(u16, buf_len)
__dynamic_array(u8, buf, buf_len)
),
TP_fast_assign(
- __entry->id = id;
+ __entry->mid = wmi->mid;
+ __entry->id = le16_to_cpu(wmi->id);
+ __entry->timestamp = le32_to_cpu(wmi->timestamp);
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
- "id 0x%04x len %d",
- __entry->id, __entry->buf_len
+ "MID %d id 0x%04x len %d timestamp %d",
+ __entry->mid, __entry->id, __entry->buf_len, __entry->timestamp
)
);
DEFINE_EVENT(wil6210_wmi, wil6210_wmi_cmd,
- TP_PROTO(u16 id, void *buf, u16 buf_len),
- TP_ARGS(id, buf, buf_len)
+ TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
+ TP_ARGS(wmi, buf, buf_len)
);
DEFINE_EVENT(wil6210_wmi, wil6210_wmi_event,
- TP_PROTO(u16 id, void *buf, u16 buf_len),
- TP_ARGS(id, buf, buf_len)
+ TP_PROTO(struct wil6210_mbox_hdr_wmi *wmi, void *buf, u16 buf_len),
+ TP_ARGS(wmi, buf, buf_len)
);
#define WIL6210_MSG_MAX (200)
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index d240b24e1cc..d505b2676a7 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -18,6 +18,9 @@
#include <net/ieee80211_radiotap.h>
#include <linux/if_arp.h>
#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
#include "wil6210.h"
#include "wmi.h"
@@ -70,7 +73,7 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
vring->swhead = 0;
vring->swtail = 0;
- vring->ctx = kzalloc(vring->size * sizeof(vring->ctx[0]), GFP_KERNEL);
+ vring->ctx = kcalloc(vring->size, sizeof(vring->ctx[0]), GFP_KERNEL);
if (!vring->ctx) {
vring->va = NULL;
return -ENOMEM;
@@ -108,39 +111,39 @@ static void wil_vring_free(struct wil6210_priv *wil, struct vring *vring,
while (!wil_vring_is_empty(vring)) {
dma_addr_t pa;
- struct sk_buff *skb;
u16 dmalen;
+ struct wil_ctx *ctx;
if (tx) {
struct vring_tx_desc dd, *d = &dd;
volatile struct vring_tx_desc *_d =
&vring->va[vring->swtail].tx;
+ ctx = &vring->ctx[vring->swtail];
*d = *_d;
pa = wil_desc_addr(&d->dma.addr);
dmalen = le16_to_cpu(d->dma.length);
- skb = vring->ctx[vring->swtail];
- if (skb) {
- dma_unmap_single(dev, pa, dmalen,
- DMA_TO_DEVICE);
- dev_kfree_skb_any(skb);
- vring->ctx[vring->swtail] = NULL;
- } else {
+ if (vring->ctx[vring->swtail].mapped_as_page) {
dma_unmap_page(dev, pa, dmalen,
DMA_TO_DEVICE);
+ } else {
+ dma_unmap_single(dev, pa, dmalen,
+ DMA_TO_DEVICE);
}
+ if (ctx->skb)
+ dev_kfree_skb_any(ctx->skb);
vring->swtail = wil_vring_next_tail(vring);
} else { /* rx */
struct vring_rx_desc dd, *d = &dd;
volatile struct vring_rx_desc *_d =
- &vring->va[vring->swtail].rx;
+ &vring->va[vring->swhead].rx;
+ ctx = &vring->ctx[vring->swhead];
*d = *_d;
pa = wil_desc_addr(&d->dma.addr);
dmalen = le16_to_cpu(d->dma.length);
- skb = vring->ctx[vring->swhead];
dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE);
- kfree_skb(skb);
+ kfree_skb(ctx->skb);
wil_vring_advance_head(vring, 1);
}
}
@@ -187,7 +190,7 @@ static int wil_vring_alloc_skb(struct wil6210_priv *wil, struct vring *vring,
d->dma.status = 0; /* BIT(0) should be 0 for HW_OWNED */
d->dma.length = cpu_to_le16(sz);
*_d = *d;
- vring->ctx[i] = skb;
+ vring->ctx[i].skb = skb;
return 0;
}
@@ -352,11 +355,11 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
return NULL;
}
- skb = vring->ctx[vring->swhead];
+ skb = vring->ctx[vring->swhead].skb;
d = wil_skb_rxdesc(skb);
*d = *_d;
pa = wil_desc_addr(&d->dma.addr);
- vring->ctx[vring->swhead] = NULL;
+ vring->ctx[vring->swhead].skb = NULL;
wil_vring_advance_head(vring, 1);
dma_unmap_single(dev, pa, sz, DMA_FROM_DEVICE);
@@ -407,6 +410,21 @@ static struct sk_buff *wil_vring_reap_rx(struct wil6210_priv *wil,
return NULL;
}
+ /* L4 IDENT is on when the HW calculated the checksum; check the
+ * status and, on error, leave the packet unmarked so that the
+ * higher stack layers re-verify it (and retransmit if required)
+ */
+ if (d->dma.status & RX_DMA_STATUS_L4_IDENT) {
+ /* L4 protocol identified, csum calculated */
+ if ((d->dma.error & RX_DMA_ERROR_L4_ERR) == 0)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* If the HW reports a bad checksum, let the IP stack re-check it.
+ * For example, the HW does not understand a Microsoft IP stack that
+ * mis-calculates the TCP checksum: where it should be 0x0,
+ * it writes 0xffff, in violation of RFC 1624
+ */
+ }
+
ds_bits = wil_rxdesc_ds_bits(d);
if (ds_bits == 1) {
/*
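The Rx hunk above only ever upgrades the skb to CHECKSUM_UNNECESSARY; on an L4 error it leaves ip_summed alone so the stack re-verifies the checksum itself. A hedged sketch of that policy, with rx_l4_identified/rx_l4_error as illustrative stand-ins for the RX_DMA_STATUS_L4_IDENT and RX_DMA_ERROR_L4_ERR descriptor bits:

#include <linux/skbuff.h>

/* Mark the skb only when the HW both identified the L4 protocol and
 * reported no error; otherwise leave ip_summed at CHECKSUM_NONE so the
 * IP stack recomputes the checksum.
 */
static void sketch_mark_rx_csum(struct sk_buff *skb,
                                bool rx_l4_identified, bool rx_l4_error)
{
        if (rx_l4_identified && !rx_l4_error)
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        /* on error: do nothing, the stack re-checks the packet */
}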
@@ -646,6 +664,53 @@ static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
return 0;
}
+static int wil_tx_desc_offload_cksum_set(struct wil6210_priv *wil,
+ struct vring_tx_desc *d,
+ struct sk_buff *skb)
+{
+ int protocol;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ switch (skb->protocol) {
+ case cpu_to_be16(ETH_P_IP):
+ protocol = ip_hdr(skb)->protocol;
+ break;
+ case cpu_to_be16(ETH_P_IPV6):
+ protocol = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ d->dma.d0 |= (2 << DMA_CFG_DESC_TX_0_L4_TYPE_POS);
+ /* L4 header len: TCP header length */
+ d->dma.d0 |=
+ (tcp_hdrlen(skb) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
+ break;
+ case IPPROTO_UDP:
+ /* L4 header len: UDP header length */
+ d->dma.d0 |=
+ (sizeof(struct udphdr) & DMA_CFG_DESC_TX_0_L4_LENGTH_MSK);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ d->dma.ip_length = skb_network_header_len(skb);
+ d->dma.b11 = ETH_HLEN; /* MAC header length */
+ d->dma.b11 |= BIT(DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS);
+ /* Enable TCP/UDP checksum */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_TCP_UDP_CHECKSUM_EN_POS);
+ /* Calculate pseudo-header */
+ d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_PSEUDO_HEADER_CALC_EN_POS);
+
+ return 0;
+}
+
static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
struct sk_buff *skb)
{
@@ -655,7 +720,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
u32 swhead = vring->swhead;
int avail = wil_vring_avail_tx(vring);
int nr_frags = skb_shinfo(skb)->nr_frags;
- uint f;
+ uint f = 0;
int vring_index = vring - wil->vring_tx;
uint i = swhead;
dma_addr_t pa;
@@ -686,13 +751,20 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
return -EINVAL;
/* 1-st segment */
wil_tx_desc_map(d, pa, skb_headlen(skb), vring_index);
+ /* Process TCP/UDP checksum offloading */
+ if (wil_tx_desc_offload_cksum_set(wil, d, skb)) {
+ wil_err(wil, "VRING #%d Failed to set cksum, drop packet\n",
+ vring_index);
+ goto dma_error;
+ }
+
d->mac.d[2] |= ((nr_frags + 1) <<
MAC_CFG_DESC_TX_2_NUM_OF_DESCRIPTORS_POS);
if (nr_frags)
*_d = *d;
/* middle segments */
- for (f = 0; f < nr_frags; f++) {
+ for (; f < nr_frags; f++) {
const struct skb_frag_struct *frag =
&skb_shinfo(skb)->frags[f];
int len = skb_frag_size(frag);
@@ -703,7 +775,7 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
if (unlikely(dma_mapping_error(dev, pa)))
goto dma_error;
wil_tx_desc_map(d, pa, len, vring_index);
- vring->ctx[i] = NULL;
+ vring->ctx[i].mapped_as_page = 1;
*_d = *d;
}
/* for the last seg only */
@@ -712,6 +784,12 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
d->dma.d0 |= BIT(DMA_CFG_DESC_TX_0_CMD_DMA_IT_POS);
*_d = *d;
+ /* hold reference to skb
+ * to prevent skb release before accounting
+ * in case of immediate "tx done"
+ */
+ vring->ctx[i].skb = skb_get(skb);
+
wil_hex_dump_txrx("Tx ", DUMP_PREFIX_NONE, 32, 4,
(const void *)d, sizeof(*d), false);
@@ -720,29 +798,31 @@ static int wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
wil_dbg_txrx(wil, "Tx swhead %d -> %d\n", swhead, vring->swhead);
trace_wil6210_tx(vring_index, swhead, skb->len, nr_frags);
iowrite32(vring->swhead, wil->csr + HOSTADDR(vring->hwtail));
- /* hold reference to skb
- * to prevent skb release before accounting
- * in case of immediate "tx done"
- */
- vring->ctx[i] = skb_get(skb);
return 0;
dma_error:
/* unmap what we have mapped */
- /* Note: increment @f to operate with positive index */
- for (f++; f > 0; f--) {
+ nr_frags = f + 1; /* frags mapped + one for skb head */
+ for (f = 0; f < nr_frags; f++) {
u16 dmalen;
+ struct wil_ctx *ctx;
i = (swhead + f) % vring->size;
+ ctx = &vring->ctx[i];
_d = &(vring->va[i].tx);
*d = *_d;
_d->dma.status = TX_DMA_STATUS_DU;
pa = wil_desc_addr(&d->dma.addr);
dmalen = le16_to_cpu(d->dma.length);
- if (vring->ctx[i])
- dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
- else
+ if (ctx->mapped_as_page)
dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+
+ if (ctx->skb)
+ dev_kfree_skb_any(ctx->skb);
+
+ memset(ctx, 0, sizeof(*ctx));
}
return -EINVAL;
@@ -821,8 +901,9 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
&vring->va[vring->swtail].tx;
struct vring_tx_desc dd, *d = &dd;
dma_addr_t pa;
- struct sk_buff *skb;
u16 dmalen;
+ struct wil_ctx *ctx = &vring->ctx[vring->swtail];
+ struct sk_buff *skb = ctx->skb;
*d = *_d;
@@ -840,7 +921,11 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
(const void *)d, sizeof(*d), false);
pa = wil_desc_addr(&d->dma.addr);
- skb = vring->ctx[vring->swtail];
+ if (ctx->mapped_as_page)
+ dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
+ else
+ dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
+
if (skb) {
if (d->dma.error == 0) {
ndev->stats.tx_packets++;
@@ -849,16 +934,15 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
ndev->stats.tx_errors++;
}
- dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
- vring->ctx[vring->swtail] = NULL;
- } else {
- dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
}
- d->dma.addr.addr_low = 0;
- d->dma.addr.addr_high = 0;
- d->dma.length = 0;
- d->dma.status = TX_DMA_STATUS_DU;
+ memset(ctx, 0, sizeof(*ctx));
+ /*
+ * There is no need to touch the HW descriptor:
+ * - the status bit TX_DMA_STATUS_DU is set by design,
+ * so the hardware will not try to process this descriptor,
+ * - the rest of the descriptor will be initialized on Tx.
+ */
vring->swtail = wil_vring_next_tail(vring);
done++;
}
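The txrx.c changes above hinge on struct wil_ctx remembering how each Tx descriptor was mapped: the skb head via dma_map_single(), fragments via dma_map_page(), so teardown must pick the matching unmap call. A minimal sketch of that rule (the helper and struct names here are illustrative, not part of the driver):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <linux/string.h>

struct sketch_ctx {
        struct sk_buff *skb;
        u8 mapped_as_page:1;
};

/* Undo the mapping recorded in the per-descriptor context, free the skb
 * (held only on the last descriptor of a frame), and clear the context.
 */
static void sketch_unmap_desc(struct device *dev, struct sketch_ctx *ctx,
                              dma_addr_t pa, u16 dmalen)
{
        if (ctx->mapped_as_page)
                dma_unmap_page(dev, pa, dmalen, DMA_TO_DEVICE);
        else
                dma_unmap_single(dev, pa, dmalen, DMA_TO_DEVICE);

        if (ctx->skb)
                dev_kfree_skb_any(ctx->skb);
        memset(ctx, 0, sizeof(*ctx));
}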
diff --git a/drivers/net/wireless/ath/wil6210/txrx.h b/drivers/net/wireless/ath/wil6210/txrx.h
index 859aea68a1f..b3828279204 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.h
+++ b/drivers/net/wireless/ath/wil6210/txrx.h
@@ -235,7 +235,16 @@ struct vring_tx_mac {
#define DMA_CFG_DESC_TX_0_L4_TYPE_POS 30
#define DMA_CFG_DESC_TX_0_L4_TYPE_LEN 2
-#define DMA_CFG_DESC_TX_0_L4_TYPE_MSK 0xC0000000
+#define DMA_CFG_DESC_TX_0_L4_TYPE_MSK 0xC0000000 /* L4 type: 0-UDP, 2-TCP */
+
+
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_POS 0
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_LEN 7
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_MAC_LEN_MSK 0x7F /* MAC hdr len */
+
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_POS 7
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_LEN 1
+#define DMA_CFG_DESC_TX_OFFLOAD_CFG_L3T_IPV4_MSK 0x80 /* 1-IPv4, 0-IPv6 */
#define TX_DMA_STATUS_DU BIT(0)
@@ -334,8 +343,17 @@ struct vring_rx_mac {
#define RX_DMA_D0_CMD_DMA_IT BIT(10)
+/* Error field, offload bits */
+#define RX_DMA_ERROR_L3_ERR BIT(4)
+#define RX_DMA_ERROR_L4_ERR BIT(5)
+
+
+/* Status field */
#define RX_DMA_STATUS_DU BIT(0)
#define RX_DMA_STATUS_ERROR BIT(2)
+
+#define RX_DMA_STATUS_L3_IDENT BIT(4)
+#define RX_DMA_STATUS_L4_IDENT BIT(5)
#define RX_DMA_STATUS_PHY_INFO BIT(6)
struct vring_rx_dma {
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 44fdab51de7..c4a51638736 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -156,11 +156,22 @@ struct wil6210_mbox_hdr {
/* max. value for wil6210_mbox_hdr.len */
#define MAX_MBOXITEM_SIZE (240)
+/**
+ * struct wil6210_mbox_hdr_wmi - WMI header
+ *
+ * @mid: MAC ID
+ * 00 - default, created by FW
+ * 01..0f - WiFi ports, driver to create
+ * 10..fe - debug
+ * ff - broadcast
+ * @id: command/event ID
+ * @timestamp: FW fills for events, free-running msec timer
+ */
struct wil6210_mbox_hdr_wmi {
- u8 reserved0[2];
+ u8 mid;
+ u8 reserved;
__le16 id;
- __le16 info1; /* bits [0..3] - device_id, rest - unused */
- u8 reserved1[2];
+ __le32 timestamp;
} __packed;
struct pending_wmi_event {
@@ -172,6 +183,14 @@ struct pending_wmi_event {
} __packed event;
};
+/**
+ * struct wil_ctx - software context for Vring descriptor
+ */
+struct wil_ctx {
+ struct sk_buff *skb;
+ u8 mapped_as_page:1;
+};
+
union vring_desc;
struct vring {
@@ -181,7 +200,7 @@ struct vring {
u32 swtail;
u32 swhead;
u32 hwtail; /* write here to inform hw */
- void **ctx; /* void *ctx[size] - software context */
+ struct wil_ctx *ctx; /* ctx[size] - software context */
};
enum { /* for wil6210_priv.status */
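The re-laid-out WMI header above (mid, reserved, le16 id, le32 timestamp) occupies the same 8 bytes as the old reserved0[2]/id/info1/reserved1[2] layout, so the mailbox wire format is unchanged. A compile-time guard along these lines could assert that; it is illustrative only and not part of the patch:

#include <linux/bug.h>
#include <linux/types.h>

/* Stand-in mirroring the new wil6210_mbox_hdr_wmi layout. */
struct sketch_mbox_hdr_wmi {
        u8 mid;
        u8 reserved;
        __le16 id;
        __le32 timestamp;
} __packed;

static inline void sketch_check_wmi_hdr_size(void)
{
        /* both the old and the new layout are 8 bytes on the wire */
        BUILD_BUG_ON(sizeof(struct sketch_mbox_hdr_wmi) != 8);
}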
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index dc8059ad4ba..063963ee422 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -172,8 +172,8 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
.len = cpu_to_le16(sizeof(cmd.wmi) + len),
},
.wmi = {
+ .mid = 0,
.id = cpu_to_le16(cmdid),
- .info1 = 0,
},
};
struct wil6210_mbox_ring *r = &wil->mbox_ctl.tx;
@@ -248,7 +248,7 @@ static int __wmi_send(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len)
iowrite32(r->head = next_head, wil->csr + HOST_MBOX +
offsetof(struct wil6210_mbox_ctl, tx.head));
- trace_wil6210_wmi_cmd(cmdid, buf, len);
+ trace_wil6210_wmi_cmd(&cmd.wmi, buf, len);
/* interrupt to FW */
iowrite32(SW_INT_MBOX, wil->csr + HOST_SW_INT);
@@ -339,7 +339,7 @@ static void wmi_evt_rx_mgmt(struct wil6210_priv *wil, int id, void *d, int len)
}
} else {
cfg80211_rx_mgmt(wil->wdev, freq, signal,
- (void *)rx_mgmt_frame, d_len, GFP_KERNEL);
+ (void *)rx_mgmt_frame, d_len, 0, GFP_KERNEL);
}
}
@@ -640,9 +640,13 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
hdr.flags);
if ((hdr.type == WIL_MBOX_HDR_TYPE_WMI) &&
(len >= sizeof(struct wil6210_mbox_hdr_wmi))) {
- u16 id = le16_to_cpu(evt->event.wmi.id);
- wil_dbg_wmi(wil, "WMI event 0x%04x\n", id);
- trace_wil6210_wmi_event(id, &evt->event.wmi, len);
+ struct wil6210_mbox_hdr_wmi *wmi = &evt->event.wmi;
+ u16 id = le16_to_cpu(wmi->id);
+ u32 tstamp = le32_to_cpu(wmi->timestamp);
+ wil_dbg_wmi(wil, "WMI event 0x%04x MID %d @%d msec\n",
+ id, wmi->mid, tstamp);
+ trace_wil6210_wmi_event(wmi, &wmi[1],
+ len - sizeof(*wmi));
}
wil_hex_dump_wmi("evt ", DUMP_PREFIX_OFFSET, 16, 1,
&evt->event.hdr, sizeof(hdr) + len, true);
@@ -920,6 +924,12 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct vring *vring)
cmd.sniffer_cfg.phy_support =
cpu_to_le32((wil->monitor_flags & MONITOR_FLAG_CONTROL)
? WMI_SNIFFER_CP : WMI_SNIFFER_DP);
+ } else {
+ /* Initialize offload (in non-sniffer mode).
+ * The Linux IP stack always calculates the IP checksum;
+ * the HW always calculates the TCP/UDP checksum.
+ */
+ cmd.l3_l4_ctrl |= (1 << L3_L4_CTRL_TCPIP_CHECKSUM_EN_POS);
}
/* typical time for secure PCP is 840ms */
rc = wmi_call(wil, WMI_CFG_RX_CHAIN_CMDID, &cmd, sizeof(cmd),
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index f7c70b3a6ea..c51d2dc489e 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -431,9 +431,9 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
- ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
- ring_mem_size, &(ring->dmabase),
- GFP_KERNEL | __GFP_ZERO);
+ ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev,
+ ring_mem_size, &(ring->dmabase),
+ GFP_KERNEL);
if (!ring->descbase)
return -ENOMEM;
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 0e933bb7154..ccd24f0acb8 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4645,6 +4645,19 @@ static void b43_wireless_core_exit(struct b43_wldev *dev)
b43_maskset32(dev, B43_MMIO_MACCTL, ~B43_MACCTL_PSM_RUN,
B43_MACCTL_PSM_JMP0);
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ bcma_core_pci_down(dev->dev->bdev->bus);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ /* TODO */
+ break;
+#endif
+ }
+
b43_dma_free(dev);
b43_pio_free(dev);
b43_chip_exit(dev);
@@ -4684,6 +4697,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
case B43_BUS_BCMA:
bcma_core_pci_irq_ctl(&dev->dev->bdev->bus->drv_pci[0],
dev->dev->bdev, true);
+ bcma_core_pci_up(dev->dev->bdev->bus);
break;
#endif
#ifdef CONFIG_B43_SSB
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index faeafe219c5..42eb26c99e1 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -331,10 +331,9 @@ void free_descriptor_buffer(struct b43legacy_dmaring *ring,
static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
/* GFP flags must match the flags in free_ringmemory()! */
- ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
- B43legacy_DMA_RINGMEMSIZE,
- &(ring->dmabase),
- GFP_KERNEL | __GFP_ZERO);
+ ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev,
+ B43legacy_DMA_RINGMEMSIZE,
+ &(ring->dmabase), GFP_KERNEL);
if (!ring->descbase)
return -ENOMEM;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index e3f3c48f86d..e13b1a65c65 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -592,6 +592,7 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, u8 *buf, uint nbytes)
{
struct sk_buff *mypkt;
+ struct sk_buff_head pktq;
int err;
mypkt = brcmu_pkt_buf_get_skb(nbytes);
@@ -602,7 +603,10 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
}
memcpy(mypkt->data, buf, nbytes);
- err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, mypkt);
+ __skb_queue_head_init(&pktq);
+ __skb_queue_tail(&pktq, mypkt);
+ err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, &pktq);
+ __skb_dequeue_tail(&pktq);
brcmu_pkt_buf_free_skb(mypkt);
return err;
@@ -611,22 +615,18 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
int
brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff *pkt)
+ uint flags, struct sk_buff_head *pktq)
{
uint width;
int err = 0;
- struct sk_buff_head pkt_list;
brcmf_dbg(SDIO, "fun = %d, addr = 0x%x, size = %d\n",
- fn, addr, pkt->len);
+ fn, addr, pktq->qlen);
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
brcmf_sdio_addrprep(sdiodev, width, &addr);
- skb_queue_head_init(&pkt_list);
- skb_queue_tail(&pkt_list, pkt);
- err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, &pkt_list);
- skb_dequeue_tail(&pkt_list);
+ err = brcmf_sdio_buffrw(sdiodev, fn, true, addr, pktq);
return err;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 289e386f01f..64f4a2bc8dd 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -350,7 +350,6 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
sdiodev->bus_if = bus_if;
bus_if->bus_priv.sdio = sdiodev;
- bus_if->align = BRCMF_SDALIGN;
dev_set_drvdata(&func->dev, bus_if);
dev_set_drvdata(&sdiodev->func[1]->dev, bus_if);
sdiodev->dev = &sdiodev->func[1]->dev;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 86cbfe2c7c6..2eb9e642c9b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -194,6 +194,8 @@
#define BRCMF_E_IF_DEL 2
#define BRCMF_E_IF_CHANGE 3
+#define BRCMF_E_IF_FLAG_NOIF 1
+
#define BRCMF_E_IF_ROLE_STA 0
#define BRCMF_E_IF_ROLE_AP 1
#define BRCMF_E_IF_ROLE_WDS 2
@@ -209,6 +211,8 @@
#define BRCMF_DCMD_MEDLEN 1536
#define BRCMF_DCMD_MAXLEN 8192
+#define BRCMF_AMPDU_RX_REORDER_MAXFLOWS 256
+
/* Pattern matching filter. Specifies an offset within received packets to
* start matching, the pattern to match, the size of the pattern, and a bitmask
* that indicates which bits within the pattern should be matched.
@@ -505,6 +509,25 @@ struct brcmf_dcmd {
uint needed; /* bytes needed (optional) */
};
+/**
+ * struct brcmf_ampdu_rx_reorder - AMPDU receive reorder info
+ *
+ * @pktslots: dynamically allocated array for ordering AMPDU packets.
+ * @flow_id: AMPDU flow identifier.
+ * @cur_idx: last AMPDU index from firmware.
+ * @exp_idx: expected next AMPDU index.
+ * @max_idx: maximum amount of packets per AMPDU.
+ * @pend_pkts: number of packets currently in @pktslots.
+ */
+struct brcmf_ampdu_rx_reorder {
+ struct sk_buff **pktslots;
+ u8 flow_id;
+ u8 cur_idx;
+ u8 exp_idx;
+ u8 max_idx;
+ u8 pend_pkts;
+};
+
/* Forward decls for struct brcmf_pub (see below) */
struct brcmf_proto; /* device communication protocol info */
struct brcmf_cfg80211_dev; /* cfg80211 device info */
@@ -536,9 +559,10 @@ struct brcmf_pub {
struct brcmf_fweh_info fweh;
- bool fw_signals;
struct brcmf_fws_info *fws;
- spinlock_t fws_spinlock;
+
+ struct brcmf_ampdu_rx_reorder
+ *reorder_flows[BRCMF_AMPDU_RX_REORDER_MAXFLOWS];
#ifdef DEBUG
struct dentry *dbgfs_dir;
#endif
@@ -604,6 +628,9 @@ struct brcmf_if {
wait_queue_head_t pend_8021x_wait;
};
+struct brcmf_skb_reorder_data {
+ u8 *reorder;
+};
extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
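struct brcmf_skb_reorder_data rides in skb->cb: the bus layer points it at the firmware's reorder TLV and the receive path later chooses between the reorder machinery and plain delivery. A hedged sketch of the tagging side (the helper name is a stand-in; the 48-byte skb->cb comfortably holds the single pointer):

#include <linux/skbuff.h>

struct sketch_skb_reorder_data {
        u8 *reorder;
};

/* Attach (or clear) the reorder TLV pointer carried with each Rx packet;
 * a NULL pointer means "deliver directly via netif_rx".
 */
static void sketch_tag_skb(struct sk_buff *skb, u8 *reorder_tlv)
{
        struct sketch_skb_reorder_data *rd = (void *)skb->cb;

        rd->reorder = reorder_tlv;
}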
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index 080395f49fa..f7c1985844e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -36,7 +36,11 @@ struct brcmf_bus_dcmd {
*
* @init: prepare for communication with dongle.
* @stop: clear pending frames, disable data flow.
- * @txdata: send a data frame to the dongle (callee disposes skb).
+ * @txdata: send a data frame to the dongle. When the data
+ * has been transferred, the common driver must be
+ * notified using brcmf_txcomplete(). The common
+ * driver calls this function with interrupts
+ * disabled.
* @txctl: transmit a control request message to dongle.
* @rxctl: receive a control response message from dongle.
* @gettxq: obtain a reference of bus transmit queue (optional).
@@ -65,7 +69,6 @@ struct brcmf_bus_ops {
* @maxctl: maximum size for rxctl request message.
* @tx_realloc: number of tx packets realloced for headroom.
* @dstats: dongle-based statistical data.
- * @align: alignment requirement for the bus.
* @dcmd_list: bus/device specific dongle initialization commands.
* @chip: device identifier of the dongle chip.
* @chiprev: revision of the dongle chip.
@@ -80,7 +83,6 @@ struct brcmf_bus {
enum brcmf_bus_state state;
uint maxctl;
unsigned long tx_realloc;
- u8 align;
u32 chip;
u32 chiprev;
struct list_head dcmd_list;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
index c37b9d68e45..0f9e9057e7d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c
@@ -50,7 +50,7 @@ int brcmf_debugfs_attach(struct brcmf_pub *drvr)
return -ENODEV;
drvr->dbgfs_dir = debugfs_create_dir(dev_name(dev), root_folder);
- return PTR_RET(drvr->dbgfs_dir);
+ return PTR_ERR_OR_ZERO(drvr->dbgfs_dir);
}
void brcmf_debugfs_detach(struct brcmf_pub *drvr)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 8e8975562ec..e067aec1fbf 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -38,6 +38,19 @@ MODULE_LICENSE("Dual BSD/GPL");
#define MAX_WAIT_FOR_8021X_TX 50 /* msecs */
+/* AMPDU rx reordering definitions */
+#define BRCMF_RXREORDER_FLOWID_OFFSET 0
+#define BRCMF_RXREORDER_MAXIDX_OFFSET 2
+#define BRCMF_RXREORDER_FLAGS_OFFSET 4
+#define BRCMF_RXREORDER_CURIDX_OFFSET 6
+#define BRCMF_RXREORDER_EXPIDX_OFFSET 8
+
+#define BRCMF_RXREORDER_DEL_FLOW 0x01
+#define BRCMF_RXREORDER_FLUSH_ALL 0x02
+#define BRCMF_RXREORDER_CURIDX_VALID 0x04
+#define BRCMF_RXREORDER_EXPIDX_VALID 0x08
+#define BRCMF_RXREORDER_NEW_HOLE 0x10
+
/* Error bits */
int brcmf_msg_level;
module_param_named(debug, brcmf_msg_level, int, S_IRUSR | S_IWUSR);
@@ -242,7 +255,7 @@ void brcmf_txflowblock_if(struct brcmf_if *ifp,
{
unsigned long flags;
- if (!ifp)
+ if (!ifp || !ifp->ndev)
return;
brcmf_dbg(TRACE, "enter: idx=%d stop=0x%X reason=%d state=%d\n",
@@ -265,17 +278,234 @@ void brcmf_txflowblock(struct device *dev, bool state)
{
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
- int i;
brcmf_dbg(TRACE, "Enter\n");
- if (brcmf_fws_fc_active(drvr->fws)) {
- brcmf_fws_bus_blocked(drvr, state);
+ brcmf_fws_bus_blocked(drvr, state);
+}
+
+static void brcmf_netif_rx(struct brcmf_if *ifp, struct sk_buff *skb)
+{
+ skb->dev = ifp->ndev;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+
+ if (skb->pkt_type == PACKET_MULTICAST)
+ ifp->stats.multicast++;
+
+ /* Process special event packets */
+ brcmf_fweh_process_skb(ifp->drvr, skb);
+
+ if (!(ifp->ndev->flags & IFF_UP)) {
+ brcmu_pkt_buf_free_skb(skb);
+ return;
+ }
+
+ ifp->stats.rx_bytes += skb->len;
+ ifp->stats.rx_packets++;
+
+ brcmf_dbg(DATA, "rx proto=0x%X\n", ntohs(skb->protocol));
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ /* If the receive is not processed inside an ISR,
+ * the softirqd must be woken explicitly to service
+ * the NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
+ */
+ netif_rx_ni(skb);
+}
+
+static void brcmf_rxreorder_get_skb_list(struct brcmf_ampdu_rx_reorder *rfi,
+ u8 start, u8 end,
+ struct sk_buff_head *skb_list)
+{
+ /* initialize return list */
+ __skb_queue_head_init(skb_list);
+
+ if (rfi->pend_pkts == 0) {
+ brcmf_dbg(INFO, "no packets in reorder queue\n");
+ return;
+ }
+
+ do {
+ if (rfi->pktslots[start]) {
+ __skb_queue_tail(skb_list, rfi->pktslots[start]);
+ rfi->pktslots[start] = NULL;
+ }
+ start++;
+ if (start > rfi->max_idx)
+ start = 0;
+ } while (start != end);
+ rfi->pend_pkts -= skb_queue_len(skb_list);
+}
+
+static void brcmf_rxreorder_process_info(struct brcmf_if *ifp, u8 *reorder_data,
+ struct sk_buff *pkt)
+{
+ u8 flow_id, max_idx, cur_idx, exp_idx, end_idx;
+ struct brcmf_ampdu_rx_reorder *rfi;
+ struct sk_buff_head reorder_list;
+ struct sk_buff *pnext;
+ u8 flags;
+ u32 buf_size;
+
+ flow_id = reorder_data[BRCMF_RXREORDER_FLOWID_OFFSET];
+ flags = reorder_data[BRCMF_RXREORDER_FLAGS_OFFSET];
+
+ /* validate flags and flow id */
+ if (flags == 0xFF) {
+ brcmf_err("invalid flags...so ignore this packet\n");
+ brcmf_netif_rx(ifp, pkt);
+ return;
+ }
+
+ rfi = ifp->drvr->reorder_flows[flow_id];
+ if (flags & BRCMF_RXREORDER_DEL_FLOW) {
+ brcmf_dbg(INFO, "flow-%d: delete\n",
+ flow_id);
+
+ if (rfi == NULL) {
+ brcmf_dbg(INFO, "received flags to cleanup, but no flow (%d) yet\n",
+ flow_id);
+ brcmf_netif_rx(ifp, pkt);
+ return;
+ }
+
+ brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, rfi->exp_idx,
+ &reorder_list);
+ /* add the last packet */
+ __skb_queue_tail(&reorder_list, pkt);
+ kfree(rfi);
+ ifp->drvr->reorder_flows[flow_id] = NULL;
+ goto netif_rx;
+ }
+ /* from here on we need a flow reorder instance */
+ if (rfi == NULL) {
+ buf_size = sizeof(*rfi);
+ max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
+
+ buf_size += (max_idx + 1) * sizeof(pkt);
+
+ /* allocate space for flow reorder info */
+ brcmf_dbg(INFO, "flow-%d: start, maxidx %d\n",
+ flow_id, max_idx);
+ rfi = kzalloc(buf_size, GFP_ATOMIC);
+ if (rfi == NULL) {
+ brcmf_err("failed to alloc buffer\n");
+ brcmf_netif_rx(ifp, pkt);
+ return;
+ }
+
+ ifp->drvr->reorder_flows[flow_id] = rfi;
+ rfi->pktslots = (struct sk_buff **)(rfi+1);
+ rfi->max_idx = max_idx;
+ }
+ if (flags & BRCMF_RXREORDER_NEW_HOLE) {
+ if (rfi->pend_pkts) {
+ brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx,
+ rfi->exp_idx,
+ &reorder_list);
+ WARN_ON(rfi->pend_pkts);
+ } else {
+ __skb_queue_head_init(&reorder_list);
+ }
+ rfi->cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
+ rfi->exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+ rfi->max_idx = reorder_data[BRCMF_RXREORDER_MAXIDX_OFFSET];
+ rfi->pktslots[rfi->cur_idx] = pkt;
+ rfi->pend_pkts++;
+ brcmf_dbg(DATA, "flow-%d: new hole %d (%d), pending %d\n",
+ flow_id, rfi->cur_idx, rfi->exp_idx, rfi->pend_pkts);
+ } else if (flags & BRCMF_RXREORDER_CURIDX_VALID) {
+ cur_idx = reorder_data[BRCMF_RXREORDER_CURIDX_OFFSET];
+ exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+
+ if ((exp_idx == rfi->exp_idx) && (cur_idx != rfi->exp_idx)) {
+ /* still in the current hole */
+ /* enqueue the current on the buffer chain */
+ if (rfi->pktslots[cur_idx] != NULL) {
+ brcmf_dbg(INFO, "HOLE: ERROR buffer pending..free it\n");
+ brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
+ rfi->pktslots[cur_idx] = NULL;
+ }
+ rfi->pktslots[cur_idx] = pkt;
+ rfi->pend_pkts++;
+ rfi->cur_idx = cur_idx;
+ brcmf_dbg(DATA, "flow-%d: store pkt %d (%d), pending %d\n",
+ flow_id, cur_idx, exp_idx, rfi->pend_pkts);
+
+ /* can return now as there is no reorder
+ * list to process.
+ */
+ return;
+ }
+ if (rfi->exp_idx == cur_idx) {
+ if (rfi->pktslots[cur_idx] != NULL) {
+ brcmf_dbg(INFO, "error buffer pending..free it\n");
+ brcmu_pkt_buf_free_skb(rfi->pktslots[cur_idx]);
+ rfi->pktslots[cur_idx] = NULL;
+ }
+ rfi->pktslots[cur_idx] = pkt;
+ rfi->pend_pkts++;
+
+ /* got the expected one. flush from current to expected
+ * and update expected
+ */
+ brcmf_dbg(DATA, "flow-%d: expected %d (%d), pending %d\n",
+ flow_id, cur_idx, exp_idx, rfi->pend_pkts);
+
+ rfi->cur_idx = cur_idx;
+ rfi->exp_idx = exp_idx;
+
+ brcmf_rxreorder_get_skb_list(rfi, cur_idx, exp_idx,
+ &reorder_list);
+ brcmf_dbg(DATA, "flow-%d: freeing buffers %d, pending %d\n",
+ flow_id, skb_queue_len(&reorder_list),
+ rfi->pend_pkts);
+ } else {
+ u8 end_idx;
+
+ brcmf_dbg(DATA, "flow-%d (0x%x): both moved, old %d/%d, new %d/%d\n",
+ flow_id, flags, rfi->cur_idx, rfi->exp_idx,
+ cur_idx, exp_idx);
+ if (flags & BRCMF_RXREORDER_FLUSH_ALL)
+ end_idx = rfi->exp_idx;
+ else
+ end_idx = exp_idx;
+
+ /* flush pkts first */
+ brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
+ &reorder_list);
+
+ if (exp_idx == ((cur_idx + 1) % (rfi->max_idx + 1))) {
+ __skb_queue_tail(&reorder_list, pkt);
+ } else {
+ rfi->pktslots[cur_idx] = pkt;
+ rfi->pend_pkts++;
+ }
+ rfi->exp_idx = exp_idx;
+ rfi->cur_idx = cur_idx;
+ }
} else {
- for (i = 0; i < BRCMF_MAX_IFS; i++)
- brcmf_txflowblock_if(drvr->iflist[i],
- BRCMF_NETIF_STOP_REASON_BLOCK_BUS,
- state);
+ /* explicit window move, updating the expected index */
+ exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
+
+ brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
+ flow_id, flags, rfi->exp_idx, exp_idx);
+ if (flags & BRCMF_RXREORDER_FLUSH_ALL)
+ end_idx = rfi->exp_idx;
+ else
+ end_idx = exp_idx;
+
+ brcmf_rxreorder_get_skb_list(rfi, rfi->exp_idx, end_idx,
+ &reorder_list);
+ __skb_queue_tail(&reorder_list, pkt);
+ /* set the new expected idx */
+ rfi->exp_idx = exp_idx;
+ }
+netif_rx:
+ skb_queue_walk_safe(&reorder_list, pkt, pnext) {
+ __skb_unlink(pkt, &reorder_list);
+ brcmf_netif_rx(ifp, pkt);
}
}
@@ -285,16 +515,18 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
struct brcmf_if *ifp;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_pub *drvr = bus_if->drvr;
+ struct brcmf_skb_reorder_data *rd;
u8 ifidx;
int ret;
- brcmf_dbg(DATA, "Enter\n");
+ brcmf_dbg(DATA, "Enter: %s: count=%u\n", dev_name(dev),
+ skb_queue_len(skb_list));
skb_queue_walk_safe(skb_list, skb, pnext) {
skb_unlink(skb, skb_list);
/* process and remove protocol-specific header */
- ret = brcmf_proto_hdrpull(drvr, drvr->fw_signals, &ifidx, skb);
+ ret = brcmf_proto_hdrpull(drvr, true, &ifidx, skb);
ifp = drvr->iflist[ifidx];
if (ret || !ifp || !ifp->ndev) {
@@ -304,31 +536,11 @@ void brcmf_rx_frames(struct device *dev, struct sk_buff_head *skb_list)
continue;
}
- skb->dev = ifp->ndev;
- skb->protocol = eth_type_trans(skb, skb->dev);
-
- if (skb->pkt_type == PACKET_MULTICAST)
- ifp->stats.multicast++;
-
- /* Process special event packets */
- brcmf_fweh_process_skb(drvr, skb);
-
- if (!(ifp->ndev->flags & IFF_UP)) {
- brcmu_pkt_buf_free_skb(skb);
- continue;
- }
-
- ifp->stats.rx_bytes += skb->len;
- ifp->stats.rx_packets++;
-
- if (in_interrupt())
- netif_rx(skb);
+ rd = (struct brcmf_skb_reorder_data *)skb->cb;
+ if (rd->reorder)
+ brcmf_rxreorder_process_info(ifp, rd->reorder, skb);
else
- /* If the receive is not processed inside an ISR,
- * the softirqd must be woken explicitly to service the
- * NET_RX_SOFTIRQ. This is handled by netif_rx_ni().
- */
- netif_rx_ni(skb);
+ brcmf_netif_rx(ifp, skb);
}
}
@@ -889,7 +1101,6 @@ int brcmf_bus_start(struct device *dev)
if (ret < 0)
goto fail;
- drvr->fw_signals = true;
ret = brcmf_fws_init(drvr);
if (ret < 0)
goto fail;
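The reorder buffer added above is a circular array of max_idx + 1 slots; brcmf_rxreorder_get_skb_list() drains it from 'start' up to (but excluding) 'end', wrapping at max_idx. A sketch of that walk, with sketch_reorder standing in for brcmf_ampdu_rx_reorder:

#include <linux/skbuff.h>

struct sketch_reorder {
        struct sk_buff **pktslots;
        u8 max_idx;
        u8 pend_pkts;
};

/* Collect every pending skb from 'start' to 'end' (exclusive), wrapping
 * at max_idx; when start == end the whole ring is flushed.
 */
static void sketch_flush(struct sketch_reorder *rfi, u8 start, u8 end,
                         struct sk_buff_head *out)
{
        __skb_queue_head_init(out);
        if (!rfi->pend_pkts)
                return;
        do {
                if (rfi->pktslots[start]) {
                        __skb_queue_tail(out, rfi->pktslots[start]);
                        rfi->pktslots[start] = NULL;
                }
                start = (start + 1) % (rfi->max_idx + 1);       /* wrap */
        } while (start != end);
        rfi->pend_pkts -= skb_queue_len(out);
}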
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 26411196832..1aa75d5951b 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -201,13 +201,6 @@ struct rte_console {
#define SFC_CRC4WOOS (1 << 2) /* CRC error for write out of sync */
#define SFC_ABORTALL (1 << 3) /* Abort all in-progress frames */
-/* HW frame tag */
-#define SDPCM_FRAMETAG_LEN 4 /* 2 bytes len, 2 bytes check val */
-
-/* Total length of frame header for dongle protocol */
-#define SDPCM_HDRLEN (SDPCM_FRAMETAG_LEN + SDPCM_SWHEADER_LEN)
-#define SDPCM_RESERVE (SDPCM_HDRLEN + BRCMF_SDALIGN)
-
/*
* Software allocation of To SB Mailbox resources
*/
@@ -250,38 +243,6 @@ struct rte_console {
/* Current protocol version */
#define SDPCM_PROT_VERSION 4
-/* SW frame header */
-#define SDPCM_PACKET_SEQUENCE(p) (((u8 *)p)[0] & 0xff)
-
-#define SDPCM_CHANNEL_MASK 0x00000f00
-#define SDPCM_CHANNEL_SHIFT 8
-#define SDPCM_PACKET_CHANNEL(p) (((u8 *)p)[1] & 0x0f)
-
-#define SDPCM_NEXTLEN_OFFSET 2
-
-/* Data Offset from SOF (HW Tag, SW Tag, Pad) */
-#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */
-#define SDPCM_DOFFSET_VALUE(p) (((u8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff)
-#define SDPCM_DOFFSET_MASK 0xff000000
-#define SDPCM_DOFFSET_SHIFT 24
-#define SDPCM_FCMASK_OFFSET 4 /* Flow control */
-#define SDPCM_FCMASK_VALUE(p) (((u8 *)p)[SDPCM_FCMASK_OFFSET] & 0xff)
-#define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */
-#define SDPCM_WINDOW_VALUE(p) (((u8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff)
-
-#define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */
-
-/* logical channel numbers */
-#define SDPCM_CONTROL_CHANNEL 0 /* Control channel Id */
-#define SDPCM_EVENT_CHANNEL 1 /* Asyc Event Indication Channel Id */
-#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */
-#define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets */
-#define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */
-
-#define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for 8bit frame seq */
-
-#define SDPCM_GLOMDESC(p) (((u8 *)p)[1] & 0x80)
-
/*
* Shared structure between dongle and the host.
* The structure contains pointers to trap or assert information.
@@ -396,8 +357,8 @@ struct sdpcm_shared_le {
__le32 brpt_addr;
};
-/* SDIO read frame info */
-struct brcmf_sdio_read {
+/* dongle SDIO bus specific header info */
+struct brcmf_sdio_hdrinfo {
u8 seq_num;
u8 channel;
u16 len;
@@ -431,7 +392,7 @@ struct brcmf_sdio {
u8 hdrbuf[MAX_HDR_READ + BRCMF_SDALIGN];
u8 *rxhdr; /* Header of current rx frame (in hdrbuf) */
u8 rx_seq; /* Receive sequence number (expected) */
- struct brcmf_sdio_read cur_read;
+ struct brcmf_sdio_hdrinfo cur_read;
/* info of current read frame */
bool rxskip; /* Skip receive (awaiting NAK ACK) */
bool rxpending; /* Data frame pending in dongle */
@@ -500,6 +461,8 @@ struct brcmf_sdio {
struct brcmf_sdio_count sdcnt;
bool sr_enabled; /* SaveRestore enabled */
bool sleeping; /* SDIO bus sleeping */
+
+ u8 tx_hdrlen; /* sdio bus header length for tx packet */
};
/* clkstate */
@@ -510,7 +473,6 @@ struct brcmf_sdio {
#ifdef DEBUG
static int qcount[NUMPRIO];
-static int tx_packets[NUMPRIO];
#endif /* DEBUG */
#define DEFAULT_SDIO_DRIVE_STRENGTH 6 /* in milliamps */
@@ -1043,18 +1005,63 @@ static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
}
}
-static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
- struct brcmf_sdio_read *rd,
- enum brcmf_sdio_frmtype type)
+/**
+ * brcmfmac sdio bus specific header
+ * This is the lowest-layer header wrapped around the packets exchanged
+ * between the host and the WiFi dongle; it carries the information needed
+ * by the SDIO core and the firmware
+ *
+ * It consists of 2 parts: hw header and software header
+ * hardware header (frame tag) - 4 bytes
+ * Byte 0~1: Frame length
+ * Byte 2~3: Checksum, bit-wise inverse of frame length
+ * software header - 8 bytes
+ * Byte 0: Rx/Tx sequence number
+ * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
+ * Byte 2: Length of next data frame, reserved for Tx
+ * Byte 3: Data offset
+ * Byte 4: Flow control bits, reserved for Tx
+ * Byte 5: Maximum sequence number allowed by firmware for Tx; N/A in a Tx packet
+ * Byte 6~7: Reserved
+ */
+#define SDPCM_HWHDR_LEN 4
+#define SDPCM_SWHDR_LEN 8
+#define SDPCM_HDRLEN (SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN)
+/* software header */
+#define SDPCM_SEQ_MASK 0x000000ff
+#define SDPCM_SEQ_WRAP 256
+#define SDPCM_CHANNEL_MASK 0x00000f00
+#define SDPCM_CHANNEL_SHIFT 8
+#define SDPCM_CONTROL_CHANNEL 0 /* Control */
+#define SDPCM_EVENT_CHANNEL 1 /* Async Event Indication */
+#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv */
+#define SDPCM_GLOM_CHANNEL 3 /* Coalesced packets */
+#define SDPCM_TEST_CHANNEL 15 /* Test/debug packets */
+#define SDPCM_GLOMDESC(p) (((u8 *)p)[1] & 0x80)
+#define SDPCM_NEXTLEN_MASK 0x00ff0000
+#define SDPCM_NEXTLEN_SHIFT 16
+#define SDPCM_DOFFSET_MASK 0xff000000
+#define SDPCM_DOFFSET_SHIFT 24
+#define SDPCM_FCMASK_MASK 0x000000ff
+#define SDPCM_WINDOW_MASK 0x0000ff00
+#define SDPCM_WINDOW_SHIFT 8
+
+static inline u8 brcmf_sdio_getdatoffset(u8 *swheader)
+{
+ u32 hdrvalue;
+ hdrvalue = *(u32 *)swheader;
+ return (u8)((hdrvalue & SDPCM_DOFFSET_MASK) >> SDPCM_DOFFSET_SHIFT);
+}
+
+static int brcmf_sdio_hdparse(struct brcmf_sdio *bus, u8 *header,
+ struct brcmf_sdio_hdrinfo *rd,
+ enum brcmf_sdio_frmtype type)
{
u16 len, checksum;
u8 rx_seq, fc, tx_seq_max;
+ u32 swheader;
- /*
- * 4 bytes hardware header (frame tag)
- * Byte 0~1: Frame length
- * Byte 2~3: Checksum, bit-wise inverse of frame length
- */
+ /* hw header */
len = get_unaligned_le16(header);
checksum = get_unaligned_le16(header + sizeof(u16));
/* All zero means no more to read */
@@ -1083,24 +1090,16 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
}
rd->len = len;
- /*
- * 8 bytes hardware header
- * Byte 0: Rx sequence number
- * Byte 1: 4 MSB Channel number, 4 LSB arbitrary flag
- * Byte 2: Length of next data frame
- * Byte 3: Data offset
- * Byte 4: Flow control bits
- * Byte 5: Maximum Sequence number allow for Tx
- * Byte 6~7: Reserved
- */
- if (type == BRCMF_SDIO_FT_SUPER &&
- SDPCM_GLOMDESC(&header[SDPCM_FRAMETAG_LEN])) {
+ /* software header */
+ header += SDPCM_HWHDR_LEN;
+ swheader = le32_to_cpu(*(__le32 *)header);
+ if (type == BRCMF_SDIO_FT_SUPER && SDPCM_GLOMDESC(header)) {
brcmf_err("Glom descriptor found in superframe head\n");
rd->len = 0;
return -EINVAL;
}
- rx_seq = SDPCM_PACKET_SEQUENCE(&header[SDPCM_FRAMETAG_LEN]);
- rd->channel = SDPCM_PACKET_CHANNEL(&header[SDPCM_FRAMETAG_LEN]);
+ rx_seq = (u8)(swheader & SDPCM_SEQ_MASK);
+ rd->channel = (swheader & SDPCM_CHANNEL_MASK) >> SDPCM_CHANNEL_SHIFT;
if (len > MAX_RX_DATASZ && rd->channel != SDPCM_CONTROL_CHANNEL &&
type != BRCMF_SDIO_FT_SUPER) {
brcmf_err("HW header length too long\n");
@@ -1120,7 +1119,7 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
rd->len = 0;
return -EINVAL;
}
- rd->dat_offset = SDPCM_DOFFSET_VALUE(&header[SDPCM_FRAMETAG_LEN]);
+ rd->dat_offset = brcmf_sdio_getdatoffset(header);
if (rd->dat_offset < SDPCM_HDRLEN || rd->dat_offset > rd->len) {
brcmf_err("seq %d: bad data offset\n", rx_seq);
bus->sdcnt.rx_badhdr++;
@@ -1137,14 +1136,15 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
/* no need to check the reset for subframe */
if (type == BRCMF_SDIO_FT_SUB)
return 0;
- rd->len_nxtfrm = header[SDPCM_FRAMETAG_LEN + SDPCM_NEXTLEN_OFFSET];
+ rd->len_nxtfrm = (swheader & SDPCM_NEXTLEN_MASK) >> SDPCM_NEXTLEN_SHIFT;
if (rd->len_nxtfrm << 4 > MAX_RX_DATASZ) {
/* only warn for non-glom packets */
if (rd->channel != SDPCM_GLOM_CHANNEL)
brcmf_err("seq %d: next length error\n", rx_seq);
rd->len_nxtfrm = 0;
}
- fc = SDPCM_FCMASK_VALUE(&header[SDPCM_FRAMETAG_LEN]);
+ swheader = le32_to_cpu(*(__le32 *)(header + 4));
+ fc = swheader & SDPCM_FCMASK_MASK;
if (bus->flowcontrol != fc) {
if (~bus->flowcontrol & fc)
bus->sdcnt.fc_xoff++;
@@ -1153,7 +1153,7 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
bus->sdcnt.fc_rcvd++;
bus->flowcontrol = fc;
}
- tx_seq_max = SDPCM_WINDOW_VALUE(&header[SDPCM_FRAMETAG_LEN]);
+ tx_seq_max = (swheader & SDPCM_WINDOW_MASK) >> SDPCM_WINDOW_SHIFT;
if ((u8)(tx_seq_max - bus->tx_seq) > 0x40) {
brcmf_err("seq %d: max tx seq number error\n", rx_seq);
tx_seq_max = bus->tx_seq + 2;
@@ -1163,18 +1163,40 @@ static int brcmf_sdio_hdparser(struct brcmf_sdio *bus, u8 *header,
return 0;
}
+static inline void brcmf_sdio_update_hwhdr(u8 *header, u16 frm_length)
+{
+ *(__le16 *)header = cpu_to_le16(frm_length);
+ *(((__le16 *)header) + 1) = cpu_to_le16(~frm_length);
+}
+
+static void brcmf_sdio_hdpack(struct brcmf_sdio *bus, u8 *header,
+ struct brcmf_sdio_hdrinfo *hd_info)
+{
+ u32 sw_header;
+
+ brcmf_sdio_update_hwhdr(header, hd_info->len);
+
+ sw_header = bus->tx_seq;
+ sw_header |= (hd_info->channel << SDPCM_CHANNEL_SHIFT) &
+ SDPCM_CHANNEL_MASK;
+ sw_header |= (hd_info->dat_offset << SDPCM_DOFFSET_SHIFT) &
+ SDPCM_DOFFSET_MASK;
+ *(((__le32 *)header) + 1) = cpu_to_le32(sw_header);
+ *(((__le32 *)header) + 2) = 0;
+}
+
static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
{
u16 dlen, totlen;
u8 *dptr, num = 0;
-
+ u32 align = 0;
u16 sublen;
struct sk_buff *pfirst, *pnext;
int errcode;
u8 doff, sfdoff;
- struct brcmf_sdio_read rd_new;
+ struct brcmf_sdio_hdrinfo rd_new;
/* If packets, issue read(s) and send up packet chain */
/* Return sequence numbers consumed? */
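brcmf_sdio_hdparse() and brcmf_sdio_update_hwhdr() above rely on the 4-byte hardware tag being a little-endian frame length followed by its bit-wise inverse. A hedged sketch of the resulting validity check:

#include <linux/types.h>
#include <asm/unaligned.h>

/* Bytes 0-1 carry the frame length (LE), bytes 2-3 its bit-wise inverse;
 * the tag is consistent exactly when len ^ check == 0xffff.
 */
static bool sketch_hwtag_valid(const u8 *header)
{
        u16 len = get_unaligned_le16(header);
        u16 check = get_unaligned_le16(header + 2);

        return (u16)(len ^ check) == 0xffff;    /* check == ~len */
}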
@@ -1182,6 +1204,11 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
brcmf_dbg(SDIO, "start: glomd %p glom %p\n",
bus->glomd, skb_peek(&bus->glom));
+ if (bus->sdiodev->pdata)
+ align = bus->sdiodev->pdata->sd_sgentry_align;
+ if (align < 4)
+ align = 4;
+
/* If there's a descriptor, generate the packet chain */
if (bus->glomd) {
pfirst = pnext = NULL;
@@ -1205,9 +1232,9 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
pnext = NULL;
break;
}
- if (sublen % BRCMF_SDALIGN) {
+ if (sublen % align) {
brcmf_err("sublen %d not multiple of %d\n",
- sublen, BRCMF_SDALIGN);
+ sublen, align);
}
totlen += sublen;
@@ -1220,7 +1247,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
}
/* Allocate/chain packet for next subframe */
- pnext = brcmu_pkt_buf_get_skb(sublen + BRCMF_SDALIGN);
+ pnext = brcmu_pkt_buf_get_skb(sublen + align);
if (pnext == NULL) {
brcmf_err("bcm_pkt_buf_get_skb failed, num %d len %d\n",
num, sublen);
@@ -1229,7 +1256,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
skb_queue_tail(&bus->glom, pnext);
/* Adhere to start alignment requirements */
- pkt_align(pnext, sublen, BRCMF_SDALIGN);
+ pkt_align(pnext, sublen, align);
}
/* If all allocations succeeded, save packet chain
@@ -1305,8 +1332,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
rd_new.seq_num = rxseq;
rd_new.len = dlen;
sdio_claim_host(bus->sdiodev->func[1]);
- errcode = brcmf_sdio_hdparser(bus, pfirst->data, &rd_new,
- BRCMF_SDIO_FT_SUPER);
+ errcode = brcmf_sdio_hdparse(bus, pfirst->data, &rd_new,
+ BRCMF_SDIO_FT_SUPER);
sdio_release_host(bus->sdiodev->func[1]);
bus->cur_read.len = rd_new.len_nxtfrm << 4;
@@ -1324,8 +1351,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
rd_new.len = pnext->len;
rd_new.seq_num = rxseq++;
sdio_claim_host(bus->sdiodev->func[1]);
- errcode = brcmf_sdio_hdparser(bus, pnext->data, &rd_new,
- BRCMF_SDIO_FT_SUB);
+ errcode = brcmf_sdio_hdparse(bus, pnext->data, &rd_new,
+ BRCMF_SDIO_FT_SUB);
sdio_release_host(bus->sdiodev->func[1]);
brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
pnext->data, 32, "subframe:\n");
@@ -1357,7 +1384,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
dptr = (u8 *) (pfirst->data);
sublen = get_unaligned_le16(dptr);
- doff = SDPCM_DOFFSET_VALUE(&dptr[SDPCM_FRAMETAG_LEN]);
+ doff = brcmf_sdio_getdatoffset(&dptr[SDPCM_HWHDR_LEN]);
brcmf_dbg_hex_dump(BRCMF_BYTES_ON() && BRCMF_DATA_ON(),
dptr, pfirst->len,
@@ -1535,7 +1562,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
uint rxleft = 0; /* Remaining number of frames allowed */
int ret; /* Return code from calls */
uint rxcount = 0; /* Total frames read */
- struct brcmf_sdio_read *rd = &bus->cur_read, rd_new;
+ struct brcmf_sdio_hdrinfo *rd = &bus->cur_read, rd_new;
u8 head_read = 0;
brcmf_dbg(TRACE, "Enter\n");
@@ -1583,8 +1610,8 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
bus->rxhdr, SDPCM_HDRLEN,
"RxHdr:\n");
- if (brcmf_sdio_hdparser(bus, bus->rxhdr, rd,
- BRCMF_SDIO_FT_NORMAL)) {
+ if (brcmf_sdio_hdparse(bus, bus->rxhdr, rd,
+ BRCMF_SDIO_FT_NORMAL)) {
sdio_release_host(bus->sdiodev->func[1]);
if (!bus->rxpending)
break;
@@ -1648,8 +1675,8 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
memcpy(bus->rxhdr, pkt->data, SDPCM_HDRLEN);
rd_new.seq_num = rd->seq_num;
sdio_claim_host(bus->sdiodev->func[1]);
- if (brcmf_sdio_hdparser(bus, bus->rxhdr, &rd_new,
- BRCMF_SDIO_FT_NORMAL)) {
+ if (brcmf_sdio_hdparse(bus, bus->rxhdr, &rd_new,
+ BRCMF_SDIO_FT_NORMAL)) {
rd->len = 0;
brcmu_pkt_buf_free_skb(pkt);
}
@@ -1693,7 +1720,7 @@ static uint brcmf_sdio_readframes(struct brcmf_sdio *bus, uint maxframes)
/* Save superframe descriptor and allocate packet frame */
if (rd->channel == SDPCM_GLOM_CHANNEL) {
- if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_FRAMETAG_LEN])) {
+ if (SDPCM_GLOMDESC(&bus->rxhdr[SDPCM_HWHDR_LEN])) {
brcmf_dbg(GLOM, "glom descriptor, %d bytes:\n",
rd->len);
brcmf_dbg_hex_dump(BRCMF_GLOM_ON(),
@@ -1759,85 +1786,168 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
return;
}
+/* flag marking a dummy skb added for DMA alignment requirement */
+#define DUMMY_SKB_FLAG 0x10000
+/* bit mask of data length chopped from the previous packet */
+#define DUMMY_SKB_CHOP_LEN_MASK 0xffff
+/**
+ * brcmf_sdio_txpkt_prep - packet preparation for transmit
+ * @bus: brcmf_sdio structure pointer
+ * @pktq: packet list pointer
+ * @chan: virtual channel to transmit the packet
+ *
+ * Processing applied to the packet:
+ * - Align data buffer pointer
+ * - Align data buffer length
+ * - Prepare header
+ * Return: 0 on success, negative value on error
+ */
+static int
+brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
+ uint chan)
+{
+ u16 head_pad, tail_pad, tail_chop, head_align, sg_align;
+ int ntail;
+ struct sk_buff *pkt_next, *pkt_new;
+ u8 *dat_buf;
+ unsigned blksize = bus->sdiodev->func[SDIO_FUNC_2]->cur_blksize;
+ struct brcmf_sdio_hdrinfo hd_info = {0};
+
+ /* SDIO ADMA requires at least 32 bit alignment */
+ head_align = 4;
+ sg_align = 4;
+ if (bus->sdiodev->pdata) {
+ head_align = bus->sdiodev->pdata->sd_head_align > 4 ?
+ bus->sdiodev->pdata->sd_head_align : 4;
+ sg_align = bus->sdiodev->pdata->sd_sgentry_align > 4 ?
+ bus->sdiodev->pdata->sd_sgentry_align : 4;
+ }
+ /* sg entry alignment should be a divisor of block size */
+ WARN_ON(blksize % sg_align);
+
+ pkt_next = pktq->next;
+ dat_buf = (u8 *)(pkt_next->data);
+
+ /* Check head padding */
+ head_pad = ((unsigned long)dat_buf % head_align);
+ if (head_pad) {
+ if (skb_headroom(pkt_next) < head_pad) {
+ bus->sdiodev->bus_if->tx_realloc++;
+ head_pad = 0;
+ if (skb_cow(pkt_next, head_pad))
+ return -ENOMEM;
+ }
+ skb_push(pkt_next, head_pad);
+ dat_buf = (u8 *)(pkt_next->data);
+ memset(dat_buf, 0, head_pad + bus->tx_hdrlen);
+ }
+
+ /* Check tail padding */
+ pkt_new = NULL;
+ tail_chop = pkt_next->len % sg_align;
+ tail_pad = sg_align - tail_chop;
+ tail_pad += blksize - (pkt_next->len + tail_pad) % blksize;
+ if (skb_tailroom(pkt_next) < tail_pad && pkt_next->len > blksize) {
+ pkt_new = brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
+ if (pkt_new == NULL)
+ return -ENOMEM;
+ memcpy(pkt_new->data,
+ pkt_next->data + pkt_next->len - tail_chop,
+ tail_chop);
+ *(u32 *)(pkt_new->cb) = DUMMY_SKB_FLAG + tail_chop;
+ skb_trim(pkt_next, pkt_next->len - tail_chop);
+ __skb_queue_after(pktq, pkt_next, pkt_new);
+ } else {
+ ntail = pkt_next->data_len + tail_pad -
+ (pkt_next->end - pkt_next->tail);
+ if (skb_cloned(pkt_next) || ntail > 0)
+ if (pskb_expand_head(pkt_next, 0, ntail, GFP_ATOMIC))
+ return -ENOMEM;
+ if (skb_linearize(pkt_next))
+ return -ENOMEM;
+ dat_buf = (u8 *)(pkt_next->data);
+ __skb_put(pkt_next, tail_pad);
+ }
+
+ /* Now prep the header */
+ if (pkt_new)
+ hd_info.len = pkt_next->len + tail_chop;
+ else
+ hd_info.len = pkt_next->len - tail_pad;
+ hd_info.channel = chan;
+ hd_info.dat_offset = head_pad + bus->tx_hdrlen;
+ brcmf_sdio_hdpack(bus, dat_buf, &hd_info);
+
+ if (BRCMF_BYTES_ON() &&
+ ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
+ (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)))
+ brcmf_dbg_hex_dump(true, pkt_next, hd_info.len, "Tx Frame:\n");
+ else if (BRCMF_HDRS_ON())
+ brcmf_dbg_hex_dump(true, pkt_next, head_pad + bus->tx_hdrlen,
+ "Tx Header:\n");
+
+ return 0;
+}
+
+/**
+ * brcmf_sdio_txpkt_postp - packet post processing for transmit
+ * @bus: brcmf_sdio structure pointer
+ * @pktq: packet list pointer
+ *
+ * Processing applied to the packet:
+ * - Remove head padding
+ * - Remove tail padding
+ */
+static void
+brcmf_sdio_txpkt_postp(struct brcmf_sdio *bus, struct sk_buff_head *pktq)
+{
+ u8 *hdr;
+ u32 dat_offset;
+ u32 dummy_flags, chop_len;
+ struct sk_buff *pkt_next, *tmp, *pkt_prev;
+
+ skb_queue_walk_safe(pktq, pkt_next, tmp) {
+ dummy_flags = *(u32 *)(pkt_next->cb);
+ if (dummy_flags & DUMMY_SKB_FLAG) {
+ chop_len = dummy_flags & DUMMY_SKB_CHOP_LEN_MASK;
+ if (chop_len) {
+ pkt_prev = pkt_next->prev;
+ memcpy(pkt_prev->data + pkt_prev->len,
+ pkt_next->data, chop_len);
+ skb_put(pkt_prev, chop_len);
+ }
+ __skb_unlink(pkt_next, pktq);
+ brcmu_pkt_buf_free_skb(pkt_next);
+ } else {
+ hdr = pkt_next->data + SDPCM_HWHDR_LEN;
+ dat_offset = le32_to_cpu(*(__le32 *)hdr);
+ dat_offset = (dat_offset & SDPCM_DOFFSET_MASK) >>
+ SDPCM_DOFFSET_SHIFT;
+ skb_pull(pkt_next, dat_offset);
+ }
+ }
+}
+
/* Writes a HW/SW header into the packet and sends it. */
/* Assumes: (a) header space already there, (b) caller holds lock */
static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
uint chan)
{
int ret;
- u8 *frame;
- u16 len, pad = 0;
- u32 swheader;
int i;
+ struct sk_buff_head localq;
brcmf_dbg(TRACE, "Enter\n");
- frame = (u8 *) (pkt->data);
-
- /* Add alignment padding, allocate new packet if needed */
- pad = ((unsigned long)frame % BRCMF_SDALIGN);
- if (pad) {
- if (skb_headroom(pkt) < pad) {
- brcmf_dbg(INFO, "insufficient headroom %d for %d pad\n",
- skb_headroom(pkt), pad);
- bus->sdiodev->bus_if->tx_realloc++;
- ret = skb_cow(pkt, BRCMF_SDALIGN);
- if (ret)
- goto done;
- pad = ((unsigned long)frame % BRCMF_SDALIGN);
- }
- skb_push(pkt, pad);
- frame = (u8 *) (pkt->data);
- memset(frame, 0, pad + SDPCM_HDRLEN);
- }
- /* precondition: pad < BRCMF_SDALIGN */
-
- /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
- len = (u16) (pkt->len);
- *(__le16 *) frame = cpu_to_le16(len);
- *(((__le16 *) frame) + 1) = cpu_to_le16(~len);
-
- /* Software tag: channel, sequence number, data offset */
- swheader =
- ((chan << SDPCM_CHANNEL_SHIFT) & SDPCM_CHANNEL_MASK) | bus->tx_seq |
- (((pad +
- SDPCM_HDRLEN) << SDPCM_DOFFSET_SHIFT) & SDPCM_DOFFSET_MASK);
-
- *(((__le32 *) frame) + 1) = cpu_to_le32(swheader);
- *(((__le32 *) frame) + 2) = 0;
-
-#ifdef DEBUG
- tx_packets[pkt->priority]++;
-#endif
-
- brcmf_dbg_hex_dump(BRCMF_BYTES_ON() &&
- ((BRCMF_CTL_ON() && chan == SDPCM_CONTROL_CHANNEL) ||
- (BRCMF_DATA_ON() && chan != SDPCM_CONTROL_CHANNEL)),
- frame, len, "Tx Frame:\n");
- brcmf_dbg_hex_dump(!(BRCMF_BYTES_ON() &&
- ((BRCMF_CTL_ON() &&
- chan == SDPCM_CONTROL_CHANNEL) ||
- (BRCMF_DATA_ON() &&
- chan != SDPCM_CONTROL_CHANNEL))) &&
- BRCMF_HDRS_ON(),
- frame, min_t(u16, len, 16), "TxHdr:\n");
-
- /* Raise len to next SDIO block to eliminate tail command */
- if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
- u16 pad = bus->blocksize - (len % bus->blocksize);
- if ((pad <= bus->roundup) && (pad < bus->blocksize))
- len += pad;
- } else if (len % BRCMF_SDALIGN) {
- len += BRCMF_SDALIGN - (len % BRCMF_SDALIGN);
- }
-
- /* Some controllers have trouble with odd bytes -- round to even */
- if (len & (ALIGNMENT - 1))
- len = roundup(len, ALIGNMENT);
+ __skb_queue_head_init(&localq);
+ __skb_queue_tail(&localq, pkt);
+ ret = brcmf_sdio_txpkt_prep(bus, &localq, chan);
+ if (ret)
+ goto done;
sdio_claim_host(bus->sdiodev->func[1]);
ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, pkt);
+ SDIO_FUNC_2, F2SYNC, &localq);
bus->sdcnt.f2txdata++;
if (ret < 0) {
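When the tail padding does not fit in the last packet, brcmf_sdio_txpkt_prep() above chops the tail into a small dummy skb and records DUMMY_SKB_FLAG plus the chop length in its cb; brcmf_sdio_txpkt_postp() uses that to copy the bytes back and drop the dummy. A sketch of the encode/decode (the SKETCH_* names are illustrative stand-ins for the defines in the patch):

#include <linux/skbuff.h>

#define SKETCH_DUMMY_SKB_FLAG           0x10000
#define SKETCH_DUMMY_SKB_CHOP_LEN_MASK  0xffff

/* Mark a padding-only skb: bit 16 flags "dummy", bits 0-15 store how many
 * data bytes were chopped off the previous packet.
 */
static void sketch_mark_dummy(struct sk_buff *pkt_new, u16 tail_chop)
{
        *(u32 *)pkt_new->cb = SKETCH_DUMMY_SKB_FLAG + tail_chop;
}

/* Decode the marker after transmit: returns true for a dummy skb and
 * reports the number of bytes to copy back into the previous packet.
 */
static bool sketch_is_dummy(const struct sk_buff *skb, u32 *chop_len)
{
        u32 flags = *(const u32 *)skb->cb;

        *chop_len = flags & SKETCH_DUMMY_SKB_CHOP_LEN_MASK;
        return flags & SKETCH_DUMMY_SKB_FLAG;
}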
@@ -1865,11 +1975,11 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
}
sdio_release_host(bus->sdiodev->func[1]);
if (ret == 0)
- bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
done:
- /* restore pkt buffer pointer before calling tx complete routine */
- skb_pull(pkt, SDPCM_HDRLEN + pad);
+ brcmf_sdio_txpkt_postp(bus, &localq);
+ __skb_dequeue_tail(&localq);
brcmf_txcomplete(bus->sdiodev->dev, pkt, ret == 0);
return ret;
}
@@ -1880,7 +1990,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
u32 intstatus = 0;
int ret = 0, prec_out;
uint cnt = 0;
- uint datalen;
u8 tx_prec_map;
brcmf_dbg(TRACE, "Enter\n");
@@ -1896,7 +2005,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
break;
}
spin_unlock_bh(&bus->txqlock);
- datalen = pkt->len - SDPCM_HDRLEN;
ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL);
@@ -2221,7 +2329,7 @@ static void brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
}
} else {
- bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
}
sdio_release_host(bus->sdiodev->func[1]);
bus->ctrl_frame_stat = false;
@@ -2276,13 +2384,14 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
+ ulong flags;
brcmf_dbg(TRACE, "Enter\n");
datalen = pkt->len;
/* Add space for the header */
- skb_push(pkt, SDPCM_HDRLEN);
+ skb_push(pkt, bus->tx_hdrlen);
/* precondition: IS_ALIGNED((unsigned long)(pkt->data), 2) */
prec = prio2prec((pkt->priority & PRIOMASK));
@@ -2293,10 +2402,9 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
bus->sdcnt.fcqueued++;
/* Priority based enq */
- spin_lock_bh(&bus->txqlock);
+ spin_lock_irqsave(&bus->txqlock, flags);
if (!brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec)) {
- skb_pull(pkt, SDPCM_HDRLEN);
- brcmf_txcomplete(bus->sdiodev->dev, pkt, false);
+ skb_pull(pkt, bus->tx_hdrlen);
brcmf_err("out of bus->txq !!!\n");
ret = -ENOSR;
} else {
@@ -2307,7 +2415,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
bus->txoff = true;
brcmf_txflowblock(bus->sdiodev->dev, true);
}
- spin_unlock_bh(&bus->txqlock);
+ spin_unlock_irqrestore(&bus->txqlock, flags);
#ifdef DEBUG
if (pktq_plen(&bus->txq, prec) > qcount[prec])
@@ -2436,7 +2544,7 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
return ret;
}
- bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQUENCE_WRAP;
+ bus->tx_seq = (bus->tx_seq + 1) % SDPCM_SEQ_WRAP;
return ret;
}
@@ -2446,19 +2554,19 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
{
u8 *frame;
u16 len;
- u32 swheader;
uint retries = 0;
u8 doff = 0;
int ret = -1;
struct brcmf_bus *bus_if = dev_get_drvdata(dev);
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
struct brcmf_sdio *bus = sdiodev->bus;
+ struct brcmf_sdio_hdrinfo hd_info = {0};
brcmf_dbg(TRACE, "Enter\n");
/* Back the pointer to make room for the bus header */
- frame = msg - SDPCM_HDRLEN;
- len = (msglen += SDPCM_HDRLEN);
+ frame = msg - bus->tx_hdrlen;
+ len = (msglen += bus->tx_hdrlen);
/* Add alignment padding (optional for ctl frames) */
doff = ((unsigned long)frame % BRCMF_SDALIGN);
@@ -2466,10 +2574,10 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
frame -= doff;
len += doff;
msglen += doff;
- memset(frame, 0, doff + SDPCM_HDRLEN);
+ memset(frame, 0, doff + bus->tx_hdrlen);
}
/* precondition: doff < BRCMF_SDALIGN */
- doff += SDPCM_HDRLEN;
+ doff += bus->tx_hdrlen;
/* Round send length to next SDIO block */
if (bus->roundup && bus->blocksize && (len > bus->blocksize)) {
@@ -2491,18 +2599,10 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
brcmf_sdbrcm_bus_sleep(bus, false, false);
sdio_release_host(bus->sdiodev->func[1]);
- /* Hardware tag: 2 byte len followed by 2 byte ~len check (all LE) */
- *(__le16 *) frame = cpu_to_le16((u16) msglen);
- *(((__le16 *) frame) + 1) = cpu_to_le16(~msglen);
-
- /* Software tag: channel, sequence number, data offset */
- swheader =
- ((SDPCM_CONTROL_CHANNEL << SDPCM_CHANNEL_SHIFT) &
- SDPCM_CHANNEL_MASK)
- | bus->tx_seq | ((doff << SDPCM_DOFFSET_SHIFT) &
- SDPCM_DOFFSET_MASK);
- put_unaligned_le32(swheader, frame + SDPCM_FRAMETAG_LEN);
- put_unaligned_le32(0, frame + SDPCM_FRAMETAG_LEN + sizeof(swheader));
+ hd_info.len = (u16)msglen;
+ hd_info.channel = SDPCM_CONTROL_CHANNEL;
+ hd_info.dat_offset = doff;
+ brcmf_sdio_hdpack(bus, frame, &hd_info);
if (!data_ok(bus)) {
brcmf_dbg(INFO, "No bus credit bus->tx_max %d, bus->tx_seq %d\n",
@@ -3733,7 +3833,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
struct brcmf_sdio *bus;
struct brcmf_bus_dcmd *dlst;
u32 dngl_txglom;
- u32 dngl_txglomalign;
+ u32 txglomalign = 0;
u8 idx;
brcmf_dbg(TRACE, "Enter\n");
@@ -3752,7 +3852,7 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
bus->txbound = BRCMF_TXBOUND;
bus->rxbound = BRCMF_RXBOUND;
bus->txminmax = BRCMF_TXMINMAX;
- bus->tx_seq = SDPCM_SEQUENCE_WRAP - 1;
+ bus->tx_seq = SDPCM_SEQ_WRAP - 1;
INIT_WORK(&bus->datawork, brcmf_sdio_dataworker);
bus->brcmf_wq = create_singlethread_workqueue("brcmf_wq");
@@ -3794,8 +3894,11 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
bus->sdiodev->bus_if->chip = bus->ci->chip;
bus->sdiodev->bus_if->chiprev = bus->ci->chiprev;
- /* Attach to the brcmf/OS/network interface */
- ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev);
+ /* default SDIO bus header length for tx packets */
+ bus->tx_hdrlen = SDPCM_HWHDR_LEN + SDPCM_SWHDR_LEN;
+
+ /* Attach to the common layer, reserve hdr space */
+ ret = brcmf_attach(bus->tx_hdrlen, bus->sdiodev->dev);
if (ret != 0) {
brcmf_err("brcmf_attach failed\n");
goto fail;
@@ -3827,9 +3930,13 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
dlst->param_len = sizeof(u32);
} else {
/* otherwise, set txglomalign */
- dngl_txglomalign = bus->sdiodev->bus_if->align;
+ if (sdiodev->pdata)
+ txglomalign = sdiodev->pdata->sd_sgentry_align;
+ /* SDIO ADMA requires at least 32 bit alignment */
+ if (txglomalign < 4)
+ txglomalign = 4;
dlst->name = "bus:txglomalign";
- dlst->param = (char *)&dngl_txglomalign;
+ dlst->param = (char *)&txglomalign;
dlst->param_len = sizeof(u32);
}
list_add(&dlst->list, &bus->sdiodev->bus_if->dcmd_list);
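
The probe change above reserves bus->tx_hdrlen (hardware plus software SDPCM header) via brcmf_attach(), so the data path can simply skb_push() the bus header onto each packet. A minimal sketch of that headroom pattern follows, using hypothetical sizes and helper names rather than the driver's own:

#include <linux/skbuff.h>

#define DEMO_HDRLEN 12			/* hypothetical hw + sw header size */

/* Allocate with the bus header space reserved up front... */
static struct sk_buff *demo_alloc_frame(unsigned int payload_len)
{
	struct sk_buff *skb = dev_alloc_skb(DEMO_HDRLEN + payload_len);

	if (skb)
		skb_reserve(skb, DEMO_HDRLEN);	/* leave headroom for the header */
	return skb;
}

/* ...so the transmit path can prepend the header without reallocating. */
static unsigned char *demo_push_bus_header(struct sk_buff *skb)
{
	return skb_push(skb, DEMO_HDRLEN);	/* now points at the header start */
}
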
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
index 83ee53a7c76..fad77dd2a3a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fweh.c
@@ -185,6 +185,10 @@ static void brcmf_fweh_handle_if_event(struct brcmf_pub *drvr,
ifevent->action, ifevent->ifidx, ifevent->bssidx,
ifevent->flags, ifevent->role);
+ if (ifevent->flags & BRCMF_E_IF_FLAG_NOIF) {
+ brcmf_dbg(EVENT, "event can be ignored\n");
+ return;
+ }
if (ifevent->ifidx >= BRCMF_MAX_IFS) {
brcmf_err("invalid interface index: %u\n",
ifevent->ifidx);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
index 665ef69e974..ecabb04f33c 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil_types.h
@@ -69,4 +69,25 @@ struct brcmf_fil_bss_enable_le {
__le32 enable;
};
+/**
+ * struct brcmf_tdls_iovar_le - common structure for TDLS iovars.
+ *
+ * @ea: ether address of peer station.
+ * @mode: mode value depending on specific tdls iovar.
+ * @chanspec: channel specification.
+ * @pad: unused (for future use).
+ */
+struct brcmf_tdls_iovar_le {
+ u8 ea[ETH_ALEN]; /* Station address */
+ u8 mode; /* mode: depends on iovar */
+ __le16 chanspec;
+ __le32 pad; /* future */
+};
+
+enum brcmf_tdls_manual_ep_ops {
+ BRCMF_TDLS_MANUAL_EP_CREATE = 1,
+ BRCMF_TDLS_MANUAL_EP_DELETE = 3,
+ BRCMF_TDLS_MANUAL_EP_DISCOVERY = 6
+};
+
#endif /* FWIL_TYPES_H_ */
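
The brcmf_tdls_iovar_le structure added above is consumed by the "tdls_endpoint" firmware iovar; the wl_cfg80211.c hunk later in this diff does exactly that. As a hedged sketch, a manual TDLS discovery request could be issued roughly like so (the demo_ helper name is hypothetical):

/* Sketch mirroring brcmf_cfg80211_tdls_oper() further down in this diff;
 * ifp and brcmf_fil_iovar_data_set() are the driver's existing
 * firmware-interface helpers. */
static int demo_tdls_discover(struct brcmf_if *ifp, const u8 *peer)
{
	struct brcmf_tdls_iovar_le info;

	memset(&info, 0, sizeof(info));
	info.mode = BRCMF_TDLS_MANUAL_EP_DISCOVERY;
	if (peer)
		memcpy(info.ea, peer, ETH_ALEN);

	return brcmf_fil_iovar_data_set(ifp, "tdls_endpoint",
					&info, sizeof(info));
}
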
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
index f0d9f7f6c83..82f9140f3d3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwsignal.c
@@ -422,9 +422,12 @@ struct brcmf_fws_macdesc_table {
struct brcmf_fws_info {
struct brcmf_pub *drvr;
+ spinlock_t spinlock;
+ ulong flags;
struct brcmf_fws_stats stats;
struct brcmf_fws_hanger hanger;
enum brcmf_fws_fcmode fcmode;
+ bool fw_signals;
bool bcmc_credit_check;
struct brcmf_fws_macdesc_table desc;
struct workqueue_struct *fws_wq;
@@ -483,6 +486,18 @@ static int brcmf_fws_get_tlv_len(struct brcmf_fws_info *fws,
}
#undef BRCMF_FWS_TLV_DEF
+static void brcmf_fws_lock(struct brcmf_fws_info *fws)
+ __acquires(&fws->spinlock)
+{
+ spin_lock_irqsave(&fws->spinlock, fws->flags);
+}
+
+static void brcmf_fws_unlock(struct brcmf_fws_info *fws)
+ __releases(&fws->spinlock)
+{
+ spin_unlock_irqrestore(&fws->spinlock, fws->flags);
+}
+
static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg)
{
u32 ifidx = brcmf_skb_if_flags_get_field(skb, INDEX);
@@ -869,8 +884,11 @@ static bool brcmf_fws_tim_update(struct brcmf_fws_info *fws,
skcb->state = BRCMF_FWS_SKBSTATE_TIM;
bus = fws->drvr->bus_if;
err = brcmf_fws_hdrpush(fws, skb);
- if (err == 0)
+ if (err == 0) {
+ brcmf_fws_unlock(fws);
err = brcmf_bus_txdata(bus, skb);
+ brcmf_fws_lock(fws);
+ }
if (err)
brcmu_pkt_buf_free_skb(skb);
return true;
@@ -905,26 +923,10 @@ static int brcmf_fws_rssi_indicate(struct brcmf_fws_info *fws, s8 rssi)
return 0;
}
-/* using macro so sparse checking does not complain
- * about locking imbalance.
- */
-#define brcmf_fws_lock(drvr, flags) \
-do { \
- flags = 0; \
- spin_lock_irqsave(&((drvr)->fws_spinlock), (flags)); \
-} while (0)
-
-/* using macro so sparse checking does not complain
- * about locking imbalance.
- */
-#define brcmf_fws_unlock(drvr, flags) \
- spin_unlock_irqrestore(&((drvr)->fws_spinlock), (flags))
-
static
int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
{
struct brcmf_fws_mac_descriptor *entry, *existing;
- ulong flags;
u8 mac_handle;
u8 ifidx;
u8 *addr;
@@ -938,10 +940,10 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
if (entry->occupied) {
brcmf_dbg(TRACE, "deleting %s mac %pM\n",
entry->name, addr);
- brcmf_fws_lock(fws->drvr, flags);
+ brcmf_fws_lock(fws);
brcmf_fws_macdesc_cleanup(fws, entry, -1);
brcmf_fws_macdesc_deinit(entry);
- brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_fws_unlock(fws);
} else
fws->stats.mac_update_failed++;
return 0;
@@ -950,13 +952,13 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
existing = brcmf_fws_macdesc_lookup(fws, addr);
if (IS_ERR(existing)) {
if (!entry->occupied) {
- brcmf_fws_lock(fws->drvr, flags);
+ brcmf_fws_lock(fws);
entry->mac_handle = mac_handle;
brcmf_fws_macdesc_init(entry, addr, ifidx);
brcmf_fws_macdesc_set_name(fws, entry);
brcmu_pktq_init(&entry->psq, BRCMF_FWS_PSQ_PREC_COUNT,
BRCMF_FWS_PSQ_LEN);
- brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_fws_unlock(fws);
brcmf_dbg(TRACE, "add %s mac %pM\n", entry->name, addr);
} else {
fws->stats.mac_update_failed++;
@@ -964,13 +966,13 @@ int brcmf_fws_macdesc_indicate(struct brcmf_fws_info *fws, u8 type, u8 *data)
} else {
if (entry != existing) {
brcmf_dbg(TRACE, "copy mac %s\n", existing->name);
- brcmf_fws_lock(fws->drvr, flags);
+ brcmf_fws_lock(fws);
memcpy(entry, existing,
offsetof(struct brcmf_fws_mac_descriptor, psq));
entry->mac_handle = mac_handle;
brcmf_fws_macdesc_deinit(existing);
brcmf_fws_macdesc_set_name(fws, entry);
- brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_fws_unlock(fws);
brcmf_dbg(TRACE, "relocate %s mac %pM\n", entry->name,
addr);
} else {
@@ -986,7 +988,6 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
u8 type, u8 *data)
{
struct brcmf_fws_mac_descriptor *entry;
- ulong flags;
u8 mac_handle;
int ret;
@@ -996,7 +997,7 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
fws->stats.mac_ps_update_failed++;
return -ESRCH;
}
- brcmf_fws_lock(fws->drvr, flags);
+ brcmf_fws_lock(fws);
/* a state update should wipe old credits */
entry->requested_credit = 0;
entry->requested_packet = 0;
@@ -1011,7 +1012,7 @@ static int brcmf_fws_macdesc_state_indicate(struct brcmf_fws_info *fws,
brcmf_fws_tim_update(fws, entry, BRCMF_FWS_FIFO_AC_VO, true);
ret = BRCMF_FWS_RET_OK_NOSCHEDULE;
}
- brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_fws_unlock(fws);
return ret;
}
@@ -1019,7 +1020,6 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
u8 type, u8 *data)
{
struct brcmf_fws_mac_descriptor *entry;
- ulong flags;
u8 ifidx;
int ret;
@@ -1038,7 +1038,7 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
brcmf_dbg(TRACE, "%s (%d): %s\n", brcmf_fws_get_tlv_name(type), type,
entry->name);
- brcmf_fws_lock(fws->drvr, flags);
+ brcmf_fws_lock(fws);
switch (type) {
case BRCMF_FWS_TYPE_INTERFACE_OPEN:
entry->state = BRCMF_FWS_STATE_OPEN;
@@ -1050,10 +1050,10 @@ static int brcmf_fws_interface_state_indicate(struct brcmf_fws_info *fws,
break;
default:
ret = -EINVAL;
- brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_fws_unlock(fws);
goto fail;
}
- brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_fws_unlock(fws);
return ret;
fail:
@@ -1065,7 +1065,6 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
u8 *data)
{
struct brcmf_fws_mac_descriptor *entry;
- ulong flags;
entry = &fws->desc.nodes[data[1] & 0x1F];
if (!entry->occupied) {
@@ -1079,14 +1078,14 @@ static int brcmf_fws_request_indicate(struct brcmf_fws_info *fws, u8 type,
brcmf_dbg(TRACE, "%s (%d): %s cnt %d bmp %d\n",
brcmf_fws_get_tlv_name(type), type, entry->name,
data[0], data[2]);
- brcmf_fws_lock(fws->drvr, flags);
+ brcmf_fws_lock(fws);
if (type == BRCMF_FWS_TYPE_MAC_REQUEST_CREDIT)
entry->requested_credit = data[0];
else
entry->requested_packet = data[0];
entry->ac_bitmap = data[2];
- brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_fws_unlock(fws);
return BRCMF_FWS_RET_OK_SCHEDULE;
}
@@ -1160,7 +1159,8 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws,
static void brcmf_fws_schedule_deq(struct brcmf_fws_info *fws)
{
/* only schedule dequeue when there are credits for delayed traffic */
- if (fws->fifo_credit_map & fws->fifo_delay_map)
+ if ((fws->fifo_credit_map & fws->fifo_delay_map) ||
+ (!brcmf_fws_fc_active(fws) && fws->fifo_delay_map))
queue_work(fws->fws_wq, &fws->fws_dequeue_work);
}
@@ -1383,7 +1383,6 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot,
static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
u8 *data)
{
- ulong flags;
int i;
if (fws->fcmode != BRCMF_FWS_FCMODE_EXPLICIT_CREDIT) {
@@ -1392,19 +1391,18 @@ static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws,
}
brcmf_dbg(DATA, "enter: data %pM\n", data);
- brcmf_fws_lock(fws->drvr, flags);
+ brcmf_fws_lock(fws);
for (i = 0; i < BRCMF_FWS_FIFO_COUNT; i++)
brcmf_fws_return_credits(fws, i, data[i]);
brcmf_dbg(DATA, "map: credit %x delay %x\n", fws->fifo_credit_map,
fws->fifo_delay_map);
- brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_fws_unlock(fws);
return BRCMF_FWS_RET_OK_SCHEDULE;
}
static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
{
- ulong lflags;
__le32 status_le;
u32 status;
u32 hslot;
@@ -1418,9 +1416,9 @@ static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data)
hslot = brcmf_txstatus_get_field(status, HSLOT);
genbit = brcmf_txstatus_get_field(status, GENERATION);
- brcmf_fws_lock(fws->drvr, lflags);
+ brcmf_fws_lock(fws);
brcmf_fws_txs_process(fws, flags, hslot, genbit);
- brcmf_fws_unlock(fws->drvr, lflags);
+ brcmf_fws_unlock(fws);
return BRCMF_FWS_RET_OK_NOSCHEDULE;
}
@@ -1440,7 +1438,6 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
{
struct brcmf_fws_info *fws = ifp->drvr->fws;
int i;
- ulong flags;
u8 *credits = data;
if (e->datalen < BRCMF_FWS_FIFO_COUNT) {
@@ -1453,7 +1450,7 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
fws->creditmap_received = true;
brcmf_dbg(TRACE, "enter: credits %pM\n", credits);
- brcmf_fws_lock(ifp->drvr, flags);
+ brcmf_fws_lock(fws);
for (i = 0; i < ARRAY_SIZE(fws->fifo_credit); i++) {
if (*credits)
fws->fifo_credit_map |= 1 << i;
@@ -1462,7 +1459,7 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp,
fws->fifo_credit[i] = *credits++;
}
brcmf_fws_schedule_deq(fws);
- brcmf_fws_unlock(ifp->drvr, flags);
+ brcmf_fws_unlock(fws);
return 0;
}
@@ -1471,18 +1468,18 @@ static int brcmf_fws_notify_bcmc_credit_support(struct brcmf_if *ifp,
void *data)
{
struct brcmf_fws_info *fws = ifp->drvr->fws;
- ulong flags;
- brcmf_fws_lock(ifp->drvr, flags);
+ brcmf_fws_lock(fws);
if (fws)
fws->bcmc_credit_check = true;
- brcmf_fws_unlock(ifp->drvr, flags);
+ brcmf_fws_unlock(fws);
return 0;
}
int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
struct sk_buff *skb)
{
+ struct brcmf_skb_reorder_data *rd;
struct brcmf_fws_info *fws = drvr->fws;
u8 *signal_data;
s16 data_len;
@@ -1497,8 +1494,10 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
WARN_ON(signal_len > skb->len);
+ if (!signal_len)
+ return 0;
/* if flow control disabled, skip to packet data and leave */
- if (!signal_len || !drvr->fw_signals) {
+ if (!fws->fw_signals) {
skb_pull(skb, signal_len);
return 0;
}
@@ -1536,9 +1535,12 @@ int brcmf_fws_hdrpull(struct brcmf_pub *drvr, int ifidx, s16 signal_len,
err = BRCMF_FWS_RET_OK_NOSCHEDULE;
switch (type) {
- case BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS:
case BRCMF_FWS_TYPE_COMP_TXSTATUS:
break;
+ case BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS:
+ rd = (struct brcmf_skb_reorder_data *)skb->cb;
+ rd->reorder = data;
+ break;
case BRCMF_FWS_TYPE_MACDESC_ADD:
case BRCMF_FWS_TYPE_MACDESC_DEL:
brcmf_fws_macdesc_indicate(fws, type, data);
@@ -1694,17 +1696,22 @@ static int brcmf_fws_commit_skb(struct brcmf_fws_info *fws, int fifo,
return PTR_ERR(entry);
brcmf_fws_precommit_skb(fws, fifo, skb);
+ entry->transit_count++;
+ if (entry->suppressed)
+ entry->suppr_transit_count++;
+ brcmf_fws_unlock(fws);
rc = brcmf_bus_txdata(bus, skb);
+ brcmf_fws_lock(fws);
brcmf_dbg(DATA, "%s flags %X htod %X bus_tx %d\n", entry->name,
skcb->if_flags, skcb->htod, rc);
if (rc < 0) {
+ entry->transit_count--;
+ if (entry->suppressed)
+ entry->suppr_transit_count--;
brcmf_proto_hdrpull(fws->drvr, false, &ifidx, skb);
goto rollback;
}
- entry->transit_count++;
- if (entry->suppressed)
- entry->suppr_transit_count++;
fws->stats.pkt2bus++;
fws->stats.send_pkts[fifo]++;
if (brcmf_skb_if_flags_get_field(skb, REQUESTED))
@@ -1741,26 +1748,19 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
struct brcmf_fws_info *fws = drvr->fws;
struct brcmf_skbuff_cb *skcb = brcmf_skbcb(skb);
struct ethhdr *eh = (struct ethhdr *)(skb->data);
- ulong flags;
int fifo = BRCMF_FWS_FIFO_BCMC;
bool multicast = is_multicast_ether_addr(eh->h_dest);
+ bool pae = eh->h_proto == htons(ETH_P_PAE);
+ brcmf_dbg(DATA, "tx proto=0x%X\n", ntohs(eh->h_proto));
/* determine the priority */
if (!skb->priority)
skb->priority = cfg80211_classify8021d(skb);
drvr->tx_multicast += !!multicast;
- if (ntohs(eh->h_proto) == ETH_P_PAE)
+ if (pae)
atomic_inc(&ifp->pend_8021x_cnt);
- if (!brcmf_fws_fc_active(fws)) {
- /* If the protocol uses a data header, apply it */
- brcmf_proto_hdrpush(drvr, ifp->ifidx, 0, skb);
-
- /* Use bus module to send data frame */
- return brcmf_bus_txdata(drvr->bus_if, skb);
- }
-
/* set control buffer information */
skcb->if_flags = 0;
skcb->state = BRCMF_FWS_SKBSTATE_NEW;
@@ -1768,7 +1768,7 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
if (!multicast)
fifo = brcmf_fws_prio2fifo[skb->priority];
- brcmf_fws_lock(drvr, flags);
+ brcmf_fws_lock(fws);
if (fifo != BRCMF_FWS_FIFO_AC_BE && fifo < BRCMF_FWS_FIFO_BCMC)
fws->borrow_defer_timestamp = jiffies +
BRCMF_FWS_BORROW_DEFER_PERIOD;
@@ -1781,9 +1781,14 @@ int brcmf_fws_process_skb(struct brcmf_if *ifp, struct sk_buff *skb)
brcmf_fws_schedule_deq(fws);
} else {
brcmf_err("drop skb: no hanger slot\n");
+ if (pae) {
+ atomic_dec(&ifp->pend_8021x_cnt);
+ if (waitqueue_active(&ifp->pend_8021x_wait))
+ wake_up(&ifp->pend_8021x_wait);
+ }
brcmu_pkt_buf_free_skb(skb);
}
- brcmf_fws_unlock(drvr, flags);
+ brcmf_fws_unlock(fws);
return 0;
}
@@ -1803,7 +1808,7 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
struct brcmf_fws_info *fws = ifp->drvr->fws;
struct brcmf_fws_mac_descriptor *entry;
- if (!ifp->ndev || !ifp->drvr->fw_signals)
+ if (!ifp->ndev)
return;
entry = &fws->desc.iface[ifp->ifidx];
@@ -1818,31 +1823,54 @@ void brcmf_fws_add_interface(struct brcmf_if *ifp)
void brcmf_fws_del_interface(struct brcmf_if *ifp)
{
struct brcmf_fws_mac_descriptor *entry = ifp->fws_desc;
- ulong flags;
if (!entry)
return;
- brcmf_fws_lock(ifp->drvr, flags);
+ brcmf_fws_lock(ifp->drvr->fws);
ifp->fws_desc = NULL;
brcmf_dbg(TRACE, "deleting %s\n", entry->name);
brcmf_fws_macdesc_deinit(entry);
brcmf_fws_cleanup(ifp->drvr->fws, ifp->ifidx);
- brcmf_fws_unlock(ifp->drvr, flags);
+ brcmf_fws_unlock(ifp->drvr->fws);
}
static void brcmf_fws_dequeue_worker(struct work_struct *worker)
{
struct brcmf_fws_info *fws;
+ struct brcmf_pub *drvr;
struct sk_buff *skb;
- ulong flags;
int fifo;
+ u32 hslot;
+ u32 ifidx;
+ int ret;
fws = container_of(worker, struct brcmf_fws_info, fws_dequeue_work);
+ drvr = fws->drvr;
- brcmf_fws_lock(fws->drvr, flags);
+ brcmf_fws_lock(fws);
for (fifo = BRCMF_FWS_FIFO_BCMC; fifo >= 0 && !fws->bus_flow_blocked;
fifo--) {
+ if (!brcmf_fws_fc_active(fws)) {
+ while ((skb = brcmf_fws_deq(fws, fifo)) != NULL) {
+ hslot = brcmf_skb_htod_tag_get_field(skb,
+ HSLOT);
+ brcmf_fws_hanger_poppkt(&fws->hanger, hslot,
+ &skb, true);
+ ifidx = brcmf_skb_if_flags_get_field(skb,
+ INDEX);
+ brcmf_proto_hdrpush(drvr, ifidx, 0, skb);
+ /* Use bus module to send data frame */
+ brcmf_fws_unlock(fws);
+ ret = brcmf_bus_txdata(drvr->bus_if, skb);
+ brcmf_fws_lock(fws);
+ if (ret < 0)
+ brcmf_txfinalize(drvr, skb, false);
+ if (fws->bus_flow_blocked)
+ break;
+ }
+ continue;
+ }
while ((fws->fifo_credit[fifo]) || ((!fws->bcmc_credit_check) &&
(fifo == BRCMF_FWS_FIFO_BCMC))) {
skb = brcmf_fws_deq(fws, fifo);
@@ -1870,42 +1898,43 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker)
}
}
}
- brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_fws_unlock(fws);
}
int brcmf_fws_init(struct brcmf_pub *drvr)
{
+ struct brcmf_fws_info *fws;
u32 tlv = BRCMF_FWS_FLAGS_RSSI_SIGNALS;
int rc;
- if (!drvr->fw_signals)
- return 0;
-
- spin_lock_init(&drvr->fws_spinlock);
-
drvr->fws = kzalloc(sizeof(*(drvr->fws)), GFP_KERNEL);
if (!drvr->fws) {
rc = -ENOMEM;
goto fail;
}
+ fws = drvr->fws;
+
+ spin_lock_init(&fws->spinlock);
+
/* set linkage back */
- drvr->fws->drvr = drvr;
- drvr->fws->fcmode = fcmode;
+ fws->drvr = drvr;
+ fws->fcmode = fcmode;
- drvr->fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
- if (drvr->fws->fws_wq == NULL) {
+ fws->fws_wq = create_singlethread_workqueue("brcmf_fws_wq");
+ if (fws->fws_wq == NULL) {
brcmf_err("workqueue creation failed\n");
rc = -EBADF;
goto fail;
}
- INIT_WORK(&drvr->fws->fws_dequeue_work, brcmf_fws_dequeue_worker);
+ INIT_WORK(&fws->fws_dequeue_work, brcmf_fws_dequeue_worker);
/* enable firmware signalling if fcmode active */
- if (drvr->fws->fcmode != BRCMF_FWS_FCMODE_NONE)
+ if (fws->fcmode != BRCMF_FWS_FCMODE_NONE)
tlv |= BRCMF_FWS_FLAGS_XONXOFF_SIGNALS |
BRCMF_FWS_FLAGS_CREDIT_STATUS_SIGNALS |
- BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE;
+ BRCMF_FWS_FLAGS_HOST_PROPTXSTATUS_ACTIVE |
+ BRCMF_FWS_FLAGS_HOST_RXREORDER_ACTIVE;
rc = brcmf_fweh_register(drvr, BRCMF_E_FIFO_CREDIT_MAP,
brcmf_fws_notify_credit_map);
@@ -1921,31 +1950,33 @@ int brcmf_fws_init(struct brcmf_pub *drvr)
goto fail;
}
- /* setting the iovar may fail if feature is unsupported
+ /* Setting the iovar may fail if the feature is unsupported,
 * so leave the rc as is so driver initialization can
- * continue.
+ * continue. Set the mode back to none to indicate it is not enabled.
*/
+ fws->fw_signals = true;
if (brcmf_fil_iovar_int_set(drvr->iflist[0], "tlv", tlv)) {
brcmf_err("failed to set bdcv2 tlv signaling\n");
- goto fail_event;
+ fws->fcmode = BRCMF_FWS_FCMODE_NONE;
+ fws->fw_signals = false;
}
- brcmf_fws_hanger_init(&drvr->fws->hanger);
- brcmf_fws_macdesc_init(&drvr->fws->desc.other, NULL, 0);
- brcmf_fws_macdesc_set_name(drvr->fws, &drvr->fws->desc.other);
- brcmu_pktq_init(&drvr->fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT,
+ if (brcmf_fil_iovar_int_set(drvr->iflist[0], "ampdu_hostreorder", 1))
+ brcmf_dbg(INFO, "enabling AMPDU host-reorder failed\n");
+
+ brcmf_fws_hanger_init(&fws->hanger);
+ brcmf_fws_macdesc_init(&fws->desc.other, NULL, 0);
+ brcmf_fws_macdesc_set_name(fws, &fws->desc.other);
+ brcmu_pktq_init(&fws->desc.other.psq, BRCMF_FWS_PSQ_PREC_COUNT,
BRCMF_FWS_PSQ_LEN);
/* create debugfs file for statistics */
- brcmf_debugfs_create_fws_stats(drvr, &drvr->fws->stats);
+ brcmf_debugfs_create_fws_stats(drvr, &fws->stats);
brcmf_dbg(INFO, "%s bdcv2 tlv signaling [%x]\n",
- drvr->fw_signals ? "enabled" : "disabled", tlv);
+ fws->fw_signals ? "enabled" : "disabled", tlv);
return 0;
-fail_event:
- brcmf_fweh_unregister(drvr, BRCMF_E_BCMC_CREDIT_SUPPORT);
- brcmf_fweh_unregister(drvr, BRCMF_E_FIFO_CREDIT_MAP);
fail:
brcmf_fws_deinit(drvr);
return rc;
@@ -1954,24 +1985,18 @@ fail:
void brcmf_fws_deinit(struct brcmf_pub *drvr)
{
struct brcmf_fws_info *fws = drvr->fws;
- ulong flags;
if (!fws)
return;
- /* disable firmware signalling entirely
- * to avoid using the workqueue.
- */
- drvr->fw_signals = false;
-
if (drvr->fws->fws_wq)
destroy_workqueue(drvr->fws->fws_wq);
/* cleanup */
- brcmf_fws_lock(drvr, flags);
+ brcmf_fws_lock(fws);
brcmf_fws_cleanup(fws, -1);
drvr->fws = NULL;
- brcmf_fws_unlock(drvr, flags);
+ brcmf_fws_unlock(fws);
/* free top structure */
kfree(fws);
@@ -1979,7 +2004,7 @@ void brcmf_fws_deinit(struct brcmf_pub *drvr)
bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
{
- if (!fws)
+ if (!fws->creditmap_received)
return false;
return fws->fcmode != BRCMF_FWS_FCMODE_NONE;
@@ -1987,17 +2012,16 @@ bool brcmf_fws_fc_active(struct brcmf_fws_info *fws)
void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb)
{
- ulong flags;
u32 hslot;
if (brcmf_skbcb(skb)->state == BRCMF_FWS_SKBSTATE_TIM) {
brcmu_pkt_buf_free_skb(skb);
return;
}
- brcmf_fws_lock(fws->drvr, flags);
+ brcmf_fws_lock(fws);
hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0);
- brcmf_fws_unlock(fws->drvr, flags);
+ brcmf_fws_unlock(fws);
}
void brcmf_fws_bus_blocked(struct brcmf_pub *drvr, bool flow_blocked)
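
The fwsignal.c changes above drop the flag-carrying lock macros in favour of small wrapper functions that keep the saved irq flags inside brcmf_fws_info and carry sparse __acquires/__releases annotations. A generic sketch of that pattern, with hypothetical names:

#include <linux/spinlock.h>

struct demo_ctx {
	spinlock_t lock;
	unsigned long irqflags;	/* saved by demo_lock(), restored by demo_unlock() */
};

/* The annotations keep sparse happy even though acquire and release
 * happen in separate functions. */
static void demo_lock(struct demo_ctx *ctx)
	__acquires(&ctx->lock)
{
	spin_lock_irqsave(&ctx->lock, ctx->irqflags);
}

static void demo_unlock(struct demo_ctx *ctx)
	__releases(&ctx->lock)
{
	spin_unlock_irqrestore(&ctx->lock, ctx->irqflags);
}
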
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
index 79555f006d5..d7a97453290 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/p2p.c
@@ -1430,7 +1430,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
IEEE80211_BAND_5GHZ);
wdev = &ifp->vif->wdev;
- cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len,
+ cfg80211_rx_mgmt(wdev, freq, 0, (u8 *)mgmt_frame, mgmt_frame_len, 0,
GFP_ATOMIC);
kfree(mgmt_frame);
@@ -1895,7 +1895,7 @@ s32 brcmf_p2p_notify_rx_mgmt_p2p_probereq(struct brcmf_if *ifp,
IEEE80211_BAND_2GHZ :
IEEE80211_BAND_5GHZ);
- cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len,
+ cfg80211_rx_mgmt(&vif->wdev, freq, 0, mgmt_frame, mgmt_frame_len, 0,
GFP_ATOMIC);
brcmf_dbg(INFO, "mgmt_frame_len (%d) , e->datalen (%d), chanspec (%04x), freq (%d)\n",
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 09786a53995..2b5407f002e 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -208,7 +208,7 @@ extern int brcmf_sdio_regrw_helper(struct brcmf_sdio_dev *sdiodev, u32 addr,
*/
extern int
brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, struct sk_buff *pkt);
+ uint flags, struct sk_buff_head *pktq);
extern int
brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
uint flags, u8 *buf, uint nbytes);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 322cadc51de..39e01a7c855 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -614,7 +614,6 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb)
return 0;
fail:
- brcmf_txcomplete(dev, skb, false);
return ret;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 277b37ae712..571f013cebb 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -1093,8 +1093,11 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif)
brcmf_dbg(INFO, "Call WLC_DISASSOC to stop excess roaming\n ");
err = brcmf_fil_cmd_data_set(vif->ifp,
BRCMF_C_DISASSOC, NULL, 0);
- if (err)
+ if (err) {
brcmf_err("WLC_DISASSOC failed (%d)\n", err);
+ cfg80211_disconnected(vif->wdev.netdev, 0,
+ NULL, 0, GFP_KERNEL);
+ }
clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state);
}
clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
@@ -3152,7 +3155,9 @@ static int brcmf_cfg80211_sched_scan_stop(struct wiphy *wiphy,
}
#ifdef CONFIG_NL80211_TESTMODE
-static int brcmf_cfg80211_testmode(struct wiphy *wiphy, void *data, int len)
+static int brcmf_cfg80211_testmode(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ void *data, int len)
{
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct net_device *ndev = cfg_to_ndev(cfg);
@@ -4123,6 +4128,53 @@ static void brcmf_cfg80211_crit_proto_stop(struct wiphy *wiphy,
clear_bit(BRCMF_SCAN_STATUS_SUPPRESS, &cfg->scan_status);
}
+static int brcmf_convert_nl80211_tdls_oper(enum nl80211_tdls_operation oper)
+{
+ int ret;
+
+ switch (oper) {
+ case NL80211_TDLS_DISCOVERY_REQ:
+ ret = BRCMF_TDLS_MANUAL_EP_DISCOVERY;
+ break;
+ case NL80211_TDLS_SETUP:
+ ret = BRCMF_TDLS_MANUAL_EP_CREATE;
+ break;
+ case NL80211_TDLS_TEARDOWN:
+ ret = BRCMF_TDLS_MANUAL_EP_DELETE;
+ break;
+ default:
+ brcmf_err("unsupported operation: %d\n", oper);
+ ret = -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static int brcmf_cfg80211_tdls_oper(struct wiphy *wiphy,
+ struct net_device *ndev, u8 *peer,
+ enum nl80211_tdls_operation oper)
+{
+ struct brcmf_if *ifp;
+ struct brcmf_tdls_iovar_le info;
+ int ret = 0;
+
+ ret = brcmf_convert_nl80211_tdls_oper(oper);
+ if (ret < 0)
+ return ret;
+
+ ifp = netdev_priv(ndev);
+ memset(&info, 0, sizeof(info));
+ info.mode = (u8)ret;
+ if (peer)
+ memcpy(info.ea, peer, ETH_ALEN);
+
+ ret = brcmf_fil_iovar_data_set(ifp, "tdls_endpoint",
+ &info, sizeof(info));
+ if (ret < 0)
+ brcmf_err("tdls_endpoint iovar failed: ret=%d\n", ret);
+
+ return ret;
+}
+
static struct cfg80211_ops wl_cfg80211_ops = {
.add_virtual_intf = brcmf_cfg80211_add_iface,
.del_virtual_intf = brcmf_cfg80211_del_iface,
@@ -4161,9 +4213,8 @@ static struct cfg80211_ops wl_cfg80211_ops = {
.stop_p2p_device = brcmf_p2p_stop_device,
.crit_proto_start = brcmf_cfg80211_crit_proto_start,
.crit_proto_stop = brcmf_cfg80211_crit_proto_stop,
-#ifdef CONFIG_NL80211_TESTMODE
- .testmode_cmd = brcmf_cfg80211_testmode
-#endif
+ .tdls_oper = brcmf_cfg80211_tdls_oper,
+ CFG80211_TESTMODE_CMD(brcmf_cfg80211_testmode)
};
static s32 brcmf_nl80211_iftype_to_mode(enum nl80211_iftype type)
@@ -4284,7 +4335,8 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
wiphy->n_cipher_suites = ARRAY_SIZE(__wl_cipher_suites);
wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT |
WIPHY_FLAG_OFFCHAN_TX |
- WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ WIPHY_FLAG_SUPPORTS_TDLS;
wiphy->mgmt_stypes = brcmf_txrx_stypes;
wiphy->max_remain_on_channel_duration = 5000;
brcmf_wiphy_pno_params(wiphy);
@@ -4905,6 +4957,12 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr,
goto cfg80211_p2p_attach_out;
}
+ err = brcmf_fil_iovar_int_set(ifp, "tdls_enable", 1);
+ if (err) {
+ brcmf_dbg(INFO, "TDLS not enabled (%d)\n", err);
+ wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_TDLS;
+ }
+
err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_VERSION,
&io_type);
if (err) {
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index e4fd1ee3d69..53365977bfd 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -679,27 +679,6 @@ bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode)
return mode == BCMA_CLKMODE_FAST;
}
-void ai_pci_up(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = container_of(sih, struct si_info, pub);
-
- if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
- bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci[0], true);
-}
-
-/* Unconfigure and/or apply various WARs when going down */
-void ai_pci_down(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = container_of(sih, struct si_info, pub);
-
- if (sii->icbus->hosttype == BCMA_HOSTTYPE_PCI)
- bcma_core_pci_extend_L1timer(&sii->icbus->drv_pci[0], false);
-}
-
/* Enable BT-COEX & Ex-PA for 4313 */
void ai_epa_4313war(struct si_pub *sih)
{
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
index 89562c1fbf4..a8a267b5b87 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -183,9 +183,6 @@ extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
extern bool ai_clkctl_cc(struct si_pub *sih, enum bcma_clkmode mode);
extern bool ai_deviceremoved(struct si_pub *sih);
-extern void ai_pci_down(struct si_pub *sih);
-extern void ai_pci_up(struct si_pub *sih);
-
/* Enable Ex-PA for 4313 */
extern void ai_epa_4313war(struct si_pub *sih);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index bd982856d38..fa391e4eb09 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -928,9 +928,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
}
} else if (txs->phyerr) {
update_rate = false;
- brcms_err(wlc->hw->d11core,
- "%s: ampdu tx phy error (0x%x)\n",
- __func__, txs->phyerr);
+ brcms_dbg_ht(wlc->hw->d11core,
+ "%s: ampdu tx phy error (0x%x)\n",
+ __func__, txs->phyerr);
}
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/debug.c b/drivers/net/wireless/brcm80211/brcmsmac/debug.c
index 9761deb4620..a5d4add26f4 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/debug.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/debug.c
@@ -56,7 +56,7 @@ int brcms_debugfs_attach(struct brcms_pub *drvr)
drvr->dbgfs_dir = debugfs_create_dir(
dev_name(&drvr->wlc->hw->d11core->dev), root_folder);
- return PTR_RET(drvr->dbgfs_dir);
+ return PTR_ERR_OR_ZERO(drvr->dbgfs_dir);
}
void brcms_debugfs_detach(struct brcms_pub *drvr)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index 1860c572b3c..4fb9635d391 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -1015,9 +1015,10 @@ static bool dma64_txidle(struct dma_info *di)
/*
* post receive buffers
- * return false is refill failed completely and ring is empty this will stall
- * the rx dma and user might want to call rxfill again asap. This unlikely
- * happens on memory-rich NIC, but often on memory-constrained dongle
+ * Return false if the refill failed completely or a DMA mapping failed. In
+ * that case the ring is empty, which will stall the rx DMA, and the caller
+ * may want to call rxfill again asap. This is unlikely on a memory-rich NIC,
+ * but common on a memory-constrained dongle.
*/
bool dma_rxfill(struct dma_pub *pub)
{
@@ -1078,6 +1079,8 @@ bool dma_rxfill(struct dma_pub *pub)
pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(di->dmadev, pa))
+ return false;
/* save the free packet pointer */
di->rxp[rxout] = p;
@@ -1284,7 +1287,11 @@ static void dma_txenq(struct dma_info *di, struct sk_buff *p)
/* get physical address of buffer start */
pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
-
+ /* if mapping failed, free skb */
+ if (dma_mapping_error(di->dmadev, pa)) {
+ brcmu_pkt_buf_free_skb(p);
+ return;
+ }
/* With a DMA segment list, Descriptor table is filled
* using the segment list instead of looping over
* buffers in multi-chain DMA. Therefore, EOF for SGLIST
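
Both dma.c hunks above apply the same rule: a dma_map_single() result must be checked with dma_mapping_error() before the address is handed to the hardware. A minimal, self-contained sketch of that check (hypothetical helper, not driver code):

#include <linux/dma-mapping.h>

/* Map a receive buffer and verify the mapping before posting it to the
 * ring; on failure the caller frees or recycles the buffer. */
static int demo_map_rx_buf(struct device *dev, void *data, size_t len,
			   dma_addr_t *pa)
{
	*pa = dma_map_single(dev, data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *pa))
		return -ENOMEM;
	return 0;
}
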
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 9fd6f2fef11..4608e0eb149 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -882,8 +882,8 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
mcl = le16_to_cpu(txh->MacTxControlLow);
if (txs->phyerr)
- brcms_err(wlc->hw->d11core, "phyerr 0x%x, rate 0x%x\n",
- txs->phyerr, txh->MainRates);
+ brcms_dbg_tx(wlc->hw->d11core, "phyerr 0x%x, rate 0x%x\n",
+ txs->phyerr, txh->MainRates);
if (txs->frameid != le16_to_cpu(txh->TxFrameID)) {
brcms_err(wlc->hw->d11core, "frameid != txh->TxFrameID\n");
@@ -4652,7 +4652,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
wlc->band->phyrev = wlc_hw->band->phyrev;
wlc->band->radioid = wlc_hw->band->radioid;
wlc->band->radiorev = wlc_hw->band->radiorev;
-
+ brcms_dbg_info(core, "wl%d: phy %u/%u radio %x/%u\n", unit,
+ wlc->band->phytype, wlc->band->phyrev,
+ wlc->band->radioid, wlc->band->radiorev);
/* default contention windows size limits */
wlc_hw->band->CWmin = APHY_CWMIN;
wlc_hw->band->CWmax = PHY_CWMAX;
@@ -4667,7 +4669,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
brcms_c_coredisable(wlc_hw);
/* Match driver "down" state */
- ai_pci_down(wlc_hw->sih);
+ bcma_core_pci_down(wlc_hw->d11core->bus);
/* turn off pll and xtal to match driver "down" state */
brcms_b_xtal(wlc_hw, OFF);
@@ -5010,12 +5012,12 @@ static int brcms_b_up_prep(struct brcms_hardware *wlc_hw)
*/
if (brcms_b_radio_read_hwdisabled(wlc_hw)) {
/* put SB PCI in down state again */
- ai_pci_down(wlc_hw->sih);
+ bcma_core_pci_down(wlc_hw->d11core->bus);
brcms_b_xtal(wlc_hw, OFF);
return -ENOMEDIUM;
}
- ai_pci_up(wlc_hw->sih);
+ bcma_core_pci_up(wlc_hw->d11core->bus);
/* reset the d11 core */
brcms_b_corereset(wlc_hw, BRCMS_USE_COREFLAGS);
@@ -5212,7 +5214,7 @@ static int brcms_b_down_finish(struct brcms_hardware *wlc_hw)
/* turn off primary xtal and pll */
if (!wlc_hw->noreset) {
- ai_pci_down(wlc_hw->sih);
+ bcma_core_pci_down(wlc_hw->d11core->bus);
brcms_b_xtal(wlc_hw, OFF);
}
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index 3d6b16ce468..b2d6d6da3da 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -1137,8 +1137,9 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
gain0_15 = ((biq1 & 0xf) << 12) |
((tia & 0xf) << 8) |
((lna2 & 0x3) << 6) |
- ((lna2 &
- 0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0);
+ ((lna2 & 0x3) << 4) |
+ ((lna1 & 0x3) << 2) |
+ ((lna1 & 0x3) << 0);
mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0);
mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0);
@@ -1328,6 +1329,43 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples)
return (iq_est.i_pwr + iq_est.q_pwr) / nsamples;
}
+static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain,
+ u16 tia_gain, u16 lna2_gain)
+{
+ u32 i_thresh_l, q_thresh_l;
+ u32 i_thresh_h, q_thresh_h;
+ struct lcnphy_iq_est iq_est_h, iq_est_l;
+
+ wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain,
+ lna2_gain, 0);
+
+ wlc_lcnphy_rx_gain_override_enable(pi, true);
+ wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0);
+ udelay(500);
+ write_radio_reg(pi, RADIO_2064_REG112, 0);
+ if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l))
+ return false;
+
+ wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0);
+ udelay(500);
+ write_radio_reg(pi, RADIO_2064_REG112, 0);
+ if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h))
+ return false;
+
+ i_thresh_l = (iq_est_l.i_pwr << 1);
+ i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr;
+
+ q_thresh_l = (iq_est_l.q_pwr << 1);
+ q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr;
+ if ((iq_est_h.i_pwr > i_thresh_l) &&
+ (iq_est_h.i_pwr < i_thresh_h) &&
+ (iq_est_h.q_pwr > q_thresh_l) &&
+ (iq_est_h.q_pwr < q_thresh_h))
+ return true;
+
+ return false;
+}
+
static bool
wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
const struct lcnphy_rx_iqcomp *iqcomp,
@@ -1342,8 +1380,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old,
rfoverride3_old, rfoverride3val_old, rfoverride4_old,
rfoverride4val_old, afectrlovr_old, afectrlovrval_old;
- int tia_gain;
- u32 received_power, rx_pwr_threshold;
+ int tia_gain, lna2_gain, biq1_gain;
+ bool set_gain;
u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl;
u16 values_to_save[11];
s16 *ptr;
@@ -1368,126 +1406,125 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
goto cal_done;
}
- if (module == 1) {
+ WARN_ON(module != 1);
+ tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
+ wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
- tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
- wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
+ for (i = 0; i < 11; i++)
+ values_to_save[i] =
+ read_radio_reg(pi, rxiq_cal_rf_reg[i]);
+ Core1TxControl_old = read_phy_reg(pi, 0x631);
+
+ or_phy_reg(pi, 0x631, 0x0015);
+
+ RFOverride0_old = read_phy_reg(pi, 0x44c);
+ RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
+ rfoverride2_old = read_phy_reg(pi, 0x4b0);
+ rfoverride2val_old = read_phy_reg(pi, 0x4b1);
+ rfoverride3_old = read_phy_reg(pi, 0x4f9);
+ rfoverride3val_old = read_phy_reg(pi, 0x4fa);
+ rfoverride4_old = read_phy_reg(pi, 0x938);
+ rfoverride4val_old = read_phy_reg(pi, 0x939);
+ afectrlovr_old = read_phy_reg(pi, 0x43b);
+ afectrlovrval_old = read_phy_reg(pi, 0x43c);
+ old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
+ old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
- for (i = 0; i < 11; i++)
- values_to_save[i] =
- read_radio_reg(pi, rxiq_cal_rf_reg[i]);
- Core1TxControl_old = read_phy_reg(pi, 0x631);
-
- or_phy_reg(pi, 0x631, 0x0015);
-
- RFOverride0_old = read_phy_reg(pi, 0x44c);
- RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
- rfoverride2_old = read_phy_reg(pi, 0x4b0);
- rfoverride2val_old = read_phy_reg(pi, 0x4b1);
- rfoverride3_old = read_phy_reg(pi, 0x4f9);
- rfoverride3val_old = read_phy_reg(pi, 0x4fa);
- rfoverride4_old = read_phy_reg(pi, 0x938);
- rfoverride4val_old = read_phy_reg(pi, 0x939);
- afectrlovr_old = read_phy_reg(pi, 0x43b);
- afectrlovrval_old = read_phy_reg(pi, 0x43c);
- old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
- old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
-
- tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
- if (tx_gain_override_old) {
- wlc_lcnphy_get_tx_gain(pi, &old_gains);
- tx_gain_index_old = pi_lcn->lcnphy_current_index;
- }
+ tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
+ if (tx_gain_override_old) {
+ wlc_lcnphy_get_tx_gain(pi, &old_gains);
+ tx_gain_index_old = pi_lcn->lcnphy_current_index;
+ }
- wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
+ wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
- mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
- mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
+ mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
+ mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
- mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
- mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
+ mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
+ mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
- write_radio_reg(pi, RADIO_2064_REG116, 0x06);
- write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
- write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
- write_radio_reg(pi, RADIO_2064_REG098, 0x03);
- write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
- mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
- write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
- write_radio_reg(pi, RADIO_2064_REG114, 0x01);
- write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
- write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
-
- mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
- mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
- mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
- mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
- mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
- mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
- mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
- mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
- mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
- mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
-
- mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
- mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
-
- wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0);
- write_phy_reg(pi, 0x6da, 0xffff);
- or_phy_reg(pi, 0x6db, 0x3);
- wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
- wlc_lcnphy_rx_gain_override_enable(pi, true);
-
- tia_gain = 8;
- rx_pwr_threshold = 950;
- while (tia_gain > 0) {
- tia_gain -= 1;
- wlc_lcnphy_set_rx_gain_by_distribution(pi,
- 0, 0, 2, 2,
- (u16)
- tia_gain, 1, 0);
- udelay(500);
+ write_radio_reg(pi, RADIO_2064_REG116, 0x06);
+ write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
+ write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
+ write_radio_reg(pi, RADIO_2064_REG098, 0x03);
+ write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
+ mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
+ write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
+ write_radio_reg(pi, RADIO_2064_REG114, 0x01);
+ write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
+ write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
+
+ mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
+ mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
+ mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
+ mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
+ mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
+ mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
+ mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
+ mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
+ mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
+ mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
- received_power =
- wlc_lcnphy_measure_digital_power(pi, 2000);
- if (received_power < rx_pwr_threshold)
- break;
- }
- result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff);
+ mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
+ mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
- wlc_lcnphy_stop_tx_tone(pi);
+ write_phy_reg(pi, 0x6da, 0xffff);
+ or_phy_reg(pi, 0x6db, 0x3);
+
+ wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
+ for (lna2_gain = 3; lna2_gain >= 0; lna2_gain--) {
+ for (tia_gain = 4; tia_gain >= 0; tia_gain--) {
+ for (biq1_gain = 6; biq1_gain >= 0; biq1_gain--) {
+ set_gain = wlc_lcnphy_rx_iq_cal_gain(pi,
+ (u16)
+ biq1_gain,
+ (u16)
+ tia_gain,
+ (u16)
+ lna2_gain);
+ if (!set_gain)
+ continue;
+
+ result = wlc_lcnphy_calc_rx_iq_comp(pi, 1024);
+ goto stop_tone;
+ }
+ }
+ }
- write_phy_reg(pi, 0x631, Core1TxControl_old);
+stop_tone:
+ wlc_lcnphy_stop_tx_tone(pi);
- write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
- write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
- write_phy_reg(pi, 0x4b0, rfoverride2_old);
- write_phy_reg(pi, 0x4b1, rfoverride2val_old);
- write_phy_reg(pi, 0x4f9, rfoverride3_old);
- write_phy_reg(pi, 0x4fa, rfoverride3val_old);
- write_phy_reg(pi, 0x938, rfoverride4_old);
- write_phy_reg(pi, 0x939, rfoverride4val_old);
- write_phy_reg(pi, 0x43b, afectrlovr_old);
- write_phy_reg(pi, 0x43c, afectrlovrval_old);
- write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
- write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
+ write_phy_reg(pi, 0x631, Core1TxControl_old);
+
+ write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
+ write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
+ write_phy_reg(pi, 0x4b0, rfoverride2_old);
+ write_phy_reg(pi, 0x4b1, rfoverride2val_old);
+ write_phy_reg(pi, 0x4f9, rfoverride3_old);
+ write_phy_reg(pi, 0x4fa, rfoverride3val_old);
+ write_phy_reg(pi, 0x938, rfoverride4_old);
+ write_phy_reg(pi, 0x939, rfoverride4val_old);
+ write_phy_reg(pi, 0x43b, afectrlovr_old);
+ write_phy_reg(pi, 0x43c, afectrlovrval_old);
+ write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
+ write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
- wlc_lcnphy_clear_trsw_override(pi);
+ wlc_lcnphy_clear_trsw_override(pi);
- mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
+ mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
- for (i = 0; i < 11; i++)
- write_radio_reg(pi, rxiq_cal_rf_reg[i],
- values_to_save[i]);
+ for (i = 0; i < 11; i++)
+ write_radio_reg(pi, rxiq_cal_rf_reg[i],
+ values_to_save[i]);
- if (tx_gain_override_old)
- wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
- else
- wlc_lcnphy_disable_tx_gain_override(pi);
+ if (tx_gain_override_old)
+ wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
+ else
+ wlc_lcnphy_disable_tx_gain_override(pi);
- wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
- wlc_lcnphy_rx_gain_override_enable(pi, false);
- }
+ wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
+ wlc_lcnphy_rx_gain_override_enable(pi, false);
cal_done:
kfree(ptr);
@@ -1789,6 +1826,19 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
write_radio_reg(pi, RADIO_2064_REG038, 3);
write_radio_reg(pi, RADIO_2064_REG091, 7);
}
+
+ if (!(pi->sh->boardflags & BFL_FEM)) {
+ static const u8 reg038[14] = {
+ 0xd, 0xe, 0xd, 0xd, 0xd, 0xc, 0xa,
+ 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0
+ };
+
+ write_radio_reg(pi, RADIO_2064_REG02A, 0xf);
+ write_radio_reg(pi, RADIO_2064_REG091, 0x3);
+ write_radio_reg(pi, RADIO_2064_REG038, 0x3);
+
+ write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]);
+ }
}
static int
@@ -1983,6 +2033,16 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos)
} else {
mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1);
mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
+ mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0);
+ mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2);
+ mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0);
+ mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4);
+ mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
+ mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77);
+ mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1);
+ mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7);
+ mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1);
+ mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4);
}
} else {
mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2);
@@ -2069,13 +2129,23 @@ static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi)
(auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12));
mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5));
+ mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0));
}
static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
{
struct phytbl_info tab;
u32 rfseq, ind;
+ enum lcnphy_tssi_mode mode;
+ u8 tssi_sel;
+ if (pi->sh->boardflags & BFL_FEM) {
+ tssi_sel = 0x1;
+ mode = LCNPHY_TSSI_EXT;
+ } else {
+ tssi_sel = 0xe;
+ mode = LCNPHY_TSSI_POST_PA;
+ }
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
tab.tbl_width = 32;
tab.tbl_ptr = &ind;
@@ -2096,7 +2166,7 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4);
- wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
+ wlc_lcnphy_set_tssi_mux(pi, mode);
mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14);
mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15);
@@ -2132,9 +2202,10 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0);
if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
- mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe);
+ mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel);
mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4);
} else {
+ mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1);
mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1);
mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3);
}
@@ -2181,6 +2252,10 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8);
+ mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0);
+ mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
+ mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
+
wlc_lcnphy_pwrctrl_rssiparams(pi);
}
@@ -2799,6 +2874,8 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
read_radio_reg(pi, RADIO_2064_REG007) & 1;
u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
+ u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi);
+
idleTssi = read_phy_reg(pi, 0x4ab);
suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
MCTL_EN_MAC));
@@ -2816,6 +2893,12 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4);
mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2);
wlc_lcnphy_tssi_setup(pi);
+
+ mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0));
+ mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6));
+
+ wlc_lcnphy_set_bbmult(pi, 0x0);
+
wlc_phy_do_dummy_tx(pi, true, OFF);
idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0))
>> 0);
@@ -2837,6 +2920,7 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12);
+ wlc_lcnphy_set_bbmult(pi, SAVE_bbmult);
wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old);
wlc_lcnphy_set_tx_gain(pi, &old_gains);
wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl);
@@ -3050,6 +3134,11 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
wlc_lcnphy_write_table(pi, &tab);
tab.tbl_offset++;
}
+ mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0);
+ mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0);
+ mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8);
+ mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4);
+ mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2);
mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7);
@@ -3851,7 +3940,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
target_gains.pad_gain = 21;
target_gains.dac_gain = 0;
wlc_lcnphy_set_tx_gain(pi, &target_gains);
- wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) {
@@ -3862,6 +3950,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
lcnphy_recal ? LCNPHY_CAL_RECAL :
LCNPHY_CAL_FULL), false);
} else {
+ wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
wlc_lcnphy_tx_iqlo_soft_cal_full(pi);
}
@@ -4283,20 +4372,20 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi,
u16 pa_gain;
u16 gm_gain;
- if (CHSPEC_IS5G(pi->radio_chanspec))
- pa_gain = 0x70;
- else
- pa_gain = 0x70;
-
if (pi->sh->boardflags & BFL_FEM)
pa_gain = 0x10;
+ else
+ pa_gain = 0x60;
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
tab.tbl_width = 32;
tab.tbl_len = 1;
tab.tbl_ptr = &val;
+ /* fixed gm_gain value for iPA */
+ gm_gain = 15;
for (j = 0; j < 128; j++) {
- gm_gain = gain_table[j].gm;
+ if (pi->sh->boardflags & BFL_FEM)
+ gm_gain = gain_table[j].gm;
val = (((u32) pa_gain << 24) |
(gain_table[j].pad << 16) |
(gain_table[j].pga << 8) | gm_gain);
@@ -4507,7 +4596,10 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
write_phy_reg(pi, 0x4ea, 0x4688);
- mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
+ if (pi->sh->boardflags & BFL_FEM)
+ mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
+ else
+ mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0);
mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6);
@@ -4518,6 +4610,13 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
wlc_lcnphy_rcal(pi);
wlc_lcnphy_rc_cal(pi);
+
+ if (!(pi->sh->boardflags & BFL_FEM)) {
+ write_radio_reg(pi, RADIO_2064_REG032, 0x6f);
+ write_radio_reg(pi, RADIO_2064_REG033, 0x19);
+ write_radio_reg(pi, RADIO_2064_REG039, 0xe);
+ }
+
}
static void wlc_lcnphy_radio_init(struct brcms_phy *pi)
@@ -4530,6 +4629,7 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
uint idx;
u8 phybw40;
struct phytbl_info tab;
+ const struct phytbl_info *tb;
u32 val;
phybw40 = CHSPEC_IS40(pi->radio_chanspec);
@@ -4547,22 +4647,20 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
wlc_lcnphy_write_table(pi, &tab);
}
- tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
- tab.tbl_width = 16;
- tab.tbl_ptr = &val;
- tab.tbl_len = 1;
-
- val = 114;
- tab.tbl_offset = 0;
- wlc_lcnphy_write_table(pi, &tab);
+ if (!(pi->sh->boardflags & BFL_FEM)) {
+ tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
+ tab.tbl_width = 16;
+ tab.tbl_ptr = &val;
+ tab.tbl_len = 1;
- val = 130;
- tab.tbl_offset = 1;
- wlc_lcnphy_write_table(pi, &tab);
+ val = 150;
+ tab.tbl_offset = 0;
+ wlc_lcnphy_write_table(pi, &tab);
- val = 6;
- tab.tbl_offset = 8;
- wlc_lcnphy_write_table(pi, &tab);
+ val = 220;
+ tab.tbl_offset = 1;
+ wlc_lcnphy_write_table(pi, &tab);
+ }
if (CHSPEC_IS2G(pi->radio_chanspec)) {
if (pi->sh->boardflags & BFL_FEM)
@@ -4576,7 +4674,6 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
}
if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
- const struct phytbl_info *tb;
int l;
if (CHSPEC_IS2G(pi->radio_chanspec)) {
@@ -4597,21 +4694,22 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
wlc_lcnphy_write_table(pi, &tb[idx]);
}
- if ((pi->sh->boardflags & BFL_FEM)
- && !(pi->sh->boardflags & BFL_FEM_BT))
- wlc_lcnphy_write_table(pi, &dot11lcn_sw_ctrl_tbl_info_4313_epa);
- else if (pi->sh->boardflags & BFL_FEM_BT) {
- if (pi->sh->boardrev < 0x1250)
- wlc_lcnphy_write_table(
- pi,
- &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa);
+ if (pi->sh->boardflags & BFL_FEM) {
+ if (pi->sh->boardflags & BFL_FEM_BT) {
+ if (pi->sh->boardrev < 0x1250)
+ tb = &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa;
+ else
+ tb = &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250;
+ } else {
+ tb = &dot11lcn_sw_ctrl_tbl_info_4313_epa;
+ }
+ } else {
+ if (pi->sh->boardflags & BFL_FEM_BT)
+ tb = &dot11lcn_sw_ctrl_tbl_info_4313_bt_ipa;
else
- wlc_lcnphy_write_table(
- pi,
- &dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250);
- } else
- wlc_lcnphy_write_table(pi, &dot11lcn_sw_ctrl_tbl_info_4313);
-
+ tb = &dot11lcn_sw_ctrl_tbl_info_4313;
+ }
+ wlc_lcnphy_write_table(pi, tb);
wlc_lcnphy_load_rfpower(pi);
wlc_lcnphy_clear_papd_comptable(pi);
@@ -4955,6 +5053,8 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec)
wlc_lcnphy_load_tx_iir_filter(pi, true, 3);
mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3);
+ if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi))
+ wlc_lcnphy_tssi_setup(pi);
}
void wlc_phy_detach_lcnphy(struct brcms_phy *pi)
@@ -4993,8 +5093,7 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
return false;
- if ((pi->sh->boardflags & BFL_FEM) &&
- (LCNREV_IS(pi->pubpi.phy_rev, 1))) {
+ if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
if (pi_lcn->lcnphy_tempsense_option == 3) {
pi->hwpwrctrl = true;
pi->hwpwrctrl_capable = true;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
index 622c01ca72c..d7fa312214f 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
@@ -1507,117 +1507,103 @@ static const u32 dot11lcn_gain_tbl_5G[] = {
const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev0[] = {
{&dot11lcn_gain_tbl_rev0,
- sizeof(dot11lcn_gain_tbl_rev0) / sizeof(dot11lcn_gain_tbl_rev0[0]), 18,
+ ARRAY_SIZE(dot11lcn_gain_tbl_rev0), 18,
0, 32}
,
{&dot11lcn_aux_gain_idx_tbl_rev0,
- sizeof(dot11lcn_aux_gain_idx_tbl_rev0) /
- sizeof(dot11lcn_aux_gain_idx_tbl_rev0[0]), 14, 0, 16}
+ ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_rev0), 14, 0, 16}
,
{&dot11lcn_gain_idx_tbl_rev0,
- sizeof(dot11lcn_gain_idx_tbl_rev0) /
- sizeof(dot11lcn_gain_idx_tbl_rev0[0]), 13, 0, 32}
+ ARRAY_SIZE(dot11lcn_gain_idx_tbl_rev0), 13, 0, 32}
,
};
static const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev1[] = {
{&dot11lcn_gain_tbl_rev1,
- sizeof(dot11lcn_gain_tbl_rev1) / sizeof(dot11lcn_gain_tbl_rev1[0]), 18,
+ ARRAY_SIZE(dot11lcn_gain_tbl_rev1), 18,
0, 32}
,
{&dot11lcn_aux_gain_idx_tbl_rev0,
- sizeof(dot11lcn_aux_gain_idx_tbl_rev0) /
- sizeof(dot11lcn_aux_gain_idx_tbl_rev0[0]), 14, 0, 16}
+ ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_rev0), 14, 0, 16}
,
{&dot11lcn_gain_idx_tbl_rev0,
- sizeof(dot11lcn_gain_idx_tbl_rev0) /
- sizeof(dot11lcn_gain_idx_tbl_rev0[0]), 13, 0, 32}
+ ARRAY_SIZE(dot11lcn_gain_idx_tbl_rev0), 13, 0, 32}
,
};
const struct phytbl_info dot11lcnphytbl_rx_gain_info_2G_rev2[] = {
{&dot11lcn_gain_tbl_2G,
- sizeof(dot11lcn_gain_tbl_2G) / sizeof(dot11lcn_gain_tbl_2G[0]), 18, 0,
+ ARRAY_SIZE(dot11lcn_gain_tbl_2G), 18, 0,
32}
,
{&dot11lcn_aux_gain_idx_tbl_2G,
- sizeof(dot11lcn_aux_gain_idx_tbl_2G) /
- sizeof(dot11lcn_aux_gain_idx_tbl_2G[0]), 14, 0, 16}
+ ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_2G), 14, 0, 16}
,
{&dot11lcn_gain_idx_tbl_2G,
- sizeof(dot11lcn_gain_idx_tbl_2G) / sizeof(dot11lcn_gain_idx_tbl_2G[0]),
+ ARRAY_SIZE(dot11lcn_gain_idx_tbl_2G),
13, 0, 32}
,
{&dot11lcn_gain_val_tbl_2G,
- sizeof(dot11lcn_gain_val_tbl_2G) / sizeof(dot11lcn_gain_val_tbl_2G[0]),
+ ARRAY_SIZE(dot11lcn_gain_val_tbl_2G),
17, 0, 8}
};
const struct phytbl_info dot11lcnphytbl_rx_gain_info_5G_rev2[] = {
{&dot11lcn_gain_tbl_5G,
- sizeof(dot11lcn_gain_tbl_5G) / sizeof(dot11lcn_gain_tbl_5G[0]), 18, 0,
+ ARRAY_SIZE(dot11lcn_gain_tbl_5G), 18, 0,
32}
,
{&dot11lcn_aux_gain_idx_tbl_5G,
- sizeof(dot11lcn_aux_gain_idx_tbl_5G) /
- sizeof(dot11lcn_aux_gain_idx_tbl_5G[0]), 14, 0, 16}
+ ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_5G), 14, 0, 16}
,
{&dot11lcn_gain_idx_tbl_5G,
- sizeof(dot11lcn_gain_idx_tbl_5G) / sizeof(dot11lcn_gain_idx_tbl_5G[0]),
+ ARRAY_SIZE(dot11lcn_gain_idx_tbl_5G),
13, 0, 32}
,
{&dot11lcn_gain_val_tbl_5G,
- sizeof(dot11lcn_gain_val_tbl_5G) / sizeof(dot11lcn_gain_val_tbl_5G[0]),
+ ARRAY_SIZE(dot11lcn_gain_val_tbl_5G),
17, 0, 8}
};
const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_2G_rev2[] = {
{&dot11lcn_gain_tbl_extlna_2G,
- sizeof(dot11lcn_gain_tbl_extlna_2G) /
- sizeof(dot11lcn_gain_tbl_extlna_2G[0]), 18, 0, 32}
+ ARRAY_SIZE(dot11lcn_gain_tbl_extlna_2G), 18, 0, 32}
,
{&dot11lcn_aux_gain_idx_tbl_extlna_2G,
- sizeof(dot11lcn_aux_gain_idx_tbl_extlna_2G) /
- sizeof(dot11lcn_aux_gain_idx_tbl_extlna_2G[0]), 14, 0, 16}
+ ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_extlna_2G), 14, 0, 16}
,
{&dot11lcn_gain_idx_tbl_extlna_2G,
- sizeof(dot11lcn_gain_idx_tbl_extlna_2G) /
- sizeof(dot11lcn_gain_idx_tbl_extlna_2G[0]), 13, 0, 32}
+ ARRAY_SIZE(dot11lcn_gain_idx_tbl_extlna_2G), 13, 0, 32}
,
{&dot11lcn_gain_val_tbl_extlna_2G,
- sizeof(dot11lcn_gain_val_tbl_extlna_2G) /
- sizeof(dot11lcn_gain_val_tbl_extlna_2G[0]), 17, 0, 8}
+ ARRAY_SIZE(dot11lcn_gain_val_tbl_extlna_2G), 17, 0, 8}
};
const struct phytbl_info dot11lcnphytbl_rx_gain_info_extlna_5G_rev2[] = {
{&dot11lcn_gain_tbl_5G,
- sizeof(dot11lcn_gain_tbl_5G) / sizeof(dot11lcn_gain_tbl_5G[0]), 18, 0,
+ ARRAY_SIZE(dot11lcn_gain_tbl_5G), 18, 0,
32}
,
{&dot11lcn_aux_gain_idx_tbl_5G,
- sizeof(dot11lcn_aux_gain_idx_tbl_5G) /
- sizeof(dot11lcn_aux_gain_idx_tbl_5G[0]), 14, 0, 16}
+ ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_5G), 14, 0, 16}
,
{&dot11lcn_gain_idx_tbl_5G,
- sizeof(dot11lcn_gain_idx_tbl_5G) / sizeof(dot11lcn_gain_idx_tbl_5G[0]),
+ ARRAY_SIZE(dot11lcn_gain_idx_tbl_5G),
13, 0, 32}
,
{&dot11lcn_gain_val_tbl_5G,
- sizeof(dot11lcn_gain_val_tbl_5G) / sizeof(dot11lcn_gain_val_tbl_5G[0]),
+ ARRAY_SIZE(dot11lcn_gain_val_tbl_5G),
17, 0, 8}
};
const u32 dot11lcnphytbl_rx_gain_info_sz_rev0 =
- sizeof(dot11lcnphytbl_rx_gain_info_rev0) /
- sizeof(dot11lcnphytbl_rx_gain_info_rev0[0]);
+ ARRAY_SIZE(dot11lcnphytbl_rx_gain_info_rev0);
const u32 dot11lcnphytbl_rx_gain_info_2G_rev2_sz =
- sizeof(dot11lcnphytbl_rx_gain_info_2G_rev2) /
- sizeof(dot11lcnphytbl_rx_gain_info_2G_rev2[0]);
+ ARRAY_SIZE(dot11lcnphytbl_rx_gain_info_2G_rev2);
const u32 dot11lcnphytbl_rx_gain_info_5G_rev2_sz =
- sizeof(dot11lcnphytbl_rx_gain_info_5G_rev2) /
- sizeof(dot11lcnphytbl_rx_gain_info_5G_rev2[0]);
+ ARRAY_SIZE(dot11lcnphytbl_rx_gain_info_5G_rev2);
static const u16 dot11lcn_min_sig_sq_tbl_rev0[] = {
0x014d,
@@ -2058,6 +2044,73 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = {
0x0005,
};
+static const u16 dot11lcn_sw_ctrl_tbl_4313_ipa_rev0_combo[] = {
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+ 0x0005,
+ 0x0006,
+ 0x0009,
+ 0x000a,
+};
+
static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = {
0x0004,
0x0004,
@@ -2771,89 +2824,79 @@ static const u32 dot11lcn_papd_compdelta_tbl_rev0[] = {
const struct phytbl_info dot11lcnphytbl_info_rev0[] = {
{&dot11lcn_min_sig_sq_tbl_rev0,
- sizeof(dot11lcn_min_sig_sq_tbl_rev0) /
- sizeof(dot11lcn_min_sig_sq_tbl_rev0[0]), 2, 0, 16}
+ ARRAY_SIZE(dot11lcn_min_sig_sq_tbl_rev0), 2, 0, 16}
,
{&dot11lcn_noise_scale_tbl_rev0,
- sizeof(dot11lcn_noise_scale_tbl_rev0) /
- sizeof(dot11lcn_noise_scale_tbl_rev0[0]), 1, 0, 16}
+ ARRAY_SIZE(dot11lcn_noise_scale_tbl_rev0), 1, 0, 16}
,
{&dot11lcn_fltr_ctrl_tbl_rev0,
- sizeof(dot11lcn_fltr_ctrl_tbl_rev0) /
- sizeof(dot11lcn_fltr_ctrl_tbl_rev0[0]), 11, 0, 32}
+ ARRAY_SIZE(dot11lcn_fltr_ctrl_tbl_rev0), 11, 0, 32}
,
{&dot11lcn_ps_ctrl_tbl_rev0,
- sizeof(dot11lcn_ps_ctrl_tbl_rev0) /
- sizeof(dot11lcn_ps_ctrl_tbl_rev0[0]), 12, 0, 32}
+ ARRAY_SIZE(dot11lcn_ps_ctrl_tbl_rev0), 12, 0, 32}
,
{&dot11lcn_gain_idx_tbl_rev0,
- sizeof(dot11lcn_gain_idx_tbl_rev0) /
- sizeof(dot11lcn_gain_idx_tbl_rev0[0]), 13, 0, 32}
+ ARRAY_SIZE(dot11lcn_gain_idx_tbl_rev0), 13, 0, 32}
,
{&dot11lcn_aux_gain_idx_tbl_rev0,
- sizeof(dot11lcn_aux_gain_idx_tbl_rev0) /
- sizeof(dot11lcn_aux_gain_idx_tbl_rev0[0]), 14, 0, 16}
+ ARRAY_SIZE(dot11lcn_aux_gain_idx_tbl_rev0), 14, 0, 16}
,
{&dot11lcn_sw_ctrl_tbl_rev0,
- sizeof(dot11lcn_sw_ctrl_tbl_rev0) /
- sizeof(dot11lcn_sw_ctrl_tbl_rev0[0]), 15, 0, 16}
+ ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_rev0), 15, 0, 16}
,
{&dot11lcn_nf_table_rev0,
- sizeof(dot11lcn_nf_table_rev0) / sizeof(dot11lcn_nf_table_rev0[0]), 16,
+ ARRAY_SIZE(dot11lcn_nf_table_rev0), 16,
0, 8}
,
{&dot11lcn_gain_val_tbl_rev0,
- sizeof(dot11lcn_gain_val_tbl_rev0) /
- sizeof(dot11lcn_gain_val_tbl_rev0[0]), 17, 0, 8}
+ ARRAY_SIZE(dot11lcn_gain_val_tbl_rev0), 17, 0, 8}
,
{&dot11lcn_gain_tbl_rev0,
- sizeof(dot11lcn_gain_tbl_rev0) / sizeof(dot11lcn_gain_tbl_rev0[0]), 18,
+ ARRAY_SIZE(dot11lcn_gain_tbl_rev0), 18,
0, 32}
,
{&dot11lcn_spur_tbl_rev0,
- sizeof(dot11lcn_spur_tbl_rev0) / sizeof(dot11lcn_spur_tbl_rev0[0]), 20,
+ ARRAY_SIZE(dot11lcn_spur_tbl_rev0), 20,
0, 8}
,
{&dot11lcn_unsup_mcs_tbl_rev0,
- sizeof(dot11lcn_unsup_mcs_tbl_rev0) /
- sizeof(dot11lcn_unsup_mcs_tbl_rev0[0]), 23, 0, 16}
+ ARRAY_SIZE(dot11lcn_unsup_mcs_tbl_rev0), 23, 0, 16}
,
{&dot11lcn_iq_local_tbl_rev0,
- sizeof(dot11lcn_iq_local_tbl_rev0) /
- sizeof(dot11lcn_iq_local_tbl_rev0[0]), 0, 0, 16}
+ ARRAY_SIZE(dot11lcn_iq_local_tbl_rev0), 0, 0, 16}
,
{&dot11lcn_papd_compdelta_tbl_rev0,
- sizeof(dot11lcn_papd_compdelta_tbl_rev0) /
- sizeof(dot11lcn_papd_compdelta_tbl_rev0[0]), 24, 0, 32}
+ ARRAY_SIZE(dot11lcn_papd_compdelta_tbl_rev0), 24, 0, 32}
,
};
const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313 = {
&dot11lcn_sw_ctrl_tbl_4313_rev0,
- sizeof(dot11lcn_sw_ctrl_tbl_4313_rev0) /
- sizeof(dot11lcn_sw_ctrl_tbl_4313_rev0[0]), 15, 0, 16
+ ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_rev0), 15, 0, 16
+};
+
+const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_ipa = {
+ &dot11lcn_sw_ctrl_tbl_4313_ipa_rev0_combo,
+ ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_ipa_rev0_combo), 15, 0, 16
};
const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa = {
&dot11lcn_sw_ctrl_tbl_4313_epa_rev0,
- sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0) /
- sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0[0]), 15, 0, 16
+ ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_epa_rev0), 15, 0, 16
};
const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa = {
&dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo,
- sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo) /
- sizeof(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo[0]), 15, 0, 16
+ ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_epa_rev0_combo), 15, 0, 16
};
const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250 = {
&dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0,
- sizeof(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0) /
- sizeof(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0[0]), 15, 0, 16
+ ARRAY_SIZE(dot11lcn_sw_ctrl_tbl_4313_bt_epa_p250_rev0), 15, 0, 16
};
const u32 dot11lcnphytbl_info_sz_rev0 =
- sizeof(dot11lcnphytbl_info_rev0) / sizeof(dot11lcnphytbl_info_rev0[0]);
+ ARRAY_SIZE(dot11lcnphytbl_info_rev0);
const struct lcnphy_tx_gain_tbl_entry
dot11lcnphy_2GHz_extPA_gaintable_rev0[128] = {
@@ -2988,134 +3031,134 @@ dot11lcnphy_2GHz_extPA_gaintable_rev0[128] = {
};
const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_2GHz_gaintable_rev0[128] = {
- {7, 0, 31, 0, 72},
- {7, 0, 31, 0, 70},
- {7, 0, 31, 0, 68},
- {7, 0, 30, 0, 67},
- {7, 0, 29, 0, 68},
- {7, 0, 28, 0, 68},
- {7, 0, 27, 0, 69},
- {7, 0, 26, 0, 70},
- {7, 0, 25, 0, 70},
- {7, 0, 24, 0, 71},
- {7, 0, 23, 0, 72},
- {7, 0, 23, 0, 70},
- {7, 0, 22, 0, 71},
- {7, 0, 21, 0, 72},
- {7, 0, 21, 0, 70},
- {7, 0, 21, 0, 68},
- {7, 0, 21, 0, 66},
- {7, 0, 21, 0, 64},
- {7, 0, 21, 0, 63},
- {7, 0, 20, 0, 64},
- {7, 0, 19, 0, 65},
- {7, 0, 19, 0, 64},
- {7, 0, 18, 0, 65},
- {7, 0, 18, 0, 64},
- {7, 0, 17, 0, 65},
- {7, 0, 17, 0, 64},
- {7, 0, 16, 0, 65},
- {7, 0, 16, 0, 64},
- {7, 0, 16, 0, 62},
- {7, 0, 16, 0, 60},
- {7, 0, 16, 0, 58},
- {7, 0, 15, 0, 61},
- {7, 0, 15, 0, 59},
- {7, 0, 14, 0, 61},
- {7, 0, 14, 0, 60},
- {7, 0, 14, 0, 58},
- {7, 0, 13, 0, 60},
- {7, 0, 13, 0, 59},
- {7, 0, 12, 0, 62},
- {7, 0, 12, 0, 60},
- {7, 0, 12, 0, 58},
- {7, 0, 11, 0, 62},
- {7, 0, 11, 0, 60},
- {7, 0, 11, 0, 59},
- {7, 0, 11, 0, 57},
- {7, 0, 10, 0, 61},
- {7, 0, 10, 0, 59},
- {7, 0, 10, 0, 57},
- {7, 0, 9, 0, 62},
- {7, 0, 9, 0, 60},
- {7, 0, 9, 0, 58},
- {7, 0, 9, 0, 57},
- {7, 0, 8, 0, 62},
- {7, 0, 8, 0, 60},
- {7, 0, 8, 0, 58},
- {7, 0, 8, 0, 57},
- {7, 0, 8, 0, 55},
- {7, 0, 7, 0, 61},
+ {15, 0, 31, 0, 72},
+ {15, 0, 31, 0, 70},
+ {15, 0, 31, 0, 68},
+ {15, 0, 30, 0, 68},
+ {15, 0, 29, 0, 69},
+ {15, 0, 28, 0, 69},
+ {15, 0, 27, 0, 70},
+ {15, 0, 26, 0, 70},
+ {15, 0, 25, 0, 71},
+ {15, 0, 24, 0, 72},
+ {15, 0, 23, 0, 73},
+ {15, 0, 23, 0, 71},
+ {15, 0, 22, 0, 72},
+ {15, 0, 21, 0, 73},
+ {15, 0, 21, 0, 71},
+ {15, 0, 21, 0, 69},
+ {15, 0, 21, 0, 67},
+ {15, 0, 21, 0, 65},
+ {15, 0, 21, 0, 63},
+ {15, 0, 20, 0, 65},
+ {15, 0, 19, 0, 66},
+ {15, 0, 19, 0, 64},
+ {15, 0, 18, 0, 66},
+ {15, 0, 18, 0, 64},
+ {15, 0, 17, 0, 66},
+ {15, 0, 17, 0, 64},
+ {15, 0, 16, 0, 66},
+ {15, 0, 16, 0, 64},
+ {15, 0, 16, 0, 62},
+ {15, 0, 16, 0, 61},
+ {15, 0, 16, 0, 59},
+ {15, 0, 15, 0, 61},
+ {15, 0, 15, 0, 59},
+ {15, 0, 14, 0, 62},
+ {15, 0, 14, 0, 60},
+ {15, 0, 14, 0, 58},
+ {15, 0, 13, 0, 61},
+ {15, 0, 13, 0, 59},
+ {15, 0, 12, 0, 62},
+ {15, 0, 12, 0, 61},
+ {15, 0, 12, 0, 59},
+ {15, 0, 11, 0, 62},
+ {15, 0, 11, 0, 61},
+ {15, 0, 11, 0, 59},
+ {15, 0, 11, 0, 57},
+ {15, 0, 10, 0, 61},
+ {15, 0, 10, 0, 59},
+ {15, 0, 10, 0, 58},
+ {15, 0, 9, 0, 62},
+ {15, 0, 9, 0, 61},
+ {15, 0, 9, 0, 59},
+ {15, 0, 9, 0, 57},
+ {15, 0, 8, 0, 62},
+ {15, 0, 8, 0, 61},
+ {15, 0, 8, 0, 59},
+ {15, 0, 8, 0, 57},
+ {15, 0, 8, 0, 56},
+ {15, 0, 8, 0, 54},
+ {15, 0, 8, 0, 53},
+ {15, 0, 8, 0, 51},
+ {15, 0, 8, 0, 50},
+ {7, 0, 7, 0, 69},
+ {7, 0, 7, 0, 67},
+ {7, 0, 7, 0, 65},
+ {7, 0, 7, 0, 64},
+ {7, 0, 7, 0, 62},
{7, 0, 7, 0, 60},
{7, 0, 7, 0, 58},
- {7, 0, 7, 0, 56},
+ {7, 0, 7, 0, 57},
{7, 0, 7, 0, 55},
{7, 0, 6, 0, 62},
- {7, 0, 6, 0, 60},
- {7, 0, 6, 0, 58},
+ {7, 0, 6, 0, 61},
+ {7, 0, 6, 0, 59},
{7, 0, 6, 0, 57},
- {7, 0, 6, 0, 55},
+ {7, 0, 6, 0, 56},
{7, 0, 6, 0, 54},
- {7, 0, 6, 0, 52},
+ {7, 0, 6, 0, 53},
{7, 0, 5, 0, 61},
- {7, 0, 5, 0, 59},
- {7, 0, 5, 0, 57},
+ {7, 0, 5, 0, 60},
+ {7, 0, 5, 0, 58},
{7, 0, 5, 0, 56},
- {7, 0, 5, 0, 54},
+ {7, 0, 5, 0, 55},
{7, 0, 5, 0, 53},
- {7, 0, 5, 0, 51},
- {7, 0, 4, 0, 62},
- {7, 0, 4, 0, 60},
- {7, 0, 4, 0, 58},
+ {7, 0, 5, 0, 52},
+ {7, 0, 5, 0, 50},
+ {7, 0, 5, 0, 49},
+ {7, 0, 5, 0, 47},
{7, 0, 4, 0, 57},
- {7, 0, 4, 0, 55},
+ {7, 0, 4, 0, 56},
{7, 0, 4, 0, 54},
- {7, 0, 4, 0, 52},
+ {7, 0, 4, 0, 53},
{7, 0, 4, 0, 51},
- {7, 0, 4, 0, 49},
+ {7, 0, 4, 0, 50},
{7, 0, 4, 0, 48},
+ {7, 0, 4, 0, 47},
{7, 0, 4, 0, 46},
- {7, 0, 3, 0, 60},
- {7, 0, 3, 0, 58},
- {7, 0, 3, 0, 57},
- {7, 0, 3, 0, 55},
- {7, 0, 3, 0, 54},
- {7, 0, 3, 0, 52},
+ {7, 0, 4, 0, 44},
+ {7, 0, 4, 0, 43},
+ {7, 0, 4, 0, 42},
+ {7, 0, 4, 0, 41},
+ {7, 0, 4, 0, 40},
{7, 0, 3, 0, 51},
- {7, 0, 3, 0, 49},
+ {7, 0, 3, 0, 50},
{7, 0, 3, 0, 48},
+ {7, 0, 3, 0, 47},
{7, 0, 3, 0, 46},
- {7, 0, 3, 0, 45},
{7, 0, 3, 0, 44},
{7, 0, 3, 0, 43},
+ {7, 0, 3, 0, 42},
{7, 0, 3, 0, 41},
- {7, 0, 2, 0, 61},
- {7, 0, 2, 0, 59},
- {7, 0, 2, 0, 57},
- {7, 0, 2, 0, 56},
- {7, 0, 2, 0, 54},
- {7, 0, 2, 0, 53},
- {7, 0, 2, 0, 51},
- {7, 0, 2, 0, 50},
- {7, 0, 2, 0, 48},
- {7, 0, 2, 0, 47},
- {7, 0, 2, 0, 46},
- {7, 0, 2, 0, 44},
- {7, 0, 2, 0, 43},
- {7, 0, 2, 0, 42},
- {7, 0, 2, 0, 41},
- {7, 0, 2, 0, 39},
- {7, 0, 2, 0, 38},
- {7, 0, 2, 0, 37},
- {7, 0, 2, 0, 36},
- {7, 0, 2, 0, 35},
- {7, 0, 2, 0, 34},
- {7, 0, 2, 0, 33},
- {7, 0, 2, 0, 32},
- {7, 0, 1, 0, 63},
- {7, 0, 1, 0, 61},
- {7, 0, 1, 0, 59},
- {7, 0, 1, 0, 57},
+ {3, 0, 3, 0, 56},
+ {3, 0, 3, 0, 54},
+ {3, 0, 3, 0, 53},
+ {3, 0, 3, 0, 51},
+ {3, 0, 3, 0, 50},
+ {3, 0, 3, 0, 48},
+ {3, 0, 3, 0, 47},
+ {3, 0, 3, 0, 46},
+ {3, 0, 3, 0, 44},
+ {3, 0, 3, 0, 43},
+ {3, 0, 3, 0, 42},
+ {3, 0, 3, 0, 41},
+ {3, 0, 3, 0, 39},
+ {3, 0, 3, 0, 38},
+ {3, 0, 3, 0, 37},
+ {3, 0, 3, 0, 36},
+ {3, 0, 3, 0, 35},
+ {3, 0, 3, 0, 34},
};
const struct lcnphy_tx_gain_tbl_entry dot11lcnphy_5GHz_gaintable_rev0[128] = {
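All of the phytbl_lcn.c conversions above replace the open-coded sizeof(x)/sizeof(x[0]) element count with the kernel's ARRAY_SIZE() macro from <linux/kernel.h> (which additionally type-checks that its argument really is an array). A standalone userspace sketch of the idiom, without that type check and with made-up table values:

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

static const unsigned short demo_tbl[] = { 0x0004, 0x0005, 0x0006 };

int main(void)
{
	/* Compile-time constant: the number of elements, here 3. */
	printf("%zu\n", ARRAY_SIZE(demo_tbl));
	return 0;
}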
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h
index 5f75e16bf5a..489422a3608 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.h
@@ -20,6 +20,7 @@
extern const struct phytbl_info dot11lcnphytbl_rx_gain_info_rev0[];
extern const u32 dot11lcnphytbl_rx_gain_info_sz_rev0;
extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313;
+extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_ipa;
extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa;
extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_epa_combo;
extern const struct phytbl_info dot11lcn_sw_ctrl_tbl_info_4313_bt_epa;
diff --git a/drivers/net/wireless/cw1200/bh.c b/drivers/net/wireless/cw1200/bh.c
index c1ec2a4dd8c..92d299aa257 100644
--- a/drivers/net/wireless/cw1200/bh.c
+++ b/drivers/net/wireless/cw1200/bh.c
@@ -465,8 +465,8 @@ static int cw1200_bh(void *arg)
(rx || tx || term || suspend || priv->bh_error);
}), status);
- pr_debug("[BH] - rx: %d, tx: %d, term: %d, suspend: %d, status: %ld\n",
- rx, tx, term, suspend, status);
+ pr_debug("[BH] - rx: %d, tx: %d, term: %d, bh_err: %d, suspend: %d, status: %ld\n",
+ rx, tx, term, suspend, priv->bh_error, status);
/* Did an error occur? */
if ((status < 0 && status != -ERESTARTSYS) ||
diff --git a/drivers/net/wireless/cw1200/main.c b/drivers/net/wireless/cw1200/main.c
index 3724e739cbf..090f01577dd 100644
--- a/drivers/net/wireless/cw1200/main.c
+++ b/drivers/net/wireless/cw1200/main.c
@@ -507,7 +507,7 @@ u32 cw1200_dpll_from_clk(u16 clk_khz)
case 0xCB20: /* 52000 KHz */
return 0x07627091;
default:
- pr_err("Unknown Refclk freq (0x%04x), using 2600KHz\n",
+ pr_err("Unknown Refclk freq (0x%04x), using 26000KHz\n",
clk_khz);
return 0x0EC4F121;
}
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index 7365674366f..010b252be58 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -1406,11 +1406,8 @@ static void cw1200_do_unjoin(struct cw1200_common *priv)
if (!priv->join_status)
goto done;
- if (priv->join_status > CW1200_JOIN_STATUS_IBSS) {
- wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n",
- priv->join_status);
- BUG_ON(1);
- }
+ if (priv->join_status == CW1200_JOIN_STATUS_AP)
+ goto done;
cancel_work_sync(&priv->update_filtering_work);
cancel_work_sync(&priv->set_beacon_wakeup_period_work);
diff --git a/drivers/net/wireless/cw1200/txrx.c b/drivers/net/wireless/cw1200/txrx.c
index 5862c373d71..e824d4d4a18 100644
--- a/drivers/net/wireless/cw1200/txrx.c
+++ b/drivers/net/wireless/cw1200/txrx.c
@@ -1165,7 +1165,7 @@ void cw1200_rx_cb(struct cw1200_common *priv,
if (cw1200_handle_action_rx(priv, skb))
return;
} else if (ieee80211_is_beacon(frame->frame_control) &&
- !arg->status &&
+ !arg->status && priv->vif &&
!memcmp(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid,
ETH_ALEN)) {
const u8 *tim_ie;
diff --git a/drivers/net/wireless/cw1200/wsm.h b/drivers/net/wireless/cw1200/wsm.h
index 7afc613c370..48086e84951 100644
--- a/drivers/net/wireless/cw1200/wsm.h
+++ b/drivers/net/wireless/cw1200/wsm.h
@@ -832,7 +832,7 @@ struct wsm_tx {
/* the MSDU shall be terminated. Overrides the global */
/* dot11MaxTransmitMsduLifeTime setting [optional] */
/* Device will set the default value if this is 0. */
- u32 expire_time;
+ __le32 expire_time;
/* WSM_HT_TX_... */
__le32 ht_tx_parameters;
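The wsm.h hunk above changes only the declared type of expire_time from u32 to __le32; the byte layout on the bus is identical, but sparse can now warn when a host-order value is stored without conversion. A hedged sketch of how such a field is normally filled (struct and function names here are illustrative):

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_wsm_tx {
	__le32 expire_time;		/* device expects little-endian */
	__le32 ht_tx_parameters;
};

static void demo_set_lifetime(struct demo_wsm_tx *tx, u32 lifetime)
{
	/* Convert from host order exactly once, at the boundary. */
	tx->expire_time = cpu_to_le32(lifetime);
}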
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index 6307a4e36c8..c275dc1623f 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -1425,7 +1425,7 @@ static int prism2_hw_init2(struct net_device *dev, int initial)
}
list_for_each(ptr, &local->hostap_interfaces) {
iface = list_entry(ptr, struct hostap_interface, list);
- memcpy(iface->dev->dev_addr, dev->dev_addr, ETH_ALEN);
+ eth_hw_addr_inherit(iface->dev, dev);
}
} else if (local->fw_ap)
prism2_check_sta_fw_version(local);
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index ac074731335..e5090309824 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -523,9 +523,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev,
data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1);
- memcpy(extra, &addr, sizeof(struct sockaddr) * data->length);
+ memcpy(extra, addr, sizeof(struct sockaddr) * data->length);
data->flags = 1; /* has quality information */
- memcpy(extra + sizeof(struct sockaddr) * data->length, &qual,
+ memcpy(extra + sizeof(struct sockaddr) * data->length, qual,
sizeof(struct iw_quality) * data->length);
kfree(addr);
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 15f0fad39ad..a1257c92afc 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -66,7 +66,7 @@ struct net_device * hostap_add_interface(struct local_info *local,
list_add(&iface->list, &local->hostap_interfaces);
mdev = local->dev;
- memcpy(dev->dev_addr, mdev->dev_addr, ETH_ALEN);
+ eth_hw_addr_inherit(dev, mdev);
dev->base_addr = mdev->base_addr;
dev->irq = mdev->irq;
dev->mem_start = mdev->mem_start;
@@ -667,7 +667,7 @@ static int prism2_open(struct net_device *dev)
if (local->no_pri) {
printk(KERN_DEBUG "%s: could not set interface UP - no PRI "
"f/w\n", dev->name);
- return 1;
+ return -ENODEV;
}
if ((local->func->card_present && !local->func->card_present(local)) ||
@@ -682,7 +682,7 @@ static int prism2_open(struct net_device *dev)
printk(KERN_WARNING "%s: could not enable MAC port\n",
dev->name);
prism2_close(dev);
- return 1;
+ return -ENODEV;
}
if (!local->dev_enabled)
prism2_callback(local, PRISM2_CALLBACK_ENABLE);
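Two small conventions show up in the hostap hunks above: eth_hw_addr_inherit() (declared in <linux/etherdevice.h>) replaces the open-coded memcpy() of dev_addr and also records that the address was inherited, and the open handlers now fail with a negative errno rather than a bare 1, which is what the networking core expects. A rough sketch combining both, where demo_open() is a hypothetical ndo_open-style helper:

#include <linux/etherdevice.h>
#include <linux/errno.h>

static int demo_open(struct net_device *dev, struct net_device *master)
{
	/* Take the MAC address from the master interface. */
	eth_hw_addr_inherit(dev, master);

	if (!netif_device_present(master))
		return -ENODEV;		/* negative errno, not 1 */

	return 0;
}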
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index fe31590a51b..aea667b430c 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -887,6 +887,7 @@ il3945_remove_debugfs(void *il, void *il_sta)
*/
static void
il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *il_sta)
{
}
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
index c092033945c..f09e257759d 100644
--- a/drivers/net/wireless/iwlegacy/3945.c
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -475,6 +475,8 @@ il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header)
}
}
+#define SMALL_PACKET_SIZE 256
+
static void
il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
struct ieee80211_rx_status *stats)
@@ -483,14 +485,13 @@ il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
- u16 len = le16_to_cpu(rx_hdr->len);
+ u32 len = le16_to_cpu(rx_hdr->len);
struct sk_buff *skb;
__le16 fc = hdr->frame_control;
+ u32 fraglen = PAGE_SIZE << il->hw_params.rx_page_order;
/* We received data from the HW, so stop the watchdog */
- if (unlikely
- (len + IL39_RX_FRAME_SIZE >
- PAGE_SIZE << il->hw_params.rx_page_order)) {
+ if (unlikely(len + IL39_RX_FRAME_SIZE > fraglen)) {
D_DROP("Corruption detected!\n");
return;
}
@@ -506,26 +507,32 @@ il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
D_INFO("Woke queues - frame received on passive channel\n");
}
- skb = dev_alloc_skb(128);
+ skb = dev_alloc_skb(SMALL_PACKET_SIZE);
if (!skb) {
IL_ERR("dev_alloc_skb failed\n");
return;
}
if (!il3945_mod_params.sw_crypto)
- il_set_decrypted_flag(il, (struct ieee80211_hdr *)rxb_addr(rxb),
+ il_set_decrypted_flag(il, (struct ieee80211_hdr *)pkt,
le32_to_cpu(rx_end->status), stats);
- skb_add_rx_frag(skb, 0, rxb->page,
- (void *)rx_hdr->payload - (void *)pkt, len,
- len);
-
+ /* If frame is small enough to fit into skb->head, copy it
+ * and do not consume a full page
+ */
+ if (len <= SMALL_PACKET_SIZE) {
+ memcpy(skb_put(skb, len), rx_hdr->payload, len);
+ } else {
+ skb_add_rx_frag(skb, 0, rxb->page,
+ (void *)rx_hdr->payload - (void *)pkt, len,
+ fraglen);
+ il->alloc_rxb_page--;
+ rxb->page = NULL;
+ }
il_update_stats(il, false, fc, len);
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
ieee80211_rx(il->hw, skb);
- il->alloc_rxb_page--;
- rxb->page = NULL;
}
#define IL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
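The il3945 RX path above (and the matching il4965 hunk in the next file) adds the same optimization: frames of at most SMALL_PACKET_SIZE bytes are copied into the skb's linear head, and only larger frames keep attaching the receive page as a fragment, which used to pin a full page for every packet. Stripped of the driver bookkeeping, the decision looks roughly like this, where payload, len, page, offset and fraglen come from the RX buffer:

skb = dev_alloc_skb(SMALL_PACKET_SIZE);
if (!skb)
	return;

if (len <= SMALL_PACKET_SIZE) {
	/* Small frame: copy it, the RX page can be reused immediately. */
	memcpy(skb_put(skb, len), payload, len);
} else {
	/* Large frame: hand the whole page to the skb as a fragment. */
	skb_add_rx_frag(skb, 0, page, offset, len, fraglen);
	page = NULL;		/* ownership moved to the skb */
}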
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index b9b2bb51e60..5ab50a5b48b 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -574,9 +574,11 @@ il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
return decrypt_out;
}
+#define SMALL_PACKET_SIZE 256
+
static void
il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
- u16 len, u32 ampdu_status, struct il_rx_buf *rxb,
+ u32 len, u32 ampdu_status, struct il_rx_buf *rxb,
struct ieee80211_rx_status *stats)
{
struct sk_buff *skb;
@@ -598,21 +600,25 @@ il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
il_set_decrypted_flag(il, hdr, ampdu_status, stats))
return;
- skb = dev_alloc_skb(128);
+ skb = dev_alloc_skb(SMALL_PACKET_SIZE);
if (!skb) {
IL_ERR("dev_alloc_skb failed\n");
return;
}
- skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len,
- len);
+ if (len <= SMALL_PACKET_SIZE) {
+ memcpy(skb_put(skb, len), hdr, len);
+ } else {
+ skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb),
+ len, PAGE_SIZE << il->hw_params.rx_page_order);
+ il->alloc_rxb_page--;
+ rxb->page = NULL;
+ }
il_update_stats(il, false, fc, len);
memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
ieee80211_rx(il->hw, skb);
- il->alloc_rxb_page--;
- rxb->page = NULL;
}
/* Called for N_RX (legacy ABG frames), or
@@ -4460,13 +4466,13 @@ il4965_irq_tasklet(struct il_priv *il)
* is killed. Hence update the killswitch state here. The
* rfkill handler will care about restarting if needed.
*/
- if (!test_bit(S_ALIVE, &il->status)) {
- if (hw_rf_kill)
- set_bit(S_RFKILL, &il->status);
- else
- clear_bit(S_RFKILL, &il->status);
- wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
+ if (hw_rf_kill) {
+ set_bit(S_RFKILL, &il->status);
+ } else {
+ clear_bit(S_RFKILL, &il->status);
+ il_force_reset(il, true);
}
+ wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
handled |= CSR_INT_BIT_RF_KILL;
}
@@ -5334,6 +5340,9 @@ il4965_alive_start(struct il_priv *il)
il->active_rate = RATES_MASK;
+ il_power_update_mode(il, true);
+ D_INFO("Updated power mode\n");
+
if (il_is_associated(il)) {
struct il_rxon_cmd *active_rxon =
(struct il_rxon_cmd *)&il->active;
@@ -5364,9 +5373,6 @@ il4965_alive_start(struct il_priv *il)
D_INFO("ALIVE processing complete.\n");
wake_up(&il->wait_command_queue);
- il_power_update_mode(il, true);
- D_INFO("Updated power mode\n");
-
return;
restart:
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index ed3c42a63a4..3ccbaf791b4 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -2803,6 +2803,7 @@ il4965_rs_remove_debugfs(void *il, void *il_sta)
*/
static void
il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *il_sta)
{
}
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 3195aad440d..b03e22ef546 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -4660,6 +4660,7 @@ il_force_reset(struct il_priv *il, bool external)
return 0;
}
+EXPORT_SYMBOL(il_force_reset);
int
il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index cbaa5c2c410..3eb2102ce23 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -22,6 +22,8 @@ config IWLWIFI
Intel Wireless WiFi Link 6150BGN 2 Adapter
Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
Intel 2000 Series Wi-Fi Adapters
+ Intel 7260 Wi-Fi Adapter
+ Intel 3160 Wi-Fi Adapter
This driver uses the kernel's mac80211 subsystem.
@@ -46,17 +48,16 @@ config IWLDVM
depends on IWLWIFI
default IWLWIFI
help
- This is the driver supporting the DVM firmware which is
- currently the only firmware available for existing devices.
+ This is the driver that supports the DVM firmware which is
+ used by most existing devices (with the exception of 7260
+ and 3160).
config IWLMVM
tristate "Intel Wireless WiFi MVM Firmware support"
depends on IWLWIFI
help
- This is the driver supporting the MVM firmware which is
- currently only available for 7000 series devices.
-
- Say yes if you have such a device.
+ This is the driver that supports the MVM firmware which is
+ currently only available for 7260 and 3160 devices.
# don't call it _MODULE -- will confuse Kconfig/fixdep/...
config IWLWIFI_OPMODE_MODULAR
@@ -127,20 +128,3 @@ config IWLWIFI_DEVICE_TRACING
If unsure, say Y so we can help you better when problems
occur.
endmenu
-
-config IWLWIFI_P2P
- def_bool y
- bool "iwlwifi experimental P2P support"
- depends on IWLWIFI
- help
- This option enables experimental P2P support for some devices
- based on microcode support. Since P2P support is still under
- development, this option may even enable it for some devices
- now that turn out to not support it in the future due to
- microcode restrictions.
-
- To determine if your microcode supports the experimental P2P
- offered by this option, check if the driver advertises AP
- support when it is loaded.
-
- Say Y only if you want to experiment with P2P.
diff --git a/drivers/net/wireless/iwlwifi/dvm/agn.h b/drivers/net/wireless/iwlwifi/dvm/agn.h
index 18355110def..f2a86ffc3b4 100644
--- a/drivers/net/wireless/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/iwlwifi/dvm/agn.h
@@ -106,7 +106,6 @@ extern const struct iwl_dvm_cfg iwl_dvm_6030_cfg;
#define STATUS_CHANNEL_SWITCH_PENDING 11
#define STATUS_SCAN_COMPLETE 12
#define STATUS_POWER_PMI 13
-#define STATUS_SCAN_ROC_EXPIRED 14
struct iwl_ucode_capabilities;
@@ -250,7 +249,6 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
/* scan */
void iwlagn_post_scan(struct iwl_priv *priv);
-void iwlagn_disable_roc(struct iwl_priv *priv);
int iwl_force_rf_reset(struct iwl_priv *priv, bool external);
void iwl_init_scan_params(struct iwl_priv *priv);
int iwl_scan_cancel(struct iwl_priv *priv);
@@ -265,10 +263,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
enum iwl_scan_type scan_type,
enum ieee80211_band band);
-void iwl_scan_roc_expired(struct iwl_priv *priv);
-void iwl_scan_offchannel_skb(struct iwl_priv *priv);
-void iwl_scan_offchannel_skb_status(struct iwl_priv *priv);
-
/* For faster active scanning, scan will move to the next channel if fewer than
* PLCP_QUIET_THRESH packets are heard on this channel within
* ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
index d5329489245..d94f8ab1500 100644
--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
@@ -69,19 +69,7 @@
} while (0)
/* file operation */
-#define DEBUGFS_READ_FUNC(name) \
-static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
- char __user *user_buf, \
- size_t count, loff_t *ppos);
-
-#define DEBUGFS_WRITE_FUNC(name) \
-static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
- const char __user *user_buf, \
- size_t count, loff_t *ppos);
-
-
#define DEBUGFS_READ_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.read = iwl_dbgfs_##name##_read, \
.open = simple_open, \
@@ -89,7 +77,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
};
#define DEBUGFS_WRITE_FILE_OPS(name) \
- DEBUGFS_WRITE_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.write = iwl_dbgfs_##name##_write, \
.open = simple_open, \
@@ -98,8 +85,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name); \
- DEBUGFS_WRITE_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.write = iwl_dbgfs_##name##_write, \
.read = iwl_dbgfs_##name##_read, \
diff --git a/drivers/net/wireless/iwlwifi/dvm/dev.h b/drivers/net/wireless/iwlwifi/dvm/dev.h
index 60a4e0d1571..a79fdd137f9 100644
--- a/drivers/net/wireless/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/iwlwifi/dvm/dev.h
@@ -540,7 +540,6 @@ struct iwl_rxon_context {
enum iwl_scan_type {
IWL_SCAN_NORMAL,
IWL_SCAN_RADIO_RESET,
- IWL_SCAN_ROC,
};
/**
@@ -825,12 +824,6 @@ struct iwl_priv {
struct reply_tx_error_statistics reply_tx_stats;
struct reply_agg_tx_error_statistics reply_agg_tx_stats;
- /* remain-on-channel offload support */
- struct ieee80211_channel *hw_roc_channel;
- struct delayed_work hw_roc_disable_work;
- int hw_roc_duration;
- bool hw_roc_setup, hw_roc_start_notified;
-
/* bt coex */
u8 bt_enable_flag;
u8 bt_status;
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 822f1a00efb..cae4d3182e3 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -76,29 +76,6 @@ static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
},
};
-static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_AP),
- },
-};
-
-static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
- {
- .max = 2,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
- },
-};
-
static const struct ieee80211_iface_combination
iwlagn_iface_combinations_dualmode[] = {
{ .num_different_channels = 1,
@@ -114,21 +91,6 @@ iwlagn_iface_combinations_dualmode[] = {
},
};
-static const struct ieee80211_iface_combination
-iwlagn_iface_combinations_p2p[] = {
- { .num_different_channels = 1,
- .max_interfaces = 2,
- .beacon_int_infra_match = true,
- .limits = iwlagn_p2p_sta_go_limits,
- .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
- },
- { .num_different_channels = 1,
- .max_interfaces = 2,
- .limits = iwlagn_p2p_2sta_limits,
- .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
- },
-};
-
/*
* Not a mac80211 entry point function, but it fits in with all the
* other mac80211 functions grouped here.
@@ -186,19 +148,13 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
- if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) {
- hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
- hw->wiphy->n_iface_combinations =
- ARRAY_SIZE(iwlagn_iface_combinations_p2p);
- } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
+ if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
hw->wiphy->iface_combinations =
iwlagn_iface_combinations_dualmode;
hw->wiphy->n_iface_combinations =
ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
}
- hw->wiphy->max_remain_on_channel_duration = 500;
-
hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
WIPHY_FLAG_DISABLE_BEACON_HINTS |
WIPHY_FLAG_IBSS_RSN;
@@ -1068,7 +1024,10 @@ void iwl_chswitch_done(struct iwl_priv *priv, bool is_success)
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
- if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+ if (!test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
+ return;
+
+ if (ctx->vif)
ieee80211_chswitch_done(ctx->vif, is_success);
}
@@ -1156,126 +1115,6 @@ done:
IWL_DEBUG_MAC80211(priv, "leave\n");
}
-static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_channel *channel,
- int duration,
- enum ieee80211_roc_type type)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
- int err = 0;
-
- if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
- return -EOPNOTSUPP;
-
- if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
- return -EOPNOTSUPP;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->mutex);
-
- if (test_bit(STATUS_SCAN_HW, &priv->status)) {
- /* mac80211 should not scan while ROC or ROC while scanning */
- if (WARN_ON_ONCE(priv->scan_type != IWL_SCAN_RADIO_RESET)) {
- err = -EBUSY;
- goto out;
- }
-
- iwl_scan_cancel_timeout(priv, 100);
-
- if (test_bit(STATUS_SCAN_HW, &priv->status)) {
- err = -EBUSY;
- goto out;
- }
- }
-
- priv->hw_roc_channel = channel;
- /* convert from ms to TU */
- priv->hw_roc_duration = DIV_ROUND_UP(1000 * duration, 1024);
- priv->hw_roc_start_notified = false;
- cancel_delayed_work(&priv->hw_roc_disable_work);
-
- if (!ctx->is_active) {
- static const struct iwl_qos_info default_qos_data = {
- .def_qos_parm = {
- .ac[0] = {
- .cw_min = cpu_to_le16(3),
- .cw_max = cpu_to_le16(7),
- .aifsn = 2,
- .edca_txop = cpu_to_le16(1504),
- },
- .ac[1] = {
- .cw_min = cpu_to_le16(7),
- .cw_max = cpu_to_le16(15),
- .aifsn = 2,
- .edca_txop = cpu_to_le16(3008),
- },
- .ac[2] = {
- .cw_min = cpu_to_le16(15),
- .cw_max = cpu_to_le16(1023),
- .aifsn = 3,
- },
- .ac[3] = {
- .cw_min = cpu_to_le16(15),
- .cw_max = cpu_to_le16(1023),
- .aifsn = 7,
- },
- },
- };
-
- ctx->is_active = true;
- ctx->qos_data = default_qos_data;
- ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
- memcpy(ctx->staging.node_addr,
- priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
- ETH_ALEN);
- memcpy(ctx->staging.bssid_addr,
- priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
- ETH_ALEN);
- err = iwlagn_commit_rxon(priv, ctx);
- if (err)
- goto out;
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK |
- RXON_FILTER_PROMISC_MSK |
- RXON_FILTER_CTL2HOST_MSK;
-
- err = iwlagn_commit_rxon(priv, ctx);
- if (err) {
- iwlagn_disable_roc(priv);
- goto out;
- }
- priv->hw_roc_setup = true;
- }
-
- err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band);
- if (err)
- iwlagn_disable_roc(priv);
-
- out:
- mutex_unlock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return err;
-}
-
-static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
-
- if (!(priv->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
- return -EOPNOTSUPP;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->mutex);
- iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
- iwlagn_disable_roc(priv);
- mutex_unlock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return 0;
-}
-
static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_rssi_event rssi_event)
@@ -1431,12 +1270,8 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
viftype, vif->addr);
- cancel_delayed_work_sync(&priv->hw_roc_disable_work);
-
mutex_lock(&priv->mutex);
- iwlagn_disable_roc(priv);
-
if (!iwl_is_ready_rf(priv)) {
IWL_WARN(priv, "Try to add interface when device not ready\n");
err = -EINVAL;
@@ -1763,8 +1598,6 @@ struct ieee80211_ops iwlagn_hw_ops = {
.channel_switch = iwlagn_mac_channel_switch,
.flush = iwlagn_mac_flush,
.tx_last_beacon = iwlagn_mac_tx_last_beacon,
- .remain_on_channel = iwlagn_mac_remain_on_channel,
- .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
.rssi_callback = iwlagn_mac_rssi_callback,
.set_tim = iwlagn_mac_set_tim,
};
diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
index 3952ddf2ddb..7aad766865c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/iwlwifi/dvm/main.c
@@ -587,11 +587,6 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
priv->contexts[IWL_RXON_CTX_PAN].interface_modes =
BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP);
- if (ucode_flags & IWL_UCODE_TLV_FLAGS_P2P)
- priv->contexts[IWL_RXON_CTX_PAN].interface_modes |=
- BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO);
-
priv->contexts[IWL_RXON_CTX_PAN].ap_devtype = RXON_DEV_TYPE_CP;
priv->contexts[IWL_RXON_CTX_PAN].station_devtype = RXON_DEV_TYPE_2STA;
priv->contexts[IWL_RXON_CTX_PAN].unused_devtype = RXON_DEV_TYPE_P2P;
@@ -758,7 +753,7 @@ int iwl_alive_start(struct iwl_priv *priv)
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
- } else {
+ } else if (priv->lib->bt_params) {
/*
* default is 2-wire BT coexexistence support
*/
@@ -854,14 +849,6 @@ void iwl_down(struct iwl_priv *priv)
iwl_scan_cancel_timeout(priv, 200);
- /*
- * If active, scanning won't cancel it, so say it expired.
- * No race since we hold the mutex here and a new one
- * can't come in at this time.
- */
- if (priv->ucode_loaded && priv->cur_ucode != IWL_UCODE_INIT)
- ieee80211_remain_on_channel_expired(priv->hw);
-
exit_pending =
test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
@@ -1002,41 +989,6 @@ static void iwl_bg_restart(struct work_struct *data)
}
}
-
-
-
-void iwlagn_disable_roc(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
-
- lockdep_assert_held(&priv->mutex);
-
- if (!priv->hw_roc_setup)
- return;
-
- ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-
- priv->hw_roc_channel = NULL;
-
- memset(ctx->staging.node_addr, 0, ETH_ALEN);
-
- iwlagn_commit_rxon(priv, ctx);
-
- ctx->is_active = false;
- priv->hw_roc_setup = false;
-}
-
-static void iwlagn_disable_roc_work(struct work_struct *work)
-{
- struct iwl_priv *priv = container_of(work, struct iwl_priv,
- hw_roc_disable_work.work);
-
- mutex_lock(&priv->mutex);
- iwlagn_disable_roc(priv);
- mutex_unlock(&priv->mutex);
-}
-
/*****************************************************************************
*
* driver setup and teardown
@@ -1053,8 +1005,6 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
INIT_WORK(&priv->tx_flush, iwl_bg_tx_flush);
INIT_WORK(&priv->bt_full_concurrency, iwl_bg_bt_full_concurrency);
INIT_WORK(&priv->bt_runtime_config, iwl_bg_bt_runtime_config);
- INIT_DELAYED_WORK(&priv->hw_roc_disable_work,
- iwlagn_disable_roc_work);
iwl_setup_scan_deferred_work(priv);
@@ -1082,7 +1032,6 @@ void iwl_cancel_deferred_work(struct iwl_priv *priv)
cancel_work_sync(&priv->bt_full_concurrency);
cancel_work_sync(&priv->bt_runtime_config);
- cancel_delayed_work_sync(&priv->hw_roc_disable_work);
del_timer_sync(&priv->statistics_periodic);
del_timer_sync(&priv->ucode_trace);
@@ -1169,12 +1118,6 @@ static void iwl_option_config(struct iwl_priv *priv)
#else
IWL_INFO(priv, "CONFIG_IWLWIFI_DEVICE_TRACING disabled\n");
#endif
-
-#ifdef CONFIG_IWLWIFI_P2P
- IWL_INFO(priv, "CONFIG_IWLWIFI_P2P enabled\n");
-#else
- IWL_INFO(priv, "CONFIG_IWLWIFI_P2P disabled\n");
-#endif
}
static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
@@ -1315,10 +1258,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
ucode_flags = fw->ucode_capa.flags;
-#ifndef CONFIG_IWLWIFI_P2P
- ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
-#endif
-
if (ucode_flags & IWL_UCODE_TLV_FLAGS_PAN) {
priv->sta_key_max_num = STA_KEY_MAX_NUM_PAN;
trans_cfg.cmd_queue = IWL_IPAN_CMD_QUEUE_NUM;
@@ -1413,7 +1352,6 @@ static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans,
* if not PAN, then don't support P2P -- might be a uCode
* packaging bug or due to the eeprom check above
*/
- ucode_flags &= ~IWL_UCODE_TLV_FLAGS_P2P;
priv->sta_key_max_num = STA_KEY_MAX_NUM;
trans_cfg.cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 1b693944123..b647e506564 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -2826,9 +2826,6 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
lq_sta->flush_timer = 0;
lq_sta->supp_rates = sta->supp_rates[sband->band];
- for (j = 0; j < LQ_SIZE; j++)
- for (i = 0; i < IWL_RATE_COUNT; i++)
- rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n",
sta_id);
@@ -3319,7 +3316,8 @@ static void rs_remove_debugfs(void *priv, void *priv_sta)
* station is added we ignore it.
*/
static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta)
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta, void *priv_sta)
{
}
static struct rate_control_ops rs_ops = {
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index cd1ad001918..d7ce2f12a90 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -564,11 +564,7 @@ int iwlagn_set_pan_params(struct iwl_priv *priv)
cmd.slots[0].type = 0; /* BSS */
cmd.slots[1].type = 1; /* PAN */
- if (priv->hw_roc_setup) {
- /* both contexts must be used for this to happen */
- slot1 = IWL_MIN_SLOT_TIME;
- slot0 = 3000;
- } else if (ctx_bss->vif && ctx_pan->vif) {
+ if (ctx_bss->vif && ctx_pan->vif) {
int bcnint = ctx_pan->beacon_int;
int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
diff --git a/drivers/net/wireless/iwlwifi/dvm/scan.c b/drivers/net/wireless/iwlwifi/dvm/scan.c
index 8c686a5b90a..35e0ee8b4e5 100644
--- a/drivers/net/wireless/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/dvm/scan.c
@@ -100,9 +100,6 @@ static void iwl_complete_scan(struct iwl_priv *priv, bool aborted)
ieee80211_scan_completed(priv->hw, aborted);
}
- if (priv->scan_type == IWL_SCAN_ROC)
- iwl_scan_roc_expired(priv);
-
priv->scan_type = IWL_SCAN_NORMAL;
priv->scan_vif = NULL;
priv->scan_request = NULL;
@@ -130,9 +127,6 @@ static void iwl_process_scan_complete(struct iwl_priv *priv)
goto out_settings;
}
- if (priv->scan_type == IWL_SCAN_ROC)
- iwl_scan_roc_expired(priv);
-
if (priv->scan_type != IWL_SCAN_NORMAL && !aborted) {
int err;
@@ -284,12 +278,6 @@ static int iwl_rx_scan_start_notif(struct iwl_priv *priv,
le32_to_cpu(notif->tsf_low),
notif->status, notif->beacon_timer);
- if (priv->scan_type == IWL_SCAN_ROC &&
- !priv->hw_roc_start_notified) {
- ieee80211_ready_on_channel(priv->hw);
- priv->hw_roc_start_notified = true;
- }
-
return 0;
}
@@ -697,8 +685,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
- if (priv->scan_type != IWL_SCAN_ROC &&
- iwl_is_any_associated(priv)) {
+ if (iwl_is_any_associated(priv)) {
u16 interval = 0;
u32 extra;
u32 suspend_time = 100;
@@ -706,9 +693,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
switch (priv->scan_type) {
- case IWL_SCAN_ROC:
- WARN_ON(1);
- break;
case IWL_SCAN_RADIO_RESET:
interval = 0;
break;
@@ -728,11 +712,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan->suspend_time = cpu_to_le32(scan_suspend_time);
IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
scan_suspend_time, interval);
- } else if (priv->scan_type == IWL_SCAN_ROC) {
- scan->suspend_time = 0;
- scan->max_out_time = 0;
- scan->quiet_time = 0;
- scan->quiet_plcp_th = 0;
}
switch (priv->scan_type) {
@@ -774,9 +753,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
} else
IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
break;
- case IWL_SCAN_ROC:
- IWL_DEBUG_SCAN(priv, "Start ROC scan.\n");
- break;
}
scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
@@ -898,7 +874,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan_cmd_size - sizeof(*scan));
break;
case IWL_SCAN_RADIO_RESET:
- case IWL_SCAN_ROC:
/* use bcast addr, will not be transmitted but must be valid */
cmd_len = iwl_fill_probe_req(
(struct ieee80211_mgmt *)scan->data,
@@ -926,46 +901,6 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
is_active, n_probes,
(void *)&scan->data[cmd_len]);
break;
- case IWL_SCAN_ROC: {
- struct iwl_scan_channel *scan_ch;
- int n_chan, i;
- u16 dwell;
-
- dwell = iwl_limit_dwell(priv, priv->hw_roc_duration);
- n_chan = DIV_ROUND_UP(priv->hw_roc_duration, dwell);
-
- scan->channel_count = n_chan;
-
- scan_ch = (void *)&scan->data[cmd_len];
-
- for (i = 0; i < n_chan; i++) {
- scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
- scan_ch->channel =
- cpu_to_le16(priv->hw_roc_channel->hw_value);
-
- if (i == n_chan - 1)
- dwell = priv->hw_roc_duration - i * dwell;
-
- scan_ch->active_dwell =
- scan_ch->passive_dwell = cpu_to_le16(dwell);
-
- /* Set txpower levels to defaults */
- scan_ch->dsp_atten = 110;
-
- /* NOTE: if we were doing 6Mb OFDM for scans we'd use
- * power level:
- * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
- */
- if (priv->hw_roc_channel->band == IEEE80211_BAND_5GHZ)
- scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else
- scan_ch->tx_gain = ((1 << 5) | (5 << 3));
-
- scan_ch++;
- }
- }
-
- break;
}
if (scan->channel_count == 0) {
@@ -1035,7 +970,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
IWL_DEBUG_SCAN(priv, "Starting %sscan...\n",
scan_type == IWL_SCAN_NORMAL ? "" :
- scan_type == IWL_SCAN_ROC ? "remain-on-channel " :
"internal short ");
set_bit(STATUS_SCANNING, &priv->status);
@@ -1149,40 +1083,3 @@ void iwl_cancel_scan_deferred_work(struct iwl_priv *priv)
mutex_unlock(&priv->mutex);
}
}
-
-void iwl_scan_roc_expired(struct iwl_priv *priv)
-{
- /*
- * The status bit should be set here, to prevent a race
- * where the atomic_read returns 1, but before the execution continues
- * iwl_scan_offchannel_skb_status() checks if the status bit is set
- */
- set_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status);
-
- if (atomic_read(&priv->num_aux_in_flight) == 0) {
- ieee80211_remain_on_channel_expired(priv->hw);
- priv->hw_roc_channel = NULL;
- schedule_delayed_work(&priv->hw_roc_disable_work,
- 10 * HZ);
-
- clear_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status);
- } else {
- IWL_DEBUG_SCAN(priv, "ROC done with %d frames in aux\n",
- atomic_read(&priv->num_aux_in_flight));
- }
-}
-
-void iwl_scan_offchannel_skb(struct iwl_priv *priv)
-{
- WARN_ON(!priv->hw_roc_start_notified);
- atomic_inc(&priv->num_aux_in_flight);
-}
-
-void iwl_scan_offchannel_skb_status(struct iwl_priv *priv)
-{
- if (atomic_dec_return(&priv->num_aux_in_flight) == 0 &&
- test_bit(STATUS_SCAN_ROC_EXPIRED, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "0 aux frames. Calling ROC expired\n");
- iwl_scan_roc_expired(priv);
- }
-}
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 5ee983faa67..da442b81370 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -87,7 +87,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
priv->lib->bt_params->advanced_bt_coexist &&
(ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
ieee80211_is_reassoc_req(fc) ||
- skb->protocol == cpu_to_be16(ETH_P_PAE)))
+ info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
tx_flags |= TX_CMD_FLG_IGNORE_BT;
@@ -478,9 +478,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv,
if (sta_priv && sta_priv->client && !is_agg)
atomic_inc(&sta_priv->pending_frames);
- if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)
- iwl_scan_offchannel_skb(priv);
-
return 0;
drop_unlock_sta:
@@ -1158,7 +1155,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
struct sk_buff *skb;
struct iwl_rxon_context *ctx;
bool is_agg = (txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
- bool is_offchannel_skb;
tid = (tx_resp->ra_tid & IWLAGN_TX_RES_TID_MSK) >>
IWLAGN_TX_RES_TID_POS;
@@ -1178,8 +1174,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
__skb_queue_head_init(&skbs);
- is_offchannel_skb = false;
-
if (tx_resp->frame_count == 1) {
u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
next_reclaimed = IEEE80211_SEQ_TO_SN(next_reclaimed + 0x10);
@@ -1256,8 +1250,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
if (!is_agg)
iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
- is_offchannel_skb =
- (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
freed++;
}
@@ -1271,14 +1263,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
if (!is_agg && freed != 1)
IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed);
- /*
- * An offchannel frame can be send only on the AUX queue, where
- * there is no aggregation (and reordering) so it only is single
- * skb is expected to be processed.
- */
- if (is_offchannel_skb && freed != 1)
- IWL_ERR(priv, "OFFCHANNEL SKB freed %d\n", freed);
-
IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x)\n", txq_id,
iwl_get_tx_fail_reason(status), status);
@@ -1298,9 +1282,6 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
ieee80211_tx_status_ni(priv->hw, skb);
}
- if (is_offchannel_skb)
- iwl_scan_offchannel_skb_status(priv);
-
return 0;
}
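Both the tx.c hunk above and the later iwl-devtrace.h change stop matching skb->protocol against ETH_P_PAE and instead test the flag mac80211 sets on control-port (802.1X/EAPOL) frames, which also covers a non-default control-port ethertype. In isolation the test is just:

#include <net/mac80211.h>

/* Sketch: true for frames mac80211 marked as control-port traffic. */
static bool demo_is_port_ctrl(struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	return info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
}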
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 22b7fa5b971..76e14c046d9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -99,6 +99,7 @@ static const struct iwl_base_params iwl7000_base_params = {
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = true,
+ .pcie_l1_allowed = true,
};
static const struct iwl_ht_params iwl7000_ht_params = {
@@ -126,6 +127,16 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
};
+const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
+ .name = "Intel(R) Dual Band Wireless AC 7260",
+ .fw_name_pre = IWL7260_FW_PRE,
+ IWL_DEVICE_7000,
+ .ht_params = &iwl7000_ht_params,
+ .nvm_ver = IWL7260_NVM_VERSION,
+ .nvm_calib_ver = IWL7260_TX_POWER_VERSION,
+ .high_temp = true,
+};
+
const struct iwl_cfg iwl7260_2n_cfg = {
.name = "Intel(R) Dual Band Wireless N 7260",
.fw_name_pre = IWL7260_FW_PRE,
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 83b9ff6ff3a..e4d370bff30 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -152,6 +152,7 @@ struct iwl_base_params {
unsigned int wd_timeout;
u32 max_event_log_size;
const bool shadow_reg_enable;
+ const bool pcie_l1_allowed;
};
/*
@@ -205,6 +206,7 @@ struct iwl_eeprom_params {
* @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
* @rx_with_siso_diversity: 1x1 device with rx antenna diversity
* @internal_wimax_coex: internal wifi/wimax combo device
+ * @high_temp: whether this NIC is designated to operate at high temperatures.
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -233,6 +235,7 @@ struct iwl_cfg {
enum iwl_led_mode led_mode;
const bool rx_with_siso_diversity;
const bool internal_wimax_coex;
+ bool high_temp;
};
/*
@@ -283,6 +286,7 @@ extern const struct iwl_cfg iwl135_bgn_cfg;
#endif /* CONFIG_IWLDVM */
#if IS_ENABLED(CONFIG_IWLMVM)
extern const struct iwl_cfg iwl7260_2ac_cfg;
+extern const struct iwl_cfg iwl7260_2ac_cfg_high_temp;
extern const struct iwl_cfg iwl7260_2n_cfg;
extern const struct iwl_cfg iwl7260_n_cfg;
extern const struct iwl_cfg iwl3160_2ac_cfg;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 7edb8519c8a..b2bb32a781d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -145,6 +145,7 @@ do { \
#define IWL_DL_RX 0x01000000
#define IWL_DL_ISR 0x02000000
#define IWL_DL_HT 0x04000000
+#define IWL_DL_EXTERNAL 0x08000000
/* 0xF0000000 - 0x10000000 */
#define IWL_DL_11H 0x10000000
#define IWL_DL_STATS 0x20000000
@@ -153,6 +154,7 @@ do { \
#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
+#define IWL_DEBUG_EXTERNAL(p, f, a...) IWL_DEBUG(p, IWL_DL_EXTERNAL, f, ## a)
#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 4491c1c72cc..684c416d349 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -33,10 +33,11 @@
static inline bool iwl_trace_data(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- if (ieee80211_is_data(hdr->frame_control))
- return skb->protocol != cpu_to_be16(ETH_P_PAE);
- return false;
+ if (!ieee80211_is_data(hdr->frame_control))
+ return false;
+ return !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO);
}
static inline size_t iwl_rx_trace_len(const struct iwl_trans *trans,
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index d0162d426f8..99e1da3123c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -843,7 +843,7 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
int i;
bool load_module = false;
- fw->ucode_capa.max_probe_length = 200;
+ fw->ucode_capa.max_probe_length = IWL_DEFAULT_MAX_PROBE_LENGTH;
fw->ucode_capa.standard_phy_calibration_size =
IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
@@ -1032,8 +1032,10 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans,
int ret;
drv = kzalloc(sizeof(*drv), GFP_KERNEL);
- if (!drv)
- return NULL;
+ if (!drv) {
+ ret = -ENOMEM;
+ goto err;
+ }
drv->trans = trans;
drv->dev = trans->dev;
@@ -1078,7 +1080,7 @@ err_free_dbgfs:
err_free_drv:
#endif
kfree(drv);
-
+err:
return ERR_PTR(ret);
}
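
The hunk above changes iwl_drv_start() to report allocation failure through ERR_PTR(-ENOMEM) instead of a bare NULL, funnelling every failure through one exit label. A minimal userspace sketch of that idiom, with ERR_PTR/IS_ERR/PTR_ERR reimplemented locally rather than taken from include/linux/err.h, and an invented drv struct standing in for the real one:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* local reimplementation of the include/linux/err.h helpers */
    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    struct drv { int id; };          /* stand-in for struct iwl_drv */

    static struct drv *drv_start(int id)
    {
        struct drv *drv;
        int ret;

        drv = calloc(1, sizeof(*drv));
        if (!drv) {
            ret = -ENOMEM;
            goto err;                /* single error exit, as in the hunk */
        }
        drv->id = id;
        return drv;
    err:
        return ERR_PTR(ret);
    }

    int main(void)
    {
        struct drv *d = drv_start(7);

        if (IS_ERR(d)) {
            printf("drv_start failed: %ld\n", PTR_ERR(d));
            return 1;
        }
        printf("started drv %d\n", d->id);
        free(d);
        return 0;
    }

Callers then test IS_ERR() once and can distinguish -ENOMEM from other errnos, which a NULL return cannot convey.
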
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index f844d5c748c..a1223680bc7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -74,13 +74,24 @@
* @IWL_UCODE_TLV_FLAGS_MFP: This uCode image supports MFP (802.11w).
* @IWL_UCODE_TLV_FLAGS_P2P: This uCode image supports P2P.
* @IWL_UCODE_TLV_FLAGS_DW_BC_TABLE: The SCD byte count table is in DWORDS
+ * @IWL_UCODE_TLV_FLAGS_UAPSD: This uCode image supports uAPSD
+ * @IWL_UCODE_TLV_FLAGS_RX_ENERGY_API: supports rx signal strength api
+ * @IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2: using the new time event API.
+ * @IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS: D3 image supports up to six
+ * (rather than two) IPv6 addresses
+ * @IWL_UCODE_TLV_FLAGS_BF_UPDATED: new beacon filtering API
*/
enum iwl_ucode_tlv_flag {
- IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
- IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
- IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
- IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
- IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
+ IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
+ IWL_UCODE_TLV_FLAGS_NEWSCAN = BIT(1),
+ IWL_UCODE_TLV_FLAGS_MFP = BIT(2),
+ IWL_UCODE_TLV_FLAGS_P2P = BIT(3),
+ IWL_UCODE_TLV_FLAGS_DW_BC_TABLE = BIT(4),
+ IWL_UCODE_TLV_FLAGS_UAPSD = BIT(6),
+ IWL_UCODE_TLV_FLAGS_RX_ENERGY_API = BIT(8),
+ IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2 = BIT(9),
+ IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
+ IWL_UCODE_TLV_FLAGS_BF_UPDATED = BIT(11),
};
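
These TLV bits are what the higher layers test against fw->ucode_capa.flags to decide which firmware features may be used. A tiny, hypothetical sketch of such a capability check (the fw_caps struct and fw_has_flag() helper are invented; the bit positions mirror the enum above, with shortened names):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    enum ucode_tlv_flag {
        TLV_FLAGS_UAPSD           = BIT(6),
        TLV_FLAGS_D3_6_IPV6_ADDRS = BIT(10),
    };

    struct fw_caps { uint32_t flags; };   /* stand-in for fw->ucode_capa */

    static bool fw_has_flag(const struct fw_caps *caps, uint32_t flag)
    {
        return caps->flags & flag;
    }

    int main(void)
    {
        struct fw_caps caps = { .flags = TLV_FLAGS_UAPSD };

        printf("uAPSD: %s\n",
               fw_has_flag(&caps, TLV_FLAGS_UAPSD) ? "yes" : "no");
        printf("six D3 IPv6 addrs: %s\n",
               fw_has_flag(&caps, TLV_FLAGS_D3_6_IPV6_ADDRS) ? "yes" : "no");
        return 0;
    }
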
/* The default calibrate table size if not specified by firmware file */
@@ -88,6 +99,9 @@ enum iwl_ucode_tlv_flag {
#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19
#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE 253
+/* The default max probe length if not specified by the firmware file */
+#define IWL_DEFAULT_MAX_PROBE_LENGTH 200
+
/**
* enum iwl_ucode_type
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 305c81f2c2b..dfa4d2e3aaa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -33,6 +33,7 @@
#include "iwl-io.h"
#include "iwl-csr.h"
#include "iwl-debug.h"
+#include "iwl-fh.h"
#define IWL_POLL_INTERVAL 10 /* microseconds */
@@ -166,3 +168,68 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
}
}
IWL_EXPORT_SYMBOL(iwl_clear_bits_prph);
+
+static const char *get_fh_string(int cmd)
+{
+#define IWL_CMD(x) case x: return #x
+ switch (cmd) {
+ IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_WPTR);
+ IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
+ IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+ IWL_CMD(FH_TSSR_TX_STATUS_REG);
+ IWL_CMD(FH_TSSR_TX_ERROR_REG);
+ default:
+ return "UNKNOWN";
+ }
+#undef IWL_CMD
+}
+
+int iwl_dump_fh(struct iwl_trans *trans, char **buf)
+{
+ int i;
+ static const u32 fh_tbl[] = {
+ FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ FH_RSCSR_CHNL0_WPTR,
+ FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_MEM_RSSR_SHARED_CTRL_REG,
+ FH_MEM_RSSR_RX_STATUS_REG,
+ FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+ FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_ERROR_REG
+ };
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (buf) {
+ int pos = 0;
+ size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "FH register values:\n");
+
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(trans, fh_tbl[i]));
+
+ return pos;
+ }
+#endif
+
+ IWL_ERR(trans, "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
+ IWL_ERR(trans, " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(trans, fh_tbl[i]));
+
+ return 0;
+}
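
get_fh_string() above uses a compact stringification trick: a function-local macro expands each register constant into a case label that returns the constant's own name, and is #undef'd right after so it cannot leak. A standalone userspace sketch of the same trick (register names and values are made up):

    #include <stdio.h>

    #define REG_A 0x100
    #define REG_B 0x104

    /* same stringification trick as get_fh_string(): one macro per case */
    static const char *reg_name(int reg)
    {
    #define NAME(x) case x: return #x
        switch (reg) {
        NAME(REG_A);
        NAME(REG_B);
        default:
            return "UNKNOWN";
        }
    #undef NAME
    }

    int main(void)
    {
        printf("%s\n", reg_name(REG_A));  /* prints "REG_A" */
        printf("%s\n", reg_name(0x999));  /* prints "UNKNOWN" */
        return 0;
    }
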
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index fd9f5b97fff..63d10ec08db 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -77,4 +77,7 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
u32 bits, u32 mask);
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
+/* Error handling */
+int iwl_dump_fh(struct iwl_trans *trans, char **buf);
+
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index acd2665afb8..b76a9a8fc0b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -118,6 +118,7 @@ static const u8 iwl_nvm_channels[] = {
#define LAST_2GHZ_HT_PLUS 9
#define LAST_5GHZ_HT 161
+#define DEFAULT_MAX_TX_POWER 16
/* rate data (static) */
static struct ieee80211_rate iwl_cfg80211_rates[] = {
@@ -232,8 +233,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
/* Initialize regulatory-based run-time data */
- /* TODO: read the real value from the NVM */
- channel->max_power = 0;
+ /*
+ * Default value - highest tx power value. max_power
+ * is not used in mvm, and is only kept for backwards compatibility
+ */
+ channel->max_power = DEFAULT_MAX_TX_POWER;
is_5ghz = channel->band == IEEE80211_BAND_5GHZ;
IWL_DEBUG_EEPROM(dev,
"Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm): Ad-Hoc %ssupported\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index 98c7aa7346d..976448a57d0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -93,7 +93,7 @@ struct iwl_cfg;
* 1) The driver layer (iwl-drv.c) chooses the op_mode based on the
* capabilities advertized by the fw file (in TLV format).
* 2) The driver layer starts the op_mode (ops->start)
- * 3) The op_mode registers registers mac80211
+ * 3) The op_mode registers mac80211
* 4) The op_mode is governed by mac80211
* 5) The driver layer stops the op_mode
*/
@@ -112,7 +112,7 @@ struct iwl_cfg;
* @stop: stop the op_mode. Must free all the memory allocated.
* May sleep
* @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is the
- * HCMD the this Rx responds to.
+ * HCMD this Rx responds to.
* This callback may sleep, it is called from a threaded IRQ handler.
* @queue_full: notifies that a HW queue is full.
* Must be atomic and called with BH disabled.
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 8d91422c598..dd57a36ecb1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -180,7 +180,7 @@ struct iwl_rx_packet {
* enum CMD_MODE - how to send the host commands ?
*
* @CMD_SYNC: The caller will be stalled until the fw responds to the command
- * @CMD_ASYNC: Return right away and don't want for the response
+ * @CMD_ASYNC: Return right away and don't wait for the response
* @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
* response. The caller needs to call iwl_free_resp when done.
*/
@@ -218,7 +218,7 @@ struct iwl_device_cmd {
*
* @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
* ring. The transport layer doesn't map the command's buffer to DMA, but
- * rather copies it to an previously allocated DMA buffer. This flag tells
+ * rather copies it to a previously allocated DMA buffer. This flag tells
* the transport layer not to copy the command, but to map the existing
* buffer (that is passed in) instead. This saves the memcpy and allows
* commands that are bigger than the fixed buffer to be submitted.
@@ -243,7 +243,7 @@ enum iwl_hcmd_dataflag {
* @handler_status: return value of the handler of the command
* (put in setup_rx_handlers) - valid for SYNC mode only
* @flags: can be CMD_*
- * @len: array of the lenths of the chunks in data
+ * @len: array of the lengths of the chunks in data
* @dataflags: IWL_HCMD_DFL_*
* @id: id of the host command
*/
@@ -396,8 +396,6 @@ struct iwl_trans;
* May sleep
* @dbgfs_register: add the dbgfs files under this directory. Files will be
* automatically deleted.
- * @suspend: stop the device unless WoWLAN is configured
- * @resume: resume activity of the device
* @write8: write a u8 to a register at offset ofs from the BAR
* @write32: write a u32 to a register at offset ofs from the BAR
* @read32: read a u32 register at offset ofs from the BAR
@@ -443,10 +441,7 @@ struct iwl_trans_ops {
int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
int (*wait_tx_queue_empty)(struct iwl_trans *trans);
-#ifdef CONFIG_PM_SLEEP
- int (*suspend)(struct iwl_trans *trans);
- int (*resume)(struct iwl_trans *trans);
-#endif
+
void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
u32 (*read32)(struct iwl_trans *trans, u32 ofs);
@@ -700,18 +695,6 @@ static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
return trans->ops->dbgfs_register(trans, dir);
}
-#ifdef CONFIG_PM_SLEEP
-static inline int iwl_trans_suspend(struct iwl_trans *trans)
-{
- return trans->ops->suspend(trans);
-}
-
-static inline int iwl_trans_resume(struct iwl_trans *trans)
-{
- return trans->ops->resume(trans);
-}
-#endif
-
static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
trans->ops->write8(trans, ofs, val);
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index ff856e543ae..6d73817850c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o
iwlmvm-y += scan.o time-event.o rs.o
-iwlmvm-y += power.o bt-coex.o
+iwlmvm-y += power.o power_legacy.o bt-coex.o
iwlmvm-y += led.o tt.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o
iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index dbd622a3929..0fad98b85f6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -220,66 +220,87 @@ static const __le32 iwl_single_shared_ant_lookup[BT_COEX_LUT_SIZE] = {
int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
{
- struct iwl_bt_coex_cmd cmd = {
- .max_kill = 5,
- .bt3_time_t7_value = 1,
- .bt3_prio_sample_time = 2,
- .bt3_timer_t2_value = 0xc,
+ struct iwl_bt_coex_cmd *bt_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = BT_CONFIG,
+ .len = { sizeof(*bt_cmd), },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ .flags = CMD_SYNC,
};
int ret;
- cmd.flags = iwlwifi_mod_params.bt_coex_active ?
+ /* go to CALIB state in internal BT-Coex state machine */
+ ret = iwl_send_bt_env(mvm, BT_COEX_ENV_OPEN,
+ BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
+ if (ret)
+ return ret;
+
+ ret = iwl_send_bt_env(mvm, BT_COEX_ENV_CLOSE,
+ BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
+ if (ret)
+ return ret;
+
+ bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
+ if (!bt_cmd)
+ return -ENOMEM;
+ cmd.data[0] = bt_cmd;
+
+ bt_cmd->max_kill = 5;
+ bt_cmd->bt3_time_t7_value = 1;
+ bt_cmd->bt3_prio_sample_time = 2;
+ bt_cmd->bt3_timer_t2_value = 0xc;
+
+ bt_cmd->flags = iwlwifi_mod_params.bt_coex_active ?
BT_COEX_NW : BT_COEX_DISABLE;
- cmd.flags |= BT_CH_PRIMARY_EN | BT_SYNC_2_BT_DISABLE;
+ bt_cmd->flags |= BT_CH_PRIMARY_EN | BT_SYNC_2_BT_DISABLE;
- cmd.valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE |
- BT_VALID_BT_PRIO_BOOST |
- BT_VALID_MAX_KILL |
- BT_VALID_3W_TMRS |
- BT_VALID_KILL_ACK |
- BT_VALID_KILL_CTS |
- BT_VALID_REDUCED_TX_POWER |
- BT_VALID_LUT);
+ bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_ENABLE |
+ BT_VALID_BT_PRIO_BOOST |
+ BT_VALID_MAX_KILL |
+ BT_VALID_3W_TMRS |
+ BT_VALID_KILL_ACK |
+ BT_VALID_KILL_CTS |
+ BT_VALID_REDUCED_TX_POWER |
+ BT_VALID_LUT);
if (mvm->cfg->bt_shared_single_ant)
- memcpy(&cmd.decision_lut, iwl_single_shared_ant_lookup,
+ memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant_lookup,
sizeof(iwl_single_shared_ant_lookup));
else if (is_loose_coex())
- memcpy(&cmd.decision_lut, iwl_loose_lookup,
+ memcpy(&bt_cmd->decision_lut, iwl_loose_lookup,
sizeof(iwl_tight_lookup));
else
- memcpy(&cmd.decision_lut, iwl_tight_lookup,
+ memcpy(&bt_cmd->decision_lut, iwl_tight_lookup,
sizeof(iwl_tight_lookup));
- cmd.bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST);
- cmd.kill_ack_msk =
+ bt_cmd->bt_prio_boost = cpu_to_le32(IWL_BT_DEFAULT_BOOST);
+ bt_cmd->kill_ack_msk =
cpu_to_le32(iwl_bt_ack_kill_msk[BT_KILL_MSK_DEFAULT]);
- cmd.kill_cts_msk =
+ bt_cmd->kill_cts_msk =
cpu_to_le32(iwl_bt_cts_kill_msk[BT_KILL_MSK_DEFAULT]);
memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
- /* go to CALIB state in internal BT-Coex state machine */
- ret = iwl_send_bt_env(mvm, BT_COEX_ENV_OPEN,
- BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
- if (ret)
- return ret;
-
- ret = iwl_send_bt_env(mvm, BT_COEX_ENV_CLOSE,
- BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
- if (ret)
- return ret;
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
- return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_SYNC,
- sizeof(cmd), &cmd);
+ kfree(bt_cmd);
+ return ret;
}
static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
bool reduced_tx_power)
{
enum iwl_bt_kill_msk bt_kill_msk;
- struct iwl_bt_coex_cmd cmd = {};
+ struct iwl_bt_coex_cmd *bt_cmd;
struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif;
+ struct iwl_host_cmd cmd = {
+ .id = BT_CONFIG,
+ .data[0] = &bt_cmd,
+ .len = { sizeof(*bt_cmd), },
+ .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+ .flags = CMD_SYNC,
+ };
+ int ret = 0;
lockdep_assert_held(&mvm->mutex);
@@ -308,24 +329,40 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
return 0;
mvm->bt_kill_msk = bt_kill_msk;
- cmd.kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
- cmd.kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
- cmd.valid_bit_msk = cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS);
+
+ bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
+ if (!bt_cmd)
+ return -ENOMEM;
+ cmd.data[0] = bt_cmd;
+
+ bt_cmd->kill_ack_msk = cpu_to_le32(iwl_bt_ack_kill_msk[bt_kill_msk]);
+ bt_cmd->kill_cts_msk = cpu_to_le32(iwl_bt_cts_kill_msk[bt_kill_msk]);
+ bt_cmd->valid_bit_msk =
+ cpu_to_le16(BT_VALID_KILL_ACK | BT_VALID_KILL_CTS);
IWL_DEBUG_COEX(mvm, "bt_kill_msk = %d\n", bt_kill_msk);
- return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_SYNC,
- sizeof(cmd), &cmd);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+ kfree(bt_cmd);
+ return ret;
}
static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
bool enable)
{
- struct iwl_bt_coex_cmd cmd = {
- .valid_bit_msk = cpu_to_le16(BT_VALID_REDUCED_TX_POWER),
- .bt_reduced_tx_power = sta_id,
+ struct iwl_bt_coex_cmd *bt_cmd;
+ /* Send ASYNC since this can be sent from an atomic context */
+ struct iwl_host_cmd cmd = {
+ .id = BT_CONFIG,
+ .len = { sizeof(*bt_cmd), },
+ .dataflags = { IWL_HCMD_DFL_DUP, },
+ .flags = CMD_ASYNC,
};
+
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
+ int ret;
/* This can happen if the station has been removed right now */
if (sta_id == IWL_MVM_STATION_COUNT)
@@ -339,17 +376,26 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
if (mvmsta->bt_reduced_txpower == enable)
return 0;
+ bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
+ if (!bt_cmd)
+ return -ENOMEM;
+ cmd.data[0] = bt_cmd;
+
+ bt_cmd->valid_bit_msk = cpu_to_le16(BT_VALID_REDUCED_TX_POWER);
+ bt_cmd->bt_reduced_tx_power = sta_id;
+
if (enable)
- cmd.bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
+ bt_cmd->bt_reduced_tx_power |= BT_REDUCED_TX_POWER_BIT;
IWL_DEBUG_COEX(mvm, "%sable reduced Tx Power for sta %d\n",
enable ? "en" : "dis", sta_id);
mvmsta->bt_reduced_txpower = enable;
- /* Send ASYNC since this can be sent from an atomic context */
- return iwl_mvm_send_cmd_pdu(mvm, BT_CONFIG, CMD_ASYNC,
- sizeof(cmd), &cmd);
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+
+ kfree(bt_cmd);
+ return ret;
}
struct iwl_bt_iterator_data {
@@ -384,6 +430,10 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ /* non-associated BSSes aren't to be considered */
+ if (!vif->bss_conf.assoc)
+ return;
+
if (band != IEEE80211_BAND_2GHZ) {
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
smps_mode);
@@ -523,6 +573,8 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
lockdep_is_held(&mvm->mutex));
mvmsta = (void *)sta->drv_priv;
+ data->num_bss_ifaces++;
+
/*
* This interface doesn't support reduced Tx power (because of low
* RSSI probably), then set bt_kill_msk to default values.
@@ -588,23 +640,5 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
void iwl_mvm_bt_coex_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
- struct ieee80211_chanctx_conf *chanctx_conf;
- enum ieee80211_band band;
-
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
- if (chanctx_conf && chanctx_conf->def.chan)
- band = chanctx_conf->def.chan->band;
- else
- band = -1;
- rcu_read_unlock();
-
- /* if we are in 2GHz we will get a notification from the fw */
- if (band == IEEE80211_BAND_2GHZ)
- return;
-
- /* else, we can remove all the constraints */
- memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
-
iwl_mvm_bt_coex_notif_handle(mvm);
}
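
The rework above moves the BT_CONFIG payload off the stack into kzalloc'd memory because IWL_HCMD_DFL_NOCOPY hands the buffer straight to the transport (which may DMA-map it), so it must outlive the caller's stack frame and be freed only after the send returns. A hedged userspace sketch of that allocate/point/send/free shape (the struct layouts, command id and send_cmd() below are invented stand-ins, not the driver's real API):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* illustrative stand-ins; the real iwl_host_cmd/BT_CONFIG layouts differ */
    struct bt_coex_cmd {
        unsigned char max_kill;
        unsigned int  flags;
    };

    struct host_cmd {
        int         id;
        const void *data[2];
        size_t      len[2];
    };

    static int send_cmd(const struct host_cmd *cmd)
    {
        /* a real NOCOPY transport would DMA-map cmd->data[0] here, which is
         * why the payload must not live on the caller's stack */
        printf("sending cmd %d, %zu bytes\n", cmd->id, cmd->len[0]);
        return 0;
    }

    static int send_bt_init_conf(void)
    {
        struct bt_coex_cmd *bt_cmd;
        struct host_cmd cmd = { .id = 0x9b /* illustrative id */ };
        int ret;

        bt_cmd = calloc(1, sizeof(*bt_cmd));   /* kzalloc() stand-in */
        if (!bt_cmd)
            return -ENOMEM;

        bt_cmd->max_kill = 5;
        bt_cmd->flags = 0x3;

        cmd.data[0] = bt_cmd;
        cmd.len[0] = sizeof(*bt_cmd);

        ret = send_cmd(&cmd);

        free(bt_cmd);                /* freed only after the send completed */
        return ret;
    }

    int main(void)
    {
        return send_bt_init_conf() ? 1 : 0;
    }
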
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
new file mode 100644
index 00000000000..2bf29f7992e
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -0,0 +1,80 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+#ifndef __MVM_CONSTANTS_H
+#define __MVM_CONSTANTS_H
+
+#define IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
+#define IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT (100 * USEC_PER_MSEC)
+#define IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
+#define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC)
+#define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
+#define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC)
+#define IWL_MVM_PS_HEAVY_TX_THLD_PACKETS 20
+#define IWL_MVM_PS_HEAVY_RX_THLD_PACKETS 20
+#define IWL_MVM_PS_HEAVY_TX_THLD_PERCENT 50
+#define IWL_MVM_PS_HEAVY_RX_THLD_PERCENT 50
+#define IWL_MVM_PS_SNOOZE_INTERVAL 25
+#define IWL_MVM_PS_SNOOZE_WINDOW 50
+#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25
+
+#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 7e5e5c2f9f8..417639f77b0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -105,7 +105,7 @@ void iwl_mvm_ipv6_addr_change(struct ieee80211_hw *hw,
list_for_each_entry(ifa, &idev->addr_list, if_list) {
mvmvif->target_ipv6_addrs[idx] = ifa->addr;
idx++;
- if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS)
+ if (idx >= IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX)
break;
}
read_unlock_bh(&idev->lock);
@@ -134,7 +134,7 @@ struct wowlan_key_data {
struct iwl_wowlan_rsc_tsc_params_cmd *rsc_tsc;
struct iwl_wowlan_tkip_params_cmd *tkip;
bool error, use_rsc_tsc, use_tkip;
- int gtk_key_idx;
+ int wep_key_idx;
};
static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
@@ -188,8 +188,8 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
wkc.wep_key.key_offset = 0;
} else {
/* others start at 1 */
- data->gtk_key_idx++;
- wkc.wep_key.key_offset = data->gtk_key_idx;
+ data->wep_key_idx++;
+ wkc.wep_key.key_offset = data->wep_key_idx;
}
ret = iwl_mvm_send_cmd_pdu(mvm, WEP_KEY, CMD_SYNC,
@@ -316,8 +316,13 @@ static void iwl_mvm_wowlan_program_keys(struct ieee80211_hw *hw,
mvm->ptk_ivlen = key->iv_len;
mvm->ptk_icvlen = key->icv_len;
} else {
- data->gtk_key_idx++;
- key->hw_key_idx = data->gtk_key_idx;
+ /*
+ * firmware only supports TSC/RSC for a single key,
+ * so if there are multiple keep overwriting them
+ * with new ones -- this relies on mac80211 doing
+ * list_add_tail().
+ */
+ key->hw_key_idx = 1;
mvm->gtk_ivlen = key->iv_len;
mvm->gtk_icvlen = key->icv_len;
}
@@ -373,36 +378,68 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
- struct iwl_proto_offload_cmd cmd = {};
+ union {
+ struct iwl_proto_offload_cmd_v1 v1;
+ struct iwl_proto_offload_cmd_v2 v2;
+ } cmd = {};
+ struct iwl_proto_offload_cmd_common *common;
+ u32 enabled = 0, size;
#if IS_ENABLED(CONFIG_IPV6)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int i;
- if (mvmvif->num_target_ipv6_addrs) {
- cmd.enabled |= cpu_to_le32(IWL_D3_PROTO_OFFLOAD_NS);
- memcpy(cmd.ndp_mac_addr, vif->addr, ETH_ALEN);
- }
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+ if (mvmvif->num_target_ipv6_addrs) {
+ enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+ memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
+ }
- BUILD_BUG_ON(sizeof(cmd.target_ipv6_addr[i]) !=
- sizeof(mvmvif->target_ipv6_addrs[i]));
+ BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
+ sizeof(mvmvif->target_ipv6_addrs[0]));
- for (i = 0; i < mvmvif->num_target_ipv6_addrs; i++)
- memcpy(cmd.target_ipv6_addr[i],
- &mvmvif->target_ipv6_addrs[i],
- sizeof(cmd.target_ipv6_addr[i]));
+ for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
+ IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++)
+ memcpy(cmd.v2.target_ipv6_addr[i],
+ &mvmvif->target_ipv6_addrs[i],
+ sizeof(cmd.v2.target_ipv6_addr[i]));
+ } else {
+ if (mvmvif->num_target_ipv6_addrs) {
+ enabled |= IWL_D3_PROTO_OFFLOAD_NS;
+ memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
+ }
+
+ BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
+ sizeof(mvmvif->target_ipv6_addrs[0]));
+
+ for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
+ IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++)
+ memcpy(cmd.v1.target_ipv6_addr[i],
+ &mvmvif->target_ipv6_addrs[i],
+ sizeof(cmd.v1.target_ipv6_addr[i]));
+ }
#endif
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
+ common = &cmd.v2.common;
+ size = sizeof(cmd.v2);
+ } else {
+ common = &cmd.v1.common;
+ size = sizeof(cmd.v1);
+ }
+
if (vif->bss_conf.arp_addr_cnt) {
- cmd.enabled |= cpu_to_le32(IWL_D3_PROTO_OFFLOAD_ARP);
- cmd.host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
- memcpy(cmd.arp_mac_addr, vif->addr, ETH_ALEN);
+ enabled |= IWL_D3_PROTO_OFFLOAD_ARP;
+ common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
+ memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
}
- if (!cmd.enabled)
+ if (!enabled)
return 0;
+ common->enabled = cpu_to_le32(enabled);
+
return iwl_mvm_send_cmd_pdu(mvm, PROT_OFFLOAD_CONFIG_CMD, CMD_SYNC,
- sizeof(cmd), &cmd);
+ size, &cmd);
}
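
The v1/v2 union above lets one function fill whichever PROT_OFFLOAD layout the firmware advertises while writing the shared header through a single pointer; only the size handed to the transport differs. A sketch of the same versioned-command pattern (layouts, sizes and the command id are illustrative; the selection logic mirrors the hunk):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* illustrative layouts; the real v1/v2 commands carry IPv6 targets etc. */
    struct offload_common { uint32_t enabled; uint8_t mac[6]; };
    struct offload_v1 { struct offload_common common; uint8_t targets[2][16]; };
    struct offload_v2 { struct offload_common common; uint8_t targets[6][16]; uint8_t n_valid; };

    static int send_pdu(int id, size_t size, const void *payload)
    {
        const struct offload_common *common = payload;

        printf("cmd %d: %zu bytes, enabled=%u\n", id, size,
               (unsigned int)common->enabled);
        return 0;
    }

    static int send_proto_offload(bool fw_has_six_addrs)
    {
        union {
            struct offload_v1 v1;
            struct offload_v2 v2;
        } cmd;
        struct offload_common *common;
        size_t size;

        memset(&cmd, 0, sizeof(cmd));

        /* both versions start with the same common block, so the caller fills
         * it through one pointer and only the total size differs */
        if (fw_has_six_addrs) {
            common = &cmd.v2.common;
            size = sizeof(cmd.v2);
        } else {
            common = &cmd.v1.common;
            size = sizeof(cmd.v1);
        }

        common->enabled = 1;
        return send_pdu(0xd4 /* illustrative id */, size, &cmd);
    }

    int main(void)
    {
        send_proto_offload(false);
        send_proto_offload(true);
        return 0;
    }
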
enum iwl_mvm_tcp_packet_type {
@@ -1072,73 +1109,16 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
return __iwl_mvm_suspend(hw, wowlan, false);
}
-static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_wowlan_status *status)
{
- u32 base = mvm->error_event_table;
- struct error_table_start {
- /* cf. struct iwl_error_event_table */
- u32 valid;
- u32 error_id;
- } err_info;
+ struct sk_buff *pkt = NULL;
struct cfg80211_wowlan_wakeup wakeup = {
.pattern_idx = -1,
};
struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
- struct iwl_host_cmd cmd = {
- .id = WOWLAN_GET_STATUSES,
- .flags = CMD_SYNC | CMD_WANT_SKB,
- };
- struct iwl_wowlan_status *status;
- u32 reasons;
- int ret, len;
- struct sk_buff *pkt = NULL;
-
- iwl_trans_read_mem_bytes(mvm->trans, base,
- &err_info, sizeof(err_info));
-
- if (err_info.valid) {
- IWL_INFO(mvm, "error table is valid (%d)\n",
- err_info.valid);
- if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
- wakeup.rfkill_release = true;
- ieee80211_report_wowlan_wakeup(vif, &wakeup,
- GFP_KERNEL);
- }
- return;
- }
-
- /* only for tracing for now */
- ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL);
- if (ret)
- IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
-
- ret = iwl_mvm_send_cmd(mvm, &cmd);
- if (ret) {
- IWL_ERR(mvm, "failed to query status (%d)\n", ret);
- return;
- }
-
- /* RF-kill already asserted again... */
- if (!cmd.resp_pkt)
- return;
-
- len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) {
- IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
- goto out;
- }
-
- status = (void *)cmd.resp_pkt->data;
-
- if (len - sizeof(struct iwl_cmd_header) !=
- sizeof(*status) +
- ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
- IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
- goto out;
- }
-
- reasons = le32_to_cpu(status->wakeup_reasons);
+ u32 reasons = le32_to_cpu(status->wakeup_reasons);
if (reasons == IWL_WOWLAN_WAKEUP_BY_NON_WIRELESS) {
wakeup_report = NULL;
@@ -1201,6 +1181,12 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
pktsize -= hdrlen;
if (ieee80211_has_protected(hdr->frame_control)) {
+ /*
+ * This is unlocked and using gtk_i(c)vlen,
+ * but since everything is under RTNL still
+ * that's not really a problem - changing
+ * it would be difficult.
+ */
if (is_multicast_ether_addr(hdr->addr1)) {
ivlen = mvm->gtk_ivlen;
icvlen += mvm->gtk_icvlen;
@@ -1251,9 +1237,82 @@ static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
report:
ieee80211_report_wowlan_wakeup(vif, wakeup_report, GFP_KERNEL);
kfree_skb(pkt);
+}
- out:
+/* releases the MVM mutex */
+static void iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ u32 base = mvm->error_event_table;
+ struct error_table_start {
+ /* cf. struct iwl_error_event_table */
+ u32 valid;
+ u32 error_id;
+ } err_info;
+ struct iwl_host_cmd cmd = {
+ .id = WOWLAN_GET_STATUSES,
+ .flags = CMD_SYNC | CMD_WANT_SKB,
+ };
+ struct iwl_wowlan_status *status;
+ int ret, len;
+
+ iwl_trans_read_mem_bytes(mvm->trans, base,
+ &err_info, sizeof(err_info));
+
+ if (err_info.valid) {
+ IWL_INFO(mvm, "error table is valid (%d)\n",
+ err_info.valid);
+ if (err_info.error_id == RF_KILL_INDICATOR_FOR_WOWLAN) {
+ struct cfg80211_wowlan_wakeup wakeup = {
+ .rfkill_release = true,
+ };
+ ieee80211_report_wowlan_wakeup(vif, &wakeup,
+ GFP_KERNEL);
+ }
+ goto out_unlock;
+ }
+
+ /* only for tracing for now */
+ ret = iwl_mvm_send_cmd_pdu(mvm, OFFLOADS_QUERY_CMD, CMD_SYNC, 0, NULL);
+ if (ret)
+ IWL_ERR(mvm, "failed to query offload statistics (%d)\n", ret);
+
+ ret = iwl_mvm_send_cmd(mvm, &cmd);
+ if (ret) {
+ IWL_ERR(mvm, "failed to query status (%d)\n", ret);
+ goto out_unlock;
+ }
+
+ /* RF-kill already asserted again... */
+ if (!cmd.resp_pkt)
+ goto out_unlock;
+
+ len = le32_to_cpu(cmd.resp_pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
+ if (len - sizeof(struct iwl_cmd_header) < sizeof(*status)) {
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+ goto out_free_resp;
+ }
+
+ status = (void *)cmd.resp_pkt->data;
+
+ if (len - sizeof(struct iwl_cmd_header) !=
+ sizeof(*status) +
+ ALIGN(le32_to_cpu(status->wake_packet_bufsize), 4)) {
+ IWL_ERR(mvm, "Invalid WoWLAN status response!\n");
+ goto out_free_resp;
+ }
+
+ /* now we have all the data we need, unlock to avoid mac80211 issues */
+ mutex_unlock(&mvm->mutex);
+
+ iwl_mvm_report_wakeup_reasons(mvm, vif, status);
+ iwl_free_resp(&cmd);
+ return;
+
+ out_free_resp:
iwl_free_resp(&cmd);
+ out_unlock:
+ mutex_unlock(&mvm->mutex);
}
static void iwl_mvm_read_d3_sram(struct iwl_mvm *mvm)
@@ -1310,10 +1369,13 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
iwl_mvm_read_d3_sram(mvm);
iwl_mvm_query_wakeup_reasons(mvm, vif);
+ /* has unlocked the mutex, so skip that */
+ goto out;
out_unlock:
mutex_unlock(&mvm->mutex);
+ out:
if (!test && vif)
ieee80211_resume_disconnect(vif);
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index e56ed2a8488..aac81b8984b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -352,6 +352,10 @@ static void iwl_dbgfs_update_pm(struct iwl_mvm *mvm,
IWL_DEBUG_POWER(mvm, "lprx_rssi_threshold=%d\n", val);
dbgfs_pm->lprx_rssi_threshold = val;
break;
+ case MVM_DEBUGFS_PM_SNOOZE_ENABLE:
+ IWL_DEBUG_POWER(mvm, "snooze_enable=%d\n", val);
+ dbgfs_pm->snooze_ena = val;
+ break;
}
}
@@ -405,6 +409,10 @@ static ssize_t iwl_dbgfs_pm_params_write(struct file *file,
POWER_LPRX_RSSI_THRESHOLD_MIN)
return -EINVAL;
param = MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD;
+ } else if (!strncmp("snooze_enable=", buf, 14)) {
+ if (sscanf(buf + 14, "%d", &val) != 1)
+ return -EINVAL;
+ param = MVM_DEBUGFS_PM_SNOOZE_ENABLE;
} else {
return -EINVAL;
}
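
The snooze_enable= branch follows the file's existing name=value parsing convention: strncmp() against the literal key including the '=', then sscanf() on the remainder. A standalone sketch of that convention (the parse_param() wrapper and the plain int targets are invented; the keys mirror the ones handled above):

    #include <stdio.h>
    #include <string.h>

    /* match the key with strncmp() (including '='), then parse the value */
    static int parse_param(const char *buf, int *snooze_ena, int *lprx_thresh)
    {
        int val;

        if (!strncmp("snooze_enable=", buf, 14)) {
            if (sscanf(buf + 14, "%d", &val) != 1)
                return -1;
            *snooze_ena = val;
        } else if (!strncmp("lprx_rssi_threshold=", buf, 20)) {
            if (sscanf(buf + 20, "%d", &val) != 1)
                return -1;
            *lprx_thresh = val;
        } else {
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        int snooze = 0, lprx = 0;

        parse_param("snooze_enable=1", &snooze, &lprx);
        parse_param("lprx_rssi_threshold=-65", &snooze, &lprx);
        printf("snooze=%d lprx=%d\n", snooze, lprx);
        return 0;
    }
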
@@ -424,40 +432,11 @@ static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
struct ieee80211_vif *vif = file->private_data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm *mvm = mvmvif->dbgfs_data;
- struct iwl_powertable_cmd cmd = {};
- char buf[256];
+ char buf[512];
int bufsz = sizeof(buf);
- int pos = 0;
+ int pos;
- iwl_mvm_power_build_cmd(mvm, vif, &cmd);
-
- pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
- 0 : 1);
- pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
- le32_to_cpu(cmd.skip_dtim_periods));
- pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
- iwlmvm_mod_params.power_scheme);
- pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
- le16_to_cpu(cmd.flags));
- pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
- cmd.keep_alive_seconds);
-
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
- pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
- 1 : 0);
- pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
- le32_to_cpu(cmd.rx_data_timeout));
- pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
- le32_to_cpu(cmd.tx_data_timeout));
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
- pos += scnprintf(buf+pos, bufsz-pos,
- "lprx_rssi_threshold = %d\n",
- le32_to_cpu(cmd.lprx_rssi_threshold));
- }
+ pos = iwl_mvm_power_dbgfs_read(mvm, vif, buf, bufsz);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -621,25 +600,160 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
}
#undef BT_MBOX_PRINT
+#define PRINT_STATS_LE32(_str, _val) \
+ pos += scnprintf(buf + pos, bufsz - pos, \
+ fmt_table, _str, \
+ le32_to_cpu(_val))
+
+static ssize_t iwl_dbgfs_fw_rx_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ static const char *fmt_table = "\t%-30s %10u\n";
+ static const char *fmt_header = "%-32s\n";
+ int pos = 0;
+ char *buf;
+ int ret;
+ int bufsz = sizeof(struct mvm_statistics_rx_phy) * 20 +
+ sizeof(struct mvm_statistics_rx_non_phy) * 10 +
+ sizeof(struct mvm_statistics_rx_ht_phy) * 10 + 200;
+ struct mvm_statistics_rx_phy *ofdm;
+ struct mvm_statistics_rx_phy *cck;
+ struct mvm_statistics_rx_non_phy *general;
+ struct mvm_statistics_rx_ht_phy *ht;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+
+ ofdm = &mvm->rx_stats.ofdm;
+ cck = &mvm->rx_stats.cck;
+ general = &mvm->rx_stats.general;
+ ht = &mvm->rx_stats.ofdm_ht;
+
+ pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - OFDM");
+ PRINT_STATS_LE32("ina_cnt", ofdm->ina_cnt);
+ PRINT_STATS_LE32("fina_cnt", ofdm->fina_cnt);
+ PRINT_STATS_LE32("plcp_err", ofdm->plcp_err);
+ PRINT_STATS_LE32("crc32_err", ofdm->crc32_err);
+ PRINT_STATS_LE32("overrun_err", ofdm->overrun_err);
+ PRINT_STATS_LE32("early_overrun_err", ofdm->early_overrun_err);
+ PRINT_STATS_LE32("crc32_good", ofdm->crc32_good);
+ PRINT_STATS_LE32("false_alarm_cnt", ofdm->false_alarm_cnt);
+ PRINT_STATS_LE32("fina_sync_err_cnt", ofdm->fina_sync_err_cnt);
+ PRINT_STATS_LE32("sfd_timeout", ofdm->sfd_timeout);
+ PRINT_STATS_LE32("fina_timeout", ofdm->fina_timeout);
+ PRINT_STATS_LE32("unresponded_rts", ofdm->unresponded_rts);
+ PRINT_STATS_LE32("rxe_frame_lmt_overrun",
+ ofdm->rxe_frame_limit_overrun);
+ PRINT_STATS_LE32("sent_ack_cnt", ofdm->sent_ack_cnt);
+ PRINT_STATS_LE32("sent_cts_cnt", ofdm->sent_cts_cnt);
+ PRINT_STATS_LE32("sent_ba_rsp_cnt", ofdm->sent_ba_rsp_cnt);
+ PRINT_STATS_LE32("dsp_self_kill", ofdm->dsp_self_kill);
+ PRINT_STATS_LE32("mh_format_err", ofdm->mh_format_err);
+ PRINT_STATS_LE32("re_acq_main_rssi_sum", ofdm->re_acq_main_rssi_sum);
+ PRINT_STATS_LE32("reserved", ofdm->reserved);
+
+ pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - CCK");
+ PRINT_STATS_LE32("ina_cnt", cck->ina_cnt);
+ PRINT_STATS_LE32("fina_cnt", cck->fina_cnt);
+ PRINT_STATS_LE32("plcp_err", cck->plcp_err);
+ PRINT_STATS_LE32("crc32_err", cck->crc32_err);
+ PRINT_STATS_LE32("overrun_err", cck->overrun_err);
+ PRINT_STATS_LE32("early_overrun_err", cck->early_overrun_err);
+ PRINT_STATS_LE32("crc32_good", cck->crc32_good);
+ PRINT_STATS_LE32("false_alarm_cnt", cck->false_alarm_cnt);
+ PRINT_STATS_LE32("fina_sync_err_cnt", cck->fina_sync_err_cnt);
+ PRINT_STATS_LE32("sfd_timeout", cck->sfd_timeout);
+ PRINT_STATS_LE32("fina_timeout", cck->fina_timeout);
+ PRINT_STATS_LE32("unresponded_rts", cck->unresponded_rts);
+ PRINT_STATS_LE32("rxe_frame_lmt_overrun",
+ cck->rxe_frame_limit_overrun);
+ PRINT_STATS_LE32("sent_ack_cnt", cck->sent_ack_cnt);
+ PRINT_STATS_LE32("sent_cts_cnt", cck->sent_cts_cnt);
+ PRINT_STATS_LE32("sent_ba_rsp_cnt", cck->sent_ba_rsp_cnt);
+ PRINT_STATS_LE32("dsp_self_kill", cck->dsp_self_kill);
+ PRINT_STATS_LE32("mh_format_err", cck->mh_format_err);
+ PRINT_STATS_LE32("re_acq_main_rssi_sum", cck->re_acq_main_rssi_sum);
+ PRINT_STATS_LE32("reserved", cck->reserved);
+
+ pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - GENERAL");
+ PRINT_STATS_LE32("bogus_cts", general->bogus_cts);
+ PRINT_STATS_LE32("bogus_ack", general->bogus_ack);
+ PRINT_STATS_LE32("non_bssid_frames", general->non_bssid_frames);
+ PRINT_STATS_LE32("filtered_frames", general->filtered_frames);
+ PRINT_STATS_LE32("non_channel_beacons", general->non_channel_beacons);
+ PRINT_STATS_LE32("channel_beacons", general->channel_beacons);
+ PRINT_STATS_LE32("num_missed_bcon", general->num_missed_bcon);
+ PRINT_STATS_LE32("adc_rx_saturation_time",
+ general->adc_rx_saturation_time);
+ PRINT_STATS_LE32("ina_detection_search_time",
+ general->ina_detection_search_time);
+ PRINT_STATS_LE32("beacon_silence_rssi_a",
+ general->beacon_silence_rssi_a);
+ PRINT_STATS_LE32("beacon_silence_rssi_b",
+ general->beacon_silence_rssi_b);
+ PRINT_STATS_LE32("beacon_silence_rssi_c",
+ general->beacon_silence_rssi_c);
+ PRINT_STATS_LE32("interference_data_flag",
+ general->interference_data_flag);
+ PRINT_STATS_LE32("channel_load", general->channel_load);
+ PRINT_STATS_LE32("dsp_false_alarms", general->dsp_false_alarms);
+ PRINT_STATS_LE32("beacon_rssi_a", general->beacon_rssi_a);
+ PRINT_STATS_LE32("beacon_rssi_b", general->beacon_rssi_b);
+ PRINT_STATS_LE32("beacon_rssi_c", general->beacon_rssi_c);
+ PRINT_STATS_LE32("beacon_energy_a", general->beacon_energy_a);
+ PRINT_STATS_LE32("beacon_energy_b", general->beacon_energy_b);
+ PRINT_STATS_LE32("beacon_energy_c", general->beacon_energy_c);
+ PRINT_STATS_LE32("num_bt_kills", general->num_bt_kills);
+ PRINT_STATS_LE32("directed_data_mpdu", general->directed_data_mpdu);
+
+ pos += scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - HT");
+ PRINT_STATS_LE32("plcp_err", ht->plcp_err);
+ PRINT_STATS_LE32("overrun_err", ht->overrun_err);
+ PRINT_STATS_LE32("early_overrun_err", ht->early_overrun_err);
+ PRINT_STATS_LE32("crc32_good", ht->crc32_good);
+ PRINT_STATS_LE32("crc32_err", ht->crc32_err);
+ PRINT_STATS_LE32("mh_format_err", ht->mh_format_err);
+ PRINT_STATS_LE32("agg_crc32_good", ht->agg_crc32_good);
+ PRINT_STATS_LE32("agg_mpdu_cnt", ht->agg_mpdu_cnt);
+ PRINT_STATS_LE32("agg_cnt", ht->agg_cnt);
+ PRINT_STATS_LE32("unsupport_mcs", ht->unsupport_mcs);
+
+ mutex_unlock(&mvm->mutex);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+
+ return ret;
+}
+#undef PRINT_STATS_LE32
+
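
PRINT_STATS_LE32 above is the usual append-into-one-buffer debugfs pattern: keep a running pos, let scnprintf() clamp each write to the space left, and hand the finished buffer to simple_read_from_buffer(). A userspace approximation with snprintf (buffer size, field names and values are made up; note that kernel scnprintf() returns the truncated count so pos can never pass bufsz, while plain snprintf() would need an explicit clamp once output could exceed the buffer):

    #include <stdio.h>
    #include <stdlib.h>

    /* append formatted lines into one buffer, tracking a running position */
    #define PRINT_STAT(_str, _val) \
        pos += snprintf(buf + pos, bufsz - pos, "\t%-30s %10u\n", _str, _val)

    int main(void)
    {
        size_t bufsz = 1024;
        size_t pos = 0;
        char *buf = calloc(1, bufsz);
        unsigned int plcp_err = 3, crc32_err = 7;

        if (!buf)
            return 1;

        pos += snprintf(buf + pos, bufsz - pos, "%-32s\n", "Statistics_Rx - OFDM");
        PRINT_STAT("plcp_err", plcp_err);
        PRINT_STAT("crc32_err", crc32_err);

        fputs(buf, stdout);
        free(buf);
        return 0;
    }
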
static ssize_t iwl_dbgfs_fw_restart_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_mvm *mvm = file->private_data;
- bool restart_fw = iwlwifi_mod_params.restart_fw;
int ret;
- iwlwifi_mod_params.restart_fw = true;
-
mutex_lock(&mvm->mutex);
+ /* allow one more restart - the one we're provoking here */
+ if (mvm->restart_fw >= 0)
+ mvm->restart_fw++;
+
/* take the return value to make compiler happy - it will fail anyway */
ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_ERROR, CMD_SYNC, 0, NULL);
mutex_unlock(&mvm->mutex);
- iwlwifi_mod_params.restart_fw = restart_fw;
-
return count;
}
@@ -661,8 +775,14 @@ static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
case MVM_DEBUGFS_BF_ROAMING_STATE:
dbgfs_bf->bf_roaming_state = value;
break;
- case MVM_DEBUGFS_BF_TEMPERATURE_DELTA:
- dbgfs_bf->bf_temperature_delta = value;
+ case MVM_DEBUGFS_BF_TEMP_THRESHOLD:
+ dbgfs_bf->bf_temp_threshold = value;
+ break;
+ case MVM_DEBUGFS_BF_TEMP_FAST_FILTER:
+ dbgfs_bf->bf_temp_fast_filter = value;
+ break;
+ case MVM_DEBUGFS_BF_TEMP_SLOW_FILTER:
+ dbgfs_bf->bf_temp_slow_filter = value;
break;
case MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER:
dbgfs_bf->bf_enable_beacon_filter = value;
@@ -721,13 +841,27 @@ static ssize_t iwl_dbgfs_bf_params_write(struct file *file,
value > IWL_BF_ROAMING_STATE_MAX)
return -EINVAL;
param = MVM_DEBUGFS_BF_ROAMING_STATE;
- } else if (!strncmp("bf_temperature_delta=", buf, 21)) {
- if (sscanf(buf+21, "%d", &value) != 1)
+ } else if (!strncmp("bf_temp_threshold=", buf, 18)) {
+ if (sscanf(buf+18, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_TEMP_THRESHOLD_MIN ||
+ value > IWL_BF_TEMP_THRESHOLD_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_TEMP_THRESHOLD;
+ } else if (!strncmp("bf_temp_fast_filter=", buf, 20)) {
+ if (sscanf(buf+20, "%d", &value) != 1)
return -EINVAL;
- if (value < IWL_BF_TEMPERATURE_DELTA_MIN ||
- value > IWL_BF_TEMPERATURE_DELTA_MAX)
+ if (value < IWL_BF_TEMP_FAST_FILTER_MIN ||
+ value > IWL_BF_TEMP_FAST_FILTER_MAX)
return -EINVAL;
- param = MVM_DEBUGFS_BF_TEMPERATURE_DELTA;
+ param = MVM_DEBUGFS_BF_TEMP_FAST_FILTER;
+ } else if (!strncmp("bf_temp_slow_filter=", buf, 20)) {
+ if (sscanf(buf+20, "%d", &value) != 1)
+ return -EINVAL;
+ if (value < IWL_BF_TEMP_SLOW_FILTER_MIN ||
+ value > IWL_BF_TEMP_SLOW_FILTER_MAX)
+ return -EINVAL;
+ param = MVM_DEBUGFS_BF_TEMP_SLOW_FILTER;
} else if (!strncmp("bf_enable_beacon_filter=", buf, 24)) {
if (sscanf(buf+24, "%d", &value) != 1)
return -EINVAL;
@@ -769,10 +903,7 @@ static ssize_t iwl_dbgfs_bf_params_write(struct file *file,
if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value) {
ret = iwl_mvm_disable_beacon_filter(mvm, vif);
} else {
- if (mvmvif->bf_enabled)
- ret = iwl_mvm_enable_beacon_filter(mvm, vif);
- else
- ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif);
}
mutex_unlock(&mvm->mutex);
@@ -789,41 +920,41 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
int pos = 0;
const size_t bufsz = sizeof(buf);
struct iwl_beacon_filter_cmd cmd = {
- .bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT,
- .bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT,
- .bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT,
- .bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT,
- .bf_enable_beacon_filter = IWL_BF_ENABLE_BEACON_FILTER_DEFAULT,
- .bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT,
- .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT),
- .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT),
- .ba_enable_beacon_abort = IWL_BA_ENABLE_BEACON_ABORT_DEFAULT,
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter =
+ cpu_to_le32(IWL_BF_ENABLE_BEACON_FILTER_DEFAULT),
+ .ba_enable_beacon_abort =
+ cpu_to_le32(IWL_BA_ENABLE_BEACON_ABORT_DEFAULT),
};
iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
- if (mvmvif->bf_enabled)
- cmd.bf_enable_beacon_filter = 1;
+ if (mvmvif->bf_data.bf_enabled)
+ cmd.bf_enable_beacon_filter = cpu_to_le32(1);
else
cmd.bf_enable_beacon_filter = 0;
pos += scnprintf(buf+pos, bufsz-pos, "bf_energy_delta = %d\n",
- cmd.bf_energy_delta);
+ le32_to_cpu(cmd.bf_energy_delta));
pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_energy_delta = %d\n",
- cmd.bf_roaming_energy_delta);
+ le32_to_cpu(cmd.bf_roaming_energy_delta));
pos += scnprintf(buf+pos, bufsz-pos, "bf_roaming_state = %d\n",
- cmd.bf_roaming_state);
- pos += scnprintf(buf+pos, bufsz-pos, "bf_temperature_delta = %d\n",
- cmd.bf_temperature_delta);
+ le32_to_cpu(cmd.bf_roaming_state));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_threshold = %d\n",
+ le32_to_cpu(cmd.bf_temp_threshold));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_fast_filter = %d\n",
+ le32_to_cpu(cmd.bf_temp_fast_filter));
+ pos += scnprintf(buf+pos, bufsz-pos, "bf_temp_slow_filter = %d\n",
+ le32_to_cpu(cmd.bf_temp_slow_filter));
pos += scnprintf(buf+pos, bufsz-pos, "bf_enable_beacon_filter = %d\n",
- cmd.bf_enable_beacon_filter);
+ le32_to_cpu(cmd.bf_enable_beacon_filter));
pos += scnprintf(buf+pos, bufsz-pos, "bf_debug_flag = %d\n",
- cmd.bf_debug_flag);
+ le32_to_cpu(cmd.bf_debug_flag));
pos += scnprintf(buf+pos, bufsz-pos, "bf_escape_timer = %d\n",
- cmd.bf_escape_timer);
+ le32_to_cpu(cmd.bf_escape_timer));
pos += scnprintf(buf+pos, bufsz-pos, "ba_escape_timer = %d\n",
- cmd.ba_escape_timer);
+ le32_to_cpu(cmd.ba_escape_timer));
pos += scnprintf(buf+pos, bufsz-pos, "ba_enable_beacon_abort = %d\n",
- cmd.ba_enable_beacon_abort);
+ le32_to_cpu(cmd.ba_enable_beacon_abort));
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -934,6 +1065,7 @@ MVM_DEBUGFS_READ_FILE_OPS(stations);
MVM_DEBUGFS_READ_FILE_OPS(bt_notif);
MVM_DEBUGFS_WRITE_FILE_OPS(power_down_allow);
MVM_DEBUGFS_WRITE_FILE_OPS(power_down_d3_allow);
+MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart);
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram);
@@ -957,6 +1089,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(power_down_allow, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(power_down_d3_allow, mvm->debugfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
@@ -988,7 +1121,11 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
char buf[100];
- if (!dbgfs_dir)
+ /*
+ * Check if the debugfs directory already exists before creating it.
+ * This may happen, for example, when resetting the hw or during suspend-resume.
+ */
+ if (!dbgfs_dir || mvmvif->dbgfs_dir)
return;
mvmvif->dbgfs_dir = debugfs_create_dir("iwlmvm", dbgfs_dir);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index 6f8b2c16ae1..df72fcdf817 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -98,34 +98,63 @@ enum iwl_proto_offloads {
IWL_D3_PROTO_OFFLOAD_NS = BIT(1),
};
-#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS 2
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1 2
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2 6
+#define IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX 6
/**
- * struct iwl_proto_offload_cmd - ARP/NS offload configuration
+ * struct iwl_proto_offload_cmd_common - ARP/NS offload common part
* @enabled: enable flags
* @remote_ipv4_addr: remote address to answer to (or zero if all)
* @host_ipv4_addr: our IPv4 address to respond to queries for
* @arp_mac_addr: our MAC address for ARP responses
- * @remote_ipv6_addr: remote address to answer to (or zero if all)
- * @solicited_node_ipv6_addr: broken -- solicited node address exists
- * for each target address
- * @target_ipv6_addr: our target addresses
- * @ndp_mac_addr: neighbor soliciation response MAC address
+ * @reserved: unused
*/
-struct iwl_proto_offload_cmd {
+struct iwl_proto_offload_cmd_common {
__le32 enabled;
__be32 remote_ipv4_addr;
__be32 host_ipv4_addr;
u8 arp_mac_addr[ETH_ALEN];
- __le16 reserved1;
+ __le16 reserved;
+} __packed;
+/**
+ * struct iwl_proto_offload_cmd_v1 - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @remote_ipv6_addr: remote address to answer to (or zero if all)
+ * @solicited_node_ipv6_addr: broken -- solicited node address exists
+ * for each target address
+ * @target_ipv6_addr: our target addresses
+ * @ndp_mac_addr: neighbor solicitation response MAC address
+ */
+struct iwl_proto_offload_cmd_v1 {
+ struct iwl_proto_offload_cmd_common common;
u8 remote_ipv6_addr[16];
u8 solicited_node_ipv6_addr[16];
- u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS][16];
+ u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1][16];
u8 ndp_mac_addr[ETH_ALEN];
__le16 reserved2;
} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_1 */
+/**
+ * struct iwl_proto_offload_cmd_v2 - ARP/NS offload configuration
+ * @common: common/IPv4 configuration
+ * @remote_ipv6_addr: remote address to answer to (or zero if all)
+ * @solicited_node_ipv6_addr: broken -- solicited node address exists
+ * for each target address
+ * @target_ipv6_addr: our target addresses
+ * @ndp_mac_addr: neighbor solicitation response MAC address
+ */
+struct iwl_proto_offload_cmd_v2 {
+ struct iwl_proto_offload_cmd_common common;
+ u8 remote_ipv6_addr[16];
+ u8 solicited_node_ipv6_addr[16];
+ u8 target_ipv6_addr[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2][16];
+ u8 ndp_mac_addr[ETH_ALEN];
+ u8 numValidIPv6Addresses;
+ u8 reserved2[3];
+} __packed; /* PROT_OFFLOAD_CONFIG_CMD_DB_S_VER_2 */
+
/*
* WOWLAN_PATTERNS
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index a6da359a80c..8e7ab41079c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -79,6 +79,10 @@
* '1' Driver enables PM (use rest of parameters)
* @POWER_FLAGS_SKIP_OVER_DTIM_MSK: '0' PM have to walk up every DTIM,
* '1' PM could sleep over DTIM till listen Interval.
+ * @POWER_FLAGS_SNOOZE_ENA_MSK: Enable snoozing only if uAPSD is enabled and all
+ * access categories are both delivery and trigger enabled.
+ * @POWER_FLAGS_BT_SCO_ENA: Enable BT SCO coex only if uAPSD and
+ * PBW snoozing are enabled
* @POWER_FLAGS_ADVANCE_PM_ENA_MSK: Advanced PM (uAPSD) enable mask
* @POWER_FLAGS_LPRX_ENA_MSK: Low Power RX enable.
*/
@@ -86,6 +90,8 @@ enum iwl_power_flags {
POWER_FLAGS_POWER_SAVE_ENA_MSK = BIT(0),
POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK = BIT(1),
POWER_FLAGS_SKIP_OVER_DTIM_MSK = BIT(2),
+ POWER_FLAGS_SNOOZE_ENA_MSK = BIT(5),
+ POWER_FLAGS_BT_SCO_ENA = BIT(8),
POWER_FLAGS_ADVANCE_PM_ENA_MSK = BIT(9),
POWER_FLAGS_LPRX_ENA_MSK = BIT(11),
};
@@ -93,7 +99,8 @@ enum iwl_power_flags {
#define IWL_POWER_VEC_SIZE 5
/**
- * struct iwl_powertable_cmd - Power Table Command
+ * struct iwl_powertable_cmd - legacy power command. Besides supporting the old
+ * API, it is also used with the new power API for device-wide power settings.
* POWER_TABLE_CMD = 0x77 (command, has simple generic response)
*
* @flags: Power table command flags from POWER_FLAGS_*
@@ -125,6 +132,76 @@ struct iwl_powertable_cmd {
} __packed;
/**
+ * struct iwl_mac_power_cmd - New power command containing uAPSD support
+ * MAC_PM_POWER_TABLE = 0xA9 (command, has simple generic response)
+ * @id_and_color: MAC context identifier
+ * @flags: Power table command flags from POWER_FLAGS_*
+ * @keep_alive_seconds: Keep alive period in seconds. Default - 25 sec.
+ * Minimum allowed: 3 * DTIM. The keep alive period must be
+ * set regardless of power scheme or current power state.
+ * FW uses this value also when PM is disabled.
+ * @rx_data_timeout: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - legacy PM
+ * @tx_data_timeout: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - legacy PM
+ * @sleep_interval: not in use
+ * @skip_dtim_periods: Number of DTIM periods to skip if Skip over DTIM flag
+ * is set. For example, if it is required to skip over
+ * one DTIM, this value needs to be set to 2 (DTIM periods).
+ * @rx_data_timeout_uapsd: Minimum time (usec) from last Rx packet for AM to
+ * PSM transition - uAPSD
+ * @tx_data_timeout_uapsd: Minimum time (usec) from last Tx packet for AM to
+ * PSM transition - uAPSD
+ * @lprx_rssi_threshold: Signal strength up to which LP RX can be enabled.
+ * Default: 80 dBm
+ * @num_skip_dtim: Number of DTIMs to skip if Skip over DTIM flag is set
+ * @snooze_interval: Maximum time between attempts to retrieve buffered data
+ * from the AP [msec]
+ * @snooze_window: A window of time in which PBW snoozing ensures that all
+ * packets are received. It is also the minimum time from the last
+ * received unicast RX packet before the client stops snoozing
+ * for data. [msec]
+ * @snooze_step: TBD
+ * @qndp_tid: TID client shall use for uAPSD QNDP triggers
+ * @uapsd_ac_flags: Set trigger-enabled and delivery-enabled indication for
+ * each corresponding AC.
+ * Use IEEE80211_WMM_IE_STA_QOSINFO_AC* for correct values.
+ * @uapsd_max_sp: Use IEEE80211_WMM_IE_STA_QOSINFO_SP_* for correct
+ * values.
+ * @heavy_tx_thld_packets: TX threshold measured in number of packets
+ * @heavy_rx_thld_packets: RX threshold measured in number of packets
+ * @heavy_tx_thld_percentage: TX threshold measured in load's percentage
+ * @heavy_rx_thld_percentage: RX threshold measured in load's percentage
+ * @limited_ps_threshold:
+*/
+struct iwl_mac_power_cmd {
+ /* CONTEXT_DESC_API_T_VER_1 */
+ __le32 id_and_color;
+
+ /* CLIENT_PM_POWER_TABLE_S_VER_1 */
+ __le16 flags;
+ __le16 keep_alive_seconds;
+ __le32 rx_data_timeout;
+ __le32 tx_data_timeout;
+ __le32 rx_data_timeout_uapsd;
+ __le32 tx_data_timeout_uapsd;
+ u8 lprx_rssi_threshold;
+ u8 skip_dtim_periods;
+ __le16 snooze_interval;
+ __le16 snooze_window;
+ u8 snooze_step;
+ u8 qndp_tid;
+ u8 uapsd_ac_flags;
+ u8 uapsd_max_sp;
+ u8 heavy_tx_thld_packets;
+ u8 heavy_rx_thld_packets;
+ u8 heavy_tx_thld_percentage;
+ u8 heavy_rx_thld_percentage;
+ u8 limited_ps_threshold;
+ u8 reserved;
+} __packed;
+
+/**
* struct iwl_beacon_filter_cmd
* REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
* @id_and_color: MAC context identifier
@@ -143,11 +220,21 @@ struct iwl_powertable_cmd {
* calculated for current beacon is less than the threshold, use
* Roaming Energy Delta Threshold, otherwise use normal Energy Delta
* Threshold. Typical energy threshold is -72dBm.
- * @bf_temperature_delta: Send Beacon to driver if delta in temperature values
- * calculated for this and the last passed beacon is greater than this
- * threshold. Zero value means that the temperature changeis ignored for
+ * @bf_temp_threshold: This threshold determines the type of temperature
+ * filtering (Slow or Fast) that is selected (Units are in Celsius):
+ * If the current temperature is above this threshold - Fast filter
+ * will be used, If the current temperature is below this threshold -
+ * Slow filter will be used.
+ * @bf_temp_fast_filter: Send Beacon to driver if delta in temperature values
+ * calculated for this and the last passed beacon is greater than this
+ * threshold. Zero value means that the temperature change is ignored for
* beacon filtering; beacons will not be forced to be sent to driver
+ * regardless of whether its temperature has been changed.
+ * @bf_temp_slow_filter: Send Beacon to driver if delta in temperature values
+ * calculated for this and the last passed beacon is greater than this
+ * threshold. Zero value means that the temperature change is ignored for
+ * beacon filtering; beacons will not be forced to be sent to driver
+ * regardless of whether its temperature has been changed.
* @bf_enable_beacon_filter: 1, beacon filtering is enabled; 0, disabled.
* @bf_filter_escape_timer: Send beacons to driver if no beacons were passed
* for a specific period of time. Units: Beacons.
@@ -156,17 +243,17 @@ struct iwl_powertable_cmd {
* @ba_enable_beacon_abort: 1, beacon abort is enabled; 0, disabled.
*/
struct iwl_beacon_filter_cmd {
- u8 bf_energy_delta;
- u8 bf_roaming_energy_delta;
- u8 bf_roaming_state;
- u8 bf_temperature_delta;
- u8 bf_enable_beacon_filter;
- u8 bf_debug_flag;
- __le16 reserved1;
+ __le32 bf_energy_delta;
+ __le32 bf_roaming_energy_delta;
+ __le32 bf_roaming_state;
+ __le32 bf_temp_threshold;
+ __le32 bf_temp_fast_filter;
+ __le32 bf_temp_slow_filter;
+ __le32 bf_enable_beacon_filter;
+ __le32 bf_debug_flag;
__le32 bf_escape_timer;
__le32 ba_escape_timer;
- u8 ba_enable_beacon_abort;
- u8 reserved2[3];
+ __le32 ba_enable_beacon_abort;
} __packed;
/* Beacon filtering and beacon abort */
@@ -182,9 +269,17 @@ struct iwl_beacon_filter_cmd {
#define IWL_BF_ROAMING_STATE_MAX 255
#define IWL_BF_ROAMING_STATE_MIN 0
-#define IWL_BF_TEMPERATURE_DELTA_DEFAULT 5
-#define IWL_BF_TEMPERATURE_DELTA_MAX 255
-#define IWL_BF_TEMPERATURE_DELTA_MIN 0
+#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112
+#define IWL_BF_TEMP_THRESHOLD_MAX 255
+#define IWL_BF_TEMP_THRESHOLD_MIN 0
+
+#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1
+#define IWL_BF_TEMP_FAST_FILTER_MAX 255
+#define IWL_BF_TEMP_FAST_FILTER_MIN 0
+
+#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
+#define IWL_BF_TEMP_SLOW_FILTER_MAX 255
+#define IWL_BF_TEMP_SLOW_FILTER_MIN 0
#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1
@@ -194,19 +289,23 @@ struct iwl_beacon_filter_cmd {
#define IWL_BF_ESCAPE_TIMER_MAX 1024
#define IWL_BF_ESCAPE_TIMER_MIN 0
-#define IWL_BA_ESCAPE_TIMER_DEFAULT 3
+#define IWL_BA_ESCAPE_TIMER_DEFAULT 6
+#define IWL_BA_ESCAPE_TIMER_D3 6
#define IWL_BA_ESCAPE_TIMER_MAX 1024
#define IWL_BA_ESCAPE_TIMER_MIN 0
#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1
-#define IWL_BF_CMD_CONFIG_DEFAULTS \
- .bf_energy_delta = IWL_BF_ENERGY_DELTA_DEFAULT, \
- .bf_roaming_energy_delta = IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT, \
- .bf_roaming_state = IWL_BF_ROAMING_STATE_DEFAULT, \
- .bf_temperature_delta = IWL_BF_TEMPERATURE_DELTA_DEFAULT, \
- .bf_debug_flag = IWL_BF_DEBUG_FLAG_DEFAULT, \
- .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), \
+#define IWL_BF_CMD_CONFIG_DEFAULTS \
+ .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA_DEFAULT), \
+ .bf_roaming_energy_delta = \
+ cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT), \
+ .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE_DEFAULT), \
+ .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD_DEFAULT), \
+ .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER_DEFAULT), \
+ .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER_DEFAULT), \
+ .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG_DEFAULT), \
+ .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), \
.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT)
#endif
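A minimal usage sketch, assuming the helpers introduced in this series (IWL_BF_CMD_CONFIG_DEFAULTS and iwl_mvm_beacon_filter_send_cmd()) are visible to the caller, showing how the reworked __le32 beacon filter command is built and sent:

/* Sketch only: enable beacon filtering with the new command layout. */
static int example_enable_bf(struct iwl_mvm *mvm)
{
	struct iwl_beacon_filter_cmd cmd = {
		IWL_BF_CMD_CONFIG_DEFAULTS,
		.bf_enable_beacon_filter = cpu_to_le32(1),
	};

	return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
}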
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index b60d1415172..83cb9b992ea 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -69,7 +69,6 @@
/* Scan Commands, Responses, Notifications */
/* Masks for iwl_scan_channel.type flags */
-#define SCAN_CHANNEL_TYPE_PASSIVE 0
#define SCAN_CHANNEL_TYPE_ACTIVE BIT(0)
#define SCAN_CHANNEL_NARROW_BAND BIT(22)
@@ -138,6 +137,8 @@ struct iwl_ssid_ie {
*@SCAN_FLAGS_DELAYED_SCAN_LOWBAND:
*@SCAN_FLAGS_DELAYED_SCAN_HIGHBAND:
*@SCAN_FLAGS_FRAGMENTED_SCAN:
+ *@SCAN_FLAGS_PASSIVE2ACTIVE: use active scan on channels that were active
+ * in the past hour, even if they are marked as passive.
*/
enum iwl_scan_flags {
SCAN_FLAGS_PERIODIC_SCAN = BIT(0),
@@ -145,6 +146,7 @@ enum iwl_scan_flags {
SCAN_FLAGS_DELAYED_SCAN_LOWBAND = BIT(2),
SCAN_FLAGS_DELAYED_SCAN_HIGHBAND = BIT(3),
SCAN_FLAGS_FRAGMENTED_SCAN = BIT(4),
+ SCAN_FLAGS_PASSIVE2ACTIVE = BIT(5),
};
/**
@@ -179,7 +181,7 @@ enum iwl_scan_type {
* @quiet_time: in msecs, dwell this time for active scan on quiet channels
* @quiet_plcp_th: quiet PLCP threshold (channel is quiet if less than
* this number of packets were received (typically 1)
- * @passive2active: is auto switching from passive to active allowed (0 or 1)
+ * @passive2active: whether auto switching from passive to active during scan is allowed
* @rxchain_sel_flags: RXON_RX_CHAIN_*
* @max_out_time: in usecs, max out of serving channel time
* @suspend_time: how long to pause scan when returning to service channel:
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
index 700cce73177..d606197bde8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-tx.h
@@ -91,7 +91,6 @@
* @TX_CMD_FLG_RESP_TO_DRV: zero this if the response should go only to FW
* @TX_CMD_FLG_CCMP_AGG: this frame uses CCMP for aggregation acceleration
* @TX_CMD_FLG_TKIP_MIC_DONE: FW already performed TKIP MIC calculation
- * @TX_CMD_FLG_CTS_ONLY: send CTS only, no data after that
* @TX_CMD_FLG_DUR: disable duration overwriting used in PS-Poll Assoc-id
* @TX_CMD_FLG_FW_DROP: FW should mark frame to be dropped
* @TX_CMD_FLG_EXEC_PAPD: execute PAPD
@@ -120,7 +119,6 @@ enum iwl_tx_flags {
TX_CMD_FLG_RESP_TO_DRV = BIT(21),
TX_CMD_FLG_CCMP_AGG = BIT(22),
TX_CMD_FLG_TKIP_MIC_DONE = BIT(23),
- TX_CMD_FLG_CTS_ONLY = BIT(24),
TX_CMD_FLG_DUR = BIT(25),
TX_CMD_FLG_FW_DROP = BIT(26),
TX_CMD_FLG_EXEC_PAPD = BIT(27),
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index cbfb3beae78..66264cc5a01 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -136,7 +136,7 @@ enum {
CALIB_RES_NOTIF_PHY_DB = 0x6b,
/* PHY_DB_CMD = 0x6c, */
- /* Power */
+ /* Power - legacy power table command */
POWER_TABLE_CMD = 0x77,
/* Thermal Throttling*/
@@ -159,6 +159,7 @@ enum {
TX_ANT_CONFIGURATION_CMD = 0x98,
BT_CONFIG = 0x9b,
STATISTICS_NOTIFICATION = 0x9d,
+ REDUCE_TX_POWER_CMD = 0x9f,
/* RF-KILL commands and notifications */
CARD_STATE_CMD = 0xa0,
@@ -166,6 +167,9 @@ enum {
MISSED_BEACONS_NOTIFICATION = 0xa2,
+ /* Power - new power table command */
+ MAC_PM_POWER_TABLE = 0xa9,
+
REPLY_RX_PHY_CMD = 0xc0,
REPLY_RX_MPDU_CMD = 0xc1,
BA_NOTIF = 0xc5,
@@ -223,6 +227,19 @@ struct iwl_tx_ant_cfg_cmd {
__le32 valid;
} __packed;
+/**
+ * struct iwl_reduce_tx_power_cmd - TX power reduction command
+ * REDUCE_TX_POWER_CMD = 0x9f
+ * @flags: (reserved for future implementation)
+ * @mac_context_id: id of the mac ctx for which we are reducing TX power.
+ * @pwr_restriction: TX power restriction in dBm.
+ */
+struct iwl_reduce_tx_power_cmd {
+ u8 flags;
+ u8 mac_context_id;
+ __le16 pwr_restriction;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
+
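As a usage sketch (essentially what the mac80211 glue added later in this patch does), the command is sent with the generic PDU helper; the MAC context id and the 10 dBm value below are illustrative only:

/* Sketch only: restrict TX power on MAC context 0 to 10 dBm. */
static int example_reduce_tx_power(struct iwl_mvm *mvm)
{
	struct iwl_reduce_tx_power_cmd cmd = {
		.mac_context_id = 0,
		.pwr_restriction = cpu_to_le16(10),	/* dBm */
	};

	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, CMD_SYNC,
				    sizeof(cmd), &cmd);
}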
/*
* Calibration control struct.
* Sent as part of the phy configuration command.
@@ -482,71 +499,199 @@ enum iwl_time_event_type {
TE_MAX
}; /* MAC_EVENT_TYPE_API_E_VER_1 */
+
+
+/* Time event - defines for command API v1 */
+
+/*
+ * @TE_V1_FRAG_NONE: fragmentation of the time event is NOT allowed.
+ * @TE_V1_FRAG_SINGLE: fragmentation of the time event is allowed, but only
+ * the first fragment is scheduled.
+ * @TE_V1_FRAG_DUAL: fragmentation of the time event is allowed, but only
+ * the first 2 fragments are scheduled.
+ * @TE_V1_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
+ * number of fragments are valid.
+ *
+ * Other than the constants defined above, specifying a fragmentation value 'x'
+ * means that the event can be fragmented but only the first 'x' will be
+ * scheduled.
+ */
+enum {
+ TE_V1_FRAG_NONE = 0,
+ TE_V1_FRAG_SINGLE = 1,
+ TE_V1_FRAG_DUAL = 2,
+ TE_V1_FRAG_ENDLESS = 0xffffffff
+};
+
+/* If a Time Event can be fragmented, this is the max number of fragments */
+#define TE_V1_FRAG_MAX_MSK 0x0fffffff
+/* Repeat the time event endlessly (until removed) */
+#define TE_V1_REPEAT_ENDLESS 0xffffffff
+/* If a Time Event has bounded repetitions, this is the maximal value */
+#define TE_V1_REPEAT_MAX_MSK_V1 0x0fffffff
+
/* Time Event dependencies: none, on another TE, or in a specific time */
enum {
- TE_INDEPENDENT = 0,
- TE_DEP_OTHER = 1,
- TE_DEP_TSF = 2,
- TE_EVENT_SOCIOPATHIC = 4,
+ TE_V1_INDEPENDENT = 0,
+ TE_V1_DEP_OTHER = BIT(0),
+ TE_V1_DEP_TSF = BIT(1),
+ TE_V1_EVENT_SOCIOPATHIC = BIT(2),
}; /* MAC_EVENT_DEPENDENCY_POLICY_API_E_VER_2 */
+
/*
+ * @TE_V1_NOTIF_NONE: no notifications
+ * @TE_V1_NOTIF_HOST_EVENT_START: request/receive notification on event start
+ * @TE_V1_NOTIF_HOST_EVENT_END: request/receive notification on event end
+ * @TE_V1_NOTIF_INTERNAL_EVENT_START: internal FW use
+ * @TE_V1_NOTIF_INTERNAL_EVENT_END: internal FW use.
+ * @TE_V1_NOTIF_HOST_FRAG_START: request/receive notification on frag start
+ * @TE_V1_NOTIF_HOST_FRAG_END: request/receive notification on frag end
+ * @TE_V1_NOTIF_INTERNAL_FRAG_START: internal FW use.
+ * @TE_V1_NOTIF_INTERNAL_FRAG_END: internal FW use.
+ *
* Supported Time event notifications configuration.
 * A notification (both event and fragment) includes a status indicating whether
* the FW was able to schedule the event or not. For fragment start/end
* notification the status is always success. There is no start/end fragment
* notification for monolithic events.
- *
- * @TE_NOTIF_NONE: no notifications
- * @TE_NOTIF_HOST_EVENT_START: request/receive notification on event start
- * @TE_NOTIF_HOST_EVENT_END:request/receive notification on event end
- * @TE_NOTIF_INTERNAL_EVENT_START: internal FW use
- * @TE_NOTIF_INTERNAL_EVENT_END: internal FW use.
- * @TE_NOTIF_HOST_FRAG_START: request/receive notification on frag start
- * @TE_NOTIF_HOST_FRAG_END:request/receive notification on frag end
- * @TE_NOTIF_INTERNAL_FRAG_START: internal FW use.
- * @TE_NOTIF_INTERNAL_FRAG_END: internal FW use.
*/
enum {
- TE_NOTIF_NONE = 0,
- TE_NOTIF_HOST_EVENT_START = 0x1,
- TE_NOTIF_HOST_EVENT_END = 0x2,
- TE_NOTIF_INTERNAL_EVENT_START = 0x4,
- TE_NOTIF_INTERNAL_EVENT_END = 0x8,
- TE_NOTIF_HOST_FRAG_START = 0x10,
- TE_NOTIF_HOST_FRAG_END = 0x20,
- TE_NOTIF_INTERNAL_FRAG_START = 0x40,
- TE_NOTIF_INTERNAL_FRAG_END = 0x80
+ TE_V1_NOTIF_NONE = 0,
+ TE_V1_NOTIF_HOST_EVENT_START = BIT(0),
+ TE_V1_NOTIF_HOST_EVENT_END = BIT(1),
+ TE_V1_NOTIF_INTERNAL_EVENT_START = BIT(2),
+ TE_V1_NOTIF_INTERNAL_EVENT_END = BIT(3),
+ TE_V1_NOTIF_HOST_FRAG_START = BIT(4),
+ TE_V1_NOTIF_HOST_FRAG_END = BIT(5),
+ TE_V1_NOTIF_INTERNAL_FRAG_START = BIT(6),
+ TE_V1_NOTIF_INTERNAL_FRAG_END = BIT(7),
}; /* MAC_EVENT_ACTION_API_E_VER_2 */
+
+/**
+ * struct iwl_time_event_cmd_v1 - configuring Time Events
+ * using MAC_TIME_EVENT_DATA_API_S_VER_1 (see also the version 2 API;
+ * which version is used is determined by IWL_UCODE_TLV_FLAGS)
+ * ( TIME_EVENT_CMD = 0x29 )
+ * @id_and_color: ID and color of the relevant MAC
+ * @action: action to perform, one of FW_CTXT_ACTION_*
+ * @id: this field has two meanings, depending on the action:
+ * If the action is ADD, then it means the type of event to add.
+ * For all other actions it is the unique event ID assigned when the
+ * event was added by the FW.
+ * @apply_time: When to start the Time Event (in GP2)
+ * @max_delay: maximum delay to event's start (apply time), in TU
+ * @depends_on: the unique ID of the event we depend on (if any)
+ * @interval: interval between repetitions, in TU
+ * @interval_reciprocal: 2^32 / interval
+ * @duration: duration of event in TU
+ * @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
+ * @dep_policy: one of TE_V1_INDEPENDENT, TE_V1_DEP_OTHER, TE_V1_DEP_TSF
+ * and TE_V1_EVENT_SOCIOPATHIC
+ * @is_present: 0 or 1, are we present or absent during the Time Event
+ * @max_frags: maximal number of fragments the Time Event can be divided to
+ * @notify: notifications using TE_V1_NOTIF_* (whom to notify when)
+ */
+struct iwl_time_event_cmd_v1 {
+ /* COMMON_INDEX_HDR_API_S_VER_1 */
+ __le32 id_and_color;
+ __le32 action;
+ __le32 id;
+ /* MAC_TIME_EVENT_DATA_API_S_VER_1 */
+ __le32 apply_time;
+ __le32 max_delay;
+ __le32 dep_policy;
+ __le32 depends_on;
+ __le32 is_present;
+ __le32 max_frags;
+ __le32 interval;
+ __le32 interval_reciprocal;
+ __le32 duration;
+ __le32 repeat;
+ __le32 notify;
+} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */
+
+
+/* Time event - defines for command API v2 */
+
/*
- * @TE_FRAG_NONE: fragmentation of the time event is NOT allowed.
- * @TE_FRAG_SINGLE: fragmentation of the time event is allowed, but only
+ * @TE_V2_FRAG_NONE: fragmentation of the time event is NOT allowed.
+ * @TE_V2_FRAG_SINGLE: fragmentation of the time event is allowed, but only
* the first fragment is scheduled.
- * @TE_FRAG_DUAL: fragmentation of the time event is allowed, but only
+ * @TE_V2_FRAG_DUAL: fragmentation of the time event is allowed, but only
* the first 2 fragments are scheduled.
- * @TE_FRAG_ENDLESS: fragmentation of the time event is allowed, and any number
- * of fragments are valid.
+ * @TE_V2_FRAG_ENDLESS: fragmentation of the time event is allowed, and any
+ * number of fragments are valid.
*
 * Other than the constants defined above, specifying a fragmentation value 'x'
* means that the event can be fragmented but only the first 'x' will be
* scheduled.
*/
enum {
- TE_FRAG_NONE = 0,
- TE_FRAG_SINGLE = 1,
- TE_FRAG_DUAL = 2,
- TE_FRAG_ENDLESS = 0xffffffff
+ TE_V2_FRAG_NONE = 0,
+ TE_V2_FRAG_SINGLE = 1,
+ TE_V2_FRAG_DUAL = 2,
+ TE_V2_FRAG_MAX = 0xfe,
+ TE_V2_FRAG_ENDLESS = 0xff
};
/* Repeat the time event endlessly (until removed) */
-#define TE_REPEAT_ENDLESS (0xffffffff)
+#define TE_V2_REPEAT_ENDLESS 0xff
/* If a Time Event has bounded repetitions, this is the maximal value */
-#define TE_REPEAT_MAX_MSK (0x0fffffff)
-/* If a Time Event can be fragmented, this is the max number of fragments */
-#define TE_FRAG_MAX_MSK (0x0fffffff)
+#define TE_V2_REPEAT_MAX 0xfe
+
+#define TE_V2_PLACEMENT_POS 12
+#define TE_V2_ABSENCE_POS 15
+
+/* Time event policy values (for time event cmd api v2)
+ * A notification (both event and fragment) includes a status indicating whether
+ * the FW was able to schedule the event or not. For fragment start/end
+ * notification the status is always success. There is no start/end fragment
+ * notification for monolithic events.
+ *
+ * @TE_V2_DEFAULT_POLICY: independent, social, present, unnoticeable
+ * @TE_V2_NOTIF_HOST_EVENT_START: request/receive notification on event start
+ * @TE_V2_NOTIF_HOST_EVENT_END: request/receive notification on event end
+ * @TE_V2_NOTIF_INTERNAL_EVENT_START: internal FW use
+ * @TE_V2_NOTIF_INTERNAL_EVENT_END: internal FW use.
+ * @TE_V2_NOTIF_HOST_FRAG_START: request/receive notification on frag start
+ * @TE_V2_NOTIF_HOST_FRAG_END: request/receive notification on frag end
+ * @TE_V2_NOTIF_INTERNAL_FRAG_START: internal FW use.
+ * @TE_V2_NOTIF_INTERNAL_FRAG_END: internal FW use.
+ * @TE_V2_DEP_OTHER: depends on another time event
+ * @TE_V2_DEP_TSF: depends on a specific time
+ * @TE_V2_EVENT_SOCIOPATHIC: can't co-exist with other events of the same MAC
+ * @TE_V2_ABSENCE: are we present or absent during the Time Event.
+ */
+enum {
+ TE_V2_DEFAULT_POLICY = 0x0,
+
+ /* notifications (event start/stop, fragment start/stop) */
+ TE_V2_NOTIF_HOST_EVENT_START = BIT(0),
+ TE_V2_NOTIF_HOST_EVENT_END = BIT(1),
+ TE_V2_NOTIF_INTERNAL_EVENT_START = BIT(2),
+ TE_V2_NOTIF_INTERNAL_EVENT_END = BIT(3),
+
+ TE_V2_NOTIF_HOST_FRAG_START = BIT(4),
+ TE_V2_NOTIF_HOST_FRAG_END = BIT(5),
+ TE_V2_NOTIF_INTERNAL_FRAG_START = BIT(6),
+ TE_V2_NOTIF_INTERNAL_FRAG_END = BIT(7),
+
+ TE_V2_NOTIF_MSK = 0xff,
+
+ /* placement characteristics */
+ TE_V2_DEP_OTHER = BIT(TE_V2_PLACEMENT_POS),
+ TE_V2_DEP_TSF = BIT(TE_V2_PLACEMENT_POS + 1),
+ TE_V2_EVENT_SOCIOPATHIC = BIT(TE_V2_PLACEMENT_POS + 2),
+
+ /* are we present or absent during the Time Event. */
+ TE_V2_ABSENCE = BIT(TE_V2_ABSENCE_POS),
+};
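The v2 policy is a single bitmask: the notification bits occupy the low byte (TE_V2_NOTIF_MSK), while placement and absence bits sit at TE_V2_PLACEMENT_POS and above. A small illustrative composition (the particular combination is chosen for the example only):

/* Sketch: host start/end notifications for a TSF-dependent time event. */
static const u16 example_te_v2_policy = TE_V2_NOTIF_HOST_EVENT_START |
					TE_V2_NOTIF_HOST_EVENT_END |
					TE_V2_DEP_TSF;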
/**
- * struct iwl_time_event_cmd - configuring Time Events
+ * struct iwl_time_event_cmd_v2 - configuring Time Events
+ * using MAC_TIME_EVENT_DATA_API_S_VER_2 (see also the version 1 API;
+ * which version is used is determined by IWL_UCODE_TLV_FLAGS)
* ( TIME_EVENT_CMD = 0x29 )
* @id_and_color: ID and color of the relevant MAC
* @action: action to perform, one of FW_CTXT_ACTION_*
@@ -558,32 +703,30 @@ enum {
* @max_delay: maximum delay to event's start (apply time), in TU
* @depends_on: the unique ID of the event we depend on (if any)
* @interval: interval between repetitions, in TU
- * @interval_reciprocal: 2^32 / interval
* @duration: duration of event in TU
* @repeat: how many repetitions to do, can be TE_REPEAT_ENDLESS
- * @dep_policy: one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF
- * @is_present: 0 or 1, are we present or absent during the Time Event
* @max_frags: maximal number of fragments the Time Event can be divided to
- * @notify: notifications using TE_NOTIF_* (whom to notify when)
+ * @policy: defines whether uCode shall notify the host or other uCode modules
+ *	on event and/or fragment start and/or end,
+ *	using one of TE_INDEPENDENT, TE_DEP_OTHER, TE_DEP_TSF and
+ *	TE_EVENT_SOCIOPATHIC,
+ *	together with TE_ABSENCE and the TE_NOTIF_* flags
*/
-struct iwl_time_event_cmd {
+struct iwl_time_event_cmd_v2 {
/* COMMON_INDEX_HDR_API_S_VER_1 */
__le32 id_and_color;
__le32 action;
__le32 id;
- /* MAC_TIME_EVENT_DATA_API_S_VER_1 */
+ /* MAC_TIME_EVENT_DATA_API_S_VER_2 */
__le32 apply_time;
__le32 max_delay;
- __le32 dep_policy;
__le32 depends_on;
- __le32 is_present;
- __le32 max_frags;
__le32 interval;
- __le32 interval_reciprocal;
__le32 duration;
- __le32 repeat;
- __le32 notify;
-} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_1 */
+ u8 repeat;
+ u8 max_frags;
+ __le16 policy;
+} __packed; /* MAC_TIME_EVENT_CMD_API_S_VER_2 */
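Putting the pieces together, a hedged sketch of filling the v2 command for a one-shot, non-fragmented event; FW_CTXT_ACTION_ADD and the 500 TU duration are assumptions for illustration, not values introduced by this patch:

/* Sketch only: one-shot, non-fragmented time event using the v2 layout. */
static void example_fill_te_v2(struct iwl_mvm_vif *mvmvif,
			       struct iwl_time_event_cmd_v2 *cmd)
{
	cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
							    mvmvif->color));
	cmd->action = cpu_to_le32(FW_CTXT_ACTION_ADD);	/* assumed existing */
	cmd->duration = cpu_to_le32(500);		/* TU, illustrative */
	cmd->repeat = 1;
	cmd->max_frags = TE_V2_FRAG_NONE;
	cmd->policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
				  TE_V2_NOTIF_HOST_EVENT_END);
}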
/**
* struct iwl_time_event_resp - response structure to iwl_time_event_cmd
@@ -765,6 +908,14 @@ struct iwl_phy_context_cmd {
} __packed; /* PHY_CONTEXT_CMD_API_VER_1 */
#define IWL_RX_INFO_PHY_CNT 8
+#define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1
+#define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff
+#define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00
+#define IWL_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000
+#define IWL_RX_INFO_ENERGY_ANT_A_POS 0
+#define IWL_RX_INFO_ENERGY_ANT_B_POS 8
+#define IWL_RX_INFO_ENERGY_ANT_C_POS 16
+
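The new energy masks pack three 8-bit per-antenna values into the phy-info word at IWL_RX_INFO_ENERGY_ANT_ABC_IDX. A hedged sketch of extracting antenna A (the exact phy_info array layout is assumed from the neighbouring IWL_RX_INFO_* definitions):

/* Sketch only: pull the antenna-A energy byte out of the phy info words. */
static u8 example_energy_ant_a(const __le32 *phy_info)
{
	u32 val = le32_to_cpu(phy_info[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);

	return (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
	       IWL_RX_INFO_ENERGY_ANT_A_POS;
}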
#define IWL_RX_INFO_AGC_IDX 1
#define IWL_RX_INFO_RSSI_AB_IDX 2
#define IWL_OFDM_AGC_A_MSK 0x0000007f
@@ -1170,7 +1321,7 @@ struct mvm_statistics_general {
struct mvm_statistics_general_common common;
__le32 beacon_filtered;
__le32 missed_beacons;
- __s8 beacon_filter_everage_energy;
+ __s8 beacon_filter_average_energy;
__s8 beacon_filter_reason;
__s8 beacon_filter_current_energy;
__s8 beacon_filter_reserved;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index cd7c0032cc5..c76299a3a1e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -78,22 +78,6 @@
#define UCODE_VALID_OK cpu_to_le32(0x1)
-/* Default calibration values for WkP - set to INIT image w/o running */
-static const u8 wkp_calib_values_rx_iq_skew[] = { 0x00, 0x00, 0x01, 0x00 };
-static const u8 wkp_calib_values_tx_iq_skew[] = { 0x01, 0x00, 0x00, 0x00 };
-
-struct iwl_calib_default_data {
- u16 size;
- void *data;
-};
-
-#define CALIB_SIZE_N_DATA(_buf) {.size = sizeof(_buf), .data = &_buf}
-
-static const struct iwl_calib_default_data wkp_calib_default_data[12] = {
- [9] = CALIB_SIZE_N_DATA(wkp_calib_values_tx_iq_skew),
- [11] = CALIB_SIZE_N_DATA(wkp_calib_values_rx_iq_skew),
-};
-
struct iwl_mvm_alive_data {
bool valid;
u32 scd_base_addr;
@@ -248,40 +232,6 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
-static int iwl_set_default_calibrations(struct iwl_mvm *mvm)
-{
- u8 cmd_raw[16]; /* holds the variable size commands */
- struct iwl_set_calib_default_cmd *cmd =
- (struct iwl_set_calib_default_cmd *)cmd_raw;
- int ret, i;
-
- /* Setting default values for calibrations we don't run */
- for (i = 0; i < ARRAY_SIZE(wkp_calib_default_data); i++) {
- u16 cmd_len;
-
- if (wkp_calib_default_data[i].size == 0)
- continue;
-
- memset(cmd_raw, 0, sizeof(cmd_raw));
- cmd_len = wkp_calib_default_data[i].size + sizeof(cmd);
- cmd->calib_index = cpu_to_le16(i);
- cmd->length = cpu_to_le16(wkp_calib_default_data[i].size);
- if (WARN_ONCE(cmd_len > sizeof(cmd_raw),
- "Need to enlarge cmd_raw to %d\n", cmd_len))
- break;
- memcpy(cmd->data, wkp_calib_default_data[i].data,
- wkp_calib_default_data[i].size);
- ret = iwl_mvm_send_cmd_pdu(mvm, SET_CALIB_DEFAULT_CMD, 0,
- sizeof(*cmd) +
- wkp_calib_default_data[i].size,
- cmd);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
struct iwl_notification_wait calib_wait;
@@ -342,11 +292,6 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
if (ret)
goto error;
- /* need to set default values */
- ret = iwl_set_default_calibrations(mvm);
- if (ret)
- goto error;
-
/*
* Send phy configurations command to init uCode
* to start the 16.0 uCode init image internal calibrations.
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 94aae9c8562..5fe23a5ea9b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -264,7 +264,8 @@ static int iwl_mvm_mac_ctxt_allocate_resources(struct iwl_mvm *mvm,
return 0;
/* Therefore, in recovery, we can't get here */
- WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
+ if (WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
+ return -EBUSY;
mvmvif->id = find_first_bit(data.available_mac_ids,
NUM_MAC_INDEX_DRIVER);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index e08683b2053..9833cdf6177 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -153,7 +153,10 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_TIMING_BEACON_ONLY |
- IEEE80211_HW_CONNECTION_MONITOR;
+ IEEE80211_HW_CONNECTION_MONITOR |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS |
+ IEEE80211_HW_SUPPORTS_UAPSD;
hw->queues = IWL_MVM_FIRST_AGG_QUEUE;
hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
@@ -188,6 +191,8 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->max_remain_on_channel_duration = 10000;
hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+ hw->uapsd_queues = IWL_UAPSD_AC_INFO;
+ hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
/* Extract MAC address */
memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
@@ -257,7 +262,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
if (ret)
return ret;
- return ieee80211_register_hw(mvm->hw);
+ ret = ieee80211_register_hw(mvm->hw);
+ if (ret)
+ iwl_mvm_leds_exit(mvm);
+
+ return ret;
}
static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
@@ -385,6 +394,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
ieee80211_wake_queues(mvm->hw);
mvm->vif_count = 0;
+ mvm->rx_ba_sessions = 0;
}
static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
@@ -501,12 +511,33 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
- /* Allocate resources for the MAC context, and add it the the fw */
+ /* Allocate resources for the MAC context, and add it to the fw */
ret = iwl_mvm_mac_ctxt_init(mvm, vif);
if (ret)
goto out_unlock;
/*
+ * TODO: remove this temporary code.
+	 * Currently MVM FW supports power management only on a single MAC.
+	 * If a new interface is added, disable PM on the existing interface.
+	 * P2P device is a special case, since it is handled by FW similarly to
+	 * scan. If a P2P device is added, PM remains enabled on the existing
+ * interface.
+ * Note: the method below does not count the new interface being added
+ * at this moment.
+ */
+ if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
+ mvm->vif_count++;
+ if (mvm->vif_count > 1) {
+ IWL_DEBUG_MAC80211(mvm,
+ "Disable power on existing interfaces\n");
+ ieee80211_iterate_active_interfaces_atomic(
+ mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_pm_disable_iterator, mvm);
+ }
+
+ /*
* The AP binding flow can be done only after the beacon
* template is configured (which happens only in the mac80211
* start_ap() flow), and adding the broadcast station can happen
@@ -526,30 +557,10 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
goto out_release;
}
+ iwl_mvm_vif_dbgfs_register(mvm, vif);
goto out_unlock;
}
- /*
- * TODO: remove this temporary code.
- * Currently MVM FW supports power management only on single MAC.
- * If new interface added, disable PM on existing interface.
- * P2P device is a special case, since it is handled by FW similary to
- * scan. If P2P deviced is added, PM remains enabled on existing
- * interface.
- * Note: the method below does not count the new interface being added
- * at this moment.
- */
- if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
- mvm->vif_count++;
- if (mvm->vif_count > 1) {
- IWL_DEBUG_MAC80211(mvm,
- "Disable power on existing interfaces\n");
- ieee80211_iterate_active_interfaces_atomic(
- mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_pm_disable_iterator, mvm);
- }
-
ret = iwl_mvm_mac_ctxt_add(mvm, vif);
if (ret)
goto out_release;
@@ -561,16 +572,18 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
iwl_mvm_power_update_mode(mvm, vif);
/* beacon filtering */
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ if (ret)
+ goto out_remove_mac;
+
if (!mvm->bf_allowed_vif &&
- vif->type == NL80211_IFTYPE_STATION && !vif->p2p){
+ vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
+ mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED){
mvm->bf_allowed_vif = mvmvif;
- vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
+ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI;
}
- ret = iwl_mvm_disable_beacon_filter(mvm, vif);
- if (ret)
- goto out_release;
-
/*
* P2P_DEVICE interface does not have a channel context assigned to it,
* so a dedicated PHY context is allocated to it and the corresponding
@@ -581,7 +594,7 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
if (!mvmvif->phy_ctxt) {
ret = -ENOSPC;
- goto out_remove_mac;
+ goto out_free_bf;
}
iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
@@ -605,6 +618,12 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
iwl_mvm_binding_remove_vif(mvm, vif);
out_unref_phy:
iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
+ out_free_bf:
+ if (mvm->bf_allowed_vif == mvmvif) {
+ mvm->bf_allowed_vif = NULL;
+ vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI);
+ }
out_remove_mac:
mvmvif->phy_ctxt = NULL;
iwl_mvm_mac_ctxt_remove(mvm, vif);
@@ -669,7 +688,8 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
if (mvm->bf_allowed_vif == mvmvif) {
mvm->bf_allowed_vif = NULL;
- vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
+ vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
+ IEEE80211_VIF_SUPPORTS_CQM_RSSI);
}
iwl_mvm_vif_dbgfs_clean(mvm, vif);
@@ -714,6 +734,20 @@ out_release:
mutex_unlock(&mvm->mutex);
}
+static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ s8 tx_power)
+{
+ /* FW is in charge of regulatory enforcement */
+ struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = {
+ .mac_context_id = iwl_mvm_vif_from_mac80211(vif)->id,
+ .pwr_restriction = cpu_to_le16(tx_power),
+ };
+
+ return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, CMD_SYNC,
+ sizeof(reduce_txpwr_cmd),
+ &reduce_txpwr_cmd);
+}
+
static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
{
return 0;
@@ -761,7 +795,6 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_ERR(mvm, "failed to update quotas\n");
return;
}
- iwl_mvm_bt_coex_vif_assoc(mvm, vif);
iwl_mvm_configure_mcast_filter(mvm, vif);
} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
/* remove AP station now that the MAC is unassoc */
@@ -774,9 +807,19 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
if (ret)
IWL_ERR(mvm, "failed to update quotas\n");
}
- ret = iwl_mvm_power_update_mode(mvm, vif);
- if (ret)
- IWL_ERR(mvm, "failed to update power mode\n");
+
+ /* reset rssi values */
+ mvmvif->bf_data.ave_beacon_signal = 0;
+
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)) {
+ /* Workaround for FW bug, otherwise FW disables device
+ * power save upon disassociation
+ */
+ ret = iwl_mvm_power_update_mode(mvm, vif);
+ if (ret)
+ IWL_ERR(mvm, "failed to update power mode\n");
+ }
+ iwl_mvm_bt_coex_vif_assoc(mvm, vif);
} else if (changes & BSS_CHANGED_BEACON_INFO) {
/*
* We received a beacon _after_ association so
@@ -784,11 +827,25 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
*/
iwl_mvm_remove_time_event(mvm, mvmvif,
&mvmvif->time_event_data);
- } else if (changes & BSS_CHANGED_PS) {
+ } else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_QOS)) {
ret = iwl_mvm_power_update_mode(mvm, vif);
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
}
+ if (changes & BSS_CHANGED_TXPOWER) {
+ IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
+ bss_conf->txpower);
+ iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
+ }
+
+ if (changes & BSS_CHANGED_CQM) {
+ IWL_DEBUG_MAC80211(mvm, "cqm info_changed");
+ /* reset cqm events tracking */
+ mvmvif->bf_data.last_cqm_event = 0;
+ ret = iwl_mvm_update_beacon_filter(mvm, vif);
+ if (ret)
+ IWL_ERR(mvm, "failed to update CQM thresholds\n");
+ }
}
static int iwl_mvm_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
@@ -1006,6 +1063,21 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
+ /*
+ * Firmware bug - it'll crash if the beacon interval is less
+ * than 16. We can't avoid connecting at all, so refuse the
+ * station state change, this will cause mac80211 to abandon
+ * attempts to connect to this AP, and eventually wpa_s will
+ * blacklist the AP...
+ */
+ if (vif->type == NL80211_IFTYPE_STATION &&
+ vif->bss_conf.beacon_int < 16) {
+ IWL_ERR(mvm,
+ "AP %pM beacon interval is %d, refusing due to firmware bug!\n",
+ sta->addr, vif->bss_conf.beacon_int);
+ ret = -EINVAL;
+ goto out_unlock;
+ }
ret = iwl_mvm_add_sta(mvm, vif, sta);
} else if (old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_AUTH) {
@@ -1038,6 +1110,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
} else {
ret = -EIO;
}
+ out_unlock:
mutex_unlock(&mvm->mutex);
return ret;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index d40d7db185d..b0389279cc1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -76,6 +76,7 @@
#include "iwl-trans.h"
#include "sta.h"
#include "fw-api.h"
+#include "constants.h"
#define IWL_INVALID_MAC80211_QUEUE 0xff
#define IWL_MVM_MAX_ADDRESSES 5
@@ -91,6 +92,9 @@ enum iwl_mvm_tx_fifo {
};
extern struct ieee80211_ops iwl_mvm_hw_ops;
+extern const struct iwl_mvm_power_ops pm_legacy_ops;
+extern const struct iwl_mvm_power_ops pm_mac_ops;
+
/**
* struct iwl_mvm_mod_params - module parameters for iwlmvm
* @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted.
@@ -149,6 +153,22 @@ enum iwl_power_scheme {
};
#define IWL_CONN_MAX_LISTEN_INTERVAL 70
+#define IWL_UAPSD_AC_INFO (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
+ IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
+#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2
+
+struct iwl_mvm_power_ops {
+ int (*power_update_mode)(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
+ int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ char *buf, int bufsz);
+#endif
+};
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
enum iwl_dbgfs_pm_mask {
@@ -160,10 +180,11 @@ enum iwl_dbgfs_pm_mask {
MVM_DEBUGFS_PM_DISABLE_POWER_OFF = BIT(5),
MVM_DEBUGFS_PM_LPRX_ENA = BIT(6),
MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD = BIT(7),
+ MVM_DEBUGFS_PM_SNOOZE_ENABLE = BIT(8),
};
struct iwl_dbgfs_pm {
- u8 keep_alive_seconds;
+ u16 keep_alive_seconds;
u32 rx_data_timeout;
u32 tx_data_timeout;
bool skip_over_dtim;
@@ -171,6 +192,7 @@ struct iwl_dbgfs_pm {
bool disable_power_off;
bool lprx_ena;
u32 lprx_rssi_threshold;
+ bool snooze_ena;
int mask;
};
@@ -180,24 +202,28 @@ enum iwl_dbgfs_bf_mask {
MVM_DEBUGFS_BF_ENERGY_DELTA = BIT(0),
MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA = BIT(1),
MVM_DEBUGFS_BF_ROAMING_STATE = BIT(2),
- MVM_DEBUGFS_BF_TEMPERATURE_DELTA = BIT(3),
- MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(4),
- MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(5),
- MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(6),
- MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(7),
- MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(8),
+ MVM_DEBUGFS_BF_TEMP_THRESHOLD = BIT(3),
+ MVM_DEBUGFS_BF_TEMP_FAST_FILTER = BIT(4),
+ MVM_DEBUGFS_BF_TEMP_SLOW_FILTER = BIT(5),
+ MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER = BIT(6),
+ MVM_DEBUGFS_BF_DEBUG_FLAG = BIT(7),
+ MVM_DEBUGFS_BF_ESCAPE_TIMER = BIT(8),
+ MVM_DEBUGFS_BA_ESCAPE_TIMER = BIT(9),
+ MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT = BIT(10),
};
struct iwl_dbgfs_bf {
- u8 bf_energy_delta;
- u8 bf_roaming_energy_delta;
- u8 bf_roaming_state;
- u8 bf_temperature_delta;
- u8 bf_enable_beacon_filter;
- u8 bf_debug_flag;
+ u32 bf_energy_delta;
+ u32 bf_roaming_energy_delta;
+ u32 bf_roaming_state;
+ u32 bf_temp_threshold;
+ u32 bf_temp_fast_filter;
+ u32 bf_temp_slow_filter;
+ u32 bf_enable_beacon_filter;
+ u32 bf_debug_flag;
u32 bf_escape_timer;
u32 ba_escape_timer;
- u8 ba_enable_beacon_abort;
+ u32 ba_enable_beacon_abort;
int mask;
};
#endif
@@ -209,6 +235,21 @@ enum iwl_mvm_smps_type_request {
};
/**
+* struct iwl_mvm_vif_bf_data - beacon filtering related data
+* @bf_enabled: indicates if beacon filtering is enabled
+* @ba_enabled: indicates if beacon abort is enabled
+* @last_beacon_signal: last beacon RSSI signal in dBm
+* @ave_beacon_signal: average beacon signal
+* @last_cqm_event: RSSI of the last CQM event
+*/
+struct iwl_mvm_vif_bf_data {
+ bool bf_enabled;
+ bool ba_enabled;
+ s8 ave_beacon_signal;
+ s8 last_cqm_event;
+};
+
+/**
* struct iwl_mvm_vif - data per Virtual Interface, it is a MAC context
* @id: between 0 and 3
* @color: to solve races upon MAC addition and removal
@@ -233,8 +274,7 @@ struct iwl_mvm_vif {
bool uploaded;
bool ap_active;
bool monitor_active;
- /* indicate whether beacon filtering is enabled */
- bool bf_enabled;
+ struct iwl_mvm_vif_bf_data bf_data;
u32 ap_beacon_time;
@@ -268,7 +308,7 @@ struct iwl_mvm_vif {
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 addresses for WoWLAN */
- struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS];
+ struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
int num_target_ipv6_addrs;
#endif
#endif
@@ -402,6 +442,8 @@ struct iwl_mvm {
struct iwl_notif_wait_data notif_wait;
+ struct mvm_statistics_rx rx_stats;
+
unsigned long transport_queue_stop;
u8 queue_to_mac80211[IWL_MAX_HW_QUEUES];
atomic_t queue_stop_count[IWL_MAX_HW_QUEUES];
@@ -419,6 +461,7 @@ struct iwl_mvm {
struct work_struct sta_drained_wk;
unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
atomic_t pending_frames[IWL_MVM_STATION_COUNT];
+ u8 rx_ba_sessions;
/* configured by mac80211 */
u32 rts_threshold;
@@ -458,6 +501,9 @@ struct iwl_mvm {
*/
u8 vif_count;
+ /* -1 for always, 0 for never, >0 for that many times */
+ s8 restart_fw;
+
struct led_classdev led;
struct ieee80211_vif *p2p_device_vif;
@@ -481,6 +527,8 @@ struct iwl_mvm {
/* Thermal Throttling and CTkill */
struct iwl_mvm_tt_mgmt thermal_throttle;
s32 temperature; /* Celsius */
+
+ const struct iwl_mvm_power_ops *pm_ops;
};
/* Extract MVM priv from op_mode and _hw */
@@ -524,6 +572,7 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
enum ieee80211_band band);
u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
+void iwl_mvm_dump_sram(struct iwl_mvm *mvm);
u8 first_antenna(u8 mask);
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
@@ -659,10 +708,26 @@ int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
u8 flags, bool init);
/* power management */
-int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
-void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct iwl_powertable_cmd *cmd);
+static inline int iwl_mvm_power_update_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ return mvm->pm_ops->power_update_mode(mvm, vif);
+}
+
+static inline int iwl_mvm_power_disable(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ return mvm->pm_ops->power_disable(mvm, vif);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ char *buf, int bufsz)
+{
+ return mvm->pm_ops->power_dbgfs_read(mvm, vif, buf, bufsz);
+}
+#endif
int iwl_mvm_leds_init(struct iwl_mvm *mvm);
void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
@@ -706,6 +771,12 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
struct ieee80211_vif *vif);
+int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
+ struct iwl_beacon_filter_cmd *cmd);
+int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, bool enable);
+int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif);
/* SMPS */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index af79a14063a..2fcc8ef88a6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -275,6 +275,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(BEACON_NOTIFICATION),
CMD(BEACON_TEMPLATE_CMD),
CMD(STATISTICS_NOTIFICATION),
+ CMD(REDUCE_TX_POWER_CMD),
CMD(TX_ANT_CONFIGURATION_CMD),
CMD(D3_CONFIG_CMD),
CMD(PROT_OFFLOAD_CONFIG_CMD),
@@ -301,6 +302,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(MCAST_FILTER_CMD),
CMD(REPLY_BEACON_FILTERING_CMD),
CMD(REPLY_THERMAL_MNG_BACKOFF),
+ CMD(MAC_PM_POWER_TABLE),
};
#undef CMD
@@ -340,6 +342,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->fw = fw;
mvm->hw = hw;
+ mvm->restart_fw = iwlwifi_mod_params.restart_fw ? -1 : 0;
+
mutex_init(&mvm->mutex);
spin_lock_init(&mvm->async_handlers_lock);
INIT_LIST_HEAD(&mvm->time_event_list);
@@ -431,6 +435,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (err)
goto out_unregister;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD)
+ mvm->pm_ops = &pm_mac_ops;
+ else
+ mvm->pm_ops = &pm_legacy_ops;
+
+ memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
+
return op_mode;
out_unregister:
@@ -638,6 +649,22 @@ static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
ieee80211_free_txskb(mvm->hw, skb);
}
+struct iwl_mvm_reprobe {
+ struct device *dev;
+ struct work_struct work;
+};
+
+static void iwl_mvm_reprobe_wk(struct work_struct *wk)
+{
+ struct iwl_mvm_reprobe *reprobe;
+
+ reprobe = container_of(wk, struct iwl_mvm_reprobe, work);
+ if (device_reprobe(reprobe->dev))
+ dev_err(reprobe->dev, "reprobe failed!\n");
+ kfree(reprobe);
+ module_put(THIS_MODULE);
+}
+
static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
{
iwl_abort_notification_waits(&mvm->notif_wait);
@@ -649,9 +676,30 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
* can't recover this since we're already half suspended.
*/
if (test_and_set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
- IWL_ERR(mvm, "Firmware error during reconfiguration! Abort.\n");
- } else if (mvm->cur_ucode == IWL_UCODE_REGULAR &&
- iwlwifi_mod_params.restart_fw) {
+ struct iwl_mvm_reprobe *reprobe;
+
+ IWL_ERR(mvm,
+ "Firmware error during reconfiguration - reprobe!\n");
+
+ /*
+ * get a module reference to avoid doing this while unloading
+ * anyway and to avoid scheduling a work with code that's
+ * being removed.
+ */
+ if (!try_module_get(THIS_MODULE)) {
+ IWL_ERR(mvm, "Module is being unloaded - abort\n");
+ return;
+ }
+
+ reprobe = kzalloc(sizeof(*reprobe), GFP_ATOMIC);
+ if (!reprobe) {
+ module_put(THIS_MODULE);
+ return;
+ }
+ reprobe->dev = mvm->trans->dev;
+ INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
+ schedule_work(&reprobe->work);
+ } else if (mvm->cur_ucode == IWL_UCODE_REGULAR && mvm->restart_fw) {
/*
* This is a bit racy, but worst case we tell mac80211 about
* a stopped/aborted (sched) scan when that was already done
@@ -669,6 +717,8 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
break;
}
+ if (mvm->restart_fw > 0)
+ mvm->restart_fw--;
ieee80211_restart_hw(mvm->hw);
}
}
@@ -678,6 +728,8 @@ static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
iwl_mvm_dump_nic_error_log(mvm);
+ if (!mvm->restart_fw)
+ iwl_mvm_dump_sram(mvm);
iwl_mvm_nic_restart(mvm);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index e7ca965a89b..21407a353a3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -75,8 +75,8 @@
#define POWER_KEEP_ALIVE_PERIOD_SEC 25
-static int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
- struct iwl_beacon_filter_cmd *cmd)
+int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
+ struct iwl_beacon_filter_cmd *cmd)
{
int ret;
@@ -85,69 +85,110 @@ static int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
if (!ret) {
IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
- cmd->ba_enable_beacon_abort);
+ le32_to_cpu(cmd->ba_enable_beacon_abort));
IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
- cmd->ba_escape_timer);
+ le32_to_cpu(cmd->ba_escape_timer));
IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
- cmd->bf_debug_flag);
+ le32_to_cpu(cmd->bf_debug_flag));
IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
- cmd->bf_enable_beacon_filter);
+ le32_to_cpu(cmd->bf_enable_beacon_filter));
IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
- cmd->bf_energy_delta);
+ le32_to_cpu(cmd->bf_energy_delta));
IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
- cmd->bf_escape_timer);
+ le32_to_cpu(cmd->bf_escape_timer));
IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
- cmd->bf_roaming_energy_delta);
+ le32_to_cpu(cmd->bf_roaming_energy_delta));
IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
- cmd->bf_roaming_state);
- IWL_DEBUG_POWER(mvm, "bf_temperature_delta is: %d\n",
- cmd->bf_temperature_delta);
+ le32_to_cpu(cmd->bf_roaming_state));
+ IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
+ le32_to_cpu(cmd->bf_temp_threshold));
+ IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
+ le32_to_cpu(cmd->bf_temp_fast_filter));
+ IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
+ le32_to_cpu(cmd->bf_temp_slow_filter));
}
return ret;
}
-static int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, bool enable)
+static
+void iwl_mvm_beacon_filter_set_cqm_params(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_beacon_filter_cmd *cmd)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->bss_conf.cqm_rssi_thold) {
+ cmd->bf_energy_delta =
+ cpu_to_le32(vif->bss_conf.cqm_rssi_hyst);
+ /* fw uses an absolute value for this */
+ cmd->bf_roaming_state =
+ cpu_to_le32(-vif->bss_conf.cqm_rssi_thold);
+ }
+ cmd->ba_enable_beacon_abort = cpu_to_le32(mvmvif->bf_data.ba_enabled);
+}
+
+int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, bool enable)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_beacon_filter_cmd cmd = {
IWL_BF_CMD_CONFIG_DEFAULTS,
- .bf_enable_beacon_filter = 1,
- .ba_enable_beacon_abort = enable,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
+ .ba_enable_beacon_abort = cpu_to_le32(enable),
};
- if (!mvmvif->bf_enabled)
+ if (!mvmvif->bf_data.bf_enabled)
return 0;
+ if (mvm->cur_ucode == IWL_UCODE_WOWLAN)
+ cmd.ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_D3);
+
+ mvmvif->bf_data.ba_enabled = enable;
+ iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
}
static void iwl_mvm_power_log(struct iwl_mvm *mvm,
- struct iwl_powertable_cmd *cmd)
+ struct iwl_mac_power_cmd *cmd)
{
IWL_DEBUG_POWER(mvm,
- "Sending power table command for power level %d, flags = 0x%X\n",
- iwlmvm_mod_params.power_scheme,
+ "Sending power table command on mac id 0x%X for power level %d, flags = 0x%X\n",
+ cmd->id_and_color, iwlmvm_mod_params.power_scheme,
le16_to_cpu(cmd->flags));
- IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", cmd->keep_alive_seconds);
-
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
- IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
- le32_to_cpu(cmd->rx_data_timeout));
- IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
- le32_to_cpu(cmd->tx_data_timeout));
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
- IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
- le32_to_cpu(cmd->skip_dtim_periods));
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
- IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
- le32_to_cpu(cmd->lprx_rssi_threshold));
+ IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n",
+ le16_to_cpu(cmd->keep_alive_seconds));
+
+ if (!(cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK))) {
+ IWL_DEBUG_POWER(mvm, "Disable power management\n");
+ return;
+ }
+
+ IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
+ le32_to_cpu(cmd->rx_data_timeout));
+ IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
+ le32_to_cpu(cmd->tx_data_timeout));
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
+ IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
+ cmd->skip_dtim_periods);
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+ IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
+ cmd->lprx_rssi_threshold);
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
+ IWL_DEBUG_POWER(mvm, "uAPSD enabled\n");
+ IWL_DEBUG_POWER(mvm, "Rx timeout (uAPSD) = %u usec\n",
+ le32_to_cpu(cmd->rx_data_timeout_uapsd));
+ IWL_DEBUG_POWER(mvm, "Tx timeout (uAPSD) = %u usec\n",
+ le32_to_cpu(cmd->tx_data_timeout_uapsd));
+ IWL_DEBUG_POWER(mvm, "QNDP TID = %d\n", cmd->qndp_tid);
+ IWL_DEBUG_POWER(mvm, "ACs flags = 0x%x\n", cmd->uapsd_ac_flags);
+ IWL_DEBUG_POWER(mvm, "Max SP = %d\n", cmd->uapsd_max_sp);
}
}
-void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- struct iwl_powertable_cmd *cmd)
+static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_mac_power_cmd *cmd)
{
struct ieee80211_hw *hw = mvm->hw;
struct ieee80211_chanctx_conf *chanctx_conf;
@@ -157,20 +198,29 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool radar_detect = false;
struct iwl_mvm_vif *mvmvif __maybe_unused =
iwl_mvm_vif_from_mac80211(vif);
+ enum ieee80211_ac_numbers ac;
+ bool tid_found = false;
+
+ cmd->id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+ dtimper = hw->conf.ps_dtim_period ?: 1;
/*
* Regardless of power management state the driver must set
* keep alive period. FW will use it for sending keep alive NDPs
- * immediately after association.
+ * immediately after association. Check that keep alive period
+ * is at least 3 * DTIM
*/
- cmd->keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC;
+ dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+ keep_alive = max_t(int, 3 * dtimper_msec,
+ MSEC_PER_SEC * POWER_KEEP_ALIVE_PERIOD_SEC);
+ keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
+ cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
- if (!vif->bss_conf.assoc)
- cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
@@ -186,12 +236,9 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
(vif->bss_conf.beacon_rate->bitrate == 10 ||
vif->bss_conf.beacon_rate->bitrate == 60)) {
cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
- cmd->lprx_rssi_threshold =
- cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD);
+ cmd->lprx_rssi_threshold = POWER_LPRX_RSSI_THRESHOLD;
}
- dtimper = hw->conf.ps_dtim_period ?: 1;
-
/* Check if radar detection is required on current channel */
rcu_read_lock();
chanctx_conf = rcu_dereference(vif->chanctx_conf);
@@ -207,27 +254,82 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
(iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
- cmd->skip_dtim_periods = cpu_to_le32(3);
+ cmd->skip_dtim_periods = 3;
}
- /* Check that keep alive period is at least 3 * DTIM */
- dtimper_msec = dtimper * vif->bss_conf.beacon_int;
- keep_alive = max_t(int, 3 * dtimper_msec,
- MSEC_PER_SEC * cmd->keep_alive_seconds);
- keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
- cmd->keep_alive_seconds = keep_alive;
-
if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
- cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
- cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
+ cmd->rx_data_timeout =
+ cpu_to_le32(IWL_MVM_DEFAULT_PS_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout =
+ cpu_to_le32(IWL_MVM_DEFAULT_PS_TX_DATA_TIMEOUT);
} else {
- cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
- cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
+ cmd->rx_data_timeout =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+ }
+
+ for (ac = IEEE80211_AC_VO; ac <= IEEE80211_AC_BK; ac++) {
+ if (!mvmvif->queue_params[ac].uapsd)
+ continue;
+
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK);
+ cmd->uapsd_ac_flags |= BIT(ac);
+
+ /* QNDP TID - the highest TID with no admission control */
+ if (!tid_found && !mvmvif->queue_params[ac].acm) {
+ tid_found = true;
+ switch (ac) {
+ case IEEE80211_AC_VO:
+ cmd->qndp_tid = 6;
+ break;
+ case IEEE80211_AC_VI:
+ cmd->qndp_tid = 5;
+ break;
+ case IEEE80211_AC_BE:
+ cmd->qndp_tid = 0;
+ break;
+ case IEEE80211_AC_BK:
+ cmd->qndp_tid = 1;
+ break;
+ }
+ }
+ }
+
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
+ cmd->rx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
+
+ if (cmd->uapsd_ac_flags == (BIT(IEEE80211_AC_VO) |
+ BIT(IEEE80211_AC_VI) |
+ BIT(IEEE80211_AC_BE) |
+ BIT(IEEE80211_AC_BK))) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+ cmd->snooze_interval =
+ cpu_to_le16(IWL_MVM_PS_SNOOZE_INTERVAL);
+ cmd->snooze_window =
+ (mvm->cur_ucode == IWL_UCODE_WOWLAN) ?
+ cpu_to_le16(IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW) :
+ cpu_to_le16(IWL_MVM_PS_SNOOZE_WINDOW);
+ }
+
+ cmd->uapsd_max_sp = IWL_UAPSD_MAX_SP;
+ cmd->heavy_tx_thld_packets =
+ IWL_MVM_PS_HEAVY_TX_THLD_PACKETS;
+ cmd->heavy_rx_thld_packets =
+ IWL_MVM_PS_HEAVY_RX_THLD_PACKETS;
+ cmd->heavy_tx_thld_percentage =
+ IWL_MVM_PS_HEAVY_TX_THLD_PERCENT;
+ cmd->heavy_rx_thld_percentage =
+ IWL_MVM_PS_HEAVY_RX_THLD_PERCENT;
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
- cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds;
+ cmd->keep_alive_seconds =
+ cpu_to_le16(mvmvif->dbgfs_pm.keep_alive_seconds);
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
if (mvmvif->dbgfs_pm.skip_over_dtim)
cmd->flags |=
@@ -243,8 +345,7 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->tx_data_timeout =
cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
- cmd->skip_dtim_periods =
- cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods);
+ cmd->skip_dtim_periods = mvmvif->dbgfs_pm.skip_dtim_periods;
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
if (mvmvif->dbgfs_pm.lprx_ena)
cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
@@ -252,16 +353,24 @@ void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
}
if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
- cmd->lprx_rssi_threshold =
- cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold);
+ cmd->lprx_rssi_threshold = mvmvif->dbgfs_pm.lprx_rssi_threshold;
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SNOOZE_ENABLE) {
+ if (mvmvif->dbgfs_pm.snooze_ena)
+ cmd->flags |=
+ cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK);
+ else
+ cmd->flags &=
+ cpu_to_le16(~POWER_FLAGS_SNOOZE_ENA_MSK);
+ }
#endif /* CONFIG_IWLWIFI_DEBUGFS */
}
-int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+static int iwl_mvm_power_mac_update_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
{
int ret;
bool ba_enable;
- struct iwl_powertable_cmd cmd = {};
+ struct iwl_mac_power_cmd cmd = {};
if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
@@ -280,7 +389,7 @@ int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
iwl_mvm_power_log(mvm, &cmd);
- ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
+ ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
sizeof(cmd), &cmd);
if (ret)
return ret;
@@ -291,15 +400,19 @@ int iwl_mvm_power_update_mode(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
}
-int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
+static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
{
- struct iwl_powertable_cmd cmd = {};
+ struct iwl_mac_power_cmd cmd = {};
struct iwl_mvm_vif *mvmvif __maybe_unused =
iwl_mvm_vif_from_mac80211(vif);
if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
+ cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
+ mvmvif->color));
+
if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
@@ -310,11 +423,98 @@ int iwl_mvm_power_disable(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
#endif
iwl_mvm_power_log(mvm, &cmd);
- return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_ASYNC,
sizeof(cmd), &cmd);
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
+static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, char *buf,
+ int bufsz)
+{
+ struct iwl_mac_power_cmd cmd = {};
+ int pos = 0;
+
+ iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+
+ pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
+ 0 : 1);
+ pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
+ iwlmvm_mod_params.power_scheme);
+ pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
+ le16_to_cpu(cmd.flags));
+ pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
+ le16_to_cpu(cmd.keep_alive_seconds));
+
+ if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
+ pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
+ 1 : 0);
+ pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
+ cmd.skip_dtim_periods);
+ if (!(cmd.flags &
+ cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK))) {
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "rx_data_timeout = %d\n",
+ le32_to_cpu(cmd.rx_data_timeout));
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "tx_data_timeout = %d\n",
+ le32_to_cpu(cmd.tx_data_timeout));
+ }
+ if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "lprx_rssi_threshold = %d\n",
+ cmd.lprx_rssi_threshold);
+ if (cmd.flags & cpu_to_le16(POWER_FLAGS_ADVANCE_PM_ENA_MSK)) {
+ pos +=
+ scnprintf(buf+pos, bufsz-pos,
+ "rx_data_timeout_uapsd = %d\n",
+ le32_to_cpu(cmd.rx_data_timeout_uapsd));
+ pos +=
+ scnprintf(buf+pos, bufsz-pos,
+ "tx_data_timeout_uapsd = %d\n",
+ le32_to_cpu(cmd.tx_data_timeout_uapsd));
+ pos += scnprintf(buf+pos, bufsz-pos, "qndp_tid = %d\n",
+ cmd.qndp_tid);
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "uapsd_ac_flags = 0x%x\n",
+ cmd.uapsd_ac_flags);
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "uapsd_max_sp = %d\n",
+ cmd.uapsd_max_sp);
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "heavy_tx_thld_packets = %d\n",
+ cmd.heavy_tx_thld_packets);
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "heavy_rx_thld_packets = %d\n",
+ cmd.heavy_rx_thld_packets);
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "heavy_tx_thld_percentage = %d\n",
+ cmd.heavy_tx_thld_percentage);
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "heavy_rx_thld_percentage = %d\n",
+ cmd.heavy_rx_thld_percentage);
+ pos +=
+ scnprintf(buf+pos, bufsz-pos, "snooze_enable = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) ?
+ 1 : 0);
+ }
+ if (cmd.flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "snooze_interval = %d\n",
+ cmd.snooze_interval);
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "snooze_window = %d\n",
+ cmd.snooze_window);
+ }
+ }
+ return pos;
+}
+
void
iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
struct iwl_beacon_filter_cmd *cmd)
@@ -323,22 +523,30 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
struct iwl_dbgfs_bf *dbgfs_bf = &mvmvif->dbgfs_bf;
if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ENERGY_DELTA)
- cmd->bf_energy_delta = dbgfs_bf->bf_energy_delta;
+ cmd->bf_energy_delta = cpu_to_le32(dbgfs_bf->bf_energy_delta);
if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_ENERGY_DELTA)
cmd->bf_roaming_energy_delta =
- dbgfs_bf->bf_roaming_energy_delta;
+ cpu_to_le32(dbgfs_bf->bf_roaming_energy_delta);
if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ROAMING_STATE)
- cmd->bf_roaming_state = dbgfs_bf->bf_roaming_state;
- if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMPERATURE_DELTA)
- cmd->bf_temperature_delta = dbgfs_bf->bf_temperature_delta;
+ cmd->bf_roaming_state = cpu_to_le32(dbgfs_bf->bf_roaming_state);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_THRESHOLD)
+ cmd->bf_temp_threshold =
+ cpu_to_le32(dbgfs_bf->bf_temp_threshold);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_FAST_FILTER)
+ cmd->bf_temp_fast_filter =
+ cpu_to_le32(dbgfs_bf->bf_temp_fast_filter);
+ if (dbgfs_bf->mask & MVM_DEBUGFS_BF_TEMP_SLOW_FILTER)
+ cmd->bf_temp_slow_filter =
+ cpu_to_le32(dbgfs_bf->bf_temp_slow_filter);
if (dbgfs_bf->mask & MVM_DEBUGFS_BF_DEBUG_FLAG)
- cmd->bf_debug_flag = dbgfs_bf->bf_debug_flag;
+ cmd->bf_debug_flag = cpu_to_le32(dbgfs_bf->bf_debug_flag);
if (dbgfs_bf->mask & MVM_DEBUGFS_BF_ESCAPE_TIMER)
cmd->bf_escape_timer = cpu_to_le32(dbgfs_bf->bf_escape_timer);
if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ESCAPE_TIMER)
cmd->ba_escape_timer = cpu_to_le32(dbgfs_bf->ba_escape_timer);
if (dbgfs_bf->mask & MVM_DEBUGFS_BA_ENABLE_BEACON_ABORT)
- cmd->ba_enable_beacon_abort = dbgfs_bf->ba_enable_beacon_abort;
+ cmd->ba_enable_beacon_abort =
+ cpu_to_le32(dbgfs_bf->ba_enable_beacon_abort);
}
#endif
@@ -348,7 +556,7 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_beacon_filter_cmd cmd = {
IWL_BF_CMD_CONFIG_DEFAULTS,
- .bf_enable_beacon_filter = 1,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
};
int ret;
@@ -356,11 +564,12 @@ int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
+ iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
if (!ret)
- mvmvif->bf_enabled = true;
+ mvmvif->bf_data.bf_enabled = true;
return ret;
}
@@ -372,13 +581,33 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int ret;
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BF_UPDATED) ||
+ vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
if (!ret)
- mvmvif->bf_enabled = false;
+ mvmvif->bf_data.bf_enabled = false;
return ret;
}
+
+int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (!mvmvif->bf_data.bf_enabled)
+ return 0;
+
+ return iwl_mvm_enable_beacon_filter(mvm, vif);
+}
+
+const struct iwl_mvm_power_ops pm_mac_ops = {
+ .power_update_mode = iwl_mvm_power_mac_update_mode,
+ .power_disable = iwl_mvm_power_mac_disable,
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ .power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
+#endif
+};
diff --git a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c b/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
new file mode 100644
index 00000000000..2ce79bad584
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
@@ -0,0 +1,319 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2012 - 2013 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+
+#include <net/mac80211.h>
+
+#include "iwl-debug.h"
+#include "mvm.h"
+#include "iwl-modparams.h"
+#include "fw-api-power.h"
+
+#define POWER_KEEP_ALIVE_PERIOD_SEC 25
+
+static void iwl_mvm_power_log(struct iwl_mvm *mvm,
+ struct iwl_powertable_cmd *cmd)
+{
+ IWL_DEBUG_POWER(mvm,
+ "Sending power table command for power level %d, flags = 0x%X\n",
+ iwlmvm_mod_params.power_scheme,
+ le16_to_cpu(cmd->flags));
+ IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", cmd->keep_alive_seconds);
+
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
+ IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
+ le32_to_cpu(cmd->rx_data_timeout));
+ IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
+ le32_to_cpu(cmd->tx_data_timeout));
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
+ IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
+ le32_to_cpu(cmd->skip_dtim_periods));
+ if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+ IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
+ le32_to_cpu(cmd->lprx_rssi_threshold));
+ }
+}
+
+static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_powertable_cmd *cmd)
+{
+ struct ieee80211_hw *hw = mvm->hw;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct ieee80211_channel *chan;
+ int dtimper, dtimper_msec;
+ int keep_alive;
+ bool radar_detect = false;
+ struct iwl_mvm_vif *mvmvif __maybe_unused =
+ iwl_mvm_vif_from_mac80211(vif);
+
+ /*
+	 * Regardless of the power management state, the driver must set the
+	 * keep alive period. The FW will use it for sending keep alive NDPs
+ * immediately after association.
+ */
+ cmd->keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC;
+
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+ return;
+
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
+ if (!vif->bss_conf.assoc)
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
+ mvmvif->dbgfs_pm.disable_power_off)
+ cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
+#endif
+ if (!vif->bss_conf.ps)
+ return;
+
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
+
+ if (vif->bss_conf.beacon_rate &&
+ (vif->bss_conf.beacon_rate->bitrate == 10 ||
+ vif->bss_conf.beacon_rate->bitrate == 60)) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
+ cmd->lprx_rssi_threshold =
+ cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD);
+ }
+
+ dtimper = hw->conf.ps_dtim_period ?: 1;
+
+ /* Check if radar detection is required on current channel */
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ WARN_ON(!chanctx_conf);
+ if (chanctx_conf) {
+ chan = chanctx_conf->def.chan;
+ radar_detect = chan->flags & IEEE80211_CHAN_RADAR;
+ }
+ rcu_read_unlock();
+
+ /* Check skip over DTIM conditions */
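+	/* (skipping is only considered on non-radar channels with a DTIM
+	 *  period of at most 10, and only for the LP power scheme or the
+	 *  WoWLAN ucode; 3 DTIM periods are then skipped at a time) */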
+ if (!radar_detect && (dtimper <= 10) &&
+ (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
+ mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ cmd->skip_dtim_periods = cpu_to_le32(3);
+ }
+
+ /* Check that keep alive period is at least 3 * DTIM */
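+	/* e.g. dtimper 3 with beacon_int 100 gives dtimper_msec = 300, and
+	 * 3 * 300 = 900 < 25000, so the 25s default is kept; the keep alive
+	 * only grows once 3 * dtimper_msec exceeds 25000 */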
+ dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+ keep_alive = max_t(int, 3 * dtimper_msec,
+ MSEC_PER_SEC * cmd->keep_alive_seconds);
+ keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
+ cmd->keep_alive_seconds = keep_alive;
+
+ if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
+ cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
+ cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
+ } else {
+ cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
+ cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
+ }
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
+ cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds;
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
+ if (mvmvif->dbgfs_pm.skip_over_dtim)
+ cmd->flags |=
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ else
+ cmd->flags &=
+ cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ }
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT)
+ cmd->rx_data_timeout =
+ cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout);
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT)
+ cmd->tx_data_timeout =
+ cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
+ cmd->skip_dtim_periods =
+ cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods);
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
+ if (mvmvif->dbgfs_pm.lprx_ena)
+ cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
+ else
+ cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
+ }
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
+ cmd->lprx_rssi_threshold =
+ cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold);
+#endif /* CONFIG_IWLWIFI_DEBUGFS */
+}
+
+static int iwl_mvm_power_legacy_update_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ int ret;
+ bool ba_enable;
+ struct iwl_powertable_cmd cmd = {};
+
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return 0;
+
+ /*
+	 * TODO: The following vif_count verification is a temporary condition.
+	 * Avoid the power mode update if more than one interface is currently
+	 * active. Remove this condition when the FW supports power management
+	 * on multiple MACs.
+ */
+ IWL_DEBUG_POWER(mvm, "Currently %d interfaces active\n",
+ mvm->vif_count);
+ if (mvm->vif_count > 1)
+ return 0;
+
+ iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+ iwl_mvm_power_log(mvm, &cmd);
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+ if (ret)
+ return ret;
+
+ ba_enable = !!(cmd.flags &
+ cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
+
+ return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
+}
+
+static int iwl_mvm_power_legacy_disable(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_powertable_cmd cmd = {};
+ struct iwl_mvm_vif *mvmvif __maybe_unused =
+ iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
+ return 0;
+
+ if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
+ cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
+ mvmvif->dbgfs_pm.disable_power_off)
+ cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
+#endif
+ iwl_mvm_power_log(mvm, &cmd);
+
+ return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
+ sizeof(cmd), &cmd);
+}
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+static int iwl_mvm_power_legacy_dbgfs_read(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, char *buf,
+ int bufsz)
+{
+ struct iwl_powertable_cmd cmd = {};
+ int pos = 0;
+
+ iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+
+ pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
+ 0 : 1);
+ pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
+ le32_to_cpu(cmd.skip_dtim_periods));
+ pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
+ iwlmvm_mod_params.power_scheme);
+ pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
+ le16_to_cpu(cmd.flags));
+ pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
+ cmd.keep_alive_seconds);
+
+ if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
+ pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
+ (cmd.flags &
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
+ 1 : 0);
+ pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
+ le32_to_cpu(cmd.rx_data_timeout));
+ pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
+ le32_to_cpu(cmd.tx_data_timeout));
+ if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
+ pos += scnprintf(buf+pos, bufsz-pos,
+ "lprx_rssi_threshold = %d\n",
+ le32_to_cpu(cmd.lprx_rssi_threshold));
+ }
+ return pos;
+}
+#endif
+
+const struct iwl_mvm_power_ops pm_legacy_ops = {
+ .power_update_mode = iwl_mvm_power_legacy_update_mode,
+ .power_disable = iwl_mvm_power_legacy_disable,
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ .power_dbgfs_read = iwl_mvm_power_legacy_dbgfs_read,
+#endif
+};
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index 29d49cf0fdb..5c6ae16ec52 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -131,23 +131,22 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
{
- struct iwl_time_quota_cmd cmd;
- int i, idx, ret, num_active_bindings, quota, quota_rem;
+ struct iwl_time_quota_cmd cmd = {};
+ int i, idx, ret, num_active_macs, quota, quota_rem;
struct iwl_mvm_quota_iterator_data data = {
.n_interfaces = {},
.colors = { -1, -1, -1, -1 },
.new_vif = newvif,
};
+ lockdep_assert_held(&mvm->mutex);
+
/* update all upon completion */
if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
return 0;
- BUILD_BUG_ON(data.colors[MAX_BINDINGS - 1] != -1);
-
- lockdep_assert_held(&mvm->mutex);
-
- memset(&cmd, 0, sizeof(cmd));
+ /* iterator data above must match */
+ BUILD_BUG_ON(MAX_BINDINGS != 4);
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
@@ -162,18 +161,17 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
* IWL_MVM_MAX_QUOTA fragments. Divide these fragments
* equally between all the bindings that require quota
*/
- num_active_bindings = 0;
+ num_active_macs = 0;
for (i = 0; i < MAX_BINDINGS; i++) {
cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
- if (data.n_interfaces[i] > 0)
- num_active_bindings++;
+ num_active_macs += data.n_interfaces[i];
}
quota = 0;
quota_rem = 0;
- if (num_active_bindings) {
- quota = IWL_MVM_MAX_QUOTA / num_active_bindings;
- quota_rem = IWL_MVM_MAX_QUOTA % num_active_bindings;
+ if (num_active_macs) {
+ quota = IWL_MVM_MAX_QUOTA / num_active_macs;
+ quota_rem = IWL_MVM_MAX_QUOTA % num_active_macs;
}
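+	/*
+	 * Each binding below is given quota * data.n_interfaces[i], so e.g.
+	 * two MACs on one binding and one MAC on another (num_active_macs
+	 * is then 3) split the session fragments 2:1.
+	 */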
for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
@@ -187,7 +185,8 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
cmd.quotas[idx].quota = cpu_to_le32(0);
cmd.quotas[idx].max_duration = cpu_to_le32(0);
} else {
- cmd.quotas[idx].quota = cpu_to_le32(quota);
+ cmd.quotas[idx].quota =
+ cpu_to_le32(quota * data.n_interfaces[i]);
cmd.quotas[idx].max_duration =
cpu_to_le32(IWL_MVM_MAX_QUOTA);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index b328a988c13..4ffaa3fa153 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -56,61 +56,61 @@
#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
static u8 rs_ht_to_legacy[] = {
- IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
- IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
- IWL_RATE_6M_INDEX,
- IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
- IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
- IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
- IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
+ [IWL_RATE_1M_INDEX] = IWL_RATE_6M_INDEX,
+ [IWL_RATE_2M_INDEX] = IWL_RATE_6M_INDEX,
+ [IWL_RATE_5M_INDEX] = IWL_RATE_6M_INDEX,
+ [IWL_RATE_11M_INDEX] = IWL_RATE_6M_INDEX,
+ [IWL_RATE_6M_INDEX] = IWL_RATE_6M_INDEX,
+ [IWL_RATE_9M_INDEX] = IWL_RATE_6M_INDEX,
+ [IWL_RATE_12M_INDEX] = IWL_RATE_9M_INDEX,
+ [IWL_RATE_18M_INDEX] = IWL_RATE_12M_INDEX,
+ [IWL_RATE_24M_INDEX] = IWL_RATE_18M_INDEX,
+ [IWL_RATE_36M_INDEX] = IWL_RATE_24M_INDEX,
+ [IWL_RATE_48M_INDEX] = IWL_RATE_36M_INDEX,
+ [IWL_RATE_54M_INDEX] = IWL_RATE_48M_INDEX,
+ [IWL_RATE_60M_INDEX] = IWL_RATE_54M_INDEX,
};
static const u8 ant_toggle_lookup[] = {
- /*ANT_NONE -> */ ANT_NONE,
- /*ANT_A -> */ ANT_B,
- /*ANT_B -> */ ANT_C,
- /*ANT_AB -> */ ANT_BC,
- /*ANT_C -> */ ANT_A,
- /*ANT_AC -> */ ANT_AB,
- /*ANT_BC -> */ ANT_AC,
- /*ANT_ABC -> */ ANT_ABC,
+ [ANT_NONE] = ANT_NONE,
+ [ANT_A] = ANT_B,
+ [ANT_B] = ANT_C,
+ [ANT_AB] = ANT_BC,
+ [ANT_C] = ANT_A,
+ [ANT_AC] = ANT_AB,
+ [ANT_BC] = ANT_AC,
+ [ANT_ABC] = ANT_ABC,
};
-#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
+#define IWL_DECLARE_RATE_INFO(r, s, rp, rn) \
[IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
IWL_RATE_SISO_##s##M_PLCP, \
IWL_RATE_MIMO2_##s##M_PLCP,\
- IWL_RATE_MIMO3_##s##M_PLCP,\
- IWL_RATE_##r##M_IEEE, \
- IWL_RATE_##ip##M_INDEX, \
- IWL_RATE_##in##M_INDEX, \
IWL_RATE_##rp##M_INDEX, \
- IWL_RATE_##rn##M_INDEX, \
- IWL_RATE_##pp##M_INDEX, \
- IWL_RATE_##np##M_INDEX }
+ IWL_RATE_##rn##M_INDEX }
/*
* Parameter order:
- * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
+ * rate, ht rate, prev rate, next rate
*
* If there isn't a valid next or previous rate then INV is used which
* maps to IWL_RATE_INVALID
*
*/
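+/* e.g. IWL_DECLARE_RATE_INFO(12, 12, 11, 18) expands to
+ * [IWL_RATE_12M_INDEX] = { IWL_RATE_12M_PLCP, IWL_RATE_SISO_12M_PLCP,
+ *			    IWL_RATE_MIMO2_12M_PLCP, IWL_RATE_11M_INDEX,
+ *			    IWL_RATE_18M_INDEX } */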
static const struct iwl_rs_rate_info iwl_rates[IWL_RATE_COUNT] = {
- IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
- IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
- IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
- IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
- IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
- IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
- IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
- IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
- IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
- IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
- IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
- IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
- IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
+ IWL_DECLARE_RATE_INFO(1, INV, INV, 2), /* 1mbps */
+ IWL_DECLARE_RATE_INFO(2, INV, 1, 5), /* 2mbps */
+ IWL_DECLARE_RATE_INFO(5, INV, 2, 11), /*5.5mbps */
+ IWL_DECLARE_RATE_INFO(11, INV, 9, 12), /* 11mbps */
+ IWL_DECLARE_RATE_INFO(6, 6, 5, 11), /* 6mbps */
+ IWL_DECLARE_RATE_INFO(9, 6, 6, 11), /* 9mbps */
+ IWL_DECLARE_RATE_INFO(12, 12, 11, 18), /* 12mbps */
+ IWL_DECLARE_RATE_INFO(18, 18, 12, 24), /* 18mbps */
+ IWL_DECLARE_RATE_INFO(24, 24, 18, 36), /* 24mbps */
+ IWL_DECLARE_RATE_INFO(36, 36, 24, 48), /* 36mbps */
+ IWL_DECLARE_RATE_INFO(48, 48, 36, 54), /* 48mbps */
+ IWL_DECLARE_RATE_INFO(54, 54, 48, INV), /* 54mbps */
+ IWL_DECLARE_RATE_INFO(60, 60, 48, INV), /* 60mbps */
/* FIXME:RS: ^^ should be INV (legacy) */
};
@@ -128,9 +128,8 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
if (rate_n_flags & RATE_MCS_HT_MSK) {
idx = rs_extract_rate(rate_n_flags);
- if (idx >= IWL_RATE_MIMO3_6M_PLCP)
- idx = idx - IWL_RATE_MIMO3_6M_PLCP;
- else if (idx >= IWL_RATE_MIMO2_6M_PLCP)
+ WARN_ON_ONCE(idx >= IWL_RATE_MIMO3_6M_PLCP);
+ if (idx >= IWL_RATE_MIMO2_6M_PLCP)
idx = idx - IWL_RATE_MIMO2_6M_PLCP;
idx += IWL_FIRST_OFDM_RATE;
@@ -162,10 +161,10 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
#ifdef CONFIG_MAC80211_DEBUGFS
static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags, int index);
+ u32 *rate_n_flags);
#else
static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags, int index)
+ u32 *rate_n_flags)
{}
#endif
@@ -212,20 +211,6 @@ static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
};
-static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
- {0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
- {0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
- {0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
-};
-
-static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */
- {0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */
- {0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */
- {0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */
-};
-
/* mbps, mcs */
static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
{ "1", "BPSK DSSS"},
@@ -260,82 +245,6 @@ static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
return (ant_type & valid_antenna) == ant_type;
}
-/*
- * removes the old data from the statistics. All data that is older than
- * TID_MAX_TIME_DIFF, will be deleted.
- */
-static void rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
-{
- /* The oldest age we want to keep */
- u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
-
- while (tl->queue_count &&
- (tl->time_stamp < oldest_time)) {
- tl->total -= tl->packet_count[tl->head];
- tl->packet_count[tl->head] = 0;
- tl->time_stamp += TID_QUEUE_CELL_SPACING;
- tl->queue_count--;
- tl->head++;
- if (tl->head >= TID_QUEUE_MAX_SIZE)
- tl->head = 0;
- }
-}
-
-/*
- * increment traffic load value for tid and also remove
- * any old values if passed the certain time period
- */
-static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
- struct ieee80211_hdr *hdr)
-{
- u32 curr_time = jiffies_to_msecs(jiffies);
- u32 time_diff;
- s32 index;
- struct iwl_traffic_load *tl = NULL;
- u8 tid;
-
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & 0xf;
- } else {
- return IWL_MAX_TID_COUNT;
- }
-
- if (unlikely(tid >= IWL_MAX_TID_COUNT))
- return IWL_MAX_TID_COUNT;
-
- tl = &lq_data->load[tid];
-
- curr_time -= curr_time % TID_ROUND_VALUE;
-
- /* Happens only for the first packet. Initialize the data */
- if (!(tl->queue_count)) {
- tl->total = 1;
- tl->time_stamp = curr_time;
- tl->queue_count = 1;
- tl->head = 0;
- tl->packet_count[0] = 1;
- return IWL_MAX_TID_COUNT;
- }
-
- time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
- index = time_diff / TID_QUEUE_CELL_SPACING;
-
- /* The history is too long: remove data that is older than */
- /* TID_MAX_TIME_DIFF */
- if (index >= TID_QUEUE_MAX_SIZE)
- rs_tl_rm_old_stats(tl, curr_time);
-
- index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
- tl->packet_count[index] = tl->packet_count[index] + 1;
- tl->total = tl->total + 1;
-
- if ((index + 1) > tl->queue_count)
- tl->queue_count = index + 1;
-
- return tid;
-}
-
#ifdef CONFIG_MAC80211_DEBUGFS
/**
* Program the device to use fixed rate for frame transmit
@@ -349,7 +258,6 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
- lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
IWL_DEBUG_RATE(mvm, "sta_id %d rate 0x%X\n",
lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
@@ -361,45 +269,11 @@ static void rs_program_fix_rate(struct iwl_mvm *mvm,
}
#endif
-/*
- get the traffic load value for tid
-*/
-static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
-{
- u32 curr_time = jiffies_to_msecs(jiffies);
- u32 time_diff;
- s32 index;
- struct iwl_traffic_load *tl = NULL;
-
- if (tid >= IWL_MAX_TID_COUNT)
- return 0;
-
- tl = &(lq_data->load[tid]);
-
- curr_time -= curr_time % TID_ROUND_VALUE;
-
- if (!(tl->queue_count))
- return 0;
-
- time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
- index = time_diff / TID_QUEUE_CELL_SPACING;
-
- /* The history is too long: remove data that is older than */
- /* TID_MAX_TIME_DIFF */
- if (index >= TID_QUEUE_MAX_SIZE)
- rs_tl_rm_old_stats(tl, curr_time);
-
- return tl->total;
-}
-
static int rs_tl_turn_on_agg_for_tid(struct iwl_mvm *mvm,
struct iwl_lq_sta *lq_data, u8 tid,
struct ieee80211_sta *sta)
{
int ret = -EAGAIN;
- u32 load;
-
- load = rs_tl_get_load(lq_data, tid);
/*
* Don't create TX aggregation sessions when in high
@@ -563,7 +437,7 @@ static u32 rate_n_flags_from_tbl(struct iwl_mvm *mvm,
else if (is_mimo2(tbl->lq_type))
rate_n_flags |= iwl_rates[index].plcp_mimo2;
else
- rate_n_flags |= iwl_rates[index].plcp_mimo3;
+ WARN_ON_ONCE(1);
} else {
IWL_ERR(mvm, "Invalid tbl->lq_type %d\n", tbl->lq_type);
}
@@ -601,7 +475,7 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
u8 num_of_ant = get_num_of_ant_from_rate(rate_n_flags);
u8 mcs;
- memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
+ memset(tbl, 0, offsetof(struct iwl_scale_tbl_info, win));
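+	/* clear everything up to, but not including, the per-rate history
+	 * windows (win[]) at the end of the table */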
*rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
if (*rate_idx == IWL_RATE_INVALID) {
@@ -640,12 +514,8 @@ static int rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
} else if (mcs <= IWL_RATE_MIMO2_60M_PLCP) {
if (num_of_ant == 2)
tbl->lq_type = LQ_MIMO2;
- /* MIMO3 */
} else {
- if (num_of_ant == 3) {
- tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
- tbl->lq_type = LQ_MIMO3;
- }
+ WARN_ON_ONCE(num_of_ant == 3);
}
}
return 0;
@@ -711,10 +581,10 @@ static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
} else {
if (is_siso(rate_type))
return lq_sta->active_siso_rate;
- else if (is_mimo2(rate_type))
+ else {
+ WARN_ON_ONCE(!is_mimo2(rate_type));
return lq_sta->active_mimo2_rate;
- else
- return lq_sta->active_mimo3_rate;
+ }
}
}
@@ -1089,7 +959,7 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
}
/* Choose among many HT tables depending on number of streams
- * (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation
+ * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
* status */
if (is_siso(tbl->lq_type) && !tbl->is_ht40)
ht_tbl_pointer = expected_tpt_siso20MHz;
@@ -1097,12 +967,10 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
ht_tbl_pointer = expected_tpt_siso40MHz;
else if (is_mimo2(tbl->lq_type) && !tbl->is_ht40)
ht_tbl_pointer = expected_tpt_mimo2_20MHz;
- else if (is_mimo2(tbl->lq_type))
+ else {
+ WARN_ON_ONCE(!is_mimo2(tbl->lq_type));
ht_tbl_pointer = expected_tpt_mimo2_40MHz;
- else if (is_mimo3(tbl->lq_type) && !tbl->is_ht40)
- ht_tbl_pointer = expected_tpt_mimo3_20MHz;
- else /* if (is_mimo3(tbl->lq_type)) <-- must be true */
- ht_tbl_pointer = expected_tpt_mimo3_40MHz;
+ }
if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
tbl->expected_tpt = ht_tbl_pointer[0];
@@ -1274,58 +1142,6 @@ static int rs_switch_to_mimo2(struct iwl_mvm *mvm,
}
/*
- * Set up search table for MIMO3
- */
-static int rs_switch_to_mimo3(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl, int index)
-{
- u16 rate_mask;
- s32 rate;
- s8 is_green = lq_sta->is_green;
-
- if (!sta->ht_cap.ht_supported)
- return -1;
-
- if (sta->smps_mode == IEEE80211_SMPS_STATIC)
- return -1;
-
- /* Need both Tx chains/antennas to support MIMO */
- if (num_of_ant(iwl_fw_valid_tx_ant(mvm->fw)) < 3)
- return -1;
-
- IWL_DEBUG_RATE(mvm, "LQ: try to switch to MIMO3\n");
-
- tbl->lq_type = LQ_MIMO3;
- tbl->action = 0;
- tbl->max_search = IWL_MAX_11N_MIMO3_SEARCH;
- rate_mask = lq_sta->active_mimo3_rate;
-
- if (iwl_is_ht40_tx_allowed(sta))
- tbl->is_ht40 = 1;
- else
- tbl->is_ht40 = 0;
-
- rs_set_expected_tpt_table(lq_sta, tbl);
-
- rate = rs_get_best_rate(mvm, lq_sta, tbl, rate_mask, index);
-
- IWL_DEBUG_RATE(mvm, "LQ: MIMO3 best rate %d mask %X\n",
- rate, rate_mask);
- if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
- IWL_DEBUG_RATE(mvm, "Can't switch with index %d rate mask %x\n",
- rate, rate_mask);
- return -1;
- }
- tbl->current_rate = rate_n_flags_from_tbl(mvm, tbl, rate, is_green);
-
- IWL_DEBUG_RATE(mvm, "LQ: Switch to new mcs %X index is green %X\n",
- tbl->current_rate, is_green);
- return 0;
-}
-
-/*
* Set up search table for SISO
*/
static int rs_switch_to_siso(struct iwl_mvm *mvm,
@@ -1434,21 +1250,14 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
}
break;
- case IWL_LEGACY_SWITCH_MIMO2_AB:
- case IWL_LEGACY_SWITCH_MIMO2_AC:
- case IWL_LEGACY_SWITCH_MIMO2_BC:
+ case IWL_LEGACY_SWITCH_MIMO2:
IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO2\n");
/* Set up search table to try MIMO */
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = 0;
- if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
- search_tbl->ant_type = ANT_AB;
- else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
- search_tbl->ant_type = ANT_AC;
- else
- search_tbl->ant_type = ANT_BC;
+ search_tbl->ant_type = ANT_AB;
if (!rs_is_valid_ant(valid_tx_ant,
search_tbl->ant_type))
@@ -1461,30 +1270,11 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
goto out;
}
break;
-
- case IWL_LEGACY_SWITCH_MIMO3_ABC:
- IWL_DEBUG_RATE(mvm, "LQ: Legacy switch to MIMO3\n");
-
- /* Set up search table to try MIMO3 */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
-
- search_tbl->ant_type = ANT_ABC;
-
- if (!rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
- search_tbl, index);
- if (!ret) {
- lq_sta->action_counter = 0;
- goto out;
- }
- break;
+ default:
+ WARN_ON_ONCE(1);
}
tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+ if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
if (tbl->action == start_action)
@@ -1496,7 +1286,7 @@ static int rs_move_legacy_other(struct iwl_mvm *mvm,
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+ if (tbl->action > IWL_LEGACY_SWITCH_MIMO2)
tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
@@ -1531,7 +1321,7 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
- tbl->action = IWL_SISO_SWITCH_MIMO2_AB;
+ tbl->action = IWL_SISO_SWITCH_MIMO2;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
@@ -1573,19 +1363,12 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
goto out;
}
break;
- case IWL_SISO_SWITCH_MIMO2_AB:
- case IWL_SISO_SWITCH_MIMO2_AC:
- case IWL_SISO_SWITCH_MIMO2_BC:
+ case IWL_SISO_SWITCH_MIMO2:
IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO2\n");
memcpy(search_tbl, tbl, sz);
search_tbl->is_SGI = 0;
- if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
- search_tbl->ant_type = ANT_AB;
- else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
- search_tbl->ant_type = ANT_AC;
- else
- search_tbl->ant_type = ANT_BC;
+ search_tbl->ant_type = ANT_AB;
if (!rs_is_valid_ant(valid_tx_ant,
search_tbl->ant_type))
@@ -1626,24 +1409,11 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
index, is_green);
update_search_tbl_counter = 1;
goto out;
- case IWL_SISO_SWITCH_MIMO3_ABC:
- IWL_DEBUG_RATE(mvm, "LQ: SISO switch to MIMO3\n");
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
- search_tbl->ant_type = ANT_ABC;
-
- if (!rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
- search_tbl, index);
- if (!ret)
- goto out;
- break;
+ default:
+ WARN_ON_ONCE(1);
}
tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO3_ABC)
+ if (tbl->action > IWL_SISO_SWITCH_GI)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
if (tbl->action == start_action)
@@ -1655,7 +1425,7 @@ static int rs_move_siso_to_other(struct iwl_mvm *mvm,
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
- if (tbl->action > IWL_SISO_SWITCH_MIMO3_ABC)
+ if (tbl->action > IWL_SISO_SWITCH_GI)
tbl->action = IWL_SISO_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
@@ -1696,8 +1466,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
- if (tbl->action == IWL_MIMO2_SWITCH_SISO_B ||
- tbl->action == IWL_MIMO2_SWITCH_SISO_C)
+ if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
tbl->action = IWL_MIMO2_SWITCH_SISO_A;
break;
default:
@@ -1730,7 +1499,6 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
break;
case IWL_MIMO2_SWITCH_SISO_A:
case IWL_MIMO2_SWITCH_SISO_B:
- case IWL_MIMO2_SWITCH_SISO_C:
IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to SISO\n");
/* Set up new search table for SISO */
@@ -1738,10 +1506,8 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
search_tbl->ant_type = ANT_A;
- else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
+ else /* tbl->action == IWL_MIMO2_SWITCH_SISO_B */
search_tbl->ant_type = ANT_B;
- else
- search_tbl->ant_type = ANT_C;
if (!rs_is_valid_ant(valid_tx_ant,
search_tbl->ant_type))
@@ -1784,26 +1550,11 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
index, is_green);
update_search_tbl_counter = 1;
goto out;
-
- case IWL_MIMO2_SWITCH_MIMO3_ABC:
- IWL_DEBUG_RATE(mvm, "LQ: MIMO2 switch to MIMO3\n");
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
- search_tbl->ant_type = ANT_ABC;
-
- if (!rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = rs_switch_to_mimo3(mvm, lq_sta, sta,
- search_tbl, index);
- if (!ret)
- goto out;
-
- break;
+ default:
+ WARN_ON_ONCE(1);
}
tbl->action++;
- if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
+ if (tbl->action > IWL_MIMO2_SWITCH_GI)
tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
if (tbl->action == start_action)
@@ -1814,7 +1565,7 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
out:
lq_sta->search_better_tbl = 1;
tbl->action++;
- if (tbl->action > IWL_MIMO2_SWITCH_MIMO3_ABC)
+ if (tbl->action > IWL_MIMO2_SWITCH_GI)
tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
if (update_search_tbl_counter)
search_tbl->action = tbl->action;
@@ -1823,171 +1574,6 @@ static int rs_move_mimo2_to_other(struct iwl_mvm *mvm,
}
/*
- * Try to switch to new modulation mode from MIMO3
- */
-static int rs_move_mimo3_to_other(struct iwl_mvm *mvm,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_sta *sta, int index)
-{
- s8 is_green = lq_sta->is_green;
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u8 start_action;
- u8 valid_tx_ant = iwl_fw_valid_tx_ant(mvm->fw);
- u8 tx_chains_num = num_of_ant(valid_tx_ant);
- int ret;
- u8 update_search_tbl_counter = 0;
-
- switch (BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
- case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
- /* nothing */
- break;
- case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
- case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
- /* avoid antenna B and MIMO */
- if (tbl->action != IWL_MIMO3_SWITCH_SISO_A)
- tbl->action = IWL_MIMO3_SWITCH_SISO_A;
- break;
- case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
- /* avoid antenna B unless MIMO */
- if (tbl->action == IWL_MIMO3_SWITCH_SISO_B ||
- tbl->action == IWL_MIMO3_SWITCH_SISO_C)
- tbl->action = IWL_MIMO3_SWITCH_SISO_A;
- break;
- default:
- IWL_ERR(mvm, "Invalid BT load %d",
- BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD));
- break;
- }
-
- start_action = tbl->action;
- while (1) {
- lq_sta->action_counter++;
- switch (tbl->action) {
- case IWL_MIMO3_SWITCH_ANTENNA1:
- case IWL_MIMO3_SWITCH_ANTENNA2:
- IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle Antennas\n");
-
- if (tx_chains_num <= 3)
- break;
-
- if (window->success_ratio >= IWL_RS_GOOD_RATIO)
- break;
-
- memcpy(search_tbl, tbl, sz);
- if (rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate,
- search_tbl))
- goto out;
- break;
- case IWL_MIMO3_SWITCH_SISO_A:
- case IWL_MIMO3_SWITCH_SISO_B:
- case IWL_MIMO3_SWITCH_SISO_C:
- IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to SISO\n");
-
- /* Set up new search table for SISO */
- memcpy(search_tbl, tbl, sz);
-
- if (tbl->action == IWL_MIMO3_SWITCH_SISO_A)
- search_tbl->ant_type = ANT_A;
- else if (tbl->action == IWL_MIMO3_SWITCH_SISO_B)
- search_tbl->ant_type = ANT_B;
- else
- search_tbl->ant_type = ANT_C;
-
- if (!rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = rs_switch_to_siso(mvm, lq_sta, sta,
- search_tbl, index);
- if (!ret)
- goto out;
-
- break;
-
- case IWL_MIMO3_SWITCH_MIMO2_AB:
- case IWL_MIMO3_SWITCH_MIMO2_AC:
- case IWL_MIMO3_SWITCH_MIMO2_BC:
- IWL_DEBUG_RATE(mvm, "LQ: MIMO3 switch to MIMO2\n");
-
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
- if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AB)
- search_tbl->ant_type = ANT_AB;
- else if (tbl->action == IWL_MIMO3_SWITCH_MIMO2_AC)
- search_tbl->ant_type = ANT_AC;
- else
- search_tbl->ant_type = ANT_BC;
-
- if (!rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = rs_switch_to_mimo2(mvm, lq_sta, sta,
- search_tbl, index);
- if (!ret)
- goto out;
-
- break;
-
- case IWL_MIMO3_SWITCH_GI:
- if (!tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_20))
- break;
- if (tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_40))
- break;
-
- IWL_DEBUG_RATE(mvm, "LQ: MIMO3 toggle SGI/NGI\n");
-
- /* Set up new search table for MIMO */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = !tbl->is_SGI;
- rs_set_expected_tpt_table(lq_sta, search_tbl);
- /*
- * If active table already uses the fastest possible
- * modulation (dual stream with short guard interval),
- * and it's working well, there's no need to look
- * for a better type of modulation!
- */
- if (tbl->is_SGI) {
- s32 tpt = lq_sta->last_tpt / 100;
- if (tpt >= search_tbl->expected_tpt[index])
- break;
- }
- search_tbl->current_rate =
- rate_n_flags_from_tbl(mvm, search_tbl,
- index, is_green);
- update_search_tbl_counter = 1;
- goto out;
- }
- tbl->action++;
- if (tbl->action > IWL_MIMO3_SWITCH_GI)
- tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
-
- if (tbl->action == start_action)
- break;
- }
- search_tbl->lq_type = LQ_NONE;
- return 0;
- out:
- lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_MIMO3_SWITCH_GI)
- tbl->action = IWL_MIMO3_SWITCH_ANTENNA1;
- if (update_search_tbl_counter)
- search_tbl->action = tbl->action;
-
- return 0;
-}
-
-/*
* Check whether we should continue using same modulation mode, or
* begin search for a new mode, based on:
* 1) # tx successes or failures while using this mode
@@ -2086,6 +1672,22 @@ static void rs_update_rate_tbl(struct iwl_mvm *mvm,
iwl_mvm_send_lq_cmd(mvm, &lq_sta->lq, CMD_ASYNC, false);
}
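+/* Extract the TID from the QoS control field; non-QoS frames (and any
+ * out-of-range value) map to IWL_MAX_TID_COUNT, i.e. "no TID" */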
+static u8 rs_get_tid(struct iwl_lq_sta *lq_data,
+ struct ieee80211_hdr *hdr)
+{
+ u8 tid = IWL_MAX_TID_COUNT;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ }
+
+ if (unlikely(tid > IWL_MAX_TID_COUNT))
+ tid = IWL_MAX_TID_COUNT;
+
+ return tid;
+}
+
/*
* Do rate scaling and search for new modulation mode.
*/
@@ -2129,7 +1731,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
- tid = rs_tl_add_packet(lq_sta, hdr);
+ tid = rs_get_tid(lq_sta, hdr);
if ((tid != IWL_MAX_TID_COUNT) &&
(lq_sta->tx_agg_tid_en & (1 << tid))) {
tid_data = &sta_priv->tid_data[tid];
@@ -2377,8 +1979,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
scale_action = 0;
if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
- IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
- (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
+ IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && (is_mimo(tbl->lq_type))) {
if (lq_sta->last_bt_traffic >
BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD)) {
/*
@@ -2395,8 +1996,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD);
if ((BT_MBOX_MSG(&mvm->last_bt_notif, 3, TRAFFIC_LOAD) >=
- IWL_BT_COEX_TRAFFIC_LOAD_HIGH) &&
- (is_mimo2(tbl->lq_type) || is_mimo3(tbl->lq_type))) {
+ IWL_BT_COEX_TRAFFIC_LOAD_HIGH) && is_mimo(tbl->lq_type)) {
/* search for a new modulation */
rs_stay_in_table(lq_sta, true);
goto lq_update;
@@ -2456,7 +2056,7 @@ lq_update:
else if (is_mimo2(tbl->lq_type))
rs_move_mimo2_to_other(mvm, lq_sta, sta, index);
else
- rs_move_mimo3_to_other(mvm, lq_sta, sta, index);
+ WARN_ON_ONCE(1);
/* If new "search" mode was selected, set up in uCode table */
if (lq_sta->search_better_tbl) {
@@ -2621,11 +2221,10 @@ static void rs_get_rate(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta,
rate_idx -= IWL_FIRST_OFDM_RATE;
/* 6M and 9M shared same MCS index */
rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
+ WARN_ON_ONCE(rs_extract_rate(lq_sta->last_rate_n_flags) >=
+ IWL_RATE_MIMO3_6M_PLCP);
if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
- IWL_RATE_MIMO3_6M_PLCP)
- rate_idx = rate_idx + (2 * MCS_INDEX_PER_STREAM);
- else if (rs_extract_rate(lq_sta->last_rate_n_flags) >=
- IWL_RATE_MIMO2_6M_PLCP)
+ IWL_RATE_MIMO2_6M_PLCP)
rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
@@ -2688,9 +2287,6 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->flush_timer = 0;
lq_sta->supp_rates = sta->supp_rates[sband->band];
- for (j = 0; j < LQ_SIZE; j++)
- for (i = 0; i < IWL_RATE_COUNT; i++)
- rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
IWL_DEBUG_RATE(mvm,
"LQ: *** rate scale station global init for station %d ***\n",
@@ -2727,16 +2323,10 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->active_mimo2_rate &= ~((u16)0x2);
lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
- lq_sta->active_mimo3_rate = ht_cap->mcs.rx_mask[2] << 1;
- lq_sta->active_mimo3_rate |= ht_cap->mcs.rx_mask[2] & 0x1;
- lq_sta->active_mimo3_rate &= ~((u16)0x2);
- lq_sta->active_mimo3_rate <<= IWL_FIRST_OFDM_RATE;
-
IWL_DEBUG_RATE(mvm,
- "SISO-RATE=%X MIMO2-RATE=%X MIMO3-RATE=%X\n",
+ "SISO-RATE=%X MIMO2-RATE=%X\n",
lq_sta->active_siso_rate,
- lq_sta->active_mimo2_rate,
- lq_sta->active_mimo3_rate);
+ lq_sta->active_mimo2_rate);
/* These values will be overridden later */
lq_sta->lq.single_stream_ant_msk =
@@ -2780,7 +2370,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
struct iwl_lq_cmd *lq_cmd = &lq_sta->lq;
/* Override starting rate (index 0) if needed for debug purposes */
- rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+ rs_dbgfs_set_mcs(lq_sta, &new_rate);
/* Interpret new_rate (rate_n_flags) */
rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
@@ -2827,7 +2417,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
}
/* Override next rate if needed for debug purposes */
- rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+ rs_dbgfs_set_mcs(lq_sta, &new_rate);
/* Fill next table entry */
lq_cmd->rs_table[index] =
@@ -2869,7 +2459,7 @@ static void rs_fill_link_cmd(struct iwl_mvm *mvm,
use_ht_possible = 0;
/* Override next rate if needed for debug purposes */
- rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
+ rs_dbgfs_set_mcs(lq_sta, &new_rate);
/* Fill next table entry */
lq_cmd->rs_table[index] = cpu_to_le32(new_rate);
@@ -2914,7 +2504,7 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta,
#ifdef CONFIG_MAC80211_DEBUGFS
static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags, int index)
+ u32 *rate_n_flags)
{
struct iwl_mvm *mvm;
u8 valid_tx_ant;
@@ -2999,8 +2589,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
(is_legacy(tbl->lq_type)) ? "legacy" : "HT");
if (is_Ht(tbl->lq_type)) {
desc += sprintf(buff+desc, " %s",
- (is_siso(tbl->lq_type)) ? "SISO" :
- ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
+ (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
desc += sprintf(buff+desc, " %s",
(tbl->is_ht40) ? "40MHz" : "20MHz");
desc += sprintf(buff+desc, " %s %s %s\n",
@@ -3100,32 +2689,6 @@ static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
.llseek = default_llseek,
};
-static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
- char __user *user_buf, size_t count, loff_t *ppos)
-{
- struct iwl_lq_sta *lq_sta = file->private_data;
- struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
- char buff[120];
- int desc = 0;
-
- if (is_Ht(tbl->lq_type))
- desc += sprintf(buff+desc,
- "Bit Rate= %d Mb/s\n",
- tbl->expected_tpt[lq_sta->last_txrate_idx]);
- else
- desc += sprintf(buff+desc,
- "Bit Rate= %d Mb/s\n",
- iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
-
- return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
-}
-
-static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
- .read = rs_sta_dbgfs_rate_scale_data_read,
- .open = simple_open,
- .llseek = default_llseek,
-};
-
static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
{
struct iwl_lq_sta *lq_sta = mvm_sta;
@@ -3135,9 +2698,6 @@ static void rs_add_debugfs(void *mvm, void *mvm_sta, struct dentry *dir)
lq_sta->rs_sta_dbgfs_stats_table_file =
debugfs_create_file("rate_stats_table", S_IRUSR, dir,
lq_sta, &rs_sta_dbgfs_stats_table_ops);
- lq_sta->rs_sta_dbgfs_rate_scale_data_file =
- debugfs_create_file("rate_scale_data", S_IRUSR, dir,
- lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
&lq_sta->tx_agg_tid_en);
@@ -3148,7 +2708,6 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
struct iwl_lq_sta *lq_sta = mvm_sta;
debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
}
#endif
@@ -3159,8 +2718,9 @@ static void rs_remove_debugfs(void *mvm, void *mvm_sta)
* station is added we ignore it.
*/
static void rs_rate_init_stub(void *mvm_r,
- struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *mvm_sta)
+ struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta, void *mvm_sta)
{
}
static struct rate_control_ops rs_mvm_ops = {
@@ -3193,13 +2753,14 @@ void iwl_mvm_rate_control_unregister(void)
 * iwl_mvm_tx_protection - Get the LQ command, change it to enable/disable
 * Tx protection, according to this request and previous requests,
* and send the LQ command.
- * @lq: The LQ command
* @mvmsta: The station
* @enable: Enable Tx protection?
*/
-int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
- struct iwl_mvm_sta *mvmsta, bool enable)
+int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool enable)
{
+ struct iwl_lq_cmd *lq = &mvmsta->lq_sta.lq;
+
lockdep_assert_held(&mvm->mutex);
if (enable) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index cff4f6da773..335cf168290 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -38,14 +38,8 @@ struct iwl_rs_rate_info {
u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
- u8 plcp_mimo3; /* uCode API: IWL_RATE_MIMO3_6M_PLCP, etc. */
- u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
- u8 prev_ieee; /* previous rate in IEEE speeds */
- u8 next_ieee; /* next rate in IEEE speeds */
u8 prev_rs; /* previous rate used in rs algo */
u8 next_rs; /* next rate used in rs algo */
- u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
- u8 next_rs_tgg; /* next rate used in TGG rs algo */
};
#define IWL_RATE_60M_PLCP 3
@@ -120,23 +114,6 @@ enum {
IWL_RATE_MIMO3_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
};
-/* MAC header values for bit rates */
-enum {
- IWL_RATE_6M_IEEE = 12,
- IWL_RATE_9M_IEEE = 18,
- IWL_RATE_12M_IEEE = 24,
- IWL_RATE_18M_IEEE = 36,
- IWL_RATE_24M_IEEE = 48,
- IWL_RATE_36M_IEEE = 72,
- IWL_RATE_48M_IEEE = 96,
- IWL_RATE_54M_IEEE = 108,
- IWL_RATE_60M_IEEE = 120,
- IWL_RATE_1M_IEEE = 2,
- IWL_RATE_2M_IEEE = 4,
- IWL_RATE_5M_IEEE = 11,
- IWL_RATE_11M_IEEE = 22,
-};
-
#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
#define IWL_INVALID_VALUE -1
@@ -165,47 +142,22 @@ enum {
#define IWL_LEGACY_SWITCH_ANTENNA1 0
#define IWL_LEGACY_SWITCH_ANTENNA2 1
#define IWL_LEGACY_SWITCH_SISO 2
-#define IWL_LEGACY_SWITCH_MIMO2_AB 3
-#define IWL_LEGACY_SWITCH_MIMO2_AC 4
-#define IWL_LEGACY_SWITCH_MIMO2_BC 5
-#define IWL_LEGACY_SWITCH_MIMO3_ABC 6
+#define IWL_LEGACY_SWITCH_MIMO2 3
/* possible actions when in siso mode */
#define IWL_SISO_SWITCH_ANTENNA1 0
#define IWL_SISO_SWITCH_ANTENNA2 1
-#define IWL_SISO_SWITCH_MIMO2_AB 2
-#define IWL_SISO_SWITCH_MIMO2_AC 3
-#define IWL_SISO_SWITCH_MIMO2_BC 4
-#define IWL_SISO_SWITCH_GI 5
-#define IWL_SISO_SWITCH_MIMO3_ABC 6
-
+#define IWL_SISO_SWITCH_MIMO2 2
+#define IWL_SISO_SWITCH_GI 3
/* possible actions when in mimo mode */
#define IWL_MIMO2_SWITCH_ANTENNA1 0
#define IWL_MIMO2_SWITCH_ANTENNA2 1
#define IWL_MIMO2_SWITCH_SISO_A 2
#define IWL_MIMO2_SWITCH_SISO_B 3
-#define IWL_MIMO2_SWITCH_SISO_C 4
-#define IWL_MIMO2_SWITCH_GI 5
-#define IWL_MIMO2_SWITCH_MIMO3_ABC 6
-
+#define IWL_MIMO2_SWITCH_GI 4
-/* possible actions when in mimo3 mode */
-#define IWL_MIMO3_SWITCH_ANTENNA1 0
-#define IWL_MIMO3_SWITCH_ANTENNA2 1
-#define IWL_MIMO3_SWITCH_SISO_A 2
-#define IWL_MIMO3_SWITCH_SISO_B 3
-#define IWL_MIMO3_SWITCH_SISO_C 4
-#define IWL_MIMO3_SWITCH_MIMO2_AB 5
-#define IWL_MIMO3_SWITCH_MIMO2_AC 6
-#define IWL_MIMO3_SWITCH_MIMO2_BC 7
-#define IWL_MIMO3_SWITCH_GI 8
-
-
-#define IWL_MAX_11N_MIMO3_SEARCH IWL_MIMO3_SWITCH_GI
-#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_MIMO3_ABC
-
-/*FIXME:RS:add possible actions for MIMO3*/
+#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
#define IWL_ACTION_LIMIT 3 /* # possible actions */
@@ -240,15 +192,13 @@ enum iwl_table_type {
LQ_A,
LQ_SISO, /* high-throughput types */
LQ_MIMO2,
- LQ_MIMO3,
LQ_MAX,
};
#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
#define is_siso(tbl) ((tbl) == LQ_SISO)
#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
-#define is_mimo3(tbl) ((tbl) == LQ_MIMO3)
-#define is_mimo(tbl) (is_mimo2(tbl) || is_mimo3(tbl))
+#define is_mimo(tbl) is_mimo2(tbl)
#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
#define is_a_band(tbl) ((tbl) == LQ_A)
#define is_g_and(tbl) ((tbl) == LQ_G)
@@ -290,17 +240,6 @@ struct iwl_scale_tbl_info {
struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
};
-struct iwl_traffic_load {
- unsigned long time_stamp; /* age of the oldest statistics */
- u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
- * slice */
- u32 total; /* total num of packets during the
- * last TID_MAX_TIME_DIFF */
- u8 queue_count; /* number of queues that has
- * been used since the last cleanup */
- u8 head; /* start of the circular buffer */
-};
-
/**
* struct iwl_lq_sta -- driver's rate scaling private structure
*
@@ -331,18 +270,15 @@ struct iwl_lq_sta {
u16 active_legacy_rate;
u16 active_siso_rate;
u16 active_mimo2_rate;
- u16 active_mimo3_rate;
s8 max_rate_idx; /* Max rate set by user */
u8 missed_rate_counter;
struct iwl_lq_cmd lq;
struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
- struct iwl_traffic_load load[IWL_MAX_TID_COUNT];
u8 tx_agg_tid_en;
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *rs_sta_dbgfs_scale_table_file;
struct dentry *rs_sta_dbgfs_stats_table_file;
- struct dentry *rs_sta_dbgfs_rate_scale_data_file;
struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
u32 dbg_fixed_rate;
#endif
@@ -404,7 +340,7 @@ extern void iwl_mvm_rate_control_unregister(void);
struct iwl_mvm_sta;
-int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq,
- struct iwl_mvm_sta *mvmsta, bool enable);
+int iwl_mvm_tx_protection(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
+ bool enable);
#endif /* __rs__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index e4930d5027d..2a8cb5a6053 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -124,24 +124,15 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
ieee80211_rx_ni(mvm->hw, skb);
}
-/*
- * iwl_mvm_calc_rssi - calculate the rssi in dBm
- * @phy_info: the phy information for the coming packet
- */
-static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
- struct iwl_rx_phy_info *phy_info)
+static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
+ struct iwl_rx_phy_info *phy_info,
+ struct ieee80211_rx_status *rx_status)
{
int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
int rssi_all_band_a, rssi_all_band_b;
u32 agc_a, agc_b, max_agc;
u32 val;
- /* Find max rssi among 2 possible receivers.
- * These values are measured by the Digital Signal Processor (DSP).
- * They should stay fairly constant even as the signal strength varies,
- * if the radio's Automatic Gain Control (AGC) is working right.
- * AGC value (see below) will provide the "interesting" info.
- */
val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
@@ -166,7 +157,51 @@ static int iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
IWL_DEBUG_STATS(mvm, "Rssi In A %d B %d Max %d AGCA %d AGCB %d\n",
rssi_a_dbm, rssi_b_dbm, max_rssi_dbm, agc_a, agc_b);
- return max_rssi_dbm;
+ rx_status->signal = max_rssi_dbm;
+ rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
+ RX_RES_PHY_FLAGS_ANTENNA)
+ >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+ rx_status->chain_signal[0] = rssi_a_dbm;
+ rx_status->chain_signal[1] = rssi_b_dbm;
+}
+
+/*
+ * iwl_mvm_get_signal_strength - use new rx PHY INFO API
+ * values are reported by the fw as positive values - need to negate
+ * to obtain their dBM. Account for missing antennas by replacing 0
+ * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
+ */
+static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
+ struct iwl_rx_phy_info *phy_info,
+ struct ieee80211_rx_status *rx_status)
+{
+ int energy_a, energy_b, energy_c, max_energy;
+ u32 val;
+
+ val =
+ le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_ENERGY_ANT_ABC_IDX]);
+ energy_a = (val & IWL_RX_INFO_ENERGY_ANT_A_MSK) >>
+ IWL_RX_INFO_ENERGY_ANT_A_POS;
+ energy_a = energy_a ? -energy_a : -256;
+ energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >>
+ IWL_RX_INFO_ENERGY_ANT_B_POS;
+ energy_b = energy_b ? -energy_b : -256;
+ energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >>
+ IWL_RX_INFO_ENERGY_ANT_C_POS;
+ energy_c = energy_c ? -energy_c : -256;
+ max_energy = max(energy_a, energy_b);
+ max_energy = max(max_energy, energy_c);
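+	/* e.g. reported energies 43/0/61 become -43/-256/-61 dBm, so
+	 * max_energy (and thus rx_status->signal) is -43 */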
+
+ IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n",
+ energy_a, energy_b, energy_c, max_energy);
+
+ rx_status->signal = max_energy;
+ rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
+ RX_RES_PHY_FLAGS_ANTENNA)
+ >> RX_RES_PHY_FLAGS_ANTENNA_POS;
+ rx_status->chain_signal[0] = energy_a;
+ rx_status->chain_signal[1] = energy_b;
+ rx_status->chain_signal[2] = energy_c;
}
/*
@@ -289,29 +324,14 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
*/
/*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
- /* Find max signal strength (dBm) among 3 antenna/receiver chains */
- rx_status.signal = iwl_mvm_calc_rssi(mvm, phy_info);
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_RX_ENERGY_API)
+ iwl_mvm_get_signal_strength(mvm, phy_info, &rx_status);
+ else
+ iwl_mvm_calc_rssi(mvm, phy_info, &rx_status);
IWL_DEBUG_STATS_LIMIT(mvm, "Rssi %d, TSF %llu\n", rx_status.signal,
(unsigned long long)rx_status.mactime);
- /*
- * "antenna number"
- *
- * It seems that the antenna field in the phy flags value
- * is actually a bit field. This is undefined by radiotap,
- * it wants an actual antenna number but I always get "7"
- * for most legacy frames I receive indicating that the
- * same frame was received on all three RX chains.
- *
- * I think this field should be removed in favor of a
- * new 802.11n radiotap field "RX chains" that is defined
- * as a bitmask.
- */
- rx_status.antenna = (le16_to_cpu(phy_info->phy_flags) &
- RX_RES_PHY_FLAGS_ANTENNA)
- >> RX_RES_PHY_FLAGS_ANTENNA_POS;
-
/* set the preamble flag if appropriate */
if (phy_info->phy_flags & cpu_to_le16(RX_RES_PHY_FLAGS_SHORT_PREAMBLE))
rx_status.flag |= RX_FLAG_SHORTPRE;
@@ -364,11 +384,74 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
return 0;
}
+static void iwl_mvm_update_rx_statistics(struct iwl_mvm *mvm,
+ struct iwl_notif_statistics *stats)
+{
+ /*
+ * NOTE FW aggregates the statistics - BUT the statistics are cleared
+ * when the driver issues REPLY_STATISTICS_CMD 0x9c with CLEAR_STATS
+ * bit set.
+ */
+ lockdep_assert_held(&mvm->mutex);
+ memcpy(&mvm->rx_stats, &stats->rx, sizeof(struct mvm_statistics_rx));
+}
+
+struct iwl_mvm_stat_data {
+ struct iwl_notif_statistics *stats;
+ struct iwl_mvm *mvm;
+};
+
+static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm_stat_data *data = _data;
+ struct iwl_notif_statistics *stats = data->stats;
+ struct iwl_mvm *mvm = data->mvm;
+ int sig = -stats->general.beacon_filter_average_energy;
+ int last_event;
+ int thold = vif->bss_conf.cqm_rssi_thold;
+ int hyst = vif->bss_conf.cqm_rssi_hyst;
+ u16 id = le32_to_cpu(stats->rx.general.mac_id);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (mvmvif->id != id)
+ return;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ mvmvif->bf_data.ave_beacon_signal = sig;
+
+ if (!(vif->driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
+ return;
+
+ /* CQM Notification */
+ last_event = mvmvif->bf_data.last_cqm_event;
+ if (thold && sig < thold && (last_event == 0 ||
+ sig < last_event - hyst)) {
+ mvmvif->bf_data.last_cqm_event = sig;
+ IWL_DEBUG_RX(mvm, "cqm_iterator cqm low %d\n",
+ sig);
+ ieee80211_cqm_rssi_notify(
+ vif,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+ GFP_KERNEL);
+ } else if (sig > thold &&
+ (last_event == 0 || sig > last_event + hyst)) {
+ mvmvif->bf_data.last_cqm_event = sig;
+ IWL_DEBUG_RX(mvm, "cqm_iterator cqm high %d\n",
+ sig);
+ ieee80211_cqm_rssi_notify(
+ vif,
+ NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+ GFP_KERNEL);
+ }
+}
+
/*
* iwl_mvm_rx_statistics - STATISTICS_NOTIFICATION handler
*
* TODO: This handler is implemented partially.
- * It only gets the NIC's temperature.
*/
int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
@@ -377,11 +460,20 @@ int iwl_mvm_rx_statistics(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_notif_statistics *stats = (void *)&pkt->data;
struct mvm_statistics_general_common *common = &stats->general.common;
+ struct iwl_mvm_stat_data data = {
+ .stats = stats,
+ .mvm = mvm,
+ };
if (mvm->temperature != le32_to_cpu(common->temperature)) {
mvm->temperature = le32_to_cpu(common->temperature);
iwl_mvm_tt_handler(mvm);
}
+ iwl_mvm_update_rx_statistics(mvm, stats);
+ ieee80211_iterate_active_interfaces(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_stat_iterator,
+ &data);
return 0;
}
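
The CQM handling added in iwl_mvm_stat_iterator() above remembers the last signal level that fired an event and only reports a new low/high crossing once the signal has moved past the threshold and beyond the previous event by more than the hysteresis. A minimal standalone sketch of that crossing logic; cqm_check() and the sample values are illustrative, not driver code:

#include <stdio.h>

enum cqm_event { CQM_NONE, CQM_LOW, CQM_HIGH };

/* last_event holds the signal (dBm) that triggered the previous event;
 * 0 means no event has fired yet. */
static enum cqm_event cqm_check(int *last_event, int sig, int thold, int hyst)
{
        if (thold && sig < thold &&
            (*last_event == 0 || sig < *last_event - hyst)) {
                *last_event = sig;
                return CQM_LOW;
        } else if (sig > thold &&
                   (*last_event == 0 || sig > *last_event + hyst)) {
                *last_event = sig;
                return CQM_HIGH;
        }
        return CQM_NONE;
}

int main(void)
{
        const int samples[] = { -60, -75, -77, -74, -62, -61 };
        int last = 0, i;

        /* threshold -70 dBm, hysteresis 3 dB: the -77 sample does not
         * re-trigger a LOW event because -77 is not below -75 - 3 */
        for (i = 0; i < 6; i++)
                printf("sig %4d dBm -> event %d\n", samples[i],
                       cqm_check(&last, samples[i], -70, 3));
        return 0;
}
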
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 2157b0f8ced..9a7ab849530 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -137,8 +137,8 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
{
int fw_idx, req_idx;
- fw_idx = 0;
- for (req_idx = req->n_ssids - 1; req_idx > 0; req_idx--) {
+ for (req_idx = req->n_ssids - 1, fw_idx = 0; req_idx > 0;
+ req_idx--, fw_idx++) {
cmd->direct_scan[fw_idx].id = WLAN_EID_SSID;
cmd->direct_scan[fw_idx].len = req->ssids[req_idx].ssid_len;
memcpy(cmd->direct_scan[fw_idx].ssid,
@@ -153,7 +153,9 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_scan_cmd *cmd,
* just to notify that this scan is active and not passive.
* In order to notify the FW of the number of SSIDs we wish to scan (including
* the zero-length one), we need to set the corresponding bits in chan->type,
- * one for each SSID, and set the active bit (first).
+ * one for each SSID, and set the active bit (first). The first SSID is already
+ * included in the probe template, so we need to set only req->n_ssids - 1 bits
+ * in addition to the first bit.
*/
static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
{
@@ -176,19 +178,12 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
(cmd->data + le16_to_cpu(cmd->tx_cmd.len));
int i;
- __le32 chan_type_value;
-
- if (req->n_ssids > 0)
- chan_type_value = cpu_to_le32(BIT(req->n_ssids + 1) - 1);
- else
- chan_type_value = SCAN_CHANNEL_TYPE_PASSIVE;
for (i = 0; i < cmd->channel_count; i++) {
chan->channel = cpu_to_le16(req->channels[i]->hw_value);
+ chan->type = cpu_to_le32(BIT(req->n_ssids) - 1);
if (req->channels[i]->flags & IEEE80211_CHAN_PASSIVE_SCAN)
- chan->type = SCAN_CHANNEL_TYPE_PASSIVE;
- else
- chan->type = chan_type_value;
+ chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
chan->active_dwell = cpu_to_le16(active_dwell);
chan->passive_dwell = cpu_to_le16(passive_dwell);
chan->iteration_count = cpu_to_le16(1);
@@ -306,10 +301,12 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
*/
if (req->n_ssids > 0) {
cmd->passive2active = cpu_to_le16(1);
+ cmd->scan_flags |= SCAN_FLAGS_PASSIVE2ACTIVE;
ssid = req->ssids[0].ssid;
ssid_len = req->ssids[0].ssid_len;
} else {
cmd->passive2active = 0;
+ cmd->scan_flags &= ~SCAN_FLAGS_PASSIVE2ACTIVE;
}
iwl_mvm_scan_fill_ssids(cmd, req);
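
The channel-type change above encodes the number of SSIDs to probe directly as a bitmask: BIT(n_ssids) - 1 sets one bit per SSID, and clearing the active bit turns the channel passive. A small sketch of that computation, under the assumption that SCAN_CHANNEL_TYPE_ACTIVE is bit 0:

#include <stdio.h>

#define BIT(n)                          (1u << (n))
#define SCAN_CHANNEL_TYPE_ACTIVE        BIT(0)  /* assumed: active flag is bit 0 */

static unsigned int scan_chan_type(unsigned int n_ssids, int passive_channel)
{
        /* one bit per SSID to probe for; bit 0 doubles as the active flag */
        unsigned int type = BIT(n_ssids) - 1;

        if (passive_channel)
                type &= ~SCAN_CHANNEL_TYPE_ACTIVE;
        return type;
}

int main(void)
{
        printf("3 SSIDs, active channel:  0x%x\n", scan_chan_type(3, 0)); /* 0x7 */
        printf("3 SSIDs, passive channel: 0x%x\n", scan_chan_type(3, 1)); /* 0x6 */
        printf("passive-only scan:        0x%x\n", scan_chan_type(0, 1)); /* 0x0 */
        return 0;
}
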
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 62fe5209093..44add291531 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -608,6 +608,8 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *bsta)
return ret;
}
+#define IWL_MAX_RX_BA_SESSIONS 16
+
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start)
{
@@ -618,11 +620,20 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lockdep_assert_held(&mvm->mutex);
+ if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
+ IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
+ return -ENOSPC;
+ }
+
cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
cmd.sta_id = mvm_sta->sta_id;
cmd.add_modify = STA_MODE_MODIFY;
- cmd.add_immediate_ba_tid = (u8) tid;
- cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+ if (start) {
+ cmd.add_immediate_ba_tid = (u8) tid;
+ cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
+ } else {
+ cmd.remove_immediate_ba_tid = (u8) tid;
+ }
cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID :
STA_MODIFY_REMOVE_BA_TID;
@@ -648,6 +659,14 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
break;
}
+ if (!ret) {
+ if (start)
+ mvm->rx_ba_sessions++;
+ else if (mvm->rx_ba_sessions > 0)
+ /* check that restart flow didn't zero the counter */
+ mvm->rx_ba_sessions--;
+ }
+
return ret;
}
@@ -807,8 +826,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* method for HT traffic
* this function also sends the LQ command
*/
- return iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq,
- mvmsta, true);
+ return iwl_mvm_tx_protection(mvm, mvmsta, true);
/*
* TODO: remove the TLC_RTS flag when we tear down the last
* AGG session (agg_tids_count in DVM)
@@ -896,6 +914,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct iwl_mvm_sta *mvmsta = (void *)sta->drv_priv;
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
u16 txq_id;
+ enum iwl_mvm_agg_state old_state;
/*
* First set the agg state to OFF to avoid calling
@@ -905,13 +924,17 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
txq_id = tid_data->txq_id;
IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
mvmsta->sta_id, tid, txq_id, tid_data->state);
+ old_state = tid_data->state;
tid_data->state = IWL_AGG_OFF;
spin_unlock_bh(&mvmsta->lock);
- if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
- IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
+ if (old_state >= IWL_AGG_ON) {
+ if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), true))
+ IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
+
+ iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
+ }
- iwl_trans_txq_disable(mvm->trans, tid_data->txq_id);
mvm->queue_to_mac80211[tid_data->txq_id] =
IWL_INVALID_MAC80211_QUEUE;
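
The RX BA session accounting added to iwl_mvm_sta_rx_agg() above caps the number of concurrent sessions and keeps the counter from going negative after a restart. A minimal sketch of that bookkeeping; rx_agg() only models the limit handling, not the firmware command:

#include <stdio.h>
#include <errno.h>

#define MAX_RX_BA_SESSIONS 16

static int rx_ba_sessions;

static int rx_agg(int start)
{
        if (start && rx_ba_sessions >= MAX_RX_BA_SESSIONS)
                return -ENOSPC;

        /* the real driver sends the ADD_STA command here */

        if (start)
                rx_ba_sessions++;
        else if (rx_ba_sessions > 0)    /* restart may already have cleared it */
                rx_ba_sessions--;
        return 0;
}

int main(void)
{
        int i, ret;

        for (i = 0; i < 18; i++) {
                ret = rx_agg(1);
                if (ret)
                        printf("session %d rejected (%d)\n", i, ret);
        }
        return 0;
}
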
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 39b3ffbc53b..76a3c177e10 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -73,7 +73,6 @@
#include "iwl-prph.h"
/* A TimeUnit is 1024 microsecond */
-#define TU_TO_JIFFIES(_tu) (usecs_to_jiffies((_tu) * 1024))
#define MSEC_TO_TU(_msec) (_msec*1000/1024)
/*
@@ -138,6 +137,20 @@ static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
schedule_work(&mvm->roc_done_wk);
}
+static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ const char *errmsg)
+{
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return false;
+ if (vif->bss_conf.assoc && vif->bss_conf.dtim_period)
+ return false;
+ if (errmsg)
+ IWL_ERR(mvm, "%s\n", errmsg);
+ ieee80211_connection_loss(vif);
+ return true;
+}
+
/*
* Handles a FW notification for an event that is known to the driver.
*
@@ -163,10 +176,15 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
 * P2P Device discoverability, while there are other higher priority
* events in the system).
*/
- WARN_ONCE(!le32_to_cpu(notif->status),
- "Failed to schedule time event\n");
+ if (WARN_ONCE(!le32_to_cpu(notif->status),
+ "Failed to schedule time event\n")) {
+ if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) {
+ iwl_mvm_te_clear_data(mvm, te_data);
+ return;
+ }
+ }
- if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_END) {
+ if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_END) {
IWL_DEBUG_TE(mvm,
"TE ended - current time %lu, estimated end %lu\n",
jiffies, te_data->end_jiffies);
@@ -180,19 +198,12 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
* By now, we should have finished association
* and know the dtim period.
*/
- if (te_data->vif->type == NL80211_IFTYPE_STATION &&
- (!te_data->vif->bss_conf.assoc ||
- !te_data->vif->bss_conf.dtim_period)) {
- IWL_ERR(mvm,
- "No association and the time event is over already...\n");
- ieee80211_connection_loss(te_data->vif);
- }
-
+ iwl_mvm_te_check_disconnect(mvm, te_data->vif,
+ "No association and the time event is over already...");
iwl_mvm_te_clear_data(mvm, te_data);
- } else if (le32_to_cpu(notif->action) & TE_NOTIF_HOST_EVENT_START) {
+ } else if (le32_to_cpu(notif->action) & TE_V2_NOTIF_HOST_EVENT_START) {
te_data->running = true;
- te_data->end_jiffies = jiffies +
- TU_TO_JIFFIES(te_data->duration);
+ te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration);
if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
@@ -257,10 +268,67 @@ static bool iwl_mvm_time_event_response(struct iwl_notif_wait_data *notif_wait,
return true;
}
+/* used to convert from time event API v2 to v1 */
+#define TE_V2_DEP_POLICY_MSK (TE_V2_DEP_OTHER | TE_V2_DEP_TSF |\
+ TE_V2_EVENT_SOCIOPATHIC)
+static inline u16 te_v2_get_notify(__le16 policy)
+{
+ return le16_to_cpu(policy) & TE_V2_NOTIF_MSK;
+}
+
+static inline u16 te_v2_get_dep_policy(__le16 policy)
+{
+ return (le16_to_cpu(policy) & TE_V2_DEP_POLICY_MSK) >>
+ TE_V2_PLACEMENT_POS;
+}
+
+static inline u16 te_v2_get_absence(__le16 policy)
+{
+ return (le16_to_cpu(policy) & TE_V2_ABSENCE) >> TE_V2_ABSENCE_POS;
+}
+
+static void iwl_mvm_te_v2_to_v1(const struct iwl_time_event_cmd_v2 *cmd_v2,
+ struct iwl_time_event_cmd_v1 *cmd_v1)
+{
+ cmd_v1->id_and_color = cmd_v2->id_and_color;
+ cmd_v1->action = cmd_v2->action;
+ cmd_v1->id = cmd_v2->id;
+ cmd_v1->apply_time = cmd_v2->apply_time;
+ cmd_v1->max_delay = cmd_v2->max_delay;
+ cmd_v1->depends_on = cmd_v2->depends_on;
+ cmd_v1->interval = cmd_v2->interval;
+ cmd_v1->duration = cmd_v2->duration;
+ if (cmd_v2->repeat == TE_V2_REPEAT_ENDLESS)
+ cmd_v1->repeat = cpu_to_le32(TE_V1_REPEAT_ENDLESS);
+ else
+ cmd_v1->repeat = cpu_to_le32(cmd_v2->repeat);
+ cmd_v1->max_frags = cpu_to_le32(cmd_v2->max_frags);
+ cmd_v1->interval_reciprocal = 0; /* unused */
+
+ cmd_v1->dep_policy = cpu_to_le32(te_v2_get_dep_policy(cmd_v2->policy));
+ cmd_v1->is_present = cpu_to_le32(!te_v2_get_absence(cmd_v2->policy));
+ cmd_v1->notify = cpu_to_le32(te_v2_get_notify(cmd_v2->policy));
+}
+
+static int iwl_mvm_send_time_event_cmd(struct iwl_mvm *mvm,
+ const struct iwl_time_event_cmd_v2 *cmd)
+{
+ struct iwl_time_event_cmd_v1 cmd_v1;
+
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_TIME_EVENT_API_V2)
+ return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
+ sizeof(*cmd), cmd);
+
+ iwl_mvm_te_v2_to_v1(cmd, &cmd_v1);
+ return iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
+ sizeof(cmd_v1), &cmd_v1);
+}
+
+
static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_mvm_time_event_data *te_data,
- struct iwl_time_event_cmd *te_cmd)
+ struct iwl_time_event_cmd_v2 *te_cmd)
{
static const u8 time_event_response[] = { TIME_EVENT_CMD };
struct iwl_notification_wait wait_time_event;
@@ -296,8 +364,7 @@ static int iwl_mvm_time_event_send_add(struct iwl_mvm *mvm,
ARRAY_SIZE(time_event_response),
iwl_mvm_time_event_response, te_data);
- ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
- sizeof(*te_cmd), te_cmd);
+ ret = iwl_mvm_send_time_event_cmd(mvm, te_cmd);
if (ret) {
IWL_ERR(mvm, "Couldn't send TIME_EVENT_CMD: %d\n", ret);
iwl_remove_notification(&mvm->notif_wait, &wait_time_event);
@@ -324,13 +391,12 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
- struct iwl_time_event_cmd time_cmd = {};
+ struct iwl_time_event_cmd_v2 time_cmd = {};
lockdep_assert_held(&mvm->mutex);
if (te_data->running &&
- time_after(te_data->end_jiffies,
- jiffies + TU_TO_JIFFIES(min_duration))) {
+ time_after(te_data->end_jiffies, TU_TO_EXP_TIME(min_duration))) {
IWL_DEBUG_TE(mvm, "We have enough time in the current TE: %u\n",
jiffies_to_msecs(te_data->end_jiffies - jiffies));
return;
@@ -359,17 +425,14 @@ void iwl_mvm_protect_session(struct iwl_mvm *mvm,
time_cmd.apply_time =
cpu_to_le32(iwl_read_prph(mvm->trans, DEVICE_SYSTEM_TIME_REG));
- time_cmd.dep_policy = TE_INDEPENDENT;
- time_cmd.is_present = cpu_to_le32(1);
- time_cmd.max_frags = cpu_to_le32(TE_FRAG_NONE);
+ time_cmd.max_frags = TE_V2_FRAG_NONE;
time_cmd.max_delay = cpu_to_le32(500);
/* TODO: why do we need to interval = bi if it is not periodic? */
time_cmd.interval = cpu_to_le32(1);
- time_cmd.interval_reciprocal = cpu_to_le32(iwl_mvm_reciprocal(1));
time_cmd.duration = cpu_to_le32(duration);
- time_cmd.repeat = cpu_to_le32(1);
- time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_EVENT_START |
- TE_NOTIF_HOST_EVENT_END);
+ time_cmd.repeat = 1;
+ time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
+ TE_V2_NOTIF_HOST_EVENT_END);
iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
@@ -383,7 +446,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
struct iwl_mvm_vif *mvmvif,
struct iwl_mvm_time_event_data *te_data)
{
- struct iwl_time_event_cmd time_cmd = {};
+ struct iwl_time_event_cmd_v2 time_cmd = {};
u32 id, uid;
int ret;
@@ -420,8 +483,7 @@ void iwl_mvm_remove_time_event(struct iwl_mvm *mvm,
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color));
IWL_DEBUG_TE(mvm, "Removing TE 0x%x\n", le32_to_cpu(time_cmd.id));
- ret = iwl_mvm_send_cmd_pdu(mvm, TIME_EVENT_CMD, CMD_SYNC,
- sizeof(time_cmd), &time_cmd);
+ ret = iwl_mvm_send_time_event_cmd(mvm, &time_cmd);
if (WARN_ON(ret))
return;
}
@@ -441,7 +503,7 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_time_event_data *te_data = &mvmvif->time_event_data;
- struct iwl_time_event_cmd time_cmd = {};
+ struct iwl_time_event_cmd_v2 time_cmd = {};
lockdep_assert_held(&mvm->mutex);
if (te_data->running) {
@@ -472,8 +534,6 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}
time_cmd.apply_time = cpu_to_le32(0);
- time_cmd.dep_policy = cpu_to_le32(TE_INDEPENDENT);
- time_cmd.is_present = cpu_to_le32(1);
time_cmd.interval = cpu_to_le32(1);
/*
@@ -482,12 +542,12 @@ int iwl_mvm_start_p2p_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
* scheduled. To improve the chances of it being scheduled, allow them
* to be fragmented, and in addition allow them to be delayed.
*/
- time_cmd.max_frags = cpu_to_le32(MSEC_TO_TU(duration)/20);
+ time_cmd.max_frags = min(MSEC_TO_TU(duration)/50, TE_V2_FRAG_ENDLESS);
time_cmd.max_delay = cpu_to_le32(MSEC_TO_TU(duration/2));
time_cmd.duration = cpu_to_le32(MSEC_TO_TU(duration));
- time_cmd.repeat = cpu_to_le32(1);
- time_cmd.notify = cpu_to_le32(TE_NOTIF_HOST_EVENT_START |
- TE_NOTIF_HOST_EVENT_END);
+ time_cmd.repeat = 1;
+ time_cmd.policy = cpu_to_le16(TE_V2_NOTIF_HOST_EVENT_START |
+ TE_V2_NOTIF_HOST_EVENT_END);
return iwl_mvm_time_event_send_add(mvm, vif, te_data, &time_cmd);
}
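
The time-event changes above drop the local TU_TO_JIFFIES() macro in favour of mac80211's TU_TO_EXP_TIME(), which expands to roughly jiffies + usecs_to_jiffies(tu * 1024). A small sketch of the underlying unit conversions, in plain arithmetic without jiffies:

#include <stdio.h>

/* a TU (time unit) is 1024 microseconds */
#define MSEC_TO_TU(_msec)       ((_msec) * 1000 / 1024)
#define TU_TO_USEC(_tu)         ((_tu) * 1024)

int main(void)
{
        unsigned int duration_ms = 500;
        unsigned int tu = MSEC_TO_TU(duration_ms);

        printf("%u ms = %u TU = %u us\n", duration_ms, tu, TU_TO_USEC(tu));
        /* prints: 500 ms = 488 TU = 499712 us */
        return 0;
}
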
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index d6ae7f16ac1..1f3282dff51 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -391,8 +391,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
mvmsta = (void *)sta->drv_priv;
if (enable == mvmsta->tt_tx_protection)
continue;
- err = iwl_mvm_tx_protection(mvm, &mvmsta->lq_sta.lq,
- mvmsta, enable);
+ err = iwl_mvm_tx_protection(mvm, mvmsta, enable);
if (err) {
IWL_ERR(mvm, "Failed to %s Tx protection\n",
enable ? "enable" : "disable");
@@ -513,12 +512,39 @@ static const struct iwl_tt_params iwl7000_tt_params = {
.support_tx_backoff = true,
};
+static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
+ .ct_kill_entry = 118,
+ .ct_kill_exit = 96,
+ .ct_kill_duration = 5,
+ .dynamic_smps_entry = 114,
+ .dynamic_smps_exit = 110,
+ .tx_protection_entry = 114,
+ .tx_protection_exit = 108,
+ .tx_backoff = {
+ {.temperature = 112, .backoff = 300},
+ {.temperature = 113, .backoff = 800},
+ {.temperature = 114, .backoff = 1500},
+ {.temperature = 115, .backoff = 3000},
+ {.temperature = 116, .backoff = 5000},
+ {.temperature = 117, .backoff = 10000},
+ },
+ .support_ct_kill = true,
+ .support_dynamic_smps = true,
+ .support_tx_protection = true,
+ .support_tx_backoff = true,
+};
+
void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
{
struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
IWL_DEBUG_TEMP(mvm, "Initialize Thermal Throttling\n");
- tt->params = &iwl7000_tt_params;
+
+ if (mvm->cfg->high_temp)
+ tt->params = &iwl7000_high_temp_tt_params;
+ else
+ tt->params = &iwl7000_tt_params;
+
tt->throttle = false;
INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index f0e96a92740..e05440d9031 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -91,11 +91,10 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
/* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
- if (info->band == IEEE80211_BAND_2GHZ &&
- (skb->protocol == cpu_to_be16(ETH_P_PAE) ||
- is_multicast_ether_addr(hdr->addr1) ||
- ieee80211_is_back_req(fc) ||
- ieee80211_is_mgmt(fc)))
+ if (info->band == IEEE80211_BAND_2GHZ &&
+ (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO ||
+ is_multicast_ether_addr(hdr->addr1) ||
+ ieee80211_is_back_req(fc) || ieee80211_is_mgmt(fc)))
tx_flags |= TX_CMD_FLG_BT_DIS;
if (ieee80211_has_morefrags(fc))
@@ -123,6 +122,8 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
* it
*/
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
+ } else if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
+ tx_cmd->pm_frame_timeout = cpu_to_le16(2);
} else {
tx_cmd->pm_frame_timeout = 0;
}
@@ -171,7 +172,7 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
}
/*
- * for data packets, rate info comes from the table inside he fw. This
+ * for data packets, rate info comes from the table inside the fw. This
* table is controlled by LINK_QUALITY commands
*/
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 1e1332839e4..a9c35749143 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -453,6 +453,29 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
}
+void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
+{
+ const struct fw_img *img;
+ int ofs, len = 0;
+ u8 *buf;
+
+ if (!mvm->ucode_loaded)
+ return;
+
+ img = &mvm->fw->img[mvm->cur_ucode];
+ ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
+ len = img->sec[IWL_UCODE_SECTION_DATA].len;
+
+ buf = kzalloc(len, GFP_KERNEL);
+ if (!buf)
+ return;
+
+ iwl_trans_read_mem_bytes(mvm->trans, ofs, buf, len);
+ iwl_print_hex_error(mvm->trans, buf, len);
+
+ kfree(buf);
+}
+
/**
* iwl_mvm_send_lq_cmd() - Send link quality command
* @init: This command is sent as part of station initialization right
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 81f3ea5b09a..dc02cb9792a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -130,6 +130,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x423C, 0x1306, iwl5150_abg_cfg)}, /* Half Mini Card */
{IWL_PCI_DEVICE(0x423C, 0x1221, iwl5150_agn_cfg)}, /* Mini Card */
{IWL_PCI_DEVICE(0x423C, 0x1321, iwl5150_agn_cfg)}, /* Half Mini Card */
+ {IWL_PCI_DEVICE(0x423C, 0x1326, iwl5150_abg_cfg)}, /* Half Mini Card */
{IWL_PCI_DEVICE(0x423D, 0x1211, iwl5150_agn_cfg)}, /* Mini Card */
{IWL_PCI_DEVICE(0x423D, 0x1311, iwl5150_agn_cfg)}, /* Half Mini Card */
@@ -272,9 +273,9 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x08B1, 0x4462, iwl7260_n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4870, iwl7260_2ac_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x486E, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg)},
- {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A70, iwl7260_2ac_cfg_high_temp)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A6E, iwl7260_2ac_cfg_high_temp)},
+ {IWL_PCI_DEVICE(0x08B1, 0x4A6C, iwl7260_2ac_cfg_high_temp)},
{IWL_PCI_DEVICE(0x08B1, 0x4020, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B2, 0x4220, iwl7260_2n_cfg)},
{IWL_PCI_DEVICE(0x08B1, 0x4420, iwl7260_2n_cfg)},
@@ -324,15 +325,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int ret;
iwl_trans = iwl_trans_pcie_alloc(pdev, ent, cfg);
- if (iwl_trans == NULL)
- return -ENOMEM;
+ if (IS_ERR(iwl_trans))
+ return PTR_ERR(iwl_trans);
pci_set_drvdata(pdev, iwl_trans);
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
trans_pcie->drv = iwl_drv_start(iwl_trans, cfg);
- if (IS_ERR_OR_NULL(trans_pcie->drv)) {
+ if (IS_ERR(trans_pcie->drv)) {
ret = PTR_ERR(trans_pcie->drv);
goto out_free_trans;
}
@@ -367,21 +368,19 @@ static void iwl_pci_remove(struct pci_dev *pdev)
static int iwl_pci_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
-
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
* WoWLAN is enabled - don't kill the NIC, someone may need it in Sx.
*/
- return iwl_trans_suspend(iwl_trans);
+ return 0;
}
static int iwl_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_trans *iwl_trans = pci_get_drvdata(pdev);
+ struct iwl_trans *trans = pci_get_drvdata(pdev);
+ bool hw_rfkill;
/* Before you put code here, think about WoWLAN. You cannot check here
* whether WoWLAN is enabled or not, and your code will run even if
@@ -394,7 +393,15 @@ static int iwl_pci_resume(struct device *device)
*/
pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
- return iwl_trans_resume(iwl_trans);
+ if (!trans->op_mode)
+ return 0;
+
+ iwl_enable_rfkill_int(trans);
+
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+
+ return 0;
}
static SIMPLE_DEV_PM_OPS(iwl_dev_pm_ops, iwl_pci_suspend, iwl_pci_resume);
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index b654dcdd048..fa22639b63c 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -392,7 +392,6 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
/*****************************************************
* Error handling
******************************************************/
-int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf);
void iwl_pcie_dump_csr(struct iwl_trans *trans);
/*****************************************************
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index fd848cd1583..3f237b42eb3 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -112,15 +112,16 @@
*/
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
- int s = rxq->read - rxq->write;
-
- if (s <= 0)
- s += RX_QUEUE_SIZE;
- /* keep some buffer to not confuse full and empty queue */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
+ /* Make sure RX_QUEUE_SIZE is a power of 2 */
+ BUILD_BUG_ON(RX_QUEUE_SIZE & (RX_QUEUE_SIZE - 1));
+
+ /*
+ * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
+ * between empty and completely full queues.
+ * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
+ * defined for negative dividends.
+ */
+ return (rxq->read - rxq->write - 1) & (RX_QUEUE_SIZE - 1);
}
/*
@@ -793,7 +794,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
}
iwl_pcie_dump_csr(trans);
- iwl_pcie_dump_fh(trans, NULL);
+ iwl_dump_fh(trans, NULL);
set_bit(STATUS_FW_ERROR, &trans_pcie->status);
clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
@@ -1120,6 +1121,7 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
struct iwl_trans *trans = data;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 inta, inta_mask;
+ irqreturn_t ret = IRQ_NONE;
lockdep_assert_held(&trans_pcie->irq_lock);
@@ -1168,10 +1170,8 @@ static irqreturn_t iwl_pcie_isr(int irq, void *data)
/* the thread will service interrupts and re-enable them */
if (likely(inta))
return IRQ_WAKE_THREAD;
- else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
- !trans_pcie->inta)
- iwl_enable_interrupts(trans);
- return IRQ_HANDLED;
+
+ ret = IRQ_HANDLED;
none:
/* re-enable interrupts here since we don't have anything to service. */
@@ -1180,7 +1180,7 @@ none:
!trans_pcie->inta)
iwl_enable_interrupts(trans);
- return IRQ_NONE;
+ return ret;
}
/* interrupt handler using ict table, with this interrupt driver will
@@ -1199,6 +1199,7 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
u32 val = 0;
u32 read;
unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
if (!trans)
return IRQ_NONE;
@@ -1211,7 +1212,7 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
* use legacy interrupt.
*/
if (unlikely(!trans_pcie->use_ict)) {
- irqreturn_t ret = iwl_pcie_isr(irq, data);
+ ret = iwl_pcie_isr(irq, data);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
return ret;
}
@@ -1280,17 +1281,9 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
if (likely(inta)) {
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
return IRQ_WAKE_THREAD;
- } else if (test_bit(STATUS_INT_ENABLED, &trans_pcie->status) &&
- !trans_pcie->inta) {
- /* Allow interrupt if was disabled by this handler and
- * no tasklet was schedules, We should not enable interrupt,
- * tasklet will enable it.
- */
- iwl_enable_interrupts(trans);
}
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return IRQ_HANDLED;
+ ret = IRQ_HANDLED;
none:
/* re-enable interrupts here since we don't have anything to service.
@@ -1301,5 +1294,5 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
iwl_enable_interrupts(trans);
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return IRQ_NONE;
+ return ret;
}
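
The new iwl_rxq_space() relies on the queue size being a power of two so that (read - write - 1) & (SIZE - 1) yields the free space while keeping one slot reserved to distinguish an empty queue from a full one; iwl_queue_space() in pcie/tx.c further below applies the same trick with an extra n_window cap. A standalone sketch of the calculation, with an arbitrary power-of-two queue size:

#include <stdio.h>

#define QUEUE_SIZE 256          /* must be a power of two */

/* free slots, leaving one entry unused so that read == write can only
 * mean "empty", never "full" */
static unsigned int queue_space(unsigned int read, unsigned int write)
{
        return (read - write - 1) & (QUEUE_SIZE - 1);
}

int main(void)
{
        printf("empty queue:   %u\n", queue_space(0, 0));       /* 255 */
        printf("one in flight: %u\n", queue_space(0, 1));       /* 254 */
        printf("wrapped ptrs:  %u\n", queue_space(10, 250));    /* 15 */
        return 0;
}
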
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 826c15602c4..bad95d28d50 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -670,6 +670,11 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
return err;
}
+ /* Reset the entire device */
+ iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ usleep_range(10, 15);
+
iwl_pcie_apm_init(trans);
/* From now on, the op_mode will be kept updated about RF kill state */
@@ -815,25 +820,6 @@ static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}
-#ifdef CONFIG_PM_SLEEP
-static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
-{
- return 0;
-}
-
-static int iwl_trans_pcie_resume(struct iwl_trans *trans)
-{
- bool hw_rfkill;
-
- iwl_enable_rfkill_int(trans);
-
- hw_rfkill = iwl_is_rfkill_set(trans);
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
-
- return 0;
-}
-#endif /* CONFIG_PM_SLEEP */
-
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
unsigned long *flags)
{
@@ -1033,71 +1019,6 @@ static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
-static const char *get_fh_string(int cmd)
-{
-#define IWL_CMD(x) case x: return #x
- switch (cmd) {
- IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
- IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
- IWL_CMD(FH_RSCSR_CHNL0_WPTR);
- IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
- IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
- IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
- IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
- IWL_CMD(FH_TSSR_TX_STATUS_REG);
- IWL_CMD(FH_TSSR_TX_ERROR_REG);
- default:
- return "UNKNOWN";
- }
-#undef IWL_CMD
-}
-
-int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf)
-{
- int i;
- static const u32 fh_tbl[] = {
- FH_RSCSR_CHNL0_STTS_WPTR_REG,
- FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- FH_RSCSR_CHNL0_WPTR,
- FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_MEM_RSSR_SHARED_CTRL_REG,
- FH_MEM_RSSR_RX_STATUS_REG,
- FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
- FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_ERROR_REG
- };
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (buf) {
- int pos = 0;
- size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
-
- *buf = kmalloc(bufsz, GFP_KERNEL);
- if (!*buf)
- return -ENOMEM;
-
- pos += scnprintf(*buf + pos, bufsz - pos,
- "FH register values:\n");
-
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
- pos += scnprintf(*buf + pos, bufsz - pos,
- " %34s: 0X%08x\n",
- get_fh_string(fh_tbl[i]),
- iwl_read_direct32(trans, fh_tbl[i]));
-
- return pos;
- }
-#endif
-
- IWL_ERR(trans, "FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
- IWL_ERR(trans, " %34s: 0X%08x\n",
- get_fh_string(fh_tbl[i]),
- iwl_read_direct32(trans, fh_tbl[i]));
-
- return 0;
-}
-
static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
@@ -1178,18 +1099,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans)
} while (0)
/* file operation */
-#define DEBUGFS_READ_FUNC(name) \
-static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
- char __user *user_buf, \
- size_t count, loff_t *ppos);
-
-#define DEBUGFS_WRITE_FUNC(name) \
-static ssize_t iwl_dbgfs_##name##_write(struct file *file, \
- const char __user *user_buf, \
- size_t count, loff_t *ppos);
-
#define DEBUGFS_READ_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.read = iwl_dbgfs_##name##_read, \
.open = simple_open, \
@@ -1197,7 +1107,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
};
#define DEBUGFS_WRITE_FILE_OPS(name) \
- DEBUGFS_WRITE_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.write = iwl_dbgfs_##name##_write, \
.open = simple_open, \
@@ -1205,8 +1114,6 @@ static const struct file_operations iwl_dbgfs_##name##_ops = { \
};
#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name); \
- DEBUGFS_WRITE_FUNC(name); \
static const struct file_operations iwl_dbgfs_##name##_ops = { \
.write = iwl_dbgfs_##name##_write, \
.read = iwl_dbgfs_##name##_read, \
@@ -1390,7 +1297,7 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
int pos = 0;
ssize_t ret = -EFAULT;
- ret = pos = iwl_pcie_dump_fh(trans, &buf);
+ ret = pos = iwl_dump_fh(trans, &buf);
if (buf) {
ret = simple_read_from_buffer(user_buf,
count, ppos, buf, pos);
@@ -1454,10 +1361,6 @@ static const struct iwl_trans_ops trans_ops_pcie = {
.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
-#ifdef CONFIG_PM_SLEEP
- .suspend = iwl_trans_pcie_suspend,
- .resume = iwl_trans_pcie_resume,
-#endif
.write8 = iwl_trans_pcie_write8,
.write32 = iwl_trans_pcie_write32,
.read32 = iwl_trans_pcie_read32,
@@ -1483,9 +1386,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
trans = kzalloc(sizeof(struct iwl_trans) +
sizeof(struct iwl_trans_pcie), GFP_KERNEL);
-
- if (!trans)
- return NULL;
+ if (!trans) {
+ err = -ENOMEM;
+ goto out;
+ }
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1497,15 +1401,20 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
spin_lock_init(&trans_pcie->reg_lock);
init_waitqueue_head(&trans_pcie->ucode_write_waitq);
- /* W/A - seems to solve weird behavior. We need to remove this if we
- * don't want to stay in L1 all the time. This wastes a lot of power */
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
+ if (!cfg->base_params->pcie_l1_allowed) {
+ /*
+ * W/A - seems to solve weird behavior. We need to remove this
+ * if we don't want to stay in L1 all the time. This wastes a
+ * lot of power.
+ */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
+ PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+ }
- if (pci_enable_device(pdev)) {
- err = -ENODEV;
+ err = pci_enable_device(pdev);
+ if (err)
goto out_no_pci;
- }
pci_set_master(pdev);
@@ -1574,17 +1483,20 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
SLAB_HWCACHE_ALIGN,
NULL);
- if (!trans->dev_cmd_pool)
+ if (!trans->dev_cmd_pool) {
+ err = -ENOMEM;
goto out_pci_disable_msi;
+ }
trans_pcie->inta_mask = CSR_INI_SET_MASK;
if (iwl_pcie_alloc_ict(trans))
goto out_free_cmd_pool;
- if (request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
- iwl_pcie_irq_handler,
- IRQF_SHARED, DRV_NAME, trans)) {
+ err = request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
+ iwl_pcie_irq_handler,
+ IRQF_SHARED, DRV_NAME, trans);
+ if (err) {
IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
goto out_free_ict;
}
@@ -1603,5 +1515,6 @@ out_pci_disable_device:
pci_disable_device(pdev);
out_no_pci:
kfree(trans);
- return NULL;
+out:
+ return ERR_PTR(err);
}
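
iwl_trans_pcie_alloc() above switches from returning NULL to propagating an errno through ERR_PTR(), which the probe path in pcie/drv.c then unpacks with IS_ERR()/PTR_ERR(). A userspace sketch of that pattern, using minimal stand-ins for the kernel err.h helpers:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* minimal userspace stand-ins for the kernel's err.h helpers */
static inline void *ERR_PTR(long err)       { return (void *)err; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-4095;
}

static void *alloc_trans(int fail)
{
        void *trans;

        if (fail)
                return ERR_PTR(-ENODEV);        /* propagate the real cause */

        trans = malloc(64);
        if (!trans)
                return ERR_PTR(-ENOMEM);
        return trans;
}

int main(void)
{
        void *trans = alloc_trans(1);

        if (IS_ERR(trans)) {
                printf("probe fails with %ld\n", PTR_ERR(trans));
                return 1;
        }
        free(trans);
        return 0;
}
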
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index c47c92165ab..f45eb29c2ed 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -65,18 +65,30 @@
***************************************************/
static int iwl_queue_space(const struct iwl_queue *q)
{
- int s = q->read_ptr - q->write_ptr;
-
- if (q->read_ptr > q->write_ptr)
- s -= q->n_bd;
-
- if (s <= 0)
- s += q->n_window;
- /* keep some reserve to not confuse empty and full situations */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
+ unsigned int max;
+ unsigned int used;
+
+ /*
+ * To avoid ambiguity between empty and completely full queues, there
+ * should always be less than q->n_bd elements in the queue.
+ * If q->n_window is smaller than q->n_bd, there is no need to reserve
+ * any queue entries for this purpose.
+ */
+ if (q->n_window < q->n_bd)
+ max = q->n_window;
+ else
+ max = q->n_bd - 1;
+
+ /*
+ * q->n_bd is a power of 2, so the following is equivalent to modulo by
+ * q->n_bd and is well defined for negative dividends.
+ */
+ used = (q->write_ptr - q->read_ptr) & (q->n_bd - 1);
+
+ if (WARN_ON(used > max))
+ return 0;
+
+ return max - used;
}
/*
@@ -451,13 +463,10 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
return -EINVAL;
}
- if (WARN_ON(addr & ~DMA_BIT_MASK(36)))
+ if (WARN(addr & ~IWL_TX_DMA_MASK,
+ "Unaligned address = %llx\n", (unsigned long long)addr))
return -EINVAL;
- if (unlikely(addr & ~IWL_TX_DMA_MASK))
- IWL_ERR(trans, "Unaligned address = %llx\n",
- (unsigned long long)addr);
-
iwl_pcie_tfd_set_tb(tfd, num_tbs, addr, len);
return 0;
@@ -829,7 +838,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
sizeof(struct iwl_txq), GFP_KERNEL);
if (!trans_pcie->txq) {
IWL_ERR(trans, "Not enough memory for txq\n");
- ret = ENOMEM;
+ ret = -ENOMEM;
goto error;
}
@@ -1153,10 +1162,10 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
/*
* iwl_pcie_enqueue_hcmd - enqueue a uCode command
* @priv: device private data point
- * @cmd: a point to the ucode command structure
+ * @cmd: a pointer to the ucode command structure
*
- * The function returns < 0 values to indicate the operation is
- * failed. On success, it turns the index (> 0) of command in the
+ * The function returns < 0 values to indicate the operation
+ * failed. On success, it returns the index (>= 0) of command in the
* command queue.
*/
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
@@ -1619,10 +1628,9 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
txq = &trans_pcie->txq[txq_id];
q = &txq->q;
- if (unlikely(!test_bit(txq_id, trans_pcie->queue_used))) {
- WARN_ON_ONCE(1);
+ if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
+ "TX on unused queue %d\n", txq_id))
return -EINVAL;
- }
spin_lock(&txq->lock);
@@ -1632,7 +1640,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
* Check here that the packets are in the right place on the ring.
*/
wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
- WARN_ONCE(trans_pcie->txq[txq_id].ampdu &&
+ WARN_ONCE(txq->ampdu &&
(wifi_seq & 0xff) != q->write_ptr,
"Q: %d WiFi Seq %d tfdNum %d",
txq_id, wifi_seq, q->write_ptr);
@@ -1664,7 +1672,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
*/
len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
hdr_len - IWL_HCMD_SCRATCHBUF_SIZE;
- tb1_len = (len + 3) & ~3;
+ tb1_len = ALIGN(len, 4);
/* Tell NIC about any 2-byte padding after MAC header */
if (tb1_len != len)
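
The tb1_len change above replaces the open-coded (len + 3) & ~3 with ALIGN(len, 4); both round up to the next multiple of four. A tiny sketch showing the equivalence, with ALIGN re-defined locally for illustration:

#include <stdio.h>

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned int len;

        for (len = 20; len <= 24; len++)
                printf("len %2u -> ALIGN(len, 4) = %2u, (len + 3) & ~3 = %2u\n",
                       len, ALIGN(len, 4u), (len + 3) & ~3u);
        return 0;
}
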
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index efae07e05c8..6fef746345b 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -1017,7 +1017,7 @@ static int lbs_add_mesh(struct lbs_private *priv)
mesh_dev->netdev_ops = &mesh_netdev_ops;
mesh_dev->ethtool_ops = &lbs_ethtool_ops;
- memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, ETH_ALEN);
+ eth_hw_addr_inherit(mesh_dev, priv->dev);
SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index cb34c7895f2..2cd3f54e1ef 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -867,7 +867,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
if (WARN_ON(skb->len < 10)) {
/* Should not happen; just a sanity check for addr1 use */
- dev_kfree_skb(skb);
+ ieee80211_free_txskb(hw, skb);
return;
}
@@ -884,13 +884,13 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw,
}
if (WARN(!channel, "TX w/o channel - queue = %d\n", txi->hw_queue)) {
- dev_kfree_skb(skb);
+ ieee80211_free_txskb(hw, skb);
return;
}
if (data->idle && !data->tmp_chan) {
wiphy_debug(hw->wiphy, "Trying to TX when idle - reject\n");
- dev_kfree_skb(skb);
+ ieee80211_free_txskb(hw, skb);
return;
}
@@ -1364,6 +1364,7 @@ static const struct nla_policy hwsim_testmode_policy[HWSIM_TM_ATTR_MAX + 1] = {
static int hwsim_fops_ps_write(void *dat, u64 val);
static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
void *data, int len)
{
struct mac80211_hwsim_data *hwsim = hw->priv;
@@ -2309,7 +2310,9 @@ static int __init init_mac80211_hwsim(void)
hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
- WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ WIPHY_FLAG_AP_UAPSD;
+ hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
/* ask mac80211 to reserve space for magic */
hw->vif_data_size = sizeof(struct hwsim_vif_priv);
@@ -2525,8 +2528,10 @@ static int __init init_mac80211_hwsim(void)
}
hwsim_mon = alloc_netdev(0, "hwsim%d", hwsim_mon_setup);
- if (hwsim_mon == NULL)
+ if (hwsim_mon == NULL) {
+ err = -ENOMEM;
goto failed;
+ }
rtnl_lock();
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 41e9d25a2d8..0b803c05cab 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -292,6 +292,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
struct mwifiex_ie_types_extcap *ext_cap;
int ret_len = 0;
struct ieee80211_supported_band *sband;
+ struct ieee_types_header *hdr;
u8 radio_type;
if (!buffer || !*buffer)
@@ -388,17 +389,24 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
}
if (bss_desc->bcn_ext_cap) {
+ hdr = (void *)bss_desc->bcn_ext_cap;
ext_cap = (struct mwifiex_ie_types_extcap *) *buffer;
memset(ext_cap, 0, sizeof(struct mwifiex_ie_types_extcap));
ext_cap->header.type = cpu_to_le16(WLAN_EID_EXT_CAPABILITY);
- ext_cap->header.len = cpu_to_le16(sizeof(ext_cap->ext_cap));
+ ext_cap->header.len = cpu_to_le16(hdr->len);
- memcpy((u8 *)ext_cap + sizeof(struct mwifiex_ie_types_header),
+ memcpy((u8 *)ext_cap->ext_capab,
bss_desc->bcn_ext_cap + sizeof(struct ieee_types_header),
le16_to_cpu(ext_cap->header.len));
- *buffer += sizeof(struct mwifiex_ie_types_extcap);
- ret_len += sizeof(struct mwifiex_ie_types_extcap);
+ if (hdr->len > 3 &&
+ ext_cap->ext_capab[3] & WLAN_EXT_CAPA4_INTERWORKING_ENABLED)
+ priv->hs2_enabled = true;
+ else
+ priv->hs2_enabled = false;
+
+ *buffer += sizeof(struct mwifiex_ie_types_extcap) + hdr->len;
+ ret_len += sizeof(struct mwifiex_ie_types_extcap) + hdr->len;
}
return ret_len;
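
The hs2_enabled check added above tests the Interworking bit of the Extended Capabilities element, which is bit 31 of the element and therefore bit 7 of octet 3 (0-based). A small sketch of that test; the 0x80 mask is assumed to match WLAN_EXT_CAPA4_INTERWORKING_ENABLED:

#include <stdio.h>

/* Interworking: bit 31 of the Extended Capabilities element, i.e. bit 7
 * of octet 3; 0x80 is assumed equivalent to the kernel's
 * WLAN_EXT_CAPA4_INTERWORKING_ENABLED */
#define EXT_CAPA4_INTERWORKING  0x80

static int interworking_enabled(const unsigned char *ext_capab, int len)
{
        return len > 3 && (ext_capab[3] & EXT_CAPA4_INTERWORKING);
}

int main(void)
{
        const unsigned char with_iw[4]    = { 0x00, 0x00, 0x00, 0x80 };
        const unsigned char without_iw[3] = { 0x00, 0x00, 0x00 };

        printf("hs2_enabled = %d\n", interworking_enabled(with_iw, 4));    /* 1 */
        printf("hs2_enabled = %d\n", interworking_enabled(without_iw, 3)); /* 0 */
        return 0;
}
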
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index a78e0651409..21c68826470 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -69,7 +69,8 @@ mwifiex_11n_form_amsdu_pkt(struct sk_buff *skb_aggr,
memcpy(&tx_header->eth803_hdr, skb_src->data, dt_offset);
/* Copy SNAP header */
- snap.snap_type = *(u16 *) ((u8 *)skb_src->data + dt_offset);
+ snap.snap_type =
+ le16_to_cpu(*(__le16 *) ((u8 *)skb_src->data + dt_offset));
dt_offset += sizeof(u16);
memcpy(&tx_header->rfc1042_hdr, &snap, sizeof(struct rfc_1042_hdr));
@@ -189,7 +190,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
skb_src = skb_dequeue(&pra_list->skb_head);
- pra_list->total_pkts_size -= skb_src->len;
+ pra_list->total_pkt_count--;
atomic_dec(&priv->wmm.tx_pkts_queued);
@@ -268,7 +269,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
skb_queue_tail(&pra_list->skb_head, skb_aggr);
- pra_list->total_pkts_size += skb_aggr->len;
+ pra_list->total_pkt_count++;
atomic_inc(&priv->wmm.tx_pkts_queued);
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index ef5fa890a28..fbad00a5abc 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -25,7 +25,9 @@ module_param(reg_alpha2, charp, 0);
static const struct ieee80211_iface_limit mwifiex_ap_sta_limits[] = {
{
- .max = 2, .types = BIT(NL80211_IFTYPE_STATION),
+ .max = 2, .types = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
},
{
.max = 1, .types = BIT(NL80211_IFTYPE_AP),
@@ -189,6 +191,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct sk_buff *skb;
u16 pkt_len;
const struct ieee80211_mgmt *mgmt;
+ struct mwifiex_txinfo *tx_info;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
if (!buf || !len) {
@@ -216,6 +219,10 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
return -ENOMEM;
}
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ tx_info->bss_num = priv->bss_num;
+ tx_info->bss_type = priv->bss_type;
+
mwifiex_form_mgmt_frame(skb, buf, len);
mwifiex_queue_tx_pkt(priv, skb);
@@ -235,16 +242,20 @@ mwifiex_cfg80211_mgmt_frame_register(struct wiphy *wiphy,
u16 frame_type, bool reg)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(wdev->netdev);
+ u32 mask;
if (reg)
- priv->mgmt_frame_mask |= BIT(frame_type >> 4);
+ mask = priv->mgmt_frame_mask | BIT(frame_type >> 4);
else
- priv->mgmt_frame_mask &= ~BIT(frame_type >> 4);
-
- mwifiex_send_cmd_async(priv, HostCmd_CMD_MGMT_FRAME_REG,
- HostCmd_ACT_GEN_SET, 0, &priv->mgmt_frame_mask);
+ mask = priv->mgmt_frame_mask & ~BIT(frame_type >> 4);
- wiphy_dbg(wiphy, "info: mgmt frame registered\n");
+ if (mask != priv->mgmt_frame_mask) {
+ priv->mgmt_frame_mask = mask;
+ mwifiex_send_cmd_async(priv, HostCmd_CMD_MGMT_FRAME_REG,
+ HostCmd_ACT_GEN_SET, 0,
+ &priv->mgmt_frame_mask);
+ wiphy_dbg(wiphy, "info: mgmt frame registered\n");
+ }
}
/*
@@ -1497,6 +1508,7 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
" reason code %d\n", priv->cfg_bssid, reason_code);
memset(priv->cfg_bssid, 0, ETH_ALEN);
+ priv->hs2_enabled = false;
return 0;
}
@@ -1716,9 +1728,9 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
int ret;
- if (priv->bss_mode != NL80211_IFTYPE_STATION) {
+ if (GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) {
wiphy_err(wiphy,
- "%s: reject infra assoc request in non-STA mode\n",
+ "%s: reject infra assoc request in non-STA role\n",
dev->name);
return -EINVAL;
}
@@ -2296,10 +2308,9 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
}
EXPORT_SYMBOL_GPL(mwifiex_del_virtual_intf);
-#ifdef CONFIG_PM
static bool
-mwifiex_is_pattern_supported(struct cfg80211_wowlan_trig_pkt_pattern *pat,
- s8 *byte_seq)
+mwifiex_is_pattern_supported(struct cfg80211_pkt_pattern *pat, s8 *byte_seq,
+ u8 max_byte_seq)
{
int j, k, valid_byte_cnt = 0;
bool dont_care_byte = false;
@@ -2317,16 +2328,17 @@ mwifiex_is_pattern_supported(struct cfg80211_wowlan_trig_pkt_pattern *pat,
dont_care_byte = true;
}
- if (valid_byte_cnt > MAX_BYTESEQ)
+ if (valid_byte_cnt > max_byte_seq)
return false;
}
}
- byte_seq[MAX_BYTESEQ] = valid_byte_cnt;
+ byte_seq[max_byte_seq] = valid_byte_cnt;
return true;
}
+#ifdef CONFIG_PM
static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
struct cfg80211_wowlan *wowlan)
{
@@ -2335,7 +2347,7 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
struct mwifiex_mef_entry *mef_entry;
int i, filt_num = 0, ret;
bool first_pat = true;
- u8 byte_seq[MAX_BYTESEQ + 1];
+ u8 byte_seq[MWIFIEX_MEF_MAX_BYTESEQ + 1];
const u8 ipv4_mc_mac[] = {0x33, 0x33};
const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e};
struct mwifiex_private *priv =
@@ -2365,7 +2377,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
for (i = 0; i < wowlan->n_patterns; i++) {
memset(byte_seq, 0, sizeof(byte_seq));
if (!mwifiex_is_pattern_supported(&wowlan->patterns[i],
- byte_seq)) {
+ byte_seq,
+ MWIFIEX_MEF_MAX_BYTESEQ)) {
wiphy_err(wiphy, "Pattern not supported\n");
kfree(mef_entry);
return -EOPNOTSUPP;
@@ -2373,16 +2386,16 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
if (!wowlan->patterns[i].pkt_offset) {
if (!(byte_seq[0] & 0x01) &&
- (byte_seq[MAX_BYTESEQ] == 1)) {
+ (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 1)) {
mef_cfg.criteria |= MWIFIEX_CRITERIA_UNICAST;
continue;
} else if (is_broadcast_ether_addr(byte_seq)) {
mef_cfg.criteria |= MWIFIEX_CRITERIA_BROADCAST;
continue;
} else if ((!memcmp(byte_seq, ipv4_mc_mac, 2) &&
- (byte_seq[MAX_BYTESEQ] == 2)) ||
+ (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 2)) ||
(!memcmp(byte_seq, ipv6_mc_mac, 3) &&
- (byte_seq[MAX_BYTESEQ] == 3))) {
+ (byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] == 3))) {
mef_cfg.criteria |= MWIFIEX_CRITERIA_MULTICAST;
continue;
}
@@ -2408,7 +2421,8 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
mef_entry->filter[filt_num].repeat = 16;
memcpy(mef_entry->filter[filt_num].byte_seq, priv->curr_addr,
ETH_ALEN);
- mef_entry->filter[filt_num].byte_seq[MAX_BYTESEQ] = ETH_ALEN;
+ mef_entry->filter[filt_num].byte_seq[MWIFIEX_MEF_MAX_BYTESEQ] =
+ ETH_ALEN;
mef_entry->filter[filt_num].offset = 14;
mef_entry->filter[filt_num].filt_type = TYPE_EQ;
if (filt_num)
@@ -2442,6 +2456,119 @@ static void mwifiex_cfg80211_set_wakeup(struct wiphy *wiphy,
}
#endif
+static int mwifiex_get_coalesce_pkt_type(u8 *byte_seq)
+{
+ const u8 ipv4_mc_mac[] = {0x33, 0x33};
+ const u8 ipv6_mc_mac[] = {0x01, 0x00, 0x5e};
+ const u8 bc_mac[] = {0xff, 0xff, 0xff, 0xff};
+
+ if ((byte_seq[0] & 0x01) &&
+ (byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ] == 1))
+ return PACKET_TYPE_UNICAST;
+ else if (!memcmp(byte_seq, bc_mac, 4))
+ return PACKET_TYPE_BROADCAST;
+ else if ((!memcmp(byte_seq, ipv4_mc_mac, 2) &&
+ byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ] == 2) ||
+ (!memcmp(byte_seq, ipv6_mc_mac, 3) &&
+ byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ] == 3))
+ return PACKET_TYPE_MULTICAST;
+
+ return 0;
+}
+
+static int
+mwifiex_fill_coalesce_rule_info(struct mwifiex_private *priv,
+ struct cfg80211_coalesce_rules *crule,
+ struct mwifiex_coalesce_rule *mrule)
+{
+ u8 byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ + 1];
+ struct filt_field_param *param;
+ int i;
+
+ mrule->max_coalescing_delay = crule->delay;
+
+ param = mrule->params;
+
+ for (i = 0; i < crule->n_patterns; i++) {
+ memset(byte_seq, 0, sizeof(byte_seq));
+ if (!mwifiex_is_pattern_supported(&crule->patterns[i],
+ byte_seq,
+ MWIFIEX_COALESCE_MAX_BYTESEQ)) {
+ dev_err(priv->adapter->dev, "Pattern not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!crule->patterns[i].pkt_offset) {
+ u8 pkt_type;
+
+ pkt_type = mwifiex_get_coalesce_pkt_type(byte_seq);
+ if (pkt_type && mrule->pkt_type) {
+ dev_err(priv->adapter->dev,
+ "Multiple packet types not allowed\n");
+ return -EOPNOTSUPP;
+ } else if (pkt_type) {
+ mrule->pkt_type = pkt_type;
+ continue;
+ }
+ }
+
+ if (crule->condition == NL80211_COALESCE_CONDITION_MATCH)
+ param->operation = RECV_FILTER_MATCH_TYPE_EQ;
+ else
+ param->operation = RECV_FILTER_MATCH_TYPE_NE;
+
+ param->operand_len = byte_seq[MWIFIEX_COALESCE_MAX_BYTESEQ];
+ memcpy(param->operand_byte_stream, byte_seq,
+ param->operand_len);
+ param->offset = crule->patterns[i].pkt_offset;
+ param++;
+
+ mrule->num_of_fields++;
+ }
+
+ if (!mrule->pkt_type) {
+ dev_err(priv->adapter->dev,
+ "Packet type can not be determined\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
+ struct cfg80211_coalesce *coalesce)
+{
+ struct mwifiex_adapter *adapter = mwifiex_cfg80211_get_adapter(wiphy);
+ int i, ret;
+ struct mwifiex_ds_coalesce_cfg coalesce_cfg;
+ struct mwifiex_private *priv =
+ mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA);
+
+ memset(&coalesce_cfg, 0, sizeof(coalesce_cfg));
+ if (!coalesce) {
+ dev_dbg(adapter->dev,
+ "Disable coalesce and reset all previous rules\n");
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_COALESCE_CFG,
+ HostCmd_ACT_GEN_SET, 0,
+ &coalesce_cfg);
+ }
+
+ coalesce_cfg.num_of_rules = coalesce->n_rules;
+ for (i = 0; i < coalesce->n_rules; i++) {
+ ret = mwifiex_fill_coalesce_rule_info(priv, &coalesce->rules[i],
+ &coalesce_cfg.rule[i]);
+ if (ret) {
+ dev_err(priv->adapter->dev,
+ "Recheck the patterns provided for rule %d\n",
+ i + 1);
+ return ret;
+ }
+ }
+
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_COALESCE_CFG,
+ HostCmd_ACT_GEN_SET, 0, &coalesce_cfg);
+}
+
/* station cfg80211 operations */
static struct cfg80211_ops mwifiex_cfg80211_ops = {
.add_virtual_intf = mwifiex_add_virtual_intf,
@@ -2476,12 +2603,13 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.resume = mwifiex_cfg80211_resume,
.set_wakeup = mwifiex_cfg80211_set_wakeup,
#endif
+ .set_coalesce = mwifiex_cfg80211_set_coalesce,
};
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support mwifiex_wowlan_support = {
.flags = WIPHY_WOWLAN_MAGIC_PKT,
- .n_patterns = MWIFIEX_MAX_FILTERS,
+ .n_patterns = MWIFIEX_MEF_MAX_FILTERS,
.pattern_min_len = 1,
.pattern_max_len = MWIFIEX_MAX_PATTERN_LEN,
.max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN,
@@ -2499,6 +2627,15 @@ static bool mwifiex_is_valid_alpha2(const char *alpha2)
return false;
}
+static const struct wiphy_coalesce_support mwifiex_coalesce_support = {
+ .n_rules = MWIFIEX_COALESCE_MAX_RULES,
+ .max_delay = MWIFIEX_MAX_COALESCING_DELAY,
+ .n_patterns = MWIFIEX_COALESCE_MAX_FILTERS,
+ .pattern_min_len = 1,
+ .pattern_max_len = MWIFIEX_MAX_PATTERN_LEN,
+ .max_pkt_offset = MWIFIEX_MAX_OFFSET_LEN,
+};
+
/*
* This function registers the device with CFG802.11 subsystem.
*
@@ -2560,6 +2697,8 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
wiphy->wowlan = &mwifiex_wowlan_support;
#endif
+ wiphy->coalesce = &mwifiex_coalesce_support;
+
wiphy->probe_resp_offload = NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
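
mwifiex_cfg80211_mgmt_frame_register() above now computes the new registration mask first and only issues the host command when the mask actually changes. A minimal sketch of that change-detection pattern; update_mask() and the printf stand in for the real command path:

#include <stdio.h>

static unsigned int mgmt_frame_mask;

/* frame_type carries the 802.11 frame-control subtype in bits 4-7 */
static void update_mask(unsigned int frame_type, int reg)
{
        unsigned int mask;

        if (reg)
                mask = mgmt_frame_mask | (1u << (frame_type >> 4));
        else
                mask = mgmt_frame_mask & ~(1u << (frame_type >> 4));

        if (mask == mgmt_frame_mask)
                return;         /* nothing changed, skip the firmware command */

        mgmt_frame_mask = mask;
        printf("send MGMT_FRAME_REG, mask = 0x%x\n", mask);
}

int main(void)
{
        update_mask(0x40, 1);   /* probe request: command sent */
        update_mask(0x40, 1);   /* already registered: no command */
        update_mask(0x40, 0);   /* unregister: command sent */
        return 0;
}
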
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index 988552dece7..9eefacbc844 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -404,18 +404,51 @@ mwifiex_is_rate_auto(struct mwifiex_private *priv)
return false;
}
-/*
- * This function gets the supported data rates.
- *
- * The function works in both Ad-Hoc and infra mode by printing the
- * band and returning the data rates.
+/* This function gets the supported data rates from bitmask inside
+ * cfg80211_scan_request.
+ */
+u32 mwifiex_get_rates_from_cfg80211(struct mwifiex_private *priv,
+ u8 *rates, u8 radio_type)
+{
+ struct wiphy *wiphy = priv->adapter->wiphy;
+ struct cfg80211_scan_request *request = priv->scan_request;
+ u32 num_rates, rate_mask;
+ struct ieee80211_supported_band *sband;
+ int i;
+
+ if (radio_type) {
+ sband = wiphy->bands[IEEE80211_BAND_5GHZ];
+ if (WARN_ON_ONCE(!sband))
+ return 0;
+ rate_mask = request->rates[IEEE80211_BAND_5GHZ];
+ } else {
+ sband = wiphy->bands[IEEE80211_BAND_2GHZ];
+ if (WARN_ON_ONCE(!sband))
+ return 0;
+ rate_mask = request->rates[IEEE80211_BAND_2GHZ];
+ }
+
+ num_rates = 0;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((BIT(i) & rate_mask) == 0)
+ continue; /* skip rate */
+ rates[num_rates++] = (u8)(sband->bitrates[i].bitrate / 5);
+ }
+
+ return num_rates;
+}
+
+/* This function gets the supported data rates. The function works in
+ * both Ad-Hoc and infra mode by printing the band and returning the
+ * data rates.
*/
u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
{
u32 k = 0;
struct mwifiex_adapter *adapter = priv->adapter;
- if (priv->bss_mode == NL80211_IFTYPE_STATION) {
+ if (priv->bss_mode == NL80211_IFTYPE_STATION ||
+ priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
switch (adapter->config_bands) {
case BAND_B:
dev_dbg(adapter->dev, "info: infra band=%d "
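
mwifiex_get_rates_from_cfg80211() above walks the band's bitrate table and keeps only the rates whose bit is set in the cfg80211 rate mask, converting each from 100 kb/s units (as in struct ieee80211_rate) to 802.11 rate values in 500 kb/s units by dividing by 5. A standalone sketch with a shortened, illustrative rate table:

#include <stdio.h>

int main(void)
{
        /* bitrates in units of 100 kb/s; shortened 2.4 GHz table */
        static const int bitrates[] = { 10, 20, 55, 110, 60, 90, 120 };
        unsigned int rate_mask = 0x5b;          /* bits 0, 1, 3, 4, 6 set */
        unsigned char rates[8];
        unsigned int num_rates = 0, i;

        for (i = 0; i < sizeof(bitrates) / sizeof(bitrates[0]); i++) {
                if (!(rate_mask & (1u << i)))
                        continue;               /* rate not requested */
                rates[num_rates++] = (unsigned char)(bitrates[i] / 5);
        }

        for (i = 0; i < num_rates; i++)
                printf("rate[%u] = %u (x 500 kb/s)\n", i, rates[i]);
        return 0;
}
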
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 94cc09d4844..5c85d7803d0 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -26,6 +26,7 @@
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/ieee80211.h>
+#include <uapi/linux/if_arp.h>
#include <net/mac80211.h>
@@ -75,7 +76,8 @@
#define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0)
#define MWIFIEX_BUF_FLAG_BRIDGED_PKT BIT(1)
-#define MWIFIEX_BRIDGED_PKTS_THRESHOLD 1024
+#define MWIFIEX_BRIDGED_PKTS_THR_HIGH 1024
+#define MWIFIEX_BRIDGED_PKTS_THR_LOW 128
enum mwifiex_bss_type {
MWIFIEX_BSS_TYPE_STA = 0,
@@ -151,4 +153,12 @@ struct mwifiex_types_wmm_info {
u8 reserved;
struct ieee_types_wmm_ac_parameters ac_params[IEEE80211_NUM_ACS];
} __packed;
+
+struct mwifiex_arp_eth_header {
+ struct arphdr hdr;
+ u8 ar_sha[ETH_ALEN];
+ u8 ar_sip[4];
+ u8 ar_tha[ETH_ALEN];
+ u8 ar_tip[4];
+} __packed;
#endif /* !_MWIFIEX_DECL_H_ */
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 1b45aa53330..f80f30b6160 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -85,9 +85,6 @@ enum KEY_TYPE_ID {
#define WAPI_KEY_LEN 50
#define MAX_POLL_TRIES 100
-
-#define MAX_MULTI_INTERFACE_POLL_TRIES 1000
-
#define MAX_FIRMWARE_POLL_TRIES 100
#define FIRMWARE_READY_SDIO 0xfedc
@@ -156,6 +153,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_UAP_PS_AO_TIMER (PROPRIETARY_TLV_BASE_ID + 123)
#define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145)
#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146)
+#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154)
#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
@@ -297,6 +295,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_CAU_REG_ACCESS 0x00ed
#define HostCmd_CMD_SET_BSS_MODE 0x00f7
#define HostCmd_CMD_PCIE_DESC_DETAILS 0x00fa
+#define HostCmd_CMD_COALESCE_CFG 0x010a
#define HostCmd_CMD_MGMT_FRAME_REG 0x010c
#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d
#define HostCmd_CMD_11AC_CFG 0x0112
@@ -453,7 +452,7 @@ enum P2P_MODES {
(((event_cause) >> 24) & 0x00ff)
#define MWIFIEX_MAX_PATTERN_LEN 20
-#define MWIFIEX_MAX_OFFSET_LEN 50
+#define MWIFIEX_MAX_OFFSET_LEN 100
#define STACK_NBYTES 100
#define TYPE_DNUM 1
#define TYPE_BYTESEQ 2
@@ -1331,7 +1330,7 @@ struct mwifiex_ie_types_2040bssco {
struct mwifiex_ie_types_extcap {
struct mwifiex_ie_types_header header;
- u8 ext_cap;
+ u8 ext_capab[0];
} __packed;
struct host_cmd_ds_mac_reg_access {
@@ -1369,11 +1368,6 @@ struct host_cmd_ds_802_11_eeprom_access {
u8 value;
} __packed;
-struct host_cmd_tlv {
- __le16 type;
- __le16 len;
-} __packed;
-
struct mwifiex_assoc_event {
u8 sta_addr[ETH_ALEN];
__le16 type;
@@ -1399,99 +1393,99 @@ struct host_cmd_11ac_vht_cfg {
} __packed;
struct host_cmd_tlv_akmp {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
__le16 key_mgmt;
__le16 key_mgmt_operation;
} __packed;
struct host_cmd_tlv_pwk_cipher {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
__le16 proto;
u8 cipher;
u8 reserved;
} __packed;
struct host_cmd_tlv_gwk_cipher {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 cipher;
u8 reserved;
} __packed;
struct host_cmd_tlv_passphrase {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 passphrase[0];
} __packed;
struct host_cmd_tlv_wep_key {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 key_index;
u8 is_default;
u8 key[1];
};
struct host_cmd_tlv_auth_type {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 auth_type;
} __packed;
struct host_cmd_tlv_encrypt_protocol {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
__le16 proto;
} __packed;
struct host_cmd_tlv_ssid {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 ssid[0];
} __packed;
struct host_cmd_tlv_rates {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 rates[0];
} __packed;
struct host_cmd_tlv_bcast_ssid {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 bcast_ctl;
} __packed;
struct host_cmd_tlv_beacon_period {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
__le16 period;
} __packed;
struct host_cmd_tlv_dtim_period {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 period;
} __packed;
struct host_cmd_tlv_frag_threshold {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
__le16 frag_thr;
} __packed;
struct host_cmd_tlv_rts_threshold {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
__le16 rts_thr;
} __packed;
struct host_cmd_tlv_retry_limit {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 limit;
} __packed;
struct host_cmd_tlv_mac_addr {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 mac_addr[ETH_ALEN];
} __packed;
struct host_cmd_tlv_channel_band {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
u8 band_config;
u8 channel;
} __packed;
struct host_cmd_tlv_ageout_timer {
- struct host_cmd_tlv tlv;
+ struct mwifiex_ie_types_header header;
__le32 sta_ao_timer;
} __packed;
@@ -1604,6 +1598,27 @@ struct host_cmd_ds_802_11_cfg_data {
__le16 data_len;
} __packed;
+struct coalesce_filt_field_param {
+ u8 operation;
+ u8 operand_len;
+ __le16 offset;
+ u8 operand_byte_stream[4];
+};
+
+struct coalesce_receive_filt_rule {
+ struct mwifiex_ie_types_header header;
+ u8 num_of_fields;
+ u8 pkt_type;
+ __le16 max_coalescing_delay;
+ struct coalesce_filt_field_param params[0];
+} __packed;
+
+struct host_cmd_ds_coalesce_cfg {
+ __le16 action;
+ __le16 num_of_rules;
+ struct coalesce_receive_filt_rule rule[0];
+} __packed;
+
struct host_cmd_ds_command {
__le16 command;
__le16 size;
@@ -1664,6 +1679,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_sta_deauth sta_deauth;
struct host_cmd_11ac_vht_cfg vht_cfg;
struct host_cmd_ds_802_11_cfg_data cfg_data;
+ struct host_cmd_ds_coalesce_cfg coalesce_cfg;
} params;
} __packed;
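With host_cmd_tlv removed, every command TLV in fw.h now starts with the same four-byte mwifiex_ie_types_header: a little-endian type followed by a little-endian payload length. A hedged sketch of how such a buffer can be walked, assuming only that header layout; the struct name, helper name and bounds policy here are illustrative:

#include <linux/types.h>
#include <linux/kernel.h>

struct example_tlv_header {	/* same layout as mwifiex_ie_types_header */
	__le16 type;
	__le16 len;
} __packed;

/* Count the TLVs in a buffer of 'total' bytes. The 'len' field covers only
 * the payload, not the four-byte header itself.
 */
static int example_count_tlvs(const u8 *buf, u16 total)
{
	int count = 0;

	while (total >= sizeof(struct example_tlv_header)) {
		const struct example_tlv_header *hdr = (const void *)buf;
		u16 tlv_len = sizeof(*hdr) + le16_to_cpu(hdr->len);

		if (tlv_len > total)
			break;		/* truncated TLV, stop parsing */

		count++;
		buf += tlv_len;
		total -= tlv_len;
	}

	return count;
}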
diff --git a/drivers/net/wireless/mwifiex/ie.c b/drivers/net/wireless/mwifiex/ie.c
index e38342f86c5..220af4fe0fc 100644
--- a/drivers/net/wireless/mwifiex/ie.c
+++ b/drivers/net/wireless/mwifiex/ie.c
@@ -87,7 +87,7 @@ mwifiex_update_autoindex_ies(struct mwifiex_private *priv,
u8 *tmp;
input_len = le16_to_cpu(ie_list->len);
- travel_len = sizeof(struct host_cmd_tlv);
+ travel_len = sizeof(struct mwifiex_ie_types_header);
ie_list->len = 0;
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index caaf4bd56b3..6499117fce4 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -135,6 +135,8 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
priv->csa_chan = 0;
priv->csa_expire_time = 0;
+ priv->del_list_idx = 0;
+ priv->hs2_enabled = false;
return mwifiex_add_bss_prio_tbl(priv);
}
@@ -377,18 +379,11 @@ static void mwifiex_free_lock_list(struct mwifiex_adapter *adapter)
static void
mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
{
- int i;
-
if (!adapter) {
pr_err("%s: adapter is NULL\n", __func__);
return;
}
- for (i = 0; i < adapter->priv_num; i++) {
- if (adapter->priv[i])
- del_timer_sync(&adapter->priv[i]->scan_delay_timer);
- }
-
mwifiex_cancel_all_pending_cmd(adapter);
/* Free lock variables */
@@ -398,13 +393,8 @@ mwifiex_adapter_cleanup(struct mwifiex_adapter *adapter)
dev_dbg(adapter->dev, "info: free cmd buffer\n");
mwifiex_free_cmd_buffer(adapter);
- del_timer(&adapter->cmd_timer);
-
dev_dbg(adapter->dev, "info: free scan table\n");
- if (adapter->if_ops.cleanup_if)
- adapter->if_ops.cleanup_if(adapter);
-
if (adapter->sleep_cfm)
dev_kfree_skb_any(adapter->sleep_cfm);
}
@@ -693,7 +683,7 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
if (!ret) {
dev_notice(adapter->dev,
"WLAN FW already running! Skip FW dnld\n");
- goto done;
+ return 0;
}
poll_num = MAX_FIRMWARE_POLL_TRIES;
@@ -702,7 +692,6 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
if (!adapter->winner) {
dev_notice(adapter->dev,
"FW already running! Skip FW dnld\n");
- poll_num = MAX_MULTI_INTERFACE_POLL_TRIES;
goto poll_fw;
}
}
@@ -719,14 +708,8 @@ int mwifiex_dnld_fw(struct mwifiex_adapter *adapter,
poll_fw:
/* Check if the firmware is downloaded successfully or not */
ret = adapter->if_ops.check_fw_status(adapter, poll_num);
- if (ret) {
+ if (ret)
dev_err(adapter->dev, "FW failed to be active in time\n");
- return -1;
- }
-done:
- /* re-enable host interrupt for mwifiex after fw dnld is successful */
- if (adapter->if_ops.enable_int)
- adapter->if_ops.enable_int(adapter);
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index 7f27e45680b..00a95f4c6a6 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -362,13 +362,13 @@ struct mwifiex_ds_misc_subsc_evt {
struct subsc_evt_cfg bcn_h_rssi_cfg;
};
-#define MAX_BYTESEQ 6 /* non-adjustable */
-#define MWIFIEX_MAX_FILTERS 10
+#define MWIFIEX_MEF_MAX_BYTESEQ 6 /* non-adjustable */
+#define MWIFIEX_MEF_MAX_FILTERS 10
struct mwifiex_mef_filter {
u16 repeat;
u16 offset;
- s8 byte_seq[MAX_BYTESEQ + 1];
+ s8 byte_seq[MWIFIEX_MEF_MAX_BYTESEQ + 1];
u8 filt_type;
u8 filt_action;
};
@@ -376,7 +376,7 @@ struct mwifiex_mef_filter {
struct mwifiex_mef_entry {
u8 mode;
u8 action;
- struct mwifiex_mef_filter filter[MWIFIEX_MAX_FILTERS];
+ struct mwifiex_mef_filter filter[MWIFIEX_MEF_MAX_FILTERS];
};
struct mwifiex_ds_mef_cfg {
@@ -397,4 +397,39 @@ enum {
MWIFIEX_FUNC_SHUTDOWN,
};
+enum COALESCE_OPERATION {
+ RECV_FILTER_MATCH_TYPE_EQ = 0x80,
+ RECV_FILTER_MATCH_TYPE_NE,
+};
+
+enum COALESCE_PACKET_TYPE {
+ PACKET_TYPE_UNICAST = 1,
+ PACKET_TYPE_MULTICAST = 2,
+ PACKET_TYPE_BROADCAST = 3
+};
+
+#define MWIFIEX_COALESCE_MAX_RULES 8
+#define MWIFIEX_COALESCE_MAX_BYTESEQ 4 /* non-adjustable */
+#define MWIFIEX_COALESCE_MAX_FILTERS 4
+#define MWIFIEX_MAX_COALESCING_DELAY 100 /* in msecs */
+
+struct filt_field_param {
+ u8 operation;
+ u8 operand_len;
+ u16 offset;
+ u8 operand_byte_stream[MWIFIEX_COALESCE_MAX_BYTESEQ];
+};
+
+struct mwifiex_coalesce_rule {
+ u16 max_coalescing_delay;
+ u8 num_of_fields;
+ u8 pkt_type;
+ struct filt_field_param params[MWIFIEX_COALESCE_MAX_FILTERS];
+};
+
+struct mwifiex_ds_coalesce_cfg {
+ u16 num_of_rules;
+ struct mwifiex_coalesce_rule rule[MWIFIEX_COALESCE_MAX_RULES];
+};
+
#endif /* !_MWIFIEX_IOCTL_H_ */
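A hedged example of how the new coalesce structures above might be filled for a single rule (one broadcast-packet filter matching a two-byte pattern at a fixed offset). Only the structure layout, enums and limits come from the patch; the pattern, offset and helper name are made up for illustration:

/* Assumes the mwifiex ioctl.h definitions above are in scope. */
#include <linux/string.h>

static void example_fill_coalesce_cfg(struct mwifiex_ds_coalesce_cfg *cfg)
{
	struct mwifiex_coalesce_rule *rule;

	memset(cfg, 0, sizeof(*cfg));
	cfg->num_of_rules = 1;

	rule = &cfg->rule[0];
	rule->max_coalescing_delay = MWIFIEX_MAX_COALESCING_DELAY; /* 100 ms */
	rule->pkt_type = PACKET_TYPE_BROADCAST;
	rule->num_of_fields = 1;

	/* match two bytes at offset 20 of the received frame */
	rule->params[0].operation = RECV_FILTER_MATCH_TYPE_EQ;
	rule->params[0].operand_len = 2;
	rule->params[0].offset = 20;
	rule->params[0].operand_byte_stream[0] = 0x08;
	rule->params[0].operand_byte_stream[1] = 0x06;
}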
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 1c8a771e8e8..9d7c0e6c4fc 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1291,8 +1291,10 @@ int mwifiex_associate(struct mwifiex_private *priv,
{
u8 current_bssid[ETH_ALEN];
- /* Return error if the adapter or table entry is not marked as infra */
- if ((priv->bss_mode != NL80211_IFTYPE_STATION) ||
+ /* Return error if the adapter is not STA role or table entry
+ * is not marked as infra.
+ */
+ if ((GET_BSS_ROLE(priv) != MWIFIEX_BSS_ROLE_STA) ||
(bss_desc->bss_mode != NL80211_IFTYPE_STATION))
return -1;
@@ -1425,6 +1427,7 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
switch (priv->bss_mode) {
case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
return mwifiex_deauthenticate_infra(priv, mac);
case NL80211_IFTYPE_ADHOC:
return mwifiex_send_cmd_sync(priv,
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index e15ab72fb03..fd778337dee 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -191,12 +191,16 @@ static int mwifiex_unregister(struct mwifiex_adapter *adapter)
{
s32 i;
+ if (adapter->if_ops.cleanup_if)
+ adapter->if_ops.cleanup_if(adapter);
+
del_timer(&adapter->cmd_timer);
/* Free private structures */
for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i]) {
mwifiex_free_curr_bcn(adapter->priv[i]);
+ del_timer_sync(&adapter->priv[i]->scan_delay_timer);
kfree(adapter->priv[i]);
}
}
@@ -386,6 +390,17 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter)
}
/*
+ * This function cancels all works in the queue and destroys
+ * the main workqueue.
+ */
+static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
+{
+ flush_workqueue(adapter->workqueue);
+ destroy_workqueue(adapter->workqueue);
+ adapter->workqueue = NULL;
+}
+
+/*
* This function gets firmware and initializes it.
*
* The main initialization steps followed are -
@@ -394,16 +409,18 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter)
*/
static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
{
- int ret;
+ int ret, i;
char fmt[64];
struct mwifiex_private *priv;
struct mwifiex_adapter *adapter = context;
struct mwifiex_fw_image fw;
+ struct semaphore *sem = adapter->card_sem;
+ bool init_failed = false;
if (!firmware) {
dev_err(adapter->dev,
"Failed to get firmware %s\n", adapter->fw_name);
- goto done;
+ goto err_dnld_fw;
}
memset(&fw, 0, sizeof(struct mwifiex_fw_image));
@@ -416,7 +433,7 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
else
ret = mwifiex_dnld_fw(adapter, &fw);
if (ret == -1)
- goto done;
+ goto err_dnld_fw;
dev_notice(adapter->dev, "WLAN FW is active\n");
@@ -427,10 +444,16 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
"Cal data request_firmware() failed\n");
}
+ /* enable host interrupt after fw dnld is successful */
+ if (adapter->if_ops.enable_int) {
+ if (adapter->if_ops.enable_int(adapter))
+ goto err_dnld_fw;
+ }
+
adapter->init_wait_q_woken = false;
ret = mwifiex_init_fw(adapter);
if (ret == -1) {
- goto done;
+ goto err_init_fw;
} else if (!ret) {
adapter->hw_status = MWIFIEX_HW_STATUS_READY;
goto done;
@@ -439,12 +462,12 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
wait_event_interruptible(adapter->init_wait_q,
adapter->init_wait_q_woken);
if (adapter->hw_status != MWIFIEX_HW_STATUS_READY)
- goto done;
+ goto err_init_fw;
priv = adapter->priv[MWIFIEX_BSS_ROLE_STA];
if (mwifiex_register_cfg80211(adapter)) {
dev_err(adapter->dev, "cannot register with cfg80211\n");
- goto err_init_fw;
+ goto err_register_cfg80211;
}
rtnl_lock();
@@ -454,20 +477,6 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
dev_err(adapter->dev, "cannot create default STA interface\n");
goto err_add_intf;
}
-
- /* Create AP interface by default */
- if (!mwifiex_add_virtual_intf(adapter->wiphy, "uap%d",
- NL80211_IFTYPE_AP, NULL, NULL)) {
- dev_err(adapter->dev, "cannot create default AP interface\n");
- goto err_add_intf;
- }
-
- /* Create P2P interface by default */
- if (!mwifiex_add_virtual_intf(adapter->wiphy, "p2p%d",
- NL80211_IFTYPE_P2P_CLIENT, NULL, NULL)) {
- dev_err(adapter->dev, "cannot create default P2P interface\n");
- goto err_add_intf;
- }
rtnl_unlock();
mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
@@ -475,18 +484,52 @@ static void mwifiex_fw_dpc(const struct firmware *firmware, void *context)
goto done;
err_add_intf:
- mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev);
+ for (i = 0; i < adapter->priv_num; i++) {
+ priv = adapter->priv[i];
+
+ if (!priv)
+ continue;
+
+ if (priv->wdev && priv->netdev)
+ mwifiex_del_virtual_intf(adapter->wiphy, priv->wdev);
+ }
rtnl_unlock();
+err_register_cfg80211:
+ wiphy_unregister(adapter->wiphy);
+ wiphy_free(adapter->wiphy);
err_init_fw:
+ if (adapter->if_ops.disable_int)
+ adapter->if_ops.disable_int(adapter);
+err_dnld_fw:
pr_debug("info: %s: unregister device\n", __func__);
- adapter->if_ops.unregister_dev(adapter);
+ if (adapter->if_ops.unregister_dev)
+ adapter->if_ops.unregister_dev(adapter);
+
+ if ((adapter->hw_status == MWIFIEX_HW_STATUS_FW_READY) ||
+ (adapter->hw_status == MWIFIEX_HW_STATUS_READY)) {
+ pr_debug("info: %s: shutdown mwifiex\n", __func__);
+ adapter->init_wait_q_woken = false;
+
+ if (mwifiex_shutdown_drv(adapter) == -EINPROGRESS)
+ wait_event_interruptible(adapter->init_wait_q,
+ adapter->init_wait_q_woken);
+ }
+ adapter->surprise_removed = true;
+ mwifiex_terminate_workqueue(adapter);
+ init_failed = true;
done:
if (adapter->cal_data) {
release_firmware(adapter->cal_data);
adapter->cal_data = NULL;
}
- release_firmware(adapter->firmware);
+ if (adapter->firmware) {
+ release_firmware(adapter->firmware);
+ adapter->firmware = NULL;
+ }
complete(&adapter->fw_load);
+ if (init_failed)
+ mwifiex_free_adapter(adapter);
+ up(sem);
return;
}
@@ -797,18 +840,6 @@ static void mwifiex_main_work_queue(struct work_struct *work)
}
/*
- * This function cancels all works in the queue and destroys
- * the main workqueue.
- */
-static void
-mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
-{
- flush_workqueue(adapter->workqueue);
- destroy_workqueue(adapter->workqueue);
- adapter->workqueue = NULL;
-}
-
-/*
* This function adds the card.
*
* This function follows the following major steps to set up the device -
@@ -836,6 +867,7 @@ mwifiex_add_card(void *card, struct semaphore *sem,
}
adapter->iface_type = iface_type;
+ adapter->card_sem = sem;
adapter->hw_status = MWIFIEX_HW_STATUS_INITIALIZING;
adapter->surprise_removed = false;
@@ -855,7 +887,7 @@ mwifiex_add_card(void *card, struct semaphore *sem,
INIT_WORK(&adapter->main_work, mwifiex_main_work_queue);
/* Register the device. Fill up the private data structure with relevant
- information from the card and request for the required IRQ. */
+ information from the card. */
if (adapter->if_ops.register_dev(adapter)) {
pr_err("%s: failed to register mwifiex device\n", __func__);
goto err_registerdev;
@@ -866,17 +898,12 @@ mwifiex_add_card(void *card, struct semaphore *sem,
goto err_init_fw;
}
- up(sem);
return 0;
err_init_fw:
pr_debug("info: %s: unregister device\n", __func__);
if (adapter->if_ops.unregister_dev)
adapter->if_ops.unregister_dev(adapter);
-err_registerdev:
- adapter->surprise_removed = true;
- mwifiex_terminate_workqueue(adapter);
-err_kmalloc:
if ((adapter->hw_status == MWIFIEX_HW_STATUS_FW_READY) ||
(adapter->hw_status == MWIFIEX_HW_STATUS_READY)) {
pr_debug("info: %s: shutdown mwifiex\n", __func__);
@@ -886,7 +913,10 @@ err_kmalloc:
wait_event_interruptible(adapter->init_wait_q,
adapter->init_wait_q_woken);
}
-
+err_registerdev:
+ adapter->surprise_removed = true;
+ mwifiex_terminate_workqueue(adapter);
+err_kmalloc:
mwifiex_free_adapter(adapter);
err_init_sw:
@@ -919,6 +949,11 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
if (!adapter)
goto exit_remove;
+ /* We can no longer handle interrupts once we start doing the teardown
+ * below. */
+ if (adapter->if_ops.disable_int)
+ adapter->if_ops.disable_int(adapter);
+
adapter->surprise_removed = true;
/* Stop data */
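The reworked mwifiex_fw_dpc() error path above uses the usual kernel idiom of one label per completed setup step, unwound in reverse order (err_add_intf, err_register_cfg80211, err_init_fw, err_dnld_fw). A minimal generic sketch of that shape, with placeholder step and undo functions standing in for firmware download, interrupt enable and firmware init:

#include <linux/errno.h>

struct example_ctx { int dummy; };

/* Placeholder steps; each pairs with an undo helper below. */
static int example_step_a(struct example_ctx *c) { return 0; }
static int example_step_b(struct example_ctx *c) { return 0; }
static int example_step_c(struct example_ctx *c) { return 0; }
static void example_undo_b(struct example_ctx *c) { }
static void example_undo_a(struct example_ctx *c) { }

/* One label per completed step, unwound in reverse order of setup. */
static int example_bring_up(struct example_ctx *c)
{
	int ret;

	ret = example_step_a(c);
	if (ret)
		return ret;

	ret = example_step_b(c);
	if (ret)
		goto err_a;

	ret = example_step_c(c);
	if (ret)
		goto err_b;

	return 0;

err_b:
	example_undo_b(c);
err_a:
	example_undo_a(c);
	return ret;
}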
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 3da73d36acd..1d72f13adb9 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -204,11 +204,11 @@ struct mwifiex_ra_list_tbl {
struct list_head list;
struct sk_buff_head skb_head;
u8 ra[ETH_ALEN];
- u32 total_pkts_size;
u32 is_11n_enabled;
u16 max_amsdu;
- u16 pkt_count;
+ u16 ba_pkt_count;
u8 ba_packet_thr;
+ u16 total_pkt_count;
};
struct mwifiex_tid_tbl {
@@ -515,6 +515,8 @@ struct mwifiex_private {
bool scan_aborting;
u8 csa_chan;
unsigned long csa_expire_time;
+ u8 del_list_idx;
+ bool hs2_enabled;
};
enum mwifiex_ba_status {
@@ -601,6 +603,7 @@ struct mwifiex_if_ops {
int (*register_dev) (struct mwifiex_adapter *);
void (*unregister_dev) (struct mwifiex_adapter *);
int (*enable_int) (struct mwifiex_adapter *);
+ void (*disable_int) (struct mwifiex_adapter *);
int (*process_int_status) (struct mwifiex_adapter *);
int (*host_to_card) (struct mwifiex_adapter *, u8, struct sk_buff *,
struct mwifiex_tx_param *);
@@ -747,6 +750,7 @@ struct mwifiex_adapter {
atomic_t is_tx_received;
atomic_t pending_bridged_pkts;
+ struct semaphore *card_sem;
};
int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -899,6 +903,8 @@ int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask,
u32 mwifiex_get_active_data_rates(struct mwifiex_private *priv,
u8 *rates);
u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates);
+u32 mwifiex_get_rates_from_cfg80211(struct mwifiex_private *priv,
+ u8 *rates, u8 radio_type);
u8 mwifiex_is_rate_auto(struct mwifiex_private *priv);
extern u16 region_code_index[MWIFIEX_MAX_REGION_CODE];
void mwifiex_save_curr_bcn(struct mwifiex_private *priv);
@@ -1020,7 +1026,7 @@ mwifiex_netdev_get_priv(struct net_device *dev)
*/
static inline bool mwifiex_is_skb_mgmt_frame(struct sk_buff *skb)
{
- return (*(u32 *)skb->data == PKT_TYPE_MGMT);
+ return (le32_to_cpu(*(__le32 *)skb->data) == PKT_TYPE_MGMT);
}
/* This function retrieves channel closed for operation by Channel
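The mwifiex_is_skb_mgmt_frame() change matters only on big-endian hosts, where the old plain u32 load compared a byte-swapped value against PKT_TYPE_MGMT. A small sketch of the endian-safe read, assuming nothing more than a buffer that starts with a little-endian 32-bit type word; the constant below is a placeholder, not the driver's real value:

#include <linux/types.h>
#include <asm/unaligned.h>

#define EXAMPLE_PKT_TYPE_MGMT	0xe5	/* placeholder, not the real value */

/* Endian-safe check of a little-endian 32-bit type word at the start of a
 * buffer. On little-endian CPUs this is the same load as the old cast; on
 * big-endian CPUs it adds the byte swap the old code was missing.
 */
static bool example_is_mgmt_frame(const u8 *data)
{
	return get_unaligned_le32(data) == EXAMPLE_PKT_TYPE_MGMT;
}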
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 20c9c4c7b0b..52da8ee7599 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -76,7 +76,7 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
return false;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/*
* Kernel needs to suspend all functions separately. Therefore all
* registered functions must have drivers with suspend and resume
@@ -85,11 +85,12 @@ static bool mwifiex_pcie_ok_to_access_hw(struct mwifiex_adapter *adapter)
* If already not suspended, this function allocates and sends a host
* sleep activate request to the firmware and turns off the traffic.
*/
-static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
+static int mwifiex_pcie_suspend(struct device *dev)
{
struct mwifiex_adapter *adapter;
struct pcie_service_card *card;
int hs_actived;
+ struct pci_dev *pdev = to_pci_dev(dev);
if (pdev) {
card = (struct pcie_service_card *) pci_get_drvdata(pdev);
@@ -120,10 +121,11 @@ static int mwifiex_pcie_suspend(struct pci_dev *pdev, pm_message_t state)
* If already not resumed, this function turns on the traffic and
* sends a host sleep cancel request to the firmware.
*/
-static int mwifiex_pcie_resume(struct pci_dev *pdev)
+static int mwifiex_pcie_resume(struct device *dev)
{
struct mwifiex_adapter *adapter;
struct pcie_service_card *card;
+ struct pci_dev *pdev = to_pci_dev(dev);
if (pdev) {
card = (struct pcie_service_card *) pci_get_drvdata(pdev);
@@ -211,9 +213,9 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
wait_for_completion(&adapter->fw_load);
if (user_rmmod) {
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
if (adapter->is_suspended)
- mwifiex_pcie_resume(pdev);
+ mwifiex_pcie_resume(&pdev->dev);
#endif
for (i = 0; i < adapter->priv_num; i++)
@@ -233,6 +235,14 @@ static void mwifiex_pcie_remove(struct pci_dev *pdev)
kfree(card);
}
+static void mwifiex_pcie_shutdown(struct pci_dev *pdev)
+{
+ user_rmmod = 1;
+ mwifiex_pcie_remove(pdev);
+
+ return;
+}
+
static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = {
{
PCIE_VENDOR_ID_MARVELL, PCIE_DEVICE_ID_MARVELL_88W8766P,
@@ -249,17 +259,24 @@ static DEFINE_PCI_DEVICE_TABLE(mwifiex_ids) = {
MODULE_DEVICE_TABLE(pci, mwifiex_ids);
+#ifdef CONFIG_PM_SLEEP
+/* Power Management Hooks */
+static SIMPLE_DEV_PM_OPS(mwifiex_pcie_pm_ops, mwifiex_pcie_suspend,
+ mwifiex_pcie_resume);
+#endif
+
/* PCI Device Driver */
static struct pci_driver __refdata mwifiex_pcie = {
.name = "mwifiex_pcie",
.id_table = mwifiex_ids,
.probe = mwifiex_pcie_probe,
.remove = mwifiex_pcie_remove,
-#ifdef CONFIG_PM
- /* Power Management Hooks */
- .suspend = mwifiex_pcie_suspend,
- .resume = mwifiex_pcie_resume,
+#ifdef CONFIG_PM_SLEEP
+ .driver = {
+ .pm = &mwifiex_pcie_pm_ops,
+ },
#endif
+ .shutdown = mwifiex_pcie_shutdown,
};
/*
@@ -1925,7 +1942,7 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
ret = 0;
break;
} else {
- mdelay(100);
+ msleep(100);
ret = -1;
}
}
@@ -1937,12 +1954,10 @@ mwifiex_check_fw_status(struct mwifiex_adapter *adapter, u32 poll_num)
else if (!winner_status) {
dev_err(adapter->dev, "PCI-E is the winner\n");
adapter->winner = 1;
- ret = -1;
} else {
dev_err(adapter->dev,
"PCI-E is not the winner <%#x,%d>, exit dnld\n",
ret, adapter->winner);
- ret = 0;
}
}
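The legacy .suspend/.resume to dev_pm_ops conversion above takes the same shape in any PCI driver: the callbacks receive a struct device, recover the pci_dev with to_pci_dev(), and the table built by SIMPLE_DEV_PM_OPS() is wired up through .driver.pm. A minimal generic sketch with the device-specific work elided; all names here are placeholders:

#include <linux/pci.h>
#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* device-specific: quiesce the hardware behind 'pdev' */
	dev_dbg(&pdev->dev, "suspending\n");
	return 0;
}

static int example_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* device-specific: bring the hardware behind 'pdev' back up */
	dev_dbg(&pdev->dev, "resuming\n");
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct pci_driver example_pcie_driver = {
	.name	= "example_pcie",
	.driver	= {
		.pm = &example_pm_ops,
	},
	/* .id_table, .probe and .remove omitted from this sketch */
};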
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index c447d9bd1aa..8cf7d50a760 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -543,6 +543,37 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
return chan_idx;
}
+/* This function appends the rates TLV to the scan config command. */

+static int
+mwifiex_append_rate_tlv(struct mwifiex_private *priv,
+ struct mwifiex_scan_cmd_config *scan_cfg_out,
+ u8 radio)
+{
+ struct mwifiex_ie_types_rates_param_set *rates_tlv;
+ u8 rates[MWIFIEX_SUPPORTED_RATES], *tlv_pos;
+ u32 rates_size;
+
+ memset(rates, 0, sizeof(rates));
+
+ tlv_pos = (u8 *)scan_cfg_out->tlv_buf + scan_cfg_out->tlv_buf_len;
+
+ if (priv->scan_request)
+ rates_size = mwifiex_get_rates_from_cfg80211(priv, rates,
+ radio);
+ else
+ rates_size = mwifiex_get_supported_rates(priv, rates);
+
+ dev_dbg(priv->adapter->dev, "info: SCAN_CMD: Rates size = %d\n",
+ rates_size);
+ rates_tlv = (struct mwifiex_ie_types_rates_param_set *)tlv_pos;
+ rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
+ rates_tlv->header.len = cpu_to_le16((u16) rates_size);
+ memcpy(rates_tlv->rates, rates, rates_size);
+ scan_cfg_out->tlv_buf_len += sizeof(rates_tlv->header) + rates_size;
+
+ return rates_size;
+}
+
/*
* This function constructs and sends multiple scan config commands to
* the firmware.
@@ -564,9 +595,10 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
struct mwifiex_chan_scan_param_set *tmp_chan_list;
struct mwifiex_chan_scan_param_set *start_chan;
- u32 tlv_idx;
+ u32 tlv_idx, rates_size;
u32 total_scan_time;
u32 done_early;
+ u8 radio_type;
if (!scan_cfg_out || !chan_tlv_out || !scan_chan_list) {
dev_dbg(priv->adapter->dev,
@@ -591,6 +623,7 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
tlv_idx = 0;
total_scan_time = 0;
+ radio_type = 0;
chan_tlv_out->header.len = 0;
start_chan = tmp_chan_list;
done_early = false;
@@ -612,6 +645,7 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
continue;
}
+ radio_type = tmp_chan_list->radio_type;
dev_dbg(priv->adapter->dev,
"info: Scan: Chan(%3d), Radio(%d),"
" Mode(%d, %d), Dur(%d)\n",
@@ -692,6 +726,9 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
break;
}
+ rates_size = mwifiex_append_rate_tlv(priv, scan_cfg_out,
+ radio_type);
+
priv->adapter->scan_channels = start_chan;
/* Send the scan command to the firmware with the specified
@@ -699,6 +736,14 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SCAN,
HostCmd_ACT_GEN_SET, 0,
scan_cfg_out);
+
+ /* The rates IE is rebuilt for every scan command, but the same
+ * starting pointer is used each time so that the rates IE from the
+ * earlier scan_cfg_out->buf is overwritten with the new one.
+ */
+ scan_cfg_out->tlv_buf_len -=
+ sizeof(struct mwifiex_ie_types_header) + rates_size;
+
if (ret)
break;
}
@@ -741,7 +786,6 @@ mwifiex_config_scan(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_ie_types_num_probes *num_probes_tlv;
struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
- struct mwifiex_ie_types_rates_param_set *rates_tlv;
u8 *tlv_pos;
u32 num_probes;
u32 ssid_len;
@@ -753,8 +797,6 @@ mwifiex_config_scan(struct mwifiex_private *priv,
u8 radio_type;
int i;
u8 ssid_filter;
- u8 rates[MWIFIEX_SUPPORTED_RATES];
- u32 rates_size;
struct mwifiex_ie_types_htcap *ht_cap;
/* The tlv_buf_len is calculated for each scan command. The TLVs added
@@ -889,19 +931,6 @@ mwifiex_config_scan(struct mwifiex_private *priv,
}
- /* Append rates tlv */
- memset(rates, 0, sizeof(rates));
-
- rates_size = mwifiex_get_supported_rates(priv, rates);
-
- rates_tlv = (struct mwifiex_ie_types_rates_param_set *) tlv_pos;
- rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
- rates_tlv->header.len = cpu_to_le16((u16) rates_size);
- memcpy(rates_tlv->rates, rates, rates_size);
- tlv_pos += sizeof(rates_tlv->header) + rates_size;
-
- dev_dbg(adapter->dev, "info: SCAN_CMD: Rates size = %d\n", rates_size);
-
if (ISSUPP_11NENABLED(priv->adapter->fw_cap_info) &&
(priv->adapter->config_bands & BAND_GN ||
priv->adapter->config_bands & BAND_AN)) {
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 5ee5ed02ecc..1576104e3d9 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -50,8 +50,6 @@ static struct mwifiex_if_ops sdio_ops;
static struct semaphore add_remove_card_sem;
-static int mwifiex_sdio_resume(struct device *dev);
-
/*
* SDIO probe.
*
@@ -112,6 +110,51 @@ mwifiex_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id)
}
/*
+ * SDIO resume.
+ *
+ * Kernel needs to suspend all functions separately. Therefore all
+ * registered functions must have drivers with suspend and resume
+ * methods. Failing that the kernel simply removes the whole card.
+ *
+ * If already not resumed, this function turns on the traffic and
+ * sends a host sleep cancel request to the firmware.
+ */
+static int mwifiex_sdio_resume(struct device *dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(dev);
+ struct sdio_mmc_card *card;
+ struct mwifiex_adapter *adapter;
+ mmc_pm_flag_t pm_flag = 0;
+
+ if (func) {
+ pm_flag = sdio_get_host_pm_caps(func);
+ card = sdio_get_drvdata(func);
+ if (!card || !card->adapter) {
+ pr_err("resume: invalid card or adapter\n");
+ return 0;
+ }
+ } else {
+ pr_err("resume: sdio_func is not specified\n");
+ return 0;
+ }
+
+ adapter = card->adapter;
+
+ if (!adapter->is_suspended) {
+ dev_warn(adapter->dev, "device already resumed\n");
+ return 0;
+ }
+
+ adapter->is_suspended = false;
+
+ /* Disable Host Sleep */
+ mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
+ MWIFIEX_ASYNC_CMD);
+
+ return 0;
+}
+
+/*
* SDIO remove.
*
* This function removes the interface and frees up the card structure.
@@ -211,51 +254,6 @@ static int mwifiex_sdio_suspend(struct device *dev)
return ret;
}
-/*
- * SDIO resume.
- *
- * Kernel needs to suspend all functions separately. Therefore all
- * registered functions must have drivers with suspend and resume
- * methods. Failing that the kernel simply removes the whole card.
- *
- * If already not resumed, this function turns on the traffic and
- * sends a host sleep cancel request to the firmware.
- */
-static int mwifiex_sdio_resume(struct device *dev)
-{
- struct sdio_func *func = dev_to_sdio_func(dev);
- struct sdio_mmc_card *card;
- struct mwifiex_adapter *adapter;
- mmc_pm_flag_t pm_flag = 0;
-
- if (func) {
- pm_flag = sdio_get_host_pm_caps(func);
- card = sdio_get_drvdata(func);
- if (!card || !card->adapter) {
- pr_err("resume: invalid card or adapter\n");
- return 0;
- }
- } else {
- pr_err("resume: sdio_func is not specified\n");
- return 0;
- }
-
- adapter = card->adapter;
-
- if (!adapter->is_suspended) {
- dev_warn(adapter->dev, "device already resumed\n");
- return 0;
- }
-
- adapter->is_suspended = false;
-
- /* Disable Host Sleep */
- mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA),
- MWIFIEX_ASYNC_CMD);
-
- return 0;
-}
-
/* Device ID for SD8786 */
#define SDIO_DEVICE_ID_MARVELL_8786 (0x9116)
/* Device ID for SD8787 */
@@ -296,6 +294,15 @@ static struct sdio_driver mwifiex_sdio = {
}
};
+/* Write data into SDIO card register. Caller claims SDIO device. */
+static int
+mwifiex_write_reg_locked(struct sdio_func *func, u32 reg, u8 data)
+{
+ int ret = -1;
+ sdio_writeb(func, data, reg, &ret);
+ return ret;
+}
+
/*
* This function writes data into SDIO card register.
*/
@@ -303,10 +310,10 @@ static int
mwifiex_write_reg(struct mwifiex_adapter *adapter, u32 reg, u8 data)
{
struct sdio_mmc_card *card = adapter->card;
- int ret = -1;
+ int ret;
sdio_claim_host(card->func);
- sdio_writeb(card->func, data, reg, &ret);
+ ret = mwifiex_write_reg_locked(card->func, reg, data);
sdio_release_host(card->func);
return ret;
@@ -685,23 +692,74 @@ mwifiex_sdio_read_fw_status(struct mwifiex_adapter *adapter, u16 *dat)
* The host interrupt mask is read, the disable bit is reset and
* written back to the card host interrupt mask register.
*/
-static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
+static void mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
{
- u8 host_int_mask, host_int_disable = HOST_INT_DISABLE;
+ struct sdio_mmc_card *card = adapter->card;
+ struct sdio_func *func = card->func;
- /* Read back the host_int_mask register */
- if (mwifiex_read_reg(adapter, HOST_INT_MASK_REG, &host_int_mask))
- return -1;
+ sdio_claim_host(func);
+ mwifiex_write_reg_locked(func, HOST_INT_MASK_REG, 0);
+ sdio_release_irq(func);
+ sdio_release_host(func);
+}
- /* Update with the mask and write back to the register */
- host_int_mask &= ~host_int_disable;
+/*
+ * This function reads the interrupt status from card.
+ */
+static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
+{
+ struct sdio_mmc_card *card = adapter->card;
+ u8 sdio_ireg;
+ unsigned long flags;
- if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG, host_int_mask)) {
- dev_err(adapter->dev, "disable host interrupt failed\n");
- return -1;
+ if (mwifiex_read_data_sync(adapter, card->mp_regs,
+ card->reg->max_mp_regs,
+ REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) {
+ dev_err(adapter->dev, "read mp_regs failed\n");
+ return;
}
- return 0;
+ sdio_ireg = card->mp_regs[HOST_INTSTATUS_REG];
+ if (sdio_ireg) {
+ /*
+ * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
+ * For SDIO new mode CMD port interrupts
+ * DN_LD_CMD_PORT_HOST_INT_STATUS and/or
+ * UP_LD_CMD_PORT_HOST_INT_STATUS
+ * Clear the interrupt status register
+ */
+ dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg);
+ spin_lock_irqsave(&adapter->int_lock, flags);
+ adapter->int_status |= sdio_ireg;
+ spin_unlock_irqrestore(&adapter->int_lock, flags);
+ }
+}
+
+/*
+ * SDIO interrupt handler.
+ *
+ * This function reads the interrupt status from firmware and handles
+ * the interrupt in current thread (ksdioirqd) right away.
+ */
+static void
+mwifiex_sdio_interrupt(struct sdio_func *func)
+{
+ struct mwifiex_adapter *adapter;
+ struct sdio_mmc_card *card;
+
+ card = sdio_get_drvdata(func);
+ if (!card || !card->adapter) {
+ pr_debug("int: func=%p card=%p adapter=%p\n",
+ func, card, card ? card->adapter : NULL);
+ return;
+ }
+ adapter = card->adapter;
+
+ if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP)
+ adapter->ps_state = PS_STATE_AWAKE;
+
+ mwifiex_interrupt_status(adapter);
+ mwifiex_main_process(adapter);
}
/*
@@ -713,14 +771,29 @@ static int mwifiex_sdio_disable_host_int(struct mwifiex_adapter *adapter)
static int mwifiex_sdio_enable_host_int(struct mwifiex_adapter *adapter)
{
struct sdio_mmc_card *card = adapter->card;
+ struct sdio_func *func = card->func;
+ int ret;
+
+ sdio_claim_host(func);
+
+ /* Request the SDIO IRQ */
+ ret = sdio_claim_irq(func, mwifiex_sdio_interrupt);
+ if (ret) {
+ dev_err(adapter->dev, "claim irq failed: ret=%d\n", ret);
+ goto out;
+ }
/* Simply write the mask to the register */
- if (mwifiex_write_reg(adapter, HOST_INT_MASK_REG,
- card->reg->host_int_enable)) {
+ ret = mwifiex_write_reg_locked(func, HOST_INT_MASK_REG,
+ card->reg->host_int_enable);
+ if (ret) {
dev_err(adapter->dev, "enable host interrupt failed\n");
- return -1;
+ sdio_release_irq(func);
}
- return 0;
+
+out:
+ sdio_release_host(func);
+ return ret;
}
/*
@@ -927,7 +1000,7 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
ret = 0;
break;
} else {
- mdelay(100);
+ msleep(100);
ret = -1;
}
}
@@ -946,68 +1019,6 @@ static int mwifiex_check_fw_status(struct mwifiex_adapter *adapter,
}
/*
- * This function reads the interrupt status from card.
- */
-static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
-{
- struct sdio_mmc_card *card = adapter->card;
- u8 sdio_ireg;
- unsigned long flags;
-
- if (mwifiex_read_data_sync(adapter, card->mp_regs,
- card->reg->max_mp_regs,
- REG_PORT | MWIFIEX_SDIO_BYTE_MODE_MASK, 0)) {
- dev_err(adapter->dev, "read mp_regs failed\n");
- return;
- }
-
- sdio_ireg = card->mp_regs[HOST_INTSTATUS_REG];
- if (sdio_ireg) {
- /*
- * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS
- * For SDIO new mode CMD port interrupts
- * DN_LD_CMD_PORT_HOST_INT_STATUS and/or
- * UP_LD_CMD_PORT_HOST_INT_STATUS
- * Clear the interrupt status register
- */
- dev_dbg(adapter->dev, "int: sdio_ireg = %#x\n", sdio_ireg);
- spin_lock_irqsave(&adapter->int_lock, flags);
- adapter->int_status |= sdio_ireg;
- spin_unlock_irqrestore(&adapter->int_lock, flags);
- }
-}
-
-/*
- * SDIO interrupt handler.
- *
- * This function reads the interrupt status from firmware and handles
- * the interrupt in current thread (ksdioirqd) right away.
- */
-static void
-mwifiex_sdio_interrupt(struct sdio_func *func)
-{
- struct mwifiex_adapter *adapter;
- struct sdio_mmc_card *card;
-
- card = sdio_get_drvdata(func);
- if (!card || !card->adapter) {
- pr_debug("int: func=%p card=%p adapter=%p\n",
- func, card, card ? card->adapter : NULL);
- return;
- }
- adapter = card->adapter;
-
- if (adapter->surprise_removed)
- return;
-
- if (!adapter->pps_uapsd_mode && adapter->ps_state == PS_STATE_SLEEP)
- adapter->ps_state = PS_STATE_AWAKE;
-
- mwifiex_interrupt_status(adapter);
- mwifiex_main_process(adapter);
-}
-
-/*
* This function decodes a received packet.
*
* Based on the type, the packet is treated as either a data, or
@@ -1051,7 +1062,7 @@ static int mwifiex_decode_rx_packet(struct mwifiex_adapter *adapter,
case MWIFIEX_TYPE_EVENT:
dev_dbg(adapter->dev, "info: --- Rx: Event ---\n");
- adapter->event_cause = *(u32 *) skb->data;
+ adapter->event_cause = le32_to_cpu(*(__le32 *) skb->data);
if ((skb->len > 0) && (skb->len < MAX_EVENT_SIZE))
memcpy(adapter->event_body,
@@ -1196,8 +1207,8 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
/* get curr PKT len & type */
- pkt_len = *(u16 *) &curr_ptr[0];
- pkt_type = *(u16 *) &curr_ptr[2];
+ pkt_len = le16_to_cpu(*(__le16 *) &curr_ptr[0]);
+ pkt_type = le16_to_cpu(*(__le16 *) &curr_ptr[2]);
/* copy pkt to deaggr buf */
skb_deaggr = card->mpa_rx.skb_arr[pind];
@@ -1625,8 +1636,8 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter,
/* Allocate buffer and copy payload */
blk_size = MWIFIEX_SDIO_BLOCK_SIZE;
buf_block_len = (pkt_len + blk_size - 1) / blk_size;
- *(u16 *) &payload[0] = (u16) pkt_len;
- *(u16 *) &payload[2] = type;
+ *(__le16 *)&payload[0] = cpu_to_le16((u16)pkt_len);
+ *(__le16 *)&payload[2] = cpu_to_le16(type);
/*
* This is SDIO specific header
@@ -1728,9 +1739,7 @@ mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
struct sdio_mmc_card *card = adapter->card;
if (adapter->card) {
- /* Release the SDIO IRQ */
sdio_claim_host(card->func);
- sdio_release_irq(card->func);
sdio_disable_func(card->func);
sdio_release_host(card->func);
sdio_set_drvdata(card->func, NULL);
@@ -1744,7 +1753,7 @@ mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
*/
static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
{
- int ret = 0;
+ int ret;
struct sdio_mmc_card *card = adapter->card;
struct sdio_func *func = card->func;
@@ -1753,22 +1762,14 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
sdio_claim_host(func);
- /* Request the SDIO IRQ */
- ret = sdio_claim_irq(func, mwifiex_sdio_interrupt);
- if (ret) {
- pr_err("claim irq failed: ret=%d\n", ret);
- goto disable_func;
- }
-
/* Set block size */
ret = sdio_set_block_size(card->func, MWIFIEX_SDIO_BLOCK_SIZE);
+ sdio_release_host(func);
if (ret) {
pr_err("cannot set SDIO block size\n");
- ret = -1;
- goto release_irq;
+ return ret;
}
- sdio_release_host(func);
sdio_set_drvdata(func, card);
adapter->dev = &func->dev;
@@ -1776,15 +1777,6 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
strcpy(adapter->fw_name, card->firmware);
return 0;
-
-release_irq:
- sdio_release_irq(func);
-disable_func:
- sdio_disable_func(func);
- sdio_release_host(func);
- adapter->card = NULL;
-
- return -1;
}
/*
@@ -1813,9 +1805,6 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
*/
mwifiex_read_reg(adapter, HOST_INTSTATUS_REG, &sdio_ireg);
- /* Disable host interrupt mask register for SDIO */
- mwifiex_sdio_disable_host_int(adapter);
-
/* Get SDIO ioport */
mwifiex_init_sdio_ioport(adapter);
@@ -1957,6 +1946,7 @@ static struct mwifiex_if_ops sdio_ops = {
.register_dev = mwifiex_register_dev,
.unregister_dev = mwifiex_unregister_dev,
.enable_int = mwifiex_sdio_enable_host_int,
+ .disable_int = mwifiex_sdio_disable_host_int,
.process_int_status = mwifiex_process_int_status,
.host_to_card = mwifiex_sdio_host_to_card,
.wakeup = mwifiex_pm_wakeup_card,
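The enable_int/disable_int pair added above follows the standard SDIO ownership rule: sdio_claim_irq()/sdio_release_irq() and the register writes done alongside them must sit inside a sdio_claim_host()/sdio_release_host() section. A condensed sketch of that pattern, with the interrupt-mask register address and handler body kept symbolic:

#include <linux/mmc/sdio_func.h>

#define EXAMPLE_HOST_INT_MASK_REG	0x02	/* illustrative address */

static void example_sdio_irq(struct sdio_func *func)
{
	/* runs in the sdio irq thread with the host already claimed */
}

static int example_enable_int(struct sdio_func *func, u8 mask)
{
	int ret;

	sdio_claim_host(func);

	ret = sdio_claim_irq(func, example_sdio_irq);
	if (ret)
		goto out;

	/* unmask card interrupts only once the handler is installed */
	sdio_writeb(func, mask, EXAMPLE_HOST_INT_MASK_REG, &ret);
	if (ret)
		sdio_release_irq(func);

out:
	sdio_release_host(func);
	return ret;
}

static void example_disable_int(struct sdio_func *func)
{
	sdio_claim_host(func);
	/* mask all card interrupts; errors are ignored on teardown */
	sdio_writeb(func, 0, EXAMPLE_HOST_INT_MASK_REG, NULL);
	sdio_release_irq(func);
	sdio_release_host(func);
}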
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 6d51dfdd825..532ae0ac4df 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -92,9 +92,6 @@
/* Host Control Registers : Download host interrupt mask */
#define DN_LD_HOST_INT_MASK (0x2U)
-/* Disable Host interrupt mask */
-#define HOST_INT_DISABLE 0xff
-
/* Host Control Registers : Host interrupt status */
#define HOST_INTSTATUS_REG 0x03
/* Host Control Registers : Upload host interrupt status */
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 8ece4858064..c0268b59774 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -707,8 +707,9 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP) {
tlv_mac = (void *)((u8 *)&key_material->key_param_set +
key_param_len);
- tlv_mac->tlv.type = cpu_to_le16(TLV_TYPE_STA_MAC_ADDR);
- tlv_mac->tlv.len = cpu_to_le16(ETH_ALEN);
+ tlv_mac->header.type =
+ cpu_to_le16(TLV_TYPE_STA_MAC_ADDR);
+ tlv_mac->header.len = cpu_to_le16(ETH_ALEN);
memcpy(tlv_mac->mac_addr, enc_key->mac_addr, ETH_ALEN);
cmd_size = key_param_len + S_DS_GEN +
sizeof(key_material->action) +
@@ -1069,7 +1070,7 @@ mwifiex_cmd_append_rpn_expression(struct mwifiex_private *priv,
int i, byte_len;
u8 *stack_ptr = *buffer;
- for (i = 0; i < MWIFIEX_MAX_FILTERS; i++) {
+ for (i = 0; i < MWIFIEX_MEF_MAX_FILTERS; i++) {
filter = &mef_entry->filter[i];
if (!filter->filt_type)
break;
@@ -1078,7 +1079,7 @@ mwifiex_cmd_append_rpn_expression(struct mwifiex_private *priv,
*stack_ptr = TYPE_DNUM;
stack_ptr += 1;
- byte_len = filter->byte_seq[MAX_BYTESEQ];
+ byte_len = filter->byte_seq[MWIFIEX_MEF_MAX_BYTESEQ];
memcpy(stack_ptr, filter->byte_seq, byte_len);
stack_ptr += byte_len;
*stack_ptr = byte_len;
@@ -1183,6 +1184,70 @@ static int mwifiex_cmd_cfg_data(struct mwifiex_private *priv,
return 0;
}
+static int
+mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, void *data_buf)
+{
+ struct host_cmd_ds_coalesce_cfg *coalesce_cfg =
+ &cmd->params.coalesce_cfg;
+ struct mwifiex_ds_coalesce_cfg *cfg = data_buf;
+ struct coalesce_filt_field_param *param;
+ u16 cnt, idx, length;
+ struct coalesce_receive_filt_rule *rule;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_COALESCE_CFG);
+ cmd->size = cpu_to_le16(S_DS_GEN);
+
+ coalesce_cfg->action = cpu_to_le16(cmd_action);
+ coalesce_cfg->num_of_rules = cpu_to_le16(cfg->num_of_rules);
+ rule = coalesce_cfg->rule;
+
+ for (cnt = 0; cnt < cfg->num_of_rules; cnt++) {
+ rule->header.type = cpu_to_le16(TLV_TYPE_COALESCE_RULE);
+ rule->max_coalescing_delay =
+ cpu_to_le16(cfg->rule[cnt].max_coalescing_delay);
+ rule->pkt_type = cfg->rule[cnt].pkt_type;
+ rule->num_of_fields = cfg->rule[cnt].num_of_fields;
+
+ length = 0;
+
+ param = rule->params;
+ for (idx = 0; idx < cfg->rule[cnt].num_of_fields; idx++) {
+ param->operation = cfg->rule[cnt].params[idx].operation;
+ param->operand_len =
+ cfg->rule[cnt].params[idx].operand_len;
+ param->offset =
+ cpu_to_le16(cfg->rule[cnt].params[idx].offset);
+ memcpy(param->operand_byte_stream,
+ cfg->rule[cnt].params[idx].operand_byte_stream,
+ param->operand_len);
+
+ length += sizeof(struct coalesce_filt_field_param);
+
+ param++;
+ }
+
+ /* Total rule length is the size of max_coalescing_delay (u16),
+ * num_of_fields (u8) and pkt_type (u8), plus the total length of
+ * all the params.
+ */
+ rule->header.len = cpu_to_le16(length + sizeof(u16) +
+ sizeof(u8) + sizeof(u8));
+
+ /* Add the rule length to the command size */
+ le16_add_cpu(&cmd->size, le16_to_cpu(rule->header.len) +
+ sizeof(struct mwifiex_ie_types_header));
+
+ rule = (void *)((u8 *)rule->params + length);
+ }
+
+ /* Add sizeof action, num_of_rules to total command length */
+ le16_add_cpu(&cmd->size, sizeof(u16) + sizeof(u16));
+
+ return 0;
+}
+
/*
* This function prepares the commands before sending them to the firmware.
*
@@ -1406,6 +1471,10 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
case HostCmd_CMD_MEF_CFG:
ret = mwifiex_cmd_mef_cfg(priv, cmd_ptr, data_buf);
break;
+ case HostCmd_CMD_COALESCE_CFG:
+ ret = mwifiex_cmd_coalesce_cfg(priv, cmd_ptr, cmd_action,
+ data_buf);
+ break;
default:
dev_err(priv->adapter->dev,
"PREP_CMD: unknown cmd- %#x\n", cmd_no);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index d85df158cc6..58a6013712d 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -280,7 +280,7 @@ static int mwifiex_ret_tx_rate_cfg(struct mwifiex_private *priv,
tlv_buf = ((u8 *)rate_cfg) +
sizeof(struct host_cmd_ds_tx_rate_cfg);
- tlv_buf_len = *(u16 *) (tlv_buf + sizeof(u16));
+ tlv_buf_len = le16_to_cpu(*(__le16 *) (tlv_buf + sizeof(u16)));
while (tlv_buf && tlv_buf_len > 0) {
tlv = (*tlv_buf);
@@ -997,6 +997,8 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
break;
case HostCmd_CMD_MEF_CFG:
break;
+ case HostCmd_CMD_COALESCE_CFG:
+ break;
default:
dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
resp->command);
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index ea265ec0e52..8b057524b25 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -201,6 +201,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_DEAUTHENTICATED:
dev_dbg(adapter->dev, "event: Deauthenticated\n");
+ if (priv->wps.session_enable) {
+ dev_dbg(adapter->dev,
+ "info: receive deauth event in wps session\n");
+ break;
+ }
adapter->dbg.num_event_deauth++;
if (priv->media_connected) {
reason_code =
@@ -211,6 +216,11 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
case EVENT_DISASSOCIATED:
dev_dbg(adapter->dev, "event: Disassociated\n");
+ if (priv->wps.session_enable) {
+ dev_dbg(adapter->dev,
+ "info: receive disassoc event in wps session\n");
+ break;
+ }
adapter->dbg.num_event_disassoc++;
if (priv->media_connected) {
reason_code =
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 206c3e03807..f084412eee0 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -257,10 +257,10 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
goto done;
}
- if (priv->bss_mode == NL80211_IFTYPE_STATION) {
+ if (priv->bss_mode == NL80211_IFTYPE_STATION ||
+ priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
u8 config_bands;
- /* Infra mode */
ret = mwifiex_deauthenticate(priv, NULL);
if (ret)
goto done;
@@ -797,15 +797,16 @@ static int mwifiex_set_wps_ie(struct mwifiex_private *priv,
u8 *ie_data_ptr, u16 ie_len)
{
if (ie_len) {
- priv->wps_ie = kzalloc(MWIFIEX_MAX_VSIE_LEN, GFP_KERNEL);
- if (!priv->wps_ie)
- return -ENOMEM;
- if (ie_len > sizeof(priv->wps_ie)) {
+ if (ie_len > MWIFIEX_MAX_VSIE_LEN) {
dev_dbg(priv->adapter->dev,
"info: failed to copy WPS IE, too big\n");
- kfree(priv->wps_ie);
return -1;
}
+
+ priv->wps_ie = kzalloc(MWIFIEX_MAX_VSIE_LEN, GFP_KERNEL);
+ if (!priv->wps_ie)
+ return -ENOMEM;
+
memcpy(priv->wps_ie, ie_data_ptr, ie_len);
priv->wps_ie_len = ie_len;
dev_dbg(priv->adapter->dev, "cmd: Set wps_ie_len=%d IE=%#x\n",
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index b5c10950439..bb22664923e 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -17,6 +17,8 @@
* this warranty disclaimer.
*/
+#include <uapi/linux/ipv6.h>
+#include <net/ndisc.h>
#include "decl.h"
#include "ioctl.h"
#include "util.h"
@@ -25,6 +27,46 @@
#include "11n_aggr.h"
#include "11n_rxreorder.h"
+/* This function checks if a frame is an IPv4 ARP or IPv6 neighbour
+ * advertisement frame. If the source and target addresses in such a frame
+ * are identical, the frame is gratuitous and is dropped.
+ */
+static bool
+mwifiex_discard_gratuitous_arp(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ const struct mwifiex_arp_eth_header *arp;
+ struct ethhdr *eth_hdr;
+ struct ipv6hdr *ipv6;
+ struct icmp6hdr *icmpv6;
+
+ eth_hdr = (struct ethhdr *)skb->data;
+ switch (ntohs(eth_hdr->h_proto)) {
+ case ETH_P_ARP:
+ arp = (void *)(skb->data + sizeof(struct ethhdr));
+ if (arp->hdr.ar_op == htons(ARPOP_REPLY) ||
+ arp->hdr.ar_op == htons(ARPOP_REQUEST)) {
+ if (!memcmp(arp->ar_sip, arp->ar_tip, 4))
+ return true;
+ }
+ break;
+ case ETH_P_IPV6:
+ ipv6 = (void *)(skb->data + sizeof(struct ethhdr));
+ icmpv6 = (void *)(skb->data + sizeof(struct ethhdr) +
+ sizeof(struct ipv6hdr));
+ if (NDISC_NEIGHBOUR_ADVERTISEMENT == icmpv6->icmp6_type) {
+ if (!memcmp(&ipv6->saddr, &ipv6->daddr,
+ sizeof(struct in6_addr)))
+ return true;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return false;
+}
+
/*
* This function processes the received packet and forwards it
* to kernel/upper layer.
@@ -90,6 +132,13 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
either the reconstructed EthII frame or the 802.2/llc/snap frame */
skb_pull(skb, hdr_chop);
+ if (priv->hs2_enabled &&
+ mwifiex_discard_gratuitous_arp(priv, skb)) {
+ dev_dbg(priv->adapter->dev, "Bypassed Gratuitous ARP\n");
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+
priv->rxpd_rate = local_rx_pd->rx_rate;
priv->rxpd_htinfo = local_rx_pd->ht_info;
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 2de882dead0..64424c81b44 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -293,9 +293,9 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
u8 *tlv = *tlv_buf;
tlv_akmp = (struct host_cmd_tlv_akmp *)tlv;
- tlv_akmp->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AKMP);
- tlv_akmp->tlv.len = cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) -
- sizeof(struct host_cmd_tlv));
+ tlv_akmp->header.type = cpu_to_le16(TLV_TYPE_UAP_AKMP);
+ tlv_akmp->header.len = cpu_to_le16(sizeof(struct host_cmd_tlv_akmp) -
+ sizeof(struct mwifiex_ie_types_header));
tlv_akmp->key_mgmt_operation = cpu_to_le16(bss_cfg->key_mgmt_operation);
tlv_akmp->key_mgmt = cpu_to_le16(bss_cfg->key_mgmt);
cmd_size += sizeof(struct host_cmd_tlv_akmp);
@@ -303,10 +303,10 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
if (bss_cfg->wpa_cfg.pairwise_cipher_wpa & VALID_CIPHER_BITMAP) {
pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
- pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
- pwk_cipher->tlv.len =
+ pwk_cipher->header.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
+ pwk_cipher->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
- sizeof(struct host_cmd_tlv));
+ sizeof(struct mwifiex_ie_types_header));
pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA);
pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa;
cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
@@ -315,10 +315,10 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
if (bss_cfg->wpa_cfg.pairwise_cipher_wpa2 & VALID_CIPHER_BITMAP) {
pwk_cipher = (struct host_cmd_tlv_pwk_cipher *)tlv;
- pwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
- pwk_cipher->tlv.len =
+ pwk_cipher->header.type = cpu_to_le16(TLV_TYPE_PWK_CIPHER);
+ pwk_cipher->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_pwk_cipher) -
- sizeof(struct host_cmd_tlv));
+ sizeof(struct mwifiex_ie_types_header));
pwk_cipher->proto = cpu_to_le16(PROTOCOL_WPA2);
pwk_cipher->cipher = bss_cfg->wpa_cfg.pairwise_cipher_wpa2;
cmd_size += sizeof(struct host_cmd_tlv_pwk_cipher);
@@ -327,10 +327,10 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
if (bss_cfg->wpa_cfg.group_cipher & VALID_CIPHER_BITMAP) {
gwk_cipher = (struct host_cmd_tlv_gwk_cipher *)tlv;
- gwk_cipher->tlv.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
- gwk_cipher->tlv.len =
+ gwk_cipher->header.type = cpu_to_le16(TLV_TYPE_GWK_CIPHER);
+ gwk_cipher->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_gwk_cipher) -
- sizeof(struct host_cmd_tlv));
+ sizeof(struct mwifiex_ie_types_header));
gwk_cipher->cipher = bss_cfg->wpa_cfg.group_cipher;
cmd_size += sizeof(struct host_cmd_tlv_gwk_cipher);
tlv += sizeof(struct host_cmd_tlv_gwk_cipher);
@@ -338,13 +338,15 @@ mwifiex_uap_bss_wpa(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
if (bss_cfg->wpa_cfg.length) {
passphrase = (struct host_cmd_tlv_passphrase *)tlv;
- passphrase->tlv.type = cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
- passphrase->tlv.len = cpu_to_le16(bss_cfg->wpa_cfg.length);
+ passphrase->header.type =
+ cpu_to_le16(TLV_TYPE_UAP_WPA_PASSPHRASE);
+ passphrase->header.len = cpu_to_le16(bss_cfg->wpa_cfg.length);
memcpy(passphrase->passphrase, bss_cfg->wpa_cfg.passphrase,
bss_cfg->wpa_cfg.length);
- cmd_size += sizeof(struct host_cmd_tlv) +
+ cmd_size += sizeof(struct mwifiex_ie_types_header) +
bss_cfg->wpa_cfg.length;
- tlv += sizeof(struct host_cmd_tlv) + bss_cfg->wpa_cfg.length;
+ tlv += sizeof(struct mwifiex_ie_types_header) +
+ bss_cfg->wpa_cfg.length;
}
*param_size = cmd_size;
@@ -403,16 +405,17 @@ mwifiex_uap_bss_wep(u8 **tlv_buf, void *cmd_buf, u16 *param_size)
(bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP40 ||
bss_cfg->wep_cfg[i].length == WLAN_KEY_LEN_WEP104)) {
wep_key = (struct host_cmd_tlv_wep_key *)tlv;
- wep_key->tlv.type = cpu_to_le16(TLV_TYPE_UAP_WEP_KEY);
- wep_key->tlv.len =
+ wep_key->header.type =
+ cpu_to_le16(TLV_TYPE_UAP_WEP_KEY);
+ wep_key->header.len =
cpu_to_le16(bss_cfg->wep_cfg[i].length + 2);
wep_key->key_index = bss_cfg->wep_cfg[i].key_index;
wep_key->is_default = bss_cfg->wep_cfg[i].is_default;
memcpy(wep_key->key, bss_cfg->wep_cfg[i].key,
bss_cfg->wep_cfg[i].length);
- cmd_size += sizeof(struct host_cmd_tlv) + 2 +
+ cmd_size += sizeof(struct mwifiex_ie_types_header) + 2 +
bss_cfg->wep_cfg[i].length;
- tlv += sizeof(struct host_cmd_tlv) + 2 +
+ tlv += sizeof(struct mwifiex_ie_types_header) + 2 +
bss_cfg->wep_cfg[i].length;
}
}
@@ -449,16 +452,17 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
if (bss_cfg->ssid.ssid_len) {
ssid = (struct host_cmd_tlv_ssid *)tlv;
- ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_SSID);
- ssid->tlv.len = cpu_to_le16((u16)bss_cfg->ssid.ssid_len);
+ ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_SSID);
+ ssid->header.len = cpu_to_le16((u16)bss_cfg->ssid.ssid_len);
memcpy(ssid->ssid, bss_cfg->ssid.ssid, bss_cfg->ssid.ssid_len);
- cmd_size += sizeof(struct host_cmd_tlv) +
+ cmd_size += sizeof(struct mwifiex_ie_types_header) +
bss_cfg->ssid.ssid_len;
- tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len;
+ tlv += sizeof(struct mwifiex_ie_types_header) +
+ bss_cfg->ssid.ssid_len;
bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv;
- bcast_ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
- bcast_ssid->tlv.len =
+ bcast_ssid->header.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
+ bcast_ssid->header.len =
cpu_to_le16(sizeof(bcast_ssid->bcast_ctl));
bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl;
cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
@@ -466,13 +470,13 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
}
if (bss_cfg->rates[0]) {
tlv_rates = (struct host_cmd_tlv_rates *)tlv;
- tlv_rates->tlv.type = cpu_to_le16(TLV_TYPE_UAP_RATES);
+ tlv_rates->header.type = cpu_to_le16(TLV_TYPE_UAP_RATES);
for (i = 0; i < MWIFIEX_SUPPORTED_RATES && bss_cfg->rates[i];
i++)
tlv_rates->rates[i] = bss_cfg->rates[i];
- tlv_rates->tlv.len = cpu_to_le16(i);
+ tlv_rates->header.len = cpu_to_le16(i);
cmd_size += sizeof(struct host_cmd_tlv_rates) + i;
tlv += sizeof(struct host_cmd_tlv_rates) + i;
}
@@ -482,10 +486,10 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
(bss_cfg->band_cfg == BAND_CONFIG_A &&
bss_cfg->channel <= MAX_CHANNEL_BAND_A))) {
chan_band = (struct host_cmd_tlv_channel_band *)tlv;
- chan_band->tlv.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST);
- chan_band->tlv.len =
+ chan_band->header.type = cpu_to_le16(TLV_TYPE_CHANNELBANDLIST);
+ chan_band->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_channel_band) -
- sizeof(struct host_cmd_tlv));
+ sizeof(struct mwifiex_ie_types_header));
chan_band->band_config = bss_cfg->band_cfg;
chan_band->channel = bss_cfg->channel;
cmd_size += sizeof(struct host_cmd_tlv_channel_band);
@@ -494,11 +498,11 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
if (bss_cfg->beacon_period >= MIN_BEACON_PERIOD &&
bss_cfg->beacon_period <= MAX_BEACON_PERIOD) {
beacon_period = (struct host_cmd_tlv_beacon_period *)tlv;
- beacon_period->tlv.type =
+ beacon_period->header.type =
cpu_to_le16(TLV_TYPE_UAP_BEACON_PERIOD);
- beacon_period->tlv.len =
+ beacon_period->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_beacon_period) -
- sizeof(struct host_cmd_tlv));
+ sizeof(struct mwifiex_ie_types_header));
beacon_period->period = cpu_to_le16(bss_cfg->beacon_period);
cmd_size += sizeof(struct host_cmd_tlv_beacon_period);
tlv += sizeof(struct host_cmd_tlv_beacon_period);
@@ -506,21 +510,22 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
if (bss_cfg->dtim_period >= MIN_DTIM_PERIOD &&
bss_cfg->dtim_period <= MAX_DTIM_PERIOD) {
dtim_period = (struct host_cmd_tlv_dtim_period *)tlv;
- dtim_period->tlv.type = cpu_to_le16(TLV_TYPE_UAP_DTIM_PERIOD);
- dtim_period->tlv.len =
+ dtim_period->header.type =
+ cpu_to_le16(TLV_TYPE_UAP_DTIM_PERIOD);
+ dtim_period->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_dtim_period) -
- sizeof(struct host_cmd_tlv));
+ sizeof(struct mwifiex_ie_types_header));
dtim_period->period = bss_cfg->dtim_period;
cmd_size += sizeof(struct host_cmd_tlv_dtim_period);
tlv += sizeof(struct host_cmd_tlv_dtim_period);
}
if (bss_cfg->rts_threshold <= MWIFIEX_RTS_MAX_VALUE) {
rts_threshold = (struct host_cmd_tlv_rts_threshold *)tlv;
- rts_threshold->tlv.type =
+ rts_threshold->header.type =
cpu_to_le16(TLV_TYPE_UAP_RTS_THRESHOLD);
- rts_threshold->tlv.len =
+ rts_threshold->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_rts_threshold) -
- sizeof(struct host_cmd_tlv));
+ sizeof(struct mwifiex_ie_types_header));
rts_threshold->rts_thr = cpu_to_le16(bss_cfg->rts_threshold);
cmd_size += sizeof(struct host_cmd_tlv_frag_threshold);
tlv += sizeof(struct host_cmd_tlv_frag_threshold);
@@ -528,21 +533,22 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
if ((bss_cfg->frag_threshold >= MWIFIEX_FRAG_MIN_VALUE) &&
(bss_cfg->frag_threshold <= MWIFIEX_FRAG_MAX_VALUE)) {
frag_threshold = (struct host_cmd_tlv_frag_threshold *)tlv;
- frag_threshold->tlv.type =
+ frag_threshold->header.type =
cpu_to_le16(TLV_TYPE_UAP_FRAG_THRESHOLD);
- frag_threshold->tlv.len =
+ frag_threshold->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_frag_threshold) -
- sizeof(struct host_cmd_tlv));
+ sizeof(struct mwifiex_ie_types_header));
frag_threshold->frag_thr = cpu_to_le16(bss_cfg->frag_threshold);
cmd_size += sizeof(struct host_cmd_tlv_frag_threshold);
tlv += sizeof(struct host_cmd_tlv_frag_threshold);
}
if (bss_cfg->retry_limit <= MWIFIEX_RETRY_LIMIT) {
retry_limit = (struct host_cmd_tlv_retry_limit *)tlv;
- retry_limit->tlv.type = cpu_to_le16(TLV_TYPE_UAP_RETRY_LIMIT);
- retry_limit->tlv.len =
+ retry_limit->header.type =
+ cpu_to_le16(TLV_TYPE_UAP_RETRY_LIMIT);
+ retry_limit->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_retry_limit) -
- sizeof(struct host_cmd_tlv));
+ sizeof(struct mwifiex_ie_types_header));
retry_limit->limit = (u8)bss_cfg->retry_limit;
cmd_size += sizeof(struct host_cmd_tlv_retry_limit);
tlv += sizeof(struct host_cmd_tlv_retry_limit);
@@ -557,21 +563,21 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
if ((bss_cfg->auth_mode <= WLAN_AUTH_SHARED_KEY) ||
(bss_cfg->auth_mode == MWIFIEX_AUTH_MODE_AUTO)) {
auth_type = (struct host_cmd_tlv_auth_type *)tlv;
- auth_type->tlv.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
- auth_type->tlv.len =
+ auth_type->header.type = cpu_to_le16(TLV_TYPE_AUTH_TYPE);
+ auth_type->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_auth_type) -
- sizeof(struct host_cmd_tlv));
+ sizeof(struct mwifiex_ie_types_header));
auth_type->auth_type = (u8)bss_cfg->auth_mode;
cmd_size += sizeof(struct host_cmd_tlv_auth_type);
tlv += sizeof(struct host_cmd_tlv_auth_type);
}
if (bss_cfg->protocol) {
encrypt_protocol = (struct host_cmd_tlv_encrypt_protocol *)tlv;
- encrypt_protocol->tlv.type =
+ encrypt_protocol->header.type =
cpu_to_le16(TLV_TYPE_UAP_ENCRY_PROTOCOL);
- encrypt_protocol->tlv.len =
+ encrypt_protocol->header.len =
cpu_to_le16(sizeof(struct host_cmd_tlv_encrypt_protocol)
- - sizeof(struct host_cmd_tlv));
+ - sizeof(struct mwifiex_ie_types_header));
encrypt_protocol->proto = cpu_to_le16(bss_cfg->protocol);
cmd_size += sizeof(struct host_cmd_tlv_encrypt_protocol);
tlv += sizeof(struct host_cmd_tlv_encrypt_protocol);
@@ -608,9 +614,9 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
if (bss_cfg->sta_ao_timer) {
ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
- ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER);
- ao_timer->tlv.len = cpu_to_le16(sizeof(*ao_timer) -
- sizeof(struct host_cmd_tlv));
+ ao_timer->header.type = cpu_to_le16(TLV_TYPE_UAP_AO_TIMER);
+ ao_timer->header.len = cpu_to_le16(sizeof(*ao_timer) -
+ sizeof(struct mwifiex_ie_types_header));
ao_timer->sta_ao_timer = cpu_to_le32(bss_cfg->sta_ao_timer);
cmd_size += sizeof(*ao_timer);
tlv += sizeof(*ao_timer);
@@ -618,9 +624,10 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
if (bss_cfg->ps_sta_ao_timer) {
ps_ao_timer = (struct host_cmd_tlv_ageout_timer *)tlv;
- ps_ao_timer->tlv.type = cpu_to_le16(TLV_TYPE_UAP_PS_AO_TIMER);
- ps_ao_timer->tlv.len = cpu_to_le16(sizeof(*ps_ao_timer) -
- sizeof(struct host_cmd_tlv));
+ ps_ao_timer->header.type =
+ cpu_to_le16(TLV_TYPE_UAP_PS_AO_TIMER);
+ ps_ao_timer->header.len = cpu_to_le16(sizeof(*ps_ao_timer) -
+ sizeof(struct mwifiex_ie_types_header));
ps_ao_timer->sta_ao_timer =
cpu_to_le32(bss_cfg->ps_sta_ao_timer);
cmd_size += sizeof(*ps_ao_timer);
@@ -636,16 +643,17 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
static int mwifiex_uap_custom_ie_prepare(u8 *tlv, void *cmd_buf, u16 *ie_size)
{
struct mwifiex_ie_list *ap_ie = cmd_buf;
- struct host_cmd_tlv *tlv_ie = (struct host_cmd_tlv *)tlv;
+ struct mwifiex_ie_types_header *tlv_ie = (void *)tlv;
if (!ap_ie || !ap_ie->len || !ap_ie->ie_list)
return -1;
- *ie_size += le16_to_cpu(ap_ie->len) + sizeof(struct host_cmd_tlv);
+ *ie_size += le16_to_cpu(ap_ie->len) +
+ sizeof(struct mwifiex_ie_types_header);
tlv_ie->type = cpu_to_le16(TLV_TYPE_MGMT_IE);
tlv_ie->len = ap_ie->len;
- tlv += sizeof(struct host_cmd_tlv);
+ tlv += sizeof(struct mwifiex_ie_types_header);
memcpy(tlv, ap_ie->ie_list, le16_to_cpu(ap_ie->len));
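The mwifiex hunks above replace the driver-local struct host_cmd_tlv with the generic struct mwifiex_ie_types_header, but the packing pattern is unchanged: every uAP parameter is appended as a little-endian type/length header followed by its value, and both the write cursor and the accumulated command size advance by header size plus value length. A minimal userspace sketch of that pattern (the struct and helper names below are hypothetical, not the driver's):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the driver's TLV header; type and len are
 * little-endian on the wire (a little-endian host is assumed here, the
 * driver uses cpu_to_le16() instead). len covers the value only. */
struct tlv_header {
	uint16_t type;
	uint16_t len;
} __attribute__((packed));

/* Append one TLV and return the new write position, growing *cmd_size by
 * sizeof(header) + value length, just as the uAP command builders do. */
static uint8_t *tlv_append(uint8_t *pos, uint16_t type, const void *val,
			   uint16_t len, uint16_t *cmd_size)
{
	struct tlv_header *hdr = (struct tlv_header *)pos;

	hdr->type = type;
	hdr->len = len;
	memcpy(pos + sizeof(*hdr), val, len);

	*cmd_size += sizeof(*hdr) + len;
	return pos + sizeof(*hdr) + len;
}

int main(void)
{
	uint8_t buf[64];
	uint16_t cmd_size = 0;
	uint8_t *pos = buf;
	const char ssid[] = "test-ap";

	pos = tlv_append(pos, 0x0100 /* made-up type */, ssid,
			 sizeof(ssid) - 1, &cmd_size);
	printf("packed %u bytes, cursor advanced %td\n",
	       (unsigned)cmd_size, pos - buf);
	return 0;
}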
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index a018e42d117..1cfe5a738c4 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -24,6 +24,69 @@
#include "11n_aggr.h"
#include "11n_rxreorder.h"
+/* This function walks the RA lists under the given head and drops bridged
+ * packets from them; for each list, dropping stops once the number of pending
+ * bridged packets falls below the low threshold. Returns true if at least one
+ * packet was deleted, false otherwise.
+ */
+static bool
+mwifiex_uap_del_tx_pkts_in_ralist(struct mwifiex_private *priv,
+ struct list_head *ra_list_head)
+{
+ struct mwifiex_ra_list_tbl *ra_list;
+ struct sk_buff *skb, *tmp;
+ bool pkt_deleted = false;
+ struct mwifiex_txinfo *tx_info;
+ struct mwifiex_adapter *adapter = priv->adapter;
+
+ list_for_each_entry(ra_list, ra_list_head, list) {
+ if (skb_queue_empty(&ra_list->skb_head))
+ continue;
+
+ skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) {
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_BRIDGED_PKT) {
+ __skb_unlink(skb, &ra_list->skb_head);
+ mwifiex_write_data_complete(adapter, skb, 0,
+ -1);
+ atomic_dec(&priv->wmm.tx_pkts_queued);
+ pkt_deleted = true;
+ }
+ if ((atomic_read(&adapter->pending_bridged_pkts) <=
+ MWIFIEX_BRIDGED_PKTS_THR_LOW))
+ break;
+ }
+ }
+
+ return pkt_deleted;
+}
+
+/* This function deletes packets from a particular RA list. The index of the
+ * RA list that packets were deleted from is preserved, so a subsequent call
+ * starts with the next RA list, thus maintaining fairness.
+ */
+static void mwifiex_uap_cleanup_tx_queues(struct mwifiex_private *priv)
+{
+ unsigned long flags;
+ struct list_head *ra_list;
+ int i;
+
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+ for (i = 0; i < MAX_NUM_TID; i++, priv->del_list_idx++) {
+ if (priv->del_list_idx == MAX_NUM_TID)
+ priv->del_list_idx = 0;
+ ra_list = &priv->wmm.tid_tbl_ptr[priv->del_list_idx].ra_list;
+ if (mwifiex_uap_del_tx_pkts_in_ralist(priv, ra_list)) {
+ priv->del_list_idx++;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+}
+
+
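The helper above remembers its starting TID in priv->del_list_idx, so each call resumes one list past where the previous call deleted packets. A compact, standalone illustration of that round-robin cursor (toy data, hypothetical names):

#include <stdbool.h>
#include <stdio.h>

#define NUM_TIDS 8

/* Toy per-TID packet counts standing in for the RA list queues. */
static int pkts[NUM_TIDS] = { 0, 3, 0, 0, 5, 0, 0, 2 };
static int del_idx;	/* persists across calls, like priv->del_list_idx */

/* Drop packets from the first non-empty TID at or after del_idx and remember
 * the following index, so the next call starts one TID further on. */
static bool cleanup_one_round(void)
{
	int i;

	for (i = 0; i < NUM_TIDS; i++, del_idx++) {
		if (del_idx == NUM_TIDS)
			del_idx = 0;
		if (pkts[del_idx]) {
			printf("dropping %d packets from TID %d\n",
			       pkts[del_idx], del_idx);
			pkts[del_idx] = 0;
			del_idx++;
			return true;
		}
	}
	return false;	/* nothing left to drop anywhere */
}

int main(void)
{
	while (cleanup_one_round())
		;
	return 0;
}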
static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
struct sk_buff *skb)
{
@@ -40,10 +103,11 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
rx_pkt_hdr = (void *)uap_rx_pd + le16_to_cpu(uap_rx_pd->rx_pkt_offset);
if ((atomic_read(&adapter->pending_bridged_pkts) >=
- MWIFIEX_BRIDGED_PKTS_THRESHOLD)) {
+ MWIFIEX_BRIDGED_PKTS_THR_HIGH)) {
dev_err(priv->adapter->dev,
"Tx: Bridge packet limit reached. Drop packet!\n");
kfree_skb(skb);
+ mwifiex_uap_cleanup_tx_queues(priv);
return;
}
@@ -95,10 +159,6 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
atomic_inc(&adapter->tx_pending);
atomic_inc(&adapter->pending_bridged_pkts);
- if ((atomic_read(&adapter->tx_pending) >= MAX_TX_PENDING)) {
- mwifiex_set_trans_start(priv->netdev);
- mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
- }
return;
}
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index f90fe21e5bf..2472d4b7f00 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -24,9 +24,9 @@
static const char usbdriver_name[] = "usb8797";
-static u8 user_rmmod;
static struct mwifiex_if_ops usb_ops;
static struct semaphore add_remove_card_sem;
+static struct usb_card_rec *usb_card;
static struct usb_device_id mwifiex_usb_table[] = {
{USB_DEVICE(USB8797_VID, USB8797_PID_1)},
@@ -350,6 +350,7 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
card->udev = udev;
card->intf = intf;
+ usb_card = card;
pr_debug("info: bcdUSB=%#x Device Class=%#x SubClass=%#x Protocol=%#x\n",
udev->descriptor.bcdUSB, udev->descriptor.bDeviceClass,
@@ -532,7 +533,6 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
{
struct usb_card_rec *card = usb_get_intfdata(intf);
struct mwifiex_adapter *adapter;
- int i;
if (!card || !card->adapter) {
pr_err("%s: card or card->adapter is NULL\n", __func__);
@@ -543,27 +543,6 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
if (!adapter->priv_num)
return;
- /* In case driver is removed when asynchronous FW downloading is
- * in progress
- */
- wait_for_completion(&adapter->fw_load);
-
- if (user_rmmod) {
-#ifdef CONFIG_PM
- if (adapter->is_suspended)
- mwifiex_usb_resume(intf);
-#endif
- for (i = 0; i < adapter->priv_num; i++)
- if ((GET_BSS_ROLE(adapter->priv[i]) ==
- MWIFIEX_BSS_ROLE_STA) &&
- adapter->priv[i]->media_connected)
- mwifiex_deauthenticate(adapter->priv[i], NULL);
-
- mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
- MWIFIEX_BSS_ROLE_ANY),
- MWIFIEX_FUNC_SHUTDOWN);
- }
-
mwifiex_usb_free(card);
dev_dbg(adapter->dev, "%s: removing card\n", __func__);
@@ -786,6 +765,13 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
return 0;
}
+static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
+{
+ struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
+
+ usb_set_intfdata(card->intf, NULL);
+}
+
static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
struct mwifiex_fw_image *fw)
{
@@ -978,6 +964,7 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
static struct mwifiex_if_ops usb_ops = {
.register_dev = mwifiex_register_dev,
+ .unregister_dev = mwifiex_unregister_dev,
.wakeup = mwifiex_pm_wakeup_card,
.wakeup_complete = mwifiex_pm_wakeup_card_complete,
@@ -1024,8 +1011,29 @@ static void mwifiex_usb_cleanup_module(void)
if (!down_interruptible(&add_remove_card_sem))
up(&add_remove_card_sem);
- /* set the flag as user is removing this module */
- user_rmmod = 1;
+ if (usb_card) {
+ struct mwifiex_adapter *adapter = usb_card->adapter;
+ int i;
+
+ /* In case driver is removed when asynchronous FW downloading is
+ * in progress
+ */
+ wait_for_completion(&adapter->fw_load);
+
+#ifdef CONFIG_PM
+ if (adapter->is_suspended)
+ mwifiex_usb_resume(usb_card->intf);
+#endif
+ for (i = 0; i < adapter->priv_num; i++)
+ if ((GET_BSS_ROLE(adapter->priv[i]) ==
+ MWIFIEX_BSS_ROLE_STA) &&
+ adapter->priv[i]->media_connected)
+ mwifiex_deauthenticate(adapter->priv[i], NULL);
+
+ mwifiex_init_shutdown_fw(mwifiex_get_priv(adapter,
+ MWIFIEX_BSS_ROLE_ANY),
+ MWIFIEX_FUNC_SHUTDOWN);
+ }
usb_deregister(&mwifiex_usb_driver);
}
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index e57ac0dd3ab..5d9e150f411 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -171,8 +171,8 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv,
rx_pd->rx_pkt_length = cpu_to_le16(pkt_len);
cfg80211_rx_mgmt(priv->wdev, priv->roc_cfg.chan.center_freq,
- CAL_RSSI(rx_pd->snr, rx_pd->nf),
- skb->data, pkt_len, GFP_ATOMIC);
+ CAL_RSSI(rx_pd->snr, rx_pd->nf), skb->data, pkt_len,
+ 0, GFP_ATOMIC);
return 0;
}
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 944e8846f6f..2e8f9cdea54 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -120,7 +120,7 @@ mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, u8 *ra)
memcpy(ra_list->ra, ra, ETH_ALEN);
- ra_list->total_pkts_size = 0;
+ ra_list->total_pkt_count = 0;
dev_dbg(adapter->dev, "info: allocated ra_list %p\n", ra_list);
@@ -188,7 +188,7 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
ra_list, ra_list->is_11n_enabled);
if (ra_list->is_11n_enabled) {
- ra_list->pkt_count = 0;
+ ra_list->ba_pkt_count = 0;
ra_list->ba_packet_thr =
mwifiex_get_random_ba_threshold();
}
@@ -679,8 +679,8 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
skb_queue_tail(&ra_list->skb_head, skb);
- ra_list->total_pkts_size += skb->len;
- ra_list->pkt_count++;
+ ra_list->ba_pkt_count++;
+ ra_list->total_pkt_count++;
if (atomic_read(&priv->wmm.highest_queued_prio) <
tos_to_tid_inv[tid_down])
@@ -1037,7 +1037,7 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
tx_info = MWIFIEX_SKB_TXCB(skb);
dev_dbg(adapter->dev, "data: dequeuing the packet %p %p\n", ptr, skb);
- ptr->total_pkts_size -= skb->len;
+ ptr->total_pkt_count--;
if (!skb_queue_empty(&ptr->skb_head))
skb_next = skb_peek(&ptr->skb_head);
@@ -1062,8 +1062,8 @@ mwifiex_send_single_packet(struct mwifiex_private *priv,
skb_queue_tail(&ptr->skb_head, skb);
- ptr->total_pkts_size += skb->len;
- ptr->pkt_count++;
+ ptr->total_pkt_count++;
+ ptr->ba_pkt_count++;
tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
ra_list_flags);
@@ -1224,7 +1224,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
mwifiex_send_single_packet() */
} else {
if (mwifiex_is_ampdu_allowed(priv, tid) &&
- ptr->pkt_count > ptr->ba_packet_thr) {
+ ptr->ba_pkt_count > ptr->ba_packet_thr) {
if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
mwifiex_create_ba_tbl(priv, ptr->ra, tid,
BA_SETUP_INPROGRESS);
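The wmm.c rename splits the bookkeeping into two counters: total_pkt_count follows the number of packets currently queued on the RA list (incremented on enqueue and requeue, decremented on dequeue), while ba_pkt_count only grows and is compared against ba_packet_thr when deciding whether to start a block-ack session. A toy model of that split (userspace C, hypothetical names):

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the two counters: total_pkt_count mirrors the queue depth,
 * ba_pkt_count only grows and gates block-ack setup. */
struct ra_list {
	int total_pkt_count;
	int ba_pkt_count;
	int ba_packet_thr;
};

static void enqueue(struct ra_list *ra)
{
	ra->total_pkt_count++;
	ra->ba_pkt_count++;
}

static void dequeue(struct ra_list *ra)
{
	ra->total_pkt_count--;
}

static bool should_setup_ba(const struct ra_list *ra)
{
	return ra->ba_pkt_count > ra->ba_packet_thr;
}

int main(void)
{
	struct ra_list ra = { 0, 0, 3 };
	int i;

	for (i = 0; i < 5; i++) {
		enqueue(&ra);
		dequeue(&ra);	/* the queue never builds up... */
	}
	/* ...yet enough traffic has passed to justify a BA session */
	printf("queued=%d ba_count=%d setup_ba=%d\n",
	       ra.total_pkt_count, ra.ba_pkt_count, should_setup_ba(&ra));
	return 0;
}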
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 9b915d3a44b..68dbbb9c6d1 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -1,6 +1,6 @@
menuconfig RT2X00
tristate "Ralink driver support"
- depends on MAC80211
+ depends on MAC80211 && HAS_DMA
---help---
This will enable the support for the Ralink drivers,
developed in the rt2x00 project <http://rt2x00.serialmonkey.com>.
@@ -166,6 +166,12 @@ config RT2800USB_RT35XX
rt2800usb driver.
Supported chips: RT3572
+config RT2800USB_RT3573
+ bool "rt2800usb - Include support for rt3573 devices (EXPERIMENTAL)"
+ ---help---
+ This enables support for RT3573 chipset based wireless USB devices
+ in the rt2800usb driver.
+
config RT2800USB_RT53XX
bool "rt2800usb - Include support for rt53xx devices (EXPERIMENTAL)"
---help---
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index d78c495a86a..fa33b5edf93 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -88,6 +88,7 @@
#define REV_RT3071E 0x0211
#define REV_RT3090E 0x0211
#define REV_RT3390E 0x0211
+#define REV_RT3593E 0x0211
#define REV_RT5390F 0x0502
#define REV_RT5390R 0x1502
#define REV_RT5592C 0x0221
@@ -1082,6 +1083,15 @@
#define TX_PWR_CFG_0_9MBS FIELD32(0x00f00000)
#define TX_PWR_CFG_0_12MBS FIELD32(0x0f000000)
#define TX_PWR_CFG_0_18MBS FIELD32(0xf0000000)
+/* bits for 3T devices */
+#define TX_PWR_CFG_0_CCK1_CH0 FIELD32(0x0000000f)
+#define TX_PWR_CFG_0_CCK1_CH1 FIELD32(0x000000f0)
+#define TX_PWR_CFG_0_CCK5_CH0 FIELD32(0x00000f00)
+#define TX_PWR_CFG_0_CCK5_CH1 FIELD32(0x0000f000)
+#define TX_PWR_CFG_0_OFDM6_CH0 FIELD32(0x000f0000)
+#define TX_PWR_CFG_0_OFDM6_CH1 FIELD32(0x00f00000)
+#define TX_PWR_CFG_0_OFDM12_CH0 FIELD32(0x0f000000)
+#define TX_PWR_CFG_0_OFDM12_CH1 FIELD32(0xf0000000)
/*
* TX_PWR_CFG_1:
@@ -1095,6 +1105,15 @@
#define TX_PWR_CFG_1_MCS1 FIELD32(0x00f00000)
#define TX_PWR_CFG_1_MCS2 FIELD32(0x0f000000)
#define TX_PWR_CFG_1_MCS3 FIELD32(0xf0000000)
+/* bits for 3T devices */
+#define TX_PWR_CFG_1_OFDM24_CH0 FIELD32(0x0000000f)
+#define TX_PWR_CFG_1_OFDM24_CH1 FIELD32(0x000000f0)
+#define TX_PWR_CFG_1_OFDM48_CH0 FIELD32(0x00000f00)
+#define TX_PWR_CFG_1_OFDM48_CH1 FIELD32(0x0000f000)
+#define TX_PWR_CFG_1_MCS0_CH0 FIELD32(0x000f0000)
+#define TX_PWR_CFG_1_MCS0_CH1 FIELD32(0x00f00000)
+#define TX_PWR_CFG_1_MCS2_CH0 FIELD32(0x0f000000)
+#define TX_PWR_CFG_1_MCS2_CH1 FIELD32(0xf0000000)
/*
* TX_PWR_CFG_2:
@@ -1108,6 +1127,15 @@
#define TX_PWR_CFG_2_MCS9 FIELD32(0x00f00000)
#define TX_PWR_CFG_2_MCS10 FIELD32(0x0f000000)
#define TX_PWR_CFG_2_MCS11 FIELD32(0xf0000000)
+/* bits for 3T devices */
+#define TX_PWR_CFG_2_MCS4_CH0 FIELD32(0x0000000f)
+#define TX_PWR_CFG_2_MCS4_CH1 FIELD32(0x000000f0)
+#define TX_PWR_CFG_2_MCS6_CH0 FIELD32(0x00000f00)
+#define TX_PWR_CFG_2_MCS6_CH1 FIELD32(0x0000f000)
+#define TX_PWR_CFG_2_MCS8_CH0 FIELD32(0x000f0000)
+#define TX_PWR_CFG_2_MCS8_CH1 FIELD32(0x00f00000)
+#define TX_PWR_CFG_2_MCS10_CH0 FIELD32(0x0f000000)
+#define TX_PWR_CFG_2_MCS10_CH1 FIELD32(0xf0000000)
/*
* TX_PWR_CFG_3:
@@ -1121,6 +1149,15 @@
#define TX_PWR_CFG_3_UKNOWN2 FIELD32(0x00f00000)
#define TX_PWR_CFG_3_UKNOWN3 FIELD32(0x0f000000)
#define TX_PWR_CFG_3_UKNOWN4 FIELD32(0xf0000000)
+/* bits for 3T devices */
+#define TX_PWR_CFG_3_MCS12_CH0 FIELD32(0x0000000f)
+#define TX_PWR_CFG_3_MCS12_CH1 FIELD32(0x000000f0)
+#define TX_PWR_CFG_3_MCS14_CH0 FIELD32(0x00000f00)
+#define TX_PWR_CFG_3_MCS14_CH1 FIELD32(0x0000f000)
+#define TX_PWR_CFG_3_STBC0_CH0 FIELD32(0x000f0000)
+#define TX_PWR_CFG_3_STBC0_CH1 FIELD32(0x00f00000)
+#define TX_PWR_CFG_3_STBC2_CH0 FIELD32(0x0f000000)
+#define TX_PWR_CFG_3_STBC2_CH1 FIELD32(0xf0000000)
/*
* TX_PWR_CFG_4:
@@ -1130,6 +1167,11 @@
#define TX_PWR_CFG_4_UKNOWN6 FIELD32(0x000000f0)
#define TX_PWR_CFG_4_UKNOWN7 FIELD32(0x00000f00)
#define TX_PWR_CFG_4_UKNOWN8 FIELD32(0x0000f000)
+/* bits for 3T devices */
+#define TX_PWR_CFG_3_STBC4_CH0 FIELD32(0x0000000f)
+#define TX_PWR_CFG_3_STBC4_CH1 FIELD32(0x000000f0)
+#define TX_PWR_CFG_3_STBC6_CH0 FIELD32(0x00000f00)
+#define TX_PWR_CFG_3_STBC6_CH1 FIELD32(0x0000f000)
/*
* TX_PIN_CFG:
@@ -1451,6 +1493,81 @@
*/
#define EXP_ACK_TIME 0x1380
+/* TX_PWR_CFG_5 */
+#define TX_PWR_CFG_5 0x1384
+#define TX_PWR_CFG_5_MCS16_CH0 FIELD32(0x0000000f)
+#define TX_PWR_CFG_5_MCS16_CH1 FIELD32(0x000000f0)
+#define TX_PWR_CFG_5_MCS16_CH2 FIELD32(0x00000f00)
+#define TX_PWR_CFG_5_MCS18_CH0 FIELD32(0x000f0000)
+#define TX_PWR_CFG_5_MCS18_CH1 FIELD32(0x00f00000)
+#define TX_PWR_CFG_5_MCS18_CH2 FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_6 */
+#define TX_PWR_CFG_6 0x1388
+#define TX_PWR_CFG_6_MCS20_CH0 FIELD32(0x0000000f)
+#define TX_PWR_CFG_6_MCS20_CH1 FIELD32(0x000000f0)
+#define TX_PWR_CFG_6_MCS20_CH2 FIELD32(0x00000f00)
+#define TX_PWR_CFG_6_MCS22_CH0 FIELD32(0x000f0000)
+#define TX_PWR_CFG_6_MCS22_CH1 FIELD32(0x00f00000)
+#define TX_PWR_CFG_6_MCS22_CH2 FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_0_EXT */
+#define TX_PWR_CFG_0_EXT 0x1390
+#define TX_PWR_CFG_0_EXT_CCK1_CH2 FIELD32(0x0000000f)
+#define TX_PWR_CFG_0_EXT_CCK5_CH2 FIELD32(0x00000f00)
+#define TX_PWR_CFG_0_EXT_OFDM6_CH2 FIELD32(0x000f0000)
+#define TX_PWR_CFG_0_EXT_OFDM12_CH2 FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_1_EXT */
+#define TX_PWR_CFG_1_EXT 0x1394
+#define TX_PWR_CFG_1_EXT_OFDM24_CH2 FIELD32(0x0000000f)
+#define TX_PWR_CFG_1_EXT_OFDM48_CH2 FIELD32(0x00000f00)
+#define TX_PWR_CFG_1_EXT_MCS0_CH2 FIELD32(0x000f0000)
+#define TX_PWR_CFG_1_EXT_MCS2_CH2 FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_2_EXT */
+#define TX_PWR_CFG_2_EXT 0x1398
+#define TX_PWR_CFG_2_EXT_MCS4_CH2 FIELD32(0x0000000f)
+#define TX_PWR_CFG_2_EXT_MCS6_CH2 FIELD32(0x00000f00)
+#define TX_PWR_CFG_2_EXT_MCS8_CH2 FIELD32(0x000f0000)
+#define TX_PWR_CFG_2_EXT_MCS10_CH2 FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_3_EXT */
+#define TX_PWR_CFG_3_EXT 0x139c
+#define TX_PWR_CFG_3_EXT_MCS12_CH2 FIELD32(0x0000000f)
+#define TX_PWR_CFG_3_EXT_MCS14_CH2 FIELD32(0x00000f00)
+#define TX_PWR_CFG_3_EXT_STBC0_CH2 FIELD32(0x000f0000)
+#define TX_PWR_CFG_3_EXT_STBC2_CH2 FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_4_EXT */
+#define TX_PWR_CFG_4_EXT 0x13a0
+#define TX_PWR_CFG_4_EXT_STBC4_CH2 FIELD32(0x0000000f)
+#define TX_PWR_CFG_4_EXT_STBC6_CH2 FIELD32(0x00000f00)
+
+/* TX_PWR_CFG_7 */
+#define TX_PWR_CFG_7 0x13d4
+#define TX_PWR_CFG_7_OFDM54_CH0 FIELD32(0x0000000f)
+#define TX_PWR_CFG_7_OFDM54_CH1 FIELD32(0x000000f0)
+#define TX_PWR_CFG_7_OFDM54_CH2 FIELD32(0x00000f00)
+#define TX_PWR_CFG_7_MCS7_CH0 FIELD32(0x000f0000)
+#define TX_PWR_CFG_7_MCS7_CH1 FIELD32(0x00f00000)
+#define TX_PWR_CFG_7_MCS7_CH2 FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_8 */
+#define TX_PWR_CFG_8 0x13d8
+#define TX_PWR_CFG_8_MCS15_CH0 FIELD32(0x0000000f)
+#define TX_PWR_CFG_8_MCS15_CH1 FIELD32(0x000000f0)
+#define TX_PWR_CFG_8_MCS15_CH2 FIELD32(0x00000f00)
+#define TX_PWR_CFG_8_MCS23_CH0 FIELD32(0x000f0000)
+#define TX_PWR_CFG_8_MCS23_CH1 FIELD32(0x00f00000)
+#define TX_PWR_CFG_8_MCS23_CH2 FIELD32(0x0f000000)
+
+/* TX_PWR_CFG_9 */
+#define TX_PWR_CFG_9 0x13dc
+#define TX_PWR_CFG_9_STBC7_CH0 FIELD32(0x0000000f)
+#define TX_PWR_CFG_9_STBC7_CH1 FIELD32(0x000000f0)
+#define TX_PWR_CFG_9_STBC7_CH2 FIELD32(0x00000f00)
+
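The TX_PWR_CFG_* defines above carve each 32-bit register into 4-bit per-rate, per-chain power fields described by FIELD32() masks. The rt2x00 accessors are not shown in this hunk; the sketch below is only a generic illustration of reading and writing a mask-described field by shifting with the mask's lowest set bit, not the driver's implementation:

#include <stdint.h>
#include <stdio.h>

/* Generic field access given only a contiguous bit mask. */
static uint32_t field_get(uint32_t reg, uint32_t mask)
{
	return (reg & mask) / (mask & ~(mask - 1));
}

static uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t val)
{
	return (reg & ~mask) | ((val * (mask & ~(mask - 1))) & mask);
}

int main(void)
{
	uint32_t reg = 0;

	/* e.g. a *_CH1 nibble such as TX_PWR_CFG_0_CCK1_CH1 (0x000000f0) */
	reg = field_set(reg, 0x000000f0, 0x7);
	printf("reg=0x%08x ch1=%u\n", reg,
	       (unsigned)field_get(reg, 0x000000f0));
	return 0;
}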
/*
* RX_FILTER_CFG: RX configuration register.
*/
@@ -1902,11 +2019,13 @@ struct mac_iveiv_entry {
#define HW_BEACON_BASE6 0x5dc0
#define HW_BEACON_BASE7 0x5bc0
-#define HW_BEACON_OFFSET(__index) \
+#define HW_BEACON_BASE(__index) \
(((__index) < 4) ? (HW_BEACON_BASE0 + (__index * 0x0200)) : \
(((__index) < 6) ? (HW_BEACON_BASE4 + ((__index - 4) * 0x0200)) : \
(HW_BEACON_BASE6 - ((__index - 6) * 0x0200))))
+#define BEACON_BASE_TO_OFFSET(_base) (((_base) - 0x4000) / 64)
+
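BEACON_BASE_TO_OFFSET() presumably converts a beacon base address back into 64-byte units relative to 0x4000, the apparent start of the beacon memory region. Plugging in the two bases visible above gives a quick sanity check:

#include <stdio.h>

#define BEACON_BASE_TO_OFFSET(_base) (((_base) - 0x4000) / 64)

int main(void)
{
	/* bases taken from the HW_BEACON_BASE6/7 defines above */
	printf("0x5dc0 -> offset 0x%x\n", BEACON_BASE_TO_OFFSET(0x5dc0));
	printf("0x5bc0 -> offset 0x%x\n", BEACON_BASE_TO_OFFSET(0x5bc0));
	return 0;
}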
/*
* BBP registers.
* The wordsize of the BBP is 8 bits.
@@ -1975,6 +2094,10 @@ struct mac_iveiv_entry {
#define BBP109_TX0_POWER FIELD8(0x0f)
#define BBP109_TX1_POWER FIELD8(0xf0)
+/* BBP 110 */
+#define BBP110_TX2_POWER FIELD8(0x0f)
+
+
/*
* BBP 138: Unknown
*/
@@ -2024,6 +2147,12 @@ struct mac_iveiv_entry {
#define RFCSR3_PA2_CASCODE_BIAS_CCKK FIELD8(0x80)
/* Bits for RF3290/RF5360/RF5370/RF5372/RF5390/RF5392 */
#define RFCSR3_VCOCAL_EN FIELD8(0x80)
+/* Bits for RF3050 */
+#define RFCSR3_BIT1 FIELD8(0x02)
+#define RFCSR3_BIT2 FIELD8(0x04)
+#define RFCSR3_BIT3 FIELD8(0x08)
+#define RFCSR3_BIT4 FIELD8(0x10)
+#define RFCSR3_BIT5 FIELD8(0x20)
/*
* FRCSR 5:
@@ -2036,6 +2165,8 @@ struct mac_iveiv_entry {
#define RFCSR6_R1 FIELD8(0x03)
#define RFCSR6_R2 FIELD8(0x40)
#define RFCSR6_TXDIV FIELD8(0x0c)
+/* bits for RF3053 */
+#define RFCSR6_VCO_IC FIELD8(0xc0)
/*
* RFCSR 7:
@@ -2060,7 +2191,12 @@ struct mac_iveiv_entry {
* RFCSR 11:
*/
#define RFCSR11_R FIELD8(0x03)
+#define RFCSR11_PLL_MOD FIELD8(0x0c)
#define RFCSR11_MOD FIELD8(0xc0)
+/* bits for RF3053 */
+/* TODO: verify RFCSR11_MOD usage on other chips */
+#define RFCSR11_PLL_IDOH FIELD8(0x40)
+
/*
* RFCSR 12:
@@ -2092,6 +2228,10 @@ struct mac_iveiv_entry {
#define RFCSR17_R FIELD8(0x20)
#define RFCSR17_CODE FIELD8(0x7f)
+/* RFCSR 18 */
+#define RFCSR18_XO_TUNE_BYPASS FIELD8(0x40)
+
+
/*
* RFCSR 20:
*/
@@ -2152,6 +2292,12 @@ struct mac_iveiv_entry {
#define RFCSR31_RX_H20M FIELD8(0x20)
#define RFCSR31_RX_CALIB FIELD8(0x7f)
+/* RFCSR 32 bits for RF3053 */
+#define RFCSR32_TX_AGC_FC FIELD8(0xf8)
+
+/* RFCSR 36 bits for RF3053 */
+#define RFCSR36_RF_BS FIELD8(0x80)
+
/*
* RFCSR 38:
*/
@@ -2160,6 +2306,7 @@ struct mac_iveiv_entry {
/*
* RFCSR 39:
*/
+#define RFCSR39_RX_DIV FIELD8(0x40)
#define RFCSR39_RX_LO2_EN FIELD8(0x80)
/*
@@ -2167,12 +2314,36 @@ struct mac_iveiv_entry {
*/
#define RFCSR49_TX FIELD8(0x3f)
#define RFCSR49_EP FIELD8(0xc0)
+/* bits for RT3593 */
+#define RFCSR49_TX_LO1_IC FIELD8(0x1c)
+#define RFCSR49_TX_DIV FIELD8(0x20)
/*
* RFCSR 50:
*/
#define RFCSR50_TX FIELD8(0x3f)
#define RFCSR50_EP FIELD8(0xc0)
+/* bits for RT3593 */
+#define RFCSR50_TX_LO1_EN FIELD8(0x20)
+#define RFCSR50_TX_LO2_EN FIELD8(0x10)
+
+/* RFCSR 51 */
+/* bits for RT3593 */
+#define RFCSR51_BITS01 FIELD8(0x03)
+#define RFCSR51_BITS24 FIELD8(0x1c)
+#define RFCSR51_BITS57 FIELD8(0xe0)
+
+#define RFCSR53_TX_POWER FIELD8(0x3f)
+#define RFCSR53_UNKNOWN FIELD8(0xc0)
+
+#define RFCSR54_TX_POWER FIELD8(0x3f)
+#define RFCSR54_UNKNOWN FIELD8(0xc0)
+
+#define RFCSR55_TX_POWER FIELD8(0x3f)
+#define RFCSR55_UNKNOWN FIELD8(0xc0)
+
+#define RFCSR57_DRV_CC FIELD8(0xfc)
+
/*
* RF registers
@@ -2206,28 +2377,67 @@ struct mac_iveiv_entry {
* The wordsize of the EEPROM is 16 bits.
*/
-/*
- * Chip ID
- */
-#define EEPROM_CHIP_ID 0x0000
+enum rt2800_eeprom_word {
+ EEPROM_CHIP_ID = 0,
+ EEPROM_VERSION,
+ EEPROM_MAC_ADDR_0,
+ EEPROM_MAC_ADDR_1,
+ EEPROM_MAC_ADDR_2,
+ EEPROM_NIC_CONF0,
+ EEPROM_NIC_CONF1,
+ EEPROM_FREQ,
+ EEPROM_LED_AG_CONF,
+ EEPROM_LED_ACT_CONF,
+ EEPROM_LED_POLARITY,
+ EEPROM_NIC_CONF2,
+ EEPROM_LNA,
+ EEPROM_RSSI_BG,
+ EEPROM_RSSI_BG2,
+ EEPROM_TXMIXER_GAIN_BG,
+ EEPROM_RSSI_A,
+ EEPROM_RSSI_A2,
+ EEPROM_TXMIXER_GAIN_A,
+ EEPROM_EIRP_MAX_TX_POWER,
+ EEPROM_TXPOWER_DELTA,
+ EEPROM_TXPOWER_BG1,
+ EEPROM_TXPOWER_BG2,
+ EEPROM_TSSI_BOUND_BG1,
+ EEPROM_TSSI_BOUND_BG2,
+ EEPROM_TSSI_BOUND_BG3,
+ EEPROM_TSSI_BOUND_BG4,
+ EEPROM_TSSI_BOUND_BG5,
+ EEPROM_TXPOWER_A1,
+ EEPROM_TXPOWER_A2,
+ EEPROM_TSSI_BOUND_A1,
+ EEPROM_TSSI_BOUND_A2,
+ EEPROM_TSSI_BOUND_A3,
+ EEPROM_TSSI_BOUND_A4,
+ EEPROM_TSSI_BOUND_A5,
+ EEPROM_TXPOWER_BYRATE,
+ EEPROM_BBP_START,
+
+ /* IDs for extended EEPROM format used by three-chain devices */
+ EEPROM_EXT_LNA2,
+ EEPROM_EXT_TXPOWER_BG3,
+ EEPROM_EXT_TXPOWER_A3,
+
+ /* New values must be added before this */
+ EEPROM_WORD_COUNT
+};
/*
* EEPROM Version
*/
-#define EEPROM_VERSION 0x0001
#define EEPROM_VERSION_FAE FIELD16(0x00ff)
#define EEPROM_VERSION_VERSION FIELD16(0xff00)
/*
* HW MAC address.
*/
-#define EEPROM_MAC_ADDR_0 0x0002
#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff)
#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00)
-#define EEPROM_MAC_ADDR_1 0x0003
#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff)
#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00)
-#define EEPROM_MAC_ADDR_2 0x0004
#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff)
#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00)
@@ -2237,7 +2447,6 @@ struct mac_iveiv_entry {
* TXPATH: 1: 1T, 2: 2T, 3: 3T
* RF_TYPE: RFIC type
*/
-#define EEPROM_NIC_CONF0 0x001a
#define EEPROM_NIC_CONF0_RXPATH FIELD16(0x000f)
#define EEPROM_NIC_CONF0_TXPATH FIELD16(0x00f0)
#define EEPROM_NIC_CONF0_RF_TYPE FIELD16(0x0f00)
@@ -2261,7 +2470,6 @@ struct mac_iveiv_entry {
* BT_COEXIST: 0: disable, 1: enable
* DAC_TEST: 0: disable, 1: enable
*/
-#define EEPROM_NIC_CONF1 0x001b
#define EEPROM_NIC_CONF1_HW_RADIO FIELD16(0x0001)
#define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC FIELD16(0x0002)
#define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G FIELD16(0x0004)
@@ -2281,7 +2489,6 @@ struct mac_iveiv_entry {
/*
* EEPROM frequency
*/
-#define EEPROM_FREQ 0x001d
#define EEPROM_FREQ_OFFSET FIELD16(0x00ff)
#define EEPROM_FREQ_LED_MODE FIELD16(0x7f00)
#define EEPROM_FREQ_LED_POLARITY FIELD16(0x1000)
@@ -2298,9 +2505,6 @@ struct mac_iveiv_entry {
* POLARITY_GPIO_4: Polarity GPIO4 setting.
* LED_MODE: Led mode.
*/
-#define EEPROM_LED_AG_CONF 0x001e
-#define EEPROM_LED_ACT_CONF 0x001f
-#define EEPROM_LED_POLARITY 0x0020
#define EEPROM_LED_POLARITY_RDY_BG FIELD16(0x0001)
#define EEPROM_LED_POLARITY_RDY_A FIELD16(0x0002)
#define EEPROM_LED_POLARITY_ACT FIELD16(0x0004)
@@ -2317,7 +2521,6 @@ struct mac_iveiv_entry {
* TX_STREAM: 0: Reserved, 1: 1 Stream, 2: 2 Stream
* CRYSTAL: 00: Reserved, 01: One crystal, 10: Two crystal, 11: Reserved
*/
-#define EEPROM_NIC_CONF2 0x0021
#define EEPROM_NIC_CONF2_RX_STREAM FIELD16(0x000f)
#define EEPROM_NIC_CONF2_TX_STREAM FIELD16(0x00f0)
#define EEPROM_NIC_CONF2_CRYSTAL FIELD16(0x0600)
@@ -2325,54 +2528,46 @@ struct mac_iveiv_entry {
/*
* EEPROM LNA
*/
-#define EEPROM_LNA 0x0022
#define EEPROM_LNA_BG FIELD16(0x00ff)
#define EEPROM_LNA_A0 FIELD16(0xff00)
/*
* EEPROM RSSI BG offset
*/
-#define EEPROM_RSSI_BG 0x0023
#define EEPROM_RSSI_BG_OFFSET0 FIELD16(0x00ff)
#define EEPROM_RSSI_BG_OFFSET1 FIELD16(0xff00)
/*
* EEPROM RSSI BG2 offset
*/
-#define EEPROM_RSSI_BG2 0x0024
#define EEPROM_RSSI_BG2_OFFSET2 FIELD16(0x00ff)
#define EEPROM_RSSI_BG2_LNA_A1 FIELD16(0xff00)
/*
* EEPROM TXMIXER GAIN BG offset (note overlaps with EEPROM RSSI BG2).
*/
-#define EEPROM_TXMIXER_GAIN_BG 0x0024
#define EEPROM_TXMIXER_GAIN_BG_VAL FIELD16(0x0007)
/*
* EEPROM RSSI A offset
*/
-#define EEPROM_RSSI_A 0x0025
#define EEPROM_RSSI_A_OFFSET0 FIELD16(0x00ff)
#define EEPROM_RSSI_A_OFFSET1 FIELD16(0xff00)
/*
* EEPROM RSSI A2 offset
*/
-#define EEPROM_RSSI_A2 0x0026
#define EEPROM_RSSI_A2_OFFSET2 FIELD16(0x00ff)
#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00)
/*
* EEPROM TXMIXER GAIN A offset (note overlaps with EEPROM RSSI A2).
*/
-#define EEPROM_TXMIXER_GAIN_A 0x0026
#define EEPROM_TXMIXER_GAIN_A_VAL FIELD16(0x0007)
/*
* EEPROM EIRP Maximum TX power values(unit: dbm)
*/
-#define EEPROM_EIRP_MAX_TX_POWER 0x0027
#define EEPROM_EIRP_MAX_TX_POWER_2GHZ FIELD16(0x00ff)
#define EEPROM_EIRP_MAX_TX_POWER_5GHZ FIELD16(0xff00)
@@ -2383,7 +2578,6 @@ struct mac_iveiv_entry {
* TYPE: 1: Plus the delta value, 0: minus the delta value
* ENABLE: enable tx power compensation for 40BW
*/
-#define EEPROM_TXPOWER_DELTA 0x0028
#define EEPROM_TXPOWER_DELTA_VALUE_2G FIELD16(0x003f)
#define EEPROM_TXPOWER_DELTA_TYPE_2G FIELD16(0x0040)
#define EEPROM_TXPOWER_DELTA_ENABLE_2G FIELD16(0x0080)
@@ -2394,8 +2588,6 @@ struct mac_iveiv_entry {
/*
* EEPROM TXPOWER 802.11BG
*/
-#define EEPROM_TXPOWER_BG1 0x0029
-#define EEPROM_TXPOWER_BG2 0x0030
#define EEPROM_TXPOWER_BG_SIZE 7
#define EEPROM_TXPOWER_BG_1 FIELD16(0x00ff)
#define EEPROM_TXPOWER_BG_2 FIELD16(0xff00)
@@ -2407,7 +2599,6 @@ struct mac_iveiv_entry {
* MINUS3: If the actual TSSI is below this boundary, tx power needs to be
* reduced by (agc_step * -3)
*/
-#define EEPROM_TSSI_BOUND_BG1 0x0037
#define EEPROM_TSSI_BOUND_BG1_MINUS4 FIELD16(0x00ff)
#define EEPROM_TSSI_BOUND_BG1_MINUS3 FIELD16(0xff00)
@@ -2418,7 +2609,6 @@ struct mac_iveiv_entry {
* MINUS1: If the actual TSSI is below this boundary, tx power needs to be
* reduced by (agc_step * -1)
*/
-#define EEPROM_TSSI_BOUND_BG2 0x0038
#define EEPROM_TSSI_BOUND_BG2_MINUS2 FIELD16(0x00ff)
#define EEPROM_TSSI_BOUND_BG2_MINUS1 FIELD16(0xff00)
@@ -2428,7 +2618,6 @@ struct mac_iveiv_entry {
* PLUS1: If the actual TSSI is above this boundary, tx power needs to be
* increased by (agc_step * 1)
*/
-#define EEPROM_TSSI_BOUND_BG3 0x0039
#define EEPROM_TSSI_BOUND_BG3_REF FIELD16(0x00ff)
#define EEPROM_TSSI_BOUND_BG3_PLUS1 FIELD16(0xff00)
@@ -2439,7 +2628,6 @@ struct mac_iveiv_entry {
* PLUS3: If the actual TSSI is above this boundary, tx power needs to be
* increased by (agc_step * 3)
*/
-#define EEPROM_TSSI_BOUND_BG4 0x003a
#define EEPROM_TSSI_BOUND_BG4_PLUS2 FIELD16(0x00ff)
#define EEPROM_TSSI_BOUND_BG4_PLUS3 FIELD16(0xff00)
@@ -2449,19 +2637,20 @@ struct mac_iveiv_entry {
* increased by (agc_step * 4)
* AGC_STEP: Temperature compensation step.
*/
-#define EEPROM_TSSI_BOUND_BG5 0x003b
#define EEPROM_TSSI_BOUND_BG5_PLUS4 FIELD16(0x00ff)
#define EEPROM_TSSI_BOUND_BG5_AGC_STEP FIELD16(0xff00)
/*
* EEPROM TXPOWER 802.11A
*/
-#define EEPROM_TXPOWER_A1 0x003c
-#define EEPROM_TXPOWER_A2 0x0053
#define EEPROM_TXPOWER_A_SIZE 6
#define EEPROM_TXPOWER_A_1 FIELD16(0x00ff)
#define EEPROM_TXPOWER_A_2 FIELD16(0xff00)
+/* EEPROM_TXPOWER_{A,G} fields for RT3593 */
+#define EEPROM_TXPOWER_ALC FIELD8(0x1f)
+#define EEPROM_TXPOWER_FINE_CTRL FIELD8(0xe0)
+
/*
* EEPROM temperature compensation boundaries 802.11A
* MINUS4: If the actual TSSI is below this boundary, tx power needs to be
@@ -2469,7 +2658,6 @@ struct mac_iveiv_entry {
* MINUS3: If the actual TSSI is below this boundary, tx power needs to be
* reduced by (agc_step * -3)
*/
-#define EEPROM_TSSI_BOUND_A1 0x006a
#define EEPROM_TSSI_BOUND_A1_MINUS4 FIELD16(0x00ff)
#define EEPROM_TSSI_BOUND_A1_MINUS3 FIELD16(0xff00)
@@ -2480,7 +2668,6 @@ struct mac_iveiv_entry {
* MINUS1: If the actual TSSI is below this boundary, tx power needs to be
* reduced by (agc_step * -1)
*/
-#define EEPROM_TSSI_BOUND_A2 0x006b
#define EEPROM_TSSI_BOUND_A2_MINUS2 FIELD16(0x00ff)
#define EEPROM_TSSI_BOUND_A2_MINUS1 FIELD16(0xff00)
@@ -2490,7 +2677,6 @@ struct mac_iveiv_entry {
* PLUS1: If the actual TSSI is above this boundary, tx power needs to be
* increased by (agc_step * 1)
*/
-#define EEPROM_TSSI_BOUND_A3 0x006c
#define EEPROM_TSSI_BOUND_A3_REF FIELD16(0x00ff)
#define EEPROM_TSSI_BOUND_A3_PLUS1 FIELD16(0xff00)
@@ -2501,7 +2687,6 @@ struct mac_iveiv_entry {
* PLUS3: If the actual TSSI is above this boundary, tx power needs to be
* increased by (agc_step * 3)
*/
-#define EEPROM_TSSI_BOUND_A4 0x006d
#define EEPROM_TSSI_BOUND_A4_PLUS2 FIELD16(0x00ff)
#define EEPROM_TSSI_BOUND_A4_PLUS3 FIELD16(0xff00)
@@ -2511,14 +2696,12 @@ struct mac_iveiv_entry {
* increased by (agc_step * 4)
* AGC_STEP: Temperature compensation step.
*/
-#define EEPROM_TSSI_BOUND_A5 0x006e
#define EEPROM_TSSI_BOUND_A5_PLUS4 FIELD16(0x00ff)
#define EEPROM_TSSI_BOUND_A5_AGC_STEP FIELD16(0xff00)
/*
* EEPROM TXPOWER by rate: tx power per tx rate for HT20 mode
*/
-#define EEPROM_TXPOWER_BYRATE 0x006f
#define EEPROM_TXPOWER_BYRATE_SIZE 9
#define EEPROM_TXPOWER_BYRATE_RATE0 FIELD16(0x000f)
@@ -2529,11 +2712,14 @@ struct mac_iveiv_entry {
/*
* EEPROM BBP.
*/
-#define EEPROM_BBP_START 0x0078
#define EEPROM_BBP_SIZE 16
#define EEPROM_BBP_VALUE FIELD16(0x00ff)
#define EEPROM_BBP_REG_ID FIELD16(0xff00)
+/* EEPROM_EXT_LNA2 */
+#define EEPROM_EXT_LNA2_A1 FIELD16(0x00ff)
+#define EEPROM_EXT_LNA2_A2 FIELD16(0xff00)
+
/*
* EEPROM IQ Calibration, unlike other entries those are byte addresses.
*/
@@ -2610,6 +2796,7 @@ struct mac_iveiv_entry {
#define MCU_RADAR 0x60
#define MCU_BOOT_SIGNAL 0x72
#define MCU_ANT_SELECT 0X73
+#define MCU_FREQ_OFFSET 0x74
#define MCU_BBP_SIGNAL 0x80
#define MCU_POWER_SAVE 0x83
#define MCU_BAND_SELECT 0x91
@@ -2630,6 +2817,7 @@ struct mac_iveiv_entry {
#define TXWI_DESC_SIZE_5WORDS (5 * sizeof(__le32))
#define RXWI_DESC_SIZE_4WORDS (4 * sizeof(__le32))
+#define RXWI_DESC_SIZE_5WORDS (5 * sizeof(__le32))
#define RXWI_DESC_SIZE_6WORDS (6 * sizeof(__le32))
/*
@@ -2750,18 +2938,15 @@ struct mac_iveiv_entry {
#define MAX_A_TXPOWER 15
#define DEFAULT_TXPOWER 5
+#define MIN_A_TXPOWER_3593 0
+#define MAX_A_TXPOWER_3593 31
+
#define TXPOWER_G_FROM_DEV(__txpower) \
((__txpower) > MAX_G_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
-#define TXPOWER_G_TO_DEV(__txpower) \
- clamp_t(char, __txpower, MIN_G_TXPOWER, MAX_G_TXPOWER)
-
#define TXPOWER_A_FROM_DEV(__txpower) \
((__txpower) > MAX_A_TXPOWER) ? DEFAULT_TXPOWER : (__txpower)
-#define TXPOWER_A_TO_DEV(__txpower) \
- clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER)
-
/*
* Board's maximun TX power limitation
*/
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 1f80ea5e29d..95e6e61c3de 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -221,6 +221,157 @@ static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev,
mutex_unlock(&rt2x00dev->csr_mutex);
}
+static const unsigned int rt2800_eeprom_map[EEPROM_WORD_COUNT] = {
+ [EEPROM_CHIP_ID] = 0x0000,
+ [EEPROM_VERSION] = 0x0001,
+ [EEPROM_MAC_ADDR_0] = 0x0002,
+ [EEPROM_MAC_ADDR_1] = 0x0003,
+ [EEPROM_MAC_ADDR_2] = 0x0004,
+ [EEPROM_NIC_CONF0] = 0x001a,
+ [EEPROM_NIC_CONF1] = 0x001b,
+ [EEPROM_FREQ] = 0x001d,
+ [EEPROM_LED_AG_CONF] = 0x001e,
+ [EEPROM_LED_ACT_CONF] = 0x001f,
+ [EEPROM_LED_POLARITY] = 0x0020,
+ [EEPROM_NIC_CONF2] = 0x0021,
+ [EEPROM_LNA] = 0x0022,
+ [EEPROM_RSSI_BG] = 0x0023,
+ [EEPROM_RSSI_BG2] = 0x0024,
+ [EEPROM_TXMIXER_GAIN_BG] = 0x0024, /* overlaps with RSSI_BG2 */
+ [EEPROM_RSSI_A] = 0x0025,
+ [EEPROM_RSSI_A2] = 0x0026,
+ [EEPROM_TXMIXER_GAIN_A] = 0x0026, /* overlaps with RSSI_A2 */
+ [EEPROM_EIRP_MAX_TX_POWER] = 0x0027,
+ [EEPROM_TXPOWER_DELTA] = 0x0028,
+ [EEPROM_TXPOWER_BG1] = 0x0029,
+ [EEPROM_TXPOWER_BG2] = 0x0030,
+ [EEPROM_TSSI_BOUND_BG1] = 0x0037,
+ [EEPROM_TSSI_BOUND_BG2] = 0x0038,
+ [EEPROM_TSSI_BOUND_BG3] = 0x0039,
+ [EEPROM_TSSI_BOUND_BG4] = 0x003a,
+ [EEPROM_TSSI_BOUND_BG5] = 0x003b,
+ [EEPROM_TXPOWER_A1] = 0x003c,
+ [EEPROM_TXPOWER_A2] = 0x0053,
+ [EEPROM_TSSI_BOUND_A1] = 0x006a,
+ [EEPROM_TSSI_BOUND_A2] = 0x006b,
+ [EEPROM_TSSI_BOUND_A3] = 0x006c,
+ [EEPROM_TSSI_BOUND_A4] = 0x006d,
+ [EEPROM_TSSI_BOUND_A5] = 0x006e,
+ [EEPROM_TXPOWER_BYRATE] = 0x006f,
+ [EEPROM_BBP_START] = 0x0078,
+};
+
+static const unsigned int rt2800_eeprom_map_ext[EEPROM_WORD_COUNT] = {
+ [EEPROM_CHIP_ID] = 0x0000,
+ [EEPROM_VERSION] = 0x0001,
+ [EEPROM_MAC_ADDR_0] = 0x0002,
+ [EEPROM_MAC_ADDR_1] = 0x0003,
+ [EEPROM_MAC_ADDR_2] = 0x0004,
+ [EEPROM_NIC_CONF0] = 0x001a,
+ [EEPROM_NIC_CONF1] = 0x001b,
+ [EEPROM_NIC_CONF2] = 0x001c,
+ [EEPROM_EIRP_MAX_TX_POWER] = 0x0020,
+ [EEPROM_FREQ] = 0x0022,
+ [EEPROM_LED_AG_CONF] = 0x0023,
+ [EEPROM_LED_ACT_CONF] = 0x0024,
+ [EEPROM_LED_POLARITY] = 0x0025,
+ [EEPROM_LNA] = 0x0026,
+ [EEPROM_EXT_LNA2] = 0x0027,
+ [EEPROM_RSSI_BG] = 0x0028,
+ [EEPROM_TXPOWER_DELTA] = 0x0028, /* Overlaps with RSSI_BG */
+ [EEPROM_RSSI_BG2] = 0x0029,
+ [EEPROM_TXMIXER_GAIN_BG] = 0x0029, /* Overlaps with RSSI_BG2 */
+ [EEPROM_RSSI_A] = 0x002a,
+ [EEPROM_RSSI_A2] = 0x002b,
+ [EEPROM_TXMIXER_GAIN_A] = 0x002b, /* Overlaps with RSSI_A2 */
+ [EEPROM_TXPOWER_BG1] = 0x0030,
+ [EEPROM_TXPOWER_BG2] = 0x0037,
+ [EEPROM_EXT_TXPOWER_BG3] = 0x003e,
+ [EEPROM_TSSI_BOUND_BG1] = 0x0045,
+ [EEPROM_TSSI_BOUND_BG2] = 0x0046,
+ [EEPROM_TSSI_BOUND_BG3] = 0x0047,
+ [EEPROM_TSSI_BOUND_BG4] = 0x0048,
+ [EEPROM_TSSI_BOUND_BG5] = 0x0049,
+ [EEPROM_TXPOWER_A1] = 0x004b,
+ [EEPROM_TXPOWER_A2] = 0x0065,
+ [EEPROM_EXT_TXPOWER_A3] = 0x007f,
+ [EEPROM_TSSI_BOUND_A1] = 0x009a,
+ [EEPROM_TSSI_BOUND_A2] = 0x009b,
+ [EEPROM_TSSI_BOUND_A3] = 0x009c,
+ [EEPROM_TSSI_BOUND_A4] = 0x009d,
+ [EEPROM_TSSI_BOUND_A5] = 0x009e,
+ [EEPROM_TXPOWER_BYRATE] = 0x00a0,
+};
+
+static unsigned int rt2800_eeprom_word_index(struct rt2x00_dev *rt2x00dev,
+ const enum rt2800_eeprom_word word)
+{
+ const unsigned int *map;
+ unsigned int index;
+
+ if (WARN_ONCE(word >= EEPROM_WORD_COUNT,
+ "%s: invalid EEPROM word %d\n",
+ wiphy_name(rt2x00dev->hw->wiphy), word))
+ return 0;
+
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ map = rt2800_eeprom_map_ext;
+ else
+ map = rt2800_eeprom_map;
+
+ index = map[word];
+
+ /* Index 0 is valid only for EEPROM_CHIP_ID.
+ * Otherwise it means that the offset of the
+ * given word is not initialized in the map,
+ * or that the field is not usable on the
+ * chipset in use.
+ */
+ WARN_ONCE(word != EEPROM_CHIP_ID && index == 0,
+ "%s: invalid access of EEPROM word %d\n",
+ wiphy_name(rt2x00dev->hw->wiphy), word);
+
+ return index;
+}
+
+static void *rt2800_eeprom_addr(struct rt2x00_dev *rt2x00dev,
+ const enum rt2800_eeprom_word word)
+{
+ unsigned int index;
+
+ index = rt2800_eeprom_word_index(rt2x00dev, word);
+ return rt2x00_eeprom_addr(rt2x00dev, index);
+}
+
+static void rt2800_eeprom_read(struct rt2x00_dev *rt2x00dev,
+ const enum rt2800_eeprom_word word, u16 *data)
+{
+ unsigned int index;
+
+ index = rt2800_eeprom_word_index(rt2x00dev, word);
+ rt2x00_eeprom_read(rt2x00dev, index, data);
+}
+
+static void rt2800_eeprom_write(struct rt2x00_dev *rt2x00dev,
+ const enum rt2800_eeprom_word word, u16 data)
+{
+ unsigned int index;
+
+ index = rt2800_eeprom_word_index(rt2x00dev, word);
+ rt2x00_eeprom_write(rt2x00dev, index, data);
+}
+
+static void rt2800_eeprom_read_from_array(struct rt2x00_dev *rt2x00dev,
+ const enum rt2800_eeprom_word array,
+ unsigned int offset,
+ u16 *data)
+{
+ unsigned int index;
+
+ index = rt2800_eeprom_word_index(rt2x00dev, array);
+ rt2x00_eeprom_read(rt2x00dev, index + offset, data);
+}
+
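With the accessors above, callers name EEPROM words by enum value and a per-chipset table supplies the physical word offset, so the RT3593's extended layout needs no changes at the call sites. A miniature standalone version of that indirection (enum and map contents trimmed to a few words, with offsets taken from the tables above):

#include <stdio.h>

/* Logical EEPROM words; physical offsets live in per-layout tables. */
enum eeprom_word { W_CHIP_ID, W_MAC_ADDR_0, W_FREQ, W_WORD_COUNT };

static const unsigned int map_default[W_WORD_COUNT] = {
	[W_CHIP_ID]    = 0x0000,
	[W_MAC_ADDR_0] = 0x0002,
	[W_FREQ]       = 0x001d,
};

static const unsigned int map_ext[W_WORD_COUNT] = {
	/* extended (RT3593-style) layout moves several words around */
	[W_CHIP_ID]    = 0x0000,
	[W_MAC_ADDR_0] = 0x0002,
	[W_FREQ]       = 0x0022,
};

static unsigned int word_index(int use_ext_layout, enum eeprom_word word)
{
	const unsigned int *map = use_ext_layout ? map_ext : map_default;

	return map[word];	/* callers never hard-code physical offsets */
}

int main(void)
{
	printf("FREQ word: default=0x%04x ext=0x%04x\n",
	       word_index(0, W_FREQ), word_index(1, W_FREQ));
	return 0;
}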
static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
@@ -370,6 +521,29 @@ void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev)
}
EXPORT_SYMBOL_GPL(rt2800_disable_wpdma);
+void rt2800_get_txwi_rxwi_size(struct rt2x00_dev *rt2x00dev,
+ unsigned short *txwi_size,
+ unsigned short *rxwi_size)
+{
+ switch (rt2x00dev->chip.rt) {
+ case RT3593:
+ *txwi_size = TXWI_DESC_SIZE_4WORDS;
+ *rxwi_size = RXWI_DESC_SIZE_5WORDS;
+ break;
+
+ case RT5592:
+ *txwi_size = TXWI_DESC_SIZE_5WORDS;
+ *rxwi_size = RXWI_DESC_SIZE_6WORDS;
+ break;
+
+ default:
+ *txwi_size = TXWI_DESC_SIZE_4WORDS;
+ *rxwi_size = RXWI_DESC_SIZE_4WORDS;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(rt2800_get_txwi_rxwi_size);
+
static bool rt2800_check_firmware_crc(const u8 *data, const size_t len)
{
u16 fw_crc;
@@ -609,16 +783,16 @@ static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2)
u8 offset2;
if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &eeprom);
offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0);
offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_OFFSET2);
} else {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &eeprom);
offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET0);
offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET1);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_OFFSET2);
}
@@ -766,6 +940,18 @@ void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi)
}
EXPORT_SYMBOL_GPL(rt2800_txdone_entry);
+static unsigned int rt2800_hw_beacon_base(struct rt2x00_dev *rt2x00dev,
+ unsigned int index)
+{
+ return HW_BEACON_BASE(index);
+}
+
+static inline u8 rt2800_get_beacon_offset(struct rt2x00_dev *rt2x00dev,
+ unsigned int index)
+{
+ return BEACON_BASE_TO_OFFSET(rt2800_hw_beacon_base(rt2x00dev, index));
+}
+
void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
@@ -818,7 +1004,8 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
return;
}
- beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
+ beacon_base = rt2800_hw_beacon_base(rt2x00dev, entry->entry_idx);
+
rt2800_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
entry->skb->len + padding_len);
@@ -837,10 +1024,13 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
EXPORT_SYMBOL_GPL(rt2800_write_beacon);
static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev,
- unsigned int beacon_base)
+ unsigned int index)
{
int i;
const int txwi_desc_size = rt2x00dev->bcn->winfo_size;
+ unsigned int beacon_base;
+
+ beacon_base = rt2800_hw_beacon_base(rt2x00dev, index);
/*
* For the Beacon base registers we only need to clear
@@ -867,8 +1057,7 @@ void rt2800_clear_beacon(struct queue_entry *entry)
/*
* Clear beacon.
*/
- rt2800_clear_beacon_register(rt2x00dev,
- HW_BEACON_OFFSET(entry->entry_idx));
+ rt2800_clear_beacon_register(rt2x00dev, entry->entry_idx);
/*
* Enabled beaconing again.
@@ -890,6 +1079,9 @@ const struct rt2x00debug rt2800_rt2x00debug = {
.word_count = CSR_REG_SIZE / sizeof(u32),
},
.eeprom = {
+ /* NOTE: The local EEPROM access functions can't
+ * be used here; use the generic versions instead.
+ */
.read = rt2x00_eeprom_read,
.write = rt2x00_eeprom_write,
.word_base = EEPROM_BASE,
@@ -1547,7 +1739,7 @@ static void rt2800_config_3572bt_ant(struct rt2x00_dev *rt2x00dev)
led_r_mode = rt2x00_get_field32(reg, LED_CFG_LED_POLAR) ? 0 : 3;
if (led_g_mode != rt2x00_get_field32(reg, LED_CFG_G_LED_MODE) ||
led_r_mode != rt2x00_get_field32(reg, LED_CFG_R_LED_MODE)) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
led_ctrl = rt2x00_get_field16(eeprom, EEPROM_FREQ_LED_MODE);
if (led_ctrl == 0 || led_ctrl > 0x40) {
rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, led_g_mode);
@@ -1609,7 +1801,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
break;
case 3:
- rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
+ rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
break;
}
@@ -1622,7 +1814,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
rt2x00_rt(rt2x00dev, RT3090) ||
rt2x00_rt(rt2x00dev, RT3352) ||
rt2x00_rt(rt2x00dev, RT3390)) {
- rt2x00_eeprom_read(rt2x00dev,
+ rt2800_eeprom_read(rt2x00dev,
EEPROM_NIC_CONF1, &eeprom);
if (rt2x00_get_field16(eeprom,
EEPROM_NIC_CONF1_ANT_DIVERSITY))
@@ -1649,6 +1841,13 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
rt2800_bbp_write(rt2x00dev, 3, r3);
rt2800_bbp_write(rt2x00dev, 1, r1);
+
+ if (rt2x00_rt(rt2x00dev, RT3593)) {
+ if (ant->rx_chain_num == 1)
+ rt2800_bbp_write(rt2x00dev, 86, 0x00);
+ else
+ rt2800_bbp_write(rt2x00dev, 86, 0x46);
+ }
}
EXPORT_SYMBOL_GPL(rt2800_config_ant);
@@ -1659,22 +1858,73 @@ static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev,
short lna_gain;
if (libconf->rf.channel <= 14) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_BG);
} else if (libconf->rf.channel <= 64) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0);
} else if (libconf->rf.channel <= 128) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
- lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_LNA_A1);
+ if (rt2x00_rt(rt2x00dev, RT3593)) {
+ rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2, &eeprom);
+ lna_gain = rt2x00_get_field16(eeprom,
+ EEPROM_EXT_LNA2_A1);
+ } else {
+ rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
+ lna_gain = rt2x00_get_field16(eeprom,
+ EEPROM_RSSI_BG2_LNA_A1);
+ }
} else {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
- lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_LNA_A2);
+ if (rt2x00_rt(rt2x00dev, RT3593)) {
+ rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2, &eeprom);
+ lna_gain = rt2x00_get_field16(eeprom,
+ EEPROM_EXT_LNA2_A2);
+ } else {
+ rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
+ lna_gain = rt2x00_get_field16(eeprom,
+ EEPROM_RSSI_A2_LNA_A2);
+ }
}
rt2x00dev->lna_gain = lna_gain;
}
+#define FREQ_OFFSET_BOUND 0x5f
+
+static void rt2800_adjust_freq_offset(struct rt2x00_dev *rt2x00dev)
+{
+ u8 freq_offset, prev_freq_offset;
+ u8 rfcsr, prev_rfcsr;
+
+ freq_offset = rt2x00_get_field8(rt2x00dev->freq_offset, RFCSR17_CODE);
+ freq_offset = min_t(u8, freq_offset, FREQ_OFFSET_BOUND);
+
+ rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
+ prev_rfcsr = rfcsr;
+
+ rt2x00_set_field8(&rfcsr, RFCSR17_CODE, freq_offset);
+ if (rfcsr == prev_rfcsr)
+ return;
+
+ if (rt2x00_is_usb(rt2x00dev)) {
+ rt2800_mcu_request(rt2x00dev, MCU_FREQ_OFFSET, 0xff,
+ freq_offset, prev_rfcsr);
+ return;
+ }
+
+ prev_freq_offset = rt2x00_get_field8(prev_rfcsr, RFCSR17_CODE);
+ while (prev_freq_offset != freq_offset) {
+ if (prev_freq_offset < freq_offset)
+ prev_freq_offset++;
+ else
+ prev_freq_offset--;
+
+ rt2x00_set_field8(&rfcsr, RFCSR17_CODE, prev_freq_offset);
+ rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+
+ usleep_range(1000, 1500);
+ }
+}
+
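For non-USB devices the new rt2800_adjust_freq_offset() walks the programmed RFCSR17 code toward the target one step at a time, sleeping between writes, instead of jumping straight to the final value. A stripped-down sketch of that ramp (the register write and sleep are stubbed out):

#include <stdio.h>

/* Stubbed register write; the driver calls rt2800_rfcsr_write() and then
 * usleep_range(1000, 1500) between steps. */
static void write_rfcsr17(unsigned char code)
{
	printf("RFCSR17 code = 0x%02x\n", code);
}

/* Move the programmed value one step per iteration toward the target. */
static void ramp_freq_offset(unsigned char current, unsigned char target)
{
	while (current != target) {
		if (current < target)
			current++;
		else
			current--;
		write_rfcsr17(current);
	}
}

int main(void)
{
	ramp_freq_offset(0x10, 0x15);	/* writes 0x11 through 0x15 */
	return 0;
}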
static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev,
struct ieee80211_conf *conf,
struct rf_channel *rf,
@@ -1993,22 +2243,306 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev,
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
}
-#define POWER_BOUND 0x27
-#define POWER_BOUND_5G 0x2b
-#define FREQ_OFFSET_BOUND 0x5f
-
-static void rt2800_adjust_freq_offset(struct rt2x00_dev *rt2x00dev)
+static void rt2800_config_channel_rf3053(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_conf *conf,
+ struct rf_channel *rf,
+ struct channel_info *info)
{
+ struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+ u8 txrx_agc_fc;
+ u8 txrx_h20m;
u8 rfcsr;
+ u8 bbp;
+ const bool txbf_enabled = false; /* TODO */
- rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
- if (rt2x00dev->freq_offset > FREQ_OFFSET_BOUND)
- rt2x00_set_field8(&rfcsr, RFCSR17_CODE, FREQ_OFFSET_BOUND);
+ /* TODO: use TX{0,1,2}FinePowerControl values from EEPROM */
+ rt2800_bbp_read(rt2x00dev, 109, &bbp);
+ rt2x00_set_field8(&bbp, BBP109_TX0_POWER, 0);
+ rt2x00_set_field8(&bbp, BBP109_TX1_POWER, 0);
+ rt2800_bbp_write(rt2x00dev, 109, bbp);
+
+ rt2800_bbp_read(rt2x00dev, 110, &bbp);
+ rt2x00_set_field8(&bbp, BBP110_TX2_POWER, 0);
+ rt2800_bbp_write(rt2x00dev, 110, bbp);
+
+ if (rf->channel <= 14) {
+ /* Restore BBP 25 & 26 for 2.4 GHz */
+ rt2800_bbp_write(rt2x00dev, 25, drv_data->bbp25);
+ rt2800_bbp_write(rt2x00dev, 26, drv_data->bbp26);
+ } else {
+ /* Hard code BBP 25 & 26 for 5GHz */
+
+ /* Enable IQ Phase correction */
+ rt2800_bbp_write(rt2x00dev, 25, 0x09);
+ /* Setup IQ Phase correction value */
+ rt2800_bbp_write(rt2x00dev, 26, 0xff);
+ }
+
+ rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1);
+ rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3 & 0xf);
+
+ rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR11_R, (rf->rf2 & 0x3));
+ rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 11, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR11_PLL_IDOH, 1);
+ if (rf->channel <= 14)
+ rt2x00_set_field8(&rfcsr, RFCSR11_PLL_MOD, 1);
else
- rt2x00_set_field8(&rfcsr, RFCSR17_CODE, rt2x00dev->freq_offset);
- rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR11_PLL_MOD, 2);
+ rt2800_rfcsr_write(rt2x00dev, 11, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 53, &rfcsr);
+ if (rf->channel <= 14) {
+ rfcsr = 0;
+ rt2x00_set_field8(&rfcsr, RFCSR53_TX_POWER,
+ info->default_power1 & 0x1f);
+ } else {
+ if (rt2x00_is_usb(rt2x00dev))
+ rfcsr = 0x40;
+
+ rt2x00_set_field8(&rfcsr, RFCSR53_TX_POWER,
+ ((info->default_power1 & 0x18) << 1) |
+ (info->default_power1 & 7));
+ }
+ rt2800_rfcsr_write(rt2x00dev, 53, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 55, &rfcsr);
+ if (rf->channel <= 14) {
+ rfcsr = 0;
+ rt2x00_set_field8(&rfcsr, RFCSR55_TX_POWER,
+ info->default_power2 & 0x1f);
+ } else {
+ if (rt2x00_is_usb(rt2x00dev))
+ rfcsr = 0x40;
+
+ rt2x00_set_field8(&rfcsr, RFCSR55_TX_POWER,
+ ((info->default_power2 & 0x18) << 1) |
+ (info->default_power2 & 7));
+ }
+ rt2800_rfcsr_write(rt2x00dev, 55, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 54, &rfcsr);
+ if (rf->channel <= 14) {
+ rfcsr = 0;
+ rt2x00_set_field8(&rfcsr, RFCSR54_TX_POWER,
+ info->default_power3 & 0x1f);
+ } else {
+ if (rt2x00_is_usb(rt2x00dev))
+ rfcsr = 0x40;
+
+ rt2x00_set_field8(&rfcsr, RFCSR54_TX_POWER,
+ ((info->default_power3 & 0x18) << 1) |
+ (info->default_power3 & 7));
+ }
+ rt2800_rfcsr_write(rt2x00dev, 54, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
+
+ switch (rt2x00dev->default_ant.tx_chain_num) {
+ case 3:
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1);
+ /* fallthrough */
+ case 2:
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
+ /* fallthrough */
+ case 1:
+ rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1);
+ break;
+ }
+
+ switch (rt2x00dev->default_ant.rx_chain_num) {
+ case 3:
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1);
+ /* fallthrough */
+ case 2:
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
+ /* fallthrough */
+ case 1:
+ rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1);
+ break;
+ }
+ rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+ rt2800_adjust_freq_offset(rt2x00dev);
+
+ if (conf_is_ht40(conf)) {
+ txrx_agc_fc = rt2x00_get_field8(drv_data->calibration_bw40,
+ RFCSR24_TX_AGC_FC);
+ txrx_h20m = rt2x00_get_field8(drv_data->calibration_bw40,
+ RFCSR24_TX_H20M);
+ } else {
+ txrx_agc_fc = rt2x00_get_field8(drv_data->calibration_bw20,
+ RFCSR24_TX_AGC_FC);
+ txrx_h20m = rt2x00_get_field8(drv_data->calibration_bw20,
+ RFCSR24_TX_H20M);
+ }
+
+ /* NOTE: the reference driver does not write the new value
+ * back to RFCSR 32
+ */
+ rt2800_rfcsr_read(rt2x00dev, 32, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR32_TX_AGC_FC, txrx_agc_fc);
+
+ if (rf->channel <= 14)
+ rfcsr = 0xa0;
+ else
+ rfcsr = 0x80;
+ rt2800_rfcsr_write(rt2x00dev, 31, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, txrx_h20m);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, txrx_h20m);
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+
+ /* Band selection */
+ rt2800_rfcsr_read(rt2x00dev, 36, &rfcsr);
+ if (rf->channel <= 14)
+ rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 1);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 0);
+ rt2800_rfcsr_write(rt2x00dev, 36, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 34, &rfcsr);
+ if (rf->channel <= 14)
+ rfcsr = 0x3c;
+ else
+ rfcsr = 0x20;
+ rt2800_rfcsr_write(rt2x00dev, 34, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
+ if (rf->channel <= 14)
+ rfcsr = 0x1a;
+ else
+ rfcsr = 0x12;
+ rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
+ if (rf->channel >= 1 && rf->channel <= 14)
+ rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 1);
+ else if (rf->channel >= 36 && rf->channel <= 64)
+ rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 2);
+ else if (rf->channel >= 100 && rf->channel <= 128)
+ rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 2);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 1);
+ rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2);
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x60);
+
+ if (rf->channel <= 14) {
+ rt2800_rfcsr_write(rt2x00dev, 10, 0xd3);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x12);
+ } else {
+ rt2800_rfcsr_write(rt2x00dev, 10, 0xd8);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x23);
+ }
+
+ rt2800_rfcsr_read(rt2x00dev, 51, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR51_BITS01, 1);
+ rt2800_rfcsr_write(rt2x00dev, 51, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 51, &rfcsr);
+ if (rf->channel <= 14) {
+ rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, 5);
+ rt2x00_set_field8(&rfcsr, RFCSR51_BITS57, 3);
+ } else {
+ rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, 4);
+ rt2x00_set_field8(&rfcsr, RFCSR51_BITS57, 2);
+ }
+ rt2800_rfcsr_write(rt2x00dev, 51, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 49, &rfcsr);
+ if (rf->channel <= 14)
+ rt2x00_set_field8(&rfcsr, RFCSR49_TX_LO1_IC, 3);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR49_TX_LO1_IC, 2);
+
+ if (txbf_enabled)
+ rt2x00_set_field8(&rfcsr, RFCSR49_TX_DIV, 1);
+
+ rt2800_rfcsr_write(rt2x00dev, 49, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR50_TX_LO1_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 57, &rfcsr);
+ if (rf->channel <= 14)
+ rt2x00_set_field8(&rfcsr, RFCSR57_DRV_CC, 0x1b);
+ else
+ rt2x00_set_field8(&rfcsr, RFCSR57_DRV_CC, 0x0f);
+ rt2800_rfcsr_write(rt2x00dev, 57, rfcsr);
+
+ if (rf->channel <= 14) {
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x93);
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x45);
+ } else {
+ rt2800_rfcsr_write(rt2x00dev, 44, 0x9b);
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x05);
+ }
+
+ /* Initiate VCO calibration */
+ rt2800_rfcsr_read(rt2x00dev, 3, &rfcsr);
+ if (rf->channel <= 14) {
+ rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
+ } else {
+ rt2x00_set_field8(&rfcsr, RFCSR3_BIT1, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR3_BIT2, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR3_BIT3, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR3_BIT4, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR3_BIT5, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1);
+ }
+ rt2800_rfcsr_write(rt2x00dev, 3, rfcsr);
+
+ if (rf->channel >= 1 && rf->channel <= 14) {
+ rfcsr = 0x23;
+ if (txbf_enabled)
+ rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1);
+ rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
+
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xbb);
+ } else if (rf->channel >= 36 && rf->channel <= 64) {
+ rfcsr = 0x36;
+ if (txbf_enabled)
+ rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1);
+ rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
+
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xeb);
+ } else if (rf->channel >= 100 && rf->channel <= 128) {
+ rfcsr = 0x32;
+ if (txbf_enabled)
+ rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1);
+ rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
+
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xb3);
+ } else {
+ rfcsr = 0x30;
+ if (txbf_enabled)
+ rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1);
+ rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
+
+ rt2800_rfcsr_write(rt2x00dev, 45, 0x9b);
+ }
}
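The channel programming in rt2800_config_channel_rf3053() above is almost entirely read-modify-write sequences: read an RFCSR register, update one named bit field with rt2x00_set_field8(), write the register back. As a rough standalone illustration of that pattern (not the driver's actual FIELD8 macros; the mask and shift values below are invented for the example):

#include <stdint.h>
#include <stdio.h>

/* Standalone sketch of the read-modify-write field pattern; the mask and
 * shift below are invented for illustration, the real RFCSR field
 * definitions live in rt2800.h. */
struct field8 {
	uint8_t mask;	/* contiguous bits occupied by the field */
	uint8_t shift;	/* position of the field's least significant bit */
};

static void set_field8(uint8_t *reg, struct field8 f, uint8_t value)
{
	*reg = (uint8_t)((*reg & ~f.mask) | ((value << f.shift) & f.mask));
}

static uint8_t get_field8(uint8_t reg, struct field8 f)
{
	return (uint8_t)((reg & f.mask) >> f.shift);
}

int main(void)
{
	/* hypothetical 2-bit field occupying bits 4..5 */
	const struct field8 EXAMPLE_FIELD = { .mask = 0x30, .shift = 4 };
	uint8_t rfcsr = 0x8f;	/* value "read" from the register */

	set_field8(&rfcsr, EXAMPLE_FIELD, 2);	/* touch only that field */
	printf("register: 0x%02x, field: %u\n",
	       (unsigned)rfcsr, (unsigned)get_field8(rfcsr, EXAMPLE_FIELD));
	return 0;
}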
+#define POWER_BOUND 0x27
+#define POWER_BOUND_5G 0x2b
+
static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev,
struct ieee80211_conf *conf,
struct rf_channel *rf,
@@ -2563,6 +3097,23 @@ static void rt2800_iq_calibrate(struct rt2x00_dev *rt2x00dev, int channel)
rt2800_bbp_write(rt2x00dev, 159, cal != 0xff ? cal : 0);
}
+static char rt2800_txpower_to_dev(struct rt2x00_dev *rt2x00dev,
+ unsigned int channel,
+ char txpower)
+{
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ txpower = rt2x00_get_field8(txpower, EEPROM_TXPOWER_ALC);
+
+ if (channel <= 14)
+ return clamp_t(char, txpower, MIN_G_TXPOWER, MAX_G_TXPOWER);
+
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ return clamp_t(char, txpower, MIN_A_TXPOWER_3593,
+ MAX_A_TXPOWER_3593);
+ else
+ return clamp_t(char, txpower, MIN_A_TXPOWER, MAX_A_TXPOWER);
+}
+
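rt2800_txpower_to_dev() above only normalizes an EEPROM power value: on RT3593 it first extracts the ALC field, then pins the result into the band-specific range the chip accepts. A minimal standalone model of the clamp step (the bounds here are placeholders, not the driver's MIN/MAX constants from rt2800.h):

#include <stdio.h>

/* Placeholder bounds for the sketch; the real per-band, per-chip limits
 * are defined in rt2800.h. */
#define SKETCH_MIN_TXPOWER 0
#define SKETCH_MAX_TXPOWER 31

static signed char clamp_txpower(signed char txpower)
{
	if (txpower < SKETCH_MIN_TXPOWER)
		return SKETCH_MIN_TXPOWER;
	if (txpower > SKETCH_MAX_TXPOWER)
		return SKETCH_MAX_TXPOWER;
	return txpower;
}

int main(void)
{
	const signed char raw[] = { -3, 10, 45 };
	unsigned int i;

	for (i = 0; i < sizeof(raw) / sizeof(raw[0]); i++)
		printf("%d -> %d\n", raw[i], clamp_txpower(raw[i]));
	return 0;
}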
static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
struct ieee80211_conf *conf,
struct rf_channel *rf,
@@ -2572,13 +3123,14 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
unsigned int tx_pin;
u8 bbp, rfcsr;
- if (rf->channel <= 14) {
- info->default_power1 = TXPOWER_G_TO_DEV(info->default_power1);
- info->default_power2 = TXPOWER_G_TO_DEV(info->default_power2);
- } else {
- info->default_power1 = TXPOWER_A_TO_DEV(info->default_power1);
- info->default_power2 = TXPOWER_A_TO_DEV(info->default_power2);
- }
+ info->default_power1 = rt2800_txpower_to_dev(rt2x00dev, rf->channel,
+ info->default_power1);
+ info->default_power2 = rt2800_txpower_to_dev(rt2x00dev, rf->channel,
+ info->default_power2);
+ if (rt2x00dev->default_ant.tx_chain_num > 2)
+ info->default_power3 =
+ rt2800_txpower_to_dev(rt2x00dev, rf->channel,
+ info->default_power3);
switch (rt2x00dev->chip.rf) {
case RF2020:
@@ -2591,6 +3143,9 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
case RF3052:
rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info);
break;
+ case RF3053:
+ rt2800_config_channel_rf3053(rt2x00dev, conf, rf, info);
+ break;
case RF3290:
rt2800_config_channel_rf3290(rt2x00dev, conf, rf, info);
break;
@@ -2636,6 +3191,23 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 27, 0x20);
rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain);
+ } else if (rt2x00_rt(rt2x00dev, RT3593)) {
+ if (rf->channel > 14) {
+ /* Disable CCK Packet detection on 5GHz */
+ rt2800_bbp_write(rt2x00dev, 70, 0x00);
+ } else {
+ rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+ }
+
+ if (conf_is_ht40(conf))
+ rt2800_bbp_write(rt2x00dev, 105, 0x04);
+ else
+ rt2800_bbp_write(rt2x00dev, 105, 0x34);
+
+ rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
+ rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
+ rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
+ rt2800_bbp_write(rt2x00dev, 77, 0x98);
} else {
rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
@@ -2651,16 +3223,27 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2800_bbp_write(rt2x00dev, 82, 0x62);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
} else {
- rt2800_bbp_write(rt2x00dev, 82, 0x84);
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ rt2800_bbp_write(rt2x00dev, 82, 0x62);
+ else
+ rt2800_bbp_write(rt2x00dev, 82, 0x84);
rt2800_bbp_write(rt2x00dev, 75, 0x50);
}
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ rt2800_bbp_write(rt2x00dev, 83, 0x8a);
}
+
} else {
if (rt2x00_rt(rt2x00dev, RT3572))
rt2800_bbp_write(rt2x00dev, 82, 0x94);
+ else if (rt2x00_rt(rt2x00dev, RT3593))
+ rt2800_bbp_write(rt2x00dev, 82, 0x82);
else
rt2800_bbp_write(rt2x00dev, 82, 0xf2);
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ rt2800_bbp_write(rt2x00dev, 83, 0x9a);
+
if (test_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags))
rt2800_bbp_write(rt2x00dev, 75, 0x46);
else
@@ -2731,6 +3314,41 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
if (rt2x00_rt(rt2x00dev, RT3572))
rt2800_rfcsr_write(rt2x00dev, 8, 0x80);
+ if (rt2x00_rt(rt2x00dev, RT3593)) {
+ if (rt2x00_is_usb(rt2x00dev)) {
+ rt2800_register_read(rt2x00dev, GPIO_CTRL, &reg);
+
+ /* Band selection. GPIO #8 controls all paths */
+ rt2x00_set_field32(&reg, GPIO_CTRL_DIR8, 0);
+ if (rf->channel <= 14)
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 1);
+ else
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 0);
+
+ rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0);
+ rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0);
+
+ /* LNA PE control.
+ * GPIO #4 controls PE0 and PE1,
+ * GPIO #7 controls PE2
+ */
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1);
+ rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1);
+
+ rt2800_register_write(rt2x00dev, GPIO_CTRL, reg);
+ }
+
+ /* AGC init */
+ if (rf->channel <= 14)
+ reg = 0x1c + 2 * rt2x00dev->lna_gain;
+ else
+ reg = 0x22 + ((rt2x00dev->lna_gain * 5) / 3);
+
+ rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg);
+
+ usleep_range(1000, 1500);
+ }
+
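The AGC init value written to BBP 66 above is a simple function of the LNA gain and the band; the two formulas below are copied directly from the RT3593 branch. A standalone check of what they produce:

#include <stdio.h>

/* AGC init value for BBP 66, per band, using the formulas from the
 * RT3593 branch above. */
static unsigned int agc_init(unsigned int lna_gain, int is_5ghz)
{
	if (is_5ghz)
		return 0x22 + (lna_gain * 5) / 3;
	return 0x1c + 2 * lna_gain;
}

int main(void)
{
	unsigned int lna_gain;

	for (lna_gain = 0; lna_gain <= 12; lna_gain += 4)
		printf("lna_gain=%2u  2GHz=0x%02x  5GHz=0x%02x\n",
		       lna_gain, agc_init(lna_gain, 0), agc_init(lna_gain, 1));
	return 0;
}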
if (rt2x00_rt(rt2x00dev, RT5592)) {
rt2800_bbp_write(rt2x00dev, 195, 141);
rt2800_bbp_write(rt2x00dev, 196, conf_is_ht40(conf) ? 0x10 : 0x1a);
@@ -2790,6 +3408,13 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
int i;
/*
+ * First check if temperature compensation is supported.
+ */
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ if (!rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC))
+ return 0;
+
+ /*
* Read TSSI boundaries for temperature compensation from
* the EEPROM.
*
@@ -2798,62 +3423,62 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
* Example TSSI bounds 0xF0 0xD0 0xB5 0xA0 0x88 0x45 0x25 0x15 0x00
*/
if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1, &eeprom);
tssi_bounds[0] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG1_MINUS4);
tssi_bounds[1] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG1_MINUS3);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG2, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG2, &eeprom);
tssi_bounds[2] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG2_MINUS2);
tssi_bounds[3] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG2_MINUS1);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG3, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG3, &eeprom);
tssi_bounds[4] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG3_REF);
tssi_bounds[5] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG3_PLUS1);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG4, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG4, &eeprom);
tssi_bounds[6] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG4_PLUS2);
tssi_bounds[7] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG4_PLUS3);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG5, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG5, &eeprom);
tssi_bounds[8] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG5_PLUS4);
step = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_BG5_AGC_STEP);
} else {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A1, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A1, &eeprom);
tssi_bounds[0] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A1_MINUS4);
tssi_bounds[1] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A1_MINUS3);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A2, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A2, &eeprom);
tssi_bounds[2] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A2_MINUS2);
tssi_bounds[3] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A2_MINUS1);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A3, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A3, &eeprom);
tssi_bounds[4] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A3_REF);
tssi_bounds[5] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A3_PLUS1);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A4, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A4, &eeprom);
tssi_bounds[6] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A4_PLUS2);
tssi_bounds[7] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A4_PLUS3);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A5, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A5, &eeprom);
tssi_bounds[8] = rt2x00_get_field16(eeprom,
EEPROM_TSSI_BOUND_A5_PLUS4);
@@ -2899,7 +3524,7 @@ static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev,
u8 comp_type;
int comp_value = 0;
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA, &eeprom);
/*
* HT40 compensation not required.
@@ -2966,6 +3591,9 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
u8 eirp_txpower_criterion;
u8 reg_limit;
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ return min_t(u8, txpower, 0xc);
+
if (test_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags)) {
/*
* Check if eirp txpower exceed txpower_limit.
@@ -2974,12 +3602,12 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
* .11b data rate need add additional 4dbm
* when calculating eirp txpower.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + 1,
- &eeprom);
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ 1, &eeprom);
criterion = rt2x00_get_field16(eeprom,
EEPROM_TXPOWER_BYRATE_RATE0);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER,
+ rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER,
&eeprom);
if (band == IEEE80211_BAND_2GHZ)
@@ -3001,6 +3629,412 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
return min_t(u8, txpower, 0xc);
}
+
+enum {
+ TX_PWR_CFG_0_IDX,
+ TX_PWR_CFG_1_IDX,
+ TX_PWR_CFG_2_IDX,
+ TX_PWR_CFG_3_IDX,
+ TX_PWR_CFG_4_IDX,
+ TX_PWR_CFG_5_IDX,
+ TX_PWR_CFG_6_IDX,
+ TX_PWR_CFG_7_IDX,
+ TX_PWR_CFG_8_IDX,
+ TX_PWR_CFG_9_IDX,
+ TX_PWR_CFG_0_EXT_IDX,
+ TX_PWR_CFG_1_EXT_IDX,
+ TX_PWR_CFG_2_EXT_IDX,
+ TX_PWR_CFG_3_EXT_IDX,
+ TX_PWR_CFG_4_EXT_IDX,
+ TX_PWR_CFG_IDX_COUNT,
+};
+
+static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_channel *chan,
+ int power_level)
+{
+ u8 txpower;
+ u16 eeprom;
+ u32 regs[TX_PWR_CFG_IDX_COUNT];
+ unsigned int offset;
+ enum ieee80211_band band = chan->band;
+ int delta;
+ int i;
+
+ memset(regs, '\0', sizeof(regs));
+
+ /* TODO: adapt TX power reduction from the rt28xx code */
+
+ /* calculate temperature compensation delta */
+ delta = rt2800_get_gain_calibration_delta(rt2x00dev);
+
+ if (band == IEEE80211_BAND_5GHZ)
+ offset = 16;
+ else
+ offset = 0;
+
+ if (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
+ offset += 8;
+
+ /* read the next four txpower values */
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset, &eeprom);
+
+ /* CCK 1MBS,2MBS */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 1, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+ TX_PWR_CFG_0_CCK1_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+ TX_PWR_CFG_0_CCK1_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_EXT_IDX],
+ TX_PWR_CFG_0_EXT_CCK1_CH2, txpower);
+
+ /* CCK 5.5MBS,11MBS */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 1, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+ TX_PWR_CFG_0_CCK5_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+ TX_PWR_CFG_0_CCK5_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_EXT_IDX],
+ TX_PWR_CFG_0_EXT_CCK5_CH2, txpower);
+
+ /* OFDM 6MBS,9MBS */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+ TX_PWR_CFG_0_OFDM6_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+ TX_PWR_CFG_0_OFDM6_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_EXT_IDX],
+ TX_PWR_CFG_0_EXT_OFDM6_CH2, txpower);
+
+ /* OFDM 12MBS,18MBS */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+ TX_PWR_CFG_0_OFDM12_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX],
+ TX_PWR_CFG_0_OFDM12_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_0_EXT_IDX],
+ TX_PWR_CFG_0_EXT_OFDM12_CH2, txpower);
+
+ /* read the next four txpower values */
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset + 1, &eeprom);
+
+ /* OFDM 24MBS,36MBS */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+ TX_PWR_CFG_1_OFDM24_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+ TX_PWR_CFG_1_OFDM24_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_EXT_IDX],
+ TX_PWR_CFG_1_EXT_OFDM24_CH2, txpower);
+
+ /* OFDM 48MBS */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+ TX_PWR_CFG_1_OFDM48_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+ TX_PWR_CFG_1_OFDM48_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_EXT_IDX],
+ TX_PWR_CFG_1_EXT_OFDM48_CH2, txpower);
+
+ /* OFDM 54MBS */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
+ TX_PWR_CFG_7_OFDM54_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
+ TX_PWR_CFG_7_OFDM54_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
+ TX_PWR_CFG_7_OFDM54_CH2, txpower);
+
+ /* read the next four txpower values */
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset + 2, &eeprom);
+
+ /* MCS 0,1 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+ TX_PWR_CFG_1_MCS0_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+ TX_PWR_CFG_1_MCS0_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_EXT_IDX],
+ TX_PWR_CFG_1_EXT_MCS0_CH2, txpower);
+
+ /* MCS 2,3 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+ TX_PWR_CFG_1_MCS2_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX],
+ TX_PWR_CFG_1_MCS2_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_1_EXT_IDX],
+ TX_PWR_CFG_1_EXT_MCS2_CH2, txpower);
+
+ /* MCS 4,5 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+ TX_PWR_CFG_2_MCS4_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+ TX_PWR_CFG_2_MCS4_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_EXT_IDX],
+ TX_PWR_CFG_2_EXT_MCS4_CH2, txpower);
+
+ /* MCS 6 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+ TX_PWR_CFG_2_MCS6_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+ TX_PWR_CFG_2_MCS6_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_EXT_IDX],
+ TX_PWR_CFG_2_EXT_MCS6_CH2, txpower);
+
+ /* read the next four txpower values */
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset + 3, &eeprom);
+
+ /* MCS 7 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
+ TX_PWR_CFG_7_MCS7_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
+ TX_PWR_CFG_7_MCS7_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX],
+ TX_PWR_CFG_7_MCS7_CH2, txpower);
+
+ /* MCS 8,9 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+ TX_PWR_CFG_2_MCS8_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+ TX_PWR_CFG_2_MCS8_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_EXT_IDX],
+ TX_PWR_CFG_2_EXT_MCS8_CH2, txpower);
+
+ /* MCS 10,11 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+ TX_PWR_CFG_2_MCS10_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX],
+ TX_PWR_CFG_2_MCS10_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_2_EXT_IDX],
+ TX_PWR_CFG_2_EXT_MCS10_CH2, txpower);
+
+ /* MCS 12,13 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+ TX_PWR_CFG_3_MCS12_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+ TX_PWR_CFG_3_MCS12_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_EXT_IDX],
+ TX_PWR_CFG_3_EXT_MCS12_CH2, txpower);
+
+ /* read the next four txpower values */
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset + 4, &eeprom);
+
+ /* MCS 14 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+ TX_PWR_CFG_3_MCS14_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+ TX_PWR_CFG_3_MCS14_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_EXT_IDX],
+ TX_PWR_CFG_3_EXT_MCS14_CH2, txpower);
+
+ /* MCS 15 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
+ TX_PWR_CFG_8_MCS15_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
+ TX_PWR_CFG_8_MCS15_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
+ TX_PWR_CFG_8_MCS15_CH2, txpower);
+
+ /* MCS 16,17 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
+ TX_PWR_CFG_5_MCS16_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
+ TX_PWR_CFG_5_MCS16_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
+ TX_PWR_CFG_5_MCS16_CH2, txpower);
+
+ /* MCS 18,19 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
+ TX_PWR_CFG_5_MCS18_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
+ TX_PWR_CFG_5_MCS18_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX],
+ TX_PWR_CFG_5_MCS18_CH2, txpower);
+
+ /* read the next four txpower values */
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset + 5, &eeprom);
+
+ /* MCS 20,21 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
+ TX_PWR_CFG_6_MCS20_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
+ TX_PWR_CFG_6_MCS20_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
+ TX_PWR_CFG_6_MCS20_CH2, txpower);
+
+ /* MCS 22 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
+ TX_PWR_CFG_6_MCS22_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
+ TX_PWR_CFG_6_MCS22_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX],
+ TX_PWR_CFG_6_MCS22_CH2, txpower);
+
+ /* MCS 23 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
+ TX_PWR_CFG_8_MCS23_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
+ TX_PWR_CFG_8_MCS23_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX],
+ TX_PWR_CFG_8_MCS23_CH2, txpower);
+
+ /* read the next four txpower values */
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset + 6, &eeprom);
+
+ /* STBC, MCS 0,1 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+ TX_PWR_CFG_3_STBC0_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+ TX_PWR_CFG_3_STBC0_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_EXT_IDX],
+ TX_PWR_CFG_3_EXT_STBC0_CH2, txpower);
+
+ /* STBC, MCS 2,3 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+ TX_PWR_CFG_3_STBC2_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX],
+ TX_PWR_CFG_3_STBC2_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_3_EXT_IDX],
+ TX_PWR_CFG_3_EXT_STBC2_CH2, txpower);
+
+ /* STBC, MCS 4,5 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_4_EXT_IDX], TX_PWR_CFG_RATE0,
+ txpower);
+
+ /* STBC, MCS 6 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE2, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE3, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_4_EXT_IDX], TX_PWR_CFG_RATE2,
+ txpower);
+
+ /* read the next four txpower values */
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ offset + 7, &eeprom);
+
+ /* STBC, MCS 7 */
+ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0);
+ txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level,
+ txpower, delta);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_9_IDX],
+ TX_PWR_CFG_9_STBC7_CH0, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_9_IDX],
+ TX_PWR_CFG_9_STBC7_CH1, txpower);
+ rt2x00_set_field32(&regs[TX_PWR_CFG_9_IDX],
+ TX_PWR_CFG_9_STBC7_CH2, txpower);
+
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_0, regs[TX_PWR_CFG_0_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_1, regs[TX_PWR_CFG_1_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_2, regs[TX_PWR_CFG_2_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_3, regs[TX_PWR_CFG_3_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_4, regs[TX_PWR_CFG_4_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_5, regs[TX_PWR_CFG_5_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_6, regs[TX_PWR_CFG_6_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_7, regs[TX_PWR_CFG_7_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_8, regs[TX_PWR_CFG_8_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_9, regs[TX_PWR_CFG_9_IDX]);
+
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_0_EXT,
+ regs[TX_PWR_CFG_0_EXT_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_1_EXT,
+ regs[TX_PWR_CFG_1_EXT_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_2_EXT,
+ regs[TX_PWR_CFG_2_EXT_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_3_EXT,
+ regs[TX_PWR_CFG_3_EXT_IDX]);
+ rt2800_register_write(rt2x00dev, TX_PWR_CFG_4_EXT,
+ regs[TX_PWR_CFG_4_EXT_IDX]);
+
+ for (i = 0; i < TX_PWR_CFG_IDX_COUNT; i++)
+ rt2x00_dbg(rt2x00dev,
+ "band:%cGHz, BW:%c0MHz, TX_PWR_CFG_%d%s = %08lx\n",
+ (band == IEEE80211_BAND_5GHZ) ? '5' : '2',
+ (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) ?
+ '4' : '2',
+ (i > TX_PWR_CFG_9_IDX) ?
+ (i - TX_PWR_CFG_9_IDX - 1) : i,
+ (i > TX_PWR_CFG_9_IDX) ? "_EXT" : "",
+ (unsigned long) regs[i]);
+}
+
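rt2800_config_txpower_rt3593() above repeats one step for every rate group: read a 16-bit EEPROM_TXPOWER_BYRATE word, pull out the per-rate field, run it through rt2800_compensate_txpower() (which on RT3593 caps the value at 0xc), and drop it into the matching TX_PWR_CFG field for each chain. Below is a standalone sketch of that per-word step; treating the four RATE fields as 4-bit nibbles is an assumption made for the example, not a statement of the EEPROM layout:

#include <stdio.h>

/* Models one EEPROM_TXPOWER_BYRATE word as four 4-bit per-rate nibbles
 * (an assumption for illustration) and applies a calibration delta with
 * the 0xc cap seen in rt2800_compensate_txpower() for RT3593. */
static unsigned int compensate(unsigned int txpower, int delta,
			       unsigned int cap)
{
	int val = (int)txpower + delta;

	if (val < 0)
		return 0;
	if ((unsigned int)val > cap)
		return cap;
	return (unsigned int)val;
}

int main(void)
{
	const unsigned int eeprom_word = 0x8c64u; /* example EEPROM content */
	const int delta = -2;			  /* example calibration delta */
	unsigned int i;

	for (i = 0; i < 4; i++) {
		unsigned int raw = (eeprom_word >> (4 * i)) & 0xfu;

		printf("RATE%u: raw=%u compensated=%u\n",
		       i, raw, compensate(raw, delta, 0xc));
	}
	return 0;
}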
/*
* We configure transmit power using MAC TX_PWR_CFG_{0,...,N} registers and
* BBP R1 register. TX_PWR_CFG_X allow to configure per rate TX power values,
@@ -3010,9 +4044,9 @@ static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
* EEPROM_TXPOWER_BYRATE offset. We adjust them and BBP R1 settings according to
* current conditions (i.e. band, bandwidth, temperature, user settings).
*/
-static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
- struct ieee80211_channel *chan,
- int power_level)
+static void rt2800_config_txpower_rt28xx(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_channel *chan,
+ int power_level)
{
u8 txpower, r1;
u16 eeprom;
@@ -3080,8 +4114,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
rt2800_register_read(rt2x00dev, offset, &reg);
/* read the next four txpower values */
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i,
- &eeprom);
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ i, &eeprom);
is_rate_b = i ? 0 : 1;
/*
@@ -3129,8 +4163,8 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3, txpower);
/* read the next four txpower values */
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TXPOWER_BYRATE + i + 1,
- &eeprom);
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE,
+ i + 1, &eeprom);
is_rate_b = 0;
/*
@@ -3184,6 +4218,16 @@ static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
}
}
+static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
+ struct ieee80211_channel *chan,
+ int power_level)
+{
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ rt2800_config_txpower_rt3593(rt2x00dev, chan, power_level);
+ else
+ rt2800_config_txpower_rt28xx(rt2x00dev, chan, power_level);
+}
+
void rt2800_gain_calibration(struct rt2x00_dev *rt2x00dev)
{
rt2800_config_txpower(rt2x00dev, rt2x00dev->hw->conf.chandef.chan,
@@ -3219,6 +4263,7 @@ void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
break;
+ case RF3053:
case RF3290:
case RF5360:
case RF5370:
@@ -3442,17 +4487,25 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
return ret;
rt2800_register_read(rt2x00dev, BCN_OFFSET0, &reg);
- rt2x00_set_field32(&reg, BCN_OFFSET0_BCN0, 0xe0); /* 0x3800 */
- rt2x00_set_field32(&reg, BCN_OFFSET0_BCN1, 0xe8); /* 0x3a00 */
- rt2x00_set_field32(&reg, BCN_OFFSET0_BCN2, 0xf0); /* 0x3c00 */
- rt2x00_set_field32(&reg, BCN_OFFSET0_BCN3, 0xf8); /* 0x3e00 */
+ rt2x00_set_field32(&reg, BCN_OFFSET0_BCN0,
+ rt2800_get_beacon_offset(rt2x00dev, 0));
+ rt2x00_set_field32(&reg, BCN_OFFSET0_BCN1,
+ rt2800_get_beacon_offset(rt2x00dev, 1));
+ rt2x00_set_field32(&reg, BCN_OFFSET0_BCN2,
+ rt2800_get_beacon_offset(rt2x00dev, 2));
+ rt2x00_set_field32(&reg, BCN_OFFSET0_BCN3,
+ rt2800_get_beacon_offset(rt2x00dev, 3));
rt2800_register_write(rt2x00dev, BCN_OFFSET0, reg);
rt2800_register_read(rt2x00dev, BCN_OFFSET1, &reg);
- rt2x00_set_field32(&reg, BCN_OFFSET1_BCN4, 0xc8); /* 0x3200 */
- rt2x00_set_field32(&reg, BCN_OFFSET1_BCN5, 0xd0); /* 0x3400 */
- rt2x00_set_field32(&reg, BCN_OFFSET1_BCN6, 0x77); /* 0x1dc0 */
- rt2x00_set_field32(&reg, BCN_OFFSET1_BCN7, 0x6f); /* 0x1bc0 */
+ rt2x00_set_field32(&reg, BCN_OFFSET1_BCN4,
+ rt2800_get_beacon_offset(rt2x00dev, 4));
+ rt2x00_set_field32(&reg, BCN_OFFSET1_BCN5,
+ rt2800_get_beacon_offset(rt2x00dev, 5));
+ rt2x00_set_field32(&reg, BCN_OFFSET1_BCN6,
+ rt2800_get_beacon_offset(rt2x00dev, 6));
+ rt2x00_set_field32(&reg, BCN_OFFSET1_BCN7,
+ rt2800_get_beacon_offset(rt2x00dev, 7));
rt2800_register_write(rt2x00dev, BCN_OFFSET1, reg);
rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE, 0x0000013f);
@@ -3528,7 +4581,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1,
+ &eeprom);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST))
rt2800_register_write(rt2x00dev, TX_SW_CFG2,
0x0000002c);
@@ -3559,6 +4613,23 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
} else if (rt2x00_rt(rt2x00dev, RT3572)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
+ } else if (rt2x00_rt(rt2x00dev, RT3593)) {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000402);
+ rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
+ if (rt2x00_rt_rev_lt(rt2x00dev, RT3593, REV_RT3593E)) {
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1,
+ &eeprom);
+ if (rt2x00_get_field16(eeprom,
+ EEPROM_NIC_CONF1_DAC_TEST))
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2,
+ 0x0000001f);
+ else
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2,
+ 0x0000000f);
+ } else {
+ rt2800_register_write(rt2x00dev, TX_SW_CFG2,
+ 0x00000000);
+ }
} else if (rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392) ||
rt2x00_rt(rt2x00dev, RT5592)) {
@@ -3786,14 +4857,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
/*
* Clear all beacons
*/
- rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE0);
- rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE1);
- rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE2);
- rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE3);
- rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE4);
- rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE5);
- rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE6);
- rt2800_clear_beacon_register(rt2x00dev, HW_BEACON_BASE7);
+ for (i = 0; i < 8; i++)
+ rt2800_clear_beacon_register(rt2x00dev, i);
if (rt2x00_is_usb(rt2x00dev)) {
rt2800_register_read(rt2x00dev, US_CYC_CNT, &reg);
@@ -3989,7 +5054,7 @@ static void rt2800_disable_unused_dac_adc(struct rt2x00_dev *rt2x00dev)
u8 value;
rt2800_bbp_read(rt2x00dev, 138, &value);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
value |= 0x20;
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
@@ -4332,6 +5397,22 @@ static void rt2800_init_bbp_3572(struct rt2x00_dev *rt2x00dev)
rt2800_disable_unused_dac_adc(rt2x00dev);
}
+static void rt2800_init_bbp_3593(struct rt2x00_dev *rt2x00dev)
+{
+ rt2800_init_bbp_early(rt2x00dev);
+
+ rt2800_bbp_write(rt2x00dev, 79, 0x13);
+ rt2800_bbp_write(rt2x00dev, 80, 0x05);
+ rt2800_bbp_write(rt2x00dev, 81, 0x33);
+ rt2800_bbp_write(rt2x00dev, 137, 0x0f);
+
+ rt2800_bbp_write(rt2x00dev, 84, 0x19);
+
+ /* Enable DC filter */
+ if (rt2x00_rt_rev_gte(rt2x00dev, RT3593, REV_RT3593E))
+ rt2800_bbp_write(rt2x00dev, 103, 0xc0);
+}
+
static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
{
int ant, div_mode;
@@ -4402,7 +5483,7 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
rt2800_disable_unused_dac_adc(rt2x00dev);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
div_mode = rt2x00_get_field16(eeprom,
EEPROM_NIC_CONF1_ANT_DIVERSITY);
ant = (div_mode == 3) ? 1 : 0;
@@ -4488,7 +5569,7 @@ static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev)
rt2800_bbp4_mac_if_ctrl(rt2x00dev);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY);
ant = (div_mode == 3) ? 1 : 0;
rt2800_bbp_read(rt2x00dev, 152, &value);
@@ -4547,6 +5628,9 @@ static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
case RT3572:
rt2800_init_bbp_3572(rt2x00dev);
break;
+ case RT3593:
+ rt2800_init_bbp_3593(rt2x00dev);
+ return;
case RT5390:
case RT5392:
rt2800_init_bbp_53xx(rt2x00dev);
@@ -4557,7 +5641,8 @@ static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
}
for (i = 0; i < EEPROM_BBP_SIZE; i++) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);
+ rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_BBP_START, i,
+ &eeprom);
if (eeprom != 0xffff && eeprom != 0x0000) {
reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
@@ -4728,7 +5813,7 @@ static void rt2800_normal_mode_setup_3xxx(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt(rt2x00dev, RT3090)) {
/* Turn off unused DAC1 and ADC1 to reduce power consumption */
rt2800_bbp_read(rt2x00dev, 138, &bbp);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
@@ -4771,6 +5856,42 @@ static void rt2800_normal_mode_setup_3xxx(struct rt2x00_dev *rt2x00dev)
}
}
+static void rt2800_normal_mode_setup_3593(struct rt2x00_dev *rt2x00dev)
+{
+ struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+ u8 rfcsr;
+ u8 tx_gain;
+
+ rt2800_rfcsr_read(rt2x00dev, 50, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR50_TX_LO2_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 50, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 51, &rfcsr);
+ tx_gain = rt2x00_get_field8(drv_data->txmixer_gain_24g,
+ RFCSR17_TXMIXER_GAIN);
+ rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, tx_gain);
+ rt2800_rfcsr_write(rt2x00dev, 51, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 38, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 38, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 39, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR39_RX_LO2_EN, 0);
+ rt2800_rfcsr_write(rt2x00dev, 39, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
+ rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1);
+ rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
+
+ rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2);
+ rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
+
+ /* TODO: enable stream mode */
+}
+
static void rt2800_normal_mode_setup_5xxx(struct rt2x00_dev *rt2x00dev)
{
u8 reg;
@@ -4778,7 +5899,7 @@ static void rt2800_normal_mode_setup_5xxx(struct rt2x00_dev *rt2x00dev)
/* Turn off unused DAC1 and ADC1 to reduce power consumption */
rt2800_bbp_read(rt2x00dev, 138, &reg);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
rt2x00_set_field8(&reg, BBP138_RX_ADC1, 0);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
@@ -4884,7 +6005,8 @@ static void rt2800_init_rfcsr_30xx(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E)) {
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1,
+ &eeprom);
if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST))
rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
else
@@ -5152,6 +6274,136 @@ static void rt2800_init_rfcsr_3572(struct rt2x00_dev *rt2x00dev)
rt2800_normal_mode_setup_3xxx(rt2x00dev);
}
+static void rt3593_post_bbp_init(struct rt2x00_dev *rt2x00dev)
+{
+ u8 bbp;
+ bool txbf_enabled = false; /* FIXME */
+
+ rt2800_bbp_read(rt2x00dev, 105, &bbp);
+ if (rt2x00dev->default_ant.rx_chain_num == 1)
+ rt2x00_set_field8(&bbp, BBP105_MLD, 0);
+ else
+ rt2x00_set_field8(&bbp, BBP105_MLD, 1);
+ rt2800_bbp_write(rt2x00dev, 105, bbp);
+
+ rt2800_bbp4_mac_if_ctrl(rt2x00dev);
+
+ rt2800_bbp_write(rt2x00dev, 92, 0x02);
+ rt2800_bbp_write(rt2x00dev, 82, 0x82);
+ rt2800_bbp_write(rt2x00dev, 106, 0x05);
+ rt2800_bbp_write(rt2x00dev, 104, 0x92);
+ rt2800_bbp_write(rt2x00dev, 88, 0x90);
+ rt2800_bbp_write(rt2x00dev, 148, 0xc8);
+ rt2800_bbp_write(rt2x00dev, 47, 0x48);
+ rt2800_bbp_write(rt2x00dev, 120, 0x50);
+
+ if (txbf_enabled)
+ rt2800_bbp_write(rt2x00dev, 163, 0xbd);
+ else
+ rt2800_bbp_write(rt2x00dev, 163, 0x9d);
+
+ /* SNR mapping */
+ rt2800_bbp_write(rt2x00dev, 142, 6);
+ rt2800_bbp_write(rt2x00dev, 143, 160);
+ rt2800_bbp_write(rt2x00dev, 142, 7);
+ rt2800_bbp_write(rt2x00dev, 143, 161);
+ rt2800_bbp_write(rt2x00dev, 142, 8);
+ rt2800_bbp_write(rt2x00dev, 143, 162);
+
+ /* ADC/DAC control */
+ rt2800_bbp_write(rt2x00dev, 31, 0x08);
+
+ /* RX AGC energy lower bound in log2 */
+ rt2800_bbp_write(rt2x00dev, 68, 0x0b);
+
+ /* FIXME: BBP 105 overwrite? */
+ rt2800_bbp_write(rt2x00dev, 105, 0x04);
+}
+
+static void rt2800_init_rfcsr_3593(struct rt2x00_dev *rt2x00dev)
+{
+ struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
+ u32 reg;
+ u8 rfcsr;
+
+ /* Disable GPIO #4 and #7 function for LNA PE control */
+ rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
+ rt2x00_set_field32(&reg, GPIO_SWITCH_4, 0);
+ rt2x00_set_field32(&reg, GPIO_SWITCH_7, 0);
+ rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
+
+ /* Initialize default register values */
+ rt2800_rfcsr_write(rt2x00dev, 1, 0x03);
+ rt2800_rfcsr_write(rt2x00dev, 3, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 5, 0x00);
+ rt2800_rfcsr_write(rt2x00dev, 6, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 8, 0xf1);
+ rt2800_rfcsr_write(rt2x00dev, 9, 0x02);
+ rt2800_rfcsr_write(rt2x00dev, 10, 0xd3);
+ rt2800_rfcsr_write(rt2x00dev, 11, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 12, 0x4e);
+ rt2800_rfcsr_write(rt2x00dev, 13, 0x12);
+ rt2800_rfcsr_write(rt2x00dev, 18, 0x40);
+ rt2800_rfcsr_write(rt2x00dev, 22, 0x20);
+ rt2800_rfcsr_write(rt2x00dev, 30, 0x10);
+ rt2800_rfcsr_write(rt2x00dev, 31, 0x80);
+ rt2800_rfcsr_write(rt2x00dev, 32, 0x78);
+ rt2800_rfcsr_write(rt2x00dev, 33, 0x3b);
+ rt2800_rfcsr_write(rt2x00dev, 34, 0x3c);
+ rt2800_rfcsr_write(rt2x00dev, 35, 0xe0);
+ rt2800_rfcsr_write(rt2x00dev, 38, 0x86);
+ rt2800_rfcsr_write(rt2x00dev, 39, 0x23);
+ rt2800_rfcsr_write(rt2x00dev, 44, 0xd3);
+ rt2800_rfcsr_write(rt2x00dev, 45, 0xbb);
+ rt2800_rfcsr_write(rt2x00dev, 46, 0x60);
+ rt2800_rfcsr_write(rt2x00dev, 49, 0x8e);
+ rt2800_rfcsr_write(rt2x00dev, 50, 0x86);
+ rt2800_rfcsr_write(rt2x00dev, 51, 0x75);
+ rt2800_rfcsr_write(rt2x00dev, 52, 0x45);
+ rt2800_rfcsr_write(rt2x00dev, 53, 0x18);
+ rt2800_rfcsr_write(rt2x00dev, 54, 0x18);
+ rt2800_rfcsr_write(rt2x00dev, 55, 0x18);
+ rt2800_rfcsr_write(rt2x00dev, 56, 0xdb);
+ rt2800_rfcsr_write(rt2x00dev, 57, 0x6e);
+
+ /* Initiate calibration */
+ /* TODO: use rt2800_rf_init_calibration ? */
+ rt2800_rfcsr_read(rt2x00dev, 2, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1);
+ rt2800_rfcsr_write(rt2x00dev, 2, rfcsr);
+
+ rt2800_adjust_freq_offset(rt2x00dev);
+
+ rt2800_rfcsr_read(rt2x00dev, 18, &rfcsr);
+ rt2x00_set_field8(&rfcsr, RFCSR18_XO_TUNE_BYPASS, 1);
+ rt2800_rfcsr_write(rt2x00dev, 18, rfcsr);
+
+ rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+ rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
+ rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
+ rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
+ usleep_range(1000, 1500);
+ rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
+ rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
+ rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
+
+ /* Set initial values for RX filter calibration */
+ drv_data->calibration_bw20 = 0x1f;
+ drv_data->calibration_bw40 = 0x2f;
+
+ /* Save BBP 25 & 26 values for later use in channel switching */
+ rt2800_bbp_read(rt2x00dev, 25, &drv_data->bbp25);
+ rt2800_bbp_read(rt2x00dev, 26, &drv_data->bbp26);
+
+ rt2800_led_open_drain_enable(rt2x00dev);
+ rt2800_normal_mode_setup_3593(rt2x00dev);
+
+ rt3593_post_bbp_init(rt2x00dev);
+
+ /* TODO: enable stream mode support */
+}
+
static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev)
{
rt2800_rf_init_calibration(rt2x00dev, 2);
@@ -5380,6 +6632,9 @@ static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
case RT3572:
rt2800_init_rfcsr_3572(rt2x00dev);
break;
+ case RT3593:
+ rt2800_init_rfcsr_3593(rt2x00dev);
+ break;
case RT5390:
rt2800_init_rfcsr_5390(rt2x00dev);
break;
@@ -5456,15 +6711,15 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
/*
* Initialize LED control
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_LED_AG_CONF, &word);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_LED_AG_CONF, &word);
rt2800_mcu_request(rt2x00dev, MCU_LED_AG_CONF, 0xff,
word & 0xff, (word >> 8) & 0xff);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_LED_ACT_CONF, &word);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_LED_ACT_CONF, &word);
rt2800_mcu_request(rt2x00dev, MCU_LED_ACT_CONF, 0xff,
word & 0xff, (word >> 8) & 0xff);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_LED_POLARITY, &word);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_LED_POLARITY, &word);
rt2800_mcu_request(rt2x00dev, MCU_LED_LED_POLARITY, 0xff,
word & 0xff, (word >> 8) & 0xff);
@@ -5560,6 +6815,34 @@ int rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
}
EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse);
+static u8 rt2800_get_txmixer_gain_24g(struct rt2x00_dev *rt2x00dev)
+{
+ u16 word;
+
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ return 0;
+
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &word);
+ if ((word & 0x00ff) != 0x00ff)
+ return rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_BG_VAL);
+
+ return 0;
+}
+
+static u8 rt2800_get_txmixer_gain_5g(struct rt2x00_dev *rt2x00dev)
+{
+ u16 word;
+
+ if (rt2x00_rt(rt2x00dev, RT3593))
+ return 0;
+
+ rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_A, &word);
+ if ((word & 0x00ff) != 0x00ff)
+ return rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_A_VAL);
+
+ return 0;
+}
+
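The two helpers above encode a common EEPROM convention: an erased EEPROM reads back as 0xff, so a low byte of 0xff means "not programmed" and the gain falls back to 0 (RT3593 skips the field entirely). A standalone sketch of the sentinel check; the 3-bit gain width used here is only an assumption for the example:

#include <stdio.h>

/* Sentinel handling as in rt2800_get_txmixer_gain_24g/_5g(): a low byte
 * of 0xff marks an unprogrammed word. The 3-bit gain field is assumed
 * for illustration only. */
static unsigned int txmixer_gain_from_word(unsigned int word)
{
	if ((word & 0x00ffu) == 0x00ffu)	/* low byte unprogrammed */
		return 0;
	return word & 0x0007u;			/* hypothetical gain field */
}

int main(void)
{
	printf("%u\n", txmixer_gain_from_word(0x12ffu));	/* prints 0 */
	printf("%u\n", txmixer_gain_from_word(0x1205u));	/* prints 5 */
	return 0;
}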
static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
{
struct rt2800_drv_data *drv_data = rt2x00dev->drv_data;
@@ -5578,18 +6861,18 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
/*
* Start validation of the data that has been read.
*/
- mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
+ mac = rt2800_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
if (!is_valid_ether_addr(mac)) {
eth_random_addr(mac);
rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", mac);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &word);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &word);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RXPATH, 2);
rt2x00_set_field16(&word, EEPROM_NIC_CONF0_TXPATH, 1);
rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RF_TYPE, RF2820);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word);
} else if (rt2x00_rt(rt2x00dev, RT2860) ||
rt2x00_rt(rt2x00dev, RT2872)) {
@@ -5598,10 +6881,10 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
*/
if (rt2x00_get_field16(word, EEPROM_NIC_CONF0_RXPATH) > 2)
rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RXPATH, 2);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &word);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &word);
if (word == 0xffff) {
rt2x00_set_field16(&word, EEPROM_NIC_CONF1_HW_RADIO, 0);
rt2x00_set_field16(&word, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC, 0);
@@ -5618,24 +6901,24 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field16(&word, EEPROM_NIC_CONF1_INTERNAL_TX_ALC, 0);
rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BT_COEXIST, 0);
rt2x00_set_field16(&word, EEPROM_NIC_CONF1_DAC_TEST, 0);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF1, word);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF1, word);
rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word);
}
- rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
if ((word & 0x00ff) == 0x00ff) {
rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
rt2x00_eeprom_dbg(rt2x00dev, "Freq: 0x%04x\n", word);
}
if ((word & 0xff00) == 0xff00) {
rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE,
LED_MODE_TXRX_ACTIVITY);
rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_AG_CONF, 0x5555);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_ACT_CONF, 0x2221);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_POLARITY, 0xa9f8);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_LED_AG_CONF, 0x5555);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_LED_ACT_CONF, 0x2221);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_LED_POLARITY, 0xa9f8);
rt2x00_eeprom_dbg(rt2x00dev, "Led Mode: 0x%04x\n", word);
}
@@ -5644,56 +6927,61 @@ static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
* lna0 as correct value. Note that EEPROM_LNA
* is never validated.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &word);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_LNA, &word);
default_lna_gain = rt2x00_get_field16(word, EEPROM_LNA_A0);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &word);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &word);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET0)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET0, 0);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET1)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET1, 0);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &word);
- if ((word & 0x00ff) != 0x00ff) {
- drv_data->txmixer_gain_24g =
- rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_BG_VAL);
- } else {
- drv_data->txmixer_gain_24g = 0;
- }
+ drv_data->txmixer_gain_24g = rt2800_get_txmixer_gain_24g(rt2x00dev);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0);
- if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 ||
- rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff)
- rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1,
- default_lna_gain);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word);
-
- rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_A, &word);
- if ((word & 0x00ff) != 0x00ff) {
- drv_data->txmixer_gain_5g =
- rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_A_VAL);
- } else {
- drv_data->txmixer_gain_5g = 0;
+ if (!rt2x00_rt(rt2x00dev, RT3593)) {
+ if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 ||
+ rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff)
+ rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1,
+ default_lna_gain);
}
+ rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word);
+ drv_data->txmixer_gain_5g = rt2800_get_txmixer_gain_5g(rt2x00dev);
+
+ rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET0)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET0, 0);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET1)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET1, 0);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word);
if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A2_OFFSET2)) > 10)
rt2x00_set_field16(&word, EEPROM_RSSI_A2_OFFSET2, 0);
- if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 ||
- rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff)
- rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2,
- default_lna_gain);
- rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
+ if (!rt2x00_rt(rt2x00dev, RT3593)) {
+ if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 ||
+ rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff)
+ rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2,
+ default_lna_gain);
+ }
+ rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);
+
+ if (rt2x00_rt(rt2x00dev, RT3593)) {
+ rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2, &word);
+ if (rt2x00_get_field16(word, EEPROM_EXT_LNA2_A1) == 0x00 ||
+ rt2x00_get_field16(word, EEPROM_EXT_LNA2_A1) == 0xff)
+ rt2x00_set_field16(&word, EEPROM_EXT_LNA2_A1,
+ default_lna_gain);
+ if (rt2x00_get_field16(word, EEPROM_EXT_LNA2_A2) == 0x00 ||
+ rt2x00_get_field16(word, EEPROM_EXT_LNA2_A2) == 0xff)
+ rt2x00_set_field16(&word, EEPROM_EXT_LNA2_A2,
+ default_lna_gain);
+ rt2800_eeprom_write(rt2x00dev, EEPROM_EXT_LNA2, word);
+ }
return 0;
}
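The validation above applies the same substitution rule in several places: an LNA gain of 0x00 or 0xff in EEPROM is treated as "not calibrated" and replaced with the default gain taken from EEPROM_LNA. As a one-function sketch of that rule:

#include <stdio.h>

/* Substitution rule used throughout rt2800_validate_eeprom(): 0x00 and
 * 0xff mark an uncalibrated LNA gain, so the default gain is used. */
static unsigned int sanitize_lna_gain(unsigned int value,
				      unsigned int default_gain)
{
	if (value == 0x00 || value == 0xff)
		return default_gain;
	return value;
}

int main(void)
{
	printf("%u\n", sanitize_lna_gain(0xff, 10));	/* prints 10 */
	printf("%u\n", sanitize_lna_gain(0x08, 10));	/* prints 8 */
	return 0;
}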
@@ -5707,7 +6995,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
/*
* Read EEPROM word for configuration.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
/*
* Identify RF chipset by EEPROM value
@@ -5717,7 +7005,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt(rt2x00dev, RT3290) ||
rt2x00_rt(rt2x00dev, RT5390) ||
rt2x00_rt(rt2x00dev, RT5392))
- rt2x00_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &rf);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_CHIP_ID, &rf);
else
rf = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
@@ -5731,6 +7019,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
case RF3021:
case RF3022:
case RF3052:
+ case RF3053:
case RF3290:
case RF3320:
case RF3322:
@@ -5757,7 +7046,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00dev->default_ant.rx_chain_num =
rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH);
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
if (rt2x00_rt(rt2x00dev, RT3070) ||
rt2x00_rt(rt2x00dev, RT3090) ||
@@ -5810,7 +7099,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
/*
* Read frequency offset and RF programming sequence.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET);
/*
@@ -5827,7 +7116,7 @@ static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
/*
* Check if support EIRP tx power limit feature.
*/
- rt2x00_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER, &eeprom);
if (rt2x00_get_field16(eeprom, EEPROM_EIRP_MAX_TX_POWER_2GHZ) <
EIRP_MAX_TX_POWER_LIMIT)
@@ -6109,12 +7398,79 @@ static const struct rf_channel rf_vals_5592_xtal40[] = {
{196, 83, 0, 12, 1},
};
+static const struct rf_channel rf_vals_3053[] = {
+ /* Channel, N, R, K */
+ {1, 241, 2, 2},
+ {2, 241, 2, 7},
+ {3, 242, 2, 2},
+ {4, 242, 2, 7},
+ {5, 243, 2, 2},
+ {6, 243, 2, 7},
+ {7, 244, 2, 2},
+ {8, 244, 2, 7},
+ {9, 245, 2, 2},
+ {10, 245, 2, 7},
+ {11, 246, 2, 2},
+ {12, 246, 2, 7},
+ {13, 247, 2, 2},
+ {14, 248, 2, 4},
+
+ {36, 0x56, 0, 4},
+ {38, 0x56, 0, 6},
+ {40, 0x56, 0, 8},
+ {44, 0x57, 0, 0},
+ {46, 0x57, 0, 2},
+ {48, 0x57, 0, 4},
+ {52, 0x57, 0, 8},
+ {54, 0x57, 0, 10},
+ {56, 0x58, 0, 0},
+ {60, 0x58, 0, 4},
+ {62, 0x58, 0, 6},
+ {64, 0x58, 0, 8},
+
+ {100, 0x5B, 0, 8},
+ {102, 0x5B, 0, 10},
+ {104, 0x5C, 0, 0},
+ {108, 0x5C, 0, 4},
+ {110, 0x5C, 0, 6},
+ {112, 0x5C, 0, 8},
+
+ /* NOTE: Channel 114 has been removed intentionally.
+ * The EEPROM contains no TX power values for that,
+ * and it is disabled in the vendor driver as well.
+ */
+
+ {116, 0x5D, 0, 0},
+ {118, 0x5D, 0, 2},
+ {120, 0x5D, 0, 4},
+ {124, 0x5D, 0, 8},
+ {126, 0x5D, 0, 10},
+ {128, 0x5E, 0, 0},
+ {132, 0x5E, 0, 4},
+ {134, 0x5E, 0, 6},
+ {136, 0x5E, 0, 8},
+ {140, 0x5F, 0, 0},
+
+ {149, 0x5F, 0, 9},
+ {151, 0x5F, 0, 11},
+ {153, 0x60, 0, 1},
+ {157, 0x60, 0, 5},
+ {159, 0x60, 0, 7},
+ {161, 0x60, 0, 9},
+ {165, 0x61, 0, 1},
+ {167, 0x61, 0, 3},
+ {169, 0x61, 0, 5},
+ {171, 0x61, 0, 7},
+ {173, 0x61, 0, 9},
+};
+
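rf_vals_3053[] above lists, per supported channel, the N/R/K synthesizer parameters programmed into the RF registers; rt2x00 consumes it through spec->channels. The standalone sketch below only shows the shape of the data with a simple lookup over a trimmed copy of the table; it is not how the core actually indexes the array:

#include <stddef.h>
#include <stdio.h>

/* Trimmed copy of rf_vals_3053[] plus a linear lookup, to show how a
 * channel number maps to its N/R/K parameters. The struct mirrors only
 * the four columns of the table; the real struct rf_channel is defined
 * in rt2x00.h. */
struct sketch_rf_channel {
	int channel;
	unsigned int n;
	unsigned int r;
	unsigned int k;
};

static const struct sketch_rf_channel sketch_vals[] = {
	{1, 241, 2, 2},
	{14, 248, 2, 4},
	{36, 0x56, 0, 4},
	{173, 0x61, 0, 9},
};

static const struct sketch_rf_channel *find_channel(int channel)
{
	size_t i;

	for (i = 0; i < sizeof(sketch_vals) / sizeof(sketch_vals[0]); i++)
		if (sketch_vals[i].channel == channel)
			return &sketch_vals[i];
	return NULL;	/* e.g. channel 114, intentionally left out */
}

int main(void)
{
	const struct sketch_rf_channel *rf = find_channel(36);

	if (rf)
		printf("ch%d: N=%u R=%u K=%u\n",
		       rf->channel, rf->n, rf->r, rf->k);
	return 0;
}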
static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
char *default_power1;
char *default_power2;
+ char *default_power3;
unsigned int i;
u16 eeprom;
u32 reg;
@@ -6133,7 +7489,8 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS |
+ IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
/*
* Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices
@@ -6148,7 +7505,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
- rt2x00_eeprom_addr(rt2x00dev,
+ rt2800_eeprom_addr(rt2x00dev,
EEPROM_MAC_ADDR_0));
/*
@@ -6164,7 +7521,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
rt2x00dev->hw->max_report_rates = 7;
rt2x00dev->hw->max_rate_tries = 1;
- rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
+ rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
/*
* Initialize hw_mode information.
@@ -6199,6 +7556,10 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_3x);
spec->channels = rf_vals_3x;
+ } else if (rt2x00_rf(rt2x00dev, RF3053)) {
+ spec->supported_bands |= SUPPORT_BAND_5GHZ;
+ spec->num_channels = ARRAY_SIZE(rf_vals_3053);
+ spec->channels = rf_vals_3053;
} else if (rt2x00_rf(rt2x00dev, RF5592)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
@@ -6264,21 +7625,40 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->channels_info = info;
- default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
- default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
+ default_power1 = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
+ default_power2 = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
+
+ if (rt2x00dev->default_ant.tx_chain_num > 2)
+ default_power3 = rt2800_eeprom_addr(rt2x00dev,
+ EEPROM_EXT_TXPOWER_BG3);
+ else
+ default_power3 = NULL;
for (i = 0; i < 14; i++) {
info[i].default_power1 = default_power1[i];
info[i].default_power2 = default_power2[i];
+ if (default_power3)
+ info[i].default_power3 = default_power3[i];
}
if (spec->num_channels > 14) {
- default_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
- default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
+ default_power1 = rt2800_eeprom_addr(rt2x00dev,
+ EEPROM_TXPOWER_A1);
+ default_power2 = rt2800_eeprom_addr(rt2x00dev,
+ EEPROM_TXPOWER_A2);
+
+ if (rt2x00dev->default_ant.tx_chain_num > 2)
+ default_power3 =
+ rt2800_eeprom_addr(rt2x00dev,
+ EEPROM_EXT_TXPOWER_A3);
+ else
+ default_power3 = NULL;
for (i = 14; i < spec->num_channels; i++) {
info[i].default_power1 = default_power1[i - 14];
info[i].default_power2 = default_power2[i - 14];
+ if (default_power3)
+ info[i].default_power3 = default_power3[i - 14];
}
}
@@ -6289,6 +7669,7 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
case RF3022:
case RF3320:
case RF3052:
+ case RF3053:
case RF3290:
case RF5360:
case RF5370:
@@ -6327,6 +7708,7 @@ static int rt2800_probe_rt(struct rt2x00_dev *rt2x00dev)
case RT3352:
case RT3390:
case RT3572:
+ case RT3593:
case RT5390:
case RT5392:
case RT5592:
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 6ec739466db..a94ba447e63 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -226,4 +226,8 @@ int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey);
void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev);
+void rt2800_get_txwi_rxwi_size(struct rt2x00_dev *rt2x00dev,
+ unsigned short *txwi_size,
+ unsigned short *rxwi_size);
+
#endif /* RT2800LIB_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 00055627eb8..f8f2abbfbb6 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -507,9 +507,13 @@ static int rt2800pci_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
if (rt2x00_is_pcie(rt2x00dev) &&
- (rt2x00_rt(rt2x00dev, RT3572) ||
+ (rt2x00_rt(rt2x00dev, RT3090) ||
+ rt2x00_rt(rt2x00dev, RT3390) ||
+ rt2x00_rt(rt2x00dev, RT3572) ||
+ rt2x00_rt(rt2x00dev, RT3593) ||
rt2x00_rt(rt2x00dev, RT5390) ||
- rt2x00_rt(rt2x00dev, RT5392))) {
+ rt2x00_rt(rt2x00dev, RT5392) ||
+ rt2x00_rt(rt2x00dev, RT5592))) {
rt2x00mmio_register_read(rt2x00dev, AUX_CTRL, &reg);
rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1);
rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1);
@@ -1189,12 +1193,17 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
static void rt2800pci_queue_init(struct data_queue *queue)
{
+ struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
+ unsigned short txwi_size, rxwi_size;
+
+ rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
+
switch (queue->qid) {
case QID_RX:
queue->limit = 128;
queue->data_size = AGGREGATION_SIZE;
queue->desc_size = RXD_DESC_SIZE;
- queue->winfo_size = RXWI_DESC_SIZE_4WORDS;
+ queue->winfo_size = rxwi_size;
queue->priv_size = sizeof(struct queue_entry_priv_mmio);
break;
@@ -1205,7 +1214,7 @@ static void rt2800pci_queue_init(struct data_queue *queue)
queue->limit = 64;
queue->data_size = AGGREGATION_SIZE;
queue->desc_size = TXD_DESC_SIZE;
- queue->winfo_size = TXWI_DESC_SIZE_4WORDS;
+ queue->winfo_size = txwi_size;
queue->priv_size = sizeof(struct queue_entry_priv_mmio);
break;
@@ -1213,7 +1222,7 @@ static void rt2800pci_queue_init(struct data_queue *queue)
queue->limit = 8;
queue->data_size = 0; /* No DMA required for beacons */
queue->desc_size = TXD_DESC_SIZE;
- queue->winfo_size = TXWI_DESC_SIZE_4WORDS;
+ queue->winfo_size = txwi_size;
queue->priv_size = sizeof(struct queue_entry_priv_mmio);
break;
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 840833b26bf..96961b9a395 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -854,13 +854,7 @@ static void rt2800usb_queue_init(struct data_queue *queue)
struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
unsigned short txwi_size, rxwi_size;
- if (rt2x00_rt(rt2x00dev, RT5592)) {
- txwi_size = TXWI_DESC_SIZE_5WORDS;
- rxwi_size = RXWI_DESC_SIZE_6WORDS;
- } else {
- txwi_size = TXWI_DESC_SIZE_4WORDS;
- rxwi_size = RXWI_DESC_SIZE_4WORDS;
- }
+ rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size);
switch (queue->qid) {
case QID_RX:
@@ -977,6 +971,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0411, 0x016f) },
{ USB_DEVICE(0x0411, 0x01a2) },
{ USB_DEVICE(0x0411, 0x01ee) },
+ { USB_DEVICE(0x0411, 0x01a8) },
/* Corega */
{ USB_DEVICE(0x07aa, 0x002f) },
{ USB_DEVICE(0x07aa, 0x003c) },
@@ -1194,6 +1189,40 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Zinwell */
{ USB_DEVICE(0x5a57, 0x0284) },
#endif
+#ifdef CONFIG_RT2800USB_RT3573
+ /* AirLive */
+ { USB_DEVICE(0x1b75, 0x7733) },
+ /* ASUS */
+ { USB_DEVICE(0x0b05, 0x17bc) },
+ { USB_DEVICE(0x0b05, 0x17ad) },
+ /* Belkin */
+ { USB_DEVICE(0x050d, 0x1103) },
+ /* Cameo */
+ { USB_DEVICE(0x148f, 0xf301) },
+ /* Edimax */
+ { USB_DEVICE(0x7392, 0x7733) },
+ /* Hawking */
+ { USB_DEVICE(0x0e66, 0x0020) },
+ { USB_DEVICE(0x0e66, 0x0021) },
+ /* I-O DATA */
+ { USB_DEVICE(0x04bb, 0x094e) },
+ /* Linksys */
+ { USB_DEVICE(0x13b1, 0x003b) },
+ /* Logitec */
+ { USB_DEVICE(0x0789, 0x016b) },
+ /* NETGEAR */
+ { USB_DEVICE(0x0846, 0x9012) },
+ { USB_DEVICE(0x0846, 0x9019) },
+ /* Planex */
+ { USB_DEVICE(0x2019, 0xed19) },
+ /* Ralink */
+ { USB_DEVICE(0x148f, 0x3573) },
+ /* Sitecom */
+ { USB_DEVICE(0x0df6, 0x0067) },
+ { USB_DEVICE(0x0df6, 0x006a) },
+ /* ZyXEL */
+ { USB_DEVICE(0x0586, 0x3421) },
+#endif
#ifdef CONFIG_RT2800USB_RT53XX
/* Arcadyan */
{ USB_DEVICE(0x043e, 0x7a12) },
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index ee3fc570b11..fe4c572db52 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -211,6 +211,7 @@ struct channel_info {
short max_power;
short default_power1;
short default_power2;
+ short default_power3;
};
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index b16521e6bf4..712eea9d398 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -566,10 +566,10 @@ static void rt2x00lib_rxdone_check_ba(struct rt2x00_dev *rt2x00dev,
#undef TID_CHECK
- if (compare_ether_addr(ba->ra, entry->ta))
+ if (!ether_addr_equal(ba->ra, entry->ta))
continue;
- if (compare_ether_addr(ba->ta, entry->ra))
+ if (!ether_addr_equal(ba->ta, entry->ra))
continue;
/* Mark BAR since we received the according BA */
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 6c0a91ff963..6c8a33b6ee2 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -936,13 +936,8 @@ void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index)
spin_unlock_irqrestore(&queue->index_lock, irqflags);
}
-void rt2x00queue_pause_queue(struct data_queue *queue)
+static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue)
{
- if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
- !test_bit(QUEUE_STARTED, &queue->flags) ||
- test_and_set_bit(QUEUE_PAUSED, &queue->flags))
- return;
-
switch (queue->qid) {
case QID_AC_VO:
case QID_AC_VI:
@@ -958,6 +953,15 @@ void rt2x00queue_pause_queue(struct data_queue *queue)
break;
}
}
+void rt2x00queue_pause_queue(struct data_queue *queue)
+{
+ if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
+ !test_bit(QUEUE_STARTED, &queue->flags) ||
+ test_and_set_bit(QUEUE_PAUSED, &queue->flags))
+ return;
+
+ rt2x00queue_pause_queue_nocheck(queue);
+}
EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
void rt2x00queue_unpause_queue(struct data_queue *queue)
@@ -1019,7 +1023,7 @@ void rt2x00queue_stop_queue(struct data_queue *queue)
return;
}
- rt2x00queue_pause_queue(queue);
+ rt2x00queue_pause_queue_nocheck(queue);
queue->rt2x00dev->ops->lib->stop_queue(queue);
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 91a04e2b8ec..fc207b268e4 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -3,10 +3,10 @@
* Linux device driver for RTL8180 / RTL8185
*
* Copyright 2007 Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
*
* Thanks to Realtek for their support!
*
@@ -32,7 +32,7 @@
#include "grf5101.h"
MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
-MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
+MODULE_AUTHOR("Andrea Merello <andrea.merello@gmail.com>");
MODULE_DESCRIPTION("RTL8180 / RTL8185 PCI wireless driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rtl818x/rtl8180/grf5101.c b/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
index 077ff92cc13..dc845693f32 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
@@ -2,7 +2,7 @@
/*
* Radio tuning for GCT GRF5101 on RTL8180
*
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Code from the BSD driver and the rtl8181 project have been
* very useful to understand certain things
diff --git a/drivers/net/wireless/rtl818x/rtl8180/grf5101.h b/drivers/net/wireless/rtl818x/rtl8180/grf5101.h
index 76647111bcf..4d80a278512 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/grf5101.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/grf5101.h
@@ -4,7 +4,7 @@
/*
* Radio tuning for GCT GRF5101 on RTL8180
*
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Code from the BSD driver and the rtl8181 project have been
* very useful to understand certain things
diff --git a/drivers/net/wireless/rtl818x/rtl8180/max2820.c b/drivers/net/wireless/rtl818x/rtl8180/max2820.c
index 4715000c94d..a63c443c3c6 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/max2820.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/max2820.c
@@ -1,7 +1,7 @@
/*
* Radio tuning for Maxim max2820 on RTL8180
*
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Code from the BSD driver and the rtl8181 project have been
* very useful to understand certain things
diff --git a/drivers/net/wireless/rtl818x/rtl8180/max2820.h b/drivers/net/wireless/rtl818x/rtl8180/max2820.h
index 61cf6d1e7d5..8e982b72b69 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/max2820.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/max2820.h
@@ -4,7 +4,7 @@
/*
* Radio tuning for Maxim max2820 on RTL8180
*
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Code from the BSD driver and the rtl8181 project have been
* very useful to understand certain things
diff --git a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
index cc2a5412c1f..ee638d0749d 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
@@ -3,10 +3,10 @@
* Radio tuning for RTL8225 on RTL8180
*
* Copyright 2007 Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Based on the r8180 driver, which is:
- * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2005 Andrea Merello <andrea.merello@gmail.com>, et al.
*
* Thanks to Realtek for their support!
*
diff --git a/drivers/net/wireless/rtl818x/rtl8180/sa2400.c b/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
index b3ec40f6bd2..7614d9ccc72 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
@@ -2,7 +2,7 @@
/*
* Radio tuning for Philips SA2400 on RTL8180
*
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Code from the BSD driver and the rtl8181 project have been
* very useful to understand certain things
diff --git a/drivers/net/wireless/rtl818x/rtl8180/sa2400.h b/drivers/net/wireless/rtl818x/rtl8180/sa2400.h
index a4aaa0d413f..fb0093f3514 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/sa2400.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/sa2400.h
@@ -4,7 +4,7 @@
/*
* Radio tuning for Philips SA2400 on RTL8180
*
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Code from the BSD driver and the rtl8181 project have been
* very useful to understand certain things
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index f49220e234b..841fb9dfc9d 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -2,10 +2,10 @@
* Linux device driver for RTL8187
*
* Copyright 2007 Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Based on the r8187 driver, which is:
- * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2005 Andrea Merello <andrea.merello@gmail.com>, et al.
*
* The driver was extended to the RTL8187B in 2008 by:
* Herton Ronaldo Krzesinski <herton@mandriva.com.br>
@@ -37,7 +37,7 @@
#include "rfkill.h"
MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
-MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
+MODULE_AUTHOR("Andrea Merello <andrea.merello@gmail.com>");
MODULE_AUTHOR("Herton Ronaldo Krzesinski <herton@mandriva.com.br>");
MODULE_AUTHOR("Hin-Tak Leung <htl10@users.sourceforge.net>");
MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
index e19a20a8e95..56aee067f32 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
@@ -2,10 +2,10 @@
* Definitions for RTL8187 hardware
*
* Copyright 2007 Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Based on the r8187 driver, which is:
- * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2005 Andrea Merello <andrea.merello@gmail.com>, et al.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
index f0bf35fedba..a26193a0444 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
@@ -2,10 +2,10 @@
* Radio tuning for RTL8225 on RTL8187
*
* Copyright 2007 Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Based on the r8187 driver, which is:
- * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2005 Andrea Merello <andrea.merello@gmail.com>, et al.
*
* Magic delays, register offsets, and phy value tables below are
* taken from the original r8187 driver sources. Thanks to Realtek
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h
index 20c5b6ead0f..141afb09a5b 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h
@@ -2,10 +2,10 @@
* Radio tuning definitions for RTL8225 on RTL8187
*
* Copyright 2007 Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Based on the r8187 driver, which is:
- * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2005 Andrea Merello <andrea.merello@gmail.com>, et al.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h
index 1615f63b02f..ce23dfd4238 100644
--- a/drivers/net/wireless/rtl818x/rtl818x.h
+++ b/drivers/net/wireless/rtl818x/rtl818x.h
@@ -2,10 +2,10 @@
* Definitions for RTL818x hardware
*
* Copyright 2007 Michael Wu <flamingice@sourmilk.net>
- * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright 2007 Andrea Merello <andrea.merello@gmail.com>
*
* Based on the r8187 driver, which is:
- * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2005 Andrea Merello <andrea.merello@gmail.com>, et al.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
index 7253de3d8c6..c2ffce7a907 100644
--- a/drivers/net/wireless/rtlwifi/Kconfig
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -1,27 +1,20 @@
-config RTLWIFI
- tristate "Realtek wireless card support"
- depends on MAC80211
- select FW_LOADER
- ---help---
- This is common code for RTL8192CE/RTL8192CU/RTL8192SE/RTL8723AE
- drivers. This module does nothing by itself - the various front-end
- drivers need to be enabled to support any desired devices.
-
- If you choose to build as a module, it'll be called rtlwifi.
-
-config RTLWIFI_DEBUG
- bool "Debugging output for rtlwifi driver family"
- depends on RTLWIFI
+menuconfig RTL_CARDS
+ tristate "Realtek rtlwifi family of devices"
+ depends on MAC80211 && (PCI || USB)
default y
---help---
- To use the module option that sets the dynamic-debugging level for,
- the front-end driver, this parameter must be "Y". For memory-limited
- systems, choose "N". If in doubt, choose "Y".
+ This option will enable support for the Realtek mac80211-based
+ wireless drivers. Drivers rtl8192ce, rtl8192cu, rtl8192se, rtl8192de,
+	  rtl8723ae, and rtl8188ee share some common code.
+
+if RTL_CARDS
config RTL8192CE
tristate "Realtek RTL8192CE/RTL8188CE Wireless Network Adapter"
- depends on RTLWIFI && PCI
+ depends on PCI
select RTL8192C_COMMON
+ select RTLWIFI
+ select RTLWIFI_PCI
---help---
This is the driver for Realtek RTL8192CE/RTL8188CE 802.11n PCIe
wireless network adapters.
@@ -30,7 +23,9 @@ config RTL8192CE
config RTL8192SE
tristate "Realtek RTL8192SE/RTL8191SE PCIe Wireless Network Adapter"
- depends on RTLWIFI && PCI
+ depends on PCI
+ select RTLWIFI
+ select RTLWIFI_PCI
---help---
This is the driver for Realtek RTL8192SE/RTL8191SE 802.11n PCIe
wireless network adapters.
@@ -39,7 +34,9 @@ config RTL8192SE
config RTL8192DE
tristate "Realtek RTL8192DE/RTL8188DE PCIe Wireless Network Adapter"
- depends on RTLWIFI && PCI
+ depends on PCI
+ select RTLWIFI
+ select RTLWIFI_PCI
---help---
This is the driver for Realtek RTL8192DE/RTL8188DE 802.11n PCIe
wireless network adapters.
@@ -48,7 +45,9 @@ config RTL8192DE
config RTL8723AE
tristate "Realtek RTL8723AE PCIe Wireless Network Adapter"
- depends on RTLWIFI && PCI
+ depends on PCI
+ select RTLWIFI
+ select RTLWIFI_PCI
---help---
This is the driver for Realtek RTL8723AE 802.11n PCIe
wireless network adapters.
@@ -57,7 +56,9 @@ config RTL8723AE
config RTL8188EE
tristate "Realtek RTL8188EE Wireless Network Adapter"
- depends on RTLWIFI && PCI
+ depends on PCI
+ select RTLWIFI
+ select RTLWIFI_PCI
---help---
This is the driver for Realtek RTL8188EE 802.11n PCIe
wireless network adapters.
@@ -66,7 +67,9 @@ config RTL8188EE
config RTL8192CU
tristate "Realtek RTL8192CU/RTL8188CU USB Wireless Network Adapter"
- depends on RTLWIFI && USB
+ depends on USB
+ select RTLWIFI
+ select RTLWIFI_USB
select RTL8192C_COMMON
---help---
This is the driver for Realtek RTL8192CU/RTL8188CU 802.11n USB
@@ -74,7 +77,28 @@ config RTL8192CU
If you choose to build it as a module, it will be called rtl8192cu
+config RTLWIFI
+ tristate
+ select FW_LOADER
+
+config RTLWIFI_PCI
+ tristate
+
+config RTLWIFI_USB
+ tristate
+
+config RTLWIFI_DEBUG
+ bool "Debugging output for rtlwifi driver family"
+ depends on RTLWIFI
+ default y
+ ---help---
+	  To use the module option that sets the dynamic-debugging level for
+	  the front-end driver, this parameter must be "Y". For memory-limited
+ systems, choose "N". If in doubt, choose "Y".
+
config RTL8192C_COMMON
tristate
depends on RTL8192CE || RTL8192CU
- default m
+ default y
+
+endif
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile
index ff02b874f8d..d56f023a4b9 100644
--- a/drivers/net/wireless/rtlwifi/Makefile
+++ b/drivers/net/wireless/rtlwifi/Makefile
@@ -12,13 +12,11 @@ rtlwifi-objs := \
rtl8192c_common-objs += \
-ifneq ($(CONFIG_PCI),)
-rtlwifi-objs += pci.o
-endif
+obj-$(CONFIG_RTLWIFI_PCI) += rtl_pci.o
+rtl_pci-objs := pci.o
-ifneq ($(CONFIG_USB),)
-rtlwifi-objs += usb.o
-endif
+obj-$(CONFIG_RTLWIFI_USB) += rtl_usb.o
+rtl_usb-objs := usb.o
obj-$(CONFIG_RTL8192C_COMMON) += rtl8192c/
obj-$(CONFIG_RTL8192CE) += rtl8192ce/
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 9d558ac77b0..8bb4a9a01a1 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -172,6 +172,7 @@ u8 rtl_tid_to_ac(u8 tid)
{
return tid_to_ac[tid];
}
+EXPORT_SYMBOL_GPL(rtl_tid_to_ac);
static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
struct ieee80211_sta_ht_cap *ht_cap)
@@ -406,6 +407,7 @@ void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
cancel_delayed_work(&rtlpriv->works.fwevt_wq);
}
+EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work);
void rtl_init_rfkill(struct ieee80211_hw *hw)
{
@@ -439,6 +441,7 @@ void rtl_deinit_rfkill(struct ieee80211_hw *hw)
{
wiphy_rfkill_stop_polling(hw->wiphy);
}
+EXPORT_SYMBOL_GPL(rtl_deinit_rfkill);
int rtl_init_core(struct ieee80211_hw *hw)
{
@@ -489,10 +492,12 @@ int rtl_init_core(struct ieee80211_hw *hw)
return 0;
}
+EXPORT_SYMBOL_GPL(rtl_init_core);
void rtl_deinit_core(struct ieee80211_hw *hw)
{
}
+EXPORT_SYMBOL_GPL(rtl_deinit_core);
void rtl_init_rx_config(struct ieee80211_hw *hw)
{
@@ -501,6 +506,7 @@ void rtl_init_rx_config(struct ieee80211_hw *hw)
rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *) (&mac->rx_conf));
}
+EXPORT_SYMBOL_GPL(rtl_init_rx_config);
/*********************************************************
*
@@ -879,6 +885,7 @@ bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
return true;
}
+EXPORT_SYMBOL_GPL(rtl_tx_mgmt_proc);
void rtl_get_tcb_desc(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info,
@@ -1052,6 +1059,7 @@ bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
return true;
}
+EXPORT_SYMBOL_GPL(rtl_action_proc);
/*should call before software enc*/
u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
@@ -1125,6 +1133,7 @@ u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
return false;
}
+EXPORT_SYMBOL_GPL(rtl_is_special_data);
/*********************************************************
*
@@ -1295,11 +1304,12 @@ void rtl_beacon_statistic(struct ieee80211_hw *hw, struct sk_buff *skb)
return;
/* and only beacons from the associated BSSID, please */
- if (compare_ether_addr(hdr->addr3, rtlpriv->mac80211.bssid))
+ if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
return;
rtlpriv->link_info.bcn_rx_inperiod++;
}
+EXPORT_SYMBOL_GPL(rtl_beacon_statistic);
void rtl_watchdog_wq_callback(void *data)
{
@@ -1793,6 +1803,7 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len)
mac->vendor = vendor;
}
+EXPORT_SYMBOL_GPL(rtl_recognize_peer);
/*********************************************************
*
@@ -1849,6 +1860,7 @@ struct attribute_group rtl_attribute_group = {
.name = "rtlsysfs",
.attrs = rtl_sysfs_entries,
};
+EXPORT_SYMBOL_GPL(rtl_attribute_group);
MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
@@ -1856,7 +1868,8 @@ MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
-struct rtl_global_var global_var = {};
+struct rtl_global_var rtl_global_var = {};
+EXPORT_SYMBOL_GPL(rtl_global_var);
static int __init rtl_core_module_init(void)
{
@@ -1864,8 +1877,8 @@ static int __init rtl_core_module_init(void)
pr_err("Unable to register rtl_rc, use default RC !!\n");
/* init some global vars */
- INIT_LIST_HEAD(&global_var.glb_priv_list);
- spin_lock_init(&global_var.glb_list_lock);
+ INIT_LIST_HEAD(&rtl_global_var.glb_priv_list);
+ spin_lock_init(&rtl_global_var.glb_list_lock);
return 0;
}
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 8576bc34b03..0e5fe0902da 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -147,7 +147,7 @@ void rtl_recognize_peer(struct ieee80211_hw *hw, u8 *data, unsigned int len);
u8 rtl_tid_to_ac(u8 tid);
extern struct attribute_group rtl_attribute_group;
void rtl_easy_concurrent_retrytimer_callback(unsigned long data);
-extern struct rtl_global_var global_var;
+extern struct rtl_global_var rtl_global_var;
int rtlwifi_rate_mapping(struct ieee80211_hw *hw,
bool isht, u8 desc_rate, bool first_ampdu);
bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index ee84844be00..733b7ce7f0e 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1330,3 +1330,4 @@ const struct ieee80211_ops rtl_ops = {
.rfkill_poll = rtl_op_rfkill_poll,
.flush = rtl_op_flush,
};
+EXPORT_SYMBOL_GPL(rtl_ops);
diff --git a/drivers/net/wireless/rtlwifi/debug.c b/drivers/net/wireless/rtlwifi/debug.c
index 7d52d3d7769..76e2086e137 100644
--- a/drivers/net/wireless/rtlwifi/debug.c
+++ b/drivers/net/wireless/rtlwifi/debug.c
@@ -51,3 +51,4 @@ void rtl_dbgp_flag_init(struct ieee80211_hw *hw)
/*Init Debug flag enable condition */
}
+EXPORT_SYMBOL_GPL(rtl_dbgp_flag_init);
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
index 9e3894178e7..838a1ed3f19 100644
--- a/drivers/net/wireless/rtlwifi/efuse.c
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -229,6 +229,7 @@ void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf)
*pbuf = (u8) (value32 & 0xff);
}
+EXPORT_SYMBOL_GPL(read_efuse_byte);
void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
{
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index c97e9d32733..703f839af6c 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -35,6 +35,13 @@
#include "efuse.h"
#include <linux/export.h>
#include <linux/kmemleak.h>
+#include <linux/module.h>
+
+MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PCI basic driver for rtlwifi");
static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
PCI_VENDOR_ID_INTEL,
@@ -1008,19 +1015,6 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
return;
}
-static void rtl_lps_change_work_callback(struct work_struct *work)
-{
- struct rtl_works *rtlworks =
- container_of(work, struct rtl_works, lps_change_work);
- struct ieee80211_hw *hw = rtlworks->hw;
- struct rtl_priv *rtlpriv = rtl_priv(hw);
-
- if (rtlpriv->enter_ps)
- rtl_lps_enter(hw);
- else
- rtl_lps_leave(hw);
-}
-
static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1899,7 +1893,7 @@ int rtl_pci_probe(struct pci_dev *pdev,
rtlpriv->rtlhal.interface = INTF_PCI;
rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
rtlpriv->intf_ops = &rtl_pci_ops;
- rtlpriv->glb_var = &global_var;
+ rtlpriv->glb_var = &rtl_global_var;
/*
*init dbgp flags before all
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 884bceae38a..0d81f766fd0 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -269,6 +269,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
spin_unlock_irqrestore(&rtlpriv->locks.ips_lock, flags);
}
+EXPORT_SYMBOL_GPL(rtl_ips_nic_on);
/*for FW LPS*/
@@ -518,6 +519,7 @@ void rtl_swlps_beacon(struct ieee80211_hw *hw, void *data, unsigned int len)
"u_bufferd: %x, m_buffered: %x\n", u_buffed, m_buffed);
}
}
+EXPORT_SYMBOL_GPL(rtl_swlps_beacon);
void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
{
@@ -611,6 +613,19 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
MSECS(sleep_intv * mac->vif->bss_conf.beacon_int - 40));
}
+void rtl_lps_change_work_callback(struct work_struct *work)
+{
+ struct rtl_works *rtlworks =
+ container_of(work, struct rtl_works, lps_change_work);
+ struct ieee80211_hw *hw = rtlworks->hw;
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (rtlpriv->enter_ps)
+ rtl_lps_enter(hw);
+ else
+ rtl_lps_leave(hw);
+}
+EXPORT_SYMBOL_GPL(rtl_lps_change_work_callback);
void rtl_swlps_wq_callback(void *data)
{
@@ -673,7 +688,7 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
find_p2p_ie = true;
/*to find noa ie*/
while (ie + 1 < end) {
- noa_len = READEF2BYTE(&ie[1]);
+ noa_len = READEF2BYTE((__le16 *)&ie[1]);
if (ie + 3 + ie[1] > end)
return;
@@ -702,13 +717,13 @@ static void rtl_p2p_noa_ie(struct ieee80211_hw *hw, void *data,
READEF1BYTE(ie+index);
index += 1;
p2pinfo->noa_duration[i] =
- READEF4BYTE(ie+index);
+ READEF4BYTE((__le32 *)ie+index);
index += 4;
p2pinfo->noa_interval[i] =
- READEF4BYTE(ie+index);
+ READEF4BYTE((__le32 *)ie+index);
index += 4;
p2pinfo->noa_start_time[i] =
- READEF4BYTE(ie+index);
+ READEF4BYTE((__le32 *)ie+index);
index += 4;
}
@@ -765,7 +780,7 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
RT_TRACE(rtlpriv, COMP_FW, DBG_LOUD, "action frame find P2P IE.\n");
/*to find noa ie*/
while (ie + 1 < end) {
- noa_len = READEF2BYTE(&ie[1]);
+ noa_len = READEF2BYTE((__le16 *)&ie[1]);
if (ie + 3 + ie[1] > end)
return;
@@ -794,13 +809,13 @@ static void rtl_p2p_action_ie(struct ieee80211_hw *hw, void *data,
READEF1BYTE(ie+index);
index += 1;
p2pinfo->noa_duration[i] =
- READEF4BYTE(ie+index);
+ READEF4BYTE((__le32 *)ie+index);
index += 4;
p2pinfo->noa_interval[i] =
- READEF4BYTE(ie+index);
+ READEF4BYTE((__le32 *)ie+index);
index += 4;
p2pinfo->noa_start_time[i] =
- READEF4BYTE(ie+index);
+ READEF4BYTE((__le32 *)ie+index);
index += 4;
}
@@ -908,7 +923,7 @@ void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
return;
/* and only beacons from the associated BSSID, please */
- if (compare_ether_addr(hdr->addr3, rtlpriv->mac80211.bssid))
+ if (!ether_addr_equal(hdr->addr3, rtlpriv->mac80211.bssid))
return;
/* check if this really is a beacon */
@@ -922,3 +937,4 @@ void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len)
else
rtl_p2p_noa_ie(hw, data, len - FCS_LEN);
}
+EXPORT_SYMBOL_GPL(rtl_p2p_info);
diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/rtlwifi/ps.h
index 4d682b753f5..88bd76ea88f 100644
--- a/drivers/net/wireless/rtlwifi/ps.h
+++ b/drivers/net/wireless/rtlwifi/ps.h
@@ -49,5 +49,6 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw);
void rtl_swlps_rf_sleep(struct ieee80211_hw *hw);
void rtl_p2p_ps_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
void rtl_p2p_info(struct ieee80211_hw *hw, void *data, unsigned int len);
+void rtl_lps_change_work_callback(struct work_struct *work);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c
index f9f059dadb7..a98acefb8c0 100644
--- a/drivers/net/wireless/rtlwifi/rc.c
+++ b/drivers/net/wireless/rtlwifi/rc.c
@@ -218,6 +218,7 @@ static void rtl_tx_status(void *ppriv,
static void rtl_rate_init(void *ppriv,
struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta)
{
}
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
index a8871d66d56..68685a89825 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -305,13 +305,14 @@ static void _rtl88ee_translate_rx_signal_stuff(struct ieee80211_hw *hw,
psaddr = ieee80211_get_SA(hdr);
memcpy(pstatus->psaddr, psaddr, ETH_ALEN);
- addr = (!compare_ether_addr(mac->bssid, (ufc & IEEE80211_FCTL_TODS) ?
- hdr->addr1 : (ufc & IEEE80211_FCTL_FROMDS) ?
- hdr->addr2 : hdr->addr3));
+ addr = ether_addr_equal(mac->bssid,
+ (ufc & IEEE80211_FCTL_TODS) ? hdr->addr1 :
+ (ufc & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
+ hdr->addr3);
match_bssid = ((IEEE80211_FTYPE_CTL != type) && (!pstatus->hwerror) &&
(!pstatus->crc) && (!pstatus->icv)) && addr;
- addr = (!compare_ether_addr(praddr, rtlefuse->dev_addr));
+ addr = ether_addr_equal(praddr, rtlefuse->dev_addr);
packet_toself = match_bssid && addr;
if (ieee80211_is_beacon(fc))
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
index 8e3ec1e2564..0f7812e0c8a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
@@ -109,5 +109,8 @@ void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
u8 element_id, u32 cmd_len, u8 *p_cmdbuffer);
bool rtl92cu_phy_mac_config(struct ieee80211_hw *hw);
+void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 rssi_level);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
index 262e1e4c6e5..a1310abd0d5 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.h
@@ -49,8 +49,5 @@ bool rtl92cu_phy_set_rf_power_state(struct ieee80211_hw *hw,
u32 rtl92cu_phy_query_rf_reg(struct ieee80211_hw *hw,
enum radio_path rfpath, u32 regaddr, u32 bitmask);
void rtl92cu_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
-void rtl92cu_update_hal_rate_tbl(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- u8 rssi_level);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
index c72758d8f4e..bcd82a1020a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -255,16 +255,16 @@ static void _rtl8723ae_translate_rx_signal_stuff(struct ieee80211_hw *hw,
type = WLAN_FC_GET_TYPE(fc);
praddr = hdr->addr1;
- packet_matchbssid = ((IEEE80211_FTYPE_CTL != type) &&
- (!compare_ether_addr(mac->bssid,
- (le16_to_cpu(fc) & IEEE80211_FCTL_TODS) ?
- hdr->addr1 : (le16_to_cpu(fc) &
- IEEE80211_FCTL_FROMDS) ?
- hdr->addr2 : hdr->addr3)) && (!pstatus->hwerror) &&
- (!pstatus->crc) && (!pstatus->icv));
-
- packet_toself = packet_matchbssid &&
- (!compare_ether_addr(praddr, rtlefuse->dev_addr));
+ packet_matchbssid =
+ ((IEEE80211_FTYPE_CTL != type) &&
+ ether_addr_equal(mac->bssid,
+ (le16_to_cpu(fc) & IEEE80211_FCTL_TODS) ? hdr->addr1 :
+ (le16_to_cpu(fc) & IEEE80211_FCTL_FROMDS) ? hdr->addr2 :
+ hdr->addr3) &&
+ (!pstatus->hwerror) && (!pstatus->crc) && (!pstatus->icv));
+
+ packet_toself = (packet_matchbssid &&
+ ether_addr_equal(praddr, rtlefuse->dev_addr));
if (ieee80211_is_beacon(fc))
packet_beacon = true;
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index a3532e07787..e56778cac9b 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -32,6 +32,13 @@
#include "ps.h"
#include "rtl8192c/fw_common.h"
#include <linux/export.h>
+#include <linux/module.h>
+
+MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
+MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
+MODULE_AUTHOR("Larry Finger <Larry.FInger@lwfinger.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("USB basic driver for rtlwifi");
#define REALTEK_USB_VENQT_READ 0xC0
#define REALTEK_USB_VENQT_WRITE 0x40
@@ -1070,6 +1077,8 @@ int rtl_usb_probe(struct usb_interface *intf,
spin_lock_init(&rtlpriv->locks.usb_lock);
INIT_WORK(&rtlpriv->works.fill_h2c_cmd,
rtl_fill_h2c_cmd_work_callback);
+ INIT_WORK(&rtlpriv->works.lps_change_work,
+ rtl_lps_change_work_callback);
rtlpriv->usb_data_index = 0;
init_completion(&rtlpriv->firmware_loading_complete);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index b8db55c868c..38995f90040 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -1315,7 +1315,7 @@ static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
#ifdef CONFIG_PM
static int
-wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
+wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
{
int num_fields = 0, in_field = 0, fields_size = 0;
int i, pattern_len = 0;
@@ -1458,9 +1458,9 @@ void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
* Allocates an RX filter returned through f
* which needs to be freed using rx_filter_free()
*/
-static int wl1271_convert_wowlan_pattern_to_rx_filter(
- struct cfg80211_wowlan_trig_pkt_pattern *p,
- struct wl12xx_rx_filter **f)
+static int
+wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
+ struct wl12xx_rx_filter **f)
{
int i, j, ret = 0;
struct wl12xx_rx_filter *filter;
@@ -1562,7 +1562,7 @@ static int wl1271_configure_wowlan(struct wl1271 *wl,
/* Translate WoWLAN patterns into filters */
for (i = 0; i < wow->n_patterns; i++) {
- struct cfg80211_wowlan_trig_pkt_pattern *p;
+ struct cfg80211_pkt_pattern *p;
struct wl12xx_rx_filter *filter = NULL;
p = &wow->patterns[i];
@@ -5623,7 +5623,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->wiphy->max_remain_on_channel_duration = 5000;
wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
- WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
/* make sure all our channels fit in the scanned_ch bitmask */
BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
diff --git a/drivers/net/wireless/ti/wlcore/testmode.c b/drivers/net/wireless/ti/wlcore/testmode.c
index f3442762d88..527590f2adf 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.c
+++ b/drivers/net/wireless/ti/wlcore/testmode.c
@@ -356,7 +356,8 @@ out:
return ret;
}
-int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
+int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len)
{
struct wl1271 *wl = hw->priv;
struct nlattr *tb[WL1271_TM_ATTR_MAX + 1];
diff --git a/drivers/net/wireless/ti/wlcore/testmode.h b/drivers/net/wireless/ti/wlcore/testmode.h
index 8071654259e..61d8434d859 100644
--- a/drivers/net/wireless/ti/wlcore/testmode.h
+++ b/drivers/net/wireless/ti/wlcore/testmode.h
@@ -26,6 +26,7 @@
#include <net/mac80211.h>
-int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len);
+int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len);
#endif /* __WL1271_TESTMODE_H__ */
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 4941f201d6c..d39c4178c33 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -75,8 +75,10 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw)
len = fw_entry->size;
buf = kmalloc(1024, GFP_ATOMIC);
- if (!buf)
+ if (!buf) {
+ err = -ENOMEM;
goto exit;
+ }
while (len > 0) {
int translen = (len > 1024) ? 1024 : len;
@@ -98,10 +100,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw)
goto exit;
err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4,
- USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT);
+ USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT);
if (err < 0)
goto exit;
+ memcpy(&ret, buf, sizeof(ret));
+
if (ret & 0x80) {
err = -EIO;
goto exit;
@@ -1762,8 +1766,10 @@ static int zd1201_probe(struct usb_interface *interface,
zd->endp_out2 = 2;
zd->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
zd->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!zd->rx_urb || !zd->tx_urb)
+ if (!zd->rx_urb || !zd->tx_urb) {
+ err = -ENOMEM;
goto err_zd;
+ }
mdelay(100);
err = zd1201_drvr_start(zd);
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 8a4d77ee9c5..a1977430ddf 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -45,31 +45,109 @@
#include <xen/grant_table.h>
#include <xen/xenbus.h>
-struct xen_netbk;
+typedef unsigned int pending_ring_idx_t;
+#define INVALID_PENDING_RING_IDX (~0U)
+
+/* For the head field in pending_tx_info: it is used to indicate
+ * whether this tx info is the head of one or more coalesced requests.
+ *
+ * When head != INVALID_PENDING_RING_IDX, it marks the start of a new
+ * group of tx requests and the end of the previous one.
+ *
+ * An example sequence of head fields (I = INVALID_PENDING_RING_IDX):
+ *
+ * ...|0 I I I|5 I|9 I I I|...
+ * -->|<-INUSE----------------
+ *
+ * After consuming the first slot(s) we have:
+ *
+ * ...|V V V V|5 I|9 I I I|...
+ * -----FREE->|<-INUSE--------
+ *
+ * where V stands for "valid pending ring index". These entries are
+ * considered free and can contain any value other than
+ * INVALID_PENDING_RING_IDX; in practice we use 0.
+ *
+ * The in-use non-INVALID_PENDING_RING_IDX values (0, 5 and 9 in the
+ * example above) are indexes into the pending_tx_info and
+ * mmap_pages arrays.
+ */
+struct pending_tx_info {
+ struct xen_netif_tx_request req; /* coalesced tx request */
+ pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
+ * if it is head of one or more tx
+ * reqs
+ */
+};
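A minimal sketch of the head-field convention described above (illustration only; this helper is hypothetical, is not part of the patch, and assumes the pending_ring and pending_tx_info fields added to struct xenvif further below):

static u16 coalesced_group_len(struct xenvif *vif, pending_ring_idx_t head_pos)
{
	/* Sketch only, not part of the patch: starting from the ring
	 * position of a head slot, count slots until the next slot
	 * whose head field is not INVALID_PENDING_RING_IDX, i.e. the
	 * start of the next coalesced group (or the first free slot,
	 * which by convention also holds a non-INVALID value).
	 */
	pending_ring_idx_t pos = head_pos;
	u16 len = 0;

	do {
		len++;
		pos++;
	} while (vif->pending_tx_info[vif->pending_ring[pos %
			MAX_PENDING_REQS]].head == INVALID_PENDING_RING_IDX);

	return len;
}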
+
+#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
+#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
+
+struct xenvif_rx_meta {
+ int id;
+ int size;
+ int gso_size;
+};
+
+/* Discriminate from any valid pending_idx value. */
+#define INVALID_PENDING_IDX 0xFFFF
+
+#define MAX_BUFFER_OFFSET PAGE_SIZE
+
+#define MAX_PENDING_REQS 256
struct xenvif {
/* Unique identifier for this interface. */
domid_t domid;
unsigned int handle;
- /* Reference to netback processing backend. */
- struct xen_netbk *netbk;
+ /* Use NAPI for guest TX */
+ struct napi_struct napi;
+ /* When feature-split-event-channels = 0, tx_irq = rx_irq. */
+ unsigned int tx_irq;
+ /* Only used when feature-split-event-channels = 1 */
+ char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
+ struct xen_netif_tx_back_ring tx;
+ struct sk_buff_head tx_queue;
+ struct page *mmap_pages[MAX_PENDING_REQS];
+ pending_ring_idx_t pending_prod;
+ pending_ring_idx_t pending_cons;
+ u16 pending_ring[MAX_PENDING_REQS];
+ struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
+
+ /* Coalescing tx requests before copying makes the number of grant
+ * copy ops greater than or equal to the number of slots required. In
+ * the worst case a tx request consumes 2 gnttab_copy operations.
+ */
+ struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
- u8 fe_dev_addr[6];
+ /* Use kthread for guest RX */
+ struct task_struct *task;
+ wait_queue_head_t wq;
/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
- unsigned int tx_irq;
unsigned int rx_irq;
/* Only used when feature-split-event-channels = 1 */
- char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
+ struct xen_netif_rx_back_ring rx;
+ struct sk_buff_head rx_queue;
- /* List of frontends to notify after a batch of frames sent. */
- struct list_head notify_list;
+ /* Allow xenvif_start_xmit() to peek ahead in the rx request
+ * ring. This is a prediction of what rx_req_cons will be
+ * once all queued skbs are put on the ring.
+ */
+ RING_IDX rx_req_cons_peek;
+
+ /* Given a MAX_BUFFER_OFFSET of 4096, the worst case is that each
+ * head/fragment page uses 2 copy operations because it
+ * straddles two buffers in the frontend.
+ */
+ struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
+ struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
- /* The shared rings and indexes. */
- struct xen_netif_tx_back_ring tx;
- struct xen_netif_rx_back_ring rx;
+
+ u8 fe_dev_addr[6];
/* Frontend feature information. */
u8 can_sg:1;
@@ -80,13 +158,6 @@ struct xenvif {
/* Internal feature information. */
u8 can_queue:1; /* can queue packets for receiver? */
- /*
- * Allow xenvif_start_xmit() to peek ahead in the rx request
- * ring. This is a prediction of what rx_req_cons will be
- * once all queued skbs are put on the ring.
- */
- RING_IDX rx_req_cons_peek;
-
/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
unsigned long credit_bytes;
unsigned long credit_usec;
@@ -97,11 +168,7 @@ struct xenvif {
unsigned long rx_gso_checksum_fixup;
/* Miscellaneous private stuff. */
- struct list_head schedule_list;
- atomic_t refcnt;
struct net_device *dev;
-
- wait_queue_head_t waiting_to_free;
};
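The two worst-case notes above (two gnttab_copy operations per coalesced tx request, and two copy operations per rx head/fragment page because a page may straddle two MAX_BUFFER_OFFSET-sized frontend buffers) are what drive the 2*MAX_PENDING_REQS and 2*XEN_NETIF_RX_RING_SIZE array sizes in this structure. A minimal sketch of that arithmetic (illustration only; the helper is hypothetical and not part of the patch):

/* Illustration only: with at most two grant copies per tx request or
 * per rx page, n requests/pages need at most 2 * n copy operations,
 * matching tx_copy_ops[2*MAX_PENDING_REQS] and
 * grant_copy_op[2*XEN_NETIF_RX_RING_SIZE] above.
 */
static inline unsigned int worst_case_copy_ops(unsigned int n)
{
	return 2 * n;
}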
static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
@@ -109,9 +176,6 @@ static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
return to_xenbus_device(vif->dev->dev.parent);
}
-#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
-#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
-
struct xenvif *xenvif_alloc(struct device *parent,
domid_t domid,
unsigned int handle);
@@ -121,39 +185,26 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
unsigned int rx_evtchn);
void xenvif_disconnect(struct xenvif *vif);
-void xenvif_get(struct xenvif *vif);
-void xenvif_put(struct xenvif *vif);
-
int xenvif_xenbus_init(void);
void xenvif_xenbus_fini(void);
int xenvif_schedulable(struct xenvif *vif);
-int xen_netbk_rx_ring_full(struct xenvif *vif);
+int xenvif_rx_ring_full(struct xenvif *vif);
-int xen_netbk_must_stop_queue(struct xenvif *vif);
+int xenvif_must_stop_queue(struct xenvif *vif);
/* (Un)Map communication rings. */
-void xen_netbk_unmap_frontend_rings(struct xenvif *vif);
-int xen_netbk_map_frontend_rings(struct xenvif *vif,
- grant_ref_t tx_ring_ref,
- grant_ref_t rx_ring_ref);
-
-/* (De)Register a xenvif with the netback backend. */
-void xen_netbk_add_xenvif(struct xenvif *vif);
-void xen_netbk_remove_xenvif(struct xenvif *vif);
-
-/* (De)Schedule backend processing for a xenvif */
-void xen_netbk_schedule_xenvif(struct xenvif *vif);
-void xen_netbk_deschedule_xenvif(struct xenvif *vif);
+void xenvif_unmap_frontend_rings(struct xenvif *vif);
+int xenvif_map_frontend_rings(struct xenvif *vif,
+ grant_ref_t tx_ring_ref,
+ grant_ref_t rx_ring_ref);
/* Check for SKBs from frontend and schedule backend processing */
-void xen_netbk_check_rx_xenvif(struct xenvif *vif);
-/* Receive an SKB from the frontend */
-void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb);
+void xenvif_check_rx_xenvif(struct xenvif *vif);
/* Queue an SKB for transmission to the frontend */
-void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
+void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
/* Notify xenvif that ring now has space to send an skb to the frontend */
void xenvif_notify_tx_completion(struct xenvif *vif);
@@ -161,7 +212,12 @@ void xenvif_notify_tx_completion(struct xenvif *vif);
void xenvif_carrier_off(struct xenvif *vif);
/* Returns number of ring slots required to send an skb to the frontend */
-unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
+unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
+
+int xenvif_tx_action(struct xenvif *vif, int budget);
+void xenvif_rx_action(struct xenvif *vif);
+
+int xenvif_kthread(void *data);
extern bool separate_tx_rx_irq;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 087d2db0389..625c6f49cfb 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -30,6 +30,7 @@
#include "common.h"
+#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
@@ -38,17 +39,7 @@
#include <asm/xen/hypercall.h>
#define XENVIF_QUEUE_LENGTH 32
-
-void xenvif_get(struct xenvif *vif)
-{
- atomic_inc(&vif->refcnt);
-}
-
-void xenvif_put(struct xenvif *vif)
-{
- if (atomic_dec_and_test(&vif->refcnt))
- wake_up(&vif->waiting_to_free);
-}
+#define XENVIF_NAPI_WEIGHT 64
int xenvif_schedulable(struct xenvif *vif)
{
@@ -57,28 +48,62 @@ int xenvif_schedulable(struct xenvif *vif)
static int xenvif_rx_schedulable(struct xenvif *vif)
{
- return xenvif_schedulable(vif) && !xen_netbk_rx_ring_full(vif);
+ return xenvif_schedulable(vif) && !xenvif_rx_ring_full(vif);
}
static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
struct xenvif *vif = dev_id;
- if (vif->netbk == NULL)
- return IRQ_HANDLED;
-
- xen_netbk_schedule_xenvif(vif);
+ if (RING_HAS_UNCONSUMED_REQUESTS(&vif->tx))
+ napi_schedule(&vif->napi);
return IRQ_HANDLED;
}
+static int xenvif_poll(struct napi_struct *napi, int budget)
+{
+ struct xenvif *vif = container_of(napi, struct xenvif, napi);
+ int work_done;
+
+ work_done = xenvif_tx_action(vif, budget);
+
+ if (work_done < budget) {
+ int more_to_do = 0;
+ unsigned long flags;
+
+ /* It is necessary to disable IRQs before calling
+ * RING_HAS_UNCONSUMED_REQUESTS. Otherwise we might
+ * lose an event from the frontend.
+ *
+ * Consider:
+ * RING_HAS_UNCONSUMED_REQUESTS
+ * <frontend generates event to trigger napi_schedule>
+ * __napi_complete
+ *
+ * This handler is still in the scheduled state, so the
+ * event has no effect at all. After __napi_complete
+ * this handler is descheduled and cannot get
+ * scheduled again. We lose the event in this case and the
+ * ring will be completely stalled.
+ */
+
+ local_irq_save(flags);
+
+ RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
+ if (!more_to_do)
+ __napi_complete(napi);
+
+ local_irq_restore(flags);
+ }
+
+ return work_done;
+}
+
static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
struct xenvif *vif = dev_id;
- if (vif->netbk == NULL)
- return IRQ_HANDLED;
-
if (xenvif_rx_schedulable(vif))
netif_wake_queue(vif->dev);
@@ -99,7 +124,8 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
BUG_ON(skb->dev != dev);
- if (vif->netbk == NULL)
+ /* Drop the packet if vif is not ready */
+ if (vif->task == NULL)
goto drop;
/* Drop the packet if the target domain has no receive buffers. */
@@ -107,13 +133,12 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
goto drop;
/* Reserve ring slots for the worst-case number of fragments. */
- vif->rx_req_cons_peek += xen_netbk_count_skb_slots(vif, skb);
- xenvif_get(vif);
+ vif->rx_req_cons_peek += xenvif_count_skb_slots(vif, skb);
- if (vif->can_queue && xen_netbk_must_stop_queue(vif))
+ if (vif->can_queue && xenvif_must_stop_queue(vif))
netif_stop_queue(dev);
- xen_netbk_queue_tx_skb(vif, skb);
+ xenvif_queue_tx_skb(vif, skb);
return NETDEV_TX_OK;
@@ -123,11 +148,6 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb)
-{
- netif_rx_ni(skb);
-}
-
void xenvif_notify_tx_completion(struct xenvif *vif)
{
if (netif_queue_stopped(vif->dev) && xenvif_rx_schedulable(vif))
@@ -142,21 +162,20 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
static void xenvif_up(struct xenvif *vif)
{
- xen_netbk_add_xenvif(vif);
+ napi_enable(&vif->napi);
enable_irq(vif->tx_irq);
if (vif->tx_irq != vif->rx_irq)
enable_irq(vif->rx_irq);
- xen_netbk_check_rx_xenvif(vif);
+ xenvif_check_rx_xenvif(vif);
}
static void xenvif_down(struct xenvif *vif)
{
+ napi_disable(&vif->napi);
disable_irq(vif->tx_irq);
if (vif->tx_irq != vif->rx_irq)
disable_irq(vif->rx_irq);
del_timer_sync(&vif->credit_timeout);
- xen_netbk_deschedule_xenvif(vif);
- xen_netbk_remove_xenvif(vif);
}
static int xenvif_open(struct net_device *dev)
@@ -272,11 +291,12 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
struct net_device *dev;
struct xenvif *vif;
char name[IFNAMSIZ] = {};
+ int i;
snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
dev = alloc_netdev(sizeof(struct xenvif), name, ether_setup);
if (dev == NULL) {
- pr_warn("Could not allocate netdev\n");
+ pr_warn("Could not allocate netdev for %s\n", name);
return ERR_PTR(-ENOMEM);
}
@@ -285,14 +305,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif = netdev_priv(dev);
vif->domid = domid;
vif->handle = handle;
- vif->netbk = NULL;
vif->can_sg = 1;
vif->csum = 1;
- atomic_set(&vif->refcnt, 1);
- init_waitqueue_head(&vif->waiting_to_free);
vif->dev = dev;
- INIT_LIST_HEAD(&vif->schedule_list);
- INIT_LIST_HEAD(&vif->notify_list);
vif->credit_bytes = vif->remaining_credit = ~0UL;
vif->credit_usec = 0UL;
@@ -307,6 +322,16 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
dev->tx_queue_len = XENVIF_QUEUE_LENGTH;
+ skb_queue_head_init(&vif->rx_queue);
+ skb_queue_head_init(&vif->tx_queue);
+
+ vif->pending_cons = 0;
+ vif->pending_prod = MAX_PENDING_REQS;
+ for (i = 0; i < MAX_PENDING_REQS; i++)
+ vif->pending_ring[i] = i;
+ for (i = 0; i < MAX_PENDING_REQS; i++)
+ vif->mmap_pages[i] = NULL;
+
/*
* Initialise a dummy MAC address. We choose the numerically
* largest non-broadcast address to prevent the address getting
@@ -316,6 +341,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
memset(dev->dev_addr, 0xFF, ETH_ALEN);
dev->dev_addr[0] &= ~0x01;
+ netif_napi_add(dev, &vif->napi, xenvif_poll, XENVIF_NAPI_WEIGHT);
+
netif_carrier_off(dev);
err = register_netdev(dev);
@@ -341,7 +368,7 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
__module_get(THIS_MODULE);
- err = xen_netbk_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
+ err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
if (err < 0)
goto err;
@@ -377,7 +404,14 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
disable_irq(vif->rx_irq);
}
- xenvif_get(vif);
+ init_waitqueue_head(&vif->wq);
+ vif->task = kthread_create(xenvif_kthread,
+ (void *)vif, vif->dev->name);
+ if (IS_ERR(vif->task)) {
+ pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
+ err = PTR_ERR(vif->task);
+ goto err_rx_unbind;
+ }
rtnl_lock();
if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
@@ -388,12 +422,18 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
xenvif_up(vif);
rtnl_unlock();
+ wake_up_process(vif->task);
+
return 0;
+
+err_rx_unbind:
+ unbind_from_irqhandler(vif->rx_irq, vif);
+ vif->rx_irq = 0;
err_tx_unbind:
unbind_from_irqhandler(vif->tx_irq, vif);
vif->tx_irq = 0;
err_unmap:
- xen_netbk_unmap_frontend_rings(vif);
+ xenvif_unmap_frontend_rings(vif);
err:
module_put(THIS_MODULE);
return err;
@@ -408,7 +448,6 @@ void xenvif_carrier_off(struct xenvif *vif)
if (netif_running(dev))
xenvif_down(vif);
rtnl_unlock();
- xenvif_put(vif);
}
void xenvif_disconnect(struct xenvif *vif)
@@ -422,9 +461,6 @@ void xenvif_disconnect(struct xenvif *vif)
if (netif_carrier_ok(vif->dev))
xenvif_carrier_off(vif);
- atomic_dec(&vif->refcnt);
- wait_event(vif->waiting_to_free, atomic_read(&vif->refcnt) == 0);
-
if (vif->tx_irq) {
if (vif->tx_irq == vif->rx_irq)
unbind_from_irqhandler(vif->tx_irq, vif);
@@ -438,9 +474,14 @@ void xenvif_disconnect(struct xenvif *vif)
need_module_put = 1;
}
+ if (vif->task)
+ kthread_stop(vif->task);
+
+ netif_napi_del(&vif->napi);
+
unregister_netdev(vif->dev);
- xen_netbk_unmap_frontend_rings(vif);
+ xenvif_unmap_frontend_rings(vif);
free_netdev(vif->dev);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 64828de25d9..956130c7003 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -70,131 +70,26 @@ module_param(fatal_skb_slots, uint, 0444);
*/
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
-typedef unsigned int pending_ring_idx_t;
-#define INVALID_PENDING_RING_IDX (~0U)
-
-struct pending_tx_info {
- struct xen_netif_tx_request req; /* coalesced tx request */
- struct xenvif *vif;
- pending_ring_idx_t head; /* head != INVALID_PENDING_RING_IDX
- * if it is head of one or more tx
- * reqs
- */
-};
-
-struct netbk_rx_meta {
- int id;
- int size;
- int gso_size;
-};
-
-#define MAX_PENDING_REQS 256
-
-/* Discriminate from any valid pending_idx value. */
-#define INVALID_PENDING_IDX 0xFFFF
-
-#define MAX_BUFFER_OFFSET PAGE_SIZE
-
-/* extra field used in struct page */
-union page_ext {
- struct {
-#if BITS_PER_LONG < 64
-#define IDX_WIDTH 8
-#define GROUP_WIDTH (BITS_PER_LONG - IDX_WIDTH)
- unsigned int group:GROUP_WIDTH;
- unsigned int idx:IDX_WIDTH;
-#else
- unsigned int group, idx;
-#endif
- } e;
- void *mapping;
-};
-
-struct xen_netbk {
- wait_queue_head_t wq;
- struct task_struct *task;
-
- struct sk_buff_head rx_queue;
- struct sk_buff_head tx_queue;
-
- struct timer_list net_timer;
-
- struct page *mmap_pages[MAX_PENDING_REQS];
-
- pending_ring_idx_t pending_prod;
- pending_ring_idx_t pending_cons;
- struct list_head net_schedule_list;
-
- /* Protect the net_schedule_list in netif. */
- spinlock_t net_schedule_list_lock;
-
- atomic_t netfront_count;
-
- struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
- /* Coalescing tx requests before copying makes number of grant
- * copy ops greater or equal to number of slots required. In
- * worst case a tx request consumes 2 gnttab_copy.
- */
- struct gnttab_copy tx_copy_ops[2*MAX_PENDING_REQS];
-
- u16 pending_ring[MAX_PENDING_REQS];
-
- /*
- * Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
- * head/fragment page uses 2 copy operations because it
- * straddles two buffers in the frontend.
- */
- struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
- struct netbk_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
-};
-
-static struct xen_netbk *xen_netbk;
-static int xen_netbk_group_nr;
-
/*
* If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
* one or more merged tx requests, otherwise it is the continuation of
* previous tx request.
*/
-static inline int pending_tx_is_head(struct xen_netbk *netbk, RING_IDX idx)
-{
- return netbk->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
-}
-
-void xen_netbk_add_xenvif(struct xenvif *vif)
+static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
{
- int i;
- int min_netfront_count;
- int min_group = 0;
- struct xen_netbk *netbk;
-
- min_netfront_count = atomic_read(&xen_netbk[0].netfront_count);
- for (i = 0; i < xen_netbk_group_nr; i++) {
- int netfront_count = atomic_read(&xen_netbk[i].netfront_count);
- if (netfront_count < min_netfront_count) {
- min_group = i;
- min_netfront_count = netfront_count;
- }
- }
-
- netbk = &xen_netbk[min_group];
-
- vif->netbk = netbk;
- atomic_inc(&netbk->netfront_count);
+ return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
}
-void xen_netbk_remove_xenvif(struct xenvif *vif)
-{
- struct xen_netbk *netbk = vif->netbk;
- vif->netbk = NULL;
- atomic_dec(&netbk->netfront_count);
-}
+static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
+ u8 status);
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
- u8 status);
static void make_tx_response(struct xenvif *vif,
struct xen_netif_tx_request *txp,
s8 st);
+
+static inline int tx_work_todo(struct xenvif *vif);
+static inline int rx_work_todo(struct xenvif *vif);
+
static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
u16 id,
s8 st,
@@ -202,55 +97,16 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
u16 size,
u16 flags);
-static inline unsigned long idx_to_pfn(struct xen_netbk *netbk,
+static inline unsigned long idx_to_pfn(struct xenvif *vif,
u16 idx)
{
- return page_to_pfn(netbk->mmap_pages[idx]);
+ return page_to_pfn(vif->mmap_pages[idx]);
}
-static inline unsigned long idx_to_kaddr(struct xen_netbk *netbk,
+static inline unsigned long idx_to_kaddr(struct xenvif *vif,
u16 idx)
{
- return (unsigned long)pfn_to_kaddr(idx_to_pfn(netbk, idx));
-}
-
-/* extra field used in struct page */
-static inline void set_page_ext(struct page *pg, struct xen_netbk *netbk,
- unsigned int idx)
-{
- unsigned int group = netbk - xen_netbk;
- union page_ext ext = { .e = { .group = group + 1, .idx = idx } };
-
- BUILD_BUG_ON(sizeof(ext) > sizeof(ext.mapping));
- pg->mapping = ext.mapping;
-}
-
-static int get_page_ext(struct page *pg,
- unsigned int *pgroup, unsigned int *pidx)
-{
- union page_ext ext = { .mapping = pg->mapping };
- struct xen_netbk *netbk;
- unsigned int group, idx;
-
- group = ext.e.group - 1;
-
- if (group < 0 || group >= xen_netbk_group_nr)
- return 0;
-
- netbk = &xen_netbk[group];
-
- idx = ext.e.idx;
-
- if ((idx < 0) || (idx >= MAX_PENDING_REQS))
- return 0;
-
- if (netbk->mmap_pages[idx] != pg)
- return 0;
-
- *pgroup = group;
- *pidx = idx;
-
- return 1;
+ return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
}
/*
@@ -278,15 +134,10 @@ static inline pending_ring_idx_t pending_index(unsigned i)
return i & (MAX_PENDING_REQS-1);
}
-static inline pending_ring_idx_t nr_pending_reqs(struct xen_netbk *netbk)
+static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
{
return MAX_PENDING_REQS -
- netbk->pending_prod + netbk->pending_cons;
-}
-
-static void xen_netbk_kick_thread(struct xen_netbk *netbk)
-{
- wake_up(&netbk->wq);
+ vif->pending_prod + vif->pending_cons;
}
static int max_required_rx_slots(struct xenvif *vif)
@@ -300,7 +151,7 @@ static int max_required_rx_slots(struct xenvif *vif)
return max;
}
-int xen_netbk_rx_ring_full(struct xenvif *vif)
+int xenvif_rx_ring_full(struct xenvif *vif)
{
RING_IDX peek = vif->rx_req_cons_peek;
RING_IDX needed = max_required_rx_slots(vif);
@@ -309,16 +160,16 @@ int xen_netbk_rx_ring_full(struct xenvif *vif)
((vif->rx.rsp_prod_pvt + XEN_NETIF_RX_RING_SIZE - peek) < needed);
}
-int xen_netbk_must_stop_queue(struct xenvif *vif)
+int xenvif_must_stop_queue(struct xenvif *vif)
{
- if (!xen_netbk_rx_ring_full(vif))
+ if (!xenvif_rx_ring_full(vif))
return 0;
vif->rx.sring->req_event = vif->rx_req_cons_peek +
max_required_rx_slots(vif);
mb(); /* request notification /then/ check the queue */
- return xen_netbk_rx_ring_full(vif);
+ return xenvif_rx_ring_full(vif);
}
/*
@@ -364,9 +215,9 @@ static bool start_new_rx_buffer(int offset, unsigned long size, int head)
/*
* Figure out how many ring slots we're going to need to send @skb to
* the guest. This function is essentially a dry run of
- * netbk_gop_frag_copy.
+ * xenvif_gop_frag_copy.
*/
-unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
+unsigned int xenvif_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
{
unsigned int count;
int i, copy_off;
@@ -418,15 +269,15 @@ struct netrx_pending_operations {
unsigned copy_prod, copy_cons;
unsigned meta_prod, meta_cons;
struct gnttab_copy *copy;
- struct netbk_rx_meta *meta;
+ struct xenvif_rx_meta *meta;
int copy_off;
grant_ref_t copy_gref;
};
-static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
- struct netrx_pending_operations *npo)
+static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
+ struct netrx_pending_operations *npo)
{
- struct netbk_rx_meta *meta;
+ struct xenvif_rx_meta *meta;
struct xen_netif_rx_request *req;
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
@@ -446,19 +297,13 @@ static struct netbk_rx_meta *get_next_rx_buffer(struct xenvif *vif,
* Set up the grant operations for this fragment. If it's a flipping
* interface, we also set up the unmap request from here.
*/
-static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
- struct netrx_pending_operations *npo,
- struct page *page, unsigned long size,
- unsigned long offset, int *head)
+static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
+ struct netrx_pending_operations *npo,
+ struct page *page, unsigned long size,
+ unsigned long offset, int *head)
{
struct gnttab_copy *copy_gop;
- struct netbk_rx_meta *meta;
- /*
- * These variables are used iff get_page_ext returns true,
- * in which case they are guaranteed to be initialized.
- */
- unsigned int uninitialized_var(group), uninitialized_var(idx);
- int foreign = get_page_ext(page, &group, &idx);
+ struct xenvif_rx_meta *meta;
unsigned long bytes;
/* Data must not cross a page boundary. */
@@ -494,26 +339,15 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
copy_gop = npo->copy + npo->copy_prod++;
copy_gop->flags = GNTCOPY_dest_gref;
- if (foreign) {
- struct xen_netbk *netbk = &xen_netbk[group];
- struct pending_tx_info *src_pend;
-
- src_pend = &netbk->pending_tx_info[idx];
+ copy_gop->len = bytes;
- copy_gop->source.domid = src_pend->vif->domid;
- copy_gop->source.u.ref = src_pend->req.gref;
- copy_gop->flags |= GNTCOPY_source_gref;
- } else {
- void *vaddr = page_address(page);
- copy_gop->source.domid = DOMID_SELF;
- copy_gop->source.u.gmfn = virt_to_mfn(vaddr);
- }
+ copy_gop->source.domid = DOMID_SELF;
+ copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
copy_gop->source.offset = offset;
- copy_gop->dest.domid = vif->domid;
+ copy_gop->dest.domid = vif->domid;
copy_gop->dest.offset = npo->copy_off;
copy_gop->dest.u.ref = npo->copy_gref;
- copy_gop->len = bytes;
npo->copy_off += bytes;
meta->size += bytes;
@@ -549,14 +383,14 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
* zero GSO descriptors (for non-GSO packets) or one descriptor (for
* frontend-side LRO).
*/
-static int netbk_gop_skb(struct sk_buff *skb,
- struct netrx_pending_operations *npo)
+static int xenvif_gop_skb(struct sk_buff *skb,
+ struct netrx_pending_operations *npo)
{
struct xenvif *vif = netdev_priv(skb->dev);
int nr_frags = skb_shinfo(skb)->nr_frags;
int i;
struct xen_netif_rx_request *req;
- struct netbk_rx_meta *meta;
+ struct xenvif_rx_meta *meta;
unsigned char *data;
int head = 1;
int old_meta_prod;
@@ -593,30 +427,30 @@ static int netbk_gop_skb(struct sk_buff *skb,
if (data + len > skb_tail_pointer(skb))
len = skb_tail_pointer(skb) - data;
- netbk_gop_frag_copy(vif, skb, npo,
- virt_to_page(data), len, offset, &head);
+ xenvif_gop_frag_copy(vif, skb, npo,
+ virt_to_page(data), len, offset, &head);
data += len;
}
for (i = 0; i < nr_frags; i++) {
- netbk_gop_frag_copy(vif, skb, npo,
- skb_frag_page(&skb_shinfo(skb)->frags[i]),
- skb_frag_size(&skb_shinfo(skb)->frags[i]),
- skb_shinfo(skb)->frags[i].page_offset,
- &head);
+ xenvif_gop_frag_copy(vif, skb, npo,
+ skb_frag_page(&skb_shinfo(skb)->frags[i]),
+ skb_frag_size(&skb_shinfo(skb)->frags[i]),
+ skb_shinfo(skb)->frags[i].page_offset,
+ &head);
}
return npo->meta_prod - old_meta_prod;
}
/*
- * This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
+ * This is a twin to xenvif_gop_skb. Assume that xenvif_gop_skb was
* used to set up the operations on the top of
* netrx_pending_operations, which have since been done. Check that
* they didn't give any errors and advance over them.
*/
-static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
- struct netrx_pending_operations *npo)
+static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
+ struct netrx_pending_operations *npo)
{
struct gnttab_copy *copy_op;
int status = XEN_NETIF_RSP_OKAY;
@@ -635,9 +469,9 @@ static int netbk_check_gop(struct xenvif *vif, int nr_meta_slots,
return status;
}
-static void netbk_add_frag_responses(struct xenvif *vif, int status,
- struct netbk_rx_meta *meta,
- int nr_meta_slots)
+static void xenvif_add_frag_responses(struct xenvif *vif, int status,
+ struct xenvif_rx_meta *meta,
+ int nr_meta_slots)
{
int i;
unsigned long offset;
@@ -665,9 +499,13 @@ struct skb_cb_overlay {
int meta_slots_used;
};
-static void xen_netbk_rx_action(struct xen_netbk *netbk)
+static void xenvif_kick_thread(struct xenvif *vif)
+{
+ wake_up(&vif->wq);
+}
+
+void xenvif_rx_action(struct xenvif *vif)
{
- struct xenvif *vif = NULL, *tmp;
s8 status;
u16 flags;
struct xen_netif_rx_response *resp;
@@ -679,22 +517,23 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
int count;
unsigned long offset;
struct skb_cb_overlay *sco;
+ int need_to_notify = 0;
struct netrx_pending_operations npo = {
- .copy = netbk->grant_copy_op,
- .meta = netbk->meta,
+ .copy = vif->grant_copy_op,
+ .meta = vif->meta,
};
skb_queue_head_init(&rxq);
count = 0;
- while ((skb = skb_dequeue(&netbk->rx_queue)) != NULL) {
+ while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
vif = netdev_priv(skb->dev);
nr_frags = skb_shinfo(skb)->nr_frags;
sco = (struct skb_cb_overlay *)skb->cb;
- sco->meta_slots_used = netbk_gop_skb(skb, &npo);
+ sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
count += nr_frags + 1;
@@ -706,27 +545,27 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
break;
}
- BUG_ON(npo.meta_prod > ARRAY_SIZE(netbk->meta));
+ BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
if (!npo.copy_prod)
return;
- BUG_ON(npo.copy_prod > ARRAY_SIZE(netbk->grant_copy_op));
- gnttab_batch_copy(netbk->grant_copy_op, npo.copy_prod);
+ BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
+ gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
sco = (struct skb_cb_overlay *)skb->cb;
vif = netdev_priv(skb->dev);
- if (netbk->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
+ if (vif->meta[npo.meta_cons].gso_size && vif->gso_prefix) {
resp = RING_GET_RESPONSE(&vif->rx,
- vif->rx.rsp_prod_pvt++);
+ vif->rx.rsp_prod_pvt++);
resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
- resp->offset = netbk->meta[npo.meta_cons].gso_size;
- resp->id = netbk->meta[npo.meta_cons].id;
+ resp->offset = vif->meta[npo.meta_cons].gso_size;
+ resp->id = vif->meta[npo.meta_cons].id;
resp->status = sco->meta_slots_used;
npo.meta_cons++;
@@ -737,7 +576,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
vif->dev->stats.tx_bytes += skb->len;
vif->dev->stats.tx_packets++;
- status = netbk_check_gop(vif, sco->meta_slots_used, &npo);
+ status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);
if (sco->meta_slots_used == 1)
flags = 0;
@@ -751,12 +590,12 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
flags |= XEN_NETRXF_data_validated;
offset = 0;
- resp = make_rx_response(vif, netbk->meta[npo.meta_cons].id,
+ resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
status, offset,
- netbk->meta[npo.meta_cons].size,
+ vif->meta[npo.meta_cons].size,
flags);
- if (netbk->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
+ if (vif->meta[npo.meta_cons].gso_size && !vif->gso_prefix) {
struct xen_netif_extra_info *gso =
(struct xen_netif_extra_info *)
RING_GET_RESPONSE(&vif->rx,
@@ -764,7 +603,7 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
resp->flags |= XEN_NETRXF_extra_info;
- gso->u.gso.size = netbk->meta[npo.meta_cons].gso_size;
+ gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
gso->u.gso.pad = 0;
gso->u.gso.features = 0;
@@ -773,123 +612,44 @@ static void xen_netbk_rx_action(struct xen_netbk *netbk)
gso->flags = 0;
}
- netbk_add_frag_responses(vif, status,
- netbk->meta + npo.meta_cons + 1,
- sco->meta_slots_used);
+ xenvif_add_frag_responses(vif, status,
+ vif->meta + npo.meta_cons + 1,
+ sco->meta_slots_used);
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
+ if (ret)
+ need_to_notify = 1;
+
xenvif_notify_tx_completion(vif);
- if (ret && list_empty(&vif->notify_list))
- list_add_tail(&vif->notify_list, &notify);
- else
- xenvif_put(vif);
npo.meta_cons += sco->meta_slots_used;
dev_kfree_skb(skb);
}
- list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
+ if (need_to_notify)
notify_remote_via_irq(vif->rx_irq);
- list_del_init(&vif->notify_list);
- xenvif_put(vif);
- }
/* More work to do? */
- if (!skb_queue_empty(&netbk->rx_queue) &&
- !timer_pending(&netbk->net_timer))
- xen_netbk_kick_thread(netbk);
-}
-
-void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
-{
- struct xen_netbk *netbk = vif->netbk;
-
- skb_queue_tail(&netbk->rx_queue, skb);
-
- xen_netbk_kick_thread(netbk);
-}
-
-static void xen_netbk_alarm(unsigned long data)
-{
- struct xen_netbk *netbk = (struct xen_netbk *)data;
- xen_netbk_kick_thread(netbk);
-}
-
-static int __on_net_schedule_list(struct xenvif *vif)
-{
- return !list_empty(&vif->schedule_list);
-}
-
-/* Must be called with net_schedule_list_lock held */
-static void remove_from_net_schedule_list(struct xenvif *vif)
-{
- if (likely(__on_net_schedule_list(vif))) {
- list_del_init(&vif->schedule_list);
- xenvif_put(vif);
- }
-}
-
-static struct xenvif *poll_net_schedule_list(struct xen_netbk *netbk)
-{
- struct xenvif *vif = NULL;
-
- spin_lock_irq(&netbk->net_schedule_list_lock);
- if (list_empty(&netbk->net_schedule_list))
- goto out;
-
- vif = list_first_entry(&netbk->net_schedule_list,
- struct xenvif, schedule_list);
- if (!vif)
- goto out;
-
- xenvif_get(vif);
-
- remove_from_net_schedule_list(vif);
-out:
- spin_unlock_irq(&netbk->net_schedule_list_lock);
- return vif;
+ if (!skb_queue_empty(&vif->rx_queue))
+ xenvif_kick_thread(vif);
}
-void xen_netbk_schedule_xenvif(struct xenvif *vif)
+void xenvif_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb)
{
- unsigned long flags;
- struct xen_netbk *netbk = vif->netbk;
-
- if (__on_net_schedule_list(vif))
- goto kick;
-
- spin_lock_irqsave(&netbk->net_schedule_list_lock, flags);
- if (!__on_net_schedule_list(vif) &&
- likely(xenvif_schedulable(vif))) {
- list_add_tail(&vif->schedule_list, &netbk->net_schedule_list);
- xenvif_get(vif);
- }
- spin_unlock_irqrestore(&netbk->net_schedule_list_lock, flags);
-
-kick:
- smp_mb();
- if ((nr_pending_reqs(netbk) < (MAX_PENDING_REQS/2)) &&
- !list_empty(&netbk->net_schedule_list))
- xen_netbk_kick_thread(netbk);
-}
+ skb_queue_tail(&vif->rx_queue, skb);
-void xen_netbk_deschedule_xenvif(struct xenvif *vif)
-{
- struct xen_netbk *netbk = vif->netbk;
- spin_lock_irq(&netbk->net_schedule_list_lock);
- remove_from_net_schedule_list(vif);
- spin_unlock_irq(&netbk->net_schedule_list_lock);
+ xenvif_kick_thread(vif);
}
-void xen_netbk_check_rx_xenvif(struct xenvif *vif)
+void xenvif_check_rx_xenvif(struct xenvif *vif)
{
int more_to_do;
RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
if (more_to_do)
- xen_netbk_schedule_xenvif(vif);
+ napi_schedule(&vif->napi);
}
static void tx_add_credit(struct xenvif *vif)
@@ -916,11 +676,11 @@ static void tx_credit_callback(unsigned long data)
{
struct xenvif *vif = (struct xenvif *)data;
tx_add_credit(vif);
- xen_netbk_check_rx_xenvif(vif);
+ xenvif_check_rx_xenvif(vif);
}
-static void netbk_tx_err(struct xenvif *vif,
- struct xen_netif_tx_request *txp, RING_IDX end)
+static void xenvif_tx_err(struct xenvif *vif,
+ struct xen_netif_tx_request *txp, RING_IDX end)
{
RING_IDX cons = vif->tx.req_cons;
@@ -931,21 +691,18 @@ static void netbk_tx_err(struct xenvif *vif,
txp = RING_GET_REQUEST(&vif->tx, cons++);
} while (1);
vif->tx.req_cons = cons;
- xen_netbk_check_rx_xenvif(vif);
- xenvif_put(vif);
}
-static void netbk_fatal_tx_err(struct xenvif *vif)
+static void xenvif_fatal_tx_err(struct xenvif *vif)
{
netdev_err(vif->dev, "fatal error; disabling device\n");
xenvif_carrier_off(vif);
- xenvif_put(vif);
}
-static int netbk_count_requests(struct xenvif *vif,
- struct xen_netif_tx_request *first,
- struct xen_netif_tx_request *txp,
- int work_to_do)
+static int xenvif_count_requests(struct xenvif *vif,
+ struct xen_netif_tx_request *first,
+ struct xen_netif_tx_request *txp,
+ int work_to_do)
{
RING_IDX cons = vif->tx.req_cons;
int slots = 0;
@@ -962,7 +719,7 @@ static int netbk_count_requests(struct xenvif *vif,
netdev_err(vif->dev,
"Asked for %d slots but exceeds this limit\n",
work_to_do);
- netbk_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(vif);
return -ENODATA;
}
@@ -973,7 +730,7 @@ static int netbk_count_requests(struct xenvif *vif,
netdev_err(vif->dev,
"Malicious frontend using %d slots, threshold %u\n",
slots, fatal_skb_slots);
- netbk_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(vif);
return -E2BIG;
}
@@ -1021,7 +778,7 @@ static int netbk_count_requests(struct xenvif *vif,
if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
txp->offset, txp->size);
- netbk_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(vif);
return -EINVAL;
}
@@ -1033,30 +790,30 @@ static int netbk_count_requests(struct xenvif *vif,
} while (more_data);
if (drop_err) {
- netbk_tx_err(vif, first, cons + slots);
+ xenvif_tx_err(vif, first, cons + slots);
return drop_err;
}
return slots;
}
-static struct page *xen_netbk_alloc_page(struct xen_netbk *netbk,
- u16 pending_idx)
+static struct page *xenvif_alloc_page(struct xenvif *vif,
+ u16 pending_idx)
{
struct page *page;
- page = alloc_page(GFP_KERNEL|__GFP_COLD);
+
+ page = alloc_page(GFP_ATOMIC|__GFP_COLD);
if (!page)
return NULL;
- set_page_ext(page, netbk, pending_idx);
- netbk->mmap_pages[pending_idx] = page;
+ vif->mmap_pages[pending_idx] = page;
+
return page;
}
-static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
- struct xenvif *vif,
- struct sk_buff *skb,
- struct xen_netif_tx_request *txp,
- struct gnttab_copy *gop)
+static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
+ struct sk_buff *skb,
+ struct xen_netif_tx_request *txp,
+ struct gnttab_copy *gop)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
skb_frag_t *frags = shinfo->frags;
@@ -1079,14 +836,14 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
/* Coalesce tx requests, at this point the packet passed in
* should be <= 64K. Any packets larger than 64K have been
- * handled in netbk_count_requests().
+ * handled in xenvif_count_requests().
*/
for (shinfo->nr_frags = slot = start; slot < nr_slots;
shinfo->nr_frags++) {
struct pending_tx_info *pending_tx_info =
- netbk->pending_tx_info;
+ vif->pending_tx_info;
- page = alloc_page(GFP_KERNEL|__GFP_COLD);
+ page = alloc_page(GFP_ATOMIC|__GFP_COLD);
if (!page)
goto err;
@@ -1121,21 +878,18 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
gop->len = txp->size;
dst_offset += gop->len;
- index = pending_index(netbk->pending_cons++);
+ index = pending_index(vif->pending_cons++);
- pending_idx = netbk->pending_ring[index];
+ pending_idx = vif->pending_ring[index];
memcpy(&pending_tx_info[pending_idx].req, txp,
sizeof(*txp));
- xenvif_get(vif);
-
- pending_tx_info[pending_idx].vif = vif;
/* Poison these fields, corresponding
* fields for head tx req will be set
* to correct values after the loop.
*/
- netbk->mmap_pages[pending_idx] = (void *)(~0UL);
+ vif->mmap_pages[pending_idx] = (void *)(~0UL);
pending_tx_info[pending_idx].head =
INVALID_PENDING_RING_IDX;
@@ -1155,8 +909,7 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
first->req.offset = 0;
first->req.size = dst_offset;
first->head = start_idx;
- set_page_ext(page, netbk, head_idx);
- netbk->mmap_pages[head_idx] = page;
+ vif->mmap_pages[head_idx] = page;
frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
}
@@ -1166,20 +919,20 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
err:
/* Unwind, freeing all pages and sending error responses. */
while (shinfo->nr_frags-- > start) {
- xen_netbk_idx_release(netbk,
+ xenvif_idx_release(vif,
frag_get_pending_idx(&frags[shinfo->nr_frags]),
XEN_NETIF_RSP_ERROR);
}
/* The head too, if necessary. */
if (start)
- xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+ xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
return NULL;
}
-static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
- struct sk_buff *skb,
- struct gnttab_copy **gopp)
+static int xenvif_tx_check_gop(struct xenvif *vif,
+ struct sk_buff *skb,
+ struct gnttab_copy **gopp)
{
struct gnttab_copy *gop = *gopp;
u16 pending_idx = *((u16 *)skb->data);
@@ -1192,7 +945,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
/* Check status of header. */
err = gop->status;
if (unlikely(err))
- xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+ xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
/* Skip first skb fragment if it is on same page as header fragment. */
start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
@@ -1202,7 +955,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
pending_ring_idx_t head;
pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
- tx_info = &netbk->pending_tx_info[pending_idx];
+ tx_info = &vif->pending_tx_info[pending_idx];
head = tx_info->head;
/* Check error status: if okay then remember grant handle. */
@@ -1210,18 +963,19 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
newerr = (++gop)->status;
if (newerr)
break;
- peek = netbk->pending_ring[pending_index(++head)];
- } while (!pending_tx_is_head(netbk, peek));
+ peek = vif->pending_ring[pending_index(++head)];
+ } while (!pending_tx_is_head(vif, peek));
if (likely(!newerr)) {
/* Had a previous error? Invalidate this fragment. */
if (unlikely(err))
- xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ xenvif_idx_release(vif, pending_idx,
+ XEN_NETIF_RSP_OKAY);
continue;
}
/* Error on this fragment: respond to client with an error. */
- xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_ERROR);
+ xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
/* Not the first error? Preceding frags already invalidated. */
if (err)
@@ -1229,10 +983,11 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
/* First error: invalidate header and preceding fragments. */
pending_idx = *((u16 *)skb->data);
- xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
for (j = start; j < i; j++) {
pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
- xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ xenvif_idx_release(vif, pending_idx,
+ XEN_NETIF_RSP_OKAY);
}
/* Remember the error: invalidate all subsequent fragments. */
@@ -1243,7 +998,7 @@ static int xen_netbk_tx_check_gop(struct xen_netbk *netbk,
return err;
}
-static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
+static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
int nr_frags = shinfo->nr_frags;
@@ -1257,20 +1012,20 @@ static void xen_netbk_fill_frags(struct xen_netbk *netbk, struct sk_buff *skb)
pending_idx = frag_get_pending_idx(frag);
- txp = &netbk->pending_tx_info[pending_idx].req;
- page = virt_to_page(idx_to_kaddr(netbk, pending_idx));
+ txp = &vif->pending_tx_info[pending_idx].req;
+ page = virt_to_page(idx_to_kaddr(vif, pending_idx));
__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
skb->len += txp->size;
skb->data_len += txp->size;
skb->truesize += txp->size;
- /* Take an extra reference to offset xen_netbk_idx_release */
- get_page(netbk->mmap_pages[pending_idx]);
- xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ /* Take an extra reference to offset xenvif_idx_release */
+ get_page(vif->mmap_pages[pending_idx]);
+ xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
}
}
-static int xen_netbk_get_extras(struct xenvif *vif,
+static int xenvif_get_extras(struct xenvif *vif,
struct xen_netif_extra_info *extras,
int work_to_do)
{
@@ -1280,7 +1035,7 @@ static int xen_netbk_get_extras(struct xenvif *vif,
do {
if (unlikely(work_to_do-- <= 0)) {
netdev_err(vif->dev, "Missing extra info\n");
- netbk_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(vif);
return -EBADR;
}
@@ -1291,7 +1046,7 @@ static int xen_netbk_get_extras(struct xenvif *vif,
vif->tx.req_cons = ++cons;
netdev_err(vif->dev,
"Invalid extra type: %d\n", extra.type);
- netbk_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(vif);
return -EINVAL;
}
@@ -1302,20 +1057,20 @@ static int xen_netbk_get_extras(struct xenvif *vif,
return work_to_do;
}
-static int netbk_set_skb_gso(struct xenvif *vif,
- struct sk_buff *skb,
- struct xen_netif_extra_info *gso)
+static int xenvif_set_skb_gso(struct xenvif *vif,
+ struct sk_buff *skb,
+ struct xen_netif_extra_info *gso)
{
if (!gso->u.gso.size) {
netdev_err(vif->dev, "GSO size must not be zero.\n");
- netbk_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(vif);
return -EINVAL;
}
/* Currently only TCPv4 S.O. is supported. */
if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
- netbk_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(vif);
return -EINVAL;
}
@@ -1426,16 +1181,14 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
return false;
}
-static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
+static unsigned xenvif_tx_build_gops(struct xenvif *vif)
{
- struct gnttab_copy *gop = netbk->tx_copy_ops, *request_gop;
+ struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
struct sk_buff *skb;
int ret;
- while ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
- < MAX_PENDING_REQS) &&
- !list_empty(&netbk->net_schedule_list)) {
- struct xenvif *vif;
+ while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
+ < MAX_PENDING_REQS)) {
struct xen_netif_tx_request txreq;
struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
struct page *page;
@@ -1446,16 +1199,6 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
unsigned int data_len;
pending_ring_idx_t index;
- /* Get a netif from the list with work to do. */
- vif = poll_net_schedule_list(netbk);
- /* This can sometimes happen because the test of
- * list_empty(net_schedule_list) at the top of the
- * loop is unlocked. Just go back and have another
- * look.
- */
- if (!vif)
- continue;
-
if (vif->tx.sring->req_prod - vif->tx.req_cons >
XEN_NETIF_TX_RING_SIZE) {
netdev_err(vif->dev,
@@ -1463,15 +1206,13 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
"req_prod %d, req_cons %d, size %ld\n",
vif->tx.sring->req_prod, vif->tx.req_cons,
XEN_NETIF_TX_RING_SIZE);
- netbk_fatal_tx_err(vif);
+ xenvif_fatal_tx_err(vif);
continue;
}
RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
- if (!work_to_do) {
- xenvif_put(vif);
- continue;
- }
+ if (!work_to_do)
+ break;
idx = vif->tx.req_cons;
rmb(); /* Ensure that we see the request before we copy it. */
@@ -1479,10 +1220,8 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
/* Credit-based scheduling. */
if (txreq.size > vif->remaining_credit &&
- tx_credit_exceeded(vif, txreq.size)) {
- xenvif_put(vif);
- continue;
- }
+ tx_credit_exceeded(vif, txreq.size))
+ break;
vif->remaining_credit -= txreq.size;
@@ -1491,24 +1230,24 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
memset(extras, 0, sizeof(extras));
if (txreq.flags & XEN_NETTXF_extra_info) {
- work_to_do = xen_netbk_get_extras(vif, extras,
- work_to_do);
+ work_to_do = xenvif_get_extras(vif, extras,
+ work_to_do);
idx = vif->tx.req_cons;
if (unlikely(work_to_do < 0))
- continue;
+ break;
}
- ret = netbk_count_requests(vif, &txreq, txfrags, work_to_do);
+ ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
if (unlikely(ret < 0))
- continue;
+ break;
idx += ret;
if (unlikely(txreq.size < ETH_HLEN)) {
netdev_dbg(vif->dev,
"Bad packet size: %d\n", txreq.size);
- netbk_tx_err(vif, &txreq, idx);
- continue;
+ xenvif_tx_err(vif, &txreq, idx);
+ break;
}
/* No crossing a page as the payload mustn't fragment. */
@@ -1517,12 +1256,12 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
"txreq.offset: %x, size: %u, end: %lu\n",
txreq.offset, txreq.size,
(txreq.offset&~PAGE_MASK) + txreq.size);
- netbk_fatal_tx_err(vif);
- continue;
+ xenvif_fatal_tx_err(vif);
+ break;
}
- index = pending_index(netbk->pending_cons);
- pending_idx = netbk->pending_ring[index];
+ index = pending_index(vif->pending_cons);
+ pending_idx = vif->pending_ring[index];
data_len = (txreq.size > PKT_PROT_LEN &&
ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
@@ -1533,7 +1272,7 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
if (unlikely(skb == NULL)) {
netdev_dbg(vif->dev,
"Can't allocate a skb in start_xmit.\n");
- netbk_tx_err(vif, &txreq, idx);
+ xenvif_tx_err(vif, &txreq, idx);
break;
}
@@ -1544,19 +1283,19 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
struct xen_netif_extra_info *gso;
gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
- if (netbk_set_skb_gso(vif, skb, gso)) {
- /* Failure in netbk_set_skb_gso is fatal. */
+ if (xenvif_set_skb_gso(vif, skb, gso)) {
+ /* Failure in xenvif_set_skb_gso is fatal. */
kfree_skb(skb);
- continue;
+ break;
}
}
/* XXX could copy straight to head */
- page = xen_netbk_alloc_page(netbk, pending_idx);
+ page = xenvif_alloc_page(vif, pending_idx);
if (!page) {
kfree_skb(skb);
- netbk_tx_err(vif, &txreq, idx);
- continue;
+ xenvif_tx_err(vif, &txreq, idx);
+ break;
}
gop->source.u.ref = txreq.gref;
@@ -1572,10 +1311,9 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
gop++;
- memcpy(&netbk->pending_tx_info[pending_idx].req,
+ memcpy(&vif->pending_tx_info[pending_idx].req,
&txreq, sizeof(txreq));
- netbk->pending_tx_info[pending_idx].vif = vif;
- netbk->pending_tx_info[pending_idx].head = index;
+ vif->pending_tx_info[pending_idx].head = index;
*((u16 *)skb->data) = pending_idx;
__skb_put(skb, data_len);
@@ -1590,46 +1328,45 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
INVALID_PENDING_IDX);
}
- netbk->pending_cons++;
+ vif->pending_cons++;
- request_gop = xen_netbk_get_requests(netbk, vif,
- skb, txfrags, gop);
+ request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
if (request_gop == NULL) {
kfree_skb(skb);
- netbk_tx_err(vif, &txreq, idx);
- continue;
+ xenvif_tx_err(vif, &txreq, idx);
+ break;
}
gop = request_gop;
- __skb_queue_tail(&netbk->tx_queue, skb);
+ __skb_queue_tail(&vif->tx_queue, skb);
vif->tx.req_cons = idx;
- xen_netbk_check_rx_xenvif(vif);
- if ((gop-netbk->tx_copy_ops) >= ARRAY_SIZE(netbk->tx_copy_ops))
+ if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
break;
}
- return gop - netbk->tx_copy_ops;
+ return gop - vif->tx_copy_ops;
}
-static void xen_netbk_tx_submit(struct xen_netbk *netbk)
+
+static int xenvif_tx_submit(struct xenvif *vif, int budget)
{
- struct gnttab_copy *gop = netbk->tx_copy_ops;
+ struct gnttab_copy *gop = vif->tx_copy_ops;
struct sk_buff *skb;
+ int work_done = 0;
- while ((skb = __skb_dequeue(&netbk->tx_queue)) != NULL) {
+ while (work_done < budget &&
+ (skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
struct xen_netif_tx_request *txp;
- struct xenvif *vif;
u16 pending_idx;
unsigned data_len;
pending_idx = *((u16 *)skb->data);
- vif = netbk->pending_tx_info[pending_idx].vif;
- txp = &netbk->pending_tx_info[pending_idx].req;
+ txp = &vif->pending_tx_info[pending_idx].req;
/* Check the remap error code. */
- if (unlikely(xen_netbk_tx_check_gop(netbk, skb, &gop))) {
+ if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) {
netdev_dbg(vif->dev, "netback grant failed.\n");
skb_shinfo(skb)->nr_frags = 0;
kfree_skb(skb);
@@ -1638,7 +1375,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
data_len = skb->len;
memcpy(skb->data,
- (void *)(idx_to_kaddr(netbk, pending_idx)|txp->offset),
+ (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
data_len);
if (data_len < txp->size) {
/* Append the packet payload as a fragment. */
@@ -1646,7 +1383,8 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
txp->size -= data_len;
} else {
/* Schedule a response immediately. */
- xen_netbk_idx_release(netbk, pending_idx, XEN_NETIF_RSP_OKAY);
+ xenvif_idx_release(vif, pending_idx,
+ XEN_NETIF_RSP_OKAY);
}
if (txp->flags & XEN_NETTXF_csum_blank)
@@ -1654,7 +1392,7 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
else if (txp->flags & XEN_NETTXF_data_validated)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- xen_netbk_fill_frags(netbk, skb);
+ xenvif_fill_frags(vif, skb);
/*
* If the initial fragment was < PKT_PROT_LEN then
@@ -1682,53 +1420,61 @@ static void xen_netbk_tx_submit(struct xen_netbk *netbk)
vif->dev->stats.rx_bytes += skb->len;
vif->dev->stats.rx_packets++;
- xenvif_receive_skb(vif, skb);
+ work_done++;
+
+ netif_receive_skb(skb);
}
+
+ return work_done;
}
/* Called after netfront has transmitted */
-static void xen_netbk_tx_action(struct xen_netbk *netbk)
+int xenvif_tx_action(struct xenvif *vif, int budget)
{
unsigned nr_gops;
+ int work_done;
+
+ if (unlikely(!tx_work_todo(vif)))
+ return 0;
- nr_gops = xen_netbk_tx_build_gops(netbk);
+ nr_gops = xenvif_tx_build_gops(vif);
if (nr_gops == 0)
- return;
+ return 0;
- gnttab_batch_copy(netbk->tx_copy_ops, nr_gops);
+ gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
- xen_netbk_tx_submit(netbk);
+ work_done = xenvif_tx_submit(vif, nr_gops);
+
+ return work_done;
}
-static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
- u8 status)
+static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
+ u8 status)
{
- struct xenvif *vif;
struct pending_tx_info *pending_tx_info;
pending_ring_idx_t head;
u16 peek; /* peek into next tx request */
- BUG_ON(netbk->mmap_pages[pending_idx] == (void *)(~0UL));
+ BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL));
/* Already complete? */
- if (netbk->mmap_pages[pending_idx] == NULL)
+ if (vif->mmap_pages[pending_idx] == NULL)
return;
- pending_tx_info = &netbk->pending_tx_info[pending_idx];
+ pending_tx_info = &vif->pending_tx_info[pending_idx];
- vif = pending_tx_info->vif;
head = pending_tx_info->head;
- BUG_ON(!pending_tx_is_head(netbk, head));
- BUG_ON(netbk->pending_ring[pending_index(head)] != pending_idx);
+ BUG_ON(!pending_tx_is_head(vif, head));
+ BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx);
do {
pending_ring_idx_t index;
pending_ring_idx_t idx = pending_index(head);
- u16 info_idx = netbk->pending_ring[idx];
+ u16 info_idx = vif->pending_ring[idx];
- pending_tx_info = &netbk->pending_tx_info[info_idx];
+ pending_tx_info = &vif->pending_tx_info[info_idx];
make_tx_response(vif, &pending_tx_info->req, status);
/* Setting any number other than
@@ -1737,18 +1483,15 @@ static void xen_netbk_idx_release(struct xen_netbk *netbk, u16 pending_idx,
*/
pending_tx_info->head = 0;
- index = pending_index(netbk->pending_prod++);
- netbk->pending_ring[index] = netbk->pending_ring[info_idx];
-
- xenvif_put(vif);
+ index = pending_index(vif->pending_prod++);
+ vif->pending_ring[index] = vif->pending_ring[info_idx];
- peek = netbk->pending_ring[pending_index(++head)];
+ peek = vif->pending_ring[pending_index(++head)];
- } while (!pending_tx_is_head(netbk, peek));
+ } while (!pending_tx_is_head(vif, peek));
- netbk->mmap_pages[pending_idx]->mapping = 0;
- put_page(netbk->mmap_pages[pending_idx]);
- netbk->mmap_pages[pending_idx] = NULL;
+ put_page(vif->mmap_pages[pending_idx]);
+ vif->mmap_pages[pending_idx] = NULL;
}
@@ -1796,46 +1539,23 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
return resp;
}
-static inline int rx_work_todo(struct xen_netbk *netbk)
+static inline int rx_work_todo(struct xenvif *vif)
{
- return !skb_queue_empty(&netbk->rx_queue);
+ return !skb_queue_empty(&vif->rx_queue);
}
-static inline int tx_work_todo(struct xen_netbk *netbk)
+static inline int tx_work_todo(struct xenvif *vif)
{
- if ((nr_pending_reqs(netbk) + XEN_NETBK_LEGACY_SLOTS_MAX
- < MAX_PENDING_REQS) &&
- !list_empty(&netbk->net_schedule_list))
+ if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
+ (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
+ < MAX_PENDING_REQS))
return 1;
return 0;
}
-static int xen_netbk_kthread(void *data)
-{
- struct xen_netbk *netbk = data;
- while (!kthread_should_stop()) {
- wait_event_interruptible(netbk->wq,
- rx_work_todo(netbk) ||
- tx_work_todo(netbk) ||
- kthread_should_stop());
- cond_resched();
-
- if (kthread_should_stop())
- break;
-
- if (rx_work_todo(netbk))
- xen_netbk_rx_action(netbk);
-
- if (tx_work_todo(netbk))
- xen_netbk_tx_action(netbk);
- }
-
- return 0;
-}
-
-void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
+void xenvif_unmap_frontend_rings(struct xenvif *vif)
{
if (vif->tx.sring)
xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
@@ -1845,9 +1565,9 @@ void xen_netbk_unmap_frontend_rings(struct xenvif *vif)
vif->rx.sring);
}
-int xen_netbk_map_frontend_rings(struct xenvif *vif,
- grant_ref_t tx_ring_ref,
- grant_ref_t rx_ring_ref)
+int xenvif_map_frontend_rings(struct xenvif *vif,
+ grant_ref_t tx_ring_ref,
+ grant_ref_t rx_ring_ref)
{
void *addr;
struct xen_netif_tx_sring *txs;
@@ -1876,15 +1596,33 @@ int xen_netbk_map_frontend_rings(struct xenvif *vif,
return 0;
err:
- xen_netbk_unmap_frontend_rings(vif);
+ xenvif_unmap_frontend_rings(vif);
return err;
}
+int xenvif_kthread(void *data)
+{
+ struct xenvif *vif = data;
+
+ while (!kthread_should_stop()) {
+ wait_event_interruptible(vif->wq,
+ rx_work_todo(vif) ||
+ kthread_should_stop());
+ if (kthread_should_stop())
+ break;
+
+ if (rx_work_todo(vif))
+ xenvif_rx_action(vif);
+
+ cond_resched();
+ }
+
+ return 0;
+}
+
static int __init netback_init(void)
{
- int i;
int rc = 0;
- int group;
if (!xen_domain())
return -ENODEV;
@@ -1895,48 +1633,6 @@ static int __init netback_init(void)
fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
}
- xen_netbk_group_nr = num_online_cpus();
- xen_netbk = vzalloc(sizeof(struct xen_netbk) * xen_netbk_group_nr);
- if (!xen_netbk)
- return -ENOMEM;
-
- for (group = 0; group < xen_netbk_group_nr; group++) {
- struct xen_netbk *netbk = &xen_netbk[group];
- skb_queue_head_init(&netbk->rx_queue);
- skb_queue_head_init(&netbk->tx_queue);
-
- init_timer(&netbk->net_timer);
- netbk->net_timer.data = (unsigned long)netbk;
- netbk->net_timer.function = xen_netbk_alarm;
-
- netbk->pending_cons = 0;
- netbk->pending_prod = MAX_PENDING_REQS;
- for (i = 0; i < MAX_PENDING_REQS; i++)
- netbk->pending_ring[i] = i;
-
- init_waitqueue_head(&netbk->wq);
- netbk->task = kthread_create(xen_netbk_kthread,
- (void *)netbk,
- "netback/%u", group);
-
- if (IS_ERR(netbk->task)) {
- pr_alert("kthread_create() fails at netback\n");
- del_timer(&netbk->net_timer);
- rc = PTR_ERR(netbk->task);
- goto failed_init;
- }
-
- kthread_bind(netbk->task, group);
-
- INIT_LIST_HEAD(&netbk->net_schedule_list);
-
- spin_lock_init(&netbk->net_schedule_list_lock);
-
- atomic_set(&netbk->netfront_count, 0);
-
- wake_up_process(netbk->task);
- }
-
rc = xenvif_xenbus_init();
if (rc)
goto failed_init;
@@ -1944,35 +1640,14 @@ static int __init netback_init(void)
return 0;
failed_init:
- while (--group >= 0) {
- struct xen_netbk *netbk = &xen_netbk[group];
- del_timer(&netbk->net_timer);
- kthread_stop(netbk->task);
- }
- vfree(xen_netbk);
return rc;
-
}
module_init(netback_init);
static void __exit netback_fini(void)
{
- int i, j;
-
xenvif_xenbus_fini();
-
- for (i = 0; i < xen_netbk_group_nr; i++) {
- struct xen_netbk *netbk = &xen_netbk[i];
- del_timer_sync(&netbk->net_timer);
- kthread_stop(netbk->task);
- for (j = 0; j < MAX_PENDING_REQS; j++) {
- if (netbk->mmap_pages[j])
- __free_page(netbk->mmap_pages[j]);
- }
- }
-
- vfree(xen_netbk);
}
module_exit(netback_fini);
diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c
index c5c30fb1d7b..9a53f13c88d 100644
--- a/drivers/nfc/nfcsim.c
+++ b/drivers/nfc/nfcsim.c
@@ -60,7 +60,7 @@ struct nfcsim {
static struct nfcsim *dev0;
static struct nfcsim *dev1;
-struct workqueue_struct *wq;
+static struct workqueue_struct *wq;
static void nfcsim_cleanup_dev(struct nfcsim *dev, u8 shutdown)
{
@@ -481,7 +481,7 @@ static void nfcsim_free_device(struct nfcsim *dev)
kfree(dev);
}
-int __init nfcsim_init(void)
+static int __init nfcsim_init(void)
{
int rc;
@@ -522,7 +522,7 @@ exit:
return rc;
}
-void __exit nfcsim_exit(void)
+static void __exit nfcsim_exit(void)
{
nfcsim_cleanup_dev(dev0, 1);
nfcsim_cleanup_dev(dev1, 1);
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index daf92ac209f..5df730be88a 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -83,12 +83,20 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
/* How much time we spend listening for initiators */
#define PN533_LISTEN_TIME 2
+/* Delay between each poll frame (ms) */
+#define PN533_POLL_INTERVAL 10
-/* Standard pn533 frame definitions */
+/* Standard pn533 frame definitions (standard and extended) */
#define PN533_STD_FRAME_HEADER_LEN (sizeof(struct pn533_std_frame) \
+ 2) /* data[0] TFI, data[1] CC */
#define PN533_STD_FRAME_TAIL_LEN 2 /* data[len] DCS, data[len + 1] postamble*/
+#define PN533_EXT_FRAME_HEADER_LEN (sizeof(struct pn533_ext_frame) \
+ + 2) /* data[0] TFI, data[1] CC */
+
+#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
+#define PN533_CMD_DATAFRAME_MAXLEN 240 /* max data length (send) */
+
/*
* Max extended frame payload len, excluding TFI and CC
* which are already in PN533_FRAME_HEADER_LEN.
@@ -99,6 +107,10 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
Postamble (1) */
#define PN533_STD_FRAME_CHECKSUM(f) (f->data[f->datalen])
#define PN533_STD_FRAME_POSTAMBLE(f) (f->data[f->datalen + 1])
+/* Half start code (3), LEN (4) should be 0xffff for extended frame */
+#define PN533_STD_IS_EXTENDED(hdr) ((hdr)->datalen == 0xFF \
+ && (hdr)->datalen_checksum == 0xFF)
+#define PN533_EXT_FRAME_CHECKSUM(f) (f->data[be16_to_cpu(f->datalen)])
/* start of frame */
#define PN533_STD_FRAME_SOF 0x00FF
@@ -124,7 +136,7 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
#define PN533_ACR122_RDR_TO_PC_ESCAPE 0x83
/* PN533 Commands */
-#define PN533_STD_FRAME_CMD(f) (f->data[1])
+#define PN533_FRAME_CMD(f) (f->data[1])
#define PN533_CMD_GET_FIRMWARE_VERSION 0x02
#define PN533_CMD_RF_CONFIGURATION 0x32
@@ -168,8 +180,9 @@ struct pn533_fw_version {
#define PN533_CFGITEM_MAX_RETRIES 0x05
#define PN533_CFGITEM_PASORI 0x82
-#define PN533_CFGITEM_RF_FIELD_ON 0x1
-#define PN533_CFGITEM_RF_FIELD_OFF 0x0
+#define PN533_CFGITEM_RF_FIELD_AUTO_RFCA 0x2
+#define PN533_CFGITEM_RF_FIELD_ON 0x1
+#define PN533_CFGITEM_RF_FIELD_OFF 0x0
#define PN533_CONFIG_TIMING_102 0xb
#define PN533_CONFIG_TIMING_204 0xc
@@ -257,7 +270,7 @@ static const struct pn533_poll_modulations poll_mod[] = {
.initiator_data.felica = {
.opcode = PN533_FELICA_OPC_SENSF_REQ,
.sc = PN533_FELICA_SENSF_SC_ALL,
- .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE,
+ .rc = PN533_FELICA_SENSF_RC_SYSTEM_CODE,
.tsn = 0x03,
},
},
@@ -270,7 +283,7 @@ static const struct pn533_poll_modulations poll_mod[] = {
.initiator_data.felica = {
.opcode = PN533_FELICA_OPC_SENSF_REQ,
.sc = PN533_FELICA_SENSF_SC_ALL,
- .rc = PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE,
+ .rc = PN533_FELICA_SENSF_RC_SYSTEM_CODE,
.tsn = 0x03,
},
},
@@ -352,13 +365,16 @@ struct pn533 {
struct urb *in_urb;
struct sk_buff_head resp_q;
+ struct sk_buff_head fragment_skb;
struct workqueue_struct *wq;
struct work_struct cmd_work;
struct work_struct cmd_complete_work;
- struct work_struct poll_work;
- struct work_struct mi_work;
+ struct delayed_work poll_work;
+ struct work_struct mi_rx_work;
+ struct work_struct mi_tx_work;
struct work_struct tg_work;
+ struct work_struct rf_work;
struct list_head cmd_queue;
struct pn533_cmd *cmd;
@@ -366,6 +382,7 @@ struct pn533 {
struct mutex cmd_lock; /* protects cmd queue */
void *cmd_complete_mi_arg;
+ void *cmd_complete_dep_arg;
struct pn533_poll_modulations *poll_mod_active[PN533_POLL_MOD_MAX + 1];
u8 poll_mod_count;
@@ -404,6 +421,15 @@ struct pn533_std_frame {
u8 data[];
} __packed;
+struct pn533_ext_frame { /* Extended Information frame */
+ u8 preamble;
+ __be16 start_frame;
+ __be16 eif_flag; /* fixed to 0xFFFF */
+ __be16 datalen;
+ u8 datalen_checksum;
+ u8 data[];
+} __packed;
+
struct pn533_frame_ops {
void (*tx_frame_init)(void *frame, u8 cmd_code);
void (*tx_frame_finish)(void *frame);
@@ -411,7 +437,7 @@ struct pn533_frame_ops {
int tx_header_len;
int tx_tail_len;
- bool (*rx_is_frame_valid)(void *frame);
+ bool (*rx_is_frame_valid)(void *frame, struct pn533 *dev);
int (*rx_frame_size)(void *frame);
int rx_header_len;
int rx_tail_len;
@@ -486,7 +512,7 @@ static void pn533_acr122_tx_update_payload_len(void *_frame, int len)
frame->datalen += len;
}
-static bool pn533_acr122_is_rx_frame_valid(void *_frame)
+static bool pn533_acr122_is_rx_frame_valid(void *_frame, struct pn533 *dev)
{
struct pn533_acr122_rx_frame *frame = _frame;
@@ -511,7 +537,7 @@ static u8 pn533_acr122_get_cmd_code(void *frame)
{
struct pn533_acr122_rx_frame *f = frame;
- return PN533_STD_FRAME_CMD(f);
+ return PN533_FRAME_CMD(f);
}
static struct pn533_frame_ops pn533_acr122_frame_ops = {
@@ -530,6 +556,12 @@ static struct pn533_frame_ops pn533_acr122_frame_ops = {
.get_cmd_code = pn533_acr122_get_cmd_code,
};
+/* The rule: value(high byte) + value(low byte) + checksum = 0 */
+static inline u8 pn533_ext_checksum(u16 value)
+{
+ return ~(u8)(((value & 0xFF00) >> 8) + (u8)(value & 0xFF)) + 1;
+}
+
/* The rule: value + checksum = 0 */
static inline u8 pn533_std_checksum(u8 value)
{
@@ -555,7 +587,7 @@ static void pn533_std_tx_frame_init(void *_frame, u8 cmd_code)
frame->preamble = 0;
frame->start_frame = cpu_to_be16(PN533_STD_FRAME_SOF);
PN533_STD_FRAME_IDENTIFIER(frame) = PN533_STD_FRAME_DIR_OUT;
- PN533_STD_FRAME_CMD(frame) = cmd_code;
+ PN533_FRAME_CMD(frame) = cmd_code;
frame->datalen = 2;
}
@@ -578,21 +610,41 @@ static void pn533_std_tx_update_payload_len(void *_frame, int len)
frame->datalen += len;
}
-static bool pn533_std_rx_frame_is_valid(void *_frame)
+static bool pn533_std_rx_frame_is_valid(void *_frame, struct pn533 *dev)
{
u8 checksum;
- struct pn533_std_frame *frame = _frame;
+ struct pn533_std_frame *stdf = _frame;
- if (frame->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF))
+ if (stdf->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF))
return false;
- checksum = pn533_std_checksum(frame->datalen);
- if (checksum != frame->datalen_checksum)
- return false;
+ if (likely(!PN533_STD_IS_EXTENDED(stdf))) {
+ /* Standard frame code */
+ dev->ops->rx_header_len = PN533_STD_FRAME_HEADER_LEN;
- checksum = pn533_std_data_checksum(frame->data, frame->datalen);
- if (checksum != PN533_STD_FRAME_CHECKSUM(frame))
- return false;
+ checksum = pn533_std_checksum(stdf->datalen);
+ if (checksum != stdf->datalen_checksum)
+ return false;
+
+ checksum = pn533_std_data_checksum(stdf->data, stdf->datalen);
+ if (checksum != PN533_STD_FRAME_CHECKSUM(stdf))
+ return false;
+ } else {
+ /* Extended */
+ struct pn533_ext_frame *eif = _frame;
+
+ dev->ops->rx_header_len = PN533_EXT_FRAME_HEADER_LEN;
+
+ checksum = pn533_ext_checksum(be16_to_cpu(eif->datalen));
+ if (checksum != eif->datalen_checksum)
+ return false;
+
+ /* check data checksum */
+ checksum = pn533_std_data_checksum(eif->data,
+ be16_to_cpu(eif->datalen));
+ if (checksum != PN533_EXT_FRAME_CHECKSUM(eif))
+ return false;
+ }
return true;
}
@@ -612,6 +664,14 @@ static inline int pn533_std_rx_frame_size(void *frame)
{
struct pn533_std_frame *f = frame;
+ /* check for Extended Information frame */
+ if (PN533_STD_IS_EXTENDED(f)) {
+ struct pn533_ext_frame *eif = frame;
+
+ return sizeof(struct pn533_ext_frame)
+ + be16_to_cpu(eif->datalen) + PN533_STD_FRAME_TAIL_LEN;
+ }
+
return sizeof(struct pn533_std_frame) + f->datalen +
PN533_STD_FRAME_TAIL_LEN;
}
@@ -619,8 +679,12 @@ static inline int pn533_std_rx_frame_size(void *frame)
static u8 pn533_std_get_cmd_code(void *frame)
{
struct pn533_std_frame *f = frame;
+ struct pn533_ext_frame *eif = frame;
- return PN533_STD_FRAME_CMD(f);
+ if (PN533_STD_IS_EXTENDED(f))
+ return PN533_FRAME_CMD(eif);
+ else
+ return PN533_FRAME_CMD(f);
}
static struct pn533_frame_ops pn533_std_frame_ops = {
@@ -675,7 +739,7 @@ static void pn533_recv_response(struct urb *urb)
print_hex_dump_debug("PN533 RX: ", DUMP_PREFIX_NONE, 16, 1, in_frame,
dev->ops->rx_frame_size(in_frame), false);
- if (!dev->ops->rx_is_frame_valid(in_frame)) {
+ if (!dev->ops->rx_is_frame_valid(in_frame, dev)) {
nfc_dev_err(&dev->interface->dev, "Received an invalid frame");
cmd->status = -EIO;
goto sched_wq;
@@ -1657,7 +1721,56 @@ static void pn533_listen_mode_timer(unsigned long data)
pn533_poll_next_mod(dev);
- queue_work(dev->wq, &dev->poll_work);
+ queue_delayed_work(dev->wq, &dev->poll_work,
+ msecs_to_jiffies(PN533_POLL_INTERVAL));
+}
+
+static int pn533_rf_complete(struct pn533 *dev, void *arg,
+ struct sk_buff *resp)
+{
+ int rc = 0;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ if (IS_ERR(resp)) {
+ rc = PTR_ERR(resp);
+
+ nfc_dev_err(&dev->interface->dev, "%s RF setting error %d",
+ __func__, rc);
+
+ return rc;
+ }
+
+ queue_delayed_work(dev->wq, &dev->poll_work,
+ msecs_to_jiffies(PN533_POLL_INTERVAL));
+
+ dev_kfree_skb(resp);
+ return rc;
+}
+
+static void pn533_wq_rf(struct work_struct *work)
+{
+ struct pn533 *dev = container_of(work, struct pn533, rf_work);
+ struct sk_buff *skb;
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ skb = pn533_alloc_skb(dev, 2);
+ if (!skb)
+ return;
+
+ *skb_put(skb, 1) = PN533_CFGITEM_RF_FIELD;
+ *skb_put(skb, 1) = PN533_CFGITEM_RF_FIELD_AUTO_RFCA;
+
+ rc = pn533_send_cmd_async(dev, PN533_CMD_RF_CONFIGURATION, skb,
+ pn533_rf_complete, NULL);
+ if (rc < 0) {
+ dev_kfree_skb(skb);
+ nfc_dev_err(&dev->interface->dev, "RF setting error %d", rc);
+ }
+
+ return;
}
static int pn533_poll_complete(struct pn533 *dev, void *arg,
@@ -1705,7 +1818,8 @@ static int pn533_poll_complete(struct pn533 *dev, void *arg,
}
pn533_poll_next_mod(dev);
- queue_work(dev->wq, &dev->poll_work);
+ /* No target found, turn radio off */
+ queue_work(dev->wq, &dev->rf_work);
done:
dev_kfree_skb(resp);
@@ -1770,7 +1884,7 @@ static int pn533_send_poll_frame(struct pn533 *dev)
static void pn533_wq_poll(struct work_struct *work)
{
- struct pn533 *dev = container_of(work, struct pn533, poll_work);
+ struct pn533 *dev = container_of(work, struct pn533, poll_work.work);
struct pn533_poll_modulations *cur_mod;
int rc;
@@ -1799,6 +1913,7 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
u32 im_protocols, u32 tm_protocols)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ u8 rand_mod;
nfc_dev_dbg(&dev->interface->dev,
"%s: im protocols 0x%x tm protocols 0x%x",
@@ -1822,11 +1937,15 @@ static int pn533_start_poll(struct nfc_dev *nfc_dev,
tm_protocols = 0;
}
- dev->poll_mod_curr = 0;
pn533_poll_create_mod_list(dev, im_protocols, tm_protocols);
dev->poll_protocols = im_protocols;
dev->listen_protocols = tm_protocols;
+ /* Do not always start polling from the same modulation */
+ get_random_bytes(&rand_mod, sizeof(rand_mod));
+ rand_mod %= dev->poll_mod_count;
+ dev->poll_mod_curr = rand_mod;
+
return pn533_send_poll_frame(dev);
}
@@ -1845,6 +1964,7 @@ static void pn533_stop_poll(struct nfc_dev *nfc_dev)
}
pn533_abort_cmd(dev, GFP_KERNEL);
+ flush_delayed_work(&dev->poll_work);
pn533_poll_reset_mod_list(dev);
}
@@ -2037,28 +2157,15 @@ error:
return rc;
}
-static int pn533_mod_to_baud(struct pn533 *dev)
-{
- switch (dev->poll_mod_curr) {
- case PN533_POLL_MOD_106KBPS_A:
- return 0;
- case PN533_POLL_MOD_212KBPS_FELICA:
- return 1;
- case PN533_POLL_MOD_424KBPS_FELICA:
- return 2;
- default:
- return -EINVAL;
- }
-}
-
+static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf);
#define PASSIVE_DATA_LEN 5
static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
u8 comm_mode, u8 *gb, size_t gb_len)
{
struct pn533 *dev = nfc_get_drvdata(nfc_dev);
struct sk_buff *skb;
- int rc, baud, skb_len;
- u8 *next, *arg;
+ int rc, skb_len;
+ u8 *next, *arg, nfcid3[NFC_NFCID3_MAXSIZE];
u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3};
@@ -2076,41 +2183,39 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
return -EBUSY;
}
- baud = pn533_mod_to_baud(dev);
- if (baud < 0) {
- nfc_dev_err(&dev->interface->dev,
- "Invalid curr modulation %d", dev->poll_mod_curr);
- return baud;
- }
-
skb_len = 3 + gb_len; /* ActPass + BR + Next */
- if (comm_mode == NFC_COMM_PASSIVE)
- skb_len += PASSIVE_DATA_LEN;
+ skb_len += PASSIVE_DATA_LEN;
- if (target && target->nfcid2_len)
- skb_len += NFC_NFCID3_MAXSIZE;
+ /* NFCID3 */
+ skb_len += NFC_NFCID3_MAXSIZE;
+ if (target && !target->nfcid2_len) {
+ nfcid3[0] = 0x1;
+ nfcid3[1] = 0xfe;
+ get_random_bytes(nfcid3 + 2, 6);
+ }
skb = pn533_alloc_skb(dev, skb_len);
if (!skb)
return -ENOMEM;
*skb_put(skb, 1) = !comm_mode; /* ActPass */
- *skb_put(skb, 1) = baud; /* Baud rate */
+ *skb_put(skb, 1) = 0x02; /* 424 kbps */
next = skb_put(skb, 1); /* Next */
*next = 0;
- if (comm_mode == NFC_COMM_PASSIVE && baud > 0) {
- memcpy(skb_put(skb, PASSIVE_DATA_LEN), passive_data,
- PASSIVE_DATA_LEN);
- *next |= 1;
- }
+ /* Copy passive data */
+ memcpy(skb_put(skb, PASSIVE_DATA_LEN), passive_data, PASSIVE_DATA_LEN);
+ *next |= 1;
- if (target && target->nfcid2_len) {
+ /* Copy NFCID3 (which is NFCID2 from SENSF_RES) */
+ if (target && target->nfcid2_len)
memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), target->nfcid2,
target->nfcid2_len);
- *next |= 2;
- }
+ else
+ memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), nfcid3,
+ NFC_NFCID3_MAXSIZE);
+ *next |= 2;
if (gb != NULL && gb_len > 0) {
memcpy(skb_put(skb, gb_len), gb, gb_len);
@@ -2127,6 +2232,8 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
*arg = !comm_mode;
+ pn533_rf_field(dev->nfc_dev, 0);
+
rc = pn533_send_cmd_async(dev, PN533_CMD_IN_JUMP_FOR_DEP, skb,
pn533_in_dep_link_up_complete, arg);
@@ -2232,7 +2339,15 @@ static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg,
if (mi) {
dev->cmd_complete_mi_arg = arg;
- queue_work(dev->wq, &dev->mi_work);
+ queue_work(dev->wq, &dev->mi_rx_work);
+ return -EINPROGRESS;
+ }
+
+ /* Prepare for the next round */
+ if (skb_queue_len(&dev->fragment_skb) > 0) {
+ dev->cmd_complete_dep_arg = arg;
+ queue_work(dev->wq, &dev->mi_tx_work);
+
return -EINPROGRESS;
}
@@ -2253,6 +2368,50 @@ _error:
return rc;
}
+/* Split the Tx skb into small chunks */
+static int pn533_fill_fragment_skbs(struct pn533 *dev, struct sk_buff *skb)
+{
+ struct sk_buff *frag;
+ int frag_size;
+
+ do {
+ /* Remaining size */
+ if (skb->len > PN533_CMD_DATAFRAME_MAXLEN)
+ frag_size = PN533_CMD_DATAFRAME_MAXLEN;
+ else
+ frag_size = skb->len;
+
+ /* Allocate and reserve */
+ frag = pn533_alloc_skb(dev, frag_size);
+ if (!frag) {
+ skb_queue_purge(&dev->fragment_skb);
+ break;
+ }
+
+ /* Reserve the TG/MI byte */
+ skb_reserve(frag, 1);
+
+ /* MI + TG */
+ if (frag_size == PN533_CMD_DATAFRAME_MAXLEN)
+ *skb_push(frag, sizeof(u8)) = (PN533_CMD_MI_MASK | 1);
+ else
+ *skb_push(frag, sizeof(u8)) = 1; /* TG */
+
+ memcpy(skb_put(frag, frag_size), skb->data, frag_size);
+
+ /* Reduce the size of incoming buffer */
+ skb_pull(skb, frag_size);
+
+ /* Add this to skb_queue */
+ skb_queue_tail(&dev->fragment_skb, frag);
+
+ } while (skb->len > 0);
+
+ dev_kfree_skb(skb);
+
+ return skb_queue_len(&dev->fragment_skb);
+}
+
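For reference, the helper above queues DIV_ROUND_UP(len, PN533_CMD_DATAFRAME_MAXLEN) fragments for a len-byte payload and marks every full-size fragment with the MI bit in its leading TG/MI byte. A hedged sketch of that accounting follows; the example_* names are hypothetical and PN533_CMD_DATAFRAME_MAXLEN comes from the driver.

#include <linux/kernel.h>
#include <linux/types.h>

/* Illustrative sketch, not part of the patch: fragment accounting that
 * mirrors pn533_fill_fragment_skbs() above.
 */
static unsigned int example_fragment_count(unsigned int len)
{
	return DIV_ROUND_UP(len, PN533_CMD_DATAFRAME_MAXLEN);
}

static bool example_fragment_has_mi(unsigned int frag_size)
{
	/* Only full-size fragments carry the MI (more information) flag. */
	return frag_size == PN533_CMD_DATAFRAME_MAXLEN;
}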
static int pn533_transceive(struct nfc_dev *nfc_dev,
struct nfc_target *target, struct sk_buff *skb,
data_exchange_cb_t cb, void *cb_context)
@@ -2263,15 +2422,6 @@ static int pn533_transceive(struct nfc_dev *nfc_dev,
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
- if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
- /* TODO: Implement support to multi-part data exchange */
- nfc_dev_err(&dev->interface->dev,
- "Data length greater than the max allowed: %d",
- PN533_CMD_DATAEXCH_DATA_MAXLEN);
- rc = -ENOSYS;
- goto error;
- }
-
if (!dev->tgt_active_prot) {
nfc_dev_err(&dev->interface->dev,
"Can't exchange data if there is no active target");
@@ -2299,7 +2449,20 @@ static int pn533_transceive(struct nfc_dev *nfc_dev,
break;
}
default:
- *skb_push(skb, sizeof(u8)) = 1; /*TG*/
+ /* jumbo frame? */
+ if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) {
+ rc = pn533_fill_fragment_skbs(dev, skb);
+ if (rc <= 0)
+ goto error;
+
+ skb = skb_dequeue(&dev->fragment_skb);
+ if (!skb) {
+ rc = -EIO;
+ goto error;
+ }
+ } else {
+ *skb_push(skb, sizeof(u8)) = 1; /* TG */
+ }
rc = pn533_send_data_async(dev, PN533_CMD_IN_DATA_EXCHANGE,
skb, pn533_data_exchange_complete,
@@ -2370,7 +2533,7 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
static void pn533_wq_mi_recv(struct work_struct *work)
{
- struct pn533 *dev = container_of(work, struct pn533, mi_work);
+ struct pn533 *dev = container_of(work, struct pn533, mi_rx_work);
struct sk_buff *skb;
int rc;
@@ -2418,6 +2581,61 @@ error:
queue_work(dev->wq, &dev->cmd_work);
}
+static void pn533_wq_mi_send(struct work_struct *work)
+{
+ struct pn533 *dev = container_of(work, struct pn533, mi_tx_work);
+ struct sk_buff *skb;
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ /* Grab the first skb in the queue */
+ skb = skb_dequeue(&dev->fragment_skb);
+
+ if (skb == NULL) { /* No more data */
+ /* Reset the queue for future use */
+ skb_queue_head_init(&dev->fragment_skb);
+ goto error;
+ }
+
+ switch (dev->device_type) {
+ case PN533_DEVICE_PASORI:
+ if (dev->tgt_active_prot != NFC_PROTO_FELICA) {
+ rc = -EIO;
+ break;
+ }
+
+ rc = pn533_send_cmd_direct_async(dev, PN533_CMD_IN_COMM_THRU,
+ skb,
+ pn533_data_exchange_complete,
+ dev->cmd_complete_dep_arg);
+
+ break;
+
+ default:
+ /* Still some fragments? */
+ rc = pn533_send_cmd_direct_async(dev, PN533_CMD_IN_DATA_EXCHANGE,
+ skb,
+ pn533_data_exchange_complete,
+ dev->cmd_complete_dep_arg);
+
+ break;
+ }
+
+ if (rc == 0) /* success */
+ return;
+
+ nfc_dev_err(&dev->interface->dev,
+ "Error %d when trying to perform data_exchange", rc);
+
+ dev_kfree_skb(skb);
+ kfree(dev->cmd_complete_dep_arg);
+
+error:
+ pn533_send_ack(dev, GFP_KERNEL);
+ queue_work(dev->wq, &dev->cmd_work);
+}
+
static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
u8 cfgdata_len)
{
@@ -2562,6 +2780,8 @@ static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf)
u8 rf_field = !!rf;
int rc;
+ rf_field |= PN533_CFGITEM_RF_FIELD_AUTO_RFCA;
+
rc = pn533_set_configuration(dev, PN533_CFGITEM_RF_FIELD,
(u8 *)&rf_field, 1);
if (rc) {
@@ -2605,17 +2825,6 @@ static int pn533_setup(struct pn533 *dev)
switch (dev->device_type) {
case PN533_DEVICE_STD:
- max_retries.mx_rty_atr = PN533_CONFIG_MAX_RETRIES_ENDLESS;
- max_retries.mx_rty_psl = 2;
- max_retries.mx_rty_passive_act =
- PN533_CONFIG_MAX_RETRIES_NO_RETRY;
-
- timing.rfu = PN533_CONFIG_TIMING_102;
- timing.atr_res_timeout = PN533_CONFIG_TIMING_204;
- timing.dep_timeout = PN533_CONFIG_TIMING_409;
-
- break;
-
case PN533_DEVICE_PASORI:
case PN533_DEVICE_ACR122U:
max_retries.mx_rty_atr = 0x2;
@@ -2729,9 +2938,11 @@ static int pn533_probe(struct usb_interface *interface,
INIT_WORK(&dev->cmd_work, pn533_wq_cmd);
INIT_WORK(&dev->cmd_complete_work, pn533_wq_cmd_complete);
- INIT_WORK(&dev->mi_work, pn533_wq_mi_recv);
+ INIT_WORK(&dev->mi_rx_work, pn533_wq_mi_recv);
+ INIT_WORK(&dev->mi_tx_work, pn533_wq_mi_send);
INIT_WORK(&dev->tg_work, pn533_wq_tg_get_data);
- INIT_WORK(&dev->poll_work, pn533_wq_poll);
+ INIT_DELAYED_WORK(&dev->poll_work, pn533_wq_poll);
+ INIT_WORK(&dev->rf_work, pn533_wq_rf);
dev->wq = alloc_ordered_workqueue("pn533", 0);
if (dev->wq == NULL)
goto error;
@@ -2741,6 +2952,7 @@ static int pn533_probe(struct usb_interface *interface,
dev->listen_timer.function = pn533_listen_mode_timer;
skb_queue_head_init(&dev->resp_q);
+ skb_queue_head_init(&dev->fragment_skb);
INIT_LIST_HEAD(&dev->cmd_queue);
@@ -2842,6 +3054,7 @@ static void pn533_disconnect(struct usb_interface *interface)
usb_kill_urb(dev->in_urb);
usb_kill_urb(dev->out_urb);
+ flush_delayed_work(&dev->poll_work);
destroy_workqueue(dev->wq);
skb_queue_purge(&dev->resp_q);
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 8cf64c19f02..01e27d4bdd0 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -25,11 +25,14 @@
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-
+#include <linux/nfc.h>
+#include <linux/firmware.h>
+#include <linux/unaligned/access_ok.h>
#include <linux/platform_data/pn544.h>
#include <net/nfc/hci.h>
#include <net/nfc/llc.h>
+#include <net/nfc/nfc.h>
#include "pn544.h"
@@ -55,6 +58,58 @@ MODULE_DEVICE_TABLE(i2c, pn544_hci_i2c_id_table);
#define PN544_HCI_I2C_DRIVER_NAME "pn544_hci_i2c"
+#define PN544_FW_CMD_WRITE 0x08
+#define PN544_FW_CMD_CHECK 0x06
+
+struct pn544_i2c_fw_frame_write {
+ u8 cmd;
+ u16 be_length;
+ u8 be_dest_addr[3];
+ u16 be_datalen;
+ u8 data[];
+} __packed;
+
+struct pn544_i2c_fw_frame_check {
+ u8 cmd;
+ u16 be_length;
+ u8 be_start_addr[3];
+ u16 be_datalen;
+ u16 be_crc;
+} __packed;
+
+struct pn544_i2c_fw_frame_response {
+ u8 status;
+ u16 be_length;
+} __packed;
+
+struct pn544_i2c_fw_blob {
+ u32 be_size;
+ u32 be_destaddr;
+ u8 data[];
+};
+
+#define PN544_FW_CMD_RESULT_TIMEOUT 0x01
+#define PN544_FW_CMD_RESULT_BAD_CRC 0x02
+#define PN544_FW_CMD_RESULT_ACCESS_DENIED 0x08
+#define PN544_FW_CMD_RESULT_PROTOCOL_ERROR 0x0B
+#define PN544_FW_CMD_RESULT_INVALID_PARAMETER 0x11
+#define PN544_FW_CMD_RESULT_INVALID_LENGTH 0x18
+#define PN544_FW_CMD_RESULT_WRITE_FAILED 0x74
+
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+
+#define PN544_FW_WRITE_BUFFER_MAX_LEN 0x9f7
+#define PN544_FW_I2C_MAX_PAYLOAD PN544_HCI_I2C_LLC_MAX_SIZE
+#define PN544_FW_I2C_WRITE_FRAME_HEADER_LEN 8
+#define PN544_FW_I2C_WRITE_DATA_MAX_LEN MIN((PN544_FW_I2C_MAX_PAYLOAD -\
+ PN544_FW_I2C_WRITE_FRAME_HEADER_LEN),\
+ PN544_FW_WRITE_BUFFER_MAX_LEN)
+
+#define FW_WORK_STATE_IDLE 1
+#define FW_WORK_STATE_START 2
+#define FW_WORK_STATE_WAIT_WRITE_ANSWER 3
+#define FW_WORK_STATE_WAIT_CHECK_ANSWER 4
+
struct pn544_i2c_phy {
struct i2c_client *i2c_dev;
struct nfc_hci_dev *hdev;
@@ -64,7 +119,18 @@ struct pn544_i2c_phy {
unsigned int gpio_fw;
unsigned int en_polarity;
+ struct work_struct fw_work;
+ int fw_work_state;
+ char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1];
+ const struct firmware *fw;
+ u32 fw_blob_dest_addr;
+ size_t fw_blob_size;
+ const u8 *fw_blob_data;
+ size_t fw_written;
+ int fw_cmd_result;
+
int powered;
+ int run_mode;
int hard_fault; /*
* < 0 if hardware error occured (e.g. i2c err)
@@ -122,15 +188,22 @@ out:
gpio_set_value(phy->gpio_en, !phy->en_polarity);
}
+static void pn544_hci_i2c_enable_mode(struct pn544_i2c_phy *phy, int run_mode)
+{
+ gpio_set_value(phy->gpio_fw, run_mode == PN544_FW_MODE ? 1 : 0);
+ gpio_set_value(phy->gpio_en, phy->en_polarity);
+ usleep_range(10000, 15000);
+
+ phy->run_mode = run_mode;
+}
+
static int pn544_hci_i2c_enable(void *phy_id)
{
struct pn544_i2c_phy *phy = phy_id;
pr_info(DRIVER_DESC ": %s\n", __func__);
- gpio_set_value(phy->gpio_fw, 0);
- gpio_set_value(phy->gpio_en, phy->en_polarity);
- usleep_range(10000, 15000);
+ pn544_hci_i2c_enable_mode(phy, PN544_HCI_MODE);
phy->powered = 1;
@@ -305,6 +378,42 @@ flush:
return r;
}
+static int pn544_hci_i2c_fw_read_status(struct pn544_i2c_phy *phy)
+{
+ int r;
+ struct pn544_i2c_fw_frame_response response;
+ struct i2c_client *client = phy->i2c_dev;
+
+ r = i2c_master_recv(client, (char *) &response, sizeof(response));
+ if (r != sizeof(response)) {
+ dev_err(&client->dev, "cannot read fw status\n");
+ return -EIO;
+ }
+
+ usleep_range(3000, 6000);
+
+ switch (response.status) {
+ case 0:
+ return 0;
+ case PN544_FW_CMD_RESULT_TIMEOUT:
+ return -ETIMEDOUT;
+ case PN544_FW_CMD_RESULT_BAD_CRC:
+ return -ENODATA;
+ case PN544_FW_CMD_RESULT_ACCESS_DENIED:
+ return -EACCES;
+ case PN544_FW_CMD_RESULT_PROTOCOL_ERROR:
+ return -EPROTO;
+ case PN544_FW_CMD_RESULT_INVALID_PARAMETER:
+ return -EINVAL;
+ case PN544_FW_CMD_RESULT_INVALID_LENGTH:
+ return -EBADMSG;
+ case PN544_FW_CMD_RESULT_WRITE_FAILED:
+ return -EIO;
+ default:
+ return -EIO;
+ }
+}
+
/*
* Reads an shdlc frame from the chip. This is not as straightforward as it
* seems. There are cases where we could lose the frame start synchronization.
@@ -339,19 +448,23 @@ static irqreturn_t pn544_hci_i2c_irq_thread_fn(int irq, void *phy_id)
if (phy->hard_fault != 0)
return IRQ_HANDLED;
- r = pn544_hci_i2c_read(phy, &skb);
- if (r == -EREMOTEIO) {
- phy->hard_fault = r;
+ if (phy->run_mode == PN544_FW_MODE) {
+ phy->fw_cmd_result = pn544_hci_i2c_fw_read_status(phy);
+ schedule_work(&phy->fw_work);
+ } else {
+ r = pn544_hci_i2c_read(phy, &skb);
+ if (r == -EREMOTEIO) {
+ phy->hard_fault = r;
- nfc_hci_recv_frame(phy->hdev, NULL);
+ nfc_hci_recv_frame(phy->hdev, NULL);
- return IRQ_HANDLED;
- } else if ((r == -ENOMEM) || (r == -EBADMSG)) {
- return IRQ_HANDLED;
- }
-
- nfc_hci_recv_frame(phy->hdev, skb);
+ return IRQ_HANDLED;
+ } else if ((r == -ENOMEM) || (r == -EBADMSG)) {
+ return IRQ_HANDLED;
+ }
+ nfc_hci_recv_frame(phy->hdev, skb);
+ }
return IRQ_HANDLED;
}
@@ -361,6 +474,215 @@ static struct nfc_phy_ops i2c_phy_ops = {
.disable = pn544_hci_i2c_disable,
};
+static int pn544_hci_i2c_fw_download(void *phy_id, const char *firmware_name)
+{
+ struct pn544_i2c_phy *phy = phy_id;
+
+ pr_info(DRIVER_DESC ": Starting Firmware Download (%s)\n",
+ firmware_name);
+
+ strcpy(phy->firmware_name, firmware_name);
+
+ phy->fw_work_state = FW_WORK_STATE_START;
+
+ schedule_work(&phy->fw_work);
+
+ return 0;
+}
+
+static void pn544_hci_i2c_fw_work_complete(struct pn544_i2c_phy *phy,
+ int result)
+{
+ pr_info(DRIVER_DESC ": Firmware Download Complete, result=%d\n", result);
+
+ pn544_hci_i2c_disable(phy);
+
+ phy->fw_work_state = FW_WORK_STATE_IDLE;
+
+ if (phy->fw) {
+ release_firmware(phy->fw);
+ phy->fw = NULL;
+ }
+
+ nfc_fw_download_done(phy->hdev->ndev, phy->firmware_name, (u32) -result);
+}
+
+static int pn544_hci_i2c_fw_write_cmd(struct i2c_client *client, u32 dest_addr,
+ const u8 *data, u16 datalen)
+{
+ u8 frame[PN544_FW_I2C_MAX_PAYLOAD];
+ struct pn544_i2c_fw_frame_write *framep;
+ u16 params_len;
+ int framelen;
+ int r;
+
+ if (datalen > PN544_FW_I2C_WRITE_DATA_MAX_LEN)
+ datalen = PN544_FW_I2C_WRITE_DATA_MAX_LEN;
+
+ framep = (struct pn544_i2c_fw_frame_write *) frame;
+
+ params_len = sizeof(framep->be_dest_addr) +
+ sizeof(framep->be_datalen) + datalen;
+ framelen = params_len + sizeof(framep->cmd) +
+ sizeof(framep->be_length);
+
+ framep->cmd = PN544_FW_CMD_WRITE;
+
+ put_unaligned_be16(params_len, &framep->be_length);
+
+ framep->be_dest_addr[0] = (dest_addr & 0xff0000) >> 16;
+ framep->be_dest_addr[1] = (dest_addr & 0xff00) >> 8;
+ framep->be_dest_addr[2] = dest_addr & 0xff;
+
+ put_unaligned_be16(datalen, &framep->be_datalen);
+
+ memcpy(framep->data, data, datalen);
+
+ r = i2c_master_send(client, frame, framelen);
+
+ if (r == framelen)
+ return datalen;
+ else if (r < 0)
+ return r;
+ else
+ return -EIO;
+}
+
+static int pn544_hci_i2c_fw_check_cmd(struct i2c_client *client, u32 start_addr,
+ const u8 *data, u16 datalen)
+{
+ struct pn544_i2c_fw_frame_check frame;
+ int r;
+ u16 crc;
+
+ /* calculate local crc for the data we want to check */
+ crc = crc_ccitt(0xffff, data, datalen);
+
+ frame.cmd = PN544_FW_CMD_CHECK;
+
+ put_unaligned_be16(sizeof(frame.be_start_addr) +
+ sizeof(frame.be_datalen) + sizeof(frame.be_crc),
+ &frame.be_length);
+
+ /* tell the chip the memory region to which our crc applies */
+ frame.be_start_addr[0] = (start_addr & 0xff0000) >> 16;
+ frame.be_start_addr[1] = (start_addr & 0xff00) >> 8;
+ frame.be_start_addr[2] = start_addr & 0xff;
+
+ put_unaligned_be16(datalen, &frame.be_datalen);
+
+ /*
+ * and give our local crc. Chip will calculate its own crc for the
+ * region and compare with ours.
+ */
+ put_unaligned_be16(crc, &frame.be_crc);
+
+ r = i2c_master_send(client, (const char *) &frame, sizeof(frame));
+
+ if (r == sizeof(frame))
+ return 0;
+ else if (r < 0)
+ return r;
+ else
+ return -EIO;
+}
+
+static int pn544_hci_i2c_fw_write_chunk(struct pn544_i2c_phy *phy)
+{
+ int r;
+
+ r = pn544_hci_i2c_fw_write_cmd(phy->i2c_dev,
+ phy->fw_blob_dest_addr + phy->fw_written,
+ phy->fw_blob_data + phy->fw_written,
+ phy->fw_blob_size - phy->fw_written);
+ if (r < 0)
+ return r;
+
+ phy->fw_written += r;
+ phy->fw_work_state = FW_WORK_STATE_WAIT_WRITE_ANSWER;
+
+ return 0;
+}
+
+static void pn544_hci_i2c_fw_work(struct work_struct *work)
+{
+ struct pn544_i2c_phy *phy = container_of(work, struct pn544_i2c_phy,
+ fw_work);
+ int r;
+ struct pn544_i2c_fw_blob *blob;
+
+ switch (phy->fw_work_state) {
+ case FW_WORK_STATE_START:
+ pn544_hci_i2c_enable_mode(phy, PN544_FW_MODE);
+
+ r = request_firmware(&phy->fw, phy->firmware_name,
+ &phy->i2c_dev->dev);
+ if (r < 0)
+ goto exit_state_start;
+
+ blob = (struct pn544_i2c_fw_blob *) phy->fw->data;
+ phy->fw_blob_size = get_unaligned_be32(&blob->be_size);
+ phy->fw_blob_dest_addr = get_unaligned_be32(&blob->be_destaddr);
+ phy->fw_blob_data = blob->data;
+
+ phy->fw_written = 0;
+ r = pn544_hci_i2c_fw_write_chunk(phy);
+
+exit_state_start:
+ if (r < 0)
+ pn544_hci_i2c_fw_work_complete(phy, r);
+ break;
+
+ case FW_WORK_STATE_WAIT_WRITE_ANSWER:
+ r = phy->fw_cmd_result;
+ if (r < 0)
+ goto exit_state_wait_write_answer;
+
+ if (phy->fw_written == phy->fw_blob_size) {
+ r = pn544_hci_i2c_fw_check_cmd(phy->i2c_dev,
+ phy->fw_blob_dest_addr,
+ phy->fw_blob_data,
+ phy->fw_blob_size);
+ if (r < 0)
+ goto exit_state_wait_write_answer;
+ phy->fw_work_state = FW_WORK_STATE_WAIT_CHECK_ANSWER;
+ break;
+ }
+
+ r = pn544_hci_i2c_fw_write_chunk(phy);
+
+exit_state_wait_write_answer:
+ if (r < 0)
+ pn544_hci_i2c_fw_work_complete(phy, r);
+ break;
+
+ case FW_WORK_STATE_WAIT_CHECK_ANSWER:
+ r = phy->fw_cmd_result;
+ if (r < 0)
+ goto exit_state_wait_check_answer;
+
+ blob = (struct pn544_i2c_fw_blob *) (phy->fw_blob_data +
+ phy->fw_blob_size);
+ phy->fw_blob_size = get_unaligned_be32(&blob->be_size);
+ if (phy->fw_blob_size != 0) {
+ phy->fw_blob_dest_addr =
+ get_unaligned_be32(&blob->be_destaddr);
+ phy->fw_blob_data = blob->data;
+
+ phy->fw_written = 0;
+ r = pn544_hci_i2c_fw_write_chunk(phy);
+ }
+
+exit_state_wait_check_answer:
+ if (r < 0 || phy->fw_blob_size == 0)
+ pn544_hci_i2c_fw_work_complete(phy, r);
+ break;
+
+ default:
+ break;
+ }
+}
+
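The state machine above walks a firmware image laid out as a chain of blobs, each headed by a big-endian size and destination address and terminated by a zero size. A minimal sketch of that walk, assuming the pn544_i2c_fw_blob structure and get_unaligned_be32() from this patch; example_walk_fw_blobs() is hypothetical and only prints what the WAIT_CHECK_ANSWER branch would program next.

#include <linux/printk.h>
#include <linux/unaligned/access_ok.h>

/* Illustrative sketch, not part of the patch: iterate the blob chain the
 * same way pn544_hci_i2c_fw_work() does after each successful CHECK answer.
 */
static void example_walk_fw_blobs(const u8 *image)
{
	const struct pn544_i2c_fw_blob *blob =
		(const struct pn544_i2c_fw_blob *)image;
	u32 size;

	while ((size = get_unaligned_be32(&blob->be_size)) != 0) {
		u32 dest = get_unaligned_be32(&blob->be_destaddr);

		pr_info("fw blob: %u bytes -> 0x%06x\n", size, dest);
		blob = (const struct pn544_i2c_fw_blob *)(blob->data + size);
	}
}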
static int pn544_hci_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -384,6 +706,9 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
return -ENOMEM;
}
+ INIT_WORK(&phy->fw_work, pn544_hci_i2c_fw_work);
+ phy->fw_work_state = FW_WORK_STATE_IDLE;
+
phy->i2c_dev = client;
i2c_set_clientdata(client, phy);
@@ -420,7 +745,8 @@ static int pn544_hci_i2c_probe(struct i2c_client *client,
r = pn544_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME,
PN544_I2C_FRAME_HEADROOM, PN544_I2C_FRAME_TAILROOM,
- PN544_HCI_I2C_LLC_MAX_PAYLOAD, &phy->hdev);
+ PN544_HCI_I2C_LLC_MAX_PAYLOAD,
+ pn544_hci_i2c_fw_download, &phy->hdev);
if (r < 0)
goto err_hci;
@@ -443,6 +769,10 @@ static int pn544_hci_i2c_remove(struct i2c_client *client)
dev_dbg(&client->dev, "%s\n", __func__);
+ cancel_work_sync(&phy->fw_work);
+ if (phy->fw_work_state != FW_WORK_STATE_IDLE)
+ pn544_hci_i2c_fw_work_complete(phy, -ENODEV);
+
pn544_hci_remove(phy->hdev);
if (phy->powered)
diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c
index b5d3d18179e..ee67de50c36 100644
--- a/drivers/nfc/pn544/mei.c
+++ b/drivers/nfc/pn544/mei.c
@@ -45,7 +45,7 @@ static int pn544_mei_probe(struct mei_cl_device *device,
r = pn544_hci_probe(phy, &mei_phy_ops, LLC_NOP_NAME,
MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD,
- &phy->hdev);
+ NULL, &phy->hdev);
if (r < 0) {
nfc_mei_phy_free(phy);
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c
index 0d17da7675b..078e62feba1 100644
--- a/drivers/nfc/pn544/pn544.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -31,9 +31,6 @@
/* Timing restrictions (ms) */
#define PN544_HCI_RESETVEN_TIME 30
-#define HCI_MODE 0
-#define FW_MODE 1
-
enum pn544_state {
PN544_ST_COLD,
PN544_ST_FW_READY,
@@ -130,6 +127,8 @@ struct pn544_hci_info {
int async_cb_type;
data_exchange_cb_t async_cb;
void *async_cb_context;
+
+ fw_download_t fw_download;
};
static int pn544_hci_open(struct nfc_hci_dev *hdev)
@@ -782,6 +781,17 @@ exit:
return r;
}
+static int pn544_hci_fw_download(struct nfc_hci_dev *hdev,
+ const char *firmware_name)
+{
+ struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev);
+
+ if (info->fw_download == NULL)
+ return -ENOTSUPP;
+
+ return info->fw_download(info->phy_id, firmware_name);
+}
+
static struct nfc_hci_ops pn544_hci_ops = {
.open = pn544_hci_open,
.close = pn544_hci_close,
@@ -796,11 +806,12 @@ static struct nfc_hci_ops pn544_hci_ops = {
.tm_send = pn544_hci_tm_send,
.check_presence = pn544_hci_check_presence,
.event_received = pn544_hci_event_received,
+ .fw_download = pn544_hci_fw_download,
};
int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
int phy_headroom, int phy_tailroom, int phy_payload,
- struct nfc_hci_dev **hdev)
+ fw_download_t fw_download, struct nfc_hci_dev **hdev)
{
struct pn544_hci_info *info;
u32 protocols;
@@ -816,6 +827,7 @@ int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
info->phy_ops = phy_ops;
info->phy_id = phy_id;
+ info->fw_download = fw_download;
info->state = PN544_ST_COLD;
mutex_init(&info->info_lock);
diff --git a/drivers/nfc/pn544/pn544.h b/drivers/nfc/pn544/pn544.h
index f47c6454914..01020e58544 100644
--- a/drivers/nfc/pn544/pn544.h
+++ b/drivers/nfc/pn544/pn544.h
@@ -24,9 +24,14 @@
#define DRIVER_DESC "HCI NFC driver for PN544"
+#define PN544_HCI_MODE 0
+#define PN544_FW_MODE 1
+
+typedef int (*fw_download_t)(void *context, const char *firmware_name);
+
int pn544_hci_probe(void *phy_id, struct nfc_phy_ops *phy_ops, char *llc_name,
int phy_headroom, int phy_tailroom, int phy_payload,
- struct nfc_hci_dev **hdev);
+ fw_download_t fw_download, struct nfc_hci_dev **hdev);
void pn544_hci_remove(struct nfc_hci_dev *hdev);
#endif /* __LOCAL_PN544_H_ */
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 80e5c13b930..78cc7605332 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -48,12 +48,6 @@ config OF_IRQ
def_bool y
depends on !SPARC
-config OF_I2C
- def_tristate I2C
- depends on I2C
- help
- OpenFirmware I2C accessors
-
config OF_NET
depends on NETDEVICES
def_bool y
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index 1f9c0c492ef..efd05102c40 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -3,7 +3,6 @@ obj-$(CONFIG_OF_FLATTREE) += fdt.o
obj-$(CONFIG_OF_PROMTREE) += pdt.o
obj-$(CONFIG_OF_ADDRESS) += address.o
obj-$(CONFIG_OF_IRQ) += irq.o
-obj-$(CONFIG_OF_I2C) += of_i2c.o
obj-$(CONFIG_OF_NET) += of_net.o
obj-$(CONFIG_OF_SELFTEST) += selftest.o
obj-$(CONFIG_OF_MDIO) += of_mdio.o
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 5c5427918eb..e486e416d5a 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -18,6 +18,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/ctype.h>
+#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/spinlock.h>
@@ -32,6 +33,7 @@ struct device_node *of_allnodes;
EXPORT_SYMBOL(of_allnodes);
struct device_node *of_chosen;
struct device_node *of_aliases;
+static struct device_node *of_stdout;
DEFINE_MUTEX(of_aliases_mutex);
@@ -230,6 +232,100 @@ const void *of_get_property(const struct device_node *np, const char *name,
}
EXPORT_SYMBOL(of_get_property);
+/*
+ * arch_match_cpu_phys_id - Match the given logical CPU and physical id
+ *
+ * @cpu: logical cpu index of a core/thread
+ * @phys_id: physical identifier of a core/thread
+ *
+ * CPU logical to physical index mapping is architecture specific.
+ * However this __weak function provides a default match of physical
+ * id to logical cpu index. phys_id provided here is usually the value read
+ * from the device tree which must match the hardware internal registers.
+ *
+ * Returns true if the physical identifier and the logical cpu index
+ * correspond to the same core/thread, false otherwise.
+ */
+bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+ return (u32)phys_id == cpu;
+}
+
+/**
+ * Checks if the given "prop_name" property holds the physical id of the
+ * core/thread corresponding to the logical cpu 'cpu'. If 'thread' is not
+ * NULL, local thread number within the core is returned in it.
+ */
+static bool __of_find_n_match_cpu_property(struct device_node *cpun,
+ const char *prop_name, int cpu, unsigned int *thread)
+{
+ const __be32 *cell;
+ int ac, prop_len, tid;
+ u64 hwid;
+
+ ac = of_n_addr_cells(cpun);
+ cell = of_get_property(cpun, prop_name, &prop_len);
+ if (!cell)
+ return false;
+ prop_len /= sizeof(*cell);
+ for (tid = 0; tid < prop_len; tid++) {
+ hwid = of_read_number(cell, ac);
+ if (arch_match_cpu_phys_id(cpu, hwid)) {
+ if (thread)
+ *thread = tid;
+ return true;
+ }
+ cell += ac;
+ }
+ return false;
+}
+
+/**
+ * of_get_cpu_node - Get device node associated with the given logical CPU
+ *
+ * @cpu: CPU number(logical index) for which device node is required
+ * @thread: if not NULL, local thread number within the physical core is
+ * returned
+ *
+ * The main purpose of this function is to retrieve the device node for the
+ * given logical CPU index. It should be used to initialize the of_node in
+ * the cpu device. Once of_node in the cpu device is populated, all further
+ * references can use that instead.
+ *
+ * CPU logical to physical index mapping is architecture specific and is built
+ * before booting secondary cores. This function uses arch_match_cpu_phys_id
+ * which can be overridden by architecture specific implementation.
+ *
+ * Returns a node pointer for the logical cpu if found, else NULL.
+ */
+struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
+{
+ struct device_node *cpun, *cpus;
+
+ cpus = of_find_node_by_path("/cpus");
+ if (!cpus) {
+ pr_warn("Missing cpus node, bailing out\n");
+ return NULL;
+ }
+
+ for_each_child_of_node(cpus, cpun) {
+ if (of_node_cmp(cpun->type, "cpu"))
+ continue;
+ /* Check for non-standard "ibm,ppc-interrupt-server#s" property
+ * for thread ids on PowerPC. If it doesn't exist fallback to
+ * standard "reg" property.
+ */
+ if (IS_ENABLED(CONFIG_PPC) &&
+ __of_find_n_match_cpu_property(cpun,
+ "ibm,ppc-interrupt-server#s", cpu, thread))
+ return cpun;
+ if (__of_find_n_match_cpu_property(cpun, "reg", cpu, thread))
+ return cpun;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(of_get_cpu_node);
+
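As a usage sketch for the accessor above, an architecture setup path could attach the returned node to the corresponding CPU device, which is the initialization the kernel-doc describes. of_get_cpu_node(), get_cpu_device() and for_each_possible_cpu() are existing interfaces; the wrapper name below is hypothetical.

#include <linux/cpu.h>
#include <linux/of.h>

/* Illustrative sketch, not part of the patch: populate each CPU device's
 * of_node once so later lookups can use it directly.
 */
static void example_link_cpu_of_nodes(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct device *cpu_dev = get_cpu_device(cpu);
		struct device_node *np = of_get_cpu_node(cpu, NULL);

		if (cpu_dev && np && !cpu_dev->of_node)
			cpu_dev->of_node = np;
	}
}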
/** Checks if the given "compat" string matches one of the strings in
* the device's "compatible" property
*/
@@ -1595,6 +1691,15 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
of_chosen = of_find_node_by_path("/chosen");
if (of_chosen == NULL)
of_chosen = of_find_node_by_path("/chosen@0");
+
+ if (of_chosen) {
+ const char *name;
+
+ name = of_get_property(of_chosen, "linux,stdout-path", NULL);
+ if (name)
+ of_stdout = of_find_node_by_path(name);
+ }
+
of_aliases = of_find_node_by_path("/aliases");
if (!of_aliases)
return;
@@ -1703,3 +1808,19 @@ const char *of_prop_next_string(struct property *prop, const char *cur)
return curv;
}
EXPORT_SYMBOL_GPL(of_prop_next_string);
+
+/**
+ * of_device_is_stdout_path - check if a device node matches the
+ * linux,stdout-path property
+ *
+ * Check if this device node matches the linux,stdout-path property
+ * in the chosen node. Return true if yes, false otherwise.
+ */
+int of_device_is_stdout_path(struct device_node *dn)
+{
+ if (!of_stdout)
+ return false;
+
+ return of_stdout == dn;
+}
+EXPORT_SYMBOL_GPL(of_device_is_stdout_path);
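A usage sketch for the helper above: a UART driver could check whether its node is the chosen stdout and register a preferred console. add_preferred_console() is an existing kernel interface; the wrapper and the "ttyS" prefix here are illustrative assumptions.

#include <linux/console.h>
#include <linux/of.h>

/* Illustrative sketch, not part of the patch: prefer the console on the
 * port named by the linux,stdout-path property.
 */
static void example_prefer_stdout_console(struct device_node *np, int line)
{
	if (of_device_is_stdout_path(np))
		add_preferred_console("ttyS", line, NULL);
}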
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 6bb7cf2de55..b10ba00cc3e 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -392,6 +392,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob,
mem = (unsigned long)
dt_alloc(size + 4, __alignof__(struct device_node));
+ memset((void *)mem, 0, size);
+
((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef);
pr_debug(" unflattening %lx...\n", mem);
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
deleted file mode 100644
index b667264222c..00000000000
--- a/drivers/of/of_i2c.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * OF helpers for the I2C API
- *
- * Copyright (c) 2008 Jochen Friedrich <jochen@scram.de>
- *
- * Based on a previous patch from Jon Smirl <jonsmirl@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/i2c.h>
-#include <linux/irq.h>
-#include <linux/of.h>
-#include <linux/of_i2c.h>
-#include <linux/of_irq.h>
-#include <linux/module.h>
-
-void of_i2c_register_devices(struct i2c_adapter *adap)
-{
- void *result;
- struct device_node *node;
-
- /* Only register child devices if the adapter has a node pointer set */
- if (!adap->dev.of_node)
- return;
-
- dev_dbg(&adap->dev, "of_i2c: walking child nodes\n");
-
- for_each_available_child_of_node(adap->dev.of_node, node) {
- struct i2c_board_info info = {};
- struct dev_archdata dev_ad = {};
- const __be32 *addr;
- int len;
-
- dev_dbg(&adap->dev, "of_i2c: register %s\n", node->full_name);
-
- if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) {
- dev_err(&adap->dev, "of_i2c: modalias failure on %s\n",
- node->full_name);
- continue;
- }
-
- addr = of_get_property(node, "reg", &len);
- if (!addr || (len < sizeof(int))) {
- dev_err(&adap->dev, "of_i2c: invalid reg on %s\n",
- node->full_name);
- continue;
- }
-
- info.addr = be32_to_cpup(addr);
- if (info.addr > (1 << 10) - 1) {
- dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n",
- info.addr, node->full_name);
- continue;
- }
-
- info.irq = irq_of_parse_and_map(node, 0);
- info.of_node = of_node_get(node);
- info.archdata = &dev_ad;
-
- if (of_get_property(node, "wakeup-source", NULL))
- info.flags |= I2C_CLIENT_WAKE;
-
- request_module("%s%s", I2C_MODULE_PREFIX, info.type);
-
- result = i2c_new_device(adap, &info);
- if (result == NULL) {
- dev_err(&adap->dev, "of_i2c: Failure registering %s\n",
- node->full_name);
- of_node_put(node);
- irq_dispose_mapping(info.irq);
- continue;
- }
- }
-}
-EXPORT_SYMBOL(of_i2c_register_devices);
-
-static int of_dev_node_match(struct device *dev, void *data)
-{
- return dev->of_node == data;
-}
-
-/* must call put_device() when done with returned i2c_client device */
-struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
-{
- struct device *dev;
-
- dev = bus_find_device(&i2c_bus_type, NULL, node,
- of_dev_node_match);
- if (!dev)
- return NULL;
-
- return i2c_verify_client(dev);
-}
-EXPORT_SYMBOL(of_find_i2c_device_by_node);
-
-/* must call put_device() when done with returned i2c_adapter device */
-struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
-{
- struct device *dev;
-
- dev = bus_find_device(&i2c_bus_type, NULL, node,
- of_dev_node_match);
- if (!dev)
- return NULL;
-
- return i2c_verify_adapter(dev);
-}
-EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index d32ef816337..d5412060ab0 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -30,10 +30,9 @@ extern struct oprofile_operations oprofile_ops;
extern unsigned long oprofile_started;
extern unsigned long oprofile_backtrace_depth;
-struct super_block;
struct dentry;
-void oprofile_create_files(struct super_block *sb, struct dentry *root);
+void oprofile_create_files(struct dentry *root);
int oprofile_timer_init(struct oprofile_operations *ops);
#ifdef CONFIG_OPROFILE_NMI_TIMER
int op_nmi_timer_init(struct oprofile_operations *ops);
diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
index 84a208dbed9..ee2cfce358b 100644
--- a/drivers/oprofile/oprofile_files.c
+++ b/drivers/oprofile/oprofile_files.c
@@ -175,7 +175,7 @@ static const struct file_operations dump_fops = {
.llseek = noop_llseek,
};
-void oprofile_create_files(struct super_block *sb, struct dentry *root)
+void oprofile_create_files(struct dentry *root)
{
/* reinitialize default values */
oprofile_buffer_size = BUFFER_SIZE_DEFAULT;
@@ -183,19 +183,19 @@ void oprofile_create_files(struct super_block *sb, struct dentry *root)
oprofile_buffer_watershed = BUFFER_WATERSHED_DEFAULT;
oprofile_time_slice = msecs_to_jiffies(TIME_SLICE_DEFAULT);
- oprofilefs_create_file(sb, root, "enable", &enable_fops);
- oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
- oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
- oprofilefs_create_ulong(sb, root, "buffer_size", &oprofile_buffer_size);
- oprofilefs_create_ulong(sb, root, "buffer_watershed", &oprofile_buffer_watershed);
- oprofilefs_create_ulong(sb, root, "cpu_buffer_size", &oprofile_cpu_buffer_size);
- oprofilefs_create_file(sb, root, "cpu_type", &cpu_type_fops);
- oprofilefs_create_file(sb, root, "backtrace_depth", &depth_fops);
- oprofilefs_create_file(sb, root, "pointer_size", &pointer_size_fops);
+ oprofilefs_create_file(root, "enable", &enable_fops);
+ oprofilefs_create_file_perm(root, "dump", &dump_fops, 0666);
+ oprofilefs_create_file(root, "buffer", &event_buffer_fops);
+ oprofilefs_create_ulong(root, "buffer_size", &oprofile_buffer_size);
+ oprofilefs_create_ulong(root, "buffer_watershed", &oprofile_buffer_watershed);
+ oprofilefs_create_ulong(root, "cpu_buffer_size", &oprofile_cpu_buffer_size);
+ oprofilefs_create_file(root, "cpu_type", &cpu_type_fops);
+ oprofilefs_create_file(root, "backtrace_depth", &depth_fops);
+ oprofilefs_create_file(root, "pointer_size", &pointer_size_fops);
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
- oprofilefs_create_file(sb, root, "time_slice", &timeout_fops);
+ oprofilefs_create_file(root, "time_slice", &timeout_fops);
#endif
- oprofile_create_stats_files(sb, root);
+ oprofile_create_stats_files(root);
if (oprofile_ops.create_files)
- oprofile_ops.create_files(sb, root);
+ oprofile_ops.create_files(root);
}
diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c
index f3cfa0b9adf..d5b2732b1b8 100644
--- a/drivers/oprofile/oprofile_perf.c
+++ b/drivers/oprofile/oprofile_perf.c
@@ -138,7 +138,7 @@ static void op_perf_stop(void)
op_destroy_counter(cpu, event);
}
-static int oprofile_perf_create_files(struct super_block *sb, struct dentry *root)
+static int oprofile_perf_create_files(struct dentry *root)
{
unsigned int i;
@@ -147,13 +147,13 @@ static int oprofile_perf_create_files(struct super_block *sb, struct dentry *roo
char buf[4];
snprintf(buf, sizeof buf, "%d", i);
- dir = oprofilefs_mkdir(sb, root, buf);
- oprofilefs_create_ulong(sb, dir, "enabled", &counter_config[i].enabled);
- oprofilefs_create_ulong(sb, dir, "event", &counter_config[i].event);
- oprofilefs_create_ulong(sb, dir, "count", &counter_config[i].count);
- oprofilefs_create_ulong(sb, dir, "unit_mask", &counter_config[i].unit_mask);
- oprofilefs_create_ulong(sb, dir, "kernel", &counter_config[i].kernel);
- oprofilefs_create_ulong(sb, dir, "user", &counter_config[i].user);
+ dir = oprofilefs_mkdir(root, buf);
+ oprofilefs_create_ulong(dir, "enabled", &counter_config[i].enabled);
+ oprofilefs_create_ulong(dir, "event", &counter_config[i].event);
+ oprofilefs_create_ulong(dir, "count", &counter_config[i].count);
+ oprofilefs_create_ulong(dir, "unit_mask", &counter_config[i].unit_mask);
+ oprofilefs_create_ulong(dir, "kernel", &counter_config[i].kernel);
+ oprofilefs_create_ulong(dir, "user", &counter_config[i].user);
}
return 0;
diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
index 917d28ebeac..59659cea458 100644
--- a/drivers/oprofile/oprofile_stats.c
+++ b/drivers/oprofile/oprofile_stats.c
@@ -38,7 +38,7 @@ void oprofile_reset_stats(void)
}
-void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
+void oprofile_create_stats_files(struct dentry *root)
{
struct oprofile_cpu_buffer *cpu_buf;
struct dentry *cpudir;
@@ -46,39 +46,39 @@ void oprofile_create_stats_files(struct super_block *sb, struct dentry *root)
char buf[10];
int i;
- dir = oprofilefs_mkdir(sb, root, "stats");
+ dir = oprofilefs_mkdir(root, "stats");
if (!dir)
return;
for_each_possible_cpu(i) {
cpu_buf = &per_cpu(op_cpu_buffer, i);
snprintf(buf, 10, "cpu%d", i);
- cpudir = oprofilefs_mkdir(sb, dir, buf);
+ cpudir = oprofilefs_mkdir(dir, buf);
/* Strictly speaking access to these ulongs is racy,
* but we can't simply lock them, and they are
* informational only.
*/
- oprofilefs_create_ro_ulong(sb, cpudir, "sample_received",
+ oprofilefs_create_ro_ulong(cpudir, "sample_received",
&cpu_buf->sample_received);
- oprofilefs_create_ro_ulong(sb, cpudir, "sample_lost_overflow",
+ oprofilefs_create_ro_ulong(cpudir, "sample_lost_overflow",
&cpu_buf->sample_lost_overflow);
- oprofilefs_create_ro_ulong(sb, cpudir, "backtrace_aborted",
+ oprofilefs_create_ro_ulong(cpudir, "backtrace_aborted",
&cpu_buf->backtrace_aborted);
- oprofilefs_create_ro_ulong(sb, cpudir, "sample_invalid_eip",
+ oprofilefs_create_ro_ulong(cpudir, "sample_invalid_eip",
&cpu_buf->sample_invalid_eip);
}
- oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm",
+ oprofilefs_create_ro_atomic(dir, "sample_lost_no_mm",
&oprofile_stats.sample_lost_no_mm);
- oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mapping",
+ oprofilefs_create_ro_atomic(dir, "sample_lost_no_mapping",
&oprofile_stats.sample_lost_no_mapping);
- oprofilefs_create_ro_atomic(sb, dir, "event_lost_overflow",
+ oprofilefs_create_ro_atomic(dir, "event_lost_overflow",
&oprofile_stats.event_lost_overflow);
- oprofilefs_create_ro_atomic(sb, dir, "bt_lost_no_mapping",
+ oprofilefs_create_ro_atomic(dir, "bt_lost_no_mapping",
&oprofile_stats.bt_lost_no_mapping);
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
- oprofilefs_create_ro_atomic(sb, dir, "multiplex_counter",
+ oprofilefs_create_ro_atomic(dir, "multiplex_counter",
&oprofile_stats.multiplex_counter);
#endif
}
diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
index 38b6fc02898..1fc622bd183 100644
--- a/drivers/oprofile/oprofile_stats.h
+++ b/drivers/oprofile/oprofile_stats.h
@@ -25,10 +25,9 @@ extern struct oprofile_stat_struct oprofile_stats;
/* reset all stats to zero */
void oprofile_reset_stats(void);
-struct super_block;
struct dentry;
/* create the stats/ dir */
-void oprofile_create_stats_files(struct super_block *sb, struct dentry *root);
+void oprofile_create_stats_files(struct dentry *root);
#endif /* OPROFILE_STATS_H */
diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
index 7c12d9c2b23..3f493459378 100644
--- a/drivers/oprofile/oprofilefs.c
+++ b/drivers/oprofile/oprofilefs.c
@@ -132,9 +132,8 @@ static const struct file_operations ulong_ro_fops = {
};
-static int __oprofilefs_create_file(struct super_block *sb,
- struct dentry *root, char const *name, const struct file_operations *fops,
- int perm, void *priv)
+static int __oprofilefs_create_file(struct dentry *root, char const *name,
+ const struct file_operations *fops, int perm, void *priv)
{
struct dentry *dentry;
struct inode *inode;
@@ -145,7 +144,7 @@ static int __oprofilefs_create_file(struct super_block *sb,
mutex_unlock(&root->d_inode->i_mutex);
return -ENOMEM;
}
- inode = oprofilefs_get_inode(sb, S_IFREG | perm);
+ inode = oprofilefs_get_inode(root->d_sb, S_IFREG | perm);
if (!inode) {
dput(dentry);
mutex_unlock(&root->d_inode->i_mutex);
@@ -159,18 +158,18 @@ static int __oprofilefs_create_file(struct super_block *sb,
}
-int oprofilefs_create_ulong(struct super_block *sb, struct dentry *root,
+int oprofilefs_create_ulong(struct dentry *root,
char const *name, unsigned long *val)
{
- return __oprofilefs_create_file(sb, root, name,
+ return __oprofilefs_create_file(root, name,
&ulong_fops, 0644, val);
}
-int oprofilefs_create_ro_ulong(struct super_block *sb, struct dentry *root,
+int oprofilefs_create_ro_ulong(struct dentry *root,
char const *name, unsigned long *val)
{
- return __oprofilefs_create_file(sb, root, name,
+ return __oprofilefs_create_file(root, name,
&ulong_ro_fops, 0444, val);
}
@@ -189,50 +188,49 @@ static const struct file_operations atomic_ro_fops = {
};
-int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
+int oprofilefs_create_ro_atomic(struct dentry *root,
char const *name, atomic_t *val)
{
- return __oprofilefs_create_file(sb, root, name,
+ return __oprofilefs_create_file(root, name,
&atomic_ro_fops, 0444, val);
}
-int oprofilefs_create_file(struct super_block *sb, struct dentry *root,
+int oprofilefs_create_file(struct dentry *root,
char const *name, const struct file_operations *fops)
{
- return __oprofilefs_create_file(sb, root, name, fops, 0644, NULL);
+ return __oprofilefs_create_file(root, name, fops, 0644, NULL);
}
-int oprofilefs_create_file_perm(struct super_block *sb, struct dentry *root,
+int oprofilefs_create_file_perm(struct dentry *root,
char const *name, const struct file_operations *fops, int perm)
{
- return __oprofilefs_create_file(sb, root, name, fops, perm, NULL);
+ return __oprofilefs_create_file(root, name, fops, perm, NULL);
}
-struct dentry *oprofilefs_mkdir(struct super_block *sb,
- struct dentry *root, char const *name)
+struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name)
{
struct dentry *dentry;
struct inode *inode;
- mutex_lock(&root->d_inode->i_mutex);
- dentry = d_alloc_name(root, name);
+ mutex_lock(&parent->d_inode->i_mutex);
+ dentry = d_alloc_name(parent, name);
if (!dentry) {
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&parent->d_inode->i_mutex);
return NULL;
}
- inode = oprofilefs_get_inode(sb, S_IFDIR | 0755);
+ inode = oprofilefs_get_inode(parent->d_sb, S_IFDIR | 0755);
if (!inode) {
dput(dentry);
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&parent->d_inode->i_mutex);
return NULL;
}
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
d_add(dentry, inode);
- mutex_unlock(&root->d_inode->i_mutex);
+ mutex_unlock(&parent->d_inode->i_mutex);
return dentry;
}
@@ -256,7 +254,7 @@ static int oprofilefs_fill_super(struct super_block *sb, void *data, int silent)
if (!sb->s_root)
return -ENOMEM;
- oprofile_create_files(sb, sb->s_root);
+ oprofile_create_files(sb->s_root);
// FIXME: verify kill_litter_super removes our dentries
return 0;
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index e79e006eb9a..9ee04b4b68b 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -811,18 +811,28 @@ int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev)
return pcidev->irq;
}
-static struct iosapic_info *first_isi = NULL;
+static struct iosapic_info *iosapic_list;
#ifdef CONFIG_64BIT
-int iosapic_serial_irq(int num)
+int iosapic_serial_irq(struct parisc_device *dev)
{
- struct iosapic_info *isi = first_isi;
- struct irt_entry *irte = NULL; /* only used if PAT PDC */
+ struct iosapic_info *isi;
+ struct irt_entry *irte;
struct vector_info *vi;
- int isi_line; /* line used by device */
+ int cnt;
+ int intin;
+
+ intin = (dev->mod_info >> 24) & 15;
/* lookup IRT entry for isi/slot/pin set */
- irte = &irt_cell[num];
+ for (cnt = 0; cnt < irt_num_entry; cnt++) {
+ irte = &irt_cell[cnt];
+ if (COMPARE_IRTE_ADDR(irte, dev->mod0) &&
+ irte->dest_iosapic_intin == intin)
+ break;
+ }
+ if (cnt >= irt_num_entry)
+ return 0; /* no irq found, force polling */
DBG_IRT("iosapic_serial_irq(): irte %p %x %x %x %x %x %x %x %x\n",
irte,
@@ -834,11 +844,17 @@ int iosapic_serial_irq(int num)
irte->src_seg_id,
irte->dest_iosapic_intin,
(u32) irte->dest_iosapic_addr);
- isi_line = irte->dest_iosapic_intin;
+
+ /* search for iosapic */
+ for (isi = iosapic_list; isi; isi = isi->isi_next)
+ if (isi->isi_hpa == dev->mod0)
+ break;
+ if (!isi)
+ return 0; /* no iosapic found, force polling */
/* get vector info for this input line */
- vi = isi->isi_vector + isi_line;
- DBG_IRT("iosapic_serial_irq: line %d vi 0x%p\n", isi_line, vi);
+ vi = isi->isi_vector + intin;
+ DBG_IRT("iosapic_serial_irq: line %d vi 0x%p\n", iosapic_intin, vi);
/* If this IRQ line has already been setup, skip it */
if (vi->irte)
@@ -941,8 +957,8 @@ void *iosapic_register(unsigned long hpa)
vip->irqline = (unsigned char) cnt;
vip->iosapic = isi;
}
- if (!first_isi)
- first_isi = isi;
+ isi->isi_next = iosapic_list;
+ iosapic_list = isi;
return isi;
}
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 19f6f70c67d..37e71ff6408 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1590,7 +1590,6 @@ lba_driver_probe(struct parisc_device *dev)
lba_dump_res(&lba_dev->hba.lmmio_space, 2);
#endif
}
- pci_enable_bridges(lba_bus);
/*
** Once PCI register ops has walked the bus, access to config
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig
index dc82ef096f3..70694ce38be 100644
--- a/drivers/parport/Kconfig
+++ b/drivers/parport/Kconfig
@@ -37,7 +37,7 @@ config PARPORT_PC
tristate "PC-style hardware"
depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && !S390 && \
(!M68K || ISA) && !MN10300 && !AVR32 && !BLACKFIN && \
- !XTENSA && !CRIS
+ !XTENSA && !CRIS && !H8300
---help---
You should say Y here if you have a PC-style parallel port. All
diff --git a/drivers/parport/parport_amiga.c b/drivers/parport/parport_amiga.c
index 09503b8d12e..26ecdea84fb 100644
--- a/drivers/parport/parport_amiga.c
+++ b/drivers/parport/parport_amiga.c
@@ -232,7 +232,6 @@ static int __exit amiga_parallel_remove(struct platform_device *pdev)
if (port->irq != PARPORT_IRQ_NONE)
free_irq(IRQ_AMIGA_CIAA_FLG, port);
parport_put_port(port);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 1cc23661f79..0857ca981fa 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -475,37 +475,33 @@ static inline int pcie_cap_version(const struct pci_dev *dev)
return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}
-static inline bool pcie_cap_has_devctl(const struct pci_dev *dev)
-{
- return true;
-}
-
static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
- return pcie_cap_version(dev) > 1 ||
+ return type == PCI_EXP_TYPE_ENDPOINT ||
+ type == PCI_EXP_TYPE_LEG_END ||
type == PCI_EXP_TYPE_ROOT_PORT ||
- type == PCI_EXP_TYPE_ENDPOINT ||
- type == PCI_EXP_TYPE_LEG_END;
+ type == PCI_EXP_TYPE_UPSTREAM ||
+ type == PCI_EXP_TYPE_DOWNSTREAM ||
+ type == PCI_EXP_TYPE_PCI_BRIDGE ||
+ type == PCI_EXP_TYPE_PCIE_BRIDGE;
}
static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
- return pcie_cap_version(dev) > 1 ||
- type == PCI_EXP_TYPE_ROOT_PORT ||
- (type == PCI_EXP_TYPE_DOWNSTREAM &&
- pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT);
+ return (type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_DOWNSTREAM) &&
+ pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}
static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
- return pcie_cap_version(dev) > 1 ||
- type == PCI_EXP_TYPE_ROOT_PORT ||
+ return type == PCI_EXP_TYPE_ROOT_PORT ||
type == PCI_EXP_TYPE_RC_EC;
}
@@ -520,7 +516,7 @@ static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
case PCI_EXP_DEVCAP:
case PCI_EXP_DEVCTL:
case PCI_EXP_DEVSTA:
- return pcie_cap_has_devctl(dev);
+ return true;
case PCI_EXP_LNKCAP:
case PCI_EXP_LNKCTL:
case PCI_EXP_LNKSTA:
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index b1ff02ab4f1..fc1b7401374 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -216,24 +216,6 @@ void pci_bus_add_devices(const struct pci_bus *bus)
}
}
-void pci_enable_bridges(struct pci_bus *bus)
-{
- struct pci_dev *dev;
- int retval;
-
- list_for_each_entry(dev, &bus->devices, bus_list) {
- if (dev->subordinate) {
- if (!pci_is_enabled(dev)) {
- retval = pci_enable_device(dev);
- if (retval)
- dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n", retval);
- pci_set_master(dev);
- }
- pci_enable_bridges(dev->subordinate);
- }
- }
-}
-
/** pci_walk_bus - walk devices on/under bus, calling callback.
* @top bus whose devices should be walked
* @cb callback to be called for each device found
@@ -301,4 +283,3 @@ EXPORT_SYMBOL(pci_bus_put);
EXPORT_SYMBOL(pci_bus_alloc_resource);
EXPORT_SYMBOL_GPL(pci_bus_add_device);
EXPORT_SYMBOL(pci_bus_add_devices);
-EXPORT_SYMBOL(pci_enable_bridges);
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 1184ff6fe86..e5ba4eb4e5b 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -4,6 +4,7 @@ menu "PCI host controller drivers"
config PCI_MVEBU
bool "Marvell EBU PCIe controller"
depends on ARCH_MVEBU || ARCH_KIRKWOOD
+ depends on OF
config PCIE_DW
bool
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index 086d8500e84..ab79ccb5bbf 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -1,2 +1,3 @@
-obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
obj-$(CONFIG_PCIE_DW) += pcie-designware.o
+obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
+obj-$(CONFIG_PCI_MVEBU) += pci-mvebu.o
diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c
new file mode 100644
index 00000000000..94e096bb2d0
--- /dev/null
+++ b/drivers/pci/host/pci-exynos.c
@@ -0,0 +1,552 @@
+/*
+ * PCIe host controller driver for Samsung EXYNOS SoCs
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+#include <linux/types.h>
+
+#include "pcie-designware.h"
+
+#define to_exynos_pcie(x) container_of(x, struct exynos_pcie, pp)
+
+struct exynos_pcie {
+ void __iomem *elbi_base;
+ void __iomem *phy_base;
+ void __iomem *block_base;
+ int reset_gpio;
+ struct clk *clk;
+ struct clk *bus_clk;
+ struct pcie_port pp;
+};
+
+/* PCIe ELBI registers */
+#define PCIE_IRQ_PULSE 0x000
+#define IRQ_INTA_ASSERT (0x1 << 0)
+#define IRQ_INTB_ASSERT (0x1 << 2)
+#define IRQ_INTC_ASSERT (0x1 << 4)
+#define IRQ_INTD_ASSERT (0x1 << 6)
+#define PCIE_IRQ_LEVEL 0x004
+#define PCIE_IRQ_SPECIAL 0x008
+#define PCIE_IRQ_EN_PULSE 0x00c
+#define PCIE_IRQ_EN_LEVEL 0x010
+#define PCIE_IRQ_EN_SPECIAL 0x014
+#define PCIE_PWR_RESET 0x018
+#define PCIE_CORE_RESET 0x01c
+#define PCIE_CORE_RESET_ENABLE (0x1 << 0)
+#define PCIE_STICKY_RESET 0x020
+#define PCIE_NONSTICKY_RESET 0x024
+#define PCIE_APP_INIT_RESET 0x028
+#define PCIE_APP_LTSSM_ENABLE 0x02c
+#define PCIE_ELBI_RDLH_LINKUP 0x064
+#define PCIE_ELBI_LTSSM_ENABLE 0x1
+#define PCIE_ELBI_SLV_AWMISC 0x11c
+#define PCIE_ELBI_SLV_ARMISC 0x120
+#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21)
+
+/* PCIe Purple registers */
+#define PCIE_PHY_GLOBAL_RESET 0x000
+#define PCIE_PHY_COMMON_RESET 0x004
+#define PCIE_PHY_CMN_REG 0x008
+#define PCIE_PHY_MAC_RESET 0x00c
+#define PCIE_PHY_PLL_LOCKED 0x010
+#define PCIE_PHY_TRSVREG_RESET 0x020
+#define PCIE_PHY_TRSV_RESET 0x024
+
+/* PCIe PHY registers */
+#define PCIE_PHY_IMPEDANCE 0x004
+#define PCIE_PHY_PLL_DIV_0 0x008
+#define PCIE_PHY_PLL_BIAS 0x00c
+#define PCIE_PHY_DCC_FEEDBACK 0x014
+#define PCIE_PHY_PLL_DIV_1 0x05c
+#define PCIE_PHY_TRSV0_EMP_LVL 0x084
+#define PCIE_PHY_TRSV0_DRV_LVL 0x088
+#define PCIE_PHY_TRSV0_RXCDR 0x0ac
+#define PCIE_PHY_TRSV0_LVCC 0x0dc
+#define PCIE_PHY_TRSV1_EMP_LVL 0x144
+#define PCIE_PHY_TRSV1_RXCDR 0x16c
+#define PCIE_PHY_TRSV1_LVCC 0x19c
+#define PCIE_PHY_TRSV2_EMP_LVL 0x204
+#define PCIE_PHY_TRSV2_RXCDR 0x22c
+#define PCIE_PHY_TRSV2_LVCC 0x25c
+#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4
+#define PCIE_PHY_TRSV3_RXCDR 0x2ec
+#define PCIE_PHY_TRSV3_LVCC 0x31c
+
+static inline void exynos_elb_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
+{
+ writel(val, pcie->elbi_base + reg);
+}
+
+static inline u32 exynos_elb_readl(struct exynos_pcie *pcie, u32 reg)
+{
+ return readl(pcie->elbi_base + reg);
+}
+
+static inline void exynos_phy_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
+{
+ writel(val, pcie->phy_base + reg);
+}
+
+static inline u32 exynos_phy_readl(struct exynos_pcie *pcie, u32 reg)
+{
+ return readl(pcie->phy_base + reg);
+}
+
+static inline void exynos_blk_writel(struct exynos_pcie *pcie, u32 val, u32 reg)
+{
+ writel(val, pcie->block_base + reg);
+}
+
+static inline u32 exynos_blk_readl(struct exynos_pcie *pcie, u32 reg)
+{
+ return readl(pcie->block_base + reg);
+}
+
+static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ if (on) {
+ val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_AWMISC);
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_AWMISC);
+ } else {
+ val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_AWMISC);
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_AWMISC);
+ }
+}
+
+static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ if (on) {
+ val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_ARMISC);
+ val |= PCIE_ELBI_SLV_DBI_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_ARMISC);
+ } else {
+ val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_SLV_ARMISC);
+ val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_ELBI_SLV_ARMISC);
+ }
+}
+
+static void exynos_pcie_assert_core_reset(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET);
+ val &= ~PCIE_CORE_RESET_ENABLE;
+ exynos_elb_writel(exynos_pcie, val, PCIE_CORE_RESET);
+ exynos_elb_writel(exynos_pcie, 0, PCIE_PWR_RESET);
+ exynos_elb_writel(exynos_pcie, 0, PCIE_STICKY_RESET);
+ exynos_elb_writel(exynos_pcie, 0, PCIE_NONSTICKY_RESET);
+}
+
+static void exynos_pcie_deassert_core_reset(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_elb_readl(exynos_pcie, PCIE_CORE_RESET);
+ val |= PCIE_CORE_RESET_ENABLE;
+
+ exynos_elb_writel(exynos_pcie, val, PCIE_CORE_RESET);
+ exynos_elb_writel(exynos_pcie, 1, PCIE_STICKY_RESET);
+ exynos_elb_writel(exynos_pcie, 1, PCIE_NONSTICKY_RESET);
+ exynos_elb_writel(exynos_pcie, 1, PCIE_APP_INIT_RESET);
+ exynos_elb_writel(exynos_pcie, 0, PCIE_APP_INIT_RESET);
+ exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_MAC_RESET);
+}
+
+static void exynos_pcie_assert_phy_reset(struct pcie_port *pp)
+{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_MAC_RESET);
+ exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_GLOBAL_RESET);
+}
+
+static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
+{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_GLOBAL_RESET);
+ exynos_elb_writel(exynos_pcie, 1, PCIE_PWR_RESET);
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET);
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_CMN_REG);
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSVREG_RESET);
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_TRSV_RESET);
+}
+
+static void exynos_pcie_init_phy(struct pcie_port *pp)
+{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ /* DCC feedback control off */
+ exynos_phy_writel(exynos_pcie, 0x29, PCIE_PHY_DCC_FEEDBACK);
+
+ /* set TX/RX impedance */
+ exynos_phy_writel(exynos_pcie, 0xd5, PCIE_PHY_IMPEDANCE);
+
+ /* set 50Mhz PHY clock */
+ exynos_phy_writel(exynos_pcie, 0x14, PCIE_PHY_PLL_DIV_0);
+ exynos_phy_writel(exynos_pcie, 0x12, PCIE_PHY_PLL_DIV_1);
+
+ /* set TX Differential output for lane 0 */
+ exynos_phy_writel(exynos_pcie, 0x7f, PCIE_PHY_TRSV0_DRV_LVL);
+
+ /* set TX Pre-emphasis Level Control for lane 0 to minimum */
+ exynos_phy_writel(exynos_pcie, 0x0, PCIE_PHY_TRSV0_EMP_LVL);
+
+ /* set RX clock and data recovery bandwidth */
+ exynos_phy_writel(exynos_pcie, 0xe7, PCIE_PHY_PLL_BIAS);
+ exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV0_RXCDR);
+ exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV1_RXCDR);
+ exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV2_RXCDR);
+ exynos_phy_writel(exynos_pcie, 0x82, PCIE_PHY_TRSV3_RXCDR);
+
+ /* change TX Pre-emphasis Level Control for lanes */
+ exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV0_EMP_LVL);
+ exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV1_EMP_LVL);
+ exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV2_EMP_LVL);
+ exynos_phy_writel(exynos_pcie, 0x39, PCIE_PHY_TRSV3_EMP_LVL);
+
+ /* set LVCC */
+ exynos_phy_writel(exynos_pcie, 0x20, PCIE_PHY_TRSV0_LVCC);
+ exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV1_LVCC);
+ exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV2_LVCC);
+ exynos_phy_writel(exynos_pcie, 0xa0, PCIE_PHY_TRSV3_LVCC);
+}
+
+static void exynos_pcie_assert_reset(struct pcie_port *pp)
+{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ if (exynos_pcie->reset_gpio >= 0)
+ devm_gpio_request_one(pp->dev, exynos_pcie->reset_gpio,
+ GPIOF_OUT_INIT_HIGH, "RESET");
+ return;
+}
+
+static int exynos_pcie_establish_link(struct pcie_port *pp)
+{
+ u32 val;
+ int count = 0;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ if (dw_pcie_link_up(pp)) {
+ dev_err(pp->dev, "Link already up\n");
+ return 0;
+ }
+
+ /* assert reset signals */
+ exynos_pcie_assert_core_reset(pp);
+ exynos_pcie_assert_phy_reset(pp);
+
+ /* de-assert phy reset */
+ exynos_pcie_deassert_phy_reset(pp);
+
+ /* initialize phy */
+ exynos_pcie_init_phy(pp);
+
+ /* pulse for common reset */
+ exynos_blk_writel(exynos_pcie, 1, PCIE_PHY_COMMON_RESET);
+ udelay(500);
+ exynos_blk_writel(exynos_pcie, 0, PCIE_PHY_COMMON_RESET);
+
+ /* de-assert core reset */
+ exynos_pcie_deassert_core_reset(pp);
+
+ /* setup root complex */
+ dw_pcie_setup_rc(pp);
+
+ /* assert reset signal */
+ exynos_pcie_assert_reset(pp);
+
+ /* assert LTSSM enable */
+ exynos_elb_writel(exynos_pcie, PCIE_ELBI_LTSSM_ENABLE,
+ PCIE_APP_LTSSM_ENABLE);
+
+ /* check if the link is up or not */
+ while (!dw_pcie_link_up(pp)) {
+ mdelay(100);
+ count++;
+ if (count == 10) {
+ while (exynos_phy_readl(exynos_pcie,
+ PCIE_PHY_PLL_LOCKED) == 0) {
+ val = exynos_blk_readl(exynos_pcie,
+ PCIE_PHY_PLL_LOCKED);
+ dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
+ }
+ dev_err(pp->dev, "PCIe Link Fail\n");
+ return -EINVAL;
+ }
+ }
+
+ dev_info(pp->dev, "Link up\n");
+
+ return 0;
+}
+
+static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ val = exynos_elb_readl(exynos_pcie, PCIE_IRQ_PULSE);
+ exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_PULSE);
+ return;
+}
+
+static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp)
+{
+ u32 val;
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+
+ /* enable INTX interrupt */
+ val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
+ IRQ_INTC_ASSERT | IRQ_INTD_ASSERT;
+ exynos_elb_writel(exynos_pcie, val, PCIE_IRQ_EN_PULSE);
+ return;
+}
+
+static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
+{
+ struct pcie_port *pp = arg;
+
+ exynos_pcie_clear_irq_pulse(pp);
+ return IRQ_HANDLED;
+}
+
+static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
+{
+ exynos_pcie_enable_irq_pulse(pp);
+ return;
+}
+
+static inline void exynos_pcie_readl_rc(struct pcie_port *pp,
+ void __iomem *dbi_base, u32 *val)
+{
+ exynos_pcie_sideband_dbi_r_mode(pp, true);
+ *val = readl(dbi_base);
+ exynos_pcie_sideband_dbi_r_mode(pp, false);
+ return;
+}
+
+static inline void exynos_pcie_writel_rc(struct pcie_port *pp,
+ u32 val, void __iomem *dbi_base)
+{
+ exynos_pcie_sideband_dbi_w_mode(pp, true);
+ writel(val, dbi_base);
+ exynos_pcie_sideband_dbi_w_mode(pp, false);
+ return;
+}
+
+static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+ u32 *val)
+{
+ int ret;
+
+ exynos_pcie_sideband_dbi_r_mode(pp, true);
+ ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val);
+ exynos_pcie_sideband_dbi_r_mode(pp, false);
+ return ret;
+}
+
+static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
+ u32 val)
+{
+ int ret;
+
+ exynos_pcie_sideband_dbi_w_mode(pp, true);
+ ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size, val);
+ exynos_pcie_sideband_dbi_w_mode(pp, false);
+ return ret;
+}
+
+static int exynos_pcie_link_up(struct pcie_port *pp)
+{
+ struct exynos_pcie *exynos_pcie = to_exynos_pcie(pp);
+ u32 val = exynos_elb_readl(exynos_pcie, PCIE_ELBI_RDLH_LINKUP);
+
+ if (val == PCIE_ELBI_LTSSM_ENABLE)
+ return 1;
+
+ return 0;
+}
+
+static void exynos_pcie_host_init(struct pcie_port *pp)
+{
+ exynos_pcie_establish_link(pp);
+ exynos_pcie_enable_interrupts(pp);
+}
+
+static struct pcie_host_ops exynos_pcie_host_ops = {
+ .readl_rc = exynos_pcie_readl_rc,
+ .writel_rc = exynos_pcie_writel_rc,
+ .rd_own_conf = exynos_pcie_rd_own_conf,
+ .wr_own_conf = exynos_pcie_wr_own_conf,
+ .link_up = exynos_pcie_link_up,
+ .host_init = exynos_pcie_host_init,
+};
+
+static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev)
+{
+ int ret;
+
+ pp->irq = platform_get_irq(pdev, 1);
+ if (!pp->irq) {
+ dev_err(&pdev->dev, "failed to get irq\n");
+ return -ENODEV;
+ }
+ ret = devm_request_irq(&pdev->dev, pp->irq, exynos_pcie_irq_handler,
+ IRQF_SHARED, "exynos-pcie", pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request irq\n");
+ return ret;
+ }
+
+ pp->root_bus_nr = -1;
+ pp->ops = &exynos_pcie_host_ops;
+
+ spin_lock_init(&pp->conf_lock);
+ ret = dw_pcie_host_init(pp);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize host\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init exynos_pcie_probe(struct platform_device *pdev)
+{
+ struct exynos_pcie *exynos_pcie;
+ struct pcie_port *pp;
+ struct device_node *np = pdev->dev.of_node;
+ struct resource *elbi_base;
+ struct resource *phy_base;
+ struct resource *block_base;
+ int ret;
+
+ exynos_pcie = devm_kzalloc(&pdev->dev, sizeof(*exynos_pcie),
+ GFP_KERNEL);
+ if (!exynos_pcie) {
+ dev_err(&pdev->dev, "no memory for exynos pcie\n");
+ return -ENOMEM;
+ }
+
+ pp = &exynos_pcie->pp;
+
+ pp->dev = &pdev->dev;
+
+ exynos_pcie->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
+
+ exynos_pcie->clk = devm_clk_get(&pdev->dev, "pcie");
+ if (IS_ERR(exynos_pcie->clk)) {
+ dev_err(&pdev->dev, "Failed to get pcie rc clock\n");
+ return PTR_ERR(exynos_pcie->clk);
+ }
+ ret = clk_prepare_enable(exynos_pcie->clk);
+ if (ret)
+ return ret;
+
+ exynos_pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
+ if (IS_ERR(exynos_pcie->bus_clk)) {
+ dev_err(&pdev->dev, "Failed to get pcie bus clock\n");
+ ret = PTR_ERR(exynos_pcie->bus_clk);
+ goto fail_clk;
+ }
+ ret = clk_prepare_enable(exynos_pcie->bus_clk);
+ if (ret)
+ goto fail_clk;
+
+ elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ exynos_pcie->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base);
+ if (IS_ERR(exynos_pcie->elbi_base)) {
+ /* clocks are already enabled; unwind them on error */
+ ret = PTR_ERR(exynos_pcie->elbi_base);
+ goto fail_bus_clk;
+ }
+
+ phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ exynos_pcie->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
+ if (IS_ERR(exynos_pcie->phy_base)) {
+ ret = PTR_ERR(exynos_pcie->phy_base);
+ goto fail_bus_clk;
+ }
+
+ block_base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ exynos_pcie->block_base = devm_ioremap_resource(&pdev->dev, block_base);
+ if (IS_ERR(exynos_pcie->block_base)) {
+ ret = PTR_ERR(exynos_pcie->block_base);
+ goto fail_bus_clk;
+ }
+
+ ret = add_pcie_port(pp, pdev);
+ if (ret < 0)
+ goto fail_bus_clk;
+
+ platform_set_drvdata(pdev, exynos_pcie);
+ return 0;
+
+fail_bus_clk:
+ clk_disable_unprepare(exynos_pcie->bus_clk);
+fail_clk:
+ clk_disable_unprepare(exynos_pcie->clk);
+ return ret;
+}
+
+static int __exit exynos_pcie_remove(struct platform_device *pdev)
+{
+ struct exynos_pcie *exynos_pcie = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(exynos_pcie->bus_clk);
+ clk_disable_unprepare(exynos_pcie->clk);
+
+ return 0;
+}
+
+static const struct of_device_id exynos_pcie_of_match[] = {
+ { .compatible = "samsung,exynos5440-pcie", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
+
+static struct platform_driver exynos_pcie_driver = {
+ .remove = __exit_p(exynos_pcie_remove),
+ .driver = {
+ .name = "exynos-pcie",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(exynos_pcie_of_match),
+ },
+};
+
+/* Exynos PCIe driver does not allow module unload */
+
+static int __init pcie_init(void)
+{
+ return platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
+}
+subsys_initcall(pcie_init);
+
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
+MODULE_DESCRIPTION("Samsung PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 13a633b1612..ce1543a584a 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -86,10 +86,6 @@ struct mvebu_sw_pci_bridge {
u16 secondary_status;
u16 membase;
u16 memlimit;
- u16 prefmembase;
- u16 prefmemlimit;
- u32 prefbaseupper;
- u32 preflimitupper;
u16 iobaseupper;
u16 iolimitupper;
u8 cappointer;
@@ -419,15 +415,7 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
break;
case PCI_PREF_MEMORY_BASE:
- *value = (bridge->prefmemlimit << 16 | bridge->prefmembase);
- break;
-
- case PCI_PREF_BASE_UPPER32:
- *value = bridge->prefbaseupper;
- break;
-
- case PCI_PREF_LIMIT_UPPER32:
- *value = bridge->preflimitupper;
+ *value = 0;
break;
case PCI_IO_BASE_UPPER16:
@@ -501,19 +489,6 @@ static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
mvebu_pcie_handle_membase_change(port);
break;
- case PCI_PREF_MEMORY_BASE:
- bridge->prefmembase = value & 0xffff;
- bridge->prefmemlimit = value >> 16;
- break;
-
- case PCI_PREF_BASE_UPPER32:
- bridge->prefbaseupper = value;
- break;
-
- case PCI_PREF_LIMIT_UPPER32:
- bridge->preflimitupper = value;
- break;
-
case PCI_IO_BASE_UPPER16:
bridge->iobaseupper = value & 0xffff;
bridge->iolimitupper = value >> 16;
@@ -750,9 +725,9 @@ mvebu_pcie_map_registers(struct platform_device *pdev,
ret = of_address_to_resource(np, 0, &regs);
if (ret)
- return NULL;
+ return ERR_PTR(ret);
- return devm_request_and_ioremap(&pdev->dev, &regs);
+ return devm_ioremap_resource(&pdev->dev, &regs);
}
static int __init mvebu_pcie_probe(struct platform_device *pdev)
@@ -842,9 +817,10 @@ static int __init mvebu_pcie_probe(struct platform_device *pdev)
continue;
port->base = mvebu_pcie_map_registers(pdev, child, port);
- if (!port->base) {
+ if (IS_ERR(port->base)) {
dev_err(&pdev->dev, "PCIe%d.%d: cannot map registers\n",
port->port, port->lane);
+ port->base = NULL;
continue;
}
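The mvebu hunks above replace the NULL-returning devm_request_and_ioremap() with devm_ioremap_resource(), which returns an ERR_PTR() on failure so the caller can propagate the real error code. A minimal sketch of the resulting idiom, using a hypothetical foo_map_registers() helper rather than the mvebu code itself:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>

/* Map the first "reg" entry of a DT node; sketch only. */
static void __iomem *foo_map_registers(struct platform_device *pdev,
				       struct device_node *np)
{
	struct resource regs;
	int ret;

	ret = of_address_to_resource(np, 0, &regs);
	if (ret)
		return ERR_PTR(ret);	/* keep the real error code */

	/* returns a valid mapping or an ERR_PTR(), never NULL */
	return devm_ioremap_resource(&pdev->dev, &regs);
}

/* Callers then test with IS_ERR()/PTR_ERR() instead of a NULL check: */
static int foo_probe_one(struct platform_device *pdev, struct device_node *np)
{
	void __iomem *base = foo_map_registers(pdev, np);

	if (IS_ERR(base))
		return PTR_ERR(base);
	return 0;
}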
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 26bdbda8ff9..c10e9ac9bbb 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -1,5 +1,5 @@
/*
- * PCIe host controller driver for Samsung EXYNOS SoCs
+ * Synopsys Designware PCIe host controller driver
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
@@ -11,74 +11,28 @@
* published by the Free Software Foundation.
*/
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
#include <linux/kernel.h>
-#include <linux/list.h>
#include <linux/module.h>
-#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_gpio.h>
-#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>
-#include <linux/platform_device.h>
-#include <linux/resource.h>
-#include <linux/signal.h>
-#include <linux/slab.h>
#include <linux/types.h>
-struct pcie_port_info {
- u32 cfg0_size;
- u32 cfg1_size;
- u32 io_size;
- u32 mem_size;
- phys_addr_t io_bus_addr;
- phys_addr_t mem_bus_addr;
-};
-
-struct pcie_port {
- struct device *dev;
- u8 controller;
- u8 root_bus_nr;
- void __iomem *dbi_base;
- void __iomem *elbi_base;
- void __iomem *phy_base;
- void __iomem *purple_base;
- u64 cfg0_base;
- void __iomem *va_cfg0_base;
- u64 cfg1_base;
- void __iomem *va_cfg1_base;
- u64 io_base;
- u64 mem_base;
- spinlock_t conf_lock;
- struct resource cfg;
- struct resource io;
- struct resource mem;
- struct pcie_port_info config;
- struct clk *clk;
- struct clk *bus_clk;
- int irq;
- int reset_gpio;
-};
-
-/*
- * Exynos PCIe IP consists of Synopsys specific part and Exynos
- * specific part. Only core block is a Synopsys designware part;
- * other parts are Exynos specific.
- */
+#include "pcie-designware.h"
/* Synopsys specific PCIE configuration registers */
#define PCIE_PORT_LINK_CONTROL 0x710
#define PORT_LINK_MODE_MASK (0x3f << 16)
+#define PORT_LINK_MODE_1_LANES (0x1 << 16)
+#define PORT_LINK_MODE_2_LANES (0x3 << 16)
#define PORT_LINK_MODE_4_LANES (0x7 << 16)
#define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C
#define PORT_LOGIC_SPEED_CHANGE (0x1 << 17)
#define PORT_LOGIC_LINK_WIDTH_MASK (0x1ff << 8)
-#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x7 << 8)
+#define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8)
+#define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8)
+#define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8)
#define PCIE_MSI_ADDR_LO 0x820
#define PCIE_MSI_ADDR_HI 0x824
@@ -108,69 +62,16 @@ struct pcie_port {
#define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET 0x91C
-/* Exynos specific PCIE configuration registers */
-
-/* PCIe ELBI registers */
-#define PCIE_IRQ_PULSE 0x000
-#define IRQ_INTA_ASSERT (0x1 << 0)
-#define IRQ_INTB_ASSERT (0x1 << 2)
-#define IRQ_INTC_ASSERT (0x1 << 4)
-#define IRQ_INTD_ASSERT (0x1 << 6)
-#define PCIE_IRQ_LEVEL 0x004
-#define PCIE_IRQ_SPECIAL 0x008
-#define PCIE_IRQ_EN_PULSE 0x00c
-#define PCIE_IRQ_EN_LEVEL 0x010
-#define PCIE_IRQ_EN_SPECIAL 0x014
-#define PCIE_PWR_RESET 0x018
-#define PCIE_CORE_RESET 0x01c
-#define PCIE_CORE_RESET_ENABLE (0x1 << 0)
-#define PCIE_STICKY_RESET 0x020
-#define PCIE_NONSTICKY_RESET 0x024
-#define PCIE_APP_INIT_RESET 0x028
-#define PCIE_APP_LTSSM_ENABLE 0x02c
-#define PCIE_ELBI_RDLH_LINKUP 0x064
-#define PCIE_ELBI_LTSSM_ENABLE 0x1
-#define PCIE_ELBI_SLV_AWMISC 0x11c
-#define PCIE_ELBI_SLV_ARMISC 0x120
-#define PCIE_ELBI_SLV_DBI_ENABLE (0x1 << 21)
-
-/* PCIe Purple registers */
-#define PCIE_PHY_GLOBAL_RESET 0x000
-#define PCIE_PHY_COMMON_RESET 0x004
-#define PCIE_PHY_CMN_REG 0x008
-#define PCIE_PHY_MAC_RESET 0x00c
-#define PCIE_PHY_PLL_LOCKED 0x010
-#define PCIE_PHY_TRSVREG_RESET 0x020
-#define PCIE_PHY_TRSV_RESET 0x024
-
-/* PCIe PHY registers */
-#define PCIE_PHY_IMPEDANCE 0x004
-#define PCIE_PHY_PLL_DIV_0 0x008
-#define PCIE_PHY_PLL_BIAS 0x00c
-#define PCIE_PHY_DCC_FEEDBACK 0x014
-#define PCIE_PHY_PLL_DIV_1 0x05c
-#define PCIE_PHY_TRSV0_EMP_LVL 0x084
-#define PCIE_PHY_TRSV0_DRV_LVL 0x088
-#define PCIE_PHY_TRSV0_RXCDR 0x0ac
-#define PCIE_PHY_TRSV0_LVCC 0x0dc
-#define PCIE_PHY_TRSV1_EMP_LVL 0x144
-#define PCIE_PHY_TRSV1_RXCDR 0x16c
-#define PCIE_PHY_TRSV1_LVCC 0x19c
-#define PCIE_PHY_TRSV2_EMP_LVL 0x204
-#define PCIE_PHY_TRSV2_RXCDR 0x22c
-#define PCIE_PHY_TRSV2_LVCC 0x25c
-#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4
-#define PCIE_PHY_TRSV3_RXCDR 0x2ec
-#define PCIE_PHY_TRSV3_LVCC 0x31c
-
-static struct hw_pci exynos_pci;
+static struct hw_pci dw_pci;
+
+unsigned long global_io_offset;
static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
{
return sys->private_data;
}
-static inline int cfg_read(void *addr, int where, int size, u32 *val)
+int cfg_read(void __iomem *addr, int where, int size, u32 *val)
{
*val = readl(addr);
@@ -184,7 +85,7 @@ static inline int cfg_read(void *addr, int where, int size, u32 *val)
return PCIBIOS_SUCCESSFUL;
}
-static inline int cfg_write(void *addr, int where, int size, u32 val)
+int cfg_write(void __iomem *addr, int where, int size, u32 val)
{
if (size == 4)
writel(val, addr);
@@ -198,155 +99,217 @@ static inline int cfg_write(void *addr, int where, int size, u32 val)
return PCIBIOS_SUCCESSFUL;
}
-static void exynos_pcie_sideband_dbi_w_mode(struct pcie_port *pp, bool on)
+static inline void dw_pcie_readl_rc(struct pcie_port *pp, u32 reg, u32 *val)
{
- u32 val;
-
- if (on) {
- val = readl(pp->elbi_base + PCIE_ELBI_SLV_AWMISC);
- val |= PCIE_ELBI_SLV_DBI_ENABLE;
- writel(val, pp->elbi_base + PCIE_ELBI_SLV_AWMISC);
- } else {
- val = readl(pp->elbi_base + PCIE_ELBI_SLV_AWMISC);
- val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
- writel(val, pp->elbi_base + PCIE_ELBI_SLV_AWMISC);
- }
-}
-
-static void exynos_pcie_sideband_dbi_r_mode(struct pcie_port *pp, bool on)
-{
- u32 val;
-
- if (on) {
- val = readl(pp->elbi_base + PCIE_ELBI_SLV_ARMISC);
- val |= PCIE_ELBI_SLV_DBI_ENABLE;
- writel(val, pp->elbi_base + PCIE_ELBI_SLV_ARMISC);
- } else {
- val = readl(pp->elbi_base + PCIE_ELBI_SLV_ARMISC);
- val &= ~PCIE_ELBI_SLV_DBI_ENABLE;
- writel(val, pp->elbi_base + PCIE_ELBI_SLV_ARMISC);
- }
-}
-
-static inline void readl_rc(struct pcie_port *pp, void *dbi_base, u32 *val)
-{
- exynos_pcie_sideband_dbi_r_mode(pp, true);
- *val = readl(dbi_base);
- exynos_pcie_sideband_dbi_r_mode(pp, false);
- return;
+ if (pp->ops->readl_rc)
+ pp->ops->readl_rc(pp, pp->dbi_base + reg, val);
+ else
+ *val = readl(pp->dbi_base + reg);
}
-static inline void writel_rc(struct pcie_port *pp, u32 val, void *dbi_base)
+static inline void dw_pcie_writel_rc(struct pcie_port *pp, u32 val, u32 reg)
{
- exynos_pcie_sideband_dbi_w_mode(pp, true);
- writel(val, dbi_base);
- exynos_pcie_sideband_dbi_w_mode(pp, false);
- return;
+ if (pp->ops->writel_rc)
+ pp->ops->writel_rc(pp, val, pp->dbi_base + reg);
+ else
+ writel(val, pp->dbi_base + reg);
}
-static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
+int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size,
u32 *val)
{
int ret;
- exynos_pcie_sideband_dbi_r_mode(pp, true);
- ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val);
- exynos_pcie_sideband_dbi_r_mode(pp, false);
+ if (pp->ops->rd_own_conf)
+ ret = pp->ops->rd_own_conf(pp, where, size, val);
+ else
+ ret = cfg_read(pp->dbi_base + (where & ~0x3), where, size, val);
+
return ret;
}
-static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
+int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size,
u32 val)
{
int ret;
- exynos_pcie_sideband_dbi_w_mode(pp, true);
- ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size, val);
- exynos_pcie_sideband_dbi_w_mode(pp, false);
+ if (pp->ops->wr_own_conf)
+ ret = pp->ops->wr_own_conf(pp, where, size, val);
+ else
+ ret = cfg_write(pp->dbi_base + (where & ~0x3), where, size,
+ val);
+
return ret;
}
-static void exynos_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
+int dw_pcie_link_up(struct pcie_port *pp)
+{
+ if (pp->ops->link_up)
+ return pp->ops->link_up(pp);
+ else
+ return 0;
+}
+
+int __init dw_pcie_host_init(struct pcie_port *pp)
{
+ struct device_node *np = pp->dev->of_node;
+ struct of_pci_range range;
+ struct of_pci_range_parser parser;
u32 val;
- void __iomem *dbi_base = pp->dbi_base;
- /* Program viewport 0 : OUTBOUND : CFG0 */
- val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0;
- writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
- writel_rc(pp, pp->cfg0_base, dbi_base + PCIE_ATU_LOWER_BASE);
- writel_rc(pp, (pp->cfg0_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE);
- writel_rc(pp, pp->cfg0_base + pp->config.cfg0_size - 1,
- dbi_base + PCIE_ATU_LIMIT);
- writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET);
- writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET);
- writel_rc(pp, PCIE_ATU_TYPE_CFG0, dbi_base + PCIE_ATU_CR1);
- val = PCIE_ATU_ENABLE;
- writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
+ if (of_pci_range_parser_init(&parser, np)) {
+ dev_err(pp->dev, "missing ranges property\n");
+ return -EINVAL;
+ }
+
+ /* Get the I/O and memory ranges from DT */
+ for_each_of_pci_range(&parser, &range) {
+ unsigned long restype = range.flags & IORESOURCE_TYPE_BITS;
+ if (restype == IORESOURCE_IO) {
+ of_pci_range_to_resource(&range, np, &pp->io);
+ pp->io.name = "I/O";
+ pp->io.start = max_t(resource_size_t,
+ PCIBIOS_MIN_IO,
+ range.pci_addr + global_io_offset);
+ pp->io.end = min_t(resource_size_t,
+ IO_SPACE_LIMIT,
+ range.pci_addr + range.size
+ + global_io_offset);
+ pp->config.io_size = resource_size(&pp->io);
+ pp->config.io_bus_addr = range.pci_addr;
+ }
+ if (restype == IORESOURCE_MEM) {
+ of_pci_range_to_resource(&range, np, &pp->mem);
+ pp->mem.name = "MEM";
+ pp->config.mem_size = resource_size(&pp->mem);
+ pp->config.mem_bus_addr = range.pci_addr;
+ }
+ if (restype == 0) {
+ of_pci_range_to_resource(&range, np, &pp->cfg);
+ pp->config.cfg0_size = resource_size(&pp->cfg)/2;
+ pp->config.cfg1_size = resource_size(&pp->cfg)/2;
+ }
+ }
+
+ if (!pp->dbi_base) {
+ pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start,
+ resource_size(&pp->cfg));
+ if (!pp->dbi_base) {
+ dev_err(pp->dev, "error with ioremap\n");
+ return -ENOMEM;
+ }
+ }
+
+ pp->cfg0_base = pp->cfg.start;
+ pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
+ pp->io_base = pp->io.start;
+ pp->mem_base = pp->mem.start;
+
+ pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
+ pp->config.cfg0_size);
+ if (!pp->va_cfg0_base) {
+ dev_err(pp->dev, "error with ioremap in function\n");
+ return -ENOMEM;
+ }
+ pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
+ pp->config.cfg1_size);
+ if (!pp->va_cfg1_base) {
+ dev_err(pp->dev, "error with ioremap\n");
+ return -ENOMEM;
+ }
+
+ if (of_property_read_u32(np, "num-lanes", &pp->lanes)) {
+ dev_err(pp->dev, "Failed to parse the number of lanes\n");
+ return -EINVAL;
+ }
+
+ if (pp->ops->host_init)
+ pp->ops->host_init(pp);
+
+ dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
+
+ /* program correct class for RC */
+ dw_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
+
+ dw_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
+ val |= PORT_LOGIC_SPEED_CHANGE;
+ dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
+
+ dw_pci.nr_controllers = 1;
+ dw_pci.private_data = (void **)&pp;
+
+ pci_common_init(&dw_pci);
+ pci_assign_unassigned_resources();
+#ifdef CONFIG_PCI_DOMAINS
+ dw_pci.domain++;
+#endif
+
+ return 0;
}
-static void exynos_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
+static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev)
{
- u32 val;
- void __iomem *dbi_base = pp->dbi_base;
+ /* Program viewport 0 : OUTBOUND : CFG0 */
+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
+ PCIE_ATU_VIEWPORT);
+ dw_pcie_writel_rc(pp, pp->cfg0_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->cfg0_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->cfg0_base + pp->config.cfg0_size - 1,
+ PCIE_ATU_LIMIT);
+ dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
+ dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG0, PCIE_ATU_CR1);
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+}
+static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev)
+{
/* Program viewport 1 : OUTBOUND : CFG1 */
- val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1;
- writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
- writel_rc(pp, PCIE_ATU_TYPE_CFG1, dbi_base + PCIE_ATU_CR1);
- val = PCIE_ATU_ENABLE;
- writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
- writel_rc(pp, pp->cfg1_base, dbi_base + PCIE_ATU_LOWER_BASE);
- writel_rc(pp, (pp->cfg1_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE);
- writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1,
- dbi_base + PCIE_ATU_LIMIT);
- writel_rc(pp, busdev, dbi_base + PCIE_ATU_LOWER_TARGET);
- writel_rc(pp, 0, dbi_base + PCIE_ATU_UPPER_TARGET);
+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
+ PCIE_ATU_VIEWPORT);
+ dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ dw_pcie_writel_rc(pp, pp->cfg1_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->cfg1_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->cfg1_base + pp->config.cfg1_size - 1,
+ PCIE_ATU_LIMIT);
+ dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
}
-static void exynos_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
+static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp)
{
- u32 val;
- void __iomem *dbi_base = pp->dbi_base;
-
/* Program viewport 0 : OUTBOUND : MEM */
- val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0;
- writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
- writel_rc(pp, PCIE_ATU_TYPE_MEM, dbi_base + PCIE_ATU_CR1);
- val = PCIE_ATU_ENABLE;
- writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
- writel_rc(pp, pp->mem_base, dbi_base + PCIE_ATU_LOWER_BASE);
- writel_rc(pp, (pp->mem_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE);
- writel_rc(pp, pp->mem_base + pp->config.mem_size - 1,
- dbi_base + PCIE_ATU_LIMIT);
- writel_rc(pp, pp->config.mem_bus_addr,
- dbi_base + PCIE_ATU_LOWER_TARGET);
- writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
- dbi_base + PCIE_ATU_UPPER_TARGET);
+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
+ PCIE_ATU_VIEWPORT);
+ dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ dw_pcie_writel_rc(pp, pp->mem_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->mem_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->mem_base + pp->config.mem_size - 1,
+ PCIE_ATU_LIMIT);
+ dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
+ PCIE_ATU_UPPER_TARGET);
}
-static void exynos_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
+static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp)
{
- u32 val;
- void __iomem *dbi_base = pp->dbi_base;
-
/* Program viewport 1 : OUTBOUND : IO */
- val = PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1;
- writel_rc(pp, val, dbi_base + PCIE_ATU_VIEWPORT);
- writel_rc(pp, PCIE_ATU_TYPE_IO, dbi_base + PCIE_ATU_CR1);
- val = PCIE_ATU_ENABLE;
- writel_rc(pp, val, dbi_base + PCIE_ATU_CR2);
- writel_rc(pp, pp->io_base, dbi_base + PCIE_ATU_LOWER_BASE);
- writel_rc(pp, (pp->io_base >> 32), dbi_base + PCIE_ATU_UPPER_BASE);
- writel_rc(pp, pp->io_base + pp->config.io_size - 1,
- dbi_base + PCIE_ATU_LIMIT);
- writel_rc(pp, pp->config.io_bus_addr,
- dbi_base + PCIE_ATU_LOWER_TARGET);
- writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
- dbi_base + PCIE_ATU_UPPER_TARGET);
-}
-
-static int exynos_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
+ PCIE_ATU_VIEWPORT);
+ dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
+ dw_pcie_writel_rc(pp, pp->io_base, PCIE_ATU_LOWER_BASE);
+ dw_pcie_writel_rc(pp, (pp->io_base >> 32), PCIE_ATU_UPPER_BASE);
+ dw_pcie_writel_rc(pp, pp->io_base + pp->config.io_size - 1,
+ PCIE_ATU_LIMIT);
+ dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET);
+ dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
+ PCIE_ATU_UPPER_TARGET);
+}
+
+static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
u32 devfn, int where, int size, u32 *val)
{
int ret = PCIBIOS_SUCCESSFUL;
@@ -357,19 +320,19 @@ static int exynos_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
address = where & ~0x3;
if (bus->parent->number == pp->root_bus_nr) {
- exynos_pcie_prog_viewport_cfg0(pp, busdev);
+ dw_pcie_prog_viewport_cfg0(pp, busdev);
ret = cfg_read(pp->va_cfg0_base + address, where, size, val);
- exynos_pcie_prog_viewport_mem_outbound(pp);
+ dw_pcie_prog_viewport_mem_outbound(pp);
} else {
- exynos_pcie_prog_viewport_cfg1(pp, busdev);
+ dw_pcie_prog_viewport_cfg1(pp, busdev);
ret = cfg_read(pp->va_cfg1_base + address, where, size, val);
- exynos_pcie_prog_viewport_io_outbound(pp);
+ dw_pcie_prog_viewport_io_outbound(pp);
}
return ret;
}
-static int exynos_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
u32 devfn, int where, int size, u32 val)
{
int ret = PCIBIOS_SUCCESSFUL;
@@ -380,59 +343,25 @@ static int exynos_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
address = where & ~0x3;
if (bus->parent->number == pp->root_bus_nr) {
- exynos_pcie_prog_viewport_cfg0(pp, busdev);
+ dw_pcie_prog_viewport_cfg0(pp, busdev);
ret = cfg_write(pp->va_cfg0_base + address, where, size, val);
- exynos_pcie_prog_viewport_mem_outbound(pp);
+ dw_pcie_prog_viewport_mem_outbound(pp);
} else {
- exynos_pcie_prog_viewport_cfg1(pp, busdev);
+ dw_pcie_prog_viewport_cfg1(pp, busdev);
ret = cfg_write(pp->va_cfg1_base + address, where, size, val);
- exynos_pcie_prog_viewport_io_outbound(pp);
+ dw_pcie_prog_viewport_io_outbound(pp);
}
return ret;
}
-static unsigned long global_io_offset;
-
-static int exynos_pcie_setup(int nr, struct pci_sys_data *sys)
-{
- struct pcie_port *pp;
-
- pp = sys_to_pcie(sys);
-
- if (!pp)
- return 0;
-
- if (global_io_offset < SZ_1M && pp->config.io_size > 0) {
- sys->io_offset = global_io_offset - pp->config.io_bus_addr;
- pci_ioremap_io(sys->io_offset, pp->io.start);
- global_io_offset += SZ_64K;
- pci_add_resource_offset(&sys->resources, &pp->io,
- sys->io_offset);
- }
-
- sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr;
- pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
-
- return 1;
-}
-
-static int exynos_pcie_link_up(struct pcie_port *pp)
-{
- u32 val = readl(pp->elbi_base + PCIE_ELBI_RDLH_LINKUP);
-
- if (val == PCIE_ELBI_LTSSM_ENABLE)
- return 1;
- return 0;
-}
-
-static int exynos_pcie_valid_config(struct pcie_port *pp,
+static int dw_pcie_valid_config(struct pcie_port *pp,
struct pci_bus *bus, int dev)
{
/* If there is no link, then there is no device */
if (bus->number != pp->root_bus_nr) {
- if (!exynos_pcie_link_up(pp))
+ if (!dw_pcie_link_up(pp))
return 0;
}
@@ -450,7 +379,7 @@ static int exynos_pcie_valid_config(struct pcie_port *pp,
return 1;
}
-static int exynos_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
+static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
int size, u32 *val)
{
struct pcie_port *pp = sys_to_pcie(bus->sysdata);
@@ -462,23 +391,23 @@ static int exynos_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
return -EINVAL;
}
- if (exynos_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
+ if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
*val = 0xffffffff;
return PCIBIOS_DEVICE_NOT_FOUND;
}
spin_lock_irqsave(&pp->conf_lock, flags);
if (bus->number != pp->root_bus_nr)
- ret = exynos_pcie_rd_other_conf(pp, bus, devfn,
+ ret = dw_pcie_rd_other_conf(pp, bus, devfn,
where, size, val);
else
- ret = exynos_pcie_rd_own_conf(pp, where, size, val);
+ ret = dw_pcie_rd_own_conf(pp, where, size, val);
spin_unlock_irqrestore(&pp->conf_lock, flags);
return ret;
}
-static int exynos_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
+static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
int where, int size, u32 val)
{
struct pcie_port *pp = sys_to_pcie(bus->sysdata);
@@ -490,34 +419,56 @@ static int exynos_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
return -EINVAL;
}
- if (exynos_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
+ if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
return PCIBIOS_DEVICE_NOT_FOUND;
spin_lock_irqsave(&pp->conf_lock, flags);
if (bus->number != pp->root_bus_nr)
- ret = exynos_pcie_wr_other_conf(pp, bus, devfn,
+ ret = dw_pcie_wr_other_conf(pp, bus, devfn,
where, size, val);
else
- ret = exynos_pcie_wr_own_conf(pp, where, size, val);
+ ret = dw_pcie_wr_own_conf(pp, where, size, val);
spin_unlock_irqrestore(&pp->conf_lock, flags);
return ret;
}
-static struct pci_ops exynos_pcie_ops = {
- .read = exynos_pcie_rd_conf,
- .write = exynos_pcie_wr_conf,
+static struct pci_ops dw_pcie_ops = {
+ .read = dw_pcie_rd_conf,
+ .write = dw_pcie_wr_conf,
};
-static struct pci_bus *exynos_pcie_scan_bus(int nr,
- struct pci_sys_data *sys)
+int dw_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+ struct pcie_port *pp;
+
+ pp = sys_to_pcie(sys);
+
+ if (!pp)
+ return 0;
+
+ if (global_io_offset < SZ_1M && pp->config.io_size > 0) {
+ sys->io_offset = global_io_offset - pp->config.io_bus_addr;
+ pci_ioremap_io(sys->io_offset, pp->io.start);
+ global_io_offset += SZ_64K;
+ pci_add_resource_offset(&sys->resources, &pp->io,
+ sys->io_offset);
+ }
+
+ sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr;
+ pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
+
+ return 1;
+}
+
+struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
struct pci_bus *bus;
struct pcie_port *pp = sys_to_pcie(sys);
if (pp) {
pp->root_bus_nr = sys->busnr;
- bus = pci_scan_root_bus(NULL, sys->busnr, &exynos_pcie_ops,
+ bus = pci_scan_root_bus(NULL, sys->busnr, &dw_pcie_ops,
sys, &sys->resources);
} else {
bus = NULL;
@@ -527,531 +478,88 @@ static struct pci_bus *exynos_pcie_scan_bus(int nr,
return bus;
}
-static int exynos_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
struct pcie_port *pp = sys_to_pcie(dev->bus->sysdata);
return pp->irq;
}
-static struct hw_pci exynos_pci = {
- .setup = exynos_pcie_setup,
- .scan = exynos_pcie_scan_bus,
- .map_irq = exynos_pcie_map_irq,
+static struct hw_pci dw_pci = {
+ .setup = dw_pcie_setup,
+ .scan = dw_pcie_scan_bus,
+ .map_irq = dw_pcie_map_irq,
};
-static void exynos_pcie_setup_rc(struct pcie_port *pp)
+void dw_pcie_setup_rc(struct pcie_port *pp)
{
struct pcie_port_info *config = &pp->config;
- void __iomem *dbi_base = pp->dbi_base;
u32 val;
u32 membase;
u32 memlimit;
/* set the number of lanes */
- readl_rc(pp, dbi_base + PCIE_PORT_LINK_CONTROL, &val);
+ dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val);
val &= ~PORT_LINK_MODE_MASK;
- val |= PORT_LINK_MODE_4_LANES;
- writel_rc(pp, val, dbi_base + PCIE_PORT_LINK_CONTROL);
+ switch (pp->lanes) {
+ case 1:
+ val |= PORT_LINK_MODE_1_LANES;
+ break;
+ case 2:
+ val |= PORT_LINK_MODE_2_LANES;
+ break;
+ case 4:
+ val |= PORT_LINK_MODE_4_LANES;
+ break;
+ }
+ dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL);
/* set link width speed control register */
- readl_rc(pp, dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL, &val);
+ dw_pcie_readl_rc(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, &val);
val &= ~PORT_LOGIC_LINK_WIDTH_MASK;
- val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
- writel_rc(pp, val, dbi_base + PCIE_LINK_WIDTH_SPEED_CONTROL);
+ switch (pp->lanes) {
+ case 1:
+ val |= PORT_LOGIC_LINK_WIDTH_1_LANES;
+ break;
+ case 2:
+ val |= PORT_LOGIC_LINK_WIDTH_2_LANES;
+ break;
+ case 4:
+ val |= PORT_LOGIC_LINK_WIDTH_4_LANES;
+ break;
+ }
+ dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL);
/* setup RC BARs */
- writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_0);
- writel_rc(pp, 0x00000004, dbi_base + PCI_BASE_ADDRESS_1);
+ dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_0);
+ dw_pcie_writel_rc(pp, 0x00000004, PCI_BASE_ADDRESS_1);
/* setup interrupt pins */
- readl_rc(pp, dbi_base + PCI_INTERRUPT_LINE, &val);
+ dw_pcie_readl_rc(pp, PCI_INTERRUPT_LINE, &val);
val &= 0xffff00ff;
val |= 0x00000100;
- writel_rc(pp, val, dbi_base + PCI_INTERRUPT_LINE);
+ dw_pcie_writel_rc(pp, val, PCI_INTERRUPT_LINE);
/* setup bus numbers */
- readl_rc(pp, dbi_base + PCI_PRIMARY_BUS, &val);
+ dw_pcie_readl_rc(pp, PCI_PRIMARY_BUS, &val);
val &= 0xff000000;
val |= 0x00010100;
- writel_rc(pp, val, dbi_base + PCI_PRIMARY_BUS);
+ dw_pcie_writel_rc(pp, val, PCI_PRIMARY_BUS);
/* setup memory base, memory limit */
membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
memlimit = (config->mem_size + (u32)pp->mem_base) & 0xfff00000;
val = memlimit | membase;
- writel_rc(pp, val, dbi_base + PCI_MEMORY_BASE);
+ dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE);
/* setup command register */
- readl_rc(pp, dbi_base + PCI_COMMAND, &val);
+ dw_pcie_readl_rc(pp, PCI_COMMAND, &val);
val &= 0xffff0000;
val |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
PCI_COMMAND_MASTER | PCI_COMMAND_SERR;
- writel_rc(pp, val, dbi_base + PCI_COMMAND);
-}
-
-static void exynos_pcie_assert_core_reset(struct pcie_port *pp)
-{
- u32 val;
- void __iomem *elbi_base = pp->elbi_base;
-
- val = readl(elbi_base + PCIE_CORE_RESET);
- val &= ~PCIE_CORE_RESET_ENABLE;
- writel(val, elbi_base + PCIE_CORE_RESET);
- writel(0, elbi_base + PCIE_PWR_RESET);
- writel(0, elbi_base + PCIE_STICKY_RESET);
- writel(0, elbi_base + PCIE_NONSTICKY_RESET);
-}
-
-static void exynos_pcie_deassert_core_reset(struct pcie_port *pp)
-{
- u32 val;
- void __iomem *elbi_base = pp->elbi_base;
- void __iomem *purple_base = pp->purple_base;
-
- val = readl(elbi_base + PCIE_CORE_RESET);
- val |= PCIE_CORE_RESET_ENABLE;
- writel(val, elbi_base + PCIE_CORE_RESET);
- writel(1, elbi_base + PCIE_STICKY_RESET);
- writel(1, elbi_base + PCIE_NONSTICKY_RESET);
- writel(1, elbi_base + PCIE_APP_INIT_RESET);
- writel(0, elbi_base + PCIE_APP_INIT_RESET);
- writel(1, purple_base + PCIE_PHY_MAC_RESET);
-}
-
-static void exynos_pcie_assert_phy_reset(struct pcie_port *pp)
-{
- void __iomem *purple_base = pp->purple_base;
-
- writel(0, purple_base + PCIE_PHY_MAC_RESET);
- writel(1, purple_base + PCIE_PHY_GLOBAL_RESET);
-}
-
-static void exynos_pcie_deassert_phy_reset(struct pcie_port *pp)
-{
- void __iomem *elbi_base = pp->elbi_base;
- void __iomem *purple_base = pp->purple_base;
-
- writel(0, purple_base + PCIE_PHY_GLOBAL_RESET);
- writel(1, elbi_base + PCIE_PWR_RESET);
- writel(0, purple_base + PCIE_PHY_COMMON_RESET);
- writel(0, purple_base + PCIE_PHY_CMN_REG);
- writel(0, purple_base + PCIE_PHY_TRSVREG_RESET);
- writel(0, purple_base + PCIE_PHY_TRSV_RESET);
-}
-
-static void exynos_pcie_init_phy(struct pcie_port *pp)
-{
- void __iomem *phy_base = pp->phy_base;
-
- /* DCC feedback control off */
- writel(0x29, phy_base + PCIE_PHY_DCC_FEEDBACK);
-
- /* set TX/RX impedance */
- writel(0xd5, phy_base + PCIE_PHY_IMPEDANCE);
-
- /* set 50Mhz PHY clock */
- writel(0x14, phy_base + PCIE_PHY_PLL_DIV_0);
- writel(0x12, phy_base + PCIE_PHY_PLL_DIV_1);
-
- /* set TX Differential output for lane 0 */
- writel(0x7f, phy_base + PCIE_PHY_TRSV0_DRV_LVL);
-
- /* set TX Pre-emphasis Level Control for lane 0 to minimum */
- writel(0x0, phy_base + PCIE_PHY_TRSV0_EMP_LVL);
-
- /* set RX clock and data recovery bandwidth */
- writel(0xe7, phy_base + PCIE_PHY_PLL_BIAS);
- writel(0x82, phy_base + PCIE_PHY_TRSV0_RXCDR);
- writel(0x82, phy_base + PCIE_PHY_TRSV1_RXCDR);
- writel(0x82, phy_base + PCIE_PHY_TRSV2_RXCDR);
- writel(0x82, phy_base + PCIE_PHY_TRSV3_RXCDR);
-
- /* change TX Pre-emphasis Level Control for lanes */
- writel(0x39, phy_base + PCIE_PHY_TRSV0_EMP_LVL);
- writel(0x39, phy_base + PCIE_PHY_TRSV1_EMP_LVL);
- writel(0x39, phy_base + PCIE_PHY_TRSV2_EMP_LVL);
- writel(0x39, phy_base + PCIE_PHY_TRSV3_EMP_LVL);
-
- /* set LVCC */
- writel(0x20, phy_base + PCIE_PHY_TRSV0_LVCC);
- writel(0xa0, phy_base + PCIE_PHY_TRSV1_LVCC);
- writel(0xa0, phy_base + PCIE_PHY_TRSV2_LVCC);
- writel(0xa0, phy_base + PCIE_PHY_TRSV3_LVCC);
-}
-
-static void exynos_pcie_assert_reset(struct pcie_port *pp)
-{
- if (pp->reset_gpio >= 0)
- devm_gpio_request_one(pp->dev, pp->reset_gpio,
- GPIOF_OUT_INIT_HIGH, "RESET");
- return;
-}
-
-static int exynos_pcie_establish_link(struct pcie_port *pp)
-{
- u32 val;
- int count = 0;
- void __iomem *elbi_base = pp->elbi_base;
- void __iomem *purple_base = pp->purple_base;
- void __iomem *phy_base = pp->phy_base;
-
- if (exynos_pcie_link_up(pp)) {
- dev_err(pp->dev, "Link already up\n");
- return 0;
- }
-
- /* assert reset signals */
- exynos_pcie_assert_core_reset(pp);
- exynos_pcie_assert_phy_reset(pp);
-
- /* de-assert phy reset */
- exynos_pcie_deassert_phy_reset(pp);
-
- /* initialize phy */
- exynos_pcie_init_phy(pp);
-
- /* pulse for common reset */
- writel(1, purple_base + PCIE_PHY_COMMON_RESET);
- udelay(500);
- writel(0, purple_base + PCIE_PHY_COMMON_RESET);
-
- /* de-assert core reset */
- exynos_pcie_deassert_core_reset(pp);
-
- /* setup root complex */
- exynos_pcie_setup_rc(pp);
-
- /* assert reset signal */
- exynos_pcie_assert_reset(pp);
-
- /* assert LTSSM enable */
- writel(PCIE_ELBI_LTSSM_ENABLE, elbi_base + PCIE_APP_LTSSM_ENABLE);
-
- /* check if the link is up or not */
- while (!exynos_pcie_link_up(pp)) {
- mdelay(100);
- count++;
- if (count == 10) {
- while (readl(phy_base + PCIE_PHY_PLL_LOCKED) == 0) {
- val = readl(purple_base + PCIE_PHY_PLL_LOCKED);
- dev_info(pp->dev, "PLL Locked: 0x%x\n", val);
- }
- dev_err(pp->dev, "PCIe Link Fail\n");
- return -EINVAL;
- }
- }
-
- dev_info(pp->dev, "Link up\n");
-
- return 0;
-}
-
-static void exynos_pcie_clear_irq_pulse(struct pcie_port *pp)
-{
- u32 val;
- void __iomem *elbi_base = pp->elbi_base;
-
- val = readl(elbi_base + PCIE_IRQ_PULSE);
- writel(val, elbi_base + PCIE_IRQ_PULSE);
- return;
-}
-
-static void exynos_pcie_enable_irq_pulse(struct pcie_port *pp)
-{
- u32 val;
- void __iomem *elbi_base = pp->elbi_base;
-
- /* enable INTX interrupt */
- val = IRQ_INTA_ASSERT | IRQ_INTB_ASSERT |
- IRQ_INTC_ASSERT | IRQ_INTD_ASSERT,
- writel(val, elbi_base + PCIE_IRQ_EN_PULSE);
- return;
-}
-
-static irqreturn_t exynos_pcie_irq_handler(int irq, void *arg)
-{
- struct pcie_port *pp = arg;
-
- exynos_pcie_clear_irq_pulse(pp);
- return IRQ_HANDLED;
-}
-
-static void exynos_pcie_enable_interrupts(struct pcie_port *pp)
-{
- exynos_pcie_enable_irq_pulse(pp);
- return;
-}
-
-static void exynos_pcie_host_init(struct pcie_port *pp)
-{
- struct pcie_port_info *config = &pp->config;
- u32 val;
-
- /* Keep first 64K for IO */
- pp->cfg0_base = pp->cfg.start;
- pp->cfg1_base = pp->cfg.start + config->cfg0_size;
- pp->io_base = pp->io.start;
- pp->mem_base = pp->mem.start;
-
- /* enable link */
- exynos_pcie_establish_link(pp);
-
- exynos_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0);
-
- /* program correct class for RC */
- exynos_pcie_wr_own_conf(pp, PCI_CLASS_DEVICE, 2, PCI_CLASS_BRIDGE_PCI);
-
- exynos_pcie_rd_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, &val);
- val |= PORT_LOGIC_SPEED_CHANGE;
- exynos_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val);
-
- exynos_pcie_enable_interrupts(pp);
-}
-
-static int add_pcie_port(struct pcie_port *pp, struct platform_device *pdev)
-{
- struct resource *elbi_base;
- struct resource *phy_base;
- struct resource *purple_base;
- int ret;
-
- elbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!elbi_base) {
- dev_err(&pdev->dev, "couldn't get elbi base resource\n");
- return -EINVAL;
- }
- pp->elbi_base = devm_ioremap_resource(&pdev->dev, elbi_base);
- if (IS_ERR(pp->elbi_base))
- return PTR_ERR(pp->elbi_base);
-
- phy_base = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!phy_base) {
- dev_err(&pdev->dev, "couldn't get phy base resource\n");
- return -EINVAL;
- }
- pp->phy_base = devm_ioremap_resource(&pdev->dev, phy_base);
- if (IS_ERR(pp->phy_base))
- return PTR_ERR(pp->phy_base);
-
- purple_base = platform_get_resource(pdev, IORESOURCE_MEM, 2);
- if (!purple_base) {
- dev_err(&pdev->dev, "couldn't get purple base resource\n");
- return -EINVAL;
- }
- pp->purple_base = devm_ioremap_resource(&pdev->dev, purple_base);
- if (IS_ERR(pp->purple_base))
- return PTR_ERR(pp->purple_base);
-
- pp->irq = platform_get_irq(pdev, 1);
- if (!pp->irq) {
- dev_err(&pdev->dev, "failed to get irq\n");
- return -ENODEV;
- }
- ret = devm_request_irq(&pdev->dev, pp->irq, exynos_pcie_irq_handler,
- IRQF_SHARED, "exynos-pcie", pp);
- if (ret) {
- dev_err(&pdev->dev, "failed to request irq\n");
- return ret;
- }
-
- pp->dbi_base = devm_ioremap(&pdev->dev, pp->cfg.start,
- resource_size(&pp->cfg));
- if (!pp->dbi_base) {
- dev_err(&pdev->dev, "error with ioremap\n");
- return -ENOMEM;
- }
-
- pp->root_bus_nr = -1;
-
- spin_lock_init(&pp->conf_lock);
- exynos_pcie_host_init(pp);
- pp->va_cfg0_base = devm_ioremap(&pdev->dev, pp->cfg0_base,
- pp->config.cfg0_size);
- if (!pp->va_cfg0_base) {
- dev_err(pp->dev, "error with ioremap in function\n");
- return -ENOMEM;
- }
- pp->va_cfg1_base = devm_ioremap(&pdev->dev, pp->cfg1_base,
- pp->config.cfg1_size);
- if (!pp->va_cfg1_base) {
- dev_err(pp->dev, "error with ioremap\n");
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int __init exynos_pcie_probe(struct platform_device *pdev)
-{
- struct pcie_port *pp;
- struct device_node *np = pdev->dev.of_node;
- struct of_pci_range range;
- struct of_pci_range_parser parser;
- int ret;
-
- pp = devm_kzalloc(&pdev->dev, sizeof(*pp), GFP_KERNEL);
- if (!pp) {
- dev_err(&pdev->dev, "no memory for pcie port\n");
- return -ENOMEM;
- }
-
- pp->dev = &pdev->dev;
-
- if (of_pci_range_parser_init(&parser, np)) {
- dev_err(&pdev->dev, "missing ranges property\n");
- return -EINVAL;
- }
-
- /* Get the I/O and memory ranges from DT */
- for_each_of_pci_range(&parser, &range) {
- unsigned long restype = range.flags & IORESOURCE_TYPE_BITS;
- if (restype == IORESOURCE_IO) {
- of_pci_range_to_resource(&range, np, &pp->io);
- pp->io.name = "I/O";
- pp->io.start = max_t(resource_size_t,
- PCIBIOS_MIN_IO,
- range.pci_addr + global_io_offset);
- pp->io.end = min_t(resource_size_t,
- IO_SPACE_LIMIT,
- range.pci_addr + range.size
- + global_io_offset);
- pp->config.io_size = resource_size(&pp->io);
- pp->config.io_bus_addr = range.pci_addr;
- }
- if (restype == IORESOURCE_MEM) {
- of_pci_range_to_resource(&range, np, &pp->mem);
- pp->mem.name = "MEM";
- pp->config.mem_size = resource_size(&pp->mem);
- pp->config.mem_bus_addr = range.pci_addr;
- }
- if (restype == 0) {
- of_pci_range_to_resource(&range, np, &pp->cfg);
- pp->config.cfg0_size = resource_size(&pp->cfg)/2;
- pp->config.cfg1_size = resource_size(&pp->cfg)/2;
- }
- }
-
- pp->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
-
- pp->clk = devm_clk_get(&pdev->dev, "pcie");
- if (IS_ERR(pp->clk)) {
- dev_err(&pdev->dev, "Failed to get pcie rc clock\n");
- return PTR_ERR(pp->clk);
- }
- ret = clk_prepare_enable(pp->clk);
- if (ret)
- return ret;
-
- pp->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
- if (IS_ERR(pp->bus_clk)) {
- dev_err(&pdev->dev, "Failed to get pcie bus clock\n");
- ret = PTR_ERR(pp->bus_clk);
- goto fail_clk;
- }
- ret = clk_prepare_enable(pp->bus_clk);
- if (ret)
- goto fail_clk;
-
- ret = add_pcie_port(pp, pdev);
- if (ret < 0)
- goto fail_bus_clk;
-
- pp->controller = exynos_pci.nr_controllers;
- exynos_pci.nr_controllers = 1;
- exynos_pci.private_data = (void **)&pp;
-
- pci_common_init(&exynos_pci);
- pci_assign_unassigned_resources();
-#ifdef CONFIG_PCI_DOMAINS
- exynos_pci.domain++;
-#endif
-
- platform_set_drvdata(pdev, pp);
- return 0;
-
-fail_bus_clk:
- clk_disable_unprepare(pp->bus_clk);
-fail_clk:
- clk_disable_unprepare(pp->clk);
- return ret;
-}
-
-static int __exit exynos_pcie_remove(struct platform_device *pdev)
-{
- struct pcie_port *pp = platform_get_drvdata(pdev);
-
- clk_disable_unprepare(pp->bus_clk);
- clk_disable_unprepare(pp->clk);
-
- return 0;
-}
-
-static const struct of_device_id exynos_pcie_of_match[] = {
- { .compatible = "samsung,exynos5440-pcie", },
- {},
-};
-MODULE_DEVICE_TABLE(of, exynos_pcie_of_match);
-
-static struct platform_driver exynos_pcie_driver = {
- .remove = __exit_p(exynos_pcie_remove),
- .driver = {
- .name = "exynos-pcie",
- .owner = THIS_MODULE,
- .of_match_table = of_match_ptr(exynos_pcie_of_match),
- },
-};
-
-static int exynos_pcie_abort(unsigned long addr, unsigned int fsr,
- struct pt_regs *regs)
-{
- unsigned long pc = instruction_pointer(regs);
- unsigned long instr = *(unsigned long *)pc;
-
- WARN_ONCE(1, "pcie abort\n");
-
- /*
- * If the instruction being executed was a read,
- * make it look like it read all-ones.
- */
- if ((instr & 0x0c100000) == 0x04100000) {
- int reg = (instr >> 12) & 15;
- unsigned long val;
-
- if (instr & 0x00400000)
- val = 255;
- else
- val = -1;
-
- regs->uregs[reg] = val;
- regs->ARM_pc += 4;
- return 0;
- }
-
- if ((instr & 0x0e100090) == 0x00100090) {
- int reg = (instr >> 12) & 15;
-
- regs->uregs[reg] = -1;
- regs->ARM_pc += 4;
- return 0;
- }
-
- return 1;
-}
-
-/* Exynos PCIe driver does not allow module unload */
-
-static int __init pcie_init(void)
-{
- hook_fault_code(16 + 6, exynos_pcie_abort, SIGBUS, 0,
- "imprecise external abort");
-
- platform_driver_probe(&exynos_pcie_driver, exynos_pcie_probe);
-
- return 0;
+ dw_pcie_writel_rc(pp, val, PCI_COMMAND);
}
-subsys_initcall(pcie_init);
MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
-MODULE_DESCRIPTION("Samsung PCIe host controller driver");
+MODULE_DESCRIPTION("Designware PCIe host controller driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
new file mode 100644
index 00000000000..133820f1da9
--- /dev/null
+++ b/drivers/pci/host/pcie-designware.h
@@ -0,0 +1,65 @@
+/*
+ * Synopsys Designware PCIe host controller driver
+ *
+ * Copyright (C) 2013 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+struct pcie_port_info {
+ u32 cfg0_size;
+ u32 cfg1_size;
+ u32 io_size;
+ u32 mem_size;
+ phys_addr_t io_bus_addr;
+ phys_addr_t mem_bus_addr;
+};
+
+struct pcie_port {
+ struct device *dev;
+ u8 root_bus_nr;
+ void __iomem *dbi_base;
+ u64 cfg0_base;
+ void __iomem *va_cfg0_base;
+ u64 cfg1_base;
+ void __iomem *va_cfg1_base;
+ u64 io_base;
+ u64 mem_base;
+ spinlock_t conf_lock;
+ struct resource cfg;
+ struct resource io;
+ struct resource mem;
+ struct pcie_port_info config;
+ int irq;
+ u32 lanes;
+ struct pcie_host_ops *ops;
+};
+
+struct pcie_host_ops {
+ void (*readl_rc)(struct pcie_port *pp,
+ void __iomem *dbi_base, u32 *val);
+ void (*writel_rc)(struct pcie_port *pp,
+ u32 val, void __iomem *dbi_base);
+ int (*rd_own_conf)(struct pcie_port *pp, int where, int size, u32 *val);
+ int (*wr_own_conf)(struct pcie_port *pp, int where, int size, u32 val);
+ int (*link_up)(struct pcie_port *pp);
+ void (*host_init)(struct pcie_port *pp);
+};
+
+extern unsigned long global_io_offset;
+
+int cfg_read(void __iomem *addr, int where, int size, u32 *val);
+int cfg_write(void __iomem *addr, int where, int size, u32 val);
+int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, u32 val);
+int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, u32 *val);
+int dw_pcie_link_up(struct pcie_port *pp);
+void dw_pcie_setup_rc(struct pcie_port *pp);
+int dw_pcie_host_init(struct pcie_port *pp);
+int dw_pcie_setup(int nr, struct pci_sys_data *sys);
+struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys);
+int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
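The new header exports the DesignWare core to SoC glue drivers: a glue driver fills in struct pcie_host_ops with its SoC-specific callbacks and hands a struct pcie_port to dw_pcie_host_init(), which parses "ranges"/"num-lanes", maps the config space and scans the bus. A minimal sketch of such a glue driver, with hypothetical foo_* names and a made-up link-status register offset; PHY setup, resource mapping and error handling are trimmed:

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "pcie-designware.h"

struct foo_pcie {
	void __iomem *app_base;		/* SoC-specific application registers */
	struct pcie_port pp;
};

#define to_foo_pcie(x) container_of(x, struct foo_pcie, pp)

static int foo_pcie_link_up(struct pcie_port *pp)
{
	struct foo_pcie *foo = to_foo_pcie(pp);

	/* read a SoC-specific "link up" bit; offset 0x64 is an assumption */
	return !!(readl(foo->app_base + 0x64) & 0x1);
}

static void foo_pcie_host_init(struct pcie_port *pp)
{
	/* bring up the PHY, start LTSSM, enable interrupts -- SoC specific */
}

static struct pcie_host_ops foo_pcie_host_ops = {
	.link_up = foo_pcie_link_up,
	.host_init = foo_pcie_host_init,
};

static int __init foo_pcie_probe(struct platform_device *pdev)
{
	struct foo_pcie *foo;
	struct pcie_port *pp;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	pp = &foo->pp;
	pp->dev = &pdev->dev;
	pp->root_bus_nr = -1;
	pp->ops = &foo_pcie_host_ops;
	spin_lock_init(&pp->conf_lock);

	/* hand off to the shared DesignWare core */
	return dw_pcie_host_init(pp);
}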
diff --git a/drivers/pci/hotplug/Kconfig b/drivers/pci/hotplug/Kconfig
index bb7ebb22db0..0a648af8953 100644
--- a/drivers/pci/hotplug/Kconfig
+++ b/drivers/pci/hotplug/Kconfig
@@ -3,16 +3,13 @@
#
menuconfig HOTPLUG_PCI
- tristate "Support for PCI Hotplug"
+ bool "Support for PCI Hotplug"
depends on PCI && SYSFS
---help---
Say Y here if you have a motherboard with a PCI Hotplug controller.
This allows you to add and remove PCI cards while the machine is
powered up and running.
- To compile this driver as a module, choose M here: the
- module will be called pci_hotplug.
-
When in doubt, say N.
if HOTPLUG_PCI
@@ -149,7 +146,7 @@ config HOTPLUG_PCI_SGI
When in doubt, say N.
config HOTPLUG_PCI_S390
- tristate "System z PCI Hotplug Support"
+ bool "System z PCI Hotplug Support"
depends on S390 && 64BIT
help
Say Y here if you want to use the System z PCI Hotplug
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 6fdd49c6f0b..f4e02892466 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -49,6 +49,7 @@
#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
+struct acpiphp_context;
struct acpiphp_bridge;
struct acpiphp_slot;
@@ -59,6 +60,7 @@ struct slot {
struct hotplug_slot *hotplug_slot;
struct acpiphp_slot *acpi_slot;
struct hotplug_slot_info info;
+ unsigned int sun; /* ACPI _SUN (Slot User Number) value */
};
static inline const char *slot_name(struct slot *slot)
@@ -75,15 +77,11 @@ struct acpiphp_bridge {
struct list_head list;
struct list_head slots;
struct kref ref;
- acpi_handle handle;
- /* Ejectable PCI-to-PCI bridge (PCI bridge and PCI function) */
- struct acpiphp_func *func;
+ struct acpiphp_context *context;
int nr_slots;
- u32 flags;
-
/* This bus (host bridge) or Secondary bus (PCI-to-PCI bridge) */
struct pci_bus *pci_bus;
@@ -99,15 +97,13 @@ struct acpiphp_bridge {
*/
struct acpiphp_slot {
struct list_head node;
- struct acpiphp_bridge *bridge; /* parent */
+ struct pci_bus *bus;
struct list_head funcs; /* one slot may have different
objects (i.e. for each function) */
struct slot *slot;
struct mutex crit_sect;
u8 device; /* pci device# */
-
- unsigned long long sun; /* ACPI _SUN (slot unique number) */
u32 flags; /* see below */
};
@@ -119,16 +115,32 @@ struct acpiphp_slot {
* typically 8 objects per slot (i.e. for each PCI function)
*/
struct acpiphp_func {
- struct acpiphp_slot *slot; /* parent */
+ struct acpiphp_bridge *parent;
+ struct acpiphp_slot *slot;
struct list_head sibling;
- struct notifier_block nb;
- acpi_handle handle;
u8 function; /* pci function# */
u32 flags; /* see below */
};
+struct acpiphp_context {
+ acpi_handle handle;
+ struct acpiphp_func func;
+ struct acpiphp_bridge *bridge;
+ unsigned int refcount;
+};
+
+static inline struct acpiphp_context *func_to_context(struct acpiphp_func *func)
+{
+ return container_of(func, struct acpiphp_context, func);
+}
+
+static inline acpi_handle func_to_handle(struct acpiphp_func *func)
+{
+ return func_to_context(func)->handle;
+}
+
/*
* struct acpiphp_attention_info - device specific attention registration
*
@@ -142,45 +154,32 @@ struct acpiphp_attention_info
struct module *owner;
};
-/* PCI bus bridge HID */
-#define ACPI_PCI_HOST_HID "PNP0A03"
-
/* ACPI _STA method value (ignore bit 4; battery present) */
#define ACPI_STA_ALL (0x0000000f)
-/* bridge flags */
-#define BRIDGE_HAS_EJ0 (0x00000001)
-
/* slot flags */
-#define SLOT_POWEREDON (0x00000001)
-#define SLOT_ENABLED (0x00000002)
-#define SLOT_MULTIFUNCTION (0x00000004)
+#define SLOT_ENABLED (0x00000001)
/* function flags */
#define FUNC_HAS_STA (0x00000001)
#define FUNC_HAS_EJ0 (0x00000002)
-#define FUNC_HAS_PS0 (0x00000010)
-#define FUNC_HAS_PS1 (0x00000020)
-#define FUNC_HAS_PS2 (0x00000040)
-#define FUNC_HAS_PS3 (0x00000080)
-#define FUNC_HAS_DCK (0x00000100)
+#define FUNC_HAS_DCK (0x00000004)
/* function prototypes */
/* acpiphp_core.c */
int acpiphp_register_attention(struct acpiphp_attention_info*info);
int acpiphp_unregister_attention(struct acpiphp_attention_info *info);
-int acpiphp_register_hotplug_slot(struct acpiphp_slot *slot);
+int acpiphp_register_hotplug_slot(struct acpiphp_slot *slot, unsigned int sun);
void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *slot);
/* acpiphp_glue.c */
typedef int (*acpiphp_callback)(struct acpiphp_slot *slot, void *data);
int acpiphp_enable_slot(struct acpiphp_slot *slot);
-int acpiphp_disable_slot(struct acpiphp_slot *slot);
-int acpiphp_eject_slot(struct acpiphp_slot *slot);
+int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot);
u8 acpiphp_get_power_status(struct acpiphp_slot *slot);
u8 acpiphp_get_attention_status(struct acpiphp_slot *slot);
u8 acpiphp_get_latch_status(struct acpiphp_slot *slot);
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index ca8127950fc..bf2203ef130 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -155,15 +155,11 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
static int disable_slot(struct hotplug_slot *hotplug_slot)
{
struct slot *slot = hotplug_slot->private;
- int retval;
dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));
/* disable the specified slot */
- retval = acpiphp_disable_slot(slot->acpi_slot);
- if (!retval)
- retval = acpiphp_eject_slot(slot->acpi_slot);
- return retval;
+ return acpiphp_disable_and_eject_slot(slot->acpi_slot);
}
@@ -290,7 +286,8 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
}
/* callback routine to initialize 'struct slot' for each slot */
-int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
+int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot,
+ unsigned int sun)
{
struct slot *slot;
int retval = -ENOMEM;
@@ -317,12 +314,11 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot)
slot->hotplug_slot->info->adapter_status = acpiphp_get_adapter_status(slot->acpi_slot);
acpiphp_slot->slot = slot;
- snprintf(name, SLOT_NAME_SIZE, "%llu", slot->acpi_slot->sun);
+ slot->sun = sun;
+ snprintf(name, SLOT_NAME_SIZE, "%u", sun);
- retval = pci_hp_register(slot->hotplug_slot,
- acpiphp_slot->bridge->pci_bus,
- acpiphp_slot->device,
- name);
+ retval = pci_hp_register(slot->hotplug_slot, acpiphp_slot->bus,
+ acpiphp_slot->device, name);
if (retval == -EBUSY)
goto error_hpslot;
if (retval) {
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 59df8575a48..f6488adf3af 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -46,6 +46,7 @@
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/pci-acpi.h>
+#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/acpi.h>
@@ -55,28 +56,82 @@
static LIST_HEAD(bridge_list);
static DEFINE_MUTEX(bridge_mutex);
+static DEFINE_MUTEX(acpiphp_context_lock);
#define MY_NAME "acpiphp_glue"
-static void handle_hotplug_event_bridge (acpi_handle, u32, void *);
+static void handle_hotplug_event(acpi_handle handle, u32 type, void *data);
static void acpiphp_sanitize_bus(struct pci_bus *bus);
static void acpiphp_set_hpp_values(struct pci_bus *bus);
-static void hotplug_event_func(acpi_handle handle, u32 type, void *context);
-static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context);
+static void hotplug_event(acpi_handle handle, u32 type, void *data);
static void free_bridge(struct kref *kref);
-/* callback routine to check for the existence of a pci dock device */
-static acpi_status
-is_pci_dock_device(acpi_handle handle, u32 lvl, void *context, void **rv)
+static void acpiphp_context_handler(acpi_handle handle, void *context)
{
- int *count = (int *)context;
+ /* Intentionally empty. */
+}
- if (is_dock_device(handle)) {
- (*count)++;
- return AE_CTRL_TERMINATE;
- } else {
- return AE_OK;
+/**
+ * acpiphp_init_context - Create hotplug context and grab a reference to it.
+ * @handle: ACPI object handle to create the context for.
+ *
+ * Call under acpiphp_context_lock.
+ */
+static struct acpiphp_context *acpiphp_init_context(acpi_handle handle)
+{
+ struct acpiphp_context *context;
+ acpi_status status;
+
+ context = kzalloc(sizeof(*context), GFP_KERNEL);
+ if (!context)
+ return NULL;
+
+ context->handle = handle;
+ context->refcount = 1;
+ status = acpi_attach_data(handle, acpiphp_context_handler, context);
+ if (ACPI_FAILURE(status)) {
+ kfree(context);
+ return NULL;
}
+ return context;
+}
+
+/**
+ * acpiphp_get_context - Get hotplug context and grab a reference to it.
+ * @handle: ACPI object handle to get the context for.
+ *
+ * Call under acpiphp_context_lock.
+ */
+static struct acpiphp_context *acpiphp_get_context(acpi_handle handle)
+{
+ struct acpiphp_context *context = NULL;
+ acpi_status status;
+ void *data;
+
+ status = acpi_get_data(handle, acpiphp_context_handler, &data);
+ if (ACPI_SUCCESS(status)) {
+ context = data;
+ context->refcount++;
+ }
+ return context;
+}
+
+/**
+ * acpiphp_put_context - Drop a reference to ACPI hotplug context.
+ * @handle: ACPI object handle to put the context for.
+ *
+ * The context object is removed if there are no more references to it.
+ *
+ * Call under acpiphp_context_lock.
+ */
+static void acpiphp_put_context(struct acpiphp_context *context)
+{
+ if (--context->refcount)
+ return;
+
+ WARN_ON(context->bridge);
+ acpi_detach_data(context->handle, acpiphp_context_handler);
+ kfree(context);
}
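+/*
+ * Illustrative usage sketch (for reference only), mirroring
+ * handle_hotplug_event() and acpiphp_handle_to_bridge() below:
+ *
+ *	mutex_lock(&acpiphp_context_lock);
+ *	context = acpiphp_get_context(handle);
+ *	if (context) {
+ *		... use the context ...
+ *		acpiphp_put_context(context);
+ *	}
+ *	mutex_unlock(&acpiphp_context_lock);
+ */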
static inline void get_bridge(struct acpiphp_bridge *bridge)
@@ -91,25 +146,36 @@ static inline void put_bridge(struct acpiphp_bridge *bridge)
static void free_bridge(struct kref *kref)
{
+ struct acpiphp_context *context;
struct acpiphp_bridge *bridge;
struct acpiphp_slot *slot, *next;
struct acpiphp_func *func, *tmp;
+ mutex_lock(&acpiphp_context_lock);
+
bridge = container_of(kref, struct acpiphp_bridge, ref);
list_for_each_entry_safe(slot, next, &bridge->slots, node) {
- list_for_each_entry_safe(func, tmp, &slot->funcs, sibling) {
- kfree(func);
- }
+ list_for_each_entry_safe(func, tmp, &slot->funcs, sibling)
+ acpiphp_put_context(func_to_context(func));
+
kfree(slot);
}
- /* Release reference acquired by acpiphp_bridge_handle_to_function() */
- if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func)
- put_bridge(bridge->func->slot->bridge);
+ context = bridge->context;
+ /* Root bridges will not have hotplug context. */
+ if (context) {
+ /* Release the reference taken by acpiphp_enumerate_slots(). */
+ put_bridge(context->func.parent);
+ context->bridge = NULL;
+ acpiphp_put_context(context);
+ }
+
put_device(&bridge->pci_bus->dev);
pci_dev_put(bridge->pci_dev);
kfree(bridge);
+
+ mutex_unlock(&acpiphp_context_lock);
}
/*
@@ -119,15 +185,14 @@ static void free_bridge(struct kref *kref)
* TBD - figure out a way to only call fixups for
* systems that require them.
*/
-static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
- void *v)
+static void post_dock_fixups(acpi_handle not_used, u32 event, void *data)
{
- struct acpiphp_func *func = container_of(nb, struct acpiphp_func, nb);
- struct pci_bus *bus = func->slot->bridge->pci_bus;
+ struct acpiphp_context *context = data;
+ struct pci_bus *bus = context->func.slot->bus;
u32 buses;
if (!bus->self)
- return NOTIFY_OK;
+ return;
/* fixup bad _DCK function that rewrites
* secondary bridge on slot
@@ -143,12 +208,12 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
| ((unsigned int)(bus->busn_res.end) << 16);
pci_write_config_dword(bus->self, PCI_PRIMARY_BUS, buses);
}
- return NOTIFY_OK;
}
static const struct acpi_dock_ops acpiphp_dock_ops = {
- .handler = hotplug_event_func,
+ .fixup = post_dock_fixups,
+ .handler = hotplug_event,
};
/* Check whether the PCI device is managed by native PCIe hotplug driver */
@@ -182,129 +247,118 @@ static bool device_is_managed_by_native_pciehp(struct pci_dev *pdev)
static void acpiphp_dock_init(void *data)
{
- struct acpiphp_func *func = data;
+ struct acpiphp_context *context = data;
- get_bridge(func->slot->bridge);
+ get_bridge(context->func.parent);
}
static void acpiphp_dock_release(void *data)
{
- struct acpiphp_func *func = data;
+ struct acpiphp_context *context = data;
- put_bridge(func->slot->bridge);
+ put_bridge(context->func.parent);
}
/* callback routine to register each ACPI PCI slot object */
-static acpi_status
-register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
+static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
+ void **rv)
{
- struct acpiphp_bridge *bridge = (struct acpiphp_bridge *)context;
+ struct acpiphp_bridge *bridge = data;
+ struct acpiphp_context *context;
struct acpiphp_slot *slot;
struct acpiphp_func *newfunc;
- acpi_handle tmp;
acpi_status status = AE_OK;
- unsigned long long adr, sun;
- int device, function, retval, found = 0;
+ unsigned long long adr;
+ int device, function;
struct pci_bus *pbus = bridge->pci_bus;
- struct pci_dev *pdev;
+ struct pci_dev *pdev = bridge->pci_dev;
u32 val;
- if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
+ if (pdev && device_is_managed_by_native_pciehp(pdev))
return AE_OK;
status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
if (ACPI_FAILURE(status)) {
- warn("can't evaluate _ADR (%#x)\n", status);
+ acpi_handle_warn(handle, "can't evaluate _ADR (%#x)\n", status);
return AE_OK;
}
device = (adr >> 16) & 0xffff;
function = adr & 0xffff;
- pdev = bridge->pci_dev;
- if (pdev && device_is_managed_by_native_pciehp(pdev))
- return AE_OK;
-
- newfunc = kzalloc(sizeof(struct acpiphp_func), GFP_KERNEL);
- if (!newfunc)
- return AE_NO_MEMORY;
-
- newfunc->handle = handle;
+ mutex_lock(&acpiphp_context_lock);
+ context = acpiphp_init_context(handle);
+ if (!context) {
+ mutex_unlock(&acpiphp_context_lock);
+ acpi_handle_err(handle, "No hotplug context\n");
+ return AE_NOT_EXIST;
+ }
+ newfunc = &context->func;
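+ /* The function data is embedded in the context, so the two share a lifetime. */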
newfunc->function = function;
+ newfunc->parent = bridge;
+ mutex_unlock(&acpiphp_context_lock);
- if (ACPI_SUCCESS(acpi_get_handle(handle, "_EJ0", &tmp)))
+ if (acpi_has_method(handle, "_EJ0"))
newfunc->flags = FUNC_HAS_EJ0;
- if (ACPI_SUCCESS(acpi_get_handle(handle, "_STA", &tmp)))
+ if (acpi_has_method(handle, "_STA"))
newfunc->flags |= FUNC_HAS_STA;
- if (ACPI_SUCCESS(acpi_get_handle(handle, "_PS0", &tmp)))
- newfunc->flags |= FUNC_HAS_PS0;
-
- if (ACPI_SUCCESS(acpi_get_handle(handle, "_PS3", &tmp)))
- newfunc->flags |= FUNC_HAS_PS3;
-
- if (ACPI_SUCCESS(acpi_get_handle(handle, "_DCK", &tmp)))
+ if (acpi_has_method(handle, "_DCK"))
newfunc->flags |= FUNC_HAS_DCK;
- status = acpi_evaluate_integer(handle, "_SUN", NULL, &sun);
- if (ACPI_FAILURE(status)) {
- /*
- * use the count of the number of slots we've found
- * for the number of the slot
- */
- sun = bridge->nr_slots+1;
- }
-
/* search for objects that share the same slot */
list_for_each_entry(slot, &bridge->slots, node)
- if (slot->device == device) {
- if (slot->sun != sun)
- warn("sibling found, but _SUN doesn't match!\n");
- found = 1;
- break;
- }
+ if (slot->device == device)
+ goto slot_found;
- if (!found) {
- slot = kzalloc(sizeof(struct acpiphp_slot), GFP_KERNEL);
- if (!slot) {
- kfree(newfunc);
- return AE_NO_MEMORY;
- }
+ slot = kzalloc(sizeof(struct acpiphp_slot), GFP_KERNEL);
+ if (!slot) {
+ status = AE_NO_MEMORY;
+ goto err;
+ }
- slot->bridge = bridge;
- slot->device = device;
- slot->sun = sun;
- INIT_LIST_HEAD(&slot->funcs);
- mutex_init(&slot->crit_sect);
+ slot->bus = bridge->pci_bus;
+ slot->device = device;
+ INIT_LIST_HEAD(&slot->funcs);
+ mutex_init(&slot->crit_sect);
+
+ list_add_tail(&slot->node, &bridge->slots);
+
+ /* Register slots for ejectable functions only. */
+ if (acpi_pci_check_ejectable(pbus, handle) || is_dock_device(handle)) {
+ unsigned long long sun;
+ int retval;
- mutex_lock(&bridge_mutex);
- list_add_tail(&slot->node, &bridge->slots);
- mutex_unlock(&bridge_mutex);
bridge->nr_slots++;
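+ /* If the firmware does not provide _SUN, fall back to the running slot count. */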
+ status = acpi_evaluate_integer(handle, "_SUN", NULL, &sun);
+ if (ACPI_FAILURE(status))
+ sun = bridge->nr_slots;
dbg("found ACPI PCI Hotplug slot %llu at PCI %04x:%02x:%02x\n",
- slot->sun, pci_domain_nr(pbus), pbus->number, device);
- retval = acpiphp_register_hotplug_slot(slot);
+ sun, pci_domain_nr(pbus), pbus->number, device);
+
+ retval = acpiphp_register_hotplug_slot(slot, sun);
if (retval) {
+ slot->slot = NULL;
+ bridge->nr_slots--;
if (retval == -EBUSY)
warn("Slot %llu already registered by another "
- "hotplug driver\n", slot->sun);
+ "hotplug driver\n", sun);
else
warn("acpiphp_register_hotplug_slot failed "
"(err code = 0x%x)\n", retval);
- goto err_exit;
}
+ /* Even if the slot registration fails, we can still use it. */
}
+ slot_found:
newfunc->slot = slot;
- mutex_lock(&bridge_mutex);
list_add_tail(&newfunc->sibling, &slot->funcs);
- mutex_unlock(&bridge_mutex);
if (pci_bus_read_dev_vendor_id(pbus, PCI_DEVFN(device, function),
&val, 60*1000))
- slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
+ slot->flags |= SLOT_ENABLED;
if (is_dock_device(handle)) {
/* we don't want to call this device's _EJ0
@@ -313,136 +367,46 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
*/
newfunc->flags &= ~FUNC_HAS_EJ0;
if (register_hotplug_dock_device(handle,
- &acpiphp_dock_ops, newfunc,
+ &acpiphp_dock_ops, context,
acpiphp_dock_init, acpiphp_dock_release))
dbg("failed to register dock device\n");
-
- /* we need to be notified when dock events happen
- * outside of the hotplug operation, since we may
- * need to do fixups before we can hotplug.
- */
- newfunc->nb.notifier_call = post_dock_fixups;
- if (register_dock_notifier(&newfunc->nb))
- dbg("failed to register a dock notifier");
}
/* install notify handler */
if (!(newfunc->flags & FUNC_HAS_DCK)) {
- status = acpi_install_notify_handler(handle,
- ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_func,
- newfunc);
-
+ status = acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
+ handle_hotplug_event,
+ context);
if (ACPI_FAILURE(status))
- err("failed to register interrupt notify handler\n");
- } else
- status = AE_OK;
-
- return status;
-
- err_exit:
- bridge->nr_slots--;
- mutex_lock(&bridge_mutex);
- list_del(&slot->node);
- mutex_unlock(&bridge_mutex);
- kfree(slot);
- kfree(newfunc);
-
- return AE_OK;
-}
-
-
-/* see if it's worth looking at this bridge */
-static int detect_ejectable_slots(acpi_handle handle)
-{
- int found = acpi_pci_detect_ejectable(handle);
- if (!found) {
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
- is_pci_dock_device, NULL, (void *)&found, NULL);
- }
- return found;
-}
-
-/* initialize miscellaneous stuff for both root and PCI-to-PCI bridge */
-static void init_bridge_misc(struct acpiphp_bridge *bridge)
-{
- acpi_status status;
-
- /* must be added to the list prior to calling register_slot */
- mutex_lock(&bridge_mutex);
- list_add(&bridge->list, &bridge_list);
- mutex_unlock(&bridge_mutex);
-
- /* register all slot objects under this bridge */
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge->handle, (u32)1,
- register_slot, NULL, bridge, NULL);
- if (ACPI_FAILURE(status)) {
- mutex_lock(&bridge_mutex);
- list_del(&bridge->list);
- mutex_unlock(&bridge_mutex);
- return;
+ acpi_handle_err(handle,
+ "failed to install notify handler\n");
}
- /* install notify handler for P2P bridges */
- if (!pci_is_root_bus(bridge->pci_bus)) {
- if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) {
- status = acpi_remove_notify_handler(bridge->func->handle,
- ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_func);
- if (ACPI_FAILURE(status))
- err("failed to remove notify handler\n");
- }
- status = acpi_install_notify_handler(bridge->handle,
- ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_bridge,
- bridge);
-
- if (ACPI_FAILURE(status)) {
- err("failed to register interrupt notify handler\n");
- }
- }
-}
-
-
-/* find acpiphp_func from acpiphp_bridge */
-static struct acpiphp_func *acpiphp_bridge_handle_to_function(acpi_handle handle)
-{
- struct acpiphp_bridge *bridge;
- struct acpiphp_slot *slot;
- struct acpiphp_func *func = NULL;
-
- mutex_lock(&bridge_mutex);
- list_for_each_entry(bridge, &bridge_list, list) {
- list_for_each_entry(slot, &bridge->slots, node) {
- list_for_each_entry(func, &slot->funcs, sibling) {
- if (func->handle == handle) {
- get_bridge(func->slot->bridge);
- mutex_unlock(&bridge_mutex);
- return func;
- }
- }
- }
- }
- mutex_unlock(&bridge_mutex);
+ return AE_OK;
- return NULL;
+ err:
+ mutex_lock(&acpiphp_context_lock);
+ acpiphp_put_context(context);
+ mutex_unlock(&acpiphp_context_lock);
+ return status;
}
-
static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle)
{
- struct acpiphp_bridge *bridge;
-
- mutex_lock(&bridge_mutex);
- list_for_each_entry(bridge, &bridge_list, list)
- if (bridge->handle == handle) {
+ struct acpiphp_context *context;
+ struct acpiphp_bridge *bridge = NULL;
+
+ mutex_lock(&acpiphp_context_lock);
+ context = acpiphp_get_context(handle);
+ if (context) {
+ bridge = context->bridge;
+ if (bridge)
get_bridge(bridge);
- mutex_unlock(&bridge_mutex);
- return bridge;
- }
- mutex_unlock(&bridge_mutex);
- return NULL;
+ acpiphp_put_context(context);
+ }
+ mutex_unlock(&acpiphp_context_lock);
+ return bridge;
}
static void cleanup_bridge(struct acpiphp_bridge *bridge)
@@ -450,40 +414,24 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
struct acpiphp_slot *slot;
struct acpiphp_func *func;
acpi_status status;
- acpi_handle handle = bridge->handle;
-
- if (!pci_is_root_bus(bridge->pci_bus)) {
- status = acpi_remove_notify_handler(handle,
- ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_bridge);
- if (ACPI_FAILURE(status))
- err("failed to remove notify handler\n");
- }
-
- if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) {
- status = acpi_install_notify_handler(bridge->func->handle,
- ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_func,
- bridge->func);
- if (ACPI_FAILURE(status))
- err("failed to install interrupt notify handler\n");
- }
list_for_each_entry(slot, &bridge->slots, node) {
list_for_each_entry(func, &slot->funcs, sibling) {
- if (is_dock_device(func->handle)) {
- unregister_hotplug_dock_device(func->handle);
- unregister_dock_notifier(&func->nb);
- }
+ acpi_handle handle = func_to_handle(func);
+
+ if (is_dock_device(handle))
+ unregister_hotplug_dock_device(handle);
+
if (!(func->flags & FUNC_HAS_DCK)) {
- status = acpi_remove_notify_handler(func->handle,
- ACPI_SYSTEM_NOTIFY,
- handle_hotplug_event_func);
+ status = acpi_remove_notify_handler(handle,
+ ACPI_SYSTEM_NOTIFY,
+ handle_hotplug_event);
if (ACPI_FAILURE(status))
err("failed to remove notify handler\n");
}
}
- acpiphp_unregister_hotplug_slot(slot);
+ if (slot->slot)
+ acpiphp_unregister_hotplug_slot(slot);
}
mutex_lock(&bridge_mutex);
@@ -491,71 +439,6 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
mutex_unlock(&bridge_mutex);
}
-static int power_on_slot(struct acpiphp_slot *slot)
-{
- acpi_status status;
- struct acpiphp_func *func;
- int retval = 0;
-
- /* if already enabled, just skip */
- if (slot->flags & SLOT_POWEREDON)
- goto err_exit;
-
- list_for_each_entry(func, &slot->funcs, sibling) {
- if (func->flags & FUNC_HAS_PS0) {
- dbg("%s: executing _PS0\n", __func__);
- status = acpi_evaluate_object(func->handle, "_PS0", NULL, NULL);
- if (ACPI_FAILURE(status)) {
- warn("%s: _PS0 failed\n", __func__);
- retval = -1;
- goto err_exit;
- } else
- break;
- }
- }
-
- /* TBD: evaluate _STA to check if the slot is enabled */
-
- slot->flags |= SLOT_POWEREDON;
-
- err_exit:
- return retval;
-}
-
-
-static int power_off_slot(struct acpiphp_slot *slot)
-{
- acpi_status status;
- struct acpiphp_func *func;
-
- int retval = 0;
-
- /* if already disabled, just skip */
- if ((slot->flags & SLOT_POWEREDON) == 0)
- goto err_exit;
-
- list_for_each_entry(func, &slot->funcs, sibling) {
- if (func->flags & FUNC_HAS_PS3) {
- status = acpi_evaluate_object(func->handle, "_PS3", NULL, NULL);
- if (ACPI_FAILURE(status)) {
- warn("%s: _PS3 failed\n", __func__);
- retval = -1;
- goto err_exit;
- } else
- break;
- }
- }
-
- /* TBD: evaluate _STA to check if the slot is disabled */
-
- slot->flags &= (~SLOT_POWEREDON);
-
- err_exit:
- return retval;
-}
-
-
-
/**
* acpiphp_max_busnr - return the highest reserved bus number under the given bus.
* @bus: bus to start search with
@@ -583,52 +466,32 @@ static unsigned char acpiphp_max_busnr(struct pci_bus *bus)
return max;
}
-
/**
- * acpiphp_bus_add - add a new bus to acpi subsystem
- * @func: acpiphp_func of the bridge
+ * acpiphp_bus_trim - Trim device objects in an ACPI namespace subtree.
+ * @handle: ACPI device object handle to start from.
*/
-static int acpiphp_bus_add(struct acpiphp_func *func)
+static void acpiphp_bus_trim(acpi_handle handle)
{
- struct acpi_device *device;
- int ret_val;
-
- if (!acpi_bus_get_device(func->handle, &device)) {
- dbg("bus exists... trim\n");
- /* this shouldn't be in here, so remove
- * the bus then re-add it...
- */
- acpi_bus_trim(device);
- }
-
- ret_val = acpi_bus_scan(func->handle);
- if (!ret_val)
- ret_val = acpi_bus_get_device(func->handle, &device);
-
- if (ret_val)
- dbg("error adding bus, %x\n", -ret_val);
+ struct acpi_device *adev = NULL;
- return ret_val;
+ acpi_bus_get_device(handle, &adev);
+ if (adev)
+ acpi_bus_trim(adev);
}
-
/**
- * acpiphp_bus_trim - trim a bus from acpi subsystem
- * @handle: handle to acpi namespace
+ * acpiphp_bus_add - Scan ACPI namespace subtree.
+ * @handle: ACPI object handle to start the scan from.
*/
-static int acpiphp_bus_trim(acpi_handle handle)
+static void acpiphp_bus_add(acpi_handle handle)
{
- struct acpi_device *device;
- int retval;
-
- retval = acpi_bus_get_device(handle, &device);
- if (retval) {
- dbg("acpi_device not found\n");
- return retval;
- }
+ struct acpi_device *adev = NULL;
- acpi_bus_trim(device);
- return 0;
+ acpiphp_bus_trim(handle);
+ acpi_bus_scan(handle);
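+ /* Make sure the device resulting from the scan is powered up (in D0). */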
+ acpi_bus_get_device(handle, &adev);
+ if (adev)
+ acpi_device_set_power(adev, ACPI_STATE_D0);
}
static void acpiphp_set_acpi_region(struct acpiphp_slot *slot)
@@ -645,7 +508,8 @@ static void acpiphp_set_acpi_region(struct acpiphp_slot *slot)
params[1].type = ACPI_TYPE_INTEGER;
params[1].integer.value = 1;
/* _REG is optional, we don't care about if there is failure */
- acpi_evaluate_object(func->handle, "_REG", &arg_list, NULL);
+ acpi_evaluate_object(func_to_handle(func), "_REG", &arg_list,
+ NULL);
}
}
@@ -653,59 +517,44 @@ static void check_hotplug_bridge(struct acpiphp_slot *slot, struct pci_dev *dev)
{
struct acpiphp_func *func;
- if (!dev->subordinate)
- return;
-
/* quirk, or pcie could set it already */
if (dev->is_hotplug_bridge)
return;
- if (PCI_SLOT(dev->devfn) != slot->device)
- return;
-
list_for_each_entry(func, &slot->funcs, sibling) {
if (PCI_FUNC(dev->devfn) == func->function) {
- /* check if this bridge has ejectable slots */
- if ((detect_ejectable_slots(func->handle) > 0))
- dev->is_hotplug_bridge = 1;
+ dev->is_hotplug_bridge = 1;
break;
}
}
}
/**
- * enable_device - enable, configure a slot
+ * enable_slot - enable, configure a slot
* @slot: slot to be enabled
*
* This function should be called per *physical slot*,
* not per each slot object in ACPI namespace.
*/
-static int __ref enable_device(struct acpiphp_slot *slot)
+static void __ref enable_slot(struct acpiphp_slot *slot)
{
struct pci_dev *dev;
- struct pci_bus *bus = slot->bridge->pci_bus;
+ struct pci_bus *bus = slot->bus;
struct acpiphp_func *func;
- int num, max, pass;
+ int max, pass;
LIST_HEAD(add_list);
- if (slot->flags & SLOT_ENABLED)
- goto err_exit;
-
list_for_each_entry(func, &slot->funcs, sibling)
- acpiphp_bus_add(func);
+ acpiphp_bus_add(func_to_handle(func));
- num = pci_scan_slot(bus, PCI_DEVFN(slot->device, 0));
- if (num == 0) {
- /* Maybe only part of funcs are added. */
- dbg("No new device found\n");
- goto err_exit;
- }
+ pci_scan_slot(bus, PCI_DEVFN(slot->device, 0));
max = acpiphp_max_busnr(bus);
for (pass = 0; pass < 2; pass++) {
list_for_each_entry(dev, &bus->devices, bus_list) {
if (PCI_SLOT(dev->devfn) != slot->device)
continue;
+
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
max = pci_scan_bridge(bus, dev, max, pass);
@@ -723,7 +572,6 @@ static int __ref enable_device(struct acpiphp_slot *slot)
acpiphp_sanitize_bus(bus);
acpiphp_set_hpp_values(bus);
acpiphp_set_acpi_region(slot);
- pci_enable_bridges(bus);
list_for_each_entry(dev, &bus->devices, bus_list) {
/* Assume that newly added devices are powered on already. */
@@ -744,16 +592,12 @@ static int __ref enable_device(struct acpiphp_slot *slot)
continue;
}
}
-
-
- err_exit:
- return 0;
}
/* return first device in slot, acquiring a reference on it */
static struct pci_dev *dev_in_slot(struct acpiphp_slot *slot)
{
- struct pci_bus *bus = slot->bridge->pci_bus;
+ struct pci_bus *bus = slot->bus;
struct pci_dev *dev;
struct pci_dev *ret = NULL;
@@ -769,16 +613,16 @@ static struct pci_dev *dev_in_slot(struct acpiphp_slot *slot)
}
/**
- * disable_device - disable a slot
+ * disable_slot - disable a slot
* @slot: ACPI PHP slot
*/
-static int disable_device(struct acpiphp_slot *slot)
+static void disable_slot(struct acpiphp_slot *slot)
{
struct acpiphp_func *func;
struct pci_dev *pdev;
/*
- * enable_device() enumerates all functions in this device via
+ * enable_slot() enumerates all functions in this device via
* pci_scan_slot(), whether they have associated ACPI hotplug
* methods (_EJ0, etc.) or not. Therefore, we remove all functions
* here.
@@ -788,13 +632,10 @@ static int disable_device(struct acpiphp_slot *slot)
pci_dev_put(pdev);
}
- list_for_each_entry(func, &slot->funcs, sibling) {
- acpiphp_bus_trim(func->handle);
- }
+ list_for_each_entry(func, &slot->funcs, sibling)
+ acpiphp_bus_trim(func_to_handle(func));
slot->flags &= (~SLOT_ENABLED);
-
- return 0;
}
@@ -812,18 +653,21 @@ static int disable_device(struct acpiphp_slot *slot)
*/
static unsigned int get_slot_status(struct acpiphp_slot *slot)
{
- acpi_status status;
unsigned long long sta = 0;
- u32 dvid;
struct acpiphp_func *func;
list_for_each_entry(func, &slot->funcs, sibling) {
if (func->flags & FUNC_HAS_STA) {
- status = acpi_evaluate_integer(func->handle, "_STA", NULL, &sta);
+ acpi_status status;
+
+ status = acpi_evaluate_integer(func_to_handle(func),
+ "_STA", NULL, &sta);
if (ACPI_SUCCESS(status) && sta)
break;
} else {
- pci_bus_read_config_dword(slot->bridge->pci_bus,
+ u32 dvid;
+
+ pci_bus_read_config_dword(slot->bus,
PCI_DEVFN(slot->device,
func->function),
PCI_VENDOR_ID, &dvid);
@@ -838,34 +682,42 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
}
/**
- * acpiphp_eject_slot - physically eject the slot
- * @slot: ACPI PHP slot
+ * trim_stale_devices - remove PCI devices that are not responding.
+ * @dev: PCI device to start walking the hierarchy from.
*/
-int acpiphp_eject_slot(struct acpiphp_slot *slot)
+static void trim_stale_devices(struct pci_dev *dev)
{
- acpi_status status;
- struct acpiphp_func *func;
- struct acpi_object_list arg_list;
- union acpi_object arg;
+ acpi_handle handle = ACPI_HANDLE(&dev->dev);
+ struct pci_bus *bus = dev->subordinate;
+ bool alive = false;
- list_for_each_entry(func, &slot->funcs, sibling) {
- /* We don't want to call _EJ0 on non-existing functions. */
- if ((func->flags & FUNC_HAS_EJ0)) {
- /* _EJ0 method take one argument */
- arg_list.count = 1;
- arg_list.pointer = &arg;
- arg.type = ACPI_TYPE_INTEGER;
- arg.integer.value = 1;
-
- status = acpi_evaluate_object(func->handle, "_EJ0", &arg_list, NULL);
- if (ACPI_FAILURE(status)) {
- warn("%s: _EJ0 failed\n", __func__);
- return -1;
- } else
- break;
- }
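+ /* If there is an ACPI companion, ask it via _STA whether the device is still present. */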
+ if (handle) {
+ acpi_status status;
+ unsigned long long sta;
+
+ status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+ alive = ACPI_SUCCESS(status) && sta == ACPI_STA_ALL;
+ }
+ if (!alive) {
+ u32 v;
+
+ /* Check if the device responds. */
+ alive = pci_bus_read_dev_vendor_id(dev->bus, dev->devfn, &v, 0);
+ }
+ if (!alive) {
+ pci_stop_and_remove_bus_device(dev);
+ if (handle)
+ acpiphp_bus_trim(handle);
+ } else if (bus) {
+ struct pci_dev *child, *tmp;
+
+ /* The device is a bridge, so check the bus below it. */
+ pm_runtime_get_sync(&dev->dev);
+ list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
+ trim_stale_devices(child);
+
+ pm_runtime_put(&dev->dev);
}
- return 0;
}
/**
@@ -875,43 +727,30 @@ int acpiphp_eject_slot(struct acpiphp_slot *slot)
* Iterate over all slots under this bridge and make sure that if a
* card is present they are enabled, and if not they are disabled.
*/
-static int acpiphp_check_bridge(struct acpiphp_bridge *bridge)
+static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
{
struct acpiphp_slot *slot;
- int retval = 0;
- int enabled, disabled;
-
- enabled = disabled = 0;
list_for_each_entry(slot, &bridge->slots, node) {
- unsigned int status = get_slot_status(slot);
- if (slot->flags & SLOT_ENABLED) {
- if (status == ACPI_STA_ALL)
- continue;
- retval = acpiphp_disable_slot(slot);
- if (retval) {
- err("Error occurred in disabling\n");
- goto err_exit;
- } else {
- acpiphp_eject_slot(slot);
- }
- disabled++;
+ struct pci_bus *bus = slot->bus;
+ struct pci_dev *dev, *tmp;
+
+ mutex_lock(&slot->crit_sect);
+ /* If the slot is occupied and functional, bring it up; otherwise take it down. */
+ if (get_slot_status(slot) == ACPI_STA_ALL) {
+ /* remove stale devices if any */
+ list_for_each_entry_safe(dev, tmp, &bus->devices,
+ bus_list)
+ if (PCI_SLOT(dev->devfn) == slot->device)
+ trim_stale_devices(dev);
+
+ /* configure all functions */
+ enable_slot(slot);
} else {
- if (status != ACPI_STA_ALL)
- continue;
- retval = acpiphp_enable_slot(slot);
- if (retval) {
- err("Error occurred in enabling\n");
- goto err_exit;
- }
- enabled++;
+ disable_slot(slot);
}
+ mutex_unlock(&slot->crit_sect);
}
-
- dbg("%s: %d enabled, %d disabled\n", __func__, enabled, disabled);
-
- err_exit:
- return retval;
}
static void acpiphp_set_hpp_values(struct pci_bus *bus)
@@ -950,25 +789,6 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
* ACPI event handlers
*/
-static acpi_status
-check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
-{
- struct acpiphp_bridge *bridge;
- char objname[64];
- struct acpi_buffer buffer = { .length = sizeof(objname),
- .pointer = objname };
-
- bridge = acpiphp_handle_to_bridge(handle);
- if (bridge) {
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
- dbg("%s: re-enumerating slots under %s\n",
- __func__, objname);
- acpiphp_check_bridge(bridge);
- put_bridge(bridge);
- }
- return AE_OK ;
-}
-
void acpiphp_check_host_bridge(acpi_handle handle)
{
struct acpiphp_bridge *bridge;
@@ -978,27 +798,23 @@ void acpiphp_check_host_bridge(acpi_handle handle)
acpiphp_check_bridge(bridge);
put_bridge(bridge);
}
-
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
- ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL, NULL);
}
-static void _handle_hotplug_event_bridge(struct work_struct *work)
+static void hotplug_event(acpi_handle handle, u32 type, void *data)
{
+ struct acpiphp_context *context = data;
+ struct acpiphp_func *func = &context->func;
struct acpiphp_bridge *bridge;
char objname[64];
struct acpi_buffer buffer = { .length = sizeof(objname),
.pointer = objname };
- struct acpi_hp_work *hp_work;
- acpi_handle handle;
- u32 type;
- hp_work = container_of(work, struct acpi_hp_work, work);
- handle = hp_work->handle;
- type = hp_work->type;
- bridge = (struct acpiphp_bridge *)hp_work->context;
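+ /* Pin the bridge, if any, so that it cannot be freed while the event is handled. */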
+ mutex_lock(&acpiphp_context_lock);
+ bridge = context->bridge;
+ if (bridge)
+ get_bridge(bridge);
- acpi_scan_lock_acquire();
+ mutex_unlock(&acpiphp_context_lock);
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
@@ -1007,188 +823,129 @@ static void _handle_hotplug_event_bridge(struct work_struct *work)
/* bus re-enumerate */
dbg("%s: Bus check notify on %s\n", __func__, objname);
dbg("%s: re-enumerating slots under %s\n", __func__, objname);
- acpiphp_check_bridge(bridge);
- acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
- ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL, NULL);
+ if (bridge) {
+ acpiphp_check_bridge(bridge);
+ } else {
+ struct acpiphp_slot *slot = func->slot;
+
+ mutex_lock(&slot->crit_sect);
+ enable_slot(slot);
+ mutex_unlock(&slot->crit_sect);
+ }
break;
case ACPI_NOTIFY_DEVICE_CHECK:
/* device check */
dbg("%s: Device check notify on %s\n", __func__, objname);
- acpiphp_check_bridge(bridge);
- break;
+ if (bridge)
+ acpiphp_check_bridge(bridge);
+ else
+ acpiphp_check_bridge(func->parent);
- case ACPI_NOTIFY_DEVICE_WAKE:
- /* wake event */
- dbg("%s: Device wake notify on %s\n", __func__, objname);
break;
case ACPI_NOTIFY_EJECT_REQUEST:
/* request device eject */
dbg("%s: Device eject notify on %s\n", __func__, objname);
- if ((bridge->flags & BRIDGE_HAS_EJ0) && bridge->func) {
- struct acpiphp_slot *slot;
- slot = bridge->func->slot;
- if (!acpiphp_disable_slot(slot))
- acpiphp_eject_slot(slot);
- }
+ acpiphp_disable_and_eject_slot(func->slot);
break;
+ }
- case ACPI_NOTIFY_FREQUENCY_MISMATCH:
- printk(KERN_ERR "Device %s cannot be configured due"
- " to a frequency mismatch\n", objname);
- break;
+ if (bridge)
+ put_bridge(bridge);
+}
- case ACPI_NOTIFY_BUS_MODE_MISMATCH:
- printk(KERN_ERR "Device %s cannot be configured due"
- " to a bus mode mismatch\n", objname);
- break;
+static void hotplug_event_work(struct work_struct *work)
+{
+ struct acpiphp_context *context;
+ struct acpi_hp_work *hp_work;
- case ACPI_NOTIFY_POWER_FAULT:
- printk(KERN_ERR "Device %s has suffered a power fault\n",
- objname);
- break;
+ hp_work = container_of(work, struct acpi_hp_work, work);
+ context = hp_work->context;
+ acpi_scan_lock_acquire();
- default:
- warn("notify_handler: unknown event type 0x%x for %s\n", type, objname);
- break;
- }
+ hotplug_event(hp_work->handle, hp_work->type, context);
acpi_scan_lock_release();
- kfree(hp_work); /* allocated in handle_hotplug_event_bridge */
- put_bridge(bridge);
+ kfree(hp_work); /* allocated in handle_hotplug_event() */
+ put_bridge(context->func.parent);
}
/**
- * handle_hotplug_event_bridge - handle ACPI event on bridges
+ * handle_hotplug_event - handle ACPI hotplug event
* @handle: Notify()'ed acpi_handle
* @type: Notify code
- * @context: pointer to acpiphp_bridge structure
+ * @data: pointer to acpiphp_context structure
*
- * Handles ACPI event notification on {host,p2p} bridges.
+ * Handles ACPI event notification on slots.
*/
-static void handle_hotplug_event_bridge(acpi_handle handle, u32 type,
- void *context)
+static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
{
- struct acpiphp_bridge *bridge = context;
-
- /*
- * Currently the code adds all hotplug events to the kacpid_wq
- * queue when it should add hotplug events to the kacpi_hotplug_wq.
- * The proper way to fix this is to reorganize the code so that
- * drivers (dock, etc.) do not call acpi_os_execute(), etc.
- * For now just re-add this work to the kacpi_hotplug_wq so we
- * don't deadlock on hotplug actions.
- */
- get_bridge(bridge);
- alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_bridge);
-}
-
-static void hotplug_event_func(acpi_handle handle, u32 type, void *context)
-{
- struct acpiphp_func *func = context;
- char objname[64];
- struct acpi_buffer buffer = { .length = sizeof(objname),
- .pointer = objname };
-
- acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+ struct acpiphp_context *context;
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
- /* bus re-enumerate */
- dbg("%s: Bus check notify on %s\n", __func__, objname);
- acpiphp_enable_slot(func->slot);
- break;
-
case ACPI_NOTIFY_DEVICE_CHECK:
- /* device check : re-enumerate from parent bus */
- dbg("%s: Device check notify on %s\n", __func__, objname);
- acpiphp_check_bridge(func->slot->bridge);
- break;
-
- case ACPI_NOTIFY_DEVICE_WAKE:
- /* wake event */
- dbg("%s: Device wake notify on %s\n", __func__, objname);
- break;
-
case ACPI_NOTIFY_EJECT_REQUEST:
- /* request device eject */
- dbg("%s: Device eject notify on %s\n", __func__, objname);
- if (!(acpiphp_disable_slot(func->slot)))
- acpiphp_eject_slot(func->slot);
break;
- default:
- warn("notify_handler: unknown event type 0x%x for %s\n", type, objname);
- break;
- }
-}
-
-static void _handle_hotplug_event_func(struct work_struct *work)
-{
- struct acpi_hp_work *hp_work;
- struct acpiphp_func *func;
+ case ACPI_NOTIFY_DEVICE_WAKE:
+ return;
- hp_work = container_of(work, struct acpi_hp_work, work);
- func = hp_work->context;
- acpi_scan_lock_acquire();
+ case ACPI_NOTIFY_FREQUENCY_MISMATCH:
+ acpi_handle_err(handle, "Device cannot be configured due "
+ "to a frequency mismatch\n");
+ return;
- hotplug_event_func(hp_work->handle, hp_work->type, func);
+ case ACPI_NOTIFY_BUS_MODE_MISMATCH:
+ acpi_handle_err(handle, "Device cannot be configured due "
+ "to a bus mode mismatch\n");
+ return;
- acpi_scan_lock_release();
- kfree(hp_work); /* allocated in handle_hotplug_event_func */
- put_bridge(func->slot->bridge);
-}
+ case ACPI_NOTIFY_POWER_FAULT:
+ acpi_handle_err(handle, "Device has suffered a power fault\n");
+ return;
-/**
- * handle_hotplug_event_func - handle ACPI event on functions (i.e. slots)
- * @handle: Notify()'ed acpi_handle
- * @type: Notify code
- * @context: pointer to acpiphp_func structure
- *
- * Handles ACPI event notification on slots.
- */
-static void handle_hotplug_event_func(acpi_handle handle, u32 type,
- void *context)
-{
- struct acpiphp_func *func = context;
+ default:
+ acpi_handle_warn(handle, "Unsupported event type 0x%x\n", type);
+ return;
+ }
- /*
- * Currently the code adds all hotplug events to the kacpid_wq
- * queue when it should add hotplug events to the kacpi_hotplug_wq.
- * The proper way to fix this is to reorganize the code so that
- * drivers (dock, etc.) do not call acpi_os_execute(), etc.
- * For now just re-add this work to the kacpi_hotplug_wq so we
- * don't deadlock on hotplug actions.
- */
- get_bridge(func->slot->bridge);
- alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_func);
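+ /*
+ * Take a reference to the parent bridge and queue the event for
+ * hotplug_event_work(), which runs under acpi_scan_lock and drops
+ * the reference when it is done.
+ */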
+ mutex_lock(&acpiphp_context_lock);
+ context = acpiphp_get_context(handle);
+ if (context) {
+ get_bridge(context->func.parent);
+ acpiphp_put_context(context);
+ alloc_acpi_hp_work(handle, type, context, hotplug_event_work);
+ }
+ mutex_unlock(&acpiphp_context_lock);
}
/*
* Create hotplug slots for the PCI bus.
* It should always return 0 to avoid skipping following notifiers.
*/
-void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle)
+void acpiphp_enumerate_slots(struct pci_bus *bus)
{
- acpi_handle dummy_handle;
struct acpiphp_bridge *bridge;
+ acpi_handle handle;
+ acpi_status status;
if (acpiphp_disabled)
return;
- if (detect_ejectable_slots(handle) <= 0)
+ handle = ACPI_HANDLE(bus->bridge);
+ if (!handle)
return;
bridge = kzalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
- if (bridge == NULL) {
- err("out of memory\n");
+ if (!bridge) {
+ acpi_handle_err(handle, "No memory for bridge object\n");
return;
}
INIT_LIST_HEAD(&bridge->slots);
kref_init(&bridge->ref);
- bridge->handle = handle;
bridge->pci_dev = pci_dev_get(bus->self);
bridge->pci_bus = bus;
@@ -1199,31 +956,62 @@ void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle)
*/
get_device(&bus->dev);
- if (!pci_is_root_bus(bridge->pci_bus) &&
- ACPI_SUCCESS(acpi_get_handle(bridge->handle,
- "_EJ0", &dummy_handle))) {
- dbg("found ejectable p2p bridge\n");
- bridge->flags |= BRIDGE_HAS_EJ0;
- bridge->func = acpiphp_bridge_handle_to_function(handle);
+ if (!pci_is_root_bus(bridge->pci_bus)) {
+ struct acpiphp_context *context;
+
+ /*
+ * This bridge should have been registered as a hotplug function
+ * under its parent, so the context has to be there. If not, we
+ * are in deep goo.
+ */
+ mutex_lock(&acpiphp_context_lock);
+ context = acpiphp_get_context(handle);
+ if (WARN_ON(!context)) {
+ mutex_unlock(&acpiphp_context_lock);
+ put_device(&bus->dev);
+ kfree(bridge);
+ return;
+ }
+ bridge->context = context;
+ context->bridge = bridge;
+ /* Get a reference to the parent bridge. */
+ get_bridge(context->func.parent);
+ mutex_unlock(&acpiphp_context_lock);
}
- init_bridge_misc(bridge);
+ /* must be added to the list prior to calling register_slot */
+ mutex_lock(&bridge_mutex);
+ list_add(&bridge->list, &bridge_list);
+ mutex_unlock(&bridge_mutex);
+
+ /* register all slot objects under this bridge */
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ register_slot, NULL, bridge, NULL);
+ if (ACPI_FAILURE(status)) {
+ acpi_handle_err(handle, "failed to register slots\n");
+ cleanup_bridge(bridge);
+ put_bridge(bridge);
+ }
}
/* Destroy hotplug slots associated with the PCI bus */
void acpiphp_remove_slots(struct pci_bus *bus)
{
- struct acpiphp_bridge *bridge, *tmp;
+ struct acpiphp_bridge *bridge;
if (acpiphp_disabled)
return;
- list_for_each_entry_safe(bridge, tmp, &bridge_list, list)
+ mutex_lock(&bridge_mutex);
+ list_for_each_entry(bridge, &bridge_list, list)
if (bridge->pci_bus == bus) {
+ mutex_unlock(&bridge_mutex);
cleanup_bridge(bridge);
put_bridge(bridge);
- break;
+ return;
}
+
+ mutex_unlock(&bridge_mutex);
}
/**
@@ -1232,51 +1020,39 @@ void acpiphp_remove_slots(struct pci_bus *bus)
*/
int acpiphp_enable_slot(struct acpiphp_slot *slot)
{
- int retval;
-
mutex_lock(&slot->crit_sect);
+ /* configure all functions */
+ if (!(slot->flags & SLOT_ENABLED))
+ enable_slot(slot);
- /* wake up all functions */
- retval = power_on_slot(slot);
- if (retval)
- goto err_exit;
-
- if (get_slot_status(slot) == ACPI_STA_ALL) {
- /* configure all functions */
- retval = enable_device(slot);
- if (retval)
- power_off_slot(slot);
- } else {
- dbg("%s: Slot status is not ACPI_STA_ALL\n", __func__);
- power_off_slot(slot);
- }
-
- err_exit:
mutex_unlock(&slot->crit_sect);
- return retval;
+ return 0;
}
/**
- * acpiphp_disable_slot - power off slot
+ * acpiphp_disable_and_eject_slot - power off and eject slot
* @slot: ACPI PHP slot
*/
-int acpiphp_disable_slot(struct acpiphp_slot *slot)
+int acpiphp_disable_and_eject_slot(struct acpiphp_slot *slot)
{
+ struct acpiphp_func *func;
int retval = 0;
mutex_lock(&slot->crit_sect);
/* unconfigure all functions */
- retval = disable_device(slot);
- if (retval)
- goto err_exit;
+ disable_slot(slot);
+
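+ /* Eject the slot by evaluating _EJ0 on the first function that provides it. */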
+ list_for_each_entry(func, &slot->funcs, sibling)
+ if (func->flags & FUNC_HAS_EJ0) {
+ acpi_handle handle = func_to_handle(func);
- /* power off all functions */
- retval = power_off_slot(slot);
- if (retval)
- goto err_exit;
+ if (ACPI_FAILURE(acpi_evaluate_ej0(handle)))
+ acpi_handle_err(handle, "_EJ0 failed\n");
+
+ break;
+ }
- err_exit:
mutex_unlock(&slot->crit_sect);
return retval;
}
@@ -1288,7 +1064,7 @@ int acpiphp_disable_slot(struct acpiphp_slot *slot)
*/
u8 acpiphp_get_power_status(struct acpiphp_slot *slot)
{
- return (slot->flags & SLOT_POWEREDON);
+ return (slot->flags & SLOT_ENABLED);
}
@@ -1298,11 +1074,7 @@ u8 acpiphp_get_power_status(struct acpiphp_slot *slot)
*/
u8 acpiphp_get_latch_status(struct acpiphp_slot *slot)
{
- unsigned int sta;
-
- sta = get_slot_status(slot);
-
- return (sta & ACPI_STA_DEVICE_UI) ? 0 : 1;
+ return !(get_slot_status(slot) & ACPI_STA_DEVICE_UI);
}
@@ -1312,9 +1084,5 @@ u8 acpiphp_get_latch_status(struct acpiphp_slot *slot)
*/
u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot)
{
- unsigned int sta;
-
- sta = get_slot_status(slot);
-
- return (sta == 0) ? 0 : 1;
+ return !!get_slot_status(slot);
}
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index c35e8ad6db0..2f5786c8522 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -66,7 +66,7 @@ do { \
#define IBM_HARDWARE_ID1 "IBM37D0"
#define IBM_HARDWARE_ID2 "IBM37D4"
-#define hpslot_to_sun(A) (((struct slot *)((A)->private))->acpi_slot->sun)
+#define hpslot_to_sun(A) (((struct slot *)((A)->private))->sun)
/* union apci_descriptor - allows access to the
* various device descriptors that are embedded in the
@@ -270,7 +270,6 @@ static void ibm_handle_events(acpi_handle handle, u32 event, void *context)
if (subevent == 0x80) {
dbg("%s: generationg bus event\n", __func__);
- acpi_bus_generate_proc_event(note->device, note->event, detail);
acpi_bus_generate_netlink_event(note->device->pnp.device_class,
dev_name(&note->device->dev),
note->event, detail);
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 7fb326983ed..541bbe6d534 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -155,6 +155,7 @@ void pciehp_green_led_off(struct slot *slot);
void pciehp_green_led_blink(struct slot *slot);
int pciehp_check_link_status(struct controller *ctrl);
void pciehp_release_ctrl(struct controller *ctrl);
+int pciehp_reset_slot(struct slot *slot, int probe);
static inline const char *slot_name(struct slot *slot)
{
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 7d72c5e2eba..f4a18f51a29 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -69,6 +69,7 @@ static int get_power_status (struct hotplug_slot *slot, u8 *value);
static int get_attention_status (struct hotplug_slot *slot, u8 *value);
static int get_latch_status (struct hotplug_slot *slot, u8 *value);
static int get_adapter_status (struct hotplug_slot *slot, u8 *value);
+static int reset_slot (struct hotplug_slot *slot, int probe);
/**
* release_slot - free up the memory used by a slot
@@ -111,6 +112,7 @@ static int init_slot(struct controller *ctrl)
ops->disable_slot = disable_slot;
ops->get_power_status = get_power_status;
ops->get_adapter_status = get_adapter_status;
+ ops->reset_slot = reset_slot;
if (MRL_SENS(ctrl))
ops->get_latch_status = get_latch_status;
if (ATTN_LED(ctrl)) {
@@ -223,6 +225,16 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
return pciehp_get_adapter_status(slot, value);
}
+static int reset_slot(struct hotplug_slot *hotplug_slot, int probe)
+{
+ struct slot *slot = hotplug_slot->private;
+
+ ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n",
+ __func__, slot_name(slot));
+
+ return pciehp_reset_slot(slot, probe);
+}
+
static int pciehp_probe(struct pcie_device *dev)
{
int rc;
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index b2255736ac8..51f56ef4ab6 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -749,6 +749,37 @@ static void pcie_disable_notification(struct controller *ctrl)
ctrl_warn(ctrl, "Cannot disable software notification\n");
}
+/*
+ * pciehp has a 1:1 bus:slot relationship so we ultimately want a secondary
+ * bus reset of the bridge, but if the slot supports surprise removal we need
+ * to disable presence detection around the bus reset and clear any spurious
+ * events after.
+ */
+int pciehp_reset_slot(struct slot *slot, int probe)
+{
+ struct controller *ctrl = slot->ctrl;
+
+ if (probe)
+ return 0;
+
+ if (HP_SUPR_RM(ctrl)) {
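+ /* Mask presence-detect-changed notification for the duration of the reset. */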
+ pcie_write_cmd(ctrl, 0, PCI_EXP_SLTCTL_PDCE);
+ if (pciehp_poll_mode)
+ del_timer_sync(&ctrl->poll_timer);
+ }
+
+ pci_reset_bridge_secondary_bus(ctrl->pcie->port);
+
+ if (HP_SUPR_RM(ctrl)) {
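+ /* Clear any presence-detect event latched during the reset before re-enabling it. */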
+ pciehp_writew(ctrl, PCI_EXP_SLTSTA, PCI_EXP_SLTSTA_PDC);
+ pcie_write_cmd(ctrl, PCI_EXP_SLTCTL_PDCE, PCI_EXP_SLTCTL_PDCE);
+ if (pciehp_poll_mode)
+ int_poll_timeout(ctrl->poll_timer.data);
+ }
+
+ return 0;
+}
+
int pcie_init_notification(struct controller *ctrl)
{
if (pciehp_request_irq(ctrl))
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index aac7a40e4a4..0e0d0f7f63f 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -92,7 +92,14 @@ int pciehp_unconfigure_device(struct slot *p_slot)
if (ret)
presence = 0;
- list_for_each_entry_safe(dev, temp, &parent->devices, bus_list) {
+ /*
+ * Stopping an SR-IOV PF device removes all the associated VFs,
+ * which will update the bus->devices list and confuse the
+ * iterator. Therefore, iterate in reverse so we remove the VFs
+ * first, then the PF. We do the same in pci_stop_bus_device().
+ */
+ list_for_each_entry_safe_reverse(dev, temp, &parent->devices,
+ bus_list) {
pci_dev_get(dev);
if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) {
pci_read_config_byte(dev, PCI_BRIDGE_CONTROL, &bctl);
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
index fec2d5b7544..16f92035231 100644
--- a/drivers/pci/hotplug/pcihp_slot.c
+++ b/drivers/pci/hotplug/pcihp_slot.c
@@ -160,9 +160,8 @@ void pci_configure_slot(struct pci_dev *dev)
(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
return;
- if (dev->bus && dev->bus->self)
- pcie_bus_configure_settings(dev->bus,
- dev->bus->self->pcie_mpss);
+ if (dev->bus)
+ pcie_bus_configure_settings(dev->bus);
memset(&hpp, 0, sizeof(hpp));
ret = pci_get_hp_params(dev, &hpp);
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index ea3fa90d020..66e505ca24e 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -79,8 +79,6 @@ static int enable_slot(struct hotplug_slot *hotplug_slot)
if (rc)
goto out_deconfigure;
- slot->zdev->state = ZPCI_FN_STATE_ONLINE;
-
pci_scan_slot(slot->zdev->bus, ZPCI_DEVFN);
pci_bus_add_devices(slot->zdev->bus);
@@ -148,7 +146,7 @@ static struct hotplug_slot_ops s390_hotplug_slot_ops = {
.get_adapter_status = get_adapter_status,
};
-static int init_pci_slot(struct zpci_dev *zdev)
+int zpci_init_slot(struct zpci_dev *zdev)
{
struct hotplug_slot *hotplug_slot;
struct hotplug_slot_info *info;
@@ -202,7 +200,7 @@ error:
return -ENOMEM;
}
-static void exit_pci_slot(struct zpci_dev *zdev)
+void zpci_exit_slot(struct zpci_dev *zdev)
{
struct list_head *tmp, *n;
struct slot *slot;
@@ -215,60 +213,3 @@ static void exit_pci_slot(struct zpci_dev *zdev)
pci_hp_deregister(slot->hotplug_slot);
}
}
-
-static struct pci_hp_callback_ops hp_ops = {
- .create_slot = init_pci_slot,
- .remove_slot = exit_pci_slot,
-};
-
-static void __init init_pci_slots(void)
-{
- struct zpci_dev *zdev;
-
- /*
- * Create a structure for each slot, and register that slot
- * with the pci_hotplug subsystem.
- */
- mutex_lock(&zpci_list_lock);
- list_for_each_entry(zdev, &zpci_list, entry) {
- init_pci_slot(zdev);
- }
- mutex_unlock(&zpci_list_lock);
-}
-
-static void __exit exit_pci_slots(void)
-{
- struct list_head *tmp, *n;
- struct slot *slot;
-
- /*
- * Unregister all of our slots with the pci_hotplug subsystem.
- * Memory will be freed in release_slot() callback after slot's
- * lifespan is finished.
- */
- list_for_each_safe(tmp, n, &s390_hotplug_slot_list) {
- slot = list_entry(tmp, struct slot, slot_list);
- list_del(&slot->slot_list);
- pci_hp_deregister(slot->hotplug_slot);
- }
-}
-
-static int __init pci_hotplug_s390_init(void)
-{
- if (!s390_pci_probe)
- return -EOPNOTSUPP;
-
- zpci_register_hp_ops(&hp_ops);
- init_pci_slots();
-
- return 0;
-}
-
-static void __exit pci_hotplug_s390_exit(void)
-{
- exit_pci_slots();
- zpci_deregister_hp_ops();
-}
-
-module_init(pci_hotplug_s390_init);
-module_exit(pci_hotplug_s390_exit);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index de8ffacf9c9..21a7182dccd 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -286,7 +286,6 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
(!(iov->cap & PCI_SRIOV_CAP_VFM) && (nr_virtfn > initial)))
return -EINVAL;
- pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_STRIDE, &stride);
if (!offset || (nr_virtfn > 1 && !stride))
@@ -324,7 +323,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
if (!pdev->is_physfn) {
pci_dev_put(pdev);
- return -ENODEV;
+ return -ENOSYS;
}
rc = sysfs_create_link(&dev->dev.kobj,
@@ -334,6 +333,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return rc;
}
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, nr_virtfn);
iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
@@ -368,6 +368,7 @@ failed:
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, 0);
ssleep(1);
pci_cfg_access_unlock(dev);
@@ -401,6 +402,7 @@ static void sriov_disable(struct pci_dev *dev)
sysfs_remove_link(&dev->dev.kobj, "dep_link");
iov->num_VFs = 0;
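+ /* Also clear NumVFs in the SR-IOV capability. */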
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_NUM_VF, 0);
}
static int sriov_init(struct pci_dev *dev, int pos)
@@ -662,7 +664,7 @@ int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
might_sleep();
if (!dev->is_physfn)
- return -ENODEV;
+ return -ENOSYS;
return sriov_enable(dev, nr_virtfn);
}
@@ -722,7 +724,7 @@ EXPORT_SYMBOL_GPL(pci_num_vf);
* @dev: the PCI device
*
* Returns number of VFs belonging to this device that are assigned to a guest.
- * If device is not a physical function returns -ENODEV.
+ * If device is not a physical function returns 0.
*/
int pci_vfs_assigned(struct pci_dev *dev)
{
@@ -767,12 +769,15 @@ EXPORT_SYMBOL_GPL(pci_vfs_assigned);
* device's mutex held.
*
* Returns 0 if PF is an SRIOV-capable device and
- * value of numvfs valid. If not a PF with VFS, return -EINVAL;
+ * value of numvfs valid. If not a PF, return -ENOSYS;
+ * if numvfs is invalid return -EINVAL;
* if VFs already enabled, return -EBUSY.
*/
int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
{
- if (!dev->is_physfn || (numvfs > dev->sriov->total_VFs))
+ if (!dev->is_physfn)
+ return -ENOSYS;
+ if (numvfs > dev->sriov->total_VFs)
return -EINVAL;
/* Shouldn't change if VFs already enabled */
@@ -786,17 +791,17 @@ int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs)
EXPORT_SYMBOL_GPL(pci_sriov_set_totalvfs);
/**
- * pci_sriov_get_totalvfs -- get total VFs supported on this devic3
+ * pci_sriov_get_totalvfs -- get total VFs supported on this device
* @dev: the PCI PF device
*
* For a PCIe device with SRIOV support, return the PCIe
* SRIOV capability value of TotalVFs or the value of driver_max_VFs
- * if the driver reduced it. Otherwise, -EINVAL.
+ * if the driver reduced it. Otherwise 0.
*/
int pci_sriov_get_totalvfs(struct pci_dev *dev)
{
if (!dev->is_physfn)
- return -EINVAL;
+ return 0;
if (dev->sriov->driver_max_VFs)
return dev->sriov->driver_max_VFs;
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index dbdc5f7e2b2..7c29ee4ed0a 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -210,7 +210,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
}
if (!error)
- dev_info(&dev->dev, "power state changed by ACPI to %s\n",
+ dev_dbg(&dev->dev, "power state changed by ACPI to %s\n",
acpi_power_state_string(state_conv[state]));
return error;
@@ -290,24 +290,16 @@ static struct pci_platform_pm_ops acpi_pci_platform_pm = {
void acpi_pci_add_bus(struct pci_bus *bus)
{
- acpi_handle handle = NULL;
-
- if (bus->bridge)
- handle = ACPI_HANDLE(bus->bridge);
- if (acpi_pci_disabled || handle == NULL)
+ if (acpi_pci_disabled || !bus->bridge)
return;
- acpi_pci_slot_enumerate(bus, handle);
- acpiphp_enumerate_slots(bus, handle);
+ acpi_pci_slot_enumerate(bus);
+ acpiphp_enumerate_slots(bus);
}
void acpi_pci_remove_bus(struct pci_bus *bus)
{
- /*
- * bus->bridge->acpi_node.handle has already been reset to NULL
- * when acpi_pci_remove_bus() is called, so don't check ACPI handle.
- */
- if (acpi_pci_disabled)
+ if (acpi_pci_disabled || !bus->bridge)
return;
acpiphp_remove_slots(bus);
@@ -317,13 +309,20 @@ void acpi_pci_remove_bus(struct pci_bus *bus)
/* ACPI bus type */
static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
{
- struct pci_dev * pci_dev;
- u64 addr;
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ bool is_bridge;
+ u64 addr;
- pci_dev = to_pci_dev(dev);
+ /*
+ * pci_is_bridge() is not suitable here, because pci_dev->subordinate
+ * is set only after acpi_pci_find_device() has been called for the
+ * given device.
+ */
+ is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE
+ || pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
/* Please ref to ACPI spec for the syntax of _ADR */
addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn);
- *handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr);
+ *handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge);
if (!*handle)
return -ENODEV;
return 0;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index e6515e21afa..98f7b9b8950 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -763,6 +763,13 @@ static int pci_pm_resume(struct device *dev)
#ifdef CONFIG_HIBERNATE_CALLBACKS
+
+/*
+ * pcibios_pm_ops - provide arch-specific hooks when a PCI device is doing
+ * a hibernate transition
+ */
+struct dev_pm_ops __weak pcibios_pm_ops;
+
static int pci_pm_freeze(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -786,6 +793,9 @@ static int pci_pm_freeze(struct device *dev)
return error;
}
+ if (pcibios_pm_ops.freeze)
+ return pcibios_pm_ops.freeze(dev);
+
return 0;
}
@@ -811,6 +821,9 @@ static int pci_pm_freeze_noirq(struct device *dev)
pci_pm_set_unknown_state(pci_dev);
+ if (pcibios_pm_ops.freeze_noirq)
+ return pcibios_pm_ops.freeze_noirq(dev);
+
return 0;
}
@@ -820,6 +833,12 @@ static int pci_pm_thaw_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
+ if (pcibios_pm_ops.thaw_noirq) {
+ error = pcibios_pm_ops.thaw_noirq(dev);
+ if (error)
+ return error;
+ }
+
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume_early(dev);
@@ -837,6 +856,12 @@ static int pci_pm_thaw(struct device *dev)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int error = 0;
+ if (pcibios_pm_ops.thaw) {
+ error = pcibios_pm_ops.thaw(dev);
+ if (error)
+ return error;
+ }
+
if (pci_has_legacy_pm_support(pci_dev))
return pci_legacy_resume(dev);
@@ -878,6 +903,9 @@ static int pci_pm_poweroff(struct device *dev)
Fixup:
pci_fixup_device(pci_fixup_suspend, pci_dev);
+ if (pcibios_pm_ops.poweroff)
+ return pcibios_pm_ops.poweroff(dev);
+
return 0;
}
@@ -911,6 +939,9 @@ static int pci_pm_poweroff_noirq(struct device *dev)
if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI)
pci_write_config_word(pci_dev, PCI_COMMAND, 0);
+ if (pcibios_pm_ops.poweroff_noirq)
+ return pcibios_pm_ops.poweroff_noirq(dev);
+
return 0;
}
@@ -920,6 +951,12 @@ static int pci_pm_restore_noirq(struct device *dev)
struct device_driver *drv = dev->driver;
int error = 0;
+ if (pcibios_pm_ops.restore_noirq) {
+ error = pcibios_pm_ops.restore_noirq(dev);
+ if (error)
+ return error;
+ }
+
pci_pm_default_resume_early(pci_dev);
if (pci_has_legacy_pm_support(pci_dev))
@@ -937,6 +974,12 @@ static int pci_pm_restore(struct device *dev)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int error = 0;
+ if (pcibios_pm_ops.restore) {
+ error = pcibios_pm_ops.restore(dev);
+ if (error)
+ return error;
+ }
+
/*
* This is necessary for the hibernation error path in which restore is
* called without restoring the standard config registers of the device.
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index c0dbe1f6136..7128cfdd64a 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -131,19 +131,19 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
return ret;
}
-static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t cpuaffinity_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
}
+static DEVICE_ATTR_RO(cpuaffinity);
-static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t cpulistaffinity_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return pci_bus_show_cpuaffinity(dev, 1, attr, buf);
}
+static DEVICE_ATTR_RO(cpulistaffinity);
/* show resources */
static ssize_t
@@ -379,6 +379,7 @@ dev_bus_rescan_store(struct device *dev, struct device_attribute *attr,
}
return count;
}
+static DEVICE_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store);
#if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
static ssize_t d3cold_allowed_store(struct device *dev,
@@ -514,11 +515,20 @@ struct device_attribute pci_dev_attrs[] = {
__ATTR_NULL,
};
-struct device_attribute pcibus_dev_attrs[] = {
- __ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, dev_bus_rescan_store),
- __ATTR(cpuaffinity, S_IRUGO, pci_bus_show_cpumaskaffinity, NULL),
- __ATTR(cpulistaffinity, S_IRUGO, pci_bus_show_cpulistaffinity, NULL),
- __ATTR_NULL,
+static struct attribute *pcibus_attrs[] = {
+ &dev_attr_rescan.attr,
+ &dev_attr_cpuaffinity.attr,
+ &dev_attr_cpulistaffinity.attr,
+ NULL,
+};
+
+static const struct attribute_group pcibus_group = {
+ .attrs = pcibus_attrs,
+};
+
+const struct attribute_group *pcibus_groups[] = {
+ &pcibus_group,
+ NULL,
};
static ssize_t
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e37fea6e178..e8ccf6c0f08 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
+#include <linux/pci_hotplug.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include "pci.h"
@@ -1145,6 +1146,24 @@ int pci_reenable_device(struct pci_dev *dev)
return 0;
}
+static void pci_enable_bridge(struct pci_dev *dev)
+{
+ int retval;
+
+ if (!dev)
+ return;
+
+ pci_enable_bridge(dev->bus->self);
+
+ if (pci_is_enabled(dev))
+ return;
+ retval = pci_enable_device(dev);
+ if (retval)
+ dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
+ retval);
+ pci_set_master(dev);
+}
+
static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
int err;
@@ -1165,6 +1184,8 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
if (atomic_inc_return(&dev->enable_cnt) > 1)
return 0; /* already enabled */
+ pci_enable_bridge(dev->bus->self);
+
/* only skip sriov related */
for (i = 0; i <= PCI_ROM_RESOURCE; i++)
if (dev->resource[i].flags & flags)
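
pci_enable_device_flags() now calls pci_enable_bridge() on its upstream bridge, and that helper recurses toward the root so every bridge is enabled and made bus master before the device itself. A minimal standalone model of the parent-first recursion (plain C with a made-up node type, not the kernel's structures):

#include <stdbool.h>
#include <stdio.h>

struct node {
	const char *name;
	struct node *parent;	/* upstream bridge, NULL above the root */
	bool enabled;
};

/* Enable every ancestor before the node itself, mirroring pci_enable_bridge(). */
static void enable_bridge(struct node *n)
{
	if (!n)
		return;			/* recursion terminates above the root */

	enable_bridge(n->parent);	/* parents first */

	if (n->enabled)
		return;			/* already enabled: nothing to do */
	n->enabled = true;
	printf("enabled %s\n", n->name);
}

int main(void)
{
	struct node root = { "root port", NULL, false };
	struct node sw   = { "switch upstream port", &root, false };
	struct node leaf = { "endpoint's bridge", &sw, false };

	enable_bridge(&leaf);	/* prints root port, then switch, then endpoint's bridge */
	return 0;
}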
@@ -1992,7 +2013,7 @@ static void pci_add_saved_cap(struct pci_dev *pci_dev,
}
/**
- * pci_add_save_buffer - allocate buffer for saving given capability registers
+ * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
* @dev: the PCI device
* @cap: the capability to allocate the buffer for
* @size: requested size of the buffer
@@ -2095,9 +2116,9 @@ void pci_enable_ido(struct pci_dev *dev, unsigned long type)
u16 ctrl = 0;
if (type & PCI_EXP_IDO_REQUEST)
- ctrl |= PCI_EXP_IDO_REQ_EN;
+ ctrl |= PCI_EXP_DEVCTL2_IDO_REQ_EN;
if (type & PCI_EXP_IDO_COMPLETION)
- ctrl |= PCI_EXP_IDO_CMP_EN;
+ ctrl |= PCI_EXP_DEVCTL2_IDO_CMP_EN;
if (ctrl)
pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, ctrl);
}
@@ -2113,9 +2134,9 @@ void pci_disable_ido(struct pci_dev *dev, unsigned long type)
u16 ctrl = 0;
if (type & PCI_EXP_IDO_REQUEST)
- ctrl |= PCI_EXP_IDO_REQ_EN;
+ ctrl |= PCI_EXP_DEVCTL2_IDO_REQ_EN;
if (type & PCI_EXP_IDO_COMPLETION)
- ctrl |= PCI_EXP_IDO_CMP_EN;
+ ctrl |= PCI_EXP_DEVCTL2_IDO_CMP_EN;
if (ctrl)
pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, ctrl);
}
@@ -2147,7 +2168,7 @@ int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
int ret;
pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
- if (!(cap & PCI_EXP_OBFF_MASK))
+ if (!(cap & PCI_EXP_DEVCAP2_OBFF_MASK))
return -ENOTSUPP; /* no OBFF support at all */
/* Make sure the topology supports OBFF as well */
@@ -2158,17 +2179,17 @@ int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
}
pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctrl);
- if (cap & PCI_EXP_OBFF_WAKE)
- ctrl |= PCI_EXP_OBFF_WAKE_EN;
+ if (cap & PCI_EXP_DEVCAP2_OBFF_WAKE)
+ ctrl |= PCI_EXP_DEVCTL2_OBFF_WAKE_EN;
else {
switch (type) {
case PCI_EXP_OBFF_SIGNAL_L0:
- if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
- ctrl |= PCI_EXP_OBFF_MSGA_EN;
+ if (!(ctrl & PCI_EXP_DEVCTL2_OBFF_WAKE_EN))
+ ctrl |= PCI_EXP_DEVCTL2_OBFF_MSGA_EN;
break;
case PCI_EXP_OBFF_SIGNAL_ALWAYS:
- ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
- ctrl |= PCI_EXP_OBFF_MSGB_EN;
+ ctrl &= ~PCI_EXP_DEVCTL2_OBFF_WAKE_EN;
+ ctrl |= PCI_EXP_DEVCTL2_OBFF_MSGB_EN;
break;
default:
WARN(1, "bad OBFF signal type\n");
@@ -2189,7 +2210,8 @@ EXPORT_SYMBOL(pci_enable_obff);
*/
void pci_disable_obff(struct pci_dev *dev)
{
- pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_OBFF_WAKE_EN);
+ pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_OBFF_WAKE_EN);
}
EXPORT_SYMBOL(pci_disable_obff);
@@ -2237,7 +2259,8 @@ int pci_enable_ltr(struct pci_dev *dev)
return ret;
}
- return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
+ return pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_LTR_EN);
}
EXPORT_SYMBOL(pci_enable_ltr);
@@ -2254,7 +2277,8 @@ void pci_disable_ltr(struct pci_dev *dev)
if (!pci_ltr_supported(dev))
return;
- pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2, PCI_EXP_LTR_EN);
+ pcie_capability_clear_word(dev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_LTR_EN);
}
EXPORT_SYMBOL(pci_disable_ltr);
@@ -2359,6 +2383,27 @@ void pci_enable_acs(struct pci_dev *dev)
pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}
+static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
+{
+ int pos;
+ u16 cap, ctrl;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
+ if (!pos)
+ return false;
+
+ /*
+ * Except for egress control, capabilities are either required
+ * or only required if controllable. Features missing from the
+ * capability field can therefore be assumed as hard-wired enabled.
+ */
+ pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
+ acs_flags &= (cap | PCI_ACS_EC);
+
+ pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
+ return (ctrl & acs_flags) == acs_flags;
+}
+
/**
* pci_acs_enabled - test ACS against required flags for a given device
* @pdev: device to test
@@ -2366,36 +2411,76 @@ void pci_enable_acs(struct pci_dev *dev)
*
* Return true if the device supports the provided flags. Automatically
* filters out flags that are not implemented on multifunction devices.
+ *
+ * Note that this interface checks the effective ACS capabilities of the
+ * device rather than the actual capabilities. For instance, most single
+ * function endpoints are not required to support ACS because they have no
+ * opportunity for peer-to-peer access. We therefore return 'true'
+ * regardless of whether the device exposes an ACS capability. This makes
+ * it much easier for callers of this function to ignore the actual type
+ * or topology of the device when testing ACS support.
*/
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
{
- int pos, ret;
- u16 ctrl;
+ int ret;
ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
if (ret >= 0)
return ret > 0;
+ /*
+ * Conventional PCI and PCI-X devices never support ACS, either
+ * effectively or actually. The shared bus topology implies that
+ * any device on the bus can receive or snoop DMA.
+ */
if (!pci_is_pcie(pdev))
return false;
- /* Filter out flags not applicable to multifunction */
- if (pdev->multifunction)
- acs_flags &= (PCI_ACS_RR | PCI_ACS_CR |
- PCI_ACS_EC | PCI_ACS_DT);
-
- if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM ||
- pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
- pdev->multifunction) {
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
- if (!pos)
- return false;
+ switch (pci_pcie_type(pdev)) {
+ /*
+ * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
+	 * but since their primary interface is PCI/X, we conservatively
+ * handle them as we would a non-PCIe device.
+ */
+ case PCI_EXP_TYPE_PCIE_BRIDGE:
+ /*
+ * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
+ * applicable... must never implement an ACS Extended Capability...".
+ * This seems arbitrary, but we take a conservative interpretation
+ * of this statement.
+ */
+ case PCI_EXP_TYPE_PCI_BRIDGE:
+ case PCI_EXP_TYPE_RC_EC:
+ return false;
+ /*
+ * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
+ * implement ACS in order to indicate their peer-to-peer capabilities,
+ * regardless of whether they are single- or multi-function devices.
+ */
+ case PCI_EXP_TYPE_DOWNSTREAM:
+ case PCI_EXP_TYPE_ROOT_PORT:
+ return pci_acs_flags_enabled(pdev, acs_flags);
+ /*
+ * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
+ * implemented by the remaining PCIe types to indicate peer-to-peer
+	 * capabilities, but only when they are part of a multifunction
+ * device. The footnote for section 6.12 indicates the specific
+ * PCIe types included here.
+ */
+ case PCI_EXP_TYPE_ENDPOINT:
+ case PCI_EXP_TYPE_UPSTREAM:
+ case PCI_EXP_TYPE_LEG_END:
+ case PCI_EXP_TYPE_RC_END:
+ if (!pdev->multifunction)
+ break;
- pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
- if ((ctrl & acs_flags) != acs_flags)
- return false;
+ return pci_acs_flags_enabled(pdev, acs_flags);
}
+ /*
+	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
+ * to single function devices with the exception of downstream ports.
+ */
return true;
}
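
pci_acs_flags_enabled() boils down to two masks: the requested flags are first trimmed to what the device advertises in its ACS capability word (egress control is kept unconditionally, as the comment above explains), and everything that remains must be set in the control word. A standalone sketch of that check with made-up register values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ACS_SV 0x01	/* source validation */
#define ACS_RR 0x04	/* request redirect */
#define ACS_CR 0x08	/* completion redirect */
#define ACS_EC 0x20	/* egress control, always kept in the mask */

static bool acs_flags_enabled(uint16_t cap, uint16_t ctrl, uint16_t want)
{
	/* Features absent from the capability word are treated as hard-wired on. */
	want &= (cap | ACS_EC);
	return (ctrl & want) == want;
}

int main(void)
{
	/* Device implements RR and CR; only RR is switched on. */
	uint16_t cap = ACS_RR | ACS_CR, ctrl = ACS_RR;

	printf("RR only: %d\n", acs_flags_enabled(cap, ctrl, ACS_RR));          /* 1 */
	printf("RR+CR:   %d\n", acs_flags_enabled(cap, ctrl, ACS_RR | ACS_CR)); /* 0 */
	printf("SV:      %d\n", acs_flags_enabled(cap, ctrl, ACS_SV));          /* 1: not implemented, assumed wired on */
	return 0;
}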
@@ -3059,18 +3144,23 @@ bool pci_check_and_unmask_intx(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
/**
- * pci_msi_off - disables any msi or msix capabilities
+ * pci_msi_off - disables any MSI or MSI-X capabilities
* @dev: the PCI device to operate on
*
- * If you want to use msi see pci_enable_msi and friends.
- * This is a lower level primitive that allows us to disable
- * msi operation at the device level.
+ * If you want to use MSI, see pci_enable_msi() and friends.
+ * This is a lower-level primitive that allows us to disable
+ * MSI operation at the device level.
*/
void pci_msi_off(struct pci_dev *dev)
{
int pos;
u16 control;
+ /*
+ * This looks like it could go in msi.c, but we need it even when
+ * CONFIG_PCI_MSI=n. For the same reason, we can't use
+ * dev->msi_cap or dev->msix_cap here.
+ */
pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
if (pos) {
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
@@ -3098,19 +3188,17 @@ int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
}
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
-static int pcie_flr(struct pci_dev *dev, int probe)
+/**
+ * pci_wait_for_pending_transaction - wait for pending transactions to clear
+ * @dev: the PCI device to operate on
+ *
+ * Return 0 if transactions are still pending after the timeout, 1 otherwise.
+ */
+int pci_wait_for_pending_transaction(struct pci_dev *dev)
{
int i;
- u32 cap;
u16 status;
- pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
- if (!(cap & PCI_EXP_DEVCAP_FLR))
- return -ENOTTY;
-
- if (probe)
- return 0;
-
	/* Wait for the Transaction Pending bit to clear */
for (i = 0; i < 4; i++) {
if (i)
@@ -3118,13 +3206,27 @@ static int pcie_flr(struct pci_dev *dev, int probe)
pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
if (!(status & PCI_EXP_DEVSTA_TRPND))
- goto clear;
+ return 1;
}
- dev_err(&dev->dev, "transaction is not cleared; "
- "proceeding with reset anyway\n");
+ return 0;
+}
+EXPORT_SYMBOL(pci_wait_for_pending_transaction);
+
+static int pcie_flr(struct pci_dev *dev, int probe)
+{
+ u32 cap;
+
+ pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
+ if (!(cap & PCI_EXP_DEVCAP_FLR))
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ if (!pci_wait_for_pending_transaction(dev))
+ dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
-clear:
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
msleep(100);
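
The polling loop that pcie_flr() used to carry inline is now the exported pci_wait_for_pending_transaction(): up to four reads of DEVSTA with an exponential back-off of 100, 200 and 400 ms between successive reads. A self-contained model of that loop with a stubbed status register:

#include <stdio.h>

#define DEVSTA_TRPND 0x20	/* Transaction Pending bit in PCI_EXP_DEVSTA */

static int reads;

/* Stand-in for pcie_capability_read_word(): still pending for two polls. */
static unsigned int read_devsta(void)
{
	return ++reads <= 2 ? DEVSTA_TRPND : 0;
}

/* Returns 1 once transactions have drained, 0 if still pending after 4 tries. */
static int wait_for_pending_transaction(void)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (i)
			printf("back off %d ms\n", (1 << (i - 1)) * 100); /* 100, 200, 400 */
		if (!(read_devsta() & DEVSTA_TRPND))
			return 1;
	}
	return 0;
}

int main(void)
{
	printf("drained: %d after %d reads\n", wait_for_pending_transaction(), reads);
	return 0;
}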
@@ -3215,9 +3317,42 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
return 0;
}
-static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
+/**
+ * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
+ * @dev: Bridge device
+ *
+ * Use the bridge control register to assert reset on the secondary bus.
+ * Devices on the secondary bus are left in power-on state.
+ */
+void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
{
u16 ctrl;
+
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
+ ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
+ /*
+ * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
+	 * this to 2ms to ensure that we meet the minimum requirement.
+ */
+ msleep(2);
+
+ ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
+
+ /*
+ * Trhfa for conventional PCI is 2^25 clock cycles.
+ * Assuming a minimum 33MHz clock this results in a 1s
+ * delay before we can consider subordinate devices to
+ * be re-initialized. PCIe has some ways to shorten this,
+ * but we don't make use of them yet.
+ */
+ ssleep(1);
+}
+EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
+
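
pci_reset_bridge_secondary_bus() is exported, so code that owns a bridge can trigger a link-level reset directly; the AER changes later in this patch switch over to it. A hedged sketch of such a caller (the surrounding recovery path is hypothetical):

/*
 * Sketch only: a hypothetical recovery path that owns a downstream port and
 * wants to yank the link below it.  Devices behind the port come back in
 * power-on state, so the caller is responsible for restoring their config
 * space afterwards.
 */
static void my_recover_downstream_link(struct pci_dev *port)
{
	if (!pci_is_pcie(port) ||
	    pci_pcie_type(port) != PCI_EXP_TYPE_DOWNSTREAM)
		return;

	pci_reset_bridge_secondary_bus(port);	/* ~2ms assert + ~1s settle */
}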
+static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
+{
struct pci_dev *pdev;
if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
@@ -3230,18 +3365,40 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
if (probe)
return 0;
- pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
- ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
- pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
- msleep(100);
-
- ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
- pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
- msleep(100);
+ pci_reset_bridge_secondary_bus(dev->bus->self);
return 0;
}
+static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
+{
+ int rc = -ENOTTY;
+
+ if (!hotplug || !try_module_get(hotplug->ops->owner))
+ return rc;
+
+ if (hotplug->ops->reset_slot)
+ rc = hotplug->ops->reset_slot(hotplug, probe);
+
+ module_put(hotplug->ops->owner);
+
+ return rc;
+}
+
+static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
+{
+ struct pci_dev *pdev;
+
+ if (dev->subordinate || !dev->slot)
+ return -ENOTTY;
+
+ list_for_each_entry(pdev, &dev->bus->devices, bus_list)
+ if (pdev != dev && pdev->slot == dev->slot)
+ return -ENOTTY;
+
+ return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
+}
+
static int __pci_dev_reset(struct pci_dev *dev, int probe)
{
int rc;
@@ -3264,27 +3421,65 @@ static int __pci_dev_reset(struct pci_dev *dev, int probe)
if (rc != -ENOTTY)
goto done;
+ rc = pci_dev_reset_slot_function(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
rc = pci_parent_bus_reset(dev, probe);
done:
return rc;
}
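
__pci_dev_reset() keeps trying reset methods in a fixed order (device-specific quirk, the FLR variants, PM reset, the new slot reset and finally a parent-bus reset), treating -ENOTTY as "not applicable, try the next one". The fallback pattern in isolation, as a standalone model:

#include <errno.h>
#include <stdio.h>

typedef int (*reset_fn)(void *dev, int probe);

static int try_reset_methods(void *dev, int probe, reset_fn *methods, int n)
{
	int i, rc = -ENOTTY;

	for (i = 0; i < n; i++) {
		rc = methods[i](dev, probe);
		if (rc != -ENOTTY)	/* handled: success or a real error */
			break;
	}
	return rc;
}

/* Two toy methods: the first is never applicable, the second succeeds. */
static int not_applicable(void *dev, int probe) { (void)dev; (void)probe; return -ENOTTY; }
static int always_works(void *dev, int probe)   { (void)dev; (void)probe; return 0; }

int main(void)
{
	reset_fn methods[] = { not_applicable, always_works };

	printf("rc = %d\n", try_reset_methods(NULL, 0, methods, 2));	/* rc = 0 */
	return 0;
}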
+static void pci_dev_lock(struct pci_dev *dev)
+{
+ pci_cfg_access_lock(dev);
+ /* block PM suspend, driver probe, etc. */
+ device_lock(&dev->dev);
+}
+
+static void pci_dev_unlock(struct pci_dev *dev)
+{
+ device_unlock(&dev->dev);
+ pci_cfg_access_unlock(dev);
+}
+
+static void pci_dev_save_and_disable(struct pci_dev *dev)
+{
+ /*
+ * Wake-up device prior to save. PM registers default to D0 after
+ * reset and a simple register restore doesn't reliably return
+ * to a non-D0 state anyway.
+ */
+ pci_set_power_state(dev, PCI_D0);
+
+ pci_save_state(dev);
+ /*
+ * Disable the device by clearing the Command register, except for
+ * INTx-disable which is set. This not only disables MMIO and I/O port
+ * BARs, but also prevents the device from being Bus Master, preventing
+ * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
+ * compliant devices, INTx-disable prevents legacy interrupts.
+ */
+ pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+}
+
+static void pci_dev_restore(struct pci_dev *dev)
+{
+ pci_restore_state(dev);
+}
+
static int pci_dev_reset(struct pci_dev *dev, int probe)
{
int rc;
- if (!probe) {
- pci_cfg_access_lock(dev);
- /* block PM suspend, driver probe, etc. */
- device_lock(&dev->dev);
- }
+ if (!probe)
+ pci_dev_lock(dev);
rc = __pci_dev_reset(dev, probe);
- if (!probe) {
- device_unlock(&dev->dev);
- pci_cfg_access_unlock(dev);
- }
+ if (!probe)
+ pci_dev_unlock(dev);
+
return rc;
}
/**
@@ -3375,22 +3570,249 @@ int pci_reset_function(struct pci_dev *dev)
if (rc)
return rc;
- pci_save_state(dev);
-
- /*
- * both INTx and MSI are disabled after the Interrupt Disable bit
- * is set and the Bus Master bit is cleared.
- */
- pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+ pci_dev_save_and_disable(dev);
rc = pci_dev_reset(dev, 0);
- pci_restore_state(dev);
+ pci_dev_restore(dev);
return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
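
With pci_dev_save_and_disable()/pci_dev_restore() folded in, pci_reset_function() handles the config-space save and restore itself; a driver only has to quiesce its own use of the device around the call. A hedged driver-side sketch — the mydrv_* helpers are hypothetical:

/* Sketch: recover a wedged function from a hypothetical driver. */
static int mydrv_recover(struct pci_dev *pdev)
{
	int rc;

	mydrv_stop_io(pdev);		/* hypothetical: stop DMA/IRQ use first */

	rc = pci_reset_function(pdev);	/* saves state, resets, restores state */
	if (rc)
		return rc;

	return mydrv_start_io(pdev);	/* hypothetical: reprogram and resume */
}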
+/* Lock devices from the top of the tree down */
+static void pci_bus_lock(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_dev_lock(dev);
+ if (dev->subordinate)
+ pci_bus_lock(dev->subordinate);
+ }
+}
+
+/* Unlock devices from the bottom of the tree up */
+static void pci_bus_unlock(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+ pci_dev_unlock(dev);
+ }
+}
+
+/* Lock devices from the top of the tree down */
+static void pci_slot_lock(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ pci_dev_lock(dev);
+ if (dev->subordinate)
+ pci_bus_lock(dev->subordinate);
+ }
+}
+
+/* Unlock devices from the bottom of the tree up */
+static void pci_slot_unlock(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ if (dev->subordinate)
+ pci_bus_unlock(dev->subordinate);
+ pci_dev_unlock(dev);
+ }
+}
+
+/* Save and disable devices from the top of the tree down */
+static void pci_bus_save_and_disable(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_dev_save_and_disable(dev);
+ if (dev->subordinate)
+ pci_bus_save_and_disable(dev->subordinate);
+ }
+}
+
+/*
+ * Restore devices from top of the tree down - parent bridges need to be
+ * restored before we can get to subordinate devices.
+ */
+static void pci_bus_restore(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pci_dev_restore(dev);
+ if (dev->subordinate)
+ pci_bus_restore(dev->subordinate);
+ }
+}
+
+/* Save and disable devices from the top of the tree down */
+static void pci_slot_save_and_disable(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ pci_dev_save_and_disable(dev);
+ if (dev->subordinate)
+ pci_bus_save_and_disable(dev->subordinate);
+ }
+}
+
+/*
+ * Restore devices from top of the tree down - parent bridges need to be
+ * restored before we can get to subordinate devices.
+ */
+static void pci_slot_restore(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ pci_dev_restore(dev);
+ if (dev->subordinate)
+ pci_bus_restore(dev->subordinate);
+ }
+}
+
+static int pci_slot_reset(struct pci_slot *slot, int probe)
+{
+ int rc;
+
+ if (!slot)
+ return -ENOTTY;
+
+ if (!probe)
+ pci_slot_lock(slot);
+
+ might_sleep();
+
+ rc = pci_reset_hotplug_slot(slot->hotplug, probe);
+
+ if (!probe)
+ pci_slot_unlock(slot);
+
+ return rc;
+}
+
+/**
+ * pci_probe_reset_slot - probe whether a PCI slot can be reset
+ * @slot: PCI slot to probe
+ *
+ * Return 0 if slot can be reset, negative if a slot reset is not supported.
+ */
+int pci_probe_reset_slot(struct pci_slot *slot)
+{
+ return pci_slot_reset(slot, 1);
+}
+EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
+
+/**
+ * pci_reset_slot - reset a PCI slot
+ * @slot: PCI slot to reset
+ *
+ * A PCI bus may host multiple slots, and each slot may support a reset mechanism
+ * independent of other slots. For instance, some slots may support slot power
+ * control. In the case of a 1:1 bus to slot architecture, this function may
+ * wrap the bus reset to avoid spurious slot related events such as hotplug.
+ * Generally a slot reset should be attempted before a bus reset. All of the
+ * functions of the slot and any subordinate buses behind the slot are reset
+ * through this function. PCI config space of all devices in the slot and
+ * behind the slot is saved before and restored after reset.
+ *
+ * Return 0 on success, non-zero on error.
+ */
+int pci_reset_slot(struct pci_slot *slot)
+{
+ int rc;
+
+ rc = pci_slot_reset(slot, 1);
+ if (rc)
+ return rc;
+
+ pci_slot_save_and_disable(slot);
+
+ rc = pci_slot_reset(slot, 0);
+
+ pci_slot_restore(slot);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_slot);
+
+static int pci_bus_reset(struct pci_bus *bus, int probe)
+{
+ if (!bus->self)
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ pci_bus_lock(bus);
+
+ might_sleep();
+
+ pci_reset_bridge_secondary_bus(bus->self);
+
+ pci_bus_unlock(bus);
+
+ return 0;
+}
+
+/**
+ * pci_probe_reset_bus - probe whether a PCI bus can be reset
+ * @bus: PCI bus to probe
+ *
+ * Return 0 if bus can be reset, negative if a bus reset is not supported.
+ */
+int pci_probe_reset_bus(struct pci_bus *bus)
+{
+ return pci_bus_reset(bus, 1);
+}
+EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
+
+/**
+ * pci_reset_bus - reset a PCI bus
+ * @bus: top level PCI bus to reset
+ *
+ * Do a bus reset on the given bus and any subordinate buses, saving
+ * and restoring state of all devices.
+ *
+ * Return 0 on success, non-zero on error.
+ */
+int pci_reset_bus(struct pci_bus *bus)
+{
+ int rc;
+
+ rc = pci_bus_reset(bus, 1);
+ if (rc)
+ return rc;
+
+ pci_bus_save_and_disable(bus);
+
+ rc = pci_bus_reset(bus, 0);
+
+ pci_bus_restore(bus);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(pci_reset_bus);
+
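
Both new entry points follow the same probe-then-act shape: pci_probe_reset_slot() and pci_probe_reset_bus() are cheap capability checks, while pci_reset_slot() and pci_reset_bus() perform the save/disable, reset and restore sequence. A hedged sketch of a caller that prefers the narrower slot reset and falls back to a bus reset (which also hits sibling devices on that bus):

/*
 * Sketch: prefer the slot reset for 'dev'; fall back to resetting the whole
 * bus it sits on if no slot reset is available.
 */
static int reset_dev_slot_or_bus(struct pci_dev *dev)
{
	if (dev->slot && !pci_probe_reset_slot(dev->slot))
		return pci_reset_slot(dev->slot);

	if (!pci_probe_reset_bus(dev->bus))
		return pci_reset_bus(dev->bus);

	return -ENOTTY;		/* neither reset is available here */
}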
/**
* pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
* @dev: PCI device to query
@@ -3525,8 +3947,6 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
int mps = pcie_get_mps(dev);
- if (mps < 0)
- return mps;
if (mps < rq)
rq = mps;
}
@@ -3543,7 +3963,6 @@ EXPORT_SYMBOL(pcie_set_readrq);
* @dev: PCI device to query
*
* Returns maximum payload size in bytes
- * or appropriate error value.
*/
int pcie_get_mps(struct pci_dev *dev)
{
@@ -3579,6 +3998,49 @@ int pcie_set_mps(struct pci_dev *dev, int mps)
}
/**
+ * pcie_get_minimum_link - determine minimum link settings of a PCI device
+ * @dev: PCI device to query
+ * @speed: storage for minimum speed
+ * @width: storage for minimum width
+ *
+ * This function will walk up the PCI device chain and determine the minimum
+ * link width and speed of the device.
+ */
+int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
+ enum pcie_link_width *width)
+{
+ int ret;
+
+ *speed = PCI_SPEED_UNKNOWN;
+ *width = PCIE_LNK_WIDTH_UNKNOWN;
+
+ while (dev) {
+ u16 lnksta;
+ enum pci_bus_speed next_speed;
+ enum pcie_link_width next_width;
+
+ ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
+ if (ret)
+ return ret;
+
+ next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
+ next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
+ PCI_EXP_LNKSTA_NLW_SHIFT;
+
+ if (next_speed < *speed)
+ *speed = next_speed;
+
+ if (next_width < *width)
+ *width = next_width;
+
+ dev = dev->bus->self;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pcie_get_minimum_link);
+
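
pcie_get_minimum_link() folds the Link Status of every hop between the device and the root into one worst-case speed/width pair, which is handy for warning when an adapter sits behind a slow or narrow link. A hedged driver-side sketch; the 8 GT/s x8 thresholds are illustrative, not a requirement of any particular device:

/* Sketch: warn if any upstream link limits this adapter. */
static void mydrv_check_link(struct pci_dev *pdev)
{
	enum pci_bus_speed speed;
	enum pcie_link_width width;

	if (pcie_get_minimum_link(pdev, &speed, &width))
		return;		/* couldn't read link status; nothing to report */

	if (speed < PCIE_SPEED_8_0GT || width < PCIE_LNK_X8)
		dev_warn(&pdev->dev,
			 "PCIe link is slower or narrower than the device can use\n");
}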
+/**
* pci_select_bars - Make BAR mask from the type of resource
* @dev: the PCI device for which BAR mask is made
* @flags: resource type mask to be selected
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d1182c4a754..8a00c063d7b 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -6,6 +6,9 @@
#define PCI_CFG_SPACE_SIZE 256
#define PCI_CFG_SPACE_EXP_SIZE 4096
+extern const unsigned char pcix_bus_speed[];
+extern const unsigned char pcie_link_speed[];
+
/* Functions internal to the PCI core code */
int pci_create_sysfs_dev_files(struct pci_dev *pdev);
@@ -151,7 +154,7 @@ static inline int pci_no_d1d2(struct pci_dev *dev)
}
extern struct device_attribute pci_dev_attrs[];
-extern struct device_attribute pcibus_dev_attrs[];
+extern const struct attribute_group *pcibus_groups[];
extern struct device_type pci_dev_type;
extern struct bus_attribute pci_bus_attrs[];
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index 569f82fc9e2..7958e59d607 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -2,7 +2,7 @@
# PCI Express Port Bus Configuration
#
config PCIEPORTBUS
- bool "PCI Express support"
+ bool "PCI Express Port Bus support"
depends on PCI
help
This automatically enables PCI Express Port Bus support. Users can
@@ -14,15 +14,12 @@ config PCIEPORTBUS
# Include service Kconfig here
#
config HOTPLUG_PCI_PCIE
- tristate "PCI Express Hotplug driver"
+ bool "PCI Express Hotplug driver"
depends on HOTPLUG_PCI && PCIEPORTBUS
help
Say Y here if you have a motherboard that supports PCI Express Native
Hotplug
- To compile this driver as a module, choose M here: the
- module will be called pciehp.
-
When in doubt, say N.
source "drivers/pci/pcie/aer/Kconfig"
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 76ef634caf6..0bf82a20a0f 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -352,7 +352,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev)
reg32 &= ~ROOT_PORT_INTR_ON_MESG_MASK;
pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, reg32);
- aer_do_secondary_bus_reset(dev);
+ pci_reset_bridge_secondary_bus(dev);
dev_printk(KERN_DEBUG, &dev->dev, "Root Port link has been reset\n");
/* Clear Root Error Status */
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 90ea3e88041..84420b7c945 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -106,7 +106,6 @@ static inline pci_ers_result_t merge_result(enum pci_ers_result orig,
}
extern struct bus_type pcie_port_bus_type;
-void aer_do_secondary_bus_reset(struct pci_dev *dev);
int aer_init(struct pcie_device *dev);
void aer_isr(struct work_struct *work);
void aer_print_error(struct pci_dev *dev, struct aer_err_info *info);
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 8b68ae59b7b..85ca36f2136 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -367,39 +367,6 @@ static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
}
/**
- * aer_do_secondary_bus_reset - perform secondary bus reset
- * @dev: pointer to bridge's pci_dev data structure
- *
- * Invoked when performing link reset at Root Port or Downstream Port.
- */
-void aer_do_secondary_bus_reset(struct pci_dev *dev)
-{
- u16 p2p_ctrl;
-
- /* Assert Secondary Bus Reset */
- pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl);
- p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
- pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
-
- /*
- * we should send hot reset message for 2ms to allow it time to
- * propagate to all downstream ports
- */
- msleep(2);
-
- /* De-assert Secondary Bus Reset */
- p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
- pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);
-
- /*
- * System software must wait for at least 100ms from the end
- * of a reset of one or more device before it is permitted
- * to issue Configuration Requests to those devices.
- */
- msleep(200);
-}
-
-/**
* default_reset_link - default reset function
* @dev: pointer to pci_dev data structure
*
@@ -408,7 +375,7 @@ void aer_do_secondary_bus_reset(struct pci_dev *dev)
*/
static pci_ers_result_t default_reset_link(struct pci_dev *dev)
{
- aer_do_secondary_bus_reset(dev);
+ pci_reset_bridge_secondary_bus(dev);
dev_printk(KERN_DEBUG, &dev->dev, "downstream link has been reset\n");
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 46ada5c098e..4f9cc93c3b5 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -96,7 +96,7 @@ static void release_pcibus_dev(struct device *dev)
static struct class pcibus_class = {
.name = "pci_bus",
.dev_release = &release_pcibus_dev,
- .dev_attrs = pcibus_dev_attrs,
+ .dev_groups = pcibus_groups,
};
static int __init pcibus_class_init(void)
@@ -156,6 +156,8 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
return flags;
}
+#define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
+
/**
* pci_read_base - read a PCI BAR
* @dev: the PCI device
@@ -178,8 +180,10 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
/* No printks while decoding is disabled! */
if (!dev->mmio_always_on) {
pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
- pci_write_config_word(dev, PCI_COMMAND,
- orig_cmd & ~(PCI_COMMAND_MEMORY | PCI_COMMAND_IO));
+ if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
+ pci_write_config_word(dev, PCI_COMMAND,
+ orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
+ }
}
res->name = pci_name(dev);
@@ -293,7 +297,8 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
fail:
res->flags = 0;
out:
- if (!dev->mmio_always_on)
+ if (!dev->mmio_always_on &&
+ (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
if (bar_too_big)
@@ -513,7 +518,7 @@ static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
return bridge;
}
-static unsigned char pcix_bus_speed[] = {
+const unsigned char pcix_bus_speed[] = {
PCI_SPEED_UNKNOWN, /* 0 */
PCI_SPEED_66MHz_PCIX, /* 1 */
PCI_SPEED_100MHz_PCIX, /* 2 */
@@ -532,7 +537,7 @@ static unsigned char pcix_bus_speed[] = {
PCI_SPEED_133MHz_PCIX_533 /* F */
};
-static unsigned char pcie_link_speed[] = {
+const unsigned char pcie_link_speed[] = {
PCI_SPEED_UNKNOWN, /* 0 */
PCIE_SPEED_2_5GT, /* 1 */
PCIE_SPEED_5_0GT, /* 2 */
@@ -1491,24 +1496,23 @@ static int pcie_find_smpss(struct pci_dev *dev, void *data)
if (!pci_is_pcie(dev))
return 0;
- /* For PCIE hotplug enabled slots not connected directly to a
- * PCI-E root port, there can be problems when hotplugging
- * devices. This is due to the possibility of hotplugging a
- * device into the fabric with a smaller MPS that the devices
- * currently running have configured. Modifying the MPS on the
- * running devices could cause a fatal bus error due to an
- * incoming frame being larger than the newly configured MPS.
- * To work around this, the MPS for the entire fabric must be
- * set to the minimum size. Any devices hotplugged into this
- * fabric will have the minimum MPS set. If the PCI hotplug
- * slot is directly connected to the root port and there are not
- * other devices on the fabric (which seems to be the most
- * common case), then this is not an issue and MPS discovery
- * will occur as normal.
+ /*
+ * We don't have a way to change MPS settings on devices that have
+ * drivers attached. A hot-added device might support only the minimum
+ * MPS setting (MPS=128). Therefore, if the fabric contains a bridge
+ * where devices may be hot-added, we limit the fabric MPS to 128 so
+ * hot-added devices will work correctly.
+ *
+ * However, if we hot-add a device to a slot directly below a Root
+ * Port, it's impossible for there to be other existing devices below
+ * the port. We don't limit the MPS in this case because we can
+ * reconfigure MPS on both the Root Port and the hot-added device,
+ * and there are no other devices involved.
+ *
+ * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
*/
- if (dev->is_hotplug_bridge && (!list_is_singular(&dev->bus->devices) ||
- (dev->bus->self &&
- pci_pcie_type(dev->bus->self) != PCI_EXP_TYPE_ROOT_PORT)))
+ if (dev->is_hotplug_bridge &&
+ pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
*smpss = 0;
if (*smpss > dev->pcie_mpss)
@@ -1583,6 +1587,22 @@ static void pcie_write_mrrs(struct pci_dev *dev)
"with pci=pcie_bus_safe.\n");
}
+static void pcie_bus_detect_mps(struct pci_dev *dev)
+{
+ struct pci_dev *bridge = dev->bus->self;
+ int mps, p_mps;
+
+ if (!bridge)
+ return;
+
+ mps = pcie_get_mps(dev);
+ p_mps = pcie_get_mps(bridge);
+
+ if (mps != p_mps)
+ dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
+ mps, pci_name(bridge), p_mps);
+}
+
static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
{
int mps, orig_mps;
@@ -1590,13 +1610,18 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
if (!pci_is_pcie(dev))
return 0;
+ if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
+ pcie_bus_detect_mps(dev);
+ return 0;
+ }
+
mps = 128 << *(u8 *)data;
orig_mps = pcie_get_mps(dev);
pcie_write_mps(dev, mps);
pcie_write_mrrs(dev);
- dev_info(&dev->dev, "PCI-E Max Payload Size set to %4d/%4d (was %4d), "
+ dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), "
"Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss,
orig_mps, pcie_get_readrq(dev));
@@ -1607,25 +1632,25 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
* parents then children fashion. If this changes, then this code will not
* work as designed.
*/
-void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss)
+void pcie_bus_configure_settings(struct pci_bus *bus)
{
u8 smpss;
- if (!pci_is_pcie(bus->self))
+ if (!bus->self)
return;
- if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
+ if (!pci_is_pcie(bus->self))
return;
/* FIXME - Peer to peer DMA is possible, though the endpoint would need
- * to be aware to the MPS of the destination. To work around this,
+ * to be aware of the MPS of the destination. To work around this,
* simply force the MPS of the entire system to the smallest possible.
*/
if (pcie_bus_config == PCIE_BUS_PEER2PEER)
smpss = 0;
if (pcie_bus_config == PCIE_BUS_SAFE) {
- smpss = mpss;
+ smpss = bus->self->pcie_mpss;
pcie_find_smpss(bus->self, &smpss);
pci_walk_bus(bus, pcie_find_smpss, &smpss);
@@ -1979,7 +2004,6 @@ unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
max = pci_scan_child_bus(bus);
pci_assign_unassigned_bus_resources(bus);
- pci_enable_bridges(bus);
pci_bus_add_devices(bus);
return max;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index e85d23044ae..f6c31fabf3a 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3126,9 +3126,6 @@ static int reset_intel_generic_dev(struct pci_dev *dev, int probe)
static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
{
- int i;
- u16 status;
-
/*
* http://www.intel.com/content/dam/doc/datasheet/82599-10-gbe-controller-datasheet.pdf
*
@@ -3140,20 +3137,9 @@ static int reset_intel_82599_sfp_virtfn(struct pci_dev *dev, int probe)
if (probe)
return 0;
- /* Wait for Transaction Pending bit clean */
- for (i = 0; i < 4; i++) {
- if (i)
- msleep((1 << (i - 1)) * 100);
-
- pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
- if (!(status & PCI_EXP_DEVSTA_TRPND))
- goto clear;
- }
-
- dev_err(&dev->dev, "transaction is not cleared; "
- "proceeding with reset anyway\n");
+ if (!pci_wait_for_pending_transaction(dev))
+ dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
-clear:
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
msleep(100);
@@ -3208,6 +3194,83 @@ reset_complete:
return 0;
}
+/*
+ * Device-specific reset method for Chelsio T4-based adapters.
+ */
+static int reset_chelsio_generic_dev(struct pci_dev *dev, int probe)
+{
+ u16 old_command;
+ u16 msix_flags;
+
+ /*
+ * If this isn't a Chelsio T4-based device, return -ENOTTY indicating
+ * that we have no device-specific reset method.
+ */
+ if ((dev->device & 0xf000) != 0x4000)
+ return -ENOTTY;
+
+ /*
+ * If this is the "probe" phase, return 0 indicating that we can
+ * reset this device.
+ */
+ if (probe)
+ return 0;
+
+ /*
+ * T4 can wedge if there are DMAs in flight within the chip and Bus
+ * Master has been disabled. We need to have it on till the Function
+ * Level Reset completes. (BUS_MASTER is disabled in
+ * pci_reset_function()).
+ */
+ pci_read_config_word(dev, PCI_COMMAND, &old_command);
+ pci_write_config_word(dev, PCI_COMMAND,
+ old_command | PCI_COMMAND_MASTER);
+
+ /*
+ * Perform the actual device function reset, saving and restoring
+ * configuration information around the reset.
+ */
+ pci_save_state(dev);
+
+ /*
+ * T4 also suffers a Head-Of-Line blocking problem if MSI-X interrupts
+ * are disabled when an MSI-X interrupt message needs to be delivered.
+ * So we briefly re-enable MSI-X interrupts for the duration of the
+ * FLR. The pci_restore_state() below will restore the original
+ * MSI-X state.
+ */
+ pci_read_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS, &msix_flags);
+ if ((msix_flags & PCI_MSIX_FLAGS_ENABLE) == 0)
+ pci_write_config_word(dev, dev->msix_cap+PCI_MSIX_FLAGS,
+ msix_flags |
+ PCI_MSIX_FLAGS_ENABLE |
+ PCI_MSIX_FLAGS_MASKALL);
+
+ /*
+ * Start of pcie_flr() code sequence. This reset code is a copy of
+ * the guts of pcie_flr() because that's not an exported function.
+ */
+
+ if (!pci_wait_for_pending_transaction(dev))
+ dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
+
+ pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
+ msleep(100);
+
+ /*
+ * End of pcie_flr() code sequence.
+ */
+
+ /*
+ * Restore the configuration information (BAR values, etc.) including
+ * the original PCI Configuration Space Command word, and return
+ * success.
+ */
+ pci_restore_state(dev);
+ pci_write_config_word(dev, PCI_COMMAND, old_command);
+ return 0;
+}
+
#define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed
#define PCI_DEVICE_ID_INTEL_IVB_M_VGA 0x0156
#define PCI_DEVICE_ID_INTEL_IVB_M2_VGA 0x0166
@@ -3221,6 +3284,8 @@ static const struct pci_dev_reset_methods pci_dev_reset_methods[] = {
reset_ivb_igd },
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
reset_intel_generic_dev },
+ { PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
+ reset_chelsio_generic_dev },
{ 0 }
};
@@ -3295,11 +3360,61 @@ struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
return pci_dev_get(dev);
}
+/*
+ * AMD has indicated that the devices below do not support peer-to-peer
+ * in any system where they are found in the southbridge with an AMD
+ * IOMMU in the system. Multifunction devices that do not support
+ * peer-to-peer between functions can claim to support a subset of ACS.
+ * Such devices effectively enable request redirect (RR) and completion
+ * redirect (CR) since all transactions are redirected to the upstream
+ * root complex.
+ *
+ * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94086
+ * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94102
+ * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/99402
+ *
+ * 1002:4385 SBx00 SMBus Controller
+ * 1002:439c SB7x0/SB8x0/SB9x0 IDE Controller
+ * 1002:4383 SBx00 Azalia (Intel HDA)
+ * 1002:439d SB7x0/SB8x0/SB9x0 LPC host controller
+ * 1002:4384 SBx00 PCI to PCI Bridge
+ * 1002:4399 SB7x0/SB8x0/SB9x0 USB OHCI2 Controller
+ */
+static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
+{
+#ifdef CONFIG_ACPI
+ struct acpi_table_header *header = NULL;
+ acpi_status status;
+
+ /* Targeting multifunction devices on the SB (appears on root bus) */
+ if (!dev->multifunction || !pci_is_root_bus(dev->bus))
+ return -ENODEV;
+
+ /* The IVRS table describes the AMD IOMMU */
+ status = acpi_get_table("IVRS", 0, &header);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ /* Filter out flags not applicable to multifunction */
+ acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
+
+ return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
+#else
+ return -ENODEV;
+#endif
+}
+
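
The quirk's verdict is a two-step mask: the requested flags are first reduced to the ones relevant to multifunction devices, and the device is then reported as "ACS enabled" only if nothing beyond Request Redirect and Completion Redirect remains, since those are the only protections these southbridge parts effectively provide. A standalone check of that expression (constants per the ACS capability layout):

#include <stdio.h>

#define PCI_ACS_SV 0x01
#define PCI_ACS_RR 0x04
#define PCI_ACS_CR 0x08
#define PCI_ACS_EC 0x20
#define PCI_ACS_DT 0x40

/*
 * Mirror of the quirk's decision: keep only the multifunction-relevant flags,
 * then report 'enabled' only if nothing beyond RR/CR was requested.
 */
static int amd_sb_acs_enabled(unsigned int acs_flags)
{
	acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT);
	return acs_flags & ~(PCI_ACS_RR | PCI_ACS_CR) ? 0 : 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       amd_sb_acs_enabled(PCI_ACS_RR | PCI_ACS_CR),	/* 1 */
	       amd_sb_acs_enabled(PCI_ACS_SV),			/* 1: filtered out */
	       amd_sb_acs_enabled(PCI_ACS_DT));			/* 0 */
	return 0;
}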
static const struct pci_dev_acs_enabled {
u16 vendor;
u16 device;
int (*acs_enabled)(struct pci_dev *dev, u16 acs_flags);
} pci_dev_acs_enabled[] = {
+ { PCI_VENDOR_ID_ATI, 0x4385, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_ATI, 0x439c, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_ATI, 0x4383, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
+ { PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
{ 0 }
};
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index d254e237953..bc26d7990cc 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -300,6 +300,47 @@ static void assign_requested_resources_sorted(struct list_head *head,
}
}
+static unsigned long pci_fail_res_type_mask(struct list_head *fail_head)
+{
+ struct pci_dev_resource *fail_res;
+ unsigned long mask = 0;
+
+ /* check failed type */
+ list_for_each_entry(fail_res, fail_head, list)
+ mask |= fail_res->flags;
+
+ /*
+	 * a failed pref resource also sets IORESOURCE_MEM in the mask,
+	 * since pref BARs can be allocated from non-pref ranges.
+	 * All assigned non-pref sibling resources will then be released
+	 * according to that bit.
+ */
+ return mask & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH);
+}
+
+static bool pci_need_to_release(unsigned long mask, struct resource *res)
+{
+ if (res->flags & IORESOURCE_IO)
+ return !!(mask & IORESOURCE_IO);
+
+ /* check pref at first */
+ if (res->flags & IORESOURCE_PREFETCH) {
+ if (mask & IORESOURCE_PREFETCH)
+ return true;
+		/* also release pref if its parent window is non-pref */
+ else if ((mask & IORESOURCE_MEM) &&
+ !(res->parent->flags & IORESOURCE_PREFETCH))
+ return true;
+ else
+ return false;
+ }
+
+ if (res->flags & IORESOURCE_MEM)
+ return !!(mask & IORESOURCE_MEM);
+
+ return false; /* should not get here */
+}
+
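
pci_need_to_release() is essentially a small decision table over the failed-type mask and each assigned resource: I/O follows the I/O bit, prefetchable memory is released when the prefetch bit is set or when its parent window is non-prefetchable and plain memory failed, and plain memory follows the memory bit. A standalone model of the same rules (the flag bits are illustrative, not the kernel's IORESOURCE_* values):

#include <stdbool.h>
#include <stdio.h>

#define RES_IO    0x1	/* illustrative bits, not IORESOURCE_* */
#define RES_MEM   0x2
#define RES_PREF  0x4

struct res { unsigned flags; unsigned parent_flags; };

static bool need_to_release(unsigned fail_mask, const struct res *r)
{
	if (r->flags & RES_IO)
		return fail_mask & RES_IO;

	if (r->flags & RES_PREF) {
		if (fail_mask & RES_PREF)
			return true;
		/* prefetchable BAR placed in a non-prefetchable parent window */
		return (fail_mask & RES_MEM) && !(r->parent_flags & RES_PREF);
	}

	if (r->flags & RES_MEM)
		return fail_mask & RES_MEM;

	return false;
}

int main(void)
{
	struct res pref_in_mem  = { RES_MEM | RES_PREF, RES_MEM };
	struct res pref_in_pref = { RES_MEM | RES_PREF, RES_MEM | RES_PREF };

	printf("%d %d\n",
	       need_to_release(RES_MEM, &pref_in_mem),    /* 1: parent is non-pref */
	       need_to_release(RES_MEM, &pref_in_pref));  /* 0: kept */
	return 0;
}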
static void __assign_resources_sorted(struct list_head *head,
struct list_head *realloc_head,
struct list_head *fail_head)
@@ -312,11 +353,24 @@ static void __assign_resources_sorted(struct list_head *head,
* if could do that, could get out early.
* if could not do that, we still try to assign requested at first,
* then try to reassign add_size for some resources.
+ *
+	 * Check the three resource types separately to decide whether we need
+	 * to release assigned resources after the requested + add_size try:
+	 *	1. if an io port assignment fails, release the assigned
+	 *	   io ports.
+	 *	2. if a pref mmio assignment fails, release the assigned
+	 *	   pref mmio.
+	 *	   if an assigned pref mmio's parent is non-pref mmio and a
+	 *	   non-pref mmio assignment fails, also release that assigned
+	 *	   pref mmio.
+	 *	3. if a non-pref mmio or a pref mmio assignment fails, release
+	 *	   the assigned non-pref mmio.
*/
LIST_HEAD(save_head);
LIST_HEAD(local_fail_head);
struct pci_dev_resource *save_res;
- struct pci_dev_resource *dev_res;
+ struct pci_dev_resource *dev_res, *tmp_res;
+ unsigned long fail_type;
/* Check if optional add_size is there */
if (!realloc_head || list_empty(realloc_head))
@@ -348,6 +402,19 @@ static void __assign_resources_sorted(struct list_head *head,
return;
}
+ /* check failed type */
+ fail_type = pci_fail_res_type_mask(&local_fail_head);
+	/* remove assigned resources that don't need to be released from the head list */
+ list_for_each_entry_safe(dev_res, tmp_res, head, list)
+ if (dev_res->res->parent &&
+ !pci_need_to_release(fail_type, dev_res->res)) {
+ /* remove it from realloc_head list */
+ remove_from_list(realloc_head, dev_res->res);
+ remove_from_list(&save_head, dev_res->res);
+ list_del(&dev_res->list);
+ kfree(dev_res);
+ }
+
free_list(&local_fail_head);
/* Release assigned resource */
list_for_each_entry(dev_res, head, list)
@@ -747,14 +814,14 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
{
struct pci_dev *dev;
struct resource *b_res = find_free_bus_resource(bus, IORESOURCE_IO);
- unsigned long size = 0, size0 = 0, size1 = 0;
+ resource_size_t size = 0, size0 = 0, size1 = 0;
resource_size_t children_add_size = 0;
- resource_size_t min_align, io_align, align;
+ resource_size_t min_align, align;
if (!b_res)
return;
- io_align = min_align = window_alignment(bus, IORESOURCE_IO);
+ min_align = window_alignment(bus, IORESOURCE_IO);
list_for_each_entry(dev, &bus->devices, bus_list) {
int i;
@@ -781,9 +848,6 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
}
}
- if (min_align > io_align)
- min_align = io_align;
-
size0 = calculate_iosize(size, min_size, size1,
resource_size(b_res), min_align);
if (children_add_size > add_size)
@@ -807,8 +871,9 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
add_to_list(realloc_head, bus->self, b_res, size1-size0,
min_align);
dev_printk(KERN_DEBUG, &bus->self->dev, "bridge window "
- "%pR to %pR add_size %lx\n", b_res,
- &bus->busn_res, size1-size0);
+ "%pR to %pR add_size %llx\n", b_res,
+ &bus->busn_res,
+ (unsigned long long)size1-size0);
}
}
@@ -838,6 +903,8 @@ static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
* pbus_size_mem() - size the memory window of a given bus
*
* @bus : the bus
+ * @mask: mask the resource flag, then compare it with type
+ * @type: the type of free resource from bridge
* @min_size : the minimum memory window that must to be allocated
* @add_size : additional optional memory window
* @realloc_head : track the additional memory window on this list
@@ -1297,39 +1364,21 @@ static void pci_bus_dump_resources(struct pci_bus *bus)
}
}
-static int __init pci_bus_get_depth(struct pci_bus *bus)
+static int pci_bus_get_depth(struct pci_bus *bus)
{
int depth = 0;
- struct pci_dev *dev;
+ struct pci_bus *child_bus;
- list_for_each_entry(dev, &bus->devices, bus_list) {
+	list_for_each_entry(child_bus, &bus->children, node) {
int ret;
- struct pci_bus *b = dev->subordinate;
- if (!b)
- continue;
- ret = pci_bus_get_depth(b);
+ ret = pci_bus_get_depth(child_bus);
if (ret + 1 > depth)
depth = ret + 1;
}
return depth;
}
-static int __init pci_get_max_depth(void)
-{
- int depth = 0;
- struct pci_bus *bus;
-
- list_for_each_entry(bus, &pci_root_buses, node) {
- int ret;
-
- ret = pci_bus_get_depth(bus);
- if (ret > depth)
- depth = ret;
- }
-
- return depth;
-}
/*
* -1: undefined, will auto detect later
@@ -1346,7 +1395,7 @@ enum enable_type {
auto_enabled,
};
-static enum enable_type pci_realloc_enable __initdata = undefined;
+static enum enable_type pci_realloc_enable = undefined;
void __init pci_realloc_get_opt(char *str)
{
if (!strncmp(str, "off", 3))
@@ -1354,45 +1403,64 @@ void __init pci_realloc_get_opt(char *str)
else if (!strncmp(str, "on", 2))
pci_realloc_enable = user_enabled;
}
-static bool __init pci_realloc_enabled(void)
+static bool pci_realloc_enabled(enum enable_type enable)
{
- return pci_realloc_enable >= user_enabled;
+ return enable >= user_enabled;
}
-static void __init pci_realloc_detect(void)
-{
#if defined(CONFIG_PCI_IOV) && defined(CONFIG_PCI_REALLOC_ENABLE_AUTO)
- struct pci_dev *dev = NULL;
-
- if (pci_realloc_enable != undefined)
- return;
-
- for_each_pci_dev(dev) {
- int i;
+static int iov_resources_unassigned(struct pci_dev *dev, void *data)
+{
+ int i;
+ bool *unassigned = data;
- for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++) {
- struct resource *r = &dev->resource[i];
+ for (i = PCI_IOV_RESOURCES; i <= PCI_IOV_RESOURCE_END; i++) {
+ struct resource *r = &dev->resource[i];
+ struct pci_bus_region region;
- /* Not assigned, or rejected by kernel ? */
- if (r->flags && !r->start) {
- pci_realloc_enable = auto_enabled;
+ /* Not assigned or rejected by kernel? */
+ if (!r->flags)
+ continue;
- return;
- }
+ pcibios_resource_to_bus(dev, &region, r);
+ if (!region.start) {
+ *unassigned = true;
+ return 1; /* return early from pci_walk_bus() */
}
}
-#endif
+
+ return 0;
}
+static enum enable_type pci_realloc_detect(struct pci_bus *bus,
+ enum enable_type enable_local)
+{
+ bool unassigned = false;
+
+ if (enable_local != undefined)
+ return enable_local;
+
+ pci_walk_bus(bus, iov_resources_unassigned, &unassigned);
+ if (unassigned)
+ return auto_enabled;
+
+ return enable_local;
+}
+#else
+static enum enable_type pci_realloc_detect(struct pci_bus *bus,
+ enum enable_type enable_local)
+{
+ return enable_local;
+}
+#endif
+
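
iov_resources_unassigned() uses the usual pci_walk_bus() idiom: the callback reports through the opaque data pointer and returns non-zero to cut the walk short. A hedged sketch of the same idiom answering a different question; the helper names are hypothetical:

/* Sketch: count driverless devices below 'bus' using the pci_walk_bus() idiom. */
static int count_driverless(struct pci_dev *dev, void *data)
{
	int *count = data;

	if (!dev->driver)
		(*count)++;

	return 0;	/* keep walking; non-zero would stop the walk early */
}

static int bus_driverless_count(struct pci_bus *bus)
{
	int count = 0;

	pci_walk_bus(bus, count_driverless, &count);
	return count;
}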
/*
* first try will not touch pci bridge res
* second and later try will clear small leaf bridge res
 * and will stop at the max depth if it cannot find a good one
*/
-void __init
-pci_assign_unassigned_resources(void)
+void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
{
- struct pci_bus *bus;
LIST_HEAD(realloc_head); /* list of resources that
want additional resources */
struct list_head *add_list = NULL;
@@ -1403,15 +1471,17 @@ pci_assign_unassigned_resources(void)
unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM |
IORESOURCE_PREFETCH;
int pci_try_num = 1;
+ enum enable_type enable_local;
/* don't realloc if asked to do so */
- pci_realloc_detect();
- if (pci_realloc_enabled()) {
- int max_depth = pci_get_max_depth();
+ enable_local = pci_realloc_detect(bus, pci_realloc_enable);
+ if (pci_realloc_enabled(enable_local)) {
+ int max_depth = pci_bus_get_depth(bus);
pci_try_num = max_depth + 1;
- printk(KERN_DEBUG "PCI: max bus depth: %d pci_try_num: %d\n",
- max_depth, pci_try_num);
+ dev_printk(KERN_DEBUG, &bus->dev,
+ "max bus depth: %d pci_try_num: %d\n",
+ max_depth, pci_try_num);
}
again:
@@ -1423,32 +1493,30 @@ again:
add_list = &realloc_head;
/* Depth first, calculate sizes and alignments of all
subordinate buses. */
- list_for_each_entry(bus, &pci_root_buses, node)
- __pci_bus_size_bridges(bus, add_list);
+ __pci_bus_size_bridges(bus, add_list);
/* Depth last, allocate resources and update the hardware. */
- list_for_each_entry(bus, &pci_root_buses, node)
- __pci_bus_assign_resources(bus, add_list, &fail_head);
+ __pci_bus_assign_resources(bus, add_list, &fail_head);
if (add_list)
BUG_ON(!list_empty(add_list));
tried_times++;
/* any device complain? */
if (list_empty(&fail_head))
- goto enable_and_dump;
+ goto dump;
if (tried_times >= pci_try_num) {
- if (pci_realloc_enable == undefined)
- printk(KERN_INFO "Some PCI device resources are unassigned, try booting with pci=realloc\n");
- else if (pci_realloc_enable == auto_enabled)
- printk(KERN_INFO "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
+ if (enable_local == undefined)
+ dev_info(&bus->dev, "Some PCI device resources are unassigned, try booting with pci=realloc\n");
+ else if (enable_local == auto_enabled)
+ dev_info(&bus->dev, "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
free_list(&fail_head);
- goto enable_and_dump;
+ goto dump;
}
- printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
- tried_times + 1);
+ dev_printk(KERN_DEBUG, &bus->dev,
+ "No. %d try to assign unassigned res\n", tried_times + 1);
/* third times and later will not check if it is leaf */
if ((tried_times + 1) > 2)
@@ -1458,12 +1526,11 @@ again:
	 * Try to release leaf bridge's resources that don't fit the resources of
	 * child devices under that bridge
*/
- list_for_each_entry(fail_res, &fail_head, list) {
- bus = fail_res->dev->bus;
- pci_bus_release_bridge_resources(bus,
+ list_for_each_entry(fail_res, &fail_head, list)
+ pci_bus_release_bridge_resources(fail_res->dev->bus,
fail_res->flags & type_mask,
rel_type);
- }
+
/* restore size and flags */
list_for_each_entry(fail_res, &fail_head, list) {
struct resource *res = fail_res->res;
@@ -1478,14 +1545,17 @@ again:
goto again;
-enable_and_dump:
- /* Depth last, update the hardware. */
- list_for_each_entry(bus, &pci_root_buses, node)
- pci_enable_bridges(bus);
-
+dump:
/* dump the resource on buses */
- list_for_each_entry(bus, &pci_root_buses, node)
- pci_bus_dump_resources(bus);
+ pci_bus_dump_resources(bus);
+}
+
+void __init pci_assign_unassigned_resources(void)
+{
+ struct pci_bus *root_bus;
+
+ list_for_each_entry(root_bus, &pci_root_buses, node)
+ pci_assign_unassigned_root_bus_resources(root_bus);
}
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
@@ -1522,13 +1592,11 @@ again:
	 * Try to release leaf bridge's resources that don't fit the resources of
	 * child devices under that bridge
*/
- list_for_each_entry(fail_res, &fail_head, list) {
- struct pci_bus *bus = fail_res->dev->bus;
- unsigned long flags = fail_res->flags;
-
- pci_bus_release_bridge_resources(bus, flags & type_mask,
+ list_for_each_entry(fail_res, &fail_head, list)
+ pci_bus_release_bridge_resources(fail_res->dev->bus,
+ fail_res->flags & type_mask,
whole_subtree);
- }
+
/* restore size and flags */
list_for_each_entry(fail_res, &fail_head, list) {
struct resource *res = fail_res->res;
@@ -1548,7 +1616,6 @@ enable_all:
if (retval)
dev_err(&bridge->dev, "Error reenabling bridge (%d)\n", retval);
pci_set_master(bridge);
- pci_enable_bridges(parent);
}
EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index 9d3ac998fc1..b2a98cdbd0d 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -91,7 +91,6 @@ int __ref cb_alloc(struct pcmcia_socket *s)
if (s->tune_bridge)
s->tune_bridge(s, bus);
- pci_enable_bridges(bus);
pci_bus_add_devices(bus);
return 0;
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index 5a8ad513931..b6e864e8c9e 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -61,7 +61,7 @@ config PINCTRL_AT91
config PINCTRL_BAYTRAIL
bool "Intel Baytrail GPIO pin control"
depends on GPIOLIB && ACPI && X86
- select IRQ_DOMAIN
+ select IRQ_DOMAIN
help
driver for memory mapped GPIO functionality on Intel Baytrail
platforms. Supports 3 banks with 102, 28 and 44 gpios.
@@ -252,7 +252,7 @@ config PINCTRL_SAMSUNG
config PINCTRL_EXYNOS
bool "Pinctrl driver data for Samsung EXYNOS SoCs other than 5440"
- depends on OF && GPIOLIB && ARCH_EXYNOS
+ depends on OF && GPIOLIB && (ARCH_EXYNOS || ARCH_S5PV210)
select PINCTRL_SAMSUNG
config PINCTRL_EXYNOS5440
@@ -261,6 +261,17 @@ config PINCTRL_EXYNOS5440
select PINMUX
select PINCONF
+config PINCTRL_PALMAS
+ bool "Pinctrl driver for the PALMAS Series MFD devices"
+ depends on OF && MFD_PALMAS
+ select PINMUX
+ select GENERIC_PINCONF
+ help
+	  Palmas devices support the configuration of their pins for different
+	  functions. This driver supports pinmux, push-pull and open-drain
+	  configuration for Palmas series devices such as the TPS65913 and
+	  TPS80036.
+
config PINCTRL_S3C24XX
bool "Samsung S3C24XX SoC pinctrl driver"
depends on ARCH_S3C24XX
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index d64563bf6fb..496d9bf9e1b 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -2,7 +2,7 @@
ccflags-$(CONFIG_DEBUG_PINCTRL) += -DDEBUG
-obj-$(CONFIG_PINCTRL) += core.o
+obj-$(CONFIG_PINCTRL) += core.o pinctrl-utils.o
obj-$(CONFIG_PINMUX) += pinmux.o
obj-$(CONFIG_PINCONF) += pinconf.o
ifeq ($(CONFIG_OF),y)
@@ -32,6 +32,7 @@ obj-$(CONFIG_PINCTRL_NOMADIK) += pinctrl-nomadik.o
obj-$(CONFIG_PINCTRL_STN8815) += pinctrl-nomadik-stn8815.o
obj-$(CONFIG_PINCTRL_DB8500) += pinctrl-nomadik-db8500.o
obj-$(CONFIG_PINCTRL_DB8540) += pinctrl-nomadik-db8540.o
+obj-$(CONFIG_PINCTRL_PALMAS) += pinctrl-palmas.o
obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o
obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o
obj-$(CONFIG_PINCTRL_SIRF) += sirf/
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 5b272bfd261..92f86ab30a1 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -153,9 +153,7 @@ int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name)
pin = pctldev->desc->pins[i].number;
desc = pin_desc_get(pctldev, pin);
/* Pin space may be sparse */
- if (desc == NULL)
- continue;
- if (desc->name && !strcmp(name, desc->name))
+ if (desc && !strcmp(name, desc->name))
return pin;
}
@@ -357,14 +355,17 @@ static bool pinctrl_ready_for_gpio_range(unsigned gpio)
/* Loop over the pin controllers */
list_for_each_entry(pctldev, &pinctrldev_list, node) {
/* Loop over the ranges */
+ mutex_lock(&pctldev->mutex);
list_for_each_entry(range, &pctldev->gpio_ranges, node) {
/* Check if any gpio range overlapped with gpio chip */
if (range->base + range->npins - 1 < chip->base ||
range->base > chip->base + chip->ngpio - 1)
continue;
+ mutex_unlock(&pctldev->mutex);
mutex_unlock(&pinctrldev_list_mutex);
return true;
}
+ mutex_unlock(&pctldev->mutex);
}
mutex_unlock(&pinctrldev_list_mutex);
@@ -392,6 +393,8 @@ static int pinctrl_get_device_gpio_range(unsigned gpio,
{
struct pinctrl_dev *pctldev = NULL;
+ mutex_lock(&pinctrldev_list_mutex);
+
/* Loop over the pin controllers */
list_for_each_entry(pctldev, &pinctrldev_list, node) {
struct pinctrl_gpio_range *range;
@@ -400,10 +403,13 @@ static int pinctrl_get_device_gpio_range(unsigned gpio,
if (range != NULL) {
*outdev = pctldev;
*outrange = range;
+ mutex_unlock(&pinctrldev_list_mutex);
return 0;
}
}
+ mutex_unlock(&pinctrldev_list_mutex);
+
return -EPROBE_DEFER;
}
@@ -556,11 +562,15 @@ int pinctrl_request_gpio(unsigned gpio)
return ret;
}
+ mutex_lock(&pctldev->mutex);
+
/* Convert to the pin controllers number space */
pin = gpio_to_pin(range, gpio);
ret = pinmux_request_gpio(pctldev, range, pin, gpio);
+ mutex_unlock(&pctldev->mutex);
+
return ret;
}
EXPORT_SYMBOL_GPL(pinctrl_request_gpio);
@@ -1193,6 +1203,7 @@ void pinctrl_unregister_map(struct pinctrl_map const *map)
list_for_each_entry(maps_node, &pinctrl_maps, node) {
if (maps_node->maps == map) {
list_del(&maps_node->node);
+ kfree(maps_node);
mutex_unlock(&pinctrl_maps_mutex);
return;
}
@@ -1227,23 +1238,36 @@ EXPORT_SYMBOL_GPL(pinctrl_force_default);
#ifdef CONFIG_PM
/**
- * pinctrl_pm_select_default_state() - select default pinctrl state for PM
+ * pinctrl_pm_select_state() - select pinctrl state for PM
* @dev: device to select default state for
+ * @state: state to set
*/
-int pinctrl_pm_select_default_state(struct device *dev)
+static int pinctrl_pm_select_state(struct device *dev,
+ struct pinctrl_state *state)
{
struct dev_pin_info *pins = dev->pins;
int ret;
- if (!pins)
- return 0;
- if (IS_ERR(pins->default_state))
- return 0; /* No default state */
- ret = pinctrl_select_state(pins->p, pins->default_state);
+ if (IS_ERR(state))
+ return 0; /* No such state */
+ ret = pinctrl_select_state(pins->p, state);
if (ret)
- dev_err(dev, "failed to activate default pinctrl state\n");
+ dev_err(dev, "failed to activate pinctrl state %s\n",
+ state->name);
return ret;
}
+
+/**
+ * pinctrl_pm_select_default_state() - select default pinctrl state for PM
+ * @dev: device to select default state for
+ */
+int pinctrl_pm_select_default_state(struct device *dev)
+{
+ if (!dev->pins)
+ return 0;
+
+ return pinctrl_pm_select_state(dev, dev->pins->default_state);
+}
EXPORT_SYMBOL_GPL(pinctrl_pm_select_default_state);
/**
@@ -1252,17 +1276,10 @@ EXPORT_SYMBOL_GPL(pinctrl_pm_select_default_state);
*/
int pinctrl_pm_select_sleep_state(struct device *dev)
{
- struct dev_pin_info *pins = dev->pins;
- int ret;
-
- if (!pins)
+ if (!dev->pins)
return 0;
- if (IS_ERR(pins->sleep_state))
- return 0; /* No sleep state */
- ret = pinctrl_select_state(pins->p, pins->sleep_state);
- if (ret)
- dev_err(dev, "failed to activate pinctrl sleep state\n");
- return ret;
+
+ return pinctrl_pm_select_state(dev, dev->pins->sleep_state);
}
EXPORT_SYMBOL_GPL(pinctrl_pm_select_sleep_state);
@@ -1272,17 +1289,10 @@ EXPORT_SYMBOL_GPL(pinctrl_pm_select_sleep_state);
*/
int pinctrl_pm_select_idle_state(struct device *dev)
{
- struct dev_pin_info *pins = dev->pins;
- int ret;
-
- if (!pins)
+ if (!dev->pins)
return 0;
- if (IS_ERR(pins->idle_state))
- return 0; /* No idle state */
- ret = pinctrl_select_state(pins->p, pins->idle_state);
- if (ret)
- dev_err(dev, "failed to activate pinctrl idle state\n");
- return ret;
+
+ return pinctrl_pm_select_state(dev, dev->pins->idle_state);
}
EXPORT_SYMBOL_GPL(pinctrl_pm_select_idle_state);
#endif
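
The core.c hunks above collapse pinctrl_pm_select_default_state(), _sleep_state() and _idle_state() into one internal pinctrl_pm_select_state() helper that treats an IS_ERR() state pointer as "state not defined" and returns 0. A minimal standalone sketch of that pattern, with a simplified error-pointer sentinel standing in for the kernel's IS_ERR():

#include <stdio.h>

struct pin_state { const char *name; };

/* Simplified stand-in for the kernel's ERR_PTR()/IS_ERR() encoding. */
#define NO_STATE ((struct pin_state *)-1)
static int state_is_err(const struct pin_state *s) { return s == NO_STATE; }

struct dev_pins {
	struct pin_state *default_state;
	struct pin_state *sleep_state;
	struct pin_state *idle_state;
};

/* One shared helper: a missing state is not an error, it is a no-op. */
static int pm_select_state(struct dev_pins *pins, struct pin_state *state)
{
	if (state_is_err(state))
		return 0;	/* no such state defined */
	printf("activating pinctrl state %s\n", state->name);
	return 0;
}

static int pm_select_sleep_state(struct dev_pins *pins)
{
	if (!pins)
		return 0;
	return pm_select_state(pins, pins->sleep_state);
}

int main(void)
{
	struct pin_state sleep = { "sleep" };
	struct dev_pins pins = { NO_STATE, &sleep, NO_STATE };

	return pm_select_sleep_state(&pins);
}
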
diff --git a/drivers/pinctrl/mvebu/pinctrl-dove.c b/drivers/pinctrl/mvebu/pinctrl-dove.c
index 048ae80adab..29f7e4fc7ca 100644
--- a/drivers/pinctrl/mvebu/pinctrl-dove.c
+++ b/drivers/pinctrl/mvebu/pinctrl-dove.c
@@ -785,7 +785,7 @@ static int dove_pinctrl_probe(struct platform_device *pdev)
clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
dev_err(&pdev->dev, "Unable to get pdma clock");
- return PTR_RET(clk);
+ return PTR_ERR(clk);
}
clk_prepare_enable(clk);
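
The one-line dove fix swaps PTR_RET() for PTR_ERR() inside an IS_ERR() branch. PTR_RET() evaluates to the error code if the pointer is an error and zero otherwise, so both return the same value here; PTR_ERR() simply states the intent once IS_ERR() has already been checked. A self-contained illustration of the pointer-encoded-error idiom (the macros below only imitate the kernel helpers):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* Illustrative stand-ins for ERR_PTR()/IS_ERR()/PTR_ERR()/PTR_RET(). */
#define ERR_PTR(err)	((void *)(intptr_t)(err))
#define IS_ERR(p)	((uintptr_t)(p) >= (uintptr_t)-4095)
#define PTR_ERR(p)	((long)(intptr_t)(p))
#define PTR_RET(p)	(IS_ERR(p) ? PTR_ERR(p) : 0L)

int main(void)
{
	void *clk = ERR_PTR(-ENODEV);

	if (IS_ERR(clk)) {
		/* Inside the branch both evaluate to -ENODEV... */
		printf("PTR_ERR=%ld PTR_RET=%ld\n", PTR_ERR(clk), PTR_RET(clk));
		/* ...but PTR_ERR() says what is meant: return the error. */
		return (int)PTR_ERR(clk);
	}
	return 0;
}
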
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index bb7ddb1bc89..0fd1ad31fbf 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -191,18 +191,27 @@ static int mvebu_pinconf_group_get(struct pinctrl_dev *pctldev,
}
static int mvebu_pinconf_group_set(struct pinctrl_dev *pctldev,
- unsigned gid, unsigned long config)
+ unsigned gid, unsigned long *configs,
+ unsigned num_configs)
{
struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
struct mvebu_pinctrl_group *grp = &pctl->groups[gid];
+ int i, ret;
if (!grp->ctrl)
return -EINVAL;
- if (grp->ctrl->mpp_set)
- return grp->ctrl->mpp_set(grp->ctrl, config);
+ for (i = 0; i < num_configs; i++) {
+ if (grp->ctrl->mpp_set)
+ ret = grp->ctrl->mpp_set(grp->ctrl, configs[i]);
+ else
+ ret = mvebu_common_mpp_set(pctl, grp, configs[i]);
- return mvebu_common_mpp_set(pctl, grp, config);
+ if (ret)
+ return ret;
+ } /* for each config */
+
+ return 0;
}
static void mvebu_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
@@ -303,6 +312,7 @@ static int mvebu_pinmux_enable(struct pinctrl_dev *pctldev, unsigned fid,
struct mvebu_pinctrl_group *grp = &pctl->groups[gid];
struct mvebu_mpp_ctrl_setting *setting;
int ret;
+ unsigned long config;
setting = mvebu_pinctrl_find_setting_by_name(pctl, grp,
func->name);
@@ -313,7 +323,8 @@ static int mvebu_pinmux_enable(struct pinctrl_dev *pctldev, unsigned fid,
return -EINVAL;
}
- ret = mvebu_pinconf_group_set(pctldev, grp->gid, setting->val);
+ config = setting->val;
+ ret = mvebu_pinconf_group_set(pctldev, grp->gid, &config, 1);
if (ret) {
dev_err(pctl->dev, "cannot set group %s to %s\n",
func->groups[gid], func->name);
@@ -329,6 +340,7 @@ static int mvebu_pinmux_gpio_request_enable(struct pinctrl_dev *pctldev,
struct mvebu_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
struct mvebu_pinctrl_group *grp;
struct mvebu_mpp_ctrl_setting *setting;
+ unsigned long config;
grp = mvebu_pinctrl_find_group_by_pid(pctl, offset);
if (!grp)
@@ -341,7 +353,9 @@ static int mvebu_pinmux_gpio_request_enable(struct pinctrl_dev *pctldev,
if (!setting)
return -ENOTSUPP;
- return mvebu_pinconf_group_set(pctldev, grp->gid, setting->val);
+ config = setting->val;
+
+ return mvebu_pinconf_group_set(pctldev, grp->gid, &config, 1);
}
static int mvebu_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
@@ -430,7 +444,7 @@ static int mvebu_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
}
*map = kmalloc(nmaps * sizeof(struct pinctrl_map), GFP_KERNEL);
- if (map == NULL) {
+ if (*map == NULL) {
dev_err(pctl->dev,
"cannot allocate pinctrl_map memory for %s\n",
np->name);
@@ -579,7 +593,7 @@ static int mvebu_pinctrl_build_functions(struct platform_device *pdev,
int mvebu_pinctrl_probe(struct platform_device *pdev)
{
struct mvebu_pinctrl_soc_info *soc = dev_get_platdata(&pdev->dev);
- struct device_node *np = pdev->dev.of_node;
+ struct resource *res;
struct mvebu_pinctrl *pctl;
void __iomem *base;
struct pinctrl_pin_desc *pdesc;
@@ -591,11 +605,10 @@ int mvebu_pinctrl_probe(struct platform_device *pdev)
return -EINVAL;
}
- base = of_iomap(np, 0);
- if (!base) {
- dev_err(&pdev->dev, "unable to get base address\n");
- return -ENODEV;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
pctl = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pinctrl),
GFP_KERNEL);
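
The mvebu hunks above follow the subsystem-wide change visible further down in this diff (pinconf.c): pin_config_set/pin_config_group_set callbacks now receive a whole array of configs and must apply them all, returning the first failure. A standalone sketch of that callback shape (the per-config setter and the config encoding are placeholders, not mvebu's):

#include <stdio.h>
#include <errno.h>

/* Placeholder per-config setter; a real driver would poke hardware here. */
static int apply_one_config(unsigned gid, unsigned long config)
{
	if (config == 0)
		return -EINVAL;		/* reject an obviously bad value */
	printf("group %u <- config 0x%lx\n", gid, config);
	return 0;
}

/* New callback shape: loop over all configs, stop at the first error. */
static int group_config_set(unsigned gid, unsigned long *configs,
			    unsigned num_configs)
{
	unsigned i;
	int ret;

	for (i = 0; i < num_configs; i++) {
		ret = apply_one_config(gid, configs[i]);
		if (ret)
			return ret;
	} /* for each config */

	return 0;
}

int main(void)
{
	unsigned long configs[] = { 0x11, 0x22, 0x33 };

	return group_config_set(0, configs, 3);
}
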
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
index 8594f033ac2..55a0ebe830a 100644
--- a/drivers/pinctrl/pinconf-generic.c
+++ b/drivers/pinctrl/pinconf-generic.c
@@ -24,6 +24,7 @@
#include <linux/of.h>
#include "core.h"
#include "pinconf.h"
+#include "pinctrl-utils.h"
#ifdef CONFIG_DEBUG_FS
@@ -236,4 +237,99 @@ out:
kfree(cfg);
return ret;
}
+
+int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np, struct pinctrl_map **map,
+ unsigned *reserved_maps, unsigned *num_maps,
+ enum pinctrl_map_type type)
+{
+ int ret;
+ const char *function;
+ struct device *dev = pctldev->dev;
+ unsigned long *configs = NULL;
+ unsigned num_configs = 0;
+ unsigned reserve;
+ struct property *prop;
+ const char *group;
+
+ ret = of_property_read_string(np, "function", &function);
+ if (ret < 0) {
+ /* EINVAL=missing, which is fine since it's optional */
+ if (ret != -EINVAL)
+ dev_err(dev, "could not parse property function\n");
+ function = NULL;
+ }
+
+ ret = pinconf_generic_parse_dt_config(np, &configs, &num_configs);
+ if (ret < 0) {
+ dev_err(dev, "could not parse node property\n");
+ return ret;
+ }
+
+ reserve = 0;
+ if (function != NULL)
+ reserve++;
+ if (num_configs)
+ reserve++;
+ ret = of_property_count_strings(np, "pins");
+ if (ret < 0) {
+ dev_err(dev, "could not parse property pins\n");
+ goto exit;
+ }
+ reserve *= ret;
+
+ ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps,
+ num_maps, reserve);
+ if (ret < 0)
+ goto exit;
+
+ of_property_for_each_string(np, "pins", prop, group) {
+ if (function) {
+ ret = pinctrl_utils_add_map_mux(pctldev, map,
+ reserved_maps, num_maps, group,
+ function);
+ if (ret < 0)
+ goto exit;
+ }
+
+ if (num_configs) {
+ ret = pinctrl_utils_add_map_configs(pctldev, map,
+ reserved_maps, num_maps, group, configs,
+ num_configs, type);
+ if (ret < 0)
+ goto exit;
+ }
+ }
+ ret = 0;
+
+exit:
+ kfree(configs);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pinconf_generic_dt_subnode_to_map);
+
+int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np_config, struct pinctrl_map **map,
+ unsigned *num_maps, enum pinctrl_map_type type)
+{
+ unsigned reserved_maps;
+ struct device_node *np;
+ int ret;
+
+ reserved_maps = 0;
+ *map = NULL;
+ *num_maps = 0;
+
+ for_each_child_of_node(np_config, np) {
+ ret = pinconf_generic_dt_subnode_to_map(pctldev, np, map,
+ &reserved_maps, num_maps, type);
+ if (ret < 0) {
+ pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
+ return ret;
+ }
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinconf_generic_dt_node_to_map);
+
#endif
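
pinconf_generic_dt_subnode_to_map() sizes the map array up front: one mux entry per pin if a "function" property is present, plus one configs entry per pin if any generic configs were parsed, multiplied by the number of strings in the "pins" property. A small standalone check of that arithmetic:

#include <stdio.h>

/* reserve = (mux entry per pin if "function" is given)
 *         + (configs entry per pin if any configs were parsed),
 * multiplied by the number of strings in the "pins" property. */
static unsigned reserve_maps(int has_function, unsigned num_configs,
			     unsigned num_pins)
{
	unsigned per_pin = 0;

	if (has_function)
		per_pin++;
	if (num_configs)
		per_pin++;
	return per_pin * num_pins;
}

int main(void)
{
	/* e.g. function + 2 configs over 4 pins -> 8 map entries */
	printf("%u\n", reserve_maps(1, 2, 4));
	/* configs only over 3 pins -> 3 map entries */
	printf("%u\n", reserve_maps(0, 1, 3));
	return 0;
}
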
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
index e875f21a590..a138965c01c 100644
--- a/drivers/pinctrl/pinconf.c
+++ b/drivers/pinctrl/pinconf.c
@@ -158,7 +158,7 @@ int pinconf_apply_setting(struct pinctrl_setting const *setting)
{
struct pinctrl_dev *pctldev = setting->pctldev;
const struct pinconf_ops *ops = pctldev->desc->confops;
- int i, ret;
+ int ret;
if (!ops) {
dev_err(pctldev->dev, "missing confops\n");
@@ -171,17 +171,15 @@ int pinconf_apply_setting(struct pinctrl_setting const *setting)
dev_err(pctldev->dev, "missing pin_config_set op\n");
return -EINVAL;
}
- for (i = 0; i < setting->data.configs.num_configs; i++) {
- ret = ops->pin_config_set(pctldev,
- setting->data.configs.group_or_pin,
- setting->data.configs.configs[i]);
- if (ret < 0) {
- dev_err(pctldev->dev,
- "pin_config_set op failed for pin %d config %08lx\n",
- setting->data.configs.group_or_pin,
- setting->data.configs.configs[i]);
- return ret;
- }
+ ret = ops->pin_config_set(pctldev,
+ setting->data.configs.group_or_pin,
+ setting->data.configs.configs,
+ setting->data.configs.num_configs);
+ if (ret < 0) {
+ dev_err(pctldev->dev,
+ "pin_config_set op failed for pin %d\n",
+ setting->data.configs.group_or_pin);
+ return ret;
}
break;
case PIN_MAP_TYPE_CONFIGS_GROUP:
@@ -190,17 +188,15 @@ int pinconf_apply_setting(struct pinctrl_setting const *setting)
"missing pin_config_group_set op\n");
return -EINVAL;
}
- for (i = 0; i < setting->data.configs.num_configs; i++) {
- ret = ops->pin_config_group_set(pctldev,
- setting->data.configs.group_or_pin,
- setting->data.configs.configs[i]);
- if (ret < 0) {
- dev_err(pctldev->dev,
- "pin_config_group_set op failed for group %d config %08lx\n",
- setting->data.configs.group_or_pin,
- setting->data.configs.configs[i]);
- return ret;
- }
+ ret = ops->pin_config_group_set(pctldev,
+ setting->data.configs.group_or_pin,
+ setting->data.configs.configs,
+ setting->data.configs.num_configs);
+ if (ret < 0) {
+ dev_err(pctldev->dev,
+ "pin_config_group_set op failed for group %d\n",
+ setting->data.configs.group_or_pin);
+ return ret;
}
break;
default:
@@ -428,12 +424,11 @@ static int pinconf_dbg_config_print(struct seq_file *s, void *d)
{
struct pinctrl_maps *maps_node;
const struct pinctrl_map *map;
- struct pinctrl_dev *pctldev = NULL;
+ const struct pinctrl_map *found = NULL;
+ struct pinctrl_dev *pctldev;
const struct pinconf_ops *confops = NULL;
- const struct pinctrl_map_configs *configs;
struct dbg_cfg *dbg = &pinconf_dbg_conf;
int i, j;
- bool found = false;
unsigned long config;
mutex_lock(&pinctrl_maps_mutex);
@@ -450,14 +445,8 @@ static int pinconf_dbg_config_print(struct seq_file *s, void *d)
for (j = 0; j < map->data.configs.num_configs; j++) {
if (!strcmp(map->data.configs.group_or_pin,
dbg->pin_name)) {
- /*
- * We found the right pin / state, read the
- * config and he pctldev for later use
- */
- configs = &map->data.configs;
- pctldev = get_pinctrl_dev_from_devname
- (map->ctrl_dev_name);
- found = true;
+ /* We found the right pin / state */
+ found = map;
break;
}
}
@@ -473,7 +462,8 @@ static int pinconf_dbg_config_print(struct seq_file *s, void *d)
goto exit;
}
- config = *(configs->configs);
+ pctldev = get_pinctrl_dev_from_devname(found->ctrl_dev_name);
+ config = *found->data.configs.configs;
seq_printf(s, "Dev %s has config of %s in state %s: 0x%08lX\n",
dbg->dev_name, dbg->pin_name,
dbg->state_name, config);
@@ -505,12 +495,12 @@ static int pinconf_dbg_config_write(struct file *file,
{
struct pinctrl_maps *maps_node;
const struct pinctrl_map *map;
- struct pinctrl_dev *pctldev = NULL;
+ const struct pinctrl_map *found = NULL;
+ struct pinctrl_dev *pctldev;
const struct pinconf_ops *confops = NULL;
struct dbg_cfg *dbg = &pinconf_dbg_conf;
const struct pinctrl_map_configs *configs;
char config[MAX_NAME_LEN+1];
- bool found = false;
char buf[128];
char *b = &buf[0];
int buf_size;
@@ -518,7 +508,7 @@ static int pinconf_dbg_config_write(struct file *file,
int i;
/* Get userspace string and assure termination */
- buf_size = min(count, (sizeof(buf)-1));
+ buf_size = min(count, (size_t)(sizeof(buf)-1));
if (copy_from_user(buf, user_buf, buf_size))
return -EFAULT;
buf[buf_size] = 0;
@@ -588,10 +578,7 @@ static int pinconf_dbg_config_write(struct file *file,
/* we found the right pin / state, so overwrite config */
if (!strcmp(map->data.configs.group_or_pin, dbg->pin_name)) {
- found = true;
- pctldev = get_pinctrl_dev_from_devname(
- map->ctrl_dev_name);
- configs = &map->data.configs;
+ found = map;
break;
}
}
@@ -601,10 +588,12 @@ static int pinconf_dbg_config_write(struct file *file,
goto exit;
}
+ pctldev = get_pinctrl_dev_from_devname(found->ctrl_dev_name);
if (pctldev)
confops = pctldev->desc->confops;
if (confops && confops->pin_config_dbg_parse_modify) {
+ configs = &found->data.configs;
for (i = 0; i < configs->num_configs; i++) {
confops->pin_config_dbg_parse_modify(pctldev,
config,
diff --git a/drivers/pinctrl/pinctrl-abx500.c b/drivers/pinctrl/pinctrl-abx500.c
index 1d3f988c2c8..4780959e11d 100644
--- a/drivers/pinctrl/pinctrl-abx500.c
+++ b/drivers/pinctrl/pinctrl-abx500.c
@@ -426,7 +426,7 @@ static int abx500_set_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
ret = abx500_gpio_set_bits(chip,
AB8500_GPIO_ALTFUN_REG,
af.alt_bit2,
- !!(af.alta_val && BIT(1)));
+ !!(af.alta_val & BIT(1)));
} else
ret = abx500_gpio_set_bits(chip, AB8500_GPIO_SEL1_REG,
offset, 1);
@@ -447,7 +447,7 @@ static int abx500_set_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
ret = abx500_gpio_set_bits(chip,
AB8500_GPIO_ALTFUN_REG,
af.alt_bit2,
- !!(af.altb_val && BIT(1)));
+ !!(af.altb_val & BIT(1)));
break;
case ABX500_ALT_C:
@@ -457,7 +457,7 @@ static int abx500_set_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
goto out;
ret = abx500_gpio_set_bits(chip, AB8500_GPIO_ALTFUN_REG,
- af.alt_bit2, !!(af.altc_val && BIT(0)));
+ af.alt_bit2, !!(af.altc_val & BIT(0)));
if (ret < 0)
goto out;
@@ -1041,98 +1041,115 @@ static int abx500_pin_config_get(struct pinctrl_dev *pctldev,
static int abx500_pin_config_set(struct pinctrl_dev *pctldev,
unsigned pin,
- unsigned long config)
+ unsigned long *configs,
+ unsigned num_configs)
{
struct abx500_pinctrl *pct = pinctrl_dev_get_drvdata(pctldev);
struct gpio_chip *chip = &pct->chip;
unsigned offset;
int ret = -EINVAL;
- enum pin_config_param param = pinconf_to_config_param(config);
- enum pin_config_param argument = pinconf_to_config_argument(config);
-
- dev_dbg(chip->dev, "pin %d [%#lx]: %s %s\n",
- pin, config, (param == PIN_CONFIG_OUTPUT) ? "output " : "input",
- (param == PIN_CONFIG_OUTPUT) ? (argument ? "high" : "low") :
- (argument ? "pull up" : "pull down"));
-
- /* on ABx500, there is no GPIO0, so adjust the offset */
- offset = pin - 1;
-
- switch (param) {
- case PIN_CONFIG_BIAS_DISABLE:
- ret = abx500_gpio_direction_input(chip, offset);
- if (ret < 0)
- goto out;
- /*
- * Some chips only support pull down, while some actually
- * support both pull up and pull down. Such chips have
- * a "pullud" range specified for the pins that support
- * both features. If the pin is not within that range, we
- * fall back to the old bit set that only support pull down.
- */
- if (abx500_pullud_supported(chip, pin))
- ret = abx500_set_pull_updown(pct,
- pin,
- ABX500_GPIO_PULL_NONE);
- else
- /* Chip only supports pull down */
- ret = abx500_gpio_set_bits(chip, AB8500_GPIO_PUD1_REG,
- offset, ABX500_GPIO_PULL_NONE);
- break;
-
- case PIN_CONFIG_BIAS_PULL_DOWN:
- ret = abx500_gpio_direction_input(chip, offset);
- if (ret < 0)
- goto out;
- /*
- * if argument = 1 set the pull down
- * else clear the pull down
- * Some chips only support pull down, while some actually
- * support both pull up and pull down. Such chips have
- * a "pullud" range specified for the pins that support
- * both features. If the pin is not within that range, we
- * fall back to the old bit set that only support pull down.
- */
- if (abx500_pullud_supported(chip, pin))
- ret = abx500_set_pull_updown(pct,
- pin,
- argument ? ABX500_GPIO_PULL_DOWN : ABX500_GPIO_PULL_NONE);
- else
- /* Chip only supports pull down */
- ret = abx500_gpio_set_bits(chip, AB8500_GPIO_PUD1_REG,
- offset,
- argument ? ABX500_GPIO_PULL_DOWN : ABX500_GPIO_PULL_NONE);
- break;
-
- case PIN_CONFIG_BIAS_PULL_UP:
- ret = abx500_gpio_direction_input(chip, offset);
- if (ret < 0)
- goto out;
- /*
- * if argument = 1 set the pull up
- * else clear the pull up
- */
- ret = abx500_gpio_direction_input(chip, offset);
- /*
- * Some chips only support pull down, while some actually
- * support both pull up and pull down. Such chips have
- * a "pullud" range specified for the pins that support
- * both features. If the pin is not within that range, do
- * nothing
- */
- if (abx500_pullud_supported(chip, pin))
- ret = abx500_set_pull_updown(pct,
- pin,
- argument ? ABX500_GPIO_PULL_UP : ABX500_GPIO_PULL_NONE);
- break;
+ int i;
+ enum pin_config_param param;
+ enum pin_config_param argument;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ argument = pinconf_to_config_argument(configs[i]);
+
+ dev_dbg(chip->dev, "pin %d [%#lx]: %s %s\n",
+ pin, configs[i],
+ (param == PIN_CONFIG_OUTPUT) ? "output " : "input",
+ (param == PIN_CONFIG_OUTPUT) ?
+ (argument ? "high" : "low") :
+ (argument ? "pull up" : "pull down"));
+
+ /* on ABx500, there is no GPIO0, so adjust the offset */
+ offset = pin - 1;
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ ret = abx500_gpio_direction_input(chip, offset);
+ if (ret < 0)
+ goto out;
+ /*
+ * Some chips only support pull down, while some
+ * actually support both pull up and pull down. Such
+ * chips have a "pullud" range specified for the pins
+ * that support both features. If the pin is not
+ * within that range, we fall back to the old bit set
+ * that only support pull down.
+ */
+ if (abx500_pullud_supported(chip, pin))
+ ret = abx500_set_pull_updown(pct,
+ pin,
+ ABX500_GPIO_PULL_NONE);
+ else
+ /* Chip only supports pull down */
+ ret = abx500_gpio_set_bits(chip,
+ AB8500_GPIO_PUD1_REG, offset,
+ ABX500_GPIO_PULL_NONE);
+ break;
- case PIN_CONFIG_OUTPUT:
- ret = abx500_gpio_direction_output(chip, offset, argument);
- break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ ret = abx500_gpio_direction_input(chip, offset);
+ if (ret < 0)
+ goto out;
+ /*
+ * if argument = 1 set the pull down
+ * else clear the pull down
+ * Some chips only support pull down, while some
+ * actually support both pull up and pull down. Such
+ * chips have a "pullud" range specified for the pins
+ * that support both features. If the pin is not
+ * within that range, we fall back to the old bit set
+ * that only support pull down.
+ */
+ if (abx500_pullud_supported(chip, pin))
+ ret = abx500_set_pull_updown(pct,
+ pin,
+ argument ? ABX500_GPIO_PULL_DOWN :
+ ABX500_GPIO_PULL_NONE);
+ else
+ /* Chip only supports pull down */
+ ret = abx500_gpio_set_bits(chip,
+ AB8500_GPIO_PUD1_REG,
+ offset,
+ argument ? ABX500_GPIO_PULL_DOWN :
+ ABX500_GPIO_PULL_NONE);
+ break;
- default:
- dev_err(chip->dev, "illegal configuration requested\n");
- }
+ case PIN_CONFIG_BIAS_PULL_UP:
+ ret = abx500_gpio_direction_input(chip, offset);
+ if (ret < 0)
+ goto out;
+ /*
+ * if argument = 1 set the pull up
+ * else clear the pull up
+ */
+ ret = abx500_gpio_direction_input(chip, offset);
+ /*
+ * Some chips only support pull down, while some
+ * actually support both pull up and pull down. Such
+ * chips have a "pullud" range specified for the pins
+ * that support both features. If the pin is not
+ * within that range, do nothing
+ */
+ if (abx500_pullud_supported(chip, pin))
+ ret = abx500_set_pull_updown(pct,
+ pin,
+ argument ? ABX500_GPIO_PULL_UP :
+ ABX500_GPIO_PULL_NONE);
+ break;
+
+ case PIN_CONFIG_OUTPUT:
+ ret = abx500_gpio_direction_output(chip, offset,
+ argument);
+ break;
+
+ default:
+ dev_err(chip->dev, "illegal configuration requested\n");
+ }
+ } /* for each config */
out:
if (ret < 0)
dev_err(pct->dev, "%s failed (%d)\n", __func__, ret);
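
The first three abx500 hunks are a real behavioural fix: `!!(val && BIT(1))` is a logical AND, which is true whenever val is non-zero at all, while the intended `!!(val & BIT(1))` tests bit 1 specifically. A two-line demonstration:

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned alta_val = 0x1;	/* bit 1 is NOT set */

	/* logical &&: any non-zero value "passes", bit 1 is never checked */
	printf("&& : %d\n", !!(alta_val && BIT(1)));	/* prints 1 (wrong) */
	/* bitwise &: actually tests bit 1 */
	printf("&  : %d\n", !!(alta_val & BIT(1)));	/* prints 0 (right) */
	return 0;
}
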
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index b90a3a0ac53..f350fd2e170 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -325,7 +325,7 @@ static void at91_mux_disable_interrupt(void __iomem *pio, unsigned mask)
static unsigned at91_mux_get_pullup(void __iomem *pio, unsigned pin)
{
- return (readl_relaxed(pio + PIO_PUSR) >> pin) & 0x1;
+ return !((readl_relaxed(pio + PIO_PUSR) >> pin) & 0x1);
}
static void at91_mux_set_pullup(void __iomem *pio, unsigned mask, bool on)
@@ -445,7 +445,7 @@ static void at91_mux_pio3_set_debounce(void __iomem *pio, unsigned mask,
static bool at91_mux_pio3_get_pulldown(void __iomem *pio, unsigned pin)
{
- return (__raw_readl(pio + PIO_PPDSR) >> pin) & 0x1;
+ return !((__raw_readl(pio + PIO_PPDSR) >> pin) & 0x1);
}
static void at91_mux_pio3_set_pulldown(void __iomem *pio, unsigned mask, bool is_on)
@@ -736,30 +736,40 @@ static int at91_pinconf_get(struct pinctrl_dev *pctldev,
}
static int at91_pinconf_set(struct pinctrl_dev *pctldev,
- unsigned pin_id, unsigned long config)
+ unsigned pin_id, unsigned long *configs,
+ unsigned num_configs)
{
struct at91_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
unsigned mask;
void __iomem *pio;
-
- dev_dbg(info->dev, "%s:%d, pin_id=%d, config=0x%lx", __func__, __LINE__, pin_id, config);
- pio = pin_to_controller(info, pin_to_bank(pin_id));
- mask = pin_to_mask(pin_id % MAX_NB_GPIO_PER_BANK);
-
- if (config & PULL_UP && config & PULL_DOWN)
- return -EINVAL;
-
- at91_mux_set_pullup(pio, mask, config & PULL_UP);
- at91_mux_set_multidrive(pio, mask, config & MULTI_DRIVE);
- if (info->ops->set_deglitch)
- info->ops->set_deglitch(pio, mask, config & DEGLITCH);
- if (info->ops->set_debounce)
- info->ops->set_debounce(pio, mask, config & DEBOUNCE,
+ int i;
+ unsigned long config;
+
+ for (i = 0; i < num_configs; i++) {
+ config = configs[i];
+
+ dev_dbg(info->dev,
+ "%s:%d, pin_id=%d, config=0x%lx",
+ __func__, __LINE__, pin_id, config);
+ pio = pin_to_controller(info, pin_to_bank(pin_id));
+ mask = pin_to_mask(pin_id % MAX_NB_GPIO_PER_BANK);
+
+ if (config & PULL_UP && config & PULL_DOWN)
+ return -EINVAL;
+
+ at91_mux_set_pullup(pio, mask, config & PULL_UP);
+ at91_mux_set_multidrive(pio, mask, config & MULTI_DRIVE);
+ if (info->ops->set_deglitch)
+ info->ops->set_deglitch(pio, mask, config & DEGLITCH);
+ if (info->ops->set_debounce)
+ info->ops->set_debounce(pio, mask, config & DEBOUNCE,
(config & DEBOUNCE_VAL) >> DEBOUNCE_VAL_SHIFT);
- if (info->ops->set_pulldown)
- info->ops->set_pulldown(pio, mask, config & PULL_DOWN);
- if (info->ops->disable_schmitt_trig && config & DIS_SCHMIT)
- info->ops->disable_schmitt_trig(pio, mask);
+ if (info->ops->set_pulldown)
+ info->ops->set_pulldown(pio, mask, config & PULL_DOWN);
+ if (info->ops->disable_schmitt_trig && config & DIS_SCHMIT)
+ info->ops->disable_schmitt_trig(pio, mask);
+
+ } /* for each config */
return 0;
}
@@ -1241,18 +1251,22 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
switch (type) {
case IRQ_TYPE_EDGE_RISING:
+ irq_set_handler(d->irq, handle_simple_irq);
writel_relaxed(mask, pio + PIO_ESR);
writel_relaxed(mask, pio + PIO_REHLSR);
break;
case IRQ_TYPE_EDGE_FALLING:
+ irq_set_handler(d->irq, handle_simple_irq);
writel_relaxed(mask, pio + PIO_ESR);
writel_relaxed(mask, pio + PIO_FELLSR);
break;
case IRQ_TYPE_LEVEL_LOW:
+ irq_set_handler(d->irq, handle_level_irq);
writel_relaxed(mask, pio + PIO_LSR);
writel_relaxed(mask, pio + PIO_FELLSR);
break;
case IRQ_TYPE_LEVEL_HIGH:
+ irq_set_handler(d->irq, handle_level_irq);
writel_relaxed(mask, pio + PIO_LSR);
writel_relaxed(mask, pio + PIO_REHLSR);
break;
@@ -1261,6 +1275,7 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
* disable additional interrupt modes:
* fall back to default behavior
*/
+ irq_set_handler(d->irq, handle_simple_irq);
writel_relaxed(mask, pio + PIO_AIMDR);
return 0;
case IRQ_TYPE_NONE:
@@ -1402,6 +1417,8 @@ static int at91_gpio_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct at91_gpio_chip *at91_gpio = h->host_data;
+ void __iomem *pio = at91_gpio->regbase;
+ u32 mask = 1 << hw;
irq_set_lockdep_class(virq, &gpio_lock_class);
@@ -1409,8 +1426,13 @@ static int at91_gpio_irq_map(struct irq_domain *h, unsigned int virq,
* Can use the "simple" and not "edge" handler since it's
* shorter, and the AIC handles interrupts sanely.
*/
- irq_set_chip_and_handler(virq, &gpio_irqchip,
- handle_simple_irq);
+ irq_set_chip(virq, &gpio_irqchip);
+ if ((at91_gpio->ops == &at91sam9x5_ops) &&
+ (readl_relaxed(pio + PIO_AIMMR) & mask) &&
+ (readl_relaxed(pio + PIO_ELSR) & mask))
+ irq_set_handler(virq, handle_level_irq);
+ else
+ irq_set_handler(virq, handle_simple_irq);
set_irq_flags(virq, IRQF_VALID);
irq_set_chip_data(virq, at91_gpio);
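
The at91 hunks pick the flow handler from the requested trigger: level triggers get handle_level_irq(), edge triggers and the additional-mode fallback keep handle_simple_irq(), and at map time the handler is chosen from what the AIMMR/ELSR registers already report. A standalone sketch of the type-to-handler decision (the enum values are illustrative, not the kernel's IRQ_TYPE_* constants):

#include <stdio.h>

/* Illustrative trigger types; the kernel uses IRQ_TYPE_* flags instead. */
enum trig { EDGE_RISING, EDGE_FALLING, LEVEL_LOW, LEVEL_HIGH, EDGE_BOTH };

static const char *handler_for(enum trig t)
{
	switch (t) {
	case LEVEL_LOW:
	case LEVEL_HIGH:
		/* level IRQs stay masked while being handled */
		return "handle_level_irq";
	default:
		return "handle_simple_irq";	/* edge / default behaviour */
	}
}

int main(void)
{
	printf("%s\n", handler_for(LEVEL_HIGH));	/* handle_level_irq */
	printf("%s\n", handler_for(EDGE_RISING));	/* handle_simple_irq */
	return 0;
}
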
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
index e9d735dcebf..2832576d8b1 100644
--- a/drivers/pinctrl/pinctrl-baytrail.c
+++ b/drivers/pinctrl/pinctrl-baytrail.c
@@ -130,25 +130,25 @@ struct byt_gpio {
struct pinctrl_gpio_range *range;
};
+#define to_byt_gpio(c) container_of(c, struct byt_gpio, chip)
+
static void __iomem *byt_gpio_reg(struct gpio_chip *chip, unsigned offset,
int reg)
{
- struct byt_gpio *vg = container_of(chip, struct byt_gpio, chip);
+ struct byt_gpio *vg = to_byt_gpio(chip);
u32 reg_offset;
- void __iomem *ptr;
if (reg == BYT_INT_STAT_REG)
reg_offset = (offset / 32) * 4;
else
reg_offset = vg->range->pins[offset] * 16;
- ptr = (void __iomem *) (vg->reg_base + reg_offset + reg);
- return ptr;
+ return vg->reg_base + reg_offset + reg;
}
static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
{
- struct byt_gpio *vg = container_of(chip, struct byt_gpio, chip);
+ struct byt_gpio *vg = to_byt_gpio(chip);
pm_runtime_get(&vg->pdev->dev);
@@ -157,7 +157,7 @@ static int byt_gpio_request(struct gpio_chip *chip, unsigned offset)
static void byt_gpio_free(struct gpio_chip *chip, unsigned offset)
{
- struct byt_gpio *vg = container_of(chip, struct byt_gpio, chip);
+ struct byt_gpio *vg = to_byt_gpio(chip);
void __iomem *reg = byt_gpio_reg(&vg->chip, offset, BYT_CONF0_REG);
u32 value;
@@ -218,7 +218,7 @@ static int byt_gpio_get(struct gpio_chip *chip, unsigned offset)
static void byt_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
- struct byt_gpio *vg = container_of(chip, struct byt_gpio, chip);
+ struct byt_gpio *vg = to_byt_gpio(chip);
void __iomem *reg = byt_gpio_reg(chip, offset, BYT_VAL_REG);
unsigned long flags;
u32 old_val;
@@ -237,7 +237,7 @@ static void byt_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
- struct byt_gpio *vg = container_of(chip, struct byt_gpio, chip);
+ struct byt_gpio *vg = to_byt_gpio(chip);
void __iomem *reg = byt_gpio_reg(chip, offset, BYT_VAL_REG);
unsigned long flags;
u32 value;
@@ -245,7 +245,7 @@ static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
spin_lock_irqsave(&vg->lock, flags);
value = readl(reg) | BYT_DIR_MASK;
- value = value & (~BYT_INPUT_EN); /* active low */
+ value &= ~BYT_INPUT_EN; /* active low */
writel(value, reg);
spin_unlock_irqrestore(&vg->lock, flags);
@@ -256,16 +256,20 @@ static int byt_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
static int byt_gpio_direction_output(struct gpio_chip *chip,
unsigned gpio, int value)
{
- struct byt_gpio *vg = container_of(chip, struct byt_gpio, chip);
+ struct byt_gpio *vg = to_byt_gpio(chip);
void __iomem *reg = byt_gpio_reg(chip, gpio, BYT_VAL_REG);
unsigned long flags;
u32 reg_val;
spin_lock_irqsave(&vg->lock, flags);
- reg_val = readl(reg) | (BYT_DIR_MASK | !!value);
- reg_val &= ~(BYT_OUTPUT_EN | !value);
- writel(reg_val, reg);
+ reg_val = readl(reg) | BYT_DIR_MASK;
+ reg_val &= ~BYT_OUTPUT_EN;
+
+ if (value)
+ writel(reg_val | BYT_LEVEL, reg);
+ else
+ writel(reg_val & ~BYT_LEVEL, reg);
spin_unlock_irqrestore(&vg->lock, flags);
@@ -274,7 +278,7 @@ static int byt_gpio_direction_output(struct gpio_chip *chip,
static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
- struct byt_gpio *vg = container_of(chip, struct byt_gpio, chip);
+ struct byt_gpio *vg = to_byt_gpio(chip);
int i;
unsigned long flags;
u32 conf0, val, offs;
@@ -294,16 +298,16 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
val & BYT_LEVEL ? "hi" : "lo",
vg->range->pins[i], offs,
conf0 & 0x7,
- conf0 & BYT_TRIG_NEG ? "fall " : "",
- conf0 & BYT_TRIG_POS ? "rise " : "",
- conf0 & BYT_TRIG_LVL ? "lvl " : "");
+ conf0 & BYT_TRIG_NEG ? " fall" : "",
+ conf0 & BYT_TRIG_POS ? " rise" : "",
+ conf0 & BYT_TRIG_LVL ? " level" : "");
}
spin_unlock_irqrestore(&vg->lock, flags);
}
static int byt_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
- struct byt_gpio *vg = container_of(chip, struct byt_gpio, chip);
+ struct byt_gpio *vg = to_byt_gpio(chip);
return irq_create_mapping(vg->domain, offset);
}
@@ -516,6 +520,7 @@ static int byt_gpio_remove(struct platform_device *pdev)
{
struct byt_gpio *vg = platform_get_drvdata(pdev);
int err;
+
pm_runtime_disable(&pdev->dev);
err = gpiochip_remove(&vg->chip);
if (err)
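
The byt_gpio_direction_output() hunk replaces an expression that folded `!!value` (0 or 1) straight into the register, i.e. touched bit 0 instead of driving the level bit explicitly, with a three-step update: set the direction bits, clear output-enable (active low), then write with the level bit set or cleared. A standalone sketch with made-up bit positions (the BYT_* values below are placeholders, not the real register layout):

#include <stdio.h>
#include <stdint.h>

/* Placeholder bit definitions, purely for illustration. */
#define BYT_DIR_MASK	(3u << 1)	/* direction field              */
#define BYT_OUTPUT_EN	(1u << 1)	/* output enable, active low    */
#define BYT_LEVEL	(1u << 0)	/* output level                 */

static uint32_t direction_output(uint32_t reg_val, int value)
{
	reg_val |= BYT_DIR_MASK;	/* start from "both disabled"     */
	reg_val &= ~BYT_OUTPUT_EN;	/* enable the output (active low) */

	if (value)
		return reg_val | BYT_LEVEL;
	return reg_val & ~BYT_LEVEL;
}

int main(void)
{
	printf("drive high: 0x%x\n", direction_output(0, 1));
	printf("drive low : 0x%x\n", direction_output(0, 0));
	return 0;
}
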
diff --git a/drivers/pinctrl/pinctrl-bcm2835.c b/drivers/pinctrl/pinctrl-bcm2835.c
index a1c88b30f71..c05c1ef2cc3 100644
--- a/drivers/pinctrl/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/pinctrl-bcm2835.c
@@ -893,28 +893,35 @@ static int bcm2835_pinconf_get(struct pinctrl_dev *pctldev,
}
static int bcm2835_pinconf_set(struct pinctrl_dev *pctldev,
- unsigned pin, unsigned long config)
+ unsigned pin, unsigned long *configs,
+ unsigned num_configs)
{
struct bcm2835_pinctrl *pc = pinctrl_dev_get_drvdata(pctldev);
- enum bcm2835_pinconf_param param = BCM2835_PINCONF_UNPACK_PARAM(config);
- u16 arg = BCM2835_PINCONF_UNPACK_ARG(config);
+ enum bcm2835_pinconf_param param;
+ u16 arg;
u32 off, bit;
+ int i;
- if (param != BCM2835_PINCONF_PARAM_PULL)
- return -EINVAL;
-
- off = GPIO_REG_OFFSET(pin);
- bit = GPIO_REG_SHIFT(pin);
-
- bcm2835_gpio_wr(pc, GPPUD, arg & 3);
- /*
- * Docs say to wait 150 cycles, but not of what. We assume a
- * 1 MHz clock here, which is pretty slow...
- */
- udelay(150);
- bcm2835_gpio_wr(pc, GPPUDCLK0 + (off * 4), BIT(bit));
- udelay(150);
- bcm2835_gpio_wr(pc, GPPUDCLK0 + (off * 4), 0);
+ for (i = 0; i < num_configs; i++) {
+ param = BCM2835_PINCONF_UNPACK_PARAM(configs[i]);
+ arg = BCM2835_PINCONF_UNPACK_ARG(configs[i]);
+
+ if (param != BCM2835_PINCONF_PARAM_PULL)
+ return -EINVAL;
+
+ off = GPIO_REG_OFFSET(pin);
+ bit = GPIO_REG_SHIFT(pin);
+
+ bcm2835_gpio_wr(pc, GPPUD, arg & 3);
+ /*
+ * Docs say to wait 150 cycles, but not of what. We assume a
+ * 1 MHz clock here, which is pretty slow...
+ */
+ udelay(150);
+ bcm2835_gpio_wr(pc, GPPUDCLK0 + (off * 4), BIT(bit));
+ udelay(150);
+ bcm2835_gpio_wr(pc, GPPUDCLK0 + (off * 4), 0);
+ } /* for each config */
return 0;
}
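
The bcm2835 hunk keeps the documented pull-programming dance for every config it is handed: write the pull code to GPPUD, wait, clock it into the pin via the GPPUDCLKn bit, wait, then clear the clock register. A standalone sketch that just records that write sequence (the simulated "write" is illustrative; the offsets and ordering come from the code above):

#include <stdio.h>

static void gpio_wr(const char *reg, unsigned val)
{
	printf("write %-10s <- 0x%08x\n", reg, val);
}

static void set_pull(unsigned pin, unsigned pull_code)
{
	unsigned off = pin / 32;	/* which GPPUDCLK register */
	unsigned bit = pin % 32;
	char clkreg[16];

	snprintf(clkreg, sizeof(clkreg), "GPPUDCLK%u", off);

	gpio_wr("GPPUD", pull_code & 3);
	/* datasheet: wait ~150 cycles before clocking the value in */
	gpio_wr(clkreg, 1u << bit);
	/* wait again, then remove the clock */
	gpio_wr(clkreg, 0);
}

int main(void)
{
	set_pull(17, 2);	/* e.g. a pull code on GPIO 17 */
	return 0;
}
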
diff --git a/drivers/pinctrl/pinctrl-exynos.c b/drivers/pinctrl/pinctrl-exynos.c
index a74b3cbd745..2689f8d01a1 100644
--- a/drivers/pinctrl/pinctrl-exynos.c
+++ b/drivers/pinctrl/pinctrl-exynos.c
@@ -660,6 +660,64 @@ static void exynos_pinctrl_resume(struct samsung_pinctrl_drv_data *drvdata)
exynos_pinctrl_resume_bank(drvdata, bank);
}
+/* pin banks of s5pv210 pin-controller */
+static struct samsung_pin_bank s5pv210_pin_bank[] = {
+ EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
+ EXYNOS_PIN_BANK_EINTG(6, 0x020, "gpa1", 0x04),
+ EXYNOS_PIN_BANK_EINTG(8, 0x040, "gpb", 0x08),
+ EXYNOS_PIN_BANK_EINTG(5, 0x060, "gpc0", 0x0c),
+ EXYNOS_PIN_BANK_EINTG(5, 0x080, "gpc1", 0x10),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0a0, "gpd0", 0x14),
+ EXYNOS_PIN_BANK_EINTG(4, 0x0c0, "gpd1", 0x18),
+ EXYNOS_PIN_BANK_EINTG(5, 0x0e0, "gpe0", 0x1c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x100, "gpe1", 0x20),
+ EXYNOS_PIN_BANK_EINTG(6, 0x120, "gpf0", 0x24),
+ EXYNOS_PIN_BANK_EINTG(8, 0x140, "gpf1", 0x28),
+ EXYNOS_PIN_BANK_EINTG(8, 0x160, "gpf2", 0x2c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x180, "gpf3", 0x30),
+ EXYNOS_PIN_BANK_EINTG(7, 0x1a0, "gpg0", 0x34),
+ EXYNOS_PIN_BANK_EINTG(7, 0x1c0, "gpg1", 0x38),
+ EXYNOS_PIN_BANK_EINTG(7, 0x1e0, "gpg2", 0x3c),
+ EXYNOS_PIN_BANK_EINTG(7, 0x200, "gpg3", 0x40),
+ EXYNOS_PIN_BANK_EINTN(7, 0x220, "gpi"),
+ EXYNOS_PIN_BANK_EINTG(8, 0x240, "gpj0", 0x44),
+ EXYNOS_PIN_BANK_EINTG(6, 0x260, "gpj1", 0x48),
+ EXYNOS_PIN_BANK_EINTG(8, 0x280, "gpj2", 0x4c),
+ EXYNOS_PIN_BANK_EINTG(8, 0x2a0, "gpj3", 0x50),
+ EXYNOS_PIN_BANK_EINTG(5, 0x2c0, "gpj4", 0x54),
+ EXYNOS_PIN_BANK_EINTN(8, 0x2e0, "mp01"),
+ EXYNOS_PIN_BANK_EINTN(4, 0x300, "mp02"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x320, "mp03"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x340, "mp04"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x360, "mp05"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x380, "mp06"),
+ EXYNOS_PIN_BANK_EINTN(8, 0x3a0, "mp07"),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc00, "gph0", 0x00),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc20, "gph1", 0x04),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc40, "gph2", 0x08),
+ EXYNOS_PIN_BANK_EINTW(8, 0xc60, "gph3", 0x0c),
+};
+
+struct samsung_pin_ctrl s5pv210_pin_ctrl[] = {
+ {
+ /* pin-controller instance 0 data */
+ .pin_banks = s5pv210_pin_bank,
+ .nr_banks = ARRAY_SIZE(s5pv210_pin_bank),
+ .geint_con = EXYNOS_GPIO_ECON_OFFSET,
+ .geint_mask = EXYNOS_GPIO_EMASK_OFFSET,
+ .geint_pend = EXYNOS_GPIO_EPEND_OFFSET,
+ .weint_con = EXYNOS_WKUP_ECON_OFFSET,
+ .weint_mask = EXYNOS_WKUP_EMASK_OFFSET,
+ .weint_pend = EXYNOS_WKUP_EPEND_OFFSET,
+ .svc = EXYNOS_SVC_OFFSET,
+ .eint_gpio_init = exynos_eint_gpio_init,
+ .eint_wkup_init = exynos_eint_wkup_init,
+ .suspend = exynos_pinctrl_suspend,
+ .resume = exynos_pinctrl_resume,
+ .label = "s5pv210-gpio-ctrl0",
+ },
+};
+
/* pin banks of exynos4210 pin-controller 0 */
static struct samsung_pin_bank exynos4210_pin_banks0[] = {
EXYNOS_PIN_BANK_EINTG(8, 0x000, "gpa0", 0x00),
diff --git a/drivers/pinctrl/pinctrl-exynos5440.c b/drivers/pinctrl/pinctrl-exynos5440.c
index 3b283fd898f..544d469c5a7 100644
--- a/drivers/pinctrl/pinctrl-exynos5440.c
+++ b/drivers/pinctrl/pinctrl-exynos5440.c
@@ -401,64 +401,71 @@ static const struct pinmux_ops exynos5440_pinmux_ops = {
/* set the pin config settings for a specified pin */
static int exynos5440_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
- unsigned long config)
+ unsigned long *configs,
+ unsigned num_configs)
{
struct exynos5440_pinctrl_priv_data *priv;
void __iomem *base;
- enum pincfg_type cfg_type = PINCFG_UNPACK_TYPE(config);
- u32 cfg_value = PINCFG_UNPACK_VALUE(config);
+ enum pincfg_type cfg_type;
+ u32 cfg_value;
u32 data;
+ int i;
priv = pinctrl_dev_get_drvdata(pctldev);
base = priv->reg_base;
- switch (cfg_type) {
- case PINCFG_TYPE_PUD:
- /* first set pull enable/disable bit */
- data = readl(base + GPIO_PE);
- data &= ~(1 << pin);
- if (cfg_value)
- data |= (1 << pin);
- writel(data, base + GPIO_PE);
-
- /* then set pull up/down bit */
- data = readl(base + GPIO_PS);
- data &= ~(1 << pin);
- if (cfg_value == 2)
- data |= (1 << pin);
- writel(data, base + GPIO_PS);
- break;
-
- case PINCFG_TYPE_DRV:
- /* set the first bit of the drive strength */
- data = readl(base + GPIO_DS0);
- data &= ~(1 << pin);
- data |= ((cfg_value & 1) << pin);
- writel(data, base + GPIO_DS0);
- cfg_value >>= 1;
-
- /* set the second bit of the driver strength */
- data = readl(base + GPIO_DS1);
- data &= ~(1 << pin);
- data |= ((cfg_value & 1) << pin);
- writel(data, base + GPIO_DS1);
- break;
- case PINCFG_TYPE_SKEW_RATE:
- data = readl(base + GPIO_SR);
- data &= ~(1 << pin);
- data |= ((cfg_value & 1) << pin);
- writel(data, base + GPIO_SR);
- break;
- case PINCFG_TYPE_INPUT_TYPE:
- data = readl(base + GPIO_TYPE);
- data &= ~(1 << pin);
- data |= ((cfg_value & 1) << pin);
- writel(data, base + GPIO_TYPE);
- break;
- default:
- WARN_ON(1);
- return -EINVAL;
- }
+ for (i = 0; i < num_configs; i++) {
+ cfg_type = PINCFG_UNPACK_TYPE(configs[i]);
+ cfg_value = PINCFG_UNPACK_VALUE(configs[i]);
+
+ switch (cfg_type) {
+ case PINCFG_TYPE_PUD:
+ /* first set pull enable/disable bit */
+ data = readl(base + GPIO_PE);
+ data &= ~(1 << pin);
+ if (cfg_value)
+ data |= (1 << pin);
+ writel(data, base + GPIO_PE);
+
+ /* then set pull up/down bit */
+ data = readl(base + GPIO_PS);
+ data &= ~(1 << pin);
+ if (cfg_value == 2)
+ data |= (1 << pin);
+ writel(data, base + GPIO_PS);
+ break;
+
+ case PINCFG_TYPE_DRV:
+ /* set the first bit of the drive strength */
+ data = readl(base + GPIO_DS0);
+ data &= ~(1 << pin);
+ data |= ((cfg_value & 1) << pin);
+ writel(data, base + GPIO_DS0);
+ cfg_value >>= 1;
+
+ /* set the second bit of the driver strength */
+ data = readl(base + GPIO_DS1);
+ data &= ~(1 << pin);
+ data |= ((cfg_value & 1) << pin);
+ writel(data, base + GPIO_DS1);
+ break;
+ case PINCFG_TYPE_SKEW_RATE:
+ data = readl(base + GPIO_SR);
+ data &= ~(1 << pin);
+ data |= ((cfg_value & 1) << pin);
+ writel(data, base + GPIO_SR);
+ break;
+ case PINCFG_TYPE_INPUT_TYPE:
+ data = readl(base + GPIO_TYPE);
+ data &= ~(1 << pin);
+ data |= ((cfg_value & 1) << pin);
+ writel(data, base + GPIO_TYPE);
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ } /* for each config */
return 0;
}
@@ -510,7 +517,8 @@ static int exynos5440_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
/* set the pin config settings for a specified pin group */
static int exynos5440_pinconf_group_set(struct pinctrl_dev *pctldev,
- unsigned group, unsigned long config)
+ unsigned group, unsigned long *configs,
+ unsigned num_configs)
{
struct exynos5440_pinctrl_priv_data *priv;
const unsigned int *pins;
@@ -520,7 +528,8 @@ static int exynos5440_pinconf_group_set(struct pinctrl_dev *pctldev,
pins = priv->pin_groups[group].pins;
for (cnt = 0; cnt < priv->pin_groups[group].num_pins; cnt++)
- exynos5440_pinconf_set(pctldev, pins[cnt], config);
+ exynos5440_pinconf_set(pctldev, pins[cnt], configs,
+ num_configs);
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c
index f9b2a1d4854..2e62689b5e9 100644
--- a/drivers/pinctrl/pinctrl-falcon.c
+++ b/drivers/pinctrl/pinctrl-falcon.c
@@ -75,6 +75,7 @@ enum falcon_mux {
FALCON_MUX_GPIO = 0,
FALCON_MUX_RST,
FALCON_MUX_NTR,
+ FALCON_MUX_PPS,
FALCON_MUX_MDIO,
FALCON_MUX_LED,
FALCON_MUX_SPI,
@@ -114,7 +115,7 @@ static struct ltq_mfp_pin falcon_mfp[] = {
MFP_FALCON(GPIO2, GPIO, GPIO, NONE, NONE),
MFP_FALCON(GPIO3, GPIO, GPIO, NONE, NONE),
MFP_FALCON(GPIO4, NTR, GPIO, NONE, NONE),
- MFP_FALCON(GPIO5, NTR, GPIO, NONE, NONE),
+ MFP_FALCON(GPIO5, NTR, GPIO, PPS, NONE),
MFP_FALCON(GPIO6, RST, GPIO, NONE, NONE),
MFP_FALCON(GPIO7, MDIO, GPIO, NONE, NONE),
MFP_FALCON(GPIO8, MDIO, GPIO, NONE, NONE),
@@ -168,6 +169,7 @@ static struct ltq_mfp_pin falcon_mfp[] = {
static const unsigned pins_por[] = {GPIO0};
static const unsigned pins_ntr[] = {GPIO4};
static const unsigned pins_ntr8k[] = {GPIO5};
+static const unsigned pins_pps[] = {GPIO5};
static const unsigned pins_hrst[] = {GPIO6};
static const unsigned pins_mdio[] = {GPIO7, GPIO8};
static const unsigned pins_bled[] = {GPIO9, GPIO10, GPIO11,
@@ -186,6 +188,7 @@ static struct ltq_pin_group falcon_grps[] = {
GRP_MUX("por", RST, pins_por),
GRP_MUX("ntr", NTR, pins_ntr),
GRP_MUX("ntr8k", NTR, pins_ntr8k),
+ GRP_MUX("pps", PPS, pins_pps),
GRP_MUX("hrst", RST, pins_hrst),
GRP_MUX("mdio", MDIO, pins_mdio),
GRP_MUX("bootled", LED, pins_bled),
@@ -201,7 +204,7 @@ static struct ltq_pin_group falcon_grps[] = {
};
static const char * const ltq_rst_grps[] = {"por", "hrst"};
-static const char * const ltq_ntr_grps[] = {"ntr", "ntr8k"};
+static const char * const ltq_ntr_grps[] = {"ntr", "ntr8k", "pps"};
static const char * const ltq_mdio_grps[] = {"mdio"};
static const char * const ltq_bled_grps[] = {"bootled"};
static const char * const ltq_asc_grps[] = {"asc0", "asc1"};
@@ -235,7 +238,8 @@ static int falcon_pinconf_group_get(struct pinctrl_dev *pctrldev,
}
static int falcon_pinconf_group_set(struct pinctrl_dev *pctrldev,
- unsigned group, unsigned long config)
+ unsigned group, unsigned long *configs,
+ unsigned num_configs)
{
return -ENOTSUPP;
}
@@ -276,39 +280,47 @@ static int falcon_pinconf_get(struct pinctrl_dev *pctrldev,
}
static int falcon_pinconf_set(struct pinctrl_dev *pctrldev,
- unsigned pin, unsigned long config)
+ unsigned pin, unsigned long *configs,
+ unsigned num_configs)
{
- enum ltq_pinconf_param param = LTQ_PINCONF_UNPACK_PARAM(config);
- int arg = LTQ_PINCONF_UNPACK_ARG(config);
+ enum ltq_pinconf_param param;
+ int arg;
struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctrldev);
void __iomem *mem = info->membase[PORT(pin)];
u32 reg;
+ int i;
- switch (param) {
- case LTQ_PINCONF_PARAM_DRIVE_CURRENT:
- reg = LTQ_PADC_DCC;
- break;
-
- case LTQ_PINCONF_PARAM_SLEW_RATE:
- reg = LTQ_PADC_SRC;
- break;
-
- case LTQ_PINCONF_PARAM_PULL:
- if (arg == 1)
- reg = LTQ_PADC_PDEN;
- else
- reg = LTQ_PADC_PUEN;
- break;
+ for (i = 0; i < num_configs; i++) {
+ param = LTQ_PINCONF_UNPACK_PARAM(configs[i]);
+ arg = LTQ_PINCONF_UNPACK_ARG(configs[i]);
+
+ switch (param) {
+ case LTQ_PINCONF_PARAM_DRIVE_CURRENT:
+ reg = LTQ_PADC_DCC;
+ break;
+
+ case LTQ_PINCONF_PARAM_SLEW_RATE:
+ reg = LTQ_PADC_SRC;
+ break;
+
+ case LTQ_PINCONF_PARAM_PULL:
+ if (arg == 1)
+ reg = LTQ_PADC_PDEN;
+ else
+ reg = LTQ_PADC_PUEN;
+ break;
+
+ default:
+ pr_err("%s: Invalid config param %04x\n",
+ pinctrl_dev_get_name(pctrldev), param);
+ return -ENOTSUPP;
+ }
- default:
- pr_err("%s: Invalid config param %04x\n",
- pinctrl_dev_get_name(pctrldev), param);
- return -ENOTSUPP;
- }
+ pad_w32(mem, BIT(PORT_PIN(pin)), reg);
+ if (!(pad_r32(mem, reg) & BIT(PORT_PIN(pin))))
+ return -ENOTSUPP;
+ } /* for each config */
- pad_w32(mem, BIT(PORT_PIN(pin)), reg);
- if (!(pad_r32(mem, reg) & BIT(PORT_PIN(pin))))
- return -ENOTSUPP;
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/pinctrl-imx.c
index 57a4eb0add2..d78dd813bff 100644
--- a/drivers/pinctrl/pinctrl-imx.c
+++ b/drivers/pinctrl/pinctrl-imx.c
@@ -27,18 +27,6 @@
#include "core.h"
#include "pinctrl-imx.h"
-#define IMX_PMX_DUMP(info, p, m, c, n) \
-{ \
- int i, j; \
- printk(KERN_DEBUG "Format: Pin Mux Config\n"); \
- for (i = 0; i < n; i++) { \
- j = p[i]; \
- printk(KERN_DEBUG "%s %d 0x%lx\n", \
- info->pins[j].name, \
- m[i], c[i]); \
- } \
-}
-
/* The bits in CONFIG cell defined in binding doc*/
#define IMX_NO_PAD_CTL 0x80000000 /* no pin config need */
#define IMX_PAD_SION 0x40000000 /* set SION */
@@ -98,7 +86,7 @@ static int imx_get_group_pins(struct pinctrl_dev *pctldev, unsigned selector,
if (selector >= info->ngroups)
return -EINVAL;
- *pins = info->groups[selector].pins;
+ *pins = info->groups[selector].pin_ids;
*npins = info->groups[selector].npins;
return 0;
@@ -134,7 +122,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
}
for (i = 0; i < grp->npins; i++) {
- if (!(grp->configs[i] & IMX_NO_PAD_CTL))
+ if (!(grp->pins[i].config & IMX_NO_PAD_CTL))
map_num++;
}
@@ -159,11 +147,11 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
/* create config map */
new_map++;
for (i = j = 0; i < grp->npins; i++) {
- if (!(grp->configs[i] & IMX_NO_PAD_CTL)) {
+ if (!(grp->pins[i].config & IMX_NO_PAD_CTL)) {
new_map[j].type = PIN_MAP_TYPE_CONFIGS_PIN;
new_map[j].data.configs.group_or_pin =
- pin_get_name(pctldev, grp->pins[i]);
- new_map[j].data.configs.configs = &grp->configs[i];
+ pin_get_name(pctldev, grp->pins[i].pin);
+ new_map[j].data.configs.configs = &grp->pins[i].config;
new_map[j].data.configs.num_configs = 1;
j++;
}
@@ -197,28 +185,23 @@ static int imx_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
const struct imx_pinctrl_soc_info *info = ipctl->info;
const struct imx_pin_reg *pin_reg;
- const unsigned *pins, *mux, *input_val;
- u16 *input_reg;
unsigned int npins, pin_id;
int i;
+ struct imx_pin_group *grp;
/*
* Configure the mux mode for each pin in the group for a specific
* function.
*/
- pins = info->groups[group].pins;
- npins = info->groups[group].npins;
- mux = info->groups[group].mux_mode;
- input_val = info->groups[group].input_val;
- input_reg = info->groups[group].input_reg;
-
- WARN_ON(!pins || !npins || !mux || !input_val || !input_reg);
+ grp = &info->groups[group];
+ npins = grp->npins;
dev_dbg(ipctl->dev, "enable function %s group %s\n",
- info->functions[selector].name, info->groups[group].name);
+ info->functions[selector].name, grp->name);
for (i = 0; i < npins; i++) {
- pin_id = pins[i];
+ struct imx_pin *pin = &grp->pins[i];
+ pin_id = pin->pin;
pin_reg = &info->pin_regs[pin_id];
if (!(info->flags & ZERO_OFFSET_VALID) && !pin_reg->mux_reg) {
@@ -231,20 +214,50 @@ static int imx_pmx_enable(struct pinctrl_dev *pctldev, unsigned selector,
u32 reg;
reg = readl(ipctl->base + pin_reg->mux_reg);
reg &= ~(0x7 << 20);
- reg |= (mux[i] << 20);
+ reg |= (pin->mux_mode << 20);
writel(reg, ipctl->base + pin_reg->mux_reg);
} else {
- writel(mux[i], ipctl->base + pin_reg->mux_reg);
+ writel(pin->mux_mode, ipctl->base + pin_reg->mux_reg);
}
dev_dbg(ipctl->dev, "write: offset 0x%x val 0x%x\n",
- pin_reg->mux_reg, mux[i]);
-
- /* some pins also need select input setting, set it if found */
- if (input_reg[i]) {
- writel(input_val[i], ipctl->base + input_reg[i]);
+ pin_reg->mux_reg, pin->mux_mode);
+
+ /*
+ * If the select input value begins with 0xff, it's a quirky
+ * select input and the value should be interpreted as below.
+ * 31 23 15 7 0
+ * | 0xff | shift | width | select |
+ * It's used to work around the problem that the select
+ * input for some pin is not implemented in the select
+ * input register but in some general purpose register.
+ * We encode the select input value, width and shift of
+ * the bit field into input_val cell of pin function ID
+ * in device tree, and then decode them here for setting
+ * up the select input bits in general purpose register.
+ */
+ if (pin->input_val >> 24 == 0xff) {
+ u32 val = pin->input_val;
+ u8 select = val & 0xff;
+ u8 width = (val >> 8) & 0xff;
+ u8 shift = (val >> 16) & 0xff;
+ u32 mask = ((1 << width) - 1) << shift;
+ /*
+ * The input_reg[i] here is actually some IOMUXC general
+ * purpose register, not regular select input register.
+ */
+ val = readl(ipctl->base + pin->input_val);
+ val &= ~mask;
+ val |= select << shift;
+ writel(val, ipctl->base + pin->input_val);
+ } else if (pin->input_val) {
+ /*
+ * Regular select input register can never be at offset
+ * 0, and we only print register value for regular case.
+ */
+ writel(pin->input_val, ipctl->base + pin->input_reg);
dev_dbg(ipctl->dev,
"==>select_input: offset 0x%x val 0x%x\n",
- input_reg[i], input_val[i]);
+ pin->input_reg, pin->input_val);
}
}
@@ -310,11 +323,13 @@ static int imx_pinconf_get(struct pinctrl_dev *pctldev,
}
static int imx_pinconf_set(struct pinctrl_dev *pctldev,
- unsigned pin_id, unsigned long config)
+ unsigned pin_id, unsigned long *configs,
+ unsigned num_configs)
{
struct imx_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
const struct imx_pinctrl_soc_info *info = ipctl->info;
const struct imx_pin_reg *pin_reg = &info->pin_regs[pin_id];
+ int i;
if (!(info->flags & ZERO_OFFSET_VALID) && !pin_reg->conf_reg) {
dev_err(info->dev, "Pin(%s) does not support config function\n",
@@ -325,17 +340,19 @@ static int imx_pinconf_set(struct pinctrl_dev *pctldev,
dev_dbg(ipctl->dev, "pinconf set pin %s\n",
info->pins[pin_id].name);
- if (info->flags & SHARE_MUX_CONF_REG) {
- u32 reg;
- reg = readl(ipctl->base + pin_reg->conf_reg);
- reg &= ~0xffff;
- reg |= config;
- writel(reg, ipctl->base + pin_reg->conf_reg);
- } else {
- writel(config, ipctl->base + pin_reg->conf_reg);
- }
- dev_dbg(ipctl->dev, "write: offset 0x%x val 0x%lx\n",
- pin_reg->conf_reg, config);
+ for (i = 0; i < num_configs; i++) {
+ if (info->flags & SHARE_MUX_CONF_REG) {
+ u32 reg;
+ reg = readl(ipctl->base + pin_reg->conf_reg);
+ reg &= ~0xffff;
+ reg |= configs[i];
+ writel(reg, ipctl->base + pin_reg->conf_reg);
+ } else {
+ writel(configs[i], ipctl->base + pin_reg->conf_reg);
+ }
+ dev_dbg(ipctl->dev, "write: offset 0x%x val 0x%lx\n",
+ pin_reg->conf_reg, configs[i]);
+ } /* for each config */
return 0;
}
@@ -373,8 +390,9 @@ static void imx_pinconf_group_dbg_show(struct pinctrl_dev *pctldev,
seq_printf(s, "\n");
grp = &info->groups[group];
for (i = 0; i < grp->npins; i++) {
- name = pin_get_name(pctldev, grp->pins[i]);
- ret = imx_pinconf_get(pctldev, grp->pins[i], &config);
+ struct imx_pin *pin = &grp->pins[i];
+ name = pin_get_name(pctldev, pin->pin);
+ ret = imx_pinconf_get(pctldev, pin->pin, &config);
if (ret)
return;
seq_printf(s, "%s: 0x%lx", name, config);
@@ -426,28 +444,31 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
* do sanity check and calculate pins number
*/
list = of_get_property(np, "fsl,pins", &size);
+ if (!list) {
+ dev_err(info->dev, "no fsl,pins property in node %s\n", np->full_name);
+ return -EINVAL;
+ }
+
/* we do not check return since it's safe node passed down */
if (!size || size % pin_size) {
- dev_err(info->dev, "Invalid fsl,pins property\n");
+ dev_err(info->dev, "Invalid fsl,pins property in node %s\n", np->full_name);
return -EINVAL;
}
grp->npins = size / pin_size;
- grp->pins = devm_kzalloc(info->dev, grp->npins * sizeof(unsigned int),
+ grp->pins = devm_kzalloc(info->dev, grp->npins * sizeof(struct imx_pin),
GFP_KERNEL);
- grp->mux_mode = devm_kzalloc(info->dev, grp->npins * sizeof(unsigned int),
- GFP_KERNEL);
- grp->input_reg = devm_kzalloc(info->dev, grp->npins * sizeof(u16),
- GFP_KERNEL);
- grp->input_val = devm_kzalloc(info->dev, grp->npins * sizeof(unsigned int),
- GFP_KERNEL);
- grp->configs = devm_kzalloc(info->dev, grp->npins * sizeof(unsigned long),
+ grp->pin_ids = devm_kzalloc(info->dev, grp->npins * sizeof(unsigned int),
GFP_KERNEL);
+	if (!grp->pins || !grp->pin_ids)
+ return -ENOMEM;
+
for (i = 0; i < grp->npins; i++) {
u32 mux_reg = be32_to_cpu(*list++);
u32 conf_reg;
unsigned int pin_id;
struct imx_pin_reg *pin_reg;
+ struct imx_pin *pin = &grp->pins[i];
if (info->flags & SHARE_MUX_CONF_REG)
conf_reg = mux_reg;
@@ -456,23 +477,23 @@ static int imx_pinctrl_parse_groups(struct device_node *np,
pin_id = mux_reg ? mux_reg / 4 : conf_reg / 4;
pin_reg = &info->pin_regs[pin_id];
- grp->pins[i] = pin_id;
+ pin->pin = pin_id;
+ grp->pin_ids[i] = pin_id;
pin_reg->mux_reg = mux_reg;
pin_reg->conf_reg = conf_reg;
- grp->input_reg[i] = be32_to_cpu(*list++);
- grp->mux_mode[i] = be32_to_cpu(*list++);
- grp->input_val[i] = be32_to_cpu(*list++);
+ pin->input_reg = be32_to_cpu(*list++);
+ pin->mux_mode = be32_to_cpu(*list++);
+ pin->input_val = be32_to_cpu(*list++);
/* SION bit is in mux register */
config = be32_to_cpu(*list++);
if (config & IMX_PAD_SION)
- grp->mux_mode[i] |= IOMUXC_CONFIG_SION;
- grp->configs[i] = config & ~IMX_PAD_SION;
- }
+ pin->mux_mode |= IOMUXC_CONFIG_SION;
+ pin->config = config & ~IMX_PAD_SION;
-#ifdef DEBUG
- IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins);
-#endif
+ dev_dbg(info->dev, "%s: %d 0x%08lx", info->pins[i].name,
+ pin->mux_mode, pin->config);
+ }
return 0;
}
@@ -484,7 +505,6 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
struct device_node *child;
struct imx_pmx_func *func;
struct imx_pin_group *grp;
- int ret;
static u32 grp_index;
u32 i = 0;
@@ -496,7 +516,7 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
func->name = np->name;
func->num_groups = of_get_child_count(np);
if (func->num_groups <= 0) {
- dev_err(info->dev, "no groups defined\n");
+ dev_err(info->dev, "no groups defined in %s\n", np->full_name);
return -EINVAL;
}
func->groups = devm_kzalloc(info->dev,
@@ -505,9 +525,7 @@ static int imx_pinctrl_parse_functions(struct device_node *np,
for_each_child_of_node(np, child) {
func->groups[i] = child->name;
grp = &info->groups[grp_index++];
- ret = imx_pinctrl_parse_groups(child, grp, info, i++);
- if (ret)
- return ret;
+ imx_pinctrl_parse_groups(child, grp, info, i++);
}
return 0;
@@ -518,7 +536,6 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev,
{
struct device_node *np = pdev->dev.of_node;
struct device_node *child;
- int ret;
u32 nfuncs = 0;
u32 i = 0;
@@ -545,13 +562,8 @@ static int imx_pinctrl_probe_dt(struct platform_device *pdev,
if (!info->groups)
return -ENOMEM;
- for_each_child_of_node(np, child) {
- ret = imx_pinctrl_parse_functions(child, info, i++);
- if (ret) {
- dev_err(&pdev->dev, "failed to parse function\n");
- return ret;
- }
- }
+ for_each_child_of_node(np, child)
+ imx_pinctrl_parse_functions(child, info, i++);
return 0;
}
@@ -580,9 +592,6 @@ int imx_pinctrl_probe(struct platform_device *pdev,
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOENT;
-
ipctl->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ipctl->base))
return PTR_ERR(ipctl->base);
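
The comment added to imx_pmx_enable() describes the "quirky" select-input encoding: when the top byte of input_val is 0xff, the remaining bytes are shift (bits 23..16), width (bits 15..8) and the select value (bits 7..0), describing a bit field in a general-purpose register rather than a regular select-input register. A standalone decoder following that layout (the caller is assumed to have already checked the 0xff marker, as the hunk does):

#include <stdio.h>
#include <stdint.h>

/* Decode 0xff | shift | width | select and patch it into reg_val. */
static uint32_t apply_quirky_input(uint32_t reg_val, uint32_t input_val)
{
	uint8_t select = input_val & 0xff;
	uint8_t width  = (input_val >> 8) & 0xff;
	uint8_t shift  = (input_val >> 16) & 0xff;
	uint32_t mask  = ((1u << width) - 1) << shift;

	reg_val &= ~mask;
	reg_val |= (uint32_t)select << shift;
	return reg_val;
}

int main(void)
{
	/* hypothetical cell: shift=4, width=2, select=3 */
	uint32_t input_val = 0xff040203;
	uint32_t before = 0x0000000f;
	uint32_t after = apply_quirky_input(before, input_val);

	printf("0x%08x -> 0x%08x\n", before, after);	/* bits 5:4 become 0b11 */
	return 0;
}
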
diff --git a/drivers/pinctrl/pinctrl-imx.h b/drivers/pinctrl/pinctrl-imx.h
index bcedd991c9f..db408b05700 100644
--- a/drivers/pinctrl/pinctrl-imx.h
+++ b/drivers/pinctrl/pinctrl-imx.h
@@ -18,29 +18,35 @@
struct platform_device;
/**
+ * struct imx_pin - describes a single i.MX pin
+ * @pin: the pin_id of this pin
+ * @mux_mode: the mux mode for this pin.
+ * @input_reg: the select input register offset for this pin if any
+ * 0 if no select input setting needed.
+ * @input_val: the select input value for this pin.
+ * @config: the config for this pin.
+ */
+struct imx_pin {
+ unsigned int pin;
+ unsigned int mux_mode;
+ u16 input_reg;
+ unsigned int input_val;
+ unsigned long config;
+};
+
+/**
* struct imx_pin_group - describes an IMX pin group
* @name: the name of this specific pin group
- * @pins: an array of discrete physical pins used in this group, taken
- * from the driver-local pin enumeration space
* @npins: the number of pins in this group array, i.e. the number of
* elements in .pins so we can iterate over that array
- * @mux_mode: the mux mode for each pin in this group. The size of this
- * array is the same as pins.
- * @input_reg: select input register offset for this mux if any
- * 0 if no select input setting needed.
- * @input_val: the select input value for each pin in this group. The size of
- * this array is the same as pins.
- * @configs: the config for each pin in this group. The size of this
- * array is the same as pins.
+ * @pin_ids: array of pin_ids. pinctrl forces us to maintain such an array
+ * @pins: array of pins
*/
struct imx_pin_group {
const char *name;
- unsigned int *pins;
unsigned npins;
- unsigned int *mux_mode;
- u16 *input_reg;
- unsigned int *input_val;
- unsigned long *configs;
+ unsigned int *pin_ids;
+ struct imx_pin *pins;
};
/**
diff --git a/drivers/pinctrl/pinctrl-mxs.c b/drivers/pinctrl/pinctrl-mxs.c
index f5d56436ba7..40c76f26998 100644
--- a/drivers/pinctrl/pinctrl-mxs.c
+++ b/drivers/pinctrl/pinctrl-mxs.c
@@ -233,7 +233,8 @@ static int mxs_pinconf_get(struct pinctrl_dev *pctldev,
}
static int mxs_pinconf_set(struct pinctrl_dev *pctldev,
- unsigned pin, unsigned long config)
+ unsigned pin, unsigned long *configs,
+ unsigned num_configs)
{
return -ENOTSUPP;
}
@@ -249,7 +250,8 @@ static int mxs_pinconf_group_get(struct pinctrl_dev *pctldev,
}
static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev,
- unsigned group, unsigned long config)
+ unsigned group, unsigned long *configs,
+ unsigned num_configs)
{
struct mxs_pinctrl_data *d = pinctrl_dev_get_drvdata(pctldev);
struct mxs_group *g = &d->soc->groups[group];
@@ -257,49 +259,56 @@ static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev,
u8 ma, vol, pull, bank, shift;
u16 pin;
u32 i;
+ int n;
+ unsigned long config;
- ma = CONFIG_TO_MA(config);
- vol = CONFIG_TO_VOL(config);
- pull = CONFIG_TO_PULL(config);
-
- for (i = 0; i < g->npins; i++) {
- bank = PINID_TO_BANK(g->pins[i]);
- pin = PINID_TO_PIN(g->pins[i]);
-
- /* drive */
- reg = d->base + d->soc->regs->drive;
- reg += bank * 0x40 + pin / 8 * 0x10;
-
- /* mA */
- if (config & MA_PRESENT) {
- shift = pin % 8 * 4;
- writel(0x3 << shift, reg + CLR);
- writel(ma << shift, reg + SET);
- }
-
- /* vol */
- if (config & VOL_PRESENT) {
- shift = pin % 8 * 4 + 2;
- if (vol)
- writel(1 << shift, reg + SET);
- else
- writel(1 << shift, reg + CLR);
+ for (n = 0; n < num_configs; n++) {
+ config = configs[n];
+
+ ma = CONFIG_TO_MA(config);
+ vol = CONFIG_TO_VOL(config);
+ pull = CONFIG_TO_PULL(config);
+
+ for (i = 0; i < g->npins; i++) {
+ bank = PINID_TO_BANK(g->pins[i]);
+ pin = PINID_TO_PIN(g->pins[i]);
+
+ /* drive */
+ reg = d->base + d->soc->regs->drive;
+ reg += bank * 0x40 + pin / 8 * 0x10;
+
+ /* mA */
+ if (config & MA_PRESENT) {
+ shift = pin % 8 * 4;
+ writel(0x3 << shift, reg + CLR);
+ writel(ma << shift, reg + SET);
+ }
+
+ /* vol */
+ if (config & VOL_PRESENT) {
+ shift = pin % 8 * 4 + 2;
+ if (vol)
+ writel(1 << shift, reg + SET);
+ else
+ writel(1 << shift, reg + CLR);
+ }
+
+ /* pull */
+ if (config & PULL_PRESENT) {
+ reg = d->base + d->soc->regs->pull;
+ reg += bank * 0x10;
+ shift = pin;
+ if (pull)
+ writel(1 << shift, reg + SET);
+ else
+ writel(1 << shift, reg + CLR);
+ }
}
- /* pull */
- if (config & PULL_PRESENT) {
- reg = d->base + d->soc->regs->pull;
- reg += bank * 0x10;
- shift = pin;
- if (pull)
- writel(1 << shift, reg + SET);
- else
- writel(1 << shift, reg + CLR);
- }
- }
+ /* cache the config value for mxs_pinconf_group_get() */
+ g->config = config;
- /* cache the config value for mxs_pinconf_group_get() */
- g->config = config;
+ } /* for each config */
return 0;
}
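The mxs hunk above shows the interface change running through this whole series: the pinconf .pin_config_set()/.pin_config_group_set() callbacks now receive an array of packed configs plus a count instead of a single value. A minimal sketch of the resulting driver-side pattern, assuming the generic packing helpers from pinconf-generic.h and a hypothetical foo_apply_one() standing in for the register writes:

#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinconf-generic.h>

/* hypothetical hardware writer; stands in for the driver-specific code */
static int foo_apply_one(unsigned pin, enum pin_config_param param, u16 arg);

static int foo_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
			   unsigned long *configs, unsigned num_configs)
{
	unsigned i;
	int ret;

	for (i = 0; i < num_configs; i++) {
		ret = foo_apply_one(pin,
				    pinconf_to_config_param(configs[i]),
				    pinconf_to_config_argument(configs[i]));
		if (ret)
			return ret;	/* stop at the first config that fails */
	}
	return 0;
}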
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index 4a1cfdce223..d7c3ae300fa 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -337,97 +337,6 @@ static void nmk_prcm_altcx_set_mode(struct nmk_pinctrl *npct,
nmk_write_masked(npct->prcm_base + reg, BIT(bit), BIT(bit));
}
-static void __nmk_config_pin(struct nmk_gpio_chip *nmk_chip, unsigned offset,
- pin_cfg_t cfg, bool sleep, unsigned int *slpmregs)
-{
- static const char *afnames[] = {
- [NMK_GPIO_ALT_GPIO] = "GPIO",
- [NMK_GPIO_ALT_A] = "A",
- [NMK_GPIO_ALT_B] = "B",
- [NMK_GPIO_ALT_C] = "C"
- };
- static const char *pullnames[] = {
- [NMK_GPIO_PULL_NONE] = "none",
- [NMK_GPIO_PULL_UP] = "up",
- [NMK_GPIO_PULL_DOWN] = "down",
- [3] /* illegal */ = "??"
- };
- static const char *slpmnames[] = {
- [NMK_GPIO_SLPM_INPUT] = "input/wakeup",
- [NMK_GPIO_SLPM_NOCHANGE] = "no-change/no-wakeup",
- };
-
- int pin = PIN_NUM(cfg);
- int pull = PIN_PULL(cfg);
- int af = PIN_ALT(cfg);
- int slpm = PIN_SLPM(cfg);
- int output = PIN_DIR(cfg);
- int val = PIN_VAL(cfg);
- bool glitch = af == NMK_GPIO_ALT_C;
-
- dev_dbg(nmk_chip->chip.dev, "pin %d [%#lx]: af %s, pull %s, slpm %s (%s%s)\n",
- pin, cfg, afnames[af], pullnames[pull], slpmnames[slpm],
- output ? "output " : "input",
- output ? (val ? "high" : "low") : "");
-
- if (sleep) {
- int slpm_pull = PIN_SLPM_PULL(cfg);
- int slpm_output = PIN_SLPM_DIR(cfg);
- int slpm_val = PIN_SLPM_VAL(cfg);
-
- af = NMK_GPIO_ALT_GPIO;
-
- /*
- * The SLPM_* values are normal values + 1 to allow zero to
- * mean "same as normal".
- */
- if (slpm_pull)
- pull = slpm_pull - 1;
- if (slpm_output)
- output = slpm_output - 1;
- if (slpm_val)
- val = slpm_val - 1;
-
- dev_dbg(nmk_chip->chip.dev, "pin %d: sleep pull %s, dir %s, val %s\n",
- pin,
- slpm_pull ? pullnames[pull] : "same",
- slpm_output ? (output ? "output" : "input") : "same",
- slpm_val ? (val ? "high" : "low") : "same");
- }
-
- if (output)
- __nmk_gpio_make_output(nmk_chip, offset, val);
- else {
- __nmk_gpio_make_input(nmk_chip, offset);
- __nmk_gpio_set_pull(nmk_chip, offset, pull);
- }
-
- __nmk_gpio_set_lowemi(nmk_chip, offset, PIN_LOWEMI(cfg));
-
- /*
- * If the pin is switching to altfunc, and there was an interrupt
- * installed on it which has been lazy disabled, actually mask the
- * interrupt to prevent spurious interrupts that would occur while the
- * pin is under control of the peripheral. Only SKE does this.
- */
- if (af != NMK_GPIO_ALT_GPIO)
- nmk_gpio_disable_lazy_irq(nmk_chip, offset);
-
- /*
- * If we've backed up the SLPM registers (glitch workaround), modify
- * the backups since they will be restored.
- */
- if (slpmregs) {
- if (slpm == NMK_GPIO_SLPM_NOCHANGE)
- slpmregs[nmk_chip->bank] |= BIT(offset);
- else
- slpmregs[nmk_chip->bank] &= ~BIT(offset);
- } else
- __nmk_gpio_set_slpm(nmk_chip, offset, slpm);
-
- __nmk_gpio_set_mode_safe(nmk_chip, offset, af, glitch);
-}
-
/*
* Safe sequence used to switch IOs between GPIO and Alternate-C mode:
* - Save SLPM registers
@@ -474,210 +383,6 @@ static void nmk_gpio_glitch_slpm_restore(unsigned int *slpm)
}
}
-static int __nmk_config_pins(pin_cfg_t *cfgs, int num, bool sleep)
-{
- static unsigned int slpm[NUM_BANKS];
- unsigned long flags;
- bool glitch = false;
- int ret = 0;
- int i;
-
- for (i = 0; i < num; i++) {
- if (PIN_ALT(cfgs[i]) == NMK_GPIO_ALT_C) {
- glitch = true;
- break;
- }
- }
-
- spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
-
- if (glitch) {
- memset(slpm, 0xff, sizeof(slpm));
-
- for (i = 0; i < num; i++) {
- int pin = PIN_NUM(cfgs[i]);
- int offset = pin % NMK_GPIO_PER_CHIP;
-
- if (PIN_ALT(cfgs[i]) == NMK_GPIO_ALT_C)
- slpm[pin / NMK_GPIO_PER_CHIP] &= ~BIT(offset);
- }
-
- nmk_gpio_glitch_slpm_init(slpm);
- }
-
- for (i = 0; i < num; i++) {
- struct nmk_gpio_chip *nmk_chip;
- int pin = PIN_NUM(cfgs[i]);
-
- nmk_chip = nmk_gpio_chips[pin / NMK_GPIO_PER_CHIP];
- if (!nmk_chip) {
- ret = -EINVAL;
- break;
- }
-
- clk_enable(nmk_chip->clk);
- spin_lock(&nmk_chip->lock);
- __nmk_config_pin(nmk_chip, pin % NMK_GPIO_PER_CHIP,
- cfgs[i], sleep, glitch ? slpm : NULL);
- spin_unlock(&nmk_chip->lock);
- clk_disable(nmk_chip->clk);
- }
-
- if (glitch)
- nmk_gpio_glitch_slpm_restore(slpm);
-
- spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
-
- return ret;
-}
-
-/**
- * nmk_config_pin - configure a pin's mux attributes
- * @cfg: pin confguration
- * @sleep: Non-zero to apply the sleep mode configuration
- * Configures a pin's mode (alternate function or GPIO), its pull up status,
- * and its sleep mode based on the specified configuration. The @cfg is
- * usually one of the SoC specific macros defined in mach/<soc>-pins.h. These
- * are constructed using, and can be further enhanced with, the macros in
- * <linux/platform_data/pinctrl-nomadik.h>
- *
- * If a pin's mode is set to GPIO, it is configured as an input to avoid
- * side-effects. The gpio can be manipulated later using standard GPIO API
- * calls.
- */
-int nmk_config_pin(pin_cfg_t cfg, bool sleep)
-{
- return __nmk_config_pins(&cfg, 1, sleep);
-}
-EXPORT_SYMBOL(nmk_config_pin);
-
-/**
- * nmk_config_pins - configure several pins at once
- * @cfgs: array of pin configurations
- * @num: number of elments in the array
- *
- * Configures several pins using nmk_config_pin(). Refer to that function for
- * further information.
- */
-int nmk_config_pins(pin_cfg_t *cfgs, int num)
-{
- return __nmk_config_pins(cfgs, num, false);
-}
-EXPORT_SYMBOL(nmk_config_pins);
-
-int nmk_config_pins_sleep(pin_cfg_t *cfgs, int num)
-{
- return __nmk_config_pins(cfgs, num, true);
-}
-EXPORT_SYMBOL(nmk_config_pins_sleep);
-
-/**
- * nmk_gpio_set_slpm() - configure the sleep mode of a pin
- * @gpio: pin number
- * @mode: NMK_GPIO_SLPM_INPUT or NMK_GPIO_SLPM_NOCHANGE,
- *
- * This register is actually in the pinmux layer, not the GPIO block itself.
- * The GPIO1B_SLPM register defines the GPIO mode when SLEEP/DEEP-SLEEP
- * mode is entered (i.e. when signal IOFORCE is HIGH by the platform code).
- * Each GPIO can be configured to be forced into GPIO mode when IOFORCE is
- * HIGH, overriding the normal setting defined by GPIO_AFSELx registers.
- * When IOFORCE returns LOW (by software, after SLEEP/DEEP-SLEEP exit),
- * the GPIOs return to the normal setting defined by GPIO_AFSELx registers.
- *
- * If @mode is NMK_GPIO_SLPM_INPUT, the corresponding GPIO is switched to GPIO
- * mode when signal IOFORCE is HIGH (i.e. when SLEEP/DEEP-SLEEP mode is
- * entered) regardless of the altfunction selected. Also wake-up detection is
- * ENABLED.
- *
- * If @mode is NMK_GPIO_SLPM_NOCHANGE, the corresponding GPIO remains
- * controlled by NMK_GPIO_DATC, NMK_GPIO_DATS, NMK_GPIO_DIR, NMK_GPIO_PDIS
- * (for altfunction GPIO) or respective on-chip peripherals (for other
- * altfuncs) when IOFORCE is HIGH. Also wake-up detection DISABLED.
- *
- * Note that enable_irq_wake() will automatically enable wakeup detection.
- */
-int nmk_gpio_set_slpm(int gpio, enum nmk_gpio_slpm mode)
-{
- struct nmk_gpio_chip *nmk_chip;
- unsigned long flags;
-
- nmk_chip = nmk_gpio_chips[gpio / NMK_GPIO_PER_CHIP];
- if (!nmk_chip)
- return -EINVAL;
-
- clk_enable(nmk_chip->clk);
- spin_lock_irqsave(&nmk_gpio_slpm_lock, flags);
- spin_lock(&nmk_chip->lock);
-
- __nmk_gpio_set_slpm(nmk_chip, gpio % NMK_GPIO_PER_CHIP, mode);
-
- spin_unlock(&nmk_chip->lock);
- spin_unlock_irqrestore(&nmk_gpio_slpm_lock, flags);
- clk_disable(nmk_chip->clk);
-
- return 0;
-}
-
-/**
- * nmk_gpio_set_pull() - enable/disable pull up/down on a gpio
- * @gpio: pin number
- * @pull: one of NMK_GPIO_PULL_DOWN, NMK_GPIO_PULL_UP, and NMK_GPIO_PULL_NONE
- *
- * Enables/disables pull up/down on a specified pin. This only takes effect if
- * the pin is configured as an input (either explicitly or by the alternate
- * function).
- *
- * NOTE: If enabling the pull up/down, the caller must ensure that the GPIO is
- * configured as an input. Otherwise, due to the way the controller registers
- * work, this function will change the value output on the pin.
- */
-int nmk_gpio_set_pull(int gpio, enum nmk_gpio_pull pull)
-{
- struct nmk_gpio_chip *nmk_chip;
- unsigned long flags;
-
- nmk_chip = nmk_gpio_chips[gpio / NMK_GPIO_PER_CHIP];
- if (!nmk_chip)
- return -EINVAL;
-
- clk_enable(nmk_chip->clk);
- spin_lock_irqsave(&nmk_chip->lock, flags);
- __nmk_gpio_set_pull(nmk_chip, gpio % NMK_GPIO_PER_CHIP, pull);
- spin_unlock_irqrestore(&nmk_chip->lock, flags);
- clk_disable(nmk_chip->clk);
-
- return 0;
-}
-
-/* Mode functions */
-/**
- * nmk_gpio_set_mode() - set the mux mode of a gpio pin
- * @gpio: pin number
- * @gpio_mode: one of NMK_GPIO_ALT_GPIO, NMK_GPIO_ALT_A,
- * NMK_GPIO_ALT_B, and NMK_GPIO_ALT_C
- *
- * Sets the mode of the specified pin to one of the alternate functions or
- * plain GPIO.
- */
-int nmk_gpio_set_mode(int gpio, int gpio_mode)
-{
- struct nmk_gpio_chip *nmk_chip;
- unsigned long flags;
-
- nmk_chip = nmk_gpio_chips[gpio / NMK_GPIO_PER_CHIP];
- if (!nmk_chip)
- return -EINVAL;
-
- clk_enable(nmk_chip->clk);
- spin_lock_irqsave(&nmk_chip->lock, flags);
- __nmk_gpio_set_mode(nmk_chip, gpio % NMK_GPIO_PER_CHIP, gpio_mode);
- spin_unlock_irqrestore(&nmk_chip->lock, flags);
- clk_disable(nmk_chip->clk);
-
- return 0;
-}
-EXPORT_SYMBOL(nmk_gpio_set_mode);
-
static int __maybe_unused nmk_prcm_gpiocr_get_mode(struct pinctrl_dev *pctldev, int gpio)
{
int i;
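The block removed above was the last of the old Nomadik-specific pin configuration API (nmk_config_pin() and friends). Consumers are expected to go through the generic pinctrl consumer interface instead; a minimal sketch, assuming the device has a "default" state in its pinctrl mapping or device tree:

#include <linux/err.h>
#include <linux/device.h>
#include <linux/pinctrl/consumer.h>

static int foo_setup_pins(struct device *dev)
{
	struct pinctrl *p;
	struct pinctrl_state *def;

	p = devm_pinctrl_get(dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	def = pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT);
	if (IS_ERR(def))
		return PTR_ERR(def);

	/* mux, pull and sleep settings now come from the mapping, not code */
	return pinctrl_select_state(p, def);
}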
@@ -1350,10 +1055,6 @@ static int nmk_gpio_probe(struct platform_device *dev)
pdata->num_gpio = NMK_GPIO_PER_CHIP;
}
- res = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOENT;
-
irq = platform_get_irq(dev, 0);
if (irq < 0)
return irq;
@@ -1362,6 +1063,7 @@ static int nmk_gpio_probe(struct platform_device *dev)
if (secondary_irq >= 0 && !pdata->get_secondary_status)
return -EINVAL;
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&dev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
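Dropping the explicit resource check is safe because devm_ioremap_resource() itself rejects a NULL or non-memory resource, prints a diagnostic and returns an ERR_PTR; the probe only needs the IS_ERR() test on the mapping. The resulting idiom, sketched:

#include <linux/platform_device.h>
#include <linux/io.h>

static void __iomem *foo_map_regs(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* devm_ioremap_resource() copes with res == NULL on its own */
	return devm_ioremap_resource(&pdev->dev, res);
}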
@@ -1807,7 +1509,7 @@ static int nmk_pmx_enable(struct pinctrl_dev *pctldev, unsigned function,
struct nmk_pinctrl *npct = pinctrl_dev_get_drvdata(pctldev);
const struct nmk_pingroup *g;
static unsigned int slpm[NUM_BANKS];
- unsigned long flags;
+ unsigned long flags = 0;
bool glitch;
int ret = -EINVAL;
int i;
@@ -1993,7 +1695,7 @@ static int nmk_pin_config_get(struct pinctrl_dev *pctldev, unsigned pin,
}
static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin,
- unsigned long config)
+ unsigned long *configs, unsigned num_configs)
{
static const char *pullnames[] = {
[NMK_GPIO_PULL_NONE] = "none",
@@ -2010,20 +1712,9 @@ static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin,
struct pinctrl_gpio_range *range;
struct gpio_chip *chip;
unsigned bit;
-
- /*
- * The pin config contains pin number and altfunction fields, here
- * we just ignore that part. It's being handled by the framework and
- * pinmux callback respectively.
- */
- pin_cfg_t cfg = (pin_cfg_t) config;
- int pull = PIN_PULL(cfg);
- int slpm = PIN_SLPM(cfg);
- int output = PIN_DIR(cfg);
- int val = PIN_VAL(cfg);
- bool lowemi = PIN_LOWEMI(cfg);
- bool gpiomode = PIN_GPIOMODE(cfg);
- bool sleep = PIN_SLEEPMODE(cfg);
+ pin_cfg_t cfg;
+ int pull, slpm, output, val, i;
+ bool lowemi, gpiomode, sleep;
range = nmk_match_gpio_range(pctldev, pin);
if (!range) {
@@ -2038,54 +1729,74 @@ static int nmk_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin,
chip = range->gc;
nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
- if (sleep) {
- int slpm_pull = PIN_SLPM_PULL(cfg);
- int slpm_output = PIN_SLPM_DIR(cfg);
- int slpm_val = PIN_SLPM_VAL(cfg);
-
- /* All pins go into GPIO mode at sleep */
- gpiomode = true;
-
+ for (i = 0; i < num_configs; i++) {
/*
- * The SLPM_* values are normal values + 1 to allow zero to
- * mean "same as normal".
+ * The pin config contains pin number and altfunction fields,
+ * here we just ignore that part. It's being handled by the
+ * framework and pinmux callback respectively.
*/
- if (slpm_pull)
- pull = slpm_pull - 1;
- if (slpm_output)
- output = slpm_output - 1;
- if (slpm_val)
- val = slpm_val - 1;
-
- dev_dbg(nmk_chip->chip.dev, "pin %d: sleep pull %s, dir %s, val %s\n",
- pin,
- slpm_pull ? pullnames[pull] : "same",
- slpm_output ? (output ? "output" : "input") : "same",
- slpm_val ? (val ? "high" : "low") : "same");
- }
+ cfg = (pin_cfg_t) configs[i];
+ pull = PIN_PULL(cfg);
+ slpm = PIN_SLPM(cfg);
+ output = PIN_DIR(cfg);
+ val = PIN_VAL(cfg);
+ lowemi = PIN_LOWEMI(cfg);
+ gpiomode = PIN_GPIOMODE(cfg);
+ sleep = PIN_SLEEPMODE(cfg);
+
+ if (sleep) {
+ int slpm_pull = PIN_SLPM_PULL(cfg);
+ int slpm_output = PIN_SLPM_DIR(cfg);
+ int slpm_val = PIN_SLPM_VAL(cfg);
+
+ /* All pins go into GPIO mode at sleep */
+ gpiomode = true;
+
+ /*
+ * The SLPM_* values are normal values + 1 to allow zero
+ * to mean "same as normal".
+ */
+ if (slpm_pull)
+ pull = slpm_pull - 1;
+ if (slpm_output)
+ output = slpm_output - 1;
+ if (slpm_val)
+ val = slpm_val - 1;
+
+ dev_dbg(nmk_chip->chip.dev,
+ "pin %d: sleep pull %s, dir %s, val %s\n",
+ pin,
+ slpm_pull ? pullnames[pull] : "same",
+ slpm_output ? (output ? "output" : "input")
+ : "same",
+ slpm_val ? (val ? "high" : "low") : "same");
+ }
- dev_dbg(nmk_chip->chip.dev, "pin %d [%#lx]: pull %s, slpm %s (%s%s), lowemi %s\n",
- pin, cfg, pullnames[pull], slpmnames[slpm],
- output ? "output " : "input",
- output ? (val ? "high" : "low") : "",
- lowemi ? "on" : "off");
+ dev_dbg(nmk_chip->chip.dev,
+ "pin %d [%#lx]: pull %s, slpm %s (%s%s), lowemi %s\n",
+ pin, cfg, pullnames[pull], slpmnames[slpm],
+ output ? "output " : "input",
+ output ? (val ? "high" : "low") : "",
+ lowemi ? "on" : "off");
- clk_enable(nmk_chip->clk);
- bit = pin % NMK_GPIO_PER_CHIP;
- if (gpiomode)
- /* No glitch when going to GPIO mode */
- __nmk_gpio_set_mode(nmk_chip, bit, NMK_GPIO_ALT_GPIO);
- if (output)
- __nmk_gpio_make_output(nmk_chip, bit, val);
- else {
- __nmk_gpio_make_input(nmk_chip, bit);
- __nmk_gpio_set_pull(nmk_chip, bit, pull);
- }
- /* TODO: isn't this only applicable on output pins? */
- __nmk_gpio_set_lowemi(nmk_chip, bit, lowemi);
+ clk_enable(nmk_chip->clk);
+ bit = pin % NMK_GPIO_PER_CHIP;
+ if (gpiomode)
+ /* No glitch when going to GPIO mode */
+ __nmk_gpio_set_mode(nmk_chip, bit, NMK_GPIO_ALT_GPIO);
+ if (output)
+ __nmk_gpio_make_output(nmk_chip, bit, val);
+ else {
+ __nmk_gpio_make_input(nmk_chip, bit);
+ __nmk_gpio_set_pull(nmk_chip, bit, pull);
+ }
+ /* TODO: isn't this only applicable on output pins? */
+ __nmk_gpio_set_lowemi(nmk_chip, bit, lowemi);
+
+ __nmk_gpio_set_slpm(nmk_chip, bit, slpm);
+ clk_disable(nmk_chip->clk);
+ } /* for each config */
- __nmk_gpio_set_slpm(nmk_chip, bit, slpm);
- clk_disable(nmk_chip->clk);
return 0;
}
diff --git a/drivers/pinctrl/pinctrl-palmas.c b/drivers/pinctrl/pinctrl-palmas.c
new file mode 100644
index 00000000000..82638fac3cf
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-palmas.c
@@ -0,0 +1,1095 @@
+/*
+ * pinctrl-palmas.c -- TI PALMAS series pin control driver.
+ *
+ * Copyright (c) 2013, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mfd/palmas.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pm.h>
+#include <linux/slab.h>
+
+#include "core.h"
+#include "pinconf.h"
+#include "pinctrl-utils.h"
+
+#define PALMAS_PIN_GPIO0_ID 0
+#define PALMAS_PIN_GPIO1_VBUS_LED1_PWM1 1
+#define PALMAS_PIN_GPIO2_REGEN_LED2_PWM2 2
+#define PALMAS_PIN_GPIO3_CHRG_DET 3
+#define PALMAS_PIN_GPIO4_SYSEN1 4
+#define PALMAS_PIN_GPIO5_CLK32KGAUDIO_USB_PSEL 5
+#define PALMAS_PIN_GPIO6_SYSEN2 6
+#define PALMAS_PIN_GPIO7_MSECURE_PWRHOLD 7
+#define PALMAS_PIN_GPIO8_SIM1RSTI 8
+#define PALMAS_PIN_GPIO9_LOW_VBAT 9
+#define PALMAS_PIN_GPIO10_WIRELESS_CHRG1 10
+#define PALMAS_PIN_GPIO11_RCM 11
+#define PALMAS_PIN_GPIO12_SIM2RSTO 12
+#define PALMAS_PIN_GPIO13 13
+#define PALMAS_PIN_GPIO14 14
+#define PALMAS_PIN_GPIO15_SIM2RSTI 15
+#define PALMAS_PIN_VAC 16
+#define PALMAS_PIN_POWERGOOD_USB_PSEL 17
+#define PALMAS_PIN_NRESWARM 18
+#define PALMAS_PIN_PWRDOWN 19
+#define PALMAS_PIN_GPADC_START 20
+#define PALMAS_PIN_RESET_IN 21
+#define PALMAS_PIN_NSLEEP 22
+#define PALMAS_PIN_ENABLE1 23
+#define PALMAS_PIN_ENABLE2 24
+#define PALMAS_PIN_INT 25
+#define PALMAS_PIN_NUM (PALMAS_PIN_INT + 1)
+
+struct palmas_pin_function {
+ const char *name;
+ const char * const *groups;
+ unsigned ngroups;
+};
+
+struct palmas_pctrl_chip_info {
+ struct device *dev;
+ struct pinctrl_dev *pctl;
+ struct palmas *palmas;
+ int pins_current_opt[PALMAS_PIN_NUM];
+ const struct palmas_pin_function *functions;
+ unsigned num_functions;
+ const struct palmas_pingroup *pin_groups;
+ int num_pin_groups;
+ const struct pinctrl_pin_desc *pins;
+ unsigned num_pins;
+};
+
+static const struct pinctrl_pin_desc palmas_pins_desc[] = {
+ PINCTRL_PIN(PALMAS_PIN_GPIO0_ID, "gpio0"),
+ PINCTRL_PIN(PALMAS_PIN_GPIO1_VBUS_LED1_PWM1, "gpio1"),
+ PINCTRL_PIN(PALMAS_PIN_GPIO2_REGEN_LED2_PWM2, "gpio2"),
+ PINCTRL_PIN(PALMAS_PIN_GPIO3_CHRG_DET, "gpio3"),
+ PINCTRL_PIN(PALMAS_PIN_GPIO4_SYSEN1, "gpio4"),
+ PINCTRL_PIN(PALMAS_PIN_GPIO5_CLK32KGAUDIO_USB_PSEL, "gpio5"),
+ PINCTRL_PIN(PALMAS_PIN_GPIO6_SYSEN2, "gpio6"),
+ PINCTRL_PIN(PALMAS_PIN_GPIO7_MSECURE_PWRHOLD, "gpio7"),
+ PINCTRL_PIN(PALMAS_PIN_GPIO8_SIM1RSTI, "gpio8"),
+ PINCTRL_PIN(PALMAS_PIN_GPIO9_LOW_VBAT, "gpio9"),
+ PINCTRL_PIN(PALMAS_PIN_GPIO10_WIRELESS_CHRG1, "gpio10"),
+ PINCTRL_PIN(PALMAS_PIN_GPIO11_RCM, "gpio11"),
+ PINCTRL_PIN(PALMAS_PIN_GPIO12_SIM2RSTO, "gpio12"),
+ PINCTRL_PIN(PALMAS_PIN_GPIO13, "gpio13"),
+ PINCTRL_PIN(PALMAS_PIN_GPIO14, "gpio14"),
+ PINCTRL_PIN(PALMAS_PIN_GPIO15_SIM2RSTI, "gpio15"),
+ PINCTRL_PIN(PALMAS_PIN_VAC, "vac"),
+ PINCTRL_PIN(PALMAS_PIN_POWERGOOD_USB_PSEL, "powergood"),
+ PINCTRL_PIN(PALMAS_PIN_NRESWARM, "nreswarm"),
+ PINCTRL_PIN(PALMAS_PIN_PWRDOWN, "pwrdown"),
+ PINCTRL_PIN(PALMAS_PIN_GPADC_START, "gpadc_start"),
+ PINCTRL_PIN(PALMAS_PIN_RESET_IN, "reset_in"),
+ PINCTRL_PIN(PALMAS_PIN_NSLEEP, "nsleep"),
+ PINCTRL_PIN(PALMAS_PIN_ENABLE1, "enable1"),
+ PINCTRL_PIN(PALMAS_PIN_ENABLE2, "enable2"),
+ PINCTRL_PIN(PALMAS_PIN_INT, "int"),
+};
+
+static const char * const opt0_groups[] = {
+ "gpio0",
+ "gpio1",
+ "gpio2",
+ "gpio3",
+ "gpio4",
+ "gpio5",
+ "gpio6",
+ "gpio7",
+ "gpio8",
+ "gpio9",
+ "gpio10",
+ "gpio11",
+ "gpio12",
+ "gpio13",
+ "gpio14",
+ "gpio15",
+ "vac",
+ "powergood",
+ "nreswarm",
+ "pwrdown",
+ "gpadc_start",
+ "reset_in",
+ "nsleep",
+ "enable1",
+ "enable2",
+ "int",
+};
+
+static const char * const opt1_groups[] = {
+ "gpio0",
+ "gpio1",
+ "gpio2",
+ "gpio3",
+ "gpio4",
+ "gpio5",
+ "gpio6",
+ "gpio7",
+ "gpio8",
+ "gpio9",
+ "gpio10",
+ "gpio11",
+ "gpio12",
+ "gpio15",
+ "vac",
+ "powergood",
+};
+
+static const char * const opt2_groups[] = {
+ "gpio1",
+ "gpio2",
+ "gpio5",
+ "gpio7",
+};
+
+static const char * const opt3_groups[] = {
+ "gpio1",
+ "gpio2",
+};
+
+static const char * const gpio_groups[] = {
+ "gpio0",
+ "gpio1",
+ "gpio2",
+ "gpio3",
+ "gpio4",
+ "gpio5",
+ "gpio6",
+ "gpio7",
+ "gpio8",
+ "gpio9",
+ "gpio10",
+ "gpio11",
+ "gpio12",
+ "gpio13",
+ "gpio14",
+ "gpio15",
+};
+
+static const char * const led_groups[] = {
+ "gpio1",
+ "gpio2",
+};
+
+static const char * const pwm_groups[] = {
+ "gpio1",
+ "gpio2",
+};
+
+static const char * const regen_groups[] = {
+ "gpio2",
+};
+
+static const char * const sysen_groups[] = {
+ "gpio4",
+ "gpio6",
+};
+
+static const char * const clk32kgaudio_groups[] = {
+ "gpio5",
+};
+
+static const char * const id_groups[] = {
+ "gpio0",
+};
+
+static const char * const vbus_det_groups[] = {
+ "gpio1",
+};
+
+static const char * const chrg_det_groups[] = {
+ "gpio3",
+};
+
+static const char * const vac_groups[] = {
+ "vac",
+};
+
+static const char * const vacok_groups[] = {
+ "vac",
+};
+
+static const char * const powergood_groups[] = {
+ "powergood",
+};
+
+static const char * const usb_psel_groups[] = {
+ "gpio5",
+ "powergood",
+};
+
+static const char * const msecure_groups[] = {
+ "gpio7",
+};
+
+static const char * const pwrhold_groups[] = {
+ "gpio7",
+};
+
+static const char * const int_groups[] = {
+ "int",
+};
+
+static const char * const nreswarm_groups[] = {
+ "nreswarm",
+};
+
+static const char * const simrsto_groups[] = {
+ "gpio12",
+};
+
+static const char * const simrsti_groups[] = {
+ "gpio8",
+ "gpio15",
+};
+
+static const char * const low_vbat_groups[] = {
+ "gpio9",
+};
+
+static const char * const wireless_chrg1_groups[] = {
+ "gpio10",
+};
+
+static const char * const rcm_groups[] = {
+ "gpio11",
+};
+
+static const char * const pwrdown_groups[] = {
+ "pwrdown",
+};
+
+static const char * const gpadc_start_groups[] = {
+ "gpadc_start",
+};
+
+static const char * const reset_in_groups[] = {
+ "reset_in",
+};
+
+static const char * const nsleep_groups[] = {
+ "nsleep",
+};
+
+static const char * const enable_groups[] = {
+ "enable1",
+ "enable2",
+};
+
+#define FUNCTION_GROUPS \
+ FUNCTION_GROUP(opt0, OPTION0), \
+ FUNCTION_GROUP(opt1, OPTION1), \
+ FUNCTION_GROUP(opt2, OPTION2), \
+ FUNCTION_GROUP(opt3, OPTION3), \
+ FUNCTION_GROUP(gpio, GPIO), \
+ FUNCTION_GROUP(led, LED), \
+ FUNCTION_GROUP(pwm, PWM), \
+ FUNCTION_GROUP(regen, REGEN), \
+ FUNCTION_GROUP(sysen, SYSEN), \
+ FUNCTION_GROUP(clk32kgaudio, CLK32KGAUDIO), \
+ FUNCTION_GROUP(id, ID), \
+ FUNCTION_GROUP(vbus_det, VBUS_DET), \
+ FUNCTION_GROUP(chrg_det, CHRG_DET), \
+ FUNCTION_GROUP(vac, VAC), \
+ FUNCTION_GROUP(vacok, VACOK), \
+ FUNCTION_GROUP(powergood, POWERGOOD), \
+ FUNCTION_GROUP(usb_psel, USB_PSEL), \
+ FUNCTION_GROUP(msecure, MSECURE), \
+ FUNCTION_GROUP(pwrhold, PWRHOLD), \
+ FUNCTION_GROUP(int, INT), \
+ FUNCTION_GROUP(nreswarm, NRESWARM), \
+ FUNCTION_GROUP(simrsto, SIMRSTO), \
+ FUNCTION_GROUP(simrsti, SIMRSTI), \
+ FUNCTION_GROUP(low_vbat, LOW_VBAT), \
+ FUNCTION_GROUP(wireless_chrg1, WIRELESS_CHRG1), \
+ FUNCTION_GROUP(rcm, RCM), \
+ FUNCTION_GROUP(pwrdown, PWRDOWN), \
+ FUNCTION_GROUP(gpadc_start, GPADC_START), \
+ FUNCTION_GROUP(reset_in, RESET_IN), \
+ FUNCTION_GROUP(nsleep, NSLEEP), \
+ FUNCTION_GROUP(enable, ENABLE)
+
+static const struct palmas_pin_function palmas_pin_function[] = {
+#undef FUNCTION_GROUP
+#define FUNCTION_GROUP(fname, mux) \
+ { \
+ .name = #fname, \
+ .groups = fname##_groups, \
+ .ngroups = ARRAY_SIZE(fname##_groups), \
+ }
+
+ FUNCTION_GROUPS,
+};
+
+enum palmas_pinmux {
+#undef FUNCTION_GROUP
+#define FUNCTION_GROUP(fname, mux) PALMAS_PINMUX_##mux
+ FUNCTION_GROUPS,
+ PALMAS_PINMUX_NA = 0xFFFF,
+};
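FUNCTION_GROUPS above is an X-macro list: FUNCTION_GROUP is redefined before each expansion, so the same list generates both the palmas_pin_function[] table and the palmas_pinmux enum, and the two cannot drift apart. A stripped-down, self-contained illustration of the technique (the names below are illustrative only, not taken from the driver):

#include <stdio.h>

#define COLOR_LIST \
	COLOR(red,   0xff0000), \
	COLOR(green, 0x00ff00), \
	COLOR(blue,  0x0000ff)

/* first expansion: an enum of ids */
enum color_id {
#define COLOR(name, rgb) COLOR_##name
	COLOR_LIST,
	COLOR_MAX
#undef COLOR
};

/* second expansion: a table indexed by those ids */
static const struct color { const char *name; unsigned rgb; } colors[] = {
#define COLOR(name, rgb) { #name, rgb }
	COLOR_LIST,
#undef COLOR
};

int main(void)
{
	printf("%s = %#x\n", colors[COLOR_green].name, colors[COLOR_green].rgb);
	return 0;
}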
+
+struct palmas_pins_pullup_dn_info {
+ int pullup_dn_reg_base;
+ int pullup_dn_reg_add;
+ int pullup_dn_mask;
+ int normal_val;
+ int pull_up_val;
+ int pull_dn_val;
+};
+
+struct palmas_pins_od_info {
+ int od_reg_base;
+ int od_reg_add;
+ int od_mask;
+ int od_enable;
+ int od_disable;
+};
+
+struct palmas_pin_info {
+ enum palmas_pinmux mux_opt;
+ const struct palmas_pins_pullup_dn_info *pud_info;
+ const struct palmas_pins_od_info *od_info;
+};
+
+struct palmas_pingroup {
+ const char *name;
+ const unsigned pins[1];
+ unsigned npins;
+ unsigned mux_reg_base;
+ unsigned mux_reg_add;
+ unsigned mux_reg_mask;
+ unsigned mux_bit_shift;
+ const struct palmas_pin_info *opt[4];
+};
+
+#define PULL_UP_DN(_name, _rbase, _add, _mask, _nv, _uv, _dv) \
+static const struct palmas_pins_pullup_dn_info pud_##_name##_info = { \
+ .pullup_dn_reg_base = PALMAS_##_rbase##_BASE, \
+ .pullup_dn_reg_add = _add, \
+ .pullup_dn_mask = _mask, \
+ .normal_val = _nv, \
+ .pull_up_val = _uv, \
+ .pull_dn_val = _dv, \
+}
+
+PULL_UP_DN(nreswarm, PU_PD_OD, PALMAS_PU_PD_INPUT_CTRL1, 0x2, 0x0, 0x2, -1);
+PULL_UP_DN(pwrdown, PU_PD_OD, PALMAS_PU_PD_INPUT_CTRL1, 0x4, 0x0, -1, 0x4);
+PULL_UP_DN(gpadc_start, PU_PD_OD, PALMAS_PU_PD_INPUT_CTRL1, 0x30, 0x0, 0x20, 0x10);
+PULL_UP_DN(reset_in, PU_PD_OD, PALMAS_PU_PD_INPUT_CTRL1, 0x40, 0x0, -1, 0x40);
+PULL_UP_DN(nsleep, PU_PD_OD, PALMAS_PU_PD_INPUT_CTRL2, 0x3, 0x0, 0x2, 0x1);
+PULL_UP_DN(enable1, PU_PD_OD, PALMAS_PU_PD_INPUT_CTRL2, 0xC, 0x0, 0x8, 0x4);
+PULL_UP_DN(enable2, PU_PD_OD, PALMAS_PU_PD_INPUT_CTRL2, 0x30, 0x0, 0x20, 0x10);
+PULL_UP_DN(vacok, PU_PD_OD, PALMAS_PU_PD_INPUT_CTRL3, 0x40, 0x0, -1, 0x40);
+PULL_UP_DN(chrg_det, PU_PD_OD, PALMAS_PU_PD_INPUT_CTRL3, 0x10, 0x0, -1, 0x10);
+PULL_UP_DN(pwrhold, PU_PD_OD, PALMAS_PU_PD_INPUT_CTRL3, 0x4, 0x0, -1, 0x4);
+PULL_UP_DN(msecure, PU_PD_OD, PALMAS_PU_PD_INPUT_CTRL3, 0x1, 0x0, -1, 0x1);
+PULL_UP_DN(id, USB_OTG, PALMAS_USB_ID_CTRL_SET, 0x40, 0x0, 0x40, -1);
+PULL_UP_DN(gpio0, GPIO, PALMAS_PU_PD_GPIO_CTRL1, 0x04, 0, -1, 1);
+PULL_UP_DN(gpio1, GPIO, PALMAS_PU_PD_GPIO_CTRL1, 0x0C, 0, 0x8, 0x4);
+PULL_UP_DN(gpio2, GPIO, PALMAS_PU_PD_GPIO_CTRL1, 0x30, 0x0, 0x20, 0x10);
+PULL_UP_DN(gpio3, GPIO, PALMAS_PU_PD_GPIO_CTRL1, 0x40, 0x0, -1, 0x40);
+PULL_UP_DN(gpio4, GPIO, PALMAS_PU_PD_GPIO_CTRL2, 0x03, 0x0, 0x2, 0x1);
+PULL_UP_DN(gpio5, GPIO, PALMAS_PU_PD_GPIO_CTRL2, 0x0c, 0x0, 0x8, 0x4);
+PULL_UP_DN(gpio6, GPIO, PALMAS_PU_PD_GPIO_CTRL2, 0x30, 0x0, 0x20, 0x10);
+PULL_UP_DN(gpio7, GPIO, PALMAS_PU_PD_GPIO_CTRL2, 0x40, 0x0, -1, 0x40);
+PULL_UP_DN(gpio9, GPIO, PALMAS_PU_PD_GPIO_CTRL3, 0x0C, 0x0, 0x8, 0x4);
+PULL_UP_DN(gpio10, GPIO, PALMAS_PU_PD_GPIO_CTRL3, 0x30, 0x0, 0x20, 0x10);
+PULL_UP_DN(gpio11, GPIO, PALMAS_PU_PD_GPIO_CTRL3, 0xC0, 0x0, 0x80, 0x40);
+PULL_UP_DN(gpio13, GPIO, PALMAS_PU_PD_GPIO_CTRL4, 0x04, 0x0, -1, 0x04);
+PULL_UP_DN(gpio14, GPIO, PALMAS_PU_PD_GPIO_CTRL4, 0x30, 0x0, 0x20, 0x10);
+
+#define OD_INFO(_name, _rbase, _add, _mask, _ev, _dv) \
+static const struct palmas_pins_od_info od_##_name##_info = { \
+ .od_reg_base = PALMAS_##_rbase##_BASE, \
+ .od_reg_add = _add, \
+ .od_mask = _mask, \
+ .od_enable = _ev, \
+ .od_disable = _dv, \
+}
+
+OD_INFO(gpio1, GPIO, PALMAS_OD_OUTPUT_GPIO_CTRL, 0x1, 0x1, 0x0);
+OD_INFO(gpio2, GPIO, PALMAS_OD_OUTPUT_GPIO_CTRL, 0x2, 0x2, 0x0);
+OD_INFO(gpio5, GPIO, PALMAS_OD_OUTPUT_GPIO_CTRL, 0x20, 0x20, 0x0);
+OD_INFO(gpio10, GPIO, PALMAS_OD_OUTPUT_GPIO_CTRL2, 0x04, 0x04, 0x0);
+OD_INFO(gpio13, GPIO, PALMAS_OD_OUTPUT_GPIO_CTRL2, 0x20, 0x20, 0x0);
+OD_INFO(int, PU_PD_OD, PALMAS_OD_OUTPUT_CTRL, 0x8, 0x8, 0x0);
+OD_INFO(pwm1, PU_PD_OD, PALMAS_OD_OUTPUT_CTRL, 0x20, 0x20, 0x0);
+OD_INFO(pwm2, PU_PD_OD, PALMAS_OD_OUTPUT_CTRL, 0x80, 0x80, 0x0);
+OD_INFO(vbus_det, PU_PD_OD, PALMAS_OD_OUTPUT_CTRL, 0x40, 0x40, 0x0);
+
+#define PIN_INFO(_name, _id, _pud_info, _od_info) \
+static const struct palmas_pin_info pin_##_name##_info = { \
+ .mux_opt = PALMAS_PINMUX_##_id, \
+ .pud_info = _pud_info, \
+ .od_info = _od_info \
+}
+
+PIN_INFO(gpio0, GPIO, &pud_gpio0_info, NULL);
+PIN_INFO(gpio1, GPIO, &pud_gpio1_info, &od_gpio1_info);
+PIN_INFO(gpio2, GPIO, &pud_gpio2_info, &od_gpio2_info);
+PIN_INFO(gpio3, GPIO, &pud_gpio3_info, NULL);
+PIN_INFO(gpio4, GPIO, &pud_gpio4_info, NULL);
+PIN_INFO(gpio5, GPIO, &pud_gpio5_info, &od_gpio5_info);
+PIN_INFO(gpio6, GPIO, &pud_gpio6_info, NULL);
+PIN_INFO(gpio7, GPIO, &pud_gpio7_info, NULL);
+PIN_INFO(gpio8, GPIO, NULL, NULL);
+PIN_INFO(gpio9, GPIO, &pud_gpio9_info, NULL);
+PIN_INFO(gpio10, GPIO, &pud_gpio10_info, &od_gpio10_info);
+PIN_INFO(gpio11, GPIO, &pud_gpio11_info, NULL);
+PIN_INFO(gpio12, GPIO, NULL, NULL);
+PIN_INFO(gpio13, GPIO, &pud_gpio13_info, &od_gpio13_info);
+PIN_INFO(gpio14, GPIO, &pud_gpio14_info, NULL);
+PIN_INFO(gpio15, GPIO, NULL, NULL);
+PIN_INFO(id, ID, &pud_id_info, NULL);
+PIN_INFO(led1, LED, NULL, NULL);
+PIN_INFO(led2, LED, NULL, NULL);
+PIN_INFO(regen, REGEN, NULL, NULL);
+PIN_INFO(sysen1, SYSEN, NULL, NULL);
+PIN_INFO(sysen2, SYSEN, NULL, NULL);
+PIN_INFO(int, INT, NULL, &od_int_info);
+PIN_INFO(pwm1, PWM, NULL, &od_pwm1_info);
+PIN_INFO(pwm2, PWM, NULL, &od_pwm2_info);
+PIN_INFO(vacok, VACOK, &pud_vacok_info, NULL);
+PIN_INFO(chrg_det, CHRG_DET, &pud_chrg_det_info, NULL);
+PIN_INFO(pwrhold, PWRHOLD, &pud_pwrhold_info, NULL);
+PIN_INFO(msecure, MSECURE, &pud_msecure_info, NULL);
+PIN_INFO(nreswarm, NA, &pud_nreswarm_info, NULL);
+PIN_INFO(pwrdown, NA, &pud_pwrdown_info, NULL);
+PIN_INFO(gpadc_start, NA, &pud_gpadc_start_info, NULL);
+PIN_INFO(reset_in, NA, &pud_reset_in_info, NULL);
+PIN_INFO(nsleep, NA, &pud_nsleep_info, NULL);
+PIN_INFO(enable1, NA, &pud_enable1_info, NULL);
+PIN_INFO(enable2, NA, &pud_enable2_info, NULL);
+PIN_INFO(clk32kgaudio, CLK32KGAUDIO, NULL, NULL);
+PIN_INFO(usb_psel, USB_PSEL, NULL, NULL);
+PIN_INFO(vac, VAC, NULL, NULL);
+PIN_INFO(powergood, POWERGOOD, NULL, NULL);
+PIN_INFO(vbus_det, VBUS_DET, NULL, &od_vbus_det_info);
+PIN_INFO(sim1rsti, SIMRSTI, NULL, NULL);
+PIN_INFO(low_vbat, LOW_VBAT, NULL, NULL);
+PIN_INFO(rcm, RCM, NULL, NULL);
+PIN_INFO(sim2rsto, SIMRSTO, NULL, NULL);
+PIN_INFO(sim2rsti, SIMRSTI, NULL, NULL);
+PIN_INFO(wireless_chrg1, WIRELESS_CHRG1, NULL, NULL);
+
+#define PALMAS_PRIMARY_SECONDARY_NONE 0
+#define PALMAS_NONE_BASE 0
+#define PALMAS_PRIMARY_SECONDARY_INPUT3 PALMAS_PU_PD_INPUT_CTRL3
+
+#define PALMAS_PINGROUP(pg_name, pin_id, base, reg, _mask, _bshift, o0, o1, o2, o3) \
+ { \
+ .name = #pg_name, \
+ .pins = {PALMAS_PIN_##pin_id}, \
+ .npins = 1, \
+ .mux_reg_base = PALMAS_##base##_BASE, \
+ .mux_reg_add = PALMAS_PRIMARY_SECONDARY_##reg, \
+ .mux_reg_mask = _mask, \
+ .mux_bit_shift = _bshift, \
+ .opt = { \
+ o0, \
+ o1, \
+ o2, \
+ o3, \
+ }, \
+ }
+
+static const struct palmas_pingroup tps65913_pingroups[] = {
+ PALMAS_PINGROUP(gpio0, GPIO0_ID, PU_PD_OD, PAD1, 0x4, 0x2, &pin_gpio0_info, &pin_id_info, NULL, NULL),
+ PALMAS_PINGROUP(gpio1, GPIO1_VBUS_LED1_PWM1, PU_PD_OD, PAD1, 0x18, 0x3, &pin_gpio1_info, &pin_vbus_det_info, &pin_led1_info, &pin_pwm1_info),
+ PALMAS_PINGROUP(gpio2, GPIO2_REGEN_LED2_PWM2, PU_PD_OD, PAD1, 0x60, 0x5, &pin_gpio2_info, &pin_regen_info, &pin_led2_info, &pin_pwm2_info),
+ PALMAS_PINGROUP(gpio3, GPIO3_CHRG_DET, PU_PD_OD, PAD1, 0x80, 0x7, &pin_gpio3_info, &pin_chrg_det_info, NULL, NULL),
+ PALMAS_PINGROUP(gpio4, GPIO4_SYSEN1, PU_PD_OD, PAD1, 0x01, 0x0, &pin_gpio4_info, &pin_sysen1_info, NULL, NULL),
+ PALMAS_PINGROUP(gpio5, GPIO5_CLK32KGAUDIO_USB_PSEL, PU_PD_OD, PAD2, 0x6, 0x1, &pin_gpio5_info, &pin_clk32kgaudio_info, &pin_usb_psel_info, NULL),
+ PALMAS_PINGROUP(gpio6, GPIO6_SYSEN2, PU_PD_OD, PAD2, 0x08, 0x3, &pin_gpio6_info, &pin_sysen2_info, NULL, NULL),
+ PALMAS_PINGROUP(gpio7, GPIO7_MSECURE_PWRHOLD, PU_PD_OD, PAD2, 0x30, 0x4, &pin_gpio7_info, &pin_msecure_info, &pin_pwrhold_info, NULL),
+ PALMAS_PINGROUP(vac, VAC, PU_PD_OD, PAD1, 0x02, 0x1, &pin_vac_info, &pin_vacok_info, NULL, NULL),
+ PALMAS_PINGROUP(powergood, POWERGOOD_USB_PSEL, PU_PD_OD, PAD1, 0x01, 0x0, &pin_powergood_info, &pin_usb_psel_info, NULL, NULL),
+ PALMAS_PINGROUP(nreswarm, NRESWARM, NONE, NONE, 0x0, 0x0, &pin_nreswarm_info, NULL, NULL, NULL),
+ PALMAS_PINGROUP(pwrdown, PWRDOWN, NONE, NONE, 0x0, 0x0, &pin_pwrdown_info, NULL, NULL, NULL),
+ PALMAS_PINGROUP(gpadc_start, GPADC_START, NONE, NONE, 0x0, 0x0, &pin_gpadc_start_info, NULL, NULL, NULL),
+ PALMAS_PINGROUP(reset_in, RESET_IN, NONE, NONE, 0x0, 0x0, &pin_reset_in_info, NULL, NULL, NULL),
+ PALMAS_PINGROUP(nsleep, NSLEEP, NONE, NONE, 0x0, 0x0, &pin_nsleep_info, NULL, NULL, NULL),
+ PALMAS_PINGROUP(enable1, ENABLE1, NONE, NONE, 0x0, 0x0, &pin_enable1_info, NULL, NULL, NULL),
+ PALMAS_PINGROUP(enable2, ENABLE2, NONE, NONE, 0x0, 0x0, &pin_enable2_info, NULL, NULL, NULL),
+ PALMAS_PINGROUP(int, INT, NONE, NONE, 0x0, 0x0, &pin_int_info, NULL, NULL, NULL),
+};
+
+static const struct palmas_pingroup tps80036_pingroups[] = {
+ PALMAS_PINGROUP(gpio0, GPIO0_ID, PU_PD_OD, PAD1, 0x4, 0x2, &pin_gpio0_info, &pin_id_info, NULL, NULL),
+ PALMAS_PINGROUP(gpio1, GPIO1_VBUS_LED1_PWM1, PU_PD_OD, PAD1, 0x18, 0x3, &pin_gpio1_info, &pin_vbus_det_info, &pin_led1_info, &pin_pwm1_info),
+ PALMAS_PINGROUP(gpio2, GPIO2_REGEN_LED2_PWM2, PU_PD_OD, PAD1, 0x60, 0x5, &pin_gpio2_info, &pin_regen_info, &pin_led2_info, &pin_pwm2_info),
+ PALMAS_PINGROUP(gpio3, GPIO3_CHRG_DET, PU_PD_OD, PAD1, 0x80, 0x7, &pin_gpio3_info, &pin_chrg_det_info, NULL, NULL),
+ PALMAS_PINGROUP(gpio4, GPIO4_SYSEN1, PU_PD_OD, PAD1, 0x01, 0x0, &pin_gpio4_info, &pin_sysen1_info, NULL, NULL),
+ PALMAS_PINGROUP(gpio5, GPIO5_CLK32KGAUDIO_USB_PSEL, PU_PD_OD, PAD2, 0x6, 0x1, &pin_gpio5_info, &pin_clk32kgaudio_info, &pin_usb_psel_info, NULL),
+ PALMAS_PINGROUP(gpio6, GPIO6_SYSEN2, PU_PD_OD, PAD2, 0x08, 0x3, &pin_gpio6_info, &pin_sysen2_info, NULL, NULL),
+ PALMAS_PINGROUP(gpio7, GPIO7_MSECURE_PWRHOLD, PU_PD_OD, PAD2, 0x30, 0x4, &pin_gpio7_info, &pin_msecure_info, &pin_pwrhold_info, NULL),
+ PALMAS_PINGROUP(gpio8, GPIO8_SIM1RSTI, PU_PD_OD, PAD4, 0x01, 0x0, &pin_gpio8_info, &pin_sim1rsti_info, NULL, NULL),
+ PALMAS_PINGROUP(gpio9, GPIO9_LOW_VBAT, PU_PD_OD, PAD4, 0x02, 0x1, &pin_gpio9_info, &pin_low_vbat_info, NULL, NULL),
+ PALMAS_PINGROUP(gpio10, GPIO10_WIRELESS_CHRG1, PU_PD_OD, PAD4, 0x04, 0x2, &pin_gpio10_info, &pin_wireless_chrg1_info, NULL, NULL),
+ PALMAS_PINGROUP(gpio11, GPIO11_RCM, PU_PD_OD, PAD4, 0x08, 0x3, &pin_gpio11_info, &pin_rcm_info, NULL, NULL),
+ PALMAS_PINGROUP(gpio12, GPIO12_SIM2RSTO, PU_PD_OD, PAD4, 0x10, 0x4, &pin_gpio12_info, &pin_sim2rsto_info, NULL, NULL),
+ PALMAS_PINGROUP(gpio13, GPIO13, NONE, NONE, 0x00, 0x0, &pin_gpio13_info, NULL, NULL, NULL),
+ PALMAS_PINGROUP(gpio14, GPIO14, NONE, NONE, 0x00, 0x0, &pin_gpio14_info, NULL, NULL, NULL),
+ PALMAS_PINGROUP(gpio15, GPIO15_SIM2RSTI, PU_PD_OD, PAD4, 0x80, 0x7, &pin_gpio15_info, &pin_sim2rsti_info, NULL, NULL),
+ PALMAS_PINGROUP(vac, VAC, PU_PD_OD, PAD1, 0x02, 0x1, &pin_vac_info, &pin_vacok_info, NULL, NULL),
+ PALMAS_PINGROUP(powergood, POWERGOOD_USB_PSEL, PU_PD_OD, PAD1, 0x01, 0x0, &pin_powergood_info, &pin_usb_psel_info, NULL, NULL),
+ PALMAS_PINGROUP(nreswarm, NRESWARM, NONE, NONE, 0x0, 0x0, &pin_nreswarm_info, NULL, NULL, NULL),
+ PALMAS_PINGROUP(pwrdown, PWRDOWN, NONE, NONE, 0x0, 0x0, &pin_pwrdown_info, NULL, NULL, NULL),
+ PALMAS_PINGROUP(gpadc_start, GPADC_START, NONE, NONE, 0x0, 0x0, &pin_gpadc_start_info, NULL, NULL, NULL),
+ PALMAS_PINGROUP(reset_in, RESET_IN, NONE, NONE, 0x0, 0x0, &pin_reset_in_info, NULL, NULL, NULL),
+ PALMAS_PINGROUP(nsleep, NSLEEP, NONE, NONE, 0x0, 0x0, &pin_nsleep_info, NULL, NULL, NULL),
+ PALMAS_PINGROUP(enable1, ENABLE1, NONE, NONE, 0x0, 0x0, &pin_enable1_info, NULL, NULL, NULL),
+ PALMAS_PINGROUP(enable2, ENABLE2, NONE, NONE, 0x0, 0x0, &pin_enable2_info, NULL, NULL, NULL),
+ PALMAS_PINGROUP(int, INT, NONE, NONE, 0x0, 0x0, &pin_int_info, NULL, NULL, NULL),
+};
+
+static int palmas_pinctrl_get_pin_mux(struct palmas_pctrl_chip_info *pci)
+{
+ const struct palmas_pingroup *g;
+ unsigned int val;
+ int ret;
+ int i;
+
+ for (i = 0; i < pci->num_pin_groups; ++i) {
+ g = &pci->pin_groups[i];
+ if (g->mux_reg_base == PALMAS_NONE_BASE) {
+ pci->pins_current_opt[i] = 0;
+ continue;
+ }
+ ret = palmas_read(pci->palmas, g->mux_reg_base,
+ g->mux_reg_add, &val);
+ if (ret < 0) {
+ dev_err(pci->dev, "mux_reg 0x%02x read failed: %d\n",
+ g->mux_reg_add, ret);
+ return ret;
+ }
+ val &= g->mux_reg_mask;
+ pci->pins_current_opt[i] = val >> g->mux_bit_shift;
+ }
+ return 0;
+}
+
+static int palmas_pinctrl_set_dvfs1(struct palmas_pctrl_chip_info *pci,
+ bool enable)
+{
+ int ret;
+ int val;
+
+ val = enable ? PALMAS_PRIMARY_SECONDARY_PAD3_DVFS1 : 0;
+ ret = palmas_update_bits(pci->palmas, PALMAS_PU_PD_OD_BASE,
+ PALMAS_PRIMARY_SECONDARY_PAD3,
+ PALMAS_PRIMARY_SECONDARY_PAD3_DVFS1, val);
+ if (ret < 0)
+ dev_err(pci->dev, "SECONDARY_PAD3 update failed %d\n", ret);
+ return ret;
+}
+
+static int palmas_pinctrl_set_dvfs2(struct palmas_pctrl_chip_info *pci,
+ bool enable)
+{
+ int ret;
+ int val;
+
+ val = enable ? PALMAS_PRIMARY_SECONDARY_PAD3_DVFS2 : 0;
+ ret = palmas_update_bits(pci->palmas, PALMAS_PU_PD_OD_BASE,
+ PALMAS_PRIMARY_SECONDARY_PAD3,
+ PALMAS_PRIMARY_SECONDARY_PAD3_DVFS2, val);
+ if (ret < 0)
+ dev_err(pci->dev, "SECONDARY_PAD3 update failed %d\n", ret);
+ return ret;
+}
+
+static int palmas_pinctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+ struct palmas_pctrl_chip_info *pci = pinctrl_dev_get_drvdata(pctldev);
+
+ return pci->num_pin_groups;
+}
+
+static const char *palmas_pinctrl_get_group_name(struct pinctrl_dev *pctldev,
+ unsigned group)
+{
+ struct palmas_pctrl_chip_info *pci = pinctrl_dev_get_drvdata(pctldev);
+
+ return pci->pin_groups[group].name;
+}
+
+static int palmas_pinctrl_get_group_pins(struct pinctrl_dev *pctldev,
+ unsigned group, const unsigned **pins, unsigned *num_pins)
+{
+ struct palmas_pctrl_chip_info *pci = pinctrl_dev_get_drvdata(pctldev);
+
+ *pins = pci->pin_groups[group].pins;
+ *num_pins = pci->pin_groups[group].npins;
+ return 0;
+}
+
+static const struct pinctrl_ops palmas_pinctrl_ops = {
+ .get_groups_count = palmas_pinctrl_get_groups_count,
+ .get_group_name = palmas_pinctrl_get_group_name,
+ .get_group_pins = palmas_pinctrl_get_group_pins,
+ .dt_node_to_map = pinconf_generic_dt_node_to_map_pin,
+ .dt_free_map = pinctrl_utils_dt_free_map,
+};
+
+static int palmas_pinctrl_get_funcs_count(struct pinctrl_dev *pctldev)
+{
+ struct palmas_pctrl_chip_info *pci = pinctrl_dev_get_drvdata(pctldev);
+
+ return pci->num_functions;
+}
+
+static const char *palmas_pinctrl_get_func_name(struct pinctrl_dev *pctldev,
+ unsigned function)
+{
+ struct palmas_pctrl_chip_info *pci = pinctrl_dev_get_drvdata(pctldev);
+
+ return pci->functions[function].name;
+}
+
+static int palmas_pinctrl_get_func_groups(struct pinctrl_dev *pctldev,
+ unsigned function, const char * const **groups,
+ unsigned * const num_groups)
+{
+ struct palmas_pctrl_chip_info *pci = pinctrl_dev_get_drvdata(pctldev);
+
+ *groups = pci->functions[function].groups;
+ *num_groups = pci->functions[function].ngroups;
+ return 0;
+}
+
+static int palmas_pinctrl_enable(struct pinctrl_dev *pctldev, unsigned function,
+ unsigned group)
+{
+ struct palmas_pctrl_chip_info *pci = pinctrl_dev_get_drvdata(pctldev);
+ const struct palmas_pingroup *g;
+ int i;
+ int ret;
+
+ g = &pci->pin_groups[group];
+
+ /* If direct option is provided here */
+ if (function <= PALMAS_PINMUX_OPTION3) {
+ if (!g->opt[function]) {
+ dev_err(pci->dev, "Pin %s does not support option %d\n",
+ g->name, function);
+ return -EINVAL;
+ }
+ i = function;
+ } else {
+ for (i = 0; i < ARRAY_SIZE(g->opt); i++) {
+ if (!g->opt[i])
+ continue;
+ if (g->opt[i]->mux_opt == function)
+ break;
+ }
+ if (WARN_ON(i == ARRAY_SIZE(g->opt))) {
+ dev_err(pci->dev, "Pin %s does not support option %d\n",
+ g->name, function);
+ return -EINVAL;
+ }
+ }
+
+ if (g->mux_reg_base == PALMAS_NONE_BASE) {
+ if (WARN_ON(i != 0))
+ return -EINVAL;
+ return 0;
+ }
+
+ dev_dbg(pci->dev, "%s(): Base0x%02x:0x%02x:0x%02x:0x%02x\n",
+ __func__, g->mux_reg_base, g->mux_reg_add,
+ g->mux_reg_mask, i << g->mux_bit_shift);
+
+ ret = palmas_update_bits(pci->palmas, g->mux_reg_base, g->mux_reg_add,
+ g->mux_reg_mask, i << g->mux_bit_shift);
+ if (ret < 0) {
+ dev_err(pci->dev, "Reg 0x%02x update failed: %d\n",
+ g->mux_reg_add, ret);
+ return ret;
+ }
+ pci->pins_current_opt[group] = i;
+ return 0;
+}
+
+static const struct pinmux_ops palmas_pinmux_ops = {
+ .get_functions_count = palmas_pinctrl_get_funcs_count,
+ .get_function_name = palmas_pinctrl_get_func_name,
+ .get_function_groups = palmas_pinctrl_get_func_groups,
+ .enable = palmas_pinctrl_enable,
+};
+
+static int palmas_pinconf_get(struct pinctrl_dev *pctldev,
+ unsigned pin, unsigned long *config)
+{
+ struct palmas_pctrl_chip_info *pci = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param = pinconf_to_config_param(*config);
+ const struct palmas_pingroup *g;
+ const struct palmas_pin_info *opt;
+ unsigned int val;
+ int ret;
+ int base, add;
+ int rval;
+ int arg;
+ int group_nr;
+
+ for (group_nr = 0; group_nr < pci->num_pin_groups; ++group_nr) {
+ if (pci->pin_groups[group_nr].pins[0] == pin)
+ break;
+ }
+
+ if (group_nr == pci->num_pin_groups) {
+ dev_err(pci->dev,
+ "Pinconf is not supported for pin-id %d\n", pin);
+ return -ENOTSUPP;
+ }
+
+ g = &pci->pin_groups[group_nr];
+ opt = g->opt[pci->pins_current_opt[group_nr]];
+ if (!opt) {
+ dev_err(pci->dev,
+ "Pinconf is not supported for pin %s\n", g->name);
+ return -ENOTSUPP;
+ }
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (!opt->pud_info) {
+ dev_err(pci->dev,
+ "PULL control not supported for pin %s\n",
+ g->name);
+ return -ENOTSUPP;
+ }
+ base = opt->pud_info->pullup_dn_reg_base;
+ add = opt->pud_info->pullup_dn_reg_add;
+ ret = palmas_read(pci->palmas, base, add, &val);
+ if (ret < 0) {
+ dev_err(pci->dev, "Reg 0x%02x read failed: %d\n",
+ add, ret);
+ return ret;
+ }
+
+ rval = val & opt->pud_info->pullup_dn_mask;
+ arg = 0;
+ if ((opt->pud_info->normal_val >= 0) &&
+ (opt->pud_info->normal_val == rval) &&
+ (param == PIN_CONFIG_BIAS_DISABLE))
+ arg = 1;
+ else if ((opt->pud_info->pull_up_val >= 0) &&
+ (opt->pud_info->pull_up_val == rval) &&
+ (param == PIN_CONFIG_BIAS_PULL_UP))
+ arg = 1;
+ else if ((opt->pud_info->pull_dn_val >= 0) &&
+ (opt->pud_info->pull_dn_val == rval) &&
+ (param == PIN_CONFIG_BIAS_PULL_DOWN))
+ arg = 1;
+ break;
+
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ if (!opt->od_info) {
+ dev_err(pci->dev,
+ "OD control not supported for pin %s\n",
+ g->name);
+ return -ENOTSUPP;
+ }
+ base = opt->od_info->od_reg_base;
+ add = opt->od_info->od_reg_add;
+ ret = palmas_read(pci->palmas, base, add, &val);
+ if (ret < 0) {
+ dev_err(pci->dev, "Reg 0x%02x read failed: %d\n",
+ add, ret);
+ return ret;
+ }
+ rval = val & opt->od_info->od_mask;
+ arg = -1;
+ if ((opt->od_info->od_disable >= 0) &&
+ (opt->od_info->od_disable == rval))
+ arg = 0;
+ else if ((opt->od_info->od_enable >= 0) &&
+ (opt->od_info->od_enable == rval))
+ arg = 1;
+ if (arg < 0) {
+ dev_err(pci->dev,
+ "OD control not supported for pin %s\n",
+ g->name);
+ return -ENOTSUPP;
+ }
+ break;
+
+ default:
+ dev_err(pci->dev, "Properties not supported\n");
+ return -ENOTSUPP;
+ }
+
+ *config = pinconf_to_config_packed(param, (u16)arg);
+ return 0;
+}
+
+static int palmas_pinconf_set(struct pinctrl_dev *pctldev,
+ unsigned pin, unsigned long *configs,
+ unsigned num_configs)
+{
+ struct palmas_pctrl_chip_info *pci = pinctrl_dev_get_drvdata(pctldev);
+ enum pin_config_param param;
+ u16 param_val;
+ const struct palmas_pingroup *g;
+ const struct palmas_pin_info *opt;
+ int ret;
+ int base, add, mask;
+ int rval;
+ int group_nr;
+ int i;
+
+ for (group_nr = 0; group_nr < pci->num_pin_groups; ++group_nr) {
+ if (pci->pin_groups[group_nr].pins[0] == pin)
+ break;
+ }
+
+ if (group_nr == pci->num_pin_groups) {
+ dev_err(pci->dev,
+ "Pinconf is not supported for pin-id %d\n", pin);
+ return -ENOTSUPP;
+ }
+
+ g = &pci->pin_groups[group_nr];
+ opt = g->opt[pci->pins_current_opt[group_nr]];
+ if (!opt) {
+ dev_err(pci->dev,
+ "Pinconf is not supported for pin %s\n", g->name);
+ return -ENOTSUPP;
+ }
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ param_val = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ return 0;
+ case PIN_CONFIG_BIAS_DISABLE:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ if (!opt->pud_info) {
+ dev_err(pci->dev,
+ "PULL control not supported for pin %s\n",
+ g->name);
+ return -ENOTSUPP;
+ }
+ base = opt->pud_info->pullup_dn_reg_base;
+ add = opt->pud_info->pullup_dn_reg_add;
+ mask = opt->pud_info->pullup_dn_mask;
+
+ if (param == PIN_CONFIG_BIAS_DISABLE)
+ rval = opt->pud_info->normal_val;
+ else if (param == PIN_CONFIG_BIAS_PULL_UP)
+ rval = opt->pud_info->pull_up_val;
+ else
+ rval = opt->pud_info->pull_dn_val;
+
+ if (rval < 0) {
+ dev_err(pci->dev,
+ "PULL control not supported for pin %s\n",
+ g->name);
+ return -ENOTSUPP;
+ }
+ break;
+
+ case PIN_CONFIG_DRIVE_OPEN_DRAIN:
+ if (!opt->od_info) {
+ dev_err(pci->dev,
+ "OD control not supported for pin %s\n",
+ g->name);
+ return -ENOTSUPP;
+ }
+ base = opt->od_info->od_reg_base;
+ add = opt->od_info->od_reg_add;
+ mask = opt->od_info->od_mask;
+ if (param_val == 0)
+ rval = opt->od_info->od_disable;
+ else
+ rval = opt->od_info->od_enable;
+ if (rval < 0) {
+ dev_err(pci->dev,
+ "OD control not supported for pin %s\n",
+ g->name);
+ return -ENOTSUPP;
+ }
+ break;
+ default:
+ dev_err(pci->dev, "Properties not supported\n");
+ return -ENOTSUPP;
+ }
+
+ dev_dbg(pci->dev, "%s(): Add0x%02x:0x%02x:0x%02x:0x%02x\n",
+ __func__, base, add, mask, rval);
+ ret = palmas_update_bits(pci->palmas, base, add, mask, rval);
+ if (ret < 0) {
+ dev_err(pci->dev, "Reg 0x%02x update failed: %d\n",
+ add, ret);
+ return ret;
+ }
+ } /* for each config */
+
+ return 0;
+}
+
+static int palmas_pinconf_group_get(struct pinctrl_dev *pctldev,
+ unsigned group, unsigned long *config)
+{
+ dev_err(pctldev->dev, "palmas_pinconf_group_get op not supported\n");
+ return -ENOTSUPP;
+}
+
+static int palmas_pinconf_group_set(struct pinctrl_dev *pctldev,
+ unsigned group, unsigned long *configs,
+ unsigned num_configs)
+{
+ dev_err(pctldev->dev, "palmas_pinconf_group_set op not supported\n");
+ return -ENOTSUPP;
+}
+
+static const struct pinconf_ops palmas_pinconf_ops = {
+ .pin_config_get = palmas_pinconf_get,
+ .pin_config_set = palmas_pinconf_set,
+ .pin_config_group_get = palmas_pinconf_group_get,
+ .pin_config_group_set = palmas_pinconf_group_set,
+};
+
+static struct pinctrl_desc palmas_pinctrl_desc = {
+ .pctlops = &palmas_pinctrl_ops,
+ .pmxops = &palmas_pinmux_ops,
+ .confops = &palmas_pinconf_ops,
+ .owner = THIS_MODULE,
+};
+
+struct palmas_pinctrl_data {
+ const struct palmas_pingroup *pin_groups;
+ int num_pin_groups;
+};
+
+static struct palmas_pinctrl_data tps65913_pinctrl_data = {
+ .pin_groups = tps65913_pingroups,
+ .num_pin_groups = ARRAY_SIZE(tps65913_pingroups),
+};
+
+static struct palmas_pinctrl_data tps80036_pinctrl_data = {
+ .pin_groups = tps80036_pingroups,
+ .num_pin_groups = ARRAY_SIZE(tps80036_pingroups),
+};
+
+static struct of_device_id palmas_pinctrl_of_match[] = {
+ { .compatible = "ti,palmas-pinctrl", .data = &tps65913_pinctrl_data},
+ { .compatible = "ti,tps65913-pinctrl", .data = &tps65913_pinctrl_data},
+ { .compatible = "ti,tps80036-pinctrl", .data = &tps80036_pinctrl_data},
+ { },
+};
+MODULE_DEVICE_TABLE(of, palmas_pinctrl_of_match);
+
+static int palmas_pinctrl_probe(struct platform_device *pdev)
+{
+ struct palmas_pctrl_chip_info *pci;
+ const struct palmas_pinctrl_data *pinctrl_data = &tps65913_pinctrl_data;
+ int ret;
+ bool enable_dvfs1 = false;
+ bool enable_dvfs2 = false;
+
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_device(palmas_pinctrl_of_match, &pdev->dev);
+ pinctrl_data = match->data;
+ enable_dvfs1 = of_property_read_bool(pdev->dev.of_node,
+ "ti,palmas-enable-dvfs1");
+ enable_dvfs2 = of_property_read_bool(pdev->dev.of_node,
+ "ti,palmas-enable-dvfs2");
+ }
+
+ pci = devm_kzalloc(&pdev->dev, sizeof(*pci), GFP_KERNEL);
+ if (!pci) {
+ dev_err(&pdev->dev, "Malloc for pci failed\n");
+ return -ENOMEM;
+ }
+
+ pci->dev = &pdev->dev;
+ pci->palmas = dev_get_drvdata(pdev->dev.parent);
+
+ pci->pins = palmas_pins_desc;
+ pci->num_pins = ARRAY_SIZE(palmas_pins_desc);
+ pci->functions = palmas_pin_function;
+ pci->num_functions = ARRAY_SIZE(palmas_pin_function);
+ pci->pin_groups = pinctrl_data->pin_groups;
+ pci->num_pin_groups = pinctrl_data->num_pin_groups;
+
+ platform_set_drvdata(pdev, pci);
+
+ palmas_pinctrl_set_dvfs1(pci, enable_dvfs1);
+ palmas_pinctrl_set_dvfs2(pci, enable_dvfs2);
+ ret = palmas_pinctrl_get_pin_mux(pci);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Reading pinctrol option register failed: %d\n", ret);
+ return ret;
+ }
+
+ palmas_pinctrl_desc.name = dev_name(&pdev->dev);
+ palmas_pinctrl_desc.pins = palmas_pins_desc;
+ palmas_pinctrl_desc.npins = ARRAY_SIZE(palmas_pins_desc);
+ pci->pctl = pinctrl_register(&palmas_pinctrl_desc, &pdev->dev, pci);
+ if (!pci->pctl) {
+ dev_err(&pdev->dev, "Couldn't register pinctrl driver\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int palmas_pinctrl_remove(struct platform_device *pdev)
+{
+ struct palmas_pctrl_chip_info *pci = platform_get_drvdata(pdev);
+
+ pinctrl_unregister(pci->pctl);
+ return 0;
+}
+
+static struct platform_driver palmas_pinctrl_driver = {
+ .driver = {
+ .name = "palmas-pinctrl",
+ .owner = THIS_MODULE,
+ .of_match_table = palmas_pinctrl_of_match,
+ },
+ .probe = palmas_pinctrl_probe,
+ .remove = palmas_pinctrl_remove,
+};
+
+module_platform_driver(palmas_pinctrl_driver);
+
+MODULE_DESCRIPTION("Palmas pin control driver");
+MODULE_AUTHOR("Laxman Dewangan<ldewangan@nvidia.com>");
+MODULE_ALIAS("platform:palmas-pinctrl");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index 1eb5a2e43b0..e0718b7c4ab 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -36,7 +36,7 @@
#include <linux/pinctrl/pinmux.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/irqchip/chained_irq.h>
-#include <linux/clk-provider.h>
+#include <linux/clk.h>
#include <dt-bindings/pinctrl/rockchip.h>
#include "core.h"
@@ -167,18 +167,14 @@ static const inline struct rockchip_pin_group *pinctrl_name_to_group(
const struct rockchip_pinctrl *info,
const char *name)
{
- const struct rockchip_pin_group *grp = NULL;
int i;
for (i = 0; i < info->ngroups; i++) {
- if (strcmp(info->groups[i].name, name))
- continue;
-
- grp = &info->groups[i];
- break;
+ if (!strcmp(info->groups[i].name, name))
+ return &info->groups[i];
}
- return grp;
+ return NULL;
}
/*
@@ -190,8 +186,7 @@ static struct rockchip_pin_bank *pin_to_bank(struct rockchip_pinctrl *info,
{
struct rockchip_pin_bank *b = info->ctrl->pin_banks;
- while ((pin >= b->pin_base) &&
- ((b->pin_base + b->nr_pins - 1) < pin))
+ while (pin >= (b->pin_base + b->nr_pins))
b++;
return b;
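The rewritten loop above is a pure simplification: with contiguous, ordered banks, pin >= b->pin_base always holds during the scan, so the old compound test reduces to the single "pin lies past this bank" check that the new code states directly. Sketched standalone (types are illustrative):

/* illustrative types; the real ones live in pinctrl-rockchip.c */
struct bank { unsigned int pin_base, nr_pins; };

static struct bank *find_bank(struct bank *b, unsigned int pin)
{
	/* advance while the pin lies beyond the current bank */
	while (pin >= b->pin_base + b->nr_pins)
		b++;
	return b;
}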
@@ -204,17 +199,12 @@ static struct rockchip_pin_bank *bank_num_to_bank(
struct rockchip_pin_bank *b = info->ctrl->pin_banks;
int i;
- for (i = 0; i < info->ctrl->nr_banks; i++) {
+ for (i = 0; i < info->ctrl->nr_banks; i++, b++) {
if (b->bank_num == num)
- break;
-
- b++;
+ return b;
}
- if (b->bank_num != num)
- return ERR_PTR(-EINVAL);
-
- return b;
+ return ERR_PTR(-EINVAL);
}
/*
@@ -584,32 +574,45 @@ static bool rockchip_pinconf_pull_valid(struct rockchip_pin_ctrl *ctrl,
/* set the pin config settings for a specified pin */
static int rockchip_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
- unsigned long config)
+ unsigned long *configs, unsigned num_configs)
{
struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
struct rockchip_pin_bank *bank = pin_to_bank(info, pin);
- enum pin_config_param param = pinconf_to_config_param(config);
- u16 arg = pinconf_to_config_argument(config);
-
- switch (param) {
- case PIN_CONFIG_BIAS_DISABLE:
- return rockchip_set_pull(bank, pin - bank->pin_base, param);
- break;
- case PIN_CONFIG_BIAS_PULL_UP:
- case PIN_CONFIG_BIAS_PULL_DOWN:
- case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
- if (!rockchip_pinconf_pull_valid(info->ctrl, param))
+ enum pin_config_param param;
+ u16 arg;
+ int i;
+ int rc;
+
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ rc = rockchip_set_pull(bank, pin - bank->pin_base,
+ param);
+ if (rc)
+ return rc;
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_PIN_DEFAULT:
+ if (!rockchip_pinconf_pull_valid(info->ctrl, param))
+ return -ENOTSUPP;
+
+ if (!arg)
+ return -EINVAL;
+
+ rc = rockchip_set_pull(bank, pin - bank->pin_base,
+ param);
+ if (rc)
+ return rc;
+ break;
+ default:
return -ENOTSUPP;
-
- if (!arg)
- return -EINVAL;
-
- return rockchip_set_pull(bank, pin - bank->pin_base, param);
- break;
- default:
- return -ENOTSUPP;
- break;
- }
+ break;
+ }
+ } /* for each config */
return 0;
}
@@ -881,6 +884,16 @@ static int rockchip_pinctrl_register(struct platform_device *pdev,
* GPIO handling
*/
+static int rockchip_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ return pinctrl_request_gpio(chip->base + offset);
+}
+
+static void rockchip_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ pinctrl_free_gpio(chip->base + offset);
+}
+
static void rockchip_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
{
struct rockchip_pin_bank *bank = gc_to_pin_bank(gc);
@@ -954,6 +967,8 @@ static int rockchip_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
}
static const struct gpio_chip rockchip_gpiolib_chip = {
+ .request = rockchip_gpio_request,
+ .free = rockchip_gpio_free,
.set = rockchip_gpio_set,
.get = rockchip_gpio_get,
.direction_input = rockchip_gpio_direction_input,
@@ -1270,11 +1285,6 @@ static int rockchip_pinctrl_probe(struct platform_device *pdev)
info->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "cannot find IO resource\n");
- return -ENOENT;
- }
-
info->reg_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(info->reg_base))
return PTR_ERR(info->reg_base);
@@ -1379,7 +1389,7 @@ static struct platform_driver rockchip_pinctrl_driver = {
.driver = {
.name = "rockchip-pinctrl",
.owner = THIS_MODULE,
- .of_match_table = of_match_ptr(rockchip_pinctrl_dt_match),
+ .of_match_table = rockchip_pinctrl_dt_match,
},
};
diff --git a/drivers/pinctrl/pinctrl-s3c24xx.c b/drivers/pinctrl/pinctrl-s3c24xx.c
index 24446daaad7..ad3eaad1700 100644
--- a/drivers/pinctrl/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/pinctrl-s3c24xx.c
@@ -549,7 +549,7 @@ static int s3c24xx_eint_init(struct samsung_pinctrl_drv_data *d)
irq = bank->eint_offset;
mask = bank->eint_mask;
for (pin = 0; mask; ++pin, mask >>= 1) {
- if (irq > NUM_EINT)
+ if (irq >= NUM_EINT)
break;
if (!(mask & 1))
continue;
diff --git a/drivers/pinctrl/pinctrl-samsung.c b/drivers/pinctrl/pinctrl-samsung.c
index a7fa9e2d475..92a9d6c8db0 100644
--- a/drivers/pinctrl/pinctrl-samsung.c
+++ b/drivers/pinctrl/pinctrl-samsung.c
@@ -442,9 +442,17 @@ static int samsung_pinconf_rw(struct pinctrl_dev *pctldev, unsigned int pin,
/* set the pin config settings for a specified pin */
static int samsung_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
- unsigned long config)
+ unsigned long *configs, unsigned num_configs)
{
- return samsung_pinconf_rw(pctldev, pin, &config, true);
+ int i, ret;
+
+ for (i = 0; i < num_configs; i++) {
+ ret = samsung_pinconf_rw(pctldev, pin, &configs[i], true);
+ if (ret < 0)
+ return ret;
+ } /* for each config */
+
+ return 0;
}
/* get the pin config settings for a specified pin */
@@ -456,7 +464,8 @@ static int samsung_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
/* set the pin config settings for a specified pin group */
static int samsung_pinconf_group_set(struct pinctrl_dev *pctldev,
- unsigned group, unsigned long config)
+ unsigned group, unsigned long *configs,
+ unsigned num_configs)
{
struct samsung_pinctrl_drv_data *drvdata;
const unsigned int *pins;
@@ -466,7 +475,7 @@ static int samsung_pinconf_group_set(struct pinctrl_dev *pctldev,
pins = drvdata->pin_groups[group].pins;
for (cnt = 0; cnt < drvdata->pin_groups[group].num_pins; cnt++)
- samsung_pinconf_set(pctldev, pins[cnt], config);
+ samsung_pinconf_set(pctldev, pins[cnt], configs, num_configs);
return 0;
}
@@ -767,6 +776,10 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
}
}
+ ret = samsung_pinctrl_parse_dt(pdev, drvdata);
+ if (ret)
+ return ret;
+
drvdata->pctl_dev = pinctrl_register(ctrldesc, &pdev->dev, drvdata);
if (!drvdata->pctl_dev) {
dev_err(&pdev->dev, "could not register pinctrl driver\n");
@@ -784,12 +797,6 @@ static int samsung_pinctrl_register(struct platform_device *pdev,
pinctrl_add_gpio_range(drvdata->pctl_dev, &pin_bank->grange);
}
- ret = samsung_pinctrl_parse_dt(pdev, drvdata);
- if (ret) {
- pinctrl_unregister(drvdata->pctl_dev);
- return ret;
- }
-
return 0;
}
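Parsing the groups and functions before pinctrl_register(), as the samsung hunk above now does, presumably matters because the core may call back into the pin controller (for example to resolve hog maps) as soon as registration completes, and it also removes the unregister-on-error path. The ordering, sketched with placeholder names:

#include <linux/platform_device.h>
#include <linux/pinctrl/pinctrl.h>

/* placeholder driver data and DT parser, standing in for the real ones */
struct foo_drvdata {
	struct pinctrl_desc desc;
	struct pinctrl_dev *pctl_dev;
};
static int foo_parse_groups_and_functions(struct platform_device *pdev,
					  struct foo_drvdata *d);

static int foo_pinctrl_register(struct platform_device *pdev,
				struct foo_drvdata *d)
{
	int ret;

	ret = foo_parse_groups_and_functions(pdev, d);	/* tables ready first */
	if (ret)
		return ret;

	d->pctl_dev = pinctrl_register(&d->desc, &pdev->dev, d);
	if (!d->pctl_dev)
		return -ENODEV;		/* nothing registered yet, nothing to undo */
	return 0;
}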
@@ -1115,6 +1122,8 @@ static const struct of_device_id samsung_pinctrl_dt_match[] = {
.data = (void *)exynos5250_pin_ctrl },
{ .compatible = "samsung,exynos5420-pinctrl",
.data = (void *)exynos5420_pin_ctrl },
+ { .compatible = "samsung,s5pv210-pinctrl",
+ .data = (void *)s5pv210_pin_ctrl },
#endif
#ifdef CONFIG_PINCTRL_S3C64XX
{ .compatible = "samsung,s3c64xx-pinctrl",
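
The pinctrl-samsung hunks above, like the pinctrl-single and pinctrl-st hunks that follow, perform the same conversion: the pinconf .pin_config_set and .pin_config_group_set callbacks now take an array of packed configs plus a count instead of a single unsigned long. A hedged sketch of the new callback shape, using a hypothetical foo_apply_one_config() helper rather than any driver's real register code:

	#include <linux/pinctrl/pinconf.h>
	#include <linux/pinctrl/pinconf-generic.h>

	/* Hypothetical per-config helper standing in for the register writes
	 * the real drivers do. */
	static int foo_apply_one_config(struct pinctrl_dev *pctldev, unsigned pin,
					enum pin_config_param param, u16 arg);

	static int foo_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
				   unsigned long *configs, unsigned num_configs)
	{
		unsigned i;
		int ret;

		for (i = 0; i < num_configs; i++) {
			/* each element packs a (param, argument) pair */
			enum pin_config_param param = pinconf_to_config_param(configs[i]);
			u16 arg = pinconf_to_config_argument(configs[i]);

			ret = foo_apply_one_config(pctldev, pin, param, arg);
			if (ret)
				return ret;	/* stop on the first failing config */
		}

		return 0;
	}
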
diff --git a/drivers/pinctrl/pinctrl-samsung.h b/drivers/pinctrl/pinctrl-samsung.h
index 11bb75ba81a..30622d9afa2 100644
--- a/drivers/pinctrl/pinctrl-samsung.h
+++ b/drivers/pinctrl/pinctrl-samsung.h
@@ -260,5 +260,6 @@ extern struct samsung_pin_ctrl s3c2412_pin_ctrl[];
extern struct samsung_pin_ctrl s3c2416_pin_ctrl[];
extern struct samsung_pin_ctrl s3c2440_pin_ctrl[];
extern struct samsung_pin_ctrl s3c2450_pin_ctrl[];
+extern struct samsung_pin_ctrl s5pv210_pin_ctrl[];
#endif /* __PINCTRL_SAMSUNG_H */
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 6866548fab3..a82ace4d9a2 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -209,7 +209,7 @@ struct pcs_device {
static int pcs_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin,
unsigned long *config);
static int pcs_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
- unsigned long config);
+ unsigned long *configs, unsigned num_configs);
static enum pin_config_param pcs_bias[] = {
PIN_CONFIG_BIAS_PULL_DOWN,
@@ -536,7 +536,7 @@ static void pcs_pinconf_clear_bias(struct pinctrl_dev *pctldev, unsigned pin)
int i;
for (i = 0; i < ARRAY_SIZE(pcs_bias); i++) {
config = pinconf_to_config_packed(pcs_bias[i], 0);
- pcs_pinconf_set(pctldev, pin, config);
+ pcs_pinconf_set(pctldev, pin, &config, 1);
}
}
@@ -622,22 +622,28 @@ static int pcs_pinconf_get(struct pinctrl_dev *pctldev,
}
static int pcs_pinconf_set(struct pinctrl_dev *pctldev,
- unsigned pin, unsigned long config)
+ unsigned pin, unsigned long *configs,
+ unsigned num_configs)
{
struct pcs_device *pcs = pinctrl_dev_get_drvdata(pctldev);
struct pcs_function *func;
unsigned offset = 0, shift = 0, i, data, ret;
u16 arg;
+ int j;
ret = pcs_get_function(pctldev, pin, &func);
if (ret)
return ret;
- for (i = 0; i < func->nconfs; i++) {
- if (pinconf_to_config_param(config) == func->conf[i].param) {
+ for (j = 0; j < num_configs; j++) {
+ for (i = 0; i < func->nconfs; i++) {
+ if (pinconf_to_config_param(configs[j])
+ != func->conf[i].param)
+ continue;
+
offset = pin * (pcs->width / BITS_PER_BYTE);
data = pcs->read(pcs->base + offset);
- arg = pinconf_to_config_argument(config);
+ arg = pinconf_to_config_argument(configs[j]);
switch (func->conf[i].param) {
/* 2 parameters */
case PIN_CONFIG_INPUT_SCHMITT:
@@ -667,10 +673,14 @@ static int pcs_pinconf_set(struct pinctrl_dev *pctldev,
return -ENOTSUPP;
}
pcs->write(data, pcs->base + offset);
- return 0;
+
+ break;
}
- }
- return -ENOTSUPP;
+ if (i >= func->nconfs)
+ return -ENOTSUPP;
+ } /* for each config */
+
+ return 0;
}
static int pcs_pinconf_group_get(struct pinctrl_dev *pctldev,
@@ -695,7 +705,8 @@ static int pcs_pinconf_group_get(struct pinctrl_dev *pctldev,
}
static int pcs_pinconf_group_set(struct pinctrl_dev *pctldev,
- unsigned group, unsigned long config)
+ unsigned group, unsigned long *configs,
+ unsigned num_configs)
{
const unsigned *pins;
unsigned npins;
@@ -705,7 +716,7 @@ static int pcs_pinconf_group_set(struct pinctrl_dev *pctldev,
if (ret)
return ret;
for (i = 0; i < npins; i++) {
- if (pcs_pinconf_set(pctldev, pins[i], config))
+ if (pcs_pinconf_set(pctldev, pins[i], configs, num_configs))
return -ENOTSUPP;
}
return 0;
@@ -1483,6 +1494,7 @@ static int pcs_add_gpio_func(struct device_node *node, struct pcs_device *pcs)
return ret;
}
+#ifdef CONFIG_PM
static int pinctrl_single_suspend(struct platform_device *pdev,
pm_message_t state)
{
@@ -1505,6 +1517,7 @@ static int pinctrl_single_resume(struct platform_device *pdev)
return pinctrl_force_default(pcs->pctl);
}
+#endif
static int pcs_probe(struct platform_device *pdev)
{
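
The pinctrl-single hunk above wraps the legacy suspend/resume callbacks in #ifdef CONFIG_PM so they do not become "defined but not used" in builds without power management. A sketch of that guard with hypothetical foo_* names; the matching #ifdef around the platform_driver hookup is assumed here and is not part of the quoted hunk:

	#include <linux/platform_device.h>

	#ifdef CONFIG_PM
	static int foo_suspend(struct platform_device *pdev, pm_message_t state)
	{
		/* save controller state here */
		return 0;
	}

	static int foo_resume(struct platform_device *pdev)
	{
		/* restore controller state here */
		return 0;
	}
	#endif

	static struct platform_driver foo_driver = {
		.driver = {
			.name = "foo-pinctrl",
		},
	#ifdef CONFIG_PM
		.suspend = foo_suspend,	/* only referenced when CONFIG_PM is set */
		.resume  = foo_resume,
	#endif
	};
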
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 04d4506ae18..9cadc68ee57 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -288,8 +288,8 @@ struct st_pinctrl {
/* SOC specific data */
/* STiH415 data */
-unsigned int stih415_input_delays[] = {0, 500, 1000, 1500};
-unsigned int stih415_output_delays[] = {0, 1000, 2000, 3000};
+static unsigned int stih415_input_delays[] = {0, 500, 1000, 1500};
+static unsigned int stih415_output_delays[] = {0, 1000, 2000, 3000};
#define STIH415_PCTRL_COMMON_DATA \
.rt_style = st_retime_style_packed, \
@@ -324,7 +324,7 @@ static const struct st_pctl_data stih415_right_data = {
};
/* STiH416 data */
-unsigned int stih416_delays[] = {0, 300, 500, 750, 1000, 1250, 1500,
+static unsigned int stih416_delays[] = {0, 300, 500, 750, 1000, 1250, 1500,
1750, 2000, 2250, 2500, 2750, 3000, 3250 };
static const struct st_pctl_data stih416_data = {
@@ -811,7 +811,7 @@ static int st_pmx_get_funcs_count(struct pinctrl_dev *pctldev)
return info->nfunctions;
}
-const char *st_pmx_get_fname(struct pinctrl_dev *pctldev,
+static const char *st_pmx_get_fname(struct pinctrl_dev *pctldev,
unsigned selector)
{
struct st_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
@@ -909,15 +909,18 @@ static void st_pinconf_set_retime(struct st_pinctrl *info,
config, pin);
}
-static int st_pinconf_set(struct pinctrl_dev *pctldev,
- unsigned pin_id, unsigned long config)
+static int st_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin_id,
+ unsigned long *configs, unsigned num_configs)
{
int pin = st_gpio_pin(pin_id);
struct st_pinctrl *info = pinctrl_dev_get_drvdata(pctldev);
struct st_pio_control *pc = st_get_pio_control(pctldev, pin_id);
+ int i;
- st_pinconf_set_config(pc, pin, config);
- st_pinconf_set_retime(info, pc, pin, config);
+ for (i = 0; i < num_configs; i++) {
+ st_pinconf_set_config(pc, pin, configs[i]);
+ st_pinconf_set_retime(info, pc, pin, configs[i]);
+ } /* for each config */
return 0;
}
@@ -1222,11 +1225,9 @@ static int st_gpiolib_register_bank(struct st_pinctrl *info,
if (of_address_to_resource(np, 0, &res))
return -ENODEV;
- bank->base = devm_request_and_ioremap(dev, &res);
- if (!bank->base) {
- dev_err(dev, "Can't get IO memory mapping!\n");
- return -ENODEV;
- }
+ bank->base = devm_ioremap_resource(dev, &res);
+ if (IS_ERR(bank->base))
+ return PTR_ERR(bank->base);
bank->gpio_chip = st_gpio_template;
bank->gpio_chip.base = bank_num * ST_GPIO_PINS_PER_BANK;
diff --git a/drivers/pinctrl/pinctrl-sunxi-pins.h b/drivers/pinctrl/pinctrl-sunxi-pins.h
index 2eeae0c066c..2c7446a1a19 100644
--- a/drivers/pinctrl/pinctrl-sunxi-pins.h
+++ b/drivers/pinctrl/pinctrl-sunxi-pins.h
@@ -806,7 +806,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x3, "pata"), /* ATAD13 */
SUNXI_FUNCTION(0x4, "keypad"), /* IN7 */
SUNXI_FUNCTION(0x5, "sim"), /* VCCEN */
- SUNXI_FUNCTION_IRQ(0x6, 17), /* EINT17 */
+ SUNXI_FUNCTION_IRQ(0x6, 17), /* EINT17 */
SUNXI_FUNCTION(0x7, "csi1")), /* D17 */
SUNXI_PIN(SUNXI_PINCTRL_PIN_PH18,
SUNXI_FUNCTION(0x0, "gpio_in"),
@@ -815,7 +815,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x3, "pata"), /* ATAD14 */
SUNXI_FUNCTION(0x4, "keypad"), /* OUT0 */
SUNXI_FUNCTION(0x5, "sim"), /* SCK */
- SUNXI_FUNCTION_IRQ(0x6, 18), /* EINT18 */
+ SUNXI_FUNCTION_IRQ(0x6, 18), /* EINT18 */
SUNXI_FUNCTION(0x7, "csi1")), /* D18 */
SUNXI_PIN(SUNXI_PINCTRL_PIN_PH19,
SUNXI_FUNCTION(0x0, "gpio_in"),
@@ -824,7 +824,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x3, "pata"), /* ATAD15 */
SUNXI_FUNCTION(0x4, "keypad"), /* OUT1 */
SUNXI_FUNCTION(0x5, "sim"), /* SDA */
- SUNXI_FUNCTION_IRQ(0x6, 19), /* EINT19 */
+ SUNXI_FUNCTION_IRQ(0x6, 19), /* EINT19 */
SUNXI_FUNCTION(0x7, "csi1")), /* D19 */
SUNXI_PIN(SUNXI_PINCTRL_PIN_PH20,
SUNXI_FUNCTION(0x0, "gpio_in"),
@@ -832,7 +832,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x2, "lcd1"), /* D20 */
SUNXI_FUNCTION(0x3, "pata"), /* ATAOE */
SUNXI_FUNCTION(0x4, "can"), /* TX */
- SUNXI_FUNCTION_IRQ(0x6, 20), /* EINT20 */
+ SUNXI_FUNCTION_IRQ(0x6, 20), /* EINT20 */
SUNXI_FUNCTION(0x7, "csi1")), /* D20 */
SUNXI_PIN(SUNXI_PINCTRL_PIN_PH21,
SUNXI_FUNCTION(0x0, "gpio_in"),
@@ -840,7 +840,7 @@ static const struct sunxi_desc_pin sun4i_a10_pins[] = {
SUNXI_FUNCTION(0x2, "lcd1"), /* D21 */
SUNXI_FUNCTION(0x3, "pata"), /* ATADREQ */
SUNXI_FUNCTION(0x4, "can"), /* RX */
- SUNXI_FUNCTION_IRQ(0x6, 21), /* EINT21 */
+ SUNXI_FUNCTION_IRQ(0x6, 21), /* EINT21 */
SUNXI_FUNCTION(0x7, "csi1")), /* D21 */
SUNXI_PIN(SUNXI_PINCTRL_PIN_PH22,
SUNXI_FUNCTION(0x0, "gpio_in"),
@@ -2005,6 +2005,1834 @@ static const struct sunxi_desc_pin sun5i_a13_pins[] = {
SUNXI_FUNCTION_IRQ(0x6, 12)), /* EINT12 */
};
+static const struct sunxi_desc_pin sun6i_a31_pins[] = {
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXD0 */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D0 */
+ SUNXI_FUNCTION(0x4, "uart1")), /* DTR */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXD1 */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D1 */
+ SUNXI_FUNCTION(0x4, "uart1")), /* DSR */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXD2 */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D2 */
+ SUNXI_FUNCTION(0x4, "uart1")), /* DCD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXD3 */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D3 */
+ SUNXI_FUNCTION(0x4, "uart1")), /* RING */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXD4 */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D4 */
+ SUNXI_FUNCTION(0x4, "uart1")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXD5 */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D5 */
+ SUNXI_FUNCTION(0x4, "uart1")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXD6 */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D6 */
+ SUNXI_FUNCTION(0x4, "uart1")), /* RTS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXD7 */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D7 */
+ SUNXI_FUNCTION(0x4, "uart1")), /* CTS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXCLK */
+ SUNXI_FUNCTION(0x3, "lcd1")), /* D8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXEN */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D9 */
+ SUNXI_FUNCTION(0x4, "mmc3"), /* CMD */
+ SUNXI_FUNCTION(0x5, "mmc2")), /* CMD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* GTXCLK */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D10 */
+ SUNXI_FUNCTION(0x4, "mmc3"), /* CLK */
+ SUNXI_FUNCTION(0x5, "mmc2")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXD0 */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D11 */
+ SUNXI_FUNCTION(0x4, "mmc3"), /* D0 */
+ SUNXI_FUNCTION(0x5, "mmc2")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA12,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXD1 */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D12 */
+ SUNXI_FUNCTION(0x4, "mmc3"), /* D1 */
+ SUNXI_FUNCTION(0x5, "mmc2")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA13,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXD2 */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D13 */
+ SUNXI_FUNCTION(0x4, "mmc3"), /* D2 */
+ SUNXI_FUNCTION(0x5, "mmc2")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA14,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXD3 */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D14 */
+ SUNXI_FUNCTION(0x4, "mmc3"), /* D3 */
+ SUNXI_FUNCTION(0x5, "mmc2")), /* D3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA15,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXD4 */
+ SUNXI_FUNCTION(0x3, "lcd1")), /* D15 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA16,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXD5 */
+ SUNXI_FUNCTION(0x3, "lcd1")), /* D16 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA17,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXD6 */
+ SUNXI_FUNCTION(0x3, "lcd1")), /* D17 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA18,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXD7 */
+ SUNXI_FUNCTION(0x3, "lcd1")), /* D18 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA19,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXDV */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D19 */
+ SUNXI_FUNCTION(0x4, "pwm3")), /* Positive */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA20,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXCLK */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D20 */
+ SUNXI_FUNCTION(0x4, "pwm3")), /* Negative */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA21,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* TXERR */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D21 */
+ SUNXI_FUNCTION(0x4, "spi3")), /* CS0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA22,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* RXERR */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D22 */
+ SUNXI_FUNCTION(0x4, "spi3")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA23,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* COL */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* D23 */
+ SUNXI_FUNCTION(0x4, "spi3")), /* MOSI */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA24,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* CRS */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* CLK */
+ SUNXI_FUNCTION(0x4, "spi3")), /* MISO */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA25,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* CLKIN */
+ SUNXI_FUNCTION(0x3, "lcd1"), /* DE */
+ SUNXI_FUNCTION(0x4, "spi3")), /* CS1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA26,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* MDC */
+ SUNXI_FUNCTION(0x3, "lcd1")), /* HSYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA27,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "gmac"), /* MDIO */
+ SUNXI_FUNCTION(0x3, "lcd1")), /* VSYNC */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* MCLK */
+ SUNXI_FUNCTION(0x3, "uart3"), /* CTS */
+ SUNXI_FUNCTION(0x4, "csi")), /* MCLK1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0")), /* BCLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0")), /* LRCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0")), /* DO0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* DO1 */
+ SUNXI_FUNCTION(0x3, "uart3")), /* RTS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* DO2 */
+ SUNXI_FUNCTION(0x3, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x4, "i2c3")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* DO3 */
+ SUNXI_FUNCTION(0x3, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x4, "i2c3")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "i2s0")), /* DI */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* WE */
+ SUNXI_FUNCTION(0x3, "spi0")), /* MOSI */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* ALE */
+ SUNXI_FUNCTION(0x3, "spi0")), /* MISO */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* CLE */
+ SUNXI_FUNCTION(0x3, "spi0")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* CE1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* CE0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* RE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* RB0 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* CMD */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* CMD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* RB1 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* CLK */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ0 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D0 */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ1 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D1 */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ2 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D2 */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ3 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D3 */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* D3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC12,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ4 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D4 */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* D4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC13,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ5 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D5 */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* D5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC14,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ6 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D6 */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* D6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC15,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ7 */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* D7 */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* D7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC16,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ8 */
+ SUNXI_FUNCTION(0x3, "nand1")), /* DQ0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC17,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ9 */
+ SUNXI_FUNCTION(0x3, "nand1")), /* DQ1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC18,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ10 */
+ SUNXI_FUNCTION(0x3, "nand1")), /* DQ2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC19,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ11 */
+ SUNXI_FUNCTION(0x3, "nand1")), /* DQ3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC20,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ12 */
+ SUNXI_FUNCTION(0x3, "nand1")), /* DQ4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC21,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ13 */
+ SUNXI_FUNCTION(0x3, "nand1")), /* DQ5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC22,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ14 */
+ SUNXI_FUNCTION(0x3, "nand1")), /* DQ6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC23,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQ15 */
+ SUNXI_FUNCTION(0x3, "nand1")), /* DQ7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC24,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* DQS */
+ SUNXI_FUNCTION(0x3, "mmc2"), /* RST */
+ SUNXI_FUNCTION(0x4, "mmc3")), /* RST */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC25,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* CE2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC26,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* CE3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC27,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "spi0")), /* CS0 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VP0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VN0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VP1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VN1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D4 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VP2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D5 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VN2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D6 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VPC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D8 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VP3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D9 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VN3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D10 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VP0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D11 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VN0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD12,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VP1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD13,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VN1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD14,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VP2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD15,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VN2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD16,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D16 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VPC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD17,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D17 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD18,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VP3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD19,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VN3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD20,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D20 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD21,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D21 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD22,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D22 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD23,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* D23 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD24,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD25,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* DE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD26,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* HSYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD27,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0")), /* VSYNC */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* PCLK */
+ SUNXI_FUNCTION(0x3, "ts")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* MCLK */
+ SUNXI_FUNCTION(0x3, "ts")), /* ERR */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* HSYNC */
+ SUNXI_FUNCTION(0x3, "ts")), /* SYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* VSYNC */
+ SUNXI_FUNCTION(0x3, "ts")), /* DVLD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D0 */
+ SUNXI_FUNCTION(0x3, "uart5")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D1 */
+ SUNXI_FUNCTION(0x3, "uart5")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D2 */
+ SUNXI_FUNCTION(0x3, "uart5")), /* RTS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D3 */
+ SUNXI_FUNCTION(0x3, "uart5")), /* CTS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D4 */
+ SUNXI_FUNCTION(0x3, "ts")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D5 */
+ SUNXI_FUNCTION(0x3, "ts")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D6 */
+ SUNXI_FUNCTION(0x3, "ts")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D7 */
+ SUNXI_FUNCTION(0x3, "ts")), /* D3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE12,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D8 */
+ SUNXI_FUNCTION(0x3, "ts")), /* D4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE13,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D9 */
+ SUNXI_FUNCTION(0x3, "ts")), /* D5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE14,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D10 */
+ SUNXI_FUNCTION(0x3, "ts")), /* D6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE15,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi"), /* D11 */
+ SUNXI_FUNCTION(0x3, "ts")), /* D7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE16,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "csi")), /* MIPI CSI MCLK */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */
+ SUNXI_FUNCTION(0x4, "jtag")), /* MS1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */
+ SUNXI_FUNCTION(0x4, "jtag")), /* DI1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
+ SUNXI_FUNCTION(0x4, "uart0")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */
+ SUNXI_FUNCTION(0x4, "jtag")), /* DO1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
+ SUNXI_FUNCTION(0x4, "uart0")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */
+ SUNXI_FUNCTION(0x4, "jtag")), /* CK1 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1")), /* CMD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc1")), /* D3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2")), /* RTS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart2")), /* CTS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c3"), /* SCK */
+ SUNXI_FUNCTION(0x3, "usb")), /* DP3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c3"), /* SDA */
+ SUNXI_FUNCTION(0x3, "usb")), /* DM3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG12,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* CS1 */
+ SUNXI_FUNCTION(0x3, "i2s1")), /* MCLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG13,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* CS0 */
+ SUNXI_FUNCTION(0x3, "i2s1")), /* BCLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG14,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* CLK */
+ SUNXI_FUNCTION(0x3, "i2s1")), /* LRCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG15,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* MOSI */
+ SUNXI_FUNCTION(0x3, "i2s1")), /* DIN */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG16,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* MISO */
+ SUNXI_FUNCTION(0x3, "i2s1")), /* DOUT */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG17,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart4")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG18,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart4")), /* RX */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand1")), /* WE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand1")), /* ALE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand1")), /* CLE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand1")), /* CE1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand1")), /* CE0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand1")), /* RE */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand1")), /* RB0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand1")), /* RB1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand1")), /* DQS */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi2"), /* CS0 */
+ SUNXI_FUNCTION(0x3, "jtag"), /* MS0 */
+ SUNXI_FUNCTION(0x4, "pwm1")), /* Positive */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi2"), /* CLK */
+ SUNXI_FUNCTION(0x3, "jtag"), /* CK0 */
+ SUNXI_FUNCTION(0x4, "pwm1")), /* Negative */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi2"), /* MOSI */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DO0 */
+ SUNXI_FUNCTION(0x4, "pwm2")), /* Positive */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH12,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi2"), /* MISO */
+ SUNXI_FUNCTION(0x3, "jtag"), /* DI0 */
+ SUNXI_FUNCTION(0x4, "pwm2")), /* Negative */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH13,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm0")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH14,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH15,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH16,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH17,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH18,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c2")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH19,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c2")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH20,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart0")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH21,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart0")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH22,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH23,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH24,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH25,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH26,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH27,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH28,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out")),
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH29,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand1")), /* CE2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH30,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand1")), /* CE3 */
+};
+
+static const struct sunxi_desc_pin sun7i_a20_pins[] = {
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* ERXD3 */
+ SUNXI_FUNCTION(0x3, "spi1"), /* CS0 */
+ SUNXI_FUNCTION(0x4, "uart2"), /* RTS */
+ SUNXI_FUNCTION(0x5, "gmac")), /* GRXD3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* ERXD2 */
+ SUNXI_FUNCTION(0x3, "spi1"), /* CLK */
+ SUNXI_FUNCTION(0x4, "uart2"), /* CTS */
+ SUNXI_FUNCTION(0x5, "gmac")), /* GRXD2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* ERXD1 */
+ SUNXI_FUNCTION(0x3, "spi1"), /* MOSI */
+ SUNXI_FUNCTION(0x4, "uart2"), /* TX */
+ SUNXI_FUNCTION(0x5, "gmac")), /* GRXD1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* ERXD0 */
+ SUNXI_FUNCTION(0x3, "spi1"), /* MISO */
+ SUNXI_FUNCTION(0x4, "uart2"), /* RX */
+ SUNXI_FUNCTION(0x5, "gmac")), /* GRXD0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* ETXD3 */
+ SUNXI_FUNCTION(0x3, "spi1"), /* CS1 */
+ SUNXI_FUNCTION(0x5, "gmac")), /* GTXD3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* ETXD2 */
+ SUNXI_FUNCTION(0x3, "spi3"), /* CS0 */
+ SUNXI_FUNCTION(0x5, "gmac")), /* GTXD2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* ETXD1 */
+ SUNXI_FUNCTION(0x3, "spi3"), /* CLK */
+ SUNXI_FUNCTION(0x5, "gmac")), /* GTXD1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* ETXD0 */
+ SUNXI_FUNCTION(0x3, "spi3"), /* MOSI */
+ SUNXI_FUNCTION(0x5, "gmac")), /* GTXD0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* ERXCK */
+ SUNXI_FUNCTION(0x3, "spi3"), /* MISO */
+ SUNXI_FUNCTION(0x5, "gmac")), /* GRXCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* ERXERR */
+ SUNXI_FUNCTION(0x3, "spi3"), /* CS1 */
+ SUNXI_FUNCTION(0x5, "gmac"), /* GNULL / ERXERR */
+ SUNXI_FUNCTION(0x6, "i2s1")), /* MCLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* ERXDV */
+ SUNXI_FUNCTION(0x4, "uart1"), /* TX */
+ SUNXI_FUNCTION(0x5, "gmac")), /* GRXCTL / ERXDV */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* EMDC */
+ SUNXI_FUNCTION(0x4, "uart1"), /* RX */
+ SUNXI_FUNCTION(0x5, "gmac")), /* EMDC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA12,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* EMDIO */
+ SUNXI_FUNCTION(0x3, "uart6"), /* TX */
+ SUNXI_FUNCTION(0x4, "uart1"), /* RTS */
+ SUNXI_FUNCTION(0x5, "gmac")), /* EMDIO */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA13,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* ETXEN */
+ SUNXI_FUNCTION(0x3, "uart6"), /* RX */
+ SUNXI_FUNCTION(0x4, "uart1"), /* CTS */
+ SUNXI_FUNCTION(0x5, "gmac")), /* GTXCTL / ETXEN */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA14,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* ETXCK */
+ SUNXI_FUNCTION(0x3, "uart7"), /* TX */
+ SUNXI_FUNCTION(0x4, "uart1"), /* DTR */
+ SUNXI_FUNCTION(0x5, "gmac"), /* GNULL / ETXCK */
+ SUNXI_FUNCTION(0x6, "i2s1")), /* BCLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA15,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* ECRS */
+ SUNXI_FUNCTION(0x3, "uart7"), /* RX */
+ SUNXI_FUNCTION(0x4, "uart1"), /* DSR */
+ SUNXI_FUNCTION(0x5, "gmac"), /* GTXCK / ECRS */
+ SUNXI_FUNCTION(0x6, "i2s1")), /* LRCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA16,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* ECOL */
+ SUNXI_FUNCTION(0x3, "can"), /* TX */
+ SUNXI_FUNCTION(0x4, "uart1"), /* DCD */
+ SUNXI_FUNCTION(0x5, "gmac"), /* GCLKIN / ECOL */
+ SUNXI_FUNCTION(0x6, "i2s1")), /* DO */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PA17,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "emac"), /* ETXERR */
+ SUNXI_FUNCTION(0x3, "can"), /* RX */
+ SUNXI_FUNCTION(0x4, "uart1"), /* RING */
+ SUNXI_FUNCTION(0x5, "gmac"), /* GNULL / ETXERR */
+ SUNXI_FUNCTION(0x6, "i2s1")), /* LRCK */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c0")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm")), /* PWM0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ir0"), /* TX */
+ SUNXI_FUNCTION(0x4, "spdif")), /* MCLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ir0")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* MCLK */
+ SUNXI_FUNCTION(0x3, "ac97")), /* MCLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* BCLK */
+ SUNXI_FUNCTION(0x3, "ac97")), /* BCLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* LRCK */
+ SUNXI_FUNCTION(0x3, "ac97")), /* SYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* DO0 */
+ SUNXI_FUNCTION(0x3, "ac97")), /* DO */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0")), /* DO1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0")), /* DO2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0")), /* DO3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB12,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2s0"), /* DI */
+ SUNXI_FUNCTION(0x3, "ac97"), /* DI */
+ SUNXI_FUNCTION(0x4, "spdif")), /* DI */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB13,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi2"), /* CS1 */
+ SUNXI_FUNCTION(0x4, "spdif")), /* DO */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB14,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi2"), /* CS0 */
+ SUNXI_FUNCTION(0x3, "jtag")), /* MS0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB15,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi2"), /* CLK */
+ SUNXI_FUNCTION(0x3, "jtag")), /* CK0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB16,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi2"), /* MOSI */
+ SUNXI_FUNCTION(0x3, "jtag")), /* DO0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB17,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi2"), /* MISO */
+ SUNXI_FUNCTION(0x3, "jtag")), /* DI0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB18,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB19,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c1")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB20,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c2")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB21,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "i2c2")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB22,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart0"), /* TX */
+ SUNXI_FUNCTION(0x3, "ir1")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PB23,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "uart0"), /* RX */
+ SUNXI_FUNCTION(0x3, "ir1")), /* RX */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NWE */
+ SUNXI_FUNCTION(0x3, "spi0")), /* MOSI */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NALE */
+ SUNXI_FUNCTION(0x3, "spi0")), /* MISO */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NCLE */
+ SUNXI_FUNCTION(0x3, "spi0")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* NCE1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* NCE0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* NRE# */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NRB0 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* CMD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NRB1 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQ0 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQ1 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQ2 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NDQ3 */
+ SUNXI_FUNCTION(0x3, "mmc2")), /* D3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC12,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* NDQ4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC13,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* NDQ5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC14,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* NDQ6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC15,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* NDQ7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC16,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* NWP */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC17,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* NCE2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC18,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* NCE3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC19,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NCE4 */
+ SUNXI_FUNCTION(0x3, "spi2"), /* CS0 */
+ SUNXI_FUNCTION_IRQ(0x6, 12)), /* EINT12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC20,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NCE5 */
+ SUNXI_FUNCTION(0x3, "spi2"), /* CLK */
+ SUNXI_FUNCTION_IRQ(0x6, 13)), /* EINT13 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC21,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NCE6 */
+ SUNXI_FUNCTION(0x3, "spi2"), /* MOSI */
+ SUNXI_FUNCTION_IRQ(0x6, 14)), /* EINT14 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC22,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0"), /* NCE7 */
+ SUNXI_FUNCTION(0x3, "spi2"), /* MISO */
+ SUNXI_FUNCTION_IRQ(0x6, 15)), /* EINT15 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC23,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "spi0")), /* CS0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PC24,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "nand0")), /* NDQS */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VP0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VN0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VP1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VN1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D4 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VP2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D5 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VN2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D6 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VPC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D7 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D8 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VP3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D9 */
+ SUNXI_FUNCTION(0x3, "lvds0")), /* VM3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D10 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VP0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D11 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VN0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD12,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D12 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VP1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD13,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D13 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VN1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD14,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D14 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VP2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD15,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D15 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VN2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD16,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D16 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VPC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD17,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D17 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD18,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D18 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VP3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD19,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D19 */
+ SUNXI_FUNCTION(0x3, "lvds1")), /* VN3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD20,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D20 */
+ SUNXI_FUNCTION(0x3, "csi1")), /* MCLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD21,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D21 */
+ SUNXI_FUNCTION(0x3, "sim")), /* VPPEN */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD22,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D22 */
+ SUNXI_FUNCTION(0x3, "sim")), /* VPPPP */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD23,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* D23 */
+ SUNXI_FUNCTION(0x3, "sim")), /* DET */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD24,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "sim")), /* VCCEN */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD25,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* DE */
+ SUNXI_FUNCTION(0x3, "sim")), /* RST */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD26,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* HSYNC */
+ SUNXI_FUNCTION(0x3, "sim")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PD27,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd0"), /* VSYNC */
+ SUNXI_FUNCTION(0x3, "sim")), /* SDA */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "csi0")), /* PCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* ERR */
+ SUNXI_FUNCTION(0x3, "csi0")), /* CK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* SYNC */
+ SUNXI_FUNCTION(0x3, "csi0")), /* HSYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* DVLD */
+ SUNXI_FUNCTION(0x3, "csi0")), /* VSYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* D0 */
+ SUNXI_FUNCTION(0x3, "csi0")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* D1 */
+ SUNXI_FUNCTION(0x3, "csi0"), /* D1 */
+ SUNXI_FUNCTION(0x4, "sim")), /* VPPEN */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* D2 */
+ SUNXI_FUNCTION(0x3, "csi0")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* D3 */
+ SUNXI_FUNCTION(0x3, "csi0")), /* D3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* D4 */
+ SUNXI_FUNCTION(0x3, "csi0")), /* D4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* D5 */
+ SUNXI_FUNCTION(0x3, "csi0")), /* D5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* D6 */
+ SUNXI_FUNCTION(0x3, "csi0")), /* D6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PE11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts0"), /* D7 */
+ SUNXI_FUNCTION(0x3, "csi0")), /* D7 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D1 */
+ SUNXI_FUNCTION(0x4, "jtag")), /* MS1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D0 */
+ SUNXI_FUNCTION(0x4, "jtag")), /* DI1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CLK */
+ SUNXI_FUNCTION(0x4, "uart0")), /* TX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* CMD */
+ SUNXI_FUNCTION(0x4, "jtag")), /* DO1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D3 */
+ SUNXI_FUNCTION(0x4, "uart0")), /* RX */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PF5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc0"), /* D2 */
+ SUNXI_FUNCTION(0x4, "jtag")), /* CK1 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts1"), /* CLK */
+ SUNXI_FUNCTION(0x3, "csi1"), /* PCK */
+ SUNXI_FUNCTION(0x4, "mmc1")), /* CMD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts1"), /* ERR */
+ SUNXI_FUNCTION(0x3, "csi1"), /* CK */
+ SUNXI_FUNCTION(0x4, "mmc1")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts1"), /* SYNC */
+ SUNXI_FUNCTION(0x3, "csi1"), /* HSYNC */
+ SUNXI_FUNCTION(0x4, "mmc1")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts1"), /* DVLD */
+ SUNXI_FUNCTION(0x3, "csi1"), /* VSYNC */
+ SUNXI_FUNCTION(0x4, "mmc1")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts1"), /* D0 */
+ SUNXI_FUNCTION(0x3, "csi1"), /* D0 */
+ SUNXI_FUNCTION(0x4, "mmc1"), /* D2 */
+ SUNXI_FUNCTION(0x5, "csi0")), /* D8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts1"), /* D1 */
+ SUNXI_FUNCTION(0x3, "csi1"), /* D1 */
+ SUNXI_FUNCTION(0x4, "mmc1"), /* D3 */
+ SUNXI_FUNCTION(0x5, "csi0")), /* D9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts1"), /* D2 */
+ SUNXI_FUNCTION(0x3, "csi1"), /* D2 */
+ SUNXI_FUNCTION(0x4, "uart3"), /* TX */
+ SUNXI_FUNCTION(0x5, "csi0")), /* D10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts1"), /* D3 */
+ SUNXI_FUNCTION(0x3, "csi1"), /* D3 */
+ SUNXI_FUNCTION(0x4, "uart3"), /* RX */
+ SUNXI_FUNCTION(0x5, "csi0")), /* D11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts1"), /* D4 */
+ SUNXI_FUNCTION(0x3, "csi1"), /* D4 */
+ SUNXI_FUNCTION(0x4, "uart3"), /* RTS */
+ SUNXI_FUNCTION(0x5, "csi0")), /* D12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts1"), /* D5 */
+ SUNXI_FUNCTION(0x3, "csi1"), /* D5 */
+ SUNXI_FUNCTION(0x4, "uart3"), /* CTS */
+ SUNXI_FUNCTION(0x5, "csi0")), /* D13 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts1"), /* D6 */
+ SUNXI_FUNCTION(0x3, "csi1"), /* D6 */
+ SUNXI_FUNCTION(0x4, "uart4"), /* TX */
+ SUNXI_FUNCTION(0x5, "csi0")), /* D14 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PG11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ts1"), /* D7 */
+ SUNXI_FUNCTION(0x3, "csi1"), /* D7 */
+ SUNXI_FUNCTION(0x4, "uart4"), /* RX */
+ SUNXI_FUNCTION(0x5, "csi0")), /* D15 */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D0 */
+ SUNXI_FUNCTION(0x4, "uart3"), /* TX */
+ SUNXI_FUNCTION_IRQ(0x6, 0), /* EINT0 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D1 */
+ SUNXI_FUNCTION(0x4, "uart3"), /* RX */
+ SUNXI_FUNCTION_IRQ(0x6, 1), /* EINT1 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D2 */
+ SUNXI_FUNCTION(0x4, "uart3"), /* RTS */
+ SUNXI_FUNCTION_IRQ(0x6, 2), /* EINT2 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D3 */
+ SUNXI_FUNCTION(0x4, "uart3"), /* CTS */
+ SUNXI_FUNCTION_IRQ(0x6, 3), /* EINT3 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D4 */
+ SUNXI_FUNCTION(0x4, "uart4"), /* TX */
+ SUNXI_FUNCTION_IRQ(0x6, 4), /* EINT4 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D4 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D5 */
+ SUNXI_FUNCTION(0x4, "uart4"), /* RX */
+ SUNXI_FUNCTION_IRQ(0x6, 5), /* EINT5 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D5 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D6 */
+ SUNXI_FUNCTION(0x4, "uart5"), /* TX */
+ SUNXI_FUNCTION(0x5, "ms"), /* BS */
+ SUNXI_FUNCTION_IRQ(0x6, 6), /* EINT6 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D6 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D7 */
+ SUNXI_FUNCTION(0x4, "uart5"), /* RX */
+ SUNXI_FUNCTION(0x5, "ms"), /* CLK */
+ SUNXI_FUNCTION_IRQ(0x6, 7), /* EINT7 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D7 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D8 */
+ SUNXI_FUNCTION(0x3, "emac"), /* ERXD3 */
+ SUNXI_FUNCTION(0x4, "keypad"), /* IN0 */
+ SUNXI_FUNCTION(0x5, "ms"), /* D0 */
+ SUNXI_FUNCTION_IRQ(0x6, 8), /* EINT8 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D8 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D9 */
+ SUNXI_FUNCTION(0x3, "emac"), /* ERXD2 */
+ SUNXI_FUNCTION(0x4, "keypad"), /* IN1 */
+ SUNXI_FUNCTION(0x5, "ms"), /* D1 */
+ SUNXI_FUNCTION_IRQ(0x6, 9), /* EINT9 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D9 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D10 */
+ SUNXI_FUNCTION(0x3, "emac"), /* ERXD1 */
+ SUNXI_FUNCTION(0x4, "keypad"), /* IN2 */
+ SUNXI_FUNCTION(0x5, "ms"), /* D2 */
+ SUNXI_FUNCTION_IRQ(0x6, 10), /* EINT10 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D10 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D11 */
+ SUNXI_FUNCTION(0x3, "emac"), /* ERXD0 */
+ SUNXI_FUNCTION(0x4, "keypad"), /* IN3 */
+ SUNXI_FUNCTION(0x5, "ms"), /* D3 */
+ SUNXI_FUNCTION_IRQ(0x6, 11), /* EINT11 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D11 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH12,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D12 */
+ SUNXI_FUNCTION(0x4, "ps2"), /* SCK1 */
+ SUNXI_FUNCTION_IRQ(0x6, 12), /* EINT12 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D12 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH13,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D13 */
+ SUNXI_FUNCTION(0x4, "ps2"), /* SDA1 */
+ SUNXI_FUNCTION(0x5, "sim"), /* RST */
+ SUNXI_FUNCTION_IRQ(0x6, 13), /* EINT13 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D13 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH14,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D14 */
+ SUNXI_FUNCTION(0x3, "emac"), /* ETXD3 */
+ SUNXI_FUNCTION(0x4, "keypad"), /* IN4 */
+ SUNXI_FUNCTION(0x5, "sim"), /* VPPEN */
+ SUNXI_FUNCTION_IRQ(0x6, 14), /* EINT14 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D14 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH15,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D15 */
+ SUNXI_FUNCTION(0x3, "emac"), /* ETXD3 */
+ SUNXI_FUNCTION(0x4, "keypad"), /* IN5 */
+ SUNXI_FUNCTION(0x5, "sim"), /* VPPPP */
+ SUNXI_FUNCTION_IRQ(0x6, 15), /* EINT15 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D15 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH16,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D16 */
+ SUNXI_FUNCTION(0x3, "emac"), /* ETXD2 */
+ SUNXI_FUNCTION(0x4, "keypad"), /* IN6 */
+ SUNXI_FUNCTION_IRQ(0x6, 16), /* EINT16 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D16 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH17,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D17 */
+ SUNXI_FUNCTION(0x3, "emac"), /* ETXD1 */
+ SUNXI_FUNCTION(0x4, "keypad"), /* IN7 */
+ SUNXI_FUNCTION(0x5, "sim"), /* VCCEN */
+ SUNXI_FUNCTION_IRQ(0x6, 17), /* EINT17 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D17 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH18,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D18 */
+ SUNXI_FUNCTION(0x3, "emac"), /* ETXD0 */
+ SUNXI_FUNCTION(0x4, "keypad"), /* OUT0 */
+ SUNXI_FUNCTION(0x5, "sim"), /* SCK */
+ SUNXI_FUNCTION_IRQ(0x6, 18), /* EINT18 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D18 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH19,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D19 */
+ SUNXI_FUNCTION(0x3, "emac"), /* ERXERR */
+ SUNXI_FUNCTION(0x4, "keypad"), /* OUT1 */
+ SUNXI_FUNCTION(0x5, "sim"), /* SDA */
+ SUNXI_FUNCTION_IRQ(0x6, 19), /* EINT19 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D19 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH20,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D20 */
+ SUNXI_FUNCTION(0x3, "emac"), /* ERXDV */
+ SUNXI_FUNCTION(0x4, "can"), /* TX */
+ SUNXI_FUNCTION_IRQ(0x6, 20), /* EINT20 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D20 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH21,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D21 */
+ SUNXI_FUNCTION(0x3, "emac"), /* EMDC */
+ SUNXI_FUNCTION(0x4, "can"), /* RX */
+ SUNXI_FUNCTION_IRQ(0x6, 21), /* EINT21 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D21 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH22,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D22 */
+ SUNXI_FUNCTION(0x3, "emac"), /* EMDIO */
+ SUNXI_FUNCTION(0x4, "keypad"), /* OUT2 */
+ SUNXI_FUNCTION(0x5, "mmc1"), /* CMD */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D22 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH23,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* D23 */
+ SUNXI_FUNCTION(0x3, "emac"), /* ETXEN */
+ SUNXI_FUNCTION(0x4, "keypad"), /* OUT3 */
+ SUNXI_FUNCTION(0x5, "mmc1"), /* CLK */
+ SUNXI_FUNCTION(0x7, "csi1")), /* D23 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH24,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* CLK */
+ SUNXI_FUNCTION(0x3, "emac"), /* ETXCK */
+ SUNXI_FUNCTION(0x4, "keypad"), /* OUT4 */
+ SUNXI_FUNCTION(0x5, "mmc1"), /* D0 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* PCLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH25,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* DE */
+ SUNXI_FUNCTION(0x3, "emac"), /* ECRS */
+ SUNXI_FUNCTION(0x4, "keypad"), /* OUT5 */
+ SUNXI_FUNCTION(0x5, "mmc1"), /* D1 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* FIELD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH26,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* HSYNC */
+ SUNXI_FUNCTION(0x3, "emac"), /* ECOL */
+ SUNXI_FUNCTION(0x4, "keypad"), /* OUT6 */
+ SUNXI_FUNCTION(0x5, "mmc1"), /* D2 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* HSYNC */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PH27,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "lcd1"), /* VSYNC */
+ SUNXI_FUNCTION(0x3, "emac"), /* ETXERR */
+ SUNXI_FUNCTION(0x4, "keypad"), /* OUT7 */
+ SUNXI_FUNCTION(0x5, "mmc1"), /* D3 */
+ SUNXI_FUNCTION(0x7, "csi1")), /* VSYNC */
+ /* Hole */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI0,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "i2c3")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI1,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "i2c3")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI2,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x3, "i2c4")), /* SCK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI3,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "pwm"), /* PWM1 */
+ SUNXI_FUNCTION(0x3, "i2c4")), /* SDA */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI4,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc3")), /* CMD */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI5,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc3")), /* CLK */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI6,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc3")), /* D0 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI7,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc3")), /* D1 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI8,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc3")), /* D2 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI9,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "mmc3")), /* D3 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI10,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* CS0 */
+ SUNXI_FUNCTION(0x3, "uart5"), /* TX */
+ SUNXI_FUNCTION_IRQ(0x5, 22)), /* EINT22 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI11,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* CLK */
+ SUNXI_FUNCTION(0x3, "uart5"), /* RX */
+ SUNXI_FUNCTION_IRQ(0x5, 23)), /* EINT23 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI12,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* MOSI */
+ SUNXI_FUNCTION(0x3, "uart6"), /* TX */
+ SUNXI_FUNCTION_IRQ(0x5, 24)), /* EINT24 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI13,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* MISO */
+ SUNXI_FUNCTION(0x3, "uart6"), /* RX */
+ SUNXI_FUNCTION_IRQ(0x5, 25)), /* EINT25 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI14,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi0"), /* CS1 */
+ SUNXI_FUNCTION(0x3, "ps2"), /* SCK1 */
+ SUNXI_FUNCTION(0x4, "timer4"), /* TCLKIN0 */
+ SUNXI_FUNCTION_IRQ(0x5, 26)), /* EINT26 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI15,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* CS1 */
+ SUNXI_FUNCTION(0x3, "ps2"), /* SDA1 */
+ SUNXI_FUNCTION(0x4, "timer5"), /* TCLKIN1 */
+ SUNXI_FUNCTION_IRQ(0x5, 27)), /* EINT27 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI16,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* CS0 */
+ SUNXI_FUNCTION(0x3, "uart2"), /* RTS */
+ SUNXI_FUNCTION_IRQ(0x5, 28)), /* EINT28 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI17,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* CLK */
+ SUNXI_FUNCTION(0x3, "uart2"), /* CTS */
+ SUNXI_FUNCTION_IRQ(0x5, 29)), /* EINT29 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI18,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* MOSI */
+ SUNXI_FUNCTION(0x3, "uart2"), /* TX */
+ SUNXI_FUNCTION_IRQ(0x5, 30)), /* EINT30 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI19,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "spi1"), /* MISO */
+ SUNXI_FUNCTION(0x3, "uart2"), /* RX */
+ SUNXI_FUNCTION_IRQ(0x5, 31)), /* EINT31 */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI20,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ps2"), /* SCK0 */
+ SUNXI_FUNCTION(0x3, "uart7"), /* TX */
+ SUNXI_FUNCTION(0x4, "hdmi")), /* HSCL */
+ SUNXI_PIN(SUNXI_PINCTRL_PIN_PI21,
+ SUNXI_FUNCTION(0x0, "gpio_in"),
+ SUNXI_FUNCTION(0x1, "gpio_out"),
+ SUNXI_FUNCTION(0x2, "ps2"), /* SDA0 */
+ SUNXI_FUNCTION(0x3, "uart7"), /* RX */
+ SUNXI_FUNCTION(0x4, "hdmi")), /* HSDA */
+};
+
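The table that ends above is pure data: each SUNXI_PIN() entry pairs a pin with an empty-terminated list of SUNXI_FUNCTION() muxes, and the driver later resolves a (pin, function-name) pair to a mux value by walking it (see sunxi_pinctrl_desc_find_function_by_pin() in the .c hunks below). A minimal sketch of that lookup, with the struct field names assumed from the macros rather than quoted from pinctrl-sunxi.h:

#include <linux/string.h>

/* Sketch only: the sunxi_desc_pin/sunxi_desc_function layout is assumed. */
static struct sunxi_desc_function *
find_function_by_pin(const struct sunxi_desc_pin *pins, int npins,
		     unsigned pin_num, const char *func_name)
{
	struct sunxi_desc_function *func;
	int i;

	for (i = 0; i < npins; i++) {
		if (pins[i].pin.number != pin_num)
			continue;

		/* each per-pin function list ends with an empty entry */
		for (func = pins[i].functions; func->name; func++)
			if (!strcmp(func->name, func_name))
				return func;	/* func->muxval selects the mux */
	}

	return NULL;
}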
static const struct sunxi_pinctrl_desc sun4i_a10_pinctrl_data = {
.pins = sun4i_a10_pins,
.npins = ARRAY_SIZE(sun4i_a10_pins),
@@ -2020,4 +3848,14 @@ static const struct sunxi_pinctrl_desc sun5i_a13_pinctrl_data = {
.npins = ARRAY_SIZE(sun5i_a13_pins),
};
+static const struct sunxi_pinctrl_desc sun6i_a31_pinctrl_data = {
+ .pins = sun6i_a31_pins,
+ .npins = ARRAY_SIZE(sun6i_a31_pins),
+};
+
+static const struct sunxi_pinctrl_desc sun7i_a20_pinctrl_data = {
+ .pins = sun7i_a20_pins,
+ .npins = ARRAY_SIZE(sun7i_a20_pins),
+};
+
#endif /* __PINCTRL_SUNXI_PINS_H */
diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c
index c47fd1e5450..119d2ddedfe 100644
--- a/drivers/pinctrl/pinctrl-sunxi.c
+++ b/drivers/pinctrl/pinctrl-sunxi.c
@@ -175,7 +175,7 @@ static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
}
*map = kmalloc(nmaps * sizeof(struct pinctrl_map), GFP_KERNEL);
- if (!map)
+ if (!*map)
return -ENOMEM;
of_property_for_each_string(node, "allwinner,pins", prop, group) {
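The one-character change above matters because map is an out-parameter (struct pinctrl_map **): the caller always passes a valid &map, so checking map can never catch a failed allocation; only *map, the freshly allocated array, can be NULL. A hedged illustration of the pattern (not this driver's code):

#include <linux/slab.h>
#include <linux/pinctrl/machine.h>

/* Illustration only: allocate through an out-parameter, then test the
 * allocation itself (*map), not the parameter (map). */
static int alloc_maps(struct pinctrl_map **map, unsigned nmaps)
{
	*map = kmalloc(nmaps * sizeof(**map), GFP_KERNEL);
	if (!*map)		/* "if (!map)" would never fire here */
		return -ENOMEM;

	return 0;
}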
@@ -274,50 +274,61 @@ static int sunxi_pconf_group_get(struct pinctrl_dev *pctldev,
static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
unsigned group,
- unsigned long config)
+ unsigned long *configs,
+ unsigned num_configs)
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
struct sunxi_pinctrl_group *g = &pctl->groups[group];
+ unsigned long flags;
u32 val, mask;
u16 strength;
u8 dlevel;
+ int i;
- switch (pinconf_to_config_param(config)) {
- case PIN_CONFIG_DRIVE_STRENGTH:
- strength = pinconf_to_config_argument(config);
- if (strength > 40)
- return -EINVAL;
- /*
- * We convert from mA to what the register expects:
- * 0: 10mA
- * 1: 20mA
- * 2: 30mA
- * 3: 40mA
- */
- dlevel = strength / 10 - 1;
- val = readl(pctl->membase + sunxi_dlevel_reg(g->pin));
- mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin);
- writel((val & ~mask) | dlevel << sunxi_dlevel_offset(g->pin),
- pctl->membase + sunxi_dlevel_reg(g->pin));
- break;
- case PIN_CONFIG_BIAS_PULL_UP:
- val = readl(pctl->membase + sunxi_pull_reg(g->pin));
- mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
- writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin),
- pctl->membase + sunxi_pull_reg(g->pin));
- break;
- case PIN_CONFIG_BIAS_PULL_DOWN:
- val = readl(pctl->membase + sunxi_pull_reg(g->pin));
- mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
- writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin),
- pctl->membase + sunxi_pull_reg(g->pin));
- break;
- default:
- break;
- }
+ spin_lock_irqsave(&pctl->lock, flags);
+
+ for (i = 0; i < num_configs; i++) {
+ switch (pinconf_to_config_param(configs[i])) {
+ case PIN_CONFIG_DRIVE_STRENGTH:
+ strength = pinconf_to_config_argument(configs[i]);
+ if (strength > 40) {
+ spin_unlock_irqrestore(&pctl->lock, flags);
+ return -EINVAL;
+ }
+ /*
+ * We convert from mA to what the register expects:
+ * 0: 10mA
+ * 1: 20mA
+ * 2: 30mA
+ * 3: 40mA
+ */
+ dlevel = strength / 10 - 1;
+ val = readl(pctl->membase + sunxi_dlevel_reg(g->pin));
+ mask = DLEVEL_PINS_MASK << sunxi_dlevel_offset(g->pin);
+ writel((val & ~mask)
+ | dlevel << sunxi_dlevel_offset(g->pin),
+ pctl->membase + sunxi_dlevel_reg(g->pin));
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ val = readl(pctl->membase + sunxi_pull_reg(g->pin));
+ mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
+ writel((val & ~mask) | 1 << sunxi_pull_offset(g->pin),
+ pctl->membase + sunxi_pull_reg(g->pin));
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ val = readl(pctl->membase + sunxi_pull_reg(g->pin));
+ mask = PULL_PINS_MASK << sunxi_pull_offset(g->pin);
+ writel((val & ~mask) | 2 << sunxi_pull_offset(g->pin),
+ pctl->membase + sunxi_pull_reg(g->pin));
+ break;
+ default:
+ break;
+ }
+ /* cache the config value */
+ g->config = configs[i];
+ } /* for each config */
- /* cache the config value */
- g->config = config;
+ spin_unlock_irqrestore(&pctl->lock, flags);
return 0;
}
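The new signature above reflects the pinconf core handing a driver the whole array of configs for a group in one call, so the driver iterates internally (and, as here, can take its lock once around the batch) instead of being invoked once per config. A minimal skeleton of a new-style callback, built only on the generic pinconf helpers already used in this hunk; foo_* is a placeholder name:

#include <linux/pinctrl/pinconf.h>
#include <linux/pinctrl/pinconf-generic.h>

static int foo_pinconf_group_set(struct pinctrl_dev *pctldev, unsigned group,
				 unsigned long *configs, unsigned num_configs)
{
	int i;

	for (i = 0; i < num_configs; i++) {
		enum pin_config_param param = pinconf_to_config_param(configs[i]);
		u32 arg = pinconf_to_config_argument(configs[i]);

		/* decode (param, arg) and program the group's registers here;
		 * return a negative errno to abort the remaining entries */
		(void)param;
		(void)arg;
	}

	return 0;
}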
@@ -360,11 +371,17 @@ static void sunxi_pmx_set(struct pinctrl_dev *pctldev,
u8 config)
{
struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+ unsigned long flags;
+ u32 val, mask;
- u32 val = readl(pctl->membase + sunxi_mux_reg(pin));
- u32 mask = MUX_PINS_MASK << sunxi_mux_offset(pin);
+ spin_lock_irqsave(&pctl->lock, flags);
+
+ val = readl(pctl->membase + sunxi_mux_reg(pin));
+ mask = MUX_PINS_MASK << sunxi_mux_offset(pin);
writel((val & ~mask) | config << sunxi_mux_offset(pin),
pctl->membase + sunxi_mux_reg(pin));
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
static int sunxi_pmx_enable(struct pinctrl_dev *pctldev,
@@ -464,8 +481,21 @@ static void sunxi_pinctrl_gpio_set(struct gpio_chip *chip,
struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
u32 reg = sunxi_data_reg(offset);
u8 index = sunxi_data_offset(offset);
+ unsigned long flags;
+ u32 regval;
+
+ spin_lock_irqsave(&pctl->lock, flags);
+
+ regval = readl(pctl->membase + reg);
- writel((value & DATA_PINS_MASK) << index, pctl->membase + reg);
+ if (value)
+ regval |= BIT(index);
+ else
+ regval &= ~(BIT(index));
+
+ writel(regval, pctl->membase + reg);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
static int sunxi_pinctrl_gpio_of_xlate(struct gpio_chip *gc,
@@ -491,7 +521,7 @@ static int sunxi_pinctrl_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
struct sunxi_pinctrl *pctl = dev_get_drvdata(chip->dev);
struct sunxi_desc_function *desc;
- if (offset > chip->ngpio)
+ if (offset >= chip->ngpio)
return -ENXIO;
desc = sunxi_pinctrl_desc_find_function_by_pin(pctl, offset, "irq");
@@ -526,6 +556,8 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d,
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
u32 reg = sunxi_irq_cfg_reg(d->hwirq);
u8 index = sunxi_irq_cfg_offset(d->hwirq);
+ unsigned long flags;
+ u32 regval;
u8 mode;
switch (type) {
@@ -548,7 +580,13 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d,
return -EINVAL;
}
- writel((mode & IRQ_CFG_IRQ_MASK) << index, pctl->membase + reg);
+ spin_lock_irqsave(&pctl->lock, flags);
+
+ regval = readl(pctl->membase + reg);
+ regval &= ~IRQ_CFG_IRQ_MASK;
+ writel(regval | (mode << index), pctl->membase + reg);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
return 0;
}
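Both the clear mask and the new mode have to be shifted by the same per-IRQ offset: clearing an unshifted mask would wipe the field of hwirq 0 while leaving the stale mode bits of the IRQ actually being configured in place (hence the shifted mask used above). A worked example under the assumption that IRQ_CFG_IRQ_MASK is a 4-bit field mask (0xf):

/* Assumed example values: regval = 0x00000507, index = 8, mode = 0x3. */
static u32 irq_cfg_update(u32 regval, u32 index, u32 mode)
{
	u32 cleared = regval & ~(0xfU << index);  /* 0x00000007: only bits 11:8 cleared */
	return cleared | (mode << index);         /* 0x00000307: new mode installed */
}
/* Clearing the unshifted mask instead would yield 0x00000700: the old field
 * at bits 11:8 survives and the unrelated low nibble is lost. */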
@@ -560,14 +598,19 @@ static void sunxi_pinctrl_irq_mask_ack(struct irq_data *d)
u8 ctrl_idx = sunxi_irq_ctrl_offset(d->hwirq);
u32 status_reg = sunxi_irq_status_reg(d->hwirq);
u8 status_idx = sunxi_irq_status_offset(d->hwirq);
+ unsigned long flags;
u32 val;
+ spin_lock_irqsave(&pctl->lock, flags);
+
/* Mask the IRQ */
val = readl(pctl->membase + ctrl_reg);
writel(val & ~(1 << ctrl_idx), pctl->membase + ctrl_reg);
/* Clear the IRQ */
writel(1 << status_idx, pctl->membase + status_reg);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
static void sunxi_pinctrl_irq_mask(struct irq_data *d)
@@ -575,11 +618,16 @@ static void sunxi_pinctrl_irq_mask(struct irq_data *d)
struct sunxi_pinctrl *pctl = irq_data_get_irq_chip_data(d);
u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
+ unsigned long flags;
u32 val;
+ spin_lock_irqsave(&pctl->lock, flags);
+
/* Mask the IRQ */
val = readl(pctl->membase + reg);
writel(val & ~(1 << idx), pctl->membase + reg);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
@@ -588,6 +636,7 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
struct sunxi_desc_function *func;
u32 reg = sunxi_irq_ctrl_reg(d->hwirq);
u8 idx = sunxi_irq_ctrl_offset(d->hwirq);
+ unsigned long flags;
u32 val;
func = sunxi_pinctrl_desc_find_function_by_pin(pctl,
@@ -597,9 +646,13 @@ static void sunxi_pinctrl_irq_unmask(struct irq_data *d)
/* Change muxing to INT mode */
sunxi_pmx_set(pctl->pctl_dev, pctl->irq_array[d->hwirq], func->muxval);
+ spin_lock_irqsave(&pctl->lock, flags);
+
/* Unmask the IRQ */
val = readl(pctl->membase + reg);
writel(val | (1 << idx), pctl->membase + reg);
+
+ spin_unlock_irqrestore(&pctl->lock, flags);
}
static struct irq_chip sunxi_pinctrl_irq_chip = {
@@ -631,6 +684,8 @@ static struct of_device_id sunxi_pinctrl_match[] = {
{ .compatible = "allwinner,sun4i-a10-pinctrl", .data = (void *)&sun4i_a10_pinctrl_data },
{ .compatible = "allwinner,sun5i-a10s-pinctrl", .data = (void *)&sun5i_a10s_pinctrl_data },
{ .compatible = "allwinner,sun5i-a13-pinctrl", .data = (void *)&sun5i_a13_pinctrl_data },
+ { .compatible = "allwinner,sun6i-a31-pinctrl", .data = (void *)&sun6i_a31_pinctrl_data },
+ { .compatible = "allwinner,sun7i-a20-pinctrl", .data = (void *)&sun7i_a20_pinctrl_data },
{}
};
MODULE_DEVICE_TABLE(of, sunxi_pinctrl_match);
@@ -752,6 +807,8 @@ static int sunxi_pinctrl_probe(struct platform_device *pdev)
return -ENOMEM;
platform_set_drvdata(pdev, pctl);
+ spin_lock_init(&pctl->lock);
+
pctl->membase = of_iomap(node, 0);
if (!pctl->membase)
return -ENOMEM;
diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h
index d68047d8f69..01c494f8a14 100644
--- a/drivers/pinctrl/pinctrl-sunxi.h
+++ b/drivers/pinctrl/pinctrl-sunxi.h
@@ -14,6 +14,7 @@
#define __PINCTRL_SUNXI_H
#include <linux/kernel.h>
+#include <linux/spinlock.h>
#define PA_BASE 0
#define PB_BASE 32
@@ -407,6 +408,7 @@ struct sunxi_pinctrl {
unsigned ngroups;
int irq;
int irq_array[SUNXI_IRQ_NUMBER];
+ spinlock_t lock;
struct pinctrl_dev *pctl_dev;
};
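The new lock member is what the .c hunks above take with spin_lock_irqsave() around every register update: the pinmux, pinconf, GPIO and irqchip paths all read-modify-write the same shared registers, so each of them must hold the lock with local interrupts off. The discipline, reduced to a sketch:

#include <linux/io.h>
#include <linux/spinlock.h>

/* Sketch of the read-modify-write pattern the lock serializes. */
static void rmw_bits(spinlock_t *lock, void __iomem *reg, u32 mask, u32 bits)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(lock, flags);
	val = readl(reg);
	writel((val & ~mask) | bits, reg);
	spin_unlock_irqrestore(lock, flags);
}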
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index 2fa9bc6cd7a..a2e93a2b5ff 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -32,6 +32,7 @@
#include "core.h"
#include "pinctrl-tegra.h"
+#include "pinctrl-utils.h"
struct tegra_pmx {
struct device *dev;
@@ -90,107 +91,6 @@ static void tegra_pinctrl_pin_dbg_show(struct pinctrl_dev *pctldev,
}
#endif
-static int reserve_map(struct device *dev, struct pinctrl_map **map,
- unsigned *reserved_maps, unsigned *num_maps,
- unsigned reserve)
-{
- unsigned old_num = *reserved_maps;
- unsigned new_num = *num_maps + reserve;
- struct pinctrl_map *new_map;
-
- if (old_num >= new_num)
- return 0;
-
- new_map = krealloc(*map, sizeof(*new_map) * new_num, GFP_KERNEL);
- if (!new_map) {
- dev_err(dev, "krealloc(map) failed\n");
- return -ENOMEM;
- }
-
- memset(new_map + old_num, 0, (new_num - old_num) * sizeof(*new_map));
-
- *map = new_map;
- *reserved_maps = new_num;
-
- return 0;
-}
-
-static int add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps,
- unsigned *num_maps, const char *group,
- const char *function)
-{
- if (WARN_ON(*num_maps == *reserved_maps))
- return -ENOSPC;
-
- (*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
- (*map)[*num_maps].data.mux.group = group;
- (*map)[*num_maps].data.mux.function = function;
- (*num_maps)++;
-
- return 0;
-}
-
-static int add_map_configs(struct device *dev, struct pinctrl_map **map,
- unsigned *reserved_maps, unsigned *num_maps,
- const char *group, unsigned long *configs,
- unsigned num_configs)
-{
- unsigned long *dup_configs;
-
- if (WARN_ON(*num_maps == *reserved_maps))
- return -ENOSPC;
-
- dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs),
- GFP_KERNEL);
- if (!dup_configs) {
- dev_err(dev, "kmemdup(configs) failed\n");
- return -ENOMEM;
- }
-
- (*map)[*num_maps].type = PIN_MAP_TYPE_CONFIGS_GROUP;
- (*map)[*num_maps].data.configs.group_or_pin = group;
- (*map)[*num_maps].data.configs.configs = dup_configs;
- (*map)[*num_maps].data.configs.num_configs = num_configs;
- (*num_maps)++;
-
- return 0;
-}
-
-static int add_config(struct device *dev, unsigned long **configs,
- unsigned *num_configs, unsigned long config)
-{
- unsigned old_num = *num_configs;
- unsigned new_num = old_num + 1;
- unsigned long *new_configs;
-
- new_configs = krealloc(*configs, sizeof(*new_configs) * new_num,
- GFP_KERNEL);
- if (!new_configs) {
- dev_err(dev, "krealloc(configs) failed\n");
- return -ENOMEM;
- }
-
- new_configs[old_num] = config;
-
- *configs = new_configs;
- *num_configs = new_num;
-
- return 0;
-}
-
-static void tegra_pinctrl_dt_free_map(struct pinctrl_dev *pctldev,
- struct pinctrl_map *map,
- unsigned num_maps)
-{
- int i;
-
- for (i = 0; i < num_maps; i++)
- if (map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP)
- kfree(map[i].data.configs.configs);
-
- kfree(map);
-}
-
static const struct cfg_param {
const char *property;
enum tegra_pinconf_param param;
@@ -212,12 +112,13 @@ static const struct cfg_param {
{"nvidia,drive-type", TEGRA_PINCONF_PARAM_DRIVE_TYPE},
};
-static int tegra_pinctrl_dt_subnode_to_map(struct device *dev,
+static int tegra_pinctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
struct device_node *np,
struct pinctrl_map **map,
unsigned *reserved_maps,
unsigned *num_maps)
{
+ struct device *dev = pctldev->dev;
int ret, i;
const char *function;
u32 val;
@@ -241,7 +142,8 @@ static int tegra_pinctrl_dt_subnode_to_map(struct device *dev,
ret = of_property_read_u32(np, cfg_params[i].property, &val);
if (!ret) {
config = TEGRA_PINCONF_PACK(cfg_params[i].param, val);
- ret = add_config(dev, &configs, &num_configs, config);
+ ret = pinctrl_utils_add_config(pctldev, &configs,
+ &num_configs, config);
if (ret < 0)
goto exit;
/* EINVAL=missing, which is fine since it's optional */
@@ -263,22 +165,25 @@ static int tegra_pinctrl_dt_subnode_to_map(struct device *dev,
}
reserve *= ret;
- ret = reserve_map(dev, map, reserved_maps, num_maps, reserve);
+ ret = pinctrl_utils_reserve_map(pctldev, map, reserved_maps,
+ num_maps, reserve);
if (ret < 0)
goto exit;
of_property_for_each_string(np, "nvidia,pins", prop, group) {
if (function) {
- ret = add_map_mux(map, reserved_maps, num_maps,
- group, function);
+ ret = pinctrl_utils_add_map_mux(pctldev, map,
+ reserved_maps, num_maps, group,
+ function);
if (ret < 0)
goto exit;
}
if (num_configs) {
- ret = add_map_configs(dev, map, reserved_maps,
- num_maps, group, configs,
- num_configs);
+ ret = pinctrl_utils_add_map_configs(pctldev, map,
+ reserved_maps, num_maps, group,
+ configs, num_configs,
+ PIN_MAP_TYPE_CONFIGS_GROUP);
if (ret < 0)
goto exit;
}
@@ -305,10 +210,11 @@ static int tegra_pinctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
*num_maps = 0;
for_each_child_of_node(np_config, np) {
- ret = tegra_pinctrl_dt_subnode_to_map(pctldev->dev, np, map,
+ ret = tegra_pinctrl_dt_subnode_to_map(pctldev, np, map,
&reserved_maps, num_maps);
if (ret < 0) {
- tegra_pinctrl_dt_free_map(pctldev, *map, *num_maps);
+ pinctrl_utils_dt_free_map(pctldev, *map,
+ *num_maps);
return ret;
}
}
@@ -324,7 +230,7 @@ static const struct pinctrl_ops tegra_pinctrl_ops = {
.pin_dbg_show = tegra_pinctrl_pin_dbg_show,
#endif
.dt_node_to_map = tegra_pinctrl_dt_node_to_map,
- .dt_free_map = tegra_pinctrl_dt_free_map,
+ .dt_free_map = pinctrl_utils_dt_free_map,
};
static int tegra_pinctrl_get_funcs_count(struct pinctrl_dev *pctldev)
@@ -530,7 +436,8 @@ static int tegra_pinconf_get(struct pinctrl_dev *pctldev,
}
static int tegra_pinconf_set(struct pinctrl_dev *pctldev,
- unsigned pin, unsigned long config)
+ unsigned pin, unsigned long *configs,
+ unsigned num_configs)
{
dev_err(pctldev->dev, "pin_config_set op not supported\n");
return -ENOTSUPP;
@@ -565,51 +472,57 @@ static int tegra_pinconf_group_get(struct pinctrl_dev *pctldev,
}
static int tegra_pinconf_group_set(struct pinctrl_dev *pctldev,
- unsigned group, unsigned long config)
+ unsigned group, unsigned long *configs,
+ unsigned num_configs)
{
struct tegra_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
- enum tegra_pinconf_param param = TEGRA_PINCONF_UNPACK_PARAM(config);
- u16 arg = TEGRA_PINCONF_UNPACK_ARG(config);
+ enum tegra_pinconf_param param;
+ u16 arg;
const struct tegra_pingroup *g;
- int ret;
+ int ret, i;
s8 bank, bit, width;
s16 reg;
u32 val, mask;
g = &pmx->soc->groups[group];
- ret = tegra_pinconf_reg(pmx, g, param, true, &bank, &reg, &bit,
- &width);
- if (ret < 0)
- return ret;
+ for (i = 0; i < num_configs; i++) {
+ param = TEGRA_PINCONF_UNPACK_PARAM(configs[i]);
+ arg = TEGRA_PINCONF_UNPACK_ARG(configs[i]);
- val = pmx_readl(pmx, bank, reg);
+ ret = tegra_pinconf_reg(pmx, g, param, true, &bank, &reg, &bit,
+ &width);
+ if (ret < 0)
+ return ret;
- /* LOCK can't be cleared */
- if (param == TEGRA_PINCONF_PARAM_LOCK) {
- if ((val & BIT(bit)) && !arg) {
- dev_err(pctldev->dev, "LOCK bit cannot be cleared\n");
- return -EINVAL;
+ val = pmx_readl(pmx, bank, reg);
+
+ /* LOCK can't be cleared */
+ if (param == TEGRA_PINCONF_PARAM_LOCK) {
+ if ((val & BIT(bit)) && !arg) {
+ dev_err(pctldev->dev, "LOCK bit cannot be cleared\n");
+ return -EINVAL;
+ }
}
- }
- /* Special-case Boolean values; allow any non-zero as true */
- if (width == 1)
- arg = !!arg;
+ /* Special-case Boolean values; allow any non-zero as true */
+ if (width == 1)
+ arg = !!arg;
- /* Range-check user-supplied value */
- mask = (1 << width) - 1;
- if (arg & ~mask) {
- dev_err(pctldev->dev,
- "config %lx: %x too big for %d bit register\n",
- config, arg, width);
- return -EINVAL;
- }
+ /* Range-check user-supplied value */
+ mask = (1 << width) - 1;
+ if (arg & ~mask) {
+ dev_err(pctldev->dev,
+ "config %lx: %x too big for %d bit register\n",
+ configs[i], arg, width);
+ return -EINVAL;
+ }
- /* Update register */
- val &= ~(mask << bit);
- val |= arg << bit;
- pmx_writel(pmx, val, bank, reg);
+ /* Update register */
+ val &= ~(mask << bit);
+ val |= arg << bit;
+ pmx_writel(pmx, val, bank, reg);
+ } /* for each config */
return 0;
}
@@ -737,25 +650,9 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
for (i = 0; i < pmx->nbanks; i++) {
res = platform_get_resource(pdev, IORESOURCE_MEM, i);
- if (!res) {
- dev_err(&pdev->dev, "Missing MEM resource\n");
- return -ENODEV;
- }
-
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res),
- dev_name(&pdev->dev))) {
- dev_err(&pdev->dev,
- "Couldn't request MEM resource %d\n", i);
- return -ENODEV;
- }
-
- pmx->regs[i] = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!pmx->regs[i]) {
- dev_err(&pdev->dev, "Couldn't ioremap regs %d\n", i);
- return -ENODEV;
- }
+ pmx->regs[i] = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmx->regs[i]))
+ return PTR_ERR(pmx->regs[i]);
}
pmx->pctl = pinctrl_register(&tegra_pinctrl_desc, &pdev->dev, pmx);
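This probe conversion (repeated below for tz1090, tz1090-pdc and u300) folds the check-resource / request-region / ioremap sequence into devm_ioremap_resource(), which requests and maps the region, prints its own error messages, copes with a NULL resource, and returns an ERR_PTR on failure. The resulting idiom for a generic platform driver, as a sketch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_map_registers(struct platform_device *pdev, void __iomem **regs)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	*regs = devm_ioremap_resource(&pdev->dev, res);	/* NULL res handled too */
	if (IS_ERR(*regs))
		return PTR_ERR(*regs);

	return 0;
}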
diff --git a/drivers/pinctrl/pinctrl-tz1090-pdc.c b/drivers/pinctrl/pinctrl-tz1090-pdc.c
index d4f12cc556b..5bf01c28925 100644
--- a/drivers/pinctrl/pinctrl-tz1090-pdc.c
+++ b/drivers/pinctrl/pinctrl-tz1090-pdc.c
@@ -737,39 +737,46 @@ static int tz1090_pdc_pinconf_get(struct pinctrl_dev *pctldev,
}
static int tz1090_pdc_pinconf_set(struct pinctrl_dev *pctldev,
- unsigned int pin, unsigned long config)
+ unsigned int pin, unsigned long *configs,
+ unsigned num_configs)
{
struct tz1090_pdc_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
- enum pin_config_param param = pinconf_to_config_param(config);
- unsigned int arg = pinconf_to_config_argument(config);
+ enum pin_config_param param;
+ unsigned int arg;
int ret;
u32 reg, width, mask, shift, val, tmp;
unsigned long flags;
+ int i;
- dev_dbg(pctldev->dev, "%s(pin=%s, config=%#lx)\n",
- __func__, tz1090_pdc_pins[pin].name, config);
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
- /* Get register information */
- ret = tz1090_pdc_pinconf_reg(pctldev, pin, param, true,
- &reg, &width, &mask, &shift, &val);
- if (ret < 0)
- return ret;
+ dev_dbg(pctldev->dev, "%s(pin=%s, config=%#lx)\n",
+ __func__, tz1090_pdc_pins[pin].name, configs[i]);
- /* Unpack argument and range check it */
- if (arg > 1) {
- dev_dbg(pctldev->dev, "%s: arg %u out of range\n",
- __func__, arg);
- return -EINVAL;
- }
+ /* Get register information */
+ ret = tz1090_pdc_pinconf_reg(pctldev, pin, param, true,
+ &reg, &width, &mask, &shift, &val);
+ if (ret < 0)
+ return ret;
- /* Write register field */
- __global_lock2(flags);
- tmp = pmx_read(pmx, reg);
- tmp &= ~mask;
- if (arg)
- tmp |= val << shift;
- pmx_write(pmx, tmp, reg);
- __global_unlock2(flags);
+ /* Unpack argument and range check it */
+ if (arg > 1) {
+ dev_dbg(pctldev->dev, "%s: arg %u out of range\n",
+ __func__, arg);
+ return -EINVAL;
+ }
+
+ /* Write register field */
+ __global_lock2(flags);
+ tmp = pmx_read(pmx, reg);
+ tmp &= ~mask;
+ if (arg)
+ tmp |= val << shift;
+ pmx_write(pmx, tmp, reg);
+ __global_unlock2(flags);
+ } /* for each config */
return 0;
}
@@ -860,54 +867,68 @@ static int tz1090_pdc_pinconf_group_get(struct pinctrl_dev *pctldev,
static int tz1090_pdc_pinconf_group_set(struct pinctrl_dev *pctldev,
unsigned int group,
- unsigned long config)
+ unsigned long *configs,
+ unsigned num_configs)
{
struct tz1090_pdc_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
const struct tz1090_pdc_pingroup *g = &tz1090_pdc_groups[group];
- enum pin_config_param param = pinconf_to_config_param(config);
+ enum pin_config_param param;
const unsigned int *pit;
unsigned int i;
int ret, arg;
u32 reg, width, mask, shift, val;
unsigned long flags;
const int *map;
+ int j;
- dev_dbg(pctldev->dev, "%s(group=%s, config=%#lx)\n",
- __func__, g->name, config);
+ for (j = 0; j < num_configs; j++) {
+ param = pinconf_to_config_param(configs[j]);
- /* Get register information */
- ret = tz1090_pdc_pinconf_group_reg(pctldev, g, param, true,
- &reg, &width, &mask, &shift, &map);
- if (ret < 0) {
- /*
- * Maybe we're trying to set a per-pin configuration of a group,
- * so do the pins one by one. This is mainly as a convenience.
- */
- for (i = 0, pit = g->pins; i < g->npins; ++i, ++pit) {
- ret = tz1090_pdc_pinconf_set(pctldev, *pit, config);
- if (ret)
- return ret;
- }
- return 0;
- }
+ dev_dbg(pctldev->dev, "%s(group=%s, config=%#lx)\n",
+ __func__, g->name, configs[j]);
- /* Unpack argument and map it to register value */
- arg = pinconf_to_config_argument(config);
- for (i = 0; i < BIT(width); ++i) {
- if (map[i] == arg || (map[i] == -EINVAL && !arg)) {
- /* Write register field */
- __global_lock2(flags);
- val = pmx_read(pmx, reg);
- val &= ~mask;
- val |= i << shift;
- pmx_write(pmx, val, reg);
- __global_unlock2(flags);
+ /* Get register information */
+ ret = tz1090_pdc_pinconf_group_reg(pctldev, g, param, true,
+ &reg, &width, &mask, &shift,
+ &map);
+ if (ret < 0) {
+ /*
+ * Maybe we're trying to set a per-pin configuration
+ * of a group, so do the pins one by one. This is
+ * mainly as a convenience.
+ */
+ for (i = 0, pit = g->pins; i < g->npins; ++i, ++pit) {
+ ret = tz1090_pdc_pinconf_set(pctldev, *pit,
+ configs, num_configs);
+ if (ret)
+ return ret;
+ }
return 0;
}
- }
- dev_dbg(pctldev->dev, "%s: arg %u not supported\n",
- __func__, arg);
+ /* Unpack argument and map it to register value */
+ arg = pinconf_to_config_argument(configs[j]);
+ for (i = 0; i < BIT(width); ++i) {
+ if (map[i] == arg || (map[i] == -EINVAL && !arg)) {
+ /* Write register field */
+ __global_lock2(flags);
+ val = pmx_read(pmx, reg);
+ val &= ~mask;
+ val |= i << shift;
+ pmx_write(pmx, val, reg);
+ __global_unlock2(flags);
+ goto next_config;
+ }
+ }
+
+ dev_dbg(pctldev->dev, "%s: arg %u not supported\n",
+ __func__, arg);
+ return 0;
+
+next_config:
+ ;
+ } /* for each config */
+
return 0;
}
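The "goto next_config" plus empty-label shape above (mirrored in the tz1090 version below) exists because a plain continue inside the inner map-scanning loop would only advance that inner loop; jumping to a label at the bottom of the outer body is how the per-config loop is "continued" once a matching value has been written. In miniature, with hypothetical match()/apply() helpers:

#include <linux/errno.h>
#include <linux/types.h>

/* match()/apply() are stand-ins for the register lookup and write. */
static bool match(int i, unsigned long cfg) { return (unsigned long)i == cfg; }
static void apply(int i) { (void)i; }

static int apply_configs(unsigned long *configs, unsigned num_configs, int nvals)
{
	unsigned j;
	int i;

	for (j = 0; j < num_configs; j++) {
		for (i = 0; i < nvals; i++) {
			if (match(i, configs[j])) {
				apply(i);
				goto next_config;	/* continue the outer loop */
			}
		}
		return -EINVAL;		/* nothing matched configs[j] */
next_config:
		;
	}

	return 0;
}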
@@ -949,25 +970,9 @@ static int tz1090_pdc_pinctrl_probe(struct platform_device *pdev)
tz1090_pdc_pinctrl_desc.npins = ARRAY_SIZE(tz1090_pdc_pins);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Missing MEM resource\n");
- return -ENODEV;
- }
-
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res),
- dev_name(&pdev->dev))) {
- dev_err(&pdev->dev,
- "Couldn't request MEM resource\n");
- return -ENODEV;
- }
-
- pmx->regs = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!pmx->regs) {
- dev_err(&pdev->dev, "Couldn't ioremap regs\n");
- return -ENODEV;
- }
+ pmx->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmx->regs))
+ return PTR_ERR(pmx->regs);
pmx->pctl = pinctrl_register(&tz1090_pdc_pinctrl_desc, &pdev->dev, pmx);
if (!pmx->pctl) {
diff --git a/drivers/pinctrl/pinctrl-tz1090.c b/drivers/pinctrl/pinctrl-tz1090.c
index 4edae08a0a6..bc9cd7a7602 100644
--- a/drivers/pinctrl/pinctrl-tz1090.c
+++ b/drivers/pinctrl/pinctrl-tz1090.c
@@ -1762,39 +1762,46 @@ static int tz1090_pinconf_get(struct pinctrl_dev *pctldev,
}
static int tz1090_pinconf_set(struct pinctrl_dev *pctldev,
- unsigned int pin, unsigned long config)
+ unsigned int pin, unsigned long *configs,
+ unsigned num_configs)
{
struct tz1090_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
- enum pin_config_param param = pinconf_to_config_param(config);
- unsigned int arg = pinconf_to_config_argument(config);
+ enum pin_config_param param;
+ unsigned int arg;
int ret;
u32 reg, width, mask, shift, val, tmp;
unsigned long flags;
+ int i;
- dev_dbg(pctldev->dev, "%s(pin=%s, config=%#lx)\n",
- __func__, tz1090_pins[pin].name, config);
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
- /* Get register information */
- ret = tz1090_pinconf_reg(pctldev, pin, param, true,
- &reg, &width, &mask, &shift, &val);
- if (ret < 0)
- return ret;
+ dev_dbg(pctldev->dev, "%s(pin=%s, config=%#lx)\n",
+ __func__, tz1090_pins[pin].name, configs[i]);
- /* Unpack argument and range check it */
- if (arg > 1) {
- dev_dbg(pctldev->dev, "%s: arg %u out of range\n",
- __func__, arg);
- return -EINVAL;
- }
+ /* Get register information */
+ ret = tz1090_pinconf_reg(pctldev, pin, param, true,
+ &reg, &width, &mask, &shift, &val);
+ if (ret < 0)
+ return ret;
- /* Write register field */
- __global_lock2(flags);
- tmp = pmx_read(pmx, reg);
- tmp &= ~mask;
- if (arg)
- tmp |= val << shift;
- pmx_write(pmx, tmp, reg);
- __global_unlock2(flags);
+ /* Unpack argument and range check it */
+ if (arg > 1) {
+ dev_dbg(pctldev->dev, "%s: arg %u out of range\n",
+ __func__, arg);
+ return -EINVAL;
+ }
+
+ /* Write register field */
+ __global_lock2(flags);
+ tmp = pmx_read(pmx, reg);
+ tmp &= ~mask;
+ if (arg)
+ tmp |= val << shift;
+ pmx_write(pmx, tmp, reg);
+ __global_unlock2(flags);
+ } /* for each config */
return 0;
}
@@ -1894,68 +1901,81 @@ static int tz1090_pinconf_group_get(struct pinctrl_dev *pctldev,
}
static int tz1090_pinconf_group_set(struct pinctrl_dev *pctldev,
- unsigned int group, unsigned long config)
+ unsigned int group, unsigned long *configs,
+ unsigned num_configs)
{
struct tz1090_pmx *pmx = pinctrl_dev_get_drvdata(pctldev);
const struct tz1090_pingroup *g;
- enum pin_config_param param = pinconf_to_config_param(config);
+ enum pin_config_param param;
unsigned int arg, pin, i;
const unsigned int *pit;
int ret;
u32 reg, width, mask, shift, val;
unsigned long flags;
const int *map;
+ int j;
if (group >= ARRAY_SIZE(tz1090_groups)) {
pin = group - ARRAY_SIZE(tz1090_groups);
- return tz1090_pinconf_set(pctldev, pin, config);
+ return tz1090_pinconf_set(pctldev, pin, configs, num_configs);
}
g = &tz1090_groups[group];
if (g->npins == 1) {
pin = g->pins[0];
- ret = tz1090_pinconf_set(pctldev, pin, config);
+ ret = tz1090_pinconf_set(pctldev, pin, configs, num_configs);
if (ret != -ENOTSUPP)
return ret;
}
- dev_dbg(pctldev->dev, "%s(group=%s, config=%#lx)\n",
- __func__, g->name, config);
+ for (j = 0; j < num_configs; j++) {
+ param = pinconf_to_config_param(configs[j]);
- /* Get register information */
- ret = tz1090_pinconf_group_reg(pctldev, g, param, true,
- &reg, &width, &mask, &shift, &map);
- if (ret < 0) {
- /*
- * Maybe we're trying to set a per-pin configuration of a group,
- * so do the pins one by one. This is mainly as a convenience.
- */
- for (i = 0, pit = g->pins; i < g->npins; ++i, ++pit) {
- ret = tz1090_pinconf_set(pctldev, *pit, config);
- if (ret)
- return ret;
- }
- return 0;
- }
+ dev_dbg(pctldev->dev, "%s(group=%s, config=%#lx)\n",
+ __func__, g->name, configs[j]);
- /* Unpack argument and map it to register value */
- arg = pinconf_to_config_argument(config);
- for (i = 0; i < BIT(width); ++i) {
- if (map[i] == arg || (map[i] == -EINVAL && !arg)) {
- /* Write register field */
- __global_lock2(flags);
- val = pmx_read(pmx, reg);
- val &= ~mask;
- val |= i << shift;
- pmx_write(pmx, val, reg);
- __global_unlock2(flags);
+ /* Get register information */
+ ret = tz1090_pinconf_group_reg(pctldev, g, param, true, &reg,
+ &width, &mask, &shift, &map);
+ if (ret < 0) {
+ /*
+ * Maybe we're trying to set a per-pin configuration
+ * of a group, so do the pins one by one. This is
+ * mainly as a convenience.
+ */
+ for (i = 0, pit = g->pins; i < g->npins; ++i, ++pit) {
+ ret = tz1090_pinconf_set(pctldev, *pit, configs,
+ num_configs);
+ if (ret)
+ return ret;
+ }
return 0;
}
- }
- dev_dbg(pctldev->dev, "%s: arg %u not supported\n",
- __func__, arg);
- return -EINVAL;
+ /* Unpack argument and map it to register value */
+ arg = pinconf_to_config_argument(configs[j]);
+ for (i = 0; i < BIT(width); ++i) {
+ if (map[i] == arg || (map[i] == -EINVAL && !arg)) {
+ /* Write register field */
+ __global_lock2(flags);
+ val = pmx_read(pmx, reg);
+ val &= ~mask;
+ val |= i << shift;
+ pmx_write(pmx, val, reg);
+ __global_unlock2(flags);
+ goto next_config;
+ }
+ }
+
+ dev_dbg(pctldev->dev, "%s: arg %u not supported\n",
+ __func__, arg);
+ return -EINVAL;
+
+next_config:
+ ;
+ } /* for each config */
+
+ return 0;
}
static struct pinconf_ops tz1090_pinconf_ops = {
@@ -1996,25 +2016,9 @@ static int tz1090_pinctrl_probe(struct platform_device *pdev)
tz1090_pinctrl_desc.npins = ARRAY_SIZE(tz1090_pins);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Missing MEM resource\n");
- return -ENODEV;
- }
-
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res),
- dev_name(&pdev->dev))) {
- dev_err(&pdev->dev,
- "Couldn't request MEM resource\n");
- return -ENODEV;
- }
-
- pmx->regs = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!pmx->regs) {
- dev_err(&pdev->dev, "Couldn't ioremap regs\n");
- return -ENODEV;
- }
+ pmx->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmx->regs))
+ return PTR_ERR(pmx->regs);
pmx->pctl = pinctrl_register(&tz1090_pinctrl_desc, &pdev->dev, pmx);
if (!pmx->pctl) {
diff --git a/drivers/pinctrl/pinctrl-u300.c b/drivers/pinctrl/pinctrl-u300.c
index 46a152d1735..209a01b8bd3 100644
--- a/drivers/pinctrl/pinctrl-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -1027,21 +1027,23 @@ static int u300_pin_config_get(struct pinctrl_dev *pctldev, unsigned pin,
}
static int u300_pin_config_set(struct pinctrl_dev *pctldev, unsigned pin,
- unsigned long config)
+ unsigned long *configs, unsigned num_configs)
{
struct pinctrl_gpio_range *range =
pinctrl_find_gpio_range_from_pin(pctldev, pin);
- int ret;
+ int ret, i;
if (!range)
return -EINVAL;
- /* Note: none of these configurations take any argument */
- ret = u300_gpio_config_set(range->gc,
- (pin - range->pin_base + range->base),
- pinconf_to_config_param(config));
- if (ret)
- return ret;
+ for (i = 0; i < num_configs; i++) {
+ /* Note: none of these configurations take any argument */
+ ret = u300_gpio_config_set(range->gc,
+ (pin - range->pin_base + range->base),
+ pinconf_to_config_param(configs[i]));
+ if (ret)
+ return ret;
+ } /* for each config */
return 0;
}
@@ -1075,9 +1077,6 @@ static int u300_pmx_probe(struct platform_device *pdev)
upmx->dev = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENOENT;
-
upmx->virtbase = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(upmx->virtbase))
return PTR_ERR(upmx->virtbase);
diff --git a/drivers/pinctrl/pinctrl-utils.c b/drivers/pinctrl/pinctrl-utils.c
new file mode 100644
index 00000000000..d77693f2cc1
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-utils.c
@@ -0,0 +1,142 @@
+/*
+ * Utility functions for implementing pin control drivers.
+ *
+ * Copyright (c) 2013, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+#include <linux/device.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include "core.h"
+#include "pinctrl-utils.h"
+
+int pinctrl_utils_reserve_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map **map, unsigned *reserved_maps,
+ unsigned *num_maps, unsigned reserve)
+{
+ unsigned old_num = *reserved_maps;
+ unsigned new_num = *num_maps + reserve;
+ struct pinctrl_map *new_map;
+
+ if (old_num >= new_num)
+ return 0;
+
+ new_map = krealloc(*map, sizeof(*new_map) * new_num, GFP_KERNEL);
+ if (!new_map) {
+ dev_err(pctldev->dev, "krealloc(map) failed\n");
+ return -ENOMEM;
+ }
+
+ memset(new_map + old_num, 0, (new_num - old_num) * sizeof(*new_map));
+
+ *map = new_map;
+ *reserved_maps = new_num;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinctrl_utils_reserve_map);
+
+int pinctrl_utils_add_map_mux(struct pinctrl_dev *pctldev,
+ struct pinctrl_map **map, unsigned *reserved_maps,
+ unsigned *num_maps, const char *group,
+ const char *function)
+{
+ if (WARN_ON(*num_maps == *reserved_maps))
+ return -ENOSPC;
+
+ (*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
+ (*map)[*num_maps].data.mux.group = group;
+ (*map)[*num_maps].data.mux.function = function;
+ (*num_maps)++;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinctrl_utils_add_map_mux);
+
+int pinctrl_utils_add_map_configs(struct pinctrl_dev *pctldev,
+ struct pinctrl_map **map, unsigned *reserved_maps,
+ unsigned *num_maps, const char *group,
+ unsigned long *configs, unsigned num_configs,
+ enum pinctrl_map_type type)
+{
+ unsigned long *dup_configs;
+
+ if (WARN_ON(*num_maps == *reserved_maps))
+ return -ENOSPC;
+
+ dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs),
+ GFP_KERNEL);
+ if (!dup_configs) {
+ dev_err(pctldev->dev, "kmemdup(configs) failed\n");
+ return -ENOMEM;
+ }
+
+ (*map)[*num_maps].type = type;
+ (*map)[*num_maps].data.configs.group_or_pin = group;
+ (*map)[*num_maps].data.configs.configs = dup_configs;
+ (*map)[*num_maps].data.configs.num_configs = num_configs;
+ (*num_maps)++;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinctrl_utils_add_map_configs);
+
+int pinctrl_utils_add_config(struct pinctrl_dev *pctldev,
+ unsigned long **configs, unsigned *num_configs,
+ unsigned long config)
+{
+ unsigned old_num = *num_configs;
+ unsigned new_num = old_num + 1;
+ unsigned long *new_configs;
+
+ new_configs = krealloc(*configs, sizeof(*new_configs) * new_num,
+ GFP_KERNEL);
+ if (!new_configs) {
+ dev_err(pctldev->dev, "krealloc(configs) failed\n");
+ return -ENOMEM;
+ }
+
+ new_configs[old_num] = config;
+
+ *configs = new_configs;
+ *num_configs = new_num;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pinctrl_utils_add_config);
+
+void pinctrl_utils_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map, unsigned num_maps)
+{
+ int i;
+
+ for (i = 0; i < num_maps; i++) {
+ switch (map[i].type) {
+ case PIN_MAP_TYPE_CONFIGS_GROUP:
+ case PIN_MAP_TYPE_CONFIGS_PIN:
+ kfree(map[i].data.configs.configs);
+ break;
+ default:
+ break;
+ }
+ }
+ kfree(map);
+}
+EXPORT_SYMBOL_GPL(pinctrl_utils_dt_free_map);
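Taken together, these helpers give every DT-aware pinctrl driver the same map lifecycle: reserve space, append mux and/or config entries, and free everything through pinctrl_utils_dt_free_map() (which the Tegra hunk above now plugs directly into .dt_free_map). A sketch of a converted driver's .dt_node_to_map(), with foo_* placeholders for the parts each driver still parses from the device tree itself:

#include <linux/of.h>
#include <linux/pinctrl/pinctrl.h>
#include "pinctrl-utils.h"

static int foo_dt_node_to_map(struct pinctrl_dev *pctldev,
			      struct device_node *np,
			      struct pinctrl_map **map, unsigned *num_maps)
{
	unsigned reserved_maps = 0;
	int ret;

	*map = NULL;
	*num_maps = 0;

	/* one mux entry in this example; real drivers count subnodes/pins */
	ret = pinctrl_utils_reserve_map(pctldev, map, &reserved_maps,
					num_maps, 1);
	if (ret < 0)
		return ret;

	/* group and function names would normally come from 'np' */
	ret = pinctrl_utils_add_map_mux(pctldev, map, &reserved_maps,
					num_maps, "foo_grp", "foo_func");
	if (ret < 0) {
		pinctrl_utils_dt_free_map(pctldev, *map, *num_maps);
		return ret;
	}

	return 0;
}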
diff --git a/drivers/pinctrl/pinctrl-utils.h b/drivers/pinctrl/pinctrl-utils.h
new file mode 100644
index 00000000000..d0ffe1ce200
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-utils.h
@@ -0,0 +1,43 @@
+/*
+ * Utility functions for implementing pin control drivers.
+ *
+ * Copyright (c) 2013, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ * 02111-1307, USA
+ */
+#ifndef __PINCTRL_UTILS_H__
+#define __PINCTRL_UTILS_H__
+
+int pinctrl_utils_reserve_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map **map, unsigned *reserved_maps,
+ unsigned *num_maps, unsigned reserve);
+int pinctrl_utils_add_map_mux(struct pinctrl_dev *pctldev,
+ struct pinctrl_map **map, unsigned *reserved_maps,
+ unsigned *num_maps, const char *group,
+ const char *function);
+int pinctrl_utils_add_map_configs(struct pinctrl_dev *pctldev,
+ struct pinctrl_map **map, unsigned *reserved_maps,
+ unsigned *num_maps, const char *group,
+ unsigned long *configs, unsigned num_configs,
+ enum pinctrl_map_type type);
+int pinctrl_utils_add_config(struct pinctrl_dev *pctldev,
+ unsigned long **configs, unsigned *num_configs,
+ unsigned long config);
+void pinctrl_utils_dt_free_map(struct pinctrl_dev *pctldev,
+ struct pinctrl_map *map, unsigned num_maps);
+
+#endif /* __PINCTRL_UTILS_H__ */
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index e92132c76a6..ed2d1ba69ce 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -102,6 +102,7 @@ enum xway_mux {
XWAY_MUX_EPHY,
XWAY_MUX_DFE,
XWAY_MUX_SDIO,
+ XWAY_MUX_GPHY,
XWAY_MUX_NONE = 0xffff,
};
@@ -109,12 +110,12 @@ static const struct ltq_mfp_pin xway_mfp[] = {
/* pin f0 f1 f2 f3 */
MFP_XWAY(GPIO0, GPIO, EXIN, NONE, TDM),
MFP_XWAY(GPIO1, GPIO, EXIN, NONE, NONE),
- MFP_XWAY(GPIO2, GPIO, CGU, EXIN, NONE),
+ MFP_XWAY(GPIO2, GPIO, CGU, EXIN, GPHY),
MFP_XWAY(GPIO3, GPIO, CGU, NONE, PCI),
MFP_XWAY(GPIO4, GPIO, STP, NONE, ASC),
- MFP_XWAY(GPIO5, GPIO, STP, NONE, NONE),
+ MFP_XWAY(GPIO5, GPIO, STP, NONE, GPHY),
MFP_XWAY(GPIO6, GPIO, STP, GPT, ASC),
- MFP_XWAY(GPIO7, GPIO, CGU, PCI, NONE),
+ MFP_XWAY(GPIO7, GPIO, CGU, PCI, GPHY),
MFP_XWAY(GPIO8, GPIO, CGU, NMI, NONE),
MFP_XWAY(GPIO9, GPIO, ASC, SPI, EXIN),
MFP_XWAY(GPIO10, GPIO, ASC, SPI, NONE),
@@ -151,10 +152,10 @@ static const struct ltq_mfp_pin xway_mfp[] = {
MFP_XWAY(GPIO41, GPIO, NONE, NONE, NONE),
MFP_XWAY(GPIO42, GPIO, MDIO, NONE, NONE),
MFP_XWAY(GPIO43, GPIO, MDIO, NONE, NONE),
- MFP_XWAY(GPIO44, GPIO, NONE, NONE, SIN),
- MFP_XWAY(GPIO45, GPIO, NONE, NONE, SIN),
+ MFP_XWAY(GPIO44, GPIO, NONE, GPHY, SIN),
+ MFP_XWAY(GPIO45, GPIO, NONE, GPHY, SIN),
MFP_XWAY(GPIO46, GPIO, NONE, NONE, EXIN),
- MFP_XWAY(GPIO47, GPIO, NONE, NONE, SIN),
+ MFP_XWAY(GPIO47, GPIO, NONE, GPHY, SIN),
MFP_XWAY(GPIO48, GPIO, EBU, NONE, NONE),
MFP_XWAY(GPIO49, GPIO, EBU, NONE, NONE),
MFP_XWAY(GPIO50, GPIO, NONE, NONE, NONE),
@@ -208,6 +209,13 @@ static const unsigned pins_stp[] = {GPIO4, GPIO5, GPIO6};
static const unsigned pins_nmi[] = {GPIO8};
static const unsigned pins_mdio[] = {GPIO42, GPIO43};
+static const unsigned pins_gphy0_led0[] = {GPIO5};
+static const unsigned pins_gphy0_led1[] = {GPIO7};
+static const unsigned pins_gphy0_led2[] = {GPIO2};
+static const unsigned pins_gphy1_led0[] = {GPIO44};
+static const unsigned pins_gphy1_led1[] = {GPIO45};
+static const unsigned pins_gphy1_led2[] = {GPIO47};
+
static const unsigned pins_ebu_a24[] = {GPIO13};
static const unsigned pins_ebu_clk[] = {GPIO21};
static const unsigned pins_ebu_cs1[] = {GPIO23};
@@ -322,6 +330,12 @@ static const struct ltq_pin_group xway_grps[] = {
GRP_MUX("gnt4", PCI, pins_pci_gnt4),
GRP_MUX("req4", PCI, pins_pci_gnt4),
GRP_MUX("mdio", MDIO, pins_mdio),
+ GRP_MUX("gphy0 led0", GPHY, pins_gphy0_led0),
+ GRP_MUX("gphy0 led1", GPHY, pins_gphy0_led1),
+ GRP_MUX("gphy0 lde2", GPHY, pins_gphy0_led2),
+ GRP_MUX("gphy1 led0", GPHY, pins_gphy1_led0),
+ GRP_MUX("gphy1 led1", GPHY, pins_gphy1_led1),
+ GRP_MUX("gphy1 lde2", GPHY, pins_gphy1_led2),
};
static const struct ltq_pin_group ase_grps[] = {
@@ -365,6 +379,9 @@ static const char * const xway_nmi_grps[] = {"nmi"};
/* ar9/vr9/gr9 */
static const char * const xrx_mdio_grps[] = {"mdio"};
+static const char * const xrx_gphy_grps[] = {"gphy0 led0", "gphy0 led1",
+ "gphy0 led2", "gphy1 led0",
+ "gphy1 led1", "gphy1 led2"};
static const char * const xrx_ebu_grps[] = {"ebu a23", "ebu a24",
"ebu a25", "ebu cs1",
"ebu wait", "ebu clk",
@@ -414,6 +431,7 @@ static const struct ltq_pmx_func xrx_funcs[] = {
{"pci", ARRAY_AND_SIZE(xrx_pci_grps)},
{"ebu", ARRAY_AND_SIZE(xrx_ebu_grps)},
{"mdio", ARRAY_AND_SIZE(xrx_mdio_grps)},
+ {"gphy", ARRAY_AND_SIZE(xrx_gphy_grps)},
};
static const struct ltq_pmx_func ase_funcs[] = {
@@ -481,74 +499,101 @@ static int xway_pinconf_get(struct pinctrl_dev *pctldev,
static int xway_pinconf_set(struct pinctrl_dev *pctldev,
unsigned pin,
- unsigned long config)
+ unsigned long *configs,
+ unsigned num_configs)
{
struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctldev);
- enum ltq_pinconf_param param = LTQ_PINCONF_UNPACK_PARAM(config);
- int arg = LTQ_PINCONF_UNPACK_ARG(config);
+ enum ltq_pinconf_param param;
+ int arg;
int port = PORT(pin);
u32 reg;
+ int i;
+
+ for (i = 0; i < num_configs; i++) {
+ param = LTQ_PINCONF_UNPACK_PARAM(configs[i]);
+ arg = LTQ_PINCONF_UNPACK_ARG(configs[i]);
+
+ switch (param) {
+ case LTQ_PINCONF_PARAM_OPEN_DRAIN:
+ if (port == PORT3)
+ reg = GPIO3_OD;
+ else
+ reg = GPIO_OD(pin);
+ if (arg == 0)
+ gpio_setbit(info->membase[0],
+ reg,
+ PORT_PIN(pin));
+ else
+ gpio_clearbit(info->membase[0],
+ reg,
+ PORT_PIN(pin));
+ break;
- switch (param) {
- case LTQ_PINCONF_PARAM_OPEN_DRAIN:
- if (port == PORT3)
- reg = GPIO3_OD;
- else
- reg = GPIO_OD(pin);
- if (arg == 0)
+ case LTQ_PINCONF_PARAM_PULL:
+ if (port == PORT3)
+ reg = GPIO3_PUDEN;
+ else
+ reg = GPIO_PUDEN(pin);
+ if (arg == 0) {
+ gpio_clearbit(info->membase[0],
+ reg,
+ PORT_PIN(pin));
+ break;
+ }
gpio_setbit(info->membase[0], reg, PORT_PIN(pin));
- else
- gpio_clearbit(info->membase[0], reg, PORT_PIN(pin));
- break;
- case LTQ_PINCONF_PARAM_PULL:
- if (port == PORT3)
- reg = GPIO3_PUDEN;
- else
- reg = GPIO_PUDEN(pin);
- if (arg == 0) {
- gpio_clearbit(info->membase[0], reg, PORT_PIN(pin));
+ if (port == PORT3)
+ reg = GPIO3_PUDSEL;
+ else
+ reg = GPIO_PUDSEL(pin);
+ if (arg == 1)
+ gpio_clearbit(info->membase[0],
+ reg,
+ PORT_PIN(pin));
+ else if (arg == 2)
+ gpio_setbit(info->membase[0],
+ reg,
+ PORT_PIN(pin));
+ else
+ dev_err(pctldev->dev,
+ "Invalid pull value %d\n", arg);
break;
- }
- gpio_setbit(info->membase[0], reg, PORT_PIN(pin));
- if (port == PORT3)
- reg = GPIO3_PUDSEL;
- else
- reg = GPIO_PUDSEL(pin);
- if (arg == 1)
- gpio_clearbit(info->membase[0], reg, PORT_PIN(pin));
- else if (arg == 2)
- gpio_setbit(info->membase[0], reg, PORT_PIN(pin));
- else
- dev_err(pctldev->dev, "Invalid pull value %d\n", arg);
- break;
+ case LTQ_PINCONF_PARAM_OUTPUT:
+ reg = GPIO_DIR(pin);
+ if (arg == 0)
+ gpio_clearbit(info->membase[0],
+ reg,
+ PORT_PIN(pin));
+ else
+ gpio_setbit(info->membase[0],
+ reg,
+ PORT_PIN(pin));
+ break;
- case LTQ_PINCONF_PARAM_OUTPUT:
- reg = GPIO_DIR(pin);
- if (arg == 0)
- gpio_clearbit(info->membase[0], reg, PORT_PIN(pin));
- else
- gpio_setbit(info->membase[0], reg, PORT_PIN(pin));
- break;
+ default:
+ dev_err(pctldev->dev,
+ "Invalid config param %04x\n", param);
+ return -ENOTSUPP;
+ }
+ } /* for each config */
- default:
- dev_err(pctldev->dev, "Invalid config param %04x\n", param);
- return -ENOTSUPP;
- }
return 0;
}
int xway_pinconf_group_set(struct pinctrl_dev *pctldev,
unsigned selector,
- unsigned long config)
+ unsigned long *configs,
+ unsigned num_configs)
{
struct ltq_pinmux_info *info = pinctrl_dev_get_drvdata(pctldev);
int i, ret = 0;
for (i = 0; i < info->grps[selector].npins && !ret; i++)
ret = xway_pinconf_set(pctldev,
- info->grps[selector].pins[i], config);
+ info->grps[selector].pins[i],
+ configs,
+ num_configs);
return ret;
}
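The hunk above switches the xway pinconf callbacks from a single packed config value to an array of configs. A minimal sketch of the same callback shape for a hypothetical driver, where FOO_UNPACK_PARAM(), FOO_UNPACK_ARG() and foo_apply_one_config() stand in for driver-specific helpers:

static int foo_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
			   unsigned long *configs, unsigned num_configs)
{
	int i;

	for (i = 0; i < num_configs; i++) {
		/* Each array entry still packs one (param, arg) pair. */
		unsigned param = FOO_UNPACK_PARAM(configs[i]);
		unsigned arg = FOO_UNPACK_ARG(configs[i]);

		/* Apply one setting; stop at the first unsupported param. */
		if (foo_apply_one_config(pctldev, pin, param, arg))
			return -ENOTSUPP;
	}

	return 0;
}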
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 88cc5095d0c..9d144a263dc 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -400,10 +400,14 @@ int pinmux_enable_setting(struct pinctrl_setting const *setting)
ret = pctlops->get_group_pins(pctldev, setting->data.mux.group,
&pins, &num_pins);
if (ret) {
+ const char *gname;
+
/* errors only affect debug data, so just warn */
+ gname = pctlops->get_group_name(pctldev,
+ setting->data.mux.group);
dev_warn(pctldev->dev,
- "could not get pins for group selector %d\n",
- setting->data.mux.group);
+ "could not get pins for group %s\n",
+ gname);
num_pins = 0;
}
@@ -411,9 +415,18 @@ int pinmux_enable_setting(struct pinctrl_setting const *setting)
for (i = 0; i < num_pins; i++) {
ret = pin_request(pctldev, pins[i], setting->dev_name, NULL);
if (ret) {
+ const char *gname;
+ const char *pname;
+
+ desc = pin_desc_get(pctldev, pins[i]);
+ pname = desc ? desc->name : "non-existing";
+ gname = pctlops->get_group_name(pctldev,
+ setting->data.mux.group);
dev_err(pctldev->dev,
- "could not request pin %d on device %s\n",
- pins[i], pinctrl_dev_get_name(pctldev));
+ "could not request pin %d (%s) from group %s "
+ " on device %s\n",
+ pins[i], pname, gname,
+ pinctrl_dev_get_name(pctldev));
goto err_pin_request;
}
}
@@ -466,10 +479,14 @@ void pinmux_disable_setting(struct pinctrl_setting const *setting)
ret = pctlops->get_group_pins(pctldev, setting->data.mux.group,
&pins, &num_pins);
if (ret) {
+ const char *gname;
+
/* errors only affect debug data, so just warn */
+ gname = pctlops->get_group_name(pctldev,
+ setting->data.mux.group);
dev_warn(pctldev->dev,
- "could not get pins for group selector %d\n",
- setting->data.mux.group);
+ "could not get pins for group %s\n",
+ gname);
num_pins = 0;
}
@@ -482,12 +499,24 @@ void pinmux_disable_setting(struct pinctrl_setting const *setting)
pins[i]);
continue;
}
- desc->mux_setting = NULL;
- }
+ if (desc->mux_setting == &(setting->data.mux)) {
+ desc->mux_setting = NULL;
+ /* And release the pin */
+ pin_free(pctldev, pins[i], NULL);
+ } else {
+ const char *gname;
+ const char *pname;
- /* And release the pins */
- for (i = 0; i < num_pins; i++)
- pin_free(pctldev, pins[i], NULL);
+ pname = desc ? desc->name : "non-existing";
+ gname = pctlops->get_group_name(pctldev,
+ setting->data.mux.group);
+ dev_warn(pctldev->dev,
+ "not freeing pin %d (%s) as part of "
+ "deactivating group %s - it is already "
+ "used for some other setting",
+ pins[i], pname, gname);
+ }
+ }
if (ops->disable)
ops->disable(pctldev, setting->data.mux.func, setting->data.mux.group);
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index f3fc66b2437..738f14f65cf 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -82,24 +82,20 @@ int sh_pfc_get_pin_index(struct sh_pfc *pfc, unsigned int pin)
unsigned int offset;
unsigned int i;
- if (pfc->info->ranges == NULL)
- return pin;
-
- for (i = 0, offset = 0; i < pfc->info->nr_ranges; ++i) {
- const struct pinmux_range *range = &pfc->info->ranges[i];
+ for (i = 0, offset = 0; i < pfc->nr_ranges; ++i) {
+ const struct sh_pfc_pin_range *range = &pfc->ranges[i];
if (pin <= range->end)
- return pin >= range->begin
- ? offset + pin - range->begin : -1;
+ return pin >= range->start
+ ? offset + pin - range->start : -1;
- offset += range->end - range->begin + 1;
+ offset += range->end - range->start + 1;
}
return -EINVAL;
}
-static int sh_pfc_enum_in_range(pinmux_enum_t enum_id,
- const struct pinmux_range *r)
+static int sh_pfc_enum_in_range(u16 enum_id, const struct pinmux_range *r)
{
if (enum_id < r->begin)
return 0;
@@ -194,7 +190,7 @@ static void sh_pfc_write_config_reg(struct sh_pfc *pfc,
sh_pfc_write_raw_reg(mapped_reg, crp->reg_width, data);
}
-static int sh_pfc_get_config_reg(struct sh_pfc *pfc, pinmux_enum_t enum_id,
+static int sh_pfc_get_config_reg(struct sh_pfc *pfc, u16 enum_id,
const struct pinmux_cfg_reg **crp, int *fieldp,
int *valuep)
{
@@ -238,10 +234,10 @@ static int sh_pfc_get_config_reg(struct sh_pfc *pfc, pinmux_enum_t enum_id,
return -EINVAL;
}
-static int sh_pfc_mark_to_enum(struct sh_pfc *pfc, pinmux_enum_t mark, int pos,
- pinmux_enum_t *enum_idp)
+static int sh_pfc_mark_to_enum(struct sh_pfc *pfc, u16 mark, int pos,
+ u16 *enum_idp)
{
- const pinmux_enum_t *data = pfc->info->gpio_data;
+ const u16 *data = pfc->info->gpio_data;
int k;
if (pos) {
@@ -264,7 +260,7 @@ static int sh_pfc_mark_to_enum(struct sh_pfc *pfc, pinmux_enum_t mark, int pos,
int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type)
{
const struct pinmux_cfg_reg *cr = NULL;
- pinmux_enum_t enum_id;
+ u16 enum_id;
const struct pinmux_range *range;
int in_range, pos, field, value;
int ret;
@@ -283,14 +279,6 @@ int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type)
range = &pfc->info->input;
break;
- case PINMUX_TYPE_INPUT_PULLUP:
- range = &pfc->info->input_pu;
- break;
-
- case PINMUX_TYPE_INPUT_PULLDOWN:
- range = &pfc->info->input_pd;
- break;
-
default:
return -EINVAL;
}
@@ -350,6 +338,67 @@ int sh_pfc_config_mux(struct sh_pfc *pfc, unsigned mark, int pinmux_type)
return 0;
}
+static int sh_pfc_init_ranges(struct sh_pfc *pfc)
+{
+ struct sh_pfc_pin_range *range;
+ unsigned int nr_ranges;
+ unsigned int i;
+
+ if (pfc->info->pins[0].pin == (u16)-1) {
+ /* Pin number -1 denotes that the SoC doesn't report pin numbers
+ * in its pin arrays yet. Consider the pin number range as
+ * contiguous and allocate a single range.
+ */
+ pfc->nr_ranges = 1;
+ pfc->ranges = devm_kzalloc(pfc->dev, sizeof(*pfc->ranges),
+ GFP_KERNEL);
+ if (pfc->ranges == NULL)
+ return -ENOMEM;
+
+ pfc->ranges->start = 0;
+ pfc->ranges->end = pfc->info->nr_pins - 1;
+ pfc->nr_gpio_pins = pfc->info->nr_pins;
+
+ return 0;
+ }
+
+ /* Count, allocate and fill the ranges. The PFC SoC data pins array must
+ * be sorted by pin numbers, and pins without a GPIO port must come
+ * last.
+ */
+ for (i = 1, nr_ranges = 1; i < pfc->info->nr_pins; ++i) {
+ if (pfc->info->pins[i-1].pin != pfc->info->pins[i].pin - 1)
+ nr_ranges++;
+ }
+
+ pfc->nr_ranges = nr_ranges;
+ pfc->ranges = devm_kzalloc(pfc->dev, sizeof(*pfc->ranges) * nr_ranges,
+ GFP_KERNEL);
+ if (pfc->ranges == NULL)
+ return -ENOMEM;
+
+ range = pfc->ranges;
+ range->start = pfc->info->pins[0].pin;
+
+ for (i = 1; i < pfc->info->nr_pins; ++i) {
+ if (pfc->info->pins[i-1].pin == pfc->info->pins[i].pin - 1)
+ continue;
+
+ range->end = pfc->info->pins[i-1].pin;
+ if (!(pfc->info->pins[i-1].configs & SH_PFC_PIN_CFG_NO_GPIO))
+ pfc->nr_gpio_pins = range->end + 1;
+
+ range++;
+ range->start = pfc->info->pins[i].pin;
+ }
+
+ range->end = pfc->info->pins[i-1].pin;
+ if (!(pfc->info->pins[i-1].configs & SH_PFC_PIN_CFG_NO_GPIO))
+ pfc->nr_gpio_pins = range->end + 1;
+
+ return 0;
+}
+
#ifdef CONFIG_OF
static const struct of_device_id sh_pfc_of_table[] = {
#ifdef CONFIG_PINCTRL_PFC_R8A73A4
@@ -440,6 +489,10 @@ static int sh_pfc_probe(struct platform_device *pdev)
pinctrl_provide_dummies();
+ ret = sh_pfc_init_ranges(pfc);
+ if (ret < 0)
+ return ret;
+
/*
* Initialize pinctrl bindings first
*/
@@ -486,8 +539,6 @@ static int sh_pfc_remove(struct platform_device *pdev)
if (pfc->info->ops && pfc->info->ops->exit)
pfc->info->ops->exit(pfc);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
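To illustrate the range splitting performed by sh_pfc_init_ranges() above, a small standalone C sketch (user space, not kernel code) that applies the same break-on-gap rule to a made-up sorted pin list:

#include <stdio.h>

int main(void)
{
	/* Sample sorted pin numbers with two gaps, as in the SoC data. */
	const unsigned pins[] = { 0, 1, 2, 30, 31, 64 };
	const unsigned n = sizeof(pins) / sizeof(pins[0]);
	unsigned start = pins[0];
	unsigned i;

	for (i = 1; i <= n; i++) {
		/* A gap (or the end of the array) closes the current range. */
		if (i == n || pins[i] != pins[i - 1] + 1) {
			printf("range [%u-%u]\n", start, pins[i - 1]);
			if (i < n)
				start = pins[i];
		}
	}
	return 0;	/* prints [0-2], [30-31], [64-64] */
}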
diff --git a/drivers/pinctrl/sh-pfc/core.h b/drivers/pinctrl/sh-pfc/core.h
index f02ba1dde3a..a1b23762ac9 100644
--- a/drivers/pinctrl/sh-pfc/core.h
+++ b/drivers/pinctrl/sh-pfc/core.h
@@ -25,6 +25,11 @@ struct sh_pfc_window {
struct sh_pfc_chip;
struct sh_pfc_pinctrl;
+struct sh_pfc_pin_range {
+ u16 start;
+ u16 end;
+};
+
struct sh_pfc {
struct device *dev;
const struct sh_pfc_soc_info *info;
@@ -34,7 +39,10 @@ struct sh_pfc {
unsigned int num_windows;
struct sh_pfc_window *window;
- unsigned int nr_pins;
+ struct sh_pfc_pin_range *ranges;
+ unsigned int nr_ranges;
+
+ unsigned int nr_gpio_pins;
struct sh_pfc_chip *gpio;
struct sh_pfc_chip *func;
diff --git a/drivers/pinctrl/sh-pfc/gpio.c b/drivers/pinctrl/sh-pfc/gpio.c
index d37efa7dcf9..04bf52b64fb 100644
--- a/drivers/pinctrl/sh-pfc/gpio.c
+++ b/drivers/pinctrl/sh-pfc/gpio.c
@@ -48,11 +48,11 @@ static struct sh_pfc *gpio_to_pfc(struct gpio_chip *gc)
return gpio_to_pfc_chip(gc)->pfc;
}
-static void gpio_get_data_reg(struct sh_pfc_chip *chip, unsigned int gpio,
+static void gpio_get_data_reg(struct sh_pfc_chip *chip, unsigned int offset,
struct sh_pfc_gpio_data_reg **reg,
unsigned int *bit)
{
- int idx = sh_pfc_get_pin_index(chip->pfc, gpio);
+ int idx = sh_pfc_get_pin_index(chip->pfc, offset);
struct sh_pfc_gpio_pin *gpio_pin = &chip->pins[idx];
*reg = &chip->regs[gpio_pin->dreg];
@@ -76,11 +76,11 @@ static void gpio_write_data_reg(struct sh_pfc_chip *chip,
sh_pfc_write_raw_reg(mem, dreg->reg_width, value);
}
-static void gpio_setup_data_reg(struct sh_pfc_chip *chip, unsigned gpio)
+static void gpio_setup_data_reg(struct sh_pfc_chip *chip, unsigned idx)
{
struct sh_pfc *pfc = chip->pfc;
- struct sh_pfc_gpio_pin *gpio_pin = &chip->pins[gpio];
- const struct sh_pfc_pin *pin = &pfc->info->pins[gpio];
+ struct sh_pfc_gpio_pin *gpio_pin = &chip->pins[idx];
+ const struct sh_pfc_pin *pin = &pfc->info->pins[idx];
const struct pinmux_data_reg *dreg;
unsigned int bit;
unsigned int i;
@@ -224,8 +224,8 @@ static int gpio_pin_setup(struct sh_pfc_chip *chip)
struct gpio_chip *gc = &chip->gpio_chip;
int ret;
- chip->pins = devm_kzalloc(pfc->dev, pfc->nr_pins * sizeof(*chip->pins),
- GFP_KERNEL);
+ chip->pins = devm_kzalloc(pfc->dev, pfc->info->nr_pins *
+ sizeof(*chip->pins), GFP_KERNEL);
if (chip->pins == NULL)
return -ENOMEM;
@@ -245,7 +245,7 @@ static int gpio_pin_setup(struct sh_pfc_chip *chip)
gc->dev = pfc->dev;
gc->owner = THIS_MODULE;
gc->base = 0;
- gc->ngpio = pfc->nr_pins;
+ gc->ngpio = pfc->nr_gpio_pins;
return 0;
}
@@ -293,7 +293,7 @@ static int gpio_function_setup(struct sh_pfc_chip *chip)
gc->label = pfc->info->name;
gc->owner = THIS_MODULE;
- gc->base = pfc->nr_pins;
+ gc->base = pfc->nr_gpio_pins;
gc->ngpio = pfc->info->nr_func_gpios;
return 0;
@@ -334,10 +334,7 @@ sh_pfc_add_gpiochip(struct sh_pfc *pfc, int(*setup)(struct sh_pfc_chip *),
int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
{
- const struct pinmux_range *ranges;
- struct pinmux_range def_range;
struct sh_pfc_chip *chip;
- unsigned int nr_ranges;
unsigned int i;
int ret;
@@ -367,24 +364,20 @@ int sh_pfc_register_gpiochip(struct sh_pfc *pfc)
pfc->gpio = chip;
- /* Register the GPIO to pin mappings. */
- if (pfc->info->ranges == NULL) {
- def_range.begin = 0;
- def_range.end = pfc->info->nr_pins - 1;
- ranges = &def_range;
- nr_ranges = 1;
- } else {
- ranges = pfc->info->ranges;
- nr_ranges = pfc->info->nr_ranges;
- }
+ /* Register the GPIO to pin mappings. As pins with GPIO ports must come
+ * first in the ranges, skip the pins without GPIO ports by stopping at
+ * the first range that contains such a pin.
+ */
+ for (i = 0; i < pfc->nr_ranges; ++i) {
+ const struct sh_pfc_pin_range *range = &pfc->ranges[i];
- for (i = 0; i < nr_ranges; ++i) {
- const struct pinmux_range *range = &ranges[i];
+ if (range->start >= pfc->nr_gpio_pins)
+ break;
ret = gpiochip_add_pin_range(&chip->gpio_chip,
dev_name(pfc->dev),
- range->begin, range->begin,
- range->end - range->begin + 1);
+ range->start, range->start,
+ range->end - range->start + 1);
if (ret < 0)
return ret;
}
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
index 82bf6aba007..d25fd4ea0a1 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a73a4.c
@@ -21,85 +21,84 @@
#include <linux/kernel.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <mach/irqs.h>
-#include <mach/r8a73a4.h>
#include "core.h"
#include "sh_pfc.h"
#define CPU_ALL_PORT(fn, pfx, sfx) \
/* Port0 - Port30 */ \
- PORT_10(fn, pfx, sfx), \
- PORT_10(fn, pfx##1, sfx), \
- PORT_10(fn, pfx##2, sfx), \
- PORT_1(fn, pfx##30, sfx), \
+ PORT_10(0, fn, pfx, sfx), \
+ PORT_10(10, fn, pfx##1, sfx), \
+ PORT_10(20, fn, pfx##2, sfx), \
+ PORT_1(30, fn, pfx##30, sfx), \
/* Port32 - Port40 */ \
- PORT_1(fn, pfx##32, sfx), PORT_1(fn, pfx##33, sfx), \
- PORT_1(fn, pfx##34, sfx), PORT_1(fn, pfx##35, sfx), \
- PORT_1(fn, pfx##36, sfx), PORT_1(fn, pfx##37, sfx), \
- PORT_1(fn, pfx##38, sfx), PORT_1(fn, pfx##39, sfx), \
- PORT_1(fn, pfx##40, sfx), \
+ PORT_1(32, fn, pfx##32, sfx), PORT_1(33, fn, pfx##33, sfx), \
+ PORT_1(34, fn, pfx##34, sfx), PORT_1(35, fn, pfx##35, sfx), \
+ PORT_1(36, fn, pfx##36, sfx), PORT_1(37, fn, pfx##37, sfx), \
+ PORT_1(38, fn, pfx##38, sfx), PORT_1(39, fn, pfx##39, sfx), \
+ PORT_1(40, fn, pfx##40, sfx), \
/* Port64 - Port85 */ \
- PORT_1(fn, pfx##64, sfx), PORT_1(fn, pfx##65, sfx), \
- PORT_1(fn, pfx##66, sfx), PORT_1(fn, pfx##67, sfx), \
- PORT_1(fn, pfx##68, sfx), PORT_1(fn, pfx##69, sfx), \
- PORT_10(fn, pfx##7, sfx), \
- PORT_1(fn, pfx##80, sfx), PORT_1(fn, pfx##81, sfx), \
- PORT_1(fn, pfx##82, sfx), PORT_1(fn, pfx##83, sfx), \
- PORT_1(fn, pfx##84, sfx), PORT_1(fn, pfx##85, sfx), \
+ PORT_1(64, fn, pfx##64, sfx), PORT_1(65, fn, pfx##65, sfx), \
+ PORT_1(66, fn, pfx##66, sfx), PORT_1(67, fn, pfx##67, sfx), \
+ PORT_1(68, fn, pfx##68, sfx), PORT_1(69, fn, pfx##69, sfx), \
+ PORT_10(70, fn, pfx##7, sfx), \
+ PORT_1(80, fn, pfx##80, sfx), PORT_1(81, fn, pfx##81, sfx), \
+ PORT_1(82, fn, pfx##82, sfx), PORT_1(83, fn, pfx##83, sfx), \
+ PORT_1(84, fn, pfx##84, sfx), PORT_1(85, fn, pfx##85, sfx), \
/* Port96 - Port126 */ \
- PORT_1(fn, pfx##96, sfx), PORT_1(fn, pfx##97, sfx), \
- PORT_1(fn, pfx##98, sfx), PORT_1(fn, pfx##99, sfx), \
- PORT_10(fn, pfx##10, sfx), \
- PORT_10(fn, pfx##11, sfx), \
- PORT_1(fn, pfx##120, sfx), PORT_1(fn, pfx##121, sfx), \
- PORT_1(fn, pfx##122, sfx), PORT_1(fn, pfx##123, sfx), \
- PORT_1(fn, pfx##124, sfx), PORT_1(fn, pfx##125, sfx), \
- PORT_1(fn, pfx##126, sfx), \
+ PORT_1(96, fn, pfx##96, sfx), PORT_1(97, fn, pfx##97, sfx), \
+ PORT_1(98, fn, pfx##98, sfx), PORT_1(99, fn, pfx##99, sfx), \
+ PORT_10(100, fn, pfx##10, sfx), \
+ PORT_10(110, fn, pfx##11, sfx), \
+ PORT_1(120, fn, pfx##120, sfx), PORT_1(121, fn, pfx##121, sfx), \
+ PORT_1(122, fn, pfx##122, sfx), PORT_1(123, fn, pfx##123, sfx), \
+ PORT_1(124, fn, pfx##124, sfx), PORT_1(125, fn, pfx##125, sfx), \
+ PORT_1(126, fn, pfx##126, sfx), \
/* Port128 - Port134 */ \
- PORT_1(fn, pfx##128, sfx), PORT_1(fn, pfx##129, sfx), \
- PORT_1(fn, pfx##130, sfx), PORT_1(fn, pfx##131, sfx), \
- PORT_1(fn, pfx##132, sfx), PORT_1(fn, pfx##133, sfx), \
- PORT_1(fn, pfx##134, sfx), \
+ PORT_1(128, fn, pfx##128, sfx), PORT_1(129, fn, pfx##129, sfx), \
+ PORT_1(130, fn, pfx##130, sfx), PORT_1(131, fn, pfx##131, sfx), \
+ PORT_1(132, fn, pfx##132, sfx), PORT_1(133, fn, pfx##133, sfx), \
+ PORT_1(134, fn, pfx##134, sfx), \
/* Port160 - Port178 */ \
- PORT_10(fn, pfx##16, sfx), \
- PORT_1(fn, pfx##170, sfx), PORT_1(fn, pfx##171, sfx), \
- PORT_1(fn, pfx##172, sfx), PORT_1(fn, pfx##173, sfx), \
- PORT_1(fn, pfx##174, sfx), PORT_1(fn, pfx##175, sfx), \
- PORT_1(fn, pfx##176, sfx), PORT_1(fn, pfx##177, sfx), \
- PORT_1(fn, pfx##178, sfx), \
+ PORT_10(160, fn, pfx##16, sfx), \
+ PORT_1(170, fn, pfx##170, sfx), PORT_1(171, fn, pfx##171, sfx), \
+ PORT_1(172, fn, pfx##172, sfx), PORT_1(173, fn, pfx##173, sfx), \
+ PORT_1(174, fn, pfx##174, sfx), PORT_1(175, fn, pfx##175, sfx), \
+ PORT_1(176, fn, pfx##176, sfx), PORT_1(177, fn, pfx##177, sfx), \
+ PORT_1(178, fn, pfx##178, sfx), \
/* Port192 - Port222 */ \
- PORT_1(fn, pfx##192, sfx), PORT_1(fn, pfx##193, sfx), \
- PORT_1(fn, pfx##194, sfx), PORT_1(fn, pfx##195, sfx), \
- PORT_1(fn, pfx##196, sfx), PORT_1(fn, pfx##197, sfx), \
- PORT_1(fn, pfx##198, sfx), PORT_1(fn, pfx##199, sfx), \
- PORT_10(fn, pfx##20, sfx), \
- PORT_10(fn, pfx##21, sfx), \
- PORT_1(fn, pfx##220, sfx), PORT_1(fn, pfx##221, sfx), \
- PORT_1(fn, pfx##222, sfx), \
+ PORT_1(192, fn, pfx##192, sfx), PORT_1(193, fn, pfx##193, sfx), \
+ PORT_1(194, fn, pfx##194, sfx), PORT_1(195, fn, pfx##195, sfx), \
+ PORT_1(196, fn, pfx##196, sfx), PORT_1(197, fn, pfx##197, sfx), \
+ PORT_1(198, fn, pfx##198, sfx), PORT_1(199, fn, pfx##199, sfx), \
+ PORT_10(200, fn, pfx##20, sfx), \
+ PORT_10(210, fn, pfx##21, sfx), \
+ PORT_1(220, fn, pfx##220, sfx), PORT_1(221, fn, pfx##221, sfx), \
+ PORT_1(222, fn, pfx##222, sfx), \
/* Port224 - Port250 */ \
- PORT_1(fn, pfx##224, sfx), PORT_1(fn, pfx##225, sfx), \
- PORT_1(fn, pfx##226, sfx), PORT_1(fn, pfx##227, sfx), \
- PORT_1(fn, pfx##228, sfx), PORT_1(fn, pfx##229, sfx), \
- PORT_10(fn, pfx##23, sfx), \
- PORT_10(fn, pfx##24, sfx), \
- PORT_1(fn, pfx##250, sfx), \
+ PORT_1(224, fn, pfx##224, sfx), PORT_1(225, fn, pfx##225, sfx), \
+ PORT_1(226, fn, pfx##226, sfx), PORT_1(227, fn, pfx##227, sfx), \
+ PORT_1(228, fn, pfx##228, sfx), PORT_1(229, fn, pfx##229, sfx), \
+ PORT_10(230, fn, pfx##23, sfx), \
+ PORT_10(240, fn, pfx##24, sfx), \
+ PORT_1(250, fn, pfx##250, sfx), \
/* Port256 - Port283 */ \
- PORT_1(fn, pfx##256, sfx), PORT_1(fn, pfx##257, sfx), \
- PORT_1(fn, pfx##258, sfx), PORT_1(fn, pfx##259, sfx), \
- PORT_10(fn, pfx##26, sfx), \
- PORT_10(fn, pfx##27, sfx), \
- PORT_1(fn, pfx##280, sfx), PORT_1(fn, pfx##281, sfx), \
- PORT_1(fn, pfx##282, sfx), PORT_1(fn, pfx##283, sfx), \
+ PORT_1(256, fn, pfx##256, sfx), PORT_1(257, fn, pfx##257, sfx), \
+ PORT_1(258, fn, pfx##258, sfx), PORT_1(259, fn, pfx##259, sfx), \
+ PORT_10(260, fn, pfx##26, sfx), \
+ PORT_10(270, fn, pfx##27, sfx), \
+ PORT_1(280, fn, pfx##280, sfx), PORT_1(281, fn, pfx##281, sfx), \
+ PORT_1(282, fn, pfx##282, sfx), PORT_1(283, fn, pfx##283, sfx), \
/* Port288 - Port308 */ \
- PORT_1(fn, pfx##288, sfx), PORT_1(fn, pfx##289, sfx), \
- PORT_10(fn, pfx##29, sfx), \
- PORT_1(fn, pfx##300, sfx), PORT_1(fn, pfx##301, sfx), \
- PORT_1(fn, pfx##302, sfx), PORT_1(fn, pfx##303, sfx), \
- PORT_1(fn, pfx##304, sfx), PORT_1(fn, pfx##305, sfx), \
- PORT_1(fn, pfx##306, sfx), PORT_1(fn, pfx##307, sfx), \
- PORT_1(fn, pfx##308, sfx), \
+ PORT_1(288, fn, pfx##288, sfx), PORT_1(289, fn, pfx##289, sfx), \
+ PORT_10(290, fn, pfx##29, sfx), \
+ PORT_1(300, fn, pfx##300, sfx), PORT_1(301, fn, pfx##301, sfx), \
+ PORT_1(302, fn, pfx##302, sfx), PORT_1(303, fn, pfx##303, sfx), \
+ PORT_1(304, fn, pfx##304, sfx), PORT_1(305, fn, pfx##305, sfx), \
+ PORT_1(306, fn, pfx##306, sfx), PORT_1(307, fn, pfx##307, sfx), \
+ PORT_1(308, fn, pfx##308, sfx), \
/* Port320 - Port329 */ \
- PORT_10(fn, pfx##32, sfx)
+ PORT_10(320, fn, pfx##32, sfx)
enum {
@@ -428,10 +427,7 @@ enum {
PINMUX_MARK_END,
};
-#define _PORT_DATA(pfx, sfx) PORT_DATA_IO(pfx)
-#define PINMUX_DATA_ALL() CPU_ALL_PORT(_PORT_DATA, , unused)
-
-static const pinmux_enum_t pinmux_data[] = {
+static const u16 pinmux_data[] = {
/* specify valid pin states for each pin in GPIO mode */
PINMUX_DATA_ALL(),
@@ -1269,19 +1265,12 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(IRQ57_MARK, PORT329_FN0),
};
-#define R8A73A4_PIN(pin, cfgs) \
- { \
- .name = __stringify(PORT##pin), \
- .enum_id = PORT##pin##_DATA, \
- .configs = cfgs, \
- }
-
#define __O (SH_PFC_PIN_CFG_OUTPUT)
#define __IO (SH_PFC_PIN_CFG_INPUT | SH_PFC_PIN_CFG_OUTPUT)
#define __PUD (SH_PFC_PIN_CFG_PULL_DOWN | SH_PFC_PIN_CFG_PULL_UP)
-#define R8A73A4_PIN_IO_PU_PD(pin) R8A73A4_PIN(pin, __IO | __PUD)
-#define R8A73A4_PIN_O(pin) R8A73A4_PIN(pin, __O)
+#define R8A73A4_PIN_IO_PU_PD(pin) SH_PFC_PIN_CFG(pin, __IO | __PUD)
+#define R8A73A4_PIN_O(pin) SH_PFC_PIN_CFG(pin, __O)
static struct sh_pfc_pin pinmux_pins[] = {
R8A73A4_PIN_IO_PU_PD(0), R8A73A4_PIN_IO_PU_PD(1),
@@ -1408,20 +1397,6 @@ static struct sh_pfc_pin pinmux_pins[] = {
R8A73A4_PIN_IO_PU_PD(328), R8A73A4_PIN_IO_PU_PD(329),
};
-static const struct pinmux_range pinmux_ranges[] = {
- {.begin = 0, .end = 30,},
- {.begin = 32, .end = 40,},
- {.begin = 64, .end = 85,},
- {.begin = 96, .end = 126,},
- {.begin = 128, .end = 134,},
- {.begin = 160, .end = 178,},
- {.begin = 192, .end = 222,},
- {.begin = 224, .end = 250,},
- {.begin = 256, .end = 283,},
- {.begin = 288, .end = 308,},
- {.begin = 320, .end = 329,},
-};
-
/* - IRQC ------------------------------------------------------------------- */
#define IRQC_PINS_MUX(pin, irq_mark) \
static const unsigned int irqc_irq##irq_mark##_pins[] = { \
@@ -2766,9 +2741,6 @@ const struct sh_pfc_soc_info r8a73a4_pinmux_info = {
.pins = pinmux_pins,
.nr_pins = ARRAY_SIZE(pinmux_pins),
- .ranges = pinmux_ranges,
- .nr_ranges = ARRAY_SIZE(pinmux_ranges),
-
.groups = pinmux_groups,
.nr_groups = ARRAY_SIZE(pinmux_groups),
.functions = pinmux_functions,
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
index f6ea47c433b..009174d0776 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7740.c
@@ -22,24 +22,16 @@
#include <linux/kernel.h>
#include <linux/pinctrl/pinconf-generic.h>
-#include <mach/r8a7740.h>
#include <mach/irqs.h>
#include "core.h"
#include "sh_pfc.h"
#define CPU_ALL_PORT(fn, pfx, sfx) \
- PORT_10(fn, pfx, sfx), PORT_90(fn, pfx, sfx), \
- PORT_10(fn, pfx##10, sfx), PORT_90(fn, pfx##1, sfx), \
- PORT_10(fn, pfx##20, sfx), \
- PORT_1(fn, pfx##210, sfx), PORT_1(fn, pfx##211, sfx)
-
-#undef _GPIO_PORT
-#define _GPIO_PORT(gpio, sfx) \
- [gpio] = { \
- .name = __stringify(PORT##gpio), \
- .enum_id = PORT##gpio##_DATA, \
- }
+ PORT_10(0, fn, pfx, sfx), PORT_90(0, fn, pfx, sfx), \
+ PORT_10(100, fn, pfx##10, sfx), PORT_90(100, fn, pfx##1, sfx), \
+ PORT_10(200, fn, pfx##20, sfx), \
+ PORT_1(210, fn, pfx##210, sfx), PORT_1(211, fn, pfx##211, sfx)
#define IRQC_PIN_MUX(irq, pin) \
static const unsigned int intc_irq##irq##_pins[] = { \
@@ -590,11 +582,8 @@ enum {
PINMUX_MARK_END,
};
-#define _PORT_DATA(pfx, sfx) PORT_DATA_IO(pfx)
-#define PINMUX_DATA_GP_ALL() CPU_ALL_PORT(_PORT_DATA, , unused)
-
-static const pinmux_enum_t pinmux_data[] = {
- PINMUX_DATA_GP_ALL(),
+static const u16 pinmux_data[] = {
+ PINMUX_DATA_ALL(),
/* Port0 */
PINMUX_DATA(DBGMDT2_MARK, PORT0_FN1),
@@ -1537,13 +1526,6 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(TRACEAUD_FROM_MEMC_MARK, MSEL5CR_30_1, MSEL5CR_29_0),
};
-#define R8A7740_PIN(pin, cfgs) \
- { \
- .name = __stringify(PORT##pin), \
- .enum_id = PORT##pin##_DATA, \
- .configs = cfgs, \
- }
-
#define __I (SH_PFC_PIN_CFG_INPUT)
#define __O (SH_PFC_PIN_CFG_OUTPUT)
#define __IO (SH_PFC_PIN_CFG_INPUT | SH_PFC_PIN_CFG_OUTPUT)
@@ -1551,15 +1533,15 @@ static const pinmux_enum_t pinmux_data[] = {
#define __PU (SH_PFC_PIN_CFG_PULL_UP)
#define __PUD (SH_PFC_PIN_CFG_PULL_DOWN | SH_PFC_PIN_CFG_PULL_UP)
-#define R8A7740_PIN_I_PD(pin) R8A7740_PIN(pin, __I | __PD)
-#define R8A7740_PIN_I_PU(pin) R8A7740_PIN(pin, __I | __PU)
-#define R8A7740_PIN_I_PU_PD(pin) R8A7740_PIN(pin, __I | __PUD)
-#define R8A7740_PIN_IO(pin) R8A7740_PIN(pin, __IO)
-#define R8A7740_PIN_IO_PD(pin) R8A7740_PIN(pin, __IO | __PD)
-#define R8A7740_PIN_IO_PU(pin) R8A7740_PIN(pin, __IO | __PU)
-#define R8A7740_PIN_IO_PU_PD(pin) R8A7740_PIN(pin, __IO | __PUD)
-#define R8A7740_PIN_O(pin) R8A7740_PIN(pin, __O)
-#define R8A7740_PIN_O_PU_PD(pin) R8A7740_PIN(pin, __O | __PUD)
+#define R8A7740_PIN_I_PD(pin) SH_PFC_PIN_CFG(pin, __I | __PD)
+#define R8A7740_PIN_I_PU(pin) SH_PFC_PIN_CFG(pin, __I | __PU)
+#define R8A7740_PIN_I_PU_PD(pin) SH_PFC_PIN_CFG(pin, __I | __PUD)
+#define R8A7740_PIN_IO(pin) SH_PFC_PIN_CFG(pin, __IO)
+#define R8A7740_PIN_IO_PD(pin) SH_PFC_PIN_CFG(pin, __IO | __PD)
+#define R8A7740_PIN_IO_PU(pin) SH_PFC_PIN_CFG(pin, __IO | __PU)
+#define R8A7740_PIN_IO_PU_PD(pin) SH_PFC_PIN_CFG(pin, __IO | __PUD)
+#define R8A7740_PIN_O(pin) SH_PFC_PIN_CFG(pin, __O)
+#define R8A7740_PIN_O_PU_PD(pin) SH_PFC_PIN_CFG(pin, __O | __PUD)
static struct sh_pfc_pin pinmux_pins[] = {
/* Table 56-1 (I/O and Pull U/D) */
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
index f9039102bb4..428d2a6857e 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7778.c
@@ -23,26 +23,6 @@
#include <linux/kernel.h>
#include "sh_pfc.h"
-#define PORT_GP_1(bank, pin, fn, sfx) fn(bank, pin, GP_##bank##_##pin, sfx)
-
-#define PORT_GP_32(bank, fn, sfx) \
- PORT_GP_1(bank, 0, fn, sfx), PORT_GP_1(bank, 1, fn, sfx), \
- PORT_GP_1(bank, 2, fn, sfx), PORT_GP_1(bank, 3, fn, sfx), \
- PORT_GP_1(bank, 4, fn, sfx), PORT_GP_1(bank, 5, fn, sfx), \
- PORT_GP_1(bank, 6, fn, sfx), PORT_GP_1(bank, 7, fn, sfx), \
- PORT_GP_1(bank, 8, fn, sfx), PORT_GP_1(bank, 9, fn, sfx), \
- PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx), \
- PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx), \
- PORT_GP_1(bank, 14, fn, sfx), PORT_GP_1(bank, 15, fn, sfx), \
- PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx), \
- PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx), \
- PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx), \
- PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx), \
- PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx), \
- PORT_GP_1(bank, 26, fn, sfx), PORT_GP_1(bank, 27, fn, sfx), \
- PORT_GP_1(bank, 28, fn, sfx), PORT_GP_1(bank, 29, fn, sfx), \
- PORT_GP_1(bank, 30, fn, sfx), PORT_GP_1(bank, 31, fn, sfx)
-
#define PORT_GP_27(bank, fn, sfx) \
PORT_GP_1(bank, 0, fn, sfx), PORT_GP_1(bank, 1, fn, sfx), \
PORT_GP_1(bank, 2, fn, sfx), PORT_GP_1(bank, 3, fn, sfx), \
@@ -66,26 +46,6 @@
PORT_GP_32(3, fn, sfx), \
PORT_GP_27(4, fn, sfx)
-#define _GP_PORT_ALL(bank, pin, name, sfx) name##_##sfx
-
-#define _GP_GPIO(bank, pin, _name, sfx) \
- [RCAR_GP_PIN(bank, pin)] = { \
- .name = __stringify(_name), \
- .enum_id = _name##_DATA, \
- }
-
-#define _GP_DATA(bank, pin, name, sfx) \
- PINMUX_DATA(name##_DATA, name##_FN)
-
-#define GP_ALL(str) CPU_ALL_PORT(_GP_PORT_ALL, str)
-#define PINMUX_GPIO_GP_ALL() CPU_ALL_PORT(_GP_GPIO, unused)
-#define PINMUX_DATA_GP_ALL() CPU_ALL_PORT(_GP_DATA, unused)
-
-#define PINMUX_IPSR_NOGP(ispr, fn) PINMUX_DATA(fn##_MARK, FN_##fn)
-#define PINMUX_IPSR_DATA(ipsr, fn) PINMUX_DATA(fn##_MARK, FN_##fn, FN_##ipsr)
-#define PINMUX_IPSR_MSEL(ipsr, fn, ms) PINMUX_DATA(fn##_MARK, FN_##fn, FN_##ipsr, FN_##ms)
-#define PINMUX_IPSR_NOGM(ispr, fn, ms) PINMUX_DATA(fn##_MARK, FN_##fn, FN_##ms)
-
enum {
PINMUX_RESERVED = 0,
@@ -579,7 +539,7 @@ enum {
PINMUX_MARK_END,
};
-static const pinmux_enum_t pinmux_data[] = {
+static const u16 pinmux_data[] = {
PINMUX_DATA_GP_ALL(), /* PINMUX_DATA(GP_M_N_DATA, GP_M_N_FN...), */
PINMUX_DATA(PENC0_MARK, FN_PENC0),
@@ -1294,16 +1254,21 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_MSEL(IP10_24_22, CAN_CLK_C, SEL_CANCLK_C),
};
-static struct sh_pfc_pin pinmux_pins[] = {
- PINMUX_GPIO_GP_ALL(),
-};
-
/* Pin numbers for pins without a corresponding GPIO port number are computed
* from the row and column numbers with a 1000 offset to avoid collisions with
* GPIO port numbers.
*/
#define PIN_NUMBER(row, col) (1000+((row)-1)*25+(col)-1)
+static struct sh_pfc_pin pinmux_pins[] = {
+ PINMUX_GPIO_GP_ALL(),
+
+ /* Pins not associated with a GPIO port */
+ SH_PFC_PIN_NAMED(3, 20, C20),
+ SH_PFC_PIN_NAMED(20, 1, T1),
+ SH_PFC_PIN_NAMED(25, 2, Y2),
+};
+
/* - macro */
#define SH_PFC_PINS(name, args...) \
static const unsigned int name ##_pins[] = { args }
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
index 8e22ca6c104..d3e94e307d7 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7779.c
@@ -24,51 +24,13 @@
#include "sh_pfc.h"
-#define PORT_GP_1(bank, pin, fn, sfx) fn(bank, pin, GP_##bank##_##pin, sfx)
-
-#define PORT_GP_32(bank, fn, sfx) \
- PORT_GP_1(bank, 0, fn, sfx), PORT_GP_1(bank, 1, fn, sfx), \
- PORT_GP_1(bank, 2, fn, sfx), PORT_GP_1(bank, 3, fn, sfx), \
- PORT_GP_1(bank, 4, fn, sfx), PORT_GP_1(bank, 5, fn, sfx), \
- PORT_GP_1(bank, 6, fn, sfx), PORT_GP_1(bank, 7, fn, sfx), \
- PORT_GP_1(bank, 8, fn, sfx), PORT_GP_1(bank, 9, fn, sfx), \
- PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx), \
- PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx), \
- PORT_GP_1(bank, 14, fn, sfx), PORT_GP_1(bank, 15, fn, sfx), \
- PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx), \
- PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx), \
- PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx), \
- PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx), \
- PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx), \
- PORT_GP_1(bank, 26, fn, sfx), PORT_GP_1(bank, 27, fn, sfx), \
- PORT_GP_1(bank, 28, fn, sfx), PORT_GP_1(bank, 29, fn, sfx), \
- PORT_GP_1(bank, 30, fn, sfx), PORT_GP_1(bank, 31, fn, sfx)
-
-#define PORT_GP_32_9(bank, fn, sfx) \
+#define PORT_GP_9(bank, fn, sfx) \
PORT_GP_1(bank, 0, fn, sfx), PORT_GP_1(bank, 1, fn, sfx), \
PORT_GP_1(bank, 2, fn, sfx), PORT_GP_1(bank, 3, fn, sfx), \
PORT_GP_1(bank, 4, fn, sfx), PORT_GP_1(bank, 5, fn, sfx), \
PORT_GP_1(bank, 6, fn, sfx), PORT_GP_1(bank, 7, fn, sfx), \
PORT_GP_1(bank, 8, fn, sfx)
-#define PORT_GP_32_REV(bank, fn, sfx) \
- PORT_GP_1(bank, 31, fn, sfx), PORT_GP_1(bank, 30, fn, sfx), \
- PORT_GP_1(bank, 29, fn, sfx), PORT_GP_1(bank, 28, fn, sfx), \
- PORT_GP_1(bank, 27, fn, sfx), PORT_GP_1(bank, 26, fn, sfx), \
- PORT_GP_1(bank, 25, fn, sfx), PORT_GP_1(bank, 24, fn, sfx), \
- PORT_GP_1(bank, 23, fn, sfx), PORT_GP_1(bank, 22, fn, sfx), \
- PORT_GP_1(bank, 21, fn, sfx), PORT_GP_1(bank, 20, fn, sfx), \
- PORT_GP_1(bank, 19, fn, sfx), PORT_GP_1(bank, 18, fn, sfx), \
- PORT_GP_1(bank, 17, fn, sfx), PORT_GP_1(bank, 16, fn, sfx), \
- PORT_GP_1(bank, 15, fn, sfx), PORT_GP_1(bank, 14, fn, sfx), \
- PORT_GP_1(bank, 13, fn, sfx), PORT_GP_1(bank, 12, fn, sfx), \
- PORT_GP_1(bank, 11, fn, sfx), PORT_GP_1(bank, 10, fn, sfx), \
- PORT_GP_1(bank, 9, fn, sfx), PORT_GP_1(bank, 8, fn, sfx), \
- PORT_GP_1(bank, 7, fn, sfx), PORT_GP_1(bank, 6, fn, sfx), \
- PORT_GP_1(bank, 5, fn, sfx), PORT_GP_1(bank, 4, fn, sfx), \
- PORT_GP_1(bank, 3, fn, sfx), PORT_GP_1(bank, 2, fn, sfx), \
- PORT_GP_1(bank, 1, fn, sfx), PORT_GP_1(bank, 0, fn, sfx)
-
#define CPU_ALL_PORT(fn, sfx) \
PORT_GP_32(0, fn, sfx), \
PORT_GP_32(1, fn, sfx), \
@@ -76,26 +38,7 @@
PORT_GP_32(3, fn, sfx), \
PORT_GP_32(4, fn, sfx), \
PORT_GP_32(5, fn, sfx), \
- PORT_GP_32_9(6, fn, sfx)
-
-#define _GP_PORT_ALL(bank, pin, name, sfx) name##_##sfx
-
-#define _GP_GPIO(bank, pin, _name, sfx) \
- [RCAR_GP_PIN(bank, pin)] = { \
- .name = __stringify(_name), \
- .enum_id = _name##_DATA, \
- }
-
-#define _GP_DATA(bank, pin, name, sfx) \
- PINMUX_DATA(name##_DATA, name##_FN)
-
-#define GP_ALL(str) CPU_ALL_PORT(_GP_PORT_ALL, str)
-#define PINMUX_GPIO_GP_ALL() CPU_ALL_PORT(_GP_GPIO, unused)
-#define PINMUX_DATA_GP_ALL() CPU_ALL_PORT(_GP_DATA, unused)
-
-#define PINMUX_IPSR_DATA(ipsr, fn) PINMUX_DATA(fn##_MARK, FN_##ipsr, FN_##fn)
-#define PINMUX_IPSR_MODSEL_DATA(ipsr, fn, ms) PINMUX_DATA(fn##_MARK, FN_##ms, \
- FN_##ipsr, FN_##fn)
+ PORT_GP_9(6, fn, sfx)
enum {
PINMUX_RESERVED = 0,
@@ -664,7 +607,7 @@ enum {
PINMUX_MARK_END,
};
-static const pinmux_enum_t pinmux_data[] = {
+static const u16 pinmux_data[] = {
PINMUX_DATA_GP_ALL(), /* PINMUX_DATA(GP_M_N_DATA, GP_M_N_FN...), */
PINMUX_DATA(AVS1_MARK, FN_AVS1),
@@ -1731,6 +1674,79 @@ static const unsigned int hspi2_b_pins[] = {
static const unsigned int hspi2_b_mux[] = {
HSPI_CLK2_B_MARK, HSPI_CS2_B_MARK, HSPI_RX2_B_MARK, HSPI_TX2_B_MARK,
};
+/* - I2C1 ------------------------------------------------------------------ */
+static const unsigned int i2c1_pins[] = {
+ /* SCL, SDA, */
+ RCAR_GP_PIN(0, 27), RCAR_GP_PIN(0, 28),
+};
+static const unsigned int i2c1_mux[] = {
+ SCL1_MARK, SDA1_MARK,
+};
+static const unsigned int i2c1_b_pins[] = {
+ /* SCL, SDA, */
+ RCAR_GP_PIN(1, 10), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int i2c1_b_mux[] = {
+ SCL1_B_MARK, SDA1_B_MARK,
+};
+static const unsigned int i2c1_c_pins[] = {
+ /* SCL, SDA, */
+ RCAR_GP_PIN(3, 12), RCAR_GP_PIN(3, 13),
+};
+static const unsigned int i2c1_c_mux[] = {
+ SCL1_C_MARK, SDA1_C_MARK,
+};
+static const unsigned int i2c1_d_pins[] = {
+ /* SCL, SDA, */
+ RCAR_GP_PIN(1, 26), RCAR_GP_PIN(1, 27),
+};
+static const unsigned int i2c1_d_mux[] = {
+ SCL1_D_MARK, SDA1_D_MARK,
+};
+/* - I2C2 ------------------------------------------------------------------ */
+static const unsigned int i2c2_pins[] = {
+ /* SCL, SDA, */
+ RCAR_GP_PIN(0, 25), RCAR_GP_PIN(0, 26),
+};
+static const unsigned int i2c2_mux[] = {
+ SCL2_MARK, SDA2_MARK,
+};
+static const unsigned int i2c2_b_pins[] = {
+ /* SCL, SDA, */
+ RCAR_GP_PIN(1, 18), RCAR_GP_PIN(1, 19),
+};
+static const unsigned int i2c2_b_mux[] = {
+ SCL2_B_MARK, SDA2_B_MARK,
+};
+static const unsigned int i2c2_c_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(0, 31), RCAR_GP_PIN(0, 30),
+};
+static const unsigned int i2c2_c_mux[] = {
+ SCL2_C_MARK, SDA2_C_MARK,
+};
+static const unsigned int i2c2_d_pins[] = {
+ /* SCL, SDA */
+ RCAR_GP_PIN(1, 24), RCAR_GP_PIN(1, 25),
+};
+static const unsigned int i2c2_d_mux[] = {
+ SCL2_D_MARK, SDA2_D_MARK,
+};
+/* - I2C3 ------------------------------------------------------------------ */
+static const unsigned int i2c3_pins[] = {
+ /* SCL, SDA, */
+ RCAR_GP_PIN(3, 0), RCAR_GP_PIN(2, 30),
+};
+static const unsigned int i2c3_mux[] = {
+ SCL3_MARK, SDA3_MARK,
+};
+static const unsigned int i2c3_b_pins[] = {
+ /* SCL, SDA, */
+ RCAR_GP_PIN(0, 29), RCAR_GP_PIN(0, 30),
+};
+static const unsigned int i2c3_b_mux[] = {
+ SCL3_B_MARK, SDA3_B_MARK,
+};
/* - INTC ------------------------------------------------------------------- */
static const unsigned int intc_irq0_pins[] = {
/* IRQ */
@@ -2600,6 +2616,16 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(hspi1_d),
SH_PFC_PIN_GROUP(hspi2),
SH_PFC_PIN_GROUP(hspi2_b),
+ SH_PFC_PIN_GROUP(i2c1),
+ SH_PFC_PIN_GROUP(i2c1_b),
+ SH_PFC_PIN_GROUP(i2c1_c),
+ SH_PFC_PIN_GROUP(i2c1_d),
+ SH_PFC_PIN_GROUP(i2c2),
+ SH_PFC_PIN_GROUP(i2c2_b),
+ SH_PFC_PIN_GROUP(i2c2_c),
+ SH_PFC_PIN_GROUP(i2c2_d),
+ SH_PFC_PIN_GROUP(i2c3),
+ SH_PFC_PIN_GROUP(i2c3_b),
SH_PFC_PIN_GROUP(intc_irq0),
SH_PFC_PIN_GROUP(intc_irq0_b),
SH_PFC_PIN_GROUP(intc_irq1),
@@ -2760,6 +2786,25 @@ static const char * const hspi2_groups[] = {
"hspi2_b",
};
+static const char * const i2c1_groups[] = {
+ "i2c1",
+ "i2c1_b",
+ "i2c1_c",
+ "i2c1_d",
+};
+
+static const char * const i2c2_groups[] = {
+ "i2c2",
+ "i2c2_b",
+ "i2c2_c",
+ "i2c2_d",
+};
+
+static const char * const i2c3_groups[] = {
+ "i2c3",
+ "i2c3_b",
+};
+
static const char * const intc_groups[] = {
"intc_irq0",
"intc_irq0_b",
@@ -2943,6 +2988,9 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(hspi0),
SH_PFC_FUNCTION(hspi1),
SH_PFC_FUNCTION(hspi2),
+ SH_PFC_FUNCTION(i2c1),
+ SH_PFC_FUNCTION(i2c2),
+ SH_PFC_FUNCTION(i2c3),
SH_PFC_FUNCTION(intc),
SH_PFC_FUNCTION(lbsc),
SH_PFC_FUNCTION(mmc0),
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
index 14f3ec267e1..64fcc00693b 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7790.c
@@ -27,44 +27,6 @@
#include "core.h"
#include "sh_pfc.h"
-#define PORT_GP_1(bank, pin, fn, sfx) fn(bank, pin, GP_##bank##_##pin, sfx)
-
-#define PORT_GP_32(bank, fn, sfx) \
- PORT_GP_1(bank, 0, fn, sfx), PORT_GP_1(bank, 1, fn, sfx), \
- PORT_GP_1(bank, 2, fn, sfx), PORT_GP_1(bank, 3, fn, sfx), \
- PORT_GP_1(bank, 4, fn, sfx), PORT_GP_1(bank, 5, fn, sfx), \
- PORT_GP_1(bank, 6, fn, sfx), PORT_GP_1(bank, 7, fn, sfx), \
- PORT_GP_1(bank, 8, fn, sfx), PORT_GP_1(bank, 9, fn, sfx), \
- PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx), \
- PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx), \
- PORT_GP_1(bank, 14, fn, sfx), PORT_GP_1(bank, 15, fn, sfx), \
- PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx), \
- PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx), \
- PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx), \
- PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx), \
- PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx), \
- PORT_GP_1(bank, 26, fn, sfx), PORT_GP_1(bank, 27, fn, sfx), \
- PORT_GP_1(bank, 28, fn, sfx), PORT_GP_1(bank, 29, fn, sfx), \
- PORT_GP_1(bank, 30, fn, sfx), PORT_GP_1(bank, 31, fn, sfx)
-
-#define PORT_GP_32_REV(bank, fn, sfx) \
- PORT_GP_1(bank, 31, fn, sfx), PORT_GP_1(bank, 30, fn, sfx), \
- PORT_GP_1(bank, 29, fn, sfx), PORT_GP_1(bank, 28, fn, sfx), \
- PORT_GP_1(bank, 27, fn, sfx), PORT_GP_1(bank, 26, fn, sfx), \
- PORT_GP_1(bank, 25, fn, sfx), PORT_GP_1(bank, 24, fn, sfx), \
- PORT_GP_1(bank, 23, fn, sfx), PORT_GP_1(bank, 22, fn, sfx), \
- PORT_GP_1(bank, 21, fn, sfx), PORT_GP_1(bank, 20, fn, sfx), \
- PORT_GP_1(bank, 19, fn, sfx), PORT_GP_1(bank, 18, fn, sfx), \
- PORT_GP_1(bank, 17, fn, sfx), PORT_GP_1(bank, 16, fn, sfx), \
- PORT_GP_1(bank, 15, fn, sfx), PORT_GP_1(bank, 14, fn, sfx), \
- PORT_GP_1(bank, 13, fn, sfx), PORT_GP_1(bank, 12, fn, sfx), \
- PORT_GP_1(bank, 11, fn, sfx), PORT_GP_1(bank, 10, fn, sfx), \
- PORT_GP_1(bank, 9, fn, sfx), PORT_GP_1(bank, 8, fn, sfx), \
- PORT_GP_1(bank, 7, fn, sfx), PORT_GP_1(bank, 6, fn, sfx), \
- PORT_GP_1(bank, 5, fn, sfx), PORT_GP_1(bank, 4, fn, sfx), \
- PORT_GP_1(bank, 3, fn, sfx), PORT_GP_1(bank, 2, fn, sfx), \
- PORT_GP_1(bank, 1, fn, sfx), PORT_GP_1(bank, 0, fn, sfx)
-
#define CPU_ALL_PORT(fn, sfx) \
PORT_GP_32(0, fn, sfx), \
PORT_GP_32(1, fn, sfx), \
@@ -73,25 +35,6 @@
PORT_GP_32(4, fn, sfx), \
PORT_GP_32(5, fn, sfx)
-#define _GP_PORT_ALL(bank, pin, name, sfx) name##_##sfx
-
-#define _GP_GPIO(bank, pin, _name, sfx) \
- [(bank * 32) + pin] = { \
- .name = __stringify(_name), \
- .enum_id = _name##_DATA, \
- }
-
-#define _GP_DATA(bank, pin, name, sfx) \
- PINMUX_DATA(name##_DATA, name##_FN)
-
-#define GP_ALL(str) CPU_ALL_PORT(_GP_PORT_ALL, str)
-#define PINMUX_GPIO_GP_ALL() CPU_ALL_PORT(_GP_GPIO, unused)
-#define PINMUX_DATA_GP_ALL() CPU_ALL_PORT(_GP_DATA, unused)
-
-#define PINMUX_IPSR_DATA(ipsr, fn) PINMUX_DATA(fn##_MARK, FN_##ipsr, FN_##fn)
-#define PINMUX_IPSR_MODSEL_DATA(ipsr, fn, ms) PINMUX_DATA(fn##_MARK, FN_##ms, \
- FN_##ipsr, FN_##fn)
-
enum {
PINMUX_RESERVED = 0,
@@ -168,18 +111,18 @@ enum {
FN_VI0_R0, FN_VI0_R0_B, FN_RX0_B, FN_D5,
FN_SCIFB1_TXD_F, FN_SCIFB0_TXD_C, FN_VI3_DATA5,
FN_VI0_R1, FN_VI0_R1_B, FN_TX0_B, FN_D6,
- FN_SCL2_C, FN_VI3_DATA6, FN_VI0_R2, FN_VI0_R2_B,
- FN_SCL2_CIS_C, FN_D7, FN_AD_DI_B, FN_SDA2_C,
- FN_VI3_DATA7, FN_VI0_R3, FN_VI0_R3_B, FN_SDA2_CIS_C,
- FN_D8, FN_SCIFA1_SCK_C, FN_AVB_TXD0, FN_MII_TXD0,
+ FN_IIC2_SCL_C, FN_VI3_DATA6, FN_VI0_R2, FN_VI0_R2_B,
+ FN_I2C2_SCL_C, FN_D7, FN_AD_DI_B, FN_IIC2_SDA_C,
+ FN_VI3_DATA7, FN_VI0_R3, FN_VI0_R3_B, FN_I2C2_SDA_C, FN_TCLK1,
+ FN_D8, FN_SCIFA1_SCK_C, FN_AVB_TXD0,
FN_VI0_G0, FN_VI0_G0_B, FN_VI2_DATA0_VI2_B0,
/* IPSR1 */
- FN_D9, FN_SCIFA1_RXD_C, FN_AVB_TXD1, FN_MII_TXD1,
+ FN_D9, FN_SCIFA1_RXD_C, FN_AVB_TXD1,
FN_VI0_G1, FN_VI0_G1_B, FN_VI2_DATA1_VI2_B1, FN_D10,
- FN_SCIFA1_TXD_C, FN_AVB_TXD2, FN_MII_TXD2,
+ FN_SCIFA1_TXD_C, FN_AVB_TXD2,
FN_VI0_G2, FN_VI0_G2_B, FN_VI2_DATA2_VI2_B2, FN_D11,
- FN_SCIFA1_CTS_N_C, FN_AVB_TXD3, FN_MII_TXD3,
+ FN_SCIFA1_CTS_N_C, FN_AVB_TXD3,
FN_VI0_G3, FN_VI0_G3_B, FN_VI2_DATA3_VI2_B3,
FN_D12, FN_SCIFA1_RTS_N_C, FN_AVB_TXD4,
FN_VI0_HSYNC_N, FN_VI0_HSYNC_N_B, FN_VI2_DATA4_VI2_B4,
@@ -198,9 +141,9 @@ enum {
FN_A6, FN_SCIFA1_RTS_N_B, FN_TPU0TO2, FN_A7,
FN_SCIFA1_SCK_B, FN_AUDIO_CLKOUT_B, FN_TPU0TO3,
FN_A8, FN_SCIFA1_RXD_B, FN_SSI_SCK5_B, FN_VI0_R4,
- FN_VI0_R4_B, FN_SCIFB2_RXD_C, FN_VI2_DATA0_VI2_B0_B,
+ FN_VI0_R4_B, FN_SCIFB2_RXD_C, FN_RX2_B, FN_VI2_DATA0_VI2_B0_B,
FN_A9, FN_SCIFA1_CTS_N_B, FN_SSI_WS5_B, FN_VI0_R5,
- FN_VI0_R5_B, FN_SCIFB2_TXD_C, FN_VI2_DATA1_VI2_B1_B,
+ FN_VI0_R5_B, FN_SCIFB2_TXD_C, FN_TX2_B, FN_VI2_DATA1_VI2_B1_B,
FN_A10, FN_SSI_SDATA5_B, FN_MSIOF2_SYNC, FN_VI0_R6,
FN_VI0_R6_B, FN_VI2_DATA2_VI2_B2_B,
@@ -239,11 +182,11 @@ enum {
/* IPSR5 */
FN_EX_CS3_N, FN_GPS_MAG, FN_VI3_FIELD, FN_VI1_G1, FN_VI1_G1_B,
FN_VI2_R3, FN_EX_CS4_N, FN_MSIOF1_SCK_B, FN_VI3_HSYNC_N,
- FN_VI2_HSYNC_N, FN_SCL1, FN_VI2_HSYNC_N_B,
- FN_INTC_EN0_N, FN_SCL1_CIS, FN_EX_CS5_N, FN_CAN0_RX,
+ FN_VI2_HSYNC_N, FN_IIC1_SCL, FN_VI2_HSYNC_N_B,
+ FN_INTC_EN0_N, FN_I2C1_SCL, FN_EX_CS5_N, FN_CAN0_RX,
FN_MSIOF1_RXD_B, FN_VI3_VSYNC_N, FN_VI1_G2,
- FN_VI1_G2_B, FN_VI2_R4, FN_SDA1, FN_INTC_EN1_N,
- FN_SDA1_CIS, FN_BS_N, FN_IETX, FN_HTX1_B,
+ FN_VI1_G2_B, FN_VI2_R4, FN_IIC1_SDA, FN_INTC_EN1_N,
+ FN_I2C1_SDA, FN_BS_N, FN_IETX, FN_HTX1_B,
FN_CAN1_TX, FN_DRACK0, FN_IETX_C, FN_RD_N,
FN_CAN0_TX, FN_SCIFA0_SCK_B, FN_RD_WR_N, FN_VI1_G3,
FN_VI1_G3_B, FN_VI2_R5, FN_SCIFA0_RXD_B,
@@ -266,56 +209,55 @@ enum {
FN_DREQ2_N, FN_HSCK1_B, FN_HCTS0_N_B,
FN_MSIOF0_TXD_B, FN_DACK2, FN_IRQ2, FN_INTC_IRQ2_N,
FN_SSI_SDATA6_B, FN_HRTS0_N_B, FN_MSIOF0_RXD_B,
- FN_ETH_CRS_DV, FN_RMII_CRS_DV, FN_STP_ISCLK_0_B,
- FN_TS_SDEN0_D, FN_GLO_Q0_C, FN_SCL2_E,
- FN_SCL2_CIS_E, FN_ETH_RX_ER, FN_RMII_RX_ER,
+ FN_ETH_CRS_DV, FN_STP_ISCLK_0_B,
+ FN_TS_SDEN0_D, FN_GLO_Q0_C, FN_IIC2_SCL_E,
+ FN_I2C2_SCL_E, FN_ETH_RX_ER,
FN_STP_ISD_0_B, FN_TS_SPSYNC0_D, FN_GLO_Q1_C,
- FN_SDA2_E, FN_SDA2_CIS_E, FN_ETH_RXD0, FN_RMII_RXD0,
+ FN_IIC2_SDA_E, FN_I2C2_SDA_E, FN_ETH_RXD0,
FN_STP_ISEN_0_B, FN_TS_SDAT0_D, FN_GLO_I0_C,
FN_SCIFB1_SCK_G, FN_SCK1_E, FN_ETH_RXD1,
- FN_RMII_RXD1, FN_HRX0_E, FN_STP_ISSYNC_0_B,
+ FN_HRX0_E, FN_STP_ISSYNC_0_B,
FN_TS_SCK0_D, FN_GLO_I1_C, FN_SCIFB1_RXD_G,
- FN_RX1_E, FN_ETH_LINK, FN_RMII_LINK, FN_HTX0_E,
+ FN_RX1_E, FN_ETH_LINK, FN_HTX0_E,
FN_STP_IVCXO27_0_B, FN_SCIFB1_TXD_G, FN_TX1_E,
- FN_ETH_REF_CLK, FN_RMII_REF_CLK, FN_HCTS0_N_E,
+ FN_ETH_REF_CLK, FN_HCTS0_N_E,
FN_STP_IVCXO27_1_B, FN_HRX0_F,
/* IPSR7 */
- FN_ETH_MDIO, FN_RMII_MDIO, FN_HRTS0_N_E,
+ FN_ETH_MDIO, FN_HRTS0_N_E,
FN_SIM0_D_C, FN_HCTS0_N_F, FN_ETH_TXD1,
- FN_RMII_TXD1, FN_HTX0_F, FN_BPFCLK_G, FN_RDS_CLK_F,
- FN_ETH_TX_EN, FN_RMII_TX_EN, FN_SIM0_CLK_C,
- FN_HRTS0_N_F, FN_ETH_MAGIC, FN_RMII_MAGIC,
- FN_SIM0_RST_C, FN_ETH_TXD0, FN_RMII_TXD0,
+ FN_HTX0_F, FN_BPFCLK_G,
+ FN_ETH_TX_EN, FN_SIM0_CLK_C,
+ FN_HRTS0_N_F, FN_ETH_MAGIC,
+ FN_SIM0_RST_C, FN_ETH_TXD0,
FN_STP_ISCLK_1_B, FN_TS_SDEN1_C, FN_GLO_SCLK_C,
- FN_ETH_MDC, FN_RMII_MDC, FN_STP_ISD_1_B,
+ FN_ETH_MDC, FN_STP_ISD_1_B,
FN_TS_SPSYNC1_C, FN_GLO_SDATA_C, FN_PWM0,
FN_SCIFA2_SCK_C, FN_STP_ISEN_1_B, FN_TS_SDAT1_C,
FN_GLO_SS_C, FN_PWM1, FN_SCIFA2_TXD_C,
FN_STP_ISSYNC_1_B, FN_TS_SCK1_C, FN_GLO_RFON_C,
FN_PCMOE_N, FN_PWM2, FN_PWMFSW0, FN_SCIFA2_RXD_C,
- FN_PCMWE_N, FN_IECLK_C, FN_DU1_DOTCLKIN,
+ FN_PCMWE_N, FN_IECLK_C, FN_DU_DOTCLKIN1,
FN_AUDIO_CLKC, FN_AUDIO_CLKOUT_C, FN_VI0_CLK,
- FN_ATACS00_N, FN_AVB_RXD1, FN_MII_RXD1,
+ FN_ATACS00_N, FN_AVB_RXD1,
FN_VI0_DATA0_VI0_B0, FN_ATACS10_N, FN_AVB_RXD2,
- FN_MII_RXD2,
/* IPSR8 */
FN_VI0_DATA1_VI0_B1, FN_ATARD0_N, FN_AVB_RXD3,
- FN_MII_RXD3, FN_VI0_DATA2_VI0_B2, FN_ATAWR0_N,
+ FN_VI0_DATA2_VI0_B2, FN_ATAWR0_N,
FN_AVB_RXD4, FN_VI0_DATA3_VI0_B3, FN_ATADIR0_N,
FN_AVB_RXD5, FN_VI0_DATA4_VI0_B4, FN_ATAG0_N,
FN_AVB_RXD6, FN_VI0_DATA5_VI0_B5, FN_EX_WAIT1,
FN_AVB_RXD7, FN_VI0_DATA6_VI0_B6, FN_AVB_RX_ER,
- FN_MII_RX_ER, FN_VI0_DATA7_VI0_B7, FN_AVB_RX_CLK,
- FN_MII_RX_CLK, FN_VI1_CLK, FN_AVB_RX_DV,
- FN_MII_RX_DV, FN_VI1_DATA0_VI1_B0, FN_SCIFA1_SCK_D,
- FN_AVB_CRS, FN_MII_CRS, FN_VI1_DATA1_VI1_B1,
- FN_SCIFA1_RXD_D, FN_AVB_MDC, FN_MII_MDC,
+ FN_VI0_DATA7_VI0_B7, FN_AVB_RX_CLK,
+ FN_VI1_CLK, FN_AVB_RX_DV,
+ FN_VI1_DATA0_VI1_B0, FN_SCIFA1_SCK_D,
+ FN_AVB_CRS, FN_VI1_DATA1_VI1_B1,
+ FN_SCIFA1_RXD_D, FN_AVB_MDC,
FN_VI1_DATA2_VI1_B2, FN_SCIFA1_TXD_D, FN_AVB_MDIO,
- FN_MII_MDIO, FN_VI1_DATA3_VI1_B3, FN_SCIFA1_CTS_N_D,
+ FN_VI1_DATA3_VI1_B3, FN_SCIFA1_CTS_N_D,
FN_AVB_GTX_CLK, FN_VI1_DATA4_VI1_B4, FN_SCIFA1_RTS_N_D,
- FN_AVB_MAGIC, FN_MII_MAGIC, FN_VI1_DATA5_VI1_B5,
+ FN_AVB_MAGIC, FN_VI1_DATA5_VI1_B5,
FN_AVB_PHY_INT, FN_VI1_DATA6_VI1_B6, FN_AVB_GTXREFCLK,
FN_SD0_CLK, FN_VI1_DATA0_VI1_B0_B, FN_SD0_CMD,
FN_SCIFB1_SCK_B, FN_VI1_DATA1_VI1_B1_B,
@@ -326,26 +268,26 @@ enum {
FN_SD0_DAT2, FN_SCIFB1_CTS_N_B, FN_VI1_DATA4_VI1_B4_B,
FN_SD0_DAT3, FN_SCIFB1_RTS_N_B, FN_VI1_DATA5_VI1_B5_B,
FN_SD0_CD, FN_MMC0_D6, FN_TS_SDEN0_B, FN_USB0_EXTP,
- FN_GLO_SCLK, FN_VI1_DATA6_VI1_B6_B, FN_SCL1_B,
- FN_SCL1_CIS_B, FN_VI2_DATA6_VI2_B6_B, FN_SD0_WP,
+ FN_GLO_SCLK, FN_VI1_DATA6_VI1_B6_B, FN_IIC1_SCL_B,
+ FN_I2C1_SCL_B, FN_VI2_DATA6_VI2_B6_B, FN_SD0_WP,
FN_MMC0_D7, FN_TS_SPSYNC0_B, FN_USB0_IDIN,
- FN_GLO_SDATA, FN_VI1_DATA7_VI1_B7_B, FN_SDA1_B,
- FN_SDA1_CIS_B, FN_VI2_DATA7_VI2_B7_B, FN_SD1_CLK,
- FN_AVB_TX_EN, FN_MII_TX_EN, FN_SD1_CMD,
- FN_AVB_TX_ER, FN_MII_TX_ER, FN_SCIFB0_SCK_B,
- FN_SD1_DAT0, FN_AVB_TX_CLK, FN_MII_TX_CLK,
+ FN_GLO_SDATA, FN_VI1_DATA7_VI1_B7_B, FN_IIC1_SDA_B,
+ FN_I2C1_SDA_B, FN_VI2_DATA7_VI2_B7_B, FN_SD1_CLK,
+ FN_AVB_TX_EN, FN_SD1_CMD,
+ FN_AVB_TX_ER, FN_SCIFB0_SCK_B,
+ FN_SD1_DAT0, FN_AVB_TX_CLK,
FN_SCIFB0_RXD_B, FN_SD1_DAT1, FN_AVB_LINK,
- FN_MII_LINK, FN_SCIFB0_TXD_B, FN_SD1_DAT2,
- FN_AVB_COL, FN_MII_COL, FN_SCIFB0_CTS_N_B,
- FN_SD1_DAT3, FN_AVB_RXD0, FN_MII_RXD0,
+ FN_SCIFB0_TXD_B, FN_SD1_DAT2,
+ FN_AVB_COL, FN_SCIFB0_CTS_N_B,
+ FN_SD1_DAT3, FN_AVB_RXD0,
FN_SCIFB0_RTS_N_B, FN_SD1_CD, FN_MMC1_D6,
FN_TS_SDEN1, FN_USB1_EXTP, FN_GLO_SS, FN_VI0_CLK_B,
- FN_SCL2_D, FN_SCL2_CIS_D, FN_SIM0_CLK_B,
+ FN_IIC2_SCL_D, FN_I2C2_SCL_D, FN_SIM0_CLK_B,
FN_VI3_CLK_B,
/* IPSR10 */
FN_SD1_WP, FN_MMC1_D7, FN_TS_SPSYNC1, FN_USB1_IDIN,
- FN_GLO_RFON, FN_VI1_CLK_B, FN_SDA2_D, FN_SDA2_CIS_D,
+ FN_GLO_RFON, FN_VI1_CLK_B, FN_IIC2_SDA_D, FN_I2C2_SDA_D,
FN_SIM0_D_B, FN_SD2_CLK, FN_MMC0_CLK, FN_SIM0_CLK,
FN_VI0_DATA0_VI0_B0_B, FN_TS_SDEN0_C, FN_GLO_SCLK_B,
FN_VI3_DATA0_B, FN_SD2_CMD, FN_MMC0_CMD, FN_SIM0_D,
@@ -354,10 +296,10 @@ enum {
FN_SD2_DAT0, FN_MMC0_D0, FN_FMCLK_B,
FN_VI0_DATA2_VI0_B2_B, FN_SCIFB1_RXD_E, FN_RX1_D,
FN_TS_SDAT0_C, FN_GLO_SS_B, FN_VI3_DATA2_B,
- FN_SD2_DAT1, FN_MMC0_D1, FN_FMIN_B, FN_RDS_DATA,
+ FN_SD2_DAT1, FN_MMC0_D1, FN_FMIN_B,
FN_VI0_DATA3_VI0_B3_B, FN_SCIFB1_TXD_E, FN_TX1_D,
FN_TS_SCK0_C, FN_GLO_RFON_B, FN_VI3_DATA3_B,
- FN_SD2_DAT2, FN_MMC0_D2, FN_BPFCLK_B, FN_RDS_CLK,
+ FN_SD2_DAT2, FN_MMC0_D2, FN_BPFCLK_B,
FN_VI0_DATA4_VI0_B4_B, FN_HRX0_D, FN_TS_SDEN1_B,
FN_GLO_Q0_B, FN_VI3_DATA4_B, FN_SD2_DAT3,
FN_MMC0_D3, FN_SIM0_RST, FN_VI0_DATA5_VI0_B5_B,
@@ -378,12 +320,12 @@ enum {
FN_SCKZ, FN_SD3_CD, FN_MMC1_D4, FN_TS_SDAT1,
FN_VSP, FN_GLO_Q0, FN_SIM0_RST_B, FN_SD3_WP,
FN_MMC1_D5, FN_TS_SCK1, FN_GLO_Q1, FN_FMIN_C,
- FN_RDS_DATA_B, FN_FMIN_E, FN_RDS_DATA_D, FN_FMIN_F,
- FN_RDS_DATA_E, FN_MLB_CLK, FN_SCL2_B, FN_SCL2_CIS_B,
- FN_MLB_SIG, FN_SCIFB1_RXD_D, FN_RX1_C, FN_SDA2_B,
- FN_SDA2_CIS_B, FN_MLB_DAT, FN_SPV_EVEN,
+ FN_FMIN_E, FN_FMIN_F,
+ FN_MLB_CLK, FN_IIC2_SCL_B, FN_I2C2_SCL_B,
+ FN_MLB_SIG, FN_SCIFB1_RXD_D, FN_RX1_C, FN_IIC2_SDA_B,
+ FN_I2C2_SDA_B, FN_MLB_DAT,
FN_SCIFB1_TXD_D, FN_TX1_C, FN_BPFCLK_C,
- FN_RDS_CLK_B, FN_SSI_SCK0129, FN_CAN_CLK_B,
+ FN_SSI_SCK0129, FN_CAN_CLK_B,
FN_MOUT0,
/* IPSR12 */
@@ -410,12 +352,12 @@ enum {
/* IPSR13 */
FN_SSI_SDATA5, FN_SCIFB1_TXD, FN_IETX_B, FN_DU2_DR2,
FN_LCDOUT2, FN_CAN_DEBUGOUT5, FN_SSI_SCK6,
- FN_SCIFB1_CTS_N, FN_BPFCLK_D, FN_RDS_CLK_C,
+ FN_SCIFB1_CTS_N, FN_BPFCLK_D,
FN_DU2_DR3, FN_LCDOUT3, FN_CAN_DEBUGOUT6,
- FN_BPFCLK_F, FN_RDS_CLK_E, FN_SSI_WS6,
+ FN_BPFCLK_F, FN_SSI_WS6,
FN_SCIFB1_RTS_N, FN_CAN0_TX_D, FN_DU2_DR4,
FN_LCDOUT4, FN_CAN_DEBUGOUT7, FN_SSI_SDATA6,
- FN_FMIN_D, FN_RDS_DATA_C, FN_DU2_DR5, FN_LCDOUT5,
+ FN_FMIN_D, FN_DU2_DR5, FN_LCDOUT5,
FN_CAN_DEBUGOUT8, FN_SSI_SCK78, FN_STP_IVCXO27_1,
FN_SCK1, FN_SCIFA1_SCK, FN_DU2_DR6, FN_LCDOUT6,
FN_CAN_DEBUGOUT9, FN_SSI_WS78, FN_STP_ISCLK_1,
@@ -423,8 +365,8 @@ enum {
FN_LCDOUT7, FN_CAN_DEBUGOUT10, FN_SSI_SDATA7,
FN_STP_ISD_1, FN_SCIFB2_RXD, FN_SCIFA2_RTS_N,
FN_TCLK2, FN_QSTVA_QVS, FN_CAN_DEBUGOUT11,
- FN_BPFCLK_E, FN_RDS_CLK_D, FN_SSI_SDATA7_B,
- FN_FMIN_G, FN_RDS_DATA_F, FN_SSI_SDATA8,
+ FN_BPFCLK_E, FN_SSI_SDATA7_B,
+ FN_FMIN_G, FN_SSI_SDATA8,
FN_STP_ISEN_1, FN_SCIFB2_TXD, FN_CAN0_TX_C,
FN_CAN_DEBUGOUT12, FN_SSI_SDATA8_B, FN_SSI_SDATA9,
FN_STP_ISSYNC_1, FN_SCIFB2_CTS_N, FN_SSI_WS1,
@@ -435,29 +377,29 @@ enum {
FN_AUDIO_CLKB, FN_SCIF_CLK, FN_CAN0_RX_D,
FN_DVC_MUTE, FN_CAN0_RX_C, FN_CAN_DEBUGOUT15,
FN_REMOCON, FN_SCIFA0_SCK, FN_HSCK1, FN_SCK0,
- FN_MSIOF3_SS2, FN_DU2_DG2, FN_LCDOUT10, FN_SDA1_C,
- FN_SDA1_CIS_C, FN_SCIFA0_RXD, FN_HRX1, FN_RX0,
+ FN_MSIOF3_SS2, FN_DU2_DG2, FN_LCDOUT10, FN_IIC1_SDA_C,
+ FN_I2C1_SDA_C, FN_SCIFA0_RXD, FN_HRX1, FN_RX0,
FN_DU2_DR0, FN_LCDOUT0, FN_SCIFA0_TXD, FN_HTX1,
FN_TX0, FN_DU2_DR1, FN_LCDOUT1, FN_SCIFA0_CTS_N,
FN_HCTS1_N, FN_CTS0_N, FN_MSIOF3_SYNC, FN_DU2_DG3,
- FN_LCDOUT11, FN_PWM0_B, FN_SCL1_C, FN_SCL1_CIS_C,
- FN_SCIFA0_RTS_N, FN_HRTS1_N, FN_RTS0_N_TANS,
+ FN_LCDOUT11, FN_PWM0_B, FN_IIC1_SCL_C, FN_I2C1_SCL_C,
+ FN_SCIFA0_RTS_N, FN_HRTS1_N, FN_RTS0_N,
FN_MSIOF3_SS1, FN_DU2_DG0, FN_LCDOUT8, FN_PWM1_B,
FN_SCIFA1_RXD, FN_AD_DI, FN_RX1,
FN_DU2_EXODDF_DU2_ODDF_DISP_CDE, FN_QCPV_QDE,
FN_SCIFA1_TXD, FN_AD_DO, FN_TX1, FN_DU2_DG1,
FN_LCDOUT9, FN_SCIFA1_CTS_N, FN_AD_CLK,
FN_CTS1_N, FN_MSIOF3_RXD, FN_DU0_DOTCLKOUT, FN_QCLK,
- FN_SCIFA1_RTS_N, FN_AD_NCS_N, FN_RTS1_N_TANS,
+ FN_SCIFA1_RTS_N, FN_AD_NCS_N, FN_RTS1_N,
FN_MSIOF3_TXD, FN_DU1_DOTCLKOUT, FN_QSTVB_QVE,
FN_HRTS0_N_C,
/* IPSR15 */
- FN_SCIFA2_SCK, FN_FMCLK, FN_MSIOF3_SCK, FN_DU2_DG7,
+ FN_SCIFA2_SCK, FN_FMCLK, FN_SCK2, FN_MSIOF3_SCK, FN_DU2_DG7,
FN_LCDOUT15, FN_SCIF_CLK_B, FN_SCIFA2_RXD, FN_FMIN,
- FN_DU2_DB0, FN_LCDOUT16, FN_SCL2, FN_SCL2_CIS,
- FN_SCIFA2_TXD, FN_BPFCLK, FN_DU2_DB1, FN_LCDOUT17,
- FN_SDA2, FN_SDA2_CIS, FN_HSCK0, FN_TS_SDEN0,
+ FN_TX2, FN_DU2_DB0, FN_LCDOUT16, FN_IIC2_SCL, FN_I2C2_SCL,
+ FN_SCIFA2_TXD, FN_BPFCLK, FN_RX2, FN_DU2_DB1, FN_LCDOUT17,
+ FN_IIC2_SDA, FN_I2C2_SDA, FN_HSCK0, FN_TS_SDEN0,
FN_DU2_DG4, FN_LCDOUT12, FN_HCTS0_N_C, FN_HRX0,
FN_DU2_DB2, FN_LCDOUT18, FN_HTX0, FN_DU2_DB3,
FN_LCDOUT19, FN_HCTS0_N, FN_SSI_SCK9, FN_DU2_DB4,
@@ -465,7 +407,7 @@ enum {
FN_LCDOUT21, FN_MSIOF0_SCK, FN_TS_SDAT0, FN_ADICLK,
FN_DU2_DB6, FN_LCDOUT22, FN_MSIOF0_SYNC, FN_TS_SCK0,
FN_SSI_SCK2, FN_ADIDATA, FN_DU2_DB7, FN_LCDOUT23,
- FN_SCIFA2_RXD_B, FN_MSIOF0_SS1, FN_ADICHS0,
+ FN_HRX0_C, FN_MSIOF0_SS1, FN_ADICHS0,
FN_DU2_DG5, FN_LCDOUT13, FN_MSIOF0_TXD, FN_ADICHS1,
FN_DU2_DG6, FN_LCDOUT14,
@@ -473,7 +415,7 @@ enum {
FN_MSIOF0_SS2, FN_AUDIO_CLKOUT, FN_ADICHS2,
FN_DU2_DISP, FN_QPOLA, FN_HTX0_C, FN_SCIFA2_TXD_B,
FN_MSIOF0_RXD, FN_TS_SPSYNC0, FN_SSI_WS2,
- FN_ADICS_SAMP, FN_DU2_CDE, FN_QPOLB, FN_HRX0_C,
+ FN_ADICS_SAMP, FN_DU2_CDE, FN_QPOLB, FN_SCIFA2_RXD_B,
FN_USB1_PWEN, FN_AUDIO_CLKOUT_D, FN_USB1_OVC,
FN_TCLK1_B,
@@ -508,6 +450,7 @@ enum {
FN_SEL_CANCLK_0, FN_SEL_CANCLK_1,
FN_SEL_SCIFA2_0, FN_SEL_SCIFA2_1, FN_SEL_SCIFA2_2,
FN_SEL_CAN1_0, FN_SEL_CAN1_1,
+ FN_SEL_SCIF2_0, FN_SEL_SCIF2_1,
FN_SEL_ADI_0, FN_SEL_ADI_1,
FN_SEL_SSP_0, FN_SEL_SSP_1,
FN_SEL_FM_0, FN_SEL_FM_1, FN_SEL_FM_2, FN_SEL_FM_3,
@@ -515,8 +458,6 @@ enum {
FN_SEL_HSCIF0_0, FN_SEL_HSCIF0_1, FN_SEL_HSCIF0_2, FN_SEL_HSCIF0_3,
FN_SEL_HSCIF0_4, FN_SEL_HSCIF0_5,
FN_SEL_GPS_0, FN_SEL_GPS_1, FN_SEL_GPS_2,
- FN_SEL_RDS_0, FN_SEL_RDS_1, FN_SEL_RDS_2,
- FN_SEL_RDS_3, FN_SEL_RDS_4, FN_SEL_RDS_5,
FN_SEL_SIM_0, FN_SEL_SIM_1, FN_SEL_SIM_2,
FN_SEL_SSI8_0, FN_SEL_SSI8_1, FN_SEL_SSI8_2,
@@ -548,17 +489,17 @@ enum {
VI0_R0_MARK, VI0_R0_B_MARK, RX0_B_MARK, D5_MARK,
SCIFB1_TXD_F_MARK, SCIFB0_TXD_C_MARK, VI3_DATA5_MARK,
VI0_R1_MARK, VI0_R1_B_MARK, TX0_B_MARK, D6_MARK,
- SCL2_C_MARK, VI3_DATA6_MARK, VI0_R2_MARK, VI0_R2_B_MARK,
- SCL2_CIS_C_MARK, D7_MARK, AD_DI_B_MARK, SDA2_C_MARK,
- VI3_DATA7_MARK, VI0_R3_MARK, VI0_R3_B_MARK, SDA2_CIS_C_MARK,
- D8_MARK, SCIFA1_SCK_C_MARK, AVB_TXD0_MARK, MII_TXD0_MARK,
+ IIC2_SCL_C_MARK, VI3_DATA6_MARK, VI0_R2_MARK, VI0_R2_B_MARK,
+ I2C2_SCL_C_MARK, D7_MARK, AD_DI_B_MARK, IIC2_SDA_C_MARK,
+ VI3_DATA7_MARK, VI0_R3_MARK, VI0_R3_B_MARK, I2C2_SDA_C_MARK, TCLK1_MARK,
+ D8_MARK, SCIFA1_SCK_C_MARK, AVB_TXD0_MARK,
VI0_G0_MARK, VI0_G0_B_MARK, VI2_DATA0_VI2_B0_MARK,
- D9_MARK, SCIFA1_RXD_C_MARK, AVB_TXD1_MARK, MII_TXD1_MARK,
+ D9_MARK, SCIFA1_RXD_C_MARK, AVB_TXD1_MARK,
VI0_G1_MARK, VI0_G1_B_MARK, VI2_DATA1_VI2_B1_MARK, D10_MARK,
- SCIFA1_TXD_C_MARK, AVB_TXD2_MARK, MII_TXD2_MARK,
+ SCIFA1_TXD_C_MARK, AVB_TXD2_MARK,
VI0_G2_MARK, VI0_G2_B_MARK, VI2_DATA2_VI2_B2_MARK, D11_MARK,
- SCIFA1_CTS_N_C_MARK, AVB_TXD3_MARK, MII_TXD3_MARK,
+ SCIFA1_CTS_N_C_MARK, AVB_TXD3_MARK,
VI0_G3_MARK, VI0_G3_B_MARK, VI2_DATA3_VI2_B3_MARK,
D12_MARK, SCIFA1_RTS_N_C_MARK, AVB_TXD4_MARK,
VI0_HSYNC_N_MARK, VI0_HSYNC_N_B_MARK, VI2_DATA4_VI2_B4_MARK,
@@ -576,9 +517,9 @@ enum {
A6_MARK, SCIFA1_RTS_N_B_MARK, TPU0TO2_MARK, A7_MARK,
SCIFA1_SCK_B_MARK, AUDIO_CLKOUT_B_MARK, TPU0TO3_MARK,
A8_MARK, SCIFA1_RXD_B_MARK, SSI_SCK5_B_MARK, VI0_R4_MARK,
- VI0_R4_B_MARK, SCIFB2_RXD_C_MARK, VI2_DATA0_VI2_B0_B_MARK,
+ VI0_R4_B_MARK, SCIFB2_RXD_C_MARK, RX2_B_MARK, VI2_DATA0_VI2_B0_B_MARK,
A9_MARK, SCIFA1_CTS_N_B_MARK, SSI_WS5_B_MARK, VI0_R5_MARK,
- VI0_R5_B_MARK, SCIFB2_TXD_C_MARK, VI2_DATA1_VI2_B1_B_MARK,
+ VI0_R5_B_MARK, SCIFB2_TXD_C_MARK, TX2_B_MARK, VI2_DATA1_VI2_B1_B_MARK,
A10_MARK, SSI_SDATA5_B_MARK, MSIOF2_SYNC_MARK, VI0_R6_MARK,
VI0_R6_B_MARK, VI2_DATA2_VI2_B2_B_MARK,
@@ -615,11 +556,11 @@ enum {
EX_CS3_N_MARK, GPS_MAG_MARK, VI3_FIELD_MARK,
VI1_G1_MARK, VI1_G1_B_MARK, VI2_R3_MARK,
EX_CS4_N_MARK, MSIOF1_SCK_B_MARK, VI3_HSYNC_N_MARK,
- VI2_HSYNC_N_MARK, SCL1_MARK, VI2_HSYNC_N_B_MARK,
- INTC_EN0_N_MARK, SCL1_CIS_MARK, EX_CS5_N_MARK, CAN0_RX_MARK,
+ VI2_HSYNC_N_MARK, IIC1_SCL_MARK, VI2_HSYNC_N_B_MARK,
+ INTC_EN0_N_MARK, I2C1_SCL_MARK, EX_CS5_N_MARK, CAN0_RX_MARK,
MSIOF1_RXD_B_MARK, VI3_VSYNC_N_MARK, VI1_G2_MARK,
- VI1_G2_B_MARK, VI2_R4_MARK, SDA1_MARK, INTC_EN1_N_MARK,
- SDA1_CIS_MARK, BS_N_MARK, IETX_MARK, HTX1_B_MARK,
+ VI1_G2_B_MARK, VI2_R4_MARK, IIC1_SDA_MARK, INTC_EN1_N_MARK,
+ I2C1_SDA_MARK, BS_N_MARK, IETX_MARK, HTX1_B_MARK,
CAN1_TX_MARK, DRACK0_MARK, IETX_C_MARK, RD_N_MARK,
CAN0_TX_MARK, SCIFA0_SCK_B_MARK, RD_WR_N_MARK, VI1_G3_MARK,
VI1_G3_B_MARK, VI2_R5_MARK, SCIFA0_RXD_B_MARK,
@@ -641,54 +582,53 @@ enum {
DREQ2_N_MARK, HSCK1_B_MARK, HCTS0_N_B_MARK,
MSIOF0_TXD_B_MARK, DACK2_MARK, IRQ2_MARK, INTC_IRQ2_N_MARK,
SSI_SDATA6_B_MARK, HRTS0_N_B_MARK, MSIOF0_RXD_B_MARK,
- ETH_CRS_DV_MARK, RMII_CRS_DV_MARK, STP_ISCLK_0_B_MARK,
- TS_SDEN0_D_MARK, GLO_Q0_C_MARK, SCL2_E_MARK,
- SCL2_CIS_E_MARK, ETH_RX_ER_MARK, RMII_RX_ER_MARK,
+ ETH_CRS_DV_MARK, STP_ISCLK_0_B_MARK,
+ TS_SDEN0_D_MARK, GLO_Q0_C_MARK, IIC2_SCL_E_MARK,
+ I2C2_SCL_E_MARK, ETH_RX_ER_MARK,
STP_ISD_0_B_MARK, TS_SPSYNC0_D_MARK, GLO_Q1_C_MARK,
- SDA2_E_MARK, SDA2_CIS_E_MARK, ETH_RXD0_MARK, RMII_RXD0_MARK,
+ IIC2_SDA_E_MARK, I2C2_SDA_E_MARK, ETH_RXD0_MARK,
STP_ISEN_0_B_MARK, TS_SDAT0_D_MARK, GLO_I0_C_MARK,
SCIFB1_SCK_G_MARK, SCK1_E_MARK, ETH_RXD1_MARK,
- RMII_RXD1_MARK, HRX0_E_MARK, STP_ISSYNC_0_B_MARK,
+ HRX0_E_MARK, STP_ISSYNC_0_B_MARK,
TS_SCK0_D_MARK, GLO_I1_C_MARK, SCIFB1_RXD_G_MARK,
- RX1_E_MARK, ETH_LINK_MARK, RMII_LINK_MARK, HTX0_E_MARK,
+ RX1_E_MARK, ETH_LINK_MARK, HTX0_E_MARK,
STP_IVCXO27_0_B_MARK, SCIFB1_TXD_G_MARK, TX1_E_MARK,
- ETH_REF_CLK_MARK, RMII_REF_CLK_MARK, HCTS0_N_E_MARK,
+ ETH_REF_CLK_MARK, HCTS0_N_E_MARK,
STP_IVCXO27_1_B_MARK, HRX0_F_MARK,
- ETH_MDIO_MARK, RMII_MDIO_MARK, HRTS0_N_E_MARK,
+ ETH_MDIO_MARK, HRTS0_N_E_MARK,
SIM0_D_C_MARK, HCTS0_N_F_MARK, ETH_TXD1_MARK,
- RMII_TXD1_MARK, HTX0_F_MARK, BPFCLK_G_MARK, RDS_CLK_F_MARK,
- ETH_TX_EN_MARK, RMII_TX_EN_MARK, SIM0_CLK_C_MARK,
- HRTS0_N_F_MARK, ETH_MAGIC_MARK, RMII_MAGIC_MARK,
- SIM0_RST_C_MARK, ETH_TXD0_MARK, RMII_TXD0_MARK,
+ HTX0_F_MARK, BPFCLK_G_MARK,
+ ETH_TX_EN_MARK, SIM0_CLK_C_MARK,
+ HRTS0_N_F_MARK, ETH_MAGIC_MARK,
+ SIM0_RST_C_MARK, ETH_TXD0_MARK,
STP_ISCLK_1_B_MARK, TS_SDEN1_C_MARK, GLO_SCLK_C_MARK,
- ETH_MDC_MARK, RMII_MDC_MARK, STP_ISD_1_B_MARK,
+ ETH_MDC_MARK, STP_ISD_1_B_MARK,
TS_SPSYNC1_C_MARK, GLO_SDATA_C_MARK, PWM0_MARK,
SCIFA2_SCK_C_MARK, STP_ISEN_1_B_MARK, TS_SDAT1_C_MARK,
GLO_SS_C_MARK, PWM1_MARK, SCIFA2_TXD_C_MARK,
STP_ISSYNC_1_B_MARK, TS_SCK1_C_MARK, GLO_RFON_C_MARK,
PCMOE_N_MARK, PWM2_MARK, PWMFSW0_MARK, SCIFA2_RXD_C_MARK,
- PCMWE_N_MARK, IECLK_C_MARK, DU1_DOTCLKIN_MARK,
+ PCMWE_N_MARK, IECLK_C_MARK, DU_DOTCLKIN1_MARK,
AUDIO_CLKC_MARK, AUDIO_CLKOUT_C_MARK, VI0_CLK_MARK,
- ATACS00_N_MARK, AVB_RXD1_MARK, MII_RXD1_MARK,
+ ATACS00_N_MARK, AVB_RXD1_MARK,
VI0_DATA0_VI0_B0_MARK, ATACS10_N_MARK, AVB_RXD2_MARK,
- MII_RXD2_MARK,
VI0_DATA1_VI0_B1_MARK, ATARD0_N_MARK, AVB_RXD3_MARK,
- MII_RXD3_MARK, VI0_DATA2_VI0_B2_MARK, ATAWR0_N_MARK,
+ VI0_DATA2_VI0_B2_MARK, ATAWR0_N_MARK,
AVB_RXD4_MARK, VI0_DATA3_VI0_B3_MARK, ATADIR0_N_MARK,
AVB_RXD5_MARK, VI0_DATA4_VI0_B4_MARK, ATAG0_N_MARK,
AVB_RXD6_MARK, VI0_DATA5_VI0_B5_MARK, EX_WAIT1_MARK,
AVB_RXD7_MARK, VI0_DATA6_VI0_B6_MARK, AVB_RX_ER_MARK,
- MII_RX_ER_MARK, VI0_DATA7_VI0_B7_MARK, AVB_RX_CLK_MARK,
- MII_RX_CLK_MARK, VI1_CLK_MARK, AVB_RX_DV_MARK,
- MII_RX_DV_MARK, VI1_DATA0_VI1_B0_MARK, SCIFA1_SCK_D_MARK,
- AVB_CRS_MARK, MII_CRS_MARK, VI1_DATA1_VI1_B1_MARK,
- SCIFA1_RXD_D_MARK, AVB_MDC_MARK, MII_MDC_MARK,
+ VI0_DATA7_VI0_B7_MARK, AVB_RX_CLK_MARK,
+ VI1_CLK_MARK, AVB_RX_DV_MARK,
+ VI1_DATA0_VI1_B0_MARK, SCIFA1_SCK_D_MARK,
+ AVB_CRS_MARK, VI1_DATA1_VI1_B1_MARK,
+ SCIFA1_RXD_D_MARK, AVB_MDC_MARK,
VI1_DATA2_VI1_B2_MARK, SCIFA1_TXD_D_MARK, AVB_MDIO_MARK,
- MII_MDIO_MARK, VI1_DATA3_VI1_B3_MARK, SCIFA1_CTS_N_D_MARK,
+ VI1_DATA3_VI1_B3_MARK, SCIFA1_CTS_N_D_MARK,
AVB_GTX_CLK_MARK, VI1_DATA4_VI1_B4_MARK, SCIFA1_RTS_N_D_MARK,
- AVB_MAGIC_MARK, MII_MAGIC_MARK, VI1_DATA5_VI1_B5_MARK,
+ AVB_MAGIC_MARK, VI1_DATA5_VI1_B5_MARK,
AVB_PHY_INT_MARK, VI1_DATA6_VI1_B6_MARK, AVB_GTXREFCLK_MARK,
SD0_CLK_MARK, VI1_DATA0_VI1_B0_B_MARK, SD0_CMD_MARK,
SCIFB1_SCK_B_MARK, VI1_DATA1_VI1_B1_B_MARK,
@@ -698,25 +638,25 @@ enum {
SD0_DAT2_MARK, SCIFB1_CTS_N_B_MARK, VI1_DATA4_VI1_B4_B_MARK,
SD0_DAT3_MARK, SCIFB1_RTS_N_B_MARK, VI1_DATA5_VI1_B5_B_MARK,
SD0_CD_MARK, MMC0_D6_MARK, TS_SDEN0_B_MARK, USB0_EXTP_MARK,
- GLO_SCLK_MARK, VI1_DATA6_VI1_B6_B_MARK, SCL1_B_MARK,
- SCL1_CIS_B_MARK, VI2_DATA6_VI2_B6_B_MARK, SD0_WP_MARK,
+ GLO_SCLK_MARK, VI1_DATA6_VI1_B6_B_MARK, IIC1_SCL_B_MARK,
+ I2C1_SCL_B_MARK, VI2_DATA6_VI2_B6_B_MARK, SD0_WP_MARK,
MMC0_D7_MARK, TS_SPSYNC0_B_MARK, USB0_IDIN_MARK,
- GLO_SDATA_MARK, VI1_DATA7_VI1_B7_B_MARK, SDA1_B_MARK,
- SDA1_CIS_B_MARK, VI2_DATA7_VI2_B7_B_MARK, SD1_CLK_MARK,
- AVB_TX_EN_MARK, MII_TX_EN_MARK, SD1_CMD_MARK,
- AVB_TX_ER_MARK, MII_TX_ER_MARK, SCIFB0_SCK_B_MARK,
- SD1_DAT0_MARK, AVB_TX_CLK_MARK, MII_TX_CLK_MARK,
+ GLO_SDATA_MARK, VI1_DATA7_VI1_B7_B_MARK, IIC1_SDA_B_MARK,
+ I2C1_SDA_B_MARK, VI2_DATA7_VI2_B7_B_MARK, SD1_CLK_MARK,
+ AVB_TX_EN_MARK, SD1_CMD_MARK,
+ AVB_TX_ER_MARK, SCIFB0_SCK_B_MARK,
+ SD1_DAT0_MARK, AVB_TX_CLK_MARK,
SCIFB0_RXD_B_MARK, SD1_DAT1_MARK, AVB_LINK_MARK,
- MII_LINK_MARK, SCIFB0_TXD_B_MARK, SD1_DAT2_MARK,
- AVB_COL_MARK, MII_COL_MARK, SCIFB0_CTS_N_B_MARK,
- SD1_DAT3_MARK, AVB_RXD0_MARK, MII_RXD0_MARK,
+ SCIFB0_TXD_B_MARK, SD1_DAT2_MARK,
+ AVB_COL_MARK, SCIFB0_CTS_N_B_MARK,
+ SD1_DAT3_MARK, AVB_RXD0_MARK,
SCIFB0_RTS_N_B_MARK, SD1_CD_MARK, MMC1_D6_MARK,
TS_SDEN1_MARK, USB1_EXTP_MARK, GLO_SS_MARK, VI0_CLK_B_MARK,
- SCL2_D_MARK, SCL2_CIS_D_MARK, SIM0_CLK_B_MARK,
+ IIC2_SCL_D_MARK, I2C2_SCL_D_MARK, SIM0_CLK_B_MARK,
VI3_CLK_B_MARK,
SD1_WP_MARK, MMC1_D7_MARK, TS_SPSYNC1_MARK, USB1_IDIN_MARK,
- GLO_RFON_MARK, VI1_CLK_B_MARK, SDA2_D_MARK, SDA2_CIS_D_MARK,
+ GLO_RFON_MARK, VI1_CLK_B_MARK, IIC2_SDA_D_MARK, I2C2_SDA_D_MARK,
SIM0_D_B_MARK, SD2_CLK_MARK, MMC0_CLK_MARK, SIM0_CLK_MARK,
VI0_DATA0_VI0_B0_B_MARK, TS_SDEN0_C_MARK, GLO_SCLK_B_MARK,
VI3_DATA0_B_MARK, SD2_CMD_MARK, MMC0_CMD_MARK, SIM0_D_MARK,
@@ -725,10 +665,10 @@ enum {
SD2_DAT0_MARK, MMC0_D0_MARK, FMCLK_B_MARK,
VI0_DATA2_VI0_B2_B_MARK, SCIFB1_RXD_E_MARK, RX1_D_MARK,
TS_SDAT0_C_MARK, GLO_SS_B_MARK, VI3_DATA2_B_MARK,
- SD2_DAT1_MARK, MMC0_D1_MARK, FMIN_B_MARK, RDS_DATA_MARK,
+ SD2_DAT1_MARK, MMC0_D1_MARK, FMIN_B_MARK,
VI0_DATA3_VI0_B3_B_MARK, SCIFB1_TXD_E_MARK, TX1_D_MARK,
TS_SCK0_C_MARK, GLO_RFON_B_MARK, VI3_DATA3_B_MARK,
- SD2_DAT2_MARK, MMC0_D2_MARK, BPFCLK_B_MARK, RDS_CLK_MARK,
+ SD2_DAT2_MARK, MMC0_D2_MARK, BPFCLK_B_MARK,
VI0_DATA4_VI0_B4_B_MARK, HRX0_D_MARK, TS_SDEN1_B_MARK,
GLO_Q0_B_MARK, VI3_DATA4_B_MARK, SD2_DAT3_MARK,
MMC0_D3_MARK, SIM0_RST_MARK, VI0_DATA5_VI0_B5_B_MARK,
@@ -748,12 +688,12 @@ enum {
SCKZ_MARK, SD3_CD_MARK, MMC1_D4_MARK, TS_SDAT1_MARK,
VSP_MARK, GLO_Q0_MARK, SIM0_RST_B_MARK, SD3_WP_MARK,
MMC1_D5_MARK, TS_SCK1_MARK, GLO_Q1_MARK, FMIN_C_MARK,
- RDS_DATA_B_MARK, FMIN_E_MARK, RDS_DATA_D_MARK, FMIN_F_MARK,
- RDS_DATA_E_MARK, MLB_CLK_MARK, SCL2_B_MARK, SCL2_CIS_B_MARK,
- MLB_SIG_MARK, SCIFB1_RXD_D_MARK, RX1_C_MARK, SDA2_B_MARK,
- SDA2_CIS_B_MARK, MLB_DAT_MARK, SPV_EVEN_MARK,
+ FMIN_E_MARK, FMIN_F_MARK,
+ MLB_CLK_MARK, IIC2_SCL_B_MARK, I2C2_SCL_B_MARK,
+ MLB_SIG_MARK, SCIFB1_RXD_D_MARK, RX1_C_MARK, IIC2_SDA_B_MARK,
+ I2C2_SDA_B_MARK, MLB_DAT_MARK,
SCIFB1_TXD_D_MARK, TX1_C_MARK, BPFCLK_C_MARK,
- RDS_CLK_B_MARK, SSI_SCK0129_MARK, CAN_CLK_B_MARK,
+ SSI_SCK0129_MARK, CAN_CLK_B_MARK,
MOUT0_MARK,
SSI_WS0129_MARK, CAN0_TX_B_MARK, MOUT1_MARK,
@@ -778,12 +718,12 @@ enum {
SSI_SDATA5_MARK, SCIFB1_TXD_MARK, IETX_B_MARK, DU2_DR2_MARK,
LCDOUT2_MARK, CAN_DEBUGOUT5_MARK, SSI_SCK6_MARK,
- SCIFB1_CTS_N_MARK, BPFCLK_D_MARK, RDS_CLK_C_MARK,
+ SCIFB1_CTS_N_MARK, BPFCLK_D_MARK,
DU2_DR3_MARK, LCDOUT3_MARK, CAN_DEBUGOUT6_MARK,
- BPFCLK_F_MARK, RDS_CLK_E_MARK, SSI_WS6_MARK,
+ BPFCLK_F_MARK, SSI_WS6_MARK,
SCIFB1_RTS_N_MARK, CAN0_TX_D_MARK, DU2_DR4_MARK,
LCDOUT4_MARK, CAN_DEBUGOUT7_MARK, SSI_SDATA6_MARK,
- FMIN_D_MARK, RDS_DATA_C_MARK, DU2_DR5_MARK, LCDOUT5_MARK,
+ FMIN_D_MARK, DU2_DR5_MARK, LCDOUT5_MARK,
CAN_DEBUGOUT8_MARK, SSI_SCK78_MARK, STP_IVCXO27_1_MARK,
SCK1_MARK, SCIFA1_SCK_MARK, DU2_DR6_MARK, LCDOUT6_MARK,
CAN_DEBUGOUT9_MARK, SSI_WS78_MARK, STP_ISCLK_1_MARK,
@@ -791,8 +731,8 @@ enum {
LCDOUT7_MARK, CAN_DEBUGOUT10_MARK, SSI_SDATA7_MARK,
STP_ISD_1_MARK, SCIFB2_RXD_MARK, SCIFA2_RTS_N_MARK,
TCLK2_MARK, QSTVA_QVS_MARK, CAN_DEBUGOUT11_MARK,
- BPFCLK_E_MARK, RDS_CLK_D_MARK, SSI_SDATA7_B_MARK,
- FMIN_G_MARK, RDS_DATA_F_MARK, SSI_SDATA8_MARK,
+ BPFCLK_E_MARK, SSI_SDATA7_B_MARK,
+ FMIN_G_MARK, SSI_SDATA8_MARK,
STP_ISEN_1_MARK, SCIFB2_TXD_MARK, CAN0_TX_C_MARK,
CAN_DEBUGOUT12_MARK, SSI_SDATA8_B_MARK, SSI_SDATA9_MARK,
STP_ISSYNC_1_MARK, SCIFB2_CTS_N_MARK, SSI_WS1_MARK,
@@ -802,28 +742,28 @@ enum {
AUDIO_CLKB_MARK, SCIF_CLK_MARK, CAN0_RX_D_MARK,
DVC_MUTE_MARK, CAN0_RX_C_MARK, CAN_DEBUGOUT15_MARK,
REMOCON_MARK, SCIFA0_SCK_MARK, HSCK1_MARK, SCK0_MARK,
- MSIOF3_SS2_MARK, DU2_DG2_MARK, LCDOUT10_MARK, SDA1_C_MARK,
- SDA1_CIS_C_MARK, SCIFA0_RXD_MARK, HRX1_MARK, RX0_MARK,
+ MSIOF3_SS2_MARK, DU2_DG2_MARK, LCDOUT10_MARK, IIC1_SDA_C_MARK,
+ I2C1_SDA_C_MARK, SCIFA0_RXD_MARK, HRX1_MARK, RX0_MARK,
DU2_DR0_MARK, LCDOUT0_MARK, SCIFA0_TXD_MARK, HTX1_MARK,
TX0_MARK, DU2_DR1_MARK, LCDOUT1_MARK, SCIFA0_CTS_N_MARK,
HCTS1_N_MARK, CTS0_N_MARK, MSIOF3_SYNC_MARK, DU2_DG3_MARK,
- LCDOUT11_MARK, PWM0_B_MARK, SCL1_C_MARK, SCL1_CIS_C_MARK,
- SCIFA0_RTS_N_MARK, HRTS1_N_MARK, RTS0_N_TANS_MARK,
+ LCDOUT11_MARK, PWM0_B_MARK, IIC1_SCL_C_MARK, I2C1_SCL_C_MARK,
+ SCIFA0_RTS_N_MARK, HRTS1_N_MARK, RTS0_N_MARK,
MSIOF3_SS1_MARK, DU2_DG0_MARK, LCDOUT8_MARK, PWM1_B_MARK,
SCIFA1_RXD_MARK, AD_DI_MARK, RX1_MARK,
DU2_EXODDF_DU2_ODDF_DISP_CDE_MARK, QCPV_QDE_MARK,
SCIFA1_TXD_MARK, AD_DO_MARK, TX1_MARK, DU2_DG1_MARK,
LCDOUT9_MARK, SCIFA1_CTS_N_MARK, AD_CLK_MARK,
CTS1_N_MARK, MSIOF3_RXD_MARK, DU0_DOTCLKOUT_MARK, QCLK_MARK,
- SCIFA1_RTS_N_MARK, AD_NCS_N_MARK, RTS1_N_TANS_MARK,
+ SCIFA1_RTS_N_MARK, AD_NCS_N_MARK, RTS1_N_MARK,
MSIOF3_TXD_MARK, DU1_DOTCLKOUT_MARK, QSTVB_QVE_MARK,
HRTS0_N_C_MARK,
- SCIFA2_SCK_MARK, FMCLK_MARK, MSIOF3_SCK_MARK, DU2_DG7_MARK,
+ SCIFA2_SCK_MARK, FMCLK_MARK, SCK2_MARK, MSIOF3_SCK_MARK, DU2_DG7_MARK,
LCDOUT15_MARK, SCIF_CLK_B_MARK, SCIFA2_RXD_MARK, FMIN_MARK,
- DU2_DB0_MARK, LCDOUT16_MARK, SCL2_MARK, SCL2_CIS_MARK,
- SCIFA2_TXD_MARK, BPFCLK_MARK, DU2_DB1_MARK, LCDOUT17_MARK,
- SDA2_MARK, SDA2_CIS_MARK, HSCK0_MARK, TS_SDEN0_MARK,
+ TX2_MARK, DU2_DB0_MARK, LCDOUT16_MARK, IIC2_SCL_MARK, I2C2_SCL_MARK,
+ SCIFA2_TXD_MARK, BPFCLK_MARK, RX2_MARK, DU2_DB1_MARK, LCDOUT17_MARK,
+ IIC2_SDA_MARK, I2C2_SDA_MARK, HSCK0_MARK, TS_SDEN0_MARK,
DU2_DG4_MARK, LCDOUT12_MARK, HCTS0_N_C_MARK, HRX0_MARK,
DU2_DB2_MARK, LCDOUT18_MARK, HTX0_MARK, DU2_DB3_MARK,
LCDOUT19_MARK, HCTS0_N_MARK, SSI_SCK9_MARK, DU2_DB4_MARK,
@@ -831,20 +771,20 @@ enum {
LCDOUT21_MARK, MSIOF0_SCK_MARK, TS_SDAT0_MARK, ADICLK_MARK,
DU2_DB6_MARK, LCDOUT22_MARK, MSIOF0_SYNC_MARK, TS_SCK0_MARK,
SSI_SCK2_MARK, ADIDATA_MARK, DU2_DB7_MARK, LCDOUT23_MARK,
- SCIFA2_RXD_B_MARK, MSIOF0_SS1_MARK, ADICHS0_MARK,
+ HRX0_C_MARK, MSIOF0_SS1_MARK, ADICHS0_MARK,
DU2_DG5_MARK, LCDOUT13_MARK, MSIOF0_TXD_MARK, ADICHS1_MARK,
DU2_DG6_MARK, LCDOUT14_MARK,
MSIOF0_SS2_MARK, AUDIO_CLKOUT_MARK, ADICHS2_MARK,
DU2_DISP_MARK, QPOLA_MARK, HTX0_C_MARK, SCIFA2_TXD_B_MARK,
MSIOF0_RXD_MARK, TS_SPSYNC0_MARK, SSI_WS2_MARK,
- ADICS_SAMP_MARK, DU2_CDE_MARK, QPOLB_MARK, HRX0_C_MARK,
+ ADICS_SAMP_MARK, DU2_CDE_MARK, QPOLB_MARK, SCIFA2_RXD_B_MARK,
USB1_PWEN_MARK, AUDIO_CLKOUT_D_MARK, USB1_OVC_MARK,
TCLK1_B_MARK,
PINMUX_MARK_END,
};
-static const pinmux_enum_t pinmux_data[] = {
+static const u16 pinmux_data[] = {
PINMUX_DATA_GP_ALL(), /* PINMUX_DATA(GP_M_N_DATA, GP_M_N_FN...), */
PINMUX_DATA(VI1_DATA7_VI1_B7_MARK, FN_VI1_DATA7_VI1_B7),
@@ -892,22 +832,22 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_MODSEL_DATA(IP0_19_16, VI0_R1_B, SEL_VI0_1),
PINMUX_IPSR_MODSEL_DATA(IP0_19_16, TX0_B, SEL_SCIF0_1),
PINMUX_IPSR_DATA(IP0_22_20, D6),
- PINMUX_IPSR_MODSEL_DATA(IP0_22_20, SCL2_C, SEL_IIC2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP0_22_20, IIC2_SCL_C, SEL_IIC2_2),
PINMUX_IPSR_MODSEL_DATA(IP0_22_20, VI3_DATA6, SEL_VI3_0),
PINMUX_IPSR_MODSEL_DATA(IP0_22_20, VI0_R2, SEL_VI0_0),
PINMUX_IPSR_MODSEL_DATA(IP0_22_20, VI0_R2_B, SEL_VI0_1),
- PINMUX_IPSR_MODSEL_DATA(IP0_22_20, SCL2_CIS_C, SEL_I2C2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP0_22_20, I2C2_SCL_C, SEL_I2C2_2),
PINMUX_IPSR_DATA(IP0_26_23, D7),
PINMUX_IPSR_MODSEL_DATA(IP0_26_23, AD_DI_B, SEL_ADI_1),
- PINMUX_IPSR_MODSEL_DATA(IP0_26_23, SDA2_C, SEL_IIC2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP0_26_23, IIC2_SDA_C, SEL_IIC2_2),
PINMUX_IPSR_MODSEL_DATA(IP0_26_23, VI3_DATA7, SEL_VI3_0),
PINMUX_IPSR_MODSEL_DATA(IP0_26_23, VI0_R3, SEL_VI0_0),
PINMUX_IPSR_MODSEL_DATA(IP0_26_23, VI0_R3_B, SEL_VI0_1),
- PINMUX_IPSR_MODSEL_DATA(IP0_26_23, SDA2_CIS_C, SEL_I2C2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP0_26_23, I2C2_SDA_C, SEL_I2C2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP0_26_23, TCLK1, SEL_TMU1_0),
PINMUX_IPSR_DATA(IP0_30_27, D8),
PINMUX_IPSR_MODSEL_DATA(IP0_30_27, SCIFA1_SCK_C, SEL_SCIFA1_2),
PINMUX_IPSR_DATA(IP0_30_27, AVB_TXD0),
- PINMUX_IPSR_DATA(IP0_30_27, MII_TXD0),
PINMUX_IPSR_MODSEL_DATA(IP0_30_27, VI0_G0, SEL_VI0_0),
PINMUX_IPSR_MODSEL_DATA(IP0_30_27, VI0_G0_B, SEL_VI0_1),
PINMUX_IPSR_MODSEL_DATA(IP0_30_27, VI2_DATA0_VI2_B0, SEL_VI2_0),
@@ -915,21 +855,18 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_DATA(IP1_3_0, D9),
PINMUX_IPSR_MODSEL_DATA(IP1_3_0, SCIFA1_RXD_C, SEL_SCIFA1_2),
PINMUX_IPSR_DATA(IP1_3_0, AVB_TXD1),
- PINMUX_IPSR_DATA(IP1_3_0, MII_TXD1),
PINMUX_IPSR_MODSEL_DATA(IP1_3_0, VI0_G1, SEL_VI0_0),
PINMUX_IPSR_MODSEL_DATA(IP1_3_0, VI0_G1_B, SEL_VI0_1),
PINMUX_IPSR_MODSEL_DATA(IP1_3_0, VI2_DATA1_VI2_B1, SEL_VI2_0),
PINMUX_IPSR_DATA(IP1_7_4, D10),
PINMUX_IPSR_MODSEL_DATA(IP1_7_4, SCIFA1_TXD_C, SEL_SCIFA1_2),
PINMUX_IPSR_DATA(IP1_7_4, AVB_TXD2),
- PINMUX_IPSR_DATA(IP1_7_4, MII_TXD2),
PINMUX_IPSR_MODSEL_DATA(IP1_7_4, VI0_G2, SEL_VI0_0),
PINMUX_IPSR_MODSEL_DATA(IP1_7_4, VI0_G2_B, SEL_VI0_1),
PINMUX_IPSR_MODSEL_DATA(IP1_7_4, VI2_DATA2_VI2_B2, SEL_VI2_0),
PINMUX_IPSR_DATA(IP1_11_8, D11),
PINMUX_IPSR_MODSEL_DATA(IP1_11_8, SCIFA1_CTS_N_C, SEL_SCIFA1_2),
PINMUX_IPSR_DATA(IP1_11_8, AVB_TXD3),
- PINMUX_IPSR_DATA(IP1_11_8, MII_TXD3),
PINMUX_IPSR_MODSEL_DATA(IP1_11_8, VI0_G3, SEL_VI0_0),
PINMUX_IPSR_MODSEL_DATA(IP1_11_8, VI0_G3_B, SEL_VI0_1),
PINMUX_IPSR_MODSEL_DATA(IP1_11_8, VI2_DATA3_VI2_B3, SEL_VI2_0),
@@ -940,7 +877,7 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_MODSEL_DATA(IP1_14_12, VI0_HSYNC_N_B, SEL_VI0_1),
PINMUX_IPSR_MODSEL_DATA(IP1_14_12, VI2_DATA4_VI2_B4, SEL_VI2_0),
PINMUX_IPSR_DATA(IP1_17_15, D13),
- PINMUX_IPSR_MODSEL_DATA(IP1_17_15, AVB_TXD5, SEL_SCIFA1_2),
+ PINMUX_IPSR_DATA(IP1_17_15, AVB_TXD5),
PINMUX_IPSR_MODSEL_DATA(IP1_17_15, VI0_VSYNC_N, SEL_VI0_0),
PINMUX_IPSR_MODSEL_DATA(IP1_17_15, VI0_VSYNC_N_B, SEL_VI0_1),
PINMUX_IPSR_MODSEL_DATA(IP1_17_15, VI2_DATA5_VI2_B5, SEL_VI2_0),
@@ -988,6 +925,7 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_MODSEL_DATA(IP2_21_18, VI0_R4, SEL_VI0_0),
PINMUX_IPSR_MODSEL_DATA(IP2_21_18, VI0_R4_B, SEL_VI0_1),
PINMUX_IPSR_MODSEL_DATA(IP2_21_18, SCIFB2_RXD_C, SEL_SCIFB2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP2_21_18, RX2_B, SEL_SCIF2_1),
PINMUX_IPSR_MODSEL_DATA(IP2_21_18, VI2_DATA0_VI2_B0_B, SEL_VI2_1),
PINMUX_IPSR_DATA(IP2_25_22, A9),
PINMUX_IPSR_MODSEL_DATA(IP2_25_22, SCIFA1_CTS_N_B, SEL_SCIFA1_1),
@@ -995,6 +933,7 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_MODSEL_DATA(IP2_25_22, VI0_R5, SEL_VI0_0),
PINMUX_IPSR_MODSEL_DATA(IP2_25_22, VI0_R5_B, SEL_VI0_1),
PINMUX_IPSR_MODSEL_DATA(IP2_25_22, SCIFB2_TXD_C, SEL_SCIFB2_2),
+ PINMUX_IPSR_MODSEL_DATA(IP2_25_22, TX2_B, SEL_SCIF2_1),
PINMUX_IPSR_MODSEL_DATA(IP2_25_22, VI2_DATA1_VI2_B1_B, SEL_VI2_1),
PINMUX_IPSR_DATA(IP2_28_26, A10),
PINMUX_IPSR_MODSEL_DATA(IP2_28_26, SSI_SDATA5_B, SEL_SSI5_1),
@@ -1009,14 +948,14 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_MODSEL_DATA(IP3_3_0, VI1_R0, SEL_VI1_0),
PINMUX_IPSR_MODSEL_DATA(IP3_3_0, VI1_R0_B, SEL_VI1_1),
PINMUX_IPSR_DATA(IP3_3_0, VI2_G0),
- PINMUX_IPSR_DATA(IP3_3_0, VI2_DATA3_VI2_B3_B),
+ PINMUX_IPSR_MODSEL_DATA(IP3_3_0, VI2_DATA3_VI2_B3_B, SEL_VI2_1),
PINMUX_IPSR_DATA(IP3_7_4, A12),
PINMUX_IPSR_MODSEL_DATA(IP3_7_4, SCIFB2_RXD_B, SEL_SCIFB2_1),
PINMUX_IPSR_DATA(IP3_7_4, MSIOF2_TXD),
PINMUX_IPSR_MODSEL_DATA(IP3_7_4, VI1_R1, SEL_VI1_0),
PINMUX_IPSR_MODSEL_DATA(IP3_7_4, VI1_R1_B, SEL_VI1_1),
PINMUX_IPSR_DATA(IP3_7_4, VI2_G1),
- PINMUX_IPSR_DATA(IP3_7_4, VI2_DATA4_VI2_B4_B),
+ PINMUX_IPSR_MODSEL_DATA(IP3_7_4, VI2_DATA4_VI2_B4_B, SEL_VI2_1),
PINMUX_IPSR_DATA(IP3_11_8, A13),
PINMUX_IPSR_MODSEL_DATA(IP3_11_8, SCIFB2_RTS_N_B, SEL_SCIFB2_1),
PINMUX_IPSR_DATA(IP3_11_8, EX_WAIT2),
@@ -1024,7 +963,7 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_MODSEL_DATA(IP3_11_8, VI1_R2, SEL_VI1_0),
PINMUX_IPSR_MODSEL_DATA(IP3_11_8, VI1_R2_B, SEL_VI1_1),
PINMUX_IPSR_DATA(IP3_11_8, VI2_G2),
- PINMUX_IPSR_MODSEL_DATA(IP3_11_8, VI2_DATA5_VI2_B5_B, SEL_VI2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP3_11_8, VI2_DATA5_VI2_B5_B, SEL_VI2_1),
PINMUX_IPSR_DATA(IP3_14_12, A14),
PINMUX_IPSR_MODSEL_DATA(IP3_14_12, SCIFB2_TXD_B, SEL_SCIFB2_1),
PINMUX_IPSR_DATA(IP3_14_12, ATACS11_N),
@@ -1116,14 +1055,14 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_MODSEL_DATA(IP5_2_0, VI1_G1, SEL_VI1_0),
PINMUX_IPSR_MODSEL_DATA(IP5_2_0, VI1_G1_B, SEL_VI1_1),
PINMUX_IPSR_DATA(IP5_2_0, VI2_R3),
- PINMUX_IPSR_MODSEL_DATA(IP5_5_3, EX_CS4_N, SEL_I2C1_0),
+ PINMUX_IPSR_DATA(IP5_5_3, EX_CS4_N),
PINMUX_IPSR_MODSEL_DATA(IP5_5_3, MSIOF1_SCK_B, SEL_SOF1_1),
PINMUX_IPSR_DATA(IP5_5_3, VI3_HSYNC_N),
PINMUX_IPSR_MODSEL_DATA(IP5_5_3, VI2_HSYNC_N, SEL_VI2_0),
- PINMUX_IPSR_MODSEL_DATA(IP5_5_3, SCL1, SEL_IIC1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_5_3, IIC1_SCL, SEL_IIC1_0),
PINMUX_IPSR_MODSEL_DATA(IP5_5_3, VI2_HSYNC_N_B, SEL_VI2_1),
PINMUX_IPSR_DATA(IP5_5_3, INTC_EN0_N),
- PINMUX_IPSR_MODSEL_DATA(IP5_5_3, SCL1_CIS, SEL_I2C1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_5_3, I2C1_SCL, SEL_I2C1_0),
PINMUX_IPSR_DATA(IP5_9_6, EX_CS5_N),
PINMUX_IPSR_MODSEL_DATA(IP5_9_6, CAN0_RX, SEL_CAN0_0),
PINMUX_IPSR_MODSEL_DATA(IP5_9_6, MSIOF1_RXD_B, SEL_SOF1_1),
@@ -1131,9 +1070,9 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_MODSEL_DATA(IP5_9_6, VI1_G2, SEL_VI1_0),
PINMUX_IPSR_MODSEL_DATA(IP5_9_6, VI1_G2_B, SEL_VI1_1),
PINMUX_IPSR_DATA(IP5_9_6, VI2_R4),
- PINMUX_IPSR_MODSEL_DATA(IP5_9_6, SDA1, SEL_IIC1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_9_6, IIC1_SDA, SEL_IIC1_0),
PINMUX_IPSR_DATA(IP5_9_6, INTC_EN1_N),
- PINMUX_IPSR_MODSEL_DATA(IP5_9_6, SDA1_CIS, SEL_I2C1_0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_9_6, I2C1_SDA, SEL_I2C1_0),
PINMUX_IPSR_DATA(IP5_12_10, BS_N),
PINMUX_IPSR_MODSEL_DATA(IP5_12_10, IETX, SEL_IEB_0),
PINMUX_IPSR_MODSEL_DATA(IP5_12_10, HTX1_B, SEL_HSCIF1_1),
@@ -1163,7 +1102,7 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_DATA(IP5_23_21, VI2_R6),
PINMUX_IPSR_MODSEL_DATA(IP5_23_21, SCIFA0_CTS_N_B, SEL_SCFA_1),
PINMUX_IPSR_MODSEL_DATA(IP5_23_21, IERX_C, SEL_IEB_2),
- PINMUX_IPSR_DATA(IP5_26_24, EX_WAIT0),
+ PINMUX_IPSR_MODSEL_DATA(IP5_26_24, EX_WAIT0, SEL_LBS_0),
PINMUX_IPSR_DATA(IP5_26_24, IRQ3),
PINMUX_IPSR_DATA(IP5_26_24, INTC_IRQ3_N),
PINMUX_IPSR_MODSEL_DATA(IP5_26_24, VI3_CLK, SEL_VI3_0),
@@ -1205,28 +1144,24 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_MODSEL_DATA(IP6_13_11, HRTS0_N_B, SEL_HSCIF0_1),
PINMUX_IPSR_MODSEL_DATA(IP6_13_11, MSIOF0_RXD_B, SEL_SOF0_1),
PINMUX_IPSR_DATA(IP6_16_14, ETH_CRS_DV),
- PINMUX_IPSR_DATA(IP6_16_14, RMII_CRS_DV),
PINMUX_IPSR_MODSEL_DATA(IP6_16_14, STP_ISCLK_0_B, SEL_SSP_1),
PINMUX_IPSR_MODSEL_DATA(IP6_16_14, TS_SDEN0_D, SEL_TSIF0_3),
PINMUX_IPSR_MODSEL_DATA(IP6_16_14, GLO_Q0_C, SEL_GPS_2),
- PINMUX_IPSR_MODSEL_DATA(IP6_16_14, SCL2_E, SEL_IIC2_4),
- PINMUX_IPSR_MODSEL_DATA(IP6_16_14, SCL2_CIS_E, SEL_I2C2_4),
+ PINMUX_IPSR_MODSEL_DATA(IP6_16_14, IIC2_SCL_E, SEL_IIC2_4),
+ PINMUX_IPSR_MODSEL_DATA(IP6_16_14, I2C2_SCL_E, SEL_I2C2_4),
PINMUX_IPSR_DATA(IP6_19_17, ETH_RX_ER),
- PINMUX_IPSR_DATA(IP6_19_17, RMII_RX_ER),
PINMUX_IPSR_MODSEL_DATA(IP6_19_17, STP_ISD_0_B, SEL_SSP_1),
PINMUX_IPSR_MODSEL_DATA(IP6_19_17, TS_SPSYNC0_D, SEL_TSIF0_3),
PINMUX_IPSR_MODSEL_DATA(IP6_19_17, GLO_Q1_C, SEL_GPS_2),
- PINMUX_IPSR_MODSEL_DATA(IP6_19_17, SDA2_E, SEL_IIC2_4),
- PINMUX_IPSR_MODSEL_DATA(IP6_19_17, SDA2_CIS_E, SEL_I2C2_4),
+ PINMUX_IPSR_MODSEL_DATA(IP6_19_17, IIC2_SDA_E, SEL_IIC2_4),
+ PINMUX_IPSR_MODSEL_DATA(IP6_19_17, I2C2_SDA_E, SEL_I2C2_4),
PINMUX_IPSR_DATA(IP6_22_20, ETH_RXD0),
- PINMUX_IPSR_DATA(IP6_22_20, RMII_RXD0),
PINMUX_IPSR_MODSEL_DATA(IP6_22_20, STP_ISEN_0_B, SEL_SSP_1),
PINMUX_IPSR_MODSEL_DATA(IP6_22_20, TS_SDAT0_D, SEL_TSIF0_3),
PINMUX_IPSR_MODSEL_DATA(IP6_22_20, GLO_I0_C, SEL_GPS_2),
PINMUX_IPSR_MODSEL_DATA(IP6_22_20, SCIFB1_SCK_G, SEL_SCIFB1_6),
PINMUX_IPSR_MODSEL_DATA(IP6_22_20, SCK1_E, SEL_SCIF1_4),
PINMUX_IPSR_DATA(IP6_25_23, ETH_RXD1),
- PINMUX_IPSR_DATA(IP6_25_23, RMII_RXD1),
PINMUX_IPSR_MODSEL_DATA(IP6_25_23, HRX0_E, SEL_HSCIF0_4),
PINMUX_IPSR_MODSEL_DATA(IP6_25_23, STP_ISSYNC_0_B, SEL_SSP_1),
PINMUX_IPSR_MODSEL_DATA(IP6_25_23, TS_SCK0_D, SEL_TSIF0_3),
@@ -1234,41 +1169,32 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_MODSEL_DATA(IP6_25_23, SCIFB1_RXD_G, SEL_SCIFB1_6),
PINMUX_IPSR_MODSEL_DATA(IP6_25_23, RX1_E, SEL_SCIF1_4),
PINMUX_IPSR_DATA(IP6_28_26, ETH_LINK),
- PINMUX_IPSR_DATA(IP6_28_26, RMII_LINK),
PINMUX_IPSR_MODSEL_DATA(IP6_28_26, HTX0_E, SEL_HSCIF0_4),
PINMUX_IPSR_MODSEL_DATA(IP6_28_26, STP_IVCXO27_0_B, SEL_SSP_1),
PINMUX_IPSR_MODSEL_DATA(IP6_28_26, SCIFB1_TXD_G, SEL_SCIFB1_6),
PINMUX_IPSR_MODSEL_DATA(IP6_28_26, TX1_E, SEL_SCIF1_4),
PINMUX_IPSR_DATA(IP6_31_29, ETH_REF_CLK),
- PINMUX_IPSR_DATA(IP6_31_29, RMII_REF_CLK),
PINMUX_IPSR_MODSEL_DATA(IP6_31_29, HCTS0_N_E, SEL_HSCIF0_4),
PINMUX_IPSR_MODSEL_DATA(IP6_31_29, STP_IVCXO27_1_B, SEL_SSP_1),
PINMUX_IPSR_MODSEL_DATA(IP6_31_29, HRX0_F, SEL_HSCIF0_5),
PINMUX_IPSR_DATA(IP7_2_0, ETH_MDIO),
- PINMUX_IPSR_DATA(IP7_2_0, RMII_MDIO),
PINMUX_IPSR_MODSEL_DATA(IP7_2_0, HRTS0_N_E, SEL_HSCIF0_4),
PINMUX_IPSR_MODSEL_DATA(IP7_2_0, SIM0_D_C, SEL_SIM_2),
PINMUX_IPSR_MODSEL_DATA(IP7_2_0, HCTS0_N_F, SEL_HSCIF0_5),
PINMUX_IPSR_DATA(IP7_5_3, ETH_TXD1),
- PINMUX_IPSR_DATA(IP7_5_3, RMII_TXD1),
- PINMUX_IPSR_MODSEL_DATA(IP7_5_3, HTX0_F, SEL_HSCIF0_4),
- PINMUX_IPSR_MODSEL_DATA(IP7_5_3, BPFCLK_G, SEL_SIM_2),
- PINMUX_IPSR_MODSEL_DATA(IP7_5_3, RDS_CLK_F, SEL_HSCIF0_5),
+ PINMUX_IPSR_MODSEL_DATA(IP7_5_3, HTX0_F, SEL_HSCIF0_5),
+ PINMUX_IPSR_MODSEL_DATA(IP7_5_3, BPFCLK_G, SEL_FM_6),
PINMUX_IPSR_DATA(IP7_7_6, ETH_TX_EN),
- PINMUX_IPSR_DATA(IP7_7_6, RMII_TX_EN),
PINMUX_IPSR_MODSEL_DATA(IP7_7_6, SIM0_CLK_C, SEL_SIM_2),
PINMUX_IPSR_MODSEL_DATA(IP7_7_6, HRTS0_N_F, SEL_HSCIF0_5),
PINMUX_IPSR_DATA(IP7_9_8, ETH_MAGIC),
- PINMUX_IPSR_DATA(IP7_9_8, RMII_MAGIC),
PINMUX_IPSR_MODSEL_DATA(IP7_9_8, SIM0_RST_C, SEL_SIM_2),
PINMUX_IPSR_DATA(IP7_12_10, ETH_TXD0),
- PINMUX_IPSR_DATA(IP7_12_10, RMII_TXD0),
PINMUX_IPSR_MODSEL_DATA(IP7_12_10, STP_ISCLK_1_B, SEL_SSP_1),
PINMUX_IPSR_MODSEL_DATA(IP7_12_10, TS_SDEN1_C, SEL_TSIF1_2),
PINMUX_IPSR_MODSEL_DATA(IP7_12_10, GLO_SCLK_C, SEL_GPS_2),
PINMUX_IPSR_DATA(IP7_15_13, ETH_MDC),
- PINMUX_IPSR_DATA(IP7_15_13, RMII_MDC),
PINMUX_IPSR_MODSEL_DATA(IP7_15_13, STP_ISD_1_B, SEL_SSP_1),
PINMUX_IPSR_MODSEL_DATA(IP7_15_13, TS_SPSYNC1_C, SEL_TSIF1_2),
PINMUX_IPSR_MODSEL_DATA(IP7_15_13, GLO_SDATA_C, SEL_GPS_2),
@@ -1288,22 +1214,19 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_MODSEL_DATA(IP7_24_22, SCIFA2_RXD_C, SEL_SCIFA2_2),
PINMUX_IPSR_DATA(IP7_24_22, PCMWE_N),
PINMUX_IPSR_MODSEL_DATA(IP7_24_22, IECLK_C, SEL_IEB_2),
- PINMUX_IPSR_DATA(IP7_26_25, DU1_DOTCLKIN),
+ PINMUX_IPSR_DATA(IP7_26_25, DU_DOTCLKIN1),
PINMUX_IPSR_DATA(IP7_26_25, AUDIO_CLKC),
PINMUX_IPSR_DATA(IP7_26_25, AUDIO_CLKOUT_C),
PINMUX_IPSR_MODSEL_DATA(IP7_28_27, VI0_CLK, SEL_VI0_0),
PINMUX_IPSR_DATA(IP7_28_27, ATACS00_N),
PINMUX_IPSR_DATA(IP7_28_27, AVB_RXD1),
- PINMUX_IPSR_DATA(IP7_28_27, MII_RXD1),
PINMUX_IPSR_MODSEL_DATA(IP7_30_29, VI0_DATA0_VI0_B0, SEL_VI0_0),
PINMUX_IPSR_DATA(IP7_30_29, ATACS10_N),
PINMUX_IPSR_DATA(IP7_30_29, AVB_RXD2),
- PINMUX_IPSR_DATA(IP7_30_29, MII_RXD2),
PINMUX_IPSR_MODSEL_DATA(IP8_1_0, VI0_DATA1_VI0_B1, SEL_VI0_0),
PINMUX_IPSR_DATA(IP8_1_0, ATARD0_N),
PINMUX_IPSR_DATA(IP8_1_0, AVB_RXD3),
- PINMUX_IPSR_DATA(IP8_1_0, MII_RXD3),
PINMUX_IPSR_MODSEL_DATA(IP8_3_2, VI0_DATA2_VI0_B2, SEL_VI0_0),
PINMUX_IPSR_DATA(IP8_3_2, ATAWR0_N),
PINMUX_IPSR_DATA(IP8_3_2, AVB_RXD4),
@@ -1318,34 +1241,27 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_DATA(IP8_9_8, AVB_RXD7),
PINMUX_IPSR_MODSEL_DATA(IP8_11_10, VI0_DATA6_VI0_B6, SEL_VI0_0),
PINMUX_IPSR_DATA(IP8_11_10, AVB_RX_ER),
- PINMUX_IPSR_DATA(IP8_11_10, MII_RX_ER),
PINMUX_IPSR_MODSEL_DATA(IP8_13_12, VI0_DATA7_VI0_B7, SEL_VI0_0),
PINMUX_IPSR_DATA(IP8_13_12, AVB_RX_CLK),
- PINMUX_IPSR_DATA(IP8_13_12, MII_RX_CLK),
PINMUX_IPSR_MODSEL_DATA(IP8_15_14, VI1_CLK, SEL_VI1_0),
PINMUX_IPSR_DATA(IP8_15_14, AVB_RX_DV),
- PINMUX_IPSR_DATA(IP8_15_14, MII_RX_DV),
PINMUX_IPSR_MODSEL_DATA(IP8_17_16, VI1_DATA0_VI1_B0, SEL_VI1_0),
PINMUX_IPSR_MODSEL_DATA(IP8_17_16, SCIFA1_SCK_D, SEL_SCIFA1_3),
PINMUX_IPSR_DATA(IP8_17_16, AVB_CRS),
- PINMUX_IPSR_DATA(IP8_17_16, MII_CRS),
PINMUX_IPSR_MODSEL_DATA(IP8_19_18, VI1_DATA1_VI1_B1, SEL_VI1_0),
PINMUX_IPSR_MODSEL_DATA(IP8_19_18, SCIFA1_RXD_D, SEL_SCIFA1_3),
PINMUX_IPSR_DATA(IP8_19_18, AVB_MDC),
- PINMUX_IPSR_DATA(IP8_19_18, MII_MDC),
PINMUX_IPSR_MODSEL_DATA(IP8_21_20, VI1_DATA2_VI1_B2, SEL_VI1_0),
PINMUX_IPSR_MODSEL_DATA(IP8_21_20, SCIFA1_TXD_D, SEL_SCIFA1_3),
PINMUX_IPSR_DATA(IP8_21_20, AVB_MDIO),
- PINMUX_IPSR_DATA(IP8_21_20, MII_MDIO),
PINMUX_IPSR_MODSEL_DATA(IP8_23_22, VI1_DATA3_VI1_B3, SEL_VI1_0),
PINMUX_IPSR_MODSEL_DATA(IP8_23_22, SCIFA1_CTS_N_D, SEL_SCIFA1_3),
PINMUX_IPSR_DATA(IP8_23_22, AVB_GTX_CLK),
PINMUX_IPSR_MODSEL_DATA(IP8_25_24, VI1_DATA4_VI1_B4, SEL_VI1_0),
PINMUX_IPSR_MODSEL_DATA(IP8_25_24, SCIFA1_RTS_N_D, SEL_SCIFA1_3),
PINMUX_IPSR_DATA(IP8_25_24, AVB_MAGIC),
- PINMUX_IPSR_DATA(IP8_25_24, MII_MAGIC),
PINMUX_IPSR_MODSEL_DATA(IP8_26, VI1_DATA5_VI1_B5, SEL_VI1_0),
- PINMUX_IPSR_MODSEL_DATA(IP8_26, AVB_PHY_INT, SEL_SCIFA1_3),
+ PINMUX_IPSR_DATA(IP8_26, AVB_PHY_INT),
PINMUX_IPSR_MODSEL_DATA(IP8_27, VI1_DATA6_VI1_B6, SEL_VI1_0),
PINMUX_IPSR_DATA(IP8_27, AVB_GTXREFCLK),
PINMUX_IPSR_DATA(IP8_28, SD0_CLK),
@@ -1372,8 +1288,8 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_DATA(IP9_11_8, USB0_EXTP),
PINMUX_IPSR_MODSEL_DATA(IP9_11_8, GLO_SCLK, SEL_GPS_0),
PINMUX_IPSR_MODSEL_DATA(IP9_11_8, VI1_DATA6_VI1_B6_B, SEL_VI1_1),
- PINMUX_IPSR_MODSEL_DATA(IP9_11_8, SCL1_B, SEL_IIC1_1),
- PINMUX_IPSR_MODSEL_DATA(IP9_11_8, SCL1_CIS_B, SEL_I2C1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_11_8, IIC1_SCL_B, SEL_IIC1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_11_8, I2C1_SCL_B, SEL_I2C1_1),
PINMUX_IPSR_MODSEL_DATA(IP9_11_8, VI2_DATA6_VI2_B6_B, SEL_VI2_1),
PINMUX_IPSR_DATA(IP9_15_12, SD0_WP),
PINMUX_IPSR_DATA(IP9_15_12, MMC0_D7),
@@ -1381,31 +1297,25 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_DATA(IP9_15_12, USB0_IDIN),
PINMUX_IPSR_MODSEL_DATA(IP9_15_12, GLO_SDATA, SEL_GPS_0),
PINMUX_IPSR_MODSEL_DATA(IP9_15_12, VI1_DATA7_VI1_B7_B, SEL_VI1_1),
- PINMUX_IPSR_MODSEL_DATA(IP9_15_12, SDA1_B, SEL_IIC1_1),
- PINMUX_IPSR_MODSEL_DATA(IP9_15_12, SDA1_CIS_B, SEL_I2C1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_15_12, IIC1_SDA_B, SEL_IIC1_1),
+ PINMUX_IPSR_MODSEL_DATA(IP9_15_12, I2C1_SDA_B, SEL_I2C1_1),
PINMUX_IPSR_MODSEL_DATA(IP9_15_12, VI2_DATA7_VI2_B7_B, SEL_VI2_1),
PINMUX_IPSR_DATA(IP9_17_16, SD1_CLK),
PINMUX_IPSR_DATA(IP9_17_16, AVB_TX_EN),
- PINMUX_IPSR_DATA(IP9_17_16, MII_TX_EN),
PINMUX_IPSR_DATA(IP9_19_18, SD1_CMD),
PINMUX_IPSR_DATA(IP9_19_18, AVB_TX_ER),
- PINMUX_IPSR_DATA(IP9_19_18, MII_TX_ER),
PINMUX_IPSR_MODSEL_DATA(IP9_19_18, SCIFB0_SCK_B, SEL_SCIFB_1),
PINMUX_IPSR_DATA(IP9_21_20, SD1_DAT0),
PINMUX_IPSR_DATA(IP9_21_20, AVB_TX_CLK),
- PINMUX_IPSR_DATA(IP9_21_20, MII_TX_CLK),
PINMUX_IPSR_MODSEL_DATA(IP9_21_20, SCIFB0_RXD_B, SEL_SCIFB_1),
PINMUX_IPSR_DATA(IP9_23_22, SD1_DAT1),
PINMUX_IPSR_DATA(IP9_23_22, AVB_LINK),
- PINMUX_IPSR_DATA(IP9_23_22, MII_LINK),
PINMUX_IPSR_MODSEL_DATA(IP9_23_22, SCIFB0_TXD_B, SEL_SCIFB_1),
PINMUX_IPSR_DATA(IP9_25_24, SD1_DAT2),
PINMUX_IPSR_DATA(IP9_25_24, AVB_COL),
- PINMUX_IPSR_DATA(IP9_25_24, MII_COL),
PINMUX_IPSR_MODSEL_DATA(IP9_25_24, SCIFB0_CTS_N_B, SEL_SCIFB_1),
PINMUX_IPSR_DATA(IP9_27_26, SD1_DAT3),
PINMUX_IPSR_DATA(IP9_27_26, AVB_RXD0),
- PINMUX_IPSR_DATA(IP9_27_26, MII_RXD0),
PINMUX_IPSR_MODSEL_DATA(IP9_27_26, SCIFB0_RTS_N_B, SEL_SCIFB_1),
PINMUX_IPSR_DATA(IP9_31_28, SD1_CD),
PINMUX_IPSR_DATA(IP9_31_28, MMC1_D6),
@@ -1413,8 +1323,8 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_DATA(IP9_31_28, USB1_EXTP),
PINMUX_IPSR_MODSEL_DATA(IP9_31_28, GLO_SS, SEL_GPS_0),
PINMUX_IPSR_MODSEL_DATA(IP9_31_28, VI0_CLK_B, SEL_VI0_1),
- PINMUX_IPSR_MODSEL_DATA(IP9_31_28, SCL2_D, SEL_IIC2_3),
- PINMUX_IPSR_MODSEL_DATA(IP9_31_28, SCL2_CIS_D, SEL_I2C2_3),
+ PINMUX_IPSR_MODSEL_DATA(IP9_31_28, IIC2_SCL_D, SEL_IIC2_3),
+ PINMUX_IPSR_MODSEL_DATA(IP9_31_28, I2C2_SCL_D, SEL_I2C2_3),
PINMUX_IPSR_MODSEL_DATA(IP9_31_28, SIM0_CLK_B, SEL_SIM_1),
PINMUX_IPSR_MODSEL_DATA(IP9_31_28, VI3_CLK_B, SEL_VI3_1),
@@ -1424,8 +1334,8 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_DATA(IP10_3_0, USB1_IDIN),
PINMUX_IPSR_MODSEL_DATA(IP10_3_0, GLO_RFON, SEL_GPS_0),
PINMUX_IPSR_MODSEL_DATA(IP10_3_0, VI1_CLK_B, SEL_VI1_1),
- PINMUX_IPSR_MODSEL_DATA(IP10_3_0, SDA2_D, SEL_IIC2_3),
- PINMUX_IPSR_MODSEL_DATA(IP10_3_0, SDA2_CIS_D, SEL_I2C2_3),
+ PINMUX_IPSR_MODSEL_DATA(IP10_3_0, IIC2_SDA_D, SEL_IIC2_3),
+ PINMUX_IPSR_MODSEL_DATA(IP10_3_0, I2C2_SDA_D, SEL_I2C2_3),
PINMUX_IPSR_MODSEL_DATA(IP10_3_0, SIM0_D_B, SEL_SIM_1),
PINMUX_IPSR_DATA(IP10_6_4, SD2_CLK),
PINMUX_IPSR_DATA(IP10_6_4, MMC0_CLK),
@@ -1455,7 +1365,6 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_DATA(IP10_18_15, SD2_DAT1),
PINMUX_IPSR_DATA(IP10_18_15, MMC0_D1),
PINMUX_IPSR_MODSEL_DATA(IP10_18_15, FMIN_B, SEL_FM_1),
- PINMUX_IPSR_MODSEL_DATA(IP10_18_15, RDS_DATA, SEL_RDS_0),
PINMUX_IPSR_MODSEL_DATA(IP10_18_15, VI0_DATA3_VI0_B3_B, SEL_VI0_1),
PINMUX_IPSR_MODSEL_DATA(IP10_18_15, SCIFB1_TXD_E, SEL_SCIFB1_4),
PINMUX_IPSR_MODSEL_DATA(IP10_18_15, TX1_D, SEL_SCIF1_3),
@@ -1465,7 +1374,6 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_DATA(IP10_22_19, SD2_DAT2),
PINMUX_IPSR_DATA(IP10_22_19, MMC0_D2),
PINMUX_IPSR_MODSEL_DATA(IP10_22_19, BPFCLK_B, SEL_FM_1),
- PINMUX_IPSR_MODSEL_DATA(IP10_22_19, RDS_CLK, SEL_RDS_0),
PINMUX_IPSR_MODSEL_DATA(IP10_22_19, VI0_DATA4_VI0_B4_B, SEL_VI0_1),
PINMUX_IPSR_MODSEL_DATA(IP10_22_19, HRX0_D, SEL_HSCIF0_3),
PINMUX_IPSR_MODSEL_DATA(IP10_22_19, TS_SDEN1_B, SEL_TSIF1_1),
@@ -1528,25 +1436,20 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_MODSEL_DATA(IP11_21_18, TS_SCK1, SEL_TSIF1_0),
PINMUX_IPSR_MODSEL_DATA(IP11_21_18, GLO_Q1, SEL_GPS_0),
PINMUX_IPSR_MODSEL_DATA(IP11_21_18, FMIN_C, SEL_FM_2),
- PINMUX_IPSR_MODSEL_DATA(IP11_21_18, RDS_DATA_B, SEL_RDS_1),
PINMUX_IPSR_MODSEL_DATA(IP11_21_18, FMIN_E, SEL_FM_4),
- PINMUX_IPSR_MODSEL_DATA(IP11_21_18, RDS_DATA_D, SEL_RDS_3),
PINMUX_IPSR_MODSEL_DATA(IP11_21_18, FMIN_F, SEL_FM_5),
- PINMUX_IPSR_MODSEL_DATA(IP11_21_18, RDS_DATA_E, SEL_RDS_4),
PINMUX_IPSR_DATA(IP11_23_22, MLB_CLK),
- PINMUX_IPSR_MODSEL_DATA(IP11_23_22, SCL2_B, SEL_IIC2_1),
- PINMUX_IPSR_MODSEL_DATA(IP11_23_22, SCL2_CIS_B, SEL_I2C2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_23_22, IIC2_SCL_B, SEL_IIC2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_23_22, I2C2_SCL_B, SEL_I2C2_1),
PINMUX_IPSR_DATA(IP11_26_24, MLB_SIG),
PINMUX_IPSR_MODSEL_DATA(IP11_26_24, SCIFB1_RXD_D, SEL_SCIFB1_3),
PINMUX_IPSR_MODSEL_DATA(IP11_26_24, RX1_C, SEL_SCIF1_2),
- PINMUX_IPSR_MODSEL_DATA(IP11_26_24, SDA2_B, SEL_IIC2_1),
- PINMUX_IPSR_MODSEL_DATA(IP11_26_24, SDA2_CIS_B, SEL_I2C2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_26_24, IIC2_SDA_B, SEL_IIC2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP11_26_24, I2C2_SDA_B, SEL_I2C2_1),
PINMUX_IPSR_DATA(IP11_29_27, MLB_DAT),
- PINMUX_IPSR_DATA(IP11_29_27, SPV_EVEN),
PINMUX_IPSR_MODSEL_DATA(IP11_29_27, SCIFB1_TXD_D, SEL_SCIFB1_3),
PINMUX_IPSR_MODSEL_DATA(IP11_29_27, TX1_C, SEL_SCIF1_2),
PINMUX_IPSR_MODSEL_DATA(IP11_29_27, BPFCLK_C, SEL_FM_2),
- PINMUX_IPSR_MODSEL_DATA(IP11_29_27, RDS_CLK_B, SEL_RDS_1),
PINMUX_IPSR_DATA(IP11_31_30, SSI_SCK0129),
PINMUX_IPSR_MODSEL_DATA(IP11_31_30, CAN_CLK_B, SEL_CANCLK_1),
PINMUX_IPSR_DATA(IP11_31_30, MOUT0),
@@ -1562,7 +1465,7 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_DATA(IP12_5_4, MOUT5),
PINMUX_IPSR_DATA(IP12_7_6, SSI_SDATA2),
PINMUX_IPSR_MODSEL_DATA(IP12_7_6, CAN1_RX_B, SEL_CAN1_1),
- PINMUX_IPSR_MODSEL_DATA(IP12_7_6, CAN1_TX_B, SEL_CAN1_1),
+ PINMUX_IPSR_DATA(IP12_7_6, SSI_SCK1),
PINMUX_IPSR_DATA(IP12_7_6, MOUT6),
PINMUX_IPSR_DATA(IP12_10_8, SSI_SCK34),
PINMUX_IPSR_DATA(IP12_10_8, STP_OPWM_0),
@@ -1617,12 +1520,10 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_MODSEL_DATA(IP13_6_3, SSI_SCK6, SEL_SSI6_0),
PINMUX_IPSR_MODSEL_DATA(IP13_6_3, SCIFB1_CTS_N, SEL_SCIFB1_0),
PINMUX_IPSR_MODSEL_DATA(IP13_6_3, BPFCLK_D, SEL_FM_3),
- PINMUX_IPSR_MODSEL_DATA(IP13_6_3, RDS_CLK_C, SEL_RDS_2),
PINMUX_IPSR_DATA(IP13_6_3, DU2_DR3),
PINMUX_IPSR_DATA(IP13_6_3, LCDOUT3),
PINMUX_IPSR_DATA(IP13_6_3, CAN_DEBUGOUT6),
PINMUX_IPSR_MODSEL_DATA(IP13_6_3, BPFCLK_F, SEL_FM_5),
- PINMUX_IPSR_MODSEL_DATA(IP13_6_3, RDS_CLK_E, SEL_RDS_4),
PINMUX_IPSR_MODSEL_DATA(IP13_9_7, SSI_WS6, SEL_SSI6_0),
PINMUX_IPSR_MODSEL_DATA(IP13_9_7, SCIFB1_RTS_N, SEL_SCIFB1_0),
PINMUX_IPSR_MODSEL_DATA(IP13_9_7, CAN0_TX_D, SEL_CAN0_3),
@@ -1631,7 +1532,6 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_DATA(IP13_9_7, CAN_DEBUGOUT7),
PINMUX_IPSR_MODSEL_DATA(IP13_12_10, SSI_SDATA6, SEL_SSI6_0),
PINMUX_IPSR_MODSEL_DATA(IP13_12_10, FMIN_D, SEL_FM_3),
- PINMUX_IPSR_MODSEL_DATA(IP13_12_10, RDS_DATA_C, SEL_RDS_2),
PINMUX_IPSR_DATA(IP13_12_10, DU2_DR5),
PINMUX_IPSR_DATA(IP13_12_10, LCDOUT5),
PINMUX_IPSR_DATA(IP13_12_10, CAN_DEBUGOUT8),
@@ -1657,10 +1557,8 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_DATA(IP13_22_19, QSTVA_QVS),
PINMUX_IPSR_DATA(IP13_22_19, CAN_DEBUGOUT11),
PINMUX_IPSR_MODSEL_DATA(IP13_22_19, BPFCLK_E, SEL_FM_4),
- PINMUX_IPSR_MODSEL_DATA(IP13_22_19, RDS_CLK_D, SEL_RDS_3),
PINMUX_IPSR_MODSEL_DATA(IP13_22_19, SSI_SDATA7_B, SEL_SSI7_1),
PINMUX_IPSR_MODSEL_DATA(IP13_22_19, FMIN_G, SEL_FM_6),
- PINMUX_IPSR_MODSEL_DATA(IP13_22_19, RDS_DATA_F, SEL_RDS_5),
PINMUX_IPSR_MODSEL_DATA(IP13_25_23, SSI_SDATA8, SEL_SSI8_0),
PINMUX_IPSR_MODSEL_DATA(IP13_25_23, STP_ISEN_1, SEL_SSP_0),
PINMUX_IPSR_MODSEL_DATA(IP13_25_23, SCIFB2_TXD, SEL_SCIFB2_0),
@@ -1690,8 +1588,8 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_DATA(IP14_5_3, MSIOF3_SS2),
PINMUX_IPSR_DATA(IP14_5_3, DU2_DG2),
PINMUX_IPSR_DATA(IP14_5_3, LCDOUT10),
- PINMUX_IPSR_MODSEL_DATA(IP14_5_3, SDA1_C, SEL_IIC1_2),
- PINMUX_IPSR_MODSEL_DATA(IP14_5_3, SDA1_CIS_C, SEL_I2C1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP14_5_3, IIC1_SDA_C, SEL_IIC1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP14_5_3, I2C1_SDA_C, SEL_I2C1_2),
PINMUX_IPSR_MODSEL_DATA(IP14_8_6, SCIFA0_RXD, SEL_SCFA_0),
PINMUX_IPSR_MODSEL_DATA(IP14_8_6, HRX1, SEL_HSCIF1_0),
PINMUX_IPSR_MODSEL_DATA(IP14_8_6, RX0, SEL_SCIF0_0),
@@ -1704,16 +1602,16 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_DATA(IP14_11_9, LCDOUT1),
PINMUX_IPSR_MODSEL_DATA(IP14_15_12, SCIFA0_CTS_N, SEL_SCFA_0),
PINMUX_IPSR_MODSEL_DATA(IP14_15_12, HCTS1_N, SEL_HSCIF1_0),
- PINMUX_IPSR_MODSEL_DATA(IP14_15_12, CTS0_N, SEL_SCIF0_0),
+ PINMUX_IPSR_DATA(IP14_15_12, CTS0_N),
PINMUX_IPSR_MODSEL_DATA(IP14_15_12, MSIOF3_SYNC, SEL_SOF3_0),
PINMUX_IPSR_DATA(IP14_15_12, DU2_DG3),
- PINMUX_IPSR_MODSEL_DATA(IP14_15_12, LCDOUT11, SEL_HSCIF1_0),
- PINMUX_IPSR_MODSEL_DATA(IP14_15_12, PWM0_B, SEL_SCIF0_0),
- PINMUX_IPSR_MODSEL_DATA(IP14_15_12, SCL1_C, SEL_IIC1_2),
- PINMUX_IPSR_MODSEL_DATA(IP14_15_12, SCL1_CIS_C, SEL_I2C1_2),
+ PINMUX_IPSR_DATA(IP14_15_12, LCDOUT11),
+ PINMUX_IPSR_DATA(IP14_15_12, PWM0_B),
+ PINMUX_IPSR_MODSEL_DATA(IP14_15_12, IIC1_SCL_C, SEL_IIC1_2),
+ PINMUX_IPSR_MODSEL_DATA(IP14_15_12, I2C1_SCL_C, SEL_I2C1_2),
PINMUX_IPSR_MODSEL_DATA(IP14_18_16, SCIFA0_RTS_N, SEL_SCFA_0),
PINMUX_IPSR_MODSEL_DATA(IP14_18_16, HRTS1_N, SEL_HSCIF1_0),
- PINMUX_IPSR_DATA(IP14_18_16, RTS0_N_TANS),
+ PINMUX_IPSR_DATA(IP14_18_16, RTS0_N),
PINMUX_IPSR_DATA(IP14_18_16, MSIOF3_SS1),
PINMUX_IPSR_DATA(IP14_18_16, DU2_DG0),
PINMUX_IPSR_DATA(IP14_18_16, LCDOUT8),
@@ -1736,7 +1634,7 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_DATA(IP14_27_25, QCLK),
PINMUX_IPSR_MODSEL_DATA(IP14_30_28, SCIFA1_RTS_N, SEL_SCIFA1_0),
PINMUX_IPSR_MODSEL_DATA(IP14_30_28, AD_NCS_N, SEL_ADI_0),
- PINMUX_IPSR_DATA(IP14_30_28, RTS1_N_TANS),
+ PINMUX_IPSR_DATA(IP14_30_28, RTS1_N),
PINMUX_IPSR_MODSEL_DATA(IP14_30_28, MSIOF3_TXD, SEL_SOF3_0),
PINMUX_IPSR_DATA(IP14_30_28, DU1_DOTCLKOUT),
PINMUX_IPSR_DATA(IP14_30_28, QSTVB_QVE),
@@ -1744,28 +1642,30 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_MODSEL_DATA(IP15_2_0, SCIFA2_SCK, SEL_SCIFA2_0),
PINMUX_IPSR_MODSEL_DATA(IP15_2_0, FMCLK, SEL_FM_0),
+ PINMUX_IPSR_DATA(IP15_2_0, SCK2),
PINMUX_IPSR_MODSEL_DATA(IP15_2_0, MSIOF3_SCK, SEL_SOF3_0),
PINMUX_IPSR_DATA(IP15_2_0, DU2_DG7),
PINMUX_IPSR_DATA(IP15_2_0, LCDOUT15),
- PINMUX_IPSR_MODSEL_DATA(IP15_2_0, SCIF_CLK_B, SEL_SCIFCLK_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_2_0, SCIF_CLK_B, SEL_SCIFCLK_1),
PINMUX_IPSR_MODSEL_DATA(IP15_5_3, SCIFA2_RXD, SEL_SCIFA2_0),
PINMUX_IPSR_MODSEL_DATA(IP15_5_3, FMIN, SEL_FM_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_5_3, TX2, SEL_SCIF2_0),
PINMUX_IPSR_DATA(IP15_5_3, DU2_DB0),
PINMUX_IPSR_DATA(IP15_5_3, LCDOUT16),
- PINMUX_IPSR_MODSEL_DATA(IP15_5_3, SCL2, SEL_IIC2_0),
- PINMUX_IPSR_MODSEL_DATA(IP15_5_3, SCL2_CIS, SEL_I2C2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_5_3, IIC2_SCL, SEL_IIC2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_5_3, I2C2_SCL, SEL_I2C2_0),
PINMUX_IPSR_MODSEL_DATA(IP15_8_6, SCIFA2_TXD, SEL_SCIFA2_0),
PINMUX_IPSR_MODSEL_DATA(IP15_8_6, BPFCLK, SEL_FM_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_8_6, RX2, SEL_SCIF2_0),
PINMUX_IPSR_DATA(IP15_8_6, DU2_DB1),
PINMUX_IPSR_DATA(IP15_8_6, LCDOUT17),
- PINMUX_IPSR_MODSEL_DATA(IP15_8_6, SDA2, SEL_IIC2_0),
- PINMUX_IPSR_MODSEL_DATA(IP15_8_6, SDA2_CIS, SEL_I2C2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_8_6, IIC2_SDA, SEL_IIC2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_8_6, I2C2_SDA, SEL_I2C2_0),
PINMUX_IPSR_DATA(IP15_11_9, HSCK0),
PINMUX_IPSR_MODSEL_DATA(IP15_11_9, TS_SDEN0, SEL_TSIF0_0),
PINMUX_IPSR_DATA(IP15_11_9, DU2_DG4),
PINMUX_IPSR_DATA(IP15_11_9, LCDOUT12),
- PINMUX_IPSR_MODSEL_DATA(IP15_11_9, HCTS0_N_C, SEL_IIC2_0),
- PINMUX_IPSR_MODSEL_DATA(IP15_11_9, SDA2_CIS, SEL_I2C2_0),
+ PINMUX_IPSR_MODSEL_DATA(IP15_11_9, HCTS0_N_C, SEL_HSCIF0_2),
PINMUX_IPSR_MODSEL_DATA(IP15_13_12, HRX0, SEL_HSCIF0_0),
PINMUX_IPSR_DATA(IP15_13_12, DU2_DB2),
PINMUX_IPSR_DATA(IP15_13_12, LCDOUT18),
@@ -1791,7 +1691,7 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_DATA(IP15_25_23, ADIDATA),
PINMUX_IPSR_DATA(IP15_25_23, DU2_DB7),
PINMUX_IPSR_DATA(IP15_25_23, LCDOUT23),
- PINMUX_IPSR_MODSEL_DATA(IP15_25_23, SCIFA2_RXD_B, SEL_SCIFA2_1),
+ PINMUX_IPSR_MODSEL_DATA(IP15_25_23, HRX0_C, SEL_SCIFA2_1),
PINMUX_IPSR_MODSEL_DATA(IP15_27_26, MSIOF0_SS1, SEL_SOF0_0),
PINMUX_IPSR_DATA(IP15_27_26, ADICHS0),
PINMUX_IPSR_DATA(IP15_27_26, DU2_DG5),
@@ -1814,7 +1714,7 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_IPSR_DATA(IP16_5_3, ADICS_SAMP),
PINMUX_IPSR_DATA(IP16_5_3, DU2_CDE),
PINMUX_IPSR_DATA(IP16_5_3, QPOLB),
- PINMUX_IPSR_MODSEL_DATA(IP16_5_3, HRX0_C, SEL_HSCIF0_2),
+ PINMUX_IPSR_MODSEL_DATA(IP16_5_3, SCIFA2_RXD_B, SEL_HSCIF0_2),
PINMUX_IPSR_DATA(IP16_6, USB1_PWEN),
PINMUX_IPSR_DATA(IP16_6, AUDIO_CLKOUT_D),
PINMUX_IPSR_DATA(IP16_7, USB1_OVC),
@@ -1825,6 +1725,104 @@ static struct sh_pfc_pin pinmux_pins[] = {
PINMUX_GPIO_GP_ALL(),
};
+/* - DU RGB ----------------------------------------------------------------- */
+static const unsigned int du_rgb666_pins[] = {
+ /* R[7:2], G[7:2], B[7:2] */
+ RCAR_GP_PIN(4, 21), RCAR_GP_PIN(4, 20), RCAR_GP_PIN(4, 19),
+ RCAR_GP_PIN(4, 18), RCAR_GP_PIN(4, 17), RCAR_GP_PIN(4, 16),
+ RCAR_GP_PIN(5, 4), RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 14),
+ RCAR_GP_PIN(5, 7), RCAR_GP_PIN(4, 30), RCAR_GP_PIN(4, 27),
+ RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 12), RCAR_GP_PIN(5, 11),
+ RCAR_GP_PIN(5, 10), RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 8),
+};
+static const unsigned int du_rgb666_mux[] = {
+ DU2_DR7_MARK, DU2_DR6_MARK, DU2_DR5_MARK, DU2_DR4_MARK,
+ DU2_DR3_MARK, DU2_DR2_MARK,
+ DU2_DG7_MARK, DU2_DG6_MARK, DU2_DG5_MARK, DU2_DG4_MARK,
+ DU2_DG3_MARK, DU2_DG2_MARK,
+ DU2_DB7_MARK, DU2_DB6_MARK, DU2_DB5_MARK, DU2_DB4_MARK,
+ DU2_DB3_MARK, DU2_DB2_MARK,
+};
+static const unsigned int du_rgb888_pins[] = {
+ /* R[7:0], G[7:0], B[7:0] */
+ RCAR_GP_PIN(4, 21), RCAR_GP_PIN(4, 20), RCAR_GP_PIN(4, 19),
+ RCAR_GP_PIN(4, 18), RCAR_GP_PIN(4, 17), RCAR_GP_PIN(4, 16),
+ RCAR_GP_PIN(4, 29), RCAR_GP_PIN(4, 28), RCAR_GP_PIN(5, 4),
+ RCAR_GP_PIN(5, 15), RCAR_GP_PIN(5, 14), RCAR_GP_PIN(5, 7),
+ RCAR_GP_PIN(4, 30), RCAR_GP_PIN(4, 27), RCAR_GP_PIN(5, 1),
+ RCAR_GP_PIN(4, 31), RCAR_GP_PIN(5, 13), RCAR_GP_PIN(5, 12),
+ RCAR_GP_PIN(5, 11), RCAR_GP_PIN(5, 10), RCAR_GP_PIN(5, 9),
+ RCAR_GP_PIN(5, 8), RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 5),
+};
+static const unsigned int du_rgb888_mux[] = {
+ DU2_DR7_MARK, DU2_DR6_MARK, DU2_DR5_MARK, DU2_DR4_MARK,
+ DU2_DR3_MARK, DU2_DR2_MARK, DU2_DR1_MARK, DU2_DR0_MARK,
+ DU2_DG7_MARK, DU2_DG6_MARK, DU2_DG5_MARK, DU2_DG4_MARK,
+ DU2_DG3_MARK, DU2_DG2_MARK, DU2_DG1_MARK, DU2_DG0_MARK,
+ DU2_DB7_MARK, DU2_DB6_MARK, DU2_DB5_MARK, DU2_DB4_MARK,
+ DU2_DB3_MARK, DU2_DB2_MARK, DU2_DB1_MARK, DU2_DB0_MARK,
+};
+static const unsigned int du_clk_out_0_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(5, 2),
+};
+static const unsigned int du_clk_out_0_mux[] = {
+ DU0_DOTCLKOUT_MARK
+};
+static const unsigned int du_clk_out_1_pins[] = {
+ /* CLKOUT */
+ RCAR_GP_PIN(5, 3),
+};
+static const unsigned int du_clk_out_1_mux[] = {
+ DU1_DOTCLKOUT_MARK
+};
+static const unsigned int du_sync_0_pins[] = {
+ /* VSYNC, HSYNC, DISP */
+ RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 14), RCAR_GP_PIN(5, 0),
+};
+static const unsigned int du_sync_0_mux[] = {
+ DU2_EXVSYNC_DU2_VSYNC_MARK, DU2_EXHSYNC_DU2_HSYNC_MARK,
+ DU2_EXODDF_DU2_ODDF_DISP_CDE_MARK
+};
+static const unsigned int du_sync_1_pins[] = {
+ /* VSYNC, HSYNC, DISP */
+ RCAR_GP_PIN(4, 15), RCAR_GP_PIN(4, 14), RCAR_GP_PIN(5, 16),
+};
+static const unsigned int du_sync_1_mux[] = {
+ DU2_EXVSYNC_DU2_VSYNC_MARK, DU2_EXHSYNC_DU2_HSYNC_MARK,
+ DU2_DISP_MARK
+};
+static const unsigned int du_cde_pins[] = {
+ /* CDE */
+ RCAR_GP_PIN(5, 17),
+};
+static const unsigned int du_cde_mux[] = {
+ DU2_CDE_MARK,
+};
+/* - DU0 -------------------------------------------------------------------- */
+static const unsigned int du0_clk_in_pins[] = {
+ /* CLKIN */
+ RCAR_GP_PIN(5, 26),
+};
+static const unsigned int du0_clk_in_mux[] = {
+ DU_DOTCLKIN0_MARK
+};
+/* - DU1 -------------------------------------------------------------------- */
+static const unsigned int du1_clk_in_pins[] = {
+ /* CLKIN */
+ RCAR_GP_PIN(5, 27),
+};
+static const unsigned int du1_clk_in_mux[] = {
+ DU_DOTCLKIN1_MARK,
+};
+/* - DU2 -------------------------------------------------------------------- */
+static const unsigned int du2_clk_in_pins[] = {
+ /* CLKIN */
+ RCAR_GP_PIN(5, 28),
+};
+static const unsigned int du2_clk_in_mux[] = {
+ DU_DOTCLKIN2_MARK,
+};
/* - ETH -------------------------------------------------------------------- */
static const unsigned int eth_link_pins[] = {
/* LINK */
@@ -1857,128 +1855,6 @@ static const unsigned int eth_rmii_mux[] = {
ETH_RXD0_MARK, ETH_RXD1_MARK, ETH_RX_ER_MARK, ETH_CRS_DV_MARK,
ETH_TXD0_MARK, ETH_TXD1_MARK, ETH_TX_EN_MARK, ETH_REF_CLK_MARK,
};
-/* - INTC ------------------------------------------------------------------- */
-static const unsigned int intc_irq0_pins[] = {
- /* IRQ */
- RCAR_GP_PIN(1, 25),
-};
-static const unsigned int intc_irq0_mux[] = {
- IRQ0_MARK,
-};
-static const unsigned int intc_irq1_pins[] = {
- /* IRQ */
- RCAR_GP_PIN(1, 27),
-};
-static const unsigned int intc_irq1_mux[] = {
- IRQ1_MARK,
-};
-static const unsigned int intc_irq2_pins[] = {
- /* IRQ */
- RCAR_GP_PIN(1, 29),
-};
-static const unsigned int intc_irq2_mux[] = {
- IRQ2_MARK,
-};
-static const unsigned int intc_irq3_pins[] = {
- /* IRQ */
- RCAR_GP_PIN(1, 23),
-};
-static const unsigned int intc_irq3_mux[] = {
- IRQ3_MARK,
-};
-/* - SCIF0 ----------------------------------------------------------------- */
-static const unsigned int scif0_data_pins[] = {
- /* RX, TX */
- RCAR_GP_PIN(4, 28), RCAR_GP_PIN(4, 29),
-};
-static const unsigned int scif0_data_mux[] = {
- RX0_MARK, TX0_MARK,
-};
-static const unsigned int scif0_clk_pins[] = {
- /* SCK */
- RCAR_GP_PIN(4, 27),
-};
-static const unsigned int scif0_clk_mux[] = {
- SCK0_MARK,
-};
-static const unsigned int scif0_ctrl_pins[] = {
- /* RTS, CTS */
- RCAR_GP_PIN(4, 31), RCAR_GP_PIN(4, 30),
-};
-static const unsigned int scif0_ctrl_mux[] = {
- RTS0_N_TANS_MARK, CTS0_N_MARK,
-};
-static const unsigned int scif0_data_b_pins[] = {
- /* RX, TX */
- RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
-};
-static const unsigned int scif0_data_b_mux[] = {
- RX0_B_MARK, TX0_B_MARK,
-};
-/* - SCIF1 ----------------------------------------------------------------- */
-static const unsigned int scif1_data_pins[] = {
- /* RX, TX */
- RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 1),
-};
-static const unsigned int scif1_data_mux[] = {
- RX1_MARK, TX1_MARK,
-};
-static const unsigned int scif1_clk_pins[] = {
- /* SCK */
- RCAR_GP_PIN(4, 20),
-};
-static const unsigned int scif1_clk_mux[] = {
- SCK1_MARK,
-};
-static const unsigned int scif1_ctrl_pins[] = {
- /* RTS, CTS */
- RCAR_GP_PIN(5, 3), RCAR_GP_PIN(5, 2),
-};
-static const unsigned int scif1_ctrl_mux[] = {
- RTS1_N_TANS_MARK, CTS1_N_MARK,
-};
-static const unsigned int scif1_data_b_pins[] = {
- /* RX, TX */
- RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15),
-};
-static const unsigned int scif1_data_b_mux[] = {
- RX1_B_MARK, TX1_B_MARK,
-};
-static const unsigned int scif1_data_c_pins[] = {
- /* RX, TX */
- RCAR_GP_PIN(4, 1), RCAR_GP_PIN(4, 2),
-};
-static const unsigned int scif1_data_c_mux[] = {
- RX1_C_MARK, TX1_C_MARK,
-};
-static const unsigned int scif1_data_d_pins[] = {
- /* RX, TX */
- RCAR_GP_PIN(3, 18), RCAR_GP_PIN(3, 19),
-};
-static const unsigned int scif1_data_d_mux[] = {
- RX1_D_MARK, TX1_D_MARK,
-};
-static const unsigned int scif1_clk_d_pins[] = {
- /* SCK */
- RCAR_GP_PIN(3, 17),
-};
-static const unsigned int scif1_clk_d_mux[] = {
- SCK1_D_MARK,
-};
-static const unsigned int scif1_data_e_pins[] = {
- /* RX, TX */
- RCAR_GP_PIN(2, 21), RCAR_GP_PIN(2, 22),
-};
-static const unsigned int scif1_data_e_mux[] = {
- RX1_E_MARK, TX1_E_MARK,
-};
-static const unsigned int scif1_clk_e_pins[] = {
- /* SCK */
- RCAR_GP_PIN(2, 20),
-};
-static const unsigned int scif1_clk_e_mux[] = {
- SCK1_E_MARK,
-};
/* - HSCIF0 ----------------------------------------------------------------- */
static const unsigned int hscif0_data_pins[] = {
/* RX, TX */
@@ -2114,6 +1990,390 @@ static const unsigned int hscif1_ctrl_b_pins[] = {
static const unsigned int hscif1_ctrl_b_mux[] = {
HRTS1_N_B_MARK, HCTS1_N_B_MARK,
};
+/* - INTC ------------------------------------------------------------------- */
+static const unsigned int intc_irq0_pins[] = {
+ /* IRQ */
+ RCAR_GP_PIN(1, 25),
+};
+static const unsigned int intc_irq0_mux[] = {
+ IRQ0_MARK,
+};
+static const unsigned int intc_irq1_pins[] = {
+ /* IRQ */
+ RCAR_GP_PIN(1, 27),
+};
+static const unsigned int intc_irq1_mux[] = {
+ IRQ1_MARK,
+};
+static const unsigned int intc_irq2_pins[] = {
+ /* IRQ */
+ RCAR_GP_PIN(1, 29),
+};
+static const unsigned int intc_irq2_mux[] = {
+ IRQ2_MARK,
+};
+static const unsigned int intc_irq3_pins[] = {
+ /* IRQ */
+ RCAR_GP_PIN(1, 23),
+};
+static const unsigned int intc_irq3_mux[] = {
+ IRQ3_MARK,
+};
+/* - MMCIF0 ----------------------------------------------------------------- */
+static const unsigned int mmc0_data1_pins[] = {
+ /* D[0] */
+ RCAR_GP_PIN(3, 18),
+};
+static const unsigned int mmc0_data1_mux[] = {
+ MMC0_D0_MARK,
+};
+static const unsigned int mmc0_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(3, 18), RCAR_GP_PIN(3, 19),
+ RCAR_GP_PIN(3, 20), RCAR_GP_PIN(3, 21),
+};
+static const unsigned int mmc0_data4_mux[] = {
+ MMC0_D0_MARK, MMC0_D1_MARK, MMC0_D2_MARK, MMC0_D3_MARK,
+};
+static const unsigned int mmc0_data8_pins[] = {
+ /* D[0:7] */
+ RCAR_GP_PIN(3, 18), RCAR_GP_PIN(3, 19),
+ RCAR_GP_PIN(3, 20), RCAR_GP_PIN(3, 21),
+ RCAR_GP_PIN(3, 22), RCAR_GP_PIN(3, 23),
+ RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
+};
+static const unsigned int mmc0_data8_mux[] = {
+ MMC0_D0_MARK, MMC0_D1_MARK, MMC0_D2_MARK, MMC0_D3_MARK,
+ MMC0_D4_MARK, MMC0_D5_MARK, MMC0_D6_MARK, MMC0_D7_MARK,
+};
+static const unsigned int mmc0_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(3, 16), RCAR_GP_PIN(3, 17),
+};
+static const unsigned int mmc0_ctrl_mux[] = {
+ MMC0_CLK_MARK, MMC0_CMD_MARK,
+};
+/* - MMCIF1 ----------------------------------------------------------------- */
+static const unsigned int mmc1_data1_pins[] = {
+ /* D[0] */
+ RCAR_GP_PIN(3, 26),
+};
+static const unsigned int mmc1_data1_mux[] = {
+ MMC1_D0_MARK,
+};
+static const unsigned int mmc1_data4_pins[] = {
+ /* D[0:3] */
+ RCAR_GP_PIN(3, 26), RCAR_GP_PIN(3, 27),
+ RCAR_GP_PIN(3, 28), RCAR_GP_PIN(3, 29),
+};
+static const unsigned int mmc1_data4_mux[] = {
+ MMC1_D0_MARK, MMC1_D1_MARK, MMC1_D2_MARK, MMC1_D3_MARK,
+};
+static const unsigned int mmc1_data8_pins[] = {
+ /* D[0:7] */
+ RCAR_GP_PIN(3, 26), RCAR_GP_PIN(3, 27),
+ RCAR_GP_PIN(3, 28), RCAR_GP_PIN(3, 29),
+ RCAR_GP_PIN(3, 30), RCAR_GP_PIN(3, 31),
+ RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15),
+};
+static const unsigned int mmc1_data8_mux[] = {
+ MMC1_D0_MARK, MMC1_D1_MARK, MMC1_D2_MARK, MMC1_D3_MARK,
+ MMC1_D4_MARK, MMC1_D5_MARK, MMC1_D6_MARK, MMC1_D7_MARK,
+};
+static const unsigned int mmc1_ctrl_pins[] = {
+ /* CLK, CMD */
+ RCAR_GP_PIN(3, 24), RCAR_GP_PIN(3, 25),
+};
+static const unsigned int mmc1_ctrl_mux[] = {
+ MMC1_CLK_MARK, MMC1_CMD_MARK,
+};
+/* - MSIOF0 ----------------------------------------------------------------- */
+static const unsigned int msiof0_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 12),
+};
+static const unsigned int msiof0_clk_mux[] = {
+ MSIOF0_SCK_MARK,
+};
+static const unsigned int msiof0_sync_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(5, 13),
+};
+static const unsigned int msiof0_sync_mux[] = {
+ MSIOF0_SYNC_MARK,
+};
+static const unsigned int msiof0_ss1_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(5, 14),
+};
+static const unsigned int msiof0_ss1_mux[] = {
+ MSIOF0_SS1_MARK,
+};
+static const unsigned int msiof0_ss2_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(5, 16),
+};
+static const unsigned int msiof0_ss2_mux[] = {
+ MSIOF0_SS2_MARK,
+};
+static const unsigned int msiof0_rx_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(5, 17),
+};
+static const unsigned int msiof0_rx_mux[] = {
+ MSIOF0_RXD_MARK,
+};
+static const unsigned int msiof0_tx_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(5, 15),
+};
+static const unsigned int msiof0_tx_mux[] = {
+ MSIOF0_TXD_MARK,
+};
+/* - MSIOF1 ----------------------------------------------------------------- */
+static const unsigned int msiof1_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(4, 8),
+};
+static const unsigned int msiof1_clk_mux[] = {
+ MSIOF1_SCK_MARK,
+};
+static const unsigned int msiof1_sync_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(4, 9),
+};
+static const unsigned int msiof1_sync_mux[] = {
+ MSIOF1_SYNC_MARK,
+};
+static const unsigned int msiof1_ss1_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(4, 10),
+};
+static const unsigned int msiof1_ss1_mux[] = {
+ MSIOF1_SS1_MARK,
+};
+static const unsigned int msiof1_ss2_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(4, 11),
+};
+static const unsigned int msiof1_ss2_mux[] = {
+ MSIOF1_SS2_MARK,
+};
+static const unsigned int msiof1_rx_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(4, 13),
+};
+static const unsigned int msiof1_rx_mux[] = {
+ MSIOF1_RXD_MARK,
+};
+static const unsigned int msiof1_tx_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(4, 12),
+};
+static const unsigned int msiof1_tx_mux[] = {
+ MSIOF1_TXD_MARK,
+};
+/* - MSIOF2 ----------------------------------------------------------------- */
+static const unsigned int msiof2_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(0, 27),
+};
+static const unsigned int msiof2_clk_mux[] = {
+ MSIOF2_SCK_MARK,
+};
+static const unsigned int msiof2_sync_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(0, 26),
+};
+static const unsigned int msiof2_sync_mux[] = {
+ MSIOF2_SYNC_MARK,
+};
+static const unsigned int msiof2_ss1_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(0, 30),
+};
+static const unsigned int msiof2_ss1_mux[] = {
+ MSIOF2_SS1_MARK,
+};
+static const unsigned int msiof2_ss2_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(0, 31),
+};
+static const unsigned int msiof2_ss2_mux[] = {
+ MSIOF2_SS2_MARK,
+};
+static const unsigned int msiof2_rx_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(0, 29),
+};
+static const unsigned int msiof2_rx_mux[] = {
+ MSIOF2_RXD_MARK,
+};
+static const unsigned int msiof2_tx_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(0, 28),
+};
+static const unsigned int msiof2_tx_mux[] = {
+ MSIOF2_TXD_MARK,
+};
+/* - MSIOF3 ----------------------------------------------------------------- */
+static const unsigned int msiof3_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 4),
+};
+static const unsigned int msiof3_clk_mux[] = {
+ MSIOF3_SCK_MARK,
+};
+static const unsigned int msiof3_sync_pins[] = {
+ /* SYNC */
+ RCAR_GP_PIN(4, 30),
+};
+static const unsigned int msiof3_sync_mux[] = {
+ MSIOF3_SYNC_MARK,
+};
+static const unsigned int msiof3_ss1_pins[] = {
+ /* SS1 */
+ RCAR_GP_PIN(4, 31),
+};
+static const unsigned int msiof3_ss1_mux[] = {
+ MSIOF3_SS1_MARK,
+};
+static const unsigned int msiof3_ss2_pins[] = {
+ /* SS2 */
+ RCAR_GP_PIN(4, 27),
+};
+static const unsigned int msiof3_ss2_mux[] = {
+ MSIOF3_SS2_MARK,
+};
+static const unsigned int msiof3_rx_pins[] = {
+ /* RXD */
+ RCAR_GP_PIN(5, 2),
+};
+static const unsigned int msiof3_rx_mux[] = {
+ MSIOF3_RXD_MARK,
+};
+static const unsigned int msiof3_tx_pins[] = {
+ /* TXD */
+ RCAR_GP_PIN(5, 3),
+};
+static const unsigned int msiof3_tx_mux[] = {
+ MSIOF3_TXD_MARK,
+};
+/* - SCIF0 ------------------------------------------------------------------ */
+static const unsigned int scif0_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(4, 28), RCAR_GP_PIN(4, 29),
+};
+static const unsigned int scif0_data_mux[] = {
+ RX0_MARK, TX0_MARK,
+};
+static const unsigned int scif0_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(4, 27),
+};
+static const unsigned int scif0_clk_mux[] = {
+ SCK0_MARK,
+};
+static const unsigned int scif0_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(4, 31), RCAR_GP_PIN(4, 30),
+};
+static const unsigned int scif0_ctrl_mux[] = {
+ RTS0_N_MARK, CTS0_N_MARK,
+};
+static const unsigned int scif0_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5),
+};
+static const unsigned int scif0_data_b_mux[] = {
+ RX0_B_MARK, TX0_B_MARK,
+};
+/* - SCIF1 ------------------------------------------------------------------ */
+static const unsigned int scif1_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 1),
+};
+static const unsigned int scif1_data_mux[] = {
+ RX1_MARK, TX1_MARK,
+};
+static const unsigned int scif1_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(4, 20),
+};
+static const unsigned int scif1_clk_mux[] = {
+ SCK1_MARK,
+};
+static const unsigned int scif1_ctrl_pins[] = {
+ /* RTS, CTS */
+ RCAR_GP_PIN(5, 3), RCAR_GP_PIN(5, 2),
+};
+static const unsigned int scif1_ctrl_mux[] = {
+ RTS1_N_MARK, CTS1_N_MARK,
+};
+static const unsigned int scif1_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 14), RCAR_GP_PIN(0, 15),
+};
+static const unsigned int scif1_data_b_mux[] = {
+ RX1_B_MARK, TX1_B_MARK,
+};
+static const unsigned int scif1_data_c_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(4, 1), RCAR_GP_PIN(4, 2),
+};
+static const unsigned int scif1_data_c_mux[] = {
+ RX1_C_MARK, TX1_C_MARK,
+};
+static const unsigned int scif1_data_d_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(3, 18), RCAR_GP_PIN(3, 19),
+};
+static const unsigned int scif1_data_d_mux[] = {
+ RX1_D_MARK, TX1_D_MARK,
+};
+static const unsigned int scif1_clk_d_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(3, 17),
+};
+static const unsigned int scif1_clk_d_mux[] = {
+ SCK1_D_MARK,
+};
+static const unsigned int scif1_data_e_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(2, 21), RCAR_GP_PIN(2, 22),
+};
+static const unsigned int scif1_data_e_mux[] = {
+ RX1_E_MARK, TX1_E_MARK,
+};
+static const unsigned int scif1_clk_e_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(2, 20),
+};
+static const unsigned int scif1_clk_e_mux[] = {
+ SCK1_E_MARK,
+};
+/* - SCIF2 ------------------------------------------------------------------ */
+static const unsigned int scif2_data_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(5, 6), RCAR_GP_PIN(5, 5),
+};
+static const unsigned int scif2_data_mux[] = {
+ RX2_MARK, TX2_MARK,
+};
+static const unsigned int scif2_clk_pins[] = {
+ /* SCK */
+ RCAR_GP_PIN(5, 4),
+};
+static const unsigned int scif2_clk_mux[] = {
+ SCK2_MARK,
+};
+static const unsigned int scif2_data_b_pins[] = {
+ /* RX, TX */
+ RCAR_GP_PIN(0, 24), RCAR_GP_PIN(0, 25),
+};
+static const unsigned int scif2_data_b_mux[] = {
+ RX2_B_MARK, TX2_B_MARK,
+};
/* - SCIFA0 ----------------------------------------------------------------- */
static const unsigned int scifa0_data_pins[] = {
/* RXD, TXD */
@@ -2477,103 +2737,6 @@ static const unsigned int scifb2_data_c_pins[] = {
static const unsigned int scifb2_data_c_mux[] = {
SCIFB2_RXD_C_MARK, SCIFB2_TXD_C_MARK,
};
-/* - TPU0 ------------------------------------------------------------------- */
-static const unsigned int tpu0_to0_pins[] = {
- /* TO */
- RCAR_GP_PIN(0, 20),
-};
-static const unsigned int tpu0_to0_mux[] = {
- TPU0TO0_MARK,
-};
-static const unsigned int tpu0_to1_pins[] = {
- /* TO */
- RCAR_GP_PIN(0, 21),
-};
-static const unsigned int tpu0_to1_mux[] = {
- TPU0TO1_MARK,
-};
-static const unsigned int tpu0_to2_pins[] = {
- /* TO */
- RCAR_GP_PIN(0, 22),
-};
-static const unsigned int tpu0_to2_mux[] = {
- TPU0TO2_MARK,
-};
-static const unsigned int tpu0_to3_pins[] = {
- /* TO */
- RCAR_GP_PIN(0, 23),
-};
-static const unsigned int tpu0_to3_mux[] = {
- TPU0TO3_MARK,
-};
-/* - MMCIF0 ----------------------------------------------------------------- */
-static const unsigned int mmc0_data1_pins[] = {
- /* D[0] */
- RCAR_GP_PIN(3, 18),
-};
-static const unsigned int mmc0_data1_mux[] = {
- MMC0_D0_MARK,
-};
-static const unsigned int mmc0_data4_pins[] = {
- /* D[0:3] */
- RCAR_GP_PIN(3, 18), RCAR_GP_PIN(3, 19),
- RCAR_GP_PIN(3, 20), RCAR_GP_PIN(3, 21),
-};
-static const unsigned int mmc0_data4_mux[] = {
- MMC0_D0_MARK, MMC0_D1_MARK, MMC0_D2_MARK, MMC0_D3_MARK,
-};
-static const unsigned int mmc0_data8_pins[] = {
- /* D[0:7] */
- RCAR_GP_PIN(3, 18), RCAR_GP_PIN(3, 19),
- RCAR_GP_PIN(3, 20), RCAR_GP_PIN(3, 21),
- RCAR_GP_PIN(3, 22), RCAR_GP_PIN(3, 23),
- RCAR_GP_PIN(3, 6), RCAR_GP_PIN(3, 7),
-};
-static const unsigned int mmc0_data8_mux[] = {
- MMC0_D0_MARK, MMC0_D1_MARK, MMC0_D2_MARK, MMC0_D3_MARK,
- MMC0_D4_MARK, MMC0_D5_MARK, MMC0_D6_MARK, MMC0_D7_MARK,
-};
-static const unsigned int mmc0_ctrl_pins[] = {
- /* CLK, CMD */
- RCAR_GP_PIN(3, 16), RCAR_GP_PIN(3, 17),
-};
-static const unsigned int mmc0_ctrl_mux[] = {
- MMC0_CLK_MARK, MMC0_CMD_MARK,
-};
-/* - MMCIF1 ----------------------------------------------------------------- */
-static const unsigned int mmc1_data1_pins[] = {
- /* D[0] */
- RCAR_GP_PIN(3, 26),
-};
-static const unsigned int mmc1_data1_mux[] = {
- MMC1_D0_MARK,
-};
-static const unsigned int mmc1_data4_pins[] = {
- /* D[0:3] */
- RCAR_GP_PIN(3, 26), RCAR_GP_PIN(3, 27),
- RCAR_GP_PIN(3, 28), RCAR_GP_PIN(3, 29),
-};
-static const unsigned int mmc1_data4_mux[] = {
- MMC1_D0_MARK, MMC1_D1_MARK, MMC1_D2_MARK, MMC1_D3_MARK,
-};
-static const unsigned int mmc1_data8_pins[] = {
- /* D[0:7] */
- RCAR_GP_PIN(3, 26), RCAR_GP_PIN(3, 27),
- RCAR_GP_PIN(3, 28), RCAR_GP_PIN(3, 29),
- RCAR_GP_PIN(3, 30), RCAR_GP_PIN(3, 31),
- RCAR_GP_PIN(3, 14), RCAR_GP_PIN(3, 15),
-};
-static const unsigned int mmc1_data8_mux[] = {
- MMC1_D0_MARK, MMC1_D1_MARK, MMC1_D2_MARK, MMC1_D3_MARK,
- MMC1_D4_MARK, MMC1_D5_MARK, MMC1_D6_MARK, MMC1_D7_MARK,
-};
-static const unsigned int mmc1_ctrl_pins[] = {
- /* CLK, CMD */
- RCAR_GP_PIN(3, 24), RCAR_GP_PIN(3, 25),
-};
-static const unsigned int mmc1_ctrl_mux[] = {
- MMC1_CLK_MARK, MMC1_CMD_MARK,
-};
/* - SDHI0 ------------------------------------------------------------------ */
static const unsigned int sdhi0_data1_pins[] = {
/* D0 */
@@ -2718,8 +2881,149 @@ static const unsigned int sdhi3_wp_pins[] = {
static const unsigned int sdhi3_wp_mux[] = {
SD3_WP_MARK,
};
+/* - TPU0 ------------------------------------------------------------------- */
+static const unsigned int tpu0_to0_pins[] = {
+ /* TO */
+ RCAR_GP_PIN(0, 20),
+};
+static const unsigned int tpu0_to0_mux[] = {
+ TPU0TO0_MARK,
+};
+static const unsigned int tpu0_to1_pins[] = {
+ /* TO */
+ RCAR_GP_PIN(0, 21),
+};
+static const unsigned int tpu0_to1_mux[] = {
+ TPU0TO1_MARK,
+};
+static const unsigned int tpu0_to2_pins[] = {
+ /* TO */
+ RCAR_GP_PIN(0, 22),
+};
+static const unsigned int tpu0_to2_mux[] = {
+ TPU0TO2_MARK,
+};
+static const unsigned int tpu0_to3_pins[] = {
+ /* TO */
+ RCAR_GP_PIN(0, 23),
+};
+static const unsigned int tpu0_to3_mux[] = {
+ TPU0TO3_MARK,
+};
+/* - USB0 ------------------------------------------------------------------- */
+static const unsigned int usb0_pins[] = {
+ /* PWEN, OVC/VBUS */
+ RCAR_GP_PIN(5, 18), RCAR_GP_PIN(5, 19),
+};
+static const unsigned int usb0_mux[] = {
+ USB0_PWEN_MARK, USB0_OVC_VBUS_MARK,
+};
+/* - USB1 ------------------------------------------------------------------- */
+static const unsigned int usb1_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(5, 20), RCAR_GP_PIN(5, 21),
+};
+static const unsigned int usb1_mux[] = {
+ USB1_PWEN_MARK, USB1_OVC_MARK,
+};
+/* - USB2 ------------------------------------------------------------------- */
+static const unsigned int usb2_pins[] = {
+ /* PWEN, OVC */
+ RCAR_GP_PIN(5, 22), RCAR_GP_PIN(5, 23),
+};
+static const unsigned int usb2_mux[] = {
+ USB2_PWEN_MARK, USB2_OVC_MARK,
+};
+/* - VIN0 ------------------------------------------------------------------- */
+static const unsigned int vin0_data_g_pins[] = {
+ RCAR_GP_PIN(0, 8), RCAR_GP_PIN(0, 9), RCAR_GP_PIN(0, 10),
+ RCAR_GP_PIN(0, 11), RCAR_GP_PIN(0, 0), RCAR_GP_PIN(0, 1),
+ RCAR_GP_PIN(0, 2), RCAR_GP_PIN(0, 3),
+};
+static const unsigned int vin0_data_g_mux[] = {
+ VI0_G0_MARK, VI0_G1_MARK, VI0_G2_MARK,
+ VI0_G3_MARK, VI0_G4_MARK, VI0_G5_MARK,
+ VI0_G6_MARK, VI0_G7_MARK,
+};
+static const unsigned int vin0_data_r_pins[] = {
+ RCAR_GP_PIN(0, 4), RCAR_GP_PIN(0, 5), RCAR_GP_PIN(0, 6),
+ RCAR_GP_PIN(0, 7), RCAR_GP_PIN(0, 24), RCAR_GP_PIN(0, 25),
+ RCAR_GP_PIN(0, 26), RCAR_GP_PIN(1, 11),
+};
+static const unsigned int vin0_data_r_mux[] = {
+ VI0_R0_MARK, VI0_R1_MARK, VI0_R2_MARK,
+ VI0_R3_MARK, VI0_R4_MARK, VI0_R5_MARK,
+ VI0_R6_MARK, VI0_R7_MARK,
+};
+static const unsigned int vin0_data_b_pins[] = {
+ RCAR_GP_PIN(2, 1), RCAR_GP_PIN(2, 2), RCAR_GP_PIN(2, 3),
+ RCAR_GP_PIN(2, 4), RCAR_GP_PIN(2, 5), RCAR_GP_PIN(2, 6),
+ RCAR_GP_PIN(2, 7), RCAR_GP_PIN(2, 8),
+};
+static const unsigned int vin0_data_b_mux[] = {
+ VI0_DATA0_VI0_B0_MARK, VI0_DATA1_VI0_B1_MARK, VI0_DATA2_VI0_B2_MARK,
+ VI0_DATA3_VI0_B3_MARK, VI0_DATA4_VI0_B4_MARK, VI0_DATA5_VI0_B5_MARK,
+ VI0_DATA6_VI0_B6_MARK, VI0_DATA7_VI0_B7_MARK,
+};
+static const unsigned int vin0_hsync_signal_pins[] = {
+ RCAR_GP_PIN(0, 12),
+};
+static const unsigned int vin0_hsync_signal_mux[] = {
+ VI0_HSYNC_N_MARK,
+};
+static const unsigned int vin0_vsync_signal_pins[] = {
+ RCAR_GP_PIN(0, 13),
+};
+static const unsigned int vin0_vsync_signal_mux[] = {
+ VI0_VSYNC_N_MARK,
+};
+static const unsigned int vin0_field_signal_pins[] = {
+ RCAR_GP_PIN(0, 15),
+};
+static const unsigned int vin0_field_signal_mux[] = {
+ VI0_FIELD_MARK,
+};
+static const unsigned int vin0_data_enable_pins[] = {
+ RCAR_GP_PIN(0, 14),
+};
+static const unsigned int vin0_data_enable_mux[] = {
+ VI0_CLKENB_MARK,
+};
+static const unsigned int vin0_clk_pins[] = {
+ RCAR_GP_PIN(2, 0),
+};
+static const unsigned int vin0_clk_mux[] = {
+ VI0_CLK_MARK,
+};
+/* - VIN1 ------------------------------------------------------------------- */
+static const unsigned int vin1_data_pins[] = {
+ RCAR_GP_PIN(2, 10), RCAR_GP_PIN(2, 11), RCAR_GP_PIN(2, 12),
+ RCAR_GP_PIN(2, 13), RCAR_GP_PIN(2, 14), RCAR_GP_PIN(2, 15),
+ RCAR_GP_PIN(2, 16), RCAR_GP_PIN(2, 17),
+};
+static const unsigned int vin1_data_mux[] = {
+ VI1_DATA0_VI1_B0_MARK, VI1_DATA1_VI1_B1_MARK, VI1_DATA2_VI1_B2_MARK,
+ VI1_DATA3_VI1_B3_MARK, VI1_DATA4_VI1_B4_MARK, VI1_DATA5_VI1_B5_MARK,
+ VI1_DATA6_VI1_B6_MARK, VI1_DATA7_VI1_B7_MARK,
+};
+static const unsigned int vin1_clk_pins[] = {
+ RCAR_GP_PIN(2, 9),
+};
+static const unsigned int vin1_clk_mux[] = {
+ VI1_CLK_MARK,
+};
static const struct sh_pfc_pin_group pinmux_groups[] = {
+ SH_PFC_PIN_GROUP(du_rgb666),
+ SH_PFC_PIN_GROUP(du_rgb888),
+ SH_PFC_PIN_GROUP(du_clk_out_0),
+ SH_PFC_PIN_GROUP(du_clk_out_1),
+ SH_PFC_PIN_GROUP(du_sync_0),
+ SH_PFC_PIN_GROUP(du_sync_1),
+ SH_PFC_PIN_GROUP(du_cde),
+ SH_PFC_PIN_GROUP(du0_clk_in),
+ SH_PFC_PIN_GROUP(du1_clk_in),
+ SH_PFC_PIN_GROUP(du2_clk_in),
SH_PFC_PIN_GROUP(eth_link),
SH_PFC_PIN_GROUP(eth_magic),
SH_PFC_PIN_GROUP(eth_mdio),
@@ -2755,6 +3059,30 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(mmc1_data4),
SH_PFC_PIN_GROUP(mmc1_data8),
SH_PFC_PIN_GROUP(mmc1_ctrl),
+ SH_PFC_PIN_GROUP(msiof0_clk),
+ SH_PFC_PIN_GROUP(msiof0_sync),
+ SH_PFC_PIN_GROUP(msiof0_ss1),
+ SH_PFC_PIN_GROUP(msiof0_ss2),
+ SH_PFC_PIN_GROUP(msiof0_rx),
+ SH_PFC_PIN_GROUP(msiof0_tx),
+ SH_PFC_PIN_GROUP(msiof1_clk),
+ SH_PFC_PIN_GROUP(msiof1_sync),
+ SH_PFC_PIN_GROUP(msiof1_ss1),
+ SH_PFC_PIN_GROUP(msiof1_ss2),
+ SH_PFC_PIN_GROUP(msiof1_rx),
+ SH_PFC_PIN_GROUP(msiof1_tx),
+ SH_PFC_PIN_GROUP(msiof2_clk),
+ SH_PFC_PIN_GROUP(msiof2_sync),
+ SH_PFC_PIN_GROUP(msiof2_ss1),
+ SH_PFC_PIN_GROUP(msiof2_ss2),
+ SH_PFC_PIN_GROUP(msiof2_rx),
+ SH_PFC_PIN_GROUP(msiof2_tx),
+ SH_PFC_PIN_GROUP(msiof3_clk),
+ SH_PFC_PIN_GROUP(msiof3_sync),
+ SH_PFC_PIN_GROUP(msiof3_ss1),
+ SH_PFC_PIN_GROUP(msiof3_ss2),
+ SH_PFC_PIN_GROUP(msiof3_rx),
+ SH_PFC_PIN_GROUP(msiof3_tx),
SH_PFC_PIN_GROUP(scif0_data),
SH_PFC_PIN_GROUP(scif0_clk),
SH_PFC_PIN_GROUP(scif0_ctrl),
@@ -2768,6 +3096,9 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(scif1_clk_d),
SH_PFC_PIN_GROUP(scif1_data_e),
SH_PFC_PIN_GROUP(scif1_clk_e),
+ SH_PFC_PIN_GROUP(scif2_data),
+ SH_PFC_PIN_GROUP(scif2_clk),
+ SH_PFC_PIN_GROUP(scif2_data_b),
SH_PFC_PIN_GROUP(scifa0_data),
SH_PFC_PIN_GROUP(scifa0_clk),
SH_PFC_PIN_GROUP(scifa0_ctrl),
@@ -2843,6 +3174,41 @@ static const struct sh_pfc_pin_group pinmux_groups[] = {
SH_PFC_PIN_GROUP(tpu0_to1),
SH_PFC_PIN_GROUP(tpu0_to2),
SH_PFC_PIN_GROUP(tpu0_to3),
+ SH_PFC_PIN_GROUP(usb0),
+ SH_PFC_PIN_GROUP(usb1),
+ SH_PFC_PIN_GROUP(usb2),
+ SH_PFC_PIN_GROUP(vin0_data_g),
+ SH_PFC_PIN_GROUP(vin0_data_r),
+ SH_PFC_PIN_GROUP(vin0_data_b),
+ SH_PFC_PIN_GROUP(vin0_hsync_signal),
+ SH_PFC_PIN_GROUP(vin0_vsync_signal),
+ SH_PFC_PIN_GROUP(vin0_field_signal),
+ SH_PFC_PIN_GROUP(vin0_data_enable),
+ SH_PFC_PIN_GROUP(vin0_clk),
+ SH_PFC_PIN_GROUP(vin1_data),
+ SH_PFC_PIN_GROUP(vin1_clk),
+};
+
+static const char * const du_groups[] = {
+ "du_rgb666",
+ "du_rgb888",
+ "du_clk_out_0",
+ "du_clk_out_1",
+ "du_sync_0",
+ "du_sync_1",
+ "du_cde",
+};
+
+static const char * const du0_groups[] = {
+ "du0_clk_in",
+};
+
+static const char * const du1_groups[] = {
+ "du1_clk_in",
+};
+
+static const char * const du2_groups[] = {
+ "du2_clk_in",
};
static const char * const eth_groups[] = {
@@ -2852,6 +3218,31 @@ static const char * const eth_groups[] = {
"eth_rmii",
};
+static const char * const hscif0_groups[] = {
+ "hscif0_data",
+ "hscif0_clk",
+ "hscif0_ctrl",
+ "hscif0_data_b",
+ "hscif0_ctrl_b",
+ "hscif0_data_c",
+ "hscif0_ctrl_c",
+ "hscif0_data_d",
+ "hscif0_ctrl_d",
+ "hscif0_data_e",
+ "hscif0_ctrl_e",
+ "hscif0_data_f",
+ "hscif0_ctrl_f",
+};
+
+static const char * const hscif1_groups[] = {
+ "hscif1_data",
+ "hscif1_clk",
+ "hscif1_ctrl",
+ "hscif1_data_b",
+ "hscif1_clk_b",
+ "hscif1_ctrl_b",
+};
+
static const char * const intc_groups[] = {
"intc_irq0",
"intc_irq1",
@@ -2859,6 +3250,56 @@ static const char * const intc_groups[] = {
"intc_irq3",
};
+static const char * const mmc0_groups[] = {
+ "mmc0_data1",
+ "mmc0_data4",
+ "mmc0_data8",
+ "mmc0_ctrl",
+};
+
+static const char * const mmc1_groups[] = {
+ "mmc1_data1",
+ "mmc1_data4",
+ "mmc1_data8",
+ "mmc1_ctrl",
+};
+
+static const char * const msiof0_groups[] = {
+ "msiof0_clk",
+ "msiof0_sync",
+ "msiof0_ss1",
+ "msiof0_ss2",
+ "msiof0_rx",
+ "msiof0_tx",
+};
+
+static const char * const msiof1_groups[] = {
+ "msiof1_clk",
+ "msiof1_sync",
+ "msiof1_ss1",
+ "msiof1_ss2",
+ "msiof1_rx",
+ "msiof1_tx",
+};
+
+static const char * const msiof2_groups[] = {
+ "msiof2_clk",
+ "msiof2_sync",
+ "msiof2_ss1",
+ "msiof2_ss2",
+ "msiof2_rx",
+ "msiof2_tx",
+};
+
+static const char * const msiof3_groups[] = {
+ "msiof3_clk",
+ "msiof3_sync",
+ "msiof3_ss1",
+ "msiof3_ss2",
+ "msiof3_rx",
+ "msiof3_tx",
+};
+
static const char * const scif0_groups[] = {
"scif0_data",
"scif0_clk",
@@ -2878,29 +3319,10 @@ static const char * const scif1_groups[] = {
"scif1_clk_e",
};
-static const char * const hscif0_groups[] = {
- "hscif0_data",
- "hscif0_clk",
- "hscif0_ctrl",
- "hscif0_data_b",
- "hscif0_ctrl_b",
- "hscif0_data_c",
- "hscif0_ctrl_c",
- "hscif0_data_d",
- "hscif0_ctrl_d",
- "hscif0_data_e",
- "hscif0_ctrl_e",
- "hscif0_data_f",
- "hscif0_ctrl_f",
-};
-
-static const char * const hscif1_groups[] = {
- "hscif1_data",
- "hscif1_clk",
- "hscif1_ctrl",
- "hscif1_data_b",
- "hscif1_clk_b",
- "hscif1_ctrl_b",
+static const char * const scif2_groups[] = {
+ "scif2_data",
+ "scif2_clk",
+ "scif2_data_b",
};
static const char * const scifa0_groups[] = {
@@ -2972,27 +3394,6 @@ static const char * const scifb2_groups[] = {
"scifb2_data_c",
};
-static const char * const tpu0_groups[] = {
- "tpu0_to0",
- "tpu0_to1",
- "tpu0_to2",
- "tpu0_to3",
-};
-
-static const char * const mmc0_groups[] = {
- "mmc0_data1",
- "mmc0_data4",
- "mmc0_data8",
- "mmc0_ctrl",
-};
-
-static const char * const mmc1_groups[] = {
- "mmc1_data1",
- "mmc1_data4",
- "mmc1_data8",
- "mmc1_ctrl",
-};
-
static const char * const sdhi0_groups[] = {
"sdhi0_data1",
"sdhi0_data4",
@@ -3025,15 +3426,59 @@ static const char * const sdhi3_groups[] = {
"sdhi3_wp",
};
+static const char * const tpu0_groups[] = {
+ "tpu0_to0",
+ "tpu0_to1",
+ "tpu0_to2",
+ "tpu0_to3",
+};
+
+static const char * const usb0_groups[] = {
+ "usb0",
+};
+
+static const char * const usb1_groups[] = {
+ "usb1",
+};
+
+static const char * const usb2_groups[] = {
+ "usb2",
+};
+
+static const char * const vin0_groups[] = {
+ "vin0_data_g",
+ "vin0_data_r",
+ "vin0_data_b",
+ "vin0_hsync_signal",
+ "vin0_vsync_signal",
+ "vin0_field_signal",
+ "vin0_data_enable",
+ "vin0_clk",
+};
+
+static const char * const vin1_groups[] = {
+ "vin1_data",
+ "vin1_clk",
+};
+
static const struct sh_pfc_function pinmux_functions[] = {
+ SH_PFC_FUNCTION(du),
+ SH_PFC_FUNCTION(du0),
+ SH_PFC_FUNCTION(du1),
+ SH_PFC_FUNCTION(du2),
SH_PFC_FUNCTION(eth),
SH_PFC_FUNCTION(hscif0),
SH_PFC_FUNCTION(hscif1),
SH_PFC_FUNCTION(intc),
SH_PFC_FUNCTION(mmc0),
SH_PFC_FUNCTION(mmc1),
+ SH_PFC_FUNCTION(msiof0),
+ SH_PFC_FUNCTION(msiof1),
+ SH_PFC_FUNCTION(msiof2),
+ SH_PFC_FUNCTION(msiof3),
SH_PFC_FUNCTION(scif0),
SH_PFC_FUNCTION(scif1),
+ SH_PFC_FUNCTION(scif2),
SH_PFC_FUNCTION(scifa0),
SH_PFC_FUNCTION(scifa1),
SH_PFC_FUNCTION(scifa2),
@@ -3045,6 +3490,11 @@ static const struct sh_pfc_function pinmux_functions[] = {
SH_PFC_FUNCTION(sdhi2),
SH_PFC_FUNCTION(sdhi3),
SH_PFC_FUNCTION(tpu0),
+ SH_PFC_FUNCTION(usb0),
+ SH_PFC_FUNCTION(usb1),
+ SH_PFC_FUNCTION(usb2),
+ SH_PFC_FUNCTION(vin0),
+ SH_PFC_FUNCTION(vin1),
};
static struct pinmux_cfg_reg pinmux_config_regs[] = {
@@ -3257,16 +3707,16 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP0_31 [1] */
0, 0,
/* IP0_30_27 [4] */
- FN_D8, FN_SCIFA1_SCK_C, FN_AVB_TXD0, FN_MII_TXD0,
+ FN_D8, FN_SCIFA1_SCK_C, FN_AVB_TXD0, 0,
FN_VI0_G0, FN_VI0_G0_B, FN_VI2_DATA0_VI2_B0,
0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP0_26_23 [4] */
- FN_D7, FN_AD_DI_B, FN_SDA2_C,
- FN_VI3_DATA7, FN_VI0_R3, FN_VI0_R3_B, FN_SDA2_CIS_C,
- 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ FN_D7, FN_AD_DI_B, FN_IIC2_SDA_C,
+ FN_VI3_DATA7, FN_VI0_R3, FN_VI0_R3_B, FN_I2C2_SDA_C,
+ FN_TCLK1, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP0_22_20 [3] */
- FN_D6, FN_SCL2_C, FN_VI3_DATA6, FN_VI0_R2, FN_VI0_R2_B,
- FN_SCL2_CIS_C, 0, 0,
+ FN_D6, FN_IIC2_SCL_C, FN_VI3_DATA6, FN_VI0_R2, FN_VI0_R2_B,
+ FN_I2C2_SCL_C, 0, 0,
/* IP0_19_16 [4] */
FN_D5, FN_SCIFB1_TXD_F, FN_SCIFB0_TXD_C, FN_VI3_DATA5,
FN_VI0_R1, FN_VI0_R1_B, FN_TX0_B,
@@ -3313,15 +3763,15 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_VI0_HSYNC_N, FN_VI0_HSYNC_N_B, FN_VI2_DATA4_VI2_B4,
0, 0,
/* IP1_11_8 [4] */
- FN_D11, FN_SCIFA1_CTS_N_C, FN_AVB_TXD3, FN_MII_TXD3,
+ FN_D11, FN_SCIFA1_CTS_N_C, FN_AVB_TXD3, 0,
FN_VI0_G3, FN_VI0_G3_B, FN_VI2_DATA3_VI2_B3,
0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP1_7_4 [4] */
- FN_D10, FN_SCIFA1_TXD_C, FN_AVB_TXD2, FN_MII_TXD2,
+ FN_D10, FN_SCIFA1_TXD_C, FN_AVB_TXD2, 0,
FN_VI0_G2, FN_VI0_G2_B, FN_VI2_DATA2_VI2_B2,
0, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP1_3_0 [4] */
- FN_D9, FN_SCIFA1_RXD_C, FN_AVB_TXD1, FN_MII_TXD1,
+ FN_D9, FN_SCIFA1_RXD_C, FN_AVB_TXD1, 0,
FN_VI0_G1, FN_VI0_G1_B, FN_VI2_DATA1_VI2_B1,
0, 0, 0, 0, 0, 0, 0, 0, 0, }
},
@@ -3334,11 +3784,11 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_VI0_R6_B, FN_VI2_DATA2_VI2_B2_B, 0, 0,
/* IP2_25_22 [4] */
FN_A9, FN_SCIFA1_CTS_N_B, FN_SSI_WS5_B, FN_VI0_R5,
- FN_VI0_R5_B, FN_SCIFB2_TXD_C, 0, FN_VI2_DATA1_VI2_B1_B,
+ FN_VI0_R5_B, FN_SCIFB2_TXD_C, FN_TX2_B, FN_VI2_DATA1_VI2_B1_B,
0, 0, 0, 0, 0, 0, 0, 0,
/* IP2_21_18 [4] */
FN_A8, FN_SCIFA1_RXD_B, FN_SSI_SCK5_B, FN_VI0_R4,
- FN_VI0_R4_B, FN_SCIFB2_RXD_C, 0, FN_VI2_DATA0_VI2_B0_B,
+ FN_VI0_R4_B, FN_SCIFB2_RXD_C, FN_RX2_B, FN_VI2_DATA0_VI2_B0_B,
0, 0, 0, 0, 0, 0, 0, 0,
/* IP2_17_15 [3] */
FN_A7, FN_SCIFA1_SCK_B, FN_AUDIO_CLKOUT_B, FN_TPU0TO3,
@@ -3448,12 +3898,12 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0,
/* IP5_9_6 [4] */
FN_EX_CS5_N, FN_CAN0_RX, FN_MSIOF1_RXD_B, FN_VI3_VSYNC_N,
- FN_VI1_G2, FN_VI1_G2_B, FN_VI2_R4, FN_SDA1, FN_INTC_EN1_N,
- FN_SDA1_CIS, 0, 0, 0, 0, 0, 0,
+ FN_VI1_G2, FN_VI1_G2_B, FN_VI2_R4, FN_IIC1_SDA, FN_INTC_EN1_N,
+ FN_I2C1_SDA, 0, 0, 0, 0, 0, 0,
/* IP5_5_3 [3] */
FN_EX_CS4_N, FN_MSIOF1_SCK_B, FN_VI3_HSYNC_N,
- FN_VI2_HSYNC_N, FN_SCL1, FN_VI2_HSYNC_N_B,
- FN_INTC_EN0_N, FN_SCL1_CIS,
+ FN_VI2_HSYNC_N, FN_IIC1_SCL, FN_VI2_HSYNC_N_B,
+ FN_INTC_EN0_N, FN_I2C1_SCL,
/* IP5_2_0 [3] */
FN_EX_CS3_N, FN_GPS_MAG, FN_VI3_FIELD, FN_VI1_G1, FN_VI1_G1_B,
FN_VI2_R3, 0, 0, }
@@ -3461,24 +3911,24 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG_VAR("IPSR6", 0xE6060038, 32,
3, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3) {
/* IP6_31_29 [3] */
- FN_ETH_REF_CLK, FN_RMII_REF_CLK, FN_HCTS0_N_E,
+ FN_ETH_REF_CLK, 0, FN_HCTS0_N_E,
FN_STP_IVCXO27_1_B, FN_HRX0_F, 0, 0, 0,
/* IP6_28_26 [3] */
- FN_ETH_LINK, FN_RMII_LINK, FN_HTX0_E,
+ FN_ETH_LINK, 0, FN_HTX0_E,
FN_STP_IVCXO27_0_B, FN_SCIFB1_TXD_G, FN_TX1_E, 0, 0,
/* IP6_25_23 [3] */
- FN_ETH_RXD1, FN_RMII_RXD1, FN_HRX0_E, FN_STP_ISSYNC_0_B,
+ FN_ETH_RXD1, 0, FN_HRX0_E, FN_STP_ISSYNC_0_B,
FN_TS_SCK0_D, FN_GLO_I1_C, FN_SCIFB1_RXD_G, FN_RX1_E,
/* IP6_22_20 [3] */
- FN_ETH_RXD0, FN_RMII_RXD0, FN_STP_ISEN_0_B, FN_TS_SDAT0_D,
+ FN_ETH_RXD0, 0, FN_STP_ISEN_0_B, FN_TS_SDAT0_D,
FN_GLO_I0_C, FN_SCIFB1_SCK_G, FN_SCK1_E, 0,
/* IP6_19_17 [3] */
- FN_ETH_RX_ER, FN_RMII_RX_ER, FN_STP_ISD_0_B,
- FN_TS_SPSYNC0_D, FN_GLO_Q1_C, FN_SDA2_E, FN_SDA2_CIS_E, 0,
+ FN_ETH_RX_ER, 0, FN_STP_ISD_0_B,
+ FN_TS_SPSYNC0_D, FN_GLO_Q1_C, FN_IIC2_SDA_E, FN_I2C2_SDA_E, 0,
/* IP6_16_14 [3] */
- FN_ETH_CRS_DV, FN_RMII_CRS_DV, FN_STP_ISCLK_0_B,
- FN_TS_SDEN0_D, FN_GLO_Q0_C, FN_SCL2_E,
- FN_SCL2_CIS_E, 0,
+ FN_ETH_CRS_DV, 0, FN_STP_ISCLK_0_B,
+ FN_TS_SDEN0_D, FN_GLO_Q0_C, FN_IIC2_SCL_E,
+ FN_I2C2_SCL_E, 0,
/* IP6_13_11 [3] */
FN_DACK2, FN_IRQ2, FN_INTC_IRQ2_N,
FN_SSI_SDATA6_B, FN_HRTS0_N_B, FN_MSIOF0_RXD_B, 0, 0,
@@ -3499,12 +3949,11 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP7_31 [1] */
0, 0,
/* IP7_30_29 [2] */
- FN_VI0_DATA0_VI0_B0, FN_ATACS10_N, FN_AVB_RXD2,
- FN_MII_RXD2,
+ FN_VI0_DATA0_VI0_B0, FN_ATACS10_N, FN_AVB_RXD2, 0,
/* IP7_28_27 [2] */
- FN_VI0_CLK, FN_ATACS00_N, FN_AVB_RXD1, FN_MII_RXD1,
+ FN_VI0_CLK, FN_ATACS00_N, FN_AVB_RXD1, 0,
/* IP7_26_25 [2] */
- FN_DU1_DOTCLKIN, FN_AUDIO_CLKC, FN_AUDIO_CLKOUT_C, 0,
+ FN_DU_DOTCLKIN1, FN_AUDIO_CLKC, FN_AUDIO_CLKOUT_C, 0,
/* IP7_24_22 [3] */
FN_PWM2, FN_PWMFSW0, FN_SCIFA2_RXD_C, FN_PCMWE_N, FN_IECLK_C,
0, 0, 0,
@@ -3515,20 +3964,19 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_PWM0, FN_SCIFA2_SCK_C, FN_STP_ISEN_1_B, FN_TS_SDAT1_C,
FN_GLO_SS_C, 0, 0, 0,
/* IP7_15_13 [3] */
- FN_ETH_MDC, FN_RMII_MDC, FN_STP_ISD_1_B,
+ FN_ETH_MDC, 0, FN_STP_ISD_1_B,
FN_TS_SPSYNC1_C, FN_GLO_SDATA_C, 0, 0, 0,
/* IP7_12_10 [3] */
- FN_ETH_TXD0, FN_RMII_TXD0, FN_STP_ISCLK_1_B, FN_TS_SDEN1_C,
+ FN_ETH_TXD0, 0, FN_STP_ISCLK_1_B, FN_TS_SDEN1_C,
FN_GLO_SCLK_C, 0, 0, 0,
/* IP7_9_8 [2] */
- FN_ETH_MAGIC, FN_RMII_MAGIC, FN_SIM0_RST_C, 0,
+ FN_ETH_MAGIC, 0, FN_SIM0_RST_C, 0,
/* IP7_7_6 [2] */
- FN_ETH_TX_EN, FN_RMII_TX_EN, FN_SIM0_CLK_C, FN_HRTS0_N_F,
+ FN_ETH_TX_EN, 0, FN_SIM0_CLK_C, FN_HRTS0_N_F,
/* IP7_5_3 [3] */
- FN_ETH_TXD1, FN_RMII_TXD1, FN_HTX0_F, FN_BPFCLK_G, FN_RDS_CLK_F,
- 0, 0, 0,
+ FN_ETH_TXD1, 0, FN_HTX0_F, FN_BPFCLK_G, 0, 0, 0, 0,
/* IP7_2_0 [3] */
- FN_ETH_MDIO, FN_RMII_MDIO, FN_HRTS0_N_E,
+ FN_ETH_MDIO, 0, FN_HRTS0_N_E,
FN_SIM0_D_C, FN_HCTS0_N_F, 0, 0, 0, }
},
{ PINMUX_CFG_REG_VAR("IPSR8", 0xE6060040, 32,
@@ -3546,22 +3994,21 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_VI1_DATA5_VI1_B5, FN_AVB_PHY_INT,
/* IP8_25_24 [2] */
FN_VI1_DATA4_VI1_B4, FN_SCIFA1_RTS_N_D,
- FN_AVB_MAGIC, FN_MII_MAGIC,
+ FN_AVB_MAGIC, 0,
/* IP8_23_22 [2] */
FN_VI1_DATA3_VI1_B3, FN_SCIFA1_CTS_N_D, FN_AVB_GTX_CLK, 0,
/* IP8_21_20 [2] */
- FN_VI1_DATA2_VI1_B2, FN_SCIFA1_TXD_D, FN_AVB_MDIO,
- FN_MII_MDIO,
+ FN_VI1_DATA2_VI1_B2, FN_SCIFA1_TXD_D, FN_AVB_MDIO, 0,
/* IP8_19_18 [2] */
- FN_VI1_DATA1_VI1_B1, FN_SCIFA1_RXD_D, FN_AVB_MDC, FN_MII_MDC,
+ FN_VI1_DATA1_VI1_B1, FN_SCIFA1_RXD_D, FN_AVB_MDC, 0,
/* IP8_17_16 [2] */
- FN_VI1_DATA0_VI1_B0, FN_SCIFA1_SCK_D, FN_AVB_CRS, FN_MII_CRS,
+ FN_VI1_DATA0_VI1_B0, FN_SCIFA1_SCK_D, FN_AVB_CRS, 0,
/* IP8_15_14 [2] */
- FN_VI1_CLK, FN_AVB_RX_DV, FN_MII_RX_DV, 0,
+ FN_VI1_CLK, FN_AVB_RX_DV, 0, 0,
/* IP8_13_12 [2] */
- FN_VI0_DATA7_VI0_B7, FN_AVB_RX_CLK, FN_MII_RX_CLK, 0,
+ FN_VI0_DATA7_VI0_B7, FN_AVB_RX_CLK, 0, 0,
/* IP8_11_10 [2] */
- FN_VI0_DATA6_VI0_B6, FN_AVB_RX_ER, FN_MII_RX_ER, 0,
+ FN_VI0_DATA6_VI0_B6, FN_AVB_RX_ER, 0, 0,
/* IP8_9_8 [2] */
FN_VI0_DATA5_VI0_B5, FN_EX_WAIT1, FN_AVB_RXD7, 0,
/* IP8_7_6 [2] */
@@ -3571,34 +4018,34 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP8_3_2 [2] */
FN_VI0_DATA2_VI0_B2, FN_ATAWR0_N, FN_AVB_RXD4, 0,
/* IP8_1_0 [2] */
- FN_VI0_DATA1_VI0_B1, FN_ATARD0_N, FN_AVB_RXD3, FN_MII_RXD3, }
+ FN_VI0_DATA1_VI0_B1, FN_ATARD0_N, FN_AVB_RXD3, 0, }
},
{ PINMUX_CFG_REG_VAR("IPSR9", 0xE6060044, 32,
4, 2, 2, 2, 2, 2, 2, 4, 4, 2, 2, 2, 2) {
/* IP9_31_28 [4] */
FN_SD1_CD, FN_MMC1_D6, FN_TS_SDEN1, FN_USB1_EXTP,
- FN_GLO_SS, FN_VI0_CLK_B, FN_SCL2_D, FN_SCL2_CIS_D,
+ FN_GLO_SS, FN_VI0_CLK_B, FN_IIC2_SCL_D, FN_I2C2_SCL_D,
FN_SIM0_CLK_B, FN_VI3_CLK_B, 0, 0, 0, 0, 0, 0,
/* IP9_27_26 [2] */
- FN_SD1_DAT3, FN_AVB_RXD0, FN_MII_RXD0, FN_SCIFB0_RTS_N_B,
+ FN_SD1_DAT3, FN_AVB_RXD0, 0, FN_SCIFB0_RTS_N_B,
/* IP9_25_24 [2] */
- FN_SD1_DAT2, FN_AVB_COL, FN_MII_COL, FN_SCIFB0_CTS_N_B,
+ FN_SD1_DAT2, FN_AVB_COL, 0, FN_SCIFB0_CTS_N_B,
/* IP9_23_22 [2] */
- FN_SD1_DAT1, FN_AVB_LINK, FN_MII_LINK, FN_SCIFB0_TXD_B,
+ FN_SD1_DAT1, FN_AVB_LINK, 0, FN_SCIFB0_TXD_B,
/* IP9_21_20 [2] */
- FN_SD1_DAT0, FN_AVB_TX_CLK, FN_MII_TX_CLK, FN_SCIFB0_RXD_B,
+ FN_SD1_DAT0, FN_AVB_TX_CLK, 0, FN_SCIFB0_RXD_B,
/* IP9_19_18 [2] */
- FN_SD1_CMD, FN_AVB_TX_ER, FN_MII_TX_ER, FN_SCIFB0_SCK_B,
+ FN_SD1_CMD, FN_AVB_TX_ER, 0, FN_SCIFB0_SCK_B,
/* IP9_17_16 [2] */
- FN_SD1_CLK, FN_AVB_TX_EN, FN_MII_TX_EN, 0,
+ FN_SD1_CLK, FN_AVB_TX_EN, 0, 0,
/* IP9_15_12 [4] */
FN_SD0_WP, FN_MMC0_D7, FN_TS_SPSYNC0_B, FN_USB0_IDIN,
- FN_GLO_SDATA, FN_VI1_DATA7_VI1_B7_B, FN_SDA1_B,
- FN_SDA1_CIS_B, FN_VI2_DATA7_VI2_B7_B, 0, 0, 0, 0, 0, 0, 0,
+ FN_GLO_SDATA, FN_VI1_DATA7_VI1_B7_B, FN_IIC1_SDA_B,
+ FN_I2C1_SDA_B, FN_VI2_DATA7_VI2_B7_B, 0, 0, 0, 0, 0, 0, 0,
/* IP9_11_8 [4] */
FN_SD0_CD, FN_MMC0_D6, FN_TS_SDEN0_B, FN_USB0_EXTP,
- FN_GLO_SCLK, FN_VI1_DATA6_VI1_B6_B, FN_SCL1_B,
- FN_SCL1_CIS_B, FN_VI2_DATA6_VI2_B6_B, 0, 0, 0, 0, 0, 0, 0,
+ FN_GLO_SCLK, FN_VI1_DATA6_VI1_B6_B, FN_IIC1_SCL_B,
+ FN_I2C1_SCL_B, FN_VI2_DATA6_VI2_B6_B, 0, 0, 0, 0, 0, 0, 0,
/* IP9_7_6 [2] */
FN_SD0_DAT3, FN_SCIFB1_RTS_N_B, FN_VI1_DATA5_VI1_B5_B, 0,
/* IP9_5_4 [2] */
@@ -3620,11 +4067,11 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SD2_DAT3, FN_MMC0_D3, FN_SIM0_RST, FN_VI0_DATA5_VI0_B5_B,
FN_HTX0_D, FN_TS_SPSYNC1_B, FN_GLO_Q1_B, FN_VI3_DATA5_B,
/* IP10_22_19 [4] */
- FN_SD2_DAT2, FN_MMC0_D2, FN_BPFCLK_B, FN_RDS_CLK,
+ FN_SD2_DAT2, FN_MMC0_D2, FN_BPFCLK_B, 0,
FN_VI0_DATA4_VI0_B4_B, FN_HRX0_D, FN_TS_SDEN1_B,
FN_GLO_Q0_B, FN_VI3_DATA4_B, 0, 0, 0, 0, 0, 0, 0,
/* IP10_18_15 [4] */
- FN_SD2_DAT1, FN_MMC0_D1, FN_FMIN_B, FN_RDS_DATA,
+ FN_SD2_DAT1, FN_MMC0_D1, FN_FMIN_B, 0,
FN_VI0_DATA3_VI0_B3_B, FN_SCIFB1_TXD_E, FN_TX1_D,
FN_TS_SCK0_C, FN_GLO_RFON_B, FN_VI3_DATA3_B,
0, 0, 0, 0, 0, 0,
@@ -3644,7 +4091,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_VI3_DATA0_B, 0,
/* IP10_3_0 [4] */
FN_SD1_WP, FN_MMC1_D7, FN_TS_SPSYNC1, FN_USB1_IDIN,
- FN_GLO_RFON, FN_VI1_CLK_B, FN_SDA2_D, FN_SDA2_CIS_D,
+ FN_GLO_RFON, FN_VI1_CLK_B, FN_IIC2_SDA_D, FN_I2C2_SDA_D,
FN_SIM0_D_B, 0, 0, 0, 0, 0, 0, 0, }
},
{ PINMUX_CFG_REG_VAR("IPSR11", 0xE606004C, 32,
@@ -3652,17 +4099,16 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP11_31_30 [2] */
FN_SSI_SCK0129, FN_CAN_CLK_B, FN_MOUT0, 0,
/* IP11_29_27 [3] */
- FN_MLB_DAT, FN_SPV_EVEN, FN_SCIFB1_TXD_D, FN_TX1_C, FN_BPFCLK_C,
- FN_RDS_CLK_B, 0, 0,
+ FN_MLB_DAT, 0, FN_SCIFB1_TXD_D, FN_TX1_C, FN_BPFCLK_C,
+ 0, 0, 0,
/* IP11_26_24 [3] */
- FN_MLB_SIG, FN_SCIFB1_RXD_D, FN_RX1_C, FN_SDA2_B, FN_SDA2_CIS_B,
+ FN_MLB_SIG, FN_SCIFB1_RXD_D, FN_RX1_C, FN_IIC2_SDA_B, FN_I2C2_SDA_B,
0, 0, 0,
/* IP11_23_22 [2] */
- FN_MLB_CLK, FN_SCL2_B, FN_SCL2_CIS_B, 0,
+ FN_MLB_CLK, FN_IIC2_SCL_B, FN_I2C2_SCL_B, 0,
/* IP11_21_18 [4] */
FN_SD3_WP, FN_MMC1_D5, FN_TS_SCK1, FN_GLO_Q1, FN_FMIN_C,
- FN_RDS_DATA_B, FN_FMIN_E, FN_RDS_DATA_D, FN_FMIN_F,
- FN_RDS_DATA_E, 0, 0, 0, 0, 0, 0,
+ 0, FN_FMIN_E, 0, FN_FMIN_F, 0, 0, 0, 0, 0, 0, 0,
/* IP11_17_15 [3] */
FN_SD3_CD, FN_MMC1_D4, FN_TS_SDAT1,
FN_VSP, FN_GLO_Q0, FN_SIM0_RST_B, 0, 0,
@@ -3737,8 +4183,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP13_22_19 [4] */
FN_SSI_SDATA7, FN_STP_ISD_1, FN_SCIFB2_RXD, FN_SCIFA2_RTS_N,
FN_TCLK2, FN_QSTVA_QVS, FN_CAN_DEBUGOUT11, FN_BPFCLK_E,
- FN_RDS_CLK_D, FN_SSI_SDATA7_B, FN_FMIN_G, FN_RDS_DATA_F,
- 0, 0, 0, 0,
+ 0, FN_SSI_SDATA7_B, FN_FMIN_G, 0, 0, 0, 0, 0,
/* IP13_18_16 [3] */
FN_SSI_WS78, FN_STP_ISCLK_1, FN_SCIFB2_SCK, FN_SCIFA2_CTS_N,
FN_DU2_DR7, FN_LCDOUT7, FN_CAN_DEBUGOUT10, 0,
@@ -3746,15 +4191,15 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SSI_SCK78, FN_STP_IVCXO27_1, FN_SCK1, FN_SCIFA1_SCK,
FN_DU2_DR6, FN_LCDOUT6, FN_CAN_DEBUGOUT9, 0,
/* IP13_12_10 [3] */
- FN_SSI_SDATA6, FN_FMIN_D, FN_RDS_DATA_C, FN_DU2_DR5, FN_LCDOUT5,
+ FN_SSI_SDATA6, FN_FMIN_D, 0, FN_DU2_DR5, FN_LCDOUT5,
FN_CAN_DEBUGOUT8, 0, 0,
/* IP13_9_7 [3] */
FN_SSI_WS6, FN_SCIFB1_RTS_N, FN_CAN0_TX_D, FN_DU2_DR4,
FN_LCDOUT4, FN_CAN_DEBUGOUT7, 0, 0,
/* IP13_6_3 [4] */
- FN_SSI_SCK6, FN_SCIFB1_CTS_N, FN_BPFCLK_D, FN_RDS_CLK_C,
+ FN_SSI_SCK6, FN_SCIFB1_CTS_N, FN_BPFCLK_D, 0,
FN_DU2_DR3, FN_LCDOUT3, FN_CAN_DEBUGOUT6,
- FN_BPFCLK_F, FN_RDS_CLK_E, 0, 0, 0, 0, 0, 0, 0,
+ FN_BPFCLK_F, 0, 0, 0, 0, 0, 0, 0, 0,
/* IP13_2_0 [3] */
FN_SSI_SDATA5, FN_SCIFB1_TXD, FN_IETX_B, FN_DU2_DR2,
FN_LCDOUT2, FN_CAN_DEBUGOUT5, 0, 0, }
@@ -3764,7 +4209,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
/* IP14_30 [1] */
0, 0,
/* IP14_30_28 [3] */
- FN_SCIFA1_RTS_N, FN_AD_NCS_N, FN_RTS1_N_TANS,
+ FN_SCIFA1_RTS_N, FN_AD_NCS_N, FN_RTS1_N,
FN_MSIOF3_TXD, FN_DU1_DOTCLKOUT, FN_QSTVB_QVE,
FN_HRTS0_N_C, 0,
/* IP14_27_25 [3] */
@@ -3777,11 +4222,11 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SCIFA1_RXD, FN_AD_DI, FN_RX1,
FN_DU2_EXODDF_DU2_ODDF_DISP_CDE, FN_QCPV_QDE, 0, 0, 0,
/* IP14_18_16 [3] */
- FN_SCIFA0_RTS_N, FN_HRTS1_N, FN_RTS0_N_TANS,
+ FN_SCIFA0_RTS_N, FN_HRTS1_N, FN_RTS0_N,
FN_MSIOF3_SS1, FN_DU2_DG0, FN_LCDOUT8, FN_PWM1_B, 0,
/* IP14_15_12 [4] */
FN_SCIFA0_CTS_N, FN_HCTS1_N, FN_CTS0_N, FN_MSIOF3_SYNC,
- FN_DU2_DG3, FN_LCDOUT11, FN_PWM0_B, FN_SCL1_C, FN_SCL1_CIS_C,
+ FN_DU2_DG3, FN_LCDOUT11, FN_PWM0_B, FN_IIC1_SCL_C, FN_I2C1_SCL_C,
0, 0, 0, 0, 0, 0, 0,
/* IP14_11_9 [3] */
FN_SCIFA0_TXD, FN_HTX1, FN_TX0, FN_DU2_DR1, FN_LCDOUT1,
@@ -3791,7 +4236,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0,
/* IP14_5_3 [3] */
FN_SCIFA0_SCK, FN_HSCK1, FN_SCK0, FN_MSIOF3_SS2, FN_DU2_DG2,
- FN_LCDOUT10, FN_SDA1_C, FN_SDA1_CIS_C,
+ FN_LCDOUT10, FN_IIC1_SDA_C, FN_I2C1_SDA_C,
/* IP14_2_0 [3] */
FN_AUDIO_CLKB, FN_SCIF_CLK, FN_CAN0_RX_D,
FN_DVC_MUTE, FN_CAN0_RX_C, FN_CAN_DEBUGOUT15,
@@ -3807,7 +4252,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_MSIOF0_SS1, FN_ADICHS0, FN_DU2_DG5, FN_LCDOUT13,
/* IP15_25_23 [3] */
FN_MSIOF0_SYNC, FN_TS_SCK0, FN_SSI_SCK2, FN_ADIDATA,
- FN_DU2_DB7, FN_LCDOUT23, FN_SCIFA2_RXD_B, 0,
+ FN_DU2_DB7, FN_LCDOUT23, FN_HRX0_C, 0,
/* IP15_22_20 [3] */
FN_MSIOF0_SCK, FN_TS_SDAT0, FN_ADICLK,
FN_DU2_DB6, FN_LCDOUT22, 0, 0, 0,
@@ -3823,13 +4268,13 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_HSCK0, FN_TS_SDEN0, FN_DU2_DG4, FN_LCDOUT12, FN_HCTS0_N_C,
0, 0, 0,
/* IP15_8_6 [3] */
- FN_SCIFA2_TXD, FN_BPFCLK, 0, FN_DU2_DB1, FN_LCDOUT17,
- FN_SDA2, FN_SDA2_CIS, 0,
+ FN_SCIFA2_TXD, FN_BPFCLK, FN_RX2, FN_DU2_DB1, FN_LCDOUT17,
+ FN_IIC2_SDA, FN_I2C2_SDA, 0,
/* IP15_5_3 [3] */
- FN_SCIFA2_RXD, FN_FMIN, 0, FN_DU2_DB0, FN_LCDOUT16,
- FN_SCL2, FN_SCL2_CIS, 0,
+ FN_SCIFA2_RXD, FN_FMIN, FN_TX2, FN_DU2_DB0, FN_LCDOUT16,
+ FN_IIC2_SCL, FN_I2C2_SCL, 0,
/* IP15_2_0 [3] */
- FN_SCIFA2_SCK, FN_FMCLK, 0, FN_MSIOF3_SCK, FN_DU2_DG7,
+ FN_SCIFA2_SCK, FN_FMCLK, FN_SCK2, FN_MSIOF3_SCK, FN_DU2_DG7,
FN_LCDOUT15, FN_SCIF_CLK_B, 0, }
},
{ PINMUX_CFG_REG_VAR("IPSR16", 0xE6060160, 32,
@@ -3858,7 +4303,7 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_USB1_PWEN, FN_AUDIO_CLKOUT_D,
/* IP16_5_3 [3] */
FN_MSIOF0_RXD, FN_TS_SPSYNC0, FN_SSI_WS2,
- FN_ADICS_SAMP, FN_DU2_CDE, FN_QPOLB, FN_HRX0_C, 0,
+ FN_ADICS_SAMP, FN_DU2_CDE, FN_QPOLB, FN_SCIFA2_RXD_B, 0,
/* IP16_2_0 [3] */
FN_MSIOF0_SS2, FN_AUDIO_CLKOUT, FN_ADICHS2,
FN_DU2_DISP, FN_QPOLA, FN_HTX0_C, FN_SCIFA2_TXD_B, 0, }
@@ -3934,8 +4379,8 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_CAN1_0, FN_SEL_CAN1_1,
/* RESERVED [2] */
0, 0, 0, 0,
- /* RESERVED [1] (actually TX2, RX2 vs. TX2_B, RX2_B of SCIF2) */
- 0, 0,
+ /* SEL_SCIF2 [1] */
+ FN_SEL_SCIF2_0, FN_SEL_SCIF2_1,
/* SEL_ADI [1] */
FN_SEL_ADI_0, FN_SEL_ADI_1,
/* SEL_SSP [1] */
@@ -3948,9 +4393,8 @@ static struct pinmux_cfg_reg pinmux_config_regs[] = {
FN_SEL_HSCIF0_3, FN_SEL_HSCIF0_4, FN_SEL_HSCIF0_5, 0, 0,
/* SEL_GPS [2] */
FN_SEL_GPS_0, FN_SEL_GPS_1, FN_SEL_GPS_2, 0,
- /* SEL_RDS [3] */
- FN_SEL_RDS_0, FN_SEL_RDS_1, FN_SEL_RDS_2,
- FN_SEL_RDS_3, FN_SEL_RDS_4, FN_SEL_RDS_5, 0, 0,
+ /* RESERVED [3] */
+ 0, 0, 0, 0, 0, 0, 0, 0,
/* SEL_SIM [2] */
FN_SEL_SIM_0, FN_SEL_SIM_1, FN_SEL_SIM_2, 0,
/* SEL_SSI8 [2] */
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7203.c b/drivers/pinctrl/sh-pfc/pfc-sh7203.c
index f63d51dc3f4..bf3d8f28768 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7203.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7203.c
@@ -272,8 +272,7 @@ enum {
PINMUX_MARK_END,
};
-static const pinmux_enum_t pinmux_data[] = {
-
+static const u16 pinmux_data[] = {
/* PA */
PINMUX_DATA(PA7_DATA, PA7_IN),
PINMUX_DATA(PA6_DATA, PA6_IN),
@@ -704,117 +703,116 @@ static const pinmux_enum_t pinmux_data[] = {
};
static struct sh_pfc_pin pinmux_pins[] = {
-
/* PA */
- PINMUX_GPIO(GPIO_PA7, PA7_DATA),
- PINMUX_GPIO(GPIO_PA6, PA6_DATA),
- PINMUX_GPIO(GPIO_PA5, PA5_DATA),
- PINMUX_GPIO(GPIO_PA4, PA4_DATA),
- PINMUX_GPIO(GPIO_PA3, PA3_DATA),
- PINMUX_GPIO(GPIO_PA2, PA2_DATA),
- PINMUX_GPIO(GPIO_PA1, PA1_DATA),
- PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+ PINMUX_GPIO(PA7),
+ PINMUX_GPIO(PA6),
+ PINMUX_GPIO(PA5),
+ PINMUX_GPIO(PA4),
+ PINMUX_GPIO(PA3),
+ PINMUX_GPIO(PA2),
+ PINMUX_GPIO(PA1),
+ PINMUX_GPIO(PA0),
/* PB */
- PINMUX_GPIO(GPIO_PB12, PB12_DATA),
- PINMUX_GPIO(GPIO_PB11, PB11_DATA),
- PINMUX_GPIO(GPIO_PB10, PB10_DATA),
- PINMUX_GPIO(GPIO_PB9, PB9_DATA),
- PINMUX_GPIO(GPIO_PB8, PB8_DATA),
- PINMUX_GPIO(GPIO_PB7, PB7_DATA),
- PINMUX_GPIO(GPIO_PB6, PB6_DATA),
- PINMUX_GPIO(GPIO_PB5, PB5_DATA),
- PINMUX_GPIO(GPIO_PB4, PB4_DATA),
- PINMUX_GPIO(GPIO_PB3, PB3_DATA),
- PINMUX_GPIO(GPIO_PB2, PB2_DATA),
- PINMUX_GPIO(GPIO_PB1, PB1_DATA),
- PINMUX_GPIO(GPIO_PB0, PB0_DATA),
+ PINMUX_GPIO(PB12),
+ PINMUX_GPIO(PB11),
+ PINMUX_GPIO(PB10),
+ PINMUX_GPIO(PB9),
+ PINMUX_GPIO(PB8),
+ PINMUX_GPIO(PB7),
+ PINMUX_GPIO(PB6),
+ PINMUX_GPIO(PB5),
+ PINMUX_GPIO(PB4),
+ PINMUX_GPIO(PB3),
+ PINMUX_GPIO(PB2),
+ PINMUX_GPIO(PB1),
+ PINMUX_GPIO(PB0),
/* PC */
- PINMUX_GPIO(GPIO_PC14, PC14_DATA),
- PINMUX_GPIO(GPIO_PC13, PC13_DATA),
- PINMUX_GPIO(GPIO_PC12, PC12_DATA),
- PINMUX_GPIO(GPIO_PC11, PC11_DATA),
- PINMUX_GPIO(GPIO_PC10, PC10_DATA),
- PINMUX_GPIO(GPIO_PC9, PC9_DATA),
- PINMUX_GPIO(GPIO_PC8, PC8_DATA),
- PINMUX_GPIO(GPIO_PC7, PC7_DATA),
- PINMUX_GPIO(GPIO_PC6, PC6_DATA),
- PINMUX_GPIO(GPIO_PC5, PC5_DATA),
- PINMUX_GPIO(GPIO_PC4, PC4_DATA),
- PINMUX_GPIO(GPIO_PC3, PC3_DATA),
- PINMUX_GPIO(GPIO_PC2, PC2_DATA),
- PINMUX_GPIO(GPIO_PC1, PC1_DATA),
- PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+ PINMUX_GPIO(PC14),
+ PINMUX_GPIO(PC13),
+ PINMUX_GPIO(PC12),
+ PINMUX_GPIO(PC11),
+ PINMUX_GPIO(PC10),
+ PINMUX_GPIO(PC9),
+ PINMUX_GPIO(PC8),
+ PINMUX_GPIO(PC7),
+ PINMUX_GPIO(PC6),
+ PINMUX_GPIO(PC5),
+ PINMUX_GPIO(PC4),
+ PINMUX_GPIO(PC3),
+ PINMUX_GPIO(PC2),
+ PINMUX_GPIO(PC1),
+ PINMUX_GPIO(PC0),
/* PD */
- PINMUX_GPIO(GPIO_PD15, PD15_DATA),
- PINMUX_GPIO(GPIO_PD14, PD14_DATA),
- PINMUX_GPIO(GPIO_PD13, PD13_DATA),
- PINMUX_GPIO(GPIO_PD12, PD12_DATA),
- PINMUX_GPIO(GPIO_PD11, PD11_DATA),
- PINMUX_GPIO(GPIO_PD10, PD10_DATA),
- PINMUX_GPIO(GPIO_PD9, PD9_DATA),
- PINMUX_GPIO(GPIO_PD8, PD8_DATA),
- PINMUX_GPIO(GPIO_PD7, PD7_DATA),
- PINMUX_GPIO(GPIO_PD6, PD6_DATA),
- PINMUX_GPIO(GPIO_PD5, PD5_DATA),
- PINMUX_GPIO(GPIO_PD4, PD4_DATA),
- PINMUX_GPIO(GPIO_PD3, PD3_DATA),
- PINMUX_GPIO(GPIO_PD2, PD2_DATA),
- PINMUX_GPIO(GPIO_PD1, PD1_DATA),
- PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+ PINMUX_GPIO(PD15),
+ PINMUX_GPIO(PD14),
+ PINMUX_GPIO(PD13),
+ PINMUX_GPIO(PD12),
+ PINMUX_GPIO(PD11),
+ PINMUX_GPIO(PD10),
+ PINMUX_GPIO(PD9),
+ PINMUX_GPIO(PD8),
+ PINMUX_GPIO(PD7),
+ PINMUX_GPIO(PD6),
+ PINMUX_GPIO(PD5),
+ PINMUX_GPIO(PD4),
+ PINMUX_GPIO(PD3),
+ PINMUX_GPIO(PD2),
+ PINMUX_GPIO(PD1),
+ PINMUX_GPIO(PD0),
/* PE */
- PINMUX_GPIO(GPIO_PE15, PE15_DATA),
- PINMUX_GPIO(GPIO_PE14, PE14_DATA),
- PINMUX_GPIO(GPIO_PE13, PE13_DATA),
- PINMUX_GPIO(GPIO_PE12, PE12_DATA),
- PINMUX_GPIO(GPIO_PE11, PE11_DATA),
- PINMUX_GPIO(GPIO_PE10, PE10_DATA),
- PINMUX_GPIO(GPIO_PE9, PE9_DATA),
- PINMUX_GPIO(GPIO_PE8, PE8_DATA),
- PINMUX_GPIO(GPIO_PE7, PE7_DATA),
- PINMUX_GPIO(GPIO_PE6, PE6_DATA),
- PINMUX_GPIO(GPIO_PE5, PE5_DATA),
- PINMUX_GPIO(GPIO_PE4, PE4_DATA),
- PINMUX_GPIO(GPIO_PE3, PE3_DATA),
- PINMUX_GPIO(GPIO_PE2, PE2_DATA),
- PINMUX_GPIO(GPIO_PE1, PE1_DATA),
- PINMUX_GPIO(GPIO_PE0, PE0_DATA),
+ PINMUX_GPIO(PE15),
+ PINMUX_GPIO(PE14),
+ PINMUX_GPIO(PE13),
+ PINMUX_GPIO(PE12),
+ PINMUX_GPIO(PE11),
+ PINMUX_GPIO(PE10),
+ PINMUX_GPIO(PE9),
+ PINMUX_GPIO(PE8),
+ PINMUX_GPIO(PE7),
+ PINMUX_GPIO(PE6),
+ PINMUX_GPIO(PE5),
+ PINMUX_GPIO(PE4),
+ PINMUX_GPIO(PE3),
+ PINMUX_GPIO(PE2),
+ PINMUX_GPIO(PE1),
+ PINMUX_GPIO(PE0),
/* PF */
- PINMUX_GPIO(GPIO_PF30, PF30_DATA),
- PINMUX_GPIO(GPIO_PF29, PF29_DATA),
- PINMUX_GPIO(GPIO_PF28, PF28_DATA),
- PINMUX_GPIO(GPIO_PF27, PF27_DATA),
- PINMUX_GPIO(GPIO_PF26, PF26_DATA),
- PINMUX_GPIO(GPIO_PF25, PF25_DATA),
- PINMUX_GPIO(GPIO_PF24, PF24_DATA),
- PINMUX_GPIO(GPIO_PF23, PF23_DATA),
- PINMUX_GPIO(GPIO_PF22, PF22_DATA),
- PINMUX_GPIO(GPIO_PF21, PF21_DATA),
- PINMUX_GPIO(GPIO_PF20, PF20_DATA),
- PINMUX_GPIO(GPIO_PF19, PF19_DATA),
- PINMUX_GPIO(GPIO_PF18, PF18_DATA),
- PINMUX_GPIO(GPIO_PF17, PF17_DATA),
- PINMUX_GPIO(GPIO_PF16, PF16_DATA),
- PINMUX_GPIO(GPIO_PF15, PF15_DATA),
- PINMUX_GPIO(GPIO_PF14, PF14_DATA),
- PINMUX_GPIO(GPIO_PF13, PF13_DATA),
- PINMUX_GPIO(GPIO_PF12, PF12_DATA),
- PINMUX_GPIO(GPIO_PF11, PF11_DATA),
- PINMUX_GPIO(GPIO_PF10, PF10_DATA),
- PINMUX_GPIO(GPIO_PF9, PF9_DATA),
- PINMUX_GPIO(GPIO_PF8, PF8_DATA),
- PINMUX_GPIO(GPIO_PF7, PF7_DATA),
- PINMUX_GPIO(GPIO_PF6, PF6_DATA),
- PINMUX_GPIO(GPIO_PF5, PF5_DATA),
- PINMUX_GPIO(GPIO_PF4, PF4_DATA),
- PINMUX_GPIO(GPIO_PF3, PF3_DATA),
- PINMUX_GPIO(GPIO_PF2, PF2_DATA),
- PINMUX_GPIO(GPIO_PF1, PF1_DATA),
- PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+ PINMUX_GPIO(PF30),
+ PINMUX_GPIO(PF29),
+ PINMUX_GPIO(PF28),
+ PINMUX_GPIO(PF27),
+ PINMUX_GPIO(PF26),
+ PINMUX_GPIO(PF25),
+ PINMUX_GPIO(PF24),
+ PINMUX_GPIO(PF23),
+ PINMUX_GPIO(PF22),
+ PINMUX_GPIO(PF21),
+ PINMUX_GPIO(PF20),
+ PINMUX_GPIO(PF19),
+ PINMUX_GPIO(PF18),
+ PINMUX_GPIO(PF17),
+ PINMUX_GPIO(PF16),
+ PINMUX_GPIO(PF15),
+ PINMUX_GPIO(PF14),
+ PINMUX_GPIO(PF13),
+ PINMUX_GPIO(PF12),
+ PINMUX_GPIO(PF11),
+ PINMUX_GPIO(PF10),
+ PINMUX_GPIO(PF9),
+ PINMUX_GPIO(PF8),
+ PINMUX_GPIO(PF7),
+ PINMUX_GPIO(PF6),
+ PINMUX_GPIO(PF5),
+ PINMUX_GPIO(PF4),
+ PINMUX_GPIO(PF3),
+ PINMUX_GPIO(PF2),
+ PINMUX_GPIO(PF1),
+ PINMUX_GPIO(PF0),
};
#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7264.c b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
index 284675249ed..673a5950322 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7264.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7264.c
@@ -604,8 +604,7 @@ enum {
PINMUX_MARK_END,
};
-static const pinmux_enum_t pinmux_data[] = {
-
+static const u16 pinmux_data[] = {
/* Port A */
PINMUX_DATA(PA3_DATA, PA3_IN),
PINMUX_DATA(PA2_DATA, PA2_IN),
@@ -1073,149 +1072,148 @@ static const pinmux_enum_t pinmux_data[] = {
};
static struct sh_pfc_pin pinmux_pins[] = {
-
/* Port A */
- PINMUX_GPIO(GPIO_PA3, PA3_DATA),
- PINMUX_GPIO(GPIO_PA2, PA2_DATA),
- PINMUX_GPIO(GPIO_PA1, PA1_DATA),
- PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+ PINMUX_GPIO(PA3),
+ PINMUX_GPIO(PA2),
+ PINMUX_GPIO(PA1),
+ PINMUX_GPIO(PA0),
/* Port B */
- PINMUX_GPIO(GPIO_PB22, PB22_DATA),
- PINMUX_GPIO(GPIO_PB21, PB21_DATA),
- PINMUX_GPIO(GPIO_PB20, PB20_DATA),
- PINMUX_GPIO(GPIO_PB19, PB19_DATA),
- PINMUX_GPIO(GPIO_PB18, PB18_DATA),
- PINMUX_GPIO(GPIO_PB17, PB17_DATA),
- PINMUX_GPIO(GPIO_PB16, PB16_DATA),
- PINMUX_GPIO(GPIO_PB15, PB15_DATA),
- PINMUX_GPIO(GPIO_PB14, PB14_DATA),
- PINMUX_GPIO(GPIO_PB13, PB13_DATA),
- PINMUX_GPIO(GPIO_PB12, PB12_DATA),
- PINMUX_GPIO(GPIO_PB11, PB11_DATA),
- PINMUX_GPIO(GPIO_PB10, PB10_DATA),
- PINMUX_GPIO(GPIO_PB9, PB9_DATA),
- PINMUX_GPIO(GPIO_PB8, PB8_DATA),
- PINMUX_GPIO(GPIO_PB7, PB7_DATA),
- PINMUX_GPIO(GPIO_PB6, PB6_DATA),
- PINMUX_GPIO(GPIO_PB5, PB5_DATA),
- PINMUX_GPIO(GPIO_PB4, PB4_DATA),
- PINMUX_GPIO(GPIO_PB3, PB3_DATA),
- PINMUX_GPIO(GPIO_PB2, PB2_DATA),
- PINMUX_GPIO(GPIO_PB1, PB1_DATA),
+ PINMUX_GPIO(PB22),
+ PINMUX_GPIO(PB21),
+ PINMUX_GPIO(PB20),
+ PINMUX_GPIO(PB19),
+ PINMUX_GPIO(PB18),
+ PINMUX_GPIO(PB17),
+ PINMUX_GPIO(PB16),
+ PINMUX_GPIO(PB15),
+ PINMUX_GPIO(PB14),
+ PINMUX_GPIO(PB13),
+ PINMUX_GPIO(PB12),
+ PINMUX_GPIO(PB11),
+ PINMUX_GPIO(PB10),
+ PINMUX_GPIO(PB9),
+ PINMUX_GPIO(PB8),
+ PINMUX_GPIO(PB7),
+ PINMUX_GPIO(PB6),
+ PINMUX_GPIO(PB5),
+ PINMUX_GPIO(PB4),
+ PINMUX_GPIO(PB3),
+ PINMUX_GPIO(PB2),
+ PINMUX_GPIO(PB1),
/* Port C */
- PINMUX_GPIO(GPIO_PC10, PC10_DATA),
- PINMUX_GPIO(GPIO_PC9, PC9_DATA),
- PINMUX_GPIO(GPIO_PC8, PC8_DATA),
- PINMUX_GPIO(GPIO_PC7, PC7_DATA),
- PINMUX_GPIO(GPIO_PC6, PC6_DATA),
- PINMUX_GPIO(GPIO_PC5, PC5_DATA),
- PINMUX_GPIO(GPIO_PC4, PC4_DATA),
- PINMUX_GPIO(GPIO_PC3, PC3_DATA),
- PINMUX_GPIO(GPIO_PC2, PC2_DATA),
- PINMUX_GPIO(GPIO_PC1, PC1_DATA),
- PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+ PINMUX_GPIO(PC10),
+ PINMUX_GPIO(PC9),
+ PINMUX_GPIO(PC8),
+ PINMUX_GPIO(PC7),
+ PINMUX_GPIO(PC6),
+ PINMUX_GPIO(PC5),
+ PINMUX_GPIO(PC4),
+ PINMUX_GPIO(PC3),
+ PINMUX_GPIO(PC2),
+ PINMUX_GPIO(PC1),
+ PINMUX_GPIO(PC0),
/* Port D */
- PINMUX_GPIO(GPIO_PD15, PD15_DATA),
- PINMUX_GPIO(GPIO_PD14, PD14_DATA),
- PINMUX_GPIO(GPIO_PD13, PD13_DATA),
- PINMUX_GPIO(GPIO_PD12, PD12_DATA),
- PINMUX_GPIO(GPIO_PD11, PD11_DATA),
- PINMUX_GPIO(GPIO_PD10, PD10_DATA),
- PINMUX_GPIO(GPIO_PD9, PD9_DATA),
- PINMUX_GPIO(GPIO_PD8, PD8_DATA),
- PINMUX_GPIO(GPIO_PD7, PD7_DATA),
- PINMUX_GPIO(GPIO_PD6, PD6_DATA),
- PINMUX_GPIO(GPIO_PD5, PD5_DATA),
- PINMUX_GPIO(GPIO_PD4, PD4_DATA),
- PINMUX_GPIO(GPIO_PD3, PD3_DATA),
- PINMUX_GPIO(GPIO_PD2, PD2_DATA),
- PINMUX_GPIO(GPIO_PD1, PD1_DATA),
- PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+ PINMUX_GPIO(PD15),
+ PINMUX_GPIO(PD14),
+ PINMUX_GPIO(PD13),
+ PINMUX_GPIO(PD12),
+ PINMUX_GPIO(PD11),
+ PINMUX_GPIO(PD10),
+ PINMUX_GPIO(PD9),
+ PINMUX_GPIO(PD8),
+ PINMUX_GPIO(PD7),
+ PINMUX_GPIO(PD6),
+ PINMUX_GPIO(PD5),
+ PINMUX_GPIO(PD4),
+ PINMUX_GPIO(PD3),
+ PINMUX_GPIO(PD2),
+ PINMUX_GPIO(PD1),
+ PINMUX_GPIO(PD0),
/* Port E */
- PINMUX_GPIO(GPIO_PE5, PE5_DATA),
- PINMUX_GPIO(GPIO_PE4, PE4_DATA),
- PINMUX_GPIO(GPIO_PE3, PE3_DATA),
- PINMUX_GPIO(GPIO_PE2, PE2_DATA),
- PINMUX_GPIO(GPIO_PE1, PE1_DATA),
- PINMUX_GPIO(GPIO_PE0, PE0_DATA),
+ PINMUX_GPIO(PE5),
+ PINMUX_GPIO(PE4),
+ PINMUX_GPIO(PE3),
+ PINMUX_GPIO(PE2),
+ PINMUX_GPIO(PE1),
+ PINMUX_GPIO(PE0),
/* Port F */
- PINMUX_GPIO(GPIO_PF12, PF12_DATA),
- PINMUX_GPIO(GPIO_PF11, PF11_DATA),
- PINMUX_GPIO(GPIO_PF10, PF10_DATA),
- PINMUX_GPIO(GPIO_PF9, PF9_DATA),
- PINMUX_GPIO(GPIO_PF8, PF8_DATA),
- PINMUX_GPIO(GPIO_PF7, PF7_DATA),
- PINMUX_GPIO(GPIO_PF6, PF6_DATA),
- PINMUX_GPIO(GPIO_PF5, PF5_DATA),
- PINMUX_GPIO(GPIO_PF4, PF4_DATA),
- PINMUX_GPIO(GPIO_PF3, PF3_DATA),
- PINMUX_GPIO(GPIO_PF2, PF2_DATA),
- PINMUX_GPIO(GPIO_PF1, PF1_DATA),
- PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+ PINMUX_GPIO(PF12),
+ PINMUX_GPIO(PF11),
+ PINMUX_GPIO(PF10),
+ PINMUX_GPIO(PF9),
+ PINMUX_GPIO(PF8),
+ PINMUX_GPIO(PF7),
+ PINMUX_GPIO(PF6),
+ PINMUX_GPIO(PF5),
+ PINMUX_GPIO(PF4),
+ PINMUX_GPIO(PF3),
+ PINMUX_GPIO(PF2),
+ PINMUX_GPIO(PF1),
+ PINMUX_GPIO(PF0),
/* Port G */
- PINMUX_GPIO(GPIO_PG24, PG24_DATA),
- PINMUX_GPIO(GPIO_PG23, PG23_DATA),
- PINMUX_GPIO(GPIO_PG22, PG22_DATA),
- PINMUX_GPIO(GPIO_PG21, PG21_DATA),
- PINMUX_GPIO(GPIO_PG20, PG20_DATA),
- PINMUX_GPIO(GPIO_PG19, PG19_DATA),
- PINMUX_GPIO(GPIO_PG18, PG18_DATA),
- PINMUX_GPIO(GPIO_PG17, PG17_DATA),
- PINMUX_GPIO(GPIO_PG16, PG16_DATA),
- PINMUX_GPIO(GPIO_PG15, PG15_DATA),
- PINMUX_GPIO(GPIO_PG14, PG14_DATA),
- PINMUX_GPIO(GPIO_PG13, PG13_DATA),
- PINMUX_GPIO(GPIO_PG12, PG12_DATA),
- PINMUX_GPIO(GPIO_PG11, PG11_DATA),
- PINMUX_GPIO(GPIO_PG10, PG10_DATA),
- PINMUX_GPIO(GPIO_PG9, PG9_DATA),
- PINMUX_GPIO(GPIO_PG8, PG8_DATA),
- PINMUX_GPIO(GPIO_PG7, PG7_DATA),
- PINMUX_GPIO(GPIO_PG6, PG6_DATA),
- PINMUX_GPIO(GPIO_PG5, PG5_DATA),
- PINMUX_GPIO(GPIO_PG4, PG4_DATA),
- PINMUX_GPIO(GPIO_PG3, PG3_DATA),
- PINMUX_GPIO(GPIO_PG2, PG2_DATA),
- PINMUX_GPIO(GPIO_PG1, PG1_DATA),
- PINMUX_GPIO(GPIO_PG0, PG0_DATA),
+ PINMUX_GPIO(PG24),
+ PINMUX_GPIO(PG23),
+ PINMUX_GPIO(PG22),
+ PINMUX_GPIO(PG21),
+ PINMUX_GPIO(PG20),
+ PINMUX_GPIO(PG19),
+ PINMUX_GPIO(PG18),
+ PINMUX_GPIO(PG17),
+ PINMUX_GPIO(PG16),
+ PINMUX_GPIO(PG15),
+ PINMUX_GPIO(PG14),
+ PINMUX_GPIO(PG13),
+ PINMUX_GPIO(PG12),
+ PINMUX_GPIO(PG11),
+ PINMUX_GPIO(PG10),
+ PINMUX_GPIO(PG9),
+ PINMUX_GPIO(PG8),
+ PINMUX_GPIO(PG7),
+ PINMUX_GPIO(PG6),
+ PINMUX_GPIO(PG5),
+ PINMUX_GPIO(PG4),
+ PINMUX_GPIO(PG3),
+ PINMUX_GPIO(PG2),
+ PINMUX_GPIO(PG1),
+ PINMUX_GPIO(PG0),
/* Port H - Port H does not have a Data Register */
/* Port I - not on device */
/* Port J */
- PINMUX_GPIO(GPIO_PJ11, PJ11_DATA),
- PINMUX_GPIO(GPIO_PJ10, PJ10_DATA),
- PINMUX_GPIO(GPIO_PJ9, PJ9_DATA),
- PINMUX_GPIO(GPIO_PJ8, PJ8_DATA),
- PINMUX_GPIO(GPIO_PJ7, PJ7_DATA),
- PINMUX_GPIO(GPIO_PJ6, PJ6_DATA),
- PINMUX_GPIO(GPIO_PJ5, PJ5_DATA),
- PINMUX_GPIO(GPIO_PJ4, PJ4_DATA),
- PINMUX_GPIO(GPIO_PJ3, PJ3_DATA),
- PINMUX_GPIO(GPIO_PJ2, PJ2_DATA),
- PINMUX_GPIO(GPIO_PJ1, PJ1_DATA),
- PINMUX_GPIO(GPIO_PJ0, PJ0_DATA),
+ PINMUX_GPIO(PJ11),
+ PINMUX_GPIO(PJ10),
+ PINMUX_GPIO(PJ9),
+ PINMUX_GPIO(PJ8),
+ PINMUX_GPIO(PJ7),
+ PINMUX_GPIO(PJ6),
+ PINMUX_GPIO(PJ5),
+ PINMUX_GPIO(PJ4),
+ PINMUX_GPIO(PJ3),
+ PINMUX_GPIO(PJ2),
+ PINMUX_GPIO(PJ1),
+ PINMUX_GPIO(PJ0),
/* Port K */
- PINMUX_GPIO(GPIO_PK11, PK11_DATA),
- PINMUX_GPIO(GPIO_PK10, PK10_DATA),
- PINMUX_GPIO(GPIO_PK9, PK9_DATA),
- PINMUX_GPIO(GPIO_PK8, PK8_DATA),
- PINMUX_GPIO(GPIO_PK7, PK7_DATA),
- PINMUX_GPIO(GPIO_PK6, PK6_DATA),
- PINMUX_GPIO(GPIO_PK5, PK5_DATA),
- PINMUX_GPIO(GPIO_PK4, PK4_DATA),
- PINMUX_GPIO(GPIO_PK3, PK3_DATA),
- PINMUX_GPIO(GPIO_PK2, PK2_DATA),
- PINMUX_GPIO(GPIO_PK1, PK1_DATA),
- PINMUX_GPIO(GPIO_PK0, PK0_DATA),
+ PINMUX_GPIO(PK11),
+ PINMUX_GPIO(PK10),
+ PINMUX_GPIO(PK9),
+ PINMUX_GPIO(PK8),
+ PINMUX_GPIO(PK7),
+ PINMUX_GPIO(PK6),
+ PINMUX_GPIO(PK5),
+ PINMUX_GPIO(PK4),
+ PINMUX_GPIO(PK3),
+ PINMUX_GPIO(PK2),
+ PINMUX_GPIO(PK1),
+ PINMUX_GPIO(PK0),
};
#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7269.c b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
index 4c401a74acd..a19b60f72b2 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7269.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7269.c
@@ -781,8 +781,7 @@ enum {
PINMUX_MARK_END,
};
-static const pinmux_enum_t pinmux_data[] = {
-
+static const u16 pinmux_data[] = {
/* Port A */
PINMUX_DATA(PA1_DATA, PA1_IN),
PINMUX_DATA(PA0_DATA, PA0_IN),
@@ -1454,165 +1453,165 @@ static const pinmux_enum_t pinmux_data[] = {
static struct sh_pfc_pin pinmux_pins[] = {
/* Port A */
- PINMUX_GPIO(GPIO_PA1, PA1_DATA),
- PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+ PINMUX_GPIO(PA1),
+ PINMUX_GPIO(PA0),
/* Port B */
- PINMUX_GPIO(GPIO_PB22, PB22_DATA),
- PINMUX_GPIO(GPIO_PB21, PB21_DATA),
- PINMUX_GPIO(GPIO_PB20, PB20_DATA),
- PINMUX_GPIO(GPIO_PB19, PB19_DATA),
- PINMUX_GPIO(GPIO_PB18, PB18_DATA),
- PINMUX_GPIO(GPIO_PB17, PB17_DATA),
- PINMUX_GPIO(GPIO_PB16, PB16_DATA),
- PINMUX_GPIO(GPIO_PB15, PB15_DATA),
- PINMUX_GPIO(GPIO_PB14, PB14_DATA),
- PINMUX_GPIO(GPIO_PB13, PB13_DATA),
- PINMUX_GPIO(GPIO_PB12, PB12_DATA),
- PINMUX_GPIO(GPIO_PB11, PB11_DATA),
- PINMUX_GPIO(GPIO_PB10, PB10_DATA),
- PINMUX_GPIO(GPIO_PB9, PB9_DATA),
- PINMUX_GPIO(GPIO_PB8, PB8_DATA),
- PINMUX_GPIO(GPIO_PB7, PB7_DATA),
- PINMUX_GPIO(GPIO_PB6, PB6_DATA),
- PINMUX_GPIO(GPIO_PB5, PB5_DATA),
- PINMUX_GPIO(GPIO_PB4, PB4_DATA),
- PINMUX_GPIO(GPIO_PB3, PB3_DATA),
- PINMUX_GPIO(GPIO_PB2, PB2_DATA),
- PINMUX_GPIO(GPIO_PB1, PB1_DATA),
+ PINMUX_GPIO(PB22),
+ PINMUX_GPIO(PB21),
+ PINMUX_GPIO(PB20),
+ PINMUX_GPIO(PB19),
+ PINMUX_GPIO(PB18),
+ PINMUX_GPIO(PB17),
+ PINMUX_GPIO(PB16),
+ PINMUX_GPIO(PB15),
+ PINMUX_GPIO(PB14),
+ PINMUX_GPIO(PB13),
+ PINMUX_GPIO(PB12),
+ PINMUX_GPIO(PB11),
+ PINMUX_GPIO(PB10),
+ PINMUX_GPIO(PB9),
+ PINMUX_GPIO(PB8),
+ PINMUX_GPIO(PB7),
+ PINMUX_GPIO(PB6),
+ PINMUX_GPIO(PB5),
+ PINMUX_GPIO(PB4),
+ PINMUX_GPIO(PB3),
+ PINMUX_GPIO(PB2),
+ PINMUX_GPIO(PB1),
/* Port C */
- PINMUX_GPIO(GPIO_PC8, PC8_DATA),
- PINMUX_GPIO(GPIO_PC7, PC7_DATA),
- PINMUX_GPIO(GPIO_PC6, PC6_DATA),
- PINMUX_GPIO(GPIO_PC5, PC5_DATA),
- PINMUX_GPIO(GPIO_PC4, PC4_DATA),
- PINMUX_GPIO(GPIO_PC3, PC3_DATA),
- PINMUX_GPIO(GPIO_PC2, PC2_DATA),
- PINMUX_GPIO(GPIO_PC1, PC1_DATA),
- PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+ PINMUX_GPIO(PC8),
+ PINMUX_GPIO(PC7),
+ PINMUX_GPIO(PC6),
+ PINMUX_GPIO(PC5),
+ PINMUX_GPIO(PC4),
+ PINMUX_GPIO(PC3),
+ PINMUX_GPIO(PC2),
+ PINMUX_GPIO(PC1),
+ PINMUX_GPIO(PC0),
/* Port D */
- PINMUX_GPIO(GPIO_PD15, PD15_DATA),
- PINMUX_GPIO(GPIO_PD14, PD14_DATA),
- PINMUX_GPIO(GPIO_PD13, PD13_DATA),
- PINMUX_GPIO(GPIO_PD12, PD12_DATA),
- PINMUX_GPIO(GPIO_PD11, PD11_DATA),
- PINMUX_GPIO(GPIO_PD10, PD10_DATA),
- PINMUX_GPIO(GPIO_PD9, PD9_DATA),
- PINMUX_GPIO(GPIO_PD8, PD8_DATA),
- PINMUX_GPIO(GPIO_PD7, PD7_DATA),
- PINMUX_GPIO(GPIO_PD6, PD6_DATA),
- PINMUX_GPIO(GPIO_PD5, PD5_DATA),
- PINMUX_GPIO(GPIO_PD4, PD4_DATA),
- PINMUX_GPIO(GPIO_PD3, PD3_DATA),
- PINMUX_GPIO(GPIO_PD2, PD2_DATA),
- PINMUX_GPIO(GPIO_PD1, PD1_DATA),
- PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+ PINMUX_GPIO(PD15),
+ PINMUX_GPIO(PD14),
+ PINMUX_GPIO(PD13),
+ PINMUX_GPIO(PD12),
+ PINMUX_GPIO(PD11),
+ PINMUX_GPIO(PD10),
+ PINMUX_GPIO(PD9),
+ PINMUX_GPIO(PD8),
+ PINMUX_GPIO(PD7),
+ PINMUX_GPIO(PD6),
+ PINMUX_GPIO(PD5),
+ PINMUX_GPIO(PD4),
+ PINMUX_GPIO(PD3),
+ PINMUX_GPIO(PD2),
+ PINMUX_GPIO(PD1),
+ PINMUX_GPIO(PD0),
/* Port E */
- PINMUX_GPIO(GPIO_PE7, PE7_DATA),
- PINMUX_GPIO(GPIO_PE6, PE6_DATA),
- PINMUX_GPIO(GPIO_PE5, PE5_DATA),
- PINMUX_GPIO(GPIO_PE4, PE4_DATA),
- PINMUX_GPIO(GPIO_PE3, PE3_DATA),
- PINMUX_GPIO(GPIO_PE2, PE2_DATA),
- PINMUX_GPIO(GPIO_PE1, PE1_DATA),
- PINMUX_GPIO(GPIO_PE0, PE0_DATA),
+ PINMUX_GPIO(PE7),
+ PINMUX_GPIO(PE6),
+ PINMUX_GPIO(PE5),
+ PINMUX_GPIO(PE4),
+ PINMUX_GPIO(PE3),
+ PINMUX_GPIO(PE2),
+ PINMUX_GPIO(PE1),
+ PINMUX_GPIO(PE0),
/* Port F */
- PINMUX_GPIO(GPIO_PF23, PF23_DATA),
- PINMUX_GPIO(GPIO_PF22, PF22_DATA),
- PINMUX_GPIO(GPIO_PF21, PF21_DATA),
- PINMUX_GPIO(GPIO_PF20, PF20_DATA),
- PINMUX_GPIO(GPIO_PF19, PF19_DATA),
- PINMUX_GPIO(GPIO_PF18, PF18_DATA),
- PINMUX_GPIO(GPIO_PF17, PF17_DATA),
- PINMUX_GPIO(GPIO_PF16, PF16_DATA),
- PINMUX_GPIO(GPIO_PF15, PF15_DATA),
- PINMUX_GPIO(GPIO_PF14, PF14_DATA),
- PINMUX_GPIO(GPIO_PF13, PF13_DATA),
- PINMUX_GPIO(GPIO_PF12, PF12_DATA),
- PINMUX_GPIO(GPIO_PF11, PF11_DATA),
- PINMUX_GPIO(GPIO_PF10, PF10_DATA),
- PINMUX_GPIO(GPIO_PF9, PF9_DATA),
- PINMUX_GPIO(GPIO_PF8, PF8_DATA),
- PINMUX_GPIO(GPIO_PF7, PF7_DATA),
- PINMUX_GPIO(GPIO_PF6, PF6_DATA),
- PINMUX_GPIO(GPIO_PF5, PF5_DATA),
- PINMUX_GPIO(GPIO_PF4, PF4_DATA),
- PINMUX_GPIO(GPIO_PF3, PF3_DATA),
- PINMUX_GPIO(GPIO_PF2, PF2_DATA),
- PINMUX_GPIO(GPIO_PF1, PF1_DATA),
- PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+ PINMUX_GPIO(PF23),
+ PINMUX_GPIO(PF22),
+ PINMUX_GPIO(PF21),
+ PINMUX_GPIO(PF20),
+ PINMUX_GPIO(PF19),
+ PINMUX_GPIO(PF18),
+ PINMUX_GPIO(PF17),
+ PINMUX_GPIO(PF16),
+ PINMUX_GPIO(PF15),
+ PINMUX_GPIO(PF14),
+ PINMUX_GPIO(PF13),
+ PINMUX_GPIO(PF12),
+ PINMUX_GPIO(PF11),
+ PINMUX_GPIO(PF10),
+ PINMUX_GPIO(PF9),
+ PINMUX_GPIO(PF8),
+ PINMUX_GPIO(PF7),
+ PINMUX_GPIO(PF6),
+ PINMUX_GPIO(PF5),
+ PINMUX_GPIO(PF4),
+ PINMUX_GPIO(PF3),
+ PINMUX_GPIO(PF2),
+ PINMUX_GPIO(PF1),
+ PINMUX_GPIO(PF0),
/* Port G */
- PINMUX_GPIO(GPIO_PG27, PG27_DATA),
- PINMUX_GPIO(GPIO_PG26, PG26_DATA),
- PINMUX_GPIO(GPIO_PG25, PG25_DATA),
- PINMUX_GPIO(GPIO_PG24, PG24_DATA),
- PINMUX_GPIO(GPIO_PG23, PG23_DATA),
- PINMUX_GPIO(GPIO_PG22, PG22_DATA),
- PINMUX_GPIO(GPIO_PG21, PG21_DATA),
- PINMUX_GPIO(GPIO_PG20, PG20_DATA),
- PINMUX_GPIO(GPIO_PG19, PG19_DATA),
- PINMUX_GPIO(GPIO_PG18, PG18_DATA),
- PINMUX_GPIO(GPIO_PG17, PG17_DATA),
- PINMUX_GPIO(GPIO_PG16, PG16_DATA),
- PINMUX_GPIO(GPIO_PG15, PG15_DATA),
- PINMUX_GPIO(GPIO_PG14, PG14_DATA),
- PINMUX_GPIO(GPIO_PG13, PG13_DATA),
- PINMUX_GPIO(GPIO_PG12, PG12_DATA),
- PINMUX_GPIO(GPIO_PG11, PG11_DATA),
- PINMUX_GPIO(GPIO_PG10, PG10_DATA),
- PINMUX_GPIO(GPIO_PG9, PG9_DATA),
- PINMUX_GPIO(GPIO_PG8, PG8_DATA),
- PINMUX_GPIO(GPIO_PG7, PG7_DATA),
- PINMUX_GPIO(GPIO_PG6, PG6_DATA),
- PINMUX_GPIO(GPIO_PG5, PG5_DATA),
- PINMUX_GPIO(GPIO_PG4, PG4_DATA),
- PINMUX_GPIO(GPIO_PG3, PG3_DATA),
- PINMUX_GPIO(GPIO_PG2, PG2_DATA),
- PINMUX_GPIO(GPIO_PG1, PG1_DATA),
- PINMUX_GPIO(GPIO_PG0, PG0_DATA),
+ PINMUX_GPIO(PG27),
+ PINMUX_GPIO(PG26),
+ PINMUX_GPIO(PG25),
+ PINMUX_GPIO(PG24),
+ PINMUX_GPIO(PG23),
+ PINMUX_GPIO(PG22),
+ PINMUX_GPIO(PG21),
+ PINMUX_GPIO(PG20),
+ PINMUX_GPIO(PG19),
+ PINMUX_GPIO(PG18),
+ PINMUX_GPIO(PG17),
+ PINMUX_GPIO(PG16),
+ PINMUX_GPIO(PG15),
+ PINMUX_GPIO(PG14),
+ PINMUX_GPIO(PG13),
+ PINMUX_GPIO(PG12),
+ PINMUX_GPIO(PG11),
+ PINMUX_GPIO(PG10),
+ PINMUX_GPIO(PG9),
+ PINMUX_GPIO(PG8),
+ PINMUX_GPIO(PG7),
+ PINMUX_GPIO(PG6),
+ PINMUX_GPIO(PG5),
+ PINMUX_GPIO(PG4),
+ PINMUX_GPIO(PG3),
+ PINMUX_GPIO(PG2),
+ PINMUX_GPIO(PG1),
+ PINMUX_GPIO(PG0),
/* Port H - Port H does not have a Data Register */
/* Port I - not on device */
/* Port J */
- PINMUX_GPIO(GPIO_PJ31, PJ31_DATA),
- PINMUX_GPIO(GPIO_PJ30, PJ30_DATA),
- PINMUX_GPIO(GPIO_PJ29, PJ29_DATA),
- PINMUX_GPIO(GPIO_PJ28, PJ28_DATA),
- PINMUX_GPIO(GPIO_PJ27, PJ27_DATA),
- PINMUX_GPIO(GPIO_PJ26, PJ26_DATA),
- PINMUX_GPIO(GPIO_PJ25, PJ25_DATA),
- PINMUX_GPIO(GPIO_PJ24, PJ24_DATA),
- PINMUX_GPIO(GPIO_PJ23, PJ23_DATA),
- PINMUX_GPIO(GPIO_PJ22, PJ22_DATA),
- PINMUX_GPIO(GPIO_PJ21, PJ21_DATA),
- PINMUX_GPIO(GPIO_PJ20, PJ20_DATA),
- PINMUX_GPIO(GPIO_PJ19, PJ19_DATA),
- PINMUX_GPIO(GPIO_PJ18, PJ18_DATA),
- PINMUX_GPIO(GPIO_PJ17, PJ17_DATA),
- PINMUX_GPIO(GPIO_PJ16, PJ16_DATA),
- PINMUX_GPIO(GPIO_PJ15, PJ15_DATA),
- PINMUX_GPIO(GPIO_PJ14, PJ14_DATA),
- PINMUX_GPIO(GPIO_PJ13, PJ13_DATA),
- PINMUX_GPIO(GPIO_PJ12, PJ12_DATA),
- PINMUX_GPIO(GPIO_PJ11, PJ11_DATA),
- PINMUX_GPIO(GPIO_PJ10, PJ10_DATA),
- PINMUX_GPIO(GPIO_PJ9, PJ9_DATA),
- PINMUX_GPIO(GPIO_PJ8, PJ8_DATA),
- PINMUX_GPIO(GPIO_PJ7, PJ7_DATA),
- PINMUX_GPIO(GPIO_PJ6, PJ6_DATA),
- PINMUX_GPIO(GPIO_PJ5, PJ5_DATA),
- PINMUX_GPIO(GPIO_PJ4, PJ4_DATA),
- PINMUX_GPIO(GPIO_PJ3, PJ3_DATA),
- PINMUX_GPIO(GPIO_PJ2, PJ2_DATA),
- PINMUX_GPIO(GPIO_PJ1, PJ1_DATA),
- PINMUX_GPIO(GPIO_PJ0, PJ0_DATA),
+ PINMUX_GPIO(PJ31),
+ PINMUX_GPIO(PJ30),
+ PINMUX_GPIO(PJ29),
+ PINMUX_GPIO(PJ28),
+ PINMUX_GPIO(PJ27),
+ PINMUX_GPIO(PJ26),
+ PINMUX_GPIO(PJ25),
+ PINMUX_GPIO(PJ24),
+ PINMUX_GPIO(PJ23),
+ PINMUX_GPIO(PJ22),
+ PINMUX_GPIO(PJ21),
+ PINMUX_GPIO(PJ20),
+ PINMUX_GPIO(PJ19),
+ PINMUX_GPIO(PJ18),
+ PINMUX_GPIO(PJ17),
+ PINMUX_GPIO(PJ16),
+ PINMUX_GPIO(PJ15),
+ PINMUX_GPIO(PJ14),
+ PINMUX_GPIO(PJ13),
+ PINMUX_GPIO(PJ12),
+ PINMUX_GPIO(PJ11),
+ PINMUX_GPIO(PJ10),
+ PINMUX_GPIO(PJ9),
+ PINMUX_GPIO(PJ8),
+ PINMUX_GPIO(PJ7),
+ PINMUX_GPIO(PJ6),
+ PINMUX_GPIO(PJ5),
+ PINMUX_GPIO(PJ4),
+ PINMUX_GPIO(PJ3),
+ PINMUX_GPIO(PJ2),
+ PINMUX_GPIO(PJ1),
+ PINMUX_GPIO(PJ0),
};
#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7372.c b/drivers/pinctrl/sh-pfc/pfc-sh7372.c
index 6dfb1877257..70b522d3482 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7372.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7372.c
@@ -23,27 +23,18 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/pinctrl/pinconf-generic.h>
-
-#include <mach/irqs.h>
-#include <mach/sh7372.h>
+#include <linux/sh_intc.h>
#include "core.h"
#include "sh_pfc.h"
-#define CPU_ALL_PORT(fn, pfx, sfx) \
- PORT_10(fn, pfx, sfx), PORT_90(fn, pfx, sfx), \
- PORT_10(fn, pfx##10, sfx), PORT_10(fn, pfx##11, sfx), \
- PORT_10(fn, pfx##12, sfx), PORT_10(fn, pfx##13, sfx), \
- PORT_10(fn, pfx##14, sfx), PORT_10(fn, pfx##15, sfx), \
- PORT_10(fn, pfx##16, sfx), PORT_10(fn, pfx##17, sfx), \
- PORT_10(fn, pfx##18, sfx), PORT_1(fn, pfx##190, sfx)
-
-#undef _GPIO_PORT
-#define _GPIO_PORT(gpio, sfx) \
- [gpio] = { \
- .name = __stringify(PORT##gpio), \
- .enum_id = PORT##gpio##_DATA, \
- }
+#define CPU_ALL_PORT(fn, pfx, sfx) \
+ PORT_10(0, fn, pfx, sfx), PORT_90(0, fn, pfx, sfx), \
+ PORT_10(100, fn, pfx##10, sfx), PORT_10(110, fn, pfx##11, sfx), \
+ PORT_10(120, fn, pfx##12, sfx), PORT_10(130, fn, pfx##13, sfx), \
+ PORT_10(140, fn, pfx##14, sfx), PORT_10(150, fn, pfx##15, sfx), \
+ PORT_10(160, fn, pfx##16, sfx), PORT_10(170, fn, pfx##17, sfx), \
+ PORT_10(180, fn, pfx##18, sfx), PORT_1(190, fn, pfx##190, sfx)
#define IRQC_PIN_MUX(irq, pin) \
static const unsigned int intc_irq##irq##_pins[] = { \
@@ -391,11 +382,8 @@ enum {
PINMUX_MARK_END,
};
-#define _PORT_DATA(pfx, sfx) PORT_DATA_IO(pfx)
-#define PINMUX_DATA_GP_ALL() CPU_ALL_PORT(_PORT_DATA, , unused)
-
-static const pinmux_enum_t pinmux_data[] = {
- PINMUX_DATA_GP_ALL(),
+static const u16 pinmux_data[] = {
+ PINMUX_DATA_ALL(),
/* IRQ */
PINMUX_DATA(IRQ0_6_MARK, PORT6_FN0, MSEL1CR_0_0),
@@ -839,13 +827,6 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(MFIv4_MARK, MSEL4CR_6_1),
};
-#define SH7372_PIN(pin, cfgs) \
- { \
- .name = __stringify(PORT##pin), \
- .enum_id = PORT##pin##_DATA, \
- .configs = cfgs, \
- }
-
#define __I (SH_PFC_PIN_CFG_INPUT)
#define __O (SH_PFC_PIN_CFG_OUTPUT)
#define __IO (SH_PFC_PIN_CFG_INPUT | SH_PFC_PIN_CFG_OUTPUT)
@@ -853,15 +834,15 @@ static const pinmux_enum_t pinmux_data[] = {
#define __PU (SH_PFC_PIN_CFG_PULL_UP)
#define __PUD (SH_PFC_PIN_CFG_PULL_DOWN | SH_PFC_PIN_CFG_PULL_UP)
-#define SH7372_PIN_I_PD(pin) SH7372_PIN(pin, __I | __PD)
-#define SH7372_PIN_I_PU(pin) SH7372_PIN(pin, __I | __PU)
-#define SH7372_PIN_I_PU_PD(pin) SH7372_PIN(pin, __I | __PUD)
-#define SH7372_PIN_IO(pin) SH7372_PIN(pin, __IO)
-#define SH7372_PIN_IO_PD(pin) SH7372_PIN(pin, __IO | __PD)
-#define SH7372_PIN_IO_PU(pin) SH7372_PIN(pin, __IO | __PU)
-#define SH7372_PIN_IO_PU_PD(pin) SH7372_PIN(pin, __IO | __PUD)
-#define SH7372_PIN_O(pin) SH7372_PIN(pin, __O)
-#define SH7372_PIN_O_PU_PD(pin) SH7372_PIN(pin, __O | __PUD)
+#define SH7372_PIN_I_PD(pin) SH_PFC_PIN_CFG(pin, __I | __PD)
+#define SH7372_PIN_I_PU(pin) SH_PFC_PIN_CFG(pin, __I | __PU)
+#define SH7372_PIN_I_PU_PD(pin) SH_PFC_PIN_CFG(pin, __I | __PUD)
+#define SH7372_PIN_IO(pin) SH_PFC_PIN_CFG(pin, __IO)
+#define SH7372_PIN_IO_PD(pin) SH_PFC_PIN_CFG(pin, __IO | __PD)
+#define SH7372_PIN_IO_PU(pin) SH_PFC_PIN_CFG(pin, __IO | __PU)
+#define SH7372_PIN_IO_PU_PD(pin) SH_PFC_PIN_CFG(pin, __IO | __PUD)
+#define SH7372_PIN_O(pin) SH_PFC_PIN_CFG(pin, __O)
+#define SH7372_PIN_O_PU_PD(pin) SH_PFC_PIN_CFG(pin, __O | __PUD)
static struct sh_pfc_pin pinmux_pins[] = {
/* Table 57-1 (I/O and Pull U/D) */
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
index 7956df58d75..7e278a97e41 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh73a0.c
@@ -31,32 +31,32 @@
#include "core.h"
#include "sh_pfc.h"
-#define CPU_ALL_PORT(fn, pfx, sfx) \
- PORT_10(fn, pfx, sfx), PORT_90(fn, pfx, sfx), \
- PORT_10(fn, pfx##10, sfx), \
- PORT_1(fn, pfx##110, sfx), PORT_1(fn, pfx##111, sfx), \
- PORT_1(fn, pfx##112, sfx), PORT_1(fn, pfx##113, sfx), \
- PORT_1(fn, pfx##114, sfx), PORT_1(fn, pfx##115, sfx), \
- PORT_1(fn, pfx##116, sfx), PORT_1(fn, pfx##117, sfx), \
- PORT_1(fn, pfx##118, sfx), \
- PORT_1(fn, pfx##128, sfx), PORT_1(fn, pfx##129, sfx), \
- PORT_10(fn, pfx##13, sfx), PORT_10(fn, pfx##14, sfx), \
- PORT_10(fn, pfx##15, sfx), \
- PORT_1(fn, pfx##160, sfx), PORT_1(fn, pfx##161, sfx), \
- PORT_1(fn, pfx##162, sfx), PORT_1(fn, pfx##163, sfx), \
- PORT_1(fn, pfx##164, sfx), \
- PORT_1(fn, pfx##192, sfx), PORT_1(fn, pfx##193, sfx), \
- PORT_1(fn, pfx##194, sfx), PORT_1(fn, pfx##195, sfx), \
- PORT_1(fn, pfx##196, sfx), PORT_1(fn, pfx##197, sfx), \
- PORT_1(fn, pfx##198, sfx), PORT_1(fn, pfx##199, sfx), \
- PORT_10(fn, pfx##20, sfx), PORT_10(fn, pfx##21, sfx), \
- PORT_10(fn, pfx##22, sfx), PORT_10(fn, pfx##23, sfx), \
- PORT_10(fn, pfx##24, sfx), PORT_10(fn, pfx##25, sfx), \
- PORT_10(fn, pfx##26, sfx), PORT_10(fn, pfx##27, sfx), \
- PORT_1(fn, pfx##280, sfx), PORT_1(fn, pfx##281, sfx), \
- PORT_1(fn, pfx##282, sfx), \
- PORT_1(fn, pfx##288, sfx), PORT_1(fn, pfx##289, sfx), \
- PORT_10(fn, pfx##29, sfx), PORT_10(fn, pfx##30, sfx)
+#define CPU_ALL_PORT(fn, pfx, sfx) \
+ PORT_10(0, fn, pfx, sfx), PORT_90(0, fn, pfx, sfx), \
+ PORT_10(100, fn, pfx##10, sfx), \
+ PORT_1(110, fn, pfx##110, sfx), PORT_1(111, fn, pfx##111, sfx), \
+ PORT_1(112, fn, pfx##112, sfx), PORT_1(113, fn, pfx##113, sfx), \
+ PORT_1(114, fn, pfx##114, sfx), PORT_1(115, fn, pfx##115, sfx), \
+ PORT_1(116, fn, pfx##116, sfx), PORT_1(117, fn, pfx##117, sfx), \
+ PORT_1(118, fn, pfx##118, sfx), \
+ PORT_1(128, fn, pfx##128, sfx), PORT_1(129, fn, pfx##129, sfx), \
+ PORT_10(130, fn, pfx##13, sfx), PORT_10(140, fn, pfx##14, sfx), \
+ PORT_10(150, fn, pfx##15, sfx), \
+ PORT_1(160, fn, pfx##160, sfx), PORT_1(161, fn, pfx##161, sfx), \
+ PORT_1(162, fn, pfx##162, sfx), PORT_1(163, fn, pfx##163, sfx), \
+ PORT_1(164, fn, pfx##164, sfx), \
+ PORT_1(192, fn, pfx##192, sfx), PORT_1(193, fn, pfx##193, sfx), \
+ PORT_1(194, fn, pfx##194, sfx), PORT_1(195, fn, pfx##195, sfx), \
+ PORT_1(196, fn, pfx##196, sfx), PORT_1(197, fn, pfx##197, sfx), \
+ PORT_1(198, fn, pfx##198, sfx), PORT_1(199, fn, pfx##199, sfx), \
+ PORT_10(200, fn, pfx##20, sfx), PORT_10(210, fn, pfx##21, sfx), \
+ PORT_10(220, fn, pfx##22, sfx), PORT_10(230, fn, pfx##23, sfx), \
+ PORT_10(240, fn, pfx##24, sfx), PORT_10(250, fn, pfx##25, sfx), \
+ PORT_10(260, fn, pfx##26, sfx), PORT_10(270, fn, pfx##27, sfx), \
+ PORT_1(280, fn, pfx##280, sfx), PORT_1(281, fn, pfx##281, sfx), \
+ PORT_1(282, fn, pfx##282, sfx), \
+ PORT_1(288, fn, pfx##288, sfx), PORT_1(289, fn, pfx##289, sfx), \
+ PORT_10(290, fn, pfx##29, sfx), PORT_10(300, fn, pfx##30, sfx)
enum {
PINMUX_RESERVED = 0,
@@ -466,12 +466,9 @@ enum {
PINMUX_MARK_END,
};
-#define _PORT_DATA(pfx, sfx) PORT_DATA_IO(pfx)
-#define PINMUX_DATA_GP_ALL() CPU_ALL_PORT(_PORT_DATA, , unused)
-
-static const pinmux_enum_t pinmux_data[] = {
+static const u16 pinmux_data[] = {
/* specify valid pin states for each pin in GPIO mode */
- PINMUX_DATA_GP_ALL(),
+ PINMUX_DATA_ALL(),
/* Table 25-1 (Function 0-7) */
PINMUX_DATA(VBUS_0_MARK, PORT0_FN1),
@@ -1160,13 +1157,6 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(EDBGREQ_PU_MARK, MSEL4CR_MSEL1_1),
};
-#define SH73A0_PIN(pin, cfgs) \
- { \
- .name = __stringify(PORT##pin), \
- .enum_id = PORT##pin##_DATA, \
- .configs = cfgs, \
- }
-
#define __I (SH_PFC_PIN_CFG_INPUT)
#define __O (SH_PFC_PIN_CFG_OUTPUT)
#define __IO (SH_PFC_PIN_CFG_INPUT | SH_PFC_PIN_CFG_OUTPUT)
@@ -1174,14 +1164,20 @@ static const pinmux_enum_t pinmux_data[] = {
#define __PU (SH_PFC_PIN_CFG_PULL_UP)
#define __PUD (SH_PFC_PIN_CFG_PULL_DOWN | SH_PFC_PIN_CFG_PULL_UP)
-#define SH73A0_PIN_I_PD(pin) SH73A0_PIN(pin, __I | __PD)
-#define SH73A0_PIN_I_PU(pin) SH73A0_PIN(pin, __I | __PU)
-#define SH73A0_PIN_I_PU_PD(pin) SH73A0_PIN(pin, __I | __PUD)
-#define SH73A0_PIN_IO(pin) SH73A0_PIN(pin, __IO)
-#define SH73A0_PIN_IO_PD(pin) SH73A0_PIN(pin, __IO | __PD)
-#define SH73A0_PIN_IO_PU(pin) SH73A0_PIN(pin, __IO | __PU)
-#define SH73A0_PIN_IO_PU_PD(pin) SH73A0_PIN(pin, __IO | __PUD)
-#define SH73A0_PIN_O(pin) SH73A0_PIN(pin, __O)
+#define SH73A0_PIN_I_PD(pin) SH_PFC_PIN_CFG(pin, __I | __PD)
+#define SH73A0_PIN_I_PU(pin) SH_PFC_PIN_CFG(pin, __I | __PU)
+#define SH73A0_PIN_I_PU_PD(pin) SH_PFC_PIN_CFG(pin, __I | __PUD)
+#define SH73A0_PIN_IO(pin) SH_PFC_PIN_CFG(pin, __IO)
+#define SH73A0_PIN_IO_PD(pin) SH_PFC_PIN_CFG(pin, __IO | __PD)
+#define SH73A0_PIN_IO_PU(pin) SH_PFC_PIN_CFG(pin, __IO | __PU)
+#define SH73A0_PIN_IO_PU_PD(pin) SH_PFC_PIN_CFG(pin, __IO | __PUD)
+#define SH73A0_PIN_O(pin) SH_PFC_PIN_CFG(pin, __O)
+
+/* Pin numbers for pins without a corresponding GPIO port number are computed
+ * from the row and column numbers with a 1000 offset to avoid collisions with
+ * GPIO port numbers.
+ */
+#define PIN_NUMBER(row, col) (1000+((row)-1)*34+(col)-1)
static struct sh_pfc_pin pinmux_pins[] = {
/* Table 25-1 (I/O and Pull U/D) */
@@ -1454,21 +1450,11 @@ static struct sh_pfc_pin pinmux_pins[] = {
SH73A0_PIN_O(307),
SH73A0_PIN_I_PU(308),
SH73A0_PIN_O(309),
-};
-static const struct pinmux_range pinmux_ranges[] = {
- {.begin = 0, .end = 118,},
- {.begin = 128, .end = 164,},
- {.begin = 192, .end = 282,},
- {.begin = 288, .end = 309,},
+ /* Pins not associated with a GPIO port */
+ SH_PFC_PIN_NAMED(6, 26, F26),
};
-/* Pin numbers for pins without a corresponding GPIO port number are computed
- * from the row and column numbers with a 1000 offset to avoid collisions with
- * GPIO port numbers.
- */
-#define PIN_NUMBER(row, col) (1000+((row)-1)*34+(col)-1)
-
/* - BSC -------------------------------------------------------------------- */
static const unsigned int bsc_data_0_7_pins[] = {
/* D[0:7] */
@@ -3674,43 +3660,39 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
{ },
};
-/* External IRQ pins mapped at IRQPIN_BASE */
-#define EXT_IRQ16L(n) irq_pin(n)
-#define EXT_IRQ16H(n) irq_pin(n)
-
static const struct pinmux_irq pinmux_irqs[] = {
- PINMUX_IRQ(EXT_IRQ16H(19), 9),
- PINMUX_IRQ(EXT_IRQ16L(1), 10),
- PINMUX_IRQ(EXT_IRQ16L(0), 11),
- PINMUX_IRQ(EXT_IRQ16H(18), 13),
- PINMUX_IRQ(EXT_IRQ16H(20), 14),
- PINMUX_IRQ(EXT_IRQ16H(21), 15),
- PINMUX_IRQ(EXT_IRQ16H(31), 26),
- PINMUX_IRQ(EXT_IRQ16H(30), 27),
- PINMUX_IRQ(EXT_IRQ16H(29), 28),
- PINMUX_IRQ(EXT_IRQ16H(22), 40),
- PINMUX_IRQ(EXT_IRQ16H(23), 53),
- PINMUX_IRQ(EXT_IRQ16L(10), 54),
- PINMUX_IRQ(EXT_IRQ16L(9), 56),
- PINMUX_IRQ(EXT_IRQ16H(26), 115),
- PINMUX_IRQ(EXT_IRQ16H(27), 116),
- PINMUX_IRQ(EXT_IRQ16H(28), 117),
- PINMUX_IRQ(EXT_IRQ16H(24), 118),
- PINMUX_IRQ(EXT_IRQ16L(6), 147),
- PINMUX_IRQ(EXT_IRQ16L(2), 149),
- PINMUX_IRQ(EXT_IRQ16L(7), 150),
- PINMUX_IRQ(EXT_IRQ16L(12), 156),
- PINMUX_IRQ(EXT_IRQ16L(4), 159),
- PINMUX_IRQ(EXT_IRQ16H(25), 164),
- PINMUX_IRQ(EXT_IRQ16L(8), 223),
- PINMUX_IRQ(EXT_IRQ16L(3), 224),
- PINMUX_IRQ(EXT_IRQ16L(5), 227),
- PINMUX_IRQ(EXT_IRQ16H(17), 234),
- PINMUX_IRQ(EXT_IRQ16L(11), 238),
- PINMUX_IRQ(EXT_IRQ16L(13), 239),
- PINMUX_IRQ(EXT_IRQ16H(16), 249),
- PINMUX_IRQ(EXT_IRQ16L(14), 251),
- PINMUX_IRQ(EXT_IRQ16L(9), 308),
+ PINMUX_IRQ(irq_pin(19), 9),
+ PINMUX_IRQ(irq_pin(1), 10),
+ PINMUX_IRQ(irq_pin(0), 11),
+ PINMUX_IRQ(irq_pin(18), 13),
+ PINMUX_IRQ(irq_pin(20), 14),
+ PINMUX_IRQ(irq_pin(21), 15),
+ PINMUX_IRQ(irq_pin(31), 26),
+ PINMUX_IRQ(irq_pin(30), 27),
+ PINMUX_IRQ(irq_pin(29), 28),
+ PINMUX_IRQ(irq_pin(22), 40),
+ PINMUX_IRQ(irq_pin(23), 53),
+ PINMUX_IRQ(irq_pin(10), 54),
+ PINMUX_IRQ(irq_pin(9), 56),
+ PINMUX_IRQ(irq_pin(26), 115),
+ PINMUX_IRQ(irq_pin(27), 116),
+ PINMUX_IRQ(irq_pin(28), 117),
+ PINMUX_IRQ(irq_pin(24), 118),
+ PINMUX_IRQ(irq_pin(6), 147),
+ PINMUX_IRQ(irq_pin(2), 149),
+ PINMUX_IRQ(irq_pin(7), 150),
+ PINMUX_IRQ(irq_pin(12), 156),
+ PINMUX_IRQ(irq_pin(4), 159),
+ PINMUX_IRQ(irq_pin(25), 164),
+ PINMUX_IRQ(irq_pin(8), 223),
+ PINMUX_IRQ(irq_pin(3), 224),
+ PINMUX_IRQ(irq_pin(5), 227),
+ PINMUX_IRQ(irq_pin(17), 234),
+ PINMUX_IRQ(irq_pin(11), 238),
+ PINMUX_IRQ(irq_pin(13), 239),
+ PINMUX_IRQ(irq_pin(16), 249),
+ PINMUX_IRQ(irq_pin(14), 251),
+ PINMUX_IRQ(irq_pin(9), 308),
};
/* -----------------------------------------------------------------------------
@@ -3785,6 +3767,7 @@ static const struct regulator_desc sh73a0_vccq_mc0_desc = {
static struct regulator_consumer_supply sh73a0_vccq_mc0_consumers[] = {
REGULATOR_SUPPLY("vqmmc", "sh_mobile_sdhi.0"),
+ REGULATOR_SUPPLY("vqmmc", "ee100000.sdhi"),
};
static const struct regulator_init_data sh73a0_vccq_mc0_init_data = {
@@ -3904,8 +3887,6 @@ const struct sh_pfc_soc_info sh73a0_pinmux_info = {
.pins = pinmux_pins,
.nr_pins = ARRAY_SIZE(pinmux_pins),
- .ranges = pinmux_ranges,
- .nr_ranges = ARRAY_SIZE(pinmux_ranges),
.groups = pinmux_groups,
.nr_groups = ARRAY_SIZE(pinmux_groups),
.functions = pinmux_functions,
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7720.c b/drivers/pinctrl/sh-pfc/pfc-sh7720.c
index 52e9f6be665..7a26809eda1 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7720.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7720.c
@@ -81,36 +81,6 @@ enum {
PTV4_IN, PTV3_IN, PTV2_IN, PTV1_IN, PTV0_IN,
PINMUX_INPUT_END,
- PINMUX_INPUT_PULLUP_BEGIN,
- PTA7_IN_PU, PTA6_IN_PU, PTA5_IN_PU, PTA4_IN_PU,
- PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
- PTB7_IN_PU, PTB6_IN_PU, PTB5_IN_PU, PTB4_IN_PU,
- PTB3_IN_PU, PTB2_IN_PU, PTB1_IN_PU, PTB0_IN_PU,
- PTC7_IN_PU, PTC6_IN_PU, PTC5_IN_PU, PTC4_IN_PU,
- PTC3_IN_PU, PTC2_IN_PU, PTC1_IN_PU, PTC0_IN_PU,
- PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU,
- PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTD0_IN_PU,
- PTE4_IN_PU, PTE3_IN_PU, PTE2_IN_PU, PTE1_IN_PU, PTE0_IN_PU,
- PTF0_IN_PU,
- PTG6_IN_PU, PTG5_IN_PU, PTG4_IN_PU,
- PTG3_IN_PU, PTG2_IN_PU, PTG1_IN_PU, PTG0_IN_PU,
- PTH6_IN_PU, PTH5_IN_PU, PTH4_IN_PU,
- PTH3_IN_PU, PTH2_IN_PU, PTH1_IN_PU, PTH0_IN_PU,
- PTJ6_IN_PU, PTJ5_IN_PU, PTJ4_IN_PU,
- PTJ3_IN_PU, PTJ2_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU,
- PTK3_IN_PU, PTK2_IN_PU, PTK1_IN_PU, PTK0_IN_PU,
- PTL7_IN_PU, PTL6_IN_PU, PTL5_IN_PU, PTL4_IN_PU, PTL3_IN_PU,
- PTM7_IN_PU, PTM6_IN_PU, PTM5_IN_PU, PTM4_IN_PU,
- PTM3_IN_PU, PTM2_IN_PU, PTM1_IN_PU, PTM0_IN_PU,
- PTP4_IN_PU, PTP3_IN_PU, PTP2_IN_PU, PTP1_IN_PU, PTP0_IN_PU,
- PTR7_IN_PU, PTR6_IN_PU, PTR5_IN_PU, PTR4_IN_PU,
- PTR3_IN_PU, PTR2_IN_PU, PTR1_IN_PU, PTR0_IN_PU,
- PTS4_IN_PU, PTS3_IN_PU, PTS2_IN_PU, PTS1_IN_PU, PTS0_IN_PU,
- PTT4_IN_PU, PTT3_IN_PU, PTT2_IN_PU, PTT1_IN_PU, PTT0_IN_PU,
- PTU4_IN_PU, PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU,
- PTV4_IN_PU, PTV3_IN_PU, PTV2_IN_PU, PTV1_IN_PU, PTV0_IN_PU,
- PINMUX_INPUT_PULLUP_END,
-
PINMUX_OUTPUT_BEGIN,
PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
@@ -262,55 +232,55 @@ enum {
PINMUX_MARK_END,
};
-static const pinmux_enum_t pinmux_data[] = {
+static const u16 pinmux_data[] = {
/* PTA GPIO */
- PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT, PTA7_IN_PU),
- PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT, PTA6_IN_PU),
- PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT, PTA5_IN_PU),
- PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT, PTA4_IN_PU),
- PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT, PTA3_IN_PU),
- PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT, PTA2_IN_PU),
- PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT, PTA1_IN_PU),
- PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT, PTA0_IN_PU),
+ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT),
+ PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT),
+ PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT),
+ PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT),
+ PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT),
+ PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT),
+ PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT),
+ PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT),
/* PTB GPIO */
- PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT, PTB7_IN_PU),
- PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT, PTB6_IN_PU),
- PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT, PTB5_IN_PU),
- PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT, PTB4_IN_PU),
- PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT, PTB3_IN_PU),
- PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT, PTB2_IN_PU),
- PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT, PTB1_IN_PU),
- PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT, PTB0_IN_PU),
+ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT),
+ PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT),
+ PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT),
+ PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT),
+ PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT),
+ PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT),
+ PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT),
+ PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT),
/* PTC GPIO */
- PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT, PTC7_IN_PU),
- PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT, PTC6_IN_PU),
- PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT, PTC5_IN_PU),
- PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT, PTC4_IN_PU),
- PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT, PTC3_IN_PU),
- PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT, PTC2_IN_PU),
- PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT, PTC1_IN_PU),
- PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT, PTC0_IN_PU),
+ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT),
+ PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT),
+ PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT),
+ PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT),
+ PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT),
+ PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT),
+ PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT),
+ PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT),
/* PTD GPIO */
- PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT, PTD7_IN_PU),
- PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT, PTD6_IN_PU),
- PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT, PTD5_IN_PU),
- PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT, PTD4_IN_PU),
- PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT, PTD3_IN_PU),
- PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT, PTD2_IN_PU),
- PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT, PTD1_IN_PU),
- PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT, PTD0_IN_PU),
+ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT),
+ PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT),
+ PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT),
+ PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT),
+ PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT),
+ PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT),
+ PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT),
+ PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT),
/* PTE GPIO */
PINMUX_DATA(PTE6_DATA, PTE6_IN),
PINMUX_DATA(PTE5_DATA, PTE5_IN),
- PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT, PTE4_IN_PU),
- PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT, PTE3_IN_PU),
- PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT, PTE2_IN_PU),
- PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT, PTE1_IN_PU),
- PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT, PTE0_IN_PU),
+ PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT),
+ PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT),
+ PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT),
+ PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT),
+ PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT),
/* PTF GPIO */
PINMUX_DATA(PTF6_DATA, PTF6_IN),
@@ -319,102 +289,102 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PTF3_DATA, PTF3_IN),
PINMUX_DATA(PTF2_DATA, PTF2_IN),
PINMUX_DATA(PTF1_DATA, PTF1_IN),
- PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT, PTF0_IN_PU),
+ PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT),
/* PTG GPIO */
- PINMUX_DATA(PTG6_DATA, PTG6_IN, PTG6_OUT, PTG6_IN_PU),
- PINMUX_DATA(PTG5_DATA, PTG5_IN, PTG5_OUT, PTG5_IN_PU),
- PINMUX_DATA(PTG4_DATA, PTG4_IN, PTG4_OUT, PTG4_IN_PU),
- PINMUX_DATA(PTG3_DATA, PTG3_IN, PTG3_OUT, PTG3_IN_PU),
- PINMUX_DATA(PTG2_DATA, PTG2_IN, PTG2_OUT, PTG2_IN_PU),
- PINMUX_DATA(PTG1_DATA, PTG1_IN, PTG1_OUT, PTG1_IN_PU),
- PINMUX_DATA(PTG0_DATA, PTG0_IN, PTG0_OUT, PTG0_IN_PU),
+ PINMUX_DATA(PTG6_DATA, PTG6_IN, PTG6_OUT),
+ PINMUX_DATA(PTG5_DATA, PTG5_IN, PTG5_OUT),
+ PINMUX_DATA(PTG4_DATA, PTG4_IN, PTG4_OUT),
+ PINMUX_DATA(PTG3_DATA, PTG3_IN, PTG3_OUT),
+ PINMUX_DATA(PTG2_DATA, PTG2_IN, PTG2_OUT),
+ PINMUX_DATA(PTG1_DATA, PTG1_IN, PTG1_OUT),
+ PINMUX_DATA(PTG0_DATA, PTG0_IN, PTG0_OUT),
/* PTH GPIO */
- PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT, PTH6_IN_PU),
- PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT, PTH5_IN_PU),
- PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT, PTH4_IN_PU),
- PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT, PTH3_IN_PU),
- PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT, PTH2_IN_PU),
- PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT, PTH1_IN_PU),
- PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT, PTH0_IN_PU),
+ PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT),
+ PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT),
+ PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT),
+ PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT),
+ PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT),
+ PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT),
+ PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT),
/* PTJ GPIO */
- PINMUX_DATA(PTJ6_DATA, PTJ6_IN, PTJ6_OUT, PTJ6_IN_PU),
- PINMUX_DATA(PTJ5_DATA, PTJ5_IN, PTJ5_OUT, PTJ5_IN_PU),
- PINMUX_DATA(PTJ4_DATA, PTJ4_IN, PTJ4_OUT, PTJ4_IN_PU),
- PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT, PTJ3_IN_PU),
- PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT, PTJ2_IN_PU),
- PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT, PTJ1_IN_PU),
- PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT, PTJ0_IN_PU),
+ PINMUX_DATA(PTJ6_DATA, PTJ6_IN, PTJ6_OUT),
+ PINMUX_DATA(PTJ5_DATA, PTJ5_IN, PTJ5_OUT),
+ PINMUX_DATA(PTJ4_DATA, PTJ4_IN, PTJ4_OUT),
+ PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT),
+ PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT),
+ PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT),
+ PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT),
/* PTK GPIO */
- PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT, PTK3_IN_PU),
- PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT, PTK2_IN_PU),
- PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT, PTK1_IN_PU),
- PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT, PTK0_IN_PU),
+ PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT),
+ PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT),
+ PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT),
+ PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT),
/* PTL GPIO */
- PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT, PTL7_IN_PU),
- PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT, PTL6_IN_PU),
- PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT, PTL5_IN_PU),
- PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT, PTL4_IN_PU),
- PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT, PTL3_IN_PU),
+ PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT),
+ PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT),
+ PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT),
+ PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT),
+ PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT),
/* PTM GPIO */
- PINMUX_DATA(PTM7_DATA, PTM7_IN, PTM7_OUT, PTM7_IN_PU),
- PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT, PTM6_IN_PU),
- PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT, PTM5_IN_PU),
- PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT, PTM4_IN_PU),
- PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT, PTM3_IN_PU),
- PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT, PTM2_IN_PU),
- PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT, PTM1_IN_PU),
- PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT, PTM0_IN_PU),
+ PINMUX_DATA(PTM7_DATA, PTM7_IN, PTM7_OUT),
+ PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT),
+ PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT),
+ PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT),
+ PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT),
+ PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT),
+ PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT),
+ PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT),
/* PTP GPIO */
- PINMUX_DATA(PTP4_DATA, PTP4_IN, PTP4_OUT, PTP4_IN_PU),
- PINMUX_DATA(PTP3_DATA, PTP3_IN, PTP3_OUT, PTP3_IN_PU),
- PINMUX_DATA(PTP2_DATA, PTP2_IN, PTP2_OUT, PTP2_IN_PU),
- PINMUX_DATA(PTP1_DATA, PTP1_IN, PTP1_OUT, PTP1_IN_PU),
- PINMUX_DATA(PTP0_DATA, PTP0_IN, PTP0_OUT, PTP0_IN_PU),
+ PINMUX_DATA(PTP4_DATA, PTP4_IN, PTP4_OUT),
+ PINMUX_DATA(PTP3_DATA, PTP3_IN, PTP3_OUT),
+ PINMUX_DATA(PTP2_DATA, PTP2_IN, PTP2_OUT),
+ PINMUX_DATA(PTP1_DATA, PTP1_IN, PTP1_OUT),
+ PINMUX_DATA(PTP0_DATA, PTP0_IN, PTP0_OUT),
/* PTR GPIO */
- PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT, PTR7_IN_PU),
- PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT, PTR6_IN_PU),
- PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT, PTR5_IN_PU),
- PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT, PTR4_IN_PU),
- PINMUX_DATA(PTR3_DATA, PTR3_IN, PTR3_OUT, PTR3_IN_PU),
- PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_OUT, PTR2_IN_PU),
- PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT, PTR1_IN_PU),
- PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT, PTR0_IN_PU),
+ PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT),
+ PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT),
+ PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT),
+ PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT),
+ PINMUX_DATA(PTR3_DATA, PTR3_IN, PTR3_OUT),
+ PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_OUT),
+ PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT),
+ PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT),
/* PTS GPIO */
- PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT, PTS4_IN_PU),
- PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT, PTS3_IN_PU),
- PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT, PTS2_IN_PU),
- PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT, PTS1_IN_PU),
- PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT, PTS0_IN_PU),
+ PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT),
+ PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT),
+ PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT),
+ PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT),
+ PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT),
/* PTT GPIO */
- PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT, PTT4_IN_PU),
- PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT, PTT3_IN_PU),
- PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT, PTT2_IN_PU),
- PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT, PTT1_IN_PU),
- PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT, PTT0_IN_PU),
+ PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT),
+ PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT),
+ PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT),
+ PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT),
+ PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT),
/* PTU GPIO */
- PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT, PTU4_IN_PU),
- PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT, PTU3_IN_PU),
- PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT, PTU2_IN_PU),
- PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT, PTU1_IN_PU),
- PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT, PTU0_IN_PU),
+ PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT),
+ PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT),
+ PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT),
+ PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT),
+ PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT),
/* PTV GPIO */
- PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT, PTV4_IN_PU),
- PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT, PTV3_IN_PU),
- PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT, PTV2_IN_PU),
- PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT, PTV1_IN_PU),
- PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT, PTV0_IN_PU),
+ PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT),
+ PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT),
+ PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT),
+ PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT),
+ PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT),
/* PTA FN */
PINMUX_DATA(D23_MARK, PTA7_FN),
@@ -608,157 +578,157 @@ static const pinmux_enum_t pinmux_data[] = {
static struct sh_pfc_pin pinmux_pins[] = {
/* PTA */
- PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
- PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
- PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
- PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
- PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
- PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
- PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
- PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+ PINMUX_GPIO(PTA7),
+ PINMUX_GPIO(PTA6),
+ PINMUX_GPIO(PTA5),
+ PINMUX_GPIO(PTA4),
+ PINMUX_GPIO(PTA3),
+ PINMUX_GPIO(PTA2),
+ PINMUX_GPIO(PTA1),
+ PINMUX_GPIO(PTA0),
/* PTB */
- PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
- PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
- PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
- PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
- PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
- PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
- PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
- PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+ PINMUX_GPIO(PTB7),
+ PINMUX_GPIO(PTB6),
+ PINMUX_GPIO(PTB5),
+ PINMUX_GPIO(PTB4),
+ PINMUX_GPIO(PTB3),
+ PINMUX_GPIO(PTB2),
+ PINMUX_GPIO(PTB1),
+ PINMUX_GPIO(PTB0),
/* PTC */
- PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
- PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
- PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
- PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
- PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
- PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
- PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
- PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+ PINMUX_GPIO(PTC7),
+ PINMUX_GPIO(PTC6),
+ PINMUX_GPIO(PTC5),
+ PINMUX_GPIO(PTC4),
+ PINMUX_GPIO(PTC3),
+ PINMUX_GPIO(PTC2),
+ PINMUX_GPIO(PTC1),
+ PINMUX_GPIO(PTC0),
/* PTD */
- PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
- PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
- PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
- PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
- PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
- PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
- PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
- PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+ PINMUX_GPIO(PTD7),
+ PINMUX_GPIO(PTD6),
+ PINMUX_GPIO(PTD5),
+ PINMUX_GPIO(PTD4),
+ PINMUX_GPIO(PTD3),
+ PINMUX_GPIO(PTD2),
+ PINMUX_GPIO(PTD1),
+ PINMUX_GPIO(PTD0),
/* PTE */
- PINMUX_GPIO(GPIO_PTE6, PTE6_DATA),
- PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
- PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
- PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
- PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
- PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
- PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+ PINMUX_GPIO(PTE6),
+ PINMUX_GPIO(PTE5),
+ PINMUX_GPIO(PTE4),
+ PINMUX_GPIO(PTE3),
+ PINMUX_GPIO(PTE2),
+ PINMUX_GPIO(PTE1),
+ PINMUX_GPIO(PTE0),
/* PTF */
- PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
- PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
- PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
- PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
- PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
- PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
- PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+ PINMUX_GPIO(PTF6),
+ PINMUX_GPIO(PTF5),
+ PINMUX_GPIO(PTF4),
+ PINMUX_GPIO(PTF3),
+ PINMUX_GPIO(PTF2),
+ PINMUX_GPIO(PTF1),
+ PINMUX_GPIO(PTF0),
/* PTG */
- PINMUX_GPIO(GPIO_PTG6, PTG6_DATA),
- PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
- PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
- PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
- PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
- PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
- PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+ PINMUX_GPIO(PTG6),
+ PINMUX_GPIO(PTG5),
+ PINMUX_GPIO(PTG4),
+ PINMUX_GPIO(PTG3),
+ PINMUX_GPIO(PTG2),
+ PINMUX_GPIO(PTG1),
+ PINMUX_GPIO(PTG0),
/* PTH */
- PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
- PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
- PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
- PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
- PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
- PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
- PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+ PINMUX_GPIO(PTH6),
+ PINMUX_GPIO(PTH5),
+ PINMUX_GPIO(PTH4),
+ PINMUX_GPIO(PTH3),
+ PINMUX_GPIO(PTH2),
+ PINMUX_GPIO(PTH1),
+ PINMUX_GPIO(PTH0),
/* PTJ */
- PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
- PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
- PINMUX_GPIO(GPIO_PTJ4, PTJ4_DATA),
- PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
- PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
- PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
- PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+ PINMUX_GPIO(PTJ6),
+ PINMUX_GPIO(PTJ5),
+ PINMUX_GPIO(PTJ4),
+ PINMUX_GPIO(PTJ3),
+ PINMUX_GPIO(PTJ2),
+ PINMUX_GPIO(PTJ1),
+ PINMUX_GPIO(PTJ0),
/* PTK */
- PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
- PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
- PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
- PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+ PINMUX_GPIO(PTK3),
+ PINMUX_GPIO(PTK2),
+ PINMUX_GPIO(PTK1),
+ PINMUX_GPIO(PTK0),
/* PTL */
- PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
- PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
- PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
- PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
- PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
+ PINMUX_GPIO(PTL7),
+ PINMUX_GPIO(PTL6),
+ PINMUX_GPIO(PTL5),
+ PINMUX_GPIO(PTL4),
+ PINMUX_GPIO(PTL3),
/* PTM */
- PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
- PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
- PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
- PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
- PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
- PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
- PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
- PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+ PINMUX_GPIO(PTM7),
+ PINMUX_GPIO(PTM6),
+ PINMUX_GPIO(PTM5),
+ PINMUX_GPIO(PTM4),
+ PINMUX_GPIO(PTM3),
+ PINMUX_GPIO(PTM2),
+ PINMUX_GPIO(PTM1),
+ PINMUX_GPIO(PTM0),
/* PTP */
- PINMUX_GPIO(GPIO_PTP4, PTP4_DATA),
- PINMUX_GPIO(GPIO_PTP3, PTP3_DATA),
- PINMUX_GPIO(GPIO_PTP2, PTP2_DATA),
- PINMUX_GPIO(GPIO_PTP1, PTP1_DATA),
- PINMUX_GPIO(GPIO_PTP0, PTP0_DATA),
+ PINMUX_GPIO(PTP4),
+ PINMUX_GPIO(PTP3),
+ PINMUX_GPIO(PTP2),
+ PINMUX_GPIO(PTP1),
+ PINMUX_GPIO(PTP0),
/* PTR */
- PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
- PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
- PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
- PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
- PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
- PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
- PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
- PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+ PINMUX_GPIO(PTR7),
+ PINMUX_GPIO(PTR6),
+ PINMUX_GPIO(PTR5),
+ PINMUX_GPIO(PTR4),
+ PINMUX_GPIO(PTR3),
+ PINMUX_GPIO(PTR2),
+ PINMUX_GPIO(PTR1),
+ PINMUX_GPIO(PTR0),
/* PTS */
- PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
- PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
- PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
- PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
- PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+ PINMUX_GPIO(PTS4),
+ PINMUX_GPIO(PTS3),
+ PINMUX_GPIO(PTS2),
+ PINMUX_GPIO(PTS1),
+ PINMUX_GPIO(PTS0),
/* PTT */
- PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
- PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
- PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
- PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
- PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+ PINMUX_GPIO(PTT4),
+ PINMUX_GPIO(PTT3),
+ PINMUX_GPIO(PTT2),
+ PINMUX_GPIO(PTT1),
+ PINMUX_GPIO(PTT0),
/* PTU */
- PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
- PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
- PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
- PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
- PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+ PINMUX_GPIO(PTU4),
+ PINMUX_GPIO(PTU3),
+ PINMUX_GPIO(PTU2),
+ PINMUX_GPIO(PTU1),
+ PINMUX_GPIO(PTU0),
/* PTV */
- PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
- PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
- PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
- PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
- PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+ PINMUX_GPIO(PTV4),
+ PINMUX_GPIO(PTV3),
+ PINMUX_GPIO(PTV2),
+ PINMUX_GPIO(PTV1),
+ PINMUX_GPIO(PTV0),
};
#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
@@ -959,54 +929,54 @@ static const struct pinmux_func pinmux_func_gpios[] = {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
- PTA7_FN, PTA7_OUT, PTA7_IN_PU, PTA7_IN,
- PTA6_FN, PTA6_OUT, PTA6_IN_PU, PTA6_IN,
- PTA5_FN, PTA5_OUT, PTA5_IN_PU, PTA5_IN,
- PTA4_FN, PTA4_OUT, PTA4_IN_PU, PTA4_IN,
- PTA3_FN, PTA3_OUT, PTA3_IN_PU, PTA3_IN,
- PTA2_FN, PTA2_OUT, PTA2_IN_PU, PTA2_IN,
- PTA1_FN, PTA1_OUT, PTA1_IN_PU, PTA1_IN,
- PTA0_FN, PTA0_OUT, PTA0_IN_PU, PTA0_IN }
+ PTA7_FN, PTA7_OUT, 0, PTA7_IN,
+ PTA6_FN, PTA6_OUT, 0, PTA6_IN,
+ PTA5_FN, PTA5_OUT, 0, PTA5_IN,
+ PTA4_FN, PTA4_OUT, 0, PTA4_IN,
+ PTA3_FN, PTA3_OUT, 0, PTA3_IN,
+ PTA2_FN, PTA2_OUT, 0, PTA2_IN,
+ PTA1_FN, PTA1_OUT, 0, PTA1_IN,
+ PTA0_FN, PTA0_OUT, 0, PTA0_IN }
},
{ PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
- PTB7_FN, PTB7_OUT, PTB7_IN_PU, PTB7_IN,
- PTB6_FN, PTB6_OUT, PTB6_IN_PU, PTB6_IN,
- PTB5_FN, PTB5_OUT, PTB5_IN_PU, PTB5_IN,
- PTB4_FN, PTB4_OUT, PTB4_IN_PU, PTB4_IN,
- PTB3_FN, PTB3_OUT, PTB3_IN_PU, PTB3_IN,
- PTB2_FN, PTB2_OUT, PTB2_IN_PU, PTB2_IN,
- PTB1_FN, PTB1_OUT, PTB1_IN_PU, PTB1_IN,
- PTB0_FN, PTB0_OUT, PTB0_IN_PU, PTB0_IN }
+ PTB7_FN, PTB7_OUT, 0, PTB7_IN,
+ PTB6_FN, PTB6_OUT, 0, PTB6_IN,
+ PTB5_FN, PTB5_OUT, 0, PTB5_IN,
+ PTB4_FN, PTB4_OUT, 0, PTB4_IN,
+ PTB3_FN, PTB3_OUT, 0, PTB3_IN,
+ PTB2_FN, PTB2_OUT, 0, PTB2_IN,
+ PTB1_FN, PTB1_OUT, 0, PTB1_IN,
+ PTB0_FN, PTB0_OUT, 0, PTB0_IN }
},
{ PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
- PTC7_FN, PTC7_OUT, PTC7_IN_PU, PTC7_IN,
- PTC6_FN, PTC6_OUT, PTC6_IN_PU, PTC6_IN,
- PTC5_FN, PTC5_OUT, PTC5_IN_PU, PTC5_IN,
- PTC4_FN, PTC4_OUT, PTC4_IN_PU, PTC4_IN,
- PTC3_FN, PTC3_OUT, PTC3_IN_PU, PTC3_IN,
- PTC2_FN, PTC2_OUT, PTC2_IN_PU, PTC2_IN,
- PTC1_FN, PTC1_OUT, PTC1_IN_PU, PTC1_IN,
- PTC0_FN, PTC0_OUT, PTC0_IN_PU, PTC0_IN }
+ PTC7_FN, PTC7_OUT, 0, PTC7_IN,
+ PTC6_FN, PTC6_OUT, 0, PTC6_IN,
+ PTC5_FN, PTC5_OUT, 0, PTC5_IN,
+ PTC4_FN, PTC4_OUT, 0, PTC4_IN,
+ PTC3_FN, PTC3_OUT, 0, PTC3_IN,
+ PTC2_FN, PTC2_OUT, 0, PTC2_IN,
+ PTC1_FN, PTC1_OUT, 0, PTC1_IN,
+ PTC0_FN, PTC0_OUT, 0, PTC0_IN }
},
{ PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
- PTD7_FN, PTD7_OUT, PTD7_IN_PU, PTD7_IN,
- PTD6_FN, PTD6_OUT, PTD6_IN_PU, PTD6_IN,
- PTD5_FN, PTD5_OUT, PTD5_IN_PU, PTD5_IN,
- PTD4_FN, PTD4_OUT, PTD4_IN_PU, PTD4_IN,
- PTD3_FN, PTD3_OUT, PTD3_IN_PU, PTD3_IN,
- PTD2_FN, PTD2_OUT, PTD2_IN_PU, PTD2_IN,
- PTD1_FN, PTD1_OUT, PTD1_IN_PU, PTD1_IN,
- PTD0_FN, PTD0_OUT, PTD0_IN_PU, PTD0_IN }
+ PTD7_FN, PTD7_OUT, 0, PTD7_IN,
+ PTD6_FN, PTD6_OUT, 0, PTD6_IN,
+ PTD5_FN, PTD5_OUT, 0, PTD5_IN,
+ PTD4_FN, PTD4_OUT, 0, PTD4_IN,
+ PTD3_FN, PTD3_OUT, 0, PTD3_IN,
+ PTD2_FN, PTD2_OUT, 0, PTD2_IN,
+ PTD1_FN, PTD1_OUT, 0, PTD1_IN,
+ PTD0_FN, PTD0_OUT, 0, PTD0_IN }
},
{ PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
0, 0, 0, 0,
PTE6_FN, 0, 0, PTE6_IN,
PTE5_FN, 0, 0, PTE5_IN,
- PTE4_FN, PTE4_OUT, PTE4_IN_PU, PTE4_IN,
- PTE3_FN, PTE3_OUT, PTE3_IN_PU, PTE3_IN,
- PTE2_FN, PTE2_OUT, PTE2_IN_PU, PTE2_IN,
- PTE1_FN, PTE1_OUT, PTE1_IN_PU, PTE1_IN,
- PTE0_FN, PTE0_OUT, PTE0_IN_PU, PTE0_IN }
+ PTE4_FN, PTE4_OUT, 0, PTE4_IN,
+ PTE3_FN, PTE3_OUT, 0, PTE3_IN,
+ PTE2_FN, PTE2_OUT, 0, PTE2_IN,
+ PTE1_FN, PTE1_OUT, 0, PTE1_IN,
+ PTE0_FN, PTE0_OUT, 0, PTE0_IN }
},
{ PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
0, 0, 0, 0,
@@ -1020,123 +990,123 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
},
{ PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
0, 0, 0, 0,
- PTG6_FN, PTG6_OUT, PTG6_IN_PU, PTG6_IN,
- PTG5_FN, PTG5_OUT, PTG5_IN_PU, PTG5_IN,
- PTG4_FN, PTG4_OUT, PTG4_IN_PU, PTG4_IN,
- PTG3_FN, PTG3_OUT, PTG3_IN_PU, PTG3_IN,
- PTG2_FN, PTG2_OUT, PTG2_IN_PU, PTG2_IN,
- PTG1_FN, PTG1_OUT, PTG1_IN_PU, PTG1_IN,
- PTG0_FN, PTG0_OUT, PTG0_IN_PU, PTG0_IN }
+ PTG6_FN, PTG6_OUT, 0, PTG6_IN,
+ PTG5_FN, PTG5_OUT, 0, PTG5_IN,
+ PTG4_FN, PTG4_OUT, 0, PTG4_IN,
+ PTG3_FN, PTG3_OUT, 0, PTG3_IN,
+ PTG2_FN, PTG2_OUT, 0, PTG2_IN,
+ PTG1_FN, PTG1_OUT, 0, PTG1_IN,
+ PTG0_FN, PTG0_OUT, 0, PTG0_IN }
},
{ PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
0, 0, 0, 0,
- PTH6_FN, PTH6_OUT, PTH6_IN_PU, PTH6_IN,
- PTH5_FN, PTH5_OUT, PTH5_IN_PU, PTH5_IN,
- PTH4_FN, PTH4_OUT, PTH4_IN_PU, PTH4_IN,
- PTH3_FN, PTH3_OUT, PTH3_IN_PU, PTH3_IN,
- PTH2_FN, PTH2_OUT, PTH2_IN_PU, PTH2_IN,
- PTH1_FN, PTH1_OUT, PTH1_IN_PU, PTH1_IN,
- PTH0_FN, PTH0_OUT, PTH0_IN_PU, PTH0_IN }
+ PTH6_FN, PTH6_OUT, 0, PTH6_IN,
+ PTH5_FN, PTH5_OUT, 0, PTH5_IN,
+ PTH4_FN, PTH4_OUT, 0, PTH4_IN,
+ PTH3_FN, PTH3_OUT, 0, PTH3_IN,
+ PTH2_FN, PTH2_OUT, 0, PTH2_IN,
+ PTH1_FN, PTH1_OUT, 0, PTH1_IN,
+ PTH0_FN, PTH0_OUT, 0, PTH0_IN }
},
{ PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
0, 0, 0, 0,
- PTJ6_FN, PTJ6_OUT, PTJ6_IN_PU, PTJ6_IN,
- PTJ5_FN, PTJ5_OUT, PTJ5_IN_PU, PTJ5_IN,
- PTJ4_FN, PTJ4_OUT, PTJ4_IN_PU, PTJ4_IN,
- PTJ3_FN, PTJ3_OUT, PTJ3_IN_PU, PTJ3_IN,
- PTJ2_FN, PTJ2_OUT, PTJ2_IN_PU, PTJ2_IN,
- PTJ1_FN, PTJ1_OUT, PTJ1_IN_PU, PTJ1_IN,
- PTJ0_FN, PTJ0_OUT, PTJ0_IN_PU, PTJ0_IN }
+ PTJ6_FN, PTJ6_OUT, 0, PTJ6_IN,
+ PTJ5_FN, PTJ5_OUT, 0, PTJ5_IN,
+ PTJ4_FN, PTJ4_OUT, 0, PTJ4_IN,
+ PTJ3_FN, PTJ3_OUT, 0, PTJ3_IN,
+ PTJ2_FN, PTJ2_OUT, 0, PTJ2_IN,
+ PTJ1_FN, PTJ1_OUT, 0, PTJ1_IN,
+ PTJ0_FN, PTJ0_OUT, 0, PTJ0_IN }
},
{ PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PTK3_FN, PTK3_OUT, PTK3_IN_PU, PTK3_IN,
- PTK2_FN, PTK2_OUT, PTK2_IN_PU, PTK2_IN,
- PTK1_FN, PTK1_OUT, PTK1_IN_PU, PTK1_IN,
- PTK0_FN, PTK0_OUT, PTK0_IN_PU, PTK0_IN }
+ PTK3_FN, PTK3_OUT, 0, PTK3_IN,
+ PTK2_FN, PTK2_OUT, 0, PTK2_IN,
+ PTK1_FN, PTK1_OUT, 0, PTK1_IN,
+ PTK0_FN, PTK0_OUT, 0, PTK0_IN }
},
{ PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
- PTL7_FN, PTL7_OUT, PTL7_IN_PU, PTL7_IN,
- PTL6_FN, PTL6_OUT, PTL6_IN_PU, PTL6_IN,
- PTL5_FN, PTL5_OUT, PTL5_IN_PU, PTL5_IN,
- PTL4_FN, PTL4_OUT, PTL4_IN_PU, PTL4_IN,
- PTL3_FN, PTL3_OUT, PTL3_IN_PU, PTL3_IN,
+ PTL7_FN, PTL7_OUT, 0, PTL7_IN,
+ PTL6_FN, PTL6_OUT, 0, PTL6_IN,
+ PTL5_FN, PTL5_OUT, 0, PTL5_IN,
+ PTL4_FN, PTL4_OUT, 0, PTL4_IN,
+ PTL3_FN, PTL3_OUT, 0, PTL3_IN,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0 }
},
{ PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
- PTM7_FN, PTM7_OUT, PTM7_IN_PU, PTM7_IN,
- PTM6_FN, PTM6_OUT, PTM6_IN_PU, PTM6_IN,
- PTM5_FN, PTM5_OUT, PTM5_IN_PU, PTM5_IN,
- PTM4_FN, PTM4_OUT, PTM4_IN_PU, PTM4_IN,
- PTM3_FN, PTM3_OUT, PTM3_IN_PU, PTM3_IN,
- PTM2_FN, PTM2_OUT, PTM2_IN_PU, PTM2_IN,
- PTM1_FN, PTM1_OUT, PTM1_IN_PU, PTM1_IN,
- PTM0_FN, PTM0_OUT, PTM0_IN_PU, PTM0_IN }
+ PTM7_FN, PTM7_OUT, 0, PTM7_IN,
+ PTM6_FN, PTM6_OUT, 0, PTM6_IN,
+ PTM5_FN, PTM5_OUT, 0, PTM5_IN,
+ PTM4_FN, PTM4_OUT, 0, PTM4_IN,
+ PTM3_FN, PTM3_OUT, 0, PTM3_IN,
+ PTM2_FN, PTM2_OUT, 0, PTM2_IN,
+ PTM1_FN, PTM1_OUT, 0, PTM1_IN,
+ PTM0_FN, PTM0_OUT, 0, PTM0_IN }
},
{ PINMUX_CFG_REG("PPCR", 0xa4050118, 16, 2) {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PTP4_FN, PTP4_OUT, PTP4_IN_PU, PTP4_IN,
- PTP3_FN, PTP3_OUT, PTP3_IN_PU, PTP3_IN,
- PTP2_FN, PTP2_OUT, PTP2_IN_PU, PTP2_IN,
- PTP1_FN, PTP1_OUT, PTP1_IN_PU, PTP1_IN,
- PTP0_FN, PTP0_OUT, PTP0_IN_PU, PTP0_IN }
+ PTP4_FN, PTP4_OUT, 0, PTP4_IN,
+ PTP3_FN, PTP3_OUT, 0, PTP3_IN,
+ PTP2_FN, PTP2_OUT, 0, PTP2_IN,
+ PTP1_FN, PTP1_OUT, 0, PTP1_IN,
+ PTP0_FN, PTP0_OUT, 0, PTP0_IN }
},
{ PINMUX_CFG_REG("PRCR", 0xa405011a, 16, 2) {
- PTR7_FN, PTR7_OUT, PTR7_IN_PU, PTR7_IN,
- PTR6_FN, PTR6_OUT, PTR6_IN_PU, PTR6_IN,
- PTR5_FN, PTR5_OUT, PTR5_IN_PU, PTR5_IN,
- PTR4_FN, PTR4_OUT, PTR4_IN_PU, PTR4_IN,
- PTR3_FN, PTR3_OUT, PTR3_IN_PU, PTR3_IN,
- PTR2_FN, PTR2_OUT, PTR2_IN_PU, PTR2_IN,
- PTR1_FN, PTR1_OUT, PTR1_IN_PU, PTR1_IN,
- PTR0_FN, PTR0_OUT, PTR0_IN_PU, PTR0_IN }
+ PTR7_FN, PTR7_OUT, 0, PTR7_IN,
+ PTR6_FN, PTR6_OUT, 0, PTR6_IN,
+ PTR5_FN, PTR5_OUT, 0, PTR5_IN,
+ PTR4_FN, PTR4_OUT, 0, PTR4_IN,
+ PTR3_FN, PTR3_OUT, 0, PTR3_IN,
+ PTR2_FN, PTR2_OUT, 0, PTR2_IN,
+ PTR1_FN, PTR1_OUT, 0, PTR1_IN,
+ PTR0_FN, PTR0_OUT, 0, PTR0_IN }
},
{ PINMUX_CFG_REG("PSCR", 0xa405011c, 16, 2) {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PTS4_FN, PTS4_OUT, PTS4_IN_PU, PTS4_IN,
- PTS3_FN, PTS3_OUT, PTS3_IN_PU, PTS3_IN,
- PTS2_FN, PTS2_OUT, PTS2_IN_PU, PTS2_IN,
- PTS1_FN, PTS1_OUT, PTS1_IN_PU, PTS1_IN,
- PTS0_FN, PTS0_OUT, PTS0_IN_PU, PTS0_IN }
+ PTS4_FN, PTS4_OUT, 0, PTS4_IN,
+ PTS3_FN, PTS3_OUT, 0, PTS3_IN,
+ PTS2_FN, PTS2_OUT, 0, PTS2_IN,
+ PTS1_FN, PTS1_OUT, 0, PTS1_IN,
+ PTS0_FN, PTS0_OUT, 0, PTS0_IN }
},
{ PINMUX_CFG_REG("PTCR", 0xa405011e, 16, 2) {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PTT4_FN, PTT4_OUT, PTT4_IN_PU, PTT4_IN,
- PTT3_FN, PTT3_OUT, PTT3_IN_PU, PTT3_IN,
- PTT2_FN, PTT2_OUT, PTT2_IN_PU, PTT2_IN,
- PTT1_FN, PTT1_OUT, PTT1_IN_PU, PTT1_IN,
- PTT0_FN, PTT0_OUT, PTT0_IN_PU, PTT0_IN }
+ PTT4_FN, PTT4_OUT, 0, PTT4_IN,
+ PTT3_FN, PTT3_OUT, 0, PTT3_IN,
+ PTT2_FN, PTT2_OUT, 0, PTT2_IN,
+ PTT1_FN, PTT1_OUT, 0, PTT1_IN,
+ PTT0_FN, PTT0_OUT, 0, PTT0_IN }
},
{ PINMUX_CFG_REG("PUCR", 0xa4050120, 16, 2) {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PTU4_FN, PTU4_OUT, PTU4_IN_PU, PTU4_IN,
- PTU3_FN, PTU3_OUT, PTU3_IN_PU, PTU3_IN,
- PTU2_FN, PTU2_OUT, PTU2_IN_PU, PTU2_IN,
- PTU1_FN, PTU1_OUT, PTU1_IN_PU, PTU1_IN,
- PTU0_FN, PTU0_OUT, PTU0_IN_PU, PTU0_IN }
+ PTU4_FN, PTU4_OUT, 0, PTU4_IN,
+ PTU3_FN, PTU3_OUT, 0, PTU3_IN,
+ PTU2_FN, PTU2_OUT, 0, PTU2_IN,
+ PTU1_FN, PTU1_OUT, 0, PTU1_IN,
+ PTU0_FN, PTU0_OUT, 0, PTU0_IN }
},
{ PINMUX_CFG_REG("PVCR", 0xa4050122, 16, 2) {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PTV4_FN, PTV4_OUT, PTV4_IN_PU, PTV4_IN,
- PTV3_FN, PTV3_OUT, PTV3_IN_PU, PTV3_IN,
- PTV2_FN, PTV2_OUT, PTV2_IN_PU, PTV2_IN,
- PTV1_FN, PTV1_OUT, PTV1_IN_PU, PTV1_IN,
- PTV0_FN, PTV0_OUT, PTV0_IN_PU, PTV0_IN }
+ PTV4_FN, PTV4_OUT, 0, PTV4_IN,
+ PTV3_FN, PTV3_OUT, 0, PTV3_IN,
+ PTV2_FN, PTV2_OUT, 0, PTV2_IN,
+ PTV1_FN, PTV1_OUT, 0, PTV1_IN,
+ PTV0_FN, PTV0_OUT, 0, PTV0_IN }
},
{}
};
@@ -1220,7 +1190,6 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
const struct sh_pfc_soc_info sh7720_pinmux_info = {
.name = "sh7720_pfc",
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
- .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7722.c b/drivers/pinctrl/sh-pfc/pfc-sh7722.c
index 32034387477..add309347b0 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7722.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7722.c
@@ -77,39 +77,6 @@ enum {
PTZ5_IN, PTZ4_IN, PTZ3_IN, PTZ2_IN, PTZ1_IN,
PINMUX_INPUT_END,
- PINMUX_INPUT_PULLDOWN_BEGIN,
- PTA7_IN_PD, PTA6_IN_PD, PTA5_IN_PD, PTA4_IN_PD,
- PTA3_IN_PD, PTA2_IN_PD, PTA1_IN_PD, PTA0_IN_PD,
- PTE7_IN_PD, PTE6_IN_PD, PTE5_IN_PD, PTE4_IN_PD, PTE1_IN_PD, PTE0_IN_PD,
- PTF6_IN_PD, PTF5_IN_PD, PTF4_IN_PD, PTF3_IN_PD, PTF2_IN_PD, PTF1_IN_PD,
- PTH6_IN_PD, PTH5_IN_PD, PTH1_IN_PD, PTH0_IN_PD,
- PTK6_IN_PD, PTK5_IN_PD, PTK4_IN_PD, PTK3_IN_PD, PTK2_IN_PD, PTK0_IN_PD,
- PTL7_IN_PD, PTL6_IN_PD, PTL5_IN_PD, PTL4_IN_PD,
- PTL3_IN_PD, PTL2_IN_PD, PTL1_IN_PD, PTL0_IN_PD,
- PTM7_IN_PD, PTM6_IN_PD, PTM5_IN_PD, PTM4_IN_PD,
- PTM3_IN_PD, PTM2_IN_PD, PTM1_IN_PD, PTM0_IN_PD,
- PTQ5_IN_PD, PTQ4_IN_PD, PTQ3_IN_PD, PTQ2_IN_PD,
- PTS4_IN_PD, PTS2_IN_PD, PTS1_IN_PD,
- PTT4_IN_PD, PTT3_IN_PD, PTT2_IN_PD, PTT1_IN_PD,
- PTU4_IN_PD, PTU3_IN_PD, PTU2_IN_PD, PTU1_IN_PD, PTU0_IN_PD,
- PTV4_IN_PD, PTV3_IN_PD, PTV2_IN_PD, PTV1_IN_PD, PTV0_IN_PD,
- PTW6_IN_PD, PTW4_IN_PD, PTW3_IN_PD, PTW2_IN_PD, PTW1_IN_PD, PTW0_IN_PD,
- PTX6_IN_PD, PTX5_IN_PD, PTX4_IN_PD,
- PTX3_IN_PD, PTX2_IN_PD, PTX1_IN_PD, PTX0_IN_PD,
- PINMUX_INPUT_PULLDOWN_END,
-
- PINMUX_INPUT_PULLUP_BEGIN,
- PTC7_IN_PU, PTC5_IN_PU,
- PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU,
- PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU,
- PTJ1_IN_PU, PTJ0_IN_PU,
- PTQ0_IN_PU,
- PTR2_IN_PU,
- PTX6_IN_PU,
- PTY5_IN_PU, PTY4_IN_PU, PTY3_IN_PU, PTY2_IN_PU, PTY0_IN_PU,
- PTZ5_IN_PU, PTZ4_IN_PU, PTZ3_IN_PU, PTZ2_IN_PU, PTZ1_IN_PU,
- PINMUX_INPUT_PULLUP_END,
-
PINMUX_OUTPUT_BEGIN,
PTA7_OUT, PTA5_OUT,
PTB7_OUT, PTB6_OUT, PTB5_OUT, PTB4_OUT,
@@ -296,16 +263,16 @@ enum {
PINMUX_FUNCTION_END,
};
-static const pinmux_enum_t pinmux_data[] = {
+static const u16 pinmux_data[] = {
/* PTA */
- PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_IN_PD, PTA7_OUT),
- PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_IN_PD),
- PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_IN_PD, PTA5_OUT),
- PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_IN_PD),
- PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_IN_PD),
- PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_IN_PD),
- PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_IN_PD),
- PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_IN_PD),
+ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT),
+ PINMUX_DATA(PTA6_DATA, PTA6_IN),
+ PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT),
+ PINMUX_DATA(PTA4_DATA, PTA4_IN),
+ PINMUX_DATA(PTA3_DATA, PTA3_IN),
+ PINMUX_DATA(PTA2_DATA, PTA2_IN),
+ PINMUX_DATA(PTA1_DATA, PTA1_IN),
+ PINMUX_DATA(PTA0_DATA, PTA0_IN),
/* PTB */
PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT),
@@ -318,38 +285,38 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT),
/* PTC */
- PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_IN_PU),
- PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_IN_PU),
+ PINMUX_DATA(PTC7_DATA, PTC7_IN),
+ PINMUX_DATA(PTC5_DATA, PTC5_IN),
PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT),
PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT),
PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT),
PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT),
/* PTD */
- PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_IN_PU),
- PINMUX_DATA(PTD6_DATA, PTD6_OUT, PTD6_IN, PTD6_IN_PU),
- PINMUX_DATA(PTD5_DATA, PTD5_OUT, PTD5_IN, PTD5_IN_PU),
- PINMUX_DATA(PTD4_DATA, PTD4_OUT, PTD4_IN, PTD4_IN_PU),
- PINMUX_DATA(PTD3_DATA, PTD3_OUT, PTD3_IN, PTD3_IN_PU),
- PINMUX_DATA(PTD2_DATA, PTD2_OUT, PTD2_IN, PTD2_IN_PU),
- PINMUX_DATA(PTD1_DATA, PTD1_OUT, PTD1_IN, PTD1_IN_PU),
+ PINMUX_DATA(PTD7_DATA, PTD7_IN),
+ PINMUX_DATA(PTD6_DATA, PTD6_OUT, PTD6_IN),
+ PINMUX_DATA(PTD5_DATA, PTD5_OUT, PTD5_IN),
+ PINMUX_DATA(PTD4_DATA, PTD4_OUT, PTD4_IN),
+ PINMUX_DATA(PTD3_DATA, PTD3_OUT, PTD3_IN),
+ PINMUX_DATA(PTD2_DATA, PTD2_OUT, PTD2_IN),
+ PINMUX_DATA(PTD1_DATA, PTD1_OUT, PTD1_IN),
PINMUX_DATA(PTD0_DATA, PTD0_OUT),
/* PTE */
- PINMUX_DATA(PTE7_DATA, PTE7_OUT, PTE7_IN, PTE7_IN_PD),
- PINMUX_DATA(PTE6_DATA, PTE6_OUT, PTE6_IN, PTE6_IN_PD),
- PINMUX_DATA(PTE5_DATA, PTE5_OUT, PTE5_IN, PTE5_IN_PD),
- PINMUX_DATA(PTE4_DATA, PTE4_OUT, PTE4_IN, PTE4_IN_PD),
- PINMUX_DATA(PTE1_DATA, PTE1_OUT, PTE1_IN, PTE1_IN_PD),
- PINMUX_DATA(PTE0_DATA, PTE0_OUT, PTE0_IN, PTE0_IN_PD),
+ PINMUX_DATA(PTE7_DATA, PTE7_OUT, PTE7_IN),
+ PINMUX_DATA(PTE6_DATA, PTE6_OUT, PTE6_IN),
+ PINMUX_DATA(PTE5_DATA, PTE5_OUT, PTE5_IN),
+ PINMUX_DATA(PTE4_DATA, PTE4_OUT, PTE4_IN),
+ PINMUX_DATA(PTE1_DATA, PTE1_OUT, PTE1_IN),
+ PINMUX_DATA(PTE0_DATA, PTE0_OUT, PTE0_IN),
/* PTF */
- PINMUX_DATA(PTF6_DATA, PTF6_OUT, PTF6_IN, PTF6_IN_PD),
- PINMUX_DATA(PTF5_DATA, PTF5_OUT, PTF5_IN, PTF5_IN_PD),
- PINMUX_DATA(PTF4_DATA, PTF4_OUT, PTF4_IN, PTF4_IN_PD),
- PINMUX_DATA(PTF3_DATA, PTF3_OUT, PTF3_IN, PTF3_IN_PD),
- PINMUX_DATA(PTF2_DATA, PTF2_OUT, PTF2_IN, PTF2_IN_PD),
- PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_IN_PD),
+ PINMUX_DATA(PTF6_DATA, PTF6_OUT, PTF6_IN),
+ PINMUX_DATA(PTF5_DATA, PTF5_OUT, PTF5_IN),
+ PINMUX_DATA(PTF4_DATA, PTF4_OUT, PTF4_IN),
+ PINMUX_DATA(PTF3_DATA, PTF3_OUT, PTF3_IN),
+ PINMUX_DATA(PTF2_DATA, PTF2_OUT, PTF2_IN),
+ PINMUX_DATA(PTF1_DATA, PTF1_IN),
PINMUX_DATA(PTF0_DATA, PTF0_OUT),
/* PTG */
@@ -361,49 +328,49 @@ static const pinmux_enum_t pinmux_data[] = {
/* PTH */
PINMUX_DATA(PTH7_DATA, PTH7_OUT),
- PINMUX_DATA(PTH6_DATA, PTH6_OUT, PTH6_IN, PTH6_IN_PD),
- PINMUX_DATA(PTH5_DATA, PTH5_OUT, PTH5_IN, PTH5_IN_PD),
+ PINMUX_DATA(PTH6_DATA, PTH6_OUT, PTH6_IN),
+ PINMUX_DATA(PTH5_DATA, PTH5_OUT, PTH5_IN),
PINMUX_DATA(PTH4_DATA, PTH4_OUT),
PINMUX_DATA(PTH3_DATA, PTH3_OUT),
PINMUX_DATA(PTH2_DATA, PTH2_OUT),
- PINMUX_DATA(PTH1_DATA, PTH1_OUT, PTH1_IN, PTH1_IN_PD),
- PINMUX_DATA(PTH0_DATA, PTH0_OUT, PTH0_IN, PTH0_IN_PD),
+ PINMUX_DATA(PTH1_DATA, PTH1_OUT, PTH1_IN),
+ PINMUX_DATA(PTH0_DATA, PTH0_OUT, PTH0_IN),
/* PTJ */
PINMUX_DATA(PTJ7_DATA, PTJ7_OUT),
PINMUX_DATA(PTJ6_DATA, PTJ6_OUT),
PINMUX_DATA(PTJ5_DATA, PTJ5_OUT),
- PINMUX_DATA(PTJ1_DATA, PTJ1_OUT, PTJ1_IN, PTJ1_IN_PU),
- PINMUX_DATA(PTJ0_DATA, PTJ0_OUT, PTJ0_IN, PTJ0_IN_PU),
+ PINMUX_DATA(PTJ1_DATA, PTJ1_OUT, PTJ1_IN),
+ PINMUX_DATA(PTJ0_DATA, PTJ0_OUT, PTJ0_IN),
/* PTK */
- PINMUX_DATA(PTK6_DATA, PTK6_OUT, PTK6_IN, PTK6_IN_PD),
- PINMUX_DATA(PTK5_DATA, PTK5_OUT, PTK5_IN, PTK5_IN_PD),
- PINMUX_DATA(PTK4_DATA, PTK4_OUT, PTK4_IN, PTK4_IN_PD),
- PINMUX_DATA(PTK3_DATA, PTK3_OUT, PTK3_IN, PTK3_IN_PD),
- PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_IN_PD),
+ PINMUX_DATA(PTK6_DATA, PTK6_OUT, PTK6_IN),
+ PINMUX_DATA(PTK5_DATA, PTK5_OUT, PTK5_IN),
+ PINMUX_DATA(PTK4_DATA, PTK4_OUT, PTK4_IN),
+ PINMUX_DATA(PTK3_DATA, PTK3_OUT, PTK3_IN),
+ PINMUX_DATA(PTK2_DATA, PTK2_IN),
PINMUX_DATA(PTK1_DATA, PTK1_OUT),
- PINMUX_DATA(PTK0_DATA, PTK0_OUT, PTK0_IN, PTK0_IN_PD),
+ PINMUX_DATA(PTK0_DATA, PTK0_OUT, PTK0_IN),
/* PTL */
- PINMUX_DATA(PTL7_DATA, PTL7_OUT, PTL7_IN, PTL7_IN_PD),
- PINMUX_DATA(PTL6_DATA, PTL6_OUT, PTL6_IN, PTL6_IN_PD),
- PINMUX_DATA(PTL5_DATA, PTL5_OUT, PTL5_IN, PTL5_IN_PD),
- PINMUX_DATA(PTL4_DATA, PTL4_OUT, PTL4_IN, PTL4_IN_PD),
- PINMUX_DATA(PTL3_DATA, PTL3_OUT, PTL3_IN, PTL3_IN_PD),
- PINMUX_DATA(PTL2_DATA, PTL2_OUT, PTL2_IN, PTL2_IN_PD),
- PINMUX_DATA(PTL1_DATA, PTL1_OUT, PTL1_IN, PTL1_IN_PD),
- PINMUX_DATA(PTL0_DATA, PTL0_OUT, PTL0_IN, PTL0_IN_PD),
+ PINMUX_DATA(PTL7_DATA, PTL7_OUT, PTL7_IN),
+ PINMUX_DATA(PTL6_DATA, PTL6_OUT, PTL6_IN),
+ PINMUX_DATA(PTL5_DATA, PTL5_OUT, PTL5_IN),
+ PINMUX_DATA(PTL4_DATA, PTL4_OUT, PTL4_IN),
+ PINMUX_DATA(PTL3_DATA, PTL3_OUT, PTL3_IN),
+ PINMUX_DATA(PTL2_DATA, PTL2_OUT, PTL2_IN),
+ PINMUX_DATA(PTL1_DATA, PTL1_OUT, PTL1_IN),
+ PINMUX_DATA(PTL0_DATA, PTL0_OUT, PTL0_IN),
/* PTM */
- PINMUX_DATA(PTM7_DATA, PTM7_OUT, PTM7_IN, PTM7_IN_PD),
- PINMUX_DATA(PTM6_DATA, PTM6_OUT, PTM6_IN, PTM6_IN_PD),
- PINMUX_DATA(PTM5_DATA, PTM5_OUT, PTM5_IN, PTM5_IN_PD),
- PINMUX_DATA(PTM4_DATA, PTM4_OUT, PTM4_IN, PTM4_IN_PD),
- PINMUX_DATA(PTM3_DATA, PTM3_OUT, PTM3_IN, PTM3_IN_PD),
- PINMUX_DATA(PTM2_DATA, PTM2_OUT, PTM2_IN, PTM2_IN_PD),
- PINMUX_DATA(PTM1_DATA, PTM1_OUT, PTM1_IN, PTM1_IN_PD),
- PINMUX_DATA(PTM0_DATA, PTM0_OUT, PTM0_IN, PTM0_IN_PD),
+ PINMUX_DATA(PTM7_DATA, PTM7_OUT, PTM7_IN),
+ PINMUX_DATA(PTM6_DATA, PTM6_OUT, PTM6_IN),
+ PINMUX_DATA(PTM5_DATA, PTM5_OUT, PTM5_IN),
+ PINMUX_DATA(PTM4_DATA, PTM4_OUT, PTM4_IN),
+ PINMUX_DATA(PTM3_DATA, PTM3_OUT, PTM3_IN),
+ PINMUX_DATA(PTM2_DATA, PTM2_OUT, PTM2_IN),
+ PINMUX_DATA(PTM1_DATA, PTM1_OUT, PTM1_IN),
+ PINMUX_DATA(PTM0_DATA, PTM0_OUT, PTM0_IN),
/* PTN */
PINMUX_DATA(PTN7_DATA, PTN7_OUT, PTN7_IN),
@@ -417,80 +384,80 @@ static const pinmux_enum_t pinmux_data[] = {
/* PTQ */
PINMUX_DATA(PTQ6_DATA, PTQ6_OUT),
- PINMUX_DATA(PTQ5_DATA, PTQ5_OUT, PTQ5_IN, PTQ5_IN_PD),
- PINMUX_DATA(PTQ4_DATA, PTQ4_OUT, PTQ4_IN, PTQ4_IN_PD),
- PINMUX_DATA(PTQ3_DATA, PTQ3_OUT, PTQ3_IN, PTQ3_IN_PD),
- PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_IN_PD),
+ PINMUX_DATA(PTQ5_DATA, PTQ5_OUT, PTQ5_IN),
+ PINMUX_DATA(PTQ4_DATA, PTQ4_OUT, PTQ4_IN),
+ PINMUX_DATA(PTQ3_DATA, PTQ3_OUT, PTQ3_IN),
+ PINMUX_DATA(PTQ2_DATA, PTQ2_IN),
PINMUX_DATA(PTQ1_DATA, PTQ1_OUT),
- PINMUX_DATA(PTQ0_DATA, PTQ0_OUT, PTQ0_IN, PTQ0_IN_PU),
+ PINMUX_DATA(PTQ0_DATA, PTQ0_OUT, PTQ0_IN),
/* PTR */
PINMUX_DATA(PTR4_DATA, PTR4_OUT),
PINMUX_DATA(PTR3_DATA, PTR3_OUT),
- PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_IN_PU),
+ PINMUX_DATA(PTR2_DATA, PTR2_IN),
PINMUX_DATA(PTR1_DATA, PTR1_OUT),
PINMUX_DATA(PTR0_DATA, PTR0_OUT),
/* PTS */
- PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_IN_PD),
+ PINMUX_DATA(PTS4_DATA, PTS4_IN),
PINMUX_DATA(PTS3_DATA, PTS3_OUT),
- PINMUX_DATA(PTS2_DATA, PTS2_OUT, PTS2_IN, PTS2_IN_PD),
- PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_IN_PD),
+ PINMUX_DATA(PTS2_DATA, PTS2_OUT, PTS2_IN),
+ PINMUX_DATA(PTS1_DATA, PTS1_IN),
PINMUX_DATA(PTS0_DATA, PTS0_OUT),
/* PTT */
- PINMUX_DATA(PTT4_DATA, PTT4_OUT, PTT4_IN, PTT4_IN_PD),
- PINMUX_DATA(PTT3_DATA, PTT3_OUT, PTT3_IN, PTT3_IN_PD),
- PINMUX_DATA(PTT2_DATA, PTT2_OUT, PTT2_IN, PTT2_IN_PD),
- PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_IN_PD),
+ PINMUX_DATA(PTT4_DATA, PTT4_OUT, PTT4_IN),
+ PINMUX_DATA(PTT3_DATA, PTT3_OUT, PTT3_IN),
+ PINMUX_DATA(PTT2_DATA, PTT2_OUT, PTT2_IN),
+ PINMUX_DATA(PTT1_DATA, PTT1_IN),
PINMUX_DATA(PTT0_DATA, PTT0_OUT),
/* PTU */
- PINMUX_DATA(PTU4_DATA, PTU4_OUT, PTU4_IN, PTU4_IN_PD),
- PINMUX_DATA(PTU3_DATA, PTU3_OUT, PTU3_IN, PTU3_IN_PD),
- PINMUX_DATA(PTU2_DATA, PTU2_OUT, PTU2_IN, PTU2_IN_PD),
- PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_IN_PD),
- PINMUX_DATA(PTU0_DATA, PTU0_OUT, PTU0_IN, PTU0_IN_PD),
+ PINMUX_DATA(PTU4_DATA, PTU4_OUT, PTU4_IN),
+ PINMUX_DATA(PTU3_DATA, PTU3_OUT, PTU3_IN),
+ PINMUX_DATA(PTU2_DATA, PTU2_OUT, PTU2_IN),
+ PINMUX_DATA(PTU1_DATA, PTU1_IN),
+ PINMUX_DATA(PTU0_DATA, PTU0_OUT, PTU0_IN),
/* PTV */
- PINMUX_DATA(PTV4_DATA, PTV4_OUT, PTV4_IN, PTV4_IN_PD),
- PINMUX_DATA(PTV3_DATA, PTV3_OUT, PTV3_IN, PTV3_IN_PD),
- PINMUX_DATA(PTV2_DATA, PTV2_OUT, PTV2_IN, PTV2_IN_PD),
- PINMUX_DATA(PTV1_DATA, PTV1_OUT, PTV1_IN, PTV1_IN_PD),
- PINMUX_DATA(PTV0_DATA, PTV0_OUT, PTV0_IN, PTV0_IN_PD),
+ PINMUX_DATA(PTV4_DATA, PTV4_OUT, PTV4_IN),
+ PINMUX_DATA(PTV3_DATA, PTV3_OUT, PTV3_IN),
+ PINMUX_DATA(PTV2_DATA, PTV2_OUT, PTV2_IN),
+ PINMUX_DATA(PTV1_DATA, PTV1_OUT, PTV1_IN),
+ PINMUX_DATA(PTV0_DATA, PTV0_OUT, PTV0_IN),
/* PTW */
- PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_IN_PD),
+ PINMUX_DATA(PTW6_DATA, PTW6_IN),
PINMUX_DATA(PTW5_DATA, PTW5_OUT),
- PINMUX_DATA(PTW4_DATA, PTW4_OUT, PTW4_IN, PTW4_IN_PD),
- PINMUX_DATA(PTW3_DATA, PTW3_OUT, PTW3_IN, PTW3_IN_PD),
- PINMUX_DATA(PTW2_DATA, PTW2_OUT, PTW2_IN, PTW2_IN_PD),
- PINMUX_DATA(PTW1_DATA, PTW1_OUT, PTW1_IN, PTW1_IN_PD),
- PINMUX_DATA(PTW0_DATA, PTW0_OUT, PTW0_IN, PTW0_IN_PD),
+ PINMUX_DATA(PTW4_DATA, PTW4_OUT, PTW4_IN),
+ PINMUX_DATA(PTW3_DATA, PTW3_OUT, PTW3_IN),
+ PINMUX_DATA(PTW2_DATA, PTW2_OUT, PTW2_IN),
+ PINMUX_DATA(PTW1_DATA, PTW1_OUT, PTW1_IN),
+ PINMUX_DATA(PTW0_DATA, PTW0_OUT, PTW0_IN),
/* PTX */
- PINMUX_DATA(PTX6_DATA, PTX6_OUT, PTX6_IN, PTX6_IN_PD),
- PINMUX_DATA(PTX5_DATA, PTX5_OUT, PTX5_IN, PTX5_IN_PD),
- PINMUX_DATA(PTX4_DATA, PTX4_OUT, PTX4_IN, PTX4_IN_PD),
- PINMUX_DATA(PTX3_DATA, PTX3_OUT, PTX3_IN, PTX3_IN_PD),
- PINMUX_DATA(PTX2_DATA, PTX2_OUT, PTX2_IN, PTX2_IN_PD),
- PINMUX_DATA(PTX1_DATA, PTX1_OUT, PTX1_IN, PTX1_IN_PD),
- PINMUX_DATA(PTX0_DATA, PTX0_OUT, PTX0_IN, PTX0_IN_PD),
+ PINMUX_DATA(PTX6_DATA, PTX6_OUT, PTX6_IN),
+ PINMUX_DATA(PTX5_DATA, PTX5_OUT, PTX5_IN),
+ PINMUX_DATA(PTX4_DATA, PTX4_OUT, PTX4_IN),
+ PINMUX_DATA(PTX3_DATA, PTX3_OUT, PTX3_IN),
+ PINMUX_DATA(PTX2_DATA, PTX2_OUT, PTX2_IN),
+ PINMUX_DATA(PTX1_DATA, PTX1_OUT, PTX1_IN),
+ PINMUX_DATA(PTX0_DATA, PTX0_OUT, PTX0_IN),
/* PTY */
- PINMUX_DATA(PTY5_DATA, PTY5_OUT, PTY5_IN, PTY5_IN_PU),
- PINMUX_DATA(PTY4_DATA, PTY4_OUT, PTY4_IN, PTY4_IN_PU),
- PINMUX_DATA(PTY3_DATA, PTY3_OUT, PTY3_IN, PTY3_IN_PU),
- PINMUX_DATA(PTY2_DATA, PTY2_OUT, PTY2_IN, PTY2_IN_PU),
+ PINMUX_DATA(PTY5_DATA, PTY5_OUT, PTY5_IN),
+ PINMUX_DATA(PTY4_DATA, PTY4_OUT, PTY4_IN),
+ PINMUX_DATA(PTY3_DATA, PTY3_OUT, PTY3_IN),
+ PINMUX_DATA(PTY2_DATA, PTY2_OUT, PTY2_IN),
PINMUX_DATA(PTY1_DATA, PTY1_OUT),
- PINMUX_DATA(PTY0_DATA, PTY0_OUT, PTY0_IN, PTY0_IN_PU),
+ PINMUX_DATA(PTY0_DATA, PTY0_OUT, PTY0_IN),
/* PTZ */
- PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_IN_PU),
- PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_IN_PU),
- PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_IN_PU),
- PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_IN_PU),
- PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_IN_PU),
+ PINMUX_DATA(PTZ5_DATA, PTZ5_IN),
+ PINMUX_DATA(PTZ4_DATA, PTZ4_IN),
+ PINMUX_DATA(PTZ3_DATA, PTZ3_IN),
+ PINMUX_DATA(PTZ2_DATA, PTZ2_IN),
+ PINMUX_DATA(PTZ1_DATA, PTZ1_IN),
/* SCIF0 */
PINMUX_DATA(SCIF0_TXD_MARK, SCIF0_TXD),
@@ -789,199 +756,199 @@ static const pinmux_enum_t pinmux_data[] = {
static struct sh_pfc_pin pinmux_pins[] = {
/* PTA */
- PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
- PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
- PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
- PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
- PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
- PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
- PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
- PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+ PINMUX_GPIO(PTA7),
+ PINMUX_GPIO(PTA6),
+ PINMUX_GPIO(PTA5),
+ PINMUX_GPIO(PTA4),
+ PINMUX_GPIO(PTA3),
+ PINMUX_GPIO(PTA2),
+ PINMUX_GPIO(PTA1),
+ PINMUX_GPIO(PTA0),
/* PTB */
- PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
- PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
- PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
- PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
- PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
- PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
- PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
- PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+ PINMUX_GPIO(PTB7),
+ PINMUX_GPIO(PTB6),
+ PINMUX_GPIO(PTB5),
+ PINMUX_GPIO(PTB4),
+ PINMUX_GPIO(PTB3),
+ PINMUX_GPIO(PTB2),
+ PINMUX_GPIO(PTB1),
+ PINMUX_GPIO(PTB0),
/* PTC */
- PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
- PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
- PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
- PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
- PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
- PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+ PINMUX_GPIO(PTC7),
+ PINMUX_GPIO(PTC5),
+ PINMUX_GPIO(PTC4),
+ PINMUX_GPIO(PTC3),
+ PINMUX_GPIO(PTC2),
+ PINMUX_GPIO(PTC0),
/* PTD */
- PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
- PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
- PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
- PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
- PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
- PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
- PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
- PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+ PINMUX_GPIO(PTD7),
+ PINMUX_GPIO(PTD6),
+ PINMUX_GPIO(PTD5),
+ PINMUX_GPIO(PTD4),
+ PINMUX_GPIO(PTD3),
+ PINMUX_GPIO(PTD2),
+ PINMUX_GPIO(PTD1),
+ PINMUX_GPIO(PTD0),
/* PTE */
- PINMUX_GPIO(GPIO_PTE7, PTE7_DATA),
- PINMUX_GPIO(GPIO_PTE6, PTE6_DATA),
- PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
- PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
- PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
- PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+ PINMUX_GPIO(PTE7),
+ PINMUX_GPIO(PTE6),
+ PINMUX_GPIO(PTE5),
+ PINMUX_GPIO(PTE4),
+ PINMUX_GPIO(PTE1),
+ PINMUX_GPIO(PTE0),
/* PTF */
- PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
- PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
- PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
- PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
- PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
- PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
- PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+ PINMUX_GPIO(PTF6),
+ PINMUX_GPIO(PTF5),
+ PINMUX_GPIO(PTF4),
+ PINMUX_GPIO(PTF3),
+ PINMUX_GPIO(PTF2),
+ PINMUX_GPIO(PTF1),
+ PINMUX_GPIO(PTF0),
/* PTG */
- PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
- PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
- PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
- PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
- PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+ PINMUX_GPIO(PTG4),
+ PINMUX_GPIO(PTG3),
+ PINMUX_GPIO(PTG2),
+ PINMUX_GPIO(PTG1),
+ PINMUX_GPIO(PTG0),
/* PTH */
- PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
- PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
- PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
- PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
- PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
- PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
- PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
- PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+ PINMUX_GPIO(PTH7),
+ PINMUX_GPIO(PTH6),
+ PINMUX_GPIO(PTH5),
+ PINMUX_GPIO(PTH4),
+ PINMUX_GPIO(PTH3),
+ PINMUX_GPIO(PTH2),
+ PINMUX_GPIO(PTH1),
+ PINMUX_GPIO(PTH0),
/* PTJ */
- PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA),
- PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
- PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
- PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
- PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+ PINMUX_GPIO(PTJ7),
+ PINMUX_GPIO(PTJ6),
+ PINMUX_GPIO(PTJ5),
+ PINMUX_GPIO(PTJ1),
+ PINMUX_GPIO(PTJ0),
/* PTK */
- PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
- PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
- PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
- PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
- PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
- PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
- PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+ PINMUX_GPIO(PTK6),
+ PINMUX_GPIO(PTK5),
+ PINMUX_GPIO(PTK4),
+ PINMUX_GPIO(PTK3),
+ PINMUX_GPIO(PTK2),
+ PINMUX_GPIO(PTK1),
+ PINMUX_GPIO(PTK0),
/* PTL */
- PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
- PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
- PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
- PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
- PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
- PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
- PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
- PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
+ PINMUX_GPIO(PTL7),
+ PINMUX_GPIO(PTL6),
+ PINMUX_GPIO(PTL5),
+ PINMUX_GPIO(PTL4),
+ PINMUX_GPIO(PTL3),
+ PINMUX_GPIO(PTL2),
+ PINMUX_GPIO(PTL1),
+ PINMUX_GPIO(PTL0),
/* PTM */
- PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
- PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
- PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
- PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
- PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
- PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
- PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
- PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+ PINMUX_GPIO(PTM7),
+ PINMUX_GPIO(PTM6),
+ PINMUX_GPIO(PTM5),
+ PINMUX_GPIO(PTM4),
+ PINMUX_GPIO(PTM3),
+ PINMUX_GPIO(PTM2),
+ PINMUX_GPIO(PTM1),
+ PINMUX_GPIO(PTM0),
/* PTN */
- PINMUX_GPIO(GPIO_PTN7, PTN7_DATA),
- PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
- PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
- PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
- PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
- PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
- PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
- PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
+ PINMUX_GPIO(PTN7),
+ PINMUX_GPIO(PTN6),
+ PINMUX_GPIO(PTN5),
+ PINMUX_GPIO(PTN4),
+ PINMUX_GPIO(PTN3),
+ PINMUX_GPIO(PTN2),
+ PINMUX_GPIO(PTN1),
+ PINMUX_GPIO(PTN0),
/* PTQ */
- PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA),
- PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA),
- PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA),
- PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
- PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
- PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
- PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
+ PINMUX_GPIO(PTQ6),
+ PINMUX_GPIO(PTQ5),
+ PINMUX_GPIO(PTQ4),
+ PINMUX_GPIO(PTQ3),
+ PINMUX_GPIO(PTQ2),
+ PINMUX_GPIO(PTQ1),
+ PINMUX_GPIO(PTQ0),
/* PTR */
- PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
- PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
- PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
- PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
- PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+ PINMUX_GPIO(PTR4),
+ PINMUX_GPIO(PTR3),
+ PINMUX_GPIO(PTR2),
+ PINMUX_GPIO(PTR1),
+ PINMUX_GPIO(PTR0),
/* PTS */
- PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
- PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
- PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
- PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
- PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+ PINMUX_GPIO(PTS4),
+ PINMUX_GPIO(PTS3),
+ PINMUX_GPIO(PTS2),
+ PINMUX_GPIO(PTS1),
+ PINMUX_GPIO(PTS0),
/* PTT */
- PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
- PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
- PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
- PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
- PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+ PINMUX_GPIO(PTT4),
+ PINMUX_GPIO(PTT3),
+ PINMUX_GPIO(PTT2),
+ PINMUX_GPIO(PTT1),
+ PINMUX_GPIO(PTT0),
/* PTU */
- PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
- PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
- PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
- PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
- PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+ PINMUX_GPIO(PTU4),
+ PINMUX_GPIO(PTU3),
+ PINMUX_GPIO(PTU2),
+ PINMUX_GPIO(PTU1),
+ PINMUX_GPIO(PTU0),
/* PTV */
- PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
- PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
- PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
- PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
- PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+ PINMUX_GPIO(PTV4),
+ PINMUX_GPIO(PTV3),
+ PINMUX_GPIO(PTV2),
+ PINMUX_GPIO(PTV1),
+ PINMUX_GPIO(PTV0),
/* PTW */
- PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
- PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
- PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
- PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
- PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
- PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
- PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
+ PINMUX_GPIO(PTW6),
+ PINMUX_GPIO(PTW5),
+ PINMUX_GPIO(PTW4),
+ PINMUX_GPIO(PTW3),
+ PINMUX_GPIO(PTW2),
+ PINMUX_GPIO(PTW1),
+ PINMUX_GPIO(PTW0),
/* PTX */
- PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
- PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
- PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
- PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
- PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
- PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
- PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
+ PINMUX_GPIO(PTX6),
+ PINMUX_GPIO(PTX5),
+ PINMUX_GPIO(PTX4),
+ PINMUX_GPIO(PTX3),
+ PINMUX_GPIO(PTX2),
+ PINMUX_GPIO(PTX1),
+ PINMUX_GPIO(PTX0),
/* PTY */
- PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
- PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
- PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
- PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
- PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
- PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
+ PINMUX_GPIO(PTY5),
+ PINMUX_GPIO(PTY4),
+ PINMUX_GPIO(PTY3),
+ PINMUX_GPIO(PTY2),
+ PINMUX_GPIO(PTY1),
+ PINMUX_GPIO(PTY0),
/* PTZ */
- PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
- PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
- PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
- PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
- PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
+ PINMUX_GPIO(PTZ5),
+ PINMUX_GPIO(PTZ4),
+ PINMUX_GPIO(PTZ3),
+ PINMUX_GPIO(PTZ2),
+ PINMUX_GPIO(PTZ1),
};
#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
@@ -1270,14 +1237,14 @@ static const struct pinmux_func pinmux_func_gpios[] = {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
- VIO_D7_SCIF1_SCK, PTA7_OUT, PTA7_IN_PD, PTA7_IN,
- VIO_D6_SCIF1_RXD, 0, PTA6_IN_PD, PTA6_IN,
- VIO_D5_SCIF1_TXD, PTA5_OUT, PTA5_IN_PD, PTA5_IN,
- VIO_D4, 0, PTA4_IN_PD, PTA4_IN,
- VIO_D3, 0, PTA3_IN_PD, PTA3_IN,
- VIO_D2, 0, PTA2_IN_PD, PTA2_IN,
- VIO_D1, 0, PTA1_IN_PD, PTA1_IN,
- VIO_D0_LCDLCLK, 0, PTA0_IN_PD, PTA0_IN }
+ VIO_D7_SCIF1_SCK, PTA7_OUT, 0, PTA7_IN,
+ VIO_D6_SCIF1_RXD, 0, 0, PTA6_IN,
+ VIO_D5_SCIF1_TXD, PTA5_OUT, 0, PTA5_IN,
+ VIO_D4, 0, 0, PTA4_IN,
+ VIO_D3, 0, 0, PTA3_IN,
+ VIO_D2, 0, 0, PTA2_IN,
+ VIO_D1, 0, 0, PTA1_IN,
+ VIO_D0_LCDLCLK, 0, 0, PTA0_IN }
},
{ PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
HPD55, PTB7_OUT, 0, PTB7_IN,
@@ -1290,9 +1257,9 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
HPD48, PTB0_OUT, 0, PTB0_IN }
},
{ PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
- 0, 0, PTC7_IN_PU, PTC7_IN,
+ 0, 0, 0, PTC7_IN,
0, 0, 0, 0,
- IOIS16, 0, PTC5_IN_PU, PTC5_IN,
+ IOIS16, 0, 0, PTC5_IN,
HPDQM7, PTC4_OUT, 0, PTC4_IN,
HPDQM6, PTC3_OUT, 0, PTC3_IN,
HPDQM5, PTC2_OUT, 0, PTC2_IN,
@@ -1300,33 +1267,33 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
HPDQM4, PTC0_OUT, 0, PTC0_IN }
},
{ PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
- SDHICD, 0, PTD7_IN_PU, PTD7_IN,
- SDHIWP, PTD6_OUT, PTD6_IN_PU, PTD6_IN,
- SDHID3, PTD5_OUT, PTD5_IN_PU, PTD5_IN,
- IRQ2_SDHID2, PTD4_OUT, PTD4_IN_PU, PTD4_IN,
- SDHID1, PTD3_OUT, PTD3_IN_PU, PTD3_IN,
- SDHID0, PTD2_OUT, PTD2_IN_PU, PTD2_IN,
- SDHICMD, PTD1_OUT, PTD1_IN_PU, PTD1_IN,
+ SDHICD, 0, 0, PTD7_IN,
+ SDHIWP, PTD6_OUT, 0, PTD6_IN,
+ SDHID3, PTD5_OUT, 0, PTD5_IN,
+ IRQ2_SDHID2, PTD4_OUT, 0, PTD4_IN,
+ SDHID1, PTD3_OUT, 0, PTD3_IN,
+ SDHID0, PTD2_OUT, 0, PTD2_IN,
+ SDHICMD, PTD1_OUT, 0, PTD1_IN,
SDHICLK, PTD0_OUT, 0, 0 }
},
{ PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
- A25, PTE7_OUT, PTE7_IN_PD, PTE7_IN,
- A24, PTE6_OUT, PTE6_IN_PD, PTE6_IN,
- A23, PTE5_OUT, PTE5_IN_PD, PTE5_IN,
- A22, PTE4_OUT, PTE4_IN_PD, PTE4_IN,
+ A25, PTE7_OUT, 0, PTE7_IN,
+ A24, PTE6_OUT, 0, PTE6_IN,
+ A23, PTE5_OUT, 0, PTE5_IN,
+ A22, PTE4_OUT, 0, PTE4_IN,
0, 0, 0, 0,
0, 0, 0, 0,
- IRQ5, PTE1_OUT, PTE1_IN_PD, PTE1_IN,
- IRQ4_BS, PTE0_OUT, PTE0_IN_PD, PTE0_IN }
+ IRQ5, PTE1_OUT, 0, PTE1_IN,
+ IRQ4_BS, PTE0_OUT, 0, PTE0_IN }
},
{ PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
0, 0, 0, 0,
- PTF6, PTF6_OUT, PTF6_IN_PD, PTF6_IN,
- SIOSCK_SIUBOBT, PTF5_OUT, PTF5_IN_PD, PTF5_IN,
- SIOSTRB1_SIUBOLR, PTF4_OUT, PTF4_IN_PD, PTF4_IN,
- SIOSTRB0_SIUBIBT, PTF3_OUT, PTF3_IN_PD, PTF3_IN,
- SIOD_SIUBILR, PTF2_OUT, PTF2_IN_PD, PTF2_IN,
- SIORXD_SIUBISLD, 0, PTF1_IN_PD, PTF1_IN,
+ PTF6, PTF6_OUT, 0, PTF6_IN,
+ SIOSCK_SIUBOBT, PTF5_OUT, 0, PTF5_IN,
+ SIOSTRB1_SIUBOLR, PTF4_OUT, 0, PTF4_IN,
+ SIOSTRB0_SIUBIBT, PTF3_OUT, 0, PTF3_IN,
+ SIOD_SIUBILR, PTF2_OUT, 0, PTF2_IN,
+ SIORXD_SIUBISLD, 0, 0, PTF1_IN,
SIOTXD_SIUBOSLD, PTF0_OUT, 0, 0 }
},
{ PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
@@ -1341,13 +1308,13 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
},
{ PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
LCDVCPWC_LCDVCPWC2, PTH7_OUT, 0, 0,
- LCDVSYN2_DACK, PTH6_OUT, PTH6_IN_PD, PTH6_IN,
- LCDVSYN, PTH5_OUT, PTH5_IN_PD, PTH5_IN,
+ LCDVSYN2_DACK, PTH6_OUT, 0, PTH6_IN,
+ LCDVSYN, PTH5_OUT, 0, PTH5_IN,
LCDDISP_LCDRS, PTH4_OUT, 0, 0,
LCDHSYN_LCDCS, PTH3_OUT, 0, 0,
LCDDON_LCDDON2, PTH2_OUT, 0, 0,
- LCDD17_DV_HSYNC, PTH1_OUT, PTH1_IN_PD, PTH1_IN,
- LCDD16_DV_VSYNC, PTH0_OUT, PTH0_IN_PD, PTH0_IN }
+ LCDD17_DV_HSYNC, PTH1_OUT, 0, PTH1_IN,
+ LCDD16_DV_VSYNC, PTH0_OUT, 0, PTH0_IN }
},
{ PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
STATUS0, PTJ7_OUT, 0, 0,
@@ -1356,38 +1323,38 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- IRQ1, PTJ1_OUT, PTJ1_IN_PU, PTJ1_IN,
- IRQ0, PTJ0_OUT, PTJ0_IN_PU, PTJ0_IN }
+ IRQ1, PTJ1_OUT, 0, PTJ1_IN,
+ IRQ0, PTJ0_OUT, 0, PTJ0_IN }
},
{ PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
0, 0, 0, 0,
- SIUAILR_SIOF1_SS2, PTK6_OUT, PTK6_IN_PD, PTK6_IN,
- SIUAIBT_SIOF1_SS1, PTK5_OUT, PTK5_IN_PD, PTK5_IN,
- SIUAOLR_SIOF1_SYNC, PTK4_OUT, PTK4_IN_PD, PTK4_IN,
- SIUAOBT_SIOF1_SCK, PTK3_OUT, PTK3_IN_PD, PTK3_IN,
- SIUAISLD_SIOF1_RXD, 0, PTK2_IN_PD, PTK2_IN,
+ SIUAILR_SIOF1_SS2, PTK6_OUT, 0, PTK6_IN,
+ SIUAIBT_SIOF1_SS1, PTK5_OUT, 0, PTK5_IN,
+ SIUAOLR_SIOF1_SYNC, PTK4_OUT, 0, PTK4_IN,
+ SIUAOBT_SIOF1_SCK, PTK3_OUT, 0, PTK3_IN,
+ SIUAISLD_SIOF1_RXD, 0, 0, PTK2_IN,
SIUAOSLD_SIOF1_TXD, PTK1_OUT, 0, 0,
- PTK0, PTK0_OUT, PTK0_IN_PD, PTK0_IN }
+ PTK0, PTK0_OUT, 0, PTK0_IN }
},
{ PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
- LCDD15_DV_D15, PTL7_OUT, PTL7_IN_PD, PTL7_IN,
- LCDD14_DV_D14, PTL6_OUT, PTL6_IN_PD, PTL6_IN,
- LCDD13_DV_D13, PTL5_OUT, PTL5_IN_PD, PTL5_IN,
- LCDD12_DV_D12, PTL4_OUT, PTL4_IN_PD, PTL4_IN,
- LCDD11_DV_D11, PTL3_OUT, PTL3_IN_PD, PTL3_IN,
- LCDD10_DV_D10, PTL2_OUT, PTL2_IN_PD, PTL2_IN,
- LCDD9_DV_D9, PTL1_OUT, PTL1_IN_PD, PTL1_IN,
- LCDD8_DV_D8, PTL0_OUT, PTL0_IN_PD, PTL0_IN }
+ LCDD15_DV_D15, PTL7_OUT, 0, PTL7_IN,
+ LCDD14_DV_D14, PTL6_OUT, 0, PTL6_IN,
+ LCDD13_DV_D13, PTL5_OUT, 0, PTL5_IN,
+ LCDD12_DV_D12, PTL4_OUT, 0, PTL4_IN,
+ LCDD11_DV_D11, PTL3_OUT, 0, PTL3_IN,
+ LCDD10_DV_D10, PTL2_OUT, 0, PTL2_IN,
+ LCDD9_DV_D9, PTL1_OUT, 0, PTL1_IN,
+ LCDD8_DV_D8, PTL0_OUT, 0, PTL0_IN }
},
{ PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
- LCDD7_DV_D7, PTM7_OUT, PTM7_IN_PD, PTM7_IN,
- LCDD6_DV_D6, PTM6_OUT, PTM6_IN_PD, PTM6_IN,
- LCDD5_DV_D5, PTM5_OUT, PTM5_IN_PD, PTM5_IN,
- LCDD4_DV_D4, PTM4_OUT, PTM4_IN_PD, PTM4_IN,
- LCDD3_DV_D3, PTM3_OUT, PTM3_IN_PD, PTM3_IN,
- LCDD2_DV_D2, PTM2_OUT, PTM2_IN_PD, PTM2_IN,
- LCDD1_DV_D1, PTM1_OUT, PTM1_IN_PD, PTM1_IN,
- LCDD0_DV_D0, PTM0_OUT, PTM0_IN_PD, PTM0_IN }
+ LCDD7_DV_D7, PTM7_OUT, 0, PTM7_IN,
+ LCDD6_DV_D6, PTM6_OUT, 0, PTM6_IN,
+ LCDD5_DV_D5, PTM5_OUT, 0, PTM5_IN,
+ LCDD4_DV_D4, PTM4_OUT, 0, PTM4_IN,
+ LCDD3_DV_D3, PTM3_OUT, 0, PTM3_IN,
+ LCDD2_DV_D2, PTM2_OUT, 0, PTM2_IN,
+ LCDD1_DV_D1, PTM1_OUT, 0, PTM1_IN,
+ LCDD0_DV_D0, PTM0_OUT, 0, PTM0_IN }
},
{ PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) {
HPD63, PTN7_OUT, 0, PTN7_IN,
@@ -1402,12 +1369,12 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) {
0, 0, 0, 0,
SIOF0_SS2_SIM_RST, PTQ6_OUT, 0, 0,
- SIOF0_SS1_TS_SPSYNC, PTQ5_OUT, PTQ5_IN_PD, PTQ5_IN,
- SIOF0_SYNC_TS_SDEN, PTQ4_OUT, PTQ4_IN_PD, PTQ4_IN,
- SIOF0_SCK_TS_SCK, PTQ3_OUT, PTQ3_IN_PD, PTQ3_IN,
- PTQ2, 0, PTQ2_IN_PD, PTQ2_IN,
+ SIOF0_SS1_TS_SPSYNC, PTQ5_OUT, 0, PTQ5_IN,
+ SIOF0_SYNC_TS_SDEN, PTQ4_OUT, 0, PTQ4_IN,
+ SIOF0_SCK_TS_SCK, PTQ3_OUT, 0, PTQ3_IN,
+ PTQ2, 0, 0, PTQ2_IN,
PTQ1, PTQ1_OUT, 0, 0,
- PTQ0, PTQ0_OUT, PTQ0_IN_PU, PTQ0_IN }
+ PTQ0, PTQ0_OUT, 0, PTQ0_IN }
},
{ PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) {
0, 0, 0, 0,
@@ -1415,7 +1382,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0,
LCDRD, PTR4_OUT, 0, 0,
CS6B_CE1B_LCDCS2, PTR3_OUT, 0, 0,
- WAIT, 0, PTR2_IN_PU, PTR2_IN,
+ WAIT, 0, 0, PTR2_IN,
LCDDCK_LCDWR, PTR1_OUT, 0, 0,
LCDVEPWC_LCDVEPWC2, PTR0_OUT, 0, 0 }
},
@@ -1423,80 +1390,80 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- SCIF0_CTS_SIUAISPD, 0, PTS4_IN_PD, PTS4_IN,
+ SCIF0_CTS_SIUAISPD, 0, 0, PTS4_IN,
SCIF0_RTS_SIUAOSPD, PTS3_OUT, 0, 0,
- SCIF0_SCK_TPUTO, PTS2_OUT, PTS2_IN_PD, PTS2_IN,
- SCIF0_RXD, 0, PTS1_IN_PD, PTS1_IN,
+ SCIF0_SCK_TPUTO, PTS2_OUT, 0, PTS2_IN,
+ SCIF0_RXD, 0, 0, PTS1_IN,
SCIF0_TXD, PTS0_OUT, 0, 0 }
},
{ PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- FOE_VIO_VD2, PTT4_OUT, PTT4_IN_PD, PTT4_IN,
- FWE, PTT3_OUT, PTT3_IN_PD, PTT3_IN,
- FSC, PTT2_OUT, PTT2_IN_PD, PTT2_IN,
- DREQ0, 0, PTT1_IN_PD, PTT1_IN,
+ FOE_VIO_VD2, PTT4_OUT, 0, PTT4_IN,
+ FWE, PTT3_OUT, 0, PTT3_IN,
+ FSC, PTT2_OUT, 0, PTT2_IN,
+ DREQ0, 0, 0, PTT1_IN,
FCDE, PTT0_OUT, 0, 0 }
},
{ PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- NAF2_VIO_D10, PTU4_OUT, PTU4_IN_PD, PTU4_IN,
- NAF1_VIO_D9, PTU3_OUT, PTU3_IN_PD, PTU3_IN,
- NAF0_VIO_D8, PTU2_OUT, PTU2_IN_PD, PTU2_IN,
- FRB_VIO_CLK2, 0, PTU1_IN_PD, PTU1_IN,
- FCE_VIO_HD2, PTU0_OUT, PTU0_IN_PD, PTU0_IN }
+ NAF2_VIO_D10, PTU4_OUT, 0, PTU4_IN,
+ NAF1_VIO_D9, PTU3_OUT, 0, PTU3_IN,
+ NAF0_VIO_D8, PTU2_OUT, 0, PTU2_IN,
+ FRB_VIO_CLK2, 0, 0, PTU1_IN,
+ FCE_VIO_HD2, PTU0_OUT, 0, PTU0_IN }
},
{ PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- NAF7_VIO_D15, PTV4_OUT, PTV4_IN_PD, PTV4_IN,
- NAF6_VIO_D14, PTV3_OUT, PTV3_IN_PD, PTV3_IN,
- NAF5_VIO_D13, PTV2_OUT, PTV2_IN_PD, PTV2_IN,
- NAF4_VIO_D12, PTV1_OUT, PTV1_IN_PD, PTV1_IN,
- NAF3_VIO_D11, PTV0_OUT, PTV0_IN_PD, PTV0_IN }
+ NAF7_VIO_D15, PTV4_OUT, 0, PTV4_IN,
+ NAF6_VIO_D14, PTV3_OUT, 0, PTV3_IN,
+ NAF5_VIO_D13, PTV2_OUT, 0, PTV2_IN,
+ NAF4_VIO_D12, PTV1_OUT, 0, PTV1_IN,
+ NAF3_VIO_D11, PTV0_OUT, 0, PTV0_IN }
},
{ PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) {
0, 0, 0, 0,
- VIO_FLD_SCIF2_CTS, 0, PTW6_IN_PD, PTW6_IN,
+ VIO_FLD_SCIF2_CTS, 0, 0, PTW6_IN,
VIO_CKO_SCIF2_RTS, PTW5_OUT, 0, 0,
- VIO_STEX_SCIF2_SCK, PTW4_OUT, PTW4_IN_PD, PTW4_IN,
- VIO_STEM_SCIF2_TXD, PTW3_OUT, PTW3_IN_PD, PTW3_IN,
- VIO_HD_SCIF2_RXD, PTW2_OUT, PTW2_IN_PD, PTW2_IN,
- VIO_VD_SCIF1_CTS, PTW1_OUT, PTW1_IN_PD, PTW1_IN,
- VIO_CLK_SCIF1_RTS, PTW0_OUT, PTW0_IN_PD, PTW0_IN }
+ VIO_STEX_SCIF2_SCK, PTW4_OUT, 0, PTW4_IN,
+ VIO_STEM_SCIF2_TXD, PTW3_OUT, 0, PTW3_IN,
+ VIO_HD_SCIF2_RXD, PTW2_OUT, 0, PTW2_IN,
+ VIO_VD_SCIF1_CTS, PTW1_OUT, 0, PTW1_IN,
+ VIO_CLK_SCIF1_RTS, PTW0_OUT, 0, PTW0_IN }
},
{ PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) {
0, 0, 0, 0,
- CS6A_CE2B, PTX6_OUT, PTX6_IN_PU, PTX6_IN,
- LCDD23, PTX5_OUT, PTX5_IN_PD, PTX5_IN,
- LCDD22, PTX4_OUT, PTX4_IN_PD, PTX4_IN,
- LCDD21, PTX3_OUT, PTX3_IN_PD, PTX3_IN,
- LCDD20, PTX2_OUT, PTX2_IN_PD, PTX2_IN,
- LCDD19_DV_CLKI, PTX1_OUT, PTX1_IN_PD, PTX1_IN,
- LCDD18_DV_CLK, PTX0_OUT, PTX0_IN_PD, PTX0_IN }
+ CS6A_CE2B, PTX6_OUT, 0, PTX6_IN,
+ LCDD23, PTX5_OUT, 0, PTX5_IN,
+ LCDD22, PTX4_OUT, 0, PTX4_IN,
+ LCDD21, PTX3_OUT, 0, PTX3_IN,
+ LCDD20, PTX2_OUT, 0, PTX2_IN,
+ LCDD19_DV_CLKI, PTX1_OUT, 0, PTX1_IN,
+ LCDD18_DV_CLK, PTX0_OUT, 0, PTX0_IN }
},
{ PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) {
0, 0, 0, 0,
0, 0, 0, 0,
- KEYOUT5_IN5, PTY5_OUT, PTY5_IN_PU, PTY5_IN,
- KEYOUT4_IN6, PTY4_OUT, PTY4_IN_PU, PTY4_IN,
- KEYOUT3, PTY3_OUT, PTY3_IN_PU, PTY3_IN,
- KEYOUT2, PTY2_OUT, PTY2_IN_PU, PTY2_IN,
+ KEYOUT5_IN5, PTY5_OUT, 0, PTY5_IN,
+ KEYOUT4_IN6, PTY4_OUT, 0, PTY4_IN,
+ KEYOUT3, PTY3_OUT, 0, PTY3_IN,
+ KEYOUT2, PTY2_OUT, 0, PTY2_IN,
KEYOUT1, PTY1_OUT, 0, 0,
- KEYOUT0, PTY0_OUT, PTY0_IN_PU, PTY0_IN }
+ KEYOUT0, PTY0_OUT, 0, PTY0_IN }
},
{ PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) {
0, 0, 0, 0,
0, 0, 0, 0,
- KEYIN4_IRQ7, 0, PTZ5_IN_PU, PTZ5_IN,
- KEYIN3, 0, PTZ4_IN_PU, PTZ4_IN,
- KEYIN2, 0, PTZ3_IN_PU, PTZ3_IN,
- KEYIN1, 0, PTZ2_IN_PU, PTZ2_IN,
- KEYIN0_IRQ6, 0, PTZ1_IN_PU, PTZ1_IN,
+ KEYIN4_IRQ7, 0, 0, PTZ5_IN,
+ KEYIN3, 0, 0, PTZ4_IN,
+ KEYIN2, 0, 0, PTZ3_IN,
+ KEYIN1, 0, 0, PTZ2_IN,
+ KEYIN0_IRQ6, 0, 0, PTZ1_IN,
0, 0, 0, 0 }
},
{ PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 1) {
@@ -1763,8 +1730,6 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
const struct sh_pfc_soc_info sh7722_pinmux_info = {
.name = "sh7722_pfc",
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
- .input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN, PINMUX_INPUT_PULLDOWN_END },
- .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
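
The sh7722 rewrite above follows one mechanical rule: PINMUX_GPIO() now takes the bare port name instead of a (GPIO id, _DATA enum) pair, and the pull-up/pull-down enum ranges are dropped from the SoC info. A minimal sketch of what the two macro forms could expand to is shown below; the exact definitions live in drivers/pinctrl/sh-pfc/sh_pfc.h and are assumed here for illustration, not quoted from this patch.

	/*
	 * Hypothetical illustration only -- field names and layout are assumptions.
	 * Old call sites named both the GPIO id and the data enum explicitly:
	 */
	#define PINMUX_GPIO_OLD(gpio, data_or_mark)	\
		{ .name = #gpio, .enum_id = (data_or_mark) }

	/*
	 * New call sites pass just the port name; the macro pastes the GPIO_
	 * prefix and _DATA suffix itself, which is why every pinmux_pins[]
	 * entry shrinks from PINMUX_GPIO(GPIO_PTA7, PTA7_DATA) to
	 * PINMUX_GPIO(PTA7) in the hunks above.
	 */
	#define PINMUX_GPIO_NEW(pin)			\
		{ .name = "GPIO_" #pin, .enum_id = pin##_DATA }
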
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7723.c b/drivers/pinctrl/sh-pfc/pfc-sh7723.c
index 07ad1d8d6c8..1cecc9101a5 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7723.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7723.c
@@ -102,12 +102,6 @@ enum {
PTZ3_IN, PTZ2_IN, PTZ1_IN, PTZ0_IN,
PINMUX_INPUT_END,
- PINMUX_INPUT_PULLUP_BEGIN,
- PTA4_IN_PU, PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
- PTB2_IN_PU, PTB1_IN_PU,
- PTR2_IN_PU,
- PINMUX_INPUT_PULLUP_END,
-
PINMUX_OUTPUT_BEGIN,
PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
@@ -350,16 +344,16 @@ enum {
PINMUX_MARK_END,
};
-static const pinmux_enum_t pinmux_data[] = {
+static const u16 pinmux_data[] = {
/* PTA GPIO */
PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT),
PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT),
PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT),
- PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT, PTA4_IN_PU),
- PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT, PTA3_IN_PU),
- PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT, PTA2_IN_PU),
- PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT, PTA1_IN_PU),
- PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT, PTA0_IN_PU),
+ PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT),
+ PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT),
+ PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT),
+ PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT),
+ PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT),
/* PTB GPIO */
PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT),
@@ -367,8 +361,8 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT),
PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT),
PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT),
- PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT, PTB2_IN_PU),
- PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT, PTB1_IN_PU),
+ PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT),
+ PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT),
PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT),
/* PTC GPIO */
@@ -487,7 +481,7 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT),
PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT),
PINMUX_DATA(PTR3_DATA, PTR3_IN),
- PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_IN_PU),
+ PINMUX_DATA(PTR2_DATA, PTR2_IN),
PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT),
PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT),
@@ -925,220 +919,220 @@ static const pinmux_enum_t pinmux_data[] = {
static struct sh_pfc_pin pinmux_pins[] = {
/* PTA */
- PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
- PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
- PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
- PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
- PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
- PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
- PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
- PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+ PINMUX_GPIO(PTA7),
+ PINMUX_GPIO(PTA6),
+ PINMUX_GPIO(PTA5),
+ PINMUX_GPIO(PTA4),
+ PINMUX_GPIO(PTA3),
+ PINMUX_GPIO(PTA2),
+ PINMUX_GPIO(PTA1),
+ PINMUX_GPIO(PTA0),
/* PTB */
- PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
- PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
- PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
- PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
- PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
- PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
- PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
- PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+ PINMUX_GPIO(PTB7),
+ PINMUX_GPIO(PTB6),
+ PINMUX_GPIO(PTB5),
+ PINMUX_GPIO(PTB4),
+ PINMUX_GPIO(PTB3),
+ PINMUX_GPIO(PTB2),
+ PINMUX_GPIO(PTB1),
+ PINMUX_GPIO(PTB0),
/* PTC */
- PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
- PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
- PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
- PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
- PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
- PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
- PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
- PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+ PINMUX_GPIO(PTC7),
+ PINMUX_GPIO(PTC6),
+ PINMUX_GPIO(PTC5),
+ PINMUX_GPIO(PTC4),
+ PINMUX_GPIO(PTC3),
+ PINMUX_GPIO(PTC2),
+ PINMUX_GPIO(PTC1),
+ PINMUX_GPIO(PTC0),
/* PTD */
- PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
- PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
- PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
- PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
- PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
- PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
- PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
- PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+ PINMUX_GPIO(PTD7),
+ PINMUX_GPIO(PTD6),
+ PINMUX_GPIO(PTD5),
+ PINMUX_GPIO(PTD4),
+ PINMUX_GPIO(PTD3),
+ PINMUX_GPIO(PTD2),
+ PINMUX_GPIO(PTD1),
+ PINMUX_GPIO(PTD0),
/* PTE */
- PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
- PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
- PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
- PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
- PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
- PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+ PINMUX_GPIO(PTE5),
+ PINMUX_GPIO(PTE4),
+ PINMUX_GPIO(PTE3),
+ PINMUX_GPIO(PTE2),
+ PINMUX_GPIO(PTE1),
+ PINMUX_GPIO(PTE0),
/* PTF */
- PINMUX_GPIO(GPIO_PTF7, PTF7_DATA),
- PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
- PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
- PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
- PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
- PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
- PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
- PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+ PINMUX_GPIO(PTF7),
+ PINMUX_GPIO(PTF6),
+ PINMUX_GPIO(PTF5),
+ PINMUX_GPIO(PTF4),
+ PINMUX_GPIO(PTF3),
+ PINMUX_GPIO(PTF2),
+ PINMUX_GPIO(PTF1),
+ PINMUX_GPIO(PTF0),
/* PTG */
- PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
- PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
- PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
- PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
- PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
- PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+ PINMUX_GPIO(PTG5),
+ PINMUX_GPIO(PTG4),
+ PINMUX_GPIO(PTG3),
+ PINMUX_GPIO(PTG2),
+ PINMUX_GPIO(PTG1),
+ PINMUX_GPIO(PTG0),
/* PTH */
- PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
- PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
- PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
- PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
- PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
- PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
- PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
- PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+ PINMUX_GPIO(PTH7),
+ PINMUX_GPIO(PTH6),
+ PINMUX_GPIO(PTH5),
+ PINMUX_GPIO(PTH4),
+ PINMUX_GPIO(PTH3),
+ PINMUX_GPIO(PTH2),
+ PINMUX_GPIO(PTH1),
+ PINMUX_GPIO(PTH0),
/* PTJ */
- PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA),
- PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
- PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
- PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
- PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
- PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+ PINMUX_GPIO(PTJ7),
+ PINMUX_GPIO(PTJ5),
+ PINMUX_GPIO(PTJ3),
+ PINMUX_GPIO(PTJ2),
+ PINMUX_GPIO(PTJ1),
+ PINMUX_GPIO(PTJ0),
/* PTK */
- PINMUX_GPIO(GPIO_PTK7, PTK7_DATA),
- PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
- PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
- PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
- PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
- PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
- PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
- PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+ PINMUX_GPIO(PTK7),
+ PINMUX_GPIO(PTK6),
+ PINMUX_GPIO(PTK5),
+ PINMUX_GPIO(PTK4),
+ PINMUX_GPIO(PTK3),
+ PINMUX_GPIO(PTK2),
+ PINMUX_GPIO(PTK1),
+ PINMUX_GPIO(PTK0),
/* PTL */
- PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
- PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
- PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
- PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
- PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
- PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
- PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
- PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
+ PINMUX_GPIO(PTL7),
+ PINMUX_GPIO(PTL6),
+ PINMUX_GPIO(PTL5),
+ PINMUX_GPIO(PTL4),
+ PINMUX_GPIO(PTL3),
+ PINMUX_GPIO(PTL2),
+ PINMUX_GPIO(PTL1),
+ PINMUX_GPIO(PTL0),
/* PTM */
- PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
- PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
- PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
- PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
- PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
- PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
- PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
- PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+ PINMUX_GPIO(PTM7),
+ PINMUX_GPIO(PTM6),
+ PINMUX_GPIO(PTM5),
+ PINMUX_GPIO(PTM4),
+ PINMUX_GPIO(PTM3),
+ PINMUX_GPIO(PTM2),
+ PINMUX_GPIO(PTM1),
+ PINMUX_GPIO(PTM0),
/* PTN */
- PINMUX_GPIO(GPIO_PTN7, PTN7_DATA),
- PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
- PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
- PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
- PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
- PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
- PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
- PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
+ PINMUX_GPIO(PTN7),
+ PINMUX_GPIO(PTN6),
+ PINMUX_GPIO(PTN5),
+ PINMUX_GPIO(PTN4),
+ PINMUX_GPIO(PTN3),
+ PINMUX_GPIO(PTN2),
+ PINMUX_GPIO(PTN1),
+ PINMUX_GPIO(PTN0),
/* PTQ */
- PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
- PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
- PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
- PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
+ PINMUX_GPIO(PTQ3),
+ PINMUX_GPIO(PTQ2),
+ PINMUX_GPIO(PTQ1),
+ PINMUX_GPIO(PTQ0),
/* PTR */
- PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
- PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
- PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
- PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
- PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
- PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
- PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
- PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+ PINMUX_GPIO(PTR7),
+ PINMUX_GPIO(PTR6),
+ PINMUX_GPIO(PTR5),
+ PINMUX_GPIO(PTR4),
+ PINMUX_GPIO(PTR3),
+ PINMUX_GPIO(PTR2),
+ PINMUX_GPIO(PTR1),
+ PINMUX_GPIO(PTR0),
/* PTS */
- PINMUX_GPIO(GPIO_PTS7, PTS7_DATA),
- PINMUX_GPIO(GPIO_PTS6, PTS6_DATA),
- PINMUX_GPIO(GPIO_PTS5, PTS5_DATA),
- PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
- PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
- PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
- PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
- PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+ PINMUX_GPIO(PTS7),
+ PINMUX_GPIO(PTS6),
+ PINMUX_GPIO(PTS5),
+ PINMUX_GPIO(PTS4),
+ PINMUX_GPIO(PTS3),
+ PINMUX_GPIO(PTS2),
+ PINMUX_GPIO(PTS1),
+ PINMUX_GPIO(PTS0),
/* PTT */
- PINMUX_GPIO(GPIO_PTT5, PTT5_DATA),
- PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
- PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
- PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
- PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
- PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+ PINMUX_GPIO(PTT5),
+ PINMUX_GPIO(PTT4),
+ PINMUX_GPIO(PTT3),
+ PINMUX_GPIO(PTT2),
+ PINMUX_GPIO(PTT1),
+ PINMUX_GPIO(PTT0),
/* PTU */
- PINMUX_GPIO(GPIO_PTU5, PTU5_DATA),
- PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
- PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
- PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
- PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
- PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+ PINMUX_GPIO(PTU5),
+ PINMUX_GPIO(PTU4),
+ PINMUX_GPIO(PTU3),
+ PINMUX_GPIO(PTU2),
+ PINMUX_GPIO(PTU1),
+ PINMUX_GPIO(PTU0),
/* PTV */
- PINMUX_GPIO(GPIO_PTV7, PTV7_DATA),
- PINMUX_GPIO(GPIO_PTV6, PTV6_DATA),
- PINMUX_GPIO(GPIO_PTV5, PTV5_DATA),
- PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
- PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
- PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
- PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
- PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+ PINMUX_GPIO(PTV7),
+ PINMUX_GPIO(PTV6),
+ PINMUX_GPIO(PTV5),
+ PINMUX_GPIO(PTV4),
+ PINMUX_GPIO(PTV3),
+ PINMUX_GPIO(PTV2),
+ PINMUX_GPIO(PTV1),
+ PINMUX_GPIO(PTV0),
/* PTW */
- PINMUX_GPIO(GPIO_PTW7, PTW7_DATA),
- PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
- PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
- PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
- PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
- PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
- PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
- PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
+ PINMUX_GPIO(PTW7),
+ PINMUX_GPIO(PTW6),
+ PINMUX_GPIO(PTW5),
+ PINMUX_GPIO(PTW4),
+ PINMUX_GPIO(PTW3),
+ PINMUX_GPIO(PTW2),
+ PINMUX_GPIO(PTW1),
+ PINMUX_GPIO(PTW0),
/* PTX */
- PINMUX_GPIO(GPIO_PTX7, PTX7_DATA),
- PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
- PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
- PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
- PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
- PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
- PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
- PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
+ PINMUX_GPIO(PTX7),
+ PINMUX_GPIO(PTX6),
+ PINMUX_GPIO(PTX5),
+ PINMUX_GPIO(PTX4),
+ PINMUX_GPIO(PTX3),
+ PINMUX_GPIO(PTX2),
+ PINMUX_GPIO(PTX1),
+ PINMUX_GPIO(PTX0),
/* PTY */
- PINMUX_GPIO(GPIO_PTY7, PTY7_DATA),
- PINMUX_GPIO(GPIO_PTY6, PTY6_DATA),
- PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
- PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
- PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
- PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
- PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
- PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
+ PINMUX_GPIO(PTY7),
+ PINMUX_GPIO(PTY6),
+ PINMUX_GPIO(PTY5),
+ PINMUX_GPIO(PTY4),
+ PINMUX_GPIO(PTY3),
+ PINMUX_GPIO(PTY2),
+ PINMUX_GPIO(PTY1),
+ PINMUX_GPIO(PTY0),
/* PTZ */
- PINMUX_GPIO(GPIO_PTZ7, PTZ7_DATA),
- PINMUX_GPIO(GPIO_PTZ6, PTZ6_DATA),
- PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
- PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
- PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
- PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
- PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
- PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA),
+ PINMUX_GPIO(PTZ7),
+ PINMUX_GPIO(PTZ6),
+ PINMUX_GPIO(PTZ5),
+ PINMUX_GPIO(PTZ4),
+ PINMUX_GPIO(PTZ3),
+ PINMUX_GPIO(PTZ2),
+ PINMUX_GPIO(PTZ1),
+ PINMUX_GPIO(PTZ0),
};
#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
@@ -1520,11 +1514,11 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTA7_FN, PTA7_OUT, 0, PTA7_IN,
PTA6_FN, PTA6_OUT, 0, PTA6_IN,
PTA5_FN, PTA5_OUT, 0, PTA5_IN,
- PTA4_FN, PTA4_OUT, PTA4_IN_PU, PTA4_IN,
- PTA3_FN, PTA3_OUT, PTA3_IN_PU, PTA3_IN,
- PTA2_FN, PTA2_OUT, PTA2_IN_PU, PTA2_IN,
- PTA1_FN, PTA1_OUT, PTA1_IN_PU, PTA1_IN,
- PTA0_FN, PTA0_OUT, PTA0_IN_PU, PTA0_IN }
+ PTA4_FN, PTA4_OUT, 0, PTA4_IN,
+ PTA3_FN, PTA3_OUT, 0, PTA3_IN,
+ PTA2_FN, PTA2_OUT, 0, PTA2_IN,
+ PTA1_FN, PTA1_OUT, 0, PTA1_IN,
+ PTA0_FN, PTA0_OUT, 0, PTA0_IN }
},
{ PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
PTB7_FN, PTB7_OUT, 0, PTB7_IN,
@@ -1532,8 +1526,8 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTB5_FN, PTB5_OUT, 0, PTB5_IN,
PTB4_FN, PTB4_OUT, 0, PTB4_IN,
PTB3_FN, PTB3_OUT, 0, PTB3_IN,
- PTB2_FN, PTB2_OUT, PTB2_IN_PU, PTB2_IN,
- PTB1_FN, PTB1_OUT, PTB1_IN_PU, PTB1_IN,
+ PTB2_FN, PTB2_OUT, 0, PTB2_IN,
+ PTB1_FN, PTB1_OUT, 0, PTB1_IN,
PTB0_FN, PTB0_OUT, 0, PTB0_IN }
},
{ PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
@@ -1662,7 +1656,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTR5_FN, PTR5_OUT, 0, PTR5_IN,
PTR4_FN, PTR4_OUT, 0, PTR4_IN,
PTR3_FN, 0, 0, PTR3_IN,
- PTR2_FN, 0, PTR2_IN_PU, PTR2_IN,
+ PTR2_FN, 0, 0, PTR2_IN,
PTR1_FN, PTR1_OUT, 0, PTR1_IN,
PTR0_FN, PTR0_OUT, 0, PTR0_IN }
},
@@ -1888,7 +1882,6 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
const struct sh_pfc_soc_info sh7723_pinmux_info = {
.name = "sh7723_pfc",
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
- .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
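
The same transformation repeats for sh7723 and sh7724: the PINMUX_INPUT_PULLUP enum block goes away, pinmux_data[] narrows from pinmux_enum_t to u16, and the pulled-input column of each PINMUX_CFG_REG() row becomes 0. The sketch below illustrates why dropping a state only shortens a table entry; the macro body is an assumption for illustration and may not match the in-tree sh_pfc.h definition.

	/*
	 * Hypothetical sketch: each entry is the pin's _DATA value followed by
	 * the mux states it may resolve to, with a 0 terminator assumed here.
	 * Removing PTR2_IN_PU therefore turns
	 *   PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_IN_PU)
	 * into
	 *   PINMUX_DATA(PTR2_DATA, PTR2_IN)
	 * without touching any other state of the pin.
	 */
	#define PINMUX_DATA(data_or_mark, ids...)	data_or_mark, ids, 0
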
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7724.c b/drivers/pinctrl/sh-pfc/pfc-sh7724.c
index 35e55160980..1085ab556b8 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7724.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7724.c
@@ -117,52 +117,6 @@ enum {
PTZ3_IN, PTZ2_IN, PTZ1_IN, PTZ0_IN,
PINMUX_INPUT_END,
- PINMUX_INPUT_PULLUP_BEGIN,
- PTA7_IN_PU, PTA6_IN_PU, PTA5_IN_PU, PTA4_IN_PU,
- PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
- PTB7_IN_PU, PTB6_IN_PU, PTB5_IN_PU, PTB4_IN_PU,
- PTB3_IN_PU, PTB2_IN_PU, PTB1_IN_PU, PTB0_IN_PU,
- PTC7_IN_PU, PTC6_IN_PU, PTC5_IN_PU, PTC4_IN_PU,
- PTC3_IN_PU, PTC2_IN_PU, PTC1_IN_PU, PTC0_IN_PU,
- PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU,
- PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTD0_IN_PU,
- PTE7_IN_PU, PTE6_IN_PU, PTE5_IN_PU, PTE4_IN_PU,
- PTE3_IN_PU, PTE2_IN_PU, PTE1_IN_PU, PTE0_IN_PU,
- PTF7_IN_PU, PTF6_IN_PU, PTF5_IN_PU, PTF4_IN_PU,
- PTF3_IN_PU, PTF2_IN_PU, PTF1_IN_PU, PTF0_IN_PU,
- PTH7_IN_PU, PTH6_IN_PU, PTH5_IN_PU, PTH4_IN_PU,
- PTH3_IN_PU, PTH2_IN_PU, PTH1_IN_PU, PTH0_IN_PU,
- PTJ3_IN_PU, PTJ2_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU,
- PTK7_IN_PU, PTK6_IN_PU, PTK5_IN_PU, PTK4_IN_PU,
- PTK3_IN_PU, PTK2_IN_PU, PTK1_IN_PU, PTK0_IN_PU,
- PTL7_IN_PU, PTL6_IN_PU, PTL5_IN_PU, PTL4_IN_PU,
- PTL3_IN_PU, PTL2_IN_PU, PTL1_IN_PU, PTL0_IN_PU,
- PTM7_IN_PU, PTM6_IN_PU, PTM5_IN_PU, PTM4_IN_PU,
- PTM3_IN_PU, PTM2_IN_PU, PTM1_IN_PU, PTM0_IN_PU,
- PTN7_IN_PU, PTN6_IN_PU, PTN5_IN_PU, PTN4_IN_PU,
- PTN3_IN_PU, PTN2_IN_PU, PTN1_IN_PU, PTN0_IN_PU,
- PTQ7_IN_PU, PTQ6_IN_PU, PTQ5_IN_PU, PTQ4_IN_PU,
- PTQ3_IN_PU, PTQ2_IN_PU, PTQ1_IN_PU, PTQ0_IN_PU,
- PTR7_IN_PU, PTR6_IN_PU, PTR5_IN_PU, PTR4_IN_PU,
- PTR3_IN_PU, PTR2_IN_PU, PTR1_IN_PU, PTR0_IN_PU,
- PTS6_IN_PU, PTS5_IN_PU, PTS4_IN_PU,
- PTS3_IN_PU, PTS2_IN_PU, PTS1_IN_PU, PTS0_IN_PU,
- PTT7_IN_PU, PTT6_IN_PU, PTT5_IN_PU, PTT4_IN_PU,
- PTT3_IN_PU, PTT2_IN_PU, PTT1_IN_PU, PTT0_IN_PU,
- PTU7_IN_PU, PTU6_IN_PU, PTU5_IN_PU, PTU4_IN_PU,
- PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU,
- PTV7_IN_PU, PTV6_IN_PU, PTV5_IN_PU, PTV4_IN_PU,
- PTV3_IN_PU, PTV2_IN_PU, PTV1_IN_PU, PTV0_IN_PU,
- PTW7_IN_PU, PTW6_IN_PU, PTW5_IN_PU, PTW4_IN_PU,
- PTW3_IN_PU, PTW2_IN_PU, PTW1_IN_PU, PTW0_IN_PU,
- PTX7_IN_PU, PTX6_IN_PU, PTX5_IN_PU, PTX4_IN_PU,
- PTX3_IN_PU, PTX2_IN_PU, PTX1_IN_PU, PTX0_IN_PU,
- PTY7_IN_PU, PTY6_IN_PU, PTY5_IN_PU, PTY4_IN_PU,
- PTY3_IN_PU, PTY2_IN_PU, PTY1_IN_PU, PTY0_IN_PU,
- PTZ7_IN_PU, PTZ6_IN_PU, PTZ5_IN_PU, PTZ4_IN_PU,
- PTZ3_IN_PU, PTZ2_IN_PU, PTZ1_IN_PU, PTZ0_IN_PU,
- PINMUX_INPUT_PULLUP_END,
-
PINMUX_OUTPUT_BEGIN,
PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
@@ -572,66 +526,66 @@ enum {
PINMUX_MARK_END,
};
-static const pinmux_enum_t pinmux_data[] = {
+static const u16 pinmux_data[] = {
/* PTA GPIO */
- PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT, PTA7_IN_PU),
- PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT, PTA6_IN_PU),
- PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT, PTA5_IN_PU),
- PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT, PTA4_IN_PU),
- PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT, PTA3_IN_PU),
- PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT, PTA2_IN_PU),
- PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT, PTA1_IN_PU),
- PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT, PTA0_IN_PU),
+ PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT),
+ PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT),
+ PINMUX_DATA(PTA5_DATA, PTA5_IN, PTA5_OUT),
+ PINMUX_DATA(PTA4_DATA, PTA4_IN, PTA4_OUT),
+ PINMUX_DATA(PTA3_DATA, PTA3_IN, PTA3_OUT),
+ PINMUX_DATA(PTA2_DATA, PTA2_IN, PTA2_OUT),
+ PINMUX_DATA(PTA1_DATA, PTA1_IN, PTA1_OUT),
+ PINMUX_DATA(PTA0_DATA, PTA0_IN, PTA0_OUT),
/* PTB GPIO */
- PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT, PTB7_IN_PU),
- PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT, PTB6_IN_PU),
- PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT, PTB5_IN_PU),
- PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT, PTB4_IN_PU),
- PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT, PTB3_IN_PU),
- PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT, PTB2_IN_PU),
- PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT, PTB1_IN_PU),
- PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT, PTB0_IN_PU),
+ PINMUX_DATA(PTB7_DATA, PTB7_IN, PTB7_OUT),
+ PINMUX_DATA(PTB6_DATA, PTB6_IN, PTB6_OUT),
+ PINMUX_DATA(PTB5_DATA, PTB5_IN, PTB5_OUT),
+ PINMUX_DATA(PTB4_DATA, PTB4_IN, PTB4_OUT),
+ PINMUX_DATA(PTB3_DATA, PTB3_IN, PTB3_OUT),
+ PINMUX_DATA(PTB2_DATA, PTB2_IN, PTB2_OUT),
+ PINMUX_DATA(PTB1_DATA, PTB1_IN, PTB1_OUT),
+ PINMUX_DATA(PTB0_DATA, PTB0_IN, PTB0_OUT),
/* PTC GPIO */
- PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT, PTC7_IN_PU),
- PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT, PTC6_IN_PU),
- PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT, PTC5_IN_PU),
- PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT, PTC4_IN_PU),
- PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT, PTC3_IN_PU),
- PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT, PTC2_IN_PU),
- PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT, PTC1_IN_PU),
- PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT, PTC0_IN_PU),
+ PINMUX_DATA(PTC7_DATA, PTC7_IN, PTC7_OUT),
+ PINMUX_DATA(PTC6_DATA, PTC6_IN, PTC6_OUT),
+ PINMUX_DATA(PTC5_DATA, PTC5_IN, PTC5_OUT),
+ PINMUX_DATA(PTC4_DATA, PTC4_IN, PTC4_OUT),
+ PINMUX_DATA(PTC3_DATA, PTC3_IN, PTC3_OUT),
+ PINMUX_DATA(PTC2_DATA, PTC2_IN, PTC2_OUT),
+ PINMUX_DATA(PTC1_DATA, PTC1_IN, PTC1_OUT),
+ PINMUX_DATA(PTC0_DATA, PTC0_IN, PTC0_OUT),
/* PTD GPIO */
- PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT, PTD7_IN_PU),
- PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT, PTD6_IN_PU),
- PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT, PTD5_IN_PU),
- PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT, PTD4_IN_PU),
- PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT, PTD3_IN_PU),
- PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT, PTD2_IN_PU),
- PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT, PTD1_IN_PU),
- PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT, PTD0_IN_PU),
+ PINMUX_DATA(PTD7_DATA, PTD7_IN, PTD7_OUT),
+ PINMUX_DATA(PTD6_DATA, PTD6_IN, PTD6_OUT),
+ PINMUX_DATA(PTD5_DATA, PTD5_IN, PTD5_OUT),
+ PINMUX_DATA(PTD4_DATA, PTD4_IN, PTD4_OUT),
+ PINMUX_DATA(PTD3_DATA, PTD3_IN, PTD3_OUT),
+ PINMUX_DATA(PTD2_DATA, PTD2_IN, PTD2_OUT),
+ PINMUX_DATA(PTD1_DATA, PTD1_IN, PTD1_OUT),
+ PINMUX_DATA(PTD0_DATA, PTD0_IN, PTD0_OUT),
/* PTE GPIO */
- PINMUX_DATA(PTE7_DATA, PTE7_IN, PTE7_OUT, PTE7_IN_PU),
- PINMUX_DATA(PTE6_DATA, PTE6_IN, PTE6_OUT, PTE6_IN_PU),
- PINMUX_DATA(PTE5_DATA, PTE5_IN, PTE5_OUT, PTE5_IN_PU),
- PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT, PTE4_IN_PU),
- PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT, PTE3_IN_PU),
- PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT, PTE2_IN_PU),
- PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT, PTE1_IN_PU),
- PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT, PTE0_IN_PU),
+ PINMUX_DATA(PTE7_DATA, PTE7_IN, PTE7_OUT),
+ PINMUX_DATA(PTE6_DATA, PTE6_IN, PTE6_OUT),
+ PINMUX_DATA(PTE5_DATA, PTE5_IN, PTE5_OUT),
+ PINMUX_DATA(PTE4_DATA, PTE4_IN, PTE4_OUT),
+ PINMUX_DATA(PTE3_DATA, PTE3_IN, PTE3_OUT),
+ PINMUX_DATA(PTE2_DATA, PTE2_IN, PTE2_OUT),
+ PINMUX_DATA(PTE1_DATA, PTE1_IN, PTE1_OUT),
+ PINMUX_DATA(PTE0_DATA, PTE0_IN, PTE0_OUT),
/* PTF GPIO */
- PINMUX_DATA(PTF7_DATA, PTF7_IN, PTF7_OUT, PTF7_IN_PU),
- PINMUX_DATA(PTF6_DATA, PTF6_IN, PTF6_OUT, PTF6_IN_PU),
- PINMUX_DATA(PTF5_DATA, PTF5_IN, PTF5_OUT, PTF5_IN_PU),
- PINMUX_DATA(PTF4_DATA, PTF4_IN, PTF4_OUT, PTF4_IN_PU),
- PINMUX_DATA(PTF3_DATA, PTF3_IN, PTF3_OUT, PTF3_IN_PU),
- PINMUX_DATA(PTF2_DATA, PTF2_IN, PTF2_OUT, PTF2_IN_PU),
- PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_OUT, PTF1_IN_PU),
- PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT, PTF0_IN_PU),
+ PINMUX_DATA(PTF7_DATA, PTF7_IN, PTF7_OUT),
+ PINMUX_DATA(PTF6_DATA, PTF6_IN, PTF6_OUT),
+ PINMUX_DATA(PTF5_DATA, PTF5_IN, PTF5_OUT),
+ PINMUX_DATA(PTF4_DATA, PTF4_IN, PTF4_OUT),
+ PINMUX_DATA(PTF3_DATA, PTF3_IN, PTF3_OUT),
+ PINMUX_DATA(PTF2_DATA, PTF2_IN, PTF2_OUT),
+ PINMUX_DATA(PTF1_DATA, PTF1_IN, PTF1_OUT),
+ PINMUX_DATA(PTF0_DATA, PTF0_IN, PTF0_OUT),
/* PTG GPIO */
PINMUX_DATA(PTG5_DATA, PTG5_OUT),
@@ -642,162 +596,162 @@ static const pinmux_enum_t pinmux_data[] = {
PINMUX_DATA(PTG0_DATA, PTG0_OUT),
/* PTH GPIO */
- PINMUX_DATA(PTH7_DATA, PTH7_IN, PTH7_OUT, PTH7_IN_PU),
- PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT, PTH6_IN_PU),
- PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT, PTH5_IN_PU),
- PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT, PTH4_IN_PU),
- PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT, PTH3_IN_PU),
- PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT, PTH2_IN_PU),
- PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT, PTH1_IN_PU),
- PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT, PTH0_IN_PU),
+ PINMUX_DATA(PTH7_DATA, PTH7_IN, PTH7_OUT),
+ PINMUX_DATA(PTH6_DATA, PTH6_IN, PTH6_OUT),
+ PINMUX_DATA(PTH5_DATA, PTH5_IN, PTH5_OUT),
+ PINMUX_DATA(PTH4_DATA, PTH4_IN, PTH4_OUT),
+ PINMUX_DATA(PTH3_DATA, PTH3_IN, PTH3_OUT),
+ PINMUX_DATA(PTH2_DATA, PTH2_IN, PTH2_OUT),
+ PINMUX_DATA(PTH1_DATA, PTH1_IN, PTH1_OUT),
+ PINMUX_DATA(PTH0_DATA, PTH0_IN, PTH0_OUT),
/* PTJ GPIO */
PINMUX_DATA(PTJ7_DATA, PTJ7_OUT),
PINMUX_DATA(PTJ6_DATA, PTJ6_OUT),
PINMUX_DATA(PTJ5_DATA, PTJ5_OUT),
- PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT, PTJ3_IN_PU),
- PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT, PTJ2_IN_PU),
- PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT, PTJ1_IN_PU),
- PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT, PTJ0_IN_PU),
+ PINMUX_DATA(PTJ3_DATA, PTJ3_IN, PTJ3_OUT),
+ PINMUX_DATA(PTJ2_DATA, PTJ2_IN, PTJ2_OUT),
+ PINMUX_DATA(PTJ1_DATA, PTJ1_IN, PTJ1_OUT),
+ PINMUX_DATA(PTJ0_DATA, PTJ0_IN, PTJ0_OUT),
/* PTK GPIO */
- PINMUX_DATA(PTK7_DATA, PTK7_IN, PTK7_OUT, PTK7_IN_PU),
- PINMUX_DATA(PTK6_DATA, PTK6_IN, PTK6_OUT, PTK6_IN_PU),
- PINMUX_DATA(PTK5_DATA, PTK5_IN, PTK5_OUT, PTK5_IN_PU),
- PINMUX_DATA(PTK4_DATA, PTK4_IN, PTK4_OUT, PTK4_IN_PU),
- PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT, PTK3_IN_PU),
- PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT, PTK2_IN_PU),
- PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT, PTK1_IN_PU),
- PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT, PTK0_IN_PU),
+ PINMUX_DATA(PTK7_DATA, PTK7_IN, PTK7_OUT),
+ PINMUX_DATA(PTK6_DATA, PTK6_IN, PTK6_OUT),
+ PINMUX_DATA(PTK5_DATA, PTK5_IN, PTK5_OUT),
+ PINMUX_DATA(PTK4_DATA, PTK4_IN, PTK4_OUT),
+ PINMUX_DATA(PTK3_DATA, PTK3_IN, PTK3_OUT),
+ PINMUX_DATA(PTK2_DATA, PTK2_IN, PTK2_OUT),
+ PINMUX_DATA(PTK1_DATA, PTK1_IN, PTK1_OUT),
+ PINMUX_DATA(PTK0_DATA, PTK0_IN, PTK0_OUT),
/* PTL GPIO */
- PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT, PTL7_IN_PU),
- PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT, PTL6_IN_PU),
- PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT, PTL5_IN_PU),
- PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT, PTL4_IN_PU),
- PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT, PTL3_IN_PU),
- PINMUX_DATA(PTL2_DATA, PTL2_IN, PTL2_OUT, PTL2_IN_PU),
- PINMUX_DATA(PTL1_DATA, PTL1_IN, PTL1_OUT, PTL1_IN_PU),
- PINMUX_DATA(PTL0_DATA, PTL0_IN, PTL0_OUT, PTL0_IN_PU),
+ PINMUX_DATA(PTL7_DATA, PTL7_IN, PTL7_OUT),
+ PINMUX_DATA(PTL6_DATA, PTL6_IN, PTL6_OUT),
+ PINMUX_DATA(PTL5_DATA, PTL5_IN, PTL5_OUT),
+ PINMUX_DATA(PTL4_DATA, PTL4_IN, PTL4_OUT),
+ PINMUX_DATA(PTL3_DATA, PTL3_IN, PTL3_OUT),
+ PINMUX_DATA(PTL2_DATA, PTL2_IN, PTL2_OUT),
+ PINMUX_DATA(PTL1_DATA, PTL1_IN, PTL1_OUT),
+ PINMUX_DATA(PTL0_DATA, PTL0_IN, PTL0_OUT),
/* PTM GPIO */
- PINMUX_DATA(PTM7_DATA, PTM7_IN, PTM7_OUT, PTM7_IN_PU),
- PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT, PTM6_IN_PU),
- PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT, PTM5_IN_PU),
- PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT, PTM4_IN_PU),
- PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT, PTM3_IN_PU),
- PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT, PTM2_IN_PU),
- PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT, PTM1_IN_PU),
- PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT, PTM0_IN_PU),
+ PINMUX_DATA(PTM7_DATA, PTM7_IN, PTM7_OUT),
+ PINMUX_DATA(PTM6_DATA, PTM6_IN, PTM6_OUT),
+ PINMUX_DATA(PTM5_DATA, PTM5_IN, PTM5_OUT),
+ PINMUX_DATA(PTM4_DATA, PTM4_IN, PTM4_OUT),
+ PINMUX_DATA(PTM3_DATA, PTM3_IN, PTM3_OUT),
+ PINMUX_DATA(PTM2_DATA, PTM2_IN, PTM2_OUT),
+ PINMUX_DATA(PTM1_DATA, PTM1_IN, PTM1_OUT),
+ PINMUX_DATA(PTM0_DATA, PTM0_IN, PTM0_OUT),
/* PTN GPIO */
- PINMUX_DATA(PTN7_DATA, PTN7_IN, PTN7_OUT, PTN7_IN_PU),
- PINMUX_DATA(PTN6_DATA, PTN6_IN, PTN6_OUT, PTN6_IN_PU),
- PINMUX_DATA(PTN5_DATA, PTN5_IN, PTN5_OUT, PTN5_IN_PU),
- PINMUX_DATA(PTN4_DATA, PTN4_IN, PTN4_OUT, PTN4_IN_PU),
- PINMUX_DATA(PTN3_DATA, PTN3_IN, PTN3_OUT, PTN3_IN_PU),
- PINMUX_DATA(PTN2_DATA, PTN2_IN, PTN2_OUT, PTN2_IN_PU),
- PINMUX_DATA(PTN1_DATA, PTN1_IN, PTN1_OUT, PTN1_IN_PU),
- PINMUX_DATA(PTN0_DATA, PTN0_IN, PTN0_OUT, PTN0_IN_PU),
+ PINMUX_DATA(PTN7_DATA, PTN7_IN, PTN7_OUT),
+ PINMUX_DATA(PTN6_DATA, PTN6_IN, PTN6_OUT),
+ PINMUX_DATA(PTN5_DATA, PTN5_IN, PTN5_OUT),
+ PINMUX_DATA(PTN4_DATA, PTN4_IN, PTN4_OUT),
+ PINMUX_DATA(PTN3_DATA, PTN3_IN, PTN3_OUT),
+ PINMUX_DATA(PTN2_DATA, PTN2_IN, PTN2_OUT),
+ PINMUX_DATA(PTN1_DATA, PTN1_IN, PTN1_OUT),
+ PINMUX_DATA(PTN0_DATA, PTN0_IN, PTN0_OUT),
/* PTQ GPIO */
- PINMUX_DATA(PTQ7_DATA, PTQ7_IN, PTQ7_OUT, PTQ7_IN_PU),
- PINMUX_DATA(PTQ6_DATA, PTQ6_IN, PTQ6_OUT, PTQ6_IN_PU),
- PINMUX_DATA(PTQ5_DATA, PTQ5_IN, PTQ5_OUT, PTQ5_IN_PU),
- PINMUX_DATA(PTQ4_DATA, PTQ4_IN, PTQ4_OUT, PTQ4_IN_PU),
- PINMUX_DATA(PTQ3_DATA, PTQ3_IN, PTQ3_OUT, PTQ3_IN_PU),
- PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_OUT, PTQ2_IN_PU),
- PINMUX_DATA(PTQ1_DATA, PTQ1_IN, PTQ1_OUT, PTQ1_IN_PU),
- PINMUX_DATA(PTQ0_DATA, PTQ0_IN, PTQ0_OUT, PTQ0_IN_PU),
+ PINMUX_DATA(PTQ7_DATA, PTQ7_IN, PTQ7_OUT),
+ PINMUX_DATA(PTQ6_DATA, PTQ6_IN, PTQ6_OUT),
+ PINMUX_DATA(PTQ5_DATA, PTQ5_IN, PTQ5_OUT),
+ PINMUX_DATA(PTQ4_DATA, PTQ4_IN, PTQ4_OUT),
+ PINMUX_DATA(PTQ3_DATA, PTQ3_IN, PTQ3_OUT),
+ PINMUX_DATA(PTQ2_DATA, PTQ2_IN, PTQ2_OUT),
+ PINMUX_DATA(PTQ1_DATA, PTQ1_IN, PTQ1_OUT),
+ PINMUX_DATA(PTQ0_DATA, PTQ0_IN, PTQ0_OUT),
/* PTR GPIO */
- PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT, PTR7_IN_PU),
- PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT, PTR6_IN_PU),
- PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT, PTR5_IN_PU),
- PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT, PTR4_IN_PU),
- PINMUX_DATA(PTR3_DATA, PTR3_IN, PTR3_IN_PU),
- PINMUX_DATA(PTR2_DATA, PTR2_IN, PTR2_IN_PU),
- PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT, PTR1_IN_PU),
- PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT, PTR0_IN_PU),
+ PINMUX_DATA(PTR7_DATA, PTR7_IN, PTR7_OUT),
+ PINMUX_DATA(PTR6_DATA, PTR6_IN, PTR6_OUT),
+ PINMUX_DATA(PTR5_DATA, PTR5_IN, PTR5_OUT),
+ PINMUX_DATA(PTR4_DATA, PTR4_IN, PTR4_OUT),
+ PINMUX_DATA(PTR3_DATA, PTR3_IN),
+ PINMUX_DATA(PTR2_DATA, PTR2_IN),
+ PINMUX_DATA(PTR1_DATA, PTR1_IN, PTR1_OUT),
+ PINMUX_DATA(PTR0_DATA, PTR0_IN, PTR0_OUT),
/* PTS GPIO */
- PINMUX_DATA(PTS6_DATA, PTS6_IN, PTS6_OUT, PTS6_IN_PU),
- PINMUX_DATA(PTS5_DATA, PTS5_IN, PTS5_OUT, PTS5_IN_PU),
- PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT, PTS4_IN_PU),
- PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT, PTS3_IN_PU),
- PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT, PTS2_IN_PU),
- PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT, PTS1_IN_PU),
- PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT, PTS0_IN_PU),
+ PINMUX_DATA(PTS6_DATA, PTS6_IN, PTS6_OUT),
+ PINMUX_DATA(PTS5_DATA, PTS5_IN, PTS5_OUT),
+ PINMUX_DATA(PTS4_DATA, PTS4_IN, PTS4_OUT),
+ PINMUX_DATA(PTS3_DATA, PTS3_IN, PTS3_OUT),
+ PINMUX_DATA(PTS2_DATA, PTS2_IN, PTS2_OUT),
+ PINMUX_DATA(PTS1_DATA, PTS1_IN, PTS1_OUT),
+ PINMUX_DATA(PTS0_DATA, PTS0_IN, PTS0_OUT),
/* PTT GPIO */
- PINMUX_DATA(PTT7_DATA, PTT7_IN, PTT7_OUT, PTT7_IN_PU),
- PINMUX_DATA(PTT6_DATA, PTT6_IN, PTT6_OUT, PTT6_IN_PU),
- PINMUX_DATA(PTT5_DATA, PTT5_IN, PTT5_OUT, PTT5_IN_PU),
- PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT, PTT4_IN_PU),
- PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT, PTT3_IN_PU),
- PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT, PTT2_IN_PU),
- PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT, PTT1_IN_PU),
- PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT, PTT0_IN_PU),
+ PINMUX_DATA(PTT7_DATA, PTT7_IN, PTT7_OUT),
+ PINMUX_DATA(PTT6_DATA, PTT6_IN, PTT6_OUT),
+ PINMUX_DATA(PTT5_DATA, PTT5_IN, PTT5_OUT),
+ PINMUX_DATA(PTT4_DATA, PTT4_IN, PTT4_OUT),
+ PINMUX_DATA(PTT3_DATA, PTT3_IN, PTT3_OUT),
+ PINMUX_DATA(PTT2_DATA, PTT2_IN, PTT2_OUT),
+ PINMUX_DATA(PTT1_DATA, PTT1_IN, PTT1_OUT),
+ PINMUX_DATA(PTT0_DATA, PTT0_IN, PTT0_OUT),
/* PTU GPIO */
- PINMUX_DATA(PTU7_DATA, PTU7_IN, PTU7_OUT, PTU7_IN_PU),
- PINMUX_DATA(PTU6_DATA, PTU6_IN, PTU6_OUT, PTU6_IN_PU),
- PINMUX_DATA(PTU5_DATA, PTU5_IN, PTU5_OUT, PTU5_IN_PU),
- PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT, PTU4_IN_PU),
- PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT, PTU3_IN_PU),
- PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT, PTU2_IN_PU),
- PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT, PTU1_IN_PU),
- PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT, PTU0_IN_PU),
+ PINMUX_DATA(PTU7_DATA, PTU7_IN, PTU7_OUT),
+ PINMUX_DATA(PTU6_DATA, PTU6_IN, PTU6_OUT),
+ PINMUX_DATA(PTU5_DATA, PTU5_IN, PTU5_OUT),
+ PINMUX_DATA(PTU4_DATA, PTU4_IN, PTU4_OUT),
+ PINMUX_DATA(PTU3_DATA, PTU3_IN, PTU3_OUT),
+ PINMUX_DATA(PTU2_DATA, PTU2_IN, PTU2_OUT),
+ PINMUX_DATA(PTU1_DATA, PTU1_IN, PTU1_OUT),
+ PINMUX_DATA(PTU0_DATA, PTU0_IN, PTU0_OUT),
/* PTV GPIO */
- PINMUX_DATA(PTV7_DATA, PTV7_IN, PTV7_OUT, PTV7_IN_PU),
- PINMUX_DATA(PTV6_DATA, PTV6_IN, PTV6_OUT, PTV6_IN_PU),
- PINMUX_DATA(PTV5_DATA, PTV5_IN, PTV5_OUT, PTV5_IN_PU),
- PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT, PTV4_IN_PU),
- PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT, PTV3_IN_PU),
- PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT, PTV2_IN_PU),
- PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT, PTV1_IN_PU),
- PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT, PTV0_IN_PU),
+ PINMUX_DATA(PTV7_DATA, PTV7_IN, PTV7_OUT),
+ PINMUX_DATA(PTV6_DATA, PTV6_IN, PTV6_OUT),
+ PINMUX_DATA(PTV5_DATA, PTV5_IN, PTV5_OUT),
+ PINMUX_DATA(PTV4_DATA, PTV4_IN, PTV4_OUT),
+ PINMUX_DATA(PTV3_DATA, PTV3_IN, PTV3_OUT),
+ PINMUX_DATA(PTV2_DATA, PTV2_IN, PTV2_OUT),
+ PINMUX_DATA(PTV1_DATA, PTV1_IN, PTV1_OUT),
+ PINMUX_DATA(PTV0_DATA, PTV0_IN, PTV0_OUT),
/* PTW GPIO */
- PINMUX_DATA(PTW7_DATA, PTW7_IN, PTW7_OUT, PTW7_IN_PU),
- PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_OUT, PTW6_IN_PU),
- PINMUX_DATA(PTW5_DATA, PTW5_IN, PTW5_OUT, PTW5_IN_PU),
- PINMUX_DATA(PTW4_DATA, PTW4_IN, PTW4_OUT, PTW4_IN_PU),
- PINMUX_DATA(PTW3_DATA, PTW3_IN, PTW3_OUT, PTW3_IN_PU),
- PINMUX_DATA(PTW2_DATA, PTW2_IN, PTW2_OUT, PTW2_IN_PU),
- PINMUX_DATA(PTW1_DATA, PTW1_IN, PTW1_OUT, PTW1_IN_PU),
- PINMUX_DATA(PTW0_DATA, PTW0_IN, PTW0_OUT, PTW0_IN_PU),
+ PINMUX_DATA(PTW7_DATA, PTW7_IN, PTW7_OUT),
+ PINMUX_DATA(PTW6_DATA, PTW6_IN, PTW6_OUT),
+ PINMUX_DATA(PTW5_DATA, PTW5_IN, PTW5_OUT),
+ PINMUX_DATA(PTW4_DATA, PTW4_IN, PTW4_OUT),
+ PINMUX_DATA(PTW3_DATA, PTW3_IN, PTW3_OUT),
+ PINMUX_DATA(PTW2_DATA, PTW2_IN, PTW2_OUT),
+ PINMUX_DATA(PTW1_DATA, PTW1_IN, PTW1_OUT),
+ PINMUX_DATA(PTW0_DATA, PTW0_IN, PTW0_OUT),
/* PTX GPIO */
- PINMUX_DATA(PTX7_DATA, PTX7_IN, PTX7_OUT, PTX7_IN_PU),
- PINMUX_DATA(PTX6_DATA, PTX6_IN, PTX6_OUT, PTX6_IN_PU),
- PINMUX_DATA(PTX5_DATA, PTX5_IN, PTX5_OUT, PTX5_IN_PU),
- PINMUX_DATA(PTX4_DATA, PTX4_IN, PTX4_OUT, PTX4_IN_PU),
- PINMUX_DATA(PTX3_DATA, PTX3_IN, PTX3_OUT, PTX3_IN_PU),
- PINMUX_DATA(PTX2_DATA, PTX2_IN, PTX2_OUT, PTX2_IN_PU),
- PINMUX_DATA(PTX1_DATA, PTX1_IN, PTX1_OUT, PTX1_IN_PU),
- PINMUX_DATA(PTX0_DATA, PTX0_IN, PTX0_OUT, PTX0_IN_PU),
+ PINMUX_DATA(PTX7_DATA, PTX7_IN, PTX7_OUT),
+ PINMUX_DATA(PTX6_DATA, PTX6_IN, PTX6_OUT),
+ PINMUX_DATA(PTX5_DATA, PTX5_IN, PTX5_OUT),
+ PINMUX_DATA(PTX4_DATA, PTX4_IN, PTX4_OUT),
+ PINMUX_DATA(PTX3_DATA, PTX3_IN, PTX3_OUT),
+ PINMUX_DATA(PTX2_DATA, PTX2_IN, PTX2_OUT),
+ PINMUX_DATA(PTX1_DATA, PTX1_IN, PTX1_OUT),
+ PINMUX_DATA(PTX0_DATA, PTX0_IN, PTX0_OUT),
/* PTY GPIO */
- PINMUX_DATA(PTY7_DATA, PTY7_IN, PTY7_OUT, PTY7_IN_PU),
- PINMUX_DATA(PTY6_DATA, PTY6_IN, PTY6_OUT, PTY6_IN_PU),
- PINMUX_DATA(PTY5_DATA, PTY5_IN, PTY5_OUT, PTY5_IN_PU),
- PINMUX_DATA(PTY4_DATA, PTY4_IN, PTY4_OUT, PTY4_IN_PU),
- PINMUX_DATA(PTY3_DATA, PTY3_IN, PTY3_OUT, PTY3_IN_PU),
- PINMUX_DATA(PTY2_DATA, PTY2_IN, PTY2_OUT, PTY2_IN_PU),
- PINMUX_DATA(PTY1_DATA, PTY1_IN, PTY1_OUT, PTY1_IN_PU),
- PINMUX_DATA(PTY0_DATA, PTY0_IN, PTY0_OUT, PTY0_IN_PU),
+ PINMUX_DATA(PTY7_DATA, PTY7_IN, PTY7_OUT),
+ PINMUX_DATA(PTY6_DATA, PTY6_IN, PTY6_OUT),
+ PINMUX_DATA(PTY5_DATA, PTY5_IN, PTY5_OUT),
+ PINMUX_DATA(PTY4_DATA, PTY4_IN, PTY4_OUT),
+ PINMUX_DATA(PTY3_DATA, PTY3_IN, PTY3_OUT),
+ PINMUX_DATA(PTY2_DATA, PTY2_IN, PTY2_OUT),
+ PINMUX_DATA(PTY1_DATA, PTY1_IN, PTY1_OUT),
+ PINMUX_DATA(PTY0_DATA, PTY0_IN, PTY0_OUT),
/* PTZ GPIO */
- PINMUX_DATA(PTZ7_DATA, PTZ7_IN, PTZ7_OUT, PTZ7_IN_PU),
- PINMUX_DATA(PTZ6_DATA, PTZ6_IN, PTZ6_OUT, PTZ6_IN_PU),
- PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_OUT, PTZ5_IN_PU),
- PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_OUT, PTZ4_IN_PU),
- PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_OUT, PTZ3_IN_PU),
- PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_OUT, PTZ2_IN_PU),
- PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_OUT, PTZ1_IN_PU),
- PINMUX_DATA(PTZ0_DATA, PTZ0_IN, PTZ0_OUT, PTZ0_IN_PU),
+ PINMUX_DATA(PTZ7_DATA, PTZ7_IN, PTZ7_OUT),
+ PINMUX_DATA(PTZ6_DATA, PTZ6_IN, PTZ6_OUT),
+ PINMUX_DATA(PTZ5_DATA, PTZ5_IN, PTZ5_OUT),
+ PINMUX_DATA(PTZ4_DATA, PTZ4_IN, PTZ4_OUT),
+ PINMUX_DATA(PTZ3_DATA, PTZ3_IN, PTZ3_OUT),
+ PINMUX_DATA(PTZ2_DATA, PTZ2_IN, PTZ2_OUT),
+ PINMUX_DATA(PTZ1_DATA, PTZ1_IN, PTZ1_OUT),
+ PINMUX_DATA(PTZ0_DATA, PTZ0_IN, PTZ0_OUT),
/* PTA FN */
PINMUX_DATA(D23_MARK, PSA15_0, PSA14_0, PTA7_FN),
@@ -1194,230 +1148,230 @@ static const pinmux_enum_t pinmux_data[] = {
static struct sh_pfc_pin pinmux_pins[] = {
/* PTA */
- PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
- PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
- PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
- PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
- PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
- PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
- PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
- PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+ PINMUX_GPIO(PTA7),
+ PINMUX_GPIO(PTA6),
+ PINMUX_GPIO(PTA5),
+ PINMUX_GPIO(PTA4),
+ PINMUX_GPIO(PTA3),
+ PINMUX_GPIO(PTA2),
+ PINMUX_GPIO(PTA1),
+ PINMUX_GPIO(PTA0),
/* PTB */
- PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
- PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
- PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
- PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
- PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
- PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
- PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
- PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+ PINMUX_GPIO(PTB7),
+ PINMUX_GPIO(PTB6),
+ PINMUX_GPIO(PTB5),
+ PINMUX_GPIO(PTB4),
+ PINMUX_GPIO(PTB3),
+ PINMUX_GPIO(PTB2),
+ PINMUX_GPIO(PTB1),
+ PINMUX_GPIO(PTB0),
/* PTC */
- PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
- PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
- PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
- PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
- PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
- PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
- PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
- PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+ PINMUX_GPIO(PTC7),
+ PINMUX_GPIO(PTC6),
+ PINMUX_GPIO(PTC5),
+ PINMUX_GPIO(PTC4),
+ PINMUX_GPIO(PTC3),
+ PINMUX_GPIO(PTC2),
+ PINMUX_GPIO(PTC1),
+ PINMUX_GPIO(PTC0),
/* PTD */
- PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
- PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
- PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
- PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
- PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
- PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
- PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
- PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+ PINMUX_GPIO(PTD7),
+ PINMUX_GPIO(PTD6),
+ PINMUX_GPIO(PTD5),
+ PINMUX_GPIO(PTD4),
+ PINMUX_GPIO(PTD3),
+ PINMUX_GPIO(PTD2),
+ PINMUX_GPIO(PTD1),
+ PINMUX_GPIO(PTD0),
/* PTE */
- PINMUX_GPIO(GPIO_PTE7, PTE7_DATA),
- PINMUX_GPIO(GPIO_PTE6, PTE6_DATA),
- PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
- PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
- PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
- PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
- PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
- PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+ PINMUX_GPIO(PTE7),
+ PINMUX_GPIO(PTE6),
+ PINMUX_GPIO(PTE5),
+ PINMUX_GPIO(PTE4),
+ PINMUX_GPIO(PTE3),
+ PINMUX_GPIO(PTE2),
+ PINMUX_GPIO(PTE1),
+ PINMUX_GPIO(PTE0),
/* PTF */
- PINMUX_GPIO(GPIO_PTF7, PTF7_DATA),
- PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
- PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
- PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
- PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
- PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
- PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
- PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+ PINMUX_GPIO(PTF7),
+ PINMUX_GPIO(PTF6),
+ PINMUX_GPIO(PTF5),
+ PINMUX_GPIO(PTF4),
+ PINMUX_GPIO(PTF3),
+ PINMUX_GPIO(PTF2),
+ PINMUX_GPIO(PTF1),
+ PINMUX_GPIO(PTF0),
/* PTG */
- PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
- PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
- PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
- PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
- PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
- PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+ PINMUX_GPIO(PTG5),
+ PINMUX_GPIO(PTG4),
+ PINMUX_GPIO(PTG3),
+ PINMUX_GPIO(PTG2),
+ PINMUX_GPIO(PTG1),
+ PINMUX_GPIO(PTG0),
/* PTH */
- PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
- PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
- PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
- PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
- PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
- PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
- PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
- PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+ PINMUX_GPIO(PTH7),
+ PINMUX_GPIO(PTH6),
+ PINMUX_GPIO(PTH5),
+ PINMUX_GPIO(PTH4),
+ PINMUX_GPIO(PTH3),
+ PINMUX_GPIO(PTH2),
+ PINMUX_GPIO(PTH1),
+ PINMUX_GPIO(PTH0),
/* PTJ */
- PINMUX_GPIO(GPIO_PTJ7, PTJ7_DATA),
- PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
- PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
- PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
- PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
- PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
- PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+ PINMUX_GPIO(PTJ7),
+ PINMUX_GPIO(PTJ6),
+ PINMUX_GPIO(PTJ5),
+ PINMUX_GPIO(PTJ3),
+ PINMUX_GPIO(PTJ2),
+ PINMUX_GPIO(PTJ1),
+ PINMUX_GPIO(PTJ0),
/* PTK */
- PINMUX_GPIO(GPIO_PTK7, PTK7_DATA),
- PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
- PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
- PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
- PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
- PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
- PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
- PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+ PINMUX_GPIO(PTK7),
+ PINMUX_GPIO(PTK6),
+ PINMUX_GPIO(PTK5),
+ PINMUX_GPIO(PTK4),
+ PINMUX_GPIO(PTK3),
+ PINMUX_GPIO(PTK2),
+ PINMUX_GPIO(PTK1),
+ PINMUX_GPIO(PTK0),
/* PTL */
- PINMUX_GPIO(GPIO_PTL7, PTL7_DATA),
- PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
- PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
- PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
- PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
- PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
- PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
- PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
+ PINMUX_GPIO(PTL7),
+ PINMUX_GPIO(PTL6),
+ PINMUX_GPIO(PTL5),
+ PINMUX_GPIO(PTL4),
+ PINMUX_GPIO(PTL3),
+ PINMUX_GPIO(PTL2),
+ PINMUX_GPIO(PTL1),
+ PINMUX_GPIO(PTL0),
/* PTM */
- PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
- PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
- PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
- PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
- PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
- PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
- PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
- PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+ PINMUX_GPIO(PTM7),
+ PINMUX_GPIO(PTM6),
+ PINMUX_GPIO(PTM5),
+ PINMUX_GPIO(PTM4),
+ PINMUX_GPIO(PTM3),
+ PINMUX_GPIO(PTM2),
+ PINMUX_GPIO(PTM1),
+ PINMUX_GPIO(PTM0),
/* PTN */
- PINMUX_GPIO(GPIO_PTN7, PTN7_DATA),
- PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
- PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
- PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
- PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
- PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
- PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
- PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
+ PINMUX_GPIO(PTN7),
+ PINMUX_GPIO(PTN6),
+ PINMUX_GPIO(PTN5),
+ PINMUX_GPIO(PTN4),
+ PINMUX_GPIO(PTN3),
+ PINMUX_GPIO(PTN2),
+ PINMUX_GPIO(PTN1),
+ PINMUX_GPIO(PTN0),
/* PTQ */
- PINMUX_GPIO(GPIO_PTQ7, PTQ7_DATA),
- PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA),
- PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA),
- PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA),
- PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
- PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
- PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
- PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
+ PINMUX_GPIO(PTQ7),
+ PINMUX_GPIO(PTQ6),
+ PINMUX_GPIO(PTQ5),
+ PINMUX_GPIO(PTQ4),
+ PINMUX_GPIO(PTQ3),
+ PINMUX_GPIO(PTQ2),
+ PINMUX_GPIO(PTQ1),
+ PINMUX_GPIO(PTQ0),
/* PTR */
- PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
- PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
- PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
- PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
- PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
- PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
- PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
- PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+ PINMUX_GPIO(PTR7),
+ PINMUX_GPIO(PTR6),
+ PINMUX_GPIO(PTR5),
+ PINMUX_GPIO(PTR4),
+ PINMUX_GPIO(PTR3),
+ PINMUX_GPIO(PTR2),
+ PINMUX_GPIO(PTR1),
+ PINMUX_GPIO(PTR0),
/* PTS */
- PINMUX_GPIO(GPIO_PTS6, PTS6_DATA),
- PINMUX_GPIO(GPIO_PTS5, PTS5_DATA),
- PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
- PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
- PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
- PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
- PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+ PINMUX_GPIO(PTS6),
+ PINMUX_GPIO(PTS5),
+ PINMUX_GPIO(PTS4),
+ PINMUX_GPIO(PTS3),
+ PINMUX_GPIO(PTS2),
+ PINMUX_GPIO(PTS1),
+ PINMUX_GPIO(PTS0),
/* PTT */
- PINMUX_GPIO(GPIO_PTT7, PTT7_DATA),
- PINMUX_GPIO(GPIO_PTT6, PTT6_DATA),
- PINMUX_GPIO(GPIO_PTT5, PTT5_DATA),
- PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
- PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
- PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
- PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
- PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+ PINMUX_GPIO(PTT7),
+ PINMUX_GPIO(PTT6),
+ PINMUX_GPIO(PTT5),
+ PINMUX_GPIO(PTT4),
+ PINMUX_GPIO(PTT3),
+ PINMUX_GPIO(PTT2),
+ PINMUX_GPIO(PTT1),
+ PINMUX_GPIO(PTT0),
/* PTU */
- PINMUX_GPIO(GPIO_PTU7, PTU7_DATA),
- PINMUX_GPIO(GPIO_PTU6, PTU6_DATA),
- PINMUX_GPIO(GPIO_PTU5, PTU5_DATA),
- PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
- PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
- PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
- PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
- PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+ PINMUX_GPIO(PTU7),
+ PINMUX_GPIO(PTU6),
+ PINMUX_GPIO(PTU5),
+ PINMUX_GPIO(PTU4),
+ PINMUX_GPIO(PTU3),
+ PINMUX_GPIO(PTU2),
+ PINMUX_GPIO(PTU1),
+ PINMUX_GPIO(PTU0),
/* PTV */
- PINMUX_GPIO(GPIO_PTV7, PTV7_DATA),
- PINMUX_GPIO(GPIO_PTV6, PTV6_DATA),
- PINMUX_GPIO(GPIO_PTV5, PTV5_DATA),
- PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
- PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
- PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
- PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
- PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+ PINMUX_GPIO(PTV7),
+ PINMUX_GPIO(PTV6),
+ PINMUX_GPIO(PTV5),
+ PINMUX_GPIO(PTV4),
+ PINMUX_GPIO(PTV3),
+ PINMUX_GPIO(PTV2),
+ PINMUX_GPIO(PTV1),
+ PINMUX_GPIO(PTV0),
/* PTW */
- PINMUX_GPIO(GPIO_PTW7, PTW7_DATA),
- PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
- PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
- PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
- PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
- PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
- PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
- PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
+ PINMUX_GPIO(PTW7),
+ PINMUX_GPIO(PTW6),
+ PINMUX_GPIO(PTW5),
+ PINMUX_GPIO(PTW4),
+ PINMUX_GPIO(PTW3),
+ PINMUX_GPIO(PTW2),
+ PINMUX_GPIO(PTW1),
+ PINMUX_GPIO(PTW0),
/* PTX */
- PINMUX_GPIO(GPIO_PTX7, PTX7_DATA),
- PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
- PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
- PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
- PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
- PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
- PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
- PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
+ PINMUX_GPIO(PTX7),
+ PINMUX_GPIO(PTX6),
+ PINMUX_GPIO(PTX5),
+ PINMUX_GPIO(PTX4),
+ PINMUX_GPIO(PTX3),
+ PINMUX_GPIO(PTX2),
+ PINMUX_GPIO(PTX1),
+ PINMUX_GPIO(PTX0),
/* PTY */
- PINMUX_GPIO(GPIO_PTY7, PTY7_DATA),
- PINMUX_GPIO(GPIO_PTY6, PTY6_DATA),
- PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
- PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
- PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
- PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
- PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
- PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
+ PINMUX_GPIO(PTY7),
+ PINMUX_GPIO(PTY6),
+ PINMUX_GPIO(PTY5),
+ PINMUX_GPIO(PTY4),
+ PINMUX_GPIO(PTY3),
+ PINMUX_GPIO(PTY2),
+ PINMUX_GPIO(PTY1),
+ PINMUX_GPIO(PTY0),
/* PTZ */
- PINMUX_GPIO(GPIO_PTZ7, PTZ7_DATA),
- PINMUX_GPIO(GPIO_PTZ6, PTZ6_DATA),
- PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
- PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
- PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
- PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
- PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
- PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA),
+ PINMUX_GPIO(PTZ7),
+ PINMUX_GPIO(PTZ6),
+ PINMUX_GPIO(PTZ5),
+ PINMUX_GPIO(PTZ4),
+ PINMUX_GPIO(PTZ3),
+ PINMUX_GPIO(PTZ2),
+ PINMUX_GPIO(PTZ1),
+ PINMUX_GPIO(PTZ0),
};
#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
@@ -1789,64 +1743,64 @@ static const struct pinmux_func pinmux_func_gpios[] = {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PACR", 0xa4050100, 16, 2) {
- PTA7_FN, PTA7_OUT, PTA7_IN_PU, PTA7_IN,
- PTA6_FN, PTA6_OUT, PTA6_IN_PU, PTA6_IN,
- PTA5_FN, PTA5_OUT, PTA5_IN_PU, PTA5_IN,
- PTA4_FN, PTA4_OUT, PTA4_IN_PU, PTA4_IN,
- PTA3_FN, PTA3_OUT, PTA3_IN_PU, PTA3_IN,
- PTA2_FN, PTA2_OUT, PTA2_IN_PU, PTA2_IN,
- PTA1_FN, PTA1_OUT, PTA1_IN_PU, PTA1_IN,
- PTA0_FN, PTA0_OUT, PTA0_IN_PU, PTA0_IN }
+ PTA7_FN, PTA7_OUT, 0, PTA7_IN,
+ PTA6_FN, PTA6_OUT, 0, PTA6_IN,
+ PTA5_FN, PTA5_OUT, 0, PTA5_IN,
+ PTA4_FN, PTA4_OUT, 0, PTA4_IN,
+ PTA3_FN, PTA3_OUT, 0, PTA3_IN,
+ PTA2_FN, PTA2_OUT, 0, PTA2_IN,
+ PTA1_FN, PTA1_OUT, 0, PTA1_IN,
+ PTA0_FN, PTA0_OUT, 0, PTA0_IN }
},
{ PINMUX_CFG_REG("PBCR", 0xa4050102, 16, 2) {
- PTB7_FN, PTB7_OUT, PTB7_IN_PU, PTB7_IN,
- PTB6_FN, PTB6_OUT, PTB6_IN_PU, PTB6_IN,
- PTB5_FN, PTB5_OUT, PTB5_IN_PU, PTB5_IN,
- PTB4_FN, PTB4_OUT, PTB4_IN_PU, PTB4_IN,
- PTB3_FN, PTB3_OUT, PTB3_IN_PU, PTB3_IN,
- PTB2_FN, PTB2_OUT, PTB2_IN_PU, PTB2_IN,
- PTB1_FN, PTB1_OUT, PTB1_IN_PU, PTB1_IN,
- PTB0_FN, PTB0_OUT, PTB0_IN_PU, PTB0_IN }
+ PTB7_FN, PTB7_OUT, 0, PTB7_IN,
+ PTB6_FN, PTB6_OUT, 0, PTB6_IN,
+ PTB5_FN, PTB5_OUT, 0, PTB5_IN,
+ PTB4_FN, PTB4_OUT, 0, PTB4_IN,
+ PTB3_FN, PTB3_OUT, 0, PTB3_IN,
+ PTB2_FN, PTB2_OUT, 0, PTB2_IN,
+ PTB1_FN, PTB1_OUT, 0, PTB1_IN,
+ PTB0_FN, PTB0_OUT, 0, PTB0_IN }
},
{ PINMUX_CFG_REG("PCCR", 0xa4050104, 16, 2) {
- PTC7_FN, PTC7_OUT, PTC7_IN_PU, PTC7_IN,
- PTC6_FN, PTC6_OUT, PTC6_IN_PU, PTC6_IN,
- PTC5_FN, PTC5_OUT, PTC5_IN_PU, PTC5_IN,
- PTC4_FN, PTC4_OUT, PTC4_IN_PU, PTC4_IN,
- PTC3_FN, PTC3_OUT, PTC3_IN_PU, PTC3_IN,
- PTC2_FN, PTC2_OUT, PTC2_IN_PU, PTC2_IN,
- PTC1_FN, PTC1_OUT, PTC1_IN_PU, PTC1_IN,
- PTC0_FN, PTC0_OUT, PTC0_IN_PU, PTC0_IN }
+ PTC7_FN, PTC7_OUT, 0, PTC7_IN,
+ PTC6_FN, PTC6_OUT, 0, PTC6_IN,
+ PTC5_FN, PTC5_OUT, 0, PTC5_IN,
+ PTC4_FN, PTC4_OUT, 0, PTC4_IN,
+ PTC3_FN, PTC3_OUT, 0, PTC3_IN,
+ PTC2_FN, PTC2_OUT, 0, PTC2_IN,
+ PTC1_FN, PTC1_OUT, 0, PTC1_IN,
+ PTC0_FN, PTC0_OUT, 0, PTC0_IN }
},
{ PINMUX_CFG_REG("PDCR", 0xa4050106, 16, 2) {
- PTD7_FN, PTD7_OUT, PTD7_IN_PU, PTD7_IN,
- PTD6_FN, PTD6_OUT, PTD6_IN_PU, PTD6_IN,
- PTD5_FN, PTD5_OUT, PTD5_IN_PU, PTD5_IN,
- PTD4_FN, PTD4_OUT, PTD4_IN_PU, PTD4_IN,
- PTD3_FN, PTD3_OUT, PTD3_IN_PU, PTD3_IN,
- PTD2_FN, PTD2_OUT, PTD2_IN_PU, PTD2_IN,
- PTD1_FN, PTD1_OUT, PTD1_IN_PU, PTD1_IN,
- PTD0_FN, PTD0_OUT, PTD0_IN_PU, PTD0_IN }
+ PTD7_FN, PTD7_OUT, 0, PTD7_IN,
+ PTD6_FN, PTD6_OUT, 0, PTD6_IN,
+ PTD5_FN, PTD5_OUT, 0, PTD5_IN,
+ PTD4_FN, PTD4_OUT, 0, PTD4_IN,
+ PTD3_FN, PTD3_OUT, 0, PTD3_IN,
+ PTD2_FN, PTD2_OUT, 0, PTD2_IN,
+ PTD1_FN, PTD1_OUT, 0, PTD1_IN,
+ PTD0_FN, PTD0_OUT, 0, PTD0_IN }
},
{ PINMUX_CFG_REG("PECR", 0xa4050108, 16, 2) {
- PTE7_FN, PTE7_OUT, PTE7_IN_PU, PTE7_IN,
- PTE6_FN, PTE6_OUT, PTE6_IN_PU, PTE6_IN,
- PTE5_FN, PTE5_OUT, PTE5_IN_PU, PTE5_IN,
- PTE4_FN, PTE4_OUT, PTE4_IN_PU, PTE4_IN,
- PTE3_FN, PTE3_OUT, PTE3_IN_PU, PTE3_IN,
- PTE2_FN, PTE2_OUT, PTE2_IN_PU, PTE2_IN,
- PTE1_FN, PTE1_OUT, PTE1_IN_PU, PTE1_IN,
- PTE0_FN, PTE0_OUT, PTE0_IN_PU, PTE0_IN }
+ PTE7_FN, PTE7_OUT, 0, PTE7_IN,
+ PTE6_FN, PTE6_OUT, 0, PTE6_IN,
+ PTE5_FN, PTE5_OUT, 0, PTE5_IN,
+ PTE4_FN, PTE4_OUT, 0, PTE4_IN,
+ PTE3_FN, PTE3_OUT, 0, PTE3_IN,
+ PTE2_FN, PTE2_OUT, 0, PTE2_IN,
+ PTE1_FN, PTE1_OUT, 0, PTE1_IN,
+ PTE0_FN, PTE0_OUT, 0, PTE0_IN }
},
{ PINMUX_CFG_REG("PFCR", 0xa405010a, 16, 2) {
- PTF7_FN, PTF7_OUT, PTF7_IN_PU, PTF7_IN,
- PTF6_FN, PTF6_OUT, PTF6_IN_PU, PTF6_IN,
- PTF5_FN, PTF5_OUT, PTF5_IN_PU, PTF5_IN,
- PTF4_FN, PTF4_OUT, PTF4_IN_PU, PTF4_IN,
- PTF3_FN, PTF3_OUT, PTF3_IN_PU, PTF3_IN,
- PTF2_FN, PTF2_OUT, PTF2_IN_PU, PTF2_IN,
- PTF1_FN, PTF1_OUT, PTF1_IN_PU, PTF1_IN,
- PTF0_FN, PTF0_OUT, PTF0_IN_PU, PTF0_IN }
+ PTF7_FN, PTF7_OUT, 0, PTF7_IN,
+ PTF6_FN, PTF6_OUT, 0, PTF6_IN,
+ PTF5_FN, PTF5_OUT, 0, PTF5_IN,
+ PTF4_FN, PTF4_OUT, 0, PTF4_IN,
+ PTF3_FN, PTF3_OUT, 0, PTF3_IN,
+ PTF2_FN, PTF2_OUT, 0, PTF2_IN,
+ PTF1_FN, PTF1_OUT, 0, PTF1_IN,
+ PTF0_FN, PTF0_OUT, 0, PTF0_IN }
},
{ PINMUX_CFG_REG("PGCR", 0xa405010c, 16, 2) {
0, 0, 0, 0,
@@ -1859,164 +1813,164 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTG0_FN, PTG0_OUT, 0, 0 }
},
{ PINMUX_CFG_REG("PHCR", 0xa405010e, 16, 2) {
- PTH7_FN, PTH7_OUT, PTH7_IN_PU, PTH7_IN,
- PTH6_FN, PTH6_OUT, PTH6_IN_PU, PTH6_IN,
- PTH5_FN, PTH5_OUT, PTH5_IN_PU, PTH5_IN,
- PTH4_FN, PTH4_OUT, PTH4_IN_PU, PTH4_IN,
- PTH3_FN, PTH3_OUT, PTH3_IN_PU, PTH3_IN,
- PTH2_FN, PTH2_OUT, PTH2_IN_PU, PTH2_IN,
- PTH1_FN, PTH1_OUT, PTH1_IN_PU, PTH1_IN,
- PTH0_FN, PTH0_OUT, PTH0_IN_PU, PTH0_IN }
+ PTH7_FN, PTH7_OUT, 0, PTH7_IN,
+ PTH6_FN, PTH6_OUT, 0, PTH6_IN,
+ PTH5_FN, PTH5_OUT, 0, PTH5_IN,
+ PTH4_FN, PTH4_OUT, 0, PTH4_IN,
+ PTH3_FN, PTH3_OUT, 0, PTH3_IN,
+ PTH2_FN, PTH2_OUT, 0, PTH2_IN,
+ PTH1_FN, PTH1_OUT, 0, PTH1_IN,
+ PTH0_FN, PTH0_OUT, 0, PTH0_IN }
},
{ PINMUX_CFG_REG("PJCR", 0xa4050110, 16, 2) {
PTJ7_FN, PTJ7_OUT, 0, 0,
PTJ6_FN, PTJ6_OUT, 0, 0,
PTJ5_FN, PTJ5_OUT, 0, 0,
0, 0, 0, 0,
- PTJ3_FN, PTJ3_OUT, PTJ3_IN_PU, PTJ3_IN,
- PTJ2_FN, PTJ2_OUT, PTJ2_IN_PU, PTJ2_IN,
- PTJ1_FN, PTJ1_OUT, PTJ1_IN_PU, PTJ1_IN,
- PTJ0_FN, PTJ0_OUT, PTJ0_IN_PU, PTJ0_IN }
+ PTJ3_FN, PTJ3_OUT, 0, PTJ3_IN,
+ PTJ2_FN, PTJ2_OUT, 0, PTJ2_IN,
+ PTJ1_FN, PTJ1_OUT, 0, PTJ1_IN,
+ PTJ0_FN, PTJ0_OUT, 0, PTJ0_IN }
},
{ PINMUX_CFG_REG("PKCR", 0xa4050112, 16, 2) {
- PTK7_FN, PTK7_OUT, PTK7_IN_PU, PTK7_IN,
- PTK6_FN, PTK6_OUT, PTK6_IN_PU, PTK6_IN,
- PTK5_FN, PTK5_OUT, PTK5_IN_PU, PTK5_IN,
- PTK4_FN, PTK4_OUT, PTK4_IN_PU, PTK4_IN,
- PTK3_FN, PTK3_OUT, PTK3_IN_PU, PTK3_IN,
- PTK2_FN, PTK2_OUT, PTK2_IN_PU, PTK2_IN,
- PTK1_FN, PTK1_OUT, PTK1_IN_PU, PTK1_IN,
- PTK0_FN, PTK0_OUT, PTK0_IN_PU, PTK0_IN }
+ PTK7_FN, PTK7_OUT, 0, PTK7_IN,
+ PTK6_FN, PTK6_OUT, 0, PTK6_IN,
+ PTK5_FN, PTK5_OUT, 0, PTK5_IN,
+ PTK4_FN, PTK4_OUT, 0, PTK4_IN,
+ PTK3_FN, PTK3_OUT, 0, PTK3_IN,
+ PTK2_FN, PTK2_OUT, 0, PTK2_IN,
+ PTK1_FN, PTK1_OUT, 0, PTK1_IN,
+ PTK0_FN, PTK0_OUT, 0, PTK0_IN }
},
{ PINMUX_CFG_REG("PLCR", 0xa4050114, 16, 2) {
- PTL7_FN, PTL7_OUT, PTL7_IN_PU, PTL7_IN,
- PTL6_FN, PTL6_OUT, PTL6_IN_PU, PTL6_IN,
- PTL5_FN, PTL5_OUT, PTL5_IN_PU, PTL5_IN,
- PTL4_FN, PTL4_OUT, PTL4_IN_PU, PTL4_IN,
- PTL3_FN, PTL3_OUT, PTL3_IN_PU, PTL3_IN,
- PTL2_FN, PTL2_OUT, PTL2_IN_PU, PTL2_IN,
- PTL1_FN, PTL1_OUT, PTL1_IN_PU, PTL1_IN,
- PTL0_FN, PTL0_OUT, PTL0_IN_PU, PTL0_IN }
+ PTL7_FN, PTL7_OUT, 0, PTL7_IN,
+ PTL6_FN, PTL6_OUT, 0, PTL6_IN,
+ PTL5_FN, PTL5_OUT, 0, PTL5_IN,
+ PTL4_FN, PTL4_OUT, 0, PTL4_IN,
+ PTL3_FN, PTL3_OUT, 0, PTL3_IN,
+ PTL2_FN, PTL2_OUT, 0, PTL2_IN,
+ PTL1_FN, PTL1_OUT, 0, PTL1_IN,
+ PTL0_FN, PTL0_OUT, 0, PTL0_IN }
},
{ PINMUX_CFG_REG("PMCR", 0xa4050116, 16, 2) {
- PTM7_FN, PTM7_OUT, PTM7_IN_PU, PTM7_IN,
- PTM6_FN, PTM6_OUT, PTM6_IN_PU, PTM6_IN,
- PTM5_FN, PTM5_OUT, PTM5_IN_PU, PTM5_IN,
- PTM4_FN, PTM4_OUT, PTM4_IN_PU, PTM4_IN,
- PTM3_FN, PTM3_OUT, PTM3_IN_PU, PTM3_IN,
- PTM2_FN, PTM2_OUT, PTM2_IN_PU, PTM2_IN,
- PTM1_FN, PTM1_OUT, PTM1_IN_PU, PTM1_IN,
- PTM0_FN, PTM0_OUT, PTM0_IN_PU, PTM0_IN }
+ PTM7_FN, PTM7_OUT, 0, PTM7_IN,
+ PTM6_FN, PTM6_OUT, 0, PTM6_IN,
+ PTM5_FN, PTM5_OUT, 0, PTM5_IN,
+ PTM4_FN, PTM4_OUT, 0, PTM4_IN,
+ PTM3_FN, PTM3_OUT, 0, PTM3_IN,
+ PTM2_FN, PTM2_OUT, 0, PTM2_IN,
+ PTM1_FN, PTM1_OUT, 0, PTM1_IN,
+ PTM0_FN, PTM0_OUT, 0, PTM0_IN }
},
{ PINMUX_CFG_REG("PNCR", 0xa4050118, 16, 2) {
- PTN7_FN, PTN7_OUT, PTN7_IN_PU, PTN7_IN,
- PTN6_FN, PTN6_OUT, PTN6_IN_PU, PTN6_IN,
- PTN5_FN, PTN5_OUT, PTN5_IN_PU, PTN5_IN,
- PTN4_FN, PTN4_OUT, PTN4_IN_PU, PTN4_IN,
- PTN3_FN, PTN3_OUT, PTN3_IN_PU, PTN3_IN,
- PTN2_FN, PTN2_OUT, PTN2_IN_PU, PTN2_IN,
- PTN1_FN, PTN1_OUT, PTN1_IN_PU, PTN1_IN,
- PTN0_FN, PTN0_OUT, PTN0_IN_PU, PTN0_IN }
+ PTN7_FN, PTN7_OUT, 0, PTN7_IN,
+ PTN6_FN, PTN6_OUT, 0, PTN6_IN,
+ PTN5_FN, PTN5_OUT, 0, PTN5_IN,
+ PTN4_FN, PTN4_OUT, 0, PTN4_IN,
+ PTN3_FN, PTN3_OUT, 0, PTN3_IN,
+ PTN2_FN, PTN2_OUT, 0, PTN2_IN,
+ PTN1_FN, PTN1_OUT, 0, PTN1_IN,
+ PTN0_FN, PTN0_OUT, 0, PTN0_IN }
},
{ PINMUX_CFG_REG("PQCR", 0xa405011a, 16, 2) {
- PTQ7_FN, PTQ7_OUT, PTQ7_IN_PU, PTQ7_IN,
- PTQ6_FN, PTQ6_OUT, PTQ6_IN_PU, PTQ6_IN,
- PTQ5_FN, PTQ5_OUT, PTQ5_IN_PU, PTQ5_IN,
- PTQ4_FN, PTQ4_OUT, PTQ4_IN_PU, PTQ4_IN,
- PTQ3_FN, PTQ3_OUT, PTQ3_IN_PU, PTQ3_IN,
- PTQ2_FN, PTQ2_OUT, PTQ2_IN_PU, PTQ2_IN,
- PTQ1_FN, PTQ1_OUT, PTQ1_IN_PU, PTQ1_IN,
- PTQ0_FN, PTQ0_OUT, PTQ0_IN_PU, PTQ0_IN }
+ PTQ7_FN, PTQ7_OUT, 0, PTQ7_IN,
+ PTQ6_FN, PTQ6_OUT, 0, PTQ6_IN,
+ PTQ5_FN, PTQ5_OUT, 0, PTQ5_IN,
+ PTQ4_FN, PTQ4_OUT, 0, PTQ4_IN,
+ PTQ3_FN, PTQ3_OUT, 0, PTQ3_IN,
+ PTQ2_FN, PTQ2_OUT, 0, PTQ2_IN,
+ PTQ1_FN, PTQ1_OUT, 0, PTQ1_IN,
+ PTQ0_FN, PTQ0_OUT, 0, PTQ0_IN }
},
{ PINMUX_CFG_REG("PRCR", 0xa405011c, 16, 2) {
- PTR7_FN, PTR7_OUT, PTR7_IN_PU, PTR7_IN,
- PTR6_FN, PTR6_OUT, PTR6_IN_PU, PTR6_IN,
- PTR5_FN, PTR5_OUT, PTR5_IN_PU, PTR5_IN,
- PTR4_FN, PTR4_OUT, PTR4_IN_PU, PTR4_IN,
- PTR3_FN, 0, PTR3_IN_PU, PTR3_IN,
- PTR2_FN, 0, PTR2_IN_PU, PTR2_IN,
- PTR1_FN, PTR1_OUT, PTR1_IN_PU, PTR1_IN,
- PTR0_FN, PTR0_OUT, PTR0_IN_PU, PTR0_IN }
+ PTR7_FN, PTR7_OUT, 0, PTR7_IN,
+ PTR6_FN, PTR6_OUT, 0, PTR6_IN,
+ PTR5_FN, PTR5_OUT, 0, PTR5_IN,
+ PTR4_FN, PTR4_OUT, 0, PTR4_IN,
+ PTR3_FN, 0, 0, PTR3_IN,
+ PTR2_FN, 0, 0, PTR2_IN,
+ PTR1_FN, PTR1_OUT, 0, PTR1_IN,
+ PTR0_FN, PTR0_OUT, 0, PTR0_IN }
},
{ PINMUX_CFG_REG("PSCR", 0xa405011e, 16, 2) {
0, 0, 0, 0,
- PTS6_FN, PTS6_OUT, PTS6_IN_PU, PTS6_IN,
- PTS5_FN, PTS5_OUT, PTS5_IN_PU, PTS5_IN,
- PTS4_FN, PTS4_OUT, PTS4_IN_PU, PTS4_IN,
- PTS3_FN, PTS3_OUT, PTS3_IN_PU, PTS3_IN,
- PTS2_FN, PTS2_OUT, PTS2_IN_PU, PTS2_IN,
- PTS1_FN, PTS1_OUT, PTS1_IN_PU, PTS1_IN,
- PTS0_FN, PTS0_OUT, PTS0_IN_PU, PTS0_IN }
+ PTS6_FN, PTS6_OUT, 0, PTS6_IN,
+ PTS5_FN, PTS5_OUT, 0, PTS5_IN,
+ PTS4_FN, PTS4_OUT, 0, PTS4_IN,
+ PTS3_FN, PTS3_OUT, 0, PTS3_IN,
+ PTS2_FN, PTS2_OUT, 0, PTS2_IN,
+ PTS1_FN, PTS1_OUT, 0, PTS1_IN,
+ PTS0_FN, PTS0_OUT, 0, PTS0_IN }
},
{ PINMUX_CFG_REG("PTCR", 0xa4050140, 16, 2) {
- PTT7_FN, PTT7_OUT, PTT7_IN_PU, PTT7_IN,
- PTT6_FN, PTT6_OUT, PTT6_IN_PU, PTT6_IN,
- PTT5_FN, PTT5_OUT, PTT5_IN_PU, PTT5_IN,
- PTT4_FN, PTT4_OUT, PTT4_IN_PU, PTT4_IN,
- PTT3_FN, PTT3_OUT, PTT3_IN_PU, PTT3_IN,
- PTT2_FN, PTT2_OUT, PTT2_IN_PU, PTT2_IN,
- PTT1_FN, PTT1_OUT, PTT1_IN_PU, PTT1_IN,
- PTT0_FN, PTT0_OUT, PTT0_IN_PU, PTT0_IN }
+ PTT7_FN, PTT7_OUT, 0, PTT7_IN,
+ PTT6_FN, PTT6_OUT, 0, PTT6_IN,
+ PTT5_FN, PTT5_OUT, 0, PTT5_IN,
+ PTT4_FN, PTT4_OUT, 0, PTT4_IN,
+ PTT3_FN, PTT3_OUT, 0, PTT3_IN,
+ PTT2_FN, PTT2_OUT, 0, PTT2_IN,
+ PTT1_FN, PTT1_OUT, 0, PTT1_IN,
+ PTT0_FN, PTT0_OUT, 0, PTT0_IN }
},
{ PINMUX_CFG_REG("PUCR", 0xa4050142, 16, 2) {
- PTU7_FN, PTU7_OUT, PTU7_IN_PU, PTU7_IN,
- PTU6_FN, PTU6_OUT, PTU6_IN_PU, PTU6_IN,
- PTU5_FN, PTU5_OUT, PTU5_IN_PU, PTU5_IN,
- PTU4_FN, PTU4_OUT, PTU4_IN_PU, PTU4_IN,
- PTU3_FN, PTU3_OUT, PTU3_IN_PU, PTU3_IN,
- PTU2_FN, PTU2_OUT, PTU2_IN_PU, PTU2_IN,
- PTU1_FN, PTU1_OUT, PTU1_IN_PU, PTU1_IN,
- PTU0_FN, PTU0_OUT, PTU0_IN_PU, PTU0_IN }
+ PTU7_FN, PTU7_OUT, 0, PTU7_IN,
+ PTU6_FN, PTU6_OUT, 0, PTU6_IN,
+ PTU5_FN, PTU5_OUT, 0, PTU5_IN,
+ PTU4_FN, PTU4_OUT, 0, PTU4_IN,
+ PTU3_FN, PTU3_OUT, 0, PTU3_IN,
+ PTU2_FN, PTU2_OUT, 0, PTU2_IN,
+ PTU1_FN, PTU1_OUT, 0, PTU1_IN,
+ PTU0_FN, PTU0_OUT, 0, PTU0_IN }
},
{ PINMUX_CFG_REG("PVCR", 0xa4050144, 16, 2) {
- PTV7_FN, PTV7_OUT, PTV7_IN_PU, PTV7_IN,
- PTV6_FN, PTV6_OUT, PTV6_IN_PU, PTV6_IN,
- PTV5_FN, PTV5_OUT, PTV5_IN_PU, PTV5_IN,
- PTV4_FN, PTV4_OUT, PTV4_IN_PU, PTV4_IN,
- PTV3_FN, PTV3_OUT, PTV3_IN_PU, PTV3_IN,
- PTV2_FN, PTV2_OUT, PTV2_IN_PU, PTV2_IN,
- PTV1_FN, PTV1_OUT, PTV1_IN_PU, PTV1_IN,
- PTV0_FN, PTV0_OUT, PTV0_IN_PU, PTV0_IN }
+ PTV7_FN, PTV7_OUT, 0, PTV7_IN,
+ PTV6_FN, PTV6_OUT, 0, PTV6_IN,
+ PTV5_FN, PTV5_OUT, 0, PTV5_IN,
+ PTV4_FN, PTV4_OUT, 0, PTV4_IN,
+ PTV3_FN, PTV3_OUT, 0, PTV3_IN,
+ PTV2_FN, PTV2_OUT, 0, PTV2_IN,
+ PTV1_FN, PTV1_OUT, 0, PTV1_IN,
+ PTV0_FN, PTV0_OUT, 0, PTV0_IN }
},
{ PINMUX_CFG_REG("PWCR", 0xa4050146, 16, 2) {
- PTW7_FN, PTW7_OUT, PTW7_IN_PU, PTW7_IN,
- PTW6_FN, PTW6_OUT, PTW6_IN_PU, PTW6_IN,
- PTW5_FN, PTW5_OUT, PTW5_IN_PU, PTW5_IN,
- PTW4_FN, PTW4_OUT, PTW4_IN_PU, PTW4_IN,
- PTW3_FN, PTW3_OUT, PTW3_IN_PU, PTW3_IN,
- PTW2_FN, PTW2_OUT, PTW2_IN_PU, PTW2_IN,
- PTW1_FN, PTW1_OUT, PTW1_IN_PU, PTW1_IN,
- PTW0_FN, PTW0_OUT, PTW0_IN_PU, PTW0_IN }
+ PTW7_FN, PTW7_OUT, 0, PTW7_IN,
+ PTW6_FN, PTW6_OUT, 0, PTW6_IN,
+ PTW5_FN, PTW5_OUT, 0, PTW5_IN,
+ PTW4_FN, PTW4_OUT, 0, PTW4_IN,
+ PTW3_FN, PTW3_OUT, 0, PTW3_IN,
+ PTW2_FN, PTW2_OUT, 0, PTW2_IN,
+ PTW1_FN, PTW1_OUT, 0, PTW1_IN,
+ PTW0_FN, PTW0_OUT, 0, PTW0_IN }
},
{ PINMUX_CFG_REG("PXCR", 0xa4050148, 16, 2) {
- PTX7_FN, PTX7_OUT, PTX7_IN_PU, PTX7_IN,
- PTX6_FN, PTX6_OUT, PTX6_IN_PU, PTX6_IN,
- PTX5_FN, PTX5_OUT, PTX5_IN_PU, PTX5_IN,
- PTX4_FN, PTX4_OUT, PTX4_IN_PU, PTX4_IN,
- PTX3_FN, PTX3_OUT, PTX3_IN_PU, PTX3_IN,
- PTX2_FN, PTX2_OUT, PTX2_IN_PU, PTX2_IN,
- PTX1_FN, PTX1_OUT, PTX1_IN_PU, PTX1_IN,
- PTX0_FN, PTX0_OUT, PTX0_IN_PU, PTX0_IN }
+ PTX7_FN, PTX7_OUT, 0, PTX7_IN,
+ PTX6_FN, PTX6_OUT, 0, PTX6_IN,
+ PTX5_FN, PTX5_OUT, 0, PTX5_IN,
+ PTX4_FN, PTX4_OUT, 0, PTX4_IN,
+ PTX3_FN, PTX3_OUT, 0, PTX3_IN,
+ PTX2_FN, PTX2_OUT, 0, PTX2_IN,
+ PTX1_FN, PTX1_OUT, 0, PTX1_IN,
+ PTX0_FN, PTX0_OUT, 0, PTX0_IN }
},
{ PINMUX_CFG_REG("PYCR", 0xa405014a, 16, 2) {
- PTY7_FN, PTY7_OUT, PTY7_IN_PU, PTY7_IN,
- PTY6_FN, PTY6_OUT, PTY6_IN_PU, PTY6_IN,
- PTY5_FN, PTY5_OUT, PTY5_IN_PU, PTY5_IN,
- PTY4_FN, PTY4_OUT, PTY4_IN_PU, PTY4_IN,
- PTY3_FN, PTY3_OUT, PTY3_IN_PU, PTY3_IN,
- PTY2_FN, PTY2_OUT, PTY2_IN_PU, PTY2_IN,
- PTY1_FN, PTY1_OUT, PTY1_IN_PU, PTY1_IN,
- PTY0_FN, PTY0_OUT, PTY0_IN_PU, PTY0_IN }
+ PTY7_FN, PTY7_OUT, 0, PTY7_IN,
+ PTY6_FN, PTY6_OUT, 0, PTY6_IN,
+ PTY5_FN, PTY5_OUT, 0, PTY5_IN,
+ PTY4_FN, PTY4_OUT, 0, PTY4_IN,
+ PTY3_FN, PTY3_OUT, 0, PTY3_IN,
+ PTY2_FN, PTY2_OUT, 0, PTY2_IN,
+ PTY1_FN, PTY1_OUT, 0, PTY1_IN,
+ PTY0_FN, PTY0_OUT, 0, PTY0_IN }
},
{ PINMUX_CFG_REG("PZCR", 0xa405014c, 16, 2) {
- PTZ7_FN, PTZ7_OUT, PTZ7_IN_PU, PTZ7_IN,
- PTZ6_FN, PTZ6_OUT, PTZ6_IN_PU, PTZ6_IN,
- PTZ5_FN, PTZ5_OUT, PTZ5_IN_PU, PTZ5_IN,
- PTZ4_FN, PTZ4_OUT, PTZ4_IN_PU, PTZ4_IN,
- PTZ3_FN, PTZ3_OUT, PTZ3_IN_PU, PTZ3_IN,
- PTZ2_FN, PTZ2_OUT, PTZ2_IN_PU, PTZ2_IN,
- PTZ1_FN, PTZ1_OUT, PTZ1_IN_PU, PTZ1_IN,
- PTZ0_FN, PTZ0_OUT, PTZ0_IN_PU, PTZ0_IN }
+ PTZ7_FN, PTZ7_OUT, 0, PTZ7_IN,
+ PTZ6_FN, PTZ6_OUT, 0, PTZ6_IN,
+ PTZ5_FN, PTZ5_OUT, 0, PTZ5_IN,
+ PTZ4_FN, PTZ4_OUT, 0, PTZ4_IN,
+ PTZ3_FN, PTZ3_OUT, 0, PTZ3_IN,
+ PTZ2_FN, PTZ2_OUT, 0, PTZ2_IN,
+ PTZ1_FN, PTZ1_OUT, 0, PTZ1_IN,
+ PTZ0_FN, PTZ0_OUT, 0, PTZ0_IN }
},
{ PINMUX_CFG_REG("PSELA", 0xa405014e, 16, 1) {
PSA15_0, PSA15_1,
@@ -2210,7 +2164,6 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
const struct sh_pfc_soc_info sh7724_pinmux_info = {
.name = "sh7724_pfc",
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
- .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
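The sh7724 hunks above convert the two-argument PINMUX_GPIO(GPIO_PTxN, PTxN_DATA) entries to a single-argument PINMUX_GPIO(PTxN), and drop the PTxN_IN_PU pull-up states from both pinmux_data[] and the PINMUX_CFG_REG() field lists. A minimal, self-contained sketch of how a one-argument macro can recover both the "GPIO_<pin>" name and the <pin>_DATA enum through stringification and token pasting; the struct fields and stand-in enum values below are illustrative assumptions, not the real sh_pfc.h definitions:

/*
 * Illustrative sketch only (not the kernel's sh_pfc.h): a one-argument
 * PINMUX_GPIO() derives both the name string and the data enum from the
 * bare pin token, which is why the second argument disappears above.
 */
#include <stdio.h>

enum { PTA7_DATA = 1, PTA6_DATA };	/* stand-in enum values */

struct pin_desc {
	const char *name;
	int enum_id;
};

#define PINMUX_GPIO(pin)	{ .name = "GPIO_" #pin, .enum_id = pin##_DATA }

static const struct pin_desc pins[] = {
	PINMUX_GPIO(PTA7),	/* expands to { "GPIO_PTA7", PTA7_DATA } */
	PINMUX_GPIO(PTA6),
};

int main(void)
{
	printf("%s = %d\n", pins[0].name, pins[0].enum_id);
	return 0;
}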
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7734.c b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
index 2fd5b7d4cb9..ec0c47c4f10 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7734.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7734.c
@@ -14,40 +14,30 @@
#include "sh_pfc.h"
-#define CPU_32_PORT5(fn, pfx, sfx) \
- PORT_1(fn, pfx##0, sfx), PORT_1(fn, pfx##1, sfx), \
- PORT_1(fn, pfx##2, sfx), PORT_1(fn, pfx##3, sfx), \
- PORT_1(fn, pfx##4, sfx), PORT_1(fn, pfx##5, sfx), \
- PORT_1(fn, pfx##6, sfx), PORT_1(fn, pfx##7, sfx), \
- PORT_1(fn, pfx##8, sfx), PORT_1(fn, pfx##9, sfx), \
- PORT_1(fn, pfx##10, sfx), PORT_1(fn, pfx##11, sfx)
-
-/* GPSR0 - GPSR5 */
-#define CPU_ALL_PORT(fn, pfx, sfx) \
- PORT_32(fn, pfx##_0_, sfx), \
- PORT_32(fn, pfx##_1_, sfx), \
- PORT_32(fn, pfx##_2_, sfx), \
- PORT_32(fn, pfx##_3_, sfx), \
- PORT_32(fn, pfx##_4_, sfx), \
- CPU_32_PORT5(fn, pfx##_5_, sfx)
-
-#define _GP_GPIO(pfx, sfx) PINMUX_GPIO(GPIO_GP##pfx, GP##pfx##_DATA)
-#define _GP_DATA(pfx, sfx) PINMUX_DATA(GP##pfx##_DATA, GP##pfx##_FN, \
- GP##pfx##_IN, GP##pfx##_OUT)
-
-#define _GP_INOUTSEL(pfx, sfx) GP##pfx##_IN, GP##pfx##_OUT
-#define _GP_INDT(pfx, sfx) GP##pfx##_DATA
-
-#define GP_ALL(str) CPU_ALL_PORT(_PORT_ALL, GP, str)
-#define PINMUX_GPIO_GP_ALL() CPU_ALL_PORT(_GP_GPIO, , unused)
-#define PINMUX_DATA_GP_ALL() CPU_ALL_PORT(_GP_DATA, , unused)
-
-#define GP_INOUTSEL(bank) PORT_32_REV(_GP_INOUTSEL, _##bank##_, unused)
-#define GP_INDT(bank) PORT_32_REV(_GP_INDT, _##bank##_, unused)
-
-#define PINMUX_IPSR_DATA(ipsr, fn) PINMUX_DATA(fn##_MARK, FN_##ipsr, FN_##fn)
-#define PINMUX_IPSR_MODSEL_DATA(ipsr, fn, ms) PINMUX_DATA(fn##_MARK, FN_##ms, \
- FN_##ipsr, FN_##fn)
+#define PORT_GP_12(bank, fn, sfx) \
+ PORT_GP_1(bank, 0, fn, sfx), PORT_GP_1(bank, 1, fn, sfx), \
+ PORT_GP_1(bank, 2, fn, sfx), PORT_GP_1(bank, 3, fn, sfx), \
+ PORT_GP_1(bank, 4, fn, sfx), PORT_GP_1(bank, 5, fn, sfx), \
+ PORT_GP_1(bank, 6, fn, sfx), PORT_GP_1(bank, 7, fn, sfx), \
+ PORT_GP_1(bank, 8, fn, sfx), PORT_GP_1(bank, 9, fn, sfx), \
+ PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx)
+
+#define CPU_ALL_PORT(fn, sfx) \
+ PORT_GP_32(0, fn, sfx), \
+ PORT_GP_32(1, fn, sfx), \
+ PORT_GP_32(2, fn, sfx), \
+ PORT_GP_32(3, fn, sfx), \
+ PORT_GP_32(4, fn, sfx), \
+ PORT_GP_12(5, fn, sfx)
+
+#undef _GP_DATA
+#define _GP_DATA(bank, pin, name, sfx) \
+ PINMUX_DATA(name##_DATA, name##_FN, name##_IN, name##_OUT)
+
+#define _GP_INOUTSEL(bank, pin, name, sfx) name##_IN, name##_OUT
+#define _GP_INDT(bank, pin, name, sfx) name##_DATA
+#define GP_INOUTSEL(bank) PORT_GP_32_REV(bank, _GP_INOUTSEL, unused)
+#define GP_INDT(bank) PORT_GP_32_REV(bank, _GP_INDT, unused)
enum {
PINMUX_RESERVED = 0,
@@ -592,7 +582,7 @@ enum {
PINMUX_MARK_END,
};
-static const pinmux_enum_t pinmux_data[] = {
+static const u16 pinmux_data[] = {
PINMUX_DATA_GP_ALL(), /* PINMUX_DATA(GP_M_N_DATA, GP_M_N_FN...), */
PINMUX_DATA(CLKOUT_MARK, FN_CLKOUT),
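The sh7734 hunk above moves the bank macros to the callback style: each PORT_GP_1(bank, pin, fn, sfx) entry hands fn the bank number, the pin index, and a pasted GP_<bank>_<pin> name, so one pin list can generate the GPIO table, the data table, or a register field list just by swapping fn. A standalone sketch of that pattern, with an assumed PORT_GP_1() expansion rather than the verbatim sh_pfc.h one:

/*
 * Callback-style port macros, sketched under the assumption that
 * PORT_GP_1() simply forwards a pasted GP_<bank>_<pin> token to fn().
 * Swapping the fn callback reuses one pin list for different tables.
 */
#include <stdio.h>

#define PORT_GP_1(bank, pin, fn, sfx)	fn(bank, pin, GP_##bank##_##pin, sfx)
#define PORT_GP_4(bank, fn, sfx)					\
	PORT_GP_1(bank, 0, fn, sfx), PORT_GP_1(bank, 1, fn, sfx),	\
	PORT_GP_1(bank, 2, fn, sfx), PORT_GP_1(bank, 3, fn, sfx)

/* Two callbacks: one emits the pin's name, one a linear pin number. */
#define _GP_NAME(bank, pin, name, sfx)	#name
#define _GP_NUM(bank, pin, name, sfx)	(32 * (bank) + (pin))

static const char *const names[] = { PORT_GP_4(0, _GP_NAME, unused) };
static const int numbers[]       = { PORT_GP_4(0, _GP_NUM, unused) };

int main(void)
{
	for (unsigned int i = 0; i < sizeof(numbers) / sizeof(numbers[0]); i++)
		printf("%s -> %d\n", names[i], numbers[i]);
	return 0;
}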
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7757.c b/drivers/pinctrl/sh-pfc/pfc-sh7757.c
index e074230e624..33d75e51091 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7757.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7757.c
@@ -132,46 +132,6 @@ enum {
PTZ3_IN, PTZ2_IN, PTZ1_IN, PTZ0_IN,
PINMUX_INPUT_END,
- PINMUX_INPUT_PULLUP_BEGIN,
- PTA7_IN_PU, PTA6_IN_PU, PTA5_IN_PU, PTA4_IN_PU,
- PTA3_IN_PU, PTA2_IN_PU, PTA1_IN_PU, PTA0_IN_PU,
- PTD7_IN_PU, PTD6_IN_PU, PTD5_IN_PU, PTD4_IN_PU,
- PTD3_IN_PU, PTD2_IN_PU, PTD1_IN_PU, PTD0_IN_PU,
- PTE7_IN_PU, PTE6_IN_PU, PTE5_IN_PU, PTE4_IN_PU,
- PTE3_IN_PU, PTE2_IN_PU, PTE1_IN_PU, PTE0_IN_PU,
- PTF7_IN_PU, PTF6_IN_PU, PTF5_IN_PU, PTF4_IN_PU,
- PTF3_IN_PU, PTF2_IN_PU, PTF1_IN_PU, PTF0_IN_PU,
- PTG7_IN_PU, PTG6_IN_PU, PTG4_IN_PU,
- PTH7_IN_PU, PTH6_IN_PU, PTH5_IN_PU, PTH4_IN_PU,
- PTH3_IN_PU, PTH2_IN_PU, PTH1_IN_PU, PTH0_IN_PU,
- PTI7_IN_PU, PTI6_IN_PU, PTI4_IN_PU,
- PTI3_IN_PU, PTI2_IN_PU, PTI1_IN_PU, PTI0_IN_PU,
- PTJ6_IN_PU, PTJ5_IN_PU, PTJ4_IN_PU,
- PTJ3_IN_PU, PTJ2_IN_PU, PTJ1_IN_PU, PTJ0_IN_PU,
- PTK7_IN_PU, PTK6_IN_PU, PTK5_IN_PU, PTK4_IN_PU,
- PTK3_IN_PU, PTK2_IN_PU, PTK1_IN_PU, PTK0_IN_PU,
- PTL6_IN_PU, PTL5_IN_PU, PTL4_IN_PU,
- PTL3_IN_PU, PTL2_IN_PU, PTL1_IN_PU, PTL0_IN_PU,
- PTM7_IN_PU, PTM6_IN_PU, PTM5_IN_PU, PTM4_IN_PU,
- PTN4_IN_PU,
- PTN3_IN_PU, PTN2_IN_PU, PTN1_IN_PU, PTN0_IN_PU,
- PTO7_IN_PU, PTO6_IN_PU, PTO5_IN_PU, PTO4_IN_PU,
- PTO3_IN_PU, PTO2_IN_PU, PTO1_IN_PU, PTO0_IN_PU,
- PTT7_IN_PU, PTT6_IN_PU, PTT5_IN_PU, PTT4_IN_PU,
- PTT3_IN_PU, PTT2_IN_PU, PTT1_IN_PU, PTT0_IN_PU,
- PTU7_IN_PU, PTU6_IN_PU, PTU5_IN_PU, PTU4_IN_PU,
- PTU3_IN_PU, PTU2_IN_PU, PTU1_IN_PU, PTU0_IN_PU,
- PTV7_IN_PU, PTV6_IN_PU, PTV5_IN_PU, PTV4_IN_PU,
- PTV3_IN_PU, PTV2_IN_PU,
- PTW1_IN_PU, PTW0_IN_PU,
- PTX7_IN_PU, PTX6_IN_PU, PTX5_IN_PU, PTX4_IN_PU,
- PTX3_IN_PU, PTX2_IN_PU, PTX1_IN_PU, PTX0_IN_PU,
- PTY7_IN_PU, PTY6_IN_PU, PTY5_IN_PU, PTY4_IN_PU,
- PTY3_IN_PU, PTY2_IN_PU, PTY1_IN_PU, PTY0_IN_PU,
- PTZ7_IN_PU, PTZ6_IN_PU, PTZ5_IN_PU, PTZ4_IN_PU,
- PTZ3_IN_PU, PTZ2_IN_PU, PTZ1_IN_PU, PTZ0_IN_PU,
- PINMUX_INPUT_PULLUP_END,
-
PINMUX_OUTPUT_BEGIN,
PTA7_OUT, PTA6_OUT, PTA5_OUT, PTA4_OUT,
PTA3_OUT, PTA2_OUT, PTA1_OUT, PTA0_OUT,
@@ -526,7 +486,7 @@ enum {
PINMUX_MARK_END,
};
-static const pinmux_enum_t pinmux_data[] = {
+static const u16 pinmux_data[] = {
/* PTA GPIO */
PINMUX_DATA(PTA7_DATA, PTA7_IN, PTA7_OUT),
PINMUX_DATA(PTA6_DATA, PTA6_IN, PTA6_OUT),
@@ -1116,260 +1076,260 @@ static const pinmux_enum_t pinmux_data[] = {
static struct sh_pfc_pin pinmux_pins[] = {
/* PTA */
- PINMUX_GPIO(GPIO_PTA7, PTA7_DATA),
- PINMUX_GPIO(GPIO_PTA6, PTA6_DATA),
- PINMUX_GPIO(GPIO_PTA5, PTA5_DATA),
- PINMUX_GPIO(GPIO_PTA4, PTA4_DATA),
- PINMUX_GPIO(GPIO_PTA3, PTA3_DATA),
- PINMUX_GPIO(GPIO_PTA2, PTA2_DATA),
- PINMUX_GPIO(GPIO_PTA1, PTA1_DATA),
- PINMUX_GPIO(GPIO_PTA0, PTA0_DATA),
+ PINMUX_GPIO(PTA7),
+ PINMUX_GPIO(PTA6),
+ PINMUX_GPIO(PTA5),
+ PINMUX_GPIO(PTA4),
+ PINMUX_GPIO(PTA3),
+ PINMUX_GPIO(PTA2),
+ PINMUX_GPIO(PTA1),
+ PINMUX_GPIO(PTA0),
/* PTB */
- PINMUX_GPIO(GPIO_PTB7, PTB7_DATA),
- PINMUX_GPIO(GPIO_PTB6, PTB6_DATA),
- PINMUX_GPIO(GPIO_PTB5, PTB5_DATA),
- PINMUX_GPIO(GPIO_PTB4, PTB4_DATA),
- PINMUX_GPIO(GPIO_PTB3, PTB3_DATA),
- PINMUX_GPIO(GPIO_PTB2, PTB2_DATA),
- PINMUX_GPIO(GPIO_PTB1, PTB1_DATA),
- PINMUX_GPIO(GPIO_PTB0, PTB0_DATA),
+ PINMUX_GPIO(PTB7),
+ PINMUX_GPIO(PTB6),
+ PINMUX_GPIO(PTB5),
+ PINMUX_GPIO(PTB4),
+ PINMUX_GPIO(PTB3),
+ PINMUX_GPIO(PTB2),
+ PINMUX_GPIO(PTB1),
+ PINMUX_GPIO(PTB0),
/* PTC */
- PINMUX_GPIO(GPIO_PTC7, PTC7_DATA),
- PINMUX_GPIO(GPIO_PTC6, PTC6_DATA),
- PINMUX_GPIO(GPIO_PTC5, PTC5_DATA),
- PINMUX_GPIO(GPIO_PTC4, PTC4_DATA),
- PINMUX_GPIO(GPIO_PTC3, PTC3_DATA),
- PINMUX_GPIO(GPIO_PTC2, PTC2_DATA),
- PINMUX_GPIO(GPIO_PTC1, PTC1_DATA),
- PINMUX_GPIO(GPIO_PTC0, PTC0_DATA),
+ PINMUX_GPIO(PTC7),
+ PINMUX_GPIO(PTC6),
+ PINMUX_GPIO(PTC5),
+ PINMUX_GPIO(PTC4),
+ PINMUX_GPIO(PTC3),
+ PINMUX_GPIO(PTC2),
+ PINMUX_GPIO(PTC1),
+ PINMUX_GPIO(PTC0),
/* PTD */
- PINMUX_GPIO(GPIO_PTD7, PTD7_DATA),
- PINMUX_GPIO(GPIO_PTD6, PTD6_DATA),
- PINMUX_GPIO(GPIO_PTD5, PTD5_DATA),
- PINMUX_GPIO(GPIO_PTD4, PTD4_DATA),
- PINMUX_GPIO(GPIO_PTD3, PTD3_DATA),
- PINMUX_GPIO(GPIO_PTD2, PTD2_DATA),
- PINMUX_GPIO(GPIO_PTD1, PTD1_DATA),
- PINMUX_GPIO(GPIO_PTD0, PTD0_DATA),
+ PINMUX_GPIO(PTD7),
+ PINMUX_GPIO(PTD6),
+ PINMUX_GPIO(PTD5),
+ PINMUX_GPIO(PTD4),
+ PINMUX_GPIO(PTD3),
+ PINMUX_GPIO(PTD2),
+ PINMUX_GPIO(PTD1),
+ PINMUX_GPIO(PTD0),
/* PTE */
- PINMUX_GPIO(GPIO_PTE7, PTE7_DATA),
- PINMUX_GPIO(GPIO_PTE6, PTE6_DATA),
- PINMUX_GPIO(GPIO_PTE5, PTE5_DATA),
- PINMUX_GPIO(GPIO_PTE4, PTE4_DATA),
- PINMUX_GPIO(GPIO_PTE3, PTE3_DATA),
- PINMUX_GPIO(GPIO_PTE2, PTE2_DATA),
- PINMUX_GPIO(GPIO_PTE1, PTE1_DATA),
- PINMUX_GPIO(GPIO_PTE0, PTE0_DATA),
+ PINMUX_GPIO(PTE7),
+ PINMUX_GPIO(PTE6),
+ PINMUX_GPIO(PTE5),
+ PINMUX_GPIO(PTE4),
+ PINMUX_GPIO(PTE3),
+ PINMUX_GPIO(PTE2),
+ PINMUX_GPIO(PTE1),
+ PINMUX_GPIO(PTE0),
/* PTF */
- PINMUX_GPIO(GPIO_PTF7, PTF7_DATA),
- PINMUX_GPIO(GPIO_PTF6, PTF6_DATA),
- PINMUX_GPIO(GPIO_PTF5, PTF5_DATA),
- PINMUX_GPIO(GPIO_PTF4, PTF4_DATA),
- PINMUX_GPIO(GPIO_PTF3, PTF3_DATA),
- PINMUX_GPIO(GPIO_PTF2, PTF2_DATA),
- PINMUX_GPIO(GPIO_PTF1, PTF1_DATA),
- PINMUX_GPIO(GPIO_PTF0, PTF0_DATA),
+ PINMUX_GPIO(PTF7),
+ PINMUX_GPIO(PTF6),
+ PINMUX_GPIO(PTF5),
+ PINMUX_GPIO(PTF4),
+ PINMUX_GPIO(PTF3),
+ PINMUX_GPIO(PTF2),
+ PINMUX_GPIO(PTF1),
+ PINMUX_GPIO(PTF0),
/* PTG */
- PINMUX_GPIO(GPIO_PTG7, PTG7_DATA),
- PINMUX_GPIO(GPIO_PTG6, PTG6_DATA),
- PINMUX_GPIO(GPIO_PTG5, PTG5_DATA),
- PINMUX_GPIO(GPIO_PTG4, PTG4_DATA),
- PINMUX_GPIO(GPIO_PTG3, PTG3_DATA),
- PINMUX_GPIO(GPIO_PTG2, PTG2_DATA),
- PINMUX_GPIO(GPIO_PTG1, PTG1_DATA),
- PINMUX_GPIO(GPIO_PTG0, PTG0_DATA),
+ PINMUX_GPIO(PTG7),
+ PINMUX_GPIO(PTG6),
+ PINMUX_GPIO(PTG5),
+ PINMUX_GPIO(PTG4),
+ PINMUX_GPIO(PTG3),
+ PINMUX_GPIO(PTG2),
+ PINMUX_GPIO(PTG1),
+ PINMUX_GPIO(PTG0),
/* PTH */
- PINMUX_GPIO(GPIO_PTH7, PTH7_DATA),
- PINMUX_GPIO(GPIO_PTH6, PTH6_DATA),
- PINMUX_GPIO(GPIO_PTH5, PTH5_DATA),
- PINMUX_GPIO(GPIO_PTH4, PTH4_DATA),
- PINMUX_GPIO(GPIO_PTH3, PTH3_DATA),
- PINMUX_GPIO(GPIO_PTH2, PTH2_DATA),
- PINMUX_GPIO(GPIO_PTH1, PTH1_DATA),
- PINMUX_GPIO(GPIO_PTH0, PTH0_DATA),
+ PINMUX_GPIO(PTH7),
+ PINMUX_GPIO(PTH6),
+ PINMUX_GPIO(PTH5),
+ PINMUX_GPIO(PTH4),
+ PINMUX_GPIO(PTH3),
+ PINMUX_GPIO(PTH2),
+ PINMUX_GPIO(PTH1),
+ PINMUX_GPIO(PTH0),
/* PTI */
- PINMUX_GPIO(GPIO_PTI7, PTI7_DATA),
- PINMUX_GPIO(GPIO_PTI6, PTI6_DATA),
- PINMUX_GPIO(GPIO_PTI5, PTI5_DATA),
- PINMUX_GPIO(GPIO_PTI4, PTI4_DATA),
- PINMUX_GPIO(GPIO_PTI3, PTI3_DATA),
- PINMUX_GPIO(GPIO_PTI2, PTI2_DATA),
- PINMUX_GPIO(GPIO_PTI1, PTI1_DATA),
- PINMUX_GPIO(GPIO_PTI0, PTI0_DATA),
+ PINMUX_GPIO(PTI7),
+ PINMUX_GPIO(PTI6),
+ PINMUX_GPIO(PTI5),
+ PINMUX_GPIO(PTI4),
+ PINMUX_GPIO(PTI3),
+ PINMUX_GPIO(PTI2),
+ PINMUX_GPIO(PTI1),
+ PINMUX_GPIO(PTI0),
/* PTJ */
- PINMUX_GPIO(GPIO_PTJ6, PTJ6_DATA),
- PINMUX_GPIO(GPIO_PTJ5, PTJ5_DATA),
- PINMUX_GPIO(GPIO_PTJ4, PTJ4_DATA),
- PINMUX_GPIO(GPIO_PTJ3, PTJ3_DATA),
- PINMUX_GPIO(GPIO_PTJ2, PTJ2_DATA),
- PINMUX_GPIO(GPIO_PTJ1, PTJ1_DATA),
- PINMUX_GPIO(GPIO_PTJ0, PTJ0_DATA),
+ PINMUX_GPIO(PTJ6),
+ PINMUX_GPIO(PTJ5),
+ PINMUX_GPIO(PTJ4),
+ PINMUX_GPIO(PTJ3),
+ PINMUX_GPIO(PTJ2),
+ PINMUX_GPIO(PTJ1),
+ PINMUX_GPIO(PTJ0),
/* PTK */
- PINMUX_GPIO(GPIO_PTK7, PTK7_DATA),
- PINMUX_GPIO(GPIO_PTK6, PTK6_DATA),
- PINMUX_GPIO(GPIO_PTK5, PTK5_DATA),
- PINMUX_GPIO(GPIO_PTK4, PTK4_DATA),
- PINMUX_GPIO(GPIO_PTK3, PTK3_DATA),
- PINMUX_GPIO(GPIO_PTK2, PTK2_DATA),
- PINMUX_GPIO(GPIO_PTK1, PTK1_DATA),
- PINMUX_GPIO(GPIO_PTK0, PTK0_DATA),
+ PINMUX_GPIO(PTK7),
+ PINMUX_GPIO(PTK6),
+ PINMUX_GPIO(PTK5),
+ PINMUX_GPIO(PTK4),
+ PINMUX_GPIO(PTK3),
+ PINMUX_GPIO(PTK2),
+ PINMUX_GPIO(PTK1),
+ PINMUX_GPIO(PTK0),
/* PTL */
- PINMUX_GPIO(GPIO_PTL6, PTL6_DATA),
- PINMUX_GPIO(GPIO_PTL5, PTL5_DATA),
- PINMUX_GPIO(GPIO_PTL4, PTL4_DATA),
- PINMUX_GPIO(GPIO_PTL3, PTL3_DATA),
- PINMUX_GPIO(GPIO_PTL2, PTL2_DATA),
- PINMUX_GPIO(GPIO_PTL1, PTL1_DATA),
- PINMUX_GPIO(GPIO_PTL0, PTL0_DATA),
+ PINMUX_GPIO(PTL6),
+ PINMUX_GPIO(PTL5),
+ PINMUX_GPIO(PTL4),
+ PINMUX_GPIO(PTL3),
+ PINMUX_GPIO(PTL2),
+ PINMUX_GPIO(PTL1),
+ PINMUX_GPIO(PTL0),
/* PTM */
- PINMUX_GPIO(GPIO_PTM7, PTM7_DATA),
- PINMUX_GPIO(GPIO_PTM6, PTM6_DATA),
- PINMUX_GPIO(GPIO_PTM5, PTM5_DATA),
- PINMUX_GPIO(GPIO_PTM4, PTM4_DATA),
- PINMUX_GPIO(GPIO_PTM3, PTM3_DATA),
- PINMUX_GPIO(GPIO_PTM2, PTM2_DATA),
- PINMUX_GPIO(GPIO_PTM1, PTM1_DATA),
- PINMUX_GPIO(GPIO_PTM0, PTM0_DATA),
+ PINMUX_GPIO(PTM7),
+ PINMUX_GPIO(PTM6),
+ PINMUX_GPIO(PTM5),
+ PINMUX_GPIO(PTM4),
+ PINMUX_GPIO(PTM3),
+ PINMUX_GPIO(PTM2),
+ PINMUX_GPIO(PTM1),
+ PINMUX_GPIO(PTM0),
/* PTN */
- PINMUX_GPIO(GPIO_PTN6, PTN6_DATA),
- PINMUX_GPIO(GPIO_PTN5, PTN5_DATA),
- PINMUX_GPIO(GPIO_PTN4, PTN4_DATA),
- PINMUX_GPIO(GPIO_PTN3, PTN3_DATA),
- PINMUX_GPIO(GPIO_PTN2, PTN2_DATA),
- PINMUX_GPIO(GPIO_PTN1, PTN1_DATA),
- PINMUX_GPIO(GPIO_PTN0, PTN0_DATA),
+ PINMUX_GPIO(PTN6),
+ PINMUX_GPIO(PTN5),
+ PINMUX_GPIO(PTN4),
+ PINMUX_GPIO(PTN3),
+ PINMUX_GPIO(PTN2),
+ PINMUX_GPIO(PTN1),
+ PINMUX_GPIO(PTN0),
/* PTO */
- PINMUX_GPIO(GPIO_PTO7, PTO7_DATA),
- PINMUX_GPIO(GPIO_PTO6, PTO6_DATA),
- PINMUX_GPIO(GPIO_PTO5, PTO5_DATA),
- PINMUX_GPIO(GPIO_PTO4, PTO4_DATA),
- PINMUX_GPIO(GPIO_PTO3, PTO3_DATA),
- PINMUX_GPIO(GPIO_PTO2, PTO2_DATA),
- PINMUX_GPIO(GPIO_PTO1, PTO1_DATA),
- PINMUX_GPIO(GPIO_PTO0, PTO0_DATA),
+ PINMUX_GPIO(PTO7),
+ PINMUX_GPIO(PTO6),
+ PINMUX_GPIO(PTO5),
+ PINMUX_GPIO(PTO4),
+ PINMUX_GPIO(PTO3),
+ PINMUX_GPIO(PTO2),
+ PINMUX_GPIO(PTO1),
+ PINMUX_GPIO(PTO0),
/* PTP */
- PINMUX_GPIO(GPIO_PTP7, PTP7_DATA),
- PINMUX_GPIO(GPIO_PTP6, PTP6_DATA),
- PINMUX_GPIO(GPIO_PTP5, PTP5_DATA),
- PINMUX_GPIO(GPIO_PTP4, PTP4_DATA),
- PINMUX_GPIO(GPIO_PTP3, PTP3_DATA),
- PINMUX_GPIO(GPIO_PTP2, PTP2_DATA),
- PINMUX_GPIO(GPIO_PTP1, PTP1_DATA),
- PINMUX_GPIO(GPIO_PTP0, PTP0_DATA),
+ PINMUX_GPIO(PTP7),
+ PINMUX_GPIO(PTP6),
+ PINMUX_GPIO(PTP5),
+ PINMUX_GPIO(PTP4),
+ PINMUX_GPIO(PTP3),
+ PINMUX_GPIO(PTP2),
+ PINMUX_GPIO(PTP1),
+ PINMUX_GPIO(PTP0),
/* PTQ */
- PINMUX_GPIO(GPIO_PTQ6, PTQ6_DATA),
- PINMUX_GPIO(GPIO_PTQ5, PTQ5_DATA),
- PINMUX_GPIO(GPIO_PTQ4, PTQ4_DATA),
- PINMUX_GPIO(GPIO_PTQ3, PTQ3_DATA),
- PINMUX_GPIO(GPIO_PTQ2, PTQ2_DATA),
- PINMUX_GPIO(GPIO_PTQ1, PTQ1_DATA),
- PINMUX_GPIO(GPIO_PTQ0, PTQ0_DATA),
+ PINMUX_GPIO(PTQ6),
+ PINMUX_GPIO(PTQ5),
+ PINMUX_GPIO(PTQ4),
+ PINMUX_GPIO(PTQ3),
+ PINMUX_GPIO(PTQ2),
+ PINMUX_GPIO(PTQ1),
+ PINMUX_GPIO(PTQ0),
/* PTR */
- PINMUX_GPIO(GPIO_PTR7, PTR7_DATA),
- PINMUX_GPIO(GPIO_PTR6, PTR6_DATA),
- PINMUX_GPIO(GPIO_PTR5, PTR5_DATA),
- PINMUX_GPIO(GPIO_PTR4, PTR4_DATA),
- PINMUX_GPIO(GPIO_PTR3, PTR3_DATA),
- PINMUX_GPIO(GPIO_PTR2, PTR2_DATA),
- PINMUX_GPIO(GPIO_PTR1, PTR1_DATA),
- PINMUX_GPIO(GPIO_PTR0, PTR0_DATA),
+ PINMUX_GPIO(PTR7),
+ PINMUX_GPIO(PTR6),
+ PINMUX_GPIO(PTR5),
+ PINMUX_GPIO(PTR4),
+ PINMUX_GPIO(PTR3),
+ PINMUX_GPIO(PTR2),
+ PINMUX_GPIO(PTR1),
+ PINMUX_GPIO(PTR0),
/* PTS */
- PINMUX_GPIO(GPIO_PTS7, PTS7_DATA),
- PINMUX_GPIO(GPIO_PTS6, PTS6_DATA),
- PINMUX_GPIO(GPIO_PTS5, PTS5_DATA),
- PINMUX_GPIO(GPIO_PTS4, PTS4_DATA),
- PINMUX_GPIO(GPIO_PTS3, PTS3_DATA),
- PINMUX_GPIO(GPIO_PTS2, PTS2_DATA),
- PINMUX_GPIO(GPIO_PTS1, PTS1_DATA),
- PINMUX_GPIO(GPIO_PTS0, PTS0_DATA),
+ PINMUX_GPIO(PTS7),
+ PINMUX_GPIO(PTS6),
+ PINMUX_GPIO(PTS5),
+ PINMUX_GPIO(PTS4),
+ PINMUX_GPIO(PTS3),
+ PINMUX_GPIO(PTS2),
+ PINMUX_GPIO(PTS1),
+ PINMUX_GPIO(PTS0),
/* PTT */
- PINMUX_GPIO(GPIO_PTT7, PTT7_DATA),
- PINMUX_GPIO(GPIO_PTT6, PTT6_DATA),
- PINMUX_GPIO(GPIO_PTT5, PTT5_DATA),
- PINMUX_GPIO(GPIO_PTT4, PTT4_DATA),
- PINMUX_GPIO(GPIO_PTT3, PTT3_DATA),
- PINMUX_GPIO(GPIO_PTT2, PTT2_DATA),
- PINMUX_GPIO(GPIO_PTT1, PTT1_DATA),
- PINMUX_GPIO(GPIO_PTT0, PTT0_DATA),
+ PINMUX_GPIO(PTT7),
+ PINMUX_GPIO(PTT6),
+ PINMUX_GPIO(PTT5),
+ PINMUX_GPIO(PTT4),
+ PINMUX_GPIO(PTT3),
+ PINMUX_GPIO(PTT2),
+ PINMUX_GPIO(PTT1),
+ PINMUX_GPIO(PTT0),
/* PTU */
- PINMUX_GPIO(GPIO_PTU7, PTU7_DATA),
- PINMUX_GPIO(GPIO_PTU6, PTU6_DATA),
- PINMUX_GPIO(GPIO_PTU5, PTU5_DATA),
- PINMUX_GPIO(GPIO_PTU4, PTU4_DATA),
- PINMUX_GPIO(GPIO_PTU3, PTU3_DATA),
- PINMUX_GPIO(GPIO_PTU2, PTU2_DATA),
- PINMUX_GPIO(GPIO_PTU1, PTU1_DATA),
- PINMUX_GPIO(GPIO_PTU0, PTU0_DATA),
+ PINMUX_GPIO(PTU7),
+ PINMUX_GPIO(PTU6),
+ PINMUX_GPIO(PTU5),
+ PINMUX_GPIO(PTU4),
+ PINMUX_GPIO(PTU3),
+ PINMUX_GPIO(PTU2),
+ PINMUX_GPIO(PTU1),
+ PINMUX_GPIO(PTU0),
/* PTV */
- PINMUX_GPIO(GPIO_PTV7, PTV7_DATA),
- PINMUX_GPIO(GPIO_PTV6, PTV6_DATA),
- PINMUX_GPIO(GPIO_PTV5, PTV5_DATA),
- PINMUX_GPIO(GPIO_PTV4, PTV4_DATA),
- PINMUX_GPIO(GPIO_PTV3, PTV3_DATA),
- PINMUX_GPIO(GPIO_PTV2, PTV2_DATA),
- PINMUX_GPIO(GPIO_PTV1, PTV1_DATA),
- PINMUX_GPIO(GPIO_PTV0, PTV0_DATA),
+ PINMUX_GPIO(PTV7),
+ PINMUX_GPIO(PTV6),
+ PINMUX_GPIO(PTV5),
+ PINMUX_GPIO(PTV4),
+ PINMUX_GPIO(PTV3),
+ PINMUX_GPIO(PTV2),
+ PINMUX_GPIO(PTV1),
+ PINMUX_GPIO(PTV0),
/* PTW */
- PINMUX_GPIO(GPIO_PTW7, PTW7_DATA),
- PINMUX_GPIO(GPIO_PTW6, PTW6_DATA),
- PINMUX_GPIO(GPIO_PTW5, PTW5_DATA),
- PINMUX_GPIO(GPIO_PTW4, PTW4_DATA),
- PINMUX_GPIO(GPIO_PTW3, PTW3_DATA),
- PINMUX_GPIO(GPIO_PTW2, PTW2_DATA),
- PINMUX_GPIO(GPIO_PTW1, PTW1_DATA),
- PINMUX_GPIO(GPIO_PTW0, PTW0_DATA),
+ PINMUX_GPIO(PTW7),
+ PINMUX_GPIO(PTW6),
+ PINMUX_GPIO(PTW5),
+ PINMUX_GPIO(PTW4),
+ PINMUX_GPIO(PTW3),
+ PINMUX_GPIO(PTW2),
+ PINMUX_GPIO(PTW1),
+ PINMUX_GPIO(PTW0),
/* PTX */
- PINMUX_GPIO(GPIO_PTX7, PTX7_DATA),
- PINMUX_GPIO(GPIO_PTX6, PTX6_DATA),
- PINMUX_GPIO(GPIO_PTX5, PTX5_DATA),
- PINMUX_GPIO(GPIO_PTX4, PTX4_DATA),
- PINMUX_GPIO(GPIO_PTX3, PTX3_DATA),
- PINMUX_GPIO(GPIO_PTX2, PTX2_DATA),
- PINMUX_GPIO(GPIO_PTX1, PTX1_DATA),
- PINMUX_GPIO(GPIO_PTX0, PTX0_DATA),
+ PINMUX_GPIO(PTX7),
+ PINMUX_GPIO(PTX6),
+ PINMUX_GPIO(PTX5),
+ PINMUX_GPIO(PTX4),
+ PINMUX_GPIO(PTX3),
+ PINMUX_GPIO(PTX2),
+ PINMUX_GPIO(PTX1),
+ PINMUX_GPIO(PTX0),
/* PTY */
- PINMUX_GPIO(GPIO_PTY7, PTY7_DATA),
- PINMUX_GPIO(GPIO_PTY6, PTY6_DATA),
- PINMUX_GPIO(GPIO_PTY5, PTY5_DATA),
- PINMUX_GPIO(GPIO_PTY4, PTY4_DATA),
- PINMUX_GPIO(GPIO_PTY3, PTY3_DATA),
- PINMUX_GPIO(GPIO_PTY2, PTY2_DATA),
- PINMUX_GPIO(GPIO_PTY1, PTY1_DATA),
- PINMUX_GPIO(GPIO_PTY0, PTY0_DATA),
+ PINMUX_GPIO(PTY7),
+ PINMUX_GPIO(PTY6),
+ PINMUX_GPIO(PTY5),
+ PINMUX_GPIO(PTY4),
+ PINMUX_GPIO(PTY3),
+ PINMUX_GPIO(PTY2),
+ PINMUX_GPIO(PTY1),
+ PINMUX_GPIO(PTY0),
/* PTZ */
- PINMUX_GPIO(GPIO_PTZ7, PTZ7_DATA),
- PINMUX_GPIO(GPIO_PTZ6, PTZ6_DATA),
- PINMUX_GPIO(GPIO_PTZ5, PTZ5_DATA),
- PINMUX_GPIO(GPIO_PTZ4, PTZ4_DATA),
- PINMUX_GPIO(GPIO_PTZ3, PTZ3_DATA),
- PINMUX_GPIO(GPIO_PTZ2, PTZ2_DATA),
- PINMUX_GPIO(GPIO_PTZ1, PTZ1_DATA),
- PINMUX_GPIO(GPIO_PTZ0, PTZ0_DATA),
+ PINMUX_GPIO(PTZ7),
+ PINMUX_GPIO(PTZ6),
+ PINMUX_GPIO(PTZ5),
+ PINMUX_GPIO(PTZ4),
+ PINMUX_GPIO(PTZ3),
+ PINMUX_GPIO(PTZ2),
+ PINMUX_GPIO(PTZ1),
+ PINMUX_GPIO(PTZ0),
};
#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
@@ -1728,14 +1688,14 @@ static const struct pinmux_func pinmux_func_gpios[] = {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PACR", 0xffec0000, 16, 2) {
- PTA7_FN, PTA7_OUT, PTA7_IN, PTA7_IN_PU,
- PTA6_FN, PTA6_OUT, PTA6_IN, PTA6_IN_PU,
- PTA5_FN, PTA5_OUT, PTA5_IN, PTA5_IN_PU,
- PTA4_FN, PTA4_OUT, PTA4_IN, PTA4_IN_PU,
- PTA3_FN, PTA3_OUT, PTA3_IN, PTA3_IN_PU,
- PTA2_FN, PTA2_OUT, PTA2_IN, PTA2_IN_PU,
- PTA1_FN, PTA1_OUT, PTA1_IN, PTA1_IN_PU,
- PTA0_FN, PTA0_OUT, PTA0_IN, PTA0_IN_PU }
+ PTA7_FN, PTA7_OUT, PTA7_IN, 0,
+ PTA6_FN, PTA6_OUT, PTA6_IN, 0,
+ PTA5_FN, PTA5_OUT, PTA5_IN, 0,
+ PTA4_FN, PTA4_OUT, PTA4_IN, 0,
+ PTA3_FN, PTA3_OUT, PTA3_IN, 0,
+ PTA2_FN, PTA2_OUT, PTA2_IN, 0,
+ PTA1_FN, PTA1_OUT, PTA1_IN, 0,
+ PTA0_FN, PTA0_OUT, PTA0_IN, 0 }
},
{ PINMUX_CFG_REG("PBCR", 0xffec0002, 16, 2) {
PTB7_FN, PTB7_OUT, PTB7_IN, 0,
@@ -1758,100 +1718,100 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTC0_FN, PTC0_OUT, PTC0_IN, 0 }
},
{ PINMUX_CFG_REG("PDCR", 0xffec0006, 16, 2) {
- PTD7_FN, PTD7_OUT, PTD7_IN, PTD7_IN_PU,
- PTD6_FN, PTD6_OUT, PTD6_IN, PTD6_IN_PU,
- PTD5_FN, PTD5_OUT, PTD5_IN, PTD5_IN_PU,
- PTD4_FN, PTD4_OUT, PTD4_IN, PTD4_IN_PU,
- PTD3_FN, PTD3_OUT, PTD3_IN, PTD3_IN_PU,
- PTD2_FN, PTD2_OUT, PTD2_IN, PTD2_IN_PU,
- PTD1_FN, PTD1_OUT, PTD1_IN, PTD1_IN_PU,
- PTD0_FN, PTD0_OUT, PTD0_IN, PTD0_IN_PU }
+ PTD7_FN, PTD7_OUT, PTD7_IN, 0,
+ PTD6_FN, PTD6_OUT, PTD6_IN, 0,
+ PTD5_FN, PTD5_OUT, PTD5_IN, 0,
+ PTD4_FN, PTD4_OUT, PTD4_IN, 0,
+ PTD3_FN, PTD3_OUT, PTD3_IN, 0,
+ PTD2_FN, PTD2_OUT, PTD2_IN, 0,
+ PTD1_FN, PTD1_OUT, PTD1_IN, 0,
+ PTD0_FN, PTD0_OUT, PTD0_IN, 0 }
},
{ PINMUX_CFG_REG("PECR", 0xffec0008, 16, 2) {
- PTE7_FN, PTE7_OUT, PTE7_IN, PTE7_IN_PU,
- PTE6_FN, PTE6_OUT, PTE6_IN, PTE6_IN_PU,
- PTE5_FN, PTE5_OUT, PTE5_IN, PTE5_IN_PU,
- PTE4_FN, PTE4_OUT, PTE4_IN, PTE4_IN_PU,
- PTE3_FN, PTE3_OUT, PTE3_IN, PTE3_IN_PU,
- PTE2_FN, PTE2_OUT, PTE2_IN, PTE2_IN_PU,
- PTE1_FN, PTE1_OUT, PTE1_IN, PTE1_IN_PU,
- PTE0_FN, PTE0_OUT, PTE0_IN, PTE0_IN_PU }
+ PTE7_FN, PTE7_OUT, PTE7_IN, 0,
+ PTE6_FN, PTE6_OUT, PTE6_IN, 0,
+ PTE5_FN, PTE5_OUT, PTE5_IN, 0,
+ PTE4_FN, PTE4_OUT, PTE4_IN, 0,
+ PTE3_FN, PTE3_OUT, PTE3_IN, 0,
+ PTE2_FN, PTE2_OUT, PTE2_IN, 0,
+ PTE1_FN, PTE1_OUT, PTE1_IN, 0,
+ PTE0_FN, PTE0_OUT, PTE0_IN, 0 }
},
{ PINMUX_CFG_REG("PFCR", 0xffec000a, 16, 2) {
- PTF7_FN, PTF7_OUT, PTF7_IN, PTF7_IN_PU,
- PTF6_FN, PTF6_OUT, PTF6_IN, PTF6_IN_PU,
- PTF5_FN, PTF5_OUT, PTF5_IN, PTF5_IN_PU,
- PTF4_FN, PTF4_OUT, PTF4_IN, PTF4_IN_PU,
- PTF3_FN, PTF3_OUT, PTF3_IN, PTF3_IN_PU,
- PTF2_FN, PTF2_OUT, PTF2_IN, PTF2_IN_PU,
- PTF1_FN, PTF1_OUT, PTF1_IN, PTF1_IN_PU,
- PTF0_FN, PTF0_OUT, PTF0_IN, PTF0_IN_PU }
+ PTF7_FN, PTF7_OUT, PTF7_IN, 0,
+ PTF6_FN, PTF6_OUT, PTF6_IN, 0,
+ PTF5_FN, PTF5_OUT, PTF5_IN, 0,
+ PTF4_FN, PTF4_OUT, PTF4_IN, 0,
+ PTF3_FN, PTF3_OUT, PTF3_IN, 0,
+ PTF2_FN, PTF2_OUT, PTF2_IN, 0,
+ PTF1_FN, PTF1_OUT, PTF1_IN, 0,
+ PTF0_FN, PTF0_OUT, PTF0_IN, 0 }
},
{ PINMUX_CFG_REG("PGCR", 0xffec000c, 16, 2) {
- PTG7_FN, PTG7_OUT, PTG7_IN, PTG7_IN_PU ,
- PTG6_FN, PTG6_OUT, PTG6_IN, PTG6_IN_PU ,
+ PTG7_FN, PTG7_OUT, PTG7_IN, 0,
+ PTG6_FN, PTG6_OUT, PTG6_IN, 0,
PTG5_FN, PTG5_OUT, PTG5_IN, 0,
- PTG4_FN, PTG4_OUT, PTG4_IN, PTG4_IN_PU ,
+ PTG4_FN, PTG4_OUT, PTG4_IN, 0,
PTG3_FN, PTG3_OUT, PTG3_IN, 0,
PTG2_FN, PTG2_OUT, PTG2_IN, 0,
PTG1_FN, PTG1_OUT, PTG1_IN, 0,
PTG0_FN, PTG0_OUT, PTG0_IN, 0 }
},
{ PINMUX_CFG_REG("PHCR", 0xffec000e, 16, 2) {
- PTH7_FN, PTH7_OUT, PTH7_IN, PTH7_IN_PU,
- PTH6_FN, PTH6_OUT, PTH6_IN, PTH6_IN_PU,
- PTH5_FN, PTH5_OUT, PTH5_IN, PTH5_IN_PU,
- PTH4_FN, PTH4_OUT, PTH4_IN, PTH4_IN_PU,
- PTH3_FN, PTH3_OUT, PTH3_IN, PTH3_IN_PU,
- PTH2_FN, PTH2_OUT, PTH2_IN, PTH2_IN_PU,
- PTH1_FN, PTH1_OUT, PTH1_IN, PTH1_IN_PU,
- PTH0_FN, PTH0_OUT, PTH0_IN, PTH0_IN_PU }
+ PTH7_FN, PTH7_OUT, PTH7_IN, 0,
+ PTH6_FN, PTH6_OUT, PTH6_IN, 0,
+ PTH5_FN, PTH5_OUT, PTH5_IN, 0,
+ PTH4_FN, PTH4_OUT, PTH4_IN, 0,
+ PTH3_FN, PTH3_OUT, PTH3_IN, 0,
+ PTH2_FN, PTH2_OUT, PTH2_IN, 0,
+ PTH1_FN, PTH1_OUT, PTH1_IN, 0,
+ PTH0_FN, PTH0_OUT, PTH0_IN, 0 }
},
{ PINMUX_CFG_REG("PICR", 0xffec0010, 16, 2) {
- PTI7_FN, PTI7_OUT, PTI7_IN, PTI7_IN_PU,
- PTI6_FN, PTI6_OUT, PTI6_IN, PTI6_IN_PU,
+ PTI7_FN, PTI7_OUT, PTI7_IN, 0,
+ PTI6_FN, PTI6_OUT, PTI6_IN, 0,
PTI5_FN, PTI5_OUT, PTI5_IN, 0,
- PTI4_FN, PTI4_OUT, PTI4_IN, PTI4_IN_PU,
- PTI3_FN, PTI3_OUT, PTI3_IN, PTI3_IN_PU,
- PTI2_FN, PTI2_OUT, PTI2_IN, PTI2_IN_PU,
- PTI1_FN, PTI1_OUT, PTI1_IN, PTI1_IN_PU,
- PTI0_FN, PTI0_OUT, PTI0_IN, PTI0_IN_PU }
+ PTI4_FN, PTI4_OUT, PTI4_IN, 0,
+ PTI3_FN, PTI3_OUT, PTI3_IN, 0,
+ PTI2_FN, PTI2_OUT, PTI2_IN, 0,
+ PTI1_FN, PTI1_OUT, PTI1_IN, 0,
+ PTI0_FN, PTI0_OUT, PTI0_IN, 0 }
},
{ PINMUX_CFG_REG("PJCR", 0xffec0012, 16, 2) {
0, 0, 0, 0, /* reserved: always set 1 */
- PTJ6_FN, PTJ6_OUT, PTJ6_IN, PTJ6_IN_PU,
- PTJ5_FN, PTJ5_OUT, PTJ5_IN, PTJ5_IN_PU,
- PTJ4_FN, PTJ4_OUT, PTJ4_IN, PTJ4_IN_PU,
- PTJ3_FN, PTJ3_OUT, PTJ3_IN, PTJ3_IN_PU,
- PTJ2_FN, PTJ2_OUT, PTJ2_IN, PTJ2_IN_PU,
- PTJ1_FN, PTJ1_OUT, PTJ1_IN, PTJ1_IN_PU,
- PTJ0_FN, PTJ0_OUT, PTJ0_IN, PTJ0_IN_PU }
+ PTJ6_FN, PTJ6_OUT, PTJ6_IN, 0,
+ PTJ5_FN, PTJ5_OUT, PTJ5_IN, 0,
+ PTJ4_FN, PTJ4_OUT, PTJ4_IN, 0,
+ PTJ3_FN, PTJ3_OUT, PTJ3_IN, 0,
+ PTJ2_FN, PTJ2_OUT, PTJ2_IN, 0,
+ PTJ1_FN, PTJ1_OUT, PTJ1_IN, 0,
+ PTJ0_FN, PTJ0_OUT, PTJ0_IN, 0 }
},
{ PINMUX_CFG_REG("PKCR", 0xffec0014, 16, 2) {
- PTK7_FN, PTK7_OUT, PTK7_IN, PTK7_IN_PU,
- PTK6_FN, PTK6_OUT, PTK6_IN, PTK6_IN_PU,
- PTK5_FN, PTK5_OUT, PTK5_IN, PTK5_IN_PU,
- PTK4_FN, PTK4_OUT, PTK4_IN, PTK4_IN_PU,
- PTK3_FN, PTK3_OUT, PTK3_IN, PTK3_IN_PU,
- PTK2_FN, PTK2_OUT, PTK2_IN, PTK2_IN_PU,
- PTK1_FN, PTK1_OUT, PTK1_IN, PTK1_IN_PU,
- PTK0_FN, PTK0_OUT, PTK0_IN, PTK0_IN_PU }
+ PTK7_FN, PTK7_OUT, PTK7_IN, 0,
+ PTK6_FN, PTK6_OUT, PTK6_IN, 0,
+ PTK5_FN, PTK5_OUT, PTK5_IN, 0,
+ PTK4_FN, PTK4_OUT, PTK4_IN, 0,
+ PTK3_FN, PTK3_OUT, PTK3_IN, 0,
+ PTK2_FN, PTK2_OUT, PTK2_IN, 0,
+ PTK1_FN, PTK1_OUT, PTK1_IN, 0,
+ PTK0_FN, PTK0_OUT, PTK0_IN, 0 }
},
{ PINMUX_CFG_REG("PLCR", 0xffec0016, 16, 2) {
0, 0, 0, 0, /* reserved: always set 1 */
- PTL6_FN, PTL6_OUT, PTL6_IN, PTL6_IN_PU,
- PTL5_FN, PTL5_OUT, PTL5_IN, PTL5_IN_PU,
- PTL4_FN, PTL4_OUT, PTL4_IN, PTL4_IN_PU,
- PTL3_FN, PTL3_OUT, PTL3_IN, PTL3_IN_PU,
- PTL2_FN, PTL2_OUT, PTL2_IN, PTL2_IN_PU,
- PTL1_FN, PTL1_OUT, PTL1_IN, PTL1_IN_PU,
- PTL0_FN, PTL0_OUT, PTL0_IN, PTL0_IN_PU }
+ PTL6_FN, PTL6_OUT, PTL6_IN, 0,
+ PTL5_FN, PTL5_OUT, PTL5_IN, 0,
+ PTL4_FN, PTL4_OUT, PTL4_IN, 0,
+ PTL3_FN, PTL3_OUT, PTL3_IN, 0,
+ PTL2_FN, PTL2_OUT, PTL2_IN, 0,
+ PTL1_FN, PTL1_OUT, PTL1_IN, 0,
+ PTL0_FN, PTL0_OUT, PTL0_IN, 0 }
},
{ PINMUX_CFG_REG("PMCR", 0xffec0018, 16, 2) {
- PTM7_FN, PTM7_OUT, PTM7_IN, PTM7_IN_PU,
- PTM6_FN, PTM6_OUT, PTM6_IN, PTM6_IN_PU,
- PTM5_FN, PTM5_OUT, PTM5_IN, PTM5_IN_PU,
- PTM4_FN, PTM4_OUT, PTM4_IN, PTM4_IN_PU,
+ PTM7_FN, PTM7_OUT, PTM7_IN, 0,
+ PTM6_FN, PTM6_OUT, PTM6_IN, 0,
+ PTM5_FN, PTM5_OUT, PTM5_IN, 0,
+ PTM4_FN, PTM4_OUT, PTM4_IN, 0,
PTM3_FN, PTM3_OUT, PTM3_IN, 0,
PTM2_FN, PTM2_OUT, PTM2_IN, 0,
PTM1_FN, PTM1_OUT, PTM1_IN, 0,
@@ -1861,21 +1821,21 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, /* reserved: always set 1 */
PTN6_FN, PTN6_OUT, PTN6_IN, 0,
PTN5_FN, PTN5_OUT, PTN5_IN, 0,
- PTN4_FN, PTN4_OUT, PTN4_IN, PTN4_IN_PU,
- PTN3_FN, PTN3_OUT, PTN3_IN, PTN3_IN_PU,
- PTN2_FN, PTN2_OUT, PTN2_IN, PTN2_IN_PU,
- PTN1_FN, PTN1_OUT, PTN1_IN, PTN1_IN_PU,
- PTN0_FN, PTN0_OUT, PTN0_IN, PTN0_IN_PU }
+ PTN4_FN, PTN4_OUT, PTN4_IN, 0,
+ PTN3_FN, PTN3_OUT, PTN3_IN, 0,
+ PTN2_FN, PTN2_OUT, PTN2_IN, 0,
+ PTN1_FN, PTN1_OUT, PTN1_IN, 0,
+ PTN0_FN, PTN0_OUT, PTN0_IN, 0 }
},
{ PINMUX_CFG_REG("POCR", 0xffec001c, 16, 2) {
- PTO7_FN, PTO7_OUT, PTO7_IN, PTO7_IN_PU,
- PTO6_FN, PTO6_OUT, PTO6_IN, PTO6_IN_PU,
- PTO5_FN, PTO5_OUT, PTO5_IN, PTO5_IN_PU,
- PTO4_FN, PTO4_OUT, PTO4_IN, PTO4_IN_PU,
- PTO3_FN, PTO3_OUT, PTO3_IN, PTO3_IN_PU,
- PTO2_FN, PTO2_OUT, PTO2_IN, PTO2_IN_PU,
- PTO1_FN, PTO1_OUT, PTO1_IN, PTO1_IN_PU,
- PTO0_FN, PTO0_OUT, PTO0_IN, PTO0_IN_PU }
+ PTO7_FN, PTO7_OUT, PTO7_IN, 0,
+ PTO6_FN, PTO6_OUT, PTO6_IN, 0,
+ PTO5_FN, PTO5_OUT, PTO5_IN, 0,
+ PTO4_FN, PTO4_OUT, PTO4_IN, 0,
+ PTO3_FN, PTO3_OUT, PTO3_IN, 0,
+ PTO2_FN, PTO2_OUT, PTO2_IN, 0,
+ PTO1_FN, PTO1_OUT, PTO1_IN, 0,
+ PTO0_FN, PTO0_OUT, PTO0_IN, 0 }
},
#if 0 /* FIXME: Remove it? */
{ PINMUX_CFG_REG("PPCR", 0xffec001e, 16, 2) {
@@ -1920,32 +1880,32 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTS0_FN, PTS0_OUT, PTS0_IN, 0 }
},
{ PINMUX_CFG_REG("PTCR", 0xffec0026, 16, 2) {
- PTT7_FN, PTT7_OUT, PTT7_IN, PTO7_IN_PU,
- PTT6_FN, PTT6_OUT, PTT6_IN, PTO6_IN_PU,
- PTT5_FN, PTT5_OUT, PTT5_IN, PTO5_IN_PU,
- PTT4_FN, PTT4_OUT, PTT4_IN, PTO4_IN_PU,
- PTT3_FN, PTT3_OUT, PTT3_IN, PTO3_IN_PU,
- PTT2_FN, PTT2_OUT, PTT2_IN, PTO2_IN_PU,
- PTT1_FN, PTT1_OUT, PTT1_IN, PTO1_IN_PU,
- PTT0_FN, PTT0_OUT, PTT0_IN, PTO0_IN_PU }
+ PTT7_FN, PTT7_OUT, PTT7_IN, 0,
+ PTT6_FN, PTT6_OUT, PTT6_IN, 0,
+ PTT5_FN, PTT5_OUT, PTT5_IN, 0,
+ PTT4_FN, PTT4_OUT, PTT4_IN, 0,
+ PTT3_FN, PTT3_OUT, PTT3_IN, 0,
+ PTT2_FN, PTT2_OUT, PTT2_IN, 0,
+ PTT1_FN, PTT1_OUT, PTT1_IN, 0,
+ PTT0_FN, PTT0_OUT, PTT0_IN, 0 }
},
{ PINMUX_CFG_REG("PUCR", 0xffec0028, 16, 2) {
- PTU7_FN, PTU7_OUT, PTU7_IN, PTU7_IN_PU,
- PTU6_FN, PTU6_OUT, PTU6_IN, PTU6_IN_PU,
- PTU5_FN, PTU5_OUT, PTU5_IN, PTU5_IN_PU,
- PTU4_FN, PTU4_OUT, PTU4_IN, PTU4_IN_PU,
- PTU3_FN, PTU3_OUT, PTU3_IN, PTU3_IN_PU,
- PTU2_FN, PTU2_OUT, PTU2_IN, PTU2_IN_PU,
- PTU1_FN, PTU1_OUT, PTU1_IN, PTU1_IN_PU,
- PTU0_FN, PTU0_OUT, PTU0_IN, PTU0_IN_PU }
+ PTU7_FN, PTU7_OUT, PTU7_IN, 0,
+ PTU6_FN, PTU6_OUT, PTU6_IN, 0,
+ PTU5_FN, PTU5_OUT, PTU5_IN, 0,
+ PTU4_FN, PTU4_OUT, PTU4_IN, 0,
+ PTU3_FN, PTU3_OUT, PTU3_IN, 0,
+ PTU2_FN, PTU2_OUT, PTU2_IN, 0,
+ PTU1_FN, PTU1_OUT, PTU1_IN, 0,
+ PTU0_FN, PTU0_OUT, PTU0_IN, 0 }
},
{ PINMUX_CFG_REG("PVCR", 0xffec002a, 16, 2) {
- PTV7_FN, PTV7_OUT, PTV7_IN, PTV7_IN_PU,
- PTV6_FN, PTV6_OUT, PTV6_IN, PTV6_IN_PU,
- PTV5_FN, PTV5_OUT, PTV5_IN, PTV5_IN_PU,
- PTV4_FN, PTV4_OUT, PTV4_IN, PTV4_IN_PU,
- PTV3_FN, PTV3_OUT, PTV3_IN, PTV3_IN_PU,
- PTV2_FN, PTV2_OUT, PTV2_IN, PTV2_IN_PU,
+ PTV7_FN, PTV7_OUT, PTV7_IN, 0,
+ PTV6_FN, PTV6_OUT, PTV6_IN, 0,
+ PTV5_FN, PTV5_OUT, PTV5_IN, 0,
+ PTV4_FN, PTV4_OUT, PTV4_IN, 0,
+ PTV3_FN, PTV3_OUT, PTV3_IN, 0,
+ PTV2_FN, PTV2_OUT, PTV2_IN, 0,
PTV1_FN, PTV1_OUT, PTV1_IN, 0,
PTV0_FN, PTV0_OUT, PTV0_IN, 0 }
},
@@ -1956,28 +1916,28 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
PTW4_FN, PTW4_OUT, PTW4_IN, 0,
PTW3_FN, PTW3_OUT, PTW3_IN, 0,
PTW2_FN, PTW2_OUT, PTW2_IN, 0,
- PTW1_FN, PTW1_OUT, PTW1_IN, PTW1_IN_PU,
- PTW0_FN, PTW0_OUT, PTW0_IN, PTW0_IN_PU }
+ PTW1_FN, PTW1_OUT, PTW1_IN, 0,
+ PTW0_FN, PTW0_OUT, PTW0_IN, 0 }
},
{ PINMUX_CFG_REG("PXCR", 0xffec002e, 16, 2) {
- PTX7_FN, PTX7_OUT, PTX7_IN, PTX7_IN_PU,
- PTX6_FN, PTX6_OUT, PTX6_IN, PTX6_IN_PU,
- PTX5_FN, PTX5_OUT, PTX5_IN, PTX5_IN_PU,
- PTX4_FN, PTX4_OUT, PTX4_IN, PTX4_IN_PU,
- PTX3_FN, PTX3_OUT, PTX3_IN, PTX3_IN_PU,
- PTX2_FN, PTX2_OUT, PTX2_IN, PTX2_IN_PU,
- PTX1_FN, PTX1_OUT, PTX1_IN, PTX1_IN_PU,
- PTX0_FN, PTX0_OUT, PTX0_IN, PTX0_IN_PU }
+ PTX7_FN, PTX7_OUT, PTX7_IN, 0,
+ PTX6_FN, PTX6_OUT, PTX6_IN, 0,
+ PTX5_FN, PTX5_OUT, PTX5_IN, 0,
+ PTX4_FN, PTX4_OUT, PTX4_IN, 0,
+ PTX3_FN, PTX3_OUT, PTX3_IN, 0,
+ PTX2_FN, PTX2_OUT, PTX2_IN, 0,
+ PTX1_FN, PTX1_OUT, PTX1_IN, 0,
+ PTX0_FN, PTX0_OUT, PTX0_IN, 0 }
},
{ PINMUX_CFG_REG("PYCR", 0xffec0030, 16, 2) {
- PTY7_FN, PTY7_OUT, PTY7_IN, PTY7_IN_PU,
- PTY6_FN, PTY6_OUT, PTY6_IN, PTY6_IN_PU,
- PTY5_FN, PTY5_OUT, PTY5_IN, PTY5_IN_PU,
- PTY4_FN, PTY4_OUT, PTY4_IN, PTY4_IN_PU,
- PTY3_FN, PTY3_OUT, PTY3_IN, PTY3_IN_PU,
- PTY2_FN, PTY2_OUT, PTY2_IN, PTY2_IN_PU,
- PTY1_FN, PTY1_OUT, PTY1_IN, PTY1_IN_PU,
- PTY0_FN, PTY0_OUT, PTY0_IN, PTY0_IN_PU }
+ PTY7_FN, PTY7_OUT, PTY7_IN, 0,
+ PTY6_FN, PTY6_OUT, PTY6_IN, 0,
+ PTY5_FN, PTY5_OUT, PTY5_IN, 0,
+ PTY4_FN, PTY4_OUT, PTY4_IN, 0,
+ PTY3_FN, PTY3_OUT, PTY3_IN, 0,
+ PTY2_FN, PTY2_OUT, PTY2_IN, 0,
+ PTY1_FN, PTY1_OUT, PTY1_IN, 0,
+ PTY0_FN, PTY0_OUT, PTY0_IN, 0 }
},
{ PINMUX_CFG_REG("PZCR", 0xffec0032, 16, 2) {
PTZ7_FN, PTZ7_OUT, PTZ7_IN, 0,
@@ -2267,7 +2227,6 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
const struct sh_pfc_soc_info sh7757_pinmux_info = {
.name = "sh7757_pfc",
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
- .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7785.c b/drivers/pinctrl/sh-pfc/pfc-sh7785.c
index c176b794f24..517eb49d76b 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7785.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7785.c
@@ -77,36 +77,6 @@ enum {
PR3_IN, PR2_IN, PR1_IN, PR0_IN,
PINMUX_INPUT_END,
- PINMUX_INPUT_PULLUP_BEGIN,
- PA7_IN_PU, PA6_IN_PU, PA5_IN_PU, PA4_IN_PU,
- PA3_IN_PU, PA2_IN_PU, PA1_IN_PU, PA0_IN_PU,
- PB7_IN_PU, PB6_IN_PU, PB5_IN_PU, PB4_IN_PU,
- PB3_IN_PU, PB2_IN_PU, PB1_IN_PU, PB0_IN_PU,
- PC7_IN_PU, PC6_IN_PU, PC5_IN_PU, PC4_IN_PU,
- PC3_IN_PU, PC2_IN_PU, PC1_IN_PU, PC0_IN_PU,
- PD7_IN_PU, PD6_IN_PU, PD5_IN_PU, PD4_IN_PU,
- PD3_IN_PU, PD2_IN_PU, PD1_IN_PU, PD0_IN_PU,
- PE5_IN_PU, PE4_IN_PU, PE3_IN_PU, PE2_IN_PU, PE1_IN_PU, PE0_IN_PU,
- PF7_IN_PU, PF6_IN_PU, PF5_IN_PU, PF4_IN_PU,
- PF3_IN_PU, PF2_IN_PU, PF1_IN_PU, PF0_IN_PU,
- PG7_IN_PU, PG6_IN_PU, PG5_IN_PU, PG4_IN_PU,
- PG3_IN_PU, PG2_IN_PU, PG1_IN_PU, PG0_IN_PU,
- PH7_IN_PU, PH6_IN_PU, PH5_IN_PU, PH4_IN_PU,
- PH3_IN_PU, PH2_IN_PU, PH1_IN_PU, PH0_IN_PU,
- PJ7_IN_PU, PJ6_IN_PU, PJ5_IN_PU, PJ4_IN_PU,
- PJ3_IN_PU, PJ2_IN_PU, PJ1_IN_PU, PJ0_IN_PU,
- PK7_IN_PU, PK6_IN_PU, PK5_IN_PU, PK4_IN_PU,
- PK3_IN_PU, PK2_IN_PU, PK1_IN_PU, PK0_IN_PU,
- PL7_IN_PU, PL6_IN_PU, PL5_IN_PU, PL4_IN_PU,
- PL3_IN_PU, PL2_IN_PU, PL1_IN_PU, PL0_IN_PU,
- PM1_IN_PU, PM0_IN_PU,
- PN7_IN_PU, PN6_IN_PU, PN5_IN_PU, PN4_IN_PU,
- PN3_IN_PU, PN2_IN_PU, PN1_IN_PU, PN0_IN_PU,
- PP5_IN_PU, PP4_IN_PU, PP3_IN_PU, PP2_IN_PU, PP1_IN_PU, PP0_IN_PU,
- PQ4_IN_PU, PQ3_IN_PU, PQ2_IN_PU, PQ1_IN_PU, PQ0_IN_PU,
- PR3_IN_PU, PR2_IN_PU, PR1_IN_PU, PR0_IN_PU,
- PINMUX_INPUT_PULLUP_END,
-
PINMUX_OUTPUT_BEGIN,
PA7_OUT, PA6_OUT, PA5_OUT, PA4_OUT,
PA3_OUT, PA2_OUT, PA1_OUT, PA0_OUT,
@@ -355,150 +325,149 @@ enum {
PINMUX_MARK_END,
};
-static const pinmux_enum_t pinmux_data[] = {
-
+static const u16 pinmux_data[] = {
/* PA GPIO */
- PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU),
- PINMUX_DATA(PA6_DATA, PA6_IN, PA6_OUT, PA6_IN_PU),
- PINMUX_DATA(PA5_DATA, PA5_IN, PA5_OUT, PA5_IN_PU),
- PINMUX_DATA(PA4_DATA, PA4_IN, PA4_OUT, PA4_IN_PU),
- PINMUX_DATA(PA3_DATA, PA3_IN, PA3_OUT, PA3_IN_PU),
- PINMUX_DATA(PA2_DATA, PA2_IN, PA2_OUT, PA2_IN_PU),
- PINMUX_DATA(PA1_DATA, PA1_IN, PA1_OUT, PA1_IN_PU),
- PINMUX_DATA(PA0_DATA, PA0_IN, PA0_OUT, PA0_IN_PU),
+ PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT),
+ PINMUX_DATA(PA6_DATA, PA6_IN, PA6_OUT),
+ PINMUX_DATA(PA5_DATA, PA5_IN, PA5_OUT),
+ PINMUX_DATA(PA4_DATA, PA4_IN, PA4_OUT),
+ PINMUX_DATA(PA3_DATA, PA3_IN, PA3_OUT),
+ PINMUX_DATA(PA2_DATA, PA2_IN, PA2_OUT),
+ PINMUX_DATA(PA1_DATA, PA1_IN, PA1_OUT),
+ PINMUX_DATA(PA0_DATA, PA0_IN, PA0_OUT),
/* PB GPIO */
- PINMUX_DATA(PB7_DATA, PB7_IN, PB7_OUT, PB7_IN_PU),
- PINMUX_DATA(PB6_DATA, PB6_IN, PB6_OUT, PB6_IN_PU),
- PINMUX_DATA(PB5_DATA, PB5_IN, PB5_OUT, PB5_IN_PU),
- PINMUX_DATA(PB4_DATA, PB4_IN, PB4_OUT, PB4_IN_PU),
- PINMUX_DATA(PB3_DATA, PB3_IN, PB3_OUT, PB3_IN_PU),
- PINMUX_DATA(PB2_DATA, PB2_IN, PB2_OUT, PB2_IN_PU),
- PINMUX_DATA(PB1_DATA, PB1_IN, PB1_OUT, PB1_IN_PU),
- PINMUX_DATA(PB0_DATA, PB0_IN, PB0_OUT, PB0_IN_PU),
+ PINMUX_DATA(PB7_DATA, PB7_IN, PB7_OUT),
+ PINMUX_DATA(PB6_DATA, PB6_IN, PB6_OUT),
+ PINMUX_DATA(PB5_DATA, PB5_IN, PB5_OUT),
+ PINMUX_DATA(PB4_DATA, PB4_IN, PB4_OUT),
+ PINMUX_DATA(PB3_DATA, PB3_IN, PB3_OUT),
+ PINMUX_DATA(PB2_DATA, PB2_IN, PB2_OUT),
+ PINMUX_DATA(PB1_DATA, PB1_IN, PB1_OUT),
+ PINMUX_DATA(PB0_DATA, PB0_IN, PB0_OUT),
/* PC GPIO */
- PINMUX_DATA(PC7_DATA, PC7_IN, PC7_OUT, PC7_IN_PU),
- PINMUX_DATA(PC6_DATA, PC6_IN, PC6_OUT, PC6_IN_PU),
- PINMUX_DATA(PC5_DATA, PC5_IN, PC5_OUT, PC5_IN_PU),
- PINMUX_DATA(PC4_DATA, PC4_IN, PC4_OUT, PC4_IN_PU),
- PINMUX_DATA(PC3_DATA, PC3_IN, PC3_OUT, PC3_IN_PU),
- PINMUX_DATA(PC2_DATA, PC2_IN, PC2_OUT, PC2_IN_PU),
- PINMUX_DATA(PC1_DATA, PC1_IN, PC1_OUT, PC1_IN_PU),
- PINMUX_DATA(PC0_DATA, PC0_IN, PC0_OUT, PC0_IN_PU),
+ PINMUX_DATA(PC7_DATA, PC7_IN, PC7_OUT),
+ PINMUX_DATA(PC6_DATA, PC6_IN, PC6_OUT),
+ PINMUX_DATA(PC5_DATA, PC5_IN, PC5_OUT),
+ PINMUX_DATA(PC4_DATA, PC4_IN, PC4_OUT),
+ PINMUX_DATA(PC3_DATA, PC3_IN, PC3_OUT),
+ PINMUX_DATA(PC2_DATA, PC2_IN, PC2_OUT),
+ PINMUX_DATA(PC1_DATA, PC1_IN, PC1_OUT),
+ PINMUX_DATA(PC0_DATA, PC0_IN, PC0_OUT),
/* PD GPIO */
- PINMUX_DATA(PD7_DATA, PD7_IN, PD7_OUT, PD7_IN_PU),
- PINMUX_DATA(PD6_DATA, PD6_IN, PD6_OUT, PD6_IN_PU),
- PINMUX_DATA(PD5_DATA, PD5_IN, PD5_OUT, PD5_IN_PU),
- PINMUX_DATA(PD4_DATA, PD4_IN, PD4_OUT, PD4_IN_PU),
- PINMUX_DATA(PD3_DATA, PD3_IN, PD3_OUT, PD3_IN_PU),
- PINMUX_DATA(PD2_DATA, PD2_IN, PD2_OUT, PD2_IN_PU),
- PINMUX_DATA(PD1_DATA, PD1_IN, PD1_OUT, PD1_IN_PU),
- PINMUX_DATA(PD0_DATA, PD0_IN, PD0_OUT, PD0_IN_PU),
+ PINMUX_DATA(PD7_DATA, PD7_IN, PD7_OUT),
+ PINMUX_DATA(PD6_DATA, PD6_IN, PD6_OUT),
+ PINMUX_DATA(PD5_DATA, PD5_IN, PD5_OUT),
+ PINMUX_DATA(PD4_DATA, PD4_IN, PD4_OUT),
+ PINMUX_DATA(PD3_DATA, PD3_IN, PD3_OUT),
+ PINMUX_DATA(PD2_DATA, PD2_IN, PD2_OUT),
+ PINMUX_DATA(PD1_DATA, PD1_IN, PD1_OUT),
+ PINMUX_DATA(PD0_DATA, PD0_IN, PD0_OUT),
/* PE GPIO */
- PINMUX_DATA(PE5_DATA, PE5_IN, PE5_OUT, PE5_IN_PU),
- PINMUX_DATA(PE4_DATA, PE4_IN, PE4_OUT, PE4_IN_PU),
- PINMUX_DATA(PE3_DATA, PE3_IN, PE3_OUT, PE3_IN_PU),
- PINMUX_DATA(PE2_DATA, PE2_IN, PE2_OUT, PE2_IN_PU),
- PINMUX_DATA(PE1_DATA, PE1_IN, PE1_OUT, PE1_IN_PU),
- PINMUX_DATA(PE0_DATA, PE0_IN, PE0_OUT, PE0_IN_PU),
+ PINMUX_DATA(PE5_DATA, PE5_IN, PE5_OUT),
+ PINMUX_DATA(PE4_DATA, PE4_IN, PE4_OUT),
+ PINMUX_DATA(PE3_DATA, PE3_IN, PE3_OUT),
+ PINMUX_DATA(PE2_DATA, PE2_IN, PE2_OUT),
+ PINMUX_DATA(PE1_DATA, PE1_IN, PE1_OUT),
+ PINMUX_DATA(PE0_DATA, PE0_IN, PE0_OUT),
/* PF GPIO */
- PINMUX_DATA(PF7_DATA, PF7_IN, PF7_OUT, PF7_IN_PU),
- PINMUX_DATA(PF6_DATA, PF6_IN, PF6_OUT, PF6_IN_PU),
- PINMUX_DATA(PF5_DATA, PF5_IN, PF5_OUT, PF5_IN_PU),
- PINMUX_DATA(PF4_DATA, PF4_IN, PF4_OUT, PF4_IN_PU),
- PINMUX_DATA(PF3_DATA, PF3_IN, PF3_OUT, PF3_IN_PU),
- PINMUX_DATA(PF2_DATA, PF2_IN, PF2_OUT, PF2_IN_PU),
- PINMUX_DATA(PF1_DATA, PF1_IN, PF1_OUT, PF1_IN_PU),
- PINMUX_DATA(PF0_DATA, PF0_IN, PF0_OUT, PF0_IN_PU),
+ PINMUX_DATA(PF7_DATA, PF7_IN, PF7_OUT),
+ PINMUX_DATA(PF6_DATA, PF6_IN, PF6_OUT),
+ PINMUX_DATA(PF5_DATA, PF5_IN, PF5_OUT),
+ PINMUX_DATA(PF4_DATA, PF4_IN, PF4_OUT),
+ PINMUX_DATA(PF3_DATA, PF3_IN, PF3_OUT),
+ PINMUX_DATA(PF2_DATA, PF2_IN, PF2_OUT),
+ PINMUX_DATA(PF1_DATA, PF1_IN, PF1_OUT),
+ PINMUX_DATA(PF0_DATA, PF0_IN, PF0_OUT),
/* PG GPIO */
- PINMUX_DATA(PG7_DATA, PG7_IN, PG7_OUT, PG7_IN_PU),
- PINMUX_DATA(PG6_DATA, PG6_IN, PG6_OUT, PG6_IN_PU),
- PINMUX_DATA(PG5_DATA, PG5_IN, PG5_OUT, PG5_IN_PU),
- PINMUX_DATA(PG4_DATA, PG4_IN, PG4_OUT, PG4_IN_PU),
- PINMUX_DATA(PG3_DATA, PG3_IN, PG3_OUT, PG3_IN_PU),
- PINMUX_DATA(PG2_DATA, PG2_IN, PG2_OUT, PG2_IN_PU),
- PINMUX_DATA(PG1_DATA, PG1_IN, PG1_OUT, PG1_IN_PU),
- PINMUX_DATA(PG0_DATA, PG0_IN, PG0_OUT, PG0_IN_PU),
+ PINMUX_DATA(PG7_DATA, PG7_IN, PG7_OUT),
+ PINMUX_DATA(PG6_DATA, PG6_IN, PG6_OUT),
+ PINMUX_DATA(PG5_DATA, PG5_IN, PG5_OUT),
+ PINMUX_DATA(PG4_DATA, PG4_IN, PG4_OUT),
+ PINMUX_DATA(PG3_DATA, PG3_IN, PG3_OUT),
+ PINMUX_DATA(PG2_DATA, PG2_IN, PG2_OUT),
+ PINMUX_DATA(PG1_DATA, PG1_IN, PG1_OUT),
+ PINMUX_DATA(PG0_DATA, PG0_IN, PG0_OUT),
/* PH GPIO */
- PINMUX_DATA(PH7_DATA, PH7_IN, PH7_OUT, PH7_IN_PU),
- PINMUX_DATA(PH6_DATA, PH6_IN, PH6_OUT, PH6_IN_PU),
- PINMUX_DATA(PH5_DATA, PH5_IN, PH5_OUT, PH5_IN_PU),
- PINMUX_DATA(PH4_DATA, PH4_IN, PH4_OUT, PH4_IN_PU),
- PINMUX_DATA(PH3_DATA, PH3_IN, PH3_OUT, PH3_IN_PU),
- PINMUX_DATA(PH2_DATA, PH2_IN, PH2_OUT, PH2_IN_PU),
- PINMUX_DATA(PH1_DATA, PH1_IN, PH1_OUT, PH1_IN_PU),
- PINMUX_DATA(PH0_DATA, PH0_IN, PH0_OUT, PH0_IN_PU),
+ PINMUX_DATA(PH7_DATA, PH7_IN, PH7_OUT),
+ PINMUX_DATA(PH6_DATA, PH6_IN, PH6_OUT),
+ PINMUX_DATA(PH5_DATA, PH5_IN, PH5_OUT),
+ PINMUX_DATA(PH4_DATA, PH4_IN, PH4_OUT),
+ PINMUX_DATA(PH3_DATA, PH3_IN, PH3_OUT),
+ PINMUX_DATA(PH2_DATA, PH2_IN, PH2_OUT),
+ PINMUX_DATA(PH1_DATA, PH1_IN, PH1_OUT),
+ PINMUX_DATA(PH0_DATA, PH0_IN, PH0_OUT),
/* PJ GPIO */
- PINMUX_DATA(PJ7_DATA, PJ7_IN, PJ7_OUT, PJ7_IN_PU),
- PINMUX_DATA(PJ6_DATA, PJ6_IN, PJ6_OUT, PJ6_IN_PU),
- PINMUX_DATA(PJ5_DATA, PJ5_IN, PJ5_OUT, PJ5_IN_PU),
- PINMUX_DATA(PJ4_DATA, PJ4_IN, PJ4_OUT, PJ4_IN_PU),
- PINMUX_DATA(PJ3_DATA, PJ3_IN, PJ3_OUT, PJ3_IN_PU),
- PINMUX_DATA(PJ2_DATA, PJ2_IN, PJ2_OUT, PJ2_IN_PU),
- PINMUX_DATA(PJ1_DATA, PJ1_IN, PJ1_OUT, PJ1_IN_PU),
- PINMUX_DATA(PJ0_DATA, PJ0_IN, PJ0_OUT, PJ0_IN_PU),
+ PINMUX_DATA(PJ7_DATA, PJ7_IN, PJ7_OUT),
+ PINMUX_DATA(PJ6_DATA, PJ6_IN, PJ6_OUT),
+ PINMUX_DATA(PJ5_DATA, PJ5_IN, PJ5_OUT),
+ PINMUX_DATA(PJ4_DATA, PJ4_IN, PJ4_OUT),
+ PINMUX_DATA(PJ3_DATA, PJ3_IN, PJ3_OUT),
+ PINMUX_DATA(PJ2_DATA, PJ2_IN, PJ2_OUT),
+ PINMUX_DATA(PJ1_DATA, PJ1_IN, PJ1_OUT),
+ PINMUX_DATA(PJ0_DATA, PJ0_IN, PJ0_OUT),
/* PK GPIO */
- PINMUX_DATA(PK7_DATA, PK7_IN, PK7_OUT, PK7_IN_PU),
- PINMUX_DATA(PK6_DATA, PK6_IN, PK6_OUT, PK6_IN_PU),
- PINMUX_DATA(PK5_DATA, PK5_IN, PK5_OUT, PK5_IN_PU),
- PINMUX_DATA(PK4_DATA, PK4_IN, PK4_OUT, PK4_IN_PU),
- PINMUX_DATA(PK3_DATA, PK3_IN, PK3_OUT, PK3_IN_PU),
- PINMUX_DATA(PK2_DATA, PK2_IN, PK2_OUT, PK2_IN_PU),
- PINMUX_DATA(PK1_DATA, PK1_IN, PK1_OUT, PK1_IN_PU),
- PINMUX_DATA(PK0_DATA, PK0_IN, PK0_OUT, PK0_IN_PU),
+ PINMUX_DATA(PK7_DATA, PK7_IN, PK7_OUT),
+ PINMUX_DATA(PK6_DATA, PK6_IN, PK6_OUT),
+ PINMUX_DATA(PK5_DATA, PK5_IN, PK5_OUT),
+ PINMUX_DATA(PK4_DATA, PK4_IN, PK4_OUT),
+ PINMUX_DATA(PK3_DATA, PK3_IN, PK3_OUT),
+ PINMUX_DATA(PK2_DATA, PK2_IN, PK2_OUT),
+ PINMUX_DATA(PK1_DATA, PK1_IN, PK1_OUT),
+ PINMUX_DATA(PK0_DATA, PK0_IN, PK0_OUT),
/* PL GPIO */
- PINMUX_DATA(PL7_DATA, PL7_IN, PL7_OUT, PL7_IN_PU),
- PINMUX_DATA(PL6_DATA, PL6_IN, PL6_OUT, PL6_IN_PU),
- PINMUX_DATA(PL5_DATA, PL5_IN, PL5_OUT, PL5_IN_PU),
- PINMUX_DATA(PL4_DATA, PL4_IN, PL4_OUT, PL4_IN_PU),
- PINMUX_DATA(PL3_DATA, PL3_IN, PL3_OUT, PL3_IN_PU),
- PINMUX_DATA(PL2_DATA, PL2_IN, PL2_OUT, PL2_IN_PU),
- PINMUX_DATA(PL1_DATA, PL1_IN, PL1_OUT, PL1_IN_PU),
- PINMUX_DATA(PL0_DATA, PL0_IN, PL0_OUT, PL0_IN_PU),
+ PINMUX_DATA(PL7_DATA, PL7_IN, PL7_OUT),
+ PINMUX_DATA(PL6_DATA, PL6_IN, PL6_OUT),
+ PINMUX_DATA(PL5_DATA, PL5_IN, PL5_OUT),
+ PINMUX_DATA(PL4_DATA, PL4_IN, PL4_OUT),
+ PINMUX_DATA(PL3_DATA, PL3_IN, PL3_OUT),
+ PINMUX_DATA(PL2_DATA, PL2_IN, PL2_OUT),
+ PINMUX_DATA(PL1_DATA, PL1_IN, PL1_OUT),
+ PINMUX_DATA(PL0_DATA, PL0_IN, PL0_OUT),
/* PM GPIO */
- PINMUX_DATA(PM1_DATA, PM1_IN, PM1_OUT, PM1_IN_PU),
- PINMUX_DATA(PM0_DATA, PM0_IN, PM0_OUT, PM0_IN_PU),
+ PINMUX_DATA(PM1_DATA, PM1_IN, PM1_OUT),
+ PINMUX_DATA(PM0_DATA, PM0_IN, PM0_OUT),
/* PN GPIO */
- PINMUX_DATA(PN7_DATA, PN7_IN, PN7_OUT, PN7_IN_PU),
- PINMUX_DATA(PN6_DATA, PN6_IN, PN6_OUT, PN6_IN_PU),
- PINMUX_DATA(PN5_DATA, PN5_IN, PN5_OUT, PN5_IN_PU),
- PINMUX_DATA(PN4_DATA, PN4_IN, PN4_OUT, PN4_IN_PU),
- PINMUX_DATA(PN3_DATA, PN3_IN, PN3_OUT, PN3_IN_PU),
- PINMUX_DATA(PN2_DATA, PN2_IN, PN2_OUT, PN2_IN_PU),
- PINMUX_DATA(PN1_DATA, PN1_IN, PN1_OUT, PN1_IN_PU),
- PINMUX_DATA(PN0_DATA, PN0_IN, PN0_OUT, PN0_IN_PU),
+ PINMUX_DATA(PN7_DATA, PN7_IN, PN7_OUT),
+ PINMUX_DATA(PN6_DATA, PN6_IN, PN6_OUT),
+ PINMUX_DATA(PN5_DATA, PN5_IN, PN5_OUT),
+ PINMUX_DATA(PN4_DATA, PN4_IN, PN4_OUT),
+ PINMUX_DATA(PN3_DATA, PN3_IN, PN3_OUT),
+ PINMUX_DATA(PN2_DATA, PN2_IN, PN2_OUT),
+ PINMUX_DATA(PN1_DATA, PN1_IN, PN1_OUT),
+ PINMUX_DATA(PN0_DATA, PN0_IN, PN0_OUT),
/* PP GPIO */
- PINMUX_DATA(PP5_DATA, PP5_IN, PP5_OUT, PP5_IN_PU),
- PINMUX_DATA(PP4_DATA, PP4_IN, PP4_OUT, PP4_IN_PU),
- PINMUX_DATA(PP3_DATA, PP3_IN, PP3_OUT, PP3_IN_PU),
- PINMUX_DATA(PP2_DATA, PP2_IN, PP2_OUT, PP2_IN_PU),
- PINMUX_DATA(PP1_DATA, PP1_IN, PP1_OUT, PP1_IN_PU),
- PINMUX_DATA(PP0_DATA, PP0_IN, PP0_OUT, PP0_IN_PU),
+ PINMUX_DATA(PP5_DATA, PP5_IN, PP5_OUT),
+ PINMUX_DATA(PP4_DATA, PP4_IN, PP4_OUT),
+ PINMUX_DATA(PP3_DATA, PP3_IN, PP3_OUT),
+ PINMUX_DATA(PP2_DATA, PP2_IN, PP2_OUT),
+ PINMUX_DATA(PP1_DATA, PP1_IN, PP1_OUT),
+ PINMUX_DATA(PP0_DATA, PP0_IN, PP0_OUT),
/* PQ GPIO */
- PINMUX_DATA(PQ4_DATA, PQ4_IN, PQ4_OUT, PQ4_IN_PU),
- PINMUX_DATA(PQ3_DATA, PQ3_IN, PQ3_OUT, PQ3_IN_PU),
- PINMUX_DATA(PQ2_DATA, PQ2_IN, PQ2_OUT, PQ2_IN_PU),
- PINMUX_DATA(PQ1_DATA, PQ1_IN, PQ1_OUT, PQ1_IN_PU),
- PINMUX_DATA(PQ0_DATA, PQ0_IN, PQ0_OUT, PQ0_IN_PU),
+ PINMUX_DATA(PQ4_DATA, PQ4_IN, PQ4_OUT),
+ PINMUX_DATA(PQ3_DATA, PQ3_IN, PQ3_OUT),
+ PINMUX_DATA(PQ2_DATA, PQ2_IN, PQ2_OUT),
+ PINMUX_DATA(PQ1_DATA, PQ1_IN, PQ1_OUT),
+ PINMUX_DATA(PQ0_DATA, PQ0_IN, PQ0_OUT),
/* PR GPIO */
- PINMUX_DATA(PR3_DATA, PR3_IN, PR3_OUT, PR3_IN_PU),
- PINMUX_DATA(PR2_DATA, PR2_IN, PR2_OUT, PR2_IN_PU),
- PINMUX_DATA(PR1_DATA, PR1_IN, PR1_OUT, PR1_IN_PU),
- PINMUX_DATA(PR0_DATA, PR0_IN, PR0_OUT, PR0_IN_PU),
+ PINMUX_DATA(PR3_DATA, PR3_IN, PR3_OUT),
+ PINMUX_DATA(PR2_DATA, PR2_IN, PR2_OUT),
+ PINMUX_DATA(PR1_DATA, PR1_IN, PR1_OUT),
+ PINMUX_DATA(PR0_DATA, PR0_IN, PR0_OUT),
/* PA FN */
PINMUX_DATA(D63_AD31_MARK, PA7_FN),
@@ -704,147 +673,147 @@ static const pinmux_enum_t pinmux_data[] = {
static struct sh_pfc_pin pinmux_pins[] = {
/* PA */
- PINMUX_GPIO(GPIO_PA7, PA7_DATA),
- PINMUX_GPIO(GPIO_PA6, PA6_DATA),
- PINMUX_GPIO(GPIO_PA5, PA5_DATA),
- PINMUX_GPIO(GPIO_PA4, PA4_DATA),
- PINMUX_GPIO(GPIO_PA3, PA3_DATA),
- PINMUX_GPIO(GPIO_PA2, PA2_DATA),
- PINMUX_GPIO(GPIO_PA1, PA1_DATA),
- PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+ PINMUX_GPIO(PA7),
+ PINMUX_GPIO(PA6),
+ PINMUX_GPIO(PA5),
+ PINMUX_GPIO(PA4),
+ PINMUX_GPIO(PA3),
+ PINMUX_GPIO(PA2),
+ PINMUX_GPIO(PA1),
+ PINMUX_GPIO(PA0),
/* PB */
- PINMUX_GPIO(GPIO_PB7, PB7_DATA),
- PINMUX_GPIO(GPIO_PB6, PB6_DATA),
- PINMUX_GPIO(GPIO_PB5, PB5_DATA),
- PINMUX_GPIO(GPIO_PB4, PB4_DATA),
- PINMUX_GPIO(GPIO_PB3, PB3_DATA),
- PINMUX_GPIO(GPIO_PB2, PB2_DATA),
- PINMUX_GPIO(GPIO_PB1, PB1_DATA),
- PINMUX_GPIO(GPIO_PB0, PB0_DATA),
+ PINMUX_GPIO(PB7),
+ PINMUX_GPIO(PB6),
+ PINMUX_GPIO(PB5),
+ PINMUX_GPIO(PB4),
+ PINMUX_GPIO(PB3),
+ PINMUX_GPIO(PB2),
+ PINMUX_GPIO(PB1),
+ PINMUX_GPIO(PB0),
/* PC */
- PINMUX_GPIO(GPIO_PC7, PC7_DATA),
- PINMUX_GPIO(GPIO_PC6, PC6_DATA),
- PINMUX_GPIO(GPIO_PC5, PC5_DATA),
- PINMUX_GPIO(GPIO_PC4, PC4_DATA),
- PINMUX_GPIO(GPIO_PC3, PC3_DATA),
- PINMUX_GPIO(GPIO_PC2, PC2_DATA),
- PINMUX_GPIO(GPIO_PC1, PC1_DATA),
- PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+ PINMUX_GPIO(PC7),
+ PINMUX_GPIO(PC6),
+ PINMUX_GPIO(PC5),
+ PINMUX_GPIO(PC4),
+ PINMUX_GPIO(PC3),
+ PINMUX_GPIO(PC2),
+ PINMUX_GPIO(PC1),
+ PINMUX_GPIO(PC0),
/* PD */
- PINMUX_GPIO(GPIO_PD7, PD7_DATA),
- PINMUX_GPIO(GPIO_PD6, PD6_DATA),
- PINMUX_GPIO(GPIO_PD5, PD5_DATA),
- PINMUX_GPIO(GPIO_PD4, PD4_DATA),
- PINMUX_GPIO(GPIO_PD3, PD3_DATA),
- PINMUX_GPIO(GPIO_PD2, PD2_DATA),
- PINMUX_GPIO(GPIO_PD1, PD1_DATA),
- PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+ PINMUX_GPIO(PD7),
+ PINMUX_GPIO(PD6),
+ PINMUX_GPIO(PD5),
+ PINMUX_GPIO(PD4),
+ PINMUX_GPIO(PD3),
+ PINMUX_GPIO(PD2),
+ PINMUX_GPIO(PD1),
+ PINMUX_GPIO(PD0),
/* PE */
- PINMUX_GPIO(GPIO_PE5, PE5_DATA),
- PINMUX_GPIO(GPIO_PE4, PE4_DATA),
- PINMUX_GPIO(GPIO_PE3, PE3_DATA),
- PINMUX_GPIO(GPIO_PE2, PE2_DATA),
- PINMUX_GPIO(GPIO_PE1, PE1_DATA),
- PINMUX_GPIO(GPIO_PE0, PE0_DATA),
+ PINMUX_GPIO(PE5),
+ PINMUX_GPIO(PE4),
+ PINMUX_GPIO(PE3),
+ PINMUX_GPIO(PE2),
+ PINMUX_GPIO(PE1),
+ PINMUX_GPIO(PE0),
/* PF */
- PINMUX_GPIO(GPIO_PF7, PF7_DATA),
- PINMUX_GPIO(GPIO_PF6, PF6_DATA),
- PINMUX_GPIO(GPIO_PF5, PF5_DATA),
- PINMUX_GPIO(GPIO_PF4, PF4_DATA),
- PINMUX_GPIO(GPIO_PF3, PF3_DATA),
- PINMUX_GPIO(GPIO_PF2, PF2_DATA),
- PINMUX_GPIO(GPIO_PF1, PF1_DATA),
- PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+ PINMUX_GPIO(PF7),
+ PINMUX_GPIO(PF6),
+ PINMUX_GPIO(PF5),
+ PINMUX_GPIO(PF4),
+ PINMUX_GPIO(PF3),
+ PINMUX_GPIO(PF2),
+ PINMUX_GPIO(PF1),
+ PINMUX_GPIO(PF0),
/* PG */
- PINMUX_GPIO(GPIO_PG7, PG7_DATA),
- PINMUX_GPIO(GPIO_PG6, PG6_DATA),
- PINMUX_GPIO(GPIO_PG5, PG5_DATA),
- PINMUX_GPIO(GPIO_PG4, PG4_DATA),
- PINMUX_GPIO(GPIO_PG3, PG3_DATA),
- PINMUX_GPIO(GPIO_PG2, PG2_DATA),
- PINMUX_GPIO(GPIO_PG1, PG1_DATA),
- PINMUX_GPIO(GPIO_PG0, PG0_DATA),
+ PINMUX_GPIO(PG7),
+ PINMUX_GPIO(PG6),
+ PINMUX_GPIO(PG5),
+ PINMUX_GPIO(PG4),
+ PINMUX_GPIO(PG3),
+ PINMUX_GPIO(PG2),
+ PINMUX_GPIO(PG1),
+ PINMUX_GPIO(PG0),
/* PH */
- PINMUX_GPIO(GPIO_PH7, PH7_DATA),
- PINMUX_GPIO(GPIO_PH6, PH6_DATA),
- PINMUX_GPIO(GPIO_PH5, PH5_DATA),
- PINMUX_GPIO(GPIO_PH4, PH4_DATA),
- PINMUX_GPIO(GPIO_PH3, PH3_DATA),
- PINMUX_GPIO(GPIO_PH2, PH2_DATA),
- PINMUX_GPIO(GPIO_PH1, PH1_DATA),
- PINMUX_GPIO(GPIO_PH0, PH0_DATA),
+ PINMUX_GPIO(PH7),
+ PINMUX_GPIO(PH6),
+ PINMUX_GPIO(PH5),
+ PINMUX_GPIO(PH4),
+ PINMUX_GPIO(PH3),
+ PINMUX_GPIO(PH2),
+ PINMUX_GPIO(PH1),
+ PINMUX_GPIO(PH0),
/* PJ */
- PINMUX_GPIO(GPIO_PJ7, PJ7_DATA),
- PINMUX_GPIO(GPIO_PJ6, PJ6_DATA),
- PINMUX_GPIO(GPIO_PJ5, PJ5_DATA),
- PINMUX_GPIO(GPIO_PJ4, PJ4_DATA),
- PINMUX_GPIO(GPIO_PJ3, PJ3_DATA),
- PINMUX_GPIO(GPIO_PJ2, PJ2_DATA),
- PINMUX_GPIO(GPIO_PJ1, PJ1_DATA),
- PINMUX_GPIO(GPIO_PJ0, PJ0_DATA),
+ PINMUX_GPIO(PJ7),
+ PINMUX_GPIO(PJ6),
+ PINMUX_GPIO(PJ5),
+ PINMUX_GPIO(PJ4),
+ PINMUX_GPIO(PJ3),
+ PINMUX_GPIO(PJ2),
+ PINMUX_GPIO(PJ1),
+ PINMUX_GPIO(PJ0),
/* PK */
- PINMUX_GPIO(GPIO_PK7, PK7_DATA),
- PINMUX_GPIO(GPIO_PK6, PK6_DATA),
- PINMUX_GPIO(GPIO_PK5, PK5_DATA),
- PINMUX_GPIO(GPIO_PK4, PK4_DATA),
- PINMUX_GPIO(GPIO_PK3, PK3_DATA),
- PINMUX_GPIO(GPIO_PK2, PK2_DATA),
- PINMUX_GPIO(GPIO_PK1, PK1_DATA),
- PINMUX_GPIO(GPIO_PK0, PK0_DATA),
+ PINMUX_GPIO(PK7),
+ PINMUX_GPIO(PK6),
+ PINMUX_GPIO(PK5),
+ PINMUX_GPIO(PK4),
+ PINMUX_GPIO(PK3),
+ PINMUX_GPIO(PK2),
+ PINMUX_GPIO(PK1),
+ PINMUX_GPIO(PK0),
/* PL */
- PINMUX_GPIO(GPIO_PL7, PL7_DATA),
- PINMUX_GPIO(GPIO_PL6, PL6_DATA),
- PINMUX_GPIO(GPIO_PL5, PL5_DATA),
- PINMUX_GPIO(GPIO_PL4, PL4_DATA),
- PINMUX_GPIO(GPIO_PL3, PL3_DATA),
- PINMUX_GPIO(GPIO_PL2, PL2_DATA),
- PINMUX_GPIO(GPIO_PL1, PL1_DATA),
- PINMUX_GPIO(GPIO_PL0, PL0_DATA),
+ PINMUX_GPIO(PL7),
+ PINMUX_GPIO(PL6),
+ PINMUX_GPIO(PL5),
+ PINMUX_GPIO(PL4),
+ PINMUX_GPIO(PL3),
+ PINMUX_GPIO(PL2),
+ PINMUX_GPIO(PL1),
+ PINMUX_GPIO(PL0),
/* PM */
- PINMUX_GPIO(GPIO_PM1, PM1_DATA),
- PINMUX_GPIO(GPIO_PM0, PM0_DATA),
+ PINMUX_GPIO(PM1),
+ PINMUX_GPIO(PM0),
/* PN */
- PINMUX_GPIO(GPIO_PN7, PN7_DATA),
- PINMUX_GPIO(GPIO_PN6, PN6_DATA),
- PINMUX_GPIO(GPIO_PN5, PN5_DATA),
- PINMUX_GPIO(GPIO_PN4, PN4_DATA),
- PINMUX_GPIO(GPIO_PN3, PN3_DATA),
- PINMUX_GPIO(GPIO_PN2, PN2_DATA),
- PINMUX_GPIO(GPIO_PN1, PN1_DATA),
- PINMUX_GPIO(GPIO_PN0, PN0_DATA),
+ PINMUX_GPIO(PN7),
+ PINMUX_GPIO(PN6),
+ PINMUX_GPIO(PN5),
+ PINMUX_GPIO(PN4),
+ PINMUX_GPIO(PN3),
+ PINMUX_GPIO(PN2),
+ PINMUX_GPIO(PN1),
+ PINMUX_GPIO(PN0),
/* PP */
- PINMUX_GPIO(GPIO_PP5, PP5_DATA),
- PINMUX_GPIO(GPIO_PP4, PP4_DATA),
- PINMUX_GPIO(GPIO_PP3, PP3_DATA),
- PINMUX_GPIO(GPIO_PP2, PP2_DATA),
- PINMUX_GPIO(GPIO_PP1, PP1_DATA),
- PINMUX_GPIO(GPIO_PP0, PP0_DATA),
+ PINMUX_GPIO(PP5),
+ PINMUX_GPIO(PP4),
+ PINMUX_GPIO(PP3),
+ PINMUX_GPIO(PP2),
+ PINMUX_GPIO(PP1),
+ PINMUX_GPIO(PP0),
/* PQ */
- PINMUX_GPIO(GPIO_PQ4, PQ4_DATA),
- PINMUX_GPIO(GPIO_PQ3, PQ3_DATA),
- PINMUX_GPIO(GPIO_PQ2, PQ2_DATA),
- PINMUX_GPIO(GPIO_PQ1, PQ1_DATA),
- PINMUX_GPIO(GPIO_PQ0, PQ0_DATA),
+ PINMUX_GPIO(PQ4),
+ PINMUX_GPIO(PQ3),
+ PINMUX_GPIO(PQ2),
+ PINMUX_GPIO(PQ1),
+ PINMUX_GPIO(PQ0),
/* PR */
- PINMUX_GPIO(GPIO_PR3, PR3_DATA),
- PINMUX_GPIO(GPIO_PR2, PR2_DATA),
- PINMUX_GPIO(GPIO_PR1, PR1_DATA),
- PINMUX_GPIO(GPIO_PR0, PR0_DATA),
+ PINMUX_GPIO(PR3),
+ PINMUX_GPIO(PR2),
+ PINMUX_GPIO(PR1),
+ PINMUX_GPIO(PR0),
};
#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
@@ -1020,114 +989,114 @@ static const struct pinmux_func pinmux_func_gpios[] = {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PACR", 0xffe70000, 16, 2) {
- PA7_FN, PA7_OUT, PA7_IN, PA7_IN_PU,
- PA6_FN, PA6_OUT, PA6_IN, PA6_IN_PU,
- PA5_FN, PA5_OUT, PA5_IN, PA5_IN_PU,
- PA4_FN, PA4_OUT, PA4_IN, PA4_IN_PU,
- PA3_FN, PA3_OUT, PA3_IN, PA3_IN_PU,
- PA2_FN, PA2_OUT, PA2_IN, PA2_IN_PU,
- PA1_FN, PA1_OUT, PA1_IN, PA1_IN_PU,
- PA0_FN, PA0_OUT, PA0_IN, PA0_IN_PU }
+ PA7_FN, PA7_OUT, PA7_IN, 0,
+ PA6_FN, PA6_OUT, PA6_IN, 0,
+ PA5_FN, PA5_OUT, PA5_IN, 0,
+ PA4_FN, PA4_OUT, PA4_IN, 0,
+ PA3_FN, PA3_OUT, PA3_IN, 0,
+ PA2_FN, PA2_OUT, PA2_IN, 0,
+ PA1_FN, PA1_OUT, PA1_IN, 0,
+ PA0_FN, PA0_OUT, PA0_IN, 0 }
},
{ PINMUX_CFG_REG("PBCR", 0xffe70002, 16, 2) {
- PB7_FN, PB7_OUT, PB7_IN, PB7_IN_PU,
- PB6_FN, PB6_OUT, PB6_IN, PB6_IN_PU,
- PB5_FN, PB5_OUT, PB5_IN, PB5_IN_PU,
- PB4_FN, PB4_OUT, PB4_IN, PB4_IN_PU,
- PB3_FN, PB3_OUT, PB3_IN, PB3_IN_PU,
- PB2_FN, PB2_OUT, PB2_IN, PB2_IN_PU,
- PB1_FN, PB1_OUT, PB1_IN, PB1_IN_PU,
- PB0_FN, PB0_OUT, PB0_IN, PB0_IN_PU }
+ PB7_FN, PB7_OUT, PB7_IN, 0,
+ PB6_FN, PB6_OUT, PB6_IN, 0,
+ PB5_FN, PB5_OUT, PB5_IN, 0,
+ PB4_FN, PB4_OUT, PB4_IN, 0,
+ PB3_FN, PB3_OUT, PB3_IN, 0,
+ PB2_FN, PB2_OUT, PB2_IN, 0,
+ PB1_FN, PB1_OUT, PB1_IN, 0,
+ PB0_FN, PB0_OUT, PB0_IN, 0 }
},
{ PINMUX_CFG_REG("PCCR", 0xffe70004, 16, 2) {
- PC7_FN, PC7_OUT, PC7_IN, PC7_IN_PU,
- PC6_FN, PC6_OUT, PC6_IN, PC6_IN_PU,
- PC5_FN, PC5_OUT, PC5_IN, PC5_IN_PU,
- PC4_FN, PC4_OUT, PC4_IN, PC4_IN_PU,
- PC3_FN, PC3_OUT, PC3_IN, PC3_IN_PU,
- PC2_FN, PC2_OUT, PC2_IN, PC2_IN_PU,
- PC1_FN, PC1_OUT, PC1_IN, PC1_IN_PU,
- PC0_FN, PC0_OUT, PC0_IN, PC0_IN_PU }
+ PC7_FN, PC7_OUT, PC7_IN, 0,
+ PC6_FN, PC6_OUT, PC6_IN, 0,
+ PC5_FN, PC5_OUT, PC5_IN, 0,
+ PC4_FN, PC4_OUT, PC4_IN, 0,
+ PC3_FN, PC3_OUT, PC3_IN, 0,
+ PC2_FN, PC2_OUT, PC2_IN, 0,
+ PC1_FN, PC1_OUT, PC1_IN, 0,
+ PC0_FN, PC0_OUT, PC0_IN, 0 }
},
{ PINMUX_CFG_REG("PDCR", 0xffe70006, 16, 2) {
- PD7_FN, PD7_OUT, PD7_IN, PD7_IN_PU,
- PD6_FN, PD6_OUT, PD6_IN, PD6_IN_PU,
- PD5_FN, PD5_OUT, PD5_IN, PD5_IN_PU,
- PD4_FN, PD4_OUT, PD4_IN, PD4_IN_PU,
- PD3_FN, PD3_OUT, PD3_IN, PD3_IN_PU,
- PD2_FN, PD2_OUT, PD2_IN, PD2_IN_PU,
- PD1_FN, PD1_OUT, PD1_IN, PD1_IN_PU,
- PD0_FN, PD0_OUT, PD0_IN, PD0_IN_PU }
+ PD7_FN, PD7_OUT, PD7_IN, 0,
+ PD6_FN, PD6_OUT, PD6_IN, 0,
+ PD5_FN, PD5_OUT, PD5_IN, 0,
+ PD4_FN, PD4_OUT, PD4_IN, 0,
+ PD3_FN, PD3_OUT, PD3_IN, 0,
+ PD2_FN, PD2_OUT, PD2_IN, 0,
+ PD1_FN, PD1_OUT, PD1_IN, 0,
+ PD0_FN, PD0_OUT, PD0_IN, 0 }
},
{ PINMUX_CFG_REG("PECR", 0xffe70008, 16, 2) {
0, 0, 0, 0,
0, 0, 0, 0,
- PE5_FN, PE5_OUT, PE5_IN, PE5_IN_PU,
- PE4_FN, PE4_OUT, PE4_IN, PE4_IN_PU,
- PE3_FN, PE3_OUT, PE3_IN, PE3_IN_PU,
- PE2_FN, PE2_OUT, PE2_IN, PE2_IN_PU,
- PE1_FN, PE1_OUT, PE1_IN, PE1_IN_PU,
- PE0_FN, PE0_OUT, PE0_IN, PE0_IN_PU }
+ PE5_FN, PE5_OUT, PE5_IN, 0,
+ PE4_FN, PE4_OUT, PE4_IN, 0,
+ PE3_FN, PE3_OUT, PE3_IN, 0,
+ PE2_FN, PE2_OUT, PE2_IN, 0,
+ PE1_FN, PE1_OUT, PE1_IN, 0,
+ PE0_FN, PE0_OUT, PE0_IN, 0 }
},
{ PINMUX_CFG_REG("PFCR", 0xffe7000a, 16, 2) {
- PF7_FN, PF7_OUT, PF7_IN, PF7_IN_PU,
- PF6_FN, PF6_OUT, PF6_IN, PF6_IN_PU,
- PF5_FN, PF5_OUT, PF5_IN, PF5_IN_PU,
- PF4_FN, PF4_OUT, PF4_IN, PF4_IN_PU,
- PF3_FN, PF3_OUT, PF3_IN, PF3_IN_PU,
- PF2_FN, PF2_OUT, PF2_IN, PF2_IN_PU,
- PF1_FN, PF1_OUT, PF1_IN, PF1_IN_PU,
- PF0_FN, PF0_OUT, PF0_IN, PF0_IN_PU }
+ PF7_FN, PF7_OUT, PF7_IN, 0,
+ PF6_FN, PF6_OUT, PF6_IN, 0,
+ PF5_FN, PF5_OUT, PF5_IN, 0,
+ PF4_FN, PF4_OUT, PF4_IN, 0,
+ PF3_FN, PF3_OUT, PF3_IN, 0,
+ PF2_FN, PF2_OUT, PF2_IN, 0,
+ PF1_FN, PF1_OUT, PF1_IN, 0,
+ PF0_FN, PF0_OUT, PF0_IN, 0 }
},
{ PINMUX_CFG_REG("PGCR", 0xffe7000c, 16, 2) {
- PG7_FN, PG7_OUT, PG7_IN, PG7_IN_PU,
- PG6_FN, PG6_OUT, PG6_IN, PG6_IN_PU,
- PG5_FN, PG5_OUT, PG5_IN, PG5_IN_PU,
- PG4_FN, PG4_OUT, PG4_IN, PG4_IN_PU,
- PG3_FN, PG3_OUT, PG3_IN, PG3_IN_PU,
- PG2_FN, PG2_OUT, PG2_IN, PG2_IN_PU,
- PG1_FN, PG1_OUT, PG1_IN, PG1_IN_PU,
- PG0_FN, PG0_OUT, PG0_IN, PG0_IN_PU }
+ PG7_FN, PG7_OUT, PG7_IN, 0,
+ PG6_FN, PG6_OUT, PG6_IN, 0,
+ PG5_FN, PG5_OUT, PG5_IN, 0,
+ PG4_FN, PG4_OUT, PG4_IN, 0,
+ PG3_FN, PG3_OUT, PG3_IN, 0,
+ PG2_FN, PG2_OUT, PG2_IN, 0,
+ PG1_FN, PG1_OUT, PG1_IN, 0,
+ PG0_FN, PG0_OUT, PG0_IN, 0 }
},
{ PINMUX_CFG_REG("PHCR", 0xffe7000e, 16, 2) {
- PH7_FN, PH7_OUT, PH7_IN, PH7_IN_PU,
- PH6_FN, PH6_OUT, PH6_IN, PH6_IN_PU,
- PH5_FN, PH5_OUT, PH5_IN, PH5_IN_PU,
- PH4_FN, PH4_OUT, PH4_IN, PH4_IN_PU,
- PH3_FN, PH3_OUT, PH3_IN, PH3_IN_PU,
- PH2_FN, PH2_OUT, PH2_IN, PH2_IN_PU,
- PH1_FN, PH1_OUT, PH1_IN, PH1_IN_PU,
- PH0_FN, PH0_OUT, PH0_IN, PH0_IN_PU }
+ PH7_FN, PH7_OUT, PH7_IN, 0,
+ PH6_FN, PH6_OUT, PH6_IN, 0,
+ PH5_FN, PH5_OUT, PH5_IN, 0,
+ PH4_FN, PH4_OUT, PH4_IN, 0,
+ PH3_FN, PH3_OUT, PH3_IN, 0,
+ PH2_FN, PH2_OUT, PH2_IN, 0,
+ PH1_FN, PH1_OUT, PH1_IN, 0,
+ PH0_FN, PH0_OUT, PH0_IN, 0 }
},
{ PINMUX_CFG_REG("PJCR", 0xffe70010, 16, 2) {
- PJ7_FN, PJ7_OUT, PJ7_IN, PJ7_IN_PU,
- PJ6_FN, PJ6_OUT, PJ6_IN, PJ6_IN_PU,
- PJ5_FN, PJ5_OUT, PJ5_IN, PJ5_IN_PU,
- PJ4_FN, PJ4_OUT, PJ4_IN, PJ4_IN_PU,
- PJ3_FN, PJ3_OUT, PJ3_IN, PJ3_IN_PU,
- PJ2_FN, PJ2_OUT, PJ2_IN, PJ2_IN_PU,
- PJ1_FN, PJ1_OUT, PJ1_IN, PJ1_IN_PU,
- PJ0_FN, PJ0_OUT, PJ0_IN, PJ0_IN_PU }
+ PJ7_FN, PJ7_OUT, PJ7_IN, 0,
+ PJ6_FN, PJ6_OUT, PJ6_IN, 0,
+ PJ5_FN, PJ5_OUT, PJ5_IN, 0,
+ PJ4_FN, PJ4_OUT, PJ4_IN, 0,
+ PJ3_FN, PJ3_OUT, PJ3_IN, 0,
+ PJ2_FN, PJ2_OUT, PJ2_IN, 0,
+ PJ1_FN, PJ1_OUT, PJ1_IN, 0,
+ PJ0_FN, PJ0_OUT, PJ0_IN, 0 }
},
{ PINMUX_CFG_REG("PKCR", 0xffe70012, 16, 2) {
- PK7_FN, PK7_OUT, PK7_IN, PK7_IN_PU,
- PK6_FN, PK6_OUT, PK6_IN, PK6_IN_PU,
- PK5_FN, PK5_OUT, PK5_IN, PK5_IN_PU,
- PK4_FN, PK4_OUT, PK4_IN, PK4_IN_PU,
- PK3_FN, PK3_OUT, PK3_IN, PK3_IN_PU,
- PK2_FN, PK2_OUT, PK2_IN, PK2_IN_PU,
- PK1_FN, PK1_OUT, PK1_IN, PK1_IN_PU,
- PK0_FN, PK0_OUT, PK0_IN, PK0_IN_PU }
+ PK7_FN, PK7_OUT, PK7_IN, 0,
+ PK6_FN, PK6_OUT, PK6_IN, 0,
+ PK5_FN, PK5_OUT, PK5_IN, 0,
+ PK4_FN, PK4_OUT, PK4_IN, 0,
+ PK3_FN, PK3_OUT, PK3_IN, 0,
+ PK2_FN, PK2_OUT, PK2_IN, 0,
+ PK1_FN, PK1_OUT, PK1_IN, 0,
+ PK0_FN, PK0_OUT, PK0_IN, 0 }
},
{ PINMUX_CFG_REG("PLCR", 0xffe70014, 16, 2) {
- PL7_FN, PL7_OUT, PL7_IN, PL7_IN_PU,
- PL6_FN, PL6_OUT, PL6_IN, PL6_IN_PU,
- PL5_FN, PL5_OUT, PL5_IN, PL5_IN_PU,
- PL4_FN, PL4_OUT, PL4_IN, PL4_IN_PU,
- PL3_FN, PL3_OUT, PL3_IN, PL3_IN_PU,
- PL2_FN, PL2_OUT, PL2_IN, PL2_IN_PU,
- PL1_FN, PL1_OUT, PL1_IN, PL1_IN_PU,
- PL0_FN, PL0_OUT, PL0_IN, PL0_IN_PU }
+ PL7_FN, PL7_OUT, PL7_IN, 0,
+ PL6_FN, PL6_OUT, PL6_IN, 0,
+ PL5_FN, PL5_OUT, PL5_IN, 0,
+ PL4_FN, PL4_OUT, PL4_IN, 0,
+ PL3_FN, PL3_OUT, PL3_IN, 0,
+ PL2_FN, PL2_OUT, PL2_IN, 0,
+ PL1_FN, PL1_OUT, PL1_IN, 0,
+ PL0_FN, PL0_OUT, PL0_IN, 0 }
},
{ PINMUX_CFG_REG("PMCR", 0xffe70016, 16, 2) {
0, 0, 0, 0,
@@ -1136,48 +1105,48 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PM1_FN, PM1_OUT, PM1_IN, PM1_IN_PU,
- PM0_FN, PM0_OUT, PM0_IN, PM0_IN_PU }
+ PM1_FN, PM1_OUT, PM1_IN, 0,
+ PM0_FN, PM0_OUT, PM0_IN, 0 }
},
{ PINMUX_CFG_REG("PNCR", 0xffe70018, 16, 2) {
- PN7_FN, PN7_OUT, PN7_IN, PN7_IN_PU,
- PN6_FN, PN6_OUT, PN6_IN, PN6_IN_PU,
- PN5_FN, PN5_OUT, PN5_IN, PN5_IN_PU,
- PN4_FN, PN4_OUT, PN4_IN, PN4_IN_PU,
- PN3_FN, PN3_OUT, PN3_IN, PN3_IN_PU,
- PN2_FN, PN2_OUT, PN2_IN, PN2_IN_PU,
- PN1_FN, PN1_OUT, PN1_IN, PN1_IN_PU,
- PN0_FN, PN0_OUT, PN0_IN, PN0_IN_PU }
+ PN7_FN, PN7_OUT, PN7_IN, 0,
+ PN6_FN, PN6_OUT, PN6_IN, 0,
+ PN5_FN, PN5_OUT, PN5_IN, 0,
+ PN4_FN, PN4_OUT, PN4_IN, 0,
+ PN3_FN, PN3_OUT, PN3_IN, 0,
+ PN2_FN, PN2_OUT, PN2_IN, 0,
+ PN1_FN, PN1_OUT, PN1_IN, 0,
+ PN0_FN, PN0_OUT, PN0_IN, 0 }
},
{ PINMUX_CFG_REG("PPCR", 0xffe7001a, 16, 2) {
0, 0, 0, 0,
0, 0, 0, 0,
- PP5_FN, PP5_OUT, PP5_IN, PP5_IN_PU,
- PP4_FN, PP4_OUT, PP4_IN, PP4_IN_PU,
- PP3_FN, PP3_OUT, PP3_IN, PP3_IN_PU,
- PP2_FN, PP2_OUT, PP2_IN, PP2_IN_PU,
- PP1_FN, PP1_OUT, PP1_IN, PP1_IN_PU,
- PP0_FN, PP0_OUT, PP0_IN, PP0_IN_PU }
+ PP5_FN, PP5_OUT, PP5_IN, 0,
+ PP4_FN, PP4_OUT, PP4_IN, 0,
+ PP3_FN, PP3_OUT, PP3_IN, 0,
+ PP2_FN, PP2_OUT, PP2_IN, 0,
+ PP1_FN, PP1_OUT, PP1_IN, 0,
+ PP0_FN, PP0_OUT, PP0_IN, 0 }
},
{ PINMUX_CFG_REG("PQCR", 0xffe7001c, 16, 2) {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PQ4_FN, PQ4_OUT, PQ4_IN, PQ4_IN_PU,
- PQ3_FN, PQ3_OUT, PQ3_IN, PQ3_IN_PU,
- PQ2_FN, PQ2_OUT, PQ2_IN, PQ2_IN_PU,
- PQ1_FN, PQ1_OUT, PQ1_IN, PQ1_IN_PU,
- PQ0_FN, PQ0_OUT, PQ0_IN, PQ0_IN_PU }
+ PQ4_FN, PQ4_OUT, PQ4_IN, 0,
+ PQ3_FN, PQ3_OUT, PQ3_IN, 0,
+ PQ2_FN, PQ2_OUT, PQ2_IN, 0,
+ PQ1_FN, PQ1_OUT, PQ1_IN, 0,
+ PQ0_FN, PQ0_OUT, PQ0_IN, 0 }
},
{ PINMUX_CFG_REG("PRCR", 0xffe7001e, 16, 2) {
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PR3_FN, PR3_OUT, PR3_IN, PR3_IN_PU,
- PR2_FN, PR2_OUT, PR2_IN, PR2_IN_PU,
- PR1_FN, PR1_OUT, PR1_IN, PR1_IN_PU,
- PR0_FN, PR0_OUT, PR0_IN, PR0_IN_PU }
+ PR3_FN, PR3_OUT, PR3_IN, 0,
+ PR2_FN, PR2_OUT, PR2_IN, 0,
+ PR1_FN, PR1_OUT, PR1_IN, 0,
+ PR0_FN, PR0_OUT, PR0_IN, 0 }
},
{ PINMUX_CFG_REG("P1MSELR", 0xffe70080, 16, 1) {
P1MSEL15_0, P1MSEL15_1,
@@ -1289,7 +1258,6 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
const struct sh_pfc_soc_info sh7785_pinmux_info = {
.name = "sh7785_pfc",
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
- .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
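The per-SoC pinmux_data[] tables shrink in the same way: the element type changes from pinmux_enum_t to u16, and PINMUX_DATA() loses its trailing pull-up argument, as in this pair lifted from the sh7785 hunk above:

	/* before */
	PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU),
	/* after */
	PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT),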
diff --git a/drivers/pinctrl/sh-pfc/pfc-sh7786.c b/drivers/pinctrl/sh-pfc/pfc-sh7786.c
index 8ae0e32844e..623345fac93 100644
--- a/drivers/pinctrl/sh-pfc/pfc-sh7786.c
+++ b/drivers/pinctrl/sh-pfc/pfc-sh7786.c
@@ -60,25 +60,6 @@ enum {
PJ3_IN, PJ2_IN, PJ1_IN,
PINMUX_INPUT_END,
- PINMUX_INPUT_PULLUP_BEGIN,
- PA7_IN_PU, PA6_IN_PU, PA5_IN_PU, PA4_IN_PU,
- PA3_IN_PU, PA2_IN_PU, PA1_IN_PU, PA0_IN_PU,
- PB7_IN_PU, PB6_IN_PU, PB5_IN_PU, PB4_IN_PU,
- PB3_IN_PU, PB2_IN_PU, PB1_IN_PU, PB0_IN_PU,
- PC7_IN_PU, PC6_IN_PU, PC5_IN_PU, PC4_IN_PU,
- PC3_IN_PU, PC2_IN_PU, PC1_IN_PU, PC0_IN_PU,
- PD7_IN_PU, PD6_IN_PU, PD5_IN_PU, PD4_IN_PU,
- PD3_IN_PU, PD2_IN_PU, PD1_IN_PU, PD0_IN_PU,
- PE7_IN_PU, PE6_IN_PU,
- PF7_IN_PU, PF6_IN_PU, PF5_IN_PU, PF4_IN_PU,
- PF3_IN_PU, PF2_IN_PU, PF1_IN_PU, PF0_IN_PU,
- PG7_IN_PU, PG6_IN_PU, PG5_IN_PU,
- PH7_IN_PU, PH6_IN_PU, PH5_IN_PU, PH4_IN_PU,
- PH3_IN_PU, PH2_IN_PU, PH1_IN_PU, PH0_IN_PU,
- PJ7_IN_PU, PJ6_IN_PU, PJ5_IN_PU, PJ4_IN_PU,
- PJ3_IN_PU, PJ2_IN_PU, PJ1_IN_PU,
- PINMUX_INPUT_PULLUP_END,
-
PINMUX_OUTPUT_BEGIN,
PA7_OUT, PA6_OUT, PA5_OUT, PA4_OUT,
PA3_OUT, PA2_OUT, PA1_OUT, PA0_OUT,
@@ -191,85 +172,84 @@ enum {
PINMUX_MARK_END,
};
-static const pinmux_enum_t pinmux_data[] = {
-
+static const u16 pinmux_data[] = {
/* PA GPIO */
- PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU),
- PINMUX_DATA(PA6_DATA, PA6_IN, PA6_OUT, PA6_IN_PU),
- PINMUX_DATA(PA5_DATA, PA5_IN, PA5_OUT, PA5_IN_PU),
- PINMUX_DATA(PA4_DATA, PA4_IN, PA4_OUT, PA4_IN_PU),
- PINMUX_DATA(PA3_DATA, PA3_IN, PA3_OUT, PA3_IN_PU),
- PINMUX_DATA(PA2_DATA, PA2_IN, PA2_OUT, PA2_IN_PU),
- PINMUX_DATA(PA1_DATA, PA1_IN, PA1_OUT, PA1_IN_PU),
- PINMUX_DATA(PA0_DATA, PA0_IN, PA0_OUT, PA0_IN_PU),
+ PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT),
+ PINMUX_DATA(PA6_DATA, PA6_IN, PA6_OUT),
+ PINMUX_DATA(PA5_DATA, PA5_IN, PA5_OUT),
+ PINMUX_DATA(PA4_DATA, PA4_IN, PA4_OUT),
+ PINMUX_DATA(PA3_DATA, PA3_IN, PA3_OUT),
+ PINMUX_DATA(PA2_DATA, PA2_IN, PA2_OUT),
+ PINMUX_DATA(PA1_DATA, PA1_IN, PA1_OUT),
+ PINMUX_DATA(PA0_DATA, PA0_IN, PA0_OUT),
/* PB GPIO */
- PINMUX_DATA(PB7_DATA, PB7_IN, PB7_OUT, PB7_IN_PU),
- PINMUX_DATA(PB6_DATA, PB6_IN, PB6_OUT, PB6_IN_PU),
- PINMUX_DATA(PB5_DATA, PB5_IN, PB5_OUT, PB5_IN_PU),
- PINMUX_DATA(PB4_DATA, PB4_IN, PB4_OUT, PB4_IN_PU),
- PINMUX_DATA(PB3_DATA, PB3_IN, PB3_OUT, PB3_IN_PU),
- PINMUX_DATA(PB2_DATA, PB2_IN, PB2_OUT, PB2_IN_PU),
- PINMUX_DATA(PB1_DATA, PB1_IN, PB1_OUT, PB1_IN_PU),
- PINMUX_DATA(PB0_DATA, PB0_IN, PB0_OUT, PB0_IN_PU),
+ PINMUX_DATA(PB7_DATA, PB7_IN, PB7_OUT),
+ PINMUX_DATA(PB6_DATA, PB6_IN, PB6_OUT),
+ PINMUX_DATA(PB5_DATA, PB5_IN, PB5_OUT),
+ PINMUX_DATA(PB4_DATA, PB4_IN, PB4_OUT),
+ PINMUX_DATA(PB3_DATA, PB3_IN, PB3_OUT),
+ PINMUX_DATA(PB2_DATA, PB2_IN, PB2_OUT),
+ PINMUX_DATA(PB1_DATA, PB1_IN, PB1_OUT),
+ PINMUX_DATA(PB0_DATA, PB0_IN, PB0_OUT),
/* PC GPIO */
- PINMUX_DATA(PC7_DATA, PC7_IN, PC7_OUT, PC7_IN_PU),
- PINMUX_DATA(PC6_DATA, PC6_IN, PC6_OUT, PC6_IN_PU),
- PINMUX_DATA(PC5_DATA, PC5_IN, PC5_OUT, PC5_IN_PU),
- PINMUX_DATA(PC4_DATA, PC4_IN, PC4_OUT, PC4_IN_PU),
- PINMUX_DATA(PC3_DATA, PC3_IN, PC3_OUT, PC3_IN_PU),
- PINMUX_DATA(PC2_DATA, PC2_IN, PC2_OUT, PC2_IN_PU),
- PINMUX_DATA(PC1_DATA, PC1_IN, PC1_OUT, PC1_IN_PU),
- PINMUX_DATA(PC0_DATA, PC0_IN, PC0_OUT, PC0_IN_PU),
+ PINMUX_DATA(PC7_DATA, PC7_IN, PC7_OUT),
+ PINMUX_DATA(PC6_DATA, PC6_IN, PC6_OUT),
+ PINMUX_DATA(PC5_DATA, PC5_IN, PC5_OUT),
+ PINMUX_DATA(PC4_DATA, PC4_IN, PC4_OUT),
+ PINMUX_DATA(PC3_DATA, PC3_IN, PC3_OUT),
+ PINMUX_DATA(PC2_DATA, PC2_IN, PC2_OUT),
+ PINMUX_DATA(PC1_DATA, PC1_IN, PC1_OUT),
+ PINMUX_DATA(PC0_DATA, PC0_IN, PC0_OUT),
/* PD GPIO */
- PINMUX_DATA(PD7_DATA, PD7_IN, PD7_OUT, PD7_IN_PU),
- PINMUX_DATA(PD6_DATA, PD6_IN, PD6_OUT, PD6_IN_PU),
- PINMUX_DATA(PD5_DATA, PD5_IN, PD5_OUT, PD5_IN_PU),
- PINMUX_DATA(PD4_DATA, PD4_IN, PD4_OUT, PD4_IN_PU),
- PINMUX_DATA(PD3_DATA, PD3_IN, PD3_OUT, PD3_IN_PU),
- PINMUX_DATA(PD2_DATA, PD2_IN, PD2_OUT, PD2_IN_PU),
- PINMUX_DATA(PD1_DATA, PD1_IN, PD1_OUT, PD1_IN_PU),
- PINMUX_DATA(PD0_DATA, PD0_IN, PD0_OUT, PD0_IN_PU),
+ PINMUX_DATA(PD7_DATA, PD7_IN, PD7_OUT),
+ PINMUX_DATA(PD6_DATA, PD6_IN, PD6_OUT),
+ PINMUX_DATA(PD5_DATA, PD5_IN, PD5_OUT),
+ PINMUX_DATA(PD4_DATA, PD4_IN, PD4_OUT),
+ PINMUX_DATA(PD3_DATA, PD3_IN, PD3_OUT),
+ PINMUX_DATA(PD2_DATA, PD2_IN, PD2_OUT),
+ PINMUX_DATA(PD1_DATA, PD1_IN, PD1_OUT),
+ PINMUX_DATA(PD0_DATA, PD0_IN, PD0_OUT),
/* PE GPIO */
- PINMUX_DATA(PE7_DATA, PE7_IN, PE7_OUT, PE7_IN_PU),
- PINMUX_DATA(PE6_DATA, PE6_IN, PE6_OUT, PE6_IN_PU),
+ PINMUX_DATA(PE7_DATA, PE7_IN, PE7_OUT),
+ PINMUX_DATA(PE6_DATA, PE6_IN, PE6_OUT),
/* PF GPIO */
- PINMUX_DATA(PF7_DATA, PF7_IN, PF7_OUT, PF7_IN_PU),
- PINMUX_DATA(PF6_DATA, PF6_IN, PF6_OUT, PF6_IN_PU),
- PINMUX_DATA(PF5_DATA, PF5_IN, PF5_OUT, PF5_IN_PU),
- PINMUX_DATA(PF4_DATA, PF4_IN, PF4_OUT, PF4_IN_PU),
- PINMUX_DATA(PF3_DATA, PF3_IN, PF3_OUT, PF3_IN_PU),
- PINMUX_DATA(PF2_DATA, PF2_IN, PF2_OUT, PF2_IN_PU),
- PINMUX_DATA(PF1_DATA, PF1_IN, PF1_OUT, PF1_IN_PU),
- PINMUX_DATA(PF0_DATA, PF0_IN, PF0_OUT, PF0_IN_PU),
+ PINMUX_DATA(PF7_DATA, PF7_IN, PF7_OUT),
+ PINMUX_DATA(PF6_DATA, PF6_IN, PF6_OUT),
+ PINMUX_DATA(PF5_DATA, PF5_IN, PF5_OUT),
+ PINMUX_DATA(PF4_DATA, PF4_IN, PF4_OUT),
+ PINMUX_DATA(PF3_DATA, PF3_IN, PF3_OUT),
+ PINMUX_DATA(PF2_DATA, PF2_IN, PF2_OUT),
+ PINMUX_DATA(PF1_DATA, PF1_IN, PF1_OUT),
+ PINMUX_DATA(PF0_DATA, PF0_IN, PF0_OUT),
/* PG GPIO */
- PINMUX_DATA(PG7_DATA, PG7_IN, PG7_OUT, PG7_IN_PU),
- PINMUX_DATA(PG6_DATA, PG6_IN, PG6_OUT, PG6_IN_PU),
- PINMUX_DATA(PG5_DATA, PG5_IN, PG5_OUT, PG5_IN_PU),
+ PINMUX_DATA(PG7_DATA, PG7_IN, PG7_OUT),
+ PINMUX_DATA(PG6_DATA, PG6_IN, PG6_OUT),
+ PINMUX_DATA(PG5_DATA, PG5_IN, PG5_OUT),
/* PH GPIO */
- PINMUX_DATA(PH7_DATA, PH7_IN, PH7_OUT, PH7_IN_PU),
- PINMUX_DATA(PH6_DATA, PH6_IN, PH6_OUT, PH6_IN_PU),
- PINMUX_DATA(PH5_DATA, PH5_IN, PH5_OUT, PH5_IN_PU),
- PINMUX_DATA(PH4_DATA, PH4_IN, PH4_OUT, PH4_IN_PU),
- PINMUX_DATA(PH3_DATA, PH3_IN, PH3_OUT, PH3_IN_PU),
- PINMUX_DATA(PH2_DATA, PH2_IN, PH2_OUT, PH2_IN_PU),
- PINMUX_DATA(PH1_DATA, PH1_IN, PH1_OUT, PH1_IN_PU),
- PINMUX_DATA(PH0_DATA, PH0_IN, PH0_OUT, PH0_IN_PU),
+ PINMUX_DATA(PH7_DATA, PH7_IN, PH7_OUT),
+ PINMUX_DATA(PH6_DATA, PH6_IN, PH6_OUT),
+ PINMUX_DATA(PH5_DATA, PH5_IN, PH5_OUT),
+ PINMUX_DATA(PH4_DATA, PH4_IN, PH4_OUT),
+ PINMUX_DATA(PH3_DATA, PH3_IN, PH3_OUT),
+ PINMUX_DATA(PH2_DATA, PH2_IN, PH2_OUT),
+ PINMUX_DATA(PH1_DATA, PH1_IN, PH1_OUT),
+ PINMUX_DATA(PH0_DATA, PH0_IN, PH0_OUT),
/* PJ GPIO */
- PINMUX_DATA(PJ7_DATA, PJ7_IN, PJ7_OUT, PJ7_IN_PU),
- PINMUX_DATA(PJ6_DATA, PJ6_IN, PJ6_OUT, PJ6_IN_PU),
- PINMUX_DATA(PJ5_DATA, PJ5_IN, PJ5_OUT, PJ5_IN_PU),
- PINMUX_DATA(PJ4_DATA, PJ4_IN, PJ4_OUT, PJ4_IN_PU),
- PINMUX_DATA(PJ3_DATA, PJ3_IN, PJ3_OUT, PJ3_IN_PU),
- PINMUX_DATA(PJ2_DATA, PJ2_IN, PJ2_OUT, PJ2_IN_PU),
- PINMUX_DATA(PJ1_DATA, PJ1_IN, PJ1_OUT, PJ1_IN_PU),
+ PINMUX_DATA(PJ7_DATA, PJ7_IN, PJ7_OUT),
+ PINMUX_DATA(PJ6_DATA, PJ6_IN, PJ6_OUT),
+ PINMUX_DATA(PJ5_DATA, PJ5_IN, PJ5_OUT),
+ PINMUX_DATA(PJ4_DATA, PJ4_IN, PJ4_OUT),
+ PINMUX_DATA(PJ3_DATA, PJ3_IN, PJ3_OUT),
+ PINMUX_DATA(PJ2_DATA, PJ2_IN, PJ2_OUT),
+ PINMUX_DATA(PJ1_DATA, PJ1_IN, PJ1_OUT),
/* PA FN */
PINMUX_DATA(CDE_MARK, P1MSEL2_0, PA7_FN),
@@ -429,82 +409,82 @@ static const pinmux_enum_t pinmux_data[] = {
static struct sh_pfc_pin pinmux_pins[] = {
/* PA */
- PINMUX_GPIO(GPIO_PA7, PA7_DATA),
- PINMUX_GPIO(GPIO_PA6, PA6_DATA),
- PINMUX_GPIO(GPIO_PA5, PA5_DATA),
- PINMUX_GPIO(GPIO_PA4, PA4_DATA),
- PINMUX_GPIO(GPIO_PA3, PA3_DATA),
- PINMUX_GPIO(GPIO_PA2, PA2_DATA),
- PINMUX_GPIO(GPIO_PA1, PA1_DATA),
- PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+ PINMUX_GPIO(PA7),
+ PINMUX_GPIO(PA6),
+ PINMUX_GPIO(PA5),
+ PINMUX_GPIO(PA4),
+ PINMUX_GPIO(PA3),
+ PINMUX_GPIO(PA2),
+ PINMUX_GPIO(PA1),
+ PINMUX_GPIO(PA0),
/* PB */
- PINMUX_GPIO(GPIO_PB7, PB7_DATA),
- PINMUX_GPIO(GPIO_PB6, PB6_DATA),
- PINMUX_GPIO(GPIO_PB5, PB5_DATA),
- PINMUX_GPIO(GPIO_PB4, PB4_DATA),
- PINMUX_GPIO(GPIO_PB3, PB3_DATA),
- PINMUX_GPIO(GPIO_PB2, PB2_DATA),
- PINMUX_GPIO(GPIO_PB1, PB1_DATA),
- PINMUX_GPIO(GPIO_PB0, PB0_DATA),
+ PINMUX_GPIO(PB7),
+ PINMUX_GPIO(PB6),
+ PINMUX_GPIO(PB5),
+ PINMUX_GPIO(PB4),
+ PINMUX_GPIO(PB3),
+ PINMUX_GPIO(PB2),
+ PINMUX_GPIO(PB1),
+ PINMUX_GPIO(PB0),
/* PC */
- PINMUX_GPIO(GPIO_PC7, PC7_DATA),
- PINMUX_GPIO(GPIO_PC6, PC6_DATA),
- PINMUX_GPIO(GPIO_PC5, PC5_DATA),
- PINMUX_GPIO(GPIO_PC4, PC4_DATA),
- PINMUX_GPIO(GPIO_PC3, PC3_DATA),
- PINMUX_GPIO(GPIO_PC2, PC2_DATA),
- PINMUX_GPIO(GPIO_PC1, PC1_DATA),
- PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+ PINMUX_GPIO(PC7),
+ PINMUX_GPIO(PC6),
+ PINMUX_GPIO(PC5),
+ PINMUX_GPIO(PC4),
+ PINMUX_GPIO(PC3),
+ PINMUX_GPIO(PC2),
+ PINMUX_GPIO(PC1),
+ PINMUX_GPIO(PC0),
/* PD */
- PINMUX_GPIO(GPIO_PD7, PD7_DATA),
- PINMUX_GPIO(GPIO_PD6, PD6_DATA),
- PINMUX_GPIO(GPIO_PD5, PD5_DATA),
- PINMUX_GPIO(GPIO_PD4, PD4_DATA),
- PINMUX_GPIO(GPIO_PD3, PD3_DATA),
- PINMUX_GPIO(GPIO_PD2, PD2_DATA),
- PINMUX_GPIO(GPIO_PD1, PD1_DATA),
- PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+ PINMUX_GPIO(PD7),
+ PINMUX_GPIO(PD6),
+ PINMUX_GPIO(PD5),
+ PINMUX_GPIO(PD4),
+ PINMUX_GPIO(PD3),
+ PINMUX_GPIO(PD2),
+ PINMUX_GPIO(PD1),
+ PINMUX_GPIO(PD0),
/* PE */
- PINMUX_GPIO(GPIO_PE7, PE7_DATA),
- PINMUX_GPIO(GPIO_PE6, PE6_DATA),
+ PINMUX_GPIO(PE7),
+ PINMUX_GPIO(PE6),
/* PF */
- PINMUX_GPIO(GPIO_PF7, PF7_DATA),
- PINMUX_GPIO(GPIO_PF6, PF6_DATA),
- PINMUX_GPIO(GPIO_PF5, PF5_DATA),
- PINMUX_GPIO(GPIO_PF4, PF4_DATA),
- PINMUX_GPIO(GPIO_PF3, PF3_DATA),
- PINMUX_GPIO(GPIO_PF2, PF2_DATA),
- PINMUX_GPIO(GPIO_PF1, PF1_DATA),
- PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+ PINMUX_GPIO(PF7),
+ PINMUX_GPIO(PF6),
+ PINMUX_GPIO(PF5),
+ PINMUX_GPIO(PF4),
+ PINMUX_GPIO(PF3),
+ PINMUX_GPIO(PF2),
+ PINMUX_GPIO(PF1),
+ PINMUX_GPIO(PF0),
/* PG */
- PINMUX_GPIO(GPIO_PG7, PG7_DATA),
- PINMUX_GPIO(GPIO_PG6, PG6_DATA),
- PINMUX_GPIO(GPIO_PG5, PG5_DATA),
+ PINMUX_GPIO(PG7),
+ PINMUX_GPIO(PG6),
+ PINMUX_GPIO(PG5),
/* PH */
- PINMUX_GPIO(GPIO_PH7, PH7_DATA),
- PINMUX_GPIO(GPIO_PH6, PH6_DATA),
- PINMUX_GPIO(GPIO_PH5, PH5_DATA),
- PINMUX_GPIO(GPIO_PH4, PH4_DATA),
- PINMUX_GPIO(GPIO_PH3, PH3_DATA),
- PINMUX_GPIO(GPIO_PH2, PH2_DATA),
- PINMUX_GPIO(GPIO_PH1, PH1_DATA),
- PINMUX_GPIO(GPIO_PH0, PH0_DATA),
+ PINMUX_GPIO(PH7),
+ PINMUX_GPIO(PH6),
+ PINMUX_GPIO(PH5),
+ PINMUX_GPIO(PH4),
+ PINMUX_GPIO(PH3),
+ PINMUX_GPIO(PH2),
+ PINMUX_GPIO(PH1),
+ PINMUX_GPIO(PH0),
/* PJ */
- PINMUX_GPIO(GPIO_PJ7, PJ7_DATA),
- PINMUX_GPIO(GPIO_PJ6, PJ6_DATA),
- PINMUX_GPIO(GPIO_PJ5, PJ5_DATA),
- PINMUX_GPIO(GPIO_PJ4, PJ4_DATA),
- PINMUX_GPIO(GPIO_PJ3, PJ3_DATA),
- PINMUX_GPIO(GPIO_PJ2, PJ2_DATA),
- PINMUX_GPIO(GPIO_PJ1, PJ1_DATA),
+ PINMUX_GPIO(PJ7),
+ PINMUX_GPIO(PJ6),
+ PINMUX_GPIO(PJ5),
+ PINMUX_GPIO(PJ4),
+ PINMUX_GPIO(PJ3),
+ PINMUX_GPIO(PJ2),
+ PINMUX_GPIO(PJ1),
};
#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
@@ -651,48 +631,48 @@ static const struct pinmux_func pinmux_func_gpios[] = {
static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PACR", 0xffcc0000, 16, 2) {
- PA7_FN, PA7_OUT, PA7_IN, PA7_IN_PU,
- PA6_FN, PA6_OUT, PA6_IN, PA6_IN_PU,
- PA5_FN, PA5_OUT, PA5_IN, PA5_IN_PU,
- PA4_FN, PA4_OUT, PA4_IN, PA4_IN_PU,
- PA3_FN, PA3_OUT, PA3_IN, PA3_IN_PU,
- PA2_FN, PA2_OUT, PA2_IN, PA2_IN_PU,
- PA1_FN, PA1_OUT, PA1_IN, PA1_IN_PU,
- PA0_FN, PA0_OUT, PA0_IN, PA0_IN_PU }
+ PA7_FN, PA7_OUT, PA7_IN, 0,
+ PA6_FN, PA6_OUT, PA6_IN, 0,
+ PA5_FN, PA5_OUT, PA5_IN, 0,
+ PA4_FN, PA4_OUT, PA4_IN, 0,
+ PA3_FN, PA3_OUT, PA3_IN, 0,
+ PA2_FN, PA2_OUT, PA2_IN, 0,
+ PA1_FN, PA1_OUT, PA1_IN, 0,
+ PA0_FN, PA0_OUT, PA0_IN, 0 }
},
{ PINMUX_CFG_REG("PBCR", 0xffcc0002, 16, 2) {
- PB7_FN, PB7_OUT, PB7_IN, PB7_IN_PU,
- PB6_FN, PB6_OUT, PB6_IN, PB6_IN_PU,
- PB5_FN, PB5_OUT, PB5_IN, PB5_IN_PU,
- PB4_FN, PB4_OUT, PB4_IN, PB4_IN_PU,
- PB3_FN, PB3_OUT, PB3_IN, PB3_IN_PU,
- PB2_FN, PB2_OUT, PB2_IN, PB2_IN_PU,
- PB1_FN, PB1_OUT, PB1_IN, PB1_IN_PU,
- PB0_FN, PB0_OUT, PB0_IN, PB0_IN_PU }
+ PB7_FN, PB7_OUT, PB7_IN, 0,
+ PB6_FN, PB6_OUT, PB6_IN, 0,
+ PB5_FN, PB5_OUT, PB5_IN, 0,
+ PB4_FN, PB4_OUT, PB4_IN, 0,
+ PB3_FN, PB3_OUT, PB3_IN, 0,
+ PB2_FN, PB2_OUT, PB2_IN, 0,
+ PB1_FN, PB1_OUT, PB1_IN, 0,
+ PB0_FN, PB0_OUT, PB0_IN, 0 }
},
{ PINMUX_CFG_REG("PCCR", 0xffcc0004, 16, 2) {
- PC7_FN, PC7_OUT, PC7_IN, PC7_IN_PU,
- PC6_FN, PC6_OUT, PC6_IN, PC6_IN_PU,
- PC5_FN, PC5_OUT, PC5_IN, PC5_IN_PU,
- PC4_FN, PC4_OUT, PC4_IN, PC4_IN_PU,
- PC3_FN, PC3_OUT, PC3_IN, PC3_IN_PU,
- PC2_FN, PC2_OUT, PC2_IN, PC2_IN_PU,
- PC1_FN, PC1_OUT, PC1_IN, PC1_IN_PU,
- PC0_FN, PC0_OUT, PC0_IN, PC0_IN_PU }
+ PC7_FN, PC7_OUT, PC7_IN, 0,
+ PC6_FN, PC6_OUT, PC6_IN, 0,
+ PC5_FN, PC5_OUT, PC5_IN, 0,
+ PC4_FN, PC4_OUT, PC4_IN, 0,
+ PC3_FN, PC3_OUT, PC3_IN, 0,
+ PC2_FN, PC2_OUT, PC2_IN, 0,
+ PC1_FN, PC1_OUT, PC1_IN, 0,
+ PC0_FN, PC0_OUT, PC0_IN, 0 }
},
{ PINMUX_CFG_REG("PDCR", 0xffcc0006, 16, 2) {
- PD7_FN, PD7_OUT, PD7_IN, PD7_IN_PU,
- PD6_FN, PD6_OUT, PD6_IN, PD6_IN_PU,
- PD5_FN, PD5_OUT, PD5_IN, PD5_IN_PU,
- PD4_FN, PD4_OUT, PD4_IN, PD4_IN_PU,
- PD3_FN, PD3_OUT, PD3_IN, PD3_IN_PU,
- PD2_FN, PD2_OUT, PD2_IN, PD2_IN_PU,
- PD1_FN, PD1_OUT, PD1_IN, PD1_IN_PU,
- PD0_FN, PD0_OUT, PD0_IN, PD0_IN_PU }
+ PD7_FN, PD7_OUT, PD7_IN, 0,
+ PD6_FN, PD6_OUT, PD6_IN, 0,
+ PD5_FN, PD5_OUT, PD5_IN, 0,
+ PD4_FN, PD4_OUT, PD4_IN, 0,
+ PD3_FN, PD3_OUT, PD3_IN, 0,
+ PD2_FN, PD2_OUT, PD2_IN, 0,
+ PD1_FN, PD1_OUT, PD1_IN, 0,
+ PD0_FN, PD0_OUT, PD0_IN, 0 }
},
{ PINMUX_CFG_REG("PECR", 0xffcc0008, 16, 2) {
- PE7_FN, PE7_OUT, PE7_IN, PE7_IN_PU,
- PE6_FN, PE6_OUT, PE6_IN, PE6_IN_PU,
+ PE7_FN, PE7_OUT, PE7_IN, 0,
+ PE6_FN, PE6_OUT, PE6_IN, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -701,19 +681,19 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, }
},
{ PINMUX_CFG_REG("PFCR", 0xffcc000a, 16, 2) {
- PF7_FN, PF7_OUT, PF7_IN, PF7_IN_PU,
- PF6_FN, PF6_OUT, PF6_IN, PF6_IN_PU,
- PF5_FN, PF5_OUT, PF5_IN, PF5_IN_PU,
- PF4_FN, PF4_OUT, PF4_IN, PF4_IN_PU,
- PF3_FN, PF3_OUT, PF3_IN, PF3_IN_PU,
- PF2_FN, PF2_OUT, PF2_IN, PF2_IN_PU,
- PF1_FN, PF1_OUT, PF1_IN, PF1_IN_PU,
- PF0_FN, PF0_OUT, PF0_IN, PF0_IN_PU }
+ PF7_FN, PF7_OUT, PF7_IN, 0,
+ PF6_FN, PF6_OUT, PF6_IN, 0,
+ PF5_FN, PF5_OUT, PF5_IN, 0,
+ PF4_FN, PF4_OUT, PF4_IN, 0,
+ PF3_FN, PF3_OUT, PF3_IN, 0,
+ PF2_FN, PF2_OUT, PF2_IN, 0,
+ PF1_FN, PF1_OUT, PF1_IN, 0,
+ PF0_FN, PF0_OUT, PF0_IN, 0 }
},
{ PINMUX_CFG_REG("PGCR", 0xffcc000c, 16, 2) {
- PG7_FN, PG7_OUT, PG7_IN, PG7_IN_PU,
- PG6_FN, PG6_OUT, PG6_IN, PG6_IN_PU,
- PG5_FN, PG5_OUT, PG5_IN, PG5_IN_PU,
+ PG7_FN, PG7_OUT, PG7_IN, 0,
+ PG6_FN, PG6_OUT, PG6_IN, 0,
+ PG5_FN, PG5_OUT, PG5_IN, 0,
0, 0, 0, 0,
0, 0, 0, 0,
0, 0, 0, 0,
@@ -721,23 +701,23 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
0, 0, 0, 0, }
},
{ PINMUX_CFG_REG("PHCR", 0xffcc000e, 16, 2) {
- PH7_FN, PH7_OUT, PH7_IN, PH7_IN_PU,
- PH6_FN, PH6_OUT, PH6_IN, PH6_IN_PU,
- PH5_FN, PH5_OUT, PH5_IN, PH5_IN_PU,
- PH4_FN, PH4_OUT, PH4_IN, PH4_IN_PU,
- PH3_FN, PH3_OUT, PH3_IN, PH3_IN_PU,
- PH2_FN, PH2_OUT, PH2_IN, PH2_IN_PU,
- PH1_FN, PH1_OUT, PH1_IN, PH1_IN_PU,
- PH0_FN, PH0_OUT, PH0_IN, PH0_IN_PU }
+ PH7_FN, PH7_OUT, PH7_IN, 0,
+ PH6_FN, PH6_OUT, PH6_IN, 0,
+ PH5_FN, PH5_OUT, PH5_IN, 0,
+ PH4_FN, PH4_OUT, PH4_IN, 0,
+ PH3_FN, PH3_OUT, PH3_IN, 0,
+ PH2_FN, PH2_OUT, PH2_IN, 0,
+ PH1_FN, PH1_OUT, PH1_IN, 0,
+ PH0_FN, PH0_OUT, PH0_IN, 0 }
},
{ PINMUX_CFG_REG("PJCR", 0xffcc0010, 16, 2) {
- PJ7_FN, PJ7_OUT, PJ7_IN, PJ7_IN_PU,
- PJ6_FN, PJ6_OUT, PJ6_IN, PJ6_IN_PU,
- PJ5_FN, PJ5_OUT, PJ5_IN, PJ5_IN_PU,
- PJ4_FN, PJ4_OUT, PJ4_IN, PJ4_IN_PU,
- PJ3_FN, PJ3_OUT, PJ3_IN, PJ3_IN_PU,
- PJ2_FN, PJ2_OUT, PJ2_IN, PJ2_IN_PU,
- PJ1_FN, PJ1_OUT, PJ1_IN, PJ1_IN_PU,
+ PJ7_FN, PJ7_OUT, PJ7_IN, 0,
+ PJ6_FN, PJ6_OUT, PJ6_IN, 0,
+ PJ5_FN, PJ5_OUT, PJ5_IN, 0,
+ PJ4_FN, PJ4_OUT, PJ4_IN, 0,
+ PJ3_FN, PJ3_OUT, PJ3_IN, 0,
+ PJ2_FN, PJ2_OUT, PJ2_IN, 0,
+ PJ1_FN, PJ1_OUT, PJ1_IN, 0,
0, 0, 0, 0, }
},
{ PINMUX_CFG_REG("P1MSELR", 0xffcc0080, 16, 1) {
@@ -822,7 +802,6 @@ static const struct pinmux_data_reg pinmux_data_regs[] = {
const struct sh_pfc_soc_info sh7786_pinmux_info = {
.name = "sh7786_pfc",
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
- .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
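The pin tables also switch to the shorter PINMUX_GPIO() form, which takes only the port name; the GPIO_* number and the *_DATA enum that used to be passed explicitly are presumably derived from that name inside the macro (the macro definition lives in sh_pfc.h and is not part of this diff):

	/* before */
	PINMUX_GPIO(GPIO_PJ1, PJ1_DATA),
	/* after */
	PINMUX_GPIO(PJ1),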
diff --git a/drivers/pinctrl/sh-pfc/pfc-shx3.c b/drivers/pinctrl/sh-pfc/pfc-shx3.c
index 6594c8c4874..55262bd869e 100644
--- a/drivers/pinctrl/sh-pfc/pfc-shx3.c
+++ b/drivers/pinctrl/sh-pfc/pfc-shx3.c
@@ -56,26 +56,6 @@ enum {
PH3_IN, PH2_IN, PH1_IN, PH0_IN,
PINMUX_INPUT_END,
- PINMUX_INPUT_PULLUP_BEGIN,
- PA7_IN_PU, PA6_IN_PU, PA5_IN_PU, PA4_IN_PU,
- PA3_IN_PU, PA2_IN_PU, PA1_IN_PU, PA0_IN_PU,
- PB7_IN_PU, PB6_IN_PU, PB5_IN_PU, PB4_IN_PU,
- PB3_IN_PU, PB2_IN_PU, PB1_IN_PU, PB0_IN_PU,
- PC7_IN_PU, PC6_IN_PU, PC5_IN_PU, PC4_IN_PU,
- PC3_IN_PU, PC2_IN_PU, PC1_IN_PU, PC0_IN_PU,
- PD7_IN_PU, PD6_IN_PU, PD5_IN_PU, PD4_IN_PU,
- PD3_IN_PU, PD2_IN_PU, PD1_IN_PU, PD0_IN_PU,
- PE7_IN_PU, PE6_IN_PU, PE5_IN_PU, PE4_IN_PU,
- PE3_IN_PU, PE2_IN_PU, PE1_IN_PU, PE0_IN_PU,
- PF7_IN_PU, PF6_IN_PU, PF5_IN_PU, PF4_IN_PU,
- PF3_IN_PU, PF2_IN_PU, PF1_IN_PU, PF0_IN_PU,
- PG7_IN_PU, PG6_IN_PU, PG5_IN_PU, PG4_IN_PU,
- PG3_IN_PU, PG2_IN_PU, PG1_IN_PU, PG0_IN_PU,
-
- PH5_IN_PU, PH4_IN_PU,
- PH3_IN_PU, PH2_IN_PU, PH1_IN_PU, PH0_IN_PU,
- PINMUX_INPUT_PULLUP_END,
-
PINMUX_OUTPUT_BEGIN,
PA7_OUT, PA6_OUT, PA5_OUT, PA4_OUT,
PA3_OUT, PA2_OUT, PA1_OUT, PA0_OUT,
@@ -147,85 +127,84 @@ enum {
PINMUX_MARK_END,
};
-static const pinmux_enum_t shx3_pinmux_data[] = {
-
+static const u16 pinmux_data[] = {
/* PA GPIO */
- PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT, PA7_IN_PU),
- PINMUX_DATA(PA6_DATA, PA6_IN, PA6_OUT, PA6_IN_PU),
- PINMUX_DATA(PA5_DATA, PA5_IN, PA5_OUT, PA5_IN_PU),
- PINMUX_DATA(PA4_DATA, PA4_IN, PA4_OUT, PA4_IN_PU),
- PINMUX_DATA(PA3_DATA, PA3_IN, PA3_OUT, PA3_IN_PU),
- PINMUX_DATA(PA2_DATA, PA2_IN, PA2_OUT, PA2_IN_PU),
- PINMUX_DATA(PA1_DATA, PA1_IN, PA1_OUT, PA1_IN_PU),
- PINMUX_DATA(PA0_DATA, PA0_IN, PA0_OUT, PA0_IN_PU),
+ PINMUX_DATA(PA7_DATA, PA7_IN, PA7_OUT),
+ PINMUX_DATA(PA6_DATA, PA6_IN, PA6_OUT),
+ PINMUX_DATA(PA5_DATA, PA5_IN, PA5_OUT),
+ PINMUX_DATA(PA4_DATA, PA4_IN, PA4_OUT),
+ PINMUX_DATA(PA3_DATA, PA3_IN, PA3_OUT),
+ PINMUX_DATA(PA2_DATA, PA2_IN, PA2_OUT),
+ PINMUX_DATA(PA1_DATA, PA1_IN, PA1_OUT),
+ PINMUX_DATA(PA0_DATA, PA0_IN, PA0_OUT),
/* PB GPIO */
- PINMUX_DATA(PB7_DATA, PB7_IN, PB7_OUT, PB7_IN_PU),
- PINMUX_DATA(PB6_DATA, PB6_IN, PB6_OUT, PB6_IN_PU),
- PINMUX_DATA(PB5_DATA, PB5_IN, PB5_OUT, PB5_IN_PU),
- PINMUX_DATA(PB4_DATA, PB4_IN, PB4_OUT, PB4_IN_PU),
- PINMUX_DATA(PB3_DATA, PB3_IN, PB3_OUT, PB3_IN_PU),
- PINMUX_DATA(PB2_DATA, PB2_IN, PB2_OUT, PB2_IN_PU),
- PINMUX_DATA(PB1_DATA, PB1_IN, PB1_OUT, PB1_IN_PU),
- PINMUX_DATA(PB0_DATA, PB0_IN, PB0_OUT, PB0_IN_PU),
+ PINMUX_DATA(PB7_DATA, PB7_IN, PB7_OUT),
+ PINMUX_DATA(PB6_DATA, PB6_IN, PB6_OUT),
+ PINMUX_DATA(PB5_DATA, PB5_IN, PB5_OUT),
+ PINMUX_DATA(PB4_DATA, PB4_IN, PB4_OUT),
+ PINMUX_DATA(PB3_DATA, PB3_IN, PB3_OUT),
+ PINMUX_DATA(PB2_DATA, PB2_IN, PB2_OUT),
+ PINMUX_DATA(PB1_DATA, PB1_IN, PB1_OUT),
+ PINMUX_DATA(PB0_DATA, PB0_IN, PB0_OUT),
/* PC GPIO */
- PINMUX_DATA(PC7_DATA, PC7_IN, PC7_OUT, PC7_IN_PU),
- PINMUX_DATA(PC6_DATA, PC6_IN, PC6_OUT, PC6_IN_PU),
- PINMUX_DATA(PC5_DATA, PC5_IN, PC5_OUT, PC5_IN_PU),
- PINMUX_DATA(PC4_DATA, PC4_IN, PC4_OUT, PC4_IN_PU),
- PINMUX_DATA(PC3_DATA, PC3_IN, PC3_OUT, PC3_IN_PU),
- PINMUX_DATA(PC2_DATA, PC2_IN, PC2_OUT, PC2_IN_PU),
- PINMUX_DATA(PC1_DATA, PC1_IN, PC1_OUT, PC1_IN_PU),
- PINMUX_DATA(PC0_DATA, PC0_IN, PC0_OUT, PC0_IN_PU),
+ PINMUX_DATA(PC7_DATA, PC7_IN, PC7_OUT),
+ PINMUX_DATA(PC6_DATA, PC6_IN, PC6_OUT),
+ PINMUX_DATA(PC5_DATA, PC5_IN, PC5_OUT),
+ PINMUX_DATA(PC4_DATA, PC4_IN, PC4_OUT),
+ PINMUX_DATA(PC3_DATA, PC3_IN, PC3_OUT),
+ PINMUX_DATA(PC2_DATA, PC2_IN, PC2_OUT),
+ PINMUX_DATA(PC1_DATA, PC1_IN, PC1_OUT),
+ PINMUX_DATA(PC0_DATA, PC0_IN, PC0_OUT),
/* PD GPIO */
- PINMUX_DATA(PD7_DATA, PD7_IN, PD7_OUT, PD7_IN_PU),
- PINMUX_DATA(PD6_DATA, PD6_IN, PD6_OUT, PD6_IN_PU),
- PINMUX_DATA(PD5_DATA, PD5_IN, PD5_OUT, PD5_IN_PU),
- PINMUX_DATA(PD4_DATA, PD4_IN, PD4_OUT, PD4_IN_PU),
- PINMUX_DATA(PD3_DATA, PD3_IN, PD3_OUT, PD3_IN_PU),
- PINMUX_DATA(PD2_DATA, PD2_IN, PD2_OUT, PD2_IN_PU),
- PINMUX_DATA(PD1_DATA, PD1_IN, PD1_OUT, PD1_IN_PU),
- PINMUX_DATA(PD0_DATA, PD0_IN, PD0_OUT, PD0_IN_PU),
+ PINMUX_DATA(PD7_DATA, PD7_IN, PD7_OUT),
+ PINMUX_DATA(PD6_DATA, PD6_IN, PD6_OUT),
+ PINMUX_DATA(PD5_DATA, PD5_IN, PD5_OUT),
+ PINMUX_DATA(PD4_DATA, PD4_IN, PD4_OUT),
+ PINMUX_DATA(PD3_DATA, PD3_IN, PD3_OUT),
+ PINMUX_DATA(PD2_DATA, PD2_IN, PD2_OUT),
+ PINMUX_DATA(PD1_DATA, PD1_IN, PD1_OUT),
+ PINMUX_DATA(PD0_DATA, PD0_IN, PD0_OUT),
/* PE GPIO */
- PINMUX_DATA(PE7_DATA, PE7_IN, PE7_OUT, PE7_IN_PU),
- PINMUX_DATA(PE6_DATA, PE6_IN, PE6_OUT, PE6_IN_PU),
- PINMUX_DATA(PE5_DATA, PE5_IN, PE5_OUT, PE5_IN_PU),
- PINMUX_DATA(PE4_DATA, PE4_IN, PE4_OUT, PE4_IN_PU),
- PINMUX_DATA(PE3_DATA, PE3_IN, PE3_OUT, PE3_IN_PU),
- PINMUX_DATA(PE2_DATA, PE2_IN, PE2_OUT, PE2_IN_PU),
- PINMUX_DATA(PE1_DATA, PE1_IN, PE1_OUT, PE1_IN_PU),
- PINMUX_DATA(PE0_DATA, PE0_IN, PE0_OUT, PE0_IN_PU),
+ PINMUX_DATA(PE7_DATA, PE7_IN, PE7_OUT),
+ PINMUX_DATA(PE6_DATA, PE6_IN, PE6_OUT),
+ PINMUX_DATA(PE5_DATA, PE5_IN, PE5_OUT),
+ PINMUX_DATA(PE4_DATA, PE4_IN, PE4_OUT),
+ PINMUX_DATA(PE3_DATA, PE3_IN, PE3_OUT),
+ PINMUX_DATA(PE2_DATA, PE2_IN, PE2_OUT),
+ PINMUX_DATA(PE1_DATA, PE1_IN, PE1_OUT),
+ PINMUX_DATA(PE0_DATA, PE0_IN, PE0_OUT),
/* PF GPIO */
- PINMUX_DATA(PF7_DATA, PF7_IN, PF7_OUT, PF7_IN_PU),
- PINMUX_DATA(PF6_DATA, PF6_IN, PF6_OUT, PF6_IN_PU),
- PINMUX_DATA(PF5_DATA, PF5_IN, PF5_OUT, PF5_IN_PU),
- PINMUX_DATA(PF4_DATA, PF4_IN, PF4_OUT, PF4_IN_PU),
- PINMUX_DATA(PF3_DATA, PF3_IN, PF3_OUT, PF3_IN_PU),
- PINMUX_DATA(PF2_DATA, PF2_IN, PF2_OUT, PF2_IN_PU),
- PINMUX_DATA(PF1_DATA, PF1_IN, PF1_OUT, PF1_IN_PU),
- PINMUX_DATA(PF0_DATA, PF0_IN, PF0_OUT, PF0_IN_PU),
+ PINMUX_DATA(PF7_DATA, PF7_IN, PF7_OUT),
+ PINMUX_DATA(PF6_DATA, PF6_IN, PF6_OUT),
+ PINMUX_DATA(PF5_DATA, PF5_IN, PF5_OUT),
+ PINMUX_DATA(PF4_DATA, PF4_IN, PF4_OUT),
+ PINMUX_DATA(PF3_DATA, PF3_IN, PF3_OUT),
+ PINMUX_DATA(PF2_DATA, PF2_IN, PF2_OUT),
+ PINMUX_DATA(PF1_DATA, PF1_IN, PF1_OUT),
+ PINMUX_DATA(PF0_DATA, PF0_IN, PF0_OUT),
/* PG GPIO */
- PINMUX_DATA(PG7_DATA, PG7_IN, PG7_OUT, PG7_IN_PU),
- PINMUX_DATA(PG6_DATA, PG6_IN, PG6_OUT, PG6_IN_PU),
- PINMUX_DATA(PG5_DATA, PG5_IN, PG5_OUT, PG5_IN_PU),
- PINMUX_DATA(PG4_DATA, PG4_IN, PG4_OUT, PG4_IN_PU),
- PINMUX_DATA(PG3_DATA, PG3_IN, PG3_OUT, PG3_IN_PU),
- PINMUX_DATA(PG2_DATA, PG2_IN, PG2_OUT, PG2_IN_PU),
- PINMUX_DATA(PG1_DATA, PG1_IN, PG1_OUT, PG1_IN_PU),
- PINMUX_DATA(PG0_DATA, PG0_IN, PG0_OUT, PG0_IN_PU),
+ PINMUX_DATA(PG7_DATA, PG7_IN, PG7_OUT),
+ PINMUX_DATA(PG6_DATA, PG6_IN, PG6_OUT),
+ PINMUX_DATA(PG5_DATA, PG5_IN, PG5_OUT),
+ PINMUX_DATA(PG4_DATA, PG4_IN, PG4_OUT),
+ PINMUX_DATA(PG3_DATA, PG3_IN, PG3_OUT),
+ PINMUX_DATA(PG2_DATA, PG2_IN, PG2_OUT),
+ PINMUX_DATA(PG1_DATA, PG1_IN, PG1_OUT),
+ PINMUX_DATA(PG0_DATA, PG0_IN, PG0_OUT),
/* PH GPIO */
- PINMUX_DATA(PH5_DATA, PH5_IN, PH5_OUT, PH5_IN_PU),
- PINMUX_DATA(PH4_DATA, PH4_IN, PH4_OUT, PH4_IN_PU),
- PINMUX_DATA(PH3_DATA, PH3_IN, PH3_OUT, PH3_IN_PU),
- PINMUX_DATA(PH2_DATA, PH2_IN, PH2_OUT, PH2_IN_PU),
- PINMUX_DATA(PH1_DATA, PH1_IN, PH1_OUT, PH1_IN_PU),
- PINMUX_DATA(PH0_DATA, PH0_IN, PH0_OUT, PH0_IN_PU),
+ PINMUX_DATA(PH5_DATA, PH5_IN, PH5_OUT),
+ PINMUX_DATA(PH4_DATA, PH4_IN, PH4_OUT),
+ PINMUX_DATA(PH3_DATA, PH3_IN, PH3_OUT),
+ PINMUX_DATA(PH2_DATA, PH2_IN, PH2_OUT),
+ PINMUX_DATA(PH1_DATA, PH1_IN, PH1_OUT),
+ PINMUX_DATA(PH0_DATA, PH0_IN, PH0_OUT),
/* PA FN */
PINMUX_DATA(D31_MARK, PA7_FN),
@@ -306,89 +285,89 @@ static const pinmux_enum_t shx3_pinmux_data[] = {
PINMUX_DATA(IRQOUT_MARK, PH0_FN),
};
-static struct sh_pfc_pin shx3_pinmux_pins[] = {
+static struct sh_pfc_pin pinmux_pins[] = {
/* PA */
- PINMUX_GPIO(GPIO_PA7, PA7_DATA),
- PINMUX_GPIO(GPIO_PA6, PA6_DATA),
- PINMUX_GPIO(GPIO_PA5, PA5_DATA),
- PINMUX_GPIO(GPIO_PA4, PA4_DATA),
- PINMUX_GPIO(GPIO_PA3, PA3_DATA),
- PINMUX_GPIO(GPIO_PA2, PA2_DATA),
- PINMUX_GPIO(GPIO_PA1, PA1_DATA),
- PINMUX_GPIO(GPIO_PA0, PA0_DATA),
+ PINMUX_GPIO(PA7),
+ PINMUX_GPIO(PA6),
+ PINMUX_GPIO(PA5),
+ PINMUX_GPIO(PA4),
+ PINMUX_GPIO(PA3),
+ PINMUX_GPIO(PA2),
+ PINMUX_GPIO(PA1),
+ PINMUX_GPIO(PA0),
/* PB */
- PINMUX_GPIO(GPIO_PB7, PB7_DATA),
- PINMUX_GPIO(GPIO_PB6, PB6_DATA),
- PINMUX_GPIO(GPIO_PB5, PB5_DATA),
- PINMUX_GPIO(GPIO_PB4, PB4_DATA),
- PINMUX_GPIO(GPIO_PB3, PB3_DATA),
- PINMUX_GPIO(GPIO_PB2, PB2_DATA),
- PINMUX_GPIO(GPIO_PB1, PB1_DATA),
- PINMUX_GPIO(GPIO_PB0, PB0_DATA),
+ PINMUX_GPIO(PB7),
+ PINMUX_GPIO(PB6),
+ PINMUX_GPIO(PB5),
+ PINMUX_GPIO(PB4),
+ PINMUX_GPIO(PB3),
+ PINMUX_GPIO(PB2),
+ PINMUX_GPIO(PB1),
+ PINMUX_GPIO(PB0),
/* PC */
- PINMUX_GPIO(GPIO_PC7, PC7_DATA),
- PINMUX_GPIO(GPIO_PC6, PC6_DATA),
- PINMUX_GPIO(GPIO_PC5, PC5_DATA),
- PINMUX_GPIO(GPIO_PC4, PC4_DATA),
- PINMUX_GPIO(GPIO_PC3, PC3_DATA),
- PINMUX_GPIO(GPIO_PC2, PC2_DATA),
- PINMUX_GPIO(GPIO_PC1, PC1_DATA),
- PINMUX_GPIO(GPIO_PC0, PC0_DATA),
+ PINMUX_GPIO(PC7),
+ PINMUX_GPIO(PC6),
+ PINMUX_GPIO(PC5),
+ PINMUX_GPIO(PC4),
+ PINMUX_GPIO(PC3),
+ PINMUX_GPIO(PC2),
+ PINMUX_GPIO(PC1),
+ PINMUX_GPIO(PC0),
/* PD */
- PINMUX_GPIO(GPIO_PD7, PD7_DATA),
- PINMUX_GPIO(GPIO_PD6, PD6_DATA),
- PINMUX_GPIO(GPIO_PD5, PD5_DATA),
- PINMUX_GPIO(GPIO_PD4, PD4_DATA),
- PINMUX_GPIO(GPIO_PD3, PD3_DATA),
- PINMUX_GPIO(GPIO_PD2, PD2_DATA),
- PINMUX_GPIO(GPIO_PD1, PD1_DATA),
- PINMUX_GPIO(GPIO_PD0, PD0_DATA),
+ PINMUX_GPIO(PD7),
+ PINMUX_GPIO(PD6),
+ PINMUX_GPIO(PD5),
+ PINMUX_GPIO(PD4),
+ PINMUX_GPIO(PD3),
+ PINMUX_GPIO(PD2),
+ PINMUX_GPIO(PD1),
+ PINMUX_GPIO(PD0),
/* PE */
- PINMUX_GPIO(GPIO_PE7, PE7_DATA),
- PINMUX_GPIO(GPIO_PE6, PE6_DATA),
- PINMUX_GPIO(GPIO_PE5, PE5_DATA),
- PINMUX_GPIO(GPIO_PE4, PE4_DATA),
- PINMUX_GPIO(GPIO_PE3, PE3_DATA),
- PINMUX_GPIO(GPIO_PE2, PE2_DATA),
- PINMUX_GPIO(GPIO_PE1, PE1_DATA),
- PINMUX_GPIO(GPIO_PE0, PE0_DATA),
+ PINMUX_GPIO(PE7),
+ PINMUX_GPIO(PE6),
+ PINMUX_GPIO(PE5),
+ PINMUX_GPIO(PE4),
+ PINMUX_GPIO(PE3),
+ PINMUX_GPIO(PE2),
+ PINMUX_GPIO(PE1),
+ PINMUX_GPIO(PE0),
/* PF */
- PINMUX_GPIO(GPIO_PF7, PF7_DATA),
- PINMUX_GPIO(GPIO_PF6, PF6_DATA),
- PINMUX_GPIO(GPIO_PF5, PF5_DATA),
- PINMUX_GPIO(GPIO_PF4, PF4_DATA),
- PINMUX_GPIO(GPIO_PF3, PF3_DATA),
- PINMUX_GPIO(GPIO_PF2, PF2_DATA),
- PINMUX_GPIO(GPIO_PF1, PF1_DATA),
- PINMUX_GPIO(GPIO_PF0, PF0_DATA),
+ PINMUX_GPIO(PF7),
+ PINMUX_GPIO(PF6),
+ PINMUX_GPIO(PF5),
+ PINMUX_GPIO(PF4),
+ PINMUX_GPIO(PF3),
+ PINMUX_GPIO(PF2),
+ PINMUX_GPIO(PF1),
+ PINMUX_GPIO(PF0),
/* PG */
- PINMUX_GPIO(GPIO_PG7, PG7_DATA),
- PINMUX_GPIO(GPIO_PG6, PG6_DATA),
- PINMUX_GPIO(GPIO_PG5, PG5_DATA),
- PINMUX_GPIO(GPIO_PG4, PG4_DATA),
- PINMUX_GPIO(GPIO_PG3, PG3_DATA),
- PINMUX_GPIO(GPIO_PG2, PG2_DATA),
- PINMUX_GPIO(GPIO_PG1, PG1_DATA),
- PINMUX_GPIO(GPIO_PG0, PG0_DATA),
+ PINMUX_GPIO(PG7),
+ PINMUX_GPIO(PG6),
+ PINMUX_GPIO(PG5),
+ PINMUX_GPIO(PG4),
+ PINMUX_GPIO(PG3),
+ PINMUX_GPIO(PG2),
+ PINMUX_GPIO(PG1),
+ PINMUX_GPIO(PG0),
/* PH */
- PINMUX_GPIO(GPIO_PH5, PH5_DATA),
- PINMUX_GPIO(GPIO_PH4, PH4_DATA),
- PINMUX_GPIO(GPIO_PH3, PH3_DATA),
- PINMUX_GPIO(GPIO_PH2, PH2_DATA),
- PINMUX_GPIO(GPIO_PH1, PH1_DATA),
- PINMUX_GPIO(GPIO_PH0, PH0_DATA),
+ PINMUX_GPIO(PH5),
+ PINMUX_GPIO(PH4),
+ PINMUX_GPIO(PH3),
+ PINMUX_GPIO(PH2),
+ PINMUX_GPIO(PH1),
+ PINMUX_GPIO(PH0),
};
-#define PINMUX_FN_BASE ARRAY_SIZE(shx3_pinmux_pins)
+#define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins)
-static const struct pinmux_func shx3_pinmux_func_gpios[] = {
+static const struct pinmux_func pinmux_func_gpios[] = {
/* FN */
GPIO_FN(D31),
GPIO_FN(D30),
@@ -454,83 +433,83 @@ static const struct pinmux_func shx3_pinmux_func_gpios[] = {
GPIO_FN(IRQOUT),
};
-static const struct pinmux_cfg_reg shx3_pinmux_config_regs[] = {
+static const struct pinmux_cfg_reg pinmux_config_regs[] = {
{ PINMUX_CFG_REG("PABCR", 0xffc70000, 32, 2) {
- PA7_FN, PA7_OUT, PA7_IN, PA7_IN_PU,
- PA6_FN, PA6_OUT, PA6_IN, PA6_IN_PU,
- PA5_FN, PA5_OUT, PA5_IN, PA5_IN_PU,
- PA4_FN, PA4_OUT, PA4_IN, PA4_IN_PU,
- PA3_FN, PA3_OUT, PA3_IN, PA3_IN_PU,
- PA2_FN, PA2_OUT, PA2_IN, PA2_IN_PU,
- PA1_FN, PA1_OUT, PA1_IN, PA1_IN_PU,
- PA0_FN, PA0_OUT, PA0_IN, PA0_IN_PU,
- PB7_FN, PB7_OUT, PB7_IN, PB7_IN_PU,
- PB6_FN, PB6_OUT, PB6_IN, PB6_IN_PU,
- PB5_FN, PB5_OUT, PB5_IN, PB5_IN_PU,
- PB4_FN, PB4_OUT, PB4_IN, PB4_IN_PU,
- PB3_FN, PB3_OUT, PB3_IN, PB3_IN_PU,
- PB2_FN, PB2_OUT, PB2_IN, PB2_IN_PU,
- PB1_FN, PB1_OUT, PB1_IN, PB1_IN_PU,
- PB0_FN, PB0_OUT, PB0_IN, PB0_IN_PU, },
+ PA7_FN, PA7_OUT, PA7_IN, 0,
+ PA6_FN, PA6_OUT, PA6_IN, 0,
+ PA5_FN, PA5_OUT, PA5_IN, 0,
+ PA4_FN, PA4_OUT, PA4_IN, 0,
+ PA3_FN, PA3_OUT, PA3_IN, 0,
+ PA2_FN, PA2_OUT, PA2_IN, 0,
+ PA1_FN, PA1_OUT, PA1_IN, 0,
+ PA0_FN, PA0_OUT, PA0_IN, 0,
+ PB7_FN, PB7_OUT, PB7_IN, 0,
+ PB6_FN, PB6_OUT, PB6_IN, 0,
+ PB5_FN, PB5_OUT, PB5_IN, 0,
+ PB4_FN, PB4_OUT, PB4_IN, 0,
+ PB3_FN, PB3_OUT, PB3_IN, 0,
+ PB2_FN, PB2_OUT, PB2_IN, 0,
+ PB1_FN, PB1_OUT, PB1_IN, 0,
+ PB0_FN, PB0_OUT, PB0_IN, 0, },
},
{ PINMUX_CFG_REG("PCDCR", 0xffc70004, 32, 2) {
- PC7_FN, PC7_OUT, PC7_IN, PC7_IN_PU,
- PC6_FN, PC6_OUT, PC6_IN, PC6_IN_PU,
- PC5_FN, PC5_OUT, PC5_IN, PC5_IN_PU,
- PC4_FN, PC4_OUT, PC4_IN, PC4_IN_PU,
- PC3_FN, PC3_OUT, PC3_IN, PC3_IN_PU,
- PC2_FN, PC2_OUT, PC2_IN, PC2_IN_PU,
- PC1_FN, PC1_OUT, PC1_IN, PC1_IN_PU,
- PC0_FN, PC0_OUT, PC0_IN, PC0_IN_PU,
- PD7_FN, PD7_OUT, PD7_IN, PD7_IN_PU,
- PD6_FN, PD6_OUT, PD6_IN, PD6_IN_PU,
- PD5_FN, PD5_OUT, PD5_IN, PD5_IN_PU,
- PD4_FN, PD4_OUT, PD4_IN, PD4_IN_PU,
- PD3_FN, PD3_OUT, PD3_IN, PD3_IN_PU,
- PD2_FN, PD2_OUT, PD2_IN, PD2_IN_PU,
- PD1_FN, PD1_OUT, PD1_IN, PD1_IN_PU,
- PD0_FN, PD0_OUT, PD0_IN, PD0_IN_PU, },
+ PC7_FN, PC7_OUT, PC7_IN, 0,
+ PC6_FN, PC6_OUT, PC6_IN, 0,
+ PC5_FN, PC5_OUT, PC5_IN, 0,
+ PC4_FN, PC4_OUT, PC4_IN, 0,
+ PC3_FN, PC3_OUT, PC3_IN, 0,
+ PC2_FN, PC2_OUT, PC2_IN, 0,
+ PC1_FN, PC1_OUT, PC1_IN, 0,
+ PC0_FN, PC0_OUT, PC0_IN, 0,
+ PD7_FN, PD7_OUT, PD7_IN, 0,
+ PD6_FN, PD6_OUT, PD6_IN, 0,
+ PD5_FN, PD5_OUT, PD5_IN, 0,
+ PD4_FN, PD4_OUT, PD4_IN, 0,
+ PD3_FN, PD3_OUT, PD3_IN, 0,
+ PD2_FN, PD2_OUT, PD2_IN, 0,
+ PD1_FN, PD1_OUT, PD1_IN, 0,
+ PD0_FN, PD0_OUT, PD0_IN, 0, },
},
{ PINMUX_CFG_REG("PEFCR", 0xffc70008, 32, 2) {
- PE7_FN, PE7_OUT, PE7_IN, PE7_IN_PU,
- PE6_FN, PE6_OUT, PE6_IN, PE6_IN_PU,
- PE5_FN, PE5_OUT, PE5_IN, PE5_IN_PU,
- PE4_FN, PE4_OUT, PE4_IN, PE4_IN_PU,
- PE3_FN, PE3_OUT, PE3_IN, PE3_IN_PU,
- PE2_FN, PE2_OUT, PE2_IN, PE2_IN_PU,
- PE1_FN, PE1_OUT, PE1_IN, PE1_IN_PU,
- PE0_FN, PE0_OUT, PE0_IN, PE0_IN_PU,
- PF7_FN, PF7_OUT, PF7_IN, PF7_IN_PU,
- PF6_FN, PF6_OUT, PF6_IN, PF6_IN_PU,
- PF5_FN, PF5_OUT, PF5_IN, PF5_IN_PU,
- PF4_FN, PF4_OUT, PF4_IN, PF4_IN_PU,
- PF3_FN, PF3_OUT, PF3_IN, PF3_IN_PU,
- PF2_FN, PF2_OUT, PF2_IN, PF2_IN_PU,
- PF1_FN, PF1_OUT, PF1_IN, PF1_IN_PU,
- PF0_FN, PF0_OUT, PF0_IN, PF0_IN_PU, },
+ PE7_FN, PE7_OUT, PE7_IN, 0,
+ PE6_FN, PE6_OUT, PE6_IN, 0,
+ PE5_FN, PE5_OUT, PE5_IN, 0,
+ PE4_FN, PE4_OUT, PE4_IN, 0,
+ PE3_FN, PE3_OUT, PE3_IN, 0,
+ PE2_FN, PE2_OUT, PE2_IN, 0,
+ PE1_FN, PE1_OUT, PE1_IN, 0,
+ PE0_FN, PE0_OUT, PE0_IN, 0,
+ PF7_FN, PF7_OUT, PF7_IN, 0,
+ PF6_FN, PF6_OUT, PF6_IN, 0,
+ PF5_FN, PF5_OUT, PF5_IN, 0,
+ PF4_FN, PF4_OUT, PF4_IN, 0,
+ PF3_FN, PF3_OUT, PF3_IN, 0,
+ PF2_FN, PF2_OUT, PF2_IN, 0,
+ PF1_FN, PF1_OUT, PF1_IN, 0,
+ PF0_FN, PF0_OUT, PF0_IN, 0, },
},
{ PINMUX_CFG_REG("PGHCR", 0xffc7000c, 32, 2) {
- PG7_FN, PG7_OUT, PG7_IN, PG7_IN_PU,
- PG6_FN, PG6_OUT, PG6_IN, PG6_IN_PU,
- PG5_FN, PG5_OUT, PG5_IN, PG5_IN_PU,
- PG4_FN, PG4_OUT, PG4_IN, PG4_IN_PU,
- PG3_FN, PG3_OUT, PG3_IN, PG3_IN_PU,
- PG2_FN, PG2_OUT, PG2_IN, PG2_IN_PU,
- PG1_FN, PG1_OUT, PG1_IN, PG1_IN_PU,
- PG0_FN, PG0_OUT, PG0_IN, PG0_IN_PU,
+ PG7_FN, PG7_OUT, PG7_IN, 0,
+ PG6_FN, PG6_OUT, PG6_IN, 0,
+ PG5_FN, PG5_OUT, PG5_IN, 0,
+ PG4_FN, PG4_OUT, PG4_IN, 0,
+ PG3_FN, PG3_OUT, PG3_IN, 0,
+ PG2_FN, PG2_OUT, PG2_IN, 0,
+ PG1_FN, PG1_OUT, PG1_IN, 0,
+ PG0_FN, PG0_OUT, PG0_IN, 0,
0, 0, 0, 0,
0, 0, 0, 0,
- PH5_FN, PH5_OUT, PH5_IN, PH5_IN_PU,
- PH4_FN, PH4_OUT, PH4_IN, PH4_IN_PU,
- PH3_FN, PH3_OUT, PH3_IN, PH3_IN_PU,
- PH2_FN, PH2_OUT, PH2_IN, PH2_IN_PU,
- PH1_FN, PH1_OUT, PH1_IN, PH1_IN_PU,
- PH0_FN, PH0_OUT, PH0_IN, PH0_IN_PU, },
+ PH5_FN, PH5_OUT, PH5_IN, 0,
+ PH4_FN, PH4_OUT, PH4_IN, 0,
+ PH3_FN, PH3_OUT, PH3_IN, 0,
+ PH2_FN, PH2_OUT, PH2_IN, 0,
+ PH1_FN, PH1_OUT, PH1_IN, 0,
+ PH0_FN, PH0_OUT, PH0_IN, 0, },
},
{ },
};
-static const struct pinmux_data_reg shx3_pinmux_data_regs[] = {
+static const struct pinmux_data_reg pinmux_data_regs[] = {
{ PINMUX_DATA_REG("PABDR", 0xffc70010, 32) {
0, 0, 0, 0, 0, 0, 0, 0,
PA7_DATA, PA6_DATA, PA5_DATA, PA4_DATA,
@@ -569,16 +548,14 @@ static const struct pinmux_data_reg shx3_pinmux_data_regs[] = {
const struct sh_pfc_soc_info shx3_pinmux_info = {
.name = "shx3_pfc",
.input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END },
- .input_pu = { PINMUX_INPUT_PULLUP_BEGIN,
- PINMUX_INPUT_PULLUP_END },
.output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END },
.function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END },
- .pins = shx3_pinmux_pins,
- .nr_pins = ARRAY_SIZE(shx3_pinmux_pins),
- .func_gpios = shx3_pinmux_func_gpios,
- .nr_func_gpios = ARRAY_SIZE(shx3_pinmux_func_gpios),
- .gpio_data = shx3_pinmux_data,
- .gpio_data_size = ARRAY_SIZE(shx3_pinmux_data),
- .cfg_regs = shx3_pinmux_config_regs,
- .data_regs = shx3_pinmux_data_regs,
+ .pins = pinmux_pins,
+ .nr_pins = ARRAY_SIZE(pinmux_pins),
+ .func_gpios = pinmux_func_gpios,
+ .nr_func_gpios = ARRAY_SIZE(pinmux_func_gpios),
+ .gpio_data = pinmux_data,
+ .gpio_data_size = ARRAY_SIZE(pinmux_data),
+ .cfg_regs = pinmux_config_regs,
+ .data_regs = pinmux_data_regs,
};
diff --git a/drivers/pinctrl/sh-pfc/pinctrl.c b/drivers/pinctrl/sh-pfc/pinctrl.c
index bc8b028bb5d..e758af95c20 100644
--- a/drivers/pinctrl/sh-pfc/pinctrl.c
+++ b/drivers/pinctrl/sh-pfc/pinctrl.c
@@ -529,38 +529,44 @@ static int sh_pfc_pinconf_get(struct pinctrl_dev *pctldev, unsigned _pin,
}
static int sh_pfc_pinconf_set(struct pinctrl_dev *pctldev, unsigned _pin,
- unsigned long config)
+ unsigned long *configs, unsigned num_configs)
{
struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
struct sh_pfc *pfc = pmx->pfc;
- enum pin_config_param param = pinconf_to_config_param(config);
+ enum pin_config_param param;
unsigned long flags;
+ unsigned int i;
- if (!sh_pfc_pinconf_validate(pfc, _pin, param))
- return -ENOTSUPP;
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
- switch (param) {
- case PIN_CONFIG_BIAS_PULL_UP:
- case PIN_CONFIG_BIAS_PULL_DOWN:
- case PIN_CONFIG_BIAS_DISABLE:
- if (!pfc->info->ops || !pfc->info->ops->set_bias)
+ if (!sh_pfc_pinconf_validate(pfc, _pin, param))
return -ENOTSUPP;
- spin_lock_irqsave(&pfc->lock, flags);
- pfc->info->ops->set_bias(pfc, _pin, param);
- spin_unlock_irqrestore(&pfc->lock, flags);
+ switch (param) {
+ case PIN_CONFIG_BIAS_PULL_UP:
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_DISABLE:
+ if (!pfc->info->ops || !pfc->info->ops->set_bias)
+ return -ENOTSUPP;
- break;
+ spin_lock_irqsave(&pfc->lock, flags);
+ pfc->info->ops->set_bias(pfc, _pin, param);
+ spin_unlock_irqrestore(&pfc->lock, flags);
- default:
- return -ENOTSUPP;
- }
+ break;
+
+ default:
+ return -ENOTSUPP;
+ }
+ } /* for each config */
return 0;
}
static int sh_pfc_pinconf_group_set(struct pinctrl_dev *pctldev, unsigned group,
- unsigned long config)
+ unsigned long *configs,
+ unsigned num_configs)
{
struct sh_pfc_pinctrl *pmx = pinctrl_dev_get_drvdata(pctldev);
const unsigned int *pins;
@@ -571,7 +577,7 @@ static int sh_pfc_pinconf_group_set(struct pinctrl_dev *pctldev, unsigned group,
num_pins = pmx->pfc->info->groups[group].nr_pins;
for (i = 0; i < num_pins; ++i)
- sh_pfc_pinconf_set(pctldev, pins[i], config);
+ sh_pfc_pinconf_set(pctldev, pins[i], configs, num_configs);
return 0;
}
@@ -587,22 +593,9 @@ static const struct pinconf_ops sh_pfc_pinconf_ops = {
/* PFC ranges -> pinctrl pin descs */
static int sh_pfc_map_pins(struct sh_pfc *pfc, struct sh_pfc_pinctrl *pmx)
{
- const struct pinmux_range *ranges;
- struct pinmux_range def_range;
- unsigned int nr_ranges;
- unsigned int nr_pins;
unsigned int i;
- if (pfc->info->ranges == NULL) {
- def_range.begin = 0;
- def_range.end = pfc->info->nr_pins - 1;
- ranges = &def_range;
- nr_ranges = 1;
- } else {
- ranges = pfc->info->ranges;
- nr_ranges = pfc->info->nr_ranges;
- }
-
+ /* Allocate and initialize the pins and configs arrays. */
pmx->pins = devm_kzalloc(pfc->dev,
sizeof(*pmx->pins) * pfc->info->nr_pins,
GFP_KERNEL);
@@ -615,32 +608,24 @@ static int sh_pfc_map_pins(struct sh_pfc *pfc, struct sh_pfc_pinctrl *pmx)
if (unlikely(!pmx->configs))
return -ENOMEM;
- for (i = 0, nr_pins = 0; i < nr_ranges; ++i) {
- const struct pinmux_range *range = &ranges[i];
- unsigned int number;
-
- for (number = range->begin; number <= range->end;
- number++, nr_pins++) {
- struct sh_pfc_pin_config *cfg = &pmx->configs[nr_pins];
- struct pinctrl_pin_desc *pin = &pmx->pins[nr_pins];
- const struct sh_pfc_pin *info =
- &pfc->info->pins[nr_pins];
+ for (i = 0; i < pfc->info->nr_pins; ++i) {
+ const struct sh_pfc_pin *info = &pfc->info->pins[i];
+ struct sh_pfc_pin_config *cfg = &pmx->configs[i];
+ struct pinctrl_pin_desc *pin = &pmx->pins[i];
- pin->number = number;
- pin->name = info->name;
- cfg->type = PINMUX_TYPE_NONE;
- }
+ /* Pins with number (u16)-1 have no fixed hardware number, so fall back to the array index. */
+ pin->number = info->pin != (u16)-1 ? info->pin : i;
+ pin->name = info->name;
+ cfg->type = PINMUX_TYPE_NONE;
}
- pfc->nr_pins = ranges[nr_ranges-1].end + 1;
-
- return nr_ranges;
+ return 0;
}
int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
{
struct sh_pfc_pinctrl *pmx;
- int nr_ranges;
+ int ret;
pmx = devm_kzalloc(pfc->dev, sizeof(*pmx), GFP_KERNEL);
if (unlikely(!pmx))
@@ -649,9 +634,9 @@ int sh_pfc_register_pinctrl(struct sh_pfc *pfc)
pmx->pfc = pfc;
pfc->pinctrl = pmx;
- nr_ranges = sh_pfc_map_pins(pfc, pmx);
- if (unlikely(nr_ranges < 0))
- return nr_ranges;
+ ret = sh_pfc_map_pins(pfc, pmx);
+ if (ret < 0)
+ return ret;
pmx->pctl_desc.name = DRV_NAME;
pmx->pctl_desc.owner = THIS_MODULE;
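The sh_pfc_pinconf_set() rework above switches the .pin_config_set callback from a single packed config word to an array plus a count, so every entry is applied in one call. A minimal sketch of that calling convention, written as if it sat inside pinctrl.c; the packing helpers come from <linux/pinctrl/pinconf-generic.h>, and the example_* names are invented for illustration:

#include <linux/kernel.h>
#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>

/* Pack two bias requests the way the pinconf core now hands them over. */
static int example_bias_request(struct pinctrl_dev *pctldev, unsigned example_pin)
{
	unsigned long configs[2];

	configs[0] = pinconf_to_config_packed(PIN_CONFIG_BIAS_PULL_UP, 0);
	configs[1] = pinconf_to_config_packed(PIN_CONFIG_BIAS_DISABLE, 0);

	/* One driver call covers the whole array. */
	return sh_pfc_pinconf_set(pctldev, example_pin, configs,
				  ARRAY_SIZE(configs));
}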
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 830ae1ffd0b..11bd0d970a5 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -14,30 +14,23 @@
#include <linux/bug.h>
#include <linux/stringify.h>
-typedef unsigned short pinmux_enum_t;
-
-#define SH_PFC_MARK_INVALID ((pinmux_enum_t)-1)
-
enum {
PINMUX_TYPE_NONE,
-
PINMUX_TYPE_FUNCTION,
PINMUX_TYPE_GPIO,
PINMUX_TYPE_OUTPUT,
PINMUX_TYPE_INPUT,
- PINMUX_TYPE_INPUT_PULLUP,
- PINMUX_TYPE_INPUT_PULLDOWN,
-
- PINMUX_FLAG_TYPE, /* must be last */
};
#define SH_PFC_PIN_CFG_INPUT (1 << 0)
#define SH_PFC_PIN_CFG_OUTPUT (1 << 1)
#define SH_PFC_PIN_CFG_PULL_UP (1 << 2)
#define SH_PFC_PIN_CFG_PULL_DOWN (1 << 3)
+#define SH_PFC_PIN_CFG_NO_GPIO (1 << 31)
struct sh_pfc_pin {
- const pinmux_enum_t enum_id;
+ u16 pin;
+ u16 enum_id;
const char *name;
unsigned int configs;
};
@@ -71,46 +64,33 @@ struct sh_pfc_function {
};
struct pinmux_func {
- const pinmux_enum_t enum_id;
+ u16 enum_id;
const char *name;
};
-#define PINMUX_GPIO(gpio, data_or_mark) \
- [gpio] = { \
- .name = __stringify(gpio), \
- .enum_id = data_or_mark, \
- }
-#define PINMUX_GPIO_FN(gpio, base, data_or_mark) \
- [gpio - (base)] = { \
- .name = __stringify(gpio), \
- .enum_id = data_or_mark, \
- }
-
-#define PINMUX_DATA(data_or_mark, ids...) data_or_mark, ids, 0
-
struct pinmux_cfg_reg {
unsigned long reg, reg_width, field_width;
- const pinmux_enum_t *enum_ids;
+ const u16 *enum_ids;
const unsigned long *var_field_width;
};
#define PINMUX_CFG_REG(name, r, r_width, f_width) \
.reg = r, .reg_width = r_width, .field_width = f_width, \
- .enum_ids = (pinmux_enum_t [(r_width / f_width) * (1 << f_width)])
+ .enum_ids = (u16 [(r_width / f_width) * (1 << f_width)])
#define PINMUX_CFG_REG_VAR(name, r, r_width, var_fw0, var_fwn...) \
.reg = r, .reg_width = r_width, \
.var_field_width = (unsigned long [r_width]) { var_fw0, var_fwn, 0 }, \
- .enum_ids = (pinmux_enum_t [])
+ .enum_ids = (u16 [])
struct pinmux_data_reg {
unsigned long reg, reg_width;
- const pinmux_enum_t *enum_ids;
+ const u16 *enum_ids;
};
#define PINMUX_DATA_REG(name, r, r_width) \
.reg = r, .reg_width = r_width, \
- .enum_ids = (pinmux_enum_t [r_width]) \
+ .enum_ids = (u16 [r_width]) \
struct pinmux_irq {
int irq;
@@ -121,9 +101,9 @@ struct pinmux_irq {
{ .irq = irq_nr, .gpios = (unsigned short []) { ids, 0 } } \
struct pinmux_range {
- pinmux_enum_t begin;
- pinmux_enum_t end;
- pinmux_enum_t force;
+ u16 begin;
+ u16 end;
+ u16 force;
};
struct sh_pfc;
@@ -141,15 +121,11 @@ struct sh_pfc_soc_info {
const struct sh_pfc_soc_operations *ops;
struct pinmux_range input;
- struct pinmux_range input_pd;
- struct pinmux_range input_pu;
struct pinmux_range output;
struct pinmux_range function;
const struct sh_pfc_pin *pins;
unsigned int nr_pins;
- const struct pinmux_range *ranges;
- unsigned int nr_ranges;
const struct sh_pfc_pin_group *groups;
unsigned int nr_groups;
const struct sh_pfc_function *functions;
@@ -161,7 +137,7 @@ struct sh_pfc_soc_info {
const struct pinmux_cfg_reg *cfg_regs;
const struct pinmux_data_reg *data_regs;
- const pinmux_enum_t *gpio_data;
+ const u16 *gpio_data;
unsigned int gpio_data_size;
const struct pinmux_irq *gpio_irq;
@@ -170,84 +146,155 @@ struct sh_pfc_soc_info {
unsigned long unlock_reg;
};
-enum { GPIO_CFG_REQ, GPIO_CFG_FREE };
-
-/* helper macro for port */
-#define PORT_1(fn, pfx, sfx) fn(pfx, sfx)
-
-#define PORT_10(fn, pfx, sfx) \
- PORT_1(fn, pfx##0, sfx), PORT_1(fn, pfx##1, sfx), \
- PORT_1(fn, pfx##2, sfx), PORT_1(fn, pfx##3, sfx), \
- PORT_1(fn, pfx##4, sfx), PORT_1(fn, pfx##5, sfx), \
- PORT_1(fn, pfx##6, sfx), PORT_1(fn, pfx##7, sfx), \
- PORT_1(fn, pfx##8, sfx), PORT_1(fn, pfx##9, sfx)
-
-#define PORT_10_REV(fn, pfx, sfx) \
- PORT_1(fn, pfx##9, sfx), PORT_1(fn, pfx##8, sfx), \
- PORT_1(fn, pfx##7, sfx), PORT_1(fn, pfx##6, sfx), \
- PORT_1(fn, pfx##5, sfx), PORT_1(fn, pfx##4, sfx), \
- PORT_1(fn, pfx##3, sfx), PORT_1(fn, pfx##2, sfx), \
- PORT_1(fn, pfx##1, sfx), PORT_1(fn, pfx##0, sfx)
-
-#define PORT_32(fn, pfx, sfx) \
- PORT_10(fn, pfx, sfx), PORT_10(fn, pfx##1, sfx), \
- PORT_10(fn, pfx##2, sfx), PORT_1(fn, pfx##30, sfx), \
- PORT_1(fn, pfx##31, sfx)
-
-#define PORT_32_REV(fn, pfx, sfx) \
- PORT_1(fn, pfx##31, sfx), PORT_1(fn, pfx##30, sfx), \
- PORT_10_REV(fn, pfx##2, sfx), PORT_10_REV(fn, pfx##1, sfx), \
- PORT_10_REV(fn, pfx, sfx)
-
-#define PORT_90(fn, pfx, sfx) \
- PORT_10(fn, pfx##1, sfx), PORT_10(fn, pfx##2, sfx), \
- PORT_10(fn, pfx##3, sfx), PORT_10(fn, pfx##4, sfx), \
- PORT_10(fn, pfx##5, sfx), PORT_10(fn, pfx##6, sfx), \
- PORT_10(fn, pfx##7, sfx), PORT_10(fn, pfx##8, sfx), \
- PORT_10(fn, pfx##9, sfx)
-
-#define _PORT_ALL(pfx, sfx) pfx##_##sfx
-#define _GPIO_PORT(pfx, sfx) PINMUX_GPIO(GPIO_PORT##pfx, PORT##pfx##_DATA)
-#define PORT_ALL(str) CPU_ALL_PORT(_PORT_ALL, PORT, str)
-#define GPIO_PORT_ALL() CPU_ALL_PORT(_GPIO_PORT, , unused)
-#define GPIO_FN(str) PINMUX_GPIO_FN(GPIO_FN_##str, PINMUX_FN_BASE, str##_MARK)
-
-/* helper macro for pinmux_enum_t */
-#define PORT_DATA_I(nr) \
- PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_IN)
-
-#define PORT_DATA_I_PD(nr) \
- PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
- PORT##nr##_IN, PORT##nr##_IN_PD)
-
-#define PORT_DATA_I_PU(nr) \
- PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
- PORT##nr##_IN, PORT##nr##_IN_PU)
-
-#define PORT_DATA_I_PU_PD(nr) \
- PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, \
- PORT##nr##_IN, PORT##nr##_IN_PD, PORT##nr##_IN_PU)
-
-#define PORT_DATA_O(nr) \
- PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT)
-
-#define PORT_DATA_IO(nr) \
- PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
- PORT##nr##_IN)
-
-#define PORT_DATA_IO_PD(nr) \
- PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
- PORT##nr##_IN, PORT##nr##_IN_PD)
-
-#define PORT_DATA_IO_PU(nr) \
- PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
- PORT##nr##_IN, PORT##nr##_IN_PU)
-
-#define PORT_DATA_IO_PU_PD(nr) \
- PINMUX_DATA(PORT##nr##_DATA, PORT##nr##_FN0, PORT##nr##_OUT, \
- PORT##nr##_IN, PORT##nr##_IN_PD, PORT##nr##_IN_PU)
-
-/* helper macro for top 4 bits in PORTnCR */
+/* -----------------------------------------------------------------------------
+ * Helper macros to create pin and port lists
+ */
+
+/*
+ * sh_pfc_soc_info gpio_data array macros
+ */
+
+#define PINMUX_DATA(data_or_mark, ids...) data_or_mark, ids, 0
+
+#define PINMUX_IPSR_NOGP(ipsr, fn) \
+ PINMUX_DATA(fn##_MARK, FN_##fn)
+#define PINMUX_IPSR_DATA(ipsr, fn) \
+ PINMUX_DATA(fn##_MARK, FN_##fn, FN_##ipsr)
+#define PINMUX_IPSR_NOGM(ipsr, fn, ms) \
+ PINMUX_DATA(fn##_MARK, FN_##fn, FN_##ms)
+#define PINMUX_IPSR_MSEL(ipsr, fn, ms) \
+ PINMUX_DATA(fn##_MARK, FN_##fn, FN_##ipsr, FN_##ms)
+#define PINMUX_IPSR_MODSEL_DATA(ipsr, fn, ms) \
+ PINMUX_DATA(fn##_MARK, FN_##ms, FN_##ipsr, FN_##fn)
+
+/*
+ * GP port style (banks of 32 ports)
+ */
+
+#define PORT_GP_1(bank, pin, fn, sfx) fn(bank, pin, GP_##bank##_##pin, sfx)
+
+#define PORT_GP_32(bank, fn, sfx) \
+ PORT_GP_1(bank, 0, fn, sfx), PORT_GP_1(bank, 1, fn, sfx), \
+ PORT_GP_1(bank, 2, fn, sfx), PORT_GP_1(bank, 3, fn, sfx), \
+ PORT_GP_1(bank, 4, fn, sfx), PORT_GP_1(bank, 5, fn, sfx), \
+ PORT_GP_1(bank, 6, fn, sfx), PORT_GP_1(bank, 7, fn, sfx), \
+ PORT_GP_1(bank, 8, fn, sfx), PORT_GP_1(bank, 9, fn, sfx), \
+ PORT_GP_1(bank, 10, fn, sfx), PORT_GP_1(bank, 11, fn, sfx), \
+ PORT_GP_1(bank, 12, fn, sfx), PORT_GP_1(bank, 13, fn, sfx), \
+ PORT_GP_1(bank, 14, fn, sfx), PORT_GP_1(bank, 15, fn, sfx), \
+ PORT_GP_1(bank, 16, fn, sfx), PORT_GP_1(bank, 17, fn, sfx), \
+ PORT_GP_1(bank, 18, fn, sfx), PORT_GP_1(bank, 19, fn, sfx), \
+ PORT_GP_1(bank, 20, fn, sfx), PORT_GP_1(bank, 21, fn, sfx), \
+ PORT_GP_1(bank, 22, fn, sfx), PORT_GP_1(bank, 23, fn, sfx), \
+ PORT_GP_1(bank, 24, fn, sfx), PORT_GP_1(bank, 25, fn, sfx), \
+ PORT_GP_1(bank, 26, fn, sfx), PORT_GP_1(bank, 27, fn, sfx), \
+ PORT_GP_1(bank, 28, fn, sfx), PORT_GP_1(bank, 29, fn, sfx), \
+ PORT_GP_1(bank, 30, fn, sfx), PORT_GP_1(bank, 31, fn, sfx)
+
+#define PORT_GP_32_REV(bank, fn, sfx) \
+ PORT_GP_1(bank, 31, fn, sfx), PORT_GP_1(bank, 30, fn, sfx), \
+ PORT_GP_1(bank, 29, fn, sfx), PORT_GP_1(bank, 28, fn, sfx), \
+ PORT_GP_1(bank, 27, fn, sfx), PORT_GP_1(bank, 26, fn, sfx), \
+ PORT_GP_1(bank, 25, fn, sfx), PORT_GP_1(bank, 24, fn, sfx), \
+ PORT_GP_1(bank, 23, fn, sfx), PORT_GP_1(bank, 22, fn, sfx), \
+ PORT_GP_1(bank, 21, fn, sfx), PORT_GP_1(bank, 20, fn, sfx), \
+ PORT_GP_1(bank, 19, fn, sfx), PORT_GP_1(bank, 18, fn, sfx), \
+ PORT_GP_1(bank, 17, fn, sfx), PORT_GP_1(bank, 16, fn, sfx), \
+ PORT_GP_1(bank, 15, fn, sfx), PORT_GP_1(bank, 14, fn, sfx), \
+ PORT_GP_1(bank, 13, fn, sfx), PORT_GP_1(bank, 12, fn, sfx), \
+ PORT_GP_1(bank, 11, fn, sfx), PORT_GP_1(bank, 10, fn, sfx), \
+ PORT_GP_1(bank, 9, fn, sfx), PORT_GP_1(bank, 8, fn, sfx), \
+ PORT_GP_1(bank, 7, fn, sfx), PORT_GP_1(bank, 6, fn, sfx), \
+ PORT_GP_1(bank, 5, fn, sfx), PORT_GP_1(bank, 4, fn, sfx), \
+ PORT_GP_1(bank, 3, fn, sfx), PORT_GP_1(bank, 2, fn, sfx), \
+ PORT_GP_1(bank, 1, fn, sfx), PORT_GP_1(bank, 0, fn, sfx)
+
+/* GP_ALL(suffix) - Expand to a list of GP_#_#_suffix */
+#define _GP_ALL(bank, pin, name, sfx) name##_##sfx
+#define GP_ALL(str) CPU_ALL_PORT(_GP_ALL, str)
+
+/* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */
+#define _GP_GPIO(bank, _pin, _name, sfx) \
+ [(bank * 32) + _pin] = { \
+ .pin = (bank * 32) + _pin, \
+ .name = __stringify(_name), \
+ .enum_id = _name##_DATA, \
+ }
+#define PINMUX_GPIO_GP_ALL() CPU_ALL_PORT(_GP_GPIO, unused)
+
+/* PINMUX_DATA_GP_ALL - Expand to a list of name_DATA, name_FN marks */
+#define _GP_DATA(bank, pin, name, sfx) PINMUX_DATA(name##_DATA, name##_FN)
+#define PINMUX_DATA_GP_ALL() CPU_ALL_PORT(_GP_DATA, unused)
+
+/*
+ * PORT style (linear pin space)
+ */
+
+#define PORT_1(pn, fn, pfx, sfx) fn(pn, pfx, sfx)
+
+#define PORT_10(pn, fn, pfx, sfx) \
+ PORT_1(pn, fn, pfx##0, sfx), PORT_1(pn+1, fn, pfx##1, sfx), \
+ PORT_1(pn+2, fn, pfx##2, sfx), PORT_1(pn+3, fn, pfx##3, sfx), \
+ PORT_1(pn+4, fn, pfx##4, sfx), PORT_1(pn+5, fn, pfx##5, sfx), \
+ PORT_1(pn+6, fn, pfx##6, sfx), PORT_1(pn+7, fn, pfx##7, sfx), \
+ PORT_1(pn+8, fn, pfx##8, sfx), PORT_1(pn+9, fn, pfx##9, sfx)
+
+#define PORT_90(pn, fn, pfx, sfx) \
+ PORT_10(pn+10, fn, pfx##1, sfx), PORT_10(pn+20, fn, pfx##2, sfx), \
+ PORT_10(pn+30, fn, pfx##3, sfx), PORT_10(pn+40, fn, pfx##4, sfx), \
+ PORT_10(pn+50, fn, pfx##5, sfx), PORT_10(pn+60, fn, pfx##6, sfx), \
+ PORT_10(pn+70, fn, pfx##7, sfx), PORT_10(pn+80, fn, pfx##8, sfx), \
+ PORT_10(pn+90, fn, pfx##9, sfx)
+
+/* PORT_ALL(suffix) - Expand to a list of PORT_#_suffix */
+#define _PORT_ALL(pn, pfx, sfx) pfx##_##sfx
+#define PORT_ALL(str) CPU_ALL_PORT(_PORT_ALL, PORT, str)
+
+/* PINMUX_GPIO - Expand to a sh_pfc_pin entry */
+#define PINMUX_GPIO(_pin) \
+ [GPIO_##_pin] = { \
+ .pin = (u16)-1, \
+ .name = __stringify(_pin), \
+ .enum_id = _pin##_DATA, \
+ }
+
+/* SH_PFC_PIN_CFG - Expand to a sh_pfc_pin entry (named PORT#) with config */
+#define SH_PFC_PIN_CFG(_pin, cfgs) \
+ { \
+ .pin = _pin, \
+ .name = __stringify(PORT##_pin), \
+ .enum_id = PORT##_pin##_DATA, \
+ .configs = cfgs, \
+ }
+
+/* SH_PFC_PIN_NAMED - Expand to a sh_pfc_pin entry with the given name */
+#define SH_PFC_PIN_NAMED(row, col, _name) \
+ { \
+ .pin = PIN_NUMBER(row, col), \
+ .name = __stringify(PIN_##_name), \
+ .configs = SH_PFC_PIN_CFG_NO_GPIO, \
+ }
+
+/* PINMUX_DATA_ALL - Expand to a list of PORT_name_DATA, PORT_name_FN0,
+ * PORT_name_OUT, PORT_name_IN marks
+ */
+#define _PORT_DATA(pn, pfx, sfx) \
+ PINMUX_DATA(PORT##pfx##_DATA, PORT##pfx##_FN0, \
+ PORT##pfx##_OUT, PORT##pfx##_IN)
+#define PINMUX_DATA_ALL() CPU_ALL_PORT(_PORT_DATA, , unused)
+
+/* GPIO_FN(name) - Expand to a pinmux_func entry for a function GPIO */
+#define PINMUX_GPIO_FN(gpio, base, data_or_mark) \
+ [gpio - (base)] = { \
+ .name = __stringify(gpio), \
+ .enum_id = data_or_mark, \
+ }
+#define GPIO_FN(str) \
+ PINMUX_GPIO_FN(GPIO_FN_##str, PINMUX_FN_BASE, str##_MARK)
+
+/*
+ * PORTnCR macro
+ */
#define _PCRH(in, in_pd, in_pu, out) \
0, (out), (in), 0, \
0, 0, 0, 0, \
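The helper macros added to sh_pfc.h above are consumed by the per-SoC data files. A rough sketch of that usage, assuming a made-up SoC with two 32-pin GP banks; CPU_ALL_PORT(), the GP_*_*_DATA enum and pinmux_pins[] below are the SoC file's own definitions, invented here for illustration:

/* Per-SoC bank list; every GP_ALL()/PINMUX_GPIO_GP_ALL() expansion walks it. */
#define CPU_ALL_PORT(fn, sfx)		\
	PORT_GP_32(0, fn, sfx),		\
	PORT_GP_32(1, fn, sfx)

/* GP_0_0_DATA .. GP_1_31_DATA pin data marks. */
enum {
	PINMUX_DATA_BEGIN,
	GP_ALL(DATA),
	PINMUX_DATA_END,
};

/* One sh_pfc_pin entry per GP pin, indexed by (bank * 32) + pin. */
static const struct sh_pfc_pin pinmux_pins[] = {
	PINMUX_GPIO_GP_ALL(),
};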
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas6.c b/drivers/pinctrl/sirf/pinctrl-atlas6.c
index 1fa39a44417..edf45a6940c 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas6.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas6.c
@@ -496,7 +496,7 @@ static const unsigned sdmmc5_pins[] = { 24, 25, 26 };
static const struct sirfsoc_muxmask usp0_muxmask[] = {
{
.group = 1,
- .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22),
+ .mask = BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23),
},
};
@@ -507,8 +507,21 @@ static const struct sirfsoc_padmux usp0_padmux = {
.funcval = 0,
};
-static const unsigned usp0_pins[] = { 51, 52, 53, 54 };
+static const unsigned usp0_pins[] = { 51, 52, 53, 54, 55 };
+static const struct sirfsoc_muxmask usp0_uart_nostreamctrl_muxmask[] = {
+ {
+ .group = 1,
+ .mask = BIT(20) | BIT(21),
+ },
+};
+
+static const struct sirfsoc_padmux usp0_uart_nostreamctrl_padmux = {
+ .muxmask_counts = ARRAY_SIZE(usp0_uart_nostreamctrl_muxmask),
+ .muxmask = usp0_uart_nostreamctrl_muxmask,
+};
+
+static const unsigned usp0_uart_nostreamctrl_pins[] = { 52, 53 };
static const struct sirfsoc_muxmask usp1_muxmask[] = {
{
.group = 0,
@@ -818,10 +831,13 @@ static const struct sirfsoc_pin_group sirfsoc_pin_groups[] = {
SIRFSOC_PIN_GROUP("lcd_24bitsgrp", lcd_24bits_pins),
SIRFSOC_PIN_GROUP("lcdrom_grp", lcdrom_pins),
SIRFSOC_PIN_GROUP("uart0grp", uart0_pins),
+ SIRFSOC_PIN_GROUP("uart0_nostreamctrlgrp", uart0_nostreamctrl_pins),
SIRFSOC_PIN_GROUP("uart1grp", uart1_pins),
SIRFSOC_PIN_GROUP("uart2grp", uart2_pins),
SIRFSOC_PIN_GROUP("uart2_nostreamctrlgrp", uart2_nostreamctrl_pins),
SIRFSOC_PIN_GROUP("usp0grp", usp0_pins),
+ SIRFSOC_PIN_GROUP("usp0_uart_nostreamctrl_grp",
+ usp0_uart_nostreamctrl_pins),
SIRFSOC_PIN_GROUP("usp1grp", usp1_pins),
SIRFSOC_PIN_GROUP("i2c0grp", i2c0_pins),
SIRFSOC_PIN_GROUP("i2c1grp", i2c1_pins),
@@ -859,9 +875,12 @@ static const char * const lcd_18bitsgrp[] = { "lcd_18bitsgrp" };
static const char * const lcd_24bitsgrp[] = { "lcd_24bitsgrp" };
static const char * const lcdromgrp[] = { "lcdromgrp" };
static const char * const uart0grp[] = { "uart0grp" };
+static const char * const uart0_nostreamctrlgrp[] = { "uart0_nostreamctrlgrp" };
static const char * const uart1grp[] = { "uart1grp" };
static const char * const uart2grp[] = { "uart2grp" };
static const char * const uart2_nostreamctrlgrp[] = { "uart2_nostreamctrlgrp" };
+static const char * const usp0_uart_nostreamctrl_grp[] = {
+ "usp0_uart_nostreamctrl_grp" };
static const char * const usp0grp[] = { "usp0grp" };
static const char * const usp1grp[] = { "usp1grp" };
static const char * const i2c0grp[] = { "i2c0grp" };
@@ -900,10 +919,15 @@ static const struct sirfsoc_pmx_func sirfsoc_pmx_functions[] = {
SIRFSOC_PMX_FUNCTION("lcd_24bits", lcd_24bitsgrp, lcd_24bits_padmux),
SIRFSOC_PMX_FUNCTION("lcdrom", lcdromgrp, lcdrom_padmux),
SIRFSOC_PMX_FUNCTION("uart0", uart0grp, uart0_padmux),
+ SIRFSOC_PMX_FUNCTION("uart0_nostreamctrl", uart0_nostreamctrlgrp,
+ uart0_nostreamctrl_padmux),
SIRFSOC_PMX_FUNCTION("uart1", uart1grp, uart1_padmux),
SIRFSOC_PMX_FUNCTION("uart2", uart2grp, uart2_padmux),
SIRFSOC_PMX_FUNCTION("uart2_nostreamctrl", uart2_nostreamctrlgrp, uart2_nostreamctrl_padmux),
SIRFSOC_PMX_FUNCTION("usp0", usp0grp, usp0_padmux),
+ SIRFSOC_PMX_FUNCTION("usp0_uart_nostreamctrl",
+ usp0_uart_nostreamctrl_grp,
+ usp0_uart_nostreamctrl_padmux),
SIRFSOC_PMX_FUNCTION("usp1", usp1grp, usp1_padmux),
SIRFSOC_PMX_FUNCTION("i2c0", i2c0grp, i2c0_padmux),
SIRFSOC_PMX_FUNCTION("i2c1", i2c1grp, i2c1_padmux),
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index 0677e198db6..26f946af793 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -306,13 +306,13 @@ static int sirfsoc_gpio_of_xlate(struct gpio_chip *gc,
u32 *flags)
{
if (gpiospec->args[0] > SIRFSOC_GPIO_NO_OF_BANKS * SIRFSOC_GPIO_BANK_SIZE)
- return -EINVAL;
+ return -EINVAL;
if (gc != &sgpio_bank[gpiospec->args[0] / SIRFSOC_GPIO_BANK_SIZE].chip.gc)
- return -EINVAL;
+ return -EINVAL;
if (flags)
- *flags = gpiospec->args[1];
+ *flags = gpiospec->args[1];
return gpiospec->args[0] % SIRFSOC_GPIO_BANK_SIZE;
}
@@ -440,6 +440,8 @@ static int sirfsoc_pinmux_resume_noirq(struct device *dev)
static const struct dev_pm_ops sirfsoc_pinmux_pm_ops = {
.suspend_noirq = sirfsoc_pinmux_suspend_noirq,
.resume_noirq = sirfsoc_pinmux_resume_noirq,
+ .freeze_noirq = sirfsoc_pinmux_suspend_noirq,
+ .restore_noirq = sirfsoc_pinmux_resume_noirq,
};
#endif
@@ -831,7 +833,7 @@ static int sirfsoc_gpio_probe(struct device_node *np)
{
int i, err = 0;
struct sirfsoc_gpio_bank *bank;
- void *regs;
+ void __iomem *regs;
struct platform_device *pdev;
bool is_marco = false;
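The two pm_ops entries added above point hibernation's freeze/restore phases at the existing suspend/resume handlers. A minimal sketch of that dev_pm_ops shape, with placeholder example_* names rather than anything from the patch:

#include <linux/device.h>
#include <linux/pm.h>

static int example_suspend_noirq(struct device *dev) { return 0; }
static int example_resume_noirq(struct device *dev) { return 0; }

static const struct dev_pm_ops example_pm_ops = {
	.suspend_noirq	= example_suspend_noirq,	/* suspend-to-RAM */
	.resume_noirq	= example_resume_noirq,
	.freeze_noirq	= example_suspend_noirq,	/* before the hibernation image is written */
	.restore_noirq	= example_resume_noirq,		/* after loading the hibernation image */
};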
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index 116da0412c4..58bf6867aa1 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -367,21 +367,16 @@ int spear_pinctrl_probe(struct platform_device *pdev,
if (!machdata)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
pmx = devm_kzalloc(&pdev->dev, sizeof(*pmx), GFP_KERNEL);
if (!pmx) {
dev_err(&pdev->dev, "Can't alloc spear_pmx\n");
return -ENOMEM;
}
- pmx->vbase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
- if (!pmx->vbase) {
- dev_err(&pdev->dev, "Couldn't ioremap at index 0\n");
- return -ENODEV;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pmx->vbase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(pmx->vbase))
+ return PTR_ERR(pmx->vbase);
pmx->dev = &pdev->dev;
pmx->machdata = machdata;
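This hunk, like the lpc32xx, spear and renesas-tpu PWM hunks later in the patch, drops the explicit platform_get_resource() error check because devm_ioremap_resource() validates the resource itself and returns an ERR_PTR on failure. A condensed sketch of the resulting probe idiom, with placeholder foo_* names:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_map_registers(struct platform_device *pdev, void __iomem **base)
{
	struct resource *res;

	/* A NULL or non-MEM resource is rejected inside devm_ioremap_resource(). */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	*base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(*base))
		return PTR_ERR(*base);

	return 0;
}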
diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c
index 06c7e6f1c7f..ed1d3608f48 100644
--- a/drivers/pinctrl/spear/pinctrl-spear310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear310.c
@@ -430,4 +430,4 @@ module_exit(spear310_pinctrl_exit);
MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver");
MODULE_LICENSE("GPL v2");
-MODULE_DEVICE_TABLE(of, SPEAr310_pinctrl_of_match);
+MODULE_DEVICE_TABLE(of, spear310_pinctrl_of_match);
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index 0cc4335bc0f..39aec085081 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -424,15 +424,16 @@ static int wmt_pinconf_get(struct pinctrl_dev *pctldev, unsigned pin,
}
static int wmt_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
- unsigned long config)
+ unsigned long *configs, unsigned num_configs)
{
struct wmt_pinctrl_data *data = pinctrl_dev_get_drvdata(pctldev);
- enum pin_config_param param = pinconf_to_config_param(config);
- u16 arg = pinconf_to_config_argument(config);
+ enum pin_config_param param;
+ u16 arg;
u32 bank = WMT_BANK_FROM_PIN(pin);
u32 bit = WMT_BIT_FROM_PIN(pin);
u32 reg_pull_en = data->banks[bank].reg_pull_en;
u32 reg_pull_cfg = data->banks[bank].reg_pull_cfg;
+ int i;
if ((reg_pull_en == NO_REG) || (reg_pull_cfg == NO_REG)) {
dev_err(data->dev, "bias functions not supported on pin %d\n",
@@ -440,28 +441,33 @@ static int wmt_pinconf_set(struct pinctrl_dev *pctldev, unsigned pin,
return -EINVAL;
}
- if ((param == PIN_CONFIG_BIAS_PULL_DOWN) ||
- (param == PIN_CONFIG_BIAS_PULL_UP)) {
- if (arg == 0)
- param = PIN_CONFIG_BIAS_DISABLE;
- }
+ for (i = 0; i < num_configs; i++) {
+ param = pinconf_to_config_param(configs[i]);
+ arg = pinconf_to_config_argument(configs[i]);
- switch (param) {
- case PIN_CONFIG_BIAS_DISABLE:
- wmt_clearbits(data, reg_pull_en, BIT(bit));
- break;
- case PIN_CONFIG_BIAS_PULL_DOWN:
- wmt_clearbits(data, reg_pull_cfg, BIT(bit));
- wmt_setbits(data, reg_pull_en, BIT(bit));
- break;
- case PIN_CONFIG_BIAS_PULL_UP:
- wmt_setbits(data, reg_pull_cfg, BIT(bit));
- wmt_setbits(data, reg_pull_en, BIT(bit));
- break;
- default:
- dev_err(data->dev, "unknown pinconf param\n");
- return -EINVAL;
- }
+ if ((param == PIN_CONFIG_BIAS_PULL_DOWN) ||
+ (param == PIN_CONFIG_BIAS_PULL_UP)) {
+ if (arg == 0)
+ param = PIN_CONFIG_BIAS_DISABLE;
+ }
+
+ switch (param) {
+ case PIN_CONFIG_BIAS_DISABLE:
+ wmt_clearbits(data, reg_pull_en, BIT(bit));
+ break;
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ wmt_clearbits(data, reg_pull_cfg, BIT(bit));
+ wmt_setbits(data, reg_pull_en, BIT(bit));
+ break;
+ case PIN_CONFIG_BIAS_PULL_UP:
+ wmt_setbits(data, reg_pull_cfg, BIT(bit));
+ wmt_setbits(data, reg_pull_en, BIT(bit));
+ break;
+ default:
+ dev_err(data->dev, "unknown pinconf param\n");
+ return -EINVAL;
+ }
+ } /* for each config */
return 0;
}
diff --git a/drivers/platform/olpc/olpc-ec.c b/drivers/platform/olpc/olpc-ec.c
index 0f9f8596b30..f9119525f55 100644
--- a/drivers/platform/olpc/olpc-ec.c
+++ b/drivers/platform/olpc/olpc-ec.c
@@ -330,7 +330,7 @@ static int __init olpc_ec_init_module(void)
return platform_driver_register(&olpc_ec_plat_driver);
}
-module_init(olpc_ec_init_module);
+arch_initcall(olpc_ec_init_module);
MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 8e268da6fdb..0e9c169b42f 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -1543,7 +1543,6 @@ static void asus_acpi_notify(struct acpi_device *device, u32 event)
/* TODO Find a better way to handle events count. */
count = asus->event_count[event % 128]++;
- acpi_bus_generate_proc_event(asus->device, event, count);
acpi_bus_generate_netlink_event(asus->device->pnp.device_class,
dev_name(&asus->device->dev), event,
count);
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 5d26e70bed6..a6afd4108be 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -1269,7 +1269,6 @@ static void eeepc_acpi_notify(struct acpi_device *device, u32 event)
if (event > ACPI_MAX_SYS_NOTIFY)
return;
count = eeepc->event_count[event % 128]++;
- acpi_bus_generate_proc_event(device, event, count);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event,
count);
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 1c9386e7c58..52b8a97efde 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -773,8 +773,6 @@ static void acpi_fujitsu_notify(struct acpi_device *device, u32 event)
else
set_lcd_level(newb);
}
- acpi_bus_generate_proc_event(fujitsu->dev,
- ACPI_VIDEO_NOTIFY_INC_BRIGHTNESS, 0);
keycode = KEY_BRIGHTNESSUP;
} else if (oldb > newb) {
if (disable_brightness_adjust != 1) {
@@ -783,8 +781,6 @@ static void acpi_fujitsu_notify(struct acpi_device *device, u32 event)
else
set_lcd_level(newb);
}
- acpi_bus_generate_proc_event(fujitsu->dev,
- ACPI_VIDEO_NOTIFY_DEC_BRIGHTNESS, 0);
keycode = KEY_BRIGHTNESSDOWN;
}
break;
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 97bb05edcb5..d6970f47ae7 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -53,7 +53,6 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
#define HPWMI_ALS_QUERY 0x3
#define HPWMI_HARDWARE_QUERY 0x4
#define HPWMI_WIRELESS_QUERY 0x5
-#define HPWMI_BIOS_QUERY 0x9
#define HPWMI_HOTKEY_QUERY 0xc
#define HPWMI_WIRELESS2_QUERY 0x1b
#define HPWMI_POSTCODEERROR_QUERY 0x2a
@@ -293,19 +292,6 @@ static int hp_wmi_tablet_state(void)
return (state & 0x4) ? 1 : 0;
}
-static int hp_wmi_enable_hotkeys(void)
-{
- int ret;
- int query = 0x6e;
-
- ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query),
- 0);
-
- if (ret)
- return -EINVAL;
- return 0;
-}
-
static int hp_wmi_set_block(void *data, bool blocked)
{
enum hp_wmi_radio r = (enum hp_wmi_radio) data;
@@ -1009,8 +995,6 @@ static int __init hp_wmi_init(void)
err = hp_wmi_input_setup();
if (err)
return err;
-
- hp_wmi_enable_hotkeys();
}
if (bios_capable) {
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 4add9a31bf6..984253da365 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -464,9 +464,6 @@ static void acpi_pcc_generate_keyinput(struct pcc_acpi *pcc)
"error getting hotkey status\n"));
return;
}
-
- acpi_bus_generate_proc_event(pcc->device, HKEY_NOTIFY, result);
-
if (!sparse_keymap_report_event(hotk_input_dev,
result & 0xf, result & 0x80, false))
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c
index 1a90b62a71c..4430b8c1369 100644
--- a/drivers/platform/x86/samsung-q10.c
+++ b/drivers/platform/x86/samsung-q10.c
@@ -176,7 +176,7 @@ static int __init samsungq10_init(void)
samsungq10_probe,
NULL, 0, NULL, 0);
- return PTR_RET(samsungq10_device);
+ return PTR_ERR_OR_ZERO(samsungq10_device);
}
static void __exit samsungq10_exit(void)
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 2ac045f27f1..d3fd52036fd 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1275,9 +1275,6 @@ static void sony_nc_notify(struct acpi_device *device, u32 event)
ev_type = HOTKEY;
sony_laptop_report_input_event(real_ev);
}
-
- acpi_bus_generate_proc_event(sony_nc_acpi_device, ev_type, real_ev);
-
acpi_bus_generate_netlink_event(sony_nc_acpi_device->pnp.device_class,
dev_name(&sony_nc_acpi_device->dev), ev_type, real_ev);
}
@@ -2440,7 +2437,10 @@ static ssize_t sony_nc_gfx_switch_status_show(struct device *dev,
if (pos < 0)
return pos;
- return snprintf(buffer, PAGE_SIZE, "%s\n", pos ? "speed" : "stamina");
+ return snprintf(buffer, PAGE_SIZE, "%s\n",
+ pos == SPEED ? "speed" :
+ pos == STAMINA ? "stamina" :
+ pos == AUTO ? "auto" : "unknown");
}
static int sony_nc_gfx_switch_setup(struct platform_device *pd,
@@ -4243,7 +4243,6 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id)
found:
sony_laptop_report_input_event(device_event);
- acpi_bus_generate_proc_event(dev->acpi_dev, 1, device_event);
sonypi_compat_report_event(device_event);
return IRQ_HANDLED;
}
@@ -4320,7 +4319,8 @@ static int sony_pic_add(struct acpi_device *device)
goto err_free_resources;
}
- if (sonypi_compat_init())
+ result = sonypi_compat_init();
+ if (result)
goto err_remove_input;
/* request io port */
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 54d31c0a984..be67e5e28d1 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2022,8 +2022,6 @@ static u32 hotkey_driver_mask; /* events needed by the driver */
static u32 hotkey_user_mask; /* events visible to userspace */
static u32 hotkey_acpi_mask; /* events enabled in firmware */
-static unsigned int hotkey_report_mode;
-
static u16 *hotkey_keycode_map;
static struct attribute_set *hotkey_dev_attributes;
@@ -2282,10 +2280,6 @@ static struct tp_acpi_drv_struct ibm_hotkey_acpidriver;
static void tpacpi_hotkey_send_key(unsigned int scancode)
{
tpacpi_input_send_key_masked(scancode);
- if (hotkey_report_mode < 2) {
- acpi_bus_generate_proc_event(ibm_hotkey_acpidriver.device,
- 0x80, TP_HKEY_EV_HOTKEY_BASE + scancode);
- }
}
static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
@@ -2882,18 +2876,6 @@ static void hotkey_tablet_mode_notify_change(void)
"hotkey_tablet_mode");
}
-/* sysfs hotkey report_mode -------------------------------------------- */
-static ssize_t hotkey_report_mode_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n",
- (hotkey_report_mode != 0) ? hotkey_report_mode : 1);
-}
-
-static struct device_attribute dev_attr_hotkey_report_mode =
- __ATTR(hotkey_report_mode, S_IRUGO, hotkey_report_mode_show, NULL);
-
/* sysfs wakeup reason (pollable) -------------------------------------- */
static ssize_t hotkey_wakeup_reason_show(struct device *dev,
struct device_attribute *attr,
@@ -2935,7 +2917,6 @@ static struct attribute *hotkey_attributes[] __initdata = {
&dev_attr_hotkey_enable.attr,
&dev_attr_hotkey_bios_enabled.attr,
&dev_attr_hotkey_bios_mask.attr,
- &dev_attr_hotkey_report_mode.attr,
&dev_attr_hotkey_wakeup_reason.attr,
&dev_attr_hotkey_wakeup_hotunplug_complete.attr,
&dev_attr_hotkey_mask.attr,
@@ -3439,11 +3420,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
"initial masks: user=0x%08x, fw=0x%08x, poll=0x%08x\n",
hotkey_user_mask, hotkey_acpi_mask, hotkey_source_mask);
- dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
- "legacy ibm/hotkey event reporting over procfs %s\n",
- (hotkey_report_mode < 2) ?
- "enabled" : "disabled");
-
tpacpi_inputdev->open = &hotkey_inputdev_open;
tpacpi_inputdev->close = &hotkey_inputdev_close;
@@ -3737,13 +3713,6 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
"event happened to %s\n", TPACPI_MAIL);
}
- /* Legacy events */
- if (!ignore_acpi_ev &&
- (send_acpi_ev || hotkey_report_mode < 2)) {
- acpi_bus_generate_proc_event(ibm->acpi->device,
- event, hkey);
- }
-
/* netlink events */
if (!ignore_acpi_ev && send_acpi_ev) {
acpi_bus_generate_netlink_event(
@@ -8840,11 +8809,6 @@ module_param(brightness_enable, uint, 0444);
MODULE_PARM_DESC(brightness_enable,
"Enables backlight control when 1, disables when 0");
-module_param(hotkey_report_mode, uint, 0444);
-MODULE_PARM_DESC(hotkey_report_mode,
- "used for backwards compatibility with userspace, "
- "see documentation");
-
#ifdef CONFIG_THINKPAD_ACPI_ALSA_SUPPORT
module_param_named(volume_mode, volume_mode, uint, 0444);
MODULE_PARM_DESC(volume_mode,
@@ -8975,10 +8939,6 @@ static int __init thinkpad_acpi_module_init(void)
tpacpi_lifecycle = TPACPI_LIFE_INIT;
- /* Parameter checking */
- if (hotkey_report_mode > 2)
- return -EINVAL;
-
/* Driver-level probe */
ret = get_thinkpad_model_data(&thinkpad_id);
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index b13344c5980..6e02c953d88 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -693,11 +693,13 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "wmi:%s\n", guid_string);
}
+static DEVICE_ATTR_RO(modalias);
-static struct device_attribute wmi_dev_attrs[] = {
- __ATTR_RO(modalias),
- __ATTR_NULL
+static struct attribute *wmi_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(wmi);
static int wmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
@@ -732,7 +734,7 @@ static struct class wmi_class = {
.name = "wmi",
.dev_release = wmi_dev_free,
.dev_uevent = wmi_dev_uevent,
- .dev_attrs = wmi_dev_attrs,
+ .dev_groups = wmi_groups,
};
static int wmi_create_device(const struct guid_block *gblock,
diff --git a/drivers/pnp/driver.c b/drivers/pnp/driver.c
index 00e94032531..12adb43a069 100644
--- a/drivers/pnp/driver.c
+++ b/drivers/pnp/driver.c
@@ -154,7 +154,7 @@ static int pnp_bus_match(struct device *dev, struct device_driver *drv)
return 1;
}
-static int pnp_bus_suspend(struct device *dev, pm_message_t state)
+static int __pnp_bus_suspend(struct device *dev, pm_message_t state)
{
struct pnp_dev *pnp_dev = to_pnp_dev(dev);
struct pnp_driver *pnp_drv = pnp_dev->driver;
@@ -180,6 +180,16 @@ static int pnp_bus_suspend(struct device *dev, pm_message_t state)
return 0;
}
+static int pnp_bus_suspend(struct device *dev)
+{
+ return __pnp_bus_suspend(dev, PMSG_SUSPEND);
+}
+
+static int pnp_bus_freeze(struct device *dev)
+{
+ return __pnp_bus_suspend(dev, PMSG_FREEZE);
+}
+
static int pnp_bus_resume(struct device *dev)
{
struct pnp_dev *pnp_dev = to_pnp_dev(dev);
@@ -210,14 +220,19 @@ static int pnp_bus_resume(struct device *dev)
return 0;
}
+static const struct dev_pm_ops pnp_bus_dev_pm_ops = {
+ .suspend = pnp_bus_suspend,
+ .freeze = pnp_bus_freeze,
+ .resume = pnp_bus_resume,
+};
+
struct bus_type pnp_bus_type = {
.name = "pnp",
.match = pnp_bus_match,
.probe = pnp_device_probe,
.remove = pnp_device_remove,
.shutdown = pnp_device_shutdown,
- .suspend = pnp_bus_suspend,
- .resume = pnp_bus_resume,
+ .pm = &pnp_bus_dev_pm_ops,
.dev_attrs = pnp_interface_attrs,
};
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index 55cd459a390..34049b0b4c7 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -131,7 +131,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
/* acpi_unregister_gsi(pnp_irq(dev, 0)); */
ret = 0;
if (acpi_bus_power_manageable(handle))
- acpi_bus_set_power(handle, ACPI_STATE_D3);
+ acpi_bus_set_power(handle, ACPI_STATE_D3_COLD);
/* continue even if acpi_bus_set_power() fails */
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL)))
ret = -ENODEV;
@@ -174,10 +174,10 @@ static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state)
if (acpi_bus_power_manageable(handle)) {
int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL,
- ACPI_STATE_D3);
+ ACPI_STATE_D3_COLD);
if (power_state < 0)
power_state = (state.event == PM_EVENT_ON) ?
- ACPI_STATE_D0 : ACPI_STATE_D3;
+ ACPI_STATE_D0 : ACPI_STATE_D3_COLD;
/*
* acpi_bus_set_power() often fails (keyboard port can't be
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 7173e3ad475..2f07cd61566 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -406,7 +406,7 @@ static int __init pps_init(void)
pr_err("failed to allocate class\n");
return PTR_ERR(pps_class);
}
- pps_class->dev_attrs = pps_attrs;
+ pps_class->dev_groups = pps_groups;
err = alloc_chrdev_region(&pps_devt, 0, PPS_MAX_SOURCES, "pps");
if (err < 0) {
diff --git a/drivers/pps/sysfs.c b/drivers/pps/sysfs.c
index ef0978c71ee..aefb75d6709 100644
--- a/drivers/pps/sysfs.c
+++ b/drivers/pps/sysfs.c
@@ -29,8 +29,8 @@
* Attribute functions
*/
-static ssize_t pps_show_assert(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t assert_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct pps_device *pps = dev_get_drvdata(dev);
@@ -41,9 +41,10 @@ static ssize_t pps_show_assert(struct device *dev,
(long long) pps->assert_tu.sec, pps->assert_tu.nsec,
pps->assert_sequence);
}
+static DEVICE_ATTR_RO(assert);
-static ssize_t pps_show_clear(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t clear_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct pps_device *pps = dev_get_drvdata(dev);
@@ -54,45 +55,59 @@ static ssize_t pps_show_clear(struct device *dev,
(long long) pps->clear_tu.sec, pps->clear_tu.nsec,
pps->clear_sequence);
}
+static DEVICE_ATTR_RO(clear);
-static ssize_t pps_show_mode(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct pps_device *pps = dev_get_drvdata(dev);
return sprintf(buf, "%4x\n", pps->info.mode);
}
+static DEVICE_ATTR_RO(mode);
-static ssize_t pps_show_echo(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t echo_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct pps_device *pps = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", !!pps->info.echo);
}
+static DEVICE_ATTR_RO(echo);
-static ssize_t pps_show_name(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct pps_device *pps = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", pps->info.name);
}
+static DEVICE_ATTR_RO(name);
-static ssize_t pps_show_path(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t path_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct pps_device *pps = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", pps->info.path);
}
+static DEVICE_ATTR_RO(path);
+
+static struct attribute *pps_attrs[] = {
+ &dev_attr_assert.attr,
+ &dev_attr_clear.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_echo.attr,
+ &dev_attr_name.attr,
+ &dev_attr_path.attr,
+ NULL,
+};
+
+static const struct attribute_group pps_group = {
+ .attrs = pps_attrs,
+};
-struct device_attribute pps_attrs[] = {
- __ATTR(assert, S_IRUGO, pps_show_assert, NULL),
- __ATTR(clear, S_IRUGO, pps_show_clear, NULL),
- __ATTR(mode, S_IRUGO, pps_show_mode, NULL),
- __ATTR(echo, S_IRUGO, pps_show_echo, NULL),
- __ATTR(name, S_IRUGO, pps_show_name, NULL),
- __ATTR(path, S_IRUGO, pps_show_path, NULL),
- __ATTR_NULL,
+const struct attribute_group *pps_groups[] = {
+ &pps_group,
+ NULL,
};
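The pps, ptp, wmi and pwm sysfs hunks in this patch all follow the same conversion: a plain <name>_show() helper plus DEVICE_ATTR_RO(), a NULL-terminated attribute array, and an attribute_group list wired to dev_groups in place of the removed dev_attrs arrays. A stripped-down sketch of the pattern, with invented example/value names:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sysfs.h>

static ssize_t value_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", 42);	/* placeholder payload */
}
static DEVICE_ATTR_RO(value);			/* generates dev_attr_value */

static struct attribute *example_attrs[] = {
	&dev_attr_value.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);			/* generates example_group and example_groups */

static struct class example_class = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.dev_groups	= example_groups,	/* replaces the old .dev_attrs pointer */
};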
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 4a8c388364c..a8319b26664 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -330,7 +330,7 @@ static int __init ptp_init(void)
goto no_region;
}
- ptp_class->dev_attrs = ptp_dev_attrs;
+ ptp_class->dev_groups = ptp_groups;
pr_info("PTP clock support registered\n");
return 0;
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index 69d32070cc6..df03f2e30ad 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -84,7 +84,7 @@ uint ptp_poll(struct posix_clock *pc,
* see ptp_sysfs.c
*/
-extern struct device_attribute ptp_dev_attrs[];
+extern const struct attribute_group *ptp_groups[];
int ptp_cleanup_sysfs(struct ptp_clock *ptp);
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index 2f93926ac97..13ec5311746 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -27,36 +27,43 @@ static ssize_t clock_name_show(struct device *dev,
struct ptp_clock *ptp = dev_get_drvdata(dev);
return snprintf(page, PAGE_SIZE-1, "%s\n", ptp->info->name);
}
+static DEVICE_ATTR(clock_name, 0444, clock_name_show, NULL);
-#define PTP_SHOW_INT(name) \
-static ssize_t name##_show(struct device *dev, \
+#define PTP_SHOW_INT(name, var) \
+static ssize_t var##_show(struct device *dev, \
struct device_attribute *attr, char *page) \
{ \
struct ptp_clock *ptp = dev_get_drvdata(dev); \
- return snprintf(page, PAGE_SIZE-1, "%d\n", ptp->info->name); \
-}
-
-PTP_SHOW_INT(max_adj);
-PTP_SHOW_INT(n_alarm);
-PTP_SHOW_INT(n_ext_ts);
-PTP_SHOW_INT(n_per_out);
-PTP_SHOW_INT(pps);
+ return snprintf(page, PAGE_SIZE-1, "%d\n", ptp->info->var); \
+} \
+static DEVICE_ATTR(name, 0444, var##_show, NULL);
+
+PTP_SHOW_INT(max_adjustment, max_adj);
+PTP_SHOW_INT(n_alarms, n_alarm);
+PTP_SHOW_INT(n_external_timestamps, n_ext_ts);
+PTP_SHOW_INT(n_periodic_outputs, n_per_out);
+PTP_SHOW_INT(pps_available, pps);
+
+static struct attribute *ptp_attrs[] = {
+ &dev_attr_clock_name.attr,
+ &dev_attr_max_adjustment.attr,
+ &dev_attr_n_alarms.attr,
+ &dev_attr_n_external_timestamps.attr,
+ &dev_attr_n_periodic_outputs.attr,
+ &dev_attr_pps_available.attr,
+ NULL,
+};
-#define PTP_RO_ATTR(_var, _name) { \
- .attr = { .name = __stringify(_name), .mode = 0444 }, \
- .show = _var##_show, \
-}
+static const struct attribute_group ptp_group = {
+ .attrs = ptp_attrs,
+};
-struct device_attribute ptp_dev_attrs[] = {
- PTP_RO_ATTR(clock_name, clock_name),
- PTP_RO_ATTR(max_adj, max_adjustment),
- PTP_RO_ATTR(n_alarm, n_alarms),
- PTP_RO_ATTR(n_ext_ts, n_external_timestamps),
- PTP_RO_ATTR(n_per_out, n_periodic_outputs),
- PTP_RO_ATTR(pps, pps_available),
- __ATTR_NULL,
+const struct attribute_group *ptp_groups[] = {
+ &ptp_group,
+ NULL,
};
+
static ssize_t extts_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index dfbfbc52176..2ca95042a0b 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -30,10 +30,9 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#define MAX_PWMS 1024
+#include <dt-bindings/pwm/pwm.h>
-/* flags in the third cell of the DT PWM specifier */
-#define PWM_SPEC_POLARITY (1 << 0)
+#define MAX_PWMS 1024
static DEFINE_MUTEX(pwm_lookup_lock);
static LIST_HEAD(pwm_lookup_list);
@@ -149,7 +148,7 @@ of_pwm_xlate_with_flags(struct pwm_chip *pc, const struct of_phandle_args *args)
pwm_set_period(pwm, args->args[1]);
- if (args->args[2] & PWM_SPEC_POLARITY)
+ if (args->args[2] & PWM_POLARITY_INVERTED)
pwm_set_polarity(pwm, PWM_POLARITY_INVERSED);
else
pwm_set_polarity(pwm, PWM_POLARITY_NORMAL);
diff --git a/drivers/pwm/pwm-lpc32xx.c b/drivers/pwm/pwm-lpc32xx.c
index efb6c7bf875..efac99e03d5 100644
--- a/drivers/pwm/pwm-lpc32xx.c
+++ b/drivers/pwm/pwm-lpc32xx.c
@@ -124,9 +124,6 @@ static int lpc32xx_pwm_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
lpc32xx->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(lpc32xx->base))
return PTR_ERR(lpc32xx->base);
diff --git a/drivers/pwm/pwm-mxs.c b/drivers/pwm/pwm-mxs.c
index 2c77b81da7c..c2c5a4fd1b9 100644
--- a/drivers/pwm/pwm-mxs.c
+++ b/drivers/pwm/pwm-mxs.c
@@ -161,9 +161,15 @@ static int mxs_pwm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mxs);
- stmp_reset_block(mxs->base);
+ ret = stmp_reset_block(mxs->base);
+ if (ret)
+ goto pwm_remove;
return 0;
+
+pwm_remove:
+ pwmchip_remove(&mxs->chip);
+ return ret;
}
static int mxs_pwm_remove(struct platform_device *pdev)
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index dc9717551d3..a4d2164aaf5 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -182,16 +182,6 @@ static struct platform_driver pwm_driver = {
.id_table = pwm_id_table,
};
-static int __init pwm_init(void)
-{
- return platform_driver_register(&pwm_driver);
-}
-arch_initcall(pwm_init);
-
-static void __exit pwm_exit(void)
-{
- platform_driver_unregister(&pwm_driver);
-}
-module_exit(pwm_exit);
+module_platform_driver(pwm_driver);
MODULE_LICENSE("GPL v2");
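module_platform_driver(pwm_driver) generates roughly the init/exit boilerplate removed above; the one behavioural difference is that the old code registered at arch_initcall time, while the macro uses module_init() (device_initcall when built in):

/* Approximate expansion of module_platform_driver(pwm_driver): */
static int __init pwm_driver_init(void)
{
	return platform_driver_register(&pwm_driver);
}
module_init(pwm_driver_init);

static void __exit pwm_driver_exit(void)
{
	platform_driver_unregister(&pwm_driver);
}
module_exit(pwm_driver_exit);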
diff --git a/drivers/pwm/pwm-renesas-tpu.c b/drivers/pwm/pwm-renesas-tpu.c
index 2600892782c..aff6ba9b49e 100644
--- a/drivers/pwm/pwm-renesas-tpu.c
+++ b/drivers/pwm/pwm-renesas-tpu.c
@@ -20,6 +20,7 @@
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/of.h>
#include <linux/platform_data/pwm-renesas-tpu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -86,7 +87,7 @@ struct tpu_pwm_device {
struct tpu_device {
struct platform_device *pdev;
- struct tpu_pwm_platform_data *pdata;
+ enum pwm_polarity polarities[TPU_CHANNEL_MAX];
struct pwm_chip chip;
spinlock_t lock;
@@ -228,8 +229,7 @@ static int tpu_pwm_request(struct pwm_chip *chip, struct pwm_device *_pwm)
pwm->tpu = tpu;
pwm->channel = _pwm->hwpwm;
- pwm->polarity = tpu->pdata ? tpu->pdata->channels[pwm->channel].polarity
- : PWM_POLARITY_NORMAL;
+ pwm->polarity = tpu->polarities[pwm->channel];
pwm->prescaler = 0;
pwm->period = 0;
pwm->duty = 0;
@@ -388,6 +388,16 @@ static const struct pwm_ops tpu_pwm_ops = {
* Probe and remove
*/
+static void tpu_parse_pdata(struct tpu_device *tpu)
+{
+ struct tpu_pwm_platform_data *pdata = tpu->pdev->dev.platform_data;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(tpu->polarities); ++i)
+ tpu->polarities[i] = pdata ? pdata->channels[i].polarity
+ : PWM_POLARITY_NORMAL;
+}
+
static int tpu_probe(struct platform_device *pdev)
{
struct tpu_device *tpu;
@@ -400,15 +410,14 @@ static int tpu_probe(struct platform_device *pdev)
return -ENOMEM;
}
- tpu->pdata = pdev->dev.platform_data;
+ spin_lock_init(&tpu->lock);
+ tpu->pdev = pdev;
+
+ /* Initialize device configuration from platform data. */
+ tpu_parse_pdata(tpu);
/* Map memory, get clock and pin control. */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get I/O memory\n");
- return -ENXIO;
- }
-
tpu->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(tpu->base))
return PTR_ERR(tpu->base);
@@ -422,11 +431,10 @@ static int tpu_probe(struct platform_device *pdev)
/* Initialize and register the device. */
platform_set_drvdata(pdev, tpu);
- spin_lock_init(&tpu->lock);
- tpu->pdev = pdev;
-
tpu->chip.dev = &pdev->dev;
tpu->chip.ops = &tpu_pwm_ops;
+ tpu->chip.of_xlate = of_pwm_xlate_with_flags;
+ tpu->chip.of_pwm_n_cells = 3;
tpu->chip.base = -1;
tpu->chip.npwm = TPU_CHANNEL_MAX;
@@ -457,12 +465,26 @@ static int tpu_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id tpu_of_table[] = {
+ { .compatible = "renesas,tpu-r8a73a4", },
+ { .compatible = "renesas,tpu-r8a7740", },
+ { .compatible = "renesas,tpu-r8a7790", },
+ { .compatible = "renesas,tpu-sh7372", },
+ { .compatible = "renesas,tpu", },
+ { },
+};
+
+MODULE_DEVICE_TABLE(of, tpu_of_table);
+#endif
+
static struct platform_driver tpu_driver = {
.probe = tpu_probe,
.remove = tpu_remove,
.driver = {
.name = "renesas-tpu-pwm",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(tpu_of_table),
}
};
diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
index a54d2140143..8ad26b8bf41 100644
--- a/drivers/pwm/pwm-spear.c
+++ b/drivers/pwm/pwm-spear.c
@@ -178,18 +178,13 @@ static int spear_pwm_probe(struct platform_device *pdev)
int ret;
u32 val;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "no memory resources defined\n");
- return -ENODEV;
- }
-
pc = devm_kzalloc(&pdev->dev, sizeof(*pc), GFP_KERNEL);
if (!pc) {
dev_err(&pdev->dev, "failed to allocate memory\n");
return -ENOMEM;
}
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
pc->mmio_base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(pc->mmio_base))
return PTR_ERR(pc->mmio_base);
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 72ca42dfa73..c2e2e585236 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -290,6 +290,7 @@ static int ecap_pwm_remove(struct platform_device *pdev)
return pwmchip_remove(&pc->chip);
}
+#ifdef CONFIG_PM_SLEEP
static void ecap_pwm_save_context(struct ecap_pwm_chip *pc)
{
pm_runtime_get_sync(pc->chip.dev);
@@ -306,7 +307,6 @@ static void ecap_pwm_restore_context(struct ecap_pwm_chip *pc)
writew(pc->ctx.ecctl2, pc->mmio_base + ECCTL2);
}
-#ifdef CONFIG_PM_SLEEP
static int ecap_pwm_suspend(struct device *dev)
{
struct ecap_pwm_chip *pc = dev_get_drvdata(dev);
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index aa4c5586f53..084f5524653 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -139,17 +139,17 @@ static inline struct ehrpwm_pwm_chip *to_ehrpwm_pwm_chip(struct pwm_chip *chip)
return container_of(chip, struct ehrpwm_pwm_chip, chip);
}
-static u16 ehrpwm_read(void *base, int offset)
+static u16 ehrpwm_read(void __iomem *base, int offset)
{
return readw(base + offset);
}
-static void ehrpwm_write(void *base, int offset, unsigned int val)
+static void ehrpwm_write(void __iomem *base, int offset, unsigned int val)
{
writew(val & 0xFFFF, base + offset);
}
-static void ehrpwm_modify(void *base, int offset,
+static void ehrpwm_modify(void __iomem *base, int offset,
unsigned short mask, unsigned short val)
{
unsigned short regval;
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 8ca5de316d3..8c20332d482 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -268,6 +268,7 @@ static ssize_t pwm_export_store(struct device *parent,
return ret ? : len;
}
+static DEVICE_ATTR(export, 0200, NULL, pwm_export_store);
static ssize_t pwm_unexport_store(struct device *parent,
struct device_attribute *attr,
@@ -288,27 +289,29 @@ static ssize_t pwm_unexport_store(struct device *parent,
return ret ? : len;
}
+static DEVICE_ATTR(unexport, 0200, NULL, pwm_unexport_store);
-static ssize_t pwm_npwm_show(struct device *parent,
- struct device_attribute *attr,
- char *buf)
+static ssize_t npwm_show(struct device *parent, struct device_attribute *attr,
+ char *buf)
{
const struct pwm_chip *chip = dev_get_drvdata(parent);
return sprintf(buf, "%u\n", chip->npwm);
}
+static DEVICE_ATTR_RO(npwm);
-static struct device_attribute pwm_chip_attrs[] = {
- __ATTR(export, 0200, NULL, pwm_export_store),
- __ATTR(unexport, 0200, NULL, pwm_unexport_store),
- __ATTR(npwm, 0444, pwm_npwm_show, NULL),
- __ATTR_NULL,
+static struct attribute *pwm_chip_attrs[] = {
+ &dev_attr_export.attr,
+ &dev_attr_unexport.attr,
+ &dev_attr_npwm.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(pwm_chip);
static struct class pwm_class = {
.name = "pwm",
.owner = THIS_MODULE,
- .dev_attrs = pwm_chip_attrs,
+ .dev_groups = pwm_chip_groups,
};
static int pwmchip_sysfs_match(struct device *parent, const void *data)
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index f4f30af2df6..2e8a20cac58 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1715,11 +1715,13 @@ int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops)
(mport_id == RIO_MPORT_ANY && port->nscan == scan_ops))
port->nscan = NULL;
- list_for_each_entry(scan, &rio_scans, node)
+ list_for_each_entry(scan, &rio_scans, node) {
if (scan->mport_id == mport_id) {
list_del(&scan->node);
kfree(scan);
+ break;
}
+ }
mutex_unlock(&rio_mport_list_lock);
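The added break is what keeps the list_del()/kfree() inside plain list_for_each_entry() safe here: the loop never touches the freed node afterwards. A scan that had to keep walking would need the _safe variant, as in this small sketch with a hypothetical node type:

#include <linux/list.h>
#include <linux/slab.h>

struct example_scan {			/* hypothetical list node */
	struct list_head node;
	int mport_id;
};

static void example_remove_matching(struct list_head *scans, int mport_id)
{
	struct example_scan *scan, *next;

	/* The _safe variant keeps a lookahead pointer so the current node may be freed. */
	list_for_each_entry_safe(scan, next, scans, node) {
		if (scan->mport_id == mport_id) {
			list_del(&scan->node);
			kfree(scan);
		}
	}
}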
diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c
new file mode 100644
index 00000000000..3459f60dcfd
--- /dev/null
+++ b/drivers/regulator/88pm800.c
@@ -0,0 +1,383 @@
+/*
+ * Regulators driver for Marvell 88PM800
+ *
+ * Copyright (C) 2012 Marvell International Ltd.
+ * Joseph(Yossi) Hanin <yhanin@marvell.com>
+ * Yi Zhang <yizhang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/mfd/88pm80x.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/regulator/of_regulator.h>
+
+/* LDO1 with DVC[0..3] */
+#define PM800_LDO1_VOUT (0x08) /* VOUT1 */
+#define PM800_LDO1_VOUT_2 (0x09)
+#define PM800_LDO1_VOUT_3 (0x0A)
+#define PM800_LDO2_VOUT (0x0B)
+#define PM800_LDO3_VOUT (0x0C)
+#define PM800_LDO4_VOUT (0x0D)
+#define PM800_LDO5_VOUT (0x0E)
+#define PM800_LDO6_VOUT (0x0F)
+#define PM800_LDO7_VOUT (0x10)
+#define PM800_LDO8_VOUT (0x11)
+#define PM800_LDO9_VOUT (0x12)
+#define PM800_LDO10_VOUT (0x13)
+#define PM800_LDO11_VOUT (0x14)
+#define PM800_LDO12_VOUT (0x15)
+#define PM800_LDO13_VOUT (0x16)
+#define PM800_LDO14_VOUT (0x17)
+#define PM800_LDO15_VOUT (0x18)
+#define PM800_LDO16_VOUT (0x19)
+#define PM800_LDO17_VOUT (0x1A)
+#define PM800_LDO18_VOUT (0x1B)
+#define PM800_LDO19_VOUT (0x1C)
+
+/* BUCK1 with DVC[0..3] */
+#define PM800_BUCK1 (0x3C)
+#define PM800_BUCK1_1 (0x3D)
+#define PM800_BUCK1_2 (0x3E)
+#define PM800_BUCK1_3 (0x3F)
+#define PM800_BUCK2 (0x40)
+#define PM800_BUCK3 (0x41)
+#define PM800_BUCK4 (0x42)
+#define PM800_BUCK4_1 (0x43)
+#define PM800_BUCK4_2 (0x44)
+#define PM800_BUCK4_3 (0x45)
+#define PM800_BUCK5 (0x46)
+
+#define PM800_BUCK_ENA (0x50)
+#define PM800_LDO_ENA1_1 (0x51)
+#define PM800_LDO_ENA1_2 (0x52)
+#define PM800_LDO_ENA1_3 (0x53)
+
+#define PM800_LDO_ENA2_1 (0x56)
+#define PM800_LDO_ENA2_2 (0x57)
+#define PM800_LDO_ENA2_3 (0x58)
+
+#define PM800_BUCK1_MISC1 (0x78)
+#define PM800_BUCK3_MISC1 (0x7E)
+#define PM800_BUCK4_MISC1 (0x81)
+#define PM800_BUCK5_MISC1 (0x84)
+
+struct pm800_regulator_info {
+ struct regulator_desc desc;
+ int max_ua;
+};
+
+struct pm800_regulators {
+ struct regulator_dev *regulators[PM800_ID_RG_MAX];
+ struct pm80x_chip *chip;
+ struct regmap *map;
+};
+
+/*
+ * vreg - the buck register name (as a string).
+ * ereg - the enable register name (as a string).
+ * ebit - the bit number in the enable register.
+ * amax - the maximum current (in uA).
+ * volt_ranges - the linear voltage ranges. Bucks have two different voltage
+ *               steps, so ranges describe the map more naturally than a
+ *               constant voltage table would.
+ * n_volt - the number of available selectors.
+ */
+#define PM800_BUCK(vreg, ereg, ebit, amax, volt_ranges, n_volt) \
+{ \
+ .desc = { \
+ .name = #vreg, \
+ .ops = &pm800_volt_range_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = PM800_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = n_volt, \
+ .linear_ranges = volt_ranges, \
+ .n_linear_ranges = ARRAY_SIZE(volt_ranges), \
+ .vsel_reg = PM800_##vreg, \
+ .vsel_mask = 0x7f, \
+ .enable_reg = PM800_##ereg, \
+ .enable_mask = 1 << (ebit), \
+ }, \
+ .max_ua = (amax), \
+}
+
+/*
+ * vreg - the LDO register name (as a string).
+ * ereg - the enable register name (as a string).
+ * ebit - the bit number in the enable register.
+ * amax - the maximum current (in uA).
+ * ldo_volt_table - the LDO voltage table.
+ * The LDOs have too many discrete voltage ranges, so a voltage table is
+ * simpler and faster than linear ranges.
+ */
+#define PM800_LDO(vreg, ereg, ebit, amax, ldo_volt_table) \
+{ \
+ .desc = { \
+ .name = #vreg, \
+ .ops = &pm800_volt_table_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = PM800_ID_##vreg, \
+ .owner = THIS_MODULE, \
+ .n_voltages = ARRAY_SIZE(ldo_volt_table), \
+ .vsel_reg = PM800_##vreg##_VOUT, \
+ .vsel_mask = 0x1f, \
+ .enable_reg = PM800_##ereg, \
+ .enable_mask = 1 << (ebit), \
+ .volt_table = ldo_volt_table, \
+ }, \
+ .max_ua = (amax), \
+}
+
+/* Ranges are sorted in ascending order. */
+static const struct regulator_linear_range buck1_volt_range[] = {
+ { .min_uV = 600000, .max_uV = 1587500, .min_sel = 0, .max_sel = 0x4f,
+ .uV_step = 12500 },
+ { .min_uV = 1600000, .max_uV = 1800000, .min_sel = 0x50,
+ .max_sel = 0x54, .uV_step = 50000 },
+};
+
+/* BUCK 2~5 have the same voltage ranges. */
+static const struct regulator_linear_range buck2_5_volt_range[] = {
+ { .min_uV = 600000, .max_uV = 1587500, .min_sel = 0, .max_sel = 0x4f,
+ .uV_step = 12500 },
+ { .min_uV = 1600000, .max_uV = 3300000, .min_sel = 0x50,
+ .max_sel = 0x72, .uV_step = 50000 },
+};
+
+static const unsigned int ldo1_volt_table[] = {
+ 600000, 650000, 700000, 750000, 800000, 850000, 900000, 950000,
+ 1000000, 1050000, 1100000, 1150000, 1200000, 1300000, 1400000, 1500000,
+};
+
+static const unsigned int ldo2_volt_table[] = {
+ 1700000, 1800000, 1900000, 2000000, 2100000, 2500000, 2700000, 2800000,
+};
+
+/* LDO 3~17 have the same voltage table. */
+static const unsigned int ldo3_17_volt_table[] = {
+ 1200000, 1250000, 1700000, 1800000, 1850000, 1900000, 2500000, 2600000,
+ 2700000, 2750000, 2800000, 2850000, 2900000, 3000000, 3100000, 3300000,
+};
+
+/* LDO 18~19 have the same voltage table. */
+static const unsigned int ldo18_19_volt_table[] = {
+ 1700000, 1800000, 1900000, 2500000, 2800000, 2900000, 3100000, 3300000,
+};
+
+static int pm800_get_current_limit(struct regulator_dev *rdev)
+{
+ struct pm800_regulator_info *info = rdev_get_drvdata(rdev);
+
+ return info->max_ua;
+}
+
+static struct regulator_ops pm800_volt_range_ops = {
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_current_limit = pm800_get_current_limit,
+};
+
+static struct regulator_ops pm800_volt_table_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_iterate,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_current_limit = pm800_get_current_limit,
+};
+
+/* The array is indexed by id (PM800_ID_XXX). */
+static struct pm800_regulator_info pm800_regulator_info[] = {
+ PM800_BUCK(BUCK1, BUCK_ENA, 0, 3000000, buck1_volt_range, 0x55),
+ PM800_BUCK(BUCK2, BUCK_ENA, 1, 1200000, buck2_5_volt_range, 0x73),
+ PM800_BUCK(BUCK3, BUCK_ENA, 2, 1200000, buck2_5_volt_range, 0x73),
+ PM800_BUCK(BUCK4, BUCK_ENA, 3, 1200000, buck2_5_volt_range, 0x73),
+ PM800_BUCK(BUCK5, BUCK_ENA, 4, 1200000, buck2_5_volt_range, 0x73),
+
+ PM800_LDO(LDO1, LDO_ENA1_1, 0, 200000, ldo1_volt_table),
+ PM800_LDO(LDO2, LDO_ENA1_1, 1, 10000, ldo2_volt_table),
+ PM800_LDO(LDO3, LDO_ENA1_1, 2, 300000, ldo3_17_volt_table),
+ PM800_LDO(LDO4, LDO_ENA1_1, 3, 300000, ldo3_17_volt_table),
+ PM800_LDO(LDO5, LDO_ENA1_1, 4, 300000, ldo3_17_volt_table),
+ PM800_LDO(LDO6, LDO_ENA1_1, 5, 300000, ldo3_17_volt_table),
+ PM800_LDO(LDO7, LDO_ENA1_1, 6, 300000, ldo3_17_volt_table),
+ PM800_LDO(LDO8, LDO_ENA1_1, 7, 300000, ldo3_17_volt_table),
+ PM800_LDO(LDO9, LDO_ENA1_2, 0, 300000, ldo3_17_volt_table),
+ PM800_LDO(LDO10, LDO_ENA1_2, 1, 300000, ldo3_17_volt_table),
+ PM800_LDO(LDO11, LDO_ENA1_2, 2, 300000, ldo3_17_volt_table),
+ PM800_LDO(LDO12, LDO_ENA1_2, 3, 300000, ldo3_17_volt_table),
+ PM800_LDO(LDO13, LDO_ENA1_2, 4, 300000, ldo3_17_volt_table),
+ PM800_LDO(LDO14, LDO_ENA1_2, 5, 300000, ldo3_17_volt_table),
+ PM800_LDO(LDO15, LDO_ENA1_2, 6, 300000, ldo3_17_volt_table),
+ PM800_LDO(LDO16, LDO_ENA1_2, 7, 300000, ldo3_17_volt_table),
+ PM800_LDO(LDO17, LDO_ENA1_3, 0, 300000, ldo3_17_volt_table),
+ PM800_LDO(LDO18, LDO_ENA1_3, 1, 200000, ldo18_19_volt_table),
+ PM800_LDO(LDO19, LDO_ENA1_3, 2, 200000, ldo18_19_volt_table),
+};
+
+#define PM800_REGULATOR_OF_MATCH(_name, _id) \
+ [PM800_ID_##_id] = { \
+ .name = #_name, \
+ .driver_data = &pm800_regulator_info[PM800_ID_##_id], \
+ }
+
+static struct of_regulator_match pm800_regulator_matches[] = {
+ PM800_REGULATOR_OF_MATCH(buck1, BUCK1),
+ PM800_REGULATOR_OF_MATCH(buck2, BUCK2),
+ PM800_REGULATOR_OF_MATCH(buck3, BUCK3),
+ PM800_REGULATOR_OF_MATCH(buck4, BUCK4),
+ PM800_REGULATOR_OF_MATCH(buck5, BUCK5),
+ PM800_REGULATOR_OF_MATCH(ldo1, LDO1),
+ PM800_REGULATOR_OF_MATCH(ldo2, LDO2),
+ PM800_REGULATOR_OF_MATCH(ldo3, LDO3),
+ PM800_REGULATOR_OF_MATCH(ldo4, LDO4),
+ PM800_REGULATOR_OF_MATCH(ldo5, LDO5),
+ PM800_REGULATOR_OF_MATCH(ldo6, LDO6),
+ PM800_REGULATOR_OF_MATCH(ldo7, LDO7),
+ PM800_REGULATOR_OF_MATCH(ldo8, LDO8),
+ PM800_REGULATOR_OF_MATCH(ldo9, LDO9),
+ PM800_REGULATOR_OF_MATCH(ldo10, LDO10),
+ PM800_REGULATOR_OF_MATCH(ldo11, LDO11),
+ PM800_REGULATOR_OF_MATCH(ldo12, LDO12),
+ PM800_REGULATOR_OF_MATCH(ldo13, LDO13),
+ PM800_REGULATOR_OF_MATCH(ldo14, LDO14),
+ PM800_REGULATOR_OF_MATCH(ldo15, LDO15),
+ PM800_REGULATOR_OF_MATCH(ldo16, LDO16),
+ PM800_REGULATOR_OF_MATCH(ldo17, LDO17),
+ PM800_REGULATOR_OF_MATCH(ldo18, LDO18),
+ PM800_REGULATOR_OF_MATCH(ldo19, LDO19),
+};
+
+static int pm800_regulator_dt_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int ret;
+
+ ret = of_regulator_match(&pdev->dev, np,
+ pm800_regulator_matches,
+ ARRAY_SIZE(pm800_regulator_matches));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int pm800_regulator_probe(struct platform_device *pdev)
+{
+ struct pm80x_chip *chip = dev_get_drvdata(pdev->dev.parent);
+ struct pm80x_platform_data *pdata = dev_get_platdata(pdev->dev.parent);
+ struct pm800_regulators *pm800_data;
+ struct pm800_regulator_info *info;
+ struct regulator_config config = { };
+ struct regulator_init_data *init_data;
+ int i, ret;
+
+ if (!pdata || pdata->num_regulators == 0) {
+ if (IS_ENABLED(CONFIG_OF)) {
+ ret = pm800_regulator_dt_init(pdev);
+ if (ret)
+ return ret;
+ } else {
+ return -ENODEV;
+ }
+ } else if (pdata->num_regulators) {
+ unsigned int count = 0;
+
+ /* Check whether num_regulators is valid. */
+ for (i = 0; i < ARRAY_SIZE(pdata->regulators); i++) {
+ if (pdata->regulators[i])
+ count++;
+ }
+ if (count != pdata->num_regulators)
+ return -EINVAL;
+ } else {
+ return -EINVAL;
+ }
+
+ pm800_data = devm_kzalloc(&pdev->dev, sizeof(*pm800_data),
+ GFP_KERNEL);
+ if (!pm800_data) {
+ dev_err(&pdev->dev, "Failed to allocate pm800_regulators\n");
+ return -ENOMEM;
+ }
+
+ pm800_data->map = chip->subchip->regmap_power;
+ pm800_data->chip = chip;
+
+ platform_set_drvdata(pdev, pm800_data);
+
+ for (i = 0; i < PM800_ID_RG_MAX; i++) {
+ if (!pdata || pdata->num_regulators == 0)
+ init_data = pm800_regulator_matches[i].init_data;
+ else
+ init_data = pdata->regulators[i];
+ if (!init_data)
+ continue;
+ info = pm800_regulator_matches[i].driver_data;
+ config.dev = &pdev->dev;
+ config.init_data = init_data;
+ config.driver_data = info;
+ config.regmap = pm800_data->map;
+ config.of_node = pm800_regulator_matches[i].of_node;
+
+ pm800_data->regulators[i] =
+ regulator_register(&info->desc, &config);
+ if (IS_ERR(pm800_data->regulators[i])) {
+ ret = PTR_ERR(pm800_data->regulators[i]);
+ dev_err(&pdev->dev, "Failed to register %s\n",
+ info->desc.name);
+
+ while (--i >= 0)
+ regulator_unregister(pm800_data->regulators[i]);
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int pm800_regulator_remove(struct platform_device *pdev)
+{
+ struct pm800_regulators *pm800_data = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < PM800_ID_RG_MAX; i++)
+ regulator_unregister(pm800_data->regulators[i]);
+
+ return 0;
+}
+
+static struct platform_driver pm800_regulator_driver = {
+ .driver = {
+ .name = "88pm80x-regulator",
+ .owner = THIS_MODULE,
+ },
+ .probe = pm800_regulator_probe,
+ .remove = pm800_regulator_remove,
+};
+
+module_platform_driver(pm800_regulator_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Joseph(Yossi) Hanin <yhanin@marvell.com>");
+MODULE_DESCRIPTION("Regulator Driver for Marvell 88PM800 PMIC");
+MODULE_ALIAS("platform:88pm800-regulator");
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index 8a7cb1f4304..70230974468 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -346,7 +346,7 @@ static int pm8607_regulator_probe(struct platform_device *pdev)
{
struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
struct pm8607_regulator_info *info = NULL;
- struct regulator_init_data *pdata = pdev->dev.platform_data;
+ struct regulator_init_data *pdata = dev_get_platdata(&pdev->dev);
struct regulator_config config = { };
struct resource *res;
int i;
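
This hunk (and several later ones in the series) replaces open-coded pdev->dev.platform_data accesses with dev_get_platdata(). The accessor is a trivial inline in <linux/device.h>, roughly:

    static inline void *dev_get_platdata(const struct device *dev)
    {
            return dev->platform_data;
    }
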
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index f1e6ad98eeb..dfe58096b37 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -64,15 +64,21 @@ config REGULATOR_USERSPACE_CONSUMER
If unsure, say no.
-config REGULATOR_GPIO
- tristate "GPIO regulator support"
- depends on GPIOLIB
+config REGULATOR_88PM800
+ tristate "Marvell 88PM800 Power regulators"
+ depends on MFD_88PM800
help
- This driver provides support for regulators that can be
- controlled via gpios.
- It is capable of supporting current and voltage regulators
- and the platform has to provide a mapping of GPIO-states
- to target volts/amps.
+ This driver supports Marvell 88PM800 voltage regulator chips.
+ It delivers a digitally programmable output voltage, set via the
+ I2C interface. It is typically paired with PXA988 chips to control
+ VCC_MAIN and various other supply voltages.
+
+config REGULATOR_88PM8607
+ tristate "Marvell 88PM8607 Power regulators"
+ depends on MFD_88PM860X=y
+ help
+ This driver supports 88PM8607 voltage regulator chips.
config REGULATOR_AD5398
tristate "Analog Devices AD5398/AD5821 regulators"
@@ -81,6 +87,14 @@ config REGULATOR_AD5398
This driver supports AD5398 and AD5821 current regulator chips.
If building into module, its name is ad5398.ko.
+config REGULATOR_ANATOP
+ tristate "Freescale i.MX on-chip ANATOP LDO regulators"
+ depends on MFD_SYSCON
+ help
+ Say y here to support Freescale i.MX on-chip ANATOP LDOs
+ regulators. It is recommended that this option be
+ enabled on i.MX6 platform.
+
config REGULATOR_AAT2870
tristate "AnalogicTech AAT2870 Regulators"
depends on MFD_AAT2870_CORE
@@ -88,6 +102,22 @@ config REGULATOR_AAT2870
If you have a AnalogicTech AAT2870 say Y to enable the
regulator driver.
+config REGULATOR_AB3100
+ tristate "ST-Ericsson AB3100 Regulator functions"
+ depends on AB3100_CORE
+ default y if AB3100_CORE
+ help
+ These regulators correspond to functionality in the
+ AB3100 analog baseband dealing with power regulators
+ for the system.
+
+config REGULATOR_AB8500
+ bool "ST-Ericsson AB8500 Power Regulators"
+ depends on AB8500_CORE
+ help
+ This driver supports the regulators found on the ST-Ericsson mixed
+ signal AB8500 PMIC
+
config REGULATOR_ARIZONA
tristate "Wolfson Arizona class devices"
depends on MFD_ARIZONA
@@ -96,6 +126,13 @@ config REGULATOR_ARIZONA
Support for the regulators found on Wolfson Arizona class
devices.
+config REGULATOR_AS3711
+ tristate "AS3711 PMIC"
+ depends on MFD_AS3711
+ help
+ This driver provides support for the voltage regulators on the
+ AS3711 PMIC
+
config REGULATOR_DA903X
tristate "Dialog Semiconductor DA9030/DA9034 regulators"
depends on PMIC_DA903X
@@ -120,6 +157,37 @@ config REGULATOR_DA9055
This driver can also be built as a module. If so, the module
will be called da9055-regulator.
+config REGULATOR_DA9063
+ tristate "Dialog Semiconductor DA9063 regulators"
+ depends on MFD_DA9063
+ help
+ Say y here to support the BUCK and LDO regulators found on
+ DA9063 PMICs.
+
+ This driver can also be built as a module. If so, the module
+ will be called da9063-regulator.
+
+config REGULATOR_DA9210
+ tristate "Dialog Semiconductor DA9210 regulator"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say y here to support the Dialog Semiconductor DA9210.
+ The DA9210 is a 12A multi-phase synchronous step-down
+ DC-DC buck converter controlled through an I2C
+ interface.
+
+config REGULATOR_DBX500_PRCMU
+ bool
+
+config REGULATOR_DB8500_PRCMU
+ bool "ST-Ericsson DB8500 Voltage Domain Regulators"
+ depends on MFD_DB8500_PRCMU
+ select REGULATOR_DBX500_PRCMU
+ help
+ This driver supports the voltage domain regulators controlled by the
+ DB8500 PRCMU
+
config REGULATOR_FAN53555
tristate "Fairchild FAN53555 Regulator"
depends on I2C
@@ -131,44 +199,57 @@ config REGULATOR_FAN53555
input voltage supply of 2.5V to 5.5V. The output voltage is
programmed through an I2C interface.
-config REGULATOR_ANATOP
- tristate "Freescale i.MX on-chip ANATOP LDO regulators"
- depends on MFD_SYSCON
+config REGULATOR_GPIO
+ tristate "GPIO regulator support"
+ depends on GPIOLIB
help
- Say y here to support Freescale i.MX on-chip ANATOP LDOs
- regulators. It is recommended that this option be
- enabled on i.MX6 platform.
+ This driver provides support for regulators that can be
+ controlled via gpios.
+ It is capable of supporting current and voltage regulators
+ and the platform has to provide a mapping of GPIO-states
+ to target volts/amps.
-config REGULATOR_MC13XXX_CORE
- tristate
+config REGULATOR_ISL6271A
+ tristate "Intersil ISL6271A Power regulator"
+ depends on I2C
+ help
+ This driver supports ISL6271A voltage regulator chip.
-config REGULATOR_MC13783
- tristate "Freescale MC13783 regulator driver"
- depends on MFD_MC13783
- select REGULATOR_MC13XXX_CORE
+config REGULATOR_LP3971
+ tristate "National Semiconductors LP3971 PMIC regulator driver"
+ depends on I2C
help
- Say y here to support the regulators found on the Freescale MC13783
- PMIC.
+ Say Y here to support the voltage regulators and convertors
+ on National Semiconductors LP3971 PMIC
-config REGULATOR_MC13892
- tristate "Freescale MC13892 regulator driver"
- depends on MFD_MC13XXX
- select REGULATOR_MC13XXX_CORE
+config REGULATOR_LP3972
+ tristate "National Semiconductors LP3972 PMIC regulator driver"
+ depends on I2C
help
- Say y here to support the regulators found on the Freescale MC13892
- PMIC.
+ Say Y here to support the voltage regulators and convertors
+ on National Semiconductors LP3972 PMIC
-config REGULATOR_ISL6271A
- tristate "Intersil ISL6271A Power regulator"
+config REGULATOR_LP872X
+ tristate "TI/National Semiconductor LP8720/LP8725 voltage regulators"
depends on I2C
+ select REGMAP_I2C
help
- This driver supports ISL6271A voltage regulator chip.
+ This driver supports LP8720/LP8725 PMIC
-config REGULATOR_88PM8607
- bool "Marvell 88PM8607 Power regulators"
- depends on MFD_88PM860X=y
+config REGULATOR_LP8755
+ tristate "TI LP8755 High Performance PMU driver"
+ depends on I2C
+ select REGMAP_I2C
help
- This driver supports 88PM8607 voltage regulator chips.
+ This driver supports LP8755 High Performance PMU driver. This
+ chip contains six step-down DC/DC converters which can support
+ 9 mode multiphase configuration.
+
+config REGULATOR_LP8788
+ tristate "TI LP8788 Power Regulators"
+ depends on MFD_LP8788
+ help
+ This driver supports LP8788 voltage regulator chip.
config REGULATOR_MAX1586
tristate "Maxim 1586/1587 voltage regulator"
@@ -259,48 +340,43 @@ config REGULATOR_MAX77693
and one current regulator 'CHARGER'. This is suitable for
Exynos-4x12 chips.
-config REGULATOR_PCAP
- tristate "Motorola PCAP2 regulator driver"
- depends on EZX_PCAP
- help
- This driver provides support for the voltage regulators of the
- PCAP2 PMIC.
+config REGULATOR_MC13XXX_CORE
+ tristate
-config REGULATOR_LP3971
- tristate "National Semiconductors LP3971 PMIC regulator driver"
- depends on I2C
+config REGULATOR_MC13783
+ tristate "Freescale MC13783 regulator driver"
+ depends on MFD_MC13783
+ select REGULATOR_MC13XXX_CORE
help
- Say Y here to support the voltage regulators and convertors
- on National Semiconductors LP3971 PMIC
+ Say y here to support the regulators found on the Freescale MC13783
+ PMIC.
-config REGULATOR_LP3972
- tristate "National Semiconductors LP3972 PMIC regulator driver"
- depends on I2C
+config REGULATOR_MC13892
+ tristate "Freescale MC13892 regulator driver"
+ depends on MFD_MC13XXX
+ select REGULATOR_MC13XXX_CORE
help
- Say Y here to support the voltage regulators and convertors
- on National Semiconductors LP3972 PMIC
+ Say y here to support the regulators found on the Freescale MC13892
+ PMIC.
-config REGULATOR_LP872X
- bool "TI/National Semiconductor LP8720/LP8725 voltage regulators"
- depends on I2C=y
- select REGMAP_I2C
+config REGULATOR_PALMAS
+ tristate "TI Palmas PMIC Regulators"
+ depends on MFD_PALMAS
help
- This driver supports LP8720/LP8725 PMIC
+ If you wish to control the regulators on the Palmas series of
+ chips say Y here. This will enable support for all the software
+ controllable SMPS/LDO regulators.
-config REGULATOR_LP8755
- tristate "TI LP8755 High Performance PMU driver"
- depends on I2C
- select REGMAP_I2C
- help
- This driver supports LP8755 High Performance PMU driver. This
- chip contains six step-down DC/DC converters which can support
- 9 mode multiphase configuration.
+ The regulators available on Palmas series chips vary depending
+ on the muxing. This is handled automatically in the driver by
+ reading the mux info from OTP.
-config REGULATOR_LP8788
- bool "TI LP8788 Power Regulators"
- depends on MFD_LP8788
+config REGULATOR_PCAP
+ tristate "Motorola PCAP2 regulator driver"
+ depends on EZX_PCAP
help
- This driver supports LP8788 voltage regulator chip.
+ This driver provides support for the voltage regulators of the
+ PCAP2 PMIC.
config REGULATOR_PCF50633
tristate "NXP PCF50633 regulator driver"
@@ -309,6 +385,14 @@ config REGULATOR_PCF50633
Say Y here to support the voltage regulators and convertors
on PCF50633
+config REGULATOR_PFUZE100
+ tristate "Support regulators on Freescale PFUZE100 PMIC"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say y here to support the regulators found on the Freescale PFUZE100
+ PMIC.
+
config REGULATOR_RC5T583
tristate "RICOH RC5T583 Power regulators"
depends on MFD_RC5T583
@@ -335,44 +419,15 @@ config REGULATOR_S5M8767
via I2C bus. S5M8767A have 9 Bucks and 28 LDOs output and
supports DVS mode with 8bits of output voltage control.
-config REGULATOR_AB3100
- tristate "ST-Ericsson AB3100 Regulator functions"
- depends on AB3100_CORE
- default y if AB3100_CORE
- help
- These regulators correspond to functionality in the
- AB3100 analog baseband dealing with power regulators
- for the system.
-
-config REGULATOR_AB8500
- bool "ST-Ericsson AB8500 Power Regulators"
- depends on AB8500_CORE
- help
- This driver supports the regulators found on the ST-Ericsson mixed
- signal AB8500 PMIC
-
-config REGULATOR_DBX500_PRCMU
- bool
-
-config REGULATOR_DB8500_PRCMU
- bool "ST-Ericsson DB8500 Voltage Domain Regulators"
- depends on MFD_DB8500_PRCMU
- select REGULATOR_DBX500_PRCMU
- help
- This driver supports the voltage domain regulators controlled by the
- DB8500 PRCMU
-
-config REGULATOR_PALMAS
- tristate "TI Palmas PMIC Regulators"
- depends on MFD_PALMAS
+config REGULATOR_TI_ABB
+ tristate "TI Adaptive Body Bias on-chip LDO"
+ depends on ARCH_OMAP
help
- If you wish to control the regulators on the Palmas series of
- chips say Y here. This will enable support for all the software
- controllable SMPS/LDO regulators.
-
- The regulators available on Palmas series chips vary depending
- on the muxing. This is handled automatically in the driver by
- reading the mux info from OTP.
+ Select this option to support Texas Instruments' on-chip Adaptive Body
+ Bias (ABB) LDO regulators. It is recommended that this option be
+ enabled on required TI SoC. Certain Operating Performance Points
+ on TI SoCs may be unstable without enabling this as it provides
+ device specific optimized bias to allow/optimize functionality.
config REGULATOR_TPS51632
tristate "TI TPS51632 Power Regulator"
@@ -475,22 +530,12 @@ config REGULATOR_TPS80031
output to control regulators.
config REGULATOR_TWL4030
- bool "TI TWL4030/TWL5030/TWL6030/TPS659x0 PMIC"
+ tristate "TI TWL4030/TWL5030/TWL6030/TPS659x0 PMIC"
depends on TWL4030_CORE
help
This driver supports the voltage regulators provided by
this family of companion chips.
-config REGULATOR_TI_ABB
- bool "TI Adaptive Body Bias on-chip LDO"
- depends on ARCH_OMAP
- help
- Select this option to support Texas Instruments' on-chip Adaptive Body
- Bias (ABB) LDO regulators. It is recommended that this option be
- enabled on required TI SoC. Certain Operating Performance Points
- on TI SoCs may be unstable without enabling this as it provides
- device specific optimized bias to allow/optimize functionality.
-
config REGULATOR_VEXPRESS
tristate "Versatile Express regulators"
depends on VEXPRESS_CONFIG
@@ -526,12 +571,5 @@ config REGULATOR_WM8994
This driver provides support for the voltage regulators on the
WM8994 CODEC.
-config REGULATOR_AS3711
- tristate "AS3711 PMIC"
- depends on MFD_AS3711
- help
- This driver provides support for the voltage regulators on the
- AS3711 PMIC
-
endif
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index ba4a3cf3afe..185cce24602 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -3,12 +3,13 @@
#
-obj-$(CONFIG_REGULATOR) += core.o dummy.o fixed-helper.o
+obj-$(CONFIG_REGULATOR) += core.o dummy.o fixed-helper.o helpers.o
obj-$(CONFIG_OF) += of_regulator.o
obj-$(CONFIG_REGULATOR_FIXED_VOLTAGE) += fixed.o
obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
+obj-$(CONFIG_REGULATOR_88PM800) += 88pm800.o
obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o
obj-$(CONFIG_REGULATOR_AAT2870) += aat2870-regulator.o
obj-$(CONFIG_REGULATOR_AB3100) += ab3100.o
@@ -20,6 +21,8 @@ obj-$(CONFIG_REGULATOR_AS3711) += as3711-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
obj-$(CONFIG_REGULATOR_DA9055) += da9055-regulator.o
+obj-$(CONFIG_REGULATOR_DA9063) += da9063-regulator.o
+obj-$(CONFIG_REGULATOR_DA9210) += da9210-regulator.o
obj-$(CONFIG_REGULATOR_DBX500_PRCMU) += dbx500-prcmu.o
obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o
obj-$(CONFIG_REGULATOR_FAN53555) += fan53555.o
@@ -46,12 +49,14 @@ obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o
obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o
obj-$(CONFIG_REGULATOR_PALMAS) += palmas-regulator.o
+obj-$(CONFIG_REGULATOR_PFUZE100) += pfuze100-regulator.o
obj-$(CONFIG_REGULATOR_TPS51632) += tps51632-regulator.o
obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_RC5T583) += rc5t583-regulator.o
obj-$(CONFIG_REGULATOR_S2MPS11) += s2mps11.o
obj-$(CONFIG_REGULATOR_S5M8767) += s5m8767.o
+obj-$(CONFIG_REGULATOR_TI_ABB) += ti-abb-regulator.o
obj-$(CONFIG_REGULATOR_TPS6105X) += tps6105x-regulator.o
obj-$(CONFIG_REGULATOR_TPS62360) += tps62360-regulator.o
obj-$(CONFIG_REGULATOR_TPS65023) += tps65023-regulator.o
@@ -64,7 +69,6 @@ obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
obj-$(CONFIG_REGULATOR_TPS65912) += tps65912-regulator.o
obj-$(CONFIG_REGULATOR_TPS80031) += tps80031-regulator.o
obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o
-obj-$(CONFIG_REGULATOR_TI_ABB) += ti-abb-regulator.o
obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index 8b5876356db..881159dfcb5 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -174,7 +174,7 @@ static int aat2870_regulator_probe(struct platform_device *pdev)
config.dev = &pdev->dev;
config.driver_data = ri;
- config.init_data = pdev->dev.platform_data;
+ config.init_data = dev_get_platdata(&pdev->dev);
rdev = regulator_register(&ri->desc, &config);
if (IS_ERR(rdev)) {
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 3be9e46594a..7d5eaa874b2 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -660,7 +660,7 @@ ab3100_regulator_of_probe(struct platform_device *pdev, struct device_node *np)
static int ab3100_regulators_probe(struct platform_device *pdev)
{
- struct ab3100_platform_data *plfdata = pdev->dev.platform_data;
+ struct ab3100_platform_data *plfdata = dev_get_platdata(&pdev->dev);
struct device_node *np = pdev->dev.of_node;
int err = 0;
u8 data;
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index 6b981b5faa7..b2b203cb6b2 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -214,7 +214,7 @@ MODULE_DEVICE_TABLE(i2c, ad5398_id);
static int ad5398_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct regulator_init_data *init_data = client->dev.platform_data;
+ struct regulator_init_data *init_data = dev_get_platdata(&client->dev);
struct regulator_config config = { };
struct ad5398_chip_info *chip;
const struct ad5398_current_data_format *df =
diff --git a/drivers/regulator/as3711-regulator.c b/drivers/regulator/as3711-regulator.c
index 3da6bd6950c..8406cd745da 100644
--- a/drivers/regulator/as3711-regulator.c
+++ b/drivers/regulator/as3711-regulator.c
@@ -30,102 +30,6 @@ struct as3711_regulator {
struct regulator_dev *rdev;
};
-static int as3711_list_voltage_sd(struct regulator_dev *rdev,
- unsigned int selector)
-{
- if (selector >= rdev->desc->n_voltages)
- return -EINVAL;
-
- if (!selector)
- return 0;
- if (selector < 0x41)
- return 600000 + selector * 12500;
- if (selector < 0x71)
- return 1400000 + (selector - 0x40) * 25000;
- return 2600000 + (selector - 0x70) * 50000;
-}
-
-static int as3711_list_voltage_aldo(struct regulator_dev *rdev,
- unsigned int selector)
-{
- if (selector >= rdev->desc->n_voltages)
- return -EINVAL;
-
- if (selector < 0x10)
- return 1200000 + selector * 50000;
- return 1800000 + (selector - 0x10) * 100000;
-}
-
-static int as3711_list_voltage_dldo(struct regulator_dev *rdev,
- unsigned int selector)
-{
- if (selector >= rdev->desc->n_voltages ||
- (selector > 0x10 && selector < 0x20))
- return -EINVAL;
-
- if (selector < 0x11)
- return 900000 + selector * 50000;
- return 1750000 + (selector - 0x20) * 50000;
-}
-
-static int as3711_bound_check(struct regulator_dev *rdev,
- int *min_uV, int *max_uV)
-{
- struct as3711_regulator *reg = rdev_get_drvdata(rdev);
- struct as3711_regulator_info *info = reg->reg_info;
-
- dev_dbg(&rdev->dev, "%s(), %d, %d, %d\n", __func__,
- *min_uV, rdev->desc->min_uV, info->max_uV);
-
- if (*max_uV < *min_uV ||
- *min_uV > info->max_uV || rdev->desc->min_uV > *max_uV)
- return -EINVAL;
-
- if (rdev->desc->n_voltages == 1)
- return 0;
-
- if (*max_uV > info->max_uV)
- *max_uV = info->max_uV;
-
- if (*min_uV < rdev->desc->min_uV)
- *min_uV = rdev->desc->min_uV;
-
- return *min_uV;
-}
-
-static int as3711_sel_check(int min, int max, int bottom, int step)
-{
- int sel, voltage;
-
- /* Round up min, when dividing: keeps us within the range */
- sel = DIV_ROUND_UP(min - bottom, step);
- voltage = sel * step + bottom;
- pr_debug("%s(): select %d..%d in %d+N*%d: %d\n", __func__,
- min, max, bottom, step, sel);
- if (voltage > max)
- return -EINVAL;
-
- return sel;
-}
-
-static int as3711_map_voltage_sd(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- int ret;
-
- ret = as3711_bound_check(rdev, &min_uV, &max_uV);
- if (ret <= 0)
- return ret;
-
- if (min_uV <= 1400000)
- return as3711_sel_check(min_uV, max_uV, 600000, 12500);
-
- if (min_uV <= 2600000)
- return as3711_sel_check(min_uV, max_uV, 1400000, 25000) + 0x40;
-
- return as3711_sel_check(min_uV, max_uV, 2600000, 50000) + 0x70;
-}
-
/*
 * The regulator API supports 4 modes of operation: FAST, NORMAL, IDLE and
* STANDBY. We map them in the following way to AS3711 SD1-4 DCDC modes:
@@ -180,44 +84,14 @@ static unsigned int as3711_get_mode_sd(struct regulator_dev *rdev)
return -EINVAL;
}
-static int as3711_map_voltage_aldo(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- int ret;
-
- ret = as3711_bound_check(rdev, &min_uV, &max_uV);
- if (ret <= 0)
- return ret;
-
- if (min_uV <= 1800000)
- return as3711_sel_check(min_uV, max_uV, 1200000, 50000);
-
- return as3711_sel_check(min_uV, max_uV, 1800000, 100000) + 0x10;
-}
-
-static int as3711_map_voltage_dldo(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- int ret;
-
- ret = as3711_bound_check(rdev, &min_uV, &max_uV);
- if (ret <= 0)
- return ret;
-
- if (min_uV <= 1700000)
- return as3711_sel_check(min_uV, max_uV, 900000, 50000);
-
- return as3711_sel_check(min_uV, max_uV, 1750000, 50000) + 0x20;
-}
-
static struct regulator_ops as3711_sd_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
- .list_voltage = as3711_list_voltage_sd,
- .map_voltage = as3711_map_voltage_sd,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
.get_mode = as3711_get_mode_sd,
.set_mode = as3711_set_mode_sd,
};
@@ -228,8 +102,8 @@ static struct regulator_ops as3711_aldo_ops = {
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
- .list_voltage = as3711_list_voltage_aldo,
- .map_voltage = as3711_map_voltage_aldo,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
};
static struct regulator_ops as3711_dldo_ops = {
@@ -238,8 +112,31 @@ static struct regulator_ops as3711_dldo_ops = {
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
- .list_voltage = as3711_list_voltage_dldo,
- .map_voltage = as3711_map_voltage_dldo,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
+};
+
+static const struct regulator_linear_range as3711_sd_ranges[] = {
+ { .min_uV = 612500, .max_uV = 1400000,
+ .min_sel = 0x1, .max_sel = 0x40, .uV_step = 12500 },
+ { .min_uV = 1425000, .max_uV = 2600000,
+ .min_sel = 0x41, .max_sel = 0x70, .uV_step = 25000 },
+ { .min_uV = 2650000, .max_uV = 3350000,
+ .min_sel = 0x71, .max_sel = 0x7f, .uV_step = 50000 },
+};
+
+static const struct regulator_linear_range as3711_aldo_ranges[] = {
+ { .min_uV = 1200000, .max_uV = 1950000,
+ .min_sel = 0, .max_sel = 0xf, .uV_step = 50000 },
+ { .min_uV = 1800000, .max_uV = 3300000,
+ .min_sel = 0x10, .max_sel = 0x1f, .uV_step = 100000 },
+};
+
+static const struct regulator_linear_range as3711_dldo_ranges[] = {
+ { .min_uV = 900000, .max_uV = 1700000,
+ .min_sel = 0, .max_sel = 0x10, .uV_step = 50000 },
+ { .min_uV = 1750000, .max_uV = 3300000,
+ .min_sel = 0x20, .max_sel = 0x3f, .uV_step = 50000 },
};
#define AS3711_REG(_id, _en_reg, _en_bit, _vmask, _vshift, _min_uV, _max_uV, _sfx) \
@@ -256,6 +153,8 @@ static struct regulator_ops as3711_dldo_ops = {
.enable_reg = AS3711_ ## _en_reg, \
.enable_mask = BIT(_en_bit), \
.min_uV = _min_uV, \
+ .linear_ranges = as3711_ ## _sfx ## _ranges, \
+ .n_linear_ranges = ARRAY_SIZE(as3711_ ## _sfx ## _ranges), \
}, \
.max_uV = _max_uV, \
}
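
The as3711 conversion above drops the hand-written list/map callbacks in favour of regulator_list_voltage_linear_range() and regulator_map_voltage_linear_range(), driven purely by the as3711_*_ranges[] tables. Conceptually, the list helper walks the ranges as in this simplified sketch (not the actual helpers.c implementation):

    #include <linux/errno.h>
    #include <linux/regulator/driver.h>

    /* Simplified walk over a regulator_linear_range table. */
    static int list_voltage_linear_range_sketch(const struct regulator_linear_range *r,
                                                int n_ranges, unsigned int sel)
    {
            int i;

            for (i = 0; i < n_ranges; i++) {
                    if (sel >= r[i].min_sel && sel <= r[i].max_sel)
                            return r[i].min_uV +
                                   (sel - r[i].min_sel) * r[i].uV_step;
            }

            return -EINVAL;
    }
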
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 288c75abc19..a01b8b3b70c 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -323,13 +323,14 @@ static ssize_t regulator_uA_show(struct device *dev,
}
static DEVICE_ATTR(microamps, 0444, regulator_uA_show, NULL);
-static ssize_t regulator_name_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", rdev_get_name(rdev));
}
+static DEVICE_ATTR_RO(name);
static ssize_t regulator_print_opmode(char *buf, int mode)
{
@@ -489,15 +490,16 @@ static ssize_t regulator_total_uA_show(struct device *dev,
}
static DEVICE_ATTR(requested_microamps, 0444, regulator_total_uA_show, NULL);
-static ssize_t regulator_num_users_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t num_users_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
return sprintf(buf, "%d\n", rdev->use_count);
}
+static DEVICE_ATTR_RO(num_users);
-static ssize_t regulator_type_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct regulator_dev *rdev = dev_get_drvdata(dev);
@@ -509,6 +511,7 @@ static ssize_t regulator_type_show(struct device *dev,
}
return sprintf(buf, "unknown\n");
}
+static DEVICE_ATTR_RO(type);
static ssize_t regulator_suspend_mem_uV_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -632,12 +635,13 @@ static DEVICE_ATTR(bypass, 0444,
 * These are the only attributes present for all regulators.
* Other attributes are a function of regulator functionality.
*/
-static struct device_attribute regulator_dev_attrs[] = {
- __ATTR(name, 0444, regulator_name_show, NULL),
- __ATTR(num_users, 0444, regulator_num_users_show, NULL),
- __ATTR(type, 0444, regulator_type_show, NULL),
- __ATTR_NULL,
+static struct attribute *regulator_dev_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_num_users.attr,
+ &dev_attr_type.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(regulator_dev);
static void regulator_dev_release(struct device *dev)
{
@@ -648,7 +652,7 @@ static void regulator_dev_release(struct device *dev)
static struct class regulator_class = {
.name = "regulator",
.dev_release = regulator_dev_release,
- .dev_attrs = regulator_dev_attrs,
+ .dev_groups = regulator_dev_groups,
};
/* Calculate the new optimum regulator operating mode based on the new total
@@ -984,7 +988,8 @@ static int set_machine_constraints(struct regulator_dev *rdev,
}
}
- if (rdev->constraints->ramp_delay && ops->set_ramp_delay) {
+ if ((rdev->constraints->ramp_delay || rdev->constraints->ramp_disable)
+ && ops->set_ramp_delay) {
ret = ops->set_ramp_delay(rdev, rdev->constraints->ramp_delay);
if (ret < 0) {
rdev_err(rdev, "failed to set ramp_delay\n");
@@ -1238,7 +1243,7 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
/* Internal regulator request function */
static struct regulator *_regulator_get(struct device *dev, const char *id,
- int exclusive)
+ bool exclusive)
{
struct regulator_dev *rdev;
struct regulator *regulator = ERR_PTR(-EPROBE_DEFER);
@@ -1344,7 +1349,7 @@ out:
*/
struct regulator *regulator_get(struct device *dev, const char *id)
{
- return _regulator_get(dev, id, 0);
+ return _regulator_get(dev, id, false);
}
EXPORT_SYMBOL_GPL(regulator_get);
@@ -1405,10 +1410,69 @@ EXPORT_SYMBOL_GPL(devm_regulator_get);
*/
struct regulator *regulator_get_exclusive(struct device *dev, const char *id)
{
- return _regulator_get(dev, id, 1);
+ return _regulator_get(dev, id, true);
}
EXPORT_SYMBOL_GPL(regulator_get_exclusive);
+/**
+ * regulator_get_optional - obtain optional access to a regulator.
+ * @dev: device for regulator "consumer"
+ * @id: Supply name or regulator ID.
+ *
+ * Returns a struct regulator corresponding to the regulator producer,
+ * or IS_ERR() condition containing errno.
+ *
+ * This is intended for use by consumers for devices which can have
+ * some supplies unconnected in normal use, such as some MMC devices.
+ * It can allow the regulator core to provide stub supplies for other
+ * supplies requested using normal regulator_get() calls without
+ * disrupting the operation of drivers that can handle absent
+ * supplies.
+ *
+ * Use of supply names configured via regulator_set_device_supply() is
+ * strongly encouraged. It is recommended that the supply name used
+ * should match the name used for the supply and/or the relevant
+ * device pins in the datasheet.
+ */
+struct regulator *regulator_get_optional(struct device *dev, const char *id)
+{
+ return _regulator_get(dev, id, false);
+}
+EXPORT_SYMBOL_GPL(regulator_get_optional);
+
+/**
+ * devm_regulator_get_optional - Resource managed regulator_get_optional()
+ * @dev: device for regulator "consumer"
+ * @id: Supply name or regulator ID.
+ *
+ * Managed regulator_get_optional(). Regulators returned from this
+ * function are automatically regulator_put() on driver detach. See
+ * regulator_get_optional() for more information.
+ */
+struct regulator *devm_regulator_get_optional(struct device *dev,
+ const char *id)
+{
+ struct regulator **ptr, *regulator;
+
+ ptr = devres_alloc(devm_regulator_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ regulator = regulator_get_optional(dev, id);
+ if (!IS_ERR(regulator)) {
+ *ptr = regulator;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return regulator;
+}
+EXPORT_SYMBOL_GPL(devm_regulator_get_optional);
+
/* Locks held by regulator_put() */
static void _regulator_put(struct regulator *regulator)
{
@@ -1435,6 +1499,36 @@ static void _regulator_put(struct regulator *regulator)
}
/**
+ * devm_regulator_get_exclusive - Resource managed regulator_get_exclusive()
+ * @dev: device for regulator "consumer"
+ * @id: Supply name or regulator ID.
+ *
+ * Managed regulator_get_exclusive(). Regulators returned from this function
+ * are automatically regulator_put() on driver detach. See regulator_get() for
+ * more information.
+ */
+struct regulator *devm_regulator_get_exclusive(struct device *dev,
+ const char *id)
+{
+ struct regulator **ptr, *regulator;
+
+ ptr = devres_alloc(devm_regulator_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ regulator = _regulator_get(dev, id, true);
+ if (!IS_ERR(regulator)) {
+ *ptr = regulator;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return regulator;
+}
+EXPORT_SYMBOL_GPL(devm_regulator_get_exclusive);
+
+/**
* regulator_put - "free" the regulator source
* @regulator: regulator source
*
@@ -1890,8 +1984,9 @@ int regulator_disable_deferred(struct regulator *regulator, int ms)
rdev->deferred_disables++;
mutex_unlock(&rdev->mutex);
- ret = schedule_delayed_work(&rdev->disable_work,
- msecs_to_jiffies(ms));
+ ret = queue_delayed_work(system_power_efficient_wq,
+ &rdev->disable_work,
+ msecs_to_jiffies(ms));
if (ret < 0)
return ret;
else
@@ -1899,77 +1994,6 @@ int regulator_disable_deferred(struct regulator *regulator, int ms)
}
EXPORT_SYMBOL_GPL(regulator_disable_deferred);
-/**
- * regulator_is_enabled_regmap - standard is_enabled() for regmap users
- *
- * @rdev: regulator to operate on
- *
- * Regulators that use regmap for their register I/O can set the
- * enable_reg and enable_mask fields in their descriptor and then use
- * this as their is_enabled operation, saving some code.
- */
-int regulator_is_enabled_regmap(struct regulator_dev *rdev)
-{
- unsigned int val;
- int ret;
-
- ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val);
- if (ret != 0)
- return ret;
-
- if (rdev->desc->enable_is_inverted)
- return (val & rdev->desc->enable_mask) == 0;
- else
- return (val & rdev->desc->enable_mask) != 0;
-}
-EXPORT_SYMBOL_GPL(regulator_is_enabled_regmap);
-
-/**
- * regulator_enable_regmap - standard enable() for regmap users
- *
- * @rdev: regulator to operate on
- *
- * Regulators that use regmap for their register I/O can set the
- * enable_reg and enable_mask fields in their descriptor and then use
- * this as their enable() operation, saving some code.
- */
-int regulator_enable_regmap(struct regulator_dev *rdev)
-{
- unsigned int val;
-
- if (rdev->desc->enable_is_inverted)
- val = 0;
- else
- val = rdev->desc->enable_mask;
-
- return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
- rdev->desc->enable_mask, val);
-}
-EXPORT_SYMBOL_GPL(regulator_enable_regmap);
-
-/**
- * regulator_disable_regmap - standard disable() for regmap users
- *
- * @rdev: regulator to operate on
- *
- * Regulators that use regmap for their register I/O can set the
- * enable_reg and enable_mask fields in their descriptor and then use
- * this as their disable() operation, saving some code.
- */
-int regulator_disable_regmap(struct regulator_dev *rdev)
-{
- unsigned int val;
-
- if (rdev->desc->enable_is_inverted)
- val = rdev->desc->enable_mask;
- else
- val = 0;
-
- return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
- rdev->desc->enable_mask, val);
-}
-EXPORT_SYMBOL_GPL(regulator_disable_regmap);
-
static int _regulator_is_enabled(struct regulator_dev *rdev)
{
/* A GPIO control always takes precedence */
@@ -2055,55 +2079,6 @@ int regulator_count_voltages(struct regulator *regulator)
EXPORT_SYMBOL_GPL(regulator_count_voltages);
/**
- * regulator_list_voltage_linear - List voltages with simple calculation
- *
- * @rdev: Regulator device
- * @selector: Selector to convert into a voltage
- *
- * Regulators with a simple linear mapping between voltages and
- * selectors can set min_uV and uV_step in the regulator descriptor
- * and then use this function as their list_voltage() operation,
- */
-int regulator_list_voltage_linear(struct regulator_dev *rdev,
- unsigned int selector)
-{
- if (selector >= rdev->desc->n_voltages)
- return -EINVAL;
- if (selector < rdev->desc->linear_min_sel)
- return 0;
-
- selector -= rdev->desc->linear_min_sel;
-
- return rdev->desc->min_uV + (rdev->desc->uV_step * selector);
-}
-EXPORT_SYMBOL_GPL(regulator_list_voltage_linear);
-
-/**
- * regulator_list_voltage_table - List voltages with table based mapping
- *
- * @rdev: Regulator device
- * @selector: Selector to convert into a voltage
- *
- * Regulators with table based mapping between voltages and
- * selectors can set volt_table in the regulator descriptor
- * and then use this function as their list_voltage() operation.
- */
-int regulator_list_voltage_table(struct regulator_dev *rdev,
- unsigned int selector)
-{
- if (!rdev->desc->volt_table) {
- BUG_ON(!rdev->desc->volt_table);
- return -EINVAL;
- }
-
- if (selector >= rdev->desc->n_voltages)
- return -EINVAL;
-
- return rdev->desc->volt_table[selector];
-}
-EXPORT_SYMBOL_GPL(regulator_list_voltage_table);
-
-/**
* regulator_list_voltage - enumerate supported voltages
* @regulator: regulator source
* @selector: identify voltage to list
@@ -2197,177 +2172,6 @@ int regulator_is_supported_voltage(struct regulator *regulator,
}
EXPORT_SYMBOL_GPL(regulator_is_supported_voltage);
-/**
- * regulator_get_voltage_sel_regmap - standard get_voltage_sel for regmap users
- *
- * @rdev: regulator to operate on
- *
- * Regulators that use regmap for their register I/O can set the
- * vsel_reg and vsel_mask fields in their descriptor and then use this
- * as their get_voltage_vsel operation, saving some code.
- */
-int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev)
-{
- unsigned int val;
- int ret;
-
- ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val);
- if (ret != 0)
- return ret;
-
- val &= rdev->desc->vsel_mask;
- val >>= ffs(rdev->desc->vsel_mask) - 1;
-
- return val;
-}
-EXPORT_SYMBOL_GPL(regulator_get_voltage_sel_regmap);
-
-/**
- * regulator_set_voltage_sel_regmap - standard set_voltage_sel for regmap users
- *
- * @rdev: regulator to operate on
- * @sel: Selector to set
- *
- * Regulators that use regmap for their register I/O can set the
- * vsel_reg and vsel_mask fields in their descriptor and then use this
- * as their set_voltage_vsel operation, saving some code.
- */
-int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel)
-{
- int ret;
-
- sel <<= ffs(rdev->desc->vsel_mask) - 1;
-
- ret = regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
- rdev->desc->vsel_mask, sel);
- if (ret)
- return ret;
-
- if (rdev->desc->apply_bit)
- ret = regmap_update_bits(rdev->regmap, rdev->desc->apply_reg,
- rdev->desc->apply_bit,
- rdev->desc->apply_bit);
- return ret;
-}
-EXPORT_SYMBOL_GPL(regulator_set_voltage_sel_regmap);
-
-/**
- * regulator_map_voltage_iterate - map_voltage() based on list_voltage()
- *
- * @rdev: Regulator to operate on
- * @min_uV: Lower bound for voltage
- * @max_uV: Upper bound for voltage
- *
- * Drivers implementing set_voltage_sel() and list_voltage() can use
- * this as their map_voltage() operation. It will find a suitable
- * voltage by calling list_voltage() until it gets something in bounds
- * for the requested voltages.
- */
-int regulator_map_voltage_iterate(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- int best_val = INT_MAX;
- int selector = 0;
- int i, ret;
-
- /* Find the smallest voltage that falls within the specified
- * range.
- */
- for (i = 0; i < rdev->desc->n_voltages; i++) {
- ret = rdev->desc->ops->list_voltage(rdev, i);
- if (ret < 0)
- continue;
-
- if (ret < best_val && ret >= min_uV && ret <= max_uV) {
- best_val = ret;
- selector = i;
- }
- }
-
- if (best_val != INT_MAX)
- return selector;
- else
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(regulator_map_voltage_iterate);
-
-/**
- * regulator_map_voltage_ascend - map_voltage() for ascendant voltage list
- *
- * @rdev: Regulator to operate on
- * @min_uV: Lower bound for voltage
- * @max_uV: Upper bound for voltage
- *
- * Drivers that have ascendant voltage list can use this as their
- * map_voltage() operation.
- */
-int regulator_map_voltage_ascend(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- int i, ret;
-
- for (i = 0; i < rdev->desc->n_voltages; i++) {
- ret = rdev->desc->ops->list_voltage(rdev, i);
- if (ret < 0)
- continue;
-
- if (ret > max_uV)
- break;
-
- if (ret >= min_uV && ret <= max_uV)
- return i;
- }
-
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(regulator_map_voltage_ascend);
-
-/**
- * regulator_map_voltage_linear - map_voltage() for simple linear mappings
- *
- * @rdev: Regulator to operate on
- * @min_uV: Lower bound for voltage
- * @max_uV: Upper bound for voltage
- *
- * Drivers providing min_uV and uV_step in their regulator_desc can
- * use this as their map_voltage() operation.
- */
-int regulator_map_voltage_linear(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- int ret, voltage;
-
- /* Allow uV_step to be 0 for fixed voltage */
- if (rdev->desc->n_voltages == 1 && rdev->desc->uV_step == 0) {
- if (min_uV <= rdev->desc->min_uV && rdev->desc->min_uV <= max_uV)
- return 0;
- else
- return -EINVAL;
- }
-
- if (!rdev->desc->uV_step) {
- BUG_ON(!rdev->desc->uV_step);
- return -EINVAL;
- }
-
- if (min_uV < rdev->desc->min_uV)
- min_uV = rdev->desc->min_uV;
-
- ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step);
- if (ret < 0)
- return ret;
-
- ret += rdev->desc->linear_min_sel;
-
- /* Map back into a voltage to verify we're still in bounds */
- voltage = rdev->desc->ops->list_voltage(rdev, ret);
- if (voltage < min_uV || voltage > max_uV)
- return -EINVAL;
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(regulator_map_voltage_linear);
-
static int _regulator_do_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV)
{
@@ -2438,8 +2242,8 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
}
/* Call set_voltage_time_sel if successfully obtained old_selector */
- if (ret == 0 && _regulator_is_enabled(rdev) && old_selector >= 0 &&
- old_selector != selector && rdev->desc->ops->set_voltage_time_sel) {
+ if (ret == 0 && !rdev->constraints->ramp_disable && old_selector >= 0
+ && old_selector != selector) {
delay = rdev->desc->ops->set_voltage_time_sel(rdev,
old_selector, selector);
@@ -2971,47 +2775,6 @@ out:
EXPORT_SYMBOL_GPL(regulator_set_optimum_mode);
/**
- * regulator_set_bypass_regmap - Default set_bypass() using regmap
- *
- * @rdev: device to operate on.
- * @enable: state to set.
- */
-int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable)
-{
- unsigned int val;
-
- if (enable)
- val = rdev->desc->bypass_mask;
- else
- val = 0;
-
- return regmap_update_bits(rdev->regmap, rdev->desc->bypass_reg,
- rdev->desc->bypass_mask, val);
-}
-EXPORT_SYMBOL_GPL(regulator_set_bypass_regmap);
-
-/**
- * regulator_get_bypass_regmap - Default get_bypass() using regmap
- *
- * @rdev: device to operate on.
- * @enable: current state.
- */
-int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable)
-{
- unsigned int val;
- int ret;
-
- ret = regmap_read(rdev->regmap, rdev->desc->bypass_reg, &val);
- if (ret != 0)
- return ret;
-
- *enable = val & rdev->desc->bypass_mask;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(regulator_get_bypass_regmap);
-
-/**
* regulator_allow_bypass - allow the regulator to go into bypass mode
*
* @regulator: Regulator to configure
@@ -3740,8 +3503,11 @@ void regulator_unregister(struct regulator_dev *rdev)
if (rdev == NULL)
return;
- if (rdev->supply)
+ if (rdev->supply) {
+ while (rdev->use_count--)
+ regulator_disable(rdev->supply);
regulator_put(rdev->supply);
+ }
mutex_lock(&regulator_list_mutex);
debugfs_remove_recursive(rdev->debugfs);
flush_work(&rdev->disable_work.work);
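
core.c above adds regulator_get_optional() plus managed devm_ variants for both the optional and exclusive forms. A hedged sketch of how a consumer might use the optional variant for a supply that can legitimately be absent; the device, the "vaux" supply name and the probe function are hypothetical:

    #include <linux/err.h>
    #include <linux/platform_device.h>
    #include <linux/regulator/consumer.h>

    static int foo_probe(struct platform_device *pdev)
    {
            struct regulator *vaux;

            vaux = devm_regulator_get_optional(&pdev->dev, "vaux");
            if (IS_ERR(vaux)) {
                    if (PTR_ERR(vaux) == -EPROBE_DEFER)
                            return -EPROBE_DEFER;
                    vaux = NULL;            /* supply genuinely absent */
            }

            return vaux ? regulator_enable(vaux) : 0;
    }
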
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index 2afa5730f32..f06854cf8cf 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -252,39 +252,12 @@ static int da9034_set_dvc_voltage_sel(struct regulator_dev *rdev,
return ret;
}
-static int da9034_map_ldo12_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
- int sel;
-
- if (check_range(info, min_uV, max_uV)) {
- pr_err("invalid voltage range (%d, %d) uV\n", min_uV, max_uV);
- return -EINVAL;
- }
-
- sel = DIV_ROUND_UP(min_uV - info->desc.min_uV, info->desc.uV_step);
- sel = (sel >= 20) ? sel - 12 : ((sel > 7) ? 8 : sel);
-
- return sel;
-}
-
-static int da9034_list_ldo12_voltage(struct regulator_dev *rdev,
- unsigned selector)
-{
- struct da903x_regulator_info *info = rdev_get_drvdata(rdev);
- int volt;
-
- if (selector >= 8)
- volt = 2700000 + rdev->desc->uV_step * (selector - 8);
- else
- volt = rdev->desc->min_uV + rdev->desc->uV_step * selector;
-
- if (volt > info->max_uV)
- return -EINVAL;
-
- return volt;
-}
+static const struct regulator_linear_range da9034_ldo12_ranges[] = {
+ { .min_uV = 1700000, .max_uV = 2050000, .min_sel = 0, .max_sel = 7,
+ .uV_step = 50000 },
+ { .min_uV = 2700000, .max_uV = 3050000, .min_sel = 8, .max_sel = 15,
+ .uV_step = 50000 },
+};
static struct regulator_ops da903x_regulator_ldo_ops = {
.set_voltage_sel = da903x_set_voltage_sel,
@@ -332,8 +305,8 @@ static struct regulator_ops da9034_regulator_dvc_ops = {
static struct regulator_ops da9034_regulator_ldo12_ops = {
.set_voltage_sel = da903x_set_voltage_sel,
.get_voltage_sel = da903x_get_voltage_sel,
- .list_voltage = da9034_list_ldo12_voltage,
- .map_voltage = da9034_map_ldo12_voltage,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
.enable = da903x_enable,
.disable = da903x_disable,
.is_enabled = da903x_is_enabled,
@@ -476,6 +449,8 @@ static int da903x_regulator_probe(struct platform_device *pdev)
if (ri->desc.id == DA9034_ID_LDO12) {
ri->desc.ops = &da9034_regulator_ldo12_ops;
ri->desc.n_voltages = 16;
+ ri->desc.linear_ranges = da9034_ldo12_ranges;
+ ri->desc.n_linear_ranges = ARRAY_SIZE(da9034_ldo12_ranges);
}
if (ri->desc.id == DA9030_ID_LDO14)
@@ -485,7 +460,7 @@ static int da903x_regulator_probe(struct platform_device *pdev)
ri->desc.ops = &da9030_regulator_ldo1_15_ops;
config.dev = &pdev->dev;
- config.init_data = pdev->dev.platform_data;
+ config.init_data = dev_get_platdata(&pdev->dev);
config.driver_data = ri;
rdev = regulator_register(&ri->desc, &config);
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
index 96b569abb46..1e4d483f616 100644
--- a/drivers/regulator/da9052-regulator.c
+++ b/drivers/regulator/da9052-regulator.c
@@ -349,7 +349,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
return -ENOMEM;
da9052 = dev_get_drvdata(pdev->dev.parent);
- pdata = da9052->dev->platform_data;
+ pdata = dev_get_platdata(da9052->dev);
regulator->da9052 = da9052;
regulator->info = find_regulator_info(regulator->da9052->chip_id,
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index 30221099d09..77b53e5a231 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -535,7 +535,7 @@ static int da9055_regulator_probe(struct platform_device *pdev)
struct regulator_config config = { };
struct da9055_regulator *regulator;
struct da9055 *da9055 = dev_get_drvdata(pdev->dev.parent);
- struct da9055_pdata *pdata = da9055->dev->platform_data;
+ struct da9055_pdata *pdata = dev_get_platdata(da9055->dev);
int ret, irq;
if (pdata == NULL || pdata->regulators[pdev->id] == NULL)
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
new file mode 100644
index 00000000000..1a781639077
--- /dev/null
+++ b/drivers/regulator/da9063-regulator.c
@@ -0,0 +1,934 @@
+/*
+ * Regulator driver for DA9063 PMIC series
+ *
+ * Copyright 2012 Dialog Semiconductors Ltd.
+ * Copyright 2013 Philipp Zabel, Pengutronix
+ *
+ * Author: Krystian Garbaciak <krystian.garbaciak@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/mfd/da9063/core.h>
+#include <linux/mfd/da9063/pdata.h>
+#include <linux/mfd/da9063/registers.h>
+
+
+/* Definition for registering regmap bit fields using a mask */
+#define BFIELD(_reg, _mask) \
+ REG_FIELD(_reg, __builtin_ffs((int)_mask) - 1, \
+ sizeof(unsigned int) * 8 - __builtin_clz((_mask)) - 1)
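A minimal stand-alone sketch of the bit arithmetic BFIELD() relies on, assuming a hypothetical 0x30 mask (the real DA9063 masks come from the registers header):

    #include <assert.h>

    /* Mirrors the BFIELD() expansion for a hypothetical 0x30 mask: the
     * lowest set bit becomes the REG_FIELD lsb, the highest the msb. */
    int main(void)
    {
            unsigned int mask = 0x30;
            int lsb = __builtin_ffs((int)mask) - 1;                       /* 4 */
            int msb = sizeof(unsigned int) * 8 - __builtin_clz(mask) - 1; /* 5 */

            assert(lsb == 4 && msb == 5);  /* REG_FIELD(reg, 4, 5): bits [5:4] */
            return 0;
    }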
+
+/* Regulator capabilities and registers description */
+struct da9063_regulator_info {
+ struct regulator_desc desc;
+
+ /* Current limiting */
+ unsigned n_current_limits;
+ const int *current_limits;
+
+ /* DA9063 main register fields */
+ struct reg_field mode; /* buck mode of operation */
+ struct reg_field suspend;
+ struct reg_field sleep;
+ struct reg_field suspend_sleep;
+ unsigned int suspend_vsel_reg;
+ struct reg_field ilimit;
+
+ /* DA9063 event detection bit */
+ struct reg_field oc_event;
+};
+
+/* Macros for LDO */
+#define DA9063_LDO(chip, regl_name, min_mV, step_mV, max_mV) \
+ .desc.id = chip##_ID_##regl_name, \
+ .desc.name = __stringify(chip##_##regl_name), \
+ .desc.ops = &da9063_ldo_ops, \
+ .desc.min_uV = (min_mV) * 1000, \
+ .desc.uV_step = (step_mV) * 1000, \
+ .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1), \
+ .desc.enable_reg = DA9063_REG_##regl_name##_CONT, \
+ .desc.enable_mask = DA9063_LDO_EN, \
+ .desc.vsel_reg = DA9063_REG_V##regl_name##_A, \
+ .desc.vsel_mask = DA9063_V##regl_name##_MASK, \
+ .desc.linear_min_sel = DA9063_V##regl_name##_BIAS, \
+ .sleep = BFIELD(DA9063_REG_V##regl_name##_A, DA9063_LDO_SL), \
+ .suspend_sleep = BFIELD(DA9063_REG_V##regl_name##_B, DA9063_LDO_SL), \
+ .suspend_vsel_reg = DA9063_REG_V##regl_name##_B
+
+/* Macros for voltage DC/DC converters (BUCKs) */
+#define DA9063_BUCK(chip, regl_name, min_mV, step_mV, max_mV, limits_array) \
+ .desc.id = chip##_ID_##regl_name, \
+ .desc.name = __stringify(chip##_##regl_name), \
+ .desc.ops = &da9063_buck_ops, \
+ .desc.min_uV = (min_mV) * 1000, \
+ .desc.uV_step = (step_mV) * 1000, \
+ .desc.n_voltages = ((max_mV) - (min_mV))/(step_mV) + 1, \
+ .current_limits = limits_array, \
+ .n_current_limits = ARRAY_SIZE(limits_array)
+
+#define DA9063_BUCK_COMMON_FIELDS(regl_name) \
+ .desc.enable_reg = DA9063_REG_##regl_name##_CONT, \
+ .desc.enable_mask = DA9063_BUCK_EN, \
+ .desc.vsel_reg = DA9063_REG_V##regl_name##_A, \
+ .desc.vsel_mask = DA9063_VBUCK_MASK, \
+ .desc.linear_min_sel = DA9063_VBUCK_BIAS, \
+ .sleep = BFIELD(DA9063_REG_V##regl_name##_A, DA9063_BUCK_SL), \
+ .suspend_sleep = BFIELD(DA9063_REG_V##regl_name##_B, DA9063_BUCK_SL), \
+ .suspend_vsel_reg = DA9063_REG_V##regl_name##_B, \
+ .mode = BFIELD(DA9063_REG_##regl_name##_CFG, DA9063_BUCK_MODE_MASK)
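A quick stand-alone check of the .desc.n_voltages arithmetic both macros perform, using the LDO1 (600-1860 mV, 20 mV steps) and BCORE1 (300-1570 mV, 10 mV steps) parameters from the info table further down:

    #include <assert.h>

    /* n_voltages = (max_mV - min_mV) / step_mV + 1, as computed by the
     * DA9063_LDO() and DA9063_BUCK() macros above. */
    int main(void)
    {
            assert((1860 - 600) / 20 + 1 == 64);   /* LDO1: 64 selectors    */
            assert((1570 - 300) / 10 + 1 == 128);  /* BCORE1: 128 selectors */
            return 0;
    }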
+
+/* Defines the assignment of the regulator info table to a chip model */
+struct da9063_dev_model {
+ const struct da9063_regulator_info *regulator_info;
+ unsigned n_regulators;
+ unsigned dev_model;
+};
+
+/* Single regulator settings */
+struct da9063_regulator {
+ struct regulator_desc desc;
+ struct regulator_dev *rdev;
+ struct da9063 *hw;
+ const struct da9063_regulator_info *info;
+
+ struct regmap_field *mode;
+ struct regmap_field *suspend;
+ struct regmap_field *sleep;
+ struct regmap_field *suspend_sleep;
+ struct regmap_field *ilimit;
+};
+
+/* Encapsulates all information for the regulators driver */
+struct da9063_regulators {
+ int irq_ldo_lim;
+ int irq_uvov;
+
+ unsigned n_regulators;
+ /* Array size to be defined during init. Keep at end. */
+ struct da9063_regulator regulator[0];
+};
+
+/* BUCK modes for DA9063 */
+enum {
+ BUCK_MODE_MANUAL, /* 0 */
+ BUCK_MODE_SLEEP, /* 1 */
+ BUCK_MODE_SYNC, /* 2 */
+ BUCK_MODE_AUTO /* 3 */
+};
+
+/* Regulator operations */
+
+/* Current limits array (in uA) for BCORE1, BCORE2, BPRO.
+ Entry indexes correspond to register values. */
+static const int da9063_buck_a_limits[] = {
+ 500000, 600000, 700000, 800000, 900000, 1000000, 1100000, 1200000,
+ 1300000, 1400000, 1500000, 1600000, 1700000, 1800000, 1900000, 2000000
+};
+
+/* Current limits array (in uA) for BMEM, BIO, BPERI.
+ Entry indexes correspond to register values. */
+static const int da9063_buck_b_limits[] = {
+ 1500000, 1600000, 1700000, 1800000, 1900000, 2000000, 2100000, 2200000,
+ 2300000, 2400000, 2500000, 2600000, 2700000, 2800000, 2900000, 3000000
+};
+
+/* Current limits array (in uA) for merged BCORE1 and BCORE2.
+ Entry indexes correspond to register values. */
+static const int da9063_bcores_merged_limits[] = {
+ 1000000, 1200000, 1400000, 1600000, 1800000, 2000000, 2200000, 2400000,
+ 2600000, 2800000, 3000000, 3200000, 3400000, 3600000, 3800000, 4000000
+};
+
+/* Current limits array (in uA) for merged BMEM and BIO.
+ Entry indexes correspond to register values. */
+static const int da9063_bmem_bio_merged_limits[] = {
+ 3000000, 3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000,
+ 4600000, 4800000, 5000000, 5200000, 5400000, 5600000, 5800000, 6000000
+};
+
+static int da9063_set_current_limit(struct regulator_dev *rdev,
+ int min_uA, int max_uA)
+{
+ struct da9063_regulator *regl = rdev_get_drvdata(rdev);
+ const struct da9063_regulator_info *rinfo = regl->info;
+ int n, tval;
+
+ for (n = 0; n < rinfo->n_current_limits; n++) {
+ tval = rinfo->current_limits[n];
+ if (tval >= min_uA && tval <= max_uA)
+ return regmap_field_write(regl->ilimit, n);
+ }
+
+ return -EINVAL;
+}
+
+static int da9063_get_current_limit(struct regulator_dev *rdev)
+{
+ struct da9063_regulator *regl = rdev_get_drvdata(rdev);
+ const struct da9063_regulator_info *rinfo = regl->info;
+ unsigned int sel;
+ int ret;
+
+ ret = regmap_field_read(regl->ilimit, &sel);
+ if (ret < 0)
+ return ret;
+
+ if (sel >= rinfo->n_current_limits)
+ sel = rinfo->n_current_limits - 1;
+
+ return rinfo->current_limits[sel];
+}
+
+static int da9063_buck_set_mode(struct regulator_dev *rdev, unsigned mode)
+{
+ struct da9063_regulator *regl = rdev_get_drvdata(rdev);
+ unsigned val;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ val = BUCK_MODE_SYNC;
+ break;
+ case REGULATOR_MODE_NORMAL:
+ val = BUCK_MODE_AUTO;
+ break;
+ case REGULATOR_MODE_STANDBY:
+ val = BUCK_MODE_SLEEP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_field_write(regl->mode, val);
+}
+
+/*
+ * Bucks use a single mode register field for both normal operation
+ * and the suspend state.
+ * There are 3 modes to map to: FAST, NORMAL, and STANDBY.
+ */
+
+static unsigned da9063_buck_get_mode(struct regulator_dev *rdev)
+{
+ struct da9063_regulator *regl = rdev_get_drvdata(rdev);
+ struct regmap_field *field;
+ unsigned int val, mode = 0;
+ int ret;
+
+ ret = regmap_field_read(regl->mode, &val);
+ if (ret < 0)
+ return ret;
+
+ switch (val) {
+ default:
+ case BUCK_MODE_MANUAL:
+ mode = REGULATOR_MODE_FAST | REGULATOR_MODE_STANDBY;
+ /* Sleep flag bit decides the mode */
+ break;
+ case BUCK_MODE_SLEEP:
+ return REGULATOR_MODE_STANDBY;
+ case BUCK_MODE_SYNC:
+ return REGULATOR_MODE_FAST;
+ case BUCK_MODE_AUTO:
+ return REGULATOR_MODE_NORMAL;
+ }
+
+ /* Detect current regulator state */
+ ret = regmap_field_read(regl->suspend, &val);
+ if (ret < 0)
+ return 0;
+
+ /* Read regulator mode from proper register, depending on state */
+ if (val)
+ field = regl->suspend_sleep;
+ else
+ field = regl->sleep;
+
+ ret = regmap_field_read(field, &val);
+ if (ret < 0)
+ return 0;
+
+ if (val)
+ mode &= REGULATOR_MODE_STANDBY;
+ else
+ mode &= REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST;
+
+ return mode;
+}
+
+/*
+ * LDOs use sleep flags - one for normal and one for suspend state.
+ * There are 2 modes to map to: NORMAL and STANDBY (sleep) for each state.
+ */
+
+static int da9063_ldo_set_mode(struct regulator_dev *rdev, unsigned mode)
+{
+ struct da9063_regulator *regl = rdev_get_drvdata(rdev);
+ unsigned val;
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ val = 0;
+ break;
+ case REGULATOR_MODE_STANDBY:
+ val = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_field_write(regl->sleep, val);
+}
+
+static unsigned da9063_ldo_get_mode(struct regulator_dev *rdev)
+{
+ struct da9063_regulator *regl = rdev_get_drvdata(rdev);
+ struct regmap_field *field;
+ unsigned int val;
+ int ret;
+
+ /* Detect current regulator state */
+ ret = regmap_field_read(regl->suspend, &val);
+ if (ret < 0)
+ return 0;
+
+ /* Read regulator mode from proper register, depending on state */
+ if (val)
+ field = regl->suspend_sleep;
+ else
+ field = regl->sleep;
+
+ ret = regmap_field_read(field, &val);
+ if (ret < 0)
+ return 0;
+
+ if (val)
+ return REGULATOR_MODE_STANDBY;
+ else
+ return REGULATOR_MODE_NORMAL;
+}
+
+static int da9063_buck_get_status(struct regulator_dev *rdev)
+{
+ int ret = regulator_is_enabled_regmap(rdev);
+
+ if (ret == 0) {
+ ret = REGULATOR_STATUS_OFF;
+ } else if (ret > 0) {
+ ret = da9063_buck_get_mode(rdev);
+ if (ret > 0)
+ ret = regulator_mode_to_status(ret);
+ else if (ret == 0)
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int da9063_ldo_get_status(struct regulator_dev *rdev)
+{
+ int ret = regulator_is_enabled_regmap(rdev);
+
+ if (ret == 0) {
+ ret = REGULATOR_STATUS_OFF;
+ } else if (ret > 0) {
+ ret = da9063_ldo_get_mode(rdev);
+ if (ret > 0)
+ ret = regulator_mode_to_status(ret);
+ else if (ret == 0)
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int da9063_set_suspend_voltage(struct regulator_dev *rdev, int uV)
+{
+ struct da9063_regulator *regl = rdev_get_drvdata(rdev);
+ const struct da9063_regulator_info *rinfo = regl->info;
+ int ret, sel;
+
+ sel = regulator_map_voltage_linear(rdev, uV, uV);
+ if (sel < 0)
+ return -EINVAL;
+
+ sel <<= ffs(rdev->desc->vsel_mask) - 1;
+
+ ret = regmap_update_bits(regl->hw->regmap, rinfo->suspend_vsel_reg,
+ rdev->desc->vsel_mask, sel);
+
+ return ret;
+}
+
+static int da9063_suspend_enable(struct regulator_dev *rdev)
+{
+ struct da9063_regulator *regl = rdev_get_drvdata(rdev);
+
+ return regmap_field_write(regl->suspend, 1);
+}
+
+static int da9063_suspend_disable(struct regulator_dev *rdev)
+{
+ struct da9063_regulator *regl = rdev_get_drvdata(rdev);
+
+ return regmap_field_write(regl->suspend, 0);
+}
+
+static int da9063_buck_set_suspend_mode(struct regulator_dev *rdev, unsigned mode)
+{
+ struct da9063_regulator *regl = rdev_get_drvdata(rdev);
+ int val;
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ val = BUCK_MODE_SYNC;
+ break;
+ case REGULATOR_MODE_NORMAL:
+ val = BUCK_MODE_AUTO;
+ break;
+ case REGULATOR_MODE_STANDBY:
+ val = BUCK_MODE_SLEEP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_field_write(regl->mode, val);
+}
+
+static int da9063_ldo_set_suspend_mode(struct regulator_dev *rdev, unsigned mode)
+{
+ struct da9063_regulator *regl = rdev_get_drvdata(rdev);
+ unsigned val;
+
+ switch (mode) {
+ case REGULATOR_MODE_NORMAL:
+ val = 0;
+ break;
+ case REGULATOR_MODE_STANDBY:
+ val = 1;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return regmap_field_write(regl->suspend_sleep, val);
+}
+
+static struct regulator_ops da9063_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .set_current_limit = da9063_set_current_limit,
+ .get_current_limit = da9063_get_current_limit,
+ .set_mode = da9063_buck_set_mode,
+ .get_mode = da9063_buck_get_mode,
+ .get_status = da9063_buck_get_status,
+ .set_suspend_voltage = da9063_set_suspend_voltage,
+ .set_suspend_enable = da9063_suspend_enable,
+ .set_suspend_disable = da9063_suspend_disable,
+ .set_suspend_mode = da9063_buck_set_suspend_mode,
+};
+
+static struct regulator_ops da9063_ldo_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .set_mode = da9063_ldo_set_mode,
+ .get_mode = da9063_ldo_get_mode,
+ .get_status = da9063_ldo_get_status,
+ .set_suspend_voltage = da9063_set_suspend_voltage,
+ .set_suspend_enable = da9063_suspend_enable,
+ .set_suspend_disable = da9063_suspend_disable,
+ .set_suspend_mode = da9063_ldo_set_suspend_mode,
+};
+
+/* Info of regulators for DA9063 */
+static const struct da9063_regulator_info da9063_regulator_info[] = {
+ {
+ DA9063_BUCK(DA9063, BCORE1, 300, 10, 1570,
+ da9063_buck_a_limits),
+ DA9063_BUCK_COMMON_FIELDS(BCORE1),
+ .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBCORE1_SEL),
+ .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_C,
+ DA9063_BCORE1_ILIM_MASK),
+ },
+ {
+ DA9063_BUCK(DA9063, BCORE2, 300, 10, 1570,
+ da9063_buck_a_limits),
+ DA9063_BUCK_COMMON_FIELDS(BCORE2),
+ .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBCORE2_SEL),
+ .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_C,
+ DA9063_BCORE2_ILIM_MASK),
+ },
+ {
+ DA9063_BUCK(DA9063, BPRO, 530, 10, 1800,
+ da9063_buck_a_limits),
+ DA9063_BUCK_COMMON_FIELDS(BPRO),
+ .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBPRO_SEL),
+ .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_B,
+ DA9063_BPRO_ILIM_MASK),
+ },
+ {
+ DA9063_BUCK(DA9063, BMEM, 800, 20, 3340,
+ da9063_buck_b_limits),
+ DA9063_BUCK_COMMON_FIELDS(BMEM),
+ .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBMEM_SEL),
+ .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_A,
+ DA9063_BMEM_ILIM_MASK),
+ },
+ {
+ DA9063_BUCK(DA9063, BIO, 800, 20, 3340,
+ da9063_buck_b_limits),
+ DA9063_BUCK_COMMON_FIELDS(BIO),
+ .suspend = BFIELD(DA9063_REG_DVC_2, DA9063_VBIO_SEL),
+ .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_A,
+ DA9063_BIO_ILIM_MASK),
+ },
+ {
+ DA9063_BUCK(DA9063, BPERI, 800, 20, 3340,
+ da9063_buck_b_limits),
+ DA9063_BUCK_COMMON_FIELDS(BPERI),
+ .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBPERI_SEL),
+ .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_B,
+ DA9063_BPERI_ILIM_MASK),
+ },
+ {
+ DA9063_BUCK(DA9063, BCORES_MERGED, 300, 10, 1570,
+ da9063_bcores_merged_limits),
+ /* BCORES_MERGED uses the same register fields as BCORE1 */
+ DA9063_BUCK_COMMON_FIELDS(BCORE1),
+ .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBCORE1_SEL),
+ .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_C,
+ DA9063_BCORE1_ILIM_MASK),
+ },
+ {
+ DA9063_BUCK(DA9063, BMEM_BIO_MERGED, 800, 20, 3340,
+ da9063_bmem_bio_merged_limits),
+ /* BMEM_BIO_MERGED uses the same register fields as BMEM */
+ DA9063_BUCK_COMMON_FIELDS(BMEM),
+ .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VBMEM_SEL),
+ .ilimit = BFIELD(DA9063_REG_BUCK_ILIM_A,
+ DA9063_BMEM_ILIM_MASK),
+ },
+ {
+ DA9063_LDO(DA9063, LDO1, 600, 20, 1860),
+ .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VLDO1_SEL),
+ },
+ {
+ DA9063_LDO(DA9063, LDO2, 600, 20, 1860),
+ .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VLDO2_SEL),
+ },
+ {
+ DA9063_LDO(DA9063, LDO3, 900, 20, 3440),
+ .suspend = BFIELD(DA9063_REG_DVC_1, DA9063_VLDO3_SEL),
+ .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO3_LIM),
+ },
+ {
+ DA9063_LDO(DA9063, LDO4, 900, 20, 3440),
+ .suspend = BFIELD(DA9063_REG_DVC_2, DA9063_VLDO4_SEL),
+ .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO4_LIM),
+ },
+ {
+ DA9063_LDO(DA9063, LDO5, 900, 50, 3600),
+ .suspend = BFIELD(DA9063_REG_LDO5_CONT, DA9063_VLDO5_SEL),
+ },
+ {
+ DA9063_LDO(DA9063, LDO6, 900, 50, 3600),
+ .suspend = BFIELD(DA9063_REG_LDO6_CONT, DA9063_VLDO6_SEL),
+ },
+ {
+ DA9063_LDO(DA9063, LDO7, 900, 50, 3600),
+ .suspend = BFIELD(DA9063_REG_LDO7_CONT, DA9063_VLDO7_SEL),
+ .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO7_LIM),
+ },
+ {
+ DA9063_LDO(DA9063, LDO8, 900, 50, 3600),
+ .suspend = BFIELD(DA9063_REG_LDO8_CONT, DA9063_VLDO8_SEL),
+ .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO8_LIM),
+ },
+ {
+ DA9063_LDO(DA9063, LDO9, 950, 50, 3600),
+ .suspend = BFIELD(DA9063_REG_LDO9_CONT, DA9063_VLDO9_SEL),
+ },
+ {
+ DA9063_LDO(DA9063, LDO10, 900, 50, 3600),
+ .suspend = BFIELD(DA9063_REG_LDO10_CONT, DA9063_VLDO10_SEL),
+ },
+ {
+ DA9063_LDO(DA9063, LDO11, 900, 50, 3600),
+ .suspend = BFIELD(DA9063_REG_LDO11_CONT, DA9063_VLDO11_SEL),
+ .oc_event = BFIELD(DA9063_REG_STATUS_D, DA9063_LDO11_LIM),
+ },
+};
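For reference when reading the probe code further below: this table has 19 entries (8 bucks plus 11 LDOs), and the BCORE/BMEM-BIO merge bits decide which of them are usable. A small stand-alone check of that accounting:

    #include <assert.h>

    int main(void)
    {
            int n_entries = 8 + 11;             /* bucks + LDOs in the table */

            /* Both rails merged: drop BCORE1/2 and BMEM/BIO.  */
            assert(n_entries - 2 - 2 == 15);
            /* Neither merged: drop the two *_MERGED entries.  */
            assert(n_entries - 1 - 1 == 17);
            return 0;
    }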
+
+/* Link chip model with regulators info table */
+static struct da9063_dev_model regulators_models[] = {
+ {
+ .regulator_info = da9063_regulator_info,
+ .n_regulators = ARRAY_SIZE(da9063_regulator_info),
+ .dev_model = PMIC_DA9063,
+ },
+ { }
+};
+
+/* Regulator interrupt handlers */
+static irqreturn_t da9063_ldo_lim_event(int irq, void *data)
+{
+ struct da9063_regulators *regulators = data;
+ struct da9063 *hw = regulators->regulator[0].hw;
+ struct da9063_regulator *regl;
+ unsigned int bits;
+ int i, ret;
+
+ ret = regmap_read(hw->regmap, DA9063_REG_STATUS_D, &bits);
+ if (ret < 0)
+ return IRQ_NONE;
+
+ for (i = regulators->n_regulators - 1; i >= 0; i--) {
+ regl = &regulators->regulator[i];
+ if (regl->info->oc_event.reg != DA9063_REG_STATUS_D)
+ continue;
+
+ if (BIT(regl->info->oc_event.lsb) & bits)
+ regulator_notifier_call_chain(regl->rdev,
+ REGULATOR_EVENT_OVER_CURRENT, NULL);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Probing and Initialisation functions
+ */
+static const struct regulator_init_data *da9063_get_regulator_initdata(
+ const struct da9063_regulators_pdata *regl_pdata, int id)
+{
+ int i;
+
+ for (i = 0; i < regl_pdata->n_regulators; i++) {
+ if (id == regl_pdata->regulator_data[i].id)
+ return regl_pdata->regulator_data[i].initdata;
+ }
+
+ return NULL;
+}
+
+#ifdef CONFIG_OF
+static struct of_regulator_match da9063_matches[] = {
+ [DA9063_ID_BCORE1] = { .name = "bcore1" },
+ [DA9063_ID_BCORE2] = { .name = "bcore2" },
+ [DA9063_ID_BPRO] = { .name = "bpro", },
+ [DA9063_ID_BMEM] = { .name = "bmem", },
+ [DA9063_ID_BIO] = { .name = "bio", },
+ [DA9063_ID_BPERI] = { .name = "bperi", },
+ [DA9063_ID_BCORES_MERGED] = { .name = "bcores-merged" },
+ [DA9063_ID_BMEM_BIO_MERGED] = { .name = "bmem-bio-merged", },
+ [DA9063_ID_LDO1] = { .name = "ldo1", },
+ [DA9063_ID_LDO2] = { .name = "ldo2", },
+ [DA9063_ID_LDO3] = { .name = "ldo3", },
+ [DA9063_ID_LDO4] = { .name = "ldo4", },
+ [DA9063_ID_LDO5] = { .name = "ldo5", },
+ [DA9063_ID_LDO6] = { .name = "ldo6", },
+ [DA9063_ID_LDO7] = { .name = "ldo7", },
+ [DA9063_ID_LDO8] = { .name = "ldo8", },
+ [DA9063_ID_LDO9] = { .name = "ldo9", },
+ [DA9063_ID_LDO10] = { .name = "ldo10", },
+ [DA9063_ID_LDO11] = { .name = "ldo11", },
+};
+
+static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
+ struct platform_device *pdev,
+ struct of_regulator_match **da9063_reg_matches)
+{
+ struct da9063_regulators_pdata *pdata;
+ struct da9063_regulator_data *rdata;
+ struct device_node *node;
+ int i, n, num;
+
+ node = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
+ if (!node) {
+ dev_err(&pdev->dev, "Regulators device node not found\n");
+ return ERR_PTR(-ENODEV);
+ }
+
+ num = of_regulator_match(&pdev->dev, node, da9063_matches,
+ ARRAY_SIZE(da9063_matches));
+ if (num < 0) {
+ dev_err(&pdev->dev, "Failed to match regulators\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return ERR_PTR(-ENOMEM);
+
+ pdata->regulator_data = devm_kzalloc(&pdev->dev,
+ num * sizeof(*pdata->regulator_data),
+ GFP_KERNEL);
+ if (!pdata->regulator_data)
+ return ERR_PTR(-ENOMEM);
+ pdata->n_regulators = num;
+
+ n = 0;
+ for (i = 0; i < ARRAY_SIZE(da9063_matches); i++) {
+ if (!da9063_matches[i].init_data)
+ continue;
+
+ rdata = &pdata->regulator_data[n];
+ rdata->id = i;
+ rdata->initdata = da9063_matches[i].init_data;
+
+ n++;
+ }
+
+ *da9063_reg_matches = da9063_matches;
+ return pdata;
+}
+#else
+static struct da9063_regulators_pdata *da9063_parse_regulators_dt(
+ struct platform_device *pdev,
+ struct of_regulator_match **da9063_reg_matches)
+{
+ *da9063_reg_matches = NULL;
+ return ERR_PTR(-ENODEV);
+}
+#endif
+
+static int da9063_regulator_probe(struct platform_device *pdev)
+{
+ struct da9063 *da9063 = dev_get_drvdata(pdev->dev.parent);
+ struct da9063_pdata *da9063_pdata = dev_get_platdata(da9063->dev);
+ struct of_regulator_match *da9063_reg_matches = NULL;
+ struct da9063_regulators_pdata *regl_pdata;
+ const struct da9063_dev_model *model;
+ struct da9063_regulators *regulators;
+ struct da9063_regulator *regl;
+ struct regulator_config config;
+ bool bcores_merged, bmem_bio_merged;
+ int id, irq, n, n_regulators, ret;
+ unsigned int val;
+ size_t size;
+
+ regl_pdata = da9063_pdata ? da9063_pdata->regulators_pdata : NULL;
+
+ if (!regl_pdata)
+ regl_pdata = da9063_parse_regulators_dt(pdev,
+ &da9063_reg_matches);
+
+ if (IS_ERR(regl_pdata) || regl_pdata->n_regulators == 0) {
+ dev_err(&pdev->dev,
+ "No regulators defined for the platform\n");
+ return PTR_ERR(regl_pdata);
+ }
+
+ /* Find regulators set for particular device model */
+ for (model = regulators_models; model->regulator_info; model++) {
+ if (model->dev_model == da9063->model)
+ break;
+ }
+ if (!model->regulator_info) {
+ dev_err(&pdev->dev, "Chip model not recognised (%u)\n",
+ da9063->model);
+ return -ENODEV;
+ }
+
+ ret = regmap_read(da9063->regmap, DA9063_REG_CONFIG_H, &val);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "Error while reading BUCKs configuration\n");
+ return -EIO;
+ }
+ bcores_merged = val & DA9063_BCORE_MERGE;
+ bmem_bio_merged = val & DA9063_BUCK_MERGE;
+
+ n_regulators = model->n_regulators;
+ if (bcores_merged)
+ n_regulators -= 2; /* remove BCORE1, BCORE2 */
+ else
+ n_regulators--; /* remove BCORES_MERGED */
+ if (bmem_bio_merged)
+ n_regulators -= 2; /* remove BMEM, BIO */
+ else
+ n_regulators--; /* remove BMEM_BIO_MERGED */
+
+ /* Allocate memory required by usable regulators */
+ size = sizeof(struct da9063_regulators) +
+ n_regulators * sizeof(struct da9063_regulator);
+ regulators = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+ if (!regulators) {
+ dev_err(&pdev->dev, "No memory for regulators\n");
+ return -ENOMEM;
+ }
+
+ regulators->n_regulators = n_regulators;
+ platform_set_drvdata(pdev, regulators);
+
+ /* Register all regulators declared in platform information */
+ n = 0;
+ id = 0;
+ while (n < regulators->n_regulators) {
+ /* Skip regulator IDs depending on merge mode configuration */
+ switch (id) {
+ case DA9063_ID_BCORE1:
+ case DA9063_ID_BCORE2:
+ if (bcores_merged) {
+ id++;
+ continue;
+ }
+ break;
+ case DA9063_ID_BMEM:
+ case DA9063_ID_BIO:
+ if (bmem_bio_merged) {
+ id++;
+ continue;
+ }
+ break;
+ case DA9063_ID_BCORES_MERGED:
+ if (!bcores_merged) {
+ id++;
+ continue;
+ }
+ break;
+ case DA9063_ID_BMEM_BIO_MERGED:
+ if (!bmem_bio_merged) {
+ id++;
+ continue;
+ }
+ break;
+ }
+
+ /* Initialise regulator structure */
+ regl = &regulators->regulator[n];
+ regl->hw = da9063;
+ regl->info = &model->regulator_info[id];
+ regl->desc = regl->info->desc;
+ regl->desc.type = REGULATOR_VOLTAGE;
+ regl->desc.owner = THIS_MODULE;
+
+ if (regl->info->mode.reg)
+ regl->mode = devm_regmap_field_alloc(&pdev->dev,
+ da9063->regmap, regl->info->mode);
+ if (regl->info->suspend.reg)
+ regl->suspend = devm_regmap_field_alloc(&pdev->dev,
+ da9063->regmap, regl->info->suspend);
+ if (regl->info->sleep.reg)
+ regl->sleep = devm_regmap_field_alloc(&pdev->dev,
+ da9063->regmap, regl->info->sleep);
+ if (regl->info->suspend_sleep.reg)
+ regl->suspend_sleep = devm_regmap_field_alloc(&pdev->dev,
+ da9063->regmap, regl->info->suspend_sleep);
+ if (regl->info->ilimit.reg)
+ regl->ilimit = devm_regmap_field_alloc(&pdev->dev,
+ da9063->regmap, regl->info->ilimit);
+
+ /* Register regulator */
+ memset(&config, 0, sizeof(config));
+ config.dev = &pdev->dev;
+ config.init_data = da9063_get_regulator_initdata(regl_pdata, id);
+ config.driver_data = regl;
+ if (da9063_reg_matches)
+ config.of_node = da9063_reg_matches[id].of_node;
+ config.regmap = da9063->regmap;
+ regl->rdev = regulator_register(&regl->desc, &config);
+ if (IS_ERR(regl->rdev)) {
+ dev_err(&pdev->dev,
+ "Failed to register %s regulator\n",
+ regl->desc.name);
+ ret = PTR_ERR(regl->rdev);
+ goto err;
+ }
+ id++;
+ n++;
+ }
+
+ /* LDOs overcurrent event support */
+ irq = platform_get_irq_byname(pdev, "LDO_LIM");
+ if (irq < 0) {
+ ret = irq;
+ dev_err(&pdev->dev, "Failed to get IRQ.\n");
+ goto err;
+ }
+
+ regulators->irq_ldo_lim = regmap_irq_get_virq(da9063->regmap_irq, irq);
+ if (regulators->irq_ldo_lim >= 0) {
+ ret = request_threaded_irq(regulators->irq_ldo_lim,
+ NULL, da9063_ldo_lim_event,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "LDO_LIM", regulators);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to request LDO_LIM IRQ.\n");
+ regulators->irq_ldo_lim = -ENXIO;
+ }
+ }
+
+ return 0;
+
+err:
+ /* Wind back regulator registration */
+ while (--n >= 0)
+ regulator_unregister(regulators->regulator[n].rdev);
+
+ return ret;
+}
+
+static int da9063_regulator_remove(struct platform_device *pdev)
+{
+ struct da9063_regulators *regulators = platform_get_drvdata(pdev);
+ struct da9063_regulator *regl;
+
+ free_irq(regulators->irq_ldo_lim, regulators);
+ free_irq(regulators->irq_uvov, regulators);
+
+ for (regl = &regulators->regulator[regulators->n_regulators - 1];
+ regl >= &regulators->regulator[0]; regl--)
+ regulator_unregister(regl->rdev);
+
+ return 0;
+}
+
+static struct platform_driver da9063_regulator_driver = {
+ .driver = {
+ .name = DA9063_DRVNAME_REGULATORS,
+ .owner = THIS_MODULE,
+ },
+ .probe = da9063_regulator_probe,
+ .remove = da9063_regulator_remove,
+};
+
+static int __init da9063_regulator_init(void)
+{
+ return platform_driver_register(&da9063_regulator_driver);
+}
+subsys_initcall(da9063_regulator_init);
+
+static void __exit da9063_regulator_cleanup(void)
+{
+ platform_driver_unregister(&da9063_regulator_driver);
+}
+module_exit(da9063_regulator_cleanup);
+
+
+/* Module information */
+MODULE_AUTHOR("Krystian Garbaciak <krystian.garbaciak@diasemi.com>");
+MODULE_DESCRIPTION("DA9063 regulators driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("paltform:" DA9063_DRVNAME_REGULATORS);
diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c
new file mode 100644
index 00000000000..f0fe54b3897
--- /dev/null
+++ b/drivers/regulator/da9210-regulator.c
@@ -0,0 +1,196 @@
+/*
+ * da9210-regulator.c - Regulator device driver for DA9210
+ * Copyright (C) 2013 Dialog Semiconductor Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regmap.h>
+
+#include "da9210-regulator.h"
+
+struct da9210 {
+ struct regulator_dev *rdev;
+ struct regmap *regmap;
+};
+
+static const struct regmap_config da9210_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int da9210_set_current_limit(struct regulator_dev *rdev, int min_uA,
+ int max_uA);
+static int da9210_get_current_limit(struct regulator_dev *rdev);
+
+static struct regulator_ops da9210_buck_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .set_current_limit = da9210_set_current_limit,
+ .get_current_limit = da9210_get_current_limit,
+};
+
+/* Default voltage limits measured in millivolts */
+#define DA9210_MIN_MV 300
+#define DA9210_MAX_MV 1570
+#define DA9210_STEP_MV 10
+
+/* Current limits for the buck (in uA); indices correspond to register values */
+static const int da9210_buck_limits[] = {
+ 1600000, 1800000, 2000000, 2200000, 2400000, 2600000, 2800000, 3000000,
+ 3200000, 3400000, 3600000, 3800000, 4000000, 4200000, 4400000, 4600000
+};
+
+static const struct regulator_desc da9210_reg = {
+ .name = "DA9210",
+ .id = 0,
+ .ops = &da9210_buck_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = ((DA9210_MAX_MV - DA9210_MIN_MV) / DA9210_STEP_MV) + 1,
+ .min_uV = (DA9210_MIN_MV * 1000),
+ .uV_step = (DA9210_STEP_MV * 1000),
+ .vsel_reg = DA9210_REG_VBUCK_A,
+ .vsel_mask = DA9210_VBUCK_MASK,
+ .enable_reg = DA9210_REG_BUCK_CONT,
+ .enable_mask = DA9210_BUCK_EN,
+ .owner = THIS_MODULE,
+};
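A stand-alone cross-check of this descriptor against the register header added below: 300-1570 mV in 10 mV steps gives 128 selectors, exactly the span of the 7-bit DA9210_VBUCK_MASK (0x7F):

    #include <assert.h>

    int main(void)
    {
            assert((1570 - 300) / 10 + 1 == 128);   /* n_voltages        */
            assert(0x7F + 1 == 128);                /* DA9210_VBUCK_MASK */
            return 0;
    }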
+
+static int da9210_set_current_limit(struct regulator_dev *rdev, int min_uA,
+ int max_uA)
+{
+ struct da9210 *chip = rdev_get_drvdata(rdev);
+ unsigned int sel;
+ int i;
+
+ /* search for closest to maximum */
+ for (i = ARRAY_SIZE(da9210_buck_limits)-1; i >= 0; i--) {
+ if (min_uA <= da9210_buck_limits[i] &&
+ max_uA >= da9210_buck_limits[i]) {
+ sel = i;
+ sel = sel << DA9210_BUCK_ILIM_SHIFT;
+ return regmap_update_bits(chip->regmap,
+ DA9210_REG_BUCK_ILIM,
+ DA9210_BUCK_ILIM_MASK, sel);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int da9210_get_current_limit(struct regulator_dev *rdev)
+{
+ struct da9210 *chip = rdev_get_drvdata(rdev);
+ unsigned int data;
+ unsigned int sel;
+ int ret;
+
+ ret = regmap_read(chip->regmap, DA9210_REG_BUCK_ILIM, &data);
+ if (ret < 0)
+ return ret;
+
+ /* select one of 16 values: 0000 (1600mA) to 1111 (4600mA) */
+ sel = (data & DA9210_BUCK_ILIM_MASK) >> DA9210_BUCK_ILIM_SHIFT;
+
+ return da9210_buck_limits[sel];
+}
+
+/*
+ * I2C driver interface functions
+ */
+static int da9210_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct da9210 *chip;
+ struct da9210_pdata *pdata = i2c->dev.platform_data;
+ struct regulator_dev *rdev = NULL;
+ struct regulator_config config = { };
+ int error;
+
+ chip = devm_kzalloc(&i2c->dev, sizeof(struct da9210), GFP_KERNEL);
+ if (NULL == chip) {
+ dev_err(&i2c->dev,
+ "Cannot kzalloc memory for regulator structure\n");
+ return -ENOMEM;
+ }
+
+ chip->regmap = devm_regmap_init_i2c(i2c, &da9210_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ error = PTR_ERR(chip->regmap);
+ dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+ error);
+ return error;
+ }
+
+ config.dev = &i2c->dev;
+ if (pdata)
+ config.init_data = &pdata->da9210_constraints;
+ config.driver_data = chip;
+ config.regmap = chip->regmap;
+
+ rdev = regulator_register(&da9210_reg, &config);
+ if (IS_ERR(rdev)) {
+ dev_err(&i2c->dev, "Failed to register DA9210 regulator\n");
+ return PTR_ERR(rdev);
+ }
+
+ chip->rdev = rdev;
+
+ i2c_set_clientdata(i2c, chip);
+
+ return 0;
+}
+
+static int da9210_i2c_remove(struct i2c_client *i2c)
+{
+ struct da9210 *chip = i2c_get_clientdata(i2c);
+ regulator_unregister(chip->rdev);
+ return 0;
+}
+
+static const struct i2c_device_id da9210_i2c_id[] = {
+ {"da9210", 0},
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, da9210_i2c_id);
+
+static struct i2c_driver da9210_regulator_driver = {
+ .driver = {
+ .name = "da9210",
+ .owner = THIS_MODULE,
+ },
+ .probe = da9210_i2c_probe,
+ .remove = da9210_i2c_remove,
+ .id_table = da9210_i2c_id,
+};
+
+module_i2c_driver(da9210_regulator_driver);
+
+MODULE_AUTHOR("S Twiss <stwiss.opensource@diasemi.com>");
+MODULE_DESCRIPTION("Regulator device driver for Dialog DA9210");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/da9210-regulator.h b/drivers/regulator/da9210-regulator.h
new file mode 100644
index 00000000000..749c550808b
--- /dev/null
+++ b/drivers/regulator/da9210-regulator.h
@@ -0,0 +1,288 @@
+
+/*
+ * da9210-regulator.h - Regulator definitions for DA9210
+ * Copyright (C) 2013 Dialog Semiconductor Ltd.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Library General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Library General Public License for more details.
+ *
+ * You should have received a copy of the GNU Library General Public
+ * License along with this library; if not, write to the
+ * Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __DA9210_REGISTERS_H__
+#define __DA9210_REGISTERS_H__
+
+struct da9210_pdata {
+ struct regulator_init_data da9210_constraints;
+};
+
+/* Page selection */
+#define DA9210_REG_PAGE_CON 0x00
+
+/* System Control and Event Registers */
+#define DA9210_REG_STATUS_A 0x50
+#define DA9210_REG_STATUS_B 0x51
+#define DA9210_REG_EVENT_A 0x52
+#define DA9210_REG_EVENT_B 0x53
+#define DA9210_REG_MASK_A 0x54
+#define DA9210_REG_MASK_B 0x55
+#define DA9210_REG_CONTROL_A 0x56
+
+/* GPIO Control Registers */
+#define DA9210_REG_GPIO_0_1 0x58
+#define DA9210_REG_GPIO_2_3 0x59
+#define DA9210_REG_GPIO_4_5 0x5A
+#define DA9210_REG_GPIO_6 0x5B
+
+/* Regulator Registers */
+#define DA9210_REG_BUCK_CONT 0x5D
+#define DA9210_REG_BUCK_ILIM 0xD0
+#define DA9210_REG_BUCK_CONF1 0xD1
+#define DA9210_REG_BUCK_CONF2 0xD2
+#define DA9210_REG_VBACK_AUTO 0xD4
+#define DA9210_REG_VBACK_BASE 0xD5
+#define DA9210_REG_VBACK_MAX_DVC_IF 0xD6
+#define DA9210_REG_VBACK_DVC 0xD7
+#define DA9210_REG_VBUCK_A 0xD8
+#define DA9210_REG_VBUCK_B 0xD9
+
+/* I2C Interface Settings */
+#define DA9210_REG_INTERFACE 0x105
+
+/* OTP */
+#define DA9210_REG_OPT_COUNT 0x140
+#define DA9210_REG_OPT_ADDR 0x141
+#define DA9210_REG_OPT_DATA 0x142
+
+/* Customer Trim and Configuration */
+#define DA9210_REG_CONFIG_A 0x143
+#define DA9210_REG_CONFIG_B 0x144
+#define DA9210_REG_CONFIG_C 0x145
+#define DA9210_REG_CONFIG_D 0x146
+#define DA9210_REG_CONFIG_E 0x147
+
+
+/*
+ * Registers bits
+ */
+/* DA9210_REG_PAGE_CON (addr=0x00) */
+#define DA9210_REG_PAGE_SHIFT 0
+#define DA9210_REG_PAGE_MASK 0x0F
+/* On I2C registers 0x00 - 0xFF */
+#define DA9210_REG_PAGE0 0
+/* On I2C registers 0x100 - 0x1FF */
+#define DA9210_REG_PAGE2 2
+#define DA9210_PAGE_WRITE_MODE 0x00
+#define DA9210_REPEAT_WRITE_MODE 0x40
+#define DA9210_PAGE_REVERT 0x80
+
+/* DA9210_REG_STATUS_A (addr=0x50) */
+#define DA9210_GPI0 0x01
+#define DA9210_GPI1 0x02
+#define DA9210_GPI2 0x04
+#define DA9210_GPI3 0x08
+#define DA9210_GPI4 0x10
+#define DA9210_GPI5 0x20
+#define DA9210_GPI6 0x40
+
+/* DA9210_REG_EVENT_A (addr=0x52) */
+#define DA9210_E_GPI0 0x01
+#define DA9210_E_GPI1 0x02
+#define DA9210_E_GPI2 0x04
+#define DA9210_E_GPI3 0x08
+#define DA9210_E_GPI4 0x10
+#define DA9210_E_GPI5 0x20
+#define DA9210_E_GPI6 0x40
+
+/* DA9210_REG_EVENT_B (addr=0x53) */
+#define DA9210_E_OVCURR 0x01
+#define DA9210_E_NPWRGOOD 0x02
+#define DA9210_E_TEMP_WARN 0x04
+#define DA9210_E_TEMP_CRIT 0x08
+#define DA9210_E_VMAX 0x10
+
+/* DA9210_REG_MASK_A (addr=0x54) */
+#define DA9210_M_GPI0 0x01
+#define DA9210_M_GPI1 0x02
+#define DA9210_M_GPI2 0x04
+#define DA9210_M_GPI3 0x08
+#define DA9210_M_GPI4 0x10
+#define DA9210_M_GPI5 0x20
+#define DA9210_M_GPI6 0x40
+
+/* DA9210_REG_MASK_B (addr=0x55) */
+#define DA9210_M_OVCURR 0x01
+#define DA9210_M_NPWRGOOD 0x02
+#define DA9210_M_TEMP_WARN 0x04
+#define DA9210_M_TEMP_CRIT 0x08
+#define DA9210_M_VMAX 0x10
+
+/* DA9210_REG_CONTROL_A (addr=0x56) */
+#define DA9210_DEBOUNCING_SHIFT 0
+#define DA9210_DEBOUNCING_MASK 0x07
+#define DA9210_SLEW_RATE_SHIFT 3
+#define DA9210_SLEW_RATE_MASK 0x18
+#define DA9210_V_LOCK 0x20
+
+/* DA9210_REG_GPIO_0_1 (addr=0x58) */
+#define DA9210_GPIO0_PIN_SHIFT 0
+#define DA9210_GPIO0_PIN_MASK 0x03
+#define DA9210_GPIO0_PIN_GPI 0x00
+#define DA9210_GPIO0_PIN_GPO_OD 0x02
+#define DA9210_GPIO0_PIN_GPO 0x03
+#define DA9210_GPIO0_TYPE 0x04
+#define DA9210_GPIO0_TYPE_GPI 0x00
+#define DA9210_GPIO0_TYPE_GPO 0x04
+#define DA9210_GPIO0_MODE 0x08
+#define DA9210_GPIO1_PIN_SHIFT 4
+#define DA9210_GPIO1_PIN_MASK 0x30
+#define DA9210_GPIO1_PIN_GPI 0x00
+#define DA9210_GPIO1_PIN_VERROR 0x10
+#define DA9210_GPIO1_PIN_GPO_OD 0x20
+#define DA9210_GPIO1_PIN_GPO 0x30
+#define DA9210_GPIO1_TYPE_SHIFT 0x40
+#define DA9210_GPIO1_TYPE_GPI 0x00
+#define DA9210_GPIO1_TYPE_GPO 0x40
+#define DA9210_GPIO1_MODE 0x80
+
+/* DA9210_REG_GPIO_2_3 (addr=0x59) */
+#define DA9210_GPIO2_PIN_SHIFT 0
+#define DA9210_GPIO2_PIN_MASK 0x03
+#define DA9210_GPIO2_PIN_GPI 0x00
+#define DA9210_GPIO5_PIN_BUCK_CLK 0x10
+#define DA9210_GPIO2_PIN_GPO_OD 0x02
+#define DA9210_GPIO2_PIN_GPO 0x03
+#define DA9210_GPIO2_TYPE 0x04
+#define DA9210_GPIO2_TYPE_GPI 0x00
+#define DA9210_GPIO2_TYPE_GPO 0x04
+#define DA9210_GPIO2_MODE 0x08
+#define DA9210_GPIO3_PIN_SHIFT 4
+#define DA9210_GPIO3_PIN_MASK 0x30
+#define DA9210_GPIO3_PIN_GPI 0x00
+#define DA9210_GPIO3_PIN_IERROR 0x10
+#define DA9210_GPIO3_PIN_GPO_OD 0x20
+#define DA9210_GPIO3_PIN_GPO 0x30
+#define DA9210_GPIO3_TYPE_SHIFT 0x40
+#define DA9210_GPIO3_TYPE_GPI 0x00
+#define DA9210_GPIO3_TYPE_GPO 0x40
+#define DA9210_GPIO3_MODE 0x80
+
+/* DA9210_REG_GPIO_4_5 (addr=0x5A) */
+#define DA9210_GPIO4_PIN_SHIFT 0
+#define DA9210_GPIO4_PIN_MASK 0x03
+#define DA9210_GPIO4_PIN_GPI 0x00
+#define DA9210_GPIO4_PIN_GPO_OD 0x02
+#define DA9210_GPIO4_PIN_GPO 0x03
+#define DA9210_GPIO4_TYPE 0x04
+#define DA9210_GPIO4_TYPE_GPI 0x00
+#define DA9210_GPIO4_TYPE_GPO 0x04
+#define DA9210_GPIO4_MODE 0x08
+#define DA9210_GPIO5_PIN_SHIFT 4
+#define DA9210_GPIO5_PIN_MASK 0x30
+#define DA9210_GPIO5_PIN_GPI 0x00
+#define DA9210_GPIO5_PIN_INTERFACE 0x01
+#define DA9210_GPIO5_PIN_GPO_OD 0x20
+#define DA9210_GPIO5_PIN_GPO 0x30
+#define DA9210_GPIO5_TYPE_SHIFT 0x40
+#define DA9210_GPIO5_TYPE_GPI 0x00
+#define DA9210_GPIO5_TYPE_GPO 0x40
+#define DA9210_GPIO5_MODE 0x80
+
+/* DA9210_REG_GPIO_6 (addr=0x5B) */
+#define DA9210_GPIO6_PIN_SHIFT 0
+#define DA9210_GPIO6_PIN_MASK 0x03
+#define DA9210_GPIO6_PIN_GPI 0x00
+#define DA9210_GPIO6_PIN_INTERFACE 0x01
+#define DA9210_GPIO6_PIN_GPO_OD 0x02
+#define DA9210_GPIO6_PIN_GPO 0x03
+#define DA9210_GPIO6_TYPE 0x04
+#define DA9210_GPIO6_TYPE_GPI 0x00
+#define DA9210_GPIO6_TYPE_GPO 0x04
+#define DA9210_GPIO6_MODE 0x08
+
+/* DA9210_REG_BUCK_CONT (addr=0x5D) */
+#define DA9210_BUCK_EN 0x01
+#define DA9210_BUCK_GPI_SHIFT 1
+#define DA9210_BUCK_GPI_MASK 0x06
+#define DA9210_BUCK_GPI_OFF 0x00
+#define DA9210_BUCK_GPI_GPIO0 0x02
+#define DA9210_BUCK_GPI_GPIO3 0x04
+#define DA9210_BUCK_GPI_GPIO4 0x06
+#define DA9210_BUCK_PD_DIS 0x08
+#define DA9210_VBUCK_SEL 0x10
+#define DA9210_VBUCK_SEL_A 0x00
+#define DA9210_VBUCK_SEL_B 0x10
+#define DA9210_VBUCK_GPI_SHIFT 5
+#define DA9210_VBUCK_GPI_MASK 0x60
+#define DA9210_VBUCK_GPI_OFF 0x00
+#define DA9210_VBUCK_GPI_GPIO0 0x20
+#define DA9210_VBUCK_GPI_GPIO3 0x40
+#define DA9210_VBUCK_GPI_GPIO4 0x60
+#define DA9210_DVC_CTRL_EN 0x80
+
+/* DA9210_REG_BUCK_ILIM (addr=0xD0) */
+#define DA9210_BUCK_ILIM_SHIFT 0
+#define DA9210_BUCK_ILIM_MASK 0x0F
+#define DA9210_BUCK_IALARM 0x10
+
+/* DA9210_REG_BUCK_CONF1 (addr=0xD1) */
+#define DA9210_BUCK_MODE_SHIFT 0
+#define DA9210_BUCK_MODE_MASK 0x03
+#define DA9210_BUCK_MODE_MANUAL 0x00
+#define DA9210_BUCK_MODE_SLEEP 0x01
+#define DA9210_BUCK_MODE_SYNC 0x02
+#define DA9210_BUCK_MODE_AUTO 0x03
+#define DA9210_STARTUP_CTRL_SHIFT 2
+#define DA9210_STARTUP_CTRL_MASK 0x1C
+#define DA9210_PWR_DOWN_CTRL_SHIFT 5
+#define DA9210_PWR_DOWN_CTRL_MASK 0xE0
+
+/* DA9210_REG_BUCK_CONF2 (addr=0xD2) */
+#define DA9210_PHASE_SEL_SHIFT 0
+#define DA9210_PHASE_SEL_MASK 0x03
+#define DA9210_FREQ_SEL 0x40
+
+/* DA9210_REG_VBACK_AUTO (addr=0xD4) */
+#define DA9210_VBUCK_AUTO_SHIFT 0
+#define DA9210_VBUCK_AUTO_MASK 0x7F
+
+/* DA9210_REG_VBACK_BASE (addr=0xD5) */
+#define DA9210_VBUCK_BASE_SHIFT 0
+#define DA9210_VBUCK_BASE_MASK 0x7F
+
+/* DA9210_REG_VBUCK_MAX_DVC_IF (addr=0xD6) */
+#define DA9210_VBUCK_MAX_SHIFT 0
+#define DA9210_VBUCK_MAX_MASK 0x7F
+#define DA9210_DVC_STEP_SIZE 0x80
+#define DA9210_DVC_STEP_SIZE_10MV 0x00
+#define DA9210_DVC_STEP_SIZE_20MV 0x80
+
+/* DA9210_REG_VBUCK_DVC (addr=0xD7) */
+#define DA9210_VBUCK_DVC_SHIFT 0
+#define DA9210_VBUCK_DVC_MASK 0x7F
+
+/* DA9210_REG_VBUCK_A/B (addr=0xD8/0xD9) */
+#define DA9210_VBUCK_SHIFT 0
+#define DA9210_VBUCK_MASK 0x7F
+#define DA9210_VBUCK_BIAS 0
+#define DA9210_BUCK_SL 0x80
+
+/* DA9210_REG_INTERFACE (addr=0x105) */
+#define DA9210_IF_BASE_ADDR_SHIFT 4
+#define DA9210_IF_BASE_ADDR_MASK 0xF0
+
+/* DA9210_REG_CONFIG_E (addr=0x147) */
+#define DA9210_STAND_ALONE 0x01
+
+#endif /* __DA9210_REGISTERS_H__ */
+
diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c
index f0e1ae52bb0..70b7220c587 100644
--- a/drivers/regulator/fan53555.c
+++ b/drivers/regulator/fan53555.c
@@ -219,7 +219,7 @@ static int fan53555_regulator_register(struct fan53555_device_info *di,
rdesc->owner = THIS_MODULE;
di->rdev = regulator_register(&di->desc, config);
- return PTR_RET(di->rdev);
+ return PTR_ERR_OR_ZERO(di->rdev);
}
@@ -237,7 +237,7 @@ static int fan53555_regulator_probe(struct i2c_client *client,
unsigned int val;
int ret;
- pdata = client->dev.platform_data;
+ pdata = dev_get_platdata(&client->dev);
if (!pdata || !pdata->regulator) {
dev_err(&client->dev, "Platform data not found!\n");
return -ENODEV;
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index e5c03b534fa..7610920014d 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -146,7 +146,7 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
if (IS_ERR(config))
return PTR_ERR(config);
} else {
- config = pdev->dev.platform_data;
+ config = dev_get_platdata(&pdev->dev);
}
if (!config)
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 9d39eb4aafa..98a98ffa7fe 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -219,7 +219,7 @@ static struct regulator_ops gpio_regulator_current_ops = {
static int gpio_regulator_probe(struct platform_device *pdev)
{
- struct gpio_regulator_config *config = pdev->dev.platform_data;
+ struct gpio_regulator_config *config = dev_get_platdata(&pdev->dev);
struct device_node *np = pdev->dev.of_node;
struct gpio_regulator_data *drvdata;
struct regulator_config cfg = { };
diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c
new file mode 100644
index 00000000000..6e30df14714
--- /dev/null
+++ b/drivers/regulator/helpers.c
@@ -0,0 +1,447 @@
+/*
+ * helpers.c -- Voltage/Current Regulator framework helper functions.
+ *
+ * Copyright 2007, 2008 Wolfson Microelectronics PLC.
+ * Copyright 2008 SlimLogic Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/module.h>
+
+/**
+ * regulator_is_enabled_regmap - standard is_enabled() for regmap users
+ *
+ * @rdev: regulator to operate on
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * enable_reg and enable_mask fields in their descriptor and then use
+ * this as their is_enabled operation, saving some code.
+ */
+int regulator_is_enabled_regmap(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val);
+ if (ret != 0)
+ return ret;
+
+ if (rdev->desc->enable_is_inverted)
+ return (val & rdev->desc->enable_mask) == 0;
+ else
+ return (val & rdev->desc->enable_mask) != 0;
+}
+EXPORT_SYMBOL_GPL(regulator_is_enabled_regmap);
+
+/**
+ * regulator_enable_regmap - standard enable() for regmap users
+ *
+ * @rdev: regulator to operate on
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * enable_reg and enable_mask fields in their descriptor and then use
+ * this as their enable() operation, saving some code.
+ */
+int regulator_enable_regmap(struct regulator_dev *rdev)
+{
+ unsigned int val;
+
+ if (rdev->desc->enable_is_inverted)
+ val = 0;
+ else
+ val = rdev->desc->enable_mask;
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ rdev->desc->enable_mask, val);
+}
+EXPORT_SYMBOL_GPL(regulator_enable_regmap);
+
+/**
+ * regulator_disable_regmap - standard disable() for regmap users
+ *
+ * @rdev: regulator to operate on
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * enable_reg and enable_mask fields in their descriptor and then use
+ * this as their disable() operation, saving some code.
+ */
+int regulator_disable_regmap(struct regulator_dev *rdev)
+{
+ unsigned int val;
+
+ if (rdev->desc->enable_is_inverted)
+ val = rdev->desc->enable_mask;
+ else
+ val = 0;
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
+ rdev->desc->enable_mask, val);
+}
+EXPORT_SYMBOL_GPL(regulator_disable_regmap);
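A minimal sketch of how a driver wires up these three helpers, for a hypothetical regmap-backed regulator (register address and mask are made up; the drivers in this patch, e.g. da9210, do the same with their real registers):

    #include <linux/module.h>
    #include <linux/regulator/driver.h>

    /* Hypothetical device: only enable_reg/enable_mask need to be filled in
     * for the enable/disable/is_enabled helpers above to work. */
    static struct regulator_ops example_ops = {
            .enable         = regulator_enable_regmap,
            .disable        = regulator_disable_regmap,
            .is_enabled     = regulator_is_enabled_regmap,
    };

    static const struct regulator_desc example_desc = {
            .name           = "example-ldo",
            .ops            = &example_ops,
            .type           = REGULATOR_VOLTAGE,
            .owner          = THIS_MODULE,
            .enable_reg     = 0x10,         /* made-up register address */
            .enable_mask    = 0x01,
            /* set .enable_is_inverted when 0 means "enabled" */
    };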
+
+/**
+ * regulator_get_voltage_sel_regmap - standard get_voltage_sel for regmap users
+ *
+ * @rdev: regulator to operate on
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * vsel_reg and vsel_mask fields in their descriptor and then use this
+ * as their get_voltage_sel operation, saving some code.
+ */
+int regulator_get_voltage_sel_regmap(struct regulator_dev *rdev)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val);
+ if (ret != 0)
+ return ret;
+
+ val &= rdev->desc->vsel_mask;
+ val >>= ffs(rdev->desc->vsel_mask) - 1;
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(regulator_get_voltage_sel_regmap);
+
+/**
+ * regulator_set_voltage_sel_regmap - standard set_voltage_sel for regmap users
+ *
+ * @rdev: regulator to operate on
+ * @sel: Selector to set
+ *
+ * Regulators that use regmap for their register I/O can set the
+ * vsel_reg and vsel_mask fields in their descriptor and then use this
+ * as their set_voltage_sel operation, saving some code.
+ */
+int regulator_set_voltage_sel_regmap(struct regulator_dev *rdev, unsigned sel)
+{
+ int ret;
+
+ sel <<= ffs(rdev->desc->vsel_mask) - 1;
+
+ ret = regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg,
+ rdev->desc->vsel_mask, sel);
+ if (ret)
+ return ret;
+
+ if (rdev->desc->apply_bit)
+ ret = regmap_update_bits(rdev->regmap, rdev->desc->apply_reg,
+ rdev->desc->apply_bit,
+ rdev->desc->apply_bit);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_set_voltage_sel_regmap);
+
+/**
+ * regulator_map_voltage_iterate - map_voltage() based on list_voltage()
+ *
+ * @rdev: Regulator to operate on
+ * @min_uV: Lower bound for voltage
+ * @max_uV: Upper bound for voltage
+ *
+ * Drivers implementing set_voltage_sel() and list_voltage() can use
+ * this as their map_voltage() operation. It will find a suitable
+ * voltage by calling list_voltage() until it gets something in bounds
+ * for the requested voltages.
+ */
+int regulator_map_voltage_iterate(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ int best_val = INT_MAX;
+ int selector = 0;
+ int i, ret;
+
+ /* Find the smallest voltage that falls within the specified
+ * range.
+ */
+ for (i = 0; i < rdev->desc->n_voltages; i++) {
+ ret = rdev->desc->ops->list_voltage(rdev, i);
+ if (ret < 0)
+ continue;
+
+ if (ret < best_val && ret >= min_uV && ret <= max_uV) {
+ best_val = ret;
+ selector = i;
+ }
+ }
+
+ if (best_val != INT_MAX)
+ return selector;
+ else
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(regulator_map_voltage_iterate);
+
+/**
+ * regulator_map_voltage_ascend - map_voltage() for an ascending voltage list
+ *
+ * @rdev: Regulator to operate on
+ * @min_uV: Lower bound for voltage
+ * @max_uV: Upper bound for voltage
+ *
+ * Drivers that have an ascending voltage list can use this as their
+ * map_voltage() operation.
+ */
+int regulator_map_voltage_ascend(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ int i, ret;
+
+ for (i = 0; i < rdev->desc->n_voltages; i++) {
+ ret = rdev->desc->ops->list_voltage(rdev, i);
+ if (ret < 0)
+ continue;
+
+ if (ret > max_uV)
+ break;
+
+ if (ret >= min_uV && ret <= max_uV)
+ return i;
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(regulator_map_voltage_ascend);
+
+/**
+ * regulator_map_voltage_linear - map_voltage() for simple linear mappings
+ *
+ * @rdev: Regulator to operate on
+ * @min_uV: Lower bound for voltage
+ * @max_uV: Upper bound for voltage
+ *
+ * Drivers providing min_uV and uV_step in their regulator_desc can
+ * use this as their map_voltage() operation.
+ */
+int regulator_map_voltage_linear(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ int ret, voltage;
+
+ /* Allow uV_step to be 0 for fixed voltage */
+ if (rdev->desc->n_voltages == 1 && rdev->desc->uV_step == 0) {
+ if (min_uV <= rdev->desc->min_uV && rdev->desc->min_uV <= max_uV)
+ return 0;
+ else
+ return -EINVAL;
+ }
+
+ if (!rdev->desc->uV_step) {
+ BUG_ON(!rdev->desc->uV_step);
+ return -EINVAL;
+ }
+
+ if (min_uV < rdev->desc->min_uV)
+ min_uV = rdev->desc->min_uV;
+
+ ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step);
+ if (ret < 0)
+ return ret;
+
+ ret += rdev->desc->linear_min_sel;
+
+ /* Map back into a voltage to verify we're still in bounds */
+ voltage = rdev->desc->ops->list_voltage(rdev, ret);
+ if (voltage < min_uV || voltage > max_uV)
+ return -EINVAL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_map_voltage_linear);
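A worked stand-alone example of the rounding above, for a hypothetical descriptor with min_uV = 300000, uV_step = 10000 and linear_min_sel = 0:

    #include <assert.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            int min_uV = 1154000, desc_min_uV = 300000, step = 10000;
            int sel = DIV_ROUND_UP(min_uV - desc_min_uV, step);

            assert(sel == 86);                              /* rounds up        */
            assert(desc_min_uV + step * sel == 1160000);    /* list_voltage()   */
            return 0;
    }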
+
+/**
+ * regulator_map_voltage_linear - map_voltage() for multiple linear ranges
+ *
+ * @rdev: Regulator to operate on
+ * @min_uV: Lower bound for voltage
+ * @max_uV: Upper bound for voltage
+ *
+ * Drivers providing linear_ranges in their descriptor can use this as
+ * their map_voltage() callback.
+ */
+int regulator_map_voltage_linear_range(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ const struct regulator_linear_range *range;
+ int ret = -EINVAL;
+ int voltage, i;
+
+ if (!rdev->desc->n_linear_ranges) {
+ BUG_ON(!rdev->desc->n_linear_ranges);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < rdev->desc->n_linear_ranges; i++) {
+ range = &rdev->desc->linear_ranges[i];
+
+ if (!(min_uV <= range->max_uV && max_uV >= range->min_uV))
+ continue;
+
+ if (min_uV <= range->min_uV)
+ min_uV = range->min_uV;
+
+ /* range->uV_step == 0 means fixed voltage range */
+ if (range->uV_step == 0) {
+ ret = 0;
+ } else {
+ ret = DIV_ROUND_UP(min_uV - range->min_uV,
+ range->uV_step);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret += range->min_sel;
+
+ break;
+ }
+
+ if (i == rdev->desc->n_linear_ranges)
+ return -EINVAL;
+
+ /* Map back into a voltage to verify we're still in bounds */
+ voltage = rdev->desc->ops->list_voltage(rdev, ret);
+ if (voltage < min_uV || voltage > max_uV)
+ return -EINVAL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_map_voltage_linear_range);
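A worked stand-alone example against the da9034_ldo12_ranges table introduced earlier in this patch (selectors 0-7 cover 1.70-2.05 V, selectors 8-15 cover 2.70-3.05 V, both in 50 mV steps): asking for at least 2.8 V lands in the second range.

    #include <assert.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

    int main(void)
    {
            /* Second range of da9034_ldo12_ranges. */
            int range_min_uV = 2700000, range_min_sel = 8, step = 50000;
            int min_uV = 2800000;
            int sel = DIV_ROUND_UP(min_uV - range_min_uV, step) + range_min_sel;

            assert(sel == 10);
            assert(range_min_uV + (sel - range_min_sel) * step == 2800000);
            return 0;
    }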
+
+/**
+ * regulator_list_voltage_linear - List voltages with simple calculation
+ *
+ * @rdev: Regulator device
+ * @selector: Selector to convert into a voltage
+ *
+ * Regulators with a simple linear mapping between voltages and
+ * selectors can set min_uV and uV_step in the regulator descriptor
+ * and then use this function as their list_voltage() operation,
+ */
+int regulator_list_voltage_linear(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ if (selector >= rdev->desc->n_voltages)
+ return -EINVAL;
+ if (selector < rdev->desc->linear_min_sel)
+ return 0;
+
+ selector -= rdev->desc->linear_min_sel;
+
+ return rdev->desc->min_uV + (rdev->desc->uV_step * selector);
+}
+EXPORT_SYMBOL_GPL(regulator_list_voltage_linear);
+
+/**
+ * regulator_list_voltage_linear_range - List voltages for linear ranges
+ *
+ * @rdev: Regulator device
+ * @selector: Selector to convert into a voltage
+ *
+ * Regulators with a series of simple linear mappings between voltages
+ * and selectors can set linear_ranges in the regulator descriptor and
+ * then use this function as their list_voltage() operation.
+ */
+int regulator_list_voltage_linear_range(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ const struct regulator_linear_range *range;
+ int i;
+
+ if (!rdev->desc->n_linear_ranges) {
+ BUG_ON(!rdev->desc->n_linear_ranges);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < rdev->desc->n_linear_ranges; i++) {
+ range = &rdev->desc->linear_ranges[i];
+
+ if (!(selector >= range->min_sel &&
+ selector <= range->max_sel))
+ continue;
+
+ selector -= range->min_sel;
+
+ return range->min_uV + (range->uV_step * selector);
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(regulator_list_voltage_linear_range);
+
+/**
+ * regulator_list_voltage_table - List voltages with table based mapping
+ *
+ * @rdev: Regulator device
+ * @selector: Selector to convert into a voltage
+ *
+ * Regulators with table based mapping between voltages and
+ * selectors can set volt_table in the regulator descriptor
+ * and then use this function as their list_voltage() operation.
+ */
+int regulator_list_voltage_table(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ if (!rdev->desc->volt_table) {
+ BUG_ON(!rdev->desc->volt_table);
+ return -EINVAL;
+ }
+
+ if (selector >= rdev->desc->n_voltages)
+ return -EINVAL;
+
+ return rdev->desc->volt_table[selector];
+}
+EXPORT_SYMBOL_GPL(regulator_list_voltage_table);
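A minimal sketch of a table-based descriptor that would use this helper (device, voltages and name are hypothetical):

    #include <linux/kernel.h>
    #include <linux/module.h>
    #include <linux/regulator/driver.h>

    /* Hypothetical fixed voltage table, kept ascending so that
     * regulator_map_voltage_ascend() can serve map_voltage(). */
    static const unsigned int example_volt_table[] = {
            1800000, 2500000, 2800000, 3300000,
    };

    static struct regulator_ops example_table_ops = {
            .list_voltage   = regulator_list_voltage_table,
            .map_voltage    = regulator_map_voltage_ascend,
    };

    static const struct regulator_desc example_table_desc = {
            .name           = "example-table-ldo",
            .ops            = &example_table_ops,
            .type           = REGULATOR_VOLTAGE,
            .owner          = THIS_MODULE,
            .volt_table     = example_volt_table,
            .n_voltages     = ARRAY_SIZE(example_volt_table),
    };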
+
+/**
+ * regulator_set_bypass_regmap - Default set_bypass() using regmap
+ *
+ * @rdev: device to operate on.
+ * @enable: state to set.
+ */
+int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable)
+{
+ unsigned int val;
+
+ if (enable)
+ val = rdev->desc->bypass_mask;
+ else
+ val = 0;
+
+ return regmap_update_bits(rdev->regmap, rdev->desc->bypass_reg,
+ rdev->desc->bypass_mask, val);
+}
+EXPORT_SYMBOL_GPL(regulator_set_bypass_regmap);
+
+/**
+ * regulator_get_bypass_regmap - Default get_bypass() using regmap
+ *
+ * @rdev: device to operate on.
+ * @enable: current state.
+ */
+int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(rdev->regmap, rdev->desc->bypass_reg, &val);
+ if (ret != 0)
+ return ret;
+
+ *enable = val & rdev->desc->bypass_mask;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(regulator_get_bypass_regmap);
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index b99c49b9aff..88c1a3acf56 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -110,7 +110,7 @@ static int isl6271a_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct regulator_config config = { };
- struct regulator_init_data *init_data = i2c->dev.platform_data;
+ struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev);
struct isl_pmic *pmic;
int err, i;
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 3809b438160..5a4604ee5ea 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -425,7 +425,7 @@ static int lp3971_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct lp3971 *lp3971;
- struct lp3971_platform_data *pdata = i2c->dev.platform_data;
+ struct lp3971_platform_data *pdata = dev_get_platdata(&i2c->dev);
int ret;
u16 val;
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index 573024039ca..093e6f44ff8 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -519,7 +519,7 @@ static int lp3972_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct lp3972 *lp3972;
- struct lp3972_platform_data *pdata = i2c->dev.platform_data;
+ struct lp3972_platform_data *pdata = dev_get_platdata(&i2c->dev);
int ret;
u16 val;
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index b16336bcd4d..2b84b727a3c 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -373,7 +373,7 @@ static int lp8725_buck_set_current_limit(struct regulator_dev *rdev,
return -EINVAL;
}
- for (i = ARRAY_SIZE(lp8725_buck_uA) - 1 ; i >= 0; i--) {
+ for (i = ARRAY_SIZE(lp8725_buck_uA) - 1; i >= 0; i--) {
if (lp8725_buck_uA[i] >= min_uA &&
lp8725_buck_uA[i] <= max_uA)
return lp872x_update_bits(lp, addr,
@@ -787,7 +787,7 @@ static int lp872x_regulator_register(struct lp872x *lp)
struct regulator_dev *rdev;
int i, ret;
- for (i = 0 ; i < lp->num_regulators ; i++) {
+ for (i = 0; i < lp->num_regulators; i++) {
desc = (lp->chipid == LP8720) ? &lp8720_regulator_desc[i] :
&lp8725_regulator_desc[i];
@@ -820,7 +820,7 @@ static void lp872x_regulator_unregister(struct lp872x *lp)
struct regulator_dev *rdev;
int i;
- for (i = 0 ; i < lp->num_regulators ; i++) {
+ for (i = 0; i < lp->num_regulators; i++) {
rdev = *(lp->regulators + i);
regulator_unregister(rdev);
}
@@ -907,7 +907,8 @@ static struct lp872x_platform_data
goto out;
for (i = 0; i < num_matches; i++) {
- pdata->regulator_data[i].id = (int)match[i].driver_data;
+ pdata->regulator_data[i].id =
+ (enum lp872x_regulator_id)match[i].driver_data;
pdata->regulator_data[i].init_data = match[i].init_data;
/* Operation mode configuration for buck/buck1/buck2 */
@@ -961,7 +962,7 @@ static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id)
}
lp->dev = &cl->dev;
- lp->pdata = cl->dev.platform_data;
+ lp->pdata = dev_get_platdata(&cl->dev);
lp->chipid = id->driver_data;
lp->num_regulators = num_regulators;
i2c_set_clientdata(cl, lp);
diff --git a/drivers/regulator/lp8755.c b/drivers/regulator/lp8755.c
index d9e38b4c2ad..785a25e9a43 100644
--- a/drivers/regulator/lp8755.c
+++ b/drivers/regulator/lp8755.c
@@ -228,6 +228,7 @@ err_i2c:
}
static struct regulator_ops lp8755_buck_ops = {
+ .map_voltage = regulator_map_voltage_linear,
.list_voltage = regulator_list_voltage_linear,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
@@ -449,7 +450,7 @@ static int lp8755_probe(struct i2c_client *client,
{
int ret, icnt;
struct lp8755_chip *pchip;
- struct lp8755_platform_data *pdata = client->dev.platform_data;
+ struct lp8755_platform_data *pdata = dev_get_platdata(&client->dev);
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(&client->dev, "i2c functionality check fail.\n");
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 54af6101581..3a599ee0a45 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -163,7 +163,7 @@ static int max1586_pmic_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
{
struct regulator_dev **rdev;
- struct max1586_platform_data *pdata = client->dev.platform_data;
+ struct max1586_platform_data *pdata = dev_get_platdata(&client->dev);
struct regulator_config config = { };
struct max1586_data *max1586;
int i, id, ret = -ENOMEM;
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index db6c9be10f3..19c6f08eafd 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -152,7 +152,7 @@ static struct regmap_config max8649_regmap_config = {
static int max8649_regulator_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct max8649_platform_data *pdata = client->dev.platform_data;
+ struct max8649_platform_data *pdata = dev_get_platdata(&client->dev);
struct max8649_regulator_info *info = NULL;
struct regulator_config config = { };
unsigned int val;
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index d428ef9a626..144bcacd734 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -44,6 +44,9 @@
#include <linux/regulator/driver.h>
#include <linux/slab.h>
#include <linux/regulator/max8660.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/of_regulator.h>
#define MAX8660_DCDC_MIN_UV 725000
#define MAX8660_DCDC_MAX_UV 1800000
@@ -305,21 +308,105 @@ static const struct regulator_desc max8660_reg[] = {
},
};
+enum {
+ MAX8660 = 0,
+ MAX8661 = 1,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id max8660_dt_ids[] = {
+ { .compatible = "maxim,max8660", .data = (void *) MAX8660 },
+ { .compatible = "maxim,max8661", .data = (void *) MAX8661 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, max8660_dt_ids);
+
+static int max8660_pdata_from_dt(struct device *dev,
+ struct device_node **of_node,
+ struct max8660_platform_data *pdata)
+{
+ int matched, i;
+ struct device_node *np;
+ struct max8660_subdev_data *sub;
+ struct of_regulator_match rmatch[ARRAY_SIZE(max8660_reg)];
+
+ np = of_find_node_by_name(dev->of_node, "regulators");
+ if (!np) {
+ dev_err(dev, "missing 'regulators' subnode in DT\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rmatch); i++)
+ rmatch[i].name = max8660_reg[i].name;
+
+ matched = of_regulator_match(dev, np, rmatch, ARRAY_SIZE(rmatch));
+ if (matched <= 0)
+ return matched;
+
+ pdata->subdevs = devm_kzalloc(dev, sizeof(struct max8660_subdev_data) *
+ matched, GFP_KERNEL);
+ if (!pdata->subdevs)
+ return -ENOMEM;
+
+ pdata->num_subdevs = matched;
+ sub = pdata->subdevs;
+
+ for (i = 0; i < matched; i++) {
+ sub->id = i;
+ sub->name = rmatch[i].name;
+ sub->platform_data = rmatch[i].init_data;
+ of_node[i] = rmatch[i].of_node;
+ sub++;
+ }
+
+ return 0;
+}
+#else
+static inline int max8660_pdata_from_dt(struct device *dev,
+ struct device_node **of_node,
+ struct max8660_platform_data *pdata)
+{
+ return 0;
+}
+#endif
+
static int max8660_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
{
struct regulator_dev **rdev;
- struct max8660_platform_data *pdata = client->dev.platform_data;
+ struct device *dev = &client->dev;
+ struct max8660_platform_data *pdata = dev_get_platdata(dev);
struct regulator_config config = { };
struct max8660 *max8660;
int boot_on, i, id, ret = -EINVAL;
+ struct device_node *of_node[MAX8660_V_END];
+ unsigned long type;
+
+ if (dev->of_node && !pdata) {
+ const struct of_device_id *id;
+ struct max8660_platform_data pdata_of;
+
+ id = of_match_device(of_match_ptr(max8660_dt_ids), dev);
+ if (!id)
+ return -ENODEV;
+
+ ret = max8660_pdata_from_dt(dev, of_node, &pdata_of);
+ if (ret < 0)
+ return ret;
+
+ pdata = &pdata_of;
+ type = (unsigned long) id->data;
+ } else {
+ type = i2c_id->driver_data;
+ memset(of_node, 0, sizeof(of_node));
+ }
if (pdata->num_subdevs > MAX8660_V_END) {
- dev_err(&client->dev, "Too many regulators found!\n");
+ dev_err(dev, "Too many regulators found!\n");
return -EINVAL;
}
- max8660 = devm_kzalloc(&client->dev, sizeof(struct max8660) +
+ max8660 = devm_kzalloc(dev, sizeof(struct max8660) +
sizeof(struct regulator_dev *) * MAX8660_V_END,
GFP_KERNEL);
if (!max8660)
@@ -376,8 +463,8 @@ static int max8660_probe(struct i2c_client *client,
break;
case MAX8660_V7:
- if (!strcmp(i2c_id->name, "max8661")) {
- dev_err(&client->dev, "Regulator not on this chip!\n");
+ if (type == MAX8661) {
+ dev_err(dev, "Regulator not on this chip!\n");
goto err_out;
}
@@ -386,7 +473,7 @@ static int max8660_probe(struct i2c_client *client,
break;
default:
- dev_err(&client->dev, "invalid regulator %s\n",
+ dev_err(dev, "invalid regulator %s\n",
pdata->subdevs[i].name);
goto err_out;
}
@@ -397,14 +484,15 @@ static int max8660_probe(struct i2c_client *client,
id = pdata->subdevs[i].id;
- config.dev = &client->dev;
+ config.dev = dev;
config.init_data = pdata->subdevs[i].platform_data;
+ config.of_node = of_node[i];
config.driver_data = max8660;
rdev[i] = regulator_register(&max8660_reg[id], &config);
if (IS_ERR(rdev[i])) {
ret = PTR_ERR(rdev[i]);
- dev_err(&client->dev, "failed to register %s\n",
+ dev_err(dev, "failed to register %s\n",
max8660_reg[id].name);
goto err_unregister;
}
@@ -431,8 +519,8 @@ static int max8660_remove(struct i2c_client *client)
}
static const struct i2c_device_id max8660_id[] = {
- { "max8660", 0 },
- { "max8661", 0 },
+ { .name = "max8660", .driver_data = MAX8660 },
+ { .name = "max8661", .driver_data = MAX8661 },
{ }
};
MODULE_DEVICE_TABLE(i2c, max8660_id);
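
With the DT support added above, probe() takes its configuration either from the matched of_device_id or, as before, from platform data now fetched with dev_get_platdata(). A minimal board-file sketch of the legacy path (illustrative only; the names, the choice of regulator and the I2C address are hypothetical):

	static struct regulator_init_data example_v3_init = {
		.constraints = {
			.min_uV         = 725000,
			.max_uV         = 1800000,
			.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
		},
	};

	static struct max8660_subdev_data example_subdevs[] = {
		{ .id = MAX8660_V3, .name = "V3", .platform_data = &example_v3_init },
	};

	static struct max8660_platform_data example_pdata = {
		.num_subdevs = ARRAY_SIZE(example_subdevs),
		.subdevs     = example_subdevs,
	};

	static struct i2c_board_info example_i2c_devs[] __initdata = {
		{ I2C_BOARD_INFO("max8660", 0x34), .platform_data = &example_pdata },
	};
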
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index e6d54a546d3..d80b5fa758a 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -277,7 +277,7 @@ static int max8925_regulator_dt_init(struct platform_device *pdev,
static int max8925_regulator_probe(struct platform_device *pdev)
{
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
- struct regulator_init_data *pdata = pdev->dev.platform_data;
+ struct regulator_init_data *pdata = dev_get_platdata(&pdev->dev);
struct regulator_config config = { };
struct max8925_regulator_info *ri;
struct resource *res;
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index 5259c2fea90..788e5ae2af1 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -196,7 +196,7 @@ static int max8952_pmic_probe(struct i2c_client *client,
const struct i2c_device_id *i2c_id)
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
- struct max8952_platform_data *pdata = client->dev.platform_data;
+ struct max8952_platform_data *pdata = dev_get_platdata(&client->dev);
struct regulator_config config = { };
struct max8952_data *max8952;
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 0c5195a842e..5b77ab7762e 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -371,7 +371,7 @@ static int max8973_probe(struct i2c_client *client,
struct max8973_chip *max;
int ret;
- pdata = client->dev.platform_data;
+ pdata = dev_get_platdata(&client->dev);
if (!pdata && !client->dev.of_node) {
dev_err(&client->dev, "No Platform data");
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index f3c8f8f9dc3..7827384680d 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -21,6 +21,7 @@ static void of_get_regulation_constraints(struct device_node *np,
{
const __be32 *min_uV, *max_uV, *uV_offset;
const __be32 *min_uA, *max_uA, *ramp_delay;
+ struct property *prop;
struct regulation_constraints *constraints = &(*init_data)->constraints;
constraints->name = of_get_property(np, "regulator-name", NULL);
@@ -64,9 +65,14 @@ static void of_get_regulation_constraints(struct device_node *np,
if (of_property_read_bool(np, "regulator-allow-bypass"))
constraints->valid_ops_mask |= REGULATOR_CHANGE_BYPASS;
- ramp_delay = of_get_property(np, "regulator-ramp-delay", NULL);
- if (ramp_delay)
- constraints->ramp_delay = be32_to_cpu(*ramp_delay);
+ prop = of_find_property(np, "regulator-ramp-delay", NULL);
+ if (prop && prop->value) {
+ ramp_delay = prop->value;
+ if (*ramp_delay)
+ constraints->ramp_delay = be32_to_cpu(*ramp_delay);
+ else
+ constraints->ramp_disable = true;
+ }
}
/**
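
The effect of the new parsing logic, read straight off the code above (units as per the regulator-ramp-delay binding, uV/us):

	property absent                 -> ramp_delay stays 0, ramp_disable stays false
	regulator-ramp-delay = <0>      -> constraints->ramp_disable = true
	regulator-ramp-delay = <12500>  -> constraints->ramp_delay = 12500

So a zero-valued property is no longer silently ignored, presumably so that a driver's set_ramp_delay() callback can be handed a zero delay and switch hardware ramp control off, which is exactly how the s2mps11 changes further down treat ramp_delay == 0.
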
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index d0c87856dd2..488dfe7ce9a 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -97,11 +97,16 @@ static const struct regs_info palmas_regs_info[] = {
.ctrl_addr = PALMAS_SMPS9_CTRL,
},
{
- .name = "SMPS10",
+ .name = "SMPS10_OUT2",
.sname = "smps10-in",
.ctrl_addr = PALMAS_SMPS10_CTRL,
},
{
+ .name = "SMPS10_OUT1",
+ .sname = "smps10-out2",
+ .ctrl_addr = PALMAS_SMPS10_CTRL,
+ },
+ {
.name = "LDO1",
.sname = "ldo1-in",
.vsel_addr = PALMAS_LDO1_VOLTAGE,
@@ -487,6 +492,8 @@ static struct regulator_ops palmas_ops_smps10 = {
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
+ .set_bypass = regulator_set_bypass_regmap,
+ .get_bypass = regulator_get_bypass_regmap,
};
static int palmas_is_enabled_ldo(struct regulator_dev *dev)
@@ -538,7 +545,8 @@ static int palmas_smps_init(struct palmas *palmas, int id,
return ret;
switch (id) {
- case PALMAS_REG_SMPS10:
+ case PALMAS_REG_SMPS10_OUT1:
+ case PALMAS_REG_SMPS10_OUT2:
reg &= ~PALMAS_SMPS10_CTRL_MODE_SLEEP_MASK;
if (reg_init->mode_sleep)
reg |= reg_init->mode_sleep <<
@@ -681,7 +689,8 @@ static struct of_regulator_match palmas_matches[] = {
{ .name = "smps7", },
{ .name = "smps8", },
{ .name = "smps9", },
- { .name = "smps10", },
+ { .name = "smps10_out2", },
+ { .name = "smps10_out1", },
{ .name = "ldo1", },
{ .name = "ldo2", },
{ .name = "ldo3", },
@@ -765,7 +774,7 @@ static void palmas_dt_to_pdata(struct device *dev,
static int palmas_regulators_probe(struct platform_device *pdev)
{
struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
- struct palmas_pmic_platform_data *pdata = pdev->dev.platform_data;
+ struct palmas_pmic_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *node = pdev->dev.of_node;
struct regulator_dev *rdev;
struct regulator_config config = { };
@@ -838,7 +847,8 @@ static int palmas_regulators_probe(struct platform_device *pdev)
continue;
ramp_delay_support = true;
break;
- case PALMAS_REG_SMPS10:
+ case PALMAS_REG_SMPS10_OUT1:
+ case PALMAS_REG_SMPS10_OUT2:
if (!PALMAS_PMIC_HAS(palmas, SMPS10_BOOST))
continue;
}
@@ -872,7 +882,8 @@ static int palmas_regulators_probe(struct platform_device *pdev)
pmic->desc[id].id = id;
switch (id) {
- case PALMAS_REG_SMPS10:
+ case PALMAS_REG_SMPS10_OUT1:
+ case PALMAS_REG_SMPS10_OUT2:
pmic->desc[id].n_voltages = PALMAS_SMPS10_NUM_VOLTAGES;
pmic->desc[id].ops = &palmas_ops_smps10;
pmic->desc[id].vsel_reg =
@@ -882,7 +893,14 @@ static int palmas_regulators_probe(struct platform_device *pdev)
pmic->desc[id].enable_reg =
PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
PALMAS_SMPS10_CTRL);
- pmic->desc[id].enable_mask = SMPS10_BOOST_EN;
+ if (id == PALMAS_REG_SMPS10_OUT1)
+ pmic->desc[id].enable_mask = SMPS10_SWITCH_EN;
+ else
+ pmic->desc[id].enable_mask = SMPS10_BOOST_EN;
+ pmic->desc[id].bypass_reg =
+ PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
+ PALMAS_SMPS10_CTRL);
+ pmic->desc[id].bypass_mask = SMPS10_BYPASS_EN;
pmic->desc[id].min_uV = 3750000;
pmic->desc[id].uV_step = 1250000;
break;
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index 1a73a297fe7..b49eaeedea8 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -243,7 +243,7 @@ static int pcap_regulator_probe(struct platform_device *pdev)
struct regulator_config config = { };
config.dev = &pdev->dev;
- config.init_data = pdev->dev.platform_data;
+ config.init_data = dev_get_platdata(&pdev->dev);
config.driver_data = pcap;
rdev = regulator_register(&pcap_regulators[pdev->id], &config);
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index 54df9f7cb50..0f3576d48ab 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -86,7 +86,7 @@ static int pcf50633_regulator_probe(struct platform_device *pdev)
pcf = dev_to_pcf50633(pdev->dev.parent);
config.dev = &pdev->dev;
- config.init_data = pdev->dev.platform_data;
+ config.init_data = dev_get_platdata(&pdev->dev);
config.driver_data = pcf;
config.regmap = pcf->regmap;
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
new file mode 100644
index 00000000000..ba67b2c4e2e
--- /dev/null
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -0,0 +1,445 @@
+/*
+ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/pfuze100.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/regmap.h>
+
+#define PFUZE_NUMREGS 128
+#define PFUZE100_VOL_OFFSET 0
+#define PFUZE100_STANDBY_OFFSET 1
+#define PFUZE100_MODE_OFFSET 3
+#define PFUZE100_CONF_OFFSET 4
+
+#define PFUZE100_DEVICEID 0x0
+#define PFUZE100_REVID 0x3
+#define PFUZE100_FABID 0x4
+
+#define PFUZE100_SW1ABVOL 0x20
+#define PFUZE100_SW1CVOL 0x2e
+#define PFUZE100_SW2VOL 0x35
+#define PFUZE100_SW3AVOL 0x3c
+#define PFUZE100_SW3BVOL 0x43
+#define PFUZE100_SW4VOL 0x4a
+#define PFUZE100_SWBSTCON1 0x66
+#define PFUZE100_VREFDDRCON 0x6a
+#define PFUZE100_VSNVSVOL 0x6b
+#define PFUZE100_VGEN1VOL 0x6c
+#define PFUZE100_VGEN2VOL 0x6d
+#define PFUZE100_VGEN3VOL 0x6e
+#define PFUZE100_VGEN4VOL 0x6f
+#define PFUZE100_VGEN5VOL 0x70
+#define PFUZE100_VGEN6VOL 0x71
+
+struct pfuze_regulator {
+ struct regulator_desc desc;
+ unsigned char stby_reg;
+ unsigned char stby_mask;
+};
+
+struct pfuze_chip {
+ struct regmap *regmap;
+ struct device *dev;
+ struct pfuze_regulator regulator_descs[PFUZE100_MAX_REGULATOR];
+ struct regulator_dev *regulators[PFUZE100_MAX_REGULATOR];
+};
+
+static const int pfuze100_swbst[] = {
+ 5000000, 5050000, 5100000, 5150000,
+};
+
+static const int pfuze100_vsnvs[] = {
+ 1000000, 1100000, 1200000, 1300000, 1500000, 1800000, 3000000,
+};
+
+static const struct i2c_device_id pfuze_device_id[] = {
+ {.name = "pfuze100"},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, pfuze_device_id);
+
+static const struct of_device_id pfuze_dt_ids[] = {
+ { .compatible = "fsl,pfuze100" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pfuze_dt_ids);
+
+static int pfuze100_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+ struct pfuze_chip *pfuze100 = rdev_get_drvdata(rdev);
+ int id = rdev->desc->id;
+ unsigned int ramp_bits;
+ int ret;
+
+ if (id < PFUZE100_SWBST) {
+ ramp_delay = 12500 / ramp_delay;
+ ramp_bits = (ramp_delay >> 1) - (ramp_delay >> 3);
+ ret = regmap_update_bits(pfuze100->regmap,
+ rdev->desc->vsel_reg + 4,
+ 0xc0, ramp_bits << 6);
+ if (ret < 0)
+ dev_err(pfuze100->dev, "ramp failed, err %d\n", ret);
+ } else
+ ret = -EACCES;
+
+ return ret;
+}
+
+static struct regulator_ops pfuze100_ldo_regulator_ops = {
+ .enable = regulator_enable_regmap,
+ .disable = regulator_disable_regmap,
+ .is_enabled = regulator_is_enabled_regmap,
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+};
+
+static struct regulator_ops pfuze100_fixed_regulator_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+};
+
+static struct regulator_ops pfuze100_sw_regulator_ops = {
+ .list_voltage = regulator_list_voltage_linear,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+ .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_ramp_delay = pfuze100_set_ramp_delay,
+};
+
+static struct regulator_ops pfuze100_swb_regulator_ops = {
+ .list_voltage = regulator_list_voltage_table,
+ .map_voltage = regulator_map_voltage_ascend,
+ .set_voltage_sel = regulator_set_voltage_sel_regmap,
+ .get_voltage_sel = regulator_get_voltage_sel_regmap,
+
+};
+
+#define PFUZE100_FIXED_REG(_name, base, voltage) \
+ [PFUZE100_ ## _name] = { \
+ .desc = { \
+ .name = #_name, \
+ .n_voltages = 1, \
+ .ops = &pfuze100_fixed_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = PFUZE100_ ## _name, \
+ .owner = THIS_MODULE, \
+ .min_uV = (voltage), \
+ .enable_reg = (base), \
+ .enable_mask = 0x10, \
+ }, \
+ }
+
+#define PFUZE100_SW_REG(_name, base, min, max, step) \
+ [PFUZE100_ ## _name] = { \
+ .desc = { \
+ .name = #_name,\
+ .n_voltages = ((max) - (min)) / (step) + 1, \
+ .ops = &pfuze100_sw_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = PFUZE100_ ## _name, \
+ .owner = THIS_MODULE, \
+ .min_uV = (min), \
+ .uV_step = (step), \
+ .vsel_reg = (base) + PFUZE100_VOL_OFFSET, \
+ .vsel_mask = 0x3f, \
+ }, \
+ .stby_reg = (base) + PFUZE100_STANDBY_OFFSET, \
+ .stby_mask = 0x3f, \
+ }
+
+#define PFUZE100_SWB_REG(_name, base, mask, voltages) \
+ [PFUZE100_ ## _name] = { \
+ .desc = { \
+ .name = #_name, \
+ .n_voltages = ARRAY_SIZE(voltages), \
+ .ops = &pfuze100_swb_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = PFUZE100_ ## _name, \
+ .owner = THIS_MODULE, \
+ .volt_table = voltages, \
+ .vsel_reg = (base), \
+ .vsel_mask = (mask), \
+ }, \
+ }
+
+#define PFUZE100_VGEN_REG(_name, base, min, max, step) \
+ [PFUZE100_ ## _name] = { \
+ .desc = { \
+ .name = #_name, \
+ .n_voltages = ((max) - (min)) / (step) + 1, \
+ .ops = &pfuze100_ldo_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = PFUZE100_ ## _name, \
+ .owner = THIS_MODULE, \
+ .min_uV = (min), \
+ .uV_step = (step), \
+ .vsel_reg = (base), \
+ .vsel_mask = 0xf, \
+ .enable_reg = (base), \
+ .enable_mask = 0x10, \
+ }, \
+ .stby_reg = (base), \
+ .stby_mask = 0x20, \
+ }
+
+static struct pfuze_regulator pfuze100_regulators[] = {
+ PFUZE100_SW_REG(SW1AB, PFUZE100_SW1ABVOL, 300000, 1875000, 25000),
+ PFUZE100_SW_REG(SW1C, PFUZE100_SW1CVOL, 300000, 1875000, 25000),
+ PFUZE100_SW_REG(SW2, PFUZE100_SW2VOL, 400000, 1975000, 25000),
+ PFUZE100_SW_REG(SW3A, PFUZE100_SW3AVOL, 400000, 1975000, 25000),
+ PFUZE100_SW_REG(SW3B, PFUZE100_SW3BVOL, 400000, 1975000, 25000),
+ PFUZE100_SW_REG(SW4, PFUZE100_SW4VOL, 400000, 1975000, 25000),
+ PFUZE100_SWB_REG(SWBST, PFUZE100_SWBSTCON1, 0x3, pfuze100_swbst),
+ PFUZE100_SWB_REG(VSNVS, PFUZE100_VSNVSVOL, 0x7, pfuze100_vsnvs),
+ PFUZE100_FIXED_REG(VREFDDR, PFUZE100_VREFDDRCON, 750000),
+ PFUZE100_VGEN_REG(VGEN1, PFUZE100_VGEN1VOL, 800000, 1550000, 50000),
+ PFUZE100_VGEN_REG(VGEN2, PFUZE100_VGEN2VOL, 800000, 1550000, 50000),
+ PFUZE100_VGEN_REG(VGEN3, PFUZE100_VGEN3VOL, 1800000, 3300000, 100000),
+ PFUZE100_VGEN_REG(VGEN4, PFUZE100_VGEN4VOL, 1800000, 3300000, 100000),
+ PFUZE100_VGEN_REG(VGEN5, PFUZE100_VGEN5VOL, 1800000, 3300000, 100000),
+ PFUZE100_VGEN_REG(VGEN6, PFUZE100_VGEN6VOL, 1800000, 3300000, 100000),
+};
+
+#ifdef CONFIG_OF
+static struct of_regulator_match pfuze100_matches[] = {
+ { .name = "sw1ab", },
+ { .name = "sw1c", },
+ { .name = "sw2", },
+ { .name = "sw3a", },
+ { .name = "sw3b", },
+ { .name = "sw4", },
+ { .name = "swbst", },
+ { .name = "vsnvs", },
+ { .name = "vrefddr", },
+ { .name = "vgen1", },
+ { .name = "vgen2", },
+ { .name = "vgen3", },
+ { .name = "vgen4", },
+ { .name = "vgen5", },
+ { .name = "vgen6", },
+};
+
+static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
+{
+ struct device *dev = chip->dev;
+ struct device_node *np, *parent;
+ int ret;
+
+ np = of_node_get(dev->parent->of_node);
+ if (!np)
+ return 0;
+
+ parent = of_find_node_by_name(np, "regulators");
+ if (!parent) {
+ dev_err(dev, "regulators node not found\n");
+ return -EINVAL;
+ }
+
+ ret = of_regulator_match(dev, parent, pfuze100_matches,
+ ARRAY_SIZE(pfuze100_matches));
+
+ of_node_put(parent);
+ if (ret < 0) {
+ dev_err(dev, "Error parsing regulator init data: %d\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline struct regulator_init_data *match_init_data(int index)
+{
+ return pfuze100_matches[index].init_data;
+}
+
+static inline struct device_node *match_of_node(int index)
+{
+ return pfuze100_matches[index].of_node;
+}
+#else
+static int pfuze_parse_regulators_dt(struct pfuze_chip *chip)
+{
+ return 0;
+}
+
+static inline struct regulator_init_data *match_init_data(int index)
+{
+ return NULL;
+}
+
+static inline struct device_node *match_of_node(int index)
+{
+ return NULL;
+}
+#endif
+
+static int pfuze_identify(struct pfuze_chip *pfuze_chip)
+{
+ unsigned int value;
+ int ret;
+
+ ret = regmap_read(pfuze_chip->regmap, PFUZE100_DEVICEID, &value);
+ if (ret)
+ return ret;
+
+ if (value & 0x0f) {
+ dev_warn(pfuze_chip->dev, "Illegal ID: %x\n", value);
+ return -ENODEV;
+ }
+
+ ret = regmap_read(pfuze_chip->regmap, PFUZE100_REVID, &value);
+ if (ret)
+ return ret;
+ dev_info(pfuze_chip->dev,
+ "Full lay: %x, Metal lay: %x\n",
+ (value & 0xf0) >> 4, value & 0x0f);
+
+ ret = regmap_read(pfuze_chip->regmap, PFUZE100_FABID, &value);
+ if (ret)
+ return ret;
+ dev_info(pfuze_chip->dev, "FAB: %x, FIN: %x\n",
+ (value & 0xc) >> 2, value & 0x3);
+
+ return 0;
+}
+
+static const struct regmap_config pfuze_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = PFUZE_NUMREGS - 1,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static int pfuze100_regulator_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct pfuze_chip *pfuze_chip;
+ struct pfuze_regulator_platform_data *pdata =
+ dev_get_platdata(&client->dev);
+ struct regulator_config config = { };
+ int i, ret;
+
+ pfuze_chip = devm_kzalloc(&client->dev, sizeof(*pfuze_chip),
+ GFP_KERNEL);
+ if (!pfuze_chip)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, pfuze_chip);
+
+ memcpy(pfuze_chip->regulator_descs, pfuze100_regulators,
+ sizeof(pfuze_chip->regulator_descs));
+
+ pfuze_chip->dev = &client->dev;
+
+ pfuze_chip->regmap = devm_regmap_init_i2c(client, &pfuze_regmap_config);
+ if (IS_ERR(pfuze_chip->regmap)) {
+ ret = PTR_ERR(pfuze_chip->regmap);
+ dev_err(&client->dev,
+ "regmap allocation failed with err %d\n", ret);
+ return ret;
+ }
+
+ ret = pfuze_identify(pfuze_chip);
+ if (ret) {
+ dev_err(&client->dev, "unrecognized pfuze chip ID!\n");
+ return ret;
+ }
+
+ ret = pfuze_parse_regulators_dt(pfuze_chip);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < PFUZE100_MAX_REGULATOR; i++) {
+ struct regulator_init_data *init_data;
+ struct regulator_desc *desc;
+ int val;
+
+ desc = &pfuze_chip->regulator_descs[i].desc;
+
+ if (pdata)
+ init_data = pdata->init_data[i];
+ else
+ init_data = match_init_data(i);
+
+ /* For SW2~SW4, check the high-range bit and adjust the voltage table */
+ if (i > PFUZE100_SW1C && i < PFUZE100_SWBST) {
+ regmap_read(pfuze_chip->regmap, desc->vsel_reg, &val);
+ if (val & 0x40) {
+ desc->min_uV = 800000;
+ desc->uV_step = 50000;
+ desc->n_voltages = 51;
+ }
+ }
+
+ config.dev = &client->dev;
+ config.init_data = init_data;
+ config.driver_data = pfuze_chip;
+ config.of_node = match_of_node(i);
+
+ pfuze_chip->regulators[i] = regulator_register(desc, &config);
+ if (IS_ERR(pfuze_chip->regulators[i])) {
+ dev_err(&client->dev, "register regulator %s failed\n",
+ pfuze100_regulators[i].desc.name);
+ ret = PTR_ERR(pfuze_chip->regulators[i]);
+ while (--i >= 0)
+ regulator_unregister(pfuze_chip->regulators[i]);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int pfuze100_regulator_remove(struct i2c_client *client)
+{
+ int i;
+ struct pfuze_chip *pfuze_chip = i2c_get_clientdata(client);
+
+ for (i = 0; i < PFUZE100_MAX_REGULATOR; i++)
+ regulator_unregister(pfuze_chip->regulators[i]);
+
+ return 0;
+}
+
+static struct i2c_driver pfuze_driver = {
+ .id_table = pfuze_device_id,
+ .driver = {
+ .name = "pfuze100-regulator",
+ .owner = THIS_MODULE,
+ .of_match_table = pfuze_dt_ids,
+ },
+ .probe = pfuze100_regulator_probe,
+ .remove = pfuze100_regulator_remove,
+};
+module_i2c_driver(pfuze_driver);
+
+MODULE_AUTHOR("Robin Gong <b38343@freescale.com>");
+MODULE_DESCRIPTION("Regulator Driver for Freescale PFUZE100 PMIC");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("i2c:pfuze100-regulator");
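
Two arithmetic details in the new driver are worth spelling out (derived from the expressions above, not from a datasheet). In pfuze100_set_ramp_delay() the requested slew rate is folded into a 2-bit field via ramp_delay = 12500 / ramp_delay and ramp_bits = (ramp_delay >> 1) - (ramp_delay >> 3), which works out to roughly:

	requested (uV/us)   12500/x   ramp_bits
	12500               1         0
	 6250               2         1
	 3125               4         2
	 1563               7         3

And in probe(), when the SW2..SW4 high-range bit (0x40) is set, the descriptor becomes 800000 + sel * 50000 for sel = 0..50, i.e. 0.8 V to 3.3 V, which is why n_voltages is bumped to 51.
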
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 2f62564ca93..5eba2ff8c0e 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -16,12 +16,17 @@
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/mfd/samsung/core.h>
#include <linux/mfd/samsung/s2mps11.h>
+#define S2MPS11_REGULATOR_CNT ARRAY_SIZE(regulators)
+
struct s2mps11_info {
struct regulator_dev *rdev[S2MPS11_REGULATOR_MAX];
@@ -31,11 +36,6 @@ struct s2mps11_info {
int ramp_delay16;
int ramp_delay7810;
int ramp_delay9;
-
- bool buck6_ramp;
- bool buck2_ramp;
- bool buck3_ramp;
- bool buck4_ramp;
};
static int get_ramp_delay(int ramp_delay)
@@ -50,9 +50,171 @@ static int get_ramp_delay(int ramp_delay)
break;
cnt++;
}
+
+ if (cnt > 3)
+ cnt = 3;
+
return cnt;
}
+static int s2mps11_regulator_set_voltage_time_sel(struct regulator_dev *rdev,
+ unsigned int old_selector,
+ unsigned int new_selector)
+{
+ struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
+ unsigned int ramp_delay = 0;
+ int old_volt, new_volt;
+
+ switch (rdev->desc->id) {
+ case S2MPS11_BUCK2:
+ ramp_delay = s2mps11->ramp_delay2;
+ break;
+ case S2MPS11_BUCK3:
+ ramp_delay = s2mps11->ramp_delay34;
+ break;
+ case S2MPS11_BUCK4:
+ ramp_delay = s2mps11->ramp_delay34;
+ break;
+ case S2MPS11_BUCK5:
+ ramp_delay = s2mps11->ramp_delay5;
+ break;
+ case S2MPS11_BUCK6:
+ case S2MPS11_BUCK1:
+ ramp_delay = s2mps11->ramp_delay16;
+ break;
+ case S2MPS11_BUCK7:
+ case S2MPS11_BUCK8:
+ case S2MPS11_BUCK10:
+ ramp_delay = s2mps11->ramp_delay7810;
+ break;
+ case S2MPS11_BUCK9:
+ ramp_delay = s2mps11->ramp_delay9;
+ }
+
+ if (ramp_delay == 0)
+ ramp_delay = rdev->desc->ramp_delay;
+
+ old_volt = rdev->desc->min_uV + (rdev->desc->uV_step * old_selector);
+ new_volt = rdev->desc->min_uV + (rdev->desc->uV_step * new_selector);
+
+ return DIV_ROUND_UP(abs(new_volt - old_volt), ramp_delay);
+}
+
+static int s2mps11_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay)
+{
+ struct s2mps11_info *s2mps11 = rdev_get_drvdata(rdev);
+ unsigned int ramp_val, ramp_shift, ramp_reg = S2MPS11_REG_RAMP_BUCK;
+ unsigned int ramp_enable = 1, enable_shift = 0;
+ int ret;
+
+ switch (rdev->desc->id) {
+ case S2MPS11_BUCK1:
+ if (ramp_delay > s2mps11->ramp_delay16)
+ s2mps11->ramp_delay16 = ramp_delay;
+ else
+ ramp_delay = s2mps11->ramp_delay16;
+
+ ramp_shift = S2MPS11_BUCK16_RAMP_SHIFT;
+ break;
+ case S2MPS11_BUCK2:
+ enable_shift = S2MPS11_BUCK2_RAMP_EN_SHIFT;
+ if (!ramp_delay) {
+ ramp_enable = 0;
+ break;
+ }
+
+ s2mps11->ramp_delay2 = ramp_delay;
+ ramp_shift = S2MPS11_BUCK2_RAMP_SHIFT;
+ ramp_reg = S2MPS11_REG_RAMP;
+ break;
+ case S2MPS11_BUCK3:
+ enable_shift = S2MPS11_BUCK3_RAMP_EN_SHIFT;
+ if (!ramp_delay) {
+ ramp_enable = 0;
+ break;
+ }
+
+ if (ramp_delay > s2mps11->ramp_delay34)
+ s2mps11->ramp_delay34 = ramp_delay;
+ else
+ ramp_delay = s2mps11->ramp_delay34;
+
+ ramp_shift = S2MPS11_BUCK34_RAMP_SHIFT;
+ ramp_reg = S2MPS11_REG_RAMP;
+ break;
+ case S2MPS11_BUCK4:
+ enable_shift = S2MPS11_BUCK4_RAMP_EN_SHIFT;
+ if (!ramp_delay) {
+ ramp_enable = 0;
+ break;
+ }
+
+ if (ramp_delay > s2mps11->ramp_delay34)
+ s2mps11->ramp_delay34 = ramp_delay;
+ else
+ ramp_delay = s2mps11->ramp_delay34;
+
+ ramp_shift = S2MPS11_BUCK34_RAMP_SHIFT;
+ ramp_reg = S2MPS11_REG_RAMP;
+ break;
+ case S2MPS11_BUCK5:
+ s2mps11->ramp_delay5 = ramp_delay;
+ ramp_shift = S2MPS11_BUCK5_RAMP_SHIFT;
+ break;
+ case S2MPS11_BUCK6:
+ enable_shift = S2MPS11_BUCK6_RAMP_EN_SHIFT;
+ if (!ramp_delay) {
+ ramp_enable = 0;
+ break;
+ }
+
+ if (ramp_delay > s2mps11->ramp_delay16)
+ s2mps11->ramp_delay16 = ramp_delay;
+ else
+ ramp_delay = s2mps11->ramp_delay16;
+
+ ramp_shift = S2MPS11_BUCK16_RAMP_SHIFT;
+ break;
+ case S2MPS11_BUCK7:
+ case S2MPS11_BUCK8:
+ case S2MPS11_BUCK10:
+ if (ramp_delay > s2mps11->ramp_delay7810)
+ s2mps11->ramp_delay7810 = ramp_delay;
+ else
+ ramp_delay = s2mps11->ramp_delay7810;
+
+ ramp_shift = S2MPS11_BUCK7810_RAMP_SHIFT;
+ break;
+ case S2MPS11_BUCK9:
+ s2mps11->ramp_delay9 = ramp_delay;
+ ramp_shift = S2MPS11_BUCK9_RAMP_SHIFT;
+ break;
+ default:
+ return 0;
+ }
+
+ if (!ramp_enable)
+ goto ramp_disable;
+
+ if (enable_shift) {
+ ret = regmap_update_bits(rdev->regmap, S2MPS11_REG_RAMP,
+ 1 << enable_shift, 1 << enable_shift);
+ if (ret) {
+ dev_err(&rdev->dev, "failed to enable ramp rate\n");
+ return ret;
+ }
+ }
+
+ ramp_val = get_ramp_delay(ramp_delay);
+
+ return regmap_update_bits(rdev->regmap, ramp_reg, 0x3 << ramp_shift,
+ ramp_val << ramp_shift);
+
+ramp_disable:
+ return regmap_update_bits(rdev->regmap, S2MPS11_REG_RAMP,
+ 1 << enable_shift, 0);
+}
+
static struct regulator_ops s2mps11_ldo_ops = {
.list_voltage = regulator_list_voltage_linear,
.map_voltage = regulator_map_voltage_linear,
@@ -72,7 +234,8 @@ static struct regulator_ops s2mps11_buck_ops = {
.disable = regulator_disable_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
- .set_voltage_time_sel = regulator_set_voltage_time_sel,
+ .set_voltage_time_sel = s2mps11_regulator_set_voltage_time_sel,
+ .set_ramp_delay = s2mps11_set_ramp_delay,
};
#define regulator_desc_ldo1(num) { \
@@ -239,59 +402,51 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
{
struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct of_regulator_match rdata[S2MPS11_REGULATOR_MAX];
+ struct device_node *reg_np = NULL;
struct regulator_config config = { };
struct s2mps11_info *s2mps11;
int i, ret;
- unsigned char ramp_enable, ramp_reg = 0;
-
- if (!pdata) {
- dev_err(pdev->dev.parent, "Platform data not supplied\n");
- return -ENODEV;
- }
s2mps11 = devm_kzalloc(&pdev->dev, sizeof(struct s2mps11_info),
GFP_KERNEL);
if (!s2mps11)
return -ENOMEM;
- platform_set_drvdata(pdev, s2mps11);
+ if (!iodev->dev->of_node) {
+ if (pdata) {
+ goto common_reg;
+ } else {
+ dev_err(pdev->dev.parent,
+ "Platform data or DT node not supplied\n");
+ return -ENODEV;
+ }
+ }
- s2mps11->ramp_delay2 = pdata->buck2_ramp_delay;
- s2mps11->ramp_delay34 = pdata->buck34_ramp_delay;
- s2mps11->ramp_delay5 = pdata->buck5_ramp_delay;
- s2mps11->ramp_delay16 = pdata->buck16_ramp_delay;
- s2mps11->ramp_delay7810 = pdata->buck7810_ramp_delay;
- s2mps11->ramp_delay9 = pdata->buck9_ramp_delay;
-
- s2mps11->buck6_ramp = pdata->buck6_ramp_enable;
- s2mps11->buck2_ramp = pdata->buck2_ramp_enable;
- s2mps11->buck3_ramp = pdata->buck3_ramp_enable;
- s2mps11->buck4_ramp = pdata->buck4_ramp_enable;
-
- ramp_enable = (s2mps11->buck2_ramp << 3) | (s2mps11->buck3_ramp << 2) |
- (s2mps11->buck4_ramp << 1) | s2mps11->buck6_ramp ;
-
- if (ramp_enable) {
- if (s2mps11->buck2_ramp)
- ramp_reg |= get_ramp_delay(s2mps11->ramp_delay2) << 6;
- if (s2mps11->buck3_ramp || s2mps11->buck4_ramp)
- ramp_reg |= get_ramp_delay(s2mps11->ramp_delay34) << 4;
- sec_reg_write(iodev, S2MPS11_REG_RAMP, ramp_reg | ramp_enable);
+ for (i = 0; i < S2MPS11_REGULATOR_CNT; i++)
+ rdata[i].name = regulators[i].name;
+
+ reg_np = of_find_node_by_name(iodev->dev->of_node, "regulators");
+ if (!reg_np) {
+ dev_err(&pdev->dev, "could not find regulators sub-node\n");
+ return -EINVAL;
}
- ramp_reg &= 0x00;
- ramp_reg |= get_ramp_delay(s2mps11->ramp_delay5) << 6;
- ramp_reg |= get_ramp_delay(s2mps11->ramp_delay16) << 4;
- ramp_reg |= get_ramp_delay(s2mps11->ramp_delay7810) << 2;
- ramp_reg |= get_ramp_delay(s2mps11->ramp_delay9);
- sec_reg_write(iodev, S2MPS11_REG_RAMP_BUCK, ramp_reg);
+ of_regulator_match(&pdev->dev, reg_np, rdata, S2MPS11_REGULATOR_MAX);
- for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
+common_reg:
+ platform_set_drvdata(pdev, s2mps11);
- config.dev = &pdev->dev;
- config.regmap = iodev->regmap;
- config.init_data = pdata->regulators[i].initdata;
- config.driver_data = s2mps11;
+ config.dev = &pdev->dev;
+ config.regmap = iodev->regmap;
+ config.driver_data = s2mps11;
+ for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
+ if (!reg_np) {
+ config.init_data = pdata->regulators[i].initdata;
+ } else {
+ config.init_data = rdata[i].init_data;
+ config.of_node = rdata[i].of_node;
+ }
s2mps11->rdev[i] = regulator_register(&regulators[i], &config);
if (IS_ERR(s2mps11->rdev[i])) {
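
The replacement set_voltage_time_sel() is plain arithmetic: it converts the selector change into microvolts and divides by the per-buck ramp rate, so for example a 100000 uV move at a rate of 6250 uV/us yields DIV_ROUND_UP(100000, 6250) = 16 us. The new clamp in get_ramp_delay() (cnt capped at 3) keeps an over-large requested delay from overflowing the 2-bit ramp fields, which s2mps11_set_ramp_delay() writes with a 0x3 << ramp_shift mask.
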
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index 3753ed05e71..d8e3e1262bc 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -717,11 +717,6 @@ static int ti_abb_probe(struct platform_device *pdev)
/* Map ABB resources */
pname = "base-address";
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
- if (!res) {
- dev_err(dev, "Missing '%s' IO resource\n", pname);
- ret = -ENODEV;
- goto err;
- }
abb->base = devm_ioremap_resource(dev, res);
if (IS_ERR(abb->base)) {
ret = PTR_ERR(abb->base);
@@ -770,11 +765,6 @@ static int ti_abb_probe(struct platform_device *pdev)
pname = "ldo-address";
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
- if (!res) {
- dev_dbg(dev, "Missing '%s' IO resource\n", pname);
- ret = -ENODEV;
- goto skip_opt;
- }
abb->ldo_base = devm_ioremap_resource(dev, res);
if (IS_ERR(abb->ldo_base)) {
ret = PTR_ERR(abb->ldo_base);
diff --git a/drivers/regulator/tps51632-regulator.c b/drivers/regulator/tps51632-regulator.c
index 6e67be75ea1..9392a7ca3d2 100644
--- a/drivers/regulator/tps51632-regulator.c
+++ b/drivers/regulator/tps51632-regulator.c
@@ -275,7 +275,7 @@ static int tps51632_probe(struct i2c_client *client,
}
}
- pdata = client->dev.platform_data;
+ pdata = dev_get_platdata(&client->dev);
if (!pdata && client->dev.of_node)
pdata = of_get_tps51632_platform_data(&client->dev);
if (!pdata) {
diff --git a/drivers/regulator/tps62360-regulator.c b/drivers/regulator/tps62360-regulator.c
index a490d5b749b..0b7ebb1ebf8 100644
--- a/drivers/regulator/tps62360-regulator.c
+++ b/drivers/regulator/tps62360-regulator.c
@@ -350,7 +350,7 @@ static int tps62360_probe(struct i2c_client *client,
int i;
int chip_id;
- pdata = client->dev.platform_data;
+ pdata = dev_get_platdata(&client->dev);
if (client->dev.of_node) {
const struct of_device_id *match;
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 9d053e23e9e..a15263d4bdf 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -218,7 +218,7 @@ static int tps_65023_probe(struct i2c_client *client,
* init_data points to array of regulator_init structures
* coming from the board-evm file.
*/
- init_data = client->dev.platform_data;
+ init_data = dev_get_platdata(&client->dev);
if (!init_data)
return -EIO;
diff --git a/drivers/regulator/tps65217-regulator.c b/drivers/regulator/tps65217-regulator.c
index 2df4616621f..90861d68a0b 100644
--- a/drivers/regulator/tps65217-regulator.c
+++ b/drivers/regulator/tps65217-regulator.c
@@ -27,7 +27,7 @@
#include <linux/regulator/machine.h>
#include <linux/mfd/tps65217.h>
-#define TPS65217_REGULATOR(_name, _id, _ops, _n, _vr, _vm, _em, _t) \
+#define TPS65217_REGULATOR(_name, _id, _ops, _n, _vr, _vm, _em, _t, _lr, _nlr) \
{ \
.name = _name, \
.id = _id, \
@@ -40,17 +40,10 @@
.enable_reg = TPS65217_REG_ENABLE, \
.enable_mask = _em, \
.volt_table = _t, \
+ .linear_ranges = _lr, \
+ .n_linear_ranges = _nlr, \
} \
-#define TPS65217_INFO(_nm, _min, _max, _f1, _f2) \
- { \
- .name = _nm, \
- .min_uV = _min, \
- .max_uV = _max, \
- .vsel_to_uv = _f1, \
- .uv_to_vsel = _f2, \
- }
-
static const unsigned int LDO1_VSEL_table[] = {
1000000, 1100000, 1200000, 1250000,
1300000, 1350000, 1400000, 1500000,
@@ -58,88 +51,26 @@ static const unsigned int LDO1_VSEL_table[] = {
2800000, 3000000, 3100000, 3300000,
};
-static int tps65217_vsel_to_uv1(unsigned int vsel)
-{
- int uV = 0;
-
- if (vsel > 63)
- return -EINVAL;
-
- if (vsel <= 24)
- uV = vsel * 25000 + 900000;
- else if (vsel <= 52)
- uV = (vsel - 24) * 50000 + 1500000;
- else if (vsel < 56)
- uV = (vsel - 52) * 100000 + 2900000;
- else
- uV = 3300000;
-
- return uV;
-}
-
-static int tps65217_uv_to_vsel1(int uV, unsigned int *vsel)
-{
- if (uV < 0 || uV > 3300000)
- return -EINVAL;
-
- if (uV <= 1500000)
- *vsel = DIV_ROUND_UP(uV - 900000, 25000);
- else if (uV <= 2900000)
- *vsel = 24 + DIV_ROUND_UP(uV - 1500000, 50000);
- else if (uV < 3300000)
- *vsel = 52 + DIV_ROUND_UP(uV - 2900000, 100000);
- else
- *vsel = 56;
-
- return 0;
-}
-
-static int tps65217_vsel_to_uv2(unsigned int vsel)
-{
- int uV = 0;
-
- if (vsel > 31)
- return -EINVAL;
-
- if (vsel <= 8)
- uV = vsel * 50000 + 1500000;
- else if (vsel <= 13)
- uV = (vsel - 8) * 100000 + 1900000;
- else
- uV = (vsel - 13) * 50000 + 2400000;
-
- return uV;
-}
-
-static int tps65217_uv_to_vsel2(int uV, unsigned int *vsel)
-{
- if (uV < 0 || uV > 3300000)
- return -EINVAL;
-
- if (uV <= 1900000)
- *vsel = DIV_ROUND_UP(uV - 1500000, 50000);
- else if (uV <= 2400000)
- *vsel = 8 + DIV_ROUND_UP(uV - 1900000, 100000);
- else
- *vsel = 13 + DIV_ROUND_UP(uV - 2400000, 50000);
-
- return 0;
-}
+static const struct regulator_linear_range tps65217_uv1_ranges[] = {
+ { .min_uV = 900000, .max_uV = 1500000, .min_sel = 0, .max_sel = 24,
+ .uV_step = 25000 },
+ { .min_uV = 1550000, .max_uV = 1800000, .min_sel = 25, .max_sel = 30,
+ .uV_step = 50000 },
+ { .min_uV = 1850000, .max_uV = 2900000, .min_sel = 31, .max_sel = 52,
+ .uV_step = 50000 },
+ { .min_uV = 3000000, .max_uV = 3200000, .min_sel = 53, .max_sel = 55,
+ .uV_step = 100000 },
+ { .min_uV = 3300000, .max_uV = 3300000, .min_sel = 56, .max_sel = 62,
+ .uV_step = 0 },
+};
-static struct tps_info tps65217_pmic_regs[] = {
- TPS65217_INFO("DCDC1", 900000, 1800000, tps65217_vsel_to_uv1,
- tps65217_uv_to_vsel1),
- TPS65217_INFO("DCDC2", 900000, 3300000, tps65217_vsel_to_uv1,
- tps65217_uv_to_vsel1),
- TPS65217_INFO("DCDC3", 900000, 1500000, tps65217_vsel_to_uv1,
- tps65217_uv_to_vsel1),
- TPS65217_INFO("LDO1", 1000000, 3300000, NULL, NULL),
- TPS65217_INFO("LDO2", 900000, 3300000, tps65217_vsel_to_uv1,
- tps65217_uv_to_vsel1),
- TPS65217_INFO("LDO3", 1800000, 3300000, tps65217_vsel_to_uv2,
- tps65217_uv_to_vsel2),
- TPS65217_INFO("LDO4", 1800000, 3300000, tps65217_vsel_to_uv2,
- tps65217_uv_to_vsel2),
+static const struct regulator_linear_range tps65217_uv2_ranges[] = {
+ { .min_uV = 1500000, .max_uV = 1900000, .min_sel = 0, .max_sel = 8,
+ .uV_step = 50000 },
+ { .min_uV = 2000000, .max_uV = 2400000, .min_sel = 9, .max_sel = 13,
+ .uV_step = 100000 },
+ { .min_uV = 2450000, .max_uV = 3300000, .min_sel = 14, .max_sel = 31,
+ .uV_step = 50000 },
};
static int tps65217_pmic_enable(struct regulator_dev *dev)
@@ -192,49 +123,6 @@ static int tps65217_pmic_set_voltage_sel(struct regulator_dev *dev,
return ret;
}
-static int tps65217_pmic_map_voltage(struct regulator_dev *dev,
- int min_uV, int max_uV)
-{
-
- struct tps65217 *tps = rdev_get_drvdata(dev);
- unsigned int sel, rid = rdev_get_id(dev);
- int ret;
-
- /* LDO1 uses regulator_map_voltage_iterate() */
- if (rid == TPS65217_LDO_1)
- return -EINVAL;
-
- if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4)
- return -EINVAL;
-
- if (min_uV < tps->info[rid]->min_uV)
- min_uV = tps->info[rid]->min_uV;
-
- if (max_uV < tps->info[rid]->min_uV || min_uV > tps->info[rid]->max_uV)
- return -EINVAL;
-
- ret = tps->info[rid]->uv_to_vsel(min_uV, &sel);
- if (ret)
- return ret;
-
- return sel;
-}
-
-static int tps65217_pmic_list_voltage(struct regulator_dev *dev,
- unsigned selector)
-{
- struct tps65217 *tps = rdev_get_drvdata(dev);
- unsigned int rid = rdev_get_id(dev);
-
- if (rid < TPS65217_DCDC_1 || rid > TPS65217_LDO_4)
- return -EINVAL;
-
- if (selector >= dev->desc->n_voltages)
- return -EINVAL;
-
- return tps->info[rid]->vsel_to_uv(selector);
-}
-
/* Operations permitted on DCDCx, LDO2, LDO3 and LDO4 */
static struct regulator_ops tps65217_pmic_ops = {
.is_enabled = regulator_is_enabled_regmap,
@@ -242,8 +130,8 @@ static struct regulator_ops tps65217_pmic_ops = {
.disable = tps65217_pmic_disable,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = tps65217_pmic_set_voltage_sel,
- .list_voltage = tps65217_pmic_list_voltage,
- .map_voltage = tps65217_pmic_map_voltage,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
};
/* Operations permitted on LDO1 */
@@ -259,27 +147,33 @@ static struct regulator_ops tps65217_pmic_ldo1_ops = {
static const struct regulator_desc regulators[] = {
TPS65217_REGULATOR("DCDC1", TPS65217_DCDC_1, tps65217_pmic_ops, 64,
TPS65217_REG_DEFDCDC1, TPS65217_DEFDCDCX_DCDC_MASK,
- TPS65217_ENABLE_DC1_EN, NULL),
+ TPS65217_ENABLE_DC1_EN, NULL, tps65217_uv1_ranges,
+ 2), /* DCDC1 voltage range: 900000 ~ 1800000 */
TPS65217_REGULATOR("DCDC2", TPS65217_DCDC_2, tps65217_pmic_ops, 64,
TPS65217_REG_DEFDCDC2, TPS65217_DEFDCDCX_DCDC_MASK,
- TPS65217_ENABLE_DC2_EN, NULL),
+ TPS65217_ENABLE_DC2_EN, NULL, tps65217_uv1_ranges,
+ ARRAY_SIZE(tps65217_uv1_ranges)),
TPS65217_REGULATOR("DCDC3", TPS65217_DCDC_3, tps65217_pmic_ops, 64,
TPS65217_REG_DEFDCDC3, TPS65217_DEFDCDCX_DCDC_MASK,
- TPS65217_ENABLE_DC3_EN, NULL),
+ TPS65217_ENABLE_DC3_EN, NULL, tps65217_uv1_ranges,
+ 1), /* DCDC3 voltage range: 900000 ~ 1500000 */
TPS65217_REGULATOR("LDO1", TPS65217_LDO_1, tps65217_pmic_ldo1_ops, 16,
TPS65217_REG_DEFLDO1, TPS65217_DEFLDO1_LDO1_MASK,
- TPS65217_ENABLE_LDO1_EN, LDO1_VSEL_table),
+ TPS65217_ENABLE_LDO1_EN, LDO1_VSEL_table, NULL, 0),
TPS65217_REGULATOR("LDO2", TPS65217_LDO_2, tps65217_pmic_ops, 64,
TPS65217_REG_DEFLDO2, TPS65217_DEFLDO2_LDO2_MASK,
- TPS65217_ENABLE_LDO2_EN, NULL),
+ TPS65217_ENABLE_LDO2_EN, NULL, tps65217_uv1_ranges,
+ ARRAY_SIZE(tps65217_uv1_ranges)),
TPS65217_REGULATOR("LDO3", TPS65217_LDO_3, tps65217_pmic_ops, 32,
TPS65217_REG_DEFLS1, TPS65217_DEFLDO3_LDO3_MASK,
TPS65217_ENABLE_LS1_EN | TPS65217_DEFLDO3_LDO3_EN,
- NULL),
+ NULL, tps65217_uv2_ranges,
+ ARRAY_SIZE(tps65217_uv2_ranges)),
TPS65217_REGULATOR("LDO4", TPS65217_LDO_4, tps65217_pmic_ops, 32,
TPS65217_REG_DEFLS2, TPS65217_DEFLDO4_LDO4_MASK,
TPS65217_ENABLE_LS2_EN | TPS65217_DEFLDO4_LDO4_EN,
- NULL),
+ NULL, tps65217_uv2_ranges,
+ ARRAY_SIZE(tps65217_uv2_ranges)),
};
#ifdef CONFIG_OF
@@ -368,8 +262,6 @@ static int tps65217_regulator_probe(struct platform_device *pdev)
continue;
/* Register the regulators */
- tps->info[i] = &tps65217_pmic_regs[i];
-
config.dev = tps->dev;
config.init_data = reg_data;
config.driver_data = tps;
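
The tables above replace the hand-written vsel_to_uv/uv_to_vsel pairs with data the core's linear-range helpers can walk. regulator_list_voltage_linear_range() effectively scans the ranges for the one whose [min_sel, max_sel] contains the selector and returns min_uV + (sel - min_sel) * uV_step; with tps65217_uv1_ranges, for example, selector 30 lands in the second range and resolves to 1550000 + (30 - 25) * 50000 = 1800000 uV. regulator_map_voltage_linear_range() does the reverse lookup, so the DCDC1 and DCDC3 entries that pass only the first two ranges (or the first one) simply constrain those regulators to the lower part of the table, as the inline comments note.
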
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index 1094393155e..62e8d28beab 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -601,7 +601,7 @@ static int pmic_probe(struct spi_device *spi)
struct regulator_config config = { };
int ret = 0, i;
- init_data = dev->platform_data;
+ init_data = dev_get_platdata(dev);
if (!init_data) {
dev_err(dev, "could not find regulator platform data\n");
return -EINVAL;
diff --git a/drivers/regulator/tps65912-regulator.c b/drivers/regulator/tps65912-regulator.c
index 17e994e47dc..281e52ac64b 100644
--- a/drivers/regulator/tps65912-regulator.c
+++ b/drivers/regulator/tps65912-regulator.c
@@ -118,6 +118,15 @@ struct tps65912_reg {
int eco_reg;
};
+static const struct regulator_linear_range tps65912_ldo_ranges[] = {
+ { .min_uV = 800000, .max_uV = 1600000, .min_sel = 0, .max_sel = 32,
+ .uV_step = 25000 },
+ { .min_uV = 1650000, .max_uV = 3000000, .min_sel = 33, .max_sel = 60,
+ .uV_step = 50000 },
+ { .min_uV = 3100000, .max_uV = 3300000, .min_sel = 61, .max_sel = 63,
+ .uV_step = 100000 },
+};
+
static int tps65912_get_range(struct tps65912_reg *pmic, int id)
{
struct tps65912 *mfd = pmic->mfd;
@@ -184,20 +193,6 @@ static unsigned long tps65912_vsel_to_uv_range3(u8 vsel)
return uv;
}
-static unsigned long tps65912_vsel_to_uv_ldo(u8 vsel)
-{
- unsigned long uv = 0;
-
- if (vsel <= 32)
- uv = ((vsel * 25000) + 800000);
- else if (vsel > 32 && vsel <= 60)
- uv = (((vsel - 32) * 50000) + 1600000);
- else if (vsel > 60)
- uv = (((vsel - 60) * 100000) + 3000000);
-
- return uv;
-}
-
static int tps65912_get_ctrl_register(int id)
{
if (id >= TPS65912_REG_DCDC1 && id <= TPS65912_REG_LDO4)
@@ -376,9 +371,6 @@ static int tps65912_list_voltage(struct regulator_dev *dev, unsigned selector)
struct tps65912_reg *pmic = rdev_get_drvdata(dev);
int range, voltage = 0, id = rdev_get_id(dev);
- if (id >= TPS65912_REG_LDO1 && id <= TPS65912_REG_LDO10)
- return tps65912_vsel_to_uv_ldo(selector);
-
if (id > TPS65912_REG_DCDC4)
return -EINVAL;
@@ -456,7 +448,8 @@ static struct regulator_ops tps65912_ops_ldo = {
.disable = tps65912_reg_disable,
.get_voltage_sel = tps65912_get_voltage_sel,
.set_voltage_sel = tps65912_set_voltage_sel,
- .list_voltage = tps65912_list_voltage,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
};
static int tps65912_probe(struct platform_device *pdev)
@@ -495,8 +488,14 @@ static int tps65912_probe(struct platform_device *pdev)
pmic->desc[i].name = info->name;
pmic->desc[i].id = i;
pmic->desc[i].n_voltages = 64;
- pmic->desc[i].ops = (i > TPS65912_REG_DCDC4 ?
- &tps65912_ops_ldo : &tps65912_ops_dcdc);
+ if (i > TPS65912_REG_DCDC4) {
+ pmic->desc[i].ops = &tps65912_ops_ldo;
+ pmic->desc[i].linear_ranges = tps65912_ldo_ranges;
+ pmic->desc[i].n_linear_ranges =
+ ARRAY_SIZE(tps65912_ldo_ranges);
+ } else {
+ pmic->desc[i].ops = &tps65912_ops_dcdc;
+ }
pmic->desc[i].type = REGULATOR_VOLTAGE;
pmic->desc[i].owner = THIS_MODULE;
range = tps65912_get_range(pmic, i);
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 93bc4f456da..78aae4cbb00 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -1108,7 +1108,7 @@ static int twlreg_probe(struct platform_device *pdev)
drvdata = NULL;
} else {
id = pdev->id;
- initdata = pdev->dev.platform_data;
+ initdata = dev_get_platdata(&pdev->dev);
for (i = 0, template = NULL; i < ARRAY_SIZE(twl_of_match); i++) {
template = twl_of_match[i].data;
if (template && template->desc.id == id)
diff --git a/drivers/regulator/userspace-consumer.c b/drivers/regulator/userspace-consumer.c
index a7c8deb5f28..765acc11c9c 100644
--- a/drivers/regulator/userspace-consumer.c
+++ b/drivers/regulator/userspace-consumer.c
@@ -111,7 +111,7 @@ static int regulator_userspace_consumer_probe(struct platform_device *pdev)
struct userspace_consumer_data *drvdata;
int ret;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata)
return -EINVAL;
diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c
index a9d4284ea00..f53e78b9a84 100644
--- a/drivers/regulator/virtual.c
+++ b/drivers/regulator/virtual.c
@@ -287,7 +287,7 @@ static const struct attribute_group regulator_virtual_attr_group = {
static int regulator_virtual_probe(struct platform_device *pdev)
{
- char *reg_id = pdev->dev.platform_data;
+ char *reg_id = dev_get_platdata(&pdev->dev);
struct virtual_consumer_data *drvdata;
int ret;
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 46938cf162a..11861cb861d 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -451,7 +451,7 @@ static void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc,
static int wm831x_buckv_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
- struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+ struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
struct regulator_config config = { };
int id;
struct wm831x_dcdc *dcdc;
@@ -624,7 +624,7 @@ static struct regulator_ops wm831x_buckp_ops = {
static int wm831x_buckp_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
- struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+ struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
struct regulator_config config = { };
int id;
struct wm831x_dcdc *dcdc;
@@ -770,7 +770,7 @@ static struct regulator_ops wm831x_boostp_ops = {
static int wm831x_boostp_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
- struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+ struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
struct regulator_config config = { };
int id = pdev->id % ARRAY_SIZE(pdata->dcdc);
struct wm831x_dcdc *dcdc;
@@ -880,7 +880,7 @@ static struct regulator_ops wm831x_epe_ops = {
static int wm831x_epe_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
- struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+ struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
struct regulator_config config = { };
int id = pdev->id % ARRAY_SIZE(pdata->epe);
struct wm831x_dcdc *dcdc;
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index 16ebdf94d0a..4eb373de1fa 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -151,7 +151,7 @@ static irqreturn_t wm831x_isink_irq(int irq, void *data)
static int wm831x_isink_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
- struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+ struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
struct wm831x_isink *isink;
int id = pdev->id % ARRAY_SIZE(pdata->isink);
struct regulator_config config = { };
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 9ff883f8087..1432b26ef2e 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -62,41 +62,12 @@ static irqreturn_t wm831x_ldo_uv_irq(int irq, void *data)
* General purpose LDOs
*/
-#define WM831X_GP_LDO_SELECTOR_LOW 0xe
-#define WM831X_GP_LDO_MAX_SELECTOR 0x1f
-
-static int wm831x_gp_ldo_list_voltage(struct regulator_dev *rdev,
- unsigned int selector)
-{
- /* 0.9-1.6V in 50mV steps */
- if (selector <= WM831X_GP_LDO_SELECTOR_LOW)
- return 900000 + (selector * 50000);
- /* 1.7-3.3V in 100mV steps */
- if (selector <= WM831X_GP_LDO_MAX_SELECTOR)
- return 1600000 + ((selector - WM831X_GP_LDO_SELECTOR_LOW)
- * 100000);
- return -EINVAL;
-}
-
-static int wm831x_gp_ldo_map_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- int volt, vsel;
-
- if (min_uV < 900000)
- vsel = 0;
- else if (min_uV < 1700000)
- vsel = ((min_uV - 900000) / 50000);
- else
- vsel = ((min_uV - 1700000) / 100000)
- + WM831X_GP_LDO_SELECTOR_LOW + 1;
-
- volt = wm831x_gp_ldo_list_voltage(rdev, vsel);
- if (volt < min_uV || volt > max_uV)
- return -EINVAL;
-
- return vsel;
-}
+static const struct regulator_linear_range wm831x_gp_ldo_ranges[] = {
+ { .min_uV = 900000, .max_uV = 1650000, .min_sel = 0, .max_sel = 14,
+ .uV_step = 50000 },
+ { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31,
+ .uV_step = 100000 },
+};
static int wm831x_gp_ldo_set_suspend_voltage(struct regulator_dev *rdev,
int uV)
@@ -105,7 +76,7 @@ static int wm831x_gp_ldo_set_suspend_voltage(struct regulator_dev *rdev,
struct wm831x *wm831x = ldo->wm831x;
int sel, reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
- sel = wm831x_gp_ldo_map_voltage(rdev, uV, uV);
+ sel = regulator_map_voltage_linear_range(rdev, uV, uV);
if (sel < 0)
return sel;
@@ -230,8 +201,8 @@ static unsigned int wm831x_gp_ldo_get_optimum_mode(struct regulator_dev *rdev,
static struct regulator_ops wm831x_gp_ldo_ops = {
- .list_voltage = wm831x_gp_ldo_list_voltage,
- .map_voltage = wm831x_gp_ldo_map_voltage,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_suspend_voltage = wm831x_gp_ldo_set_suspend_voltage,
@@ -250,7 +221,7 @@ static struct regulator_ops wm831x_gp_ldo_ops = {
static int wm831x_gp_ldo_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
- struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+ struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
struct regulator_config config = { };
int id;
struct wm831x_ldo *ldo;
@@ -290,7 +261,7 @@ static int wm831x_gp_ldo_probe(struct platform_device *pdev)
ldo->desc.id = id;
ldo->desc.type = REGULATOR_VOLTAGE;
- ldo->desc.n_voltages = WM831X_GP_LDO_MAX_SELECTOR + 1;
+ ldo->desc.n_voltages = 32;
ldo->desc.ops = &wm831x_gp_ldo_ops;
ldo->desc.owner = THIS_MODULE;
ldo->desc.vsel_reg = ldo->base + WM831X_LDO_ON_CONTROL;
@@ -299,6 +270,8 @@ static int wm831x_gp_ldo_probe(struct platform_device *pdev)
ldo->desc.enable_mask = 1 << id;
ldo->desc.bypass_reg = ldo->base;
ldo->desc.bypass_mask = WM831X_LDO1_SWI;
+ ldo->desc.linear_ranges = wm831x_gp_ldo_ranges;
+ ldo->desc.n_linear_ranges = ARRAY_SIZE(wm831x_gp_ldo_ranges);
config.dev = pdev->dev.parent;
if (pdata)
@@ -358,43 +331,12 @@ static struct platform_driver wm831x_gp_ldo_driver = {
* Analogue LDOs
*/
-
-#define WM831X_ALDO_SELECTOR_LOW 0xc
-#define WM831X_ALDO_MAX_SELECTOR 0x1f
-
-static int wm831x_aldo_list_voltage(struct regulator_dev *rdev,
- unsigned int selector)
-{
- /* 1-1.6V in 50mV steps */
- if (selector <= WM831X_ALDO_SELECTOR_LOW)
- return 1000000 + (selector * 50000);
- /* 1.7-3.5V in 100mV steps */
- if (selector <= WM831X_ALDO_MAX_SELECTOR)
- return 1600000 + ((selector - WM831X_ALDO_SELECTOR_LOW)
- * 100000);
- return -EINVAL;
-}
-
-static int wm831x_aldo_map_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- int volt, vsel;
-
- if (min_uV < 1000000)
- vsel = 0;
- else if (min_uV < 1700000)
- vsel = ((min_uV - 1000000) / 50000);
- else
- vsel = ((min_uV - 1700000) / 100000)
- + WM831X_ALDO_SELECTOR_LOW + 1;
-
- volt = wm831x_aldo_list_voltage(rdev, vsel);
- if (volt < min_uV || volt > max_uV)
- return -EINVAL;
-
- return vsel;
-
-}
+static const struct regulator_linear_range wm831x_aldo_ranges[] = {
+ { .min_uV = 1000000, .max_uV = 1650000, .min_sel = 0, .max_sel = 12,
+ .uV_step = 50000 },
+ { .min_uV = 1700000, .max_uV = 3500000, .min_sel = 13, .max_sel = 31,
+ .uV_step = 100000 },
+};
static int wm831x_aldo_set_suspend_voltage(struct regulator_dev *rdev,
int uV)
@@ -403,7 +345,7 @@ static int wm831x_aldo_set_suspend_voltage(struct regulator_dev *rdev,
struct wm831x *wm831x = ldo->wm831x;
int sel, reg = ldo->base + WM831X_LDO_SLEEP_CONTROL;
- sel = wm831x_aldo_map_voltage(rdev, uV, uV);
+ sel = regulator_map_voltage_linear_range(rdev, uV, uV);
if (sel < 0)
return sel;
@@ -486,8 +428,8 @@ static int wm831x_aldo_get_status(struct regulator_dev *rdev)
}
static struct regulator_ops wm831x_aldo_ops = {
- .list_voltage = wm831x_aldo_list_voltage,
- .map_voltage = wm831x_aldo_map_voltage,
+ .list_voltage = regulator_list_voltage_linear_range,
+ .map_voltage = regulator_map_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.set_suspend_voltage = wm831x_aldo_set_suspend_voltage,
@@ -505,7 +447,7 @@ static struct regulator_ops wm831x_aldo_ops = {
static int wm831x_aldo_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
- struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+ struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
struct regulator_config config = { };
int id;
struct wm831x_ldo *ldo;
@@ -545,7 +487,9 @@ static int wm831x_aldo_probe(struct platform_device *pdev)
ldo->desc.id = id;
ldo->desc.type = REGULATOR_VOLTAGE;
- ldo->desc.n_voltages = WM831X_ALDO_MAX_SELECTOR + 1;
+ ldo->desc.n_voltages = 32;
+ ldo->desc.linear_ranges = wm831x_aldo_ranges;
+ ldo->desc.n_linear_ranges = ARRAY_SIZE(wm831x_aldo_ranges);
ldo->desc.ops = &wm831x_aldo_ops;
ldo->desc.owner = THIS_MODULE;
ldo->desc.vsel_reg = ldo->base + WM831X_LDO_ON_CONTROL;
@@ -661,7 +605,7 @@ static struct regulator_ops wm831x_alive_ldo_ops = {
static int wm831x_alive_ldo_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
- struct wm831x_pdata *pdata = wm831x->dev->platform_data;
+ struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
struct regulator_config config = { };
int id;
struct wm831x_ldo *ldo;
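For readers unfamiliar with the table-driven replacement above, the following is a minimal illustrative sketch (not part of this patch; linear_range_to_uV() is a hypothetical name) of the selector-to-voltage calculation that regulator_list_voltage_linear_range() performs over tables such as wm831x_gp_ldo_ranges:

static int linear_range_to_uV(const struct regulator_linear_range *ranges,
			      int n_ranges, unsigned int sel)
{
	int i;

	for (i = 0; i < n_ranges; i++) {
		const struct regulator_linear_range *r = &ranges[i];

		if (sel >= r->min_sel && sel <= r->max_sel)
			return r->min_uV + (sel - r->min_sel) * r->uV_step;
	}
	return -EINVAL;	/* selector outside every range */
}

/* e.g. selector 16 in wm831x_gp_ldo_ranges -> 1700000 + 1 * 100000 = 1800000 uV */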
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 7f0fa22ef2a..835b5f0f344 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -542,41 +542,12 @@ static int wm8350_dcdc_set_suspend_mode(struct regulator_dev *rdev,
return 0;
}
-static int wm8350_ldo_list_voltage(struct regulator_dev *rdev,
- unsigned selector)
-{
- if (selector > WM8350_LDO1_VSEL_MASK)
- return -EINVAL;
-
- if (selector < 16)
- return (selector * 50000) + 900000;
- else
- return ((selector - 16) * 100000) + 1800000;
-}
-
-static int wm8350_ldo_map_voltage(struct regulator_dev *rdev, int min_uV,
- int max_uV)
-{
- int volt, sel;
- int min_mV = min_uV / 1000;
- int max_mV = max_uV / 1000;
-
- if (min_mV < 900 || min_mV > 3300)
- return -EINVAL;
- if (max_mV < 900 || max_mV > 3300)
- return -EINVAL;
-
- if (min_mV < 1800) /* step size is 50mV < 1800mV */
- sel = DIV_ROUND_UP(min_uV - 900, 50);
- else /* step size is 100mV > 1800mV */
- sel = DIV_ROUND_UP(min_uV - 1800, 100) + 16;
-
- volt = wm8350_ldo_list_voltage(rdev, sel);
- if (volt < min_uV || volt > max_uV)
- return -EINVAL;
-
- return sel;
-}
+static const struct regulator_linear_range wm8350_ldo_ranges[] = {
+ { .min_uV = 900000, .max_uV = 1750000, .min_sel = 0, .max_sel = 15,
+ .uV_step = 50000 },
+ { .min_uV = 1800000, .max_uV = 3300000, .min_sel = 16, .max_sel = 31,
+ .uV_step = 100000 },
+};
static int wm8350_ldo_set_suspend_voltage(struct regulator_dev *rdev, int uV)
{
@@ -603,7 +574,7 @@ static int wm8350_ldo_set_suspend_voltage(struct regulator_dev *rdev, int uV)
return -EINVAL;
}
- sel = wm8350_ldo_map_voltage(rdev, uV, uV);
+ sel = regulator_map_voltage_linear_range(rdev, uV, uV);
if (sel < 0)
return -EINVAL;
@@ -998,10 +969,10 @@ static struct regulator_ops wm8350_dcdc2_5_ops = {
};
static struct regulator_ops wm8350_ldo_ops = {
- .map_voltage = wm8350_ldo_map_voltage,
+ .map_voltage = regulator_map_voltage_linear_range,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
- .list_voltage = wm8350_ldo_list_voltage,
+ .list_voltage = regulator_list_voltage_linear_range,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
.is_enabled = regulator_is_enabled_regmap,
@@ -1108,6 +1079,8 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_UV_LDO1,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8350_LDO1_VSEL_MASK + 1,
+ .linear_ranges = wm8350_ldo_ranges,
+ .n_linear_ranges = ARRAY_SIZE(wm8350_ldo_ranges),
.vsel_reg = WM8350_LDO1_CONTROL,
.vsel_mask = WM8350_LDO1_VSEL_MASK,
.enable_reg = WM8350_DCDC_LDO_REQUESTED,
@@ -1121,6 +1094,8 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_UV_LDO2,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8350_LDO2_VSEL_MASK + 1,
+ .linear_ranges = wm8350_ldo_ranges,
+ .n_linear_ranges = ARRAY_SIZE(wm8350_ldo_ranges),
.vsel_reg = WM8350_LDO2_CONTROL,
.vsel_mask = WM8350_LDO2_VSEL_MASK,
.enable_reg = WM8350_DCDC_LDO_REQUESTED,
@@ -1134,6 +1109,8 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_UV_LDO3,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8350_LDO3_VSEL_MASK + 1,
+ .linear_ranges = wm8350_ldo_ranges,
+ .n_linear_ranges = ARRAY_SIZE(wm8350_ldo_ranges),
.vsel_reg = WM8350_LDO3_CONTROL,
.vsel_mask = WM8350_LDO3_VSEL_MASK,
.enable_reg = WM8350_DCDC_LDO_REQUESTED,
@@ -1147,6 +1124,8 @@ static const struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
.irq = WM8350_IRQ_UV_LDO4,
.type = REGULATOR_VOLTAGE,
.n_voltages = WM8350_LDO4_VSEL_MASK + 1,
+ .linear_ranges = wm8350_ldo_ranges,
+ .n_linear_ranges = ARRAY_SIZE(wm8350_ldo_ranges),
.vsel_reg = WM8350_LDO4_CONTROL,
.vsel_mask = WM8350_LDO4_VSEL_MASK,
.enable_reg = WM8350_DCDC_LDO_REQUESTED,
@@ -1222,7 +1201,7 @@ static int wm8350_regulator_probe(struct platform_device *pdev)
}
config.dev = &pdev->dev;
- config.init_data = pdev->dev.platform_data;
+ config.init_data = dev_get_platdata(&pdev->dev);
config.driver_data = dev_get_drvdata(&pdev->dev);
config.regmap = wm8350->regmap;
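Complementing the sketch above, the voltage-to-selector direction used by regulator_map_voltage_linear_range() works roughly as follows (again an illustrative sketch, not kernel code; linear_range_map_uV() is a hypothetical name):

static int linear_range_map_uV(const struct regulator_linear_range *ranges,
			       int n_ranges, int min_uV, int max_uV)
{
	int i, sel, volt;

	for (i = 0; i < n_ranges; i++) {
		const struct regulator_linear_range *r = &ranges[i];

		if (min_uV > r->max_uV)
			continue;	/* request lies above this range */
		sel = r->min_sel;
		if (min_uV > r->min_uV)
			sel += DIV_ROUND_UP(min_uV - r->min_uV, r->uV_step);
		volt = r->min_uV + (sel - r->min_sel) * r->uV_step;
		if (volt >= min_uV && volt <= max_uV)
			return sel;
	}
	return -EINVAL;
}

/* e.g. a request of 2200000 uV against wm8350_ldo_ranges maps to selector 16 + 4 = 20 */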
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index a09f03ee550..58f51bec13f 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -19,47 +19,21 @@
#include <linux/regulator/driver.h>
#include <linux/mfd/wm8400-private.h>
-static int wm8400_ldo_list_voltage(struct regulator_dev *dev,
- unsigned selector)
-{
- if (selector > WM8400_LDO1_VSEL_MASK)
- return -EINVAL;
-
- if (selector < 15)
- return 900000 + (selector * 50000);
- else
- return 1700000 + ((selector - 15) * 100000);
-}
-
-static int wm8400_ldo_map_voltage(struct regulator_dev *dev,
- int min_uV, int max_uV)
-{
- u16 val;
- int volt;
-
- if (min_uV < 900000 || min_uV > 3300000)
- return -EINVAL;
-
- if (min_uV < 1700000) /* Steps of 50mV from 900mV; */
- val = DIV_ROUND_UP(min_uV - 900000, 50000);
- else /* Steps of 100mV from 1700mV */
- val = DIV_ROUND_UP(min_uV - 1700000, 100000) + 15;
-
- volt = wm8400_ldo_list_voltage(dev, val);
- if (volt < min_uV || volt > max_uV)
- return -EINVAL;
-
- return val;
-}
+static const struct regulator_linear_range wm8400_ldo_ranges[] = {
+ { .min_uV = 900000, .max_uV = 1600000, .min_sel = 0, .max_sel = 14,
+ .uV_step = 50000 },
+ { .min_uV = 1700000, .max_uV = 3300000, .min_sel = 15, .max_sel = 31,
+ .uV_step = 100000 },
+};
static struct regulator_ops wm8400_ldo_ops = {
.is_enabled = regulator_is_enabled_regmap,
.enable = regulator_enable_regmap,
.disable = regulator_disable_regmap,
- .list_voltage = wm8400_ldo_list_voltage,
+ .list_voltage = regulator_list_voltage_linear_range,
.get_voltage_sel = regulator_get_voltage_sel_regmap,
.set_voltage_sel = regulator_set_voltage_sel_regmap,
- .map_voltage = wm8400_ldo_map_voltage,
+ .map_voltage = regulator_map_voltage_linear_range,
};
static unsigned int wm8400_dcdc_get_mode(struct regulator_dev *dev)
@@ -155,6 +129,8 @@ static struct regulator_desc regulators[] = {
.enable_reg = WM8400_LDO1_CONTROL,
.enable_mask = WM8400_LDO1_ENA,
.n_voltages = WM8400_LDO1_VSEL_MASK + 1,
+ .linear_ranges = wm8400_ldo_ranges,
+ .n_linear_ranges = ARRAY_SIZE(wm8400_ldo_ranges),
.vsel_reg = WM8400_LDO1_CONTROL,
.vsel_mask = WM8400_LDO1_VSEL_MASK,
.type = REGULATOR_VOLTAGE,
@@ -167,6 +143,8 @@ static struct regulator_desc regulators[] = {
.enable_reg = WM8400_LDO2_CONTROL,
.enable_mask = WM8400_LDO2_ENA,
.n_voltages = WM8400_LDO2_VSEL_MASK + 1,
+ .linear_ranges = wm8400_ldo_ranges,
+ .n_linear_ranges = ARRAY_SIZE(wm8400_ldo_ranges),
.type = REGULATOR_VOLTAGE,
.vsel_reg = WM8400_LDO2_CONTROL,
.vsel_mask = WM8400_LDO2_VSEL_MASK,
@@ -179,6 +157,8 @@ static struct regulator_desc regulators[] = {
.enable_reg = WM8400_LDO3_CONTROL,
.enable_mask = WM8400_LDO3_ENA,
.n_voltages = WM8400_LDO3_VSEL_MASK + 1,
+ .linear_ranges = wm8400_ldo_ranges,
+ .n_linear_ranges = ARRAY_SIZE(wm8400_ldo_ranges),
.vsel_reg = WM8400_LDO3_CONTROL,
.vsel_mask = WM8400_LDO3_VSEL_MASK,
.type = REGULATOR_VOLTAGE,
@@ -191,6 +171,8 @@ static struct regulator_desc regulators[] = {
.enable_reg = WM8400_LDO4_CONTROL,
.enable_mask = WM8400_LDO4_ENA,
.n_voltages = WM8400_LDO4_VSEL_MASK + 1,
+ .linear_ranges = wm8400_ldo_ranges,
+ .n_linear_ranges = ARRAY_SIZE(wm8400_ldo_ranges),
.vsel_reg = WM8400_LDO4_CONTROL,
.vsel_mask = WM8400_LDO4_VSEL_MASK,
.type = REGULATOR_VOLTAGE,
@@ -233,7 +215,7 @@ static int wm8400_regulator_probe(struct platform_device *pdev)
struct regulator_dev *rdev;
config.dev = &pdev->dev;
- config.init_data = pdev->dev.platform_data;
+ config.init_data = dev_get_platdata(&pdev->dev);
config.driver_data = wm8400;
config.regmap = wm8400->regmap;
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index 8f2a8a7a3f9..5ee2a208457 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -125,7 +125,7 @@ static const struct regulator_init_data wm8994_ldo_default[] = {
static int wm8994_ldo_probe(struct platform_device *pdev)
{
struct wm8994 *wm8994 = dev_get_drvdata(pdev->dev.parent);
- struct wm8994_pdata *pdata = wm8994->dev->platform_data;
+ struct wm8994_pdata *pdata = dev_get_platdata(wm8994->dev);
int id = pdev->id % ARRAY_SIZE(pdata->ldo);
struct regulator_config config = { };
struct wm8994_ldo *ldo;
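The dev_get_platdata() conversions throughout this series are purely mechanical; the accessor is essentially the following trivial inline from include/linux/device.h:

static inline void *dev_get_platdata(const struct device *dev)
{
	return dev->platform_data;
}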
diff --git a/drivers/remoteproc/da8xx_remoteproc.c b/drivers/remoteproc/da8xx_remoteproc.c
index 9b2e60afa1a..129f7b99786 100644
--- a/drivers/remoteproc/da8xx_remoteproc.c
+++ b/drivers/remoteproc/da8xx_remoteproc.c
@@ -165,7 +165,7 @@ static int reset_assert(struct device *dev)
dsp_clk = clk_get(dev, NULL);
if (IS_ERR(dsp_clk)) {
dev_err(dev, "clk_get error: %ld\n", PTR_ERR(dsp_clk));
- return PTR_RET(dsp_clk);
+ return PTR_ERR(dsp_clk);
}
davinci_clk_reset_assert(dsp_clk);
diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c
index 9c8c19441cc..4385ca4503d 100644
--- a/drivers/rtc/rtc-da9052.c
+++ b/drivers/rtc/rtc-da9052.c
@@ -250,7 +250,7 @@ static int da9052_rtc_probe(struct platform_device *pdev)
rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
&da9052_rtc_ops, THIS_MODULE);
- return PTR_RET(rtc->rtc);
+ return PTR_ERR_OR_ZERO(rtc->rtc);
}
static struct platform_driver da9052_rtc_driver = {
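PTR_RET() was an older name for the same helper; PTR_ERR_OR_ZERO() behaves essentially as follows, returning 0 for a valid pointer and the encoded error otherwise:

static inline int PTR_ERR_OR_ZERO(const void *ptr)
{
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	return 0;
}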
diff --git a/drivers/rtc/rtc-isl12022.c b/drivers/rtc/rtc-isl12022.c
index 5dbdc440571..03b89112942 100644
--- a/drivers/rtc/rtc-isl12022.c
+++ b/drivers/rtc/rtc-isl12022.c
@@ -268,7 +268,7 @@ static int isl12022_probe(struct i2c_client *client,
isl12022->rtc = devm_rtc_device_register(&client->dev,
isl12022_driver.driver.name,
&isl12022_rtc_ops, THIS_MODULE);
- return PTR_RET(isl12022->rtc);
+ return PTR_ERR_OR_ZERO(isl12022->rtc);
}
static const struct i2c_device_id isl12022_id[] = {
diff --git a/drivers/rtc/rtc-m48t35.c b/drivers/rtc/rtc-m48t35.c
index 23c3779a5f2..411adb3f86a 100644
--- a/drivers/rtc/rtc-m48t35.c
+++ b/drivers/rtc/rtc-m48t35.c
@@ -175,7 +175,7 @@ static int m48t35_probe(struct platform_device *pdev)
priv->rtc = devm_rtc_device_register(&pdev->dev, "m48t35",
&m48t35_ops, THIS_MODULE);
- return PTR_RET(priv->rtc);
+ return PTR_ERR_OR_ZERO(priv->rtc);
}
static struct platform_driver m48t35_platform_driver = {
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index 710c3a5aa6f..63b558c4819 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -264,7 +264,7 @@ static int pcf8563_probe(struct i2c_client *client,
pcf8563_driver.driver.name,
&pcf8563_rtc_ops, THIS_MODULE);
- return PTR_RET(pcf8563->rtc);
+ return PTR_ERR_OR_ZERO(pcf8563->rtc);
}
static const struct i2c_device_id pcf8563_id[] = {
diff --git a/drivers/rtc/rtc-pcf8583.c b/drivers/rtc/rtc-pcf8583.c
index 843a745c42f..c2639845186 100644
--- a/drivers/rtc/rtc-pcf8583.c
+++ b/drivers/rtc/rtc-pcf8583.c
@@ -285,7 +285,7 @@ static int pcf8583_probe(struct i2c_client *client,
pcf8583_driver.driver.name,
&pcf8583_rtc_ops, THIS_MODULE);
- return PTR_RET(pcf8583->rtc);
+ return PTR_ERR_OR_ZERO(pcf8583->rtc);
}
static const struct i2c_device_id pcf8583_id[] = {
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 767fee2ab34..26019531db1 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -23,6 +23,7 @@
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
+#include <linux/delay.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <linux/of_device.h>
@@ -119,24 +120,39 @@ static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev)
}
#endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */
-static void stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data)
+static int stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data)
{
+ int timeout = 5000; /* 3ms according to i.MX28 Ref Manual */
/*
- * The datasheet doesn't say which way round the
- * NEW_REGS/STALE_REGS bitfields go. In fact it's 0x1=P0,
- * 0x2=P1, .., 0x20=P5, 0x40=ALARM, 0x80=SECONDS
+ * The i.MX28 Applications Processor Reference Manual, Rev. 1, 2010
+ * states:
+ * | The order in which registers are updated is
+ * | Persistent 0, 1, 2, 3, 4, 5, Alarm, Seconds.
+ * | (This list is in bitfield order, from LSB to MSB, as they would
+ * | appear in the STALE_REGS and NEW_REGS bitfields of the HW_RTC_STAT
+ * | register. For example, the Seconds register corresponds to
+ * | STALE_REGS or NEW_REGS containing 0x80.)
*/
- while (readl(rtc_data->io + STMP3XXX_RTC_STAT) &
- (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT))
- cpu_relax();
+ do {
+ if (!(readl(rtc_data->io + STMP3XXX_RTC_STAT) &
+ (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)))
+ return 0;
+ udelay(1);
+ } while (--timeout > 0);
+ return (readl(rtc_data->io + STMP3XXX_RTC_STAT) &
+ (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) ? -ETIME : 0;
}
/* Time read/write */
static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
{
+ int ret;
struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
- stmp3xxx_wait_time(rtc_data);
+ ret = stmp3xxx_wait_time(rtc_data);
+ if (ret)
+ return ret;
+
rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm);
return 0;
}
@@ -146,8 +162,7 @@ static int stmp3xxx_rtc_set_mmss(struct device *dev, unsigned long t)
struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev);
writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS);
- stmp3xxx_wait_time(rtc_data);
- return 0;
+ return stmp3xxx_wait_time(rtc_data);
}
/* interrupt(s) handler */
diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c
index 4b26f8672b2..babd43bf3dd 100644
--- a/drivers/rtc/rtc-sysfs.c
+++ b/drivers/rtc/rtc-sysfs.c
@@ -25,15 +25,14 @@
*/
static ssize_t
-rtc_sysfs_show_name(struct device *dev, struct device_attribute *attr,
- char *buf)
+name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%s\n", to_rtc_device(dev)->name);
}
+static DEVICE_ATTR_RO(name);
static ssize_t
-rtc_sysfs_show_date(struct device *dev, struct device_attribute *attr,
- char *buf)
+date_show(struct device *dev, struct device_attribute *attr, char *buf)
{
ssize_t retval;
struct rtc_time tm;
@@ -46,10 +45,10 @@ rtc_sysfs_show_date(struct device *dev, struct device_attribute *attr,
return retval;
}
+static DEVICE_ATTR_RO(date);
static ssize_t
-rtc_sysfs_show_time(struct device *dev, struct device_attribute *attr,
- char *buf)
+time_show(struct device *dev, struct device_attribute *attr, char *buf)
{
ssize_t retval;
struct rtc_time tm;
@@ -62,10 +61,10 @@ rtc_sysfs_show_time(struct device *dev, struct device_attribute *attr,
return retval;
}
+static DEVICE_ATTR_RO(time);
static ssize_t
-rtc_sysfs_show_since_epoch(struct device *dev, struct device_attribute *attr,
- char *buf)
+since_epoch_show(struct device *dev, struct device_attribute *attr, char *buf)
{
ssize_t retval;
struct rtc_time tm;
@@ -79,16 +78,16 @@ rtc_sysfs_show_since_epoch(struct device *dev, struct device_attribute *attr,
return retval;
}
+static DEVICE_ATTR_RO(since_epoch);
static ssize_t
-rtc_sysfs_show_max_user_freq(struct device *dev, struct device_attribute *attr,
- char *buf)
+max_user_freq_show(struct device *dev, struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", to_rtc_device(dev)->max_user_freq);
}
static ssize_t
-rtc_sysfs_set_max_user_freq(struct device *dev, struct device_attribute *attr,
+max_user_freq_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t n)
{
struct rtc_device *rtc = to_rtc_device(dev);
@@ -101,6 +100,7 @@ rtc_sysfs_set_max_user_freq(struct device *dev, struct device_attribute *attr,
return n;
}
+static DEVICE_ATTR_RW(max_user_freq);
/**
* rtc_sysfs_show_hctosys - indicate if the given RTC set the system time
@@ -109,8 +109,7 @@ rtc_sysfs_set_max_user_freq(struct device *dev, struct device_attribute *attr,
* boot or resume event.
*/
static ssize_t
-rtc_sysfs_show_hctosys(struct device *dev, struct device_attribute *attr,
- char *buf)
+hctosys_show(struct device *dev, struct device_attribute *attr, char *buf)
{
#ifdef CONFIG_RTC_HCTOSYS_DEVICE
if (rtc_hctosys_ret == 0 &&
@@ -121,17 +120,18 @@ rtc_sysfs_show_hctosys(struct device *dev, struct device_attribute *attr,
#endif
return sprintf(buf, "0\n");
}
-
-static struct device_attribute rtc_attrs[] = {
- __ATTR(name, S_IRUGO, rtc_sysfs_show_name, NULL),
- __ATTR(date, S_IRUGO, rtc_sysfs_show_date, NULL),
- __ATTR(time, S_IRUGO, rtc_sysfs_show_time, NULL),
- __ATTR(since_epoch, S_IRUGO, rtc_sysfs_show_since_epoch, NULL),
- __ATTR(max_user_freq, S_IRUGO | S_IWUSR, rtc_sysfs_show_max_user_freq,
- rtc_sysfs_set_max_user_freq),
- __ATTR(hctosys, S_IRUGO, rtc_sysfs_show_hctosys, NULL),
- { },
+static DEVICE_ATTR_RO(hctosys);
+
+static struct attribute *rtc_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_date.attr,
+ &dev_attr_time.attr,
+ &dev_attr_since_epoch.attr,
+ &dev_attr_max_user_freq.attr,
+ &dev_attr_hctosys.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(rtc);
static ssize_t
rtc_sysfs_show_wakealarm(struct device *dev, struct device_attribute *attr,
@@ -261,5 +261,5 @@ void rtc_sysfs_del_device(struct rtc_device *rtc)
void __init rtc_sysfs_init(struct class *rtc_class)
{
- rtc_class->dev_attrs = rtc_attrs;
+ rtc_class->dev_groups = rtc_groups;
}
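For reference, the macros used in the rtc-sysfs conversion above expand roughly as follows (an illustrative, simplified expansion of include/linux/device.h and include/linux/sysfs.h):

/* static DEVICE_ATTR_RO(name) is roughly: */
static struct device_attribute dev_attr_name =
	__ATTR(name, S_IRUGO, name_show, NULL);

/* ATTRIBUTE_GROUPS(rtc) is roughly: */
static const struct attribute_group rtc_group = {
	.attrs = rtc_attrs,
};
static const struct attribute_group *rtc_groups[] = {
	&rtc_group,
	NULL,
};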
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 02faf3c4e0d..c2e80d7ca5e 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -524,6 +524,8 @@ static int twl_rtc_probe(struct platform_device *pdev)
if (ret < 0)
goto out1;
+ device_init_wakeup(&pdev->dev, 1);
+
rtc = rtc_device_register(pdev->name,
&pdev->dev, &twl_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
@@ -542,7 +544,6 @@ static int twl_rtc_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, rtc);
- device_init_wakeup(&pdev->dev, 1);
return 0;
out2:
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 17150a77898..451bf99582f 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2392,6 +2392,12 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
rc = cqr->intrc;
else
rc = -EIO;
+
+ /* kick tasklets */
+ dasd_schedule_device_bh(device);
+ if (device->block)
+ dasd_schedule_block_bh(device->block);
+
return rc;
}
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 58bc6eb49de..2ead7e78c45 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -930,7 +930,7 @@ dasd_use_raw_store(struct device *dev, struct device_attribute *attr,
if (IS_ERR(devmap))
return PTR_ERR(devmap);
- if ((strict_strtoul(buf, 10, &val) != 0) || val > 1)
+ if ((kstrtoul(buf, 10, &val) != 0) || val > 1)
return -EINVAL;
spin_lock(&dasd_devmap_lock);
@@ -1225,7 +1225,7 @@ dasd_expires_store(struct device *dev, struct device_attribute *attr,
if (IS_ERR(device))
return -ENODEV;
- if ((strict_strtoul(buf, 10, &val) != 0) ||
+ if ((kstrtoul(buf, 10, &val) != 0) ||
(val > DASD_EXPIRES_MAX) || val == 0) {
dasd_put_device(device);
return -EINVAL;
@@ -1265,7 +1265,7 @@ dasd_retries_store(struct device *dev, struct device_attribute *attr,
if (IS_ERR(device))
return -ENODEV;
- if ((strict_strtoul(buf, 10, &val) != 0) ||
+ if ((kstrtoul(buf, 10, &val) != 0) ||
(val > DASD_RETRIES_MAX)) {
dasd_put_device(device);
return -EINVAL;
@@ -1307,7 +1307,7 @@ dasd_timeout_store(struct device *dev, struct device_attribute *attr,
if (IS_ERR(device) || !device->block)
return -ENODEV;
- if ((strict_strtoul(buf, 10, &val) != 0) ||
+ if ((kstrtoul(buf, 10, &val) != 0) ||
val > UINT_MAX / HZ) {
dasd_put_device(device);
return -EINVAL;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index e61a6deea3c..5adb2042e82 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -85,6 +85,8 @@ MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
static struct ccw_driver dasd_eckd_driver; /* see below */
+static void *rawpadpage;
+
#define INIT_CQR_OK 0
#define INIT_CQR_UNFORMATTED 1
#define INIT_CQR_ERROR 2
@@ -3237,18 +3239,26 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
unsigned int seg_len, len_to_track_end;
unsigned int first_offs;
unsigned int cidaw, cplength, datasize;
- sector_t first_trk, last_trk;
+ sector_t first_trk, last_trk, sectors;
+ sector_t start_padding_sectors, end_sector_offset, end_padding_sectors;
unsigned int pfx_datasize;
/*
 * raw track access needs to be a multiple of 64k and on a 64k boundary
+ * For read requests we can fix an incorrect alignment by padding
+ * the request with dummy pages.
*/
- if ((blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK) != 0) {
- cqr = ERR_PTR(-EINVAL);
- goto out;
- }
- if (((blk_rq_pos(req) + blk_rq_sectors(req)) %
- DASD_RAW_SECTORS_PER_TRACK) != 0) {
+ start_padding_sectors = blk_rq_pos(req) % DASD_RAW_SECTORS_PER_TRACK;
+ end_sector_offset = (blk_rq_pos(req) + blk_rq_sectors(req)) %
+ DASD_RAW_SECTORS_PER_TRACK;
+ end_padding_sectors = (DASD_RAW_SECTORS_PER_TRACK - end_sector_offset) %
+ DASD_RAW_SECTORS_PER_TRACK;
+ basedev = block->base;
+ if ((start_padding_sectors || end_padding_sectors) &&
+ (rq_data_dir(req) == WRITE)) {
+ DBF_DEV_EVENT(DBF_ERR, basedev,
+ "raw write not track aligned (%lu,%lu) req %p",
+ start_padding_sectors, end_padding_sectors, req);
cqr = ERR_PTR(-EINVAL);
goto out;
}
@@ -3258,7 +3268,6 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
DASD_RAW_SECTORS_PER_TRACK;
trkcount = last_trk - first_trk + 1;
first_offs = 0;
- basedev = block->base;
if (rq_data_dir(req) == READ)
cmd = DASD_ECKD_CCW_READ_TRACK;
@@ -3307,12 +3316,26 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
}
idaws = (unsigned long *)(cqr->data + pfx_datasize);
-
len_to_track_end = 0;
-
+ if (start_padding_sectors) {
+ ccw[-1].flags |= CCW_FLAG_CC;
+ ccw->cmd_code = cmd;
+ /* maximum 3390 track size */
+ ccw->count = 57326;
+ /* 64k map to one track */
+ len_to_track_end = 65536 - start_padding_sectors * 512;
+ ccw->cda = (__u32)(addr_t)idaws;
+ ccw->flags |= CCW_FLAG_IDA;
+ ccw->flags |= CCW_FLAG_SLI;
+ ccw++;
+ for (sectors = 0; sectors < start_padding_sectors; sectors += 8)
+ idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
+ }
rq_for_each_segment(bv, req, iter) {
dst = page_address(bv->bv_page) + bv->bv_offset;
seg_len = bv->bv_len;
+ if (cmd == DASD_ECKD_CCW_READ_TRACK)
+ memset(dst, 0, seg_len);
if (!len_to_track_end) {
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = cmd;
@@ -3328,7 +3351,8 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
len_to_track_end -= seg_len;
idaws = idal_create_words(idaws, dst, seg_len);
}
-
+ for (sectors = 0; sectors < end_padding_sectors; sectors += 8)
+ idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
if (blk_noretry_request(req) ||
block->base->features & DASD_FEATURE_FAILFAST)
set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
@@ -4479,12 +4503,19 @@ dasd_eckd_init(void)
kfree(dasd_reserve_req);
return -ENOMEM;
}
+ rawpadpage = (void *)__get_free_page(GFP_KERNEL);
+ if (!rawpadpage) {
+ kfree(path_verification_worker);
+ kfree(dasd_reserve_req);
+ return -ENOMEM;
+ }
ret = ccw_driver_register(&dasd_eckd_driver);
if (!ret)
wait_for_device_probe();
else {
kfree(path_verification_worker);
kfree(dasd_reserve_req);
+ free_page((unsigned long)rawpadpage);
}
return ret;
}
@@ -4495,6 +4526,7 @@ dasd_eckd_cleanup(void)
ccw_driver_unregister(&dasd_eckd_driver);
kfree(path_verification_worker);
kfree(dasd_reserve_req);
+ free_page((unsigned long)rawpadpage);
}
module_init(dasd_eckd_init);
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index 8d11f773a75..e1e88486b2b 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -124,10 +124,15 @@ dasd_default_erp_action(struct dasd_ccw_req *cqr)
struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr)
{
int success;
+ unsigned long long startclk, stopclk;
+ struct dasd_device *startdev;
BUG_ON(cqr->refers == NULL || cqr->function == NULL);
success = cqr->status == DASD_CQR_DONE;
+ startclk = cqr->startclk;
+ stopclk = cqr->stopclk;
+ startdev = cqr->startdev;
/* free all ERPs - but NOT the original cqr */
while (cqr->refers != NULL) {
@@ -142,6 +147,9 @@ struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *cqr)
}
/* set corresponding status to original cqr */
+ cqr->startclk = startclk;
+ cqr->stopclk = stopclk;
+ cqr->startdev = startdev;
if (success)
cqr->status = DASD_CQR_DONE;
else {
@@ -160,11 +168,13 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
device = cqr->startdev;
if (cqr->intrc == -ETIMEDOUT) {
- dev_err(&device->cdev->dev, "cqr %p timeout error", cqr);
+ dev_err(&device->cdev->dev,
+ "A timeout error occurred for cqr %p", cqr);
return;
}
if (cqr->intrc == -ENOLINK) {
- dev_err(&device->cdev->dev, "cqr %p transport error", cqr);
+ dev_err(&device->cdev->dev,
+ "A transport error occurred for cqr %p", cqr);
return;
}
/* dump sense data */
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 444d36183a2..94415620747 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -32,7 +32,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
struct device *dev;
s390_adjust_jiffies();
- pr_warning("cpu capability changed.\n");
+ pr_info("CPU capability may have changed\n");
get_online_cpus();
for_each_online_cpu(cpu) {
dev = get_cpu_device(cpu);
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c
index 91edbd7ee80..d028fd800c9 100644
--- a/drivers/s390/cio/airq.c
+++ b/drivers/s390/cio/airq.c
@@ -81,15 +81,185 @@ void unregister_adapter_interrupt(struct airq_struct *airq)
}
EXPORT_SYMBOL(unregister_adapter_interrupt);
-void do_adapter_IO(u8 isc)
+static irqreturn_t do_airq_interrupt(int irq, void *dummy)
{
+ struct tpi_info *tpi_info;
struct airq_struct *airq;
struct hlist_head *head;
- head = &airq_lists[isc];
+ __this_cpu_write(s390_idle.nohz_delay, 1);
+ tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
+ head = &airq_lists[tpi_info->isc];
rcu_read_lock();
hlist_for_each_entry_rcu(airq, head, list)
if ((*airq->lsi_ptr & airq->lsi_mask) != 0)
airq->handler(airq);
rcu_read_unlock();
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction airq_interrupt = {
+ .name = "AIO",
+ .handler = do_airq_interrupt,
+};
+
+void __init init_airq_interrupts(void)
+{
+ irq_set_chip_and_handler(THIN_INTERRUPT,
+ &dummy_irq_chip, handle_percpu_irq);
+ setup_irq(THIN_INTERRUPT, &airq_interrupt);
+}
+
+/**
+ * airq_iv_create - create an interrupt vector
+ * @bits: number of bits in the interrupt vector
+ * @flags: allocation flags
+ *
+ * Returns a pointer to an interrupt vector structure
+ */
+struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags)
+{
+ struct airq_iv *iv;
+ unsigned long size;
+
+ iv = kzalloc(sizeof(*iv), GFP_KERNEL);
+ if (!iv)
+ goto out;
+ iv->bits = bits;
+ size = BITS_TO_LONGS(bits) * sizeof(unsigned long);
+ iv->vector = kzalloc(size, GFP_KERNEL);
+ if (!iv->vector)
+ goto out_free;
+ if (flags & AIRQ_IV_ALLOC) {
+ iv->avail = kmalloc(size, GFP_KERNEL);
+ if (!iv->avail)
+ goto out_free;
+ memset(iv->avail, 0xff, size);
+ iv->end = 0;
+ } else
+ iv->end = bits;
+ if (flags & AIRQ_IV_BITLOCK) {
+ iv->bitlock = kzalloc(size, GFP_KERNEL);
+ if (!iv->bitlock)
+ goto out_free;
+ }
+ if (flags & AIRQ_IV_PTR) {
+ size = bits * sizeof(unsigned long);
+ iv->ptr = kzalloc(size, GFP_KERNEL);
+ if (!iv->ptr)
+ goto out_free;
+ }
+ if (flags & AIRQ_IV_DATA) {
+ size = bits * sizeof(unsigned int);
+ iv->data = kzalloc(size, GFP_KERNEL);
+ if (!iv->data)
+ goto out_free;
+ }
+ spin_lock_init(&iv->lock);
+ return iv;
+
+out_free:
+ kfree(iv->ptr);
+ kfree(iv->bitlock);
+ kfree(iv->avail);
+ kfree(iv->vector);
+ kfree(iv);
+out:
+ return NULL;
+}
+EXPORT_SYMBOL(airq_iv_create);
+
+/**
+ * airq_iv_release - release an interrupt vector
+ * @iv: pointer to interrupt vector structure
+ */
+void airq_iv_release(struct airq_iv *iv)
+{
+ kfree(iv->data);
+ kfree(iv->ptr);
+ kfree(iv->bitlock);
+ kfree(iv->vector);
+ kfree(iv->avail);
+ kfree(iv);
+}
+EXPORT_SYMBOL(airq_iv_release);
+
+/**
+ * airq_iv_alloc_bit - allocate an irq bit from an interrupt vector
+ * @iv: pointer to an interrupt vector structure
+ *
+ * Returns the bit number of the allocated irq, or -1UL if no bit
+ * is available or the AIRQ_IV_ALLOC flag has not been specified
+ */
+unsigned long airq_iv_alloc_bit(struct airq_iv *iv)
+{
+ const unsigned long be_to_le = BITS_PER_LONG - 1;
+ unsigned long bit;
+
+ if (!iv->avail)
+ return -1UL;
+ spin_lock(&iv->lock);
+ bit = find_first_bit_left(iv->avail, iv->bits);
+ if (bit < iv->bits) {
+ clear_bit(bit ^ be_to_le, iv->avail);
+ if (bit >= iv->end)
+ iv->end = bit + 1;
+ } else
+ bit = -1UL;
+ spin_unlock(&iv->lock);
+ return bit;
+
+}
+EXPORT_SYMBOL(airq_iv_alloc_bit);
+
+/**
+ * airq_iv_free_bit - free an irq bit of an interrupt vector
+ * @iv: pointer to interrupt vector structure
+ * @bit: number of the irq bit to free
+ */
+void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit)
+{
+ const unsigned long be_to_le = BITS_PER_LONG - 1;
+
+ if (!iv->avail)
+ return;
+ spin_lock(&iv->lock);
+ /* Clear (possibly left over) interrupt bit */
+ clear_bit(bit ^ be_to_le, iv->vector);
+ /* Make the bit position available again */
+ set_bit(bit ^ be_to_le, iv->avail);
+ if (bit == iv->end - 1) {
+ /* Find new end of bit-field */
+ while (--iv->end > 0)
+ if (!test_bit((iv->end - 1) ^ be_to_le, iv->avail))
+ break;
+ }
+ spin_unlock(&iv->lock);
+}
+EXPORT_SYMBOL(airq_iv_free_bit);
+
+/**
+ * airq_iv_scan - scan interrupt vector for non-zero bits
+ * @iv: pointer to interrupt vector structure
+ * @start: bit number to start the search
+ * @end: bit number to end the search
+ *
+ * Returns the bit number of the next non-zero interrupt bit, or
+ * -1UL if the scan completed without finding any more non-zero bits.
+ */
+unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start,
+ unsigned long end)
+{
+ const unsigned long be_to_le = BITS_PER_LONG - 1;
+ unsigned long bit;
+
+ /* Find non-zero bit starting from 'start'. */
+ bit = find_next_bit_left(iv->vector, end, start);
+ if (bit >= end)
+ return -1UL;
+ /* Clear interrupt bit (find left uses big-endian bit numbers) */
+ clear_bit(bit ^ be_to_le, iv->vector);
+ return bit;
}
+EXPORT_SYMBOL(airq_iv_scan);
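A hypothetical usage sketch of the new adapter interrupt vector API (the vector size, my_iv and my_airq_handler are illustrative names, not taken from this patch):

static struct airq_iv *my_iv;	/* e.g. created with airq_iv_create(256, AIRQ_IV_ALLOC) */

static void my_airq_handler(struct airq_struct *airq)
{
	unsigned long bit = 0;

	/* walk and clear all pending sub-interrupt bits of the vector */
	while ((bit = airq_iv_scan(my_iv, bit, 256)) != -1UL) {
		/* dispatch the work associated with sub-interrupt 'bit' */
	}
}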
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 84846c2b96d..959135a0184 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -137,7 +137,7 @@ static ssize_t ccwgroup_online_store(struct device *dev,
if (!try_module_get(gdrv->driver.owner))
return -EINVAL;
- ret = strict_strtoul(buf, 0, &value);
+ ret = kstrtoul(buf, 0, &value);
if (ret)
goto out;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 4eeb4a6bf20..d7da67a31c7 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -561,37 +561,23 @@ out:
}
/*
- * do_IRQ() handles all normal I/O device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- *
+ * do_cio_interrupt() handles all normal I/O device IRQs
*/
-void __irq_entry do_IRQ(struct pt_regs *regs)
+static irqreturn_t do_cio_interrupt(int irq, void *dummy)
{
- struct tpi_info *tpi_info = (struct tpi_info *) &regs->int_code;
+ struct tpi_info *tpi_info;
struct subchannel *sch;
struct irb *irb;
- struct pt_regs *old_regs;
- old_regs = set_irq_regs(regs);
- irq_enter();
__this_cpu_write(s390_idle.nohz_delay, 1);
- if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
- /* Serve timer interrupts first. */
- clock_comparator_work();
-
- kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL);
+ tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
irb = (struct irb *) &S390_lowcore.irb;
- if (tpi_info->adapter_IO) {
- do_adapter_IO(tpi_info->isc);
- goto out;
- }
sch = (struct subchannel *)(unsigned long) tpi_info->intparm;
if (!sch) {
/* Clear pending interrupt condition. */
inc_irq_stat(IRQIO_CIO);
tsch(tpi_info->schid, irb);
- goto out;
+ return IRQ_HANDLED;
}
spin_lock(sch->lock);
/* Store interrupt response block to lowcore. */
@@ -606,9 +592,23 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
} else
inc_irq_stat(IRQIO_CIO);
spin_unlock(sch->lock);
-out:
- irq_exit();
- set_irq_regs(old_regs);
+
+ return IRQ_HANDLED;
+}
+
+static struct irq_desc *irq_desc_io;
+
+static struct irqaction io_interrupt = {
+ .name = "IO",
+ .handler = do_cio_interrupt,
+};
+
+void __init init_cio_interrupts(void)
+{
+ irq_set_chip_and_handler(IO_INTERRUPT,
+ &dummy_irq_chip, handle_percpu_irq);
+ setup_irq(IO_INTERRUPT, &io_interrupt);
+ irq_desc_io = irq_to_desc(IO_INTERRUPT);
}
#ifdef CONFIG_CCW_CONSOLE
@@ -635,7 +635,7 @@ void cio_tsch(struct subchannel *sch)
local_bh_disable();
irq_enter();
}
- kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL);
+ kstat_incr_irqs_this_cpu(IO_INTERRUPT, irq_desc_io);
if (sch->driver && sch->driver->irq)
sch->driver->irq(sch);
else
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index d62f5e7f3cf..d42f67412bd 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -121,9 +121,6 @@ extern int cio_commit_config(struct subchannel *sch);
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
int cio_tm_intrg(struct subchannel *sch);
-void do_adapter_IO(u8 isc);
-void do_IRQ(struct pt_regs *);
-
/* Use with care. */
#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 4495e0627a4..23054f8fa9f 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -1182,7 +1182,7 @@ static ssize_t cmb_enable_store(struct device *dev,
int ret;
unsigned long val;
- ret = strict_strtoul(buf, 16, &val);
+ ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 1ebe5d3ddeb..8c2cb87bccc 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -546,7 +546,9 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
case -ENOMEM:
case -EIO:
/* These should abort looping */
+ spin_lock_irq(&slow_subchannel_lock);
idset_sch_del_subseq(slow_subchannel_set, schid);
+ spin_unlock_irq(&slow_subchannel_lock);
break;
default:
rc = 0;
@@ -740,7 +742,7 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
int ret;
unsigned long val;
- ret = strict_strtoul(buf, 16, &val);
+ ret = kstrtoul(buf, 16, &val);
if (ret)
return ret;
mutex_lock(&css->mutex);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index b1de6033523..29351321bad 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -130,8 +130,6 @@ struct channel_subsystem {
extern struct channel_subsystem *channel_subsystems[];
-void channel_subsystem_reinit(void);
-
/* Helper functions to build lists for the slow path. */
void css_schedule_eval(struct subchannel_id schid);
void css_schedule_eval_all(void);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 1ab5f6c36d9..e4a7ab2bb62 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -564,7 +564,7 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
ret = 0;
} else {
force = 0;
- ret = strict_strtoul(buf, 16, &i);
+ ret = kstrtoul(buf, 16, &i);
}
if (ret)
goto out;
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index d1c8025b0b0..adef5f5de11 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -208,7 +208,7 @@ static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
goto out;
}
- rc = strict_strtoul(buf, 16, &i);
+ rc = kstrtoul(buf, 16, &i);
if (rc) {
rc = -EINVAL;
goto out;
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 1b9e4aee914..8004b071a9f 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -104,11 +104,11 @@ static void __init zfcp_init_device_setup(char *devstr)
strncpy(busid, token, ZFCP_BUS_ID_SIZE);
token = strsep(&str, ",");
- if (!token || strict_strtoull(token, 0, (unsigned long long *) &wwpn))
+ if (!token || kstrtoull(token, 0, (unsigned long long *) &wwpn))
goto err_out;
token = strsep(&str, ",");
- if (!token || strict_strtoull(token, 0, (unsigned long long *) &lun))
+ if (!token || kstrtoull(token, 0, (unsigned long long *) &lun))
goto err_out;
kfree(str_saved);
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 1d4c8fe7275..c82fe65c412 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
zfcp_erp_action_dismiss(&port->erp_action);
- else
- shost_for_each_device(sdev, port->adapter->scsi_host)
+ else {
+ spin_lock(port->adapter->scsi_host->host_lock);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
zfcp_erp_action_dismiss_lun(sdev);
+ spin_unlock(port->adapter->scsi_host->host_lock);
+ }
}
static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
{
struct scsi_device *sdev;
- shost_for_each_device(sdev, port->adapter->scsi_host)
+ spin_lock(port->adapter->scsi_host->host_lock);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
_zfcp_erp_lun_reopen(sdev, clear, id, 0);
+ spin_unlock(port->adapter->scsi_host->host_lock);
}
static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
@@ -1434,8 +1439,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
atomic_set_mask(common_mask, &port->status);
read_unlock_irqrestore(&adapter->port_list_lock, flags);
- shost_for_each_device(sdev, adapter->scsi_host)
+ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, adapter->scsi_host)
atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
}
/**
@@ -1469,11 +1476,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
}
read_unlock_irqrestore(&adapter->port_list_lock, flags);
- shost_for_each_device(sdev, adapter->scsi_host) {
+ spin_lock_irqsave(adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, adapter->scsi_host) {
atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
if (clear_counter)
atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
}
+ spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags);
}
/**
@@ -1487,16 +1496,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
{
struct scsi_device *sdev;
u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+ unsigned long flags;
atomic_set_mask(mask, &port->status);
if (!common_mask)
return;
- shost_for_each_device(sdev, port->adapter->scsi_host)
+ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port)
atomic_set_mask(common_mask,
&sdev_to_zfcp(sdev)->status);
+ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
}
/**
@@ -1511,6 +1523,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
struct scsi_device *sdev;
u32 common_mask = mask & ZFCP_COMMON_FLAGS;
u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
+ unsigned long flags;
atomic_clear_mask(mask, &port->status);
@@ -1520,13 +1533,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
if (clear_counter)
atomic_set(&port->erp_counter, 0);
- shost_for_each_device(sdev, port->adapter->scsi_host)
+ spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags);
+ __shost_for_each_device(sdev, port->adapter->scsi_host)
if (sdev_to_zfcp(sdev)->port == port) {
atomic_clear_mask(common_mask,
&sdev_to_zfcp(sdev)->status);
if (clear_counter)
atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
}
+ spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags);
}
/**
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 83e3f1408c3..a9c570a09b8 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -126,8 +126,6 @@ extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *, struct zfcp_qdio_req *,
extern int zfcp_qdio_open(struct zfcp_qdio *);
extern void zfcp_qdio_close(struct zfcp_qdio *);
extern void zfcp_qdio_siosl(struct zfcp_adapter *);
-extern struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *,
- struct qdio_buffer *);
/* zfcp_scsi.c */
extern struct scsi_transport_template *zfcp_scsi_transport_template;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 510e9b06c1a..0fe8d5d9511 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -770,7 +770,8 @@ int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
if (zfcp_qdio_sbal_get(qdio))
goto out;
- req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, 0,
+ req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
+ SBAL_SFLAGS0_TYPE_STATUS,
adapter->pool.status_read_req);
if (IS_ERR(req)) {
retval = PTR_ERR(req);
@@ -2387,12 +2388,3 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
break;
}
}
-
-struct zfcp_fsf_req *zfcp_fsf_get_req(struct zfcp_qdio *qdio,
- struct qdio_buffer *sbal)
-{
- struct qdio_buffer_element *sbale = &sbal->element[0];
- u64 req_id = (unsigned long) sbale->addr;
-
- return zfcp_reqlist_find(qdio->adapter->req_list, req_id);
-}
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 665e3cfaaf8..06025cdaa4a 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -16,9 +16,9 @@
#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
-static bool enable_multibuffer;
+static bool enable_multibuffer = 1;
module_param_named(datarouter, enable_multibuffer, bool, 0400);
-MODULE_PARM_DESC(datarouter, "Enable hardware data router support");
+MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
{
@@ -224,11 +224,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
{
- spin_lock_irq(&qdio->req_q_lock);
if (atomic_read(&qdio->req_q_free) ||
!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return 1;
- spin_unlock_irq(&qdio->req_q_lock);
return 0;
}
@@ -246,9 +244,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
{
long ret;
- spin_unlock_irq(&qdio->req_q_lock);
- ret = wait_event_interruptible_timeout(qdio->req_q_wq,
- zfcp_qdio_sbal_check(qdio), 5 * HZ);
+ ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
+ zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
return -EIO;
@@ -262,7 +259,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1");
}
- spin_lock_irq(&qdio->req_q_lock);
return -EIO;
}
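The new wait_event_interruptible_lock_irq_timeout() expects req_q_lock to be held (via spin_lock_irq) on entry; it drops the lock while sleeping and re-acquires it for each evaluation of the condition and before returning. A hypothetical sketch of the calling convention (sbal_wait_sketch() is illustrative only):

static int sbal_wait_sketch(struct zfcp_qdio *qdio)
{
	long ret;

	spin_lock_irq(&qdio->req_q_lock);	/* the macro expects the lock held */
	ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq,
			zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ);
	/* the lock is held again here: ret > 0 means the condition became true,
	 * 0 means the wait timed out, < 0 means it was interrupted by a signal */
	spin_unlock_irq(&qdio->req_q_lock);
	return ret > 0 ? 0 : -EIO;
}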
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 3f01bbf0609..672b57219e1 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -27,6 +27,16 @@ static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
zfcp_sysfs_##_feat##_##_name##_show, NULL);
+#define ZFCP_DEFINE_ATTR_CONST(_feat, _name, _format, _value) \
+static ssize_t zfcp_sysfs_##_feat##_##_name##_show(struct device *dev, \
+ struct device_attribute *at,\
+ char *buf) \
+{ \
+ return sprintf(buf, _format, _value); \
+} \
+static ZFCP_DEV_ATTR(_feat, _name, S_IRUGO, \
+ zfcp_sysfs_##_feat##_##_name##_show, NULL);
+
#define ZFCP_DEFINE_A_ATTR(_name, _format, _value) \
static ssize_t zfcp_sysfs_adapter_##_name##_show(struct device *dev, \
struct device_attribute *at,\
@@ -75,6 +85,8 @@ ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
(zfcp_unit_sdev_status(unit) &
ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
+ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0);
+ZFCP_DEFINE_ATTR_CONST(unit, access_readonly, "%d\n", 0);
static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
struct device_attribute *attr,
@@ -95,7 +107,7 @@ static ssize_t zfcp_sysfs_port_failed_store(struct device *dev,
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
unsigned long val;
- if (strict_strtoul(buf, 0, &val) || val != 0)
+ if (kstrtoul(buf, 0, &val) || val != 0)
return -EINVAL;
zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING);
@@ -134,7 +146,7 @@ static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev,
unsigned long val;
struct scsi_device *sdev;
- if (strict_strtoul(buf, 0, &val) || val != 0)
+ if (kstrtoul(buf, 0, &val) || val != 0)
return -EINVAL;
sdev = zfcp_unit_sdev(unit);
@@ -184,7 +196,7 @@ static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
if (!adapter)
return -ENODEV;
- if (strict_strtoul(buf, 0, &val) || val != 0) {
+ if (kstrtoul(buf, 0, &val) || val != 0) {
retval = -EINVAL;
goto out;
}
@@ -236,7 +248,7 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
if (!adapter)
return -ENODEV;
- if (strict_strtoull(buf, 0, (unsigned long long *) &wwpn))
+ if (kstrtoull(buf, 0, (unsigned long long *) &wwpn))
goto out;
port = zfcp_get_port_by_wwpn(adapter, wwpn);
@@ -297,7 +309,7 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
u64 fcp_lun;
int retval;
- if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
+ if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun))
return -EINVAL;
retval = zfcp_unit_add(port, fcp_lun);
@@ -315,7 +327,7 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
u64 fcp_lun;
- if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
+ if (kstrtoull(buf, 0, (unsigned long long *) &fcp_lun))
return -EINVAL;
if (zfcp_unit_remove(port, fcp_lun))
@@ -347,6 +359,8 @@ static struct attribute *zfcp_unit_attrs[] = {
&dev_attr_unit_in_recovery.attr,
&dev_attr_unit_status.attr,
&dev_attr_unit_access_denied.attr,
+ &dev_attr_unit_access_shared.attr,
+ &dev_attr_unit_access_readonly.attr,
NULL
};
static struct attribute_group zfcp_unit_attr_group = {
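For reference, ZFCP_DEFINE_ATTR_CONST(unit, access_shared, "%d\n", 0) expands via the macro added above to roughly the following (whitespace simplified):

static ssize_t zfcp_sysfs_unit_access_shared_show(struct device *dev,
						  struct device_attribute *at,
						  char *buf)
{
	return sprintf(buf, "%d\n", 0);
}
static ZFCP_DEV_ATTR(unit, access_shared, S_IRUGO,
		     zfcp_sysfs_unit_access_shared_show, NULL);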
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 48b2918e0d6..fe25677a551 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -601,6 +601,7 @@ config SCSI_ARCMSR
To compile this driver as a module, choose M here: the
module will be called arcmsr (modprobe arcmsr).
+source "drivers/scsi/esas2r/Kconfig"
source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/mpt2sas/Kconfig"
source "drivers/scsi/mpt3sas/Kconfig"
@@ -1353,7 +1354,6 @@ config SCSI_LPFC
tristate "Emulex LightPulse Fibre Channel Support"
depends on PCI && SCSI
select SCSI_FC_ATTRS
- select GENERIC_CSUM
select CRC_T10DIF
help
This lpfc driver supports the Emulex LightPulse
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index b607ba4f563..149bb6bf184 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -141,6 +141,7 @@ obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/
obj-$(CONFIG_SCSI_BNX2_ISCSI) += libiscsi.o bnx2i/
obj-$(CONFIG_BE2ISCSI) += libiscsi.o be2iscsi/
+obj-$(CONFIG_SCSI_ESAS2R) += esas2r/
obj-$(CONFIG_SCSI_PMCRAID) += pmcraid.o
obj-$(CONFIG_SCSI_VIRTIO) += virtio_scsi.o
obj-$(CONFIG_VMWARE_PVSCSI) += vmw_pvscsi.o
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 9611195d670..f8ca7becacc 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -63,9 +63,9 @@ int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
-#define BFAD_FW_FILE_CB "cbfw-3.2.1.0.bin"
-#define BFAD_FW_FILE_CT "ctfw-3.2.1.0.bin"
-#define BFAD_FW_FILE_CT2 "ct2fw-3.2.1.0.bin"
+#define BFAD_FW_FILE_CB "cbfw-3.2.1.1.bin"
+#define BFAD_FW_FILE_CT "ctfw-3.2.1.1.bin"
+#define BFAD_FW_FILE_CT2 "ct2fw-3.2.1.1.bin"
static u32 *bfad_load_fwimg(struct pci_dev *pdev);
static void bfad_free_fwimg(void);
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
index 25093a04123..3d33767f2f2 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_constants.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
@@ -1,6 +1,6 @@
/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
*
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
index f2db5fe7bdc..7052a839b0e 100644
--- a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
+++ b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
@@ -1,6 +1,6 @@
/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
*
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -581,8 +581,10 @@ struct iscsi_kwqe_init1 {
#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
-#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
-#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
+#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE (0x1<<6)
+#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE_SHIFT 6
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 7
u16 cq_num_wqes;
#elif defined(__LITTLE_ENDIAN)
u16 cq_num_wqes;
@@ -593,8 +595,10 @@ struct iscsi_kwqe_init1 {
#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
-#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
-#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
+#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE (0x1<<6)
+#define ISCSI_KWQE_INIT1_TIME_STAMPS_ENABLE_SHIFT 6
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 7
u8 cq_log_wqes_per_page;
#endif
#if defined(__BIG_ENDIAN)
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index f109e3b073c..6940f0930a8 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -1,6 +1,6 @@
/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
*
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index a28b03e5a5f..af3e675d4d4 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1,6 +1,6 @@
/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
*
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 50fef6963a8..34c294b42c8 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -1,6 +1,6 @@
/* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
*
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
@@ -18,8 +18,8 @@ static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
static u32 adapter_count;
#define DRV_MODULE_NAME "bnx2i"
-#define DRV_MODULE_VERSION "2.7.2.2"
-#define DRV_MODULE_RELDATE "Apr 25, 2012"
+#define DRV_MODULE_VERSION "2.7.6.2"
+#define DRV_MODULE_RELDATE "Jun 06, 2013"
static char version[] =
"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -172,16 +172,14 @@ void bnx2i_start(void *handle)
struct bnx2i_hba *hba = handle;
int i = HZ;
- /*
- * We should never register devices that don't support iSCSI
- * (see bnx2i_init_one), so something is wrong if we try to
- * start a iSCSI adapter on hardware with 0 supported iSCSI
- * connections
+ /* On some bnx2x devices, it is possible that iSCSI is no
+ * longer supported after firmware is downloaded. In that
+ * case, the iscsi_init_msg will return failure.
*/
- BUG_ON(!hba->cnic->max_iscsi_conn);
bnx2i_send_fw_iscsi_init_msg(hba);
- while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
+ while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) &&
+ !test_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state) && i--)
msleep(BNX2I_INIT_POLL_TIME);
}
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 0056e47bd56..fabeb88602a 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1,7 +1,7 @@
/*
* bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
*
- * Copyright (c) 2006 - 2012 Broadcom Corporation
+ * Copyright (c) 2006 - 2013 Broadcom Corporation
* Copyright (c) 2007, 2008 Red Hat, Inc. All rights reserved.
* Copyright (c) 2007, 2008 Mike Christie
*
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
index c61cf7a4365..a0a3d9fe61f 100644
--- a/drivers/scsi/bnx2i/bnx2i_sysfs.c
+++ b/drivers/scsi/bnx2i/bnx2i_sysfs.c
@@ -1,6 +1,6 @@
/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
*
- * Copyright (c) 2004 - 2012 Broadcom Corporation
+ * Copyright (c) 2004 - 2013 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/scsi/eata_pio.c b/drivers/scsi/eata_pio.c
index 356def44ce5..1663173cdb9 100644
--- a/drivers/scsi/eata_pio.c
+++ b/drivers/scsi/eata_pio.c
@@ -919,7 +919,7 @@ static int eata_pio_detect(struct scsi_host_template *tpnt)
find_pio_EISA(&gc);
find_pio_ISA(&gc);
- for (i = 0; i <= MAXIRQ; i++)
+ for (i = 0; i < MAXIRQ; i++)
if (reg_IRQ[i])
request_irq(i, do_eata_pio_int_handler, IRQF_DISABLED, "EATA-PIO", NULL);
diff --git a/drivers/scsi/esas2r/Kconfig b/drivers/scsi/esas2r/Kconfig
new file mode 100644
index 00000000000..78fdbfd9b4b
--- /dev/null
+++ b/drivers/scsi/esas2r/Kconfig
@@ -0,0 +1,5 @@
+config SCSI_ESAS2R
+ tristate "ATTO Technology's ExpressSAS RAID adapter driver"
+ depends on PCI && SCSI
+ ---help---
+ This driver supports the ATTO ExpressSAS R6xx SAS/SATA RAID controllers.
diff --git a/drivers/scsi/esas2r/Makefile b/drivers/scsi/esas2r/Makefile
new file mode 100644
index 00000000000..c77160b8c8b
--- /dev/null
+++ b/drivers/scsi/esas2r/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_SCSI_ESAS2R) += esas2r.o
+
+esas2r-objs := esas2r_log.o esas2r_disc.o esas2r_flash.o esas2r_init.o \
+ esas2r_int.o esas2r_io.o esas2r_ioctl.o esas2r_targdb.o \
+ esas2r_vda.o esas2r_main.o
diff --git a/drivers/scsi/esas2r/atioctl.h b/drivers/scsi/esas2r/atioctl.h
new file mode 100644
index 00000000000..4aca3d52c85
--- /dev/null
+++ b/drivers/scsi/esas2r/atioctl.h
@@ -0,0 +1,1254 @@
+/* linux/drivers/scsi/esas2r/atioctl.h
+ * ATTO IOCTL Handling
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "atvda.h"
+
+#ifndef ATIOCTL_H
+#define ATIOCTL_H
+
+#define EXPRESS_IOCTL_SIGNATURE "Express"
+#define EXPRESS_IOCTL_SIGNATURE_SIZE 8
+
+/* structure definitions for IOCTLs */
+
+struct __packed atto_express_ioctl_header {
+ u8 signature[EXPRESS_IOCTL_SIGNATURE_SIZE];
+ u8 return_code;
+
+#define IOCTL_SUCCESS 0
+#define IOCTL_ERR_INVCMD 101
+#define IOCTL_INIT_FAILED 102
+#define IOCTL_NOT_IMPLEMENTED 103
+#define IOCTL_BAD_CHANNEL 104
+#define IOCTL_TARGET_OVERRUN 105
+#define IOCTL_TARGET_NOT_ENABLED 106
+#define IOCTL_BAD_FLASH_IMGTYPE 107
+#define IOCTL_OUT_OF_RESOURCES 108
+#define IOCTL_GENERAL_ERROR 109
+#define IOCTL_INVALID_PARAM 110
+
+ u8 channel;
+ u8 retries;
+ u8 pad[5];
+};
+
+/*
+ * NOTE - if channel == 0xFF, the request is
+ * handled on the adapter it came in on.
+ */
+#define MAX_NODE_NAMES 256
+
+struct __packed atto_firmware_rw_request {
+ u8 function;
+ #define FUNC_FW_DOWNLOAD 0x09
+ #define FUNC_FW_UPLOAD 0x12
+
+ u8 img_type;
+ #define FW_IMG_FW 0x01
+ #define FW_IMG_BIOS 0x02
+ #define FW_IMG_NVR 0x03
+ #define FW_IMG_RAW 0x04
+ #define FW_IMG_FM_API 0x05
+ #define FW_IMG_FS_API 0x06
+
+ u8 pad[2];
+ u32 img_offset;
+ u32 img_size;
+ u8 image[0x80000];
+};
+
+struct __packed atto_param_rw_request {
+ u16 code;
+ char data_buffer[512];
+};
+
+#define MAX_CHANNEL 256
+
+struct __packed atto_channel_list {
+ u32 num_channels;
+ u8 channel[MAX_CHANNEL];
+};
+
+struct __packed atto_channel_info {
+ u8 major_rev;
+ u8 minor_rev;
+ u8 IRQ;
+ u8 revision_id;
+ u8 pci_bus;
+ u8 pci_dev_func;
+ u8 core_rev;
+ u8 host_no;
+ u16 device_id;
+ u16 vendor_id;
+ u16 ven_dev_id;
+ u8 pad[3];
+ u32 hbaapi_rev;
+};
+
+/*
+ * CSMI control codes
+ * class independent
+ */
+#define CSMI_CC_GET_DRVR_INFO 1
+#define CSMI_CC_GET_CNTLR_CFG 2
+#define CSMI_CC_GET_CNTLR_STS 3
+#define CSMI_CC_FW_DOWNLOAD 4
+
+/* RAID class */
+#define CSMI_CC_GET_RAID_INFO 10
+#define CSMI_CC_GET_RAID_CFG 11
+
+/* HBA class */
+#define CSMI_CC_GET_PHY_INFO 20
+#define CSMI_CC_SET_PHY_INFO 21
+#define CSMI_CC_GET_LINK_ERRORS 22
+#define CSMI_CC_SMP_PASSTHRU 23
+#define CSMI_CC_SSP_PASSTHRU 24
+#define CSMI_CC_STP_PASSTHRU 25
+#define CSMI_CC_GET_SATA_SIG 26
+#define CSMI_CC_GET_SCSI_ADDR 27
+#define CSMI_CC_GET_DEV_ADDR 28
+#define CSMI_CC_TASK_MGT 29
+#define CSMI_CC_GET_CONN_INFO 30
+
+/* PHY class */
+#define CSMI_CC_PHY_CTRL 60
+
+/*
+ * CSMI status codes
+ * class independent
+ */
+#define CSMI_STS_SUCCESS 0
+#define CSMI_STS_FAILED 1
+#define CSMI_STS_BAD_CTRL_CODE 2
+#define CSMI_STS_INV_PARAM 3
+#define CSMI_STS_WRITE_ATTEMPTED 4
+
+/* RAID class */
+#define CSMI_STS_INV_RAID_SET 1000
+
+/* HBA class */
+#define CSMI_STS_PHY_CHANGED CSMI_STS_SUCCESS
+#define CSMI_STS_PHY_UNCHANGEABLE 2000
+#define CSMI_STS_INV_LINK_RATE 2001
+#define CSMI_STS_INV_PHY 2002
+#define CSMI_STS_INV_PHY_FOR_PORT 2003
+#define CSMI_STS_PHY_UNSELECTABLE 2004
+#define CSMI_STS_SELECT_PHY_OR_PORT 2005
+#define CSMI_STS_INV_PORT 2006
+#define CSMI_STS_PORT_UNSELECTABLE 2007
+#define CSMI_STS_CONNECTION_FAILED 2008
+#define CSMI_STS_NO_SATA_DEV 2009
+#define CSMI_STS_NO_SATA_SIGNATURE 2010
+#define CSMI_STS_SCSI_EMULATION 2011
+#define CSMI_STS_NOT_AN_END_DEV 2012
+#define CSMI_STS_NO_SCSI_ADDR 2013
+#define CSMI_STS_NO_DEV_ADDR 2014
+
+/* CSMI class independent structures */
+struct atto_csmi_get_driver_info {
+ char name[81];
+ char description[81];
+ u16 major_rev;
+ u16 minor_rev;
+ u16 build_rev;
+ u16 release_rev;
+ u16 csmi_major_rev;
+ u16 csmi_minor_rev;
+ #define CSMI_MAJOR_REV_0_81 0
+ #define CSMI_MINOR_REV_0_81 81
+
+ #define CSMI_MAJOR_REV CSMI_MAJOR_REV_0_81
+ #define CSMI_MINOR_REV CSMI_MINOR_REV_0_81
+};
+
+struct atto_csmi_get_pci_bus_addr {
+ u8 bus_num;
+ u8 device_num;
+ u8 function_num;
+ u8 reserved;
+};
+
+struct atto_csmi_get_cntlr_cfg {
+ u32 base_io_addr;
+
+ struct {
+ u32 base_memaddr_lo;
+ u32 base_memaddr_hi;
+ };
+
+ u32 board_id;
+ u16 slot_num;
+ #define CSMI_SLOT_NUM_UNKNOWN 0xFFFF
+
+ u8 cntlr_class;
+ #define CSMI_CNTLR_CLASS_HBA 5
+
+ u8 io_bus_type;
+ #define CSMI_BUS_TYPE_PCI 3
+ #define CSMI_BUS_TYPE_PCMCIA 4
+
+ union {
+ struct atto_csmi_get_pci_bus_addr pci_addr;
+ u8 reserved[32];
+ };
+
+ char serial_num[81];
+ u16 major_rev;
+ u16 minor_rev;
+ u16 build_rev;
+ u16 release_rev;
+ u16 bios_major_rev;
+ u16 bios_minor_rev;
+ u16 bios_build_rev;
+ u16 bios_release_rev;
+ u32 cntlr_flags;
+ #define CSMI_CNTLRF_SAS_HBA 0x00000001
+ #define CSMI_CNTLRF_SAS_RAID 0x00000002
+ #define CSMI_CNTLRF_SATA_HBA 0x00000004
+ #define CSMI_CNTLRF_SATA_RAID 0x00000008
+ #define CSMI_CNTLRF_FWD_SUPPORT 0x00010000
+ #define CSMI_CNTLRF_FWD_ONLINE 0x00020000
+ #define CSMI_CNTLRF_FWD_SRESET 0x00040000
+ #define CSMI_CNTLRF_FWD_HRESET 0x00080000
+ #define CSMI_CNTLRF_FWD_RROM 0x00100000
+
+ u16 rrom_major_rev;
+ u16 rrom_minor_rev;
+ u16 rrom_build_rev;
+ u16 rrom_release_rev;
+ u16 rrom_biosmajor_rev;
+ u16 rrom_biosminor_rev;
+ u16 rrom_biosbuild_rev;
+ u16 rrom_biosrelease_rev;
+ u8 reserved2[7];
+};
+
+struct atto_csmi_get_cntlr_sts {
+ u32 status;
+ #define CSMI_CNTLR_STS_GOOD 1
+ #define CSMI_CNTLR_STS_FAILED 2
+ #define CSMI_CNTLR_STS_OFFLINE 3
+ #define CSMI_CNTLR_STS_POWEROFF 4
+
+ u32 offline_reason;
+ #define CSMI_OFFLINE_NO_REASON 0
+ #define CSMI_OFFLINE_INITIALIZING 1
+ #define CSMI_OFFLINE_BUS_DEGRADED 2
+ #define CSMI_OFFLINE_BUS_FAILURE 3
+
+ u8 reserved[28];
+};
+
+struct atto_csmi_fw_download {
+ u32 buffer_len;
+ u32 download_flags;
+ #define CSMI_FWDF_VALIDATE 0x00000001
+ #define CSMI_FWDF_SOFT_RESET 0x00000002
+ #define CSMI_FWDF_HARD_RESET 0x00000004
+
+ u8 reserved[32];
+ u16 status;
+ #define CSMI_FWD_STS_SUCCESS 0
+ #define CSMI_FWD_STS_FAILED 1
+ #define CSMI_FWD_STS_USING_RROM 2
+ #define CSMI_FWD_STS_REJECT 3
+ #define CSMI_FWD_STS_DOWNREV 4
+
+ u16 severity;
+ #define CSMI_FWD_SEV_INFO 0
+ #define CSMI_FWD_SEV_WARNING 1
+ #define CSMI_FWD_SEV_ERROR 2
+ #define CSMI_FWD_SEV_FATAL 3
+
+};
+
+/* CSMI RAID class structures */
+struct atto_csmi_get_raid_info {
+ u32 num_raid_sets;
+ u32 max_drivesper_set;
+ u8 reserved[92];
+};
+
+struct atto_csmi_raid_drives {
+ char model[40];
+ char firmware[8];
+ char serial_num[40];
+ u8 sas_addr[8];
+ u8 lun[8];
+ u8 drive_sts;
+ #define CSMI_DRV_STS_OK 0
+ #define CSMI_DRV_STS_REBUILDING 1
+ #define CSMI_DRV_STS_FAILED 2
+ #define CSMI_DRV_STS_DEGRADED 3
+
+ u8 drive_usage;
+ #define CSMI_DRV_USE_NOT_USED 0
+ #define CSMI_DRV_USE_MEMBER 1
+ #define CSMI_DRV_USE_SPARE 2
+
+ u8 reserved[30]; /* spec says 22 */
+};
+
+struct atto_csmi_get_raid_cfg {
+ u32 raid_set_index;
+ u32 capacity;
+ u32 stripe_size;
+ u8 raid_type;
+ u8 status;
+ u8 information;
+ u8 drive_cnt;
+ u8 reserved[20];
+
+ struct atto_csmi_raid_drives drives[1];
+};
+
+/* CSMI HBA class structures */
+struct atto_csmi_phy_entity {
+ u8 ident_frame[0x1C];
+ u8 port_id;
+ u8 neg_link_rate;
+ u8 min_link_rate;
+ u8 max_link_rate;
+ u8 phy_change_cnt;
+ u8 auto_discover;
+ #define CSMI_DISC_NOT_SUPPORTED 0x00
+ #define CSMI_DISC_NOT_STARTED 0x01
+ #define CSMI_DISC_IN_PROGRESS 0x02
+ #define CSMI_DISC_COMPLETE 0x03
+ #define CSMI_DISC_ERROR 0x04
+
+ u8 reserved[2];
+ u8 attach_ident_frame[0x1C];
+};
+
+struct atto_csmi_get_phy_info {
+ u8 number_of_phys;
+ u8 reserved[3];
+ struct atto_csmi_phy_entity
+ phy[32];
+};
+
+struct atto_csmi_set_phy_info {
+ u8 phy_id;
+ u8 neg_link_rate;
+ #define CSMI_NEG_RATE_NEGOTIATE 0x00
+ #define CSMI_NEG_RATE_PHY_DIS 0x01
+
+ u8 prog_minlink_rate;
+ u8 prog_maxlink_rate;
+ u8 signal_class;
+ #define CSMI_SIG_CLASS_UNKNOWN 0x00
+ #define CSMI_SIG_CLASS_DIRECT 0x01
+ #define CSMI_SIG_CLASS_SERVER 0x02
+ #define CSMI_SIG_CLASS_ENCLOSURE 0x03
+
+ u8 reserved[3];
+};
+
+struct atto_csmi_get_link_errors {
+ u8 phy_id;
+ u8 reset_cnts;
+ #define CSMI_RESET_CNTS_NO 0x00
+ #define CSMI_RESET_CNTS_YES 0x01
+
+ u8 reserved[2];
+ u32 inv_dw_cnt;
+ u32 disp_err_cnt;
+ u32 loss_ofdw_sync_cnt;
+ u32 phy_reseterr_cnt;
+
+ /*
+ * The following field has been added by ATTO for ease of
+ * implementation of additional statistics. Drivers must validate
+ * the length of the IOCTL payload prior to filling them in so CSMI
+	 * compliant applications function correctly.
+ */
+
+ u32 crc_err_cnt;
+};
+
+struct atto_csmi_smp_passthru {
+ u8 phy_id;
+ u8 port_id;
+ u8 conn_rate;
+ u8 reserved;
+ u8 dest_sas_addr[8];
+ u32 req_len;
+ u8 smp_req[1020];
+ u8 conn_sts;
+ u8 reserved2[3];
+ u32 rsp_len;
+ u8 smp_rsp[1020];
+};
+
+struct atto_csmi_ssp_passthru_sts {
+ u8 conn_sts;
+ u8 reserved[3];
+ u8 data_present;
+ u8 status;
+ u16 rsp_length;
+ u8 rsp[256];
+ u32 data_bytes;
+};
+
+struct atto_csmi_ssp_passthru {
+ u8 phy_id;
+ u8 port_id;
+ u8 conn_rate;
+ u8 reserved;
+ u8 dest_sas_addr[8];
+ u8 lun[8];
+ u8 cdb_len;
+ u8 add_cdb_len;
+ u8 reserved2[2];
+ u8 cdb[16];
+ u32 flags;
+ #define CSMI_SSPF_DD_READ 0x00000001
+ #define CSMI_SSPF_DD_WRITE 0x00000002
+ #define CSMI_SSPF_DD_UNSPECIFIED 0x00000004
+ #define CSMI_SSPF_TA_SIMPLE 0x00000000
+ #define CSMI_SSPF_TA_HEAD_OF_Q 0x00000010
+ #define CSMI_SSPF_TA_ORDERED 0x00000020
+ #define CSMI_SSPF_TA_ACA 0x00000040
+
+ u8 add_cdb[24];
+ u32 data_len;
+
+ struct atto_csmi_ssp_passthru_sts sts;
+};
+
+struct atto_csmi_stp_passthru_sts {
+ u8 conn_sts;
+ u8 reserved[3];
+ u8 sts_fis[20];
+ u32 scr[16];
+ u32 data_bytes;
+};
+
+struct atto_csmi_stp_passthru {
+ u8 phy_id;
+ u8 port_id;
+ u8 conn_rate;
+ u8 reserved;
+ u8 dest_sas_addr[8];
+ u8 reserved2[4];
+ u8 command_fis[20];
+ u32 flags;
+ #define CSMI_STPF_DD_READ 0x00000001
+ #define CSMI_STPF_DD_WRITE 0x00000002
+ #define CSMI_STPF_DD_UNSPECIFIED 0x00000004
+ #define CSMI_STPF_PIO 0x00000010
+ #define CSMI_STPF_DMA 0x00000020
+ #define CSMI_STPF_PACKET 0x00000040
+ #define CSMI_STPF_DMA_QUEUED 0x00000080
+ #define CSMI_STPF_EXECUTE_DIAG 0x00000100
+ #define CSMI_STPF_RESET_DEVICE 0x00000200
+
+ u32 data_len;
+
+ struct atto_csmi_stp_passthru_sts sts;
+};
+
+struct atto_csmi_get_sata_sig {
+ u8 phy_id;
+ u8 reserved[3];
+ u8 reg_dth_fis[20];
+};
+
+struct atto_csmi_get_scsi_addr {
+ u8 sas_addr[8];
+ u8 sas_lun[8];
+ u8 host_index;
+ u8 path_id;
+ u8 target_id;
+ u8 lun;
+};
+
+struct atto_csmi_get_dev_addr {
+ u8 host_index;
+ u8 path_id;
+ u8 target_id;
+ u8 lun;
+ u8 sas_addr[8];
+ u8 sas_lun[8];
+};
+
+struct atto_csmi_task_mgmt {
+ u8 host_index;
+ u8 path_id;
+ u8 target_id;
+ u8 lun;
+ u32 flags;
+ #define CSMI_TMF_TASK_IU 0x00000001
+ #define CSMI_TMF_HARD_RST 0x00000002
+ #define CSMI_TMF_SUPPRESS_RSLT 0x00000004
+
+ u32 queue_tag;
+ u32 reserved;
+ u8 task_mgt_func;
+ u8 reserved2[7];
+ u32 information;
+ #define CSMI_TM_INFO_TEST 1
+ #define CSMI_TM_INFO_EXCEEDED 2
+ #define CSMI_TM_INFO_DEMAND 3
+ #define CSMI_TM_INFO_TRIGGER 4
+
+ struct atto_csmi_ssp_passthru_sts sts;
+
+};
+
+struct atto_csmi_get_conn_info {
+ u32 pinout;
+ #define CSMI_CON_UNKNOWN 0x00000001
+ #define CSMI_CON_SFF_8482 0x00000002
+ #define CSMI_CON_SFF_8470_LANE_1 0x00000100
+ #define CSMI_CON_SFF_8470_LANE_2 0x00000200
+ #define CSMI_CON_SFF_8470_LANE_3 0x00000400
+ #define CSMI_CON_SFF_8470_LANE_4 0x00000800
+ #define CSMI_CON_SFF_8484_LANE_1 0x00010000
+ #define CSMI_CON_SFF_8484_LANE_2 0x00020000
+ #define CSMI_CON_SFF_8484_LANE_3 0x00040000
+ #define CSMI_CON_SFF_8484_LANE_4 0x00080000
+
+ u8 connector[16];
+ u8 location;
+ #define CSMI_CON_INTERNAL 0x02
+ #define CSMI_CON_EXTERNAL 0x04
+ #define CSMI_CON_SWITCHABLE 0x08
+ #define CSMI_CON_AUTO 0x10
+
+ u8 reserved[15];
+};
+
+/* CSMI PHY class structures */
+struct atto_csmi_character {
+ u8 type_flags;
+ #define CSMI_CTF_POS_DISP 0x01
+ #define CSMI_CTF_NEG_DISP 0x02
+ #define CSMI_CTF_CTRL_CHAR 0x04
+
+ u8 value;
+};
+
+struct atto_csmi_pc_ctrl {
+ u8 type;
+ #define CSMI_PC_TYPE_UNDEFINED 0x00
+ #define CSMI_PC_TYPE_SATA 0x01
+ #define CSMI_PC_TYPE_SAS 0x02
+ u8 rate;
+ u8 reserved[6];
+ u32 vendor_unique[8];
+ u32 tx_flags;
+ #define CSMI_PC_TXF_PREEMP_DIS 0x00000001
+
+ signed char tx_amplitude;
+ signed char tx_preemphasis;
+ signed char tx_slew_rate;
+ signed char tx_reserved[13];
+ u8 tx_vendor_unique[64];
+ u32 rx_flags;
+ #define CSMI_PC_RXF_EQ_DIS 0x00000001
+
+ signed char rx_threshold;
+ signed char rx_equalization_gain;
+ signed char rx_reserved[14];
+ u8 rx_vendor_unique[64];
+ u32 pattern_flags;
+ #define CSMI_PC_PATF_FIXED 0x00000001
+ #define CSMI_PC_PATF_DIS_SCR 0x00000002
+ #define CSMI_PC_PATF_DIS_ALIGN 0x00000004
+ #define CSMI_PC_PATF_DIS_SSC 0x00000008
+
+ u8 fixed_pattern;
+ #define CSMI_PC_FP_CJPAT 0x00000001
+ #define CSMI_PC_FP_ALIGN 0x00000002
+
+ u8 user_pattern_len;
+ u8 pattern_reserved[6];
+
+ struct atto_csmi_character user_pattern_buffer[16];
+};
+
+struct atto_csmi_phy_ctrl {
+ u32 function;
+ #define CSMI_PC_FUNC_GET_SETUP 0x00000100
+
+ u8 phy_id;
+ u16 len_of_cntl;
+ u8 num_of_cntls;
+ u8 reserved[4];
+ u32 link_flags;
+ #define CSMI_PHY_ACTIVATE_CTRL 0x00000001
+ #define CSMI_PHY_UPD_SPINUP_RATE 0x00000002
+ #define CSMI_PHY_AUTO_COMWAKE 0x00000004
+
+ u8 spinup_rate;
+ u8 link_reserved[7];
+ u32 vendor_unique[8];
+
+ struct atto_csmi_pc_ctrl control[1];
+};
+
+union atto_ioctl_csmi {
+ struct atto_csmi_get_driver_info drvr_info;
+ struct atto_csmi_get_cntlr_cfg cntlr_cfg;
+ struct atto_csmi_get_cntlr_sts cntlr_sts;
+ struct atto_csmi_fw_download fw_dwnld;
+ struct atto_csmi_get_raid_info raid_info;
+ struct atto_csmi_get_raid_cfg raid_cfg;
+ struct atto_csmi_get_phy_info get_phy_info;
+ struct atto_csmi_set_phy_info set_phy_info;
+ struct atto_csmi_get_link_errors link_errs;
+ struct atto_csmi_smp_passthru smp_pass_thru;
+ struct atto_csmi_ssp_passthru ssp_pass_thru;
+ struct atto_csmi_stp_passthru stp_pass_thru;
+ struct atto_csmi_task_mgmt tsk_mgt;
+ struct atto_csmi_get_sata_sig sata_sig;
+ struct atto_csmi_get_scsi_addr scsi_addr;
+ struct atto_csmi_get_dev_addr dev_addr;
+ struct atto_csmi_get_conn_info conn_info[32];
+ struct atto_csmi_phy_ctrl phy_ctrl;
+};
+
+struct atto_csmi {
+ u32 control_code;
+ u32 status;
+ union atto_ioctl_csmi data;
+};
+
+struct atto_module_info {
+ void *adapter;
+ void *pci_dev;
+ void *scsi_host;
+ unsigned short host_no;
+ union {
+ struct {
+ u64 node_name;
+ u64 port_name;
+ };
+ u64 sas_addr;
+ };
+};
+
+#define ATTO_FUNC_GET_ADAP_INFO 0x00
+#define ATTO_VER_GET_ADAP_INFO0 0
+#define ATTO_VER_GET_ADAP_INFO ATTO_VER_GET_ADAP_INFO0
+
+struct __packed atto_hba_get_adapter_info {
+
+ struct {
+ u16 vendor_id;
+ u16 device_id;
+ u16 ss_vendor_id;
+ u16 ss_device_id;
+ u8 class_code[3];
+ u8 rev_id;
+ u8 bus_num;
+ u8 dev_num;
+ u8 func_num;
+ u8 link_width_max;
+ u8 link_width_curr;
+ #define ATTO_GAI_PCILW_UNKNOWN 0x00
+
+ u8 link_speed_max;
+ u8 link_speed_curr;
+ #define ATTO_GAI_PCILS_UNKNOWN 0x00
+ #define ATTO_GAI_PCILS_GEN1 0x01
+ #define ATTO_GAI_PCILS_GEN2 0x02
+ #define ATTO_GAI_PCILS_GEN3 0x03
+
+ u8 interrupt_mode;
+ #define ATTO_GAI_PCIIM_UNKNOWN 0x00
+ #define ATTO_GAI_PCIIM_LEGACY 0x01
+ #define ATTO_GAI_PCIIM_MSI 0x02
+ #define ATTO_GAI_PCIIM_MSIX 0x03
+
+ u8 msi_vector_cnt;
+ u8 reserved[19];
+ } pci;
+
+ u8 adap_type;
+ #define ATTO_GAI_AT_EPCIU320 0x00
+ #define ATTO_GAI_AT_ESASRAID 0x01
+ #define ATTO_GAI_AT_ESASRAID2 0x02
+ #define ATTO_GAI_AT_ESASHBA 0x03
+ #define ATTO_GAI_AT_ESASHBA2 0x04
+ #define ATTO_GAI_AT_CELERITY 0x05
+ #define ATTO_GAI_AT_CELERITY8 0x06
+ #define ATTO_GAI_AT_FASTFRAME 0x07
+ #define ATTO_GAI_AT_ESASHBA3 0x08
+ #define ATTO_GAI_AT_CELERITY16 0x09
+ #define ATTO_GAI_AT_TLSASHBA 0x0A
+ #define ATTO_GAI_AT_ESASHBA4 0x0B
+
+ u8 adap_flags;
+ #define ATTO_GAI_AF_DEGRADED 0x01
+ #define ATTO_GAI_AF_SPT_SUPP 0x02
+ #define ATTO_GAI_AF_DEVADDR_SUPP 0x04
+ #define ATTO_GAI_AF_PHYCTRL_SUPP 0x08
+ #define ATTO_GAI_AF_TEST_SUPP 0x10
+ #define ATTO_GAI_AF_DIAG_SUPP 0x20
+ #define ATTO_GAI_AF_VIRT_SES 0x40
+ #define ATTO_GAI_AF_CONN_CTRL 0x80
+
+ u8 num_ports;
+ u8 num_phys;
+ u8 drvr_rev_major;
+ u8 drvr_rev_minor;
+ u8 drvr_revsub_minor;
+ u8 drvr_rev_build;
+ char drvr_rev_ascii[16];
+ char drvr_name[32];
+ char firmware_rev[16];
+ char flash_rev[16];
+ char model_name_short[16];
+ char model_name[32];
+ u32 num_targets;
+ u32 num_targsper_bus;
+ u32 num_lunsper_targ;
+ u8 num_busses;
+ u8 num_connectors;
+ u8 adap_flags2;
+ #define ATTO_GAI_AF2_FCOE_SUPP 0x01
+ #define ATTO_GAI_AF2_NIC_SUPP 0x02
+ #define ATTO_GAI_AF2_LOCATE_SUPP 0x04
+ #define ATTO_GAI_AF2_ADAP_CTRL_SUPP 0x08
+ #define ATTO_GAI_AF2_DEV_INFO_SUPP 0x10
+ #define ATTO_GAI_AF2_NPIV_SUPP 0x20
+ #define ATTO_GAI_AF2_MP_SUPP 0x40
+
+ u8 num_temp_sensors;
+ u32 num_targets_backend;
+ u32 tunnel_flags;
+ #define ATTO_GAI_TF_MEM_RW 0x00000001
+ #define ATTO_GAI_TF_TRACE 0x00000002
+ #define ATTO_GAI_TF_SCSI_PASS_THRU 0x00000004
+ #define ATTO_GAI_TF_GET_DEV_ADDR 0x00000008
+ #define ATTO_GAI_TF_PHY_CTRL 0x00000010
+ #define ATTO_GAI_TF_CONN_CTRL 0x00000020
+ #define ATTO_GAI_TF_GET_DEV_INFO 0x00000040
+
+ u8 reserved3[0x138];
+};
+
+#define ATTO_FUNC_GET_ADAP_ADDR 0x01
+#define ATTO_VER_GET_ADAP_ADDR0 0
+#define ATTO_VER_GET_ADAP_ADDR ATTO_VER_GET_ADAP_ADDR0
+
+struct __packed atto_hba_get_adapter_address {
+
+ u8 addr_type;
+ #define ATTO_GAA_AT_PORT 0x00
+ #define ATTO_GAA_AT_NODE 0x01
+ #define ATTO_GAA_AT_CURR_MAC 0x02
+ #define ATTO_GAA_AT_PERM_MAC 0x03
+ #define ATTO_GAA_AT_VNIC 0x04
+
+ u8 port_id;
+ u16 addr_len;
+ u8 address[256];
+};
+
+#define ATTO_FUNC_MEM_RW 0x02
+#define ATTO_VER_MEM_RW0 0
+#define ATTO_VER_MEM_RW ATTO_VER_MEM_RW0
+
+struct __packed atto_hba_memory_read_write {
+ u8 mem_func;
+ u8 mem_type;
+ union {
+ u8 pci_index;
+ u8 i2c_dev;
+ };
+ u8 i2c_status;
+ u32 length;
+ u64 address;
+ u8 reserved[48];
+
+};
+
+#define ATTO_FUNC_TRACE 0x03
+#define ATTO_VER_TRACE0 0
+#define ATTO_VER_TRACE1 1
+#define ATTO_VER_TRACE ATTO_VER_TRACE1
+
+struct __packed atto_hba_trace {
+ u8 trace_func;
+ #define ATTO_TRC_TF_GET_INFO 0x00
+ #define ATTO_TRC_TF_ENABLE 0x01
+ #define ATTO_TRC_TF_DISABLE 0x02
+ #define ATTO_TRC_TF_SET_MASK 0x03
+ #define ATTO_TRC_TF_UPLOAD 0x04
+ #define ATTO_TRC_TF_RESET 0x05
+
+ u8 trace_type;
+ #define ATTO_TRC_TT_DRIVER 0x00
+ #define ATTO_TRC_TT_FWCOREDUMP 0x01
+
+ u8 reserved[2];
+ u32 current_offset;
+ u32 total_length;
+ u32 trace_mask;
+ u8 reserved2[48];
+};
+
+#define ATTO_FUNC_SCSI_PASS_THRU 0x04
+#define ATTO_VER_SCSI_PASS_THRU0 0
+#define ATTO_VER_SCSI_PASS_THRU ATTO_VER_SCSI_PASS_THRU0
+
+struct __packed atto_hba_scsi_pass_thru {
+ u8 cdb[32];
+ u8 cdb_length;
+ u8 req_status;
+ #define ATTO_SPT_RS_SUCCESS 0x00
+ #define ATTO_SPT_RS_FAILED 0x01
+ #define ATTO_SPT_RS_OVERRUN 0x02
+ #define ATTO_SPT_RS_UNDERRUN 0x03
+ #define ATTO_SPT_RS_NO_DEVICE 0x04
+ #define ATTO_SPT_RS_NO_LUN 0x05
+ #define ATTO_SPT_RS_TIMEOUT 0x06
+ #define ATTO_SPT_RS_BUS_RESET 0x07
+ #define ATTO_SPT_RS_ABORTED 0x08
+ #define ATTO_SPT_RS_BUSY 0x09
+ #define ATTO_SPT_RS_DEGRADED 0x0A
+
+ u8 scsi_status;
+ u8 sense_length;
+ u32 flags;
+ #define ATTO_SPTF_DATA_IN 0x00000001
+ #define ATTO_SPTF_DATA_OUT 0x00000002
+ #define ATTO_SPTF_SIMPLE_Q 0x00000004
+ #define ATTO_SPTF_HEAD_OF_Q 0x00000008
+ #define ATTO_SPTF_ORDERED_Q 0x00000010
+
+ u32 timeout;
+ u32 target_id;
+ u8 lun[8];
+ u32 residual_length;
+ u8 sense_data[0xFC];
+ u8 reserved[0x28];
+};
+
+#define ATTO_FUNC_GET_DEV_ADDR 0x05
+#define ATTO_VER_GET_DEV_ADDR0 0
+#define ATTO_VER_GET_DEV_ADDR ATTO_VER_GET_DEV_ADDR0
+
+struct __packed atto_hba_get_device_address {
+ u8 addr_type;
+ #define ATTO_GDA_AT_PORT 0x00
+ #define ATTO_GDA_AT_NODE 0x01
+ #define ATTO_GDA_AT_MAC 0x02
+ #define ATTO_GDA_AT_PORTID 0x03
+ #define ATTO_GDA_AT_UNIQUE 0x04
+
+ u8 reserved;
+ u16 addr_len;
+ u32 target_id;
+ u8 address[256];
+};
+
+/* The following functions are supported by firmware but do not have any
+ * associated driver structures
+ */
+#define ATTO_FUNC_PHY_CTRL 0x06
+#define ATTO_FUNC_CONN_CTRL 0x0C
+#define ATTO_FUNC_ADAP_CTRL 0x0E
+#define ATTO_VER_ADAP_CTRL0 0
+#define ATTO_VER_ADAP_CTRL ATTO_VER_ADAP_CTRL0
+
+struct __packed atto_hba_adap_ctrl {
+ u8 adap_func;
+ #define ATTO_AC_AF_HARD_RST 0x00
+ #define ATTO_AC_AF_GET_STATE 0x01
+ #define ATTO_AC_AF_GET_TEMP 0x02
+
+ u8 adap_state;
+ #define ATTO_AC_AS_UNKNOWN 0x00
+ #define ATTO_AC_AS_OK 0x01
+ #define ATTO_AC_AS_RST_SCHED 0x02
+ #define ATTO_AC_AS_RST_IN_PROG 0x03
+ #define ATTO_AC_AS_RST_DISC 0x04
+ #define ATTO_AC_AS_DEGRADED 0x05
+ #define ATTO_AC_AS_DISABLED 0x06
+ #define ATTO_AC_AS_TEMP 0x07
+
+ u8 reserved[2];
+
+ union {
+ struct {
+ u8 temp_sensor;
+ u8 temp_state;
+
+ #define ATTO_AC_TS_UNSUPP 0x00
+ #define ATTO_AC_TS_UNKNOWN 0x01
+ #define ATTO_AC_TS_INIT_FAILED 0x02
+ #define ATTO_AC_TS_NORMAL 0x03
+ #define ATTO_AC_TS_OUT_OF_RANGE 0x04
+ #define ATTO_AC_TS_FAULT 0x05
+
+ signed short temp_value;
+ signed short temp_lower_lim;
+ signed short temp_upper_lim;
+ char temp_desc[32];
+ u8 reserved2[20];
+ };
+ };
+};
+
+#define ATTO_FUNC_GET_DEV_INFO 0x0F
+#define ATTO_VER_GET_DEV_INFO0 0
+#define ATTO_VER_GET_DEV_INFO ATTO_VER_GET_DEV_INFO0
+
+struct __packed atto_hba_sas_device_info {
+
+ #define ATTO_SDI_MAX_PHYS_WIDE_PORT 16
+
+ u8 phy_id[ATTO_SDI_MAX_PHYS_WIDE_PORT]; /* IDs of parent exp/adapt */
+ #define ATTO_SDI_PHY_ID_INV ATTO_SAS_PHY_ID_INV
+ u32 exp_target_id;
+ u32 sas_port_mask;
+ u8 sas_level;
+ #define ATTO_SDI_SAS_LVL_INV 0xFF
+
+ u8 slot_num;
+ #define ATTO_SDI_SLOT_NUM_INV ATTO_SLOT_NUM_INV
+
+ u8 dev_type;
+ #define ATTO_SDI_DT_END_DEVICE 0
+ #define ATTO_SDI_DT_EXPANDER 1
+ #define ATTO_SDI_DT_PORT_MULT 2
+
+ u8 ini_flags;
+ u8 tgt_flags;
+ u8 link_rate; /* SMP_RATE_XXX */
+ u8 loc_flags;
+ #define ATTO_SDI_LF_DIRECT 0x01
+ #define ATTO_SDI_LF_EXPANDER 0x02
+ #define ATTO_SDI_LF_PORT_MULT 0x04
+ u8 pm_port;
+ u8 reserved[0x60];
+};
+
+union atto_hba_device_info {
+ struct atto_hba_sas_device_info sas_dev_info;
+};
+
+struct __packed atto_hba_get_device_info {
+ u32 target_id;
+ u8 info_type;
+ #define ATTO_GDI_IT_UNKNOWN 0x00
+ #define ATTO_GDI_IT_SAS 0x01
+ #define ATTO_GDI_IT_FC 0x02
+ #define ATTO_GDI_IT_FCOE 0x03
+
+ u8 reserved[11];
+ union atto_hba_device_info dev_info;
+};
+
+struct atto_ioctl {
+ u8 version;
+ u8 function; /* ATTO_FUNC_XXX */
+ u8 status;
+#define ATTO_STS_SUCCESS 0x00
+#define ATTO_STS_FAILED 0x01
+#define ATTO_STS_INV_VERSION 0x02
+#define ATTO_STS_OUT_OF_RSRC 0x03
+#define ATTO_STS_INV_FUNC 0x04
+#define ATTO_STS_UNSUPPORTED 0x05
+#define ATTO_STS_INV_ADAPTER 0x06
+#define ATTO_STS_INV_DRVR_VER 0x07
+#define ATTO_STS_INV_PARAM 0x08
+#define ATTO_STS_TIMEOUT 0x09
+#define ATTO_STS_NOT_APPL 0x0A
+#define ATTO_STS_DEGRADED 0x0B
+
+ u8 flags;
+ #define HBAF_TUNNEL 0x01
+
+ u32 data_length;
+ u8 reserved2[56];
+
+ union {
+ u8 byte[1];
+ struct atto_hba_get_adapter_info get_adap_info;
+ struct atto_hba_get_adapter_address get_adap_addr;
+ struct atto_hba_scsi_pass_thru scsi_pass_thru;
+ struct atto_hba_get_device_address get_dev_addr;
+ struct atto_hba_adap_ctrl adap_ctrl;
+ struct atto_hba_get_device_info get_dev_info;
+ struct atto_hba_trace trace;
+ } data;
+
+};
+
+struct __packed atto_ioctl_vda_scsi_cmd {
+
+ #define ATTO_VDA_SCSI_VER0 0
+ #define ATTO_VDA_SCSI_VER ATTO_VDA_SCSI_VER0
+
+ u8 cdb[16];
+ u32 flags;
+ u32 data_length;
+ u32 residual_length;
+ u16 target_id;
+ u8 sense_len;
+ u8 scsi_stat;
+ u8 reserved[8];
+ u8 sense_data[80];
+};
+
+struct __packed atto_ioctl_vda_flash_cmd {
+
+ #define ATTO_VDA_FLASH_VER0 0
+ #define ATTO_VDA_FLASH_VER ATTO_VDA_FLASH_VER0
+
+ u32 flash_addr;
+ u32 data_length;
+ u8 sub_func;
+ u8 reserved[15];
+
+ union {
+ struct {
+ u32 flash_size;
+ u32 page_size;
+ u8 prod_info[32];
+ } info;
+
+ struct {
+ char file_name[16]; /* 8.3 fname, NULL term, wc=* */
+ u32 file_size;
+ } file;
+ } data;
+
+};
+
+struct __packed atto_ioctl_vda_diag_cmd {
+
+ #define ATTO_VDA_DIAG_VER0 0
+ #define ATTO_VDA_DIAG_VER ATTO_VDA_DIAG_VER0
+
+ u64 local_addr;
+ u32 data_length;
+ u8 sub_func;
+ u8 flags;
+ u8 reserved[3];
+};
+
+struct __packed atto_ioctl_vda_cli_cmd {
+
+ #define ATTO_VDA_CLI_VER0 0
+ #define ATTO_VDA_CLI_VER ATTO_VDA_CLI_VER0
+
+ u32 cmd_rsp_len;
+};
+
+struct __packed atto_ioctl_vda_smp_cmd {
+
+ #define ATTO_VDA_SMP_VER0 0
+ #define ATTO_VDA_SMP_VER ATTO_VDA_SMP_VER0
+
+ u64 dest;
+ u32 cmd_rsp_len;
+};
+
+struct __packed atto_ioctl_vda_cfg_cmd {
+
+ #define ATTO_VDA_CFG_VER0 0
+ #define ATTO_VDA_CFG_VER ATTO_VDA_CFG_VER0
+
+ u32 data_length;
+ u8 cfg_func;
+ u8 reserved[11];
+
+ union {
+ u8 bytes[112];
+ struct atto_vda_cfg_init init;
+ } data;
+
+};
+
+struct __packed atto_ioctl_vda_mgt_cmd {
+
+ #define ATTO_VDA_MGT_VER0 0
+ #define ATTO_VDA_MGT_VER ATTO_VDA_MGT_VER0
+
+ u8 mgt_func;
+ u8 scan_generation;
+ u16 dev_index;
+ u32 data_length;
+ u8 reserved[8];
+ union {
+ u8 bytes[112];
+ struct atto_vda_devinfo dev_info;
+ struct atto_vda_grp_info grp_info;
+ struct atto_vdapart_info part_info;
+ struct atto_vda_dh_info dh_info;
+ struct atto_vda_metrics_info metrics_info;
+ struct atto_vda_schedule_info sched_info;
+ struct atto_vda_n_vcache_info nvcache_info;
+ struct atto_vda_buzzer_info buzzer_info;
+ struct atto_vda_adapter_info adapter_info;
+ struct atto_vda_temp_info temp_info;
+ struct atto_vda_fan_info fan_info;
+ } data;
+};
+
+struct __packed atto_ioctl_vda_gsv_cmd {
+
+ #define ATTO_VDA_GSV_VER0 0
+ #define ATTO_VDA_GSV_VER ATTO_VDA_GSV_VER0
+
+ u8 rsp_len;
+ u8 reserved[7];
+ u8 version_info[1];
+ #define ATTO_VDA_VER_UNSUPPORTED 0xFF
+
+};
+
+struct __packed atto_ioctl_vda {
+ u8 version;
+ u8 function; /* VDA_FUNC_XXXX */
+ u8 status; /* ATTO_STS_XXX */
+ u8 vda_status; /* RS_XXX (if status == ATTO_STS_SUCCESS) */
+ u32 data_length;
+ u8 reserved[8];
+
+ union {
+ struct atto_ioctl_vda_scsi_cmd scsi;
+ struct atto_ioctl_vda_flash_cmd flash;
+ struct atto_ioctl_vda_diag_cmd diag;
+ struct atto_ioctl_vda_cli_cmd cli;
+ struct atto_ioctl_vda_smp_cmd smp;
+ struct atto_ioctl_vda_cfg_cmd cfg;
+ struct atto_ioctl_vda_mgt_cmd mgt;
+ struct atto_ioctl_vda_gsv_cmd gsv;
+ u8 cmd_info[256];
+ } cmd;
+
+ union {
+ u8 data[1];
+ struct atto_vda_devinfo2 dev_info2;
+ } data;
+
+};
+
+struct __packed atto_ioctl_smp {
+ u8 version;
+ #define ATTO_SMP_VERSION0 0
+ #define ATTO_SMP_VERSION1 1
+ #define ATTO_SMP_VERSION2 2
+ #define ATTO_SMP_VERSION ATTO_SMP_VERSION2
+
+ u8 function;
+#define ATTO_SMP_FUNC_DISC_SMP 0x00
+#define ATTO_SMP_FUNC_DISC_TARG 0x01
+#define ATTO_SMP_FUNC_SEND_CMD 0x02
+#define ATTO_SMP_FUNC_DISC_TARG_DIRECT 0x03
+#define ATTO_SMP_FUNC_SEND_CMD_DIRECT 0x04
+#define ATTO_SMP_FUNC_DISC_SMP_DIRECT 0x05
+
+ u8 status; /* ATTO_STS_XXX */
+ u8 smp_status; /* if status == ATTO_STS_SUCCESS */
+ #define ATTO_SMP_STS_SUCCESS 0x00
+ #define ATTO_SMP_STS_FAILURE 0x01
+ #define ATTO_SMP_STS_RESCAN 0x02
+ #define ATTO_SMP_STS_NOT_FOUND 0x03
+
+ u16 target_id;
+ u8 phy_id;
+ u8 dev_index;
+ u64 smp_sas_addr;
+ u64 targ_sas_addr;
+ u32 req_length;
+ u32 rsp_length;
+ u8 flags;
+ #define ATTO_SMPF_ROOT_EXP 0x01 /* expander direct attached */
+
+ u8 reserved[31];
+
+ union {
+ u8 byte[1];
+ u32 dword[1];
+ } data;
+
+};
+
+struct __packed atto_express_ioctl {
+ struct atto_express_ioctl_header header;
+
+ union {
+ struct atto_firmware_rw_request fwrw;
+ struct atto_param_rw_request prw;
+ struct atto_channel_list chanlist;
+ struct atto_channel_info chaninfo;
+ struct atto_ioctl ioctl_hba;
+ struct atto_module_info modinfo;
+ struct atto_ioctl_vda ioctl_vda;
+ struct atto_ioctl_smp ioctl_smp;
+ struct atto_csmi csmi;
+
+ } data;
+};
+
+/* The struct associated with each code is listed after its definition */
+#define EXPRESS_IOCTL_MIN 0x4500
+#define EXPRESS_IOCTL_RW_FIRMWARE 0x4500 /* FIRMWARERW */
+#define EXPRESS_IOCTL_READ_PARAMS 0x4501 /* PARAMRW */
+#define EXPRESS_IOCTL_WRITE_PARAMS 0x4502 /* PARAMRW */
+#define EXPRESS_IOCTL_FC_API 0x4503 /* internal */
+#define EXPRESS_IOCTL_GET_CHANNELS 0x4504 /* CHANNELLIST */
+#define EXPRESS_IOCTL_CHAN_INFO 0x4505 /* CHANNELINFO */
+#define EXPRESS_IOCTL_DEFAULT_PARAMS 0x4506 /* PARAMRW */
+#define EXPRESS_ADDR_MEMORY 0x4507 /* MEMADDR */
+#define EXPRESS_RW_MEMORY 0x4508 /* MEMRW */
+#define EXPRESS_TSDK_DUMP 0x4509 /* TSDKDUMP */
+#define EXPRESS_IOCTL_SMP 0x450A /* IOCTL_SMP */
+#define EXPRESS_CSMI 0x450B /* CSMI */
+#define EXPRESS_IOCTL_HBA 0x450C /* IOCTL_HBA */
+#define EXPRESS_IOCTL_VDA 0x450D /* IOCTL_VDA */
+#define EXPRESS_IOCTL_GET_ID 0x450E /* GET_ID */
+#define EXPRESS_IOCTL_GET_MOD_INFO 0x450F /* MODULE_INFO */
+#define EXPRESS_IOCTL_MAX 0x450F
+
+#endif
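
A minimal sketch of how a request built from the structures above could be
initialised.  It assumes the header is included as "atioctl.h" from within the
driver directory; the helper name fill_express_hdr is hypothetical, and the
mechanism that actually delivers the request to the driver (the device node
and the ioctl call, e.g. with a code such as EXPRESS_IOCTL_GET_CHANNELS) is
not shown here.

    #include <linux/types.h>        /* u8 and friends */
    #include <linux/string.h>       /* memcpy(), memset() */
    #include "atioctl.h"            /* structures and constants defined above */

    /* Fill the common header: copy the "Express" signature and select the
     * channel (0xFF means "handle on the adapter the request came in on",
     * per the note above).  return_code is written by the driver on
     * completion.
     */
    static void fill_express_hdr(struct atto_express_ioctl *req, u8 channel)
    {
            memset(req, 0, sizeof(*req));
            memcpy(req->header.signature, EXPRESS_IOCTL_SIGNATURE,
                   EXPRESS_IOCTL_SIGNATURE_SIZE);
            req->header.channel = channel;
    }
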
diff --git a/drivers/scsi/esas2r/atvda.h b/drivers/scsi/esas2r/atvda.h
new file mode 100644
index 00000000000..5fc1f991d24
--- /dev/null
+++ b/drivers/scsi/esas2r/atvda.h
@@ -0,0 +1,1319 @@
+/* linux/drivers/scsi/esas2r/atvda.h
+ * ATTO VDA interface definitions
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+
+#ifndef ATVDA_H
+#define ATVDA_H
+
+struct __packed atto_dev_addr {
+ u64 dev_port;
+ u64 hba_port;
+ u8 lun;
+ u8 flags;
+ #define VDA_DEVADDRF_SATA 0x01
+ #define VDA_DEVADDRF_SSD 0x02
+ u8 link_speed; /* VDALINKSPEED_xxx */
+ u8 pad[1];
+};
+
+/* dev_addr2 was added for 64-bit alignment */
+
+struct __packed atto_dev_addr2 {
+ u64 dev_port;
+ u64 hba_port;
+ u8 lun;
+ u8 flags;
+ u8 link_speed;
+ u8 pad[5];
+};
+
+struct __packed atto_vda_sge {
+ u32 length;
+ u64 address;
+};
+
+
+/* VDA request function codes */
+
+#define VDA_FUNC_SCSI 0x00
+#define VDA_FUNC_FLASH 0x01
+#define VDA_FUNC_DIAG 0x02
+#define VDA_FUNC_AE 0x03
+#define VDA_FUNC_CLI 0x04
+#define VDA_FUNC_IOCTL 0x05
+#define VDA_FUNC_CFG 0x06
+#define VDA_FUNC_MGT 0x07
+#define VDA_FUNC_GSV 0x08
+
+
+/* VDA request status values. for host driver considerations, values for
+ * SCSI requests start at zero. other requests may use these values as well. */
+
+#define RS_SUCCESS 0x00 /*! successful completion */
+#define RS_INV_FUNC 0x01 /*! invalid command function */
+#define RS_BUSY 0x02 /*! insufficient resources */
+#define RS_SEL 0x03 /*! no target at target_id */
+#define RS_NO_LUN 0x04 /*! invalid LUN */
+#define RS_TIMEOUT 0x05 /*! request timeout */
+#define RS_OVERRUN 0x06 /*! data overrun */
+#define RS_UNDERRUN 0x07 /*! data underrun */
+#define RS_SCSI_ERROR 0x08 /*! SCSI error occurred */
+#define RS_ABORTED 0x0A /*! command aborted */
+#define RS_RESID_MISM 0x0B /*! residual length incorrect */
+#define RS_TM_FAILED 0x0C /*! task management failed */
+#define RS_RESET 0x0D /*! aborted due to bus reset */
+#define RS_ERR_DMA_SG 0x0E /*! error reading SG list */
+#define RS_ERR_DMA_DATA 0x0F /*! error transferring data */
+#define RS_UNSUPPORTED 0x10 /*! unsupported request */
+#define RS_SEL2                0x70 /*! internally generated RS_SEL */
+#define RS_VDA_BASE 0x80 /*! base of VDA-specific errors */
+#define RS_MGT_BASE 0x80 /*! base of VDA management errors */
+#define RS_SCAN_FAIL (RS_MGT_BASE + 0x00)
+#define RS_DEV_INVALID (RS_MGT_BASE + 0x01)
+#define RS_DEV_ASSIGNED (RS_MGT_BASE + 0x02)
+#define RS_DEV_REMOVE (RS_MGT_BASE + 0x03)
+#define RS_DEV_LOST (RS_MGT_BASE + 0x04)
+#define RS_SCAN_GEN (RS_MGT_BASE + 0x05)
+#define RS_GRP_INVALID (RS_MGT_BASE + 0x08)
+#define RS_GRP_EXISTS (RS_MGT_BASE + 0x09)
+#define RS_GRP_LIMIT (RS_MGT_BASE + 0x0A)
+#define RS_GRP_INTLV (RS_MGT_BASE + 0x0B)
+#define RS_GRP_SPAN (RS_MGT_BASE + 0x0C)
+#define RS_GRP_TYPE (RS_MGT_BASE + 0x0D)
+#define RS_GRP_MEMBERS (RS_MGT_BASE + 0x0E)
+#define RS_GRP_COMMIT (RS_MGT_BASE + 0x0F)
+#define RS_GRP_REBUILD (RS_MGT_BASE + 0x10)
+#define RS_GRP_REBUILD_TYPE (RS_MGT_BASE + 0x11)
+#define RS_GRP_BLOCK_SIZE (RS_MGT_BASE + 0x12)
+#define RS_CFG_SAVE (RS_MGT_BASE + 0x14)
+#define RS_PART_LAST (RS_MGT_BASE + 0x18)
+#define RS_ELEM_INVALID (RS_MGT_BASE + 0x19)
+#define RS_PART_MAPPED (RS_MGT_BASE + 0x1A)
+#define RS_PART_TARGET (RS_MGT_BASE + 0x1B)
+#define RS_PART_LUN (RS_MGT_BASE + 0x1C)
+#define RS_PART_DUP (RS_MGT_BASE + 0x1D)
+#define RS_PART_NOMAP (RS_MGT_BASE + 0x1E)
+#define RS_PART_MAX (RS_MGT_BASE + 0x1F)
+#define RS_PART_CAP (RS_MGT_BASE + 0x20)
+#define RS_PART_STATE (RS_MGT_BASE + 0x21)
+#define RS_TEST_IN_PROG (RS_MGT_BASE + 0x22)
+#define RS_METRICS_ERROR (RS_MGT_BASE + 0x23)
+#define RS_HS_ERROR (RS_MGT_BASE + 0x24)
+#define RS_NO_METRICS_TEST (RS_MGT_BASE + 0x25)
+#define RS_BAD_PARAM (RS_MGT_BASE + 0x26)
+#define RS_GRP_MEMBER_SIZE (RS_MGT_BASE + 0x27)
+#define RS_FLS_BASE 0xB0 /*! base of VDA errors */
+#define RS_FLS_ERR_AREA (RS_FLS_BASE + 0x00)
+#define RS_FLS_ERR_BUSY (RS_FLS_BASE + 0x01)
+#define RS_FLS_ERR_RANGE (RS_FLS_BASE + 0x02)
+#define RS_FLS_ERR_BEGIN (RS_FLS_BASE + 0x03)
+#define RS_FLS_ERR_CHECK (RS_FLS_BASE + 0x04)
+#define RS_FLS_ERR_FAIL (RS_FLS_BASE + 0x05)
+#define RS_FLS_ERR_RSRC (RS_FLS_BASE + 0x06)
+#define RS_FLS_ERR_NOFILE (RS_FLS_BASE + 0x07)
+#define RS_FLS_ERR_FSIZE (RS_FLS_BASE + 0x08)
+#define RS_CFG_BASE 0xC0 /*! base of VDA configuration errors */
+#define RS_CFG_ERR_BUSY (RS_CFG_BASE + 0)
+#define RS_CFG_ERR_SGE (RS_CFG_BASE + 1)
+#define RS_CFG_ERR_DATE (RS_CFG_BASE + 2)
+#define RS_CFG_ERR_TIME (RS_CFG_BASE + 3)
+#define RS_DEGRADED 0xFB /*! degraded mode */
+#define RS_CLI_INTERNAL 0xFC /*! VDA CLI internal error */
+#define RS_VDA_INTERNAL 0xFD /*! catch-all */
+#define RS_PENDING 0xFE /*! pending, not started */
+#define RS_STARTED 0xFF /*! started */
+
+
+/* flash request subfunctions. these are used in both the IOCTL and the
+ * driver-firmware interface (VDA_FUNC_FLASH). */
+
+#define VDA_FLASH_BEGINW 0x00
+#define VDA_FLASH_READ 0x01
+#define VDA_FLASH_WRITE 0x02
+#define VDA_FLASH_COMMIT 0x03
+#define VDA_FLASH_CANCEL 0x04
+#define VDA_FLASH_INFO 0x05
+#define VDA_FLASH_FREAD 0x06
+#define VDA_FLASH_FWRITE 0x07
+#define VDA_FLASH_FINFO 0x08
+
+
+/* IOCTL request subfunctions. these identify the payload type for
+ * VDA_FUNC_IOCTL.
+ */
+
+#define VDA_IOCTL_HBA 0x00
+#define VDA_IOCTL_CSMI 0x01
+#define VDA_IOCTL_SMP 0x02
+
+struct __packed atto_vda_devinfo {
+ struct atto_dev_addr dev_addr;
+ u8 vendor_id[8];
+ u8 product_id[16];
+ u8 revision[4];
+ u64 capacity;
+ u32 block_size;
+ u8 dev_type;
+
+ union {
+ u8 dev_status;
+ #define VDADEVSTAT_INVALID 0x00
+ #define VDADEVSTAT_CORRUPT VDADEVSTAT_INVALID
+ #define VDADEVSTAT_ASSIGNED 0x01
+ #define VDADEVSTAT_SPARE 0x02
+ #define VDADEVSTAT_UNAVAIL 0x03
+ #define VDADEVSTAT_PT_MAINT 0x04
+ #define VDADEVSTAT_LCLSPARE 0x05
+ #define VDADEVSTAT_UNUSEABLE 0x06
+ #define VDADEVSTAT_AVAIL 0xFF
+
+ u8 op_ctrl;
+ #define VDA_DEV_OP_CTRL_START 0x01
+ #define VDA_DEV_OP_CTRL_HALT 0x02
+ #define VDA_DEV_OP_CTRL_RESUME 0x03
+ #define VDA_DEV_OP_CTRL_CANCEL 0x04
+ };
+
+ u8 member_state;
+ #define VDAMBRSTATE_ONLINE 0x00
+ #define VDAMBRSTATE_DEGRADED 0x01
+ #define VDAMBRSTATE_UNAVAIL 0x02
+ #define VDAMBRSTATE_FAULTED 0x03
+ #define VDAMBRSTATE_MISREAD 0x04
+ #define VDAMBRSTATE_INCOMPAT 0x05
+
+ u8 operation;
+ #define VDAOP_NONE 0x00
+ #define VDAOP_REBUILD 0x01
+ #define VDAOP_ERASE 0x02
+ #define VDAOP_PATTERN 0x03
+ #define VDAOP_CONVERSION 0x04
+ #define VDAOP_FULL_INIT 0x05
+ #define VDAOP_QUICK_INIT 0x06
+ #define VDAOP_SECT_SCAN 0x07
+ #define VDAOP_SECT_SCAN_PARITY 0x08
+ #define VDAOP_SECT_SCAN_PARITY_FIX 0x09
+ #define VDAOP_RECOV_REBUILD 0x0A
+
+ u8 op_status;
+ #define VDAOPSTAT_OK 0x00
+ #define VDAOPSTAT_FAULTED 0x01
+ #define VDAOPSTAT_HALTED 0x02
+ #define VDAOPSTAT_INT 0x03
+
+ u8 progress; /* 0 - 100% */
+ u16 ses_dev_index;
+ #define VDASESDI_INVALID 0xFFFF
+
+ u8 serial_no[32];
+
+ union {
+ u16 target_id;
+ #define VDATGTID_INVALID 0xFFFF
+
+ u16 features_mask;
+ };
+
+ u16 lun;
+ u16 features;
+ #define VDADEVFEAT_ENC_SERV 0x0001
+ #define VDADEVFEAT_IDENT 0x0002
+ #define VDADEVFEAT_DH_SUPP 0x0004
+ #define VDADEVFEAT_PHYS_ID 0x0008
+
+ u8 ses_element_id;
+ u8 link_speed;
+ #define VDALINKSPEED_UNKNOWN 0x00
+ #define VDALINKSPEED_1GB 0x01
+ #define VDALINKSPEED_1_5GB 0x02
+ #define VDALINKSPEED_2GB 0x03
+ #define VDALINKSPEED_3GB 0x04
+ #define VDALINKSPEED_4GB 0x05
+ #define VDALINKSPEED_6GB 0x06
+ #define VDALINKSPEED_8GB 0x07
+
+ u16 phys_target_id;
+ u8 reserved[2];
+};
+
+
+/*! struct atto_vda_devinfo2 is a replacement for atto_vda_devinfo.  It
+ * extends beyond the 0x70 bytes allowed in atto_vda_mgmt_req; therefore,
+ * the entire structure is DMAed between the firmware and host buffer and
+ * the data will always be in little endian format.
+ */
+
+struct __packed atto_vda_devinfo2 {
+ struct atto_dev_addr dev_addr;
+ u8 vendor_id[8];
+ u8 product_id[16];
+ u8 revision[4];
+ u64 capacity;
+ u32 block_size;
+ u8 dev_type;
+ u8 dev_status;
+ u8 member_state;
+ u8 operation;
+ u8 op_status;
+ u8 progress;
+ u16 ses_dev_index;
+ u8 serial_no[32];
+ union {
+ u16 target_id;
+ u16 features_mask;
+ };
+
+ u16 lun;
+ u16 features;
+ u8 ses_element_id;
+ u8 link_speed;
+ u16 phys_target_id;
+ u8 reserved[2];
+
+/* This is where fields specific to struct atto_vda_devinfo2 begin. Note
+ * that the structure version started at one so applications that unionize this
+ * structure with atto_vda_devinfo can differentiate them if desired.
+ */
+
+ u8 version;
+ #define VDADEVINFO_VERSION0 0x00
+ #define VDADEVINFO_VERSION1 0x01
+ #define VDADEVINFO_VERSION2 0x02
+ #define VDADEVINFO_VERSION3 0x03
+ #define VDADEVINFO_VERSION VDADEVINFO_VERSION3
+
+ u8 reserved2[3];
+
+ /* sector scanning fields */
+
+ u32 ss_curr_errors;
+ u64 ss_curr_scanned;
+ u32 ss_curr_recvrd;
+ u32 ss_scan_length;
+ u32 ss_total_errors;
+ u32 ss_total_recvrd;
+ u32 ss_num_scans;
+
+ /* grp_name was added in version 2 of this structure. */
+
+ char grp_name[15];
+ u8 reserved3[4];
+
+ /* dev_addr_list was added in version 3 of this structure. */
+
+ u8 num_dev_addr;
+ struct atto_dev_addr2 dev_addr_list[8];
+};
+
+
+struct __packed atto_vda_grp_info {
+ u8 grp_index;
+ #define VDA_MAX_RAID_GROUPS 32
+
+ char grp_name[15];
+ u64 capacity;
+ u32 block_size;
+ u32 interleave;
+ u8 type;
+ #define VDA_GRP_TYPE_RAID0 0
+ #define VDA_GRP_TYPE_RAID1 1
+ #define VDA_GRP_TYPE_RAID4 4
+ #define VDA_GRP_TYPE_RAID5 5
+ #define VDA_GRP_TYPE_RAID6 6
+ #define VDA_GRP_TYPE_RAID10 10
+ #define VDA_GRP_TYPE_RAID40 40
+ #define VDA_GRP_TYPE_RAID50 50
+ #define VDA_GRP_TYPE_RAID60 60
+ #define VDA_GRP_TYPE_DVRAID_HS 252
+ #define VDA_GRP_TYPE_DVRAID_NOHS 253
+ #define VDA_GRP_TYPE_JBOD 254
+ #define VDA_GRP_TYPE_SPARE 255
+
+ union {
+ u8 status;
+ #define VDA_GRP_STAT_INVALID 0x00
+ #define VDA_GRP_STAT_NEW 0x01
+ #define VDA_GRP_STAT_WAITING 0x02
+ #define VDA_GRP_STAT_ONLINE 0x03
+ #define VDA_GRP_STAT_DEGRADED 0x04
+ #define VDA_GRP_STAT_OFFLINE 0x05
+ #define VDA_GRP_STAT_DELETED 0x06
+ #define VDA_GRP_STAT_RECOV_BASIC 0x07
+ #define VDA_GRP_STAT_RECOV_EXTREME 0x08
+
+ u8 op_ctrl;
+ #define VDA_GRP_OP_CTRL_START 0x01
+ #define VDA_GRP_OP_CTRL_HALT 0x02
+ #define VDA_GRP_OP_CTRL_RESUME 0x03
+ #define VDA_GRP_OP_CTRL_CANCEL 0x04
+ };
+
+ u8 rebuild_state;
+ #define VDA_RBLD_NONE 0x00
+ #define VDA_RBLD_REBUILD 0x01
+ #define VDA_RBLD_ERASE 0x02
+ #define VDA_RBLD_PATTERN 0x03
+ #define VDA_RBLD_CONV 0x04
+ #define VDA_RBLD_FULL_INIT 0x05
+ #define VDA_RBLD_QUICK_INIT 0x06
+ #define VDA_RBLD_SECT_SCAN 0x07
+ #define VDA_RBLD_SECT_SCAN_PARITY 0x08
+ #define VDA_RBLD_SECT_SCAN_PARITY_FIX 0x09
+ #define VDA_RBLD_RECOV_REBUILD 0x0A
+ #define VDA_RBLD_RECOV_BASIC 0x0B
+ #define VDA_RBLD_RECOV_EXTREME 0x0C
+
+ u8 span_depth;
+ u8 progress;
+ u8 mirror_width;
+ u8 stripe_width;
+ u8 member_cnt;
+
+ union {
+ u16 members[32];
+ #define VDA_MEMBER_MISSING 0xFFFF
+ #define VDA_MEMBER_NEW 0xFFFE
+ u16 features_mask;
+ };
+
+ u16 features;
+ #define VDA_GRP_FEAT_HOTSWAP 0x0001
+ #define VDA_GRP_FEAT_SPDRD_MASK 0x0006
+ #define VDA_GRP_FEAT_SPDRD_DIS 0x0000
+ #define VDA_GRP_FEAT_SPDRD_ENB 0x0002
+ #define VDA_GRP_FEAT_SPDRD_AUTO 0x0004
+ #define VDA_GRP_FEAT_IDENT 0x0008
+ #define VDA_GRP_FEAT_RBLDPRI_MASK 0x0030
+ #define VDA_GRP_FEAT_RBLDPRI_LOW 0x0010
+ #define VDA_GRP_FEAT_RBLDPRI_SAME 0x0020
+ #define VDA_GRP_FEAT_RBLDPRI_HIGH 0x0030
+ #define VDA_GRP_FEAT_WRITE_CACHE 0x0040
+ #define VDA_GRP_FEAT_RBLD_RESUME 0x0080
+ #define VDA_GRP_FEAT_SECT_RESUME 0x0100
+ #define VDA_GRP_FEAT_INIT_RESUME 0x0200
+ #define VDA_GRP_FEAT_SSD 0x0400
+ #define VDA_GRP_FEAT_BOOT_DEV 0x0800
+
+ /*
+	 * For backward compatibility, a prefetch value of zero means the
+	 * setting is ignored/unsupported; therefore, the firmware-supported
+	 * values 0-6 are incremented to 1-7.
+ */
+
+ u8 prefetch;
+ u8 op_status;
+ #define VDAGRPOPSTAT_MASK 0x0F
+ #define VDAGRPOPSTAT_INVALID 0x00
+ #define VDAGRPOPSTAT_OK 0x01
+ #define VDAGRPOPSTAT_FAULTED 0x02
+ #define VDAGRPOPSTAT_HALTED 0x03
+ #define VDAGRPOPSTAT_INT 0x04
+ #define VDAGRPOPPROC_MASK 0xF0
+ #define VDAGRPOPPROC_STARTABLE 0x10
+ #define VDAGRPOPPROC_CANCELABLE 0x20
+ #define VDAGRPOPPROC_RESUMABLE 0x40
+ #define VDAGRPOPPROC_HALTABLE 0x80
+ u8 over_provision;
+ u8 reserved[3];
+
+};
+
+
+struct __packed atto_vdapart_info {
+ u8 part_no;
+ #define VDA_MAX_PARTITIONS 128
+
+ char grp_name[15];
+ u64 part_size;
+ u64 start_lba;
+ u32 block_size;
+ u16 target_id;
+ u8 LUN;
+ char serial_no[41];
+ u8 features;
+ #define VDAPI_FEAT_WRITE_CACHE 0x01
+
+ u8 reserved[7];
+};
+
+
+struct __packed atto_vda_dh_info {
+ u8 req_type;
+ #define VDADH_RQTYPE_CACHE 0x01
+ #define VDADH_RQTYPE_FETCH 0x02
+ #define VDADH_RQTYPE_SET_STAT 0x03
+ #define VDADH_RQTYPE_GET_STAT 0x04
+
+ u8 req_qual;
+ #define VDADH_RQQUAL_SMART 0x01
+ #define VDADH_RQQUAL_MEDDEF 0x02
+ #define VDADH_RQQUAL_INFOEXC 0x04
+
+ u8 num_smart_attribs;
+ u8 status;
+ #define VDADH_STAT_DISABLE 0x00
+ #define VDADH_STAT_ENABLE 0x01
+
+ u32 med_defect_cnt;
+ u32 info_exc_cnt;
+ u8 smart_status;
+ #define VDADH_SMARTSTAT_OK 0x00
+ #define VDADH_SMARTSTAT_ERR 0x01
+
+ u8 reserved[35];
+ struct atto_vda_sge sge[1];
+};
+
+
+struct __packed atto_vda_dh_smart {
+ u8 attrib_id;
+ u8 current_val;
+ u8 worst;
+ u8 threshold;
+ u8 raw_data[6];
+ u8 raw_attrib_status;
+ #define VDADHSM_RAWSTAT_PREFAIL_WARRANTY 0x01
+ #define VDADHSM_RAWSTAT_ONLINE_COLLECTION 0x02
+ #define VDADHSM_RAWSTAT_PERFORMANCE_ATTR 0x04
+ #define VDADHSM_RAWSTAT_ERROR_RATE_ATTR 0x08
+ #define VDADHSM_RAWSTAT_EVENT_COUNT_ATTR 0x10
+ #define VDADHSM_RAWSTAT_SELF_PRESERVING_ATTR 0x20
+
+ u8 calc_attrib_status;
+ #define VDADHSM_CALCSTAT_UNKNOWN 0x00
+ #define VDADHSM_CALCSTAT_GOOD 0x01
+ #define VDADHSM_CALCSTAT_PREFAIL 0x02
+ #define VDADHSM_CALCSTAT_OLDAGE 0x03
+
+ u8 reserved[4];
+};
+
+
+struct __packed atto_vda_metrics_info {
+ u8 data_version;
+ #define VDAMET_VERSION0 0x00
+ #define VDAMET_VERSION VDAMET_VERSION0
+
+ u8 metrics_action;
+ #define VDAMET_METACT_NONE 0x00
+ #define VDAMET_METACT_START 0x01
+ #define VDAMET_METACT_STOP 0x02
+ #define VDAMET_METACT_RETRIEVE 0x03
+ #define VDAMET_METACT_CLEAR 0x04
+
+ u8 test_action;
+ #define VDAMET_TSTACT_NONE 0x00
+ #define VDAMET_TSTACT_STRT_INIT 0x01
+ #define VDAMET_TSTACT_STRT_READ 0x02
+ #define VDAMET_TSTACT_STRT_VERIFY 0x03
+ #define VDAMET_TSTACT_STRT_INIT_VERIFY 0x04
+ #define VDAMET_TSTACT_STOP 0x05
+
+ u8 num_dev_indexes;
+ #define VDAMET_ALL_DEVICES 0xFF
+
+ u16 dev_indexes[32];
+ u8 reserved[12];
+ struct atto_vda_sge sge[1];
+};
+
+
+struct __packed atto_vda_metrics_data {
+ u16 dev_index;
+ u16 length;
+ #define VDAMD_LEN_LAST 0x8000
+ #define VDAMD_LEN_MASK 0x0FFF
+
+ u32 flags;
+ #define VDAMDF_RUN 0x00000007
+ #define VDAMDF_RUN_READ 0x00000001
+ #define VDAMDF_RUN_WRITE 0x00000002
+ #define VDAMDF_RUN_ALL 0x00000004
+ #define VDAMDF_READ 0x00000010
+ #define VDAMDF_WRITE 0x00000020
+ #define VDAMDF_ALL 0x00000040
+ #define VDAMDF_DRIVETEST 0x40000000
+ #define VDAMDF_NEW 0x80000000
+
+ u64 total_read_data;
+ u64 total_write_data;
+ u64 total_read_io;
+ u64 total_write_io;
+ u64 read_start_time;
+ u64 read_stop_time;
+ u64 write_start_time;
+ u64 write_stop_time;
+ u64 read_maxio_time;
+ u64 wpvdadmetricsdatarite_maxio_time;
+ u64 read_totalio_time;
+ u64 write_totalio_time;
+ u64 read_total_errs;
+ u64 write_total_errs;
+ u64 read_recvrd_errs;
+ u64 write_recvrd_errs;
+ u64 miscompares;
+};
+
+
+struct __packed atto_vda_schedule_info {
+ u8 schedule_type;
+ #define VDASI_SCHTYPE_ONETIME 0x01
+ #define VDASI_SCHTYPE_DAILY 0x02
+ #define VDASI_SCHTYPE_WEEKLY 0x03
+
+ u8 operation;
+ #define VDASI_OP_NONE 0x00
+ #define VDASI_OP_CREATE 0x01
+ #define VDASI_OP_CANCEL 0x02
+
+ u8 hour;
+ u8 minute;
+ u8 day;
+ #define VDASI_DAY_NONE 0x00
+
+ u8 progress;
+ #define VDASI_PROG_NONE 0xFF
+
+ u8 event_type;
+ #define VDASI_EVTTYPE_SECT_SCAN 0x01
+ #define VDASI_EVTTYPE_SECT_SCAN_PARITY 0x02
+ #define VDASI_EVTTYPE_SECT_SCAN_PARITY_FIX 0x03
+
+ u8 recurrences;
+ #define VDASI_RECUR_FOREVER 0x00
+
+ u32 id;
+ #define VDASI_ID_NONE 0x00
+
+ char grp_name[15];
+ u8 reserved[85];
+};
+
+
+struct __packed atto_vda_n_vcache_info {
+ u8 super_cap_status;
+ #define VDANVCI_SUPERCAP_NOT_PRESENT 0x00
+ #define VDANVCI_SUPERCAP_FULLY_CHARGED 0x01
+ #define VDANVCI_SUPERCAP_NOT_CHARGED 0x02
+
+ u8 nvcache_module_status;
+ #define VDANVCI_NVCACHEMODULE_NOT_PRESENT 0x00
+ #define VDANVCI_NVCACHEMODULE_PRESENT 0x01
+
+ u8 protection_mode;
+ #define VDANVCI_PROTMODE_HI_PROTECT 0x00
+ #define VDANVCI_PROTMODE_HI_PERFORM 0x01
+
+ u8 reserved[109];
+};
+
+
+struct __packed atto_vda_buzzer_info {
+ u8 status;
+ #define VDABUZZI_BUZZER_OFF 0x00
+ #define VDABUZZI_BUZZER_ON 0x01
+ #define VDABUZZI_BUZZER_LAST 0x02
+
+ u8 reserved[3];
+ u32 duration;
+ #define VDABUZZI_DURATION_INDEFINITE 0xffffffff
+
+ u8 reserved2[104];
+};
+
+
+struct __packed atto_vda_adapter_info {
+ u8 version;
+ #define VDAADAPINFO_VERSION0 0x00
+ #define VDAADAPINFO_VERSION VDAADAPINFO_VERSION0
+
+ u8 reserved;
+ signed short utc_offset;
+ u32 utc_time;
+ u32 features;
+ #define VDA_ADAP_FEAT_IDENT 0x0001
+ #define VDA_ADAP_FEAT_BUZZ_ERR 0x0002
+ #define VDA_ADAP_FEAT_UTC_TIME 0x0004
+
+ u32 valid_features;
+ char active_config[33];
+ u8 temp_count;
+ u8 fan_count;
+ u8 reserved3[61];
+};
+
+
+struct __packed atto_vda_temp_info {
+ u8 temp_index;
+ u8 max_op_temp;
+ u8 min_op_temp;
+ u8 op_temp_warn;
+ u8 temperature;
+ u8 type;
+ #define VDA_TEMP_TYPE_CPU 1
+
+ u8 reserved[106];
+};
+
+
+struct __packed atto_vda_fan_info {
+ u8 fan_index;
+ u8 status;
+ #define VDA_FAN_STAT_UNKNOWN 0
+ #define VDA_FAN_STAT_NORMAL 1
+ #define VDA_FAN_STAT_FAIL 2
+
+ u16 crit_pvdafaninfothreshold;
+ u16 warn_threshold;
+ u16 speed;
+ u8 reserved[104];
+};
+
+
+/* VDA management commands */
+
+#define VDAMGT_DEV_SCAN 0x00
+#define VDAMGT_DEV_INFO 0x01
+#define VDAMGT_DEV_CLEAN 0x02
+#define VDAMGT_DEV_IDENTIFY 0x03
+#define VDAMGT_DEV_IDENTSTOP 0x04
+#define VDAMGT_DEV_PT_INFO 0x05
+#define VDAMGT_DEV_FEATURES 0x06
+#define VDAMGT_DEV_PT_FEATURES 0x07
+#define VDAMGT_DEV_HEALTH_REQ 0x08
+#define VDAMGT_DEV_METRICS 0x09
+#define VDAMGT_DEV_INFO2 0x0A
+#define VDAMGT_DEV_OPERATION 0x0B
+#define VDAMGT_DEV_INFO2_BYADDR 0x0C
+#define VDAMGT_GRP_INFO 0x10
+#define VDAMGT_GRP_CREATE 0x11
+#define VDAMGT_GRP_DELETE 0x12
+#define VDAMGT_ADD_STORAGE 0x13
+#define VDAMGT_MEMBER_ADD 0x14
+#define VDAMGT_GRP_COMMIT 0x15
+#define VDAMGT_GRP_REBUILD 0x16
+#define VDAMGT_GRP_COMMIT_INIT 0x17
+#define VDAMGT_QUICK_RAID 0x18
+#define VDAMGT_GRP_FEATURES 0x19
+#define VDAMGT_GRP_COMMIT_INIT_AUTOMAP 0x1A
+#define VDAMGT_QUICK_RAID_INIT_AUTOMAP 0x1B
+#define VDAMGT_GRP_OPERATION 0x1C
+#define VDAMGT_CFG_SAVE 0x20
+#define VDAMGT_LAST_ERROR 0x21
+#define VDAMGT_ADAP_INFO 0x22
+#define VDAMGT_ADAP_FEATURES 0x23
+#define VDAMGT_TEMP_INFO 0x24
+#define VDAMGT_FAN_INFO 0x25
+#define VDAMGT_PART_INFO 0x30
+#define VDAMGT_PART_MAP 0x31
+#define VDAMGT_PART_UNMAP 0x32
+#define VDAMGT_PART_AUTOMAP 0x33
+#define VDAMGT_PART_SPLIT 0x34
+#define VDAMGT_PART_MERGE 0x35
+#define VDAMGT_SPARE_LIST 0x40
+#define VDAMGT_SPARE_ADD 0x41
+#define VDAMGT_SPARE_REMOVE 0x42
+#define VDAMGT_LOCAL_SPARE_ADD 0x43
+#define VDAMGT_SCHEDULE_EVENT 0x50
+#define VDAMGT_SCHEDULE_INFO 0x51
+#define VDAMGT_NVCACHE_INFO 0x60
+#define VDAMGT_NVCACHE_SET 0x61
+#define VDAMGT_BUZZER_INFO 0x70
+#define VDAMGT_BUZZER_SET 0x71
+
+
+struct __packed atto_vda_ae_hdr {
+ u8 bylength;
+ u8 byflags;
+ #define VDAAE_HDRF_EVENT_ACK 0x01
+
+ u8 byversion;
+ #define VDAAE_HDR_VER_0 0
+
+ u8 bytype;
+ #define VDAAE_HDR_TYPE_RAID 1
+ #define VDAAE_HDR_TYPE_LU 2
+ #define VDAAE_HDR_TYPE_DISK 3
+ #define VDAAE_HDR_TYPE_RESET 4
+ #define VDAAE_HDR_TYPE_LOG_INFO 5
+ #define VDAAE_HDR_TYPE_LOG_WARN 6
+ #define VDAAE_HDR_TYPE_LOG_CRIT 7
+ #define VDAAE_HDR_TYPE_LOG_FAIL 8
+ #define VDAAE_HDR_TYPE_NVC 9
+ #define VDAAE_HDR_TYPE_TLG_INFO 10
+ #define VDAAE_HDR_TYPE_TLG_WARN 11
+ #define VDAAE_HDR_TYPE_TLG_CRIT 12
+ #define VDAAE_HDR_TYPE_PWRMGT 13
+ #define VDAAE_HDR_TYPE_MUTE 14
+ #define VDAAE_HDR_TYPE_DEV 15
+};
+
+
+struct __packed atto_vda_ae_raid {
+ struct atto_vda_ae_hdr hdr;
+ u32 dwflags;
+ #define VDAAE_GROUP_STATE 0x00000001
+ #define VDAAE_RBLD_STATE 0x00000002
+ #define VDAAE_RBLD_PROG 0x00000004
+ #define VDAAE_MEMBER_CHG 0x00000008
+ #define VDAAE_PART_CHG 0x00000010
+ #define VDAAE_MEM_STATE_CHG 0x00000020
+
+ u8 bygroup_state;
+ #define VDAAE_RAID_INVALID 0
+ #define VDAAE_RAID_NEW 1
+ #define VDAAE_RAID_WAITING 2
+ #define VDAAE_RAID_ONLINE 3
+ #define VDAAE_RAID_DEGRADED 4
+ #define VDAAE_RAID_OFFLINE 5
+ #define VDAAE_RAID_DELETED 6
+ #define VDAAE_RAID_BASIC 7
+ #define VDAAE_RAID_EXTREME 8
+ #define VDAAE_RAID_UNKNOWN 9
+
+ u8 byrebuild_state;
+ #define VDAAE_RBLD_NONE 0
+ #define VDAAE_RBLD_REBUILD 1
+ #define VDAAE_RBLD_ERASE 2
+ #define VDAAE_RBLD_PATTERN 3
+ #define VDAAE_RBLD_CONV 4
+ #define VDAAE_RBLD_FULL_INIT 5
+ #define VDAAE_RBLD_QUICK_INIT 6
+ #define VDAAE_RBLD_SECT_SCAN 7
+ #define VDAAE_RBLD_SECT_SCAN_PARITY 8
+ #define VDAAE_RBLD_SECT_SCAN_PARITY_FIX 9
+ #define VDAAE_RBLD_RECOV_REBUILD 10
+ #define VDAAE_RBLD_UNKNOWN 11
+
+ u8 byrebuild_progress;
+ u8 op_status;
+ #define VDAAE_GRPOPSTAT_MASK 0x0F
+ #define VDAAE_GRPOPSTAT_INVALID 0x00
+ #define VDAAE_GRPOPSTAT_OK 0x01
+ #define VDAAE_GRPOPSTAT_FAULTED 0x02
+ #define VDAAE_GRPOPSTAT_HALTED 0x03
+ #define VDAAE_GRPOPSTAT_INT 0x04
+ #define VDAAE_GRPOPPROC_MASK 0xF0
+ #define VDAAE_GRPOPPROC_STARTABLE 0x10
+ #define VDAAE_GRPOPPROC_CANCELABLE 0x20
+ #define VDAAE_GRPOPPROC_RESUMABLE 0x40
+ #define VDAAE_GRPOPPROC_HALTABLE 0x80
+ char acname[15];
+ u8 byreserved;
+ u8 byreserved2[0x80 - 0x1C];
+};
+
+
+struct __packed atto_vda_ae_lu_tgt_lun {
+ u16 wtarget_id;
+ u8 bylun;
+ u8 byreserved;
+};
+
+
+struct __packed atto_vda_ae_lu_tgt_lun_raid {
+ u16 wtarget_id;
+ u8 bylun;
+ u8 byreserved;
+ u32 dwinterleave;
+ u32 dwblock_size;
+};
+
+
+struct __packed atto_vda_ae_lu {
+ struct atto_vda_ae_hdr hdr;
+ u32 dwevent;
+ #define VDAAE_LU_DISC 0x00000001
+ #define VDAAE_LU_LOST 0x00000002
+ #define VDAAE_LU_STATE 0x00000004
+ #define VDAAE_LU_PASSTHROUGH 0x10000000
+ #define VDAAE_LU_PHYS_ID 0x20000000
+
+ u8 bystate;
+ #define VDAAE_LU_UNDEFINED 0
+ #define VDAAE_LU_NOT_PRESENT 1
+ #define VDAAE_LU_OFFLINE 2
+ #define VDAAE_LU_ONLINE 3
+ #define VDAAE_LU_DEGRADED 4
+ #define VDAAE_LU_FACTORY_DISABLED 5
+ #define VDAAE_LU_DELETED 6
+ #define VDAAE_LU_BUSSCAN 7
+ #define VDAAE_LU_UNKNOWN 8
+
+ u8 byreserved;
+ u16 wphys_target_id;
+
+ union {
+ struct atto_vda_ae_lu_tgt_lun tgtlun;
+ struct atto_vda_ae_lu_tgt_lun_raid tgtlun_raid;
+ } id;
+};
+
+
+struct __packed atto_vda_ae_disk {
+ struct atto_vda_ae_hdr hdr;
+};
+
+
+#define VDAAE_LOG_STRSZ 64
+
+struct __packed atto_vda_ae_log {
+ struct atto_vda_ae_hdr hdr;
+ char aclog_ascii[VDAAE_LOG_STRSZ];
+};
+
+
+#define VDAAE_TLG_STRSZ 56
+
+struct __packed atto_vda_ae_timestamp_log {
+ struct atto_vda_ae_hdr hdr;
+ u32 dwtimestamp;
+ char aclog_ascii[VDAAE_TLG_STRSZ];
+};
+
+
+struct __packed atto_vda_ae_nvc {
+ struct atto_vda_ae_hdr hdr;
+};
+
+
+struct __packed atto_vda_ae_dev {
+ struct atto_vda_ae_hdr hdr;
+ struct atto_dev_addr devaddr;
+};
+
+
+union atto_vda_ae {
+ struct atto_vda_ae_hdr hdr;
+ struct atto_vda_ae_disk disk;
+ struct atto_vda_ae_lu lu;
+ struct atto_vda_ae_raid raid;
+ struct atto_vda_ae_log log;
+ struct atto_vda_ae_timestamp_log tslog;
+ struct atto_vda_ae_nvc nvcache;
+ struct atto_vda_ae_dev dev;
+};
+
+
+struct __packed atto_vda_date_and_time {
+ u8 flags;
+ #define VDA_DT_DAY_MASK 0x07
+ #define VDA_DT_DAY_NONE 0x00
+ #define VDA_DT_DAY_SUN 0x01
+ #define VDA_DT_DAY_MON 0x02
+ #define VDA_DT_DAY_TUE 0x03
+ #define VDA_DT_DAY_WED 0x04
+ #define VDA_DT_DAY_THU 0x05
+ #define VDA_DT_DAY_FRI 0x06
+ #define VDA_DT_DAY_SAT 0x07
+ #define VDA_DT_PM 0x40
+ #define VDA_DT_MILITARY 0x80
+
+ u8 seconds;
+ u8 minutes;
+ u8 hours;
+ u8 day;
+ u8 month;
+ u16 year;
+};
+
+#define SGE_LEN_LIMIT 0x003FFFFF /*! mask of segment length */
+#define SGE_LEN_MAX 0x003FF000 /*! maximum segment length */
+#define SGE_LAST 0x01000000 /*! last entry */
+#define SGE_ADDR_64 0x04000000 /*! 64-bit addressing flag */
+#define SGE_CHAIN 0x80000000 /*! chain descriptor flag */
+#define SGE_CHAIN_LEN 0x0000FFFF /*! mask of length in chain entries */
+#define SGE_CHAIN_SZ 0x00FF0000 /*! mask of size of chained buffer */
+
+
+struct __packed atto_vda_cfg_init {
+ struct atto_vda_date_and_time date_time;
+ u32 sgl_page_size;
+ u32 vda_version;
+ u32 fw_version;
+ u32 fw_build;
+ u32 fw_release;
+ u32 epoch_time;
+ u32 ioctl_tunnel;
+ #define VDA_ITF_MEM_RW 0x00000001
+ #define VDA_ITF_TRACE 0x00000002
+ #define VDA_ITF_SCSI_PASS_THRU 0x00000004
+ #define VDA_ITF_GET_DEV_ADDR 0x00000008
+ #define VDA_ITF_PHY_CTRL 0x00000010
+ #define VDA_ITF_CONN_CTRL 0x00000020
+ #define VDA_ITF_GET_DEV_INFO 0x00000040
+
+ u32 num_targets_backend;
+ u8 reserved[0x48];
+};
+
+
+/* configuration commands */
+
+#define VDA_CFG_INIT 0x00
+#define VDA_CFG_GET_INIT 0x01
+#define VDA_CFG_GET_INIT2 0x02
+
+
+/*! physical region descriptor (PRD) aka scatter/gather entry */
+
+struct __packed atto_physical_region_description {
+ u64 address;
+ u32 ctl_len;
+ #define PRD_LEN_LIMIT 0x003FFFFF
+ #define PRD_LEN_MAX 0x003FF000
+ #define PRD_NXT_PRD_CNT 0x0000007F
+ #define PRD_CHAIN 0x01000000
+ #define PRD_DATA 0x00000000
+ #define PRD_INT_SEL 0xF0000000
+ #define PRD_INT_SEL_F0 0x00000000
+ #define PRD_INT_SEL_F1 0x40000000
+ #define PRD_INT_SEL_F2 0x80000000
+ #define PRD_INT_SEL_F3 0xc0000000
+ #define PRD_INT_SEL_SRAM 0x10000000
+ #define PRD_INT_SEL_PBSR 0x20000000
+
+};
+
+/* Request types. NOTE that ALL requests have the same layout for the first
+ * few bytes.
+ */
+struct __packed atto_vda_req_header {
+ u32 length;
+ u8 function;
+ u8 variable1;
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+};
+
+
+#define FCP_CDB_SIZE 16
+
+struct __packed atto_vda_scsi_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_SCSI */
+ u8 sense_len;
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+ u32 flags;
+ #define FCP_CMND_LUN_MASK 0x000000FF
+ #define FCP_CMND_TA_MASK 0x00000700
+ #define FCP_CMND_TA_SIMPL_Q 0x00000000
+ #define FCP_CMND_TA_HEAD_Q 0x00000100
+ #define FCP_CMND_TA_ORDRD_Q 0x00000200
+ #define FCP_CMND_TA_ACA 0x00000400
+ #define FCP_CMND_PRI_MASK 0x00007800
+ #define FCP_CMND_TM_MASK 0x00FF0000
+ #define FCP_CMND_ATS 0x00020000
+ #define FCP_CMND_CTS 0x00040000
+ #define FCP_CMND_LRS 0x00100000
+ #define FCP_CMND_TRS 0x00200000
+ #define FCP_CMND_CLA 0x00400000
+ #define FCP_CMND_TRM 0x00800000
+ #define FCP_CMND_DATA_DIR 0x03000000
+ #define FCP_CMND_WRD 0x01000000
+ #define FCP_CMND_RDD 0x02000000
+
+ u8 cdb[FCP_CDB_SIZE];
+ union {
+ struct __packed {
+ u64 ppsense_buf;
+ u16 target_id;
+ u8 iblk_cnt_prd;
+ u8 reserved;
+ };
+
+ struct atto_physical_region_description sense_buff_prd;
+ };
+
+ union {
+ struct atto_vda_sge sge[1];
+
+ u32 abort_handle;
+ u32 dwords[245];
+ struct atto_physical_region_description prd[1];
+ } u;
+};
+
+
+struct __packed atto_vda_flash_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_FLASH */
+ u8 sub_func;
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+ u32 flash_addr;
+ u8 checksum;
+ u8 rsvd[3];
+
+ union {
+ struct {
+ char file_name[16]; /* 8.3 fname, NULL term, wc=* */
+ struct atto_vda_sge sge[1];
+ } file;
+
+ struct atto_vda_sge sge[1];
+ struct atto_physical_region_description prde[2];
+ } data;
+};
+
+
+struct __packed atto_vda_diag_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_DIAG */
+ u8 sub_func;
+ #define VDA_DIAG_STATUS 0x00
+ #define VDA_DIAG_RESET 0x01
+ #define VDA_DIAG_PAUSE 0x02
+ #define VDA_DIAG_RESUME 0x03
+ #define VDA_DIAG_READ 0x04
+ #define VDA_DIAG_WRITE 0x05
+
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+ u32 rsvd;
+ u64 local_addr;
+ struct atto_vda_sge sge[1];
+};
+
+
+struct __packed atto_vda_ae_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_AE */
+ u8 reserved1;
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+
+ union {
+ struct atto_vda_sge sge[1];
+ struct atto_physical_region_description prde[1];
+ };
+};
+
+
+struct __packed atto_vda_cli_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_CLI */
+ u8 reserved1;
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+ u32 cmd_rsp_len;
+ struct atto_vda_sge sge[1];
+};
+
+
+struct __packed atto_vda_ioctl_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_IOCTL */
+ u8 sub_func;
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+
+ union {
+ struct atto_vda_sge reserved_sge;
+ struct atto_physical_region_description reserved_prde;
+ };
+
+ union {
+ struct {
+ u32 ctrl_code;
+ u16 target_id;
+ u8 lun;
+ u8 reserved;
+ } csmi;
+ };
+
+ union {
+ struct atto_vda_sge sge[1];
+ struct atto_physical_region_description prde[1];
+ };
+};
+
+
+struct __packed atto_vda_cfg_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_CFG */
+ u8 sub_func;
+ u8 rsvd1;
+ u8 sg_list_offset;
+ u32 handle;
+
+ union {
+ u8 bytes[116];
+ struct atto_vda_cfg_init init;
+ struct atto_vda_sge sge;
+ struct atto_physical_region_description prde;
+ } data;
+};
+
+
+struct __packed atto_vda_mgmt_req {
+ u32 length;
+ u8 function; /* VDA_FUNC_MGT */
+ u8 mgt_func;
+ u8 chain_offset;
+ u8 sg_list_offset;
+ u32 handle;
+ u8 scan_generation;
+ u8 payld_sglst_offset;
+ u16 dev_index;
+ u32 payld_length;
+ u32 pad;
+ union {
+ struct atto_vda_sge sge[2];
+ struct atto_physical_region_description prde[2];
+ };
+ struct atto_vda_sge payld_sge[1];
+};
+
+
+union atto_vda_req {
+ struct atto_vda_scsi_req scsi;
+ struct atto_vda_flash_req flash;
+ struct atto_vda_diag_req diag;
+ struct atto_vda_ae_req ae;
+ struct atto_vda_cli_req cli;
+ struct atto_vda_ioctl_req ioctl;
+ struct atto_vda_cfg_req cfg;
+ struct atto_vda_mgmt_req mgt;
+ u8 bytes[1024];
+};
+
+/* Outbound response structures */
+
+struct __packed atto_vda_scsi_rsp {
+ u8 scsi_stat;
+ u8 sense_len;
+ u8 rsvd[2];
+ u32 residual_length;
+};
+
+struct __packed atto_vda_flash_rsp {
+ u32 file_size;
+};
+
+struct __packed atto_vda_ae_rsp {
+ u32 length;
+};
+
+struct __packed atto_vda_cli_rsp {
+ u32 cmd_rsp_len;
+};
+
+struct __packed atto_vda_ioctl_rsp {
+ union {
+ struct {
+ u32 csmi_status;
+ u16 target_id;
+ u8 lun;
+ u8 reserved;
+ } csmi;
+ };
+};
+
+struct __packed atto_vda_cfg_rsp {
+ u16 vda_version;
+ u16 fw_release;
+ u32 fw_build;
+};
+
+struct __packed atto_vda_mgmt_rsp {
+ u32 length;
+ u16 dev_index;
+ u8 scan_generation;
+};
+
+union atto_vda_func_rsp {
+ struct atto_vda_scsi_rsp scsi_rsp;
+ struct atto_vda_flash_rsp flash_rsp;
+ struct atto_vda_ae_rsp ae_rsp;
+ struct atto_vda_cli_rsp cli_rsp;
+ struct atto_vda_ioctl_rsp ioctl_rsp;
+ struct atto_vda_cfg_rsp cfg_rsp;
+ struct atto_vda_mgmt_rsp mgt_rsp;
+ u32 dwords[2];
+};
+
+struct __packed atto_vda_ob_rsp {
+ u32 handle;
+ u8 req_stat;
+ u8 rsvd[3];
+
+ union atto_vda_func_rsp
+ func_rsp;
+};
+
+struct __packed atto_vda_ae_data {
+ u8 event_data[256];
+};
+
+struct __packed atto_vda_mgmt_data {
+ union {
+ u8 bytes[112];
+ struct atto_vda_devinfo dev_info;
+ struct atto_vda_grp_info grp_info;
+ struct atto_vdapart_info part_info;
+ struct atto_vda_dh_info dev_health_info;
+ struct atto_vda_metrics_info metrics_info;
+ struct atto_vda_schedule_info sched_info;
+ struct atto_vda_n_vcache_info nvcache_info;
+ struct atto_vda_buzzer_info buzzer_info;
+ } data;
+};
+
+union atto_vda_rsp_data {
+ struct atto_vda_ae_data ae_data;
+ struct atto_vda_mgmt_data mgt_data;
+ u8 sense_data[252];
+	#define SENSE_DATA_SZ 252
+ u8 bytes[256];
+};
+
+#endif
diff --git a/drivers/scsi/esas2r/esas2r.h b/drivers/scsi/esas2r/esas2r.h
new file mode 100644
index 00000000000..0838e265e0b
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r.h
@@ -0,0 +1,1441 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r.h
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+
+#include "esas2r_log.h"
+#include "atioctl.h"
+#include "atvda.h"
+
+#ifndef ESAS2R_H
+#define ESAS2R_H
+
+/* Global Variables */
+extern struct esas2r_adapter *esas2r_adapters[];
+extern u8 *esas2r_buffered_ioctl;
+extern dma_addr_t esas2r_buffered_ioctl_addr;
+extern u32 esas2r_buffered_ioctl_size;
+extern struct pci_dev *esas2r_buffered_ioctl_pcid;
+#define SGL_PG_SZ_MIN 64
+#define SGL_PG_SZ_MAX 1024
+extern int sgl_page_size;
+#define NUM_SGL_MIN 8
+#define NUM_SGL_MAX 2048
+extern int num_sg_lists;
+#define NUM_REQ_MIN 4
+#define NUM_REQ_MAX 256
+extern int num_requests;
+#define NUM_AE_MIN 2
+#define NUM_AE_MAX 8
+extern int num_ae_requests;
+extern int cmd_per_lun;
+extern int can_queue;
+extern int esas2r_max_sectors;
+extern int sg_tablesize;
+extern int interrupt_mode;
+extern int num_io_requests;
+
+/* Macro definitions */
+#define ESAS2R_MAX_ID 255
+#define MAX_ADAPTERS 32
+#define ESAS2R_DRVR_NAME "esas2r"
+#define ESAS2R_LONGNAME "ATTO ExpressSAS 6GB RAID Adapter"
+#define ESAS2R_MAX_DEVICES 32
+#define ATTONODE_NAME "ATTONode"
+#define ESAS2R_MAJOR_REV 1
+#define ESAS2R_MINOR_REV 00
+#define ESAS2R_VERSION_STR DEFINED_NUM_TO_STR(ESAS2R_MAJOR_REV) "." \
+ DEFINED_NUM_TO_STR(ESAS2R_MINOR_REV)
+#define ESAS2R_COPYRIGHT_YEARS "2001-2013"
+#define ESAS2R_DEFAULT_SGL_PAGE_SIZE 384
+#define ESAS2R_DEFAULT_CMD_PER_LUN 64
+#define ESAS2R_DEFAULT_NUM_SG_LISTS 1024
+#define DEFINED_NUM_TO_STR(num) NUM_TO_STR(num)
+#define NUM_TO_STR(num) #num
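+/*
+ * Note: the two-level expansion above is the standard C idiom for
+ * stringizing the value of a macro rather than its name; for example,
+ * ESAS2R_VERSION_STR expands to "1" "." "00", i.e. "1.00".
+ */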
+
+#define ESAS2R_SGL_ALIGN 16
+#define ESAS2R_LIST_ALIGN 16
+#define ESAS2R_LIST_EXTRA ESAS2R_NUM_EXTRA
+#define ESAS2R_DATA_BUF_LEN 256
+#define ESAS2R_DEFAULT_TMO 5000
+#define ESAS2R_DISC_BUF_LEN 512
+#define ESAS2R_FWCOREDUMP_SZ 0x80000
+#define ESAS2R_NUM_PHYS 8
+#define ESAS2R_TARG_ID_INV 0xFFFF
+#define ESAS2R_INT_STS_MASK MU_INTSTAT_MASK
+#define ESAS2R_INT_ENB_MASK MU_INTSTAT_MASK
+#define ESAS2R_INT_DIS_MASK 0
+#define ESAS2R_MAX_TARGETS 256
+#define ESAS2R_KOBJ_NAME_LEN 20
+
+/* u16 (WORD) component macros */
+#define LOBYTE(w) ((u8)(u16)(w))
+#define HIBYTE(w) ((u8)(((u16)(w)) >> 8))
+#define MAKEWORD(lo, hi) ((u16)((u8)(lo) | ((u16)(u8)(hi) << 8)))
+
+/* u32 (DWORD) component macros */
+#define LOWORD(d) ((u16)(u32)(d))
+#define HIWORD(d) ((u16)(((u32)(d)) >> 16))
+#define MAKEDWORD(lo, hi) ((u32)((u16)(lo) | ((u32)(u16)(hi) << 16)))
+
+/* macro to get the lowest nonzero bit of a value */
+#define LOBIT(x) ((x) & (0 - (x)))
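+/*
+ * For example, MAKEDWORD(0x5678, 0x1234) evaluates to 0x12345678 and
+ * LOBIT(0x0018) evaluates to 0x0008.
+ */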
+
+/* These macros are provided to access the chip's control registers.
+ * The register is specified by its byte offset from the register base
+ * for the adapter.
+ */
+#define esas2r_read_register_dword(a, reg) \
+ readl((void __iomem *)a->regs + (reg) + MW_REG_OFFSET_HWREG)
+
+#define esas2r_write_register_dword(a, reg, data) \
+ writel(data, (void __iomem *)(a->regs + (reg) + MW_REG_OFFSET_HWREG))
+
+#define esas2r_flush_register_dword(a, r) esas2r_read_register_dword(a, r)
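+/*
+ * Example (sketch): an interrupt can be forced by ringing the inbound
+ * doorbell defined further below:
+ *
+ *     esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_FORCE_INT);
+ *     esas2r_flush_register_dword(a, MU_DOORBELL_IN);
+ */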
+
+/* This macro is provided to access the chip's data window.  The
+ * register is specified by its byte offset from the window base
+ * for the adapter.
+ */
+#define esas2r_read_data_byte(a, reg) \
+ readb((void __iomem *)a->data_window + (reg))
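+/*
+ * Example (sketch): after a call such as
+ * esas2r_map_data_window(a, MW_DATA_ADDR_SER_FLASH), bytes within the
+ * 128KB window can then be fetched with esas2r_read_data_byte(a, offset).
+ */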
+
+/* ATTO vendor and device Ids */
+#define ATTO_VENDOR_ID 0x117C
+#define ATTO_DID_INTEL_IOP348 0x002C
+#define ATTO_DID_MV_88RC9580 0x0049
+#define ATTO_DID_MV_88RC9580TS 0x0066
+#define ATTO_DID_MV_88RC9580TSE 0x0067
+#define ATTO_DID_MV_88RC9580TL 0x0068
+
+/* ATTO subsystem device Ids */
+#define ATTO_SSDID_TBT 0x4000
+#define ATTO_TSSC_3808 0x4066
+#define ATTO_TSSC_3808E 0x4067
+#define ATTO_TLSH_1068 0x4068
+#define ATTO_ESAS_R680 0x0049
+#define ATTO_ESAS_R608 0x004A
+#define ATTO_ESAS_R60F 0x004B
+#define ATTO_ESAS_R6F0 0x004C
+#define ATTO_ESAS_R644 0x004D
+#define ATTO_ESAS_R648 0x004E
+
+/*
+ * flash definitions & structures
+ * define the code types
+ */
+#define FBT_CPYR 0xAA00
+#define FBT_SETUP 0xAA02
+#define FBT_FLASH_VER 0xAA04
+
+/* offsets to various locations in flash */
+#define FLS_OFFSET_BOOT (u32)(0x00700000)
+#define FLS_OFFSET_NVR (u32)(0x007C0000)
+#define FLS_OFFSET_CPYR FLS_OFFSET_NVR
+#define FLS_LENGTH_BOOT (FLS_OFFSET_CPYR - FLS_OFFSET_BOOT)
+#define FLS_BLOCK_SIZE (u32)(0x00020000)
+#define FI_NVR_2KB 0x0800
+#define FI_NVR_8KB 0x2000
+#define FM_BUF_SZ 0x800
+
+/*
+ * marvell frey (88R9580) register definitions
+ * chip revision identifiers
+ */
+#define MVR_FREY_B2 0xB2
+
+/*
+ * memory window definitions. window 0 is the data window with definitions
+ * of MW_DATA_XXX. window 1 is the register window with definitions of
+ * MW_REG_XXX.
+ */
+#define MW_REG_WINDOW_SIZE (u32)(0x00040000)
+#define MW_REG_OFFSET_HWREG (u32)(0x00000000)
+#define MW_REG_OFFSET_PCI (u32)(0x00008000)
+#define MW_REG_PCI_HWREG_DELTA (MW_REG_OFFSET_PCI - MW_REG_OFFSET_HWREG)
+#define MW_DATA_WINDOW_SIZE (u32)(0x00020000)
+#define MW_DATA_ADDR_SER_FLASH (u32)(0xEC000000)
+#define MW_DATA_ADDR_SRAM (u32)(0xF4000000)
+#define MW_DATA_ADDR_PAR_FLASH (u32)(0xFC000000)
+
+/*
+ * the following registers are for the communication
+ * list interface (AKA message unit (MU))
+ */
+#define MU_IN_LIST_ADDR_LO (u32)(0x00004000)
+#define MU_IN_LIST_ADDR_HI (u32)(0x00004004)
+
+#define MU_IN_LIST_WRITE (u32)(0x00004018)
+ #define MU_ILW_TOGGLE (u32)(0x00004000)
+
+#define MU_IN_LIST_READ (u32)(0x0000401C)
+ #define MU_ILR_TOGGLE (u32)(0x00004000)
+ #define MU_ILIC_LIST (u32)(0x0000000F)
+ #define MU_ILIC_LIST_F0 (u32)(0x00000000)
+ #define MU_ILIC_DEST (u32)(0x00000F00)
+ #define MU_ILIC_DEST_DDR (u32)(0x00000200)
+#define MU_IN_LIST_IFC_CONFIG (u32)(0x00004028)
+
+#define MU_IN_LIST_CONFIG (u32)(0x0000402C)
+ #define MU_ILC_ENABLE (u32)(0x00000001)
+ #define MU_ILC_ENTRY_MASK (u32)(0x000000F0)
+ #define MU_ILC_ENTRY_4_DW (u32)(0x00000020)
+ #define MU_ILC_DYNAMIC_SRC (u32)(0x00008000)
+ #define MU_ILC_NUMBER_MASK (u32)(0x7FFF0000)
+ #define MU_ILC_NUMBER_SHIFT 16
+
+#define MU_OUT_LIST_ADDR_LO (u32)(0x00004050)
+#define MU_OUT_LIST_ADDR_HI (u32)(0x00004054)
+
+#define MU_OUT_LIST_COPY_PTR_LO (u32)(0x00004058)
+#define MU_OUT_LIST_COPY_PTR_HI (u32)(0x0000405C)
+
+#define MU_OUT_LIST_WRITE (u32)(0x00004068)
+ #define MU_OLW_TOGGLE (u32)(0x00004000)
+
+#define MU_OUT_LIST_COPY (u32)(0x0000406C)
+ #define MU_OLC_TOGGLE (u32)(0x00004000)
+ #define MU_OLC_WRT_PTR (u32)(0x00003FFF)
+
+#define MU_OUT_LIST_IFC_CONFIG (u32)(0x00004078)
+ #define MU_OLIC_LIST (u32)(0x0000000F)
+ #define MU_OLIC_LIST_F0 (u32)(0x00000000)
+ #define MU_OLIC_SOURCE (u32)(0x00000F00)
+ #define MU_OLIC_SOURCE_DDR (u32)(0x00000200)
+
+#define MU_OUT_LIST_CONFIG (u32)(0x0000407C)
+ #define MU_OLC_ENABLE (u32)(0x00000001)
+ #define MU_OLC_ENTRY_MASK (u32)(0x000000F0)
+ #define MU_OLC_ENTRY_4_DW (u32)(0x00000020)
+ #define MU_OLC_NUMBER_MASK (u32)(0x7FFF0000)
+ #define MU_OLC_NUMBER_SHIFT 16
+
+#define MU_OUT_LIST_INT_STAT (u32)(0x00004088)
+ #define MU_OLIS_INT (u32)(0x00000001)
+
+#define MU_OUT_LIST_INT_MASK (u32)(0x0000408C)
+ #define MU_OLIS_MASK (u32)(0x00000001)
+
+/*
+ * The maximum size of the communication lists is two greater than the
+ * maximum number of VDA requests.  The extra entries prevent queue overflow.
+ */
+#define ESAS2R_MAX_NUM_REQS 256
+#define ESAS2R_NUM_EXTRA 2
+#define ESAS2R_MAX_COMM_LIST_SIZE (ESAS2R_MAX_NUM_REQS + ESAS2R_NUM_EXTRA)
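+/* with the values above this works out to 256 + 2 = 258 list elements */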
+
+/*
+ * the following registers are for the CPU interface
+ */
+#define MU_CTL_STATUS_IN (u32)(0x00010108)
+ #define MU_CTL_IN_FULL_RST (u32)(0x00000020)
+#define MU_CTL_STATUS_IN_B2 (u32)(0x00010130)
+ #define MU_CTL_IN_FULL_RST2 (u32)(0x80000000)
+#define MU_DOORBELL_IN (u32)(0x00010460)
+ #define DRBL_RESET_BUS (u32)(0x00000002)
+ #define DRBL_PAUSE_AE (u32)(0x00000004)
+ #define DRBL_RESUME_AE (u32)(0x00000008)
+ #define DRBL_MSG_IFC_DOWN (u32)(0x00000010)
+ #define DRBL_FLASH_REQ (u32)(0x00000020)
+ #define DRBL_FLASH_DONE (u32)(0x00000040)
+ #define DRBL_FORCE_INT (u32)(0x00000080)
+ #define DRBL_MSG_IFC_INIT (u32)(0x00000100)
+ #define DRBL_POWER_DOWN (u32)(0x00000200)
+ #define DRBL_DRV_VER_1 (u32)(0x00010000)
+ #define DRBL_DRV_VER DRBL_DRV_VER_1
+#define MU_DOORBELL_IN_ENB (u32)(0x00010464)
+#define MU_DOORBELL_OUT (u32)(0x00010480)
+ #define DRBL_PANIC_REASON_MASK (u32)(0x00F00000)
+ #define DRBL_UNUSED_HANDLER (u32)(0x00100000)
+ #define DRBL_UNDEF_INSTR (u32)(0x00200000)
+ #define DRBL_PREFETCH_ABORT (u32)(0x00300000)
+ #define DRBL_DATA_ABORT (u32)(0x00400000)
+ #define DRBL_JUMP_TO_ZERO (u32)(0x00500000)
+ #define DRBL_FW_RESET (u32)(0x00080000)
+ #define DRBL_FW_VER_MSK (u32)(0x00070000)
+ #define DRBL_FW_VER_0 (u32)(0x00000000)
+ #define DRBL_FW_VER_1 (u32)(0x00010000)
+ #define DRBL_FW_VER DRBL_FW_VER_1
+#define MU_DOORBELL_OUT_ENB (u32)(0x00010484)
+ #define DRBL_ENB_MASK (u32)(0x00F803FF)
+#define MU_INT_STATUS_OUT (u32)(0x00010200)
+ #define MU_INTSTAT_POST_OUT (u32)(0x00000010)
+ #define MU_INTSTAT_DRBL_IN (u32)(0x00000100)
+ #define MU_INTSTAT_DRBL (u32)(0x00001000)
+ #define MU_INTSTAT_MASK (u32)(0x00001010)
+#define MU_INT_MASK_OUT (u32)(0x0001020C)
+
+/* PCI express registers accessed via window 1 */
+#define MVR_PCI_WIN1_REMAP (u32)(0x00008438)
+ #define MVRPW1R_ENABLE (u32)(0x00000001)
+
+
+/* structures */
+
+/* inbound list dynamic source entry */
+struct esas2r_inbound_list_source_entry {
+ u64 address;
+ u32 length;
+ #define HWILSE_INTERFACE_F0 0x00000000
+ u32 reserved;
+};
+
+/* PCI data structure in expansion ROM images */
+struct __packed esas2r_boot_header {
+ char signature[4];
+ u16 vendor_id;
+ u16 device_id;
+ u16 VPD;
+ u16 struct_length;
+ u8 struct_revision;
+ u8 class_code[3];
+ u16 image_length;
+ u16 code_revision;
+ u8 code_type;
+ #define CODE_TYPE_PC 0
+ #define CODE_TYPE_OPEN 1
+ #define CODE_TYPE_EFI 3
+ u8 indicator;
+ #define INDICATOR_LAST 0x80
+ u8 reserved[2];
+};
+
+struct __packed esas2r_boot_image {
+ u16 signature;
+ u8 reserved[22];
+ u16 header_offset;
+ u16 pnp_offset;
+};
+
+struct __packed esas2r_pc_image {
+ u16 signature;
+ u8 length;
+ u8 entry_point[3];
+ u8 checksum;
+ u16 image_end;
+ u16 min_size;
+ u8 rom_flags;
+ u8 reserved[12];
+ u16 header_offset;
+ u16 pnp_offset;
+ struct esas2r_boot_header boot_image;
+};
+
+struct __packed esas2r_efi_image {
+ u16 signature;
+ u16 length;
+ u32 efi_signature;
+ #define EFI_ROM_SIG 0x00000EF1
+ u16 image_type;
+ #define EFI_IMAGE_APP 10
+ #define EFI_IMAGE_BSD 11
+ #define EFI_IMAGE_RTD 12
+ u16 machine_type;
+ #define EFI_MACHINE_IA32 0x014c
+ #define EFI_MACHINE_IA64 0x0200
+ #define EFI_MACHINE_X64 0x8664
+ #define EFI_MACHINE_EBC 0x0EBC
+ u16 compression;
+ #define EFI_UNCOMPRESSED 0x0000
+ #define EFI_COMPRESSED 0x0001
+ u8 reserved[8];
+ u16 efi_offset;
+ u16 header_offset;
+ u16 reserved2;
+ struct esas2r_boot_header boot_image;
+};
+
+struct esas2r_adapter;
+struct esas2r_sg_context;
+struct esas2r_request;
+
+typedef void (*RQCALLBK) (struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+typedef bool (*RQBUILDSGL) (struct esas2r_adapter *a,
+ struct esas2r_sg_context *sgc);
+
+struct esas2r_component_header {
+ u8 img_type;
+ #define CH_IT_FW 0x00
+ #define CH_IT_NVR 0x01
+ #define CH_IT_BIOS 0x02
+ #define CH_IT_MAC 0x03
+ #define CH_IT_CFG 0x04
+ #define CH_IT_EFI 0x05
+ u8 status;
+ #define CH_STAT_PENDING 0xff
+ #define CH_STAT_FAILED 0x00
+ #define CH_STAT_SUCCESS 0x01
+ #define CH_STAT_RETRY 0x02
+ #define CH_STAT_INVALID 0x03
+ u8 pad[2];
+ u32 version;
+ u32 length;
+ u32 image_offset;
+};
+
+#define FI_REL_VER_SZ 16
+
+struct esas2r_flash_img_v0 {
+ u8 fi_version;
+ #define FI_VERSION_0 00
+ u8 status;
+ u8 adap_typ;
+ u8 action;
+ u32 length;
+ u16 checksum;
+ u16 driver_error;
+ u16 flags;
+ u16 num_comps;
+ #define FI_NUM_COMPS_V0 5
+ u8 rel_version[FI_REL_VER_SZ];
+ struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V0];
+ u8 scratch_buf[FM_BUF_SZ];
+};
+
+struct esas2r_flash_img {
+ u8 fi_version;
+ #define FI_VERSION_1 01
+ u8 status;
+ #define FI_STAT_SUCCESS 0x00
+ #define FI_STAT_FAILED 0x01
+ #define FI_STAT_REBOOT 0x02
+ #define FI_STAT_ADAPTYP 0x03
+ #define FI_STAT_INVALID 0x04
+ #define FI_STAT_CHKSUM 0x05
+ #define FI_STAT_LENGTH 0x06
+ #define FI_STAT_UNKNOWN 0x07
+ #define FI_STAT_IMG_VER 0x08
+ #define FI_STAT_BUSY 0x09
+ #define FI_STAT_DUAL 0x0A
+ #define FI_STAT_MISSING 0x0B
+ #define FI_STAT_UNSUPP 0x0C
+ #define FI_STAT_ERASE 0x0D
+ #define FI_STAT_FLASH 0x0E
+ #define FI_STAT_DEGRADED 0x0F
+ u8 adap_typ;
+ #define FI_AT_UNKNWN 0xFF
+ #define FI_AT_SUN_LAKE 0x0B
+ #define FI_AT_MV_9580 0x0F
+ u8 action;
+ #define FI_ACT_DOWN 0x00
+ #define FI_ACT_UP 0x01
+ #define FI_ACT_UPSZ 0x02
+ #define FI_ACT_MAX 0x02
+ #define FI_ACT_DOWN1 0x80
+ u32 length;
+ u16 checksum;
+ u16 driver_error;
+ u16 flags;
+ #define FI_FLG_NVR_DEF 0x0001
+ u16 num_comps;
+ #define FI_NUM_COMPS_V1 6
+ u8 rel_version[FI_REL_VER_SZ];
+ struct esas2r_component_header cmp_hdr[FI_NUM_COMPS_V1];
+ u8 scratch_buf[FM_BUF_SZ];
+};
+
+/* definitions for flash script (FS) commands */
+struct esas2r_ioctlfs_command {
+ u8 command;
+ #define ESAS2R_FS_CMD_ERASE 0
+ #define ESAS2R_FS_CMD_READ 1
+ #define ESAS2R_FS_CMD_BEGINW 2
+ #define ESAS2R_FS_CMD_WRITE 3
+ #define ESAS2R_FS_CMD_COMMIT 4
+ #define ESAS2R_FS_CMD_CANCEL 5
+ u8 checksum;
+ u8 reserved[2];
+ u32 flash_addr;
+ u32 length;
+ u32 image_offset;
+};
+
+struct esas2r_ioctl_fs {
+ u8 version;
+ #define ESAS2R_FS_VER 0
+ u8 status;
+ u8 driver_error;
+ u8 adap_type;
+ #define ESAS2R_FS_AT_ESASRAID2 3
+ #define ESAS2R_FS_AT_TSSASRAID2 4
+ #define ESAS2R_FS_AT_TSSASRAID2E 5
+ #define ESAS2R_FS_AT_TLSASHBA 6
+ u8 driver_ver;
+ u8 reserved[11];
+ struct esas2r_ioctlfs_command command;
+ u8 data[1];
+};
+
+struct esas2r_sas_nvram {
+ u8 signature[4];
+ u8 version;
+ #define SASNVR_VERSION_0 0x00
+ #define SASNVR_VERSION SASNVR_VERSION_0
+ u8 checksum;
+ #define SASNVR_CKSUM_SEED 0x5A
+ u8 max_lun_for_target;
+ u8 pci_latency;
+ #define SASNVR_PCILAT_DIS 0x00
+ #define SASNVR_PCILAT_MIN 0x10
+ #define SASNVR_PCILAT_MAX 0xF8
+ u8 options1;
+ #define SASNVR1_BOOT_DRVR 0x01
+ #define SASNVR1_BOOT_SCAN 0x02
+ #define SASNVR1_DIS_PCI_MWI 0x04
+ #define SASNVR1_FORCE_ORD_Q 0x08
+ #define SASNVR1_CACHELINE_0 0x10
+ #define SASNVR1_DIS_DEVSORT 0x20
+ #define SASNVR1_PWR_MGT_EN 0x40
+ #define SASNVR1_WIDEPORT 0x80
+ u8 options2;
+ #define SASNVR2_SINGLE_BUS 0x01
+ #define SASNVR2_SLOT_BIND 0x02
+ #define SASNVR2_EXP_PROG 0x04
+ #define SASNVR2_CMDTHR_LUN 0x08
+ #define SASNVR2_HEARTBEAT 0x10
+ #define SASNVR2_INT_CONNECT 0x20
+ #define SASNVR2_SW_MUX_CTRL 0x40
+ #define SASNVR2_DISABLE_NCQ 0x80
+ u8 int_coalescing;
+ #define SASNVR_COAL_DIS 0x00
+ #define SASNVR_COAL_LOW 0x01
+ #define SASNVR_COAL_MED 0x02
+ #define SASNVR_COAL_HI 0x03
+ u8 cmd_throttle;
+ #define SASNVR_CMDTHR_NONE 0x00
+ u8 dev_wait_time;
+ u8 dev_wait_count;
+ u8 spin_up_delay;
+ #define SASNVR_SPINUP_MAX 0x14
+ u8 ssp_align_rate;
+ u8 sas_addr[8];
+ u8 phy_speed[16];
+ #define SASNVR_SPEED_AUTO 0x00
+ #define SASNVR_SPEED_1_5GB 0x01
+ #define SASNVR_SPEED_3GB 0x02
+ #define SASNVR_SPEED_6GB 0x03
+ #define SASNVR_SPEED_12GB 0x04
+ u8 phy_mux[16];
+ #define SASNVR_MUX_DISABLED 0x00
+ #define SASNVR_MUX_1_5GB 0x01
+ #define SASNVR_MUX_3GB 0x02
+ #define SASNVR_MUX_6GB 0x03
+ u8 phy_flags[16];
+ #define SASNVR_PHF_DISABLED 0x01
+ #define SASNVR_PHF_RD_ONLY 0x02
+ u8 sort_type;
+ #define SASNVR_SORT_SAS_ADDR 0x00
+ #define SASNVR_SORT_H308_CONN 0x01
+ #define SASNVR_SORT_PHY_ID 0x02
+ #define SASNVR_SORT_SLOT_ID 0x03
+ u8 dpm_reqcmd_lmt;
+ u8 dpm_stndby_time;
+ u8 dpm_active_time;
+ u8 phy_target_id[16];
+ #define SASNVR_PTI_DISABLED 0xFF
+ u8 virt_ses_mode;
+ #define SASNVR_VSMH_DISABLED 0x00
+ u8 read_write_mode;
+ #define SASNVR_RWM_DEFAULT 0x00
+ u8 link_down_to;
+ u8 reserved[0xA1];
+};
+
+typedef u32 (*PGETPHYSADDR) (struct esas2r_sg_context *sgc, u64 *addr);
+
+struct esas2r_sg_context {
+ struct esas2r_adapter *adapter;
+ struct esas2r_request *first_req;
+ u32 length;
+ u8 *cur_offset;
+ PGETPHYSADDR get_phys_addr;
+ union {
+ struct {
+ struct atto_vda_sge *curr;
+ struct atto_vda_sge *last;
+ struct atto_vda_sge *limit;
+ struct atto_vda_sge *chain;
+ } a64;
+ struct {
+ struct atto_physical_region_description *curr;
+ struct atto_physical_region_description *chain;
+ u32 sgl_max_cnt;
+ u32 sge_cnt;
+ } prd;
+ } sge;
+ struct scatterlist *cur_sgel;
+ u8 *exp_offset;
+ int num_sgel;
+ int sgel_count;
+};
+
+struct esas2r_target {
+ u8 flags;
+ #define TF_PASS_THRU 0x01
+ #define TF_USED 0x02
+ u8 new_target_state;
+ u8 target_state;
+ u8 buffered_target_state;
+#define TS_NOT_PRESENT 0x00
+#define TS_PRESENT 0x05
+#define TS_LUN_CHANGE 0x06
+#define TS_INVALID 0xFF
+ u32 block_size;
+ u32 inter_block;
+ u32 inter_byte;
+ u16 virt_targ_id;
+ u16 phys_targ_id;
+ u8 identifier_len;
+ u64 sas_addr;
+ u8 identifier[60];
+ struct atto_vda_ae_lu lu_event;
+};
+
+struct esas2r_request {
+ struct list_head comp_list;
+ struct list_head req_list;
+ union atto_vda_req *vrq;
+ struct esas2r_mem_desc *vrq_md;
+ union {
+ void *data_buf;
+ union atto_vda_rsp_data *vda_rsp_data;
+ };
+ u8 *sense_buf;
+ struct list_head sg_table_head;
+ struct esas2r_mem_desc *sg_table;
+ u32 timeout;
+ #define RQ_TIMEOUT_S1 0xFFFFFFFF
+ #define RQ_TIMEOUT_S2 0xFFFFFFFE
+ #define RQ_MAX_TIMEOUT 0xFFFFFFFD
+ u16 target_id;
+ u8 req_type;
+ #define RT_INI_REQ 0x01
+ #define RT_DISC_REQ 0x02
+ u8 sense_len;
+ union atto_vda_func_rsp func_rsp;
+ RQCALLBK comp_cb;
+ RQCALLBK interrupt_cb;
+ void *interrupt_cx;
+ u8 flags;
+ #define RF_1ST_IBLK_BASE 0x04
+ #define RF_FAILURE_OK 0x08
+ u8 req_stat;
+ u16 vda_req_sz;
+ #define RQ_SIZE_DEFAULT 0
+ u64 lba;
+ RQCALLBK aux_req_cb;
+ void *aux_req_cx;
+ u32 blk_len;
+ u32 max_blk_len;
+ union {
+ struct scsi_cmnd *cmd;
+ u8 *task_management_status_ptr;
+ };
+};
+
+struct esas2r_flash_context {
+ struct esas2r_flash_img *fi;
+ RQCALLBK interrupt_cb;
+ u8 *sgc_offset;
+ u8 *scratch;
+ u32 fi_hdr_len;
+ u8 task;
+ #define FMTSK_ERASE_BOOT 0
+ #define FMTSK_WRTBIOS 1
+ #define FMTSK_READBIOS 2
+ #define FMTSK_WRTMAC 3
+ #define FMTSK_READMAC 4
+ #define FMTSK_WRTEFI 5
+ #define FMTSK_READEFI 6
+ #define FMTSK_WRTCFG 7
+ #define FMTSK_READCFG 8
+ u8 func;
+ u16 num_comps;
+ u32 cmp_len;
+ u32 flsh_addr;
+ u32 curr_len;
+ u8 comp_typ;
+ struct esas2r_sg_context sgc;
+};
+
+struct esas2r_disc_context {
+ u8 disc_evt;
+ #define DCDE_DEV_CHANGE 0x01
+ #define DCDE_DEV_SCAN 0x02
+ u8 state;
+ #define DCS_DEV_RMV 0x00
+ #define DCS_DEV_ADD 0x01
+ #define DCS_BLOCK_DEV_SCAN 0x02
+ #define DCS_RAID_GRP_INFO 0x03
+ #define DCS_PART_INFO 0x04
+ #define DCS_PT_DEV_INFO 0x05
+ #define DCS_PT_DEV_ADDR 0x06
+ #define DCS_DISC_DONE 0xFF
+ u16 flags;
+ #define DCF_DEV_CHANGE 0x0001
+ #define DCF_DEV_SCAN 0x0002
+ #define DCF_POLLED 0x8000
+ u32 interleave;
+ u32 block_size;
+ u16 dev_ix;
+ u8 part_num;
+ u8 raid_grp_ix;
+ char raid_grp_name[16];
+ struct esas2r_target *curr_targ;
+ u16 curr_virt_id;
+ u16 curr_phys_id;
+ u8 scan_gen;
+ u8 dev_addr_type;
+ u64 sas_addr;
+};
+
+struct esas2r_mem_desc {
+ struct list_head next_desc;
+ void *virt_addr;
+ u64 phys_addr;
+ void *pad;
+ void *esas2r_data;
+ u32 esas2r_param;
+ u32 size;
+};
+
+enum fw_event_type {
+ fw_event_null,
+ fw_event_lun_change,
+ fw_event_present,
+ fw_event_not_present,
+ fw_event_vda_ae
+};
+
+struct esas2r_vda_ae {
+ u32 signature;
+#define ESAS2R_VDA_EVENT_SIG 0x4154544F
+ u8 bus_number;
+ u8 devfn;
+ u8 pad[2];
+ union atto_vda_ae vda_ae;
+};
+
+struct esas2r_fw_event_work {
+ struct list_head list;
+ struct delayed_work work;
+ struct esas2r_adapter *a;
+ enum fw_event_type type;
+ u8 data[sizeof(struct esas2r_vda_ae)];
+};
+
+enum state {
+ FW_INVALID_ST,
+ FW_STATUS_ST,
+ FW_COMMAND_ST
+};
+
+struct esas2r_firmware {
+ enum state state;
+ struct esas2r_flash_img header;
+ u8 *data;
+ u64 phys;
+ int orig_len;
+ void *header_buff;
+ u64 header_buff_phys;
+};
+
+struct esas2r_adapter {
+ struct esas2r_target targetdb[ESAS2R_MAX_TARGETS];
+ struct esas2r_target *targetdb_end;
+ unsigned char *regs;
+ unsigned char *data_window;
+ u32 volatile flags;
+ #define AF_PORT_CHANGE (u32)(0x00000001)
+ #define AF_CHPRST_NEEDED (u32)(0x00000004)
+ #define AF_CHPRST_PENDING (u32)(0x00000008)
+ #define AF_CHPRST_DETECTED (u32)(0x00000010)
+ #define AF_BUSRST_NEEDED (u32)(0x00000020)
+ #define AF_BUSRST_PENDING (u32)(0x00000040)
+ #define AF_BUSRST_DETECTED (u32)(0x00000080)
+ #define AF_DISABLED (u32)(0x00000100)
+ #define AF_FLASH_LOCK (u32)(0x00000200)
+ #define AF_OS_RESET (u32)(0x00002000)
+ #define AF_FLASHING (u32)(0x00004000)
+ #define AF_POWER_MGT (u32)(0x00008000)
+ #define AF_NVR_VALID (u32)(0x00010000)
+ #define AF_DEGRADED_MODE (u32)(0x00020000)
+ #define AF_DISC_PENDING (u32)(0x00040000)
+ #define AF_TASKLET_SCHEDULED (u32)(0x00080000)
+ #define AF_HEARTBEAT (u32)(0x00200000)
+ #define AF_HEARTBEAT_ENB (u32)(0x00400000)
+ #define AF_NOT_PRESENT (u32)(0x00800000)
+ #define AF_CHPRST_STARTED (u32)(0x01000000)
+ #define AF_FIRST_INIT (u32)(0x02000000)
+ #define AF_POWER_DOWN (u32)(0x04000000)
+ #define AF_DISC_IN_PROG (u32)(0x08000000)
+ #define AF_COMM_LIST_TOGGLE (u32)(0x10000000)
+ #define AF_LEGACY_SGE_MODE (u32)(0x20000000)
+ #define AF_DISC_POLLED (u32)(0x40000000)
+ u32 volatile flags2;
+ #define AF2_SERIAL_FLASH (u32)(0x00000001)
+ #define AF2_DEV_SCAN (u32)(0x00000002)
+ #define AF2_DEV_CNT_OK (u32)(0x00000004)
+ #define AF2_COREDUMP_AVAIL (u32)(0x00000008)
+ #define AF2_COREDUMP_SAVED (u32)(0x00000010)
+ #define AF2_VDA_POWER_DOWN (u32)(0x00000100)
+ #define AF2_THUNDERLINK (u32)(0x00000200)
+ #define AF2_THUNDERBOLT (u32)(0x00000400)
+ #define AF2_INIT_DONE (u32)(0x00000800)
+ #define AF2_INT_PENDING (u32)(0x00001000)
+ #define AF2_TIMER_TICK (u32)(0x00002000)
+ #define AF2_IRQ_CLAIMED (u32)(0x00004000)
+ #define AF2_MSI_ENABLED (u32)(0x00008000)
+ atomic_t disable_cnt;
+ atomic_t dis_ints_cnt;
+ u32 int_stat;
+ u32 int_mask;
+ u32 volatile *outbound_copy;
+ struct list_head avail_request;
+ spinlock_t request_lock;
+ spinlock_t sg_list_lock;
+ spinlock_t queue_lock;
+ spinlock_t mem_lock;
+ struct list_head free_sg_list_head;
+ struct esas2r_mem_desc *sg_list_mds;
+ struct list_head active_list;
+ struct list_head defer_list;
+ struct esas2r_request **req_table;
+ union {
+ u16 prev_dev_cnt;
+ u32 heartbeat_time;
+ #define ESAS2R_HEARTBEAT_TIME (3000)
+ };
+ u32 chip_uptime;
+ #define ESAS2R_CHP_UPTIME_MAX (60000)
+ #define ESAS2R_CHP_UPTIME_CNT (20000)
+ u64 uncached_phys;
+ u8 *uncached;
+ struct esas2r_sas_nvram *nvram;
+ struct esas2r_request general_req;
+ u8 init_msg;
+ #define ESAS2R_INIT_MSG_START 1
+ #define ESAS2R_INIT_MSG_INIT 2
+ #define ESAS2R_INIT_MSG_GET_INIT 3
+ #define ESAS2R_INIT_MSG_REINIT 4
+ u16 cmd_ref_no;
+ u32 fw_version;
+ u32 fw_build;
+ u32 chip_init_time;
+ #define ESAS2R_CHPRST_TIME (180000)
+ #define ESAS2R_CHPRST_WAIT_TIME (2000)
+ u32 last_tick_time;
+ u32 window_base;
+ RQBUILDSGL build_sgl;
+ struct esas2r_request *first_ae_req;
+ u32 list_size;
+ u32 last_write;
+ u32 last_read;
+ u16 max_vdareq_size;
+ u16 disc_wait_cnt;
+ struct esas2r_mem_desc inbound_list_md;
+ struct esas2r_mem_desc outbound_list_md;
+ struct esas2r_disc_context disc_ctx;
+ u8 *disc_buffer;
+ u32 disc_start_time;
+ u32 disc_wait_time;
+ u32 flash_ver;
+ char flash_rev[16];
+ char fw_rev[16];
+ char image_type[16];
+ struct esas2r_flash_context flash_context;
+ u32 num_targets_backend;
+ u32 ioctl_tunnel;
+ struct tasklet_struct tasklet;
+ struct pci_dev *pcid;
+ struct Scsi_Host *host;
+ unsigned int index;
+ char name[32];
+ struct timer_list timer;
+ struct esas2r_firmware firmware;
+ wait_queue_head_t nvram_waiter;
+ int nvram_command_done;
+ wait_queue_head_t fm_api_waiter;
+ int fm_api_command_done;
+ wait_queue_head_t vda_waiter;
+ int vda_command_done;
+ u8 *vda_buffer;
+ u64 ppvda_buffer;
+#define VDA_BUFFER_HEADER_SZ (offsetof(struct atto_ioctl_vda, data))
+#define VDA_MAX_BUFFER_SIZE (0x40000 + VDA_BUFFER_HEADER_SZ)
+ wait_queue_head_t fs_api_waiter;
+ int fs_api_command_done;
+ u64 ppfs_api_buffer;
+ u8 *fs_api_buffer;
+ u32 fs_api_buffer_size;
+ wait_queue_head_t buffered_ioctl_waiter;
+ int buffered_ioctl_done;
+ int uncached_size;
+ struct workqueue_struct *fw_event_q;
+ struct list_head fw_event_list;
+ spinlock_t fw_event_lock;
+ u8 fw_events_off; /* if '1', then ignore events */
+ char fw_event_q_name[ESAS2R_KOBJ_NAME_LEN];
+ /*
+	 * intr_mode stores the interrupt mode currently being used by this
+	 * adapter.  It is based on the interrupt_mode module parameter but may
+	 * fall back to a different mode if the requested mode cannot be
+	 * enabled.
+ */
+ int intr_mode;
+#define INTR_MODE_LEGACY 0
+#define INTR_MODE_MSI 1
+#define INTR_MODE_MSIX 2
+ struct esas2r_sg_context fm_api_sgc;
+ u8 *save_offset;
+ struct list_head vrq_mds_head;
+ struct esas2r_mem_desc *vrq_mds;
+ int num_vrqs;
+ struct semaphore fm_api_semaphore;
+ struct semaphore fs_api_semaphore;
+ struct semaphore nvram_semaphore;
+ struct atto_ioctl *local_atto_ioctl;
+ u8 fw_coredump_buff[ESAS2R_FWCOREDUMP_SZ];
+ unsigned int sysfs_fw_created:1;
+ unsigned int sysfs_fs_created:1;
+ unsigned int sysfs_vda_created:1;
+ unsigned int sysfs_hw_created:1;
+ unsigned int sysfs_live_nvram_created:1;
+ unsigned int sysfs_default_nvram_created:1;
+};
+
+/*
+ * Function Declarations
+ * SCSI functions
+ */
+int esas2r_release(struct Scsi_Host *);
+const char *esas2r_info(struct Scsi_Host *);
+int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
+ struct esas2r_sas_nvram *data);
+int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg);
+int esas2r_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
+u8 handle_hba_ioctl(struct esas2r_adapter *a,
+ struct atto_ioctl *ioctl_hba);
+int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd);
+int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh);
+int esas2r_slave_alloc(struct scsi_device *dev);
+int esas2r_slave_configure(struct scsi_device *dev);
+void esas2r_slave_destroy(struct scsi_device *dev);
+int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason);
+int esas2r_change_queue_type(struct scsi_device *dev, int type);
+long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
+
+/* SCSI error handler (eh) functions */
+int esas2r_eh_abort(struct scsi_cmnd *cmd);
+int esas2r_device_reset(struct scsi_cmnd *cmd);
+int esas2r_host_reset(struct scsi_cmnd *cmd);
+int esas2r_bus_reset(struct scsi_cmnd *cmd);
+int esas2r_target_reset(struct scsi_cmnd *cmd);
+
+/* Internal functions */
+int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
+ int index);
+int esas2r_cleanup(struct Scsi_Host *host);
+int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count);
+int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
+ int count);
+int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count);
+int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
+ int count);
+int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count);
+int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
+ int count);
+void esas2r_adapter_tasklet(unsigned long context);
+irqreturn_t esas2r_interrupt(int irq, void *dev_id);
+irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id);
+void esas2r_kickoff_timer(struct esas2r_adapter *a);
+int esas2r_suspend(struct pci_dev *pcid, pm_message_t state);
+int esas2r_resume(struct pci_dev *pcid);
+void esas2r_fw_event_off(struct esas2r_adapter *a);
+void esas2r_fw_event_on(struct esas2r_adapter *a);
+bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
+ struct esas2r_sas_nvram *nvram);
+void esas2r_nvram_get_defaults(struct esas2r_adapter *a,
+ struct esas2r_sas_nvram *nvram);
+void esas2r_complete_request_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+void esas2r_reset_detected(struct esas2r_adapter *a);
+void esas2r_target_state_changed(struct esas2r_adapter *ha, u16 targ_id,
+ u8 state);
+int esas2r_req_status_to_error(u8 req_stat);
+void esas2r_kill_adapter(int i);
+void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq);
+struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a);
+u32 esas2r_get_uncached_size(struct esas2r_adapter *a);
+bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
+ void **uncached_area);
+bool esas2r_check_adapter(struct esas2r_adapter *a);
+bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll);
+void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq);
+bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
+ struct esas2r_request *rqaux, u8 task_mgt_func);
+void esas2r_do_tasklet_tasks(struct esas2r_adapter *a);
+void esas2r_adapter_interrupt(struct esas2r_adapter *a);
+void esas2r_do_deferred_processes(struct esas2r_adapter *a);
+void esas2r_reset_bus(struct esas2r_adapter *a);
+void esas2r_reset_adapter(struct esas2r_adapter *a);
+void esas2r_timer_tick(struct esas2r_adapter *a);
+const char *esas2r_get_model_name(struct esas2r_adapter *a);
+const char *esas2r_get_model_name_short(struct esas2r_adapter *a);
+u32 esas2r_stall_execution(struct esas2r_adapter *a, u32 start_time,
+ u32 *delay);
+void esas2r_build_flash_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u8 sub_func,
+ u8 cksum,
+ u32 addr,
+ u32 length);
+void esas2r_build_mgt_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u8 sub_func,
+ u8 scan_gen,
+ u16 dev_index,
+ u32 length,
+ void *data);
+void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq);
+void esas2r_build_cli_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u32 length,
+ u32 cmd_rsp_len);
+void esas2r_build_ioctl_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u32 length,
+ u8 sub_func);
+void esas2r_build_cfg_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u8 sub_func,
+ u32 length,
+ void *data);
+void esas2r_power_down(struct esas2r_adapter *a);
+bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll);
+void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq);
+u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo);
+bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
+ struct esas2r_ioctl_fs *fs,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc);
+bool esas2r_read_flash_block(struct esas2r_adapter *a, void *to, u32 from,
+ u32 size);
+bool esas2r_read_mem_block(struct esas2r_adapter *a, void *to, u32 from,
+ u32 size);
+bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
+ struct esas2r_request *rq, struct esas2r_sg_context *sgc);
+void esas2r_force_interrupt(struct esas2r_adapter *a);
+void esas2r_local_start_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+void esas2r_process_adapter_reset(struct esas2r_adapter *a);
+void esas2r_complete_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+void esas2r_dummy_complete(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq);
+void esas2r_start_vda_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+bool esas2r_read_flash_rev(struct esas2r_adapter *a);
+bool esas2r_read_image_type(struct esas2r_adapter *a);
+bool esas2r_nvram_read_direct(struct esas2r_adapter *a);
+bool esas2r_nvram_validate(struct esas2r_adapter *a);
+void esas2r_nvram_set_defaults(struct esas2r_adapter *a);
+bool esas2r_print_flash_rev(struct esas2r_adapter *a);
+void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt);
+bool esas2r_init_msgs(struct esas2r_adapter *a);
+bool esas2r_is_adapter_present(struct esas2r_adapter *a);
+void esas2r_nuxi_mgt_data(u8 function, void *data);
+void esas2r_nuxi_cfg_data(u8 function, void *data);
+void esas2r_nuxi_ae_data(union atto_vda_ae *ae);
+void esas2r_reset_chip(struct esas2r_adapter *a);
+void esas2r_log_request_failure(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+void esas2r_polled_interrupt(struct esas2r_adapter *a);
+bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
+ u8 status);
+bool esas2r_build_sg_list_sge(struct esas2r_adapter *a,
+ struct esas2r_sg_context *sgc);
+bool esas2r_build_sg_list_prd(struct esas2r_adapter *a,
+ struct esas2r_sg_context *sgc);
+void esas2r_targ_db_initialize(struct esas2r_adapter *a);
+void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify);
+void esas2r_targ_db_report_changes(struct esas2r_adapter *a);
+struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a,
+ struct esas2r_disc_context *dc);
+struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a,
+ struct esas2r_disc_context *dc,
+ u8 *ident,
+ u8 ident_len);
+void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t);
+struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a,
+ u64 *sas_addr);
+struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a,
+ void *identifier,
+ u8 ident_len);
+u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id);
+struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a,
+ u16 virt_id);
+u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a);
+void esas2r_disc_initialize(struct esas2r_adapter *a);
+void esas2r_disc_start_waiting(struct esas2r_adapter *a);
+void esas2r_disc_check_for_work(struct esas2r_adapter *a);
+void esas2r_disc_check_complete(struct esas2r_adapter *a);
+void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt);
+bool esas2r_disc_start_port(struct esas2r_adapter *a);
+void esas2r_disc_local_start_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str);
+bool esas2r_process_vda_ioctl(struct esas2r_adapter *a,
+ struct atto_ioctl_vda *vi,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc);
+void esas2r_queue_fw_event(struct esas2r_adapter *a,
+ enum fw_event_type type,
+ void *data,
+ int data_sz);
+
+/* Inline functions */
+static inline u32 esas2r_lock_set_flags(volatile u32 *flags, u32 bits)
+{
+ return test_and_set_bit(ilog2(bits), (volatile unsigned long *)flags);
+}
+
+static inline u32 esas2r_lock_clear_flags(volatile u32 *flags, u32 bits)
+{
+ return test_and_clear_bit(ilog2(bits),
+ (volatile unsigned long *)flags);
+}
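+/*
+ * Note: because the bit number is derived with ilog2(), these helpers are
+ * only meaningful when 'bits' is a single-bit mask such as the AF_xxx and
+ * AF2_xxx flags; the return value reflects the previous state of that one
+ * bit rather than the whole flags word.
+ */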
+
+/* Allocate a chip scatter/gather list entry */
+static inline struct esas2r_mem_desc *esas2r_alloc_sgl(struct esas2r_adapter *a)
+{
+ unsigned long flags;
+ struct list_head *sgl;
+ struct esas2r_mem_desc *result = NULL;
+
+ spin_lock_irqsave(&a->sg_list_lock, flags);
+ if (likely(!list_empty(&a->free_sg_list_head))) {
+ sgl = a->free_sg_list_head.next;
+ result = list_entry(sgl, struct esas2r_mem_desc, next_desc);
+ list_del_init(sgl);
+ }
+ spin_unlock_irqrestore(&a->sg_list_lock, flags);
+
+ return result;
+}
+
+/* Initialize a scatter/gather context */
+static inline void esas2r_sgc_init(struct esas2r_sg_context *sgc,
+ struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ struct atto_vda_sge *first)
+{
+ sgc->adapter = a;
+ sgc->first_req = rq;
+
+ /*
+ * set the limit pointer such that an SGE pointer above this value
+ * would be the first one to overflow the SGL.
+ */
+	sgc->sge.a64.limit = (struct atto_vda_sge *)((u8 *)rq->vrq
+						     + (sizeof(union atto_vda_req) / 8)
+						     - sizeof(struct atto_vda_sge));
+ if (first) {
+ sgc->sge.a64.last =
+ sgc->sge.a64.curr = first;
+ rq->vrq->scsi.sg_list_offset = (u8)
+ ((u8 *)first -
+ (u8 *)rq->vrq);
+ } else {
+ sgc->sge.a64.last =
+ sgc->sge.a64.curr = &rq->vrq->scsi.u.sge[0];
+ rq->vrq->scsi.sg_list_offset =
+ (u8)offsetof(struct atto_vda_scsi_req, u.sge);
+ }
+ sgc->sge.a64.chain = NULL;
+}
+
+static inline void esas2r_rq_init_request(struct esas2r_request *rq,
+ struct esas2r_adapter *a)
+{
+ union atto_vda_req *vrq = rq->vrq;
+ u32 handle;
+
+ INIT_LIST_HEAD(&rq->sg_table_head);
+ rq->data_buf = (void *)(vrq + 1);
+ rq->interrupt_cb = NULL;
+ rq->comp_cb = esas2r_complete_request_cb;
+ rq->flags = 0;
+ rq->timeout = 0;
+ rq->req_stat = RS_PENDING;
+ rq->req_type = RT_INI_REQ;
+
+ /* clear the outbound response */
+ rq->func_rsp.dwords[0] = 0;
+ rq->func_rsp.dwords[1] = 0;
+
+ /*
+	 * Clear the size of the VDA request.  esas2r_build_sg_list() will
+	 * only allow the size of the request to grow; some management
+	 * requests go through there twice, and the second pass sets a
+	 * smaller request size.  If this is not modified at all, we'll set
+	 * it to the size of the entire VDA request.
+ */
+ rq->vda_req_sz = RQ_SIZE_DEFAULT;
+
+ /* req_table entry should be NULL at this point - if not, halt */
+
+ if (a->req_table[LOWORD(vrq->scsi.handle)])
+ esas2r_bugon();
+
+ /* fill in the table for this handle so we can get back to the
+ * request.
+ */
+ a->req_table[LOWORD(vrq->scsi.handle)] = rq;
+
+ /*
+ * add a reference number to the handle to make it unique (until it
+ * wraps of course) while preserving the upper word
+ */
+
+ handle = be32_to_cpu(vrq->scsi.handle) & 0xFFFF0000;
+ vrq->scsi.handle = cpu_to_be32(handle + a->cmd_ref_no++);
+
+ /*
+ * the following formats a SCSI request. the caller can override as
+ * necessary. clear_vda_request can be called to clear the VDA
+ * request for another type of request.
+ */
+ vrq->scsi.function = VDA_FUNC_SCSI;
+ vrq->scsi.sense_len = SENSE_DATA_SZ;
+
+ /* clear out sg_list_offset and chain_offset */
+ vrq->scsi.sg_list_offset = 0;
+ vrq->scsi.chain_offset = 0;
+ vrq->scsi.flags = 0;
+ vrq->scsi.reserved = 0;
+
+ /* set the sense buffer to be the data payload buffer */
+ vrq->scsi.ppsense_buf
+ = cpu_to_le64(rq->vrq_md->phys_addr +
+ sizeof(union atto_vda_req));
+}
+
+static inline void esas2r_rq_free_sg_lists(struct esas2r_request *rq,
+ struct esas2r_adapter *a)
+{
+ unsigned long flags;
+
+ if (list_empty(&rq->sg_table_head))
+ return;
+
+ spin_lock_irqsave(&a->sg_list_lock, flags);
+ list_splice_tail_init(&rq->sg_table_head, &a->free_sg_list_head);
+ spin_unlock_irqrestore(&a->sg_list_lock, flags);
+}
+
+static inline void esas2r_rq_destroy_request(struct esas2r_request *rq,
+ struct esas2r_adapter *a)
+{
+ esas2r_rq_free_sg_lists(rq, a);
+ a->req_table[LOWORD(rq->vrq->scsi.handle)] = NULL;
+ rq->data_buf = NULL;
+}
+
+static inline bool esas2r_is_tasklet_pending(struct esas2r_adapter *a)
+{
+ return (a->flags & (AF_BUSRST_NEEDED | AF_BUSRST_DETECTED
+ | AF_CHPRST_NEEDED | AF_CHPRST_DETECTED
+ | AF_PORT_CHANGE))
+ ? true : false;
+}
+
+/*
+ * Build the scatter/gather list for an I/O request according to the
+ * specifications placed in the esas2r_sg_context. The caller must initialize
+ * struct esas2r_sg_context prior to the initial call by calling
+ * esas2r_sgc_init()
+ */
+static inline bool esas2r_build_sg_list(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc)
+{
+ if (unlikely(le32_to_cpu(rq->vrq->scsi.length) == 0))
+ return true;
+
+ return (*a->build_sgl)(a, sgc);
+}
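+/*
+ * Typical call sequence (sketch only; cur_offset, length and get_phys_addr
+ * must also be wired up by the caller as appropriate):
+ *
+ *     struct esas2r_sg_context sgc;
+ *
+ *     esas2r_sgc_init(&sgc, a, rq, NULL);
+ *     sgc.length = le32_to_cpu(rq->vrq->scsi.length);
+ *     if (!esas2r_build_sg_list(a, rq, &sgc))
+ *             ... fail the request ...
+ */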
+
+static inline void esas2r_disable_chip_interrupts(struct esas2r_adapter *a)
+{
+ if (atomic_inc_return(&a->dis_ints_cnt) == 1)
+ esas2r_write_register_dword(a, MU_INT_MASK_OUT,
+ ESAS2R_INT_DIS_MASK);
+}
+
+static inline void esas2r_enable_chip_interrupts(struct esas2r_adapter *a)
+{
+ if (atomic_dec_return(&a->dis_ints_cnt) == 0)
+ esas2r_write_register_dword(a, MU_INT_MASK_OUT,
+ ESAS2R_INT_ENB_MASK);
+}
+
+/* Schedule a TASKLET to perform non-interrupt tasks that may require delays
+ * or long completion times.
+ */
+static inline void esas2r_schedule_tasklet(struct esas2r_adapter *a)
+{
+ /* make sure we don't schedule twice */
+ if (!(esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED) &
+ ilog2(AF_TASKLET_SCHEDULED)))
+ tasklet_hi_schedule(&a->tasklet);
+}
+
+static inline void esas2r_enable_heartbeat(struct esas2r_adapter *a)
+{
+ if (!(a->flags & (AF_DEGRADED_MODE | AF_CHPRST_PENDING))
+ && (a->nvram->options2 & SASNVR2_HEARTBEAT))
+ esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT_ENB);
+ else
+ esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB);
+}
+
+static inline void esas2r_disable_heartbeat(struct esas2r_adapter *a)
+{
+ esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT_ENB);
+ esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT);
+}
+
+/* Set the initial state for resetting the adapter on the next pass through
+ * esas2r_do_deferred_processes().
+ */
+static inline void esas2r_local_reset_adapter(struct esas2r_adapter *a)
+{
+ esas2r_disable_heartbeat(a);
+
+ esas2r_lock_set_flags(&a->flags, AF_CHPRST_NEEDED);
+ esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING);
+ esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING);
+}
+
+/* See if an interrupt is pending on the adapter. */
+static inline bool esas2r_adapter_interrupt_pending(struct esas2r_adapter *a)
+{
+ u32 intstat;
+
+ if (a->int_mask == 0)
+ return false;
+
+ intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
+
+ if ((intstat & a->int_mask) == 0)
+ return false;
+
+ esas2r_disable_chip_interrupts(a);
+
+ a->int_stat = intstat;
+ a->int_mask = 0;
+
+ return true;
+}
+
+static inline u16 esas2r_targ_get_id(struct esas2r_target *t,
+ struct esas2r_adapter *a)
+{
+ return (u16)(uintptr_t)(t - a->targetdb);
+}
+
+/* Build and start an asynchronous event request */
+static inline void esas2r_start_ae_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ unsigned long flags;
+
+ esas2r_build_ae_req(a, rq);
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+ esas2r_start_vda_request(a, rq);
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+}
+
+static inline void esas2r_comp_list_drain(struct esas2r_adapter *a,
+ struct list_head *comp_list)
+{
+ struct esas2r_request *rq;
+ struct list_head *element, *next;
+
+ list_for_each_safe(element, next, comp_list) {
+ rq = list_entry(element, struct esas2r_request, comp_list);
+ list_del_init(element);
+ esas2r_complete_request(a, rq);
+ }
+}
+
+/* sysfs handlers */
+extern struct bin_attribute bin_attr_fw;
+extern struct bin_attribute bin_attr_fs;
+extern struct bin_attribute bin_attr_vda;
+extern struct bin_attribute bin_attr_hw;
+extern struct bin_attribute bin_attr_live_nvram;
+extern struct bin_attribute bin_attr_default_nvram;
+
+#endif /* ESAS2R_H */
diff --git a/drivers/scsi/esas2r/esas2r_disc.c b/drivers/scsi/esas2r/esas2r_disc.c
new file mode 100644
index 00000000000..dec6c334ce3
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_disc.c
@@ -0,0 +1,1189 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_disc.c
+ * esas2r device discovery routines
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "esas2r.h"
+
+/* Miscellaneous internal discovery routines */
+static void esas2r_disc_abort(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static bool esas2r_disc_continue(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a);
+static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr);
+static bool esas2r_disc_start_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+
+/* Internal discovery routines that process the states */
+static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static bool esas2r_disc_part_info(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+
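+/*
+ * Device discovery is driven as a small state machine.  Queued events are
+ * decoded by esas2r_disc_start_port() into an initial state (block device
+ * scan or device change) and esas2r_disc_continue() then walks the DCS_*
+ * states, issuing one firmware request per step until DCS_DISC_DONE.
+ */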
+void esas2r_disc_initialize(struct esas2r_adapter *a)
+{
+ struct esas2r_sas_nvram *nvr = a->nvram;
+
+ esas2r_trace_enter();
+
+ esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
+ esas2r_lock_clear_flags(&a->flags2, AF2_DEV_SCAN);
+ esas2r_lock_clear_flags(&a->flags2, AF2_DEV_CNT_OK);
+
+ a->disc_start_time = jiffies_to_msecs(jiffies);
+ a->disc_wait_time = nvr->dev_wait_time * 1000;
+ a->disc_wait_cnt = nvr->dev_wait_count;
+
+ if (a->disc_wait_cnt > ESAS2R_MAX_TARGETS)
+ a->disc_wait_cnt = ESAS2R_MAX_TARGETS;
+
+ /*
+ * If we are doing chip reset or power management processing, always
+ * wait for devices. Use the NVRAM device count if it is greater than
+ * the previously discovered device count.
+ */
+
+ esas2r_hdebug("starting discovery...");
+
+ a->general_req.interrupt_cx = NULL;
+
+ if (a->flags & (AF_CHPRST_DETECTED | AF_POWER_MGT)) {
+ if (a->prev_dev_cnt == 0) {
+ /* Don't bother waiting if there is nothing to wait
+ * for.
+ */
+ a->disc_wait_time = 0;
+ } else {
+ /*
+ * Set the device wait count to what was previously
+ * found. We don't care if the user only configured
+ * a time because we know the exact count to wait for.
+ * There is no need to honor the user's wishes to
+ * always wait the full time.
+ */
+ a->disc_wait_cnt = a->prev_dev_cnt;
+
+ /*
+ * bump the minimum wait time to 15 seconds since the
+ * default is 3 (system boot or the boot driver usually
+ * buys us more time).
+ */
+ if (a->disc_wait_time < 15000)
+ a->disc_wait_time = 15000;
+ }
+ }
+
+ esas2r_trace("disc wait count: %d", a->disc_wait_cnt);
+ esas2r_trace("disc wait time: %d", a->disc_wait_time);
+
+ if (a->disc_wait_time == 0)
+ esas2r_disc_check_complete(a);
+
+ esas2r_trace_exit();
+}
+
+void esas2r_disc_start_waiting(struct esas2r_adapter *a)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+
+ if (a->disc_ctx.disc_evt)
+ esas2r_disc_start_port(a);
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+}
+
+void esas2r_disc_check_for_work(struct esas2r_adapter *a)
+{
+ struct esas2r_request *rq = &a->general_req;
+
+ /* service any pending interrupts first */
+
+ esas2r_polled_interrupt(a);
+
+ /*
+ * Now, interrupt processing may have queued up a discovery event. Go
+ * see if we have one to start. We couldn't start it in the ISR since
+ * polled discovery would cause a deadlock.
+ */
+
+ esas2r_disc_start_waiting(a);
+
+ if (rq->interrupt_cx == NULL)
+ return;
+
+ if (rq->req_stat == RS_STARTED
+ && rq->timeout <= RQ_MAX_TIMEOUT) {
+ /* wait for the current discovery request to complete. */
+ esas2r_wait_request(a, rq);
+
+ if (rq->req_stat == RS_TIMEOUT) {
+ esas2r_disc_abort(a, rq);
+ esas2r_local_reset_adapter(a);
+ return;
+ }
+ }
+
+ if (rq->req_stat == RS_PENDING
+ || rq->req_stat == RS_STARTED)
+ return;
+
+ esas2r_disc_continue(a, rq);
+}
+
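+/*
+ * Decide whether we are done waiting for devices.  We keep waiting until
+ * either the configured wait time expires or the configured device count is
+ * reached; a full device scan is scheduled along the way, and once the count
+ * is met the wait time is extended slightly so trailing events can still be
+ * consumed before discovery is declared complete.
+ */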
+void esas2r_disc_check_complete(struct esas2r_adapter *a)
+{
+ unsigned long flags;
+
+ esas2r_trace_enter();
+
+ /* check to see if we should be waiting for devices */
+ if (a->disc_wait_time) {
+ u32 currtime = jiffies_to_msecs(jiffies);
+ u32 time = currtime - a->disc_start_time;
+
+ /*
+ * Wait until the device wait time is exhausted or the device
+ * wait count is satisfied.
+ */
+ if (time < a->disc_wait_time
+ && (esas2r_targ_db_get_tgt_cnt(a) < a->disc_wait_cnt
+ || a->disc_wait_cnt == 0)) {
+ /* After three seconds of waiting, schedule a scan. */
+ if (time >= 3000
+ && !(esas2r_lock_set_flags(&a->flags2,
+ AF2_DEV_SCAN) &
+ ilog2(AF2_DEV_SCAN))) {
+ spin_lock_irqsave(&a->mem_lock, flags);
+ esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+ }
+
+ esas2r_trace_exit();
+ return;
+ }
+
+ /*
+ * We are done waiting...we think. Adjust the wait time to
+ * consume events after the count is met.
+ */
+ if (!(esas2r_lock_set_flags(&a->flags2, AF2_DEV_CNT_OK)
+ & ilog2(AF2_DEV_CNT_OK)))
+ a->disc_wait_time = time + 3000;
+
+ /* If we haven't done a full scan yet, do it now. */
+ if (!(esas2r_lock_set_flags(&a->flags2,
+ AF2_DEV_SCAN) &
+ ilog2(AF2_DEV_SCAN))) {
+ spin_lock_irqsave(&a->mem_lock, flags);
+ esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ esas2r_trace_exit();
+ return;
+ }
+
+ /*
+ * Now, if there is still time left to consume events, continue
+ * waiting.
+ */
+ if (time < a->disc_wait_time) {
+ esas2r_trace_exit();
+ return;
+ }
+ } else {
+ if (!(esas2r_lock_set_flags(&a->flags2,
+ AF2_DEV_SCAN) &
+ ilog2(AF2_DEV_SCAN))) {
+ spin_lock_irqsave(&a->mem_lock, flags);
+ esas2r_disc_queue_event(a, DCDE_DEV_SCAN);
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+ }
+ }
+
+ /* We want to stop waiting for devices. */
+ a->disc_wait_time = 0;
+
+ if ((a->flags & AF_DISC_POLLED)
+ && (a->flags & AF_DISC_IN_PROG)) {
+ /*
+ * Polled discovery is still pending so continue the active
+ * discovery until it is done. At that point, we will stop
+ * polled discovery and transition to interrupt driven
+ * discovery.
+ */
+ } else {
+ /*
+ * Done waiting for devices. Note that we get here immediately
+ * after deferred waiting completes because that is interrupt
+ * driven; i.e., there is no transition.
+ */
+ esas2r_disc_fix_curr_requests(a);
+ esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
+
+ /*
+ * We have deferred target state changes until now because we
+ * don't want to report any removals (due to the first arrival)
+ * until the device wait time expires.
+ */
+ esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE);
+ }
+
+ esas2r_trace_exit();
+}
+
+void esas2r_disc_queue_event(struct esas2r_adapter *a, u8 disc_evt)
+{
+ struct esas2r_disc_context *dc = &a->disc_ctx;
+
+ esas2r_trace_enter();
+
+ esas2r_trace("disc_event: %d", disc_evt);
+
+ /* Initialize the discovery context */
+ dc->disc_evt |= disc_evt;
+
+ /*
+ * Don't start discovery before or during polled discovery. If we did,
+ * we would have a deadlock if we are in the ISR already.
+ */
+ if (!(a->flags & (AF_CHPRST_PENDING | AF_DISC_POLLED)))
+ esas2r_disc_start_port(a);
+
+ esas2r_trace_exit();
+}
+
+bool esas2r_disc_start_port(struct esas2r_adapter *a)
+{
+ struct esas2r_request *rq = &a->general_req;
+ struct esas2r_disc_context *dc = &a->disc_ctx;
+ bool ret;
+
+ esas2r_trace_enter();
+
+ if (a->flags & AF_DISC_IN_PROG) {
+ esas2r_trace_exit();
+
+ return false;
+ }
+
+ /* If there is a discovery waiting, process it. */
+ if (dc->disc_evt) {
+ if ((a->flags & AF_DISC_POLLED)
+ && a->disc_wait_time == 0) {
+ /*
+ * We are doing polled discovery, but we no longer want
+ * to wait for devices. Stop polled discovery and
+ * transition to interrupt driven discovery.
+ */
+
+ esas2r_trace_exit();
+
+ return false;
+ }
+ } else {
+ /* Discovery is complete. */
+
+ esas2r_hdebug("disc done");
+
+ esas2r_lock_set_flags(&a->flags, AF_PORT_CHANGE);
+
+ esas2r_trace_exit();
+
+ return false;
+ }
+
+ /* Handle the discovery context */
+ esas2r_trace("disc_evt: %d", dc->disc_evt);
+ esas2r_lock_set_flags(&a->flags, AF_DISC_IN_PROG);
+ dc->flags = 0;
+
+ if (a->flags & AF_DISC_POLLED)
+ dc->flags |= DCF_POLLED;
+
+ rq->interrupt_cx = dc;
+ rq->req_stat = RS_SUCCESS;
+
+ /* Decode the event code */
+ if (dc->disc_evt & DCDE_DEV_SCAN) {
+ dc->disc_evt &= ~DCDE_DEV_SCAN;
+
+ dc->flags |= DCF_DEV_SCAN;
+ dc->state = DCS_BLOCK_DEV_SCAN;
+ } else if (dc->disc_evt & DCDE_DEV_CHANGE) {
+ dc->disc_evt &= ~DCDE_DEV_CHANGE;
+
+ dc->flags |= DCF_DEV_CHANGE;
+ dc->state = DCS_DEV_RMV;
+ }
+
+ /* Continue interrupt driven discovery */
+ if (!(a->flags & AF_DISC_POLLED))
+ ret = esas2r_disc_continue(a, rq);
+ else
+ ret = true;
+
+ esas2r_trace_exit();
+
+ return ret;
+}
+
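+/*
+ * Step the discovery state machine.  Each state handler either issues a
+ * firmware request and returns true (we then wait for its completion
+ * callback to call back in here) or returns false so the loop moves on to
+ * the next state.  Once DCS_DISC_DONE clears the work flags, discovery is
+ * marked no longer in progress and any queued event is started.
+ */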
+static bool esas2r_disc_continue(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ bool rslt;
+
+ /* Device discovery/removal */
+ while (dc->flags & (DCF_DEV_CHANGE | DCF_DEV_SCAN)) {
+ rslt = false;
+
+ switch (dc->state) {
+ case DCS_DEV_RMV:
+
+ rslt = esas2r_disc_dev_remove(a, rq);
+ break;
+
+ case DCS_DEV_ADD:
+
+ rslt = esas2r_disc_dev_add(a, rq);
+ break;
+
+ case DCS_BLOCK_DEV_SCAN:
+
+ rslt = esas2r_disc_block_dev_scan(a, rq);
+ break;
+
+ case DCS_RAID_GRP_INFO:
+
+ rslt = esas2r_disc_raid_grp_info(a, rq);
+ break;
+
+ case DCS_PART_INFO:
+
+ rslt = esas2r_disc_part_info(a, rq);
+ break;
+
+ case DCS_PT_DEV_INFO:
+
+ rslt = esas2r_disc_passthru_dev_info(a, rq);
+ break;
+ case DCS_PT_DEV_ADDR:
+
+ rslt = esas2r_disc_passthru_dev_addr(a, rq);
+ break;
+ case DCS_DISC_DONE:
+
+ dc->flags &= ~(DCF_DEV_CHANGE | DCF_DEV_SCAN);
+ break;
+
+ default:
+
+ esas2r_bugon();
+ dc->state = DCS_DISC_DONE;
+ break;
+ }
+
+ if (rslt)
+ return true;
+ }
+
+ /* Discovery is done...for now. */
+ rq->interrupt_cx = NULL;
+
+ if (!(a->flags & AF_DISC_PENDING))
+ esas2r_disc_fix_curr_requests(a);
+
+ esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
+
+ /* Start the next discovery. */
+ return esas2r_disc_start_port(a);
+}
+
+static bool esas2r_disc_start_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ unsigned long flags;
+
+ /* Set the timeout to a minimum value. */
+ if (rq->timeout < ESAS2R_DEFAULT_TMO)
+ rq->timeout = ESAS2R_DEFAULT_TMO;
+
+ /*
+ * Override the request type to distinguish discovery requests. If we
+ * end up deferring the request, esas2r_disc_local_start_request()
+ * will be called to restart it.
+ */
+ rq->req_type = RT_DISC_REQ;
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ if (!(a->flags & (AF_CHPRST_PENDING | AF_FLASHING)))
+ esas2r_disc_local_start_request(a, rq);
+ else
+ list_add_tail(&rq->req_list, &a->defer_list);
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+
+ return true;
+}
+
+void esas2r_disc_local_start_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ esas2r_trace_enter();
+
+ list_add_tail(&rq->req_list, &a->active_list);
+
+ esas2r_start_vda_request(a, rq);
+
+ esas2r_trace_exit();
+
+ return;
+}
+
+static void esas2r_disc_abort(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+
+ esas2r_trace_enter();
+
+ /* abort the current discovery */
+
+ dc->state = DCS_DISC_DONE;
+
+ esas2r_trace_exit();
+}
+
+static bool esas2r_disc_block_dev_scan(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ bool rslt;
+
+ esas2r_trace_enter();
+
+ esas2r_rq_init_request(rq, a);
+
+ esas2r_build_mgt_req(a,
+ rq,
+ VDAMGT_DEV_SCAN,
+ 0,
+ 0,
+ 0,
+ NULL);
+
+ rq->comp_cb = esas2r_disc_block_dev_scan_cb;
+
+ rq->timeout = 30000;
+ rq->interrupt_cx = dc;
+
+ rslt = esas2r_disc_start_request(a, rq);
+
+ esas2r_trace_exit();
+
+ return rslt;
+}
+
+static void esas2r_disc_block_dev_scan_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ unsigned long flags;
+
+ esas2r_trace_enter();
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+
+ if (rq->req_stat == RS_SUCCESS)
+ dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
+
+ dc->state = DCS_RAID_GRP_INFO;
+ dc->raid_grp_ix = 0;
+
+ esas2r_rq_destroy_request(rq, a);
+
+ /* continue discovery if it's interrupt driven */
+
+ if (!(dc->flags & DCF_POLLED))
+ esas2r_disc_continue(a, rq);
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ esas2r_trace_exit();
+}
+
+static bool esas2r_disc_raid_grp_info(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ bool rslt;
+ struct atto_vda_grp_info *grpinfo;
+
+ esas2r_trace_enter();
+
+ esas2r_trace("raid_group_idx: %d", dc->raid_grp_ix);
+
+ if (dc->raid_grp_ix >= VDA_MAX_RAID_GROUPS) {
+ dc->state = DCS_DISC_DONE;
+
+ esas2r_trace_exit();
+
+ return false;
+ }
+
+ esas2r_rq_init_request(rq, a);
+
+ grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;
+
+ memset(grpinfo, 0, sizeof(struct atto_vda_grp_info));
+
+ esas2r_build_mgt_req(a,
+ rq,
+ VDAMGT_GRP_INFO,
+ dc->scan_gen,
+ 0,
+ sizeof(struct atto_vda_grp_info),
+ NULL);
+
+ grpinfo->grp_index = dc->raid_grp_ix;
+
+ rq->comp_cb = esas2r_disc_raid_grp_info_cb;
+
+ rq->interrupt_cx = dc;
+
+ rslt = esas2r_disc_start_request(a, rq);
+
+ esas2r_trace_exit();
+
+ return rslt;
+}
+
+static void esas2r_disc_raid_grp_info_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ unsigned long flags;
+ struct atto_vda_grp_info *grpinfo;
+
+ esas2r_trace_enter();
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+
+ if (rq->req_stat == RS_SCAN_GEN) {
+ dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
+ dc->raid_grp_ix = 0;
+ goto done;
+ }
+
+ if (rq->req_stat == RS_SUCCESS) {
+ grpinfo = &rq->vda_rsp_data->mgt_data.data.grp_info;
+
+ if (grpinfo->status != VDA_GRP_STAT_ONLINE
+ && grpinfo->status != VDA_GRP_STAT_DEGRADED) {
+ /* go to the next group. */
+
+ dc->raid_grp_ix++;
+ } else {
+ memcpy(&dc->raid_grp_name[0],
+ &grpinfo->grp_name[0],
+ sizeof(grpinfo->grp_name));
+
+ dc->interleave = le32_to_cpu(grpinfo->interleave);
+ dc->block_size = le32_to_cpu(grpinfo->block_size);
+
+ dc->state = DCS_PART_INFO;
+ dc->part_num = 0;
+ }
+ } else {
+ if (!(rq->req_stat == RS_GRP_INVALID)) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "A request for RAID group info failed - "
+ "returned with %x",
+ rq->req_stat);
+ }
+
+ dc->dev_ix = 0;
+ dc->state = DCS_PT_DEV_INFO;
+ }
+
+done:
+
+ esas2r_rq_destroy_request(rq, a);
+
+ /* continue discovery if it's interrupt driven */
+
+ if (!(dc->flags & DCF_POLLED))
+ esas2r_disc_continue(a, rq);
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ esas2r_trace_exit();
+}
+
+static bool esas2r_disc_part_info(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ bool rslt;
+ struct atto_vdapart_info *partinfo;
+
+ esas2r_trace_enter();
+
+ esas2r_trace("part_num: %d", dc->part_num);
+
+ if (dc->part_num >= VDA_MAX_PARTITIONS) {
+ dc->state = DCS_RAID_GRP_INFO;
+ dc->raid_grp_ix++;
+
+ esas2r_trace_exit();
+
+ return false;
+ }
+
+ esas2r_rq_init_request(rq, a);
+
+ partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;
+
+ memset(partinfo, 0, sizeof(struct atto_vdapart_info));
+
+ esas2r_build_mgt_req(a,
+ rq,
+ VDAMGT_PART_INFO,
+ dc->scan_gen,
+ 0,
+ sizeof(struct atto_vdapart_info),
+ NULL);
+
+ partinfo->part_no = dc->part_num;
+
+ memcpy(&partinfo->grp_name[0],
+ &dc->raid_grp_name[0],
+ sizeof(partinfo->grp_name));
+
+ rq->comp_cb = esas2r_disc_part_info_cb;
+
+ rq->interrupt_cx = dc;
+
+ rslt = esas2r_disc_start_request(a, rq);
+
+ esas2r_trace_exit();
+
+ return rslt;
+}
+
+static void esas2r_disc_part_info_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ unsigned long flags;
+ struct atto_vdapart_info *partinfo;
+
+ esas2r_trace_enter();
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+
+ if (rq->req_stat == RS_SCAN_GEN) {
+ dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
+ dc->raid_grp_ix = 0;
+ dc->state = DCS_RAID_GRP_INFO;
+ } else if (rq->req_stat == RS_SUCCESS) {
+ partinfo = &rq->vda_rsp_data->mgt_data.data.part_info;
+
+ dc->part_num = partinfo->part_no;
+
+ dc->curr_virt_id = le16_to_cpu(partinfo->target_id);
+
+ esas2r_targ_db_add_raid(a, dc);
+
+ dc->part_num++;
+ } else {
+ if (!(rq->req_stat == RS_PART_LAST)) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "A request for RAID group partition info "
+ "failed - status:%d", rq->req_stat);
+ }
+
+ dc->state = DCS_RAID_GRP_INFO;
+ dc->raid_grp_ix++;
+ }
+
+ esas2r_rq_destroy_request(rq, a);
+
+ /* continue discovery if it's interrupt driven */
+
+ if (!(dc->flags & DCF_POLLED))
+ esas2r_disc_continue(a, rq);
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ esas2r_trace_exit();
+}
+
+static bool esas2r_disc_passthru_dev_info(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ bool rslt;
+ struct atto_vda_devinfo *devinfo;
+
+ esas2r_trace_enter();
+
+ esas2r_trace("dev_ix: %d", dc->dev_ix);
+
+ esas2r_rq_init_request(rq, a);
+
+ devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;
+
+ memset(devinfo, 0, sizeof(struct atto_vda_devinfo));
+
+ esas2r_build_mgt_req(a,
+ rq,
+ VDAMGT_DEV_PT_INFO,
+ dc->scan_gen,
+ dc->dev_ix,
+ sizeof(struct atto_vda_devinfo),
+ NULL);
+
+ rq->comp_cb = esas2r_disc_passthru_dev_info_cb;
+
+ rq->interrupt_cx = dc;
+
+ rslt = esas2r_disc_start_request(a, rq);
+
+ esas2r_trace_exit();
+
+ return rslt;
+}
+
+static void esas2r_disc_passthru_dev_info_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ unsigned long flags;
+ struct atto_vda_devinfo *devinfo;
+
+ esas2r_trace_enter();
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+
+ if (rq->req_stat == RS_SCAN_GEN) {
+ dc->scan_gen = rq->func_rsp.mgt_rsp.scan_generation;
+ dc->dev_ix = 0;
+ dc->state = DCS_PT_DEV_INFO;
+ } else if (rq->req_stat == RS_SUCCESS) {
+ devinfo = &rq->vda_rsp_data->mgt_data.data.dev_info;
+
+ dc->dev_ix = le16_to_cpu(rq->func_rsp.mgt_rsp.dev_index);
+
+ dc->curr_virt_id = le16_to_cpu(devinfo->target_id);
+
+ if (le16_to_cpu(devinfo->features) & VDADEVFEAT_PHYS_ID) {
+ dc->curr_phys_id =
+ le16_to_cpu(devinfo->phys_target_id);
+ dc->dev_addr_type = ATTO_GDA_AT_PORT;
+ dc->state = DCS_PT_DEV_ADDR;
+
+ esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
+ esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
+ } else {
+ dc->dev_ix++;
+ }
+ } else {
+ if (!(rq->req_stat == RS_DEV_INVALID)) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "A request for device information failed - "
+ "status:%d", rq->req_stat);
+ }
+
+ dc->state = DCS_DISC_DONE;
+ }
+
+ esas2r_rq_destroy_request(rq, a);
+
+ /* continue discovery if it's interrupt driven */
+
+ if (!(dc->flags & DCF_POLLED))
+ esas2r_disc_continue(a, rq);
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ esas2r_trace_exit();
+}
+
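+/*
+ * Request the address of the current pass through device via an
+ * ATTO_FUNC_GET_DEV_ADDR IOCTL.  The completion callback runs this twice per
+ * device: first with ATTO_GDA_AT_PORT to get the SAS address and then with
+ * ATTO_GDA_AT_UNIQUE to get the unique identifier used to add the target.
+ */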
+static bool esas2r_disc_passthru_dev_addr(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ bool rslt;
+ struct atto_ioctl *hi;
+ struct esas2r_sg_context sgc;
+
+ esas2r_trace_enter();
+
+ esas2r_rq_init_request(rq, a);
+
+ /* format the request. */
+
+ sgc.cur_offset = NULL;
+ sgc.get_phys_addr = (PGETPHYSADDR)esas2r_disc_get_phys_addr;
+ sgc.length = offsetof(struct atto_ioctl, data)
+ + sizeof(struct atto_hba_get_device_address);
+
+ esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge);
+
+ esas2r_build_ioctl_req(a, rq, sgc.length, VDA_IOCTL_HBA);
+
+ if (!esas2r_build_sg_list(a, rq, &sgc)) {
+ esas2r_rq_destroy_request(rq, a);
+
+ esas2r_trace_exit();
+
+ return false;
+ }
+
+ rq->comp_cb = esas2r_disc_passthru_dev_addr_cb;
+
+ rq->interrupt_cx = dc;
+
+ /* format the IOCTL data. */
+
+ hi = (struct atto_ioctl *)a->disc_buffer;
+
+ memset(a->disc_buffer, 0, ESAS2R_DISC_BUF_LEN);
+
+ hi->version = ATTO_VER_GET_DEV_ADDR0;
+ hi->function = ATTO_FUNC_GET_DEV_ADDR;
+ hi->flags = HBAF_TUNNEL;
+
+ hi->data.get_dev_addr.target_id = le32_to_cpu(dc->curr_phys_id);
+ hi->data.get_dev_addr.addr_type = dc->dev_addr_type;
+
+ /* start it up. */
+
+ rslt = esas2r_disc_start_request(a, rq);
+
+ esas2r_trace_exit();
+
+ return rslt;
+}
+
+static void esas2r_disc_passthru_dev_addr_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ struct esas2r_target *t = NULL;
+ unsigned long flags;
+ struct atto_ioctl *hi;
+ u16 addrlen;
+
+ esas2r_trace_enter();
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+
+ hi = (struct atto_ioctl *)a->disc_buffer;
+
+ if (rq->req_stat == RS_SUCCESS
+ && hi->status == ATTO_STS_SUCCESS) {
+ addrlen = le16_to_cpu(hi->data.get_dev_addr.addr_len);
+
+ if (dc->dev_addr_type == ATTO_GDA_AT_PORT) {
+ if (addrlen == sizeof(u64))
+ memcpy(&dc->sas_addr,
+ &hi->data.get_dev_addr.address[0],
+ addrlen);
+ else
+ memset(&dc->sas_addr, 0, sizeof(dc->sas_addr));
+
+ /* Get the unique identifier. */
+ dc->dev_addr_type = ATTO_GDA_AT_UNIQUE;
+
+ goto next_dev_addr;
+ } else {
+ /* Add the pass through target. */
+ if (HIBYTE(addrlen) == 0) {
+ t = esas2r_targ_db_add_pthru(a,
+ dc,
+ &hi->data.
+ get_dev_addr.
+ address[0],
+ (u8)hi->data.
+ get_dev_addr.
+ addr_len);
+
+ if (t)
+ memcpy(&t->sas_addr, &dc->sas_addr,
+ sizeof(t->sas_addr));
+ } else {
+ /* getting the back end data failed */
+
+ esas2r_log(ESAS2R_LOG_WARN,
+ "an error occurred retrieving the "
+ "back end data (%s:%d)",
+ __func__,
+ __LINE__);
+ }
+ }
+ } else {
+ /* getting the back end data failed */
+
+ esas2r_log(ESAS2R_LOG_WARN,
+ "an error occurred retrieving the back end data - "
+ "rq->req_stat:%d hi->status:%d",
+ rq->req_stat, hi->status);
+ }
+
+ /* proceed to the next device. */
+
+ if (dc->flags & DCF_DEV_SCAN) {
+ dc->dev_ix++;
+ dc->state = DCS_PT_DEV_INFO;
+ } else if (dc->flags & DCF_DEV_CHANGE) {
+ dc->curr_targ++;
+ dc->state = DCS_DEV_ADD;
+ } else {
+ esas2r_bugon();
+ }
+
+next_dev_addr:
+ esas2r_rq_destroy_request(rq, a);
+
+ /* continue discovery if it's interrupt driven */
+
+ if (!(dc->flags & DCF_POLLED))
+ esas2r_disc_continue(a, rq);
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ esas2r_trace_exit();
+}
+
+static u32 esas2r_disc_get_phys_addr(struct esas2r_sg_context *sgc, u64 *addr)
+{
+ struct esas2r_adapter *a = sgc->adapter;
+
+ if (sgc->length > ESAS2R_DISC_BUF_LEN)
+ esas2r_bugon();
+
+ *addr = a->uncached_phys
+ + (u64)((u8 *)a->disc_buffer - a->uncached);
+
+ return sgc->length;
+}
+
+static bool esas2r_disc_dev_remove(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ struct esas2r_target *t;
+ struct esas2r_target *t2;
+
+ esas2r_trace_enter();
+
+ /* process removals. */
+
+ for (t = a->targetdb; t < a->targetdb_end; t++) {
+ if (t->new_target_state != TS_NOT_PRESENT)
+ continue;
+
+ t->new_target_state = TS_INVALID;
+
+ /* remove the right target! */
+
+ t2 =
+ esas2r_targ_db_find_by_virt_id(a,
+ esas2r_targ_get_id(t,
+ a));
+
+ if (t2)
+ esas2r_targ_db_remove(a, t2);
+ }
+
+ /* removals complete. process arrivals. */
+
+ dc->state = DCS_DEV_ADD;
+ dc->curr_targ = a->targetdb;
+
+ esas2r_trace_exit();
+
+ return false;
+}
+
+static bool esas2r_disc_dev_add(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_disc_context *dc =
+ (struct esas2r_disc_context *)rq->interrupt_cx;
+ struct esas2r_target *t = dc->curr_targ;
+
+ if (t >= a->targetdb_end) {
+ /* done processing state changes. */
+
+ dc->state = DCS_DISC_DONE;
+ } else if (t->new_target_state == TS_PRESENT) {
+ struct atto_vda_ae_lu *luevt = &t->lu_event;
+
+ esas2r_trace_enter();
+
+ /* clear this now in case more events come in. */
+
+ t->new_target_state = TS_INVALID;
+
+ /* setup the discovery context for adding this device. */
+
+ dc->curr_virt_id = esas2r_targ_get_id(t, a);
+
+ if ((luevt->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
+ + sizeof(struct atto_vda_ae_lu_tgt_lun_raid))
+ && !(luevt->dwevent & VDAAE_LU_PASSTHROUGH)) {
+ dc->block_size = luevt->id.tgtlun_raid.dwblock_size;
+ dc->interleave = luevt->id.tgtlun_raid.dwinterleave;
+ } else {
+ dc->block_size = 0;
+ dc->interleave = 0;
+ }
+
+ /* determine the device type being added. */
+
+ if (luevt->dwevent & VDAAE_LU_PASSTHROUGH) {
+ if (luevt->dwevent & VDAAE_LU_PHYS_ID) {
+ dc->state = DCS_PT_DEV_ADDR;
+ dc->dev_addr_type = ATTO_GDA_AT_PORT;
+ dc->curr_phys_id = luevt->wphys_target_id;
+ } else {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "luevt->dwevent does not have the "
+ "VDAAE_LU_PHYS_ID bit set (%s:%d)",
+ __func__, __LINE__);
+ }
+ } else {
+ dc->raid_grp_name[0] = 0;
+
+ esas2r_targ_db_add_raid(a, dc);
+ }
+
+ esas2r_trace("curr_virt_id: %d", dc->curr_virt_id);
+ esas2r_trace("curr_phys_id: %d", dc->curr_phys_id);
+ esas2r_trace("dwevent: %d", luevt->dwevent);
+
+ esas2r_trace_exit();
+ }
+
+ if (dc->state == DCS_DEV_ADD) {
+ /* go to the next device. */
+
+ dc->curr_targ++;
+ }
+
+ return false;
+}
+
+/*
+ * When discovery is done, find all requests on defer queue and
+ * test if they need to be modified. If a target is no longer present
+ * then complete the request with RS_SEL. Otherwise, update the
+ * target_id since after a hibernate it can be a different value.
+ * VDA does not make passthrough target IDs persistent.
+ */
+static void esas2r_disc_fix_curr_requests(struct esas2r_adapter *a)
+{
+ unsigned long flags;
+ struct esas2r_target *t;
+ struct esas2r_request *rq;
+ struct list_head *element;
+
+ /* update virt_targ_id in any outstanding esas2r_requests */
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ list_for_each(element, &a->defer_list) {
+ rq = list_entry(element, struct esas2r_request, req_list);
+ if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
+ t = a->targetdb + rq->target_id;
+
+ if (t->target_state == TS_PRESENT)
+ rq->vrq->scsi.target_id = le16_to_cpu(
+ t->virt_targ_id);
+ else
+ rq->req_stat = RS_SEL;
+ }
+
+ }
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+}
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c
new file mode 100644
index 00000000000..8582929b1fe
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_flash.c
@@ -0,0 +1,1512 @@
+
+/*
+ * linux/drivers/scsi/esas2r/esas2r_flash.c
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+/* local macro defs */
+#define esas2r_nvramcalc_cksum(n) \
+ (esas2r_calc_byte_cksum((u8 *)(n), sizeof(struct esas2r_sas_nvram), \
+ SASNVR_CKSUM_SEED))
+#define esas2r_nvramcalc_xor_cksum(n) \
+ (esas2r_calc_byte_xor_cksum((u8 *)(n), \
+ sizeof(struct esas2r_sas_nvram), 0))
+
+#define ESAS2R_FS_DRVR_VER 2
+
+static struct esas2r_sas_nvram default_sas_nvram = {
+ { 'E', 'S', 'A', 'S' }, /* signature */
+ SASNVR_VERSION, /* version */
+ 0, /* checksum */
+ 31, /* max_lun_for_target */
+ SASNVR_PCILAT_MAX, /* pci_latency */
+ SASNVR1_BOOT_DRVR, /* options1 */
+ SASNVR2_HEARTBEAT | SASNVR2_SINGLE_BUS /* options2 */
+ | SASNVR2_SW_MUX_CTRL,
+ SASNVR_COAL_DIS, /* int_coalescing */
+ SASNVR_CMDTHR_NONE, /* cmd_throttle */
+ 3, /* dev_wait_time */
+ 1, /* dev_wait_count */
+ 0, /* spin_up_delay */
+ 0, /* ssp_align_rate */
+ { 0x50, 0x01, 0x08, 0x60, /* sas_addr */
+ 0x00, 0x00, 0x00, 0x00 },
+ { SASNVR_SPEED_AUTO }, /* phy_speed */
+ { SASNVR_MUX_DISABLED }, /* SAS multiplexing */
+ { 0 }, /* phy_flags */
+ SASNVR_SORT_SAS_ADDR, /* sort_type */
+ 3, /* dpm_reqcmd_lmt */
+ 3, /* dpm_stndby_time */
+ 0, /* dpm_active_time */
+ { 0 }, /* phy_target_id */
+ SASNVR_VSMH_DISABLED, /* virt_ses_mode */
+ SASNVR_RWM_DEFAULT, /* read_write_mode */
+ 0, /* link down timeout */
+ { 0 } /* reserved */
+};
+
+static u8 cmd_to_fls_func[] = {
+ 0xFF,
+ VDA_FLASH_READ,
+ VDA_FLASH_BEGINW,
+ VDA_FLASH_WRITE,
+ VDA_FLASH_COMMIT,
+ VDA_FLASH_CANCEL
+};
+
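+/*
+ * Compute a one byte XOR checksum.  Unaligned leading bytes and trailing
+ * bytes are folded in one at a time, the aligned middle is XORed in a dword
+ * at a time, and the four bytes of the 32-bit accumulator are combined into
+ * the final result.
+ */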
+static u8 esas2r_calc_byte_xor_cksum(u8 *addr, u32 len, u8 seed)
+{
+ u32 cksum = seed;
+ u8 *p = (u8 *)&cksum;
+
+ while (len) {
+ if (((uintptr_t)addr & 3) == 0)
+ break;
+
+ cksum = cksum ^ *addr;
+ addr++;
+ len--;
+ }
+ while (len >= sizeof(u32)) {
+ cksum = cksum ^ *(u32 *)addr;
+ addr += 4;
+ len -= 4;
+ }
+ while (len--) {
+ cksum = cksum ^ *addr;
+ addr++;
+ }
+ return p[0] ^ p[1] ^ p[2] ^ p[3];
+}
+
+static u8 esas2r_calc_byte_cksum(void *addr, u32 len, u8 seed)
+{
+ u8 *p = (u8 *)addr;
+ u8 cksum = seed;
+
+ while (len--)
+ cksum = cksum + p[len];
+ return cksum;
+}
+
+/* Interrupt callback to process FM API write requests. */
+static void esas2r_fmapi_callback(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct atto_vda_flash_req *vrq = &rq->vrq->flash;
+ struct esas2r_flash_context *fc =
+ (struct esas2r_flash_context *)rq->interrupt_cx;
+
+ if (rq->req_stat == RS_SUCCESS) {
+ /* Last request was successful. See what to do now. */
+ switch (vrq->sub_func) {
+ case VDA_FLASH_BEGINW:
+ if (fc->sgc.cur_offset == NULL)
+ goto commit;
+
+ vrq->sub_func = VDA_FLASH_WRITE;
+ rq->req_stat = RS_PENDING;
+ break;
+
+ case VDA_FLASH_WRITE:
+commit:
+ vrq->sub_func = VDA_FLASH_COMMIT;
+ rq->req_stat = RS_PENDING;
+ rq->interrupt_cb = fc->interrupt_cb;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (rq->req_stat != RS_PENDING)
+ /*
+ * All done. call the real callback to complete the FM API
+ * request. We should only get here if a BEGINW or WRITE
+ * operation failed.
+ */
+ (*fc->interrupt_cb)(a, rq);
+}
+
+/*
+ * Build a flash request based on the flash context. The request status
+ * is filled in on an error.
+ */
+static void build_flash_msg(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_flash_context *fc =
+ (struct esas2r_flash_context *)rq->interrupt_cx;
+ struct esas2r_sg_context *sgc = &fc->sgc;
+ u8 cksum = 0;
+
+ /* calculate the checksum */
+ if (fc->func == VDA_FLASH_BEGINW) {
+ if (sgc->cur_offset)
+ cksum = esas2r_calc_byte_xor_cksum(sgc->cur_offset,
+ sgc->length,
+ 0);
+ rq->interrupt_cb = esas2r_fmapi_callback;
+ } else {
+ rq->interrupt_cb = fc->interrupt_cb;
+ }
+ esas2r_build_flash_req(a,
+ rq,
+ fc->func,
+ cksum,
+ fc->flsh_addr,
+ sgc->length);
+
+ esas2r_rq_free_sg_lists(rq, a);
+
+ /*
+ * remember the length we asked for. we have to keep track of
+ * the current amount done so we know how much to compare when
+ * doing the verification phase.
+ */
+ fc->curr_len = fc->sgc.length;
+
+ if (sgc->cur_offset) {
+ /* setup the S/G context to build the S/G table */
+ esas2r_sgc_init(sgc, a, rq, &rq->vrq->flash.data.sge[0]);
+
+ if (!esas2r_build_sg_list(a, rq, sgc)) {
+ rq->req_stat = RS_BUSY;
+ return;
+ }
+ } else {
+ fc->sgc.length = 0;
+ }
+
+ /* update the flsh_addr to the next one to write to */
+ fc->flsh_addr += fc->curr_len;
+}
+
+/* determine the method to process the flash request */
+static bool load_image(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+ /*
+ * assume we have more to do. if we return with the status set to
+ * RS_PENDING, FM API tasks will continue.
+ */
+ rq->req_stat = RS_PENDING;
+ if (a->flags & AF_DEGRADED_MODE)
+ /* not supported for now */;
+ else
+ build_flash_msg(a, rq);
+
+ return rq->req_stat == RS_PENDING;
+}
+
+/* boot image fixer uppers called before downloading the image. */
+static void fix_bios(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
+{
+ struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_BIOS];
+ struct esas2r_pc_image *pi;
+ struct esas2r_boot_header *bh;
+
+ pi = (struct esas2r_pc_image *)((u8 *)fi + ch->image_offset);
+ bh =
+ (struct esas2r_boot_header *)((u8 *)pi +
+ le16_to_cpu(pi->header_offset));
+ bh->device_id = cpu_to_le16(a->pcid->device);
+
+ /* Recalculate the checksum in the PNP header if it is present */
+ if (pi->pnp_offset) {
+ u8 *pnp_header_bytes =
+ ((u8 *)pi + le16_to_cpu(pi->pnp_offset));
+
+ /* Identifier - dword that starts at byte 10 */
+ *((u32 *)&pnp_header_bytes[10]) =
+ cpu_to_le32(MAKEDWORD(a->pcid->subsystem_vendor,
+ a->pcid->subsystem_device));
+
+ /* Checksum - byte 9 */
+ pnp_header_bytes[9] -= esas2r_calc_byte_cksum(pnp_header_bytes,
+ 32, 0);
+ }
+
+ /* Recalculate the checksum needed by the PC */
+ pi->checksum = pi->checksum -
+ esas2r_calc_byte_cksum((u8 *)pi, ch->length, 0);
+}
+
+static void fix_efi(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
+{
+ struct esas2r_component_header *ch = &fi->cmp_hdr[CH_IT_EFI];
+ u32 len = ch->length;
+ u32 offset = ch->image_offset;
+ struct esas2r_efi_image *ei;
+ struct esas2r_boot_header *bh;
+
+ while (len) {
+ u32 thislen;
+
+ ei = (struct esas2r_efi_image *)((u8 *)fi + offset);
+ bh = (struct esas2r_boot_header *)((u8 *)ei +
+ le16_to_cpu(
+ ei->header_offset));
+ bh->device_id = cpu_to_le16(a->pcid->device);
+ thislen = (u32)le16_to_cpu(bh->image_length) * 512;
+
+ if (thislen > len)
+ break;
+
+ len -= thislen;
+ offset += thislen;
+ }
+}
+
+/* Complete a FM API request with the specified status. */
+static bool complete_fmapi_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq, u8 fi_stat)
+{
+ struct esas2r_flash_context *fc =
+ (struct esas2r_flash_context *)rq->interrupt_cx;
+ struct esas2r_flash_img *fi = fc->fi;
+
+ fi->status = fi_stat;
+ fi->driver_error = rq->req_stat;
+ rq->interrupt_cb = NULL;
+ rq->req_stat = RS_SUCCESS;
+
+ if (fi_stat != FI_STAT_IMG_VER)
+ memset(fc->scratch, 0, FM_BUF_SZ);
+
+ esas2r_enable_heartbeat(a);
+ esas2r_lock_clear_flags(&a->flags, AF_FLASH_LOCK);
+ return false;
+}
+
+/* Process each phase of the flash download process. */
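+/*
+ * The download walks the component images in a fixed order: BIOS, MAC, EFI
+ * and then CFG.  Each image that is present is written to flash and then
+ * read back in FM_BUF_SZ chunks into the scratch buffer so it can be
+ * compared against the source data before moving on to the next component.
+ */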
+static void fw_download_proc(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_flash_context *fc =
+ (struct esas2r_flash_context *)rq->interrupt_cx;
+ struct esas2r_flash_img *fi = fc->fi;
+ struct esas2r_component_header *ch;
+ u32 len;
+ u8 *p, *q;
+
+ /* If the previous operation failed, just return. */
+ if (rq->req_stat != RS_SUCCESS)
+ goto error;
+
+ /*
+ * If an upload just completed and the compare length is non-zero,
+ * then we just read back part of the image we just wrote. Verify the
+ * section and continue reading until the entire image is verified.
+ */
+ if (fc->func == VDA_FLASH_READ
+ && fc->cmp_len) {
+ ch = &fi->cmp_hdr[fc->comp_typ];
+
+ p = fc->scratch;
+ q = (u8 *)fi /* start of the whole gob */
+ + ch->image_offset /* start of the current image */
+ + ch->length /* end of the current image */
+ - fc->cmp_len; /* where we are now */
+
+ /*
+ * NOTE - curr_len is the exact count of bytes for the read
+ * even when the end is read and it's not a full buffer
+ */
+ for (len = fc->curr_len; len; len--)
+ if (*p++ != *q++)
+ goto error;
+
+ fc->cmp_len -= fc->curr_len; /* # left to compare */
+
+ /* Update fc and determine the length for the next upload */
+ if (fc->cmp_len > FM_BUF_SZ)
+ fc->sgc.length = FM_BUF_SZ;
+ else
+ fc->sgc.length = fc->cmp_len;
+
+ fc->sgc.cur_offset = fc->sgc_offset +
+ ((u8 *)fc->scratch - (u8 *)fi);
+ }
+
+ /*
+ * This code uses a 'while' statement since the next component may
+ * have a length = zero. This can happen since some components are
+ * not required. At the end of this 'while' we set up the length
+ * for the next request and therefore sgc.length can be = 0.
+ */
+ while (fc->sgc.length == 0) {
+ ch = &fi->cmp_hdr[fc->comp_typ];
+
+ switch (fc->task) {
+ case FMTSK_ERASE_BOOT:
+ /* the BIOS image is written next */
+ ch = &fi->cmp_hdr[CH_IT_BIOS];
+ if (ch->length == 0)
+ goto no_bios;
+
+ fc->task = FMTSK_WRTBIOS;
+ fc->func = VDA_FLASH_BEGINW;
+ fc->comp_typ = CH_IT_BIOS;
+ fc->flsh_addr = FLS_OFFSET_BOOT;
+ fc->sgc.length = ch->length;
+ fc->sgc.cur_offset = fc->sgc_offset +
+ ch->image_offset;
+ break;
+
+ case FMTSK_WRTBIOS:
+ /*
+ * The BIOS image has been written - read it and
+ * verify it
+ */
+ fc->task = FMTSK_READBIOS;
+ fc->func = VDA_FLASH_READ;
+ fc->flsh_addr = FLS_OFFSET_BOOT;
+ fc->cmp_len = ch->length;
+ fc->sgc.length = FM_BUF_SZ;
+ fc->sgc.cur_offset = fc->sgc_offset
+ + ((u8 *)fc->scratch -
+ (u8 *)fi);
+ break;
+
+ case FMTSK_READBIOS:
+no_bios:
+ /*
+ * Mark the component header status for the image
+ * completed
+ */
+ ch->status = CH_STAT_SUCCESS;
+
+ /* The MAC image is written next */
+ ch = &fi->cmp_hdr[CH_IT_MAC];
+ if (ch->length == 0)
+ goto no_mac;
+
+ fc->task = FMTSK_WRTMAC;
+ fc->func = VDA_FLASH_BEGINW;
+ fc->comp_typ = CH_IT_MAC;
+ fc->flsh_addr = FLS_OFFSET_BOOT
+ + fi->cmp_hdr[CH_IT_BIOS].length;
+ fc->sgc.length = ch->length;
+ fc->sgc.cur_offset = fc->sgc_offset +
+ ch->image_offset;
+ break;
+
+ case FMTSK_WRTMAC:
+ /* The MAC image has been written - read and verify */
+ fc->task = FMTSK_READMAC;
+ fc->func = VDA_FLASH_READ;
+ fc->flsh_addr -= ch->length;
+ fc->cmp_len = ch->length;
+ fc->sgc.length = FM_BUF_SZ;
+ fc->sgc.cur_offset = fc->sgc_offset
+ + ((u8 *)fc->scratch -
+ (u8 *)fi);
+ break;
+
+ case FMTSK_READMAC:
+no_mac:
+ /*
+ * Mark the component header status for the image
+ * completed
+ */
+ ch->status = CH_STAT_SUCCESS;
+
+ /* The EFI image is written next */
+ ch = &fi->cmp_hdr[CH_IT_EFI];
+ if (ch->length == 0)
+ goto no_efi;
+
+ fc->task = FMTSK_WRTEFI;
+ fc->func = VDA_FLASH_BEGINW;
+ fc->comp_typ = CH_IT_EFI;
+ fc->flsh_addr = FLS_OFFSET_BOOT
+ + fi->cmp_hdr[CH_IT_BIOS].length
+ + fi->cmp_hdr[CH_IT_MAC].length;
+ fc->sgc.length = ch->length;
+ fc->sgc.cur_offset = fc->sgc_offset +
+ ch->image_offset;
+ break;
+
+ case FMTSK_WRTEFI:
+ /* The EFI image has been written - read and verify */
+ fc->task = FMTSK_READEFI;
+ fc->func = VDA_FLASH_READ;
+ fc->flsh_addr -= ch->length;
+ fc->cmp_len = ch->length;
+ fc->sgc.length = FM_BUF_SZ;
+ fc->sgc.cur_offset = fc->sgc_offset
+ + ((u8 *)fc->scratch -
+ (u8 *)fi);
+ break;
+
+ case FMTSK_READEFI:
+no_efi:
+ /*
+ * Mark the component header status for the image
+ * completed
+ */
+ ch->status = CH_STAT_SUCCESS;
+
+ /* The CFG image is written next */
+ ch = &fi->cmp_hdr[CH_IT_CFG];
+
+ if (ch->length == 0)
+ goto no_cfg;
+ fc->task = FMTSK_WRTCFG;
+ fc->func = VDA_FLASH_BEGINW;
+ fc->comp_typ = CH_IT_CFG;
+ fc->flsh_addr = FLS_OFFSET_CPYR - ch->length;
+ fc->sgc.length = ch->length;
+ fc->sgc.cur_offset = fc->sgc_offset +
+ ch->image_offset;
+ break;
+
+ case FMTSK_WRTCFG:
+ /* The CFG image has been written - read and verify */
+ fc->task = FMTSK_READCFG;
+ fc->func = VDA_FLASH_READ;
+ fc->flsh_addr = FLS_OFFSET_CPYR - ch->length;
+ fc->cmp_len = ch->length;
+ fc->sgc.length = FM_BUF_SZ;
+ fc->sgc.cur_offset = fc->sgc_offset
+ + ((u8 *)fc->scratch -
+ (u8 *)fi);
+ break;
+
+ case FMTSK_READCFG:
+no_cfg:
+ /*
+ * Mark the component header status for the image
+ * completed
+ */
+ ch->status = CH_STAT_SUCCESS;
+
+ /*
+ * The download is complete. If in degraded mode,
+ * attempt a chip reset.
+ */
+ if (a->flags & AF_DEGRADED_MODE)
+ esas2r_local_reset_adapter(a);
+
+ a->flash_ver = fi->cmp_hdr[CH_IT_BIOS].version;
+ esas2r_print_flash_rev(a);
+
+ /* Update the type of boot image on the card */
+ memcpy(a->image_type, fi->rel_version,
+ sizeof(fi->rel_version));
+ complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
+ return;
+ }
+
+ /* If verifying, don't try reading more than what's there */
+ if (fc->func == VDA_FLASH_READ
+ && fc->sgc.length > fc->cmp_len)
+ fc->sgc.length = fc->cmp_len;
+ }
+
+ /* Build the request to perform the next action */
+ if (!load_image(a, rq)) {
+error:
+ if (fc->comp_typ < fi->num_comps) {
+ ch = &fi->cmp_hdr[fc->comp_typ];
+ ch->status = CH_STAT_FAILED;
+ }
+
+ complete_fmapi_req(a, rq, FI_STAT_FAILED);
+ }
+}
+
+/* Determine the flash image adap_typ for this adapter */
+static u8 get_fi_adap_type(struct esas2r_adapter *a)
+{
+ u8 type;
+
+ /* use the device ID to get the correct adap_typ for this HBA */
+ switch (a->pcid->device) {
+ case ATTO_DID_INTEL_IOP348:
+ type = FI_AT_SUN_LAKE;
+ break;
+
+ case ATTO_DID_MV_88RC9580:
+ case ATTO_DID_MV_88RC9580TS:
+ case ATTO_DID_MV_88RC9580TSE:
+ case ATTO_DID_MV_88RC9580TL:
+ type = FI_AT_MV_9580;
+ break;
+
+ default:
+ type = FI_AT_UNKNWN;
+ break;
+ }
+
+ return type;
+}
+
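+/*
+ * The config area is a series of blocks walked backwards from the supplied
+ * pointer: each block consists of its data followed by a 16-bit size and a
+ * 16-bit type word (FBT_CPYR, FBT_SETUP or FBT_FLASH_VER).  The flash
+ * version, if requested, is picked up from the FBT_FLASH_VER block.
+ */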
+/* Size of config + copyright + flash_ver images, 0 for failure. */
+static u32 chk_cfg(u8 *cfg, u32 length, u32 *flash_ver)
+{
+ u16 *pw = (u16 *)cfg - 1;
+ u32 sz = 0;
+ u32 len = length;
+
+ if (len == 0)
+ len = FM_BUF_SZ;
+
+ if (flash_ver)
+ *flash_ver = 0;
+
+ while (true) {
+ u16 type;
+ u16 size;
+
+ type = le16_to_cpu(*pw--);
+ size = le16_to_cpu(*pw--);
+
+ if (type != FBT_CPYR
+ && type != FBT_SETUP
+ && type != FBT_FLASH_VER)
+ break;
+
+ if (type == FBT_FLASH_VER
+ && flash_ver)
+ *flash_ver = le32_to_cpu(*(u32 *)(pw - 1));
+
+ sz += size + (2 * sizeof(u16));
+ pw -= size / sizeof(u16);
+
+ if (sz > len - (2 * sizeof(u16)))
+ break;
+ }
+
+ /* See if we are comparing the size to the specified length */
+ if (length && sz != length)
+ return 0;
+
+ return sz;
+}
+
+/* Verify that the boot image is valid */
+static u8 chk_boot(u8 *boot_img, u32 length)
+{
+ struct esas2r_boot_image *bi = (struct esas2r_boot_image *)boot_img;
+ u16 hdroffset = le16_to_cpu(bi->header_offset);
+ struct esas2r_boot_header *bh;
+
+ if (bi->signature != le16_to_cpu(0xaa55)
+ || (long)hdroffset >
+ (long)(65536L - sizeof(struct esas2r_boot_header))
+ || (hdroffset & 3)
+ || (hdroffset < sizeof(struct esas2r_boot_image))
+ || ((u32)hdroffset + sizeof(struct esas2r_boot_header) > length))
+ return 0xff;
+
+ bh = (struct esas2r_boot_header *)((char *)bi + hdroffset);
+
+ if (bh->signature[0] != 'P'
+ || bh->signature[1] != 'C'
+ || bh->signature[2] != 'I'
+ || bh->signature[3] != 'R'
+ || le16_to_cpu(bh->struct_length) <
+ (u16)sizeof(struct esas2r_boot_header)
+ || bh->class_code[2] != 0x01
+ || bh->class_code[1] != 0x04
+ || bh->class_code[0] != 0x00
+ || (bh->code_type != CODE_TYPE_PC
+ && bh->code_type != CODE_TYPE_OPEN
+ && bh->code_type != CODE_TYPE_EFI))
+ return 0xff;
+
+ return bh->code_type;
+}
+
+/* The sum of all the WORDS of the image */
+static u16 calc_fi_checksum(struct esas2r_flash_context *fc)
+{
+ struct esas2r_flash_img *fi = fc->fi;
+ u16 cksum;
+ u32 len;
+ u16 *pw;
+
+ for (len = (fi->length - fc->fi_hdr_len) / 2,
+ pw = (u16 *)((u8 *)fi + fc->fi_hdr_len),
+ cksum = 0;
+ len;
+ len--, pw++)
+ cksum = cksum + le16_to_cpu(*pw);
+
+ return cksum;
+}
+
+/*
+ * Verify the flash image structure. The following verifications will
+ * be performed:
+ * 1) verify the fi_version is correct
+ * 2) verify the checksum of the entire image.
+ * 3) validate the adap_typ, action and length fields.
+ * 4) validate each component header. check the img_type and
+ * length fields
+ * 5) validate each component image. validate signatures and
+ * local checksums
+ */
+static bool verify_fi(struct esas2r_adapter *a,
+ struct esas2r_flash_context *fc)
+{
+ struct esas2r_flash_img *fi = fc->fi;
+ u8 type;
+ bool imgerr;
+ u16 i;
+ u32 len;
+ struct esas2r_component_header *ch;
+
+ /* Verify the length - length must be even since we do a word checksum */
+ len = fi->length;
+
+ if ((len & 1)
+ || len < fc->fi_hdr_len) {
+ fi->status = FI_STAT_LENGTH;
+ return false;
+ }
+
+ /* Get adapter type and verify type in flash image */
+ type = get_fi_adap_type(a);
+ if ((type == FI_AT_UNKNWN) || (fi->adap_typ != type)) {
+ fi->status = FI_STAT_ADAPTYP;
+ return false;
+ }
+
+ /*
+ * Loop through each component and verify the img_type and length
+ * fields. Keep a running count of the sizes so we can verify the
+ * total size against the sum of the component sizes.
+ */
+ imgerr = false;
+
+ for (i = 0, len = 0, ch = fi->cmp_hdr;
+ i < fi->num_comps;
+ i++, ch++) {
+ bool cmperr = false;
+
+ /*
+ * Verify that the component header has the same index as the
+ * image type. The headers must be ordered correctly
+ */
+ if (i != ch->img_type) {
+ imgerr = true;
+ ch->status = CH_STAT_INVALID;
+ continue;
+ }
+
+ switch (ch->img_type) {
+ case CH_IT_BIOS:
+ type = CODE_TYPE_PC;
+ break;
+
+ case CH_IT_MAC:
+ type = CODE_TYPE_OPEN;
+ break;
+
+ case CH_IT_EFI:
+ type = CODE_TYPE_EFI;
+ break;
+ }
+
+ switch (ch->img_type) {
+ case CH_IT_FW:
+ case CH_IT_NVR:
+ break;
+
+ case CH_IT_BIOS:
+ case CH_IT_MAC:
+ case CH_IT_EFI:
+ if (ch->length & 0x1ff)
+ cmperr = true;
+
+ /* Test if component image is present */
+ if (ch->length == 0)
+ break;
+
+ /* Image is present - verify the image */
+ if (chk_boot((u8 *)fi + ch->image_offset, ch->length)
+ != type)
+ cmperr = true;
+
+ break;
+
+ case CH_IT_CFG:
+
+ /* Test if component image is present */
+ if (ch->length == 0) {
+ cmperr = true;
+ break;
+ }
+
+ /* Image is present - verify the image */
+ if (!chk_cfg((u8 *)fi + ch->image_offset + ch->length,
+ ch->length, NULL))
+ cmperr = true;
+
+ break;
+
+ default:
+
+ fi->status = FI_STAT_UNKNOWN;
+ return false;
+ }
+
+ if (cmperr) {
+ imgerr = true;
+ ch->status = CH_STAT_INVALID;
+ } else {
+ ch->status = CH_STAT_PENDING;
+ len += ch->length;
+ }
+ }
+
+ if (imgerr) {
+ fi->status = FI_STAT_MISSING;
+ return false;
+ }
+
+ /* Compare fi->length to the sum of ch->length fields */
+ if (len != fi->length - fc->fi_hdr_len) {
+ fi->status = FI_STAT_LENGTH;
+ return false;
+ }
+
+ /* Compute the checksum - it should come out zero */
+ if (fi->checksum != calc_fi_checksum(fc)) {
+ fi->status = FI_STAT_CHKSUM;
+ return false;
+ }
+
+ return true;
+}
+
+/* Fill in the FS IOCTL response data from a completed request. */
+static void esas2r_complete_fs_ioctl(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_ioctl_fs *fs =
+ (struct esas2r_ioctl_fs *)rq->interrupt_cx;
+
+ if (rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
+ esas2r_enable_heartbeat(a);
+
+ fs->driver_error = rq->req_stat;
+
+ if (fs->driver_error == RS_SUCCESS)
+ fs->status = ATTO_STS_SUCCESS;
+ else
+ fs->status = ATTO_STS_FAILED;
+}
+
+/* Prepare an FS IOCTL request to be sent to the firmware. */
+bool esas2r_process_fs_ioctl(struct esas2r_adapter *a,
+ struct esas2r_ioctl_fs *fs,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc)
+{
+ u8 cmdcnt = (u8)ARRAY_SIZE(cmd_to_fls_func);
+ struct esas2r_ioctlfs_command *fsc = &fs->command;
+ u8 func = 0;
+ u32 datalen;
+
+ fs->status = ATTO_STS_FAILED;
+ fs->driver_error = RS_PENDING;
+
+ if (fs->version > ESAS2R_FS_VER) {
+ fs->status = ATTO_STS_INV_VERSION;
+ return false;
+ }
+
+ func = cmd_to_fls_func[fsc->command];
+ if (fsc->command >= cmdcnt || func == 0xFF) {
+ fs->status = ATTO_STS_INV_FUNC;
+ return false;
+ }
+
+ if (fsc->command != ESAS2R_FS_CMD_CANCEL) {
+ if ((a->pcid->device != ATTO_DID_MV_88RC9580
+ || fs->adap_type != ESAS2R_FS_AT_ESASRAID2)
+ && (a->pcid->device != ATTO_DID_MV_88RC9580TS
+ || fs->adap_type != ESAS2R_FS_AT_TSSASRAID2)
+ && (a->pcid->device != ATTO_DID_MV_88RC9580TSE
+ || fs->adap_type != ESAS2R_FS_AT_TSSASRAID2E)
+ && (a->pcid->device != ATTO_DID_MV_88RC9580TL
+ || fs->adap_type != ESAS2R_FS_AT_TLSASHBA)) {
+ fs->status = ATTO_STS_INV_ADAPTER;
+ return false;
+ }
+
+ if (fs->driver_ver > ESAS2R_FS_DRVR_VER) {
+ fs->status = ATTO_STS_INV_DRVR_VER;
+ return false;
+ }
+ }
+
+ if (a->flags & AF_DEGRADED_MODE) {
+ fs->status = ATTO_STS_DEGRADED;
+ return false;
+ }
+
+ rq->interrupt_cb = esas2r_complete_fs_ioctl;
+ rq->interrupt_cx = fs;
+ datalen = le32_to_cpu(fsc->length);
+ esas2r_build_flash_req(a,
+ rq,
+ func,
+ fsc->checksum,
+ le32_to_cpu(fsc->flash_addr),
+ datalen);
+
+ if (func == VDA_FLASH_WRITE
+ || func == VDA_FLASH_READ) {
+ if (datalen == 0) {
+ fs->status = ATTO_STS_INV_FUNC;
+ return false;
+ }
+
+ esas2r_sgc_init(sgc, a, rq, rq->vrq->flash.data.sge);
+ sgc->length = datalen;
+
+ if (!esas2r_build_sg_list(a, rq, sgc)) {
+ fs->status = ATTO_STS_OUT_OF_RSRC;
+ return false;
+ }
+ }
+
+ if (func == VDA_FLASH_COMMIT)
+ esas2r_disable_heartbeat(a);
+
+ esas2r_start_request(a, rq);
+
+ return true;
+}
+
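+/*
+ * Acquire or release flash access from the firmware using the doorbell
+ * registers: write the request to MU_DOORBELL_IN, then poll MU_INT_STATUS_OUT
+ * until the firmware acknowledges it through MU_DOORBELL_OUT.  Give up after
+ * five seconds (40 seconds while a chip reset or discovery is pending).
+ */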
+static bool esas2r_flash_access(struct esas2r_adapter *a, u32 function)
+{
+ u32 starttime;
+ u32 timeout;
+ u32 intstat;
+ u32 doorbell;
+
+ /* Disable chip interrupts awhile */
+ if (function == DRBL_FLASH_REQ)
+ esas2r_disable_chip_interrupts(a);
+
+ /* Issue the request to the firmware */
+ esas2r_write_register_dword(a, MU_DOORBELL_IN, function);
+
+ /* Now wait for the firmware to process it */
+ starttime = jiffies_to_msecs(jiffies);
+ timeout = a->flags &
+ (AF_CHPRST_PENDING | AF_DISC_PENDING) ? 40000 : 5000;
+
+ while (true) {
+ intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
+
+ if (intstat & MU_INTSTAT_DRBL) {
+ /* Got a doorbell interrupt. Check for the function */
+ doorbell =
+ esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ doorbell);
+ if (doorbell & function)
+ break;
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+ if ((jiffies_to_msecs(jiffies) - starttime) > timeout) {
+ /*
+ * Timeout. If we were requesting flash access,
+ * indicate we are done so the firmware knows we gave
+ * up. If this was a REQ, we also need to re-enable
+ * chip interrupts.
+ */
+ if (function == DRBL_FLASH_REQ) {
+ esas2r_hdebug("flash access timeout");
+ esas2r_write_register_dword(a, MU_DOORBELL_IN,
+ DRBL_FLASH_DONE);
+ esas2r_enable_chip_interrupts(a);
+ } else {
+ esas2r_hdebug("flash release timeout");
+ }
+
+ return false;
+ }
+ }
+
+ /* if we're done, re-enable chip interrupts */
+ if (function == DRBL_FLASH_DONE)
+ esas2r_enable_chip_interrupts(a);
+
+ return true;
+}
+
+#define WINDOW_SIZE ((signed int)MW_DATA_WINDOW_SIZE)
+
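+/*
+ * Read a block of flash through the memory mapped data window.  The window
+ * only covers WINDOW_SIZE bytes, so the source range is read in window sized
+ * pieces: map the window containing the current offset (serial or parallel
+ * flash depending on AF2_SERIAL_FLASH) and copy bytes out one at a time.
+ */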
+bool esas2r_read_flash_block(struct esas2r_adapter *a,
+ void *to,
+ u32 from,
+ u32 size)
+{
+ u8 *end = (u8 *)to;
+
+ /* Try to acquire access to the flash */
+ if (!esas2r_flash_access(a, DRBL_FLASH_REQ))
+ return false;
+
+ while (size) {
+ u32 len;
+ u32 offset;
+ u32 iatvr;
+
+ if (a->flags2 & AF2_SERIAL_FLASH)
+ iatvr = MW_DATA_ADDR_SER_FLASH + (from & -WINDOW_SIZE);
+ else
+ iatvr = MW_DATA_ADDR_PAR_FLASH + (from & -WINDOW_SIZE);
+
+ esas2r_map_data_window(a, iatvr);
+ offset = from & (WINDOW_SIZE - 1);
+ len = size;
+
+ if (len > WINDOW_SIZE - offset)
+ len = WINDOW_SIZE - offset;
+
+ from += len;
+ size -= len;
+
+ while (len--) {
+ *end++ = esas2r_read_data_byte(a, offset);
+ offset++;
+ }
+ }
+
+ /* Release flash access */
+ esas2r_flash_access(a, DRBL_FLASH_DONE);
+ return true;
+}
+
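+/*
+ * Determine the flash component revision.  Read the area just below the
+ * copyright image and walk the config blocks backwards, skipping copyright
+ * and setup blocks, looking for an FBT_FLASH_VER block; the result is then
+ * validated and formatted by esas2r_print_flash_rev().
+ */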
+bool esas2r_read_flash_rev(struct esas2r_adapter *a)
+{
+ u8 bytes[256];
+ u16 *pw;
+ u16 *pwstart;
+ u16 type;
+ u16 size;
+ u32 sz;
+
+ sz = sizeof(bytes);
+ pw = (u16 *)(bytes + sz);
+ pwstart = (u16 *)bytes + 2;
+
+ if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_CPYR - sz, sz))
+ goto invalid_rev;
+
+ while (pw >= pwstart) {
+ pw--;
+ type = le16_to_cpu(*pw);
+ pw--;
+ size = le16_to_cpu(*pw);
+ pw -= size / 2;
+
+ if (type == FBT_CPYR
+ || type == FBT_SETUP
+ || pw < pwstart)
+ continue;
+
+ if (type == FBT_FLASH_VER)
+ a->flash_ver = le32_to_cpu(*(u32 *)pw);
+
+ break;
+ }
+
+invalid_rev:
+ return esas2r_print_flash_rev(a);
+}
+
+bool esas2r_print_flash_rev(struct esas2r_adapter *a)
+{
+ u16 year = LOWORD(a->flash_ver);
+ u8 day = LOBYTE(HIWORD(a->flash_ver));
+ u8 month = HIBYTE(HIWORD(a->flash_ver));
+
+ if (day == 0
+ || month == 0
+ || day > 31
+ || month > 12
+ || year < 2006
+ || year > 9999) {
+ strcpy(a->flash_rev, "not found");
+ a->flash_ver = 0;
+ return false;
+ }
+
+ sprintf(a->flash_rev, "%02d/%02d/%04d", month, day, year);
+ esas2r_hdebug("flash version: %s", a->flash_rev);
+ return true;
+}
+
+/*
+ * Find the type of boot image that is currently in the flash.
+ * The chip only has 64 KB of PCI-e expansion ROM space,
+ * so only one image can be flashed at a time.
+ */
+bool esas2r_read_image_type(struct esas2r_adapter *a)
+{
+ u8 bytes[256];
+ struct esas2r_boot_image *bi;
+ struct esas2r_boot_header *bh;
+ u32 sz;
+ u32 len;
+ u32 offset;
+
+ /* Start at the base of the boot images and look for a valid image */
+ sz = sizeof(bytes);
+ len = FLS_LENGTH_BOOT;
+ offset = 0;
+
+ while (true) {
+ if (!esas2r_read_flash_block(a, bytes, FLS_OFFSET_BOOT +
+ offset,
+ sz))
+ goto invalid_rev;
+
+ bi = (struct esas2r_boot_image *)bytes;
+ bh = (struct esas2r_boot_header *)((u8 *)bi +
+ le16_to_cpu(
+ bi->header_offset));
+ if (bi->signature != cpu_to_le16(0xAA55))
+ goto invalid_rev;
+
+ if (bh->code_type == CODE_TYPE_PC) {
+ strcpy(a->image_type, "BIOS");
+
+ return true;
+ } else if (bh->code_type == CODE_TYPE_EFI) {
+ struct esas2r_efi_image *ei;
+
+ /*
+ * So we have an EFI image. There are several types,
+ * so see which architecture we have.
+ */
+ ei = (struct esas2r_efi_image *)bytes;
+
+ switch (le16_to_cpu(ei->machine_type)) {
+ case EFI_MACHINE_IA32:
+ strcpy(a->image_type, "EFI 32-bit");
+ return true;
+
+ case EFI_MACHINE_IA64:
+ strcpy(a->image_type, "EFI itanium");
+ return true;
+
+ case EFI_MACHINE_X64:
+ strcpy(a->image_type, "EFI 64-bit");
+ return true;
+
+ case EFI_MACHINE_EBC:
+ strcpy(a->image_type, "EFI EBC");
+ return true;
+
+ default:
+ goto invalid_rev;
+ }
+ } else {
+ u32 thislen;
+
+ /* jump to the next image */
+ thislen = (u32)le16_to_cpu(bh->image_length) * 512;
+ if (thislen == 0
+ || thislen + offset > len
+ || bh->indicator == INDICATOR_LAST)
+ break;
+
+ offset += thislen;
+ }
+ }
+
+invalid_rev:
+ strcpy(a->image_type, "no boot images");
+ return false;
+}
+
+/*
+ * Read and validate current NVRAM parameters by accessing
+ * physical NVRAM directly. if currently stored parameters are
+ * invalid, use the defaults.
+ */
+bool esas2r_nvram_read_direct(struct esas2r_adapter *a)
+{
+ bool result;
+
+ if (down_interruptible(&a->nvram_semaphore))
+ return false;
+
+ if (!esas2r_read_flash_block(a, a->nvram, FLS_OFFSET_NVR,
+ sizeof(struct esas2r_sas_nvram))) {
+ esas2r_hdebug("NVRAM read failed, using defaults");
+ return false;
+ }
+
+ result = esas2r_nvram_validate(a);
+
+ up(&a->nvram_semaphore);
+
+ return result;
+}
+
+/* Interrupt callback to process NVRAM completions. */
+static void esas2r_nvram_callback(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct atto_vda_flash_req *vrq = &rq->vrq->flash;
+
+ if (rq->req_stat == RS_SUCCESS) {
+ /* Last request was successful. See what to do now. */
+
+ switch (vrq->sub_func) {
+ case VDA_FLASH_BEGINW:
+ vrq->sub_func = VDA_FLASH_WRITE;
+ rq->req_stat = RS_PENDING;
+ break;
+
+ case VDA_FLASH_WRITE:
+ vrq->sub_func = VDA_FLASH_COMMIT;
+ rq->req_stat = RS_PENDING;
+ break;
+
+ case VDA_FLASH_READ:
+ esas2r_nvram_validate(a);
+ break;
+
+ case VDA_FLASH_COMMIT:
+ default:
+ break;
+ }
+ }
+
+ if (rq->req_stat != RS_PENDING) {
+ /* update the NVRAM state */
+ if (rq->req_stat == RS_SUCCESS)
+ esas2r_lock_set_flags(&a->flags, AF_NVR_VALID);
+ else
+ esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);
+
+ esas2r_enable_heartbeat(a);
+
+ up(&a->nvram_semaphore);
+ }
+}
+
+/*
+ * Write the contents of nvram to the adapter's physical NVRAM.
+ * The cached copy of the NVRAM is also updated.
+ */
+bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
+ struct esas2r_sas_nvram *nvram)
+{
+ struct esas2r_sas_nvram *n = nvram;
+ u8 sas_address_bytes[8];
+ u32 *sas_address_dwords = (u32 *)&sas_address_bytes[0];
+ struct atto_vda_flash_req *vrq = &rq->vrq->flash;
+
+ if (a->flags & AF_DEGRADED_MODE)
+ return false;
+
+ if (down_interruptible(&a->nvram_semaphore))
+ return false;
+
+ if (n == NULL)
+ n = a->nvram;
+
+ /* check the validity of the settings */
+ if (n->version > SASNVR_VERSION) {
+ up(&a->nvram_semaphore);
+ return false;
+ }
+
+ memcpy(&sas_address_bytes[0], n->sas_addr, 8);
+
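+ /*
+ * Require an NAA=5 world-wide name carrying what appears to
+ * be ATTO's IEEE OUI (00:10:86) and a non-zero unique portion.
+ */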
+ if (sas_address_bytes[0] != 0x50
+ || sas_address_bytes[1] != 0x01
+ || sas_address_bytes[2] != 0x08
+ || (sas_address_bytes[3] & 0xF0) != 0x60
+ || ((sas_address_bytes[3] & 0x0F) | sas_address_dwords[1]) == 0) {
+ up(&a->nvram_semaphore);
+ return false;
+ }
+
+ if (n->spin_up_delay > SASNVR_SPINUP_MAX)
+ n->spin_up_delay = SASNVR_SPINUP_MAX;
+
+ n->version = SASNVR_VERSION;
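+ /*
+ * Fold the checksum so that esas2r_nvramcalc_cksum() over the
+ * updated structure comes out to zero (it presumably sums every
+ * byte, including the checksum field itself).
+ */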
+ n->checksum = n->checksum - esas2r_nvramcalc_cksum(n);
+ memcpy(a->nvram, n, sizeof(struct esas2r_sas_nvram));
+
+ /* write the NVRAM */
+ n = a->nvram;
+ esas2r_disable_heartbeat(a);
+
+ esas2r_build_flash_req(a,
+ rq,
+ VDA_FLASH_BEGINW,
+ esas2r_nvramcalc_xor_cksum(n),
+ FLS_OFFSET_NVR,
+ sizeof(struct esas2r_sas_nvram));
+
+ if (a->flags & AF_LEGACY_SGE_MODE) {
+
+ vrq->data.sge[0].length =
+ cpu_to_le32(SGE_LAST |
+ sizeof(struct esas2r_sas_nvram));
+ vrq->data.sge[0].address = cpu_to_le64(
+ a->uncached_phys + (u64)((u8 *)n - a->uncached));
+ } else {
+ vrq->data.prde[0].ctl_len =
+ cpu_to_le32(sizeof(struct esas2r_sas_nvram));
+ vrq->data.prde[0].address = cpu_to_le64(
+ a->uncached_phys
+ + (u64)((u8 *)n - a->uncached));
+ }
+ rq->interrupt_cb = esas2r_nvram_callback;
+ esas2r_start_request(a, rq);
+ return true;
+}
+
+/* Validate the cached NVRAM. If the NVRAM is invalid, load the defaults. */
+bool esas2r_nvram_validate(struct esas2r_adapter *a)
+{
+ struct esas2r_sas_nvram *n = a->nvram;
+ bool rslt = false;
+
+ if (n->signature[0] != 'E'
+ || n->signature[1] != 'S'
+ || n->signature[2] != 'A'
+ || n->signature[3] != 'S') {
+ esas2r_hdebug("invalid NVRAM signature");
+ } else if (esas2r_nvramcalc_cksum(n)) {
+ esas2r_hdebug("invalid NVRAM checksum");
+ } else if (n->version > SASNVR_VERSION) {
+ esas2r_hdebug("invalid NVRAM version");
+ } else {
+ esas2r_lock_set_flags(&a->flags, AF_NVR_VALID);
+ rslt = true;
+ }
+
+ if (rslt == false) {
+ esas2r_hdebug("using defaults");
+ esas2r_nvram_set_defaults(a);
+ }
+
+ return rslt;
+}
+
+/*
+ * Set the cached NVRAM to defaults. Note that this function sets the default
+ * NVRAM when it has been determined that the physical NVRAM is invalid.
+ * In this case, the SAS address is fabricated.
+ */
+void esas2r_nvram_set_defaults(struct esas2r_adapter *a)
+{
+ struct esas2r_sas_nvram *n = a->nvram;
+ u32 time = jiffies_to_msecs(jiffies);
+
+ esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID);
+ memcpy(n, &default_sas_nvram, sizeof(struct esas2r_sas_nvram));
+ n->sas_addr[3] |= 0x0F;
+ n->sas_addr[4] = HIBYTE(LOWORD(time));
+ n->sas_addr[5] = LOBYTE(LOWORD(time));
+ n->sas_addr[6] = a->pcid->bus->number;
+ n->sas_addr[7] = a->pcid->devfn;
+}
+
+void esas2r_nvram_get_defaults(struct esas2r_adapter *a,
+ struct esas2r_sas_nvram *nvram)
+{
+ u8 sas_addr[8];
+
+ /*
+ * in case we are copying the defaults into the adapter, copy the SAS
+ * address out first.
+ */
+ memcpy(&sas_addr[0], a->nvram->sas_addr, 8);
+ memcpy(nvram, &default_sas_nvram, sizeof(struct esas2r_sas_nvram));
+ memcpy(&nvram->sas_addr[0], &sas_addr[0], 8);
+}
+
+bool esas2r_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi,
+ struct esas2r_request *rq, struct esas2r_sg_context *sgc)
+{
+ struct esas2r_flash_context *fc = &a->flash_context;
+ u8 j;
+ struct esas2r_component_header *ch;
+
+ if (esas2r_lock_set_flags(&a->flags, AF_FLASH_LOCK) & AF_FLASH_LOCK) {
+ /* flag was already set */
+ fi->status = FI_STAT_BUSY;
+ return false;
+ }
+
+ memcpy(&fc->sgc, sgc, sizeof(struct esas2r_sg_context));
+ sgc = &fc->sgc;
+ fc->fi = fi;
+ fc->sgc_offset = sgc->cur_offset;
+ rq->req_stat = RS_SUCCESS;
+ rq->interrupt_cx = fc;
+
+ switch (fi->fi_version) {
+ case FI_VERSION_1:
+ fc->scratch = ((struct esas2r_flash_img *)fi)->scratch_buf;
+ fc->num_comps = FI_NUM_COMPS_V1;
+ fc->fi_hdr_len = sizeof(struct esas2r_flash_img);
+ break;
+
+ default:
+ return complete_fmapi_req(a, rq, FI_STAT_IMG_VER);
+ }
+
+ if (a->flags & AF_DEGRADED_MODE)
+ return complete_fmapi_req(a, rq, FI_STAT_DEGRADED);
+
+ switch (fi->action) {
+ case FI_ACT_DOWN: /* Download the components */
+ /* Verify the format of the flash image */
+ if (!verify_fi(a, fc))
+ return complete_fmapi_req(a, rq, fi->status);
+
+ /* Adjust the BIOS fields that are dependent on the HBA */
+ ch = &fi->cmp_hdr[CH_IT_BIOS];
+
+ if (ch->length)
+ fix_bios(a, fi);
+
+ /* Adjust the EFI fields that are dependent on the HBA */
+ ch = &fi->cmp_hdr[CH_IT_EFI];
+
+ if (ch->length)
+ fix_efi(a, fi);
+
+ /*
+ * Since the image was just modified, compute the checksum on
+ * the modified image. First update the CRC for the composite
+ * expansion ROM image.
+ */
+ fi->checksum = calc_fi_checksum(fc);
+
+ /* Disable the heartbeat */
+ esas2r_disable_heartbeat(a);
+
+ /* Now start up the download sequence */
+ fc->task = FMTSK_ERASE_BOOT;
+ fc->func = VDA_FLASH_BEGINW;
+ fc->comp_typ = CH_IT_CFG;
+ fc->flsh_addr = FLS_OFFSET_BOOT;
+ fc->sgc.length = FLS_LENGTH_BOOT;
+ fc->sgc.cur_offset = NULL;
+
+ /* Setup the callback address */
+ fc->interrupt_cb = fw_download_proc;
+ break;
+
+ case FI_ACT_UPSZ: /* Get upload sizes */
+ fi->adap_typ = get_fi_adap_type(a);
+ fi->flags = 0;
+ fi->num_comps = fc->num_comps;
+ fi->length = fc->fi_hdr_len;
+
+ /* Report the type of boot image in the rel_version string */
+ memcpy(fi->rel_version, a->image_type,
+ sizeof(fi->rel_version));
+
+ /* Build the component headers */
+ for (j = 0, ch = fi->cmp_hdr;
+ j < fi->num_comps;
+ j++, ch++) {
+ ch->img_type = j;
+ ch->status = CH_STAT_PENDING;
+ ch->length = 0;
+ ch->version = 0xffffffff;
+ ch->image_offset = 0;
+ ch->pad[0] = 0;
+ ch->pad[1] = 0;
+ }
+
+ if (a->flash_ver != 0) {
+ fi->cmp_hdr[CH_IT_BIOS].version =
+ fi->cmp_hdr[CH_IT_MAC].version =
+ fi->cmp_hdr[CH_IT_EFI].version =
+ fi->cmp_hdr[CH_IT_CFG].version
+ = a->flash_ver;
+
+ fi->cmp_hdr[CH_IT_BIOS].status =
+ fi->cmp_hdr[CH_IT_MAC].status =
+ fi->cmp_hdr[CH_IT_EFI].status =
+ fi->cmp_hdr[CH_IT_CFG].status =
+ CH_STAT_SUCCESS;
+
+ return complete_fmapi_req(a, rq, FI_STAT_SUCCESS);
+ }
+
+ /* fall through */
+
+ case FI_ACT_UP: /* Upload the components */
+ default:
+ return complete_fmapi_req(a, rq, FI_STAT_INVALID);
+ }
+
+ /*
+ * If we make it here, fc has been setup to do the first task. Call
+ * load_image to format the request, start it, and get out. The
+ * interrupt code will call the callback when the first message is
+ * complete.
+ */
+ if (!load_image(a, rq))
+ return complete_fmapi_req(a, rq, FI_STAT_FAILED);
+
+ esas2r_start_request(a, rq);
+
+ return true;
+}
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c
new file mode 100644
index 00000000000..3a798e7d5c5
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_init.c
@@ -0,0 +1,1773 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_init.c
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+static bool esas2r_initmem_alloc(struct esas2r_adapter *a,
+ struct esas2r_mem_desc *mem_desc,
+ u32 align)
+{
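+ /*
+ * Over-allocate by "align" bytes so that both the virtual and
+ * physical addresses can be rounded up to the requested
+ * alignment. The original pointer and size are preserved in
+ * esas2r_data/esas2r_param for esas2r_initmem_free().
+ */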
+ mem_desc->esas2r_param = mem_desc->size + align;
+ mem_desc->virt_addr = NULL;
+ mem_desc->phys_addr = 0;
+ mem_desc->esas2r_data = dma_alloc_coherent(&a->pcid->dev,
+ (size_t)mem_desc->esas2r_param,
+ (dma_addr_t *)&mem_desc->phys_addr,
+ GFP_KERNEL);
+
+ if (mem_desc->esas2r_data == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to allocate %lu bytes of consistent memory!",
+ (unsigned long)mem_desc->esas2r_param);
+ return false;
+ }
+
+ mem_desc->virt_addr = PTR_ALIGN(mem_desc->esas2r_data, align);
+ mem_desc->phys_addr = ALIGN(mem_desc->phys_addr, align);
+ memset(mem_desc->virt_addr, 0, mem_desc->size);
+ return true;
+}
+
+static void esas2r_initmem_free(struct esas2r_adapter *a,
+ struct esas2r_mem_desc *mem_desc)
+{
+ if (mem_desc->virt_addr == NULL)
+ return;
+
+ /*
+ * Careful! phys_addr and virt_addr may have been adjusted from the
+ * original allocation in order to return the desired alignment. That
+ * means we have to use the original address (in esas2r_data) and size
+ * (esas2r_param) and calculate the original physical address based on
+ * the difference between the requested and actual allocation size.
+ */
+ if (mem_desc->phys_addr) {
+ int unalign = ((u8 *)mem_desc->virt_addr) -
+ ((u8 *)mem_desc->esas2r_data);
+
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)mem_desc->esas2r_param,
+ mem_desc->esas2r_data,
+ (dma_addr_t)(mem_desc->phys_addr - unalign));
+ } else {
+ kfree(mem_desc->esas2r_data);
+ }
+
+ mem_desc->virt_addr = NULL;
+}
+
+static bool alloc_vda_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_mem_desc *memdesc = kzalloc(
+ sizeof(struct esas2r_mem_desc), GFP_KERNEL);
+
+ if (memdesc == NULL) {
+ esas2r_hdebug("could not alloc mem for vda request memdesc\n");
+ return false;
+ }
+
+ memdesc->size = sizeof(union atto_vda_req) +
+ ESAS2R_DATA_BUF_LEN;
+
+ if (!esas2r_initmem_alloc(a, memdesc, 256)) {
+ esas2r_hdebug("could not alloc mem for vda request\n");
+ kfree(memdesc);
+ return false;
+ }
+
+ a->num_vrqs++;
+ list_add(&memdesc->next_desc, &a->vrq_mds_head);
+
+ rq->vrq_md = memdesc;
+ rq->vrq = (union atto_vda_req *)memdesc->virt_addr;
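+ /*
+ * Handles are 1-based; the handle comes back in the outbound
+ * response so the completion can be matched to this request.
+ */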
+ rq->vrq->scsi.handle = a->num_vrqs;
+
+ return true;
+}
+
+static void esas2r_unmap_regions(struct esas2r_adapter *a)
+{
+ if (a->regs)
+ iounmap((void __iomem *)a->regs);
+
+ a->regs = NULL;
+
+ pci_release_region(a->pcid, 2);
+
+ if (a->data_window)
+ iounmap((void __iomem *)a->data_window);
+
+ a->data_window = NULL;
+
+ pci_release_region(a->pcid, 0);
+}
+
+static int esas2r_map_regions(struct esas2r_adapter *a)
+{
+ int error;
+
+ a->regs = NULL;
+ a->data_window = NULL;
+
+ error = pci_request_region(a->pcid, 2, a->name);
+ if (error != 0) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "pci_request_region(2) failed, error %d",
+ error);
+
+ return error;
+ }
+
+ a->regs = (void __force *)ioremap(pci_resource_start(a->pcid, 2),
+ pci_resource_len(a->pcid, 2));
+ if (a->regs == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "ioremap failed for regs mem region\n");
+ pci_release_region(a->pcid, 2);
+ return -EFAULT;
+ }
+
+ error = pci_request_region(a->pcid, 0, a->name);
+ if (error != 0) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "pci_request_region(2) failed, error %d",
+ error);
+ esas2r_unmap_regions(a);
+ return error;
+ }
+
+ a->data_window = (void __force *)ioremap(pci_resource_start(a->pcid, 0),
+ pci_resource_len(a->pcid, 0));
+ if (a->data_window == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "ioremap failed for data_window mem region\n");
+ esas2r_unmap_regions(a);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static void esas2r_setup_interrupts(struct esas2r_adapter *a, int intr_mode)
+{
+ int i;
+
+ /* Set up interrupt mode based on the requested value */
+ switch (intr_mode) {
+ case INTR_MODE_LEGACY:
+use_legacy_interrupts:
+ a->intr_mode = INTR_MODE_LEGACY;
+ break;
+
+ case INTR_MODE_MSI:
+ i = pci_enable_msi(a->pcid);
+ if (i != 0) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "failed to enable MSI for adapter %d, "
+ "falling back to legacy interrupts "
+ "(err=%d)", a->index,
+ i);
+ goto use_legacy_interrupts;
+ }
+ a->intr_mode = INTR_MODE_MSI;
+ esas2r_lock_set_flags(&a->flags2, AF2_MSI_ENABLED);
+ break;
+
+
+ default:
+ esas2r_log(ESAS2R_LOG_WARN,
+ "unknown interrupt_mode %d requested, "
+ "falling back to legacy interrupt",
+ intr_mode);
+ goto use_legacy_interrupts;
+ }
+}
+
+static void esas2r_claim_interrupts(struct esas2r_adapter *a)
+{
+ unsigned long flags = IRQF_DISABLED;
+
+ if (a->intr_mode == INTR_MODE_LEGACY)
+ flags |= IRQF_SHARED;
+
+ esas2r_log(ESAS2R_LOG_INFO,
+ "esas2r_claim_interrupts irq=%d (%p, %s, %x)",
+ a->pcid->irq, a, a->name, flags);
+
+ if (request_irq(a->pcid->irq,
+ (a->intr_mode ==
+ INTR_MODE_LEGACY) ? esas2r_interrupt :
+ esas2r_msi_interrupt,
+ flags,
+ a->name,
+ a)) {
+ esas2r_log(ESAS2R_LOG_CRIT, "unable to request IRQ %02X",
+ a->pcid->irq);
+ return;
+ }
+
+ esas2r_lock_set_flags(&a->flags2, AF2_IRQ_CLAIMED);
+ esas2r_log(ESAS2R_LOG_INFO,
+ "claimed IRQ %d flags: 0x%lx",
+ a->pcid->irq, flags);
+}
+
+int esas2r_init_adapter(struct Scsi_Host *host, struct pci_dev *pcid,
+ int index)
+{
+ struct esas2r_adapter *a;
+ u64 bus_addr = 0;
+ int i;
+ void *next_uncached;
+ struct esas2r_request *first_request, *last_request;
+
+ if (index >= MAX_ADAPTERS) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "tried to init invalid adapter index %u!",
+ index);
+ return 0;
+ }
+
+ if (esas2r_adapters[index]) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "tried to init existing adapter index %u!",
+ index);
+ return 0;
+ }
+
+ a = (struct esas2r_adapter *)host->hostdata;
+ memset(a, 0, sizeof(struct esas2r_adapter));
+ a->pcid = pcid;
+ a->host = host;
+
+ if (sizeof(dma_addr_t) > 4) {
+ const uint64_t required_mask =
+ dma_get_required_mask(&pcid->dev);
+ if (required_mask > DMA_BIT_MASK(32)
+ && !pci_set_dma_mask(pcid, DMA_BIT_MASK(64))
+ && !pci_set_consistent_dma_mask(pcid,
+ DMA_BIT_MASK(64))) {
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "64-bit PCI addressing enabled\n");
+ } else if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(pcid,
+ DMA_BIT_MASK(32))) {
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "32-bit PCI addressing enabled\n");
+ } else {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to set DMA mask");
+ esas2r_kill_adapter(index);
+ return 0;
+ }
+ } else {
+ if (!pci_set_dma_mask(pcid, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(pcid,
+ DMA_BIT_MASK(32))) {
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "32-bit PCI addressing enabled\n");
+ } else {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to set DMA mask");
+ esas2r_kill_adapter(index);
+ return 0;
+ }
+ }
+ esas2r_adapters[index] = a;
+ sprintf(a->name, ESAS2R_DRVR_NAME "_%02d", index);
+ esas2r_debug("new adapter %p, name %s", a, a->name);
+ spin_lock_init(&a->request_lock);
+ spin_lock_init(&a->fw_event_lock);
+ sema_init(&a->fm_api_semaphore, 1);
+ sema_init(&a->fs_api_semaphore, 1);
+ sema_init(&a->nvram_semaphore, 1);
+
+ esas2r_fw_event_off(a);
+ snprintf(a->fw_event_q_name, ESAS2R_KOBJ_NAME_LEN, "esas2r/%d",
+ a->index);
+ a->fw_event_q = create_singlethread_workqueue(a->fw_event_q_name);
+
+ init_waitqueue_head(&a->buffered_ioctl_waiter);
+ init_waitqueue_head(&a->nvram_waiter);
+ init_waitqueue_head(&a->fm_api_waiter);
+ init_waitqueue_head(&a->fs_api_waiter);
+ init_waitqueue_head(&a->vda_waiter);
+
+ INIT_LIST_HEAD(&a->general_req.req_list);
+ INIT_LIST_HEAD(&a->active_list);
+ INIT_LIST_HEAD(&a->defer_list);
+ INIT_LIST_HEAD(&a->free_sg_list_head);
+ INIT_LIST_HEAD(&a->avail_request);
+ INIT_LIST_HEAD(&a->vrq_mds_head);
+ INIT_LIST_HEAD(&a->fw_event_list);
+
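+ /*
+ * The request pool lives in the Scsi_Host hostdata area,
+ * immediately following the adapter structure itself.
+ */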
+ first_request = (struct esas2r_request *)((u8 *)(a + 1));
+
+ for (last_request = first_request, i = 1; i < num_requests;
+ last_request++, i++) {
+ INIT_LIST_HEAD(&last_request->req_list);
+ list_add_tail(&last_request->comp_list, &a->avail_request);
+ if (!alloc_vda_req(a, last_request)) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to allocate a VDA request!");
+ esas2r_kill_adapter(index);
+ return 0;
+ }
+ }
+
+ esas2r_debug("requests: %p to %p (%d, %d)", first_request,
+ last_request,
+ sizeof(*first_request),
+ num_requests);
+
+ if (esas2r_map_regions(a) != 0) {
+ esas2r_log(ESAS2R_LOG_CRIT, "could not map PCI regions!");
+ esas2r_kill_adapter(index);
+ return 0;
+ }
+
+ a->index = index;
+
+ /* interrupts will be disabled until we are done with init */
+ atomic_inc(&a->dis_ints_cnt);
+ atomic_inc(&a->disable_cnt);
+ a->flags |= AF_CHPRST_PENDING
+ | AF_DISC_PENDING
+ | AF_FIRST_INIT
+ | AF_LEGACY_SGE_MODE;
+
+ a->init_msg = ESAS2R_INIT_MSG_START;
+ a->max_vdareq_size = 128;
+ a->build_sgl = esas2r_build_sg_list_sge;
+
+ esas2r_setup_interrupts(a, interrupt_mode);
+
+ a->uncached_size = esas2r_get_uncached_size(a);
+ a->uncached = dma_alloc_coherent(&pcid->dev,
+ (size_t)a->uncached_size,
+ (dma_addr_t *)&bus_addr,
+ GFP_KERNEL);
+ if (a->uncached == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to allocate %d bytes of consistent memory!",
+ a->uncached_size);
+ esas2r_kill_adapter(index);
+ return 0;
+ }
+
+ a->uncached_phys = bus_addr;
+
+ esas2r_debug("%d bytes uncached memory allocated @ %p (%x:%x)",
+ a->uncached_size,
+ a->uncached,
+ upper_32_bits(bus_addr),
+ lower_32_bits(bus_addr));
+ memset(a->uncached, 0, a->uncached_size);
+ next_uncached = a->uncached;
+
+ if (!esas2r_init_adapter_struct(a,
+ &next_uncached)) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to initialize adapter structure (2)!");
+ esas2r_kill_adapter(index);
+ return 0;
+ }
+
+ tasklet_init(&a->tasklet,
+ esas2r_adapter_tasklet,
+ (unsigned long)a);
+
+ /*
+ * Disable chip interrupts to prevent spurious interrupts
+ * until we claim the IRQ.
+ */
+ esas2r_disable_chip_interrupts(a);
+ esas2r_check_adapter(a);
+
+ if (!esas2r_init_adapter_hw(a, true))
+ esas2r_log(ESAS2R_LOG_CRIT, "failed to initialize hardware!");
+ else
+ esas2r_debug("esas2r_init_adapter ok");
+
+ esas2r_claim_interrupts(a);
+
+ if (a->flags2 & AF2_IRQ_CLAIMED)
+ esas2r_enable_chip_interrupts(a);
+
+ esas2r_lock_set_flags(&a->flags2, AF2_INIT_DONE);
+ if (!(a->flags & AF_DEGRADED_MODE))
+ esas2r_kickoff_timer(a);
+ esas2r_debug("esas2r_init_adapter done for %p (%d)",
+ a, a->disable_cnt);
+
+ return 1;
+}
+
+static void esas2r_adapter_power_down(struct esas2r_adapter *a,
+ int power_management)
+{
+ struct esas2r_mem_desc *memdesc, *next;
+
+ if ((a->flags2 & AF2_INIT_DONE)
+ && (!(a->flags & AF_DEGRADED_MODE))) {
+ if (!power_management) {
+ del_timer_sync(&a->timer);
+ tasklet_kill(&a->tasklet);
+ }
+ esas2r_power_down(a);
+
+ /*
+ * There are versions of firmware that do not handle the sync
+ * cache command correctly. Stall here to ensure that the
+ * cache is lazily flushed.
+ */
+ mdelay(500);
+ esas2r_debug("chip halted");
+ }
+
+ /* Remove sysfs binary files */
+ if (a->sysfs_fw_created) {
+ sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fw);
+ a->sysfs_fw_created = 0;
+ }
+
+ if (a->sysfs_fs_created) {
+ sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_fs);
+ a->sysfs_fs_created = 0;
+ }
+
+ if (a->sysfs_vda_created) {
+ sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_vda);
+ a->sysfs_vda_created = 0;
+ }
+
+ if (a->sysfs_hw_created) {
+ sysfs_remove_bin_file(&a->host->shost_dev.kobj, &bin_attr_hw);
+ a->sysfs_hw_created = 0;
+ }
+
+ if (a->sysfs_live_nvram_created) {
+ sysfs_remove_bin_file(&a->host->shost_dev.kobj,
+ &bin_attr_live_nvram);
+ a->sysfs_live_nvram_created = 0;
+ }
+
+ if (a->sysfs_default_nvram_created) {
+ sysfs_remove_bin_file(&a->host->shost_dev.kobj,
+ &bin_attr_default_nvram);
+ a->sysfs_default_nvram_created = 0;
+ }
+
+ /* Clean up interrupts */
+ if (a->flags2 & AF2_IRQ_CLAIMED) {
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "free_irq(%d) called", a->pcid->irq);
+
+ free_irq(a->pcid->irq, a);
+ esas2r_debug("IRQ released");
+ esas2r_lock_clear_flags(&a->flags2, AF2_IRQ_CLAIMED);
+ }
+
+ if (a->flags2 & AF2_MSI_ENABLED) {
+ pci_disable_msi(a->pcid);
+ esas2r_lock_clear_flags(&a->flags2, AF2_MSI_ENABLED);
+ esas2r_debug("MSI disabled");
+ }
+
+ if (a->inbound_list_md.virt_addr)
+ esas2r_initmem_free(a, &a->inbound_list_md);
+
+ if (a->outbound_list_md.virt_addr)
+ esas2r_initmem_free(a, &a->outbound_list_md);
+
+ list_for_each_entry_safe(memdesc, next, &a->free_sg_list_head,
+ next_desc) {
+ esas2r_initmem_free(a, memdesc);
+ }
+
+ /* Following frees everything allocated via alloc_vda_req */
+ list_for_each_entry_safe(memdesc, next, &a->vrq_mds_head, next_desc) {
+ esas2r_initmem_free(a, memdesc);
+ list_del(&memdesc->next_desc);
+ kfree(memdesc);
+ }
+
+ kfree(a->first_ae_req);
+ a->first_ae_req = NULL;
+
+ kfree(a->sg_list_mds);
+ a->sg_list_mds = NULL;
+
+ kfree(a->req_table);
+ a->req_table = NULL;
+
+ if (a->regs) {
+ esas2r_unmap_regions(a);
+ a->regs = NULL;
+ a->data_window = NULL;
+ esas2r_debug("regions unmapped");
+ }
+}
+
+/* Release/free allocated resources for specified adapters. */
+void esas2r_kill_adapter(int i)
+{
+ struct esas2r_adapter *a = esas2r_adapters[i];
+
+ if (a) {
+ unsigned long flags;
+ struct workqueue_struct *wq;
+ esas2r_debug("killing adapter %p [%d] ", a, i);
+ esas2r_fw_event_off(a);
+ esas2r_adapter_power_down(a, 0);
+ if (esas2r_buffered_ioctl &&
+ (a->pcid == esas2r_buffered_ioctl_pcid)) {
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)esas2r_buffered_ioctl_size,
+ esas2r_buffered_ioctl,
+ esas2r_buffered_ioctl_addr);
+ esas2r_buffered_ioctl = NULL;
+ }
+
+ if (a->vda_buffer) {
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)VDA_MAX_BUFFER_SIZE,
+ a->vda_buffer,
+ (dma_addr_t)a->ppvda_buffer);
+ a->vda_buffer = NULL;
+ }
+ if (a->fs_api_buffer) {
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)a->fs_api_buffer_size,
+ a->fs_api_buffer,
+ (dma_addr_t)a->ppfs_api_buffer);
+ a->fs_api_buffer = NULL;
+ }
+
+ kfree(a->local_atto_ioctl);
+ a->local_atto_ioctl = NULL;
+
+ spin_lock_irqsave(&a->fw_event_lock, flags);
+ wq = a->fw_event_q;
+ a->fw_event_q = NULL;
+ spin_unlock_irqrestore(&a->fw_event_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ if (a->uncached) {
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)a->uncached_size,
+ a->uncached,
+ (dma_addr_t)a->uncached_phys);
+ a->uncached = NULL;
+ esas2r_debug("uncached area freed");
+ }
+
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "pci_disable_device() called. msix_enabled: %d "
+ "msi_enabled: %d irq: %d pin: %d",
+ a->pcid->msix_enabled,
+ a->pcid->msi_enabled,
+ a->pcid->irq,
+ a->pcid->pin);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "before pci_disable_device() enable_cnt: %d",
+ a->pcid->enable_cnt.counter);
+
+ pci_disable_device(a->pcid);
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "after pci_disable_device() enable_cnt: %d",
+ a->pcid->enable_cnt.counter);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->pcid->dev),
+ "pci_set_drv_data(%p, NULL) called",
+ a->pcid);
+
+ pci_set_drvdata(a->pcid, NULL);
+ esas2r_adapters[i] = NULL;
+
+ if (a->flags2 & AF2_INIT_DONE) {
+ esas2r_lock_clear_flags(&a->flags2,
+ AF2_INIT_DONE);
+
+ esas2r_lock_set_flags(&a->flags,
+ AF_DEGRADED_MODE);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->host->shost_gendev),
+ "scsi_remove_host() called");
+
+ scsi_remove_host(a->host);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO,
+ &(a->host->shost_gendev),
+ "scsi_host_put() called");
+
+ scsi_host_put(a->host);
+ }
+ }
+}
+
+int esas2r_cleanup(struct Scsi_Host *host)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
+ int index;
+
+ if (host == NULL) {
+ int i;
+
+ esas2r_debug("esas2r_cleanup everything");
+ for (i = 0; i < MAX_ADAPTERS; i++)
+ esas2r_kill_adapter(i);
+ return -1;
+ }
+
+ esas2r_debug("esas2r_cleanup called for host %p", host);
+ index = a->index;
+ esas2r_kill_adapter(index);
+ return index;
+}
+
+int esas2r_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ u32 device_state;
+ struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "suspending adapter()");
+ if (!a)
+ return -ENODEV;
+
+ esas2r_adapter_power_down(a, 1);
+ device_state = pci_choose_state(pdev, state);
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "pci_save_state() called");
+ pci_save_state(pdev);
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "pci_disable_device() called");
+ pci_disable_device(pdev);
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "pci_set_power_state() called");
+ pci_set_power_state(pdev, device_state);
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "esas2r_suspend(): 0");
+ return 0;
+}
+
+int esas2r_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host = pci_get_drvdata(pdev);
+ struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata;
+ int rez;
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev), "resuming adapter()");
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "pci_set_power_state(PCI_D0) "
+ "called");
+ pci_set_power_state(pdev, PCI_D0);
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "pci_enable_wake(PCI_D0, 0) "
+ "called");
+ pci_enable_wake(pdev, PCI_D0, 0);
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "pci_restore_state() called");
+ pci_restore_state(pdev);
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "pci_enable_device() called");
+ rez = pci_enable_device(pdev);
+ pci_set_master(pdev);
+
+ if (!a) {
+ rez = -ENODEV;
+ goto error_exit;
+ }
+
+ if (esas2r_map_regions(a) != 0) {
+ esas2r_log(ESAS2R_LOG_CRIT, "could not re-map PCI regions!");
+ rez = -ENOMEM;
+ goto error_exit;
+ }
+
+ /* Set up interrupt mode */
+ esas2r_setup_interrupts(a, a->intr_mode);
+
+ /*
+ * Disable chip interrupts to prevent spurious interrupts until we
+ * claim the IRQ.
+ */
+ esas2r_disable_chip_interrupts(a);
+ if (!esas2r_power_up(a, true)) {
+ esas2r_debug("yikes, esas2r_power_up failed");
+ rez = -ENOMEM;
+ goto error_exit;
+ }
+
+ esas2r_claim_interrupts(a);
+
+ if (a->flags2 & AF2_IRQ_CLAIMED) {
+ /*
+ * Now that system interrupt(s) are claimed, we can enable
+ * chip interrupts.
+ */
+ esas2r_enable_chip_interrupts(a);
+ esas2r_kickoff_timer(a);
+ } else {
+ esas2r_debug("yikes, unable to claim IRQ");
+ esas2r_log(ESAS2R_LOG_CRIT, "could not re-claim IRQ!");
+ rez = -ENOMEM;
+ goto error_exit;
+ }
+
+error_exit:
+ esas2r_log_dev(ESAS2R_LOG_CRIT, &(pdev->dev), "esas2r_resume(): %d",
+ rez);
+ return rez;
+}
+
+bool esas2r_set_degraded_mode(struct esas2r_adapter *a, char *error_str)
+{
+ esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE);
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "setting adapter to degraded mode: %s\n", error_str);
+ return false;
+}
+
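+/*
+ * Total DMA-coherent ("uncached") memory the adapter needs: the NVRAM
+ * copy, discovery buffer, outbound list copy pointer, S/G list pages,
+ * and the inbound/outbound communication lists, each padded for
+ * alignment.
+ */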
+u32 esas2r_get_uncached_size(struct esas2r_adapter *a)
+{
+ return sizeof(struct esas2r_sas_nvram)
+ + ALIGN(ESAS2R_DISC_BUF_LEN, 8)
+ + ALIGN(sizeof(u32), 8) /* outbound list copy pointer */
+ + 8
+ + (num_sg_lists * (u16)sgl_page_size)
+ + ALIGN((num_requests + num_ae_requests + 1 +
+ ESAS2R_LIST_EXTRA) *
+ sizeof(struct esas2r_inbound_list_source_entry),
+ 8)
+ + ALIGN((num_requests + num_ae_requests + 1 +
+ ESAS2R_LIST_EXTRA) *
+ sizeof(struct atto_vda_ob_rsp), 8)
+ + 256; /* VDA request and buffer align */
+}
+
+static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a)
+{
+ int pcie_cap_reg;
+
+ pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
+ if (pcie_cap_reg) {
+ u16 devcontrol;
+
+ pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL,
+ &devcontrol);
+
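+ /*
+ * Cap the PCIe max read request size at 512 bytes (DEVCTL
+ * read request encoding 0x2000).
+ */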
+ if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > 0x2000) {
+ esas2r_log(ESAS2R_LOG_INFO,
+ "max read request size > 512B");
+
+ devcontrol &= ~PCI_EXP_DEVCTL_READRQ;
+ devcontrol |= 0x2000;
+ pci_write_config_word(a->pcid,
+ pcie_cap_reg + PCI_EXP_DEVCTL,
+ devcontrol);
+ }
+ }
+}
+
+/*
+ * Determine the organization of the uncached data area and
+ * finish initializing the adapter structure
+ */
+bool esas2r_init_adapter_struct(struct esas2r_adapter *a,
+ void **uncached_area)
+{
+ u32 i;
+ u8 *high;
+ struct esas2r_inbound_list_source_entry *element;
+ struct esas2r_request *rq;
+ struct esas2r_mem_desc *sgl;
+
+ spin_lock_init(&a->sg_list_lock);
+ spin_lock_init(&a->mem_lock);
+ spin_lock_init(&a->queue_lock);
+
+ a->targetdb_end = &a->targetdb[ESAS2R_MAX_TARGETS];
+
+ if (!alloc_vda_req(a, &a->general_req)) {
+ esas2r_hdebug(
+ "failed to allocate a VDA request for the general req!");
+ return false;
+ }
+
+ /* allocate requests for asynchronous events */
+ a->first_ae_req =
+ kzalloc(num_ae_requests * sizeof(struct esas2r_request),
+ GFP_KERNEL);
+
+ if (a->first_ae_req == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to allocate memory for asynchronous events");
+ return false;
+ }
+
+ /* allocate the S/G list memory descriptors */
+ a->sg_list_mds = kzalloc(
+ num_sg_lists * sizeof(struct esas2r_mem_desc), GFP_KERNEL);
+
+ if (a->sg_list_mds == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to allocate memory for s/g list descriptors");
+ return false;
+ }
+
+ /* allocate the request table */
+ a->req_table =
+ kzalloc((num_requests + num_ae_requests +
+ 1) * sizeof(struct esas2r_request *), GFP_KERNEL);
+
+ if (a->req_table == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "failed to allocate memory for the request table");
+ return false;
+ }
+
+ /* initialize PCI configuration space */
+ esas2r_init_pci_cfg_space(a);
+
+ /*
+ * the thunder_stream boards all have a serial flash part that has a
+ * different base address on the AHB bus.
+ */
+ if ((a->pcid->subsystem_vendor == ATTO_VENDOR_ID)
+ && (a->pcid->subsystem_device & ATTO_SSDID_TBT))
+ a->flags2 |= AF2_THUNDERBOLT;
+
+ if (a->flags2 & AF2_THUNDERBOLT)
+ a->flags2 |= AF2_SERIAL_FLASH;
+
+ if (a->pcid->subsystem_device == ATTO_TLSH_1068)
+ a->flags2 |= AF2_THUNDERLINK;
+
+ /* Uncached Area */
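+ /*
+ * The NVRAM copy, discovery buffer and outbound list copy pointer
+ * are carved sequentially out of the region sized by
+ * esas2r_get_uncached_size().
+ */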
+ high = (u8 *)*uncached_area;
+
+ /* initialize the scatter/gather table pages */
+
+ for (i = 0, sgl = a->sg_list_mds; i < num_sg_lists; i++, sgl++) {
+ sgl->size = sgl_page_size;
+
+ list_add_tail(&sgl->next_desc, &a->free_sg_list_head);
+
+ if (!esas2r_initmem_alloc(a, sgl, ESAS2R_SGL_ALIGN)) {
+ /* Allow the driver to load if the minimum count met. */
+ if (i < NUM_SGL_MIN)
+ return false;
+ break;
+ }
+ }
+
+ /* compute the size of the lists */
+ a->list_size = num_requests + ESAS2R_LIST_EXTRA;
+
+ /* allocate the inbound list */
+ a->inbound_list_md.size = a->list_size *
+ sizeof(struct esas2r_inbound_list_source_entry);
+
+ if (!esas2r_initmem_alloc(a, &a->inbound_list_md, ESAS2R_LIST_ALIGN)) {
+ esas2r_hdebug("failed to allocate IB list");
+ return false;
+ }
+
+ /* allocate the outbound list */
+ a->outbound_list_md.size = a->list_size *
+ sizeof(struct atto_vda_ob_rsp);
+
+ if (!esas2r_initmem_alloc(a, &a->outbound_list_md,
+ ESAS2R_LIST_ALIGN)) {
+ esas2r_hdebug("failed to allocate OB list");
+ return false;
+ }
+
+ /* allocate the NVRAM structure */
+ a->nvram = (struct esas2r_sas_nvram *)high;
+ high += sizeof(struct esas2r_sas_nvram);
+
+ /* allocate the discovery buffer */
+ a->disc_buffer = high;
+ high += ESAS2R_DISC_BUF_LEN;
+ high = PTR_ALIGN(high, 8);
+
+ /* allocate the outbound list copy pointer */
+ a->outbound_copy = (u32 volatile *)high;
+ high += sizeof(u32);
+
+ if (!(a->flags & AF_NVR_VALID))
+ esas2r_nvram_set_defaults(a);
+
+ /* update the caller's uncached memory area pointer */
+ *uncached_area = (void *)high;
+
+ /* initialize the allocated memory */
+ if (a->flags & AF_FIRST_INIT) {
+ memset(a->req_table, 0,
+ (num_requests + num_ae_requests +
+ 1) * sizeof(struct esas2r_request *));
+
+ esas2r_targ_db_initialize(a);
+
+ /* prime parts of the inbound list */
+ element = (struct esas2r_inbound_list_source_entry *)
+ a->inbound_list_md.virt_addr;
+
+ for (i = 0; i < a->list_size; i++) {
+ element->address = 0;
+ element->reserved = 0;
+ element->length = cpu_to_le32(HWILSE_INTERFACE_F0 |
+ (sizeof(union atto_vda_req) /
+ sizeof(u32)));
+ element++;
+ }
+
+ /* init the AE requests */
+ for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++,
+ i++) {
+ INIT_LIST_HEAD(&rq->req_list);
+ if (!alloc_vda_req(a, rq)) {
+ esas2r_hdebug(
+ "failed to allocate a VDA request!");
+ return false;
+ }
+
+ esas2r_rq_init_request(rq, a);
+
+ /* override the completion function */
+ rq->comp_cb = esas2r_ae_complete;
+ }
+ }
+
+ return true;
+}
+
+/* This code will verify that the chip is operational. */
+bool esas2r_check_adapter(struct esas2r_adapter *a)
+{
+ u32 starttime;
+ u32 doorbell;
+ u64 ppaddr;
+ u32 dw;
+
+ /*
+ * if the chip reset detected flag is set, we can bypass a bunch of
+ * stuff.
+ */
+ if (a->flags & AF_CHPRST_DETECTED)
+ goto skip_chip_reset;
+
+ /*
+ * BEFORE WE DO ANYTHING, disable the chip interrupts! the boot driver
+ * may have left them enabled or we may be recovering from a fault.
+ */
+ esas2r_write_register_dword(a, MU_INT_MASK_OUT, ESAS2R_INT_DIS_MASK);
+ esas2r_flush_register_dword(a, MU_INT_MASK_OUT);
+
+ /*
+ * wait for the firmware to become ready by forcing an interrupt and
+ * waiting for a response.
+ */
+ starttime = jiffies_to_msecs(jiffies);
+
+ while (true) {
+ esas2r_force_interrupt(a);
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell == 0xFFFFFFFF) {
+ /*
+ * Give the firmware up to two seconds to enable
+ * register access after a reset.
+ */
+ if ((jiffies_to_msecs(jiffies) - starttime) > 2000)
+ return esas2r_set_degraded_mode(a,
+ "unable to access registers");
+ } else if (doorbell & DRBL_FORCE_INT) {
+ u32 ver = (doorbell & DRBL_FW_VER_MSK);
+
+ /*
+ * This driver supports version 0 and version 1 of
+ * the API
+ */
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ doorbell);
+
+ if (ver == DRBL_FW_VER_0) {
+ esas2r_lock_set_flags(&a->flags,
+ AF_LEGACY_SGE_MODE);
+
+ a->max_vdareq_size = 128;
+ a->build_sgl = esas2r_build_sg_list_sge;
+ } else if (ver == DRBL_FW_VER_1) {
+ esas2r_lock_clear_flags(&a->flags,
+ AF_LEGACY_SGE_MODE);
+
+ a->max_vdareq_size = 1024;
+ a->build_sgl = esas2r_build_sg_list_prd;
+ } else {
+ return esas2r_set_degraded_mode(a,
+ "unknown firmware version");
+ }
+ break;
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+ if ((jiffies_to_msecs(jiffies) - starttime) > 180000) {
+ esas2r_hdebug("FW ready TMO");
+ esas2r_bugon();
+
+ return esas2r_set_degraded_mode(a,
+ "firmware start has timed out");
+ }
+ }
+
+ /* purge any asynchronous events since we will repost them later */
+ esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_DOWN);
+ starttime = jiffies_to_msecs(jiffies);
+
+ while (true) {
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell & DRBL_MSG_IFC_DOWN) {
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ doorbell);
+ break;
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(50));
+
+ if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
+ esas2r_hdebug("timeout waiting for interface down");
+ break;
+ }
+ }
+skip_chip_reset:
+ /*
+ * first things first, before we go changing any of these registers
+ * disable the communication lists.
+ */
+ dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
+ dw &= ~MU_ILC_ENABLE;
+ esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
+ dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
+ dw &= ~MU_OLC_ENABLE;
+ esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
+
+ /* configure the communication list addresses */
+ ppaddr = a->inbound_list_md.phys_addr;
+ esas2r_write_register_dword(a, MU_IN_LIST_ADDR_LO,
+ lower_32_bits(ppaddr));
+ esas2r_write_register_dword(a, MU_IN_LIST_ADDR_HI,
+ upper_32_bits(ppaddr));
+ ppaddr = a->outbound_list_md.phys_addr;
+ esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_LO,
+ lower_32_bits(ppaddr));
+ esas2r_write_register_dword(a, MU_OUT_LIST_ADDR_HI,
+ upper_32_bits(ppaddr));
+ ppaddr = a->uncached_phys +
+ ((u8 *)a->outbound_copy - a->uncached);
+ esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_LO,
+ lower_32_bits(ppaddr));
+ esas2r_write_register_dword(a, MU_OUT_LIST_COPY_PTR_HI,
+ upper_32_bits(ppaddr));
+
+ /* reset the read and write pointers */
+ *a->outbound_copy =
+ a->last_write =
+ a->last_read = a->list_size - 1;
+ esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
+ esas2r_write_register_dword(a, MU_IN_LIST_WRITE, MU_ILW_TOGGLE |
+ a->last_write);
+ esas2r_write_register_dword(a, MU_OUT_LIST_COPY, MU_OLC_TOGGLE |
+ a->last_write);
+ esas2r_write_register_dword(a, MU_IN_LIST_READ, MU_ILR_TOGGLE |
+ a->last_write);
+ esas2r_write_register_dword(a, MU_OUT_LIST_WRITE,
+ MU_OLW_TOGGLE | a->last_write);
+
+ /* configure the interface select fields */
+ dw = esas2r_read_register_dword(a, MU_IN_LIST_IFC_CONFIG);
+ dw &= ~(MU_ILIC_LIST | MU_ILIC_DEST);
+ esas2r_write_register_dword(a, MU_IN_LIST_IFC_CONFIG,
+ (dw | MU_ILIC_LIST_F0 | MU_ILIC_DEST_DDR));
+ dw = esas2r_read_register_dword(a, MU_OUT_LIST_IFC_CONFIG);
+ dw &= ~(MU_OLIC_LIST | MU_OLIC_SOURCE);
+ esas2r_write_register_dword(a, MU_OUT_LIST_IFC_CONFIG,
+ (dw | MU_OLIC_LIST_F0 |
+ MU_OLIC_SOURCE_DDR));
+
+ /* finish configuring the communication lists */
+ dw = esas2r_read_register_dword(a, MU_IN_LIST_CONFIG);
+ dw &= ~(MU_ILC_ENTRY_MASK | MU_ILC_NUMBER_MASK);
+ dw |= MU_ILC_ENTRY_4_DW | MU_ILC_DYNAMIC_SRC
+ | (a->list_size << MU_ILC_NUMBER_SHIFT);
+ esas2r_write_register_dword(a, MU_IN_LIST_CONFIG, dw);
+ dw = esas2r_read_register_dword(a, MU_OUT_LIST_CONFIG);
+ dw &= ~(MU_OLC_ENTRY_MASK | MU_OLC_NUMBER_MASK);
+ dw |= MU_OLC_ENTRY_4_DW | (a->list_size << MU_OLC_NUMBER_SHIFT);
+ esas2r_write_register_dword(a, MU_OUT_LIST_CONFIG, dw);
+
+ /*
+ * notify the firmware that we're done setting up the communication
+ * list registers. wait here until the firmware is done configuring
+ * its lists. it will signal that it is done by enabling the lists.
+ */
+ esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_MSG_IFC_INIT);
+ starttime = jiffies_to_msecs(jiffies);
+
+ while (true) {
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell & DRBL_MSG_IFC_INIT) {
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ doorbell);
+ break;
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+ if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
+ esas2r_hdebug(
+ "timeout waiting for communication list init");
+ esas2r_bugon();
+ return esas2r_set_degraded_mode(a,
+ "timeout waiting for communication list init");
+ }
+ }
+
+ /*
+ * flag whether the firmware supports the power down doorbell. we
+ * determine this by reading the inbound doorbell enable mask.
+ */
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_IN_ENB);
+ if (doorbell & DRBL_POWER_DOWN)
+ esas2r_lock_set_flags(&a->flags2, AF2_VDA_POWER_DOWN);
+ else
+ esas2r_lock_clear_flags(&a->flags2, AF2_VDA_POWER_DOWN);
+
+ /*
+ * enable assertion of outbound queue and doorbell interrupts in the
+ * main interrupt cause register.
+ */
+ esas2r_write_register_dword(a, MU_OUT_LIST_INT_MASK, MU_OLIS_MASK);
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT_ENB, DRBL_ENB_MASK);
+ return true;
+}
+
+/* Process the initialization message just completed and format the next one. */
+static bool esas2r_format_init_msg(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ u32 msg = a->init_msg;
+ struct atto_vda_cfg_init *ci;
+
+ a->init_msg = 0;
+
+ switch (msg) {
+ case ESAS2R_INIT_MSG_START:
+ case ESAS2R_INIT_MSG_REINIT:
+ {
+ struct timeval now;
+ do_gettimeofday(&now);
+ esas2r_hdebug("CFG init");
+ esas2r_build_cfg_req(a,
+ rq,
+ VDA_CFG_INIT,
+ 0,
+ NULL);
+ ci = (struct atto_vda_cfg_init *)&rq->vrq->cfg.data.init;
+ ci->sgl_page_size = sgl_page_size;
+ ci->epoch_time = now.tv_sec;
+ rq->flags |= RF_FAILURE_OK;
+ a->init_msg = ESAS2R_INIT_MSG_INIT;
+ break;
+ }
+
+ case ESAS2R_INIT_MSG_INIT:
+ if (rq->req_stat == RS_SUCCESS) {
+ u32 major;
+ u32 minor;
+
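+ /*
+ * Pack the firmware version: VDA version in the low word,
+ * release major in bits 16-23, release minor in bits 24-31.
+ */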
+ a->fw_version = le16_to_cpu(
+ rq->func_rsp.cfg_rsp.vda_version);
+ a->fw_build = rq->func_rsp.cfg_rsp.fw_build;
+ major = LOBYTE(rq->func_rsp.cfg_rsp.fw_release);
+ minor = HIBYTE(rq->func_rsp.cfg_rsp.fw_release);
+ a->fw_version += (major << 16) + (minor << 24);
+ } else {
+ esas2r_hdebug("FAILED");
+ }
+
+ /*
+ * the 2.71 and earlier releases of R6xx firmware did not correctly
+ * fail unsupported config requests.
+ */
+
+ if ((a->flags2 & AF2_THUNDERBOLT)
+ || (be32_to_cpu(a->fw_version) >
+ be32_to_cpu(0x47020052))) {
+ esas2r_hdebug("CFG get init");
+ esas2r_build_cfg_req(a,
+ rq,
+ VDA_CFG_GET_INIT2,
+ sizeof(struct atto_vda_cfg_init),
+ NULL);
+
+ rq->vrq->cfg.sg_list_offset = offsetof(
+ struct atto_vda_cfg_req,
+ data.sge);
+ rq->vrq->cfg.data.prde.ctl_len =
+ cpu_to_le32(sizeof(struct atto_vda_cfg_init));
+ rq->vrq->cfg.data.prde.address = cpu_to_le64(
+ rq->vrq_md->phys_addr +
+ sizeof(union atto_vda_req));
+ rq->flags |= RF_FAILURE_OK;
+ a->init_msg = ESAS2R_INIT_MSG_GET_INIT;
+ break;
+ }
+
+ case ESAS2R_INIT_MSG_GET_INIT:
+ if (msg == ESAS2R_INIT_MSG_GET_INIT) {
+ ci = (struct atto_vda_cfg_init *)rq->data_buf;
+ if (rq->req_stat == RS_SUCCESS) {
+ a->num_targets_backend =
+ le32_to_cpu(ci->num_targets_backend);
+ a->ioctl_tunnel =
+ le32_to_cpu(ci->ioctl_tunnel);
+ } else {
+ esas2r_hdebug("FAILED");
+ }
+ }
+ /* fall through */
+
+ default:
+ rq->req_stat = RS_SUCCESS;
+ return false;
+ }
+ return true;
+}
+
+/*
+ * Perform initialization messages via the request queue. Messages are
+ * performed with interrupts disabled.
+ */
+bool esas2r_init_msgs(struct esas2r_adapter *a)
+{
+ bool success = true;
+ struct esas2r_request *rq = &a->general_req;
+
+ esas2r_rq_init_request(rq, a);
+ rq->comp_cb = esas2r_dummy_complete;
+
+ if (a->init_msg == 0)
+ a->init_msg = ESAS2R_INIT_MSG_REINIT;
+
+ while (a->init_msg) {
+ if (esas2r_format_init_msg(a, rq)) {
+ unsigned long flags;
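+ /* resubmit until the request completes (i.e. is no longer RS_PENDING) */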
+ while (true) {
+ spin_lock_irqsave(&a->queue_lock, flags);
+ esas2r_start_vda_request(a, rq);
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+ esas2r_wait_request(a, rq);
+ if (rq->req_stat != RS_PENDING)
+ break;
+ }
+ }
+
+ if (rq->req_stat == RS_SUCCESS
+ || ((rq->flags & RF_FAILURE_OK)
+ && rq->req_stat != RS_TIMEOUT))
+ continue;
+
+ esas2r_log(ESAS2R_LOG_CRIT, "init message %x failed (%x, %x)",
+ a->init_msg, rq->req_stat, rq->flags);
+ a->init_msg = ESAS2R_INIT_MSG_START;
+ success = false;
+ break;
+ }
+
+ esas2r_rq_destroy_request(rq, a);
+ return success;
+}
+
+/* Initialize the adapter chip */
+bool esas2r_init_adapter_hw(struct esas2r_adapter *a, bool init_poll)
+{
+ bool rslt = false;
+ struct esas2r_request *rq;
+ u32 i;
+
+ if (a->flags & AF_DEGRADED_MODE)
+ goto exit;
+
+ if (!(a->flags & AF_NVR_VALID)) {
+ if (!esas2r_nvram_read_direct(a))
+ esas2r_log(ESAS2R_LOG_WARN,
+ "invalid/missing NVRAM parameters");
+ }
+
+ if (!esas2r_init_msgs(a)) {
+ esas2r_set_degraded_mode(a, "init messages failed");
+ goto exit;
+ }
+
+ /* The firmware is ready. */
+ esas2r_lock_clear_flags(&a->flags, AF_DEGRADED_MODE);
+ esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
+
+ /* Post all the async event requests */
+ for (i = 0, rq = a->first_ae_req; i < num_ae_requests; i++, rq++)
+ esas2r_start_ae_request(a, rq);
+
+ if (!a->flash_rev[0])
+ esas2r_read_flash_rev(a);
+
+ if (!a->image_type[0])
+ esas2r_read_image_type(a);
+
+ if (a->fw_version == 0)
+ a->fw_rev[0] = 0;
+ else
+ sprintf(a->fw_rev, "%1d.%02d",
+ (int)LOBYTE(HIWORD(a->fw_version)),
+ (int)HIBYTE(HIWORD(a->fw_version)));
+
+ esas2r_hdebug("firmware revision: %s", a->fw_rev);
+
+ if ((a->flags & AF_CHPRST_DETECTED)
+ && (a->flags & AF_FIRST_INIT)) {
+ esas2r_enable_chip_interrupts(a);
+ return true;
+ }
+
+ /* initialize discovery */
+ esas2r_disc_initialize(a);
+
+ /*
+ * wait for the device wait time to expire here if requested. this is
+ * usually requested during initial driver load and possibly when
+ * resuming from a low power state. deferred device waiting will use
+ * interrupts. chip reset recovery always defers device waiting to
+ * avoid being in a TASKLET too long.
+ */
+ if (init_poll) {
+ u32 currtime = a->disc_start_time;
+ u32 nexttick = 100;
+ u32 deltatime;
+
+ /*
+ * Block Tasklets from getting scheduled and indicate this is
+ * polled discovery.
+ */
+ esas2r_lock_set_flags(&a->flags, AF_TASKLET_SCHEDULED);
+ esas2r_lock_set_flags(&a->flags, AF_DISC_POLLED);
+
+ /*
+ * Temporarily bring the disable count to zero to enable
+ * deferred processing. Note that the count is already zero
+ * after the first initialization.
+ */
+ if (a->flags & AF_FIRST_INIT)
+ atomic_dec(&a->disable_cnt);
+
+ while (a->flags & AF_DISC_PENDING) {
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+ /*
+ * Determine the need for a timer tick based on the
+ * delta time between this and the last iteration of
+ * this loop. We don't use the absolute time because
+ * then we would have to worry about when nexttick
+ * wraps and currtime hasn't yet.
+ */
+ deltatime = jiffies_to_msecs(jiffies) - currtime;
+ currtime += deltatime;
+
+ /*
+ * Process any waiting discovery as long as the chip is
+ * up. If a chip reset happens during initial polling,
+ * we have to make sure the timer tick processes the
+ * doorbell indicating the firmware is ready.
+ */
+ if (!(a->flags & AF_CHPRST_PENDING))
+ esas2r_disc_check_for_work(a);
+
+ /* Simulate a timer tick. */
+ if (nexttick <= deltatime) {
+
+ /* Time for a timer tick */
+ nexttick += 100;
+ esas2r_timer_tick(a);
+ }
+
+ if (nexttick > deltatime)
+ nexttick -= deltatime;
+
+ /* Do any deferred processing */
+ if (esas2r_is_tasklet_pending(a))
+ esas2r_do_tasklet_tasks(a);
+
+ }
+
+ if (a->flags & AF_FIRST_INIT)
+ atomic_inc(&a->disable_cnt);
+
+ esas2r_lock_clear_flags(&a->flags, AF_DISC_POLLED);
+ esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
+ }
+
+
+ esas2r_targ_db_report_changes(a);
+
+ /*
+ * For cases where (a) the initialization messages processing may
+ * handle an interrupt for a port event and a discovery is waiting, but
+ * we are not waiting for devices, or (b) the device wait time has been
+ * exhausted but there is still discovery pending, start any leftover
+ * discovery in interrupt driven mode.
+ */
+ esas2r_disc_start_waiting(a);
+
+ /* Enable chip interrupts */
+ a->int_mask = ESAS2R_INT_STS_MASK;
+ esas2r_enable_chip_interrupts(a);
+ esas2r_enable_heartbeat(a);
+ rslt = true;
+
+exit:
+ /*
+ * Regardless of whether initialization was successful, certain things
+ * need to get done before we exit.
+ */
+
+ if ((a->flags & AF_CHPRST_DETECTED)
+ && (a->flags & AF_FIRST_INIT)) {
+ /*
+ * Reinitialization was performed during the first
+ * initialization. Only clear the chip reset flag so the
+ * original device polling is not cancelled.
+ */
+ if (!rslt)
+ esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
+ } else {
+ /* First initialization or a subsequent re-init is complete. */
+ if (!rslt) {
+ esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
+ esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
+ }
+
+
+ /* Enable deferred processing after the first initialization. */
+ if (a->flags & AF_FIRST_INIT) {
+ esas2r_lock_clear_flags(&a->flags, AF_FIRST_INIT);
+
+ if (atomic_dec_return(&a->disable_cnt) == 0)
+ esas2r_do_deferred_processes(a);
+ }
+ }
+
+ return rslt;
+}
+
+void esas2r_reset_adapter(struct esas2r_adapter *a)
+{
+ esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
+ esas2r_local_reset_adapter(a);
+ esas2r_schedule_tasklet(a);
+}
+
+void esas2r_reset_chip(struct esas2r_adapter *a)
+{
+ if (!esas2r_is_adapter_present(a))
+ return;
+
+ /*
+ * Before we reset the chip, save off the VDA core dump. The VDA core
+ * dump is located in the upper 512KB of the onchip SRAM. Make sure
+ * to not overwrite a previous crash that was saved.
+ */
+ if ((a->flags2 & AF2_COREDUMP_AVAIL)
+ && !(a->flags2 & AF2_COREDUMP_SAVED)
+ && a->fw_coredump_buff) {
+ esas2r_read_mem_block(a,
+ a->fw_coredump_buff,
+ MW_DATA_ADDR_SRAM + 0x80000,
+ ESAS2R_FWCOREDUMP_SZ);
+
+ esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_SAVED);
+ }
+
+ esas2r_lock_clear_flags(&a->flags2, AF2_COREDUMP_AVAIL);
+
+ /* Reset the chip */
+ if (a->pcid->revision == MVR_FREY_B2)
+ esas2r_write_register_dword(a, MU_CTL_STATUS_IN_B2,
+ MU_CTL_IN_FULL_RST2);
+ else
+ esas2r_write_register_dword(a, MU_CTL_STATUS_IN,
+ MU_CTL_IN_FULL_RST);
+
+
+ /* Stall a little while to let the reset condition clear */
+ mdelay(10);
+}
+
+static void esas2r_power_down_notify_firmware(struct esas2r_adapter *a)
+{
+ u32 starttime;
+ u32 doorbell;
+
+ esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_POWER_DOWN);
+ starttime = jiffies_to_msecs(jiffies);
+
+ while (true) {
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell & DRBL_POWER_DOWN) {
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ doorbell);
+ break;
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+ if ((jiffies_to_msecs(jiffies) - starttime) > 30000) {
+ esas2r_hdebug("Timeout waiting for power down");
+ break;
+ }
+ }
+}
+
+/*
+ * Perform power management processing including managing device states, adapter
+ * states, interrupts, and I/O.
+ */
+void esas2r_power_down(struct esas2r_adapter *a)
+{
+ esas2r_lock_set_flags(&a->flags, AF_POWER_MGT);
+ esas2r_lock_set_flags(&a->flags, AF_POWER_DOWN);
+
+ if (!(a->flags & AF_DEGRADED_MODE)) {
+ u32 starttime;
+ u32 doorbell;
+
+ /*
+ * We are currently running OK and will be reinitializing later.
+ * Increment the disable count to coordinate with
+ * esas2r_init_adapter. We don't have to do this in degraded
+ * mode since we never enabled interrupts in the first place.
+ */
+ esas2r_disable_chip_interrupts(a);
+ esas2r_disable_heartbeat(a);
+
+ /* wait for any VDA activity to clear before continuing */
+ esas2r_write_register_dword(a, MU_DOORBELL_IN,
+ DRBL_MSG_IFC_DOWN);
+ starttime = jiffies_to_msecs(jiffies);
+
+ while (true) {
+ doorbell =
+ esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell & DRBL_MSG_IFC_DOWN) {
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ doorbell);
+ break;
+ }
+
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+ if ((jiffies_to_msecs(jiffies) - starttime) > 3000) {
+ esas2r_hdebug(
+ "timeout waiting for interface down");
+ break;
+ }
+ }
+
+ /*
+ * For versions of firmware that support it tell them the driver
+ * is powering down.
+ */
+ if (a->flags2 & AF2_VDA_POWER_DOWN)
+ esas2r_power_down_notify_firmware(a);
+ }
+
+ /* Suspend I/O processing. */
+ esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
+ esas2r_lock_set_flags(&a->flags, AF_DISC_PENDING);
+ esas2r_lock_set_flags(&a->flags, AF_CHPRST_PENDING);
+
+ esas2r_process_adapter_reset(a);
+
+ /* Remove devices now that I/O is cleaned up. */
+ a->prev_dev_cnt = esas2r_targ_db_get_tgt_cnt(a);
+ esas2r_targ_db_remove_all(a, false);
+}
+
+/*
+ * Perform power management processing including managing device states, adapter
+ * states, interrupts, and I/O.
+ */
+bool esas2r_power_up(struct esas2r_adapter *a, bool init_poll)
+{
+ bool ret;
+
+ esas2r_lock_clear_flags(&a->flags, AF_POWER_DOWN);
+ esas2r_init_pci_cfg_space(a);
+ esas2r_lock_set_flags(&a->flags, AF_FIRST_INIT);
+ atomic_inc(&a->disable_cnt);
+
+ /* reinitialize the adapter */
+ ret = esas2r_check_adapter(a);
+ if (!esas2r_init_adapter_hw(a, init_poll))
+ ret = false;
+
+ /* send the reset asynchronous event */
+ esas2r_send_reset_ae(a, true);
+
+ /* clear this flag after initialization. */
+ esas2r_lock_clear_flags(&a->flags, AF_POWER_MGT);
+ return ret;
+}
+
+bool esas2r_is_adapter_present(struct esas2r_adapter *a)
+{
+ if (a->flags & AF_NOT_PRESENT)
+ return false;
+
+ if (esas2r_read_register_dword(a, MU_DOORBELL_OUT) == 0xFFFFFFFF) {
+ esas2r_lock_set_flags(&a->flags, AF_NOT_PRESENT);
+
+ return false;
+ }
+ return true;
+}
+
+const char *esas2r_get_model_name(struct esas2r_adapter *a)
+{
+ switch (a->pcid->subsystem_device) {
+ case ATTO_ESAS_R680:
+ return "ATTO ExpressSAS R680";
+
+ case ATTO_ESAS_R608:
+ return "ATTO ExpressSAS R608";
+
+ case ATTO_ESAS_R60F:
+ return "ATTO ExpressSAS R60F";
+
+ case ATTO_ESAS_R6F0:
+ return "ATTO ExpressSAS R6F0";
+
+ case ATTO_ESAS_R644:
+ return "ATTO ExpressSAS R644";
+
+ case ATTO_ESAS_R648:
+ return "ATTO ExpressSAS R648";
+
+ case ATTO_TSSC_3808:
+ return "ATTO ThunderStream SC 3808D";
+
+ case ATTO_TSSC_3808E:
+ return "ATTO ThunderStream SC 3808E";
+
+ case ATTO_TLSH_1068:
+ return "ATTO ThunderLink SH 1068";
+ }
+
+ return "ATTO SAS Controller";
+}
+
+const char *esas2r_get_model_name_short(struct esas2r_adapter *a)
+{
+ switch (a->pcid->subsystem_device) {
+ case ATTO_ESAS_R680:
+ return "R680";
+
+ case ATTO_ESAS_R608:
+ return "R608";
+
+ case ATTO_ESAS_R60F:
+ return "R60F";
+
+ case ATTO_ESAS_R6F0:
+ return "R6F0";
+
+ case ATTO_ESAS_R644:
+ return "R644";
+
+ case ATTO_ESAS_R648:
+ return "R648";
+
+ case ATTO_TSSC_3808:
+ return "SC 3808D";
+
+ case ATTO_TSSC_3808E:
+ return "SC 3808E";
+
+ case ATTO_TLSH_1068:
+ return "SH 1068";
+ }
+
+ return "unknown";
+}
diff --git a/drivers/scsi/esas2r/esas2r_int.c b/drivers/scsi/esas2r/esas2r_int.c
new file mode 100644
index 00000000000..c2d4ff57c5c
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_int.c
@@ -0,0 +1,941 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_int.c
+ * esas2r interrupt handling
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "esas2r.h"
+
+/* Local function prototypes */
+static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell);
+static void esas2r_get_outbound_responses(struct esas2r_adapter *a);
+static void esas2r_process_bus_reset(struct esas2r_adapter *a);
+
+/*
+ * Poll the adapter for interrupts and service them.
+ * This function handles both legacy interrupts and MSI.
+ */
+void esas2r_polled_interrupt(struct esas2r_adapter *a)
+{
+ u32 intstat;
+ u32 doorbell;
+
+ esas2r_disable_chip_interrupts(a);
+
+ intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
+
+ if (intstat & MU_INTSTAT_POST_OUT) {
+ /* clear the interrupt */
+
+ esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
+ MU_OLIS_INT);
+ esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
+
+ esas2r_get_outbound_responses(a);
+ }
+
+ if (intstat & MU_INTSTAT_DRBL) {
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell != 0)
+ esas2r_doorbell_interrupt(a, doorbell);
+ }
+
+ esas2r_enable_chip_interrupts(a);
+
+ if (atomic_read(&a->disable_cnt) == 0)
+ esas2r_do_deferred_processes(a);
+}
+
+/*
+ * Legacy and MSI interrupt handlers. Note that the legacy interrupt handler
+ * schedules a TASKLET to process events, whereas the MSI handler just
+ * processes interrupt events directly.
+ */
+irqreturn_t esas2r_interrupt(int irq, void *dev_id)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
+
+ if (!esas2r_adapter_interrupt_pending(a))
+ return IRQ_NONE;
+
+ esas2r_lock_set_flags(&a->flags2, AF2_INT_PENDING);
+ esas2r_schedule_tasklet(a);
+
+ return IRQ_HANDLED;
+}
+
+void esas2r_adapter_interrupt(struct esas2r_adapter *a)
+{
+ u32 doorbell;
+
+ if (likely(a->int_stat & MU_INTSTAT_POST_OUT)) {
+ /* clear the interrupt */
+ esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
+ MU_OLIS_INT);
+ esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
+ esas2r_get_outbound_responses(a);
+ }
+
+ if (unlikely(a->int_stat & MU_INTSTAT_DRBL)) {
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell != 0)
+ esas2r_doorbell_interrupt(a, doorbell);
+ }
+
+ a->int_mask = ESAS2R_INT_STS_MASK;
+
+ esas2r_enable_chip_interrupts(a);
+
+ if (likely(atomic_read(&a->disable_cnt) == 0))
+ esas2r_do_deferred_processes(a);
+}
+
+irqreturn_t esas2r_msi_interrupt(int irq, void *dev_id)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)dev_id;
+ u32 intstat;
+ u32 doorbell;
+
+ intstat = esas2r_read_register_dword(a, MU_INT_STATUS_OUT);
+
+ if (likely(intstat & MU_INTSTAT_POST_OUT)) {
+ /* clear the interrupt */
+
+ esas2r_write_register_dword(a, MU_OUT_LIST_INT_STAT,
+ MU_OLIS_INT);
+ esas2r_flush_register_dword(a, MU_OUT_LIST_INT_STAT);
+
+ esas2r_get_outbound_responses(a);
+ }
+
+ if (unlikely(intstat & MU_INTSTAT_DRBL)) {
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell != 0)
+ esas2r_doorbell_interrupt(a, doorbell);
+ }
+
+ /*
+ * Work around a chip bug and force a new MSI to be sent if one is
+ * still pending.
+ */
+ esas2r_disable_chip_interrupts(a);
+ esas2r_enable_chip_interrupts(a);
+
+ if (likely(atomic_read(&a->disable_cnt) == 0))
+ esas2r_do_deferred_processes(a);
+
+ esas2r_do_tasklet_tasks(a);
+
+ return IRQ_HANDLED;
+}
+
+
+
+static void esas2r_handle_outbound_rsp_err(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ struct atto_vda_ob_rsp *rsp)
+{
+
+ /*
+ * For I/O requests, only copy the response if an error
+ * occurred and setup a callback to do error processing.
+ */
+ if (unlikely(rq->req_stat != RS_SUCCESS)) {
+ memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp));
+
+ if (rq->req_stat == RS_ABORTED) {
+ if (rq->timeout > RQ_MAX_TIMEOUT)
+ rq->req_stat = RS_TIMEOUT;
+ } else if (rq->req_stat == RS_SCSI_ERROR) {
+ u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat;
+
+ esas2r_trace("scsistatus: %x", scsistatus);
+
+ /* Any of these are a good result. */
+ if (scsistatus == SAM_STAT_GOOD || scsistatus ==
+ SAM_STAT_CONDITION_MET || scsistatus ==
+ SAM_STAT_INTERMEDIATE || scsistatus ==
+ SAM_STAT_INTERMEDIATE_CONDITION_MET) {
+ rq->req_stat = RS_SUCCESS;
+ rq->func_rsp.scsi_rsp.scsi_stat =
+ SAM_STAT_GOOD;
+ }
+ }
+ }
+}
+
+static void esas2r_get_outbound_responses(struct esas2r_adapter *a)
+{
+ struct atto_vda_ob_rsp *rsp;
+ u32 rspput_ptr;
+ u32 rspget_ptr;
+ struct esas2r_request *rq;
+ u32 handle;
+ unsigned long flags;
+
+ LIST_HEAD(comp_list);
+
+ esas2r_trace_enter();
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ /* Get the outbound limit and pointers */
+ rspput_ptr = le32_to_cpu(*a->outbound_copy) & MU_OLC_WRT_PTR;
+ rspget_ptr = a->last_read;
+
+ esas2r_trace("rspput_ptr: %x, rspget_ptr: %x", rspput_ptr, rspget_ptr);
+
+ /* If we don't have anything to process, get out */
+ if (unlikely(rspget_ptr == rspput_ptr)) {
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+ esas2r_trace_exit();
+ return;
+ }
+
+ /* Make sure the firmware is healthy */
+ if (unlikely(rspput_ptr >= a->list_size)) {
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+ esas2r_bugon();
+ esas2r_local_reset_adapter(a);
+ esas2r_trace_exit();
+ return;
+ }
+
+ do {
+ rspget_ptr++;
+
+ if (rspget_ptr >= a->list_size)
+ rspget_ptr = 0;
+
+ rsp = (struct atto_vda_ob_rsp *)a->outbound_list_md.virt_addr
+ + rspget_ptr;
+
+ handle = rsp->handle;
+
+ /* Verify the handle range */
+ if (unlikely(LOWORD(handle) == 0
+ || LOWORD(handle) > num_requests +
+ num_ae_requests + 1)) {
+ esas2r_bugon();
+ continue;
+ }
+
+ /* Get the request for this handle */
+ rq = a->req_table[LOWORD(handle)];
+
+ if (unlikely(rq == NULL || rq->vrq->scsi.handle != handle)) {
+ esas2r_bugon();
+ continue;
+ }
+
+ list_del(&rq->req_list);
+
+ /* Get the completion status */
+ rq->req_stat = rsp->req_stat;
+
+ esas2r_trace("handle: %x", handle);
+ esas2r_trace("rq: %p", rq);
+ esas2r_trace("req_status: %x", rq->req_stat);
+
+ if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
+ esas2r_handle_outbound_rsp_err(a, rq, rsp);
+ } else {
+ /*
+ * Copy the outbound completion struct for non-I/O
+ * requests.
+ */
+ memcpy(&rq->func_rsp, &rsp->func_rsp,
+ sizeof(rsp->func_rsp));
+ }
+
+ /* Queue the request for completion. */
+ list_add_tail(&rq->comp_list, &comp_list);
+
+ } while (rspget_ptr != rspput_ptr);
+
+ a->last_read = rspget_ptr;
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+
+ esas2r_comp_list_drain(a, &comp_list);
+ esas2r_trace_exit();
+}
+
+/*
+ * Perform all deferred processes for the adapter. Deferred
+ * processes can only be done while the current interrupt
+ * disable_cnt for the adapter is zero.
+ */
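+/*
+ * Note: this routine raises disable_cnt around its own work, so completion
+ * callbacks it triggers (which only run deferred processing when
+ * disable_cnt is zero) cannot re-enter it while it is draining.
+ */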
+void esas2r_do_deferred_processes(struct esas2r_adapter *a)
+{
+ int startreqs = 2;
+ struct esas2r_request *rq;
+ unsigned long flags;
+
+ /*
+ * startreqs is used to control starting requests
+ * that are on the deferred queue
+ * = 0 - do not start any requests
+ * = 1 - can start discovery requests
+ * = 2 - can start any request
+ */
+
+ if (a->flags & (AF_CHPRST_PENDING | AF_FLASHING))
+ startreqs = 0;
+ else if (a->flags & AF_DISC_PENDING)
+ startreqs = 1;
+
+ atomic_inc(&a->disable_cnt);
+
+ /* Clear off the completed list to be processed later. */
+
+ if (esas2r_is_tasklet_pending(a)) {
+ esas2r_schedule_tasklet(a);
+
+ startreqs = 0;
+ }
+
+ /*
+ * If we can start requests then traverse the defer queue
+ * looking for requests to start or complete
+ */
+ if (startreqs && !list_empty(&a->defer_list)) {
+ LIST_HEAD(comp_list);
+ struct list_head *element, *next;
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ list_for_each_safe(element, next, &a->defer_list) {
+ rq = list_entry(element, struct esas2r_request,
+ req_list);
+
+ if (rq->req_stat != RS_PENDING) {
+ list_del(element);
+ list_add_tail(&rq->comp_list, &comp_list);
+ }
+ /*
+ * Process discovery and OS requests separately. We
+ * can't hold up discovery requests when discovery is
+ * pending. In general, there may be different sets of
+ * conditions for starting different types of requests.
+ */
+ else if (rq->req_type == RT_DISC_REQ) {
+ list_del(element);
+ esas2r_disc_local_start_request(a, rq);
+ } else if (startreqs == 2) {
+ list_del(element);
+ esas2r_local_start_request(a, rq);
+
+ /*
+ * Flashing could have been set by last local
+ * start
+ */
+ if (a->flags & AF_FLASHING)
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+ esas2r_comp_list_drain(a, &comp_list);
+ }
+
+ atomic_dec(&a->disable_cnt);
+}
+
+/*
+ * Process an adapter reset (or one that is about to happen)
+ * by making sure all outstanding requests are completed that
+ * haven't been already.
+ */
+void esas2r_process_adapter_reset(struct esas2r_adapter *a)
+{
+ struct esas2r_request *rq = &a->general_req;
+ unsigned long flags;
+ struct esas2r_disc_context *dc;
+
+ LIST_HEAD(comp_list);
+ struct list_head *element;
+
+ esas2r_trace_enter();
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ /* abort the active discovery, if any. */
+
+ if (rq->interrupt_cx) {
+ dc = (struct esas2r_disc_context *)rq->interrupt_cx;
+
+ dc->disc_evt = 0;
+
+ esas2r_lock_clear_flags(&a->flags, AF_DISC_IN_PROG);
+ }
+
+ /*
+ * Just clear the interrupt callback for now. It will be dequeued if
+ * and when we find it on the active queue and we don't want the
+ * callback called. Also set the dummy completion callback in case we
+ * were doing an I/O request.
+ */
+
+ rq->interrupt_cx = NULL;
+ rq->interrupt_cb = NULL;
+
+ rq->comp_cb = esas2r_dummy_complete;
+
+ /* Reset the read and write pointers */
+
+ *a->outbound_copy =
+ a->last_write =
+ a->last_read = a->list_size - 1;
+
+ esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
+
+ /* Kill all the requests on the active list */
+ list_for_each(element, &a->defer_list) {
+ rq = list_entry(element, struct esas2r_request, req_list);
+
+ if (rq->req_stat == RS_STARTED)
+ if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
+ list_add_tail(&rq->comp_list, &comp_list);
+ }
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+ esas2r_comp_list_drain(a, &comp_list);
+ esas2r_process_bus_reset(a);
+ esas2r_trace_exit();
+}
+
+static void esas2r_process_bus_reset(struct esas2r_adapter *a)
+{
+ struct esas2r_request *rq;
+ struct list_head *element;
+ unsigned long flags;
+
+ LIST_HEAD(comp_list);
+
+ esas2r_trace_enter();
+
+ esas2r_hdebug("reset detected");
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ /* kill all the requests on the deferred queue */
+ list_for_each(element, &a->defer_list) {
+ rq = list_entry(element, struct esas2r_request, req_list);
+ if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
+ list_add_tail(&rq->comp_list, &comp_list);
+ }
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+
+ esas2r_comp_list_drain(a, &comp_list);
+
+ if (atomic_read(&a->disable_cnt) == 0)
+ esas2r_do_deferred_processes(a);
+
+ esas2r_lock_clear_flags(&a->flags, AF_OS_RESET);
+
+ esas2r_trace_exit();
+}
+
+static void esas2r_chip_rst_needed_during_tasklet(struct esas2r_adapter *a)
+{
+
+ esas2r_lock_clear_flags(&a->flags, AF_CHPRST_NEEDED);
+ esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED);
+ esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED);
+ esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING);
+ /*
+ * Make sure we don't attempt more than 3 resets
+ * when the uptime between resets does not exceed one
+ * minute. This will stop any situation where there is
+ * really something wrong with the hardware. The way
+ * this works is that we start with uptime ticks at 0.
+ * Each time we do a reset, we add 20 seconds worth to
+ * the count. Each time a timer tick occurs, as long
+ * as a chip reset is not pending, we decrement the
+ * tick count. If the uptime tick count ever reaches 60
+ * seconds worth, we disable the adapter from that
+ * point forward. Three strikes, you're out.
+ */
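+ /*
+ * For example: with 20 seconds charged per reset and a 60-second
+ * limit, a third reset before the earlier ones have ticked back down
+ * pushes chip_uptime to the limit and the adapter is disabled below.
+ */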
+ if (!esas2r_is_adapter_present(a) || (a->chip_uptime >=
+ ESAS2R_CHP_UPTIME_MAX)) {
+ esas2r_hdebug("*** adapter disabled ***");
+
+ /*
+ * Ok, some kind of hard failure. Make sure we
+ * exit this loop with chip interrupts
+ * permanently disabled so we don't lock up the
+ * entire system. Also flag degraded mode to
+ * prevent the heartbeat from trying to recover.
+ */
+
+ esas2r_lock_set_flags(&a->flags, AF_DEGRADED_MODE);
+ esas2r_lock_set_flags(&a->flags, AF_DISABLED);
+ esas2r_lock_clear_flags(&a->flags, AF_CHPRST_PENDING);
+ esas2r_lock_clear_flags(&a->flags, AF_DISC_PENDING);
+
+ esas2r_disable_chip_interrupts(a);
+ a->int_mask = 0;
+ esas2r_process_adapter_reset(a);
+
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "Adapter disabled because of hardware failure");
+ } else {
+ u32 flags =
+ esas2r_lock_set_flags(&a->flags, AF_CHPRST_STARTED);
+
+ if (!(flags & AF_CHPRST_STARTED))
+ /*
+ * Only disable interrupts if this is
+ * the first reset attempt.
+ */
+ esas2r_disable_chip_interrupts(a);
+
+ if ((a->flags & AF_POWER_MGT) && !(a->flags & AF_FIRST_INIT) &&
+ !(flags & AF_CHPRST_STARTED)) {
+ /*
+ * Don't reset the chip on the first
+ * deferred power up attempt.
+ */
+ } else {
+ esas2r_hdebug("*** resetting chip ***");
+ esas2r_reset_chip(a);
+ }
+
+ /* Kick off the reinitialization */
+ a->chip_uptime += ESAS2R_CHP_UPTIME_CNT;
+ a->chip_init_time = jiffies_to_msecs(jiffies);
+ if (!(a->flags & AF_POWER_MGT)) {
+ esas2r_process_adapter_reset(a);
+
+ if (!(flags & AF_CHPRST_STARTED)) {
+ /* Remove devices now that I/O is cleaned up. */
+ a->prev_dev_cnt =
+ esas2r_targ_db_get_tgt_cnt(a);
+ esas2r_targ_db_remove_all(a, false);
+ }
+ }
+
+ a->int_mask = 0;
+ }
+}
+
+static void esas2r_handle_chip_rst_during_tasklet(struct esas2r_adapter *a)
+{
+ while (a->flags & AF_CHPRST_DETECTED) {
+ /*
+ * Balance the enable in esas2r_init_adapter_hw();
+ * esas2r_power_down() already took care of it for power
+ * management.
+ */
+ if (!(a->flags & AF_DEGRADED_MODE) && !(a->flags &
+ AF_POWER_MGT))
+ esas2r_disable_chip_interrupts(a);
+
+ /* Reinitialize the chip. */
+ esas2r_check_adapter(a);
+ esas2r_init_adapter_hw(a, 0);
+
+ if (a->flags & AF_CHPRST_NEEDED)
+ break;
+
+ if (a->flags & AF_POWER_MGT) {
+ /* Recovery from power management. */
+ if (a->flags & AF_FIRST_INIT) {
+ /* Chip reset during normal power up */
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "The firmware was reset during a normal power-up sequence");
+ } else {
+ /* Deferred power up complete. */
+ esas2r_lock_clear_flags(&a->flags,
+ AF_POWER_MGT);
+ esas2r_send_reset_ae(a, true);
+ }
+ } else {
+ /* Recovery from online chip reset. */
+ if (a->flags & AF_FIRST_INIT) {
+ /* Chip reset during driver load */
+ } else {
+ /* Chip reset after driver load */
+ esas2r_send_reset_ae(a, false);
+ }
+
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "Recovering from a chip reset while the chip was online");
+ }
+
+ esas2r_lock_clear_flags(&a->flags, AF_CHPRST_STARTED);
+ esas2r_enable_chip_interrupts(a);
+
+ /*
+ * Clear this flag last! This indicates that the chip has been
+ * reset already during initialization.
+ */
+ esas2r_lock_clear_flags(&a->flags, AF_CHPRST_DETECTED);
+ }
+}
+
+
+/* Perform deferred tasks when chip interrupts are disabled */
+void esas2r_do_tasklet_tasks(struct esas2r_adapter *a)
+{
+ if (a->flags & (AF_CHPRST_NEEDED | AF_CHPRST_DETECTED)) {
+ if (a->flags & AF_CHPRST_NEEDED)
+ esas2r_chip_rst_needed_during_tasklet(a);
+
+ esas2r_handle_chip_rst_during_tasklet(a);
+ }
+
+ if (a->flags & AF_BUSRST_NEEDED) {
+ esas2r_hdebug("hard resetting bus");
+
+ esas2r_lock_clear_flags(&a->flags, AF_BUSRST_NEEDED);
+
+ if (a->flags & AF_FLASHING)
+ esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED);
+ else
+ esas2r_write_register_dword(a, MU_DOORBELL_IN,
+ DRBL_RESET_BUS);
+ }
+
+ if (a->flags & AF_BUSRST_DETECTED) {
+ esas2r_process_bus_reset(a);
+
+ esas2r_log_dev(ESAS2R_LOG_WARN,
+ &(a->host->shost_gendev),
+ "scsi_report_bus_reset() called");
+
+ scsi_report_bus_reset(a->host, 0);
+
+ esas2r_lock_clear_flags(&a->flags, AF_BUSRST_DETECTED);
+ esas2r_lock_clear_flags(&a->flags, AF_BUSRST_PENDING);
+
+ esas2r_log(ESAS2R_LOG_WARN, "Bus reset complete");
+ }
+
+ if (a->flags & AF_PORT_CHANGE) {
+ esas2r_lock_clear_flags(&a->flags, AF_PORT_CHANGE);
+
+ esas2r_targ_db_report_changes(a);
+ }
+
+ if (atomic_read(&a->disable_cnt) == 0)
+ esas2r_do_deferred_processes(a);
+}
+
+static void esas2r_doorbell_interrupt(struct esas2r_adapter *a, u32 doorbell)
+{
+ if (!(doorbell & DRBL_FORCE_INT)) {
+ esas2r_trace_enter();
+ esas2r_trace("doorbell: %x", doorbell);
+ }
+
+ /* First clear the doorbell bits */
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT, doorbell);
+
+ if (doorbell & DRBL_RESET_BUS)
+ esas2r_lock_set_flags(&a->flags, AF_BUSRST_DETECTED);
+
+ if (doorbell & DRBL_FORCE_INT)
+ esas2r_lock_clear_flags(&a->flags, AF_HEARTBEAT);
+
+ if (doorbell & DRBL_PANIC_REASON_MASK) {
+ esas2r_hdebug("*** Firmware Panic ***");
+ esas2r_log(ESAS2R_LOG_CRIT, "The firmware has panicked");
+ }
+
+ if (doorbell & DRBL_FW_RESET) {
+ esas2r_lock_set_flags(&a->flags2, AF2_COREDUMP_AVAIL);
+ esas2r_local_reset_adapter(a);
+ }
+
+ if (!(doorbell & DRBL_FORCE_INT))
+ esas2r_trace_exit();
+}
+
+void esas2r_force_interrupt(struct esas2r_adapter *a)
+{
+ esas2r_write_register_dword(a, MU_DOORBELL_IN, DRBL_FORCE_INT |
+ DRBL_DRV_VER);
+}
+
+
+static void esas2r_lun_event(struct esas2r_adapter *a, union atto_vda_ae *ae,
+ u16 target, u32 length)
+{
+ struct esas2r_target *t = a->targetdb + target;
+ u32 cplen = length;
+ unsigned long flags;
+
+ if (cplen > sizeof(t->lu_event))
+ cplen = sizeof(t->lu_event);
+
+ esas2r_trace("ae->lu.dwevent: %x", ae->lu.dwevent);
+ esas2r_trace("ae->lu.bystate: %x", ae->lu.bystate);
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+
+ t->new_target_state = TS_INVALID;
+
+ if (ae->lu.dwevent & VDAAE_LU_LOST) {
+ t->new_target_state = TS_NOT_PRESENT;
+ } else {
+ switch (ae->lu.bystate) {
+ case VDAAE_LU_NOT_PRESENT:
+ case VDAAE_LU_OFFLINE:
+ case VDAAE_LU_DELETED:
+ case VDAAE_LU_FACTORY_DISABLED:
+ t->new_target_state = TS_NOT_PRESENT;
+ break;
+
+ case VDAAE_LU_ONLINE:
+ case VDAAE_LU_DEGRADED:
+ t->new_target_state = TS_PRESENT;
+ break;
+ }
+ }
+
+ if (t->new_target_state != TS_INVALID) {
+ memcpy(&t->lu_event, &ae->lu, cplen);
+
+ esas2r_disc_queue_event(a, DCDE_DEV_CHANGE);
+ }
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+}
+
+
+
+void esas2r_ae_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+ union atto_vda_ae *ae =
+ (union atto_vda_ae *)rq->vda_rsp_data->ae_data.event_data;
+ u32 length = le32_to_cpu(rq->func_rsp.ae_rsp.length);
+ union atto_vda_ae *last =
+ (union atto_vda_ae *)(rq->vda_rsp_data->ae_data.event_data
+ + length);
+
+ esas2r_trace_enter();
+ esas2r_trace("length: %d", length);
+
+ if (length > sizeof(struct atto_vda_ae_data)
+ || (length & 3) != 0
+ || length == 0) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "The AE request response length (%p) is too long: %d",
+ rq, length);
+
+ esas2r_hdebug("aereq->length (0x%x) too long", length);
+ esas2r_bugon();
+
+ last = ae;
+ }
+
+ while (ae < last) {
+ u16 target;
+
+ esas2r_trace("ae: %p", ae);
+ esas2r_trace("ae->hdr: %p", &(ae->hdr));
+
+ length = ae->hdr.bylength;
+
+ if (length > (u32)((u8 *)last - (u8 *)ae)
+ || (length & 3) != 0
+ || length == 0) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "the async event length is invalid (%p): %d",
+ ae, length);
+
+ esas2r_hdebug("ae->hdr.length (0x%x) invalid", length);
+ esas2r_bugon();
+
+ break;
+ }
+
+ esas2r_nuxi_ae_data(ae);
+
+ esas2r_queue_fw_event(a, fw_event_vda_ae, ae,
+ sizeof(union atto_vda_ae));
+
+ switch (ae->hdr.bytype) {
+ case VDAAE_HDR_TYPE_RAID:
+
+ if (ae->raid.dwflags & (VDAAE_GROUP_STATE
+ | VDAAE_RBLD_STATE
+ | VDAAE_MEMBER_CHG
+ | VDAAE_PART_CHG)) {
+ esas2r_log(ESAS2R_LOG_INFO,
+ "RAID event received - name:%s rebuild_state:%d group_state:%d",
+ ae->raid.acname,
+ ae->raid.byrebuild_state,
+ ae->raid.bygroup_state);
+ }
+
+ break;
+
+ case VDAAE_HDR_TYPE_LU:
+ esas2r_log(ESAS2R_LOG_INFO,
+ "LUN event received: event:%d target_id:%d LUN:%d state:%d",
+ ae->lu.dwevent,
+ ae->lu.id.tgtlun.wtarget_id,
+ ae->lu.id.tgtlun.bylun,
+ ae->lu.bystate);
+
+ target = ae->lu.id.tgtlun.wtarget_id;
+
+ if (target < ESAS2R_MAX_TARGETS)
+ esas2r_lun_event(a, ae, target, length);
+
+ break;
+
+ case VDAAE_HDR_TYPE_DISK:
+ esas2r_log(ESAS2R_LOG_INFO, "Disk event received");
+ break;
+
+ default:
+
+ /* Silently ignore the rest and let the apps deal with
+ * them.
+ */
+
+ break;
+ }
+
+ ae = (union atto_vda_ae *)((u8 *)ae + length);
+ }
+
+ /* Now requeue it. */
+ esas2r_start_ae_request(a, rq);
+ esas2r_trace_exit();
+}
+
+/* Send an asynchronous event for a chip reset or power management. */
+void esas2r_send_reset_ae(struct esas2r_adapter *a, bool pwr_mgt)
+{
+ struct atto_vda_ae_hdr ae;
+
+ if (pwr_mgt)
+ ae.bytype = VDAAE_HDR_TYPE_PWRMGT;
+ else
+ ae.bytype = VDAAE_HDR_TYPE_RESET;
+
+ ae.byversion = VDAAE_HDR_VER_0;
+ ae.byflags = 0;
+ ae.bylength = (u8)sizeof(struct atto_vda_ae_hdr);
+
+ if (pwr_mgt)
+ esas2r_hdebug("*** sending power management AE ***");
+ else
+ esas2r_hdebug("*** sending reset AE ***");
+
+ esas2r_queue_fw_event(a, fw_event_vda_ae, &ae,
+ sizeof(union atto_vda_ae));
+}
+
+void esas2r_dummy_complete(struct esas2r_adapter *a, struct esas2r_request *rq)
+{}
+
+static void esas2r_check_req_rsp_sense(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ u8 snslen, snslen2;
+
+ snslen = snslen2 = rq->func_rsp.scsi_rsp.sense_len;
+
+ if (snslen > rq->sense_len)
+ snslen = rq->sense_len;
+
+ if (snslen) {
+ if (rq->sense_buf)
+ memcpy(rq->sense_buf, rq->data_buf, snslen);
+ else
+ rq->sense_buf = (u8 *)rq->data_buf;
+
+ /* See about possible sense data */
+ if (snslen2 > 0x0c) {
+ u8 *s = (u8 *)rq->data_buf;
+
+ esas2r_trace_enter();
+
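+ /*
+ * In fixed-format sense data, bytes 0x0c and 0x0d hold the
+ * additional sense code and qualifier; 3Fh/0Eh means
+ * REPORTED LUNS DATA HAS CHANGED.
+ */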
+ /* Report LUNS data has changed */
+ if (s[0x0c] == 0x3f && s[0x0d] == 0x0E) {
+ esas2r_trace("rq->target_id: %d",
+ rq->target_id);
+ esas2r_target_state_changed(a, rq->target_id,
+ TS_LUN_CHANGE);
+ }
+
+ esas2r_trace("add_sense_key=%x", s[0x0c]);
+ esas2r_trace("add_sense_qual=%x", s[0x0d]);
+ esas2r_trace_exit();
+ }
+ }
+
+ rq->sense_len = snslen;
+}
+
+
+void esas2r_complete_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ if (rq->vrq->scsi.function == VDA_FUNC_FLASH
+ && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT)
+ esas2r_lock_clear_flags(&a->flags, AF_FLASHING);
+
+ /* See if we set up a callback to do special processing */
+
+ if (rq->interrupt_cb) {
+ (*rq->interrupt_cb)(a, rq);
+
+ if (rq->req_stat == RS_PENDING) {
+ esas2r_start_request(a, rq);
+ return;
+ }
+ }
+
+ if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)
+ && unlikely(rq->req_stat != RS_SUCCESS)) {
+ esas2r_check_req_rsp_sense(a, rq);
+ esas2r_log_request_failure(a, rq);
+ }
+
+ (*rq->comp_cb)(a, rq);
+}
diff --git a/drivers/scsi/esas2r/esas2r_io.c b/drivers/scsi/esas2r/esas2r_io.c
new file mode 100644
index 00000000000..324e2626a08
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_io.c
@@ -0,0 +1,880 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_io.c
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+ struct esas2r_target *t = NULL;
+ struct esas2r_request *startrq = rq;
+ unsigned long flags;
+
+ if (unlikely(a->flags & (AF_DEGRADED_MODE | AF_POWER_DOWN))) {
+ if (rq->vrq->scsi.function == VDA_FUNC_SCSI)
+ rq->req_stat = RS_SEL2;
+ else
+ rq->req_stat = RS_DEGRADED;
+ } else if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) {
+ t = a->targetdb + rq->target_id;
+
+ if (unlikely(t >= a->targetdb_end
+ || !(t->flags & TF_USED))) {
+ rq->req_stat = RS_SEL;
+ } else {
+ /* copy in the target ID. */
+ rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id);
+
+ /*
+ * Test if we want to report RS_SEL for missing target.
+ * Note that if AF_DISC_PENDING is set then this will
+ * go on the defer queue.
+ */
+ if (unlikely(t->target_state != TS_PRESENT
+ && !(a->flags & AF_DISC_PENDING)))
+ rq->req_stat = RS_SEL;
+ }
+ }
+
+ if (unlikely(rq->req_stat != RS_PENDING)) {
+ esas2r_complete_request(a, rq);
+ return;
+ }
+
+ esas2r_trace("rq=%p", rq);
+ esas2r_trace("rq->vrq->scsi.handle=%x", rq->vrq->scsi.handle);
+
+ if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
+ esas2r_trace("rq->target_id=%d", rq->target_id);
+ esas2r_trace("rq->vrq->scsi.flags=%x", rq->vrq->scsi.flags);
+ }
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ if (likely(list_empty(&a->defer_list) &&
+ !(a->flags &
+ (AF_CHPRST_PENDING | AF_FLASHING | AF_DISC_PENDING))))
+ esas2r_local_start_request(a, startrq);
+ else
+ list_add_tail(&startrq->req_list, &a->defer_list);
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+}
+
+/*
+ * Starts the specified request. All requests have RS_PENDING set when this
+ * routine is called. The caller is usually esas2r_start_request, but
+ * esas2r_do_deferred_processes will start requests that are deferred.
+ *
+ * The caller must ensure that requests can be started.
+ *
+ * esas2r_start_request will defer a request if there are already requests
+ * waiting or there is a chip reset pending. Once the reset condition clears,
+ * esas2r_do_deferred_processes will call this function to start the request.
+ *
+ * When a request is started, it is placed on the active list and queued to
+ * the controller.
+ */
+void esas2r_local_start_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ esas2r_trace_enter();
+ esas2r_trace("rq=%p", rq);
+ esas2r_trace("rq->vrq:%p", rq->vrq);
+ esas2r_trace("rq->vrq_md->phys_addr:%x", rq->vrq_md->phys_addr);
+
+ if (unlikely(rq->vrq->scsi.function == VDA_FUNC_FLASH
+ && rq->vrq->flash.sub_func == VDA_FLASH_COMMIT))
+ esas2r_lock_set_flags(&a->flags, AF_FLASHING);
+
+ list_add_tail(&rq->req_list, &a->active_list);
+ esas2r_start_vda_request(a, rq);
+ esas2r_trace_exit();
+ return;
+}
+
+void esas2r_start_vda_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct esas2r_inbound_list_source_entry *element;
+ u32 dw;
+
+ rq->req_stat = RS_STARTED;
+ /*
+ * Calculate the inbound list entry location and the current state of
+ * toggle bit.
+ */
+ a->last_write++;
+ if (a->last_write >= a->list_size) {
+ a->last_write = 0;
+ /* update the toggle bit */
+ if (a->flags & AF_COMM_LIST_TOGGLE)
+ esas2r_lock_clear_flags(&a->flags,
+ AF_COMM_LIST_TOGGLE);
+ else
+ esas2r_lock_set_flags(&a->flags, AF_COMM_LIST_TOGGLE);
+ }
+
+ element = (struct esas2r_inbound_list_source_entry *)
+ a->inbound_list_md.virt_addr + a->last_write;
+
+ /* Set the VDA request size if it was never modified */
+ if (rq->vda_req_sz == RQ_SIZE_DEFAULT)
+ rq->vda_req_sz = (u16)(a->max_vdareq_size / sizeof(u32));
+
+ element->address = cpu_to_le64(rq->vrq_md->phys_addr);
+ element->length = cpu_to_le32(rq->vda_req_sz);
+
+ /* Update the write pointer */
+ dw = a->last_write;
+
+ if (a->flags & AF_COMM_LIST_TOGGLE)
+ dw |= MU_ILW_TOGGLE;
+
+ esas2r_trace("rq->vrq->scsi.handle:%x", rq->vrq->scsi.handle);
+ esas2r_trace("dw:%x", dw);
+ esas2r_trace("rq->vda_req_sz:%x", rq->vda_req_sz);
+ esas2r_write_register_dword(a, MU_IN_LIST_WRITE, dw);
+}
+
+/*
+ * Build the scatter/gather list for an I/O request according to the
+ * specifications placed in the s/g context. The caller must initialize
+ * context prior to the initial call by calling esas2r_sgc_init().
+ */
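+/*
+ * Rough caller sketch, modeled on the ioctl handlers later in this file
+ * (the buffer, transfer_length and get_phys names below are placeholders):
+ *
+ *	esas2r_sgc_init(&sgc, a, rq, rq->vrq->ioctl.sge);
+ *	sgc.cur_offset = buffer;
+ *	sgc.get_phys_addr = get_phys;     fills *addr, returns the run length
+ *	sgc.length = transfer_length;
+ *	if (!esas2r_build_sg_list(a, rq, &sgc))
+ *		out of SGL resources - retry later with the same context
+ */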
+bool esas2r_build_sg_list_sge(struct esas2r_adapter *a,
+ struct esas2r_sg_context *sgc)
+{
+ struct esas2r_request *rq = sgc->first_req;
+ union atto_vda_req *vrq = rq->vrq;
+
+ while (sgc->length) {
+ u32 rem = 0;
+ u64 addr;
+ u32 len;
+
+ len = (*sgc->get_phys_addr)(sgc, &addr);
+
+ if (unlikely(len == 0))
+ return false;
+
+ /* if current length is more than what's left, stop there */
+ if (unlikely(len > sgc->length))
+ len = sgc->length;
+
+another_entry:
+ /* limit to a round number less than the maximum length */
+ if (len > SGE_LEN_MAX) {
+ /*
+ * Save the remainder of the split. Whenever we limit
+ * an entry we come back around to build entries out
+ * of the leftover. We do this to prevent multiple
+ * calls to the get_phys_addr() function for an SGE
+ * that is too large.
+ */
+ rem = len - SGE_LEN_MAX;
+ len = SGE_LEN_MAX;
+ }
+
+ /* See if we need to allocate a new SGL */
+ if (unlikely(sgc->sge.a64.curr > sgc->sge.a64.limit)) {
+ u8 sgelen;
+ struct esas2r_mem_desc *sgl;
+
+ /*
+ * If no SGls are available, return failure. The
+ * caller can call us later with the current context
+ * to pick up here.
+ */
+ sgl = esas2r_alloc_sgl(a);
+
+ if (unlikely(sgl == NULL))
+ return false;
+
+ /* Calculate the length of the last SGE filled in */
+ sgelen = (u8)((u8 *)sgc->sge.a64.curr
+ - (u8 *)sgc->sge.a64.last);
+
+ /*
+ * Copy the last SGE filled in to the first entry of
+ * the new SGL to make room for the chain entry.
+ */
+ memcpy(sgl->virt_addr, sgc->sge.a64.last, sgelen);
+
+ /* Figure out the new curr pointer in the new segment */
+ sgc->sge.a64.curr =
+ (struct atto_vda_sge *)((u8 *)sgl->virt_addr +
+ sgelen);
+
+ /* Set the limit pointer and build the chain entry */
+ sgc->sge.a64.limit =
+ (struct atto_vda_sge *)((u8 *)sgl->virt_addr
+ + sgl_page_size
+ - sizeof(struct
+ atto_vda_sge));
+ sgc->sge.a64.last->length = cpu_to_le32(
+ SGE_CHAIN | SGE_ADDR_64);
+ sgc->sge.a64.last->address =
+ cpu_to_le64(sgl->phys_addr);
+
+ /*
+ * Now, if there was a previous chain entry, then
+ * update it to contain the length of this segment
+ * and size of this chain. Otherwise this is the
+ * first SGL, so set the chain_offset in the request.
+ */
+ if (sgc->sge.a64.chain) {
+ sgc->sge.a64.chain->length |= cpu_to_le32(
+ ((u8 *)(sgc->sge.a64.last + 1)
+ - (u8 *)rq->sg_table->virt_addr)
+ + sizeof(struct atto_vda_sge) *
+ LOBIT(SGE_CHAIN_SZ));
+ } else {
+ vrq->scsi.chain_offset = (u8)
+ ((u8 *)sgc->sge.a64.last - (u8 *)vrq);
+
+ /*
+ * This is the first SGL, so set the
+ * chain_offset and the VDA request size in
+ * the request.
+ */
+ rq->vda_req_sz =
+ (vrq->scsi.chain_offset +
+ sizeof(struct atto_vda_sge) +
+ 3)
+ / sizeof(u32);
+ }
+
+ /*
+ * Remember this so when we get a new SGL filled in we
+ * can update the length of this chain entry.
+ */
+ sgc->sge.a64.chain = sgc->sge.a64.last;
+
+ /* Now link the new SGL onto the primary request. */
+ list_add(&sgl->next_desc, &rq->sg_table_head);
+ }
+
+ /* Update last one filled in */
+ sgc->sge.a64.last = sgc->sge.a64.curr;
+
+ /* Build the new SGE and update the S/G context */
+ sgc->sge.a64.curr->length = cpu_to_le32(SGE_ADDR_64 | len);
+ sgc->sge.a64.curr->address = cpu_to_le64(addr);
+ sgc->sge.a64.curr++;
+ sgc->cur_offset += len;
+ sgc->length -= len;
+
+ /*
+ * Check if we previously split an entry. If so we have to
+ * pick up where we left off.
+ */
+ if (rem) {
+ addr += len;
+ len = rem;
+ rem = 0;
+ goto another_entry;
+ }
+ }
+
+ /* Mark the end of the SGL */
+ sgc->sge.a64.last->length |= cpu_to_le32(SGE_LAST);
+
+ /*
+ * If there was a previous chain entry, update the length to indicate
+ * the length of this last segment.
+ */
+ if (sgc->sge.a64.chain) {
+ sgc->sge.a64.chain->length |= cpu_to_le32(
+ ((u8 *)(sgc->sge.a64.curr) -
+ (u8 *)rq->sg_table->virt_addr));
+ } else {
+ u16 reqsize;
+
+ /*
+ * The entire VDA request was not used, so let's
+ * set the size of the VDA request to be DMA'd.
+ */
+ reqsize =
+ ((u16)((u8 *)sgc->sge.a64.last - (u8 *)vrq)
+ + sizeof(struct atto_vda_sge) + 3) / sizeof(u32);
+
+ /*
+ * Only update the request size if it is bigger than what is
+ * already there. We can come in here twice for some management
+ * commands.
+ */
+ if (reqsize > rq->vda_req_sz)
+ rq->vda_req_sz = reqsize;
+ }
+ return true;
+}
+
+
+/*
+ * Create PRD list for each I-block consumed by the command. This routine
+ * determines how much data is required from each I-block being consumed
+ * by the command. The first and last I-blocks can be partials and all of
+ * the I-blocks in between are for a full I-block of data.
+ *
+ * The interleave size is used to determine the number of bytes in the 1st
+ * I-block; the remaining I-blocks carry whatever data remains.
+ */
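+/*
+ * For example (hypothetical numbers): with an interleave of 128 blocks and
+ * a starting LBA of 100, the first PRD list covers 128 - (100 & 127) = 28
+ * blocks; each list after that covers a full 128-block I-block until the
+ * transfer is exhausted.
+ */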
+static bool esas2r_build_prd_iblk(struct esas2r_adapter *a,
+ struct esas2r_sg_context *sgc)
+{
+ struct esas2r_request *rq = sgc->first_req;
+ u64 addr;
+ u32 len;
+ struct esas2r_mem_desc *sgl;
+ u32 numchain = 1;
+ u32 rem = 0;
+
+ while (sgc->length) {
+ /* Get the next address/length pair */
+
+ len = (*sgc->get_phys_addr)(sgc, &addr);
+
+ if (unlikely(len == 0))
+ return false;
+
+ /* If current length is more than what's left, stop there */
+
+ if (unlikely(len > sgc->length))
+ len = sgc->length;
+
+another_entry:
+ /* Limit to a round number less than the maximum length */
+
+ if (len > PRD_LEN_MAX) {
+ /*
+ * Save the remainder of the split. Whenever we limit
+ * an entry we come back around to build entries out
+ * of the leftover. We do this to prevent multiple
+ * calls to the get_phys_addr() function for an SGE
+ * that is too large.
+ */
+ rem = len - PRD_LEN_MAX;
+ len = PRD_LEN_MAX;
+ }
+
+ /* See if we need to allocate a new SGL */
+ if (sgc->sge.prd.sge_cnt == 0) {
+ if (len == sgc->length) {
+ /*
+ * We only have 1 PRD entry left.
+ * It can be placed where the chain
+ * entry would have gone
+ */
+
+ /* Build the simple SGE */
+ sgc->sge.prd.curr->ctl_len = cpu_to_le32(
+ PRD_DATA | len);
+ sgc->sge.prd.curr->address = cpu_to_le64(addr);
+
+ /* Adjust length related fields */
+ sgc->cur_offset += len;
+ sgc->length -= len;
+
+ /* We use the reserved chain entry for data */
+ numchain = 0;
+
+ break;
+ }
+
+ if (sgc->sge.prd.chain) {
+ /*
+ * Fill in the entry count of the current SGL in the
+ * previous chain entry; this SGL may not be
+ * completely full.
+ */
+
+ sgc->sge.prd.chain->ctl_len |= cpu_to_le32(
+ sgc->sge.prd.sgl_max_cnt);
+ }
+
+ /*
+ * If no SGls are available, return failure. The
+ * caller can call us later with the current context
+ * to pick up here.
+ */
+
+ sgl = esas2r_alloc_sgl(a);
+
+ if (unlikely(sgl == NULL))
+ return false;
+
+ /*
+ * Link the new SGL onto the chain
+ * They are in reverse order
+ */
+ list_add(&sgl->next_desc, &rq->sg_table_head);
+
+ /*
+ * An SGL was just filled in and we are starting
+ * a new SGL. Prime the chain of the ending SGL with
+ * info that points to the new SGL. The length gets
+ * filled in when the new SGL is filled or ended
+ */
+
+ sgc->sge.prd.chain = sgc->sge.prd.curr;
+
+ sgc->sge.prd.chain->ctl_len = cpu_to_le32(PRD_CHAIN);
+ sgc->sge.prd.chain->address =
+ cpu_to_le64(sgl->phys_addr);
+
+ /*
+ * Start a new segment.
+ * Take one away and save for chain SGE
+ */
+
+ sgc->sge.prd.curr =
+ (struct atto_physical_region_description *)sgl->virt_addr;
+ sgc->sge.prd.sge_cnt = sgc->sge.prd.sgl_max_cnt - 1;
+ }
+
+ sgc->sge.prd.sge_cnt--;
+ /* Build the simple SGE */
+ sgc->sge.prd.curr->ctl_len = cpu_to_le32(PRD_DATA | len);
+ sgc->sge.prd.curr->address = cpu_to_le64(addr);
+
+ /* Used another element. Point to the next one */
+
+ sgc->sge.prd.curr++;
+
+ /* Adjust length related fields */
+
+ sgc->cur_offset += len;
+ sgc->length -= len;
+
+ /*
+ * Check if we previously split an entry. If so we have to
+ * pick up where we left off.
+ */
+
+ if (rem) {
+ addr += len;
+ len = rem;
+ rem = 0;
+ goto another_entry;
+ }
+ }
+
+ if (!list_empty(&rq->sg_table_head)) {
+ if (sgc->sge.prd.chain) {
+ sgc->sge.prd.chain->ctl_len |=
+ cpu_to_le32(sgc->sge.prd.sgl_max_cnt
+ - sgc->sge.prd.sge_cnt
+ - numchain);
+ }
+ }
+
+ return true;
+}
+
+bool esas2r_build_sg_list_prd(struct esas2r_adapter *a,
+ struct esas2r_sg_context *sgc)
+{
+ struct esas2r_request *rq = sgc->first_req;
+ u32 len = sgc->length;
+ struct esas2r_target *t = a->targetdb + rq->target_id;
+ u8 is_i_o = 0;
+ u16 reqsize;
+ struct atto_physical_region_description *curr_iblk_chn;
+ u8 *cdb = (u8 *)&rq->vrq->scsi.cdb[0];
+
+ /*
+ * extract LBA from command so we can determine
+ * the I-Block boundary
+ */
+
+ if (rq->vrq->scsi.function == VDA_FUNC_SCSI
+ && t->target_state == TS_PRESENT
+ && !(t->flags & TF_PASS_THRU)) {
+ u32 lbalo = 0;
+
+ switch (rq->vrq->scsi.cdb[0]) {
+ case READ_16:
+ case WRITE_16:
+ {
+ lbalo =
+ MAKEDWORD(MAKEWORD(cdb[9],
+ cdb[8]),
+ MAKEWORD(cdb[7],
+ cdb[6]));
+ is_i_o = 1;
+ break;
+ }
+
+ case READ_12:
+ case WRITE_12:
+ case READ_10:
+ case WRITE_10:
+ {
+ lbalo =
+ MAKEDWORD(MAKEWORD(cdb[5],
+ cdb[4]),
+ MAKEWORD(cdb[3],
+ cdb[2]));
+ is_i_o = 1;
+ break;
+ }
+
+ case READ_6:
+ case WRITE_6:
+ {
+ lbalo =
+ MAKEDWORD(MAKEWORD(cdb[3],
+ cdb[2]),
+ MAKEWORD(cdb[1] & 0x1F,
+ 0));
+ is_i_o = 1;
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ if (is_i_o) {
+ u32 startlba;
+
+ rq->vrq->scsi.iblk_cnt_prd = 0;
+
+ /* Determine size of 1st I-block PRD list */
+ startlba = t->inter_block - (lbalo & (t->inter_block -
+ 1));
+ sgc->length = startlba * t->block_size;
+
+ /* Check if the 1st I-block chain starts at the base of an I-block */
+ if ((lbalo & (t->inter_block - 1)) == 0)
+ rq->flags |= RF_1ST_IBLK_BASE;
+
+ if (sgc->length > len)
+ sgc->length = len;
+ } else {
+ sgc->length = len;
+ }
+ } else {
+ sgc->length = len;
+ }
+
+ /* get our starting chain address */
+
+ curr_iblk_chn =
+ (struct atto_physical_region_description *)sgc->sge.a64.curr;
+
+ sgc->sge.prd.sgl_max_cnt =
+ sgl_page_size / sizeof(struct atto_physical_region_description);
+
+ /* create all of the I-block PRD lists */
+
+ while (len) {
+ sgc->sge.prd.sge_cnt = 0;
+ sgc->sge.prd.chain = NULL;
+ sgc->sge.prd.curr = curr_iblk_chn;
+
+ /* increment to next I-Block */
+
+ len -= sgc->length;
+
+ /* go build the next I-Block PRD list */
+
+ if (unlikely(!esas2r_build_prd_iblk(a, sgc)))
+ return false;
+
+ curr_iblk_chn++;
+
+ if (is_i_o) {
+ rq->vrq->scsi.iblk_cnt_prd++;
+
+ if (len > t->inter_byte)
+ sgc->length = t->inter_byte;
+ else
+ sgc->length = len;
+ }
+ }
+
+ /* figure out the size used of the VDA request */
+
+ reqsize = ((u16)((u8 *)curr_iblk_chn - (u8 *)rq->vrq))
+ / sizeof(u32);
+
+ /*
+ * Only update the request size if it is bigger than what is
+ * already there. We can come in here twice for some management
+ * commands.
+ */
+
+ if (reqsize > rq->vda_req_sz)
+ rq->vda_req_sz = reqsize;
+
+ return true;
+}
+
+static void esas2r_handle_pending_reset(struct esas2r_adapter *a, u32 currtime)
+{
+ u32 delta = currtime - a->chip_init_time;
+
+ if (delta <= ESAS2R_CHPRST_WAIT_TIME) {
+ /* Wait before accessing registers */
+ } else if (delta >= ESAS2R_CHPRST_TIME) {
+ /*
+ * The last reset failed so try again. Reset
+ * processing will give up after three tries.
+ */
+ esas2r_local_reset_adapter(a);
+ } else {
+ /* We can now see if the firmware is ready */
+ u32 doorbell;
+
+ doorbell = esas2r_read_register_dword(a, MU_DOORBELL_OUT);
+ if (doorbell == 0xFFFFFFFF || !(doorbell & DRBL_FORCE_INT)) {
+ esas2r_force_interrupt(a);
+ } else {
+ u32 ver = (doorbell & DRBL_FW_VER_MSK);
+
+ /* The driver supports firmware API versions 0 and 1 */
+ esas2r_write_register_dword(a, MU_DOORBELL_OUT,
+ doorbell);
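+ /*
+ * Version 0 firmware uses the legacy 64-bit SGE list
+ * format with smaller VDA requests; version 1 uses
+ * PRD lists, as selected below.
+ */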
+ if (ver == DRBL_FW_VER_0) {
+ esas2r_lock_set_flags(&a->flags,
+ AF_CHPRST_DETECTED);
+ esas2r_lock_set_flags(&a->flags,
+ AF_LEGACY_SGE_MODE);
+
+ a->max_vdareq_size = 128;
+ a->build_sgl = esas2r_build_sg_list_sge;
+ } else if (ver == DRBL_FW_VER_1) {
+ esas2r_lock_set_flags(&a->flags,
+ AF_CHPRST_DETECTED);
+ esas2r_lock_clear_flags(&a->flags,
+ AF_LEGACY_SGE_MODE);
+
+ a->max_vdareq_size = 1024;
+ a->build_sgl = esas2r_build_sg_list_prd;
+ } else {
+ esas2r_local_reset_adapter(a);
+ }
+ }
+ }
+}
+
+
+/* This function must be called once per timer tick */
+void esas2r_timer_tick(struct esas2r_adapter *a)
+{
+ u32 currtime = jiffies_to_msecs(jiffies);
+ u32 deltatime = currtime - a->last_tick_time;
+
+ a->last_tick_time = currtime;
+
+ /* count down the uptime */
+ if (a->chip_uptime
+ && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) {
+ if (deltatime >= a->chip_uptime)
+ a->chip_uptime = 0;
+ else
+ a->chip_uptime -= deltatime;
+ }
+
+ if (a->flags & AF_CHPRST_PENDING) {
+ if (!(a->flags & AF_CHPRST_NEEDED)
+ && !(a->flags & AF_CHPRST_DETECTED))
+ esas2r_handle_pending_reset(a, currtime);
+ } else {
+ if (a->flags & AF_DISC_PENDING)
+ esas2r_disc_check_complete(a);
+
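+ /*
+ * Heartbeat: set AF_HEARTBEAT and force a doorbell interrupt;
+ * the doorbell handler clears the flag when the firmware
+ * responds. If the flag is still set ESAS2R_HEARTBEAT_TIME
+ * later, the firmware is considered unresponsive and the
+ * adapter is reset.
+ */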
+ if (a->flags & AF_HEARTBEAT_ENB) {
+ if (a->flags & AF_HEARTBEAT) {
+ if ((currtime - a->heartbeat_time) >=
+ ESAS2R_HEARTBEAT_TIME) {
+ esas2r_lock_clear_flags(&a->flags,
+ AF_HEARTBEAT);
+ esas2r_hdebug("heartbeat failed");
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "heartbeat failed");
+ esas2r_bugon();
+ esas2r_local_reset_adapter(a);
+ }
+ } else {
+ esas2r_lock_set_flags(&a->flags, AF_HEARTBEAT);
+ a->heartbeat_time = currtime;
+ esas2r_force_interrupt(a);
+ }
+ }
+ }
+
+ if (atomic_read(&a->disable_cnt) == 0)
+ esas2r_do_deferred_processes(a);
+}
+
+/*
+ * Send the specified task management function to the target and LUN
+ * specified in rqaux. In addition, immediately abort any commands that
+ * are queued but not sent to the device according to the rules specified
+ * by the task management function.
+ */
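+/*
+ * For example, a target reset (task_mgt_func 0x20, as checked below) matches
+ * every deferred request for the target regardless of LUN, while any other
+ * function only matches requests for the LUN given in rqaux.
+ */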
+bool esas2r_send_task_mgmt(struct esas2r_adapter *a,
+ struct esas2r_request *rqaux, u8 task_mgt_func)
+{
+ u16 targetid = rqaux->target_id;
+ u8 lun = (u8)le32_to_cpu(rqaux->vrq->scsi.flags);
+ bool ret = false;
+ struct esas2r_request *rq;
+ struct list_head *next, *element;
+ unsigned long flags;
+
+ LIST_HEAD(comp_list);
+
+ esas2r_trace_enter();
+ esas2r_trace("rqaux:%p", rqaux);
+ esas2r_trace("task_mgt_func:%x", task_mgt_func);
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ /* search the defer queue looking for requests for the device */
+ list_for_each_safe(element, next, &a->defer_list) {
+ rq = list_entry(element, struct esas2r_request, req_list);
+
+ if (rq->vrq->scsi.function == VDA_FUNC_SCSI
+ && rq->target_id == targetid
+ && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
+ || task_mgt_func == 0x20)) { /* target reset */
+ /* Found a request affected by the task management */
+ if (rq->req_stat == RS_PENDING) {
+ /*
+ * The request is pending or waiting. We can
+ * safely complete the request now.
+ */
+ if (esas2r_ioreq_aborted(a, rq, RS_ABORTED))
+ list_add_tail(&rq->comp_list,
+ &comp_list);
+ }
+ }
+ }
+
+ /* Send the task management request to the firmware */
+ rqaux->sense_len = 0;
+ rqaux->vrq->scsi.length = 0;
+ rqaux->target_id = targetid;
+ rqaux->vrq->scsi.flags |= cpu_to_le32(lun);
+ memset(rqaux->vrq->scsi.cdb, 0, sizeof(rqaux->vrq->scsi.cdb));
+ rqaux->vrq->scsi.flags |=
+ cpu_to_le16(task_mgt_func * LOBIT(FCP_CMND_TM_MASK));
+
+ if (a->flags & AF_FLASHING) {
+ /* Assume success; if there are active requests, return busy. */
+ rqaux->req_stat = RS_SUCCESS;
+
+ list_for_each_safe(element, next, &a->active_list) {
+ rq = list_entry(element, struct esas2r_request,
+ req_list);
+ if (rq->vrq->scsi.function == VDA_FUNC_SCSI
+ && rq->target_id == targetid
+ && (((u8)le32_to_cpu(rq->vrq->scsi.flags)) == lun
+ || task_mgt_func == 0x20)) /* target reset */
+ rqaux->req_stat = RS_BUSY;
+ }
+
+ ret = true;
+ }
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+
+ if (!(a->flags & AF_FLASHING))
+ esas2r_start_request(a, rqaux);
+
+ esas2r_comp_list_drain(a, &comp_list);
+
+ if (atomic_read(&a->disable_cnt) == 0)
+ esas2r_do_deferred_processes(a);
+
+ esas2r_trace_exit();
+
+ return ret;
+}
+
+void esas2r_reset_bus(struct esas2r_adapter *a)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "performing a bus reset");
+
+ if (!(a->flags & AF_DEGRADED_MODE)
+ && !(a->flags & (AF_CHPRST_PENDING | AF_DISC_PENDING))) {
+ esas2r_lock_set_flags(&a->flags, AF_BUSRST_NEEDED);
+ esas2r_lock_set_flags(&a->flags, AF_BUSRST_PENDING);
+ esas2r_lock_set_flags(&a->flags, AF_OS_RESET);
+
+ esas2r_schedule_tasklet(a);
+ }
+}
+
+bool esas2r_ioreq_aborted(struct esas2r_adapter *a, struct esas2r_request *rq,
+ u8 status)
+{
+ esas2r_trace_enter();
+ esas2r_trace("rq:%p", rq);
+ list_del_init(&rq->req_list);
+ if (rq->timeout > RQ_MAX_TIMEOUT) {
+ /*
+ * The request timed out, but we could not abort it because a
+ * chip reset occurred. Return busy status.
+ */
+ rq->req_stat = RS_BUSY;
+ esas2r_trace_exit();
+ return true;
+ }
+
+ rq->req_stat = status;
+ esas2r_trace_exit();
+ return true;
+}
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
new file mode 100644
index 00000000000..f3d0cb88597
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -0,0 +1,2110 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_ioctl.c
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+/*
+ * Buffered ioctl handlers. A buffered ioctl is one which requires that we
+ * allocate a DMA-able memory area to communicate with the firmware. In
+ * order to prevent continually allocating and freeing consistent memory,
+ * we will allocate a global buffer the first time we need it and re-use
+ * it for subsequent ioctl calls that require it.
+ */
+
+u8 *esas2r_buffered_ioctl;
+dma_addr_t esas2r_buffered_ioctl_addr;
+u32 esas2r_buffered_ioctl_size;
+struct pci_dev *esas2r_buffered_ioctl_pcid;
+
+static DEFINE_SEMAPHORE(buffered_ioctl_semaphore);
+typedef int (*BUFFERED_IOCTL_CALLBACK)(struct esas2r_adapter *,
+ struct esas2r_request *,
+ struct esas2r_sg_context *,
+ void *);
+typedef void (*BUFFERED_IOCTL_DONE_CALLBACK)(struct esas2r_adapter *,
+ struct esas2r_request *, void *);
+
+struct esas2r_buffered_ioctl {
+ struct esas2r_adapter *a;
+ void *ioctl;
+ u32 length;
+ u32 control_code;
+ u32 offset;
+ BUFFERED_IOCTL_CALLBACK
+ callback;
+ void *context;
+ BUFFERED_IOCTL_DONE_CALLBACK
+ done_callback;
+ void *done_context;
+
+};
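+/*
+ * A caller fills in a struct esas2r_buffered_ioctl (handle_smp_ioctl() below
+ * is a concrete example) and passes it to handle_buffered_ioctl(), which
+ * copies the caller's data into the shared DMA buffer, issues the request
+ * via the callback, waits for completion and copies the result back.
+ */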
+
+static void complete_fm_api_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ a->fm_api_command_done = 1;
+ wake_up_interruptible(&a->fm_api_waiter);
+}
+
+/* Callbacks for building scatter/gather lists for FM API requests */
+static u32 get_physaddr_fm_api(struct esas2r_sg_context *sgc, u64 *addr)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
+ int offset = sgc->cur_offset - a->save_offset;
+
+ (*addr) = a->firmware.phys + offset;
+ return a->firmware.orig_len - offset;
+}
+
+static u32 get_physaddr_fm_api_header(struct esas2r_sg_context *sgc, u64 *addr)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
+ int offset = sgc->cur_offset - a->save_offset;
+
+ (*addr) = a->firmware.header_buff_phys + offset;
+ return sizeof(struct esas2r_flash_img) - offset;
+}
+
+/* Handle EXPRESS_IOCTL_RW_FIRMWARE ioctl with img_type = FW_IMG_FM_API. */
+static void do_fm_api(struct esas2r_adapter *a, struct esas2r_flash_img *fi)
+{
+ struct esas2r_request *rq;
+
+ if (down_interruptible(&a->fm_api_semaphore)) {
+ fi->status = FI_STAT_BUSY;
+ return;
+ }
+
+ rq = esas2r_alloc_request(a);
+ if (rq == NULL) {
+ up(&a->fm_api_semaphore);
+ fi->status = FI_STAT_BUSY;
+ return;
+ }
+
+ if (fi == &a->firmware.header) {
+ a->firmware.header_buff =
+ dma_alloc_coherent(&a->pcid->dev,
+ (size_t)sizeof(struct esas2r_flash_img),
+ (dma_addr_t *)&a->firmware.header_buff_phys,
+ GFP_KERNEL);
+
+ if (a->firmware.header_buff == NULL) {
+ esas2r_debug("failed to allocate header buffer!");
+ fi->status = FI_STAT_BUSY;
+ return;
+ }
+
+ memcpy(a->firmware.header_buff, fi,
+ sizeof(struct esas2r_flash_img));
+ a->save_offset = a->firmware.header_buff;
+ a->fm_api_sgc.get_phys_addr =
+ (PGETPHYSADDR)get_physaddr_fm_api_header;
+ } else {
+ a->save_offset = (u8 *)fi;
+ a->fm_api_sgc.get_phys_addr =
+ (PGETPHYSADDR)get_physaddr_fm_api;
+ }
+
+ rq->comp_cb = complete_fm_api_req;
+ a->fm_api_command_done = 0;
+ a->fm_api_sgc.cur_offset = a->save_offset;
+
+ if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq,
+ &a->fm_api_sgc))
+ goto all_done;
+
+ /* Now wait around for it to complete. */
+ while (!a->fm_api_command_done)
+ wait_event_interruptible(a->fm_api_waiter,
+ a->fm_api_command_done);
+all_done:
+ if (fi == &a->firmware.header) {
+ memcpy(fi, a->firmware.header_buff,
+ sizeof(struct esas2r_flash_img));
+
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)sizeof(struct esas2r_flash_img),
+ a->firmware.header_buff,
+ (dma_addr_t)a->firmware.header_buff_phys);
+ }
+
+ up(&a->fm_api_semaphore);
+ esas2r_free_request(a, (struct esas2r_request *)rq);
+ return;
+
+}
+
+static void complete_nvr_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ a->nvram_command_done = 1;
+ wake_up_interruptible(&a->nvram_waiter);
+}
+
+/* Callback for building scatter/gather lists for buffered ioctls */
+static u32 get_physaddr_buffered_ioctl(struct esas2r_sg_context *sgc,
+ u64 *addr)
+{
+ int offset = (u8 *)sgc->cur_offset - esas2r_buffered_ioctl;
+
+ (*addr) = esas2r_buffered_ioctl_addr + offset;
+ return esas2r_buffered_ioctl_size - offset;
+}
+
+static void complete_buffered_ioctl_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ a->buffered_ioctl_done = 1;
+ wake_up_interruptible(&a->buffered_ioctl_waiter);
+}
+
+static u8 handle_buffered_ioctl(struct esas2r_buffered_ioctl *bi)
+{
+ struct esas2r_adapter *a = bi->a;
+ struct esas2r_request *rq;
+ struct esas2r_sg_context sgc;
+ u8 result = IOCTL_SUCCESS;
+
+ if (down_interruptible(&buffered_ioctl_semaphore))
+ return IOCTL_OUT_OF_RESOURCES;
+
+ /* allocate a buffer or use the existing buffer. */
+ if (esas2r_buffered_ioctl) {
+ if (esas2r_buffered_ioctl_size < bi->length) {
+ /* free the too-small buffer and get a new one */
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)esas2r_buffered_ioctl_size,
+ esas2r_buffered_ioctl,
+ esas2r_buffered_ioctl_addr);
+
+ goto allocate_buffer;
+ }
+ } else {
+allocate_buffer:
+ esas2r_buffered_ioctl_size = bi->length;
+ esas2r_buffered_ioctl_pcid = a->pcid;
+ esas2r_buffered_ioctl =
+ dma_alloc_coherent(&a->pcid->dev,
+ (size_t)esas2r_buffered_ioctl_size,
+ &esas2r_buffered_ioctl_addr,
+ GFP_KERNEL);
+ }
+
+ if (!esas2r_buffered_ioctl) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "could not allocate %d bytes of consistent memory "
+ "for a buffered ioctl!",
+ bi->length);
+
+ esas2r_debug("buffered ioctl alloc failure");
+ result = IOCTL_OUT_OF_RESOURCES;
+ goto exit_cleanly;
+ }
+
+ memcpy(esas2r_buffered_ioctl, bi->ioctl, bi->length);
+
+ rq = esas2r_alloc_request(a);
+ if (rq == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "could not allocate an internal request");
+
+ result = IOCTL_OUT_OF_RESOURCES;
+ esas2r_debug("buffered ioctl - no requests");
+ goto exit_cleanly;
+ }
+
+ a->buffered_ioctl_done = 0;
+ rq->comp_cb = complete_buffered_ioctl_req;
+ sgc.cur_offset = esas2r_buffered_ioctl + bi->offset;
+ sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_buffered_ioctl;
+ sgc.length = esas2r_buffered_ioctl_size;
+
+ if (!(*bi->callback)(a, rq, &sgc, bi->context)) {
+ /* completed immediately, no need to wait */
+ a->buffered_ioctl_done = 0;
+ goto free_andexit_cleanly;
+ }
+
+ /* Now wait around for it to complete. */
+ while (!a->buffered_ioctl_done)
+ wait_event_interruptible(a->buffered_ioctl_waiter,
+ a->buffered_ioctl_done);
+
+free_andexit_cleanly:
+ if (result == IOCTL_SUCCESS && bi->done_callback)
+ (*bi->done_callback)(a, rq, bi->done_context);
+
+ esas2r_free_request(a, rq);
+
+exit_cleanly:
+ if (result == IOCTL_SUCCESS)
+ memcpy(bi->ioctl, esas2r_buffered_ioctl, bi->length);
+
+ up(&buffered_ioctl_semaphore);
+ return result;
+}
+
+/* SMP ioctl support */
+static int smp_ioctl_callback(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc, void *context)
+{
+ struct atto_ioctl_smp *si =
+ (struct atto_ioctl_smp *)esas2r_buffered_ioctl;
+
+ esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
+ esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_SMP);
+
+ if (!esas2r_build_sg_list(a, rq, sgc)) {
+ si->status = ATTO_STS_OUT_OF_RSRC;
+ return false;
+ }
+
+ esas2r_start_request(a, rq);
+ return true;
+}
+
+static u8 handle_smp_ioctl(struct esas2r_adapter *a, struct atto_ioctl_smp *si)
+{
+ struct esas2r_buffered_ioctl bi;
+
+ memset(&bi, 0, sizeof(bi));
+
+ bi.a = a;
+ bi.ioctl = si;
+ bi.length = sizeof(struct atto_ioctl_smp)
+ + le32_to_cpu(si->req_length)
+ + le32_to_cpu(si->rsp_length);
+ bi.offset = 0;
+ bi.callback = smp_ioctl_callback;
+ return handle_buffered_ioctl(&bi);
+}
+
+
+/* CSMI ioctl support */
+static void esas2r_csmi_ioctl_tunnel_comp_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ rq->target_id = le16_to_cpu(rq->func_rsp.ioctl_rsp.csmi.target_id);
+ rq->vrq->scsi.flags |= cpu_to_le32(rq->func_rsp.ioctl_rsp.csmi.lun);
+
+ /* Now call the original completion callback. */
+ (*rq->aux_req_cb)(a, rq);
+}
+
+/* Tunnel a CSMI IOCTL to the back end driver for processing. */
+static bool csmi_ioctl_tunnel(struct esas2r_adapter *a,
+ union atto_ioctl_csmi *ci,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc,
+ u32 ctrl_code,
+ u16 target_id)
+{
+ struct atto_vda_ioctl_req *ioctl = &rq->vrq->ioctl;
+
+ if (a->flags & AF_DEGRADED_MODE)
+ return false;
+
+ esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
+ esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_CSMI);
+ ioctl->csmi.ctrl_code = cpu_to_le32(ctrl_code);
+ ioctl->csmi.target_id = cpu_to_le16(target_id);
+ ioctl->csmi.lun = (u8)le32_to_cpu(rq->vrq->scsi.flags);
+
+ /*
+ * Always usurp the completion callback since the interrupt callback
+ * mechanism may be used.
+ */
+ rq->aux_req_cx = ci;
+ rq->aux_req_cb = rq->comp_cb;
+ rq->comp_cb = esas2r_csmi_ioctl_tunnel_comp_cb;
+
+ if (!esas2r_build_sg_list(a, rq, sgc))
+ return false;
+
+ esas2r_start_request(a, rq);
+ return true;
+}
+
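+/*
+ * Returns true when every byte of the SCSI LUN other than byte 1 is zero,
+ * i.e. the LUN can be represented by the single LUN byte this driver passes
+ * around (byte 1 is the LUN in peripheral-device addressing).
+ */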
+static bool check_lun(struct scsi_lun lun)
+{
+ bool result;
+
+ result = ((lun.scsi_lun[7] == 0) &&
+ (lun.scsi_lun[6] == 0) &&
+ (lun.scsi_lun[5] == 0) &&
+ (lun.scsi_lun[4] == 0) &&
+ (lun.scsi_lun[3] == 0) &&
+ (lun.scsi_lun[2] == 0) &&
+/* Byte 1 is intentionally skipped */
+ (lun.scsi_lun[0] == 0));
+
+ return result;
+}
+
+static int csmi_ioctl_callback(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc, void *context)
+{
+ struct atto_csmi *ci = (struct atto_csmi *)context;
+ union atto_ioctl_csmi *ioctl_csmi =
+ (union atto_ioctl_csmi *)esas2r_buffered_ioctl;
+ u8 path = 0;
+ u8 tid = 0;
+ u8 lun = 0;
+ u32 sts = CSMI_STS_SUCCESS;
+ struct esas2r_target *t;
+ unsigned long flags;
+
+ if (ci->control_code == CSMI_CC_GET_DEV_ADDR) {
+ struct atto_csmi_get_dev_addr *gda = &ci->data.dev_addr;
+
+ path = gda->path_id;
+ tid = gda->target_id;
+ lun = gda->lun;
+ } else if (ci->control_code == CSMI_CC_TASK_MGT) {
+ struct atto_csmi_task_mgmt *tm = &ci->data.tsk_mgt;
+
+ path = tm->path_id;
+ tid = tm->target_id;
+ lun = tm->lun;
+ }
+
+ if (path > 0 || tid > ESAS2R_MAX_ID) {
+ rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(
+ CSMI_STS_INV_PARAM);
+ return false;
+ }
+
+ rq->target_id = tid;
+ rq->vrq->scsi.flags |= cpu_to_le32(lun);
+
+ switch (ci->control_code) {
+ case CSMI_CC_GET_DRVR_INFO:
+ {
+ struct atto_csmi_get_driver_info *gdi = &ioctl_csmi->drvr_info;
+
+ strcpy(gdi->description, esas2r_get_model_name(a));
+ gdi->csmi_major_rev = CSMI_MAJOR_REV;
+ gdi->csmi_minor_rev = CSMI_MINOR_REV;
+ break;
+ }
+
+ case CSMI_CC_GET_CNTLR_CFG:
+ {
+ struct atto_csmi_get_cntlr_cfg *gcc = &ioctl_csmi->cntlr_cfg;
+
+ gcc->base_io_addr = 0;
+ pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_2,
+ &gcc->base_memaddr_lo);
+ pci_read_config_dword(a->pcid, PCI_BASE_ADDRESS_3,
+ &gcc->base_memaddr_hi);
+ gcc->board_id = MAKEDWORD(a->pcid->subsystem_device,
+ a->pcid->subsystem_vendor);
+ gcc->slot_num = CSMI_SLOT_NUM_UNKNOWN;
+ gcc->cntlr_class = CSMI_CNTLR_CLASS_HBA;
+ gcc->io_bus_type = CSMI_BUS_TYPE_PCI;
+ gcc->pci_addr.bus_num = a->pcid->bus->number;
+ gcc->pci_addr.device_num = PCI_SLOT(a->pcid->devfn);
+ gcc->pci_addr.function_num = PCI_FUNC(a->pcid->devfn);
+
+ memset(gcc->serial_num, 0, sizeof(gcc->serial_num));
+
+ gcc->major_rev = LOBYTE(LOWORD(a->fw_version));
+ gcc->minor_rev = HIBYTE(LOWORD(a->fw_version));
+ gcc->build_rev = LOBYTE(HIWORD(a->fw_version));
+ gcc->release_rev = HIBYTE(HIWORD(a->fw_version));
+ gcc->bios_major_rev = HIBYTE(HIWORD(a->flash_ver));
+ gcc->bios_minor_rev = LOBYTE(HIWORD(a->flash_ver));
+ gcc->bios_build_rev = LOWORD(a->flash_ver);
+
+ if (a->flags2 & AF2_THUNDERLINK)
+ gcc->cntlr_flags = CSMI_CNTLRF_SAS_HBA
+ | CSMI_CNTLRF_SATA_HBA;
+ else
+ gcc->cntlr_flags = CSMI_CNTLRF_SAS_RAID
+ | CSMI_CNTLRF_SATA_RAID;
+
+ gcc->rrom_major_rev = 0;
+ gcc->rrom_minor_rev = 0;
+ gcc->rrom_build_rev = 0;
+ gcc->rrom_release_rev = 0;
+ gcc->rrom_biosmajor_rev = 0;
+ gcc->rrom_biosminor_rev = 0;
+ gcc->rrom_biosbuild_rev = 0;
+ gcc->rrom_biosrelease_rev = 0;
+ break;
+ }
+
+ case CSMI_CC_GET_CNTLR_STS:
+ {
+ struct atto_csmi_get_cntlr_sts *gcs = &ioctl_csmi->cntlr_sts;
+
+ if (a->flags & AF_DEGRADED_MODE)
+ gcs->status = CSMI_CNTLR_STS_FAILED;
+ else
+ gcs->status = CSMI_CNTLR_STS_GOOD;
+
+ gcs->offline_reason = CSMI_OFFLINE_NO_REASON;
+ break;
+ }
+
+ case CSMI_CC_FW_DOWNLOAD:
+ case CSMI_CC_GET_RAID_INFO:
+ case CSMI_CC_GET_RAID_CFG:
+
+ sts = CSMI_STS_BAD_CTRL_CODE;
+ break;
+
+ case CSMI_CC_SMP_PASSTHRU:
+ case CSMI_CC_SSP_PASSTHRU:
+ case CSMI_CC_STP_PASSTHRU:
+ case CSMI_CC_GET_PHY_INFO:
+ case CSMI_CC_SET_PHY_INFO:
+ case CSMI_CC_GET_LINK_ERRORS:
+ case CSMI_CC_GET_SATA_SIG:
+ case CSMI_CC_GET_CONN_INFO:
+ case CSMI_CC_PHY_CTRL:
+
+ if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
+ ci->control_code,
+ ESAS2R_TARG_ID_INV)) {
+ sts = CSMI_STS_FAILED;
+ break;
+ }
+
+ return true;
+
+ case CSMI_CC_GET_SCSI_ADDR:
+ {
+ struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;
+
+ struct scsi_lun lun;
+
+ memcpy(&lun, gsa->sas_lun, sizeof(struct scsi_lun));
+
+ if (!check_lun(lun)) {
+ sts = CSMI_STS_NO_SCSI_ADDR;
+ break;
+ }
+
+ /* make sure the device is present */
+ spin_lock_irqsave(&a->mem_lock, flags);
+ t = esas2r_targ_db_find_by_sas_addr(a, (u64 *)gsa->sas_addr);
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ if (t == NULL) {
+ sts = CSMI_STS_NO_SCSI_ADDR;
+ break;
+ }
+
+ gsa->host_index = 0xFF;
+ gsa->lun = gsa->sas_lun[1];
+ rq->target_id = esas2r_targ_get_id(t, a);
+ break;
+ }
+
+ case CSMI_CC_GET_DEV_ADDR:
+ {
+ struct atto_csmi_get_dev_addr *gda = &ioctl_csmi->dev_addr;
+
+ /* make sure the target is present */
+ t = a->targetdb + rq->target_id;
+
+ if (t >= a->targetdb_end
+ || t->target_state != TS_PRESENT
+ || t->sas_addr == 0) {
+ sts = CSMI_STS_NO_DEV_ADDR;
+ break;
+ }
+
+ /* fill in the result */
+ *(u64 *)gda->sas_addr = t->sas_addr;
+ memset(gda->sas_lun, 0, sizeof(gda->sas_lun));
+ gda->sas_lun[1] = (u8)le32_to_cpu(rq->vrq->scsi.flags);
+ break;
+ }
+
+ case CSMI_CC_TASK_MGT:
+
+ /* make sure the target is present */
+ t = a->targetdb + rq->target_id;
+
+ if (t >= a->targetdb_end
+ || t->target_state != TS_PRESENT
+ || !(t->flags & TF_PASS_THRU)) {
+ sts = CSMI_STS_NO_DEV_ADDR;
+ break;
+ }
+
+ if (!csmi_ioctl_tunnel(a, ioctl_csmi, rq, sgc,
+ ci->control_code,
+ t->phys_targ_id)) {
+ sts = CSMI_STS_FAILED;
+ break;
+ }
+
+ return true;
+
+ default:
+
+ sts = CSMI_STS_BAD_CTRL_CODE;
+ break;
+ }
+
+ rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32(sts);
+
+ return false;
+}
+
+
+static void csmi_ioctl_done_callback(struct esas2r_adapter *a,
+ struct esas2r_request *rq, void *context)
+{
+ struct atto_csmi *ci = (struct atto_csmi *)context;
+ union atto_ioctl_csmi *ioctl_csmi =
+ (union atto_ioctl_csmi *)esas2r_buffered_ioctl;
+
+ switch (ci->control_code) {
+ case CSMI_CC_GET_DRVR_INFO:
+ {
+ struct atto_csmi_get_driver_info *gdi =
+ &ioctl_csmi->drvr_info;
+
+ strcpy(gdi->name, ESAS2R_VERSION_STR);
+
+ gdi->major_rev = ESAS2R_MAJOR_REV;
+ gdi->minor_rev = ESAS2R_MINOR_REV;
+ gdi->build_rev = 0;
+ gdi->release_rev = 0;
+ break;
+ }
+
+ case CSMI_CC_GET_SCSI_ADDR:
+ {
+ struct atto_csmi_get_scsi_addr *gsa = &ioctl_csmi->scsi_addr;
+
+ if (le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status) ==
+ CSMI_STS_SUCCESS) {
+ gsa->target_id = rq->target_id;
+ gsa->path_id = 0;
+ }
+
+ break;
+ }
+ }
+
+ ci->status = le32_to_cpu(rq->func_rsp.ioctl_rsp.csmi.csmi_status);
+}
+
+
+static u8 handle_csmi_ioctl(struct esas2r_adapter *a, struct atto_csmi *ci)
+{
+ struct esas2r_buffered_ioctl bi;
+
+ memset(&bi, 0, sizeof(bi));
+
+ bi.a = a;
+ bi.ioctl = &ci->data;
+ bi.length = sizeof(union atto_ioctl_csmi);
+ bi.offset = 0;
+ bi.callback = csmi_ioctl_callback;
+ bi.context = ci;
+ bi.done_callback = csmi_ioctl_done_callback;
+ bi.done_context = ci;
+
+ return handle_buffered_ioctl(&bi);
+}
+
+/* ATTO HBA ioctl support */
+
+/* Tunnel an ATTO HBA IOCTL to the back end driver for processing. */
+static bool hba_ioctl_tunnel(struct esas2r_adapter *a,
+ struct atto_ioctl *hi,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc)
+{
+ esas2r_sgc_init(sgc, a, rq, rq->vrq->ioctl.sge);
+
+ esas2r_build_ioctl_req(a, rq, sgc->length, VDA_IOCTL_HBA);
+
+ if (!esas2r_build_sg_list(a, rq, sgc)) {
+ hi->status = ATTO_STS_OUT_OF_RSRC;
+
+ return false;
+ }
+
+ esas2r_start_request(a, rq);
+
+ return true;
+}
+
+static void scsi_passthru_comp_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct atto_ioctl *hi = (struct atto_ioctl *)rq->aux_req_cx;
+ struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
+ u8 sts = ATTO_SPT_RS_FAILED;
+
+ spt->scsi_status = rq->func_rsp.scsi_rsp.scsi_stat;
+ spt->sense_length = rq->sense_len;
+ spt->residual_length =
+ le32_to_cpu(rq->func_rsp.scsi_rsp.residual_length);
+
+ switch (rq->req_stat) {
+ case RS_SUCCESS:
+ case RS_SCSI_ERROR:
+ sts = ATTO_SPT_RS_SUCCESS;
+ break;
+ case RS_UNDERRUN:
+ sts = ATTO_SPT_RS_UNDERRUN;
+ break;
+ case RS_OVERRUN:
+ sts = ATTO_SPT_RS_OVERRUN;
+ break;
+ case RS_SEL:
+ case RS_SEL2:
+ sts = ATTO_SPT_RS_NO_DEVICE;
+ break;
+ case RS_NO_LUN:
+ sts = ATTO_SPT_RS_NO_LUN;
+ break;
+ case RS_TIMEOUT:
+ sts = ATTO_SPT_RS_TIMEOUT;
+ break;
+ case RS_DEGRADED:
+ sts = ATTO_SPT_RS_DEGRADED;
+ break;
+ case RS_BUSY:
+ sts = ATTO_SPT_RS_BUSY;
+ break;
+ case RS_ABORTED:
+ sts = ATTO_SPT_RS_ABORTED;
+ break;
+ case RS_RESET:
+ sts = ATTO_SPT_RS_BUS_RESET;
+ break;
+ }
+
+ spt->req_status = sts;
+
+ /* Update the target ID to the next one present. */
+ spt->target_id =
+ esas2r_targ_db_find_next_present(a, (u16)spt->target_id);
+
+ /* Done, call the completion callback. */
+ (*rq->aux_req_cb)(a, rq);
+}
+
+static int hba_ioctl_callback(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc,
+ void *context)
+{
+ struct atto_ioctl *hi = (struct atto_ioctl *)esas2r_buffered_ioctl;
+
+ hi->status = ATTO_STS_SUCCESS;
+
+ switch (hi->function) {
+ case ATTO_FUNC_GET_ADAP_INFO:
+ {
+ u8 *class_code = (u8 *)&a->pcid->class;
+
+ struct atto_hba_get_adapter_info *gai =
+ &hi->data.get_adap_info;
+ int pcie_cap_reg;
+
+ if (hi->flags & HBAF_TUNNEL) {
+ hi->status = ATTO_STS_UNSUPPORTED;
+ break;
+ }
+
+ if (hi->version > ATTO_VER_GET_ADAP_INFO0) {
+ hi->status = ATTO_STS_INV_VERSION;
+ hi->version = ATTO_VER_GET_ADAP_INFO0;
+ break;
+ }
+
+ memset(gai, 0, sizeof(*gai));
+
+ gai->pci.vendor_id = a->pcid->vendor;
+ gai->pci.device_id = a->pcid->device;
+ gai->pci.ss_vendor_id = a->pcid->subsystem_vendor;
+ gai->pci.ss_device_id = a->pcid->subsystem_device;
+ gai->pci.class_code[0] = class_code[0];
+ gai->pci.class_code[1] = class_code[1];
+ gai->pci.class_code[2] = class_code[2];
+ gai->pci.rev_id = a->pcid->revision;
+ gai->pci.bus_num = a->pcid->bus->number;
+ gai->pci.dev_num = PCI_SLOT(a->pcid->devfn);
+ gai->pci.func_num = PCI_FUNC(a->pcid->devfn);
+
+ pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP);
+ if (pcie_cap_reg) {
+ u16 stat;
+ u32 caps;
+
+ pci_read_config_word(a->pcid,
+ pcie_cap_reg + PCI_EXP_LNKSTA,
+ &stat);
+ pci_read_config_dword(a->pcid,
+ pcie_cap_reg + PCI_EXP_LNKCAP,
+ &caps);
+
+ gai->pci.link_speed_curr =
+ (u8)(stat & PCI_EXP_LNKSTA_CLS);
+ gai->pci.link_speed_max =
+ (u8)(caps & PCI_EXP_LNKCAP_SLS);
+ gai->pci.link_width_curr =
+ (u8)((stat & PCI_EXP_LNKSTA_NLW)
+ >> PCI_EXP_LNKSTA_NLW_SHIFT);
+ gai->pci.link_width_max =
+ (u8)((caps & PCI_EXP_LNKCAP_MLW)
+ >> 4);
+ }
+
+ gai->pci.msi_vector_cnt = 1;
+
+ if (a->pcid->msix_enabled)
+ gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSIX;
+ else if (a->pcid->msi_enabled)
+ gai->pci.interrupt_mode = ATTO_GAI_PCIIM_MSI;
+ else
+ gai->pci.interrupt_mode = ATTO_GAI_PCIIM_LEGACY;
+
+ gai->adap_type = ATTO_GAI_AT_ESASRAID2;
+
+ if (a->flags2 & AF2_THUNDERLINK)
+ gai->adap_type = ATTO_GAI_AT_TLSASHBA;
+
+ if (a->flags & AF_DEGRADED_MODE)
+ gai->adap_flags |= ATTO_GAI_AF_DEGRADED;
+
+ gai->adap_flags |= ATTO_GAI_AF_SPT_SUPP |
+ ATTO_GAI_AF_DEVADDR_SUPP;
+
+ if (a->pcid->subsystem_device == ATTO_ESAS_R60F
+ || a->pcid->subsystem_device == ATTO_ESAS_R608
+ || a->pcid->subsystem_device == ATTO_ESAS_R644
+ || a->pcid->subsystem_device == ATTO_TSSC_3808E)
+ gai->adap_flags |= ATTO_GAI_AF_VIRT_SES;
+
+ gai->num_ports = ESAS2R_NUM_PHYS;
+ gai->num_phys = ESAS2R_NUM_PHYS;
+
+ strcpy(gai->firmware_rev, a->fw_rev);
+ strcpy(gai->flash_rev, a->flash_rev);
+ strcpy(gai->model_name_short, esas2r_get_model_name_short(a));
+ strcpy(gai->model_name, esas2r_get_model_name(a));
+
+ gai->num_targets = ESAS2R_MAX_TARGETS;
+
+ gai->num_busses = 1;
+ gai->num_targsper_bus = gai->num_targets;
+ gai->num_lunsper_targ = 256;
+
+ if (a->pcid->subsystem_device == ATTO_ESAS_R6F0
+ || a->pcid->subsystem_device == ATTO_ESAS_R60F)
+ gai->num_connectors = 4;
+ else
+ gai->num_connectors = 2;
+
+ gai->adap_flags2 |= ATTO_GAI_AF2_ADAP_CTRL_SUPP;
+
+ gai->num_targets_backend = a->num_targets_backend;
+
+ gai->tunnel_flags = a->ioctl_tunnel
+ & (ATTO_GAI_TF_MEM_RW
+ | ATTO_GAI_TF_TRACE
+ | ATTO_GAI_TF_SCSI_PASS_THRU
+ | ATTO_GAI_TF_GET_DEV_ADDR
+ | ATTO_GAI_TF_PHY_CTRL
+ | ATTO_GAI_TF_CONN_CTRL
+ | ATTO_GAI_TF_GET_DEV_INFO);
+ break;
+ }
+
+ case ATTO_FUNC_GET_ADAP_ADDR:
+ {
+ struct atto_hba_get_adapter_address *gaa =
+ &hi->data.get_adap_addr;
+
+ if (hi->flags & HBAF_TUNNEL) {
+ hi->status = ATTO_STS_UNSUPPORTED;
+ break;
+ }
+
+ if (hi->version > ATTO_VER_GET_ADAP_ADDR0) {
+ hi->status = ATTO_STS_INV_VERSION;
+ hi->version = ATTO_VER_GET_ADAP_ADDR0;
+ } else if (gaa->addr_type == ATTO_GAA_AT_PORT
+ || gaa->addr_type == ATTO_GAA_AT_NODE) {
+ if (gaa->addr_type == ATTO_GAA_AT_PORT
+ && gaa->port_id >= ESAS2R_NUM_PHYS) {
+ hi->status = ATTO_STS_NOT_APPL;
+ } else {
+ memcpy((u64 *)gaa->address,
+ &a->nvram->sas_addr[0], sizeof(u64));
+ gaa->addr_len = sizeof(u64);
+ }
+ } else {
+ hi->status = ATTO_STS_INV_PARAM;
+ }
+
+ break;
+ }
+
+ case ATTO_FUNC_MEM_RW:
+ {
+ if (hi->flags & HBAF_TUNNEL) {
+ if (hba_ioctl_tunnel(a, hi, rq, sgc))
+ return true;
+
+ break;
+ }
+
+ hi->status = ATTO_STS_UNSUPPORTED;
+
+ break;
+ }
+
+ case ATTO_FUNC_TRACE:
+ {
+ struct atto_hba_trace *trc = &hi->data.trace;
+
+ if (hi->flags & HBAF_TUNNEL) {
+ if (hba_ioctl_tunnel(a, hi, rq, sgc))
+ return true;
+
+ break;
+ }
+
+ if (hi->version > ATTO_VER_TRACE1) {
+ hi->status = ATTO_STS_INV_VERSION;
+ hi->version = ATTO_VER_TRACE1;
+ break;
+ }
+
+ if (trc->trace_type == ATTO_TRC_TT_FWCOREDUMP
+ && hi->version >= ATTO_VER_TRACE1) {
+ if (trc->trace_func == ATTO_TRC_TF_UPLOAD) {
+ u32 len = hi->data_length;
+ u32 offset = trc->current_offset;
+ u32 total_len = ESAS2R_FWCOREDUMP_SZ;
+
+ /* Size is zero if a core dump isn't present */
+ if (!(a->flags2 & AF2_COREDUMP_SAVED))
+ total_len = 0;
+
+ if (len > total_len)
+ len = total_len;
+
+ if (offset >= total_len
+ || offset + len > total_len
+ || len == 0) {
+ hi->status = ATTO_STS_INV_PARAM;
+ break;
+ }
+
+ memcpy(trc + 1,
+ a->fw_coredump_buff + offset,
+ len);
+
+ hi->data_length = len;
+ } else if (trc->trace_func == ATTO_TRC_TF_RESET) {
+ memset(a->fw_coredump_buff, 0,
+ ESAS2R_FWCOREDUMP_SZ);
+
+ esas2r_lock_clear_flags(&a->flags2,
+ AF2_COREDUMP_SAVED);
+ } else if (trc->trace_func != ATTO_TRC_TF_GET_INFO) {
+ hi->status = ATTO_STS_UNSUPPORTED;
+ break;
+ }
+
+ /* Always return all the info we can. */
+ trc->trace_mask = 0;
+ trc->current_offset = 0;
+ trc->total_length = ESAS2R_FWCOREDUMP_SZ;
+
+ /* Return zero length buffer if core dump not present */
+ if (!(a->flags2 & AF2_COREDUMP_SAVED))
+ trc->total_length = 0;
+ } else {
+ hi->status = ATTO_STS_UNSUPPORTED;
+ }
+
+ break;
+ }
+
+ case ATTO_FUNC_SCSI_PASS_THRU:
+ {
+ struct atto_hba_scsi_pass_thru *spt = &hi->data.scsi_pass_thru;
+ struct scsi_lun lun;
+
+ memcpy(&lun, spt->lun, sizeof(struct scsi_lun));
+
+ if (hi->flags & HBAF_TUNNEL) {
+ if (hba_ioctl_tunnel(a, hi, rq, sgc))
+ return true;
+
+ break;
+ }
+
+ if (hi->version > ATTO_VER_SCSI_PASS_THRU0) {
+ hi->status = ATTO_STS_INV_VERSION;
+ hi->version = ATTO_VER_SCSI_PASS_THRU0;
+ break;
+ }
+
+ if (spt->target_id >= ESAS2R_MAX_TARGETS || !check_lun(lun)) {
+ hi->status = ATTO_STS_INV_PARAM;
+ break;
+ }
+
+ esas2r_sgc_init(sgc, a, rq, NULL);
+
+ sgc->length = hi->data_length;
+ sgc->cur_offset += offsetof(struct atto_ioctl, data.byte)
+ + sizeof(struct atto_hba_scsi_pass_thru);
+
+ /* Finish request initialization */
+ rq->target_id = (u16)spt->target_id;
+ rq->vrq->scsi.flags |= cpu_to_le32(spt->lun[1]);
+ memcpy(rq->vrq->scsi.cdb, spt->cdb, 16);
+ rq->vrq->scsi.length = cpu_to_le32(hi->data_length);
+ rq->sense_len = spt->sense_length;
+ rq->sense_buf = (u8 *)spt->sense_data;
+ /* NOTE: we ignore spt->timeout */
+
+ /*
+ * always usurp the completion callback since the interrupt
+ * callback mechanism may be used.
+ */
+
+ rq->aux_req_cx = hi;
+ rq->aux_req_cb = rq->comp_cb;
+ rq->comp_cb = scsi_passthru_comp_cb;
+
+ if (spt->flags & ATTO_SPTF_DATA_IN) {
+ rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
+ } else if (spt->flags & ATTO_SPTF_DATA_OUT) {
+ rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
+ } else {
+ if (sgc->length) {
+ hi->status = ATTO_STS_INV_PARAM;
+ break;
+ }
+ }
+
+ if (spt->flags & ATTO_SPTF_ORDERED_Q)
+ rq->vrq->scsi.flags |=
+ cpu_to_le32(FCP_CMND_TA_ORDRD_Q);
+ else if (spt->flags & ATTO_SPTF_HEAD_OF_Q)
+ rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_TA_HEAD_Q);
+
+ if (!esas2r_build_sg_list(a, rq, sgc)) {
+ hi->status = ATTO_STS_OUT_OF_RSRC;
+ break;
+ }
+
+ esas2r_start_request(a, rq);
+
+ return true;
+ }
+
+ case ATTO_FUNC_GET_DEV_ADDR:
+ {
+ struct atto_hba_get_device_address *gda =
+ &hi->data.get_dev_addr;
+ struct esas2r_target *t;
+
+ if (hi->flags & HBAF_TUNNEL) {
+ if (hba_ioctl_tunnel(a, hi, rq, sgc))
+ return true;
+
+ break;
+ }
+
+ if (hi->version > ATTO_VER_GET_DEV_ADDR0) {
+ hi->status = ATTO_STS_INV_VERSION;
+ hi->version = ATTO_VER_GET_DEV_ADDR0;
+ break;
+ }
+
+ if (gda->target_id >= ESAS2R_MAX_TARGETS) {
+ hi->status = ATTO_STS_INV_PARAM;
+ break;
+ }
+
+ t = a->targetdb + (u16)gda->target_id;
+
+ if (t->target_state != TS_PRESENT) {
+ hi->status = ATTO_STS_FAILED;
+ } else if (gda->addr_type == ATTO_GDA_AT_PORT) {
+ if (t->sas_addr == 0) {
+ hi->status = ATTO_STS_UNSUPPORTED;
+ } else {
+ *(u64 *)gda->address = t->sas_addr;
+
+ gda->addr_len = sizeof(u64);
+ }
+ } else if (gda->addr_type == ATTO_GDA_AT_NODE) {
+ hi->status = ATTO_STS_NOT_APPL;
+ } else {
+ hi->status = ATTO_STS_INV_PARAM;
+ }
+
+ /* update the target ID to the next one present. */
+
+ gda->target_id =
+ esas2r_targ_db_find_next_present(a,
+ (u16)gda->target_id);
+ break;
+ }
+
+ case ATTO_FUNC_PHY_CTRL:
+ case ATTO_FUNC_CONN_CTRL:
+ {
+ if (hba_ioctl_tunnel(a, hi, rq, sgc))
+ return true;
+
+ break;
+ }
+
+ case ATTO_FUNC_ADAP_CTRL:
+ {
+ struct atto_hba_adap_ctrl *ac = &hi->data.adap_ctrl;
+
+ if (hi->flags & HBAF_TUNNEL) {
+ hi->status = ATTO_STS_UNSUPPORTED;
+ break;
+ }
+
+ if (hi->version > ATTO_VER_ADAP_CTRL0) {
+ hi->status = ATTO_STS_INV_VERSION;
+ hi->version = ATTO_VER_ADAP_CTRL0;
+ break;
+ }
+
+ if (ac->adap_func == ATTO_AC_AF_HARD_RST) {
+ esas2r_reset_adapter(a);
+ } else if (ac->adap_func != ATTO_AC_AF_GET_STATE) {
+ hi->status = ATTO_STS_UNSUPPORTED;
+ break;
+ }
+
+ if (a->flags & AF_CHPRST_NEEDED)
+ ac->adap_state = ATTO_AC_AS_RST_SCHED;
+ else if (a->flags & AF_CHPRST_PENDING)
+ ac->adap_state = ATTO_AC_AS_RST_IN_PROG;
+ else if (a->flags & AF_DISC_PENDING)
+ ac->adap_state = ATTO_AC_AS_RST_DISC;
+ else if (a->flags & AF_DISABLED)
+ ac->adap_state = ATTO_AC_AS_DISABLED;
+ else if (a->flags & AF_DEGRADED_MODE)
+ ac->adap_state = ATTO_AC_AS_DEGRADED;
+ else
+ ac->adap_state = ATTO_AC_AS_OK;
+
+ break;
+ }
+
+ case ATTO_FUNC_GET_DEV_INFO:
+ {
+ struct atto_hba_get_device_info *gdi = &hi->data.get_dev_info;
+ struct esas2r_target *t;
+
+ if (hi->flags & HBAF_TUNNEL) {
+ if (hba_ioctl_tunnel(a, hi, rq, sgc))
+ return true;
+
+ break;
+ }
+
+ if (hi->version > ATTO_VER_GET_DEV_INFO0) {
+ hi->status = ATTO_STS_INV_VERSION;
+ hi->version = ATTO_VER_GET_DEV_INFO0;
+ break;
+ }
+
+ if (gdi->target_id >= ESAS2R_MAX_TARGETS) {
+ hi->status = ATTO_STS_INV_PARAM;
+ break;
+ }
+
+ t = a->targetdb + (u16)gdi->target_id;
+
+ /* update the target ID to the next one present. */
+
+ gdi->target_id =
+ esas2r_targ_db_find_next_present(a,
+ (u16)gdi->target_id);
+
+ if (t->target_state != TS_PRESENT) {
+ hi->status = ATTO_STS_FAILED;
+ break;
+ }
+
+ hi->status = ATTO_STS_UNSUPPORTED;
+ break;
+ }
+
+ default:
+
+ hi->status = ATTO_STS_INV_FUNC;
+ break;
+ }
+
+ return false;
+}
+
+static void hba_ioctl_done_callback(struct esas2r_adapter *a,
+ struct esas2r_request *rq, void *context)
+{
+ struct atto_ioctl *ioctl_hba =
+ (struct atto_ioctl *)esas2r_buffered_ioctl;
+
+ esas2r_debug("hba_ioctl_done_callback %d", a->index);
+
+ if (ioctl_hba->function == ATTO_FUNC_GET_ADAP_INFO) {
+ struct atto_hba_get_adapter_info *gai =
+ &ioctl_hba->data.get_adap_info;
+
+ esas2r_debug("ATTO_FUNC_GET_ADAP_INFO");
+
+ gai->drvr_rev_major = ESAS2R_MAJOR_REV;
+ gai->drvr_rev_minor = ESAS2R_MINOR_REV;
+
+ strcpy(gai->drvr_rev_ascii, ESAS2R_VERSION_STR);
+ strcpy(gai->drvr_name, ESAS2R_DRVR_NAME);
+
+ gai->num_busses = 1;
+ gai->num_targsper_bus = ESAS2R_MAX_ID + 1;
+ gai->num_lunsper_targ = 1;
+ }
+}
+
+u8 handle_hba_ioctl(struct esas2r_adapter *a,
+ struct atto_ioctl *ioctl_hba)
+{
+ struct esas2r_buffered_ioctl bi;
+
+ memset(&bi, 0, sizeof(bi));
+
+ bi.a = a;
+ bi.ioctl = ioctl_hba;
+ bi.length = sizeof(struct atto_ioctl) + ioctl_hba->data_length;
+ bi.callback = hba_ioctl_callback;
+ bi.context = NULL;
+ bi.done_callback = hba_ioctl_done_callback;
+ bi.done_context = NULL;
+ bi.offset = 0;
+
+ return handle_buffered_ioctl(&bi);
+}
+
+
+int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
+ struct esas2r_sas_nvram *data)
+{
+ int result = 0;
+
+ a->nvram_command_done = 0;
+ rq->comp_cb = complete_nvr_req;
+
+ if (esas2r_nvram_write(a, rq, data)) {
+ /* now wait around for it to complete. */
+		while (!a->nvram_command_done)
+			wait_event_interruptible(a->nvram_waiter,
+						 a->nvram_command_done);
+
+ /* done, check the status. */
+ if (rq->req_stat == RS_SUCCESS)
+ result = 1;
+ }
+ return result;
+}
+
+
+/* This function only cares about ATTO-specific ioctls (atto_express_ioctl) */
+int esas2r_ioctl_handler(void *hostdata, int cmd, void __user *arg)
+{
+ struct atto_express_ioctl *ioctl = NULL;
+ struct esas2r_adapter *a;
+ struct esas2r_request *rq;
+ u16 code;
+ int err;
+
+ esas2r_log(ESAS2R_LOG_DEBG, "ioctl (%p, %x, %p)", hostdata, cmd, arg);
+
+ if ((arg == NULL)
+ || (cmd < EXPRESS_IOCTL_MIN)
+ || (cmd > EXPRESS_IOCTL_MAX))
+ return -ENOTSUPP;
+
+ if (!access_ok(VERIFY_WRITE, arg, sizeof(struct atto_express_ioctl))) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "ioctl_handler access_ok failed for cmd %d, "
+ "address %p", cmd,
+ arg);
+ return -EFAULT;
+ }
+
+ /* allocate a kernel memory buffer for the IOCTL data */
+ ioctl = kzalloc(sizeof(struct atto_express_ioctl), GFP_KERNEL);
+ if (ioctl == NULL) {
+ esas2r_log(ESAS2R_LOG_WARN,
+			   "ioctl_handler kzalloc failed for %zu bytes",
+ sizeof(struct atto_express_ioctl));
+ return -ENOMEM;
+ }
+
+ err = __copy_from_user(ioctl, arg, sizeof(struct atto_express_ioctl));
+ if (err != 0) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "copy_from_user didn't copy everything (err %d, cmd %d)",
+ err,
+ cmd);
+ kfree(ioctl);
+
+ return -EFAULT;
+ }
+
+ /* verify the signature */
+
+ if (memcmp(ioctl->header.signature,
+ EXPRESS_IOCTL_SIGNATURE,
+ EXPRESS_IOCTL_SIGNATURE_SIZE) != 0) {
+ esas2r_log(ESAS2R_LOG_WARN, "invalid signature");
+ kfree(ioctl);
+
+ return -ENOTSUPP;
+ }
+
+ /* assume success */
+
+ ioctl->header.return_code = IOCTL_SUCCESS;
+ err = 0;
+
+ /*
+ * handle EXPRESS_IOCTL_GET_CHANNELS
+ * without paying attention to channel
+ */
+
+ if (cmd == EXPRESS_IOCTL_GET_CHANNELS) {
+ int i = 0, k = 0;
+
+ ioctl->data.chanlist.num_channels = 0;
+
+ while (i < MAX_ADAPTERS) {
+ if (esas2r_adapters[i]) {
+ ioctl->data.chanlist.num_channels++;
+ ioctl->data.chanlist.channel[k] = i;
+ k++;
+ }
+ i++;
+ }
+
+ goto ioctl_done;
+ }
+
+ /* get the channel */
+
+ if (ioctl->header.channel == 0xFF) {
+ a = (struct esas2r_adapter *)hostdata;
+ } else {
+		if (ioctl->header.channel >= MAX_ADAPTERS
+		    || esas2r_adapters[ioctl->header.channel] == NULL) {
+			ioctl->header.return_code = IOCTL_BAD_CHANNEL;
+			esas2r_log(ESAS2R_LOG_WARN, "bad channel value");
+			kfree(ioctl);
+
+			return -ENOTSUPP;
+		}
+
+		a = esas2r_adapters[ioctl->header.channel];
+ }
+
+ switch (cmd) {
+ case EXPRESS_IOCTL_RW_FIRMWARE:
+
+ if (ioctl->data.fwrw.img_type == FW_IMG_FM_API) {
+ err = esas2r_write_fw(a,
+ (char *)ioctl->data.fwrw.image,
+ 0,
+ sizeof(struct
+ atto_express_ioctl));
+
+ if (err >= 0) {
+ err = esas2r_read_fw(a,
+ (char *)ioctl->data.fwrw.
+ image,
+ 0,
+ sizeof(struct
+ atto_express_ioctl));
+ }
+ } else if (ioctl->data.fwrw.img_type == FW_IMG_FS_API) {
+ err = esas2r_write_fs(a,
+ (char *)ioctl->data.fwrw.image,
+ 0,
+ sizeof(struct
+ atto_express_ioctl));
+
+ if (err >= 0) {
+ err = esas2r_read_fs(a,
+ (char *)ioctl->data.fwrw.
+ image,
+ 0,
+ sizeof(struct
+ atto_express_ioctl));
+ }
+ } else {
+ ioctl->header.return_code = IOCTL_BAD_FLASH_IMGTYPE;
+ }
+
+ break;
+
+ case EXPRESS_IOCTL_READ_PARAMS:
+
+ memcpy(ioctl->data.prw.data_buffer, a->nvram,
+ sizeof(struct esas2r_sas_nvram));
+ ioctl->data.prw.code = 1;
+ break;
+
+ case EXPRESS_IOCTL_WRITE_PARAMS:
+
+ rq = esas2r_alloc_request(a);
+ if (rq == NULL) {
+ up(&a->nvram_semaphore);
+ ioctl->data.prw.code = 0;
+ break;
+ }
+
+ code = esas2r_write_params(a, rq,
+ (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
+ ioctl->data.prw.code = code;
+
+ esas2r_free_request(a, rq);
+
+ break;
+
+ case EXPRESS_IOCTL_DEFAULT_PARAMS:
+
+ esas2r_nvram_get_defaults(a,
+ (struct esas2r_sas_nvram *)ioctl->data.prw.data_buffer);
+ ioctl->data.prw.code = 1;
+ break;
+
+ case EXPRESS_IOCTL_CHAN_INFO:
+
+ ioctl->data.chaninfo.major_rev = ESAS2R_MAJOR_REV;
+ ioctl->data.chaninfo.minor_rev = ESAS2R_MINOR_REV;
+ ioctl->data.chaninfo.IRQ = a->pcid->irq;
+ ioctl->data.chaninfo.device_id = a->pcid->device;
+ ioctl->data.chaninfo.vendor_id = a->pcid->vendor;
+ ioctl->data.chaninfo.ven_dev_id = a->pcid->subsystem_device;
+ ioctl->data.chaninfo.revision_id = a->pcid->revision;
+ ioctl->data.chaninfo.pci_bus = a->pcid->bus->number;
+ ioctl->data.chaninfo.pci_dev_func = a->pcid->devfn;
+ ioctl->data.chaninfo.core_rev = 0;
+ ioctl->data.chaninfo.host_no = a->host->host_no;
+ ioctl->data.chaninfo.hbaapi_rev = 0;
+ break;
+
+ case EXPRESS_IOCTL_SMP:
+ ioctl->header.return_code = handle_smp_ioctl(a,
+ &ioctl->data.
+ ioctl_smp);
+ break;
+
+ case EXPRESS_CSMI:
+ ioctl->header.return_code =
+ handle_csmi_ioctl(a, &ioctl->data.csmi);
+ break;
+
+ case EXPRESS_IOCTL_HBA:
+ ioctl->header.return_code = handle_hba_ioctl(a,
+ &ioctl->data.
+ ioctl_hba);
+ break;
+
+ case EXPRESS_IOCTL_VDA:
+ err = esas2r_write_vda(a,
+ (char *)&ioctl->data.ioctl_vda,
+ 0,
+ sizeof(struct atto_ioctl_vda) +
+ ioctl->data.ioctl_vda.data_length);
+
+ if (err >= 0) {
+ err = esas2r_read_vda(a,
+ (char *)&ioctl->data.ioctl_vda,
+ 0,
+ sizeof(struct atto_ioctl_vda) +
+ ioctl->data.ioctl_vda.data_length);
+ }
+
+
+
+
+ break;
+
+ case EXPRESS_IOCTL_GET_MOD_INFO:
+
+ ioctl->data.modinfo.adapter = a;
+ ioctl->data.modinfo.pci_dev = a->pcid;
+ ioctl->data.modinfo.scsi_host = a->host;
+ ioctl->data.modinfo.host_no = a->host->host_no;
+
+ break;
+
+ default:
+		esas2r_debug("esas2r_ioctl invalid cmd %d!", cmd);
+ ioctl->header.return_code = IOCTL_ERR_INVCMD;
+ }
+
+ioctl_done:
+
+ if (err < 0) {
+ esas2r_log(ESAS2R_LOG_WARN, "err %d on ioctl cmd %d", err,
+ cmd);
+
+		switch (err) {
+		case -ENOMEM:
+		case -EBUSY:
+			ioctl->header.return_code = IOCTL_OUT_OF_RESOURCES;
+			break;
+
+		case -ENOSYS:
+		case -EINVAL:
+			ioctl->header.return_code = IOCTL_INVALID_PARAM;
+			break;
+
+		default:
+			ioctl->header.return_code = IOCTL_GENERAL_ERROR;
+			break;
+		}
+ }
+
+ /* Always copy the buffer back, if only to pick up the status */
+ err = __copy_to_user(arg, ioctl, sizeof(struct atto_express_ioctl));
+ if (err != 0) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "ioctl_handler copy_to_user didn't copy "
+ "everything (err %d, cmd %d)", err,
+ cmd);
+ kfree(ioctl);
+
+ return -EFAULT;
+ }
+
+ kfree(ioctl);
+
+ return 0;
+}
+
+int esas2r_ioctl(struct scsi_device *sd, int cmd, void __user *arg)
+{
+ return esas2r_ioctl_handler(sd->host->hostdata, cmd, arg);
+}
+
+static void free_fw_buffers(struct esas2r_adapter *a)
+{
+ if (a->firmware.data) {
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)a->firmware.orig_len,
+ a->firmware.data,
+ (dma_addr_t)a->firmware.phys);
+
+ a->firmware.data = NULL;
+ }
+}
+
+static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length)
+{
+ free_fw_buffers(a);
+
+ a->firmware.orig_len = length;
+
+ a->firmware.data = (u8 *)dma_alloc_coherent(&a->pcid->dev,
+ (size_t)length,
+ (dma_addr_t *)&a->firmware.
+ phys,
+ GFP_KERNEL);
+
+ if (!a->firmware.data) {
+ esas2r_debug("buffer alloc failed!");
+ return 0;
+ }
+
+ return 1;
+}
+
+/* Handle a call to read firmware. */
+int esas2r_read_fw(struct esas2r_adapter *a, char *buf, long off, int count)
+{
+ esas2r_trace_enter();
+ /* if the cached header is a status, simply copy it over and return. */
+ if (a->firmware.state == FW_STATUS_ST) {
+ int size = min_t(int, count, sizeof(a->firmware.header));
+ esas2r_trace_exit();
+ memcpy(buf, &a->firmware.header, size);
+ esas2r_debug("esas2r_read_fw: STATUS size %d", size);
+ return size;
+ }
+
+ /*
+	 * If the cached header is a command, execute it when reading at
+	 * offset 0; otherwise copy out the cached pieces.
+ */
+
+ if (a->firmware.state == FW_COMMAND_ST) {
+ u32 length = a->firmware.header.length;
+ esas2r_trace_exit();
+
+ esas2r_debug("esas2r_read_fw: COMMAND length %d off %d",
+ length,
+ off);
+
+ if (off == 0) {
+ if (a->firmware.header.action == FI_ACT_UP) {
+ if (!allocate_fw_buffers(a, length))
+ return -ENOMEM;
+
+
+ /* copy header over */
+
+ memcpy(a->firmware.data,
+ &a->firmware.header,
+ sizeof(a->firmware.header));
+
+ do_fm_api(a,
+ (struct esas2r_flash_img *)a->firmware.data);
+ } else if (a->firmware.header.action == FI_ACT_UPSZ) {
+ int size =
+ min((int)count,
+ (int)sizeof(a->firmware.header));
+ do_fm_api(a, &a->firmware.header);
+ memcpy(buf, &a->firmware.header, size);
+ esas2r_debug("FI_ACT_UPSZ size %d", size);
+ return size;
+ } else {
+ esas2r_debug("invalid action %d",
+ a->firmware.header.action);
+ return -ENOSYS;
+ }
+ }
+
+ if (count + off > length)
+ count = length - off;
+
+ if (count < 0)
+ return 0;
+
+ if (!a->firmware.data) {
+ esas2r_debug(
+ "read: nonzero offset but no buffer available!");
+ return -ENOMEM;
+ }
+
+ esas2r_debug("esas2r_read_fw: off %d count %d length %d ", off,
+ count,
+ length);
+
+ memcpy(buf, &a->firmware.data[off], count);
+
+ /* when done, release the buffer */
+
+ if (length <= off + count) {
+ esas2r_debug("esas2r_read_fw: freeing buffer!");
+
+ free_fw_buffers(a);
+ }
+
+ return count;
+ }
+
+ esas2r_trace_exit();
+ esas2r_debug("esas2r_read_fw: invalid firmware state %d",
+ a->firmware.state);
+
+ return -EINVAL;
+}
+
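+/*
+ * Note on the flash image flow (summary of the code below): firmware image
+ * transfers arrive from user space through the sysfs "fw" attribute or the
+ * EXPRESS_IOCTL_RW_FIRMWARE ioctl as a write followed by a read.  A
+ * FI_ACT_DOWN write caches the image in a DMA buffer and the final chunk
+ * triggers do_fm_api(); FI_ACT_UP and FI_ACT_UPSZ writes cache only the
+ * header, and the subsequent read issues the command and returns the
+ * result.
+ */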
+/* Handle a call to write firmware. */
+int esas2r_write_fw(struct esas2r_adapter *a, const char *buf, long off,
+ int count)
+{
+ u32 length;
+
+ if (off == 0) {
+ struct esas2r_flash_img *header =
+ (struct esas2r_flash_img *)buf;
+
+ /* assume version 0 flash image */
+
+ int min_size = sizeof(struct esas2r_flash_img_v0);
+
+ a->firmware.state = FW_INVALID_ST;
+
+ /* validate the version field first */
+
+ if (count < 4
+ || header->fi_version > FI_VERSION_1) {
+ esas2r_debug(
+ "esas2r_write_fw: short header or invalid version");
+ return -EINVAL;
+ }
+
+		/* See if it's a version 1 flash image */
+
+ if (header->fi_version == FI_VERSION_1)
+ min_size = sizeof(struct esas2r_flash_img);
+
+ /* If this is the start, the header must be full and valid. */
+ if (count < min_size) {
+ esas2r_debug("esas2r_write_fw: short header, aborting");
+ return -EINVAL;
+ }
+
+ /* Make sure the size is reasonable. */
+ length = header->length;
+
+ if (length > 1024 * 1024) {
+ esas2r_debug(
+ "esas2r_write_fw: hosed, length %d fi_version %d",
+ length, header->fi_version);
+ return -EINVAL;
+ }
+
+ /*
+ * If this is a write command, allocate memory because
+		 * we have to cache everything.  Otherwise, just cache
+ * the header, because the read op will do the command.
+ */
+
+ if (header->action == FI_ACT_DOWN) {
+ if (!allocate_fw_buffers(a, length))
+ return -ENOMEM;
+
+ /*
+ * Store the command, so there is context on subsequent
+ * calls.
+ */
+ memcpy(&a->firmware.header,
+ buf,
+ sizeof(*header));
+ } else if (header->action == FI_ACT_UP
+ || header->action == FI_ACT_UPSZ) {
+ /* Save the command, result will be picked up on read */
+ memcpy(&a->firmware.header,
+ buf,
+ sizeof(*header));
+
+ a->firmware.state = FW_COMMAND_ST;
+
+ esas2r_debug(
+ "esas2r_write_fw: COMMAND, count %d, action %d ",
+ count, header->action);
+
+ /*
+ * Pretend we took the whole buffer,
+ * so we don't get bothered again.
+ */
+
+ return count;
+ } else {
+ esas2r_debug("esas2r_write_fw: invalid action %d ",
+ a->firmware.header.action);
+ return -ENOSYS;
+ }
+ } else {
+ length = a->firmware.header.length;
+ }
+
+ /*
+ * We only get here on a download command, regardless of offset.
+	 * The chunks written by the system need to be cached, and when
+ * the final one arrives, issue the fmapi command.
+ */
+
+ if (off + count > length)
+ count = length - off;
+
+ if (count > 0) {
+ esas2r_debug("esas2r_write_fw: off %d count %d length %d", off,
+ count,
+ length);
+
+ /*
+ * On a full upload, the system tries sending the whole buffer.
+		 * There's nothing to do with it, so just drop it here, before
+ * trying to copy over into unallocated memory!
+ */
+ if (a->firmware.header.action == FI_ACT_UP)
+ return count;
+
+ if (!a->firmware.data) {
+ esas2r_debug(
+ "write: nonzero offset but no buffer available!");
+ return -ENOMEM;
+ }
+
+ memcpy(&a->firmware.data[off], buf, count);
+
+ if (length == off + count) {
+ do_fm_api(a,
+ (struct esas2r_flash_img *)a->firmware.data);
+
+ /*
+ * Now copy the header result to be picked up by the
+ * next read
+ */
+ memcpy(&a->firmware.header,
+ a->firmware.data,
+ sizeof(a->firmware.header));
+
+ a->firmware.state = FW_STATUS_ST;
+
+ esas2r_debug("write completed");
+
+ /*
+ * Since the system has the data buffered, the only way
+ * this can leak is if a root user writes a program
+ * that writes a shorter buffer than it claims, and the
+ * copyin fails.
+ */
+ free_fw_buffers(a);
+ }
+ }
+
+ return count;
+}
+
+/* Callback for the completion of a VDA request. */
+static void vda_complete_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ a->vda_command_done = 1;
+ wake_up_interruptible(&a->vda_waiter);
+}
+
+/* Scatter/gather callback for VDA requests */
+static u32 get_physaddr_vda(struct esas2r_sg_context *sgc, u64 *addr)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
+ int offset = (u8 *)sgc->cur_offset - (u8 *)a->vda_buffer;
+
+ (*addr) = a->ppvda_buffer + offset;
+ return VDA_MAX_BUFFER_SIZE - offset;
+}
+
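+/*
+ * VDA pass-through follows the same write-then-read convention as the
+ * firmware interface: esas2r_write_vda() copies the request into a
+ * persistent DMA buffer, and the subsequent esas2r_read_vda() at offset
+ * zero issues it to the chip and copies the response back out.
+ */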
+/* Handle a call to read a VDA command. */
+int esas2r_read_vda(struct esas2r_adapter *a, char *buf, long off, int count)
+{
+ if (!a->vda_buffer)
+ return -ENOMEM;
+
+ if (off == 0) {
+ struct esas2r_request *rq;
+ struct atto_ioctl_vda *vi =
+ (struct atto_ioctl_vda *)a->vda_buffer;
+ struct esas2r_sg_context sgc;
+ bool wait_for_completion;
+
+ /*
+		 * Presumably, someone has already written to the vda_buffer,
+		 * and now they are reading back the response, so we will
+		 * actually issue the request to the chip and reply.
+ */
+
+ /* allocate a request */
+ rq = esas2r_alloc_request(a);
+ if (rq == NULL) {
+			esas2r_debug("esas2r_read_vda: out of requests");
+ return -EBUSY;
+ }
+
+ rq->comp_cb = vda_complete_req;
+
+ sgc.first_req = rq;
+ sgc.adapter = a;
+ sgc.cur_offset = a->vda_buffer + VDA_BUFFER_HEADER_SZ;
+ sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_vda;
+
+ a->vda_command_done = 0;
+
+ wait_for_completion =
+ esas2r_process_vda_ioctl(a, vi, rq, &sgc);
+
+ if (wait_for_completion) {
+ /* now wait around for it to complete. */
+
+ while (!a->vda_command_done)
+ wait_event_interruptible(a->vda_waiter,
+ a->vda_command_done);
+ }
+
+ esas2r_free_request(a, (struct esas2r_request *)rq);
+ }
+
+ if (off > VDA_MAX_BUFFER_SIZE)
+ return 0;
+
+ if (count + off > VDA_MAX_BUFFER_SIZE)
+ count = VDA_MAX_BUFFER_SIZE - off;
+
+ if (count < 0)
+ return 0;
+
+ memcpy(buf, a->vda_buffer + off, count);
+
+ return count;
+}
+
+/* Handle a call to write a VDA command. */
+int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
+ int count)
+{
+ /*
+	 * Allocate memory for it, if not already done.  Once allocated,
+ * we will keep it around until the driver is unloaded.
+ */
+
+ if (!a->vda_buffer) {
+ dma_addr_t dma_addr;
+ a->vda_buffer = (u8 *)dma_alloc_coherent(&a->pcid->dev,
+ (size_t)
+ VDA_MAX_BUFFER_SIZE,
+ &dma_addr,
+ GFP_KERNEL);
+
+ a->ppvda_buffer = dma_addr;
+ }
+
+ if (!a->vda_buffer)
+ return -ENOMEM;
+
+ if (off > VDA_MAX_BUFFER_SIZE)
+ return 0;
+
+ if (count + off > VDA_MAX_BUFFER_SIZE)
+ count = VDA_MAX_BUFFER_SIZE - off;
+
+ if (count < 1)
+ return 0;
+
+ memcpy(a->vda_buffer + off, buf, count);
+
+ return count;
+}
+
+/* Callback for the completion of an FS_API request.*/
+static void fs_api_complete_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ a->fs_api_command_done = 1;
+
+ wake_up_interruptible(&a->fs_api_waiter);
+}
+
+/* Scatter/gather callback for VDA requests */
+static u32 get_physaddr_fs_api(struct esas2r_sg_context *sgc, u64 *addr)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)sgc->adapter;
+ struct esas2r_ioctl_fs *fs =
+ (struct esas2r_ioctl_fs *)a->fs_api_buffer;
+ u32 offset = (u8 *)sgc->cur_offset - (u8 *)fs;
+
+ (*addr) = a->ppfs_api_buffer + offset;
+
+ return a->fs_api_buffer_size - offset;
+}
+
+/* Handle a call to read firmware via FS_API. */
+int esas2r_read_fs(struct esas2r_adapter *a, char *buf, long off, int count)
+{
+ if (!a->fs_api_buffer)
+ return -ENOMEM;
+
+ if (off == 0) {
+ struct esas2r_request *rq;
+ struct esas2r_sg_context sgc;
+ struct esas2r_ioctl_fs *fs =
+ (struct esas2r_ioctl_fs *)a->fs_api_buffer;
+
+ /* If another flash request is already in progress, return. */
+ if (down_interruptible(&a->fs_api_semaphore)) {
+busy:
+ fs->status = ATTO_STS_OUT_OF_RSRC;
+ return -EBUSY;
+ }
+
+ /*
+		 * Presumably, someone has already written to the
+		 * fs_api_buffer, and now they are reading back the response,
+		 * so we will actually issue the request to the chip and
+		 * reply.  Allocate a request.
+ */
+
+ rq = esas2r_alloc_request(a);
+ if (rq == NULL) {
+ esas2r_debug("esas2r_read_fs: out of requests");
+ up(&a->fs_api_semaphore);
+ goto busy;
+ }
+
+ rq->comp_cb = fs_api_complete_req;
+
+		/* Set up the SGCONTEXT to build the s/g table */
+
+ sgc.cur_offset = fs->data;
+ sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_fs_api;
+
+ a->fs_api_command_done = 0;
+
+ if (!esas2r_process_fs_ioctl(a, fs, rq, &sgc)) {
+ if (fs->status == ATTO_STS_OUT_OF_RSRC)
+ count = -EBUSY;
+
+ goto dont_wait;
+ }
+
+ /* Now wait around for it to complete. */
+
+		while (!a->fs_api_command_done)
+			wait_event_interruptible(a->fs_api_waiter,
+						 a->fs_api_command_done);
+dont_wait:
+ /* Free the request and keep going */
+ up(&a->fs_api_semaphore);
+ esas2r_free_request(a, (struct esas2r_request *)rq);
+
+ /* Pick up possible error code from above */
+ if (count < 0)
+ return count;
+ }
+
+ if (off > a->fs_api_buffer_size)
+ return 0;
+
+ if (count + off > a->fs_api_buffer_size)
+ count = a->fs_api_buffer_size - off;
+
+ if (count < 0)
+ return 0;
+
+ memcpy(buf, a->fs_api_buffer + off, count);
+
+ return count;
+}
+
+/* Handle a call to write firmware via FS_API. */
+int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
+ int count)
+{
+ if (off == 0) {
+ struct esas2r_ioctl_fs *fs = (struct esas2r_ioctl_fs *)buf;
+ u32 length = fs->command.length + offsetof(
+ struct esas2r_ioctl_fs,
+ data);
+
+ /*
+ * Special case, for BEGIN commands, the length field
+ * is lying to us, so just get enough for the header.
+ */
+
+ if (fs->command.command == ESAS2R_FS_CMD_BEGINW)
+ length = offsetof(struct esas2r_ioctl_fs, data);
+
+ /*
+ * Beginning a command. We assume we'll get at least
+ * enough in the first write so we can look at the
+ * header and see how much we need to alloc.
+ */
+
+ if (count < offsetof(struct esas2r_ioctl_fs, data))
+ return -EINVAL;
+
+ /* Allocate a buffer or use the existing buffer. */
+ if (a->fs_api_buffer) {
+ if (a->fs_api_buffer_size < length) {
+ /* Free too-small buffer and get a new one */
+ dma_free_coherent(&a->pcid->dev,
+ (size_t)a->fs_api_buffer_size,
+ a->fs_api_buffer,
+ (dma_addr_t)a->ppfs_api_buffer);
+
+ goto re_allocate_buffer;
+ }
+ } else {
+re_allocate_buffer:
+ a->fs_api_buffer_size = length;
+
+ a->fs_api_buffer = (u8 *)dma_alloc_coherent(
+ &a->pcid->dev,
+ (size_t)a->fs_api_buffer_size,
+ (dma_addr_t *)&a->ppfs_api_buffer,
+ GFP_KERNEL);
+ }
+ }
+
+ if (!a->fs_api_buffer)
+ return -ENOMEM;
+
+ if (off > a->fs_api_buffer_size)
+ return 0;
+
+ if (count + off > a->fs_api_buffer_size)
+ count = a->fs_api_buffer_size - off;
+
+ if (count < 1)
+ return 0;
+
+ memcpy(a->fs_api_buffer + off, buf, count);
+
+ return count;
+}
diff --git a/drivers/scsi/esas2r/esas2r_log.c b/drivers/scsi/esas2r/esas2r_log.c
new file mode 100644
index 00000000000..9bf285df58d
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_log.c
@@ -0,0 +1,254 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_log.c
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+/*
+ * this module provides the driver's logging functionality.  the
+ * event_log_level module parameter controls the level of messages that are
+ * written to the system log.  the default level logs critical and warning
+ * messages.  if other types of messages are desired, load the module with
+ * the appropriate value for the event_log_level module parameter.  for
+ * example:
+ *
+ * insmod <module> event_log_level=1
+ *
+ * will load the module and only critical events will be written by this
+ * module to the system log.  if critical, warning, and information-level
+ * messages are desired, the correct value for the event_log_level module
+ * parameter would be as follows:
+ *
+ * insmod <module> event_log_level=3
+ */
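+
+/*
+ * illustrative example (not part of the driver logic): with the default
+ * level of ESAS2R_LOG_WARN, the first call below is written to the system
+ * log and the second is filtered out:
+ *
+ *     esas2r_log(ESAS2R_LOG_WARN, "request timed out");
+ *     esas2r_log(ESAS2R_LOG_INFO, "request completed");
+ */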
+
+#define EVENT_LOG_BUFF_SIZE 1024
+
+static long event_log_level = ESAS2R_LOG_DFLT;
+
+module_param(event_log_level, long, S_IRUGO | S_IRUSR);
+MODULE_PARM_DESC(event_log_level,
+ "Specifies the level of events to report to the system log. Critical and warning level events are logged by default.");
+
+/* A shared buffer to use for formatting messages. */
+static char event_buffer[EVENT_LOG_BUFF_SIZE];
+
+/* A lock to protect the shared buffer used for formatting messages. */
+static DEFINE_SPINLOCK(event_buffer_lock);
+
+/**
+ * translates an esas2r-defined logging event level to a kernel logging level.
+ *
+ * @param [in] level the esas2r-defined logging event level to translate
+ *
+ * @return the corresponding kernel log level prefix string (e.g. KERN_WARNING).
+ */
+static const char *translate_esas2r_event_level_to_kernel(const long level)
+{
+ switch (level) {
+ case ESAS2R_LOG_CRIT:
+ return KERN_CRIT;
+
+ case ESAS2R_LOG_WARN:
+ return KERN_WARNING;
+
+ case ESAS2R_LOG_INFO:
+ return KERN_INFO;
+
+ case ESAS2R_LOG_DEBG:
+ case ESAS2R_LOG_TRCE:
+ default:
+ return KERN_DEBUG;
+ }
+}
+
+/**
+ * the master logging function. this function will format the message as
+ * outlined by the formatting string, the input device information and the
+ * substitution arguments and output the resulting string to the system log.
+ *
+ * @param [in] level the event log level of the message
+ * @param [in] dev the device information
+ * @param [in] format the formatting string for the message
+ * @param [in] args the substitution arguments to the formatting string
+ *
+ * @return 0 on success, or -1 if an error occurred.
+ */
+static int esas2r_log_master(const long level,
+ const struct device *dev,
+ const char *format,
+ va_list args)
+{
+ if (level <= event_log_level) {
+ unsigned long flags = 0;
+ int retval = 0;
+ char *buffer = event_buffer;
+ size_t buflen = EVENT_LOG_BUFF_SIZE;
+ const char *fmt_nodev = "%s%s: ";
+ const char *fmt_dev = "%s%s [%s, %s, %s]";
+ const char *slevel =
+ translate_esas2r_event_level_to_kernel(level);
+
+ spin_lock_irqsave(&event_buffer_lock, flags);
+
+ if (buffer == NULL) {
+ spin_unlock_irqrestore(&event_buffer_lock, flags);
+ return -1;
+ }
+
+ memset(buffer, 0, buflen);
+
+ /*
+ * format the level onto the beginning of the string and do
+ * some pointer arithmetic to move the pointer to the point
+ * where the actual message can be inserted.
+ */
+
+ if (dev == NULL) {
+ snprintf(buffer, buflen, fmt_nodev, slevel,
+ ESAS2R_DRVR_NAME);
+ } else {
+ snprintf(buffer, buflen, fmt_dev, slevel,
+ ESAS2R_DRVR_NAME,
+ (dev->driver ? dev->driver->name : "unknown"),
+ (dev->bus ? dev->bus->name : "unknown"),
+ dev_name(dev));
+ }
+
+ buffer += strlen(event_buffer);
+ buflen -= strlen(event_buffer);
+
+ retval = vsnprintf(buffer, buflen, format, args);
+ if (retval < 0) {
+ spin_unlock_irqrestore(&event_buffer_lock, flags);
+ return -1;
+ }
+
+ /*
+ * Put a line break at the end of the formatted string so that
+ * we don't wind up with run-on messages. only append if there
+ * is enough space in the buffer.
+ */
+ if (strlen(event_buffer) < buflen)
+ strcat(buffer, "\n");
+
+		printk("%s", event_buffer);
+
+ spin_unlock_irqrestore(&event_buffer_lock, flags);
+ }
+
+ return 0;
+}
+
+/**
+ * formats and logs a message to the system log.
+ *
+ * @param [in] level the event level of the message
+ * @param [in] format the formatting string for the message
+ * @param [in] ... the substitution arguments to the formatting string
+ *
+ * @return 0 on success, or -1 if an error occurred.
+ */
+int esas2r_log(const long level, const char *format, ...)
+{
+ int retval = 0;
+ va_list args;
+
+ va_start(args, format);
+
+ retval = esas2r_log_master(level, NULL, format, args);
+
+ va_end(args);
+
+ return retval;
+}
+
+/**
+ * formats and logs a message to the system log. this message will include
+ * device information.
+ *
+ * @param [in] level the event level of the message
+ * @param [in] dev the device information
+ * @param [in] format the formatting string for the message
+ * @param [in] ... the substitution arguments to the formatting string
+ *
+ * @return 0 on success, or -1 if an error occurred.
+ */
+int esas2r_log_dev(const long level,
+ const struct device *dev,
+ const char *format,
+ ...)
+{
+ int retval = 0;
+ va_list args;
+
+ va_start(args, format);
+
+ retval = esas2r_log_master(level, dev, format, args);
+
+ va_end(args);
+
+ return retval;
+}
+
+/**
+ * formats and logs a hex dump of a buffer to the system log.
+ *
+ * @param [in] level the event level of the message
+ * @param [in] buf   the buffer to dump
+ * @param [in] len   the length of the buffer, in bytes
+ *
+ * @return 1 (errors are not reported)
+ */
+int esas2r_log_hexdump(const long level,
+ const void *buf,
+ size_t len)
+{
+ if (level <= event_log_level) {
+ print_hex_dump(translate_esas2r_event_level_to_kernel(level),
+ "", DUMP_PREFIX_OFFSET, 16, 1, buf,
+ len, true);
+ }
+
+ return 1;
+}
diff --git a/drivers/scsi/esas2r/esas2r_log.h b/drivers/scsi/esas2r/esas2r_log.h
new file mode 100644
index 00000000000..7b6397bb5b9
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_log.h
@@ -0,0 +1,118 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_log.h
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef __esas2r_log_h__
+#define __esas2r_log_h__
+
+struct device;
+
+enum {
+ ESAS2R_LOG_NONE = 0, /* no events logged */
+ ESAS2R_LOG_CRIT = 1, /* critical events */
+ ESAS2R_LOG_WARN = 2, /* warning events */
+ ESAS2R_LOG_INFO = 3, /* info events */
+ ESAS2R_LOG_DEBG = 4, /* debugging events */
+ ESAS2R_LOG_TRCE = 5, /* tracing events */
+
+#ifdef ESAS2R_TRACE
+ ESAS2R_LOG_DFLT = ESAS2R_LOG_TRCE
+#else
+ ESAS2R_LOG_DFLT = ESAS2R_LOG_WARN
+#endif
+};
+
+int esas2r_log(const long level, const char *format, ...);
+int esas2r_log_dev(const long level,
+ const struct device *dev,
+ const char *format,
+ ...);
+int esas2r_log_hexdump(const long level,
+ const void *buf,
+ size_t len);
+
+/*
+ * the following macros are provided specifically for debugging and tracing
+ * messages. esas2r_debug() is provided for generic non-hardware layer
+ * debugging and tracing events. esas2r_hdebug is provided specifically for
+ * hardware layer debugging and tracing events.
+ */
+
+#ifdef ESAS2R_DEBUG
+#define esas2r_debug(f, args ...) esas2r_log(ESAS2R_LOG_DEBG, f, ## args)
+#define esas2r_hdebug(f, args ...) esas2r_log(ESAS2R_LOG_DEBG, f, ## args)
+#else
+#define esas2r_debug(f, args ...)
+#define esas2r_hdebug(f, args ...)
+#endif /* ESAS2R_DEBUG */
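+
+/*
+ * example usage (illustrative only; both macros take printf-style
+ * arguments and compile to nothing unless ESAS2R_DEBUG is defined):
+ *
+ *     esas2r_debug("buffered ioctl - no requests");
+ */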
+
+/*
+ * the following macros are provided in order to trace the driver and catch
+ * some more serious bugs. be warned, enabling these macros may *severely*
+ * impact performance.
+ */
+
+#ifdef ESAS2R_TRACE
+#define esas2r_bugon() \
+ do { \
+ esas2r_log(ESAS2R_LOG_TRCE, "esas2r_bugon() called in %s:%d" \
+ " - dumping stack and stopping kernel", __func__, \
+ __LINE__); \
+ dump_stack(); \
+ BUG(); \
+ } while (0)
+
+#define esas2r_trace_enter() esas2r_log(ESAS2R_LOG_TRCE, "entered %s (%s:%d)", \
+ __func__, __FILE__, __LINE__)
+#define esas2r_trace_exit() esas2r_log(ESAS2R_LOG_TRCE, "exited %s (%s:%d)", \
+ __func__, __FILE__, __LINE__)
+#define esas2r_trace(f, args ...) esas2r_log(ESAS2R_LOG_TRCE, "(%s:%s:%d): " \
+ f, __func__, __FILE__, __LINE__, \
+ ## args)
+#else
+#define esas2r_bugon()
+#define esas2r_trace_enter()
+#define esas2r_trace_exit()
+#define esas2r_trace(f, args ...)
+#endif /* ESAS2R_TRACE */
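+
+/*
+ * example usage (illustrative only): esas2r_read_fw() brackets its body
+ * with these macros when ESAS2R_TRACE is defined:
+ *
+ *     esas2r_trace_enter();
+ *     ...
+ *     esas2r_trace_exit();
+ */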
+
+#endif /* __esas2r_log_h__ */
diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c
new file mode 100644
index 00000000000..4abf1272e1e
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_main.c
@@ -0,0 +1,2032 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_main.c
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+MODULE_DESCRIPTION(ESAS2R_DRVR_NAME ": " ESAS2R_LONGNAME " driver");
+MODULE_AUTHOR("ATTO Technology, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ESAS2R_VERSION_STR);
+
+/* global definitions */
+
+static int found_adapters;
+struct esas2r_adapter *esas2r_adapters[MAX_ADAPTERS];
+
+#define ESAS2R_VDA_EVENT_PORT1 54414
+#define ESAS2R_VDA_EVENT_PORT2 54415
+#define ESAS2R_VDA_EVENT_SOCK_COUNT 2
+
+static struct esas2r_adapter *esas2r_adapter_from_kobj(struct kobject *kobj)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct Scsi_Host *host = class_to_shost(dev);
+
+ return (struct esas2r_adapter *)host->hostdata;
+}
+
+static ssize_t read_fw(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+ return esas2r_read_fw(a, buf, off, count);
+}
+
+static ssize_t write_fw(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+ return esas2r_write_fw(a, buf, off, count);
+}
+
+static ssize_t read_fs(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+ return esas2r_read_fs(a, buf, off, count);
+}
+
+static ssize_t write_fs(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+ int length = min(sizeof(struct esas2r_ioctl_fs), count);
+ int result = 0;
+
+ result = esas2r_write_fs(a, buf, off, count);
+
+ if (result < 0)
+ result = 0;
+
+ return length;
+}
+
+static ssize_t read_vda(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+ return esas2r_read_vda(a, buf, off, count);
+}
+
+static ssize_t write_vda(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+ return esas2r_write_vda(a, buf, off, count);
+}
+
+static ssize_t read_live_nvram(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+ int length = min_t(size_t, sizeof(struct esas2r_sas_nvram), PAGE_SIZE);
+
+ memcpy(buf, a->nvram, length);
+ return length;
+}
+
+static ssize_t write_live_nvram(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+ struct esas2r_request *rq;
+ int result = -EFAULT;
+
+ rq = esas2r_alloc_request(a);
+ if (rq == NULL)
+ return -ENOMEM;
+
+ if (esas2r_write_params(a, rq, (struct esas2r_sas_nvram *)buf))
+ result = count;
+
+ esas2r_free_request(a, rq);
+
+ return result;
+}
+
+static ssize_t read_default_nvram(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+
+ esas2r_nvram_get_defaults(a, (struct esas2r_sas_nvram *)buf);
+
+ return sizeof(struct esas2r_sas_nvram);
+}
+
+static ssize_t read_hw(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+ int length = min_t(size_t, sizeof(struct atto_ioctl), PAGE_SIZE);
+
+ if (!a->local_atto_ioctl)
+ return -ENOMEM;
+
+ if (handle_hba_ioctl(a, a->local_atto_ioctl) != IOCTL_SUCCESS)
+ return -ENOMEM;
+
+ memcpy(buf, a->local_atto_ioctl, length);
+
+ return length;
+}
+
+static ssize_t write_hw(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr,
+ char *buf, loff_t off, size_t count)
+{
+ struct esas2r_adapter *a = esas2r_adapter_from_kobj(kobj);
+ int length = min(sizeof(struct atto_ioctl), count);
+
+ if (!a->local_atto_ioctl) {
+ a->local_atto_ioctl = kzalloc(sizeof(struct atto_ioctl),
+ GFP_KERNEL);
+ if (a->local_atto_ioctl == NULL) {
+			esas2r_log(ESAS2R_LOG_WARN,
+				   "write_hw kzalloc failed for %zu bytes",
+				   sizeof(struct atto_ioctl));
+ return -ENOMEM;
+ }
+ }
+
+ memset(a->local_atto_ioctl, 0, sizeof(struct atto_ioctl));
+ memcpy(a->local_atto_ioctl, buf, length);
+
+ return length;
+}
+
+#define ESAS2R_RW_BIN_ATTR(_name) \
+ struct bin_attribute bin_attr_ ## _name = { \
+ .attr = \
+ { .name = __stringify(_name), .mode = S_IRUSR | S_IWUSR }, \
+ .size = 0, \
+ .read = read_ ## _name, \
+ .write = write_ ## _name }
+
+ESAS2R_RW_BIN_ATTR(fw);
+ESAS2R_RW_BIN_ATTR(fs);
+ESAS2R_RW_BIN_ATTR(vda);
+ESAS2R_RW_BIN_ATTR(hw);
+ESAS2R_RW_BIN_ATTR(live_nvram);
+
+struct bin_attribute bin_attr_default_nvram = {
+ .attr = { .name = "default_nvram", .mode = S_IRUGO },
+ .size = 0,
+ .read = read_default_nvram,
+ .write = NULL
+};
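+
+/*
+ * For reference, ESAS2R_RW_BIN_ATTR(fw) above expands to roughly:
+ *
+ *	struct bin_attribute bin_attr_fw = {
+ *		.attr  = { .name = "fw", .mode = S_IRUSR | S_IWUSR },
+ *		.size  = 0,
+ *		.read  = read_fw,
+ *		.write = write_fw
+ *	};
+ *
+ * Once registered in esas2r_probe(), these attributes typically show up as
+ * binary files such as /sys/class/scsi_host/host<N>/fw (the exact path
+ * depends on the sysfs layout of the running kernel).
+ */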
+
+static struct scsi_host_template driver_template = {
+ .module = THIS_MODULE,
+ .show_info = esas2r_show_info,
+ .name = ESAS2R_LONGNAME,
+ .release = esas2r_release,
+ .info = esas2r_info,
+ .ioctl = esas2r_ioctl,
+ .queuecommand = esas2r_queuecommand,
+ .eh_abort_handler = esas2r_eh_abort,
+ .eh_device_reset_handler = esas2r_device_reset,
+ .eh_bus_reset_handler = esas2r_bus_reset,
+ .eh_host_reset_handler = esas2r_host_reset,
+ .eh_target_reset_handler = esas2r_target_reset,
+ .can_queue = 128,
+ .this_id = -1,
+ .sg_tablesize = SCSI_MAX_SG_SEGMENTS,
+	.cmd_per_lun = ESAS2R_DEFAULT_CMD_PER_LUN,
+ .present = 0,
+ .unchecked_isa_dma = 0,
+ .use_clustering = ENABLE_CLUSTERING,
+ .emulated = 0,
+ .proc_name = ESAS2R_DRVR_NAME,
+ .slave_configure = esas2r_slave_configure,
+ .slave_alloc = esas2r_slave_alloc,
+ .slave_destroy = esas2r_slave_destroy,
+ .change_queue_depth = esas2r_change_queue_depth,
+ .change_queue_type = esas2r_change_queue_type,
+ .max_sectors = 0xFFFF,
+};
+
+int sgl_page_size = 512;
+module_param(sgl_page_size, int, 0);
+MODULE_PARM_DESC(sgl_page_size,
+ "Scatter/gather list (SGL) page size in number of S/G "
+ "entries. If your application is doing a lot of very large "
+ "transfers, you may want to increase the SGL page size. "
+ "Default 512.");
+
+int num_sg_lists = 1024;
+module_param(num_sg_lists, int, 0);
+MODULE_PARM_DESC(num_sg_lists,
+ "Number of scatter/gather lists. Default 1024.");
+
+int sg_tablesize = SCSI_MAX_SG_SEGMENTS;
+module_param(sg_tablesize, int, 0);
+MODULE_PARM_DESC(sg_tablesize,
+ "Maximum number of entries in a scatter/gather table.");
+
+int num_requests = 256;
+module_param(num_requests, int, 0);
+MODULE_PARM_DESC(num_requests,
+ "Number of requests. Default 256.");
+
+int num_ae_requests = 4;
+module_param(num_ae_requests, int, 0);
+MODULE_PARM_DESC(num_ae_requests,
+		 "Number of VDA asynchronous event requests. Default 4.");
+
+int cmd_per_lun = ESAS2R_DEFAULT_CMD_PER_LUN;
+module_param(cmd_per_lun, int, 0);
+MODULE_PARM_DESC(cmd_per_lun,
+ "Maximum number of commands per LUN. Default "
+ DEFINED_NUM_TO_STR(ESAS2R_DEFAULT_CMD_PER_LUN) ".");
+
+int can_queue = 128;
+module_param(can_queue, int, 0);
+MODULE_PARM_DESC(can_queue,
+ "Maximum number of commands per adapter. Default 128.");
+
+int esas2r_max_sectors = 0xFFFF;
+module_param(esas2r_max_sectors, int, 0);
+MODULE_PARM_DESC(esas2r_max_sectors,
+ "Maximum number of disk sectors in a single data transfer. "
+ "Default 65535 (largest possible setting).");
+
+int interrupt_mode = 1;
+module_param(interrupt_mode, int, 0);
+MODULE_PARM_DESC(interrupt_mode,
+		 "Interrupt mode to use: 0 for legacy interrupts, "
+		 "1 for MSI. Default is MSI (1).");
+
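+/*
+ * Illustrative example only: the options above are ordinary module
+ * parameters, so they can be set at load time, e.g.
+ *
+ *	modprobe esas2r interrupt_mode=0 can_queue=256 sg_tablesize=64
+ *
+ * or via esas2r.interrupt_mode=0 on the kernel command line if the driver
+ * is built in.
+ */
+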
+static struct pci_device_id esas2r_pci_table[] = {
+	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x0049, 0, 0, 0 },
+	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004A, 0, 0, 0 },
+	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004B, 0, 0, 0 },
+	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004C, 0, 0, 0 },
+	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004D, 0, 0, 0 },
+	{ ATTO_VENDOR_ID, 0x0049, ATTO_VENDOR_ID, 0x004E, 0, 0, 0 },
+	{ 0, 0, 0, 0, 0, 0, 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, esas2r_pci_table);
+
+static int
+esas2r_probe(struct pci_dev *pcid, const struct pci_device_id *id);
+
+static void
+esas2r_remove(struct pci_dev *pcid);
+
+static struct pci_driver
+ esas2r_pci_driver = {
+ .name = ESAS2R_DRVR_NAME,
+ .id_table = esas2r_pci_table,
+ .probe = esas2r_probe,
+ .remove = esas2r_remove,
+ .suspend = esas2r_suspend,
+ .resume = esas2r_resume,
+};
+
+static int esas2r_probe(struct pci_dev *pcid,
+ const struct pci_device_id *id)
+{
+ struct Scsi_Host *host = NULL;
+ struct esas2r_adapter *a;
+ int err;
+
+	size_t host_alloc_size = sizeof(struct esas2r_adapter) +
+				 (num_requests + 1) *
+				 sizeof(struct esas2r_request);
+
+ esas2r_log_dev(ESAS2R_LOG_DEBG, &(pcid->dev),
+ "esas2r_probe() 0x%02x 0x%02x 0x%02x 0x%02x",
+ pcid->vendor,
+ pcid->device,
+ pcid->subsystem_vendor,
+ pcid->subsystem_device);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
+ "before pci_enable_device() "
+ "enable_cnt: %d",
+ pcid->enable_cnt.counter);
+
+ err = pci_enable_device(pcid);
+ if (err != 0) {
+ esas2r_log_dev(ESAS2R_LOG_CRIT, &(pcid->dev),
+ "pci_enable_device() FAIL (%d)",
+ err);
+ return -ENODEV;
+ }
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
+ "pci_enable_device() OK");
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
+		       "after pci_enable_device() enable_cnt: %d",
+ pcid->enable_cnt.counter);
+
+ host = scsi_host_alloc(&driver_template, host_alloc_size);
+ if (host == NULL) {
+ esas2r_log(ESAS2R_LOG_CRIT, "scsi_host_alloc() FAIL");
+ return -ENODEV;
+ }
+
+ memset(host->hostdata, 0, host_alloc_size);
+
+ a = (struct esas2r_adapter *)host->hostdata;
+
+ esas2r_log(ESAS2R_LOG_INFO, "scsi_host_alloc() OK host: %p", host);
+
+ /* override max LUN and max target id */
+
+ host->max_id = ESAS2R_MAX_ID + 1;
+ host->max_lun = 255;
+
+	/* we can handle 16-byte CDBs */
+
+ host->max_cmd_len = 16;
+
+ host->can_queue = can_queue;
+ host->cmd_per_lun = cmd_per_lun;
+ host->this_id = host->max_id + 1;
+ host->max_channel = 0;
+ host->unique_id = found_adapters;
+ host->sg_tablesize = sg_tablesize;
+ host->max_sectors = esas2r_max_sectors;
+
+ /* set to bus master for BIOses that don't do it for us */
+
+ esas2r_log(ESAS2R_LOG_INFO, "pci_set_master() called");
+
+ pci_set_master(pcid);
+
+ if (!esas2r_init_adapter(host, pcid, found_adapters)) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "unable to initialize device at PCI bus %x:%x",
+ pcid->bus->number,
+ pcid->devfn);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
+ "scsi_host_put() called");
+
+ scsi_host_put(host);
+
+ return 0;
+ }
+
+ esas2r_log(ESAS2R_LOG_INFO, "pci_set_drvdata(%p, %p) called", pcid,
+ host->hostdata);
+
+ pci_set_drvdata(pcid, host);
+
+ esas2r_log(ESAS2R_LOG_INFO, "scsi_add_host() called");
+
+ err = scsi_add_host(host, &pcid->dev);
+
+ if (err) {
+ esas2r_log(ESAS2R_LOG_CRIT, "scsi_add_host returned %d", err);
+ esas2r_log_dev(ESAS2R_LOG_CRIT, &(host->shost_gendev),
+ "scsi_add_host() FAIL");
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
+ "scsi_host_put() called");
+
+ scsi_host_put(host);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
+ "pci_set_drvdata(%p, NULL) called",
+ pcid);
+
+ pci_set_drvdata(pcid, NULL);
+
+ return -ENODEV;
+ }
+
+ esas2r_fw_event_on(a);
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
+ "scsi_scan_host() called");
+
+ scsi_scan_host(host);
+
+ /* Add sysfs binary files */
+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fw))
+ esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+ "Failed to create sysfs binary file: fw");
+ else
+ a->sysfs_fw_created = 1;
+
+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fs))
+ esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+ "Failed to create sysfs binary file: fs");
+ else
+ a->sysfs_fs_created = 1;
+
+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_vda))
+ esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+ "Failed to create sysfs binary file: vda");
+ else
+ a->sysfs_vda_created = 1;
+
+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_hw))
+ esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+ "Failed to create sysfs binary file: hw");
+ else
+ a->sysfs_hw_created = 1;
+
+ if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_live_nvram))
+ esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+ "Failed to create sysfs binary file: live_nvram");
+ else
+ a->sysfs_live_nvram_created = 1;
+
+ if (sysfs_create_bin_file(&host->shost_dev.kobj,
+ &bin_attr_default_nvram))
+ esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
+ "Failed to create sysfs binary file: default_nvram");
+ else
+ a->sysfs_default_nvram_created = 1;
+
+ found_adapters++;
+
+ return 0;
+}
+
+static void esas2r_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host;
+ int index;
+
+ if (pdev == NULL) {
+ esas2r_log(ESAS2R_LOG_WARN, "esas2r_remove pdev==NULL");
+ return;
+ }
+
+ host = pci_get_drvdata(pdev);
+
+ if (host == NULL) {
+ /*
+ * this can happen if pci_set_drvdata was already called
+ * to clear the host pointer. if this is the case, we
+ * are okay; this channel has already been cleaned up.
+ */
+
+ return;
+ }
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(pdev->dev),
+ "esas2r_remove(%p) called; "
+ "host:%p", pdev,
+ host);
+
+ index = esas2r_cleanup(host);
+
+ if (index < 0)
+ esas2r_log_dev(ESAS2R_LOG_WARN, &(pdev->dev),
+ "unknown host in %s",
+ __func__);
+
+ found_adapters--;
+
+ /* if this was the last adapter, clean up the rest of the driver */
+
+ if (found_adapters == 0)
+ esas2r_cleanup(NULL);
+}
+
+static int __init esas2r_init(void)
+{
+ int i;
+
+ esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__);
+
+ /* verify valid parameters */
+
+ if (can_queue < 1) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "warning: can_queue must be at least 1, value "
+ "forced.");
+ can_queue = 1;
+ } else if (can_queue > 2048) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "warning: can_queue must be no larger than 2048, "
+ "value forced.");
+ can_queue = 2048;
+ }
+
+ if (cmd_per_lun < 1) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "warning: cmd_per_lun must be at least 1, value "
+ "forced.");
+ cmd_per_lun = 1;
+ } else if (cmd_per_lun > 2048) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "warning: cmd_per_lun must be no larger than "
+ "2048, value forced.");
+ cmd_per_lun = 2048;
+ }
+
+ if (sg_tablesize < 32) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "warning: sg_tablesize must be at least 32, "
+ "value forced.");
+ sg_tablesize = 32;
+ }
+
+ if (esas2r_max_sectors < 1) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "warning: esas2r_max_sectors must be at least "
+ "1, value forced.");
+ esas2r_max_sectors = 1;
+ } else if (esas2r_max_sectors > 0xffff) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "warning: esas2r_max_sectors must be no larger "
+ "than 0xffff, value forced.");
+ esas2r_max_sectors = 0xffff;
+ }
+
+ sgl_page_size &= ~(ESAS2R_SGL_ALIGN - 1);
+
+ if (sgl_page_size < SGL_PG_SZ_MIN)
+ sgl_page_size = SGL_PG_SZ_MIN;
+ else if (sgl_page_size > SGL_PG_SZ_MAX)
+ sgl_page_size = SGL_PG_SZ_MAX;
+
+ if (num_sg_lists < NUM_SGL_MIN)
+ num_sg_lists = NUM_SGL_MIN;
+ else if (num_sg_lists > NUM_SGL_MAX)
+ num_sg_lists = NUM_SGL_MAX;
+
+ if (num_requests < NUM_REQ_MIN)
+ num_requests = NUM_REQ_MIN;
+ else if (num_requests > NUM_REQ_MAX)
+ num_requests = NUM_REQ_MAX;
+
+ if (num_ae_requests < NUM_AE_MIN)
+ num_ae_requests = NUM_AE_MIN;
+ else if (num_ae_requests > NUM_AE_MAX)
+ num_ae_requests = NUM_AE_MAX;
+
+ /* set up other globals */
+
+ for (i = 0; i < MAX_ADAPTERS; i++)
+ esas2r_adapters[i] = NULL;
+
+ /* initialize */
+
+ driver_template.module = THIS_MODULE;
+
+ if (pci_register_driver(&esas2r_pci_driver) != 0)
+ esas2r_log(ESAS2R_LOG_CRIT, "pci_register_driver FAILED");
+ else
+ esas2r_log(ESAS2R_LOG_INFO, "pci_register_driver() OK");
+
+ if (!found_adapters) {
+ pci_unregister_driver(&esas2r_pci_driver);
+ esas2r_cleanup(NULL);
+
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "driver will not be loaded because no ATTO "
+ "%s devices were found",
+ ESAS2R_DRVR_NAME);
+ return -1;
+ } else {
+ esas2r_log(ESAS2R_LOG_INFO, "found %d adapters",
+ found_adapters);
+ }
+
+ return 0;
+}
+
+/* Handle ioctl calls to "/proc/scsi/esas2r/ATTOnode" */
+static const struct file_operations esas2r_proc_fops = {
+ .compat_ioctl = esas2r_proc_ioctl,
+ .unlocked_ioctl = esas2r_proc_ioctl,
+};
+
+static struct Scsi_Host *esas2r_proc_host;
+static int esas2r_proc_major;
+
+long esas2r_proc_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ return esas2r_ioctl_handler(esas2r_proc_host->hostdata,
+ (int)cmd, (void __user *)arg);
+}
+
+static void __exit esas2r_exit(void)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "%s called", __func__);
+
+ if (esas2r_proc_major > 0) {
+ esas2r_log(ESAS2R_LOG_INFO, "unregister proc");
+
+ remove_proc_entry(ATTONODE_NAME,
+ esas2r_proc_host->hostt->proc_dir);
+ unregister_chrdev(esas2r_proc_major, ESAS2R_DRVR_NAME);
+
+ esas2r_proc_major = 0;
+ }
+
+ esas2r_log(ESAS2R_LOG_INFO, "pci_unregister_driver() called");
+
+ pci_unregister_driver(&esas2r_pci_driver);
+}
+
+int esas2r_show_info(struct seq_file *m, struct Scsi_Host *sh)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata;
+
+ struct esas2r_target *t;
+ int dev_count = 0;
+
+ esas2r_log(ESAS2R_LOG_DEBG, "esas2r_show_info (%p,%d)", m, sh->host_no);
+
+ seq_printf(m, ESAS2R_LONGNAME "\n"
+ "Driver version: "ESAS2R_VERSION_STR "\n"
+ "Flash version: %s\n"
+ "Firmware version: %s\n"
+ "Copyright "ESAS2R_COPYRIGHT_YEARS "\n"
+ "http://www.attotech.com\n"
+ "\n",
+ a->flash_rev,
+ a->fw_rev[0] ? a->fw_rev : "(none)");
+
+ seq_printf(m, "Adapter information:\n"
+ "--------------------\n"
+ "Model: %s\n"
+ "SAS address: %02X%02X%02X%02X:%02X%02X%02X%02X\n",
+ esas2r_get_model_name(a),
+ a->nvram->sas_addr[0],
+ a->nvram->sas_addr[1],
+ a->nvram->sas_addr[2],
+ a->nvram->sas_addr[3],
+ a->nvram->sas_addr[4],
+ a->nvram->sas_addr[5],
+ a->nvram->sas_addr[6],
+ a->nvram->sas_addr[7]);
+
+ seq_puts(m, "\n"
+ "Discovered devices:\n"
+ "\n"
+ " # Target ID\n"
+ "---------------\n");
+
+ for (t = a->targetdb; t < a->targetdb_end; t++)
+ if (t->buffered_target_state == TS_PRESENT) {
+ seq_printf(m, " %3d %3d\n",
+ ++dev_count,
+ (u16)(uintptr_t)(t - a->targetdb));
+ }
+
+ if (dev_count == 0)
+ seq_puts(m, "none\n");
+
+ seq_puts(m, "\n");
+ return 0;
+}
+
+int esas2r_release(struct Scsi_Host *sh)
+{
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(sh->shost_gendev),
+ "esas2r_release() called");
+
+ esas2r_cleanup(sh);
+ if (sh->irq)
+ free_irq(sh->irq, NULL);
+ scsi_unregister(sh);
+ return 0;
+}
+
+const char *esas2r_info(struct Scsi_Host *sh)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)sh->hostdata;
+ static char esas2r_info_str[512];
+
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(sh->shost_gendev),
+ "esas2r_info() called");
+
+ /*
+ * if we haven't done so already, register as a char driver
+ * and stick a node under "/proc/scsi/esas2r/ATTOnode"
+ */
+
+ if (esas2r_proc_major <= 0) {
+ esas2r_proc_host = sh;
+
+ esas2r_proc_major = register_chrdev(0, ESAS2R_DRVR_NAME,
+ &esas2r_proc_fops);
+
+ esas2r_log_dev(ESAS2R_LOG_DEBG, &(sh->shost_gendev),
+ "register_chrdev (major %d)",
+ esas2r_proc_major);
+
+ if (esas2r_proc_major > 0) {
+ struct proc_dir_entry *pde;
+
+ pde = proc_create(ATTONODE_NAME, 0,
+ sh->hostt->proc_dir,
+ &esas2r_proc_fops);
+
+ if (!pde) {
+ esas2r_log_dev(ESAS2R_LOG_WARN,
+ &(sh->shost_gendev),
+				       "proc_create() failed");
+ esas2r_proc_major = -1;
+ }
+ }
+ }
+
+ sprintf(esas2r_info_str,
+ ESAS2R_LONGNAME " (bus 0x%02X, device 0x%02X, IRQ 0x%02X)"
+ " driver version: "ESAS2R_VERSION_STR " firmware version: "
+ "%s\n",
+ a->pcid->bus->number, a->pcid->devfn, a->pcid->irq,
+ a->fw_rev[0] ? a->fw_rev : "(none)");
+
+ return esas2r_info_str;
+}
+
+/* Callback for building a request scatter/gather list */
+static u32 get_physaddr_from_sgc(struct esas2r_sg_context *sgc, u64 *addr)
+{
+ u32 len;
+
+ if (likely(sgc->cur_offset == sgc->exp_offset)) {
+ /*
+ * the normal case: caller used all bytes from previous call, so
+ * expected offset is the same as the current offset.
+ */
+
+ if (sgc->sgel_count < sgc->num_sgel) {
+ /* retrieve next segment, except for first time */
+ if (sgc->exp_offset > (u8 *)0) {
+ /* advance current segment */
+ sgc->cur_sgel = sg_next(sgc->cur_sgel);
+ ++(sgc->sgel_count);
+ }
+
+ len = sg_dma_len(sgc->cur_sgel);
+ (*addr) = sg_dma_address(sgc->cur_sgel);
+
+ /* save the total # bytes returned to caller so far */
+ sgc->exp_offset += len;
+
+ } else {
+ len = 0;
+ }
+ } else if (sgc->cur_offset < sgc->exp_offset) {
+ /*
+ * caller did not use all bytes from previous call. need to
+ * compute the address based on current segment.
+ */
+
+ len = sg_dma_len(sgc->cur_sgel);
+ (*addr) = sg_dma_address(sgc->cur_sgel);
+
+ sgc->exp_offset -= len;
+
+ /* calculate PA based on prev segment address and offsets */
+ *addr = *addr +
+ (sgc->cur_offset - sgc->exp_offset);
+
+ sgc->exp_offset += len;
+
+ /* re-calculate length based on offset */
+ len = lower_32_bits(
+ sgc->exp_offset - sgc->cur_offset);
+ } else { /* if ( sgc->cur_offset > sgc->exp_offset ) */
+ /*
+ * we don't expect the caller to skip ahead.
+ * cur_offset will never exceed the len we return
+ */
+ len = 0;
+ }
+
+ return len;
+}
+
+int esas2r_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
+{
+ struct esas2r_adapter *a =
+ (struct esas2r_adapter *)cmd->device->host->hostdata;
+ struct esas2r_request *rq;
+ struct esas2r_sg_context sgc;
+ unsigned bufflen;
+
+ /* Assume success, if it fails we will fix the result later. */
+ cmd->result = DID_OK << 16;
+
+ if (unlikely(a->flags & AF_DEGRADED_MODE)) {
+ cmd->result = DID_NO_CONNECT << 16;
+ cmd->scsi_done(cmd);
+ return 0;
+ }
+
+ rq = esas2r_alloc_request(a);
+ if (unlikely(rq == NULL)) {
+ esas2r_debug("esas2r_alloc_request failed");
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ rq->cmd = cmd;
+ bufflen = scsi_bufflen(cmd);
+
+ if (likely(bufflen != 0)) {
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD);
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_RDD);
+ }
+
+ memcpy(rq->vrq->scsi.cdb, cmd->cmnd, cmd->cmd_len);
+ rq->vrq->scsi.length = cpu_to_le32(bufflen);
+ rq->target_id = cmd->device->id;
+ rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
+ rq->sense_buf = cmd->sense_buffer;
+ rq->sense_len = SCSI_SENSE_BUFFERSIZE;
+
+ esas2r_sgc_init(&sgc, a, rq, NULL);
+
+ sgc.length = bufflen;
+ sgc.cur_offset = NULL;
+
+ sgc.cur_sgel = scsi_sglist(cmd);
+ sgc.exp_offset = NULL;
+ sgc.num_sgel = scsi_dma_map(cmd);
+ sgc.sgel_count = 0;
+
+ if (unlikely(sgc.num_sgel < 0)) {
+ esas2r_free_request(a, rq);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ sgc.get_phys_addr = (PGETPHYSADDR)get_physaddr_from_sgc;
+
+ if (unlikely(!esas2r_build_sg_list(a, rq, &sgc))) {
+ scsi_dma_unmap(cmd);
+ esas2r_free_request(a, rq);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ esas2r_debug("start request %p to %d:%d\n", rq, (int)cmd->device->id,
+ (int)cmd->device->lun);
+
+ esas2r_start_request(a, rq);
+
+ return 0;
+}
+
+static void complete_task_management_request(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ (*rq->task_management_status_ptr) = rq->req_stat;
+ esas2r_free_request(a, rq);
+}
+
+/**
+ * Searches the specified queue for the command to abort.
+ *
+ * @param [in] a
+ * @param [in] abort_request
+ * @param [in] cmd
+ * @param [in] queue
+ *
+ * @return 0 on failure, 1 if the command was not found, 2 if it was found
+ */
+static int esas2r_check_active_queue(struct esas2r_adapter *a,
+ struct esas2r_request **abort_request,
+ struct scsi_cmnd *cmd,
+ struct list_head *queue)
+{
+ bool found = false;
+ struct esas2r_request *ar = *abort_request;
+ struct esas2r_request *rq;
+ struct list_head *element, *next;
+
+ list_for_each_safe(element, next, queue) {
+
+ rq = list_entry(element, struct esas2r_request, req_list);
+
+ if (rq->cmd == cmd) {
+
+ /* Found the request. See what to do with it. */
+ if (queue == &a->active_list) {
+ /*
+ * We are searching the active queue, which
+ * means that we need to send an abort request
+ * to the firmware.
+ */
+ ar = esas2r_alloc_request(a);
+ if (ar == NULL) {
+ esas2r_log_dev(ESAS2R_LOG_WARN,
+ &(a->host->shost_gendev),
+ "unable to allocate an abort request for cmd %p",
+ cmd);
+ return 0; /* Failure */
+ }
+
+ /*
+ * Task management request must be formatted
+ * with a lock held.
+ */
+ ar->sense_len = 0;
+ ar->vrq->scsi.length = 0;
+ ar->target_id = rq->target_id;
+ ar->vrq->scsi.flags |= cpu_to_le32(
+ (u8)le32_to_cpu(rq->vrq->scsi.flags));
+
+ memset(ar->vrq->scsi.cdb, 0,
+ sizeof(ar->vrq->scsi.cdb));
+
+ ar->vrq->scsi.flags |= cpu_to_le32(
+ FCP_CMND_TRM);
+ ar->vrq->scsi.u.abort_handle =
+ rq->vrq->scsi.handle;
+ } else {
+ /*
+ * The request is pending but not active on
+ * the firmware. Just free it now and we'll
+ * report the successful abort below.
+ */
+ list_del_init(&rq->req_list);
+ esas2r_free_request(a, rq);
+ }
+
+ found = true;
+ break;
+ }
+
+ }
+
+ if (!found)
+ return 1; /* Not found */
+
+ return 2; /* found */
+}
+
+int esas2r_eh_abort(struct scsi_cmnd *cmd)
+{
+ struct esas2r_adapter *a =
+ (struct esas2r_adapter *)cmd->device->host->hostdata;
+ struct esas2r_request *abort_request = NULL;
+ unsigned long flags;
+ struct list_head *queue;
+ int result;
+
+ esas2r_log(ESAS2R_LOG_INFO, "eh_abort (%p)", cmd);
+
+ if (a->flags & AF_DEGRADED_MODE) {
+ cmd->result = DID_ABORT << 16;
+
+ scsi_set_resid(cmd, 0);
+
+ cmd->scsi_done(cmd);
+
+ return 0;
+ }
+
+ spin_lock_irqsave(&a->queue_lock, flags);
+
+ /*
+ * Run through the defer and active queues looking for the request
+ * to abort.
+ */
+
+ queue = &a->defer_list;
+
+check_active_queue:
+
+ result = esas2r_check_active_queue(a, &abort_request, cmd, queue);
+
+ if (!result) {
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+ return FAILED;
+ } else if (result == 2 && (queue == &a->defer_list)) {
+ queue = &a->active_list;
+ goto check_active_queue;
+ }
+
+ spin_unlock_irqrestore(&a->queue_lock, flags);
+
+ if (abort_request) {
+ u8 task_management_status = RS_PENDING;
+
+ /*
+ * the request is already active, so we need to tell
+ * the firmware to abort it and wait for the response.
+ */
+
+ abort_request->comp_cb = complete_task_management_request;
+ abort_request->task_management_status_ptr =
+ &task_management_status;
+
+ esas2r_start_request(a, abort_request);
+
+ if (atomic_read(&a->disable_cnt) == 0)
+ esas2r_do_deferred_processes(a);
+
+ while (task_management_status == RS_PENDING)
+ msleep(10);
+
+ /*
+ * Once we get here, the original request will have been
+ * completed by the firmware and the abort request will have
+ * been cleaned up. we're done!
+ */
+
+ return SUCCESS;
+ }
+
+ /*
+ * If we get here, either we found the inactive request and
+ * freed it, or we didn't find it at all. Either way, success!
+ */
+
+ cmd->result = DID_ABORT << 16;
+
+ scsi_set_resid(cmd, 0);
+
+ cmd->scsi_done(cmd);
+
+ return SUCCESS;
+}
+
+static int esas2r_host_bus_reset(struct scsi_cmnd *cmd, bool host_reset)
+{
+ struct esas2r_adapter *a =
+ (struct esas2r_adapter *)cmd->device->host->hostdata;
+
+ if (a->flags & AF_DEGRADED_MODE)
+ return FAILED;
+
+ if (host_reset)
+ esas2r_reset_adapter(a);
+ else
+ esas2r_reset_bus(a);
+
+ /* above call sets the AF_OS_RESET flag. wait for it to clear. */
+
+ while (a->flags & AF_OS_RESET) {
+ msleep(10);
+
+ if (a->flags & AF_DEGRADED_MODE)
+ return FAILED;
+ }
+
+ if (a->flags & AF_DEGRADED_MODE)
+ return FAILED;
+
+ return SUCCESS;
+}
+
+int esas2r_host_reset(struct scsi_cmnd *cmd)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "host_reset (%p)", cmd);
+
+ return esas2r_host_bus_reset(cmd, true);
+}
+
+int esas2r_bus_reset(struct scsi_cmnd *cmd)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "bus_reset (%p)", cmd);
+
+ return esas2r_host_bus_reset(cmd, false);
+}
+
+static int esas2r_dev_targ_reset(struct scsi_cmnd *cmd, bool target_reset)
+{
+ struct esas2r_adapter *a =
+ (struct esas2r_adapter *)cmd->device->host->hostdata;
+ struct esas2r_request *rq;
+ u8 task_management_status = RS_PENDING;
+ bool completed;
+
+ if (a->flags & AF_DEGRADED_MODE)
+ return FAILED;
+
+retry:
+ rq = esas2r_alloc_request(a);
+ if (rq == NULL) {
+ if (target_reset) {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "unable to allocate a request for a "
+ "target reset (%d)!",
+ cmd->device->id);
+ } else {
+ esas2r_log(ESAS2R_LOG_CRIT,
+ "unable to allocate a request for a "
+ "device reset (%d:%d)!",
+ cmd->device->id,
+ cmd->device->lun);
+ }
+
+ return FAILED;
+ }
+
+ rq->target_id = cmd->device->id;
+ rq->vrq->scsi.flags |= cpu_to_le32(cmd->device->lun);
+ rq->req_stat = RS_PENDING;
+
+ rq->comp_cb = complete_task_management_request;
+ rq->task_management_status_ptr = &task_management_status;
+
+ if (target_reset) {
+ esas2r_debug("issuing target reset (%p) to id %d", rq,
+ cmd->device->id);
+ completed = esas2r_send_task_mgmt(a, rq, 0x20);
+ } else {
+ esas2r_debug("issuing device reset (%p) to id %d lun %d", rq,
+ cmd->device->id, cmd->device->lun);
+ completed = esas2r_send_task_mgmt(a, rq, 0x10);
+ }
+
+ if (completed) {
+ /* Task management cmd completed right away, need to free it. */
+
+ esas2r_free_request(a, rq);
+ } else {
+ /*
+ * Wait for firmware to complete the request. Completion
+ * callback will free it.
+ */
+ while (task_management_status == RS_PENDING)
+ msleep(10);
+ }
+
+ if (a->flags & AF_DEGRADED_MODE)
+ return FAILED;
+
+ if (task_management_status == RS_BUSY) {
+ /*
+ * Busy, probably because we are flashing. Wait a bit and
+ * try again.
+ */
+ msleep(100);
+ goto retry;
+ }
+
+ return SUCCESS;
+}
+
+int esas2r_device_reset(struct scsi_cmnd *cmd)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "device_reset (%p)", cmd);
+
+ return esas2r_dev_targ_reset(cmd, false);
+}
+
+int esas2r_target_reset(struct scsi_cmnd *cmd)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "target_reset (%p)", cmd);
+
+ return esas2r_dev_targ_reset(cmd, true);
+}
+
+int esas2r_change_queue_depth(struct scsi_device *dev, int depth, int reason)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "change_queue_depth %p, %d", dev, depth);
+
+ scsi_adjust_queue_depth(dev, scsi_get_tag_type(dev), depth);
+
+ return dev->queue_depth;
+}
+
+int esas2r_change_queue_type(struct scsi_device *dev, int type)
+{
+ esas2r_log(ESAS2R_LOG_INFO, "change_queue_type %p, %d", dev, type);
+
+ if (dev->tagged_supported) {
+ scsi_set_tag_type(dev, type);
+
+ if (type)
+ scsi_activate_tcq(dev, dev->queue_depth);
+ else
+ scsi_deactivate_tcq(dev, dev->queue_depth);
+ } else {
+ type = 0;
+ }
+
+ return type;
+}
+
+int esas2r_slave_alloc(struct scsi_device *dev)
+{
+ return 0;
+}
+
+int esas2r_slave_configure(struct scsi_device *dev)
+{
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev),
+ "esas2r_slave_configure()");
+
+ if (dev->tagged_supported) {
+ scsi_set_tag_type(dev, MSG_SIMPLE_TAG);
+ scsi_activate_tcq(dev, cmd_per_lun);
+ } else {
+ scsi_set_tag_type(dev, 0);
+ scsi_deactivate_tcq(dev, cmd_per_lun);
+ }
+
+ return 0;
+}
+
+void esas2r_slave_destroy(struct scsi_device *dev)
+{
+ esas2r_log_dev(ESAS2R_LOG_INFO, &(dev->sdev_gendev),
+ "esas2r_slave_destroy()");
+}
+
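+/*
+ * Log a warning for any request that did not complete successfully.  For
+ * SCSI errors the sense key/ASC/ASCQ are decoded when enough sense data is
+ * available; selection timeouts on INQUIRY/REPORT LUNS and INQUIRY
+ * underruns are skipped, presumably to avoid noise during discovery.
+ */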
+void esas2r_log_request_failure(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ u8 reqstatus = rq->req_stat;
+
+ if (reqstatus == RS_SUCCESS)
+ return;
+
+ if (rq->vrq->scsi.function == VDA_FUNC_SCSI) {
+ if (reqstatus == RS_SCSI_ERROR) {
+ if (rq->func_rsp.scsi_rsp.sense_len >= 13) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "request failure - SCSI error %x ASC:%x ASCQ:%x CDB:%x",
+ rq->sense_buf[2], rq->sense_buf[12],
+ rq->sense_buf[13],
+ rq->vrq->scsi.cdb[0]);
+ } else {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "request failure - SCSI error CDB:%x\n",
+ rq->vrq->scsi.cdb[0]);
+ }
+ } else if ((rq->vrq->scsi.cdb[0] != INQUIRY
+ && rq->vrq->scsi.cdb[0] != REPORT_LUNS)
+ || (reqstatus != RS_SEL
+ && reqstatus != RS_SEL2)) {
+ if ((reqstatus == RS_UNDERRUN) &&
+ (rq->vrq->scsi.cdb[0] == INQUIRY)) {
+ /* Don't log inquiry underruns */
+ } else {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "request failure - cdb:%x reqstatus:%d target:%d",
+ rq->vrq->scsi.cdb[0], reqstatus,
+ rq->target_id);
+ }
+ }
+ }
+}
+
+void esas2r_wait_request(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+ u32 starttime;
+ u32 timeout;
+
+ starttime = jiffies_to_msecs(jiffies);
+ timeout = rq->timeout ? rq->timeout : 5000;
+
+ while (true) {
+ esas2r_polled_interrupt(a);
+
+ if (rq->req_stat != RS_STARTED)
+ break;
+
+ schedule_timeout_interruptible(msecs_to_jiffies(100));
+
+ if ((jiffies_to_msecs(jiffies) - starttime) > timeout) {
+ esas2r_hdebug("request TMO");
+ esas2r_bugon();
+
+ rq->req_stat = RS_TIMEOUT;
+
+ esas2r_local_reset_adapter(a);
+ return;
+ }
+ }
+}
+
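+/*
+ * Map a 32-bit chip address into the PCI data window.  The window register
+ * is only reprogrammed when the address falls outside the region that is
+ * currently mapped; the return value is the offset of addr_lo within the
+ * window.
+ */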
+u32 esas2r_map_data_window(struct esas2r_adapter *a, u32 addr_lo)
+{
+ u32 offset = addr_lo & (MW_DATA_WINDOW_SIZE - 1);
+ u32 base = addr_lo & -(signed int)MW_DATA_WINDOW_SIZE;
+
+ if (a->window_base != base) {
+ esas2r_write_register_dword(a, MVR_PCI_WIN1_REMAP,
+ base | MVRPW1R_ENABLE);
+ esas2r_flush_register_dword(a, MVR_PCI_WIN1_REMAP);
+ a->window_base = base;
+ }
+
+ return offset;
+}
+
+/* Read a block of data from chip memory */
+bool esas2r_read_mem_block(struct esas2r_adapter *a,
+ void *to,
+ u32 from,
+ u32 size)
+{
+ u8 *end = (u8 *)to;
+
+ while (size) {
+ u32 len;
+ u32 offset;
+ u32 iatvr;
+
+ iatvr = (from & -(signed int)MW_DATA_WINDOW_SIZE);
+
+ esas2r_map_data_window(a, iatvr);
+
+ offset = from & (MW_DATA_WINDOW_SIZE - 1);
+ len = size;
+
+ if (len > MW_DATA_WINDOW_SIZE - offset)
+ len = MW_DATA_WINDOW_SIZE - offset;
+
+ from += len;
+ size -= len;
+
+ while (len--) {
+ *end++ = esas2r_read_data_byte(a, offset);
+ offset++;
+ }
+ }
+
+ return true;
+}
+
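+/*
+ * The esas2r_nuxi_*() helpers below convert the little-endian fields of
+ * VDA firmware structures to CPU byte order in place.  The set of fields
+ * swapped is selected by the VDA function or event type, so callers must
+ * pass the same code that was used for the request.
+ */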
+void esas2r_nuxi_mgt_data(u8 function, void *data)
+{
+ struct atto_vda_grp_info *g;
+ struct atto_vda_devinfo *d;
+ struct atto_vdapart_info *p;
+ struct atto_vda_dh_info *h;
+ struct atto_vda_metrics_info *m;
+ struct atto_vda_schedule_info *s;
+ struct atto_vda_buzzer_info *b;
+ u8 i;
+
+ switch (function) {
+ case VDAMGT_BUZZER_INFO:
+ case VDAMGT_BUZZER_SET:
+
+ b = (struct atto_vda_buzzer_info *)data;
+
+ b->duration = le32_to_cpu(b->duration);
+ break;
+
+ case VDAMGT_SCHEDULE_INFO:
+ case VDAMGT_SCHEDULE_EVENT:
+
+ s = (struct atto_vda_schedule_info *)data;
+
+ s->id = le32_to_cpu(s->id);
+
+ break;
+
+ case VDAMGT_DEV_INFO:
+ case VDAMGT_DEV_CLEAN:
+ case VDAMGT_DEV_PT_INFO:
+ case VDAMGT_DEV_FEATURES:
+ case VDAMGT_DEV_PT_FEATURES:
+ case VDAMGT_DEV_OPERATION:
+
+ d = (struct atto_vda_devinfo *)data;
+
+ d->capacity = le64_to_cpu(d->capacity);
+ d->block_size = le32_to_cpu(d->block_size);
+ d->ses_dev_index = le16_to_cpu(d->ses_dev_index);
+ d->target_id = le16_to_cpu(d->target_id);
+ d->lun = le16_to_cpu(d->lun);
+ d->features = le16_to_cpu(d->features);
+ break;
+
+ case VDAMGT_GRP_INFO:
+ case VDAMGT_GRP_CREATE:
+ case VDAMGT_GRP_DELETE:
+ case VDAMGT_ADD_STORAGE:
+ case VDAMGT_MEMBER_ADD:
+ case VDAMGT_GRP_COMMIT:
+ case VDAMGT_GRP_REBUILD:
+ case VDAMGT_GRP_COMMIT_INIT:
+ case VDAMGT_QUICK_RAID:
+ case VDAMGT_GRP_FEATURES:
+ case VDAMGT_GRP_COMMIT_INIT_AUTOMAP:
+ case VDAMGT_QUICK_RAID_INIT_AUTOMAP:
+ case VDAMGT_SPARE_LIST:
+ case VDAMGT_SPARE_ADD:
+ case VDAMGT_SPARE_REMOVE:
+ case VDAMGT_LOCAL_SPARE_ADD:
+ case VDAMGT_GRP_OPERATION:
+
+ g = (struct atto_vda_grp_info *)data;
+
+ g->capacity = le64_to_cpu(g->capacity);
+ g->block_size = le32_to_cpu(g->block_size);
+ g->interleave = le32_to_cpu(g->interleave);
+ g->features = le16_to_cpu(g->features);
+
+ for (i = 0; i < 32; i++)
+ g->members[i] = le16_to_cpu(g->members[i]);
+
+ break;
+
+ case VDAMGT_PART_INFO:
+ case VDAMGT_PART_MAP:
+ case VDAMGT_PART_UNMAP:
+ case VDAMGT_PART_AUTOMAP:
+ case VDAMGT_PART_SPLIT:
+ case VDAMGT_PART_MERGE:
+
+ p = (struct atto_vdapart_info *)data;
+
+ p->part_size = le64_to_cpu(p->part_size);
+ p->start_lba = le32_to_cpu(p->start_lba);
+ p->block_size = le32_to_cpu(p->block_size);
+ p->target_id = le16_to_cpu(p->target_id);
+ break;
+
+ case VDAMGT_DEV_HEALTH_REQ:
+
+ h = (struct atto_vda_dh_info *)data;
+
+ h->med_defect_cnt = le32_to_cpu(h->med_defect_cnt);
+ h->info_exc_cnt = le32_to_cpu(h->info_exc_cnt);
+ break;
+
+ case VDAMGT_DEV_METRICS:
+
+ m = (struct atto_vda_metrics_info *)data;
+
+ for (i = 0; i < 32; i++)
+ m->dev_indexes[i] = le16_to_cpu(m->dev_indexes[i]);
+
+ break;
+
+ default:
+ break;
+ }
+}
+
+void esas2r_nuxi_cfg_data(u8 function, void *data)
+{
+ struct atto_vda_cfg_init *ci;
+
+ switch (function) {
+ case VDA_CFG_INIT:
+ case VDA_CFG_GET_INIT:
+ case VDA_CFG_GET_INIT2:
+
+ ci = (struct atto_vda_cfg_init *)data;
+
+ ci->date_time.year = le16_to_cpu(ci->date_time.year);
+ ci->sgl_page_size = le32_to_cpu(ci->sgl_page_size);
+ ci->vda_version = le32_to_cpu(ci->vda_version);
+ ci->epoch_time = le32_to_cpu(ci->epoch_time);
+ ci->ioctl_tunnel = le32_to_cpu(ci->ioctl_tunnel);
+ ci->num_targets_backend = le32_to_cpu(ci->num_targets_backend);
+ break;
+
+ default:
+ break;
+ }
+}
+
+void esas2r_nuxi_ae_data(union atto_vda_ae *ae)
+{
+ struct atto_vda_ae_raid *r = &ae->raid;
+ struct atto_vda_ae_lu *l = &ae->lu;
+
+ switch (ae->hdr.bytype) {
+ case VDAAE_HDR_TYPE_RAID:
+
+ r->dwflags = le32_to_cpu(r->dwflags);
+ break;
+
+ case VDAAE_HDR_TYPE_LU:
+
+ l->dwevent = le32_to_cpu(l->dwevent);
+ l->wphys_target_id = le16_to_cpu(l->wphys_target_id);
+ l->id.tgtlun.wtarget_id = le16_to_cpu(l->id.tgtlun.wtarget_id);
+
+ if (l->hdr.bylength >= offsetof(struct atto_vda_ae_lu, id)
+ + sizeof(struct atto_vda_ae_lu_tgt_lun_raid)) {
+ l->id.tgtlun_raid.dwinterleave
+ = le32_to_cpu(l->id.tgtlun_raid.dwinterleave);
+ l->id.tgtlun_raid.dwblock_size
+ = le32_to_cpu(l->id.tgtlun_raid.dwblock_size);
+ }
+
+ break;
+
+ case VDAAE_HDR_TYPE_DISK:
+ default:
+ break;
+ }
+}
+
+void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+ unsigned long flags;
+
+ esas2r_rq_destroy_request(rq, a);
+ spin_lock_irqsave(&a->request_lock, flags);
+ list_add(&rq->comp_list, &a->avail_request);
+ spin_unlock_irqrestore(&a->request_lock, flags);
+}
+
+struct esas2r_request *esas2r_alloc_request(struct esas2r_adapter *a)
+{
+ struct esas2r_request *rq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&a->request_lock, flags);
+
+ if (unlikely(list_empty(&a->avail_request))) {
+ spin_unlock_irqrestore(&a->request_lock, flags);
+ return NULL;
+ }
+
+ rq = list_first_entry(&a->avail_request, struct esas2r_request,
+ comp_list);
+ list_del(&rq->comp_list);
+ spin_unlock_irqrestore(&a->request_lock, flags);
+ esas2r_rq_init_request(rq, a);
+
+ return rq;
+}
+
+void esas2r_complete_request_cb(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ esas2r_debug("completing request %p\n", rq);
+
+ scsi_dma_unmap(rq->cmd);
+
+ if (unlikely(rq->req_stat != RS_SUCCESS)) {
+ esas2r_debug("[%x STATUS %x:%x (%x)]", rq->target_id,
+ rq->req_stat,
+ rq->func_rsp.scsi_rsp.scsi_stat,
+ rq->cmd);
+
+ rq->cmd->result =
+ ((esas2r_req_status_to_error(rq->req_stat) << 16)
+ | (rq->func_rsp.scsi_rsp.scsi_stat & STATUS_MASK));
+
+ if (rq->req_stat == RS_UNDERRUN)
+ scsi_set_resid(rq->cmd,
+ le32_to_cpu(rq->func_rsp.scsi_rsp.
+ residual_length));
+ else
+ scsi_set_resid(rq->cmd, 0);
+ }
+
+ rq->cmd->scsi_done(rq->cmd);
+
+ esas2r_free_request(a, rq);
+}
+
+/* Run tasklet to handle stuff outside of interrupt context. */
+void esas2r_adapter_tasklet(unsigned long context)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)context;
+
+ if (unlikely(a->flags2 & AF2_TIMER_TICK)) {
+ esas2r_lock_clear_flags(&a->flags2, AF2_TIMER_TICK);
+ esas2r_timer_tick(a);
+ }
+
+ if (likely(a->flags2 & AF2_INT_PENDING)) {
+ esas2r_lock_clear_flags(&a->flags2, AF2_INT_PENDING);
+ esas2r_adapter_interrupt(a);
+ }
+
+ if (esas2r_is_tasklet_pending(a))
+ esas2r_do_tasklet_tasks(a);
+
+ if (esas2r_is_tasklet_pending(a)
+ || (a->flags2 & AF2_INT_PENDING)
+ || (a->flags2 & AF2_TIMER_TICK)) {
+ esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
+ esas2r_schedule_tasklet(a);
+ } else {
+ esas2r_lock_clear_flags(&a->flags, AF_TASKLET_SCHEDULED);
+ }
+}
+
+static void esas2r_timer_callback(unsigned long context);
+
+void esas2r_kickoff_timer(struct esas2r_adapter *a)
+{
+ init_timer(&a->timer);
+
+ a->timer.function = esas2r_timer_callback;
+ a->timer.data = (unsigned long)a;
+ a->timer.expires = jiffies +
+ msecs_to_jiffies(100);
+
+ add_timer(&a->timer);
+}
+
+static void esas2r_timer_callback(unsigned long context)
+{
+ struct esas2r_adapter *a = (struct esas2r_adapter *)context;
+
+ esas2r_lock_set_flags(&a->flags2, AF2_TIMER_TICK);
+
+ esas2r_schedule_tasklet(a);
+
+ esas2r_kickoff_timer(a);
+}
+
+/*
+ * Firmware events need to be handled outside of interrupt context
+ * so we schedule a delayed_work to handle them.
+ */
+
+static void
+esas2r_free_fw_event(struct esas2r_fw_event_work *fw_event)
+{
+ unsigned long flags;
+ struct esas2r_adapter *a = fw_event->a;
+
+ spin_lock_irqsave(&a->fw_event_lock, flags);
+ list_del(&fw_event->list);
+ kfree(fw_event);
+ spin_unlock_irqrestore(&a->fw_event_lock, flags);
+}
+
+void
+esas2r_fw_event_off(struct esas2r_adapter *a)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&a->fw_event_lock, flags);
+ a->fw_events_off = 1;
+ spin_unlock_irqrestore(&a->fw_event_lock, flags);
+}
+
+void
+esas2r_fw_event_on(struct esas2r_adapter *a)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&a->fw_event_lock, flags);
+ a->fw_events_off = 0;
+ spin_unlock_irqrestore(&a->fw_event_lock, flags);
+}
+
+static void esas2r_add_device(struct esas2r_adapter *a, u16 target_id)
+{
+ int ret;
+ struct scsi_device *scsi_dev;
+
+ scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0);
+
+	if (scsi_dev) {
+		esas2r_log_dev(ESAS2R_LOG_WARN, &(scsi_dev->sdev_gendev),
+			       "scsi device already exists at id %d",
+			       target_id);
+
+		scsi_device_put(scsi_dev);
+	} else {
+		esas2r_log_dev(ESAS2R_LOG_INFO, &(a->host->shost_gendev),
+			       "scsi_add_device() called for 0:%d:0",
+			       target_id);
+
+		ret = scsi_add_device(a->host, 0, target_id, 0);
+		if (ret) {
+			esas2r_log_dev(ESAS2R_LOG_CRIT,
+				       &(a->host->shost_gendev),
+				       "scsi_add_device failed with %d for id %d",
+				       ret, target_id);
+		}
+	}
+}
+
+static void esas2r_remove_device(struct esas2r_adapter *a, u16 target_id)
+{
+ struct scsi_device *scsi_dev;
+
+ scsi_dev = scsi_device_lookup(a->host, 0, target_id, 0);
+
+	if (scsi_dev) {
+		scsi_device_set_state(scsi_dev, SDEV_OFFLINE);
+
+		esas2r_log_dev(ESAS2R_LOG_INFO, &(scsi_dev->sdev_gendev),
+			       "scsi_remove_device() called for 0:%d:0",
+			       target_id);
+
+		scsi_remove_device(scsi_dev);
+
+		esas2r_log_dev(ESAS2R_LOG_INFO, &(scsi_dev->sdev_gendev),
+			       "scsi_device_put() called");
+
+		scsi_device_put(scsi_dev);
+	} else {
+		esas2r_log_dev(ESAS2R_LOG_WARN, &(a->host->shost_gendev),
+			       "no target found at id %d", target_id);
+	}
+}
+
+/*
+ * Log a firmware asynchronous event.  The event type is decoded into a
+ * human-readable description and the raw event contents are hex-dumped to
+ * the system log.
+ */
+static void esas2r_send_ae_event(struct esas2r_fw_event_work *fw_event)
+{
+ struct esas2r_vda_ae *ae = (struct esas2r_vda_ae *)fw_event->data;
+ char *type;
+
+ switch (ae->vda_ae.hdr.bytype) {
+ case VDAAE_HDR_TYPE_RAID:
+ type = "RAID group state change";
+ break;
+
+ case VDAAE_HDR_TYPE_LU:
+ type = "Mapped destination LU change";
+ break;
+
+ case VDAAE_HDR_TYPE_DISK:
+ type = "Physical disk inventory change";
+ break;
+
+ case VDAAE_HDR_TYPE_RESET:
+ type = "Firmware reset";
+ break;
+
+ case VDAAE_HDR_TYPE_LOG_INFO:
+ type = "Event Log message (INFO level)";
+ break;
+
+ case VDAAE_HDR_TYPE_LOG_WARN:
+ type = "Event Log message (WARN level)";
+ break;
+
+ case VDAAE_HDR_TYPE_LOG_CRIT:
+ type = "Event Log message (CRIT level)";
+ break;
+
+ case VDAAE_HDR_TYPE_LOG_FAIL:
+ type = "Event Log message (FAIL level)";
+ break;
+
+ case VDAAE_HDR_TYPE_NVC:
+ type = "NVCache change";
+ break;
+
+ case VDAAE_HDR_TYPE_TLG_INFO:
+ type = "Time stamped log message (INFO level)";
+ break;
+
+ case VDAAE_HDR_TYPE_TLG_WARN:
+ type = "Time stamped log message (WARN level)";
+ break;
+
+ case VDAAE_HDR_TYPE_TLG_CRIT:
+ type = "Time stamped log message (CRIT level)";
+ break;
+
+ case VDAAE_HDR_TYPE_PWRMGT:
+ type = "Power management";
+ break;
+
+ case VDAAE_HDR_TYPE_MUTE:
+ type = "Mute button pressed";
+ break;
+
+ case VDAAE_HDR_TYPE_DEV:
+ type = "Device attribute change";
+ break;
+
+ default:
+ type = "Unknown";
+ break;
+ }
+
+ esas2r_log(ESAS2R_LOG_WARN,
+ "An async event of type \"%s\" was received from the firmware. The event contents are:",
+ type);
+ esas2r_log_hexdump(ESAS2R_LOG_WARN, &ae->vda_ae,
+ ae->vda_ae.hdr.bylength);
+
+}
+
+static void
+esas2r_firmware_event_work(struct work_struct *work)
+{
+ struct esas2r_fw_event_work *fw_event =
+ container_of(work, struct esas2r_fw_event_work, work.work);
+
+ struct esas2r_adapter *a = fw_event->a;
+
+ u16 target_id = *(u16 *)&fw_event->data[0];
+
+ if (a->fw_events_off)
+ goto done;
+
+ switch (fw_event->type) {
+ case fw_event_null:
+ break; /* do nothing */
+
+ case fw_event_lun_change:
+ esas2r_remove_device(a, target_id);
+ esas2r_add_device(a, target_id);
+ break;
+
+ case fw_event_present:
+ esas2r_add_device(a, target_id);
+ break;
+
+ case fw_event_not_present:
+ esas2r_remove_device(a, target_id);
+ break;
+
+ case fw_event_vda_ae:
+ esas2r_send_ae_event(fw_event);
+ break;
+ }
+
+done:
+ esas2r_free_fw_event(fw_event);
+}
+
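+/*
+ * Allocate and queue a firmware event for deferred handling in process
+ * context.  VDA async events are wrapped with a signature and the
+ * adapter's PCI location before queuing; the other event types used here
+ * simply carry the target ID.
+ */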
+void esas2r_queue_fw_event(struct esas2r_adapter *a,
+ enum fw_event_type type,
+ void *data,
+ int data_sz)
+{
+ struct esas2r_fw_event_work *fw_event;
+ unsigned long flags;
+
+ fw_event = kzalloc(sizeof(struct esas2r_fw_event_work), GFP_ATOMIC);
+ if (!fw_event) {
+ esas2r_log(ESAS2R_LOG_WARN,
+ "esas2r_queue_fw_event failed to alloc");
+ return;
+ }
+
+ if (type == fw_event_vda_ae) {
+ struct esas2r_vda_ae *ae =
+ (struct esas2r_vda_ae *)fw_event->data;
+
+ ae->signature = ESAS2R_VDA_EVENT_SIG;
+ ae->bus_number = a->pcid->bus->number;
+ ae->devfn = a->pcid->devfn;
+ memcpy(&ae->vda_ae, data, sizeof(ae->vda_ae));
+ } else {
+ memcpy(fw_event->data, data, data_sz);
+ }
+
+ fw_event->type = type;
+ fw_event->a = a;
+
+ spin_lock_irqsave(&a->fw_event_lock, flags);
+ list_add_tail(&fw_event->list, &a->fw_event_list);
+ INIT_DELAYED_WORK(&fw_event->work, esas2r_firmware_event_work);
+ queue_delayed_work_on(
+ smp_processor_id(), a->fw_event_q, &fw_event->work,
+ msecs_to_jiffies(1));
+ spin_unlock_irqrestore(&a->fw_event_lock, flags);
+}
+
+void esas2r_target_state_changed(struct esas2r_adapter *a, u16 targ_id,
+ u8 state)
+{
+ if (state == TS_LUN_CHANGE)
+ esas2r_queue_fw_event(a, fw_event_lun_change, &targ_id,
+ sizeof(targ_id));
+ else if (state == TS_PRESENT)
+ esas2r_queue_fw_event(a, fw_event_present, &targ_id,
+ sizeof(targ_id));
+ else if (state == TS_NOT_PRESENT)
+ esas2r_queue_fw_event(a, fw_event_not_present, &targ_id,
+ sizeof(targ_id));
+}
+
+/* Translate status to a Linux SCSI mid-layer error code */
+int esas2r_req_status_to_error(u8 req_stat)
+{
+ switch (req_stat) {
+ case RS_OVERRUN:
+ case RS_UNDERRUN:
+ case RS_SUCCESS:
+ /*
+ * NOTE: SCSI mid-layer wants a good status for a SCSI error, because
+ * it will check the scsi_stat value in the completion anyway.
+ */
+ case RS_SCSI_ERROR:
+ return DID_OK;
+
+ case RS_SEL:
+ case RS_SEL2:
+ return DID_NO_CONNECT;
+
+ case RS_RESET:
+ return DID_RESET;
+
+ case RS_ABORTED:
+ return DID_ABORT;
+
+ case RS_BUSY:
+ return DID_BUS_BUSY;
+ }
+
+ /* everything else is just an error. */
+
+ return DID_ERROR;
+}
+
+module_init(esas2r_init);
+module_exit(esas2r_exit);
diff --git a/drivers/scsi/esas2r/esas2r_targdb.c b/drivers/scsi/esas2r/esas2r_targdb.c
new file mode 100644
index 00000000000..e540a2fa3d1
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_targdb.c
@@ -0,0 +1,306 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_targdb.c
+ * For use with ATTO ExpressSAS R6xx SAS/SATA RAID controllers
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include "esas2r.h"
+
+void esas2r_targ_db_initialize(struct esas2r_adapter *a)
+{
+ struct esas2r_target *t;
+
+ for (t = a->targetdb; t < a->targetdb_end; t++) {
+ memset(t, 0, sizeof(struct esas2r_target));
+
+ t->target_state = TS_NOT_PRESENT;
+ t->buffered_target_state = TS_NOT_PRESENT;
+ t->new_target_state = TS_INVALID;
+ }
+}
+
+void esas2r_targ_db_remove_all(struct esas2r_adapter *a, bool notify)
+{
+ struct esas2r_target *t;
+ unsigned long flags;
+
+ for (t = a->targetdb; t < a->targetdb_end; t++) {
+ if (t->target_state != TS_PRESENT)
+ continue;
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+ esas2r_targ_db_remove(a, t);
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+		if (notify) {
+			esas2r_trace("remove id:%d",
+				     esas2r_targ_get_id(t, a));
+			esas2r_target_state_changed(a,
+						    esas2r_targ_get_id(t, a),
+						    TS_NOT_PRESENT);
+		}
+ }
+}
+
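+/*
+ * Report buffered target state changes to the SCSI layer.  Nothing is
+ * reported while discovery is still pending; otherwise any target whose
+ * state differs from its buffered state generates a state change event.
+ */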
+void esas2r_targ_db_report_changes(struct esas2r_adapter *a)
+{
+ struct esas2r_target *t;
+ unsigned long flags;
+
+ esas2r_trace_enter();
+
+ if (a->flags & AF_DISC_PENDING) {
+ esas2r_trace_exit();
+ return;
+ }
+
+ for (t = a->targetdb; t < a->targetdb_end; t++) {
+ u8 state = TS_INVALID;
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+ if (t->buffered_target_state != t->target_state)
+ state = t->buffered_target_state = t->target_state;
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+		if (state != TS_INVALID) {
+			esas2r_trace("targ_db_report_changes:%d",
+				     esas2r_targ_get_id(t, a));
+			esas2r_trace("state:%d", state);
+
+			esas2r_target_state_changed(a,
+						    esas2r_targ_get_id(t, a),
+						    state);
+		}
+ }
+
+ esas2r_trace_exit();
+}
+
+struct esas2r_target *esas2r_targ_db_add_raid(struct esas2r_adapter *a,
+					      struct esas2r_disc_context *dc)
+{
+ struct esas2r_target *t;
+
+ esas2r_trace_enter();
+
+ if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) {
+ esas2r_bugon();
+ esas2r_trace_exit();
+ return NULL;
+ }
+
+ t = a->targetdb + dc->curr_virt_id;
+
+ if (t->target_state == TS_PRESENT) {
+ esas2r_trace_exit();
+ return NULL;
+ }
+
+	esas2r_hdebug("add RAID %s, T:%d", dc->raid_grp_name,
+		      esas2r_targ_get_id(t, a));
+
+ if (dc->interleave == 0
+ || dc->block_size == 0) {
+ /* these are invalid values, don't create the target entry. */
+
+ esas2r_hdebug("invalid RAID group dimensions");
+
+ esas2r_trace_exit();
+
+ return NULL;
+ }
+
+ t->block_size = dc->block_size;
+ t->inter_byte = dc->interleave;
+ t->inter_block = dc->interleave / dc->block_size;
+ t->virt_targ_id = dc->curr_virt_id;
+ t->phys_targ_id = ESAS2R_TARG_ID_INV;
+
+ t->flags &= ~TF_PASS_THRU;
+ t->flags |= TF_USED;
+
+ t->identifier_len = 0;
+
+ t->target_state = TS_PRESENT;
+
+ return t;
+}
+
+struct esas2r_target *esas2r_targ_db_add_pthru(struct esas2r_adapter *a,
+ struct esas2r_disc_context *dc,
+ u8 *ident,
+ u8 ident_len)
+{
+ struct esas2r_target *t;
+
+ esas2r_trace_enter();
+
+ if (dc->curr_virt_id >= ESAS2R_MAX_TARGETS) {
+ esas2r_bugon();
+ esas2r_trace_exit();
+ return NULL;
+ }
+
+ /* see if we found this device before. */
+
+ t = esas2r_targ_db_find_by_ident(a, ident, ident_len);
+
+ if (t == NULL) {
+ t = a->targetdb + dc->curr_virt_id;
+
+ if (ident_len > sizeof(t->identifier)
+ || t->target_state == TS_PRESENT) {
+ esas2r_trace_exit();
+ return NULL;
+ }
+ }
+
+ esas2r_hdebug("add PT; T:%d, V:%d, P:%d", esas2r_targ_get_id(t, a),
+ dc->curr_virt_id,
+ dc->curr_phys_id);
+
+ t->block_size = 0;
+ t->inter_byte = 0;
+ t->inter_block = 0;
+ t->virt_targ_id = dc->curr_virt_id;
+ t->phys_targ_id = dc->curr_phys_id;
+ t->identifier_len = ident_len;
+
+ memcpy(t->identifier, ident, ident_len);
+
+ t->flags |= TF_PASS_THRU | TF_USED;
+
+ t->target_state = TS_PRESENT;
+
+ return t;
+}
+
+void esas2r_targ_db_remove(struct esas2r_adapter *a, struct esas2r_target *t)
+{
+ esas2r_trace_enter();
+
+ t->target_state = TS_NOT_PRESENT;
+
+ esas2r_trace("remove id:%d", esas2r_targ_get_id(t, a));
+
+ esas2r_trace_exit();
+}
+
+struct esas2r_target *esas2r_targ_db_find_by_sas_addr(struct esas2r_adapter *a,
+ u64 *sas_addr)
+{
+ struct esas2r_target *t;
+
+ for (t = a->targetdb; t < a->targetdb_end; t++)
+ if (t->sas_addr == *sas_addr)
+ return t;
+
+ return NULL;
+}
+
+struct esas2r_target *esas2r_targ_db_find_by_ident(struct esas2r_adapter *a,
+ void *identifier,
+ u8 ident_len)
+{
+ struct esas2r_target *t;
+
+ for (t = a->targetdb; t < a->targetdb_end; t++) {
+ if (ident_len == t->identifier_len
+ && memcmp(&t->identifier[0], identifier,
+ ident_len) == 0)
+ return t;
+ }
+
+ return NULL;
+}
+
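+/*
+ * Return the ID of the next present target after target_id, or a value of
+ * at least ESAS2R_MAX_TARGETS when no further targets are present.
+ */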
+u16 esas2r_targ_db_find_next_present(struct esas2r_adapter *a, u16 target_id)
+{
+ u16 id = target_id + 1;
+
+ while (id < ESAS2R_MAX_TARGETS) {
+ struct esas2r_target *t = a->targetdb + id;
+
+ if (t->target_state == TS_PRESENT)
+ break;
+
+ id++;
+ }
+
+ return id;
+}
+
+struct esas2r_target *esas2r_targ_db_find_by_virt_id(struct esas2r_adapter *a,
+ u16 virt_id)
+{
+ struct esas2r_target *t;
+
+ for (t = a->targetdb; t < a->targetdb_end; t++) {
+ if (t->target_state != TS_PRESENT)
+ continue;
+
+ if (t->virt_targ_id == virt_id)
+ return t;
+ }
+
+ return NULL;
+}
+
+u16 esas2r_targ_db_get_tgt_cnt(struct esas2r_adapter *a)
+{
+ u16 devcnt = 0;
+ struct esas2r_target *t;
+ unsigned long flags;
+
+ spin_lock_irqsave(&a->mem_lock, flags);
+ for (t = a->targetdb; t < a->targetdb_end; t++)
+ if (t->target_state == TS_PRESENT)
+ devcnt++;
+
+ spin_unlock_irqrestore(&a->mem_lock, flags);
+
+ return devcnt;
+}
diff --git a/drivers/scsi/esas2r/esas2r_vda.c b/drivers/scsi/esas2r/esas2r_vda.c
new file mode 100644
index 00000000000..f8ec6d63684
--- /dev/null
+++ b/drivers/scsi/esas2r/esas2r_vda.c
@@ -0,0 +1,521 @@
+/*
+ * linux/drivers/scsi/esas2r/esas2r_vda.c
+ * esas2r driver VDA firmware interface functions
+ *
+ * Copyright (c) 2001-2013 ATTO Technology, Inc.
+ * (mailto:linuxdrivers@attotech.com)
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+ *
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "esas2r.h"
+
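+/*
+ * Per-function table of the highest VDA IOCTL version accepted by
+ * esas2r_process_vda_ioctl(), indexed by the function code supplied in the
+ * request.
+ */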
+static u8 esas2r_vdaioctl_versions[] = {
+ ATTO_VDA_VER_UNSUPPORTED,
+ ATTO_VDA_FLASH_VER,
+ ATTO_VDA_VER_UNSUPPORTED,
+ ATTO_VDA_VER_UNSUPPORTED,
+ ATTO_VDA_CLI_VER,
+ ATTO_VDA_VER_UNSUPPORTED,
+ ATTO_VDA_CFG_VER,
+ ATTO_VDA_MGT_VER,
+ ATTO_VDA_GSV_VER
+};
+
+static void clear_vda_request(struct esas2r_request *rq);
+
+static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
+ struct esas2r_request *rq);
+
+/* Prepare a VDA IOCTL request to be sent to the firmware. */
+bool esas2r_process_vda_ioctl(struct esas2r_adapter *a,
+ struct atto_ioctl_vda *vi,
+ struct esas2r_request *rq,
+ struct esas2r_sg_context *sgc)
+{
+ u32 datalen = 0;
+ struct atto_vda_sge *firstsg = NULL;
+ u8 vercnt = (u8)ARRAY_SIZE(esas2r_vdaioctl_versions);
+
+ vi->status = ATTO_STS_SUCCESS;
+ vi->vda_status = RS_PENDING;
+
+ if (vi->function >= vercnt) {
+ vi->status = ATTO_STS_INV_FUNC;
+ return false;
+ }
+
+ if (vi->version > esas2r_vdaioctl_versions[vi->function]) {
+ vi->status = ATTO_STS_INV_VERSION;
+ return false;
+ }
+
+ if (a->flags & AF_DEGRADED_MODE) {
+ vi->status = ATTO_STS_DEGRADED;
+ return false;
+ }
+
+ if (vi->function != VDA_FUNC_SCSI)
+ clear_vda_request(rq);
+
+ rq->vrq->scsi.function = vi->function;
+ rq->interrupt_cb = esas2r_complete_vda_ioctl;
+ rq->interrupt_cx = vi;
+
+ switch (vi->function) {
+ case VDA_FUNC_FLASH:
+
+ if (vi->cmd.flash.sub_func != VDA_FLASH_FREAD
+ && vi->cmd.flash.sub_func != VDA_FLASH_FWRITE
+ && vi->cmd.flash.sub_func != VDA_FLASH_FINFO) {
+ vi->status = ATTO_STS_INV_FUNC;
+ return false;
+ }
+
+ if (vi->cmd.flash.sub_func != VDA_FLASH_FINFO)
+ datalen = vi->data_length;
+
+ rq->vrq->flash.length = cpu_to_le32(datalen);
+ rq->vrq->flash.sub_func = vi->cmd.flash.sub_func;
+
+ memcpy(rq->vrq->flash.data.file.file_name,
+ vi->cmd.flash.data.file.file_name,
+ sizeof(vi->cmd.flash.data.file.file_name));
+
+ firstsg = rq->vrq->flash.data.file.sge;
+ break;
+
+ case VDA_FUNC_CLI:
+
+ datalen = vi->data_length;
+
+ rq->vrq->cli.cmd_rsp_len =
+ cpu_to_le32(vi->cmd.cli.cmd_rsp_len);
+ rq->vrq->cli.length = cpu_to_le32(datalen);
+
+ firstsg = rq->vrq->cli.sge;
+ break;
+
+ case VDA_FUNC_MGT:
+ {
+ u8 *cmdcurr_offset = sgc->cur_offset
+ - offsetof(struct atto_ioctl_vda, data)
+ + offsetof(struct atto_ioctl_vda, cmd)
+ + offsetof(struct atto_ioctl_vda_mgt_cmd,
+ data);
+ /*
+ * build the data payload SGL here first since
+ * esas2r_sgc_init() will modify the S/G list offset for the
+ * management SGL (which is built below where the data SGL is
+ * usually built).
+ */
+
+ if (vi->data_length) {
+ u32 payldlen = 0;
+
+ if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_HEALTH_REQ
+ || vi->cmd.mgt.mgt_func == VDAMGT_DEV_METRICS) {
+ rq->vrq->mgt.payld_sglst_offset =
+ (u8)offsetof(struct atto_vda_mgmt_req,
+ payld_sge);
+
+ payldlen = vi->data_length;
+ datalen = vi->cmd.mgt.data_length;
+ } else if (vi->cmd.mgt.mgt_func == VDAMGT_DEV_INFO2
+ || vi->cmd.mgt.mgt_func ==
+ VDAMGT_DEV_INFO2_BYADDR) {
+ datalen = vi->data_length;
+ cmdcurr_offset = sgc->cur_offset;
+ } else {
+ vi->status = ATTO_STS_INV_PARAM;
+ return false;
+ }
+
+			/* Set up the length so building the payload SGL works */
+ rq->vrq->mgt.length = cpu_to_le32(datalen);
+
+ if (payldlen) {
+ rq->vrq->mgt.payld_length =
+ cpu_to_le32(payldlen);
+
+ esas2r_sgc_init(sgc, a, rq,
+ rq->vrq->mgt.payld_sge);
+ sgc->length = payldlen;
+
+ if (!esas2r_build_sg_list(a, rq, sgc)) {
+ vi->status = ATTO_STS_OUT_OF_RSRC;
+ return false;
+ }
+ }
+ } else {
+ datalen = vi->cmd.mgt.data_length;
+
+ rq->vrq->mgt.length = cpu_to_le32(datalen);
+ }
+
+ /*
+ * Now that the payload SGL is built, if any, setup to build
+ * the management SGL.
+ */
+ firstsg = rq->vrq->mgt.sge;
+ sgc->cur_offset = cmdcurr_offset;
+
+ /* Finish initializing the management request. */
+ rq->vrq->mgt.mgt_func = vi->cmd.mgt.mgt_func;
+ rq->vrq->mgt.scan_generation = vi->cmd.mgt.scan_generation;
+ rq->vrq->mgt.dev_index =
+ cpu_to_le32(vi->cmd.mgt.dev_index);
+
+ esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data);
+ break;
+ }
+
+ case VDA_FUNC_CFG:
+
+ if (vi->data_length
+ || vi->cmd.cfg.data_length == 0) {
+ vi->status = ATTO_STS_INV_PARAM;
+ return false;
+ }
+
+ if (vi->cmd.cfg.cfg_func == VDA_CFG_INIT) {
+ vi->status = ATTO_STS_INV_FUNC;
+ return false;
+ }
+
+ rq->vrq->cfg.sub_func = vi->cmd.cfg.cfg_func;
+ rq->vrq->cfg.length = cpu_to_le32(vi->cmd.cfg.data_length);
+
+ if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) {
+ memcpy(&rq->vrq->cfg.data,
+ &vi->cmd.cfg.data,
+ vi->cmd.cfg.data_length);
+
+ esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func,
+ &rq->vrq->cfg.data);
+ } else {
+ vi->status = ATTO_STS_INV_FUNC;
+
+ return false;
+ }
+
+ break;
+
+ case VDA_FUNC_GSV:
+
+ vi->cmd.gsv.rsp_len = vercnt;
+
+ memcpy(vi->cmd.gsv.version_info, esas2r_vdaioctl_versions,
+ vercnt);
+
+ vi->vda_status = RS_SUCCESS;
+ break;
+
+ default:
+
+ vi->status = ATTO_STS_INV_FUNC;
+ return false;
+ }
+
+ if (datalen) {
+ esas2r_sgc_init(sgc, a, rq, firstsg);
+ sgc->length = datalen;
+
+ if (!esas2r_build_sg_list(a, rq, sgc)) {
+ vi->status = ATTO_STS_OUT_OF_RSRC;
+ return false;
+ }
+ }
+
+ esas2r_start_request(a, rq);
+
+ return true;
+}
+
+static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a,
+ struct esas2r_request *rq)
+{
+ struct atto_ioctl_vda *vi = (struct atto_ioctl_vda *)rq->interrupt_cx;
+
+ vi->vda_status = rq->req_stat;
+
+ switch (vi->function) {
+ case VDA_FUNC_FLASH:
+
+ if (vi->cmd.flash.sub_func == VDA_FLASH_FINFO
+ || vi->cmd.flash.sub_func == VDA_FLASH_FREAD)
+ vi->cmd.flash.data.file.file_size =
+ le32_to_cpu(rq->func_rsp.flash_rsp.file_size);
+
+ break;
+
+ case VDA_FUNC_MGT:
+
+ vi->cmd.mgt.scan_generation =
+ rq->func_rsp.mgt_rsp.scan_generation;
+ vi->cmd.mgt.dev_index = le16_to_cpu(
+ rq->func_rsp.mgt_rsp.dev_index);
+
+ if (vi->data_length == 0)
+ vi->cmd.mgt.data_length =
+ le32_to_cpu(rq->func_rsp.mgt_rsp.length);
+
+ esas2r_nuxi_mgt_data(rq->vrq->mgt.mgt_func, &vi->cmd.mgt.data);
+ break;
+
+ case VDA_FUNC_CFG:
+
+ if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) {
+ struct atto_ioctl_vda_cfg_cmd *cfg = &vi->cmd.cfg;
+ struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp;
+
+ cfg->data_length =
+ cpu_to_le32(sizeof(struct atto_vda_cfg_init));
+ cfg->data.init.vda_version =
+ le32_to_cpu(rsp->vda_version);
+ cfg->data.init.fw_build = rsp->fw_build;
+
+ sprintf((char *)&cfg->data.init.fw_release,
+ "%1d.%02d",
+ (int)LOBYTE(le16_to_cpu(rsp->fw_release)),
+ (int)HIBYTE(le16_to_cpu(rsp->fw_release)));
+
+ if (LOWORD(LOBYTE(cfg->data.init.fw_build)) == 'A')
+ cfg->data.init.fw_version =
+ cfg->data.init.fw_build;
+ else
+ cfg->data.init.fw_version =
+ cfg->data.init.fw_release;
+ } else {
+ esas2r_nuxi_cfg_data(rq->vrq->cfg.sub_func,
+ &vi->cmd.cfg.data);
+ }
+
+ break;
+
+ case VDA_FUNC_CLI:
+
+ vi->cmd.cli.cmd_rsp_len =
+ le32_to_cpu(rq->func_rsp.cli_rsp.cmd_rsp_len);
+ break;
+
+ default:
+
+ break;
+ }
+}
+
+/* Build a flash VDA request. */
+void esas2r_build_flash_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u8 sub_func,
+ u8 cksum,
+ u32 addr,
+ u32 length)
+{
+ struct atto_vda_flash_req *vrq = &rq->vrq->flash;
+
+ clear_vda_request(rq);
+
+ rq->vrq->scsi.function = VDA_FUNC_FLASH;
+
+ if (sub_func == VDA_FLASH_BEGINW
+ || sub_func == VDA_FLASH_WRITE
+ || sub_func == VDA_FLASH_READ)
+ vrq->sg_list_offset = (u8)offsetof(struct atto_vda_flash_req,
+ data.sge);
+
+ vrq->length = cpu_to_le32(length);
+ vrq->flash_addr = cpu_to_le32(addr);
+ vrq->checksum = cksum;
+ vrq->sub_func = sub_func;
+}
+
+/* Build a VDA management request. */
+void esas2r_build_mgt_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u8 sub_func,
+ u8 scan_gen,
+ u16 dev_index,
+ u32 length,
+ void *data)
+{
+ struct atto_vda_mgmt_req *vrq = &rq->vrq->mgt;
+
+ clear_vda_request(rq);
+
+ rq->vrq->scsi.function = VDA_FUNC_MGT;
+
+ vrq->mgt_func = sub_func;
+ vrq->scan_generation = scan_gen;
+ vrq->dev_index = cpu_to_le16(dev_index);
+ vrq->length = cpu_to_le32(length);
+
+ if (vrq->length) {
+ if (a->flags & AF_LEGACY_SGE_MODE) {
+ vrq->sg_list_offset = (u8)offsetof(
+ struct atto_vda_mgmt_req, sge);
+
+ vrq->sge[0].length = cpu_to_le32(SGE_LAST | length);
+ vrq->sge[0].address = cpu_to_le64(
+ rq->vrq_md->phys_addr +
+ sizeof(union atto_vda_req));
+ } else {
+ vrq->sg_list_offset = (u8)offsetof(
+ struct atto_vda_mgmt_req, prde);
+
+ vrq->prde[0].ctl_len = cpu_to_le32(length);
+ vrq->prde[0].address = cpu_to_le64(
+ rq->vrq_md->phys_addr +
+ sizeof(union atto_vda_req));
+ }
+ }
+
+ if (data) {
+ esas2r_nuxi_mgt_data(sub_func, data);
+
+ memcpy(&rq->vda_rsp_data->mgt_data.data.bytes[0], data,
+ length);
+ }
+}
+
+/* Build a VDA asynchronous event (AE) request. */
+void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq)
+{
+ struct atto_vda_ae_req *vrq = &rq->vrq->ae;
+
+ clear_vda_request(rq);
+
+ rq->vrq->scsi.function = VDA_FUNC_AE;
+
+ vrq->length = cpu_to_le32(sizeof(struct atto_vda_ae_data));
+
+ if (a->flags & AF_LEGACY_SGE_MODE) {
+ vrq->sg_list_offset =
+ (u8)offsetof(struct atto_vda_ae_req, sge);
+ vrq->sge[0].length = cpu_to_le32(SGE_LAST | vrq->length);
+ vrq->sge[0].address = cpu_to_le64(
+ rq->vrq_md->phys_addr +
+ sizeof(union atto_vda_req));
+ } else {
+ vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ae_req,
+ prde);
+ vrq->prde[0].ctl_len = cpu_to_le32(vrq->length);
+ vrq->prde[0].address = cpu_to_le64(
+ rq->vrq_md->phys_addr +
+ sizeof(union atto_vda_req));
+ }
+}
+
+/* Build a VDA CLI request. */
+void esas2r_build_cli_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u32 length,
+ u32 cmd_rsp_len)
+{
+ struct atto_vda_cli_req *vrq = &rq->vrq->cli;
+
+ clear_vda_request(rq);
+
+ rq->vrq->scsi.function = VDA_FUNC_CLI;
+
+ vrq->length = cpu_to_le32(length);
+ vrq->cmd_rsp_len = cpu_to_le32(cmd_rsp_len);
+ vrq->sg_list_offset = (u8)offsetof(struct atto_vda_cli_req, sge);
+}
+
+/* Build a VDA IOCTL request. */
+void esas2r_build_ioctl_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u32 length,
+ u8 sub_func)
+{
+ struct atto_vda_ioctl_req *vrq = &rq->vrq->ioctl;
+
+ clear_vda_request(rq);
+
+ rq->vrq->scsi.function = VDA_FUNC_IOCTL;
+
+ vrq->length = cpu_to_le32(length);
+ vrq->sub_func = sub_func;
+ vrq->sg_list_offset = (u8)offsetof(struct atto_vda_ioctl_req, sge);
+}
+
+/* Build a VDA configuration request. */
+void esas2r_build_cfg_req(struct esas2r_adapter *a,
+ struct esas2r_request *rq,
+ u8 sub_func,
+ u32 length,
+ void *data)
+{
+ struct atto_vda_cfg_req *vrq = &rq->vrq->cfg;
+
+ clear_vda_request(rq);
+
+ rq->vrq->scsi.function = VDA_FUNC_CFG;
+
+ vrq->sub_func = sub_func;
+ vrq->length = cpu_to_le32(length);
+
+ if (data) {
+ esas2r_nuxi_cfg_data(sub_func, data);
+
+ memcpy(&vrq->data, data, length);
+ }
+}
+
+static void clear_vda_request(struct esas2r_request *rq)
+{
+ u32 handle = rq->vrq->scsi.handle;
+
+ memset(rq->vrq, 0, sizeof(*rq->vrq));
+
+ rq->vrq->scsi.handle = handle;
+
+ rq->req_stat = RS_PENDING;
+
+	/* since the data buffer is separate, clear that too */
+
+ memset(rq->data_buf, 0, ESAS2R_DATA_BUF_LEN);
+
+ /*
+	 * Set up the next and prev pointers in case the request is not
+	 * going through esas2r_start_request().
+ */
+
+ INIT_LIST_HEAD(&rq->req_list);
+}
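
The gate at the top of esas2r_process_vda_ioctl() above is the piece worth noting: each VDA function index maps to the highest IOCTL version the driver understands, and a request whose function index is out of range, or whose version is newer than the table entry, is rejected before any firmware request is built. A minimal standalone sketch of that table-driven check follows; the constants and table values are illustrative, not the driver's ATTO_VDA_* numbers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Highest IOCTL version understood, indexed by VDA function number.
 * Values are illustrative, not the driver's ATTO_VDA_* numbers.
 */
static const uint8_t max_version[] = { 0, 2, 1 };

/* Return true when the (function, version) pair may be processed. */
static bool version_ok(uint8_t func, uint8_t version)
{
	if (func >= sizeof(max_version) / sizeof(max_version[0]))
		return false;			/* unknown function index */
	return version <= max_version[func];	/* reject anything newer  */
}

int main(void)
{
	printf("func 1, v2: %d\n", version_ok(1, 2));	/* 1: accepted        */
	printf("func 2, v3: %d\n", version_ok(2, 3));	/* 0: version too new */
	printf("func 9, v0: %d\n", version_ok(9, 0));	/* 0: out of range    */
	return 0;
}
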
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 34552bf1c02..55548dc5cec 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -530,7 +530,7 @@ static int esp_need_to_nego_sync(struct esp_target_data *tp)
static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
struct esp_lun_data *lp)
{
- if (!ent->tag[0]) {
+ if (!ent->orig_tag[0]) {
/* Non-tagged, slot already taken? */
if (lp->non_tagged_cmd)
return -EBUSY;
@@ -564,9 +564,9 @@ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
return -EBUSY;
}
- BUG_ON(lp->tagged_cmds[ent->tag[1]]);
+ BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);
- lp->tagged_cmds[ent->tag[1]] = ent;
+ lp->tagged_cmds[ent->orig_tag[1]] = ent;
lp->num_tagged++;
return 0;
@@ -575,9 +575,9 @@ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
static void esp_free_lun_tag(struct esp_cmd_entry *ent,
struct esp_lun_data *lp)
{
- if (ent->tag[0]) {
- BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent);
- lp->tagged_cmds[ent->tag[1]] = NULL;
+ if (ent->orig_tag[0]) {
+ BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
+ lp->tagged_cmds[ent->orig_tag[1]] = NULL;
lp->num_tagged--;
} else {
BUG_ON(lp->non_tagged_cmd != ent);
@@ -667,6 +667,8 @@ static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
ent->tag[0] = 0;
ent->tag[1] = 0;
}
+ ent->orig_tag[0] = ent->tag[0];
+ ent->orig_tag[1] = ent->tag[1];
if (esp_alloc_lun_tag(ent, lp) < 0)
continue;
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h
index 28e22acf87e..cd68805e8d7 100644
--- a/drivers/scsi/esp_scsi.h
+++ b/drivers/scsi/esp_scsi.h
@@ -271,6 +271,7 @@ struct esp_cmd_entry {
#define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */
u8 tag[2];
+ u8 orig_tag[2];
u8 status;
u8 message;
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index b6d1f92ed33..c18c68150e9 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -38,7 +38,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.5.0.22"
+#define DRV_VERSION "1.5.0.23"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 5f09d1814d2..42e15ee6e1b 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -642,19 +642,6 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
INIT_WORK(&fnic->event_work, fnic_handle_event);
skb_queue_head_init(&fnic->fip_frame_queue);
- spin_lock_irqsave(&fnic_list_lock, flags);
- if (!fnic_fip_queue) {
- fnic_fip_queue =
- create_singlethread_workqueue("fnic_fip_q");
- if (!fnic_fip_queue) {
- spin_unlock_irqrestore(&fnic_list_lock, flags);
- printk(KERN_ERR PFX "fnic FIP work queue "
- "create failed\n");
- err = -ENOMEM;
- goto err_out_free_max_pool;
- }
- }
- spin_unlock_irqrestore(&fnic_list_lock, flags);
INIT_LIST_HEAD(&fnic->evlist);
INIT_LIST_HEAD(&fnic->vlans);
} else {
@@ -960,6 +947,13 @@ static int __init fnic_init_module(void)
spin_lock_init(&fnic_list_lock);
INIT_LIST_HEAD(&fnic_list);
+ fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q");
+ if (!fnic_fip_queue) {
+ printk(KERN_ERR PFX "fnic FIP work queue create failed\n");
+ err = -ENOMEM;
+ goto err_create_fip_workq;
+ }
+
fnic_fc_transport = fc_attach_transport(&fnic_fc_functions);
if (!fnic_fc_transport) {
printk(KERN_ERR PFX "fc_attach_transport error\n");
@@ -978,6 +972,8 @@ static int __init fnic_init_module(void)
err_pci_register:
fc_release_transport(fnic_fc_transport);
err_fc_transport:
+ destroy_workqueue(fnic_fip_queue);
+err_create_fip_workq:
destroy_workqueue(fnic_event_queue);
err_create_fnic_workq:
kmem_cache_destroy(fnic_io_req_cache);
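
The fnic hunks above move creation of the single-threaded FIP workqueue out of fnic_probe(), where it sat inside a spin_lock_irqsave() region even though workqueue creation may sleep, and into fnic_init_module(), with a matching destroy_workqueue() on the new error label. A rough sketch of that create-at-module-init, unwind-in-reverse pattern; the module and all names here are hypothetical, not fnic's.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_fip_wq;
static struct kmem_cache *demo_io_cache;

static int __init demo_init(void)
{
	int err;

	/* Created once at module load, never under a spinlock. */
	demo_fip_wq = create_singlethread_workqueue("demo_fip_q");
	if (!demo_fip_wq)
		return -ENOMEM;

	demo_io_cache = kmem_cache_create("demo_io", 128, 0, 0, NULL);
	if (!demo_io_cache) {
		err = -ENOMEM;
		goto err_cache;
	}
	return 0;

err_cache:
	/* Unwind in reverse order of creation, as the fnic error path does. */
	destroy_workqueue(demo_fip_wq);
	return err;
}

static void __exit demo_exit(void)
{
	kmem_cache_destroy(demo_io_cache);
	destroy_workqueue(demo_fip_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
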
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 4cfa3af95b5..fac8cf5832d 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -583,7 +583,7 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
if (likely(h->msix_vector))
c->Header.ReplyQueue =
- smp_processor_id() % h->nreply_queues;
+ raw_smp_processor_id() % h->nreply_queues;
}
}
@@ -1205,8 +1205,8 @@ static void complete_scsi_command(struct CommandList *cp)
scsi_set_resid(cmd, ei->ResidualCnt);
if (ei->CommandStatus == 0) {
- cmd->scsi_done(cmd);
cmd_free(h, cp);
+ cmd->scsi_done(cmd);
return;
}
@@ -1379,8 +1379,8 @@ static void complete_scsi_command(struct CommandList *cp)
dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
cp, ei->CommandStatus);
}
- cmd->scsi_done(cmd);
cmd_free(h, cp);
+ cmd->scsi_done(cmd);
}
static void hpsa_pci_unmap(struct pci_dev *pdev,
@@ -2721,7 +2721,6 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
} while (test_and_set_bit
(i & (BITS_PER_LONG - 1),
h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
- h->nr_allocs++;
spin_unlock_irqrestore(&h->lock, flags);
c = h->cmd_pool + i;
@@ -2793,7 +2792,6 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c)
spin_lock_irqsave(&h->lock, flags);
clear_bit(i & (BITS_PER_LONG - 1),
h->cmd_pool_bits + (i / BITS_PER_LONG));
- h->nr_frees++;
spin_unlock_irqrestore(&h->lock, flags);
}
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 981647989bf..bc85e7244f4 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -98,8 +98,6 @@ struct ctlr_info {
struct ErrorInfo *errinfo_pool;
dma_addr_t errinfo_pool_dhandle;
unsigned long *cmd_pool_bits;
- int nr_allocs;
- int nr_frees;
int scan_finished;
spinlock_t scan_lock;
wait_queue_head_t scan_wait_queue;
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 6601e03520c..36ac1c34ce9 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -9990,6 +9990,20 @@ static struct pci_device_id ipr_pci_table[] = {
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 07a85ce4178..cad1483f05d 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -100,6 +100,13 @@
#define IPR_SUBS_DEV_ID_57D6 0x03FC
#define IPR_SUBS_DEV_ID_57D7 0x03FF
#define IPR_SUBS_DEV_ID_57D8 0x03FE
+#define IPR_SUBS_DEV_ID_57D9 0x046D
+#define IPR_SUBS_DEV_ID_57EB 0x0474
+#define IPR_SUBS_DEV_ID_57EC 0x0475
+#define IPR_SUBS_DEV_ID_57ED 0x0499
+#define IPR_SUBS_DEV_ID_57EE 0x049A
+#define IPR_SUBS_DEV_ID_57EF 0x049B
+#define IPR_SUBS_DEV_ID_57F0 0x049C
#define IPR_NAME "ipr"
/*
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index cd962da4a57..85c77f6b802 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -311,9 +311,9 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost,
&ihost->phys[phy_index]);
assigned_phy_mask |= (1 << phy_index);
+ phy_index++;
}
- phy_index++;
}
return sci_port_configuration_agent_validate_ports(ihost, port_agent);
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 7b082157eb7..99d2930b18c 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -185,7 +185,7 @@ static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
cmd_iu->_r_c = 0;
sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd,
- task->ssp_task.cmd->cmd_len / sizeof(u32));
+ (task->ssp_task.cmd->cmd_len+3) / sizeof(u32));
}
static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 9bb020ac089..0d30ca849e8 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -491,6 +491,7 @@ int isci_task_abort_task(struct sas_task *task)
struct isci_tmf tmf;
int ret = TMF_RESP_FUNC_FAILED;
unsigned long flags;
+ int target_done_already = 0;
/* Get the isci_request reference from the task. Note that
* this check does not depend on the pending request list
@@ -505,9 +506,11 @@ int isci_task_abort_task(struct sas_task *task)
/* If task is already done, the request isn't valid */
if (!(task->task_state_flags & SAS_TASK_STATE_DONE) &&
(task->task_state_flags & SAS_TASK_AT_INITIATOR) &&
- old_request)
+ old_request) {
idev = isci_get_device(task->dev->lldd_dev);
-
+ target_done_already = test_bit(IREQ_COMPLETE_IN_TARGET,
+ &old_request->flags);
+ }
spin_unlock(&task->task_state_lock);
spin_unlock_irqrestore(&ihost->scic_lock, flags);
@@ -561,7 +564,7 @@ int isci_task_abort_task(struct sas_task *task)
if (task->task_proto == SAS_PROTOCOL_SMP ||
sas_protocol_ata(task->task_proto) ||
- test_bit(IREQ_COMPLETE_IN_TARGET, &old_request->flags) ||
+ target_done_already ||
test_bit(IDEV_GONE, &idev->flags)) {
spin_unlock_irqrestore(&ihost->scic_lock, flags);
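
The isci_task_abort_task() change above is a snapshot-under-lock fix: test_bit(IREQ_COMPLETE_IN_TARGET, ...) is now read while scic_lock is still held and old_request is known to be valid, and the later decision uses the saved target_done_already rather than dereferencing old_request after the locks are dropped. A small userspace analogue of the pattern, with a pthread mutex standing in for the host lock and all names invented for illustration:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct io_request {
	bool complete_in_target;
};

static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
static struct io_request *old_request;	/* may be freed once the lock drops */

static void abort_task(void)
{
	bool target_done_already = false;

	pthread_mutex_lock(&host_lock);
	if (old_request)
		/* Snapshot the flag while the request is known to be valid. */
		target_done_already = old_request->complete_in_target;
	pthread_mutex_unlock(&host_lock);

	/* After unlock old_request may be gone, so decide from the snapshot. */
	if (target_done_already)
		printf("target already completed the I/O; skip the abort\n");
	else
		printf("issue the abort TMF\n");
}

int main(void)
{
	old_request = calloc(1, sizeof(*old_request));
	if (!old_request)
		return 1;
	old_request->complete_in_target = true;
	abort_task();
	free(old_request);
	return 0;
}
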
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index ae69dfcc783..e3995612ea7 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2812,6 +2812,8 @@ void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
kfree(session->boot_nic);
kfree(session->boot_target);
kfree(session->ifacename);
+ kfree(session->portal_type);
+ kfree(session->discovery_parent_type);
iscsi_destroy_session(cls_session);
iscsi_host_dec_session_cnt(shost);
@@ -3168,6 +3170,7 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
{
struct iscsi_conn *conn = cls_conn->dd_data;
struct iscsi_session *session = conn->session;
+ int val;
switch(param) {
case ISCSI_PARAM_FAST_ABORT:
@@ -3257,6 +3260,15 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
return iscsi_switch_str_param(&session->boot_nic, buf);
case ISCSI_PARAM_BOOT_TARGET:
return iscsi_switch_str_param(&session->boot_target, buf);
+ case ISCSI_PARAM_PORTAL_TYPE:
+ return iscsi_switch_str_param(&session->portal_type, buf);
+ case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
+ return iscsi_switch_str_param(&session->discovery_parent_type,
+ buf);
+ case ISCSI_PARAM_DISCOVERY_SESS:
+ sscanf(buf, "%d", &val);
+ session->discovery_sess = !!val;
+ break;
default:
return -ENOSYS;
}
@@ -3305,6 +3317,9 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
case ISCSI_PARAM_DATASEQ_INORDER_EN:
len = sprintf(buf, "%d\n", session->dataseq_inorder_en);
break;
+ case ISCSI_PARAM_DEF_TASKMGMT_TMO:
+ len = sprintf(buf, "%d\n", session->def_taskmgmt_tmo);
+ break;
case ISCSI_PARAM_ERL:
len = sprintf(buf, "%d\n", session->erl);
break;
@@ -3344,6 +3359,52 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
case ISCSI_PARAM_BOOT_TARGET:
len = sprintf(buf, "%s\n", session->boot_target);
break;
+ case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
+ len = sprintf(buf, "%u\n", session->auto_snd_tgt_disable);
+ break;
+ case ISCSI_PARAM_DISCOVERY_SESS:
+ len = sprintf(buf, "%u\n", session->discovery_sess);
+ break;
+ case ISCSI_PARAM_PORTAL_TYPE:
+ len = sprintf(buf, "%s\n", session->portal_type);
+ break;
+ case ISCSI_PARAM_CHAP_AUTH_EN:
+ len = sprintf(buf, "%u\n", session->chap_auth_en);
+ break;
+ case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
+ len = sprintf(buf, "%u\n", session->discovery_logout_en);
+ break;
+ case ISCSI_PARAM_BIDI_CHAP_EN:
+ len = sprintf(buf, "%u\n", session->bidi_chap_en);
+ break;
+ case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
+ len = sprintf(buf, "%u\n", session->discovery_auth_optional);
+ break;
+ case ISCSI_PARAM_DEF_TIME2WAIT:
+ len = sprintf(buf, "%d\n", session->time2wait);
+ break;
+ case ISCSI_PARAM_DEF_TIME2RETAIN:
+ len = sprintf(buf, "%d\n", session->time2retain);
+ break;
+ case ISCSI_PARAM_TSID:
+ len = sprintf(buf, "%u\n", session->tsid);
+ break;
+ case ISCSI_PARAM_ISID:
+ len = sprintf(buf, "%02x%02x%02x%02x%02x%02x\n",
+ session->isid[0], session->isid[1],
+ session->isid[2], session->isid[3],
+ session->isid[4], session->isid[5]);
+ break;
+ case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
+ len = sprintf(buf, "%u\n", session->discovery_parent_idx);
+ break;
+ case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
+ if (session->discovery_parent_type)
+ len = sprintf(buf, "%s\n",
+ session->discovery_parent_type);
+ else
+ len = sprintf(buf, "\n");
+ break;
default:
return -ENOSYS;
}
@@ -3433,6 +3494,54 @@ int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
case ISCSI_PARAM_PERSISTENT_ADDRESS:
len = sprintf(buf, "%s\n", conn->persistent_address);
break;
+ case ISCSI_PARAM_STATSN:
+ len = sprintf(buf, "%u\n", conn->statsn);
+ break;
+ case ISCSI_PARAM_MAX_SEGMENT_SIZE:
+ len = sprintf(buf, "%u\n", conn->max_segment_size);
+ break;
+ case ISCSI_PARAM_KEEPALIVE_TMO:
+ len = sprintf(buf, "%u\n", conn->keepalive_tmo);
+ break;
+ case ISCSI_PARAM_LOCAL_PORT:
+ len = sprintf(buf, "%u\n", conn->local_port);
+ break;
+ case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
+ len = sprintf(buf, "%u\n", conn->tcp_timestamp_stat);
+ break;
+ case ISCSI_PARAM_TCP_NAGLE_DISABLE:
+ len = sprintf(buf, "%u\n", conn->tcp_nagle_disable);
+ break;
+ case ISCSI_PARAM_TCP_WSF_DISABLE:
+ len = sprintf(buf, "%u\n", conn->tcp_wsf_disable);
+ break;
+ case ISCSI_PARAM_TCP_TIMER_SCALE:
+ len = sprintf(buf, "%u\n", conn->tcp_timer_scale);
+ break;
+ case ISCSI_PARAM_TCP_TIMESTAMP_EN:
+ len = sprintf(buf, "%u\n", conn->tcp_timestamp_en);
+ break;
+ case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
+ len = sprintf(buf, "%u\n", conn->fragment_disable);
+ break;
+ case ISCSI_PARAM_IPV4_TOS:
+ len = sprintf(buf, "%u\n", conn->ipv4_tos);
+ break;
+ case ISCSI_PARAM_IPV6_TC:
+ len = sprintf(buf, "%u\n", conn->ipv6_traffic_class);
+ break;
+ case ISCSI_PARAM_IPV6_FLOW_LABEL:
+ len = sprintf(buf, "%u\n", conn->ipv6_flow_label);
+ break;
+ case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
+ len = sprintf(buf, "%u\n", conn->is_fw_assigned_ipv6);
+ break;
+ case ISCSI_PARAM_TCP_XMIT_WSF:
+ len = sprintf(buf, "%u\n", conn->tcp_xmit_wsf);
+ break;
+ case ISCSI_PARAM_TCP_RECV_WSF:
+ len = sprintf(buf, "%u\n", conn->tcp_recv_wsf);
+ break;
default:
return -ENOSYS;
}
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 93f222d6671..df43bfe6d57 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -421,6 +421,7 @@ struct lpfc_vport {
uint32_t cfg_enable_da_id;
uint32_t cfg_max_scsicmpl_time;
uint32_t cfg_tgt_queue_depth;
+ uint32_t cfg_first_burst_size;
uint32_t dev_loss_tmo_changed;
@@ -710,8 +711,6 @@ struct lpfc_hba {
uint32_t cfg_use_msi;
uint32_t cfg_fcp_imax;
uint32_t cfg_fcp_cpu_map;
- uint32_t cfg_fcp_wq_count;
- uint32_t cfg_fcp_eq_count;
uint32_t cfg_fcp_io_channel;
uint32_t cfg_total_seg_cnt;
uint32_t cfg_sg_seg_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index f4360c5ea6a..16498e030c7 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -674,9 +674,6 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type)
int i;
int rc;
- if (phba->pport->fc_flag & FC_OFFLINE_MODE)
- return 0;
-
init_completion(&online_compl);
rc = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_OFFLINE_PREP);
@@ -744,14 +741,15 @@ lpfc_selective_reset(struct lpfc_hba *phba)
int status = 0;
int rc;
- if ((!phba->cfg_enable_hba_reset) ||
- (phba->pport->fc_flag & FC_OFFLINE_MODE))
+ if (!phba->cfg_enable_hba_reset)
return -EACCES;
- status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
+ if (!(phba->pport->fc_flag & FC_OFFLINE_MODE)) {
+ status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
- if (status != 0)
- return status;
+ if (status != 0)
+ return status;
+ }
init_completion(&online_compl);
rc = lpfc_workq_post_event(phba, &status, &online_compl,
@@ -2591,9 +2589,12 @@ LPFC_VPORT_ATTR_R(enable_da_id, 1, 0, 1,
/*
# lun_queue_depth: This parameter is used to limit the number of outstanding
-# commands per FCP LUN. Value range is [1,128]. Default value is 30.
+# commands per FCP LUN. Value range is [1,512]. Default value is 30.
+# If this parameter value is greater than 1/8th the maximum number of exchanges
+# supported by the HBA port, then the lun queue depth will be reduced to
+# 1/8th the maximum number of exchanges.
*/
-LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128,
+LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 512,
"Max number of FCP commands we can queue to a specific LUN");
/*
@@ -2601,7 +2602,7 @@ LPFC_VPORT_ATTR_R(lun_queue_depth, 30, 1, 128,
# commands per target port. Value range is [10,65535]. Default value is 65535.
*/
LPFC_VPORT_ATTR_R(tgt_queue_depth, 65535, 10, 65535,
- "Max number of FCP commands we can queue to a specific target port");
+ "Max number of FCP commands we can queue to a specific target port");
/*
# hba_queue_depth: This parameter is used to limit the number of outstanding
@@ -3949,6 +3950,14 @@ LPFC_VPORT_ATTR_RW(use_adisc, 0, 0, 1,
"Use ADISC on rediscovery to authenticate FCP devices");
/*
+# lpfc_first_burst_size: First burst size to use on the NPorts
+# that support first burst.
+# Value range is [0,65536]. Default value is 0.
+*/
+LPFC_VPORT_ATTR_RW(first_burst_size, 0, 0, 65536,
+ "First burst size for Targets that support first burst");
+
+/*
# lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue
# depth. Default value is 0. When the value of this parameter is zero the
# SCSI command completion time is not used for controlling I/O queue depth. When
@@ -4112,25 +4121,6 @@ LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
"MSI-X (2), if possible");
/*
-# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
-# This parameter is ignored and will eventually be depricated
-#
-# Value range is [1,7]. Default value is 4.
-*/
-LPFC_ATTR_R(fcp_wq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
- LPFC_FCP_IO_CHAN_MAX,
- "Set the number of fast-path FCP work queues, if possible");
-
-/*
-# lpfc_fcp_eq_count: Set the number of FCP EQ/CQ/WQ IO channels
-#
-# Value range is [1,7]. Default value is 4.
-*/
-LPFC_ATTR_R(fcp_eq_count, LPFC_FCP_IO_CHAN_DEF, LPFC_FCP_IO_CHAN_MIN,
- LPFC_FCP_IO_CHAN_MAX,
- "Set the number of fast-path FCP event queues, if possible");
-
-/*
# lpfc_fcp_io_channel: Set the number of FCP EQ/CQ/WQ IO channels
#
# Value range is [1,7]. Default value is 4.
@@ -4276,6 +4266,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_devloss_tmo,
&dev_attr_lpfc_fcp_class,
&dev_attr_lpfc_use_adisc,
+ &dev_attr_lpfc_first_burst_size,
&dev_attr_lpfc_ack0,
&dev_attr_lpfc_topology,
&dev_attr_lpfc_scan_down,
@@ -4307,8 +4298,6 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_use_msi,
&dev_attr_lpfc_fcp_imax,
&dev_attr_lpfc_fcp_cpu_map,
- &dev_attr_lpfc_fcp_wq_count,
- &dev_attr_lpfc_fcp_eq_count,
&dev_attr_lpfc_fcp_io_channel,
&dev_attr_lpfc_enable_bg,
&dev_attr_lpfc_soft_wwnn,
@@ -4352,6 +4341,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
&dev_attr_lpfc_restrict_login,
&dev_attr_lpfc_fcp_class,
&dev_attr_lpfc_use_adisc,
+ &dev_attr_lpfc_first_burst_size,
&dev_attr_lpfc_fdmi_on,
&dev_attr_lpfc_max_luns,
&dev_attr_nport_evt_cnt,
@@ -5290,8 +5280,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_use_msi_init(phba, lpfc_use_msi);
lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map);
- lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
- lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel);
lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
@@ -5331,6 +5319,7 @@ lpfc_get_vport_cfgparam(struct lpfc_vport *vport)
lpfc_restrict_login_init(vport, lpfc_restrict_login);
lpfc_fcp_class_init(vport, lpfc_fcp_class);
lpfc_use_adisc_init(vport, lpfc_use_adisc);
+ lpfc_first_burst_size_init(vport, lpfc_first_burst_size);
lpfc_max_scsicmpl_time_init(vport, lpfc_max_scsicmpl_time);
lpfc_fdmi_on_init(vport, lpfc_fdmi_on);
lpfc_discovery_threads_init(vport, lpfc_discovery_threads);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index fcbedd6053c..79c13c3263f 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -2498,7 +2498,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
struct lpfc_sli_ct_request *ctreq = NULL;
int ret_val = 0;
int time_left;
- int iocb_stat = 0;
+ int iocb_stat = IOCB_SUCCESS;
unsigned long flags;
*txxri = 0;
@@ -2574,6 +2574,7 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
cmdiocbq->vport = phba->pport;
+ cmdiocbq->iocb_cmpl = NULL;
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
rspiocbq,
@@ -2963,7 +2964,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
uint8_t *ptr = NULL, *rx_databuf = NULL;
int rc = 0;
int time_left;
- int iocb_stat;
+ int iocb_stat = IOCB_SUCCESS;
unsigned long flags;
void *dataout = NULL;
uint32_t total_mem;
@@ -3149,6 +3150,7 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
}
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
cmdiocbq->vport = phba->pport;
+ cmdiocbq->iocb_cmpl = NULL;
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
rspiocbq, (phba->fc_ratov * 2) +
LPFC_DRVR_TIMEOUT);
@@ -3209,7 +3211,7 @@ err_loopback_test_exit:
lpfc_bsg_event_unref(evt); /* delete */
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
- if (cmdiocbq != NULL)
+ if ((cmdiocbq != NULL) && (iocb_stat != IOCB_TIMEDOUT))
lpfc_sli_release_iocbq(phba, cmdiocbq);
if (rspiocbq != NULL)
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 68391177432..02e8cd923d0 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -895,7 +895,7 @@ lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (irsp->ulpStatus) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
- "0268 NS cmd %x Error (%d %d)\n",
+ "0268 NS cmd x%x Error (x%x x%x)\n",
cmdcode, irsp->ulpStatus, irsp->un.ulpWord[4]);
if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index af49fb03dbb..e409ba5f728 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2008 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -154,6 +154,7 @@ struct lpfc_node_rrq {
#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */
#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */
#define NLP_SC_REQ 0x20000000 /* Target requires authentication */
+#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */
#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */
/* ndlp usage management macros */
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 6b8ee7449f1..110445f0c58 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -2122,6 +2122,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
npr->estabImagePair = 1;
npr->readXferRdyDis = 1;
+ if (vport->cfg_first_burst_size)
+ npr->writeXferRdyDis = 1;
/* For FCP support */
npr->prliType = PRLI_FCP_TYPE;
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 4ec3d7c044c..086c3f28caa 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -234,6 +234,9 @@ struct ulp_bde64 {
uint32_t addrHigh;
};
+/* Maximum size of immediate data that can fit into a 128-byte WQE */
+#define LPFC_MAX_BDE_IMM_SIZE 64
+
struct lpfc_sli4_flags {
uint32_t word0;
#define lpfc_idx_rsrc_rdy_SHIFT 0
@@ -2585,6 +2588,9 @@ struct lpfc_sli4_parameters {
#define cfg_mqv_WORD word6
uint32_t word7;
uint32_t word8;
+#define cfg_wqsize_SHIFT 8
+#define cfg_wqsize_MASK 0x0000000f
+#define cfg_wqsize_WORD word8
#define cfg_wqv_SHIFT 14
#define cfg_wqv_MASK 0x00000003
#define cfg_wqv_WORD word8
@@ -3622,6 +3628,13 @@ union lpfc_wqe {
struct gen_req64_wqe gen_req;
};
+union lpfc_wqe128 {
+ uint32_t words[32];
+ struct lpfc_wqe_generic generic;
+ struct xmit_seq64_wqe xmit_sequence;
+ struct gen_req64_wqe gen_req;
+};
+
#define LPFC_GROUP_OJECT_MAGIC_NUM 0xfeaa0001
#define LPFC_FILE_TYPE_GROUP 0xf7
#define LPFC_FILE_ID_GROUP 0xa2
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index e0b20fad850..501147c4a14 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -472,10 +472,22 @@ lpfc_config_port_post(struct lpfc_hba *phba)
lpfc_sli_read_link_ste(phba);
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
- if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
- phba->cfg_hba_queue_depth =
- (mb->un.varRdConfig.max_xri + 1) -
- lpfc_sli4_get_els_iocb_cnt(phba);
+ i = (mb->un.varRdConfig.max_xri + 1);
+ if (phba->cfg_hba_queue_depth > i) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3359 HBA queue depth changed from %d to %d\n",
+ phba->cfg_hba_queue_depth, i);
+ phba->cfg_hba_queue_depth = i;
+ }
+
+ /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
+ i = (mb->un.varRdConfig.max_xri >> 3);
+ if (phba->pport->cfg_lun_queue_depth > i) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3360 LUN queue depth changed from %d to %d\n",
+ phba->pport->cfg_lun_queue_depth, i);
+ phba->pport->cfg_lun_queue_depth = i;
+ }
phba->lmt = mb->un.varRdConfig.lmt;
@@ -4901,9 +4913,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_get_cfgparam(phba);
phba->max_vpi = LPFC_MAX_VPI;
- /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be depricated */
- phba->cfg_fcp_io_channel = phba->cfg_fcp_eq_count;
-
/* This will be set to correct value after the read_config mbox */
phba->max_vports = 0;
@@ -6664,12 +6673,14 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
goto read_cfg_out;
/* Reset the DFT_HBA_Q_DEPTH to the max xri */
- if (phba->cfg_hba_queue_depth >
- (phba->sli4_hba.max_cfg_param.max_xri -
- lpfc_sli4_get_els_iocb_cnt(phba)))
- phba->cfg_hba_queue_depth =
- phba->sli4_hba.max_cfg_param.max_xri -
- lpfc_sli4_get_els_iocb_cnt(phba);
+ length = phba->sli4_hba.max_cfg_param.max_xri -
+ lpfc_sli4_get_els_iocb_cnt(phba);
+ if (phba->cfg_hba_queue_depth > length) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3361 HBA queue depth changed from %d to %d\n",
+ phba->cfg_hba_queue_depth, length);
+ phba->cfg_hba_queue_depth = length;
+ }
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
LPFC_SLI_INTF_IF_TYPE_2)
@@ -6859,11 +6870,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq;
}
- /* Eventually cfg_fcp_eq_count / cfg_fcp_wq_count will be depricated */
-
/* The actual number of FCP event queues adopted */
- phba->cfg_fcp_eq_count = cfg_fcp_io_channel;
- phba->cfg_fcp_wq_count = cfg_fcp_io_channel;
phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
/* Get EQ depth from module parameter, fake the default for now */
@@ -9154,6 +9161,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
+ sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
mbx_sli4_parameters);
sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index b1c510f6b8f..1f292e29d56 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -178,7 +178,8 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mb->mbxOwner = OWN_HOST;
mb->un.varDmp.cv = 1;
mb->un.varDmp.type = DMP_NV_PARAMS;
- mb->un.varDmp.entry_index = 0;
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ mb->un.varDmp.entry_index = 0;
mb->un.varDmp.region_id = WAKE_UP_PARMS_REGION_ID;
mb->un.varDmp.word_cnt = WAKE_UP_PARMS_WORD_SIZE;
mb->un.varDmp.co = 0;
@@ -361,7 +362,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
/* NEW_FEATURE
* SLI-2, Coalescing Response Feature.
*/
- if (phba->cfg_cr_delay) {
+ if (phba->cfg_cr_delay && (phba->sli_rev < LPFC_SLI_REV4)) {
mb->un.varCfgLnk.cr = 1;
mb->un.varCfgLnk.ci = 1;
mb->un.varCfgLnk.cr_delay = phba->cfg_cr_delay;
@@ -377,7 +378,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
mb->un.varCfgLnk.crtov = phba->fc_crtov;
mb->un.varCfgLnk.citov = phba->fc_citov;
- if (phba->cfg_ack0)
+ if (phba->cfg_ack0 && (phba->sli_rev < LPFC_SLI_REV4))
mb->un.varCfgLnk.ack0_enable = 1;
mb->mbxCommand = MBX_CONFIG_LINK;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 6aaf39a1f1c..abc361259d6 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -690,11 +690,15 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag &= ~NLP_FIRSTBURST;
if (npr->prliType == PRLI_FCP_TYPE) {
if (npr->initiatorFunc)
ndlp->nlp_type |= NLP_FCP_INITIATOR;
- if (npr->targetFunc)
+ if (npr->targetFunc) {
ndlp->nlp_type |= NLP_FCP_TARGET;
+ if (npr->writeXferRdyDis)
+ ndlp->nlp_flag |= NLP_FIRSTBURST;
+ }
if (npr->Retry)
ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
}
@@ -1676,12 +1680,16 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Check out PRLI rsp */
ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag &= ~NLP_FIRSTBURST;
if ((npr->acceptRspCode == PRLI_REQ_EXECUTED) &&
(npr->prliType == PRLI_FCP_TYPE)) {
if (npr->initiatorFunc)
ndlp->nlp_type |= NLP_FCP_INITIATOR;
- if (npr->targetFunc)
+ if (npr->targetFunc) {
ndlp->nlp_type |= NLP_FCP_TARGET;
+ if (npr->writeXferRdyDis)
+ ndlp->nlp_flag |= NLP_FIRSTBURST;
+ }
if (npr->Retry)
ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE;
}
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 243de1d324b..1242b6c4308 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -4386,11 +4386,11 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
if (scsi_sg_count(scsi_cmnd)) {
if (datadir == DMA_TO_DEVICE) {
iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
- if (sli4)
- iocb_cmd->ulpPU = PARM_READ_CHECK;
- else {
- iocb_cmd->un.fcpi.fcpi_parm = 0;
- iocb_cmd->ulpPU = 0;
+ iocb_cmd->ulpPU = PARM_READ_CHECK;
+ if (vport->cfg_first_burst_size &&
+ (pnode->nlp_flag & NLP_FIRSTBURST)) {
+ piocbq->iocb.un.fcpi.fcpi_XRdy =
+ vport->cfg_first_burst_size;
}
fcp_cmnd->fcpCntl3 = WRITE_DATA;
phba->fc4OutputRequests++;
@@ -5022,6 +5022,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
lpfc_release_scsi_buf(phba, lpfc_cmd);
return FAILED;
}
+ iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
"0702 Issue %s to TGT %d LUN %d "
@@ -5034,7 +5035,6 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata,
iocbq, iocbqrsp, lpfc_cmd->timeout);
if (status != IOCB_SUCCESS) {
if (status == IOCB_TIMEDOUT) {
- iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
ret = TIMEOUT_ERROR;
} else
ret = FAILED;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 43440ca16f4..0392e114531 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -6163,6 +6163,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
kfree(vpd);
goto out_free_mbox;
}
+
mqe = &mboxq->u.mqe;
phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev))
@@ -6249,6 +6250,16 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
+ /* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
+ rc = (phba->sli4_hba.max_cfg_param.max_xri >> 3);
+ if (phba->pport->cfg_lun_queue_depth > rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3362 LUN queue depth changed from %d to %d\n",
+ phba->pport->cfg_lun_queue_depth, rc);
+ phba->pport->cfg_lun_queue_depth = rc;
+ }
+
+
/*
* Discover the port's supported feature set and match it against the
* hosts requests.
@@ -9889,6 +9900,24 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
struct lpfc_scsi_buf *lpfc_cmd;
spin_lock_irqsave(&phba->hbalock, iflags);
+ if (cmdiocbq->iocb_flag & LPFC_IO_WAKE_TMO) {
+
+ /*
+	 * A timeout has occurred for the iocb. If a timeout
+ * completion handler has been supplied, call it. Otherwise,
+ * just free the iocbq.
+ */
+
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ cmdiocbq->iocb_cmpl = cmdiocbq->wait_iocb_cmpl;
+ cmdiocbq->wait_iocb_cmpl = NULL;
+ if (cmdiocbq->iocb_cmpl)
+ (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, NULL);
+ else
+ lpfc_sli_release_iocbq(phba, cmdiocbq);
+ return;
+ }
+
cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
if (cmdiocbq->context2 && rspiocbq)
memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
@@ -9944,10 +9973,16 @@ lpfc_chk_iocb_flg(struct lpfc_hba *phba,
* @timeout: Timeout in number of seconds.
*
* This function issues the iocb to firmware and waits for the
- * iocb to complete. If the iocb command is not
- * completed within timeout seconds, it returns IOCB_TIMEDOUT.
- * Caller should not free the iocb resources if this function
- * returns IOCB_TIMEDOUT.
+ * iocb to complete. The iocb_cmpl field of the iocb shall be used
+ * to handle iocbs that time out. If the field is NULL, the
+ * function shall free the iocbq structure.  If more cleanup is
+ * needed, the caller is expected to provide a completion function
+ * that will perform the needed cleanup.  If the iocb command is
+ * not completed within timeout seconds, the function will either
+ * free the iocbq structure (if iocb_cmpl == NULL) or execute the
+ * completion function set in the iocb_cmpl field and then return
+ * a status of IOCB_TIMEDOUT. The caller should not free the iocb
+ * resources if this function returns IOCB_TIMEDOUT.
* The function waits for the iocb completion using an
* non-interruptible wait.
* This function will sleep while waiting for iocb completion.
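
The rewritten comment above describes an ownership hand-off: the caller's completion is stashed in wait_iocb_cmpl, and when the wait times out (LPFC_IO_WAKE_TMO) the eventual completion either runs that saved handler or frees the iocbq itself, which is why the caller must not touch the iocb after IOCB_TIMEDOUT. A deliberately simplified, single-threaded model of that contract; this is not the driver code, which does the same hand-off under phba->hbalock.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct iocb;
typedef void (*iocb_cmpl_fn)(struct iocb *);

struct iocb {
	bool wake;			/* waiter saw the completion in time    */
	bool wake_tmo;			/* waiter gave up and returned TIMEDOUT */
	iocb_cmpl_fn wait_iocb_cmpl;	/* caller's handler, saved before wait  */
};

/* Runs when the hardware finally completes the request. */
static void iocb_completion(struct iocb *io)
{
	if (io->wake_tmo) {
		/*
		 * The synchronous waiter already timed out and returned;
		 * ownership of the iocb now belongs to this path.
		 */
		if (io->wait_iocb_cmpl)
			io->wait_iocb_cmpl(io);	/* caller-supplied cleanup */
		else
			free(io);		/* default: just release it */
		return;
	}
	io->wake = true;	/* normal case: waiter wakes up and frees it */
}

static void my_timeout_cleanup(struct iocb *io)
{
	printf("late completion: running caller cleanup\n");
	free(io);
}

int main(void)
{
	struct iocb *io = calloc(1, sizeof(*io));

	if (!io)
		return 1;
	io->wait_iocb_cmpl = my_timeout_cleanup;
	io->wake_tmo = true;	/* pretend the wait already expired */
	iocb_completion(io);	/* cleanup runs exactly once, here  */
	return 0;
}
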
@@ -9980,6 +10015,9 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
int txq_cnt = 0;
int txcmplq_cnt = 0;
struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+ unsigned long iflags;
+ bool iocb_completed = true;
+
/*
* If the caller has provided a response iocbq buffer, then context2
* is NULL or its an error.
@@ -9990,9 +10028,10 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
piocb->context2 = prspiocbq;
}
+ piocb->wait_iocb_cmpl = piocb->iocb_cmpl;
piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
piocb->context_un.wait_queue = &done_q;
- piocb->iocb_flag &= ~LPFC_IO_WAKE;
+ piocb->iocb_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
if (lpfc_readl(phba->HCregaddr, &creg_val))
@@ -10009,8 +10048,19 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
timeleft = wait_event_timeout(done_q,
lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
timeout_req);
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
- if (piocb->iocb_flag & LPFC_IO_WAKE) {
+ /*
+ * IOCB timed out. Inform the wake iocb wait
+ * completion function and set local status
+ */
+
+ iocb_completed = false;
+ piocb->iocb_flag |= LPFC_IO_WAKE_TMO;
+ }
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+ if (iocb_completed) {
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"0331 IOCB wake signaled\n");
} else if (timeleft == 0) {
@@ -10122,7 +10172,6 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
*/
if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
retval = MBX_SUCCESS;
- lpfc_sli4_swap_str(phba, pmboxq);
} else {
retval = MBX_TIMEOUT;
pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -12820,10 +12869,44 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
wq->page_count);
bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
cq->queue_id);
+
+ /* wqv is the earliest version supported, NOT the latest */
bf_set(lpfc_mbox_hdr_version, &shdr->request,
phba->sli4_hba.pc_sli4_params.wqv);
- if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
+ switch (phba->sli4_hba.pc_sli4_params.wqv) {
+ case LPFC_Q_CREATE_VERSION_0:
+ switch (wq->entry_size) {
+ default:
+ case 64:
+ /* Nothing to do, version 0 ONLY supports 64 byte */
+ page = wq_create->u.request.page;
+ break;
+ case 128:
+ if (!(phba->sli4_hba.pc_sli4_params.wqsize &
+ LPFC_WQ_SZ128_SUPPORT)) {
+ status = -ERANGE;
+ goto out;
+ }
+ /* If we get here the HBA MUST also support V1 and
+ * we MUST use it
+ */
+ bf_set(lpfc_mbox_hdr_version, &shdr->request,
+ LPFC_Q_CREATE_VERSION_1);
+
+ bf_set(lpfc_mbx_wq_create_wqe_count,
+ &wq_create->u.request_1, wq->entry_count);
+ bf_set(lpfc_mbx_wq_create_wqe_size,
+ &wq_create->u.request_1,
+ LPFC_WQ_WQE_SIZE_128);
+ bf_set(lpfc_mbx_wq_create_page_size,
+ &wq_create->u.request_1,
+ (PAGE_SIZE/SLI4_PAGE_SIZE));
+ page = wq_create->u.request_1.page;
+ break;
+ }
+ break;
+ case LPFC_Q_CREATE_VERSION_1:
bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
wq->entry_count);
switch (wq->entry_size) {
@@ -12834,6 +12917,11 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
LPFC_WQ_WQE_SIZE_64);
break;
case 128:
+ if (!(phba->sli4_hba.pc_sli4_params.wqsize &
+ LPFC_WQ_SZ128_SUPPORT)) {
+ status = -ERANGE;
+ goto out;
+ }
bf_set(lpfc_mbx_wq_create_wqe_size,
&wq_create->u.request_1,
LPFC_WQ_WQE_SIZE_128);
@@ -12842,9 +12930,12 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
bf_set(lpfc_mbx_wq_create_page_size, &wq_create->u.request_1,
(PAGE_SIZE/SLI4_PAGE_SIZE));
page = wq_create->u.request_1.page;
- } else {
- page = wq_create->u.request.page;
+ break;
+ default:
+ status = -ERANGE;
+ goto out;
}
+
list_for_each_entry(dmabuf, &wq->page_list, list) {
memset(dmabuf->virt, 0, hw_page_size);
page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
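
The expanded switch in lpfc_wq_create() above boils down to one decision: 64-byte work-queue entries can be created with whatever mailbox version the firmware advertises as its earliest, while 128-byte entries additionally require the SZ128 capability bit and always use the version-1 create request. A toy helper capturing just that selection; the constant value is illustrative.

#include <stdio.h>

#define SZ128_SUPPORT	0x2	/* stand-in for LPFC_WQ_SZ128_SUPPORT */

/* Return the WQ_CREATE mailbox version to use, or -1 if unsupported. */
static int pick_wq_create_version(int earliest_version, int entry_size,
				  int wqsize_caps)
{
	if (entry_size == 64)
		return earliest_version;	/* both V0 and V1 handle 64B   */
	if (entry_size != 128 || !(wqsize_caps & SZ128_SUPPORT))
		return -1;			/* -ERANGE in the driver       */
	return 1;				/* 128B entries always need V1 */
}

int main(void)
{
	printf("%d\n", pick_wq_create_version(0, 64, 0));		/*  0 */
	printf("%d\n", pick_wq_create_version(0, 128, SZ128_SUPPORT));	/*  1 */
	printf("%d\n", pick_wq_create_version(1, 128, 0));		/* -1 */
	return 0;
}
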
@@ -14665,14 +14756,20 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
first_iocbq->iocb.unsli3.rcvsli3.vpi =
vport->phba->vpi_ids[vport->vpi];
/* put the first buffer into the first IOCBq */
+ tot_len = bf_get(lpfc_rcqe_length,
+ &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+
first_iocbq->context2 = &seq_dmabuf->dbuf;
first_iocbq->context3 = NULL;
first_iocbq->iocb.ulpBdeCount = 1;
- first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+ if (tot_len > LPFC_DATA_BUF_SIZE)
+ first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
+ else
+ first_iocbq->iocb.un.cont64[0].tus.f.bdeSize = tot_len;
+
first_iocbq->iocb.un.rcvels.remoteID = sid;
- tot_len = bf_get(lpfc_rcqe_length,
- &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
+
first_iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
}
iocbq = first_iocbq;
@@ -14688,14 +14785,17 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
if (!iocbq->context3) {
iocbq->context3 = d_buf;
iocbq->iocb.ulpBdeCount++;
- pbde = (struct ulp_bde64 *)
- &iocbq->iocb.unsli3.sli3Words[4];
- pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
-
/* We need to get the size out of the right CQE */
hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
len = bf_get(lpfc_rcqe_length,
&hbq_buf->cq_event.cqe.rcqe_cmpl);
+ pbde = (struct ulp_bde64 *)
+ &iocbq->iocb.unsli3.sli3Words[4];
+ if (len > LPFC_DATA_BUF_SIZE)
+ pbde->tus.f.bdeSize = LPFC_DATA_BUF_SIZE;
+ else
+ pbde->tus.f.bdeSize = len;
+
iocbq->iocb.unsli3.rcvsli3.acc_len += len;
tot_len += len;
} else {
@@ -14710,16 +14810,19 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
lpfc_in_buf_free(vport->phba, d_buf);
continue;
}
+ /* We need to get the size out of the right CQE */
+ hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+ len = bf_get(lpfc_rcqe_length,
+ &hbq_buf->cq_event.cqe.rcqe_cmpl);
iocbq->context2 = d_buf;
iocbq->context3 = NULL;
iocbq->iocb.ulpBdeCount = 1;
- iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+ if (len > LPFC_DATA_BUF_SIZE)
+ iocbq->iocb.un.cont64[0].tus.f.bdeSize =
LPFC_DATA_BUF_SIZE;
+ else
+ iocbq->iocb.un.cont64[0].tus.f.bdeSize = len;
- /* We need to get the size out of the right CQE */
- hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
- len = bf_get(lpfc_rcqe_length,
- &hbq_buf->cq_event.cqe.rcqe_cmpl);
tot_len += len;
iocbq->iocb.unsli3.rcvsli3.acc_len = tot_len;
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 9d2e0c6fe33..97617996206 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2007 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -60,7 +60,8 @@ struct lpfc_iocbq {
uint8_t retry; /* retry counter for IOCB cmd - if needed */
uint16_t iocb_flag;
#define LPFC_IO_LIBDFC 1 /* libdfc iocb */
-#define LPFC_IO_WAKE 2 /* High Priority Queue signal flag */
+#define LPFC_IO_WAKE 2 /* Synchronous I/O completed */
+#define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */
#define LPFC_IO_FCP 4 /* FCP command -- iocbq in scsi_buf */
#define LPFC_DRIVER_ABORTED 8 /* driver aborted this request */
#define LPFC_IO_FABRIC 0x10 /* Iocb send using fabric scheduler */
@@ -93,6 +94,8 @@ struct lpfc_iocbq {
void (*fabric_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
+ void (*wait_iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
+ struct lpfc_iocbq *);
void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
struct lpfc_iocbq *);
};
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index d710b87a441..5bcc38223ac 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -117,6 +117,7 @@ union sli4_qe {
struct lpfc_rcqe_complete *rcqe_complete;
struct lpfc_mqe *mqe;
union lpfc_wqe *wqe;
+ union lpfc_wqe128 *wqe128;
struct lpfc_rqe *rqe;
};
@@ -325,12 +326,14 @@ struct lpfc_bmbx {
#define LPFC_EQE_SIZE_16B 16
#define LPFC_CQE_SIZE 16
#define LPFC_WQE_SIZE 64
+#define LPFC_WQE128_SIZE 128
#define LPFC_MQE_SIZE 256
#define LPFC_RQE_SIZE 8
#define LPFC_EQE_DEF_COUNT 1024
#define LPFC_CQE_DEF_COUNT 1024
#define LPFC_WQE_DEF_COUNT 256
+#define LPFC_WQE128_DEF_COUNT 128
#define LPFC_MQE_DEF_COUNT 16
#define LPFC_RQE_DEF_COUNT 512
@@ -416,6 +419,9 @@ struct lpfc_pc_sli4_params {
uint8_t mqv;
uint8_t wqv;
uint8_t rqv;
+ uint8_t wqsize;
+#define LPFC_WQ_SZ64_SUPPORT 1
+#define LPFC_WQ_SZ128_SUPPORT 2
};
struct lpfc_iov {
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c6c32eebf3d..21859d2006c 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.40"
+#define LPFC_DRIVER_VERSION "8.3.41"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index e28e431564b..a87ee33f4f2 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2008 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2013 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
@@ -387,6 +387,9 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
/* Create binary sysfs attribute for vport */
lpfc_alloc_sysfs_attr(vport);
+ /* Set the DFT_LUN_Q_DEPTH accordingly */
+ vport->cfg_lun_queue_depth = phba->pport->cfg_lun_queue_depth;
+
*(struct lpfc_vport **)fc_vport->dd_data = vport;
vport->fc_vport = fc_vport;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 0177295599e..1f0ca68409d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3547,11 +3547,21 @@ static int megasas_init_fw(struct megasas_instance *instance)
break;
}
- /*
- * We expect the FW state to be READY
- */
- if (megasas_transition_to_ready(instance, 0))
- goto fail_ready_state;
+ if (megasas_transition_to_ready(instance, 0)) {
+ atomic_set(&instance->fw_reset_no_pci_access, 1);
+ instance->instancet->adp_reset
+ (instance, instance->reg_set);
+ atomic_set(&instance->fw_reset_no_pci_access, 0);
+ dev_info(&instance->pdev->dev,
+ "megasas: FW restarted successfully from %s!\n",
+ __func__);
+
+		/* wait about 30 seconds before retrying */
+ ssleep(30);
+
+ if (megasas_transition_to_ready(instance, 0))
+ goto fail_ready_state;
+ }
/*
* MSI-X host index 0 is common for all adapter.
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 31b5b15a472..7b14a015c90 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2012 LSI Corporation.
+ * Copyright (c) 2000-2013 LSI Corporation.
*
*
* Name: mpi2.h
@@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.27
+ * mpi2.h Version: 02.00.28
*
* Version History
* ---------------
@@ -77,6 +77,7 @@
* Added Hard Reset delay timings.
* 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT.
* 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -102,7 +103,7 @@
#define MPI2_VERSION_02_00 (0x0200)
/* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x1B)
+#define MPI2_HEADER_VERSION_UNIT (0x1C)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index 737fa8cfb54..88cb7f828bb 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -1,12 +1,12 @@
/*
- * Copyright (c) 2000-2011 LSI Corporation.
+ * Copyright (c) 2000-2013 LSI Corporation.
*
*
* Name: mpi2_cnfg.h
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.22
+ * mpi2_cnfg.h Version: 02.00.23
*
* Version History
* ---------------
@@ -149,6 +149,8 @@
* 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT.
* Added UEFIVersion field to BIOS Page 1 and defined new
* BiosOptions bits.
+ * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER.
+ * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID.
* --------------------------------------------------------------------------
*/
@@ -698,6 +700,7 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7
#define MPI2_MANUFACTURING7_PAGEVERSION (0x01)
/* defines for the Flags field */
+#define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002)
#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001)
@@ -1224,6 +1227,9 @@ typedef struct _MPI2_CONFIG_PAGE_BIOS_1
#define MPI2_BIOSPAGE1_PAGEVERSION (0x05)
/* values for BIOS Page 1 BiosOptions field */
+#define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0)
+#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000)
+
#define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006)
#define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000)
#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index 963761fb846..9d284dae655 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2012 LSI Corporation.
+ * Copyright (c) 2000-2013 LSI Corporation.
*
*
* Name: mpi2_init.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index e93f8f53adf..d159c5f24aa 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2012 LSI Corporation.
+ * Copyright (c) 2000-2013 LSI Corporation.
*
*
* Name: mpi2_ioc.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
index 255b0ca219a..0d202a2c6db 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2012 LSI Corporation.
+ * Copyright (c) 2000-2013 LSI Corporation.
*
*
* Name: mpi2_raid.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
index fdffde1ebc0..50b39ccd526 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2010 LSI Corporation.
+ * Copyright (c) 2000-2013 LSI Corporation.
*
*
* Name: mpi2_sas.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 67c387f10e5..11b2ac4e7c6 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2012 LSI Corporation.
+ * Copyright (c) 2000-2013 LSI Corporation.
*
*
* Name: mpi2_tool.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_type.h b/drivers/scsi/mpt2sas/mpi/mpi2_type.h
index cfde017bf16..0b128b68a5e 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_type.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_type.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2007 LSI Corporation.
+ * Copyright (c) 2000-2013 LSI Corporation.
*
*
* Name: mpi2_type.h
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index ccd6d5a97ec..3901edc3581 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -3,7 +3,7 @@
* for access to MPT (Message Passing Technology) firmware.
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2012 LSI Corporation
+ * Copyright (C) 2007-2013 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -768,10 +768,9 @@ mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
* @msix_index: MSIX table index supplied by the OS
* @reply: reply message frame(lower 32bit addr)
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Returns void.
*/
-static u8
+static void
_base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
Mpi2EventNotificationReply_t *mpi_reply;
@@ -780,9 +779,9 @@ _base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
if (!mpi_reply)
- return 1;
+ return;
if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
- return 1;
+ return;
#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
_base_display_event_data(ioc, mpi_reply);
#endif
@@ -812,7 +811,7 @@ _base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
/* ctl callback handler */
mpt2sas_ctl_event_callback(ioc, msix_index, reply);
- return 1;
+ return;
}
/**
@@ -1409,8 +1408,6 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
int i;
u8 try_msix = 0;
- INIT_LIST_HEAD(&ioc->reply_queue_list);
-
if (msix_disable == -1 || msix_disable == 0)
try_msix = 1;
@@ -1489,6 +1486,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
if (pci_enable_device_mem(pdev)) {
printk(MPT2SAS_WARN_FMT "pci_enable_device_mem: "
"failed\n", ioc->name);
+ ioc->bars = 0;
return -ENODEV;
}
@@ -1497,6 +1495,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
MPT2SAS_DRIVER_NAME)) {
printk(MPT2SAS_WARN_FMT "pci_request_selected_regions: "
"failed\n", ioc->name);
+ ioc->bars = 0;
r = -ENODEV;
goto out_fail;
}
@@ -4229,18 +4228,25 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
__func__));
- _base_mask_interrupts(ioc);
- ioc->shost_recovery = 1;
- _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
- ioc->shost_recovery = 0;
+ if (ioc->chip_phys && ioc->chip) {
+ _base_mask_interrupts(ioc);
+ ioc->shost_recovery = 1;
+ _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+ ioc->shost_recovery = 0;
+ }
+
_base_free_irq(ioc);
_base_disable_msix(ioc);
- if (ioc->chip_phys)
+
+ if (ioc->chip_phys && ioc->chip)
iounmap(ioc->chip);
ioc->chip_phys = 0;
- pci_release_selected_regions(ioc->pdev, ioc->bars);
- pci_disable_pcie_error_reporting(pdev);
- pci_disable_device(pdev);
+
+ if (pci_is_enabled(pdev)) {
+ pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ }
return;
}
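The free_resources change above only unmaps and disables what was actually set up (ioc->chip, pci_is_enabled()), so teardown is safe after a partly failed probe. A small userspace analogue of that guarded-cleanup idea (struct and fields hypothetical):

#include <stdio.h>
#include <stdlib.h>

struct dev_state {
	void *mapped_regs;	/* NULL when the ioremap step never ran */
	int   enabled;		/* 0 when the device was never enabled */
};

/* Only undo the steps that succeeded; calling this twice is harmless. */
static void free_resources(struct dev_state *s)
{
	if (s->mapped_regs) {		/* mirrors the ioc->chip_phys && ioc->chip test */
		free(s->mapped_regs);
		s->mapped_regs = NULL;
	}
	if (s->enabled) {		/* mirrors the pci_is_enabled(pdev) test */
		s->enabled = 0;
		puts("device disabled");
	}
}

int main(void)
{
	struct dev_state s = { .mapped_regs = NULL, .enabled = 0 };
	free_resources(&s);	/* safe even though nothing was ever set up */
	return 0;
}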
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 6fbd0841777..1f2ac3a2862 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -3,7 +3,7 @@
* for access to MPT (Message Passing Technology) firmware.
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.h
- * Copyright (C) 2007-2012 LSI Corporation
+ * Copyright (C) 2007-2013 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -69,8 +69,8 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "15.100.00.00"
-#define MPT2SAS_MAJOR_VERSION 15
+#define MPT2SAS_DRIVER_VERSION "16.100.00.00"
+#define MPT2SAS_MAJOR_VERSION 16
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 00
#define MPT2SAS_RELEASE_VERSION 00
@@ -1061,7 +1061,7 @@ void mpt2sas_base_update_missing_delay(struct MPT2SAS_ADAPTER *ioc,
int mpt2sas_port_enable(struct MPT2SAS_ADAPTER *ioc);
/* scsih shared API */
-u8 mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
+void mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
int mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle,
uint channel, uint id, uint lun, u8 type, u16 smid_task,
@@ -1144,7 +1144,7 @@ void mpt2sas_ctl_exit(void);
u8 mpt2sas_ctl_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
u32 reply);
void mpt2sas_ctl_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase);
-u8 mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
+void mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
u32 reply);
void mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc,
Mpi2EventNotificationReply_t *mpi_reply);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index 863778071a9..0c47425c73f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -2,7 +2,7 @@
* This module provides common API for accessing firmware configuration pages
*
* This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2012 LSI Corporation
+ * Copyright (C) 2007-2013 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index eec052c2670..b7f887c9b0b 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -3,7 +3,7 @@
* controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c
- * Copyright (C) 2007-2012 LSI Corporation
+ * Copyright (C) 2007-2013 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -397,18 +397,22 @@ mpt2sas_ctl_add_to_event_log(struct MPT2SAS_ADAPTER *ioc,
* This function merely adds a new work task into ioc->firmware_event_thread.
* The tasks are worked from _firmware_event_work in user context.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Returns void.
*/
-u8
+void
mpt2sas_ctl_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
u32 reply)
{
Mpi2EventNotificationReply_t *mpi_reply;
mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
+ if (unlikely(!mpi_reply)) {
+ printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
mpt2sas_ctl_add_to_event_log(ioc, mpi_reply);
- return 1;
+ return;
}
/**
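As the kernel-doc above notes, these callbacks merely queue a work item and let _firmware_event_work run later in user context, which is why their return type drops to void. A hedged, self-contained sketch of that defer-to-workqueue pattern (demo names only, not the mpt2sas interfaces):

#include <linux/module.h>
#include <linux/workqueue.h>

static void demo_event_work(struct work_struct *work)
{
	pr_info("demo: handling event in process context\n");
}
static DECLARE_WORK(demo_work, demo_event_work);

/* Stand-in for an event callback: nothing to report back, just defer. */
static void demo_event_callback(void)
{
	schedule_work(&demo_work);
}

static int __init demo_init(void)
{
	demo_event_callback();
	return 0;
}
static void __exit demo_exit(void)
{
	flush_work(&demo_work);		/* make sure the deferred handler finished */
}
module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");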
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
index b5eb0d1b8ea..8b2ac1869dc 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -3,7 +3,7 @@
* controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h
- * Copyright (C) 2007-2012 LSI Corporation
+ * Copyright (C) 2007-2013 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h
index 69cc7d0c112..a9021cbd662 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_debug.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h
@@ -2,7 +2,7 @@
* Logging Support for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_debug.c
- * Copyright (C) 2007-2012 LSI Corporation
+ * Copyright (C) 2007-2013 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 51004768d0f..7f0af4fcc00 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2,7 +2,7 @@
* Scsi Host Layer for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c
- * Copyright (C) 2007-2012 LSI Corporation
+ * Copyright (C) 2007-2013 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -628,11 +628,12 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
* devices while scanning is turned on due to an oops in
* scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
*/
- if (!ioc->is_driver_loading)
+ if (!ioc->is_driver_loading) {
mpt2sas_transport_port_remove(ioc,
sas_device->sas_address,
sas_device->sas_address_parent);
- _scsih_sas_device_remove(ioc, sas_device);
+ _scsih_sas_device_remove(ioc, sas_device);
+ }
}
}
@@ -1402,6 +1403,7 @@ _scsih_slave_alloc(struct scsi_device *sdev)
struct MPT2SAS_DEVICE *sas_device_priv_data;
struct scsi_target *starget;
struct _raid_device *raid_device;
+ struct _sas_device *sas_device;
unsigned long flags;
sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL);
@@ -1430,6 +1432,19 @@ _scsih_slave_alloc(struct scsi_device *sdev)
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
+ if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_target_priv_data->sas_address);
+ if (sas_device && (sas_device->starget == NULL)) {
+ sdev_printk(KERN_INFO, sdev,
+ "%s : sas_device->starget set to starget @ %d\n",
+ __func__, __LINE__);
+ sas_device->starget = starget;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+
return 0;
}
@@ -6753,7 +6768,7 @@ _scsih_search_responding_sas_devices(struct MPT2SAS_ADAPTER *ioc)
handle))) {
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
break;
handle = le16_to_cpu(sas_device_pg0.DevHandle);
device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
@@ -6862,7 +6877,7 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc)
&volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
break;
handle = le16_to_cpu(volume_pg1.DevHandle);
@@ -6887,7 +6902,7 @@ _scsih_search_responding_raid_devices(struct MPT2SAS_ADAPTER *ioc)
phys_disk_num))) {
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
break;
phys_disk_num = pd_pg0.PhysDiskNum;
handle = le16_to_cpu(pd_pg0.DevHandle);
@@ -6967,7 +6982,7 @@ _scsih_search_responding_expanders(struct MPT2SAS_ADAPTER *ioc)
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
break;
handle = le16_to_cpu(expander_pg0.DevHandle);
@@ -7109,8 +7124,6 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
- break;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
printk(MPT2SAS_INFO_FMT "\tbreak from expander scan: "
"ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -7153,8 +7166,6 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
phys_disk_num))) {
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
- break;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
printk(MPT2SAS_INFO_FMT "\tbreak from phys disk scan:"
"ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -7219,8 +7230,6 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
&volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
- break;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
printk(MPT2SAS_INFO_FMT "\tbreak from volume scan: "
"ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -7278,8 +7287,6 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
handle))) {
ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
MPI2_IOCSTATUS_MASK;
- if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
- break;
if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
printk(MPT2SAS_INFO_FMT "\tbreak from end device scan:"
" ioc_status(0x%04x), loginfo(0x%08x)\n",
@@ -7471,10 +7478,9 @@ _firmware_event_work(struct work_struct *work)
* This function merely adds a new work task into ioc->firmware_event_thread.
* The tasks are worked from _firmware_event_work in user context.
*
- * Return 1 meaning mf should be freed from _base_interrupt
- * 0 means the mf is freed from this function.
+ * Returns void.
*/
-u8
+void
mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
u32 reply)
{
@@ -7485,14 +7491,14 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
/* events turned off due to host reset or driver unloading */
if (ioc->remove_host || ioc->pci_error_recovery)
- return 1;
+ return;
mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
if (unlikely(!mpi_reply)) {
printk(MPT2SAS_ERR_FMT "mpi_reply not valid at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
- return 1;
+ return;
}
event = le16_to_cpu(mpi_reply->Event);
@@ -7507,11 +7513,11 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
if (baen_data->Primitive !=
MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
- return 1;
+ return;
if (ioc->broadcast_aen_busy) {
ioc->broadcast_aen_pending++;
- return 1;
+ return;
} else
ioc->broadcast_aen_busy = 1;
break;
@@ -7587,14 +7593,14 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
break;
default: /* ignore the rest */
- return 1;
+ return;
}
fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
if (!fw_event) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
- return 1;
+ return;
}
sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
fw_event->event_data = kzalloc(sz, GFP_ATOMIC);
@@ -7602,7 +7608,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
kfree(fw_event);
- return 1;
+ return;
}
memcpy(fw_event->event_data, mpi_reply->EventData,
@@ -7612,7 +7618,7 @@ mpt2sas_scsih_event_callback(struct MPT2SAS_ADAPTER *ioc, u8 msix_index,
fw_event->VP_ID = mpi_reply->VP_ID;
fw_event->event = event;
_scsih_fw_event_add(ioc, fw_event);
- return 1;
+ return;
}
/* shost template */
@@ -7711,10 +7717,6 @@ _scsih_ir_shutdown(struct MPT2SAS_ADAPTER *ioc)
if (!ioc->ir_firmware)
return;
- /* are there any volumes ? */
- if (list_empty(&ioc->raid_device_list))
- return;
-
mutex_lock(&ioc->scsih_cmds.mutex);
if (ioc->scsih_cmds.status != MPT2_CMD_NOT_USED) {
@@ -7929,10 +7931,12 @@ _scsih_probe_boot_devices(struct MPT2SAS_ADAPTER *ioc)
sas_device->sas_address_parent)) {
_scsih_sas_device_remove(ioc, sas_device);
} else if (!sas_device->starget) {
- if (!ioc->is_driver_loading)
- mpt2sas_transport_port_remove(ioc, sas_address,
+ if (!ioc->is_driver_loading) {
+ mpt2sas_transport_port_remove(ioc,
+ sas_address,
sas_address_parent);
- _scsih_sas_device_remove(ioc, sas_device);
+ _scsih_sas_device_remove(ioc, sas_device);
+ }
}
}
}
@@ -7985,14 +7989,14 @@ _scsih_probe_sas(struct MPT2SAS_ADAPTER *ioc)
kfree(sas_device);
continue;
} else if (!sas_device->starget) {
- if (!ioc->is_driver_loading)
+ if (!ioc->is_driver_loading) {
mpt2sas_transport_port_remove(ioc,
sas_device->sas_address,
sas_device->sas_address_parent);
- list_del(&sas_device->list);
- kfree(sas_device);
- continue;
-
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ continue;
+ }
}
spin_lock_irqsave(&ioc->sas_device_lock, flags);
list_move_tail(&sas_device->list, &ioc->sas_device_list);
@@ -8175,6 +8179,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
INIT_LIST_HEAD(&ioc->delayed_tr_list);
INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
+ INIT_LIST_HEAD(&ioc->reply_queue_list);
/* init shost parameters */
shost->max_cmd_len = 32;
@@ -8280,6 +8285,7 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
mpt2sas_base_stop_watchdog(ioc);
scsi_block_requests(shost);
+ _scsih_ir_shutdown(ioc);
device_state = pci_choose_state(pdev, state);
printk(MPT2SAS_INFO_FMT "pdev=0x%p, slot=%s, entering "
"operating state [D%d]\n", ioc->name, pdev,
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 193e7ae90c3..9d26637308b 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -2,7 +2,7 @@
* SAS Transport Layer for MPT (Message Passing Technology) based controllers
*
* This code is based on drivers/scsi/mpt2sas/mpt2_transport.c
- * Copyright (C) 2007-2012 LSI Corporation
+ * Copyright (C) 2007-2013 LSI Corporation
* (mailto:DL-MPTFusionLinux@lsi.com)
*
* This program is free software; you can redistribute it and/or
@@ -1006,9 +1006,12 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
&mpt2sas_phy->remote_identify);
_transport_add_phy_to_an_existing_port(ioc, sas_node,
mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address);
- } else
+ } else {
memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct
sas_identify));
+ _transport_del_phy_from_an_existing_port(ioc, sas_node,
+ mpt2sas_phy);
+ }
if (mpt2sas_phy->phy)
mpt2sas_phy->phy->negotiated_linkrate =
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 5dc280c7532..fa785062e97 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -82,6 +82,10 @@ static int msix_disable = -1;
module_param(msix_disable, int, 0);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
+static int max_msix_vectors = 8;
+module_param(max_msix_vectors, int, 0);
+MODULE_PARM_DESC(max_msix_vectors,
+ " max msix vectors - (default=8)");
static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
@@ -1709,8 +1713,6 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
int i;
u8 try_msix = 0;
- INIT_LIST_HEAD(&ioc->reply_queue_list);
-
if (msix_disable == -1 || msix_disable == 0)
try_msix = 1;
@@ -1723,6 +1725,16 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
ioc->reply_queue_count = min_t(int, ioc->cpu_count,
ioc->msix_vector_count);
+ printk(MPT3SAS_FMT "MSI-X vectors supported: %d, number of cores"
+ ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
+ ioc->cpu_count, max_msix_vectors);
+
+ if (max_msix_vectors > 0) {
+ ioc->reply_queue_count = min_t(int, max_msix_vectors,
+ ioc->reply_queue_count);
+ ioc->msix_vector_count = ioc->reply_queue_count;
+ }
+
entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
GFP_KERNEL);
if (!entries) {
@@ -1790,6 +1802,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
if (pci_enable_device_mem(pdev)) {
pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
ioc->name);
+ ioc->bars = 0;
return -ENODEV;
}
@@ -1798,6 +1811,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
MPT3SAS_DRIVER_NAME)) {
pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
ioc->name);
+ ioc->bars = 0;
r = -ENODEV;
goto out_fail;
}
@@ -4393,18 +4407,25 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
__func__));
- _base_mask_interrupts(ioc);
- ioc->shost_recovery = 1;
- _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
- ioc->shost_recovery = 0;
+ if (ioc->chip_phys && ioc->chip) {
+ _base_mask_interrupts(ioc);
+ ioc->shost_recovery = 1;
+ _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+ ioc->shost_recovery = 0;
+ }
+
_base_free_irq(ioc);
_base_disable_msix(ioc);
- if (ioc->chip_phys)
+
+ if (ioc->chip_phys && ioc->chip)
iounmap(ioc->chip);
ioc->chip_phys = 0;
- pci_release_selected_regions(ioc->pdev, ioc->bars);
- pci_disable_pcie_error_reporting(pdev);
- pci_disable_device(pdev);
+
+ if (pci_is_enabled(pdev)) {
+ pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ }
return;
}
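The mpt3sas hunks above size the reply queues as min(cores, hardware MSI-X vectors) and then cap the result with the new max_msix_vectors module parameter when it is positive. A minimal userspace sketch of that clamp:

#include <stdio.h>

/* Mirror of the clamp above: queues = min(cores, HW vectors), optionally
 * capped by max_msix_vectors when the parameter is > 0. */
static int reply_queue_count(int cpu_count, int hw_vectors, int max_msix_vectors)
{
	int count = cpu_count < hw_vectors ? cpu_count : hw_vectors;

	if (max_msix_vectors > 0 && max_msix_vectors < count)
		count = max_msix_vectors;
	return count;
}

int main(void)
{
	printf("%d\n", reply_queue_count(16, 96, 8));	/* capped -> 8 */
	printf("%d\n", reply_queue_count(4, 96, 8));	/* cores limit -> 4 */
	printf("%d\n", reply_queue_count(16, 96, 0));	/* cap disabled -> 16 */
	return 0;
}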
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 8cbe8fd21fc..a961fe11b52 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -7779,6 +7779,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
INIT_LIST_HEAD(&ioc->delayed_tr_list);
INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
+ INIT_LIST_HEAD(&ioc->reply_queue_list);
/* init shost parameters */
shost->max_cmd_len = 32;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index dcadd56860f..e771a88c6a7 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -1003,9 +1003,12 @@ mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
&mpt3sas_phy->remote_identify);
_transport_add_phy_to_an_existing_port(ioc, sas_node,
mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address);
- } else
+ } else {
memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct
sas_identify));
+ _transport_del_phy_from_an_existing_port(ioc, sas_node,
+ mpt3sas_phy);
+ }
if (mpt3sas_phy->phy)
mpt3sas_phy->phy->negotiated_linkrate =
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index f14665a6293..6b1b4e91e53 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1857,11 +1857,16 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
goto out;
}
- /* error info record present */
- if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
+ /*
+ * error info record present; slot->response is 32 bit aligned but may
+ * not be 64 bit aligned, so check for zero in two 32 bit reads
+ */
+ if (unlikely((rx_desc & RXQ_ERR)
+ && (*((u32 *)slot->response)
+ || *(((u32 *)slot->response) + 1)))) {
mv_dprintk("port %d slot %d rx_desc %X has error info"
"%016llX.\n", slot->port->sas_port.id, slot_idx,
- rx_desc, (u64)(*(u64 *)slot->response));
+ rx_desc, get_unaligned_le64(slot->response));
tstat->stat = mvs_slot_err(mvi, task, slot_idx);
tstat->resp = SAS_TASK_COMPLETE;
goto out;
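The comment in the hunk above explains the technique: slot->response is only 32-bit aligned, so the zero check is done as two 32-bit reads and the value is printed through get_unaligned_le64(). A portable userspace sketch of the same idea (little-endian host assumed):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Stand-in for get_unaligned_le64(): memcpy imposes no alignment
 * requirement on the source pointer. */
static uint64_t read_unaligned_le64(const void *p)
{
	uint64_t v;
	memcpy(&v, p, sizeof(v));
	return v;
}

int main(void)
{
	/* A response area that is 32-bit aligned but not necessarily
	 * 64-bit aligned, as described above. */
	uint32_t backing[4] = { 0, 0xdeadbeef, 0, 0 };
	const uint32_t *response = &backing[1];

	/* Two 32-bit reads instead of a single 64-bit dereference. */
	if (response[0] || response[1])
		printf("error info %016llx\n",
		       (unsigned long long)read_unaligned_le64(response));
	return 0;
}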
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index 60e2fb7f2dc..d6b19dc80be 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -39,6 +39,7 @@
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#include <asm/unaligned.h>
#include <scsi/libsas.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index 9d86947d67f..e1d9a4c4c4b 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -107,6 +107,7 @@ static ssize_t osdname_show(struct device *dev, struct device_attribute *attr,
class_dev);
return sprintf(buf, "%s\n", ould->odi.osdname);
}
+static DEVICE_ATTR_RO(osdname);
static ssize_t systemid_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -117,17 +118,19 @@ static ssize_t systemid_show(struct device *dev, struct device_attribute *attr,
memcpy(buf, ould->odi.systemid, ould->odi.systemid_len);
return ould->odi.systemid_len;
}
+static DEVICE_ATTR_RO(systemid);
-static struct device_attribute osd_uld_attrs[] = {
- __ATTR(osdname, S_IRUGO, osdname_show, NULL),
- __ATTR(systemid, S_IRUGO, systemid_show, NULL),
- __ATTR_NULL,
+static struct attribute *osd_uld_attrs[] = {
+ &dev_attr_osdname.attr,
+ &dev_attr_systemid.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(osd_uld);
static struct class osd_uld_class = {
.owner = THIS_MODULE,
.name = "scsi_osd",
- .dev_attrs = osd_uld_attrs,
+ .dev_groups = osd_uld_groups,
};
/*
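The osd_uld hunk above converts open-coded __ATTR entries into DEVICE_ATTR_RO()/ATTRIBUTE_GROUPS() and switches the class from .dev_attrs to .dev_groups. A hedged sketch of that conversion for a hypothetical "demo" class (not the osd code):

#include <linux/module.h>
#include <linux/device.h>

/* DEVICE_ATTR_RO(label) expects label_show() and generates dev_attr_label. */
static ssize_t label_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "demo\n");
}
static DEVICE_ATTR_RO(label);

static struct attribute *demo_attrs[] = {
	&dev_attr_label.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo);			/* generates demo_groups[] */

static struct class demo_class = {
	.name		= "demo",
	.owner		= THIS_MODULE,
	.dev_groups	= demo_groups,	/* replaces the removed .dev_attrs */
};

static int __init demo_init(void)
{
	return class_register(&demo_class);
}
static void __exit demo_exit(void)
{
	class_unregister(&demo_class);
}
module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");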
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 5456f5c7359..4a219575219 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -221,7 +221,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01;
for (i = 0; i < PM8001_MAX_INB_NUM; i++) {
pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt =
- PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
+ PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
pm8001_ha->inbnd_q_tbl[i].upper_base_addr =
pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
pm8001_ha->inbnd_q_tbl[i].lower_base_addr =
@@ -247,7 +247,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
}
for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) {
pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
- PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
+ PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
pm8001_ha->outbnd_q_tbl[i].upper_base_addr =
pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
pm8001_ha->outbnd_q_tbl[i].lower_base_addr =
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 3861aa1f452..f7c189606b8 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -424,7 +424,8 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
PM8001_INIT_DBG(pm8001_ha, pm8001_printk(
"base addr %llx virt_addr=%llx len=%d\n",
(u64)pm8001_ha->io_mem[logicalBar].membase,
- (u64)pm8001_ha->io_mem[logicalBar].memvirtaddr,
+ (u64)(unsigned long)
+ pm8001_ha->io_mem[logicalBar].memvirtaddr,
pm8001_ha->io_mem[logicalBar].memsize));
} else {
pm8001_ha->io_mem[logicalBar].membase = 0;
@@ -734,7 +735,7 @@ static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha)
pdev = pm8001_ha->pdev;
#ifdef PM8001_USE_MSIX
- if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
+ if (pdev->msix_cap)
return pm8001_setup_msix(pm8001_ha);
else {
PM8001_INIT_DBG(pm8001_ha,
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 7f77210f5cf..9f91030211e 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -275,7 +275,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) {
pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt =
- PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30);
+ PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x00<<30);
pm8001_ha->inbnd_q_tbl[i].upper_base_addr =
pm8001_ha->memoryMap.region[IB + i].phys_addr_hi;
pm8001_ha->inbnd_q_tbl[i].lower_base_addr =
@@ -301,7 +301,7 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
}
for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) {
pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
- PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30);
+ PM8001_MPI_QUEUE | (pm8001_ha->iomb_size << 16) | (0x01<<30);
pm8001_ha->outbnd_q_tbl[i].upper_base_addr =
pm8001_ha->memoryMap.region[OB + i].phys_addr_hi;
pm8001_ha->outbnd_q_tbl[i].lower_base_addr =
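The pm8001/pm80xx hunks above stop hard-coding a 64-byte IOMB size and instead pack pm8001_ha->iomb_size into bits 16+ of the queue configuration word, with the direction flag in bit 30. A small userspace sketch of that bit packing (PM8001_MPI_QUEUE value assumed, not taken from the driver):

#include <stdio.h>
#include <stdint.h>

#define MPI_QUEUE_DEPTH 1024U	/* assumed stand-in for PM8001_MPI_QUEUE */

/* Pack queue depth, IOMB element size and a direction flag the way the
 * inbound/outbound table words above are built. */
static uint32_t pack_q_word(uint32_t depth, uint32_t iomb_size, uint32_t flag30)
{
	return depth | (iomb_size << 16) | (flag30 << 30);
}

int main(void)
{
	printf("SPC  (64-byte IOMB):  0x%08x\n", pack_q_word(MPI_QUEUE_DEPTH, 64, 0));
	printf("SPCv (128-byte IOMB): 0x%08x\n", pack_q_word(MPI_QUEUE_DEPTH, 128, 1));
	return 0;
}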
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index c37b244cf8a..ff0fc7c7812 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,6 +1,6 @@
qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
- qla_nx.o qla_mr.o qla_target.o
+ qla_nx.o qla_mr.o qla_nx2.o qla_target.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index d7a99ae7f39..5f174b83f56 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -29,7 +29,7 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
if (!(ha->fw_dump_reading || ha->mctp_dump_reading))
return 0;
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
if (off < ha->md_template_size) {
rval = memory_read_from_buffer(buf, count,
&off, ha->md_tmplt_hdr, ha->md_template_size);
@@ -71,7 +71,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
ql_log(ql_log_info, vha, 0x705d,
"Firmware dump cleared on (%ld).\n", vha->host_no);
- if (IS_QLA82XX(vha->hw)) {
+ if (IS_P3P_TYPE(ha)) {
qla82xx_md_free(vha);
qla82xx_md_prep(vha);
}
@@ -95,11 +95,15 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
qla82xx_idc_lock(ha);
qla82xx_set_reset_owner(vha);
qla82xx_idc_unlock(ha);
+ } else if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ qla82xx_set_reset_owner(vha);
+ qla8044_idc_unlock(ha);
} else
qla2x00_system_error(vha);
break;
case 4:
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
if (ha->md_tmplt_hdr)
ql_dbg(ql_dbg_user, vha, 0x705b,
"MiniDump supported with this firmware.\n");
@@ -109,7 +113,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
}
break;
case 5:
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
case 6:
@@ -586,7 +590,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
int type;
uint32_t idc_control;
-
+ uint8_t *tmp_data = NULL;
if (off != 0)
return -EINVAL;
@@ -597,14 +601,23 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
"Issuing ISP reset.\n");
scsi_block_requests(vha->host);
- set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
if (IS_QLA82XX(ha)) {
ha->flags.isp82xx_no_md_cap = 1;
qla82xx_idc_lock(ha);
qla82xx_set_reset_owner(vha);
qla82xx_idc_unlock(ha);
+ } else if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ idc_control = qla8044_rd_reg(ha,
+ QLA8044_IDC_DRV_CTRL);
+ qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
+ (idc_control | GRACEFUL_RESET_BIT1));
+ qla82xx_set_reset_owner(vha);
+ qla8044_idc_unlock(ha);
+ } else {
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
}
- qla2xxx_wake_dpc(vha);
qla2x00_wait_for_chip_reset(vha);
scsi_unblock_requests(vha->host);
break;
@@ -640,7 +653,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
break;
}
case 0x2025e:
- if (!IS_QLA82XX(ha) || vha != base_vha) {
+ if (!IS_P3P_TYPE(ha) || vha != base_vha) {
ql_log(ql_log_info, vha, 0x7071,
"FCoE ctx reset no supported.\n");
return -EPERM;
@@ -674,7 +687,19 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
__qla83xx_set_idc_control(vha, idc_control);
qla83xx_idc_unlock(vha, 0);
break;
-
+ case 0x20261:
+ ql_dbg(ql_dbg_user, vha, 0x70e0,
+ "Updating cache versions without reset ");
+
+ tmp_data = vmalloc(256);
+ if (!tmp_data) {
+ ql_log(ql_log_warn, vha, 0x70e1,
+ "Unable to allocate memory for VPD information update.\n");
+ return -ENOMEM;
+ }
+ ha->isp_ops->get_flash_version(vha, tmp_data);
+ vfree(tmp_data);
+ break;
}
return count;
}
@@ -1212,7 +1237,7 @@ qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
return snprintf(buf, PAGE_SIZE, "\n");
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
@@ -1265,10 +1290,7 @@ qla2x00_vn_port_mac_address_show(struct device *dev,
if (!IS_CNA_CAPABLE(vha->hw))
return snprintf(buf, PAGE_SIZE, "\n");
- return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
- vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
- vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
- vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
+ return snprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
}
static ssize_t
@@ -1287,12 +1309,6 @@ qla2x00_thermal_temp_show(struct device *dev,
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
uint16_t temp = 0;
- if (!vha->hw->thermal_support) {
- ql_log(ql_log_warn, vha, 0x70db,
- "Thermal not supported by this card.\n");
- goto done;
- }
-
if (qla2x00_reset_active(vha)) {
ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
goto done;
@@ -1725,11 +1741,21 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
pfc_host_stat->lip_count = stats->lip_cnt;
pfc_host_stat->tx_frames = stats->tx_frames;
pfc_host_stat->rx_frames = stats->rx_frames;
- pfc_host_stat->dumped_frames = stats->dumped_frames;
+ pfc_host_stat->dumped_frames = stats->discarded_frames;
pfc_host_stat->nos_count = stats->nos_rcvd;
+ pfc_host_stat->error_frames =
+ stats->dropped_frames + stats->discarded_frames;
+ pfc_host_stat->rx_words = vha->qla_stats.input_bytes;
+ pfc_host_stat->tx_words = vha->qla_stats.output_bytes;
}
+ pfc_host_stat->fcp_control_requests = vha->qla_stats.control_requests;
+ pfc_host_stat->fcp_input_requests = vha->qla_stats.input_requests;
+ pfc_host_stat->fcp_output_requests = vha->qla_stats.output_requests;
pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
+ pfc_host_stat->seconds_since_last_reset =
+ get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
+ do_div(pfc_host_stat->seconds_since_last_reset, HZ);
done_free:
dma_pool_free(ha->s_dma_pool, stats, stats_dma);
@@ -1738,6 +1764,16 @@ done:
}
static void
+qla2x00_reset_host_stats(struct Scsi_Host *shost)
+{
+ scsi_qla_host_t *vha = shost_priv(shost);
+
+ memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
+
+ vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
+}
+
+static void
qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
{
scsi_qla_host_t *vha = shost_priv(shost);
@@ -2043,6 +2079,7 @@ struct fc_function_template qla2xxx_transport_functions = {
.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
.terminate_rport_io = qla2x00_terminate_rport_io,
.get_fc_host_stats = qla2x00_get_fc_host_stats,
+ .reset_fc_host_stats = qla2x00_reset_host_stats,
.vport_create = qla24xx_vport_create,
.vport_disable = qla24xx_vport_disable,
@@ -2089,6 +2126,8 @@ struct fc_function_template qla2xxx_transport_vport_functions = {
.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
.terminate_rport_io = qla2x00_terminate_rport_io,
.get_fc_host_stats = qla2x00_get_fc_host_stats,
+ .reset_fc_host_stats = qla2x00_reset_host_stats,
+
.bsg_request = qla24xx_bsg_request,
.bsg_timeout = qla24xx_bsg_timeout,
};
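Among the statistics added above, seconds_since_last_reset is computed as elapsed jiffies since jiffies_at_last_reset divided by HZ via do_div(). A minimal userspace sketch of that arithmetic (HZ value assumed for the example):

#include <stdio.h>
#include <stdint.h>

#define TICK_HZ 250ULL		/* assumed tick rate; the kernel uses its configured HZ */

/* Mirror of the seconds_since_last_reset computation: elapsed ticks / HZ. */
static uint64_t seconds_since(uint64_t jiffies_now, uint64_t jiffies_at_last_reset)
{
	return (jiffies_now - jiffies_at_last_reset) / TICK_HZ;
}

int main(void)
{
	printf("%llu s\n",
	       (unsigned long long)seconds_since(1000750, 1000000));	/* -> 3 s */
	return 0;
}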
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 5afdc3a2501..aa57bf0af57 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -125,7 +125,7 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
uint32_t len;
uint32_t oper;
- if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
+ if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_P3P_TYPE(ha))) {
ret = -EINVAL;
goto exit_fcp_prio_cfg;
}
@@ -559,7 +559,7 @@ qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
uint16_t new_config[4];
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
goto done_reset_internal;
memset(new_config, 0 , sizeof(new_config));
@@ -627,9 +627,10 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
{
int ret = 0;
int rval = 0;
+ unsigned long rem_tmo = 0, current_tmo = 0;
struct qla_hw_data *ha = vha->hw;
- if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
goto done_set_internal;
if (mode == INTERNAL_LOOPBACK)
@@ -652,8 +653,19 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
}
/* Wait for DCBX complete event */
- if (!wait_for_completion_timeout(&ha->dcbx_comp,
- (DCBX_COMP_TIMEOUT * HZ))) {
+ current_tmo = DCBX_COMP_TIMEOUT * HZ;
+ while (1) {
+ rem_tmo = wait_for_completion_timeout(&ha->dcbx_comp,
+ current_tmo);
+ if (!ha->idc_extend_tmo || rem_tmo) {
+ ha->idc_extend_tmo = 0;
+ break;
+ }
+ current_tmo = ha->idc_extend_tmo * HZ;
+ ha->idc_extend_tmo = 0;
+ }
+
+ if (!rem_tmo) {
ql_dbg(ql_dbg_user, vha, 0x7022,
"DCBX completion not received.\n");
ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
@@ -678,6 +690,7 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
}
ha->notify_dcbx_comp = 0;
+ ha->idc_extend_tmo = 0;
done_set_internal:
return rval;
@@ -773,7 +786,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
if (atomic_read(&vha->loop_state) == LOOP_READY &&
(ha->current_topology == ISP_CFG_F ||
- ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
+ ((IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) &&
le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
&& req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
elreq.options == EXTERNAL_LOOPBACK) {
@@ -783,7 +796,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
command_sent = INT_DEF_LB_ECHO_CMD;
rval = qla2x00_echo_test(vha, &elreq, response);
} else {
- if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
+ if (IS_QLA81XX(ha) || IS_QLA8031(ha) || IS_QLA8044(ha)) {
memset(config, 0, sizeof(config));
memset(new_config, 0, sizeof(new_config));
@@ -806,7 +819,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
"elreq.options=%04x\n", elreq.options);
if (elreq.options == EXTERNAL_LOOPBACK)
- if (IS_QLA8031(ha))
+ if (IS_QLA8031(ha) || IS_QLA8044(ha))
rval = qla81xx_set_loopback_mode(vha,
config, new_config, elreq.options);
else
@@ -1266,6 +1279,7 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
int rval = 0;
struct qla_port_param *port_param = NULL;
fc_port_t *fcport = NULL;
+ int found = 0;
uint16_t mb[MAILBOX_REGISTER_COUNT];
uint8_t *rsp_ptr = NULL;
@@ -1288,10 +1302,12 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
if (memcmp(port_param->fc_scsi_addr.dest_addr.wwpn,
fcport->port_name, sizeof(fcport->port_name)))
continue;
+
+ found = 1;
break;
}
- if (!fcport) {
+ if (!found) {
ql_log(ql_log_warn, vha, 0x7049,
"Failed to find port.\n");
return -EINVAL;
@@ -1318,12 +1334,9 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
if (rval) {
ql_log(ql_log_warn, vha, 0x704c,
- "iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
- "%04x %x %04x %04x.\n", fcport->port_name[0],
- fcport->port_name[1], fcport->port_name[2],
- fcport->port_name[3], fcport->port_name[4],
- fcport->port_name[5], fcport->port_name[6],
- fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
+ "iIDMA cmd failed for %8phN -- "
+ "%04x %x %04x %04x.\n", fcport->port_name,
+ rval, fcport->fp_speed, mb[0], mb[1]);
rval = (DID_ERROR << 16);
} else {
if (!port_param->mode) {
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index df132fec6d8..2ef497ebadc 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,9 +11,12 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x014f | 0x4b,0xba,0xfa |
- * | Mailbox commands | 0x117a | 0x111a-0x111b |
+ * | Module Init and Probe | 0x0159 | 0x4b,0xba,0xfa |
+ * | Mailbox commands | 0x1181 | 0x111a-0x111b |
* | | | 0x1155-0x1158 |
+ * | | | 0x1018-0x1019 |
+ * | | | 0x1115-0x1116 |
+ * | | | 0x10ca |
* | Device Discovery | 0x2095 | 0x2020-0x2022, |
* | | | 0x2011-0x2012, |
* | | | 0x2016 |
@@ -24,11 +27,12 @@
* | | | 0x3036,0x3038 |
* | | | 0x303a |
* | DPC Thread | 0x4022 | 0x4002,0x4013 |
- * | Async Events | 0x5081 | 0x502b-0x502f |
+ * | Async Events | 0x5087 | 0x502b-0x502f |
* | | | 0x5047,0x5052 |
- * | | | 0x5040,0x5075 |
- * | Timer Routines | 0x6011 | |
- * | User Space Interactions | 0x70dd | 0x7018,0x702e, |
+ * | | | 0x5084,0x5075 |
+ * | | | 0x503d,0x5044 |
+ * | Timer Routines | 0x6012 | |
+ * | User Space Interactions | 0x70e1 | 0x7018,0x702e, |
* | | | 0x7020,0x7024, |
* | | | 0x7039,0x7045, |
* | | | 0x7073-0x7075, |
@@ -36,17 +40,28 @@
* | | | 0x70a5,0x70a6, |
* | | | 0x70a8,0x70ab, |
* | | | 0x70ad-0x70ae, |
- * | | | 0x70d1-0x70da, |
+ * | | | 0x70d1-0x70db, |
* | | | 0x7047,0x703b |
- * | Task Management | 0x803c | 0x8025-0x8026 |
+ * | | | 0x70de-0x70df, |
+ * | Task Management | 0x803d | 0x8025-0x8026 |
* | | | 0x800b,0x8039 |
* | AER/EEH | 0x9011 | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific | 0xb086 | 0xb002,0xb024 |
+ * | ISP82XX Specific | 0xb14c | 0xb002,0xb024 |
+ * | | | 0xb09e,0xb0ae |
+ * | | | 0xb0e0-0xb0ef |
+ * | | | 0xb085,0xb0dc |
+ * | | | 0xb107,0xb108 |
+ * | | | 0xb111,0xb11e |
+ * | | | 0xb12c,0xb12d |
+ * | | | 0xb13a,0xb142 |
+ * | | | 0xb13c-0xb140 |
+ * | | | 0xb149 |
* | MultiQ | 0xc00c | |
* | Misc | 0xd010 | |
- * | Target Mode | 0xe070 | |
- * | Target Mode Management | 0xf072 | |
+ * | Target Mode | 0xe070 | 0xe021 |
+ * | Target Mode Management | 0xf072 | 0xf002-0xf003 |
+ * | | | 0xf046-0xf049 |
* | Target Mode Task Management | 0x1000b | |
* ----------------------------------------------------------------------
*/
@@ -519,7 +534,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
uint32_t cnt, que_idx;
uint8_t que_cnt;
struct qla2xxx_mq_chain *mq = ptr;
- struct device_reg_25xxmq __iomem *reg;
+ device_reg_t __iomem *reg;
if (!ha->mqenable || IS_QLA83XX(ha))
return ptr;
@@ -533,13 +548,16 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
ha->max_req_queues : ha->max_rsp_queues;
mq->count = htonl(que_cnt);
for (cnt = 0; cnt < que_cnt; cnt++) {
- reg = (struct device_reg_25xxmq __iomem *)
- (ha->mqiobase + cnt * QLA_QUE_PAGE);
+ reg = ISP_QUE_REG(ha, cnt);
que_idx = cnt * 4;
- mq->qregs[que_idx] = htonl(RD_REG_DWORD(&reg->req_q_in));
- mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(&reg->req_q_out));
- mq->qregs[que_idx+2] = htonl(RD_REG_DWORD(&reg->rsp_q_in));
- mq->qregs[que_idx+3] = htonl(RD_REG_DWORD(&reg->rsp_q_out));
+ mq->qregs[que_idx] =
+ htonl(RD_REG_DWORD(&reg->isp25mq.req_q_in));
+ mq->qregs[que_idx+1] =
+ htonl(RD_REG_DWORD(&reg->isp25mq.req_q_out));
+ mq->qregs[que_idx+2] =
+ htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_in));
+ mq->qregs[que_idx+3] =
+ htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_out));
}
return ptr + sizeof(struct qla2xxx_mq_chain);
@@ -941,7 +959,7 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
uint32_t *last_chain = NULL;
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
return;
risc_address = ext_mem_cnt = 0;
@@ -2530,7 +2548,7 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
if (!ql_mask_match(level))
return;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
mbx_reg = &reg82->mailbox_in[0];
else if (IS_FWI2_CAPABLE(ha))
mbx_reg = &reg24->mailbox0;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 95ca32a71e7..93db74ef346 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -35,6 +35,7 @@
#include "qla_bsg.h"
#include "qla_nx.h"
+#include "qla_nx2.h"
#define QLA2XXX_DRIVER_NAME "qla2xxx"
#define QLA2XXX_APIDEV "ql2xapidev"
#define QLA2XXX_MANUFACTURER "QLogic Corporation"
@@ -642,6 +643,7 @@ struct device_reg_fx00 {
uint32_t initval6; /* C8 */
uint32_t initval7; /* CC */
uint32_t fwheartbeat; /* D0 */
+ uint32_t pseudoaen; /* D4 */
};
@@ -805,6 +807,7 @@ struct mbx_cmd_32 {
#define MBA_MIRROR_LUN_CHANGE 0x8402 /* Mirror LUN State Change
Notification */
#define MBA_FW_POLL_STATE 0x8600 /* Firmware in poll diagnostic state */
+#define MBA_FW_RESET_FCT 0x8502 /* Firmware reset factory defaults */
/* 83XX FCoE specific */
#define MBA_IDC_AEN 0x8200 /* FCoE: NIC Core state change AEN */
@@ -997,6 +1000,7 @@ struct mbx_cmd_32 {
#define MBX_1 BIT_1
#define MBX_0 BIT_0
+#define RNID_TYPE_SET_VERSION 0x9
#define RNID_TYPE_ASIC_TEMP 0xC
/*
@@ -1233,8 +1237,9 @@ struct link_statistics {
uint32_t unused1[0x1a];
uint32_t tx_frames;
uint32_t rx_frames;
- uint32_t dumped_frames;
- uint32_t unused2[2];
+ uint32_t discarded_frames;
+ uint32_t dropped_frames;
+ uint32_t unused2[1];
uint32_t nos_rcvd;
};
@@ -2656,6 +2661,11 @@ struct qla_statistics {
uint32_t total_isp_aborts;
uint64_t input_bytes;
uint64_t output_bytes;
+ uint64_t input_requests;
+ uint64_t output_requests;
+ uint32_t control_requests;
+
+ uint64_t jiffies_at_last_reset;
};
struct bidi_statistics {
@@ -2670,9 +2680,8 @@ struct bidi_statistics {
#define QLA_MAX_QUEUES 256
#define ISP_QUE_REG(ha, id) \
((ha->mqenable || IS_QLA83XX(ha)) ? \
- ((device_reg_t __iomem *)(ha->mqiobase) +\
- (QLA_QUE_PAGE * id)) :\
- ((device_reg_t __iomem *)(ha->iobase)))
+ ((void __iomem *)ha->mqiobase + (QLA_QUE_PAGE * id)) :\
+ ((void __iomem *)ha->iobase))
#define QLA_REQ_QUE_ID(tag) \
((tag < QLA_MAX_QUEUES && tag > 0) ? tag : 0)
#define QLA_DEFAULT_QUE_QOS 5
@@ -2935,7 +2944,8 @@ struct qla_hw_data {
#define DT_ISP2031 BIT_15
#define DT_ISP8031 BIT_16
#define DT_ISPFX00 BIT_17
-#define DT_ISP_LAST (DT_ISPFX00 << 1)
+#define DT_ISP8044 BIT_18
+#define DT_ISP_LAST (DT_ISP8044 << 1)
#define DT_T10_PI BIT_25
#define DT_IIDMA BIT_26
@@ -2961,6 +2971,7 @@ struct qla_hw_data {
#define IS_QLA8001(ha) (DT_MASK(ha) & DT_ISP8001)
#define IS_QLA81XX(ha) (IS_QLA8001(ha))
#define IS_QLA82XX(ha) (DT_MASK(ha) & DT_ISP8021)
+#define IS_QLA8044(ha) (DT_MASK(ha) & DT_ISP8044)
#define IS_QLA2031(ha) (DT_MASK(ha) & DT_ISP2031)
#define IS_QLA8031(ha) (DT_MASK(ha) & DT_ISP8031)
#define IS_QLAFX00(ha) (DT_MASK(ha) & DT_ISPFX00)
@@ -2975,10 +2986,12 @@ struct qla_hw_data {
#define IS_QLA24XX_TYPE(ha) (IS_QLA24XX(ha) || IS_QLA54XX(ha) || \
IS_QLA84XX(ha))
#define IS_CNA_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA82XX(ha) || \
- IS_QLA8031(ha))
+ IS_QLA8031(ha) || IS_QLA8044(ha))
+#define IS_P3P_TYPE(ha) (IS_QLA82XX(ha) || IS_QLA8044(ha))
#define IS_QLA2XXX_MIDTYPE(ha) (IS_QLA24XX(ha) || IS_QLA84XX(ha) || \
IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
- IS_QLA82XX(ha) || IS_QLA83XX(ha))
+ IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
+ IS_QLA8044(ha))
#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
IS_QLA83XX(ha)) && (ha)->flags.msix_enabled)
@@ -3187,10 +3200,12 @@ struct qla_hw_data {
uint32_t nvram_data_off;
uint32_t fdt_wrt_disable;
+ uint32_t fdt_wrt_enable;
uint32_t fdt_erase_cmd;
uint32_t fdt_block_size;
uint32_t fdt_unprotect_sec_cmd;
uint32_t fdt_protect_sec_cmd;
+ uint32_t fdt_wrt_sts_reg_cmd;
uint32_t flt_region_flt;
uint32_t flt_region_fdt;
@@ -3277,6 +3292,7 @@ struct qla_hw_data {
/* QLA83XX IDC specific fields */
uint32_t idc_audit_ts;
+ uint32_t idc_extend_tmo;
/* DPC low-priority workqueue */
struct workqueue_struct *dpc_lp_wq;
@@ -3296,9 +3312,6 @@ struct qla_hw_data {
struct mr_data_fx00 mr;
struct qlt_hw_data tgt;
- uint16_t thermal_support;
-#define THERMAL_SUPPORT_I2C BIT_0
-#define THERMAL_SUPPORT_ISP BIT_1
};
/*
@@ -3364,6 +3377,7 @@ typedef struct scsi_qla_host {
#define PORT_UPDATE_NEEDED 24
#define FX00_RESET_RECOVERY 25
#define FX00_TARGET_SCAN 26
+#define FX00_CRITEMP_RECOVERY 27
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
@@ -3402,7 +3416,7 @@ typedef struct scsi_qla_host {
uint16_t fcoe_fcf_idx;
uint8_t fcoe_vn_port_mac[6];
- uint32_t vp_abort_cnt;
+ uint32_t vp_abort_cnt;
struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
uint16_t vp_idx; /* vport ID */
@@ -3435,6 +3449,7 @@ typedef struct scsi_qla_host {
struct bidi_statistics bidi_stats;
atomic_t vref_count;
+ struct qla8044_reset_template reset_tmplt;
} scsi_qla_host_t;
#define SET_VP_IDX 1
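The qla_def.h hunks above add DT_ISP8044 to the device-type bitmask and introduce IS_P3P_TYPE() so ISP82xx and ISP8044 can be tested with one predicate. A simplified userspace sketch of that bitmask-and-composite-macro style (bit values illustrative, DT_MASK() indirection omitted):

#include <stdio.h>
#include <stdint.h>

#define DT_ISP8021	(1u << 14)	/* illustrative values */
#define DT_ISP8044	(1u << 18)

#define IS_QLA82XX(dt)	((dt) & DT_ISP8021)
#define IS_QLA8044(dt)	((dt) & DT_ISP8044)
/* Composite predicate grouping both P3P-class parts, as in the hunk above. */
#define IS_P3P_TYPE(dt)	(IS_QLA82XX(dt) || IS_QLA8044(dt))

int main(void)
{
	uint32_t dt = DT_ISP8044;

	printf("P3P type: %s\n", IS_P3P_TYPE(dt) ? "yes" : "no");
	return 0;
}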
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 1ac2b0e3a0e..610d3aa905a 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1387,6 +1387,8 @@ struct qla_flt_header {
#define FLT_REG_GOLD_FW 0x2f
#define FLT_REG_FCP_PRIO_0 0x87
#define FLT_REG_FCP_PRIO_1 0x88
+#define FLT_REG_CNA_FW 0x97
+#define FLT_REG_BOOT_CODE_8044 0xA2
#define FLT_REG_FCOE_FW 0xA4
#define FLT_REG_FCOE_NVRAM_0 0xAA
#define FLT_REG_FCOE_NVRAM_1 0xAC
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 2d98232a08e..4446bf5fe29 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -357,6 +357,12 @@ extern int
qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
extern int
+qla82xx_set_driver_version(scsi_qla_host_t *, char *);
+
+extern int
+qla25xx_set_driver_version(scsi_qla_host_t *, char *);
+
+extern int
qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
uint16_t, uint16_t, uint16_t, uint16_t);
@@ -435,19 +441,19 @@ qla2x00_process_completed_request(struct scsi_qla_host *, struct req_que *,
*/
extern void qla2x00_release_nvram_protection(scsi_qla_host_t *);
extern uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *,
- uint32_t, uint32_t);
+ uint32_t, uint32_t);
extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
- uint32_t);
+ uint32_t);
extern uint8_t *qla24xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
- uint32_t);
+ uint32_t);
extern int qla2x00_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
- uint32_t);
+ uint32_t);
extern int qla24xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
- uint32_t);
+ uint32_t);
extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
- uint32_t);
+ uint32_t);
extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
- uint32_t);
+ uint32_t);
extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t);
extern int qla2x00_beacon_on(struct scsi_qla_host *);
@@ -463,21 +469,25 @@ extern int qla83xx_wr_reg(scsi_qla_host_t *, uint32_t, uint32_t);
extern int qla83xx_rd_reg(scsi_qla_host_t *, uint32_t, uint32_t *);
extern int qla83xx_restart_nic_firmware(scsi_qla_host_t *);
extern int qla83xx_access_control(scsi_qla_host_t *, uint16_t, uint32_t,
- uint32_t, uint16_t *);
+ uint32_t, uint16_t *);
extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *,
- uint32_t, uint32_t);
+ uint32_t, uint32_t);
extern int qla2x00_write_optrom_data(struct scsi_qla_host *, uint8_t *,
- uint32_t, uint32_t);
+ uint32_t, uint32_t);
extern uint8_t *qla24xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
- uint32_t, uint32_t);
+ uint32_t, uint32_t);
extern int qla24xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
- uint32_t, uint32_t);
+ uint32_t, uint32_t);
extern uint8_t *qla25xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
- uint32_t, uint32_t);
+ uint32_t, uint32_t);
+extern uint8_t *qla8044_read_optrom_data(struct scsi_qla_host *,
+ uint8_t *, uint32_t, uint32_t);
+extern void qla8044_watchdog(struct scsi_qla_host *vha);
extern int qla2x00_get_flash_version(scsi_qla_host_t *, void *);
extern int qla24xx_get_flash_version(scsi_qla_host_t *, void *);
+extern int qla82xx_get_flash_version(scsi_qla_host_t *, void *);
extern int qla2xxx_get_flash_info(scsi_qla_host_t *);
extern int qla2xxx_get_vpd_field(scsi_qla_host_t *, char *, char *, size_t);
@@ -498,7 +508,7 @@ extern void qla2x00_dump_buffer(uint8_t *, uint32_t);
extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t);
extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t);
extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t,
- uint8_t *, uint32_t);
+ uint8_t *, uint32_t);
extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int);
/*
@@ -584,6 +594,7 @@ extern int qlafx00_start_scsi(srb_t *);
extern int qlafx00_abort_isp(scsi_qla_host_t *);
extern int qlafx00_iospace_config(struct qla_hw_data *);
extern int qlafx00_init_firmware(scsi_qla_host_t *, uint16_t);
+extern int qlafx00_driver_shutdown(scsi_qla_host_t *, int);
extern int qlafx00_fw_ready(scsi_qla_host_t *);
extern int qlafx00_configure_devices(scsi_qla_host_t *);
extern int qlafx00_reset_initialize(scsi_qla_host_t *);
@@ -601,6 +612,7 @@ extern void qlafx00_abort_iocb(srb_t *, struct abort_iocb_entry_fx00 *);
extern void qlafx00_fxdisc_iocb(srb_t *, struct fxdisc_entry_fx00 *);
extern void qlafx00_timer_routine(scsi_qla_host_t *);
extern int qlafx00_rescan_isp(scsi_qla_host_t *);
+extern int qlafx00_loop_reset(scsi_qla_host_t *vha);
/* qla82xx related functions */
@@ -619,9 +631,9 @@ extern int qla82xx_start_firmware(scsi_qla_host_t *);
/* Firmware and flash related functions */
extern int qla82xx_load_risc(scsi_qla_host_t *, uint32_t *);
extern uint8_t *qla82xx_read_optrom_data(struct scsi_qla_host *, uint8_t *,
- uint32_t, uint32_t);
+ uint32_t, uint32_t);
extern int qla82xx_write_optrom_data(struct scsi_qla_host *, uint8_t *,
- uint32_t, uint32_t);
+ uint32_t, uint32_t);
/* Mailbox related functions */
extern int qla82xx_abort_isp(scsi_qla_host_t *);
@@ -662,7 +674,7 @@ extern void qla8xxx_dev_failed_handler(scsi_qla_host_t *);
extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *);
extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
- size_t, char *);
+ size_t, char *);
extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
extern void qla82xx_start_iocbs(scsi_qla_host_t *);
@@ -674,6 +686,8 @@ extern int qla81xx_get_led_config(scsi_qla_host_t *, uint16_t *);
extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
extern char *qdev_state(uint32_t);
extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);
+extern int qla82xx_read_temperature(scsi_qla_host_t *);
+extern int qla8044_read_temperature(scsi_qla_host_t *);
/* BSG related functions */
extern int qla24xx_bsg_request(struct fc_bsg_job *);
@@ -695,5 +709,31 @@ extern void qla82xx_md_free(scsi_qla_host_t *);
extern int qla82xx_md_collect(scsi_qla_host_t *);
extern void qla82xx_md_prep(scsi_qla_host_t *);
extern void qla82xx_set_reset_owner(scsi_qla_host_t *);
+extern int qla82xx_validate_template_chksum(scsi_qla_host_t *vha);
+
+/* Function declarations for ISP8044 */
+extern int qla8044_idc_lock(struct qla_hw_data *ha);
+extern void qla8044_idc_unlock(struct qla_hw_data *ha);
+extern uint32_t qla8044_rd_reg(struct qla_hw_data *ha, ulong addr);
+extern void qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val);
+extern void qla8044_read_reset_template(struct scsi_qla_host *ha);
+extern void qla8044_set_idc_dontreset(struct scsi_qla_host *ha);
+extern int qla8044_rd_direct(struct scsi_qla_host *vha, const uint32_t crb_reg);
+extern void qla8044_wr_direct(struct scsi_qla_host *vha,
+ const uint32_t crb_reg, const uint32_t value);
+extern inline void qla8044_set_qsnt_ready(struct scsi_qla_host *vha);
+extern inline void qla8044_need_reset_handler(struct scsi_qla_host *vha);
+extern int qla8044_device_state_handler(struct scsi_qla_host *vha);
+extern void qla8044_clear_qsnt_ready(struct scsi_qla_host *vha);
+extern void qla8044_clear_drv_active(struct scsi_qla_host *vha);
+void qla8044_get_minidump(struct scsi_qla_host *vha);
+int qla8044_collect_md_data(struct scsi_qla_host *vha);
+extern int qla8044_md_get_template(scsi_qla_host_t *);
+extern int qla8044_write_optrom_data(struct scsi_qla_host *, uint8_t *,
+ uint32_t, uint32_t);
+extern irqreturn_t qla8044_intr_handler(int, void *);
+extern void qla82xx_mbx_completion(scsi_qla_host_t *, uint16_t);
+extern int qla8044_abort_isp(scsi_qla_host_t *);
+extern int qla8044_check_fw_alive(struct scsi_qla_host *);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 0926451980e..cd47f1b32d9 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -49,6 +49,8 @@ qla2x00_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
ms_pkt->dseg_rsp_address[1] = cpu_to_le32(MSD(ha->ct_sns_dma));
ms_pkt->dseg_rsp_length = ms_pkt->rsp_bytecount;
+ vha->qla_stats.control_requests++;
+
return (ms_pkt);
}
@@ -87,6 +89,8 @@ qla24xx_prep_ms_iocb(scsi_qla_host_t *vha, uint32_t req_size, uint32_t rsp_size)
ct_pkt->dseg_1_len = ct_pkt->rsp_byte_count;
ct_pkt->vp_index = vha->vp_idx;
+ vha->qla_stats.control_requests++;
+
return (ct_pkt);
}
@@ -226,17 +230,9 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->d_id.b.domain = 0xf0;
ql_dbg(ql_dbg_disc, vha, 0x2063,
- "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
- "pn %02x%02x%02x%02x%02x%02x%02x%02x "
+ "GA_NXT entry - nn %8phN pn %8phN "
"port_id=%02x%02x%02x.\n",
- fcport->node_name[0], fcport->node_name[1],
- fcport->node_name[2], fcport->node_name[3],
- fcport->node_name[4], fcport->node_name[5],
- fcport->node_name[6], fcport->node_name[7],
- fcport->port_name[0], fcport->port_name[1],
- fcport->port_name[2], fcport->port_name[3],
- fcport->port_name[4], fcport->port_name[5],
- fcport->port_name[6], fcport->port_name[7],
+ fcport->node_name, fcport->port_name,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
}
@@ -447,17 +443,9 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
ql_dbg(ql_dbg_disc, vha, 0x2058,
- "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02X%02x "
- "pn %02x%02x%02x%02x%02x%02x%02X%02x "
+ "GID_PT entry - nn %8phN pn %8phN "
"portid=%02x%02x%02x.\n",
- list[i].node_name[0], list[i].node_name[1],
- list[i].node_name[2], list[i].node_name[3],
- list[i].node_name[4], list[i].node_name[5],
- list[i].node_name[6], list[i].node_name[7],
- list[i].port_name[0], list[i].port_name[1],
- list[i].port_name[2], list[i].port_name[3],
- list[i].port_name[4], list[i].port_name[5],
- list[i].port_name[6], list[i].port_name[7],
+ list[i].node_name, list[i].port_name,
list[i].d_id.b.domain, list[i].d_id.b.area,
list[i].d_id.b.al_pa);
}
@@ -739,6 +727,8 @@ qla2x00_prep_sns_cmd(scsi_qla_host_t *vha, uint16_t cmd, uint16_t scmd_len,
wc = (data_size - 16) / 4; /* Size in 32bit words. */
sns_cmd->p.cmd.size = cpu_to_le16(wc);
+ vha->qla_stats.control_requests++;
+
return (sns_cmd);
}
@@ -796,17 +786,9 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->d_id.b.domain = 0xf0;
ql_dbg(ql_dbg_disc, vha, 0x2061,
- "GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
- "pn %02x%02x%02x%02x%02x%02x%02x%02x "
+ "GA_NXT entry - nn %8phN pn %8phN "
"port_id=%02x%02x%02x.\n",
- fcport->node_name[0], fcport->node_name[1],
- fcport->node_name[2], fcport->node_name[3],
- fcport->node_name[4], fcport->node_name[5],
- fcport->node_name[6], fcport->node_name[7],
- fcport->port_name[0], fcport->port_name[1],
- fcport->port_name[2], fcport->port_name[3],
- fcport->port_name[4], fcport->port_name[5],
- fcport->port_name[6], fcport->port_name[7],
+ fcport->node_name, fcport->port_name,
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
}
@@ -991,17 +973,9 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
WWN_SIZE);
ql_dbg(ql_dbg_disc, vha, 0x206e,
- "GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
- "pn %02x%02x%02x%02x%02x%02x%02x%02x "
+ "GID_PT entry - nn %8phN pn %8phN "
"port_id=%02x%02x%02x.\n",
- list[i].node_name[0], list[i].node_name[1],
- list[i].node_name[2], list[i].node_name[3],
- list[i].node_name[4], list[i].node_name[5],
- list[i].node_name[6], list[i].node_name[7],
- list[i].port_name[0], list[i].port_name[1],
- list[i].port_name[2], list[i].port_name[3],
- list[i].port_name[4], list[i].port_name[5],
- list[i].port_name[6], list[i].port_name[7],
+ list[i].node_name, list[i].port_name,
list[i].d_id.b.domain, list[i].d_id.b.area,
list[i].d_id.b.al_pa);
}
@@ -1321,11 +1295,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
size += 4 + WWN_SIZE;
ql_dbg(ql_dbg_disc, vha, 0x2025,
- "NodeName = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
- eiter->a.node_name[0], eiter->a.node_name[1],
- eiter->a.node_name[2], eiter->a.node_name[3],
- eiter->a.node_name[4], eiter->a.node_name[5],
- eiter->a.node_name[6], eiter->a.node_name[7]);
+ "NodeName = %8phN.\n", eiter->a.node_name);
/* Manufacturer. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@@ -1428,16 +1398,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
ql_dbg(ql_dbg_disc, vha, 0x202e,
- "RHBA identifier = "
- "%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n",
- ct_req->req.rhba.hba_identifier[0],
- ct_req->req.rhba.hba_identifier[1],
- ct_req->req.rhba.hba_identifier[2],
- ct_req->req.rhba.hba_identifier[3],
- ct_req->req.rhba.hba_identifier[4],
- ct_req->req.rhba.hba_identifier[5],
- ct_req->req.rhba.hba_identifier[6],
- ct_req->req.rhba.hba_identifier[7], size);
+ "RHBA identifier = %8phN size=%d.\n",
+ ct_req->req.rhba.hba_identifier, size);
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
entries, size);
@@ -1494,11 +1456,7 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
ql_dbg(ql_dbg_disc, vha, 0x2036,
- "DHBA portname = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
- ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1],
- ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3],
- ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5],
- ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]);
+ "DHBA portname = %8phN.\n", ct_req->req.dhba.port_name);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
@@ -1678,12 +1636,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
ql_dbg(ql_dbg_disc, vha, 0x203e,
- "RPA portname= %02x%02x%02x%02x%02X%02x%02x%02x size=%d.\n",
- ct_req->req.rpa.port_name[0], ct_req->req.rpa.port_name[1],
- ct_req->req.rpa.port_name[2], ct_req->req.rpa.port_name[3],
- ct_req->req.rpa.port_name[4], ct_req->req.rpa.port_name[5],
- ct_req->req.rpa.port_name[6], ct_req->req.rpa.port_name[7],
- size);
+ "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
entries, size);
@@ -1940,16 +1893,8 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
ql_dbg(ql_dbg_disc, vha, 0x205b,
"GPSC ext entry - fpn "
- "%02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x "
- "speed=%04x.\n",
- list[i].fabric_port_name[0],
- list[i].fabric_port_name[1],
- list[i].fabric_port_name[2],
- list[i].fabric_port_name[3],
- list[i].fabric_port_name[4],
- list[i].fabric_port_name[5],
- list[i].fabric_port_name[6],
- list[i].fabric_port_name[7],
+ "%8phN speeds=%04x speed=%04x.\n",
+ list[i].fabric_port_name,
be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
be16_to_cpu(ct_rsp->rsp.gpsc.speed));
}
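
Note: the qla_gs.c hunks above collapse long runs of "%02x" specifiers into the kernel's %*ph printk extension ("%8phN" prints eight bytes as contiguous hex with no separator). A minimal sketch of the idiom, assuming a kernel context and a hypothetical wwpn buffer:

	u8 wwpn[8] = { 0x21, 0x00, 0x00, 0x24, 0xff, 0x01, 0x02, 0x03 };

	/* Prints "wwpn=21000024ff010203" - 8 bytes, no separators. */
	pr_info("wwpn=%8phN\n", wwpn);
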
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f2216ed2ad8..03f715e7591 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -524,7 +524,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
vha->flags.reset_active = 0;
ha->flags.pci_channel_io_perm_failure = 0;
ha->flags.eeh_busy = 0;
- ha->thermal_support = THERMAL_SUPPORT_I2C|THERMAL_SUPPORT_ISP;
+ vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
atomic_set(&vha->loop_state, LOOP_DOWN);
vha->device_flags = DFLG_NO_CABLE;
@@ -552,7 +552,18 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
if (rval) {
ql_log(ql_log_fatal, vha, 0x004f,
"Unable to validate FLASH data.\n");
- return (rval);
+ return rval;
+ }
+
+ if (IS_QLA8044(ha)) {
+ qla8044_read_reset_template(vha);
+
+ /* NOTE: If ql2xdontresethba==1, set IDC_CTRL DONTRESET_BIT0.
+ * If DONTRESET_BIT0 is set, drivers should not set dev_state
+ * to NEED_RESET. But if NEED_RESET is set, drivers should
+ * honor the reset. */
+ if (ql2xdontresethba == 1)
+ qla8044_set_idc_dontreset(vha);
}
ha->isp_ops->get_flash_version(vha, req->ring);
@@ -564,12 +575,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
if (ha->flags.disable_serdes) {
/* Mask HBA via NVRAM settings? */
ql_log(ql_log_info, vha, 0x0077,
- "Masking HBA WWPN "
- "%02x%02x%02x%02x%02x%02x%02x%02x (via NVRAM).\n",
- vha->port_name[0], vha->port_name[1],
- vha->port_name[2], vha->port_name[3],
- vha->port_name[4], vha->port_name[5],
- vha->port_name[6], vha->port_name[7]);
+ "Masking HBA WWPN %8phN (via NVRAM).\n", vha->port_name);
return QLA_FUNCTION_FAILED;
}
@@ -620,6 +626,11 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
qla24xx_read_fcp_prio_cfg(vha);
+ if (IS_P3P_TYPE(ha))
+ qla82xx_set_driver_version(vha, QLA2XXX_VERSION);
+ else
+ qla25xx_set_driver_version(vha, QLA2XXX_VERSION);
+
return (rval);
}
@@ -1332,7 +1343,7 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
return QLA_SUCCESS;
ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
@@ -1615,7 +1626,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
unsigned long flags;
uint16_t fw_major_version;
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
rval = ha->isp_ops->load_risc(vha, &srisc_address);
if (rval == QLA_SUCCESS) {
qla2x00_stop_firmware(vha);
@@ -1651,7 +1662,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
if (rval == QLA_SUCCESS) {
enable_82xx_npiv:
fw_major_version = ha->fw_major_version;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
qla82xx_check_md_needed(vha);
else
rval = qla2x00_get_fw_version(vha);
@@ -1681,7 +1692,7 @@ enable_82xx_npiv:
goto failed;
if (!fw_major_version && ql2xallocfwdump
- && !IS_QLA82XX(ha))
+ && !(IS_P3P_TYPE(ha)))
qla2x00_alloc_fw_dump(vha);
}
} else {
@@ -1849,7 +1860,7 @@ qla24xx_update_fw_options(scsi_qla_host_t *vha)
int rval;
struct qla_hw_data *ha = vha->hw;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
return;
/* Update Serial Link options. */
@@ -3061,22 +3072,13 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
mb);
if (rval != QLA_SUCCESS) {
ql_dbg(ql_dbg_disc, vha, 0x2004,
- "Unable to adjust iIDMA "
- "%02x%02x%02x%02x%02x%02x%02x%02x -- %04x %x %04x "
- "%04x.\n", fcport->port_name[0], fcport->port_name[1],
- fcport->port_name[2], fcport->port_name[3],
- fcport->port_name[4], fcport->port_name[5],
- fcport->port_name[6], fcport->port_name[7], rval,
- fcport->fp_speed, mb[0], mb[1]);
+ "Unable to adjust iIDMA %8phN -- %04x %x %04x %04x.\n",
+ fcport->port_name, rval, fcport->fp_speed, mb[0], mb[1]);
} else {
ql_dbg(ql_dbg_disc, vha, 0x2005,
- "iIDMA adjusted to %s GB/s "
- "on %02x%02x%02x%02x%02x%02x%02x%02x.\n",
+ "iIDMA adjusted to %s GB/s on %8phN.\n",
qla2x00_get_link_speed_str(ha, fcport->fp_speed),
- fcport->port_name[0], fcport->port_name[1],
- fcport->port_name[2], fcport->port_name[3],
- fcport->port_name[4], fcport->port_name[5],
- fcport->port_name[6], fcport->port_name[7]);
+ fcport->port_name);
}
}
@@ -4007,10 +4009,18 @@ qla83xx_reset_ownership(scsi_qla_host_t *vha)
uint32_t class_type_mask = 0x3;
uint16_t fcoe_other_function = 0xffff, i;
- qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
-
- qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
- qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
+ if (IS_QLA8044(ha)) {
+ drv_presence = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_ACTIVE_INDEX);
+ dev_part_info1 = qla8044_rd_direct(vha,
+ QLA8044_CRB_DEV_PART_INFO_INDEX);
+ dev_part_info2 = qla8044_rd_direct(vha,
+ QLA8044_CRB_DEV_PART_INFO2);
+ } else {
+ qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
+ qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO1, &dev_part_info1);
+ qla83xx_rd_reg(vha, QLA83XX_DEV_PARTINFO2, &dev_part_info2);
+ }
for (i = 0; i < 8; i++) {
class_type = ((dev_part_info1 >> (i * 4)) & class_type_mask);
if ((class_type == QLA83XX_CLASS_TYPE_FCOE) &&
@@ -4347,7 +4357,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
/* For ISP82XX, driver waits for completion of the commands.
* online flag should be set.
*/
- if (!IS_QLA82XX(ha))
+ if (!(IS_P3P_TYPE(ha)))
vha->flags.online = 0;
ha->flags.chip_reset_done = 0;
clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -4360,7 +4370,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
* Driver waits for the completion of the commands.
* the interrupts need to be enabled.
*/
- if (!IS_QLA82XX(ha))
+ if (!(IS_P3P_TYPE(ha)))
ha->isp_ops->reset_chip(vha);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
@@ -4403,7 +4413,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
if (!ha->flags.eeh_busy) {
/* Make sure for ISP 82XX IO DMA is complete */
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
qla82xx_chip_reset_cleanup(vha);
ql_log(ql_log_info, vha, 0x00b4,
"Done chip reset cleanup.\n");
@@ -4723,7 +4733,7 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
return;
vha->flags.online = 0;
@@ -4789,8 +4799,6 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
}
ha->nvram_size = sizeof(struct nvram_24xx);
ha->vpd_size = FA_NVRAM_VPD_SIZE;
- if (IS_QLA82XX(ha))
- ha->vpd_size = FA_VPD_SIZE_82XX;
/* Get VPD data into cache */
ha->vpd = ha->nvram + VPD_OFFSET;
@@ -5552,6 +5560,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
/* Determine NVRAM starting address. */
ha->nvram_size = sizeof(struct nvram_81xx);
ha->vpd_size = FA_NVRAM_VPD_SIZE;
+ if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
+ ha->vpd_size = FA_VPD_SIZE_82XX;
/* Get VPD data into cache */
ha->vpd = ha->nvram + VPD_OFFSET;
@@ -5734,7 +5744,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
/* Link Down Timeout = 0:
*
- * When Port Down timer expires we will start returning
+ * When Port Down timer expires we will start returning
* I/O's to OS with "DID_NO_CONNECT".
*
* Link Down Timeout != 0:
@@ -6061,7 +6071,7 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
if (priority < 0)
return QLA_FUNCTION_FAILED;
- if (IS_QLA82XX(vha->hw)) {
+ if (IS_P3P_TYPE(vha->hw)) {
fcport->fcp_prio = priority & 0xf;
return QLA_SUCCESS;
}
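
Note: many hunks from here on widen IS_QLA82XX(ha) checks to IS_P3P_TYPE(ha) so the existing ISP82xx paths also cover the ISP8044 part this series adds. A sketch of the helper this assumes (the real definition belongs in qla_def.h, which is outside this excerpt):

	/* P3P family: ISP82xx-style parts, including the new ISP8044. */
	#define IS_P3P_TYPE(ha)	(IS_QLA82XX(ha) || IS_QLA8044(ha))
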
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 28c38b4929c..957088b0461 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -59,7 +59,7 @@ qla2x00_poll(struct rsp_que *rsp)
unsigned long flags;
struct qla_hw_data *ha = rsp->hw;
local_irq_save(flags);
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
qla82xx_poll(0, rsp);
else
ha->isp_ops->intr_handler(0, rsp);
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 42ef481db94..46b9307e8be 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -32,9 +32,11 @@ qla2x00_get_cmd_direction(srb_t *sp)
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cflags = CF_WRITE;
vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.output_requests++;
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cflags = CF_READ;
vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.input_requests++;
}
return (cflags);
}
@@ -419,6 +421,8 @@ qla2x00_start_scsi(srb_t *sp)
__constant_cpu_to_le16(CF_SIMPLE_TAG);
break;
}
+ } else {
+ cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG);
}
/* Load SCSI command packet. */
@@ -472,7 +476,7 @@ qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
struct qla_hw_data *ha = vha->hw;
device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
qla82xx_start_iocbs(vha);
} else {
/* Adjust ring index. */
@@ -640,10 +644,12 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
cmd_pkt->control_flags =
__constant_cpu_to_le16(CF_WRITE_DATA);
vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.output_requests++;
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->control_flags =
__constant_cpu_to_le16(CF_READ_DATA);
vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.input_requests++;
}
cur_seg = scsi_sglist(cmd);
@@ -756,10 +762,12 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_WRITE_DATA);
vha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.output_requests++;
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->task_mgmt_flags =
__constant_cpu_to_le16(TMF_READ_DATA);
vha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ vha->qla_stats.input_requests++;
}
/* One DSD is available in the Command Type 3 IOCB */
@@ -1307,11 +1315,11 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
fcp_cmnd->task_attribute = TSK_ORDERED;
break;
default:
- fcp_cmnd->task_attribute = 0;
+ fcp_cmnd->task_attribute = TSK_SIMPLE;
break;
}
} else {
- fcp_cmnd->task_attribute = 0;
+ fcp_cmnd->task_attribute = TSK_SIMPLE;
}
cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
@@ -1525,7 +1533,12 @@ qla24xx_start_scsi(srb_t *sp)
case ORDERED_QUEUE_TAG:
cmd_pkt->task = TSK_ORDERED;
break;
+ default:
+ cmd_pkt->task = TSK_SIMPLE;
+ break;
}
+ } else {
+ cmd_pkt->task = TSK_SIMPLE;
}
/* Load SCSI command packet. */
@@ -1837,7 +1850,7 @@ skip_cmd_array:
if (req->cnt < req_cnt) {
if (ha->mqenable || IS_QLA83XX(ha))
cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
- else if (IS_QLA82XX(ha))
+ else if (IS_P3P_TYPE(ha))
cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
else if (IS_FWI2_CAPABLE(ha))
cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
@@ -2049,6 +2062,8 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
(bsg_job->reply_payload.sg_list)));
els_iocb->rx_len = cpu_to_le32(sg_dma_len
(bsg_job->reply_payload.sg_list));
+
+ sp->fcport->vha->qla_stats.control_requests++;
}
static void
@@ -2126,6 +2141,8 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
avail_dsds--;
}
ct_iocb->entry_count = entry_count;
+
+ sp->fcport->vha->qla_stats.control_requests++;
}
static void
@@ -2678,6 +2695,9 @@ qla25xx_build_bidir_iocb(srb_t *sp, struct scsi_qla_host *vha,
vha->bidi_stats.transfer_bytes += req_data_len;
vha->bidi_stats.io_count++;
+ vha->qla_stats.output_bytes += req_data_len;
+ vha->qla_stats.output_requests++;
+
/* Only one dsd is available for bidirectional IOCB, remaining dsds
* are bundled in continuation iocb
*/
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 2d8e7b81235..df1b30ba938 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -282,25 +282,38 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
"%04x %04x %04x %04x %04x %04x %04x.\n",
event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
mb[4], mb[5], mb[6]);
- if ((aen == MBA_IDC_COMPLETE && mb[1] >> 15)) {
- vha->hw->flags.idc_compl_status = 1;
- if (vha->hw->notify_dcbx_comp)
- complete(&vha->hw->dcbx_comp);
- }
-
- /* Acknowledgement needed? [Notify && non-zero timeout]. */
- timeout = (descr >> 8) & 0xf;
- if (aen != MBA_IDC_NOTIFY || !timeout)
- return;
+ switch (aen) {
+ /* Handle IDC Error completion case. */
+ case MBA_IDC_COMPLETE:
+ if (mb[1] >> 15) {
+ vha->hw->flags.idc_compl_status = 1;
+ if (vha->hw->notify_dcbx_comp)
+ complete(&vha->hw->dcbx_comp);
+ }
+ break;
- ql_dbg(ql_dbg_async, vha, 0x5022,
- "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
- vha->host_no, event[aen & 0xff], timeout);
+ case MBA_IDC_NOTIFY:
+ /* Acknowledgement needed? [Notify && non-zero timeout]. */
+ timeout = (descr >> 8) & 0xf;
+ ql_dbg(ql_dbg_async, vha, 0x5022,
+ "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
+ vha->host_no, event[aen & 0xff], timeout);
- rval = qla2x00_post_idc_ack_work(vha, mb);
- if (rval != QLA_SUCCESS)
- ql_log(ql_log_warn, vha, 0x5023,
- "IDC failed to post ACK.\n");
+ if (!timeout)
+ return;
+ rval = qla2x00_post_idc_ack_work(vha, mb);
+ if (rval != QLA_SUCCESS)
+ ql_log(ql_log_warn, vha, 0x5023,
+ "IDC failed to post ACK.\n");
+ break;
+ case MBA_IDC_TIME_EXT:
+ vha->hw->idc_extend_tmo = descr;
+ ql_dbg(ql_dbg_async, vha, 0x5087,
+ "%lu Inter-Driver Communication %s -- "
+ "Extend timeout by=%d.\n",
+ vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
+ break;
+ }
}
#define LS_UNKNOWN 2
@@ -691,7 +704,8 @@ skip_rio:
case MBA_LOOP_DOWN: /* Loop Down Event */
mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
? RD_REG_WORD(&reg24->mailbox4) : 0;
- mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
+ mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
+ : mbx;
ql_dbg(ql_dbg_async, vha, 0x500b,
"LOOP DOWN detected (%x %x %x %x).\n",
mb[1], mb[2], mb[3], mbx);
@@ -740,7 +754,7 @@ skip_rio:
if (IS_QLA2100(ha))
break;
- if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
+ if (IS_CNA_CAPABLE(ha)) {
ql_dbg(ql_dbg_async, vha, 0x500d,
"DCBX Completed -- %04x %04x %04x.\n",
mb[1], mb[2], mb[3]);
@@ -1002,7 +1016,7 @@ skip_rio:
mb[1], mb[2], mb[3]);
break;
case MBA_IDC_NOTIFY:
- if (IS_QLA8031(vha->hw)) {
+ if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
mb[4] = RD_REG_WORD(&reg24->mailbox4);
if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
(mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
@@ -1022,7 +1036,8 @@ skip_rio:
complete(&ha->lb_portup_comp);
/* Fallthru */
case MBA_IDC_TIME_EXT:
- if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
+ if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
+ IS_QLA8044(ha))
qla81xx_idc_event(vha, mb[0], mb[1]);
break;
@@ -1063,7 +1078,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
ql_log(ql_log_warn, vha, 0x3014,
"Invalid SCSI command index (%x).\n", index);
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
else
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -1080,7 +1095,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
} else {
ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
else
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -1100,7 +1115,7 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
if (index >= req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x5031,
"Invalid command index (%x).\n", index);
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
else
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -1805,6 +1820,9 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
if (scsi_status == 0) {
bsg_job->reply->reply_payload_rcv_len =
bsg_job->reply_payload.payload_len;
+ vha->qla_stats.input_bytes +=
+ bsg_job->reply->reply_payload_rcv_len;
+ vha->qla_stats.input_requests++;
rval = EXT_STATUS_OK;
}
goto done;
@@ -1949,7 +1967,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
ql_dbg(ql_dbg_io, vha, 0x3017,
"Invalid status handle (0x%x).\n", sts->handle);
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
else
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -2176,8 +2194,10 @@ check_scsi_status:
}
ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
- "Port down status: port-state=0x%x.\n",
- atomic_read(&fcport->state));
+ "Port to be marked lost on fcport=%02x%02x%02x, current "
+ "port state= %s.\n", fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ port_state_str[atomic_read(&fcport->state)]);
if (atomic_read(&fcport->state) == FCS_ONLINE)
qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
@@ -2212,16 +2232,13 @@ check_scsi_status:
out:
if (logit)
ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
- "FCP command status: 0x%x-0x%x (0x%x) "
- "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
- "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
+ "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d "
+ "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
"rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
comp_status, scsi_status, res, vha->host_no,
cp->device->id, cp->device->lun, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
- cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
- cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
- cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
+ cp->cmnd, scsi_bufflen(cp), rsp_info_len,
resid_len, fw_resid_len);
if (!res)
@@ -2324,7 +2341,7 @@ fatal:
ql_log(ql_log_warn, vha, 0x5030,
"Error entry - invalid handle/queue.\n");
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
else
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -2452,7 +2469,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
}
/* Adjust ring index */
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
} else
@@ -2865,7 +2882,7 @@ msix_failed:
ret = request_irq(qentry->vector,
qla83xx_msix_entries[i].handler,
0, qla83xx_msix_entries[i].name, rsp);
- } else if (IS_QLA82XX(ha)) {
+ } else if (IS_P3P_TYPE(ha)) {
ret = request_irq(qentry->vector,
qla82xx_msix_entries[i].handler,
0, qla82xx_msix_entries[i].name, rsp);
@@ -2950,7 +2967,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
skip_msix:
if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
- !IS_QLA8001(ha) && !IS_QLA82XX(ha) && !IS_QLAFX00(ha))
+ !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha))
goto skip_msi;
ret = pci_enable_msi(ha->pdev);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 7257c3c4f2d..a9aae500e79 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -75,7 +75,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
- if (IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung) {
+ if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
/* Setting Link-Down error */
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
ql_log(ql_log_warn, vha, 0x1004,
@@ -106,9 +106,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
spin_lock_irqsave(&ha->hardware_lock, flags);
/* Load mailbox registers. */
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
optr = (uint16_t __iomem *)&reg->isp82.mailbox_in[0];
- else if (IS_FWI2_CAPABLE(ha) && !IS_QLA82XX(ha))
+ else if (IS_FWI2_CAPABLE(ha) && !(IS_P3P_TYPE(ha)))
optr = (uint16_t __iomem *)&reg->isp24.mailbox0;
else
optr = (uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 0);
@@ -117,33 +117,25 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
command = mcp->mb[0];
mboxes = mcp->out_mb;
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111,
+ "Mailbox registers (OUT):\n");
for (cnt = 0; cnt < ha->mbx_count; cnt++) {
if (IS_QLA2200(ha) && cnt == 8)
optr =
(uint16_t __iomem *)MAILBOX_REG(ha, &reg->isp, 8);
- if (mboxes & BIT_0)
+ if (mboxes & BIT_0) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1112,
+ "mbox[%d]<-0x%04x\n", cnt, *iptr);
WRT_REG_WORD(optr, *iptr);
+ }
mboxes >>= 1;
optr++;
iptr++;
}
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111,
- "Loaded MBX registers (displayed in bytes) =.\n");
- ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1112,
- (uint8_t *)mcp->mb, 16);
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1113,
- ".\n");
- ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1114,
- ((uint8_t *)mcp->mb + 0x10), 16);
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1115,
- ".\n");
- ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1116,
- ((uint8_t *)mcp->mb + 0x20), 8);
ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1117,
"I/O Address = %p.\n", optr);
- ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x100e);
/* Issue set host interrupt command to send cmd out. */
ha->flags.mbox_int = 0;
@@ -159,7 +151,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) {
set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
if (RD_REG_DWORD(&reg->isp82.hint) &
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
@@ -189,7 +181,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ql_dbg(ql_dbg_mbx, vha, 0x1011,
"Cmd=%x Polling Mode.\n", command);
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
if (RD_REG_DWORD(&reg->isp82.hint) &
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
@@ -236,7 +228,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ha->flags.mbox_int = 0;
clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
- if ((IS_QLA82XX(ha) && ha->flags.isp82xx_fw_hung)) {
+ if (IS_P3P_TYPE(ha) && ha->flags.isp82xx_fw_hung) {
ha->flags.mbox_busy = 0;
/* Setting Link-Down error */
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
@@ -254,9 +246,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
iptr2 = mcp->mb;
iptr = (uint16_t *)&ha->mailbox_out[0];
mboxes = mcp->in_mb;
+
+ ql_dbg(ql_dbg_mbx, vha, 0x1113,
+ "Mailbox registers (IN):\n");
for (cnt = 0; cnt < ha->mbx_count; cnt++) {
- if (mboxes & BIT_0)
+ if (mboxes & BIT_0) {
*iptr2 = *iptr;
+ ql_dbg(ql_dbg_mbx, vha, 0x1114,
+ "mbox[%d]->0x%04x\n", cnt, *iptr2);
+ }
mboxes >>= 1;
iptr2++;
@@ -537,7 +535,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
mcp->out_mb = MBX_0;
mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
- if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha))
+ if (IS_QLA81XX(vha->hw) || IS_QLA8031(ha) || IS_QLA8044(ha))
mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8;
if (IS_FWI2_CAPABLE(ha))
mcp->in_mb |= MBX_17|MBX_16|MBX_15;
@@ -556,7 +554,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
ha->fw_memory_size = 0x1FFFF; /* Defaults to 128KB. */
else
ha->fw_memory_size = (mcp->mb[5] << 16) | mcp->mb[4];
- if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw)) {
+ if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
ha->mpi_version[0] = mcp->mb[10] & 0xff;
ha->mpi_version[1] = mcp->mb[11] >> 8;
ha->mpi_version[2] = mcp->mb[11] & 0xff;
@@ -1201,7 +1199,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
"Entered %s.\n", __func__);
- if (IS_QLA82XX(ha) && ql2xdbwr)
+ if (IS_P3P_TYPE(ha) && ql2xdbwr)
qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
(0x04 | (ha->portnum << 5) | (0 << 8) | (0 << 16)));
@@ -1667,7 +1665,11 @@ qla24xx_link_initialize(scsi_qla_host_t *vha)
return QLA_FUNCTION_FAILED;
mcp->mb[0] = MBC_LINK_INITIALIZATION;
- mcp->mb[1] = BIT_6|BIT_4;
+ mcp->mb[1] = BIT_4;
+ if (vha->hw->operating_mode == LOOP)
+ mcp->mb[1] |= BIT_6;
+ else
+ mcp->mb[1] |= BIT_5;
mcp->mb[2] = 0;
mcp->mb[3] = 0;
mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -3574,7 +3576,6 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
unsigned long flags;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- struct device_reg_25xxmq __iomem *reg;
struct qla_hw_data *ha = vha->hw;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
@@ -3595,9 +3596,6 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
if (IS_QLA83XX(ha))
mcp->mb[15] = 0;
- reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) +
- QLA_QUE_PAGE * req->id);
-
mcp->mb[4] = req->id;
/* que in ptr index */
mcp->mb[8] = 0;
@@ -3619,12 +3617,10 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(req->options & BIT_0)) {
- WRT_REG_DWORD(&reg->req_q_in, 0);
+ WRT_REG_DWORD(req->req_q_in, 0);
if (!IS_QLA83XX(ha))
- WRT_REG_DWORD(&reg->req_q_out, 0);
+ WRT_REG_DWORD(req->req_q_out, 0);
}
- req->req_q_in = &reg->req_q_in;
- req->req_q_out = &reg->req_q_out;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
rval = qla2x00_mailbox_command(vha, mcp);
@@ -3646,7 +3642,6 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
unsigned long flags;
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- struct device_reg_25xxmq __iomem *reg;
struct qla_hw_data *ha = vha->hw;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
@@ -3664,9 +3659,6 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
if (IS_QLA83XX(ha))
mcp->mb[15] = 0;
- reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) +
- QLA_QUE_PAGE * rsp->id);
-
mcp->mb[4] = rsp->id;
/* que in ptr index */
mcp->mb[8] = 0;
@@ -3690,9 +3682,9 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!(rsp->options & BIT_0)) {
- WRT_REG_DWORD(&reg->rsp_q_out, 0);
+ WRT_REG_DWORD(rsp->rsp_q_out, 0);
if (!IS_QLA83XX(ha))
- WRT_REG_DWORD(&reg->rsp_q_in, 0);
+ WRT_REG_DWORD(rsp->rsp_q_in, 0);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3872,6 +3864,112 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
return rval;
}
+int
+qla82xx_set_driver_version(scsi_qla_host_t *vha, char *version)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int i;
+ int len;
+ uint16_t *str;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_P3P_TYPE(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117b,
+ "Entered %s.\n", __func__);
+
+ str = (void *)version;
+ len = strlen(version);
+
+ mcp->mb[0] = MBC_SET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_SET_VERSION << 8;
+ mcp->out_mb = MBX_1|MBX_0;
+ for (i = 4; i < 16 && len; i++, str++, len -= 2) {
+ mcp->mb[i] = cpu_to_le16p(str);
+ mcp->out_mb |= 1<<i;
+ }
+ for (; i < 16; i++) {
+ mcp->mb[i] = 0;
+ mcp->out_mb |= 1<<i;
+ }
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x117c,
+ "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117d,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
+qla25xx_set_driver_version(scsi_qla_host_t *vha, char *version)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int len;
+ uint16_t dwlen;
+ uint8_t *str;
+ dma_addr_t str_dma;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha) ||
+ IS_P3P_TYPE(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x117e,
+ "Entered %s.\n", __func__);
+
+ str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
+ if (!str) {
+ ql_log(ql_log_warn, vha, 0x117f,
+ "Failed to allocate driver version param.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ memcpy(str, "\x7\x3\x11\x0", 4);
+ dwlen = str[0];
+ len = dwlen * 4 - 4;
+ memset(str + 4, 0, len);
+ if (len > strlen(version))
+ len = strlen(version);
+ memcpy(str + 4, version, len);
+
+ mcp->mb[0] = MBC_SET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
+ mcp->mb[2] = MSW(LSD(str_dma));
+ mcp->mb[3] = LSW(LSD(str_dma));
+ mcp->mb[6] = MSW(MSD(str_dma));
+ mcp->mb[7] = LSW(MSD(str_dma));
+ mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1180,
+ "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1181,
+ "Done %s.\n", __func__);
+ }
+
+ dma_pool_free(ha->s_dma_pool, str, str_dma);
+
+ return rval;
+}
+
static int
qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
{
@@ -4407,7 +4505,7 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
"Entered %s.\n", __func__);
- if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
+ if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha) && !IS_QLA8044(ha))
return QLA_FUNCTION_FAILED;
mcp->mb[0] = MBC_GET_PORT_CONFIG;
mcp->out_mb = MBX_0;
@@ -4512,40 +4610,43 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
struct qla_hw_data *ha = vha->hw;
uint8_t byte;
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca,
- "Entered %s.\n", __func__);
-
- if (ha->thermal_support & THERMAL_SUPPORT_I2C) {
- rval = qla2x00_read_sfp(vha, 0, &byte,
- 0x98, 0x1, 1, BIT_13|BIT_12|BIT_0);
- *temp = byte;
- if (rval == QLA_SUCCESS)
- goto done;
-
- ql_log(ql_log_warn, vha, 0x10c9,
- "Thermal not supported through I2C bus, trying alternate "
- "method (ISP access).\n");
- ha->thermal_support &= ~THERMAL_SUPPORT_I2C;
+ if (!IS_FWI2_CAPABLE(ha) || IS_QLA24XX_TYPE(ha) || IS_QLA81XX(ha)) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1150,
+ "Thermal not supported by this card.\n");
+ return rval;
}
- if (ha->thermal_support & THERMAL_SUPPORT_ISP) {
- rval = qla2x00_read_asic_temperature(vha, temp);
- if (rval == QLA_SUCCESS)
- goto done;
-
- ql_log(ql_log_warn, vha, 0x1019,
- "Thermal not supported through ISP.\n");
- ha->thermal_support &= ~THERMAL_SUPPORT_ISP;
+ if (IS_QLA25XX(ha)) {
+ if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
+ ha->pdev->subsystem_device == 0x0175) {
+ rval = qla2x00_read_sfp(vha, 0, &byte,
+ 0x98, 0x1, 1, BIT_13|BIT_0);
+ *temp = byte;
+ return rval;
+ }
+ if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
+ ha->pdev->subsystem_device == 0x338e) {
+ rval = qla2x00_read_sfp(vha, 0, &byte,
+ 0x98, 0x1, 1, BIT_15|BIT_14|BIT_0);
+ *temp = byte;
+ return rval;
+ }
+ ql_dbg(ql_dbg_mbx, vha, 0x10c9,
+ "Thermal not supported by this card.\n");
+ return rval;
}
- ql_log(ql_log_warn, vha, 0x1150,
- "Thermal not supported by this card "
- "(ignoring further requests).\n");
- return rval;
+ if (IS_QLA82XX(ha)) {
+ *temp = qla82xx_read_temperature(vha);
+ rval = QLA_SUCCESS;
+ return rval;
+ } else if (IS_QLA8044(ha)) {
+ *temp = qla8044_read_temperature(vha);
+ rval = QLA_SUCCESS;
+ return rval;
+ }
-done:
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018,
- "Done %s.\n", __func__);
+ rval = qla2x00_read_asic_temperature(vha, temp);
return rval;
}
@@ -4595,7 +4696,7 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
"Entered %s.\n", __func__);
- if (!IS_QLA82XX(ha))
+ if (!IS_P3P_TYPE(ha))
return QLA_FUNCTION_FAILED;
memset(mcp, 0, sizeof(mbx_cmd_t));
@@ -4713,6 +4814,60 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
}
int
+qla8044_md_get_template(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int rval = QLA_FUNCTION_FAILED;
+ int offset = 0, size = MINIDUMP_SIZE_36K;
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11f,
+ "Entered %s.\n", __func__);
+
+ ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
+ ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
+ if (!ha->md_tmplt_hdr) {
+ ql_log(ql_log_warn, vha, 0xb11b,
+ "Unable to allocate memory for Minidump template.\n");
+ return rval;
+ }
+
+ memset(mcp->mb, 0, sizeof(mcp->mb));
+ while (offset < ha->md_template_size) {
+ mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+ mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
+ mcp->mb[2] = LSW(RQST_TMPLT);
+ mcp->mb[3] = MSW(RQST_TMPLT);
+ mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma + offset));
+ mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma + offset));
+ mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma + offset));
+ mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma + offset));
+ mcp->mb[8] = LSW(size);
+ mcp->mb[9] = MSW(size);
+ mcp->mb[10] = offset & 0x0000FFFF;
+ mcp->mb[11] = offset & 0xFFFF0000;
+ mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8|
+ MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0xb11c,
+ "mailbox command FAILED=0x%x, subcode=%x.\n",
+ ((mcp->mb[1] << 16) | mcp->mb[0]),
+ ((mcp->mb[3] << 16) | mcp->mb[2]));
+ return rval;
+ } else
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0xb11d,
+ "Done %s.\n", __func__);
+ offset = offset + size;
+ }
+ return rval;
+}
+
+int
qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
{
int rval;
@@ -4808,7 +4963,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
- if (!IS_QLA82XX(ha))
+ if (!IS_P3P_TYPE(ha))
return QLA_FUNCTION_FAILED;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index f868a9f98af..a72df701fb3 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -699,6 +699,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req->cnt = req->length;
req->id = que_id;
reg = ISP_QUE_REG(ha, que_id);
+ req->req_q_in = &reg->isp25mq.req_q_in;
+ req->req_q_out = &reg->isp25mq.req_q_out;
req->max_q_depth = ha->req_q_map[0]->max_q_depth;
mutex_unlock(&ha->vport_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index ab4be107cda..62ee7131b20 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -294,7 +294,7 @@ premature_exit:
* Context:
* Kernel context.
*/
-static int
+int
qlafx00_driver_shutdown(scsi_qla_host_t *vha, int tmo)
{
int rval;
@@ -776,6 +776,29 @@ qlafx00_lun_reset(fc_port_t *fcport, unsigned int l, int tag)
}
int
+qlafx00_loop_reset(scsi_qla_host_t *vha)
+{
+ int ret;
+ struct fc_port *fcport;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (ql2xtargetreset) {
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->port_type != FCT_TARGET)
+ continue;
+
+ ret = ha->isp_ops->target_reset(fcport, 0, 0);
+ if (ret != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_taskm, vha, 0x803d,
+ "Bus Reset failed: Reset=%d "
+ "d_id=%x.\n", ret, fcport->d_id.b24);
+ }
+ }
+ }
+ return QLA_SUCCESS;
+}
+
+int
qlafx00_iospace_config(struct qla_hw_data *ha)
{
if (pci_request_selected_regions(ha->pdev, ha->bars,
@@ -918,12 +941,23 @@ qlafx00_init_fw_ready(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
uint32_t aenmbx, aenmbx7 = 0;
+ uint32_t pseudo_aen;
uint32_t state[5];
bool done = false;
/* 30 seconds wait - Adjust if required */
wait_time = 30;
+ pseudo_aen = RD_REG_DWORD(&reg->pseudoaen);
+ if (pseudo_aen == 1) {
+ aenmbx7 = RD_REG_DWORD(&reg->initval7);
+ ha->mbx_intr_code = MSW(aenmbx7);
+ ha->rqstq_intr_code = LSW(aenmbx7);
+ rval = qlafx00_driver_shutdown(vha, 10);
+ if (rval != QLA_SUCCESS)
+ qlafx00_soft_reset(vha);
+ }
+
/* wait time before firmware ready */
wtime = jiffies + (wait_time * HZ);
do {
@@ -1349,21 +1383,22 @@ qlafx00_configure_devices(scsi_qla_host_t *vha)
}
static void
-qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha)
+qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha, bool critemp)
{
struct qla_hw_data *ha = vha->hw;
fc_port_t *fcport;
vha->flags.online = 0;
- ha->flags.chip_reset_done = 0;
ha->mr.fw_hbt_en = 0;
- clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- vha->qla_stats.total_isp_aborts++;
-
- ql_log(ql_log_info, vha, 0x013f,
- "Performing ISP error recovery - ha = %p.\n", ha);
- ha->isp_ops->reset_chip(vha);
+ if (!critemp) {
+ ha->flags.chip_reset_done = 0;
+ clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ vha->qla_stats.total_isp_aborts++;
+ ql_log(ql_log_info, vha, 0x013f,
+ "Performing ISP error recovery - ha = %p.\n", ha);
+ ha->isp_ops->reset_chip(vha);
+ }
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -1383,12 +1418,19 @@ qlafx00_abort_isp_cleanup(scsi_qla_host_t *vha)
}
if (!ha->flags.eeh_busy) {
- /* Requeue all commands in outstanding command list. */
- qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+ if (critemp) {
+ qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
+ } else {
+ /* Requeue all commands in outstanding command list. */
+ qla2x00_abort_all_cmds(vha, DID_RESET << 16);
+ }
}
qla2x00_free_irqs(vha);
- set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
+ if (critemp)
+ set_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags);
+ else
+ set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
/* Clear the Interrupts */
QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
@@ -1475,6 +1517,7 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
uint32_t fw_heart_beat;
uint32_t aenmbx0;
struct device_reg_fx00 __iomem *reg = &ha->iobase->ispfx00;
+ uint32_t tempc;
/* Check firmware health */
if (ha->mr.fw_hbt_cnt)
@@ -1539,10 +1582,36 @@ qlafx00_timer_routine(scsi_qla_host_t *vha)
} else if ((aenmbx0 & 0xFF00) == MBA_FW_POLL_STATE) {
ha->mr.fw_reset_timer_tick =
QLAFX00_MAX_RESET_INTERVAL;
+ } else if (aenmbx0 == MBA_FW_RESET_FCT) {
+ ha->mr.fw_reset_timer_tick =
+ QLAFX00_MAX_RESET_INTERVAL;
}
ha->mr.old_aenmbx0_state = aenmbx0;
ha->mr.fw_reset_timer_tick--;
}
+ if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) {
+ /*
+ * Critical temperature recovery to be
+ * performed in timer routine
+ */
+ if (ha->mr.fw_critemp_timer_tick == 0) {
+ tempc = QLAFX00_GET_TEMPERATURE(ha);
+ ql_dbg(ql_dbg_timer, vha, 0x6012,
+ "ISPFx00(%s): Critical temp timer, "
+ "current SOC temperature: %d\n",
+ __func__, tempc);
+ if (tempc < ha->mr.critical_temperature) {
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ clear_bit(FX00_CRITEMP_RECOVERY,
+ &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ ha->mr.fw_critemp_timer_tick =
+ QLAFX00_CRITEMP_INTERVAL;
+ } else {
+ ha->mr.fw_critemp_timer_tick--;
+ }
+ }
}
/*
@@ -1570,7 +1639,7 @@ qlafx00_reset_initialize(scsi_qla_host_t *vha)
if (vha->flags.online) {
scsi_block_requests(vha->host);
- qlafx00_abort_isp_cleanup(vha);
+ qlafx00_abort_isp_cleanup(vha, false);
}
ql_log(ql_log_info, vha, 0x0143,
@@ -1602,7 +1671,15 @@ qlafx00_abort_isp(scsi_qla_host_t *vha)
}
scsi_block_requests(vha->host);
- qlafx00_abort_isp_cleanup(vha);
+ qlafx00_abort_isp_cleanup(vha, false);
+ } else {
+ scsi_block_requests(vha->host);
+ clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ vha->qla_stats.total_isp_aborts++;
+ ha->isp_ops->reset_chip(vha);
+ set_bit(FX00_RESET_RECOVERY, &vha->dpc_flags);
+ /* Clear the Interrupts */
+ QLAFX00_CLR_INTR_REG(ha, QLAFX00_HST_INT_STS_BITS);
}
ql_log(ql_log_info, vha, 0x0145,
@@ -1688,6 +1765,15 @@ qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt)
aen_code = FCH_EVT_LINKDOWN;
aen_data = 0;
break;
+ case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */
+ ql_log(ql_log_info, vha, 0x5082,
+ "Process critical temperature event "
+ "aenmb[0]: %x\n",
+ evt->u.aenfx.evtcode);
+ scsi_block_requests(vha->host);
+ qlafx00_abort_isp_cleanup(vha, true);
+ scsi_unblock_requests(vha->host);
+ break;
}
fc_host_post_event(vha->host, fc_get_event_number(),
@@ -1879,6 +1965,11 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type)
sizeof(vha->hw->mr.uboot_version));
memcpy(&vha->hw->mr.fru_serial_num, pinfo->fru_serial_num,
sizeof(vha->hw->mr.fru_serial_num));
+ vha->hw->mr.critical_temperature =
+ (pinfo->nominal_temp_value) ?
+ pinfo->nominal_temp_value : QLAFX00_CRITEMP_THRSHLD;
+ ha->mr.extended_io_enabled = (pinfo->enabled_capabilities &
+ QLAFX00_EXTENDED_IO_EN_MASK) != 0;
} else if (fx_type == FXDISC_GET_PORT_INFO) {
struct port_info_data *pinfo =
(struct port_info_data *) fdisc->u.fxiocb.rsp_addr;
@@ -2021,6 +2112,7 @@ qlafx00_initialize_adapter(scsi_qla_host_t *vha)
{
int rval;
struct qla_hw_data *ha = vha->hw;
+ uint32_t tempc;
/* Clear adapter flags. */
vha->flags.online = 0;
@@ -2028,7 +2120,6 @@ qlafx00_initialize_adapter(scsi_qla_host_t *vha)
vha->flags.reset_active = 0;
ha->flags.pci_channel_io_perm_failure = 0;
ha->flags.eeh_busy = 0;
- ha->thermal_support = 0;
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
atomic_set(&vha->loop_state, LOOP_DOWN);
vha->device_flags = DFLG_NO_CABLE;
@@ -2072,6 +2163,11 @@ qlafx00_initialize_adapter(scsi_qla_host_t *vha)
rval = qla2x00_init_rings(vha);
ha->flags.chip_reset_done = 1;
+ tempc = QLAFX00_GET_TEMPERATURE(ha);
+ ql_dbg(ql_dbg_init, vha, 0x0152,
+ "ISPFx00(%s): Critical temp timer, current SOC temperature: 0x%x\n",
+ __func__, tempc);
+
return rval;
}
@@ -2526,16 +2622,13 @@ check_scsi_status:
if (logit)
ql_dbg(ql_dbg_io, fcport->vha, 0x3058,
- "FCP command status: 0x%x-0x%x (0x%x) "
- "nexus=%ld:%d:%d tgt_id: 0x%x lscsi_status: 0x%x"
- "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
- "rsp_info=0x%x resid=0x%x fw_resid=0x%x "
- "sense_len=0x%x, par_sense_len=0x%x, rsp_info_len=0x%x\n",
+ "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d "
+ "tgt_id: 0x%x lscsi_status: 0x%x cdb=%10phN len=0x%x "
+ "rsp_info=0x%x resid=0x%x fw_resid=0x%x sense_len=0x%x, "
+ "par_sense_len=0x%x, rsp_info_len=0x%x\n",
comp_status, scsi_status, res, vha->host_no,
cp->device->id, cp->device->lun, fcport->tgt_id,
- lscsi_status, cp->cmnd[0], cp->cmnd[1], cp->cmnd[2],
- cp->cmnd[3], cp->cmnd[4], cp->cmnd[5], cp->cmnd[6],
- cp->cmnd[7], cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp),
+ lscsi_status, cp->cmnd, scsi_bufflen(cp),
rsp_info_len, resid_len, fw_resid_len, sense_len,
par_sense_len, rsp_info_len);
@@ -2720,9 +2813,6 @@ qlafx00_process_response_queue(struct scsi_qla_host *vha,
struct sts_entry_fx00 *pkt;
response_t *lptr;
- if (!vha->flags.online)
- return;
-
while (RD_REG_DWORD((void __iomem *)&(rsp->ring_ptr->signature)) !=
RESPONSE_PROCESSED) {
lptr = rsp->ring_ptr;
@@ -2824,6 +2914,28 @@ qlafx00_async_event(scsi_qla_host_t *vha)
ha->aenmb[0], ha->aenmb[1], ha->aenmb[2], ha->aenmb[3]);
data_size = 4;
break;
+
+ case QLAFX00_MBA_TEMP_OVER: /* Over temperature event */
+ ql_log(ql_log_info, vha, 0x5085,
+ "Asynchronous over temperature event received "
+ "aenmb[0]: %x\n",
+ ha->aenmb[0]);
+ break;
+
+ case QLAFX00_MBA_TEMP_NORM: /* Normal temperature event */
+ ql_log(ql_log_info, vha, 0x5086,
+ "Asynchronous normal temperature event received "
+ "aenmb[0]: %x\n",
+ ha->aenmb[0]);
+ break;
+
+ case QLAFX00_MBA_TEMP_CRIT: /* Critical temperature event */
+ ql_log(ql_log_info, vha, 0x5083,
+ "Asynchronous critical temperature event received "
+ "aenmb[0]: %x\n",
+ ha->aenmb[0]);
+ break;
+
default:
ha->aenmb[1] = RD_REG_WORD(&reg->aenmailbox1);
ha->aenmb[2] = RD_REG_WORD(&reg->aenmailbox2);
diff --git a/drivers/scsi/qla2xxx/qla_mr.h b/drivers/scsi/qla2xxx/qla_mr.h
index 1a092af0e2c..79a93c52bae 100644
--- a/drivers/scsi/qla2xxx/qla_mr.h
+++ b/drivers/scsi/qla2xxx/qla_mr.h
@@ -329,11 +329,13 @@ struct config_info_data {
uint64_t adapter_id;
uint32_t cluster_key_len;
- uint8_t cluster_key[10];
+ uint8_t cluster_key[16];
uint64_t cluster_master_id;
uint64_t cluster_slave_id;
uint8_t cluster_flags;
+ uint32_t enabled_capabilities;
+ uint32_t nominal_temp_value;
} __packed;
#define FXDISC_GET_CONFIG_INFO 0x01
@@ -342,10 +344,11 @@ struct config_info_data {
#define FXDISC_GET_TGT_NODE_LIST 0x81
#define FXDISC_REG_HOST_INFO 0x99
-#define QLAFX00_HBA_ICNTRL_REG 0x21B08
+#define QLAFX00_HBA_ICNTRL_REG 0x20B08
#define QLAFX00_ICR_ENB_MASK 0x80000000
#define QLAFX00_ICR_DIS_MASK 0x7fffffff
#define QLAFX00_HST_RST_REG 0x18264
+#define QLAFX00_SOC_TEMP_REG 0x184C4
#define QLAFX00_HST_TO_HBA_REG 0x20A04
#define QLAFX00_HBA_TO_HOST_REG 0x21B70
#define QLAFX00_HST_INT_STS_BITS 0x7
@@ -361,6 +364,9 @@ struct config_info_data {
#define QLAFX00_INTR_ALL_CMPLT 0x7
#define QLAFX00_MBA_SYSTEM_ERR 0x8002
+#define QLAFX00_MBA_TEMP_OVER 0x8005
+#define QLAFX00_MBA_TEMP_NORM 0x8006
+#define QLAFX00_MBA_TEMP_CRIT 0x8007
#define QLAFX00_MBA_LINK_UP 0x8011
#define QLAFX00_MBA_LINK_DOWN 0x8012
#define QLAFX00_MBA_PORT_UPDATE 0x8014
@@ -434,9 +440,11 @@ struct qla_mt_iocb_rqst_fx00 {
__le32 dataword_extra;
- __le32 req_len;
+ __le16 req_len;
+ __le16 reserved_2;
- __le32 rsp_len;
+ __le16 rsp_len;
+ __le16 reserved_3;
};
struct qla_mt_iocb_rsp_fx00 {
@@ -499,12 +507,37 @@ struct mr_data_fx00 {
uint32_t old_fw_hbt_cnt;
uint16_t fw_reset_timer_tick;
uint8_t fw_reset_timer_exp;
+ uint16_t fw_critemp_timer_tick;
uint32_t old_aenmbx0_state;
+ uint32_t critical_temperature;
+ bool extended_io_enabled;
};
+#define QLAFX00_EXTENDED_IO_EN_MASK 0x20
+
+/*
+ * The SoC junction temperature is stored in
+ * bits 9:1 of the SoC Junction Temperature Register
+ * in a firmware-specific format.
+ * To get the temperature in degrees Celsius,
+ * the value from this bit field should be converted
+ * using this formula:
+ * Temperature (degrees C) = ((3,153,000 - (10,000 * X)) / 13,825)
+ * where X is the bit field value.
+ * This macro reads the register, extracts the bit field value,
+ * performs the calculation and returns the temperature in Celsius.
+ */
+#define QLAFX00_GET_TEMPERATURE(ha) ((3153000 - (10000 * \
+ ((QLAFX00_RD_REG(ha, QLAFX00_SOC_TEMP_REG) & 0x3FE) >> 1))) / 13825)
+
+
#define QLAFX00_LOOP_DOWN_TIME 615 /* 600 */
#define QLAFX00_HEARTBEAT_INTERVAL 6 /* number of seconds */
#define QLAFX00_HEARTBEAT_MISS_CNT 3 /* number of miss */
#define QLAFX00_RESET_INTERVAL 120 /* number of seconds */
#define QLAFX00_MAX_RESET_INTERVAL 600 /* number of seconds */
+#define QLAFX00_CRITEMP_INTERVAL 60 /* number of seconds */
+
+#define QLAFX00_CRITEMP_THRSHLD 80 /* Celsius degrees */
+
#endif
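
Note: QLAFX00_GET_TEMPERATURE() above packs the register read, bit-field extraction and conversion into one expression. A standalone sketch of the same arithmetic with a hypothetical raw register value:

	u32 raw = 0x1a4;				/* example QLAFX00_SOC_TEMP_REG contents */
	int x = (raw & 0x3fe) >> 1;			/* bits 9:1 -> 0xd2 = 210 */
	int temp_c = (3153000 - 10000 * x) / 13825;	/* (3153000 - 2100000) / 13825 = 76 C */
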
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index cce0cd0d7ec..11ce53dcbe7 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -848,7 +848,6 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
{
int done = 0, timeout = 0;
uint32_t lock_owner = 0;
- scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
while (!done) {
/* acquire semaphore2 from PCI HW block */
@@ -857,9 +856,6 @@ qla82xx_rom_lock(struct qla_hw_data *ha)
break;
if (timeout >= qla82xx_rom_lock_timeout) {
lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
- ql_dbg(ql_dbg_p3p, vha, 0xb085,
- "Failed to acquire rom lock, acquired by %d.\n",
- lock_owner);
return -1;
}
timeout++;
@@ -1666,8 +1662,14 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
}
/* Mapping of IO base pointer */
- ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
- 0xbc000 + (ha->pdev->devfn << 11));
+ if (IS_QLA8044(ha)) {
+ ha->iobase =
+ (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase);
+ } else if (IS_QLA82XX(ha)) {
+ ha->iobase =
+ (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase +
+ 0xbc000 + (ha->pdev->devfn << 11));
+ }
if (!ql2xdbwr) {
ha->nxdb_wr_ptr =
@@ -1967,7 +1969,7 @@ static struct qla82xx_legacy_intr_set legacy_intr[] = \
* @ha: SCSI driver HA context
* @mb0: Mailbox0 register
*/
-static void
+void
qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
@@ -2075,13 +2077,6 @@ qla82xx_intr_handler(int irq, void *dev_id)
WRT_REG_DWORD(&reg->host_int, 0);
}
-#ifdef QL_DEBUG_LEVEL_17
- if (!irq && ha->flags.eeh_busy)
- ql_log(ql_log_warn, vha, 0x503d,
- "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
- status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
-#endif
-
qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2147,13 +2142,6 @@ qla82xx_msix_default(int irq, void *dev_id)
WRT_REG_DWORD(&reg->host_int, 0);
} while (0);
-#ifdef QL_DEBUG_LEVEL_17
- if (!irq && ha->flags.eeh_busy)
- ql_log(ql_log_warn, vha, 0x5044,
- "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n",
- status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
-#endif
-
qla2x00_handle_mbx_completion(ha, status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2247,7 +2235,10 @@ qla82xx_enable_intrs(struct qla_hw_data *ha)
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
qla82xx_mbx_intr_enable(vha);
spin_lock_irq(&ha->hardware_lock);
- qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+ if (IS_QLA8044(ha))
+ qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 0);
+ else
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
spin_unlock_irq(&ha->hardware_lock);
ha->interrupts_on = 1;
}
@@ -2258,7 +2249,10 @@ qla82xx_disable_intrs(struct qla_hw_data *ha)
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
qla82xx_mbx_intr_disable(vha);
spin_lock_irq(&ha->hardware_lock);
- qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
+ if (IS_QLA8044(ha))
+ qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 1);
+ else
+ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400);
spin_unlock_irq(&ha->hardware_lock);
ha->interrupts_on = 0;
}
@@ -3008,6 +3002,9 @@ qla8xxx_dev_failed_handler(scsi_qla_host_t *vha)
if (IS_QLA82XX(ha)) {
qla82xx_clear_drv_active(ha);
qla82xx_idc_unlock(ha);
+ } else if (IS_QLA8044(ha)) {
+ qla8044_clear_drv_active(vha);
+ qla8044_idc_unlock(ha);
}
/* Set DEV_FAILED flag to disable timer */
@@ -3134,7 +3131,7 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
if (fw_major_version != ha->fw_major_version ||
fw_minor_version != ha->fw_minor_version ||
fw_subminor_version != ha->fw_subminor_version) {
- ql_log(ql_log_info, vha, 0xb02d,
+ ql_dbg(ql_dbg_p3p, vha, 0xb02d,
"Firmware version differs "
"Previous version: %d:%d:%d - "
"New version: %d:%d:%d\n",
@@ -3330,6 +3327,14 @@ static int qla82xx_check_temp(scsi_qla_host_t *vha)
return 0;
}
+int qla82xx_read_temperature(scsi_qla_host_t *vha)
+{
+ uint32_t temp;
+
+ temp = qla82xx_rd_32(vha->hw, CRB_TEMP_STATE);
+ return qla82xx_get_temp_val(temp);
+}
+
void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
@@ -3423,8 +3428,18 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
- int rval;
- rval = qla82xx_device_state_handler(vha);
+ int rval = -1;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (IS_QLA82XX(ha))
+ rval = qla82xx_device_state_handler(vha);
+ else if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ /* Decide the reset ownership */
+ qla83xx_reset_ownership(vha);
+ qla8044_idc_unlock(ha);
+ rval = qla8044_device_state_handler(vha);
+ }
return rval;
}
@@ -3432,17 +3447,25 @@ void
qla82xx_set_reset_owner(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
- uint32_t dev_state;
+ uint32_t dev_state = 0;
+
+ if (IS_QLA82XX(ha))
+ dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ else if (IS_QLA8044(ha))
+ dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
- dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
if (dev_state == QLA8XXX_DEV_READY) {
ql_log(ql_log_info, vha, 0xb02f,
"HW State: NEED RESET\n");
- qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA8XXX_DEV_NEED_RESET);
- ha->flags.nic_core_reset_owner = 1;
- ql_dbg(ql_dbg_p3p, vha, 0xb030,
- "reset_owner is 0x%x\n", ha->portnum);
+ if (IS_QLA82XX(ha)) {
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA8XXX_DEV_NEED_RESET);
+ ha->flags.nic_core_reset_owner = 1;
+ ql_dbg(ql_dbg_p3p, vha, 0xb030,
+ "reset_owner is 0x%x\n", ha->portnum);
+ } else if (IS_QLA8044(ha))
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_NEED_RESET);
} else
ql_log(ql_log_info, vha, 0xb031,
"Device state is 0x%x = %s.\n",
@@ -3463,7 +3486,7 @@ qla82xx_set_reset_owner(scsi_qla_host_t *vha)
int
qla82xx_abort_isp(scsi_qla_host_t *vha)
{
- int rval;
+ int rval = -1;
struct qla_hw_data *ha = vha->hw;
if (vha->device_flags & DFLG_DEV_FAILED) {
@@ -3477,7 +3500,15 @@ qla82xx_abort_isp(scsi_qla_host_t *vha)
qla82xx_set_reset_owner(vha);
qla82xx_idc_unlock(ha);
- rval = qla82xx_device_state_handler(vha);
+ if (IS_QLA82XX(ha))
+ rval = qla82xx_device_state_handler(vha);
+ else if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ /* Decide the reset ownership */
+ qla83xx_reset_ownership(vha);
+ qla8044_idc_unlock(ha);
+ rval = qla8044_device_state_handler(vha);
+ }
qla82xx_idc_lock(ha);
qla82xx_clear_rst_ready(ha);
@@ -3597,7 +3628,7 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
void
qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
{
- int i;
+ int i, fw_state = 0;
unsigned long flags;
struct qla_hw_data *ha = vha->hw;
@@ -3608,7 +3639,11 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
if (!ha->flags.isp82xx_fw_hung) {
for (i = 0; i < 2; i++) {
msleep(1000);
- if (qla82xx_check_fw_alive(vha)) {
+ if (IS_QLA82XX(ha))
+ fw_state = qla82xx_check_fw_alive(vha);
+ else if (IS_QLA8044(ha))
+ fw_state = qla8044_check_fw_alive(vha);
+ if (fw_state) {
ha->flags.isp82xx_fw_hung = 1;
qla82xx_clear_pending_mbx(vha);
break;
@@ -4072,7 +4107,7 @@ qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha,
return QLA_SUCCESS;
}
-static int
+int
qla82xx_validate_template_chksum(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
@@ -4384,7 +4419,11 @@ qla82xx_md_prep(scsi_qla_host_t *vha)
ha->md_template_size / 1024);
/* Get Minidump template */
- rval = qla82xx_md_get_template(vha);
+ if (IS_QLA8044(ha))
+ rval = qla8044_md_get_template(vha);
+ else
+ rval = qla82xx_md_get_template(vha);
+
if (rval == QLA_SUCCESS) {
ql_dbg(ql_dbg_p3p, vha, 0xb04b,
"MiniDump Template obtained\n");
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index d268e8406fd..1bb93dbbccb 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -589,6 +589,7 @@
* The PCI VendorID and DeviceID for our board.
*/
#define PCI_DEVICE_ID_QLOGIC_ISP8021 0x8021
+#define PCI_DEVICE_ID_QLOGIC_ISP8044 0x8044
#define QLA82XX_MSIX_TBL_SPACE 8192
#define QLA82XX_PCI_REG_MSIX_TBL 0x44
@@ -954,6 +955,11 @@ struct ct6_dsd {
#define QLA82XX_CNTRL 98
#define QLA82XX_TLHDR 99
#define QLA82XX_RDEND 255
+#define QLA8044_POLLRD 35
+#define QLA8044_RDMUX2 36
+#define QLA8044_L1DTG 8
+#define QLA8044_L1ITG 9
+#define QLA8044_POLLRDMWR 37
/*
* Opcodes for Control Entries.
@@ -1191,4 +1197,8 @@ enum {
QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */
QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */
};
+
+#define LEG_INTR_PTR_OFFSET 0x38C0
+#define LEG_INTR_TRIG_OFFSET 0x38C4
+#define LEG_INTR_MASK_OFFSET 0x38C8
#endif
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
new file mode 100644
index 00000000000..8164cc9e728
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -0,0 +1,3716 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+#include <linux/vmalloc.h>
+
+#include "qla_def.h"
+#include "qla_gbl.h"
+
+#include <linux/delay.h>
+
+/* 8044 Flash Read/Write functions */
+uint32_t
+qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
+{
+ return readl((void __iomem *) (ha->nx_pcibase + addr));
+}
+
+void
+qla8044_wr_reg(struct qla_hw_data *ha, ulong addr, uint32_t val)
+{
+ writel(val, (void __iomem *)((ha)->nx_pcibase + addr));
+}
+
+int
+qla8044_rd_direct(struct scsi_qla_host *vha,
+ const uint32_t crb_reg)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (crb_reg < CRB_REG_INDEX_MAX)
+ return qla8044_rd_reg(ha, qla8044_reg_tbl[crb_reg]);
+ else
+ return QLA_FUNCTION_FAILED;
+}
+
+void
+qla8044_wr_direct(struct scsi_qla_host *vha,
+ const uint32_t crb_reg,
+ const uint32_t value)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (crb_reg < CRB_REG_INDEX_MAX)
+ qla8044_wr_reg(ha, qla8044_reg_tbl[crb_reg], value);
+}
+
+static int
+qla8044_set_win_base(scsi_qla_host_t *vha, uint32_t addr)
+{
+ uint32_t val;
+ int ret_val = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ qla8044_wr_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum), addr);
+ val = qla8044_rd_reg(ha, QLA8044_CRB_WIN_FUNC(ha->portnum));
+
+ if (val != addr) {
+ ql_log(ql_log_warn, vha, 0xb087,
+ "%s: Failed to set register window : "
+ "addr written 0x%x, read 0x%x!\n",
+ __func__, addr, val);
+ ret_val = QLA_FUNCTION_FAILED;
+ }
+ return ret_val;
+}
+
+static int
+qla8044_rd_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t *data)
+{
+ int ret_val = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ ret_val = qla8044_set_win_base(vha, addr);
+ if (!ret_val)
+ *data = qla8044_rd_reg(ha, QLA8044_WILDCARD);
+ else
+ ql_log(ql_log_warn, vha, 0xb088,
+ "%s: failed read of addr 0x%x!\n", __func__, addr);
+ return ret_val;
+}
+
+static int
+qla8044_wr_reg_indirect(scsi_qla_host_t *vha, uint32_t addr, uint32_t data)
+{
+ int ret_val = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ ret_val = qla8044_set_win_base(vha, addr);
+ if (!ret_val)
+ qla8044_wr_reg(ha, QLA8044_WILDCARD, data);
+ else
+ ql_log(ql_log_warn, vha, 0xb089,
+		    "%s: failed write to addr 0x%x, data 0x%x\n",
+ __func__, addr, data);
+ return ret_val;
+}
+
+/*
+ * qla8044_read_write_crb_reg - Read from raddr and write value to waddr.
+ *
+ * @ha : Pointer to adapter structure
+ * @raddr : CRB address to read from
+ * @waddr : CRB address to write to
+ *
+ */
+static void
+qla8044_read_write_crb_reg(struct scsi_qla_host *vha,
+ uint32_t raddr, uint32_t waddr)
+{
+ uint32_t value;
+
+ qla8044_rd_reg_indirect(vha, raddr, &value);
+ qla8044_wr_reg_indirect(vha, waddr, value);
+}
+
+/*
+ * qla8044_rmw_crb_reg - Read value from raddr, AND it with test_mask,
+ * shift it left/right and OR/XOR it with the values from the RMW header,
+ * then write the result to waddr.
+ *
+ * @vha : Pointer to adapter structure
+ * @raddr : CRB address to read from
+ * @waddr : CRB address to write to
+ * @p_rmw_hdr : header with shift/or/xor values.
+ *
+ */
+static void
+qla8044_rmw_crb_reg(struct scsi_qla_host *vha,
+ uint32_t raddr, uint32_t waddr, struct qla8044_rmw *p_rmw_hdr)
+{
+ uint32_t value;
+
+ if (p_rmw_hdr->index_a)
+ value = vha->reset_tmplt.array[p_rmw_hdr->index_a];
+ else
+ qla8044_rd_reg_indirect(vha, raddr, &value);
+ value &= p_rmw_hdr->test_mask;
+ value <<= p_rmw_hdr->shl;
+ value >>= p_rmw_hdr->shr;
+ value |= p_rmw_hdr->or_value;
+ value ^= p_rmw_hdr->xor_value;
+ qla8044_wr_reg_indirect(vha, waddr, value);
+ return;
+}
+
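A brief worked instance of the read-modify-write transform above (illustrative values only, not taken from any real reset template):

/*
 * Example: with test_mask = 0x0000FF00, shl = 0, shr = 8,
 * or_value = 0x1 and xor_value = 0x0, a value of 0x12345678
 * read from raddr becomes:
 *   0x12345678 & 0x0000FF00 = 0x00005600
 *   0x00005600 >> 8         = 0x00000056
 *   0x00000056 | 0x1        = 0x00000057
 *   0x00000057 ^ 0x0        = 0x00000057
 * and 0x57 is then written to waddr.
 */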
+inline void
+qla8044_set_qsnt_ready(struct scsi_qla_host *vha)
+{
+ uint32_t qsnt_state;
+ struct qla_hw_data *ha = vha->hw;
+
+ qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+ qsnt_state |= (1 << ha->portnum);
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
+ ql_log(ql_log_info, vha, 0xb08e, "%s(%ld): qsnt_state: 0x%08x\n",
+ __func__, vha->host_no, qsnt_state);
+}
+
+void
+qla8044_clear_qsnt_ready(struct scsi_qla_host *vha)
+{
+ uint32_t qsnt_state;
+ struct qla_hw_data *ha = vha->hw;
+
+ qsnt_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+ qsnt_state &= ~(1 << ha->portnum);
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, qsnt_state);
+ ql_log(ql_log_info, vha, 0xb08f, "%s(%ld): qsnt_state: 0x%08x\n",
+ __func__, vha->host_no, qsnt_state);
+}
+
+/**
+ *
+ * qla8044_lock_recovery - Recovers the idc_lock.
+ * @ha : Pointer to adapter structure
+ *
+ * Lock Recovery Register
+ * 5-2 Lock recovery owner: Function ID of driver doing lock recovery,
+ * valid if bits 1..0 are set by driver doing lock recovery.
+ * 1-0 1 - Driver intends to force unlock the IDC lock.
+ * 2 - Driver is moving forward to unlock the IDC lock. Driver clears
+ * this field after force unlocking the IDC lock.
+ *
+ * Lock Recovery process
+ * a. Read the IDC_LOCK_RECOVERY register. If the value in bits 1..0 is
+ * greater than 0, then wait for the other driver to unlock otherwise
+ * move to the next step.
+ * b. Indicate intent to force-unlock by writing 1h to the IDC_LOCK_RECOVERY
+ * register bits 1..0 and also set the function# in bits 5..2.
+ * c. Read the IDC_LOCK_RECOVERY register again after a delay of 200ms.
+ * Wait for the other driver to perform lock recovery if the function
+ * number in bits 5..2 has changed, otherwise move to the next step.
+ * d. Write a value of 2h to the IDC_LOCK_RECOVERY register bits 1..0
+ * leaving your function# in bits 5..2.
+ * e. Force unlock using the DRIVER_UNLOCK register and immediately clear
+ * the IDC_LOCK_RECOVERY bits 5..0 by writing 0.
+ **/
+static int
+qla8044_lock_recovery(struct scsi_qla_host *vha)
+{
+ uint32_t lock = 0, lockid;
+ struct qla_hw_data *ha = vha->hw;
+
+ lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);
+
+ /* Check for other Recovery in progress, go wait */
+ if ((lockid & IDC_LOCK_RECOVERY_STATE_MASK) != 0)
+ return QLA_FUNCTION_FAILED;
+
+ /* Intent to Recover */
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
+ (ha->portnum <<
+ IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) | INTENT_TO_RECOVER);
+ msleep(200);
+
+ /* Check Intent to Recover is advertised */
+ lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCKRECOVERY);
+ if ((lockid & IDC_LOCK_RECOVERY_OWNER_MASK) != (ha->portnum <<
+ IDC_LOCK_RECOVERY_STATE_SHIFT_BITS))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb08B, "%s:%d: IDC Lock recovery initiated\n"
+ , __func__, ha->portnum);
+
+ /* Proceed to Recover */
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY,
+ (ha->portnum << IDC_LOCK_RECOVERY_STATE_SHIFT_BITS) |
+ PROCEED_TO_RECOVER);
+
+ /* Force Unlock() */
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, 0xFF);
+ qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
+
+ /* Clear bits 0-5 in IDC_RECOVERY register*/
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCKRECOVERY, 0);
+
+ /* Get lock() */
+ lock = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);
+ if (lock) {
+ lockid = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+ lockid = ((lockid + (1 << 8)) & ~0xFF) | ha->portnum;
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lockid);
+ return QLA_SUCCESS;
+ } else
+ return QLA_FUNCTION_FAILED;
+}
+
+int
+qla8044_idc_lock(struct qla_hw_data *ha)
+{
+ uint32_t ret_val = QLA_SUCCESS, timeout = 0, status = 0;
+ uint32_t lock_id, lock_cnt, func_num, tmo_owner = 0, first_owner = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ while (status == 0) {
+ /* acquire semaphore5 from PCI HW block */
+ status = qla8044_rd_reg(ha, QLA8044_DRV_LOCK);
+
+ if (status) {
+ /* Increment Counter (8-31) and update func_num (0-7) on
+ * getting a successful lock */
+ lock_id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+ lock_id = ((lock_id + (1 << 8)) & ~0xFF) | ha->portnum;
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, lock_id);
+ break;
+ }
+
+ if (timeout == 0)
+ first_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+
+ if (++timeout >=
+ (QLA8044_DRV_LOCK_TIMEOUT / QLA8044_DRV_LOCK_MSLEEP)) {
+ tmo_owner = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+ func_num = tmo_owner & 0xFF;
+ lock_cnt = tmo_owner >> 8;
+ ql_log(ql_log_warn, vha, 0xb114,
+ "%s: Lock by func %d failed after 2s, lock held "
+ "by func %d, lock count %d, first_owner %d\n",
+ __func__, ha->portnum, func_num, lock_cnt,
+ (first_owner & 0xFF));
+ if (first_owner != tmo_owner) {
+ /* Some other driver got lock,
+ * OR same driver got lock again (counter
+ * value changed), when we were waiting for
+ * lock. Retry for another 2 sec */
+ ql_dbg(ql_dbg_p3p, vha, 0xb115,
+ "%s: %d: IDC lock failed\n",
+ __func__, ha->portnum);
+ timeout = 0;
+ } else {
+ /* Same driver holding lock > 2sec.
+ * Force Recovery */
+ if (qla8044_lock_recovery(vha) == QLA_SUCCESS) {
+ /* Recovered and got lock */
+ ret_val = QLA_SUCCESS;
+ ql_dbg(ql_dbg_p3p, vha, 0xb116,
+					"%s: IDC lock Recovery by %d "
+					"successful...\n", __func__,
+ ha->portnum);
+ }
+ /* Recovery Failed, some other function
+ * has the lock, wait for 2secs
+ * and retry
+ */
+ ql_dbg(ql_dbg_p3p, vha, 0xb08a,
+ "%s: IDC lock Recovery by %d "
+				    "failed, Retrying timeout\n", __func__,
+ ha->portnum);
+ timeout = 0;
+ }
+ }
+ msleep(QLA8044_DRV_LOCK_MSLEEP);
+ }
+ return ret_val;
+}
+
+void
+qla8044_idc_unlock(struct qla_hw_data *ha)
+{
+ int id;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+
+ id = qla8044_rd_reg(ha, QLA8044_DRV_LOCK_ID);
+
+ if ((id & 0xFF) != ha->portnum) {
+ ql_log(ql_log_warn, vha, 0xb118,
+ "%s: IDC Unlock by %d failed, lock owner is %d!\n",
+ __func__, ha->portnum, (id & 0xFF));
+ return;
+ }
+
+ /* Keep lock counter value, update the ha->func_num to 0xFF */
+ qla8044_wr_reg(ha, QLA8044_DRV_LOCK_ID, (id | 0xFF));
+ qla8044_rd_reg(ha, QLA8044_DRV_UNLOCK);
+}
+
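A short sketch of the DRV_LOCK_ID encoding used by the lock/unlock paths above (the numbers are hypothetical; bits 7:0 hold the owning function number and bits 31:8 hold an acquisition counter):

/*
 * Example: if DRV_LOCK_ID reads 0x00000305 (counter 3, owner function 5)
 * and function 2 then acquires the lock,
 *   ((0x00000305 + (1 << 8)) & ~0xFF) | 2 = 0x00000402
 * i.e. the counter becomes 4 and the owner field becomes 2.
 * On unlock, writing (id | 0xFF) keeps the counter and parks the
 * owner field at 0xFF.
 */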
+/* 8044 Flash Lock/Unlock functions */
+static int
+qla8044_flash_lock(scsi_qla_host_t *vha)
+{
+ int lock_owner;
+ int timeout = 0;
+ uint32_t lock_status = 0;
+ int ret_val = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ while (lock_status == 0) {
+ lock_status = qla8044_rd_reg(ha, QLA8044_FLASH_LOCK);
+ if (lock_status)
+ break;
+
+ if (++timeout >= QLA8044_FLASH_LOCK_TIMEOUT / 20) {
+ lock_owner = qla8044_rd_reg(ha,
+ QLA8044_FLASH_LOCK_ID);
+ ql_log(ql_log_warn, vha, 0xb113,
+ "%s: flash lock by %d failed, held by %d\n",
+ __func__, ha->portnum, lock_owner);
+ ret_val = QLA_FUNCTION_FAILED;
+ break;
+ }
+ msleep(20);
+ }
+ qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, ha->portnum);
+ return ret_val;
+}
+
+static void
+qla8044_flash_unlock(scsi_qla_host_t *vha)
+{
+ int ret_val;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Reading FLASH_UNLOCK register unlocks the Flash */
+ qla8044_wr_reg(ha, QLA8044_FLASH_LOCK_ID, 0xFF);
+ ret_val = qla8044_rd_reg(ha, QLA8044_FLASH_UNLOCK);
+}
+
+
+static
+void qla8044_flash_lock_recovery(struct scsi_qla_host *vha)
+{
+
+ if (qla8044_flash_lock(vha)) {
+ /* Someone else is holding the lock. */
+ ql_log(ql_log_warn, vha, 0xb120, "Resetting flash_lock\n");
+ }
+
+ /*
+ * Either we got the lock, or someone
+ * else died while holding it.
+ * In either case, unlock.
+ */
+ qla8044_flash_unlock(vha);
+}
+
+/*
+ * Address and length are in bytes
+ */
+static int
+qla8044_read_flash_data(scsi_qla_host_t *vha, uint8_t *p_data,
+ uint32_t flash_addr, int u32_word_count)
+{
+ int i, ret_val = QLA_SUCCESS;
+ uint32_t u32_word;
+
+ if (qla8044_flash_lock(vha) != QLA_SUCCESS) {
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_lock_error;
+ }
+
+ if (flash_addr & 0x03) {
+ ql_log(ql_log_warn, vha, 0xb117,
+ "%s: Illegal addr = 0x%x\n", __func__, flash_addr);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_flash_read;
+ }
+
+ for (i = 0; i < u32_word_count; i++) {
+ if (qla8044_wr_reg_indirect(vha, QLA8044_FLASH_DIRECT_WINDOW,
+ (flash_addr & 0xFFFF0000))) {
+ ql_log(ql_log_warn, vha, 0xb119,
+ "%s: failed to write addr 0x%x to "
+			    "FLASH_DIRECT_WINDOW!\n",
+ __func__, flash_addr);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_flash_read;
+ }
+
+ ret_val = qla8044_rd_reg_indirect(vha,
+ QLA8044_FLASH_DIRECT_DATA(flash_addr),
+ &u32_word);
+ if (ret_val != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0xb08c,
+ "%s: failed to read addr 0x%x!\n",
+ __func__, flash_addr);
+ goto exit_flash_read;
+ }
+
+ *(uint32_t *)p_data = u32_word;
+ p_data = p_data + 4;
+ flash_addr = flash_addr + 4;
+ }
+
+exit_flash_read:
+ qla8044_flash_unlock(vha);
+
+exit_lock_error:
+ return ret_val;
+}
+
+/*
+ * Address and length are in bytes
+ */
+uint8_t *
+qla8044_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+ uint32_t offset, uint32_t length)
+{
+ scsi_block_requests(vha->host);
+ if (qla8044_read_flash_data(vha, (uint8_t *)buf, offset, length / 4)
+ != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0xb08d,
+ "%s: Failed to read from flash\n",
+ __func__);
+ }
+ scsi_unblock_requests(vha->host);
+ return buf;
+}
+
+inline int
+qla8044_need_reset(struct scsi_qla_host *vha)
+{
+ uint32_t drv_state, drv_active;
+ int rval;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+ drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+
+ rval = drv_state & (1 << ha->portnum);
+
+ if (ha->flags.eeh_busy && drv_active)
+ rval = 1;
+ return rval;
+}
+
+/*
+ * qla8044_write_list - Write the value (p_entry->arg2) to address specified
+ * by p_entry->arg1 for all entries in header with delay of p_hdr->delay between
+ * entries.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : reset_entry header for WRITE_LIST opcode.
+ *
+ */
+static void
+qla8044_write_list(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ struct qla8044_entry *p_entry;
+ uint32_t i;
+
+ p_entry = (struct qla8044_entry *)((char *)p_hdr +
+ sizeof(struct qla8044_reset_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla8044_wr_reg_indirect(vha, p_entry->arg1, p_entry->arg2);
+ if (p_hdr->delay)
+ udelay((uint32_t)(p_hdr->delay));
+ }
+}
+
+/*
+ * qla8044_read_write_list - Read from address specified by p_entry->arg1,
+ * write value read to address specified by p_entry->arg2, for all entries in
+ * header with delay of p_hdr->delay between entries.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : reset_entry header for READ_WRITE_LIST opcode.
+ *
+ */
+static void
+qla8044_read_write_list(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ struct qla8044_entry *p_entry;
+ uint32_t i;
+
+ p_entry = (struct qla8044_entry *)((char *)p_hdr +
+ sizeof(struct qla8044_reset_entry_hdr));
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla8044_read_write_crb_reg(vha, p_entry->arg1,
+ p_entry->arg2);
+ if (p_hdr->delay)
+ udelay((uint32_t)(p_hdr->delay));
+ }
+}
+
+/*
+ * qla8044_poll_reg - Poll the given CRB addr for duration msecs till
+ * value read ANDed with test_mask is equal to test_result.
+ *
+ * @ha : Pointer to adapter structure
+ * @addr : CRB register address
+ * @duration : Poll for total of "duration" msecs
+ * @test_mask : Mask value read with "test_mask"
+ * @test_result : Compare (value&test_mask) with test_result.
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */
+static int
+qla8044_poll_reg(struct scsi_qla_host *vha, uint32_t addr,
+ int duration, uint32_t test_mask, uint32_t test_result)
+{
+ uint32_t value;
+ int timeout_error;
+ uint8_t retries;
+ int ret_val = QLA_SUCCESS;
+
+ ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ timeout_error = 1;
+ goto exit_poll_reg;
+ }
+
+ /* poll every 1/10 of the total duration */
+ retries = duration/10;
+
+ do {
+ if ((value & test_mask) != test_result) {
+ timeout_error = 1;
+ msleep(duration/10);
+ ret_val = qla8044_rd_reg_indirect(vha, addr, &value);
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ timeout_error = 1;
+ goto exit_poll_reg;
+ }
+ } else {
+ timeout_error = 0;
+ break;
+ }
+ } while (retries--);
+
+exit_poll_reg:
+ if (timeout_error) {
+ vha->reset_tmplt.seq_error++;
+ ql_log(ql_log_fatal, vha, 0xb090,
+ "%s: Poll Failed: 0x%08x 0x%08x 0x%08x\n",
+ __func__, value, test_mask, test_result);
+ }
+
+ return timeout_error;
+}
+
+/*
+ * qla8044_poll_list - For all entries in the POLL_LIST header, poll read CRB
+ * register specified by p_entry->arg1 and compare (value AND test_mask) with
+ * test_result to validate it. Wait for p_hdr->delay between processing entries.
+ *
+ * @ha : Pointer to adapter structure
+ * @p_hdr : reset_entry header for POLL_LIST opcode.
+ *
+ */
+static void
+qla8044_poll_list(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ long delay;
+ struct qla8044_entry *p_entry;
+ struct qla8044_poll *p_poll;
+ uint32_t i;
+ uint32_t value;
+
+ p_poll = (struct qla8044_poll *)
+ ((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));
+
+ /* Entries start after 8 byte qla8044_poll, poll header contains
+ * the test_mask, test_value.
+ */
+ p_entry = (struct qla8044_entry *)((char *)p_poll +
+ sizeof(struct qla8044_poll));
+
+ delay = (long)p_hdr->delay;
+
+ if (!delay) {
+ for (i = 0; i < p_hdr->count; i++, p_entry++)
+ qla8044_poll_reg(vha, p_entry->arg1,
+ delay, p_poll->test_mask, p_poll->test_value);
+ } else {
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ if (delay) {
+ if (qla8044_poll_reg(vha,
+ p_entry->arg1, delay,
+ p_poll->test_mask,
+ p_poll->test_value)) {
+					/* If
+					 * (data_read & test_mask) != test_value,
+ * read TIMEOUT_ADDR (arg1) and
+ * ADDR (arg2) registers
+ */
+ qla8044_rd_reg_indirect(vha,
+ p_entry->arg1, &value);
+ qla8044_rd_reg_indirect(vha,
+ p_entry->arg2, &value);
+ }
+ }
+ }
+ }
+}
+
+/*
+ * qla8044_poll_write_list - Write dr_value, ar_value to dr_addr/ar_addr,
+ * read ar_addr, if ((value & test_mask) != test_value) re-read till timeout
+ * expires.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : reset entry header for POLL_WRITE_LIST opcode.
+ *
+ */
+static void
+qla8044_poll_write_list(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ long delay;
+ struct qla8044_quad_entry *p_entry;
+ struct qla8044_poll *p_poll;
+ uint32_t i;
+
+ p_poll = (struct qla8044_poll *)((char *)p_hdr +
+ sizeof(struct qla8044_reset_entry_hdr));
+
+ p_entry = (struct qla8044_quad_entry *)((char *)p_poll +
+ sizeof(struct qla8044_poll));
+
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla8044_wr_reg_indirect(vha,
+ p_entry->dr_addr, p_entry->dr_value);
+ qla8044_wr_reg_indirect(vha,
+ p_entry->ar_addr, p_entry->ar_value);
+ if (delay) {
+ if (qla8044_poll_reg(vha,
+ p_entry->ar_addr, delay,
+ p_poll->test_mask,
+ p_poll->test_value)) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb091,
+ "%s: Timeout Error: poll list, ",
+ __func__);
+ ql_dbg(ql_dbg_p3p, vha, 0xb092,
+ "item_num %d, entry_num %d\n", i,
+ vha->reset_tmplt.seq_index);
+ }
+ }
+ }
+}
+
+/*
+ * qla8044_read_modify_write - Read value from p_entry->arg1, modify the
+ * value, write value to p_entry->arg2. Process entries with p_hdr->delay
+ * between entries.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : header with shift/or/xor values.
+ *
+ */
+static void
+qla8044_read_modify_write(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ struct qla8044_entry *p_entry;
+ struct qla8044_rmw *p_rmw_hdr;
+ uint32_t i;
+
+ p_rmw_hdr = (struct qla8044_rmw *)((char *)p_hdr +
+ sizeof(struct qla8044_reset_entry_hdr));
+
+ p_entry = (struct qla8044_entry *)((char *)p_rmw_hdr +
+ sizeof(struct qla8044_rmw));
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla8044_rmw_crb_reg(vha, p_entry->arg1,
+ p_entry->arg2, p_rmw_hdr);
+ if (p_hdr->delay)
+ udelay((uint32_t)(p_hdr->delay));
+ }
+}
+
+/*
+ * qla8044_pause - Wait for p_hdr->delay msecs, called between processing
+ * two entries of a sequence.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : Common reset entry header.
+ *
+ */
+static
+void qla8044_pause(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ if (p_hdr->delay)
+ mdelay((uint32_t)((long)p_hdr->delay));
+}
+
+/*
+ * qla8044_template_end - Indicates end of reset sequence processing.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : Common reset entry header.
+ *
+ */
+static void
+qla8044_template_end(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ vha->reset_tmplt.template_end = 1;
+
+ if (vha->reset_tmplt.seq_error == 0) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb093,
+ "%s: Reset sequence completed SUCCESSFULLY.\n", __func__);
+ } else {
+ ql_log(ql_log_fatal, vha, 0xb094,
+ "%s: Reset sequence completed with some timeout "
+ "errors.\n", __func__);
+ }
+}
+
+/*
+ * qla8044_poll_read_list - Write ar_value to ar_addr register, read ar_addr,
+ * if ((value & test_mask) != test_value) re-read till the timeout expires,
+ * read dr_addr register and assign to reset_tmplt.array.
+ *
+ * @vha : Pointer to adapter structure
+ * @p_hdr : Common reset entry header.
+ *
+ */
+static void
+qla8044_poll_read_list(struct scsi_qla_host *vha,
+ struct qla8044_reset_entry_hdr *p_hdr)
+{
+ long delay;
+ int index;
+ struct qla8044_quad_entry *p_entry;
+ struct qla8044_poll *p_poll;
+ uint32_t i;
+ uint32_t value;
+
+ p_poll = (struct qla8044_poll *)
+ ((char *)p_hdr + sizeof(struct qla8044_reset_entry_hdr));
+
+ p_entry = (struct qla8044_quad_entry *)
+ ((char *)p_poll + sizeof(struct qla8044_poll));
+
+ delay = (long)p_hdr->delay;
+
+ for (i = 0; i < p_hdr->count; i++, p_entry++) {
+ qla8044_wr_reg_indirect(vha, p_entry->ar_addr,
+ p_entry->ar_value);
+ if (delay) {
+ if (qla8044_poll_reg(vha, p_entry->ar_addr, delay,
+ p_poll->test_mask, p_poll->test_value)) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb095,
+ "%s: Timeout Error: poll "
+ "list, ", __func__);
+ ql_dbg(ql_dbg_p3p, vha, 0xb096,
+ "Item_num %d, "
+ "entry_num %d\n", i,
+ vha->reset_tmplt.seq_index);
+ } else {
+ index = vha->reset_tmplt.array_index;
+ qla8044_rd_reg_indirect(vha,
+ p_entry->dr_addr, &value);
+ vha->reset_tmplt.array[index++] = value;
+ if (index == QLA8044_MAX_RESET_SEQ_ENTRIES)
+ vha->reset_tmplt.array_index = 1;
+ }
+ }
+ }
+}
+
+/*
+ * qla8044_process_reset_template - Process all entries in reset template
+ * till entry with SEQ_END opcode, which indicates end of the reset template
+ * processing. Each entry has a Reset Entry header, entry opcode/command, with
+ * size of the entry, number of entries in sub-sequence and delay in microsecs
+ * or timeout in millisecs.
+ *
+ * @ha : Pointer to adapter structure
+ * @p_buff : Common reset entry header.
+ *
+ */
+static void
+qla8044_process_reset_template(struct scsi_qla_host *vha,
+ char *p_buff)
+{
+ int index, entries;
+ struct qla8044_reset_entry_hdr *p_hdr;
+ char *p_entry = p_buff;
+
+ vha->reset_tmplt.seq_end = 0;
+ vha->reset_tmplt.template_end = 0;
+ entries = vha->reset_tmplt.hdr->entries;
+ index = vha->reset_tmplt.seq_index;
+
+ for (; (!vha->reset_tmplt.seq_end) && (index < entries); index++) {
+ p_hdr = (struct qla8044_reset_entry_hdr *)p_entry;
+ switch (p_hdr->cmd) {
+ case OPCODE_NOP:
+ break;
+ case OPCODE_WRITE_LIST:
+ qla8044_write_list(vha, p_hdr);
+ break;
+ case OPCODE_READ_WRITE_LIST:
+ qla8044_read_write_list(vha, p_hdr);
+ break;
+ case OPCODE_POLL_LIST:
+ qla8044_poll_list(vha, p_hdr);
+ break;
+ case OPCODE_POLL_WRITE_LIST:
+ qla8044_poll_write_list(vha, p_hdr);
+ break;
+ case OPCODE_READ_MODIFY_WRITE:
+ qla8044_read_modify_write(vha, p_hdr);
+ break;
+ case OPCODE_SEQ_PAUSE:
+ qla8044_pause(vha, p_hdr);
+ break;
+ case OPCODE_SEQ_END:
+ vha->reset_tmplt.seq_end = 1;
+ break;
+ case OPCODE_TMPL_END:
+ qla8044_template_end(vha, p_hdr);
+ break;
+ case OPCODE_POLL_READ_LIST:
+ qla8044_poll_read_list(vha, p_hdr);
+ break;
+ default:
+ ql_log(ql_log_fatal, vha, 0xb097,
+ "%s: Unknown command ==> 0x%04x on "
+ "entry = %d\n", __func__, p_hdr->cmd, index);
+ break;
+ }
+ /*
+		 * Set pointer to the next entry in the sequence.
+ */
+ p_entry += p_hdr->size;
+ }
+ vha->reset_tmplt.seq_index = index;
+}
+
+static void
+qla8044_process_init_seq(struct scsi_qla_host *vha)
+{
+ qla8044_process_reset_template(vha,
+ vha->reset_tmplt.init_offset);
+ if (vha->reset_tmplt.seq_end != 1)
+ ql_log(ql_log_fatal, vha, 0xb098,
+ "%s: Abrupt INIT Sub-Sequence end.\n",
+ __func__);
+}
+
+static void
+qla8044_process_stop_seq(struct scsi_qla_host *vha)
+{
+ vha->reset_tmplt.seq_index = 0;
+ qla8044_process_reset_template(vha, vha->reset_tmplt.stop_offset);
+ if (vha->reset_tmplt.seq_end != 1)
+ ql_log(ql_log_fatal, vha, 0xb099,
+ "%s: Abrupt STOP Sub-Sequence end.\n", __func__);
+}
+
+static void
+qla8044_process_start_seq(struct scsi_qla_host *vha)
+{
+ qla8044_process_reset_template(vha, vha->reset_tmplt.start_offset);
+ if (vha->reset_tmplt.template_end != 1)
+ ql_log(ql_log_fatal, vha, 0xb09a,
+ "%s: Abrupt START Sub-Sequence end.\n",
+ __func__);
+}
+
+static int
+qla8044_lockless_flash_read_u32(struct scsi_qla_host *vha,
+ uint32_t flash_addr, uint8_t *p_data, int u32_word_count)
+{
+ uint32_t i;
+ uint32_t u32_word;
+ uint32_t flash_offset;
+ uint32_t addr = flash_addr;
+ int ret_val = QLA_SUCCESS;
+
+ flash_offset = addr & (QLA8044_FLASH_SECTOR_SIZE - 1);
+
+ if (addr & 0x3) {
+ ql_log(ql_log_fatal, vha, 0xb09b, "%s: Illegal addr = 0x%x\n",
+ __func__, addr);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_lockless_read;
+ }
+
+ ret_val = qla8044_wr_reg_indirect(vha,
+ QLA8044_FLASH_DIRECT_WINDOW, (addr));
+
+ if (ret_val != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0xb09c,
+ "%s: failed to write addr 0x%x to FLASH_DIRECT_WINDOW!\n",
+ __func__, addr);
+ goto exit_lockless_read;
+ }
+
+ /* Check if data is spread across multiple sectors */
+ if ((flash_offset + (u32_word_count * sizeof(uint32_t))) >
+ (QLA8044_FLASH_SECTOR_SIZE - 1)) {
+ /* Multi sector read */
+ for (i = 0; i < u32_word_count; i++) {
+ ret_val = qla8044_rd_reg_indirect(vha,
+ QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
+ if (ret_val != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0xb09d,
+ "%s: failed to read addr 0x%x!\n",
+ __func__, addr);
+ goto exit_lockless_read;
+ }
+ *(uint32_t *)p_data = u32_word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ flash_offset = flash_offset + 4;
+ if (flash_offset > (QLA8044_FLASH_SECTOR_SIZE - 1)) {
+ /* This write is needed once for each sector */
+ ret_val = qla8044_wr_reg_indirect(vha,
+ QLA8044_FLASH_DIRECT_WINDOW, (addr));
+ if (ret_val != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0xb09f,
+ "%s: failed to write addr "
+ "0x%x to FLASH_DIRECT_WINDOW!\n",
+ __func__, addr);
+ goto exit_lockless_read;
+ }
+ flash_offset = 0;
+ }
+ }
+ } else {
+ /* Single sector read */
+ for (i = 0; i < u32_word_count; i++) {
+ ret_val = qla8044_rd_reg_indirect(vha,
+ QLA8044_FLASH_DIRECT_DATA(addr), &u32_word);
+ if (ret_val != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0xb0a0,
+ "%s: failed to read addr 0x%x!\n",
+ __func__, addr);
+ goto exit_lockless_read;
+ }
+ *(uint32_t *)p_data = u32_word;
+ p_data = p_data + 4;
+ addr = addr + 4;
+ }
+ }
+
+exit_lockless_read:
+ return ret_val;
+}
+
+/*
+ * qla8044_ms_mem_write_128b - Writes data to MS/off-chip memory
+ *
+ * @vha : Pointer to adapter structure
+ * @addr : MS/off-chip memory address to write to
+ * @data : Data to be written
+ * @count : Number of 128-bit words to write
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */
+static int
+qla8044_ms_mem_write_128b(struct scsi_qla_host *vha,
+ uint64_t addr, uint32_t *data, uint32_t count)
+{
+ int i, j, ret_val = QLA_SUCCESS;
+ uint32_t agt_ctrl;
+ unsigned long flags;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Only 128-bit aligned access */
+ if (addr & 0xF) {
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_ms_mem_write;
+ }
+ write_lock_irqsave(&ha->hw_lock, flags);
+
+ /* Write address */
+ ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, 0);
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ ql_log(ql_log_fatal, vha, 0xb0a1,
+ "%s: write to AGT_ADDR_HI failed!\n", __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+
+ for (i = 0; i < count; i++, addr += 16) {
+ if (!((QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_QDR_NET,
+ QLA8044_ADDR_QDR_NET_MAX)) ||
+ (QLA8044_ADDR_IN_RANGE(addr, QLA8044_ADDR_DDR_NET,
+ QLA8044_ADDR_DDR_NET_MAX)))) {
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_ms_mem_write_unlock;
+ }
+
+ ret_val = qla8044_wr_reg_indirect(vha,
+ MD_MIU_TEST_AGT_ADDR_LO, addr);
+
+ /* Write data */
+ ret_val += qla8044_wr_reg_indirect(vha,
+ MD_MIU_TEST_AGT_WRDATA_LO, *data++);
+ ret_val += qla8044_wr_reg_indirect(vha,
+ MD_MIU_TEST_AGT_WRDATA_HI, *data++);
+ ret_val += qla8044_wr_reg_indirect(vha,
+ MD_MIU_TEST_AGT_WRDATA_ULO, *data++);
+ ret_val += qla8044_wr_reg_indirect(vha,
+ MD_MIU_TEST_AGT_WRDATA_UHI, *data++);
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ ql_log(ql_log_fatal, vha, 0xb0a2,
+ "%s: write to AGT_WRDATA failed!\n",
+ __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+
+ /* Check write status */
+ ret_val = qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
+ MIU_TA_CTL_WRITE_ENABLE);
+ ret_val += qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
+ MIU_TA_CTL_WRITE_START);
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ ql_log(ql_log_fatal, vha, 0xb0a3,
+ "%s: write to AGT_CTRL failed!\n", __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ ret_val = qla8044_rd_reg_indirect(vha,
+ MD_MIU_TEST_AGT_CTRL, &agt_ctrl);
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ ql_log(ql_log_fatal, vha, 0xb0a4,
+ "%s: failed to read "
+ "MD_MIU_TEST_AGT_CTRL!\n", __func__);
+ goto exit_ms_mem_write_unlock;
+ }
+ if ((agt_ctrl & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ /* Status check failed */
+ if (j >= MAX_CTL_CHECK) {
+ ql_log(ql_log_fatal, vha, 0xb0a5,
+ "%s: MS memory write failed!\n",
+ __func__);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_ms_mem_write_unlock;
+ }
+ }
+
+exit_ms_mem_write_unlock:
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+
+exit_ms_mem_write:
+ return ret_val;
+}
+
+static int
+qla8044_copy_bootloader(struct scsi_qla_host *vha)
+{
+ uint8_t *p_cache;
+ uint32_t src, count, size;
+ uint64_t dest;
+ int ret_val = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ src = QLA8044_BOOTLOADER_FLASH_ADDR;
+ dest = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_ADDR);
+ size = qla8044_rd_reg(ha, QLA8044_BOOTLOADER_SIZE);
+
+ /* 128 bit alignment check */
+ if (size & 0xF)
+ size = (size + 16) & ~0xF;
+
+ /* 16 byte count */
+ count = size/16;
+
+ p_cache = vmalloc(size);
+ if (p_cache == NULL) {
+ ql_log(ql_log_fatal, vha, 0xb0a6,
+ "%s: Failed to allocate memory for "
+ "boot loader cache\n", __func__);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_copy_bootloader;
+ }
+
+ ret_val = qla8044_lockless_flash_read_u32(vha, src,
+ p_cache, size/sizeof(uint32_t));
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ ql_log(ql_log_fatal, vha, 0xb0a7,
+ "%s: Error reading F/W from flash!!!\n", __func__);
+ goto exit_copy_error;
+ }
+ ql_dbg(ql_dbg_p3p, vha, 0xb0a8, "%s: Read F/W from flash!\n",
+ __func__);
+
+ /* 128 bit/16 byte write to MS memory */
+ ret_val = qla8044_ms_mem_write_128b(vha, dest,
+ (uint32_t *)p_cache, count);
+ if (ret_val == QLA_FUNCTION_FAILED) {
+ ql_log(ql_log_fatal, vha, 0xb0a9,
+ "%s: Error writing F/W to MS !!!\n", __func__);
+ goto exit_copy_error;
+ }
+ ql_dbg(ql_dbg_p3p, vha, 0xb0aa,
+ "%s: Wrote F/W (size %d) to MS !!!\n",
+ __func__, size);
+
+exit_copy_error:
+ vfree(p_cache);
+
+exit_copy_bootloader:
+ return ret_val;
+}
+
+static int
+qla8044_restart(struct scsi_qla_host *vha)
+{
+ int ret_val = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ qla8044_process_stop_seq(vha);
+
+ /* Collect minidump */
+ if (ql2xmdenable)
+ qla8044_get_minidump(vha);
+ else
+ ql_log(ql_log_fatal, vha, 0xb14c,
+ "Minidump disabled.\n");
+
+ qla8044_process_init_seq(vha);
+
+ if (qla8044_copy_bootloader(vha)) {
+ ql_log(ql_log_fatal, vha, 0xb0ab,
+ "%s: Copy bootloader, firmware restart failed!\n",
+ __func__);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_restart;
+ }
+
+ /*
+ * Loads F/W from flash
+ */
+ qla8044_wr_reg(ha, QLA8044_FW_IMAGE_VALID, QLA8044_BOOT_FROM_FLASH);
+
+ qla8044_process_start_seq(vha);
+
+exit_restart:
+ return ret_val;
+}
+
+/*
+ * qla8044_check_cmd_peg_status - Check peg status to see if Peg is
+ * initialized.
+ *
+ * @ha : Pointer to adapter structure
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */
+static int
+qla8044_check_cmd_peg_status(struct scsi_qla_host *vha)
+{
+ uint32_t val, ret_val = QLA_FUNCTION_FAILED;
+ int retries = CRB_CMDPEG_CHECK_RETRY_COUNT;
+ struct qla_hw_data *ha = vha->hw;
+
+ do {
+ val = qla8044_rd_reg(ha, QLA8044_CMDPEG_STATE);
+ if (val == PHAN_INITIALIZE_COMPLETE) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb0ac,
+ "%s: Command Peg initialization "
+ "complete! state=0x%x\n", __func__, val);
+ ret_val = QLA_SUCCESS;
+ break;
+ }
+ msleep(CRB_CMDPEG_CHECK_DELAY);
+ } while (--retries);
+
+ return ret_val;
+}
+
+static int
+qla8044_start_firmware(struct scsi_qla_host *vha)
+{
+ int ret_val = QLA_SUCCESS;
+
+ if (qla8044_restart(vha)) {
+ ql_log(ql_log_fatal, vha, 0xb0ad,
+ "%s: Restart Error!!!, Need Reset!!!\n",
+ __func__);
+ ret_val = QLA_FUNCTION_FAILED;
+ goto exit_start_fw;
+ } else
+ ql_dbg(ql_dbg_p3p, vha, 0xb0af,
+ "%s: Restart done!\n", __func__);
+
+ ret_val = qla8044_check_cmd_peg_status(vha);
+ if (ret_val) {
+ ql_log(ql_log_fatal, vha, 0xb0b0,
+ "%s: Peg not initialized!\n", __func__);
+ ret_val = QLA_FUNCTION_FAILED;
+ }
+
+exit_start_fw:
+ return ret_val;
+}
+
+void
+qla8044_clear_drv_active(struct scsi_qla_host *vha)
+{
+ uint32_t drv_active;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+ drv_active &= ~(1 << (ha->portnum));
+
+ ql_log(ql_log_info, vha, 0xb0b1,
+ "%s(%ld): drv_active: 0x%08x\n",
+ __func__, vha->host_no, drv_active);
+
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
+}
+
+/*
+ * qla8044_device_bootstrap - Initialize device, set DEV_READY, start fw
+ * @ha: pointer to adapter structure
+ *
+ * Note: IDC lock must be held upon entry
+ **/
+static int
+qla8044_device_bootstrap(struct scsi_qla_host *vha)
+{
+ int rval = QLA_FUNCTION_FAILED;
+ int i;
+ uint32_t old_count = 0, count = 0;
+ int need_reset = 0;
+ uint32_t idc_ctrl;
+ struct qla_hw_data *ha = vha->hw;
+
+ need_reset = qla8044_need_reset(vha);
+
+ if (!need_reset) {
+ old_count = qla8044_rd_direct(vha,
+ QLA8044_PEG_ALIVE_COUNTER_INDEX);
+
+ for (i = 0; i < 10; i++) {
+ msleep(200);
+
+ count = qla8044_rd_direct(vha,
+ QLA8044_PEG_ALIVE_COUNTER_INDEX);
+ if (count != old_count) {
+ rval = QLA_SUCCESS;
+ goto dev_ready;
+ }
+ }
+ qla8044_flash_lock_recovery(vha);
+ } else {
+ /* We are trying to perform a recovery here. */
+ if (ha->flags.isp82xx_fw_hung)
+ qla8044_flash_lock_recovery(vha);
+ }
+
+ /* set to DEV_INITIALIZING */
+ ql_log(ql_log_info, vha, 0xb0b2,
+ "%s: HW State: INITIALIZING\n", __func__);
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_INITIALIZING);
+
+ qla8044_idc_unlock(ha);
+ rval = qla8044_start_firmware(vha);
+ qla8044_idc_lock(ha);
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_info, vha, 0xb0b3,
+ "%s: HW State: FAILED\n", __func__);
+ qla8044_clear_drv_active(vha);
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_FAILED);
+ return rval;
+ }
+
+	/* For ISP8044, if IDC_CTRL GRACEFUL_RESET_BIT1 is set, clear it after
+	 * the device goes to the INIT state. */
+ idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+ if (idc_ctrl & GRACEFUL_RESET_BIT1) {
+ qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
+ (idc_ctrl & ~GRACEFUL_RESET_BIT1));
+ ha->fw_dumped = 0;
+ }
+
+dev_ready:
+ ql_log(ql_log_info, vha, 0xb0b4,
+ "%s: HW State: READY\n", __func__);
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX, QLA8XXX_DEV_READY);
+
+ return rval;
+}
+
+/*-------------------------Reset Sequence Functions-----------------------*/
+static void
+qla8044_dump_reset_seq_hdr(struct scsi_qla_host *vha)
+{
+ u8 *phdr;
+
+ if (!vha->reset_tmplt.buff) {
+ ql_log(ql_log_fatal, vha, 0xb0b5,
+ "%s: Error Invalid reset_seq_template\n", __func__);
+ return;
+ }
+
+ phdr = vha->reset_tmplt.buff;
+ ql_dbg(ql_dbg_p3p, vha, 0xb0b6,
+	    "Reset Template:\n\t0x%X 0x%X 0x%X 0x%X "
+ "0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n"
+ "\t0x%X 0x%X 0x%X 0x%X 0x%X 0x%X\n\n",
+ *phdr, *(phdr+1), *(phdr+2), *(phdr+3), *(phdr+4),
+ *(phdr+5), *(phdr+6), *(phdr+7), *(phdr + 8),
+ *(phdr+9), *(phdr+10), *(phdr+11), *(phdr+12),
+ *(phdr+13), *(phdr+14), *(phdr+15));
+}
+
+/*
+ * qla8044_reset_seq_checksum_test - Validate Reset Sequence template.
+ *
+ * @ha : Pointer to adapter structure
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ */
+static int
+qla8044_reset_seq_checksum_test(struct scsi_qla_host *vha)
+{
+ uint32_t sum = 0;
+ uint16_t *buff = (uint16_t *)vha->reset_tmplt.buff;
+ int u16_count = vha->reset_tmplt.hdr->size / sizeof(uint16_t);
+
+ while (u16_count-- > 0)
+ sum += *buff++;
+
+ while (sum >> 16)
+ sum = (sum & 0xFFFF) + (sum >> 16);
+
+ /* checksum of 0 indicates a valid template */
+ if (~sum) {
+ return QLA_SUCCESS;
+ } else {
+ ql_log(ql_log_fatal, vha, 0xb0b7,
+ "%s: Reset seq checksum failed\n", __func__);
+ return QLA_FUNCTION_FAILED;
+ }
+}
+
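A tiny worked instance of the end-around-carry folding above (sample words only; per the in-code comment, only a folded sum of zero indicates a valid template):

/*
 * Example: two 16-bit words 0xFFFF and 0x0001 sum to 0x10000; folding gives
 *   (0x10000 & 0xFFFF) + (0x10000 >> 16) = 0x0000 + 0x0001 = 0x0001
 * which is non-zero, so such a buffer would not count as a valid template.
 */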
+/*
+ * qla8044_read_reset_template - Read Reset Template from Flash, validate
+ * the template and store offsets of stop/start/init offsets in ha->reset_tmplt.
+ *
+ * @ha : Pointer to adapter structure
+ */
+void
+qla8044_read_reset_template(struct scsi_qla_host *vha)
+{
+ uint8_t *p_buff;
+ uint32_t addr, tmplt_hdr_def_size, tmplt_hdr_size;
+
+ vha->reset_tmplt.seq_error = 0;
+ vha->reset_tmplt.buff = vmalloc(QLA8044_RESTART_TEMPLATE_SIZE);
+ if (vha->reset_tmplt.buff == NULL) {
+ ql_log(ql_log_fatal, vha, 0xb0b8,
+ "%s: Failed to allocate reset template resources\n",
+ __func__);
+ goto exit_read_reset_template;
+ }
+
+ p_buff = vha->reset_tmplt.buff;
+ addr = QLA8044_RESET_TEMPLATE_ADDR;
+
+ tmplt_hdr_def_size =
+ sizeof(struct qla8044_reset_template_hdr) / sizeof(uint32_t);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0b9,
+ "%s: Read template hdr size %d from Flash\n",
+ __func__, tmplt_hdr_def_size);
+
+ /* Copy template header from flash */
+ if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
+ ql_log(ql_log_fatal, vha, 0xb0ba,
+ "%s: Failed to read reset template\n", __func__);
+ goto exit_read_template_error;
+ }
+
+ vha->reset_tmplt.hdr =
+ (struct qla8044_reset_template_hdr *) vha->reset_tmplt.buff;
+
+ /* Validate the template header size and signature */
+ tmplt_hdr_size = vha->reset_tmplt.hdr->hdr_size/sizeof(uint32_t);
+ if ((tmplt_hdr_size != tmplt_hdr_def_size) ||
+ (vha->reset_tmplt.hdr->signature != RESET_TMPLT_HDR_SIGNATURE)) {
+ ql_log(ql_log_fatal, vha, 0xb0bb,
+ "%s: Template Header size invalid %d "
+ "tmplt_hdr_def_size %d!!!\n", __func__,
+ tmplt_hdr_size, tmplt_hdr_def_size);
+ goto exit_read_template_error;
+ }
+
+ addr = QLA8044_RESET_TEMPLATE_ADDR + vha->reset_tmplt.hdr->hdr_size;
+ p_buff = vha->reset_tmplt.buff + vha->reset_tmplt.hdr->hdr_size;
+ tmplt_hdr_def_size = (vha->reset_tmplt.hdr->size -
+ vha->reset_tmplt.hdr->hdr_size)/sizeof(uint32_t);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0bc,
+ "%s: Read rest of the template size %d\n",
+ __func__, vha->reset_tmplt.hdr->size);
+
+ /* Copy rest of the template */
+ if (qla8044_read_flash_data(vha, p_buff, addr, tmplt_hdr_def_size)) {
+ ql_log(ql_log_fatal, vha, 0xb0bd,
+		    "%s: Failed to read reset template\n", __func__);
+ goto exit_read_template_error;
+ }
+
+ /* Integrity check */
+ if (qla8044_reset_seq_checksum_test(vha)) {
+ ql_log(ql_log_fatal, vha, 0xb0be,
+ "%s: Reset Seq checksum failed!\n", __func__);
+ goto exit_read_template_error;
+ }
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0bf,
+ "%s: Reset Seq checksum passed! Get stop, "
+ "start and init seq offsets\n", __func__);
+
+ /* Get STOP, START, INIT sequence offsets */
+ vha->reset_tmplt.init_offset = vha->reset_tmplt.buff +
+ vha->reset_tmplt.hdr->init_seq_offset;
+
+ vha->reset_tmplt.start_offset = vha->reset_tmplt.buff +
+ vha->reset_tmplt.hdr->start_seq_offset;
+
+ vha->reset_tmplt.stop_offset = vha->reset_tmplt.buff +
+ vha->reset_tmplt.hdr->hdr_size;
+
+ qla8044_dump_reset_seq_hdr(vha);
+
+ goto exit_read_reset_template;
+
+exit_read_template_error:
+ vfree(vha->reset_tmplt.buff);
+
+exit_read_reset_template:
+ return;
+}
+
+void
+qla8044_set_idc_dontreset(struct scsi_qla_host *vha)
+{
+ uint32_t idc_ctrl;
+ struct qla_hw_data *ha = vha->hw;
+
+ idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+ idc_ctrl |= DONTRESET_BIT0;
+ ql_dbg(ql_dbg_p3p, vha, 0xb0c0,
+ "%s: idc_ctrl = %d\n", __func__, idc_ctrl);
+ qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
+}
+
+inline void
+qla8044_set_rst_ready(struct scsi_qla_host *vha)
+{
+ uint32_t drv_state;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+
+	/* For ISP8044, the drv_state register has one bit per function;
+	 * shift 1 by func_num to set this function's bit. */
+ drv_state |= (1 << ha->portnum);
+
+ ql_log(ql_log_info, vha, 0xb0c1,
+ "%s(%ld): drv_state: 0x%08x\n",
+ __func__, vha->host_no, drv_state);
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
+}
+
+/**
+ * qla8044_need_reset_handler - Code to start reset sequence
+ * @ha: pointer to adapter structure
+ *
+ * Note: IDC lock must be held upon entry
+ **/
+static void
+qla8044_need_reset_handler(struct scsi_qla_host *vha)
+{
+ uint32_t dev_state = 0, drv_state, drv_active;
+ unsigned long reset_timeout, dev_init_timeout;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_log(ql_log_fatal, vha, 0xb0c2,
+ "%s: Performing ISP error recovery\n", __func__);
+
+ if (vha->flags.online) {
+ qla8044_idc_unlock(ha);
+ qla2x00_abort_isp_cleanup(vha);
+ ha->isp_ops->get_flash_version(vha, vha->req->ring);
+ ha->isp_ops->nvram_config(vha);
+ qla8044_idc_lock(ha);
+ }
+
+ if (!ha->flags.nic_core_reset_owner) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb0c3,
+ "%s(%ld): reset acknowledged\n",
+ __func__, vha->host_no);
+ qla8044_set_rst_ready(vha);
+
+ /* Non-reset owners ACK Reset and wait for device INIT state
+ * as part of Reset Recovery by Reset Owner
+ */
+ dev_init_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
+
+ do {
+ if (time_after_eq(jiffies, dev_init_timeout)) {
+ ql_log(ql_log_info, vha, 0xb0c4,
+ "%s: Non Reset owner DEV INIT "
+ "TIMEOUT!\n", __func__);
+ break;
+ }
+
+ qla8044_idc_unlock(ha);
+ msleep(1000);
+ qla8044_idc_lock(ha);
+
+ dev_state = qla8044_rd_direct(vha,
+ QLA8044_CRB_DEV_STATE_INDEX);
+ } while (dev_state == QLA8XXX_DEV_NEED_RESET);
+ } else {
+ qla8044_set_rst_ready(vha);
+
+ /* wait for 10 seconds for reset ack from all functions */
+ reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);
+
+ drv_state = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_STATE_INDEX);
+ drv_active = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+ ql_log(ql_log_info, vha, 0xb0c5,
+ "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
+ __func__, vha->host_no, drv_state, drv_active);
+
+ while (drv_state != drv_active) {
+ if (time_after_eq(jiffies, reset_timeout)) {
+ ql_log(ql_log_info, vha, 0xb0c6,
+				    "%s: RESET TIMEOUT! "
+ "drv_state: 0x%08x, drv_active: 0x%08x\n",
+ QLA2XXX_DRIVER_NAME, drv_state, drv_active);
+ break;
+ }
+
+ qla8044_idc_unlock(ha);
+ msleep(1000);
+ qla8044_idc_lock(ha);
+
+ drv_state = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_STATE_INDEX);
+ drv_active = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_ACTIVE_INDEX);
+ }
+
+ if (drv_state != drv_active) {
+ ql_log(ql_log_info, vha, 0xb0c7,
+ "%s(%ld): Reset_owner turning off drv_active "
+ "of non-acking function 0x%x\n", __func__,
+ vha->host_no, (drv_active ^ drv_state));
+ drv_active = drv_active & drv_state;
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX,
+ drv_active);
+ }
+
+ /*
+ * Clear RESET OWNER, will be set at next reset
+ * by next RST_OWNER
+ */
+ ha->flags.nic_core_reset_owner = 0;
+
+ /* Start Reset Recovery */
+ qla8044_device_bootstrap(vha);
+ }
+}
+
+static void
+qla8044_set_drv_active(struct scsi_qla_host *vha)
+{
+ uint32_t drv_active;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+ /* For ISP8044, drv_active register has 1 bit per function,
+ * shift 1 by func_num to set a bit for the function.*/
+ drv_active |= (1 << ha->portnum);
+
+ ql_log(ql_log_info, vha, 0xb0c8,
+ "%s(%ld): drv_active: 0x%08x\n",
+ __func__, vha->host_no, drv_active);
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX, drv_active);
+}
+
+static void
+qla8044_clear_idc_dontreset(struct scsi_qla_host *vha)
+{
+ uint32_t idc_ctrl;
+ struct qla_hw_data *ha = vha->hw;
+
+ idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+ idc_ctrl &= ~DONTRESET_BIT0;
+ ql_log(ql_log_info, vha, 0xb0c9,
+ "%s: idc_ctrl = %d\n", __func__,
+ idc_ctrl);
+ qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL, idc_ctrl);
+}
+
+static int
+qla8044_set_idc_ver(struct scsi_qla_host *vha)
+{
+ int idc_ver;
+ uint32_t drv_active;
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+ if (drv_active == (1 << ha->portnum)) {
+ idc_ver = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_IDC_VERSION_INDEX);
+ idc_ver &= (~0xFF);
+ idc_ver |= QLA8044_IDC_VER_MAJ_VALUE;
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_IDC_VERSION_INDEX,
+ idc_ver);
+ ql_log(ql_log_info, vha, 0xb0ca,
+ "%s: IDC version updated to %d\n",
+ __func__, idc_ver);
+ } else {
+ idc_ver = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_IDC_VERSION_INDEX);
+ idc_ver &= 0xFF;
+ if (QLA8044_IDC_VER_MAJ_VALUE != idc_ver) {
+ ql_log(ql_log_info, vha, 0xb0cb,
+			    "%s: qla2xxx driver IDC version %d "
+ "is not compatible with IDC version %d "
+ "of other drivers!\n",
+ __func__, QLA8044_IDC_VER_MAJ_VALUE,
+ idc_ver);
+ rval = QLA_FUNCTION_FAILED;
+ goto exit_set_idc_ver;
+ }
+ }
+
+ /* Update IDC_MINOR_VERSION */
+ idc_ver = qla8044_rd_reg(ha, QLA8044_CRB_IDC_VER_MINOR);
+ idc_ver &= ~(0x03 << (ha->portnum * 2));
+ idc_ver |= (QLA8044_IDC_VER_MIN_VALUE << (ha->portnum * 2));
+ qla8044_wr_reg(ha, QLA8044_CRB_IDC_VER_MINOR, idc_ver);
+
+exit_set_idc_ver:
+ return rval;
+}
+
+static int
+qla8044_update_idc_reg(struct scsi_qla_host *vha)
+{
+ uint32_t drv_active;
+ int rval = QLA_SUCCESS;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (vha->flags.init_done)
+ goto exit_update_idc_reg;
+
+ qla8044_idc_lock(ha);
+ qla8044_set_drv_active(vha);
+
+ drv_active = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+ /* If we are the first driver to load and
+ * ql2xdontresethba is not set, clear IDC_CTRL BIT0. */
+ if ((drv_active == (1 << ha->portnum)) && !ql2xdontresethba)
+ qla8044_clear_idc_dontreset(vha);
+
+ rval = qla8044_set_idc_ver(vha);
+ if (rval == QLA_FUNCTION_FAILED)
+ qla8044_clear_drv_active(vha);
+ qla8044_idc_unlock(ha);
+
+exit_update_idc_reg:
+ return rval;
+}
+
+/**
+ * qla8044_need_qsnt_handler - Code to start qsnt
+ * @ha: pointer to adapter structure
+ **/
+static void
+qla8044_need_qsnt_handler(struct scsi_qla_host *vha)
+{
+ unsigned long qsnt_timeout;
+ uint32_t drv_state, drv_active, dev_state;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (vha->flags.online)
+ qla2x00_quiesce_io(vha);
+ else
+ return;
+
+ qla8044_set_qsnt_ready(vha);
+
+ /* Wait for 30 secs for all functions to ack qsnt mode */
+ qsnt_timeout = jiffies + (QSNT_ACK_TOV * HZ);
+ drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+ drv_active = qla8044_rd_direct(vha, QLA8044_CRB_DRV_ACTIVE_INDEX);
+
+ /* Shift drv_active by 1 to match drv_state. As quiescent ready bit
+ position is at bit 1 and drv active is at bit 0 */
+ drv_active = drv_active << 1;
+
+ while (drv_state != drv_active) {
+ if (time_after_eq(jiffies, qsnt_timeout)) {
+ /* Other functions did not ack, changing state to
+ * DEV_READY
+ */
+ clear_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_READY);
+ qla8044_clear_qsnt_ready(vha);
+ ql_log(ql_log_info, vha, 0xb0cc,
+ "Timeout waiting for quiescent ack!!!\n");
+ return;
+ }
+ qla8044_idc_unlock(ha);
+ msleep(1000);
+ qla8044_idc_lock(ha);
+
+ drv_state = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_STATE_INDEX);
+ drv_active = qla8044_rd_direct(vha,
+ QLA8044_CRB_DRV_ACTIVE_INDEX);
+ drv_active = drv_active << 1;
+ }
+
+ /* All functions have Acked. Set quiescent state */
+ dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+
+ if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_QUIESCENT);
+ ql_log(ql_log_info, vha, 0xb0cd,
+ "%s: HW State: QUIESCENT\n", __func__);
+ }
+}
+
+/*
+ * qla8044_device_state_handler - Adapter state machine
+ * @ha: pointer to host adapter structure.
+ *
+ * Note: IDC lock must be UNLOCKED upon entry
+ **/
+int
+qla8044_device_state_handler(struct scsi_qla_host *vha)
+{
+ uint32_t dev_state;
+ int rval = QLA_SUCCESS;
+ unsigned long dev_init_timeout;
+ struct qla_hw_data *ha = vha->hw;
+
+ rval = qla8044_update_idc_reg(vha);
+ if (rval == QLA_FUNCTION_FAILED)
+ goto exit_error;
+
+ dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+ ql_dbg(ql_dbg_p3p, vha, 0xb0ce,
+ "Device state is 0x%x = %s\n",
+ dev_state, dev_state < MAX_STATES ?
+ qdev_state(dev_state) : "Unknown");
+
+ /* wait for 30 seconds for device to go ready */
+ dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);
+
+ qla8044_idc_lock(ha);
+
+ while (1) {
+ if (time_after_eq(jiffies, dev_init_timeout)) {
+ ql_log(ql_log_warn, vha, 0xb0cf,
+ "%s: Device Init Failed 0x%x = %s\n",
+ QLA2XXX_DRIVER_NAME, dev_state,
+ dev_state < MAX_STATES ?
+ qdev_state(dev_state) : "Unknown");
+
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_FAILED);
+ }
+
+ dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+ ql_log(ql_log_info, vha, 0xb0d0,
+ "Device state is 0x%x = %s\n",
+ dev_state, dev_state < MAX_STATES ?
+ qdev_state(dev_state) : "Unknown");
+
+ /* NOTE: Make sure the IDC lock is released upon exit of the switch */
+ switch (dev_state) {
+ case QLA8XXX_DEV_READY:
+ ha->flags.nic_core_reset_owner = 0;
+ goto exit;
+ case QLA8XXX_DEV_COLD:
+ rval = qla8044_device_bootstrap(vha);
+ goto exit;
+ case QLA8XXX_DEV_INITIALIZING:
+ qla8044_idc_unlock(ha);
+ msleep(1000);
+ qla8044_idc_lock(ha);
+ break;
+ case QLA8XXX_DEV_NEED_RESET:
+ /* For ISP8044, if NEED_RESET is set by any driver,
+ * it should be honored, irrespective of IDC_CTRL
+ * DONTRESET_BIT0 */
+ qla8044_need_reset_handler(vha);
+ break;
+ case QLA8XXX_DEV_NEED_QUIESCENT:
+ /* idc locked/unlocked in handler */
+ qla8044_need_qsnt_handler(vha);
+
+ /* Reset the init timeout after qsnt handler */
+ dev_init_timeout = jiffies +
+ (ha->fcoe_reset_timeout * HZ);
+ break;
+ case QLA8XXX_DEV_QUIESCENT:
+ ql_log(ql_log_info, vha, 0xb0d1,
+ "HW State: QUIESCENT\n");
+
+ qla8044_idc_unlock(ha);
+ msleep(1000);
+ qla8044_idc_lock(ha);
+
+ /* Reset the init timeout after qsnt handler */
+ dev_init_timeout = jiffies +
+ (ha->fcoe_reset_timeout * HZ);
+ break;
+ case QLA8XXX_DEV_FAILED:
+ ha->flags.nic_core_reset_owner = 0;
+ qla8044_idc_unlock(ha);
+ qla8xxx_dev_failed_handler(vha);
+ rval = QLA_FUNCTION_FAILED;
+ qla8044_idc_lock(ha);
+ goto exit;
+ default:
+ qla8044_idc_unlock(ha);
+ qla8xxx_dev_failed_handler(vha);
+ rval = QLA_FUNCTION_FAILED;
+ qla8044_idc_lock(ha);
+ goto exit;
+ }
+ }
+exit:
+ qla8044_idc_unlock(ha);
+
+exit_error:
+ return rval;
+}
+
+/**
+ * qla8044_check_temp - Check the ISP8044 temperature.
+ * @vha: adapter block pointer.
+ *
+ * Note: The caller should not hold the idc lock.
+ **/
+static int
+qla8044_check_temp(struct scsi_qla_host *vha)
+{
+ uint32_t temp, temp_state, temp_val;
+ int status = QLA_SUCCESS;
+
+ temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
+ temp_state = qla82xx_get_temp_state(temp);
+ temp_val = qla82xx_get_temp_val(temp);
+
+ if (temp_state == QLA82XX_TEMP_PANIC) {
+ ql_log(ql_log_warn, vha, 0xb0d2,
+ "Device temperature %d degrees C"
+ " exceeds maximum allowed. Hardware has been shut"
+ " down\n", temp_val);
+ status = QLA_FUNCTION_FAILED;
+ return status;
+ } else if (temp_state == QLA82XX_TEMP_WARN) {
+ ql_log(ql_log_warn, vha, 0xb0d3,
+ "Device temperature %d"
+ " degrees C exceeds operating range."
+ " Immediate action needed.\n", temp_val);
+ }
+ return 0;
+}
+
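+/* Return the current device temperature read from the CRB temp-state register. */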
+int qla8044_read_temperature(scsi_qla_host_t *vha)
+{
+ uint32_t temp;
+
+ temp = qla8044_rd_direct(vha, QLA8044_CRB_TEMP_STATE_INDEX);
+ return qla82xx_get_temp_val(temp);
+}
+
+/**
+ * qla8044_check_fw_alive - Check firmware health
+ * @vha: Pointer to host adapter structure.
+ *
+ * Context: Interrupt
+ **/
+int
+qla8044_check_fw_alive(struct scsi_qla_host *vha)
+{
+ uint32_t fw_heartbeat_counter;
+ uint32_t halt_status1, halt_status2;
+ int status = QLA_SUCCESS;
+
+ fw_heartbeat_counter = qla8044_rd_direct(vha,
+ QLA8044_PEG_ALIVE_COUNTER_INDEX);
+
+ /* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
+ if (fw_heartbeat_counter == 0xffffffff) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb0d4,
+ "scsi%ld: %s: Device in frozen "
+ "state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
+ vha->host_no, __func__);
+ return status;
+ }
+
+ if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
+ vha->seconds_since_last_heartbeat++;
+ /* FW not alive after 2 seconds */
+ if (vha->seconds_since_last_heartbeat == 2) {
+ vha->seconds_since_last_heartbeat = 0;
+ halt_status1 = qla8044_rd_direct(vha,
+ QLA8044_PEG_HALT_STATUS1_INDEX);
+ halt_status2 = qla8044_rd_direct(vha,
+ QLA8044_PEG_HALT_STATUS2_INDEX);
+
+ ql_log(ql_log_info, vha, 0xb0d5,
+ "scsi(%ld): %s, ISP8044 "
+ "Dumping hw/fw registers:\n"
+ " PEG_HALT_STATUS1: 0x%x, "
+ "PEG_HALT_STATUS2: 0x%x,\n",
+ vha->host_no, __func__, halt_status1,
+ halt_status2);
+ status = QLA_FUNCTION_FAILED;
+ }
+ } else
+ vha->seconds_since_last_heartbeat = 0;
+
+ vha->fw_heartbeat_counter = fw_heartbeat_counter;
+ return status;
+}
+
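+/**
+ * qla8044_watchdog - Periodic health check for the ISP8044
+ * @vha: pointer to host adapter structure
+ *
+ * Checks device state, temperature and firmware heartbeat, and sets the
+ * appropriate DPC flags and wakes the DPC thread when recovery is needed.
+ **/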
+void
+qla8044_watchdog(struct scsi_qla_host *vha)
+{
+ uint32_t dev_state, halt_status;
+ int halt_status_unrecoverable = 0;
+ struct qla_hw_data *ha = vha->hw;
+
+ /* Don't poll if a reset is in progress or the FW is hung in quiescent state */
+ if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &vha->dpc_flags) ||
+ test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags))) {
+ dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+
+ if (qla8044_check_temp(vha)) {
+ set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
+ ha->flags.isp82xx_fw_hung = 1;
+ qla2xxx_wake_dpc(vha);
+ } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
+ !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
+ ql_log(ql_log_info, vha, 0xb0d6,
+ "%s: HW State: NEED RESET!\n",
+ __func__);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
+ !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
+ ql_log(ql_log_info, vha, 0xb0d7,
+ "%s: HW State: NEED QUIES detected!\n",
+ __func__);
+ set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ } else {
+ /* Check firmware health */
+ if (qla8044_check_fw_alive(vha)) {
+ halt_status = qla8044_rd_direct(vha,
+ QLA8044_PEG_HALT_STATUS1_INDEX);
+ if (halt_status &
+ QLA8044_HALT_STATUS_FW_RESET) {
+ ql_log(ql_log_fatal, vha,
+ 0xb0d8, "%s: Firmware "
+ "error detected device "
+ "is being reset\n",
+ __func__);
+ } else if (halt_status &
+ QLA8044_HALT_STATUS_UNRECOVERABLE) {
+ halt_status_unrecoverable = 1;
+ }
+
+ /* Since we cannot change dev_state in interrupt
+ * context, set the appropriate DPC flag, then
+ * wake up the DPC thread. */
+ if (halt_status_unrecoverable) {
+ set_bit(ISP_UNRECOVERABLE,
+ &vha->dpc_flags);
+ } else {
+ if (dev_state ==
+ QLA8XXX_DEV_QUIESCENT) {
+ set_bit(FCOE_CTX_RESET_NEEDED,
+ &vha->dpc_flags);
+ ql_log(ql_log_info, vha, 0xb0d9,
+ "%s: FW CONTEXT Reset "
+ "needed!\n", __func__);
+ } else {
+ ql_log(ql_log_info, vha,
+ 0xb0da, "%s: "
+ "detect abort needed\n",
+ __func__);
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ qla82xx_clear_pending_mbx(vha);
+ }
+ }
+ ha->flags.isp82xx_fw_hung = 1;
+ ql_log(ql_log_warn, vha, 0xb10a,
+ "Firmware hung.\n");
+ qla2xxx_wake_dpc(vha);
+ }
+ }
+
+ }
+}
+
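+/**
+ * qla8044_minidump_process_control - Process a CNTRL minidump template entry
+ * @vha: pointer to host adapter structure
+ * @entry_hdr: minidump entry header
+ *
+ * Executes the CRB write/read-modify-write/poll/state opcodes encoded in the
+ * entry, advancing the CRB address by the entry's stride on each iteration.
+ **/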
+static int
+qla8044_minidump_process_control(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr)
+{
+ struct qla8044_minidump_entry_crb *crb_entry;
+ uint32_t read_value, opcode, poll_time, addr, index;
+ uint32_t crb_addr, rval = QLA_SUCCESS;
+ unsigned long wtime;
+ struct qla8044_minidump_template_hdr *tmplt_hdr;
+ int i;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0dd, "Entering fn: %s\n", __func__);
+ tmplt_hdr = (struct qla8044_minidump_template_hdr *)
+ ha->md_tmplt_hdr;
+ crb_entry = (struct qla8044_minidump_entry_crb *)entry_hdr;
+
+ crb_addr = crb_entry->addr;
+ for (i = 0; i < crb_entry->op_count; i++) {
+ opcode = crb_entry->crb_ctrl.opcode;
+
+ if (opcode & QLA82XX_DBG_OPCODE_WR) {
+ qla8044_wr_reg_indirect(vha, crb_addr,
+ crb_entry->value_1);
+ opcode &= ~QLA82XX_DBG_OPCODE_WR;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_RW) {
+ qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
+ qla8044_wr_reg_indirect(vha, crb_addr, read_value);
+ opcode &= ~QLA82XX_DBG_OPCODE_RW;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_AND) {
+ qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
+ read_value &= crb_entry->value_2;
+ opcode &= ~QLA82XX_DBG_OPCODE_AND;
+ if (opcode & QLA82XX_DBG_OPCODE_OR) {
+ read_value |= crb_entry->value_3;
+ opcode &= ~QLA82XX_DBG_OPCODE_OR;
+ }
+ qla8044_wr_reg_indirect(vha, crb_addr, read_value);
+ }
+ if (opcode & QLA82XX_DBG_OPCODE_OR) {
+ qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
+ read_value |= crb_entry->value_3;
+ qla8044_wr_reg_indirect(vha, crb_addr, read_value);
+ opcode &= ~QLA82XX_DBG_OPCODE_OR;
+ }
+ if (opcode & QLA82XX_DBG_OPCODE_POLL) {
+ poll_time = crb_entry->crb_strd.poll_timeout;
+ wtime = jiffies + poll_time;
+ qla8044_rd_reg_indirect(vha, crb_addr, &read_value);
+
+ do {
+ if ((read_value & crb_entry->value_2) ==
+ crb_entry->value_1) {
+ break;
+ } else if (time_after_eq(jiffies, wtime)) {
+ /* capturing dump failed */
+ rval = QLA_FUNCTION_FAILED;
+ break;
+ } else {
+ qla8044_rd_reg_indirect(vha,
+ crb_addr, &read_value);
+ }
+ } while (1);
+ opcode &= ~QLA82XX_DBG_OPCODE_POLL;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
+ if (crb_entry->crb_strd.state_index_a) {
+ index = crb_entry->crb_strd.state_index_a;
+ addr = tmplt_hdr->saved_state_array[index];
+ } else {
+ addr = crb_addr;
+ }
+
+ qla8044_rd_reg_indirect(vha, addr, &read_value);
+ index = crb_entry->crb_ctrl.state_index_v;
+ tmplt_hdr->saved_state_array[index] = read_value;
+ opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
+ if (crb_entry->crb_strd.state_index_a) {
+ index = crb_entry->crb_strd.state_index_a;
+ addr = tmplt_hdr->saved_state_array[index];
+ } else {
+ addr = crb_addr;
+ }
+
+ if (crb_entry->crb_ctrl.state_index_v) {
+ index = crb_entry->crb_ctrl.state_index_v;
+ read_value =
+ tmplt_hdr->saved_state_array[index];
+ } else {
+ read_value = crb_entry->value_1;
+ }
+
+ qla8044_wr_reg_indirect(vha, addr, read_value);
+ opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
+ }
+
+ if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
+ index = crb_entry->crb_ctrl.state_index_v;
+ read_value = tmplt_hdr->saved_state_array[index];
+ read_value <<= crb_entry->crb_ctrl.shl;
+ read_value >>= crb_entry->crb_ctrl.shr;
+ if (crb_entry->value_2)
+ read_value &= crb_entry->value_2;
+ read_value |= crb_entry->value_3;
+ read_value += crb_entry->value_1;
+ tmplt_hdr->saved_state_array[index] = read_value;
+ opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
+ }
+ crb_addr += crb_entry->crb_strd.addr_stride;
+ }
+ return rval;
+}
+
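+/*
+ * Dump a range of CRB registers: store address/value pairs for op_count
+ * registers starting at the entry's address, stepping by its address stride.
+ */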
+static void
+qla8044_minidump_process_rdcrb(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+ struct qla8044_minidump_entry_crb *crb_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0de, "Entering fn: %s\n", __func__);
+ crb_hdr = (struct qla8044_minidump_entry_crb *)entry_hdr;
+ r_addr = crb_hdr->addr;
+ r_stride = crb_hdr->crb_strd.addr_stride;
+ loop_cnt = crb_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla8044_rd_reg_indirect(vha, r_addr, &r_value);
+ *data_ptr++ = r_addr;
+ *data_ptr++ = r_value;
+ r_addr += r_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
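+/*
+ * Dump memory through the MIU test agent, 16 bytes at a time. The read
+ * address and size must both be 16-byte aligned.
+ */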
+static int
+qla8044_minidump_process_rdmem(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_value, r_data;
+ uint32_t i, j, loop_cnt;
+ struct qla8044_minidump_entry_rdmem *m_hdr;
+ unsigned long flags;
+ uint32_t *data_ptr = *d_ptr;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0df, "Entering fn: %s\n", __func__);
+ m_hdr = (struct qla8044_minidump_entry_rdmem *)entry_hdr;
+ r_addr = m_hdr->read_addr;
+ loop_cnt = m_hdr->read_data_size/16;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f0,
+ "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
+ __func__, r_addr, m_hdr->read_data_size);
+
+ if (r_addr & 0xf) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f1,
+ "[%s]: Read addr 0x%x not 16 bytes alligned\n",
+ __func__, r_addr);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ if (m_hdr->read_data_size % 16) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f2,
+ "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
+ __func__, m_hdr->read_data_size);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f3,
+ "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
+ __func__, r_addr, m_hdr->read_data_size, loop_cnt);
+
+ write_lock_irqsave(&ha->hw_lock, flags);
+ for (i = 0; i < loop_cnt; i++) {
+ qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_LO, r_addr);
+ r_value = 0;
+ qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_ADDR_HI, r_value);
+ r_value = MIU_TA_CTL_ENABLE;
+ qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);
+ r_value = MIU_TA_CTL_START_ENABLE;
+ qla8044_wr_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL, r_value);
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_CTRL,
+ &r_value);
+ if ((r_value & MIU_TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ printk_ratelimited(KERN_ERR
+ "%s: failed to read through agent\n", __func__);
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+ return QLA_SUCCESS;
+ }
+
+ for (j = 0; j < 4; j++) {
+ qla8044_rd_reg_indirect(vha, MD_MIU_TEST_AGT_RDDATA[j],
+ &r_data);
+ *data_ptr++ = r_data;
+ }
+
+ r_addr += 16;
+ }
+ write_unlock_irqrestore(&ha->hw_lock, flags);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f4,
+ "Leaving fn: %s datacount: 0x%x\n",
+ __func__, (loop_cnt * 16));
+
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+}
+
+/* ISP83xx flash read for _RDROM _BOARD */
+static uint32_t
+qla8044_minidump_process_rdrom(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t fl_addr, u32_count, rval;
+ struct qla8044_minidump_entry_rdrom *rom_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ rom_hdr = (struct qla8044_minidump_entry_rdrom *)entry_hdr;
+ fl_addr = rom_hdr->read_addr;
+ u32_count = (rom_hdr->read_data_size)/sizeof(uint32_t);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f5, "[%s]: fl_addr: 0x%x, count: 0x%x\n",
+ __func__, fl_addr, u32_count);
+
+ rval = qla8044_lockless_flash_read_u32(vha, fl_addr,
+ (u8 *)(data_ptr), u32_count);
+
+ if (rval != QLA_SUCCESS) {
+ ql_log(ql_log_fatal, vha, 0xb0f6,
+ "%s: Flash Read Error,Count=%d\n", __func__, u32_count);
+ return QLA_FUNCTION_FAILED;
+ } else {
+ data_ptr += u32_count;
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+ }
+}
+
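+/* Flag a minidump template entry as skipped and log its type and level. */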
+static void
+qla8044_mark_entry_skipped(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, int index)
+{
+ entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
+
+ ql_log(ql_log_info, vha, 0xb0f7,
+ "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
+ vha->host_no, index, entry_hdr->entry_type,
+ entry_hdr->d_ctrl.entry_capture_mask);
+}
+
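+/*
+ * Dump L2 cache tag/data entries. For each tag value, optionally write the
+ * cache control register and poll it before reading the data registers.
+ */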
+static int
+qla8044_minidump_process_l2tag(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t addr, r_addr, c_addr, t_r_addr;
+ uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+ unsigned long p_wait, w_time, p_mask;
+ uint32_t c_value_w, c_value_r;
+ struct qla8044_minidump_entry_cache *cache_hdr;
+ int rval = QLA_FUNCTION_FAILED;
+ uint32_t *data_ptr = *d_ptr;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f8, "Entering fn: %s\n", __func__);
+ cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr;
+
+ loop_count = cache_hdr->op_count;
+ r_addr = cache_hdr->read_addr;
+ c_addr = cache_hdr->control_addr;
+ c_value_w = cache_hdr->cache_ctrl.write_value;
+
+ t_r_addr = cache_hdr->tag_reg_addr;
+ t_value = cache_hdr->addr_ctrl.init_tag_value;
+ r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+ p_wait = cache_hdr->cache_ctrl.poll_wait;
+ p_mask = cache_hdr->cache_ctrl.poll_mask;
+
+ for (i = 0; i < loop_count; i++) {
+ qla8044_wr_reg_indirect(vha, t_r_addr, t_value);
+ if (c_value_w)
+ qla8044_wr_reg_indirect(vha, c_addr, c_value_w);
+
+ if (p_mask) {
+ w_time = jiffies + p_wait;
+ do {
+ qla8044_rd_reg_indirect(vha, c_addr,
+ &c_value_r);
+ if ((c_value_r & p_mask) == 0) {
+ break;
+ } else if (time_after_eq(jiffies, w_time)) {
+ /* capturing dump failed */
+ return rval;
+ }
+ } while (1);
+ }
+
+ addr = r_addr;
+ for (k = 0; k < r_cnt; k++) {
+ qla8044_rd_reg_indirect(vha, addr, &r_value);
+ *data_ptr++ = r_value;
+ addr += cache_hdr->read_ctrl.read_addr_stride;
+ }
+ t_value += cache_hdr->addr_ctrl.tag_value_stride;
+ }
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+}
+
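+/* Dump L1 cache entries; same as the L2 path but without control polling. */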
+static void
+qla8044_minidump_process_l1cache(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t addr, r_addr, c_addr, t_r_addr;
+ uint32_t i, k, loop_count, t_value, r_cnt, r_value;
+ uint32_t c_value_w;
+ struct qla8044_minidump_entry_cache *cache_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ cache_hdr = (struct qla8044_minidump_entry_cache *)entry_hdr;
+ loop_count = cache_hdr->op_count;
+ r_addr = cache_hdr->read_addr;
+ c_addr = cache_hdr->control_addr;
+ c_value_w = cache_hdr->cache_ctrl.write_value;
+
+ t_r_addr = cache_hdr->tag_reg_addr;
+ t_value = cache_hdr->addr_ctrl.init_tag_value;
+ r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
+
+ for (i = 0; i < loop_count; i++) {
+ qla8044_wr_reg_indirect(vha, t_r_addr, t_value);
+ qla8044_wr_reg_indirect(vha, c_addr, c_value_w);
+ addr = r_addr;
+ for (k = 0; k < r_cnt; k++) {
+ qla8044_rd_reg_indirect(vha, addr, &r_value);
+ *data_ptr++ = r_value;
+ addr += cache_hdr->read_ctrl.read_addr_stride;
+ }
+ t_value += cache_hdr->addr_ctrl.tag_value_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
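+/* Dump on-chip memory (OCM) by reading directly through the PCI BAR mapping. */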
+static void
+qla8044_minidump_process_rdocm(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t r_addr, r_stride, loop_cnt, i, r_value;
+ struct qla8044_minidump_entry_rdocm *ocm_hdr;
+ uint32_t *data_ptr = *d_ptr;
+ struct qla_hw_data *ha = vha->hw;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0f9, "Entering fn: %s\n", __func__);
+
+ ocm_hdr = (struct qla8044_minidump_entry_rdocm *)entry_hdr;
+ r_addr = ocm_hdr->read_addr;
+ r_stride = ocm_hdr->read_addr_stride;
+ loop_cnt = ocm_hdr->op_count;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0fa,
+ "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
+ __func__, r_addr, r_stride, loop_cnt);
+
+ for (i = 0; i < loop_cnt; i++) {
+ r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
+ *data_ptr++ = r_value;
+ r_addr += r_stride;
+ }
+ ql_dbg(ql_dbg_p3p, vha, 0xb0fb, "Leaving fn: %s datacount: 0x%lx\n",
+ __func__, (long unsigned int) (loop_cnt * sizeof(uint32_t)));
+
+ *d_ptr = data_ptr;
+}
+
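+/* Dump mux-selected registers, storing select-value/read-value pairs. */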
+static void
+qla8044_minidump_process_rdmux(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
+ struct qla8044_minidump_entry_mux *mux_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0fc, "Entering fn: %s\n", __func__);
+
+ mux_hdr = (struct qla8044_minidump_entry_mux *)entry_hdr;
+ r_addr = mux_hdr->read_addr;
+ s_addr = mux_hdr->select_addr;
+ s_stride = mux_hdr->select_value_stride;
+ s_value = mux_hdr->select_value;
+ loop_cnt = mux_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla8044_wr_reg_indirect(vha, s_addr, s_value);
+ qla8044_rd_reg_indirect(vha, r_addr, &r_value);
+ *data_ptr++ = s_value;
+ *data_ptr++ = r_value;
+ s_value += s_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
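+/* Dump per-queue registers by selecting each queue id in turn. */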
+static void
+qla8044_minidump_process_queue(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t s_addr, r_addr;
+ uint32_t r_stride, r_value, r_cnt, qid = 0;
+ uint32_t i, k, loop_cnt;
+ struct qla8044_minidump_entry_queue *q_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb0fd, "Entering fn: %s\n", __func__);
+ q_hdr = (struct qla8044_minidump_entry_queue *)entry_hdr;
+ s_addr = q_hdr->select_addr;
+ r_cnt = q_hdr->rd_strd.read_addr_cnt;
+ r_stride = q_hdr->rd_strd.read_addr_stride;
+ loop_cnt = q_hdr->op_count;
+
+ for (i = 0; i < loop_cnt; i++) {
+ qla8044_wr_reg_indirect(vha, s_addr, qid);
+ r_addr = q_hdr->read_addr;
+ for (k = 0; k < r_cnt; k++) {
+ qla8044_rd_reg_indirect(vha, r_addr, &r_value);
+ *data_ptr++ = r_value;
+ r_addr += r_stride;
+ }
+ qid += q_hdr->q_strd.queue_id_stride;
+ }
+ *d_ptr = data_ptr;
+}
+
+/* ISP83xx functions to process new minidump entries... */
+static uint32_t
+qla8044_minidump_process_pollrd(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t r_addr, s_addr, s_value, r_value, poll_wait, poll_mask;
+ uint16_t s_stride, i;
+ struct qla8044_minidump_entry_pollrd *pollrd_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ pollrd_hdr = (struct qla8044_minidump_entry_pollrd *) entry_hdr;
+ s_addr = pollrd_hdr->select_addr;
+ r_addr = pollrd_hdr->read_addr;
+ s_value = pollrd_hdr->select_value;
+ s_stride = pollrd_hdr->select_value_stride;
+
+ poll_wait = pollrd_hdr->poll_wait;
+ poll_mask = pollrd_hdr->poll_mask;
+
+ for (i = 0; i < pollrd_hdr->op_count; i++) {
+ qla8044_wr_reg_indirect(vha, s_addr, s_value);
+ poll_wait = pollrd_hdr->poll_wait;
+ while (1) {
+ qla8044_rd_reg_indirect(vha, s_addr, &r_value);
+ if ((r_value & poll_mask) != 0) {
+ break;
+ } else {
+ usleep_range(1000, 1100);
+ if (--poll_wait == 0) {
+ ql_log(ql_log_fatal, vha, 0xb0fe,
+ "%s: TIMEOUT\n", __func__);
+ goto error;
+ }
+ }
+ }
+ qla8044_rd_reg_indirect(vha, r_addr, &r_value);
+ *data_ptr++ = s_value;
+ *data_ptr++ = r_value;
+
+ s_value += s_stride;
+ }
+ *d_ptr = data_ptr;
+ return QLA_SUCCESS;
+
+error:
+ return QLA_FUNCTION_FAILED;
+}
+
+static void
+qla8044_minidump_process_rdmux2(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ uint32_t sel_val1, sel_val2, t_sel_val, data, i;
+ uint32_t sel_addr1, sel_addr2, sel_val_mask, read_addr;
+ struct qla8044_minidump_entry_rdmux2 *rdmux2_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ rdmux2_hdr = (struct qla8044_minidump_entry_rdmux2 *) entry_hdr;
+ sel_val1 = rdmux2_hdr->select_value_1;
+ sel_val2 = rdmux2_hdr->select_value_2;
+ sel_addr1 = rdmux2_hdr->select_addr_1;
+ sel_addr2 = rdmux2_hdr->select_addr_2;
+ sel_val_mask = rdmux2_hdr->select_value_mask;
+ read_addr = rdmux2_hdr->read_addr;
+
+ for (i = 0; i < rdmux2_hdr->op_count; i++) {
+ qla8044_wr_reg_indirect(vha, sel_addr1, sel_val1);
+ t_sel_val = sel_val1 & sel_val_mask;
+ *data_ptr++ = t_sel_val;
+
+ qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val);
+ qla8044_rd_reg_indirect(vha, read_addr, &data);
+
+ *data_ptr++ = data;
+
+ qla8044_wr_reg_indirect(vha, sel_addr1, sel_val2);
+ t_sel_val = sel_val2 & sel_val_mask;
+ *data_ptr++ = t_sel_val;
+
+ qla8044_wr_reg_indirect(vha, sel_addr2, t_sel_val);
+ qla8044_rd_reg_indirect(vha, read_addr, &data);
+
+ *data_ptr++ = data;
+
+ sel_val1 += rdmux2_hdr->select_value_stride;
+ sel_val2 += rdmux2_hdr->select_value_stride;
+ }
+
+ *d_ptr = data_ptr;
+}
+
+static uint32_t
+qla8044_minidump_process_pollrdmwr(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t poll_wait, poll_mask, r_value, data;
+ uint32_t addr_1, addr_2, value_1, value_2;
+ struct qla8044_minidump_entry_pollrdmwr *poll_hdr;
+ uint32_t *data_ptr = *d_ptr;
+
+ poll_hdr = (struct qla8044_minidump_entry_pollrdmwr *) entry_hdr;
+ addr_1 = poll_hdr->addr_1;
+ addr_2 = poll_hdr->addr_2;
+ value_1 = poll_hdr->value_1;
+ value_2 = poll_hdr->value_2;
+ poll_mask = poll_hdr->poll_mask;
+
+ qla8044_wr_reg_indirect(vha, addr_1, value_1);
+
+ poll_wait = poll_hdr->poll_wait;
+ while (1) {
+ qla8044_rd_reg_indirect(vha, addr_1, &r_value);
+
+ if ((r_value & poll_mask) != 0) {
+ break;
+ } else {
+ usleep_range(1000, 1100);
+ if (--poll_wait == 0) {
+ ql_log(ql_log_fatal, vha, 0xb0ff,
+ "%s: TIMEOUT\n", __func__);
+ goto error;
+ }
+ }
+ }
+
+ qla8044_rd_reg_indirect(vha, addr_2, &data);
+ data &= poll_hdr->modify_mask;
+ qla8044_wr_reg_indirect(vha, addr_2, data);
+ qla8044_wr_reg_indirect(vha, addr_1, value_2);
+
+ poll_wait = poll_hdr->poll_wait;
+ while (1) {
+ qla8044_rd_reg_indirect(vha, addr_1, &r_value);
+
+ if ((r_value & poll_mask) != 0) {
+ break;
+ } else {
+ usleep_range(1000, 1100);
+ if (--poll_wait == 0) {
+ ql_log(ql_log_fatal, vha, 0xb100,
+ "%s: TIMEOUT2\n", __func__);
+ goto error;
+ }
+ }
+ }
+
+ *data_ptr++ = addr_2;
+ *data_ptr++ = data;
+
+ *d_ptr = data_ptr;
+
+ return QLA_SUCCESS;
+
+error:
+ return QLA_FUNCTION_FAILED;
+}
+
+#define ISP8044_PEX_DMA_ENGINE_INDEX 8
+#define ISP8044_PEX_DMA_BASE_ADDRESS 0x77320000
+#define ISP8044_PEX_DMA_NUM_OFFSET 0x10000
+#define ISP8044_PEX_DMA_CMD_ADDR_LOW 0x0
+#define ISP8044_PEX_DMA_CMD_ADDR_HIGH 0x04
+#define ISP8044_PEX_DMA_CMD_STS_AND_CNTRL 0x08
+
+#define ISP8044_PEX_DMA_READ_SIZE (16 * 1024)
+#define ISP8044_PEX_DMA_MAX_WAIT (100 * 100) /* Max wait of 100 msecs */
+
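+/*
+ * Read the command/status register of the pex-dma engine selected by the
+ * minidump template and report whether the engine is available (BIT_31 set).
+ */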
+static int
+qla8044_check_dma_engine_state(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int rval = QLA_SUCCESS;
+ uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
+ uint64_t dma_base_addr = 0;
+ struct qla8044_minidump_template_hdr *tmplt_hdr = NULL;
+
+ tmplt_hdr = ha->md_tmplt_hdr;
+ dma_eng_num =
+ tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX];
+ dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS +
+ (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET);
+
+ /* Read the pex-dma's command-status-and-control register. */
+ rval = qla8044_rd_reg_indirect(vha,
+ (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL),
+ &cmd_sts_and_cntrl);
+ if (rval)
+ return QLA_FUNCTION_FAILED;
+
+ /* Check if requested pex-dma engine is available. */
+ if (cmd_sts_and_cntrl & BIT_31)
+ return QLA_SUCCESS;
+
+ return QLA_FUNCTION_FAILED;
+}
+
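+/*
+ * Program the descriptor address and start command for the pex-dma engine,
+ * then poll its status register until the transfer completes or times out.
+ */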
+static int
+qla8044_start_pex_dma(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int rval = QLA_SUCCESS, wait = 0;
+ uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
+ uint64_t dma_base_addr = 0;
+ struct qla8044_minidump_template_hdr *tmplt_hdr = NULL;
+
+ tmplt_hdr = ha->md_tmplt_hdr;
+ dma_eng_num =
+ tmplt_hdr->saved_state_array[ISP8044_PEX_DMA_ENGINE_INDEX];
+ dma_base_addr = ISP8044_PEX_DMA_BASE_ADDRESS +
+ (dma_eng_num * ISP8044_PEX_DMA_NUM_OFFSET);
+
+ rval = qla8044_wr_reg_indirect(vha,
+ dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_LOW,
+ m_hdr->desc_card_addr);
+ if (rval)
+ goto error_exit;
+
+ rval = qla8044_wr_reg_indirect(vha,
+ dma_base_addr + ISP8044_PEX_DMA_CMD_ADDR_HIGH, 0);
+ if (rval)
+ goto error_exit;
+
+ rval = qla8044_wr_reg_indirect(vha,
+ dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL,
+ m_hdr->start_dma_cmd);
+ if (rval)
+ goto error_exit;
+
+ /* Wait for dma operation to complete. */
+ for (wait = 0; wait < ISP8044_PEX_DMA_MAX_WAIT; wait++) {
+ rval = qla8044_rd_reg_indirect(vha,
+ (dma_base_addr + ISP8044_PEX_DMA_CMD_STS_AND_CNTRL),
+ &cmd_sts_and_cntrl);
+ if (rval)
+ goto error_exit;
+
+ if ((cmd_sts_and_cntrl & BIT_1) == 0)
+ break;
+
+ udelay(10);
+ }
+
+ /* Wait a max of 100 ms, otherwise fall back to the rdmem entry read */
+ if (wait >= ISP8044_PEX_DMA_MAX_WAIT) {
+ rval = QLA_FUNCTION_FAILED;
+ goto error_exit;
+ }
+
+error_exit:
+ return rval;
+}
+
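+/*
+ * Capture an RDMEM entry using the pex-dma engine, transferring the region
+ * in ISP8044_PEX_DMA_READ_SIZE chunks through a coherent DMA buffer. Returns
+ * QLA_FUNCTION_FAILED if the engine is unavailable so the caller can fall
+ * back to the register-based rdmem read.
+ */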
+static int
+qla8044_minidump_pex_dma_read(struct scsi_qla_host *vha,
+ struct qla8044_minidump_entry_hdr *entry_hdr, uint32_t **d_ptr)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int rval = QLA_SUCCESS;
+ struct qla8044_minidump_entry_rdmem_pex_dma *m_hdr = NULL;
+ uint32_t chunk_size, read_size;
+ uint8_t *data_ptr = (uint8_t *)*d_ptr;
+ void *rdmem_buffer = NULL;
+ dma_addr_t rdmem_dma;
+ struct qla8044_pex_dma_descriptor dma_desc;
+
+ rval = qla8044_check_dma_engine_state(vha);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb147,
+ "DMA engine not available. Fallback to rdmem-read.\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ m_hdr = (void *)entry_hdr;
+
+ rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev,
+ ISP8044_PEX_DMA_READ_SIZE, &rdmem_dma, GFP_KERNEL);
+ if (!rdmem_buffer) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb148,
+ "Unable to allocate rdmem dma buffer\n");
+ return QLA_FUNCTION_FAILED;
+ }
+
+ /* Prepare pex-dma descriptor to be written to MS memory. */
+ /* dma-desc-cmd layout:
+ * 0-3: dma-desc-cmd 0-3
+ * 4-7: pcid function number
+ * 8-15: dma-desc-cmd 8-15
+ * dma_bus_addr: dma buffer address
+ * cmd.read_data_size: amount of data-chunk to be read.
+ */
+ dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f);
+ dma_desc.cmd.dma_desc_cmd |=
+ ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4);
+
+ dma_desc.dma_bus_addr = rdmem_dma;
+ dma_desc.cmd.read_data_size = chunk_size = ISP8044_PEX_DMA_READ_SIZE;
+ read_size = 0;
+
+ /*
+ * Perform rdmem operation using pex-dma.
+ * Prepare dma in chunks of ISP8044_PEX_DMA_READ_SIZE.
+ */
+ while (read_size < m_hdr->read_data_size) {
+ if (m_hdr->read_data_size - read_size <
+ ISP8044_PEX_DMA_READ_SIZE) {
+ chunk_size = (m_hdr->read_data_size - read_size);
+ dma_desc.cmd.read_data_size = chunk_size;
+ }
+
+ dma_desc.src_addr = m_hdr->read_addr + read_size;
+
+ /* Prepare: Write pex-dma descriptor to MS memory. */
+ rval = qla8044_ms_mem_write_128b(vha,
+ m_hdr->desc_card_addr, (void *)&dma_desc,
+ (sizeof(struct qla8044_pex_dma_descriptor)/16));
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0xb14a,
+ "%s: Error writing rdmem-dma-init to MS !!!\n",
+ __func__);
+ goto error_exit;
+ }
+ ql_dbg(ql_dbg_p3p, vha, 0xb14b,
+ "%s: Dma-descriptor: Instruct for rdmem dma "
+ "(chunk_size 0x%x).\n", __func__, chunk_size);
+
+ /* Execute: Start pex-dma operation. */
+ rval = qla8044_start_pex_dma(vha, m_hdr);
+ if (rval)
+ goto error_exit;
+
+ memcpy(data_ptr, rdmem_buffer, chunk_size);
+ data_ptr += chunk_size;
+ read_size += chunk_size;
+ }
+
+ *d_ptr = (void *)data_ptr;
+
+error_exit:
+ if (rdmem_buffer)
+ dma_free_coherent(&ha->pdev->dev, ISP8044_PEX_DMA_READ_SIZE,
+ rdmem_buffer, rdmem_dma);
+
+ return rval;
+}
+
+/**
+ * qla8044_collect_md_data - Retrieve firmware minidump data.
+ * @vha: pointer to adapter structure
+ **/
+int
+qla8044_collect_md_data(struct scsi_qla_host *vha)
+{
+ int num_entry_hdr = 0;
+ struct qla8044_minidump_entry_hdr *entry_hdr;
+ struct qla8044_minidump_template_hdr *tmplt_hdr;
+ uint32_t *data_ptr;
+ uint32_t data_collected = 0, f_capture_mask;
+ int i, rval = QLA_FUNCTION_FAILED;
+ uint64_t now;
+ uint32_t timestamp, idc_control;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!ha->md_dump) {
+ ql_log(ql_log_info, vha, 0xb101,
+ "%s(%ld) No buffer to dump\n",
+ __func__, vha->host_no);
+ return rval;
+ }
+
+ if (ha->fw_dumped) {
+ ql_log(ql_log_warn, vha, 0xb10d,
+ "Firmware has been previously dumped (%p) "
+ "-- ignoring request.\n", ha->fw_dump);
+ goto md_failed;
+ }
+
+ ha->fw_dumped = 0;
+
+ if (!ha->md_tmplt_hdr || !ha->md_dump) {
+ ql_log(ql_log_warn, vha, 0xb10e,
+ "Memory not allocated for minidump capture\n");
+ goto md_failed;
+ }
+
+ qla8044_idc_lock(ha);
+ idc_control = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+ if (idc_control & GRACEFUL_RESET_BIT1) {
+ ql_log(ql_log_warn, vha, 0xb112,
+ "Forced reset from application, "
+ "ignore minidump capture\n");
+ qla8044_wr_reg(ha, QLA8044_IDC_DRV_CTRL,
+ (idc_control & ~GRACEFUL_RESET_BIT1));
+ qla8044_idc_unlock(ha);
+
+ goto md_failed;
+ }
+ qla8044_idc_unlock(ha);
+
+ if (qla82xx_validate_template_chksum(vha)) {
+ ql_log(ql_log_info, vha, 0xb109,
+ "Template checksum validation error\n");
+ goto md_failed;
+ }
+
+ tmplt_hdr = (struct qla8044_minidump_template_hdr *)
+ ha->md_tmplt_hdr;
+ data_ptr = (uint32_t *)((uint8_t *)ha->md_dump);
+ num_entry_hdr = tmplt_hdr->num_of_entries;
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb11a,
+ "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level);
+
+ f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF;
+
+ /* Validate whether required debug level is set */
+ if ((f_capture_mask & 0x3) != 0x3) {
+ ql_log(ql_log_warn, vha, 0xb10f,
+ "Minimum required capture mask[0x%x] level not set\n",
+ f_capture_mask);
+ }
+ tmplt_hdr->driver_capture_mask = ql2xmdcapmask;
+ ql_log(ql_log_info, vha, 0xb102,
+ "[%s]: starting data ptr: %p\n",
+ __func__, data_ptr);
+ ql_log(ql_log_info, vha, 0xb10b,
+ "[%s]: no of entry headers in Template: 0x%x\n",
+ __func__, num_entry_hdr);
+ ql_log(ql_log_info, vha, 0xb10c,
+ "[%s]: Total_data_size 0x%x, %d obtained\n",
+ __func__, ha->md_dump_size, ha->md_dump_size);
+
+ /* Update current timestamp before taking dump */
+ now = get_jiffies_64();
+ timestamp = (u32)(jiffies_to_msecs(now) / 1000);
+ tmplt_hdr->driver_timestamp = timestamp;
+
+ entry_hdr = (struct qla8044_minidump_entry_hdr *)
+ (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset);
+ tmplt_hdr->saved_state_array[QLA8044_SS_OCM_WNDREG_INDEX] =
+ tmplt_hdr->ocm_window_reg[ha->portnum];
+
+ /* Walk through the entry headers - validate/perform required action */
+ for (i = 0; i < num_entry_hdr; i++) {
+ if (data_collected > ha->md_dump_size) {
+ ql_log(ql_log_info, vha, 0xb103,
+ "Data collected: [0x%x], "
+ "Total Dump size: [0x%x]\n",
+ data_collected, ha->md_dump_size);
+ return rval;
+ }
+
+ if (!(entry_hdr->d_ctrl.entry_capture_mask &
+ ql2xmdcapmask)) {
+ entry_hdr->d_ctrl.driver_flags |=
+ QLA82XX_DBG_SKIPPED_FLAG;
+ goto skip_nxt_entry;
+ }
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb104,
+ "Data collected: [0x%x], Dump size left:[0x%x]\n",
+ data_collected,
+ (ha->md_dump_size - data_collected));
+
+ /* Decode the entry type and take required action to capture
+ * debug data
+ */
+ switch (entry_hdr->entry_type) {
+ case QLA82XX_RDEND:
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ break;
+ case QLA82XX_CNTRL:
+ rval = qla8044_minidump_process_control(vha,
+ entry_hdr);
+ if (rval != QLA_SUCCESS) {
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA82XX_RDCRB:
+ qla8044_minidump_process_rdcrb(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_RDMEM:
+ rval = qla8044_minidump_pex_dma_read(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ rval = qla8044_minidump_process_rdmem(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ qla8044_mark_entry_skipped(vha,
+ entry_hdr, i);
+ goto md_failed;
+ }
+ }
+ break;
+ case QLA82XX_BOARD:
+ case QLA82XX_RDROM:
+ rval = qla8044_minidump_process_rdrom(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ qla8044_mark_entry_skipped(vha,
+ entry_hdr, i);
+ }
+ break;
+ case QLA82XX_L2DTG:
+ case QLA82XX_L2ITG:
+ case QLA82XX_L2DAT:
+ case QLA82XX_L2INS:
+ rval = qla8044_minidump_process_l2tag(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ goto md_failed;
+ }
+ break;
+ case QLA8044_L1DTG:
+ case QLA8044_L1ITG:
+ case QLA82XX_L1DAT:
+ case QLA82XX_L1INS:
+ qla8044_minidump_process_l1cache(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_RDOCM:
+ qla8044_minidump_process_rdocm(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_RDMUX:
+ qla8044_minidump_process_rdmux(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA82XX_QUEUE:
+ qla8044_minidump_process_queue(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA8044_POLLRD:
+ rval = qla8044_minidump_process_pollrd(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ break;
+ case QLA8044_RDMUX2:
+ qla8044_minidump_process_rdmux2(vha,
+ entry_hdr, &data_ptr);
+ break;
+ case QLA8044_POLLRDMWR:
+ rval = qla8044_minidump_process_pollrdmwr(vha,
+ entry_hdr, &data_ptr);
+ if (rval != QLA_SUCCESS)
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ break;
+ case QLA82XX_RDNOP:
+ default:
+ qla8044_mark_entry_skipped(vha, entry_hdr, i);
+ break;
+ }
+
+ data_collected = (uint8_t *)data_ptr -
+ (uint8_t *)((uint8_t *)ha->md_dump);
+skip_nxt_entry:
+ /*
+ * next entry in the template
+ */
+ entry_hdr = (struct qla8044_minidump_entry_hdr *)
+ (((uint8_t *)entry_hdr) + entry_hdr->entry_size);
+ }
+
+ if (data_collected != ha->md_dump_size) {
+ ql_log(ql_log_info, vha, 0xb105,
+ "Dump data mismatch: Data collected: "
+ "[0x%x], total_data_size:[0x%x]\n",
+ data_collected, ha->md_dump_size);
+ goto md_failed;
+ }
+
+ ql_log(ql_log_info, vha, 0xb110,
+ "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n",
+ vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump);
+ ha->fw_dumped = 1;
+ qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
+
+
+ ql_log(ql_log_info, vha, 0xb106,
+ "Leaving fn: %s Last entry: 0x%x\n",
+ __func__, i);
+md_failed:
+ return rval;
+}
+
+void
+qla8044_get_minidump(struct scsi_qla_host *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!qla8044_collect_md_data(vha)) {
+ ha->fw_dumped = 1;
+ } else {
+ ql_log(ql_log_fatal, vha, 0xb0db,
+ "%s: Unable to collect minidump\n",
+ __func__);
+ }
+}
+
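+/* Poll FLASH_STATUS until the ready bits are set or retries are exhausted. */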
+static int
+qla8044_poll_flash_status_reg(struct scsi_qla_host *vha)
+{
+ uint32_t flash_status;
+ int retries = QLA8044_FLASH_READ_RETRY_COUNT;
+ int ret_val = QLA_SUCCESS;
+
+ while (retries--) {
+ ret_val = qla8044_rd_reg_indirect(vha, QLA8044_FLASH_STATUS,
+ &flash_status);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb13c,
+ "%s: Failed to read FLASH_STATUS reg.\n",
+ __func__);
+ break;
+ }
+ if ((flash_status & QLA8044_FLASH_STATUS_READY) ==
+ QLA8044_FLASH_STATUS_READY)
+ break;
+ msleep(QLA8044_FLASH_STATUS_REG_POLL_DELAY);
+ }
+
+ if (!retries)
+ ret_val = QLA_FUNCTION_FAILED;
+
+ return ret_val;
+}
+
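+/*
+ * Write a value to the flash status register using the write-status command
+ * from the flash descriptor table, then wait for the flash to become ready.
+ */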
+static int
+qla8044_write_flash_status_reg(struct scsi_qla_host *vha,
+ uint32_t data)
+{
+ int ret_val = QLA_SUCCESS;
+ uint32_t cmd;
+
+ cmd = vha->hw->fdt_wrt_sts_reg_cmd;
+
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+ QLA8044_FLASH_STATUS_WRITE_DEF_SIG | cmd);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb125,
+ "%s: Failed to write to FLASH_ADDR.\n", __func__);
+ goto exit_func;
+ }
+
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, data);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb126,
+ "%s: Failed to write to FLASH_WRDATA.\n", __func__);
+ goto exit_func;
+ }
+
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+ QLA8044_FLASH_SECOND_ERASE_MS_VAL);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb127,
+ "%s: Failed to write to FLASH_CONTROL.\n", __func__);
+ goto exit_func;
+ }
+
+ ret_val = qla8044_poll_flash_status_reg(vha);
+ if (ret_val)
+ ql_log(ql_log_warn, vha, 0xb128,
+ "%s: Error polling flash status reg.\n", __func__);
+
+exit_func:
+ return ret_val;
+}
+
+/*
+ * This function assumes that the flash lock is held.
+ */
+static int
+qla8044_unprotect_flash(scsi_qla_host_t *vha)
+{
+ int ret_val;
+ struct qla_hw_data *ha = vha->hw;
+
+ ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_enable);
+ if (ret_val)
+ ql_log(ql_log_warn, vha, 0xb139,
+ "%s: Write flash status failed.\n", __func__);
+
+ return ret_val;
+}
+
+/*
+ * This function assumes that the flash lock is held.
+ */
+static int
+qla8044_protect_flash(scsi_qla_host_t *vha)
+{
+ int ret_val;
+ struct qla_hw_data *ha = vha->hw;
+
+ ret_val = qla8044_write_flash_status_reg(vha, ha->fdt_wrt_disable);
+ if (ret_val)
+ ql_log(ql_log_warn, vha, 0xb13b,
+ "%s: Write flash status failed.\n", __func__);
+
+ return ret_val;
+}
+
+
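+/*
+ * Erase the 64 KB flash sector at sector_start_addr: wait for the flash to
+ * be ready, write the byte-swapped sector address and the erase command from
+ * the flash descriptor table, then poll for completion.
+ */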
+static int
+qla8044_erase_flash_sector(struct scsi_qla_host *vha,
+ uint32_t sector_start_addr)
+{
+ uint32_t reversed_addr;
+ int ret_val = QLA_SUCCESS;
+
+ ret_val = qla8044_poll_flash_status_reg(vha);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb12e,
+ "%s: Poll flash status after erase failed..\n", __func__);
+ }
+
+ reversed_addr = (((sector_start_addr & 0xFF) << 16) |
+ (sector_start_addr & 0xFF00) |
+ ((sector_start_addr & 0xFF0000) >> 16));
+
+ ret_val = qla8044_wr_reg_indirect(vha,
+ QLA8044_FLASH_WRDATA, reversed_addr);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb12f,
+ "%s: Failed to write to FLASH_WRDATA.\n", __func__);
+ }
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+ QLA8044_FLASH_ERASE_SIG | vha->hw->fdt_erase_cmd);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb130,
+ "%s: Failed to write to FLASH_ADDR.\n", __func__);
+ }
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+ QLA8044_FLASH_LAST_ERASE_MS_VAL);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb131,
+ "%s: Failed write to FLASH_CONTROL.\n", __func__);
+ }
+ ret_val = qla8044_poll_flash_status_reg(vha);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb132,
+ "%s: Poll flash status failed.\n", __func__);
+ }
+
+
+ return ret_val;
+}
+
+/*
+ * qla8044_flash_write_u32 - Write data to flash
+ *
+ * @vha: Pointer to adapter structure
+ * @addr: Flash address to write to
+ * @p_data: Data to be written
+ *
+ * Return Value - QLA_SUCCESS/QLA_FUNCTION_FAILED
+ *
+ * NOTE: Lock should be held on entry
+ */
+static int
+qla8044_flash_write_u32(struct scsi_qla_host *vha, uint32_t addr,
+ uint32_t *p_data)
+{
+ int ret_val = QLA_SUCCESS;
+
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+ 0x00800000 | (addr >> 2));
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb134,
+ "%s: Failed write to FLASH_ADDR.\n", __func__);
+ goto exit_func;
+ }
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *p_data);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb135,
+ "%s: Failed write to FLASH_WRDATA.\n", __func__);
+ goto exit_func;
+ }
+ ret_val = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL, 0x3D);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb136,
+ "%s: Failed write to FLASH_CONTROL.\n", __func__);
+ goto exit_func;
+ }
+ ret_val = qla8044_poll_flash_status_reg(vha);
+ if (ret_val) {
+ ql_log(ql_log_warn, vha, 0xb137,
+ "%s: Poll flash status failed.\n", __func__);
+ }
+
+exit_func:
+ return ret_val;
+}
+
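+/*
+ * Write between 2 and 64 dwords to flash using the controller's buffered
+ * (burst) write sequence. If the SPI status reports an error afterwards,
+ * the error bit is cleared and a warning is logged.
+ */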
+static int
+qla8044_write_flash_buffer_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
+ uint32_t faddr, uint32_t dwords)
+{
+ int ret = QLA_FUNCTION_FAILED;
+ uint32_t spi_val;
+
+ if (dwords < QLA8044_MIN_OPTROM_BURST_DWORDS ||
+ dwords > QLA8044_MAX_OPTROM_BURST_DWORDS) {
+ ql_dbg(ql_dbg_user, vha, 0xb123,
+ "Got unsupported dwords = 0x%x.\n",
+ dwords);
+ return QLA_FUNCTION_FAILED;
+ }
+
+ qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL, &spi_val);
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
+ spi_val | QLA8044_FLASH_SPI_CTL);
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+ QLA8044_FLASH_FIRST_TEMP_VAL);
+
+ /* First DWORD write to FLASH_WRDATA */
+ ret = qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA,
+ *dwptr++);
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+ QLA8044_FLASH_FIRST_MS_PATTERN);
+
+ ret = qla8044_poll_flash_status_reg(vha);
+ if (ret) {
+ ql_log(ql_log_warn, vha, 0xb124,
+ "%s: Failed.\n", __func__);
+ goto exit_func;
+ }
+
+ dwords--;
+
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+ QLA8044_FLASH_SECOND_TEMP_VAL);
+
+
+ /* Second to N-1 DWORDS writes */
+ while (dwords != 1) {
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+ QLA8044_FLASH_SECOND_MS_PATTERN);
+ ret = qla8044_poll_flash_status_reg(vha);
+ if (ret) {
+ ql_log(ql_log_warn, vha, 0xb129,
+ "%s: Failed.\n", __func__);
+ goto exit_func;
+ }
+ dwords--;
+ }
+
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_ADDR,
+ QLA8044_FLASH_FIRST_TEMP_VAL | (faddr >> 2));
+
+ /* Last DWORD write */
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_WRDATA, *dwptr++);
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_CONTROL,
+ QLA8044_FLASH_LAST_MS_PATTERN);
+ ret = qla8044_poll_flash_status_reg(vha);
+ if (ret) {
+ ql_log(ql_log_warn, vha, 0xb12a,
+ "%s: Failed.\n", __func__);
+ goto exit_func;
+ }
+ qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_STATUS, &spi_val);
+
+ if ((spi_val & QLA8044_FLASH_SPI_CTL) == QLA8044_FLASH_SPI_CTL) {
+ ql_log(ql_log_warn, vha, 0xb12b,
+ "%s: Failed.\n", __func__);
+ spi_val = 0;
+ /* Operation failed, clear error bit. */
+ qla8044_rd_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
+ &spi_val);
+ qla8044_wr_reg_indirect(vha, QLA8044_FLASH_SPI_CONTROL,
+ spi_val | QLA8044_FLASH_SPI_CTL);
+ }
+exit_func:
+ return ret;
+}
+
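+/* Slow-path flash write: program one dword at a time via FLASH_WRDATA. */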
+static int
+qla8044_write_flash_dword_mode(scsi_qla_host_t *vha, uint32_t *dwptr,
+ uint32_t faddr, uint32_t dwords)
+{
+ int ret = QLA_FUNCTION_FAILED;
+ uint32_t liter;
+
+ for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
+ ret = qla8044_flash_write_u32(vha, faddr, dwptr);
+ if (ret) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb141,
+ "%s: flash address=%x data=%x.\n", __func__,
+ faddr, *dwptr);
+ break;
+ }
+ }
+
+ return ret;
+}
+
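+/*
+ * Write an option-ROM region to flash: block SCSI requests, take the flash
+ * lock, unprotect the flash, erase the covered sectors, then write the data
+ * in 64-dword bursts (falling back to dword mode on error) before restoring
+ * protection and unblocking requests.
+ */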
+int
+qla8044_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
+ uint32_t offset, uint32_t length)
+{
+ int rval = QLA_FUNCTION_FAILED, i, burst_iter_count;
+ int dword_count, erase_sec_count;
+ uint32_t erase_offset;
+ uint8_t *p_cache, *p_src;
+
+ erase_offset = offset;
+
+ p_cache = kcalloc(length, sizeof(uint8_t), GFP_KERNEL);
+ if (!p_cache)
+ return QLA_FUNCTION_FAILED;
+
+ memcpy(p_cache, buf, length);
+ p_src = p_cache;
+ dword_count = length / sizeof(uint32_t);
+ /* Since the offset and length are sector aligned, the dword count is
+ * always a multiple of the burst size (64 dwords).
+ */
+ burst_iter_count = dword_count / QLA8044_MAX_OPTROM_BURST_DWORDS;
+ erase_sec_count = length / QLA8044_SECTOR_SIZE;
+
+ /* Suspend HBA. */
+ scsi_block_requests(vha->host);
+ /* Lock and enable write for whole operation. */
+ qla8044_flash_lock(vha);
+ qla8044_unprotect_flash(vha);
+
+ /* Erasing the sectors */
+ for (i = 0; i < erase_sec_count; i++) {
+ rval = qla8044_erase_flash_sector(vha, erase_offset);
+ ql_dbg(ql_dbg_user, vha, 0xb138,
+ "Done erase of sector=0x%x.\n",
+ erase_offset);
+ if (rval) {
+ ql_log(ql_log_warn, vha, 0xb121,
+ "Failed to erase the sector having address: "
+ "0x%x.\n", erase_offset);
+ goto out;
+ }
+ erase_offset += QLA8044_SECTOR_SIZE;
+ }
+ ql_dbg(ql_dbg_user, vha, 0xb13f,
+ "Got write for addr = 0x%x length=0x%x.\n",
+ offset, length);
+
+ for (i = 0; i < burst_iter_count; i++) {
+
+ /* Go with write. */
+ rval = qla8044_write_flash_buffer_mode(vha, (uint32_t *)p_src,
+ offset, QLA8044_MAX_OPTROM_BURST_DWORDS);
+ if (rval) {
+ /* Buffer Mode failed skip to dword mode */
+ ql_log(ql_log_warn, vha, 0xb122,
+ "Failed to write flash in buffer mode, "
+ "Reverting to slow-write.\n");
+ rval = qla8044_write_flash_dword_mode(vha,
+ (uint32_t *)p_src, offset,
+ QLA8044_MAX_OPTROM_BURST_DWORDS);
+ }
+ p_src += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
+ offset += sizeof(uint32_t) * QLA8044_MAX_OPTROM_BURST_DWORDS;
+ }
+ ql_dbg(ql_dbg_user, vha, 0xb133,
+ "Done writing.\n");
+
+out:
+ qla8044_protect_flash(vha);
+ qla8044_flash_unlock(vha);
+ scsi_unblock_requests(vha->host);
+ kfree(p_cache);
+
+ return rval;
+}
+
+#define LEG_INT_PTR_B31 (1 << 31)
+#define LEG_INT_PTR_B30 (1 << 30)
+#define PF_BITS_MASK (0xF << 16)
+/**
+ * qla8044_intr_handler() - Process interrupts for the ISP8044
+ * @irq: interrupt number
+ * @dev_id: SCSI driver HA context
+ *
+ * Called by the system whenever the host adapter generates an interrupt.
+ *
+ * Returns handled flag.
+ */
+irqreturn_t
+qla8044_intr_handler(int irq, void *dev_id)
+{
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct rsp_que *rsp;
+ struct device_reg_82xx __iomem *reg;
+ int status = 0;
+ unsigned long flags;
+ unsigned long iter;
+ uint32_t stat;
+ uint16_t mb[4];
+ uint32_t leg_int_ptr = 0, pf_bit;
+
+ rsp = (struct rsp_que *) dev_id;
+ if (!rsp) {
+ ql_log(ql_log_info, NULL, 0xb143,
+ "%s(): NULL response queue pointer\n", __func__);
+ return IRQ_NONE;
+ }
+ ha = rsp->hw;
+ vha = pci_get_drvdata(ha->pdev);
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ return IRQ_HANDLED;
+
+ leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);
+
+ /* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
+ if (!(leg_int_ptr & (LEG_INT_PTR_B31))) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb144,
+ "%s: Legacy Interrupt Bit 31 not set, "
+ "spurious interrupt!\n", __func__);
+ return IRQ_NONE;
+ }
+
+ pf_bit = ha->portnum << 16;
+ /* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
+ if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb145,
+ "%s: Incorrect function ID 0x%x in "
+ "legacy interrupt register, "
+ "ha->pf_bit = 0x%x\n", __func__,
+ (leg_int_ptr & (PF_BITS_MASK)), pf_bit);
+ return IRQ_NONE;
+ }
+
+ /* To de-assert the legacy interrupt, write 0 to the Legacy Interrupt
+ * Trigger Control register and poll until bit 30 of the Legacy
+ * Interrupt Pointer register is 0.
+ */
+ qla8044_wr_reg(ha, LEG_INTR_TRIG_OFFSET, 0);
+ do {
+ leg_int_ptr = qla8044_rd_reg(ha, LEG_INTR_PTR_OFFSET);
+ if ((leg_int_ptr & (PF_BITS_MASK)) != pf_bit)
+ break;
+ } while (leg_int_ptr & (LEG_INT_PTR_B30));
+
+ reg = &ha->iobase->isp82;
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ for (iter = 1; iter--; ) {
+
+ if (RD_REG_DWORD(&reg->host_int)) {
+ stat = RD_REG_DWORD(&reg->host_status);
+ if ((stat & HSRX_RISC_INT) == 0)
+ break;
+
+ switch (stat & 0xff) {
+ case 0x1:
+ case 0x2:
+ case 0x10:
+ case 0x11:
+ qla82xx_mbx_completion(vha, MSW(stat));
+ status |= MBX_INTERRUPT;
+ break;
+ case 0x12:
+ mb[0] = MSW(stat);
+ mb[1] = RD_REG_WORD(&reg->mailbox_out[1]);
+ mb[2] = RD_REG_WORD(&reg->mailbox_out[2]);
+ mb[3] = RD_REG_WORD(&reg->mailbox_out[3]);
+ qla2x00_async_event(vha, rsp, mb);
+ break;
+ case 0x13:
+ qla24xx_process_response_queue(vha, rsp);
+ break;
+ default:
+ ql_dbg(ql_dbg_p3p, vha, 0xb146,
+ "Unrecognized interrupt type "
+ "(%d).\n", stat & 0xff);
+ break;
+ }
+ }
+ WRT_REG_DWORD(&reg->host_int, 0);
+ }
+
+ qla2x00_handle_mbx_completion(ha, status);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
+static int
+qla8044_idc_dontreset(struct qla_hw_data *ha)
+{
+ uint32_t idc_ctrl;
+
+ idc_ctrl = qla8044_rd_reg(ha, QLA8044_IDC_DRV_CTRL);
+ return idc_ctrl & DONTRESET_BIT0;
+}
+
+static void
+qla8044_clear_rst_ready(scsi_qla_host_t *vha)
+{
+ uint32_t drv_state;
+
+ drv_state = qla8044_rd_direct(vha, QLA8044_CRB_DRV_STATE_INDEX);
+
+ /*
+ * For ISP8044, the drv_state register has 1 bit per function;
+ * clear this function's bit to drop its reset-ready acknowledgement.
+ * For ISP82xx, drv_state has 4 bits per function.
+ */
+ drv_state &= ~(1 << vha->hw->portnum);
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb13d,
+ "drv_state: 0x%08x\n", drv_state);
+ qla8044_wr_direct(vha, QLA8044_CRB_DRV_STATE_INDEX, drv_state);
+}
+
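+/*
+ * qla8044_abort_isp - Initiate ISP8044 reset recovery: move the device to
+ * NEED_RESET (unless reset recovery is disabled via IDC_CTRL), run the
+ * device state handler, and restart the ISP on success.
+ */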
+int
+qla8044_abort_isp(scsi_qla_host_t *vha)
+{
+ int rval;
+ uint32_t dev_state;
+ struct qla_hw_data *ha = vha->hw;
+
+ qla8044_idc_lock(ha);
+ dev_state = qla8044_rd_direct(vha, QLA8044_CRB_DEV_STATE_INDEX);
+
+ if (ql2xdontresethba)
+ qla8044_set_idc_dontreset(vha);
+
+ /* If device_state is NEED_RESET, go ahead with the reset,
+ * irrespective of ql2xdontresethba. This allows a non-reset-owner
+ * to force a reset: the non-reset-owner sets IDC_CTRL BIT0 to
+ * prevent the reset-owner from doing a reset and then forces a
+ * reset by setting device_state to NEED_RESET. */
+ if (dev_state == QLA8XXX_DEV_READY) {
+ /* If IDC_CTRL DONTRESETHBA_BIT0 is set don't do reset
+ * recovery */
+ if (qla8044_idc_dontreset(ha) == DONTRESET_BIT0) {
+ ql_dbg(ql_dbg_p3p, vha, 0xb13e,
+ "Reset recovery disabled\n");
+ rval = QLA_FUNCTION_FAILED;
+ goto exit_isp_reset;
+ }
+
+ ql_dbg(ql_dbg_p3p, vha, 0xb140,
+ "HW State: NEED RESET\n");
+ qla8044_wr_direct(vha, QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_NEED_RESET);
+ }
+
+ /* For ISP8044, the reset owner is NIC, iSCSI or FCoE based on priority
+ * and which drivers are present. Unlike ISP82XX, the function setting
+ * NEED_RESET may not be the reset owner. */
+ qla83xx_reset_ownership(vha);
+
+ qla8044_idc_unlock(ha);
+ rval = qla8044_device_state_handler(vha);
+ qla8044_idc_lock(ha);
+ qla8044_clear_rst_ready(vha);
+
+exit_isp_reset:
+ qla8044_idc_unlock(ha);
+ if (rval == QLA_SUCCESS) {
+ ha->flags.isp82xx_fw_hung = 0;
+ ha->flags.nic_core_reset_hdlr_active = 0;
+ rval = qla82xx_restart_isp(vha);
+ }
+
+ return rval;
+}
+
diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h
new file mode 100644
index 00000000000..2ab2eabab90
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nx2.h
@@ -0,0 +1,551 @@
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c) 2003-2013 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+
+#ifndef __QLA_NX2_H
+#define __QLA_NX2_H
+
+#define QSNT_ACK_TOV 30
+#define INTENT_TO_RECOVER 0x01
+#define PROCEED_TO_RECOVER 0x02
+#define IDC_LOCK_RECOVERY_OWNER_MASK 0x3C
+#define IDC_LOCK_RECOVERY_STATE_MASK 0x3
+#define IDC_LOCK_RECOVERY_STATE_SHIFT_BITS 2
+
+#define QLA8044_DRV_LOCK_MSLEEP 200
+#define QLA8044_ADDR_DDR_NET (0x0000000000000000ULL)
+#define QLA8044_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+
+#define MD_MIU_TEST_AGT_WRDATA_LO 0x410000A0
+#define MD_MIU_TEST_AGT_WRDATA_HI 0x410000A4
+#define MD_MIU_TEST_AGT_WRDATA_ULO 0x410000B0
+#define MD_MIU_TEST_AGT_WRDATA_UHI 0x410000B4
+#define MD_MIU_TEST_AGT_RDDATA_LO 0x410000A8
+#define MD_MIU_TEST_AGT_RDDATA_HI 0x410000AC
+#define MD_MIU_TEST_AGT_RDDATA_ULO 0x410000B8
+#define MD_MIU_TEST_AGT_RDDATA_UHI 0x410000BC
+
+/* MIU_TEST_AGT_CTRL flags. work for SIU as well */
+#define MIU_TA_CTL_WRITE_ENABLE (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE)
+#define MIU_TA_CTL_WRITE_START (MIU_TA_CTL_WRITE | MIU_TA_CTL_ENABLE | \
+ MIU_TA_CTL_START)
+#define MIU_TA_CTL_START_ENABLE (MIU_TA_CTL_START | MIU_TA_CTL_ENABLE)
+
+/* Imbus address bit used to indicate a host address. This bit is
+ * eliminated by the pcie bar and bar select before presentation
+ * over pcie. */
+/* host memory via IMBUS */
+#define QLA8044_P2_ADDR_PCIE (0x0000000800000000ULL)
+#define QLA8044_P3_ADDR_PCIE (0x0000008000000000ULL)
+#define QLA8044_ADDR_PCIE_MAX (0x0000000FFFFFFFFFULL)
+#define QLA8044_ADDR_OCM0 (0x0000000200000000ULL)
+#define QLA8044_ADDR_OCM0_MAX (0x00000002000fffffULL)
+#define QLA8044_ADDR_OCM1 (0x0000000200400000ULL)
+#define QLA8044_ADDR_OCM1_MAX (0x00000002004fffffULL)
+#define QLA8044_ADDR_QDR_NET (0x0000000300000000ULL)
+#define QLA8044_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL)
+#define QLA8044_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL)
+#define QLA8044_ADDR_QDR_NET_MAX (0x0000000307ffffffULL)
+#define QLA8044_PCI_CRBSPACE ((unsigned long)0x06000000)
+#define QLA8044_PCI_DIRECT_CRB ((unsigned long)0x04400000)
+#define QLA8044_PCI_CAMQM ((unsigned long)0x04800000)
+#define QLA8044_PCI_CAMQM_MAX ((unsigned long)0x04ffffff)
+#define QLA8044_PCI_DDR_NET ((unsigned long)0x00000000)
+#define QLA8044_PCI_QDR_NET ((unsigned long)0x04000000)
+#define QLA8044_PCI_QDR_NET_MAX ((unsigned long)0x043fffff)
+
+/* PCI Windowing for DDR regions. */
+#define QLA8044_ADDR_IN_RANGE(addr, low, high) \
+ (((addr) <= (high)) && ((addr) >= (low)))
+
+/* Indirectly Mapped Registers */
+#define QLA8044_FLASH_SPI_STATUS 0x2808E010
+#define QLA8044_FLASH_SPI_CONTROL 0x2808E014
+#define QLA8044_FLASH_STATUS 0x42100004
+#define QLA8044_FLASH_CONTROL 0x42110004
+#define QLA8044_FLASH_ADDR 0x42110008
+#define QLA8044_FLASH_WRDATA 0x4211000C
+#define QLA8044_FLASH_RDDATA 0x42110018
+#define QLA8044_FLASH_DIRECT_WINDOW 0x42110030
+#define QLA8044_FLASH_DIRECT_DATA(DATA) (0x42150000 | (0x0000FFFF&DATA))
+
+/* Flash access regs */
+#define QLA8044_FLASH_LOCK 0x3850
+#define QLA8044_FLASH_UNLOCK 0x3854
+#define QLA8044_FLASH_LOCK_ID 0x3500
+
+/* Driver Lock regs */
+#define QLA8044_DRV_LOCK 0x3868
+#define QLA8044_DRV_UNLOCK 0x386C
+#define QLA8044_DRV_LOCK_ID 0x3504
+#define QLA8044_DRV_LOCKRECOVERY 0x379C
+
+/* IDC version */
+#define QLA8044_IDC_VER_MAJ_VALUE 0x1
+#define QLA8044_IDC_VER_MIN_VALUE 0x0
+
+/* IDC Registers : Driver Coexistence Defines */
+#define QLA8044_CRB_IDC_VER_MAJOR 0x3780
+#define QLA8044_CRB_IDC_VER_MINOR 0x3798
+#define QLA8044_IDC_DRV_AUDIT 0x3794
+#define QLA8044_SRE_SHIM_CONTROL 0x0D200284
+#define QLA8044_PORT0_RXB_PAUSE_THRS 0x0B2003A4
+#define QLA8044_PORT1_RXB_PAUSE_THRS 0x0B2013A4
+#define QLA8044_PORT0_RXB_TC_MAX_CELL 0x0B200388
+#define QLA8044_PORT1_RXB_TC_MAX_CELL 0x0B201388
+#define QLA8044_PORT0_RXB_TC_STATS 0x0B20039C
+#define QLA8044_PORT1_RXB_TC_STATS 0x0B20139C
+#define QLA8044_PORT2_IFB_PAUSE_THRS 0x0B200704
+#define QLA8044_PORT3_IFB_PAUSE_THRS 0x0B201704
+
+/* set value to pause threshold value */
+#define QLA8044_SET_PAUSE_VAL 0x0
+#define QLA8044_SET_TC_MAX_CELL_VAL 0x03FF03FF
+#define QLA8044_PEG_HALT_STATUS1 0x34A8
+#define QLA8044_PEG_HALT_STATUS2 0x34AC
+#define QLA8044_PEG_ALIVE_COUNTER 0x34B0 /* FW_HEARTBEAT */
+#define QLA8044_FW_CAPABILITIES 0x3528
+#define QLA8044_CRB_DRV_ACTIVE 0x3788 /* IDC_DRV_PRESENCE */
+#define QLA8044_CRB_DEV_STATE 0x3784 /* IDC_DEV_STATE */
+#define QLA8044_CRB_DRV_STATE 0x378C /* IDC_DRV_ACK */
+#define QLA8044_CRB_DRV_SCRATCH 0x3548
+#define QLA8044_CRB_DEV_PART_INFO1 0x37E0
+#define QLA8044_CRB_DEV_PART_INFO2 0x37E4
+#define QLA8044_FW_VER_MAJOR 0x3550
+#define QLA8044_FW_VER_MINOR 0x3554
+#define QLA8044_FW_VER_SUB 0x3558
+#define QLA8044_NPAR_STATE 0x359C
+#define QLA8044_FW_IMAGE_VALID 0x35FC
+#define QLA8044_CMDPEG_STATE 0x3650
+#define QLA8044_ASIC_TEMP 0x37B4
+#define QLA8044_FW_API 0x356C
+#define QLA8044_DRV_OP_MODE 0x3570
+#define QLA8044_CRB_WIN_BASE 0x3800
+#define QLA8044_CRB_WIN_FUNC(f) (QLA8044_CRB_WIN_BASE+((f)*4))
+#define QLA8044_SEM_LOCK_BASE 0x3840
+#define QLA8044_SEM_UNLOCK_BASE 0x3844
+#define QLA8044_SEM_LOCK_FUNC(f) (QLA8044_SEM_LOCK_BASE+((f)*8))
+#define QLA8044_SEM_UNLOCK_FUNC(f) (QLA8044_SEM_UNLOCK_BASE+((f)*8))
+#define QLA8044_LINK_STATE(f) (0x3698+((f) > 7 ? 4 : 0))
+#define QLA8044_LINK_SPEED(f) (0x36E0+(((f) >> 2) * 4))
+#define QLA8044_MAX_LINK_SPEED(f) (0x36F0+(((f) / 4) * 4))
+#define QLA8044_LINK_SPEED_FACTOR 10
+
+/* FLASH API Defines */
+#define QLA8044_FLASH_MAX_WAIT_USEC 100
+#define QLA8044_FLASH_LOCK_TIMEOUT 10000
+#define QLA8044_FLASH_SECTOR_SIZE 65536
+#define QLA8044_DRV_LOCK_TIMEOUT 2000
+#define QLA8044_FLASH_SECTOR_ERASE_CMD 0xdeadbeef
+#define QLA8044_FLASH_WRITE_CMD 0xdacdacda
+#define QLA8044_FLASH_BUFFER_WRITE_CMD 0xcadcadca
+#define QLA8044_FLASH_READ_RETRY_COUNT 2000
+#define QLA8044_FLASH_STATUS_READY 0x6
+#define QLA8044_FLASH_BUFFER_WRITE_MIN 2
+#define QLA8044_FLASH_BUFFER_WRITE_MAX 64
+#define QLA8044_FLASH_STATUS_REG_POLL_DELAY 1
+#define QLA8044_ERASE_MODE 1
+#define QLA8044_WRITE_MODE 2
+#define QLA8044_DWORD_WRITE_MODE 3
+#define QLA8044_GLOBAL_RESET 0x38CC
+#define QLA8044_WILDCARD 0x38F0
+#define QLA8044_INFORMANT 0x38FC
+#define QLA8044_HOST_MBX_CTRL 0x3038
+#define QLA8044_FW_MBX_CTRL 0x303C
+#define QLA8044_BOOTLOADER_ADDR 0x355C
+#define QLA8044_BOOTLOADER_SIZE 0x3560
+#define QLA8044_FW_IMAGE_ADDR 0x3564
+#define QLA8044_MBX_INTR_ENABLE 0x1000
+#define QLA8044_MBX_INTR_MASK 0x1200
+
+/* IDC Control Register bit defines */
+#define DONTRESET_BIT0 0x1
+#define GRACEFUL_RESET_BIT1 0x2
+
+/* ISP8044 PEG_HALT_STATUS1 bits */
+#define QLA8044_HALT_STATUS_INFORMATIONAL (0x1 << 29)
+#define QLA8044_HALT_STATUS_FW_RESET (0x2 << 29)
+#define QLA8044_HALT_STATUS_UNRECOVERABLE (0x4 << 29)
+
+/* Firmware image definitions */
+#define QLA8044_BOOTLOADER_FLASH_ADDR 0x10000
+#define QLA8044_BOOT_FROM_FLASH 0
+#define QLA8044_IDC_PARAM_ADDR 0x3e8020
+
+/* FLASH related definitions */
+#define QLA8044_OPTROM_BURST_SIZE 0x100
+#define QLA8044_MAX_OPTROM_BURST_DWORDS (QLA8044_OPTROM_BURST_SIZE / 4)
+#define QLA8044_MIN_OPTROM_BURST_DWORDS 2
+#define QLA8044_SECTOR_SIZE (64 * 1024)
+
+#define QLA8044_FLASH_SPI_CTL 0x4
+#define QLA8044_FLASH_FIRST_TEMP_VAL 0x00800000
+#define QLA8044_FLASH_SECOND_TEMP_VAL 0x00800001
+#define QLA8044_FLASH_FIRST_MS_PATTERN 0x43
+#define QLA8044_FLASH_SECOND_MS_PATTERN 0x7F
+#define QLA8044_FLASH_LAST_MS_PATTERN 0x7D
+#define QLA8044_FLASH_STATUS_WRITE_DEF_SIG 0xFD0100
+#define QLA8044_FLASH_SECOND_ERASE_MS_VAL 0x5
+#define QLA8044_FLASH_ERASE_SIG 0xFD0300
+#define QLA8044_FLASH_LAST_ERASE_MS_VAL 0x3D
+
+/* Reset template definitions */
+#define QLA8044_MAX_RESET_SEQ_ENTRIES 16
+#define QLA8044_RESTART_TEMPLATE_SIZE 0x2000
+#define QLA8044_RESET_TEMPLATE_ADDR 0x4F0000
+#define QLA8044_RESET_SEQ_VERSION 0x0101
+
+/* Reset template entry opcodes */
+#define OPCODE_NOP 0x0000
+#define OPCODE_WRITE_LIST 0x0001
+#define OPCODE_READ_WRITE_LIST 0x0002
+#define OPCODE_POLL_LIST 0x0004
+#define OPCODE_POLL_WRITE_LIST 0x0008
+#define OPCODE_READ_MODIFY_WRITE 0x0010
+#define OPCODE_SEQ_PAUSE 0x0020
+#define OPCODE_SEQ_END 0x0040
+#define OPCODE_TMPL_END 0x0080
+#define OPCODE_POLL_READ_LIST 0x0100
+
+/* Template Header */
+#define RESET_TMPLT_HDR_SIGNATURE 0xCAFE
+#define QLA8044_IDC_DRV_CTRL 0x3790
+#define AF_8044_NO_FW_DUMP 27 /* 0x08000000 */
+
+#define MINIDUMP_SIZE_36K 36864
+
+struct qla8044_reset_template_hdr {
+ uint16_t version;
+ uint16_t signature;
+ uint16_t size;
+ uint16_t entries;
+ uint16_t hdr_size;
+ uint16_t checksum;
+ uint16_t init_seq_offset;
+ uint16_t start_seq_offset;
+} __packed;
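As context for the layout above, a hedged sketch of how a consumer might
sanity-check a reset template before walking its entries. The driver's actual
checksum rule is not visible in this excerpt, so the check below is limited to
the signature and version values defined earlier and is illustrative only.

/* Illustrative only: basic sanity check of a reset template header. */
static bool qla8044_tmplt_hdr_looks_valid(const struct qla8044_reset_template_hdr *hdr)
{
	if (hdr->signature != RESET_TMPLT_HDR_SIGNATURE)	/* 0xCAFE */
		return false;
	if (hdr->version != QLA8044_RESET_SEQ_VERSION)		/* 0x0101 */
		return false;
	/* Both sequences must start inside the advertised template size. */
	return hdr->hdr_size <= hdr->size &&
	       hdr->init_seq_offset < hdr->size &&
	       hdr->start_seq_offset < hdr->size;
}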
+
+/* Common Entry Header. */
+struct qla8044_reset_entry_hdr {
+ uint16_t cmd;
+ uint16_t size;
+ uint16_t count;
+ uint16_t delay;
+} __packed;
+
+/* Generic poll entry type. */
+struct qla8044_poll {
+ uint32_t test_mask;
+ uint32_t test_value;
+} __packed;
+
+/* Read modify write entry type. */
+struct qla8044_rmw {
+ uint32_t test_mask;
+ uint32_t xor_value;
+ uint32_t or_value;
+ uint8_t shl;
+ uint8_t shr;
+ uint8_t index_a;
+ uint8_t rsvd;
+} __packed;
+
+/* Generic Entry Item with 2 DWords. */
+struct qla8044_entry {
+ uint32_t arg1;
+ uint32_t arg2;
+} __packed;
+
+/* Generic Entry Item with 4 DWords.*/
+struct qla8044_quad_entry {
+ uint32_t dr_addr;
+ uint32_t dr_value;
+ uint32_t ar_addr;
+ uint32_t ar_value;
+} __packed;
+
+struct qla8044_reset_template {
+ int seq_index;
+ int seq_error;
+ int array_index;
+ uint32_t array[QLA8044_MAX_RESET_SEQ_ENTRIES];
+ uint8_t *buff;
+ uint8_t *stop_offset;
+ uint8_t *start_offset;
+ uint8_t *init_offset;
+ struct qla8044_reset_template_hdr *hdr;
+ uint8_t seq_end;
+ uint8_t template_end;
+};
+
+/* driver_code is for the driver to write some info about the entry;
+ * currently not used.
+ */
+struct qla8044_minidump_entry_hdr {
+ uint32_t entry_type;
+ uint32_t entry_size;
+ uint32_t entry_capture_size;
+ struct {
+ uint8_t entry_capture_mask;
+ uint8_t entry_code;
+ uint8_t driver_code;
+ uint8_t driver_flags;
+ } d_ctrl;
+} __packed;
+
+/* Read CRB entry header */
+struct qla8044_minidump_entry_crb {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t addr;
+ struct {
+ uint8_t addr_stride;
+ uint8_t state_index_a;
+ uint16_t poll_timeout;
+ } crb_strd;
+ uint32_t data_size;
+ uint32_t op_count;
+
+ struct {
+ uint8_t opcode;
+ uint8_t state_index_v;
+ uint8_t shl;
+ uint8_t shr;
+ } crb_ctrl;
+
+ uint32_t value_1;
+ uint32_t value_2;
+ uint32_t value_3;
+} __packed;
+
+struct qla8044_minidump_entry_cache {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t tag_reg_addr;
+ struct {
+ uint16_t tag_value_stride;
+ uint16_t init_tag_value;
+ } addr_ctrl;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t control_addr;
+ struct {
+ uint16_t write_value;
+ uint8_t poll_mask;
+ uint8_t poll_wait;
+ } cache_ctrl;
+ uint32_t read_addr;
+ struct {
+ uint8_t read_addr_stride;
+ uint8_t read_addr_cnt;
+ uint16_t rsvd_1;
+ } read_ctrl;
+} __packed;
+
+/* Read OCM */
+struct qla8044_minidump_entry_rdocm {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t rsvd_0;
+ uint32_t rsvd_1;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t rsvd_2;
+ uint32_t rsvd_3;
+ uint32_t read_addr;
+ uint32_t read_addr_stride;
+} __packed;
+
+/* Read Memory */
+struct qla8044_minidump_entry_rdmem {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t rsvd[6];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+};
+
+/* Read Memory: For Pex-DMA */
+struct qla8044_minidump_entry_rdmem_pex_dma {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t desc_card_addr;
+ uint16_t dma_desc_cmd;
+ uint8_t rsvd[2];
+ uint32_t start_dma_cmd;
+ uint8_t rsvd2[12];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+} __packed;
+
+/* Read ROM */
+struct qla8044_minidump_entry_rdrom {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t rsvd[6];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+} __packed;
+
+/* Mux entry */
+struct qla8044_minidump_entry_mux {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t select_addr;
+ uint32_t rsvd_0;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t select_value;
+ uint32_t select_value_stride;
+ uint32_t read_addr;
+ uint32_t rsvd_1;
+} __packed;
+
+/* Queue entry */
+struct qla8044_minidump_entry_queue {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t select_addr;
+ struct {
+ uint16_t queue_id_stride;
+ uint16_t rsvd_0;
+ } q_strd;
+ uint32_t data_size;
+ uint32_t op_count;
+ uint32_t rsvd_1;
+ uint32_t rsvd_2;
+ uint32_t read_addr;
+ struct {
+ uint8_t read_addr_stride;
+ uint8_t read_addr_cnt;
+ uint16_t rsvd_3;
+ } rd_strd;
+} __packed;
+
+/* POLLRD Entry */
+struct qla8044_minidump_entry_pollrd {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t select_addr;
+ uint32_t read_addr;
+ uint32_t select_value;
+ uint16_t select_value_stride;
+ uint16_t op_count;
+ uint32_t poll_wait;
+ uint32_t poll_mask;
+ uint32_t data_size;
+ uint32_t rsvd_1;
+} __packed;
+
+/* RDMUX2 Entry */
+struct qla8044_minidump_entry_rdmux2 {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t select_addr_1;
+ uint32_t select_addr_2;
+ uint32_t select_value_1;
+ uint32_t select_value_2;
+ uint32_t op_count;
+ uint32_t select_value_mask;
+ uint32_t read_addr;
+ uint8_t select_value_stride;
+ uint8_t data_size;
+ uint8_t rsvd[2];
+} __packed;
+
+/* POLLRDMWR Entry */
+struct qla8044_minidump_entry_pollrdmwr {
+ struct qla8044_minidump_entry_hdr h;
+ uint32_t addr_1;
+ uint32_t addr_2;
+ uint32_t value_1;
+ uint32_t value_2;
+ uint32_t poll_wait;
+ uint32_t poll_mask;
+ uint32_t modify_mask;
+ uint32_t data_size;
+} __packed;
+
+/* IDC additional information */
+struct qla8044_idc_information {
+ uint32_t request_desc; /* IDC request descriptor */
+ uint32_t info1; /* IDC additional info */
+ uint32_t info2; /* IDC additional info */
+ uint32_t info3; /* IDC additional info */
+} __packed;
+
+enum qla_regs {
+ QLA8044_PEG_HALT_STATUS1_INDEX = 0,
+ QLA8044_PEG_HALT_STATUS2_INDEX,
+ QLA8044_PEG_ALIVE_COUNTER_INDEX,
+ QLA8044_CRB_DRV_ACTIVE_INDEX,
+ QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8044_CRB_DRV_STATE_INDEX,
+ QLA8044_CRB_DRV_SCRATCH_INDEX,
+ QLA8044_CRB_DEV_PART_INFO_INDEX,
+ QLA8044_CRB_DRV_IDC_VERSION_INDEX,
+ QLA8044_FW_VERSION_MAJOR_INDEX,
+ QLA8044_FW_VERSION_MINOR_INDEX,
+ QLA8044_FW_VERSION_SUB_INDEX,
+ QLA8044_CRB_CMDPEG_STATE_INDEX,
+ QLA8044_CRB_TEMP_STATE_INDEX,
+} __packed;
+
+#define CRB_REG_INDEX_MAX 14
+#define CRB_CMDPEG_CHECK_RETRY_COUNT 60
+#define CRB_CMDPEG_CHECK_DELAY 500
+
+static const uint32_t qla8044_reg_tbl[] = {
+ QLA8044_PEG_HALT_STATUS1,
+ QLA8044_PEG_HALT_STATUS2,
+ QLA8044_PEG_ALIVE_COUNTER,
+ QLA8044_CRB_DRV_ACTIVE,
+ QLA8044_CRB_DEV_STATE,
+ QLA8044_CRB_DRV_STATE,
+ QLA8044_CRB_DRV_SCRATCH,
+ QLA8044_CRB_DEV_PART_INFO1,
+ QLA8044_CRB_IDC_VER_MAJOR,
+ QLA8044_FW_VER_MAJOR,
+ QLA8044_FW_VER_MINOR,
+ QLA8044_FW_VER_SUB,
+ QLA8044_CMDPEG_STATE,
+ QLA8044_ASIC_TEMP,
+};
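The enum above and qla8044_reg_tbl[] are index-aligned, e.g.
qla8044_reg_tbl[QLA8044_CRB_DEV_STATE_INDEX] is QLA8044_CRB_DEV_STATE. Later
hunks in qla_os.c call qla8044_wr_direct() with these indices; its real
implementation lives in qla_nx2.c (not shown here), but the shape is presumably
along these lines, with crb_wr32() standing in for the driver's low-level CRB
write helper:

/* Illustrative sketch only; the in-tree qla8044_wr_direct() may differ. */
static void qla8044_wr_direct_sketch(struct scsi_qla_host *vha,
	const uint32_t crb_reg, const uint32_t value)
{
	if (crb_reg < CRB_REG_INDEX_MAX)
		crb_wr32(vha->hw, qla8044_reg_tbl[crb_reg], value);
}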
+
+/* MiniDump Structures */
+
+/* driver_code is for the driver to write some info about the entry;
+ * currently not used.
+ */
+#define QLA8044_SS_OCM_WNDREG_INDEX 3
+#define QLA8044_DBG_STATE_ARRAY_LEN 16
+#define QLA8044_DBG_CAP_SIZE_ARRAY_LEN 8
+#define QLA8044_DBG_RSVD_ARRAY_LEN 8
+#define QLA8044_DBG_OCM_WNDREG_ARRAY_LEN 16
+#define QLA8044_SS_PCI_INDEX 0
+
+struct qla8044_minidump_template_hdr {
+ uint32_t entry_type;
+ uint32_t first_entry_offset;
+ uint32_t size_of_template;
+ uint32_t capture_debug_level;
+ uint32_t num_of_entries;
+ uint32_t version;
+ uint32_t driver_timestamp;
+ uint32_t checksum;
+
+ uint32_t driver_capture_mask;
+ uint32_t driver_info_word2;
+ uint32_t driver_info_word3;
+ uint32_t driver_info_word4;
+
+ uint32_t saved_state_array[QLA8044_DBG_STATE_ARRAY_LEN];
+ uint32_t capture_size_array[QLA8044_DBG_CAP_SIZE_ARRAY_LEN];
+ uint32_t ocm_window_reg[QLA8044_DBG_OCM_WNDREG_ARRAY_LEN];
+};
+
+struct qla8044_pex_dma_descriptor {
+ struct {
+ uint32_t read_data_size; /* 0-23: size, 24-31: rsvd */
+ uint8_t rsvd[2];
+ uint16_t dma_desc_cmd;
+ } cmd;
+ uint64_t src_addr;
+ uint64_t dma_bus_addr; /* 0-3: desc-cmd, 4-7: pci-func, 8-15: desc-cmd */
+ uint8_t rsvd[24];
+} __packed;
+
+#endif
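Before moving on to qla_os.c, one note on the minidump structures above: every
entry type begins with struct qla8044_minidump_entry_hdr, so a capture routine
can walk the template generically. A minimal, illustrative sketch (the real
walker lives in qla_nx2.c and dispatches on entry_type; that part is elided):

/* Illustrative only: walk the entries that follow a minidump template header. */
static void qla8044_walk_template_sketch(struct qla8044_minidump_template_hdr *tmplt)
{
	uint8_t *p = (uint8_t *)tmplt + tmplt->first_entry_offset;
	uint32_t i;

	for (i = 0; i < tmplt->num_of_entries; i++) {
		struct qla8044_minidump_entry_hdr *hdr =
			(struct qla8044_minidump_entry_hdr *)p;

		/* Dispatch on hdr->entry_type (CRB, cache, OCM, rdmem, ...) here. */
		p += hdr->entry_size;	/* entries are variable-sized */
	}
}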
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 3e21e9fc9d9..9f01bbbf3a2 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1247,7 +1247,7 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
if (qla2x00_vp_abort_isp(vha))
goto eh_host_reset_lock;
} else {
- if (IS_QLA82XX(vha->hw)) {
+ if (IS_P3P_TYPE(vha->hw)) {
if (!qla82xx_fcoe_ctx_reset(vha)) {
/* Ctx reset success */
ret = SUCCESS;
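A recurring change in this file is widening IS_QLA82XX() checks to
IS_P3P_TYPE() so that the same paths also cover the new ISP8044. The macro
itself is added elsewhere in this patch, presumably in qla_def.h, which this
excerpt does not show; its shape is assumed to be the obvious disjunction:

/* Assumed shape of the check; see qla_def.h in this patch for the real macro. */
#define IS_P3P_TYPE(ha)	(IS_QLA82XX(ha) || IS_QLA8044(ha))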
@@ -1303,6 +1303,10 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
struct fc_port *fcport;
struct qla_hw_data *ha = vha->hw;
+ if (IS_QLAFX00(ha)) {
+ return qlafx00_loop_reset(vha);
+ }
+
if (ql2xtargetreset == 1 && ha->flags.enable_target_reset) {
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->port_type != FCT_TARGET)
@@ -1311,14 +1315,12 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
ret = ha->isp_ops->target_reset(fcport, 0, 0);
if (ret != QLA_SUCCESS) {
ql_dbg(ql_dbg_taskm, vha, 0x802c,
- "Bus Reset failed: Target Reset=%d "
+ "Bus Reset failed: Reset=%d "
"d_id=%x.\n", ret, fcport->d_id.b24);
}
}
}
- if (IS_QLAFX00(ha))
- return QLA_SUCCESS;
if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -1506,7 +1508,7 @@ qla2x00_host_ramp_down_queuedepth(scsi_qla_host_t *vha)
if (sdev->queue_depth > shost->cmd_per_lun) {
if (sdev->queue_depth < ha->cfg_lun_q_depth)
continue;
- ql_log(ql_log_warn, vp, 0x3031,
+ ql_dbg(ql_dbg_io, vp, 0x3031,
"%ld:%d:%d: Ramping down queue depth to %d",
vp->host_no, sdev->id, sdev->lun,
ha->cfg_lun_q_depth);
@@ -1911,7 +1913,7 @@ static struct isp_operations qla2300_isp_ops = {
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
.abort_isp = qla2x00_abort_isp,
- .iospace_config = qla2x00_iospace_config,
+ .iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
@@ -1949,7 +1951,7 @@ static struct isp_operations qla24xx_isp_ops = {
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_start_scsi,
.abort_isp = qla2x00_abort_isp,
- .iospace_config = qla2x00_iospace_config,
+ .iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
@@ -1987,7 +1989,7 @@ static struct isp_operations qla25xx_isp_ops = {
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
.abort_isp = qla2x00_abort_isp,
- .iospace_config = qla2x00_iospace_config,
+ .iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
@@ -2025,7 +2027,7 @@ static struct isp_operations qla81xx_isp_ops = {
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
.abort_isp = qla2x00_abort_isp,
- .iospace_config = qla2x00_iospace_config,
+ .iospace_config = qla2x00_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
@@ -2060,13 +2062,51 @@ static struct isp_operations qla82xx_isp_ops = {
.beacon_blink = NULL,
.read_optrom = qla82xx_read_optrom_data,
.write_optrom = qla82xx_write_optrom_data,
- .get_flash_version = qla24xx_get_flash_version,
+ .get_flash_version = qla82xx_get_flash_version,
.start_scsi = qla82xx_start_scsi,
.abort_isp = qla82xx_abort_isp,
.iospace_config = qla82xx_iospace_config,
.initialize_adapter = qla2x00_initialize_adapter,
};
+static struct isp_operations qla8044_isp_ops = {
+ .pci_config = qla82xx_pci_config,
+ .reset_chip = qla82xx_reset_chip,
+ .chip_diag = qla24xx_chip_diag,
+ .config_rings = qla82xx_config_rings,
+ .reset_adapter = qla24xx_reset_adapter,
+ .nvram_config = qla81xx_nvram_config,
+ .update_fw_options = qla24xx_update_fw_options,
+ .load_risc = qla82xx_load_risc,
+ .pci_info_str = qla24xx_pci_info_str,
+ .fw_version_str = qla24xx_fw_version_str,
+ .intr_handler = qla8044_intr_handler,
+ .enable_intrs = qla82xx_enable_intrs,
+ .disable_intrs = qla82xx_disable_intrs,
+ .abort_command = qla24xx_abort_command,
+ .target_reset = qla24xx_abort_target,
+ .lun_reset = qla24xx_lun_reset,
+ .fabric_login = qla24xx_login_fabric,
+ .fabric_logout = qla24xx_fabric_logout,
+ .calc_req_entries = NULL,
+ .build_iocbs = NULL,
+ .prep_ms_iocb = qla24xx_prep_ms_iocb,
+ .prep_ms_fdmi_iocb = qla24xx_prep_ms_fdmi_iocb,
+ .read_nvram = NULL,
+ .write_nvram = NULL,
+ .fw_dump = qla24xx_fw_dump,
+ .beacon_on = qla82xx_beacon_on,
+ .beacon_off = qla82xx_beacon_off,
+ .beacon_blink = NULL,
+ .read_optrom = qla82xx_read_optrom_data,
+ .write_optrom = qla8044_write_optrom_data,
+ .get_flash_version = qla82xx_get_flash_version,
+ .start_scsi = qla82xx_start_scsi,
+ .abort_isp = qla8044_abort_isp,
+ .iospace_config = qla82xx_iospace_config,
+ .initialize_adapter = qla2x00_initialize_adapter,
+};
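The new qla8044_isp_ops table mostly reuses the 82xx callbacks and swaps in the
8044-specific ones (intr_handler, write_optrom, abort_isp). Once the probe path
below binds it, callers stay chip-agnostic and dispatch through the table, as in
this illustrative fragment:

/* Illustrative only: callers go through isp_ops instead of chip-type checks. */
static void qla8044_bind_ops_sketch(struct qla_hw_data *ha, scsi_qla_host_t *vha)
{
	ha->isp_ops = &qla8044_isp_ops;

	/* Resolves to qla8044_abort_isp for ISP8044 hardware. */
	ha->isp_ops->abort_isp(vha);
}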
+
static struct isp_operations qla83xx_isp_ops = {
.pci_config = qla25xx_pci_config,
.reset_chip = qla24xx_reset_chip,
@@ -2237,6 +2277,14 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
/* Initialize 82XX ISP flags */
qla82xx_init_flags(ha);
break;
+ case PCI_DEVICE_ID_QLOGIC_ISP8044:
+ ha->device_type |= DT_ISP8044;
+ ha->device_type |= DT_ZIO_SUPPORTED;
+ ha->device_type |= DT_FWI2;
+ ha->fw_srisc_address = RISC_START_ADDRESS_2400;
+ /* Initialize 82XX ISP flags */
+ qla82xx_init_flags(ha);
+ break;
case PCI_DEVICE_ID_QLOGIC_ISP2031:
ha->device_type |= DT_ISP2031;
ha->device_type |= DT_ZIO_SUPPORTED;
@@ -2317,7 +2365,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
uint16_t req_length = 0, rsp_length = 0;
struct req_que *req = NULL;
struct rsp_que *rsp = NULL;
-
bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
sht = &qla2xxx_driver_template;
if (pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2422 ||
@@ -2330,7 +2377,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8021 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP2031 ||
pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8031 ||
- pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001) {
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISPF001 ||
+ pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8044) {
bars = pci_select_bars(pdev, IORESOURCE_MEM);
mem_only = 1;
ql_dbg_pci(ql_dbg_init, pdev, 0x0007,
@@ -2484,6 +2532,21 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
+ } else if (IS_QLA8044(ha)) {
+ ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ req_length = REQUEST_ENTRY_CNT_82XX;
+ rsp_length = RESPONSE_ENTRY_CNT_82XX;
+ ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
+ ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
+ ha->gid_list_info_size = 8;
+ ha->optrom_size = OPTROM_SIZE_83XX;
+ ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
+ ha->isp_ops = &qla8044_isp_ops;
+ ha->flash_conf_off = FARX_ACCESS_FLASH_CONF;
+ ha->flash_data_off = FARX_ACCESS_FLASH_DATA;
+ ha->nvram_conf_off = FARX_ACCESS_NVRAM_CONF;
+ ha->nvram_data_off = FARX_ACCESS_NVRAM_DATA;
} else if (IS_QLA83XX(ha)) {
ha->portnum = PCI_FUNC(ha->pdev->devfn);
ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
@@ -2512,6 +2575,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->port_down_retry_count = 30; /* default value */
ha->mr.fw_hbt_cnt = QLAFX00_HEARTBEAT_INTERVAL;
ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+ ha->mr.fw_critemp_timer_tick = QLAFX00_CRITEMP_INTERVAL;
ha->mr.fw_hbt_en = 1;
}
@@ -2676,7 +2740,7 @@ que_init:
rsp->rsp_q_out = &ha->iobase->ispfx00.rsp_q_out;
}
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
req->req_q_out = &ha->iobase->isp82.req_q_out[0];
rsp->rsp_q_in = &ha->iobase->isp82.rsp_q_in[0];
rsp->rsp_q_out = &ha->iobase->isp82.rsp_q_out[0];
@@ -2709,6 +2773,14 @@ que_init:
qla82xx_idc_unlock(ha);
ql_log(ql_log_fatal, base_vha, 0x00d7,
"HW State: FAILED.\n");
+ } else if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ qla8044_wr_direct(base_vha,
+ QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_FAILED);
+ qla8044_idc_unlock(ha);
+ ql_log(ql_log_fatal, base_vha, 0x0150,
+ "HW State: FAILED.\n");
}
ret = -ENODEV;
@@ -2804,6 +2876,13 @@ skip_dpc:
ha->isp_ops->enable_intrs(ha);
+ if (IS_QLAFX00(ha)) {
+ ret = qlafx00_fx_disc(base_vha,
+ &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
+ host->sg_tablesize = (ha->mr.extended_io_enabled) ?
+ QLA_SG_ALL : 128;
+ }
+
ret = scsi_add_host(host, &pdev->dev);
if (ret)
goto probe_failed;
@@ -2824,9 +2903,6 @@ skip_dpc:
if (IS_QLAFX00(ha)) {
ret = qlafx00_fx_disc(base_vha,
- &base_vha->hw->mr.fcport, FXDISC_GET_CONFIG_INFO);
-
- ret = qlafx00_fx_disc(base_vha,
&base_vha->hw->mr.fcport, FXDISC_GET_PORT_INFO);
/* Register system information */
@@ -2881,8 +2957,13 @@ probe_hw_failed:
qla82xx_clear_drv_active(ha);
qla82xx_idc_unlock(ha);
}
+ if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ qla8044_clear_drv_active(base_vha);
+ qla8044_idc_unlock(ha);
+ }
iospace_config_failed:
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
if (!ha->nx_pcibase)
iounmap((device_reg_t __iomem *)ha->nx_pcibase);
if (!ql2xdbwr)
@@ -2930,6 +3011,10 @@ qla2x00_shutdown(struct pci_dev *pdev)
vha = pci_get_drvdata(pdev);
ha = vha->hw;
+ /* Notify ISPFX00 firmware */
+ if (IS_QLAFX00(ha))
+ qlafx00_driver_shutdown(vha, 20);
+
/* Turn-off FCE trace */
if (ha->flags.fce_enabled) {
qla2x00_disable_fce_trace(vha, NULL, NULL);
@@ -2977,6 +3062,9 @@ qla2x00_remove_one(struct pci_dev *pdev)
ha->flags.host_shutting_down = 1;
set_bit(UNLOADING, &base_vha->dpc_flags);
+ if (IS_QLAFX00(ha))
+ qlafx00_driver_shutdown(base_vha, 20);
+
mutex_lock(&ha->vport_lock);
while (ha->cur_vport_count) {
spin_lock_irqsave(&ha->vport_slock, flags);
@@ -3061,6 +3149,11 @@ qla2x00_remove_one(struct pci_dev *pdev)
scsi_host_put(base_vha->host);
+ if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ qla8044_clear_drv_active(base_vha);
+ qla8044_idc_unlock(ha);
+ }
if (IS_QLA82XX(ha)) {
qla82xx_idc_lock(ha);
qla82xx_clear_drv_active(ha);
@@ -3210,14 +3303,8 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
ql_dbg(ql_dbg_disc, vha, 0x2067,
- "Port login retry "
- "%02x%02x%02x%02x%02x%02x%02x%02x, "
- "id = 0x%04x retry cnt=%d.\n",
- fcport->port_name[0], fcport->port_name[1],
- fcport->port_name[2], fcport->port_name[3],
- fcport->port_name[4], fcport->port_name[5],
- fcport->port_name[6], fcport->port_name[7],
- fcport->loop_id, fcport->login_retry);
+ "Port login retry %8phN, id = 0x%04x retry cnt=%d.\n",
+ fcport->port_name, fcport->loop_id, fcport->login_retry);
}
}
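Several hunks in this patch (here, and in qla_target.c and tcm_qla2xxx.c below)
replace eight "%02x" conversions with the kernel's %*ph printk extension:
"%8phN" prints an 8-byte buffer with no separators and "%8phC" with colon
separators. A minimal illustration, with a made-up WWPN value:

/* One specifier replaces eight "%02x" arguments for an 8-byte world wide name. */
static void print_wwpn_example(void)
{
	const u8 wwpn[8] = { 0x21, 0x00, 0x00, 0x24, 0xff, 0x01, 0x02, 0x03 };

	pr_info("port %8phN\n", wwpn);	/* "port 21000024ff010203" */
	pr_info("port %8phC\n", wwpn);	/* "port 21:00:00:24:ff:01:02:03" */
}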
@@ -3290,7 +3377,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->srb_mempool)
goto fail_free_gid_list;
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
/* Allocate cache for CT6 Ctx. */
if (!ctx_cachep) {
ctx_cachep = kmem_cache_create("qla2xxx_ctx",
@@ -3324,7 +3411,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
"init_cb=%p gid_list=%p, srb_mempool=%p s_dma_pool=%p.\n",
ha->init_cb, ha->gid_list, ha->srb_mempool, ha->s_dma_pool);
- if (IS_QLA82XX(ha) || ql2xenabledif) {
+ if (IS_P3P_TYPE(ha) || ql2xenabledif) {
ha->dl_dma_pool = dma_pool_create(name, &ha->pdev->dev,
DSD_LIST_DMA_POOL_SIZE, 8, 0);
if (!ha->dl_dma_pool) {
@@ -3532,7 +3619,7 @@ fail:
* Frees fw dump stuff.
*
* Input:
-* ha = adapter block pointer.
+* ha = adapter block pointer
*/
static void
qla2x00_free_fw_dump(struct qla_hw_data *ha)
@@ -4699,17 +4786,33 @@ qla2x00_do_dpc(void *data)
qla2x00_do_work(base_vha);
- if (IS_QLA82XX(ha)) {
- if (test_and_clear_bit(ISP_UNRECOVERABLE,
- &base_vha->dpc_flags)) {
- qla82xx_idc_lock(ha);
- qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
- QLA8XXX_DEV_FAILED);
- qla82xx_idc_unlock(ha);
- ql_log(ql_log_info, base_vha, 0x4004,
- "HW State: FAILED.\n");
- qla82xx_device_state_handler(base_vha);
- continue;
+ if (IS_P3P_TYPE(ha)) {
+ if (IS_QLA8044(ha)) {
+ if (test_and_clear_bit(ISP_UNRECOVERABLE,
+ &base_vha->dpc_flags)) {
+ qla8044_idc_lock(ha);
+ qla8044_wr_direct(base_vha,
+ QLA8044_CRB_DEV_STATE_INDEX,
+ QLA8XXX_DEV_FAILED);
+ qla8044_idc_unlock(ha);
+ ql_log(ql_log_info, base_vha, 0x4004,
+ "HW State: FAILED.\n");
+ qla8044_device_state_handler(base_vha);
+ continue;
+ }
+
+ } else {
+ if (test_and_clear_bit(ISP_UNRECOVERABLE,
+ &base_vha->dpc_flags)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
+ QLA8XXX_DEV_FAILED);
+ qla82xx_idc_unlock(ha);
+ ql_log(ql_log_info, base_vha, 0x0151,
+ "HW State: FAILED.\n");
+ qla82xx_device_state_handler(base_vha);
+ continue;
+ }
}
if (test_and_clear_bit(FCOE_CTX_RESET_NEEDED,
@@ -4809,16 +4912,26 @@ qla2x00_do_dpc(void *data)
if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
"Quiescence mode scheduled.\n");
- if (IS_QLA82XX(ha)) {
- qla82xx_device_state_handler(base_vha);
+ if (IS_P3P_TYPE(ha)) {
+ if (IS_QLA82XX(ha))
+ qla82xx_device_state_handler(base_vha);
+ if (IS_QLA8044(ha))
+ qla8044_device_state_handler(base_vha);
clear_bit(ISP_QUIESCE_NEEDED,
&base_vha->dpc_flags);
if (!ha->flags.quiesce_owner) {
qla2x00_perform_loop_resync(base_vha);
-
- qla82xx_idc_lock(ha);
- qla82xx_clear_qsnt_ready(base_vha);
- qla82xx_idc_unlock(ha);
+ if (IS_QLA82XX(ha)) {
+ qla82xx_idc_lock(ha);
+ qla82xx_clear_qsnt_ready(
+ base_vha);
+ qla82xx_idc_unlock(ha);
+ } else if (IS_QLA8044(ha)) {
+ qla8044_idc_lock(ha);
+ qla8044_clear_qsnt_ready(
+ base_vha);
+ qla8044_idc_unlock(ha);
+ }
}
} else {
clear_bit(ISP_QUIESCE_NEEDED,
@@ -4992,10 +5105,13 @@ qla2x00_timer(scsi_qla_host_t *vha)
pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
/* Make sure qla82xx_watchdog is run only for physical port */
- if (!vha->vp_idx && IS_QLA82XX(ha)) {
+ if (!vha->vp_idx && IS_P3P_TYPE(ha)) {
if (test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags))
start_dpc++;
- qla82xx_watchdog(vha);
+ if (IS_QLA82XX(ha))
+ qla82xx_watchdog(vha);
+ else if (IS_QLA8044(ha))
+ qla8044_watchdog(vha);
}
if (!vha->vp_idx && IS_QLAFX00(ha))
@@ -5075,7 +5191,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
/* Check if beacon LED needs to be blinked for physical host only */
if (!vha->vp_idx && (ha->beacon_blink_led == 1)) {
/* There is no beacon_blink function for ISP82xx */
- if (!IS_QLA82XX(ha)) {
+ if (!IS_P3P_TYPE(ha)) {
set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags);
start_dpc++;
}
@@ -5519,6 +5635,7 @@ static struct pci_device_id qla2xxx_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8021) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8031) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISPF001) },
+ { PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_ISP8044) },
{ 0 },
};
MODULE_DEVICE_TABLE(pci, qla2xxx_pci_tbl);
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 3bef6736d88..bd56cde795f 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -565,7 +565,7 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
*start = FA_FLASH_LAYOUT_ADDR;
else if (IS_QLA81XX(ha))
*start = FA_FLASH_LAYOUT_ADDR_81;
- else if (IS_QLA82XX(ha)) {
+ else if (IS_P3P_TYPE(ha)) {
*start = FA_FLASH_LAYOUT_ADDR_82;
goto end;
} else if (IS_QLA83XX(ha)) {
@@ -719,7 +719,7 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
start = le32_to_cpu(region->start) >> 2;
ql_dbg(ql_dbg_init, vha, 0x0049,
"FLT[%02x]: start=0x%x "
- "end=0x%x size=0x%x.\n", le32_to_cpu(region->code),
+ "end=0x%x size=0x%x.\n", le32_to_cpu(region->code) & 0xff,
start, le32_to_cpu(region->end) >> 2,
le32_to_cpu(region->size));
@@ -741,13 +741,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
if (IS_QLA8031(ha))
break;
ha->flt_region_vpd_nvram = start;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
break;
if (ha->flags.port0)
ha->flt_region_vpd = start;
break;
case FLT_REG_VPD_1:
- if (IS_QLA82XX(ha) || IS_QLA8031(ha))
+ if (IS_P3P_TYPE(ha) || IS_QLA8031(ha))
break;
if (!ha->flags.port0)
ha->flt_region_vpd = start;
@@ -789,9 +789,17 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
case FLT_REG_BOOT_CODE_82XX:
ha->flt_region_boot = start;
break;
+ case FLT_REG_BOOT_CODE_8044:
+ if (IS_QLA8044(ha))
+ ha->flt_region_boot = start;
+ break;
case FLT_REG_FW_82XX:
ha->flt_region_fw = start;
break;
+ case FLT_REG_CNA_FW:
+ if (IS_CNA_CAPABLE(ha))
+ ha->flt_region_fw = start;
+ break;
case FLT_REG_GOLD_FW_82XX:
ha->flt_region_gold_fw = start;
break;
@@ -803,13 +811,13 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
ha->flt_region_vpd = start;
break;
case FLT_REG_FCOE_NVRAM_0:
- if (!IS_QLA8031(ha))
+ if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))
break;
if (ha->flags.port0)
ha->flt_region_nvram = start;
break;
case FLT_REG_FCOE_NVRAM_1:
- if (!IS_QLA8031(ha))
+ if (!(IS_QLA8031(ha) || IS_QLA8044(ha)))
break;
if (!ha->flags.port0)
ha->flt_region_nvram = start;
@@ -883,7 +891,13 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
mid = le16_to_cpu(fdt->man_id);
fid = le16_to_cpu(fdt->id);
ha->fdt_wrt_disable = fdt->wrt_disable_bits;
- ha->fdt_erase_cmd = flash_conf_addr(ha, 0x0300 | fdt->erase_cmd);
+ ha->fdt_wrt_enable = fdt->wrt_enable_bits;
+ ha->fdt_wrt_sts_reg_cmd = fdt->wrt_sts_reg_cmd;
+ if (IS_QLA8044(ha))
+ ha->fdt_erase_cmd = fdt->erase_cmd;
+ else
+ ha->fdt_erase_cmd =
+ flash_conf_addr(ha, 0x0300 | fdt->erase_cmd);
ha->fdt_block_size = le32_to_cpu(fdt->block_size);
if (fdt->unprotect_sec_cmd) {
ha->fdt_unprotect_sec_cmd = flash_conf_addr(ha, 0x0300 |
@@ -895,7 +909,7 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
goto done;
no_flash_data:
loc = locations[0];
- if (IS_QLA82XX(ha)) {
+ if (IS_P3P_TYPE(ha)) {
ha->fdt_block_size = FLASH_BLK_SIZE_64K;
goto done;
}
@@ -946,7 +960,7 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
- if (!IS_QLA82XX(ha))
+ if (!(IS_P3P_TYPE(ha)))
return;
wptr = (uint32_t *)req->ring;
@@ -1008,6 +1022,9 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
if (ha->flags.nic_core_reset_hdlr_active)
return;
+ if (IS_QLA8044(ha))
+ return;
+
ha->isp_ops->read_optrom(vha, (uint8_t *)&hdr,
ha->flt_region_npiv_conf << 2, sizeof(struct qla_npiv_header));
if (hdr.version == __constant_cpu_to_le16(0xffff))
@@ -1302,7 +1319,7 @@ qla24xx_read_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
uint32_t *dwptr;
struct qla_hw_data *ha = vha->hw;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
return buf;
/* Dword reads to flash. */
@@ -1360,7 +1377,7 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
ret = QLA_SUCCESS;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
return ret;
/* Enable flash write. */
@@ -1474,7 +1491,7 @@ qla2x00_beacon_blink(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
return;
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1752,7 +1769,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
return QLA_SUCCESS;
if (IS_QLA8031(ha) || IS_QLA81XX(ha))
@@ -1804,7 +1821,7 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
return QLA_SUCCESS;
ha->beacon_blink_led = 0;
@@ -2822,6 +2839,121 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
}
int
+qla82xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
+{
+ int ret = QLA_SUCCESS;
+ uint32_t pcihdr, pcids;
+ uint32_t *dcode;
+ uint8_t *bcode;
+ uint8_t code_type, last_image;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!mbuf)
+ return QLA_FUNCTION_FAILED;
+
+ memset(ha->bios_revision, 0, sizeof(ha->bios_revision));
+ memset(ha->efi_revision, 0, sizeof(ha->efi_revision));
+ memset(ha->fcode_revision, 0, sizeof(ha->fcode_revision));
+ memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
+
+ dcode = mbuf;
+
+ /* Begin with first PCI expansion ROM header. */
+ pcihdr = ha->flt_region_boot << 2;
+ last_image = 1;
+ do {
+ /* Verify PCI expansion ROM header. */
+ ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcihdr,
+ 0x20 * 4);
+ bcode = mbuf + (pcihdr % 4);
+ if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
+ /* No signature */
+ ql_log(ql_log_fatal, vha, 0x0154,
+ "No matching ROM signature.\n");
+ ret = QLA_FUNCTION_FAILED;
+ break;
+ }
+
+ /* Locate PCI data structure. */
+ pcids = pcihdr + ((bcode[0x19] << 8) | bcode[0x18]);
+
+ ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, pcids,
+ 0x20 * 4);
+ bcode = mbuf + (pcihdr % 4);
+
+ /* Validate signature of PCI data structure. */
+ if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
+ bcode[0x2] != 'I' || bcode[0x3] != 'R') {
+ /* Incorrect header. */
+ ql_log(ql_log_fatal, vha, 0x0155,
+ "PCI data struct not found pcir_adr=%x.\n", pcids);
+ ret = QLA_FUNCTION_FAILED;
+ break;
+ }
+
+ /* Read version */
+ code_type = bcode[0x14];
+ switch (code_type) {
+ case ROM_CODE_TYPE_BIOS:
+ /* Intel x86, PC-AT compatible. */
+ ha->bios_revision[0] = bcode[0x12];
+ ha->bios_revision[1] = bcode[0x13];
+ ql_dbg(ql_dbg_init, vha, 0x0156,
+ "Read BIOS %d.%d.\n",
+ ha->bios_revision[1], ha->bios_revision[0]);
+ break;
+ case ROM_CODE_TYPE_FCODE:
+ /* Open Firmware standard for PCI (FCode). */
+ ha->fcode_revision[0] = bcode[0x12];
+ ha->fcode_revision[1] = bcode[0x13];
+ ql_dbg(ql_dbg_init, vha, 0x0157,
+ "Read FCODE %d.%d.\n",
+ ha->fcode_revision[1], ha->fcode_revision[0]);
+ break;
+ case ROM_CODE_TYPE_EFI:
+ /* Extensible Firmware Interface (EFI). */
+ ha->efi_revision[0] = bcode[0x12];
+ ha->efi_revision[1] = bcode[0x13];
+ ql_dbg(ql_dbg_init, vha, 0x0158,
+ "Read EFI %d.%d.\n",
+ ha->efi_revision[1], ha->efi_revision[0]);
+ break;
+ default:
+ ql_log(ql_log_warn, vha, 0x0159,
+ "Unrecognized code type %x at pcids %x.\n",
+ code_type, pcids);
+ break;
+ }
+
+ last_image = bcode[0x15] & BIT_7;
+
+ /* Locate next PCI expansion ROM. */
+ pcihdr += ((bcode[0x11] << 8) | bcode[0x10]) * 512;
+ } while (!last_image);
+
+ /* Read firmware image information. */
+ memset(ha->fw_revision, 0, sizeof(ha->fw_revision));
+ dcode = mbuf;
+ ha->isp_ops->read_optrom(vha, (uint8_t *)dcode, ha->flt_region_fw << 2,
+ 0x20);
+ bcode = mbuf + (pcihdr % 4);
+
+ /* Validate signature of PCI data structure. */
+ if (bcode[0x0] == 0x3 && bcode[0x1] == 0x0 &&
+ bcode[0x2] == 0x40 && bcode[0x3] == 0x40) {
+ ha->fw_revision[0] = bcode[0x4];
+ ha->fw_revision[1] = bcode[0x5];
+ ha->fw_revision[2] = bcode[0x6];
+ ql_dbg(ql_dbg_init, vha, 0x0153,
+ "Firmware revision %d.%d.%d\n",
+ ha->fw_revision[0], ha->fw_revision[1],
+ ha->fw_revision[2]);
+ }
+
+ return ret;
+}
+
+int
qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
{
int ret = QLA_SUCCESS;
@@ -2832,7 +2964,7 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
int i;
struct qla_hw_data *ha = vha->hw;
- if (IS_QLA82XX(ha))
+ if (IS_P3P_TYPE(ha))
return ret;
if (!mbuf)
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 83a8f7a9ec7..ff12d4677cc 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -430,13 +430,8 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
}
ql_dbg(ql_dbg_tgt, vha, 0xe047,
- "scsi(%ld): resetting (session %p from port "
- "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
- "mcmd %x, loop_id %d)\n", vha->host_no, sess,
- sess->port_name[0], sess->port_name[1],
- sess->port_name[2], sess->port_name[3],
- sess->port_name[4], sess->port_name[5],
- sess->port_name[6], sess->port_name[7],
+ "scsi(%ld): resetting (session %p from port %8phC mcmd %x, "
+ "loop_id %d)\n", vha->host_no, sess, sess->port_name,
mcmd, loop_id);
lun = a->u.isp24.fcp_cmnd.lun;
@@ -467,15 +462,10 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
sess->expires = jiffies + dev_loss_tmo * HZ;
ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
- "qla_target(%d): session for port %02x:%02x:%02x:"
- "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
+ "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
"deletion in %u secs (expires: %lu) immed: %d\n",
- sess->vha->vp_idx,
- sess->port_name[0], sess->port_name[1],
- sess->port_name[2], sess->port_name[3],
- sess->port_name[4], sess->port_name[5],
- sess->port_name[6], sess->port_name[7],
- sess->loop_id, dev_loss_tmo, sess->expires, immediate);
+ sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
+ sess->expires, immediate);
if (immediate)
schedule_delayed_work(&tgt->sess_del_work, 0);
@@ -630,13 +620,9 @@ static struct qla_tgt_sess *qlt_create_sess(
sess = kzalloc(sizeof(*sess), GFP_KERNEL);
if (!sess) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
- "qla_target(%u): session allocation failed, "
- "all commands from port %02x:%02x:%02x:%02x:"
- "%02x:%02x:%02x:%02x will be refused", vha->vp_idx,
- fcport->port_name[0], fcport->port_name[1],
- fcport->port_name[2], fcport->port_name[3],
- fcport->port_name[4], fcport->port_name[5],
- fcport->port_name[6], fcport->port_name[7]);
+ "qla_target(%u): session allocation failed, all commands "
+ "from port %8phC will be refused", vha->vp_idx,
+ fcport->port_name);
return NULL;
}
@@ -680,15 +666,11 @@ static struct qla_tgt_sess *qlt_create_sess(
spin_unlock_irqrestore(&ha->hardware_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
- "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:"
- "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed"
- " completion %ssupported) added\n",
- vha->vp_idx, local ? "local " : "", fcport->port_name[0],
- fcport->port_name[1], fcport->port_name[2], fcport->port_name[3],
- fcport->port_name[4], fcport->port_name[5], fcport->port_name[6],
- fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain,
- sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ?
- "" : "not ");
+ "qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
+ "s_id %x:%x:%x, confirmed completion %ssupported) added\n",
+ vha->vp_idx, local ? "local " : "", fcport->port_name,
+ fcport->loop_id, sess->s_id.b.domain, sess->s_id.b.area,
+ sess->s_id.b.al_pa, sess->conf_compl_supported ? "" : "not ");
return sess;
}
@@ -730,13 +712,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
qlt_undelete_sess(sess);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
- "qla_target(%u): %ssession for port %02x:"
- "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
- "reappeared\n", vha->vp_idx, sess->local ? "local "
- : "", sess->port_name[0], sess->port_name[1],
- sess->port_name[2], sess->port_name[3],
- sess->port_name[4], sess->port_name[5],
- sess->port_name[6], sess->port_name[7],
+ "qla_target(%u): %ssession for port %8phC "
+ "(loop ID %d) reappeared\n", vha->vp_idx,
+ sess->local ? "local " : "", sess->port_name,
sess->loop_id);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
@@ -749,13 +727,8 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
if (sess && sess->local) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
"qla_target(%u): local session for "
- "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
- "(loop ID %d) became global\n", vha->vp_idx,
- fcport->port_name[0], fcport->port_name[1],
- fcport->port_name[2], fcport->port_name[3],
- fcport->port_name[4], fcport->port_name[5],
- fcport->port_name[6], fcport->port_name[7],
- sess->loop_id);
+ "port %8phC (loop ID %d) became global\n", vha->vp_idx,
+ fcport->port_name, sess->loop_id);
sess->local = 0;
}
ha->tgt.tgt_ops->put_sess(sess);
@@ -2840,10 +2813,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
int res = 0;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
- "qla_target(%d): Port ID: 0x%02x:%02x:%02x"
- " ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id[0],
- iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[2],
- iocb->u.isp24.status_subcode);
+ "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
+ vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
switch (iocb->u.isp24.status_subcode) {
case ELS_PLOGI:
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 6c66d22eb1b..a808e293dae 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.05.00.03-k"
+#define QLA2XXX_VERSION "8.06.00.08-k"
#define QLA_DRIVER_MAJOR_VER 8
-#define QLA_DRIVER_MINOR_VER 5
+#define QLA_DRIVER_MINOR_VER 6
#define QLA_DRIVER_PATCH_VER 0
#define QLA_DRIVER_BETA_VER 0
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index a318092e033..a6da313e253 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1474,15 +1474,11 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
if (sess->loop_id != loop_id || sess->s_id.b24 != s_id.b24)
- pr_info("Updating session %p from port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n",
- sess,
- sess->port_name[0], sess->port_name[1],
- sess->port_name[2], sess->port_name[3],
- sess->port_name[4], sess->port_name[5],
- sess->port_name[6], sess->port_name[7],
- sess->loop_id, loop_id,
- sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
- s_id.b.domain, s_id.b.area, s_id.b.al_pa);
+ pr_info("Updating session %p from port %8phC loop_id %d -> %d s_id %x:%x:%x -> %x:%x:%x\n",
+ sess, sess->port_name,
+ sess->loop_id, loop_id, sess->s_id.b.domain,
+ sess->s_id.b.area, sess->s_id.b.al_pa, s_id.b.domain,
+ s_id.b.area, s_id.b.al_pa);
if (sess->loop_id != loop_id) {
/*
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index d607eb8e24c..8196c2f7915 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -259,8 +259,8 @@ void qla4_83xx_rom_lock_recovery(struct scsi_qla_host *ha)
* Return: On success return QLA_SUCCESS
* On error return QLA_ERROR
**/
-static int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
- uint32_t *data, uint32_t count)
+int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha, uint64_t addr,
+ uint32_t *data, uint32_t count)
{
int i, j;
uint32_t agt_ctrl;
@@ -1473,9 +1473,9 @@ int qla4_83xx_isp_reset(struct scsi_qla_host *ha)
__func__));
}
- /* For ISP8324, Reset owner is NIC, iSCSI or FCOE based on priority
- * and which drivers are present. Unlike ISP8022, the function setting
- * NEED_RESET, may not be the Reset owner. */
+ /* For ISP8324 and ISP8042, Reset owner is NIC, iSCSI or FCOE based on
+ * priority and which drivers are present. Unlike ISP8022, the function
+ * setting NEED_RESET may not be the Reset owner. */
if (qla4_83xx_can_perform_reset(ha))
set_bit(AF_8XXX_RST_OWNER, &ha->flags);
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.h b/drivers/scsi/qla4xxx/ql4_83xx.h
index fab237fa32c..a0de6e25ea5 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.h
+++ b/drivers/scsi/qla4xxx/ql4_83xx.h
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -290,4 +290,38 @@ struct qla4_83xx_idc_information {
uint32_t info3; /* IDC additional info */
};
+#define QLA83XX_PEX_DMA_ENGINE_INDEX 8
+#define QLA83XX_PEX_DMA_BASE_ADDRESS 0x77320000
+#define QLA83XX_PEX_DMA_NUM_OFFSET 0x10000
+#define QLA83XX_PEX_DMA_CMD_ADDR_LOW 0x0
+#define QLA83XX_PEX_DMA_CMD_ADDR_HIGH 0x04
+#define QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL 0x08
+
+#define QLA83XX_PEX_DMA_READ_SIZE (16 * 1024)
+#define QLA83XX_PEX_DMA_MAX_WAIT (100 * 100) /* Max wait of 100 msecs */
+
+/* Read Memory: For Pex-DMA */
+struct qla4_83xx_minidump_entry_rdmem_pex_dma {
+ struct qla8xxx_minidump_entry_hdr h;
+ uint32_t desc_card_addr;
+ uint16_t dma_desc_cmd;
+ uint8_t rsvd[2];
+ uint32_t start_dma_cmd;
+ uint8_t rsvd2[12];
+ uint32_t read_addr;
+ uint32_t read_data_size;
+};
+
+struct qla4_83xx_pex_dma_descriptor {
+ struct {
+ uint32_t read_data_size; /* 0-23: size, 24-31: rsvd */
+ uint8_t rsvd[2];
+ uint16_t dma_desc_cmd;
+ } cmd;
+ uint64_t src_addr;
+ uint64_t dma_bus_addr; /* 0-3: desc-cmd, 4-7: pci-func,
+ * 8-15: desc-cmd */
+ uint8_t rsvd[24];
+} __packed;
+
#endif
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index 19ee55a6226..463239c972b 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2011 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -83,7 +83,7 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
QLA8XXX_DEV_NEED_RESET);
if (is_qla8022(ha) ||
- (is_qla8032(ha) &&
+ ((is_qla8032(ha) || is_qla8042(ha)) &&
qla4_83xx_can_perform_reset(ha))) {
set_bit(AF_8XXX_RST_OWNER, &ha->flags);
set_bit(AF_FW_RECOVERY, &ha->flags);
@@ -158,14 +158,12 @@ qla4xxx_fw_version_show(struct device *dev,
if (is_qla80XX(ha))
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
- ha->firmware_version[0],
- ha->firmware_version[1],
- ha->patch_number, ha->build_number);
+ ha->fw_info.fw_major, ha->fw_info.fw_minor,
+ ha->fw_info.fw_patch, ha->fw_info.fw_build);
else
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
- ha->firmware_version[0],
- ha->firmware_version[1],
- ha->patch_number, ha->build_number);
+ ha->fw_info.fw_major, ha->fw_info.fw_minor,
+ ha->fw_info.fw_patch, ha->fw_info.fw_build);
}
static ssize_t
@@ -181,8 +179,8 @@ qla4xxx_iscsi_version_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
- return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->iscsi_major,
- ha->iscsi_minor);
+ return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fw_info.iscsi_major,
+ ha->fw_info.iscsi_minor);
}
static ssize_t
@@ -191,8 +189,8 @@ qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr,
{
struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d.%02d\n",
- ha->bootload_major, ha->bootload_minor,
- ha->bootload_patch, ha->bootload_build);
+ ha->fw_info.bootload_major, ha->fw_info.bootload_minor,
+ ha->fw_info.bootload_patch, ha->fw_info.bootload_build);
}
static ssize_t
@@ -259,6 +257,63 @@ qla4xxx_hba_model_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_name);
}
+static ssize_t
+qla4xxx_fw_timestamp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%s %s\n", ha->fw_info.fw_build_date,
+ ha->fw_info.fw_build_time);
+}
+
+static ssize_t
+qla4xxx_fw_build_user_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.fw_build_user);
+}
+
+static ssize_t
+qla4xxx_fw_ext_timestamp_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ return snprintf(buf, PAGE_SIZE, "%s\n", ha->fw_info.extended_timestamp);
+}
+
+static ssize_t
+qla4xxx_fw_load_src_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ char *load_src = NULL;
+
+ switch (ha->fw_info.fw_load_source) {
+ case 1:
+ load_src = "Flash Primary";
+ break;
+ case 2:
+ load_src = "Flash Secondary";
+ break;
+ case 3:
+ load_src = "Host Download";
+ break;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%s\n", load_src);
+}
+
+static ssize_t
+qla4xxx_fw_uptime_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev));
+ qla4xxx_about_firmware(ha);
+ return snprintf(buf, PAGE_SIZE, "%u.%u secs\n", ha->fw_uptime_secs,
+ ha->fw_uptime_msecs);
+}
+
static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL);
static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL);
@@ -269,6 +324,12 @@ static DEVICE_ATTR(phy_port_cnt, S_IRUGO, qla4xxx_phy_port_cnt_show, NULL);
static DEVICE_ATTR(phy_port_num, S_IRUGO, qla4xxx_phy_port_num_show, NULL);
static DEVICE_ATTR(iscsi_func_cnt, S_IRUGO, qla4xxx_iscsi_func_cnt_show, NULL);
static DEVICE_ATTR(hba_model, S_IRUGO, qla4xxx_hba_model_show, NULL);
+static DEVICE_ATTR(fw_timestamp, S_IRUGO, qla4xxx_fw_timestamp_show, NULL);
+static DEVICE_ATTR(fw_build_user, S_IRUGO, qla4xxx_fw_build_user_show, NULL);
+static DEVICE_ATTR(fw_ext_timestamp, S_IRUGO, qla4xxx_fw_ext_timestamp_show,
+ NULL);
+static DEVICE_ATTR(fw_load_src, S_IRUGO, qla4xxx_fw_load_src_show, NULL);
+static DEVICE_ATTR(fw_uptime, S_IRUGO, qla4xxx_fw_uptime_show, NULL);
struct device_attribute *qla4xxx_host_attrs[] = {
&dev_attr_fw_version,
@@ -281,5 +342,10 @@ struct device_attribute *qla4xxx_host_attrs[] = {
&dev_attr_phy_port_num,
&dev_attr_iscsi_func_cnt,
&dev_attr_hba_model,
+ &dev_attr_fw_timestamp,
+ &dev_attr_fw_build_user,
+ &dev_attr_fw_ext_timestamp,
+ &dev_attr_fw_load_src,
+ &dev_attr_fw_uptime,
NULL,
};
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.c b/drivers/scsi/qla4xxx/ql4_bsg.c
index 8acdc582ff6..cf8fdf1d125 100644
--- a/drivers/scsi/qla4xxx/ql4_bsg.c
+++ b/drivers/scsi/qla4xxx/ql4_bsg.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2011 QLogic Corporation
+ * Copyright (c) 2011-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index 77b7c594010..5649e9ef59a 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -141,21 +141,22 @@ void qla4_8xxx_dump_peg_reg(struct scsi_qla_host *ha)
if (is_qla8022(ha)) {
ql4_printk(KERN_INFO, ha,
- "scsi(%ld): %s, ISP8022 Dumping hw/fw registers:\n"
+ "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n"
" PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n"
" PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,\n"
" PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,\n"
- " PEG_NET_4_PC: 0x%x\n", ha->host_no,
- __func__, halt_status1, halt_status2,
+ " PEG_NET_4_PC: 0x%x\n", ha->host_no, __func__,
+ ha->pdev->device, halt_status1, halt_status2,
qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c),
qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c),
qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c),
qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c),
qla4_82xx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c));
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
ql4_printk(KERN_INFO, ha,
- "scsi(%ld): %s, ISP8324 Dumping hw/fw registers:\n"
+ "scsi(%ld): %s, ISP%04x Dumping hw/fw registers:\n"
" PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,\n",
- ha->host_no, __func__, halt_status1, halt_status2);
+ ha->host_no, __func__, ha->pdev->device,
+ halt_status1, halt_status2);
}
}
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index ddf16a86bbf..41327d46ecf 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -64,6 +64,10 @@
#define PCI_DEVICE_ID_QLOGIC_ISP8324 0x8032
#endif
+#ifndef PCI_DEVICE_ID_QLOGIC_ISP8042
+#define PCI_DEVICE_ID_QLOGIC_ISP8042 0x8042
+#endif
+
#define ISP4XXX_PCI_FN_1 0x1
#define ISP4XXX_PCI_FN_2 0x3
@@ -201,6 +205,7 @@
#define MAX_RESET_HA_RETRIES 2
#define FW_ALIVE_WAIT_TOV 3
+#define IDC_EXTEND_TOV 8
#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
@@ -335,6 +340,7 @@ struct ql4_tuple_ddb {
#define DF_BOOT_TGT 1 /* Boot target entry */
#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */
#define DF_FO_MASKED 3
+#define DF_DISABLE_RELOGIN 4 /* Disable relogin to device */
enum qla4_work_type {
QLA4_EVENT_AEN,
@@ -557,6 +563,7 @@ struct scsi_qla_host {
#define DPC_HA_UNRECOVERABLE 21 /* 0x00080000 ISP-82xx only*/
#define DPC_HA_NEED_QUIESCENT 22 /* 0x00100000 ISP-82xx only*/
#define DPC_POST_IDC_ACK 23 /* 0x00200000 */
+#define DPC_RESTORE_ACB 24 /* 0x01000000 */
struct Scsi_Host *host; /* pointer to host data */
uint32_t tot_ddbs;
@@ -734,12 +741,9 @@ struct scsi_qla_host {
struct iscsi_iface *iface_ipv6_1;
/* --- From About Firmware --- */
- uint16_t iscsi_major;
- uint16_t iscsi_minor;
- uint16_t bootload_major;
- uint16_t bootload_minor;
- uint16_t bootload_patch;
- uint16_t bootload_build;
+ struct about_fw_info fw_info;
+ uint32_t fw_uptime_secs; /* seconds elapsed since fw bootup */
+ uint32_t fw_uptime_msecs; /* milliseconds beyond elapsed seconds */
uint16_t def_timeout; /* Default login timeout */
uint32_t flash_state;
@@ -780,9 +784,11 @@ struct scsi_qla_host {
uint32_t *reg_tbl;
struct qla4_83xx_reset_template reset_tmplt;
struct device_reg_83xx __iomem *qla4_83xx_reg; /* Base I/O address
- for ISP8324 */
+ for ISP8324 and
+ ISP8042 */
uint32_t pf_bit;
struct qla4_83xx_idc_information idc_info;
+ struct addr_ctrl_blk *saved_acb;
};
struct ql4_task_data {
@@ -850,9 +856,14 @@ static inline int is_qla8032(struct scsi_qla_host *ha)
return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8324;
}
+static inline int is_qla8042(struct scsi_qla_host *ha)
+{
+ return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8042;
+}
+
static inline int is_qla80XX(struct scsi_qla_host *ha)
{
- return is_qla8022(ha) || is_qla8032(ha);
+ return is_qla8022(ha) || is_qla8032(ha) || is_qla8042(ha);
}
static inline int is_aer_supported(struct scsi_qla_host *ha)
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index c7b8892b5a8..51d1a70f8b4 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -458,6 +458,7 @@ struct qla_flt_region {
#define MBOX_CMD_GET_CONN_EVENT_LOG 0x0077
#define MBOX_CMD_IDC_ACK 0x0101
+#define MBOX_CMD_IDC_TIME_EXTEND 0x0102
#define MBOX_CMD_PORT_RESET 0x0120
#define MBOX_CMD_SET_PORT_CONFIG 0x0122
@@ -502,6 +503,7 @@ struct qla_flt_region {
#define MBOX_ASTS_SYSTEM_WARNING_EVENT 0x8036
#define MBOX_ASTS_IDC_COMPLETE 0x8100
#define MBOX_ASTS_IDC_REQUEST_NOTIFICATION 0x8101
+#define MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION 0x8102
#define MBOX_ASTS_DCBX_CONF_CHANGE 0x8110
#define MBOX_ASTS_TXSCVR_INSERTED 0x8130
#define MBOX_ASTS_TXSCVR_REMOVED 0x8131
@@ -512,6 +514,10 @@ struct qla_flt_region {
#define MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR 0x8022
#define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027
+/* ACB Configuration Defines */
+#define ACB_CONFIG_DISABLE 0x00
+#define ACB_CONFIG_SET 0x01
+
/* ACB State Defines */
#define ACB_STATE_UNCONFIGURED 0x00
#define ACB_STATE_INVALID 0x01
@@ -955,7 +961,7 @@ struct about_fw_info {
uint16_t bootload_minor; /* 46 - 47 */
uint16_t bootload_patch; /* 48 - 49 */
uint16_t bootload_build; /* 4A - 4B */
- uint8_t reserved2[180]; /* 4C - FF */
+ uint8_t extended_timestamp[180];/* 4C - FF */
};
struct crash_record {
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 4a428009f69..e6f2a2669db 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -266,6 +266,14 @@ int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options,
dma_addr_t dma_addr);
int qla4xxx_get_uni_chap_at_index(struct scsi_qla_host *ha, char *username,
char *password, uint16_t chap_index);
+int qla4xxx_disable_acb(struct scsi_qla_host *ha);
+int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+ uint32_t *mbox_sts, dma_addr_t acb_dma);
+int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
+ uint32_t acb_type, uint32_t len);
+int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config);
+int qla4_83xx_ms_mem_write_128b(struct scsi_qla_host *ha,
+ uint64_t addr, uint32_t *data, uint32_t count);
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 8fc8548ba4b..7456eeb2e58 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -107,7 +107,7 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha)
(unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_in);
writel(0,
(unsigned long __iomem *)&ha->qla4_82xx_reg->rsp_q_out);
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
writel(0,
(unsigned long __iomem *)&ha->qla4_83xx_reg->req_q_in);
writel(0,
@@ -940,7 +940,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
* while switching from polling to interrupt mode. IOCB interrupts are
* enabled via isp_ops->enable_intrs.
*/
- if (is_qla8032(ha))
+ if (is_qla8032(ha) || is_qla8042(ha))
qla4_83xx_enable_mbox_intrs(ha);
if (qla4xxx_about_firmware(ha) == QLA_ERROR)
diff --git a/drivers/scsi/qla4xxx/ql4_inline.h b/drivers/scsi/qla4xxx/ql4_inline.h
index 6f4decd44c6..8503ad643bd 100644
--- a/drivers/scsi/qla4xxx/ql4_inline.h
+++ b/drivers/scsi/qla4xxx/ql4_inline.h
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index fad71ed067e..e5697ab144d 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 482287f4005..7dff09f09b7 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -588,7 +588,7 @@ static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha)
{
int rval = 1;
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
if ((ha->idc_info.info2 & ENABLE_INTERNAL_LOOPBACK) ||
(ha->idc_info.info2 & ENABLE_EXTERNAL_LOOPBACK)) {
DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -621,7 +621,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
__le32 __iomem *mailbox_out;
- if (is_qla8032(ha))
+ if (is_qla8032(ha) || is_qla8042(ha))
mailbox_out = &ha->qla4_83xx_reg->mailbox_out[0];
else if (is_qla8022(ha))
mailbox_out = &ha->qla4_82xx_reg->mailbox_out[0];
@@ -665,7 +665,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
qla4xxx_dump_registers(ha);
if ((is_qla8022(ha) && ql4xdontresethba) ||
- (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
+ ((is_qla8032(ha) || is_qla8042(ha)) &&
+ qla4_83xx_idc_dontreset(ha))) {
DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
ha->host_no, __func__));
} else {
@@ -744,17 +745,23 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
* mbox_sts[3] = new ACB state */
if ((mbox_sts[3] == ACB_STATE_VALID) &&
((mbox_sts[2] == ACB_STATE_TENTATIVE) ||
- (mbox_sts[2] == ACB_STATE_ACQUIRING)))
+ (mbox_sts[2] == ACB_STATE_ACQUIRING))) {
set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
- else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
- (mbox_sts[2] == ACB_STATE_VALID)) {
+ } else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
+ (mbox_sts[2] == ACB_STATE_VALID)) {
if (is_qla80XX(ha))
set_bit(DPC_RESET_HA_FW_CONTEXT,
&ha->dpc_flags);
else
set_bit(DPC_RESET_HA, &ha->dpc_flags);
- } else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED))
+ } else if (mbox_sts[3] == ACB_STATE_DISABLING) {
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB in disabling state\n",
+ ha->host_no, __func__);
+ } else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED)) {
complete(&ha->disable_acb_comp);
+ ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ACB state unconfigured\n",
+ ha->host_no, __func__);
+ }
break;
case MBOX_ASTS_MAC_ADDRESS_CHANGED:
@@ -836,7 +843,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
case MBOX_ASTS_IDC_REQUEST_NOTIFICATION:
{
uint32_t opcode;
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
ha->host_no, mbox_sts[0],
@@ -858,7 +865,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
}
case MBOX_ASTS_IDC_COMPLETE:
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
ha->host_no, mbox_sts[0],
@@ -868,10 +875,15 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
"scsi:%ld: AEN %04x IDC Complete notification\n",
ha->host_no, mbox_sts[0]));
- if (qla4_83xx_loopback_in_progress(ha))
+ if (qla4_83xx_loopback_in_progress(ha)) {
set_bit(AF_LOOPBACK, &ha->flags);
- else
+ } else {
clear_bit(AF_LOOPBACK, &ha->flags);
+ if (ha->saved_acb)
+ set_bit(DPC_RESTORE_ACB,
+ &ha->dpc_flags);
+ }
+ qla4xxx_wake_dpc(ha);
}
break;
@@ -886,6 +898,17 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
ha->host_no, mbox_sts[0]));
break;
+ case MBOX_ASTS_IDC_TIME_EXTEND_NOTIFICATION:
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x mbox_sts[5]=%08x\n",
+ ha->host_no, mbox_sts[0], mbox_sts[1],
+ mbox_sts[2], mbox_sts[3], mbox_sts[4],
+ mbox_sts[5]));
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: AEN %04x Received IDC Extend Timeout notification\n",
+ ha->host_no, mbox_sts[0]));
+ break;
+
case MBOX_ASTS_INITIALIZATION_FAILED:
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi%ld: AEN %04x, mbox_sts[3]=%08x\n",
@@ -1297,7 +1320,7 @@ qla4_8xxx_default_intr_handler(int irq, void *dev_id)
uint32_t intr_status;
uint8_t reqs_count = 0;
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
qla4_83xx_mailbox_intr_handler(irq, dev_id);
} else {
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1334,7 +1357,7 @@ qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
uint32_t ival = 0;
spin_lock_irqsave(&ha->hardware_lock, flags);
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
ival = readl(&ha->qla4_83xx_reg->iocb_int_mask);
if (ival == 0) {
ql4_printk(KERN_INFO, ha, "%s: It is a spurious iocb interrupt!\n",
@@ -1425,10 +1448,10 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
goto try_intx;
if (ql4xenablemsix == 2) {
- /* Note: MSI Interrupts not supported for ISP8324 */
- if (is_qla8032(ha)) {
- ql4_printk(KERN_INFO, ha, "%s: MSI Interrupts not supported for ISP8324, Falling back-to INTx mode\n",
- __func__);
+ /* Note: MSI Interrupts not supported for ISP8324 and ISP8042 */
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ ql4_printk(KERN_INFO, ha, "%s: MSI Interrupts not supported for ISP%04x, Falling back-to INTx mode\n",
+ __func__, ha->pdev->device);
goto try_intx;
}
goto try_msi;
@@ -1444,9 +1467,9 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
"MSI-X: Enabled (0x%X).\n", ha->revision_id));
goto irq_attached;
} else {
- if (is_qla8032(ha)) {
- ql4_printk(KERN_INFO, ha, "%s: ISP8324: MSI-X: Falling back-to INTx mode. ret = %d\n",
- __func__, ret);
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ ql4_printk(KERN_INFO, ha, "%s: ISP%04x: MSI-X: Falling back-to INTx mode. ret = %d\n",
+ __func__, ha->pdev->device, ret);
goto try_intx;
}
}
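The reworked ACB-state AEN handling in ql4_isr.c above maps the old state (mbox_sts[2]) and new state (mbox_sts[3]) to one of four actions. A condensed, self-contained restatement of those branches (the enum names, the decode_acb_aen() helper, and all ACB state values other than UNCONFIGURED/INVALID are illustrative assumptions, not driver code):

    #include <stdio.h>

    enum acb_state { ACB_UNCONFIGURED, ACB_INVALID, ACB_TENTATIVE,
                     ACB_ACQUIRING, ACB_VALID, ACB_DISABLING };

    enum acb_action { ACT_GET_DHCP_IP, ACT_RESET_FW_CONTEXT,
                      ACT_LOG_DISABLING, ACT_COMPLETE_DISABLE, ACT_NONE };

    /* old_state = mbox_sts[2], new_state = mbox_sts[3] */
    static enum acb_action decode_acb_aen(enum acb_state old_state,
                                          enum acb_state new_state)
    {
            if (new_state == ACB_VALID &&
                (old_state == ACB_TENTATIVE || old_state == ACB_ACQUIRING))
                    return ACT_GET_DHCP_IP;      /* DPC_GET_DHCP_IP_ADDR */
            if (new_state == ACB_ACQUIRING && old_state == ACB_VALID)
                    return ACT_RESET_FW_CONTEXT; /* DPC_RESET_HA[_FW_CONTEXT] */
            if (new_state == ACB_DISABLING)
                    return ACT_LOG_DISABLING;    /* new case in this patch */
            if (new_state == ACB_UNCONFIGURED)
                    return ACT_COMPLETE_DISABLE; /* complete(disable_acb_comp) */
            return ACT_NONE;
    }

    int main(void)
    {
            /* TENTATIVE -> VALID should trigger the DHCP IP refresh */
            printf("%d\n", decode_acb_aen(ACB_TENTATIVE, ACB_VALID));
            return 0;
    }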
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index a501beab3ff..62d4208af21 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -1,10 +1,11 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
+#include <linux/ctype.h>
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
@@ -52,7 +53,7 @@ static int qla4xxx_is_intr_poll_mode(struct scsi_qla_host *ha)
{
int rval = 1;
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags))
rval = 0;
@@ -223,7 +224,7 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
qla4_82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
CRB_NIU_XG_PAUSE_CTL_P0 |
CRB_NIU_XG_PAUSE_CTL_P1);
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
ql4_printk(KERN_INFO, ha, " %s: disabling pause transmit on port 0 & 1.\n",
__func__);
qla4_83xx_disable_pause(ha);
@@ -1270,16 +1271,28 @@ int qla4xxx_about_firmware(struct scsi_qla_host *ha)
}
/* Save version information. */
- ha->firmware_version[0] = le16_to_cpu(about_fw->fw_major);
- ha->firmware_version[1] = le16_to_cpu(about_fw->fw_minor);
- ha->patch_number = le16_to_cpu(about_fw->fw_patch);
- ha->build_number = le16_to_cpu(about_fw->fw_build);
- ha->iscsi_major = le16_to_cpu(about_fw->iscsi_major);
- ha->iscsi_minor = le16_to_cpu(about_fw->iscsi_minor);
- ha->bootload_major = le16_to_cpu(about_fw->bootload_major);
- ha->bootload_minor = le16_to_cpu(about_fw->bootload_minor);
- ha->bootload_patch = le16_to_cpu(about_fw->bootload_patch);
- ha->bootload_build = le16_to_cpu(about_fw->bootload_build);
+ ha->fw_info.fw_major = le16_to_cpu(about_fw->fw_major);
+ ha->fw_info.fw_minor = le16_to_cpu(about_fw->fw_minor);
+ ha->fw_info.fw_patch = le16_to_cpu(about_fw->fw_patch);
+ ha->fw_info.fw_build = le16_to_cpu(about_fw->fw_build);
+ memcpy(ha->fw_info.fw_build_date, about_fw->fw_build_date,
+ sizeof(about_fw->fw_build_date));
+ memcpy(ha->fw_info.fw_build_time, about_fw->fw_build_time,
+ sizeof(about_fw->fw_build_time));
+ strcpy((char *)ha->fw_info.fw_build_user,
+ skip_spaces((char *)about_fw->fw_build_user));
+ ha->fw_info.fw_load_source = le16_to_cpu(about_fw->fw_load_source);
+ ha->fw_info.iscsi_major = le16_to_cpu(about_fw->iscsi_major);
+ ha->fw_info.iscsi_minor = le16_to_cpu(about_fw->iscsi_minor);
+ ha->fw_info.bootload_major = le16_to_cpu(about_fw->bootload_major);
+ ha->fw_info.bootload_minor = le16_to_cpu(about_fw->bootload_minor);
+ ha->fw_info.bootload_patch = le16_to_cpu(about_fw->bootload_patch);
+ ha->fw_info.bootload_build = le16_to_cpu(about_fw->bootload_build);
+ strcpy((char *)ha->fw_info.extended_timestamp,
+ skip_spaces((char *)about_fw->extended_timestamp));
+
+ ha->fw_uptime_secs = le32_to_cpu(mbox_sts[5]);
+ ha->fw_uptime_msecs = le32_to_cpu(mbox_sts[6]);
status = QLA_SUCCESS;
exit_about_fw:
@@ -1723,6 +1736,45 @@ int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha,
return status;
}
+/**
+ * qla4_84xx_extend_idc_tmo - Extend IDC Timeout.
+ * @ha: Pointer to host adapter structure.
+ * @ext_tmo: idc timeout value
+ *
+ * Requests firmware to extend the idc timeout value.
+ **/
+static int qla4_84xx_extend_idc_tmo(struct scsi_qla_host *ha, uint32_t ext_tmo)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ int status;
+
+ memset(&mbox_cmd, 0, sizeof(mbox_cmd));
+ memset(&mbox_sts, 0, sizeof(mbox_sts));
+ ext_tmo &= 0xf;
+
+ mbox_cmd[0] = MBOX_CMD_IDC_TIME_EXTEND;
+ mbox_cmd[1] = ((ha->idc_info.request_desc & 0xfffff0ff) |
+ (ext_tmo << 8)); /* new timeout */
+ mbox_cmd[2] = ha->idc_info.info1;
+ mbox_cmd[3] = ha->idc_info.info2;
+ mbox_cmd[4] = ha->idc_info.info3;
+
+ status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, MBOX_REG_COUNT,
+ mbox_cmd, mbox_sts);
+ if (status != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi%ld: %s: failed status %04X\n",
+ ha->host_no, __func__, mbox_sts[0]));
+ return QLA_ERROR;
+ } else {
+ ql4_printk(KERN_INFO, ha, "%s: IDC timeout extended by %d secs\n",
+ __func__, ext_tmo);
+ }
+
+ return QLA_SUCCESS;
+}
+
int qla4xxx_disable_acb(struct scsi_qla_host *ha)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
@@ -1739,6 +1791,23 @@ int qla4xxx_disable_acb(struct scsi_qla_host *ha)
DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_DISABLE_ACB "
"failed w/ status %04X %04X %04X", __func__,
mbox_sts[0], mbox_sts[1], mbox_sts[2]));
+ } else {
+ if (is_qla8042(ha) &&
+ (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE)) {
+ /*
+ * The Disable ACB mailbox command takes time to complete,
+ * depending on the total number of targets connected.
+ * For 512 targets it took approximately 5 secs, so the
+ * timeout is set to 8 secs, leaving a 3 sec buffer.
+ */
+ qla4_84xx_extend_idc_tmo(ha, IDC_EXTEND_TOV);
+ if (!wait_for_completion_timeout(&ha->disable_acb_comp,
+ IDC_EXTEND_TOV * HZ)) {
+ ql4_printk(KERN_WARNING, ha, "%s: Disable ACB Completion not received\n",
+ __func__);
+ }
+ }
}
return status;
}
@@ -2145,8 +2214,80 @@ int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha)
ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", __func__,
mbox_sts[0]);
else
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s: IDC ACK posted\n",
- __func__));
+ ql4_printk(KERN_INFO, ha, "%s: IDC ACK posted\n", __func__);
return status;
}
+
+int qla4_84xx_config_acb(struct scsi_qla_host *ha, int acb_config)
+{
+ uint32_t mbox_cmd[MBOX_REG_COUNT];
+ uint32_t mbox_sts[MBOX_REG_COUNT];
+ struct addr_ctrl_blk *acb = NULL;
+ uint32_t acb_len = sizeof(struct addr_ctrl_blk);
+ int rval = QLA_SUCCESS;
+ dma_addr_t acb_dma;
+
+ acb = dma_alloc_coherent(&ha->pdev->dev,
+ sizeof(struct addr_ctrl_blk),
+ &acb_dma, GFP_KERNEL);
+ if (!acb) {
+ ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", __func__);
+ rval = QLA_ERROR;
+ goto exit_config_acb;
+ }
+ memset(acb, 0, acb_len);
+
+ switch (acb_config) {
+ case ACB_CONFIG_DISABLE:
+ rval = qla4xxx_get_acb(ha, acb_dma, 0, acb_len);
+ if (rval != QLA_SUCCESS)
+ goto exit_free_acb;
+
+ rval = qla4xxx_disable_acb(ha);
+ if (rval != QLA_SUCCESS)
+ goto exit_free_acb;
+
+ if (!ha->saved_acb)
+ ha->saved_acb = kzalloc(acb_len, GFP_KERNEL);
+
+ if (!ha->saved_acb) {
+ ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n",
+ __func__);
+ rval = QLA_ERROR;
+ goto exit_config_acb;
+ }
+ memcpy(ha->saved_acb, acb, acb_len);
+ break;
+ case ACB_CONFIG_SET:
+
+ if (!ha->saved_acb) {
+ ql4_printk(KERN_ERR, ha, "%s: Can't set ACB, Saved ACB not available\n",
+ __func__);
+ rval = QLA_ERROR;
+ goto exit_free_acb;
+ }
+
+ memcpy(acb, ha->saved_acb, acb_len);
+ kfree(ha->saved_acb);
+ ha->saved_acb = NULL;
+
+ rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
+ if (rval != QLA_SUCCESS)
+ goto exit_free_acb;
+
+ break;
+ default:
+ ql4_printk(KERN_ERR, ha, "%s: Invalid ACB Configuration\n",
+ __func__);
+ }
+
+exit_free_acb:
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), acb,
+ acb_dma);
+exit_config_acb:
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s %s\n", __func__,
+ rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED"));
+ return rval;
+}
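qla4_84xx_extend_idc_tmo() above folds the 4-bit timeout into bits 8-11 of the IDC request descriptor. A minimal sketch of that encoding (the field position is inferred only from the 0xfffff0ff mask and the << 8 shift in the patch; encode_idc_tmo() is an illustrative name, and IDC_EXTEND_TOV is assumed to be the 8-second value described in the later comment):

    #include <stdint.h>
    #include <stdio.h>

    /* Replace bits 8..11 of the request descriptor with the 4-bit timeout. */
    static uint32_t encode_idc_tmo(uint32_t request_desc, uint32_t ext_tmo)
    {
            ext_tmo &= 0xf;                        /* timeout is 4 bits wide */
            return (request_desc & 0xfffff0ff) | (ext_tmo << 8);
    }

    int main(void)
    {
            /* a descriptor of 0x12345678 extended to 8 secs -> 0x12345878 */
            printf("0x%08x\n", encode_idc_tmo(0x12345678, 8));
            return 0;
    }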
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
index 325db1f2c09..3bf418fbd43 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.c
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.h b/drivers/scsi/qla4xxx/ql4_nvram.h
index dba0514d1c7..e97d79ff16f 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.h
+++ b/drivers/scsi/qla4xxx/ql4_nvram.h
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index eaf00c162eb..d001202d356 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -1514,11 +1514,11 @@ qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
/*
- * For ISP8324, drv_active register has 1 bit per function,
+ * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
* shift 1 by func_num to set a bit for the function.
* For ISP8022, drv_active has 4 bits per function
*/
- if (is_qla8032(ha))
+ if (is_qla8032(ha) || is_qla8042(ha))
drv_active |= (1 << ha->func_num);
else
drv_active |= (1 << (ha->func_num * 4));
@@ -1536,11 +1536,11 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
/*
- * For ISP8324, drv_active register has 1 bit per function,
+ * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
* shift 1 by func_num to set a bit for the function.
* For ISP8022, drv_active has 4 bits per function
*/
- if (is_qla8032(ha))
+ if (is_qla8032(ha) || is_qla8042(ha))
drv_active &= ~(1 << (ha->func_num));
else
drv_active &= ~(1 << (ha->func_num * 4));
@@ -1559,11 +1559,11 @@ inline int qla4_8xxx_need_reset(struct scsi_qla_host *ha)
drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
/*
- * For ISP8324, drv_active register has 1 bit per function,
+ * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
* shift 1 by func_num to set a bit for the function.
* For ISP8022, drv_active has 4 bits per function
*/
- if (is_qla8032(ha))
+ if (is_qla8032(ha) || is_qla8042(ha))
rval = drv_state & (1 << ha->func_num);
else
rval = drv_state & (1 << (ha->func_num * 4));
@@ -1581,11 +1581,11 @@ void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
/*
- * For ISP8324, drv_active register has 1 bit per function,
+ * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
* shift 1 by func_num to set a bit for the function.
* For ISP8022, drv_active has 4 bits per function
*/
- if (is_qla8032(ha))
+ if (is_qla8032(ha) || is_qla8042(ha))
drv_state |= (1 << ha->func_num);
else
drv_state |= (1 << (ha->func_num * 4));
@@ -1602,11 +1602,11 @@ void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
drv_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
/*
- * For ISP8324, drv_active register has 1 bit per function,
+ * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
* shift 1 by func_num to set a bit for the function.
* For ISP8022, drv_active has 4 bits per function
*/
- if (is_qla8032(ha))
+ if (is_qla8032(ha) || is_qla8042(ha))
drv_state &= ~(1 << ha->func_num);
else
drv_state &= ~(1 << (ha->func_num * 4));
@@ -1624,11 +1624,11 @@ qla4_8xxx_set_qsnt_ready(struct scsi_qla_host *ha)
qsnt_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_STATE);
/*
- * For ISP8324, drv_active register has 1 bit per function,
+ * For ISP8324 and ISP8042, drv_active register has 1 bit per function,
* shift 1 by func_num to set a bit for the function.
* For ISP8022, drv_active has 4 bits per function.
*/
- if (is_qla8032(ha))
+ if (is_qla8032(ha) || is_qla8042(ha))
qsnt_state |= (1 << ha->func_num);
else
qsnt_state |= (2 << (ha->func_num * 4));
@@ -1737,6 +1737,208 @@ static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
*d_ptr = data_ptr;
}
+static int qla4_83xx_check_dma_engine_state(struct scsi_qla_host *ha)
+{
+ int rval = QLA_SUCCESS;
+ uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
+ uint64_t dma_base_addr = 0;
+ struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL;
+
+ tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+ ha->fw_dump_tmplt_hdr;
+ dma_eng_num =
+ tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX];
+ dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS +
+ (dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET);
+
+ /* Read the pex-dma's command-status-and-control register. */
+ rval = ha->isp_ops->rd_reg_indirect(ha,
+ (dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL),
+ &cmd_sts_and_cntrl);
+
+ if (rval)
+ return QLA_ERROR;
+
+ /* Check if requested pex-dma engine is available. */
+ if (cmd_sts_and_cntrl & BIT_31)
+ return QLA_SUCCESS;
+ else
+ return QLA_ERROR;
+}
+
+static int qla4_83xx_start_pex_dma(struct scsi_qla_host *ha,
+ struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr)
+{
+ int rval = QLA_SUCCESS, wait = 0;
+ uint32_t dma_eng_num = 0, cmd_sts_and_cntrl = 0;
+ uint64_t dma_base_addr = 0;
+ struct qla4_8xxx_minidump_template_hdr *tmplt_hdr = NULL;
+
+ tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
+ ha->fw_dump_tmplt_hdr;
+ dma_eng_num =
+ tmplt_hdr->saved_state_array[QLA83XX_PEX_DMA_ENGINE_INDEX];
+ dma_base_addr = QLA83XX_PEX_DMA_BASE_ADDRESS +
+ (dma_eng_num * QLA83XX_PEX_DMA_NUM_OFFSET);
+
+ rval = ha->isp_ops->wr_reg_indirect(ha,
+ dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_LOW,
+ m_hdr->desc_card_addr);
+ if (rval)
+ goto error_exit;
+
+ rval = ha->isp_ops->wr_reg_indirect(ha,
+ dma_base_addr + QLA83XX_PEX_DMA_CMD_ADDR_HIGH, 0);
+ if (rval)
+ goto error_exit;
+
+ rval = ha->isp_ops->wr_reg_indirect(ha,
+ dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL,
+ m_hdr->start_dma_cmd);
+ if (rval)
+ goto error_exit;
+
+ /* Wait for dma operation to complete. */
+ for (wait = 0; wait < QLA83XX_PEX_DMA_MAX_WAIT; wait++) {
+ rval = ha->isp_ops->rd_reg_indirect(ha,
+ (dma_base_addr + QLA83XX_PEX_DMA_CMD_STS_AND_CNTRL),
+ &cmd_sts_and_cntrl);
+ if (rval)
+ goto error_exit;
+
+ if ((cmd_sts_and_cntrl & BIT_1) == 0)
+ break;
+ else
+ udelay(10);
+ }
+
+ /* Wait a max of 100 ms, otherwise fallback to rdmem entry read */
+ if (wait >= QLA83XX_PEX_DMA_MAX_WAIT) {
+ rval = QLA_ERROR;
+ goto error_exit;
+ }
+
+error_exit:
+ return rval;
+}
+
+static int qla4_83xx_minidump_pex_dma_read(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ int rval = QLA_SUCCESS;
+ struct qla4_83xx_minidump_entry_rdmem_pex_dma *m_hdr = NULL;
+ uint32_t size, read_size;
+ uint8_t *data_ptr = (uint8_t *)*d_ptr;
+ void *rdmem_buffer = NULL;
+ dma_addr_t rdmem_dma;
+ struct qla4_83xx_pex_dma_descriptor dma_desc;
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
+
+ rval = qla4_83xx_check_dma_engine_state(ha);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: DMA engine not available. Fallback to rdmem-read.\n",
+ __func__));
+ return QLA_ERROR;
+ }
+
+ m_hdr = (struct qla4_83xx_minidump_entry_rdmem_pex_dma *)entry_hdr;
+ rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev,
+ QLA83XX_PEX_DMA_READ_SIZE,
+ &rdmem_dma, GFP_KERNEL);
+ if (!rdmem_buffer) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Unable to allocate rdmem dma buffer\n",
+ __func__));
+ return QLA_ERROR;
+ }
+
+ /* Prepare pex-dma descriptor to be written to MS memory. */
+ /* dma-desc-cmd layout:
+ * 0-3: dma-desc-cmd 0-3
+ * 4-7: pcid function number
+ * 8-15: dma-desc-cmd 8-15
+ */
+ dma_desc.cmd.dma_desc_cmd = (m_hdr->dma_desc_cmd & 0xff0f);
+ dma_desc.cmd.dma_desc_cmd |= ((PCI_FUNC(ha->pdev->devfn) & 0xf) << 0x4);
+ dma_desc.dma_bus_addr = rdmem_dma;
+
+ size = 0;
+ read_size = 0;
+ /*
+ * Perform rdmem operation using pex-dma.
+ * Prepare dma in chunks of QLA83XX_PEX_DMA_READ_SIZE.
+ */
+ while (read_size < m_hdr->read_data_size) {
+ if (m_hdr->read_data_size - read_size >=
+ QLA83XX_PEX_DMA_READ_SIZE)
+ size = QLA83XX_PEX_DMA_READ_SIZE;
+ else {
+ size = (m_hdr->read_data_size - read_size);
+
+ if (rdmem_buffer)
+ dma_free_coherent(&ha->pdev->dev,
+ QLA83XX_PEX_DMA_READ_SIZE,
+ rdmem_buffer, rdmem_dma);
+
+ rdmem_buffer = dma_alloc_coherent(&ha->pdev->dev, size,
+ &rdmem_dma,
+ GFP_KERNEL);
+ if (!rdmem_buffer) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Unable to allocate rdmem dma buffer\n",
+ __func__));
+ return QLA_ERROR;
+ }
+ dma_desc.dma_bus_addr = rdmem_dma;
+ }
+
+ dma_desc.src_addr = m_hdr->read_addr + read_size;
+ dma_desc.cmd.read_data_size = size;
+
+ /* Prepare: Write pex-dma descriptor to MS memory. */
+ rval = qla4_83xx_ms_mem_write_128b(ha,
+ (uint64_t)m_hdr->desc_card_addr,
+ (uint32_t *)&dma_desc,
+ (sizeof(struct qla4_83xx_pex_dma_descriptor)/16));
+ if (rval == -1) {
+ ql4_printk(KERN_INFO, ha,
+ "%s: Error writing rdmem-dma-init to MS !!!\n",
+ __func__);
+ goto error_exit;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Dma-desc: Instruct for rdmem dma (size 0x%x).\n",
+ __func__, size));
+ /* Execute: Start pex-dma operation. */
+ rval = qla4_83xx_start_pex_dma(ha, m_hdr);
+ if (rval != QLA_SUCCESS) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "scsi(%ld): start-pex-dma failed rval=0x%x\n",
+ ha->host_no, rval));
+ goto error_exit;
+ }
+
+ memcpy(data_ptr, rdmem_buffer, size);
+ data_ptr += size;
+ read_size += size;
+ }
+
+ DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__));
+
+ *d_ptr = (uint32_t *)data_ptr;
+
+error_exit:
+ if (rdmem_buffer)
+ dma_free_coherent(&ha->pdev->dev, size, rdmem_buffer,
+ rdmem_dma);
+
+ return rval;
+}
+
static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
struct qla8xxx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
@@ -2068,7 +2270,7 @@ static void qla4_82xx_minidump_process_rdrom(struct scsi_qla_host *ha,
#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
-static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
+static int __qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
struct qla8xxx_minidump_entry_hdr *entry_hdr,
uint32_t **d_ptr)
{
@@ -2150,6 +2352,28 @@ static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
return QLA_SUCCESS;
}
+static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
+ struct qla8xxx_minidump_entry_hdr *entry_hdr,
+ uint32_t **d_ptr)
+{
+ uint32_t *data_ptr = *d_ptr;
+ int rval = QLA_SUCCESS;
+
+ if (is_qla8032(ha) || is_qla8042(ha)) {
+ rval = qla4_83xx_minidump_pex_dma_read(ha, entry_hdr,
+ &data_ptr);
+ if (rval != QLA_SUCCESS) {
+ rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
+ &data_ptr);
+ }
+ } else {
+ rval = __qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
+ &data_ptr);
+ }
+ *d_ptr = data_ptr;
+ return rval;
+}
+
static void qla4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
struct qla8xxx_minidump_entry_hdr *entry_hdr,
int index)
@@ -2398,13 +2622,13 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
(((uint8_t *)ha->fw_dump_tmplt_hdr) +
tmplt_hdr->first_entry_offset);
- if (is_qla8032(ha))
+ if (is_qla8032(ha) || is_qla8042(ha))
tmplt_hdr->saved_state_array[QLA83XX_SS_OCM_WNDREG_INDEX] =
tmplt_hdr->ocm_window_reg[ha->func_num];
/* Walk through the entry headers - validate/perform required action */
for (i = 0; i < num_entry_hdr; i++) {
- if (data_collected >= ha->fw_dump_size) {
+ if (data_collected > ha->fw_dump_size) {
ql4_printk(KERN_INFO, ha,
"Data collected: [0x%x], Total Dump size: [0x%x]\n",
data_collected, ha->fw_dump_size);
@@ -2455,7 +2679,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
if (is_qla8022(ha)) {
qla4_82xx_minidump_process_rdrom(ha, entry_hdr,
&data_ptr);
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
rval = qla4_83xx_minidump_process_rdrom(ha,
entry_hdr,
&data_ptr);
@@ -2496,7 +2720,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
&data_ptr);
break;
case QLA83XX_POLLRD:
- if (!is_qla8032(ha)) {
+ if (is_qla8022(ha)) {
qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
break;
}
@@ -2506,7 +2730,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
break;
case QLA83XX_RDMUX2:
- if (!is_qla8032(ha)) {
+ if (is_qla8022(ha)) {
qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
break;
}
@@ -2514,7 +2738,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
&data_ptr);
break;
case QLA83XX_POLLRDMWR:
- if (!is_qla8032(ha)) {
+ if (is_qla8022(ha)) {
qla4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
break;
}
@@ -2529,9 +2753,7 @@ static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
break;
}
- data_collected = (uint8_t *)data_ptr -
- ((uint8_t *)((uint8_t *)ha->fw_dump +
- ha->fw_dump_tmplt_size));
+ data_collected = (uint8_t *)data_ptr - (uint8_t *)ha->fw_dump;
skip_nxt_entry:
/* next entry in the template */
entry_hdr = (struct qla8xxx_minidump_entry_hdr *)
@@ -2539,10 +2761,11 @@ skip_nxt_entry:
entry_hdr->entry_size);
}
- if ((data_collected + ha->fw_dump_tmplt_size) != ha->fw_dump_size) {
+ if (data_collected != ha->fw_dump_size) {
ql4_printk(KERN_INFO, ha,
"Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",
data_collected, ha->fw_dump_size);
+ rval = QLA_ERROR;
goto md_failed;
}
@@ -2642,10 +2865,10 @@ dev_initialize:
QLA8XXX_DEV_INITIALIZING);
/*
- * For ISP8324, if IDC_CTRL GRACEFUL_RESET_BIT1 is set, reset it after
- * device goes to INIT state.
+ * For ISP8324 and ISP8042, if IDC_CTRL GRACEFUL_RESET_BIT1 is set,
+ * reset it after device goes to INIT state.
*/
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
if (idc_ctrl & GRACEFUL_RESET_BIT1) {
qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
@@ -2846,7 +3069,7 @@ int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha)
* If we are the first driver to load and
* ql4xdontresethba is not set, clear IDC_CTRL BIT0.
*/
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
drv_active = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DRV_ACTIVE);
if ((drv_active == (1 << ha->func_num)) && !ql4xdontresethba)
qla4_83xx_clear_idc_dontreset(ha);
@@ -2854,7 +3077,7 @@ int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha)
if (is_qla8022(ha)) {
qla4_82xx_set_idc_ver(ha);
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
rval = qla4_83xx_set_idc_ver(ha);
if (rval == QLA_ERROR)
qla4_8xxx_clear_drv_active(ha);
@@ -2922,11 +3145,11 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
break;
case QLA8XXX_DEV_NEED_RESET:
/*
- * For ISP8324, if NEED_RESET is set by any driver,
- * it should be honored, irrespective of IDC_CTRL
- * DONTRESET_BIT0
+ * For ISP8324 and ISP8042, if NEED_RESET is set by any
+ * driver, it should be honored, irrespective of
+ * IDC_CTRL DONTRESET_BIT0
*/
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
qla4_83xx_need_reset_handler(ha);
} else if (is_qla8022(ha)) {
if (!ql4xdontresethba) {
@@ -2976,7 +3199,7 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
int retval;
/* clear the interrupt */
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
writel(0, &ha->qla4_83xx_reg->risc_intr);
readl(&ha->qla4_83xx_reg->risc_intr);
} else if (is_qla8022(ha)) {
@@ -3094,7 +3317,7 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr)
if (is_qla8022(ha)) {
qla4_82xx_read_optrom_data(ha, (uint8_t *)ha->request_ring,
flt_addr << 2, OPTROM_BURST_SIZE);
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
status = qla4_83xx_flash_read_u32(ha, flt_addr << 2,
(uint8_t *)ha->request_ring,
0x400);
@@ -3326,7 +3549,7 @@ qla4_8xxx_get_flash_info(struct scsi_qla_host *ha)
if (is_qla8022(ha)) {
qla4_82xx_get_fdt_info(ha);
qla4_82xx_get_idc_param(ha);
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
qla4_83xx_get_idc_param(ha);
}
@@ -3436,7 +3659,7 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
}
/* Make sure we receive the minimum required data to cache internally */
- if ((is_qla8032(ha) ? mbox_sts[3] : mbox_sts[4]) <
+ if (((is_qla8032(ha) || is_qla8042(ha)) ? mbox_sts[3] : mbox_sts[4]) <
offsetof(struct mbx_sys_info, reserved)) {
DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive"
" error (%x)\n", ha->host_no, __func__, mbox_sts[4]));
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index 9dc0bbfe50d..14500a0f62c 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index b246b3c2691..f8a0a26a3cd 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1,6 +1,6 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
@@ -378,6 +378,44 @@ static umode_t qla4_attr_is_visible(int param_type, int param)
case ISCSI_PARAM_PASSWORD:
case ISCSI_PARAM_USERNAME_IN:
case ISCSI_PARAM_PASSWORD_IN:
+ case ISCSI_PARAM_AUTO_SND_TGT_DISABLE:
+ case ISCSI_PARAM_DISCOVERY_SESS:
+ case ISCSI_PARAM_PORTAL_TYPE:
+ case ISCSI_PARAM_CHAP_AUTH_EN:
+ case ISCSI_PARAM_DISCOVERY_LOGOUT_EN:
+ case ISCSI_PARAM_BIDI_CHAP_EN:
+ case ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL:
+ case ISCSI_PARAM_DEF_TIME2WAIT:
+ case ISCSI_PARAM_DEF_TIME2RETAIN:
+ case ISCSI_PARAM_HDRDGST_EN:
+ case ISCSI_PARAM_DATADGST_EN:
+ case ISCSI_PARAM_INITIAL_R2T_EN:
+ case ISCSI_PARAM_IMM_DATA_EN:
+ case ISCSI_PARAM_PDU_INORDER_EN:
+ case ISCSI_PARAM_DATASEQ_INORDER_EN:
+ case ISCSI_PARAM_MAX_SEGMENT_SIZE:
+ case ISCSI_PARAM_TCP_TIMESTAMP_STAT:
+ case ISCSI_PARAM_TCP_WSF_DISABLE:
+ case ISCSI_PARAM_TCP_NAGLE_DISABLE:
+ case ISCSI_PARAM_TCP_TIMER_SCALE:
+ case ISCSI_PARAM_TCP_TIMESTAMP_EN:
+ case ISCSI_PARAM_TCP_XMIT_WSF:
+ case ISCSI_PARAM_TCP_RECV_WSF:
+ case ISCSI_PARAM_IP_FRAGMENT_DISABLE:
+ case ISCSI_PARAM_IPV4_TOS:
+ case ISCSI_PARAM_IPV6_TC:
+ case ISCSI_PARAM_IPV6_FLOW_LABEL:
+ case ISCSI_PARAM_IS_FW_ASSIGNED_IPV6:
+ case ISCSI_PARAM_KEEPALIVE_TMO:
+ case ISCSI_PARAM_LOCAL_PORT:
+ case ISCSI_PARAM_ISID:
+ case ISCSI_PARAM_TSID:
+ case ISCSI_PARAM_DEF_TASKMGMT_TMO:
+ case ISCSI_PARAM_ERL:
+ case ISCSI_PARAM_STATSN:
+ case ISCSI_PARAM_EXP_STATSN:
+ case ISCSI_PARAM_DISCOVERY_PARENT_IDX:
+ case ISCSI_PARAM_DISCOVERY_PARENT_TYPE:
return S_IRUGO;
default:
return 0;
@@ -2218,19 +2256,23 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size);
fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf);
fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf);
- fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label);
fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout);
fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port);
fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn);
fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn);
- fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type);
+ fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_idx);
fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx);
fw_ddb_entry->tsid = cpu_to_le16(sess->tsid);
fw_ddb_entry->port = cpu_to_le16(conn->port);
fw_ddb_entry->def_timeout =
cpu_to_le16(sess->default_taskmgmt_timeout);
+ if (!strncmp(sess->portal_type, PORTAL_TYPE_IPV6, 4))
+ fw_ddb_entry->ipv4_tos = conn->ipv6_traffic_class;
+ else
+ fw_ddb_entry->ipv4_tos = conn->ipv4_tos;
+
if (conn->ipaddress)
memcpy(fw_ddb_entry->ip_addr, conn->ipaddress,
sizeof(fw_ddb_entry->ip_addr));
@@ -2257,6 +2299,101 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess,
return rc;
}
+static void qla4xxx_copy_to_sess_conn_params(struct iscsi_conn *conn,
+ struct iscsi_session *sess,
+ struct dev_db_entry *fw_ddb_entry)
+{
+ unsigned long options = 0;
+ uint16_t ddb_link;
+ uint16_t disc_parent;
+
+ options = le16_to_cpu(fw_ddb_entry->options);
+ conn->is_fw_assigned_ipv6 = test_bit(OPT_IS_FW_ASSIGNED_IPV6, &options);
+ sess->auto_snd_tgt_disable = test_bit(OPT_AUTO_SENDTGTS_DISABLE,
+ &options);
+ sess->discovery_sess = test_bit(OPT_DISC_SESSION, &options);
+
+ options = le16_to_cpu(fw_ddb_entry->iscsi_options);
+ conn->hdrdgst_en = test_bit(ISCSIOPT_HEADER_DIGEST_EN, &options);
+ conn->datadgst_en = test_bit(ISCSIOPT_DATA_DIGEST_EN, &options);
+ sess->imm_data_en = test_bit(ISCSIOPT_IMMEDIATE_DATA_EN, &options);
+ sess->initial_r2t_en = test_bit(ISCSIOPT_INITIAL_R2T_EN, &options);
+ sess->dataseq_inorder_en = test_bit(ISCSIOPT_DATA_SEQ_IN_ORDER,
+ &options);
+ sess->pdu_inorder_en = test_bit(ISCSIOPT_DATA_PDU_IN_ORDER, &options);
+ sess->chap_auth_en = test_bit(ISCSIOPT_CHAP_AUTH_EN, &options);
+ sess->discovery_logout_en = test_bit(ISCSIOPT_DISCOVERY_LOGOUT_EN,
+ &options);
+ sess->bidi_chap_en = test_bit(ISCSIOPT_BIDI_CHAP_EN, &options);
+ sess->discovery_auth_optional =
+ test_bit(ISCSIOPT_DISCOVERY_AUTH_OPTIONAL, &options);
+ if (test_bit(ISCSIOPT_ERL1, &options))
+ sess->erl |= BIT_1;
+ if (test_bit(ISCSIOPT_ERL0, &options))
+ sess->erl |= BIT_0;
+
+ options = le16_to_cpu(fw_ddb_entry->tcp_options);
+ conn->tcp_timestamp_stat = test_bit(TCPOPT_TIMESTAMP_STAT, &options);
+ conn->tcp_nagle_disable = test_bit(TCPOPT_NAGLE_DISABLE, &options);
+ conn->tcp_wsf_disable = test_bit(TCPOPT_WSF_DISABLE, &options);
+ if (test_bit(TCPOPT_TIMER_SCALE3, &options))
+ conn->tcp_timer_scale |= BIT_3;
+ if (test_bit(TCPOPT_TIMER_SCALE2, &options))
+ conn->tcp_timer_scale |= BIT_2;
+ if (test_bit(TCPOPT_TIMER_SCALE1, &options))
+ conn->tcp_timer_scale |= BIT_1;
+
+ conn->tcp_timer_scale >>= 1;
+ conn->tcp_timestamp_en = test_bit(TCPOPT_TIMESTAMP_EN, &options);
+
+ options = le16_to_cpu(fw_ddb_entry->ip_options);
+ conn->fragment_disable = test_bit(IPOPT_FRAGMENT_DISABLE, &options);
+
+ conn->max_recv_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
+ conn->max_xmit_dlength = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
+ sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
+ sess->first_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
+ sess->max_burst = BYTE_UNITS *
+ le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
+ sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
+ sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+ sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+ conn->max_segment_size = le16_to_cpu(fw_ddb_entry->mss);
+ conn->tcp_xmit_wsf = fw_ddb_entry->tcp_xmt_wsf;
+ conn->tcp_recv_wsf = fw_ddb_entry->tcp_rcv_wsf;
+ conn->ipv4_tos = fw_ddb_entry->ipv4_tos;
+ conn->keepalive_tmo = le16_to_cpu(fw_ddb_entry->ka_timeout);
+ conn->local_port = le16_to_cpu(fw_ddb_entry->lcl_port);
+ conn->statsn = le32_to_cpu(fw_ddb_entry->stat_sn);
+ conn->exp_statsn = le32_to_cpu(fw_ddb_entry->exp_stat_sn);
+ sess->tsid = le16_to_cpu(fw_ddb_entry->tsid);
+ COPY_ISID(sess->isid, fw_ddb_entry->isid);
+
+ ddb_link = le16_to_cpu(fw_ddb_entry->ddb_link);
+ if (ddb_link < MAX_DDB_ENTRIES)
+ sess->discovery_parent_idx = ddb_link;
+ else
+ sess->discovery_parent_idx = DDB_NO_LINK;
+
+ if (ddb_link == DDB_ISNS)
+ disc_parent = ISCSI_DISC_PARENT_ISNS;
+ else if (ddb_link == DDB_NO_LINK)
+ disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
+ else if (ddb_link < MAX_DDB_ENTRIES)
+ disc_parent = ISCSI_DISC_PARENT_SENDTGT;
+ else
+ disc_parent = ISCSI_DISC_PARENT_UNKNOWN;
+
+ iscsi_set_param(conn->cls_conn, ISCSI_PARAM_DISCOVERY_PARENT_TYPE,
+ iscsi_get_discovery_parent_name(disc_parent), 0);
+
+ iscsi_set_param(conn->cls_conn, ISCSI_PARAM_TARGET_ALIAS,
+ (char *)fw_ddb_entry->iscsi_alias, 0);
+}
+
static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
struct iscsi_cls_session *cls_sess,
@@ -2275,47 +2412,29 @@ static void qla4xxx_copy_fwddb_param(struct scsi_qla_host *ha,
ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
- conn->max_recv_dlength = BYTE_UNITS *
- le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
-
- conn->max_xmit_dlength = BYTE_UNITS *
- le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
-
- sess->initial_r2t_en =
- (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
-
- sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
-
- sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
-
- sess->first_burst = BYTE_UNITS *
- le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
-
- sess->max_burst = BYTE_UNITS *
- le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
-
- sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
-
- sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
+ qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
+ sess->def_taskmgmt_tmo = le16_to_cpu(fw_ddb_entry->def_timeout);
conn->persistent_port = le16_to_cpu(fw_ddb_entry->port);
- sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
-
+ memset(ip_addr, 0, sizeof(ip_addr));
options = le16_to_cpu(fw_ddb_entry->options);
- if (options & DDB_OPT_IPV6_DEVICE)
+ if (options & DDB_OPT_IPV6_DEVICE) {
+ iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv6", 4);
+
+ memset(ip_addr, 0, sizeof(ip_addr));
sprintf(ip_addr, "%pI6", fw_ddb_entry->ip_addr);
- else
+ } else {
+ iscsi_set_param(cls_conn, ISCSI_PARAM_PORTAL_TYPE, "ipv4", 4);
sprintf(ip_addr, "%pI4", fw_ddb_entry->ip_addr);
+ }
+ iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
+ (char *)ip_addr, buflen);
iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
(char *)fw_ddb_entry->iscsi_name, buflen);
iscsi_set_param(cls_conn, ISCSI_PARAM_INITIATOR_NAME,
(char *)ha->name_string, buflen);
- iscsi_set_param(cls_conn, ISCSI_PARAM_PERSISTENT_ADDRESS,
- (char *)ip_addr, buflen);
- iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
- (char *)fw_ddb_entry->iscsi_alias, buflen);
}
void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
@@ -2403,37 +2522,11 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
/* Update params */
ddb_entry->chap_tbl_idx = le16_to_cpu(fw_ddb_entry->chap_tbl_idx);
- conn->max_recv_dlength = BYTE_UNITS *
- le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len);
-
- conn->max_xmit_dlength = BYTE_UNITS *
- le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len);
-
- sess->initial_r2t_en =
- (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options));
-
- sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t);
-
- sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options));
-
- sess->first_burst = BYTE_UNITS *
- le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len);
-
- sess->max_burst = BYTE_UNITS *
- le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len);
-
- sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
-
- sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain);
-
- sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
+ qla4xxx_copy_to_sess_conn_params(conn, sess, fw_ddb_entry);
memcpy(sess->initiatorname, ha->name_string,
min(sizeof(ha->name_string), sizeof(sess->initiatorname)));
- iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_ALIAS,
- (char *)fw_ddb_entry->iscsi_alias, 0);
-
exit_session_conn_param:
if (fw_ddb_entry)
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
@@ -2578,6 +2671,8 @@ static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
!test_bit(AF_ONLINE, &ha->flags) ||
!test_bit(AF_LINK_UP, &ha->flags) ||
test_bit(AF_LOOPBACK, &ha->flags) ||
+ test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags) ||
+ test_bit(DPC_RESTORE_ACB, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
goto qc_host_busy;
@@ -2652,7 +2747,7 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
if (ha->nx_pcibase)
iounmap(
(struct device_reg_82xx __iomem *)ha->nx_pcibase);
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
if (ha->nx_pcibase)
iounmap(
(struct device_reg_83xx __iomem *)ha->nx_pcibase);
@@ -2846,7 +2941,7 @@ static void qla4_8xxx_process_fw_error(struct scsi_qla_host *ha)
__func__);
if (halt_status & HALT_STATUS_UNRECOVERABLE)
halt_status_unrecoverable = 1;
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
if (halt_status & QLA83XX_HALT_STATUS_FW_RESET)
ql4_printk(KERN_ERR, ha, "%s: Firmware error detected device is being reset\n",
__func__);
@@ -2901,7 +2996,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
ql4_printk(KERN_INFO, ha, "%s: HW State: NEED RESET!\n",
__func__);
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
idc_ctrl = qla4_83xx_rd_reg(ha,
QLA83XX_IDC_DRV_CTRL);
if (!(idc_ctrl & GRACEFUL_RESET_BIT1)) {
@@ -2912,7 +3007,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
}
}
- if (is_qla8032(ha) ||
+ if ((is_qla8032(ha) || is_qla8042(ha)) ||
(is_qla8022(ha) && !ql4xdontresethba)) {
set_bit(DPC_RESET_HA, &ha->dpc_flags);
qla4xxx_wake_dpc(ha);
@@ -3296,7 +3391,7 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags);
- if (is_qla8032(ha) &&
+ if ((is_qla8032(ha) || is_qla8042(ha)) &&
!test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) {
ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
__func__);
@@ -3494,7 +3589,9 @@ static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session)
} else {
/* Trigger relogin */
if (ddb_entry->ddb_type == FLASH_DDB) {
- if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
+ if (!(test_bit(DF_RELOGIN, &ddb_entry->flags) ||
+ test_bit(DF_DISABLE_RELOGIN,
+ &ddb_entry->flags)))
qla4xxx_arm_relogin_timer(ddb_entry);
} else
iscsi_session_failure(cls_session->dd_data,
@@ -3597,6 +3694,9 @@ static void qla4xxx_dpc_relogin(struct iscsi_cls_session *cls_sess)
if (!(ddb_entry->ddb_type == FLASH_DDB))
return;
+ if (test_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
+ return;
+
if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) &&
!iscsi_is_session_online(cls_sess)) {
DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -3750,7 +3850,7 @@ static void qla4xxx_do_dpc(struct work_struct *work)
if (is_qla80XX(ha)) {
if (test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags)) {
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
ql4_printk(KERN_INFO, ha, "%s: disabling pause transmit on port 0 & 1.\n",
__func__);
/* disable pause frame for ISP83xx */
@@ -3765,8 +3865,35 @@ static void qla4xxx_do_dpc(struct work_struct *work)
qla4_8xxx_device_state_handler(ha);
}
- if (test_and_clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags))
+ if (test_bit(DPC_POST_IDC_ACK, &ha->dpc_flags)) {
+ if (is_qla8042(ha)) {
+ if (ha->idc_info.info2 &
+ ENABLE_INTERNAL_LOOPBACK) {
+ ql4_printk(KERN_INFO, ha, "%s: Disabling ACB\n",
+ __func__);
+ status = qla4_84xx_config_acb(ha,
+ ACB_CONFIG_DISABLE);
+ if (status != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "%s: ACB config failed\n",
+ __func__);
+ }
+ }
+ }
qla4_83xx_post_idc_ack(ha);
+ clear_bit(DPC_POST_IDC_ACK, &ha->dpc_flags);
+ }
+
+ if (is_qla8042(ha) &&
+ test_bit(DPC_RESTORE_ACB, &ha->dpc_flags)) {
+ ql4_printk(KERN_INFO, ha, "%s: Restoring ACB\n",
+ __func__);
+ if (qla4_84xx_config_acb(ha, ACB_CONFIG_SET) !=
+ QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "%s: ACB config failed ",
+ __func__);
+ }
+ clear_bit(DPC_RESTORE_ACB, &ha->dpc_flags);
+ }
if (test_and_clear_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
qla4_8xxx_need_qsnt_handler(ha);
@@ -3778,7 +3905,8 @@ static void qla4xxx_do_dpc(struct work_struct *work)
test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))) {
if ((is_qla8022(ha) && ql4xdontresethba) ||
- (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
+ ((is_qla8032(ha) || is_qla8042(ha)) &&
+ qla4_83xx_idc_dontreset(ha))) {
DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
ha->host_no, __func__));
clear_bit(DPC_RESET_HA, &ha->dpc_flags);
@@ -3870,7 +3998,7 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
} else if (is_qla8022(ha)) {
writel(0, &ha->qla4_82xx_reg->host_int);
readl(&ha->qla4_82xx_reg->host_int);
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
writel(0, &ha->qla4_83xx_reg->risc_intr);
readl(&ha->qla4_83xx_reg->risc_intr);
}
@@ -3945,7 +4073,7 @@ int qla4_8xxx_iospace_config(struct scsi_qla_host *ha)
(ha->pdev->devfn << 11));
ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
QLA82XX_CAM_RAM_DB2);
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
ha->qla4_83xx_reg = (struct device_reg_83xx __iomem *)
((uint8_t *)ha->nx_pcibase);
}
@@ -5609,7 +5737,8 @@ static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf,
goto exit_ddb_add;
}
- for (idx = 0; idx < max_ddbs; idx++) {
+ /* Index 0 and 1 are reserved for boot target entries */
+ for (idx = 2; idx < max_ddbs; idx++) {
if (qla4xxx_flashdb_by_index(ha, fw_ddb_entry,
fw_ddb_entry_dma, idx))
break;
@@ -5925,13 +6054,6 @@ static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess)
goto exit_ddb_logout;
}
- options = LOGOUT_OPTION_CLOSE_SESSION;
- if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) {
- ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__);
- ret = -EIO;
- goto exit_ddb_logout;
- }
-
fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
&fw_ddb_entry_dma, GFP_KERNEL);
if (!fw_ddb_entry) {
@@ -5941,6 +6063,38 @@ static int qla4xxx_sysfs_ddb_logout_sid(struct iscsi_cls_session *cls_sess)
goto exit_ddb_logout;
}
+ if (test_and_set_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags))
+ goto ddb_logout_init;
+
+ ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
+ fw_ddb_entry, fw_ddb_entry_dma,
+ NULL, NULL, &ddb_state, NULL,
+ NULL, NULL);
+ if (ret == QLA_ERROR)
+ goto ddb_logout_init;
+
+ if (ddb_state == DDB_DS_SESSION_ACTIVE)
+ goto ddb_logout_init;
+
+ /* wait until next relogin is triggered using DF_RELOGIN and
+ * clear DF_RELOGIN to avoid invocation of further relogin
+ */
+ wtime = jiffies + (HZ * RELOGIN_TOV);
+ do {
+ if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags))
+ goto ddb_logout_init;
+
+ schedule_timeout_uninterruptible(HZ);
+ } while ((time_after(wtime, jiffies)));
+
+ddb_logout_init:
+ atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
+ atomic_set(&ddb_entry->relogin_timer, 0);
+
+ options = LOGOUT_OPTION_CLOSE_SESSION;
+ qla4xxx_session_logout_ddb(ha, ddb_entry, options);
+
+ memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry));
wtime = jiffies + (HZ * LOGOUT_TOV);
do {
ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
@@ -5970,10 +6124,12 @@ ddb_logout_clr_sess:
spin_lock_irqsave(&ha->hardware_lock, flags);
qla4xxx_free_ddb(ha, ddb_entry);
+ clear_bit(ddb_entry->fw_ddb_index, ha->ddb_idx_map);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
iscsi_session_teardown(ddb_entry->sess);
+ clear_bit(DF_DISABLE_RELOGIN, &ddb_entry->flags);
ret = QLA_SUCCESS;
exit_ddb_logout:
@@ -6110,7 +6266,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
struct iscsi_bus_flash_conn *fnode_conn;
struct ql4_chap_table chap_tbl;
struct device *dev;
- int parent_type, parent_index = 0xffff;
+ int parent_type;
int rc = 0;
dev = iscsi_find_flashnode_conn(fnode_sess);
@@ -6276,10 +6432,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess,
rc = sprintf(buf, "\n");
break;
case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
- if (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES)
- parent_index = fnode_sess->discovery_parent_idx;
-
- rc = sprintf(buf, "%u\n", parent_index);
+ rc = sprintf(buf, "%u\n", fnode_sess->discovery_parent_idx);
break;
case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
if (fnode_sess->discovery_parent_type == DDB_ISNS)
@@ -6533,8 +6686,8 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess,
memcpy(fnode_conn->link_local_ipv6_addr,
fnode_param->value, IPv6_ADDR_LEN);
break;
- case ISCSI_FLASHNODE_DISCOVERY_PARENT_TYPE:
- fnode_sess->discovery_parent_type =
+ case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX:
+ fnode_sess->discovery_parent_idx =
*(uint16_t *)fnode_param->value;
break;
case ISCSI_FLASHNODE_TCP_XMIT_WSF:
@@ -6910,7 +7063,7 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
nx_legacy_intr->tgt_status_reg;
ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
- } else if (is_qla8032(ha)) {
+ } else if (is_qla8032(ha) || is_qla8042(ha)) {
ha->isp_ops = &qla4_83xx_isp_ops;
ha->reg_tbl = (uint32_t *)qla4_83xx_reg_tbl;
} else {
@@ -6981,7 +7134,7 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
if (is_qla80XX(ha))
qla4_8xxx_get_flash_info(ha);
- if (is_qla8032(ha)) {
+ if (is_qla8032(ha) || is_qla8042(ha)) {
qla4_83xx_read_reset_template(ha);
/*
* NOTE: If ql4dontresethba==1, set IDC_CTRL DONTRESET_BIT0.
@@ -7036,7 +7189,8 @@ skip_retry_init:
ql4_printk(KERN_WARNING, ha, "Failed to initialize adapter\n");
if ((is_qla8022(ha) && ql4xdontresethba) ||
- (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
+ ((is_qla8032(ha) || is_qla8042(ha)) &&
+ qla4_83xx_idc_dontreset(ha))) {
/* Put the device in failed state. */
DEBUG2(printk(KERN_ERR "HW STATE: FAILED\n"));
ha->isp_ops->idc_lock(ha);
@@ -7097,8 +7251,8 @@ skip_retry_init:
" QLogic iSCSI HBA Driver version: %s\n"
" QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev),
- ha->host_no, ha->firmware_version[0], ha->firmware_version[1],
- ha->patch_number, ha->build_number);
+ ha->host_no, ha->fw_info.fw_major, ha->fw_info.fw_minor,
+ ha->fw_info.fw_patch, ha->fw_info.fw_build);
/* Set the driver version */
if (is_qla80XX(ha))
@@ -7645,16 +7799,16 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
ha = to_qla_host(cmd->device->host);
- if (is_qla8032(ha) && ql4xdontresethba)
+ if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
qla4_83xx_set_idc_dontreset(ha);
/*
- * For ISP8324, if IDC_CTRL DONTRESET_BIT0 is set by other
- * protocol drivers, we should not set device_state to
- * NEED_RESET
+ * For ISP8324 and ISP8042, if IDC_CTRL DONTRESET_BIT0 is set by other
+ * protocol drivers, we should not set device_state to NEED_RESET
*/
if (ql4xdontresethba ||
- (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
+ ((is_qla8032(ha) || is_qla8042(ha)) &&
+ qla4_83xx_idc_dontreset(ha))) {
DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
ha->host_no, __func__));
@@ -7779,9 +7933,10 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
}
recover_adapter:
- /* For ISP83XX set graceful reset bit in IDC_DRV_CTRL if
+ /* For ISP8324 and ISP8042 set graceful reset bit in IDC_DRV_CTRL if
* reset is issued by application */
- if (is_qla8032(ha) && test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
+ if ((is_qla8032(ha) || is_qla8042(ha)) &&
+ test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
idc_ctrl = qla4_83xx_rd_reg(ha, QLA83XX_IDC_DRV_CTRL);
qla4_83xx_wr_reg(ha, QLA83XX_IDC_DRV_CTRL,
(idc_ctrl | GRACEFUL_RESET_BIT1));
@@ -8078,6 +8233,12 @@ static struct pci_device_id qla4xxx_pci_tbl[] = {
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
},
+ {
+ .vendor = PCI_VENDOR_ID_QLOGIC,
+ .device = PCI_DEVICE_ID_QLOGIC_ISP8042,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ },
{0, 0},
};
MODULE_DEVICE_TABLE(pci, qla4xxx_pci_tbl);
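Within the new qla4xxx_copy_to_sess_conn_params() above, the three TCPOPT_TIMER_SCALE* flags are accumulated into bits 3..1 and then shifted down once to form the final scale value. A self-contained sketch of that decode (decode_tcp_timer_scale() is an illustrative helper; it assumes BIT_n means (1 << n) and takes the flag tests as plain booleans, since the TCPOPT_* bit positions are not shown in the patch):

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t decode_tcp_timer_scale(int scale3, int scale2, int scale1)
    {
            uint8_t v = 0;

            if (scale3)
                    v |= 1 << 3;
            if (scale2)
                    v |= 1 << 2;
            if (scale1)
                    v |= 1 << 1;

            return v >> 1;          /* scale3:scale2:scale1 -> value 0..7 */
    }

    int main(void)
    {
            /* scale3=1, scale2=0, scale1=1 -> binary 101 -> 5 */
            printf("%u\n", decode_tcp_timer_scale(1, 0, 1));
            return 0;
    }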
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index fe873cf7570..f4fef72c9bc 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -1,8 +1,8 @@
/*
* QLogic iSCSI HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.03.00-k9"
+#define QLA4XXX_DRIVER_VERSION "5.04.00-k1"
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 3b1ea34e1f5..eaa808e6ba9 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -1031,6 +1031,9 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
{
int i, result;
+ if (sdev->skip_vpd_pages)
+ goto fail;
+
/* Ask for all the pages supported by this device */
result = scsi_vpd_inquiry(sdev, buf, 0, buf_len);
if (result)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index cb4fefa1bfb..01c0ffa3127 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1997,8 +1997,14 @@ static unsigned long lba_to_map_index(sector_t lba)
static sector_t map_index_to_lba(unsigned long index)
{
- return index * scsi_debug_unmap_granularity -
- scsi_debug_unmap_alignment;
+ sector_t lba = index * scsi_debug_unmap_granularity;
+
+ if (scsi_debug_unmap_alignment) {
+ lba -= scsi_debug_unmap_granularity -
+ scsi_debug_unmap_alignment;
+ }
+
+ return lba;
}
static unsigned int map_state(sector_t lba, unsigned int *num)
@@ -2659,8 +2665,8 @@ static void __init sdebug_build_parts(unsigned char *ramp,
/ sdebug_sectors_per;
pp->end_sector = (end_sec % sdebug_sectors_per) + 1;
- pp->start_sect = start_sec;
- pp->nr_sects = end_sec - start_sec + 1;
+ pp->start_sect = cpu_to_le32(start_sec);
+ pp->nr_sects = cpu_to_le32(end_sec - start_sec + 1);
pp->sys_ind = 0x83; /* plain Linux partition */
}
}
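The scsi_debug change above makes map_index_to_lba() the inverse of lba_to_map_index() again when an unmap alignment is configured. A small worked example (GRANULARITY and ALIGNMENT stand in for the scsi_debug_unmap_granularity/alignment module parameters; lba_to_map_index() is reconstructed from the same file rather than shown in the hunk, so treat its body as an assumption):

    #include <stdio.h>

    #define GRANULARITY 4ULL        /* example scsi_debug_unmap_granularity */
    #define ALIGNMENT   1ULL        /* example scsi_debug_unmap_alignment */

    static unsigned long long lba_to_map_index(unsigned long long lba)
    {
            if (ALIGNMENT)
                    lba += GRANULARITY - ALIGNMENT;
            return lba / GRANULARITY;
    }

    /* map_index_to_lba() as rewritten by the patch */
    static unsigned long long map_index_to_lba(unsigned long long index)
    {
            unsigned long long lba = index * GRANULARITY;

            if (ALIGNMENT)
                    lba -= GRANULARITY - ALIGNMENT;
            return lba;
    }

    int main(void)
    {
            /* LBA 1 -> index 1 -> back to LBA 1; the old code returned
             * index * GRANULARITY - ALIGNMENT = 3 instead. */
            printf("%llu %llu\n", lba_to_map_index(1), map_index_to_lba(1));
            return 0;
    }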
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 21505962f53..83e591b6019 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -223,12 +223,80 @@ static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
}
#endif
+ /**
+ * scsi_report_lun_change - Set flag on all *other* devices on the same target
+ * to indicate that a UNIT ATTENTION is expected.
+ * @sdev: Device reporting the UNIT ATTENTION
+ */
+static void scsi_report_lun_change(struct scsi_device *sdev)
+{
+ sdev->sdev_target->expecting_lun_change = 1;
+}
+
+/**
+ * scsi_report_sense - Examine scsi sense information and log messages for
+ * certain conditions, also issue uevents for some of them.
+ * @sdev: Device reporting the sense code
+ * @sshdr: sshdr to be examined
+ */
+static void scsi_report_sense(struct scsi_device *sdev,
+ struct scsi_sense_hdr *sshdr)
+{
+ enum scsi_device_event evt_type = SDEV_EVT_MAXBITS; /* i.e. none */
+
+ if (sshdr->sense_key == UNIT_ATTENTION) {
+ if (sshdr->asc == 0x3f && sshdr->ascq == 0x03) {
+ evt_type = SDEV_EVT_INQUIRY_CHANGE_REPORTED;
+ sdev_printk(KERN_WARNING, sdev,
+ "Inquiry data has changed");
+ } else if (sshdr->asc == 0x3f && sshdr->ascq == 0x0e) {
+ evt_type = SDEV_EVT_LUN_CHANGE_REPORTED;
+ scsi_report_lun_change(sdev);
+ sdev_printk(KERN_WARNING, sdev,
+ "Warning! Received an indication that the "
+ "LUN assignments on this target have "
+ "changed. The Linux SCSI layer does not "
+ "automatically remap LUN assignments.\n");
+ } else if (sshdr->asc == 0x3f)
+ sdev_printk(KERN_WARNING, sdev,
+ "Warning! Received an indication that the "
+ "operating parameters on this target have "
+ "changed. The Linux SCSI layer does not "
+ "automatically adjust these parameters.\n");
+
+ if (sshdr->asc == 0x38 && sshdr->ascq == 0x07) {
+ evt_type = SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED;
+ sdev_printk(KERN_WARNING, sdev,
+ "Warning! Received an indication that the "
+ "LUN reached a thin provisioning soft "
+ "threshold.\n");
+ }
+
+ if (sshdr->asc == 0x2a && sshdr->ascq == 0x01) {
+ evt_type = SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED;
+ sdev_printk(KERN_WARNING, sdev,
+ "Mode parameters changed");
+ } else if (sshdr->asc == 0x2a && sshdr->ascq == 0x09) {
+ evt_type = SDEV_EVT_CAPACITY_CHANGE_REPORTED;
+ sdev_printk(KERN_WARNING, sdev,
+ "Capacity data has changed");
+ } else if (sshdr->asc == 0x2a)
+ sdev_printk(KERN_WARNING, sdev,
+ "Parameters changed");
+ }
+
+ if (evt_type != SDEV_EVT_MAXBITS) {
+ set_bit(evt_type, sdev->pending_events);
+ schedule_work(&sdev->event_work);
+ }
+}
+
/**
* scsi_check_sense - Examine scsi cmd sense
* @scmd: Cmd to have sense checked.
*
* Return value:
- * SUCCESS or FAILED or NEEDS_RETRY or TARGET_ERROR
+ * SUCCESS or FAILED or NEEDS_RETRY or ADD_TO_MLQUEUE
*
* Notes:
* When a deferred error is detected the current command has
@@ -250,6 +318,8 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
*/
return SUCCESS;
+ scsi_report_sense(sdev, &sshdr);
+
if (scsi_sense_is_deferred(&sshdr))
return NEEDS_RETRY;
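
scsi_report_sense() above is essentially a classifier over the UNIT ATTENTION additional sense code/qualifier (ASC/ASCQ) pair: a handful of codes select a device event, the rest are only logged. The table below restates that mapping as a standalone userspace sketch (event names reduced to the strings that later appear as SDEV_UA= uevent values); in the driver the match additionally sets a bit in sdev->pending_events and schedules sdev->event_work.

#include <stdio.h>

struct ua_map {
	unsigned char asc, ascq;	/* additional sense code / qualifier */
	const char *event;
};

static const struct ua_map table[] = {
	{ 0x3f, 0x03, "INQUIRY_DATA_HAS_CHANGED" },
	{ 0x3f, 0x0e, "REPORTED_LUNS_DATA_HAS_CHANGED" },
	{ 0x38, 0x07, "THIN_PROVISIONING_SOFT_THRESHOLD_REACHED" },
	{ 0x2a, 0x01, "MODE_PARAMETERS_CHANGED" },
	{ 0x2a, 0x09, "CAPACITY_DATA_HAS_CHANGED" },
};

static const char *classify(unsigned char asc, unsigned char ascq)
{
	unsigned int i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].asc == asc && table[i].ascq == ascq)
			return table[i].event;
	return "no event (logged only)";
}

int main(void)
{
	printf("2a/09 -> %s\n", classify(0x2a, 0x09));
	printf("3f/0e -> %s\n", classify(0x3f, 0x0e));
	printf("3f/05 -> %s\n", classify(0x3f, 0x05));
	return 0;
}
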
@@ -315,6 +385,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
}
}
/*
+ * we might also expect a cc/ua if another LUN on the target
+ * reported a UA with an ASC/ASCQ of 3F 0E -
+ * REPORTED LUNS DATA HAS CHANGED.
+ */
+ if (scmd->device->sdev_target->expecting_lun_change &&
+ sshdr.asc == 0x3f && sshdr.ascq == 0x0e)
+ return NEEDS_RETRY;
+ /*
* if the device is in the process of becoming ready, we
* should retry.
*/
@@ -327,26 +405,6 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
if (scmd->device->allow_restart &&
(sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
return FAILED;
-
- if (sshdr.asc == 0x3f && sshdr.ascq == 0x0e)
- scmd_printk(KERN_WARNING, scmd,
- "Warning! Received an indication that the "
- "LUN assignments on this target have "
- "changed. The Linux SCSI layer does not "
- "automatically remap LUN assignments.\n");
- else if (sshdr.asc == 0x3f)
- scmd_printk(KERN_WARNING, scmd,
- "Warning! Received an indication that the "
- "operating parameters on this target have "
- "changed. The Linux SCSI layer does not "
- "automatically adjust these parameters.\n");
-
- if (sshdr.asc == 0x38 && sshdr.ascq == 0x07)
- scmd_printk(KERN_WARNING, scmd,
- "Warning! Received an indication that the "
- "LUN reached a thin provisioning soft "
- "threshold.\n");
-
/*
* Pass the UA upwards for a determination in the completion
* functions.
@@ -354,18 +412,25 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
return SUCCESS;
/* these are not supported */
+ case DATA_PROTECT:
+ if (sshdr.asc == 0x27 && sshdr.ascq == 0x07) {
+ /* Thin provisioning hard threshold reached */
+ set_host_byte(scmd, DID_ALLOC_FAILURE);
+ return SUCCESS;
+ }
case COPY_ABORTED:
case VOLUME_OVERFLOW:
case MISCOMPARE:
case BLANK_CHECK:
- case DATA_PROTECT:
- return TARGET_ERROR;
+ set_host_byte(scmd, DID_TARGET_FAILURE);
+ return SUCCESS;
case MEDIUM_ERROR:
if (sshdr.asc == 0x11 || /* UNRECOVERED READ ERR */
sshdr.asc == 0x13 || /* AMNF DATA FIELD */
sshdr.asc == 0x14) { /* RECORD NOT FOUND */
- return TARGET_ERROR;
+ set_host_byte(scmd, DID_MEDIUM_ERROR);
+ return SUCCESS;
}
return NEEDS_RETRY;
@@ -373,14 +438,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
if (scmd->device->retry_hwerror)
return ADD_TO_MLQUEUE;
else
- return TARGET_ERROR;
+ set_host_byte(scmd, DID_TARGET_FAILURE);
case ILLEGAL_REQUEST:
if (sshdr.asc == 0x20 || /* Invalid command operation code */
sshdr.asc == 0x21 || /* Logical block address out of range */
sshdr.asc == 0x24 || /* Invalid field in cdb */
sshdr.asc == 0x26) { /* Parameter value invalid */
- return TARGET_ERROR;
+ set_host_byte(scmd, DID_TARGET_FAILURE);
}
return SUCCESS;
@@ -843,7 +908,6 @@ retry:
case SUCCESS:
case NEEDS_RETRY:
case FAILED:
- case TARGET_ERROR:
break;
case ADD_TO_MLQUEUE:
rtn = NEEDS_RETRY;
@@ -1568,6 +1632,8 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
*/
return ADD_TO_MLQUEUE;
case GOOD:
+ if (scmd->cmnd[0] == REPORT_LUNS)
+ scmd->device->sdev_target->expecting_lun_change = 0;
scsi_handle_queue_ramp_up(scmd->device);
case COMMAND_TERMINATED:
return SUCCESS;
@@ -1577,14 +1643,6 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
rtn = scsi_check_sense(scmd);
if (rtn == NEEDS_RETRY)
goto maybe_retry;
- else if (rtn == TARGET_ERROR) {
- /*
- * Need to modify host byte to signal a
- * permanent target failure
- */
- set_host_byte(scmd, DID_TARGET_FAILURE);
- rtn = SUCCESS;
- }
/* if rtn == FAILED, we have no sense information;
* returning FAILED will wake the error handler thread
* to collect the sense and redo the decide
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 124392f3091..d1549b74e2d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -68,28 +68,6 @@ static struct scsi_host_sg_pool scsi_sg_pools[] = {
struct kmem_cache *scsi_sdb_cache;
-#ifdef CONFIG_ACPI
-#include <acpi/acpi_bus.h>
-
-static bool acpi_scsi_bus_match(struct device *dev)
-{
- return dev->bus == &scsi_bus_type;
-}
-
-int scsi_register_acpi_bus_type(struct acpi_bus_type *bus)
-{
- bus->match = acpi_scsi_bus_match;
- return register_acpi_bus_type(bus);
-}
-EXPORT_SYMBOL_GPL(scsi_register_acpi_bus_type);
-
-void scsi_unregister_acpi_bus_type(struct acpi_bus_type *bus)
-{
- unregister_acpi_bus_type(bus);
-}
-EXPORT_SYMBOL_GPL(scsi_unregister_acpi_bus_type);
-#endif
-
/*
* When to reinvoke queueing after a resource shortage. It's 3 msecs to
* not change behaviour from the previous unplug mechanism, experimentation
@@ -716,6 +694,20 @@ void scsi_release_buffers(struct scsi_cmnd *cmd)
}
EXPORT_SYMBOL(scsi_release_buffers);
+/**
+ * __scsi_error_from_host_byte - translate SCSI error code into errno
+ * @cmd: SCSI command (unused)
+ * @result: scsi error code
+ *
+ * Translate SCSI error code into standard UNIX errno.
+ * Return values:
+ * -ENOLINK temporary transport failure
+ * -EREMOTEIO permanent target failure, do not retry
+ * -EBADE permanent nexus failure, retry on other path
+ * -ENOSPC No write space available
+ * -ENODATA Medium error
+ * -EIO unspecified I/O error
+ */
static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
{
int error = 0;
@@ -732,6 +724,14 @@ static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
set_host_byte(cmd, DID_OK);
error = -EBADE;
break;
+ case DID_ALLOC_FAILURE:
+ set_host_byte(cmd, DID_OK);
+ error = -ENOSPC;
+ break;
+ case DID_MEDIUM_ERROR:
+ set_host_byte(cmd, DID_OK);
+ error = -ENODATA;
+ break;
default:
error = -EIO;
break;
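
The two new cases extend the host-byte-to-errno translation described in the kernel-doc comment above, letting upper layers tell "no space" (DID_ALLOC_FAILURE) and "bad medium" (DID_MEDIUM_ERROR) completions apart from a generic -EIO. Restated as a plain userspace switch (the HB_* enum is an invented stand-in for the kernel's DID_* host byte values):

#include <errno.h>
#include <stdio.h>

enum host_byte {
	HB_TRANSPORT_FAILFAST, HB_TARGET_FAILURE, HB_NEXUS_FAILURE,
	HB_ALLOC_FAILURE, HB_MEDIUM_ERROR, HB_OTHER
};

static int host_byte_to_errno(enum host_byte hb)
{
	switch (hb) {
	case HB_TRANSPORT_FAILFAST: return -ENOLINK;   /* temporary transport failure */
	case HB_TARGET_FAILURE:     return -EREMOTEIO; /* permanent target failure */
	case HB_NEXUS_FAILURE:      return -EBADE;     /* retry on another path */
	case HB_ALLOC_FAILURE:      return -ENOSPC;    /* no write space available */
	case HB_MEDIUM_ERROR:       return -ENODATA;   /* unrecoverable medium error */
	default:                    return -EIO;
	}
}

int main(void)
{
	printf("alloc failure -> %d\n", host_byte_to_errno(HB_ALLOC_FAILURE));
	printf("medium error  -> %d\n", host_byte_to_errno(HB_MEDIUM_ERROR));
	return 0;
}
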
@@ -2231,7 +2231,21 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
case SDEV_EVT_MEDIA_CHANGE:
envp[idx++] = "SDEV_MEDIA_CHANGE=1";
break;
-
+ case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
+ envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
+ break;
+ case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
+ envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
+ break;
+ case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
+ envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
+ break;
+ case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
+ envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
+ break;
+ case SDEV_EVT_LUN_CHANGE_REPORTED:
+ envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
+ break;
default:
/* do nothing */
break;
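
Each new case only adds an SDEV_UA=... key to the uevent environment; userspace sees it on the kernel uevent netlink socket (or through udev rules matching those keys). A minimal listener, ordinary NETLINK_KOBJECT_UEVENT usage and not SCSI-specific, prints the uevent header and any SDEV_UA keys as they arrive; depending on the system it may need elevated privileges:

#include <linux/netlink.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_groups = 1,		/* kernel uevent multicast group */
	};
	char buf[4096];
	int fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("uevent socket");
		return 1;
	}
	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf) - 1, 0);
		ssize_t off;

		if (len <= 0)
			break;
		buf[len] = '\0';
		/* payload is "ACTION@devpath" followed by NUL-separated KEY=VALUE pairs */
		for (off = 0; off < len; off += strlen(buf + off) + 1)
			if (!strncmp(buf + off, "SDEV_UA=", 8) || strchr(buf + off, '@'))
				printf("%s\n", buf + off);
	}
	close(fd);
	return 0;
}
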
@@ -2252,10 +2266,15 @@ static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
void scsi_evt_thread(struct work_struct *work)
{
struct scsi_device *sdev;
+ enum scsi_device_event evt_type;
LIST_HEAD(event_list);
sdev = container_of(work, struct scsi_device, event_work);
+ for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
+ if (test_and_clear_bit(evt_type, sdev->pending_events))
+ sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);
+
while (1) {
struct scsi_event *evt;
struct list_head *this, *tmp;
@@ -2325,6 +2344,11 @@ struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
/* evt_type-specific initialization, if any */
switch (evt_type) {
case SDEV_EVT_MEDIA_CHANGE:
+ case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
+ case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
+ case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
+ case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
+ case SDEV_EVT_LUN_CHANGE_REPORTED:
default:
/* do nothing */
break;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 7e50061e9ef..40c639491b2 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -739,6 +739,11 @@ sdev_store_evt_##name(struct device *dev, struct device_attribute *attr,\
#define REF_EVT(name) &dev_attr_evt_##name.attr
DECLARE_EVT(media_change, MEDIA_CHANGE)
+DECLARE_EVT(inquiry_change_reported, INQUIRY_CHANGE_REPORTED)
+DECLARE_EVT(capacity_change_reported, CAPACITY_CHANGE_REPORTED)
+DECLARE_EVT(soft_threshold_reached, SOFT_THRESHOLD_REACHED_REPORTED)
+DECLARE_EVT(mode_parameter_change_reported, MODE_PARAMETER_CHANGE_REPORTED)
+DECLARE_EVT(lun_change_reported, LUN_CHANGE_REPORTED)
/* Default template for device attributes. May NOT be modified */
static struct attribute *scsi_sdev_attrs[] = {
@@ -759,6 +764,11 @@ static struct attribute *scsi_sdev_attrs[] = {
&dev_attr_ioerr_cnt.attr,
&dev_attr_modalias.attr,
REF_EVT(media_change),
+ REF_EVT(inquiry_change_reported),
+ REF_EVT(capacity_change_reported),
+ REF_EVT(soft_threshold_reached),
+ REF_EVT(mode_parameter_change_reported),
+ REF_EVT(lun_change_reported),
NULL
};
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index abf7c402e1a..e4a989fa477 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -25,7 +25,6 @@
#include <linux/slab.h>
#include <linux/bsg-lib.h>
#include <linux/idr.h>
-#include <linux/list.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -3327,6 +3326,23 @@ iscsi_conn_attr(exp_statsn, ISCSI_PARAM_EXP_STATSN);
iscsi_conn_attr(persistent_address, ISCSI_PARAM_PERSISTENT_ADDRESS);
iscsi_conn_attr(ping_tmo, ISCSI_PARAM_PING_TMO);
iscsi_conn_attr(recv_tmo, ISCSI_PARAM_RECV_TMO);
+iscsi_conn_attr(local_port, ISCSI_PARAM_LOCAL_PORT);
+iscsi_conn_attr(statsn, ISCSI_PARAM_STATSN);
+iscsi_conn_attr(keepalive_tmo, ISCSI_PARAM_KEEPALIVE_TMO);
+iscsi_conn_attr(max_segment_size, ISCSI_PARAM_MAX_SEGMENT_SIZE);
+iscsi_conn_attr(tcp_timestamp_stat, ISCSI_PARAM_TCP_TIMESTAMP_STAT);
+iscsi_conn_attr(tcp_wsf_disable, ISCSI_PARAM_TCP_WSF_DISABLE);
+iscsi_conn_attr(tcp_nagle_disable, ISCSI_PARAM_TCP_NAGLE_DISABLE);
+iscsi_conn_attr(tcp_timer_scale, ISCSI_PARAM_TCP_TIMER_SCALE);
+iscsi_conn_attr(tcp_timestamp_enable, ISCSI_PARAM_TCP_TIMESTAMP_EN);
+iscsi_conn_attr(fragment_disable, ISCSI_PARAM_IP_FRAGMENT_DISABLE);
+iscsi_conn_attr(ipv4_tos, ISCSI_PARAM_IPV4_TOS);
+iscsi_conn_attr(ipv6_traffic_class, ISCSI_PARAM_IPV6_TC);
+iscsi_conn_attr(ipv6_flow_label, ISCSI_PARAM_IPV6_FLOW_LABEL);
+iscsi_conn_attr(is_fw_assigned_ipv6, ISCSI_PARAM_IS_FW_ASSIGNED_IPV6);
+iscsi_conn_attr(tcp_xmit_wsf, ISCSI_PARAM_TCP_XMIT_WSF);
+iscsi_conn_attr(tcp_recv_wsf, ISCSI_PARAM_TCP_RECV_WSF);
+
#define iscsi_conn_ep_attr_show(param) \
static ssize_t show_conn_ep_param_##param(struct device *dev, \
@@ -3379,6 +3395,22 @@ static struct attribute *iscsi_conn_attrs[] = {
&dev_attr_conn_persistent_port.attr,
&dev_attr_conn_ping_tmo.attr,
&dev_attr_conn_recv_tmo.attr,
+ &dev_attr_conn_local_port.attr,
+ &dev_attr_conn_statsn.attr,
+ &dev_attr_conn_keepalive_tmo.attr,
+ &dev_attr_conn_max_segment_size.attr,
+ &dev_attr_conn_tcp_timestamp_stat.attr,
+ &dev_attr_conn_tcp_wsf_disable.attr,
+ &dev_attr_conn_tcp_nagle_disable.attr,
+ &dev_attr_conn_tcp_timer_scale.attr,
+ &dev_attr_conn_tcp_timestamp_enable.attr,
+ &dev_attr_conn_fragment_disable.attr,
+ &dev_attr_conn_ipv4_tos.attr,
+ &dev_attr_conn_ipv6_traffic_class.attr,
+ &dev_attr_conn_ipv6_flow_label.attr,
+ &dev_attr_conn_is_fw_assigned_ipv6.attr,
+ &dev_attr_conn_tcp_xmit_wsf.attr,
+ &dev_attr_conn_tcp_recv_wsf.attr,
NULL,
};
@@ -3416,6 +3448,38 @@ static umode_t iscsi_conn_attr_is_visible(struct kobject *kobj,
param = ISCSI_PARAM_PING_TMO;
else if (attr == &dev_attr_conn_recv_tmo.attr)
param = ISCSI_PARAM_RECV_TMO;
+ else if (attr == &dev_attr_conn_local_port.attr)
+ param = ISCSI_PARAM_LOCAL_PORT;
+ else if (attr == &dev_attr_conn_statsn.attr)
+ param = ISCSI_PARAM_STATSN;
+ else if (attr == &dev_attr_conn_keepalive_tmo.attr)
+ param = ISCSI_PARAM_KEEPALIVE_TMO;
+ else if (attr == &dev_attr_conn_max_segment_size.attr)
+ param = ISCSI_PARAM_MAX_SEGMENT_SIZE;
+ else if (attr == &dev_attr_conn_tcp_timestamp_stat.attr)
+ param = ISCSI_PARAM_TCP_TIMESTAMP_STAT;
+ else if (attr == &dev_attr_conn_tcp_wsf_disable.attr)
+ param = ISCSI_PARAM_TCP_WSF_DISABLE;
+ else if (attr == &dev_attr_conn_tcp_nagle_disable.attr)
+ param = ISCSI_PARAM_TCP_NAGLE_DISABLE;
+ else if (attr == &dev_attr_conn_tcp_timer_scale.attr)
+ param = ISCSI_PARAM_TCP_TIMER_SCALE;
+ else if (attr == &dev_attr_conn_tcp_timestamp_enable.attr)
+ param = ISCSI_PARAM_TCP_TIMESTAMP_EN;
+ else if (attr == &dev_attr_conn_fragment_disable.attr)
+ param = ISCSI_PARAM_IP_FRAGMENT_DISABLE;
+ else if (attr == &dev_attr_conn_ipv4_tos.attr)
+ param = ISCSI_PARAM_IPV4_TOS;
+ else if (attr == &dev_attr_conn_ipv6_traffic_class.attr)
+ param = ISCSI_PARAM_IPV6_TC;
+ else if (attr == &dev_attr_conn_ipv6_flow_label.attr)
+ param = ISCSI_PARAM_IPV6_FLOW_LABEL;
+ else if (attr == &dev_attr_conn_is_fw_assigned_ipv6.attr)
+ param = ISCSI_PARAM_IS_FW_ASSIGNED_IPV6;
+ else if (attr == &dev_attr_conn_tcp_xmit_wsf.attr)
+ param = ISCSI_PARAM_TCP_XMIT_WSF;
+ else if (attr == &dev_attr_conn_tcp_recv_wsf.attr)
+ param = ISCSI_PARAM_TCP_RECV_WSF;
else {
WARN_ONCE(1, "Invalid conn attr");
return 0;
@@ -3476,6 +3540,21 @@ iscsi_session_attr(targetalias, ISCSI_PARAM_TARGET_ALIAS, 0);
iscsi_session_attr(boot_root, ISCSI_PARAM_BOOT_ROOT, 0);
iscsi_session_attr(boot_nic, ISCSI_PARAM_BOOT_NIC, 0);
iscsi_session_attr(boot_target, ISCSI_PARAM_BOOT_TARGET, 0);
+iscsi_session_attr(auto_snd_tgt_disable, ISCSI_PARAM_AUTO_SND_TGT_DISABLE, 0);
+iscsi_session_attr(discovery_session, ISCSI_PARAM_DISCOVERY_SESS, 0);
+iscsi_session_attr(portal_type, ISCSI_PARAM_PORTAL_TYPE, 0);
+iscsi_session_attr(chap_auth, ISCSI_PARAM_CHAP_AUTH_EN, 0);
+iscsi_session_attr(discovery_logout, ISCSI_PARAM_DISCOVERY_LOGOUT_EN, 0);
+iscsi_session_attr(bidi_chap, ISCSI_PARAM_BIDI_CHAP_EN, 0);
+iscsi_session_attr(discovery_auth_optional,
+ ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL, 0);
+iscsi_session_attr(def_time2wait, ISCSI_PARAM_DEF_TIME2WAIT, 0);
+iscsi_session_attr(def_time2retain, ISCSI_PARAM_DEF_TIME2RETAIN, 0);
+iscsi_session_attr(isid, ISCSI_PARAM_ISID, 0);
+iscsi_session_attr(tsid, ISCSI_PARAM_TSID, 0);
+iscsi_session_attr(def_taskmgmt_tmo, ISCSI_PARAM_DEF_TASKMGMT_TMO, 0);
+iscsi_session_attr(discovery_parent_idx, ISCSI_PARAM_DISCOVERY_PARENT_IDX, 0);
+iscsi_session_attr(discovery_parent_type, ISCSI_PARAM_DISCOVERY_PARENT_TYPE, 0);
static ssize_t
show_priv_session_state(struct device *dev, struct device_attribute *attr,
@@ -3580,6 +3659,20 @@ static struct attribute *iscsi_session_attrs[] = {
&dev_attr_sess_chap_out_idx.attr,
&dev_attr_sess_chap_in_idx.attr,
&dev_attr_priv_sess_target_id.attr,
+ &dev_attr_sess_auto_snd_tgt_disable.attr,
+ &dev_attr_sess_discovery_session.attr,
+ &dev_attr_sess_portal_type.attr,
+ &dev_attr_sess_chap_auth.attr,
+ &dev_attr_sess_discovery_logout.attr,
+ &dev_attr_sess_bidi_chap.attr,
+ &dev_attr_sess_discovery_auth_optional.attr,
+ &dev_attr_sess_def_time2wait.attr,
+ &dev_attr_sess_def_time2retain.attr,
+ &dev_attr_sess_isid.attr,
+ &dev_attr_sess_tsid.attr,
+ &dev_attr_sess_def_taskmgmt_tmo.attr,
+ &dev_attr_sess_discovery_parent_idx.attr,
+ &dev_attr_sess_discovery_parent_type.attr,
NULL,
};
@@ -3643,6 +3736,34 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
param = ISCSI_PARAM_BOOT_NIC;
else if (attr == &dev_attr_sess_boot_target.attr)
param = ISCSI_PARAM_BOOT_TARGET;
+ else if (attr == &dev_attr_sess_auto_snd_tgt_disable.attr)
+ param = ISCSI_PARAM_AUTO_SND_TGT_DISABLE;
+ else if (attr == &dev_attr_sess_discovery_session.attr)
+ param = ISCSI_PARAM_DISCOVERY_SESS;
+ else if (attr == &dev_attr_sess_portal_type.attr)
+ param = ISCSI_PARAM_PORTAL_TYPE;
+ else if (attr == &dev_attr_sess_chap_auth.attr)
+ param = ISCSI_PARAM_CHAP_AUTH_EN;
+ else if (attr == &dev_attr_sess_discovery_logout.attr)
+ param = ISCSI_PARAM_DISCOVERY_LOGOUT_EN;
+ else if (attr == &dev_attr_sess_bidi_chap.attr)
+ param = ISCSI_PARAM_BIDI_CHAP_EN;
+ else if (attr == &dev_attr_sess_discovery_auth_optional.attr)
+ param = ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL;
+ else if (attr == &dev_attr_sess_def_time2wait.attr)
+ param = ISCSI_PARAM_DEF_TIME2WAIT;
+ else if (attr == &dev_attr_sess_def_time2retain.attr)
+ param = ISCSI_PARAM_DEF_TIME2RETAIN;
+ else if (attr == &dev_attr_sess_isid.attr)
+ param = ISCSI_PARAM_ISID;
+ else if (attr == &dev_attr_sess_tsid.attr)
+ param = ISCSI_PARAM_TSID;
+ else if (attr == &dev_attr_sess_def_taskmgmt_tmo.attr)
+ param = ISCSI_PARAM_DEF_TASKMGMT_TMO;
+ else if (attr == &dev_attr_sess_discovery_parent_idx.attr)
+ param = ISCSI_PARAM_DISCOVERY_PARENT_IDX;
+ else if (attr == &dev_attr_sess_discovery_parent_type.attr)
+ param = ISCSI_PARAM_DISCOVERY_PARENT_TYPE;
else if (attr == &dev_attr_priv_sess_recovery_tmo.attr)
return S_IRUGO | S_IWUSR;
else if (attr == &dev_attr_priv_sess_state.attr)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 80f39b8b022..b58e8f815a0 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -132,8 +132,8 @@ static const char *sd_cache_types[] = {
};
static ssize_t
-sd_store_cache_type(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+cache_type_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int i, ct = -1, rcd, wce, sp;
struct scsi_disk *sdkp = to_scsi_disk(dev);
@@ -199,8 +199,18 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-sd_store_manage_start_stop(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+manage_start_stop_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdp = sdkp->device;
+
+ return snprintf(buf, 20, "%u\n", sdp->manage_start_stop);
+}
+
+static ssize_t
+manage_start_stop_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
@@ -212,10 +222,19 @@ sd_store_manage_start_stop(struct device *dev, struct device_attribute *attr,
return count;
}
+static DEVICE_ATTR_RW(manage_start_stop);
static ssize_t
-sd_store_allow_restart(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart);
+}
+
+static ssize_t
+allow_restart_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
@@ -230,47 +249,30 @@ sd_store_allow_restart(struct device *dev, struct device_attribute *attr,
return count;
}
+static DEVICE_ATTR_RW(allow_restart);
static ssize_t
-sd_show_cache_type(struct device *dev, struct device_attribute *attr,
- char *buf)
+cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
int ct = sdkp->RCD + 2*sdkp->WCE;
return snprintf(buf, 40, "%s\n", sd_cache_types[ct]);
}
+static DEVICE_ATTR_RW(cache_type);
static ssize_t
-sd_show_fua(struct device *dev, struct device_attribute *attr, char *buf)
+FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
return snprintf(buf, 20, "%u\n", sdkp->DPOFUA);
}
+static DEVICE_ATTR_RO(FUA);
static ssize_t
-sd_show_manage_start_stop(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct scsi_disk *sdkp = to_scsi_disk(dev);
- struct scsi_device *sdp = sdkp->device;
-
- return snprintf(buf, 20, "%u\n", sdp->manage_start_stop);
-}
-
-static ssize_t
-sd_show_allow_restart(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct scsi_disk *sdkp = to_scsi_disk(dev);
-
- return snprintf(buf, 40, "%d\n", sdkp->device->allow_restart);
-}
-
-static ssize_t
-sd_show_protection_type(struct device *dev, struct device_attribute *attr,
- char *buf)
+protection_type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
@@ -278,8 +280,8 @@ sd_show_protection_type(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-sd_store_protection_type(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+protection_type_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
unsigned int val;
@@ -298,10 +300,11 @@ sd_store_protection_type(struct device *dev, struct device_attribute *attr,
return count;
}
+static DEVICE_ATTR_RW(protection_type);
static ssize_t
-sd_show_protection_mode(struct device *dev, struct device_attribute *attr,
- char *buf)
+protection_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
@@ -320,24 +323,26 @@ sd_show_protection_mode(struct device *dev, struct device_attribute *attr,
return snprintf(buf, 20, "%s%u\n", dix ? "dix" : "dif", dif);
}
+static DEVICE_ATTR_RO(protection_mode);
static ssize_t
-sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
- char *buf)
+app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
return snprintf(buf, 20, "%u\n", sdkp->ATO);
}
+static DEVICE_ATTR_RO(app_tag_own);
static ssize_t
-sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr,
- char *buf)
+thin_provisioning_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
return snprintf(buf, 20, "%u\n", sdkp->lbpme);
}
+static DEVICE_ATTR_RO(thin_provisioning);
static const char *lbp_mode[] = {
[SD_LBP_FULL] = "full",
@@ -349,8 +354,8 @@ static const char *lbp_mode[] = {
};
static ssize_t
-sd_show_provisioning_mode(struct device *dev, struct device_attribute *attr,
- char *buf)
+provisioning_mode_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
@@ -358,8 +363,8 @@ sd_show_provisioning_mode(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-sd_store_provisioning_mode(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+provisioning_mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
@@ -385,10 +390,11 @@ sd_store_provisioning_mode(struct device *dev, struct device_attribute *attr,
return count;
}
+static DEVICE_ATTR_RW(provisioning_mode);
static ssize_t
-sd_show_max_medium_access_timeouts(struct device *dev,
- struct device_attribute *attr, char *buf)
+max_medium_access_timeouts_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
@@ -396,9 +402,9 @@ sd_show_max_medium_access_timeouts(struct device *dev,
}
static ssize_t
-sd_store_max_medium_access_timeouts(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+max_medium_access_timeouts_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
int err;
@@ -410,10 +416,11 @@ sd_store_max_medium_access_timeouts(struct device *dev,
return err ? err : count;
}
+static DEVICE_ATTR_RW(max_medium_access_timeouts);
static ssize_t
-sd_show_write_same_blocks(struct device *dev, struct device_attribute *attr,
- char *buf)
+max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
@@ -421,8 +428,8 @@ sd_show_write_same_blocks(struct device *dev, struct device_attribute *attr,
}
static ssize_t
-sd_store_write_same_blocks(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct scsi_disk *sdkp = to_scsi_disk(dev);
struct scsi_device *sdp = sdkp->device;
@@ -451,35 +458,29 @@ sd_store_write_same_blocks(struct device *dev, struct device_attribute *attr,
return count;
}
-
-static struct device_attribute sd_disk_attrs[] = {
- __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
- sd_store_cache_type),
- __ATTR(FUA, S_IRUGO, sd_show_fua, NULL),
- __ATTR(allow_restart, S_IRUGO|S_IWUSR, sd_show_allow_restart,
- sd_store_allow_restart),
- __ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop,
- sd_store_manage_start_stop),
- __ATTR(protection_type, S_IRUGO|S_IWUSR, sd_show_protection_type,
- sd_store_protection_type),
- __ATTR(protection_mode, S_IRUGO, sd_show_protection_mode, NULL),
- __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
- __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
- __ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode,
- sd_store_provisioning_mode),
- __ATTR(max_write_same_blocks, S_IRUGO|S_IWUSR,
- sd_show_write_same_blocks, sd_store_write_same_blocks),
- __ATTR(max_medium_access_timeouts, S_IRUGO|S_IWUSR,
- sd_show_max_medium_access_timeouts,
- sd_store_max_medium_access_timeouts),
- __ATTR_NULL,
+static DEVICE_ATTR_RW(max_write_same_blocks);
+
+static struct attribute *sd_disk_attrs[] = {
+ &dev_attr_cache_type.attr,
+ &dev_attr_FUA.attr,
+ &dev_attr_allow_restart.attr,
+ &dev_attr_manage_start_stop.attr,
+ &dev_attr_protection_type.attr,
+ &dev_attr_protection_mode.attr,
+ &dev_attr_app_tag_own.attr,
+ &dev_attr_thin_provisioning.attr,
+ &dev_attr_provisioning_mode.attr,
+ &dev_attr_max_write_same_blocks.attr,
+ &dev_attr_max_medium_access_timeouts.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(sd_disk);
static struct class sd_disk_class = {
.name = "scsi_disk",
.owner = THIS_MODULE,
.dev_release = scsi_disk_release,
- .dev_attrs = sd_disk_attrs,
+ .dev_groups = sd_disk_groups,
};
static const struct dev_pm_ops sd_pm_ops = {
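
The sd.c rework (st.c follows the same recipe further down) is a mechanical conversion to the <name>_show()/<name>_store() naming that DEVICE_ATTR_RW()/DEVICE_ATTR_RO() expect, plus ATTRIBUTE_GROUPS() so the class can publish .dev_groups instead of the removed .dev_attrs array. Stripped to a skeleton (a sketch with an invented "example" attribute, not a complete driver), the pattern is:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", 42);
}

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	/* parse and apply the value here */
	return count;
}
/* defines dev_attr_example with 0644 permissions */
static DEVICE_ATTR_RW(example);

static struct attribute *demo_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
/* defines demo_group and the NULL-terminated demo_groups[] */
ATTRIBUTE_GROUPS(demo);

static struct class demo_class = {
	.name       = "demo",
	.owner      = THIS_MODULE,
	.dev_groups = demo_groups,	/* replaces the old .dev_attrs pointer */
};
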
@@ -838,10 +839,17 @@ static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
static void sd_unprep_fn(struct request_queue *q, struct request *rq)
{
+ struct scsi_cmnd *SCpnt = rq->special;
+
if (rq->cmd_flags & REQ_DISCARD) {
free_page((unsigned long)rq->buffer);
rq->buffer = NULL;
}
+ if (SCpnt->cmnd != rq->cmd) {
+ mempool_free(SCpnt->cmnd, sd_cdb_pool);
+ SCpnt->cmnd = NULL;
+ SCpnt->cmd_len = 0;
+ }
}
/**
@@ -1720,21 +1728,6 @@ static int sd_done(struct scsi_cmnd *SCpnt)
if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt))
sd_dif_complete(SCpnt, good_bytes);
- if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type)
- == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) {
-
- /* We have to print a failed command here as the
- * extended CDB gets freed before scsi_io_completion()
- * is called.
- */
- if (result)
- scsi_print_command(SCpnt);
-
- mempool_free(SCpnt->cmnd, sd_cdb_pool);
- SCpnt->cmnd = NULL;
- SCpnt->cmd_len = 0;
- }
-
return good_bytes;
}
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index df5e961484e..5cbc4bb1b39 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -105,11 +105,8 @@ static int scatter_elem_sz_prev = SG_SCATTER_SZ;
static int sg_add(struct device *, struct class_interface *);
static void sg_remove(struct device *, struct class_interface *);
-static DEFINE_SPINLOCK(sg_open_exclusive_lock);
-
static DEFINE_IDR(sg_index_idr);
-static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock
- file descriptor list for device */
+static DEFINE_RWLOCK(sg_index_lock);
static struct class_interface sg_interface = {
.add_dev = sg_add,
@@ -146,8 +143,7 @@ typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */
} Sg_request;
typedef struct sg_fd { /* holds the state of a file descriptor */
- /* sfd_siblings is protected by sg_index_lock */
- struct list_head sfd_siblings;
+ struct list_head sfd_siblings; /* protected by sfd_lock of device */
struct sg_device *parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */
rwlock_t rq_list_lock; /* protect access to list in req_arr */
@@ -170,13 +166,12 @@ typedef struct sg_fd { /* holds the state of a file descriptor */
typedef struct sg_device { /* holds the state of each scsi generic device */
struct scsi_device *device;
- wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */
int sg_tablesize; /* adapter's max scatter-gather table size */
u32 index; /* device index number */
- /* sfds is protected by sg_index_lock */
+ spinlock_t sfd_lock; /* protect file descriptor list for device */
struct list_head sfds;
+ struct rw_semaphore o_sem; /* exclude open should hold this rwsem */
volatile char detached; /* 0->attached, 1->detached pending removal */
- /* exclude protected by sg_open_exclusive_lock */
char exclude; /* opened for exclusive access */
char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */
struct gendisk *disk;
@@ -225,35 +220,14 @@ static int sg_allow_access(struct file *filp, unsigned char *cmd)
return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE);
}
-static int get_exclude(Sg_device *sdp)
-{
- unsigned long flags;
- int ret;
-
- spin_lock_irqsave(&sg_open_exclusive_lock, flags);
- ret = sdp->exclude;
- spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
- return ret;
-}
-
-static int set_exclude(Sg_device *sdp, char val)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sg_open_exclusive_lock, flags);
- sdp->exclude = val;
- spin_unlock_irqrestore(&sg_open_exclusive_lock, flags);
- return val;
-}
-
static int sfds_list_empty(Sg_device *sdp)
{
unsigned long flags;
int ret;
- read_lock_irqsave(&sg_index_lock, flags);
+ spin_lock_irqsave(&sdp->sfd_lock, flags);
ret = list_empty(&sdp->sfds);
- read_unlock_irqrestore(&sg_index_lock, flags);
+ spin_unlock_irqrestore(&sdp->sfd_lock, flags);
return ret;
}
@@ -265,7 +239,6 @@ sg_open(struct inode *inode, struct file *filp)
struct request_queue *q;
Sg_device *sdp;
Sg_fd *sfp;
- int res;
int retval;
nonseekable_open(inode, filp);
@@ -294,54 +267,52 @@ sg_open(struct inode *inode, struct file *filp)
goto error_out;
}
- if (flags & O_EXCL) {
- if (O_RDONLY == (flags & O_ACCMODE)) {
- retval = -EPERM; /* Can't lock it with read only access */
- goto error_out;
- }
- if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) {
- retval = -EBUSY;
- goto error_out;
- }
- res = wait_event_interruptible(sdp->o_excl_wait,
- ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 0 : set_exclude(sdp, 1)));
- if (res) {
- retval = res; /* -ERESTARTSYS because signal hit process */
- goto error_out;
- }
- } else if (get_exclude(sdp)) { /* some other fd has an exclusive lock on dev */
- if (flags & O_NONBLOCK) {
- retval = -EBUSY;
- goto error_out;
- }
- res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp));
- if (res) {
- retval = res; /* -ERESTARTSYS because signal hit process */
- goto error_out;
- }
- }
- if (sdp->detached) {
- retval = -ENODEV;
+ if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) {
+ retval = -EPERM; /* Can't lock it with read only access */
goto error_out;
}
+ if (flags & O_NONBLOCK) {
+ if (flags & O_EXCL) {
+ if (!down_write_trylock(&sdp->o_sem)) {
+ retval = -EBUSY;
+ goto error_out;
+ }
+ } else {
+ if (!down_read_trylock(&sdp->o_sem)) {
+ retval = -EBUSY;
+ goto error_out;
+ }
+ }
+ } else {
+ if (flags & O_EXCL)
+ down_write(&sdp->o_sem);
+ else
+ down_read(&sdp->o_sem);
+ }
+ /* Since write lock is held, no need to check sfd_list */
+ if (flags & O_EXCL)
+ sdp->exclude = 1; /* used by release lock */
+
if (sfds_list_empty(sdp)) { /* no existing opens on this device */
sdp->sgdebug = 0;
q = sdp->device->request_queue;
sdp->sg_tablesize = queue_max_segments(q);
}
- if ((sfp = sg_add_sfp(sdp, dev)))
+ sfp = sg_add_sfp(sdp, dev);
+ if (!IS_ERR(sfp))
filp->private_data = sfp;
+ /* retval is already provably zero at this point because of the
+ * check after retval = scsi_autopm_get_device(sdp->device)
+ */
else {
+ retval = PTR_ERR(sfp);
+
if (flags & O_EXCL) {
- set_exclude(sdp, 0); /* undo if error */
- wake_up_interruptible(&sdp->o_excl_wait);
- }
- retval = -ENOMEM;
- goto error_out;
- }
- retval = 0;
+ sdp->exclude = 0; /* undo if error */
+ up_write(&sdp->o_sem);
+ } else
+ up_read(&sdp->o_sem);
error_out:
- if (retval) {
scsi_autopm_put_device(sdp->device);
sdp_put:
scsi_device_put(sdp->device);
@@ -358,13 +329,18 @@ sg_release(struct inode *inode, struct file *filp)
{
Sg_device *sdp;
Sg_fd *sfp;
+ int excl;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name));
- set_exclude(sdp, 0);
- wake_up_interruptible(&sdp->o_excl_wait);
+ excl = sdp->exclude;
+ sdp->exclude = 0;
+ if (excl)
+ up_write(&sdp->o_sem);
+ else
+ up_read(&sdp->o_sem);
scsi_autopm_put_device(sdp->device);
kref_put(&sfp->f_ref, sg_remove_sfp);
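
The sg_open()/sg_release() rework replaces the o_excl_wait/set_exclude() machinery with a read-write semaphore: plain opens share the read side, O_EXCL opens take the write side, and O_NONBLOCK maps to the trylock variants. The same semantics sketched in userspace with a pthread rwlock (do_open()/do_release() are invented wrappers, and error handling is simplified):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

static pthread_rwlock_t o_sem = PTHREAD_RWLOCK_INITIALIZER;
static bool exclusive;			/* plays the role of sdp->exclude */

/* returns 0 on success, -EBUSY when a non-blocking open would have to wait */
static int do_open(bool excl, bool nonblock)
{
	int ret;

	if (nonblock)
		ret = excl ? pthread_rwlock_trywrlock(&o_sem)
			   : pthread_rwlock_tryrdlock(&o_sem);
	else
		ret = excl ? pthread_rwlock_wrlock(&o_sem)
			   : pthread_rwlock_rdlock(&o_sem);
	if (ret)
		return -EBUSY;
	if (excl)
		exclusive = true;
	return 0;
}

static void do_release(void)
{
	/*
	 * pthreads has one unlock call for both sides; the kernel rwsem
	 * needs up_read() or up_write(), which is why sg_release() looks
	 * at sdp->exclude before dropping the semaphore.
	 */
	exclusive = false;
	pthread_rwlock_unlock(&o_sem);
}
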
@@ -1415,8 +1391,9 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp)
disk->first_minor = k;
sdp->disk = disk;
sdp->device = scsidp;
+ spin_lock_init(&sdp->sfd_lock);
INIT_LIST_HEAD(&sdp->sfds);
- init_waitqueue_head(&sdp->o_excl_wait);
+ init_rwsem(&sdp->o_sem);
sdp->sg_tablesize = queue_max_segments(q);
sdp->index = k;
kref_init(&sdp->d_ref);
@@ -1549,11 +1526,13 @@ static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf)
/* Need a write lock to set sdp->detached. */
write_lock_irqsave(&sg_index_lock, iflags);
+ spin_lock(&sdp->sfd_lock);
sdp->detached = 1;
list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) {
wake_up_interruptible(&sfp->read_wait);
kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP);
}
+ spin_unlock(&sdp->sfd_lock);
write_unlock_irqrestore(&sg_index_lock, iflags);
sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
@@ -2064,7 +2043,7 @@ sg_add_sfp(Sg_device * sdp, int dev)
sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
if (!sfp)
- return NULL;
+ return ERR_PTR(-ENOMEM);
init_waitqueue_head(&sfp->read_wait);
rwlock_init(&sfp->rq_list_lock);
@@ -2078,9 +2057,13 @@ sg_add_sfp(Sg_device * sdp, int dev)
sfp->cmd_q = SG_DEF_COMMAND_Q;
sfp->keep_orphan = SG_DEF_KEEP_ORPHAN;
sfp->parentdp = sdp;
- write_lock_irqsave(&sg_index_lock, iflags);
+ spin_lock_irqsave(&sdp->sfd_lock, iflags);
+ if (sdp->detached) {
+ spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
+ return ERR_PTR(-ENODEV);
+ }
list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
- write_unlock_irqrestore(&sg_index_lock, iflags);
+ spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp));
if (unlikely(sg_big_buff != def_reserved_size))
sg_big_buff = def_reserved_size;
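
sg_add_sfp() now distinguishes its failure modes (allocation failure vs. a device already detached) by returning an encoded error pointer rather than NULL, which is what lets sg_open() hand the precise errno back to userspace via PTR_ERR(). The idiom in isolation (a kernel-style sketch using the linux/err.h helpers):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>

struct thing { int dummy; };

static struct thing *thing_create(bool detached)
{
	struct thing *t;

	if (detached)
		return ERR_PTR(-ENODEV);	/* encode the errno in the pointer */

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return ERR_PTR(-ENOMEM);
	return t;
}

/* caller side: tell success apart from an encoded error */
static int thing_use(bool detached)
{
	struct thing *t = thing_create(detached);

	if (IS_ERR(t))
		return PTR_ERR(t);		/* e.g. -ENODEV or -ENOMEM */
	kfree(t);
	return 0;
}
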
@@ -2130,10 +2113,9 @@ static void sg_remove_sfp(struct kref *kref)
struct sg_device *sdp = sfp->parentdp;
unsigned long iflags;
- write_lock_irqsave(&sg_index_lock, iflags);
+ spin_lock_irqsave(&sdp->sfd_lock, iflags);
list_del(&sfp->sfd_siblings);
- write_unlock_irqrestore(&sg_index_lock, iflags);
- wake_up_interruptible(&sdp->o_excl_wait);
+ spin_unlock_irqrestore(&sdp->sfd_lock, iflags);
INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext);
schedule_work(&sfp->ew.work);
@@ -2520,7 +2502,7 @@ static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
return 0;
}
-/* must be called while holding sg_index_lock */
+/* must be called while holding sg_index_lock and sfd_lock */
static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp)
{
int k, m, new_interface, blen, usg;
@@ -2605,22 +2587,26 @@ static int sg_proc_seq_show_debug(struct seq_file *s, void *v)
read_lock_irqsave(&sg_index_lock, iflags);
sdp = it ? sg_lookup_dev(it->index) : NULL;
- if (sdp && !list_empty(&sdp->sfds)) {
- struct scsi_device *scsidp = sdp->device;
+ if (sdp) {
+ spin_lock(&sdp->sfd_lock);
+ if (!list_empty(&sdp->sfds)) {
+ struct scsi_device *scsidp = sdp->device;
- seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
- if (sdp->detached)
- seq_printf(s, "detached pending close ");
- else
- seq_printf
- (s, "scsi%d chan=%d id=%d lun=%d em=%d",
- scsidp->host->host_no,
- scsidp->channel, scsidp->id,
- scsidp->lun,
- scsidp->host->hostt->emulated);
- seq_printf(s, " sg_tablesize=%d excl=%d\n",
- sdp->sg_tablesize, get_exclude(sdp));
- sg_proc_debug_helper(s, sdp);
+ seq_printf(s, " >>> device=%s ", sdp->disk->disk_name);
+ if (sdp->detached)
+ seq_printf(s, "detached pending close ");
+ else
+ seq_printf
+ (s, "scsi%d chan=%d id=%d lun=%d em=%d",
+ scsidp->host->host_no,
+ scsidp->channel, scsidp->id,
+ scsidp->lun,
+ scsidp->host->hostt->emulated);
+ seq_printf(s, " sg_tablesize=%d excl=%d\n",
+ sdp->sg_tablesize, sdp->exclude);
+ sg_proc_debug_helper(s, sdp);
+ }
+ spin_unlock(&sdp->sfd_lock);
}
read_unlock_irqrestore(&sg_index_lock, iflags);
return 0;
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index 2a32036a940..ff44b3c2cff 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -82,7 +82,7 @@ static int try_rdio = 1;
static int try_wdio = 1;
static struct class st_sysfs_class;
-static struct device_attribute st_dev_attrs[];
+static const struct attribute_group *st_dev_groups[];
MODULE_AUTHOR("Kai Makisara");
MODULE_DESCRIPTION("SCSI tape (st) driver");
@@ -4274,7 +4274,7 @@ static void scsi_tape_release(struct kref *kref)
static struct class st_sysfs_class = {
.name = "scsi_tape",
- .dev_attrs = st_dev_attrs,
+ .dev_groups = st_dev_groups,
};
static int __init init_st(void)
@@ -4408,6 +4408,7 @@ defined_show(struct device *dev, struct device_attribute *attr, char *buf)
l = snprintf(buf, PAGE_SIZE, "%d\n", STm->defined);
return l;
}
+static DEVICE_ATTR_RO(defined);
static ssize_t
default_blksize_show(struct device *dev, struct device_attribute *attr,
@@ -4419,7 +4420,7 @@ default_blksize_show(struct device *dev, struct device_attribute *attr,
l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_blksize);
return l;
}
-
+static DEVICE_ATTR_RO(default_blksize);
static ssize_t
default_density_show(struct device *dev, struct device_attribute *attr,
@@ -4433,6 +4434,7 @@ default_density_show(struct device *dev, struct device_attribute *attr,
l = snprintf(buf, PAGE_SIZE, fmt, STm->default_density);
return l;
}
+static DEVICE_ATTR_RO(default_density);
static ssize_t
default_compression_show(struct device *dev, struct device_attribute *attr,
@@ -4444,6 +4446,7 @@ default_compression_show(struct device *dev, struct device_attribute *attr,
l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_compression - 1);
return l;
}
+static DEVICE_ATTR_RO(default_compression);
static ssize_t
options_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -4472,15 +4475,17 @@ options_show(struct device *dev, struct device_attribute *attr, char *buf)
l = snprintf(buf, PAGE_SIZE, "0x%08x\n", options);
return l;
}
-
-static struct device_attribute st_dev_attrs[] = {
- __ATTR_RO(defined),
- __ATTR_RO(default_blksize),
- __ATTR_RO(default_density),
- __ATTR_RO(default_compression),
- __ATTR_RO(options),
- __ATTR_NULL,
+static DEVICE_ATTR_RO(options);
+
+static struct attribute *st_dev_attrs[] = {
+ &dev_attr_defined.attr,
+ &dev_attr_default_blksize.attr,
+ &dev_attr_default_density.attr,
+ &dev_attr_default_compression.attr,
+ &dev_attr_options.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(st_dev);
/* The following functions may be useful for a larger audience. */
static int sgl_map_user_pages(struct st_buffer *STbp,
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 83ec1aa8596..1a28f563279 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1879,7 +1879,6 @@ static void __exit storvsc_drv_exit(void)
}
MODULE_LICENSE("GPL");
-MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
module_init(storvsc_drv_init);
module_exit(storvsc_drv_exit);
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 139bc0647b4..bce09a6898c 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -36,10 +36,17 @@
#ifndef _UFS_H
#define _UFS_H
+#include <linux/mutex.h>
+#include <linux/types.h>
+
#define MAX_CDB_SIZE 16
+#define GENERAL_UPIU_REQUEST_SIZE 32
+#define QUERY_DESC_MAX_SIZE 256
+#define QUERY_OSF_SIZE (GENERAL_UPIU_REQUEST_SIZE - \
+ (sizeof(struct utp_upiu_header)))
#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
- ((byte3 << 24) | (byte2 << 16) |\
+ cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
(byte1 << 8) | (byte0))
/*
@@ -62,7 +69,7 @@ enum {
UPIU_TRANSACTION_COMMAND = 0x01,
UPIU_TRANSACTION_DATA_OUT = 0x02,
UPIU_TRANSACTION_TASK_REQ = 0x04,
- UPIU_TRANSACTION_QUERY_REQ = 0x26,
+ UPIU_TRANSACTION_QUERY_REQ = 0x16,
};
/* UTP UPIU Transaction Codes Target to Initiator */
@@ -73,6 +80,7 @@ enum {
UPIU_TRANSACTION_TASK_RSP = 0x24,
UPIU_TRANSACTION_READY_XFER = 0x31,
UPIU_TRANSACTION_QUERY_RSP = 0x36,
+ UPIU_TRANSACTION_REJECT_UPIU = 0x3F,
};
/* UPIU Read/Write flags */
@@ -90,8 +98,41 @@ enum {
UPIU_TASK_ATTR_ACA = 0x03,
};
-/* UTP QUERY Transaction Specific Fields OpCode */
+/* UPIU Query request function */
enum {
+ UPIU_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01,
+ UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST = 0x81,
+};
+
+/* Flag idn for Query Requests*/
+enum flag_idn {
+ QUERY_FLAG_IDN_FDEVICEINIT = 0x01,
+ QUERY_FLAG_IDN_BKOPS_EN = 0x04,
+};
+
+/* Attribute idn for Query requests */
+enum attr_idn {
+ QUERY_ATTR_IDN_BKOPS_STATUS = 0x05,
+ QUERY_ATTR_IDN_EE_CONTROL = 0x0D,
+ QUERY_ATTR_IDN_EE_STATUS = 0x0E,
+};
+
+/* Exception event mask values */
+enum {
+ MASK_EE_STATUS = 0xFFFF,
+ MASK_EE_URGENT_BKOPS = (1 << 2),
+};
+
+/* Background operation status */
+enum {
+ BKOPS_STATUS_NO_OP = 0x0,
+ BKOPS_STATUS_NON_CRITICAL = 0x1,
+ BKOPS_STATUS_PERF_IMPACT = 0x2,
+ BKOPS_STATUS_CRITICAL = 0x3,
+};
+
+/* UTP QUERY Transaction Specific Fields OpCode */
+enum query_opcode {
UPIU_QUERY_OPCODE_NOP = 0x0,
UPIU_QUERY_OPCODE_READ_DESC = 0x1,
UPIU_QUERY_OPCODE_WRITE_DESC = 0x2,
@@ -103,6 +144,21 @@ enum {
UPIU_QUERY_OPCODE_TOGGLE_FLAG = 0x8,
};
+/* Query response result code */
+enum {
+ QUERY_RESULT_SUCCESS = 0x00,
+ QUERY_RESULT_NOT_READABLE = 0xF6,
+ QUERY_RESULT_NOT_WRITEABLE = 0xF7,
+ QUERY_RESULT_ALREADY_WRITTEN = 0xF8,
+ QUERY_RESULT_INVALID_LENGTH = 0xF9,
+ QUERY_RESULT_INVALID_VALUE = 0xFA,
+ QUERY_RESULT_INVALID_SELECTOR = 0xFB,
+ QUERY_RESULT_INVALID_INDEX = 0xFC,
+ QUERY_RESULT_INVALID_IDN = 0xFD,
+ QUERY_RESULT_INVALID_OPCODE = 0xFE,
+ QUERY_RESULT_GENERAL_FAILURE = 0xFF,
+};
+
/* UTP Transfer Request Command Type (CT) */
enum {
UPIU_COMMAND_SET_TYPE_SCSI = 0x0,
@@ -110,10 +166,18 @@ enum {
UPIU_COMMAND_SET_TYPE_QUERY = 0x2,
};
+/* UTP Transfer Request Command Offset */
+#define UPIU_COMMAND_TYPE_OFFSET 28
+
+/* Offset of the response code in the UPIU header */
+#define UPIU_RSP_CODE_OFFSET 8
+
enum {
- MASK_SCSI_STATUS = 0xFF,
- MASK_TASK_RESPONSE = 0xFF00,
- MASK_RSP_UPIU_RESULT = 0xFFFF,
+ MASK_SCSI_STATUS = 0xFF,
+ MASK_TASK_RESPONSE = 0xFF00,
+ MASK_RSP_UPIU_RESULT = 0xFFFF,
+ MASK_QUERY_DATA_SEG_LEN = 0xFFFF,
+ MASK_RSP_EXCEPTION_EVENT = 0x10000,
};
/* Task management service response */
@@ -138,26 +202,59 @@ struct utp_upiu_header {
/**
* struct utp_upiu_cmd - Command UPIU structure
- * @header: UPIU header structure DW-0 to DW-2
* @data_transfer_len: Data Transfer Length DW-3
* @cdb: Command Descriptor Block CDB DW-4 to DW-7
*/
struct utp_upiu_cmd {
- struct utp_upiu_header header;
u32 exp_data_transfer_len;
u8 cdb[MAX_CDB_SIZE];
};
/**
- * struct utp_upiu_rsp - Response UPIU structure
- * @header: UPIU header DW-0 to DW-2
+ * struct utp_upiu_query - upiu request buffer structure for
+ * query request.
+ * @opcode: command to perform B-0
+ * @idn: a value that indicates the particular type of data B-1
+ * @index: Index to further identify data B-2
+ * @selector: Index to further identify data B-3
+ * @reserved_osf: spec reserved field B-4,5
+ * @length: number of descriptor bytes to read/write B-6,7
+ * @value: Attribute value to be written DW-5
+ * @reserved: spec reserved DW-6,7
+ */
+struct utp_upiu_query {
+ u8 opcode;
+ u8 idn;
+ u8 index;
+ u8 selector;
+ u16 reserved_osf;
+ u16 length;
+ u32 value;
+ u32 reserved[2];
+};
+
+/**
+ * struct utp_upiu_req - general upiu request structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @sc: fields structure for scsi command DW-3 to DW-7
+ * @qr: fields structure for query request DW-3 to DW-7
+ */
+struct utp_upiu_req {
+ struct utp_upiu_header header;
+ union {
+ struct utp_upiu_cmd sc;
+ struct utp_upiu_query qr;
+ };
+};
+
+/**
+ * struct utp_cmd_rsp - Response UPIU structure
* @residual_transfer_count: Residual transfer count DW-3
* @reserved: Reserved double words DW-4 to DW-7
* @sense_data_len: Sense data length DW-8 U16
* @sense_data: Sense data field DW-8 to DW-12
*/
-struct utp_upiu_rsp {
- struct utp_upiu_header header;
+struct utp_cmd_rsp {
u32 residual_transfer_count;
u32 reserved[4];
u16 sense_data_len;
@@ -165,6 +262,20 @@ struct utp_upiu_rsp {
};
/**
+ * struct utp_upiu_rsp - general upiu response structure
+ * @header: UPIU header structure DW-0 to DW-2
+ * @sr: fields structure for scsi command DW-3 to DW-12
+ * @qr: fields structure for query request DW-3 to DW-7
+ */
+struct utp_upiu_rsp {
+ struct utp_upiu_header header;
+ union {
+ struct utp_cmd_rsp sr;
+ struct utp_upiu_query qr;
+ };
+};
+
+/**
* struct utp_upiu_task_req - Task request UPIU structure
* @header - UPIU header structure DW0 to DW-2
* @input_param1: Input parameter 1 DW-3
@@ -194,4 +305,24 @@ struct utp_upiu_task_rsp {
u32 reserved[3];
};
+/**
+ * struct ufs_query_req - parameters for building a query request
+ * @query_func: UPIU header query function
+ * @upiu_req: the query request data
+ */
+struct ufs_query_req {
+ u8 query_func;
+ struct utp_upiu_query upiu_req;
+};
+
+/**
+ * struct ufs_query_res - UPIU QUERY response
+ * @response: device response code
+ * @upiu_res: query response data
+ */
+struct ufs_query_res {
+ u8 response;
+ struct utp_upiu_query upiu_res;
+};
+
#endif /* End of Header */
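
The new utp_upiu_query and ufs_query_req/ufs_query_res types carry the opcode/IDN/index/selector tuple of a UFS Query Request in the UPIU's transaction-specific fields. As a rough sketch of how they fit together (ufshcd.c below owns the real construction, and UPIU_QUERY_OPCODE_READ_FLAG is assumed to be one of the opcode enum entries elided from the hunk above), a read of the fDeviceInit flag would be set up roughly as follows:

#include <string.h>
#include "ufs.h"	/* the header this hunk modifies */

/*
 * Sketch: fill a ufs_query_req to read the fDeviceInit flag.
 * UPIU_QUERY_OPCODE_READ_FLAG is assumed to exist among the elided
 * opcode enum entries; everything else is declared in the hunk above.
 */
static void build_read_fdeviceinit(struct ufs_query_req *req)
{
	memset(req, 0, sizeof(*req));
	req->query_func        = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
	req->upiu_req.opcode   = UPIU_QUERY_OPCODE_READ_FLAG;
	req->upiu_req.idn      = QUERY_FLAG_IDN_FDEVICEINIT;
	req->upiu_req.index    = 0;
	req->upiu_req.selector = 0;
	/* length/value stay 0 for a flag read; the low byte of the response
	 * value (MASK_QUERY_UPIU_FLAG_LOC in ufshcd.c) carries the flag bit. */
}
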
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index 48be39a6f6d..a823cf44e94 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -35,6 +35,7 @@
#include "ufshcd.h"
#include <linux/pci.h>
+#include <linux/pm_runtime.h>
#ifdef CONFIG_PM
/**
@@ -44,7 +45,7 @@
*
* Returns -ENOSYS
*/
-static int ufshcd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+static int ufshcd_pci_suspend(struct device *dev)
{
/*
* TODO:
@@ -61,7 +62,7 @@ static int ufshcd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
*
* Returns -ENOSYS
*/
-static int ufshcd_pci_resume(struct pci_dev *pdev)
+static int ufshcd_pci_resume(struct device *dev)
{
/*
* TODO:
@@ -71,8 +72,45 @@ static int ufshcd_pci_resume(struct pci_dev *pdev)
return -ENOSYS;
}
+#else
+#define ufshcd_pci_suspend NULL
+#define ufshcd_pci_resume NULL
#endif /* CONFIG_PM */
+#ifdef CONFIG_PM_RUNTIME
+static int ufshcd_pci_runtime_suspend(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (!hba)
+ return 0;
+
+ return ufshcd_runtime_suspend(hba);
+}
+static int ufshcd_pci_runtime_resume(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (!hba)
+ return 0;
+
+ return ufshcd_runtime_resume(hba);
+}
+static int ufshcd_pci_runtime_idle(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (!hba)
+ return 0;
+
+ return ufshcd_runtime_idle(hba);
+}
+#else /* !CONFIG_PM_RUNTIME */
+#define ufshcd_pci_runtime_suspend NULL
+#define ufshcd_pci_runtime_resume NULL
+#define ufshcd_pci_runtime_idle NULL
+#endif /* CONFIG_PM_RUNTIME */
+
/**
* ufshcd_pci_shutdown - main function to put the controller in reset state
* @pdev: pointer to PCI device handle
@@ -91,12 +129,10 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
{
struct ufs_hba *hba = pci_get_drvdata(pdev);
- disable_irq(pdev->irq);
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
ufshcd_remove(hba);
- pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
- pci_clear_master(pdev);
- pci_disable_device(pdev);
}
/**
@@ -133,55 +169,49 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
void __iomem *mmio_base;
int err;
- err = pci_enable_device(pdev);
+ err = pcim_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "pci_enable_device failed\n");
- goto out_error;
+ dev_err(&pdev->dev, "pcim_enable_device failed\n");
+ return err;
}
pci_set_master(pdev);
-
- err = pci_request_regions(pdev, UFSHCD);
+ err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD);
if (err < 0) {
- dev_err(&pdev->dev, "request regions failed\n");
- goto out_disable;
+ dev_err(&pdev->dev, "request and iomap failed\n");
+ return err;
}
- mmio_base = pci_ioremap_bar(pdev, 0);
- if (!mmio_base) {
- dev_err(&pdev->dev, "memory map failed\n");
- err = -ENOMEM;
- goto out_release_regions;
- }
+ mmio_base = pcim_iomap_table(pdev)[0];
err = ufshcd_set_dma_mask(pdev);
if (err) {
dev_err(&pdev->dev, "set dma mask failed\n");
- goto out_iounmap;
+ return err;
}
err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq);
if (err) {
dev_err(&pdev->dev, "Initialization failed\n");
- goto out_iounmap;
+ return err;
}
pci_set_drvdata(pdev, hba);
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_allow(&pdev->dev);
return 0;
-
-out_iounmap:
- iounmap(mmio_base);
-out_release_regions:
- pci_release_regions(pdev);
-out_disable:
- pci_clear_master(pdev);
- pci_disable_device(pdev);
-out_error:
- return err;
}
+static const struct dev_pm_ops ufshcd_pci_pm_ops = {
+ .suspend = ufshcd_pci_suspend,
+ .resume = ufshcd_pci_resume,
+ .runtime_suspend = ufshcd_pci_runtime_suspend,
+ .runtime_resume = ufshcd_pci_runtime_resume,
+ .runtime_idle = ufshcd_pci_runtime_idle,
+};
+
static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = {
{ PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ } /* terminate list */
@@ -195,10 +225,9 @@ static struct pci_driver ufshcd_pci_driver = {
.probe = ufshcd_pci_probe,
.remove = ufshcd_pci_remove,
.shutdown = ufshcd_pci_shutdown,
-#ifdef CONFIG_PM
- .suspend = ufshcd_pci_suspend,
- .resume = ufshcd_pci_resume,
-#endif
+ .driver = {
+ .pm = &ufshcd_pci_pm_ops
+ },
};
module_pci_driver(ufshcd_pci_driver);
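
Switching to pcim_enable_device() and pcim_iomap_regions() makes the PCI enable and BAR mapping device-managed, which is why the probe error ladder and the explicit pci_release_regions()/pci_disable_device() calls in remove disappear. The core of the managed pattern, reduced to a sketch:

#include <linux/pci.h>

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *mmio;
	int err;

	err = pcim_enable_device(pdev);		/* undone automatically on unbind */
	if (err)
		return err;

	/* request + ioremap BAR 0; also released and unmapped automatically */
	err = pcim_iomap_regions(pdev, 1 << 0, "demo");
	if (err)
		return err;

	mmio = pcim_iomap_table(pdev)[0];

	/* any later failure can simply 'return err', no goto unwinding needed */
	(void)mmio;
	return 0;
}
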
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index c42db40d4e5..5e462322542 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -34,6 +34,7 @@
*/
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include "ufshcd.h"
@@ -87,6 +88,40 @@ static int ufshcd_pltfrm_resume(struct device *dev)
#define ufshcd_pltfrm_resume NULL
#endif
+#ifdef CONFIG_PM_RUNTIME
+static int ufshcd_pltfrm_runtime_suspend(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (!hba)
+ return 0;
+
+ return ufshcd_runtime_suspend(hba);
+}
+static int ufshcd_pltfrm_runtime_resume(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (!hba)
+ return 0;
+
+ return ufshcd_runtime_resume(hba);
+}
+static int ufshcd_pltfrm_runtime_idle(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ if (!hba)
+ return 0;
+
+ return ufshcd_runtime_idle(hba);
+}
+#else /* !CONFIG_PM_RUNTIME */
+#define ufshcd_pltfrm_runtime_suspend NULL
+#define ufshcd_pltfrm_runtime_resume NULL
+#define ufshcd_pltfrm_runtime_idle NULL
+#endif /* CONFIG_PM_RUNTIME */
+
/**
* ufshcd_pltfrm_probe - probe routine of the driver
* @pdev: pointer to Platform device handle
@@ -102,15 +137,8 @@ static int ufshcd_pltfrm_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem_res) {
- dev_err(dev, "Memory resource not available\n");
- err = -ENODEV;
- goto out;
- }
-
mmio_base = devm_ioremap_resource(dev, mem_res);
if (IS_ERR(mmio_base)) {
- dev_err(dev, "memory map failed\n");
err = PTR_ERR(mmio_base);
goto out;
}
@@ -122,14 +150,22 @@ static int ufshcd_pltfrm_probe(struct platform_device *pdev)
goto out;
}
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
err = ufshcd_init(dev, &hba, mmio_base, irq);
if (err) {
dev_err(dev, "Intialization failed\n");
- goto out;
+ goto out_disable_rpm;
}
platform_set_drvdata(pdev, hba);
+ return 0;
+
+out_disable_rpm:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
out:
return err;
}
@@ -144,7 +180,7 @@ static int ufshcd_pltfrm_remove(struct platform_device *pdev)
{
struct ufs_hba *hba = platform_get_drvdata(pdev);
- disable_irq(hba->irq);
+ pm_runtime_get_sync(&(pdev)->dev);
ufshcd_remove(hba);
return 0;
}
@@ -157,6 +193,9 @@ static const struct of_device_id ufs_of_match[] = {
static const struct dev_pm_ops ufshcd_dev_pm_ops = {
.suspend = ufshcd_pltfrm_suspend,
.resume = ufshcd_pltfrm_resume,
+ .runtime_suspend = ufshcd_pltfrm_runtime_suspend,
+ .runtime_resume = ufshcd_pltfrm_runtime_resume,
+ .runtime_idle = ufshcd_pltfrm_runtime_idle,
};
static struct platform_driver ufshcd_pltfrm_driver = {
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index b743bd6fce6..b36ca9a2dfb 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -43,6 +43,19 @@
/* UIC command timeout, unit: ms */
#define UIC_CMD_TIMEOUT 500
+/* NOP OUT retries waiting for NOP IN response */
+#define NOP_OUT_RETRIES 10
+/* Timeout after 30 msecs if NOP OUT hangs without response */
+#define NOP_OUT_TIMEOUT 30 /* msecs */
+
+/* Query request retries */
+#define QUERY_REQ_RETRIES 10
+/* Query request timeout */
+#define QUERY_REQ_TIMEOUT 30 /* msec */
+
+/* Expose the flag value from utp_upiu_query.value */
+#define MASK_QUERY_UPIU_FLAG_LOC 0xFF
+
enum {
UFSHCD_MAX_CHANNEL = 0,
UFSHCD_MAX_ID = 1,
@@ -71,6 +84,40 @@ enum {
INT_AGGR_CONFIG,
};
+/*
+ * ufshcd_wait_for_register - wait for register value to change
+ * @hba - per-adapter interface
+ * @reg - mmio register offset
+ * @mask - mask to apply to read register value
+ * @val - wait condition
+ * @interval_us - polling interval in microsecs
+ * @timeout_ms - timeout in millisecs
+ *
+ * Returns -ETIMEDOUT on error, zero on success
+ */
+static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
+ u32 val, unsigned long interval_us, unsigned long timeout_ms)
+{
+ int err = 0;
+ unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+
+ /* ignore bits that we don't intend to wait on */
+ val = val & mask;
+
+ while ((ufshcd_readl(hba, reg) & mask) != val) {
+ /* wakeup within 50us of expiry */
+ usleep_range(interval_us, interval_us + 50);
+
+ if (time_after(jiffies, timeout)) {
+ if ((ufshcd_readl(hba, reg) & mask) != val)
+ err = -ETIMEDOUT;
+ break;
+ }
+ }
+
+ return err;
+}
+
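
ufshcd_wait_for_register() above is the usual poll-until-match loop: sleep between reads and re-check once more after the deadline so a success that lands just as time runs out is not reported as a timeout. The same shape in plain userspace C (clock-based instead of jiffies-based; poll_fn is an invented stand-in for the register read):

#include <errno.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* poll until (poll_fn() & mask) == (val & mask), or the timeout expires */
static int wait_for_value(uint32_t (*poll_fn)(void), uint32_t mask, uint32_t val,
			  unsigned int interval_us, unsigned int timeout_ms)
{
	uint64_t deadline = now_ms() + timeout_ms;

	val &= mask;				/* ignore bits we are not waiting on */
	while ((poll_fn() & mask) != val) {
		usleep(interval_us);
		if (now_ms() > deadline) {
			/* one last read so a late success is not reported as a timeout */
			if ((poll_fn() & mask) != val)
				return -ETIMEDOUT;
			break;
		}
	}
	return 0;
}
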
/**
* ufshcd_get_intr_mask - Get the interrupt bit mask
* @hba - Pointer to adapter instance
@@ -191,18 +238,13 @@ static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
}
/**
- * ufshcd_is_valid_req_rsp - checks if controller TR response is valid
+ * ufshcd_get_req_rsp - returns the TR response transaction type
* @ucd_rsp_ptr: pointer to response UPIU
- *
- * This function checks the response UPIU for valid transaction type in
- * response field
- * Returns 0 on success, non-zero on failure
*/
static inline int
-ufshcd_is_valid_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
+ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
{
- return ((be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24) ==
- UPIU_TRANSACTION_RESPONSE) ? 0 : DID_ERROR << 16;
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
}
/**
@@ -219,6 +261,21 @@ ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
}
/**
+ * ufshcd_is_exception_event - Check if the device raised an exception event
+ * @ucd_rsp_ptr: pointer to response UPIU
+ *
+ * The function checks if the device raised an exception event indicated in
+ * the Device Information field of response UPIU.
+ *
+ * Returns true if exception is raised, false otherwise.
+ */
+static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
+{
+ return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
+ MASK_RSP_EXCEPTION_EVENT ? true : false;
+}
+
+/**
* ufshcd_config_int_aggr - Configure interrupt aggregation values.
* Currently there is no use case where we want to configure
* interrupt aggregation dynamically. So to configure interrupt
@@ -299,14 +356,68 @@ static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
{
int len;
if (lrbp->sense_buffer) {
- len = be16_to_cpu(lrbp->ucd_rsp_ptr->sense_data_len);
+ len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
memcpy(lrbp->sense_buffer,
- lrbp->ucd_rsp_ptr->sense_data,
+ lrbp->ucd_rsp_ptr->sr.sense_data,
min_t(int, len, SCSI_SENSE_BUFFERSIZE));
}
}
/**
+ * ufshcd_query_to_cpu() - formats the buffer to native cpu endian
+ * @response: upiu query response to convert
+ */
+static inline void ufshcd_query_to_cpu(struct utp_upiu_query *response)
+{
+ response->length = be16_to_cpu(response->length);
+ response->value = be32_to_cpu(response->value);
+}
+
+/**
+ * ufshcd_query_to_be() - converts the query request fields to big endian
+ * @request: upiu query request to convert
+ */
+static inline void ufshcd_query_to_be(struct utp_upiu_query *request)
+{
+ request->length = cpu_to_be16(request->length);
+ request->value = cpu_to_be32(request->value);
+}
+
+/**
+ * ufshcd_copy_query_response() - Copy the Query Response and the data
+ * descriptor
+ * @hba: per adapter instance
+ * @lrb - pointer to local reference block
+ */
+static
+void ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
+
+ /* Get the UPIU response */
+ query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
+ UPIU_RSP_CODE_OFFSET;
+
+ memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
+ ufshcd_query_to_cpu(&query_res->upiu_res);
+
+ /* Get the descriptor */
+ if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
+ u8 *descp = (u8 *)&lrbp->ucd_rsp_ptr +
+ GENERAL_UPIU_REQUEST_SIZE;
+ u16 len;
+
+ /* data segment length */
+ len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
+ MASK_QUERY_DATA_SEG_LEN;
+
+ memcpy(hba->dev_cmd.query.descriptor, descp,
+ min_t(u16, len, QUERY_DESC_MAX_SIZE));
+ }
+}
+
+/**
* ufshcd_hba_capabilities - Read controller capabilities
* @hba: per adapter instance
*/
@@ -519,76 +630,170 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
}
/**
+ * ufshcd_prepare_req_desc_hdr() - Fills the requests header
+ * descriptor according to request
+ * @lrbp: pointer to local reference block
+ * @upiu_flags: flags required in the header
+ * @cmd_dir: requests data direction
+ */
+static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
+ u32 *upiu_flags, enum dma_data_direction cmd_dir)
+{
+ struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
+ u32 data_direction;
+ u32 dword_0;
+
+ if (cmd_dir == DMA_FROM_DEVICE) {
+ data_direction = UTP_DEVICE_TO_HOST;
+ *upiu_flags = UPIU_CMD_FLAGS_READ;
+ } else if (cmd_dir == DMA_TO_DEVICE) {
+ data_direction = UTP_HOST_TO_DEVICE;
+ *upiu_flags = UPIU_CMD_FLAGS_WRITE;
+ } else {
+ data_direction = UTP_NO_DATA_TRANSFER;
+ *upiu_flags = UPIU_CMD_FLAGS_NONE;
+ }
+
+ dword_0 = data_direction | (lrbp->command_type
+ << UPIU_COMMAND_TYPE_OFFSET);
+ if (lrbp->intr_cmd)
+ dword_0 |= UTP_REQ_DESC_INT_CMD;
+
+ /* Transfer request descriptor header fields */
+ req_desc->header.dword_0 = cpu_to_le32(dword_0);
+
+ /*
+ * assigning invalid value for command status. Controller
+ * updates OCS on command completion, with the command
+ * status
+ */
+ req_desc->header.dword_2 =
+ cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
+}
+
+/**
+ * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
+ * for scsi commands
+ * @lrbp - local reference block pointer
+ * @upiu_flags - flags
+ */
+static
+void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
+{
+ struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+
+ /* command descriptor fields */
+ ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
+ UPIU_TRANSACTION_COMMAND, upiu_flags,
+ lrbp->lun, lrbp->task_tag);
+ ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
+ UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
+
+ /* Total EHS length and Data segment length will be zero */
+ ucd_req_ptr->header.dword_2 = 0;
+
+ ucd_req_ptr->sc.exp_data_transfer_len =
+ cpu_to_be32(lrbp->cmd->sdb.length);
+
+ memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
+ (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
+}
+
+/**
+ * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
+ * for query requests
+ * @hba: UFS hba
+ * @lrbp: local reference block pointer
+ * @upiu_flags: flags
+ */
+static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, u32 upiu_flags)
+{
+ struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+ struct ufs_query *query = &hba->dev_cmd.query;
+ u16 len = query->request.upiu_req.length;
+ u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
+
+ /* Query request header */
+ ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
+ UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
+ lrbp->lun, lrbp->task_tag);
+ ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
+ 0, query->request.query_func, 0, 0);
+
+ /* Data segment length */
+ ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
+ 0, 0, len >> 8, (u8)len);
+
+ /* Copy the Query Request buffer as is */
+ memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
+ QUERY_OSF_SIZE);
+ ufshcd_query_to_be(&ucd_req_ptr->qr);
+
+ /* Copy the Descriptor */
+ if ((len > 0) && (query->request.upiu_req.opcode ==
+ UPIU_QUERY_OPCODE_WRITE_DESC)) {
+ memcpy(descp, query->descriptor,
+ min_t(u16, len, QUERY_DESC_MAX_SIZE));
+ }
+}
+
+static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
+{
+ struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
+
+ memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
+
+ /* command descriptor fields */
+ ucd_req_ptr->header.dword_0 =
+ UPIU_HEADER_DWORD(
+ UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
+}
+
+/**
* ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
+ * @hba - per adapter instance
* @lrb - pointer to local reference block
*/
-static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp)
+static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
- struct utp_transfer_req_desc *req_desc;
- struct utp_upiu_cmd *ucd_cmd_ptr;
- u32 data_direction;
u32 upiu_flags;
-
- ucd_cmd_ptr = lrbp->ucd_cmd_ptr;
- req_desc = lrbp->utr_descriptor_ptr;
+ int ret = 0;
switch (lrbp->command_type) {
case UTP_CMD_TYPE_SCSI:
- if (lrbp->cmd->sc_data_direction == DMA_FROM_DEVICE) {
- data_direction = UTP_DEVICE_TO_HOST;
- upiu_flags = UPIU_CMD_FLAGS_READ;
- } else if (lrbp->cmd->sc_data_direction == DMA_TO_DEVICE) {
- data_direction = UTP_HOST_TO_DEVICE;
- upiu_flags = UPIU_CMD_FLAGS_WRITE;
+ if (likely(lrbp->cmd)) {
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
+ lrbp->cmd->sc_data_direction);
+ ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
} else {
- data_direction = UTP_NO_DATA_TRANSFER;
- upiu_flags = UPIU_CMD_FLAGS_NONE;
+ ret = -EINVAL;
}
-
- /* Transfer request descriptor header fields */
- req_desc->header.dword_0 =
- cpu_to_le32(data_direction | UTP_SCSI_COMMAND);
-
- /*
- * assigning invalid value for command status. Controller
- * updates OCS on command completion, with the command
- * status
- */
- req_desc->header.dword_2 =
- cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
-
- /* command descriptor fields */
- ucd_cmd_ptr->header.dword_0 =
- cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_COMMAND,
- upiu_flags,
- lrbp->lun,
- lrbp->task_tag));
- ucd_cmd_ptr->header.dword_1 =
- cpu_to_be32(
- UPIU_HEADER_DWORD(UPIU_COMMAND_SET_TYPE_SCSI,
- 0,
- 0,
- 0));
-
- /* Total EHS length and Data segment length will be zero */
- ucd_cmd_ptr->header.dword_2 = 0;
-
- ucd_cmd_ptr->exp_data_transfer_len =
- cpu_to_be32(lrbp->cmd->sdb.length);
-
- memcpy(ucd_cmd_ptr->cdb,
- lrbp->cmd->cmnd,
- (min_t(unsigned short,
- lrbp->cmd->cmd_len,
- MAX_CDB_SIZE)));
break;
case UTP_CMD_TYPE_DEV_MANAGE:
- /* For query function implementation */
+ ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
+ if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
+ ufshcd_prepare_utp_query_req_upiu(
+ hba, lrbp, upiu_flags);
+ else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
+ ufshcd_prepare_utp_nop_upiu(lrbp);
+ else
+ ret = -EINVAL;
break;
case UTP_CMD_TYPE_UFS:
/* For UFS native command implementation */
+ ret = -ENOTSUPP;
+ dev_err(hba->dev, "%s: UFS native command are not supported\n",
+ __func__);
+ break;
+ default:
+ ret = -ENOTSUPP;
+ dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
+ __func__, lrbp->command_type);
break;
} /* end of switch */
+
+ return ret;
}
/**
@@ -615,21 +820,37 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto out;
}
+ /* acquire the tag to make sure device cmds don't use it */
+ if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
+ /*
+ * Dev manage command in progress, requeue the command.
+ * Requeuing the command helps in cases where the request *may*
+ * find a different tag instead of waiting for dev manage command
+ * completion.
+ */
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ goto out;
+ }
+
lrbp = &hba->lrb[tag];
+ WARN_ON(lrbp->cmd);
lrbp->cmd = cmd;
lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
lrbp->sense_buffer = cmd->sense_buffer;
lrbp->task_tag = tag;
lrbp->lun = cmd->device->lun;
-
+ lrbp->intr_cmd = false;
lrbp->command_type = UTP_CMD_TYPE_SCSI;
/* form UPIU before issuing the command */
- ufshcd_compose_upiu(lrbp);
+ ufshcd_compose_upiu(hba, lrbp);
err = ufshcd_map_sg(lrbp);
- if (err)
+ if (err) {
+ lrbp->cmd = NULL;
+ clear_bit_unlock(tag, &hba->lrb_in_use);
goto out;
+ }
/* issue command to the controller */
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -639,6 +860,338 @@ out:
return err;
}
+static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
+{
+ lrbp->cmd = NULL;
+ lrbp->sense_bufflen = 0;
+ lrbp->sense_buffer = NULL;
+ lrbp->task_tag = tag;
+ lrbp->lun = 0; /* device management cmd is not specific to any LUN */
+ lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
+ lrbp->intr_cmd = true; /* No interrupt aggregation */
+ hba->dev_cmd.type = cmd_type;
+
+ return ufshcd_compose_upiu(hba, lrbp);
+}
+
+static int
+ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
+{
+ int err = 0;
+ unsigned long flags;
+ u32 mask = 1 << tag;
+
+ /* clear outstanding transaction before retry */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_utrl_clear(hba, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ /*
+ * wait for h/w to clear the corresponding bit in the door-bell.
+ * max. wait is 1 sec.
+ */
+ err = ufshcd_wait_for_register(hba,
+ REG_UTP_TRANSFER_REQ_DOOR_BELL,
+ mask, ~mask, 1000, 1000);
+
+ return err;
+}
+
+/**
+ * ufshcd_dev_cmd_completion() - handles device management command responses
+ * @hba: per adapter instance
+ * @lrbp: pointer to local reference block
+ */
+static int
+ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+ int resp;
+ int err = 0;
+
+ resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
+
+ switch (resp) {
+ case UPIU_TRANSACTION_NOP_IN:
+ if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: unexpected response %x\n",
+ __func__, resp);
+ }
+ break;
+ case UPIU_TRANSACTION_QUERY_RSP:
+ ufshcd_copy_query_response(hba, lrbp);
+ break;
+ case UPIU_TRANSACTION_REJECT_UPIU:
+ /* TODO: handle Reject UPIU Response */
+ err = -EPERM;
+ dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
+ __func__);
+ break;
+ default:
+ err = -EINVAL;
+ dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
+ __func__, resp);
+ break;
+ }
+
+ return err;
+}
+
+static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, int max_timeout)
+{
+ int err = 0;
+ unsigned long time_left;
+ unsigned long flags;
+
+ time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
+ msecs_to_jiffies(max_timeout));
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->dev_cmd.complete = NULL;
+ if (likely(time_left)) {
+ err = ufshcd_get_tr_ocs(lrbp);
+ if (!err)
+ err = ufshcd_dev_cmd_completion(hba, lrbp);
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ if (!time_left) {
+ err = -ETIMEDOUT;
+ if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
+ /* successfully cleared the command, retry if needed */
+ err = -EAGAIN;
+ }
+
+ return err;
+}
+
+/**
+ * ufshcd_get_dev_cmd_tag - Get device management command tag
+ * @hba: per-adapter instance
+ * @tag_out: pointer to variable with available slot value
+ *
+ * Get a free slot and lock it until device management command
+ * completes.
+ *
+ * Returns false if a free slot is unavailable for locking, else
+ * returns true with the tag value in @tag_out.
+ */
+static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
+{
+ int tag;
+ bool ret = false;
+ unsigned long tmp;
+
+ if (!tag_out)
+ goto out;
+
+ do {
+ tmp = ~hba->lrb_in_use;
+ tag = find_last_bit(&tmp, hba->nutrs);
+ if (tag >= hba->nutrs)
+ goto out;
+ } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
+
+ *tag_out = tag;
+ ret = true;
+out:
+ return ret;
+}
+
+static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
+{
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+}
+
+/**
+ * ufshcd_exec_dev_cmd - API for sending device management requests
+ * @hba - UFS hba
+ * @cmd_type - specifies the type (NOP, Query...)
+ * @timeout - timeout in milliseconds
+ *
+ * NOTE: Since there is only one available tag for device management commands,
+ * it is expected you hold the hba->dev_cmd.lock mutex.
+ */
+static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
+ enum dev_cmd_type cmd_type, int timeout)
+{
+ struct ufshcd_lrb *lrbp;
+ int err;
+ int tag;
+ struct completion wait;
+ unsigned long flags;
+
+ /*
+ * Get free slot, sleep if slots are unavailable.
+ * Even though we use wait_event() which sleeps indefinitely,
+ * the maximum wait time is bounded by SCSI request timeout.
+ */
+ wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
+
+ init_completion(&wait);
+ lrbp = &hba->lrb[tag];
+ WARN_ON(lrbp->cmd);
+ err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
+ if (unlikely(err))
+ goto out_put_tag;
+
+ hba->dev_cmd.complete = &wait;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_send_command(hba, tag);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
+
+out_put_tag:
+ ufshcd_put_dev_cmd_tag(hba, tag);
+ wake_up(&hba->dev_cmd.tag_wq);
+ return err;
+}
+
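A minimal caller sketch (not part of this patch) of the locking contract noted above, mirroring ufshcd_verify_dev_init() further down:

static int example_send_nop(struct ufs_hba *hba)
{
	int err;

	/* only one device management slot; serialize via dev_cmd.lock */
	mutex_lock(&hba->dev_cmd.lock);
	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
	mutex_unlock(&hba->dev_cmd.lock);

	return err;
}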
+/**
+ * ufshcd_query_flag() - API function for sending flag query requests
+ * @hba: per-adapter instance
+ * @opcode: flag query to perform
+ * @idn: flag idn to access
+ * @flag_res: the flag value after the query request completes
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
+ enum flag_idn idn, bool *flag_res)
+{
+ struct ufs_query_req *request;
+ struct ufs_query_res *response;
+ int err;
+
+ BUG_ON(!hba);
+
+ mutex_lock(&hba->dev_cmd.lock);
+ request = &hba->dev_cmd.query.request;
+ response = &hba->dev_cmd.query.response;
+ memset(request, 0, sizeof(struct ufs_query_req));
+ memset(response, 0, sizeof(struct ufs_query_res));
+
+ switch (opcode) {
+ case UPIU_QUERY_OPCODE_SET_FLAG:
+ case UPIU_QUERY_OPCODE_CLEAR_FLAG:
+ case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ break;
+ case UPIU_QUERY_OPCODE_READ_FLAG:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+ if (!flag_res) {
+ /* No dummy reads */
+ dev_err(hba->dev, "%s: Invalid argument for read request\n",
+ __func__);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ break;
+ default:
+ dev_err(hba->dev,
+ "%s: Expected query flag opcode but got = %d\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ request->upiu_req.opcode = opcode;
+ request->upiu_req.idn = idn;
+
+ /* Send query request */
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY,
+ QUERY_REQ_TIMEOUT);
+
+ if (err) {
+ dev_err(hba->dev,
+ "%s: Sending flag query for idn %d failed, err = %d\n",
+ __func__, idn, err);
+ goto out_unlock;
+ }
+
+ if (flag_res)
+ *flag_res = (response->upiu_res.value &
+ MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
+
+out_unlock:
+ mutex_unlock(&hba->dev_cmd.lock);
+ return err;
+}
+
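A short usage sketch (not part of this patch), reading a boolean flag the same way ufshcd_complete_dev_init() does later in this series:

static int example_read_fdeviceinit(struct ufs_hba *hba, bool *flag_res)
{
	/* the helper takes dev_cmd.lock itself, so no extra locking here */
	return ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
				 QUERY_FLAG_IDN_FDEVICEINIT, flag_res);
}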
+/**
+ * ufshcd_query_attr - API function for sending attribute requests
+ * @hba: per-adapter instance
+ * @opcode: attribute opcode
+ * @idn: attribute idn to access
+ * @index: index field
+ * @selector: selector field
+ * @attr_val: the attribute value after the query request completes
+ *
+ * Returns 0 for success, non-zero in case of failure
+ */
+int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+ enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
+{
+ struct ufs_query_req *request;
+ struct ufs_query_res *response;
+ int err;
+
+ BUG_ON(!hba);
+
+ if (!attr_val) {
+ dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out;
+ }
+
+ mutex_lock(&hba->dev_cmd.lock);
+ request = &hba->dev_cmd.query.request;
+ response = &hba->dev_cmd.query.response;
+ memset(request, 0, sizeof(struct ufs_query_req));
+ memset(response, 0, sizeof(struct ufs_query_res));
+
+ switch (opcode) {
+ case UPIU_QUERY_OPCODE_WRITE_ATTR:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
+ request->upiu_req.value = *attr_val;
+ break;
+ case UPIU_QUERY_OPCODE_READ_ATTR:
+ request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
+ break;
+ default:
+ dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
+ __func__, opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
+ request->upiu_req.opcode = opcode;
+ request->upiu_req.idn = idn;
+ request->upiu_req.index = index;
+ request->upiu_req.selector = selector;
+
+ /* Send query request */
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY,
+ QUERY_REQ_TIMEOUT);
+
+ if (err) {
+ dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
+ __func__, opcode, idn, err);
+ goto out_unlock;
+ }
+
+ *attr_val = response->upiu_res.value;
+
+out_unlock:
+ mutex_unlock(&hba->dev_cmd.lock);
+out:
+ return err;
+}
+
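A short usage sketch (not part of this patch), equivalent to the ufshcd_get_bkops_status() wrapper added further down:

static int example_read_bkops_status(struct ufs_hba *hba, u32 *status)
{
	/* index and selector are 0 for this device-wide attribute */
	return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				 QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
}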
/**
* ufshcd_memory_alloc - allocate memory for host memory space data structures
* @hba: per adapter instance
@@ -774,8 +1327,8 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba)
cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
- hba->lrb[i].ucd_cmd_ptr =
- (struct utp_upiu_cmd *)(cmd_descp + i);
+ hba->lrb[i].ucd_req_ptr =
+ (struct utp_upiu_req *)(cmd_descp + i);
hba->lrb[i].ucd_rsp_ptr =
(struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
hba->lrb[i].ucd_prdt_ptr =
@@ -809,6 +1362,57 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
}
/**
+ * ufshcd_complete_dev_init() - checks device readiness
+ * @hba: per-adapter instance
+ *
+ * Set fDeviceInit flag and poll until device toggles it.
+ */
+static int ufshcd_complete_dev_init(struct ufs_hba *hba)
+{
+ int i, retries, err = 0;
+ bool flag_res = 1;
+
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ /* Set the fDeviceInit flag */
+ err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT, NULL);
+ if (!err || err == -ETIMEDOUT)
+ break;
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
+ }
+ if (err) {
+ dev_err(hba->dev,
+ "%s setting fDeviceInit flag failed with error %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ /* poll for max. 100 iterations for fDeviceInit flag to clear */
+ for (i = 0; i < 100 && !err && flag_res; i++) {
+ for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
+ err = ufshcd_query_flag(hba,
+ UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
+ if (!err || err == -ETIMEDOUT)
+ break;
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
+ err);
+ }
+ }
+ if (err)
+ dev_err(hba->dev,
+ "%s reading fDeviceInit flag failed with error %d\n",
+ __func__, err);
+ else if (flag_res)
+ dev_err(hba->dev,
+ "%s fDeviceInit was not cleared by the device\n",
+ __func__);
+
+out:
+ return err;
+}
+
+/**
* ufshcd_make_hba_operational - Make UFS controller operational
* @hba: per adapter instance
*
@@ -961,6 +1565,38 @@ out:
}
/**
+ * ufshcd_verify_dev_init() - Verify device initialization
+ * @hba: per-adapter instance
+ *
+ * Send NOP OUT UPIU and wait for NOP IN response to check whether the
+ * device Transport Protocol (UTP) layer is ready after a reset.
+ * If the UTP layer at the device side is not initialized, it may
+ * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
+ * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
+ */
+static int ufshcd_verify_dev_init(struct ufs_hba *hba)
+{
+ int err = 0;
+ int retries;
+
+ mutex_lock(&hba->dev_cmd.lock);
+ for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
+ err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
+ NOP_OUT_TIMEOUT);
+
+ if (!err || err == -ETIMEDOUT)
+ break;
+
+ dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
+ }
+ mutex_unlock(&hba->dev_cmd.lock);
+
+ if (err)
+ dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
+ return err;
+}
+
+/**
* ufshcd_do_reset - reset the host controller
* @hba: per adapter instance
*
@@ -986,13 +1622,20 @@ static int ufshcd_do_reset(struct ufs_hba *hba)
for (tag = 0; tag < hba->nutrs; tag++) {
if (test_bit(tag, &hba->outstanding_reqs)) {
lrbp = &hba->lrb[tag];
- scsi_dma_unmap(lrbp->cmd);
- lrbp->cmd->result = DID_RESET << 16;
- lrbp->cmd->scsi_done(lrbp->cmd);
- lrbp->cmd = NULL;
+ if (lrbp->cmd) {
+ scsi_dma_unmap(lrbp->cmd);
+ lrbp->cmd->result = DID_RESET << 16;
+ lrbp->cmd->scsi_done(lrbp->cmd);
+ lrbp->cmd = NULL;
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ }
}
}
+ /* complete device management command */
+ if (hba->dev_cmd.complete)
+ complete(hba->dev_cmd.complete);
+
/* clear outstanding request/task bit maps */
hba->outstanding_reqs = 0;
hba->outstanding_tasks = 0;
@@ -1199,27 +1842,39 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
switch (ocs) {
case OCS_SUCCESS:
+ result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
- /* check if the returned transfer response is valid */
- result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr);
- if (result) {
+ switch (result) {
+ case UPIU_TRANSACTION_RESPONSE:
+ /*
+ * get the response UPIU result to extract
+ * the SCSI command status
+ */
+ result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
+
+ /*
+ * get the result based on SCSI status response
+ * to notify the SCSI midlayer of the command status
+ */
+ scsi_status = result & MASK_SCSI_STATUS;
+ result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
+
+ if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
+ schedule_work(&hba->eeh_work);
+ break;
+ case UPIU_TRANSACTION_REJECT_UPIU:
+ /* TODO: handle Reject UPIU Response */
+ result = DID_ERROR << 16;
+ dev_err(hba->dev,
+ "Reject UPIU not fully implemented\n");
+ break;
+ default:
+ result = DID_ERROR << 16;
dev_err(hba->dev,
- "Invalid response = %x\n", result);
+ "Unexpected request response code = %x\n",
+ result);
break;
}
-
- /*
- * get the response UPIU result to extract
- * the SCSI command status
- */
- result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
-
- /*
- * get the result based on SCSI status response
- * to notify the SCSI midlayer of the command status
- */
- scsi_status = result & MASK_SCSI_STATUS;
- result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
break;
case OCS_ABORTED:
result |= DID_ABORT << 16;
@@ -1259,28 +1914,40 @@ static void ufshcd_uic_cmd_compl(struct ufs_hba *hba)
*/
static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
{
- struct ufshcd_lrb *lrb;
+ struct ufshcd_lrb *lrbp;
+ struct scsi_cmnd *cmd;
unsigned long completed_reqs;
u32 tr_doorbell;
int result;
int index;
+ bool int_aggr_reset = false;
- lrb = hba->lrb;
tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
for (index = 0; index < hba->nutrs; index++) {
if (test_bit(index, &completed_reqs)) {
+ lrbp = &hba->lrb[index];
+ cmd = lrbp->cmd;
+ /*
+ * Don't skip resetting interrupt aggregation counters
+ * if a regular command is present.
+ */
+ int_aggr_reset |= !lrbp->intr_cmd;
- result = ufshcd_transfer_rsp_status(hba, &lrb[index]);
-
- if (lrb[index].cmd) {
- scsi_dma_unmap(lrb[index].cmd);
- lrb[index].cmd->result = result;
- lrb[index].cmd->scsi_done(lrb[index].cmd);
-
+ if (cmd) {
+ result = ufshcd_transfer_rsp_status(hba, lrbp);
+ scsi_dma_unmap(cmd);
+ cmd->result = result;
/* Mark completed command as NULL in LRB */
- lrb[index].cmd = NULL;
+ lrbp->cmd = NULL;
+ clear_bit_unlock(index, &hba->lrb_in_use);
+ /* Do not touch lrbp after scsi done */
+ cmd->scsi_done(cmd);
+ } else if (lrbp->command_type ==
+ UTP_CMD_TYPE_DEV_MANAGE) {
+ if (hba->dev_cmd.complete)
+ complete(hba->dev_cmd.complete);
}
} /* end of if */
} /* end of for */
@@ -1288,8 +1955,238 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
/* clear corresponding bits of completed commands */
hba->outstanding_reqs ^= completed_reqs;
+ /* we might have free'd some tags above */
+ wake_up(&hba->dev_cmd.tag_wq);
+
/* Reset interrupt aggregation counters */
- ufshcd_config_int_aggr(hba, INT_AGGR_RESET);
+ if (int_aggr_reset)
+ ufshcd_config_int_aggr(hba, INT_AGGR_RESET);
+}
+
+/**
+ * ufshcd_disable_ee - disable exception event
+ * @hba: per-adapter instance
+ * @mask: exception event to disable
+ *
+ * Disables exception event in the device so that the EVENT_ALERT
+ * bit is not set.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
+{
+ int err = 0;
+ u32 val;
+
+ if (!(hba->ee_ctrl_mask & mask))
+ goto out;
+
+ val = hba->ee_ctrl_mask & ~mask;
+ val &= 0xFFFF; /* 2 bytes */
+ err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
+ if (!err)
+ hba->ee_ctrl_mask &= ~mask;
+out:
+ return err;
+}
+
+/**
+ * ufshcd_enable_ee - enable exception event
+ * @hba: per-adapter instance
+ * @mask: exception event to enable
+ *
+ * Enable corresponding exception event in the device to allow
+ * device to alert host in critical scenarios.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
+{
+ int err = 0;
+ u32 val;
+
+ if (hba->ee_ctrl_mask & mask)
+ goto out;
+
+ val = hba->ee_ctrl_mask | mask;
+ val &= 0xFFFF; /* 2 bytes */
+ err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+ QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
+ if (!err)
+ hba->ee_ctrl_mask |= mask;
+out:
+ return err;
+}
+
+/**
+ * ufshcd_enable_auto_bkops - Allow device managed BKOPS
+ * @hba: per-adapter instance
+ *
+ * Allow device to manage background operations on its own. Enabling
+ * this might lead to inconsistent latencies during normal data transfers
+ * as the device is allowed to manage its own way of handling background
+ * operations.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (hba->auto_bkops_enabled)
+ goto out;
+
+ err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
+ QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to enable bkops %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ hba->auto_bkops_enabled = true;
+
+ /* No need of URGENT_BKOPS exception from the device */
+ err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
+ if (err)
+ dev_err(hba->dev, "%s: failed to disable exception event %d\n",
+ __func__, err);
+out:
+ return err;
+}
+
+/**
+ * ufshcd_disable_auto_bkops - block device in doing background operations
+ * @hba: per-adapter instance
+ *
+ * Disabling background operations improves command response latency but
+ * has the drawback of the device moving into a critical state where it is
+ * not operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
+ * host is idle so that BKOPS are managed effectively without any negative
+ * impacts.
+ *
+ * Returns zero on success, non-zero on failure.
+ */
+static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
+{
+ int err = 0;
+
+ if (!hba->auto_bkops_enabled)
+ goto out;
+
+ /*
+ * If host assisted BKOPs is to be enabled, make sure
+ * urgent bkops exception is allowed.
+ */
+ err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to enable exception event %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
+ QUERY_FLAG_IDN_BKOPS_EN, NULL);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to disable bkops %d\n",
+ __func__, err);
+ ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
+ goto out;
+ }
+
+ hba->auto_bkops_enabled = false;
+out:
+ return err;
+}
+
+/**
+ * ufshcd_force_reset_auto_bkops - force enable of auto bkops
+ * @hba: per adapter instance
+ *
+ * After a device reset the device may toggle the BKOPS_EN flag
+ * to default value. The s/w tracking variables should be updated
+ * as well. Do this by forcing enable of auto bkops.
+ */
+static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
+{
+ hba->auto_bkops_enabled = false;
+ hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
+ ufshcd_enable_auto_bkops(hba);
+}
+
+static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
+{
+ return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
+}
+
+/**
+ * ufshcd_urgent_bkops - handle urgent bkops exception event
+ * @hba: per-adapter instance
+ *
+ * Enable fBackgroundOpsEn flag in the device to permit background
+ * operations.
+ */
+static int ufshcd_urgent_bkops(struct ufs_hba *hba)
+{
+ int err;
+ u32 status = 0;
+
+ err = ufshcd_get_bkops_status(hba, &status);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ status = status & 0xF;
+
+ /* handle only if status indicates performance impact or critical */
+ if (status >= BKOPS_STATUS_PERF_IMPACT)
+ err = ufshcd_enable_auto_bkops(hba);
+out:
+ return err;
+}
+
+static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
+{
+ return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+ QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
+}
+
+/**
+ * ufshcd_exception_event_handler - handle exceptions raised by device
+ * @work: pointer to work data
+ *
+ * Read bExceptionEventStatus attribute from the device and handle the
+ * exception event accordingly.
+ */
+static void ufshcd_exception_event_handler(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+ int err;
+ u32 status = 0;
+ hba = container_of(work, struct ufs_hba, eeh_work);
+
+ pm_runtime_get_sync(hba->dev);
+ err = ufshcd_get_ee_status(hba, &status);
+ if (err) {
+ dev_err(hba->dev, "%s: failed to get exception status %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ status &= hba->ee_ctrl_mask;
+ if (status & MASK_EE_URGENT_BKOPS) {
+ err = ufshcd_urgent_bkops(hba);
+ if (err)
+ dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
+ __func__, err);
+ }
+out:
+ pm_runtime_put_sync(hba->dev);
+ return;
}
/**
@@ -1301,9 +2198,11 @@ static void ufshcd_fatal_err_handler(struct work_struct *work)
struct ufs_hba *hba;
hba = container_of(work, struct ufs_hba, feh_workq);
+ pm_runtime_get_sync(hba->dev);
/* check if reset is already in progress */
if (hba->ufshcd_state != UFSHCD_STATE_RESET)
ufshcd_do_reset(hba);
+ pm_runtime_put_sync(hba->dev);
}
/**
@@ -1432,10 +2331,10 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
task_req_upiup =
(struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
task_req_upiup->header.dword_0 =
- cpu_to_be32(UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
- lrbp->lun, lrbp->task_tag));
+ UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
+ lrbp->lun, lrbp->task_tag);
task_req_upiup->header.dword_1 =
- cpu_to_be32(UPIU_HEADER_DWORD(0, tm_function, 0, 0));
+ UPIU_HEADER_DWORD(0, tm_function, 0, 0);
task_req_upiup->input_param1 = lrbp->lun;
task_req_upiup->input_param1 =
@@ -1502,9 +2401,11 @@ static int ufshcd_device_reset(struct scsi_cmnd *cmd)
if (hba->lrb[pos].cmd) {
scsi_dma_unmap(hba->lrb[pos].cmd);
hba->lrb[pos].cmd->result =
- DID_ABORT << 16;
+ DID_ABORT << 16;
hba->lrb[pos].cmd->scsi_done(cmd);
hba->lrb[pos].cmd = NULL;
+ clear_bit_unlock(pos, &hba->lrb_in_use);
+ wake_up(&hba->dev_cmd.tag_wq);
}
}
} /* end of for */
@@ -1572,6 +2473,9 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
__clear_bit(tag, &hba->outstanding_reqs);
hba->lrb[tag].cmd = NULL;
spin_unlock_irqrestore(host->host_lock, flags);
+
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ wake_up(&hba->dev_cmd.tag_wq);
out:
return err;
}
@@ -1587,8 +2491,22 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
int ret;
ret = ufshcd_link_startup(hba);
- if (!ret)
- scsi_scan_host(hba->host);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_verify_dev_init(hba);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_complete_dev_init(hba);
+ if (ret)
+ goto out;
+
+ ufshcd_force_reset_auto_bkops(hba);
+ scsi_scan_host(hba->host);
+ pm_runtime_put_sync(hba->dev);
+out:
+ return;
}
static struct scsi_host_template ufshcd_driver_template = {
@@ -1650,6 +2568,34 @@ int ufshcd_resume(struct ufs_hba *hba)
}
EXPORT_SYMBOL_GPL(ufshcd_resume);
+int ufshcd_runtime_suspend(struct ufs_hba *hba)
+{
+ if (!hba)
+ return 0;
+
+ /*
+ * The device is idle with no requests in the queue,
+ * allow background operations.
+ */
+ return ufshcd_enable_auto_bkops(hba);
+}
+EXPORT_SYMBOL(ufshcd_runtime_suspend);
+
+int ufshcd_runtime_resume(struct ufs_hba *hba)
+{
+ if (!hba)
+ return 0;
+
+ return ufshcd_disable_auto_bkops(hba);
+}
+EXPORT_SYMBOL(ufshcd_runtime_resume);
+
+int ufshcd_runtime_idle(struct ufs_hba *hba)
+{
+ return 0;
+}
+EXPORT_SYMBOL(ufshcd_runtime_idle);
+
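These exports back the runtime PM callbacks wired into ufshcd-pltfrm.c earlier in this patch; the glue wrappers themselves fall outside the hunks shown, but a sketch of their expected shape (assuming the hba pointer is stored as driver data via platform_set_drvdata(), as in the probe path above) is:

static int ufshcd_pltfrm_runtime_suspend(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return ufshcd_runtime_suspend(hba);
}

static int ufshcd_pltfrm_runtime_resume(struct device *dev)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);

	return ufshcd_runtime_resume(hba);
}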
/**
* ufshcd_remove - de-allocate SCSI host and host memory space
* data structure memory
@@ -1657,11 +2603,11 @@ EXPORT_SYMBOL_GPL(ufshcd_resume);
*/
void ufshcd_remove(struct ufs_hba *hba)
{
+ scsi_remove_host(hba->host);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba);
- scsi_remove_host(hba->host);
scsi_host_put(hba->host);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -1740,10 +2686,17 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
/* Initialize work queues */
INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
+ INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
/* Initialize UIC command mutex */
mutex_init(&hba->uic_cmd_mutex);
+ /* Initialize mutex for device management commands */
+ mutex_init(&hba->dev_cmd.lock);
+
+ /* Initialize device management tag acquire wait queue */
+ init_waitqueue_head(&hba->dev_cmd.tag_wq);
+
/* IRQ registration */
err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
if (err) {
@@ -1773,6 +2726,9 @@ int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
*hba_handle = hba;
+ /* Hold auto suspend until async scan completes */
+ pm_runtime_get_sync(dev);
+
async_schedule(ufshcd_async_scan, hba);
return 0;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 49590ee07ac..59c9c4848be 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -68,6 +68,11 @@
#define UFSHCD "ufshcd"
#define UFSHCD_DRIVER_VERSION "0.2"
+enum dev_cmd_type {
+ DEV_CMD_TYPE_NOP = 0x0,
+ DEV_CMD_TYPE_QUERY = 0x1,
+};
+
/**
* struct uic_command - UIC command structure
* @command: UIC command
@@ -91,7 +96,7 @@ struct uic_command {
/**
* struct ufshcd_lrb - local reference block
* @utr_descriptor_ptr: UTRD address of the command
- * @ucd_cmd_ptr: UCD address of the command
+ * @ucd_req_ptr: UCD address of the command
* @ucd_rsp_ptr: Response UPIU address for this command
* @ucd_prdt_ptr: PRDT address of the command
* @cmd: pointer to SCSI command
@@ -101,10 +106,11 @@ struct uic_command {
* @command_type: SCSI, UFS, Query.
* @task_tag: Task tag of the command
* @lun: LUN of the command
+ * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
*/
struct ufshcd_lrb {
struct utp_transfer_req_desc *utr_descriptor_ptr;
- struct utp_upiu_cmd *ucd_cmd_ptr;
+ struct utp_upiu_req *ucd_req_ptr;
struct utp_upiu_rsp *ucd_rsp_ptr;
struct ufshcd_sg_entry *ucd_prdt_ptr;
@@ -116,8 +122,35 @@ struct ufshcd_lrb {
int command_type;
int task_tag;
unsigned int lun;
+ bool intr_cmd;
};
+/**
+ * struct ufs_query - holds relevant data structures for query request
+ * @request: request upiu and function
+ * @descriptor: buffer for sending/receiving descriptor
+ * @response: response upiu and response
+ */
+struct ufs_query {
+ struct ufs_query_req request;
+ u8 *descriptor;
+ struct ufs_query_res response;
+};
+
+/**
+ * struct ufs_dev_cmd - all fields associated with device management commands
+ * @type: device management command type - Query, NOP OUT
+ * @lock: lock to allow one command at a time
+ * @complete: internal commands completion
+ * @tag_wq: wait queue until free command slot is available
+ */
+struct ufs_dev_cmd {
+ enum dev_cmd_type type;
+ struct mutex lock;
+ struct completion *complete;
+ wait_queue_head_t tag_wq;
+ struct ufs_query query;
+};
/**
* struct ufs_hba - per adapter private structure
@@ -131,6 +164,7 @@ struct ufshcd_lrb {
* @host: Scsi_Host instance of the driver
* @dev: device handle
* @lrb: local reference block
+ * @lrb_in_use: bitmap of lrb slots currently in use
* @outstanding_tasks: Bits representing outstanding task requests
* @outstanding_reqs: Bits representing outstanding transfer requests
* @capabilities: UFS Controller Capabilities
@@ -144,8 +178,12 @@ struct ufshcd_lrb {
* @tm_condition: condition variable for task management
* @ufshcd_state: UFSHCD states
* @intr_mask: Interrupt Mask Bits
+ * @ee_ctrl_mask: Exception event control mask
* @feh_workq: Work queue for fatal controller error handling
+ * @eeh_work: Worker to handle exception events
* @errors: HBA errors
+ * @dev_cmd: ufs device management command information
+ * @auto_bkops_enabled: to track whether bkops is enabled in device
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -164,6 +202,7 @@ struct ufs_hba {
struct device *dev;
struct ufshcd_lrb *lrb;
+ unsigned long lrb_in_use;
unsigned long outstanding_tasks;
unsigned long outstanding_reqs;
@@ -182,12 +221,19 @@ struct ufs_hba {
u32 ufshcd_state;
u32 intr_mask;
+ u16 ee_ctrl_mask;
/* Work Queues */
struct work_struct feh_workq;
+ struct work_struct eeh_work;
/* HBA Errors */
u32 errors;
+
+ /* Device management request data */
+ struct ufs_dev_cmd dev_cmd;
+
+ bool auto_bkops_enabled;
};
#define ufshcd_writel(hba, val, reg) \
@@ -208,4 +254,13 @@ static inline void ufshcd_hba_stop(struct ufs_hba *hba)
ufshcd_writel(hba, CONTROLLER_DISABLE, REG_CONTROLLER_ENABLE);
}
+static inline void check_upiu_size(void)
+{
+ BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
+ GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
+}
+
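check_upiu_size() wraps a BUILD_BUG_ON; a hypothetical call site (not in this patch) is enough to turn the size relation into a build-time assertion wherever the compiler emits the call:

static inline void example_upiu_size_check(void)
{
	/* fails the build if a request UPIU plus a full query descriptor
	 * no longer fits within ALIGNED_UPIU_SIZE */
	check_upiu_size();
}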
+extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
+extern int ufshcd_runtime_resume(struct ufs_hba *hba);
+extern int ufshcd_runtime_idle(struct ufs_hba *hba);
#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index d5c5f1482d7..f1e1b745910 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -39,7 +39,7 @@
enum {
TASK_REQ_UPIU_SIZE_DWORDS = 8,
TASK_RSP_UPIU_SIZE_DWORDS = 8,
- ALIGNED_UPIU_SIZE = 128,
+ ALIGNED_UPIU_SIZE = 512,
};
/* UFSHCI Registers */
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 2168258fb2c..74b88efde6a 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -751,7 +751,7 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
vscsi->affinity_hint_set = true;
} else {
- for (i = 0; i < vscsi->num_queues - VIRTIO_SCSI_VQ_BASE; i++)
+ for (i = 0; i < vscsi->num_queues; i++)
virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
vscsi->affinity_hint_set = false;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 89cbbabaff4..0170d4c4a8a 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -70,14 +70,14 @@ config SPI_ATH79
config SPI_ATMEL
tristate "Atmel SPI Controller"
- depends on (ARCH_AT91 || AVR32)
+ depends on (ARCH_AT91 || AVR32 || COMPILE_TEST)
help
This selects a driver for the Atmel SPI Controller, present on
many AT32 (AVR32) and AT91 (ARM) chips.
config SPI_BCM2835
tristate "BCM2835 SPI controller"
- depends on ARCH_BCM2835
+ depends on ARCH_BCM2835 || COMPILE_TEST
help
This selects a driver for the Broadcom BCM2835 SPI master.
@@ -88,10 +88,17 @@ config SPI_BCM2835
config SPI_BFIN5XX
tristate "SPI controller driver for ADI Blackfin5xx"
- depends on BLACKFIN
+ depends on BLACKFIN && !BF60x
help
This is the SPI controller master driver for Blackfin 5xx processor.
+config SPI_BFIN_V3
+ tristate "SPI controller v3 for Blackfin"
+ depends on BF60x
+ help
+ This is the SPI controller v3 master driver
+ found on Blackfin 60x processors.
+
config SPI_BFIN_SPORT
tristate "SPI bus via Blackfin SPORT"
depends on BLACKFIN
@@ -151,15 +158,22 @@ config SPI_COLDFIRE_QSPI
config SPI_DAVINCI
tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
- depends on ARCH_DAVINCI
+ depends on ARCH_DAVINCI || ARCH_KEYSTONE
select SPI_BITBANG
select TI_EDMA
help
SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
+config SPI_EFM32
+ tristate "EFM32 SPI controller"
+ depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
+ select SPI_BITBANG
+ help
+ Driver for the SPI controller found on Energy Micro's EFM32 SoCs.
+
config SPI_EP93XX
tristate "Cirrus Logic EP93xx SPI controller"
- depends on ARCH_EP93XX
+ depends on ARCH_EP93XX || COMPILE_TEST
help
This enables using the Cirrus EP93xx SPI controller in master
mode.
@@ -191,7 +205,7 @@ config SPI_GPIO
config SPI_IMX
tristate "Freescale i.MX SPI controllers"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
select SPI_BITBANG
default m if IMX_HAVE_PLATFORM_SPI_IMX
help
@@ -248,6 +262,13 @@ config SPI_FSL_SPI
This also enables using the Aeroflex Gaisler GRLIB SPI controller in
master mode.
+config SPI_FSL_DSPI
+ tristate "Freescale DSPI controller"
+ select SPI_BITBANG
+ help
+ This enables support for the Freescale DSPI controller in master
+ mode. The VF610 platform uses this controller.
+
config SPI_FSL_ESPI
bool "Freescale eSPI controller"
depends on FSL_SOC
@@ -280,20 +301,28 @@ config SPI_OMAP_UWIRE
config SPI_OMAP24XX
tristate "McSPI driver for OMAP"
- depends on ARCH_OMAP2PLUS
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
help
SPI master controller for OMAP24XX and later Multichannel SPI
(McSPI) modules.
+config SPI_TI_QSPI
+ tristate "DRA7xxx QSPI controller support"
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ help
+ QSPI master controller for DRA7xxx used for flash devices.
+ This device supports single, dual and quad reads, while
+ it only supports single write mode.
+
config SPI_OMAP_100K
tristate "OMAP SPI 100K"
- depends on ARCH_OMAP850 || ARCH_OMAP730
+ depends on ARCH_OMAP850 || ARCH_OMAP730 || COMPILE_TEST
help
OMAP SPI 100K master controller for omap7xx boards.
config SPI_ORION
tristate "Orion SPI master"
- depends on PLAT_ORION
+ depends on PLAT_ORION || COMPILE_TEST
help
This enables using the SPI master controller on the Orion chips.
@@ -341,7 +370,7 @@ config SPI_PXA2XX_PCI
config SPI_RSPI
tristate "Renesas RSPI controller"
- depends on SUPERH
+ depends on SUPERH && SH_DMAE_BASE
help
SPI driver for Renesas RSPI blocks.
@@ -385,7 +414,7 @@ config SPI_SH_MSIOF
config SPI_SH
tristate "SuperH SPI controller"
- depends on SUPERH
+ depends on SUPERH || COMPILE_TEST
help
SPI driver for SuperH SPI blocks.
@@ -398,13 +427,13 @@ config SPI_SH_SCI
config SPI_SH_HSPI
tristate "SuperH HSPI controller"
- depends on ARCH_SHMOBILE
+ depends on ARCH_SHMOBILE || COMPILE_TEST
help
SPI driver for SuperH HSPI blocks.
config SPI_SIRF
tristate "CSR SiRFprimaII SPI controller"
- depends on ARCH_SIRF
+ depends on SIRF_DMA
select SPI_BITBANG
help
SPI driver for CSR SiRFprimaII SoCs
@@ -418,7 +447,7 @@ config SPI_MXS
config SPI_TEGRA114
tristate "NVIDIA Tegra114 SPI Controller"
- depends on ARCH_TEGRA && TEGRA20_APB_DMA
+ depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST
help
SPI driver for NVIDIA Tegra114 SPI Controller interface. This controller
is different than the older SoCs SPI controller and also register interface
@@ -426,7 +455,7 @@ config SPI_TEGRA114
config SPI_TEGRA20_SFLASH
tristate "Nvidia Tegra20 Serial flash Controller"
- depends on ARCH_TEGRA
+ depends on ARCH_TEGRA || COMPILE_TEST
help
SPI driver for Nvidia Tegra20 Serial flash Controller interface.
The main usecase of this controller is to use spi flash as boot
@@ -434,7 +463,7 @@ config SPI_TEGRA20_SFLASH
config SPI_TEGRA20_SLINK
tristate "Nvidia Tegra20/Tegra30 SLINK Controller"
- depends on ARCH_TEGRA && TEGRA20_APB_DMA
+ depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST
help
SPI driver for Nvidia Tegra20/Tegra30 SLINK Controller interface.
@@ -457,7 +486,7 @@ config SPI_TOPCLIFF_PCH
config SPI_TXX9
tristate "Toshiba TXx9 SPI controller"
- depends on GPIOLIB && CPU_TX49XX
+ depends on GPIOLIB && (CPU_TX49XX || COMPILE_TEST)
help
SPI driver for Toshiba TXx9 MIPS SoCs
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 33f9c09561e..ab8d8644af0 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o
obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o
obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o
+obj-$(CONFIG_SPI_BFIN_V3) += spi-bfin-v3.o
obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o
obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o
@@ -27,9 +28,11 @@ obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o
obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o
obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o
spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o
+obj-$(CONFIG_SPI_EFM32) += spi-efm32.o
obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o
obj-$(CONFIG_SPI_FALCON) += spi-falcon.o
obj-$(CONFIG_SPI_FSL_CPM) += spi-fsl-cpm.o
+obj-$(CONFIG_SPI_FSL_DSPI) += spi-fsl-dspi.o
obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o
obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o
obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
@@ -46,6 +49,7 @@ obj-$(CONFIG_SPI_OCTEON) += spi-octeon.o
obj-$(CONFIG_SPI_OMAP_UWIRE) += spi-omap-uwire.o
obj-$(CONFIG_SPI_OMAP_100K) += spi-omap-100k.o
obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o
+obj-$(CONFIG_SPI_TI_QSPI) += spi-ti-qspi.o
obj-$(CONFIG_SPI_ORION) += spi-orion.o
obj-$(CONFIG_SPI_PL022) += spi-pl022.o
obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index 81b9adb6e76..f38855f7653 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -103,16 +103,6 @@ static void altera_spi_chipsel(struct spi_device *spi, int value)
}
}
-static int altera_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
-{
- return 0;
-}
-
-static int altera_spi_setup(struct spi_device *spi)
-{
- return 0;
-}
-
static inline unsigned int hw_txbyte(struct altera_spi *hw, int count)
{
if (hw->tx) {
@@ -134,7 +124,7 @@ static int altera_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
hw->tx = t->tx_buf;
hw->rx = t->rx_buf;
hw->count = 0;
- hw->bytes_per_word = t->bits_per_word / 8;
+ hw->bytes_per_word = DIV_ROUND_UP(t->bits_per_word, 8);
hw->len = t->len / hw->bytes_per_word;
if (hw->irq >= 0) {
@@ -150,12 +140,12 @@ static int altera_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
hw->imr &= ~ALTERA_SPI_CONTROL_IRRDY_MSK;
writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
} else {
- /* send the first byte */
- writel(hw_txbyte(hw, 0), hw->base + ALTERA_SPI_TXDATA);
-
- while (1) {
+ while (hw->count < hw->len) {
unsigned int rxd;
+ writel(hw_txbyte(hw, hw->count),
+ hw->base + ALTERA_SPI_TXDATA);
+
while (!(readl(hw->base + ALTERA_SPI_STATUS) &
ALTERA_SPI_STATUS_RRDY_MSK))
cpu_relax();
@@ -174,14 +164,7 @@ static int altera_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
}
hw->count++;
-
- if (hw->count < hw->len)
- writel(hw_txbyte(hw, hw->count),
- hw->base + ALTERA_SPI_TXDATA);
- else
- break;
}
-
}
return hw->count * hw->bytes_per_word;
@@ -217,7 +200,7 @@ static irqreturn_t altera_spi_irq(int irq, void *dev)
static int altera_spi_probe(struct platform_device *pdev)
{
- struct altera_spi_platform_data *platp = pdev->dev.platform_data;
+ struct altera_spi_platform_data *platp = dev_get_platdata(&pdev->dev);
struct altera_spi *hw;
struct spi_master *master;
struct resource *res;
@@ -231,7 +214,6 @@ static int altera_spi_probe(struct platform_device *pdev)
master->bus_num = pdev->id;
master->num_chipselect = 16;
master->mode_bits = SPI_CS_HIGH;
- master->setup = altera_spi_setup;
hw = spi_master_get_devdata(master);
platform_set_drvdata(pdev, hw);
@@ -240,21 +222,16 @@ static int altera_spi_probe(struct platform_device *pdev)
hw->bitbang.master = spi_master_get(master);
if (!hw->bitbang.master)
return err;
- hw->bitbang.setup_transfer = altera_spi_setupxfer;
hw->bitbang.chipselect = altera_spi_chipsel;
hw->bitbang.txrx_bufs = altera_spi_txrx;
/* find and map our resources */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- goto exit_busy;
- if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
- pdev->name))
- goto exit_busy;
- hw->base = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!hw->base)
- goto exit_busy;
+ hw->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->base)) {
+ err = PTR_ERR(hw->base);
+ goto exit;
+ }
/* program defaults into the registers */
hw->imr = 0; /* disable spi interrupts */
writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
@@ -281,9 +258,6 @@ static int altera_spi_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq);
return 0;
-
-exit_busy:
- err = -EBUSY;
exit:
spi_master_put(master);
return err;
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 0e06407a467..37bad952ab3 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -221,7 +221,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
sp = spi_master_get_devdata(master);
platform_set_drvdata(pdev, sp);
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
master->setup = ath79_spi_setup;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index ea1ec009f44..fd7cc566095 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -360,12 +360,12 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
gpio_set_value(asd->npcs_pin, !active);
}
-static void atmel_spi_lock(struct atmel_spi *as)
+static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
{
spin_lock_irqsave(&as->lock, as->flags);
}
-static void atmel_spi_unlock(struct atmel_spi *as)
+static void atmel_spi_unlock(struct atmel_spi *as) __releases(&as->lock)
{
spin_unlock_irqrestore(&as->lock, as->flags);
}
@@ -629,9 +629,9 @@ static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
goto err_dma;
dev_dbg(master->dev.parent,
- " start dma xfer %p: len %u tx %p/%08x rx %p/%08x\n",
- xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
- xfer->rx_buf, xfer->rx_dma);
+ " start dma xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
+ xfer, xfer->len, xfer->tx_buf, (unsigned long long)xfer->tx_dma,
+ xfer->rx_buf, (unsigned long long)xfer->rx_dma);
/* Enable relevant interrupts */
spi_writel(as, IER, SPI_BIT(OVRES));
@@ -732,9 +732,10 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
spi_writel(as, TCR, len);
dev_dbg(&msg->spi->dev,
- " start xfer %p: len %u tx %p/%08x rx %p/%08x\n",
- xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
- xfer->rx_buf, xfer->rx_dma);
+ " start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
+ xfer, xfer->len, xfer->tx_buf,
+ (unsigned long long)xfer->tx_dma, xfer->rx_buf,
+ (unsigned long long)xfer->rx_dma);
} else {
xfer = as->next_transfer;
remaining = as->next_remaining_bytes;
@@ -771,9 +772,10 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master,
spi_writel(as, TNCR, len);
dev_dbg(&msg->spi->dev,
- " next xfer %p: len %u tx %p/%08x rx %p/%08x\n",
- xfer, xfer->len, xfer->tx_buf, xfer->tx_dma,
- xfer->rx_buf, xfer->rx_dma);
+ " next xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
+ xfer, xfer->len, xfer->tx_buf,
+ (unsigned long long)xfer->tx_dma, xfer->rx_buf,
+ (unsigned long long)xfer->rx_dma);
ieval = SPI_BIT(ENDRX) | SPI_BIT(OVRES);
} else {
spi_writel(as, RNCR, 0);
@@ -1579,7 +1581,9 @@ static int atmel_spi_probe(struct platform_device *pdev)
goto out_unmap_regs;
/* Initialize the hardware */
- clk_enable(clk);
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto out_unmap_regs;
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
if (as->caps.has_wdrbt) {
@@ -1609,7 +1613,7 @@ out_free_dma:
spi_writel(as, CR, SPI_BIT(SWRST));
spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
- clk_disable(clk);
+ clk_disable_unprepare(clk);
free_irq(irq, master);
out_unmap_regs:
iounmap(as->regs);
@@ -1661,7 +1665,7 @@ static int atmel_spi_remove(struct platform_device *pdev)
dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
as->buffer_dma);
- clk_disable(as->clk);
+ clk_disable_unprepare(as->clk);
clk_put(as->clk);
free_irq(as->irq, master);
iounmap(as->regs);
@@ -1678,7 +1682,7 @@ static int atmel_spi_suspend(struct platform_device *pdev, pm_message_t mesg)
struct spi_master *master = platform_get_drvdata(pdev);
struct atmel_spi *as = spi_master_get_devdata(master);
- clk_disable(as->clk);
+ clk_disable_unprepare(as->clk);
return 0;
}
@@ -1687,7 +1691,7 @@ static int atmel_spi_resume(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct atmel_spi *as = spi_master_get_devdata(master);
- clk_enable(as->clk);
+ return clk_prepare_enable(as->clk);
return 0;
}
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index e1965553ab7..1d00d9b397d 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -776,7 +776,7 @@ static int au1550_spi_probe(struct platform_device *pdev)
hw = spi_master_get_devdata(master);
hw->master = spi_master_get(master);
- hw->pdata = pdev->dev.platform_data;
+ hw->pdata = dev_get_platdata(&pdev->dev);
hw->dev = &pdev->dev;
if (hw->pdata == NULL) {
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index a4185e49232..52c81481c5c 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -314,7 +314,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
master->mode_bits = BCM2835_SPI_MODE_BITS;
- master->bits_per_word_mask = BIT(8 - 1);
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
master->bus_num = -1;
master->num_chipselect = 3;
master->transfer_one_message = bcm2835_spi_transfer_one;
@@ -325,12 +325,6 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
init_completion(&bs->done);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "could not get memory resource\n");
- err = -ENODEV;
- goto out_master_put;
- }
-
bs->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(bs->regs)) {
err = PTR_ERR(bs->regs);
@@ -383,7 +377,7 @@ out_master_put:
static int bcm2835_spi_remove(struct platform_device *pdev)
{
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
struct bcm2835_spi *bs = spi_master_get_devdata(master);
free_irq(bs->irq, master);
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 9fd7a39b802..536b0e36382 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -231,24 +231,6 @@ static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
return 0;
}
-static int bcm63xx_spi_prepare_transfer(struct spi_master *master)
-{
- struct bcm63xx_spi *bs = spi_master_get_devdata(master);
-
- pm_runtime_get_sync(&bs->pdev->dev);
-
- return 0;
-}
-
-static int bcm63xx_spi_unprepare_transfer(struct spi_master *master)
-{
- struct bcm63xx_spi *bs = spi_master_get_devdata(master);
-
- pm_runtime_put(&bs->pdev->dev);
-
- return 0;
-}
-
static int bcm63xx_spi_transfer_one(struct spi_master *master,
struct spi_message *m)
{
@@ -353,20 +335,13 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
{
struct resource *r;
struct device *dev = &pdev->dev;
- struct bcm63xx_spi_pdata *pdata = pdev->dev.platform_data;
+ struct bcm63xx_spi_pdata *pdata = dev_get_platdata(&pdev->dev);
int irq;
struct spi_master *master;
struct clk *clk;
struct bcm63xx_spi *bs;
int ret;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(dev, "no iomem\n");
- ret = -ENXIO;
- goto out;
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "no irq\n");
@@ -393,6 +368,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
bs->pdev = pdev;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
bs->regs = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(bs->regs)) {
ret = PTR_ERR(bs->regs);
@@ -412,11 +388,10 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->num_chipselect;
- master->prepare_transfer_hardware = bcm63xx_spi_prepare_transfer;
- master->unprepare_transfer_hardware = bcm63xx_spi_unprepare_transfer;
master->transfer_one_message = bcm63xx_spi_transfer_one;
master->mode_bits = MODEBITS;
master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->auto_runtime_pm = true;
bs->msg_type_shift = pdata->msg_type_shift;
bs->msg_ctl_width = pdata->msg_ctl_width;
bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA));
@@ -480,8 +455,7 @@ static int bcm63xx_spi_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int bcm63xx_spi_suspend(struct device *dev)
{
- struct spi_master *master =
- platform_get_drvdata(to_platform_device(dev));
+ struct spi_master *master = dev_get_drvdata(dev);
struct bcm63xx_spi *bs = spi_master_get_devdata(master);
spi_master_suspend(master);
@@ -493,8 +467,7 @@ static int bcm63xx_spi_suspend(struct device *dev)
static int bcm63xx_spi_resume(struct device *dev)
{
- struct spi_master *master =
- platform_get_drvdata(to_platform_device(dev));
+ struct spi_master *master = dev_get_drvdata(dev);
struct bcm63xx_spi *bs = spi_master_get_devdata(master);
clk_prepare_enable(bs->clk);
diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c
index 07ec597f973..91921b5f581 100644
--- a/drivers/spi/spi-bfin-sport.c
+++ b/drivers/spi/spi-bfin-sport.c
@@ -756,7 +756,7 @@ static int bfin_sport_spi_probe(struct platform_device *pdev)
struct bfin_sport_spi_master_data *drv_data;
int status;
- platform_info = dev->platform_data;
+ platform_info = dev_get_platdata(dev);
/* Allocate master with space for drv_data */
master = spi_alloc_master(dev, sizeof(*master) + 16);
diff --git a/drivers/spi/spi-bfin-v3.c b/drivers/spi/spi-bfin-v3.c
new file mode 100644
index 00000000000..f4bf81347d6
--- /dev/null
+++ b/drivers/spi/spi-bfin-v3.c
@@ -0,0 +1,965 @@
+/*
+ * Analog Devices SPI3 controller driver
+ *
+ * Copyright (c) 2013 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+#include <asm/bfin_spi3.h>
+#include <asm/cacheflush.h>
+#include <asm/dma.h>
+#include <asm/portmux.h>
+
+enum bfin_spi_state {
+ START_STATE,
+ RUNNING_STATE,
+ DONE_STATE,
+ ERROR_STATE
+};
+
+struct bfin_spi_master;
+
+struct bfin_spi_transfer_ops {
+ void (*write) (struct bfin_spi_master *);
+ void (*read) (struct bfin_spi_master *);
+ void (*duplex) (struct bfin_spi_master *);
+};
+
+/* runtime info for spi master */
+struct bfin_spi_master {
+ /* SPI framework hookup */
+ struct spi_master *master;
+
+ /* Regs base of SPI controller */
+ struct bfin_spi_regs __iomem *regs;
+
+ /* Pin request list */
+ u16 *pin_req;
+
+ /* Message Transfer pump */
+ struct tasklet_struct pump_transfers;
+
+ /* Current message transfer state info */
+ struct spi_message *cur_msg;
+ struct spi_transfer *cur_transfer;
+ struct bfin_spi_device *cur_chip;
+ unsigned transfer_len;
+
+ /* transfer buffer */
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+
+ /* dma info */
+ unsigned int tx_dma;
+ unsigned int rx_dma;
+ dma_addr_t tx_dma_addr;
+ dma_addr_t rx_dma_addr;
+ unsigned long dummy_buffer; /* used in unidirectional transfer */
+ unsigned long tx_dma_size;
+ unsigned long rx_dma_size;
+ int tx_num;
+ int rx_num;
+
+ /* store register value for suspend/resume */
+ u32 control;
+ u32 ssel;
+
+ unsigned long sclk;
+ enum bfin_spi_state state;
+
+ const struct bfin_spi_transfer_ops *ops;
+};
+
+struct bfin_spi_device {
+ u32 control;
+ u32 clock;
+ u32 ssel;
+
+ u8 cs;
+ u16 cs_chg_udelay; /* Some devices require > 255usec delay */
+ u32 cs_gpio;
+ u32 tx_dummy_val; /* tx value for rx only transfer */
+ bool enable_dma;
+ const struct bfin_spi_transfer_ops *ops;
+};
+
+static void bfin_spi_enable(struct bfin_spi_master *drv_data)
+{
+ bfin_write_or(&drv_data->regs->control, SPI_CTL_EN);
+}
+
+static void bfin_spi_disable(struct bfin_spi_master *drv_data)
+{
+ bfin_write_and(&drv_data->regs->control, ~SPI_CTL_EN);
+}
+
+/* Calculate the SPI_CLOCK register value based on the requested speed in Hz */
+static u32 hz_to_spi_clock(u32 sclk, u32 speed_hz)
+{
+ u32 spi_clock = sclk / speed_hz;
+
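+ /* the register holds (divider - 1), hence the decrement; a zero quotient stays 0 */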
+ if (spi_clock)
+ spi_clock--;
+ return spi_clock;
+}
+
+static int bfin_spi_flush(struct bfin_spi_master *drv_data)
+{
+ unsigned long limit = loops_per_jiffy << 1;
+
+ /* wait for stop and clear stat */
+ while (!(bfin_read(&drv_data->regs->status) & SPI_STAT_SPIF) && --limit)
+ cpu_relax();
+
+ bfin_write(&drv_data->regs->status, 0xFFFFFFFF);
+
+ return limit;
+}
+
+/* Chip select operation functions for cs_change flag */
+static void bfin_spi_cs_active(struct bfin_spi_master *drv_data, struct bfin_spi_device *chip)
+{
+ if (likely(chip->cs < MAX_CTRL_CS))
+ bfin_write_and(&drv_data->regs->ssel, ~chip->ssel);
+ else
+ gpio_set_value(chip->cs_gpio, 0);
+}
+
+static void bfin_spi_cs_deactive(struct bfin_spi_master *drv_data,
+ struct bfin_spi_device *chip)
+{
+ if (likely(chip->cs < MAX_CTRL_CS))
+ bfin_write_or(&drv_data->regs->ssel, chip->ssel);
+ else
+ gpio_set_value(chip->cs_gpio, 1);
+
+ /* Move delay here for consistency */
+ if (chip->cs_chg_udelay)
+ udelay(chip->cs_chg_udelay);
+}
+
+/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */
+static inline void bfin_spi_cs_enable(struct bfin_spi_master *drv_data,
+ struct bfin_spi_device *chip)
+{
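+ /* chip->ssel is (1 << cs) << 8, so >> 8 yields the pin-enable bit in the low byte */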
+ if (chip->cs < MAX_CTRL_CS)
+ bfin_write_or(&drv_data->regs->ssel, chip->ssel >> 8);
+}
+
+static inline void bfin_spi_cs_disable(struct bfin_spi_master *drv_data,
+ struct bfin_spi_device *chip)
+{
+ if (chip->cs < MAX_CTRL_CS)
+ bfin_write_and(&drv_data->regs->ssel, ~(chip->ssel >> 8));
+}
+
+/* stop the controller and reconfigure it for the current chip */
+static void bfin_spi_restore_state(struct bfin_spi_master *drv_data)
+{
+ struct bfin_spi_device *chip = drv_data->cur_chip;
+
+ /* Clear status and disable clock */
+ bfin_write(&drv_data->regs->status, 0xFFFFFFFF);
+ bfin_write(&drv_data->regs->rx_control, 0x0);
+ bfin_write(&drv_data->regs->tx_control, 0x0);
+ bfin_spi_disable(drv_data);
+
+ SSYNC();
+
+ /* Load the registers */
+ bfin_write(&drv_data->regs->control, chip->control);
+ bfin_write(&drv_data->regs->clock, chip->clock);
+
+ bfin_spi_enable(drv_data);
+ drv_data->tx_num = drv_data->rx_num = 0;
+ /* we always use the tx side to initiate the transfer */
+ bfin_write(&drv_data->regs->rx_control, SPI_RXCTL_REN);
+ bfin_write(&drv_data->regs->tx_control,
+ SPI_TXCTL_TEN | SPI_TXCTL_TTI);
+ bfin_spi_cs_active(drv_data, chip);
+}
+
+/* discard invalid rx data and empty rfifo */
+static inline void dummy_read(struct bfin_spi_master *drv_data)
+{
+ while (!(bfin_read(&drv_data->regs->status) & SPI_STAT_RFE))
+ bfin_read(&drv_data->regs->rfifo);
+}
+
+static void bfin_spi_u8_write(struct bfin_spi_master *drv_data)
+{
+ dummy_read(drv_data);
+ while (drv_data->tx < drv_data->tx_end) {
+ bfin_write(&drv_data->regs->tfifo, (*(u8 *)(drv_data->tx++)));
+ while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ bfin_read(&drv_data->regs->rfifo);
+ }
+}
+
+static void bfin_spi_u8_read(struct bfin_spi_master *drv_data)
+{
+ u32 tx_val = drv_data->cur_chip->tx_dummy_val;
+
+ dummy_read(drv_data);
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tfifo, tx_val);
+ while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ *(u8 *)(drv_data->rx++) = bfin_read(&drv_data->regs->rfifo);
+ }
+}
+
+static void bfin_spi_u8_duplex(struct bfin_spi_master *drv_data)
+{
+ dummy_read(drv_data);
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tfifo, (*(u8 *)(drv_data->tx++)));
+ while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ *(u8 *)(drv_data->rx++) = bfin_read(&drv_data->regs->rfifo);
+ }
+}
+
+static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u8 = {
+ .write = bfin_spi_u8_write,
+ .read = bfin_spi_u8_read,
+ .duplex = bfin_spi_u8_duplex,
+};
+
+static void bfin_spi_u16_write(struct bfin_spi_master *drv_data)
+{
+ dummy_read(drv_data);
+ while (drv_data->tx < drv_data->tx_end) {
+ bfin_write(&drv_data->regs->tfifo, (*(u16 *)drv_data->tx));
+ drv_data->tx += 2;
+ while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ bfin_read(&drv_data->regs->rfifo);
+ }
+}
+
+static void bfin_spi_u16_read(struct bfin_spi_master *drv_data)
+{
+ u32 tx_val = drv_data->cur_chip->tx_dummy_val;
+
+ dummy_read(drv_data);
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tfifo, tx_val);
+ while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ *(u16 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo);
+ drv_data->rx += 2;
+ }
+}
+
+static void bfin_spi_u16_duplex(struct bfin_spi_master *drv_data)
+{
+ dummy_read(drv_data);
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tfifo, (*(u16 *)drv_data->tx));
+ drv_data->tx += 2;
+ while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ *(u16 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo);
+ drv_data->rx += 2;
+ }
+}
+
+static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u16 = {
+ .write = bfin_spi_u16_write,
+ .read = bfin_spi_u16_read,
+ .duplex = bfin_spi_u16_duplex,
+};
+
+static void bfin_spi_u32_write(struct bfin_spi_master *drv_data)
+{
+ dummy_read(drv_data);
+ while (drv_data->tx < drv_data->tx_end) {
+ bfin_write(&drv_data->regs->tfifo, (*(u32 *)drv_data->tx));
+ drv_data->tx += 4;
+ while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ bfin_read(&drv_data->regs->rfifo);
+ }
+}
+
+static void bfin_spi_u32_read(struct bfin_spi_master *drv_data)
+{
+ u32 tx_val = drv_data->cur_chip->tx_dummy_val;
+
+ dummy_read(drv_data);
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tfifo, tx_val);
+ while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ *(u32 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo);
+ drv_data->rx += 4;
+ }
+}
+
+static void bfin_spi_u32_duplex(struct bfin_spi_master *drv_data)
+{
+ dummy_read(drv_data);
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tfifo, (*(u32 *)drv_data->tx));
+ drv_data->tx += 4;
+ while (bfin_read(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ *(u32 *)drv_data->rx = bfin_read(&drv_data->regs->rfifo);
+ drv_data->rx += 4;
+ }
+}
+
+static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u32 = {
+ .write = bfin_spi_u32_write,
+ .read = bfin_spi_u32_read,
+ .duplex = bfin_spi_u32_duplex,
+};
+
+/* advance to the next transfer in the message, or mark it done */
+static void bfin_spi_next_transfer(struct bfin_spi_master *drv)
+{
+ struct spi_message *msg = drv->cur_msg;
+ struct spi_transfer *t = drv->cur_transfer;
+
+ /* Move to next transfer */
+ if (t->transfer_list.next != &msg->transfers) {
+ drv->cur_transfer = list_entry(t->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ drv->state = RUNNING_STATE;
+ } else {
+ drv->state = DONE_STATE;
+ drv->cur_transfer = NULL;
+ }
+}
+
+static void bfin_spi_giveback(struct bfin_spi_master *drv_data)
+{
+ struct bfin_spi_device *chip = drv_data->cur_chip;
+
+ bfin_spi_cs_deactive(drv_data, chip);
+ spi_finalize_current_message(drv_data->master);
+}
+
+static int bfin_spi_setup_transfer(struct bfin_spi_master *drv)
+{
+ struct spi_transfer *t = drv->cur_transfer;
+ u32 cr, cr_width;
+
+ if (t->tx_buf) {
+ drv->tx = (void *)t->tx_buf;
+ drv->tx_end = drv->tx + t->len;
+ } else {
+ drv->tx = NULL;
+ }
+
+ if (t->rx_buf) {
+ drv->rx = t->rx_buf;
+ drv->rx_end = drv->rx + t->len;
+ } else {
+ drv->rx = NULL;
+ }
+
+ drv->transfer_len = t->len;
+
+ /* bits per word setup */
+ switch (t->bits_per_word) {
+ case 8:
+ cr_width = SPI_CTL_SIZE08;
+ drv->ops = &bfin_bfin_spi_transfer_ops_u8;
+ break;
+ case 16:
+ cr_width = SPI_CTL_SIZE16;
+ drv->ops = &bfin_bfin_spi_transfer_ops_u16;
+ break;
+ case 32:
+ cr_width = SPI_CTL_SIZE32;
+ drv->ops = &bfin_bfin_spi_transfer_ops_u32;
+ break;
+ default:
+ return -EINVAL;
+ }
+ cr = bfin_read(&drv->regs->control) & ~SPI_CTL_SIZE;
+ cr |= cr_width;
+ bfin_write(&drv->regs->control, cr);
+
+ /* speed setup */
+ bfin_write(&drv->regs->clock,
+ hz_to_spi_clock(drv->sclk, t->speed_hz));
+ return 0;
+}
+
+static int bfin_spi_dma_xfer(struct bfin_spi_master *drv_data)
+{
+ struct spi_transfer *t = drv_data->cur_transfer;
+ struct spi_message *msg = drv_data->cur_msg;
+ struct bfin_spi_device *chip = drv_data->cur_chip;
+ u32 dma_config;
+ unsigned long word_count, word_size;
+ void *tx_buf, *rx_buf;
+
+ switch (t->bits_per_word) {
+ case 8:
+ dma_config = WDSIZE_8 | PSIZE_8;
+ word_count = drv_data->transfer_len;
+ word_size = 1;
+ break;
+ case 16:
+ dma_config = WDSIZE_16 | PSIZE_16;
+ word_count = drv_data->transfer_len / 2;
+ word_size = 2;
+ break;
+ default:
+ dma_config = WDSIZE_32 | PSIZE_32;
+ word_count = drv_data->transfer_len / 4;
+ word_size = 4;
+ break;
+ }
+
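+ /*
+ * For half-duplex transfers the missing direction is backed by the
+ * single-word dummy_buffer with a DMA modify of 0, so both channels
+ * run for the same word count.
+ */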
+ if (!drv_data->rx) {
+ tx_buf = drv_data->tx;
+ rx_buf = &drv_data->dummy_buffer;
+ drv_data->tx_dma_size = drv_data->transfer_len;
+ drv_data->rx_dma_size = sizeof(drv_data->dummy_buffer);
+ set_dma_x_modify(drv_data->tx_dma, word_size);
+ set_dma_x_modify(drv_data->rx_dma, 0);
+ } else if (!drv_data->tx) {
+ drv_data->dummy_buffer = chip->tx_dummy_val;
+ tx_buf = &drv_data->dummy_buffer;
+ rx_buf = drv_data->rx;
+ drv_data->tx_dma_size = sizeof(drv_data->dummy_buffer);
+ drv_data->rx_dma_size = drv_data->transfer_len;
+ set_dma_x_modify(drv_data->tx_dma, 0);
+ set_dma_x_modify(drv_data->rx_dma, word_size);
+ } else {
+ tx_buf = drv_data->tx;
+ rx_buf = drv_data->rx;
+ drv_data->tx_dma_size = drv_data->rx_dma_size
+ = drv_data->transfer_len;
+ set_dma_x_modify(drv_data->tx_dma, word_size);
+ set_dma_x_modify(drv_data->rx_dma, word_size);
+ }
+
+ drv_data->tx_dma_addr = dma_map_single(&msg->spi->dev,
+ (void *)tx_buf,
+ drv_data->tx_dma_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&msg->spi->dev,
+ drv_data->tx_dma_addr))
+ return -ENOMEM;
+
+ drv_data->rx_dma_addr = dma_map_single(&msg->spi->dev,
+ (void *)rx_buf,
+ drv_data->rx_dma_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&msg->spi->dev,
+ drv_data->rx_dma_addr)) {
+ dma_unmap_single(&msg->spi->dev,
+ drv_data->tx_dma_addr,
+ drv_data->tx_dma_size,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ dummy_read(drv_data);
+ set_dma_x_count(drv_data->tx_dma, word_count);
+ set_dma_x_count(drv_data->rx_dma, word_count);
+ set_dma_start_addr(drv_data->tx_dma, drv_data->tx_dma_addr);
+ set_dma_start_addr(drv_data->rx_dma, drv_data->rx_dma_addr);
+ dma_config |= DMAFLOW_STOP | RESTART | DI_EN;
+ set_dma_config(drv_data->tx_dma, dma_config);
+ set_dma_config(drv_data->rx_dma, dma_config | WNR);
+ enable_dma(drv_data->tx_dma);
+ enable_dma(drv_data->rx_dma);
+ SSYNC();
+
+ bfin_write(&drv_data->regs->rx_control, SPI_RXCTL_REN | SPI_RXCTL_RDR_NE);
+ SSYNC();
+ bfin_write(&drv_data->regs->tx_control,
+ SPI_TXCTL_TEN | SPI_TXCTL_TTI | SPI_TXCTL_TDR_NF);
+
+ return 0;
+}
+
+static int bfin_spi_pio_xfer(struct bfin_spi_master *drv_data)
+{
+ struct spi_message *msg = drv_data->cur_msg;
+
+ if (!drv_data->rx) {
+ /* write only half duplex */
+ drv_data->ops->write(drv_data);
+ if (drv_data->tx != drv_data->tx_end)
+ return -EIO;
+ } else if (!drv_data->tx) {
+ /* read only half duplex */
+ drv_data->ops->read(drv_data);
+ if (drv_data->rx != drv_data->rx_end)
+ return -EIO;
+ } else {
+ /* full duplex mode */
+ drv_data->ops->duplex(drv_data);
+ if (drv_data->tx != drv_data->tx_end)
+ return -EIO;
+ }
+
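+ /* bfin_spi_flush() returns 0 when SPIF never asserted, i.e. the wait timed out */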
+ if (!bfin_spi_flush(drv_data))
+ return -EIO;
+ msg->actual_length += drv_data->transfer_len;
+ tasklet_schedule(&drv_data->pump_transfers);
+ return 0;
+}
+
+static void bfin_spi_pump_transfers(unsigned long data)
+{
+ struct bfin_spi_master *drv_data = (struct bfin_spi_master *)data;
+ struct spi_message *msg = NULL;
+ struct spi_transfer *t = NULL;
+ struct bfin_spi_device *chip = NULL;
+ int ret;
+
+ /* Get current state information */
+ msg = drv_data->cur_msg;
+ t = drv_data->cur_transfer;
+ chip = drv_data->cur_chip;
+
+ /* Handle an aborted message */
+ if (drv_data->state == ERROR_STATE) {
+ msg->status = -EIO;
+ bfin_spi_giveback(drv_data);
+ return;
+ }
+
+ if (drv_data->state == RUNNING_STATE) {
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+ if (t->cs_change)
+ bfin_spi_cs_deactive(drv_data, chip);
+ bfin_spi_next_transfer(drv_data);
+ t = drv_data->cur_transfer;
+ }
+ /* Handle end of message */
+ if (drv_data->state == DONE_STATE) {
+ msg->status = 0;
+ bfin_spi_giveback(drv_data);
+ return;
+ }
+
+ if ((t->len == 0) || (t->tx_buf == NULL && t->rx_buf == NULL)) {
+ /* Schedule next transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+ return;
+ }
+
+ ret = bfin_spi_setup_transfer(drv_data);
+ if (ret) {
+ msg->status = ret;
+ bfin_spi_giveback(drv_data);
+ return;
+ }
+
+ bfin_write(&drv_data->regs->status, 0xFFFFFFFF);
+ bfin_spi_cs_active(drv_data, chip);
+ drv_data->state = RUNNING_STATE;
+
+ if (chip->enable_dma)
+ ret = bfin_spi_dma_xfer(drv_data);
+ else
+ ret = bfin_spi_pio_xfer(drv_data);
+ if (ret) {
+ msg->status = ret;
+ bfin_spi_giveback(drv_data);
+ }
+}
+
+static int bfin_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct bfin_spi_master *drv_data = spi_master_get_devdata(master);
+
+ drv_data->cur_msg = m;
+ drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
+ bfin_spi_restore_state(drv_data);
+
+ drv_data->state = START_STATE;
+ drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
+ struct spi_transfer, transfer_list);
+
+ tasklet_schedule(&drv_data->pump_transfers);
+ return 0;
+}
+
+#define MAX_SPI_SSEL 7
+
+static const u16 ssel[][MAX_SPI_SSEL] = {
+ {P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3,
+ P_SPI0_SSEL4, P_SPI0_SSEL5,
+ P_SPI0_SSEL6, P_SPI0_SSEL7},
+
+ {P_SPI1_SSEL1, P_SPI1_SSEL2, P_SPI1_SSEL3,
+ P_SPI1_SSEL4, P_SPI1_SSEL5,
+ P_SPI1_SSEL6, P_SPI1_SSEL7},
+
+ {P_SPI2_SSEL1, P_SPI2_SSEL2, P_SPI2_SSEL3,
+ P_SPI2_SSEL4, P_SPI2_SSEL5,
+ P_SPI2_SSEL6, P_SPI2_SSEL7},
+};
+
+static int bfin_spi_setup(struct spi_device *spi)
+{
+ struct bfin_spi_master *drv_data = spi_master_get_devdata(spi->master);
+ struct bfin_spi_device *chip = spi_get_ctldata(spi);
+ u32 bfin_ctl_reg = SPI_CTL_ODM | SPI_CTL_PSSE;
+ int ret = -EINVAL;
+
+ if (!chip) {
+ struct bfin_spi3_chip *chip_info = spi->controller_data;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip) {
+ dev_err(&spi->dev, "can not allocate chip data\n");
+ return -ENOMEM;
+ }
+ if (chip_info) {
+ if (chip_info->control & ~bfin_ctl_reg) {
+ dev_err(&spi->dev,
+ "do not set bits that the SPI framework manages\n");
+ goto error;
+ }
+ chip->control = chip_info->control;
+ chip->cs_chg_udelay = chip_info->cs_chg_udelay;
+ chip->tx_dummy_val = chip_info->tx_dummy_val;
+ chip->enable_dma = chip_info->enable_dma;
+ }
+ chip->cs = spi->chip_select;
+ if (chip->cs < MAX_CTRL_CS) {
+ chip->ssel = (1 << chip->cs) << 8;
+ ret = peripheral_request(ssel[spi->master->bus_num]
+ [chip->cs-1], dev_name(&spi->dev));
+ if (ret) {
+ dev_err(&spi->dev, "peripheral_request() error\n");
+ goto error;
+ }
+ } else {
+ chip->cs_gpio = chip->cs - MAX_CTRL_CS;
+ ret = gpio_request_one(chip->cs_gpio, GPIOF_OUT_INIT_HIGH,
+ dev_name(&spi->dev));
+ if (ret) {
+ dev_err(&spi->dev, "gpio_request_one() error\n");
+ goto error;
+ }
+ }
+ spi_set_ctldata(spi, chip);
+ }
+
+ /* force a default base state */
+ chip->control &= bfin_ctl_reg;
+
+ if (spi->mode & SPI_CPOL)
+ chip->control |= SPI_CTL_CPOL;
+ if (spi->mode & SPI_CPHA)
+ chip->control |= SPI_CTL_CPHA;
+ if (spi->mode & SPI_LSB_FIRST)
+ chip->control |= SPI_CTL_LSBF;
+ chip->control |= SPI_CTL_MSTR;
+ /* chip select is controlled in software */
+ chip->control &= ~SPI_CTL_ASSEL;
+
+ chip->clock = hz_to_spi_clock(drv_data->sclk, spi->max_speed_hz);
+
+ bfin_spi_cs_enable(drv_data, chip);
+ bfin_spi_cs_deactive(drv_data, chip);
+
+ return 0;
+error:
+ if (chip) {
+ kfree(chip);
+ spi_set_ctldata(spi, NULL);
+ }
+
+ return ret;
+}
+
+static void bfin_spi_cleanup(struct spi_device *spi)
+{
+ struct bfin_spi_device *chip = spi_get_ctldata(spi);
+ struct bfin_spi_master *drv_data = spi_master_get_devdata(spi->master);
+
+ if (!chip)
+ return;
+
+ if (chip->cs < MAX_CTRL_CS) {
+ peripheral_free(ssel[spi->master->bus_num]
+ [chip->cs-1]);
+ bfin_spi_cs_disable(drv_data, chip);
+ } else {
+ gpio_free(chip->cs_gpio);
+ }
+
+ kfree(chip);
+ spi_set_ctldata(spi, NULL);
+}
+
+static irqreturn_t bfin_spi_tx_dma_isr(int irq, void *dev_id)
+{
+ struct bfin_spi_master *drv_data = dev_id;
+ u32 dma_stat = get_dma_curr_irqstat(drv_data->tx_dma);
+
+ clear_dma_irqstat(drv_data->tx_dma);
+ if (dma_stat & DMA_DONE) {
+ drv_data->tx_num++;
+ } else {
+ dev_err(&drv_data->master->dev,
+ "spi tx dma error: %d\n", dma_stat);
+ if (drv_data->tx)
+ drv_data->state = ERROR_STATE;
+ }
+ bfin_write_and(&drv_data->regs->tx_control, ~SPI_TXCTL_TDR_NF);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bfin_spi_rx_dma_isr(int irq, void *dev_id)
+{
+ struct bfin_spi_master *drv_data = dev_id;
+ struct spi_message *msg = drv_data->cur_msg;
+ u32 dma_stat = get_dma_curr_irqstat(drv_data->rx_dma);
+
+ clear_dma_irqstat(drv_data->rx_dma);
+ if (dma_stat & DMA_DONE) {
+ drv_data->rx_num++;
+ /* we may fail on tx dma */
+ if (drv_data->state != ERROR_STATE)
+ msg->actual_length += drv_data->transfer_len;
+ } else {
+ drv_data->state = ERROR_STATE;
+ dev_err(&drv_data->master->dev,
+ "spi rx dma error: %d\n", dma_stat);
+ }
+ bfin_write(&drv_data->regs->tx_control, 0);
+ bfin_write(&drv_data->regs->rx_control, 0);
+ if (drv_data->rx_num != drv_data->tx_num)
+ dev_dbg(&drv_data->master->dev,
+ "dma interrupt missing: tx=%d,rx=%d\n",
+ drv_data->tx_num, drv_data->rx_num);
+ tasklet_schedule(&drv_data->pump_transfers);
+ return IRQ_HANDLED;
+}
+
+static int bfin_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bfin_spi3_master *info = dev_get_platdata(dev);
+ struct spi_master *master;
+ struct bfin_spi_master *drv_data;
+ struct resource *mem, *res;
+ unsigned int tx_dma, rx_dma;
+ unsigned long sclk;
+ int ret;
+
+ if (!info) {
+ dev_err(dev, "platform data missing!\n");
+ return -ENODEV;
+ }
+
+ sclk = get_sclk1();
+ if (!sclk) {
+ dev_err(dev, "can not get sclk1\n");
+ return -ENXIO;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!res) {
+ dev_err(dev, "can not get tx dma resource\n");
+ return -ENXIO;
+ }
+ tx_dma = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!res) {
+ dev_err(dev, "can not get rx dma resource\n");
+ return -ENXIO;
+ }
+ rx_dma = res->start;
+
+ /* allocate master with space for drv_data */
+ master = spi_alloc_master(dev, sizeof(*drv_data));
+ if (!master) {
+ dev_err(dev, "can not alloc spi_master\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, master);
+
+ /* the mode bits supported by this driver */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+
+ master->bus_num = pdev->id;
+ master->num_chipselect = info->num_chipselect;
+ master->cleanup = bfin_spi_cleanup;
+ master->setup = bfin_spi_setup;
+ master->transfer_one_message = bfin_spi_transfer_one_message;
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
+
+ drv_data = spi_master_get_devdata(master);
+ drv_data->master = master;
+ drv_data->tx_dma = tx_dma;
+ drv_data->rx_dma = rx_dma;
+ drv_data->pin_req = info->pin_req;
+ drv_data->sclk = sclk;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ drv_data->regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(drv_data->regs)) {
+ ret = PTR_ERR(drv_data->regs);
+ goto err_put_master;
+ }
+
+ /* request tx and rx dma */
+ ret = request_dma(tx_dma, "SPI_TX_DMA");
+ if (ret) {
+ dev_err(dev, "can not request SPI TX DMA channel\n");
+ goto err_put_master;
+ }
+ set_dma_callback(tx_dma, bfin_spi_tx_dma_isr, drv_data);
+
+ ret = request_dma(rx_dma, "SPI_RX_DMA");
+ if (ret) {
+ dev_err(dev, "can not request SPI RX DMA channel\n");
+ goto err_free_tx_dma;
+ }
+ set_dma_callback(drv_data->rx_dma, bfin_spi_rx_dma_isr, drv_data);
+
+ /* request CLK, MOSI and MISO */
+ ret = peripheral_request_list(drv_data->pin_req, "bfin-spi3");
+ if (ret < 0) {
+ dev_err(dev, "can not request spi pins\n");
+ goto err_free_rx_dma;
+ }
+
+ bfin_write(&drv_data->regs->control, SPI_CTL_MSTR | SPI_CTL_CPHA);
+ bfin_write(&drv_data->regs->ssel, 0x0000FE00);
+ bfin_write(&drv_data->regs->delay, 0x0);
+
+ tasklet_init(&drv_data->pump_transfers,
+ bfin_spi_pump_transfers, (unsigned long)drv_data);
+ /* register with the SPI framework */
+ ret = spi_register_master(master);
+ if (ret) {
+ dev_err(dev, "can not register spi master\n");
+ goto err_free_peripheral;
+ }
+
+ return ret;
+
+err_free_peripheral:
+ peripheral_free_list(drv_data->pin_req);
+err_free_rx_dma:
+ free_dma(rx_dma);
+err_free_tx_dma:
+ free_dma(tx_dma);
+err_put_master:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int bfin_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct bfin_spi_master *drv_data = spi_master_get_devdata(master);
+
+ bfin_spi_disable(drv_data);
+
+ peripheral_free_list(drv_data->pin_req);
+ free_dma(drv_data->rx_dma);
+ free_dma(drv_data->tx_dma);
+
+ spi_unregister_master(drv_data->master);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int bfin_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct bfin_spi_master *drv_data = spi_master_get_devdata(master);
+
+ spi_master_suspend(master);
+
+ drv_data->control = bfin_read(&drv_data->regs->control);
+ drv_data->ssel = bfin_read(&drv_data->regs->ssel);
+
+ bfin_write(&drv_data->regs->control, SPI_CTL_MSTR | SPI_CTL_CPHA);
+ bfin_write(&drv_data->regs->ssel, 0x0000FE00);
+ dma_disable_irq(drv_data->rx_dma);
+ dma_disable_irq(drv_data->tx_dma);
+
+ return 0;
+}
+
+static int bfin_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct bfin_spi_master *drv_data = spi_master_get_devdata(master);
+ int ret = 0;
+
+ /* the boot ROM may modify SPI and DMA status when resuming in SPI boot mode */
+ disable_dma(drv_data->rx_dma);
+
+ dma_enable_irq(drv_data->rx_dma);
+ dma_enable_irq(drv_data->tx_dma);
+ bfin_write(&drv_data->regs->control, drv_data->control);
+ bfin_write(&drv_data->regs->ssel, drv_data->ssel);
+
+ ret = spi_master_resume(master);
+ if (ret) {
+ free_dma(drv_data->rx_dma);
+ free_dma(drv_data->tx_dma);
+ }
+
+ return ret;
+}
+#endif
+static const struct dev_pm_ops bfin_spi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(bfin_spi_suspend, bfin_spi_resume)
+};
+
+MODULE_ALIAS("platform:bfin-spi3");
+static struct platform_driver bfin_spi_driver = {
+ .driver = {
+ .name = "bfin-spi3",
+ .owner = THIS_MODULE,
+ .pm = &bfin_spi_pm_ops,
+ },
+ .remove = bfin_spi_remove,
+};
+
+module_platform_driver_probe(bfin_spi_driver, bfin_spi_probe);
+
+MODULE_DESCRIPTION("Analog Devices SPI3 controller driver");
+MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index 59a73424419..45bdf73d686 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -1271,7 +1271,7 @@ static int bfin_spi_probe(struct platform_device *pdev)
struct resource *res;
int status = 0;
- platform_info = dev->platform_data;
+ platform_info = dev_get_platdata(dev);
/* Allocate master with space for drv_data */
master = spi_alloc_master(dev, sizeof(*drv_data));
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index 1c2ba17760b..8c11355dec2 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -255,150 +255,140 @@ static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t)
* Drivers can provide word-at-a-time i/o primitives, or provide
* transfer-at-a-time ones to leverage dma or fifo hardware.
*/
-static void bitbang_work(struct work_struct *work)
+
+static int spi_bitbang_prepare_hardware(struct spi_master *spi)
{
- struct spi_bitbang *bitbang =
- container_of(work, struct spi_bitbang, work);
+ struct spi_bitbang *bitbang;
unsigned long flags;
- struct spi_message *m, *_m;
+
+ bitbang = spi_master_get_devdata(spi);
spin_lock_irqsave(&bitbang->lock, flags);
bitbang->busy = 1;
- list_for_each_entry_safe(m, _m, &bitbang->queue, queue) {
- struct spi_device *spi;
- unsigned nsecs;
- struct spi_transfer *t = NULL;
- unsigned tmp;
- unsigned cs_change;
- int status;
- int do_setup = -1;
-
- list_del(&m->queue);
- spin_unlock_irqrestore(&bitbang->lock, flags);
-
- /* FIXME this is made-up ... the correct value is known to
- * word-at-a-time bitbang code, and presumably chipselect()
- * should enforce these requirements too?
- */
- nsecs = 100;
+ spin_unlock_irqrestore(&bitbang->lock, flags);
- spi = m->spi;
- tmp = 0;
- cs_change = 1;
- status = 0;
+ return 0;
+}
- list_for_each_entry (t, &m->transfers, transfer_list) {
-
- /* override speed or wordsize? */
- if (t->speed_hz || t->bits_per_word)
- do_setup = 1;
-
- /* init (-1) or override (1) transfer params */
- if (do_setup != 0) {
- status = bitbang->setup_transfer(spi, t);
- if (status < 0)
- break;
- if (do_setup == -1)
- do_setup = 0;
- }
-
- /* set up default clock polarity, and activate chip;
- * this implicitly updates clock and spi modes as
- * previously recorded for this device via setup().
- * (and also deselects any other chip that might be
- * selected ...)
- */
- if (cs_change) {
- bitbang->chipselect(spi, BITBANG_CS_ACTIVE);
- ndelay(nsecs);
- }
- cs_change = t->cs_change;
- if (!t->tx_buf && !t->rx_buf && t->len) {
- status = -EINVAL;
- break;
- }
+static int spi_bitbang_transfer_one(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct spi_bitbang *bitbang;
+ unsigned nsecs;
+ struct spi_transfer *t = NULL;
+ unsigned cs_change;
+ int status;
+ int do_setup = -1;
+ struct spi_device *spi = m->spi;
+
+ bitbang = spi_master_get_devdata(master);
+
+ /* FIXME this is made-up ... the correct value is known to
+ * word-at-a-time bitbang code, and presumably chipselect()
+ * should enforce these requirements too?
+ */
+ nsecs = 100;
- /* transfer data. the lower level code handles any
- * new dma mappings it needs. our caller always gave
- * us dma-safe buffers.
- */
- if (t->len) {
- /* REVISIT dma API still needs a designated
- * DMA_ADDR_INVALID; ~0 might be better.
- */
- if (!m->is_dma_mapped)
- t->rx_dma = t->tx_dma = 0;
- status = bitbang->txrx_bufs(spi, t);
- }
- if (status > 0)
- m->actual_length += status;
- if (status != t->len) {
- /* always report some kind of error */
- if (status >= 0)
- status = -EREMOTEIO;
+ cs_change = 1;
+ status = 0;
+
+ list_for_each_entry (t, &m->transfers, transfer_list) {
+
+ /* override speed or wordsize? */
+ if (t->speed_hz || t->bits_per_word)
+ do_setup = 1;
+
+ /* init (-1) or override (1) transfer params */
+ if (do_setup != 0) {
+ status = bitbang->setup_transfer(spi, t);
+ if (status < 0)
break;
- }
- status = 0;
-
- /* protocol tweaks before next transfer */
- if (t->delay_usecs)
- udelay(t->delay_usecs);
-
- if (cs_change && !list_is_last(&t->transfer_list, &m->transfers)) {
- /* sometimes a short mid-message deselect of the chip
- * may be needed to terminate a mode or command
- */
- ndelay(nsecs);
- bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
- ndelay(nsecs);
- }
+ if (do_setup == -1)
+ do_setup = 0;
}
- m->status = status;
- m->complete(m->context);
+ /* set up default clock polarity, and activate chip;
+ * this implicitly updates clock and spi modes as
+ * previously recorded for this device via setup().
+ * (and also deselects any other chip that might be
+ * selected ...)
+ */
+ if (cs_change) {
+ bitbang->chipselect(spi, BITBANG_CS_ACTIVE);
+ ndelay(nsecs);
+ }
+ cs_change = t->cs_change;
+ if (!t->tx_buf && !t->rx_buf && t->len) {
+ status = -EINVAL;
+ break;
+ }
- /* normally deactivate chipselect ... unless no error and
- * cs_change has hinted that the next message will probably
- * be for this chip too.
+ /* transfer data. the lower level code handles any
+ * new dma mappings it needs. our caller always gave
+ * us dma-safe buffers.
*/
- if (!(status == 0 && cs_change)) {
+ if (t->len) {
+ /* REVISIT dma API still needs a designated
+ * DMA_ADDR_INVALID; ~0 might be better.
+ */
+ if (!m->is_dma_mapped)
+ t->rx_dma = t->tx_dma = 0;
+ status = bitbang->txrx_bufs(spi, t);
+ }
+ if (status > 0)
+ m->actual_length += status;
+ if (status != t->len) {
+ /* always report some kind of error */
+ if (status >= 0)
+ status = -EREMOTEIO;
+ break;
+ }
+ status = 0;
+
+ /* protocol tweaks before next transfer */
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (cs_change && !list_is_last(&t->transfer_list, &m->transfers)) {
+ /* sometimes a short mid-message deselect of the chip
+ * may be needed to terminate a mode or command
+ */
ndelay(nsecs);
bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
ndelay(nsecs);
}
+ }
+
+ m->status = status;
- spin_lock_irqsave(&bitbang->lock, flags);
+ /* normally deactivate chipselect ... unless no error and
+ * cs_change has hinted that the next message will probably
+ * be for this chip too.
+ */
+ if (!(status == 0 && cs_change)) {
+ ndelay(nsecs);
+ bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
+ ndelay(nsecs);
}
- bitbang->busy = 0;
- spin_unlock_irqrestore(&bitbang->lock, flags);
+
+ spi_finalize_current_message(master);
+
+ return status;
}
-/**
- * spi_bitbang_transfer - default submit to transfer queue
- */
-int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m)
+static int spi_bitbang_unprepare_hardware(struct spi_master *spi)
{
- struct spi_bitbang *bitbang;
+ struct spi_bitbang *bitbang;
unsigned long flags;
- int status = 0;
- m->actual_length = 0;
- m->status = -EINPROGRESS;
-
- bitbang = spi_master_get_devdata(spi->master);
+ bitbang = spi_master_get_devdata(spi);
spin_lock_irqsave(&bitbang->lock, flags);
- if (!spi->max_speed_hz)
- status = -ENETDOWN;
- else {
- list_add_tail(&m->queue, &bitbang->queue);
- queue_work(bitbang->workqueue, &bitbang->work);
- }
+ bitbang->busy = 0;
spin_unlock_irqrestore(&bitbang->lock, flags);
- return status;
+ return 0;
}
-EXPORT_SYMBOL_GPL(spi_bitbang_transfer);
/*----------------------------------------------------------------------*/
@@ -428,20 +418,22 @@ EXPORT_SYMBOL_GPL(spi_bitbang_transfer);
int spi_bitbang_start(struct spi_bitbang *bitbang)
{
struct spi_master *master = bitbang->master;
- int status;
if (!master || !bitbang->chipselect)
return -EINVAL;
- INIT_WORK(&bitbang->work, bitbang_work);
spin_lock_init(&bitbang->lock);
- INIT_LIST_HEAD(&bitbang->queue);
if (!master->mode_bits)
master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
- if (!master->transfer)
- master->transfer = spi_bitbang_transfer;
+ if (master->transfer || master->transfer_one_message)
+ return -EINVAL;
+
+ master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
+ master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
+ master->transfer_one_message = spi_bitbang_transfer_one;
+
if (!bitbang->txrx_bufs) {
bitbang->use_dma = 0;
bitbang->txrx_bufs = spi_bitbang_bufs;
@@ -452,34 +444,12 @@ int spi_bitbang_start(struct spi_bitbang *bitbang)
master->setup = spi_bitbang_setup;
master->cleanup = spi_bitbang_cleanup;
}
- } else if (!master->setup)
- return -EINVAL;
- if (master->transfer == spi_bitbang_transfer &&
- !bitbang->setup_transfer)
- return -EINVAL;
-
- /* this task is the only thing to touch the SPI bits */
- bitbang->busy = 0;
- bitbang->workqueue = create_singlethread_workqueue(
- dev_name(master->dev.parent));
- if (bitbang->workqueue == NULL) {
- status = -EBUSY;
- goto err1;
}
/* driver may get busy before register() returns, especially
* if someone registered boardinfo for devices
*/
- status = spi_register_master(master);
- if (status < 0)
- goto err2;
-
- return status;
-
-err2:
- destroy_workqueue(bitbang->workqueue);
-err1:
- return status;
+ return spi_register_master(master);
}
EXPORT_SYMBOL_GPL(spi_bitbang_start);
@@ -490,10 +460,6 @@ int spi_bitbang_stop(struct spi_bitbang *bitbang)
{
spi_unregister_master(bitbang->master);
- WARN_ON(!list_empty(&bitbang->queue));
-
- destroy_workqueue(bitbang->workqueue);
-
return 0;
}
EXPORT_SYMBOL_GPL(spi_bitbang_stop);
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index 17965fe225c..5655acf55bf 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -239,11 +239,8 @@ static int spi_clps711x_probe(struct platform_device *pdev)
}
dev_err(&pdev->dev, "Failed to register master\n");
- devm_free_irq(&pdev->dev, IRQ_SSEOTI, hw);
clk_out:
- devm_clk_put(&pdev->dev, hw->spi_clk);
-
err_out:
while (--i >= 0)
if (gpio_is_valid(hw->chipselect[i]))
@@ -261,13 +258,10 @@ static int spi_clps711x_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct spi_clps711x_data *hw = spi_master_get_devdata(master);
- devm_free_irq(&pdev->dev, IRQ_SSEOTI, hw);
-
for (i = 0; i < master->num_chipselect; i++)
if (gpio_is_valid(hw->chipselect[i]))
gpio_free(hw->chipselect[i]);
- devm_clk_put(&pdev->dev, hw->spi_clk);
spi_unregister_master(master);
kfree(master);
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index 0631b9d4a5d..cc5b75d10c3 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -354,24 +354,6 @@ static int mcfqspi_transfer_one_message(struct spi_master *master,
}
-static int mcfqspi_prepare_transfer_hw(struct spi_master *master)
-{
- struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
-
- pm_runtime_get_sync(mcfqspi->dev);
-
- return 0;
-}
-
-static int mcfqspi_unprepare_transfer_hw(struct spi_master *master)
-{
- struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
-
- pm_runtime_put_sync(mcfqspi->dev);
-
- return 0;
-}
-
static int mcfqspi_setup(struct spi_device *spi)
{
if (spi->chip_select >= spi->master->num_chipselect) {
@@ -400,7 +382,7 @@ static int mcfqspi_probe(struct platform_device *pdev)
struct mcfqspi_platform_data *pdata;
int status;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_dbg(&pdev->dev, "platform data is missing\n");
return -ENOENT;
@@ -473,8 +455,7 @@ static int mcfqspi_probe(struct platform_device *pdev)
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
master->setup = mcfqspi_setup;
master->transfer_one_message = mcfqspi_transfer_one_message;
- master->prepare_transfer_hardware = mcfqspi_prepare_transfer_hw;
- master->unprepare_transfer_hardware = mcfqspi_unprepare_transfer_hw;
+ master->auto_runtime_pm = true;
platform_set_drvdata(pdev, master);
@@ -558,7 +539,7 @@ static int mcfqspi_resume(struct device *dev)
#ifdef CONFIG_PM_RUNTIME
static int mcfqspi_runtime_suspend(struct device *dev)
{
- struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev));
+ struct mcfqspi *mcfqspi = dev_get_drvdata(dev);
clk_disable(mcfqspi->clk);
@@ -567,7 +548,7 @@ static int mcfqspi_runtime_suspend(struct device *dev)
static int mcfqspi_runtime_resume(struct device *dev)
{
- struct mcfqspi *mcfqspi = platform_get_drvdata(to_platform_device(dev));
+ struct mcfqspi *mcfqspi = dev_get_drvdata(dev);
clk_enable(mcfqspi->clk);
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 222d3e37fc2..8fbfe2483ff 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -609,7 +609,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
else
buf = (void *)t->tx_buf;
t->tx_dma = dma_map_single(&spi->dev, buf,
- t->len, DMA_FROM_DEVICE);
+ t->len, DMA_TO_DEVICE);
if (!t->tx_dma) {
ret = -EFAULT;
goto err_tx_map;
@@ -872,8 +872,8 @@ static int davinci_spi_probe(struct platform_device *pdev)
goto free_master;
}
- if (pdev->dev.platform_data) {
- pdata = pdev->dev.platform_data;
+ if (dev_get_platdata(&pdev->dev)) {
+ pdata = dev_get_platdata(&pdev->dev);
dspi->pdata = *pdata;
} else {
/* update dspi pdata with that from the DT */
diff --git a/drivers/spi/spi-efm32.c b/drivers/spi/spi-efm32.c
new file mode 100644
index 00000000000..7d84418a01d
--- /dev/null
+++ b/drivers/spi/spi-efm32.c
@@ -0,0 +1,516 @@
+/*
+ * Copyright (C) 2012-2013 Uwe Kleine-Koenig for Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_data/efm32-spi.h>
+
+#define DRIVER_NAME "efm32-spi"
+
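+/* shift a value into the register field described by mask */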
+#define MASK_VAL(mask, val) ((val << __ffs(mask)) & mask)
+
+#define REG_CTRL 0x00
+#define REG_CTRL_SYNC 0x0001
+#define REG_CTRL_CLKPOL 0x0100
+#define REG_CTRL_CLKPHA 0x0200
+#define REG_CTRL_MSBF 0x0400
+#define REG_CTRL_TXBIL 0x1000
+
+#define REG_FRAME 0x04
+#define REG_FRAME_DATABITS__MASK 0x000f
+#define REG_FRAME_DATABITS(n) ((n) - 3)
+
+#define REG_CMD 0x0c
+#define REG_CMD_RXEN 0x0001
+#define REG_CMD_RXDIS 0x0002
+#define REG_CMD_TXEN 0x0004
+#define REG_CMD_TXDIS 0x0008
+#define REG_CMD_MASTEREN 0x0010
+
+#define REG_STATUS 0x10
+#define REG_STATUS_TXENS 0x0002
+#define REG_STATUS_TXC 0x0020
+#define REG_STATUS_TXBL 0x0040
+#define REG_STATUS_RXDATAV 0x0080
+
+#define REG_CLKDIV 0x14
+
+#define REG_RXDATAX 0x18
+#define REG_RXDATAX_RXDATA__MASK 0x01ff
+#define REG_RXDATAX_PERR 0x4000
+#define REG_RXDATAX_FERR 0x8000
+
+#define REG_TXDATA 0x34
+
+#define REG_IF 0x40
+#define REG_IF_TXBL 0x0002
+#define REG_IF_RXDATAV 0x0004
+
+#define REG_IFS 0x44
+#define REG_IFC 0x48
+#define REG_IEN 0x4c
+
+#define REG_ROUTE 0x54
+#define REG_ROUTE_RXPEN 0x0001
+#define REG_ROUTE_TXPEN 0x0002
+#define REG_ROUTE_CLKPEN 0x0008
+#define REG_ROUTE_LOCATION__MASK 0x0700
+#define REG_ROUTE_LOCATION(n) MASK_VAL(REG_ROUTE_LOCATION__MASK, (n))
+
+struct efm32_spi_ddata {
+ struct spi_bitbang bitbang;
+
+ spinlock_t lock;
+
+ struct clk *clk;
+ void __iomem *base;
+ unsigned int rxirq, txirq;
+ struct efm32_spi_pdata pdata;
+
+ /* irq data */
+ struct completion done;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ unsigned tx_len, rx_len;
+
+ /* chip selects */
+ unsigned csgpio[];
+};
+
+#define ddata_to_dev(ddata) (&(ddata->bitbang.master->dev))
+#define efm32_spi_vdbg(ddata, format, arg...) \
+ dev_vdbg(ddata_to_dev(ddata), format, ##arg)
+
+static void efm32_spi_write32(struct efm32_spi_ddata *ddata,
+ u32 value, unsigned offset)
+{
+ writel_relaxed(value, ddata->base + offset);
+}
+
+static u32 efm32_spi_read32(struct efm32_spi_ddata *ddata, unsigned offset)
+{
+ return readl_relaxed(ddata->base + offset);
+}
+
+static void efm32_spi_chipselect(struct spi_device *spi, int is_on)
+{
+ struct efm32_spi_ddata *ddata = spi_master_get_devdata(spi->master);
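+ /* drive the gpio low to assert an active-low CS; inverted when SPI_CS_HIGH is set */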
+ int value = !(spi->mode & SPI_CS_HIGH) == !(is_on == BITBANG_CS_ACTIVE);
+
+ gpio_set_value(ddata->csgpio[spi->chip_select], value);
+}
+
+static int efm32_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct efm32_spi_ddata *ddata = spi_master_get_devdata(spi->master);
+
+ unsigned bpw = t->bits_per_word ?: spi->bits_per_word;
+ unsigned speed = t->speed_hz ?: spi->max_speed_hz;
+ unsigned long clkfreq = clk_get_rate(ddata->clk);
+ u32 clkdiv;
+
+ efm32_spi_write32(ddata, REG_CTRL_SYNC | REG_CTRL_MSBF |
+ (spi->mode & SPI_CPHA ? REG_CTRL_CLKPHA : 0) |
+ (spi->mode & SPI_CPOL ? REG_CTRL_CLKPOL : 0), REG_CTRL);
+
+ efm32_spi_write32(ddata,
+ REG_FRAME_DATABITS(bpw), REG_FRAME);
+
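+ /*
+ * clkdiv = 64 * (2 * clkfreq / speed - 4) == 256 * (clkfreq / (2 * speed) - 1),
+ * rounded up so the resulting clock never exceeds the requested speed.
+ */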
+ if (2 * speed >= clkfreq)
+ clkdiv = 0;
+ else
+ clkdiv = 64 * (DIV_ROUND_UP(2 * clkfreq, speed) - 4);
+
+ if (clkdiv > (1U << 21))
+ return -EINVAL;
+
+ efm32_spi_write32(ddata, clkdiv, REG_CLKDIV);
+ efm32_spi_write32(ddata, REG_CMD_MASTEREN, REG_CMD);
+ efm32_spi_write32(ddata, REG_CMD_RXEN | REG_CMD_TXEN, REG_CMD);
+
+ return 0;
+}
+
+static void efm32_spi_tx_u8(struct efm32_spi_ddata *ddata)
+{
+ u8 val = 0;
+
+ if (ddata->tx_buf) {
+ val = *ddata->tx_buf;
+ ddata->tx_buf++;
+ }
+
+ ddata->tx_len--;
+ efm32_spi_write32(ddata, val, REG_TXDATA);
+ efm32_spi_vdbg(ddata, "%s: tx 0x%x\n", __func__, val);
+}
+
+static void efm32_spi_rx_u8(struct efm32_spi_ddata *ddata)
+{
+ u32 rxdata = efm32_spi_read32(ddata, REG_RXDATAX);
+ efm32_spi_vdbg(ddata, "%s: rx 0x%x\n", __func__, rxdata);
+
+ if (ddata->rx_buf) {
+ *ddata->rx_buf = rxdata;
+ ddata->rx_buf++;
+ }
+
+ ddata->rx_len--;
+}
+
+static void efm32_spi_filltx(struct efm32_spi_ddata *ddata)
+{
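+ /* never let tx run more than two words ahead of rx, so the rx side cannot be overrun */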
+ while (ddata->tx_len &&
+ ddata->tx_len + 2 > ddata->rx_len &&
+ efm32_spi_read32(ddata, REG_STATUS) & REG_STATUS_TXBL) {
+ efm32_spi_tx_u8(ddata);
+ }
+}
+
+static int efm32_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct efm32_spi_ddata *ddata = spi_master_get_devdata(spi->master);
+ int ret = -EBUSY;
+
+ spin_lock_irq(&ddata->lock);
+
+ if (ddata->tx_buf || ddata->rx_buf)
+ goto out_unlock;
+
+ ddata->tx_buf = t->tx_buf;
+ ddata->rx_buf = t->rx_buf;
+ ddata->tx_len = ddata->rx_len =
+ t->len * DIV_ROUND_UP(t->bits_per_word, 8);
+
+ efm32_spi_filltx(ddata);
+
+ init_completion(&ddata->done);
+
+ efm32_spi_write32(ddata, REG_IF_TXBL | REG_IF_RXDATAV, REG_IEN);
+
+ spin_unlock_irq(&ddata->lock);
+
+ wait_for_completion(&ddata->done);
+
+ spin_lock_irq(&ddata->lock);
+
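+ /* report the number of bytes completed on the slower of the two directions */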
+ ret = t->len - max(ddata->tx_len, ddata->rx_len);
+
+ efm32_spi_write32(ddata, 0, REG_IEN);
+ ddata->tx_buf = ddata->rx_buf = NULL;
+
+out_unlock:
+ spin_unlock_irq(&ddata->lock);
+
+ return ret;
+}
+
+static irqreturn_t efm32_spi_rxirq(int irq, void *data)
+{
+ struct efm32_spi_ddata *ddata = data;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock(&ddata->lock);
+
+ while (ddata->rx_len > 0 &&
+ efm32_spi_read32(ddata, REG_STATUS) &
+ REG_STATUS_RXDATAV) {
+ efm32_spi_rx_u8(ddata);
+
+ ret = IRQ_HANDLED;
+ }
+
+ if (!ddata->rx_len) {
+ u32 ien = efm32_spi_read32(ddata, REG_IEN);
+
+ ien &= ~REG_IF_RXDATAV;
+
+ efm32_spi_write32(ddata, ien, REG_IEN);
+
+ complete(&ddata->done);
+ }
+
+ spin_unlock(&ddata->lock);
+
+ return ret;
+}
+
+static irqreturn_t efm32_spi_txirq(int irq, void *data)
+{
+ struct efm32_spi_ddata *ddata = data;
+
+ efm32_spi_vdbg(ddata,
+ "%s: txlen = %u, rxlen = %u, if=0x%08x, stat=0x%08x\n",
+ __func__, ddata->tx_len, ddata->rx_len,
+ efm32_spi_read32(ddata, REG_IF),
+ efm32_spi_read32(ddata, REG_STATUS));
+
+ spin_lock(&ddata->lock);
+
+ efm32_spi_filltx(ddata);
+
+ efm32_spi_vdbg(ddata, "%s: txlen = %u, rxlen = %u\n",
+ __func__, ddata->tx_len, ddata->rx_len);
+
+ if (!ddata->tx_len) {
+ u32 ien = efm32_spi_read32(ddata, REG_IEN);
+
+ ien &= ~REG_IF_TXBL;
+
+ efm32_spi_write32(ddata, ien, REG_IEN);
+ efm32_spi_vdbg(ddata, "disable TXBL\n");
+ }
+
+ spin_unlock(&ddata->lock);
+
+ return IRQ_HANDLED;
+}
+
+static const struct efm32_spi_pdata efm32_spi_pdata_default = {
+ .location = 1,
+};
+
+static u32 efm32_spi_get_configured_location(struct efm32_spi_ddata *ddata)
+{
+ u32 reg = efm32_spi_read32(ddata, REG_ROUTE);
+
+ return (reg & REG_ROUTE_LOCATION__MASK) >> __ffs(REG_ROUTE_LOCATION__MASK);
+}
+
+static int efm32_spi_probe_dt(struct platform_device *pdev,
+ struct spi_master *master, struct efm32_spi_ddata *ddata)
+{
+ struct device_node *np = pdev->dev.of_node;
+ u32 location;
+ int ret;
+
+ if (!np)
+ return 1;
+
+ ret = of_property_read_u32(np, "location", &location);
+ if (!ret) {
+ dev_dbg(&pdev->dev, "using location %u\n", location);
+ } else {
+ /* default to location configured in hardware */
+ location = efm32_spi_get_configured_location(ddata);
+
+ dev_info(&pdev->dev, "fall back to location %u\n", location);
+ }
+
+ ddata->pdata.location = location;
+
+ /* the spi core takes care of the bus number via a device tree alias */
+ master->bus_num = -1;
+
+ return 0;
+}
+
+static int efm32_spi_probe(struct platform_device *pdev)
+{
+ struct efm32_spi_ddata *ddata;
+ struct resource *res;
+ int ret;
+ struct spi_master *master;
+ struct device_node *np = pdev->dev.of_node;
+ unsigned int num_cs, i;
+
+ num_cs = of_gpio_named_count(np, "cs-gpios");
+
+ master = spi_alloc_master(&pdev->dev,
+ sizeof(*ddata) + num_cs * sizeof(unsigned));
+ if (!master) {
+ dev_dbg(&pdev->dev,
+ "failed to allocate spi master controller\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, master);
+
+ master->dev.of_node = pdev->dev.of_node;
+
+ master->num_chipselect = num_cs;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+
+ ddata = spi_master_get_devdata(master);
+
+ ddata->bitbang.master = spi_master_get(master);
+ ddata->bitbang.chipselect = efm32_spi_chipselect;
+ ddata->bitbang.setup_transfer = efm32_spi_setup_transfer;
+ ddata->bitbang.txrx_bufs = efm32_spi_txrx_bufs;
+
+ spin_lock_init(&ddata->lock);
+
+ ddata->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ddata->clk)) {
+ ret = PTR_ERR(ddata->clk);
+ dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
+ goto err;
+ }
+
+ for (i = 0; i < num_cs; ++i) {
+ ret = of_get_named_gpio(np, "cs-gpios", i);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get csgpio#%u (%d)\n",
+ i, ret);
+ goto err;
+ }
+ ddata->csgpio[i] = ret;
+ dev_dbg(&pdev->dev, "csgpio#%u = %u\n", i, ddata->csgpio[i]);
+ ret = devm_gpio_request_one(&pdev->dev, ddata->csgpio[i],
+ GPIOF_OUT_INIT_LOW, DRIVER_NAME);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to configure csgpio#%u (%d)\n",
+ i, ret);
+ goto err;
+ }
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "failed to determine base address\n");
+ goto err;
+ }
+
+ if (resource_size(res) < 60) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "memory resource too small\n");
+ goto err;
+ }
+
+ ddata->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ddata->base)) {
+ ret = PTR_ERR(ddata->base);
+ goto err;
+ }
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret <= 0) {
+ dev_err(&pdev->dev, "failed to get rx irq (%d)\n", ret);
+ goto err;
+ }
+
+ ddata->rxirq = ret;
+
+ ret = platform_get_irq(pdev, 1);
+ if (ret <= 0)
+ ret = ddata->rxirq + 1;
+
+ ddata->txirq = ret;
+
+ ret = clk_prepare_enable(ddata->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to enable clock (%d)\n", ret);
+ goto err;
+ }
+
+ ret = efm32_spi_probe_dt(pdev, master, ddata);
+ if (ret > 0) {
+ /* not created by device tree */
+ const struct efm32_spi_pdata *pdata =
+ dev_get_platdata(&pdev->dev);
+
+ if (pdata)
+ ddata->pdata = *pdata;
+ else
+ ddata->pdata.location =
+ efm32_spi_get_configured_location(ddata);
+
+ master->bus_num = pdev->id;
+
+ } else if (ret < 0) {
+ goto err_disable_clk;
+ }
+
+ efm32_spi_write32(ddata, 0, REG_IEN);
+ efm32_spi_write32(ddata, REG_ROUTE_TXPEN | REG_ROUTE_RXPEN |
+ REG_ROUTE_CLKPEN |
+ REG_ROUTE_LOCATION(ddata->pdata.location), REG_ROUTE);
+
+ ret = request_irq(ddata->rxirq, efm32_spi_rxirq,
+ 0, DRIVER_NAME " rx", ddata);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register rxirq (%d)\n", ret);
+ goto err_disable_clk;
+ }
+
+ ret = request_irq(ddata->txirq, efm32_spi_txirq,
+ 0, DRIVER_NAME " tx", ddata);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register txirq (%d)\n", ret);
+ goto err_free_rx_irq;
+ }
+
+ ret = spi_bitbang_start(&ddata->bitbang);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_bitbang_start failed (%d)\n", ret);
+
+ free_irq(ddata->txirq, ddata);
+err_free_rx_irq:
+ free_irq(ddata->rxirq, ddata);
+err_disable_clk:
+ clk_disable_unprepare(ddata->clk);
+err:
+ spi_master_put(master);
+ kfree(master);
+ }
+
+ return ret;
+}
+
+static int efm32_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct efm32_spi_ddata *ddata = spi_master_get_devdata(master);
+
+ efm32_spi_write32(ddata, 0, REG_IEN);
+
+ free_irq(ddata->txirq, ddata);
+ free_irq(ddata->rxirq, ddata);
+ clk_disable_unprepare(ddata->clk);
+ spi_master_put(master);
+ kfree(master);
+
+ return 0;
+}
+
+static const struct of_device_id efm32_spi_dt_ids[] = {
+ {
+ .compatible = "efm32,spi",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, efm32_spi_dt_ids);
+
+static struct platform_driver efm32_spi_driver = {
+ .probe = efm32_spi_probe,
+ .remove = efm32_spi_remove,
+
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = efm32_spi_dt_ids,
+ },
+};
+module_platform_driver(efm32_spi_driver);
+
+MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
+MODULE_DESCRIPTION("EFM32 SPI driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index cad30b8a1d7..d22c00a227b 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -26,7 +26,6 @@
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/scatterlist.h>
#include <linux/spi/spi.h>
@@ -70,19 +69,13 @@
/**
* struct ep93xx_spi - EP93xx SPI controller structure
- * @lock: spinlock that protects concurrent accesses to fields @running,
- * @current_msg and @msg_queue
* @pdev: pointer to platform device
* @clk: clock for the controller
* @regs_base: pointer to ioremap()'d registers
* @sspdr_phys: physical address of the SSPDR register
* @min_rate: minimum clock rate (in Hz) supported by the controller
* @max_rate: maximum clock rate (in Hz) supported by the controller
- * @running: is the queue running
- * @wq: workqueue used by the driver
- * @msg_work: work that is queued for the driver
* @wait: wait here until given transfer is completed
- * @msg_queue: queue for the messages
* @current_msg: message that is currently processed (or %NULL if none)
* @tx: current byte in transfer to transmit
* @rx: current byte in transfer to receive
@@ -96,30 +89,15 @@
* @tx_sgt: sg table for TX transfers
* @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
* the client
- *
- * This structure holds EP93xx SPI controller specific information. When
- * @running is %true, driver accepts transfer requests from protocol drivers.
- * @current_msg is used to hold pointer to the message that is currently
- * processed. If @current_msg is %NULL, it means that no processing is going
- * on.
- *
- * Most of the fields are only written once and they can be accessed without
- * taking the @lock. Fields that are accessed concurrently are: @current_msg,
- * @running, and @msg_queue.
*/
struct ep93xx_spi {
- spinlock_t lock;
const struct platform_device *pdev;
struct clk *clk;
void __iomem *regs_base;
unsigned long sspdr_phys;
unsigned long min_rate;
unsigned long max_rate;
- bool running;
- struct workqueue_struct *wq;
- struct work_struct msg_work;
struct completion wait;
- struct list_head msg_queue;
struct spi_message *current_msg;
size_t tx;
size_t rx;
@@ -136,50 +114,36 @@ struct ep93xx_spi {
/**
* struct ep93xx_spi_chip - SPI device hardware settings
* @spi: back pointer to the SPI device
- * @rate: max rate in hz this chip supports
- * @div_cpsr: cpsr (pre-scaler) divider
- * @div_scr: scr divider
- * @dss: bits per word (4 - 16 bits)
* @ops: private chip operations
- *
- * This structure is used to store hardware register specific settings for each
- * SPI device. Settings are written to hardware by function
- * ep93xx_spi_chip_setup().
*/
struct ep93xx_spi_chip {
const struct spi_device *spi;
- unsigned long rate;
- u8 div_cpsr;
- u8 div_scr;
- u8 dss;
struct ep93xx_spi_chip_ops *ops;
};
/* converts bits per word to CR0.DSS value */
#define bits_per_word_to_dss(bpw) ((bpw) - 1)
-static inline void
-ep93xx_spi_write_u8(const struct ep93xx_spi *espi, u16 reg, u8 value)
+static void ep93xx_spi_write_u8(const struct ep93xx_spi *espi,
+ u16 reg, u8 value)
{
- __raw_writeb(value, espi->regs_base + reg);
+ writeb(value, espi->regs_base + reg);
}
-static inline u8
-ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
+static u8 ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
{
- return __raw_readb(spi->regs_base + reg);
+ return readb(spi->regs_base + reg);
}
-static inline void
-ep93xx_spi_write_u16(const struct ep93xx_spi *espi, u16 reg, u16 value)
+static void ep93xx_spi_write_u16(const struct ep93xx_spi *espi,
+ u16 reg, u16 value)
{
- __raw_writew(value, espi->regs_base + reg);
+ writew(value, espi->regs_base + reg);
}
-static inline u16
-ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
+static u16 ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
{
- return __raw_readw(spi->regs_base + reg);
+ return readw(spi->regs_base + reg);
}
static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
@@ -230,17 +194,13 @@ static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
/**
* ep93xx_spi_calc_divisors() - calculates SPI clock divisors
* @espi: ep93xx SPI controller struct
- * @chip: divisors are calculated for this chip
* @rate: desired SPI output clock rate
- *
- * Function calculates cpsr (clock pre-scaler) and scr divisors based on
- * given @rate and places them to @chip->div_cpsr and @chip->div_scr. If,
- * for some reason, divisors cannot be calculated nothing is stored and
- * %-EINVAL is returned.
+ * @div_cpsr: pointer to return the cpsr (pre-scaler) divider
+ * @div_scr: pointer to return the scr divider
*/
static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
- struct ep93xx_spi_chip *chip,
- unsigned long rate)
+ unsigned long rate,
+ u8 *div_cpsr, u8 *div_scr)
{
unsigned long spi_clk_rate = clk_get_rate(espi->clk);
int cpsr, scr;
@@ -248,7 +208,7 @@ static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
/*
* Make sure that max value is between values supported by the
* controller. Note that minimum value is already checked in
- * ep93xx_spi_transfer().
+ * ep93xx_spi_transfer_one_message().
*/
rate = clamp(rate, espi->min_rate, espi->max_rate);
@@ -263,8 +223,8 @@ static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
for (cpsr = 2; cpsr <= 254; cpsr += 2) {
for (scr = 0; scr <= 255; scr++) {
if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
- chip->div_scr = (u8)scr;
- chip->div_cpsr = (u8)cpsr;
+ *div_scr = (u8)scr;
+ *div_cpsr = (u8)cpsr;
return 0;
}
}
@@ -319,73 +279,11 @@ static int ep93xx_spi_setup(struct spi_device *spi)
spi_set_ctldata(spi, chip);
}
- if (spi->max_speed_hz != chip->rate) {
- int err;
-
- err = ep93xx_spi_calc_divisors(espi, chip, spi->max_speed_hz);
- if (err != 0) {
- spi_set_ctldata(spi, NULL);
- kfree(chip);
- return err;
- }
- chip->rate = spi->max_speed_hz;
- }
-
- chip->dss = bits_per_word_to_dss(spi->bits_per_word);
-
ep93xx_spi_cs_control(spi, false);
return 0;
}
/**
- * ep93xx_spi_transfer() - queue message to be transferred
- * @spi: target SPI device
- * @msg: message to be transferred
- *
- * This function is called by SPI device drivers when they are going to transfer
- * a new message. It simply puts the message in the queue and schedules
- * workqueue to perform the actual transfer later on.
- *
- * Returns %0 on success and negative error in case of failure.
- */
-static int ep93xx_spi_transfer(struct spi_device *spi, struct spi_message *msg)
-{
- struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
- struct spi_transfer *t;
- unsigned long flags;
-
- if (!msg || !msg->complete)
- return -EINVAL;
-
- /* first validate each transfer */
- list_for_each_entry(t, &msg->transfers, transfer_list) {
- if (t->speed_hz && t->speed_hz < espi->min_rate)
- return -EINVAL;
- }
-
- /*
- * Now that we own the message, let's initialize it so that it is
- * suitable for us. We use @msg->status to signal whether there was
- * error in transfer and @msg->state is used to hold pointer to the
- * current transfer (or %NULL if no active current transfer).
- */
- msg->state = NULL;
- msg->status = 0;
- msg->actual_length = 0;
-
- spin_lock_irqsave(&espi->lock, flags);
- if (!espi->running) {
- spin_unlock_irqrestore(&espi->lock, flags);
- return -ESHUTDOWN;
- }
- list_add_tail(&msg->queue, &espi->msg_queue);
- queue_work(espi->wq, &espi->msg_work);
- spin_unlock_irqrestore(&espi->lock, flags);
-
- return 0;
-}
-
-/**
* ep93xx_spi_cleanup() - cleans up master controller specific state
* @spi: SPI device to cleanup
*
@@ -409,39 +307,40 @@ static void ep93xx_spi_cleanup(struct spi_device *spi)
* ep93xx_spi_chip_setup() - configures hardware according to given @chip
* @espi: ep93xx SPI controller struct
* @chip: chip specific settings
- *
- * This function sets up the actual hardware registers with settings given in
- * @chip. Note that no validation is done so make sure that callers validate
- * settings before calling this.
+ * @speed_hz: transfer speed
+ * @bits_per_word: transfer bits_per_word
*/
-static void ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
- const struct ep93xx_spi_chip *chip)
+static int ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
+ const struct ep93xx_spi_chip *chip,
+ u32 speed_hz, u8 bits_per_word)
{
+ u8 dss = bits_per_word_to_dss(bits_per_word);
+ u8 div_cpsr = 0;
+ u8 div_scr = 0;
u16 cr0;
+ int err;
- cr0 = chip->div_scr << SSPCR0_SCR_SHIFT;
+ err = ep93xx_spi_calc_divisors(espi, speed_hz, &div_cpsr, &div_scr);
+ if (err)
+ return err;
+
+ cr0 = div_scr << SSPCR0_SCR_SHIFT;
cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT;
- cr0 |= chip->dss;
+ cr0 |= dss;
dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
- chip->spi->mode, chip->div_cpsr, chip->div_scr, chip->dss);
+ chip->spi->mode, div_cpsr, div_scr, dss);
dev_dbg(&espi->pdev->dev, "setup: cr0 %#x", cr0);
- ep93xx_spi_write_u8(espi, SSPCPSR, chip->div_cpsr);
+ ep93xx_spi_write_u8(espi, SSPCPSR, div_cpsr);
ep93xx_spi_write_u16(espi, SSPCR0, cr0);
-}
-
-static inline int bits_per_word(const struct ep93xx_spi *espi)
-{
- struct spi_message *msg = espi->current_msg;
- struct spi_transfer *t = msg->state;
- return t->bits_per_word;
+ return 0;
}
static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
{
- if (bits_per_word(espi) > 8) {
+ if (t->bits_per_word > 8) {
u16 tx_val = 0;
if (t->tx_buf)
@@ -460,7 +359,7 @@ static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
{
- if (bits_per_word(espi) > 8) {
+ if (t->bits_per_word > 8) {
u16 rx_val;
rx_val = ep93xx_spi_read_u16(espi, SSPDR);
@@ -546,7 +445,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
size_t len = t->len;
int i, ret, nents;
- if (bits_per_word(espi) > 8)
+ if (t->bits_per_word > 8)
buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
else
buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
@@ -610,7 +509,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
}
if (WARN_ON(len)) {
- dev_warn(&espi->pdev->dev, "len = %d expected 0!", len);
+ dev_warn(&espi->pdev->dev, "len = %zu expected 0!", len);
return ERR_PTR(-EINVAL);
}
@@ -708,37 +607,16 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
struct spi_transfer *t)
{
struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);
+ int err;
msg->state = t;
- /*
- * Handle any transfer specific settings if needed. We use
- * temporary chip settings here and restore original later when
- * the transfer is finished.
- */
- if (t->speed_hz || t->bits_per_word) {
- struct ep93xx_spi_chip tmp_chip = *chip;
-
- if (t->speed_hz) {
- int err;
-
- err = ep93xx_spi_calc_divisors(espi, &tmp_chip,
- t->speed_hz);
- if (err) {
- dev_err(&espi->pdev->dev,
- "failed to adjust speed\n");
- msg->status = err;
- return;
- }
- }
-
- if (t->bits_per_word)
- tmp_chip.dss = bits_per_word_to_dss(t->bits_per_word);
-
- /*
- * Set up temporary new hw settings for this transfer.
- */
- ep93xx_spi_chip_setup(espi, &tmp_chip);
+ err = ep93xx_spi_chip_setup(espi, chip, t->speed_hz, t->bits_per_word);
+ if (err) {
+ dev_err(&espi->pdev->dev,
+ "failed to setup chip for transfer\n");
+ msg->status = err;
+ return;
}
espi->rx = 0;
@@ -783,9 +661,6 @@ static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
ep93xx_spi_cs_control(msg->spi, true);
}
}
-
- if (t->speed_hz || t->bits_per_word)
- ep93xx_spi_chip_setup(espi, chip);
}
/*
@@ -838,10 +713,8 @@ static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
espi->fifo_level = 0;
/*
- * Update SPI controller registers according to spi device and assert
- * the chipselect.
+ * Assert the chipselect.
*/
- ep93xx_spi_chip_setup(espi, spi_get_ctldata(msg->spi));
ep93xx_spi_cs_control(msg->spi, true);
list_for_each_entry(t, &msg->transfers, transfer_list) {
@@ -858,50 +731,29 @@ static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
ep93xx_spi_disable(espi);
}
-#define work_to_espi(work) (container_of((work), struct ep93xx_spi, msg_work))
-
-/**
- * ep93xx_spi_work() - EP93xx SPI workqueue worker function
- * @work: work struct
- *
- * Workqueue worker function. This function is called when there are new
- * SPI messages to be processed. Message is taken out from the queue and then
- * passed to ep93xx_spi_process_message().
- *
- * After message is transferred, protocol driver is notified by calling
- * @msg->complete(). In case of error, @msg->status is set to negative error
- * number, otherwise it contains zero (and @msg->actual_length is updated).
- */
-static void ep93xx_spi_work(struct work_struct *work)
+static int ep93xx_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
{
- struct ep93xx_spi *espi = work_to_espi(work);
- struct spi_message *msg;
+ struct ep93xx_spi *espi = spi_master_get_devdata(master);
+ struct spi_transfer *t;
- spin_lock_irq(&espi->lock);
- if (!espi->running || espi->current_msg ||
- list_empty(&espi->msg_queue)) {
- spin_unlock_irq(&espi->lock);
- return;
+ /* first validate each transfer */
+ list_for_each_entry(t, &msg->transfers, transfer_list) {
+ if (t->speed_hz < espi->min_rate)
+ return -EINVAL;
}
- msg = list_first_entry(&espi->msg_queue, struct spi_message, queue);
- list_del_init(&msg->queue);
- espi->current_msg = msg;
- spin_unlock_irq(&espi->lock);
- ep93xx_spi_process_message(espi, msg);
+ msg->state = NULL;
+ msg->status = 0;
+ msg->actual_length = 0;
- /*
- * Update the current message and re-schedule ourselves if there are
- * more messages in the queue.
- */
- spin_lock_irq(&espi->lock);
+ espi->current_msg = msg;
+ ep93xx_spi_process_message(espi, msg);
espi->current_msg = NULL;
- if (espi->running && !list_empty(&espi->msg_queue))
- queue_work(espi->wq, &espi->msg_work);
- spin_unlock_irq(&espi->lock);
- /* notify the protocol driver that we are done with this message */
- msg->complete(msg->context);
+ spi_finalize_current_message(master);
+
+ return 0;
}
static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
@@ -1022,16 +874,26 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
int irq;
int error;
- info = pdev->dev.platform_data;
+ info = dev_get_platdata(&pdev->dev);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq resources\n");
+ return -EBUSY;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "unable to get iomem resource\n");
+ return -ENODEV;
+ }
master = spi_alloc_master(&pdev->dev, sizeof(*espi));
- if (!master) {
- dev_err(&pdev->dev, "failed to allocate spi master\n");
+ if (!master)
return -ENOMEM;
- }
master->setup = ep93xx_spi_setup;
- master->transfer = ep93xx_spi_transfer;
+ master->transfer_one_message = ep93xx_spi_transfer_one_message;
master->cleanup = ep93xx_spi_cleanup;
master->bus_num = pdev->id;
master->num_chipselect = info->num_chipselect;
@@ -1042,14 +904,13 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
espi = spi_master_get_devdata(master);
- espi->clk = clk_get(&pdev->dev, NULL);
+ espi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(espi->clk)) {
dev_err(&pdev->dev, "unable to get spi clock\n");
error = PTR_ERR(espi->clk);
goto fail_release_master;
}
- spin_lock_init(&espi->lock);
init_completion(&espi->wait);
/*
@@ -1060,55 +921,31 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
espi->pdev = pdev;
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- error = -EBUSY;
- dev_err(&pdev->dev, "failed to get irq resources\n");
- goto fail_put_clock;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "unable to get iomem resource\n");
- error = -ENODEV;
- goto fail_put_clock;
- }
-
espi->sspdr_phys = res->start + SSPDR;
espi->regs_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(espi->regs_base)) {
error = PTR_ERR(espi->regs_base);
- goto fail_put_clock;
+ goto fail_release_master;
}
error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
0, "ep93xx-spi", espi);
if (error) {
dev_err(&pdev->dev, "failed to request irq\n");
- goto fail_put_clock;
+ goto fail_release_master;
}
if (info->use_dma && ep93xx_spi_setup_dma(espi))
dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
- espi->wq = create_singlethread_workqueue("ep93xx_spid");
- if (!espi->wq) {
- dev_err(&pdev->dev, "unable to create workqueue\n");
- error = -ENOMEM;
- goto fail_free_dma;
- }
- INIT_WORK(&espi->msg_work, ep93xx_spi_work);
- INIT_LIST_HEAD(&espi->msg_queue);
- espi->running = true;
-
/* make sure that the hardware is disabled */
ep93xx_spi_write_u8(espi, SSPCR1, 0);
error = spi_register_master(master);
if (error) {
dev_err(&pdev->dev, "failed to register SPI master\n");
- goto fail_free_queue;
+ goto fail_free_dma;
}
dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
@@ -1116,12 +953,8 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
return 0;
-fail_free_queue:
- destroy_workqueue(espi->wq);
fail_free_dma:
ep93xx_spi_release_dma(espi);
-fail_put_clock:
- clk_put(espi->clk);
fail_release_master:
spi_master_put(master);
@@ -1133,31 +966,7 @@ static int ep93xx_spi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct ep93xx_spi *espi = spi_master_get_devdata(master);
- spin_lock_irq(&espi->lock);
- espi->running = false;
- spin_unlock_irq(&espi->lock);
-
- destroy_workqueue(espi->wq);
-
- /*
- * Complete remaining messages with %-ESHUTDOWN status.
- */
- spin_lock_irq(&espi->lock);
- while (!list_empty(&espi->msg_queue)) {
- struct spi_message *msg;
-
- msg = list_first_entry(&espi->msg_queue,
- struct spi_message, queue);
- list_del_init(&msg->queue);
- msg->status = -ESHUTDOWN;
- spin_unlock_irq(&espi->lock);
- msg->complete(msg->context);
- spin_lock_irq(&espi->lock);
- }
- spin_unlock_irq(&espi->lock);
-
ep93xx_spi_release_dma(espi);
- clk_put(espi->clk);
spi_unregister_master(master);
return 0;
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
new file mode 100644
index 00000000000..6cd07d13eca
--- /dev/null
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -0,0 +1,557 @@
+/*
+ * drivers/spi/spi-fsl-dspi.c
+ *
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * Freescale DSPI driver
+ * This file contains a driver for the Freescale DSPI
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#define DRIVER_NAME "fsl-dspi"
+
+#define TRAN_STATE_RX_VOID 0x01
+#define TRAN_STATE_TX_VOID 0x02
+#define TRAN_STATE_WORD_ODD_NUM 0x04
+
+#define DSPI_FIFO_SIZE 4
+
+#define SPI_MCR 0x00
+#define SPI_MCR_MASTER (1 << 31)
+#define SPI_MCR_PCSIS (0x3F << 16)
+#define SPI_MCR_CLR_TXF (1 << 11)
+#define SPI_MCR_CLR_RXF (1 << 10)
+
+#define SPI_TCR 0x08
+
+#define SPI_CTAR(x) (0x0c + (x * 4))
+#define SPI_CTAR_FMSZ(x) (((x) & 0x0000000f) << 27)
+#define SPI_CTAR_CPOL(x) ((x) << 26)
+#define SPI_CTAR_CPHA(x) ((x) << 25)
+#define SPI_CTAR_LSBFE(x) ((x) << 24)
+#define SPI_CTAR_PCSSCR(x) (((x) & 0x00000003) << 22)
+#define SPI_CTAR_PASC(x) (((x) & 0x00000003) << 20)
+#define SPI_CTAR_PDT(x) (((x) & 0x00000003) << 18)
+#define SPI_CTAR_PBR(x) (((x) & 0x00000003) << 16)
+#define SPI_CTAR_CSSCK(x) (((x) & 0x0000000f) << 12)
+#define SPI_CTAR_ASC(x) (((x) & 0x0000000f) << 8)
+#define SPI_CTAR_DT(x) (((x) & 0x0000000f) << 4)
+#define SPI_CTAR_BR(x) ((x) & 0x0000000f)
+
+#define SPI_CTAR0_SLAVE 0x0c
+
+#define SPI_SR 0x2c
+#define SPI_SR_EOQF 0x10000000
+
+#define SPI_RSER 0x30
+#define SPI_RSER_EOQFE 0x10000000
+
+#define SPI_PUSHR 0x34
+#define SPI_PUSHR_CONT (1 << 31)
+#define SPI_PUSHR_CTAS(x) (((x) & 0x00000007) << 28)
+#define SPI_PUSHR_EOQ (1 << 27)
+#define SPI_PUSHR_CTCNT (1 << 26)
+#define SPI_PUSHR_PCS(x) (((1 << (x)) & 0x0000003f) << 16)
+#define SPI_PUSHR_TXDATA(x) ((x) & 0x0000ffff)
+
+#define SPI_PUSHR_SLAVE 0x34
+
+#define SPI_POPR 0x38
+#define SPI_POPR_RXDATA(x) ((x) & 0x0000ffff)
+
+#define SPI_TXFR0 0x3c
+#define SPI_TXFR1 0x40
+#define SPI_TXFR2 0x44
+#define SPI_TXFR3 0x48
+#define SPI_RXFR0 0x7c
+#define SPI_RXFR1 0x80
+#define SPI_RXFR2 0x84
+#define SPI_RXFR3 0x88
+
+#define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1)
+#define SPI_FRAME_BITS_MASK SPI_CTAR_FMSZ(0xf)
+#define SPI_FRAME_BITS_16 SPI_CTAR_FMSZ(0xf)
+#define SPI_FRAME_BITS_8 SPI_CTAR_FMSZ(0x7)
+
+#define SPI_CS_INIT 0x01
+#define SPI_CS_ASSERT 0x02
+#define SPI_CS_DROP 0x04
+
+struct chip_data {
+ u32 mcr_val;
+ u32 ctar_val;
+ u16 void_write_data;
+};
+
+struct fsl_dspi {
+ struct spi_bitbang bitbang;
+ struct platform_device *pdev;
+
+ void *base;
+ int irq;
+ struct clk *clk;
+
+ struct spi_transfer *cur_transfer;
+ struct chip_data *cur_chip;
+ size_t len;
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+ char dataflags;
+ u8 cs;
+ u16 void_write_data;
+
+ wait_queue_head_t waitq;
+ u32 waitflags;
+};
+
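+/*
+ * A CTAR frame size other than 8 bits means each FIFO entry is handled
+ * as a 16-bit word (two bytes per push/pop).
+ */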
+static inline int is_double_byte_mode(struct fsl_dspi *dspi)
+{
+ return ((readl(dspi->base + SPI_CTAR(dspi->cs)) & SPI_FRAME_BITS_MASK)
+ == SPI_FRAME_BITS(8)) ? 0 : 1;
+}
+
+static void set_bit_mode(struct fsl_dspi *dspi, unsigned char bits)
+{
+ u32 temp;
+
+ temp = readl(dspi->base + SPI_CTAR(dspi->cs));
+ temp &= ~SPI_FRAME_BITS_MASK;
+ temp |= SPI_FRAME_BITS(bits);
+ writel(temp, dspi->base + SPI_CTAR(dspi->cs));
+}
+
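+/*
+ * Pick the first prescaler/scaler pair whose combined divide ratio keeps
+ * the SCK frequency at or below the requested speed; fall back to the
+ * largest dividers if no pair fits.
+ */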
+static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
+ unsigned long clkrate)
+{
+ /* Valid baud rate pre-scaler values */
+ int pbr_tbl[4] = {2, 3, 5, 7};
+ int brs[16] = { 2, 4, 6, 8,
+ 16, 32, 64, 128,
+ 256, 512, 1024, 2048,
+ 4096, 8192, 16384, 32768 };
+ int temp, i = 0, j = 0;
+
+ temp = clkrate / 2 / speed_hz;
+
+ for (i = 0; i < ARRAY_SIZE(pbr_tbl); i++)
+ for (j = 0; j < ARRAY_SIZE(brs); j++) {
+ if (pbr_tbl[i] * brs[j] >= temp) {
+ *pbr = i;
+ *br = j;
+ return;
+ }
+ }
+
+ pr_warn("Can not find valid buad rate,speed_hz is %d,clkrate is %ld\
+ ,we use the max prescaler value.\n", speed_hz, clkrate);
+ *pbr = ARRAY_SIZE(pbr_tbl) - 1;
+ *br = ARRAY_SIZE(brs) - 1;
+}
+
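+/*
+ * Fill the TX FIFO with up to DSPI_FIFO_SIZE entries, tagging the last one
+ * with EOQ so the end-of-queue interrupt fires once the queue has drained.
+ * Returns the number of bytes queued.
+ */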
+static int dspi_transfer_write(struct fsl_dspi *dspi)
+{
+ int tx_count = 0;
+ int tx_word;
+ u16 d16;
+ u8 d8;
+ u32 dspi_pushr = 0;
+ int first = 1;
+
+ tx_word = is_double_byte_mode(dspi);
+
+ /* If we are in word mode, but only have a single byte to transfer
+ * then switch to byte mode temporarily. Will switch back at the
+ * end of the transfer.
+ */
+ if (tx_word && (dspi->len == 1)) {
+ dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
+ set_bit_mode(dspi, 8);
+ tx_word = 0;
+ }
+
+ while (dspi->len && (tx_count < DSPI_FIFO_SIZE)) {
+ if (tx_word) {
+ if (dspi->len == 1)
+ break;
+
+ if (!(dspi->dataflags & TRAN_STATE_TX_VOID)) {
+ d16 = *(u16 *)dspi->tx;
+ dspi->tx += 2;
+ } else {
+ d16 = dspi->void_write_data;
+ }
+
+ dspi_pushr = SPI_PUSHR_TXDATA(d16) |
+ SPI_PUSHR_PCS(dspi->cs) |
+ SPI_PUSHR_CTAS(dspi->cs) |
+ SPI_PUSHR_CONT;
+
+ dspi->len -= 2;
+ } else {
+ if (!(dspi->dataflags & TRAN_STATE_TX_VOID)) {
+
+ d8 = *(u8 *)dspi->tx;
+ dspi->tx++;
+ } else {
+ d8 = (u8)dspi->void_write_data;
+ }
+
+ dspi_pushr = SPI_PUSHR_TXDATA(d8) |
+ SPI_PUSHR_PCS(dspi->cs) |
+ SPI_PUSHR_CTAS(dspi->cs) |
+ SPI_PUSHR_CONT;
+
+ dspi->len--;
+ }
+
+ if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) {
+ /* last entry queued for this FIFO burst */
+ dspi_pushr |= SPI_PUSHR_EOQ;
+ } else if (tx_word && (dspi->len == 1)) {
+ dspi_pushr |= SPI_PUSHR_EOQ;
+ }
+
+ if (first) {
+ first = 0;
+ dspi_pushr |= SPI_PUSHR_CTCNT; /* clear counter */
+ }
+
+ writel(dspi_pushr, dspi->base + SPI_PUSHR);
+ tx_count++;
+ }
+
+ return tx_count * (tx_word + 1);
+}
+
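+/*
+ * Drain the RX FIFO, storing the data only when the caller supplied an
+ * rx buffer. Returns the number of FIFO entries read.
+ */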
+static int dspi_transfer_read(struct fsl_dspi *dspi)
+{
+ int rx_count = 0;
+ int rx_word = is_double_byte_mode(dspi);
+ u16 d;
+ while ((dspi->rx < dspi->rx_end)
+ && (rx_count < DSPI_FIFO_SIZE)) {
+ if (rx_word) {
+ if ((dspi->rx_end - dspi->rx) == 1)
+ break;
+
+ d = SPI_POPR_RXDATA(readl(dspi->base + SPI_POPR));
+
+ if (!(dspi->dataflags & TRAN_STATE_RX_VOID))
+ *(u16 *)dspi->rx = d;
+ dspi->rx += 2;
+
+ } else {
+ d = SPI_POPR_RXDATA(readl(dspi->base + SPI_POPR));
+ if (!(dspi->dataflags & TRAN_STATE_RX_VOID))
+ *(u8 *)dspi->rx = d;
+ dspi->rx++;
+ }
+ rx_count++;
+ }
+
+ return rx_count;
+}
+
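+/*
+ * Program MCR/CTAR for this chip, enable the end-of-queue interrupt, queue
+ * the first FIFO burst and sleep until the interrupt handler reports that
+ * the whole transfer has been pushed and popped.
+ */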
+static int dspi_txrx_transfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
+ dspi->cur_transfer = t;
+ dspi->cur_chip = spi_get_ctldata(spi);
+ dspi->cs = spi->chip_select;
+ dspi->void_write_data = dspi->cur_chip->void_write_data;
+
+ dspi->dataflags = 0;
+ dspi->tx = (void *)t->tx_buf;
+ dspi->tx_end = dspi->tx + t->len;
+ dspi->rx = t->rx_buf;
+ dspi->rx_end = dspi->rx + t->len;
+ dspi->len = t->len;
+
+ if (!dspi->rx)
+ dspi->dataflags |= TRAN_STATE_RX_VOID;
+
+ if (!dspi->tx)
+ dspi->dataflags |= TRAN_STATE_TX_VOID;
+
+ writel(dspi->cur_chip->mcr_val, dspi->base + SPI_MCR);
+ writel(dspi->cur_chip->ctar_val, dspi->base + SPI_CTAR(dspi->cs));
+ writel(SPI_RSER_EOQFE, dspi->base + SPI_RSER);
+
+ if (t->speed_hz)
+ writel(dspi->cur_chip->ctar_val,
+ dspi->base + SPI_CTAR(dspi->cs));
+
+ dspi_transfer_write(dspi);
+
+ if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
+ dev_err(&dspi->pdev->dev, "wait transfer complete fail!\n");
+ dspi->waitflags = 0;
+
+ return t->len - dspi->len;
+}
+
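+/*
+ * SPI_PUSHR_CONT keeps the chip select asserted between FIFO entries, so
+ * set it while the bitbang core holds CS active and clear it otherwise.
+ */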
+static void dspi_chipselect(struct spi_device *spi, int value)
+{
+ struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
+ u32 pushr = readl(dspi->base + SPI_PUSHR);
+
+ switch (value) {
+ case BITBANG_CS_ACTIVE:
+ pushr |= SPI_PUSHR_CONT;
+ break;
+ case BITBANG_CS_INACTIVE:
+ pushr &= ~SPI_PUSHR_CONT;
+ break;
+ }
+
+ writel(pushr, dspi->base + SPI_PUSHR);
+}
+
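+/*
+ * Compute the MCR/CTAR values (frame size, clock polarity/phase, bit order
+ * and baud dividers) for this device and cache them in its controller data.
+ */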
+static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct chip_data *chip;
+ struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
+ unsigned char br = 0, pbr = 0, fmsz = 0;
+
+ /* Only alloc on first setup */
+ chip = spi_get_ctldata(spi);
+ if (chip == NULL) {
+ chip = kcalloc(1, sizeof(struct chip_data), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+ }
+
+ chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS |
+ SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF;
+ if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) {
+ fmsz = spi->bits_per_word - 1;
+ } else {
+ pr_err("Invalid wordsize\n");
+ kfree(chip);
+ return -ENODEV;
+ }
+
+ chip->void_write_data = 0;
+
+ hz_to_spi_baud(&pbr, &br,
+ spi->max_speed_hz, clk_get_rate(dspi->clk));
+
+ chip->ctar_val = SPI_CTAR_FMSZ(fmsz)
+ | SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0)
+ | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0)
+ | SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0)
+ | SPI_CTAR_PBR(pbr)
+ | SPI_CTAR_BR(br);
+
+ spi_set_ctldata(spi, chip);
+
+ return 0;
+}
+
+static int dspi_setup(struct spi_device *spi)
+{
+ if (!spi->max_speed_hz)
+ return -EINVAL;
+
+ if (!spi->bits_per_word)
+ spi->bits_per_word = 8;
+
+ return dspi_setup_transfer(spi, NULL);
+}
+
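+/*
+ * End-of-queue interrupt: acknowledge EOQF, drain the RX FIFO, then either
+ * queue the next TX burst or wake the sleeping transfer when done.
+ */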
+static irqreturn_t dspi_interrupt(int irq, void *dev_id)
+{
+ struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
+
+ writel(SPI_SR_EOQF, dspi->base + SPI_SR);
+
+ dspi_transfer_read(dspi);
+
+ if (!dspi->len) {
+ if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM)
+ set_bit_mode(dspi, 16);
+ dspi->waitflags = 1;
+ wake_up_interruptible(&dspi->waitq);
+ } else {
+ dspi_transfer_write(dspi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static const struct of_device_id fsl_dspi_dt_ids[] = {
+ { .compatible = "fsl,vf610-dspi", .data = NULL, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
+
+#ifdef CONFIG_PM_SLEEP
+static int dspi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct fsl_dspi *dspi = spi_master_get_devdata(master);
+
+ spi_master_suspend(master);
+ clk_disable_unprepare(dspi->clk);
+
+ return 0;
+}
+
+static int dspi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct fsl_dspi *dspi = spi_master_get_devdata(master);
+
+ clk_prepare_enable(dspi->clk);
+ spi_master_resume(master);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops dspi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(dspi_suspend, dspi_resume)
+};
+
+static int dspi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct spi_master *master;
+ struct fsl_dspi *dspi;
+ struct resource *res;
+ int ret = 0, cs_num, bus_num;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
+ if (!master)
+ return -ENOMEM;
+
+ dspi = spi_master_get_devdata(master);
+ dspi->pdev = pdev;
+ dspi->bitbang.master = spi_master_get(master);
+ dspi->bitbang.chipselect = dspi_chipselect;
+ dspi->bitbang.setup_transfer = dspi_setup_transfer;
+ dspi->bitbang.txrx_bufs = dspi_txrx_transfer;
+ dspi->bitbang.master->setup = dspi_setup;
+ dspi->bitbang.master->dev.of_node = pdev->dev.of_node;
+
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
+ SPI_BPW_MASK(16);
+
+ ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
+ goto out_master_put;
+ }
+ master->num_chipselect = cs_num;
+
+ ret = of_property_read_u32(np, "bus-num", &bus_num);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't get bus-num\n");
+ goto out_master_put;
+ }
+ master->bus_num = bus_num;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "can't get platform resource\n");
+ ret = -EINVAL;
+ goto out_master_put;
+ }
+
+ dspi->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dspi->base)) {
+ ret = PTR_ERR(dspi->base);
+ goto out_master_put;
+ }
+
+ dspi->irq = platform_get_irq(pdev, 0);
+ if (dspi->irq < 0) {
+ dev_err(&pdev->dev, "can't get platform irq\n");
+ ret = dspi->irq;
+ goto out_master_put;
+ }
+
+ ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
+ pdev->name, dspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
+ goto out_master_put;
+ }
+
+ dspi->clk = devm_clk_get(&pdev->dev, "dspi");
+ if (IS_ERR(dspi->clk)) {
+ ret = PTR_ERR(dspi->clk);
+ dev_err(&pdev->dev, "unable to get clock\n");
+ goto out_master_put;
+ }
+ clk_prepare_enable(dspi->clk);
+
+ init_waitqueue_head(&dspi->waitq);
+ platform_set_drvdata(pdev, dspi);
+
+ ret = spi_bitbang_start(&dspi->bitbang);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Problem registering DSPI master\n");
+ goto out_clk_put;
+ }
+
+ pr_info("Freescale DSPI master initialized\n");
+ return ret;
+
+out_clk_put:
+ clk_disable_unprepare(dspi->clk);
+out_master_put:
+ spi_master_put(master);
+ platform_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+static int dspi_remove(struct platform_device *pdev)
+{
+ struct fsl_dspi *dspi = platform_get_drvdata(pdev);
+
+ /* Disconnect from the SPI framework */
+ spi_bitbang_stop(&dspi->bitbang);
+ spi_master_put(dspi->bitbang.master);
+
+ return 0;
+}
+
+static struct platform_driver fsl_dspi_driver = {
+ .driver.name = DRIVER_NAME,
+ .driver.of_match_table = fsl_dspi_dt_ids,
+ .driver.owner = THIS_MODULE,
+ .driver.pm = &dspi_pm,
+ .probe = dspi_probe,
+ .remove = dspi_remove,
+};
+module_platform_driver(fsl_dspi_driver);
+
+MODULE_DESCRIPTION("Freescale DSPI Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 6a74d7848d9..b8f1103fe28 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -584,7 +584,7 @@ static void fsl_espi_remove(struct mpc8xxx_spi *mspi)
static struct spi_master * fsl_espi_probe(struct device *dev,
struct resource *mem, unsigned int irq)
{
- struct fsl_spi_platform_data *pdata = dev->platform_data;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct spi_master *master;
struct mpc8xxx_spi *mpc8xxx_spi;
struct fsl_espi_reg *reg_base;
@@ -665,7 +665,7 @@ err:
static int of_fsl_espi_get_chipselects(struct device *dev)
{
struct device_node *np = dev->of_node;
- struct fsl_spi_platform_data *pdata = dev->platform_data;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
const u32 *prop;
int len;
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index e947f2d1b2f..0b75f26158a 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -122,7 +122,7 @@ const char *mpc8xxx_spi_strmode(unsigned int flags)
int mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
unsigned int irq)
{
- struct fsl_spi_platform_data *pdata = dev->platform_data;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct spi_master *master;
struct mpc8xxx_spi *mpc8xxx_spi;
int ret = 0;
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 41e89c3e3ed..2129fcd1c31 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -574,7 +574,7 @@ static void fsl_spi_grlib_cs_control(struct spi_device *spi, bool on)
static void fsl_spi_grlib_probe(struct device *dev)
{
- struct fsl_spi_platform_data *pdata = dev->platform_data;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct spi_master *master = dev_get_drvdata(dev);
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
struct fsl_spi_reg *reg_base = mpc8xxx_spi->reg_base;
@@ -600,7 +600,7 @@ static void fsl_spi_grlib_probe(struct device *dev)
static struct spi_master * fsl_spi_probe(struct device *dev,
struct resource *mem, unsigned int irq)
{
- struct fsl_spi_platform_data *pdata = dev->platform_data;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct spi_master *master;
struct mpc8xxx_spi *mpc8xxx_spi;
struct fsl_spi_reg *reg_base;
@@ -700,7 +700,8 @@ err:
static void fsl_spi_cs_control(struct spi_device *spi, bool on)
{
struct device *dev = spi->dev.parent->parent;
- struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(dev->platform_data);
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
u16 cs = spi->chip_select;
int gpio = pinfo->gpios[cs];
bool alow = pinfo->alow_flags[cs];
@@ -711,7 +712,7 @@ static void fsl_spi_cs_control(struct spi_device *spi, bool on)
static int of_fsl_spi_get_chipselects(struct device *dev)
{
struct device_node *np = dev->of_node;
- struct fsl_spi_platform_data *pdata = dev->platform_data;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
int ngpios;
int i = 0;
@@ -790,7 +791,7 @@ err_alloc_flags:
static int of_fsl_spi_free_chipselects(struct device *dev)
{
- struct fsl_spi_platform_data *pdata = dev->platform_data;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
int i;
@@ -889,7 +890,7 @@ static int plat_mpc8xxx_spi_probe(struct platform_device *pdev)
int irq;
struct spi_master *master;
- if (!pdev->dev.platform_data)
+ if (!dev_get_platdata(&pdev->dev))
return -EINVAL;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -901,7 +902,7 @@ static int plat_mpc8xxx_spi_probe(struct platform_device *pdev)
return -EINVAL;
master = fsl_spi_probe(&pdev->dev, mem, irq);
- return PTR_RET(master);
+ return PTR_ERR_OR_ZERO(master);
}
static int plat_mpc8xxx_spi_remove(struct platform_device *pdev)
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index a54524cf42c..68b69fec13a 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -420,7 +420,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
if (status > 0)
use_of = 1;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
#ifdef GENERIC_BITBANG
if (!pdata || !pdata->num_chipselect)
return -ENODEV;
@@ -506,7 +506,7 @@ static int spi_gpio_remove(struct platform_device *pdev)
int status;
spi_gpio = platform_get_drvdata(pdev);
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
/* stop() unregisters child devices too */
status = spi_bitbang_stop(&spi_gpio->bitbang);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 7db4f43ee4d..15323d8bd9c 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -619,6 +619,7 @@ static const struct of_device_id spi_imx_dt_ids[] = {
{ .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);
static void spi_imx_chipselect(struct spi_device *spi, int is_active)
{
@@ -796,10 +797,11 @@ static int spi_imx_probe(struct platform_device *pdev)
if (!gpio_is_valid(cs_gpio))
continue;
- ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
+ ret = devm_gpio_request(&pdev->dev, spi_imx->chipselect[i],
+ DRIVER_NAME);
if (ret) {
dev_err(&pdev->dev, "can't get cs gpios\n");
- goto out_gpio_free;
+ goto out_master_put;
}
}
@@ -816,50 +818,44 @@ static int spi_imx_probe(struct platform_device *pdev)
(struct spi_imx_devtype_data *) pdev->id_entry->driver_data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "can't get platform resource\n");
- ret = -ENOMEM;
- goto out_gpio_free;
- }
-
- if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
- dev_err(&pdev->dev, "request_mem_region failed\n");
- ret = -EBUSY;
- goto out_gpio_free;
- }
-
- spi_imx->base = ioremap(res->start, resource_size(res));
- if (!spi_imx->base) {
- ret = -EINVAL;
- goto out_release_mem;
+ spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spi_imx->base)) {
+ ret = PTR_ERR(spi_imx->base);
+ goto out_master_put;
}
spi_imx->irq = platform_get_irq(pdev, 0);
if (spi_imx->irq < 0) {
ret = -EINVAL;
- goto out_iounmap;
+ goto out_master_put;
}
- ret = request_irq(spi_imx->irq, spi_imx_isr, 0, DRIVER_NAME, spi_imx);
+ ret = devm_request_irq(&pdev->dev, spi_imx->irq, spi_imx_isr, 0,
+ DRIVER_NAME, spi_imx);
if (ret) {
dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret);
- goto out_iounmap;
+ goto out_master_put;
}
spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(spi_imx->clk_ipg)) {
ret = PTR_ERR(spi_imx->clk_ipg);
- goto out_free_irq;
+ goto out_master_put;
}
spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
if (IS_ERR(spi_imx->clk_per)) {
ret = PTR_ERR(spi_imx->clk_per);
- goto out_free_irq;
+ goto out_master_put;
}
- clk_prepare_enable(spi_imx->clk_per);
- clk_prepare_enable(spi_imx->clk_ipg);
+ ret = clk_prepare_enable(spi_imx->clk_per);
+ if (ret)
+ goto out_master_put;
+
+ ret = clk_prepare_enable(spi_imx->clk_ipg);
+ if (ret)
+ goto out_put_per;
spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
@@ -879,47 +875,27 @@ static int spi_imx_probe(struct platform_device *pdev)
return ret;
out_clk_put:
- clk_disable_unprepare(spi_imx->clk_per);
clk_disable_unprepare(spi_imx->clk_ipg);
-out_free_irq:
- free_irq(spi_imx->irq, spi_imx);
-out_iounmap:
- iounmap(spi_imx->base);
-out_release_mem:
- release_mem_region(res->start, resource_size(res));
-out_gpio_free:
- while (--i >= 0) {
- if (gpio_is_valid(spi_imx->chipselect[i]))
- gpio_free(spi_imx->chipselect[i]);
- }
+out_put_per:
+ clk_disable_unprepare(spi_imx->clk_per);
+out_master_put:
spi_master_put(master);
- kfree(master);
+
return ret;
}
static int spi_imx_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
- int i;
spi_bitbang_stop(&spi_imx->bitbang);
writel(0, spi_imx->base + MXC_CSPICTRL);
- clk_disable_unprepare(spi_imx->clk_per);
clk_disable_unprepare(spi_imx->clk_ipg);
- free_irq(spi_imx->irq, spi_imx);
- iounmap(spi_imx->base);
-
- for (i = 0; i < master->num_chipselect; i++)
- if (gpio_is_valid(spi_imx->chipselect[i]))
- gpio_free(spi_imx->chipselect[i]);
-
+ clk_disable_unprepare(spi_imx->clk_per);
spi_master_put(master);
- release_mem_region(res->start, resource_size(res));
-
return 0;
}
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 29fce6af514..dbc5e999a1f 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -38,7 +38,8 @@ struct mpc512x_psc_spi {
struct mpc512x_psc_fifo __iomem *fifo;
unsigned int irq;
u8 bits_per_word;
- u32 mclk;
+ struct clk *clk_mclk;
+ u32 mclk_rate;
struct completion txisrdone;
};
@@ -72,6 +73,7 @@ static void mpc512x_psc_spi_activate_cs(struct spi_device *spi)
struct mpc52xx_psc __iomem *psc = mps->psc;
u32 sicr;
u32 ccr;
+ int speed;
u16 bclkdiv;
sicr = in_be32(&psc->sicr);
@@ -95,10 +97,10 @@ static void mpc512x_psc_spi_activate_cs(struct spi_device *spi)
ccr = in_be32(&psc->ccr);
ccr &= 0xFF000000;
- if (cs->speed_hz)
- bclkdiv = (mps->mclk / cs->speed_hz) - 1;
- else
- bclkdiv = (mps->mclk / 1000000) - 1; /* default 1MHz */
+ speed = cs->speed_hz;
+ if (!speed)
+ speed = 1000000; /* default 1MHz */
+ bclkdiv = (mps->mclk_rate / speed) - 1;
ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8));
out_be32(&psc->ccr, ccr);
@@ -386,19 +388,11 @@ static int mpc512x_psc_spi_port_config(struct spi_master *master,
{
struct mpc52xx_psc __iomem *psc = mps->psc;
struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
- struct clk *spiclk;
- int ret = 0;
- char name[32];
u32 sicr;
u32 ccr;
+ int speed;
u16 bclkdiv;
- sprintf(name, "psc%d_mclk", master->bus_num);
- spiclk = clk_get(&master->dev, name);
- clk_enable(spiclk);
- mps->mclk = clk_get_rate(spiclk);
- clk_put(spiclk);
-
/* Reset the PSC into a known state */
out_8(&psc->command, MPC52xx_PSC_RST_RX);
out_8(&psc->command, MPC52xx_PSC_RST_TX);
@@ -425,7 +419,8 @@ static int mpc512x_psc_spi_port_config(struct spi_master *master,
ccr = in_be32(&psc->ccr);
ccr &= 0xFF000000;
- bclkdiv = (mps->mclk / 1000000) - 1; /* default 1MHz */
+ speed = 1000000; /* default 1MHz */
+ bclkdiv = (mps->mclk_rate / speed) - 1;
ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8));
out_be32(&psc->ccr, ccr);
@@ -445,7 +440,7 @@ static int mpc512x_psc_spi_port_config(struct spi_master *master,
mps->bits_per_word = 8;
- return ret;
+ return 0;
}
static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id)
@@ -474,11 +469,14 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
u32 size, unsigned int irq,
s16 bus_num)
{
- struct fsl_spi_platform_data *pdata = dev->platform_data;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct mpc512x_psc_spi *mps;
struct spi_master *master;
int ret;
void *tempp;
+ int psc_num;
+ char clk_name[16];
+ struct clk *clk;
master = spi_alloc_master(dev, sizeof *mps);
if (master == NULL)
@@ -521,16 +519,29 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
goto free_master;
init_completion(&mps->txisrdone);
+ psc_num = master->bus_num;
+ snprintf(clk_name, sizeof(clk_name), "psc%d_mclk", psc_num);
+ clk = devm_clk_get(dev, clk_name);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto free_irq;
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto free_irq;
+ mps->clk_mclk = clk;
+ mps->mclk_rate = clk_get_rate(clk);
+
ret = mpc512x_psc_spi_port_config(master, mps);
if (ret < 0)
- goto free_irq;
+ goto free_clock;
ret = spi_register_master(master);
if (ret < 0)
- goto free_irq;
+ goto free_clock;
return ret;
+free_clock:
+ clk_disable_unprepare(mps->clk_mclk);
free_irq:
free_irq(mps->irq, mps);
free_master:
@@ -547,6 +558,7 @@ static int mpc512x_psc_spi_do_remove(struct device *dev)
struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
spi_unregister_master(master);
+ clk_disable_unprepare(mps->clk_mclk);
free_irq(mps->irq, mps);
if (mps->psc)
iounmap(mps->psc);
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
index fed0571d4de..6e925dc3439 100644
--- a/drivers/spi/spi-mpc52xx-psc.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -366,7 +366,7 @@ static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id)
static int mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
u32 size, unsigned int irq, s16 bus_num)
{
- struct fsl_spi_platform_data *pdata = dev->platform_data;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct mpc52xx_psc_spi *mps;
struct spi_master *master;
int ret;
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 424d38e5942..de7b1141b90 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -67,13 +67,8 @@ static int mxs_spi_setup_transfer(struct spi_device *dev,
{
struct mxs_spi *spi = spi_master_get_devdata(dev->master);
struct mxs_ssp *ssp = &spi->ssp;
- uint8_t bits_per_word;
uint32_t hz = 0;
- bits_per_word = dev->bits_per_word;
- if (t && t->bits_per_word)
- bits_per_word = t->bits_per_word;
-
hz = dev->max_speed_hz;
if (t && t->speed_hz)
hz = min(hz, t->speed_hz);
@@ -513,7 +508,7 @@ static int mxs_spi_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq_err = platform_get_irq(pdev, 0);
- if (!iores || irq_err < 0)
+ if (irq_err < 0)
return -EINVAL;
base = devm_ioremap_resource(&pdev->dev, iores);
@@ -563,25 +558,31 @@ static int mxs_spi_probe(struct platform_device *pdev)
goto out_master_free;
}
- clk_prepare_enable(ssp->clk);
+ ret = clk_prepare_enable(ssp->clk);
+ if (ret)
+ goto out_dma_release;
+
clk_set_rate(ssp->clk, clk_freq);
ssp->clk_rate = clk_get_rate(ssp->clk) / 1000;
- stmp_reset_block(ssp->base);
+ ret = stmp_reset_block(ssp->base);
+ if (ret)
+ goto out_disable_clk;
platform_set_drvdata(pdev, master);
ret = spi_register_master(master);
if (ret) {
dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret);
- goto out_free_dma;
+ goto out_disable_clk;
}
return 0;
-out_free_dma:
- dma_release_channel(ssp->dmach);
+out_disable_clk:
clk_disable_unprepare(ssp->clk);
+out_dma_release:
+ dma_release_channel(ssp->dmach);
out_master_free:
spi_master_put(master);
return ret;
@@ -598,11 +599,8 @@ static int mxs_spi_remove(struct platform_device *pdev)
ssp = &spi->ssp;
spi_unregister_master(master);
-
- dma_release_channel(ssp->dmach);
-
clk_disable_unprepare(ssp->clk);
-
+ dma_release_channel(ssp->dmach);
spi_master_put(master);
return 0;
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index 150d85453c2..47a68b43bcd 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -174,17 +174,6 @@ static void nuc900_spi_gobusy(struct nuc900_spi *hw)
spin_unlock_irqrestore(&hw->lock, flags);
}
-static int nuc900_spi_setupxfer(struct spi_device *spi,
- struct spi_transfer *t)
-{
- return 0;
-}
-
-static int nuc900_spi_setup(struct spi_device *spi)
-{
- return 0;
-}
-
static inline unsigned int hw_txbyte(struct nuc900_spi *hw, int count)
{
return hw->tx ? hw->tx[count] : 0;
@@ -361,7 +350,7 @@ static int nuc900_spi_probe(struct platform_device *pdev)
hw = spi_master_get_devdata(master);
hw->master = spi_master_get(master);
- hw->pdata = pdev->dev.platform_data;
+ hw->pdata = dev_get_platdata(&pdev->dev);
hw->dev = &pdev->dev;
if (hw->pdata == NULL) {
@@ -373,14 +362,12 @@ static int nuc900_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, hw);
init_completion(&hw->done);
- master->mode_bits = SPI_MODE_0;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->num_chipselect = hw->pdata->num_cs;
master->bus_num = hw->pdata->bus_num;
hw->bitbang.master = hw->master;
- hw->bitbang.setup_transfer = nuc900_spi_setupxfer;
hw->bitbang.chipselect = nuc900_spi_chipsel;
hw->bitbang.txrx_bufs = nuc900_spi_txrx;
- hw->bitbang.master->setup = nuc900_spi_setup;
hw->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (hw->res == NULL) {
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index 58deb79d046..333cb1badcd 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -285,7 +285,7 @@ static int tiny_spi_of_probe(struct platform_device *pdev)
static int tiny_spi_probe(struct platform_device *pdev)
{
- struct tiny_spi_platform_data *platp = pdev->dev.platform_data;
+ struct tiny_spi_platform_data *platp = dev_get_platdata(&pdev->dev);
struct tiny_spi *hw;
struct spi_master *master;
struct resource *res;
@@ -315,15 +315,11 @@ static int tiny_spi_probe(struct platform_device *pdev)
/* find and map our resources */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- goto exit_busy;
- if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
- pdev->name))
- goto exit_busy;
- hw->base = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!hw->base)
- goto exit_busy;
+ hw->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->base)) {
+ err = PTR_ERR(hw->base);
+ goto exit;
+ }
/* irq is optional */
hw->irq = platform_get_irq(pdev, 0);
if (hw->irq >= 0) {
@@ -337,8 +333,10 @@ static int tiny_spi_probe(struct platform_device *pdev)
if (platp) {
hw->gpio_cs_count = platp->gpio_cs_count;
hw->gpio_cs = platp->gpio_cs;
- if (platp->gpio_cs_count && !platp->gpio_cs)
- goto exit_busy;
+ if (platp->gpio_cs_count && !platp->gpio_cs) {
+ err = -EBUSY;
+ goto exit;
+ }
hw->freq = platp->freq;
hw->baudwidth = platp->baudwidth;
} else {
@@ -365,8 +363,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
exit_gpio:
while (i-- > 0)
gpio_free(hw->gpio_cs[i]);
-exit_busy:
- err = -EBUSY;
exit:
spi_master_put(master);
return err;
diff --git a/drivers/spi/spi-octeon.c b/drivers/spi/spi-octeon.c
index 24daf964a40..5f28ddbe4f7 100644
--- a/drivers/spi/spi-octeon.c
+++ b/drivers/spi/spi-octeon.c
@@ -28,7 +28,6 @@
#define OCTEON_SPI_MAX_CLOCK_HZ 16000000
struct octeon_spi {
- struct spi_master *my_master;
u64 register_base;
u64 last_cfg;
u64 cs_enax;
@@ -64,7 +63,6 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
unsigned int speed_hz;
int mode;
bool cpha, cpol;
- int bits_per_word;
const u8 *tx_buf;
u8 *rx_buf;
int len;
@@ -76,12 +74,9 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
mode = msg_setup->mode;
cpha = mode & SPI_CPHA;
cpol = mode & SPI_CPOL;
- bits_per_word = msg_setup->bits_per_word;
if (xfer->speed_hz)
speed_hz = xfer->speed_hz;
- if (xfer->bits_per_word)
- bits_per_word = xfer->bits_per_word;
if (speed_hz > OCTEON_SPI_MAX_CLOCK_HZ)
speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
@@ -166,19 +161,6 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
return xfer->len;
}
-static int octeon_spi_validate_bpw(struct spi_device *spi, u32 speed)
-{
- switch (speed) {
- case 8:
- break;
- default:
- dev_err(&spi->dev, "Error: %d bits per word not supported\n",
- speed);
- return -EINVAL;
- }
- return 0;
-}
-
static int octeon_spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
@@ -197,15 +179,6 @@ static int octeon_spi_transfer_one_message(struct spi_master *master,
}
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if (xfer->bits_per_word) {
- status = octeon_spi_validate_bpw(msg->spi,
- xfer->bits_per_word);
- if (status)
- goto err;
- }
- }
-
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
bool last_xfer = &xfer->transfer_list == msg->transfers.prev;
int r = octeon_spi_do_transfer(p, msg, xfer, last_xfer);
if (r < 0) {
@@ -236,14 +209,9 @@ static struct octeon_spi_setup *octeon_spi_new_setup(struct spi_device *spi)
static int octeon_spi_setup(struct spi_device *spi)
{
- int r;
struct octeon_spi_setup *new_setup;
struct octeon_spi_setup *old_setup = spi_get_ctldata(spi);
- r = octeon_spi_validate_bpw(spi, spi->bits_per_word);
- if (r)
- return r;
-
new_setup = octeon_spi_new_setup(spi);
if (!new_setup)
return -ENOMEM;
@@ -261,14 +229,8 @@ static void octeon_spi_cleanup(struct spi_device *spi)
kfree(old_setup);
}
-static int octeon_spi_nop_transfer_hardware(struct spi_master *master)
-{
- return 0;
-}
-
static int octeon_spi_probe(struct platform_device *pdev)
{
-
struct resource *res_mem;
struct spi_master *master;
struct octeon_spi *p;
@@ -278,8 +240,7 @@ static int octeon_spi_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
p = spi_master_get_devdata(master);
- platform_set_drvdata(pdev, p);
- p->my_master = master;
+ platform_set_drvdata(pdev, master);
res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -307,9 +268,8 @@ static int octeon_spi_probe(struct platform_device *pdev)
master->setup = octeon_spi_setup;
master->cleanup = octeon_spi_cleanup;
- master->prepare_transfer_hardware = octeon_spi_nop_transfer_hardware;
master->transfer_one_message = octeon_spi_transfer_one_message;
- master->unprepare_transfer_hardware = octeon_spi_nop_transfer_hardware;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
master->dev.of_node = pdev->dev.of_node;
err = spi_register_master(master);
@@ -328,10 +288,11 @@ fail:
static int octeon_spi_remove(struct platform_device *pdev)
{
- struct octeon_spi *p = platform_get_drvdata(pdev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct octeon_spi *p = spi_master_get_devdata(master);
u64 register_base = p->register_base;
- spi_unregister_master(p->my_master);
+ spi_unregister_master(master);
/* Clear the CSENA* and put everything in a known state. */
cvmx_write_csr(register_base + OCTEON_SPI_CFG, 0);
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index ee25670f8cf..69ecf05757d 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -83,11 +83,6 @@
#define SPI_SHUTDOWN 1
struct omap1_spi100k {
- struct work_struct work;
-
- /* lock protects queue and registers */
- spinlock_t lock;
- struct list_head msg_queue;
struct spi_master *master;
struct clk *ick;
struct clk *fck;
@@ -104,8 +99,6 @@ struct omap1_spi100k_cs {
int word_len;
};
-static struct workqueue_struct *omap1_spi100k_wq;
-
#define MOD_REG_BIT(val, mask, set) do { \
if (set) \
val |= mask; \
@@ -310,170 +303,102 @@ static int omap1_spi100k_setup(struct spi_device *spi)
spi100k_open(spi->master);
- clk_enable(spi100k->ick);
- clk_enable(spi100k->fck);
+ clk_prepare_enable(spi100k->ick);
+ clk_prepare_enable(spi100k->fck);
ret = omap1_spi100k_setup_transfer(spi, NULL);
- clk_disable(spi100k->ick);
- clk_disable(spi100k->fck);
+ clk_disable_unprepare(spi100k->ick);
+ clk_disable_unprepare(spi100k->fck);
return ret;
}
-static void omap1_spi100k_work(struct work_struct *work)
+static int omap1_spi100k_prepare_hardware(struct spi_master *master)
{
- struct omap1_spi100k *spi100k;
- int status = 0;
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
- spi100k = container_of(work, struct omap1_spi100k, work);
- spin_lock_irq(&spi100k->lock);
+ clk_prepare_enable(spi100k->ick);
+ clk_prepare_enable(spi100k->fck);
- clk_enable(spi100k->ick);
- clk_enable(spi100k->fck);
+ return 0;
+}
- /* We only enable one channel at a time -- the one whose message is
- * at the head of the queue -- although this controller would gladly
- * arbitrate among multiple channels. This corresponds to "single
- * channel" master mode. As a side effect, we need to manage the
- * chipselect with the FORCE bit ... CS != channel enable.
- */
- while (!list_empty(&spi100k->msg_queue)) {
- struct spi_message *m;
- struct spi_device *spi;
- struct spi_transfer *t = NULL;
- int cs_active = 0;
- struct omap1_spi100k_cs *cs;
- int par_override = 0;
-
- m = container_of(spi100k->msg_queue.next, struct spi_message,
- queue);
-
- list_del_init(&m->queue);
- spin_unlock_irq(&spi100k->lock);
-
- spi = m->spi;
- cs = spi->controller_state;
-
- list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
- status = -EINVAL;
+static int omap1_spi100k_transfer_one_message(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+ struct spi_device *spi = m->spi;
+ struct spi_transfer *t = NULL;
+ int cs_active = 0;
+ int par_override = 0;
+ int status = 0;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
+ status = -EINVAL;
+ break;
+ }
+ if (par_override || t->speed_hz || t->bits_per_word) {
+ par_override = 1;
+ status = omap1_spi100k_setup_transfer(spi, t);
+ if (status < 0)
break;
- }
- if (par_override || t->speed_hz || t->bits_per_word) {
- par_override = 1;
- status = omap1_spi100k_setup_transfer(spi, t);
- if (status < 0)
- break;
- if (!t->speed_hz && !t->bits_per_word)
- par_override = 0;
- }
+ if (!t->speed_hz && !t->bits_per_word)
+ par_override = 0;
+ }
- if (!cs_active) {
- omap1_spi100k_force_cs(spi100k, 1);
- cs_active = 1;
- }
+ if (!cs_active) {
+ omap1_spi100k_force_cs(spi100k, 1);
+ cs_active = 1;
+ }
- if (t->len) {
- unsigned count;
+ if (t->len) {
+ unsigned count;
- count = omap1_spi100k_txrx_pio(spi, t);
- m->actual_length += count;
+ count = omap1_spi100k_txrx_pio(spi, t);
+ m->actual_length += count;
- if (count != t->len) {
- status = -EIO;
- break;
- }
+ if (count != t->len) {
+ status = -EIO;
+ break;
}
+ }
- if (t->delay_usecs)
- udelay(t->delay_usecs);
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
- /* ignore the "leave it on after last xfer" hint */
+ /* ignore the "leave it on after last xfer" hint */
- if (t->cs_change) {
- omap1_spi100k_force_cs(spi100k, 0);
- cs_active = 0;
- }
- }
-
- /* Restore defaults if they were overriden */
- if (par_override) {
- par_override = 0;
- status = omap1_spi100k_setup_transfer(spi, NULL);
+ if (t->cs_change) {
+ omap1_spi100k_force_cs(spi100k, 0);
+ cs_active = 0;
}
+ }
- if (cs_active)
- omap1_spi100k_force_cs(spi100k, 0);
+ /* Restore defaults if they were overridden */
+ if (par_override) {
+ par_override = 0;
+ status = omap1_spi100k_setup_transfer(spi, NULL);
+ }
- m->status = status;
- m->complete(m->context);
+ if (cs_active)
+ omap1_spi100k_force_cs(spi100k, 0);
- spin_lock_irq(&spi100k->lock);
- }
+ m->status = status;
- clk_disable(spi100k->ick);
- clk_disable(spi100k->fck);
- spin_unlock_irq(&spi100k->lock);
+ spi_finalize_current_message(master);
- if (status < 0)
- printk(KERN_WARNING "spi transfer failed with %d\n", status);
+ return status;
}
-static int omap1_spi100k_transfer(struct spi_device *spi, struct spi_message *m)
+static int omap1_spi100k_unprepare_hardware(struct spi_master *master)
{
- struct omap1_spi100k *spi100k;
- unsigned long flags;
- struct spi_transfer *t;
-
- m->actual_length = 0;
- m->status = -EINPROGRESS;
-
- spi100k = spi_master_get_devdata(spi->master);
-
- /* Don't accept new work if we're shutting down */
- if (spi100k->state == SPI_SHUTDOWN)
- return -ESHUTDOWN;
-
- /* reject invalid messages and transfers */
- if (list_empty(&m->transfers) || !m->complete)
- return -EINVAL;
-
- list_for_each_entry(t, &m->transfers, transfer_list) {
- const void *tx_buf = t->tx_buf;
- void *rx_buf = t->rx_buf;
- unsigned len = t->len;
-
- if (t->speed_hz > OMAP1_SPI100K_MAX_FREQ
- || (len && !(rx_buf || tx_buf))) {
- dev_dbg(&spi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
- t->speed_hz,
- len,
- tx_buf ? "tx" : "",
- rx_buf ? "rx" : "",
- t->bits_per_word);
- return -EINVAL;
- }
-
- if (t->speed_hz && t->speed_hz < OMAP1_SPI100K_MAX_FREQ/(1<<16)) {
- dev_dbg(&spi->dev, "%d Hz max exceeds %d\n",
- t->speed_hz,
- OMAP1_SPI100K_MAX_FREQ/(1<<16));
- return -EINVAL;
- }
-
- }
-
- spin_lock_irqsave(&spi100k->lock, flags);
- list_add_tail(&m->queue, &spi100k->msg_queue);
- queue_work(omap1_spi100k_wq, &spi100k->work);
- spin_unlock_irqrestore(&spi100k->lock, flags);
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
- return 0;
-}
+ clk_disable_unprepare(spi100k->ick);
+ clk_disable_unprepare(spi100k->fck);
-static int omap1_spi100k_reset(struct omap1_spi100k *spi100k)
-{
return 0;
}
@@ -496,11 +421,15 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
master->bus_num = pdev->id;
master->setup = omap1_spi100k_setup;
- master->transfer = omap1_spi100k_transfer;
+ master->transfer_one_message = omap1_spi100k_transfer_one_message;
+ master->prepare_transfer_hardware = omap1_spi100k_prepare_hardware;
+ master->unprepare_transfer_hardware = omap1_spi100k_unprepare_hardware;
master->cleanup = NULL;
master->num_chipselect = 2;
master->mode_bits = MODEBITS;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ master->min_speed_hz = OMAP1_SPI100K_MAX_FREQ/(1<<16);
+ master->max_speed_hz = OMAP1_SPI100K_MAX_FREQ;
platform_set_drvdata(pdev, master);
@@ -512,42 +441,31 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
* You should allocate this with ioremap() before initializing
* the SPI.
*/
- spi100k->base = (void __iomem *) pdev->dev.platform_data;
-
- INIT_WORK(&spi100k->work, omap1_spi100k_work);
+ spi100k->base = (void __iomem *)dev_get_platdata(&pdev->dev);
- spin_lock_init(&spi100k->lock);
- INIT_LIST_HEAD(&spi100k->msg_queue);
- spi100k->ick = clk_get(&pdev->dev, "ick");
+ spi100k->ick = devm_clk_get(&pdev->dev, "ick");
if (IS_ERR(spi100k->ick)) {
dev_dbg(&pdev->dev, "can't get spi100k_ick\n");
status = PTR_ERR(spi100k->ick);
- goto err1;
+ goto err;
}
- spi100k->fck = clk_get(&pdev->dev, "fck");
+ spi100k->fck = devm_clk_get(&pdev->dev, "fck");
if (IS_ERR(spi100k->fck)) {
dev_dbg(&pdev->dev, "can't get spi100k_fck\n");
status = PTR_ERR(spi100k->fck);
- goto err2;
+ goto err;
}
- if (omap1_spi100k_reset(spi100k) < 0)
- goto err3;
-
status = spi_register_master(master);
if (status < 0)
- goto err3;
+ goto err;
spi100k->state = SPI_RUNNING;
return status;
-err3:
- clk_put(spi100k->fck);
-err2:
- clk_put(spi100k->ick);
-err1:
+err:
spi_master_put(master);
return status;
}
@@ -557,33 +475,14 @@ static int omap1_spi100k_remove(struct platform_device *pdev)
struct spi_master *master;
struct omap1_spi100k *spi100k;
struct resource *r;
- unsigned limit = 500;
- unsigned long flags;
int status = 0;
master = platform_get_drvdata(pdev);
spi100k = spi_master_get_devdata(master);
- spin_lock_irqsave(&spi100k->lock, flags);
-
- spi100k->state = SPI_SHUTDOWN;
- while (!list_empty(&spi100k->msg_queue) && limit--) {
- spin_unlock_irqrestore(&spi100k->lock, flags);
- msleep(10);
- spin_lock_irqsave(&spi100k->lock, flags);
- }
-
- if (!list_empty(&spi100k->msg_queue))
- status = -EBUSY;
-
- spin_unlock_irqrestore(&spi100k->lock, flags);
-
if (status != 0)
return status;
- clk_put(spi100k->fck);
- clk_put(spi100k->ick);
-
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
spi_unregister_master(master);
@@ -596,30 +495,11 @@ static struct platform_driver omap1_spi100k_driver = {
.name = "omap1_spi100k",
.owner = THIS_MODULE,
},
+ .probe = omap1_spi100k_probe,
.remove = omap1_spi100k_remove,
};
-
-static int __init omap1_spi100k_init(void)
-{
- omap1_spi100k_wq = create_singlethread_workqueue(
- omap1_spi100k_driver.driver.name);
-
- if (omap1_spi100k_wq == NULL)
- return -1;
-
- return platform_driver_probe(&omap1_spi100k_driver, omap1_spi100k_probe);
-}
-
-static void __exit omap1_spi100k_exit(void)
-{
- platform_driver_unregister(&omap1_spi100k_driver);
-
- destroy_workqueue(omap1_spi100k_wq);
-}
-
-module_init(omap1_spi100k_init);
-module_exit(omap1_spi100k_exit);
+module_platform_driver(omap1_spi100k_driver);
MODULE_DESCRIPTION("OMAP7xx SPI 100k controller driver");
MODULE_AUTHOR("Fabrice Crohas <fcrohas@gmail.com>");
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 5994039758d..ed4af4708d9 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -335,23 +335,6 @@ static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
__raw_writel(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
}
-static int omap2_prepare_transfer(struct spi_master *master)
-{
- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
-
- pm_runtime_get_sync(mcspi->dev);
- return 0;
-}
-
-static int omap2_unprepare_transfer(struct spi_master *master)
-{
- struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
-
- pm_runtime_mark_last_busy(mcspi->dev);
- pm_runtime_put_autosuspend(mcspi->dev);
- return 0;
-}
-
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
{
unsigned long timeout;
@@ -1318,8 +1301,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
master->setup = omap2_mcspi_setup;
- master->prepare_transfer_hardware = omap2_prepare_transfer;
- master->unprepare_transfer_hardware = omap2_unprepare_transfer;
+ master->auto_runtime_pm = true;
master->transfer_one_message = omap2_mcspi_transfer_one_message;
master->cleanup = omap2_mcspi_cleanup;
master->dev.of_node = node;
@@ -1340,7 +1322,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
if (of_get_property(node, "ti,pindir-d0-out-d1-in", NULL))
mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
} else {
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
master->num_chipselect = pdata->num_cs;
if (pdev->id != -1)
master->bus_num = pdev->id;
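Setting master->auto_runtime_pm, as spi-omap2-mcspi does here, tells the SPI core to hold a runtime-PM reference on the controller's parent device while it pumps the queue (roughly a pm_runtime_get_sync() before and a pm_runtime_put_autosuspend() after), which is exactly what the removed prepare/unprepare callbacks were open-coding. A sketch of the probe-side fragment that remains; the 250 ms autosuspend delay and the foo_ prefix are illustrative, not from the patch:

    master->auto_runtime_pm = true;
    master->transfer_one_message = foo_transfer_one_message;

    /* The driver still enables runtime PM on its own device;
     * the core only takes/drops references around message pumping. */
    pm_runtime_use_autosuspend(&pdev->dev);
    pm_runtime_set_autosuspend_delay(&pdev->dev, 250);
    pm_runtime_enable(&pdev->dev);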
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 5d90bebaa0f..1d1d321d90c 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/clk.h>
+#include <linux/sizes.h>
#include <asm/unaligned.h>
#define DRIVER_NAME "orion_spi"
@@ -446,30 +447,22 @@ static int orion_spi_probe(struct platform_device *pdev)
spi->min_speed = DIV_ROUND_UP(tclk_hz, 30);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (r == NULL) {
- status = -ENODEV;
+ spi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(spi->base)) {
+ status = PTR_ERR(spi->base);
goto out_rel_clk;
}
- if (!request_mem_region(r->start, resource_size(r),
- dev_name(&pdev->dev))) {
- status = -EBUSY;
- goto out_rel_clk;
- }
- spi->base = ioremap(r->start, SZ_1K);
-
if (orion_spi_reset(spi) < 0)
- goto out_rel_mem;
+ goto out_rel_clk;
master->dev.of_node = pdev->dev.of_node;
status = spi_register_master(master);
if (status < 0)
- goto out_rel_mem;
+ goto out_rel_clk;
return status;
-out_rel_mem:
- release_mem_region(r->start, resource_size(r));
out_rel_clk:
clk_disable_unprepare(spi->clk);
clk_put(spi->clk);
@@ -482,7 +475,6 @@ out:
static int orion_spi_remove(struct platform_device *pdev)
{
struct spi_master *master;
- struct resource *r;
struct orion_spi *spi;
master = platform_get_drvdata(pdev);
@@ -491,9 +483,6 @@ static int orion_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(spi->clk);
clk_put(spi->clk);
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(r->start, resource_size(r));
-
spi_unregister_master(master);
return 0;
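The spi-orion conversion above is the standard devm_ioremap_resource() cleanup: one call replaces the NULL-resource check, request_mem_region() and ioremap(), reports failures as an ERR_PTR, and releases the region automatically on driver detach, so both the probe error path and the remove() teardown shrink. A before/after sketch with placeholder names:

    /* before: three steps, each needing its own unwind */
    r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!r)
        return -ENODEV;
    if (!request_mem_region(r->start, resource_size(r),
                            dev_name(&pdev->dev)))
        return -EBUSY;
    base = ioremap(r->start, resource_size(r));

    /* after: the devm variant; a NULL resource or a failed mapping both
     * come back as an ERR_PTR, and nothing needs releasing in remove() */
    r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    base = devm_ioremap_resource(&pdev->dev, r);
    if (IS_ERR(base))
        return PTR_ERR(base);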
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index abef061fb84..9c511a954d2 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1555,18 +1555,6 @@ static int pl022_transfer_one_message(struct spi_master *master,
return 0;
}
-static int pl022_prepare_transfer_hardware(struct spi_master *master)
-{
- struct pl022 *pl022 = spi_master_get_devdata(master);
-
- /*
- * Just make sure we have all we need to run the transfer by syncing
- * with the runtime PM framework.
- */
- pm_runtime_get_sync(&pl022->adev->dev);
- return 0;
-}
-
static int pl022_unprepare_transfer_hardware(struct spi_master *master)
{
struct pl022 *pl022 = spi_master_get_devdata(master);
@@ -1575,13 +1563,6 @@ static int pl022_unprepare_transfer_hardware(struct spi_master *master)
writew((readw(SSP_CR1(pl022->virtbase)) &
(~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
- if (pl022->master_info->autosuspend_delay > 0) {
- pm_runtime_mark_last_busy(&pl022->adev->dev);
- pm_runtime_put_autosuspend(&pl022->adev->dev);
- } else {
- pm_runtime_put(&pl022->adev->dev);
- }
-
return 0;
}
@@ -2091,7 +2072,8 @@ pl022_platform_data_dt_get(struct device *dev)
static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
{
struct device *dev = &adev->dev;
- struct pl022_ssp_controller *platform_info = adev->dev.platform_data;
+ struct pl022_ssp_controller *platform_info =
+ dev_get_platdata(&adev->dev);
struct spi_master *master;
struct pl022 *pl022 = NULL; /*Data for this driver */
struct device_node *np = adev->dev.of_node;
@@ -2139,7 +2121,7 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
master->num_chipselect = num_cs;
master->cleanup = pl022_cleanup;
master->setup = pl022_setup;
- master->prepare_transfer_hardware = pl022_prepare_transfer_hardware;
+ master->auto_runtime_pm = true;
master->transfer_one_message = pl022_transfer_one_message;
master->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware;
master->rt = platform_info->rt;
@@ -2193,8 +2175,8 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
status = -ENOMEM;
goto err_no_ioremap;
}
- printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n",
- adev->res.start, pl022->virtbase);
+ printk(KERN_INFO "pl022: mapped registers from %pa to %p\n",
+ &adev->res.start, pl022->virtbase);
pl022->clk = devm_clk_get(&adev->dev, NULL);
if (IS_ERR(pl022->clk)) {
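The printk change in pl022_probe is worth noting: adev->res.start is a resource_size_t, which may be 64 bits wide, so "0x%08x" is not a portable format for it. The %pa specifier takes a pointer to the value and prints it at the correct width; a one-line sketch with placeholder names (dev, res, virtbase):

    dev_info(dev, "mapped registers from %pa to %p\n",
             &res->start, virtbase);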
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index f440dcee852..2eb06ee0b32 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -69,6 +69,8 @@ MODULE_ALIAS("platform:pxa2xx-spi");
#define LPSS_TX_HITHRESH_DFLT 224
/* Offset from drv_data->lpss_base */
+#define GENERAL_REG 0x08
+#define GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24)
#define SSP_REG 0x0c
#define SPI_CS_CONTROL 0x18
#define SPI_CS_CONTROL_SW_MODE BIT(0)
@@ -142,8 +144,13 @@ detection_done:
__lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);
/* Enable multiblock DMA transfers */
- if (drv_data->master_info->enable_dma)
+ if (drv_data->master_info->enable_dma) {
__lpss_ssp_write_priv(drv_data, SSP_REG, 1);
+
+ value = __lpss_ssp_read_priv(drv_data, GENERAL_REG);
+ value |= GENERAL_REG_RXTO_HOLDOFF_DISABLE;
+ __lpss_ssp_write_priv(drv_data, GENERAL_REG, value);
+ }
}
static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
@@ -804,14 +811,6 @@ static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
return 0;
}
-static int pxa2xx_spi_prepare_transfer(struct spi_master *master)
-{
- struct driver_data *drv_data = spi_master_get_devdata(master);
-
- pm_runtime_get_sync(&drv_data->pdev->dev);
- return 0;
-}
-
static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
{
struct driver_data *drv_data = spi_master_get_devdata(master);
@@ -820,8 +819,6 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
write_SSCR0(read_SSCR0(drv_data->ioaddr) & ~SSCR0_SSE,
drv_data->ioaddr);
- pm_runtime_mark_last_busy(&drv_data->pdev->dev);
- pm_runtime_put_autosuspend(&drv_data->pdev->dev);
return 0;
}
@@ -1134,8 +1131,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
master->cleanup = cleanup;
master->setup = setup;
master->transfer_one_message = pxa2xx_spi_transfer_one_message;
- master->prepare_transfer_hardware = pxa2xx_spi_prepare_transfer;
master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
+ master->auto_runtime_pm = true;
drv_data->ssp_type = ssp->type;
drv_data->null_dma_buf = (u32 *)PTR_ALIGN(&drv_data[1], DMA_ALIGNMENT);
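The new LPSS bits in spi-pxa2xx set GENERAL_REG_RXTO_HOLDOFF_DISABLE (bit 24 of the private GENERAL_REG at offset 0x08) whenever DMA is enabled, going through the driver's __lpss_ssp_read_priv()/__lpss_ssp_write_priv() helpers. Stripped of those helpers it is a plain read-modify-write of a memory-mapped register; a sketch against a hypothetical lpss_base mapping:

    u32 value;

    value = readl(lpss_base + GENERAL_REG);
    value |= GENERAL_REG_RXTO_HOLDOFF_DISABLE;  /* BIT(24) */
    writel(value, lpss_base + GENERAL_REG);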
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index b44a6ac3cec..8719206a03a 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -564,8 +564,12 @@ static void rspi_work(struct work_struct *work)
unsigned long flags;
int ret;
- spin_lock_irqsave(&rspi->lock, flags);
- while (!list_empty(&rspi->queue)) {
+ while (1) {
+ spin_lock_irqsave(&rspi->lock, flags);
+ if (list_empty(&rspi->queue)) {
+ spin_unlock_irqrestore(&rspi->lock, flags);
+ break;
+ }
mesg = list_entry(rspi->queue.next, struct spi_message, queue);
list_del_init(&mesg->queue);
spin_unlock_irqrestore(&rspi->lock, flags);
@@ -595,8 +599,6 @@ static void rspi_work(struct work_struct *work)
mesg->status = 0;
mesg->complete(mesg->context);
-
- spin_lock_irqsave(&rspi->lock, flags);
}
return;
@@ -664,12 +666,13 @@ static irqreturn_t rspi_irq(int irq, void *_sr)
static int rspi_request_dma(struct rspi_data *rspi,
struct platform_device *pdev)
{
- struct rspi_plat_data *rspi_pd = pdev->dev.platform_data;
+ struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dma_cap_mask_t mask;
struct dma_slave_config cfg;
int ret;
- if (!rspi_pd)
+ if (!res || !rspi_pd)
return 0; /* The driver assumes no error. */
rspi->dma_width_16bit = rspi_pd->dma_width_16bit;
@@ -683,6 +686,8 @@ static int rspi_request_dma(struct rspi_data *rspi,
if (rspi->chan_rx) {
cfg.slave_id = rspi_pd->dma_rx_id;
cfg.direction = DMA_DEV_TO_MEM;
+ cfg.dst_addr = 0;
+ cfg.src_addr = res->start + RSPI_SPDR;
ret = dmaengine_slave_config(rspi->chan_rx, &cfg);
if (!ret)
dev_info(&pdev->dev, "Use DMA when rx.\n");
@@ -698,6 +703,8 @@ static int rspi_request_dma(struct rspi_data *rspi,
if (rspi->chan_tx) {
cfg.slave_id = rspi_pd->dma_tx_id;
cfg.direction = DMA_MEM_TO_DEV;
+ cfg.dst_addr = res->start + RSPI_SPDR;
+ cfg.src_addr = 0;
ret = dmaengine_slave_config(rspi->chan_tx, &cfg);
if (!ret)
dev_info(&pdev->dev, "Use DMA when tx\n");
@@ -719,7 +726,7 @@ static void rspi_release_dma(struct rspi_data *rspi)
static int rspi_remove(struct platform_device *pdev)
{
- struct rspi_data *rspi = platform_get_drvdata(pdev);
+ struct rspi_data *rspi = spi_master_get(platform_get_drvdata(pdev));
spi_unregister_master(rspi->master);
rspi_release_dma(rspi);
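The rspi change fills in the half of dma_slave_config that was previously missing: for a slave transfer the dmaengine driver needs the bus address of the device-side FIFO, here the SPI data register at res->start + RSPI_SPDR, given as src_addr for DMA_DEV_TO_MEM and dst_addr for DMA_MEM_TO_DEV. A sketch of the RX-direction configuration with placeholder names (reg_base, DATA_REG, chan, dev):

    struct dma_slave_config cfg = {
        .direction      = DMA_DEV_TO_MEM,
        /* device side of the transfer: the controller's data register */
        .src_addr       = reg_base + DATA_REG,
        .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
    };
    int ret;

    ret = dmaengine_slave_config(chan, &cfg);
    if (ret)
        dev_warn(dev, "RX DMA slave config failed, falling back to PIO\n");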
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index 68910b31015..ce318d95a6e 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -525,7 +525,7 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
memset(hw, 0, sizeof(struct s3c24xx_spi));
hw->master = spi_master_get(master);
- hw->pdata = pdata = pdev->dev.platform_data;
+ hw->pdata = pdata = dev_get_platdata(&pdev->dev);
hw->dev = &pdev->dev;
if (pdata == NULL) {
@@ -690,7 +690,7 @@ static int s3c24xx_spi_remove(struct platform_device *dev)
static int s3c24xx_spi_suspend(struct device *dev)
{
- struct s3c24xx_spi *hw = platform_get_drvdata(to_platform_device(dev));
+ struct s3c24xx_spi *hw = dev_get_drvdata(dev);
if (hw->pdata && hw->pdata->gpio_setup)
hw->pdata->gpio_setup(hw->pdata, 0);
@@ -701,7 +701,7 @@ static int s3c24xx_spi_suspend(struct device *dev)
static int s3c24xx_spi_resume(struct device *dev)
{
- struct s3c24xx_spi *hw = platform_get_drvdata(to_platform_device(dev));
+ struct s3c24xx_spi *hw = dev_get_drvdata(dev);
s3c24xx_spi_initialsetup(hw);
return 0;
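The s3c24xx suspend/resume tweak relies on platform_get_drvdata(pdev) being nothing more than dev_get_drvdata(&pdev->dev), so a dev_pm_ops callback, which already receives the struct device, can skip the to_platform_device() detour. A sketch, with foo_hw and foo_suspend as placeholders:

static int foo_suspend(struct device *dev)
{
    struct foo_hw *hw = dev_get_drvdata(dev);

    /* quiesce the controller using hw ... */
    return 0;
}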
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 63e2070c6c1..512b8893893 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -172,7 +172,6 @@ struct s3c64xx_spi_port_config {
* @master: Pointer to the SPI Protocol master.
* @cntrlr_info: Platform specific data for the controller this driver manages.
* @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
- * @queue: To log SPI xfer requests.
* @lock: Controller specific lock.
* @state: Set of FLAGS to indicate status.
* @rx_dmach: Controller's DMA channel for Rx.
@@ -193,7 +192,6 @@ struct s3c64xx_spi_driver_data {
struct spi_master *master;
struct s3c64xx_spi_info *cntrlr_info;
struct spi_device *tgl_spi;
- struct list_head queue;
spinlock_t lock;
unsigned long sfr_start;
struct completion xfer_completion;
@@ -338,8 +336,10 @@ static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
req.cap = DMA_SLAVE;
req.client = &s3c64xx_spi_dma_client;
- sdd->rx_dma.ch = (void *)sdd->ops->request(sdd->rx_dma.dmach, &req, dev, "rx");
- sdd->tx_dma.ch = (void *)sdd->ops->request(sdd->tx_dma.dmach, &req, dev, "tx");
+ sdd->rx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request(
+ sdd->rx_dma.dmach, &req, dev, "rx");
+ sdd->tx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request(
+ sdd->tx_dma.dmach, &req, dev, "tx");
return 1;
}
@@ -356,8 +356,6 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
while (!is_polling(sdd) && !acquire_dma(sdd))
usleep_range(10000, 11000);
- pm_runtime_get_sync(&sdd->pdev->dev);
-
return 0;
}
@@ -372,7 +370,6 @@ static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
sdd->ops->release((enum dma_ch)sdd->tx_dma.ch,
&s3c64xx_spi_dma_client);
}
- pm_runtime_put(&sdd->pdev->dev);
return 0;
}
@@ -389,9 +386,10 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
{
struct s3c64xx_spi_driver_data *sdd;
struct dma_slave_config config;
- struct scatterlist sg;
struct dma_async_tx_descriptor *desc;
+ memset(&config, 0, sizeof(config));
+
if (dma->direction == DMA_DEV_TO_MEM) {
sdd = container_of((void *)dma,
struct s3c64xx_spi_driver_data, rx_dma);
@@ -410,14 +408,8 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
dmaengine_slave_config(dma->ch, &config);
}
- sg_init_table(&sg, 1);
- sg_dma_len(&sg) = len;
- sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf)),
- len, offset_in_page(buf));
- sg_dma_address(&sg) = buf;
-
- desc = dmaengine_prep_slave_sg(dma->ch,
- &sg, 1, dma->direction, DMA_PREP_INTERRUPT);
+ desc = dmaengine_prep_slave_single(dma->ch, buf, len,
+ dma->direction, DMA_PREP_INTERRUPT);
desc->callback = s3c64xx_spi_dmacb;
desc->callback_param = dma;
@@ -434,27 +426,26 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
dma_cap_mask_t mask;
int ret;
- if (is_polling(sdd))
- return 0;
-
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
-
- /* Acquire DMA channels */
- sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
- (void*)sdd->rx_dma.dmach, dev, "rx");
- if (!sdd->rx_dma.ch) {
- dev_err(dev, "Failed to get RX DMA channel\n");
- ret = -EBUSY;
- goto out;
- }
+ if (!is_polling(sdd)) {
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Acquire DMA channels */
+ sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
+ (void *)sdd->rx_dma.dmach, dev, "rx");
+ if (!sdd->rx_dma.ch) {
+ dev_err(dev, "Failed to get RX DMA channel\n");
+ ret = -EBUSY;
+ goto out;
+ }
- sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
- (void*)sdd->tx_dma.dmach, dev, "tx");
- if (!sdd->tx_dma.ch) {
- dev_err(dev, "Failed to get TX DMA channel\n");
- ret = -EBUSY;
- goto out_rx;
+ sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
+ (void *)sdd->tx_dma.dmach, dev, "tx");
+ if (!sdd->tx_dma.ch) {
+ dev_err(dev, "Failed to get TX DMA channel\n");
+ ret = -EBUSY;
+ goto out_rx;
+ }
}
ret = pm_runtime_get_sync(&sdd->pdev->dev);
@@ -1056,8 +1047,6 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
struct s3c64xx_spi_csinfo *cs = spi->controller_data;
struct s3c64xx_spi_driver_data *sdd;
struct s3c64xx_spi_info *sci;
- struct spi_message *msg;
- unsigned long flags;
int err;
sdd = spi_master_get_devdata(spi->master);
@@ -1071,37 +1060,23 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
return -ENODEV;
}
- /* Request gpio only if cs line is asserted by gpio pins */
- if (sdd->cs_gpio) {
- err = gpio_request_one(cs->line, GPIOF_OUT_INIT_HIGH,
- dev_name(&spi->dev));
- if (err) {
- dev_err(&spi->dev,
- "Failed to get /CS gpio [%d]: %d\n",
- cs->line, err);
- goto err_gpio_req;
+ if (!spi_get_ctldata(spi)) {
+ /* Request gpio only if cs line is asserted by gpio pins */
+ if (sdd->cs_gpio) {
+ err = gpio_request_one(cs->line, GPIOF_OUT_INIT_HIGH,
+ dev_name(&spi->dev));
+ if (err) {
+ dev_err(&spi->dev,
+ "Failed to get /CS gpio [%d]: %d\n",
+ cs->line, err);
+ goto err_gpio_req;
+ }
}
- }
- if (!spi_get_ctldata(spi))
spi_set_ctldata(spi, cs);
-
- sci = sdd->cntrlr_info;
-
- spin_lock_irqsave(&sdd->lock, flags);
-
- list_for_each_entry(msg, &sdd->queue, queue) {
- /* Is some mssg is already queued for this device */
- if (msg->spi == spi) {
- dev_err(&spi->dev,
- "setup: attempt while mssg in queue!\n");
- spin_unlock_irqrestore(&sdd->lock, flags);
- err = -EBUSY;
- goto err_msgq;
- }
}
- spin_unlock_irqrestore(&sdd->lock, flags);
+ sci = sdd->cntrlr_info;
pm_runtime_get_sync(&sdd->pdev->dev);
@@ -1149,7 +1124,6 @@ setup_exit:
/* setup() returns with device de-selected */
disable_cs(sdd, spi);
-err_msgq:
gpio_free(cs->line);
spi_set_ctldata(spi, NULL);
@@ -1275,7 +1249,7 @@ static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
#else
static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
{
- return dev->platform_data;
+ return dev_get_platdata(dev);
}
#endif
@@ -1300,7 +1274,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
struct resource *mem_res;
struct resource *res;
struct s3c64xx_spi_driver_data *sdd;
- struct s3c64xx_spi_info *sci = pdev->dev.platform_data;
+ struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
struct spi_master *master;
int ret, irq;
char clk_name[16];
@@ -1364,16 +1338,14 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
if (!sdd->pdev->dev.of_node) {
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (!res) {
- dev_warn(&pdev->dev, "Unable to get SPI tx dma "
- "resource. Switching to poll mode\n");
+ dev_warn(&pdev->dev, "Unable to get SPI tx dma resource. Switching to poll mode\n");
sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
} else
sdd->tx_dma.dmach = res->start;
res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
if (!res) {
- dev_warn(&pdev->dev, "Unable to get SPI rx dma "
- "resource. Switching to poll mode\n");
+ dev_warn(&pdev->dev, "Unable to get SPI rx dma resource. Switching to poll mode\n");
sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
} else
sdd->rx_dma.dmach = res->start;
@@ -1395,6 +1367,7 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
SPI_BPW_MASK(8);
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->auto_runtime_pm = true;
sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
if (IS_ERR(sdd->regs)) {
@@ -1442,7 +1415,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
spin_lock_init(&sdd->lock);
init_completion(&sdd->xfer_completion);
- INIT_LIST_HEAD(&sdd->queue);
ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
"spi-s3c64xx", sdd);
@@ -1464,8 +1436,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
sdd->port_id, master->num_chipselect);
- dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n",
- mem_res->end, mem_res->start,
+ dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tDMA=[Rx-%d, Tx-%d]\n",
+ mem_res,
sdd->rx_dma.dmach, sdd->tx_dma.dmach);
pm_runtime_enable(&pdev->dev);
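In spi-s3c64xx the hand-built single-entry scatterlist is replaced by dmaengine_prep_slave_single(), which takes the already-mapped dma_addr_t and length directly and builds the descriptor itself. A generic sketch of the pattern (chan, buf, len, direction, done_cb and ctx are placeholders):

    struct dma_async_tx_descriptor *desc;

    desc = dmaengine_prep_slave_single(chan, buf, len, direction,
                                       DMA_PREP_INTERRUPT);
    if (!desc)
        return -EIO;

    desc->callback = done_cb;
    desc->callback_param = ctx;
    dmaengine_submit(desc);
    dma_async_issue_pending(chan);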
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 716edf99953..0b68cb592fa 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -99,21 +99,6 @@ static int hspi_status_check_timeout(struct hspi_priv *hspi, u32 mask, u32 val)
/*
* spi master function
*/
-static int hspi_prepare_transfer(struct spi_master *master)
-{
- struct hspi_priv *hspi = spi_master_get_devdata(master);
-
- pm_runtime_get_sync(hspi->dev);
- return 0;
-}
-
-static int hspi_unprepare_transfer(struct spi_master *master)
-{
- struct hspi_priv *hspi = spi_master_get_devdata(master);
-
- pm_runtime_put_sync(hspi->dev);
- return 0;
-}
#define hspi_hw_cs_enable(hspi) hspi_hw_cs_ctrl(hspi, 0)
#define hspi_hw_cs_disable(hspi) hspi_hw_cs_ctrl(hspi, 1)
@@ -316,9 +301,8 @@ static int hspi_probe(struct platform_device *pdev)
master->setup = hspi_setup;
master->cleanup = hspi_cleanup;
master->mode_bits = SPI_CPOL | SPI_CPHA;
- master->prepare_transfer_hardware = hspi_prepare_transfer;
+ master->auto_runtime_pm = true;
master->transfer_one_message = hspi_transfer_one_message;
- master->unprepare_transfer_hardware = hspi_unprepare_transfer;
ret = spi_register_master(master);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_master error.\n");
@@ -327,8 +311,6 @@ static int hspi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
- dev_info(&pdev->dev, "probed\n");
-
return 0;
error1:
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 2bc5a6b8630..2a95435a6a1 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -645,7 +645,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
if (pdev->dev.of_node)
p->info = sh_msiof_spi_parse_dt(&pdev->dev);
else
- p->info = pdev->dev.platform_data;
+ p->info = dev_get_platdata(&pdev->dev);
if (!p->info) {
dev_err(&pdev->dev, "failed to obtain device info\n");
@@ -745,18 +745,6 @@ static int sh_msiof_spi_remove(struct platform_device *pdev)
return ret;
}
-static int sh_msiof_spi_runtime_nop(struct device *dev)
-{
- /* Runtime PM callback shared between ->runtime_suspend()
- * and ->runtime_resume(). Simply returns success.
- *
- * This driver re-initializes all registers after
- * pm_runtime_get_sync() anyway so there is no need
- * to save and restore registers here.
- */
- return 0;
-}
-
#ifdef CONFIG_OF
static const struct of_device_id sh_msiof_match[] = {
{ .compatible = "renesas,sh-msiof", },
@@ -766,18 +754,12 @@ static const struct of_device_id sh_msiof_match[] = {
MODULE_DEVICE_TABLE(of, sh_msiof_match);
#endif
-static struct dev_pm_ops sh_msiof_spi_dev_pm_ops = {
- .runtime_suspend = sh_msiof_spi_runtime_nop,
- .runtime_resume = sh_msiof_spi_runtime_nop,
-};
-
static struct platform_driver sh_msiof_spi_drv = {
.probe = sh_msiof_spi_probe,
.remove = sh_msiof_spi_remove,
.driver = {
.name = "spi_sh_msiof",
.owner = THIS_MODULE,
- .pm = &sh_msiof_spi_dev_pm_ops,
.of_match_table = of_match_ptr(sh_msiof_match),
},
};
diff --git a/drivers/spi/spi-sh-sci.c b/drivers/spi/spi-sh-sci.c
index 097e506042b..8eefeb6007d 100644
--- a/drivers/spi/spi-sh-sci.c
+++ b/drivers/spi/spi-sh-sci.c
@@ -130,7 +130,7 @@ static int sh_sci_spi_probe(struct platform_device *dev)
sp = spi_master_get_devdata(master);
platform_set_drvdata(dev, sp);
- sp->info = dev->dev.platform_data;
+ sp->info = dev_get_platdata(&dev->dev);
/* setup spi bitbang adaptor */
sp->bitbang.master = spi_master_get(master);
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index fc20bcfd90c..a1f21b74773 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -19,6 +19,10 @@
#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/sirfsoc_dma.h>
#define DRIVER_NAME "sirfsoc_spi"
@@ -119,9 +123,19 @@
#define SIRFSOC_SPI_FIFO_HC(x) (((x) & 0x3F) << 20)
#define SIRFSOC_SPI_FIFO_THD(x) (((x) & 0xFF) << 2)
+/*
+ * Use DMA only when the rx/tx buffers and the transfer size are 4-byte
+ * aligned, due to a limitation of the DMA controller.
+ */
+
+#define ALIGNED(x) (!((u32)x & 0x3))
+#define IS_DMA_VALID(x) (x && ALIGNED(x->tx_buf) && ALIGNED(x->rx_buf) && \
+ ALIGNED(x->len) && (x->len < 2 * PAGE_SIZE))
+
struct sirfsoc_spi {
struct spi_bitbang bitbang;
- struct completion done;
+ struct completion rx_done;
+ struct completion tx_done;
void __iomem *base;
u32 ctrl_freq; /* SPI controller clock speed */
@@ -137,8 +151,16 @@ struct sirfsoc_spi {
void (*tx_word) (struct sirfsoc_spi *);
/* number of words left to be tranmitted/received */
- unsigned int left_tx_cnt;
- unsigned int left_rx_cnt;
+ unsigned int left_tx_word;
+ unsigned int left_rx_word;
+
+ /* rx & tx DMA channels */
+ struct dma_chan *rx_chan;
+ struct dma_chan *tx_chan;
+ dma_addr_t src_start;
+ dma_addr_t dst_start;
+ void *dummypage;
+ int word_width; /* in bytes */
int chipselect[0];
};
@@ -155,7 +177,7 @@ static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi)
sspi->rx = rx;
}
- sspi->left_rx_cnt--;
+ sspi->left_rx_word--;
}
static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
@@ -169,7 +191,7 @@ static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
}
writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
- sspi->left_tx_cnt--;
+ sspi->left_tx_word--;
}
static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
@@ -184,7 +206,7 @@ static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
sspi->rx = rx;
}
- sspi->left_rx_cnt--;
+ sspi->left_rx_word--;
}
static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
@@ -198,7 +220,7 @@ static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
}
writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
- sspi->left_tx_cnt--;
+ sspi->left_tx_word--;
}
static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
@@ -213,7 +235,7 @@ static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
sspi->rx = rx;
}
- sspi->left_rx_cnt--;
+ sspi->left_rx_word--;
}
@@ -228,7 +250,7 @@ static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi)
}
writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
- sspi->left_tx_cnt--;
+ sspi->left_tx_word--;
}
static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
@@ -241,7 +263,7 @@ static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
/* Error Conditions */
if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
spi_stat & SIRFSOC_SPI_TX_UFLOW) {
- complete(&sspi->done);
+ complete(&sspi->rx_done);
writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
}
@@ -249,50 +271,61 @@ static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
| SIRFSOC_SPI_RXFIFO_THD_REACH))
while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS)
& SIRFSOC_SPI_FIFO_EMPTY)) &&
- sspi->left_rx_cnt)
+ sspi->left_rx_word)
sspi->rx_word(sspi);
if (spi_stat & (SIRFSOC_SPI_FIFO_EMPTY
| SIRFSOC_SPI_TXFIFO_THD_REACH))
while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS)
& SIRFSOC_SPI_FIFO_FULL)) &&
- sspi->left_tx_cnt)
+ sspi->left_tx_word)
sspi->tx_word(sspi);
/* Received all words */
- if ((sspi->left_rx_cnt == 0) && (sspi->left_tx_cnt == 0)) {
- complete(&sspi->done);
+ if ((sspi->left_rx_word == 0) && (sspi->left_tx_word == 0)) {
+ complete(&sspi->rx_done);
writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
}
return IRQ_HANDLED;
}
+static void spi_sirfsoc_dma_fini_callback(void *data)
+{
+ struct completion *dma_complete = data;
+
+ complete(dma_complete);
+}
+
static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
{
struct sirfsoc_spi *sspi;
int timeout = t->len * 10;
sspi = spi_master_get_devdata(spi->master);
- sspi->tx = t->tx_buf;
- sspi->rx = t->rx_buf;
- sspi->left_tx_cnt = sspi->left_rx_cnt = t->len;
- INIT_COMPLETION(sspi->done);
+ sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage;
+ sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage;
+ sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
+ INIT_COMPLETION(sspi->rx_done);
+ INIT_COMPLETION(sspi->tx_done);
writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
- if (t->len == 1) {
+ if (sspi->left_tx_word == 1) {
writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
SIRFSOC_SPI_ENA_AUTO_CLR,
sspi->base + SIRFSOC_SPI_CTRL);
writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
- } else if ((t->len > 1) && (t->len < SIRFSOC_SPI_DAT_FRM_LEN_MAX)) {
+ } else if ((sspi->left_tx_word > 1) && (sspi->left_tx_word <
+ SIRFSOC_SPI_DAT_FRM_LEN_MAX)) {
writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
SIRFSOC_SPI_MUL_DAT_MODE |
SIRFSOC_SPI_ENA_AUTO_CLR,
sspi->base + SIRFSOC_SPI_CTRL);
- writel(t->len - 1, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
- writel(t->len - 1, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
+ writel(sspi->left_tx_word - 1,
+ sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
+ writel(sspi->left_tx_word - 1,
+ sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
} else {
writel(readl(sspi->base + SIRFSOC_SPI_CTRL),
sspi->base + SIRFSOC_SPI_CTRL);
@@ -305,17 +338,64 @@ static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
- /* Send the first word to trigger the whole tx/rx process */
- sspi->tx_word(sspi);
+ if (IS_DMA_VALID(t)) {
+ struct dma_async_tx_descriptor *rx_desc, *tx_desc;
+
+ sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len, DMA_FROM_DEVICE);
+ rx_desc = dmaengine_prep_slave_single(sspi->rx_chan,
+ sspi->dst_start, t->len, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ rx_desc->callback = spi_sirfsoc_dma_fini_callback;
+ rx_desc->callback_param = &sspi->rx_done;
+
+ sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len, DMA_TO_DEVICE);
+ tx_desc = dmaengine_prep_slave_single(sspi->tx_chan,
+ sspi->src_start, t->len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ tx_desc->callback = spi_sirfsoc_dma_fini_callback;
+ tx_desc->callback_param = &sspi->tx_done;
+
+ dmaengine_submit(tx_desc);
+ dmaengine_submit(rx_desc);
+ dma_async_issue_pending(sspi->tx_chan);
+ dma_async_issue_pending(sspi->rx_chan);
+ } else {
+ /* Send the first word to trigger the whole tx/rx process */
+ sspi->tx_word(sspi);
+
+ writel(SIRFSOC_SPI_RX_OFLOW_INT_EN | SIRFSOC_SPI_TX_UFLOW_INT_EN |
+ SIRFSOC_SPI_RXFIFO_THD_INT_EN | SIRFSOC_SPI_TXFIFO_THD_INT_EN |
+ SIRFSOC_SPI_FRM_END_INT_EN | SIRFSOC_SPI_RXFIFO_FULL_INT_EN |
+ SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN, sspi->base + SIRFSOC_SPI_INT_EN);
+ }
- writel(SIRFSOC_SPI_RX_OFLOW_INT_EN | SIRFSOC_SPI_TX_UFLOW_INT_EN |
- SIRFSOC_SPI_RXFIFO_THD_INT_EN | SIRFSOC_SPI_TXFIFO_THD_INT_EN |
- SIRFSOC_SPI_FRM_END_INT_EN | SIRFSOC_SPI_RXFIFO_FULL_INT_EN |
- SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN, sspi->base + SIRFSOC_SPI_INT_EN);
writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN, sspi->base + SIRFSOC_SPI_TX_RX_EN);
- if (wait_for_completion_timeout(&sspi->done, timeout) == 0)
+ if (!IS_DMA_VALID(t)) { /* for PIO */
+ if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0)
+ dev_err(&spi->dev, "transfer timeout\n");
+ } else if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
dev_err(&spi->dev, "transfer timeout\n");
+ dmaengine_terminate_all(sspi->rx_chan);
+ } else
+ sspi->left_rx_word = 0;
+
+ /*
+ * We only wait for the tx-done event when transferring by DMA. For PIO,
+ * rx data is obtained by writing tx data, so once rx is done, tx has
+ * finished earlier.
+ */
+ if (IS_DMA_VALID(t)) {
+ if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
+ dev_err(&spi->dev, "transfer timeout\n");
+ dmaengine_terminate_all(sspi->tx_chan);
+ }
+ }
+
+ if (IS_DMA_VALID(t)) {
+ dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
+ dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
+ }
/* TX, RX FIFO stop */
writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
@@ -323,7 +403,7 @@ static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN);
writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
- return t->len - sspi->left_rx_cnt;
+ return t->len - sspi->left_rx_word * sspi->word_width;
}
static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
@@ -332,7 +412,6 @@ static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
if (sspi->chipselect[spi->chip_select] == 0) {
u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL);
- regval |= SIRFSOC_SPI_CS_IO_OUT;
switch (value) {
case BITBANG_CS_ACTIVE:
if (spi->mode & SPI_CS_HIGH)
@@ -369,11 +448,7 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;
- /* Enable IO mode for RX, TX */
- writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
- writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
regval = (sspi->ctrl_freq / (2 * hz)) - 1;
-
if (regval > 0xFFFF || regval < 0) {
dev_err(&spi->dev, "Speed %d not supported\n", hz);
return -EINVAL;
@@ -388,6 +463,7 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
SIRFSOC_SPI_FIFO_WIDTH_BYTE;
rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
SIRFSOC_SPI_FIFO_WIDTH_BYTE;
+ sspi->word_width = 1;
break;
case 12:
case 16:
@@ -399,6 +475,7 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
SIRFSOC_SPI_FIFO_WIDTH_WORD;
rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
SIRFSOC_SPI_FIFO_WIDTH_WORD;
+ sspi->word_width = 2;
break;
case 32:
regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32;
@@ -408,6 +485,7 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
SIRFSOC_SPI_FIFO_WIDTH_DWORD;
rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
SIRFSOC_SPI_FIFO_WIDTH_DWORD;
+ sspi->word_width = 4;
break;
default:
BUG();
@@ -442,6 +520,17 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL);
writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
+
+ if (IS_DMA_VALID(t)) {
+ /* Enable DMA mode for RX, TX */
+ writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
+ writel(SIRFSOC_SPI_RX_DMA_FLUSH, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
+ } else {
+ /* Enable IO mode for RX, TX */
+ writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
+ writel(SIRFSOC_SPI_IO_MODE_SEL, sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
+ }
+
return 0;
}
@@ -466,6 +555,8 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
struct spi_master *master;
struct resource *mem_res;
int num_cs, cs_gpio, irq;
+ u32 rx_dma_ch, tx_dma_ch;
+ dma_cap_mask_t dma_cap_mask;
int i;
int ret;
@@ -476,6 +567,20 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
goto err_cs;
}
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "sirf,spi-dma-rx-channel", &rx_dma_ch);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to get rx dma channel\n");
+ goto err_cs;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node,
+ "sirf,spi-dma-tx-channel", &tx_dma_ch);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to get tx dma channel\n");
+ goto err_cs;
+ }
+
master = spi_alloc_master(&pdev->dev, sizeof(*sspi) + sizeof(int) * num_cs);
if (!master) {
dev_err(&pdev->dev, "Unable to allocate SPI master\n");
@@ -484,12 +589,6 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
sspi = spi_master_get_devdata(master);
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem_res) {
- dev_err(&pdev->dev, "Unable to get IO resource\n");
- ret = -ENODEV;
- goto free_master;
- }
master->num_chipselect = num_cs;
for (i = 0; i < master->num_chipselect; i++) {
@@ -516,6 +615,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
}
}
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sspi->base = devm_ioremap_resource(&pdev->dev, mem_res);
if (IS_ERR(sspi->base)) {
ret = PTR_ERR(sspi->base);
@@ -538,19 +638,40 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
sspi->bitbang.master->setup = spi_sirfsoc_setup;
master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) |
SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
+ /* request DMA channels */
+ dma_cap_zero(dma_cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, dma_cap_mask);
+
+ sspi->rx_chan = dma_request_channel(dma_cap_mask, (dma_filter_fn)sirfsoc_dma_filter_id,
+ (void *)rx_dma_ch);
+ if (!sspi->rx_chan) {
+ dev_err(&pdev->dev, "can not allocate rx dma channel\n");
+ ret = -ENODEV;
+ goto free_master;
+ }
+ sspi->tx_chan = dma_request_channel(dma_cap_mask, (dma_filter_fn)sirfsoc_dma_filter_id,
+ (void *)tx_dma_ch);
+ if (!sspi->tx_chan) {
+ dev_err(&pdev->dev, "can not allocate tx dma channel\n");
+ ret = -ENODEV;
+ goto free_rx_dma;
+ }
+
sspi->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(sspi->clk)) {
- ret = -EINVAL;
- goto free_master;
+ ret = PTR_ERR(sspi->clk);
+ goto free_tx_dma;
}
clk_prepare_enable(sspi->clk);
sspi->ctrl_freq = clk_get_rate(sspi->clk);
- init_completion(&sspi->done);
+ init_completion(&sspi->rx_done);
+ init_completion(&sspi->tx_done);
writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
@@ -559,17 +680,28 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
/* We are not using dummy delay between command and data */
writel(0, sspi->base + SIRFSOC_SPI_DUMMY_DELAY_CTL);
+ sspi->dummypage = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);
+ if (!sspi->dummypage) {
+ ret = -ENOMEM;
+ goto free_clk;
+ }
+
ret = spi_bitbang_start(&sspi->bitbang);
if (ret)
- goto free_clk;
+ goto free_dummypage;
dev_info(&pdev->dev, "registerred, bus number = %d\n", master->bus_num);
return 0;
-
+free_dummypage:
+ kfree(sspi->dummypage);
free_clk:
clk_disable_unprepare(sspi->clk);
clk_put(sspi->clk);
+free_tx_dma:
+ dma_release_channel(sspi->tx_chan);
+free_rx_dma:
+ dma_release_channel(sspi->rx_chan);
free_master:
spi_master_put(master);
err_cs:
@@ -590,8 +722,11 @@ static int spi_sirfsoc_remove(struct platform_device *pdev)
if (sspi->chipselect[i] > 0)
gpio_free(sspi->chipselect[i]);
}
+ kfree(sspi->dummypage);
clk_disable_unprepare(sspi->clk);
clk_put(sspi->clk);
+ dma_release_channel(sspi->rx_chan);
+ dma_release_channel(sspi->tx_chan);
spi_master_put(master);
return 0;
}
@@ -599,8 +734,7 @@ static int spi_sirfsoc_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
static int spi_sirfsoc_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = dev_get_drvdata(dev);
struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
clk_disable(sspi->clk);
@@ -609,8 +743,7 @@ static int spi_sirfsoc_suspend(struct device *dev)
static int spi_sirfsoc_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_master *master = dev_get_drvdata(dev);
struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
clk_enable(sspi->clk);
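The spi-sirf DMA support above only kicks in when IS_DMA_VALID() holds, i.e. both buffers and the length are word aligned and the length stays below the driver's two-page limit; otherwise the old interrupt-driven PIO path is used, with dummypage standing in for a missing tx_buf or rx_buf. The DMA leg itself follows the usual dmaengine slave sequence; a condensed sketch for the RX direction, with placeholder names and the error handling trimmed:

    dma_addr_t dst;
    struct dma_async_tx_descriptor *desc;

    dst = dma_map_single(dev, rx_buf, len, DMA_FROM_DEVICE);
    desc = dmaengine_prep_slave_single(rx_chan, dst, len,
                                       DMA_DEV_TO_MEM,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    desc->callback = done_callback;     /* completes the rx completion */
    desc->callback_param = &rx_done;
    dmaengine_submit(desc);
    dma_async_issue_pending(rx_chan);

    if (!wait_for_completion_timeout(&rx_done, timeout))
        dmaengine_terminate_all(rx_chan);
    dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE);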
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index e8f542ab893..145dd435483 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -816,14 +816,6 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
msg->status = 0;
msg->actual_length = 0;
- ret = pm_runtime_get_sync(tspi->dev);
- if (ret < 0) {
- dev_err(tspi->dev, "runtime PM get failed: %d\n", ret);
- msg->status = ret;
- spi_finalize_current_message(master);
- return ret;
- }
-
single_xfer = list_is_singular(&msg->transfers);
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
INIT_COMPLETION(tspi->xfer_completion);
@@ -859,7 +851,6 @@ static int tegra_spi_transfer_one_message(struct spi_master *master,
ret = 0;
exit:
tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
- pm_runtime_put(tspi->dev);
msg->status = ret;
spi_finalize_current_message(master);
return ret;
@@ -1053,24 +1044,19 @@ static int tegra_spi_probe(struct platform_device *pdev)
master->transfer_one_message = tegra_spi_transfer_one_message;
master->num_chipselect = MAX_CHIP_SELECT;
master->bus_num = -1;
+ master->auto_runtime_pm = true;
tspi->master = master;
tspi->dev = &pdev->dev;
spin_lock_init(&tspi->lock);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "No IO memory resource\n");
- ret = -ENODEV;
- goto exit_free_master;
- }
- tspi->phys = r->start;
tspi->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(tspi->base)) {
ret = PTR_ERR(tspi->base);
- dev_err(&pdev->dev, "ioremap failed: err = %d\n", ret);
goto exit_free_master;
}
+ tspi->phys = r->start;
spi_irq = platform_get_irq(pdev, 0);
tspi->irq = spi_irq;
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index c1d5d95e70e..1d814dc6e00 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -335,12 +335,6 @@ static int tegra_sflash_transfer_one_message(struct spi_master *master,
struct spi_device *spi = msg->spi;
int ret;
- ret = pm_runtime_get_sync(tsd->dev);
- if (ret < 0) {
- dev_err(tsd->dev, "pm_runtime_get() failed, err = %d\n", ret);
- return ret;
- }
-
msg->status = 0;
msg->actual_length = 0;
single_xfer = list_is_singular(&msg->transfers);
@@ -380,7 +374,6 @@ exit:
tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND);
msg->status = ret;
spi_finalize_current_message(master);
- pm_runtime_put(tsd->dev);
return ret;
}
@@ -477,6 +470,7 @@ static int tegra_sflash_probe(struct platform_device *pdev)
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->setup = tegra_sflash_setup;
master->transfer_one_message = tegra_sflash_transfer_one_message;
+ master->auto_runtime_pm = true;
master->num_chipselect = MAX_CHIP_SELECT;
master->bus_num = -1;
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 80490cc11ce..c70353672a2 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -836,11 +836,6 @@ static int tegra_slink_transfer_one_message(struct spi_master *master,
msg->status = 0;
msg->actual_length = 0;
- ret = pm_runtime_get_sync(tspi->dev);
- if (ret < 0) {
- dev_err(tspi->dev, "runtime get failed: %d\n", ret);
- goto done;
- }
single_xfer = list_is_singular(&msg->transfers);
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
@@ -878,8 +873,6 @@ static int tegra_slink_transfer_one_message(struct spi_master *master,
exit:
tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
- pm_runtime_put(tspi->dev);
-done:
msg->status = ret;
spi_finalize_current_message(master);
return ret;
@@ -1086,6 +1079,7 @@ static int tegra_slink_probe(struct platform_device *pdev)
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->setup = tegra_slink_setup;
master->transfer_one_message = tegra_slink_transfer_one_message;
+ master->auto_runtime_pm = true;
master->num_chipselect = MAX_CHIP_SELECT;
master->bus_num = -1;
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
new file mode 100644
index 00000000000..e12d962a289
--- /dev/null
+++ b/drivers/spi/spi-ti-qspi.c
@@ -0,0 +1,574 @@
+/*
+ * TI QSPI driver
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Sourav Poddar <sourav.poddar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GPLv2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/omap-dma.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+
+#include <linux/spi/spi.h>
+
+struct ti_qspi_regs {
+ u32 clkctrl;
+};
+
+struct ti_qspi {
+ struct completion transfer_complete;
+
+ /* IRQ synchronization */
+ spinlock_t lock;
+
+ /* list synchronization */
+ struct mutex list_lock;
+
+ struct spi_master *master;
+ void __iomem *base;
+ struct clk *fclk;
+ struct device *dev;
+
+ struct ti_qspi_regs ctx_reg;
+
+ u32 spi_max_frequency;
+ u32 cmd;
+ u32 dc;
+ u32 stat;
+};
+
+#define QSPI_PID (0x0)
+#define QSPI_SYSCONFIG (0x10)
+#define QSPI_INTR_STATUS_RAW_SET (0x20)
+#define QSPI_INTR_STATUS_ENABLED_CLEAR (0x24)
+#define QSPI_INTR_ENABLE_SET_REG (0x28)
+#define QSPI_INTR_ENABLE_CLEAR_REG (0x2c)
+#define QSPI_SPI_CLOCK_CNTRL_REG (0x40)
+#define QSPI_SPI_DC_REG (0x44)
+#define QSPI_SPI_CMD_REG (0x48)
+#define QSPI_SPI_STATUS_REG (0x4c)
+#define QSPI_SPI_DATA_REG (0x50)
+#define QSPI_SPI_SETUP0_REG (0x54)
+#define QSPI_SPI_SWITCH_REG (0x64)
+#define QSPI_SPI_SETUP1_REG (0x58)
+#define QSPI_SPI_SETUP2_REG (0x5c)
+#define QSPI_SPI_SETUP3_REG (0x60)
+#define QSPI_SPI_DATA_REG_1 (0x68)
+#define QSPI_SPI_DATA_REG_2 (0x6c)
+#define QSPI_SPI_DATA_REG_3 (0x70)
+
+#define QSPI_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
+
+#define QSPI_FCLK 192000000
+
+/* Clock Control */
+#define QSPI_CLK_EN (1 << 31)
+#define QSPI_CLK_DIV_MAX 0xffff
+
+/* Command */
+#define QSPI_EN_CS(n) (n << 28)
+#define QSPI_WLEN(n) ((n - 1) << 19)
+#define QSPI_3_PIN (1 << 18)
+#define QSPI_RD_SNGL (1 << 16)
+#define QSPI_WR_SNGL (2 << 16)
+#define QSPI_RD_DUAL (3 << 16)
+#define QSPI_RD_QUAD (7 << 16)
+#define QSPI_INVAL (4 << 16)
+#define QSPI_WC_CMD_INT_EN (1 << 14)
+#define QSPI_FLEN(n) ((n - 1) << 0)
+
+/* STATUS REGISTER */
+#define WC 0x02
+
+/* INTERRUPT REGISTER */
+#define QSPI_WC_INT_EN (1 << 1)
+#define QSPI_WC_INT_DISABLE (1 << 1)
+
+/* Device Control */
+#define QSPI_DD(m, n) (m << (3 + n * 8))
+#define QSPI_CKPHA(n) (1 << (2 + n * 8))
+#define QSPI_CSPOL(n) (1 << (1 + n * 8))
+#define QSPI_CKPOL(n) (1 << (n * 8))
+
+#define QSPI_FRAME 4096
+
+#define QSPI_AUTOSUSPEND_TIMEOUT 2000
+
+static inline unsigned long ti_qspi_read(struct ti_qspi *qspi,
+ unsigned long reg)
+{
+ return readl(qspi->base + reg);
+}
+
+static inline void ti_qspi_write(struct ti_qspi *qspi,
+ unsigned long val, unsigned long reg)
+{
+ writel(val, qspi->base + reg);
+}
+
+static int ti_qspi_setup(struct spi_device *spi)
+{
+ struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
+ struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
+ int clk_div = 0, ret;
+ u32 clk_ctrl_reg, clk_rate, clk_mask;
+
+ if (spi->master->busy) {
+ dev_dbg(qspi->dev, "master busy doing other trasnfers\n");
+ return -EBUSY;
+ }
+
+ if (!qspi->spi_max_frequency) {
+ dev_err(qspi->dev, "spi max frequency not defined\n");
+ return -EINVAL;
+ }
+
+ clk_rate = clk_get_rate(qspi->fclk);
+
+ clk_div = DIV_ROUND_UP(clk_rate, qspi->spi_max_frequency) - 1;
+
+ if (clk_div < 0) {
+ dev_dbg(qspi->dev, "clock divider < 0, using /1 divider\n");
+ return -EINVAL;
+ }
+
+ if (clk_div > QSPI_CLK_DIV_MAX) {
+ dev_dbg(qspi->dev, "clock divider >%d , using /%d divider\n",
+ QSPI_CLK_DIV_MAX, QSPI_CLK_DIV_MAX + 1);
+ return -EINVAL;
+ }
+
+ dev_dbg(qspi->dev, "hz: %d, clock divider %d\n",
+ qspi->spi_max_frequency, clk_div);
+
+ ret = pm_runtime_get_sync(qspi->dev);
+ if (ret) {
+ dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
+ return ret;
+ }
+
+ clk_ctrl_reg = ti_qspi_read(qspi, QSPI_SPI_CLOCK_CNTRL_REG);
+
+ clk_ctrl_reg &= ~QSPI_CLK_EN;
+
+ /* disable SCLK */
+ ti_qspi_write(qspi, clk_ctrl_reg, QSPI_SPI_CLOCK_CNTRL_REG);
+
+ /* enable SCLK */
+ clk_mask = QSPI_CLK_EN | clk_div;
+ ti_qspi_write(qspi, clk_mask, QSPI_SPI_CLOCK_CNTRL_REG);
+ ctx_reg->clkctrl = clk_mask;
+
+ pm_runtime_mark_last_busy(qspi->dev);
+ ret = pm_runtime_put_autosuspend(qspi->dev);
+ if (ret < 0) {
+ dev_err(qspi->dev, "pm_runtime_put_autosuspend() failed\n");
+ return ret;
+ }
+
+ return 0;
+}
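The divider arithmetic in ti_qspi_setup() can be sanity-checked against the driver's own QSPI_FCLK of 192 MHz, assuming the controller divides its functional clock by clk_div + 1 (which is what the DIV_ROUND_UP(clk_rate, spi_max_frequency) - 1 computation implies): for a part limited to 48 MHz, clk_div = DIV_ROUND_UP(192000000, 48000000) - 1 = 3 and the programmed clock is 192 MHz / 4 = 48 MHz; a limit of, say, 50 MHz also yields clk_div = 3 and therefore 48 MHz, so the divider always rounds the serial clock down to at most the requested rate.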
+
+static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
+{
+ struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
+
+ ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG);
+}
+
+static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+{
+ int wlen, count, ret;
+ unsigned int cmd;
+ const u8 *txbuf;
+
+ txbuf = t->tx_buf;
+ cmd = qspi->cmd | QSPI_WR_SNGL;
+ count = t->len;
+ wlen = t->bits_per_word;
+
+ while (count) {
+ switch (wlen) {
+ case 8:
+ dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
+ cmd, qspi->dc, *txbuf);
+ writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG);
+ ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
+ ret = wait_for_completion_timeout(&qspi->transfer_complete,
+ QSPI_COMPLETION_TIMEOUT);
+ if (ret == 0) {
+ dev_err(qspi->dev, "write timed out\n");
+ return -ETIMEDOUT;
+ }
+ txbuf += 1;
+ count -= 1;
+ break;
+ case 16:
+ dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %04x\n",
+ cmd, qspi->dc, *txbuf);
+ writew(*((u16 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
+ ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
+ ret = wait_for_completion_timeout(&qspi->transfer_complete,
+ QSPI_COMPLETION_TIMEOUT);
+ if (ret == 0) {
+ dev_err(qspi->dev, "write timed out\n");
+ return -ETIMEDOUT;
+ }
+ txbuf += 2;
+ count -= 2;
+ break;
+ case 32:
+ dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %08x\n",
+ cmd, qspi->dc, *txbuf);
+ writel(*((u32 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
+ ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
+ ret = wait_for_completion_timeout(&qspi->transfer_complete,
+ QSPI_COMPLETION_TIMEOUT);
+ if (ret == 0) {
+ dev_err(qspi->dev, "write timed out\n");
+ return -ETIMEDOUT;
+ }
+ txbuf += 4;
+ count -= 4;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+{
+ int wlen, count, ret;
+ unsigned int cmd;
+ u8 *rxbuf;
+
+ rxbuf = t->rx_buf;
+ cmd = qspi->cmd;
+ switch (t->rx_nbits) {
+ case SPI_NBITS_DUAL:
+ cmd |= QSPI_RD_DUAL;
+ break;
+ case SPI_NBITS_QUAD:
+ cmd |= QSPI_RD_QUAD;
+ break;
+ default:
+ cmd |= QSPI_RD_SNGL;
+ break;
+ }
+ count = t->len;
+ wlen = t->bits_per_word;
+
+ while (count) {
+ dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
+ ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
+ ret = wait_for_completion_timeout(&qspi->transfer_complete,
+ QSPI_COMPLETION_TIMEOUT);
+ if (ret == 0) {
+ dev_err(qspi->dev, "read timed out\n");
+ return -ETIMEDOUT;
+ }
+ switch (wlen) {
+ case 8:
+ *rxbuf = readb(qspi->base + QSPI_SPI_DATA_REG);
+ rxbuf += 1;
+ count -= 1;
+ break;
+ case 16:
+ *((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
+ rxbuf += 2;
+ count -= 2;
+ break;
+ case 32:
+ *((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
+ rxbuf += 4;
+ count -= 4;
+ break;
+ }
+ }
+
+ return 0;
+}
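qspi_read_msg() selects QSPI_RD_DUAL or QSPI_RD_QUAD purely from t->rx_nbits, so a protocol driver opts into the wide read path per transfer through the generic spi_message API. A hedged sketch of what such a caller might look like; the opcode length, buffer sizes and variable names are illustrative, and the spi_device is assumed to have negotiated a quad-capable mode (SPI_RX_QUAD), otherwise the core may reject the transfer:

    struct spi_message m;
    struct spi_transfer t[2] = {
        {
            .tx_buf         = cmd_buf,   /* e.g. a read opcode plus address */
            .len            = 4,
            .bits_per_word  = 8,
        },
        {
            .rx_buf         = data_buf,
            .len            = 256,
            .bits_per_word  = 8,
            .rx_nbits       = SPI_NBITS_QUAD, /* request the 4-line read path */
        },
    };

    spi_message_init(&m);
    spi_message_add_tail(&t[0], &m);
    spi_message_add_tail(&t[1], &m);
    ret = spi_sync(spi, &m);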
+
+static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+{
+ int ret;
+
+ if (t->tx_buf) {
+ ret = qspi_write_msg(qspi, t);
+ if (ret) {
+ dev_dbg(qspi->dev, "Error while writing\n");
+ return ret;
+ }
+ }
+
+ if (t->rx_buf) {
+ ret = qspi_read_msg(qspi, t);
+ if (ret) {
+ dev_dbg(qspi->dev, "Error while reading\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ti_qspi_start_transfer_one(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct ti_qspi *qspi = spi_master_get_devdata(master);
+ struct spi_device *spi = m->spi;
+ struct spi_transfer *t;
+ int status = 0, ret;
+ int frame_length;
+
+ /* setup device control reg */
+ qspi->dc = 0;
+
+ if (spi->mode & SPI_CPHA)
+ qspi->dc |= QSPI_CKPHA(spi->chip_select);
+ if (spi->mode & SPI_CPOL)
+ qspi->dc |= QSPI_CKPOL(spi->chip_select);
+ if (spi->mode & SPI_CS_HIGH)
+ qspi->dc |= QSPI_CSPOL(spi->chip_select);
+
+ frame_length = (m->frame_length << 3) / spi->bits_per_word;
+
+ frame_length = clamp(frame_length, 0, QSPI_FRAME);
+
+ /* setup command reg */
+ qspi->cmd = 0;
+ qspi->cmd |= QSPI_EN_CS(spi->chip_select);
+ qspi->cmd |= QSPI_FLEN(frame_length);
+ qspi->cmd |= QSPI_WC_CMD_INT_EN;
+
+ ti_qspi_write(qspi, QSPI_WC_INT_EN, QSPI_INTR_ENABLE_SET_REG);
+ ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
+
+ mutex_lock(&qspi->list_lock);
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ qspi->cmd |= QSPI_WLEN(t->bits_per_word);
+
+ ret = qspi_transfer_msg(qspi, t);
+ if (ret) {
+ dev_dbg(qspi->dev, "transfer message failed\n");
+ mutex_unlock(&qspi->list_lock);
+ return -EINVAL;
+ }
+
+ m->actual_length += t->len;
+ }
+
+ mutex_unlock(&qspi->list_lock);
+
+ m->status = status;
+ spi_finalize_current_message(master);
+
+ ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG);
+
+ return status;
+}
+
+static irqreturn_t ti_qspi_isr(int irq, void *dev_id)
+{
+ struct ti_qspi *qspi = dev_id;
+ u16 int_stat;
+
+ irqreturn_t ret = IRQ_HANDLED;
+
+ spin_lock(&qspi->lock);
+
+ int_stat = ti_qspi_read(qspi, QSPI_INTR_STATUS_ENABLED_CLEAR);
+ qspi->stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+
+ if (!int_stat) {
+ dev_dbg(qspi->dev, "No IRQ triggered\n");
+ ret = IRQ_NONE;
+ goto out;
+ }
+
+ ret = IRQ_WAKE_THREAD;
+
+ ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG);
+ ti_qspi_write(qspi, QSPI_WC_INT_DISABLE,
+ QSPI_INTR_STATUS_ENABLED_CLEAR);
+
+out:
+ spin_unlock(&qspi->lock);
+
+ return ret;
+}
+
+static irqreturn_t ti_qspi_threaded_isr(int this_irq, void *dev_id)
+{
+ struct ti_qspi *qspi = dev_id;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qspi->lock, flags);
+
+ if (qspi->stat & WC)
+ complete(&qspi->transfer_complete);
+
+ spin_unlock_irqrestore(&qspi->lock, flags);
+
+ ti_qspi_write(qspi, QSPI_WC_INT_EN, QSPI_INTR_ENABLE_SET_REG);
+
+ return IRQ_HANDLED;
+}
+
+static int ti_qspi_runtime_resume(struct device *dev)
+{
+ struct ti_qspi *qspi;
+ struct spi_master *master;
+
+ master = dev_get_drvdata(dev);
+ qspi = spi_master_get_devdata(master);
+ ti_qspi_restore_ctx(qspi);
+
+ return 0;
+}
+
+static const struct of_device_id ti_qspi_match[] = {
+ {.compatible = "ti,dra7xxx-qspi" },
+ {.compatible = "ti,am4372-qspi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ti_qspi_match);
+
+static int ti_qspi_probe(struct platform_device *pdev)
+{
+ struct ti_qspi *qspi;
+ struct spi_master *master;
+ struct resource *r;
+ struct device_node *np = pdev->dev.of_node;
+ u32 max_freq;
+ int ret = 0, num_cs, irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*qspi));
+ if (!master)
+ return -ENOMEM;
+
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+
+ master->bus_num = -1;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->setup = ti_qspi_setup;
+ master->auto_runtime_pm = true;
+ master->transfer_one_message = ti_qspi_start_transfer_one;
+ master->dev.of_node = pdev->dev.of_node;
+ master->bits_per_word_mask = BIT(32 - 1) | BIT(16 - 1) | BIT(8 - 1);
+
+ if (!of_property_read_u32(np, "num-cs", &num_cs))
+ master->num_chipselect = num_cs;
+
+ platform_set_drvdata(pdev, master);
+
+ qspi = spi_master_get_devdata(master);
+ qspi->master = master;
+ qspi->dev = &pdev->dev;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ return irq;
+ }
+
+ spin_lock_init(&qspi->lock);
+ mutex_init(&qspi->list_lock);
+
+ qspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(qspi->base)) {
+ ret = PTR_ERR(qspi->base);
+ goto free_master;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, irq, ti_qspi_isr,
+ ti_qspi_threaded_isr, 0,
+ dev_name(&pdev->dev), qspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ irq);
+ goto free_master;
+ }
+
+ qspi->fclk = devm_clk_get(&pdev->dev, "fck");
+ if (IS_ERR(qspi->fclk)) {
+ ret = PTR_ERR(qspi->fclk);
+ dev_err(&pdev->dev, "could not get clk: %d\n", ret);
+ }
+
+ init_completion(&qspi->transfer_complete);
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, QSPI_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_enable(&pdev->dev);
+
+ if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
+ qspi->spi_max_frequency = max_freq;
+
+ ret = spi_register_master(master);
+ if (ret)
+ goto free_master;
+
+ return 0;
+
+free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int ti_qspi_remove(struct platform_device *pdev)
+{
+ struct ti_qspi *qspi = platform_get_drvdata(pdev);
+
+ spi_unregister_master(qspi->master);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ti_qspi_pm_ops = {
+ .runtime_resume = ti_qspi_runtime_resume,
+};
+
+static struct platform_driver ti_qspi_driver = {
+ .probe = ti_qspi_probe,
+ .remove = ti_qspi_remove,
+ .driver = {
+ .name = "ti,dra7xxx-qspi",
+ .owner = THIS_MODULE,
+ .pm = &ti_qspi_pm_ops,
+ .of_match_table = ti_qspi_match,
+ }
+};
+
+module_platform_driver(ti_qspi_driver);
+
+MODULE_AUTHOR("Sourav Poddar <sourav.poddar@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI QSPI controller driver");
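The new ti-qspi driver above picks single, dual or quad data lines per transfer from t->tx_nbits and t->rx_nbits. For orientation, a minimal sketch of how a client driver could request a quad-rate read through the generic SPI API follows; the 0x6b opcode, the transfer layout and the function name are illustrative assumptions, not part of this patch.

#include <linux/spi/spi.h>

/* Hypothetical quad-rate read: a one-byte command on a single line,
 * then the payload clocked in on four lines. */
static int example_quad_read(struct spi_device *spi, u8 *buf, size_t len)
{
        u8 cmd = 0x6b;  /* assumed opcode; a real driver should not pass a
                         * stack buffer to a controller that may DMA */
        struct spi_transfer xfers[2] = {
                {
                        .tx_buf   = &cmd,
                        .len      = 1,
                        .tx_nbits = SPI_NBITS_SINGLE,
                }, {
                        .rx_buf   = buf,
                        .len      = len,
                        .rx_nbits = SPI_NBITS_QUAD,
                },
        };
        struct spi_message msg;

        spi_message_init(&msg);
        spi_message_add_tail(&xfers[0], &msg);
        spi_message_add_tail(&xfers[1], &msg);

        /* The new checks in __spi_async() reject this unless spi->mode
         * advertises SPI_RX_QUAD, e.g. via the spi-rx-bus-width property. */
        return spi_sync(spi, &msg);
}

Whether the controller then honours the request comes down to qspi_read_msg() above setting QSPI_RD_QUAD in the command register.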
diff --git a/drivers/spi/spi-ti-ssp.c b/drivers/spi/spi-ti-ssp.c
index 10606fcc6ef..7d20e121e4c 100644
--- a/drivers/spi/spi-ti-ssp.c
+++ b/drivers/spi/spi-ti-ssp.c
@@ -283,7 +283,7 @@ static int ti_ssp_spi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int error = 0;
- pdata = dev->platform_data;
+ pdata = dev_get_platdata(dev);
if (!pdata) {
dev_err(dev, "platform data not found\n");
return -EINVAL;
diff --git a/drivers/spi/spi-tle62x0.c b/drivers/spi/spi-tle62x0.c
index 6b0874d782e..2d4010d8082 100644
--- a/drivers/spi/spi-tle62x0.c
+++ b/drivers/spi/spi-tle62x0.c
@@ -52,8 +52,7 @@ static inline int tle62x0_write(struct tle62x0_state *st)
buff[1] = gpio_state;
}
- dev_dbg(&st->us->dev, "buff %02x,%02x,%02x\n",
- buff[0], buff[1], buff[2]);
+ dev_dbg(&st->us->dev, "buff %3ph\n", buff);
return spi_write(st->us, buff, (st->nr_gpio == 16) ? 3 : 2);
}
@@ -247,7 +246,7 @@ static int tle62x0_probe(struct spi_device *spi)
int ptr;
int ret;
- pdata = spi->dev.platform_data;
+ pdata = dev_get_platdata(&spi->dev);
if (pdata == NULL) {
dev_err(&spi->dev, "no device data specified\n");
return -EINVAL;
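The spi-tle62x0 hunk above replaces an open-coded three-byte hex dump with the kernel's %ph printk extension, which prints a small buffer as space-separated hex bytes; the length comes from the field width (%3ph) or from an int argument with the %*ph form. A minimal, hypothetical module sketch (the buffer contents are made up):

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>

static int __init phdemo_init(void)
{
        u8 buf[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };

        pr_info("buf %6ph\n", buf);                     /* "de ad be ef 00 01" */
        pr_info("buf %*phC\n", (int)sizeof(buf), buf);  /* colon-separated */
        return 0;
}

static void __exit phdemo_exit(void)
{
}

module_init(phdemo_init);
module_exit(phdemo_exit);
MODULE_LICENSE("GPL");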
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index dd55707a6aa..eaeeed51bbb 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1797,3 +1797,5 @@ MODULE_PARM_DESC(use_dma,
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor ML7xxx IOH SPI Driver");
+MODULE_DEVICE_TABLE(pci, pch_spi_pcidev_id);
+
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
index e9b7681ff6a..7c6d15766c7 100644
--- a/drivers/spi/spi-txx9.c
+++ b/drivers/spi/spi-txx9.c
@@ -26,7 +26,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
-#include <asm/gpio.h>
+#include <linux/gpio.h>
#define SPI_FIFO_SIZE 4
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 09a94285259..0bf1b2c457a 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -80,10 +80,9 @@ struct xilinx_spi {
/* bitbang has to be first */
struct spi_bitbang bitbang;
struct completion done;
- struct resource mem; /* phys mem */
void __iomem *regs; /* virt. address of the control registers */
- u32 irq;
+ int irq;
u8 *rx_ptr; /* pointer in the Rx buffer */
const u8 *tx_ptr; /* pointer in the Tx buffer */
@@ -233,21 +232,6 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi,
return 0;
}
-static int xilinx_spi_setup(struct spi_device *spi)
-{
- /* always return 0, we can not check the number of bits.
- * There are cases when SPI setup is called before any driver is
- * there, in that case the SPI core defaults to 8 bits, which we
- * do not support in some cases. But if we return an error, the
- * SPI device would not be registered and no driver can get hold of it
- * When the driver is there, it will call SPI setup again with the
- * correct number of bits per transfer.
- * If a driver setups with the wrong bit number, it will fail when
- * it tries to do a transfer
- */
- return 0;
-}
-
static void xilinx_spi_fill_tx_fifo(struct xilinx_spi *xspi)
{
u8 sr;
@@ -355,17 +339,34 @@ static const struct of_device_id xilinx_spi_of_match[] = {
};
MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
-struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
- u32 irq, s16 bus_num, int num_cs, int bits_per_word)
+static int xilinx_spi_probe(struct platform_device *pdev)
{
- struct spi_master *master;
struct xilinx_spi *xspi;
- int ret;
+ struct xspi_platform_data *pdata;
+ struct resource *res;
+ int ret, num_cs = 0, bits_per_word = 8;
+ struct spi_master *master;
u32 tmp;
+ u8 i;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ num_cs = pdata->num_chipselect;
+ bits_per_word = pdata->bits_per_word;
+ } else {
+ of_property_read_u32(pdev->dev.of_node, "xlnx,num-ss-bits",
+ &num_cs);
+ }
+
+ if (!num_cs) {
+ dev_err(&pdev->dev,
+ "Missing slave select configuration data\n");
+ return -EINVAL;
+ }
- master = spi_alloc_master(dev, sizeof(struct xilinx_spi));
+ master = spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi));
if (!master)
- return NULL;
+ return -ENODEV;
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA;
@@ -375,25 +376,18 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
xspi->bitbang.chipselect = xilinx_spi_chipselect;
xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
- xspi->bitbang.master->setup = xilinx_spi_setup;
init_completion(&xspi->done);
- if (!request_mem_region(mem->start, resource_size(mem),
- XILINX_SPI_NAME))
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xspi->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xspi->regs)) {
+ ret = PTR_ERR(xspi->regs);
goto put_master;
-
- xspi->regs = ioremap(mem->start, resource_size(mem));
- if (xspi->regs == NULL) {
- dev_warn(dev, "ioremap failure\n");
- goto map_failed;
}
- master->bus_num = bus_num;
+ master->bus_num = pdev->dev.id;
master->num_chipselect = num_cs;
- master->dev.of_node = dev->of_node;
-
- xspi->mem = *mem;
- xspi->irq = irq;
+ master->dev.of_node = pdev->dev.of_node;
/*
* Detect endianness on the IP via loop bit in CR. Detection
@@ -423,113 +417,63 @@ struct spi_master *xilinx_spi_init(struct device *dev, struct resource *mem,
} else if (xspi->bits_per_word == 32) {
xspi->tx_fn = xspi_tx32;
xspi->rx_fn = xspi_rx32;
- } else
- goto unmap_io;
-
+ } else {
+ ret = -EINVAL;
+ goto put_master;
+ }
/* SPI controller initializations */
xspi_init_hw(xspi);
+ xspi->irq = platform_get_irq(pdev, 0);
+ if (xspi->irq < 0) {
+ ret = xspi->irq;
+ goto put_master;
+ }
+
/* Register for SPI Interrupt */
- ret = request_irq(xspi->irq, xilinx_spi_irq, 0, XILINX_SPI_NAME, xspi);
+ ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0,
+ dev_name(&pdev->dev), xspi);
if (ret)
- goto unmap_io;
+ goto put_master;
ret = spi_bitbang_start(&xspi->bitbang);
if (ret) {
- dev_err(dev, "spi_bitbang_start FAILED\n");
- goto free_irq;
- }
-
- dev_info(dev, "at 0x%08llX mapped to 0x%p, irq=%d\n",
- (unsigned long long)mem->start, xspi->regs, xspi->irq);
- return master;
-
-free_irq:
- free_irq(xspi->irq, xspi);
-unmap_io:
- iounmap(xspi->regs);
-map_failed:
- release_mem_region(mem->start, resource_size(mem));
-put_master:
- spi_master_put(master);
- return NULL;
-}
-EXPORT_SYMBOL(xilinx_spi_init);
-
-void xilinx_spi_deinit(struct spi_master *master)
-{
- struct xilinx_spi *xspi;
-
- xspi = spi_master_get_devdata(master);
-
- spi_bitbang_stop(&xspi->bitbang);
- free_irq(xspi->irq, xspi);
- iounmap(xspi->regs);
-
- release_mem_region(xspi->mem.start, resource_size(&xspi->mem));
- spi_master_put(xspi->bitbang.master);
-}
-EXPORT_SYMBOL(xilinx_spi_deinit);
-
-static int xilinx_spi_probe(struct platform_device *dev)
-{
- struct xspi_platform_data *pdata;
- struct resource *r;
- int irq, num_cs = 0, bits_per_word = 8;
- struct spi_master *master;
- u8 i;
-
- pdata = dev->dev.platform_data;
- if (pdata) {
- num_cs = pdata->num_chipselect;
- bits_per_word = pdata->bits_per_word;
- }
-
-#ifdef CONFIG_OF
- if (dev->dev.of_node) {
- const __be32 *prop;
- int len;
-
- /* number of slave select bits is required */
- prop = of_get_property(dev->dev.of_node, "xlnx,num-ss-bits",
- &len);
- if (prop && len >= sizeof(*prop))
- num_cs = __be32_to_cpup(prop);
- }
-#endif
-
- if (!num_cs) {
- dev_err(&dev->dev, "Missing slave select configuration data\n");
- return -EINVAL;
+ dev_err(&pdev->dev, "spi_bitbang_start FAILED\n");
+ goto put_master;
}
-
- r = platform_get_resource(dev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENODEV;
-
- irq = platform_get_irq(dev, 0);
- if (irq < 0)
- return -ENXIO;
-
- master = xilinx_spi_init(&dev->dev, r, irq, dev->id, num_cs,
- bits_per_word);
- if (!master)
- return -ENODEV;
+ dev_info(&pdev->dev, "at 0x%08llX mapped to 0x%p, irq=%d\n",
+ (unsigned long long)res->start, xspi->regs, xspi->irq);
if (pdata) {
for (i = 0; i < pdata->num_devices; i++)
spi_new_device(master, pdata->devices + i);
}
- platform_set_drvdata(dev, master);
+ platform_set_drvdata(pdev, master);
return 0;
+
+put_master:
+ spi_master_put(master);
+
+ return ret;
}
-static int xilinx_spi_remove(struct platform_device *dev)
+static int xilinx_spi_remove(struct platform_device *pdev)
{
- xilinx_spi_deinit(platform_get_drvdata(dev));
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct xilinx_spi *xspi = spi_master_get_devdata(master);
+ void __iomem *regs_base = xspi->regs;
+
+ spi_bitbang_stop(&xspi->bitbang);
+
+ /* Disable all the interrupts just in case */
+ xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET);
+ /* Disable the global IPIF interrupt */
+ xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
+
+ spi_master_put(xspi->bitbang.master);
return 0;
}
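The spi-xilinx rework above folds xilinx_spi_init()/xilinx_spi_deinit() into probe()/remove() and switches to device-managed resources, which is why the request_mem_region()/ioremap()/free_irq() unwind labels disappear. A minimal sketch of the same devm pattern, with a hypothetical driver name and no real hardware behind it:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>

struct demo_priv {
        void __iomem *regs;
        int irq;
};

static irqreturn_t demo_irq(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
        struct demo_priv *priv;
        struct resource *res;
        int ret;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /* Mapping and IRQ are released automatically when the device is
         * unbound, so remove() and the error paths need no unwinding. */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        priv->regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(priv->regs))
                return PTR_ERR(priv->regs);

        priv->irq = platform_get_irq(pdev, 0);
        if (priv->irq < 0)
                return priv->irq;

        ret = devm_request_irq(&pdev->dev, priv->irq, demo_irq, 0,
                               dev_name(&pdev->dev), priv);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, priv);
        return 0;
}

static struct platform_driver demo_driver = {
        .probe  = demo_probe,
        .driver = {
                .name  = "demo-devm",   /* hypothetical */
                .owner = THIS_MODULE,
        },
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");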
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 978dda2c523..9e039c60c06 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -553,6 +553,10 @@ static void spi_pump_messages(struct kthread_work *work)
master->unprepare_transfer_hardware(master))
dev_err(&master->dev,
"failed to unprepare transfer hardware\n");
+ if (master->auto_runtime_pm) {
+ pm_runtime_mark_last_busy(master->dev.parent);
+ pm_runtime_put_autosuspend(master->dev.parent);
+ }
return;
}
@@ -572,11 +576,23 @@ static void spi_pump_messages(struct kthread_work *work)
master->busy = true;
spin_unlock_irqrestore(&master->queue_lock, flags);
+ if (!was_busy && master->auto_runtime_pm) {
+ ret = pm_runtime_get_sync(master->dev.parent);
+ if (ret < 0) {
+ dev_err(&master->dev, "Failed to power device: %d\n",
+ ret);
+ return;
+ }
+ }
+
if (!was_busy && master->prepare_transfer_hardware) {
ret = master->prepare_transfer_hardware(master);
if (ret) {
dev_err(&master->dev,
"failed to prepare transfer hardware\n");
+
+ if (master->auto_runtime_pm)
+ pm_runtime_put(master->dev.parent);
return;
}
}
@@ -774,7 +790,7 @@ static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
msg->status = -EINPROGRESS;
list_add_tail(&msg->queue, &master->queue);
- if (master->running && !master->busy)
+ if (!master->busy)
queue_kthread_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
@@ -869,6 +885,47 @@ static void of_register_spi_devices(struct spi_master *master)
if (of_find_property(nc, "spi-3wire", NULL))
spi->mode |= SPI_3WIRE;
+ /* Device DUAL/QUAD mode */
+ prop = of_get_property(nc, "spi-tx-bus-width", &len);
+ if (prop && len == sizeof(*prop)) {
+ switch (be32_to_cpup(prop)) {
+ case SPI_NBITS_SINGLE:
+ break;
+ case SPI_NBITS_DUAL:
+ spi->mode |= SPI_TX_DUAL;
+ break;
+ case SPI_NBITS_QUAD:
+ spi->mode |= SPI_TX_QUAD;
+ break;
+ default:
+ dev_err(&master->dev,
+ "spi-tx-bus-width %d not supported\n",
+ be32_to_cpup(prop));
+ spi_dev_put(spi);
+ continue;
+ }
+ }
+
+ prop = of_get_property(nc, "spi-rx-bus-width", &len);
+ if (prop && len == sizeof(*prop)) {
+ switch (be32_to_cpup(prop)) {
+ case SPI_NBITS_SINGLE:
+ break;
+ case SPI_NBITS_DUAL:
+ spi->mode |= SPI_RX_DUAL;
+ break;
+ case SPI_NBITS_QUAD:
+ spi->mode |= SPI_RX_QUAD;
+ break;
+ default:
+ dev_err(&master->dev,
+ "spi-rx-bus-width %d not supported\n",
+ be32_to_cpup(prop));
+ spi_dev_put(spi);
+ continue;
+ }
+ }
+
/* Device speed */
prop = of_get_property(nc, "spi-max-frequency", &len);
if (!prop || len < sizeof(*prop)) {
@@ -1169,7 +1226,7 @@ int spi_register_master(struct spi_master *master)
else {
status = spi_master_initialize_queue(master);
if (status) {
- device_unregister(&master->dev);
+ device_del(&master->dev);
goto done;
}
}
@@ -1316,6 +1373,19 @@ int spi_setup(struct spi_device *spi)
unsigned bad_bits;
int status = 0;
+ /* check mode to prevent DUAL and QUAD from being set at the same time
+ */
+ if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
+ ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
+ dev_err(&spi->dev,
+ "setup: can not select dual and quad at the same time\n");
+ return -EINVAL;
+ }
+ /* if the mode is SPI_3WIRE, DUAL and QUAD are forbidden
+ */
+ if ((spi->mode & SPI_3WIRE) && (spi->mode &
+ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
+ return -EINVAL;
/* help drivers fail *cleanly* when they need options
* that aren't supported with their current master
*/
@@ -1351,6 +1421,11 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
struct spi_master *master = spi->master;
struct spi_transfer *xfer;
+ if (list_empty(&message->transfers))
+ return -EINVAL;
+ if (!message->complete)
+ return -EINVAL;
+
/* Half-duplex links include original MicroWire, and ones with
* only one data pin like SPI_3WIRE (switches direction) or where
* either MOSI or MISO is missing. They can also be caused by
@@ -1373,12 +1448,20 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
/**
* Set transfer bits_per_word and max speed as spi device default if
* it is not set for this transfer.
+ * Set transfer tx_nbits and rx_nbits to the single-transfer default
+ * (SPI_NBITS_SINGLE) if they are not set for this transfer.
*/
list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ message->frame_length += xfer->len;
if (!xfer->bits_per_word)
xfer->bits_per_word = spi->bits_per_word;
- if (!xfer->speed_hz)
+ if (!xfer->speed_hz) {
xfer->speed_hz = spi->max_speed_hz;
+ if (master->max_speed_hz &&
+ xfer->speed_hz > master->max_speed_hz)
+ xfer->speed_hz = master->max_speed_hz;
+ }
+
if (master->bits_per_word_mask) {
/* Only 32 bits fit in the mask */
if (xfer->bits_per_word > 32)
@@ -1387,6 +1470,54 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
BIT(xfer->bits_per_word - 1)))
return -EINVAL;
}
+
+ if (xfer->speed_hz && master->min_speed_hz &&
+ xfer->speed_hz < master->min_speed_hz)
+ return -EINVAL;
+ if (xfer->speed_hz && master->max_speed_hz &&
+ xfer->speed_hz > master->max_speed_hz)
+ return -EINVAL;
+
+ if (xfer->tx_buf && !xfer->tx_nbits)
+ xfer->tx_nbits = SPI_NBITS_SINGLE;
+ if (xfer->rx_buf && !xfer->rx_nbits)
+ xfer->rx_nbits = SPI_NBITS_SINGLE;
+ /* check transfer tx/rx_nbits:
+ * 1. check the value is one of single, dual or quad
+ * 2. check tx/rx_nbits is allowed by the mode set in spi_device
+ * 3. if SPI_3WIRE, tx/rx_nbits must be single
+ */
+ if (xfer->tx_buf) {
+ if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
+ xfer->tx_nbits != SPI_NBITS_DUAL &&
+ xfer->tx_nbits != SPI_NBITS_QUAD)
+ return -EINVAL;
+ if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
+ !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
+ return -EINVAL;
+ if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
+ !(spi->mode & SPI_TX_QUAD))
+ return -EINVAL;
+ if ((spi->mode & SPI_3WIRE) &&
+ (xfer->tx_nbits != SPI_NBITS_SINGLE))
+ return -EINVAL;
+ }
+ /* check transfer rx_nbits */
+ if (xfer->rx_buf) {
+ if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
+ xfer->rx_nbits != SPI_NBITS_DUAL &&
+ xfer->rx_nbits != SPI_NBITS_QUAD)
+ return -EINVAL;
+ if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
+ !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
+ return -EINVAL;
+ if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
+ !(spi->mode & SPI_RX_QUAD))
+ return -EINVAL;
+ if ((spi->mode & SPI_3WIRE) &&
+ (xfer->rx_nbits != SPI_NBITS_SINGLE))
+ return -EINVAL;
+ }
}
message->spi = spi;
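The spi.c changes above make the message pump take a runtime-PM reference on the controller's parent device whenever master->auto_runtime_pm is set, which is what the ti-qspi driver earlier in this diff opts into instead of calling pm_runtime_get_sync() itself. A minimal, hypothetical controller-side sketch of that contract:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

static int demo_spi_runtime_suspend(struct device *dev)
{
        /* gate clocks, save context */
        return 0;
}

static int demo_spi_runtime_resume(struct device *dev)
{
        /* ungate clocks, restore context */
        return 0;
}

static const struct dev_pm_ops demo_spi_pm_ops = {
        SET_RUNTIME_PM_OPS(demo_spi_runtime_suspend, demo_spi_runtime_resume, NULL)
};

static int demo_transfer_one_message(struct spi_master *master,
                                     struct spi_message *m)
{
        /* hardware is already powered: spi_pump_messages() called
         * pm_runtime_get_sync() on master->dev.parent before this hook */
        m->status = 0;
        spi_finalize_current_message(master);
        return 0;
}

static int demo_spi_probe(struct platform_device *pdev)
{
        struct spi_master *master;

        master = spi_alloc_master(&pdev->dev, 0);
        if (!master)
                return -ENOMEM;

        master->transfer_one_message = demo_transfer_one_message;
        master->auto_runtime_pm = true;         /* let the core manage get/put */

        pm_runtime_enable(&pdev->dev);
        return spi_register_master(master);
}

static struct platform_driver demo_spi_driver = {
        .probe  = demo_spi_probe,
        .driver = {
                .name  = "demo-spi",            /* hypothetical */
                .owner = THIS_MODULE,
                .pm    = &demo_spi_pm_ops,
        },
};
module_platform_driver(demo_spi_driver);
MODULE_LICENSE("GPL");

The corresponding pm_runtime_put_autosuspend() in spi_pump_messages() drops the reference again once the queue drains, so a driver opting in this way needs no get/put calls in its transfer path.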
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 911e9e0711d..ca5bcfe874d 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -603,7 +603,7 @@ static int spidev_probe(struct spi_device *spi)
dev = device_create(spidev_class, &spi->dev, spidev->devt,
spidev, "spidev%d.%d",
spi->master->bus_num, spi->chip_select);
- status = PTR_RET(dev);
+ status = PTR_ERR_OR_ZERO(dev);
} else {
dev_dbg(&spi->dev, "no minor number available!\n");
status = -ENODEV;
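PTR_ERR_OR_ZERO(), used above in place of the older PTR_RET() alias, collapses the usual IS_ERR()/PTR_ERR() dance into one call. A trivial, hypothetical sketch:

#include <linux/err.h>
#include <linux/device.h>

static int demo_check(struct device *dev)
{
        /* Equivalent to: IS_ERR(dev) ? PTR_ERR(dev) : 0 */
        return PTR_ERR_OR_ZERO(dev);
}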
diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
index 36171fd2826..2cd9b0e44a4 100644
--- a/drivers/ssb/Kconfig
+++ b/drivers/ssb/Kconfig
@@ -138,7 +138,7 @@ config SSB_DRIVER_MIPS
config SSB_SFLASH
bool "SSB serial flash support"
- depends on SSB_DRIVER_MIPS && BROKEN
+ depends on SSB_DRIVER_MIPS
default y
# Assumption: We are on embedded, if we compile the MIPS core.
diff --git a/drivers/ssb/driver_chipcommon_sflash.c b/drivers/ssb/driver_chipcommon_sflash.c
index e84cf04f441..50328de712f 100644
--- a/drivers/ssb/driver_chipcommon_sflash.c
+++ b/drivers/ssb/driver_chipcommon_sflash.c
@@ -151,8 +151,8 @@ int ssb_sflash_init(struct ssb_chipcommon *cc)
sflash->size = sflash->blocksize * sflash->numblocks;
sflash->present = true;
- pr_info("Found %s serial flash (blocksize: 0x%X, blocks: %d)\n",
- e->name, e->blocksize, e->numblocks);
+ pr_info("Found %s serial flash (size: %dKiB, blocksize: 0x%X, blocks: %d)\n",
+ e->name, sflash->size / 1024, e->blocksize, e->numblocks);
/* Prepare platform device, but don't register it yet. It's too early,
* malloc (required by device_private_init) is not available yet. */
@@ -160,7 +160,5 @@ int ssb_sflash_init(struct ssb_chipcommon *cc)
sflash->size;
ssb_sflash_dev.dev.platform_data = sflash;
- pr_err("Serial flash support is not implemented yet!\n");
-
- return -ENOTSUPP;
+ return 0;
}
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 57d8b344460..3626dbc8eb0 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -40,8 +40,6 @@ source "drivers/staging/comedi/Kconfig"
source "drivers/staging/olpc_dcon/Kconfig"
-source "drivers/staging/asus_oled/Kconfig"
-
source "drivers/staging/panel/Kconfig"
source "drivers/staging/rtl8187se/Kconfig"
@@ -52,6 +50,8 @@ source "drivers/staging/rtl8192e/Kconfig"
source "drivers/staging/rtl8712/Kconfig"
+source "drivers/staging/rtl8188eu/Kconfig"
+
source "drivers/staging/rts5139/Kconfig"
source "drivers/staging/frontier/Kconfig"
@@ -118,6 +118,8 @@ source "drivers/staging/ozwpan/Kconfig"
source "drivers/staging/gdm72xx/Kconfig"
+source "drivers/staging/gdm724x/Kconfig"
+
source "drivers/staging/silicom/Kconfig"
source "drivers/staging/ced1401/Kconfig"
@@ -130,8 +132,6 @@ source "drivers/staging/sb105x/Kconfig"
source "drivers/staging/fwserial/Kconfig"
-source "drivers/staging/zcache/Kconfig"
-
source "drivers/staging/goldfish/Kconfig"
source "drivers/staging/netlogic/Kconfig"
@@ -142,4 +142,10 @@ source "drivers/staging/lustre/Kconfig"
source "drivers/staging/btmtk_usb/Kconfig"
+source "drivers/staging/xillybus/Kconfig"
+
+source "drivers/staging/dgnc/Kconfig"
+
+source "drivers/staging/dgap/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 429321f1510..d1b4b8003c2 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -12,12 +12,12 @@ obj-$(CONFIG_PRISM2_USB) += wlan-ng/
obj-$(CONFIG_ECHO) += echo/
obj-$(CONFIG_COMEDI) += comedi/
obj-$(CONFIG_FB_OLPC_DCON) += olpc_dcon/
-obj-$(CONFIG_ASUS_OLED) += asus_oled/
obj-$(CONFIG_PANEL) += panel/
obj-$(CONFIG_R8187SE) += rtl8187se/
obj-$(CONFIG_RTL8192U) += rtl8192u/
obj-$(CONFIG_RTL8192E) += rtl8192e/
obj-$(CONFIG_R8712U) += rtl8712/
+obj-$(CONFIG_R8188EU) += rtl8188eu/
obj-$(CONFIG_RTS5139) += rts5139/
obj-$(CONFIG_TRANZPORT) += frontier/
obj-$(CONFIG_IDE_PHISON) += phison/
@@ -52,14 +52,17 @@ obj-$(CONFIG_MFD_NVEC) += nvec/
obj-$(CONFIG_ANDROID) += android/
obj-$(CONFIG_USB_WPAN_HCD) += ozwpan/
obj-$(CONFIG_WIMAX_GDM72XX) += gdm72xx/
+obj-$(CONFIG_LTE_GDM724X) += gdm724x/
obj-$(CONFIG_NET_VENDOR_SILICOM) += silicom/
obj-$(CONFIG_CED1401) += ced1401/
obj-$(CONFIG_DRM_IMX) += imx-drm/
obj-$(CONFIG_DGRP) += dgrp/
obj-$(CONFIG_SB105X) += sb105x/
obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
-obj-$(CONFIG_ZCACHE) += zcache/
obj-$(CONFIG_GOLDFISH) += goldfish/
obj-$(CONFIG_USB_DWC2) += dwc2/
obj-$(CONFIG_LUSTRE_FS) += lustre/
obj-$(CONFIG_USB_BTMTK) += btmtk_usb/
+obj-$(CONFIG_XILLYBUS) += xillybus/
+obj-$(CONFIG_DGNC) += dgnc/
+obj-$(CONFIG_DGAP) += dgap/
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index 119d486a5cf..98ac020bf91 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -1248,7 +1248,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
struct flat_binder_object *fp;
if (*offp > buffer->data_size - sizeof(*fp) ||
buffer->data_size < sizeof(*fp) ||
- !IS_ALIGNED(*offp, sizeof(void *))) {
+ !IS_ALIGNED(*offp, sizeof(u32))) {
pr_err("transaction release %d bad offset %zd, size %zd\n",
debug_id, *offp, buffer->data_size);
continue;
@@ -1272,7 +1272,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
if (ref == NULL) {
- pr_err("transaction release %d bad handle %ld\n",
+ pr_err("transaction release %d bad handle %d\n",
debug_id, fp->handle);
break;
}
@@ -1284,13 +1284,13 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
case BINDER_TYPE_FD:
binder_debug(BINDER_DEBUG_TRANSACTION,
- " fd %ld\n", fp->handle);
+ " fd %d\n", fp->handle);
if (failed_at)
task_close_fd(proc, fp->handle);
break;
default:
- pr_err("transaction release %d bad object type %lx\n",
+ pr_err("transaction release %d bad object type %x\n",
debug_id, fp->type);
break;
}
@@ -1497,7 +1497,7 @@ static void binder_transaction(struct binder_proc *proc,
struct flat_binder_object *fp;
if (*offp > t->buffer->data_size - sizeof(*fp) ||
t->buffer->data_size < sizeof(*fp) ||
- !IS_ALIGNED(*offp, sizeof(void *))) {
+ !IS_ALIGNED(*offp, sizeof(u32))) {
binder_user_error("%d:%d got transaction with invalid offset, %zd\n",
proc->pid, thread->pid, *offp);
return_error = BR_FAILED_REPLY;
@@ -1548,7 +1548,7 @@ static void binder_transaction(struct binder_proc *proc,
case BINDER_TYPE_WEAK_HANDLE: {
struct binder_ref *ref = binder_get_ref(proc, fp->handle);
if (ref == NULL) {
- binder_user_error("%d:%d got transaction with invalid handle, %ld\n",
+ binder_user_error("%d:%d got transaction with invalid handle, %d\n",
proc->pid,
thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
@@ -1591,13 +1591,13 @@ static void binder_transaction(struct binder_proc *proc,
if (reply) {
if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
- binder_user_error("%d:%d got reply with fd, %ld, but target does not allow fds\n",
+ binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fd_not_allowed;
}
} else if (!target_node->accept_fds) {
- binder_user_error("%d:%d got transaction with fd, %ld, but target does not allow fds\n",
+ binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fd_not_allowed;
@@ -1605,7 +1605,7 @@ static void binder_transaction(struct binder_proc *proc,
file = fget(fp->handle);
if (file == NULL) {
- binder_user_error("%d:%d got transaction with invalid fd, %ld\n",
+ binder_user_error("%d:%d got transaction with invalid fd, %d\n",
proc->pid, thread->pid, fp->handle);
return_error = BR_FAILED_REPLY;
goto err_fget_failed;
@@ -1619,13 +1619,13 @@ static void binder_transaction(struct binder_proc *proc,
task_fd_install(target_proc, target_fd, file);
trace_binder_transaction_fd(t, fp->handle, target_fd);
binder_debug(BINDER_DEBUG_TRANSACTION,
- " fd %ld -> %d\n", fp->handle, target_fd);
+ " fd %d -> %d\n", fp->handle, target_fd);
/* TODO: fput? */
fp->handle = target_fd;
} break;
default:
- binder_user_error("%d:%d got transaction with invalid object type, %lx\n",
+ binder_user_error("%d:%d got transaction with invalid object type, %x\n",
proc->pid, thread->pid, fp->type);
return_error = BR_FAILED_REPLY;
goto err_bad_object_type;
@@ -1701,7 +1701,7 @@ err_no_context_mgr_node:
}
int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
- void __user *buffer, int size, signed long *consumed)
+ void __user *buffer, size_t size, size_t *consumed)
{
uint32_t cmd;
void __user *ptr = buffer + *consumed;
@@ -2081,8 +2081,8 @@ static int binder_has_thread_work(struct binder_thread *thread)
static int binder_thread_read(struct binder_proc *proc,
struct binder_thread *thread,
- void __user *buffer, int size,
- signed long *consumed, int non_block)
+ void __user *buffer, size_t size,
+ size_t *consumed, int non_block)
{
void __user *ptr = buffer + *consumed;
void __user *end = buffer + size;
@@ -2579,7 +2579,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
goto err;
}
binder_debug(BINDER_DEBUG_READ_WRITE,
- "%d:%d write %ld at %08lx, read %ld at %08lx\n",
+ "%d:%d write %zd at %016lx, read %zd at %016lx\n",
proc->pid, thread->pid, bwr.write_size,
bwr.write_buffer, bwr.read_size, bwr.read_buffer);
@@ -2605,7 +2605,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
}
binder_debug(BINDER_DEBUG_READ_WRITE,
- "%d:%d wrote %ld of %ld, read return %ld of %ld\n",
+ "%d:%d wrote %zd of %zd, read return %zd of %zd\n",
proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
bwr.read_consumed, bwr.read_size);
if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h
index dbe81ceca1b..cbe34516806 100644
--- a/drivers/staging/android/binder.h
+++ b/drivers/staging/android/binder.h
@@ -48,13 +48,13 @@ enum {
*/
struct flat_binder_object {
/* 8 bytes for large_flat_header. */
- unsigned long type;
- unsigned long flags;
+ __u32 type;
+ __u32 flags;
/* 8 bytes of data. */
union {
void __user *binder; /* local object */
- signed long handle; /* remote object */
+ __u32 handle; /* remote object */
};
/* extra data associated with local object */
@@ -67,18 +67,18 @@ struct flat_binder_object {
*/
struct binder_write_read {
- signed long write_size; /* bytes to write */
- signed long write_consumed; /* bytes consumed by driver */
+ size_t write_size; /* bytes to write */
+ size_t write_consumed; /* bytes consumed by driver */
unsigned long write_buffer;
- signed long read_size; /* bytes to read */
- signed long read_consumed; /* bytes consumed by driver */
+ size_t read_size; /* bytes to read */
+ size_t read_consumed; /* bytes consumed by driver */
unsigned long read_buffer;
};
/* Use with BINDER_VERSION, driver fills in fields. */
struct binder_version {
/* driver protocol version -- increment with incompatible change */
- signed long protocol_version;
+ __s32 protocol_version;
};
/* This is the current protocol version. */
@@ -86,7 +86,7 @@ struct binder_version {
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
-#define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t)
+#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
@@ -119,14 +119,14 @@ struct binder_transaction_data {
* identifying the target and contents of the transaction.
*/
union {
- size_t handle; /* target descriptor of command transaction */
+ __u32 handle; /* target descriptor of command transaction */
void *ptr; /* target descriptor of return transaction */
} target;
void *cookie; /* target object cookie */
- unsigned int code; /* transaction command */
+ __u32 code; /* transaction command */
/* General information about the transaction. */
- unsigned int flags;
+ __u32 flags;
pid_t sender_pid;
uid_t sender_euid;
size_t data_size; /* number of bytes of data */
@@ -143,7 +143,7 @@ struct binder_transaction_data {
/* offsets from buffer to flat_binder_object structs */
const void __user *offsets;
} ptr;
- uint8_t buf[8];
+ __u8 buf[8];
} data;
};
@@ -153,18 +153,18 @@ struct binder_ptr_cookie {
};
struct binder_pri_desc {
- int priority;
- int desc;
+ __s32 priority;
+ __u32 desc;
};
struct binder_pri_ptr_cookie {
- int priority;
+ __s32 priority;
void *ptr;
void *cookie;
};
enum binder_driver_return_protocol {
- BR_ERROR = _IOR('r', 0, int),
+ BR_ERROR = _IOR('r', 0, __s32),
/*
* int: error code
*/
@@ -178,7 +178,7 @@ enum binder_driver_return_protocol {
* binder_transaction_data: the received command.
*/
- BR_ACQUIRE_RESULT = _IOR('r', 4, int),
+ BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
/*
* not currently supported
* int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
@@ -258,22 +258,22 @@ enum binder_driver_command_protocol {
* binder_transaction_data: the sent command.
*/
- BC_ACQUIRE_RESULT = _IOW('c', 2, int),
+ BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
/*
* not currently supported
* int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
* Else you have acquired a primary reference on the object.
*/
- BC_FREE_BUFFER = _IOW('c', 3, int),
+ BC_FREE_BUFFER = _IOW('c', 3, void *),
/*
* void *: ptr to transaction data received on a read
*/
- BC_INCREFS = _IOW('c', 4, int),
- BC_ACQUIRE = _IOW('c', 5, int),
- BC_RELEASE = _IOW('c', 6, int),
- BC_DECREFS = _IOW('c', 7, int),
+ BC_INCREFS = _IOW('c', 4, __u32),
+ BC_ACQUIRE = _IOW('c', 5, __u32),
+ BC_RELEASE = _IOW('c', 6, __u32),
+ BC_DECREFS = _IOW('c', 7, __u32),
/*
* int: descriptor
*/
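The binder.h changes above move the userspace-visible structures from long/int fields to fixed-width __u32/__s32 (and size_t for the buffer bookkeeping), so the ioctl ABI keeps the same shape for 32-bit and 64-bit userspace, and the _IOW()/_IOR() size encodings change with the types. A small, hypothetical sketch of the same convention; note that, unlike this sketch, binder still carries its buffer pointers as unsigned long at this point:

#include <linux/types.h>
#include <linux/ioctl.h>

/* Hypothetical userspace-visible structure using only fixed-width types */
struct demo_write_read {
        __u32 handle;           /* object descriptor, same width everywhere */
        __u32 flags;
        __u64 buffer;           /* userspace pointer carried as a 64-bit value */
        __u32 size;
        __u32 pad;              /* explicit padding keeps sizeof() identical */
};

#define DEMO_WRITE_READ         _IOWR('D', 1, struct demo_write_read)
#define DEMO_SET_MAX_THREADS    _IOW('D', 2, __u32)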
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
index 080abf2faf9..a8c344422a7 100644
--- a/drivers/staging/android/logger.c
+++ b/drivers/staging/android/logger.c
@@ -469,7 +469,7 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t ppos)
{
struct logger_log *log = file_get_log(iocb->ki_filp);
- size_t orig = log->w_off;
+ size_t orig;
struct logger_entry header;
struct timespec now;
ssize_t ret = 0;
@@ -490,6 +490,8 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
mutex_lock(&log->mutex);
+ orig = log->w_off;
+
/*
* Fix up any readers, pulling them forward to the first readable
* entry after (what will be) the new write offset. We do this now
diff --git a/drivers/staging/android/sw_sync.c b/drivers/staging/android/sw_sync.c
index 765c757b120..f24493ac65e 100644
--- a/drivers/staging/android/sw_sync.c
+++ b/drivers/staging/android/sw_sync.c
@@ -163,7 +163,7 @@ static int sw_sync_release(struct inode *inode, struct file *file)
static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj,
unsigned long arg)
{
- int fd = get_unused_fd();
+ int fd = get_unused_fd_flags(O_CLOEXEC);
int err;
struct sync_pt *pt;
struct sync_fence *fence;
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 2996077fede..38e5d3b5ed9 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -697,7 +697,7 @@ static long sync_fence_ioctl_wait(struct sync_fence *fence, unsigned long arg)
static long sync_fence_ioctl_merge(struct sync_fence *fence, unsigned long arg)
{
- int fd = get_unused_fd();
+ int fd = get_unused_fd_flags(O_CLOEXEC);
int err;
struct sync_fence *fence2, *fence3;
struct sync_merge_data data;
diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
index ee3a57f2283..2c617834dc4 100644
--- a/drivers/staging/android/timed_output.c
+++ b/drivers/staging/android/timed_output.c
@@ -28,7 +28,7 @@ static struct class *timed_output_class;
static atomic_t device_count;
static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
struct timed_output_dev *tdev = dev_get_drvdata(dev);
int remaining = tdev->get_time(tdev);
@@ -36,9 +36,8 @@ static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%d\n", remaining);
}
-static ssize_t enable_store(
- struct device *dev, struct device_attribute *attr,
- const char *buf, size_t size)
+static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
{
struct timed_output_dev *tdev = dev_get_drvdata(dev);
int value;
@@ -50,8 +49,13 @@ static ssize_t enable_store(
return size;
}
+static DEVICE_ATTR_RW(enable);
-static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store);
+static struct attribute *timed_output_attrs[] = {
+ &dev_attr_enable.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(timed_output);
static int create_timed_output_class(void)
{
@@ -60,6 +64,7 @@ static int create_timed_output_class(void)
if (IS_ERR(timed_output_class))
return PTR_ERR(timed_output_class);
atomic_set(&device_count, 0);
+ timed_output_class->dev_groups = timed_output_groups;
}
return 0;
@@ -82,27 +87,15 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
if (IS_ERR(tdev->dev))
return PTR_ERR(tdev->dev);
- ret = device_create_file(tdev->dev, &dev_attr_enable);
- if (ret < 0)
- goto err_create_file;
-
dev_set_drvdata(tdev->dev, tdev);
tdev->state = 0;
return 0;
-
-err_create_file:
- device_destroy(timed_output_class, MKDEV(0, tdev->index));
- pr_err("failed to register driver %s\n",
- tdev->name);
-
- return ret;
}
EXPORT_SYMBOL_GPL(timed_output_dev_register);
void timed_output_dev_unregister(struct timed_output_dev *tdev)
{
tdev->enable(tdev, 0);
- device_remove_file(tdev->dev, &dev_attr_enable);
device_destroy(timed_output_class, MKDEV(0, tdev->index));
dev_set_drvdata(tdev->dev, NULL);
}
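The timed_output conversion above drops the manual device_create_file()/device_remove_file() calls: DEVICE_ATTR_RW() builds the attribute from enable_show()/enable_store(), ATTRIBUTE_GROUPS() wraps it into timed_output_groups, and pointing the class's dev_groups at that array makes the core create and remove the sysfs file together with each device. A minimal sketch of the same pattern, with a hypothetical class and attribute name:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>

static int demo_level;

/* DEVICE_ATTR_RW(level) expects callbacks named level_show/level_store */
static ssize_t level_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
{
        return sprintf(buf, "%d\n", demo_level);
}

static ssize_t level_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t size)
{
        if (kstrtoint(buf, 10, &demo_level))
                return -EINVAL;
        return size;
}
static DEVICE_ATTR_RW(level);

static struct attribute *demo_attrs[] = {
        &dev_attr_level.attr,
        NULL,
};
ATTRIBUTE_GROUPS(demo);                 /* generates demo_groups */

static struct class demo_class = {
        .name       = "demo",           /* hypothetical */
        .owner      = THIS_MODULE,
        .dev_groups = demo_groups,      /* files follow each device's lifetime */
};

static int __init demo_init(void)
{
        return class_register(&demo_class);
}

static void __exit demo_exit(void)
{
        class_unregister(&demo_class);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");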
diff --git a/drivers/staging/asus_oled/Kconfig b/drivers/staging/asus_oled/Kconfig
deleted file mode 100644
index e56dbb25ac5..00000000000
--- a/drivers/staging/asus_oled/Kconfig
+++ /dev/null
@@ -1,6 +0,0 @@
-config ASUS_OLED
- tristate "Asus OLED driver"
- depends on USB
- default N
- ---help---
- Enable support for the OLED display present in some Asus laptops.
diff --git a/drivers/staging/asus_oled/Makefile b/drivers/staging/asus_oled/Makefile
deleted file mode 100644
index e71f9aa9e03..00000000000
--- a/drivers/staging/asus_oled/Makefile
+++ /dev/null
@@ -1 +0,0 @@
-obj-$(CONFIG_ASUS_OLED) += asus_oled.o
diff --git a/drivers/staging/asus_oled/README b/drivers/staging/asus_oled/README
deleted file mode 100644
index 2d721232467..00000000000
--- a/drivers/staging/asus_oled/README
+++ /dev/null
@@ -1,156 +0,0 @@
-
- Driver for Asus OLED display present in some Asus laptops.
-
- The code of this driver is based on 'asusoled' program taken from
- <http://lapsus.berlios.de/asus_oled.html>. I just wanted to have a simple
- kernel driver for controlling this device, but I didn't know how
- to do that. Now I know ;) Also, that program can not be used
- with usbhid loaded, which means no USB mouse/keyboard while
- controlling OLED display :(
-
- It has been tested on Asus G1 and didn't cause any problems,
- but I don't guarantee that it won't do anything wrong :)
-
- It can (and probably does) have errors. It is usable
- in my case, and I hope others will find it useful too!
-
-*******
-
-Building the module
-
- To build the module you need kernel 2.6 include files and some C compiler.
-
- Just run:
- make
- make install (as a root)
-
- It will build (hopefully) the module and install it in
- /lib/modules/'uname -r'/extra/asus_oled.ko.
-
- To load it just use:
- modprobe asus_oled
-
- You can check if it has detected your OLED display by looking into dmesg output.
- There should be something like this:
- asus-oled 2-7:1.0: Attached Asus OLED device
-
- If it doesn't find your display, you can try removing usbhid module.
- If you add asus_oled into the list of modules loaded during system boot
- before usbhid, it will work even when usbhid is present.
-
- If it still doesn't detect your hardware, check lsusb output.
- There should be a similar line:
- Bus 002 Device 005: ID 0b05:1726 ASUSTek Computer, Inc.
-
- If you don't see any lines with '0b05:1726' it means that you have a different
- type of hardware that is not detected (it may or may not work, but the driver
- only knows the '0b05:1726' device).
-
-*******
-
-Configuration
-
- There is only one option: start_off.
- You can use it by: 'modprobe asus_oled start_off=1', or by adding this
- line to /etc/modprobe.d/asus_oled.conf:
- options asus_oled start_off=1
-
- With this option provided, asus_oled driver will switch off the display
- when it is detected and attached. It is a nice feature to just switch off the 'ASUS'
- logo. If you don't use the display, it is probably a good idea to switch it off,
- to protect the OLEDs from "wearing off".
-
-*******
-
-Usage
-
- This module can be controlled with two special files:
- /sys/class/asus_oled/oled_N/enabled
- /sys/class/asus_oled/oled_N/picture
-
- (N is the device number, the first, and probably the only, has number 1,
- so it is /sys/class/asus_oled/oled_1/enabled
- and /sys/class/asus_oled/oled_1/picture)
-
- The 'enabled' file is for reading and writing, 'picture' is write-only.
-
- You can write 0 or 1 to the 'enabled' file, which will switch
- the display on and off. Reading from this file will tell you the last
- status set, either 0 or 1. By default it is 1, so if the device was set to 'off',
- and the computer was rebooted without power-off, this file will contain the wrong
- value - because the device is off, but hasn't been disabled this time and is
- assumed to be on...
-
- To the 'picture' file you write pictures to be displayed by the OLED device.
- The format of the file:
- <M:WxH>
- 00001110010111000
- 00010101010101010
- ....
-
- First line is a configuration parameter. Meaning of fields in <M:WxH>:
- M - picture mode. It can be 's' for static pictures,
- 'r' for rolling pictures, or 'f' for flashing pictures.
- W - width of the picture. May be between 1 and 1792
- H - height of the picture. May be between 1 and 32
-
- For example <s:128x32> means static picture, 128 pixels long and 32 pixels high.
-
- The physical size of the display is 128x32 pixels. Static and flashing pictures
- can't be larger than that (actually they can, but only part of them will be displayed ;) )
-
- If the picture is smaller than 128x32 it will be centered. Rolling pictures wider than
- 128 pixels will be centered too, unless their width = n*128. Vertically they will be
- centered just like static pictures, if their height is smaller than 32.
-
- Flashing pictures will be centered horizontally if their width < 128, but they are
- centered vertically in a different way. If their height < 16, they will be centered
- in the upper half of the display (rows 0-15). This is because only the first half
- of flashing pictures is used for flashing. When a picture with height = 32 is
- displayed in flashing mode, its upper 16 rows will be flashing in the upper half
- of the display, and the lower half will be empty. After a few seconds the upper part will
- stop flashing (but that part of the picture will remain there), and the lower
- half of the display will start displaying the lower half of the picture
- in rolling mode, unless it is empty, or the picture was small enough to fit in
- the upper part. It is not my idea, this is just the way the Asus display works ;)
- So if you need just flashing, use at most a 128x16 picture. If you need flashing and
- rolling, use the whole size of the display.
-
- Lines following the first, configuration, line are picture data. Each '1' means
- that the pixel is lit, and '0' means that it is not. You can also use '#' as ON,
- and ' ' (space) as OFF. Empty lines and all other characters are ignored.
-
- It is possible to write everything in one line <M:WxH>01010101010101010...,
- and W*H characters will be used. If there are not enough characters, nothing will be
- displayed. However, the 'line mode' is easier to read (and write), and it also
- lets you omit parts of the data. Whenever an End-Of-Line character is found, but
- the line is not W characters long, it is assumed that all missing characters
- are equal to the last character in the line.
-
- The following line represents '0', '1' and a lot of '0's, depending on the width of the picture
- provided in configuration data:
- 010
-
- So if you need an empty line, it is sufficient to write a line with only one '0' in it.
- The same works with '1' (or ' ' and '#').
-
- If there is too much data in the file, it will be ignored. If you are not sure
- how many characters you are missing, you can add a few lines with one zero in each of them.
-
- There are some example pictures in .txt format, that can be used as follows:
- cat foo.txt > /sys/class/asus_oled/oled_1/picture
-
- If the display is switched off you also need to run:
- echo 1 > /sys/class/asus_oled/oled_1/enabled
- To switch it off, just use:
- echo 0 > /sys/class/asus_oled/oled_1/enabled
-
-
-*******
-
- For any additional info please have a look at http://lapsus.berlios.de/asus_oled.html
-
-
-
- Jakub Schmidtke (sjakub@gmail.com)
-
diff --git a/drivers/staging/asus_oled/TODO b/drivers/staging/asus_oled/TODO
deleted file mode 100644
index 2514131670a..00000000000
--- a/drivers/staging/asus_oled/TODO
+++ /dev/null
@@ -1,10 +0,0 @@
-TODO:
- - checkpatch.pl cleanups
- - sparse fixes
- - audit the userspace interface
- - sysfs vs. char?
- - Documentation/ABI/ needs to be added
- - put the sample .txt files and README file somewhere.
-
-Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
-Cc: Jakub Schmidtke <sjakub@gmail.com>
diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
deleted file mode 100644
index 3654dc32a0c..00000000000
--- a/drivers/staging/asus_oled/asus_oled.c
+++ /dev/null
@@ -1,847 +0,0 @@
-/*
- * Asus OLED USB driver
- *
- * Copyright (C) 2007,2008 Jakub Schmidtke (sjakub@gmail.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- *
- *
- * This module is based on usbled and asus-laptop modules.
- *
- *
- * Asus OLED support is based on asusoled program taken from
- * <http://lapsus.berlios.de/asus_oled.html>.
- *
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/usb.h>
-#include <linux/platform_device.h>
-#include <linux/ctype.h>
-
-#define ASUS_OLED_VERSION "0.04-dev"
-#define ASUS_OLED_NAME "asus-oled"
-#define ASUS_OLED_UNDERSCORE_NAME "asus_oled"
-
-#define ASUS_OLED_STATIC 's'
-#define ASUS_OLED_ROLL 'r'
-#define ASUS_OLED_FLASH 'f'
-
-#define ASUS_OLED_MAX_WIDTH 1792
-#define ASUS_OLED_DISP_HEIGHT 32
-#define ASUS_OLED_PACKET_BUF_SIZE 256
-
-#define USB_VENDOR_ID_ASUS 0x0b05
-#define USB_DEVICE_ID_ASUS_LCM 0x1726
-#define USB_DEVICE_ID_ASUS_LCM2 0x175b
-
-MODULE_AUTHOR("Jakub Schmidtke, sjakub@gmail.com");
-MODULE_DESCRIPTION("Asus OLED Driver");
-MODULE_LICENSE("GPL");
-MODULE_VERSION(ASUS_OLED_VERSION);
-
-static struct class *oled_class;
-static int oled_num;
-
-static uint start_off;
-
-module_param(start_off, uint, 0644);
-
-MODULE_PARM_DESC(start_off,
- "Set to 1 to switch off OLED display after it is attached");
-
-enum oled_pack_mode {
- PACK_MODE_G1,
- PACK_MODE_G50,
- PACK_MODE_LAST
-};
-
-struct oled_dev_desc_str {
- uint16_t idVendor;
- uint16_t idProduct;
- /* width of display */
- uint16_t devWidth;
- /* formula to be used while packing the picture */
- enum oled_pack_mode packMode;
- const char *devDesc;
-};
-
-/* table of devices that work with this driver */
-static const struct usb_device_id id_table[] = {
- /* Asus G1/G2 (and variants)*/
- { USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM) },
- /* Asus G50V (and possibly others - G70? G71?)*/
- { USB_DEVICE(USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM2) },
- { },
-};
-
-/* parameters of specific devices */
-static struct oled_dev_desc_str oled_dev_desc_table[] = {
- { USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM, 128, PACK_MODE_G1,
- "G1/G2" },
- { USB_VENDOR_ID_ASUS, USB_DEVICE_ID_ASUS_LCM2, 256, PACK_MODE_G50,
- "G50" },
- { },
-};
-
-MODULE_DEVICE_TABLE(usb, id_table);
-
-struct asus_oled_header {
- uint8_t magic1;
- uint8_t magic2;
- uint8_t flags;
- uint8_t value3;
- uint8_t buffer1;
- uint8_t buffer2;
- uint8_t value6;
- uint8_t value7;
- uint8_t value8;
- uint8_t padding2[7];
-} __attribute((packed));
-
-struct asus_oled_packet {
- struct asus_oled_header header;
- uint8_t bitmap[ASUS_OLED_PACKET_BUF_SIZE];
-} __attribute((packed));
-
-struct asus_oled_dev {
- struct usb_device *udev;
- uint8_t pic_mode;
- uint16_t dev_width;
- enum oled_pack_mode pack_mode;
- size_t height;
- size_t width;
- size_t x_shift;
- size_t y_shift;
- size_t buf_offs;
- uint8_t last_val;
- size_t buf_size;
- char *buf;
- uint8_t enabled;
- uint8_t enabled_post_resume;
- struct device *dev;
-};
-
-static void setup_packet_header(struct asus_oled_packet *packet, char flags,
- char value3, char buffer1, char buffer2, char value6,
- char value7, char value8)
-{
- memset(packet, 0, sizeof(struct asus_oled_header));
- packet->header.magic1 = 0x55;
- packet->header.magic2 = 0xaa;
- packet->header.flags = flags;
- packet->header.value3 = value3;
- packet->header.buffer1 = buffer1;
- packet->header.buffer2 = buffer2;
- packet->header.value6 = value6;
- packet->header.value7 = value7;
- packet->header.value8 = value8;
-}
-
-static void enable_oled(struct asus_oled_dev *odev, uint8_t enabl)
-{
- int retval;
- int act_len;
- struct asus_oled_packet *packet;
-
- packet = kzalloc(sizeof(struct asus_oled_packet), GFP_KERNEL);
- if (!packet)
- return;
-
- setup_packet_header(packet, 0x20, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00);
-
- if (enabl)
- packet->bitmap[0] = 0xaf;
- else
- packet->bitmap[0] = 0xae;
-
- retval = usb_bulk_msg(odev->udev,
- usb_sndbulkpipe(odev->udev, 2),
- packet,
- sizeof(struct asus_oled_header) + 1,
- &act_len,
- -1);
-
- if (retval)
- dev_dbg(&odev->udev->dev, "retval = %d\n", retval);
-
- odev->enabled = enabl;
-
- kfree(packet);
-}
-
-static ssize_t set_enabled(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct usb_interface *intf = to_usb_interface(dev);
- struct asus_oled_dev *odev = usb_get_intfdata(intf);
- unsigned long value;
- if (kstrtoul(buf, 10, &value))
- return -EINVAL;
-
- enable_oled(odev, value);
-
- return count;
-}
-
-static ssize_t class_set_enabled(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct asus_oled_dev *odev =
- (struct asus_oled_dev *) dev_get_drvdata(device);
- unsigned long value;
-
- if (kstrtoul(buf, 10, &value))
- return -EINVAL;
-
- enable_oled(odev, value);
-
- return count;
-}
-
-static ssize_t get_enabled(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct usb_interface *intf = to_usb_interface(dev);
- struct asus_oled_dev *odev = usb_get_intfdata(intf);
-
- return sprintf(buf, "%d\n", odev->enabled);
-}
-
-static ssize_t class_get_enabled(struct device *device,
- struct device_attribute *attr, char *buf)
-{
- struct asus_oled_dev *odev =
- (struct asus_oled_dev *) dev_get_drvdata(device);
-
- return sprintf(buf, "%d\n", odev->enabled);
-}
-
-static void send_packets(struct usb_device *udev,
- struct asus_oled_packet *packet,
- char *buf, uint8_t p_type, size_t p_num)
-{
- size_t i;
- int act_len;
-
- for (i = 0; i < p_num; i++) {
- int retval;
-
- switch (p_type) {
- case ASUS_OLED_ROLL:
- setup_packet_header(packet, 0x40, 0x80, p_num,
- i + 1, 0x00, 0x01, 0xff);
- break;
- case ASUS_OLED_STATIC:
- setup_packet_header(packet, 0x10 + i, 0x80, 0x01,
- 0x01, 0x00, 0x01, 0x00);
- break;
- case ASUS_OLED_FLASH:
- setup_packet_header(packet, 0x10 + i, 0x80, 0x01,
- 0x01, 0x00, 0x00, 0xff);
- break;
- }
-
- memcpy(packet->bitmap, buf + (ASUS_OLED_PACKET_BUF_SIZE*i),
- ASUS_OLED_PACKET_BUF_SIZE);
-
- retval = usb_bulk_msg(udev, usb_sndctrlpipe(udev, 2),
- packet, sizeof(struct asus_oled_packet),
- &act_len, -1);
-
- if (retval)
- dev_dbg(&udev->dev, "retval = %d\n", retval);
- }
-}
-
-static void send_packet(struct usb_device *udev,
- struct asus_oled_packet *packet,
- size_t offset, size_t len, char *buf, uint8_t b1,
- uint8_t b2, uint8_t b3, uint8_t b4, uint8_t b5,
- uint8_t b6) {
- int retval;
- int act_len;
-
- setup_packet_header(packet, b1, b2, b3, b4, b5, b6, 0x00);
- memcpy(packet->bitmap, buf + offset, len);
-
- retval = usb_bulk_msg(udev,
- usb_sndctrlpipe(udev, 2),
- packet,
- sizeof(struct asus_oled_packet),
- &act_len,
- -1);
-
- if (retval)
- dev_dbg(&udev->dev, "retval = %d\n", retval);
-}
-
-
-static void send_packets_g50(struct usb_device *udev,
- struct asus_oled_packet *packet, char *buf)
-{
- send_packet(udev, packet, 0, 0x100, buf,
- 0x10, 0x00, 0x02, 0x01, 0x00, 0x01);
- send_packet(udev, packet, 0x100, 0x080, buf,
- 0x10, 0x00, 0x02, 0x02, 0x80, 0x00);
-
- send_packet(udev, packet, 0x180, 0x100, buf,
- 0x11, 0x00, 0x03, 0x01, 0x00, 0x01);
- send_packet(udev, packet, 0x280, 0x100, buf,
- 0x11, 0x00, 0x03, 0x02, 0x00, 0x01);
- send_packet(udev, packet, 0x380, 0x080, buf,
- 0x11, 0x00, 0x03, 0x03, 0x80, 0x00);
-}
-
-
-static void send_data(struct asus_oled_dev *odev)
-{
- size_t packet_num = odev->buf_size / ASUS_OLED_PACKET_BUF_SIZE;
- struct asus_oled_packet *packet;
-
- packet = kzalloc(sizeof(struct asus_oled_packet), GFP_KERNEL);
- if (!packet)
- return;
-
- if (odev->pack_mode == PACK_MODE_G1) {
- /*
- * When sending roll-mode data the display updated only
- * first packet. I have no idea why, but when static picture
- * is sent just before rolling picture everything works fine.
- */
- if (odev->pic_mode == ASUS_OLED_ROLL)
- send_packets(odev->udev, packet, odev->buf,
- ASUS_OLED_STATIC, 2);
-
- /* Only ROLL mode can use more than 2 packets.*/
- if (odev->pic_mode != ASUS_OLED_ROLL && packet_num > 2)
- packet_num = 2;
-
- send_packets(odev->udev, packet, odev->buf,
- odev->pic_mode, packet_num);
- } else if (odev->pack_mode == PACK_MODE_G50) {
- send_packets_g50(odev->udev, packet, odev->buf);
- }
-
- kfree(packet);
-}
-
-static int append_values(struct asus_oled_dev *odev, uint8_t val, size_t count)
-{
- odev->last_val = val;
-
- if (val == 0) {
- odev->buf_offs += count;
- return 0;
- }
-
- while (count-- > 0) {
- size_t x = odev->buf_offs % odev->width;
- size_t y = odev->buf_offs / odev->width;
- size_t i;
-
- x += odev->x_shift;
- y += odev->y_shift;
-
- switch (odev->pack_mode) {
- case PACK_MODE_G1:
- /*
- * i = (x/128)*640 + 127 - x + (y/8)*128;
- * This one for 128 is the same, but might be better
- * for different widths?
- */
- i = (x/odev->dev_width)*640 +
- odev->dev_width - 1 - x +
- (y/8)*odev->dev_width;
- break;
-
- case PACK_MODE_G50:
- i = (odev->dev_width - 1 - x)/8 + y*odev->dev_width/8;
- break;
-
- default:
- i = 0;
- dev_err(odev->dev, "Unknown OLED Pack Mode: %d!\n",
- odev->pack_mode);
- break;
- }
-
- if (i >= odev->buf_size) {
- dev_err(odev->dev, "Buffer overflow! Report a bug: offs: %zu >= %zu i: %zu (x: %zu y: %zu)\n",
- odev->buf_offs, odev->buf_size, i, x, y);
- return -EIO;
- }
-
- switch (odev->pack_mode) {
- case PACK_MODE_G1:
- odev->buf[i] &= ~(1<<(y%8));
- break;
-
- case PACK_MODE_G50:
- odev->buf[i] &= ~(1<<(x%8));
- break;
-
- default:
- /* cannot get here; stops gcc complaining*/
- break;
- }
-
- odev->buf_offs++;
- }
-
- return 0;
-}
-
-static ssize_t odev_set_picture(struct asus_oled_dev *odev,
- const char *buf, size_t count)
-{
- size_t offs = 0, max_offs;
-
- if (count < 1)
- return 0;
-
- if (tolower(buf[0]) == 'b') {
- /* binary mode, set the entire memory*/
-
- size_t i;
-
- odev->buf_size = (odev->dev_width * ASUS_OLED_DISP_HEIGHT) / 8;
-
- kfree(odev->buf);
- odev->buf = kmalloc(odev->buf_size, GFP_KERNEL);
- if (odev->buf == NULL) {
- odev->buf_size = 0;
- dev_err(odev->dev, "Out of memory!\n");
- return -ENOMEM;
- }
-
- memset(odev->buf, 0xff, odev->buf_size);
-
- for (i = 1; i < count && i <= 32 * 32; i++) {
- odev->buf[i-1] = buf[i];
- odev->buf_offs = i-1;
- }
-
- odev->width = odev->dev_width / 8;
- odev->height = ASUS_OLED_DISP_HEIGHT;
- odev->x_shift = 0;
- odev->y_shift = 0;
- odev->last_val = 0;
-
- send_data(odev);
-
- return count;
- }
-
- if (buf[0] == '<') {
- size_t i;
- size_t w = 0, h = 0;
- size_t w_mem, h_mem;
-
- if (count < 10 || buf[2] != ':')
- goto error_header;
-
-
- switch (tolower(buf[1])) {
- case ASUS_OLED_STATIC:
- case ASUS_OLED_ROLL:
- case ASUS_OLED_FLASH:
- odev->pic_mode = buf[1];
- break;
- default:
- dev_err(odev->dev, "Wrong picture mode: '%c'.\n",
- buf[1]);
- return -EIO;
- break;
- }
-
- for (i = 3; i < count; ++i) {
- if (buf[i] >= '0' && buf[i] <= '9') {
- w = 10*w + (buf[i] - '0');
-
- if (w > ASUS_OLED_MAX_WIDTH)
- goto error_width;
- } else if (tolower(buf[i]) == 'x') {
- break;
- } else {
- goto error_width;
- }
- }
-
- for (++i; i < count; ++i) {
- if (buf[i] >= '0' && buf[i] <= '9') {
- h = 10*h + (buf[i] - '0');
-
- if (h > ASUS_OLED_DISP_HEIGHT)
- goto error_height;
- } else if (tolower(buf[i]) == '>') {
- break;
- } else {
- goto error_height;
- }
- }
-
- if (w < 1 || w > ASUS_OLED_MAX_WIDTH)
- goto error_width;
-
- if (h < 1 || h > ASUS_OLED_DISP_HEIGHT)
- goto error_height;
-
- if (i >= count || buf[i] != '>')
- goto error_header;
-
- offs = i+1;
-
- if (w % (odev->dev_width) != 0)
- w_mem = (w/(odev->dev_width) + 1)*(odev->dev_width);
- else
- w_mem = w;
-
- if (h < ASUS_OLED_DISP_HEIGHT)
- h_mem = ASUS_OLED_DISP_HEIGHT;
- else
- h_mem = h;
-
- odev->buf_size = w_mem * h_mem / 8;
-
- kfree(odev->buf);
- odev->buf = kmalloc(odev->buf_size, GFP_KERNEL);
-
- if (odev->buf == NULL) {
- odev->buf_size = 0;
- dev_err(odev->dev, "Out of memory!\n");
- return -ENOMEM;
- }
-
- memset(odev->buf, 0xff, odev->buf_size);
-
- odev->buf_offs = 0;
- odev->width = w;
- odev->height = h;
- odev->x_shift = 0;
- odev->y_shift = 0;
- odev->last_val = 0;
-
- if (odev->pic_mode == ASUS_OLED_FLASH) {
- if (h < ASUS_OLED_DISP_HEIGHT/2)
- odev->y_shift = (ASUS_OLED_DISP_HEIGHT/2 - h)/2;
- } else {
- if (h < ASUS_OLED_DISP_HEIGHT)
- odev->y_shift = (ASUS_OLED_DISP_HEIGHT - h)/2;
- }
-
- if (w < (odev->dev_width))
- odev->x_shift = ((odev->dev_width) - w)/2;
- }
-
- max_offs = odev->width * odev->height;
-
- while (offs < count && odev->buf_offs < max_offs) {
- int ret = 0;
-
- if (buf[offs] == '1' || buf[offs] == '#') {
- ret = append_values(odev, 1, 1);
- if (ret < 0)
- return ret;
- } else if (buf[offs] == '0' || buf[offs] == ' ') {
- ret = append_values(odev, 0, 1);
- if (ret < 0)
- return ret;
- } else if (buf[offs] == '\n') {
- /*
- * New line detected. Lets assume, that all characters
- * till the end of the line were equal to the last
- * character in this line.
- */
- if (odev->buf_offs % odev->width != 0)
- ret = append_values(odev, odev->last_val,
- odev->width -
- (odev->buf_offs %
- odev->width));
- if (ret < 0)
- return ret;
- }
-
- offs++;
- }
-
- if (odev->buf_offs >= max_offs)
- send_data(odev);
-
- return count;
-
-error_width:
- dev_err(odev->dev, "Wrong picture width specified.\n");
- return -EIO;
-
-error_height:
- dev_err(odev->dev, "Wrong picture height specified.\n");
- return -EIO;
-
-error_header:
- dev_err(odev->dev, "Wrong picture header.\n");
- return -EIO;
-}
-
-static ssize_t set_picture(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct usb_interface *intf = to_usb_interface(dev);
-
- return odev_set_picture(usb_get_intfdata(intf), buf, count);
-}
-
-static ssize_t class_set_picture(struct device *device,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return odev_set_picture((struct asus_oled_dev *)
- dev_get_drvdata(device), buf, count);
-}
-
-#define ASUS_OLED_DEVICE_ATTR(_file) dev_attr_asus_oled_##_file
-
-static DEVICE_ATTR(asus_oled_enabled, S_IWUSR | S_IRUGO,
- get_enabled, set_enabled);
-static DEVICE_ATTR(asus_oled_picture, S_IWUSR , NULL, set_picture);
-
-static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO,
- class_get_enabled, class_set_enabled);
-static DEVICE_ATTR(picture, S_IWUSR, NULL, class_set_picture);
-
-static int asus_oled_probe(struct usb_interface *interface,
- const struct usb_device_id *id)
-{
- struct usb_device *udev = interface_to_usbdev(interface);
- struct asus_oled_dev *odev = NULL;
- int retval = -ENOMEM;
- uint16_t dev_width = 0;
- enum oled_pack_mode pack_mode = PACK_MODE_LAST;
- const struct oled_dev_desc_str *dev_desc = oled_dev_desc_table;
- const char *desc = NULL;
-
- if (!id) {
- /* Even possible? Just to make sure...*/
- dev_err(&interface->dev, "No usb_device_id provided!\n");
- return -ENODEV;
- }
-
- for (; dev_desc->idVendor; dev_desc++) {
- if (dev_desc->idVendor == id->idVendor
- && dev_desc->idProduct == id->idProduct) {
- dev_width = dev_desc->devWidth;
- desc = dev_desc->devDesc;
- pack_mode = dev_desc->packMode;
- break;
- }
- }
-
- if (!desc || dev_width < 1 || pack_mode == PACK_MODE_LAST) {
- dev_err(&interface->dev,
- "Missing or incomplete device description!\n");
- return -ENODEV;
- }
-
- odev = kzalloc(sizeof(struct asus_oled_dev), GFP_KERNEL);
- if (odev == NULL)
- return -ENOMEM;
-
- odev->udev = usb_get_dev(udev);
- odev->pic_mode = ASUS_OLED_STATIC;
- odev->dev_width = dev_width;
- odev->pack_mode = pack_mode;
- odev->height = 0;
- odev->width = 0;
- odev->x_shift = 0;
- odev->y_shift = 0;
- odev->buf_offs = 0;
- odev->buf_size = 0;
- odev->last_val = 0;
- odev->buf = NULL;
- odev->enabled = 1;
- odev->dev = NULL;
-
- usb_set_intfdata(interface, odev);
-
- retval = device_create_file(&interface->dev,
- &ASUS_OLED_DEVICE_ATTR(enabled));
- if (retval)
- goto err_files;
-
- retval = device_create_file(&interface->dev,
- &ASUS_OLED_DEVICE_ATTR(picture));
- if (retval)
- goto err_files;
-
- odev->dev = device_create(oled_class, &interface->dev, MKDEV(0, 0),
- NULL, "oled_%d", ++oled_num);
-
- if (IS_ERR(odev->dev)) {
- retval = PTR_ERR(odev->dev);
- goto err_files;
- }
-
- dev_set_drvdata(odev->dev, odev);
-
- retval = device_create_file(odev->dev, &dev_attr_enabled);
- if (retval)
- goto err_class_enabled;
-
- retval = device_create_file(odev->dev, &dev_attr_picture);
- if (retval)
- goto err_class_picture;
-
- dev_info(&interface->dev,
- "Attached Asus OLED device: %s [width %u, pack_mode %d]\n",
- desc, odev->dev_width, odev->pack_mode);
-
- if (start_off)
- enable_oled(odev, 0);
-
- return 0;
-
-err_class_picture:
- device_remove_file(odev->dev, &dev_attr_picture);
-
-err_class_enabled:
- device_remove_file(odev->dev, &dev_attr_enabled);
- device_unregister(odev->dev);
-
-err_files:
- device_remove_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(enabled));
- device_remove_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(picture));
-
- usb_set_intfdata(interface, NULL);
- usb_put_dev(odev->udev);
- kfree(odev);
-
- return retval;
-}
-
-static void asus_oled_disconnect(struct usb_interface *interface)
-{
- struct asus_oled_dev *odev;
-
- odev = usb_get_intfdata(interface);
- usb_set_intfdata(interface, NULL);
-
- device_remove_file(odev->dev, &dev_attr_picture);
- device_remove_file(odev->dev, &dev_attr_enabled);
- device_unregister(odev->dev);
-
- device_remove_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(picture));
- device_remove_file(&interface->dev, &ASUS_OLED_DEVICE_ATTR(enabled));
-
- usb_put_dev(odev->udev);
-
- kfree(odev->buf);
-
- kfree(odev);
-
- dev_info(&interface->dev, "Disconnected Asus OLED device\n");
-}
-
-#ifdef CONFIG_PM
-static int asus_oled_suspend(struct usb_interface *intf, pm_message_t message)
-{
- struct asus_oled_dev *odev;
-
- odev = usb_get_intfdata(intf);
- if (!odev)
- return -ENODEV;
-
- odev->enabled_post_resume = odev->enabled;
- enable_oled(odev, 0);
-
- return 0;
-}
-
-static int asus_oled_resume(struct usb_interface *intf)
-{
- struct asus_oled_dev *odev;
-
- odev = usb_get_intfdata(intf);
- if (!odev)
- return -ENODEV;
-
- enable_oled(odev, odev->enabled_post_resume);
-
- return 0;
-}
-#else
-#define asus_oled_suspend NULL
-#define asus_oled_resume NULL
-#endif
-
-static struct usb_driver oled_driver = {
- .name = ASUS_OLED_NAME,
- .probe = asus_oled_probe,
- .disconnect = asus_oled_disconnect,
- .id_table = id_table,
- .suspend = asus_oled_suspend,
- .resume = asus_oled_resume,
-};
-
-static CLASS_ATTR_STRING(version, S_IRUGO,
- ASUS_OLED_UNDERSCORE_NAME " " ASUS_OLED_VERSION);
-
-static int __init asus_oled_init(void)
-{
- int retval = 0;
- oled_class = class_create(THIS_MODULE, ASUS_OLED_UNDERSCORE_NAME);
-
- if (IS_ERR(oled_class)) {
- pr_err("Error creating " ASUS_OLED_UNDERSCORE_NAME " class\n");
- return PTR_ERR(oled_class);
- }
-
- retval = class_create_file(oled_class, &class_attr_version.attr);
- if (retval) {
- pr_err("Error creating class version file\n");
- goto error;
- }
-
- retval = usb_register(&oled_driver);
-
- if (retval) {
- pr_err("usb_register failed. Error number %d\n", retval);
- goto error;
- }
-
- return retval;
-
-error:
- class_destroy(oled_class);
- return retval;
-}
-
-static void __exit asus_oled_exit(void)
-{
- usb_deregister(&oled_driver);
- class_remove_file(oled_class, &class_attr_version.attr);
- class_destroy(oled_class);
-}
-
-module_init(asus_oled_init);
-module_exit(asus_oled_exit);
-
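/*
 * The sample .txt files removed below are in the text format parsed by
 * odev_set_picture() above: a "<mode:WIDTHxHEIGHT>" header, where mode is
 * 's' (static), 'r' (roll) or 'f' (flash), followed by rows of '1'/'#'
 * for lit pixels and '0'/' ' for dark ones; a newline fills the rest of
 * the current row with the last value, and a leading 'b' instead selects
 * raw binary mode.  A minimal sketch of such an image as it would be
 * written to the driver's "picture" attribute (the 8x8 image itself is
 * illustrative only):
 */
static const char demo_picture[] =
	"<s:8x8>\n"
	"11111111\n"
	"1      1\n"
	"1 1111 1\n"
	"1 1  1 1\n"
	"1 1  1 1\n"
	"1 1111 1\n"
	"1      1\n"
	"11111111\n";

/* e.g. from user space: write(fd, demo_picture, sizeof(demo_picture) - 1); */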
diff --git a/drivers/staging/asus_oled/linux.txt b/drivers/staging/asus_oled/linux.txt
deleted file mode 100644
index dc758b0eb37..00000000000
--- a/drivers/staging/asus_oled/linux.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-<s:74x32>
-0
-0
-00000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000
-01111111111000000000000000000000000000000000000000000000000000000000000000
-00011111100000000000000111000000000000000000000000000000000000000000000000
-00001111000000000000000111000000000000000000000000000000000000000000000000
-00001111000000000000000111000000000000000000000000000000000000000000000000
-00001111000000000000000000000000000000000000000000000000000000000000000000
-00001111000000000000000000000000000000000000000000000000000000000000000000
-00001111000000000000011100001111111111100000111110011111100011111101111000
-00001111000000000000111110000011111000111000111110000111100001111000110000
-00001111000000000001101110000011111000111000001111000111100000111100100000
-00001111000000000001001110000011110000111100001111000111100000111101100000
-00001111000000000100001110000011110000111100001111000111100000011111000000
-00001111000000000100011110000011110000111100001111000111100000001111000000
-00001111000000000100011110000011110000111100001111000111100000001111000000
-00001111000000000100011100100011110000111100001111000111100000001111100000
-00001111000000001100111100100011110000111100001111000111100000001111110000
-00001111000000001100111101100011110000111100001111000111100000011011110000
-00001111000000011100111101000011110000111100001111000111100000010001111000
-00011111000001111100111011000011110000111100001111001111100000110000111100
-11111111111111111100011110001111111011111110000111110111111011111011111110
-00000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000
-0
-0
-0
-0
diff --git a/drivers/staging/asus_oled/linux_f.txt b/drivers/staging/asus_oled/linux_f.txt
deleted file mode 100644
index b4bb85cc6eb..00000000000
--- a/drivers/staging/asus_oled/linux_f.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-<f:128x16>
-00000000000000000000000000000000001111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000001110000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000001110000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000011000111111111100001111001111100111110111000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000111100001111000110001111000111100011100010000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000001011100001111000111000111100111100001110110000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000011100001110000111000111100111100001111100000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000100011100001110000111000111100111100000111100000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000100011100001110000111000111100111100000111100000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000100111001001110000111000111100111100000111110000000000000000000000000000000000000
-00000000000000000000000000000000000011110000001100111011001110000111000111100111100000111110000000000000000000000000000000000000
-00000000000000000000000000000000000011110000001100111010001110000111000111100111100000100111000000000000000000000000000000000000
-00000000000000000000000000000000000011110000111100110110001110000111000111100111100001000011100000000000000000000000000000000000
-00000000000000000000000000000000001111111111111100111100111111011111100011110111110111101111110000000000000000000000000000000000
-
diff --git a/drivers/staging/asus_oled/linux_fr.txt b/drivers/staging/asus_oled/linux_fr.txt
deleted file mode 100644
index f88e2b3bdd1..00000000000
--- a/drivers/staging/asus_oled/linux_fr.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-<f:128x32>
-00000000000000000000000000000000001111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000001110000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000001110000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000011000111111111100001111001111100111110111000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000111100001111000110001111000111100011100010000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000001011100001111000111000111100111100001110110000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000000011100001110000111000111100111100001111100000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000100011100001110000111000111100111100000111100000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000100011100001110000111000111100111100000111100000000000000000000000000000000000000
-00000000000000000000000000000000000011110000000100111001001110000111000111100111100000111110000000000000000000000000000000000000
-00000000000000000000000000000000000011110000001100111011001110000111000111100111100000111110000000000000000000000000000000000000
-00000000000000000000000000000000000011110000001100111010001110000111000111100111100000100111000000000000000000000000000000000000
-00000000000000000000000000000000000011110000111100110110001110000111000111100111100001000011100000000000000000000000000000000000
-00000000000000000000000000000000001111111111111100111100111111011111100011110111110111101111110000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000
-00000000000000000000000000001111000000000000000000000000000000000000000000000000000000000000001111000000000000000000000000000000
-00000000000000000000000000011111100000000000000000000000000000000000000000000000000000000000011111100000000000000000000000000000
-00000000000000000000000000011111100000000000000000000000000000000000000000000000000000000000011111100000000000000000000000000000
-00000000000000000000000000001111000000000000000000000000000000000000000000000000000000000000001111000000000000000000000000000000
-00000000000000000000000000000110000000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
diff --git a/drivers/staging/asus_oled/tux.txt b/drivers/staging/asus_oled/tux.txt
deleted file mode 100644
index 9d2052854b6..00000000000
--- a/drivers/staging/asus_oled/tux.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-<s:32x32>
-00000000000001111111000000000000
-0000000000001 100000000000
-000000000001 10000000000
-000000000001 10000000000
-000000000001 10000000000
-000000000001 1 111 10000000000
-000000000001 1 1 1000000000
-000000000001 111 1000000000
-000000000001 111111 1000000000
-000000000001 111111 1000000000
-000000000001 1 1 100000000
-00000000001 11 100000000
-00000000001 11111111 10000000
-0000000001 11111111 1000000
-000000001 111111111 1000000
-000000001 1111111111 100000
-00000001 11111111111 100000
-00000001 111111111111 10000
-0000001 111111111111 10000
-0000001 111111111111 10000
-0000001 111111111111 10000
-0000001 111111111111 10000
-000000011 11111111111 10000
-000011 11 11111111111 100000
-0001 1111 111111111111111 1000
-001 1111111 11111111111111 1000
-001 1111111 1111111 111111 100
-001 11111111 111111 1111111 10
-001 11111111 11111 100
-001 1111111 111 11100
-000111 111 11111 11 100000
-000000111 111111111 1000000
diff --git a/drivers/staging/asus_oled/tux_r.txt b/drivers/staging/asus_oled/tux_r.txt
deleted file mode 100644
index fd81a3e8494..00000000000
--- a/drivers/staging/asus_oled/tux_r.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-<r:32x32>
-00000000000001111111000000000000
-0000000000001 100000000000
-000000000001 10000000000
-000000000001 10000000000
-000000000001 10000000000
-000000000001 1 111 10000000000
-000000000001 1 1 1000000000
-000000000001 111 1000000000
-000000000001 111111 1000000000
-000000000001 111111 1000000000
-000000000001 1 1 100000000
-00000000001 11 100000000
-00000000001 11111111 10000000
-0000000001 11111111 1000000
-000000001 111111111 1000000
-000000001 1111111111 100000
-00000001 11111111111 100000
-00000001 111111111111 10000
-0000001 111111111111 10000
-0000001 111111111111 10000
-0000001 111111111111 10000
-0000001 111111111111 10000
-000000011 11111111111 10000
-000011 11 11111111111 100000
-0001 1111 111111111111111 1000
-001 1111111 11111111111111 1000
-001 1111111 1111111 111111 100
-001 11111111 111111 1111111 10
-001 11111111 11111 100
-001 1111111 111 11100
-000111 111 11111 11 100000
-000000111 111111111 1000000
diff --git a/drivers/staging/asus_oled/tux_r2.txt b/drivers/staging/asus_oled/tux_r2.txt
deleted file mode 100644
index e94d84eaab0..00000000000
--- a/drivers/staging/asus_oled/tux_r2.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-<r:256x32>
-000000000000000000000000000000000000000000000000000000000000011111110000000000000000000000000000000000000000000000000000000000000
-0000000000000000000000000000000000000000000000000000000000001 1000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000001 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000001 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000001 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000001 1 111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000001 1 1 100000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000001 111 100000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111100000000000000111000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000001 111111 100000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000000111000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000001 111111 100000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000000111000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000000001 1 1 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000001 11 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000000001 11111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000011100001111111111100000111110011111100011111101111000000000000000000000000000000
-0000000000000000000000000000000000000000000000000000000001 11111111 100000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000000111110000011111000111000111110000111100001111000110000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000001 111111111 100000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000001101110000011111000111000001111000111100000111100100000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000001 1111111111 10000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000001001110000011110000111100001111000111100000111101100000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000001 11111111111 10000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000100001110000011110000111100001111000111100000011111000000000000000000000000000000000
-00000000000000000000000000000000000000000000000000000001 111111111111 10000000000000000000000000000000000000000000000000000000000000000000000000000000000011110000000001000111100000111100001111000011110001111000000011110000000
-0000000000000000000000000000000000000000000000000000001 111111111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000100011110000011110000111100001111000111100000001111000000
-0000000000000000000000000000000000000000000000000000001 111111111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000000100011100100011110000111100001111000111100000001111100000000000000000000000000000000
-0000000000000000000000000000000000000000000000000000001 111111111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000001100111100100011110000111100001111000111100000001111110000000000000000000000000000000
-0000000000000000000000000000000000000000000000000000001 111111111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000001100111101100011110000111100001111000111100000011011110000000000000000000000000000000
-000000000000000000000000000000000000000000000000000000011 11111111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000001111000000011100111101000011110000111100001111000111100000010001111000000000000000000000000000000
-000000000000000000000000000000000000000000000000000011 11 11111111111 10000000000000000000000000000000000000000000000000000000000000000000000000000000000011111000001111100111011000011110000111100001111001111100000110000111100000000000000000000000000000
-0000000000000000000000000000000000000000000000000001 1111 111111111111111 100000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111100011110001111111011111110000111110111111011111011111110000000000000000000000000000
-000000000000000000000000000000000000000000000000001 1111111 11111111111111 100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000001 1111111 1111111 111111 10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000001 11111111 111111 1111111 1000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000001 11111111 11111 1000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000001 1111111 111 111000000000000000000000000000000000000000000000000000
-000000000000000000000000000000000000000000000000000111 111 11111 11 10
-000000000000000000000000000000000000000000000000000000111 111111111 10
diff --git a/drivers/staging/asus_oled/zig.txt b/drivers/staging/asus_oled/zig.txt
deleted file mode 100644
index 31573d8f799..00000000000
--- a/drivers/staging/asus_oled/zig.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-<r:128x32>
-10000000000000000000000000000000000000000000000000000000000000011000000000000000000000000000000000000000000000000000000000000001
-01000000000000000000000000000000000000000000000000000000000000100100000000000000000000000000000000000000000000000000000000000010
-00100000000000000000000000000000000000000000000000000000000001000010000000000000000000000000000000000000000000000000000000000100
-00010000000000000000000000000000000000000000000000000000000010000001000000000000000000000000000000000000000000000000000000001000
-00001000000000000000000000000000000000000000000000000000000100000000100000000000000000000000000000000000000000000000000000010000
-00000100000000000000000000000000000000000000000000000000001000000000010000000000000000000000000000000000000000000000000000100000
-00000010000000000000000000000000000000000000000000000000010000000000001000000000000000000000000000000000000000000000000001000000
-00000001000000000000000000000000000000000000000000000000100000000000000100000000000000000000000000000000000000000000000010000000
-00000000100000000000000000000000000000000000000000000001000000000000000010000000000000000000000000000000000000000000000100000000
-00000000010000000000000000000000000000000000000000000010000000000000000001000000000000000000000000000000000000000000001000000000
-00000000001000000000000000000000000000000000000000000100000000000000000000100000000000000000000000000000000000000000010000000000
-00000000000100000000000000000000000000000000000000001000000000000000000000010000000000000000000000000000000000000000100000000000
-00000000000010000000000000000000000000000000000000010000000000000000000000001000000000000000000000000000000000000001000000000000
-00000000000001000000000000000000000000000000000000100000000000000000000000000100000000000000000000000000000000000010000000000000
-00000000000000100000000000000000000000000000000001000000000000000000000000000010000000000000000000000000000000000100000000000000
-00000000000000010000000000000000000000000000000010000000000000000000000000000001000000000000000000000000000000001000000000000000
-00000000000000001000000000000000000000000000000100000000000000000000000000000000100000000000000000000000000000010000000000000000
-00000000000000000100000000000000000000000000001000000000000000000000000000000000010000000000000000000000000000100000000000000000
-00000000000000000010000000000000000000000000010000000000000000000000000000000000001000000000000000000000000001000000000000000000
-00000000000000000001000000000000000000000000100000000000000000000000000000000000000100000000000000000000000010000000000000000000
-00000000000000000000100000000000000000000001000000000000000000000000000000000000000010000000000000000000000100000000000000000000
-00000000000000000000010000000000000000000010000000000000000000000000000000000000000001000000000000000000001000000000000000000000
-00000000000000000000001000000000000000000100000000000000000000000000000000000000000000100000000000000000010000000000000000000000
-00000000000000000000000100000000000000001000000000000000000000000000000000000000000000010000000000000000100000000000000000000000
-00000000000000000000000010000000000000010000000000000000000000000000000000000000000000001000000000000001000000000000000000000000
-00000000000000000000000001000000000000100000000000000000000000000000000000000000000000000100000000000010000000000000000000000000
-00000000000000000000000000100000000001000000000000000000000000000000000000000000000000000010000000000100000000000000000000000000
-00000000000000000000000000010000000010000000000000000000000000000000000000000000000000000001000000001000000000000000000000000000
-00000000000000000000000000001000000100000000000000000000000000000000000000000000000000000000100000010000000000000000000000000000
-00000000000000000000000000000100001000000000000000000000000000000000000000000000000000000000010000100000000000000000000000000000
-00000000000000000000000000000010010000000000000000000000000000000000000000000000000000000000001001000000000000000000000000000000
-00000000000000000000000000000001100000000000000000000000000000000000000000000000000000000000000110000000000000000000000000000000
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index f67a22536cb..f91bc1fdd89 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -1004,9 +1004,9 @@ cntrlEnd:
if (copy_from_user(&IoBuffer, argp, sizeof(struct bcm_ioctl_buffer)))
return -EFAULT;
- len = min_t(ulong, IoBuffer.OutputLength, strlen(VER_FILEVERSION_STR) + 1);
+ len = min_t(ulong, IoBuffer.OutputLength, strlen(DRV_VERSION) + 1);
- if (copy_to_user(IoBuffer.OutputBuffer, VER_FILEVERSION_STR, len))
+ if (copy_to_user(IoBuffer.OutputBuffer, DRV_VERSION, len))
return -EFAULT;
Status = STATUS_SUCCESS;
break;
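/*
 * The hunk above clamps the copy length to the caller's buffer size before
 * copy_to_user().  A condensed sketch of that pattern, with illustrative
 * names (not an API of this driver):
 */
static int copy_version_string(char __user *dst, unsigned long dst_len,
			       const char *version)
{
	unsigned long len = min_t(unsigned long, dst_len, strlen(version) + 1);

	return copy_to_user(dst, version, len) ? -EFAULT : 0;
}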
diff --git a/drivers/staging/bcm/DDRInit.c b/drivers/staging/bcm/DDRInit.c
index 8c696b64ab2..f5eda96f0f8 100644
--- a/drivers/staging/bcm/DDRInit.c
+++ b/drivers/staging/bcm/DDRInit.c
@@ -828,13 +828,13 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
{
retval= rdmalt(Adapter,(UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue |= 0x44;
retval = wrmalt(Adapter,(UINT)0x0f000830, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, WRM, DBG_LVL_ALL, "%s:%d WRM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, WRM, DBG_LVL_ALL, "%s:%d WRM failed\n", __func__, __LINE__);
return retval;
}
}
@@ -972,7 +972,7 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
}
retval = wrmalt(Adapter, psDDRSetting->ulRegAddress, &value, sizeof(value));
if(STATUS_SUCCESS != retval) {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"%s:%d\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
break;
}
@@ -992,25 +992,25 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
uiResetValue = 0x01010001;
retval = wrmalt(Adapter, (UINT)0x0F007018, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x00040020;
retval = wrmalt(Adapter, (UINT)0x0F007094, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x01020101;
retval = wrmalt(Adapter, (UINT)0x0F00701c, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x01010000;
retval = wrmalt(Adapter, (UINT)0x0F007018, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
}
@@ -1026,34 +1026,34 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
{
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x1322a8;
retval = wrmalt(Adapter, (UINT)0x0f000d1c, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x132296;
retval = wrmalt(Adapter, (UINT)0x0f000d14, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
}
@@ -1062,34 +1062,34 @@ int ddr_init(struct bcm_mini_adapter *Adapter)
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x6003229a;
retval = wrmalt(Adapter, (UINT)0x0f000d14, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
retval = rdmalt(Adapter,(UINT)0x0f000c00, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
uiResetValue = 0x1322a8;
retval = wrmalt(Adapter, (UINT)0x0f000d1c, &uiResetValue, sizeof(uiResetValue));
if(retval < 0) {
- BCM_DEBUG_PRINT(Adapter,CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, CMHOST, RDM, DBG_LVL_ALL, "%s:%d RDM failed\n", __func__, __LINE__);
return retval;
}
}
@@ -1235,28 +1235,28 @@ int download_ddr_settings(struct bcm_mini_adapter *Adapter)
retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value, sizeof(value));
if(retval)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"%s:%d\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
return retval;
}
- ul_ddr_setting_load_addr+=sizeof(ULONG);
+ ul_ddr_setting_load_addr += sizeof(ULONG);
/*signature */
value =(0x1d1e0dd0);
retval = wrmalt(Adapter, ul_ddr_setting_load_addr, &value, sizeof(value));
if(retval)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"%s:%d\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
return retval;
}
- ul_ddr_setting_load_addr+=sizeof(ULONG);
+ ul_ddr_setting_load_addr += sizeof(ULONG);
RegCount*=(sizeof(struct bcm_ddr_setting)/sizeof(ULONG));
while(RegCount && !retval)
{
value = psDDRSetting->ulRegAddress ;
retval = wrmalt( Adapter, ul_ddr_setting_load_addr, &value, sizeof(value));
- ul_ddr_setting_load_addr+=sizeof(ULONG);
+ ul_ddr_setting_load_addr += sizeof(ULONG);
if(!retval)
{
if(bOverrideSelfRefresh && (psDDRSetting->ulRegAddress == 0x0F007018))
@@ -1264,7 +1264,7 @@ int download_ddr_settings(struct bcm_mini_adapter *Adapter)
value = (psDDRSetting->ulRegValue |(1<<8));
if(STATUS_SUCCESS != wrmalt(Adapter, ul_ddr_setting_load_addr,
&value, sizeof(value))){
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"%s:%d\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
break;
}
}
@@ -1274,12 +1274,12 @@ int download_ddr_settings(struct bcm_mini_adapter *Adapter)
if(STATUS_SUCCESS != wrmalt(Adapter, ul_ddr_setting_load_addr ,
&value, sizeof(value))){
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"%s:%d\n", __FUNCTION__, __LINE__);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "%s:%d\n", __func__, __LINE__);
break;
}
}
}
- ul_ddr_setting_load_addr+=sizeof(ULONG);
+ ul_ddr_setting_load_addr += sizeof(ULONG);
RegCount--;
psDDRSetting++;
}
diff --git a/drivers/staging/bcm/Ioctl.h b/drivers/staging/bcm/Ioctl.h
index e253c080a78..797f862b90c 100644
--- a/drivers/staging/bcm/Ioctl.h
+++ b/drivers/staging/bcm/Ioctl.h
@@ -175,7 +175,7 @@ struct bcm_flash2x_copy_section {
/*
* This section provide the complete bitmap of the Flash.
- * using this map lib/APP will isssue read/write command.
+ * using this map lib/APP will issue read/write command.
* Fields are defined as :
* Bit [0] = section is present //1:present, 0: Not present
* Bit [1] = section is valid //1: valid, 0: not valid
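/*
 * A small sketch of how the per-section flash bitmap described above would
 * be interpreted; the macro and helper names and the 'bits' parameter are
 * illustrative assumptions, not part of this driver:
 */
#define BCM_SECTION_PRESENT	(1 << 0)	/* Bit [0]: section is present */
#define BCM_SECTION_VALID	(1 << 1)	/* Bit [1]: section is valid */

static inline int bcm_section_usable(unsigned char bits)
{
	/* usable only when the section both exists and holds valid data */
	return (bits & BCM_SECTION_PRESENT) && (bits & BCM_SECTION_VALID);
}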
diff --git a/drivers/staging/bcm/LeakyBucket.c b/drivers/staging/bcm/LeakyBucket.c
index 877cf0b2bee..bc486163332 100644
--- a/drivers/staging/bcm/LeakyBucket.c
+++ b/drivers/staging/bcm/LeakyBucket.c
@@ -17,47 +17,42 @@
static VOID UpdateTokenCount(register struct bcm_mini_adapter *Adapter)
{
- ULONG liCurrentTime;
- INT i = 0;
+ ULONG liCurrentTime;
+ INT i = 0;
struct timeval tv;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL,
"=====>\n");
- if(NULL == Adapter)
- {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS,
+ if (NULL == Adapter) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS,
DBG_LVL_ALL, "Adapter found NULL!\n");
return;
}
do_gettimeofday(&tv);
- for(i = 0; i < NO_OF_QUEUES; i++)
- {
- if(TRUE == Adapter->PackInfo[i].bValid &&
- (1 == Adapter->PackInfo[i].ucDirection))
- {
+ for (i = 0; i < NO_OF_QUEUES; i++) {
+ if (TRUE == Adapter->PackInfo[i].bValid &&
+ (1 == Adapter->PackInfo[i].ucDirection)) {
liCurrentTime = ((tv.tv_sec-
Adapter->PackInfo[i].stLastUpdateTokenAt.tv_sec)*1000 +
(tv.tv_usec-Adapter->PackInfo[i].stLastUpdateTokenAt.tv_usec)/
1000);
- if(0!=liCurrentTime)
- {
+ if (0 != liCurrentTime) {
Adapter->PackInfo[i].uiCurrentTokenCount += (ULONG)
((Adapter->PackInfo[i].uiMaxAllowedRate) *
((ULONG)((liCurrentTime)))/1000);
memcpy(&Adapter->PackInfo[i].stLastUpdateTokenAt,
&tv, sizeof(struct timeval));
Adapter->PackInfo[i].liLastUpdateTokenAt = liCurrentTime;
- if((Adapter->PackInfo[i].uiCurrentTokenCount) >=
- Adapter->PackInfo[i].uiMaxBucketSize)
- {
+ if (Adapter->PackInfo[i].uiCurrentTokenCount >=
+ Adapter->PackInfo[i].uiMaxBucketSize) {
Adapter->PackInfo[i].uiCurrentTokenCount =
Adapter->PackInfo[i].uiMaxBucketSize;
}
}
}
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "<=====\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "<=====\n");
return;
}
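/*
 * A condensed sketch of the token-bucket refill arithmetic above, with
 * illustrative names; 'rate' is the per-second allowance and 'elapsed_ms'
 * the value computed from the two timevals:
 */
static unsigned long refill_tokens(unsigned long tokens, unsigned long rate,
				   unsigned long elapsed_ms,
				   unsigned long bucket_size)
{
	tokens += rate * elapsed_ms / 1000;	/* credit for the elapsed time */
	if (tokens > bucket_size)		/* never exceed the bucket depth */
		tokens = bucket_size;
	return tokens;
}

/*
 * A queued packet is later admitted only if its length in bits
 * (QueuePacket->len << 3) fits within the count returned by
 * GetSFTokenCount(); see CheckAndSendPacketFromIndex() below.
 */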
@@ -79,33 +74,26 @@ static VOID UpdateTokenCount(register struct bcm_mini_adapter *Adapter)
***********************************************************************/
static ULONG GetSFTokenCount(struct bcm_mini_adapter *Adapter, struct bcm_packet_info *psSF)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IsPacketAllowedForFlow ===>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IsPacketAllowedForFlow ===>");
/* Validate the parameters */
- if(NULL == Adapter || (psSF < Adapter->PackInfo &&
- (uintptr_t)psSF > (uintptr_t) &Adapter->PackInfo[HiPriority]))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IPAFF: Got wrong Parameters:Adapter: %p, QIndex: %zd\n", Adapter, (psSF-Adapter->PackInfo));
+ if (NULL == Adapter || (psSF < Adapter->PackInfo &&
+ (uintptr_t)psSF > (uintptr_t) &Adapter->PackInfo[HiPriority])) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IPAFF: Got wrong Parameters:Adapter: %p, QIndex: %zd\n", Adapter, (psSF-Adapter->PackInfo));
return 0;
}
- if(FALSE != psSF->bValid && psSF->ucDirection)
- {
- if(0 != psSF->uiCurrentTokenCount)
- {
+ if (FALSE != psSF->bValid && psSF->ucDirection) {
+ if (0 != psSF->uiCurrentTokenCount) {
return psSF->uiCurrentTokenCount;
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "Not enough tokens in queue %zd Available %u\n",
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "Not enough tokens in queue %zd Available %u\n",
psSF-Adapter->PackInfo, psSF->uiCurrentTokenCount);
psSF->uiPendedLast = 1;
}
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IPAFF: Queue %zd not valid\n", psSF-Adapter->PackInfo);
}
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IPAFF: Queue %zd not valid\n", psSF-Adapter->PackInfo);
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IsPacketAllowedForFlow <===");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TOKEN_COUNTS, DBG_LVL_ALL, "IsPacketAllowedForFlow <===");
return 0;
}
@@ -116,33 +104,29 @@ This function despatches packet from the specified queue.
*/
static INT SendPacketFromQueue(struct bcm_mini_adapter *Adapter,/**<Logical Adapter*/
struct bcm_packet_info *psSF, /**<Queue identifier*/
- struct sk_buff* Packet) /**<Pointer to the packet to be sent*/
+ struct sk_buff *Packet) /**<Pointer to the packet to be sent*/
{
- INT Status=STATUS_FAILURE;
- UINT uiIndex =0,PktLen = 0;
+ INT Status = STATUS_FAILURE;
+ UINT uiIndex = 0, PktLen = 0;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, SEND_QUEUE, DBG_LVL_ALL, "=====>");
- if(!Adapter || !Packet || !psSF)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, SEND_QUEUE, DBG_LVL_ALL, "Got NULL Adapter or Packet");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, SEND_QUEUE, DBG_LVL_ALL, "=====>");
+ if (!Adapter || !Packet || !psSF) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, SEND_QUEUE, DBG_LVL_ALL, "Got NULL Adapter or Packet");
return -EINVAL;
}
- if(psSF->liDrainCalculated==0)
- {
+ if (psSF->liDrainCalculated == 0)
psSF->liDrainCalculated = jiffies;
- }
- ///send the packet to the fifo..
+ /* send the packet to the fifo.. */
PktLen = Packet->len;
Status = SetupNextSend(Adapter, Packet, psSF->usVCID_Value);
- if(Status == 0)
- {
- for(uiIndex = 0 ; uiIndex < MIBS_MAX_HIST_ENTRIES ; uiIndex++)
- { if((PktLen <= MIBS_PKTSIZEHIST_RANGE*(uiIndex+1)) && (PktLen > MIBS_PKTSIZEHIST_RANGE*(uiIndex)))
+ if (Status == 0) {
+ for (uiIndex = 0; uiIndex < MIBS_MAX_HIST_ENTRIES; uiIndex++) {
+ if ((PktLen <= MIBS_PKTSIZEHIST_RANGE*(uiIndex+1)) && (PktLen > MIBS_PKTSIZEHIST_RANGE*(uiIndex)))
Adapter->aTxPktSizeHist[uiIndex]++;
}
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, SEND_QUEUE, DBG_LVL_ALL, "<=====");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, SEND_QUEUE, DBG_LVL_ALL, "<=====");
return Status;
}
@@ -160,107 +144,93 @@ static INT SendPacketFromQueue(struct bcm_mini_adapter *Adapter,/**<Logical Adap
****************************************************************************/
static VOID CheckAndSendPacketFromIndex(struct bcm_mini_adapter *Adapter, struct bcm_packet_info *psSF)
{
- struct sk_buff *QueuePacket=NULL;
- char *pControlPacket = NULL;
- INT Status=0;
- int iPacketLen=0;
+ struct sk_buff *QueuePacket = NULL;
+ char *pControlPacket = NULL;
+ INT Status = 0;
+ int iPacketLen = 0;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "%zd ====>", (psSF-Adapter->PackInfo));
- if((psSF != &Adapter->PackInfo[HiPriority]) && Adapter->LinkUpStatus && atomic_read(&psSF->uiPerSFTxResourceCount))//Get data packet
- {
- if(!psSF->ucDirection )
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "%zd ====>", (psSF-Adapter->PackInfo));
+ if ((psSF != &Adapter->PackInfo[HiPriority]) && Adapter->LinkUpStatus && atomic_read(&psSF->uiPerSFTxResourceCount)) { /* Get data packet */
+ if (!psSF->ucDirection)
return;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "UpdateTokenCount ");
- if(Adapter->IdleMode || Adapter->bPreparingForLowPowerMode)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "UpdateTokenCount ");
+ if (Adapter->IdleMode || Adapter->bPreparingForLowPowerMode)
return; /* in idle mode */
- // Check for Free Descriptors
- if(atomic_read(&Adapter->CurrNumFreeTxDesc) <= MINIMUM_PENDING_DESCRIPTORS)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, " No Free Tx Descriptor(%d) is available for Data pkt..",atomic_read(&Adapter->CurrNumFreeTxDesc));
- return ;
+ /* Check for Free Descriptors */
+ if (atomic_read(&Adapter->CurrNumFreeTxDesc) <= MINIMUM_PENDING_DESCRIPTORS) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, " No Free Tx Descriptor(%d) is available for Data pkt..", atomic_read(&Adapter->CurrNumFreeTxDesc));
+ return;
}
spin_lock_bh(&psSF->SFQueueLock);
- QueuePacket=psSF->FirstTxQueue;
+ QueuePacket = psSF->FirstTxQueue;
- if(QueuePacket)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Dequeuing Data Packet");
+ if (QueuePacket) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Dequeuing Data Packet");
- if(psSF->bEthCSSupport)
+ if (psSF->bEthCSSupport)
iPacketLen = QueuePacket->len;
else
iPacketLen = QueuePacket->len-ETH_HLEN;
- iPacketLen<<=3;
- if(iPacketLen <= GetSFTokenCount(Adapter, psSF))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Allowed bytes %d",
+ iPacketLen <<= 3;
+ if (iPacketLen <= GetSFTokenCount(Adapter, psSF)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Allowed bytes %d",
(iPacketLen >> 3));
- DEQUEUEPACKET(psSF->FirstTxQueue,psSF->LastTxQueue);
+ DEQUEUEPACKET(psSF->FirstTxQueue, psSF->LastTxQueue);
psSF->uiCurrentBytesOnHost -= (QueuePacket->len);
psSF->uiCurrentPacketsOnHost--;
atomic_dec(&Adapter->TotalPacketCount);
spin_unlock_bh(&psSF->SFQueueLock);
- Status = SendPacketFromQueue(Adapter, psSF, QueuePacket);
+ Status = SendPacketFromQueue(Adapter, psSF, QueuePacket);
psSF->uiPendedLast = FALSE;
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "For Queue: %zd\n", psSF-Adapter->PackInfo);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "\nAvailable Tokens = %d required = %d\n",
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "For Queue: %zd\n", psSF-Adapter->PackInfo);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "\nAvailable Tokens = %d required = %d\n",
psSF->uiCurrentTokenCount, iPacketLen);
- //this part indicates that because of non-availability of the tokens
- //pkt has not been send out hence setting the pending flag indicating the host to send it out
- //first next iteration .
+ /*
+ The packet could not be sent because no tokens were available,
+ so set the pending flag to tell the host to try this queue
+ first on the next iteration.
+ */
psSF->uiPendedLast = TRUE;
spin_unlock_bh(&psSF->SFQueueLock);
}
- }
- else
- {
+ } else {
spin_unlock_bh(&psSF->SFQueueLock);
}
- }
- else
- {
-
- if((atomic_read(&Adapter->CurrNumFreeTxDesc) > 0 ) &&
- (atomic_read(&Adapter->index_rd_txcntrlpkt) !=
- atomic_read(&Adapter->index_wr_txcntrlpkt))
- )
- {
+ } else {
+
+ if ((atomic_read(&Adapter->CurrNumFreeTxDesc) > 0) &&
+ (atomic_read(&Adapter->index_rd_txcntrlpkt) !=
+ atomic_read(&Adapter->index_wr_txcntrlpkt))) {
pControlPacket = Adapter->txctlpacket
[(atomic_read(&Adapter->index_rd_txcntrlpkt)%MAX_CNTRL_PKTS)];
- if(pControlPacket)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Sending Control packet");
+ if (pControlPacket) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Sending Control packet");
Status = SendControlPacket(Adapter, pControlPacket);
- if(STATUS_SUCCESS==Status)
- {
+ if (STATUS_SUCCESS == Status) {
spin_lock_bh(&psSF->SFQueueLock);
psSF->NumOfPacketsSent++;
- psSF->uiSentBytes+=((struct bcm_leader *)pControlPacket)->PLength;
+ psSF->uiSentBytes += ((struct bcm_leader *)pControlPacket)->PLength;
psSF->uiSentPackets++;
atomic_dec(&Adapter->TotalPacketCount);
psSF->uiCurrentBytesOnHost -= ((struct bcm_leader *)pControlPacket)->PLength;
psSF->uiCurrentPacketsOnHost--;
atomic_inc(&Adapter->index_rd_txcntrlpkt);
spin_unlock_bh(&psSF->SFQueueLock);
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "SendControlPacket Failed\n");
}
- else
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "SendControlPacket Failed\n");
- }
- else
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, " Control Pkt is not available, Indexing is wrong....");
+ } else {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, " Control Pkt is not available, Indexing is wrong....");
}
- }
+ }
}
}
@@ -277,79 +247,71 @@ static VOID CheckAndSendPacketFromIndex(struct bcm_mini_adapter *Adapter, struct
********************************************************************/
VOID transmit_packets(struct bcm_mini_adapter *Adapter)
{
- UINT uiPrevTotalCount = 0;
+ UINT uiPrevTotalCount = 0;
int iIndex = 0;
- BOOLEAN exit_flag = TRUE ;
+ BOOLEAN exit_flag = TRUE;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "=====>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "=====>");
- if(NULL == Adapter)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX,TX_PACKETS, DBG_LVL_ALL, "Got NULL Adapter");
+ if (NULL == Adapter) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Got NULL Adapter");
return;
}
- if(Adapter->device_removed == TRUE)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Device removed");
+ if (Adapter->device_removed == TRUE) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Device removed");
return;
}
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "\nUpdateTokenCount ====>\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "\nUpdateTokenCount ====>\n");
UpdateTokenCount(Adapter);
- BCM_DEBUG_PRINT (Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "\nPruneQueueAllSF ====>\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "\nPruneQueueAllSF ====>\n");
PruneQueueAllSF(Adapter);
uiPrevTotalCount = atomic_read(&Adapter->TotalPacketCount);
- for(iIndex=HiPriority;iIndex>=0;iIndex--)
- {
- if( !uiPrevTotalCount || (TRUE == Adapter->device_removed))
+ for (iIndex = HiPriority; iIndex >= 0; iIndex--) {
+ if (!uiPrevTotalCount || (TRUE == Adapter->device_removed))
break;
- if(Adapter->PackInfo[iIndex].bValid &&
- Adapter->PackInfo[iIndex].uiPendedLast &&
- Adapter->PackInfo[iIndex].uiCurrentBytesOnHost)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Calling CheckAndSendPacketFromIndex..");
+ if (Adapter->PackInfo[iIndex].bValid &&
+ Adapter->PackInfo[iIndex].uiPendedLast &&
+ Adapter->PackInfo[iIndex].uiCurrentBytesOnHost) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Calling CheckAndSendPacketFromIndex..");
CheckAndSendPacketFromIndex(Adapter, &Adapter->PackInfo[iIndex]);
uiPrevTotalCount--;
}
}
- while(uiPrevTotalCount > 0 && !Adapter->device_removed)
- {
- exit_flag = TRUE ;
- //second iteration to parse non-pending queues
- for(iIndex=HiPriority;iIndex>=0;iIndex--)
- {
- if( !uiPrevTotalCount || (TRUE == Adapter->device_removed))
- break;
-
- if(Adapter->PackInfo[iIndex].bValid &&
- Adapter->PackInfo[iIndex].uiCurrentBytesOnHost &&
- !Adapter->PackInfo[iIndex].uiPendedLast )
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Calling CheckAndSendPacketFromIndex..");
+ while (uiPrevTotalCount > 0 && !Adapter->device_removed) {
+ exit_flag = TRUE;
+ /* second iteration to parse non-pending queues */
+ for (iIndex = HiPriority; iIndex >= 0; iIndex--) {
+ if (!uiPrevTotalCount || (TRUE == Adapter->device_removed))
+ break;
+
+ if (Adapter->PackInfo[iIndex].bValid &&
+ Adapter->PackInfo[iIndex].uiCurrentBytesOnHost &&
+ !Adapter->PackInfo[iIndex].uiPendedLast) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "Calling CheckAndSendPacketFromIndex..");
CheckAndSendPacketFromIndex(Adapter, &Adapter->PackInfo[iIndex]);
uiPrevTotalCount--;
exit_flag = FALSE;
}
}
- if(Adapter->IdleMode || Adapter->bPreparingForLowPowerMode)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "In Idle Mode\n");
+ if (Adapter->IdleMode || Adapter->bPreparingForLowPowerMode) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "In Idle Mode\n");
break;
}
- if(exit_flag == TRUE )
- break ;
- }/* end of inner while loop */
+ if (exit_flag == TRUE)
+ break;
+ } /* end of inner while loop */
- update_per_cid_rx (Adapter);
+ update_per_cid_rx(Adapter);
Adapter->txtransmit_running = 0;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "<======");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_PACKETS, DBG_LVL_ALL, "<======");
}
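
Not part of the patch — a simplified illustration: the transmit_packets() flow above is a two-pass sweep over the priority queues, first the queues whose previous attempt was pended, then the remaining ones, repeated until a full sweep sends nothing. The names NUM_QUEUES and send_from_queue() are invented stand-ins for HiPriority and CheckAndSendPacketFromIndex():

	#include <stdbool.h>

	#define NUM_QUEUES 17			/* stand-in for HiPriority + 1 */

	struct queue {
		bool valid;
		bool pended_last;
		unsigned int bytes_on_host;
	};

	/* stand-in for CheckAndSendPacketFromIndex(); returns packets sent */
	static unsigned int send_from_queue(struct queue *q)
	{
		q->bytes_on_host = 0;		/* pretend everything went out */
		return 1;
	}

	void transmit_sketch(struct queue *queues, unsigned int total)
	{
		int i;
		bool sent;

		/* pass 1: highest priority first, only queues pended last time */
		for (i = NUM_QUEUES - 1; i >= 0 && total; i--)
			if (queues[i].valid && queues[i].pended_last &&
			    queues[i].bytes_on_host)
				total -= send_from_queue(&queues[i]);

		/* keep sweeping the non-pended queues until a full sweep
		 * sends nothing (exit_flag stays set in the driver) */
		while (total) {
			sent = false;
			for (i = NUM_QUEUES - 1; i >= 0 && total; i--)
				if (queues[i].valid && !queues[i].pended_last &&
				    queues[i].bytes_on_host) {
					total -= send_from_queue(&queues[i]);
					sent = true;
				}
			if (!sent)
				break;
		}
	}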
diff --git a/drivers/staging/bcm/Misc.c b/drivers/staging/bcm/Misc.c
index d23eeeb9506..4cfc2c33c69 100644
--- a/drivers/staging/bcm/Misc.c
+++ b/drivers/staging/bcm/Misc.c
@@ -210,7 +210,7 @@ exit_download:
* @ingroup ctrl_pkt_functions
* This function copies the contents of given buffer
* to the control packet and queues it for transmission.
- * @note Do not acquire the spinock, as it it already acquired.
+ * @note Do not acquire the spinlock, as it is already acquired.
* @return SUCCESS/FAILURE.
* Arguments:
* Logical Adapter
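
A sketch, separate from the patch: the @note above records a locking convention — the helper runs with the spinlock already held by its caller and must not take it again. The structure, lock, and names below are invented, with a userspace mutex standing in for the driver's spinlock:

	#include <pthread.h>
	#include <string.h>

	struct ctrl_queue {
		pthread_mutex_t lock;		/* stand-in for the driver's spinlock */
		char pkt[256];
		size_t len;
	};

	/*
	 * Copies the buffer into the control packet slot.  By convention the
	 * caller already holds q->lock, so locking again here would deadlock.
	 */
	static void copy_to_ctrl_pkt_locked(struct ctrl_queue *q,
					    const void *buf, size_t len)
	{
		if (len > sizeof(q->pkt))
			len = sizeof(q->pkt);
		memcpy(q->pkt, buf, len);
		q->len = len;
	}

	void queue_ctrl_pkt(struct ctrl_queue *q, const void *buf, size_t len)
	{
		pthread_mutex_lock(&q->lock);	/* lock taken by the caller ... */
		copy_to_ctrl_pkt_locked(q, buf, len);
		pthread_mutex_unlock(&q->lock);	/* ... and released by the caller */
	}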
diff --git a/drivers/staging/bcm/Qos.c b/drivers/staging/bcm/Qos.c
index 8d142a547e7..2d4a77cca91 100644
--- a/drivers/staging/bcm/Qos.c
+++ b/drivers/staging/bcm/Qos.c
@@ -4,11 +4,11 @@ This file contains the routines related to Quality of Service.
*/
#include "headers.h"
-static void EThCSGetPktInfo(struct bcm_mini_adapter *Adapter,PVOID pvEthPayload, struct bcm_eth_packet_info *pstEthCsPktInfo);
-static BOOLEAN EThCSClassifyPkt(struct bcm_mini_adapter *Adapter,struct sk_buff* skb, struct bcm_eth_packet_info *pstEthCsPktInfo,struct bcm_classifier_rule *pstClassifierRule, B_UINT8 EthCSCupport);
+static void EThCSGetPktInfo(struct bcm_mini_adapter *Adapter, PVOID pvEthPayload, struct bcm_eth_packet_info *pstEthCsPktInfo);
+static BOOLEAN EThCSClassifyPkt(struct bcm_mini_adapter *Adapter, struct sk_buff* skb, struct bcm_eth_packet_info *pstEthCsPktInfo, struct bcm_classifier_rule *pstClassifierRule, B_UINT8 EthCSCupport);
static USHORT IpVersion4(struct bcm_mini_adapter *Adapter, struct iphdr *iphd,
- struct bcm_classifier_rule *pstClassifierRule );
+ struct bcm_classifier_rule *pstClassifierRule);
static VOID PruneQueue(struct bcm_mini_adapter *Adapter, INT iIndex);
@@ -20,30 +20,30 @@ static VOID PruneQueue(struct bcm_mini_adapter *Adapter, INT iIndex);
* matches with that of Queue.
*
* Parameters - pstClassifierRule: Pointer to the packet info structure.
-* - ulSrcIP : Source IP address from the packet.
+* - ulSrcIP : Source IP address from the packet.
*
* Returns - TRUE(If address matches) else FAIL .
*********************************************************************/
-BOOLEAN MatchSrcIpAddress(struct bcm_classifier_rule *pstClassifierRule,ULONG ulSrcIP)
+BOOLEAN MatchSrcIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulSrcIP)
{
- UCHAR ucLoopIndex=0;
-
- struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
-
- ulSrcIP=ntohl(ulSrcIP);
- if(0 == pstClassifierRule->ucIPSourceAddressLength)
- return TRUE;
- for(ucLoopIndex=0; ucLoopIndex < (pstClassifierRule->ucIPSourceAddressLength);ucLoopIndex++)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Src Ip Address Mask:0x%x PacketIp:0x%x and Classification:0x%x", (UINT)pstClassifierRule->stSrcIpAddress.ulIpv4Mask[ucLoopIndex], (UINT)ulSrcIP, (UINT)pstClassifierRule->stSrcIpAddress.ulIpv6Addr[ucLoopIndex]);
- if((pstClassifierRule->stSrcIpAddress.ulIpv4Mask[ucLoopIndex] & ulSrcIP)==
- (pstClassifierRule->stSrcIpAddress.ulIpv4Addr[ucLoopIndex] & pstClassifierRule->stSrcIpAddress.ulIpv4Mask[ucLoopIndex] ))
- {
- return TRUE;
- }
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Src Ip Address Not Matched");
- return FALSE;
+ UCHAR ucLoopIndex = 0;
+
+ struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
+
+ ulSrcIP = ntohl(ulSrcIP);
+ if (0 == pstClassifierRule->ucIPSourceAddressLength)
+ return TRUE;
+ for (ucLoopIndex = 0; ucLoopIndex < (pstClassifierRule->ucIPSourceAddressLength); ucLoopIndex++)
+ {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Src Ip Address Mask:0x%x PacketIp:0x%x and Classification:0x%x", (UINT)pstClassifierRule->stSrcIpAddress.ulIpv4Mask[ucLoopIndex], (UINT)ulSrcIP, (UINT)pstClassifierRule->stSrcIpAddress.ulIpv6Addr[ucLoopIndex]);
+ if ((pstClassifierRule->stSrcIpAddress.ulIpv4Mask[ucLoopIndex] & ulSrcIP) ==
+ (pstClassifierRule->stSrcIpAddress.ulIpv4Addr[ucLoopIndex] & pstClassifierRule->stSrcIpAddress.ulIpv4Mask[ucLoopIndex]))
+ {
+ return TRUE;
+ }
+ }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Src Ip Address Not Matched");
+ return FALSE;
}
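
The test in MatchSrcIpAddress() is the standard masked-address comparison: a packet matches when it agrees with the rule on every bit selected by the mask. A standalone example (addresses chosen only for illustration), separate from the patch:

	#include <stdint.h>
	#include <stdio.h>

	/* 1 when (pkt & mask) == (rule & mask), i.e. the packet address falls
	 * inside the classifier entry's masked range */
	static int masked_match(uint32_t pkt, uint32_t rule, uint32_t mask)
	{
		return (pkt & mask) == (rule & mask);
	}

	int main(void)
	{
		uint32_t rule = 0xC0A80100;	/* 192.168.1.0  */
		uint32_t mask = 0xFFFFFF00;	/* /24 netmask  */

		printf("%d\n", masked_match(0xC0A80142, rule, mask)); /* 192.168.1.66 -> 1 */
		printf("%d\n", masked_match(0xC0A80242, rule, mask)); /* 192.168.2.66 -> 0 */
		return 0;
	}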
@@ -54,30 +54,30 @@ BOOLEAN MatchSrcIpAddress(struct bcm_classifier_rule *pstClassifierRule,ULONG ul
* matches with that of Queue.
*
* Parameters - pstClassifierRule: Pointer to the packet info structure.
-* - ulDestIP : Destination IP address from the packet.
+* - ulDestIP : Destination IP address from the packet.
*
* Returns - TRUE(If address matches) else FAIL .
*********************************************************************/
-BOOLEAN MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule,ULONG ulDestIP)
+BOOLEAN MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule, ULONG ulDestIP)
{
- UCHAR ucLoopIndex=0;
+ UCHAR ucLoopIndex = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- ulDestIP=ntohl(ulDestIP);
- if(0 == pstClassifierRule->ucIPDestinationAddressLength)
- return TRUE;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination Ip Address 0x%x 0x%x 0x%x ", (UINT)ulDestIP, (UINT)pstClassifierRule->stDestIpAddress.ulIpv4Mask[ucLoopIndex], (UINT)pstClassifierRule->stDestIpAddress.ulIpv4Addr[ucLoopIndex]);
-
- for(ucLoopIndex=0;ucLoopIndex<(pstClassifierRule->ucIPDestinationAddressLength);ucLoopIndex++)
- {
- if((pstClassifierRule->stDestIpAddress.ulIpv4Mask[ucLoopIndex] & ulDestIP)==
- (pstClassifierRule->stDestIpAddress.ulIpv4Addr[ucLoopIndex] & pstClassifierRule->stDestIpAddress.ulIpv4Mask[ucLoopIndex]))
- {
- return TRUE;
- }
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination Ip Address Not Matched");
- return FALSE;
+ ulDestIP = ntohl(ulDestIP);
+ if (0 == pstClassifierRule->ucIPDestinationAddressLength)
+ return TRUE;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination Ip Address 0x%x 0x%x 0x%x ", (UINT)ulDestIP, (UINT)pstClassifierRule->stDestIpAddress.ulIpv4Mask[ucLoopIndex], (UINT)pstClassifierRule->stDestIpAddress.ulIpv4Addr[ucLoopIndex]);
+
+ for (ucLoopIndex = 0; ucLoopIndex < (pstClassifierRule->ucIPDestinationAddressLength); ucLoopIndex++)
+ {
+ if ((pstClassifierRule->stDestIpAddress.ulIpv4Mask[ucLoopIndex] & ulDestIP) ==
+ (pstClassifierRule->stDestIpAddress.ulIpv4Addr[ucLoopIndex] & pstClassifierRule->stDestIpAddress.ulIpv4Mask[ucLoopIndex]))
+ {
+ return TRUE;
+ }
+ }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination Ip Address Not Matched");
+ return FALSE;
}
@@ -87,23 +87,23 @@ BOOLEAN MatchDestIpAddress(struct bcm_classifier_rule *pstClassifierRule,ULONG u
* Description - Checks the TOS from the packet matches with that of queue.
*
* Parameters - pstClassifierRule : Pointer to the packet info structure.
-* - ucTypeOfService: TOS from the packet.
+* - ucTypeOfService: TOS from the packet.
*
* Returns - TRUE(If address matches) else FAIL.
**************************************************************************/
-BOOLEAN MatchTos(struct bcm_classifier_rule *pstClassifierRule,UCHAR ucTypeOfService)
+BOOLEAN MatchTos(struct bcm_classifier_rule *pstClassifierRule, UCHAR ucTypeOfService)
{
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- if( 3 != pstClassifierRule->ucIPTypeOfServiceLength )
- return TRUE;
-
- if(((pstClassifierRule->ucTosMask & ucTypeOfService)<=pstClassifierRule->ucTosHigh) && ((pstClassifierRule->ucTosMask & ucTypeOfService)>=pstClassifierRule->ucTosLow))
- {
- return TRUE;
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Type Of Service Not Matched");
- return FALSE;
+ if (3 != pstClassifierRule->ucIPTypeOfServiceLength)
+ return TRUE;
+
+ if (((pstClassifierRule->ucTosMask & ucTypeOfService) <= pstClassifierRule->ucTosHigh) && ((pstClassifierRule->ucTosMask & ucTypeOfService) >= pstClassifierRule->ucTosLow))
+ {
+ return TRUE;
+ }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Type Of Service Not Matched");
+ return FALSE;
}
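
For reference, not part of the patch: MatchTos() masks the packet's TOS byte and then checks that the result lies inside the classifier's [ucTosLow, ucTosHigh] window. A standalone example with an arbitrary mask and bounds:

	#include <stdio.h>

	/* the TOS rule matches when the masked byte lies in [low, high] */
	static int tos_match(unsigned char tos, unsigned char mask,
			     unsigned char low, unsigned char high)
	{
		unsigned char v = tos & mask;

		return v >= low && v <= high;
	}

	int main(void)
	{
		/* arbitrary rule: mask 0xFC keeps the DSCP bits, accept 0x28..0x38 */
		printf("%d\n", tos_match(0x28, 0xFC, 0x28, 0x38));	/* 1 */
		printf("%d\n", tos_match(0xB8, 0xFC, 0x28, 0x38));	/* 0 */
		return 0;
	}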
@@ -113,26 +113,26 @@ BOOLEAN MatchTos(struct bcm_classifier_rule *pstClassifierRule,UCHAR ucTypeOfSer
* Description - Checks the protocol from the packet matches with that of queue.
*
* Parameters - pstClassifierRule: Pointer to the packet info structure.
-* - ucProtocol : Protocol from the packet.
+* - ucProtocol : Protocol from the packet.
*
* Returns - TRUE(If address matches) else FAIL.
****************************************************************************/
-bool MatchProtocol(struct bcm_classifier_rule *pstClassifierRule,UCHAR ucProtocol)
+bool MatchProtocol(struct bcm_classifier_rule *pstClassifierRule, UCHAR ucProtocol)
{
- UCHAR ucLoopIndex=0;
+ UCHAR ucLoopIndex = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- if(0 == pstClassifierRule->ucProtocolLength)
- return TRUE;
- for(ucLoopIndex=0;ucLoopIndex<pstClassifierRule->ucProtocolLength;ucLoopIndex++)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Protocol:0x%X Classification Protocol:0x%X",ucProtocol,pstClassifierRule->ucProtocol[ucLoopIndex]);
- if(pstClassifierRule->ucProtocol[ucLoopIndex]==ucProtocol)
- {
- return TRUE;
- }
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Protocol Not Matched");
- return FALSE;
+ if (0 == pstClassifierRule->ucProtocolLength)
+ return TRUE;
+ for (ucLoopIndex = 0; ucLoopIndex < pstClassifierRule->ucProtocolLength; ucLoopIndex++)
+ {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Protocol:0x%X Classification Protocol:0x%X", ucProtocol, pstClassifierRule->ucProtocol[ucLoopIndex]);
+ if (pstClassifierRule->ucProtocol[ucLoopIndex] == ucProtocol)
+ {
+ return TRUE;
+ }
+ }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Protocol Not Matched");
+ return FALSE;
}
@@ -142,29 +142,29 @@ bool MatchProtocol(struct bcm_classifier_rule *pstClassifierRule,UCHAR ucProtoco
* Description - Checks, Source port from the packet matches with that of queue.
*
* Parameters - pstClassifierRule: Pointer to the packet info structure.
-* - ushSrcPort : Source port from the packet.
+* - ushSrcPort : Source port from the packet.
*
* Returns - TRUE(If address matches) else FAIL.
***************************************************************************/
-bool MatchSrcPort(struct bcm_classifier_rule *pstClassifierRule,USHORT ushSrcPort)
+bool MatchSrcPort(struct bcm_classifier_rule *pstClassifierRule, USHORT ushSrcPort)
{
- UCHAR ucLoopIndex=0;
+ UCHAR ucLoopIndex = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- if(0 == pstClassifierRule->ucSrcPortRangeLength)
- return TRUE;
- for(ucLoopIndex=0;ucLoopIndex<pstClassifierRule->ucSrcPortRangeLength;ucLoopIndex++)
- {
- if(ushSrcPort <= pstClassifierRule->usSrcPortRangeHi[ucLoopIndex] &&
- ushSrcPort >= pstClassifierRule->usSrcPortRangeLo[ucLoopIndex])
- {
- return TRUE;
- }
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Src Port: %x Not Matched ",ushSrcPort);
- return FALSE;
+ if (0 == pstClassifierRule->ucSrcPortRangeLength)
+ return TRUE;
+ for (ucLoopIndex = 0; ucLoopIndex < pstClassifierRule->ucSrcPortRangeLength; ucLoopIndex++)
+ {
+ if (ushSrcPort <= pstClassifierRule->usSrcPortRangeHi[ucLoopIndex] &&
+ ushSrcPort >= pstClassifierRule->usSrcPortRangeLo[ucLoopIndex])
+ {
+ return TRUE;
+ }
+ }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Src Port: %x Not Matched ", ushSrcPort);
+ return FALSE;
}
@@ -174,30 +174,30 @@ bool MatchSrcPort(struct bcm_classifier_rule *pstClassifierRule,USHORT ushSrcPor
* Description - Checks, Destination port from packet matches with that of queue.
*
* Parameters - pstClassifierRule: Pointer to the packet info structure.
-* - ushDestPort : Destination port from the packet.
+* - ushDestPort : Destination port from the packet.
*
* Returns - TRUE(If address matches) else FAIL.
***************************************************************************/
-bool MatchDestPort(struct bcm_classifier_rule *pstClassifierRule,USHORT ushDestPort)
+bool MatchDestPort(struct bcm_classifier_rule *pstClassifierRule, USHORT ushDestPort)
{
- UCHAR ucLoopIndex=0;
+ UCHAR ucLoopIndex = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- if(0 == pstClassifierRule->ucDestPortRangeLength)
- return TRUE;
-
- for(ucLoopIndex=0;ucLoopIndex<pstClassifierRule->ucDestPortRangeLength;ucLoopIndex++)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Matching Port:0x%X 0x%X 0x%X",ushDestPort,pstClassifierRule->usDestPortRangeLo[ucLoopIndex],pstClassifierRule->usDestPortRangeHi[ucLoopIndex]);
-
- if(ushDestPort <= pstClassifierRule->usDestPortRangeHi[ucLoopIndex] &&
- ushDestPort >= pstClassifierRule->usDestPortRangeLo[ucLoopIndex])
- {
- return TRUE;
- }
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Dest Port: %x Not Matched",ushDestPort);
- return FALSE;
+ if (0 == pstClassifierRule->ucDestPortRangeLength)
+ return TRUE;
+
+ for (ucLoopIndex = 0; ucLoopIndex < pstClassifierRule->ucDestPortRangeLength; ucLoopIndex++)
+ {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Matching Port:0x%X 0x%X 0x%X", ushDestPort, pstClassifierRule->usDestPortRangeLo[ucLoopIndex], pstClassifierRule->usDestPortRangeHi[ucLoopIndex]);
+
+ if (ushDestPort <= pstClassifierRule->usDestPortRangeHi[ucLoopIndex] &&
+ ushDestPort >= pstClassifierRule->usDestPortRangeLo[ucLoopIndex])
+ {
+ return TRUE;
+ }
+ }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Dest Port: %x Not Matched", ushDestPort);
+ return FALSE;
}
/**
@ingroup tx_functions
@@ -209,95 +209,95 @@ static USHORT IpVersion4(struct bcm_mini_adapter *Adapter,
struct bcm_classifier_rule *pstClassifierRule)
{
struct bcm_transport_header *xprt_hdr = NULL;
- BOOLEAN bClassificationSucceed=FALSE;
+ BOOLEAN bClassificationSucceed = FALSE;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "========>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "========>");
- xprt_hdr=(struct bcm_transport_header *)((PUCHAR)iphd + sizeof(struct iphdr));
+ xprt_hdr = (struct bcm_transport_header *)((PUCHAR)iphd + sizeof(struct iphdr));
do {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Trying to see Direction = %d %d",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Trying to see Direction = %d %d",
pstClassifierRule->ucDirection,
pstClassifierRule->usVCID_Value);
//Checking classifier validity
- if(!pstClassifierRule->bUsed || pstClassifierRule->ucDirection == DOWNLINK_DIR)
+ if (!pstClassifierRule->bUsed || pstClassifierRule->ucDirection == DOWNLINK_DIR)
{
bClassificationSucceed = FALSE;
break;
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "is IPv6 check!");
- if(pstClassifierRule->bIpv6Protocol)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "is IPv6 check!");
+ if (pstClassifierRule->bIpv6Protocol)
break;
//**************Checking IP header parameter**************************//
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Trying to match Source IP Address");
- if(FALSE == (bClassificationSucceed =
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Trying to match Source IP Address");
+ if (FALSE == (bClassificationSucceed =
MatchSrcIpAddress(pstClassifierRule, iphd->saddr)))
break;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Source IP Address Matched");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Source IP Address Matched");
- if(FALSE == (bClassificationSucceed =
+ if (FALSE == (bClassificationSucceed =
MatchDestIpAddress(pstClassifierRule, iphd->daddr)))
break;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination IP Address Matched");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination IP Address Matched");
- if(FALSE == (bClassificationSucceed =
+ if (FALSE == (bClassificationSucceed =
MatchTos(pstClassifierRule, iphd->tos)))
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "TOS Match failed\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "TOS Match failed\n");
break;
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "TOS Matched");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "TOS Matched");
- if(FALSE == (bClassificationSucceed =
- MatchProtocol(pstClassifierRule,iphd->protocol)))
+ if (FALSE == (bClassificationSucceed =
+ MatchProtocol(pstClassifierRule, iphd->protocol)))
break;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Protocol Matched");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Protocol Matched");
//if protocol is not TCP or UDP then no need of comparing source port and destination port
- if(iphd->protocol!=TCP && iphd->protocol!=UDP)
+ if (iphd->protocol != TCP && iphd->protocol != UDP)
break;
//******************Checking Transport Layer Header field if present *****************//
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Source Port %04x",
- (iphd->protocol==UDP)?xprt_hdr->uhdr.source:xprt_hdr->thdr.source);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Source Port %04x",
+ (iphd->protocol == UDP) ? xprt_hdr->uhdr.source : xprt_hdr->thdr.source);
- if(FALSE == (bClassificationSucceed =
+ if (FALSE == (bClassificationSucceed =
MatchSrcPort(pstClassifierRule,
- ntohs((iphd->protocol == UDP)?
- xprt_hdr->uhdr.source:xprt_hdr->thdr.source))))
+ ntohs((iphd->protocol == UDP) ?
+ xprt_hdr->uhdr.source : xprt_hdr->thdr.source))))
break;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Src Port Matched");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Src Port Matched");
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination Port %04x",
- (iphd->protocol==UDP)?xprt_hdr->uhdr.dest:
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Destination Port %04x",
+ (iphd->protocol == UDP) ? xprt_hdr->uhdr.dest :
xprt_hdr->thdr.dest);
- if(FALSE == (bClassificationSucceed =
+ if (FALSE == (bClassificationSucceed =
MatchDestPort(pstClassifierRule,
- ntohs((iphd->protocol == UDP)?
- xprt_hdr->uhdr.dest:xprt_hdr->thdr.dest))))
+ ntohs((iphd->protocol == UDP) ?
+ xprt_hdr->uhdr.dest : xprt_hdr->thdr.dest))))
break;
- } while(0);
+ } while (0);
- if(TRUE==bClassificationSucceed)
+ if (TRUE == bClassificationSucceed)
{
INT iMatchedSFQueueIndex = 0;
- iMatchedSFQueueIndex = SearchSfid(Adapter,pstClassifierRule->ulSFID);
- if(iMatchedSFQueueIndex >= NO_OF_QUEUES)
+ iMatchedSFQueueIndex = SearchSfid(Adapter, pstClassifierRule->ulSFID);
+ if (iMatchedSFQueueIndex >= NO_OF_QUEUES)
{
bClassificationSucceed = FALSE;
}
else
{
- if(FALSE == Adapter->PackInfo[iMatchedSFQueueIndex].bActive)
+ if (FALSE == Adapter->PackInfo[iMatchedSFQueueIndex].bActive)
{
bClassificationSucceed = FALSE;
}
}
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "IpVersion4 <==========");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "IpVersion4 <==========");
return bClassificationSucceed;
}
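
IpVersion4() relies on the do { ... } while (0) idiom so that any failed match can break out of the cascade without goto; classification succeeds only if every check falls through. Reduced to its bare shape — the rule type and match functions below are placeholders, not the driver's:

	#include <stdbool.h>

	struct rule;				/* opaque for the sketch */

	static bool match_src(const struct rule *r) { (void)r; return true; }
	static bool match_dst(const struct rule *r) { (void)r; return true; }
	static bool match_tos(const struct rule *r) { (void)r; return true; }

	bool classify(const struct rule *r)
	{
		bool ok = false;

		do {
			ok = match_src(r);
			if (!ok)
				break;		/* first failed check ends the cascade */
			ok = match_dst(r);
			if (!ok)
				break;
			ok = match_tos(r);
		} while (0);			/* the "loop" runs exactly once */

		return ok;
	}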
@@ -306,9 +306,9 @@ VOID PruneQueueAllSF(struct bcm_mini_adapter *Adapter)
{
UINT iIndex = 0;
- for(iIndex = 0; iIndex < HiPriority; iIndex++)
+ for (iIndex = 0; iIndex < HiPriority; iIndex++)
{
- if(!Adapter->PackInfo[iIndex].bValid)
+ if (!Adapter->PackInfo[iIndex].bValid)
continue;
PruneQueue(Adapter, iIndex);
@@ -325,15 +325,15 @@ less than or equal to max queue size for the queue.
*/
static VOID PruneQueue(struct bcm_mini_adapter *Adapter, INT iIndex)
{
- struct sk_buff* PacketToDrop=NULL;
+ struct sk_buff* PacketToDrop = NULL;
struct net_device_stats *netstats;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "=====> Index %d",iIndex);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "=====> Index %d", iIndex);
- if(iIndex == HiPriority)
+ if (iIndex == HiPriority)
return;
- if(!Adapter || (iIndex < 0) || (iIndex > HiPriority))
+ if (!Adapter || (iIndex < 0) || (iIndex > HiPriority))
return;
/* To Store the netdevice statistic */
@@ -341,26 +341,26 @@ static VOID PruneQueue(struct bcm_mini_adapter *Adapter, INT iIndex)
spin_lock_bh(&Adapter->PackInfo[iIndex].SFQueueLock);
- while(1)
+ while (1)
// while((UINT)Adapter->PackInfo[iIndex].uiCurrentPacketsOnHost >
// SF_MAX_ALLOWED_PACKETS_TO_BACKUP)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "uiCurrentBytesOnHost:%x uiMaxBucketSize :%x",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "uiCurrentBytesOnHost:%x uiMaxBucketSize :%x",
Adapter->PackInfo[iIndex].uiCurrentBytesOnHost,
Adapter->PackInfo[iIndex].uiMaxBucketSize);
PacketToDrop = Adapter->PackInfo[iIndex].FirstTxQueue;
- if(PacketToDrop == NULL)
+ if (PacketToDrop == NULL)
break;
- if((Adapter->PackInfo[iIndex].uiCurrentPacketsOnHost < SF_MAX_ALLOWED_PACKETS_TO_BACKUP) &&
+ if ((Adapter->PackInfo[iIndex].uiCurrentPacketsOnHost < SF_MAX_ALLOWED_PACKETS_TO_BACKUP) &&
((1000*(jiffies - *((B_UINT32 *)(PacketToDrop->cb)+SKB_CB_LATENCY_OFFSET))/HZ) <= Adapter->PackInfo[iIndex].uiMaxLatency))
break;
- if(PacketToDrop)
+ if (PacketToDrop)
{
if (netif_msg_tx_err(Adapter))
- pr_info(PFX "%s: tx queue %d overlimit\n",
+ pr_info(PFX "%s: tx queue %d overlimit\n",
Adapter->dev->name, iIndex);
netstats->tx_dropped++;
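
Aside, not part of the patch: the latency test in this hunk converts a jiffies delta to milliseconds by hand, 1000 * (jiffies - enqueue_time) / HZ (the kernel also provides jiffies_to_msecs() for the same conversion). The arithmetic, with an example tick rate:

	#include <stdio.h>

	#define HZ 250		/* example tick rate; in the kernel this is a config value */

	/* packet age in milliseconds from two tick counts */
	static unsigned long age_ms(unsigned long now, unsigned long enqueued)
	{
		return 1000UL * (now - enqueued) / HZ;
	}

	int main(void)
	{
		unsigned long age = age_ms(1050, 1000);		/* 50 ticks at 250 Hz */

		printf("age=%lums drop=%d\n", age, age > 100);	/* 200 ms, over a 100 ms cap */
		return 0;
	}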
@@ -378,7 +378,7 @@ static VOID PruneQueue(struct bcm_mini_adapter *Adapter, INT iIndex)
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "Dropped Bytes:%x Dropped Packets:%x",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "Dropped Bytes:%x Dropped Packets:%x",
Adapter->PackInfo[iIndex].uiDroppedCountBytes,
Adapter->PackInfo[iIndex].uiDroppedCountPackets);
@@ -387,29 +387,29 @@ static VOID PruneQueue(struct bcm_mini_adapter *Adapter, INT iIndex)
spin_unlock_bh(&Adapter->PackInfo[iIndex].SFQueueLock);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "TotalPacketCount:%x",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "TotalPacketCount:%x",
atomic_read(&Adapter->TotalPacketCount));
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "<=====");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, PRUNE_QUEUE, DBG_LVL_ALL, "<=====");
}
VOID flush_all_queues(struct bcm_mini_adapter *Adapter)
{
INT iQIndex;
UINT uiTotalPacketLength;
- struct sk_buff* PacketToDrop=NULL;
+ struct sk_buff* PacketToDrop = NULL;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "=====>");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "=====>");
// down(&Adapter->data_packet_queue_lock);
- for(iQIndex=LowPriority; iQIndex<HiPriority; iQIndex++)
+ for (iQIndex = LowPriority; iQIndex < HiPriority; iQIndex++)
{
struct net_device_stats *netstats = &Adapter->dev->stats;
spin_lock_bh(&Adapter->PackInfo[iQIndex].SFQueueLock);
- while(Adapter->PackInfo[iQIndex].FirstTxQueue)
+ while (Adapter->PackInfo[iQIndex].FirstTxQueue)
{
PacketToDrop = Adapter->PackInfo[iQIndex].FirstTxQueue;
- if(PacketToDrop)
+ if (PacketToDrop)
{
uiTotalPacketLength = PacketToDrop->len;
netstats->tx_dropped++;
@@ -431,7 +431,7 @@ VOID flush_all_queues(struct bcm_mini_adapter *Adapter)
Adapter->PackInfo[iQIndex].uiDroppedCountBytes += uiTotalPacketLength;
Adapter->PackInfo[iQIndex].uiDroppedCountPackets++;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Dropped Bytes:%x Dropped Packets:%x",
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "Dropped Bytes:%x Dropped Packets:%x",
Adapter->PackInfo[iQIndex].uiDroppedCountBytes,
Adapter->PackInfo[iQIndex].uiDroppedCountPackets);
atomic_dec(&Adapter->TotalPacketCount);
@@ -439,30 +439,30 @@ VOID flush_all_queues(struct bcm_mini_adapter *Adapter)
spin_unlock_bh(&Adapter->PackInfo[iQIndex].SFQueueLock);
}
// up(&Adapter->data_packet_queue_lock);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "<=====");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, DUMP_INFO, DBG_LVL_ALL, "<=====");
}
-USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter,struct sk_buff* skb)
+USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter, struct sk_buff* skb)
{
- INT uiLoopIndex=0;
+ INT uiLoopIndex = 0;
struct bcm_classifier_rule *pstClassifierRule = NULL;
struct bcm_eth_packet_info stEthCsPktInfo;
PVOID pvEThPayload = NULL;
- struct iphdr *pIpHeader = NULL;
- INT uiSfIndex=0;
- USHORT usIndex=Adapter->usBestEffortQueueIndex;
- BOOLEAN bFragmentedPkt=FALSE,bClassificationSucceed=FALSE;
- USHORT usCurrFragment =0;
+ struct iphdr *pIpHeader = NULL;
+ INT uiSfIndex = 0;
+ USHORT usIndex = Adapter->usBestEffortQueueIndex;
+ BOOLEAN bFragmentedPkt = FALSE, bClassificationSucceed = FALSE;
+ USHORT usCurrFragment = 0;
struct bcm_tcp_header *pTcpHeader;
UCHAR IpHeaderLength;
UCHAR TcpHeaderLength;
pvEThPayload = skb->data;
- *((UINT32*) (skb->cb) +SKB_CB_TCPACK_OFFSET ) = 0;
- EThCSGetPktInfo(Adapter,pvEThPayload,&stEthCsPktInfo);
+ *((UINT32*) (skb->cb) +SKB_CB_TCPACK_OFFSET) = 0;
+ EThCSGetPktInfo(Adapter, pvEThPayload, &stEthCsPktInfo);
- switch(stEthCsPktInfo.eNwpktEthFrameType)
+ switch (stEthCsPktInfo.eNwpktEthFrameType)
{
case eEth802LLCFrame:
{
@@ -497,75 +497,75 @@ USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter,struct sk_buff* skb)
}
}
- if(stEthCsPktInfo.eNwpktIPFrameType == eIPv4Packet)
+ if (stEthCsPktInfo.eNwpktIPFrameType == eIPv4Packet)
{
usCurrFragment = (ntohs(pIpHeader->frag_off) & IP_OFFSET);
- if((ntohs(pIpHeader->frag_off) & IP_MF) || usCurrFragment)
+ if ((ntohs(pIpHeader->frag_off) & IP_MF) || usCurrFragment)
bFragmentedPkt = TRUE;
- if(bFragmentedPkt)
+ if (bFragmentedPkt)
{
//Fragmented Packet. Get Frag Classifier Entry.
- pstClassifierRule = GetFragIPClsEntry(Adapter,pIpHeader->id, pIpHeader->saddr);
- if(pstClassifierRule)
+ pstClassifierRule = GetFragIPClsEntry(Adapter, pIpHeader->id, pIpHeader->saddr);
+ if (pstClassifierRule)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL,"It is next Fragmented pkt");
- bClassificationSucceed=TRUE;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "It is next Fragmented pkt");
+ bClassificationSucceed = TRUE;
}
- if(!(ntohs(pIpHeader->frag_off) & IP_MF))
+ if (!(ntohs(pIpHeader->frag_off) & IP_MF))
{
//Fragmented Last packet . Remove Frag Classifier Entry
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL,"This is the last fragmented Pkt");
- DelFragIPClsEntry(Adapter,pIpHeader->id, pIpHeader->saddr);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "This is the last fragmented Pkt");
+ DelFragIPClsEntry(Adapter, pIpHeader->id, pIpHeader->saddr);
}
}
}
- for(uiLoopIndex = MAX_CLASSIFIERS - 1; uiLoopIndex >= 0; uiLoopIndex--)
+ for (uiLoopIndex = MAX_CLASSIFIERS - 1; uiLoopIndex >= 0; uiLoopIndex--)
{
- if(bClassificationSucceed)
+ if (bClassificationSucceed)
break;
//Iterate through all classifiers which are already in order of priority
//to classify the packet until match found
do
{
- if(FALSE==Adapter->astClassifierTable[uiLoopIndex].bUsed)
+ if (FALSE == Adapter->astClassifierTable[uiLoopIndex].bUsed)
{
- bClassificationSucceed=FALSE;
+ bClassificationSucceed = FALSE;
break;
}
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Adapter->PackInfo[%d].bvalid=True\n",uiLoopIndex);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Adapter->PackInfo[%d].bvalid=True\n", uiLoopIndex);
- if(0 == Adapter->astClassifierTable[uiLoopIndex].ucDirection)
+ if (0 == Adapter->astClassifierTable[uiLoopIndex].ucDirection)
{
- bClassificationSucceed=FALSE;//cannot be processed for classification.
+ bClassificationSucceed = FALSE;//cannot be processed for classification.
break; // it is a down link connection
}
pstClassifierRule = &Adapter->astClassifierTable[uiLoopIndex];
- uiSfIndex = SearchSfid(Adapter,pstClassifierRule->ulSFID);
+ uiSfIndex = SearchSfid(Adapter, pstClassifierRule->ulSFID);
if (uiSfIndex >= NO_OF_QUEUES) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Queue Not Valid. SearchSfid for this classifier Failed\n");
break;
}
- if(Adapter->PackInfo[uiSfIndex].bEthCSSupport)
+ if (Adapter->PackInfo[uiSfIndex].bEthCSSupport)
{
- if(eEthUnsupportedFrame==stEthCsPktInfo.eNwpktEthFrameType)
+ if (eEthUnsupportedFrame == stEthCsPktInfo.eNwpktEthFrameType)
{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, " ClassifyPacket : Packet Not a Valid Supported Ethernet Frame \n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, " ClassifyPacket : Packet Not a Valid Supported Ethernet Frame\n");
bClassificationSucceed = FALSE;
break;
}
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Performing ETH CS Classification on Classifier Rule ID : %x Service Flow ID : %lx\n",pstClassifierRule->uiClassifierRuleIndex,Adapter->PackInfo[uiSfIndex].ulSFID);
- bClassificationSucceed = EThCSClassifyPkt(Adapter,skb,&stEthCsPktInfo,pstClassifierRule, Adapter->PackInfo[uiSfIndex].bEthCSSupport);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Performing ETH CS Classification on Classifier Rule ID : %x Service Flow ID : %lx\n", pstClassifierRule->uiClassifierRuleIndex, Adapter->PackInfo[uiSfIndex].ulSFID);
+ bClassificationSucceed = EThCSClassifyPkt(Adapter, skb, &stEthCsPktInfo, pstClassifierRule, Adapter->PackInfo[uiSfIndex].bEthCSSupport);
- if(!bClassificationSucceed)
+ if (!bClassificationSucceed)
{
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "ClassifyPacket : Ethernet CS Classification Failed\n");
break;
@@ -574,9 +574,9 @@ USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter,struct sk_buff* skb)
else // No ETH Supported on this SF
{
- if(eEthOtherFrame != stEthCsPktInfo.eNwpktEthFrameType)
+ if (eEthOtherFrame != stEthCsPktInfo.eNwpktEthFrameType)
{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, " ClassifyPacket : Packet Not a 802.3 Ethernet Frame... hence not allowed over non-ETH CS SF \n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, " ClassifyPacket : Packet Not a 802.3 Ethernet Frame... hence not allowed over non-ETH CS SF\n");
bClassificationSucceed = FALSE;
break;
}
@@ -584,51 +584,51 @@ USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter,struct sk_buff* skb)
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Proceeding to IP CS Clasification");
- if(Adapter->PackInfo[uiSfIndex].bIPCSSupport)
+ if (Adapter->PackInfo[uiSfIndex].bIPCSSupport)
{
- if(stEthCsPktInfo.eNwpktIPFrameType == eNonIPPacket)
+ if (stEthCsPktInfo.eNwpktIPFrameType == eNonIPPacket)
{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, " ClassifyPacket : Packet is Not an IP Packet \n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, " ClassifyPacket : Packet is Not an IP Packet\n");
bClassificationSucceed = FALSE;
break;
}
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Dump IP Header : \n");
- DumpFullPacket((PUCHAR)pIpHeader,20);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "Dump IP Header :\n");
+ DumpFullPacket((PUCHAR)pIpHeader, 20);
- if(stEthCsPktInfo.eNwpktIPFrameType == eIPv4Packet)
- bClassificationSucceed = IpVersion4(Adapter,pIpHeader,pstClassifierRule);
- else if(stEthCsPktInfo.eNwpktIPFrameType == eIPv6Packet)
- bClassificationSucceed = IpVersion6(Adapter,pIpHeader,pstClassifierRule);
+ if (stEthCsPktInfo.eNwpktIPFrameType == eIPv4Packet)
+ bClassificationSucceed = IpVersion4(Adapter, pIpHeader, pstClassifierRule);
+ else if (stEthCsPktInfo.eNwpktIPFrameType == eIPv6Packet)
+ bClassificationSucceed = IpVersion6(Adapter, pIpHeader, pstClassifierRule);
}
- }while(0);
+ } while (0);
}
- if(bClassificationSucceed == TRUE)
+ if (bClassificationSucceed == TRUE)
{
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "CF id : %d, SF ID is =%lu",pstClassifierRule->uiClassifierRuleIndex, pstClassifierRule->ulSFID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "CF id : %d, SF ID is =%lu", pstClassifierRule->uiClassifierRuleIndex, pstClassifierRule->ulSFID);
//Store The matched Classifier in SKB
*((UINT32*)(skb->cb)+SKB_CB_CLASSIFICATION_OFFSET) = pstClassifierRule->uiClassifierRuleIndex;
- if((TCP == pIpHeader->protocol ) && !bFragmentedPkt && (ETH_AND_IP_HEADER_LEN + TCP_HEADER_LEN <= skb->len) )
+ if ((TCP == pIpHeader->protocol) && !bFragmentedPkt && (ETH_AND_IP_HEADER_LEN + TCP_HEADER_LEN <= skb->len))
{
IpHeaderLength = pIpHeader->ihl;
pTcpHeader = (struct bcm_tcp_header *)(((PUCHAR)pIpHeader)+(IpHeaderLength*4));
TcpHeaderLength = GET_TCP_HEADER_LEN(pTcpHeader->HeaderLength);
- if((pTcpHeader->ucFlags & TCP_ACK) &&
+ if ((pTcpHeader->ucFlags & TCP_ACK) &&
(ntohs(pIpHeader->tot_len) == (IpHeaderLength*4)+(TcpHeaderLength*4)))
{
- *((UINT32*) (skb->cb) +SKB_CB_TCPACK_OFFSET ) = TCP_ACK;
+ *((UINT32*) (skb->cb) + SKB_CB_TCPACK_OFFSET) = TCP_ACK;
}
}
usIndex = SearchSfid(Adapter, pstClassifierRule->ulSFID);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "index is =%d", usIndex);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "index is =%d", usIndex);
//If this is the first fragment of a Fragmented pkt, add this CF. Only This CF should be used for all other fragment of this Pkt.
- if(bFragmentedPkt && (usCurrFragment == 0))
+ if (bFragmentedPkt && (usCurrFragment == 0))
{
//First Fragment of Fragmented Packet. Create Frag CLS Entry
struct bcm_fragmented_packet_info stFragPktInfo;
@@ -637,77 +637,77 @@ USHORT ClassifyPacket(struct bcm_mini_adapter *Adapter,struct sk_buff* skb)
stFragPktInfo.usIpIdentification = pIpHeader->id;
stFragPktInfo.pstMatchedClassifierEntry = pstClassifierRule;
stFragPktInfo.bOutOfOrderFragment = FALSE;
- AddFragIPClsEntry(Adapter,&stFragPktInfo);
+ AddFragIPClsEntry(Adapter, &stFragPktInfo);
}
}
- if(bClassificationSucceed)
+ if (bClassificationSucceed)
return usIndex;
else
return INVALID_QUEUE_INDEX;
}
-static BOOLEAN EthCSMatchSrcMACAddress(struct bcm_classifier_rule *pstClassifierRule,PUCHAR Mac)
+static BOOLEAN EthCSMatchSrcMACAddress(struct bcm_classifier_rule *pstClassifierRule, PUCHAR Mac)
{
- UINT i=0;
+ UINT i = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- if(pstClassifierRule->ucEthCSSrcMACLen==0)
+ if (pstClassifierRule->ucEthCSSrcMACLen == 0)
return TRUE;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s \n",__FUNCTION__);
- for(i=0;i<MAC_ADDRESS_SIZE;i++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s\n", __FUNCTION__);
+ for (i = 0; i < MAC_ADDRESS_SIZE; i++)
{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "SRC MAC[%x] = %x ClassifierRuleSrcMAC = %x Mask : %x\n",i,Mac[i],pstClassifierRule->au8EThCSSrcMAC[i],pstClassifierRule->au8EThCSSrcMACMask[i]);
- if((pstClassifierRule->au8EThCSSrcMAC[i] & pstClassifierRule->au8EThCSSrcMACMask[i])!=
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "SRC MAC[%x] = %x ClassifierRuleSrcMAC = %x Mask : %x\n", i, Mac[i], pstClassifierRule->au8EThCSSrcMAC[i], pstClassifierRule->au8EThCSSrcMACMask[i]);
+ if ((pstClassifierRule->au8EThCSSrcMAC[i] & pstClassifierRule->au8EThCSSrcMACMask[i]) !=
(Mac[i] & pstClassifierRule->au8EThCSSrcMACMask[i]))
return FALSE;
}
return TRUE;
}
-static BOOLEAN EthCSMatchDestMACAddress(struct bcm_classifier_rule *pstClassifierRule,PUCHAR Mac)
+static BOOLEAN EthCSMatchDestMACAddress(struct bcm_classifier_rule *pstClassifierRule, PUCHAR Mac)
{
- UINT i=0;
+ UINT i = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- if(pstClassifierRule->ucEthCSDestMACLen==0)
+ if (pstClassifierRule->ucEthCSDestMACLen == 0)
return TRUE;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s \n",__FUNCTION__);
- for(i=0;i<MAC_ADDRESS_SIZE;i++)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s\n", __FUNCTION__);
+ for (i = 0; i < MAC_ADDRESS_SIZE; i++)
{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "SRC MAC[%x] = %x ClassifierRuleSrcMAC = %x Mask : %x\n",i,Mac[i],pstClassifierRule->au8EThCSDestMAC[i],pstClassifierRule->au8EThCSDestMACMask[i]);
- if((pstClassifierRule->au8EThCSDestMAC[i] & pstClassifierRule->au8EThCSDestMACMask[i])!=
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "SRC MAC[%x] = %x ClassifierRuleSrcMAC = %x Mask : %x\n", i, Mac[i], pstClassifierRule->au8EThCSDestMAC[i], pstClassifierRule->au8EThCSDestMACMask[i]);
+ if ((pstClassifierRule->au8EThCSDestMAC[i] & pstClassifierRule->au8EThCSDestMACMask[i]) !=
(Mac[i] & pstClassifierRule->au8EThCSDestMACMask[i]))
return FALSE;
}
return TRUE;
}
-static BOOLEAN EthCSMatchEThTypeSAP(struct bcm_classifier_rule *pstClassifierRule,struct sk_buff* skb, struct bcm_eth_packet_info *pstEthCsPktInfo)
+static BOOLEAN EthCSMatchEThTypeSAP(struct bcm_classifier_rule *pstClassifierRule, struct sk_buff* skb, struct bcm_eth_packet_info *pstEthCsPktInfo)
{
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- if((pstClassifierRule->ucEtherTypeLen==0)||
+ if ((pstClassifierRule->ucEtherTypeLen == 0) ||
(pstClassifierRule->au8EthCSEtherType[0] == 0))
return TRUE;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s SrcEtherType:%x CLS EtherType[0]:%x\n",__FUNCTION__,pstEthCsPktInfo->usEtherType,pstClassifierRule->au8EthCSEtherType[0]);
- if(pstClassifierRule->au8EthCSEtherType[0] == 1)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s SrcEtherType:%x CLS EtherType[0]:%x\n", __FUNCTION__, pstEthCsPktInfo->usEtherType, pstClassifierRule->au8EthCSEtherType[0]);
+ if (pstClassifierRule->au8EthCSEtherType[0] == 1)
{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s CLS EtherType[1]:%x EtherType[2]:%x\n",__FUNCTION__,pstClassifierRule->au8EthCSEtherType[1],pstClassifierRule->au8EthCSEtherType[2]);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s CLS EtherType[1]:%x EtherType[2]:%x\n", __FUNCTION__, pstClassifierRule->au8EthCSEtherType[1], pstClassifierRule->au8EthCSEtherType[2]);
- if(memcmp(&pstEthCsPktInfo->usEtherType,&pstClassifierRule->au8EthCSEtherType[1],2)==0)
+ if (memcmp(&pstEthCsPktInfo->usEtherType, &pstClassifierRule->au8EthCSEtherType[1], 2) == 0)
return TRUE;
else
return FALSE;
}
- if(pstClassifierRule->au8EthCSEtherType[0] == 2)
+ if (pstClassifierRule->au8EthCSEtherType[0] == 2)
{
- if(eEth802LLCFrame != pstEthCsPktInfo->eNwpktEthFrameType)
+ if (eEth802LLCFrame != pstEthCsPktInfo->eNwpktEthFrameType)
return FALSE;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s EthCS DSAP:%x EtherType[2]:%x\n",__FUNCTION__,pstEthCsPktInfo->ucDSAP,pstClassifierRule->au8EthCSEtherType[2]);
- if(pstEthCsPktInfo->ucDSAP == pstClassifierRule->au8EthCSEtherType[2])
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s EthCS DSAP:%x EtherType[2]:%x\n", __FUNCTION__, pstEthCsPktInfo->ucDSAP, pstClassifierRule->au8EthCSEtherType[2]);
+ if (pstEthCsPktInfo->ucDSAP == pstClassifierRule->au8EthCSEtherType[2])
return TRUE;
else
return FALSE;
@@ -718,27 +718,27 @@ static BOOLEAN EthCSMatchEThTypeSAP(struct bcm_classifier_rule *pstClassifierRul
}
-static BOOLEAN EthCSMatchVLANRules(struct bcm_classifier_rule *pstClassifierRule,struct sk_buff* skb, struct bcm_eth_packet_info *pstEthCsPktInfo)
+static BOOLEAN EthCSMatchVLANRules(struct bcm_classifier_rule *pstClassifierRule, struct sk_buff* skb, struct bcm_eth_packet_info *pstEthCsPktInfo)
{
BOOLEAN bClassificationSucceed = FALSE;
USHORT usVLANID;
B_UINT8 uPriority = 0;
struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(gblpnetdev);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s CLS UserPrio:%x CLS VLANID:%x\n",__FUNCTION__,ntohs(*((USHORT *)pstClassifierRule->usUserPriority)),pstClassifierRule->usVLANID);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s CLS UserPrio:%x CLS VLANID:%x\n", __FUNCTION__, ntohs(*((USHORT *)pstClassifierRule->usUserPriority)), pstClassifierRule->usVLANID);
/* In case FW didn't receive the TLV, the priority field should be ignored */
- if(pstClassifierRule->usValidityBitMap & (1<<PKT_CLASSIFICATION_USER_PRIORITY_VALID))
+ if (pstClassifierRule->usValidityBitMap & (1<<PKT_CLASSIFICATION_USER_PRIORITY_VALID))
{
- if(pstEthCsPktInfo->eNwpktEthFrameType!=eEth802QVLANFrame)
+ if (pstEthCsPktInfo->eNwpktEthFrameType != eEth802QVLANFrame)
return FALSE;
uPriority = (ntohs(*(USHORT *)(skb->data + sizeof(struct bcm_eth_header))) & 0xF000) >> 13;
- if((uPriority >= pstClassifierRule->usUserPriority[0]) && (uPriority <= pstClassifierRule->usUserPriority[1]))
+ if ((uPriority >= pstClassifierRule->usUserPriority[0]) && (uPriority <= pstClassifierRule->usUserPriority[1]))
bClassificationSucceed = TRUE;
- if(!bClassificationSucceed)
+ if (!bClassificationSucceed)
return FALSE;
}
@@ -746,19 +746,19 @@ static BOOLEAN EthCSMatchVLANRules(struct bcm_classifier_rule *pstClassifierRule
bClassificationSucceed = FALSE;
- if(pstClassifierRule->usValidityBitMap & (1<<PKT_CLASSIFICATION_VLANID_VALID))
+ if (pstClassifierRule->usValidityBitMap & (1<<PKT_CLASSIFICATION_VLANID_VALID))
{
- if(pstEthCsPktInfo->eNwpktEthFrameType!=eEth802QVLANFrame)
+ if (pstEthCsPktInfo->eNwpktEthFrameType != eEth802QVLANFrame)
return FALSE;
usVLANID = ntohs(*(USHORT *)(skb->data + sizeof(struct bcm_eth_header))) & 0xFFF;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s Pkt VLANID %x Priority: %d\n",__FUNCTION__,usVLANID, uPriority);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "%s Pkt VLANID %x Priority: %d\n", __FUNCTION__, usVLANID, uPriority);
- if(usVLANID == ((pstClassifierRule->usVLANID & 0xFFF0) >> 4))
+ if (usVLANID == ((pstClassifierRule->usVLANID & 0xFFF0) >> 4))
bClassificationSucceed = TRUE;
- if(!bClassificationSucceed)
+ if (!bClassificationSucceed)
return FALSE;
}
@@ -768,50 +768,50 @@ static BOOLEAN EthCSMatchVLANRules(struct bcm_classifier_rule *pstClassifierRule
}
-static BOOLEAN EThCSClassifyPkt(struct bcm_mini_adapter *Adapter,struct sk_buff* skb,
+static BOOLEAN EThCSClassifyPkt(struct bcm_mini_adapter *Adapter, struct sk_buff* skb,
struct bcm_eth_packet_info *pstEthCsPktInfo,
struct bcm_classifier_rule *pstClassifierRule,
B_UINT8 EthCSCupport)
{
BOOLEAN bClassificationSucceed = FALSE;
- bClassificationSucceed = EthCSMatchSrcMACAddress(pstClassifierRule,((struct bcm_eth_header *)(skb->data))->au8SourceAddress);
- if(!bClassificationSucceed)
+ bClassificationSucceed = EthCSMatchSrcMACAddress(pstClassifierRule, ((struct bcm_eth_header *)(skb->data))->au8SourceAddress);
+ if (!bClassificationSucceed)
return FALSE;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "ETH CS SrcMAC Matched\n");
- bClassificationSucceed = EthCSMatchDestMACAddress(pstClassifierRule,((struct bcm_eth_header *)(skb->data))->au8DestinationAddress);
- if(!bClassificationSucceed)
+ bClassificationSucceed = EthCSMatchDestMACAddress(pstClassifierRule, ((struct bcm_eth_header *)(skb->data))->au8DestinationAddress);
+ if (!bClassificationSucceed)
return FALSE;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "ETH CS DestMAC Matched\n");
//classify on ETHType/802.2SAP TLV
- bClassificationSucceed = EthCSMatchEThTypeSAP(pstClassifierRule,skb,pstEthCsPktInfo);
- if(!bClassificationSucceed)
+ bClassificationSucceed = EthCSMatchEThTypeSAP(pstClassifierRule, skb, pstEthCsPktInfo);
+ if (!bClassificationSucceed)
return FALSE;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "ETH CS EthType/802.2SAP Matched\n");
//classify on 802.1VLAN Header Parameters
- bClassificationSucceed = EthCSMatchVLANRules(pstClassifierRule,skb,pstEthCsPktInfo);
- if(!bClassificationSucceed)
+ bClassificationSucceed = EthCSMatchVLANRules(pstClassifierRule, skb, pstEthCsPktInfo);
+ if (!bClassificationSucceed)
return FALSE;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "ETH CS 802.1 VLAN Rules Matched\n");
return bClassificationSucceed;
}
-static void EThCSGetPktInfo(struct bcm_mini_adapter *Adapter,PVOID pvEthPayload,
+static void EThCSGetPktInfo(struct bcm_mini_adapter *Adapter, PVOID pvEthPayload,
struct bcm_eth_packet_info *pstEthCsPktInfo)
{
USHORT u16Etype = ntohs(((struct bcm_eth_header *)pvEthPayload)->u16Etype);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "EthCSGetPktInfo : Eth Hdr Type : %X\n",u16Etype);
- if(u16Etype > 0x5dc)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "EthCSGetPktInfo : Eth Hdr Type : %X\n", u16Etype);
+ if (u16Etype > 0x5dc)
{
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "EthCSGetPktInfo : ETH2 Frame \n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "EthCSGetPktInfo : ETH2 Frame\n");
//ETH2 Frame
- if(u16Etype == ETHERNET_FRAMETYPE_802QVLAN)
+ if (u16Etype == ETHERNET_FRAMETYPE_802QVLAN)
{
//802.1Q VLAN Header
pstEthCsPktInfo->eNwpktEthFrameType = eEth802QVLANFrame;
@@ -828,27 +828,27 @@ static void EThCSGetPktInfo(struct bcm_mini_adapter *Adapter,PVOID pvEthPayload,
else
{
//802.2 LLC
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "802.2 LLC Frame \n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "802.2 LLC Frame\n");
pstEthCsPktInfo->eNwpktEthFrameType = eEth802LLCFrame;
pstEthCsPktInfo->ucDSAP = ((struct bcm_eth_llc_frame *)pvEthPayload)->DSAP;
- if(pstEthCsPktInfo->ucDSAP == 0xAA && ((struct bcm_eth_llc_frame *)pvEthPayload)->SSAP == 0xAA)
+ if (pstEthCsPktInfo->ucDSAP == 0xAA && ((struct bcm_eth_llc_frame *)pvEthPayload)->SSAP == 0xAA)
{
//SNAP Frame
pstEthCsPktInfo->eNwpktEthFrameType = eEth802LLCSNAPFrame;
u16Etype = ((struct bcm_eth_llc_snap_frame *)pvEthPayload)->usEtherType;
}
}
- if(u16Etype == ETHERNET_FRAMETYPE_IPV4)
+ if (u16Etype == ETHERNET_FRAMETYPE_IPV4)
pstEthCsPktInfo->eNwpktIPFrameType = eIPv4Packet;
- else if(u16Etype == ETHERNET_FRAMETYPE_IPV6)
+ else if (u16Etype == ETHERNET_FRAMETYPE_IPV6)
pstEthCsPktInfo->eNwpktIPFrameType = eIPv6Packet;
else
pstEthCsPktInfo->eNwpktIPFrameType = eNonIPPacket;
pstEthCsPktInfo->usEtherType = ((struct bcm_eth_header *)pvEthPayload)->u16Etype;
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "EthCsPktInfo->eNwpktIPFrameType : %x\n",pstEthCsPktInfo->eNwpktIPFrameType);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "EthCsPktInfo->eNwpktEthFrameType : %x\n",pstEthCsPktInfo->eNwpktEthFrameType);
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "EthCsPktInfo->usEtherType : %x\n",pstEthCsPktInfo->usEtherType);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "EthCsPktInfo->eNwpktIPFrameType : %x\n", pstEthCsPktInfo->eNwpktIPFrameType);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "EthCsPktInfo->eNwpktEthFrameType : %x\n", pstEthCsPktInfo->eNwpktEthFrameType);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, IPV4_DBG, DBG_LVL_ALL, "EthCsPktInfo->usEtherType : %x\n", pstEthCsPktInfo->usEtherType);
}
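
EThCSGetPktInfo() above applies the classic 802.3 rule: a type/length value above 0x5DC (1500) is an EtherType, so the frame is Ethernet II (possibly 802.1Q tagged); anything at or below 1500 is a length followed by an 802.2 LLC header, with DSAP/SSAP 0xAA marking SNAP. The dispatch reduced to a standalone function, separate from the patch (the enum and macro names are invented):

	#include <stdint.h>

	enum frame_type { ETH2, ETH2_VLAN, LLC, LLC_SNAP };

	#define TYPE_8021Q	0x8100
	#define MAX_802_3_LEN	0x05DC		/* 1500: largest valid 802.3 length */

	enum frame_type classify_frame(uint16_t type_or_len, uint8_t dsap, uint8_t ssap)
	{
		if (type_or_len > MAX_802_3_LEN)
			/* an EtherType: Ethernet II, possibly 802.1Q tagged */
			return type_or_len == TYPE_8021Q ? ETH2_VLAN : ETH2;

		/* a length: 802.2 LLC header follows; DSAP/SSAP 0xAA means SNAP */
		return (dsap == 0xAA && ssap == 0xAA) ? LLC_SNAP : LLC;
	}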
diff --git a/drivers/staging/bcm/Version.h b/drivers/staging/bcm/Version.h
deleted file mode 100644
index f1cb9de734a..00000000000
--- a/drivers/staging/bcm/Version.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/*Copyright (c) 2005 Beceem Communications Inc.
-
-Module Name:
-
- Version.h
-
-Abstract:
-
-
---*/
-
-#ifndef VERSION_H
-#define VERSION_H
-
-
-#define VER_FILETYPE VFT_DRV
-#define VER_FILESUBTYPE VFT2_DRV_NETWORK
-
-#define VER_FILEVERSION 5.2.45
-#define VER_FILEVERSION_STR "5.2.45"
-
-#undef VER_PRODUCTVERSION
-#define VER_PRODUCTVERSION VER_FILEVERSION
-
-#undef VER_PRODUCTVERSION_STR
-#define VER_PRODUCTVERSION_STR VER_FILEVERSION_STR
-
-
-#endif /* VERSION_H */
diff --git a/drivers/staging/bcm/headers.h b/drivers/staging/bcm/headers.h
index da47db8c8f2..7fd21c6923c 100644
--- a/drivers/staging/bcm/headers.h
+++ b/drivers/staging/bcm/headers.h
@@ -38,7 +38,6 @@
#include <net/ip.h>
#include "Typedefs.h"
-#include "Version.h"
#include "Macros.h"
#include "HostMIBSInterface.h"
#include "cntrl_SignalingInterface.h"
@@ -71,7 +70,7 @@
#define DEV_NAME "tarang"
#define DRV_DESCRIPTION "Beceem Communications Inc. WiMAX driver"
#define DRV_COPYRIGHT "Copyright 2010. Beceem Communications Inc"
-#define DRV_VERSION VER_FILEVERSION_STR
+#define DRV_VERSION "5.2.45"
#define PFX DRV_NAME " "
extern struct class *bcm_class;
diff --git a/drivers/staging/bcm/nvm.c b/drivers/staging/bcm/nvm.c
index bea1330f7ea..91a5715964b 100644
--- a/drivers/staging/bcm/nvm.c
+++ b/drivers/staging/bcm/nvm.c
@@ -2966,7 +2966,7 @@ int BcmFlash2xBulkWrite(struct bcm_mini_adapter *Adapter,
* @Adapter :-Drivers private Data Structure
*
* Return Value:-
- * Return STATUS_SUCESS if get success in setting the right DSD else negaive error code
+ * Return STATUS_SUCCESS if the right DSD is set successfully, else a negative error code
*
*/
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 8c8a5513225..a84aab47a11 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -100,7 +100,6 @@ endif # COMEDI_MISC_DRIVERS
menuconfig COMEDI_ISA_DRIVERS
bool "Comedi ISA and PC/104 drivers"
- depends on ISA
---help---
Enable comedi ISA and PC/104 drivers to be built
@@ -122,8 +121,18 @@ config COMEDI_PCL724
tristate "Advantech PCL-722/724/731 and ADlink ACL-7122/7124/PET-48DIO"
select COMEDI_8255
---help---
- Enable support for Advantech PCL-724, PCL-722, PCL-731 and
- ADlink ACL-7122, ACL-7124, PET-48DIO ISA cards
+ Enable support for ISA and PC/104 based 8255 digital i/o boards. This
+ driver provides a legacy comedi driver wrapper for the generic 8255
+ support driver.
+
+ Supported boards include:
+ Advantech PCL-724 24 channels
+ Advantech PCL-722 144 (or 96) channels
+ Advantech PCL-731 48 channels
+ ADlink ACL-7122 144 (or 96) channels
+ ADlink ACL-7124 24 channels
+ ADlink PET-48DIO 48 channels
+ WinSystems PCM-IO48 48 channels (PC/104)
To compile this driver as a module, choose M here: the module will be
called pcl724.
@@ -403,6 +412,15 @@ config COMEDI_AIO_IIRO_16
To compile this driver as a module, choose M here: the module will be
called aio_iiro_16.
+config COMEDI_II_PCI20KC
+ tristate "Intelligent Instruments PCI-20001C carrier support"
+ ---help---
+ Enable support for Intelligent Instruments PCI-20001C carrier
+ PCI-20001, PCI-20006 and PCI-20341
+
+ To compile this driver as a module, choose M here: the module will be
+ called ii_pci20kc.
+
config COMEDI_C6XDIGIO
tristate "Mechatronic Systems Inc. C6x_DIGIO DSP daughter card support"
---help---
@@ -448,7 +466,6 @@ config COMEDI_NI_AT_AO
config COMEDI_NI_ATMIO
tristate "NI AT-MIO E series ISA-PNP card support"
- depends on ISAPNP
select COMEDI_8255
select COMEDI_NI_TIO
---help---
@@ -461,11 +478,10 @@ config COMEDI_NI_ATMIO
called ni_atmio.
config COMEDI_NI_ATMIO16D
- tristate "NI AT-MIO16/AT-MIO16D series ISA-PNP card support"
- depends on ISAPNP
+ tristate "NI AT-MIO-16/AT-MIO-16D series ISA card support"
select COMEDI_8255
---help---
- Enable support for National Instruments AT-MIO16/AT-MIO16D cards.
+ Enable support for National Instruments AT-MIO-16/AT-MIO-16D cards.
To compile this driver as a module, choose M here: the module will be
called ni_atmio16d.
@@ -473,7 +489,7 @@ config COMEDI_NI_ATMIO16D
config COMEDI_NI_LABPC_ISA
tristate "NI Lab-PC and compatibles ISA support"
select COMEDI_NI_LABPC
- depends on VIRT_TO_BUS
+ select COMEDI_NI_LABPC_ISADMA if ISA_DMA_API && VIRT_TO_BUS
---help---
Enable support for National Instruments Lab-PC and compatibles
Lab-PC-1200, Lab-PC-1200AI, Lab-PC+.
@@ -866,15 +882,6 @@ config COMEDI_ICP_MULTI
To compile this driver as a module, choose M here: the module will be
called icp_multi.
-config COMEDI_II_PCI20KC
- tristate "Intelligent Instruments PCI-20001C carrier support"
- ---help---
- Enable support for Intelligent Instruments PCI-20001C carrier
- PCI-20001, PCI-20006 and PCI-20341
-
- To compile this driver as a module, choose M here: the module will be
- called ii_pci20kc.
-
config COMEDI_DAQBOARD2000
tristate "IOtech DAQboard/2000 support"
select COMEDI_8255
@@ -1262,6 +1269,9 @@ config COMEDI_NI_LABPC
select COMEDI_8255
select COMEDI_FC
+config COMEDI_NI_LABPC_ISADMA
+ tristate
+
config COMEDI_NI_TIO
tristate
diff --git a/drivers/staging/comedi/TODO b/drivers/staging/comedi/TODO
index b10f739b7e3..fa8da9aada3 100644
--- a/drivers/staging/comedi/TODO
+++ b/drivers/staging/comedi/TODO
@@ -9,4 +9,4 @@ TODO:
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
copy:
Ian Abbott <abbotti@mev.co.uk>
- Frank Mori Hess <fmhess@users.sourceforge.net>
+ H Hartley Sweeten <hsweeten@visionengravers.com>
diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c
index b4c001b6f88..94b2385fb0a 100644
--- a/drivers/staging/comedi/comedi_buf.c
+++ b/drivers/staging/comedi/comedi_buf.c
@@ -15,6 +15,8 @@
* GNU General Public License for more details.
*/
+#include <linux/vmalloc.h>
+
#include "comedidev.h"
#include "comedi_internal.h"
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 8647518259f..1636c7ca57e 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -26,7 +26,6 @@
#include <linux/sched.h>
#include <linux/fcntl.h>
#include <linux/delay.h>
-#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmod.h>
@@ -262,7 +261,7 @@ static int resize_async_buffer(struct comedi_device *dev,
/* sysfs attribute files */
-static ssize_t show_max_read_buffer_kb(struct device *csdev,
+static ssize_t max_read_buffer_kb_show(struct device *csdev,
struct device_attribute *attr, char *buf)
{
unsigned int minor = MINOR(csdev->devt);
@@ -283,7 +282,7 @@ static ssize_t show_max_read_buffer_kb(struct device *csdev,
return snprintf(buf, PAGE_SIZE, "%i\n", size);
}
-static ssize_t store_max_read_buffer_kb(struct device *csdev,
+static ssize_t max_read_buffer_kb_store(struct device *csdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -314,8 +313,9 @@ static ssize_t store_max_read_buffer_kb(struct device *csdev,
return err ? err : count;
}
+static DEVICE_ATTR_RW(max_read_buffer_kb);
-static ssize_t show_read_buffer_kb(struct device *csdev,
+static ssize_t read_buffer_kb_show(struct device *csdev,
struct device_attribute *attr, char *buf)
{
unsigned int minor = MINOR(csdev->devt);
@@ -336,7 +336,7 @@ static ssize_t show_read_buffer_kb(struct device *csdev,
return snprintf(buf, PAGE_SIZE, "%i\n", size);
}
-static ssize_t store_read_buffer_kb(struct device *csdev,
+static ssize_t read_buffer_kb_store(struct device *csdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -367,8 +367,9 @@ static ssize_t store_read_buffer_kb(struct device *csdev,
return err ? err : count;
}
+static DEVICE_ATTR_RW(read_buffer_kb);
-static ssize_t show_max_write_buffer_kb(struct device *csdev,
+static ssize_t max_write_buffer_kb_show(struct device *csdev,
struct device_attribute *attr,
char *buf)
{
@@ -390,7 +391,7 @@ static ssize_t show_max_write_buffer_kb(struct device *csdev,
return snprintf(buf, PAGE_SIZE, "%i\n", size);
}
-static ssize_t store_max_write_buffer_kb(struct device *csdev,
+static ssize_t max_write_buffer_kb_store(struct device *csdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -421,8 +422,9 @@ static ssize_t store_max_write_buffer_kb(struct device *csdev,
return err ? err : count;
}
+static DEVICE_ATTR_RW(max_write_buffer_kb);
-static ssize_t show_write_buffer_kb(struct device *csdev,
+static ssize_t write_buffer_kb_show(struct device *csdev,
struct device_attribute *attr, char *buf)
{
unsigned int minor = MINOR(csdev->devt);
@@ -443,7 +445,7 @@ static ssize_t show_write_buffer_kb(struct device *csdev,
return snprintf(buf, PAGE_SIZE, "%i\n", size);
}
-static ssize_t store_write_buffer_kb(struct device *csdev,
+static ssize_t write_buffer_kb_store(struct device *csdev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -474,18 +476,16 @@ static ssize_t store_write_buffer_kb(struct device *csdev,
return err ? err : count;
}
+static DEVICE_ATTR_RW(write_buffer_kb);
-static struct device_attribute comedi_dev_attrs[] = {
- __ATTR(max_read_buffer_kb, S_IRUGO | S_IWUSR,
- show_max_read_buffer_kb, store_max_read_buffer_kb),
- __ATTR(read_buffer_kb, S_IRUGO | S_IWUSR | S_IWGRP,
- show_read_buffer_kb, store_read_buffer_kb),
- __ATTR(max_write_buffer_kb, S_IRUGO | S_IWUSR,
- show_max_write_buffer_kb, store_max_write_buffer_kb),
- __ATTR(write_buffer_kb, S_IRUGO | S_IWUSR | S_IWGRP,
- show_write_buffer_kb, store_write_buffer_kb),
- __ATTR_NULL
+static struct attribute *comedi_dev_attrs[] = {
+ &dev_attr_max_read_buffer_kb.attr,
+ &dev_attr_read_buffer_kb.attr,
+ &dev_attr_max_write_buffer_kb.attr,
+ &dev_attr_write_buffer_kb.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(comedi_dev);
static void comedi_set_subdevice_runflags(struct comedi_subdevice *s,
unsigned mask, unsigned bits)
@@ -1413,22 +1413,19 @@ static int do_cmd_ioctl(struct comedi_device *dev,
DPRINTK("subdevice busy\n");
return -EBUSY;
}
- s->busy = file;
/* make sure channel/gain list isn't too long */
if (cmd.chanlist_len > s->len_chanlist) {
DPRINTK("channel/gain list too long %u > %d\n",
cmd.chanlist_len, s->len_chanlist);
- ret = -EINVAL;
- goto cleanup;
+ return -EINVAL;
}
/* make sure channel/gain list isn't too short */
if (cmd.chanlist_len < 1) {
DPRINTK("channel/gain list too short %u < 1\n",
cmd.chanlist_len);
- ret = -EINVAL;
- goto cleanup;
+ return -EINVAL;
}
async->cmd = cmd;
@@ -1438,8 +1435,7 @@ static int do_cmd_ioctl(struct comedi_device *dev,
kmalloc(async->cmd.chanlist_len * sizeof(int), GFP_KERNEL);
if (!async->cmd.chanlist) {
DPRINTK("allocation failed\n");
- ret = -ENOMEM;
- goto cleanup;
+ return -ENOMEM;
}
if (copy_from_user(async->cmd.chanlist, user_chanlist,
@@ -1491,6 +1487,9 @@ static int do_cmd_ioctl(struct comedi_device *dev,
comedi_set_subdevice_runflags(s, ~0, SRF_USER | SRF_RUNNING);
+ /* set s->busy _after_ setting SRF_RUNNING flag to avoid race with
+ * comedi_read() or comedi_write() */
+ s->busy = file;
ret = s->do_cmd(dev, s);
if (ret == 0)
return 0;
@@ -1705,6 +1704,7 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
void *file)
{
struct comedi_subdevice *s;
+ int ret;
if (arg >= dev->n_subdevices)
return -EINVAL;
@@ -1721,7 +1721,11 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
if (s->busy != file)
return -EBUSY;
- return do_cancel(dev, s);
+ ret = do_cancel(dev, s);
+ if (comedi_get_subdevice_runflags(s) & SRF_USER)
+ wake_up_interruptible(&s->async->wait_head);
+
+ return ret;
}
/*
@@ -2053,11 +2057,13 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
if (!comedi_is_subdevice_running(s)) {
if (count == 0) {
+ mutex_lock(&dev->mutex);
if (comedi_is_subdevice_in_error(s))
retval = -EPIPE;
else
retval = 0;
do_become_nonbusy(dev, s);
+ mutex_unlock(&dev->mutex);
}
break;
}
@@ -2156,11 +2162,13 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
if (n == 0) {
if (!comedi_is_subdevice_running(s)) {
+ mutex_lock(&dev->mutex);
do_become_nonbusy(dev, s);
if (comedi_is_subdevice_in_error(s))
retval = -EPIPE;
else
retval = 0;
+ mutex_unlock(&dev->mutex);
break;
}
if (file->f_flags & O_NONBLOCK) {
@@ -2198,9 +2206,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
buf += n;
break; /* makes device work like a pipe */
}
- if (comedi_is_subdevice_idle(s) &&
- async->buf_read_count - async->buf_write_count == 0) {
- do_become_nonbusy(dev, s);
+ if (comedi_is_subdevice_idle(s)) {
+ mutex_lock(&dev->mutex);
+ if (async->buf_read_count - async->buf_write_count == 0)
+ do_become_nonbusy(dev, s);
+ mutex_unlock(&dev->mutex);
}
set_current_state(TASK_RUNNING);
remove_wait_queue(&async->wait_head, &wait);
@@ -2554,7 +2564,7 @@ static int __init comedi_init(void)
return PTR_ERR(comedi_class);
}
- comedi_class->dev_attrs = comedi_dev_attrs;
+ comedi_class->dev_groups = comedi_dev_groups;
/* XXX requires /proc interface */
comedi_proc_init();
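
[Note] The comedi_fops.c hunks above follow the standard sysfs conversion: each show_*/store_* pair is renamed to <name>_show/<name>_store so DEVICE_ATTR_RW() can declare dev_attr_<name>, the attributes are collected into a struct attribute array, and ATTRIBUTE_GROUPS() generates the *_groups pointer that is assigned to class->dev_groups instead of the removed class->dev_attrs. A minimal sketch of the same pattern for a hypothetical "example" attribute (names are illustrative, not comedi symbols):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* report a fixed value; a real driver reads device state here */
	return snprintf(buf, PAGE_SIZE, "%d\n", 42);
}

static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	/* parse and apply the value here; this sketch accepts anything */
	return count;
}
static DEVICE_ATTR_RW(example);		/* declares dev_attr_example */

static struct attribute *example_dev_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example_dev);		/* defines example_dev_groups */

/* at class creation time:
 *	cls->dev_groups = example_dev_groups;
 */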
diff --git a/drivers/staging/comedi/comedi_internal.h b/drivers/staging/comedi/comedi_internal.h
index d5e03e558b3..fda1a7ba0e1 100644
--- a/drivers/staging/comedi/comedi_internal.h
+++ b/drivers/staging/comedi/comedi_internal.h
@@ -24,6 +24,7 @@ extern unsigned int comedi_default_buf_maxsize_kb;
/* drivers.c */
extern struct comedi_driver *comedi_drivers;
+extern struct mutex comedi_drivers_list_lock;
int insn_inval(struct comedi_device *, struct comedi_subdevice *,
struct comedi_insn *, unsigned int *);
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index b75915f30f4..2e19f659cd2 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -19,22 +19,7 @@
#ifndef _COMEDIDEV_H
#define _COMEDIDEV_H
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/kdev_t.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/wait.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-#include <linux/timer.h>
#include "comedi.h"
@@ -357,6 +342,11 @@ void comedi_buf_memcpy_from(struct comedi_async *async, unsigned int offset,
/* drivers.c - general comedi driver functions */
+int comedi_dio_insn_config(struct comedi_device *, struct comedi_subdevice *,
+ struct comedi_insn *, unsigned int *data,
+ unsigned int mask);
+
+void *comedi_alloc_devpriv(struct comedi_device *, size_t);
int comedi_alloc_subdevices(struct comedi_device *, int);
int comedi_load_firmware(struct comedi_device *, struct device *,
@@ -377,7 +367,7 @@ int comedi_auto_config(struct device *, struct comedi_driver *,
void comedi_auto_unconfig(struct device *);
int comedi_driver_register(struct comedi_driver *);
-int comedi_driver_unregister(struct comedi_driver *);
+void comedi_driver_unregister(struct comedi_driver *);
/**
* module_comedi_driver() - Helper macro for registering a comedi driver
@@ -400,7 +390,6 @@ int comedi_driver_unregister(struct comedi_driver *);
*/
#define PCI_VENDOR_ID_KOLTER 0x1001
#define PCI_VENDOR_ID_ICP 0x104c
-#define PCI_VENDOR_ID_AMCC 0x10e8
#define PCI_VENDOR_ID_DT 0x1116
#define PCI_VENDOR_ID_IOTECH 0x1616
#define PCI_VENDOR_ID_CONTEC 0x1221
diff --git a/drivers/staging/comedi/comedilib.h b/drivers/staging/comedi/comedilib.h
index 1a78b15543c..56baf852ecf 100644
--- a/drivers/staging/comedi/comedilib.h
+++ b/drivers/staging/comedi/comedilib.h
@@ -21,10 +21,13 @@
struct comedi_device *comedi_open(const char *path);
int comedi_close(struct comedi_device *dev);
+int comedi_dio_get_config(struct comedi_device *dev, unsigned int subdev,
+ unsigned int chan, unsigned int *io);
int comedi_dio_config(struct comedi_device *dev, unsigned int subdev,
unsigned int chan, unsigned int io);
-int comedi_dio_bitfield(struct comedi_device *dev, unsigned int subdev,
- unsigned int mask, unsigned int *bits);
+int comedi_dio_bitfield2(struct comedi_device *dev, unsigned int subdev,
+ unsigned int mask, unsigned int *bits,
+ unsigned int base_channel);
int comedi_find_subdevice_by_type(struct comedi_device *dev, int type,
unsigned int subd);
int comedi_get_n_channels(struct comedi_device *dev, unsigned int subdevice);
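
[Note] comedilib.h is the kernel-level (kcomedilib) interface; the hunk above adds comedi_dio_get_config() and replaces comedi_dio_bitfield() with comedi_dio_bitfield2(), which takes an explicit base_channel. A hedged sketch of how an in-kernel user might drive a DIO subdevice through this interface (device path, subdevice and channel numbers are made up for illustration):

#include <linux/errno.h>
#include "comedilib.h"

static int example_toggle_outputs(void)
{
	struct comedi_device *dev;
	unsigned int io, bits;
	int ret;

	dev = comedi_open("/dev/comedi0");	/* path is illustrative */
	if (!dev)
		return -ENODEV;

	/* query the current direction of subdevice 0, channel 0 */
	ret = comedi_dio_get_config(dev, 0, 0, &io);
	if (ret == 0 && io != COMEDI_OUTPUT)
		ret = comedi_dio_config(dev, 0, 0, COMEDI_OUTPUT);

	/* write channels 0..7 in one call; base_channel = 0 */
	if (ret == 0) {
		bits = 0xff;
		ret = comedi_dio_bitfield2(dev, 0, 0xff, &bits, 0);
	}

	comedi_close(dev);
	return ret;
}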
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index e25eba5713c..317a821b790 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -23,7 +23,6 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fcntl.h>
-#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/slab.h>
@@ -39,6 +38,7 @@
#include "comedi_internal.h"
struct comedi_driver *comedi_drivers;
+DEFINE_MUTEX(comedi_drivers_list_lock);
int comedi_set_hw_dev(struct comedi_device *dev, struct device *hw_dev)
{
@@ -57,6 +57,18 @@ static void comedi_clear_hw_dev(struct comedi_device *dev)
dev->hw_dev = NULL;
}
+/**
+ * comedi_alloc_devpriv() - Allocate memory for the device private data.
+ * @dev: comedi_device struct
+ * @size: size of the memory to allocate
+ */
+void *comedi_alloc_devpriv(struct comedi_device *dev, size_t size)
+{
+ dev->private = kzalloc(size, GFP_KERNEL);
+ return dev->private;
+}
+EXPORT_SYMBOL_GPL(comedi_alloc_devpriv);
+
int comedi_alloc_subdevices(struct comedi_device *dev, int num_subdevices)
{
struct comedi_subdevice *s;
@@ -138,6 +150,46 @@ int insn_inval(struct comedi_device *dev, struct comedi_subdevice *s,
return -EINVAL;
}
+/**
+ * comedi_dio_insn_config() - boilerplate (*insn_config) for DIO subdevices.
+ * @dev: comedi_device struct
+ * @s: comedi_subdevice struct
+ * @insn: comedi_insn struct
+ * @data: parameters for the @insn
+ * @mask: io_bits mask for grouped channels
+ */
+int comedi_dio_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data,
+ unsigned int mask)
+{
+ unsigned int chan_mask = 1 << CR_CHAN(insn->chanspec);
+
+ if (!mask)
+ mask = chan_mask;
+
+ switch (data[0]) {
+ case INSN_CONFIG_DIO_INPUT:
+ s->io_bits &= ~mask;
+ break;
+
+ case INSN_CONFIG_DIO_OUTPUT:
+ s->io_bits |= mask;
+ break;
+
+ case INSN_CONFIG_DIO_QUERY:
+ data[1] = (s->io_bits & mask) ? COMEDI_OUTPUT : COMEDI_INPUT;
+ return insn->n;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(comedi_dio_insn_config);
+
static int insn_rw_emulate_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
@@ -442,6 +494,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (dev->attached)
return -EBUSY;
+ mutex_lock(&comedi_drivers_list_lock);
for (driv = comedi_drivers; driv; driv = driv->next) {
if (!try_module_get(driv->module))
continue;
@@ -462,7 +515,8 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
comedi_report_boards(driv);
module_put(driv->module);
}
- return -EIO;
+ ret = -EIO;
+ goto out;
}
if (driv->attach == NULL) {
/* driver does not support manual configuration */
@@ -470,7 +524,8 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
"driver '%s' does not support attach using comedi_config\n",
driv->driver_name);
module_put(driv->module);
- return -ENOSYS;
+ ret = -ENOSYS;
+ goto out;
}
/* initialize dev->driver here so
* comedi_error() can be called from attach */
@@ -482,9 +537,11 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it)
ret = comedi_device_postconfig(dev);
if (ret < 0) {
comedi_device_detach(dev);
- module_put(dev->driver->module);
+ module_put(driv->module);
}
/* On success, the driver module count has been incremented. */
+out:
+ mutex_unlock(&comedi_drivers_list_lock);
return ret;
}
@@ -541,18 +598,34 @@ EXPORT_SYMBOL_GPL(comedi_auto_unconfig);
int comedi_driver_register(struct comedi_driver *driver)
{
+ mutex_lock(&comedi_drivers_list_lock);
driver->next = comedi_drivers;
comedi_drivers = driver;
+ mutex_unlock(&comedi_drivers_list_lock);
return 0;
}
EXPORT_SYMBOL_GPL(comedi_driver_register);
-int comedi_driver_unregister(struct comedi_driver *driver)
+void comedi_driver_unregister(struct comedi_driver *driver)
{
struct comedi_driver *prev;
int i;
+ /* unlink the driver */
+ mutex_lock(&comedi_drivers_list_lock);
+ if (comedi_drivers == driver) {
+ comedi_drivers = driver->next;
+ } else {
+ for (prev = comedi_drivers; prev->next; prev = prev->next) {
+ if (prev->next == driver) {
+ prev->next = driver->next;
+ break;
+ }
+ }
+ }
+ mutex_unlock(&comedi_drivers_list_lock);
+
/* check for devices using this driver */
for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
struct comedi_device *dev = comedi_dev_from_minor(i);
@@ -570,18 +643,5 @@ int comedi_driver_unregister(struct comedi_driver *driver)
}
mutex_unlock(&dev->mutex);
}
-
- if (comedi_drivers == driver) {
- comedi_drivers = driver->next;
- return 0;
- }
-
- for (prev = comedi_drivers; prev->next; prev = prev->next) {
- if (prev->next == driver) {
- prev->next = driver->next;
- return 0;
- }
- }
- return -EINVAL;
}
EXPORT_SYMBOL_GPL(comedi_driver_unregister);
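
[Note] The drivers.c changes above introduce comedi_drivers_list_lock so that manual attach (comedi_device_attach()) and driver (un)registration serialize access to the singly-linked comedi_drivers list, and comedi_driver_unregister() now unlinks the driver first, then detaches any devices still bound to it, returning void. The unlink itself is the usual mutex-protected singly-linked-list removal; a stand-alone sketch of that pattern (generic names, not comedi symbols):

#include <linux/mutex.h>

struct my_driver {
	struct my_driver *next;
	/* driver-specific fields */
};

static struct my_driver *my_drivers;
static DEFINE_MUTEX(my_drivers_lock);

static void my_driver_unlink(struct my_driver *drv)
{
	struct my_driver *prev;

	mutex_lock(&my_drivers_lock);
	if (my_drivers == drv) {
		/* removing the list head */
		my_drivers = drv->next;
	} else {
		for (prev = my_drivers; prev->next; prev = prev->next) {
			if (prev->next == drv) {
				prev->next = drv->next;
				break;
			}
		}
	}
	mutex_unlock(&my_drivers_lock);
}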
diff --git a/drivers/staging/comedi/drivers/8255.c b/drivers/staging/comedi/drivers/8255.c
index 94e17500150..2f070fdbbb1 100644
--- a/drivers/staging/comedi/drivers/8255.c
+++ b/drivers/staging/comedi/drivers/8255.c
@@ -73,10 +73,9 @@ I/O port base address can be found in the output of 'lspci -v'.
will copy the latched value to a Comedi buffer.
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
#include "comedi_fc.h"
#include "8255.h"
@@ -185,39 +184,29 @@ static void subdev_8255_do_config(struct comedi_device *dev,
static int subdev_8255_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int mask;
- unsigned int bits;
+ int ret;
- mask = 1 << CR_CHAN(insn->chanspec);
- if (mask & 0x0000ff)
- bits = 0x0000ff;
- else if (mask & 0x00ff00)
- bits = 0x00ff00;
- else if (mask & 0x0f0000)
- bits = 0x0f0000;
+ if (chan < 8)
+ mask = 0x0000ff;
+ else if (chan < 16)
+ mask = 0x00ff00;
+ else if (chan < 20)
+ mask = 0x0f0000;
else
- bits = 0xf00000;
-
- switch (data[0]) {
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~bits;
- break;
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= bits;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & bits) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- }
+ mask = 0xf00000;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
subdev_8255_do_config(dev, s);
- return 1;
+ return insn->n;
}
static int subdev_8255_cmdtest(struct comedi_device *dev,
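
[Note] The 8255 driver above is the first user of the new comedi_dio_insn_config() helper: it derives a port-wide mask from the channel number (port A = channels 0-7, port B = 8-15, port C lower/upper nibble = 16-19/20-23) because an 8255 can only switch direction per port, then lets the helper update s->io_bits and handle INSN_CONFIG_DIO_QUERY. A driver whose channels are individually programmable can pass mask = 0 and let the helper fall back to the single-channel mask; a hedged sketch (hypothetical driver, not from this patch):

static int example_dio_insn_config(struct comedi_device *dev,
				   struct comedi_subdevice *s,
				   struct comedi_insn *insn,
				   unsigned int *data)
{
	int ret;

	/* mask = 0: the helper uses 1 << CR_CHAN(insn->chanspec) */
	ret = comedi_dio_insn_config(dev, s, insn, data, 0);
	if (ret)
		return ret;	/* QUERY result (insn->n) or error */

	/* push the new s->io_bits direction setting to the hardware here */

	return insn->n;
}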
diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
index 3d3547c1948..432e3f9c330 100644
--- a/drivers/staging/comedi/drivers/8255_pci.c
+++ b/drivers/staging/comedi/drivers/8255_pci.c
@@ -50,6 +50,7 @@ Interrupt support for these boards is also not currently supported.
Configuration Options: not applicable, uses PCI auto config
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -186,10 +187,9 @@ static int pci_8255_auto_attach(struct comedi_device *dev,
dev->board_ptr = board;
dev->board_name = board->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
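
[Note] The 8255_pci change is the mechanical replacement enabled by the new comedi_alloc_devpriv() helper added in drivers.c: the helper kzalloc()s the private data and stores the pointer in dev->private itself, so drivers no longer open-code the assignment (and cannot forget it). The same pattern in a hypothetical auto_attach() (driver names are illustrative):

struct example_private {
	unsigned long iobase;
};

static int example_auto_attach(struct comedi_device *dev,
			       unsigned long context)
{
	struct example_private *devpriv;

	/* allocates with kzalloc(GFP_KERNEL) and sets dev->private */
	devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
	if (!devpriv)
		return -ENOMEM;

	/* continue with PCI enable / subdevice setup */
	return 0;
}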
diff --git a/drivers/staging/comedi/drivers/Makefile b/drivers/staging/comedi/drivers/Makefile
index dbb93e33248..94cbd2618fc 100644
--- a/drivers/staging/comedi/drivers/Makefile
+++ b/drivers/staging/comedi/drivers/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_COMEDI_DMM32AT) += dmm32at.o
obj-$(CONFIG_COMEDI_FL512) += fl512.o
obj-$(CONFIG_COMEDI_AIO_AIO12_8) += aio_aio12_8.o
obj-$(CONFIG_COMEDI_AIO_IIRO_16) += aio_iiro_16.o
+obj-$(CONFIG_COMEDI_II_PCI20KC) += ii_pci20kc.o
obj-$(CONFIG_COMEDI_C6XDIGIO) += c6xdigio.o
obj-$(CONFIG_COMEDI_MPC624) += mpc624.o
obj-$(CONFIG_COMEDI_ADQ12B) += adq12b.o
@@ -89,7 +90,6 @@ obj-$(CONFIG_COMEDI_DYNA_PCI10XX) += dyna_pci10xx.o
obj-$(CONFIG_COMEDI_UNIOXX5) += unioxx5.o
obj-$(CONFIG_COMEDI_GSC_HPDI) += gsc_hpdi.o
obj-$(CONFIG_COMEDI_ICP_MULTI) += icp_multi.o
-obj-$(CONFIG_COMEDI_II_PCI20KC) += ii_pci20kc.o
obj-$(CONFIG_COMEDI_DAQBOARD2000) += daqboard2000.o
obj-$(CONFIG_COMEDI_JR3_PCI) += jr3_pci.o
obj-$(CONFIG_COMEDI_KE_COUNTER) += ke_counter.o
@@ -132,6 +132,7 @@ obj-$(CONFIG_COMEDI_MITE) += mite.o
obj-$(CONFIG_COMEDI_NI_TIO) += ni_tio.o
obj-$(CONFIG_COMEDI_NI_TIOCMD) += ni_tiocmd.o
obj-$(CONFIG_COMEDI_NI_LABPC) += ni_labpc.o
+obj-$(CONFIG_COMEDI_NI_LABPC_ISADMA) += ni_labpc_isadma.o
obj-$(CONFIG_COMEDI_8255) += 8255.o
obj-$(CONFIG_COMEDI_AMPLC_DIO200) += amplc_dio200_common.o
diff --git a/drivers/staging/comedi/drivers/addi-data/APCI1710_82x54.c b/drivers/staging/comedi/drivers/addi-data/APCI1710_82x54.c
deleted file mode 100644
index d0702084caa..00000000000
--- a/drivers/staging/comedi/drivers/addi-data/APCI1710_82x54.c
+++ /dev/null
@@ -1,1068 +0,0 @@
-/*
- * Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
- *
- * ADDI-DATA GmbH
- * Dieselstrasse 3
- * D-77833 Ottersweier
- * Tel: +19(0)7223/9493-0
- * Fax: +49(0)7223/9493-92
- * http://www.addi-data.com
- * info@addi-data.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-/*
- | Description : APCI-1710 82X54 timer module |
-*/
-
-#define APCI1710_PCI_BUS_CLOCK 0
-#define APCI1710_FRONT_CONNECTOR_INPUT 1
-#define APCI1710_TIMER_READVALUE 0
-#define APCI1710_TIMER_GETOUTPUTLEVEL 1
-#define APCI1710_TIMER_GETPROGRESSSTATUS 2
-#define APCI1710_TIMER_WRITEVALUE 3
-
-#define APCI1710_TIMER_READINTERRUPT 1
-#define APCI1710_TIMER_READALLTIMER 2
-
-#ifndef APCI1710_10MHZ
-#define APCI1710_10MHZ 10
-#endif
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitTimer |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_TimerNbr, |
-| unsigned char_ b_TimerMode, |
-| ULONG_ ul_ReloadValue, |
-| unsigned char_ b_InputClockSelection, |
-| unsigned char_ b_InputClockLevel, |
-| unsigned char_ b_OutputLevel, |
-| unsigned char_ b_HardwareGateLevel)
-int i_InsnConfig_InitTimer(struct comedi_device *dev,struct comedi_subdevice *s,
- struct comedi_insn *insn,unsigned int *data)
-|
-+----------------------------------------------------------------------------+
-| Task : Configure the Timer (b_TimerNbr) operating mode |
-| (b_TimerMode) from selected module (b_ModulNbr). |
-| You must calling this function be for you call any |
-| other function witch access of the timer. |
-| |
-| |
-| Timer mode description table |
-| |
-|+--------+-----------------------------+--------------+--------------------+|
-||Selected+ Mode description +u_ReloadValue | Hardware gate input||
-|| mode | | description | action ||
-|+--------+-----------------------------+--------------+--------------------+|
-|| |Mode 0 is typically used | | ||
-|| |for event counting. After | | ||
-|| |the initialisation, OUT | | ||
-|| |is initially low, and | | ||
-|| 0 |will remain low until the |Start counting| Hardware gate ||
-|| |counter reaches zero. | value | ||
-|| |OUT then goes high and | | ||
-|| |remains high until a new | | ||
-|| |count is written. See | | ||
-|| |"i_APCI1710_WriteTimerValue" | | ||
-|| |function. | | ||
-|+--------+-----------------------------+--------------+--------------------+|
-|| |Mode 1 is similar to mode 0 | | ||
-|| |except for the gate input | | ||
-|| 1 |action. The gate input is not|Start counting| Hardware trigger ||
-|| |used for enabled or disabled | value | ||
-|| |the timer. | | ||
-|| |The gate input is used for | | ||
-|| |triggered the timer. | | ||
-|+--------+-----------------------------+--------------+--------------------+|
-|| |This mode functions like a | | ||
-|| |divide-by-ul_ReloadValue | | ||
-|| |counter. It is typically used| | ||
-|| |to generate a real time clock| | ||
-|| |interrupt. OUT will initially| | ||
-|| 2 |be high after the | Division | Hardware gate ||
-|| |initialisation. When the | factor | ||
-|| |initial count has decremented| | ||
-|| |to 1, OUT goes low for one | | ||
-|| |CLK pule. OUT then goes high | | ||
-|| |again, the counter reloads | | ||
-|| |the initial count | | ||
-|| |(ul_ReloadValue) and the | | ||
-|| |process is repeated. | | ||
-|| |This action can generated a | | ||
-|| |interrupt. See function | | ||
-|| |"i_APCI1710_SetBoardInt- | | ||
-|| |RoutineX" | | ||
-|| |and "i_APCI1710_EnableTimer" | | ||
-|+--------+-----------------------------+--------------+--------------------+|
-|| |Mode 3 is typically used for | | ||
-|| |baud rate generation. This | | ||
-|| |mode is similar to mode 2 | | ||
-|| |except for the duty cycle of | | ||
-|| 3 |OUT. OUT will initially be | Division | Hardware gate ||
-|| |high after the initialisation| factor | ||
-|| |When half the initial count | | ||
-|| |(ul_ReloadValue) has expired,| | ||
-|| |OUT goes low for the | | ||
-|| |remainder of the count. The | | ||
-|| |mode is periodic; the | | ||
-|| |sequence above is repeated | | ||
-|| |indefinitely. | | ||
-|+--------+-----------------------------+--------------+--------------------+|
-|| |OUT will be initially high | | ||
-|| |after the initialisation. | | ||
-|| |When the initial count | | ||
-|| 4 |expires OUT will go low for |Start counting| Hardware gate ||
-|| |one CLK pulse and then go | value | ||
-|| |high again. | | ||
-|| |The counting sequences is | | ||
-|| |triggered by writing a new | | ||
-|| |value. See | | ||
-|| |"i_APCI1710_WriteTimerValue" | | ||
-|| |function. If a new count is | | ||
-|| |written during counting, | | ||
-|| |it will be loaded on the | | ||
-|| |next CLK pulse | | ||
-|+--------+-----------------------------+--------------+--------------------+|
-|| |Mode 5 is similar to mode 4 | | ||
-|| |except for the gate input | | ||
-|| |action. The gate input is not| | ||
-|| 5 |used for enabled or disabled |Start counting| Hardware trigger ||
-|| |the timer. The gate input is | value | ||
-|| |used for triggered the timer.| | ||
-|+--------+-----------------------------+--------------+--------------------+|
-| |
-| |
-| |
-| Input clock selection table |
-| |
-| +--------------------------------+------------------------------------+ |
-| | b_InputClockSelection | Description | |
-| | parameter | | |
-| +--------------------------------+------------------------------------+ |
-| | APCI1710_PCI_BUS_CLOCK | For the timer input clock, the PCI | |
-| | | bus clock / 4 is used. This PCI bus| |
-| | | clock can be 30MHz or 33MHz. For | |
-| | | Timer 0 only this selection are | |
-| | | available. | |
-| +--------------------------------+------------------------------------+ |
-| | APCI1710_ FRONT_CONNECTOR_INPUT| Of the front connector you have the| |
-| | | possibility to inject a input clock| |
-| | | for Timer 1 or Timer 2. The source | |
-| | | from this clock can eat the output | |
-| | | clock from Timer 0 or any other | |
-| | | clock source. | |
-| +--------------------------------+------------------------------------+ |
-| |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board |
-| APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to |
-| configure (0 to 3) |
-| unsigned char_ b_TimerNbr : Timer number to |
-| configure (0 to 2) |
-| unsigned char_ b_TimerMode : Timer mode selection |
-| (0 to 5) |
-| 0: Interrupt on terminal|
-| count |
-| 1: Hardware |
-| retriggerable one- |
-| shot |
-| 2: Rate generator |
-| 3: Square wave mode |
-| 4: Software triggered |
-| strobe |
-| 5: Hardware triggered |
-| strobe |
-| See timer mode |
-| description table. |
-| ULONG_ ul_ReloadValue : Start counting value |
-| or division factor |
-| See timer mode |
-| description table. |
-| unsigned char_ b_InputClockSelection : Selection from input |
-| timer clock. |
-| See input clock |
-| selection table. |
-| unsigned char_ b_InputClockLevel : Selection from input |
-| clock level. |
-| 0 : Low active |
-| (Input inverted) |
-| 1 : High active |
-| unsigned char_ b_OutputLevel, : Selection from output |
-| clock level. |
-| 0 : Low active |
-| 1 : High active |
-| (Output inverted) |
-| unsigned char_ b_HardwareGateLevel : Selection from |
-| hardware gate level. |
-| 0 : Low active |
-| (Input inverted) |
-| 1 : High active |
-| If you will not used |
-| the hardware gate set |
-| this value to 0.
-|b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_TimerNbr = (unsigned char) CR_CHAN(insn->chanspec);
- b_TimerMode = (unsigned char) data[0];
- ul_ReloadValue = (unsigned int) data[1];
- b_InputClockSelection =(unsigned char) data[2];
- b_InputClockLevel =(unsigned char) data[3];
- b_OutputLevel =(unsigned char) data[4];
- b_HardwareGateLevel =(unsigned char) data[5];
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: Timer selection wrong |
-| -4: The module is not a TIMER module |
-| -5: Timer mode selection is wrong |
-| -6: Input timer clock selection is wrong |
-| -7: Selection from input clock level is wrong |
-| -8: Selection from output clock level is wrong |
-| -9: Selection from hardware gate level is wrong |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InsnConfigInitTimer(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned char b_ModulNbr;
- unsigned char b_TimerNbr;
- unsigned char b_TimerMode;
- unsigned int ul_ReloadValue;
- unsigned char b_InputClockSelection;
- unsigned char b_InputClockLevel;
- unsigned char b_OutputLevel;
- unsigned char b_HardwareGateLevel;
-
- /* BEGIN JK 27.10.2003 : Add the possibility to use a 40 Mhz quartz */
- unsigned int dw_Test = 0;
- /* END JK 27.10.2003 : Add the possibility to use a 40 Mhz quartz */
-
- i_ReturnValue = insn->n;
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_TimerNbr = (unsigned char) CR_CHAN(insn->chanspec);
- b_TimerMode = (unsigned char) data[0];
- ul_ReloadValue = (unsigned int) data[1];
- b_InputClockSelection = (unsigned char) data[2];
- b_InputClockLevel = (unsigned char) data[3];
- b_OutputLevel = (unsigned char) data[4];
- b_HardwareGateLevel = (unsigned char) data[5];
-
- /* Test the module number */
- if (b_ModulNbr < 4) {
- /* Test if 82X54 timer */
- if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_82X54_TIMER) {
- /* Test the timer number */
-
- if (b_TimerNbr <= 2) {
- /* Test the timer mode */
- if (b_TimerMode <= 5) {
- /* BEGIN JK 27.10.2003 : Add the possibility to use a 40 Mhz quartz */
- /* Test te imput clock selection */
- /*
- if (((b_TimerNbr == 0) && (b_InputClockSelection == 0)) ||
- ((b_TimerNbr != 0) && ((b_InputClockSelection == 0) || (b_InputClockSelection == 1))))
- */
-
- if (((b_TimerNbr == 0) &&
- (b_InputClockSelection == APCI1710_PCI_BUS_CLOCK)) ||
- ((b_TimerNbr == 0) &&
- (b_InputClockSelection == APCI1710_10MHZ)) ||
- ((b_TimerNbr != 0) &&
- ((b_InputClockSelection == APCI1710_PCI_BUS_CLOCK) ||
- (b_InputClockSelection == APCI1710_FRONT_CONNECTOR_INPUT) ||
- (b_InputClockSelection == APCI1710_10MHZ)))) {
- /* BEGIN JK 27.10.2003 : Add the possibility to use a 40 Mhz quartz */
- if (((b_InputClockSelection == APCI1710_10MHZ) &&
- ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0x0000FFFFUL) >= 0x3131)) ||
- (b_InputClockSelection != APCI1710_10MHZ)) {
- /* END JK 27.10.2003 : Add the possibility to use a 40 Mhz quartz */
- /* Test the input clock level selection */
-
- if ((b_InputClockLevel == 0) ||
- (b_InputClockLevel == 1)) {
- /* Test the output clock level selection */
- if ((b_OutputLevel == 0) || (b_OutputLevel == 1)) {
- /* Test the hardware gate level selection */
- if ((b_HardwareGateLevel == 0) || (b_HardwareGateLevel == 1)) {
- /* BEGIN JK 27.10.03 : Add the possibility to use a 40 Mhz quartz */
- /* Test if version > 1.1 and clock selection = 10MHz */
- if ((b_InputClockSelection == APCI1710_10MHZ) && ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0x0000FFFFUL) > 0x3131)) {
- /* Test if 40MHz quartz on board */
- dw_Test = inl(devpriv->s_BoardInfos.ui_Address + (16 + (b_TimerNbr * 4) + (64 * b_ModulNbr)));
-
- dw_Test = (dw_Test >> 16) & 1;
- } else {
- dw_Test = 1;
- }
-
- /* Test if detection OK */
- if (dw_Test == 1) {
- /* END JK 27.10.03 : Add the possibility to use a 40 Mhz quartz */
- /* Initialisation OK */
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_82X54Init = 1;
-
- /* Save the input clock selection */
- devpriv-> s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_InputClockSelection = b_InputClockSelection;
-
- /* Save the input clock level */
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_InputClockLevel = ~b_InputClockLevel & 1;
-
- /* Save the output level */
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_OutputLevel = ~b_OutputLevel & 1;
-
- /* Save the gate level */
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_HardwareGateLevel = b_HardwareGateLevel;
-
- /* Set the configuration word and disable the timer */
- /* BEGIN JK 27.10.03 : Add the possibility to use a 40 Mhz quartz */
- /*
- devpriv->s_ModuleInfo [b_ModulNbr].
- s_82X54ModuleInfo.
- s_82X54TimerInfo [b_TimerNbr].
- dw_ConfigurationWord = (unsigned int) (((b_HardwareGateLevel << 0) & 0x1) |
- ((b_InputClockLevel << 1) & 0x2) |
- (((~b_OutputLevel & 1) << 2) & 0x4) |
- ((b_InputClockSelection << 4) & 0x10));
- */
- /* Test if 10MHz selected */
- if (b_InputClockSelection == APCI1710_10MHZ) {
- b_InputClockSelection = 2;
- }
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord = (unsigned int)(((b_HardwareGateLevel << 0) & 0x1) | ((b_InputClockLevel << 1) & 0x2) | (((~b_OutputLevel & 1) << 2) & 0x4) | ((b_InputClockSelection << 4) & 0x30));
- /* END JK 27.10.03 : Add the possibility to use a 40 Mhz quartz */
- outl(devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord, devpriv->s_BoardInfos.ui_Address + 32 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
-
- /* Initialise the 82X54 Timer */
- outl((unsigned int) b_TimerMode, devpriv->s_BoardInfos.ui_Address + 16 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
-
- /* Write the reload value */
- outl(ul_ReloadValue, devpriv->s_BoardInfos.ui_Address + 0 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
- /* BEGIN JK 27.10.03 : Add the possibility to use a 40 Mhz quartz */
- } /* if (dw_Test == 1) */
- else {
- /* Input timer clock selection is wrong */
- i_ReturnValue = -6;
- } /* if (dw_Test == 1) */
- /* END JK 27.10.03 : Add the possibility to use a 40 Mhz quartz */
- } /* if ((b_HardwareGateLevel == 0) || (b_HardwareGateLevel == 1)) */
- else {
- /* Selection from hardware gate level is wrong */
- DPRINTK("Selection from hardware gate level is wrong\n");
- i_ReturnValue = -9;
- } /* if ((b_HardwareGateLevel == 0) || (b_HardwareGateLevel == 1)) */
- } /* if ((b_OutputLevel == 0) || (b_OutputLevel == 1)) */
- else {
- /* Selection from output clock level is wrong */
- DPRINTK("Selection from output clock level is wrong\n");
- i_ReturnValue = -8;
- } /* if ((b_OutputLevel == 0) || (b_OutputLevel == 1)) */
- } /* if ((b_InputClockLevel == 0) || (b_InputClockLevel == 1)) */
- else {
- /* Selection from input clock level is wrong */
- DPRINTK("Selection from input clock level is wrong\n");
- i_ReturnValue = -7;
- } /* if ((b_InputClockLevel == 0) || (b_InputClockLevel == 1)) */
- } else {
- /* Input timer clock selection is wrong */
- DPRINTK("Input timer clock selection is wrong\n");
- i_ReturnValue = -6;
- }
- } else {
- /* Input timer clock selection is wrong */
- DPRINTK("Input timer clock selection is wrong\n");
- i_ReturnValue = -6;
- }
- } /* if ((b_TimerMode >= 0) && (b_TimerMode <= 5)) */
- else {
- /* Timer mode selection is wrong */
- DPRINTK("Timer mode selection is wrong\n");
- i_ReturnValue = -5;
- } /* if ((b_TimerMode >= 0) && (b_TimerMode <= 5)) */
- } /* if ((b_TimerNbr >= 0) && (b_TimerNbr <= 2)) */
- else {
- /* Timer selection wrong */
- DPRINTK("Timer selection wrong\n");
- i_ReturnValue = -3;
- } /* if ((b_TimerNbr >= 0) && (b_TimerNbr <= 2)) */
- } else {
- /* The module is not a TIMER module */
- DPRINTK("The module is not a TIMER module\n");
- i_ReturnValue = -4;
- }
- } else {
- /* Module number error */
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_EnableTimer |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_TimerNbr, |
-| unsigned char_ b_InterruptEnable)
-int i_APCI1710_InsnWriteEnableDisableTimer(struct comedi_device *dev,struct comedi_subdevice *s,
- struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Enable OR Disable the Timer (b_TimerNbr) from selected module |
-| (b_ModulNbr). You must calling the |
-| "i_APCI1710_InitTimer" function be for you call this |
-| function. If you enable the timer interrupt, the timer |
-| generate a interrupt after the timer value reach |
-| the zero. See function "i_APCI1710_SetBoardIntRoutineX"|
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board |
-| APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number |
-| (0 to 3) |
-| unsigned char_ b_TimerNbr : Timer number to enable |
-| (0 to 2) |
-| unsigned char_ b_InterruptEnable : Enable or disable the |
-| timer interrupt. |
-| APCI1710_ENABLE : |
-| Enable the timer interrupt |
-| APCI1710_DISABLE : |
-| Disable the timer interrupt|
-i_ReturnValue=insn->n;
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_TimerNbr = (unsigned char) CR_CHAN(insn->chanspec);
- b_ActionType = (unsigned char) data[0]; /* enable disable */
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: Timer selection wrong |
-| -4: The module is not a TIMER module |
-| -5: Timer not initialised see function |
-| "i_APCI1710_InitTimer" |
-| -6: Interrupt parameter is wrong |
-| -7: Interrupt function not initialised. |
-| See function "i_APCI1710_SetBoardIntRoutineX" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InsnWriteEnableDisableTimer(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_DummyRead;
- unsigned char b_ModulNbr;
- unsigned char b_TimerNbr;
- unsigned char b_ActionType;
- unsigned char b_InterruptEnable;
-
- i_ReturnValue = insn->n;
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_TimerNbr = (unsigned char) CR_CHAN(insn->chanspec);
- b_ActionType = (unsigned char) data[0]; /* enable disable */
-
- /* Test the module number */
- if (b_ModulNbr < 4) {
- /* Test if 82X54 timer */
- if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_82X54_TIMER) {
- /* Test the timer number */
- if (b_TimerNbr <= 2) {
- /* Test if timer initialised */
- if (devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_82X54Init == 1) {
-
- switch (b_ActionType) {
- case APCI1710_ENABLE:
- b_InterruptEnable = (unsigned char) data[1];
- /* Test the interrupt selection */
- if ((b_InterruptEnable == APCI1710_ENABLE) ||
- (b_InterruptEnable == APCI1710_DISABLE)) {
- if (b_InterruptEnable == APCI1710_ENABLE) {
-
- dw_DummyRead = inl(devpriv->s_BoardInfos.ui_Address + 12 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
-
- /* Enable the interrupt */
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord = devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord | 0x8;
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord, devpriv->s_BoardInfos.ui_Address + 32 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
- devpriv->tsk_Current = current; /* Save the current process task structure */
-
- } /* if (b_InterruptEnable == APCI1710_ENABLE) */
- else {
- /* Disable the interrupt */
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord = devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord & 0xF7;
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord, devpriv->s_BoardInfos.ui_Address + 32 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
-
- /* Save the interrupt flag */
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.b_InterruptMask = devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.b_InterruptMask & (0xFF - (1 << b_TimerNbr));
- } /* if (b_InterruptEnable == APCI1710_ENABLE) */
-
- /* Test if error occur */
- if (i_ReturnValue >= 0) {
- /* Save the interrupt flag */
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.b_InterruptMask = devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.b_InterruptMask | ((1 & b_InterruptEnable) << b_TimerNbr);
-
- /* Enable the timer */
- outl(1, devpriv->s_BoardInfos.ui_Address + 44 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
- }
- } else {
- /* Interrupt parameter is wrong */
- DPRINTK("\n");
- i_ReturnValue = -6;
- }
- break;
- case APCI1710_DISABLE:
- /* Test the interrupt flag */
- if (((devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.b_InterruptMask >> b_TimerNbr) & 1) == 1) {
- /* Disable the interrupt */
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr]. dw_ConfigurationWord = devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord & 0xF7;
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].dw_ConfigurationWord, devpriv->s_BoardInfos.ui_Address + 32 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
-
- /* Save the interrupt flag */
- devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.b_InterruptMask = devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.b_InterruptMask & (0xFF - (1 << b_TimerNbr));
- }
-
- /* Disable the timer */
- outl(0, devpriv->s_BoardInfos.ui_Address + 44 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
- break;
- } /* Switch end */
- } else {
- /* Timer not initialised see function */
- DPRINTK ("Timer not initialised see function\n");
- i_ReturnValue = -5;
- }
- } else {
- /* Timer selection wrong */
- DPRINTK("Timer selection wrong\n");
- i_ReturnValue = -3;
- } /* if ((b_TimerNbr >= 0) && (b_TimerNbr <= 2)) */
- } else {
- /* The module is not a TIMER module */
- DPRINTK("The module is not a TIMER module\n");
- i_ReturnValue = -4;
- }
- } else {
- /* Module number error */
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadAllTimerValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| PULONG_ pul_TimerValueArray)
-int i_APCI1710_InsnReadAllTimerValue(struct comedi_device *dev,struct comedi_subdevice *s,
- struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Return the all timer values from selected timer |
-| module (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board |
-| APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_TimerValueArray : Timer value array. |
-| Element 0 contain the timer 0 value. |
-| Element 1 contain the timer 1 value. |
-| Element 2 contain the timer 2 value. |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a TIMER module |
-| -4: Timer 0 not initialised see function |
-| "i_APCI1710_InitTimer" |
-| -5: Timer 1 not initialised see function |
-| "i_APCI1710_InitTimer" |
-| -6: Timer 2 not initialised see function |
-| "i_APCI1710_InitTimer" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InsnReadAllTimerValue(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned char b_ModulNbr, b_ReadType;
- unsigned int *pul_TimerValueArray;
-
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_ReadType = CR_CHAN(insn->chanspec);
- pul_TimerValueArray = (unsigned int *) data;
- i_ReturnValue = insn->n;
-
- switch (b_ReadType) {
- case APCI1710_TIMER_READINTERRUPT:
-
- data[0] = devpriv->s_InterruptParameters.s_FIFOInterruptParameters[devpriv->s_InterruptParameters.ui_Read].b_OldModuleMask;
- data[1] = devpriv->s_InterruptParameters.s_FIFOInterruptParameters[devpriv->s_InterruptParameters.ui_Read].ul_OldInterruptMask;
- data[2] = devpriv->s_InterruptParameters.s_FIFOInterruptParameters[devpriv->s_InterruptParameters.ui_Read].ul_OldCounterLatchValue;
-
- /* Increment the read FIFO */
- devpriv->s_InterruptParameters.ui_Read = (devpriv->s_InterruptParameters.ui_Read + 1) % APCI1710_SAVE_INTERRUPT;
-
- break;
-
- case APCI1710_TIMER_READALLTIMER:
- /* Test the module number */
- if (b_ModulNbr < 4) {
- /* Test if 82X54 timer */
- if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_82X54_TIMER) {
- /* Test if timer 0 iniutialised */
- if (devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[0].b_82X54Init == 1) {
- /* Test if timer 1 iniutialised */
- if (devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[1].b_82X54Init == 1) {
- /* Test if timer 2 iniutialised */
- if (devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[2].b_82X54Init == 1) {
- /* Latch all counter */
- outl(0x17, devpriv->s_BoardInfos.ui_Address + 12 + (64 * b_ModulNbr));
-
- /* Read the timer 0 value */
- pul_TimerValueArray[0] = inl(devpriv->s_BoardInfos.ui_Address + 0 + (64 * b_ModulNbr));
-
- /* Read the timer 1 value */
- pul_TimerValueArray[1] = inl(devpriv->s_BoardInfos.ui_Address + 4 + (64 * b_ModulNbr));
-
- /* Read the timer 2 value */
- pul_TimerValueArray[2] = inl(devpriv->s_BoardInfos.ui_Address + 8 + (64 * b_ModulNbr));
- } else {
- /* Timer 2 not initialised see function */
- DPRINTK("Timer 2 not initialised see function\n");
- i_ReturnValue = -6;
- }
- } else {
- /* Timer 1 not initialised see function */
- DPRINTK("Timer 1 not initialised see function\n");
- i_ReturnValue = -5;
- }
- } else {
- /* Timer 0 not initialised see function */
- DPRINTK("Timer 0 not initialised see function\n");
- i_ReturnValue = -4;
- }
- } else {
- /* The module is not a TIMER module */
- DPRINTK("The module is not a TIMER module\n");
- i_ReturnValue = -3;
- }
- } else {
- /* Module number error */
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- } /* End of Switch */
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadTimerValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_TimerNbr, |
-| PULONG_ pul_TimerValue) |
-+----------------------------------------------------------------------------+
-| Task : Return the timer value from selected digital timer |
-| (b_TimerNbr) from selected timer module (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board |
-| APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number |
-| (0 to 3) |
-| unsigned char_ b_TimerNbr : Timer number to read |
-| (0 to 2) |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_TimerValue : Timer value |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: Timer selection wrong |
-| -4: The module is not a TIMER module |
-| -5: Timer not initialised see function |
-| "i_APCI1710_InitTimer" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_ReadTimerValue(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_TimerNbr,
- unsigned int *pul_TimerValue)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /* Test the module number */
- if (b_ModulNbr < 4) {
- /* Test if 82X54 timer */
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_82X54_TIMER) {
- /* Test the timer number */
- if (b_TimerNbr <= 2) {
- /* Test if timer initialised */
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_82X54ModuleInfo.
- s_82X54TimerInfo[b_TimerNbr].
- b_82X54Init == 1) {
- /* Latch the timer value */
- outl((2 << b_TimerNbr) | 0xD0,
- devpriv->s_BoardInfos.
- ui_Address + 12 +
- (64 * b_ModulNbr));
-
- /* Read the counter value */
- *pul_TimerValue =
- inl(devpriv->s_BoardInfos.
- ui_Address + (b_TimerNbr * 4) +
- (64 * b_ModulNbr));
- } else {
- /* Timer not initialised see function */
- DPRINTK("Timer not initialised see function\n");
- i_ReturnValue = -5;
- }
- } else {
- /* Timer selection wrong */
- DPRINTK("Timer selection wrong\n");
- i_ReturnValue = -3;
- } /* if ((b_TimerNbr >= 0) && (b_TimerNbr <= 2)) */
- } else {
- /* The module is not a TIMER module */
- DPRINTK("The module is not a TIMER module\n");
- i_ReturnValue = -4;
- }
- } else {
- /* Module number error */
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
- /*
- +----------------------------------------------------------------------------+
- | Function Name : _INT_ i_APCI1710_GetTimerOutputLevel |
- | (unsigned char_ b_BoardHandle, |
- | unsigned char_ b_ModulNbr, |
- | unsigned char_ b_TimerNbr, |
- | unsigned char *_ pb_OutputLevel) |
- +----------------------------------------------------------------------------+
- | Task : Return the output signal level (pb_OutputLevel) from |
- | selected digital timer (b_TimerNbr) from selected timer|
- | module (b_ModulNbr). |
- +----------------------------------------------------------------------------+
- | Input Parameters : unsigned char_ b_BoardHandle : Handle of board |
- | APCI-1710 |
- | unsigned char_ b_ModulNbr : Selected module number |
- | (0 to 3) |
- | unsigned char_ b_TimerNbr : Timer number to test |
- | (0 to 2) |
- +----------------------------------------------------------------------------+
- | Output Parameters : unsigned char *_ pb_OutputLevel : Output signal level |
- | 0 : The output is low |
- | 1 : The output is high |
- +----------------------------------------------------------------------------+
- | Return Value : 0: No error |
- | -1: The handle parameter of the board is wrong |
- | -2: Module selection wrong |
- | -3: Timer selection wrong |
- | -4: The module is not a TIMER module |
- | -5: Timer not initialised see function |
- | "i_APCI1710_InitTimer" |
- +----------------------------------------------------------------------------+
- */
-static int i_APCI1710_GetTimerOutputLevel(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_TimerNbr,
- unsigned char *pb_OutputLevel)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_TimerStatus;
-
- /* Test the module number */
- if (b_ModulNbr < 4) {
- /* Test if 82X54 timer */
- if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_82X54_TIMER) {
- /* Test the timer number */
- if (b_TimerNbr <= 2) {
- /* Test if timer initialised */
- if (devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_82X54Init == 1) {
- /* Latch the timer value */
- outl((2 << b_TimerNbr) | 0xE0, devpriv->s_BoardInfos.ui_Address + 12 + (64 * b_ModulNbr));
-
- /* Read the timer status */
- dw_TimerStatus = inl(devpriv->s_BoardInfos.ui_Address + 16 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
-
- *pb_OutputLevel = (unsigned char) (((dw_TimerStatus >> 7) & 1) ^ devpriv-> s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_OutputLevel);
- } else {
- /* Timer not initialised see function */
- DPRINTK("Timer not initialised see function\n");
- i_ReturnValue = -5;
- }
- } else {
- /* Timer selection wrong */
- DPRINTK("Timer selection wrong\n");
- i_ReturnValue = -3;
- } /* if ((b_TimerNbr >= 0) && (b_TimerNbr <= 2)) */
- } else {
- /* The module is not a TIMER module */
- DPRINTK("The module is not a TIMER module\n");
- i_ReturnValue = -4;
- }
- } else {
- /* Module number error */
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetTimerProgressStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_TimerNbr, |
-| unsigned char *_ pb_TimerStatus) |
-+----------------------------------------------------------------------------+
-| Task : Return the progress status (pb_TimerStatus) from |
-| selected digital timer (b_TimerNbr) from selected timer|
-| module (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board |
-| APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number |
-| (0 to 3) |
-| unsigned char_ b_TimerNbr : Timer number to test |
-| (0 to 2) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_TimerStatus : Output signal level |
-| 0 : Timer not in progress |
-| 1 : Timer in progress |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: Timer selection wrong |
-| -4: The module is not a TIMER module |
-| -5: Timer not initialised see function |
-| "i_APCI1710_InitTimer" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_GetTimerProgressStatus(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_TimerNbr,
- unsigned char *pb_TimerStatus)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_TimerStatus;
-
- /* Test the module number */
- if (b_ModulNbr < 4) {
- /* Test if 82X54 timer */
-
- if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_82X54_TIMER) {
- /* Test the timer number */
- if (b_TimerNbr <= 2) {
- /* Test if timer initialised */
- if (devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_82X54Init == 1) {
- /* Latch the timer value */
- outl((2 << b_TimerNbr) | 0xE0, devpriv->s_BoardInfos.ui_Address + 12 + (64 * b_ModulNbr));
-
- /* Read the timer status */
- dw_TimerStatus = inl(devpriv->s_BoardInfos.ui_Address + 16 + (b_TimerNbr * 4) + (64 * b_ModulNbr));
-
- *pb_TimerStatus = (unsigned char) ((dw_TimerStatus) >> 8) & 1;
- printk("ProgressStatus : %d", *pb_TimerStatus);
- } else {
- /* Timer not initialised see function */
- i_ReturnValue = -5;
- }
- } else {
- /* Timer selection wrong */
- i_ReturnValue = -3;
- } /* if ((b_TimerNbr >= 0) && (b_TimerNbr <= 2)) */
- } else {
- /* The module is not a TIMER module */
-
- i_ReturnValue = -4;
- }
- } else {
- /* Module number error */
-
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_WriteTimerValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_TimerNbr, |
-| ULONG_ ul_WriteValue) |
-+----------------------------------------------------------------------------+
-| Task : Write the value (ul_WriteValue) into the selected timer|
-| (b_TimerNbr) from selected timer module (b_ModulNbr). |
-| The action in depend of the time mode selection. |
-| See timer mode description table. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board |
-| APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number |
-| (0 to 3) |
-| unsigned char_ b_TimerNbr : Timer number to write |
-| (0 to 2) |
-| ULONG_ ul_WriteValue : Value to write |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: Timer selection wrong |
-| -4: The module is not a TIMER module |
-| -5: Timer not initialised see function |
-| "i_APCI1710_InitTimer" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_WriteTimerValue(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_TimerNbr,
- unsigned int ul_WriteValue)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /* Test the module number */
- if (b_ModulNbr < 4) {
- /* Test if 82X54 timer */
- if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_82X54_TIMER) {
- /* Test the timer number */
- if (b_TimerNbr <= 2) {
- /* Test if timer initialised */
- if (devpriv->s_ModuleInfo[b_ModulNbr].s_82X54ModuleInfo.s_82X54TimerInfo[b_TimerNbr].b_82X54Init == 1) {
- /* Write the value */
- outl(ul_WriteValue, devpriv->s_BoardInfos.ui_Address + (b_TimerNbr * 4) + (64 * b_ModulNbr));
- } else {
- /* Timer not initialised see function */
- DPRINTK("Timer not initialised see function\n");
- i_ReturnValue = -5;
- }
- } else {
- /* Timer selection wrong */
- DPRINTK("Timer selection wrong\n");
- i_ReturnValue = -3;
- } /* if ((b_TimerNbr >= 0) && (b_TimerNbr <= 2)) */
- } else {
- /* The module is not a TIMER module */
- DPRINTK("The module is not a TIMER module\n");
- i_ReturnValue = -4;
- }
- } else {
- /* Module number error */
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name :INT i_APCI1710_InsnBitsTimer(struct comedi_device *dev,
-struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Read write functions for Timer |
-+----------------------------------------------------------------------------+
-| Input Parameters :
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value :
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InsnBitsTimer(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned char b_BitsType;
- int i_ReturnValue = 0;
- b_BitsType = data[0];
-
- printk("\n82X54");
-
- switch (b_BitsType) {
- case APCI1710_TIMER_READVALUE:
- i_ReturnValue = i_APCI1710_ReadTimerValue(dev,
- (unsigned char)CR_AREF(insn->chanspec),
- (unsigned char)CR_CHAN(insn->chanspec),
- (unsigned int *) &data[0]);
- break;
-
- case APCI1710_TIMER_GETOUTPUTLEVEL:
- i_ReturnValue = i_APCI1710_GetTimerOutputLevel(dev,
- (unsigned char)CR_AREF(insn->chanspec),
- (unsigned char)CR_CHAN(insn->chanspec),
- (unsigned char *) &data[0]);
- break;
-
- case APCI1710_TIMER_GETPROGRESSSTATUS:
- i_ReturnValue = i_APCI1710_GetTimerProgressStatus(dev,
- (unsigned char)CR_AREF(insn->chanspec),
- (unsigned char)CR_CHAN(insn->chanspec),
- (unsigned char *)&data[0]);
- break;
-
- case APCI1710_TIMER_WRITEVALUE:
- i_ReturnValue = i_APCI1710_WriteTimerValue(dev,
- (unsigned char)CR_AREF(insn->chanspec),
- (unsigned char)CR_CHAN(insn->chanspec),
- (unsigned int)data[1]);
-
- break;
-
- default:
- printk("Bits Config Parameter Wrong\n");
- i_ReturnValue = -1;
- }
-
- if (i_ReturnValue >= 0)
- i_ReturnValue = insn->n;
- return i_ReturnValue;
-}
diff --git a/drivers/staging/comedi/drivers/addi-data/APCI1710_Chrono.c b/drivers/staging/comedi/drivers/addi-data/APCI1710_Chrono.c
deleted file mode 100644
index d91f586fdd2..00000000000
--- a/drivers/staging/comedi/drivers/addi-data/APCI1710_Chrono.c
+++ /dev/null
@@ -1,2050 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
- Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-@endverbatim
-*/
-/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : API APCI1710 | Compiler : gcc |
- | Module name : CHRONO.C | Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-----------------------------------------------------------------------+
- | Description : APCI-1710 chronometer module |
- | |
- | |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +-----------------------------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | 29/06/98 | S. Weber | Digital input / output implementation |
- |----------|-----------|------------------------------------------------|
- | 08/05/00 | Guinot C | - 0400/0228 All Function in RING 0 |
- | | | available |
- +-----------------------------------------------------------------------+
- | | | |
- | | | |
- +-----------------------------------------------------------------------+
-*/
-
-#define APCI1710_30MHZ 30
-#define APCI1710_33MHZ 33
-#define APCI1710_40MHZ 40
-
-#define APCI1710_SINGLE 0
-#define APCI1710_CONTINUOUS 1
-
-#define APCI1710_CHRONO_PROGRESS_STATUS 0
-#define APCI1710_CHRONO_READVALUE 1
-#define APCI1710_CHRONO_CONVERTVALUE 2
-#define APCI1710_CHRONO_READINTERRUPT 3
-
-#define APCI1710_CHRONO_SET_CHANNELON 0
-#define APCI1710_CHRONO_SET_CHANNELOFF 1
-#define APCI1710_CHRONO_READ_CHANNEL 2
-#define APCI1710_CHRONO_READ_PORT 3
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitChrono |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_ChronoMode, |
-| unsigned char_ b_PCIInputClock, |
-| unsigned char_ b_TimingUnit, |
-| ULONG_ ul_TimingInterval, |
-| PULONG_ pul_RealTimingInterval)
-
-+----------------------------------------------------------------------------+
-| Task : Configure the chronometer operating mode (b_ChronoMode)|
-| from selected module (b_ModulNbr). |
-| The ul_TimingInterval and ul_TimingUnit determine the |
-| timing base for the measurement. |
-| The pul_RealTimingInterval parameter returns the real |
-| timing value. You must call this function before |
-| calling any other function that accesses the |
-| chronometer. |
-| |
-| With this functionality of the APCI-1710 you can |
-| measure the time between two events. |
-| |
-| Modes 0 and 1 are suitable for period measurement. |
-| Modes 2 and 3 are suitable for frequency measurement. |
-| Modes 4 to 7 are suitable for measuring the time |
-| between two events. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr CR_AREF(insn->chanspec) : Module number to configure |
-| (0 to 3) |
-| unsigned char_ b_ChronoMode data[0] : Chronometer action mode |
-| (0 to 7). |
-| unsigned char_ b_PCIInputClock data[1] : Selection from PCI bus clock|
-| - APCI1710_30MHZ : |
-| The PC has a PCI bus |
-| clock of 30 MHz |
-| - APCI1710_33MHZ : |
-| The PC has a PCI bus |
-| clock of 33 MHz |
-| - APCI1710_40MHZ |
-| The APCI-1710 has an |
-| integrated 40 MHz |
-| quartz. |
-| unsigned char_ b_TimingUnit data[2] : Base timing unit (0 to 4) |
-| 0 : ns |
-| 1 : µs |
-| 2 : ms |
-| 3 : s |
-| 4 : mn (minutes) |
-| ULONG_ ul_TimingInterval : data[3] Base timing value. |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_RealTimingInterval : Real base timing |
-| value.
-| data[0]
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a Chronometer module |
-| -4: Chronometer mode selection is wrong |
-| -5: The selected PCI input clock is wrong |
-| -6: Timing unit selection is wrong |
-| -7: Base timing selection is wrong |
-| -8: You cannot use the 40MHz clock selection with |
-| this board |
-| -9: You cannot use the 40MHz clock selection with |
-| this CHRONOS version |
-+----------------------------------------------------------------------------+
-*/
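-/*
- * Worked example (editor's illustration, not part of the original driver):
- * with b_PCIInputClock = APCI1710_33MHZ and b_TimingUnit = 1 (microseconds),
- * a requested ul_TimingInterval of 1000 gives a raw timer factor of about
- * 1000 * 33 = 33000 clock ticks, and pul_RealTimingInterval is returned as
- * 1000; the -2 offset and the 0.99392 correction are then applied to the
- * timer value written to the hardware below.
- */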
-static int i_APCI1710_InsnConfigInitChrono(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int ul_TimerValue = 0;
- unsigned int ul_TimingInterval = 0;
- unsigned int ul_RealTimingInterval = 0;
- double d_RealTimingInterval = 0;
- unsigned int dw_ModeArray[8] =
- { 0x01, 0x05, 0x00, 0x04, 0x02, 0x0E, 0x0A, 0x06 };
- unsigned char b_ModulNbr, b_ChronoMode, b_PCIInputClock, b_TimingUnit;
-
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_ChronoMode = (unsigned char) data[0];
- b_PCIInputClock = (unsigned char) data[1];
- b_TimingUnit = (unsigned char) data[2];
- ul_TimingInterval = (unsigned int) data[3];
- i_ReturnValue = insn->n;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if chronometer */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_CHRONOMETER) {
- /*****************************/
- /* Test the chronometer mode */
- /*****************************/
-
- if (b_ChronoMode <= 7) {
- /**************************/
- /* Test the PCI bus clock */
- /**************************/
-
- if ((b_PCIInputClock == APCI1710_30MHZ) ||
- (b_PCIInputClock == APCI1710_33MHZ) ||
- (b_PCIInputClock == APCI1710_40MHZ)) {
- /*************************/
- /* Test the timing unity */
- /*************************/
-
- if (b_TimingUnit <= 4) {
- /**********************************/
- /* Test the base timing selection */
- /**********************************/
-
- if (((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 0) && (ul_TimingInterval >= 66) && (ul_TimingInterval <= 0xFFFFFFFFUL)) || ((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 1) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 143165576UL)) || ((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 2) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 143165UL)) || ((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 3) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 143UL)) || ((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 4) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 2UL)) || ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 0) && (ul_TimingInterval >= 60) && (ul_TimingInterval <= 0xFFFFFFFFUL)) || ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 1) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 130150240UL)) || ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 2) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 130150UL)) || ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 3) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 130UL)) || ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 4) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 2UL)) || ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 0) && (ul_TimingInterval >= 50) && (ul_TimingInterval <= 0xFFFFFFFFUL)) || ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 1) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 107374182UL)) || ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 2) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 107374UL)) || ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 3) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 107UL)) || ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 4) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 1UL))) {
- /**************************/
- /* Test the board version */
- /**************************/
-
- if (((b_PCIInputClock == APCI1710_40MHZ) && (devpriv->s_BoardInfos.b_BoardVersion > 0)) || (b_PCIInputClock != APCI1710_40MHZ)) {
- /************************/
- /* Test the TOR version */
- /************************/
-
- if (((b_PCIInputClock == APCI1710_40MHZ) && ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) >= 0x3131)) || (b_PCIInputClock != APCI1710_40MHZ)) {
- fpu_begin();
-
- /****************************************/
- /* Calculate the timer 0 division factor */
- /****************************************/
-
- switch (b_TimingUnit) {
- /******/
- /* ns */
- /******/
-
- case 0:
- /* Timer 0 factor */
- ul_TimerValue = (unsigned int) (ul_TimingInterval * (0.001 * b_PCIInputClock));
-
- /* Round the value */
- if ((double)((double)ul_TimingInterval * (0.001 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
- ul_TimerValue = ul_TimerValue + 1;
- }
-
- /* Calculate the real timing */
- ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (0.001 * (double)b_PCIInputClock));
- d_RealTimingInterval = (double)ul_TimerValue / (0.001 * (double)b_PCIInputClock);
-
- if ((double)((double)ul_TimerValue / (0.001 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
- ul_RealTimingInterval = ul_RealTimingInterval + 1;
- }
-
- ul_TimingInterval = ul_TimingInterval - 1;
- ul_TimerValue = ul_TimerValue - 2;
- if (b_PCIInputClock != APCI1710_40MHZ) {
- ul_TimerValue = (unsigned int) ((double)(ul_TimerValue) * 0.99392);
- }
-
- break;
-
- /******/
- /* µs */
- /******/
-
- case 1:
- /* Timer 0 factor */
- ul_TimerValue = (unsigned int) (ul_TimingInterval * (1.0 * b_PCIInputClock));
-
- /* Round the value */
- if ((double)((double)ul_TimingInterval * (1.0 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
- ul_TimerValue = ul_TimerValue + 1;
- }
-
- /* Calculate the real timing */
- ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (1.0 * (double)b_PCIInputClock));
- d_RealTimingInterval = (double)ul_TimerValue / ((double)1.0 * (double)b_PCIInputClock);
-
- if ((double)((double)ul_TimerValue / (1.0 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
- ul_RealTimingInterval = ul_RealTimingInterval + 1;
- }
-
- ul_TimingInterval = ul_TimingInterval - 1;
- ul_TimerValue = ul_TimerValue - 2;
- if (b_PCIInputClock != APCI1710_40MHZ) {
- ul_TimerValue = (unsigned int) ((double)(ul_TimerValue) * 0.99392);
- }
-
- break;
-
- /******/
- /* ms */
- /******/
-
- case 2:
- /* Timer 0 factor */
- ul_TimerValue = ul_TimingInterval * (1000 * b_PCIInputClock);
-
- /* Round the value */
- if ((double)((double)ul_TimingInterval * (1000.0 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
- ul_TimerValue = ul_TimerValue + 1;
- }
-
- /* Calculate the real timing */
- ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (1000.0 * (double)b_PCIInputClock));
- d_RealTimingInterval = (double)ul_TimerValue / (1000.0 * (double)b_PCIInputClock);
-
- if ((double)((double)ul_TimerValue / (1000.0 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
- ul_RealTimingInterval = ul_RealTimingInterval + 1;
- }
-
- ul_TimingInterval = ul_TimingInterval - 1;
- ul_TimerValue = ul_TimerValue - 2;
- if (b_PCIInputClock != APCI1710_40MHZ) {
- ul_TimerValue = (unsigned int) ((double)(ul_TimerValue) * 0.99392);
- }
-
- break;
-
- /*****/
- /* s */
- /*****/
-
- case 3:
- /* Timer 0 factor */
- ul_TimerValue = (unsigned int) (ul_TimingInterval * (1000000.0 * b_PCIInputClock));
-
- /* Round the value */
- if ((double)((double)ul_TimingInterval * (1000000.0 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
- ul_TimerValue = ul_TimerValue + 1;
- }
-
- /* Calculate the real timing */
- ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (1000000.0 * (double)b_PCIInputClock));
- d_RealTimingInterval = (double)ul_TimerValue / (1000000.0 * (double)b_PCIInputClock);
-
- if ((double)((double)ul_TimerValue / (1000000.0 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
- ul_RealTimingInterval = ul_RealTimingInterval + 1;
- }
-
- ul_TimingInterval = ul_TimingInterval - 1;
- ul_TimerValue = ul_TimerValue - 2;
- if (b_PCIInputClock != APCI1710_40MHZ) {
- ul_TimerValue = (unsigned int) ((double)(ul_TimerValue) * 0.99392);
- }
-
- break;
-
- /******/
- /* mn */
- /******/
-
- case 4:
- /* Timer 0 factor */
- ul_TimerValue = (unsigned int) ((ul_TimingInterval * 60) * (1000000.0 * b_PCIInputClock));
-
- /* Round the value */
- if ((double)((double)(ul_TimingInterval * 60.0) * (1000000.0 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
- ul_TimerValue = ul_TimerValue + 1;
- }
-
- /* Calculate the real timing */
- ul_RealTimingInterval = (unsigned int) (ul_TimerValue / (1000000.0 * (double)b_PCIInputClock)) / 60;
- d_RealTimingInterval = ((double)ul_TimerValue / (0.001 * (double)b_PCIInputClock)) / 60.0;
-
- if ((double)(((double)ul_TimerValue / (1000000.0 * (double)b_PCIInputClock)) / 60.0) >= (double)((double)ul_RealTimingInterval + 0.5)) {
- ul_RealTimingInterval = ul_RealTimingInterval + 1;
- }
-
- ul_TimingInterval = ul_TimingInterval - 1;
- ul_TimerValue = ul_TimerValue - 2;
- if (b_PCIInputClock != APCI1710_40MHZ) {
- ul_TimerValue = (unsigned int) ((double)(ul_TimerValue) * 0.99392);
- }
-
- break;
- }
-
- fpu_end();
-
- /****************************/
- /* Save the PCI input clock */
- /****************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_PCIInputClock = b_PCIInputClock;
-
- /*************************/
- /* Save the timing unity */
- /*************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_TimingUnit = b_TimingUnit;
-
- /************************/
- /* Save the base timing */
- /************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.d_TimingInterval = d_RealTimingInterval;
-
- /****************************/
- /* Set the chronometer mode */
- /****************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg = dw_ModeArray[b_ChronoMode];
-
- /***********************/
- /* Test if 40 MHz used */
- /***********************/
-
- if (b_PCIInputClock == APCI1710_40MHZ) {
- devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg = devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg | 0x80;
- }
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg, devpriv->s_BoardInfos.ui_Address + 16 + (64 * b_ModulNbr));
-
- /***********************/
- /* Write timer 0 value */
- /***********************/
-
- outl(ul_TimerValue, devpriv->s_BoardInfos.ui_Address + (64 * b_ModulNbr));
-
- /*********************/
- /* Chronometer init. */
- /*********************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_ChronoInit = 1;
- } else {
- /***********************************************/
- /* TOR version error for 40MHz clock selection */
- /***********************************************/
-
- DPRINTK("TOR version error for 40MHz clock selection\n");
- i_ReturnValue = -9;
- }
- } else {
- /**************************************************************/
- /* You can not use the 40MHz clock selection with this board */
- /**************************************************************/
-
- DPRINTK("You can not used the 40MHz clock selection with this board\n");
- i_ReturnValue = -8;
- }
- } else {
- /**********************************/
- /* Base timing selection is wrong */
- /**********************************/
-
- DPRINTK("Base timing selection is wrong\n");
- i_ReturnValue = -7;
- }
- } /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
- else {
- /***********************************/
- /* Timing unity selection is wrong */
- /***********************************/
-
- DPRINTK("Timing unity selection is wrong\n");
- i_ReturnValue = -6;
- } /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
- } /* if ((b_PCIInputClock == APCI1710_30MHZ) || (b_PCIInputClock == APCI1710_33MHZ)) */
- else {
- /*****************************************/
- /* The selected PCI input clock is wrong */
- /*****************************************/
-
- DPRINTK("The selected PCI input clock is wrong\n");
- i_ReturnValue = -5;
- } /* if ((b_PCIInputClock == APCI1710_30MHZ) || (b_PCIInputClock == APCI1710_33MHZ)) */
- } /* if (b_ChronoMode >= 0 && b_ChronoMode <= 7) */
- else {
- /***************************************/
- /* Chronometer mode selection is wrong */
- /***************************************/
-
- DPRINTK("Chronometer mode selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_ChronoMode >= 0 && b_ChronoMode <= 7) */
- } else {
- /******************************************/
- /* The module is not a Chronometer module */
- /******************************************/
-
- DPRINTK("The module is not a Chronometer module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
- data[0] = ul_RealTimingInterval;
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_EnableChrono |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_CycleMode, |
-| unsigned char_ b_InterruptEnable)
-int i_APCI1710_InsnWriteEnableDisableChrono(struct comedi_device *dev,
-struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Enable the chronometer on the selected module |
-| (b_ModulNbr). You must call the |
-| "i_APCI1710_InitChrono" function before calling this |
-| function. |
-| If you enable the chronometer interrupt, the |
-| chronometer generates an interrupt after the stop |
-| signal. See function "i_APCI1710_SetBoardIntRoutineX" |
-| and the interrupt mask description chapter of this |
-| manual. |
-| The b_CycleMode parameter determines whether a single |
-| cycle or several cycles are measured. |
-
-| Disable the chronometer on the selected module |
-| (b_ModulNbr). If you disable the chronometer after a |
-| start signal has occurred and you restart it with the |
-| "i_APCI1710_EnableChrono" function, this start signal |
-| is ignored if no stop signal occurs. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr CR_AREF(chanspec) : Selected module number (0 to 3) |
- data[0] ENABle/Disable chrono
-| unsigned char_ b_CycleMode : Selected the chronometer |
-| data[1] acquisition mode |
-| unsigned char_ b_InterruptEnable : Enable or disable the |
-| data[2] chronometer interrupt. |
-| APCI1710_ENABLE: |
-| Enable the chronometer |
-| interrupt |
-| APCI1710_DISABLE: |
-| Disable the chronometer |
-| interrupt |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a Chronometer module |
-| -4: Chronometer not initialised see function |
-| "i_APCI1710_InitChrono" |
-| -5: Chronometer acquisition mode cycle is wrong |
-| -6: Interrupt parameter is wrong |
-| -7: Interrupt function not initialised. |
-| See function "i_APCI1710_SetBoardIntRoutineX"
- -8: data[0] wrong input |
-+----------------------------------------------------------------------------+
-*/
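-/*
- * Minimal usage sketch (editor's illustration, not part of the original
- * driver): a caller issues this insn with data[0] = APCI1710_ENABLE or
- * APCI1710_DISABLE, data[1] = APCI1710_SINGLE or APCI1710_CONTINUOUS,
- * data[2] = APCI1710_ENABLE/APCI1710_DISABLE for the interrupt, and the
- * module number packed into the AREF field of insn->chanspec.
- */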
-static int i_APCI1710_InsnWriteEnableDisableChrono(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned char b_ModulNbr, b_CycleMode, b_InterruptEnable, b_Action;
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_Action = (unsigned char) data[0];
- b_CycleMode = (unsigned char) data[1];
- b_InterruptEnable = (unsigned char) data[2];
- i_ReturnValue = insn->n;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if chronometer */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_CHRONOMETER) {
- /***********************************/
- /* Test if chronometer initialised */
- /***********************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_ChronoModuleInfo.b_ChronoInit == 1) {
-
- switch (b_Action) {
-
- case APCI1710_ENABLE:
-
- /*********************************/
- /* Test the cycle mode parameter */
- /*********************************/
-
- if ((b_CycleMode == APCI1710_SINGLE)
- || (b_CycleMode ==
- APCI1710_CONTINUOUS)) {
- /***************************/
- /* Test the interrupt flag */
- /***************************/
-
- if ((b_InterruptEnable == APCI1710_ENABLE)
- || (b_InterruptEnable == APCI1710_DISABLE)) {
-
- /***************************/
- /* Save the interrupt flag */
- /***************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_InterruptMask = b_InterruptEnable;
-
- /***********************/
- /* Save the cycle mode */
- /***********************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_CycleMode = b_CycleMode;
-
- devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg =
- (devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg & 0x8F) |
- ((1 & b_InterruptEnable) << 5) | ((1 & b_CycleMode) << 6) | 0x10;
-
- /*****************************/
- /* Test if interrupt enabled */
- /*****************************/
-
- if (b_InterruptEnable == APCI1710_ENABLE) {
- /****************************/
- /* Clear the interrupt flag */
- /****************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg, devpriv->s_BoardInfos.ui_Address + 32 + (64 * b_ModulNbr));
- devpriv->tsk_Current = current; /* Save the current process task structure */
- }
-
- /***********************************/
- /* Enable or disable the interrupt */
- /* Enable the chronometer */
- /***********************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.dw_ConfigReg, devpriv->s_BoardInfos.ui_Address + 16 + (64 * b_ModulNbr));
-
- /*************************/
- /* Clear status register */
- /*************************/
-
- outl(0, devpriv->s_BoardInfos.ui_Address + 36 + (64 * b_ModulNbr));
-
- } /* if ((b_InterruptEnable == APCI1710_ENABLE) || (b_InterruptEnable == APCI1710_DISABLE)) */
- else {
- /********************************/
- /* Interrupt parameter is wrong */
- /********************************/
-
- DPRINTK("Interrupt parameter is wrong\n");
- i_ReturnValue = -6;
- } /* if ((b_InterruptEnable == APCI1710_ENABLE) || (b_InterruptEnable == APCI1710_DISABLE)) */
- } /* if ((b_CycleMode == APCI1710_SINGLE) || (b_CycleMode == APCI1710_CONTINUOUS)) */
- else {
- /***********************************************/
- /* Chronometer acquisition mode cycle is wrong */
- /***********************************************/
-
- DPRINTK("Chronometer acquisition mode cycle is wrong\n");
- i_ReturnValue = -5;
- } /* if ((b_CycleMode == APCI1710_SINGLE) || (b_CycleMode == APCI1710_CONTINUOUS)) */
- break;
-
- case APCI1710_DISABLE:
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_ChronoModuleInfo.
- b_InterruptMask = 0;
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_ChronoModuleInfo.
- dw_ConfigReg =
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_ChronoModuleInfo.
- dw_ConfigReg & 0x2F;
-
- /***************************/
- /* Disable the interrupt */
- /* Disable the chronometer */
- /***************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_ChronoModuleInfo.dw_ConfigReg,
- devpriv->s_BoardInfos.
- ui_Address + 16 +
- (64 * b_ModulNbr));
-
- /***************************/
- /* Test if continuous mode */
- /***************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_ChronoModuleInfo.
- b_CycleMode ==
- APCI1710_CONTINUOUS) {
- /*************************/
- /* Clear status register */
- /*************************/
-
- outl(0, devpriv->s_BoardInfos.
- ui_Address + 36 +
- (64 * b_ModulNbr));
- }
- break;
-
- default:
- DPRINTK("Inputs wrong! Enable or Disable chrono\n");
- i_ReturnValue = -8;
- } /* switch ENABLE/DISABLE */
- } else {
- /*******************************/
- /* Chronometer not initialised */
- /*******************************/
-
- DPRINTK("Chronometer not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /******************************************/
- /* The module is not a Chronometer module */
- /******************************************/
-
- DPRINTK("The module is not a Chronometer module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetChronoProgressStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char *_ pb_ChronoStatus) |
-+----------------------------------------------------------------------------+
-| Task : Return the chronometer status (pb_ChronoStatus) from |
-| selected chronometer module (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pb_ChronoStatus : Return the chronometer |
-| status. |
-| 0 : Measurement not started.|
-| No start signal occurred.|
-| 1 : Measurement started. |
-| A start signal occurred. |
-| 2 : Measurement stopped. |
-| A stop signal occurred. |
-| The measurement is |
-| terminated. |
-| 3 : An overflow occurred. |
-| You must change the base |
-| timing with the function |
-| "i_APCI1710_InitChrono" |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a Chronometer module |
-| -4: Chronometer not initialised see function |
-| "i_APCI1710_InitChrono" |
-+----------------------------------------------------------------------------+
-*/
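-/*
- * Example (editor's illustration, not part of the original driver): the
- * status register read below uses bit 0 for "start signal seen", bit 1 for
- * "stop signal seen" and bit 3 for overflow, so dw_Status = 0x3 decodes to
- * *pb_ChronoStatus = 2 (measurement started and then stopped).
- */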
-static int i_APCI1710_GetChronoProgressStatus(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char *pb_ChronoStatus)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_Status;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if chronometer */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_CHRONOMETER) {
- /***********************************/
- /* Test if chronometer initialised */
- /***********************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_ChronoModuleInfo.b_ChronoInit == 1) {
-
- dw_Status = inl(devpriv->s_BoardInfos.
- ui_Address + 8 + (64 * b_ModulNbr));
-
- /********************/
- /* Test if overflow */
- /********************/
-
- if ((dw_Status & 8) == 8) {
- /******************/
- /* Overflow occur */
- /******************/
-
- *pb_ChronoStatus = 3;
- } /* if ((dw_Status & 8) == 8) */
- else {
- /*******************************/
- /* Test if measurement stopped */
- /*******************************/
-
- if ((dw_Status & 2) == 2) {
- /***********************/
- /* A stop signal occur */
- /***********************/
-
- *pb_ChronoStatus = 2;
- } /* if ((dw_Status & 2) == 2) */
- else {
- /*******************************/
- /* Test if measurement started */
- /*******************************/
-
- if ((dw_Status & 1) == 1) {
- /************************/
- /* A start signal occur */
- /************************/
-
- *pb_ChronoStatus = 1;
- } /* if ((dw_Status & 1) == 1) */
- else {
- /***************************/
- /* Measurement not started */
- /***************************/
-
- *pb_ChronoStatus = 0;
- } /* if ((dw_Status & 1) == 1) */
- } /* if ((dw_Status & 2) == 2) */
- } /* if ((dw_Status & 8) == 8) */
- } else {
- /*******************************/
- /* Chronometer not initialised */
- /*******************************/
- DPRINTK("Chronometer not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /******************************************/
- /* The module is not a Chronometer module */
- /******************************************/
- DPRINTK("The module is not a Chronometer module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadChronoValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned int_ ui_TimeOut, |
-| unsigned char *_ pb_ChronoStatus, |
-| PULONG_ pul_ChronoValue) |
-+----------------------------------------------------------------------------+
-| Task : Return the chronometer status (pb_ChronoStatus) and the|
-| timing value (pul_ChronoValue) after a stop signal has |
-| occurred on the selected chronometer module |
-| (b_ModulNbr). |
-| This function is only available if you have disabled |
-| the interrupt functionality. See function |
-| "i_APCI1710_EnableChrono" and the interrupt mask |
-| description chapter. |
-| You can test the chronometer status with the |
-| "i_APCI1710_GetChronoProgressStatus" function. |
-| |
-| The value returned in the pul_ChronoValue parameter is |
-| not the real measured timing. |
-| You must use the "i_APCI1710_ConvertChronoValue" |
-| function or perform this operation to calculate the |
-| timing: |
-| |
-| Timing = pul_ChronoValue * pul_RealTimingInterval. |
-| |
-| pul_RealTimingInterval is the parameter returned by the|
-| "i_APCI1710_InitChrono" function and the time unit is |
-| the b_TimingUnit from the "i_APCI1710_InitChrono" |
-| function. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pb_ChronoStatus : Return the chronometer |
-| status. |
-| 0 : Measurement not started.|
-| No start signal occurred.|
-| 1 : Measurement started. |
-| A start signal occurred. |
-| 2 : Measurement stopped. |
-| A stop signal occurred. |
-| The measurement is |
-| terminated. |
-| 3 : An overflow occurred. |
-| You must change the base |
-| timing with the function |
-| "i_APCI1710_InitChrono" |
-| unsigned int * pul_ChronoValue : Chronometer timing value. |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a Chronometer module |
-| -4: Chronometer not initialised see function |
-| "i_APCI1710_InitChrono" |
-| -5: Timeout parameter is wrong (0 to 65535) |
-| -6: Interrupt routine installed. You can not read |
-| directly the chronometer measured timing. |
-+----------------------------------------------------------------------------+
-*/
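-/*
- * Worked example (editor's illustration, not part of the original driver):
- * if "i_APCI1710_InitChrono" returned pul_RealTimingInterval = 1000 with
- * b_TimingUnit = 0 (ns), a pul_ChronoValue of 5 corresponds to
- * 5 * 1000 = 5000 ns of measured time.
- */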
-static int i_APCI1710_ReadChronoValue(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned int ui_TimeOut,
- unsigned char *pb_ChronoStatus,
- unsigned int *pul_ChronoValue)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_Status;
- unsigned int dw_TimeOut = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if chronometer */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_CHRONOMETER) {
- /***********************************/
- /* Test if chronometer initialised */
- /***********************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_ChronoModuleInfo.b_ChronoInit == 1) {
- /******************************/
- /* Test the timeout parameter */
- /******************************/
-
- if (ui_TimeOut <= 65535UL) {
-
- for (;;) {
- /*******************/
- /* Read the status */
- /*******************/
-
- dw_Status = inl(devpriv->s_BoardInfos.ui_Address + 8 + (64 * b_ModulNbr));
-
- /********************/
- /* Test if overflow */
- /********************/
-
- if ((dw_Status & 8) == 8) {
- /******************/
- /* Overflow occur */
- /******************/
-
- *pb_ChronoStatus = 3;
-
- /***************************/
- /* Test if continuous mode */
- /***************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_CycleMode == APCI1710_CONTINUOUS) {
- /*************************/
- /* Clear status register */
- /*************************/
-
- outl(0, devpriv->s_BoardInfos.ui_Address + 36 + (64 * b_ModulNbr));
- }
-
- break;
- } /* if ((dw_Status & 8) == 8) */
- else {
- /*******************************/
- /* Test if measurement stopped */
- /*******************************/
-
- if ((dw_Status & 2) == 2) {
- /***********************/
- /* A stop signal occur */
- /***********************/
-
- *pb_ChronoStatus = 2;
-
- /***************************/
- /* Test if continuous mode */
- /***************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.b_CycleMode == APCI1710_CONTINUOUS) {
- /*************************/
- /* Clear status register */
- /*************************/
-
- outl(0, devpriv->s_BoardInfos.ui_Address + 36 + (64 * b_ModulNbr));
- }
- break;
- } /* if ((dw_Status & 2) == 2) */
- else {
- /*******************************/
- /* Test if measurement started */
- /*******************************/
-
- if ((dw_Status & 1) == 1) {
- /************************/
- /* A start signal occur */
- /************************/
-
- *pb_ChronoStatus = 1;
- } /* if ((dw_Status & 1) == 1) */
- else {
- /***************************/
- /* Measurement not started */
- /***************************/
-
- *pb_ChronoStatus = 0;
- } /* if ((dw_Status & 1) == 1) */
- } /* if ((dw_Status & 2) == 2) */
- } /* if ((dw_Status & 8) == 8) */
-
- if (dw_TimeOut == ui_TimeOut) {
- /*****************/
- /* Timeout occur */
- /*****************/
-
- break;
- } else {
- /*************************/
- /* Increment the timeout */
- /*************************/
-
- dw_TimeOut =
- dw_TimeOut + 1;
- mdelay(1000);
-
- }
- } /* for (;;) */
-
- /*****************************/
- /* Test if stop signal occur */
- /*****************************/
-
- if (*pb_ChronoStatus == 2) {
- /**********************************/
- /* Read the measured timing value */
- /**********************************/
-
- *pul_ChronoValue =
- inl(devpriv->
- s_BoardInfos.
- ui_Address + 4 +
- (64 * b_ModulNbr));
-
- if (*pul_ChronoValue != 0) {
- *pul_ChronoValue =
- *pul_ChronoValue
- - 1;
- }
- } else {
- /*************************/
- /* Test if timeout occur */
- /*************************/
-
- if ((*pb_ChronoStatus != 3)
- && (dw_TimeOut ==
- ui_TimeOut)
- && (ui_TimeOut != 0)) {
- /*****************/
- /* Timeout occur */
- /*****************/
-
- *pb_ChronoStatus = 4;
- }
- }
-
- } else {
- /******************************/
- /* Timeout parameter is wrong */
- /******************************/
- DPRINTK("Timeout parameter is wrong\n");
- i_ReturnValue = -5;
- }
- } else {
- /*******************************/
- /* Chronometer not initialised */
- /*******************************/
- DPRINTK("Chronometer not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /******************************************/
- /* The module is not a Chronometer module */
- /******************************************/
- DPRINTK("The module is not a Chronometer module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ConvertChronoValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| ULONG_ ul_ChronoValue, |
-| PULONG_ pul_Hour, |
-| unsigned char *_ pb_Minute, |
-| unsigned char *_ pb_Second, |
-| unsigned int *_ pui_MilliSecond, |
-| unsigned int *_ pui_MicroSecond, |
-| unsigned int *_ pui_NanoSecond) |
-+----------------------------------------------------------------------------+
-| Task : Convert the chronometer measured timing |
-| (ul_ChronoValue) into h, mn, s, ms, µs, ns. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3)|
-| ULONG_ ul_ChronoValue : Measured chronometer timing |
-| value. |
-| See"i_APCI1710_ReadChronoValue"|
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_Hour : Chronometer timing hour |
-| unsigned char *_ pb_Minute : Chronometer timing minute |
-| unsigned char *_ pb_Second : Chronometer timing second |
-| unsigned int *_ pui_MilliSecond : Chronometer timing milli |
-| second |
-| unsigned int *_ pui_MicroSecond : Chronometer timing micro |
-| second |
-| unsigned int *_ pui_NanoSecond : Chronometer timing nano |
-| second |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a Chronometer module |
-| -4: Chronometer not initialised see function |
-| "i_APCI1710_InitChrono" |
-+----------------------------------------------------------------------------+
-*/
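-/*
- * Worked example (editor's illustration, not part of the original driver):
- * with b_TimingUnit = 3 (seconds) a measured timing of
- * ul_ChronoValue * d_TimingInterval = 3661 s enters the switch below at
- * case 3 and is divided by 60 and then by 60 again, giving *pul_Hour = 1,
- * *pb_Minute = 1 and *pb_Second = 1.
- */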
-static int i_APCI1710_ConvertChronoValue(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned int ul_ChronoValue,
- unsigned int *pul_Hour,
- unsigned char *pb_Minute,
- unsigned char *pb_Second,
- unsigned int *pui_MilliSecond,
- unsigned int *pui_MicroSecond,
- unsigned int *pui_NanoSecond)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- double d_Hour;
- double d_Minute;
- double d_Second;
- double d_MilliSecond;
- double d_MicroSecond;
- double d_NanoSecond;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if chronometer */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_CHRONOMETER) {
- /***********************************/
- /* Test if chronometer initialised */
- /***********************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_ChronoModuleInfo.b_ChronoInit == 1) {
- fpu_begin();
-
- d_Hour = (double)ul_ChronoValue * (double)devpriv->s_ModuleInfo[b_ModulNbr].s_ChronoModuleInfo.d_TimingInterval;
-
- switch (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_ChronoModuleInfo.b_TimingUnit) {
- case 0:
- d_Hour = d_Hour / (double)1000.0;
- /* fall through */
-
- case 1:
- d_Hour = d_Hour / (double)1000.0;
- /* fall through */
-
- case 2:
- d_Hour = d_Hour / (double)1000.0;
- /* fall through */
-
- case 3:
- d_Hour = d_Hour / (double)60.0;
- /* fall through */
-
- case 4:
- /**********************/
- /* Calculate the hour */
- /**********************/
-
- d_Hour = d_Hour / (double)60.0;
- *pul_Hour = (unsigned int) d_Hour;
-
- /************************/
- /* Calculate the minute */
- /************************/
-
- d_Minute = d_Hour - *pul_Hour;
- d_Minute = d_Minute * 60;
- *pb_Minute = (unsigned char) d_Minute;
-
- /************************/
- /* Calculate the second */
- /************************/
-
- d_Second = d_Minute - *pb_Minute;
- d_Second = d_Second * 60;
- *pb_Second = (unsigned char) d_Second;
-
- /*****************************/
- /* Calculate the milli second */
- /*****************************/
-
- d_MilliSecond = d_Second - *pb_Second;
- d_MilliSecond = d_MilliSecond * 1000;
- *pui_MilliSecond = (unsigned int) d_MilliSecond;
-
- /******************************/
- /* Calculate the micro second */
- /******************************/
-
- d_MicroSecond =
- d_MilliSecond -
- *pui_MilliSecond;
- d_MicroSecond = d_MicroSecond * 1000;
- *pui_MicroSecond = (unsigned int) d_MicroSecond;
-
- /******************************/
- /* Calculate the nano second */
- /******************************/
-
- d_NanoSecond =
- d_MicroSecond -
- *pui_MicroSecond;
- d_NanoSecond = d_NanoSecond * 1000;
- *pui_NanoSecond = (unsigned int) d_NanoSecond;
- break;
- }
-
- fpu_end();
- } else {
- /*******************************/
- /* Chronometer not initialised */
- /*******************************/
- DPRINTK("Chronometer not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /******************************************/
- /* The module is not a Chronometer module */
- /******************************************/
- DPRINTK("The module is not a Chronometer module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name :INT i_APCI1710_InsnReadChrono(struct comedi_device *dev,struct comedi_subdevice *s,
-struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Read functions for the chronometer |
-+----------------------------------------------------------------------------+
-| Input Parameters :
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value :
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InsnReadChrono(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned char b_ReadType;
- int i_ReturnValue = insn->n;
-
- b_ReadType = CR_CHAN(insn->chanspec);
-
- switch (b_ReadType) {
- case APCI1710_CHRONO_PROGRESS_STATUS:
- i_ReturnValue = i_APCI1710_GetChronoProgressStatus(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char *) &data[0]);
- break;
-
- case APCI1710_CHRONO_READVALUE:
- i_ReturnValue = i_APCI1710_ReadChronoValue(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned int) insn->unused[0],
- (unsigned char *) &data[0], (unsigned int *) &data[1]);
- break;
-
- case APCI1710_CHRONO_CONVERTVALUE:
- i_ReturnValue = i_APCI1710_ConvertChronoValue(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned int) insn->unused[0],
- (unsigned int *) &data[0],
- (unsigned char *) &data[1],
- (unsigned char *) &data[2],
- (unsigned int *) &data[3],
- (unsigned int *) &data[4], (unsigned int *) &data[5]);
- break;
-
- case APCI1710_CHRONO_READINTERRUPT:
- printk("In Chrono Read Interrupt\n");
-
- data[0] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].b_OldModuleMask;
- data[1] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].ul_OldInterruptMask;
- data[2] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].ul_OldCounterLatchValue;
-
- /****************************/
- /* Increment the read FIFO */
- /****************************/
-
- devpriv->s_InterruptParameters.ui_Read =
- (devpriv->s_InterruptParameters.ui_Read + 1) % APCI1710_SAVE_INTERRUPT;
- break;
-
- default:
- printk("ReadType Parameter wrong\n");
- }
-
- if (i_ReturnValue >= 0)
- i_ReturnValue = insn->n;
- return i_ReturnValue;
-
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1710_InsnBitsChronoDigitalIO(struct comedi_device *dev,struct comedi_subdevice *s,
- struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Sets the output which has been passed with the |
-| parameter b_Channel. Setting an output means setting an|
-| output high. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3)|
-| unsigned char_ b_OutputChannel : Selection from digital output |
-| CR_CHAN() channel (0 to 2) |
-| 0 : Channel H |
-| 1 : Channel A |
-| 2 : Channel B |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a Chronometer module |
-| -4: The selected digital output is wrong |
-| -5: Chronometer not initialised see function |
-| "i_APCI1710_InitChrono" |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_SetChronoChlOff |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_OutputChannel) |
-+----------------------------------------------------------------------------+
-| Task : Resets the output which has been passed with the |
-| parameter b_Channel. Resetting an output means setting |
-| an output low. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710
- data[0] : Chl ON, Chl OFF , Chl Read , Port Read
-
-| unsigned char_ b_ModulNbr CR_AREF : Selected module number (0 to 3)|
-| unsigned char_ b_OutputChannel CR_CHAN : Selection from digital output |
-| channel (0 to 2) |
-| 0 : Channel H |
-| 1 : Channel A |
-| 2 : Channel B |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a Chronometer module |
-| -4: The selected digital output is wrong |
-| -5: Chronometer not initialised see function |
-| "i_APCI1710_InitChrono" |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadChronoChlValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_InputChannel, |
-| unsigned char *_ pb_ChannelStatus) |
-+----------------------------------------------------------------------------+
-| Task : Return the status from selected digital input |
-| (b_InputChannel) from selected chronometer |
-| module (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3)|
-| unsigned char_ b_InputChannel : Selection from digital input |
-| channel (0 to 2) |
-| CR_CHAN() 0 : Channel E |
-| 1 : Channel F |
-| 2 : Channel G |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_ChannelStatus : Digital input channel status.|
-| data[0] 0 : Channel is not active |
-| 1 : Channel is active |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a Chronometer module |
-| -4: The selected digital input is wrong |
-| -5: Chronometer not initialised see function |
-| "i_APCI1710_InitChrono" |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadChronoPortValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char *_ pb_PortValue) |
-+----------------------------------------------------------------------------+
-| Task : Return the status from digital inputs port from |
-| selected (b_ModulNbr) chronometer module. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3)|
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_PortValue : Digital inputs port status.
-| data[0]
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a Chronometer module |
-| -4: Chronometer not initialised see function |
-| "i_APCI1710_InitChrono" |
-+----------------------------------------------------------------------------+
-*/
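-/*
- * Example (editor's illustration, not part of the original driver): the
- * port read below returns the three inputs E, F and G as an inverted
- * 3-bit mask, so a register value of dw_Status = 0x2 yields
- * *pb_PortValue = 0x5, i.e. channels E and G active.
- */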
-static int i_APCI1710_InsnBitsChronoDigitalIO(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned char b_ModulNbr, b_OutputChannel, b_InputChannel, b_IOType;
- unsigned int dw_Status;
- unsigned char *pb_ChannelStatus;
- unsigned char *pb_PortValue;
-
- b_ModulNbr = CR_AREF(insn->chanspec);
- i_ReturnValue = insn->n;
- b_IOType = (unsigned char) data[0];
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if chronometer */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_CHRONOMETER) {
- /***********************************/
- /* Test if chronometer initialised */
- /***********************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_ChronoModuleInfo.b_ChronoInit == 1) {
- /***********************************/
- /* Test the digital output channel */
- /***********************************/
- switch (b_IOType) {
-
- case APCI1710_CHRONO_SET_CHANNELOFF:
-
- b_OutputChannel =
- (unsigned char) CR_CHAN(insn->chanspec);
- if (b_OutputChannel <= 2) {
-
- outl(0, devpriv->s_BoardInfos.
- ui_Address + 20 +
- (b_OutputChannel * 4) +
- (64 * b_ModulNbr));
- } /* if ((b_OutputChannel >= 0) && (b_OutputChannel <= 2)) */
- else {
- /****************************************/
- /* The selected digital output is wrong */
- /****************************************/
-
- DPRINTK("The selected digital output is wrong\n");
- i_ReturnValue = -4;
-
- } /* if ((b_OutputChannel >= 0) && (b_OutputChannel <= 2)) */
-
- break;
-
- case APCI1710_CHRONO_SET_CHANNELON:
-
- b_OutputChannel =
- (unsigned char) CR_CHAN(insn->chanspec);
- if (b_OutputChannel <= 2) {
-
- outl(1, devpriv->s_BoardInfos.
- ui_Address + 20 +
- (b_OutputChannel * 4) +
- (64 * b_ModulNbr));
- } /* if ((b_OutputChannel >= 0) && (b_OutputChannel <= 2)) */
- else {
- /****************************************/
- /* The selected digital output is wrong */
- /****************************************/
-
- DPRINTK("The selected digital output is wrong\n");
- i_ReturnValue = -4;
-
- } /* if ((b_OutputChannel >= 0) && (b_OutputChannel <= 2)) */
-
- break;
-
- case APCI1710_CHRONO_READ_CHANNEL:
- /**********************************/
- /* Test the digital input channel */
- /**********************************/
- pb_ChannelStatus = (unsigned char *) &data[0];
- b_InputChannel =
- (unsigned char) CR_CHAN(insn->chanspec);
-
- if (b_InputChannel <= 2) {
-
- dw_Status =
- inl(devpriv->
- s_BoardInfos.
- ui_Address + 12 +
- (64 * b_ModulNbr));
-
- *pb_ChannelStatus =
- (unsigned char) (((dw_Status >>
- b_InputChannel)
- & 1) ^ 1);
- } /* if ((b_InputChannel >= 0) && (b_InputChannel <= 2)) */
- else {
- /***************************************/
- /* The selected digital input is wrong */
- /***************************************/
-
- DPRINTK("The selected digital input is wrong\n");
- i_ReturnValue = -4;
- } /* if ((b_InputChannel >= 0) && (b_InputChannel <= 2)) */
-
- break;
-
- case APCI1710_CHRONO_READ_PORT:
-
- pb_PortValue = (unsigned char *) &data[0];
-
- dw_Status =
- inl(devpriv->s_BoardInfos.
- ui_Address + 12 +
- (64 * b_ModulNbr));
-
- *pb_PortValue =
- (unsigned char) ((dw_Status & 0x7) ^ 7);
- break;
- }
- } else {
- /*******************************/
- /* Chronometer not initialised */
- /*******************************/
-
- DPRINTK("Chronometer not initialised\n");
- i_ReturnValue = -5;
- }
- } else {
- /******************************************/
- /* The module is not a Chronometer module */
- /******************************************/
-
- DPRINTK("The module is not a Chronometer module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
diff --git a/drivers/staging/comedi/drivers/addi-data/APCI1710_Dig_io.c b/drivers/staging/comedi/drivers/addi-data/APCI1710_Dig_io.c
deleted file mode 100644
index 27de18e7989..00000000000
--- a/drivers/staging/comedi/drivers/addi-data/APCI1710_Dig_io.c
+++ /dev/null
@@ -1,1037 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
-	Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-@endverbatim
-*/
-/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : API APCI1710 | Compiler : gcc |
- | Module name : DIG_IO.C | Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-----------------------------------------------------------------------+
- | Description : APCI-1710 digital I/O module |
- | |
- | |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +-----------------------------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | 16/06/98 | S. Weber | Digital input / output implementation |
- |----------|-----------|------------------------------------------------|
- | 08/05/00 | Guinot C | - 0400/0228 All Function in RING 0 |
- | | | available |
- +-----------------------------------------------------------------------+
- | | | |
- | | | |
- +-----------------------------------------------------------------------+
-*/
-
-/* Digital Output ON or OFF */
-#define APCI1710_ON 1
-#define APCI1710_OFF 0
-
-/* Digital I/O */
-#define APCI1710_INPUT 0
-#define APCI1710_OUTPUT 1
-
-#define APCI1710_DIGIO_MEMORYONOFF 0x10
-#define APCI1710_DIGIO_INIT 0x11
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1710_InsnConfigDigitalIO(struct comedi_device *dev, |
-| struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data)|
-+----------------------------------------------------------------------------+
-| Task              : Configure the digital I/O operating mode of the selected|
-|                     module (b_ModulNbr). You must call this function before |
-|                     you call any other function which accesses the digital  |
-|                     I/O.                                                     |
-+----------------------------------------------------------------------------+
-| Input Parameters :                                                          |
-|              unsigned char_ b_ModulNbr CR_AREF(chanspec) : Module number to |
-|                             configure (0 to 3)                              |
-|              unsigned char_ b_ConfigType   data[0] : APCI1710_DIGIO_INIT or |
-|                             APCI1710_DIGIO_MEMORYONOFF                      |
-|              unsigned char_ b_ChannelAMode data[1] : Channel A mode selection|
-|                             0 : Channel used for digital input              |
-|                             1 : Channel used for digital output             |
-|              unsigned char_ b_ChannelBMode data[2] : Channel B mode selection|
-|                             0 : Channel used for digital input              |
-|                             1 : Channel used for digital output             |
-|              data[1] memory on/off (for APCI1710_DIGIO_MEMORYONOFF) :       |
-|                             Activates or deactivates the digital output     |
-|                             memory. After calling this function with memory |
-|                             on, the outputs you have previously activated   |
-|                             with the function are not reset.                |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a digital I/O module |
-| -4: Bi-directional channel A configuration error |
-| -5: Bi-directional channel B configuration error |
-+----------------------------------------------------------------------------+
-*/
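
For orientation, a minimal user-space sketch of driving this configuration path through comedilib. It assumes the digital I/O subdevice exposes the handler below as its INSN_CONFIG hook; the device node, subdevice index and module number are placeholders, not values taken from this driver.

#include <stdio.h>
#include <string.h>
#include <comedilib.h>

#define APCI1710_DIGIO_INIT	0x11	/* value copied from the defines above */

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");	/* assumed device node */
	lsampl_t data[3];
	comedi_insn insn;

	if (!dev) {
		fprintf(stderr, "comedi_open failed\n");
		return 1;
	}

	data[0] = APCI1710_DIGIO_INIT;	/* data[0]: configuration type */
	data[1] = 1;			/* data[1]: channel A used for output */
	data[2] = 0;			/* data[2]: channel B used for input */

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_CONFIG;
	insn.n = 3;
	insn.data = data;
	insn.subdev = 0;			/* assumed digital I/O subdevice index */
	insn.chanspec = CR_PACK(0, 0, 1);	/* CR_AREF carries the module number (1) */

	if (comedi_do_insn(dev, &insn) < 0)
		comedi_perror("comedi_do_insn");

	comedi_close(dev);
	return 0;
}
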
-
-static int i_APCI1710_InsnConfigDigitalIO(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned char b_ModulNbr, b_ChannelAMode, b_ChannelBMode;
- unsigned char b_MemoryOnOff, b_ConfigType;
- int i_ReturnValue = 0;
- unsigned int dw_WriteConfig = 0;
-
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_ConfigType = (unsigned char) data[0]; /* Memory or Init */
- b_ChannelAMode = (unsigned char) data[1];
- b_ChannelBMode = (unsigned char) data[2];
- b_MemoryOnOff = (unsigned char) data[1]; /* if memory operation */
- i_ReturnValue = insn->n;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr >= 4) {
- DPRINTK("Module Number invalid\n");
- i_ReturnValue = -2;
- return i_ReturnValue;
- }
- switch (b_ConfigType) {
- case APCI1710_DIGIO_MEMORYONOFF:
-
- if (b_MemoryOnOff) /* If Memory ON */
- {
- /****************************/
- /* Set the output memory on */
- /****************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.b_OutputMemoryEnabled = 1;
-
- /***************************/
- /* Clear the output memory */
- /***************************/
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.dw_OutputMemory = 0;
- } else /* If memory off */
- {
- /*****************************/
- /* Set the output memory off */
- /*****************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.b_OutputMemoryEnabled = 0;
- }
- break;
-
- case APCI1710_DIGIO_INIT:
-
- /*******************************/
- /* Test if digital I/O counter */
- /*******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_DIGITAL_IO) {
-
- /***************************************************/
- /* Test the bi-directional channel A configuration */
- /***************************************************/
-
- if ((b_ChannelAMode == 0) || (b_ChannelAMode == 1)) {
- /***************************************************/
- /* Test the bi-directional channel B configuration */
- /***************************************************/
-
- if ((b_ChannelBMode == 0)
- || (b_ChannelBMode == 1)) {
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.b_DigitalInit =
- 1;
-
- /********************************/
- /* Save channel A configuration */
- /********************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.
- b_ChannelAMode = b_ChannelAMode;
-
- /********************************/
- /* Save channel B configuration */
- /********************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.
- b_ChannelBMode = b_ChannelBMode;
-
- /*****************************************/
- /* Set the channel A and B configuration */
- /*****************************************/
-
- dw_WriteConfig =
- (unsigned int) (b_ChannelAMode |
- (b_ChannelBMode * 2));
-
- /***************************/
- /* Write the configuration */
- /***************************/
-
- outl(dw_WriteConfig,
- devpriv->s_BoardInfos.
- ui_Address + 4 +
- (64 * b_ModulNbr));
-
- } else {
- /************************************************/
- /* Bi-directional channel B configuration error */
- /************************************************/
- DPRINTK("Bi-directional channel B configuration error\n");
- i_ReturnValue = -5;
- }
-
- } else {
- /************************************************/
- /* Bi-directional channel A configuration error */
- /************************************************/
- DPRINTK("Bi-directional channel A configuration error\n");
- i_ReturnValue = -4;
-
- }
-
- } else {
- /******************************************/
- /* The module is not a digital I/O module */
- /******************************************/
- DPRINTK("The module is not a digital I/O module\n");
- i_ReturnValue = -3;
- }
- } /* end of Switch */
- printk("Return Value %d\n", i_ReturnValue);
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| INPUT FUNCTIONS |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-+----------------------------------------------------------------------------+
-
-|INT i_APCI1710_InsnReadDigitalIOChlValue(struct comedi_device *dev,comedi_subdevice
-*s, struct comedi_insn *insn,unsigned int *data)
-
-+----------------------------------------------------------------------------+
-| Task              : Read the status of the selected digital input           |
-|                     (b_InputChannel).                                       |
-+----------------------------------------------------------------------------+
-| Input Parameters :                                                          |
-|      unsigned char_ b_ModulNbr CR_AREF(chanspec)    : Selected module number|
-|                                                       (0 to 3)              |
-|      unsigned char_ b_InputChannel CR_CHAN(chanspec): Selection from digital|
-|                                                       input (0 to 6)        |
-|                                                       0 : Channel C         |
-|                                                       1 : Channel D         |
-|                                                       2 : Channel E         |
-|                                                       3 : Channel F         |
-|                                                       4 : Channel G         |
-|                                                       5 : Channel A         |
-|                                                       6 : Channel B         |
-+----------------------------------------------------------------------------+
-| Output Parameters : data[0] : Digital input channel |
-| status |
-|                                            0 : Channel is not active        |
-|                                            1 : Channel is active            |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a digital I/O module |
-| -4: The selected digital I/O digital input is wrong |
-| -5: Digital I/O not initialised |
-| -6: The digital channel A is used for output |
-| -7: The digital channel B is used for output |
-+----------------------------------------------------------------------------+
-*/
-
-/* _INT_ i_APCI1710_ReadDigitalIOChlValue (unsigned char_ b_BoardHandle, */
-/*
-* unsigned char_ b_ModulNbr, unsigned char_ b_InputChannel,
-* unsigned char *_ pb_ChannelStatus)
-*/
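
A matching user-space sketch for the read path below, again assuming comedilib and that this handler is wired up as the subdevice's INSN_READ hook; the device node and subdevice index are placeholders.

#include <stdio.h>
#include <string.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");	/* assumed device node */
	lsampl_t status = 0;
	comedi_insn insn;

	if (!dev) {
		fprintf(stderr, "comedi_open failed\n");
		return 1;
	}

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_READ;
	insn.n = 1;
	insn.data = &status;
	insn.subdev = 0;			/* assumed digital I/O subdevice index */
	insn.chanspec = CR_PACK(0, 0, 0);	/* input channel 0 (C) of module 0 */

	if (comedi_do_insn(dev, &insn) >= 0)
		printf("channel C is %s\n", status ? "active" : "not active");
	else
		comedi_perror("comedi_do_insn");

	comedi_close(dev);
	return 0;
}
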
-static int i_APCI1710_InsnReadDigitalIOChlValue(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg;
- unsigned char b_ModulNbr, b_InputChannel;
- unsigned char *pb_ChannelStatus;
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_InputChannel = (unsigned char) CR_CHAN(insn->chanspec);
- data[0] = 0;
- pb_ChannelStatus = (unsigned char *) &data[0];
- i_ReturnValue = insn->n;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if digital I/O counter */
- /*******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_DIGITAL_IO) {
- /******************************************/
-			/* Test the digital input channel number */
- /******************************************/
-
- if (b_InputChannel <= 6) {
- /**********************************************/
- /* Test if the digital I/O module initialised */
- /**********************************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.b_DigitalInit == 1) {
- /**********************************/
- /* Test if channel A or channel B */
- /**********************************/
-
- if (b_InputChannel > 4) {
- /*********************/
- /* Test if channel A */
- /*********************/
-
- if (b_InputChannel == 5) {
- /***************************/
- /* Test the channel A mode */
- /***************************/
-
- if (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- b_ChannelAMode
- != 0) {
- /********************************************/
- /* The digital channel A is used for output */
- /********************************************/
-
- i_ReturnValue =
- -6;
- }
- } /* if (b_InputChannel == 5) */
- else {
- /***************************/
- /* Test the channel B mode */
- /***************************/
-
- if (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- b_ChannelBMode
- != 0) {
- /********************************************/
- /* The digital channel B is used for output */
- /********************************************/
-
- i_ReturnValue =
- -7;
- }
- } /* if (b_InputChannel == 5) */
- } /* if (b_InputChannel > 4) */
-
- /***********************/
- /* Test if error occur */
- /***********************/
-
- if (i_ReturnValue >= 0) {
- /**************************/
- /* Read all digital input */
- /**************************/
-
-/*
-* INPDW (ps_APCI1710Variable-> s_Board [b_BoardHandle].
-* s_BoardInfos. ui_Address + (64 * b_ModulNbr), &dw_StatusReg);
-*/
-
- dw_StatusReg =
- inl(devpriv->
- s_BoardInfos.
- ui_Address +
- (64 * b_ModulNbr));
-
- *pb_ChannelStatus =
- (unsigned char) ((dw_StatusReg ^
- 0x1C) >>
- b_InputChannel) & 1;
-
- } /* if (i_ReturnValue == 0) */
- } else {
- /*******************************/
- /* Digital I/O not initialised */
- /*******************************/
- DPRINTK("Digital I/O not initialised\n");
- i_ReturnValue = -5;
- }
- } else {
- /********************************/
- /* Selected digital input error */
- /********************************/
- DPRINTK("Selected digital input error\n");
- i_ReturnValue = -4;
- }
- } else {
- /******************************************/
- /* The module is not a digital I/O module */
- /******************************************/
- DPRINTK("The module is not a digital I/O module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
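
The status decode above XORs the raw register with 0x1C before shifting, so the bits for channels E, F and G (bits 2 to 4) are reported inverted relative to the register. A small stand-alone sketch of the same decode:

#include <stdio.h>

/* Mirror of the decode in the function above: bit n of the XORed
 * register is the state of input channel n
 * (0 = C, 1 = D, 2 = E, 3 = F, 4 = G, 5 = A, 6 = B). */
static unsigned char apci1710_digin_state(unsigned int status_reg,
					   unsigned char channel)
{
	return (unsigned char)(((status_reg ^ 0x1C) >> channel) & 1);
}

int main(void)
{
	unsigned int raw = 0x1C;	/* arbitrary example register value */
	unsigned char ch;

	for (ch = 0; ch <= 6; ch++)
		printf("channel %u: %u\n", (unsigned int)ch,
		       (unsigned int)apci1710_digin_state(raw, ch));
	return 0;
}
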
-
-/*
-+----------------------------------------------------------------------------+
-| OUTPUT FUNCTIONS |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1710_InsnWriteDigitalIOChlOnOff(comedi_device
-|*dev,struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data)
-
-+----------------------------------------------------------------------------+
-| Task              : Sets or resets the output which has been passed with the|
-|                     parameter b_Channel. Setting an output means setting    |
-|                     an output high.                                         |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr (aref ) : Selected module number (0 to 3)|
-| unsigned char_ b_OutputChannel (CR_CHAN) : Selection from digital output |
-| channel (0 to 2) |
-| 0 : Channel H |
-| 1 : Channel A |
-| 2 : Channel B |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a digital I/O module |
-| -4: The selected digital output is wrong |
-| -5: digital I/O not initialised see function |
-| " i_APCI1710_InitDigitalIO" |
-| -6: The digital channel A is used for input |
-| -7: The digital channel B is used for input
- -8: Digital Output Memory OFF. |
-| Use previously the function |
-| "i_APCI1710_SetDigitalIOMemoryOn". |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-* _INT_ i_APCI1710_SetDigitalIOChlOn (unsigned char_ b_BoardHandle,
-* unsigned char_ b_ModulNbr, unsigned char_ b_OutputChannel)
-*/
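
A hedged comedilib sketch of the write path documented above, assuming this handler is the subdevice's INSN_WRITE hook; the device node, subdevice index, module and channel are placeholders, and data[0] carries the on/off flag as the code below shows.

#include <stdio.h>
#include <string.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");	/* assumed device node */
	lsampl_t on = 1;	/* data[0]: 1 sets the output, 0 resets it */
	comedi_insn insn;

	if (!dev) {
		fprintf(stderr, "comedi_open failed\n");
		return 1;
	}

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_WRITE;
	insn.n = 1;
	insn.data = &on;
	insn.subdev = 0;			/* assumed digital I/O subdevice index */
	insn.chanspec = CR_PACK(0, 0, 2);	/* output channel 0 (H) of module 2 */

	if (comedi_do_insn(dev, &insn) < 0)
		comedi_perror("comedi_do_insn");

	comedi_close(dev);
	return 0;
}
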
-static int i_APCI1710_InsnWriteDigitalIOChlOnOff(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_WriteValue = 0;
- unsigned char b_ModulNbr, b_OutputChannel;
- i_ReturnValue = insn->n;
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_OutputChannel = CR_CHAN(insn->chanspec);
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if digital I/O counter */
- /*******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_DIGITAL_IO) {
- /**********************************************/
- /* Test if the digital I/O module initialised */
- /**********************************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.b_DigitalInit == 1) {
- /******************************************/
- /* Test the digital output channel number */
- /******************************************/
-
- switch (b_OutputChannel) {
- /*************/
- /* Channel H */
- /*************/
-
- case 0:
- break;
-
- /*************/
- /* Channel A */
- /*************/
-
- case 1:
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.
- b_ChannelAMode != 1) {
- /*******************************************/
- /* The digital channel A is used for input */
- /*******************************************/
-
- i_ReturnValue = -6;
- }
- break;
-
- /*************/
- /* Channel B */
- /*************/
-
- case 2:
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.
- b_ChannelBMode != 1) {
- /*******************************************/
- /* The digital channel B is used for input */
- /*******************************************/
-
- i_ReturnValue = -7;
- }
- break;
-
- default:
- /****************************************/
- /* The selected digital output is wrong */
- /****************************************/
-
- i_ReturnValue = -4;
- break;
- }
-
- /***********************/
- /* Test if error occur */
- /***********************/
-
- if (i_ReturnValue >= 0) {
-
- /*********************************/
- /* Test if set channel ON */
- /*********************************/
- if (data[0]) {
- /*********************************/
- /* Test if output memory enabled */
- /*********************************/
-
- if (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- b_OutputMemoryEnabled ==
- 1) {
- dw_WriteValue =
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- dw_OutputMemory
- | (1 <<
- b_OutputChannel);
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- dw_OutputMemory
- = dw_WriteValue;
- } else {
- dw_WriteValue =
- 1 <<
- b_OutputChannel;
- }
- } /* set channel off */
- else {
- if (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- b_OutputMemoryEnabled ==
- 1) {
- dw_WriteValue =
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- dw_OutputMemory
- & (0xFFFFFFFFUL
- -
- (1 << b_OutputChannel));
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- dw_OutputMemory
- = dw_WriteValue;
- } else {
- /*****************************/
- /* Digital Output Memory OFF */
- /*****************************/
- /* +Use previously the function "i_APCI1710_SetDigitalIOMemoryOn" */
- i_ReturnValue = -8;
- }
-
- }
- /*******************/
- /* Write the value */
- /*******************/
-
- /* OUTPDW (ps_APCI1710Variable->
- * s_Board [b_BoardHandle].
- * s_BoardInfos. ui_Address + (64 * b_ModulNbr),
- * dw_WriteValue);
- */
- outl(dw_WriteValue,
- devpriv->s_BoardInfos.
- ui_Address + (64 * b_ModulNbr));
- }
- } else {
- /*******************************/
- /* Digital I/O not initialised */
- /*******************************/
-
- i_ReturnValue = -5;
- }
- } else {
- /******************************************/
- /* The module is not a digital I/O module */
- /******************************************/
-
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-
-|INT i_APCI1710_InsnBitsDigitalIOPortOnOff(struct comedi_device *dev,comedi_subdevice
- *s, struct comedi_insn *insn,unsigned int *data)
-+----------------------------------------------------------------------------+
-| Task              : write:                                                  |
-|                       Sets or resets one or several outputs of the port.    |
-|                       Setting an output means setting an output high.       |
-|                       If you have switched OFF the digital output memory,   |
-|                       all the other outputs are set to "0".                 |
-|                     read:                                                   |
-|                       Read the status of the digital input port of the      |
-|                       selected digital I/O module (b_ModulNbr).             |
-+----------------------------------------------------------------------------+
-| Input Parameters :                                                          |
-|      unsigned char_ b_BoardHandle : Handle of board APCI-1710               |
-|      unsigned char_ b_ModulNbr CR_AREF(aref) : Selected module number (0 to 3)|
-|      data[0] : APCI1710_INPUT (read port) or APCI1710_OUTPUT (write port)   |
-|      data[1] : if write, APCI1710_ON or APCI1710_OFF                        |
-|      unsigned char_ b_PortValue data[2] : if write, output value (0 to 7)   |
-|      if read : data[0] returns the port status.                             |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value      :                                                         |
-|     INPUT  :  0: No error                                                   |
-|              -1: The handle parameter of the board is wrong                 |
-|              -2: The module parameter is wrong                              |
-|              -3: The module is not a digital I/O module                     |
-|              -4: Digital I/O not initialised                                |
-|     OUTPUT :  0: No error                                                   |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a digital I/O module |
-| -4: Output value wrong |
-| -5: digital I/O not initialised see function |
-| " i_APCI1710_InitDigitalIO" |
-| -6: The digital channel A is used for input |
-| -7: The digital channel B is used for input
- -8: Digital Output Memory OFF. |
-| Use previously the function |
-| "i_APCI1710_SetDigitalIOMemoryOn". |
-+----------------------------------------------------------------------------+
-*/
-
-/*
- * _INT_ i_APCI1710_SetDigitalIOPortOn (unsigned char_
- * b_BoardHandle, unsigned char_ b_ModulNbr, unsigned char_
- * b_PortValue)
-*/
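
A hedged sketch of driving this port handler from user space. It follows the data[] layout of the handler below (data[0] selects read or write, data[1] carries on/off, data[2] the port value), not the generic comedi insn_bits mask/bits convention; the INSN_BITS wiring, device node and subdevice index are assumptions, and setting bit 1 requires channel A to have been configured as an output.

#include <stdio.h>
#include <string.h>
#include <comedilib.h>

/* Values copied from the defines above. */
#define APCI1710_OUTPUT	1
#define APCI1710_ON	1

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");	/* assumed device node */
	lsampl_t data[3];
	comedi_insn insn;

	if (!dev) {
		fprintf(stderr, "comedi_open failed\n");
		return 1;
	}

	data[0] = APCI1710_OUTPUT;	/* data[0]: this is a write request */
	data[1] = APCI1710_ON;		/* data[1]: set the selected outputs */
	data[2] = 0x2;			/* data[2]: port value, bit 1 = channel A */

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_BITS;
	insn.n = 3;
	insn.data = data;
	insn.subdev = 0;			/* assumed digital I/O subdevice index */
	insn.chanspec = CR_PACK(0, 0, 0);	/* module 0 */

	if (comedi_do_insn(dev, &insn) < 0)
		comedi_perror("comedi_do_insn");

	comedi_close(dev);
	return 0;
}
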
-static int i_APCI1710_InsnBitsDigitalIOPortOnOff(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_WriteValue = 0;
- unsigned int dw_StatusReg;
- unsigned char b_ModulNbr, b_PortValue;
- unsigned char b_PortOperation, b_PortOnOFF;
-
- unsigned char *pb_PortValue;
-
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_PortOperation = (unsigned char) data[0]; /* Input or output */
- b_PortOnOFF = (unsigned char) data[1]; /* if output then On or Off */
-	b_PortValue = (unsigned char) data[2];	/* if output then value */
- i_ReturnValue = insn->n;
- pb_PortValue = (unsigned char *) &data[0];
-/* if input then read value */
-
- switch (b_PortOperation) {
- case APCI1710_INPUT:
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if digital I/O counter */
- /*******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_DIGITAL_IO) {
- /**********************************************/
- /* Test if the digital I/O module initialised */
- /**********************************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.b_DigitalInit == 1) {
- /**************************/
- /* Read all digital input */
- /**************************/
-
- /* INPDW (ps_APCI1710Variable->
- * s_Board [b_BoardHandle].
- * s_BoardInfos.
- * ui_Address + (64 * b_ModulNbr),
- * &dw_StatusReg);
- */
-
- dw_StatusReg =
- inl(devpriv->s_BoardInfos.
- ui_Address + (64 * b_ModulNbr));
- *pb_PortValue =
- (unsigned char) (dw_StatusReg ^ 0x1C);
-
- } else {
- /*******************************/
- /* Digital I/O not initialised */
- /*******************************/
-
- i_ReturnValue = -4;
- }
- } else {
- /******************************************/
- /* The module is not a digital I/O module */
- /******************************************/
-
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- i_ReturnValue = -2;
- }
-
- break;
-
- case APCI1710_OUTPUT:
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if digital I/O counter */
- /*******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_DIGITAL_IO) {
- /**********************************************/
- /* Test if the digital I/O module initialised */
- /**********************************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_DigitalIOInfo.b_DigitalInit == 1) {
- /***********************/
- /* Test the port value */
- /***********************/
-
- if (b_PortValue <= 7) {
- /***********************************/
- /* Test the digital output channel */
- /***********************************/
-
- /**************************/
- /* Test if channel A used */
- /**************************/
-
- if ((b_PortValue & 2) == 2) {
- if (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- b_ChannelAMode
- != 1) {
- /*******************************************/
- /* The digital channel A is used for input */
- /*******************************************/
-
- i_ReturnValue =
- -6;
- }
- } /* if ((b_PortValue & 2) == 2) */
-
- /**************************/
- /* Test if channel B used */
- /**************************/
-
- if ((b_PortValue & 4) == 4) {
- if (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- b_ChannelBMode
- != 1) {
- /*******************************************/
- /* The digital channel B is used for input */
- /*******************************************/
-
- i_ReturnValue =
- -7;
- }
- } /* if ((b_PortValue & 4) == 4) */
-
- /***********************/
- /* Test if error occur */
- /***********************/
-
- if (i_ReturnValue >= 0) {
-
- /* if(data[1]) { */
-
- switch (b_PortOnOFF) {
- /*********************************/
- /* Test if set Port ON */
- /*********************************/
-
- case APCI1710_ON:
-
- /*********************************/
- /* Test if output memory enabled */
- /*********************************/
-
- if (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- b_OutputMemoryEnabled
- == 1) {
- dw_WriteValue
- =
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- dw_OutputMemory
- |
- b_PortValue;
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- dw_OutputMemory
- =
- dw_WriteValue;
- } else {
- dw_WriteValue
- =
- b_PortValue;
- }
- break;
-
- /* If Set PORT OFF */
- case APCI1710_OFF:
-
- /*********************************/
- /* Test if output memory enabled */
- /*********************************/
-
- if (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- b_OutputMemoryEnabled
- == 1) {
- dw_WriteValue
- =
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- dw_OutputMemory
- &
- (0xFFFFFFFFUL
- -
- b_PortValue);
-
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_DigitalIOInfo.
- dw_OutputMemory
- =
- dw_WriteValue;
- } else {
- /*****************************/
- /* Digital Output Memory OFF */
- /*****************************/
-
- i_ReturnValue
- =
- -8;
- }
- } /* switch */
-
- /*******************/
- /* Write the value */
- /*******************/
-
- /* OUTPDW (ps_APCI1710Variable->
- * s_Board [b_BoardHandle].
- * s_BoardInfos.
- * ui_Address + (64 * b_ModulNbr),
- * dw_WriteValue); */
-
- outl(dw_WriteValue,
- devpriv->
- s_BoardInfos.
- ui_Address +
- (64 * b_ModulNbr));
- }
- } else {
- /**********************/
- /* Output value wrong */
- /**********************/
-
- i_ReturnValue = -4;
- }
- } else {
- /*******************************/
- /* Digital I/O not initialised */
- /*******************************/
-
- i_ReturnValue = -5;
- }
- } else {
- /******************************************/
- /* The module is not a digital I/O module */
- /******************************************/
-
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- i_ReturnValue = -2;
- }
- break;
-
- default:
- i_ReturnValue = -9;
- DPRINTK("NO INPUT/OUTPUT specified\n");
- } /* switch INPUT / OUTPUT */
- return i_ReturnValue;
-}
diff --git a/drivers/staging/comedi/drivers/addi-data/APCI1710_INCCPT.c b/drivers/staging/comedi/drivers/addi-data/APCI1710_INCCPT.c
deleted file mode 100644
index c9db601da2c..00000000000
--- a/drivers/staging/comedi/drivers/addi-data/APCI1710_INCCPT.c
+++ /dev/null
@@ -1,5461 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
-	Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-@endverbatim
-*/
-/*
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : API APCI1710 | Compiler : gcc |
- | Module name : INC_CPT.C | Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-----------------------------------------------------------------------+
- | Description : APCI-1710 incremental counter module |
- | |
- | |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +-----------------------------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | | | |
- |----------|-----------|------------------------------------------------|
- | 08/05/00 | Guinot C | - 0400/0228 All Function in RING 0 |
- | | | available |
- +-----------------------------------------------------------------------+
- | 29/06/01 | Guinot C. | - 1100/0231 -> 0701/0232 |
- | | | See i_APCI1710_DisableFrequencyMeasurement |
- +-----------------------------------------------------------------------+
-*/
-
-#define APCI1710_16BIT_COUNTER 0x10
-#define APCI1710_32BIT_COUNTER 0x0
-#define APCI1710_QUADRUPLE_MODE 0x0
-#define APCI1710_DOUBLE_MODE 0x3
-#define APCI1710_SIMPLE_MODE 0xF
-#define APCI1710_DIRECT_MODE 0x80
-#define APCI1710_HYSTERESIS_ON 0x60
-#define APCI1710_HYSTERESIS_OFF 0x0
-#define APCI1710_INCREMENT 0x60
-#define APCI1710_DECREMENT 0x0
-#define APCI1710_LATCH_COUNTER 0x1
-#define APCI1710_CLEAR_COUNTER 0x0
-#define APCI1710_LOW 0x0
-#define APCI1710_HIGH 0x1
-
-/*********************/
-/* Version 0600-0229 */
-/*********************/
-#define APCI1710_HIGH_EDGE_CLEAR_COUNTER 0x0
-#define APCI1710_HIGH_EDGE_LATCH_COUNTER 0x1
-#define APCI1710_LOW_EDGE_CLEAR_COUNTER 0x2
-#define APCI1710_LOW_EDGE_LATCH_COUNTER 0x3
-#define APCI1710_HIGH_EDGE_LATCH_AND_CLEAR_COUNTER 0x4
-#define APCI1710_LOW_EDGE_LATCH_AND_CLEAR_COUNTER 0x5
-#define APCI1710_SOURCE_0 0x0
-#define APCI1710_SOURCE_1 0x1
-
-#define APCI1710_30MHZ 30
-#define APCI1710_33MHZ 33
-#define APCI1710_40MHZ 40
-
-#define APCI1710_ENABLE_LATCH_INT 0x80
-#define APCI1710_DISABLE_LATCH_INT (~APCI1710_ENABLE_LATCH_INT)
-
-#define APCI1710_INDEX_LATCH_COUNTER 0x10
-#define APCI1710_INDEX_AUTO_MODE 0x8
-#define APCI1710_ENABLE_INDEX 0x4
-#define APCI1710_DISABLE_INDEX (~APCI1710_ENABLE_INDEX)
-#define APCI1710_ENABLE_LATCH_AND_CLEAR 0x8
-#define APCI1710_DISABLE_LATCH_AND_CLEAR (~APCI1710_ENABLE_LATCH_AND_CLEAR)
-#define APCI1710_SET_LOW_INDEX_LEVEL 0x4
-#define APCI1710_SET_HIGH_INDEX_LEVEL (~APCI1710_SET_LOW_INDEX_LEVEL)
-#define APCI1710_INVERT_INDEX_RFERENCE 0x2
-#define APCI1710_DEFAULT_INDEX_RFERENCE (~APCI1710_INVERT_INDEX_RFERENCE)
-
-#define APCI1710_ENABLE_INDEX_INT 0x1
-#define APCI1710_DISABLE_INDEX_INT (~APCI1710_ENABLE_INDEX_INT)
-
-#define APCI1710_ENABLE_FREQUENCY 0x4
-#define APCI1710_DISABLE_FREQUENCY (~APCI1710_ENABLE_FREQUENCY)
-
-#define APCI1710_ENABLE_FREQUENCY_INT 0x8
-#define APCI1710_DISABLE_FREQUENCY_INT (~APCI1710_ENABLE_FREQUENCY_INT)
-
-#define APCI1710_ENABLE_40MHZ_FREQUENCY 0x40
-#define APCI1710_DISABLE_40MHZ_FREQUENCY (~APCI1710_ENABLE_40MHZ_FREQUENCY)
-
-#define APCI1710_ENABLE_40MHZ_FILTER 0x80
-#define APCI1710_DISABLE_40MHZ_FILTER (~APCI1710_ENABLE_40MHZ_FILTER)
-
-#define APCI1710_ENABLE_COMPARE_INT 0x2
-#define APCI1710_DISABLE_COMPARE_INT (~APCI1710_ENABLE_COMPARE_INT)
-
-#define APCI1710_ENABLE_INDEX_ACTION 0x20
-#define APCI1710_DISABLE_INDEX_ACTION (~APCI1710_ENABLE_INDEX_ACTION)
-#define APCI1710_REFERENCE_HIGH 0x40
-#define APCI1710_REFERENCE_LOW (~APCI1710_REFERENCE_HIGH)
-
-#define APCI1710_TOR_GATE_LOW 0x40
-#define APCI1710_TOR_GATE_HIGH (~APCI1710_TOR_GATE_LOW)
-
-/* INSN CONFIG */
-#define APCI1710_INCCPT_INITCOUNTER 100
-#define APCI1710_INCCPT_COUNTERAUTOTEST 101
-#define APCI1710_INCCPT_INITINDEX 102
-#define APCI1710_INCCPT_INITREFERENCE 103
-#define APCI1710_INCCPT_INITEXTERNALSTROBE 104
-#define APCI1710_INCCPT_INITCOMPARELOGIC 105
-#define APCI1710_INCCPT_INITFREQUENCYMEASUREMENT 106
-
-/* INSN READ */
-#define APCI1710_INCCPT_READLATCHREGISTERSTATUS 200
-#define APCI1710_INCCPT_READLATCHREGISTERVALUE 201
-#define APCI1710_INCCPT_READ16BITCOUNTERVALUE 202
-#define APCI1710_INCCPT_READ32BITCOUNTERVALUE 203
-#define APCI1710_INCCPT_GETINDEXSTATUS 204
-#define APCI1710_INCCPT_GETREFERENCESTATUS 205
-#define APCI1710_INCCPT_GETUASSTATUS 206
-#define APCI1710_INCCPT_GETCBSTATUS 207
-#define APCI1710_INCCPT_GET16BITCBSTATUS 208
-#define APCI1710_INCCPT_GETUDSTATUS 209
-#define APCI1710_INCCPT_GETINTERRUPTUDLATCHEDSTATUS 210
-#define APCI1710_INCCPT_READFREQUENCYMEASUREMENT 211
-#define APCI1710_INCCPT_READINTERRUPT 212
-
-/* INSN BITS */
-#define APCI1710_INCCPT_CLEARCOUNTERVALUE 300
-#define APCI1710_INCCPT_CLEARALLCOUNTERVALUE 301
-#define APCI1710_INCCPT_SETINPUTFILTER 302
-#define APCI1710_INCCPT_LATCHCOUNTER 303
-#define APCI1710_INCCPT_SETINDEXANDREFERENCESOURCE 304
-#define APCI1710_INCCPT_SETDIGITALCHLON 305
-#define APCI1710_INCCPT_SETDIGITALCHLOFF 306
-
-/* INSN WRITE */
-#define APCI1710_INCCPT_ENABLELATCHINTERRUPT 400
-#define APCI1710_INCCPT_DISABLELATCHINTERRUPT 401
-#define APCI1710_INCCPT_WRITE16BITCOUNTERVALUE 402
-#define APCI1710_INCCPT_WRITE32BITCOUNTERVALUE 403
-#define APCI1710_INCCPT_ENABLEINDEX 404
-#define APCI1710_INCCPT_DISABLEINDEX 405
-#define APCI1710_INCCPT_ENABLECOMPARELOGIC 406
-#define APCI1710_INCCPT_DISABLECOMPARELOGIC 407
-#define APCI1710_INCCPT_ENABLEFREQUENCYMEASUREMENT 408
-#define APCI1710_INCCPT_DISABLEFREQUENCYMEASUREMENT 409
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitCounter |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_CounterRange, |
-| unsigned char_ b_FirstCounterModus, |
-| unsigned char_ b_FirstCounterOption, |
-| unsigned char_ b_SecondCounterModus, |
-| unsigned char_ b_SecondCounterOption) |
-+----------------------------------------------------------------------------+
-| Task              : Configure the counter operating mode of the selected    |
-|                     module (b_ModulNbr). You must call this function before |
-|                     you call any other function which accesses the          |
-|                     counters.                                               |
-| |
-| Counter range |
-| ------------- |
-| +------------------------------------+-----------------------------------+ |
-| | Parameter Passed value | Description | |
-| |------------------------------------+-----------------------------------| |
-| |b_ModulNbr APCI1710_16BIT_COUNTER | The module is configured for | |
-| | | two 16-bit counter. | |
-| | | - b_FirstCounterModus and | |
-| | | b_FirstCounterOption | |
-| | | configure the first 16 bit | |
-| | | counter. | |
-| | | - b_SecondCounterModus and | |
-| | | b_SecondCounterOption | |
-| | | configure the second 16 bit | |
-| | | counter. | |
-| |------------------------------------+-----------------------------------| |
-| |b_ModulNbr APCI1710_32BIT_COUNTER | The module is configured for one | |
-| | | 32-bit counter. | |
-| | | - b_FirstCounterModus and | |
-| | | b_FirstCounterOption | |
-| | | configure the 32 bit counter. | |
-| | | - b_SecondCounterModus and | |
-| | | b_SecondCounterOption | |
-| | | are not used and have no | |
-| | | importance. | |
-| +------------------------------------+-----------------------------------+ |
-| |
-| Counter operating mode |
-| ---------------------- |
-| |
-| +--------------------+-------------------------+-------------------------+ |
-| | Parameter | Passed value | Description | |
-| |--------------------+-------------------------+-------------------------| |
-| |b_FirstCounterModus | APCI1710_QUADRUPLE_MODE | In the quadruple mode, | |
-| | or | | the edge analysis | |
-| |b_SecondCounterModus| | circuit generates a | |
-| | | | counting pulse from | |
-| | | | each edge of 2 signals | |
-| | | | which are phase shifted | |
-| | | | in relation to each | |
-| | | | other. | |
-| |--------------------+-------------------------+-------------------------| |
-| |b_FirstCounterModus | APCI1710_DOUBLE_MODE | Functions in the same | |
-| | or | | way as the quadruple | |
-| |b_SecondCounterModus| | mode, except that only | |
-| | | | two of the four edges | |
-| | | | are analysed per | |
-| | | | period | |
-| |--------------------+-------------------------+-------------------------| |
-| |b_FirstCounterModus | APCI1710_SIMPLE_MODE | Functions in the same | |
-| | or | | way as the quadruple | |
-| |b_SecondCounterModus| | mode, except that only | |
-| | | | one of the four edges | |
-| | | | is analysed per | |
-| | | | period. | |
-| |--------------------+-------------------------+-------------------------| |
-| |b_FirstCounterModus | APCI1710_DIRECT_MODE    | In the direct mode both | |
-| |         or         |                         | edge analysis           | |
-| |b_SecondCounterModus|                         | circuits are inactive.  | |
-| |                    |                         | The inputs A, B in the  | |
-| |                    |                         | 32-bit mode or A, B and | |
-| |                    |                         | C, D in the 16-bit mode | |
-| |                    |                         | each represent one      | |
-| |                    |                         | clock pulse gate circuit| |
-| |                    |                         | Thereby frequency and   | |
-| | | | pulse duration | |
-| | | | measurements can be | |
-| | | | performed. | |
-| +--------------------+-------------------------+-------------------------+ |
-| |
-| |
-| IMPORTANT! |
-|  If you have configured the module for two 16-bit counters, a mixed         |
-|  mode with a counter in quadruple/double/simple mode                        |
-| and the other counter in direct mode is not possible! |
-| |
-| |
-| Counter operating option for quadruple/double/simple mode |
-| --------------------------------------------------------- |
-| |
-| +----------------------+-------------------------+------------------------+|
-| | Parameter | Passed value | Description ||
-| |----------------------+-------------------------+------------------------||
-| |b_FirstCounterOption  | APCI1710_HYSTERESIS_ON  | A hysteresis circuit is||
-| |          or          |                         | available in both edge ||
-| |b_SecondCounterOption |                         | analysis circuits. It  ||
-| |                      |                         | suppresses the first   ||
-| |                      |                         | counting pulse after   ||
-| |                      |                         | each change of         ||
-| |                      |                         | rotation.              ||
-| |----------------------+-------------------------+------------------------||
-| |b_FirstCounterOption | APCI1710_HYSTERESIS_OFF | The first counting ||
-| |          or          |                         | pulse is not suppressed||
-| |b_SecondCounterOption | | after a change of ||
-| | | | rotation. ||
-| +----------------------+-------------------------+------------------------+|
-| |
-| |
-| IMPORTANT! |
-|   This option is only available if you have selected the direct mode.      |
-| |
-| |
-| Counter operating option for direct mode |
-| ---------------------------------------- |
-| |
-| +----------------------+--------------------+----------------------------+ |
-| | Parameter | Passed value | Description | |
-| |----------------------+--------------------+----------------------------| |
-| |b_FirstCounterOption | APCI1710_INCREMENT | The counter increment for | |
-| | or | | each counting pulse | |
-| |b_SecondCounterOption | | | |
-| |----------------------+--------------------+----------------------------| |
-| |b_FirstCounterOption | APCI1710_DECREMENT | The counter decrement for | |
-| | or | | each counting pulse | |
-| |b_SecondCounterOption | | | |
-| +----------------------+--------------------+----------------------------+ |
-| |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710|
-| unsigned char_ b_ModulNbr : Module number to |
-| configure (0 to 3) |
-| unsigned char_ b_CounterRange : Selection form counter |
-| range. |
-| unsigned char_ b_FirstCounterModus : First counter operating |
-| mode. |
-| unsigned char_ b_FirstCounterOption : First counter option. |
-| unsigned char_ b_SecondCounterModus : Second counter operating |
-| mode. |
-| unsigned char_ b_SecondCounterOption : Second counter option. |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module is not a counter module |
-| -3: The selected counter range is wrong. |
-| -4: The selected first counter operating mode is wrong. |
-| -5: The selected first counter operating option is wrong|
-| -6: The selected second counter operating mode is wrong.|
-| -7: The selected second counter operating option is |
-| wrong. |
-+----------------------------------------------------------------------------+
-*/
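
As a paper-and-pencil aid, a small host-side sketch that reproduces how the function below composes the first mode-register byte; the constants are copied from the defines above, and the helper only mirrors the bit arithmetic, it does not talk to the board.

#include <stdio.h>

/* Values copied from the defines above. */
#define APCI1710_16BIT_COUNTER	0x10
#define APCI1710_32BIT_COUNTER	0x0
#define APCI1710_QUADRUPLE_MODE	0x0
#define APCI1710_SIMPLE_MODE	0xF
#define APCI1710_DIRECT_MODE	0x80
#define APCI1710_HYSTERESIS_ON	0x60
#define APCI1710_HYSTERESIS_OFF	0x0

/* Mirrors how i_APCI1710_InitCounter below composes b_ModeRegister1:
 * the 32-bit range ORs the fields in whole, the 16-bit range splits
 * the modus/option bits between the two counters. */
static unsigned char mode_register1(unsigned char range,
				    unsigned char modus1, unsigned char option1,
				    unsigned char modus2, unsigned char option2)
{
	unsigned char reg;

	if (range == APCI1710_32BIT_COUNTER)
		return range | modus1 | option1;

	reg = range | (modus1 & 0x5) | (option1 & 0x20) |
	      (modus2 & 0xA) | (option2 & 0x40);
	if (modus1 == APCI1710_DIRECT_MODE)
		reg |= APCI1710_DIRECT_MODE;
	return reg;
}

int main(void)
{
	printf("32-bit quadruple, hysteresis on: 0x%02x\n",
	       (unsigned int)mode_register1(APCI1710_32BIT_COUNTER,
					    APCI1710_QUADRUPLE_MODE,
					    APCI1710_HYSTERESIS_ON, 0, 0));
	printf("two 16-bit simple counters     : 0x%02x\n",
	       (unsigned int)mode_register1(APCI1710_16BIT_COUNTER,
					    APCI1710_SIMPLE_MODE,
					    APCI1710_HYSTERESIS_OFF,
					    APCI1710_SIMPLE_MODE,
					    APCI1710_HYSTERESIS_OFF));
	return 0;
}
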
-static int i_APCI1710_InitCounter(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_CounterRange,
- unsigned char b_FirstCounterModus,
- unsigned char b_FirstCounterOption,
- unsigned char b_SecondCounterModus,
- unsigned char b_SecondCounterOption)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /*******************************/
- /* Test if incremental counter */
- /*******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER) {
- /**************************/
- /* Test the counter range */
- /**************************/
-
- if (b_CounterRange == APCI1710_16BIT_COUNTER
- || b_CounterRange == APCI1710_32BIT_COUNTER) {
- /********************************/
- /* Test the first counter modus */
- /********************************/
-
- if (b_FirstCounterModus == APCI1710_QUADRUPLE_MODE ||
- b_FirstCounterModus == APCI1710_DOUBLE_MODE ||
- b_FirstCounterModus == APCI1710_SIMPLE_MODE ||
- b_FirstCounterModus == APCI1710_DIRECT_MODE) {
- /*********************************/
- /* Test the first counter option */
- /*********************************/
-
- if ((b_FirstCounterModus == APCI1710_DIRECT_MODE
- && (b_FirstCounterOption ==
- APCI1710_INCREMENT
- || b_FirstCounterOption
- == APCI1710_DECREMENT))
- || (b_FirstCounterModus !=
- APCI1710_DIRECT_MODE
- && (b_FirstCounterOption ==
- APCI1710_HYSTERESIS_ON
- || b_FirstCounterOption
- ==
- APCI1710_HYSTERESIS_OFF)))
- {
- /**************************/
- /* Test if 16-bit counter */
- /**************************/
-
- if (b_CounterRange ==
- APCI1710_16BIT_COUNTER) {
- /*********************************/
- /* Test the second counter modus */
- /*********************************/
-
- if ((b_FirstCounterModus !=
- APCI1710_DIRECT_MODE
- &&
- (b_SecondCounterModus
- ==
- APCI1710_QUADRUPLE_MODE
- ||
- b_SecondCounterModus
- ==
- APCI1710_DOUBLE_MODE
- ||
- b_SecondCounterModus
- ==
- APCI1710_SIMPLE_MODE))
- || (b_FirstCounterModus
- ==
- APCI1710_DIRECT_MODE
- &&
- b_SecondCounterModus
- ==
- APCI1710_DIRECT_MODE))
- {
- /**********************************/
- /* Test the second counter option */
- /**********************************/
-
- if ((b_SecondCounterModus == APCI1710_DIRECT_MODE && (b_SecondCounterOption == APCI1710_INCREMENT || b_SecondCounterOption == APCI1710_DECREMENT)) || (b_SecondCounterModus != APCI1710_DIRECT_MODE && (b_SecondCounterOption == APCI1710_HYSTERESIS_ON || b_SecondCounterOption == APCI1710_HYSTERESIS_OFF))) {
- i_ReturnValue =
- 0;
- } else {
- /*********************************************************/
- /* The selected second counter operating option is wrong */
- /*********************************************************/
-
- DPRINTK("The selected second counter operating option is wrong\n");
- i_ReturnValue =
- -7;
- }
- } else {
- /*******************************************************/
- /* The selected second counter operating mode is wrong */
- /*******************************************************/
-
- DPRINTK("The selected second counter operating mode is wrong\n");
- i_ReturnValue = -6;
- }
- }
- } else {
- /********************************************************/
- /* The selected first counter operating option is wrong */
- /********************************************************/
-
- DPRINTK("The selected first counter operating option is wrong\n");
- i_ReturnValue = -5;
- }
- } else {
- /******************************************************/
- /* The selected first counter operating mode is wrong */
- /******************************************************/
- DPRINTK("The selected first counter operating mode is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /***************************************/
- /* The selected counter range is wrong */
- /***************************************/
-
- DPRINTK("The selected counter range is wrong\n");
- i_ReturnValue = -3;
- }
-
- /*************************/
- /* Test if a error occur */
- /*************************/
-
- if (i_ReturnValue == 0) {
- /**************************/
- /* Test if 16-Bit counter */
- /**************************/
-
- if (b_CounterRange == APCI1710_32BIT_COUNTER) {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister1 = b_CounterRange |
- b_FirstCounterModus |
- b_FirstCounterOption;
- } else {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister1 = b_CounterRange |
- (b_FirstCounterModus & 0x5) |
- (b_FirstCounterOption & 0x20) |
- (b_SecondCounterModus & 0xA) |
- (b_SecondCounterOption & 0x40);
-
- /***********************/
- /* Test if direct mode */
- /***********************/
-
- if (b_FirstCounterModus == APCI1710_DIRECT_MODE) {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister1 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister1 |
- APCI1710_DIRECT_MODE;
- }
- }
-
- /***************************/
- /* Write the configuration */
- /***************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4,
- devpriv->s_BoardInfos.
- ui_Address + 20 + (64 * b_ModulNbr));
-
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_CounterInit = 1;
- }
- } else {
- /**************************************/
- /* The module is not a counter module */
- /**************************************/
-
- DPRINTK("The module is not a counter module\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_CounterAutoTest |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char *_ pb_TestStatus) |
-+----------------------------------------------------------------------------+
-| Task : A test mode is intended for testing the component and |
-| the connected periphery. All the 8-bit counter chains |
-| are operated internally as down counters. |
-| Independently from the external signals, |
-| all the four 8-bit counter chains are decremented in |
-| parallel by each negative clock pulse edge of CLKX. |
-| |
-| Counter auto test conclusion |
-| ---------------------------- |
-| +-----------------+-----------------------------+ |
-| | pb_TestStatus | Error description | |
-| | mask | | |
-| |-----------------+-----------------------------| |
-| | 0000 | No error detected | |
-| |-----------------|-----------------------------| |
-| | 0001 | Error detected of counter 0 | |
-| |-----------------|-----------------------------| |
-| | 0010 | Error detected of counter 1 | |
-| |-----------------|-----------------------------| |
-| | 0100 | Error detected of counter 2 | |
-| |-----------------|-----------------------------| |
-| | 1000 | Error detected of counter 3 | |
-| +-----------------+-----------------------------+ |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 | |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_TestStatus : Auto test conclusion. See table|
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-+----------------------------------------------------------------------------+
-*/
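
A tiny stand-alone sketch of decoding the pb_TestStatus mask from the conclusion table above; the example value is arbitrary.

#include <stdio.h>

int main(void)
{
	unsigned char test_status = 0x5;	/* example mask: errors on modules 0 and 2 */
	unsigned int module;

	/* Bit n of the mask reports an error on the counter of module n. */
	for (module = 0; module < 4; module++)
		printf("module %u: %s\n", module,
		       (test_status >> module) & 1 ? "error detected" : "no error");
	return 0;
}
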
-static int i_APCI1710_CounterAutoTest(struct comedi_device *dev,
- unsigned char *pb_TestStatus)
-{
- struct addi_private *devpriv = dev->private;
- unsigned char b_ModulCpt = 0;
- int i_ReturnValue = 0;
- unsigned int dw_LathchValue;
-
- *pb_TestStatus = 0;
-
- /********************************/
- /* Test if counter module found */
- /********************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[0] & 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER
- || (devpriv->s_BoardInfos.
- dw_MolduleConfiguration[1] & 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER
- || (devpriv->s_BoardInfos.
- dw_MolduleConfiguration[2] & 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER
- || (devpriv->s_BoardInfos.
- dw_MolduleConfiguration[3] & 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER) {
- for (b_ModulCpt = 0; b_ModulCpt < 4; b_ModulCpt++) {
- /*******************************/
- /* Test if incremental counter */
- /*******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulCpt] &
- 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER) {
- /******************/
- /* Start the test */
- /******************/
-
- outl(3, devpriv->s_BoardInfos.
- ui_Address + 16 + (64 * b_ModulCpt));
-
- /*********************/
-			/* Latch the counter */
- /*********************/
-
- outl(1, devpriv->s_BoardInfos.
- ui_Address + (64 * b_ModulCpt));
-
- /************************/
- /* Read the latch value */
- /************************/
-
- dw_LathchValue = inl(devpriv->s_BoardInfos.
- ui_Address + 4 + (64 * b_ModulCpt));
-
- if ((dw_LathchValue & 0xFF) !=
- ((dw_LathchValue >> 8) & 0xFF)
- && (dw_LathchValue & 0xFF) !=
- ((dw_LathchValue >> 16) & 0xFF)
- && (dw_LathchValue & 0xFF) !=
- ((dw_LathchValue >> 24) & 0xFF)) {
- *pb_TestStatus =
- *pb_TestStatus | (1 <<
- b_ModulCpt);
- }
-
- /*****************/
- /* Stop the test */
- /*****************/
-
- outl(0, devpriv->s_BoardInfos.
- ui_Address + 16 + (64 * b_ModulCpt));
- }
- }
- } else {
- /***************************/
- /* No counter module found */
- /***************************/
-
- DPRINTK("No counter module found\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitIndex (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_ReferenceAction, |
-| unsigned char_ b_IndexOperation, |
-| unsigned char_ b_AutoMode, |
-| unsigned char_ b_InterruptEnable) |
-+----------------------------------------------------------------------------+
-| Task : Initialise the index corresponding to the selected |
-|                     module (b_ModulNbr). If an INDEX flag occurs, you have  |
-|                     the possibility to clear the 32-Bit counter or to latch |
-|                     the current 32-Bit value into the first latch           |
-|                     register. The b_IndexOperation parameter gives the      |
-|                     possibility to choose the INDEX action.                 |
-| If you have enabled the automatic mode, each INDEX |
-| action is cleared automatically, else you must read |
-| the index status ("i_APCI1710_ReadIndexStatus") |
-| after each INDEX action. |
-| |
-| |
-| Index action |
-| ------------ |
-| |
-| +------------------------+------------------------------------+ |
-| | b_IndexOperation | Operation | |
-| |------------------------+------------------------------------| |
-| |APCI1710_LATCH_COUNTER | After a index signal, the counter | |
-| | | value (32-Bit) is latched in to | |
-| | | the first latch register | |
-| |------------------------|------------------------------------| |
-| |APCI1710_CLEAR_COUNTER | After a index signal, the counter | |
-| | | value is cleared (32-Bit) | |
-| +------------------------+------------------------------------+ |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-|                    unsigned char_ b_ReferenceAction : Determines if the     |
-|                                                       reference must be set |
-|                                                       for the index to be   |
-|                                                       accepted              |
-|                                                       APCI1710_ENABLE :     |
-|                                                       Reference must be set |
-|                                                       for the index to be   |
-|                                                       accepted              |
-|                                                       APCI1710_DISABLE :    |
-|                                                       Reference has no      |
-|                                                       importance            |
-| unsigned char_ b_IndexOperation : Index operating mode. |
-| See table. |
-| unsigned char_ b_AutoMode : Enable or disable the |
-| automatic index reset. |
-| APCI1710_ENABLE : |
-| Enable the automatic mode |
-| APCI1710_DISABLE : |
-| Disable the automatic mode |
-| unsigned char_ b_InterruptEnable : Enable or disable the |
-| interrupt. |
-| APCI1710_ENABLE : |
-| Enable the interrupt |
-| APCI1710_DISABLE : |
-| Disable the interrupt |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4 The reference action parameter is wrong |
-| -5: The index operating mode parameter is wrong |
-| -6: The auto mode parameter is wrong |
-| -7: Interrupt parameter is wrong |
-| -8: Interrupt function not initialised. |
-| See function "i_APCI1710_SetBoardIntRoutineX" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InitIndex(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_ReferenceAction,
- unsigned char b_IndexOperation,
- unsigned char b_AutoMode,
- unsigned char b_InterruptEnable)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /********************************/
- /* Test the reference parameter */
- /********************************/
-
- if (b_ReferenceAction == APCI1710_ENABLE ||
- b_ReferenceAction == APCI1710_DISABLE) {
- /****************************/
- /* Test the index parameter */
- /****************************/
-
-            if (b_IndexOperation == APCI1710_HIGH_EDGE_LATCH_COUNTER ||
-                b_IndexOperation == APCI1710_LOW_EDGE_LATCH_COUNTER ||
-                b_IndexOperation == APCI1710_HIGH_EDGE_CLEAR_COUNTER ||
-                b_IndexOperation == APCI1710_LOW_EDGE_CLEAR_COUNTER ||
-                b_IndexOperation == APCI1710_HIGH_EDGE_LATCH_AND_CLEAR_COUNTER ||
-                b_IndexOperation == APCI1710_LOW_EDGE_LATCH_AND_CLEAR_COUNTER) {
- /********************************/
- /* Test the auto mode parameter */
- /********************************/
-
- if (b_AutoMode == APCI1710_ENABLE ||
- b_AutoMode == APCI1710_DISABLE)
- {
- /***************************/
- /* Test the interrupt mode */
- /***************************/
-
- if (b_InterruptEnable ==
- APCI1710_ENABLE
- || b_InterruptEnable ==
- APCI1710_DISABLE) {
-
-                        /**********************************/
-                        /* Make the configuration command */
-                        /**********************************/
-
-                        if (b_ReferenceAction == APCI1710_ENABLE) {
-                            devpriv->s_ModuleInfo[b_ModulNbr].
-                                s_SiemensCounterInfo.
-                                s_ModeRegister.
-                                s_ByteModeRegister.
-                                b_ModeRegister2 |=
-                                APCI1710_ENABLE_INDEX_ACTION;
-                        } else {
-                            devpriv->s_ModuleInfo[b_ModulNbr].
-                                s_SiemensCounterInfo.
-                                s_ModeRegister.
-                                s_ByteModeRegister.
-                                b_ModeRegister2 &=
-                                APCI1710_DISABLE_INDEX_ACTION;
-                        }
-
-                        /****************************************/
-                        /* Test if low level latch and/or clear */
-                        /****************************************/
-
-                        if (b_IndexOperation == APCI1710_LOW_EDGE_LATCH_COUNTER ||
-                            b_IndexOperation == APCI1710_LOW_EDGE_CLEAR_COUNTER ||
-                            b_IndexOperation == APCI1710_LOW_EDGE_LATCH_AND_CLEAR_COUNTER) {
-                            /*************************************/
-                            /* Set the index level to low (DQ26) */
-                            /*************************************/
-
-                            devpriv->s_ModuleInfo[b_ModulNbr].
-                                s_SiemensCounterInfo.
-                                s_ModeRegister.
-                                s_ByteModeRegister.
-                                b_ModeRegister4 |=
-                                APCI1710_SET_LOW_INDEX_LEVEL;
-                        } else {
-                            /**************************************/
-                            /* Set the index level to high (DQ26) */
-                            /**************************************/
-
-                            devpriv->s_ModuleInfo[b_ModulNbr].
-                                s_SiemensCounterInfo.
-                                s_ModeRegister.
-                                s_ByteModeRegister.
-                                b_ModeRegister4 &=
-                                APCI1710_SET_HIGH_INDEX_LEVEL;
-                        }
-
-                        /***********************************/
-                        /* Test if latch and clear counter */
-                        /***********************************/
-
-                        if (b_IndexOperation == APCI1710_HIGH_EDGE_LATCH_AND_CLEAR_COUNTER ||
-                            b_IndexOperation == APCI1710_LOW_EDGE_LATCH_AND_CLEAR_COUNTER) {
-                            /***************************************/
-                            /* Set the latch and clear flag (DQ27) */
-                            /***************************************/
-
-                            devpriv->s_ModuleInfo[b_ModulNbr].
-                                s_SiemensCounterInfo.
-                                s_ModeRegister.
-                                s_ByteModeRegister.
-                                b_ModeRegister4 |=
-                                APCI1710_ENABLE_LATCH_AND_CLEAR;
-                        } else {
-                            /*****************************************/
-                            /* Clear the latch and clear flag (DQ27) */
-                            /*****************************************/
-
-                            devpriv->s_ModuleInfo[b_ModulNbr].
-                                s_SiemensCounterInfo.
-                                s_ModeRegister.
-                                s_ByteModeRegister.
-                                b_ModeRegister4 &=
-                                APCI1710_DISABLE_LATCH_AND_CLEAR;
-
-                            /*************************/
-                            /* Test if latch counter */
-                            /*************************/
-
-                            if (b_IndexOperation == APCI1710_HIGH_EDGE_LATCH_COUNTER ||
-                                b_IndexOperation == APCI1710_LOW_EDGE_LATCH_COUNTER) {
-                                /*********************************/
-                                /* Enable the latch from counter */
-                                /*********************************/
-
-                                devpriv->s_ModuleInfo[b_ModulNbr].
-                                    s_SiemensCounterInfo.
-                                    s_ModeRegister.
-                                    s_ByteModeRegister.
-                                    b_ModeRegister2 |=
-                                    APCI1710_INDEX_LATCH_COUNTER;
-                            } else {
-                                /*********************************/
-                                /* Enable the clear from counter */
-                                /*********************************/
-
-                                devpriv->s_ModuleInfo[b_ModulNbr].
-                                    s_SiemensCounterInfo.
-                                    s_ModeRegister.
-                                    s_ByteModeRegister.
-                                    b_ModeRegister2 &=
-                                    ~APCI1710_INDEX_LATCH_COUNTER;
-                            }
-                        }
-
-                        if (b_AutoMode == APCI1710_DISABLE) {
-                            devpriv->s_ModuleInfo[b_ModulNbr].
-                                s_SiemensCounterInfo.
-                                s_ModeRegister.
-                                s_ByteModeRegister.
-                                b_ModeRegister2 |=
-                                APCI1710_INDEX_AUTO_MODE;
-                        } else {
-                            devpriv->s_ModuleInfo[b_ModulNbr].
-                                s_SiemensCounterInfo.
-                                s_ModeRegister.
-                                s_ByteModeRegister.
-                                b_ModeRegister2 &=
-                                ~APCI1710_INDEX_AUTO_MODE;
-                        }
-
-                        if (b_InterruptEnable == APCI1710_ENABLE) {
-                            devpriv->s_ModuleInfo[b_ModulNbr].
-                                s_SiemensCounterInfo.
-                                s_ModeRegister.
-                                s_ByteModeRegister.
-                                b_ModeRegister3 |=
-                                APCI1710_ENABLE_INDEX_INT;
-                        } else {
-                            devpriv->s_ModuleInfo[b_ModulNbr].
-                                s_SiemensCounterInfo.
-                                s_ModeRegister.
-                                s_ByteModeRegister.
-                                b_ModeRegister3 &=
-                                APCI1710_DISABLE_INDEX_INT;
-                        }
-
-                        devpriv->s_ModuleInfo[b_ModulNbr].
-                            s_SiemensCounterInfo.
-                            s_InitFlag.b_IndexInit = 1;
-
- } else {
- /********************************/
- /* Interrupt parameter is wrong */
- /********************************/
- DPRINTK("Interrupt parameter is wrong\n");
- i_ReturnValue = -7;
- }
- } else {
- /************************************/
- /* The auto mode parameter is wrong */
- /************************************/
-
- DPRINTK("The auto mode parameter is wrong\n");
- i_ReturnValue = -6;
- }
- } else {
- /***********************************************/
- /* The index operating mode parameter is wrong */
- /***********************************************/
-
- DPRINTK("The index operating mode parameter is wrong\n");
- i_ReturnValue = -5;
- }
- } else {
- /*******************************************/
- /* The reference action parameter is wrong */
- /*******************************************/
-
- DPRINTK("The reference action parameter is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
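The several hundred lines above reduce to a handful of read-modify-write operations on mode registers 2, 3 and 4. The sketch below condenses that decision logic; the mask values are the driver's APCI1710_* constants (passed in here so the fragment stays self-contained), and the struct and function names are illustrative only.

struct index_masks {    /* filled from the APCI1710_* defines in the driver header */
    unsigned char enable_index_action, disable_index_action;
    unsigned char set_low_index_level, set_high_index_level;
    unsigned char enable_latch_and_clear, disable_latch_and_clear;
    unsigned char index_latch_counter;
    unsigned char index_auto_mode;
    unsigned char enable_index_int, disable_index_int;
};

static void init_index_bits(unsigned char *reg2, unsigned char *reg3,
                            unsigned char *reg4, const struct index_masks *m,
                            int reference_on, int low_edge, int latch_and_clear,
                            int latch_only, int auto_mode, int irq_on)
{
    /* index accepted only when the reference is set */
    *reg2 = reference_on ? (*reg2 | m->enable_index_action)
                         : (*reg2 & m->disable_index_action);

    /* active index level (DQ26) */
    *reg4 = low_edge ? (*reg4 | m->set_low_index_level)
                     : (*reg4 & m->set_high_index_level);

    /* latch-and-clear (DQ27), otherwise plain latch or plain clear */
    if (latch_and_clear) {
        *reg4 |= m->enable_latch_and_clear;
    } else {
        *reg4 &= m->disable_latch_and_clear;
        if (latch_only)
            *reg2 |= m->index_latch_counter;
        else
            *reg2 &= (unsigned char)~m->index_latch_counter;
    }

    /* note the inversion in the original: the bit is set when automatic mode is DISABLED */
    if (!auto_mode)
        *reg2 |= m->index_auto_mode;
    else
        *reg2 &= (unsigned char)~m->index_auto_mode;

    /* index interrupt enable */
    *reg3 = irq_on ? (*reg3 | m->enable_index_int)
                   : (*reg3 & m->disable_index_int);
}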
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitReference |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_ReferenceLevel) |
-+----------------------------------------------------------------------------+
-| Task : Initialise the reference corresponding to the selected |
-| module (b_ModulNbr). |
-| |
-| Reference level |
-| --------------- |
-| +--------------------+-------------------------+ |
-| | b_ReferenceLevel | Operation | |
-| +--------------------+-------------------------+ |
-|                    |   APCI1710_LOW     | Reference occurs if "0" |       |
-|                    |--------------------|-------------------------|       |
-|                    |   APCI1710_HIGH    | Reference occurs if "1" |       |
-| +--------------------+-------------------------+ |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-| unsigned char_ b_ReferenceLevel : Reference level. |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The selected module number parameter is wrong |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Reference level parameter is wrong |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InitReference(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_ReferenceLevel)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /**************************************/
- /* Test the reference level parameter */
- /**************************************/
-
- if (b_ReferenceLevel == 0 || b_ReferenceLevel == 1) {
- if (b_ReferenceLevel == 1) {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 |
- APCI1710_REFERENCE_HIGH;
- } else {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 &
- APCI1710_REFERENCE_LOW;
- }
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4,
- devpriv->s_BoardInfos.ui_Address + 20 +
- (64 * b_ModulNbr));
-
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_ReferenceInit = 1;
- } else {
- /**************************************/
- /* Reference level parameter is wrong */
- /**************************************/
-
- DPRINTK("Reference level parameter is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
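The single outl() at offset 20 works because the four byte-wide mode registers alias one 32-bit word. A sketch of the union this code implies; the field names follow the driver, but the exact layout is an assumption:

union mode_register_sketch {
    unsigned int dw_ModeRegister1_2_3_4;    /* written in one outl() to offset 20 */
    struct {
        unsigned char b_ModeRegister1;
        unsigned char b_ModeRegister2;      /* reference level, index action, latch/clear select */
        unsigned char b_ModeRegister3;      /* interrupt enables, output H, filter bits 0..2     */
        unsigned char b_ModeRegister4;      /* index level, 40 MHz flags, strobe level, filter bit 3 */
    } s_ByteModeRegister;
};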
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitExternalStrobe |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_ExternalStrobe, |
-| unsigned char_ b_ExternalStrobeLevel) |
-+----------------------------------------------------------------------------+
-| Task : Initialises the external strobe level corresponding to |
-| the selected module (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-| unsigned char_ b_ExternalStrobe : External strobe selection |
-| 0 : External strobe A |
-| 1 : External strobe B |
-| unsigned char_ b_ExternalStrobeLevel : External strobe level |
-| APCI1710_LOW : |
-| External latch occurs if "0" |
-| APCI1710_HIGH : |
-| External latch occurs if "1" |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The selected module number is wrong |
-| -3: Counter not initialised. |
-| See function "i_APCI1710_InitCounter" |
-| -4: External strobe selection is wrong |
-| -5: External strobe level parameter is wrong |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InitExternalStrobe(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_ExternalStrobe,
- unsigned char b_ExternalStrobeLevel)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /**************************************/
- /* Test the external strobe selection */
- /**************************************/
-
- if (b_ExternalStrobe == 0 || b_ExternalStrobe == 1) {
- /******************/
- /* Test the level */
- /******************/
-
- if ((b_ExternalStrobeLevel == APCI1710_HIGH) ||
- ((b_ExternalStrobeLevel == APCI1710_LOW
- && (devpriv->
- s_BoardInfos.
- dw_MolduleConfiguration
- [b_ModulNbr] &
- 0xFFFF) >=
- 0x3135))) {
- /*****************/
- /* Set the level */
- /*****************/
-
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister4 = (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister4 & (0xFF -
- (0x10 << b_ExternalStrobe))) | ((b_ExternalStrobeLevel ^ 1) << (4 + b_ExternalStrobe));
- } else {
- /********************************************/
- /* External strobe level parameter is wrong */
- /********************************************/
-
- DPRINTK("External strobe level parameter is wrong\n");
- i_ReturnValue = -5;
- }
- } /* if (b_ExternalStrobe == 0 || b_ExternalStrobe == 1) */
- else {
- /**************************************/
- /* External strobe selection is wrong */
- /**************************************/
-
- DPRINTK("External strobe selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_ExternalStrobe == 0 || b_ExternalStrobe == 1) */
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
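The strobe-level update above packs into two bits of mode register 4: bit 4 for strobe A, bit 5 for strobe B, storing the inverted level (the ^ 1 implies APCI1710_HIGH is 1). A minimal sketch with an illustrative name, omitting the firmware-revision check:

static unsigned char pack_strobe_level(unsigned char reg4,
                                       unsigned int strobe,  /* 0 = A, 1 = B */
                                       unsigned int level)   /* 0 = low, 1 = high */
{
    reg4 &= 0xFF - (0x10 << strobe);        /* clear bit 4 or bit 5 */
    reg4 |= (level ^ 1) << (4 + strobe);    /* store the inverted level */
    return reg4;
}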
-
- /*
- +----------------------------------------------------------------------------+
- | Function Name : _INT_ i_APCI1710_InitCompareLogic |
- | (unsigned char_ b_BoardHandle, |
- | unsigned char_ b_ModulNbr, |
- | unsigned int_ ui_CompareValue) |
- +----------------------------------------------------------------------------+
- | Task              : Sets the 32-Bit compare value. When the incremental  |
- |                     counter reaches the compare value (ui_CompareValue), |
- |                     an interrupt is generated.                           |
- +----------------------------------------------------------------------------+
- | Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
- | unsigned char_ b_ModulNbr : Module number to configure |
- | (0 to 3) |
- | unsigned int_ ui_CompareValue : 32-Bit compare value |
- +----------------------------------------------------------------------------+
- | Output Parameters : -
- +----------------------------------------------------------------------------+
- | Return Value : 0: No error |
- | -1: The handle parameter of the board is wrong |
- | -2: No counter module found |
- | -3: Counter not initialised see function |
- | "i_APCI1710_InitCounter" |
- +----------------------------------------------------------------------------+
- */
-static int i_APCI1710_InitCompareLogic(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned int ui_CompareValue)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
-
- outl(ui_CompareValue, devpriv->s_BoardInfos.
- ui_Address + 28 + (64 * b_ModulNbr));
-
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_CompareLogicInit = 1;
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitFrequencyMeasurement |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PCIInputClock, |
-| unsigned char_ b_TimingUnity, |
-| ULONG_ ul_TimingInterval, |
-| PULONG_ pul_RealTimingInterval) |
-+----------------------------------------------------------------------------+
-| Task : Sets the time for the frequency measurement. |
-| Configures the selected TOR incremental counter of the |
-| selected module (b_ModulNbr). The ul_TimingInterval and|
-|                     b_TimingUnity determine the time base for the         |
-| measurement. The pul_RealTimingInterval returns the |
-| real time value. You must call up this function before |
-| you call up any other function which gives access to |
-| the frequency measurement. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Number of the module to be |
-| configured (0 to 3) |
-| unsigned char_ b_PCIInputClock : Selection of the PCI bus |
-| clock |
-| - APCI1710_30MHZ : |
-| The PC has a PCI bus clock |
-| of 30 MHz |
-| - APCI1710_33MHZ : |
-| The PC has a PCI bus clock |
-| of 33 MHz |
-| unsigned char_ b_TimingUnity : Base time unit (0 to 2) |
-| 0 : ns |
-|                                                1 : µs                     |
-| 2 : ms |
-| ULONG_ ul_TimingInterval: Base time value. |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_RealTimingInterval : Real base time value. |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The selected module number is wrong |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: The selected PCI input clock is wrong |
-| -5: Timing unity selection is wrong |
-| -6: Base timing selection is wrong |
-| -7: 40MHz quartz not on board |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InitFrequencyMeasurement(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_PCIInputClock,
- unsigned char b_TimingUnity,
- unsigned int ul_TimingInterval,
- unsigned int *pul_RealTimingInterval)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int ul_TimerValue = 0;
- double d_RealTimingInterval;
- unsigned int dw_Status = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /**************************/
- /* Test the PCI bus clock */
- /**************************/
-
- if ((b_PCIInputClock == APCI1710_30MHZ) ||
- (b_PCIInputClock == APCI1710_33MHZ) ||
- (b_PCIInputClock == APCI1710_40MHZ)) {
- /************************/
- /* Test the timing unit */
- /************************/
-
- if (b_TimingUnity <= 2) {
- /**********************************/
- /* Test the base timing selection */
- /**********************************/
-
-                if (((b_PCIInputClock == APCI1710_30MHZ) &&
-                     (b_TimingUnity == 0) &&
-                     (ul_TimingInterval >= 266) &&
-                     (ul_TimingInterval <= 8738133UL)) ||
-                    ((b_PCIInputClock == APCI1710_30MHZ) &&
-                     (b_TimingUnity == 1) &&
-                     (ul_TimingInterval >= 1) &&
-                     (ul_TimingInterval <= 8738UL)) ||
-                    ((b_PCIInputClock == APCI1710_30MHZ) &&
-                     (b_TimingUnity == 2) &&
-                     (ul_TimingInterval >= 1) &&
-                     (ul_TimingInterval <= 8UL)) ||
-                    ((b_PCIInputClock == APCI1710_33MHZ) &&
-                     (b_TimingUnity == 0) &&
-                     (ul_TimingInterval >= 242) &&
-                     (ul_TimingInterval <= 7943757UL)) ||
-                    ((b_PCIInputClock == APCI1710_33MHZ) &&
-                     (b_TimingUnity == 1) &&
-                     (ul_TimingInterval >= 1) &&
-                     (ul_TimingInterval <= 7943UL)) ||
-                    ((b_PCIInputClock == APCI1710_33MHZ) &&
-                     (b_TimingUnity == 2) &&
-                     (ul_TimingInterval >= 1) &&
-                     (ul_TimingInterval <= 7UL)) ||
-                    ((b_PCIInputClock == APCI1710_40MHZ) &&
-                     (b_TimingUnity == 0) &&
-                     (ul_TimingInterval >= 200) &&
-                     (ul_TimingInterval <= 6553500UL)) ||
-                    ((b_PCIInputClock == APCI1710_40MHZ) &&
-                     (b_TimingUnity == 1) &&
-                     (ul_TimingInterval >= 1) &&
-                     (ul_TimingInterval <= 6553UL)) ||
-                    ((b_PCIInputClock == APCI1710_40MHZ) &&
-                     (b_TimingUnity == 2) &&
-                     (ul_TimingInterval >= 1) &&
-                     (ul_TimingInterval <= 6UL))) {
- /**********************/
- /* Test if 40MHz used */
- /**********************/
-
-                    if (b_PCIInputClock == APCI1710_40MHZ) {
-                        /******************************/
-                        /* Test if firmware >= Rev1.5 */
-                        /******************************/
-
-                        if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) >= 0x3135) {
-                            /*********************************/
-                            /* Test if 40MHz quartz on board */
-                            /*********************************/
-
-                            /*INPDW (ps_APCI1710Variable->
-                               s_Board [b_BoardHandle].
-                               s_BoardInfos.
-                               ui_Address + 36 + (64 * b_ModulNbr), &dw_Status); */
-                            dw_Status = inl(devpriv->s_BoardInfos.
-                                ui_Address + 36 + (64 * b_ModulNbr));
-
-                            /******************************/
-                            /* Test the quartz flag (DQ0) */
-                            /******************************/
-
-                            if ((dw_Status & 1) != 1) {
-                                /*****************************/
-                                /* 40MHz quartz not on board */
-                                /*****************************/
-
-                                DPRINTK("40MHz quartz not on board\n");
-                                i_ReturnValue = -7;
-                            }
-                        } else {
-                            /*****************************/
-                            /* 40MHz quartz not on board */
-                            /*****************************/
-                            DPRINTK("40MHz quartz not on board\n");
-                            i_ReturnValue = -7;
-                        }
- } /* if (b_PCIInputClock == APCI1710_40MHZ) */
-
-                    /*******************************/
-                    /* Test that no error occurred */
-                    /*******************************/
-
- if (i_ReturnValue == 0) {
- /****************************/
- /* Test the INC_CPT version */
- /****************************/
-
- if ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) >= 0x3131) {
-
- /**********************/
- /* Test if 40MHz used */
- /**********************/
-
-                        if (b_PCIInputClock == APCI1710_40MHZ) {
-                            /**********************************/
-                            /* Enable the 40MHz quartz (DQ30) */
-                            /**********************************/
-
-                            devpriv->s_ModuleInfo[b_ModulNbr].
-                                s_SiemensCounterInfo.
-                                s_ModeRegister.
-                                s_ByteModeRegister.
-                                b_ModeRegister4 |=
-                                APCI1710_ENABLE_40MHZ_FREQUENCY;
-                        } else {
-                            /***********************************/
-                            /* Disable the 40MHz quartz (DQ30) */
-                            /***********************************/
-
-                            devpriv->s_ModuleInfo[b_ModulNbr].
-                                s_SiemensCounterInfo.
-                                s_ModeRegister.
-                                s_ByteModeRegister.
-                                b_ModeRegister4 &=
-                                APCI1710_DISABLE_40MHZ_FREQUENCY;
-                        } /* if (b_PCIInputClock == APCI1710_40MHZ) */
-
-                        /*********************************/
-                        /* Calculate the division factor */
-                        /*********************************/
-
-                        fpu_begin();
-                        switch (b_TimingUnity) {
-                        /******/
-                        /* ns */
-                        /******/
-                        case 0:
-                            /* Timer 0 factor */
-                            ul_TimerValue = (unsigned int)
-                                (ul_TimingInterval * (0.00025 * b_PCIInputClock));
-
-                            /* Round the value */
-                            if ((double)ul_TimingInterval * (0.00025 * (double)b_PCIInputClock) >=
-                                (double)ul_TimerValue + 0.5) {
-                                ul_TimerValue = ul_TimerValue + 1;
-                            }
-
-                            /* Calculate the real timing */
-                            *pul_RealTimingInterval = (unsigned int)
-                                (ul_TimerValue / (0.00025 * (double)b_PCIInputClock));
-                            d_RealTimingInterval = (double)ul_TimerValue /
-                                (0.00025 * (double)b_PCIInputClock);
-
-                            if ((double)ul_TimerValue / (0.00025 * (double)b_PCIInputClock) >=
-                                (double)*pul_RealTimingInterval + 0.5) {
-                                *pul_RealTimingInterval = *pul_RealTimingInterval + 1;
-                            }
-
-                            ul_TimingInterval = ul_TimingInterval - 1;
-                            ul_TimerValue = ul_TimerValue - 2;
-                            break;
-
-                        /******/
-                        /* µs */
-                        /******/
-                        case 1:
-                            /* Timer 0 factor */
-                            ul_TimerValue = (unsigned int)
-                                (ul_TimingInterval * (0.25 * b_PCIInputClock));
-
-                            /* Round the value */
-                            if ((double)ul_TimingInterval * (0.25 * (double)b_PCIInputClock) >=
-                                (double)ul_TimerValue + 0.5) {
-                                ul_TimerValue = ul_TimerValue + 1;
-                            }
-
-                            /* Calculate the real timing */
-                            *pul_RealTimingInterval = (unsigned int)
-                                (ul_TimerValue / (0.25 * (double)b_PCIInputClock));
-                            d_RealTimingInterval = (double)ul_TimerValue /
-                                (0.25 * (double)b_PCIInputClock);
-
-                            if ((double)ul_TimerValue / (0.25 * (double)b_PCIInputClock) >=
-                                (double)*pul_RealTimingInterval + 0.5) {
-                                *pul_RealTimingInterval = *pul_RealTimingInterval + 1;
-                            }
-
-                            ul_TimingInterval = ul_TimingInterval - 1;
-                            ul_TimerValue = ul_TimerValue - 2;
-                            break;
-
-                        /******/
-                        /* ms */
-                        /******/
-                        case 2:
-                            /* Timer 0 factor */
-                            ul_TimerValue = ul_TimingInterval *
-                                (250.0 * b_PCIInputClock);
-
-                            /* Round the value */
-                            if ((double)ul_TimingInterval * (250.0 * (double)b_PCIInputClock) >=
-                                (double)ul_TimerValue + 0.5) {
-                                ul_TimerValue = ul_TimerValue + 1;
-                            }
-
-                            /* Calculate the real timing */
-                            *pul_RealTimingInterval = (unsigned int)
-                                (ul_TimerValue / (250.0 * (double)b_PCIInputClock));
-                            d_RealTimingInterval = (double)ul_TimerValue /
-                                (250.0 * (double)b_PCIInputClock);
-
-                            if ((double)ul_TimerValue / (250.0 * (double)b_PCIInputClock) >=
-                                (double)*pul_RealTimingInterval + 0.5) {
-                                *pul_RealTimingInterval = *pul_RealTimingInterval + 1;
-                            }
-
-                            ul_TimingInterval = ul_TimingInterval - 1;
-                            ul_TimerValue = ul_TimerValue - 2;
-                            break;
-                        }
-
-                        fpu_end();
- /*************************/
- /* Write the timer value */
- /*************************/
-
- outl(ul_TimerValue, devpriv->s_BoardInfos.ui_Address + 32 + (64 * b_ModulNbr));
-
- /*******************************/
- /* Set the initialisation flag */
- /*******************************/
-
-                        devpriv->s_ModuleInfo[b_ModulNbr].
-                            s_SiemensCounterInfo.
-                            s_InitFlag.b_FrequencyMeasurementInit = 1;
- } else {
- /***************************/
- /* Counter not initialised */
- /***************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue =
- -3;
- }
- } /* if (i_ReturnValue == 0) */
- } else {
- /**********************************/
- /* Base timing selection is wrong */
- /**********************************/
-
- DPRINTK("Base timing selection is wrong\n");
- i_ReturnValue = -6;
- }
- } else {
- /***********************************/
- /* Timing unity selection is wrong */
- /***********************************/
-
- DPRINTK("Timing unity selection is wrong\n");
- i_ReturnValue = -5;
- }
- } else {
- /*****************************************/
- /* The selected PCI input clock is wrong */
- /*****************************************/
-
- DPRINTK("The selected PCI input clock is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
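The three switch cases above differ only in a constant factor (0.00025, 0.25 and 250.0 times the PCI clock in MHz for ns, µs and ms, which is consistent with the timer counting at a quarter of the PCI clock). A compact restatement of that arithmetic as a standalone sketch; the function and parameter names are made up here:

/* clock_mhz is 30, 33 or 40; unit is 0 = ns, 1 = µs, 2 = ms.
 * Returns the value written to the timer register (offset 32) and stores
 * the achievable interval in *real_interval.
 */
static unsigned int freq_timer_value(unsigned int interval, unsigned int unit,
                                     unsigned int clock_mhz,
                                     unsigned int *real_interval)
{
    static const double factor[3] = { 0.00025, 0.25, 250.0 };
    double f = factor[unit] * (double)clock_mhz;
    unsigned int timer = (unsigned int)(interval * f);

    /* round the truncated products to the nearest integer, as the driver does */
    if ((double)interval * f >= (double)timer + 0.5)
        timer++;

    *real_interval = (unsigned int)(timer / f);
    if ((double)timer / f >= (double)*real_interval + 0.5)
        (*real_interval)++;

    /* the programmed count is the rounded value minus 2 */
    return timer - 2;
}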
-
-/*
- * Configuration function for INC_CPT
- */
-static int i_APCI1710_InsnConfigINCCPT(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_ConfigType;
- int i_ReturnValue = 0;
-
- ui_ConfigType = CR_CHAN(insn->chanspec);
-
- printk("\nINC_CPT");
-
- devpriv->tsk_Current = current; /* Save the current process task structure */
- switch (ui_ConfigType) {
- case APCI1710_INCCPT_INITCOUNTER:
- i_ReturnValue = i_APCI1710_InitCounter(dev,
- CR_AREF(insn->chanspec),
- (unsigned char) data[0],
- (unsigned char) data[1],
- (unsigned char) data[2], (unsigned char) data[3], (unsigned char) data[4]);
- break;
-
- case APCI1710_INCCPT_COUNTERAUTOTEST:
- i_ReturnValue = i_APCI1710_CounterAutoTest(dev,
- (unsigned char *) &data[0]);
- break;
-
- case APCI1710_INCCPT_INITINDEX:
- i_ReturnValue = i_APCI1710_InitIndex(dev,
- CR_AREF(insn->chanspec),
- (unsigned char) data[0],
- (unsigned char) data[1], (unsigned char) data[2], (unsigned char) data[3]);
- break;
-
- case APCI1710_INCCPT_INITREFERENCE:
- i_ReturnValue = i_APCI1710_InitReference(dev,
- CR_AREF(insn->chanspec), (unsigned char) data[0]);
- break;
-
- case APCI1710_INCCPT_INITEXTERNALSTROBE:
- i_ReturnValue = i_APCI1710_InitExternalStrobe(dev,
- CR_AREF(insn->chanspec),
- (unsigned char) data[0], (unsigned char) data[1]);
- break;
-
- case APCI1710_INCCPT_INITCOMPARELOGIC:
- i_ReturnValue = i_APCI1710_InitCompareLogic(dev,
- CR_AREF(insn->chanspec), (unsigned int) data[0]);
- break;
-
- case APCI1710_INCCPT_INITFREQUENCYMEASUREMENT:
- i_ReturnValue = i_APCI1710_InitFrequencyMeasurement(dev,
- CR_AREF(insn->chanspec),
- (unsigned char) data[0],
- (unsigned char) data[1], (unsigned int) data[2], (unsigned int *) &data[0]);
- break;
-
- default:
- printk("Insn Config : Config Parameter Wrong\n");
-
- }
-
- if (i_ReturnValue >= 0)
- i_ReturnValue = insn->n;
- return i_ReturnValue;
-}
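From the switch above: the channel field of the chanspec selects the configuration function, the aref field selects the module (0 to 3), and data[] carries that function's parameters. Below is a hedged user-space sketch of reaching APCI1710_INCCPT_INITCOUNTER through comedilib; the device, subdevice number and the APCI1710_* values are assumptions taken from the driver headers, and the five data entries stand for the i_APCI1710_InitCounter() parameters, left zeroed here.

#include <string.h>
#include <comedilib.h>

int init_counter_module(comedi_t *dev, unsigned int subdev, unsigned int module)
{
    lsampl_t data[5] = { 0 };   /* counter range / mode / option values go here */
    comedi_insn insn;

    memset(&insn, 0, sizeof(insn));
    insn.insn = INSN_CONFIG;
    insn.n = 5;
    insn.data = data;
    insn.subdev = subdev;
    /* channel = config function, aref = module number (0..3) */
    insn.chanspec = CR_PACK(APCI1710_INCCPT_INITCOUNTER, 0, module);

    return comedi_do_insn(dev, &insn);
}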
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ClearCounterValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Task : Clear the counter value from selected module |
-| (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The selected module number parameter is wrong |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_ClearCounterValue(struct comedi_device *dev,
- unsigned char b_ModulNbr)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*********************/
- /* Clear the counter */
- /*********************/
-
- outl(1, devpriv->s_BoardInfos.
- ui_Address + 16 + (64 * b_ModulNbr));
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ClearAllCounterValue |
-| (unsigned char_ b_BoardHandle) |
-+----------------------------------------------------------------------------+
-| Task : Clear all counter value. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_ClearAllCounterValue(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
- unsigned char b_ModulCpt = 0;
- int i_ReturnValue = 0;
-
- /********************************/
- /* Test if counter module found */
- /********************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[0] & 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER
- || (devpriv->s_BoardInfos.
- dw_MolduleConfiguration[1] & 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER
- || (devpriv->s_BoardInfos.
- dw_MolduleConfiguration[2] & 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER
- || (devpriv->s_BoardInfos.
- dw_MolduleConfiguration[3] & 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER) {
- for (b_ModulCpt = 0; b_ModulCpt < 4; b_ModulCpt++) {
- /*******************************/
- /* Test if incremental counter */
- /*******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulCpt] &
- 0xFFFF0000UL) ==
- APCI1710_INCREMENTAL_COUNTER) {
- /*********************/
- /* Clear the counter */
- /*********************/
-
- outl(1, devpriv->s_BoardInfos.
- ui_Address + 16 + (64 * b_ModulCpt));
- }
- }
- } else {
- /***************************/
- /* No counter module found */
- /***************************/
-
- DPRINTK("No counter module found\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
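For orientation, these are the per-module I/O offsets used throughout this section (each module owns a 64-byte window starting at ui_Address + 64 * module). The enumerator names are descriptive placeholders, not identifiers from the driver:

enum apci1710_counter_reg_sketch {
    REG_LATCH_STROBE  = 0,   /* outl(1 << (latch_reg * 4), ...) latches the counter          */
    REG_LATCH_VALUE   = 4,   /* latched value read back by the counter auto test             */
    REG_CLEAR_OR_STOP = 16,  /* outl(1, ...) clears the counter, outl(0, ...) stops the test */
    REG_MODE_1_2_3_4  = 20,  /* the four byte-wide mode registers, written as one word       */
    REG_COMPARE       = 28,  /* 32-bit compare value                                         */
    REG_FREQ_TIMER    = 32,  /* frequency-measurement timer value                            */
    REG_STATUS        = 36,  /* bit 0: 40 MHz quartz present                                 */
};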
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_SetInputFilter |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_Module, |
-| unsigned char_ b_PCIInputClock, |
-| unsigned char_ b_Filter) |
-+----------------------------------------------------------------------------+
-| Task              : Disables or enables the software filter for the       |
-|                     selected module (b_ModulNbr). b_Filter determines the |
-|                     filter time.                                          |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Number of the module to be |
-| configured (0 to 3) |
-| unsigned char_ b_PCIInputClock : Selection of the PCI bus |
-| clock |
-| - APCI1710_30MHZ : |
-| The PC has a PCI bus clock |
-| of 30 MHz |
-| - APCI1710_33MHZ : |
-| The PC has a PCI bus clock |
-| of 33 MHz |
-| - APCI1710_40MHZ : |
-| The APCI1710 has a 40MHz |
-| quartz |
-| unsigned char_ b_Filter : Filter selection |
-| |
-| 30 MHz |
-| ------ |
-| 0: Software filter not used |
-| 1: Filter from 266ns (3.750000MHz) |
-| 2: Filter from 400ns (2.500000MHz) |
-| 3: Filter from 533ns (1.876170MHz) |
-| 4: Filter from 666ns (1.501501MHz) |
-| 5: Filter from 800ns (1.250000MHz) |
-| 6: Filter from 933ns (1.071800MHz) |
-| 7: Filter from 1066ns (0.938080MHz) |
-| 8: Filter from 1200ns (0.833333MHz) |
-| 9: Filter from 1333ns (0.750000MHz) |
-| 10: Filter from 1466ns (0.682100MHz) |
-| 11: Filter from 1600ns (0.625000MHz) |
-| 12: Filter from 1733ns (0.577777MHz) |
-| 13: Filter from 1866ns (0.535900MHz) |
-| 14: Filter from 2000ns (0.500000MHz) |
-| 15: Filter from 2133ns (0.468800MHz) |
-| |
-| 33 MHz |
-| ------ |
-| 0: Software filter not used |
-| 1: Filter from 242ns (4.125000MHz) |
-| 2: Filter from 363ns (2.754820MHz) |
-| 3: Filter from 484ns (2.066115MHz) |
-| 4: Filter from 605ns (1.652892MHz) |
-| 5: Filter from 726ns (1.357741MHz) |
-| 6: Filter from 847ns (1.180637MHz) |
-| 7: Filter from 968ns (1.033055MHz) |
-| 8: Filter from 1089ns (0.918273MHz) |
-| 9: Filter from 1210ns (0.826446MHz) |
-| 10: Filter from 1331ns (0.751314MHz) |
-| 11: Filter from 1452ns (0.688705MHz) |
-| 12: Filter from 1573ns (0.635727MHz) |
-| 13: Filter from 1694ns (0.590318MHz) |
-| 14: Filter from 1815ns (0.550964MHz) |
-| 15: Filter from 1936ns (0.516528MHz) |
-| |
-| 40 MHz |
-| ------ |
-| 0: Software filter not used |
-| 1: Filter from 200ns (5.000000MHz) |
-| 2: Filter from 300ns (3.333333MHz) |
-| 3: Filter from 400ns (2.500000MHz) |
-| 4: Filter from 500ns (2.000000MHz) |
-| 5: Filter from 600ns (1.666666MHz) |
-| 6: Filter from 700ns (1.428500MHz) |
-| 7: Filter from 800ns (1.250000MHz) |
-| 8: Filter from 900ns (1.111111MHz) |
-| 9: Filter from 1000ns (1.000000MHz) |
-| 10: Filter from 1100ns (0.909090MHz) |
-| 11: Filter from 1200ns (0.833333MHz) |
-| 12: Filter from 1300ns (0.769200MHz) |
-| 13: Filter from 1400ns (0.714200MHz) |
-| 14: Filter from 1500ns (0.666666MHz) |
-| 15: Filter from 1600ns (0.625000MHz) |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The selected module number is wrong |
-| -3: The module is not a counter module |
-| -4: The selected PCI input clock is wrong |
-| -5: The selected filter value is wrong |
-| -6: 40MHz quartz not on board |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_SetInputFilter(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_PCIInputClock,
- unsigned char b_Filter)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_Status = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if incremental counter */
- /*******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_INCREMENTAL_COUNTER) {
- /******************************/
- /* Test if firmware >= Rev1.5 */
- /******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF) >= 0x3135) {
- /**************************/
- /* Test the PCI bus clock */
- /**************************/
-
- if ((b_PCIInputClock == APCI1710_30MHZ) ||
- (b_PCIInputClock == APCI1710_33MHZ) ||
- (b_PCIInputClock == APCI1710_40MHZ)) {
- /*************************/
- /* Test the filter value */
- /*************************/
-
- if (b_Filter < 16) {
- /**********************/
- /* Test if 40MHz used */
- /**********************/
-
-                    if (b_PCIInputClock == APCI1710_40MHZ) {
-                        /*********************************/
-                        /* Test if 40MHz quartz on board */
-                        /*********************************/
-
-                        dw_Status = inl(devpriv->s_BoardInfos.
-                            ui_Address + 36 + (64 * b_ModulNbr));
-
-                        /******************************/
-                        /* Test the quartz flag (DQ0) */
-                        /******************************/
-
-                        if ((dw_Status & 1) != 1) {
-                            /*****************************/
-                            /* 40MHz quartz not on board */
-                            /*****************************/
-
-                            DPRINTK("40MHz quartz not on board\n");
-                            i_ReturnValue = -6;
-                        }
- } /* if (b_PCIInputClock == APCI1710_40MHZ) */
-
-                    /*******************************/
-                    /* Test that no error occurred */
-                    /*******************************/
-
- if (i_ReturnValue == 0) {
- /**********************/
- /* Test if 40MHz used */
- /**********************/
-
-                    if (b_PCIInputClock == APCI1710_40MHZ) {
-                        /**********************************/
-                        /* Enable the 40MHz quartz (DQ31) */
-                        /**********************************/
-
-                        devpriv->s_ModuleInfo[b_ModulNbr].
-                            s_SiemensCounterInfo.
-                            s_ModeRegister.
-                            s_ByteModeRegister.
-                            b_ModeRegister4 |=
-                            APCI1710_ENABLE_40MHZ_FILTER;
-                    } else {
-                        /***********************************/
-                        /* Disable the 40MHz quartz (DQ31) */
-                        /***********************************/
-
-                        devpriv->s_ModuleInfo[b_ModulNbr].
-                            s_SiemensCounterInfo.
-                            s_ModeRegister.
-                            s_ByteModeRegister.
-                            b_ModeRegister4 &=
-                            APCI1710_DISABLE_40MHZ_FILTER;
-                    }
-
-                    /************************/
-                    /* Set the filter value */
-                    /************************/
-
-                    devpriv->s_ModuleInfo[b_ModulNbr].
-                        s_SiemensCounterInfo.
-                        s_ModeRegister.
-                        s_ByteModeRegister.
-                        b_ModeRegister3 =
-                        (devpriv->s_ModuleInfo[b_ModulNbr].
-                         s_SiemensCounterInfo.
-                         s_ModeRegister.
-                         s_ByteModeRegister.
-                         b_ModeRegister3 & 0x1F) |
-                        ((b_Filter & 0x7) << 5);
-
-                    devpriv->s_ModuleInfo[b_ModulNbr].
-                        s_SiemensCounterInfo.
-                        s_ModeRegister.
-                        s_ByteModeRegister.
-                        b_ModeRegister4 =
-                        (devpriv->s_ModuleInfo[b_ModulNbr].
-                         s_SiemensCounterInfo.
-                         s_ModeRegister.
-                         s_ByteModeRegister.
-                         b_ModeRegister4 & 0xFE) |
-                        ((b_Filter & 0x8) >> 3);
-
-                    /***************************/
-                    /* Write the configuration */
-                    /***************************/
-
-                    outl(devpriv->s_ModuleInfo[b_ModulNbr].
-                        s_SiemensCounterInfo.
-                        s_ModeRegister.
-                        dw_ModeRegister1_2_3_4,
-                        devpriv->s_BoardInfos.ui_Address +
-                        20 + (64 * b_ModulNbr));
- } /* if (i_ReturnValue == 0) */
- } /* if (b_Filter < 16) */
- else {
- /**************************************/
- /* The selected filter value is wrong */
- /**************************************/
-
- DPRINTK("The selected filter value is wrong\n");
- i_ReturnValue = -5;
- } /* if (b_Filter < 16) */
- } /* if ((b_PCIInputClock == APCI1710_30MHZ) || (b_PCIInputClock == APCI1710_33MHZ) || (b_PCIInputClock == APCI1710_40MHZ)) */
- else {
- /*****************************************/
- /* The selected PCI input clock is wrong */
- /*****************************************/
-
- DPRINTK("The selected PCI input clock is wrong\n");
-                i_ReturnValue = -4;
- } /* if ((b_PCIInputClock == APCI1710_30MHZ) || (b_PCIInputClock == APCI1710_33MHZ) || (b_PCIInputClock == APCI1710_40MHZ)) */
- } else {
- /**************************************/
- /* The module is not a counter module */
- /**************************************/
-
- DPRINTK("The module is not a counter module\n");
- i_ReturnValue = -3;
- }
- } else {
- /**************************************/
- /* The module is not a counter module */
- /**************************************/
-
- DPRINTK("The module is not a counter module\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
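The 4-bit filter value above is split across two mode registers: its low three bits land in bits 5..7 of mode register 3 and its top bit in bit 0 of mode register 4. Restated as a small helper (illustrative name):

static void pack_filter_value(unsigned char *reg3, unsigned char *reg4,
                              unsigned char filter)  /* 0..15 */
{
    *reg3 = (*reg3 & 0x1F) | ((filter & 0x7) << 5);  /* filter bits 0..2 -> reg3 bits 5..7 */
    *reg4 = (*reg4 & 0xFE) | ((filter & 0x8) >> 3);  /* filter bit 3     -> reg4 bit 0     */
}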
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_LatchCounter (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_LatchReg) |
-+----------------------------------------------------------------------------+
-| Task              : Latch the current value from the selected module      |
-|                     (b_ModulNbr) into the selected latch register         |
-|                     (b_LatchReg).                                         |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-| unsigned char_ b_LatchReg : Selected latch register |
-| 0 : for the first latch register |
-| 1 : for the second latch register |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: The selected latch register parameter is wrong |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_LatchCounter(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_LatchReg)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*************************************/
- /* Test the latch register parameter */
- /*************************************/
-
- if (b_LatchReg < 2) {
-                /*********************/
-                /* Latch the counter */
-                /*********************/
-
- outl(1 << (b_LatchReg * 4),
- devpriv->s_BoardInfos.ui_Address +
- (64 * b_ModulNbr));
- } else {
- /**************************************************/
- /* The selected latch register parameter is wrong */
- /**************************************************/
-
- DPRINTK("The selected latch register parameter is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
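The latch itself is a single strobe write to offset 0 of the module window: bit 0 latches into the first latch register, bit 4 into the second. A minimal sketch, assuming the kernel's outl() as used in the surrounding driver:

static void latch_counter_sketch(unsigned long base, unsigned int module,
                                 unsigned int latch_reg)  /* 0 or 1 */
{
    outl(1U << (latch_reg * 4), base + (64 * module));
}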
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_SetIndexAndReferenceSource |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_SourceSelection) |
-+----------------------------------------------------------------------------+
-| Task              : Determines the hardware source for the index and the  |
-|                     reference logic. By default the index logic is        |
-|                     connected to the difference input C and the reference |
-|                     logic is connected to the 24V input E.                |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-| unsigned char_ b_SourceSelection : APCI1710_SOURCE_0 : |
-| The index logic is connected |
-| to the difference input C and|
-| the reference logic is |
-| connected to the 24V input E.|
-| This is the default |
-| configuration. |
-| APCI1710_SOURCE_1 : |
-| The reference logic is |
-| connected to the difference |
-| input C and the index logic |
-| is connected to the 24V |
-| input E |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The selected module number is wrong |
-| -3: The module is not a counter module. |
-| -4: The source selection is wrong |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_SetIndexAndReferenceSource(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_SourceSelection)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if incremental counter */
- /*******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_INCREMENTAL_COUNTER) {
- /******************************/
- /* Test if firmware >= Rev1.5 */
- /******************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF) >= 0x3135) {
- /*****************************/
- /* Test the source selection */
- /*****************************/
-
- if (b_SourceSelection == APCI1710_SOURCE_0 ||
- b_SourceSelection == APCI1710_SOURCE_1)
- {
- /******************************************/
- /* Test if invert the index and reference */
- /******************************************/
-
-                if (b_SourceSelection == APCI1710_SOURCE_1) {
-                    /********************************************/
-                    /* Invert index and reference source (DQ25) */
-                    /********************************************/
-
-                    devpriv->s_ModuleInfo[b_ModulNbr].
-                        s_SiemensCounterInfo.
-                        s_ModeRegister.
-                        s_ByteModeRegister.
-                        b_ModeRegister4 |=
-                        APCI1710_INVERT_INDEX_RFERENCE;
-                } else {
-                    /****************************************/
-                    /* Set the default configuration (DQ25) */
-                    /****************************************/
-
-                    devpriv->s_ModuleInfo[b_ModulNbr].
-                        s_SiemensCounterInfo.
-                        s_ModeRegister.
-                        s_ByteModeRegister.
-                        b_ModeRegister4 &=
-                        APCI1710_DEFAULT_INDEX_RFERENCE;
-                }
- } /* if (b_SourceSelection == APCI1710_SOURCE_0 ||b_SourceSelection == APCI1710_SOURCE_1) */
- else {
- /*********************************/
- /* The source selection is wrong */
- /*********************************/
-
- DPRINTK("The source selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_SourceSelection == APCI1710_SOURCE_0 ||b_SourceSelection == APCI1710_SOURCE_1) */
- } else {
- /**************************************/
- /* The module is not a counter module */
- /**************************************/
-
- DPRINTK("The module is not a counter module\n");
- i_ReturnValue = -3;
- }
- } else {
- /**************************************/
- /* The module is not a counter module */
- /**************************************/
-
- DPRINTK("The module is not a counter module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***************************************/
- /* The selected module number is wrong */
- /***************************************/
-
- DPRINTK("The selected module number is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_SetDigitalChlOn |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Task              : Sets the digital output H. Setting an output means    |
-|                     setting an output high.                               |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Number of the module to be |
-| configured (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The selected module number is wrong |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_SetDigitalChlOn(struct comedi_device *dev,
- unsigned char b_ModulNbr)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.b_ModeRegister3 | 0x10;
-
- /*********************/
- /* Set the output On */
- /*********************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4, devpriv->s_BoardInfos.
- ui_Address + 20 + (64 * b_ModulNbr));
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
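
The read-modify-write pattern above (update one b_ModeRegisterN byte, then outl() the whole dw_ModeRegister1_2_3_4 dword to ui_Address + 20 + 64 * module) only makes sense if the four byte-wide mode registers alias a single 32-bit word. A minimal sketch of that assumed layout, with field names borrowed from the driver's structures (the authoritative declaration lives in the ADDI-DATA headers, not here):

/* Sketch of the assumed union behind s_ModeRegister.  The exact byte-to-bit
 * mapping of dw_ModeRegister1_2_3_4 depends on host endianness.
 */
union mode_register_sketch {
	unsigned int dw_ModeRegister1_2_3_4;	/* pushed to the board in one outl() */
	struct {
		unsigned char b_ModeRegister1;
		unsigned char b_ModeRegister2;
		unsigned char b_ModeRegister3;	/* e.g. digital output H bit 0x10 */
		unsigned char b_ModeRegister4;
	} s_ByteModeRegister;
};

So setting bit 0x10 in b_ModeRegister3 and then writing dw_ModeRegister1_2_3_4, as done above, updates all four mode registers on the module in a single 32-bit access.
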
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_SetDigitalChlOff |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Task : Resets the digital output H. Resetting an output means |
-|                     setting an output low.                                 |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Number of the module to be |
-| configured (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The selected module number is wrong |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_SetDigitalChlOff(struct comedi_device *dev,
- unsigned char b_ModulNbr)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.b_ModeRegister3 & 0xEF;
-
- /**********************/
- /* Set the output Off */
- /**********************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4, devpriv->s_BoardInfos.
- ui_Address + 20 + (64 * b_ModulNbr));
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
- * Set & Clear Functions for INC_CPT
- */
-static int i_APCI1710_InsnBitsINCCPT(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_BitsType;
- int i_ReturnValue = 0;
-
- ui_BitsType = CR_CHAN(insn->chanspec);
- devpriv->tsk_Current = current; /* Save the current process task structure */
-
- switch (ui_BitsType) {
- case APCI1710_INCCPT_CLEARCOUNTERVALUE:
- i_ReturnValue = i_APCI1710_ClearCounterValue(dev,
- (unsigned char) CR_AREF(insn->chanspec));
- break;
-
- case APCI1710_INCCPT_CLEARALLCOUNTERVALUE:
- i_ReturnValue = i_APCI1710_ClearAllCounterValue(dev);
- break;
-
- case APCI1710_INCCPT_SETINPUTFILTER:
- i_ReturnValue = i_APCI1710_SetInputFilter(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned char) data[0], (unsigned char) data[1]);
- break;
-
- case APCI1710_INCCPT_LATCHCOUNTER:
- i_ReturnValue = i_APCI1710_LatchCounter(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char) data[0]);
- break;
-
- case APCI1710_INCCPT_SETINDEXANDREFERENCESOURCE:
- i_ReturnValue = i_APCI1710_SetIndexAndReferenceSource(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char) data[0]);
- break;
-
- case APCI1710_INCCPT_SETDIGITALCHLON:
- i_ReturnValue = i_APCI1710_SetDigitalChlOn(dev,
- (unsigned char) CR_AREF(insn->chanspec));
- break;
-
- case APCI1710_INCCPT_SETDIGITALCHLOFF:
- i_ReturnValue = i_APCI1710_SetDigitalChlOff(dev,
- (unsigned char) CR_AREF(insn->chanspec));
- break;
-
- default:
- printk("Bits Config Parameter Wrong\n");
- }
-
- if (i_ReturnValue >= 0)
- i_ReturnValue = insn->n;
- return i_ReturnValue;
-}
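
For orientation, the dispatcher above takes the operation code from CR_CHAN(chanspec), the module number from CR_AREF(chanspec), and any extra parameters from data[]. A minimal user-space sketch using comedilib, assuming this handler is wired up as the subdevice's insn_bits hook, that the APCI1710_INCCPT_* codes from the driver header are known to the caller, and a hypothetical subdevice index:

#include <string.h>
#include <comedilib.h>

/* Sketch only: request the CLEARCOUNTERVALUE operation on one module.
 * CR_CHAN() carries the APCI1710_INCCPT_* code, CR_AREF() the module (0..3);
 * the subdevice index depends on how the board was attached.
 */
static int apci1710_clear_counter(comedi_t *dev, unsigned int subdev,
				  unsigned int module)
{
	comedi_insn insn;
	lsampl_t data[1] = { 0 };

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_BITS;
	insn.subdev = subdev;
	insn.chanspec = CR_PACK(APCI1710_INCCPT_CLEARCOUNTERVALUE, 0, module);
	insn.n = 1;
	insn.data = data;

	return comedi_do_insn(dev, &insn);	/* >= 0 on success */
}
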
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_EnableLatchInterrupt |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Task              : Enable the latch interrupt from the selected module    |
-|                     (b_ModulNbr). Each software or hardware latch generates|
-|                     an interrupt.                                          |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Interrupt routine not installed see function |
-| "i_APCI1710_SetBoardIntRoutine" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_EnableLatchInterrupt(struct comedi_device *dev,
- unsigned char b_ModulNbr)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
-
- /********************/
- /* Enable interrupt */
- /********************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 | APCI1710_ENABLE_LATCH_INT;
-
- /***************************/
- /* Write the configuration */
- /***************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4, devpriv->s_BoardInfos.
- ui_Address + 20 + (64 * b_ModulNbr));
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_DisableLatchInterrupt |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Task : Disable the latch interrupt from selected module |
-| (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Interrupt routine not installed see function |
-| "i_APCI1710_SetBoardIntRoutine" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_DisableLatchInterrupt(struct comedi_device *dev,
- unsigned char b_ModulNbr)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
-
- /***************************/
- /* Write the configuration */
- /***************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4 &
- ((APCI1710_DISABLE_LATCH_INT << 8) | 0xFF),
- devpriv->s_BoardInfos.ui_Address + 20 +
- (64 * b_ModulNbr));
-
- mdelay(1000);
-
- /*********************/
- /* Disable interrupt */
- /*********************/
-
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 & APCI1710_DISABLE_LATCH_INT;
-
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_Write16BitCounterValue |
-| (unsigned char_ b_BoardHandle |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_SelectedCounter, |
-| unsigned int_ ui_WriteValue) |
-+----------------------------------------------------------------------------+
-| Task              : Write a 16-Bit value (ui_WriteValue) into the selected |
-|                     16-Bit counter (b_SelectedCounter) of the selected     |
-|                     module (b_ModulNbr).                                   |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-| unsigned char_ b_SelectedCounter : Selected 16-Bit counter |
-| (0 or 1) |
-| unsigned int_ ui_WriteValue : 16-Bit write value |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: The selected 16-Bit counter parameter is wrong |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_Write16BitCounterValue(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_SelectedCounter,
- unsigned int ui_WriteValue)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /******************************/
- /* Test the counter selection */
- /******************************/
-
- if (b_SelectedCounter < 2) {
- /*******************/
- /* Write the value */
- /*******************/
-
- outl((unsigned int) ((unsigned int) (ui_WriteValue) << (16 *
- b_SelectedCounter)),
- devpriv->s_BoardInfos.ui_Address + 8 +
- (b_SelectedCounter * 4) +
- (64 * b_ModulNbr));
- } else {
- /**************************************************/
- /* The selected 16-Bit counter parameter is wrong */
- /**************************************************/
-
- DPRINTK("The selected 16-Bit counter parameter is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_Write32BitCounterValue |
-| (unsigned char_ b_BoardHandle |
-| unsigned char_ b_ModulNbr, |
-| ULONG_ ul_WriteValue) |
-+----------------------------------------------------------------------------+
-| Task              : Write a 32-Bit value (ul_WriteValue) into the selected |
-|                     module (b_ModulNbr).                                   |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-| ULONG_ ul_WriteValue : 32-Bit write value |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_Write32BitCounterValue(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned int ul_WriteValue)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*******************/
- /* Write the value */
- /*******************/
-
- outl(ul_WriteValue, devpriv->s_BoardInfos.
- ui_Address + 4 + (64 * b_ModulNbr));
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_EnableIndex (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Task : Enable the INDEX actions |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Index not initialised see function |
-| "i_APCI1710_InitIndex" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_EnableIndex(struct comedi_device *dev,
- unsigned char b_ModulNbr)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int ul_InterruptLatchReg;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*****************************/
- /* Test if index initialised */
- /*****************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_IndexInit) {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 | APCI1710_ENABLE_INDEX;
-
- ul_InterruptLatchReg =
- inl(devpriv->s_BoardInfos.ui_Address +
- 24 + (64 * b_ModulNbr));
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4,
- devpriv->s_BoardInfos.ui_Address + 20 +
- (64 * b_ModulNbr));
- } else {
- /*************************************************************/
- /* Index not initialised see function "i_APCI1710_InitIndex" */
- /*************************************************************/
-
- DPRINTK("Index not initialised \n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_DisableIndex (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Task : Disable the INDEX actions |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Index not initialised see function |
-| "i_APCI1710_InitIndex" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_DisableIndex(struct comedi_device *dev,
- unsigned char b_ModulNbr)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*****************************/
- /* Test if index initialised */
- /*****************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_IndexInit) {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 &
- APCI1710_DISABLE_INDEX;
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4,
- devpriv->s_BoardInfos.ui_Address + 20 +
- (64 * b_ModulNbr));
- } else {
- /*************************************************************/
- /* Index not initialised see function "i_APCI1710_InitIndex" */
- /*************************************************************/
-
- DPRINTK("Index not initialised \n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_EnableCompareLogic |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Task              : Enable the 32-Bit compare logic. When the incremental  |
-|                     counter reaches the compare value, an interrupt is     |
-|                     generated.                                             |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : -
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Compare logic not initialised. |
-| See function "i_APCI1710_InitCompareLogic" |
-| -5: Interrupt function not initialised. |
-| See function "i_APCI1710_SetBoardIntRoutineX" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_EnableCompareLogic(struct comedi_device *dev,
- unsigned char b_ModulNbr)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*************************************/
- /* Test if compare logic initialised */
- /*************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_CompareLogicInit == 1) {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 |
- APCI1710_ENABLE_COMPARE_INT;
-
- /***************************/
- /* Write the configuration */
- /***************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4,
- devpriv->s_BoardInfos.ui_Address + 20 +
- (64 * b_ModulNbr));
- } else {
- /*********************************/
- /* Compare logic not initialised */
- /*********************************/
-
- DPRINTK("Compare logic not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_DisableCompareLogic |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Task              : Disable the 32-Bit compare logic.                      |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : -
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Compare logic not initialised. |
-| See function "i_APCI1710_InitCompareLogic" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_DisableCompareLogic(struct comedi_device *dev,
- unsigned char b_ModulNbr)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*************************************/
- /* Test if compare logic initialised */
- /*************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_CompareLogicInit == 1) {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 &
- APCI1710_DISABLE_COMPARE_INT;
-
- /***************************/
- /* Write the configuration */
- /***************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4,
- devpriv->s_BoardInfos.ui_Address + 20 +
- (64 * b_ModulNbr));
- } else {
- /*********************************/
- /* Compare logic not initialised */
- /*********************************/
-
- DPRINTK("Compare logic not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
- /*
- +----------------------------------------------------------------------------+
- | Function Name : _INT_ i_APCI1710_EnableFrequencyMeasurement |
- | (unsigned char_ b_BoardHandle, |
- | unsigned char_ b_ModulNbr, |
- | unsigned char_ b_InterruptEnable) |
- +----------------------------------------------------------------------------+
- | Task : Enables the frequency measurement function |
- +----------------------------------------------------------------------------+
- | Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
- | unsigned char_ b_ModulNbr : Number of the module to be |
- | configured (0 to 3) |
- | unsigned char_ b_InterruptEnable: Enable or disable the |
- | interrupt. |
- | APCI1710_ENABLE: |
- | Enable the interrupt |
- | APCI1710_DISABLE: |
- | Disable the interrupt |
- +----------------------------------------------------------------------------+
- | Output Parameters : - |
- +----------------------------------------------------------------------------+
- | Return Value : 0: No error |
- | -1: The handle parameter of the board is wrong |
- | -2: The selected module number is wrong |
- | -3: Counter not initialised see function |
- | "i_APCI1710_InitCounter" |
- | -4: Frequency measurement logic not initialised. |
- | See function "i_APCI1710_InitFrequencyMeasurement" |
- | -5: Interrupt parameter is wrong |
- | -6: Interrupt function not initialised. |
- +----------------------------------------------------------------------------+
- */
-static int i_APCI1710_EnableFrequencyMeasurement(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_InterruptEnable)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /********************************************/
- /* Test if frequency measurement initialised */
- /********************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_FrequencyMeasurementInit == 1) {
- /***************************/
- /* Test the interrupt mode */
- /***************************/
-
- if ((b_InterruptEnable == APCI1710_DISABLE) ||
- (b_InterruptEnable == APCI1710_ENABLE))
- {
-
- /************************************/
- /* Enable the frequency measurement */
- /************************************/
-
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 |
- APCI1710_ENABLE_FREQUENCY;
-
- /*********************************************/
- /* Disable or enable the frequency interrupt */
- /*********************************************/
-
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 = (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 &
- APCI1710_DISABLE_FREQUENCY_INT)
- | (b_InterruptEnable << 3);
-
- /***************************/
- /* Write the configuration */
- /***************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4,
- devpriv->s_BoardInfos.
- ui_Address + 20 +
- (64 * b_ModulNbr));
-
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.
- b_FrequencyMeasurementEnable =
- 1;
- } else {
- /********************************/
- /* Interrupt parameter is wrong */
- /********************************/
-
- DPRINTK("Interrupt parameter is wrong\n");
- i_ReturnValue = -5;
- }
- } else {
- /***********************************************/
- /* Frequency measurement logic not initialised */
- /***********************************************/
-
- DPRINTK("Frequency measurement logic not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
- /*
- +----------------------------------------------------------------------------+
- | Function Name : _INT_ i_APCI1710_DisableFrequencyMeasurement |
- | (unsigned char_ b_BoardHandle, |
- | unsigned char_ b_ModulNbr) |
- +----------------------------------------------------------------------------+
- | Task : Disables the frequency measurement function |
- +----------------------------------------------------------------------------+
- | Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
- | unsigned char_ b_ModulNbr : Number of the module to be |
- | configured (0 to 3) |
- +----------------------------------------------------------------------------+
- | Output Parameters : - |
- +----------------------------------------------------------------------------+
- | Return Value : 0: No error |
- | -1: The handle parameter of the board is wrong |
- | -2: The selected module number is wrong |
- | -3: Counter not initialised see function |
- | "i_APCI1710_InitCounter" |
- | -4: Frequency measurement logic not initialised. |
- | See function "i_APCI1710_InitFrequencyMeasurement" |
- +----------------------------------------------------------------------------+
- */
-static int i_APCI1710_DisableFrequencyMeasurement(struct comedi_device *dev,
- unsigned char b_ModulNbr)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /********************************************/
- /* Test if frequency measurement initialised */
- /********************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_FrequencyMeasurementInit == 1) {
- /*************************************/
- /* Disable the frequency measurement */
- /*************************************/
-
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 &
- APCI1710_DISABLE_FREQUENCY
-					/* Begin CG 29/06/01 CG 1100/0231 -> 0701/0232 Frequency measurement IRQ must be cleared */
-					& APCI1710_DISABLE_FREQUENCY_INT;
-				/* End CG 29/06/01 CG 1100/0231 -> 0701/0232 Frequency measurement IRQ must be cleared */
-
- /***************************/
- /* Write the configuration */
- /***************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4,
- devpriv->s_BoardInfos.ui_Address + 20 +
- (64 * b_ModulNbr));
-
- /*************************************/
- /* Disable the frequency measurement */
- /*************************************/
-
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.
- b_FrequencyMeasurementEnable = 0;
- } else {
- /***********************************************/
- /* Frequency measurement logic not initialised */
- /***********************************************/
-
- DPRINTK("Frequency measurement logic not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
- * Enable Disable functions for INC_CPT
- */
-static int i_APCI1710_InsnWriteINCCPT(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_WriteType;
- int i_ReturnValue = 0;
-
- ui_WriteType = CR_CHAN(insn->chanspec);
- devpriv->tsk_Current = current; /* Save the current process task structure */
-
- switch (ui_WriteType) {
- case APCI1710_INCCPT_ENABLELATCHINTERRUPT:
- i_ReturnValue = i_APCI1710_EnableLatchInterrupt(dev,
- (unsigned char) CR_AREF(insn->chanspec));
- break;
-
- case APCI1710_INCCPT_DISABLELATCHINTERRUPT:
- i_ReturnValue = i_APCI1710_DisableLatchInterrupt(dev,
- (unsigned char) CR_AREF(insn->chanspec));
- break;
-
- case APCI1710_INCCPT_WRITE16BITCOUNTERVALUE:
- i_ReturnValue = i_APCI1710_Write16BitCounterValue(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned char) data[0], (unsigned int) data[1]);
- break;
-
- case APCI1710_INCCPT_WRITE32BITCOUNTERVALUE:
- i_ReturnValue = i_APCI1710_Write32BitCounterValue(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned int) data[0]);
-
- break;
-
- case APCI1710_INCCPT_ENABLEINDEX:
- i_APCI1710_EnableIndex(dev, (unsigned char) CR_AREF(insn->chanspec));
- break;
-
- case APCI1710_INCCPT_DISABLEINDEX:
- i_ReturnValue = i_APCI1710_DisableIndex(dev,
- (unsigned char) CR_AREF(insn->chanspec));
- break;
-
- case APCI1710_INCCPT_ENABLECOMPARELOGIC:
- i_ReturnValue = i_APCI1710_EnableCompareLogic(dev,
- (unsigned char) CR_AREF(insn->chanspec));
- break;
-
- case APCI1710_INCCPT_DISABLECOMPARELOGIC:
- i_ReturnValue = i_APCI1710_DisableCompareLogic(dev,
- (unsigned char) CR_AREF(insn->chanspec));
- break;
-
- case APCI1710_INCCPT_ENABLEFREQUENCYMEASUREMENT:
- i_ReturnValue = i_APCI1710_EnableFrequencyMeasurement(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char) data[0]);
- break;
-
- case APCI1710_INCCPT_DISABLEFREQUENCYMEASUREMENT:
- i_ReturnValue = i_APCI1710_DisableFrequencyMeasurement(dev,
- (unsigned char) CR_AREF(insn->chanspec));
- break;
-
- default:
- printk("Write Config Parameter Wrong\n");
- }
-
- if (i_ReturnValue >= 0)
- i_ReturnValue = insn->n;
- return i_ReturnValue;
-}
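
The write-side dispatcher follows the same convention, with data[] carrying the per-operation parameters (for WRITE16BITCOUNTERVALUE, data[0] selects the 16-bit counter and data[1] holds the value). A companion sketch to the one above, under the same comedilib and subdevice-index assumptions:

#include <string.h>
#include <comedilib.h>

/* Sketch only: write 0x1234 into 16-bit counter 1 of module 0 through the
 * INSN_WRITE dispatcher; the data[] layout matches the
 * WRITE16BITCOUNTERVALUE case above.  Subdevice index and user-space
 * visibility of the APCI1710_INCCPT_* codes are assumptions.
 */
static int apci1710_write_16bit(comedi_t *dev, unsigned int subdev)
{
	comedi_insn insn;
	lsampl_t data[2] = { 1, 0x1234 };	/* counter 1, value to write */

	memset(&insn, 0, sizeof(insn));
	insn.insn = INSN_WRITE;
	insn.subdev = subdev;
	insn.chanspec = CR_PACK(APCI1710_INCCPT_WRITE16BITCOUNTERVALUE, 0, 0);
	insn.n = 2;
	insn.data = data;

	return comedi_do_insn(dev, &insn);
}
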
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadLatchRegisterStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_LatchReg, |
-| unsigned char *_ pb_LatchStatus) |
-+----------------------------------------------------------------------------+
-| Task : Read the latch register status from selected module |
-| (b_ModulNbr) and selected latch register (b_LatchReg). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-| unsigned char_ b_LatchReg : Selected latch register |
-| 0 : for the first latch register |
-| 1 : for the second latch register |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_LatchStatus : Latch register status. |
-|                                            0 : No latch occurred           |
-|                                            1 : A software latch occurred   |
-|                                            2 : A hardware latch occurred   |
-|                                            3 : A software and hardware     |
-|                                                latch occurred              |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: The selected latch register parameter is wrong |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_ReadLatchRegisterStatus(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_LatchReg,
- unsigned char *pb_LatchStatus)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_LatchReg;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*************************************/
- /* Test the latch register parameter */
- /*************************************/
-
- if (b_LatchReg < 2) {
- dw_LatchReg = inl(devpriv->s_BoardInfos.
- ui_Address + (64 * b_ModulNbr));
-
- *pb_LatchStatus =
- (unsigned char) ((dw_LatchReg >> (b_LatchReg *
- 4)) & 0x3);
- } else {
- /**************************************************/
- /* The selected latch register parameter is wrong */
- /**************************************************/
-
- DPRINTK("The selected latch register parameter is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
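
The (dw_LatchReg >> (b_LatchReg * 4)) & 0x3 expression above keeps a 2-bit status per latch register; per the function header, bit 0 reports a software latch and bit 1 a hardware latch, which yields the documented 0..3 values. A small sketch of that decoding (the bit assignment is taken from the header comment, not verified against hardware):

/* Sketch: split the 2-bit latch status into its documented components. */
static void apci1710_decode_latch_status(unsigned char latch_status,
					 int *software_latch,
					 int *hardware_latch)
{
	*software_latch = latch_status & 0x1;		/* 1: software latch occurred */
	*hardware_latch = (latch_status >> 1) & 0x1;	/* 1: hardware latch occurred */
}
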
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadLatchRegisterValue |
-| (unsigned char_ b_BoardHandle,|
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_LatchReg, |
-| PULONG_ pul_LatchValue) |
-+----------------------------------------------------------------------------+
-| Task : Read the latch register value from selected module |
-| (b_ModulNbr) and selected latch register (b_LatchReg). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-| unsigned char_ b_LatchReg : Selected latch register |
-| 0 : for the first latch register |
-| 1 : for the second latch register |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_LatchValue : Latch register value |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: The selected latch register parameter is wrong |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_ReadLatchRegisterValue(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_LatchReg,
- unsigned int *pul_LatchValue)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*************************************/
- /* Test the latch register parameter */
- /*************************************/
-
- if (b_LatchReg < 2) {
- *pul_LatchValue = inl(devpriv->s_BoardInfos.
- ui_Address + ((b_LatchReg + 1) * 4) +
- (64 * b_ModulNbr));
-
- } else {
- /**************************************************/
- /* The selected latch register parameter is wrong */
- /**************************************************/
-
- DPRINTK("The selected latch register parameter is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_Read16BitCounterValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_SelectedCounter, |
-| unsigned int *_ pui_CounterValue) |
-+----------------------------------------------------------------------------+
-| Task : Latch the selected 16-Bit counter (b_SelectedCounter) |
-|                     from the selected module (b_ModulNbr) into the first   |
-| latch register and return the latched value. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-| unsigned char_ b_SelectedCounter : Selected 16-Bit counter |
-| (0 or 1) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned int *_ pui_CounterValue : 16-Bit counter value |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: The selected 16-Bit counter parameter is wrong |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_Read16BitCounterValue(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_SelectedCounter,
- unsigned int *pui_CounterValue)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_LathchValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /******************************/
- /* Test the counter selection */
- /******************************/
-
- if (b_SelectedCounter < 2) {
- /*********************/
- /* Latch the counter */
- /*********************/
-
- outl(1, devpriv->s_BoardInfos.
- ui_Address + (64 * b_ModulNbr));
-
- /************************/
- /* Read the latch value */
- /************************/
-
- dw_LathchValue = inl(devpriv->s_BoardInfos.
- ui_Address + 4 + (64 * b_ModulNbr));
-
- *pui_CounterValue =
- (unsigned int) ((dw_LathchValue >> (16 *
- b_SelectedCounter)) &
- 0xFFFFU);
- } else {
- /**************************************************/
- /* The selected 16-Bit counter parameter is wrong */
- /**************************************************/
-
- DPRINTK("The selected 16-Bit counter parameter is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_Read32BitCounterValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| PULONG_ pul_CounterValue) |
-+----------------------------------------------------------------------------+
-| Task : Latch the 32-Bit counter from selected module |
-|                     (b_ModulNbr) into the first latch register and return  |
-| the latched value. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_CounterValue : 32-Bit counter value |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_Read32BitCounterValue(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned int *pul_CounterValue)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*********************/
-			/* Latch the counter */
- /*********************/
-
- outl(1, devpriv->s_BoardInfos.
- ui_Address + (64 * b_ModulNbr));
-
- /************************/
- /* Read the latch value */
- /************************/
-
- *pul_CounterValue = inl(devpriv->s_BoardInfos.
- ui_Address + 4 + (64 * b_ModulNbr));
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetIndexStatus (unsigned char_ b_BoardHandle,|
-| unsigned char_ b_ModulNbr, |
-| unsigned char *_ pb_IndexStatus)|
-+----------------------------------------------------------------------------+
-| Task : Return the index status |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_IndexStatus : 0 : No INDEX occurred|
-|                                              1 : An INDEX occurred         |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Index not initialised see function |
-| "i_APCI1710_InitIndex" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_GetIndexStatus(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char *pb_IndexStatus)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*****************************/
- /* Test if index initialised */
- /*****************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_IndexInit) {
- dw_StatusReg = inl(devpriv->s_BoardInfos.
- ui_Address + 12 + (64 * b_ModulNbr));
-
- *pb_IndexStatus = (unsigned char) (dw_StatusReg & 1);
- } else {
- /*************************************************************/
- /* Index not initialised see function "i_APCI1710_InitIndex" */
- /*************************************************************/
-
- DPRINTK("Index not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetReferenceStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char *_ pb_ReferenceStatus) |
-+----------------------------------------------------------------------------+
-| Task : Return the reference status |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_ReferenceStatus : 0 : No REFERENCE occurred |
-|                                                           1 : A REFERENCE occurred  |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Reference not initialised see function |
-| "i_APCI1710_InitReference" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_GetReferenceStatus(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char *pb_ReferenceStatus)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*********************************/
- /* Test if reference initialised */
- /*********************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_ReferenceInit) {
- dw_StatusReg = inl(devpriv->s_BoardInfos.
- ui_Address + 24 + (64 * b_ModulNbr));
-
- *pb_ReferenceStatus =
- (unsigned char) (~dw_StatusReg & 1);
- } else {
- /*********************************************************************/
- /* Reference not initialised see function "i_APCI1710_InitReference" */
- /*********************************************************************/
-
- DPRINTK("Reference not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetUASStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char *_ pb_UASStatus) |
-+----------------------------------------------------------------------------+
-| Task : Return the error signal (UAS) status |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_UASStatus : 0 : UAS is low "0" |
-| 1 : UAS is high "1" |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_GetUASStatus(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char *pb_UASStatus)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- dw_StatusReg = inl(devpriv->s_BoardInfos.
- ui_Address + 24 + (64 * b_ModulNbr));
-
- *pb_UASStatus = (unsigned char) ((dw_StatusReg >> 1) & 1);
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
-
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetCBStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char *_ pb_CBStatus) |
-+----------------------------------------------------------------------------+
-| Task : Return the counter overflow status |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_CBStatus : 0 : Counter no overflow |
-| 1 : Counter overflow |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_GetCBStatus(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char *pb_CBStatus)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- dw_StatusReg = inl(devpriv->s_BoardInfos.
- ui_Address + 16 + (64 * b_ModulNbr));
-
- *pb_CBStatus = (unsigned char) (dw_StatusReg & 1);
-
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_Get16BitCBStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char *_ pb_CBStatusCounter0, |
-| unsigned char *_ pb_CBStatusCounter1) |
-+----------------------------------------------------------------------------+
-| Task : Returns the counter overflow (counter initialised to |
-| 2*16-bit) status from selected incremental counter |
-| module |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_CBStatusCounter0 : 0 : No overflow occurred |
-|                                                                for the first 16-bit |
-|                                                                counter              |
-|                                                            1 : Overflow occurred for|
-|                                                                the first 16-bit     |
-|                                                                counter              |
-|                     unsigned char *_ pb_CBStatusCounter1 : 0 : No overflow occurred |
-|                                                                for the second 16-bit|
-|                                                                counter              |
-|                                                            1 : Overflow occurred for|
-|                                                                the second 16-bit    |
-|                                                                counter              |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Counter not initialised to 2*16-bit mode. |
-| See function "i_APCI1710_InitCounter" |
-| -5: Firmware revision error |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_Get16BitCBStatus(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char *pb_CBStatusCounter0,
- unsigned char *pb_CBStatusCounter1)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*************************/
- /* Test if 2*16-Bit mode */
- /*************************/
-
- if ((devpriv->s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister1 & 0x10) == 0x10) {
- /*****************************/
- /* Test the Firmware version */
- /*****************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration
- [b_ModulNbr] & 0xFFFF) >=
- 0x3136) {
- dw_StatusReg =
- inl(devpriv->s_BoardInfos.
- ui_Address + 16 +
- (64 * b_ModulNbr));
-
- *pb_CBStatusCounter1 =
- (unsigned char) ((dw_StatusReg >> 0) &
- 1);
- *pb_CBStatusCounter0 =
- (unsigned char) ((dw_StatusReg >> 1) &
- 1);
- } /* if ((ps_APCI1710Variable->s_Board [b_BoardHandle].s_BoardInfos.dw_MolduleConfiguration [b_ModulNbr] & 0xFFFF) >= 0x3136) */
- else {
- /****************************/
- /* Firmware revision error */
- /****************************/
-
- i_ReturnValue = -5;
- } /* if ((ps_APCI1710Variable->s_Board [b_BoardHandle].s_BoardInfos.dw_MolduleConfiguration [b_ModulNbr] & 0xFFFF) >= 0x3136) */
- } /* if ((ps_APCI1710Variable->s_Board [b_BoardHandle].s_ModuleInfo [b_ModulNbr].s_SiemensCounterInfo.s_ModeRegister.s_ByteModeRegister.b_ModeRegister1 & 0x10) == 0x10) */
- else {
- /********************************************/
- /* Counter not initialised to 2*16-bit mode */
- /* "i_APCI1710_InitCounter" */
- /********************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -4;
- } /* if ((ps_APCI1710Variable->s_Board [b_BoardHandle].s_ModuleInfo [b_ModulNbr].s_SiemensCounterInfo.s_ModeRegister.s_ByteModeRegister.b_ModeRegister1 & 0x10) == 0x10) */
- } /* if (ps_APCI1710Variable->s_Board [b_BoardHandle].s_ModuleInfo [b_ModulNbr].s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) */
- else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- } /* if (ps_APCI1710Variable->s_Board [b_BoardHandle].s_ModuleInfo [b_ModulNbr].s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) */
- } /* if (b_ModulNbr < 4) */
- else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- } /* if (b_ModulNbr < 4) */
-
- return i_ReturnValue;
-}
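In 2*16-bit mode the function above packs both overflow flags into one register read: bit 0 reports the second 16-bit counter, bit 1 the first, and the request is only honoured when the lower 16 bits of the module configuration word are at least 0x3136. A minimal decoding sketch, with illustrative values only, not driver code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t status = 0x2;                /* stand-in for inl(base + 16 + 64 * module) */
    unsigned char cb1 = (status >> 0) & 1; /* second 16-bit counter overflow */
    unsigned char cb0 = (status >> 1) & 1; /* first 16-bit counter overflow */

    printf("counter0 overflow=%u counter1 overflow=%u\n", cb0, cb1);
    return 0;
}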
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetUDStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char *_ pb_UDStatus) |
-+----------------------------------------------------------------------------+
-| Task : Return the counter progress status |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_UDStatus : 0 : Counter progress in the |
-| selected mode down |
-| 1 : Counter progress in the |
-| selected mode up |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_GetUDStatus(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char *pb_UDStatus)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- dw_StatusReg = inl(devpriv->s_BoardInfos.
- ui_Address + 24 + (64 * b_ModulNbr));
-
- *pb_UDStatus = (unsigned char) ((dw_StatusReg >> 2) & 1);
-
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetInterruptUDLatchedStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char *_ pb_UDStatus) |
-+----------------------------------------------------------------------------+
-| Task              : Return the counter progress latched status after an     |
-|                     index interrupt has occurred.                           |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_UDStatus : 0 : Counter progress in the |
-| selected mode down |
-| 1 : Counter progress in the |
-| selected mode up |
-|                                                2 : No index interrupt       |
-|                                                    occurred                 |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: No counter module found |
-| -3: Counter not initialised see function |
-| "i_APCI1710_InitCounter" |
-| -4: Interrupt function not initialised. |
-| See function "i_APCI1710_SetBoardIntRoutineX" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_GetInterruptUDLatchedStatus(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char *pb_UDStatus)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /*********************************/
- /* Test if index interrupt occur */
- /*********************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_IndexInterruptOccur == 1) {
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_IndexInterruptOccur = 0;
-
- dw_StatusReg = inl(devpriv->s_BoardInfos.
- ui_Address + 12 + (64 * b_ModulNbr));
-
- *pb_UDStatus = (unsigned char) ((dw_StatusReg >> 1) & 1);
- } else {
- /****************************/
- /* No index interrupt occur */
- /****************************/
-
- *pb_UDStatus = 2;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
- /*
- +----------------------------------------------------------------------------+
- | Function Name : _INT_ i_APCI1710_ReadFrequencyMeasurement |
- | (unsigned char_ b_BoardHandle, |
- | unsigned char_ b_ModulNbr, |
- | unsigned char *_ pb_Status, |
- | PULONG_ pul_ReadValue) |
- +----------------------------------------------------------------------------+
- | Task : Returns the status (pb_Status) and the number of |
- | increments in the set time. |
- | See function " i_APCI1710_InitFrequencyMeasurement " |
- +----------------------------------------------------------------------------+
- | Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
- | unsigned char_ b_ModulNbr : Number of the module to be |
- | configured (0 to 3) |
- +----------------------------------------------------------------------------+
- | Output Parameters : unsigned char *_ pb_Status : Returns the frequency |
- | measurement status |
- | 0 : Counting cycle not |
- | started. |
- | 1 : Counting cycle started. |
- | 2 : Counting cycle stopped. |
- | The measurement cycle is |
- | completed. |
- | unsigned char *_ pb_UDStatus : 0 : Counter progress in the |
- | selected mode down |
- | 1 : Counter progress in the |
- | selected mode up |
- | PULONG_ pul_ReadValue : Return the number of |
- | increments in the defined |
- | time base. |
- +----------------------------------------------------------------------------+
- | Return Value : 0: No error |
- | -1: The handle parameter of the board is wrong |
- | -2: The selected module number is wrong |
- | -3: Counter not initialised see function |
- | "i_APCI1710_InitCounter" |
- | -4: Frequency measurement logic not initialised. |
- | See function "i_APCI1710_InitFrequencyMeasurement" |
- +----------------------------------------------------------------------------+
- */
-static int i_APCI1710_ReadFrequencyMeasurement(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char *pb_Status,
- unsigned char *pb_UDStatus,
- unsigned int *pul_ReadValue)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int ui_16BitValue;
- unsigned int dw_StatusReg;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.s_InitFlag.b_CounterInit == 1) {
- /********************************************/
- /* Test if frequency measurement initialised */
- /********************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.b_FrequencyMeasurementInit == 1) {
- /******************/
- /* Test if enable */
- /******************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SiemensCounterInfo.
- s_InitFlag.
- b_FrequencyMeasurementEnable == 1) {
- /*******************/
- /* Read the status */
- /*******************/
-
- dw_StatusReg =
- inl(devpriv->s_BoardInfos.
- ui_Address + 32 +
- (64 * b_ModulNbr));
-
- /**************************/
- /* Test if frequency stop */
- /**************************/
-
- if (dw_StatusReg & 1) {
- *pb_Status = 2;
- *pb_UDStatus =
- (unsigned char) ((dw_StatusReg >>
- 1) & 3);
-
- /******************/
- /* Read the value */
- /******************/
-
- *pul_ReadValue =
- inl(devpriv->
- s_BoardInfos.
- ui_Address + 28 +
- (64 * b_ModulNbr));
-
-            if (*pb_UDStatus == 0) {
-                /*************************/
-                /* Test the counter mode */
-                /*************************/
-
-                if ((devpriv->s_ModuleInfo[b_ModulNbr].s_SiemensCounterInfo.s_ModeRegister.s_ByteModeRegister.b_ModeRegister1 & APCI1710_16BIT_COUNTER) == APCI1710_16BIT_COUNTER) {
-                    /****************************************/
-                    /* Test if 16-bit counter 1 pulse occur */
-                    /****************************************/
-
-                    if ((*pul_ReadValue & 0xFFFFU) != 0) {
-                        ui_16BitValue = (unsigned int) *pul_ReadValue & 0xFFFFU;
-                        *pul_ReadValue = (*pul_ReadValue & 0xFFFF0000UL) | (0xFFFFU - ui_16BitValue);
-                    }
-
-                    /****************************************/
-                    /* Test if 16-bit counter 2 pulse occur */
-                    /****************************************/
-
-                    if ((*pul_ReadValue & 0xFFFF0000UL) != 0) {
-                        ui_16BitValue = (unsigned int) ((*pul_ReadValue >> 16) & 0xFFFFU);
-                        *pul_ReadValue = (*pul_ReadValue & 0xFFFFUL) | ((0xFFFFU - ui_16BitValue) << 16);
-                    }
-                } else {
-                    if (*pul_ReadValue != 0)
-                        *pul_ReadValue = 0xFFFFFFFFUL - *pul_ReadValue;
-                }
-            } else {
-                if (*pb_UDStatus == 1) {
-                    /****************************************/
-                    /* Test if 16-bit counter 2 pulse occur */
-                    /****************************************/
-
-                    if ((*pul_ReadValue & 0xFFFF0000UL) != 0) {
-                        ui_16BitValue = (unsigned int) ((*pul_ReadValue >> 16) & 0xFFFFU);
-                        *pul_ReadValue = (*pul_ReadValue & 0xFFFFUL) | ((0xFFFFU - ui_16BitValue) << 16);
-                    }
-                } else {
-                    if (*pb_UDStatus == 2) {
-                        /****************************************/
-                        /* Test if 16-bit counter 1 pulse occur */
-                        /****************************************/
-
-                        if ((*pul_ReadValue & 0xFFFFU) != 0) {
-                            ui_16BitValue = (unsigned int) *pul_ReadValue & 0xFFFFU;
-                            *pul_ReadValue = (*pul_ReadValue & 0xFFFF0000UL) | (0xFFFFU - ui_16BitValue);
-                        }
-                    }
-                }
-            }
- } else {
- *pb_Status = 1;
- *pb_UDStatus = 0;
- }
- } else {
- *pb_Status = 0;
- *pb_UDStatus = 0;
- }
- } else {
- /***********************************************/
- /* Frequency measurement logic not initialised */
- /***********************************************/
-
- DPRINTK("Frequency measurement logic not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /****************************************/
- /* Counter not initialised see function */
- /* "i_APCI1710_InitCounter" */
- /****************************************/
-
- DPRINTK("Counter not initialised\n");
- i_ReturnValue = -3;
- }
- } else {
- /*************************************************/
- /* The selected module number parameter is wrong */
- /*************************************************/
-
- DPRINTK("The selected module number parameter is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
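The correction applied to pul_ReadValue above replaces a non-zero residue R in a 16-bit half with 0xFFFF - R (or, for a full 32-bit counter, with 0xFFFFFFFF - R), which is what you would do for a counter that counts the measurement window down rather than up. A compact, standalone sketch of that arithmetic (illustrative only, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Convert a down-counter residue into an increment count, 2*16-bit mode. */
static uint32_t correct_2x16(uint32_t raw)
{
    uint32_t lo = raw & 0xFFFFu;
    uint32_t hi = (raw >> 16) & 0xFFFFu;

    if (lo)
        lo = 0xFFFFu - lo;
    if (hi)
        hi = 0xFFFFu - hi;
    return (hi << 16) | lo;
}

int main(void)
{
    /* 0xFFFEFFFD -> 1 pulse on the high counter, 2 on the low one. */
    printf("0x%08X\n", correct_2x16(0xFFFEFFFDu));
    return 0;
}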
-/*
- * Read and Get functions for INC_CPT
- */
-static int i_APCI1710_InsnReadINCCPT(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- unsigned int ui_ReadType;
- int i_ReturnValue = 0;
-
- ui_ReadType = CR_CHAN(insn->chanspec);
-
- devpriv->tsk_Current = current; /* Save the current process task structure */
- switch (ui_ReadType) {
- case APCI1710_INCCPT_READLATCHREGISTERSTATUS:
- i_ReturnValue = i_APCI1710_ReadLatchRegisterStatus(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned char) CR_RANGE(insn->chanspec), (unsigned char *) &data[0]);
- break;
-
- case APCI1710_INCCPT_READLATCHREGISTERVALUE:
- i_ReturnValue = i_APCI1710_ReadLatchRegisterValue(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned char) CR_RANGE(insn->chanspec), (unsigned int *) &data[0]);
- printk("Latch Register Value %d\n", data[0]);
- break;
-
- case APCI1710_INCCPT_READ16BITCOUNTERVALUE:
- i_ReturnValue = i_APCI1710_Read16BitCounterValue(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned char) CR_RANGE(insn->chanspec), (unsigned int *) &data[0]);
- break;
-
- case APCI1710_INCCPT_READ32BITCOUNTERVALUE:
- i_ReturnValue = i_APCI1710_Read32BitCounterValue(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned int *) &data[0]);
- break;
-
- case APCI1710_INCCPT_GETINDEXSTATUS:
- i_ReturnValue = i_APCI1710_GetIndexStatus(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char *) &data[0]);
- break;
-
- case APCI1710_INCCPT_GETREFERENCESTATUS:
- i_ReturnValue = i_APCI1710_GetReferenceStatus(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char *) &data[0]);
- break;
-
- case APCI1710_INCCPT_GETUASSTATUS:
- i_ReturnValue = i_APCI1710_GetUASStatus(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char *) &data[0]);
- break;
-
- case APCI1710_INCCPT_GETCBSTATUS:
- i_ReturnValue = i_APCI1710_GetCBStatus(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char *) &data[0]);
- break;
-
- case APCI1710_INCCPT_GET16BITCBSTATUS:
- i_ReturnValue = i_APCI1710_Get16BitCBStatus(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned char *) &data[0], (unsigned char *) &data[1]);
- break;
-
- case APCI1710_INCCPT_GETUDSTATUS:
- i_ReturnValue = i_APCI1710_GetUDStatus(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char *) &data[0]);
-
- break;
-
- case APCI1710_INCCPT_GETINTERRUPTUDLATCHEDSTATUS:
- i_ReturnValue = i_APCI1710_GetInterruptUDLatchedStatus(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char *) &data[0]);
- break;
-
- case APCI1710_INCCPT_READFREQUENCYMEASUREMENT:
- i_ReturnValue = i_APCI1710_ReadFrequencyMeasurement(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned char *) &data[0],
- (unsigned char *) &data[1], (unsigned int *) &data[2]);
- break;
-
- case APCI1710_INCCPT_READINTERRUPT:
- data[0] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].b_OldModuleMask;
- data[1] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].ul_OldInterruptMask;
- data[2] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].ul_OldCounterLatchValue;
-
-		/***************************/
-		/* Increment the read FIFO */
-		/***************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Read = (devpriv->s_InterruptParameters.
- ui_Read + 1) % APCI1710_SAVE_INTERRUPT;
-
- break;
-
- default:
- printk("ReadType Parameter wrong\n");
- }
-
- if (i_ReturnValue >= 0)
- i_ReturnValue = insn->n;
- return i_ReturnValue;
-
-}
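The dispatcher above decodes everything from the comedi chanspec: CR_CHAN() selects the read type, CR_AREF() the module (0 to 3) and CR_RANGE() apparently carries an extra argument such as the latch register selection. The sketch below packs a chanspec using macros written in the form defined by comedi.h; the read-type value is a hypothetical placeholder, since the real APCI1710_INCCPT_* constants are defined elsewhere in the driver.

#include <stdio.h>

/* chanspec packing, in the form defined by comedi.h */
#define CR_PACK(chan, rng, aref) ((((aref) & 0x3) << 24) | (((rng) & 0xff) << 16) | (chan))
#define CR_CHAN(a)  ((a) & 0xffff)
#define CR_RANGE(a) (((a) >> 16) & 0xff)
#define CR_AREF(a)  (((a) >> 24) & 0x03)

int main(void)
{
    unsigned int read_type = 5;  /* hypothetical stand-in for one APCI1710_INCCPT_* value */
    unsigned int module = 2;     /* module number, carried in the AREF field */
    unsigned int arg = 0;        /* extra argument, carried in the RANGE field */

    unsigned int chanspec = CR_PACK(read_type, arg, module);

    printf("chan=%u range=%u aref=%u\n",
           CR_CHAN(chanspec), CR_RANGE(chanspec), CR_AREF(chanspec));
    return 0;
}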
diff --git a/drivers/staging/comedi/drivers/addi-data/APCI1710_Inp_cpt.c b/drivers/staging/comedi/drivers/addi-data/APCI1710_Inp_cpt.c
deleted file mode 100644
index 6bbcb06cc27..00000000000
--- a/drivers/staging/comedi/drivers/addi-data/APCI1710_Inp_cpt.c
+++ /dev/null
@@ -1,866 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
-	Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-@endverbatim
-*/
-/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : API APCI1710 | Compiler : gcc |
- | Module name : Inp_CPT.C | Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-----------------------------------------------------------------------+
- | Description : APCI-1710 pulse encoder module |
- | |
- | |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +-----------------------------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | | | |
- |----------|-----------|------------------------------------------------|
- | 08/05/00 | Guinot C | - 0400/0228 All Function in RING 0 |
- | | | available |
- +-----------------------------------------------------------------------+
-*/
-
-#define APCI1710_SINGLE 0
-#define APCI1710_CONTINUOUS 1
-
-#define APCI1710_PULSEENCODER_READ 0
-#define APCI1710_PULSEENCODER_WRITE 1
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitPulseEncoder |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PulseEncoderNbr, |
-| unsigned char_ b_InputLevelSelection, |
-| unsigned char_ b_TriggerOutputAction, |
-| ULONG_ ul_StartValue) |
-+----------------------------------------------------------------------------+
-| Task              : Configure the pulse encoder operating mode selected via|
-|                     b_ModulNbr and b_PulseEncoderNbr. The pulse encoder    |
-|                     decrements the counter value by 1 after each pulse.    |
-|                                                                            |
-|                     You must call this function before you call any other  |
-|                     function which accesses the pulse encoders.            |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710|
-| unsigned char_ b_ModulNbr : Module number to |
-| configure (0 to 3) |
-| unsigned char_ b_PulseEncoderNbr : Pulse encoder selection |
-| (0 to 3) |
-| unsigned char_ b_InputLevelSelection : Input level selection |
-| (0 or 1) |
-|                                                     0 : The pulse encoder|
-|                                                         counts the low   |
-|                                                         level pulse.     |
-|                                                     1 : The pulse encoder|
-|                                                         counts the high  |
-|                                                         level pulse.     |
-| unsigned char_ b_TriggerOutputAction : Digital TRIGGER output |
-| action |
-| 0 : No action |
-| 1 : Set the trigger |
-| output to "1" |
-| (high) after the |
-| passage from 1 to|
-| 0 from pulse |
-| encoder. |
-| 2 : Set the trigger |
-| output to "0" |
-| (low) after the |
-| passage from 1 to|
-| 0 from pulse |
-| encoder |
-| ULONG_ ul_StartValue : Pulse encoder start value|
-| (1 to 4294967295)
- b_ModulNbr =(unsigned char) CR_AREF(insn->chanspec);
- b_PulseEncoderNbr =(unsigned char) data[0];
- b_InputLevelSelection =(unsigned char) data[1];
- b_TriggerOutputAction =(unsigned char) data[2];
- ul_StartValue =(unsigned int) data[3];
- |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module is not a pulse encoder module |
-| -3: Pulse encoder selection is wrong |
-| -4: Input level selection is wrong |
-| -5: Digital TRIGGER output action selection is wrong |
-| -6: Pulse encoder start value is wrong |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnConfigInitPulseEncoder(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_IntRegister;
- unsigned char b_ModulNbr;
- unsigned char b_PulseEncoderNbr;
- unsigned char b_InputLevelSelection;
- unsigned char b_TriggerOutputAction;
- unsigned int ul_StartValue;
-
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_PulseEncoderNbr = (unsigned char) data[0];
- b_InputLevelSelection = (unsigned char) data[1];
- b_TriggerOutputAction = (unsigned char) data[2];
- ul_StartValue = (unsigned int) data[3];
-
- i_ReturnValue = insn->n;
-
- /***********************************/
- /* Test the selected module number */
- /***********************************/
-
- if (b_ModulNbr <= 3) {
- /*************************/
- /* Test if pulse encoder */
- /*************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- APCI1710_PULSE_ENCODER) ==
- APCI1710_PULSE_ENCODER) {
- /******************************************/
- /* Test the selected pulse encoder number */
- /******************************************/
-
- if (b_PulseEncoderNbr <= 3) {
- /************************/
- /* Test the input level */
- /************************/
-
- if ((b_InputLevelSelection == 0)
- || (b_InputLevelSelection == 1)) {
- /*******************************************/
- /* Test the ouput TRIGGER action selection */
- /*******************************************/
-
- if ((b_TriggerOutputAction <= 2)
- || (b_PulseEncoderNbr > 0)) {
- if (ul_StartValue > 1) {
-
-					dw_IntRegister = inl(devpriv->s_BoardInfos.ui_Address + 20 + (64 * b_ModulNbr));
-
-					/***********************/
-					/* Set the start value */
-					/***********************/
-
-					outl(ul_StartValue, devpriv->s_BoardInfos.ui_Address + (b_PulseEncoderNbr * 4) + (64 * b_ModulNbr));
-
-					/***********************/
-					/* Set the input level */
-					/***********************/
-
-					devpriv->s_ModuleInfo[b_ModulNbr].s_PulseEncoderModuleInfo.dw_SetRegister =
-						(devpriv->s_ModuleInfo[b_ModulNbr].s_PulseEncoderModuleInfo.dw_SetRegister &
-						 (0xFFFFFFFFUL - (1UL << (8 + b_PulseEncoderNbr)))) |
-						((1UL & (~b_InputLevelSelection)) << (8 + b_PulseEncoderNbr));
-
-					/*******************************/
-					/* Test if output trigger used */
-					/*******************************/
-
-					if ((b_TriggerOutputAction > 0) && (b_PulseEncoderNbr > 1)) {
-						/****************************/
-						/* Enable the output action */
-						/****************************/
-
-						devpriv->s_ModuleInfo[b_ModulNbr].s_PulseEncoderModuleInfo.dw_SetRegister |=
-							1UL << (4 + b_PulseEncoderNbr);
-
-						/*********************************/
-						/* Set the output TRIGGER action */
-						/*********************************/
-
-						devpriv->s_ModuleInfo[b_ModulNbr].s_PulseEncoderModuleInfo.dw_SetRegister =
-							(devpriv->s_ModuleInfo[b_ModulNbr].s_PulseEncoderModuleInfo.dw_SetRegister &
-							 (0xFFFFFFFFUL - (1UL << (12 + b_PulseEncoderNbr)))) |
-							((1UL & (b_TriggerOutputAction - 1)) << (12 + b_PulseEncoderNbr));
-					} else {
-						/*****************************/
-						/* Disable the output action */
-						/*****************************/
-
-						devpriv->s_ModuleInfo[b_ModulNbr].s_PulseEncoderModuleInfo.dw_SetRegister &=
-							0xFFFFFFFFUL - (1UL << (4 + b_PulseEncoderNbr));
-					}
-
-					/*************************/
-					/* Set the configuration */
-					/*************************/
-
-					outl(devpriv->s_ModuleInfo[b_ModulNbr].s_PulseEncoderModuleInfo.dw_SetRegister,
-					     devpriv->s_BoardInfos.ui_Address + 20 + (64 * b_ModulNbr));
-
-					devpriv->s_ModuleInfo[b_ModulNbr].s_PulseEncoderModuleInfo.s_PulseEncoderInfo[b_PulseEncoderNbr].b_PulseEncoderInit = 1;
- } else {
- /**************************************/
- /* Pulse encoder start value is wrong */
- /**************************************/
-
- DPRINTK("Pulse encoder start value is wrong\n");
- i_ReturnValue = -6;
- }
- } else {
- /****************************************************/
- /* Digital TRIGGER output action selection is wrong */
- /****************************************************/
-
- DPRINTK("Digital TRIGGER output action selection is wrong\n");
- i_ReturnValue = -5;
- }
- } else {
- /**********************************/
- /* Input level selection is wrong */
- /**********************************/
-
- DPRINTK("Input level selection is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /************************************/
- /* Pulse encoder selection is wrong */
- /************************************/
-
- DPRINTK("Pulse encoder selection is wrong\n");
- i_ReturnValue = -3;
- }
- } else {
- /********************************************/
- /* The module is not a pulse encoder module */
- /********************************************/
-
- DPRINTK("The module is not a pulse encoder module\n");
- i_ReturnValue = -2;
- }
- } else {
- /********************************************/
- /* The module is not a pulse encoder module */
- /********************************************/
-
- DPRINTK("The module is not a pulse encoder module\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
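The read-modify-write above boils down to three bit fields in dw_SetRegister, indexed by the encoder number n: bit 8+n holds the inverted input level, bit 4+n enables the digital TRIGGER output and bit 12+n selects the trigger polarity (b_TriggerOutputAction - 1); the driver only enables the trigger path when the action is non-zero and the encoder number is greater than 1. A standalone sketch of that composition, using a hypothetical helper rather than driver code:

#include <stdint.h>
#include <stdio.h>

/* Recompute the pulse-encoder set register the way the code above does. */
static uint32_t pe_set_register(uint32_t reg, unsigned n,
                                unsigned input_level, unsigned trigger_action)
{
    /* bit 8+n: inverted input level */
    reg = (reg & ~(1UL << (8 + n))) | ((1UL & ~input_level) << (8 + n));

    if (trigger_action > 0 && n > 1) {
        reg |= 1UL << (4 + n);                      /* enable the output action */
        reg = (reg & ~(1UL << (12 + n))) |          /* trigger polarity */
              ((1UL & (trigger_action - 1)) << (12 + n));
    } else {
        reg &= ~(1UL << (4 + n));                   /* disable the output action */
    }
    return reg;
}

int main(void)
{
    printf("0x%08X\n", pe_set_register(0, 2, 1, 2));
    return 0;
}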
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_EnablePulseEncoder |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PulseEncoderNbr, |
-| unsigned char_ b_CycleSelection, |
-| unsigned char_ b_InterruptHandling) |
-+----------------------------------------------------------------------------+
-| Task              : Enable or disable the selected pulse encoder           |
-|                     (b_PulseEncoderNbr) of the selected module (b_ModulNbr)|
-|                     Each input pulse decrements the pulse encoder counter  |
-|                     value by 1. If you enable the interrupt                |
-|                     (b_InterruptHandling), an interrupt is generated when  |
-|                     the pulse encoder has run down.                        |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710|
-| unsigned char_ b_ModulNbr : Module number to |
-| configure (0 to 3) |
-| unsigned char_ b_PulseEncoderNbr : Pulse encoder selection |
-| (0 to 3) |
-| unsigned char_ b_CycleSelection : APCI1710_CONTINUOUS: |
-| Each time the |
-| counting value is set|
-| on "0", the pulse |
-| encoder load the |
-| start value after |
-| the next pulse. |
-| APCI1710_SINGLE: |
-| If the counter is set|
-| on "0", the pulse |
-| encoder is stopped. |
-| unsigned char_ b_InterruptHandling : Interrupts can be |
-| generated, when the pulse|
-| encoder has run down. |
-| With this parameter the |
-| user decides if |
-| interrupts are used or |
-| not. |
-| APCI1710_ENABLE: |
-| Interrupts are enabled |
-| APCI1710_DISABLE: |
-| Interrupts are disabled
-
- b_ModulNbr =(unsigned char) CR_AREF(insn->chanspec);
- b_Action =(unsigned char) data[0];
- b_PulseEncoderNbr =(unsigned char) data[1];
- b_CycleSelection =(unsigned char) data[2];
- b_InterruptHandling =(unsigned char) data[3];|
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection is wrong |
-| -3: Pulse encoder selection is wrong |
-| -4: Pulse encoder not initialised. |
-| See function "i_APCI1710_InitPulseEncoder" |
-| -5: Cycle selection mode is wrong |
-| -6: Interrupt handling mode is wrong |
-| -7: Interrupt routine not installed. |
-| See function "i_APCI1710_SetBoardIntRoutineX" |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnWriteEnableDisablePulseEncoder(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned char b_ModulNbr;
- unsigned char b_PulseEncoderNbr;
- unsigned char b_CycleSelection;
- unsigned char b_InterruptHandling;
- unsigned char b_Action;
-
- i_ReturnValue = insn->n;
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_Action = (unsigned char) data[0];
- b_PulseEncoderNbr = (unsigned char) data[1];
- b_CycleSelection = (unsigned char) data[2];
- b_InterruptHandling = (unsigned char) data[3];
-
- /***********************************/
- /* Test the selected module number */
- /***********************************/
-
- if (b_ModulNbr <= 3) {
- /******************************************/
- /* Test the selected pulse encoder number */
- /******************************************/
-
- if (b_PulseEncoderNbr <= 3) {
- /*************************************/
- /* Test if pulse encoder initialised */
- /*************************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_PulseEncoderModuleInfo.
- s_PulseEncoderInfo[b_PulseEncoderNbr].
- b_PulseEncoderInit == 1) {
- switch (b_Action) {
-
- case APCI1710_ENABLE:
- /****************************/
- /* Test the cycle selection */
- /****************************/
-
- if (b_CycleSelection ==
- APCI1710_CONTINUOUS
- || b_CycleSelection ==
- APCI1710_SINGLE) {
- /*******************************/
- /* Test the interrupt handling */
- /*******************************/
-
- if (b_InterruptHandling ==
- APCI1710_ENABLE
- || b_InterruptHandling
- == APCI1710_DISABLE) {
- /******************************/
- /* Test if interrupt not used */
- /******************************/
-
-					if (b_InterruptHandling == APCI1710_DISABLE) {
-						/*************************/
-						/* Disable the interrupt */
-						/*************************/
-
-						devpriv->s_ModuleInfo[b_ModulNbr].s_PulseEncoderModuleInfo.dw_SetRegister &=
-							0xFFFFFFFFUL - (1UL << b_PulseEncoderNbr);
-					} else {
-						/************************/
-						/* Enable the interrupt */
-						/************************/
-
-						devpriv->s_ModuleInfo[b_ModulNbr].s_PulseEncoderModuleInfo.dw_SetRegister |=
-							1UL << b_PulseEncoderNbr;
-						devpriv->tsk_Current = current;	/* Save the current process task structure */
-					}
-
-					if (i_ReturnValue >= 0) {
-						/***********************************/
-						/* Enable or disable the interrupt */
-						/***********************************/
-
-						outl(devpriv->s_ModuleInfo[b_ModulNbr].s_PulseEncoderModuleInfo.dw_SetRegister,
-						     devpriv->s_BoardInfos.ui_Address + 20 + (64 * b_ModulNbr));
-
-						/****************************/
-						/* Enable the pulse encoder */
-						/****************************/
-
-						devpriv->s_ModuleInfo[b_ModulNbr].s_PulseEncoderModuleInfo.dw_ControlRegister |=
-							1UL << b_PulseEncoderNbr;
-
-						/**********************/
-						/* Set the cycle mode */
-						/**********************/
-
-						devpriv->s_ModuleInfo[b_ModulNbr].s_PulseEncoderModuleInfo.dw_ControlRegister =
-							(devpriv->s_ModuleInfo[b_ModulNbr].s_PulseEncoderModuleInfo.dw_ControlRegister &
-							 (0xFFFFFFFFUL - (1 << (b_PulseEncoderNbr + 4)))) |
-							((b_CycleSelection & 1UL) << (4 + b_PulseEncoderNbr));
-
-						/****************************/
-						/* Enable the pulse encoder */
-						/****************************/
-
-						outl(devpriv->s_ModuleInfo[b_ModulNbr].s_PulseEncoderModuleInfo.dw_ControlRegister,
-						     devpriv->s_BoardInfos.ui_Address + 16 + (64 * b_ModulNbr));
-					}
- } else {
- /************************************/
- /* Interrupt handling mode is wrong */
- /************************************/
-
- DPRINTK("Interrupt handling mode is wrong\n");
- i_ReturnValue = -6;
- }
- } else {
- /*********************************/
- /* Cycle selection mode is wrong */
- /*********************************/
-
- DPRINTK("Cycle selection mode is wrong\n");
- i_ReturnValue = -5;
- }
- break;
-
- case APCI1710_DISABLE:
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_ControlRegister =
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_ControlRegister &
- (0xFFFFFFFFUL -
- (1UL << b_PulseEncoderNbr));
-
- /*****************************/
- /* Disable the pulse encoder */
- /*****************************/
-
- outl(devpriv->s_ModuleInfo[b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_ControlRegister,
- devpriv->s_BoardInfos.
- ui_Address + 16 +
- (64 * b_ModulNbr));
-
- break;
- } /* switch End */
-
- } else {
- /*********************************/
- /* Pulse encoder not initialised */
- /*********************************/
-
- DPRINTK("Pulse encoder not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /************************************/
- /* Pulse encoder selection is wrong */
- /************************************/
-
- DPRINTK("Pulse encoder selection is wrong\n");
- i_ReturnValue = -3;
- }
- } else {
- /*****************************/
- /* Module selection is wrong */
- /*****************************/
-
- DPRINTK("Module selection is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
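For the control register the layout is simpler: bit n starts or stops encoder n and bit 4+n holds the cycle selection (1 = continuous with this file's defines), while the interrupt enable lives in bit n of the set register. The write instruction takes data[] in the order shown in the header above (action, encoder number, cycle selection, interrupt handling). A small sketch of the control-register update, with illustrative helper names only:

#include <stdint.h>
#include <stdio.h>

#define PE_SINGLE     0	/* mirrors APCI1710_SINGLE */
#define PE_CONTINUOUS 1	/* mirrors APCI1710_CONTINUOUS */

/* Start encoder n and select its cycle mode, as the ENABLE branch does. */
static uint32_t pe_enable(uint32_t ctrl, unsigned n, unsigned cycle)
{
    ctrl |= 1UL << n;                       /* start encoder n */
    ctrl = (ctrl & ~(1UL << (4 + n))) |     /* cycle mode bit */
           ((cycle & 1UL) << (4 + n));
    return ctrl;
}

/* Stop encoder n, as the DISABLE branch does. */
static uint32_t pe_disable(uint32_t ctrl, unsigned n)
{
    return ctrl & ~(1UL << n);
}

int main(void)
{
    uint32_t ctrl = pe_enable(0, 1, PE_CONTINUOUS);

    printf("enabled : 0x%08X\n", ctrl);
    printf("disabled: 0x%08X\n", pe_disable(ctrl, 1));
    return 0;
}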
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadPulseEncoderStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PulseEncoderNbr, |
-| unsigned char *_ pb_Status) |
-+----------------------------------------------------------------------------+
-| Task  APCI1710_PULSEENCODER_READ : Reads the pulse encoder status
-                                     and value from the selected pulse       |
-|                     encoder (b_PulseEncoderNbr) of the selected module     |
-|                     (b_ModulNbr).                                          |
-+----------------------------------------------------------------------------+
- unsigned char b_Type; data[0]
- APCI1710_PULSEENCODER_WRITE
-                     Writes a 32-bit value (ul_WriteValue) into the selected|
-|                     pulse encoder (b_PulseEncoderNbr) of the selected      |
-|                     module (b_ModulNbr). This operation sets the new pulse |
-|                     encoder start value.
- APCI1710_PULSEENCODER_READ
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710|
-| CRAREF() unsigned char_ b_ModulNbr : Module number to |
-| configure (0 to 3) |
-| data[1] unsigned char_ b_PulseEncoderNbr : Pulse encoder selection |
-| (0 to 3)
- APCI1710_PULSEENCODER_WRITE
- data[2] ULONG_ ul_WriteValue : 32-bit value to be |
-| written |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_Status : Pulse encoder status. |
-|                                                0 : No overflow occurred    |
-|                                                1 : Overflow occurred
-                     PULONG_  pul_ReadValue    : Pulse encoder value         |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection is wrong |
-| -3: Pulse encoder selection is wrong |
-| -4: Pulse encoder not initialised. |
-| See function "i_APCI1710_InitPulseEncoder" |
-+----------------------------------------------------------------------------+
-*/
-
-/*_INT_ i_APCI1710_ReadPulseEncoderStatus (unsigned char_ b_BoardHandle,
- unsigned char_ b_ModulNbr,
- unsigned char_ b_PulseEncoderNbr,
-
- unsigned char *_ pb_Status)
- */
-static int i_APCI1710_InsnBitsReadWritePulseEncoder(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusRegister;
- unsigned char b_ModulNbr;
- unsigned char b_PulseEncoderNbr;
- unsigned char *pb_Status;
- unsigned char b_Type;
- unsigned int *pul_ReadValue;
- unsigned int ul_WriteValue;
-
- i_ReturnValue = insn->n;
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_Type = (unsigned char) data[0];
- b_PulseEncoderNbr = (unsigned char) data[1];
- pb_Status = (unsigned char *) &data[0];
- pul_ReadValue = (unsigned int *) &data[1];
-
- /***********************************/
- /* Test the selected module number */
- /***********************************/
-
- if (b_ModulNbr <= 3) {
- /******************************************/
- /* Test the selected pulse encoder number */
- /******************************************/
-
- if (b_PulseEncoderNbr <= 3) {
- /*************************************/
- /* Test if pulse encoder initialised */
- /*************************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_PulseEncoderModuleInfo.
- s_PulseEncoderInfo[b_PulseEncoderNbr].
- b_PulseEncoderInit == 1) {
-
- switch (b_Type) {
- case APCI1710_PULSEENCODER_READ:
- /****************************/
- /* Read the status register */
- /****************************/
-
- dw_StatusRegister =
- inl(devpriv->s_BoardInfos.
- ui_Address + 16 +
- (64 * b_ModulNbr));
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_StatusRegister = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_StatusRegister |
- dw_StatusRegister;
-
- *pb_Status =
- (unsigned char) (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_StatusRegister >> (1 +
- b_PulseEncoderNbr)) & 1;
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_StatusRegister =
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_PulseEncoderModuleInfo.
- dw_StatusRegister &
- (0xFFFFFFFFUL - (1 << (1 +
- b_PulseEncoderNbr)));
-
- /******************/
- /* Read the value */
- /******************/
-
- *pul_ReadValue =
- inl(devpriv->s_BoardInfos.
- ui_Address +
- (4 * b_PulseEncoderNbr) +
- (64 * b_ModulNbr));
- break;
-
- case APCI1710_PULSEENCODER_WRITE:
- ul_WriteValue = (unsigned int) data[2];
- /*******************/
- /* Write the value */
- /*******************/
-
- outl(ul_WriteValue,
- devpriv->s_BoardInfos.
- ui_Address +
- (4 * b_PulseEncoderNbr) +
- (64 * b_ModulNbr));
-
- } /* end of switch */
- } else {
- /*********************************/
- /* Pulse encoder not initialised */
- /*********************************/
-
- DPRINTK("Pulse encoder not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /************************************/
- /* Pulse encoder selection is wrong */
- /************************************/
-
- DPRINTK("Pulse encoder selection is wrong\n");
- i_ReturnValue = -3;
- }
- } else {
- /*****************************/
- /* Module selection is wrong */
- /*****************************/
-
- DPRINTK("Module selection is wrong\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
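For the read path above, data[0] carries the request type in and the overflow status out, while data[1] carries the encoder number in and the counter value out; the status for encoder n is bit 1+n of the accumulated status register and is cleared once it has been reported. A short decoding sketch with illustrative values only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned n = 3;                       /* pulse encoder number (0 to 3) */
    uint32_t latched = 1u << (1 + n);     /* stand-in for the accumulated status */

    unsigned char overflow = (latched >> (1 + n)) & 1;
    latched &= ~(1u << (1 + n));          /* clear the bit once it has been reported */

    printf("overflow=%u remaining=0x%08X\n", overflow, latched);
    return 0;
}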
-
-static int i_APCI1710_InsnReadInterruptPulseEncoder(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
-
- data[0] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].b_OldModuleMask;
- data[1] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].ul_OldInterruptMask;
- data[2] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].ul_OldCounterLatchValue;
-
- /***************************/
- /* Increment the read FIFO */
- /***************************/
-
- devpriv->s_InterruptParameters.
- ui_Read = (devpriv->
- s_InterruptParameters.ui_Read + 1) % APCI1710_SAVE_INTERRUPT;
-
- return insn->n;
-
-}
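Both interrupt readers in this patch drain the same fixed-size FIFO of saved interrupt records: three values are copied into data[] and the read index advances modulo APCI1710_SAVE_INTERRUPT. A minimal ring-buffer sketch of that consumer side; the struct layout and the depth of 32 are assumptions for illustration, not the driver's definitions.

#include <stdio.h>

#define SAVE_INTERRUPT 32	/* assumed depth; stands in for APCI1710_SAVE_INTERRUPT */

struct irq_record {
    unsigned int module_mask;
    unsigned int interrupt_mask;
    unsigned int latch_value;
};

struct irq_fifo {
    struct irq_record slot[SAVE_INTERRUPT];
    unsigned int read;	/* consumer index, advanced modulo the depth */
};

/* Copy the oldest record out and advance the read index, as the insn does. */
static void fifo_pop(struct irq_fifo *f, unsigned int data[3])
{
    data[0] = f->slot[f->read].module_mask;
    data[1] = f->slot[f->read].interrupt_mask;
    data[2] = f->slot[f->read].latch_value;
    f->read = (f->read + 1) % SAVE_INTERRUPT;
}

int main(void)
{
    struct irq_fifo fifo = { .slot[0] = { 0x1, 0x10, 1234 } };
    unsigned int data[3];

    fifo_pop(&fifo, data);
    printf("mask=0x%X irq=0x%X latch=%u next=%u\n",
           data[0], data[1], data[2], fifo.read);
    return 0;
}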
diff --git a/drivers/staging/comedi/drivers/addi-data/APCI1710_Pwm.c b/drivers/staging/comedi/drivers/addi-data/APCI1710_Pwm.c
deleted file mode 100644
index 5c830337db8..00000000000
--- a/drivers/staging/comedi/drivers/addi-data/APCI1710_Pwm.c
+++ /dev/null
@@ -1,3582 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
-	Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-@endverbatim
-*/
-/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : API APCI1710 | Compiler : gcc |
- | Module name : PWM.C | Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-----------------------------------------------------------------------+
-  | Description :   APCI-1710 pulse width modulation module              |
- | |
- | |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +-----------------------------------------------------------------------+
- | Date | Author | Description of updates |
- +-----------------------------------------------------------------------+
- | 08/05/00 | Guinot C | - 0400/0228 All Function in RING 0 |
- | | | available |
- +-----------------------------------------------------------------------+
-*/
-
-#define APCI1710_30MHZ 30
-#define APCI1710_33MHZ 33
-#define APCI1710_40MHZ 40
-
-#define APCI1710_PWM_INIT 0
-#define APCI1710_PWM_GETINITDATA 1
-
-#define APCI1710_PWM_DISABLE 0
-#define APCI1710_PWM_ENABLE 1
-#define APCI1710_PWM_NEWTIMING 2
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitPWM |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PWM, |
-| unsigned char_ b_ClockSelection, |
-| unsigned char_ b_TimingUnit, |
-| ULONG_ ul_LowTiming, |
-| ULONG_ ul_HighTiming, |
-| PULONG_ pul_RealLowTiming, |
-| PULONG_ pul_RealHighTiming) |
-+----------------------------------------------------------------------------+
-| Task : Configure the selected PWM (b_PWM) from selected module|
-| (b_ModulNbr). The ul_LowTiming, ul_HighTiming and |
-| ul_TimingUnit determine the low/high timing base for |
-| the period. pul_RealLowTiming, pul_RealHighTiming |
-| return the real timing value. |
-|                     You must call this function before you call any other  |
-|                     function which accesses the PWM.                       |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure|
-| (0 to 3) |
-| unsigned char_ b_PWM : Selected PWM (0 or 1). |
-| unsigned char_ b_ClockSelection : Selection from PCI bus |
-| clock |
-|                                              - APCI1710_30MHZ :            |
-|                                                The PC has a 30 MHz         |
-|                                                PCI bus clock               |
-|                                              - APCI1710_33MHZ :            |
-|                                                The PC has a 33 MHz         |
-|                                                PCI bus clock               |
-|                                              - APCI1710_40MHZ              |
-|                                                The APCI-1710 has an        |
-|                                                integrated 40 MHz           |
-|                                                quartz.                     |
-| unsigned char_ b_TimingUnit : Base timing Unit (0 to 4) |
-| 0 : ns |
-|                                               1 : µs                       |
-| 2 : ms |
-| 3 : s |
-| 4 : mn |
-| ULONG_ ul_LowTiming : Low base timing value. |
-| ULONG_ ul_HighTiming : High base timing value. |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_RealLowTiming : Real low base timing |
-| value. |
-| PULONG_ pul_RealHighTiming : Real high base timing |
-| value. |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a PWM module |
-| -4: PWM selection is wrong |
-| -5: The selected input clock is wrong |
-| -6: Timing Unit selection is wrong |
-| -7: Low base timing selection is wrong |
-| -8: High base timing selection is wrong |
-|                     -9: You cannot use the 40 MHz clock selection with    |
-|                         this board                                        |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InitPWM(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_PWM,
- unsigned char b_ClockSelection,
- unsigned char b_TimingUnit,
- unsigned int ul_LowTiming,
- unsigned int ul_HighTiming,
- unsigned int *pul_RealLowTiming,
- unsigned int *pul_RealHighTiming)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int ul_LowTimerValue = 0;
- unsigned int ul_HighTimerValue = 0;
- unsigned int dw_Command;
- double d_RealLowTiming = 0;
- double d_RealHighTiming = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***************/
- /* Test if PWM */
- /***************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_PWM) {
- /**************************/
- /* Test the PWM selection */
- /**************************/
-
- if (b_PWM <= 1) {
- /******************/
- /* Test the clock */
- /******************/
-
- if ((b_ClockSelection == APCI1710_30MHZ) ||
- (b_ClockSelection == APCI1710_33MHZ) ||
- (b_ClockSelection == APCI1710_40MHZ)) {
- /************************/
- /* Test the timing unit */
- /************************/
-
- if (b_TimingUnit <= 4) {
- /*********************************/
- /* Test the low timing selection */
- /*********************************/
-
-            if (((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 0) && (ul_LowTiming >= 266) && (ul_LowTiming <= 0xFFFFFFFFUL))
-                || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 1) && (ul_LowTiming >= 1) && (ul_LowTiming <= 571230650UL))
-                || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 2) && (ul_LowTiming >= 1) && (ul_LowTiming <= 571230UL))
-                || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 3) && (ul_LowTiming >= 1) && (ul_LowTiming <= 571UL))
-                || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 4) && (ul_LowTiming >= 1) && (ul_LowTiming <= 9UL))
-                || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 0) && (ul_LowTiming >= 242) && (ul_LowTiming <= 0xFFFFFFFFUL))
-                || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 1) && (ul_LowTiming >= 1) && (ul_LowTiming <= 519691043UL))
-                || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 2) && (ul_LowTiming >= 1) && (ul_LowTiming <= 519691UL))
-                || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 3) && (ul_LowTiming >= 1) && (ul_LowTiming <= 520UL))
-                || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 4) && (ul_LowTiming >= 1) && (ul_LowTiming <= 8UL))
-                || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 0) && (ul_LowTiming >= 200) && (ul_LowTiming <= 0xFFFFFFFFUL))
-                || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 1) && (ul_LowTiming >= 1) && (ul_LowTiming <= 429496729UL))
-                || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 2) && (ul_LowTiming >= 1) && (ul_LowTiming <= 429496UL))
-                || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 3) && (ul_LowTiming >= 1) && (ul_LowTiming <= 429UL))
-                || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 4) && (ul_LowTiming >= 1) && (ul_LowTiming <= 7UL))) {
- /**********************************/
- /* Test the High timing selection */
- /**********************************/
-
- if (((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 0) && (ul_HighTiming >= 266) && (ul_HighTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 1) && (ul_HighTiming >= 1) && (ul_HighTiming <= 571230650UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 2) && (ul_HighTiming >= 1) && (ul_HighTiming <= 571230UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 3) && (ul_HighTiming >= 1) && (ul_HighTiming <= 571UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 4) && (ul_HighTiming >= 1) && (ul_HighTiming <= 9UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 0) && (ul_HighTiming >= 242) && (ul_HighTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 1) && (ul_HighTiming >= 1) && (ul_HighTiming <= 519691043UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 2) && (ul_HighTiming >= 1) && (ul_HighTiming <= 519691UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 3) && (ul_HighTiming >= 1) && (ul_HighTiming <= 520UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 4) && (ul_HighTiming >= 1) && (ul_HighTiming <= 8UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 0) && (ul_HighTiming >= 200) && (ul_HighTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 1) && (ul_HighTiming >= 1) && (ul_HighTiming <= 429496729UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 2) && (ul_HighTiming >= 1) && (ul_HighTiming <= 429496UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 3) && (ul_HighTiming >= 1) && (ul_HighTiming <= 429UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 4) && (ul_HighTiming >= 1) && (ul_HighTiming <= 7UL))) {
- /**************************/
- /* Test the board version */
- /**************************/
-
- if (((b_ClockSelection == APCI1710_40MHZ) && (devpriv->s_BoardInfos.b_BoardVersion > 0)) || (b_ClockSelection != APCI1710_40MHZ)) {
-
-                /* Calculate the low division factor */
-
-                fpu_begin();
-
-                switch (b_TimingUnit) {
-                case 0:        /* ns */
-                        /* Timer 0 factor */
-                        ul_LowTimerValue = (unsigned int)(ul_LowTiming * (0.00025 * b_ClockSelection));
-                        /* Round the value */
-                        if ((double)((double)ul_LowTiming * (0.00025 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5)))
-                                ul_LowTimerValue = ul_LowTimerValue + 1;
-                        /* Calculate the real timing */
-                        *pul_RealLowTiming = (unsigned int)(ul_LowTimerValue / (0.00025 * (double)b_ClockSelection));
-                        d_RealLowTiming = (double)ul_LowTimerValue / (0.00025 * (double)b_ClockSelection);
-                        if ((double)((double)ul_LowTimerValue / (0.00025 * (double)b_ClockSelection)) >= (double)((double)*pul_RealLowTiming + 0.5))
-                                *pul_RealLowTiming = *pul_RealLowTiming + 1;
-                        ul_LowTiming = ul_LowTiming - 1;
-                        ul_LowTimerValue = ul_LowTimerValue - 2;
-                        if (b_ClockSelection != APCI1710_40MHZ)
-                                ul_LowTimerValue = (unsigned int)((double)ul_LowTimerValue * 1.007752288);
-                        break;
-
-                case 1:        /* µs */
-                        /* Timer 0 factor */
-                        ul_LowTimerValue = (unsigned int)(ul_LowTiming * (0.25 * b_ClockSelection));
-                        /* Round the value */
-                        if ((double)((double)ul_LowTiming * (0.25 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5)))
-                                ul_LowTimerValue = ul_LowTimerValue + 1;
-                        /* Calculate the real timing */
-                        *pul_RealLowTiming = (unsigned int)(ul_LowTimerValue / (0.25 * (double)b_ClockSelection));
-                        d_RealLowTiming = (double)ul_LowTimerValue / (0.25 * (double)b_ClockSelection);
-                        if ((double)((double)ul_LowTimerValue / (0.25 * (double)b_ClockSelection)) >= (double)((double)*pul_RealLowTiming + 0.5))
-                                *pul_RealLowTiming = *pul_RealLowTiming + 1;
-                        ul_LowTiming = ul_LowTiming - 1;
-                        ul_LowTimerValue = ul_LowTimerValue - 2;
-                        if (b_ClockSelection != APCI1710_40MHZ)
-                                ul_LowTimerValue = (unsigned int)((double)ul_LowTimerValue * 1.007752288);
-                        break;
-
-                case 2:        /* ms */
-                        /* Timer 0 factor */
-                        ul_LowTimerValue = ul_LowTiming * (250.0 * b_ClockSelection);
-                        /* Round the value */
-                        if ((double)((double)ul_LowTiming * (250.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5)))
-                                ul_LowTimerValue = ul_LowTimerValue + 1;
-                        /* Calculate the real timing */
-                        *pul_RealLowTiming = (unsigned int)(ul_LowTimerValue / (250.0 * (double)b_ClockSelection));
-                        d_RealLowTiming = (double)ul_LowTimerValue / (250.0 * (double)b_ClockSelection);
-                        if ((double)((double)ul_LowTimerValue / (250.0 * (double)b_ClockSelection)) >= (double)((double)*pul_RealLowTiming + 0.5))
-                                *pul_RealLowTiming = *pul_RealLowTiming + 1;
-                        ul_LowTiming = ul_LowTiming - 1;
-                        ul_LowTimerValue = ul_LowTimerValue - 2;
-                        if (b_ClockSelection != APCI1710_40MHZ)
-                                ul_LowTimerValue = (unsigned int)((double)ul_LowTimerValue * 1.007752288);
-                        break;
-
-                case 3:        /* s */
-                        /* Timer 0 factor */
-                        ul_LowTimerValue = (unsigned int)(ul_LowTiming * (250000.0 * b_ClockSelection));
-                        /* Round the value */
-                        if ((double)((double)ul_LowTiming * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5)))
-                                ul_LowTimerValue = ul_LowTimerValue + 1;
-                        /* Calculate the real timing */
-                        *pul_RealLowTiming = (unsigned int)(ul_LowTimerValue / (250000.0 * (double)b_ClockSelection));
-                        d_RealLowTiming = (double)ul_LowTimerValue / (250000.0 * (double)b_ClockSelection);
-                        if ((double)((double)ul_LowTimerValue / (250000.0 * (double)b_ClockSelection)) >= (double)((double)*pul_RealLowTiming + 0.5))
-                                *pul_RealLowTiming = *pul_RealLowTiming + 1;
-                        ul_LowTiming = ul_LowTiming - 1;
-                        ul_LowTimerValue = ul_LowTimerValue - 2;
-                        if (b_ClockSelection != APCI1710_40MHZ)
-                                ul_LowTimerValue = (unsigned int)((double)ul_LowTimerValue * 1.007752288);
-                        break;
-
-                case 4:        /* mn */
-                        /* Timer 0 factor */
-                        ul_LowTimerValue = (unsigned int)((ul_LowTiming * 60) * (250000.0 * b_ClockSelection));
-                        /* Round the value */
-                        if ((double)((double)(ul_LowTiming * 60.0) * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5)))
-                                ul_LowTimerValue = ul_LowTimerValue + 1;
-                        /* Calculate the real timing */
-                        *pul_RealLowTiming = (unsigned int)(ul_LowTimerValue / (250000.0 * (double)b_ClockSelection)) / 60;
-                        d_RealLowTiming = ((double)ul_LowTimerValue / (250000.0 * (double)b_ClockSelection)) / 60.0;
-                        if ((double)(((double)ul_LowTimerValue / (250000.0 * (double)b_ClockSelection)) / 60.0) >= (double)((double)*pul_RealLowTiming + 0.5))
-                                *pul_RealLowTiming = *pul_RealLowTiming + 1;
-                        ul_LowTiming = ul_LowTiming - 1;
-                        ul_LowTimerValue = ul_LowTimerValue - 2;
-                        if (b_ClockSelection != APCI1710_40MHZ)
-                                ul_LowTimerValue = (unsigned int)((double)ul_LowTimerValue * 1.007752288);
-                        break;
-                }
-
-                /* Calculate the high division factor */
-
-                switch (b_TimingUnit) {
-                case 0:        /* ns */
-                        /* Timer 0 factor */
-                        ul_HighTimerValue = (unsigned int)(ul_HighTiming * (0.00025 * b_ClockSelection));
-                        /* Round the value */
-                        if ((double)((double)ul_HighTiming * (0.00025 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5)))
-                                ul_HighTimerValue = ul_HighTimerValue + 1;
-                        /* Calculate the real timing */
-                        *pul_RealHighTiming = (unsigned int)(ul_HighTimerValue / (0.00025 * (double)b_ClockSelection));
-                        d_RealHighTiming = (double)ul_HighTimerValue / (0.00025 * (double)b_ClockSelection);
-                        if ((double)((double)ul_HighTimerValue / (0.00025 * (double)b_ClockSelection)) >= (double)((double)*pul_RealHighTiming + 0.5))
-                                *pul_RealHighTiming = *pul_RealHighTiming + 1;
-                        ul_HighTiming = ul_HighTiming - 1;
-                        ul_HighTimerValue = ul_HighTimerValue - 2;
-                        if (b_ClockSelection != APCI1710_40MHZ)
-                                ul_HighTimerValue = (unsigned int)((double)ul_HighTimerValue * 1.007752288);
-                        break;
-
-                case 1:        /* µs */
-                        /* Timer 0 factor */
-                        ul_HighTimerValue = (unsigned int)(ul_HighTiming * (0.25 * b_ClockSelection));
-                        /* Round the value */
-                        if ((double)((double)ul_HighTiming * (0.25 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5)))
-                                ul_HighTimerValue = ul_HighTimerValue + 1;
-                        /* Calculate the real timing */
-                        *pul_RealHighTiming = (unsigned int)(ul_HighTimerValue / (0.25 * (double)b_ClockSelection));
-                        d_RealHighTiming = (double)ul_HighTimerValue / (0.25 * (double)b_ClockSelection);
-                        if ((double)((double)ul_HighTimerValue / (0.25 * (double)b_ClockSelection)) >= (double)((double)*pul_RealHighTiming + 0.5))
-                                *pul_RealHighTiming = *pul_RealHighTiming + 1;
-                        ul_HighTiming = ul_HighTiming - 1;
-                        ul_HighTimerValue = ul_HighTimerValue - 2;
-                        if (b_ClockSelection != APCI1710_40MHZ)
-                                ul_HighTimerValue = (unsigned int)((double)ul_HighTimerValue * 1.007752288);
-                        break;
-
-                case 2:        /* ms */
-                        /* Timer 0 factor */
-                        ul_HighTimerValue = ul_HighTiming * (250.0 * b_ClockSelection);
-                        /* Round the value */
-                        if ((double)((double)ul_HighTiming * (250.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5)))
-                                ul_HighTimerValue = ul_HighTimerValue + 1;
-                        /* Calculate the real timing */
-                        *pul_RealHighTiming = (unsigned int)(ul_HighTimerValue / (250.0 * (double)b_ClockSelection));
-                        d_RealHighTiming = (double)ul_HighTimerValue / (250.0 * (double)b_ClockSelection);
-                        if ((double)((double)ul_HighTimerValue / (250.0 * (double)b_ClockSelection)) >= (double)((double)*pul_RealHighTiming + 0.5))
-                                *pul_RealHighTiming = *pul_RealHighTiming + 1;
-                        ul_HighTiming = ul_HighTiming - 1;
-                        ul_HighTimerValue = ul_HighTimerValue - 2;
-                        if (b_ClockSelection != APCI1710_40MHZ)
-                                ul_HighTimerValue = (unsigned int)((double)ul_HighTimerValue * 1.007752288);
-                        break;
-
-                case 3:        /* s */
-                        /* Timer 0 factor */
-                        ul_HighTimerValue = (unsigned int)(ul_HighTiming * (250000.0 * b_ClockSelection));
-                        /* Round the value */
-                        if ((double)((double)ul_HighTiming * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5)))
-                                ul_HighTimerValue = ul_HighTimerValue + 1;
-                        /* Calculate the real timing */
-                        *pul_RealHighTiming = (unsigned int)(ul_HighTimerValue / (250000.0 * (double)b_ClockSelection));
-                        d_RealHighTiming = (double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection);
-                        if ((double)((double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) >= (double)((double)*pul_RealHighTiming + 0.5))
-                                *pul_RealHighTiming = *pul_RealHighTiming + 1;
-                        ul_HighTiming = ul_HighTiming - 1;
-                        ul_HighTimerValue = ul_HighTimerValue - 2;
-                        if (b_ClockSelection != APCI1710_40MHZ)
-                                ul_HighTimerValue = (unsigned int)((double)ul_HighTimerValue * 1.007752288);
-                        break;
-
-                case 4:        /* mn */
-                        /* Timer 0 factor */
-                        ul_HighTimerValue = (unsigned int)((ul_HighTiming * 60) * (250000.0 * b_ClockSelection));
-                        /* Round the value */
-                        if ((double)((double)(ul_HighTiming * 60.0) * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5)))
-                                ul_HighTimerValue = ul_HighTimerValue + 1;
-                        /* Calculate the real timing */
-                        *pul_RealHighTiming = (unsigned int)(ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) / 60;
-                        d_RealHighTiming = ((double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) / 60.0;
-                        if ((double)(((double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) / 60.0) >= (double)((double)*pul_RealHighTiming + 0.5))
-                                *pul_RealHighTiming = *pul_RealHighTiming + 1;
-                        ul_HighTiming = ul_HighTiming - 1;
-                        ul_HighTimerValue = ul_HighTimerValue - 2;
-                        if (b_ClockSelection != APCI1710_40MHZ)
-                                ul_HighTimerValue = (unsigned int)((double)ul_HighTimerValue * 1.007752288);
-                        break;
-                }
-
- fpu_end();
-                /* Save the clock selection */
-                devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.b_ClockSelection = b_ClockSelection;
-
-                /* Save the timing unit */
-                devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].b_TimingUnit = b_TimingUnit;
-
-                /* Save the low base timing */
-                devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].d_LowTiming = d_RealLowTiming;
-                devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].ul_RealLowTiming = *pul_RealLowTiming;
-
-                /* Save the high base timing */
-                devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].d_HighTiming = d_RealHighTiming;
-                devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].ul_RealHighTiming = *pul_RealHighTiming;
-
-                /* Write the low timing */
-                outl(ul_LowTimerValue, devpriv->s_BoardInfos.ui_Address + 0 + (20 * b_PWM) + (64 * b_ModulNbr));
-
-                /* Write the high timing */
-                outl(ul_HighTimerValue, devpriv->s_BoardInfos.ui_Address + 4 + (20 * b_PWM) + (64 * b_ModulNbr));
-
-                /* Set the clock selection */
-                dw_Command = inl(devpriv->s_BoardInfos.ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr));
-                dw_Command = dw_Command & 0x7F;
-
-                if (b_ClockSelection == APCI1710_40MHZ)
-                        dw_Command = dw_Command | 0x80;
-
-                /* Set the clock selection */
-                outl(dw_Command, devpriv->s_BoardInfos.ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr));
-
-                /* PWM init. */
-                devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].b_PWMInit = 1;
-            } else {
-                /* You cannot use the 40 MHz clock selection with this board */
-                DPRINTK("You can not used the 40MHz clock selection with this board\n");
-                i_ReturnValue = -9;
-            }
-        } else {
-            /* High base timing selection is wrong */
-            DPRINTK("High base timing selection is wrong\n");
-            i_ReturnValue = -8;
-        }
- } else {
- /**************************************/
- /* Low base timing selection is wrong */
- /**************************************/
- DPRINTK("Low base timing selection is wrong\n");
- i_ReturnValue = -7;
- }
- } /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
- else {
- /**********************************/
- /* Timing unit selection is wrong */
- /**********************************/
- DPRINTK("Timing unit selection is wrong\n");
- i_ReturnValue = -6;
- } /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
- } /* if ((b_ClockSelection == APCI1710_30MHZ) || (b_ClockSelection == APCI1710_33MHZ) || (b_ClockSelection == APCI1710_40MHZ)) */
- else {
- /*******************************/
- /* The selected clock is wrong */
- /*******************************/
- DPRINTK("The selected clock is wrong\n");
- i_ReturnValue = -5;
- } /* if ((b_ClockSelection == APCI1710_30MHZ) || (b_ClockSelection == APCI1710_33MHZ) || (b_ClockSelection == APCI1710_40MHZ)) */
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- else {
- /******************************/
- /* Tor PWM selection is wrong */
- /******************************/
- DPRINTK("Tor PWM selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- } else {
- /**********************************/
- /* The module is not a PWM module */
- /**********************************/
- DPRINTK("The module is not a PWM module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
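
Editor's note: the per-unit factors used above (0.00025, 0.25, 250.0 and 250000.0 times the clock in MHz for ns, µs, ms and s) all amount to counting timer ticks at one quarter of the selected clock. A minimal sketch of the nanosecond branch, with a hypothetical helper name, keeping the driver's round-to-nearest step, the final "minus 2" and the 1.007752288 correction applied for the PCI-derived clocks:

        /* Illustrative sketch only -- not part of the removed driver code. */
        static unsigned int apci1710_pwm_ns_to_reg(unsigned int timing_ns, unsigned char clock_mhz)
        {
                double ticks = (double)timing_ns * (0.00025 * (double)clock_mhz);
                unsigned int reg = (unsigned int)ticks;

                if (ticks >= (double)reg + 0.5)         /* round to the nearest tick */
                        reg = reg + 1;
                reg = reg - 2;                          /* the driver writes the computed value minus 2 */
                if (clock_mhz != APCI1710_40MHZ)        /* quartz correction for the 30/33 MHz PCI clocks */
                        reg = (unsigned int)((double)reg * 1.007752288);
                return reg;
        }
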
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetPWMInitialisation |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PWM, |
-| unsigned char *_ pb_TimingUnit, |
-| PULONG_ pul_LowTiming, |
-| PULONG_ pul_HighTiming, |
-| unsigned char *_ pb_StartLevel, |
-| unsigned char *_ pb_StopMode, |
-| unsigned char *_ pb_StopLevel, |
-| unsigned char *_ pb_ExternGate, |
-| unsigned char *_ pb_InterruptEnable, |
-| unsigned char *_ pb_Enable) |
-+----------------------------------------------------------------------------+
-| Task : Return the PWM (b_PWM) initialisation from selected |
-|                      module (b_ModulNbr). You must call the               |
-|                      "i_APCI1710_InitPWM" function before calling this    |
-|                      function.                                            |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
-| unsigned char_ b_PWM : Selected PWM (0 or 1) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_TimingUnit : Base timing Unit (0 to 4) |
-| 0 : ns |
-|                                              1 : µs                       |
-| 2 : ms |
-| 3 : s |
-| 4 : mn |
-| PULONG_ pul_LowTiming : Low base timing value. |
-| PULONG_ pul_HighTiming : High base timing value. |
-| unsigned char *_ pb_StartLevel : Start period level |
-| selection |
-| 0 : The period start |
-| with a low level |
-| 1 : The period start |
-| with a high level|
-| unsigned char *_ pb_StopMode : Stop mode selection |
-| 0 : The PWM is stopped |
-| directly after the |
-| "i_APCI1710_DisablePWM"|
-| function and break the|
-| last period |
-| 1 : After the |
-| "i_APCI1710_DisablePWM"|
-| function the PWM is |
-| stopped at the end |
-| from last period cycle|
-| unsigned char *_ pb_StopLevel : Stop PWM level selection |
-| 0 : The output signal |
-| keep the level after|
-| the |
-| "i_APCI1710_DisablePWM"|
-| function |
-| 1 : The output signal is|
-| set to low after the|
-| "i_APCI1710_DisablePWM"|
-| function |
-| 2 : The output signal is|
-| set to high after |
-| the |
-| "i_APCI1710_DisablePWM"|
-| function |
-| unsigned char *_ pb_ExternGate : Extern gate action |
-| selection |
-| 0 : Extern gate signal |
-| not used. |
-| 1 : Extern gate signal |
-| used. |
-| unsigned char *_ pb_InterruptEnable : Enable or disable the PWM |
-| interrupt. |
-| - APCI1710_ENABLE : |
-| Enable the PWM interrupt|
-|                                          An interrupt occurs after        |
-| each period |
-| - APCI1710_DISABLE : |
-| Disable the PWM |
-| interrupt |
-| unsigned char *_ pb_Enable : Indicate if the PWM is |
-|                                          enabled or not                   |
-| 0 : PWM not enabled |
-| 1 : PWM enabled |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a PWM module |
-| -4: PWM selection is wrong |
-| -5: PWM not initialised see function |
-| "i_APCI1710_InitPWM" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_GetPWMInitialisation(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_PWM,
- unsigned char *pb_TimingUnit,
- unsigned int *pul_LowTiming,
- unsigned int *pul_HighTiming,
- unsigned char *pb_StartLevel,
- unsigned char *pb_StopMode,
- unsigned char *pb_StopLevel,
- unsigned char *pb_ExternGate,
- unsigned char *pb_InterruptEnable,
- unsigned char *pb_Enable)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_Status;
- unsigned int dw_Command;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***************/
- /* Test if PWM */
- /***************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_PWM) {
- /**************************/
- /* Test the PWM selection */
- /**************************/
-
- if (b_PWM <= 1) {
- /***************************/
- /* Test if PWM initialised */
- /***************************/
-
- dw_Status = inl(devpriv->s_BoardInfos.
- ui_Address + 12 + (20 * b_PWM) +
- (64 * b_ModulNbr));
-
- if (dw_Status & 0x10) {
- /***********************/
- /* Read the low timing */
- /***********************/
-
- *pul_LowTiming =
- inl(devpriv->s_BoardInfos.
- ui_Address + 0 + (20 * b_PWM) +
- (64 * b_ModulNbr));
-
- /************************/
- /* Read the high timing */
- /************************/
-
- *pul_HighTiming =
- inl(devpriv->s_BoardInfos.
- ui_Address + 4 + (20 * b_PWM) +
- (64 * b_ModulNbr));
-
- /********************/
- /* Read the command */
- /********************/
-
- dw_Command = inl(devpriv->s_BoardInfos.
- ui_Address + 8 + (20 * b_PWM) +
- (64 * b_ModulNbr));
-
- *pb_StartLevel =
- (unsigned char) ((dw_Command >> 5) & 1);
- *pb_StopMode =
- (unsigned char) ((dw_Command >> 0) & 1);
- *pb_StopLevel =
- (unsigned char) ((dw_Command >> 1) & 1);
- *pb_ExternGate =
- (unsigned char) ((dw_Command >> 4) & 1);
- *pb_InterruptEnable =
- (unsigned char) ((dw_Command >> 3) & 1);
-
- if (*pb_StopLevel) {
- *pb_StopLevel =
- *pb_StopLevel +
- (unsigned char) ((dw_Command >>
- 2) & 1);
- }
-
- /********************/
- /* Read the command */
- /********************/
-
- dw_Command = inl(devpriv->s_BoardInfos.
- ui_Address + 8 + (20 * b_PWM) +
- (64 * b_ModulNbr));
-
- *pb_Enable =
- (unsigned char) ((dw_Command >> 0) & 1);
-
- *pb_TimingUnit = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_PWMModuleInfo.
- s_PWMInfo[b_PWM].b_TimingUnit;
- } /* if (dw_Status & 0x10) */
- else {
- /***********************/
- /* PWM not initialised */
- /***********************/
- DPRINTK("PWM not initialised\n");
- i_ReturnValue = -5;
- } /* if (dw_Status & 0x10) */
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- else {
- /******************************/
- /* Tor PWM selection is wrong */
- /******************************/
- DPRINTK("Tor PWM selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- } else {
- /**********************************/
- /* The module is not a PWM module */
- /**********************************/
- DPRINTK("The module is not a PWM module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
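
Editor's note: the command word read back at offset 8 above is unpacked bit by bit into the output parameters. A minimal sketch of that decoding, with hypothetical struct and helper names; the bit positions are taken from the code:

        /* Illustrative sketch only -- not part of the removed driver code. */
        struct apci1710_pwm_cfg {
                unsigned char start_level;      /* bit 5: period starts with a high level */
                unsigned char stop_mode;        /* bit 0: stop at the end of the period */
                unsigned char stop_level;       /* bits 1-2: 0 keep, 1 force low, 2 force high */
                unsigned char extern_gate;      /* bit 4: external gate signal used */
                unsigned char irq_enable;       /* bit 3: interrupt after each period */
        };

        static void apci1710_pwm_decode_cmd(unsigned int cmd, struct apci1710_pwm_cfg *cfg)
        {
                cfg->start_level = (cmd >> 5) & 1;
                cfg->stop_mode = (cmd >> 0) & 1;
                cfg->stop_level = (cmd >> 1) & 1;
                cfg->extern_gate = (cmd >> 4) & 1;
                cfg->irq_enable = (cmd >> 3) & 1;
                if (cfg->stop_level)            /* a second bit distinguishes "force high" */
                        cfg->stop_level += (cmd >> 2) & 1;
        }
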
-
-/*
- * Pwm Init and Get Pwm Initialisation
- */
-static int i_APCI1710_InsnConfigPWM(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned char b_ConfigType;
- int i_ReturnValue = 0;
- b_ConfigType = CR_CHAN(insn->chanspec);
-
- switch (b_ConfigType) {
- case APCI1710_PWM_INIT:
- i_ReturnValue = i_APCI1710_InitPWM(dev, (unsigned char) CR_AREF(insn->chanspec), /* b_ModulNbr */
- (unsigned char) data[0], /* b_PWM */
- (unsigned char) data[1], /* b_ClockSelection */
- (unsigned char) data[2], /* b_TimingUnit */
- (unsigned int) data[3], /* ul_LowTiming */
- (unsigned int) data[4], /* ul_HighTiming */
- (unsigned int *) &data[0], /* pul_RealLowTiming */
- (unsigned int *) &data[1] /* pul_RealHighTiming */
- );
- break;
-
- case APCI1710_PWM_GETINITDATA:
- i_ReturnValue = i_APCI1710_GetPWMInitialisation(dev, (unsigned char) CR_AREF(insn->chanspec), /* b_ModulNbr */
- (unsigned char) data[0], /* b_PWM */
- (unsigned char *) &data[0], /* pb_TimingUnit */
- (unsigned int *) &data[1], /* pul_LowTiming */
- (unsigned int *) &data[2], /* pul_HighTiming */
- (unsigned char *) &data[3], /* pb_StartLevel */
- (unsigned char *) &data[4], /* pb_StopMode */
- (unsigned char *) &data[5], /* pb_StopLevel */
- (unsigned char *) &data[6], /* pb_ExternGate */
- (unsigned char *) &data[7], /* pb_InterruptEnable */
- (unsigned char *) &data[8] /* pb_Enable */
- );
- break;
-
- default:
- printk(" Config Parameter Wrong\n");
- }
-
- if (i_ReturnValue >= 0)
- i_ReturnValue = insn->n;
- return i_ReturnValue;
-}
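
Editor's note: this dispatcher is reached through a comedi INSN_CONFIG instruction, with the config type in the channel field and the module number in the aref field of the chanspec. A rough user-space sketch, assuming the usual comedilib plumbing (device handle, subdevice number and the helper name are placeholders):

        /* Illustrative sketch only -- not part of the removed driver code. */
        #include <string.h>
        #include <comedilib.h>

        static int apci1710_pwm_init_example(comedi_t *dev, unsigned int subdev, unsigned int module)
        {
                /* data[] layout as consumed above: PWM, clock, unit (1 = µs), low timing, high timing */
                lsampl_t data[5] = { 0, 30 /* APCI1710_30MHZ */, 1, 50, 50 };
                comedi_insn insn;

                memset(&insn, 0, sizeof(insn));
                insn.insn = INSN_CONFIG;
                insn.subdev = subdev;
                insn.n = 5;
                insn.data = data;
                insn.chanspec = CR_PACK(0 /* APCI1710_PWM_INIT */, 0, module);

                /* on success, data[0]/data[1] are overwritten with the real low/high timings */
                return comedi_do_insn(dev, &insn);
        }
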
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_EnablePWM |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PWM, |
-| unsigned char_ b_StartLevel, |
-| unsigned char_ b_StopMode, |
-| unsigned char_ b_StopLevel, |
-| unsigned char_ b_ExternGate, |
-| unsigned char_ b_InterruptEnable) |
-+----------------------------------------------------------------------------+
-| Task : Enable the selected PWM (b_PWM) from selected module |
-|                      (b_ModulNbr). You must call the "i_APCI1710_InitPWM" |
-|                      function before calling this function.               |
-|                      If you enable the PWM interrupt, the PWM generates   |
-|                      an interrupt after each period.                      |
-| See function "i_APCI1710_SetBoardIntRoutineX" and the |
-| Interrupt mask description chapter. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number |
-| (0 to 3) |
-| unsigned char_ b_PWM : Selected PWM (0 or 1) |
-| unsigned char_ b_StartLevel : Start period level selection |
-| 0 : The period start with a |
-| low level |
-| 1 : The period start with a |
-| high level |
-| unsigned char_ b_StopMode : Stop mode selection |
-| 0 : The PWM is stopped |
-| directly after the |
-| "i_APCI1710_DisablePWM" |
-| function and break the |
-| last period |
-| 1 : After the |
-| "i_APCI1710_DisablePWM" |
-| function the PWM is |
-| stopped at the end from|
-| last period cycle. |
-| unsigned char_ b_StopLevel : Stop PWM level selection |
-| 0 : The output signal keep |
-| the level after the |
-| "i_APCI1710_DisablePWM" |
-| function |
-| 1 : The output signal is set|
-| to low after the |
-| "i_APCI1710_DisablePWM" |
-| function |
-| 2 : The output signal is set|
-| to high after the |
-| "i_APCI1710_DisablePWM" |
-| function |
-| unsigned char_ b_ExternGate : Extern gate action selection |
-| 0 : Extern gate signal not |
-| used. |
-| 1 : Extern gate signal used.|
-| unsigned char_ b_InterruptEnable : Enable or disable the PWM |
-| interrupt. |
-| - APCI1710_ENABLE : |
-| Enable the PWM interrupt |
-|                                           An interrupt occurs after       |
-| each period |
-| - APCI1710_DISABLE : |
-| Disable the PWM interrupt |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a PWM module |
-| -4: PWM selection is wrong |
-| -5: PWM not initialised see function |
-| "i_APCI1710_InitPWM" |
-| -6: PWM start level selection is wrong |
-| -7: PWM stop mode selection is wrong |
-| -8: PWM stop level selection is wrong |
-| -9: Extern gate signal selection is wrong |
-| -10: Interrupt parameter is wrong |
-| -11: Interrupt function not initialised. |
-| See function "i_APCI1710_SetBoardIntRoutineX" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_EnablePWM(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_PWM,
- unsigned char b_StartLevel,
- unsigned char b_StopMode,
- unsigned char b_StopLevel,
- unsigned char b_ExternGate,
- unsigned char b_InterruptEnable)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_Status;
- unsigned int dw_Command;
-
- devpriv->tsk_Current = current; /* Save the current process task structure */
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***************/
- /* Test if PWM */
- /***************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_PWM) {
- /**************************/
- /* Test the PWM selection */
- /**************************/
-
- if (b_PWM <= 1) {
- /***************************/
- /* Test if PWM initialised */
- /***************************/
-
- dw_Status = inl(devpriv->s_BoardInfos.
- ui_Address + 12 + (20 * b_PWM) +
- (64 * b_ModulNbr));
-
- if (dw_Status & 0x10) {
- /**********************************/
- /* Test the start level selection */
- /**********************************/
-
- if (b_StartLevel <= 1) {
- /**********************/
- /* Test the stop mode */
- /**********************/
-
- if (b_StopMode <= 1) {
- /***********************/
- /* Test the stop level */
- /***********************/
-
- if (b_StopLevel <= 2) {
- /*****************************/
- /* Test the extern gate mode */
- /*****************************/
-
-                    if (b_ExternGate <= 1) {
-                        /* Test the interrupt action */
-                        if (b_InterruptEnable == APCI1710_ENABLE || b_InterruptEnable == APCI1710_DISABLE) {
-                            /* Test if interrupt function initialised */
-
-                            /* Read the command */
-                            dw_Command = inl(devpriv->s_BoardInfos.ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr));
-                            dw_Command = dw_Command & 0x80;
-
-                            /* Make the command */
-                            dw_Command = dw_Command | b_StopMode | (b_InterruptEnable << 3) | (b_ExternGate << 4) | (b_StartLevel << 5);
-
-                            if (b_StopLevel & 3) {
-                                dw_Command = dw_Command | 2;
-
-                                if (b_StopLevel & 2)
-                                    dw_Command = dw_Command | 4;
-                            }
-
-                            devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].b_InterruptEnable = b_InterruptEnable;
-
-                            /* Set the command */
-                            outl(dw_Command, devpriv->s_BoardInfos.ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr));
-
-                            /* Enable the PWM */
-                            outl(1, devpriv->s_BoardInfos.ui_Address + 12 + (20 * b_PWM) + (64 * b_ModulNbr));
-                        } /* if (b_InterruptEnable == APCI1710_ENABLE || b_InterruptEnable == APCI1710_DISABLE) */
-                        else {
-                            /* Interrupt parameter is wrong */
-                            DPRINTK("Interrupt parameter is wrong\n");
-                            i_ReturnValue = -10;
-                        } /* if (b_InterruptEnable == APCI1710_ENABLE || b_InterruptEnable == APCI1710_DISABLE) */
-                    } /* if (b_ExternGate >= 0 && b_ExternGate <= 1) */
-                    else {
-                        /* Extern gate signal selection is wrong */
-                        DPRINTK("Extern gate signal selection is wrong\n");
-                        i_ReturnValue = -9;
-                    } /* if (b_ExternGate >= 0 && b_ExternGate <= 1) */
-                } /* if (b_StopLevel >= 0 && b_StopLevel <= 2) */
-                else {
-                    /* PWM stop level selection is wrong */
-                    DPRINTK("PWM stop level selection is wrong\n");
-                    i_ReturnValue = -8;
-                } /* if (b_StopLevel >= 0 && b_StopLevel <= 2) */
- } /* if (b_StopMode >= 0 && b_StopMode <= 1) */
- else {
- /************************************/
- /* PWM stop mode selection is wrong */
- /************************************/
- DPRINTK("PWM stop mode selection is wrong\n");
- i_ReturnValue = -7;
- } /* if (b_StopMode >= 0 && b_StopMode <= 1) */
- } /* if (b_StartLevel >= 0 && b_StartLevel <= 1) */
- else {
- /**************************************/
- /* PWM start level selection is wrong */
- /**************************************/
- DPRINTK("PWM start level selection is wrong\n");
- i_ReturnValue = -6;
- } /* if (b_StartLevel >= 0 && b_StartLevel <= 1) */
- } /* if (dw_Status & 0x10) */
- else {
- /***********************/
- /* PWM not initialised */
- /***********************/
- DPRINTK("PWM not initialised\n");
- i_ReturnValue = -5;
- } /* if (dw_Status & 0x10) */
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- else {
- /******************************/
- /* Tor PWM selection is wrong */
- /******************************/
- DPRINTK("Tor PWM selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- } else {
- /**********************************/
- /* The module is not a PWM module */
- /**********************************/
- DPRINTK("The module is not a PWM module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
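
Editor's note: conversely, i_APCI1710_EnablePWM() above assembles the command word from its parameters while preserving only the 40 MHz clock flag (bit 7) set by i_APCI1710_InitPWM(). A minimal sketch with a hypothetical helper name:

        /* Illustrative sketch only -- not part of the removed driver code. */
        static unsigned int apci1710_pwm_make_cmd(unsigned int old_cmd,
                                                  unsigned char start_level,
                                                  unsigned char stop_mode,
                                                  unsigned char stop_level,
                                                  unsigned char extern_gate,
                                                  unsigned char irq_enable)
        {
                unsigned int cmd = old_cmd & 0x80;      /* keep only the clock selection bit */

                cmd |= stop_mode | (irq_enable << 3) | (extern_gate << 4) | (start_level << 5);
                if (stop_level & 3) {                   /* 1: force output low, 2: force output high */
                        cmd |= 2;
                        if (stop_level & 2)
                                cmd |= 4;
                }
                return cmd;
        }
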
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_DisablePWM (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PWM) |
-+----------------------------------------------------------------------------+
-| Task : Disable the selected PWM (b_PWM) from selected module |
-|                      (b_ModulNbr). The output signal level depends on     |
-|                      the initialisation done by "i_APCI1710_EnablePWM".   |
-|                      See the b_StartLevel, b_StopMode and b_StopLevel     |
-|                      parameters of this function.                         |
-+----------------------------------------------------------------------------+
-| Input Parameters  : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
-| unsigned char_ b_PWM : Selected PWM (0 or 1) |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a PWM module |
-| -4: PWM selection is wrong |
-| -5: PWM not initialised see function |
-| "i_APCI1710_InitPWM" |
-| -6: PWM not enabled see function |
-| "i_APCI1710_EnablePWM" |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_DisablePWM(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_PWM)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_Status;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***************/
- /* Test if PWM */
- /***************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_PWM) {
- /**************************/
- /* Test the PWM selection */
- /**************************/
-
- if (b_PWM <= 1) {
- /***************************/
- /* Test if PWM initialised */
- /***************************/
-
- dw_Status = inl(devpriv->s_BoardInfos.
- ui_Address + 12 + (20 * b_PWM) +
- (64 * b_ModulNbr));
-
- if (dw_Status & 0x10) {
- /***********************/
- /* Test if PWM enabled */
- /***********************/
-
- if (dw_Status & 0x1) {
- /*******************/
- /* Disable the PWM */
- /*******************/
- outl(0, devpriv->s_BoardInfos.
- ui_Address + 12 +
- (20 * b_PWM) +
- (64 * b_ModulNbr));
- } /* if (dw_Status & 0x1) */
- else {
- /*******************/
- /* PWM not enabled */
- /*******************/
- DPRINTK("PWM not enabled\n");
- i_ReturnValue = -6;
- } /* if (dw_Status & 0x1) */
- } /* if (dw_Status & 0x10) */
- else {
- /***********************/
- /* PWM not initialised */
- /***********************/
- DPRINTK(" PWM not initialised\n");
- i_ReturnValue = -5;
- } /* if (dw_Status & 0x10) */
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- else {
- /******************************/
- /* Tor PWM selection is wrong */
- /******************************/
- DPRINTK("Tor PWM selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- } else {
- /**********************************/
- /* The module is not a PWM module */
- /**********************************/
- DPRINTK("The module is not a PWM module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
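
Editor's note: every register access in the PWM functions above uses the same addressing scheme: 64 bytes of I/O space per module and 20 bytes per PWM, with the low timing at offset 0, the high timing at offset 4, the command word at offset 8 and the status/enable register at offset 12 (bit 4 = initialised, bit 0 = enabled). A minimal sketch, helper name hypothetical:

        /* Illustrative sketch only -- not part of the removed driver code. */
        static unsigned long apci1710_pwm_reg(unsigned long iobase, unsigned char module,
                                              unsigned char pwm, unsigned int reg)
        {
                /* reg: 0 = low timing, 4 = high timing, 8 = command, 12 = status/enable */
                return iobase + reg + (20 * pwm) + (64 * module);
        }
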
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_SetNewPWMTiming |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PWM, |
-| unsigned char_ b_ClockSelection, |
-| unsigned char_ b_TimingUnit, |
-| ULONG_ ul_LowTiming, |
-| ULONG_ ul_HighTiming) |
-+----------------------------------------------------------------------------+
-| Task : Set a new timing. The ul_LowTiming, ul_HighTiming and |
-| ul_TimingUnit determine the low/high timing base for |
-| the period. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Module number to configure|
-| (0 to 3) |
-| unsigned char_ b_PWM : Selected PWM (0 or 1). |
-| unsigned char_ b_TimingUnit : Base timing Unit (0 to 4) |
-| 0 : ns |
-|                                              1 : µs                       |
-| 2 : ms |
-| 3 : s |
-| 4 : mn |
-| ULONG_ ul_LowTiming : Low base timing value. |
-| ULONG_ ul_HighTiming : High base timing value. |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a PWM module |
-| -4: PWM selection is wrong |
-| -5: PWM not initialised |
-| -6: Timing Unit selection is wrong |
-| -7: Low base timing selection is wrong |
-| -8: High base timing selection is wrong |
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_SetNewPWMTiming(struct comedi_device *dev,
- unsigned char b_ModulNbr,
- unsigned char b_PWM,
- unsigned char b_TimingUnit,
- unsigned int ul_LowTiming,
- unsigned int ul_HighTiming)
-{
- struct addi_private *devpriv = dev->private;
- unsigned char b_ClockSelection;
- int i_ReturnValue = 0;
- unsigned int ul_LowTimerValue = 0;
- unsigned int ul_HighTimerValue = 0;
- unsigned int ul_RealLowTiming = 0;
- unsigned int ul_RealHighTiming = 0;
- unsigned int dw_Status;
- unsigned int dw_Command;
- double d_RealLowTiming = 0;
- double d_RealHighTiming = 0;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***************/
- /* Test if PWM */
- /***************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_PWM) {
- /**************************/
- /* Test the PWM selection */
- /**************************/
-
- if (b_PWM <= 1) {
- /***************************/
- /* Test if PWM initialised */
- /***************************/
-
- dw_Status = inl(devpriv->s_BoardInfos.
- ui_Address + 12 + (20 * b_PWM) +
- (64 * b_ModulNbr));
-
- if (dw_Status & 0x10) {
- b_ClockSelection = devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_PWMModuleInfo.
- b_ClockSelection;
-
- /************************/
- /* Test the timing unit */
- /************************/
-
- if (b_TimingUnit <= 4) {
- /*********************************/
- /* Test the low timing selection */
- /*********************************/
-
-            if (((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 0) && (ul_LowTiming >= 266) && (ul_LowTiming <= 0xFFFFFFFFUL))
-                || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 1) && (ul_LowTiming >= 1) && (ul_LowTiming <= 571230650UL))
-                || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 2) && (ul_LowTiming >= 1) && (ul_LowTiming <= 571230UL))
-                || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 3) && (ul_LowTiming >= 1) && (ul_LowTiming <= 571UL))
-                || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 4) && (ul_LowTiming >= 1) && (ul_LowTiming <= 9UL))
-                || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 0) && (ul_LowTiming >= 242) && (ul_LowTiming <= 0xFFFFFFFFUL))
-                || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 1) && (ul_LowTiming >= 1) && (ul_LowTiming <= 519691043UL))
-                || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 2) && (ul_LowTiming >= 1) && (ul_LowTiming <= 519691UL))
-                || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 3) && (ul_LowTiming >= 1) && (ul_LowTiming <= 520UL))
-                || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 4) && (ul_LowTiming >= 1) && (ul_LowTiming <= 8UL))
-                || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 0) && (ul_LowTiming >= 200) && (ul_LowTiming <= 0xFFFFFFFFUL))
-                || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 1) && (ul_LowTiming >= 1) && (ul_LowTiming <= 429496729UL))
-                || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 2) && (ul_LowTiming >= 1) && (ul_LowTiming <= 429496UL))
-                || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 3) && (ul_LowTiming >= 1) && (ul_LowTiming <= 429UL))
-                || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 4) && (ul_LowTiming >= 1) && (ul_LowTiming <= 7UL))) {
- /**********************************/
- /* Test the High timing selection */
- /**********************************/
-
- if (((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 0) && (ul_HighTiming >= 266) && (ul_HighTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 1) && (ul_HighTiming >= 1) && (ul_HighTiming <= 571230650UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 2) && (ul_HighTiming >= 1) && (ul_HighTiming <= 571230UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 3) && (ul_HighTiming >= 1) && (ul_HighTiming <= 571UL)) || ((b_ClockSelection == APCI1710_30MHZ) && (b_TimingUnit == 4) && (ul_HighTiming >= 1) && (ul_HighTiming <= 9UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 0) && (ul_HighTiming >= 242) && (ul_HighTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 1) && (ul_HighTiming >= 1) && (ul_HighTiming <= 519691043UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 2) && (ul_HighTiming >= 1) && (ul_HighTiming <= 519691UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 3) && (ul_HighTiming >= 1) && (ul_HighTiming <= 520UL)) || ((b_ClockSelection == APCI1710_33MHZ) && (b_TimingUnit == 4) && (ul_HighTiming >= 1) && (ul_HighTiming <= 8UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 0) && (ul_HighTiming >= 200) && (ul_HighTiming <= 0xFFFFFFFFUL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 1) && (ul_HighTiming >= 1) && (ul_HighTiming <= 429496729UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 2) && (ul_HighTiming >= 1) && (ul_HighTiming <= 429496UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 3) && (ul_HighTiming >= 1) && (ul_HighTiming <= 429UL)) || ((b_ClockSelection == APCI1710_40MHZ) && (b_TimingUnit == 4) && (ul_HighTiming >= 1) && (ul_HighTiming <= 7UL))) {
-                /* Calculate the low division factor */
-
- fpu_begin();
-                switch (b_TimingUnit) {
-                case 0:        /* ns */
-                        /* Timer 0 factor */
-                        ul_LowTimerValue = (unsigned int)(ul_LowTiming * (0.00025 * b_ClockSelection));
-                        /* Round the value */
-                        if ((double)((double)ul_LowTiming * (0.00025 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5)))
-                                ul_LowTimerValue = ul_LowTimerValue + 1;
-                        /* Calculate the real timing */
-                        ul_RealLowTiming = (unsigned int)(ul_LowTimerValue / (0.00025 * (double)b_ClockSelection));
-                        d_RealLowTiming = (double)ul_LowTimerValue / (0.00025 * (double)b_ClockSelection);
-                        if ((double)((double)ul_LowTimerValue / (0.00025 * (double)b_ClockSelection)) >= (double)((double)ul_RealLowTiming + 0.5))
-                                ul_RealLowTiming = ul_RealLowTiming + 1;
-                        ul_LowTiming = ul_LowTiming - 1;
-                        ul_LowTimerValue = ul_LowTimerValue - 2;
-                        if (b_ClockSelection != APCI1710_40MHZ)
-                                ul_LowTimerValue = (unsigned int)((double)ul_LowTimerValue * 1.007752288);
-                        break;
-
-                case 1:        /* µs */
-                        /* Timer 0 factor */
-                        ul_LowTimerValue = (unsigned int)(ul_LowTiming * (0.25 * b_ClockSelection));
-                        /* Round the value */
-                        if ((double)((double)ul_LowTiming * (0.25 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5)))
-                                ul_LowTimerValue = ul_LowTimerValue + 1;
-                        /* Calculate the real timing */
-                        ul_RealLowTiming = (unsigned int)(ul_LowTimerValue / (0.25 * (double)b_ClockSelection));
-                        d_RealLowTiming = (double)ul_LowTimerValue / (0.25 * (double)b_ClockSelection);
-                        if ((double)((double)ul_LowTimerValue / (0.25 * (double)b_ClockSelection)) >= (double)((double)ul_RealLowTiming + 0.5))
-                                ul_RealLowTiming = ul_RealLowTiming + 1;
-                        ul_LowTiming = ul_LowTiming - 1;
-                        ul_LowTimerValue = ul_LowTimerValue - 2;
-                        if (b_ClockSelection != APCI1710_40MHZ)
-                                ul_LowTimerValue = (unsigned int)((double)ul_LowTimerValue * 1.007752288);
-                        break;
-
-                case 2:        /* ms */
-                        /* Timer 0 factor */
-                        ul_LowTimerValue = ul_LowTiming * (250.0 * b_ClockSelection);
-                        /* Round the value */
-                        if ((double)((double)ul_LowTiming * (250.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5)))
-                                ul_LowTimerValue = ul_LowTimerValue + 1;
-                        /* Calculate the real timing */
-                        ul_RealLowTiming = (unsigned int)(ul_LowTimerValue / (250.0 * (double)b_ClockSelection));
-                        d_RealLowTiming = (double)ul_LowTimerValue / (250.0 * (double)b_ClockSelection);
-                        if ((double)((double)ul_LowTimerValue / (250.0 * (double)b_ClockSelection)) >= (double)((double)ul_RealLowTiming + 0.5))
-                                ul_RealLowTiming = ul_RealLowTiming + 1;
-                        ul_LowTiming = ul_LowTiming - 1;
-                        ul_LowTimerValue = ul_LowTimerValue - 2;
-                        if (b_ClockSelection != APCI1710_40MHZ)
-                                ul_LowTimerValue = (unsigned int)((double)ul_LowTimerValue * 1.007752288);
-                        break;
-
-                case 3:        /* s */
-                        /* Timer 0 factor */
-                        ul_LowTimerValue = (unsigned int)(ul_LowTiming * (250000.0 * b_ClockSelection));
-                        /* Round the value */
-                        if ((double)((double)ul_LowTiming * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5)))
-                                ul_LowTimerValue = ul_LowTimerValue + 1;
-                        /* Calculate the real timing */
-                        ul_RealLowTiming = (unsigned int)(ul_LowTimerValue / (250000.0 * (double)b_ClockSelection));
-                        d_RealLowTiming = (double)ul_LowTimerValue / (250000.0 * (double)b_ClockSelection);
-                        if ((double)((double)ul_LowTimerValue / (250000.0 * (double)b_ClockSelection)) >= (double)((double)ul_RealLowTiming + 0.5))
-                                ul_RealLowTiming = ul_RealLowTiming + 1;
-                        ul_LowTiming = ul_LowTiming - 1;
-                        ul_LowTimerValue = ul_LowTimerValue - 2;
-                        if (b_ClockSelection != APCI1710_40MHZ)
-                                ul_LowTimerValue = (unsigned int)((double)ul_LowTimerValue * 1.007752288);
-                        break;
-
-                case 4:        /* mn */
-                        /* Timer 0 factor */
-                        ul_LowTimerValue = (unsigned int)((ul_LowTiming * 60) * (250000.0 * b_ClockSelection));
-                        /* Round the value */
-                        if ((double)((double)(ul_LowTiming * 60.0) * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_LowTimerValue + 0.5)))
-                                ul_LowTimerValue = ul_LowTimerValue + 1;
-                        /* Calculate the real timing */
-                        ul_RealLowTiming = (unsigned int)(ul_LowTimerValue / (250000.0 * (double)b_ClockSelection)) / 60;
-                        d_RealLowTiming = ((double)ul_LowTimerValue / (250000.0 * (double)b_ClockSelection)) / 60.0;
-                        if ((double)(((double)ul_LowTimerValue / (250000.0 * (double)b_ClockSelection)) / 60.0) >= (double)((double)ul_RealLowTiming + 0.5))
-                                ul_RealLowTiming = ul_RealLowTiming + 1;
-                        ul_LowTiming = ul_LowTiming - 1;
-                        ul_LowTimerValue = ul_LowTimerValue - 2;
-                        if (b_ClockSelection != APCI1710_40MHZ)
-                                ul_LowTimerValue = (unsigned int)((double)ul_LowTimerValue * 1.007752288);
-                        break;
-                }
-
-	/****************************************/
-	/* Calculate the high division factor   */
-	/****************************************/
-
- switch (b_TimingUnit) {
-	case 0:	/* ns */
-		/* Timer 0 factor */
-		ul_HighTimerValue = (unsigned int)(ul_HighTiming * (0.00025 * b_ClockSelection));
-
-		/* Round the value */
-		if ((double)((double)ul_HighTiming * (0.00025 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
-			ul_HighTimerValue = ul_HighTimerValue + 1;
-		}
-
-		/* Calculate the real timing */
-		ul_RealHighTiming = (unsigned int)(ul_HighTimerValue / (0.00025 * (double)b_ClockSelection));
-		d_RealHighTiming = (double)ul_HighTimerValue / (0.00025 * (double)b_ClockSelection);
-
-		if ((double)((double)ul_HighTimerValue / (0.00025 * (double)b_ClockSelection)) >= (double)((double)ul_RealHighTiming + 0.5)) {
-			ul_RealHighTiming = ul_RealHighTiming + 1;
-		}
-
-		ul_HighTiming = ul_HighTiming - 1;
-		ul_HighTimerValue = ul_HighTimerValue - 2;
-
-		if (b_ClockSelection != APCI1710_40MHZ) {
-			ul_HighTimerValue = (unsigned int)((double)(ul_HighTimerValue) * 1.007752288);
-		}
-
-		break;
-
-	case 1:	/* µs */
-		/* Timer 0 factor */
-		ul_HighTimerValue = (unsigned int)(ul_HighTiming * (0.25 * b_ClockSelection));
-
-		/* Round the value */
-		if ((double)((double)ul_HighTiming * (0.25 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
-			ul_HighTimerValue = ul_HighTimerValue + 1;
-		}
-
-		/* Calculate the real timing */
-		ul_RealHighTiming = (unsigned int)(ul_HighTimerValue / (0.25 * (double)b_ClockSelection));
-		d_RealHighTiming = (double)ul_HighTimerValue / ((double)0.25 * (double)b_ClockSelection);
-
-		if ((double)((double)ul_HighTimerValue / (0.25 * (double)b_ClockSelection)) >= (double)((double)ul_RealHighTiming + 0.5)) {
-			ul_RealHighTiming = ul_RealHighTiming + 1;
-		}
-
-		ul_HighTiming = ul_HighTiming - 1;
-		ul_HighTimerValue = ul_HighTimerValue - 2;
-
-		if (b_ClockSelection != APCI1710_40MHZ) {
-			ul_HighTimerValue = (unsigned int)((double)(ul_HighTimerValue) * 1.007752288);
-		}
-
-		break;
-
-	case 2:	/* ms */
-		/* Timer 0 factor */
-		ul_HighTimerValue = ul_HighTiming * (250.0 * b_ClockSelection);
-
-		/* Round the value */
-		if ((double)((double)ul_HighTiming * (250.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
-			ul_HighTimerValue = ul_HighTimerValue + 1;
-		}
-
-		/* Calculate the real timing */
-		ul_RealHighTiming = (unsigned int)(ul_HighTimerValue / (250.0 * (double)b_ClockSelection));
-		d_RealHighTiming = (double)ul_HighTimerValue / (250.0 * (double)b_ClockSelection);
-
-		if ((double)((double)ul_HighTimerValue / (250.0 * (double)b_ClockSelection)) >= (double)((double)ul_RealHighTiming + 0.5)) {
-			ul_RealHighTiming = ul_RealHighTiming + 1;
-		}
-
-		ul_HighTiming = ul_HighTiming - 1;
-		ul_HighTimerValue = ul_HighTimerValue - 2;
-
-		if (b_ClockSelection != APCI1710_40MHZ) {
-			ul_HighTimerValue = (unsigned int)((double)(ul_HighTimerValue) * 1.007752288);
-		}
-
-		break;
-
-	case 3:	/* s */
-		/* Timer 0 factor */
-		ul_HighTimerValue = (unsigned int)(ul_HighTiming * (250000.0 * b_ClockSelection));
-
-		/* Round the value */
-		if ((double)((double)ul_HighTiming * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
-			ul_HighTimerValue = ul_HighTimerValue + 1;
-		}
-
-		/* Calculate the real timing */
-		ul_RealHighTiming = (unsigned int)(ul_HighTimerValue / (250000.0 * (double)b_ClockSelection));
-		d_RealHighTiming = (double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection);
-
-		if ((double)((double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) >= (double)((double)ul_RealHighTiming + 0.5)) {
-			ul_RealHighTiming = ul_RealHighTiming + 1;
-		}
-
-		ul_HighTiming = ul_HighTiming - 1;
-		ul_HighTimerValue = ul_HighTimerValue - 2;
-
-		if (b_ClockSelection != APCI1710_40MHZ) {
-			ul_HighTimerValue = (unsigned int)((double)(ul_HighTimerValue) * 1.007752288);
-		}
-
-		break;
-
-	case 4:	/* mn */
-		/* Timer 0 factor */
-		ul_HighTimerValue = (unsigned int)((ul_HighTiming * 60) * (250000.0 * b_ClockSelection));
-
-		/* Round the value */
-		if ((double)((double)(ul_HighTiming * 60.0) * (250000.0 * (double)b_ClockSelection)) >= ((double)((double)ul_HighTimerValue + 0.5))) {
-			ul_HighTimerValue = ul_HighTimerValue + 1;
-		}
-
-		/* Calculate the real timing */
-		ul_RealHighTiming = (unsigned int)(ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) / 60;
-		d_RealHighTiming = ((double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) / 60.0;
-
-		if ((double)(((double)ul_HighTimerValue / (250000.0 * (double)b_ClockSelection)) / 60.0) >= (double)((double)ul_RealHighTiming + 0.5)) {
-			ul_RealHighTiming = ul_RealHighTiming + 1;
-		}
-
-		ul_HighTiming = ul_HighTiming - 1;
-		ul_HighTimerValue = ul_HighTimerValue - 2;
-
-		if (b_ClockSelection != APCI1710_40MHZ) {
-			ul_HighTimerValue = (unsigned int)((double)(ul_HighTimerValue) * 1.007752288);
-		}
-
-		break;
- }
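-
-	/*
-	 * Editorial note (not in the original driver): the per-unit factors in
-	 * both switches above (0.00025, 0.25, 250.0 and 250000.0 for ns/us/ms/s,
-	 * plus the extra *60 for minutes) all correspond to a timer tick rate of
-	 * b_ClockSelection / 4 MHz; e.g. 5 ms at 40 MHz gives
-	 * 5 * 250.0 * 40 = 50000 ticks before the "- 2" adjustment. The
-	 * 1.007752288 factor used for the 30/33 MHz clocks appears to be an
-	 * empirical correction; its origin is not documented here.
-	 */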
-
- fpu_end();
-
- /************************/
- /* Save the timing unit */
- /************************/
-
-	devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].b_TimingUnit = b_TimingUnit;
-
-	/****************************/
-	/* Save the low base timing */
-	/****************************/
-
-	devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].d_LowTiming = d_RealLowTiming;
-
-	devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].ul_RealLowTiming = ul_RealLowTiming;
-
-	/*****************************/
-	/* Save the high base timing */
-	/*****************************/
-
-	devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].d_HighTiming = d_RealHighTiming;
-
-	devpriv->s_ModuleInfo[b_ModulNbr].s_PWMModuleInfo.s_PWMInfo[b_PWM].ul_RealHighTiming = ul_RealHighTiming;
-
- /************************/
- /* Write the low timing */
- /************************/
-
- outl(ul_LowTimerValue, devpriv->s_BoardInfos.ui_Address + 0 + (20 * b_PWM) + (64 * b_ModulNbr));
-
- /*************************/
- /* Write the high timing */
- /*************************/
-
- outl(ul_HighTimerValue, devpriv->s_BoardInfos.ui_Address + 4 + (20 * b_PWM) + (64 * b_ModulNbr));
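-
-	/*
-	 * Editorial note (assumption from the I/O offsets used here): each PWM
-	 * channel occupies a 20-byte register block (20 * b_PWM) inside a
-	 * 64-byte module block (64 * b_ModulNbr), with offset 0 = low timing,
-	 * 4 = high timing, 8 = command/clock selection (written below) and
-	 * 12 = status.
-	 */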
-
- /***************************/
- /* Set the clock selection */
- /***************************/
-
-	dw_Command = inl(devpriv->s_BoardInfos.ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr));
-
-	dw_Command = dw_Command & 0x7F;
-
-	if (b_ClockSelection == APCI1710_40MHZ) {
-		dw_Command = dw_Command | 0x80;
-	}
-
-	/***************************/
-	/* Set the clock selection */
-	/***************************/
-
-	outl(dw_Command, devpriv->s_BoardInfos.ui_Address + 8 + (20 * b_PWM) + (64 * b_ModulNbr));
- } else {
- /***************************************/
- /* High base timing selection is wrong */
- /***************************************/
- DPRINTK("High base timing selection is wrong\n");
-		i_ReturnValue = -8;
- }
- } else {
- /**************************************/
- /* Low base timing selection is wrong */
- /**************************************/
- DPRINTK("Low base timing selection is wrong\n");
- i_ReturnValue = -7;
- }
- } /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
- else {
- /**********************************/
- /* Timing unit selection is wrong */
- /**********************************/
- DPRINTK("Timing unit selection is wrong\n");
- i_ReturnValue = -6;
- } /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
- } /* if (dw_Status & 0x10) */
- else {
- /***********************/
- /* PWM not initialised */
- /***********************/
- DPRINTK("PWM not initialised\n");
- i_ReturnValue = -5;
- } /* if (dw_Status & 0x10) */
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- else {
- /******************************/
- /* Tor PWM selection is wrong */
- /******************************/
- DPRINTK("Tor PWM selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- } else {
- /**********************************/
- /* The module is not a PWM module */
- /**********************************/
- DPRINTK("The module is not a PWM module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
- * Pwm Enable Disable and Set New Timing
- */
-static int i_APCI1710_InsnWritePWM(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- unsigned char b_WriteType;
- int i_ReturnValue = 0;
- b_WriteType = CR_CHAN(insn->chanspec);
-
- switch (b_WriteType) {
- case APCI1710_PWM_ENABLE:
- i_ReturnValue = i_APCI1710_EnablePWM(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned char) data[0],
- (unsigned char) data[1],
- (unsigned char) data[2],
- (unsigned char) data[3], (unsigned char) data[4], (unsigned char) data[5]);
- break;
-
- case APCI1710_PWM_DISABLE:
- i_ReturnValue = i_APCI1710_DisablePWM(dev,
- (unsigned char) CR_AREF(insn->chanspec), (unsigned char) data[0]);
- break;
-
- case APCI1710_PWM_NEWTIMING:
- i_ReturnValue = i_APCI1710_SetNewPWMTiming(dev,
- (unsigned char) CR_AREF(insn->chanspec),
- (unsigned char) data[0],
- (unsigned char) data[1], (unsigned int) data[2], (unsigned int) data[3]);
- break;
-
- default:
- printk("Write Config Parameter Wrong\n");
- }
-
- if (i_ReturnValue >= 0)
- i_ReturnValue = insn->n;
- return i_ReturnValue;
-}
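-
-/*
- * Editorial note (illustrative, not from the original source): for this
- * instruction CR_CHAN(insn->chanspec) selects the write type
- * (APCI1710_PWM_ENABLE / _DISABLE / _NEWTIMING), CR_AREF(insn->chanspec)
- * is the module number, and data[0..] carry the PWM number plus the
- * remaining arguments of the corresponding helper called above.
- */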
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetPWMStatus |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PWM, |
-| unsigned char *_ pb_PWMOutputStatus, |
-| unsigned char *_ pb_ExternGateStatus) |
-+----------------------------------------------------------------------------+
-| Task : Return the status from selected PWM (b_PWM) from |
-| selected module (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_PWM : Selected PWM (0 or 1) |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3)
- b_ModulNbr =(unsigned char) CR_AREF(insn->chanspec);
- b_PWM =(unsigned char) data[0];
-
- |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_PWMOutputStatus : Return the PWM output |
-| level status. |
-| 0 : The PWM output level|
-| is low. |
-| 1 : The PWM output level|
-| is high. |
-| unsigned char *_ pb_ExternGateStatus : Return the extern gate |
-| level status. |
-| 0 : The extern gate is |
-| low. |
-| 1 : The extern gate is |
-| high.
- pb_PWMOutputStatus =(unsigned char *) data[0];
- pb_ExternGateStatus =(unsigned char *) data[1]; |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a PWM module |
-| -4: PWM selection is wrong |
-| -5: PWM not initialised see function |
-| "i_APCI1710_InitPWM" |
-| -6: PWM not enabled see function "i_APCI1710_EnablePWM"|
-+----------------------------------------------------------------------------+
-*/
-static int i_APCI1710_InsnReadGetPWMStatus(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_Status;
- unsigned char b_ModulNbr;
- unsigned char b_PWM;
- unsigned char *pb_PWMOutputStatus;
- unsigned char *pb_ExternGateStatus;
-
- i_ReturnValue = insn->n;
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_PWM = (unsigned char) CR_CHAN(insn->chanspec);
- pb_PWMOutputStatus = (unsigned char *) &data[0];
- pb_ExternGateStatus = (unsigned char *) &data[1];
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***************/
- /* Test if PWM */
- /***************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_PWM) {
- /**************************/
- /* Test the PWM selection */
- /**************************/
-
- if (b_PWM <= 1) {
- /***************************/
- /* Test if PWM initialised */
- /***************************/
-
- dw_Status = inl(devpriv->s_BoardInfos.
- ui_Address + 12 + (20 * b_PWM) +
- (64 * b_ModulNbr));
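-
-				/*
-				 * Editorial note (inferred from the tests below): in this status
-				 * register bit 0 = PWM enabled, bit 4 = PWM initialised, bit 6 =
-				 * extern gate level and bit 7 = PWM output level.
-				 */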
-
- if (dw_Status & 0x10) {
- /***********************/
- /* Test if PWM enabled */
- /***********************/
-
- if (dw_Status & 0x1) {
- *pb_PWMOutputStatus =
- (unsigned char) ((dw_Status >> 7)
- & 1);
- *pb_ExternGateStatus =
- (unsigned char) ((dw_Status >> 6)
- & 1);
- } /* if (dw_Status & 0x1) */
- else {
- /*******************/
- /* PWM not enabled */
- /*******************/
-
- DPRINTK("PWM not enabled \n");
- i_ReturnValue = -6;
- } /* if (dw_Status & 0x1) */
- } /* if (dw_Status & 0x10) */
- else {
- /***********************/
- /* PWM not initialised */
- /***********************/
-
- DPRINTK("PWM not initialised\n");
- i_ReturnValue = -5;
- } /* if (dw_Status & 0x10) */
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- else {
- /******************************/
- /* Tor PWM selection is wrong */
- /******************************/
-
- DPRINTK("Tor PWM selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_PWM >= 0 && b_PWM <= 1) */
- } else {
- /**********************************/
- /* The module is not a PWM module */
- /**********************************/
-
- DPRINTK("The module is not a PWM module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-static int i_APCI1710_InsnBitsReadPWMInterrupt(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
-
- data[0] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].b_OldModuleMask;
- data[1] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].ul_OldInterruptMask;
- data[2] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].ul_OldCounterLatchValue;
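-
-	/*
-	 * Editorial note: data[0..2] return the oldest entry of the driver's
-	 * interrupt FIFO (module mask, interrupt mask and latched counter value);
-	 * ui_Read below is then advanced modulo APCI1710_SAVE_INTERRUPT, i.e. the
-	 * FIFO is a simple ring buffer.
-	 */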
-
-	/***************************/
-	/* Increment the read FIFO */
-	/***************************/
-
-	devpriv->s_InterruptParameters.ui_Read =
-		(devpriv->s_InterruptParameters.ui_Read + 1) % APCI1710_SAVE_INTERRUPT;
-
- return insn->n;
-
-}
diff --git a/drivers/staging/comedi/drivers/addi-data/APCI1710_Ssi.c b/drivers/staging/comedi/drivers/addi-data/APCI1710_Ssi.c
deleted file mode 100644
index 6ef1d6a434d..00000000000
--- a/drivers/staging/comedi/drivers/addi-data/APCI1710_Ssi.c
+++ /dev/null
@@ -1,845 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
- Tel: +19(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-@endverbatim
-*/
-/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : API APCI1710 | Compiler : gcc |
- | Module name : SSI.C | Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-----------------------------------------------------------------------+
- | Description : APCI-1710 SSI counter module |
- +-----------------------------------------------------------------------+
- | several changes done by S. Weber in 1998 and C. Guinot in 2000 |
- +-----------------------------------------------------------------------+
-*/
-
-#define APCI1710_30MHZ 30
-#define APCI1710_33MHZ 33
-#define APCI1710_40MHZ 40
-
-#define APCI1710_BINARY_MODE 0x1
-#define APCI1710_GRAY_MODE 0x0
-
-#define APCI1710_SSI_READ1VALUE 1
-#define APCI1710_SSI_READALLVALUE 2
-
-#define APCI1710_SSI_SET_CHANNELON 0
-#define APCI1710_SSI_SET_CHANNELOFF 1
-#define APCI1710_SSI_READ_1CHANNEL 2
-#define APCI1710_SSI_READ_ALLCHANNEL 3
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitSSI |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_SSIProfile, |
-| unsigned char_ b_PositionTurnLength, |
-| unsigned char_ b_TurnCptLength, |
-| unsigned char_ b_PCIInputClock, |
-| ULONG_ ul_SSIOutputClock, |
-| unsigned char_ b_SSICountingMode) |
-+----------------------------------------------------------------------------+
-| Task : Configure the SSI operating mode from selected module |
-| (b_ModulNbr). You must calling this function be for you|
-| call any other function witch access of SSI. |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710|
-| unsigned char_ b_ModulNbr : Module number to |
-| configure (0 to 3) |
-| unsigned char_ b_SSIProfile : Selection from SSI |
-| profile length (2 to 32).|
-| unsigned char_ b_PositionTurnLength : Selection from SSI |
-| position data length |
-| (1 to 31). |
-| unsigned char_ b_TurnCptLength : Selection from SSI turn |
-| counter data length |
-| (1 to 31). |
-| unsigned char b_PCIInputClock : Selection from PCI bus |
-| clock |
-| - APCI1710_30MHZ : |
-|                                               The PC has a PCI bus      |
-|                                               clock of 30 MHz           |
-|                                             - APCI1710_33MHZ :          |
-|                                               The PC has a PCI bus      |
-|                                               clock of 33 MHz           |
-| ULONG_ ul_SSIOutputClock : Selection from SSI output|
-| clock. |
-| From 229 to 5 000 000 Hz|
-| for 30 MHz selection. |
-| From 252 to 5 000 000 Hz|
-| for 33 MHz selection. |
-| unsigned char b_SSICountingMode : SSI counting mode |
-| selection |
-| - APCI1710_BINARY_MODE : |
-| Binary counting mode. |
-| - APCI1710_GRAY_MODE : |
-| Gray counting mode.
-
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_SSIProfile = (unsigned char) data[0];
- b_PositionTurnLength= (unsigned char) data[1];
- b_TurnCptLength = (unsigned char) data[2];
- b_PCIInputClock = (unsigned char) data[3];
- ul_SSIOutputClock = (unsigned int) data[4];
- b_SSICountingMode = (unsigned char) data[5]; |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a SSI module |
-| -4: The selected SSI profile length is wrong |
-| -5: The selected SSI position data length is wrong |
-| -6: The selected SSI turn counter data length is wrong |
-| -7: The selected PCI input clock is wrong |
-| -8: The selected SSI output clock is wrong |
-| -9: The selected SSI counting mode parameter is wrong |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnConfigInitSSI(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int ui_TimerValue;
- unsigned char b_ModulNbr, b_SSIProfile, b_PositionTurnLength, b_TurnCptLength,
- b_PCIInputClock, b_SSICountingMode;
- unsigned int ul_SSIOutputClock;
-
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_SSIProfile = (unsigned char) data[0];
- b_PositionTurnLength = (unsigned char) data[1];
- b_TurnCptLength = (unsigned char) data[2];
- b_PCIInputClock = (unsigned char) data[3];
- ul_SSIOutputClock = (unsigned int) data[4];
- b_SSICountingMode = (unsigned char) data[5];
-
- i_ReturnValue = insn->n;
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if SSI counter */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_SSI_COUNTER) {
- /*******************************/
- /* Test the SSI profile length */
- /*******************************/
-
-			/* CG 22/03/00: b_SSIProfile >= 2 instead of b_SSIProfile > 2 */
- if (b_SSIProfile >= 2 && b_SSIProfile < 33) {
- /*************************************/
- /* Test the SSI position data length */
- /*************************************/
-
- if (b_PositionTurnLength > 0
- && b_PositionTurnLength < 32) {
- /*****************************************/
- /* Test the SSI turn counter data length */
- /*****************************************/
-
- if (b_TurnCptLength > 0
- && b_TurnCptLength < 32) {
- /***************************/
- /* Test the profile length */
- /***************************/
-
- if ((b_TurnCptLength +
- b_PositionTurnLength)
- <= b_SSIProfile) {
- /****************************/
- /* Test the PCI input clock */
- /****************************/
-
- if (b_PCIInputClock ==
- APCI1710_30MHZ
- ||
- b_PCIInputClock
- ==
- APCI1710_33MHZ)
- {
- /*************************/
- /* Test the output clock */
- /*************************/
-
- if ((b_PCIInputClock == APCI1710_30MHZ && (ul_SSIOutputClock > 228 && ul_SSIOutputClock <= 5000000UL)) || (b_PCIInputClock == APCI1710_33MHZ && (ul_SSIOutputClock > 251 && ul_SSIOutputClock <= 5000000UL))) {
- if (b_SSICountingMode == APCI1710_BINARY_MODE || b_SSICountingMode == APCI1710_GRAY_MODE) {
- /**********************/
- /* Save configuration */
- /**********************/
-				devpriv->s_ModuleInfo[b_ModulNbr].s_SSICounterInfo.b_SSIProfile = b_SSIProfile;
-
-				devpriv->s_ModuleInfo[b_ModulNbr].s_SSICounterInfo.b_PositionTurnLength = b_PositionTurnLength;
-
-				devpriv->s_ModuleInfo[b_ModulNbr].s_SSICounterInfo.b_TurnCptLength = b_TurnCptLength;
-
- /*********************************/
- /* Initialise the profile length */
- /*********************************/
-
- if (b_SSICountingMode == APCI1710_BINARY_MODE) {
-
- outl(b_SSIProfile + 1, devpriv->s_BoardInfos.ui_Address + 4 + (64 * b_ModulNbr));
- } else {
-
- outl(b_SSIProfile, devpriv->s_BoardInfos.ui_Address + 4 + (64 * b_ModulNbr));
- }
-
- /******************************/
- /* Calculate the output clock */
- /******************************/
-
-				ui_TimerValue = (unsigned int)(((unsigned int)(b_PCIInputClock) * 500000UL) / ul_SSIOutputClock);
-
- /************************/
- /* Initialise the timer */
- /************************/
-
- outl(ui_TimerValue, devpriv->s_BoardInfos.ui_Address + (64 * b_ModulNbr));
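-
-				/*
-				 * Editorial note (assumption): with the divider formula above, a
-				 * 33 MHz PCI clock and ul_SSIOutputClock = 100000 Hz give
-				 * (33 * 500000) / 100000 = 165; the 500000 factor suggests the
-				 * divider counts half-periods of the SSI clock.
-				 */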
-
- /********************************/
- /* Initialise the counting mode */
- /********************************/
-
- outl(7 * b_SSICountingMode, devpriv->s_BoardInfos.ui_Address + 12 + (64 * b_ModulNbr));
-
-				devpriv->s_ModuleInfo[b_ModulNbr].s_SSICounterInfo.b_SSIInit = 1;
- } else {
- /*****************************************************/
- /* The selected SSI counting mode parameter is wrong */
- /*****************************************************/
-
- DPRINTK("The selected SSI counting mode parameter is wrong\n");
-				i_ReturnValue = -9;
- }
- } else {
- /******************************************/
- /* The selected SSI output clock is wrong */
- /******************************************/
-
- DPRINTK("The selected SSI output clock is wrong\n");
-				i_ReturnValue = -8;
- }
- } else {
- /*****************************************/
- /* The selected PCI input clock is wrong */
- /*****************************************/
-
- DPRINTK("The selected PCI input clock is wrong\n");
-				i_ReturnValue = -7;
- }
- } else {
- /********************************************/
- /* The selected SSI profile length is wrong */
- /********************************************/
-
- DPRINTK("The selected SSI profile length is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /******************************************************/
- /* The selected SSI turn counter data length is wrong */
- /******************************************************/
-
- DPRINTK("The selected SSI turn counter data length is wrong\n");
- i_ReturnValue = -6;
- }
- } else {
- /**************************************************/
- /* The selected SSI position data length is wrong */
- /**************************************************/
-
- DPRINTK("The selected SSI position data length is wrong\n");
- i_ReturnValue = -5;
- }
- } else {
- /********************************************/
- /* The selected SSI profile length is wrong */
- /********************************************/
-
- DPRINTK("The selected SSI profile length is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /**********************************/
- /* The module is not a SSI module */
- /**********************************/
-
- DPRINTK("The module is not a SSI module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_Read1SSIValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_SelectedSSI, |
-| PULONG_ pul_Position, |
-| PULONG_ pul_TurnCpt)
- int i_APCI1710_ReadSSIValue(struct comedi_device *dev,struct comedi_subdevice *s,
- struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task :
-
-
- Read the selected SSI counter (b_SelectedSSI) from |
-| selected module (b_ModulNbr).
- or Read all SSI counter (b_SelectedSSI) from |
-| selected module (b_ModulNbr). |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710|
-| unsigned char_ b_ModulNbr : Module number to |
-| configure (0 to 3) |
-| unsigned char_ b_SelectedSSI : Selection from SSI |
-| counter (0 to 2)
-
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_SelectedSSI = (unsigned char) CR_CHAN(insn->chanspec); (in case of single ssi)
- b_ReadType = (unsigned char) CR_RANGE(insn->chanspec);
-|
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_Position : SSI position in the turn |
-| PULONG_ pul_TurnCpt : Number of turns
-
-pul_Position = (unsigned int *) &data[0];
- pul_TurnCpt = (unsigned int *) &data[1]; |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a SSI module |
-| -4: SSI not initialised see function |
-| "i_APCI1710_InitSSI" |
-| -5: The selected SSI is wrong |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnReadSSIValue(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned char b_Cpt;
- unsigned char b_Length;
- unsigned char b_Schift;
- unsigned char b_SSICpt;
- unsigned int dw_And;
- unsigned int dw_And1;
- unsigned int dw_And2;
- unsigned int dw_StatusReg;
- unsigned int dw_CounterValue;
- unsigned char b_ModulNbr;
- unsigned char b_SelectedSSI;
- unsigned char b_ReadType;
- unsigned int *pul_Position;
- unsigned int *pul_TurnCpt;
- unsigned int *pul_Position1;
- unsigned int *pul_TurnCpt1;
-
- i_ReturnValue = insn->n;
- pul_Position1 = (unsigned int *) &data[0];
-/* For Read1 */
- pul_TurnCpt1 = (unsigned int *) &data[1];
-/* For Read all */
- pul_Position = (unsigned int *) &data[0]; /* 0-2 */
- pul_TurnCpt = (unsigned int *) &data[3]; /* 3-5 */
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_SelectedSSI = (unsigned char) CR_CHAN(insn->chanspec);
- b_ReadType = (unsigned char) CR_RANGE(insn->chanspec);
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if SSI counter */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_SSI_COUNTER) {
- /***************************/
- /* Test if SSI initialised */
- /***************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_SSICounterInfo.b_SSIInit == 1) {
-
- switch (b_ReadType) {
-
- case APCI1710_SSI_READ1VALUE:
- /****************************************/
- /* Test the selected SSI counter number */
- /****************************************/
-
- if (b_SelectedSSI < 3) {
- /************************/
- /* Start the conversion */
- /************************/
-
- outl(0, devpriv->s_BoardInfos.
- ui_Address + 8 +
- (64 * b_ModulNbr));
-
- do {
- /*******************/
- /* Read the status */
- /*******************/
-
- dw_StatusReg =
- inl(devpriv->
- s_BoardInfos.
- ui_Address +
- (64 * b_ModulNbr));
- } while ((dw_StatusReg & 0x1)
- != 0);
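-
-					/*
-					 * Editorial note: the write to offset 8 above starts an SSI
-					 * conversion and bit 0 of the module status register stays
-					 * set while the frame is still being shifted in; note that
-					 * this polling loop has no timeout.
-					 */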
-
- /******************************/
- /* Read the SSI counter value */
- /******************************/
-
- dw_CounterValue =
- inl(devpriv->
- s_BoardInfos.
- ui_Address + 4 +
- (b_SelectedSSI * 4) +
- (64 * b_ModulNbr));
-
- b_Length =
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SSICounterInfo.
- b_SSIProfile / 2;
-
- if ((b_Length * 2) !=
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SSICounterInfo.
- b_SSIProfile) {
- b_Length++;
- }
-
- b_Schift =
- b_Length -
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SSICounterInfo.
- b_PositionTurnLength;
-
- *pul_Position1 =
- dw_CounterValue >>
- b_Schift;
-
- dw_And = 1;
-
- for (b_Cpt = 0;
- b_Cpt <
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SSICounterInfo.
- b_PositionTurnLength;
- b_Cpt++) {
- dw_And = dw_And * 2;
- }
-
- *pul_Position1 =
- *pul_Position1 &
- ((dw_And) - 1);
-
- *pul_TurnCpt1 =
- dw_CounterValue >>
- b_Length;
-
- dw_And = 1;
-
- for (b_Cpt = 0;
- b_Cpt <
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SSICounterInfo.
- b_TurnCptLength;
- b_Cpt++) {
- dw_And = dw_And * 2;
- }
-
- *pul_TurnCpt1 =
- *pul_TurnCpt1 &
- ((dw_And) - 1);
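-
-					/*
-					 * Editorial note: the decoding above assumes the turn counter
-					 * occupies the upper part of the SSI frame and the position
-					 * the lower bits: b_Length is the profile length halved and
-					 * rounded up, the position starts at bit b_Schift, and both
-					 * fields are masked to their configured widths
-					 * (dw_And = 2^field_length).
-					 */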
- } else {
- /*****************************/
- /* The selected SSI is wrong */
- /*****************************/
-
- DPRINTK("The selected SSI is wrong\n");
- i_ReturnValue = -5;
- }
- break;
-
- case APCI1710_SSI_READALLVALUE:
- dw_And1 = 1;
-
- for (b_Cpt = 0;
- b_Cpt <
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SSICounterInfo.
- b_PositionTurnLength; b_Cpt++) {
- dw_And1 = dw_And1 * 2;
- }
-
- dw_And2 = 1;
-
- for (b_Cpt = 0;
- b_Cpt <
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_SSICounterInfo.
- b_TurnCptLength; b_Cpt++) {
- dw_And2 = dw_And2 * 2;
- }
-
- /************************/
- /* Start the conversion */
- /************************/
-
- outl(0, devpriv->s_BoardInfos.
- ui_Address + 8 +
- (64 * b_ModulNbr));
-
- do {
- /*******************/
- /* Read the status */
- /*******************/
-
- dw_StatusReg =
- inl(devpriv->
- s_BoardInfos.
- ui_Address +
- (64 * b_ModulNbr));
- } while ((dw_StatusReg & 0x1) != 0);
-
- for (b_SSICpt = 0; b_SSICpt < 3;
- b_SSICpt++) {
- /******************************/
- /* Read the SSI counter value */
- /******************************/
-
- dw_CounterValue =
- inl(devpriv->
- s_BoardInfos.
- ui_Address + 4 +
- (b_SSICpt * 4) +
- (64 * b_ModulNbr));
-
- b_Length =
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SSICounterInfo.
- b_SSIProfile / 2;
-
- if ((b_Length * 2) !=
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SSICounterInfo.
- b_SSIProfile) {
- b_Length++;
- }
-
- b_Schift =
- b_Length -
- devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_SSICounterInfo.
- b_PositionTurnLength;
-
- pul_Position[b_SSICpt] =
- dw_CounterValue >>
- b_Schift;
- pul_Position[b_SSICpt] =
- pul_Position[b_SSICpt] &
- ((dw_And1) - 1);
-
- pul_TurnCpt[b_SSICpt] =
- dw_CounterValue >>
- b_Length;
- pul_TurnCpt[b_SSICpt] =
- pul_TurnCpt[b_SSICpt] &
- ((dw_And2) - 1);
- }
- break;
-
- default:
- printk("Read Type Inputs Wrong\n");
-
- } /* switch ending */
-
- } else {
- /***********************/
- /* SSI not initialised */
- /***********************/
-
- DPRINTK("SSI not initialised\n");
- i_ReturnValue = -4;
- }
- } else {
- /**********************************/
- /* The module is not a SSI module */
- /**********************************/
-
- DPRINTK("The module is not a SSI module\n");
- i_ReturnValue = -3;
-
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadSSI1DigitalInput |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_InputChannel, |
-| unsigned char *_ pb_ChannelStatus) |
-+----------------------------------------------------------------------------+
-| Task :
- (0) Set the digital output from selected SSI module |
-| (b_ModuleNbr) ON
- (1) Set the digital output from selected SSI module |
-| (b_ModuleNbr) OFF
- (2)Read the status from selected SSI digital input |
-| (b_InputChannel)
- (3)Read the status from all SSI digital inputs from |
-| selected SSI module (b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710|
-| unsigned char_ b_ModulNbr CR_AREF : Module number to |
-| configure (0 to 3) |
-| unsigned char_ b_InputChannel CR_CHAN : Selection from digital |
-| data[0] which IOTYPE input ( 0 to 2) |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_ChannelStatus : Digital input channel |
-| data[0] status |
-|                                           0 : Channel is not active       |
-|                                           1 : Channel is active           |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a SSI module |
-| -4: The selected SSI digital input is wrong |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnBitsSSIDigitalIO(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg;
- unsigned char b_ModulNbr;
- unsigned char b_InputChannel;
- unsigned char *pb_ChannelStatus;
- unsigned char *pb_InputStatus;
- unsigned char b_IOType;
-
- i_ReturnValue = insn->n;
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_IOType = (unsigned char) data[0];
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if SSI counter */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_SSI_COUNTER) {
- switch (b_IOType) {
- case APCI1710_SSI_SET_CHANNELON:
- /*****************************/
- /* Set the digital output ON */
- /*****************************/
-
- outl(1, devpriv->s_BoardInfos.ui_Address + 16 +
- (64 * b_ModulNbr));
- break;
-
- case APCI1710_SSI_SET_CHANNELOFF:
- /******************************/
- /* Set the digital output OFF */
- /******************************/
-
- outl(0, devpriv->s_BoardInfos.ui_Address + 16 +
- (64 * b_ModulNbr));
- break;
-
- case APCI1710_SSI_READ_1CHANNEL:
- /******************************************/
-			/* Test the digital input channel number */
- /******************************************/
-
- b_InputChannel = (unsigned char) CR_CHAN(insn->chanspec);
- pb_ChannelStatus = (unsigned char *) &data[0];
-
- if (b_InputChannel <= 2) {
- /**************************/
- /* Read all digital input */
- /**************************/
-
- dw_StatusReg =
- inl(devpriv->s_BoardInfos.
- ui_Address + (64 * b_ModulNbr));
- *pb_ChannelStatus =
- (unsigned char) (((~dw_StatusReg) >> (4 +
- b_InputChannel))
- & 1);
- } else {
- /********************************/
- /* Selected digital input error */
- /********************************/
-
- DPRINTK("Selected digital input error\n");
- i_ReturnValue = -4;
- }
- break;
-
- case APCI1710_SSI_READ_ALLCHANNEL:
- /**************************/
- /* Read all digital input */
- /**************************/
- pb_InputStatus = (unsigned char *) &data[0];
-
- dw_StatusReg =
- inl(devpriv->s_BoardInfos.ui_Address +
- (64 * b_ModulNbr));
- *pb_InputStatus =
- (unsigned char) (((~dw_StatusReg) >> 4) & 7);
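-
-		/*
-		 * Editorial note: in both read paths above the status register is
-		 * inverted before shifting, i.e. the three SSI digital inputs sit in
-		 * bits 4..6 and appear to be active low at register level.
-		 */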
- break;
-
- default:
- printk("IO type wrong\n");
-
- } /* switch end */
- } else {
- /**********************************/
- /* The module is not a SSI module */
- /**********************************/
-
- DPRINTK("The module is not a SSI module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
diff --git a/drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.c b/drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.c
deleted file mode 100644
index 0b79531ac24..00000000000
--- a/drivers/staging/comedi/drivers/addi-data/APCI1710_Tor.c
+++ /dev/null
@@ -1,2065 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
- Tel: +19(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-@endverbatim
-*/
-/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : API APCI1710 | Compiler : gcc |
- | Module name : TOR.C | Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-----------------------------------------------------------------------+
- | Description : APCI-1710 tor counter module |
- | |
- | |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +-----------------------------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | 27/01/99 | S. Weber | 40 MHz implementation |
- +-----------------------------------------------------------------------+
- | 28/04/00 | S. Weber | Simple,double and quadruple mode implementation|
- | | | Extern clock implementation |
- +-----------------------------------------------------------------------+
- | 08/05/00 | Guinot C | - 0400/0228 All Function in RING 0 |
- | | | available |
- +-----------------------------------------------------------------------+
-*/
-
-#define APCI1710_30MHZ 30
-#define APCI1710_33MHZ 33
-#define APCI1710_40MHZ 40
-
-#define APCI1710_GATE_INPUT 10
-
-#define APCI1710_TOR_SIMPLE_MODE 2
-#define APCI1710_TOR_DOUBLE_MODE 3
-#define APCI1710_TOR_QUADRUPLE_MODE 4
-
-#define APCI1710_SINGLE 0
-#define APCI1710_CONTINUOUS 1
-
-#define APCI1710_TOR_GETPROGRESSSTATUS 0
-#define APCI1710_TOR_GETCOUNTERVALUE 1
-#define APCI1710_TOR_READINTERRUPT 2
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitTorCounter |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_TorCounter, |
-| unsigned char_ b_PCIInputClock, |
-| unsigned char_ b_TimingUnit, |
-| ULONG_ ul_TimingInterval, |
-| PULONG_ pul_RealTimingInterval) |
-+----------------------------------------------------------------------------+
-| Task : Configure the selected tor counter (b_TorCounter) |
-| from selected module (b_ModulNbr). |
-| The ul_TimingInterval and ul_TimingUnit determine the |
-| timing base for the measurement. |
-| The pul_RealTimingInterval return the real timing |
-|                     value. You must call this function before you call    |
-|                     any other function which accesses the tor counter.    |
-| |
-+----------------------------------------------------------------------------+
-| Input Parameters : |
-|
- CR_AREF unsigned char_ b_ModulNbr : Module number to configure |
-| (0 to 3) |
-| data[0] unsigned char_ b_TorCounter : Tor counter selection |
-| (0 or 1). |
-| data[1] unsigned char_ b_PCIInputClock : Selection from PCI bus clock|
-| - APCI1710_30MHZ : |
-|                                         The PC has a PCI bus              |
-|                                         clock of 30 MHz                   |
-|                                       - APCI1710_33MHZ :                  |
-|                                         The PC has a PCI bus              |
-|                                         clock of 33 MHz                   |
-|                                       - APCI1710_40MHZ                    |
-|                                         The APCI-1710 has an              |
-|                                         integrated 40 MHz                 |
-|                                         quartz.                           |
-|                                       - APCI1710_GATE_INPUT               |
-|                                         Use the gate input for            |
-|                                         the base clock. If you            |
-|                                         have selected this option,        |
-|                                         it is not possible to             |
-|                                         use the gate input to             |
-|                                         enable the acquisition            |
-| data[2] unsigned char_ b_TimingUnit : Base timing unit (0 to 4) |
-| 0 : ns |
-| 1 : µs |
-| 2 : ms |
-| 3 : s |
-| 4 : mn |
-| data[3] ULONG_ ul_TimingInterval : Base timing value. |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_RealTimingInterval : Real base timing |
-| data[0] value. |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a tor counter module |
-| -4: Tor counter selection is wrong |
-| -5: The selected PCI input clock is wrong |
-| -6: Timing unit selection is wrong |
-| -7: Base timing selection is wrong |
-|                     -8: You cannot use the 40MHz clock selection with     |
-|                         this board                                        |
-|                     -9: You cannot use the 40MHz clock selection with     |
-|                         this TOR version                                  |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnConfigInitTorCounter(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int ul_TimerValue = 0;
- unsigned int dw_Command;
- double d_RealTimingInterval = 0;
- unsigned char b_ModulNbr;
- unsigned char b_TorCounter;
- unsigned char b_PCIInputClock;
- unsigned char b_TimingUnit;
- unsigned int ul_TimingInterval;
- unsigned int ul_RealTimingInterval = 0;
-
- i_ReturnValue = insn->n;
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
-
- b_TorCounter = (unsigned char) data[0];
- b_PCIInputClock = (unsigned char) data[1];
- b_TimingUnit = (unsigned char) data[2];
- ul_TimingInterval = (unsigned int) data[3];
- printk("INPUT clock %d\n", b_PCIInputClock);
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if tor counter */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_TOR_COUNTER) {
- /**********************************/
- /* Test the tor counter selection */
- /**********************************/
-
- if (b_TorCounter <= 1) {
- /**************************/
- /* Test the PCI bus clock */
- /**************************/
-
- if ((b_PCIInputClock == APCI1710_30MHZ) ||
- (b_PCIInputClock == APCI1710_33MHZ) ||
- (b_PCIInputClock == APCI1710_40MHZ) ||
- (b_PCIInputClock ==
- APCI1710_GATE_INPUT)) {
- /************************/
- /* Test the timing unit */
- /************************/
-
- if ((b_TimingUnit <= 4)
- || (b_PCIInputClock ==
- APCI1710_GATE_INPUT)) {
- /**********************************/
- /* Test the base timing selection */
- /**********************************/
-
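-
-				/*
-				 * Editorial note (assumption): the per-unit upper limits in the
-				 * check below appear to keep the resulting timer value within a
-				 * 32-bit register at the selected tick rate, e.g. 429496729 us *
-				 * 10 ticks/us for the 40 MHz clock is roughly 0xFFFFFFFF.
-				 */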
-				if (((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 0) && (ul_TimingInterval >= 133) && (ul_TimingInterval <= 0xFFFFFFFFUL)) ||
-				    ((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 1) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 571230650UL)) ||
-				    ((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 2) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 571230UL)) ||
-				    ((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 3) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 571UL)) ||
-				    ((b_PCIInputClock == APCI1710_30MHZ) && (b_TimingUnit == 4) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 9UL)) ||
-				    ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 0) && (ul_TimingInterval >= 121) && (ul_TimingInterval <= 0xFFFFFFFFUL)) ||
-				    ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 1) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 519691043UL)) ||
-				    ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 2) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 519691UL)) ||
-				    ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 3) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 520UL)) ||
-				    ((b_PCIInputClock == APCI1710_33MHZ) && (b_TimingUnit == 4) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 8UL)) ||
-				    ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 0) && (ul_TimingInterval >= 100) && (ul_TimingInterval <= 0xFFFFFFFFUL)) ||
-				    ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 1) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 429496729UL)) ||
-				    ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 2) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 429496UL)) ||
-				    ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 3) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 429UL)) ||
-				    ((b_PCIInputClock == APCI1710_40MHZ) && (b_TimingUnit == 4) && (ul_TimingInterval >= 1) && (ul_TimingInterval <= 7UL)) ||
-				    ((b_PCIInputClock == APCI1710_GATE_INPUT) && (ul_TimingInterval >= 2))) {
- /**************************/
- /* Test the board version */
- /**************************/
-
- if (((b_PCIInputClock == APCI1710_40MHZ) && (devpriv->s_BoardInfos.b_BoardVersion > 0)) || (b_PCIInputClock != APCI1710_40MHZ)) {
- /************************/
- /* Test the TOR version */
- /************************/
-
-					if (((b_PCIInputClock == APCI1710_40MHZ) && ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) >= 0x3131)) ||
-					    ((b_PCIInputClock == APCI1710_GATE_INPUT) && ((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) >= 0x3132)) ||
-					    (b_PCIInputClock == APCI1710_30MHZ) || (b_PCIInputClock == APCI1710_33MHZ)) {
- /*********************************/
- /* Test if not extern clock used */
- /*********************************/
-
- if (b_PCIInputClock != APCI1710_GATE_INPUT) {
-						fpu_begin();
-
-						/******************************************/
-						/* Calculate the timer 0 division factor  */
-						/******************************************/
-
-						switch (b_TimingUnit) {
-						case 0:	/* ns */
-							/* Timer 0 factor */
-							ul_TimerValue = (unsigned int)(ul_TimingInterval * (0.00025 * b_PCIInputClock));
-
-							/* Round the value */
-							if ((double)((double)ul_TimingInterval * (0.00025 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
-								ul_TimerValue = ul_TimerValue + 1;
-							}
-
-							/* Calculate the real timing */
-							ul_RealTimingInterval = (unsigned int)(ul_TimerValue / (0.00025 * (double)b_PCIInputClock));
-							d_RealTimingInterval = (double)ul_TimerValue / (0.00025 * (double)b_PCIInputClock);
-
-							if ((double)((double)ul_TimerValue / (0.00025 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
-								ul_RealTimingInterval = ul_RealTimingInterval + 1;
-							}
-
-							ul_TimingInterval = ul_TimingInterval - 1;
-							ul_TimerValue = ul_TimerValue - 2;
-
-							if (b_PCIInputClock != APCI1710_40MHZ) {
-								ul_TimerValue = (unsigned int)((double)(ul_TimerValue) * 1.007752288);
-							}
-
-							break;
-
-						case 1:	/* µs */
-							/* Timer 0 factor */
-							ul_TimerValue = (unsigned int)(ul_TimingInterval * (0.25 * b_PCIInputClock));
-
-							/* Round the value */
-							if ((double)((double)ul_TimingInterval * (0.25 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
-								ul_TimerValue = ul_TimerValue + 1;
-							}
-
-							/* Calculate the real timing */
-							ul_RealTimingInterval = (unsigned int)(ul_TimerValue / (0.25 * (double)b_PCIInputClock));
-							d_RealTimingInterval = (double)ul_TimerValue / ((double)0.25 * (double)b_PCIInputClock);
-
-							if ((double)((double)ul_TimerValue / (0.25 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
-								ul_RealTimingInterval = ul_RealTimingInterval + 1;
-							}
-
-							ul_TimingInterval = ul_TimingInterval - 1;
-							ul_TimerValue = ul_TimerValue - 2;
-
-							if (b_PCIInputClock != APCI1710_40MHZ) {
-								ul_TimerValue = (unsigned int)((double)(ul_TimerValue) * 1.007752288);
-							}
-
-							break;
-
-						case 2:	/* ms */
-							/* Timer 0 factor */
-							ul_TimerValue = ul_TimingInterval * (250.0 * b_PCIInputClock);
-
-							/* Round the value */
-							if ((double)((double)ul_TimingInterval * (250.0 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
-								ul_TimerValue = ul_TimerValue + 1;
-							}
-
-							/* Calculate the real timing */
-							ul_RealTimingInterval = (unsigned int)(ul_TimerValue / (250.0 * (double)b_PCIInputClock));
-							d_RealTimingInterval = (double)ul_TimerValue / (250.0 * (double)b_PCIInputClock);
-
-							if ((double)((double)ul_TimerValue / (250.0 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
-								ul_RealTimingInterval = ul_RealTimingInterval + 1;
-							}
-
-							ul_TimingInterval = ul_TimingInterval - 1;
-							ul_TimerValue = ul_TimerValue - 2;
-
-							if (b_PCIInputClock != APCI1710_40MHZ) {
-								ul_TimerValue = (unsigned int)((double)(ul_TimerValue) * 1.007752288);
-							}
-
-							break;
-
-						case 3:	/* s */
-							/* Timer 0 factor */
-							ul_TimerValue = (unsigned int)(ul_TimingInterval * (250000.0 * b_PCIInputClock));
-
-							/* Round the value */
-							if ((double)((double)ul_TimingInterval * (250000.0 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
-								ul_TimerValue = ul_TimerValue + 1;
-							}
-
-							/* Calculate the real timing */
-							ul_RealTimingInterval = (unsigned int)(ul_TimerValue / (250000.0 * (double)b_PCIInputClock));
-							d_RealTimingInterval = (double)ul_TimerValue / (250000.0 * (double)b_PCIInputClock);
-
-							if ((double)((double)ul_TimerValue / (250000.0 * (double)b_PCIInputClock)) >= (double)((double)ul_RealTimingInterval + 0.5)) {
-								ul_RealTimingInterval = ul_RealTimingInterval + 1;
-							}
-
-							ul_TimingInterval = ul_TimingInterval - 1;
-							ul_TimerValue = ul_TimerValue - 2;
-
-							if (b_PCIInputClock != APCI1710_40MHZ) {
-								ul_TimerValue = (unsigned int)((double)(ul_TimerValue) * 1.007752288);
-							}
-
-							break;
-
-						case 4:	/* mn */
-							/* Timer 0 factor */
-							ul_TimerValue = (unsigned int)((ul_TimingInterval * 60) * (250000.0 * b_PCIInputClock));
-
-							/* Round the value */
-							if ((double)((double)(ul_TimingInterval * 60.0) * (250000.0 * (double)b_PCIInputClock)) >= ((double)((double)ul_TimerValue + 0.5))) {
-								ul_TimerValue = ul_TimerValue + 1;
-							}
-
-							/* Calculate the real timing */
-							ul_RealTimingInterval = (unsigned int)(ul_TimerValue / (250000.0 * (double)b_PCIInputClock)) / 60;
-							d_RealTimingInterval = ((double)ul_TimerValue / (250000.0 * (double)b_PCIInputClock)) / 60.0;
-
-							if ((double)(((double)ul_TimerValue / (250000.0 * (double)b_PCIInputClock)) / 60.0) >= (double)((double)ul_RealTimingInterval + 0.5)) {
-								ul_RealTimingInterval = ul_RealTimingInterval + 1;
-							}
-
-							ul_TimingInterval = ul_TimingInterval - 1;
-							ul_TimerValue = ul_TimerValue - 2;
-
-							if (b_PCIInputClock != APCI1710_40MHZ) {
-								ul_TimerValue = (unsigned int)((double)(ul_TimerValue) * 1.007752288);
-							}
-
-							break;
-						}
-
- fpu_end();
- } /* if (b_PCIInputClock != APCI1710_GATE_INPUT) */
- else {
- /*************************************************************/
- /* 2 Clock used for the overflow and the reload from counter */
- /*************************************************************/
-
-		ul_TimerValue = ul_TimingInterval - 2;
- } /* if (b_PCIInputClock != APCI1710_GATE_INPUT) */
-
- /****************************/
- /* Save the PCI input clock */
- /****************************/
-	devpriv->s_ModuleInfo[b_ModulNbr].s_TorCounterModuleInfo.b_PCIInputClock = b_PCIInputClock;
-
- /************************/
- /* Save the timing unit */
- /************************/
-
-	devpriv->s_ModuleInfo[b_ModulNbr].s_TorCounterModuleInfo.s_TorCounterInfo[b_TorCounter].b_TimingUnit = b_TimingUnit;
-
- /************************/
- /* Save the base timing */
- /************************/
-	devpriv->s_ModuleInfo[b_ModulNbr].s_TorCounterModuleInfo.s_TorCounterInfo[b_TorCounter].d_TimingInterval = d_RealTimingInterval;
-
-	devpriv->s_ModuleInfo[b_ModulNbr].s_TorCounterModuleInfo.s_TorCounterInfo[b_TorCounter].ul_RealTimingInterval = ul_RealTimingInterval;
-
- /*******************/
- /* Get the command */
- /*******************/
-
-	dw_Command = inl(devpriv->s_BoardInfos.ui_Address + 4 + (16 * b_TorCounter) + (64 * b_ModulNbr));
-
-	dw_Command = (dw_Command >> 4) & 0xF;
-
- /******************/
- /* Test if 40 MHz */
- /******************/
-
- if (b_PCIInputClock == APCI1710_40MHZ) {
- /****************************/
- /* Set the 40 MHz selection */
- /****************************/
-
-		dw_Command = dw_Command | 0x10;
- }
-
- /*****************************/
- /* Test if extern clock used */
- /*****************************/
-
- if (b_PCIInputClock == APCI1710_GATE_INPUT) {
-		/**************************************/
-		/* Set the extern clock selection     */
-		/**************************************/
-
-		dw_Command = dw_Command | 0x20;
- }
-
- /*************************/
- /* Write the new command */
- /*************************/
-
- outl(dw_Command, devpriv->s_BoardInfos.ui_Address + 4 + (16 * b_TorCounter) + (64 * b_ModulNbr));
-
- /*******************/
- /* Disable the tor */
- /*******************/
-
- outl(0, devpriv->s_BoardInfos.ui_Address + 8 + (16 * b_TorCounter) + (64 * b_ModulNbr));
- /*************************/
- /* Set the timer 1 value */
- /*************************/
-
- outl(ul_TimerValue, devpriv->s_BoardInfos.ui_Address + 0 + (16 * b_TorCounter) + (64 * b_ModulNbr));
-
- /*********************/
- /* Tor counter init. */
- /*********************/
-
-	devpriv->s_ModuleInfo[b_ModulNbr].s_TorCounterModuleInfo.s_TorCounterInfo[b_TorCounter].b_TorCounterInit = 1;
- } else {
- /***********************************************/
- /* TOR version error for 40MHz clock selection */
- /***********************************************/
-
- DPRINTK("TOR version error for 40MHz clock selection\n");
-	i_ReturnValue = -9;
- }
- } else {
-	/***************************************************************/
-	/* You cannot use the 40 MHz clock selection with this board   */
-	/***************************************************************/
-
-	DPRINTK("You cannot use the 40MHz clock selection with this board\n");
-	i_ReturnValue = -8;
- }
- } else {
- /**********************************/
- /* Base timing selection is wrong */
- /**********************************/
-
- DPRINTK("Base timing selection is wrong\n");
- i_ReturnValue = -7;
- }
- } /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
- else {
- /**********************************/
- /* Timing unit selection is wrong */
- /**********************************/
-
- DPRINTK("Timing unit selection is wrong\n");
- i_ReturnValue = -6;
- } /* if ((b_TimingUnit >= 0) && (b_TimingUnit <= 4)) */
- } /* if ((b_PCIInputClock == APCI1710_30MHZ) || (b_PCIInputClock == APCI1710_33MHZ)) */
- else {
- /*****************************************/
- /* The selected PCI input clock is wrong */
- /*****************************************/
-
- DPRINTK("The selected PCI input clock is wrong\n");
- i_ReturnValue = -5;
- } /* if ((b_PCIInputClock == APCI1710_30MHZ) || (b_PCIInputClock == APCI1710_33MHZ)) */
- } /* if (b_TorCounterMode >= 0 && b_TorCounterMode <= 7) */
- else {
- /**********************************/
- /* Tor Counter selection is wrong */
- /**********************************/
-
- DPRINTK("Tor Counter selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_TorCounterMode >= 0 && b_TorCounterMode <= 7) */
- } else {
- /******************************************/
- /* The module is not a tor counter module */
- /******************************************/
-
- DPRINTK("The module is not a tor counter module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
- data[0] = (unsigned int) ul_RealTimingInterval;
- return i_ReturnValue;
-}
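-
-/*
- * Editor's illustrative sketch, not part of the original ADDI-DATA sources:
- * the timer value programmed above corresponds to one count per four PCI
- * clock cycles (250 counts per ms, 250000 per second, times the clock in
- * MHz), minus the two clocks used for reload and overflow, with the
- * 1.007752288 correction applied for the 30/33 MHz clocks.  The helper name
- * below is hypothetical and only mirrors that arithmetic for the "s" unit.
- */
-#if 0	/* illustration only */
-static unsigned int apci1710_tor_seconds_to_counts(double seconds,
-						    unsigned int clock_mhz)
-{
-	/* one count every four PCI clock cycles */
-	double counts = seconds * 250000.0 * (double)clock_mhz;
-
-	counts = counts - 2.0;			/* reload and overflow clocks */
-	if (clock_mhz != 40)
-		counts = counts * 1.007752288;	/* 30/33 MHz correction */
-
-	return (unsigned int)(counts + 0.5);
-}
-#endif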
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_EnableTorCounter |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_TorCounter, |
-| unsigned char_ b_InputMode, |
-| unsigned char_ b_ExternGate, |
-| unsigned char_ b_CycleMode, |
-| unsigned char_ b_InterruptEnable) |
-+----------------------------------------------------------------------------+
-| Task                : Enable the tor counter (b_TorCounter) from the       |
-|                       selected module (b_ModulNbr). You must call the      |
-|                       "i_APCI1710_InitTorCounter" function before you call |
-|                       this function.                                       |
-|                       If you enable the tor counter interrupt, the tor     |
-|                       counter generates an interrupt after the timing      |
-|                       cycle. See the "i_APCI1710_SetBoardIntRoutineX"      |
-|                       function and the interrupt mask description chapter  |
-|                       of this manual.                                      |
-|                       The b_CycleMode parameter determines whether a       |
-|                       single cycle or several cycles are measured.         |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
-| unsigned char_ b_TorCounter : Tor counter selection (0 or 1). |
-| unsigned char_ b_InputMode : Input signal level selection |
-| 0 : Tor count each low level |
-| 1 : Tor count each high level|
-| unsigned char_ b_ExternGate : Extern gate action selection |
-| 0 : Extern gate signal not |
-| used |
-| 1 : Extern gate signal used. |
-| If you selected the |
-| single mode, each high |
-| level signal start the |
-| counter. |
-| If you selected the |
-| continuous mode, the |
-| first high level signal |
-| start the tor counter |
-| |
-|                                     APCI1710_TOR_QUADRUPLE_MODE :          |
-| In the quadruple mode, the edge|
-| analysis circuit generates a |
-| counting pulse from each edge |
-| of 2 signals which are phase |
-| shifted in relation to each |
-| other. |
-| The gate input is used for the |
-| signal B |
-| |
-| APCI1710_TOR_DOUBLE_MODE: |
-| Functions in the same way as |
-| the quadruple mode, except that|
-| only two of the four edges are |
-| analysed per period. |
-| The gate input is used for the |
-| signal B |
-| |
-| APCI1710_TOR_SIMPLE_MODE: |
-| Functions in the same way as |
-| the quadruple mode, except that|
-| only one of the four edges is |
-| analysed per period. |
-| The gate input is used for the |
-| signal B |
-| |
-| unsigned char_ b_CycleMode : Selected the tor counter |
-| acquisition mode |
-| unsigned char_ b_InterruptEnable : Enable or disable the |
-| tor counter interrupt. |
-| APCI1710_ENABLE: |
-| Enable the tor counter |
-| interrupt |
-| APCI1710_DISABLE: |
-| Disable the tor counter |
-| interrupt |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a tor counter module |
-| -4: Tor counter selection is wrong |
-| -5: Tor counter not initialised see function |
-| "i_APCI1710_InitTorCounter" |
-| -6: Tor input signal selection is wrong |
-| -7: Extern gate signal mode is wrong |
-| -8: Tor counter acquisition mode cycle is wrong |
-| -9: Interrupt parameter is wrong |
-| -10:Interrupt function not initialised. |
-| See function "i_APCI1710_SetBoardIntRoutineX" |
-+----------------------------------------------------------------------------+
-*/
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_DisableTorCounter |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_TorCounter) |
-+----------------------------------------------------------------------------+
-| Task              : Disable the tor counter (b_TorCounter) from the        |
-|                     selected module (b_ModulNbr). If you disable the tor   |
-|                     counter after a start cycle has occurred and restart   |
-|                     it with the "i_APCI1710_EnableTorCounter" function,    |
-|                     the status register is cleared.                        |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
-| unsigned char_ b_TorCounter : Tor counter selection (0 or 1). |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a tor counter module |
-| -4: Tor counter selection is wrong |
-| -5: Tor counter not initialised see function |
-| "i_APCI1710_InitTorCounter" |
-| -6: Tor counter not enabled see function |
-| "i_APCI1710_EnableTorCounter" |
-+----------------------------------------------------------------------------+
-*/
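-
-/*
- * Editor's note (hedged sketch, not from the original sources): the handler
- * below multiplexes enable and disable through one comedi write instruction.
- * The module number travels in the AREF field of the chanspec and the
- * remaining parameters are packed into data[] exactly as unpacked at the top
- * of the function.  The array below is only an illustration of that layout;
- * the values chosen are arbitrary examples.
- */
-#if 0	/* illustration only */
-	unsigned int enable_data[6] = {
-		APCI1710_ENABLE,	/* data[0]: enable/disable action      */
-		0,			/* data[1]: tor counter 0 or 1         */
-		1,			/* data[2]: input mode (high level)    */
-		0,			/* data[3]: extern gate not used       */
-		APCI1710_SINGLE,	/* data[4]: single acquisition cycle   */
-		APCI1710_DISABLE,	/* data[5]: interrupt disabled         */
-	};
-#endif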
-
-static int i_APCI1710_InsnWriteEnableDisableTorCounter(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_Status;
- unsigned int dw_DummyRead;
- unsigned int dw_ConfigReg;
- unsigned char b_ModulNbr, b_Action;
- unsigned char b_TorCounter;
- unsigned char b_InputMode;
- unsigned char b_ExternGate;
- unsigned char b_CycleMode;
- unsigned char b_InterruptEnable;
-
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_Action = (unsigned char) data[0]; /* enable or disable */
- b_TorCounter = (unsigned char) data[1];
- b_InputMode = (unsigned char) data[2];
- b_ExternGate = (unsigned char) data[3];
- b_CycleMode = (unsigned char) data[4];
- b_InterruptEnable = (unsigned char) data[5];
- i_ReturnValue = insn->n;
- devpriv->tsk_Current = current; /* Save the current process task structure */
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if tor counter */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_TOR_COUNTER) {
- /**********************************/
- /* Test the tor counter selection */
- /**********************************/
-
- if (b_TorCounter <= 1) {
- switch (b_Action) /* Enable or Disable */
- {
- case APCI1710_ENABLE:
- /***********************************/
- /* Test if tor counter initialised */
- /***********************************/
-
- dw_Status =
- inl(devpriv->s_BoardInfos.
- ui_Address + 8 +
- (16 * b_TorCounter) +
- (64 * b_ModulNbr));
-
- if (dw_Status & 0x10) {
- /******************************/
- /* Test the input signal mode */
- /******************************/
-
-				if (b_InputMode == 0 ||
-				    b_InputMode == 1 ||
-				    b_InputMode == APCI1710_TOR_SIMPLE_MODE ||
-				    b_InputMode == APCI1710_TOR_DOUBLE_MODE ||
-				    b_InputMode == APCI1710_TOR_QUADRUPLE_MODE) {
-					/************************************/
-					/* Test the extern gate signal mode */
-					/************************************/
-
-					if (b_ExternGate == 0 ||
-					    b_ExternGate == 1 ||
-					    b_InputMode > 1) {
- /*********************************/
- /* Test the cycle mode parameter */
- /*********************************/
-
- if ((b_CycleMode == APCI1710_SINGLE) || (b_CycleMode == APCI1710_CONTINUOUS)) {
- /***************************/
- /* Test the interrupt flag */
- /***************************/
-
- if ((b_InterruptEnable == APCI1710_ENABLE) || (b_InterruptEnable == APCI1710_DISABLE)) {
-
- /***************************/
- /* Save the interrupt mode */
- /***************************/
-
-						devpriv->s_ModuleInfo[b_ModulNbr].s_TorCounterModuleInfo.s_TorCounterInfo[b_TorCounter].b_InterruptEnable = b_InterruptEnable;
-
- /*******************/
- /* Get the command */
- /*******************/
-
-						dw_ConfigReg = inl(devpriv->s_BoardInfos.ui_Address + 4 + (16 * b_TorCounter) + (64 * b_ModulNbr));
-
-						dw_ConfigReg = (dw_ConfigReg >> 4) & 0x30;
-
- /********************************/
- /* Test if not direct mode used */
- /********************************/
-
- if (b_InputMode > 1) {
- /*******************************/
- /* Extern gate can not be used */
- /*******************************/
-
-							b_ExternGate = 0;
-
-							/*******************************************/
-							/* Enable the extern gate for the Signal B */
-							/*******************************************/
-
-							dw_ConfigReg = dw_ConfigReg | 0x40;
-
-							/***********************/
-							/* Test if simple mode */
-							/***********************/
-
-							if (b_InputMode == APCI1710_TOR_SIMPLE_MODE) {
-								/**************************/
-								/* Enable the simple mode */
-								/**************************/
-
-								dw_ConfigReg = dw_ConfigReg | 0x780;
-							} /* if (b_InputMode == APCI1710_TOR_SIMPLE_MODE) */
-
-							/***********************/
-							/* Test if double mode */
-							/***********************/
-
-							if (b_InputMode == APCI1710_TOR_DOUBLE_MODE) {
-								/**************************/
-								/* Enable the double mode */
-								/**************************/
-
-								dw_ConfigReg = dw_ConfigReg | 0x180;
-							} /* if (b_InputMode == APCI1710_TOR_DOUBLE_MODE) */
-
-							b_InputMode = 0;
-						} /* if (b_InputMode > 1) */
-
-						/*******************/
-						/* Set the command */
-						/*******************/
-
-						dw_ConfigReg = dw_ConfigReg | b_CycleMode | (b_InterruptEnable * 2) | (b_InputMode * 4) | (b_ExternGate * 8);
-
- /*****************************/
- /* Clear the status register */
- /*****************************/
-
-						dw_DummyRead = inl(devpriv->s_BoardInfos.ui_Address + 0 + (16 * b_TorCounter) + (64 * b_ModulNbr));
-
-						/***************************************/
-						/* Clear the interrupt status register */
-						/***************************************/
-
-						dw_DummyRead = inl(devpriv->s_BoardInfos.ui_Address + 12 + (16 * b_TorCounter) + (64 * b_ModulNbr));
-
-						/*******************/
-						/* Set the command */
-						/*******************/
-
- outl(dw_ConfigReg, devpriv->s_BoardInfos.ui_Address + 4 + (16 * b_TorCounter) + (64 * b_ModulNbr));
-
- /****************/
- /* Set the gate */
- /****************/
-
- outl(1, devpriv->s_BoardInfos.ui_Address + 8 + (16 * b_TorCounter) + (64 * b_ModulNbr));
-
- } /* if ((b_InterruptEnable == APCI1710_ENABLE) || (b_InterruptEnable == APCI1710_DISABLE)) */
- else {
- /********************************/
- /* Interrupt parameter is wrong */
- /********************************/
-
- DPRINTK("Interrupt parameter is wrong\n");
-						i_ReturnValue = -9;
- } /* if ((b_InterruptEnable == APCI1710_ENABLE) || (b_InterruptEnable == APCI1710_DISABLE)) */
- } /* if ((b_CycleMode == APCI1710_SINGLE) || (b_CycleMode == APCI1710_CONTINUOUS)) */
- else {
- /***********************************************/
- /* Tor counter acquisition mode cycle is wrong */
- /***********************************************/
-
- DPRINTK("Tor counter acquisition mode cycle is wrong\n");
-						i_ReturnValue = -8;
- } /* if ((b_CycleMode == APCI1710_SINGLE) || (b_CycleMode == APCI1710_CONTINUOUS)) */
- } /* if (b_ExternGate >= 0 && b_ExternGate <= 1) */
- else {
- /***********************************/
- /* Extern gate input mode is wrong */
- /***********************************/
-
- DPRINTK("Extern gate input mode is wrong\n");
-						i_ReturnValue = -7;
- } /* if (b_ExternGate >= 0 && b_ExternGate <= 1) */
- } /* if (b_InputMode >= 0 && b_InputMode <= 1) */
- else {
- /***************************************/
- /* Tor input signal selection is wrong */
- /***************************************/
-
- DPRINTK("Tor input signal selection is wrong\n");
- i_ReturnValue = -6;
- }
- } else {
- /*******************************/
- /* Tor counter not initialised */
- /*******************************/
-
- DPRINTK("Tor counter not initialised\n");
- i_ReturnValue = -5;
- }
- break;
-
- case APCI1710_DISABLE:
- /***********************************/
- /* Test if tor counter initialised */
- /***********************************/
-
- dw_Status = inl(devpriv->s_BoardInfos.
- ui_Address + 8 +
- (16 * b_TorCounter) +
- (64 * b_ModulNbr));
-
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (dw_Status & 0x10) {
- /***************************/
- /* Test if counter enabled */
- /***************************/
-
- if (dw_Status & 0x1) {
- /****************************/
- /* Clear the interrupt mode */
- /****************************/
-					devpriv->s_ModuleInfo[b_ModulNbr].s_TorCounterModuleInfo.s_TorCounterInfo[b_TorCounter].b_InterruptEnable = APCI1710_DISABLE;
-
-					/******************/
-					/* Clear the gate */
-					/******************/
-
-					outl(0, devpriv->s_BoardInfos.ui_Address + 8 + (16 * b_TorCounter) + (64 * b_ModulNbr));
- } /* if (dw_Status & 0x1) */
- else {
- /***************************/
- /* Tor counter not enabled */
- /***************************/
-
- DPRINTK("Tor counter not enabled \n");
- i_ReturnValue = -6;
- } /* if (dw_Status & 0x1) */
- } /* if (dw_Status & 0x10) */
- else {
- /*******************************/
- /* Tor counter not initialised */
- /*******************************/
-
- DPRINTK("Tor counter not initialised\n");
- i_ReturnValue = -5;
- } /* // if (dw_Status & 0x10) */
-
- } /* switch */
- } /* if (b_TorCounter <= 1) */
- else {
- /**********************************/
- /* Tor counter selection is wrong */
- /**********************************/
-
- DPRINTK("Tor counter selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_TorCounter <= 1) */
- } else {
- /******************************************/
- /* The module is not a tor counter module */
- /******************************************/
-
- DPRINTK("The module is not a tor counter module \n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error \n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_GetTorCounterInitialisation |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_TorCounter, |
-| unsigned char *_ pb_TimingUnit, |
-| PULONG_ pul_TimingInterval, |
-| unsigned char *_ pb_InputMode, |
-| unsigned char *_ pb_ExternGate, |
-| unsigned char *_ pb_CycleMode, |
-| unsigned char *_ pb_Enable, |
-| unsigned char *_ pb_InterruptEnable)|
-+----------------------------------------------------------------------------+
-| Task              : Returns the initialisation parameters of the tor       |
-|                     counter (b_TorCounter) from the selected module        |
-|                     (b_ModulNbr). You must call the                        |
-|                     "i_APCI1710_InitTorCounter" function before you call   |
-|                     this function.                                         |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
-|                     unsigned char_ b_TorCounter : Tor counter selection    |
-|                                                   (0 or 1).                |
-
-    b_ModulNbr   = CR_AREF(insn->chanspec);
-    b_TorCounter = CR_CHAN(insn->chanspec);
-                                                                             |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_TimingUnit : Base timing unit (0 to 4) |
-| 0 : ns |
-| 1 : µs |
-| 2 : ms |
-| 3 : s |
-| 4 : mn |
-| PULONG_ pul_TimingInterval : Base timing value. |
-| unsigned char *_ pb_InputMode : Input signal level |
-| selection |
-| 0 : Tor count each low level |
-| 1 : Tor count each high level|
-| unsigned char *_ pb_ExternGate : Extern gate action |
-| selection |
-| 0 : Extern gate signal not |
-| used |
-| 1 : Extern gate signal used|
-| unsigned char *_ pb_CycleMode : Tor counter acquisition |
-| mode |
-| unsigned char *_ pb_Enable : Indicate if the tor counter|
-|                                            is enabled or not               |
-| 0 : Tor counter disabled |
-| 1 : Tor counter enabled |
-| unsigned char *_ pb_InterruptEnable : Enable or disable the |
-| tor counter interrupt. |
-| APCI1710_ENABLE: |
-| Enable the tor counter |
-| interrupt |
-| APCI1710_DISABLE: |
-| Disable the tor counter |
-| interrupt
- pb_TimingUnit = (unsigned char *) &data[0];
- pul_TimingInterval = (unsigned int *) &data[1];
- pb_InputMode = (unsigned char *) &data[2];
- pb_ExternGate = (unsigned char *) &data[3];
- pb_CycleMode = (unsigned char *) &data[4];
- pb_Enable = (unsigned char *) &data[5];
- pb_InterruptEnable = (unsigned char *) &data[6];
- |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a tor counter module |
-| -4: Tor counter selection is wrong |
-| -5: Tor counter not initialised see function |
-| "i_APCI1710_InitTorCounter" |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnReadGetTorCounterInitialisation(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_Status;
- unsigned char b_ModulNbr;
- unsigned char b_TorCounter;
- unsigned char *pb_TimingUnit;
- unsigned int *pul_TimingInterval;
- unsigned char *pb_InputMode;
- unsigned char *pb_ExternGate;
- unsigned char *pb_CycleMode;
- unsigned char *pb_Enable;
- unsigned char *pb_InterruptEnable;
-
- i_ReturnValue = insn->n;
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_TorCounter = CR_CHAN(insn->chanspec);
-
- pb_TimingUnit = (unsigned char *) &data[0];
- pul_TimingInterval = (unsigned int *) &data[1];
- pb_InputMode = (unsigned char *) &data[2];
- pb_ExternGate = (unsigned char *) &data[3];
- pb_CycleMode = (unsigned char *) &data[4];
- pb_Enable = (unsigned char *) &data[5];
- pb_InterruptEnable = (unsigned char *) &data[6];
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if tor counter */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_TOR_COUNTER) {
- /**********************************/
- /* Test the tor counter selection */
- /**********************************/
-
- if (b_TorCounter <= 1) {
-
- /***********************************/
- /* Test if tor counter initialised */
- /***********************************/
-
- dw_Status = inl(devpriv->s_BoardInfos.
- ui_Address + 8 + (16 * b_TorCounter) +
- (64 * b_ModulNbr));
-
- if (dw_Status & 0x10) {
- *pb_Enable = dw_Status & 1;
-
- /********************/
-					/* Get the command  */
- /********************/
-
- dw_Status = inl(devpriv->s_BoardInfos.
- ui_Address + 4 +
- (16 * b_TorCounter) +
- (64 * b_ModulNbr));
-
- *pb_CycleMode =
- (unsigned char) ((dw_Status >> 4) & 1);
- *pb_InterruptEnable =
- (unsigned char) ((dw_Status >> 5) & 1);
-
- /******************************************************/
- /* Test if extern gate used for clock or for signal B */
- /******************************************************/
-
- if (dw_Status & 0x600) {
- /*****************************************/
- /* Test if extern gate used for signal B */
- /*****************************************/
-
- if (dw_Status & 0x400) {
- /***********************/
- /* Test if simple mode */
- /***********************/
-
- if ((dw_Status & 0x7800)
- == 0x7800) {
- *pb_InputMode =
- APCI1710_TOR_SIMPLE_MODE;
- }
-
- /***********************/
- /* Test if double mode */
- /***********************/
-
- if ((dw_Status & 0x7800)
- == 0x1800) {
- *pb_InputMode =
- APCI1710_TOR_DOUBLE_MODE;
- }
-
- /**************************/
- /* Test if quadruple mode */
- /**************************/
-
- if ((dw_Status & 0x7800)
- == 0x0000) {
- *pb_InputMode =
- APCI1710_TOR_QUADRUPLE_MODE;
- }
- } /* if (dw_Status & 0x400) */
- else {
- *pb_InputMode = 1;
- } /* // if (dw_Status & 0x400) */
-
- /************************/
- /* Extern gate not used */
- /************************/
-
- *pb_ExternGate = 0;
- } /* if (dw_Status & 0x600) */
- else {
- *pb_InputMode =
- (unsigned char) ((dw_Status >> 6)
- & 1);
- *pb_ExternGate =
- (unsigned char) ((dw_Status >> 7)
- & 1);
- } /* if (dw_Status & 0x600) */
-
- *pb_TimingUnit =
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_TorCounterModuleInfo.
- s_TorCounterInfo[b_TorCounter].
- b_TimingUnit;
-
- *pul_TimingInterval =
- devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_TorCounterModuleInfo.
- s_TorCounterInfo[b_TorCounter].
- ul_RealTimingInterval;
- } else {
- /*******************************/
- /* Tor counter not initialised */
- /*******************************/
-
- DPRINTK("Tor counter not initialised\n");
- i_ReturnValue = -5;
- }
-
- } /* if (b_TorCounter <= 1) */
- else {
- /**********************************/
- /* Tor counter selection is wrong */
- /**********************************/
-
- DPRINTK("Tor counter selection is wrong \n");
- i_ReturnValue = -4;
- } /* if (b_TorCounter <= 1) */
- } else {
- /******************************************/
- /* The module is not a tor counter module */
- /******************************************/
-
- DPRINTK("The module is not a tor counter module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
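-
-/*
- * Editor's note (hedged sketch, not from the original sources): the read
- * handler above reports the stored configuration back through data[] in the
- * order assigned at the top of the function.  The snippet below only
- * illustrates reading those fields back; the variable names are hypothetical.
- */
-#if 0	/* illustration only */
-	unsigned int cfg[7];	/* filled by the handler above */
-	unsigned char unit     = (unsigned char)cfg[0];	/* 0 ns .. 4 mn      */
-	unsigned int  interval = cfg[1];		/* real timing value */
-	unsigned char enabled  = (unsigned char)cfg[5];	/* gate set or not   */
-#endif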
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadTorCounterValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_TorCounter, |
-| unsigned int_ ui_TimeOut, |
-| unsigned char *_ pb_TorCounterStatus, |
-| PULONG_ pul_TorCounterValue) |
-+----------------------------------------------------------------------------+
-| Task              :                                                        |
-|   case APCI1710_TOR_GETPROGRESSSTATUS:                                     |
-|     Return the tor counter (b_TorCounter) status (pb_TorCounterStatus)     |
-|     from the selected tor counter module (b_ModulNbr).                     |
-|                                                                            |
-|   case APCI1710_TOR_GETCOUNTERVALUE:                                       |
-|     Return the tor counter (b_TorCounter) status (pb_TorCounterStatus)     |
-|     and the timing value (pul_TorCounterValue) after a counting cycle      |
-|     stop from the selected tor counter module (b_ModulNbr).                |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3) |
-| unsigned char_ b_TorCounter : Tor counter selection (0 or 1).
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_ReadType = (unsigned char) data[0];
- b_TorCounter = (unsigned char) data[1];
- ui_TimeOut = (unsigned int) data[2]; |
-+----------------------------------------------------------------------------+
-| Output Parameters : unsigned char *_ pb_TorCounterStatus : Return the tor counter |
-| status. |
-|                                         0 : Counting cycle not started.   |
-|                                             Software gate not set.        |
-|                                         1 : Counting cycle started.       |
-|                                             Software gate set.            |
-|                                         2 : Counting cycle stopped.       |
-|                                             The counting cycle is         |
-|                                             terminated.                   |
-|                                         3 : An overflow occurred. You     |
-|                                             must change the base timing   |
-|                                             with the function             |
-|                                             "i_APCI1710_InitTorCounter".  |
-|                                         4 : Timeout occurred.             |
-| unsigned int * pul_TorCounterValue : Tor counter value.
- pb_TorCounterStatus=(unsigned char *) &data[0];
- pul_TorCounterValue=(unsigned int *) &data[1]; |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: Module selection wrong |
-| -3: The module is not a tor counter module |
-| -4: Tor counter selection is wrong |
-| -5: Tor counter not initialised see function |
-| "i_APCI1710_InitTorCounter" |
-| -6: Tor counter not enabled see function |
-| "i_APCI1710_EnableTorCounter" |
-| -7: Timeout parameter is wrong (0 to 65535) |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnBitsGetTorCounterProgressStatusAndValue(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_Status;
- unsigned int dw_TimeOut = 0;
- unsigned char b_ModulNbr;
- unsigned char b_TorCounter;
- unsigned char b_ReadType;
- unsigned int ui_TimeOut;
- unsigned char *pb_TorCounterStatus;
- unsigned int *pul_TorCounterValue;
-
- i_ReturnValue = insn->n;
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_ReadType = (unsigned char) data[0];
- b_TorCounter = (unsigned char) data[1];
- ui_TimeOut = (unsigned int) data[2];
- pb_TorCounterStatus = (unsigned char *) &data[0];
- pul_TorCounterValue = (unsigned int *) &data[1];
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ReadType == APCI1710_TOR_READINTERRUPT) {
-
- data[0] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].b_OldModuleMask;
- data[1] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].ul_OldInterruptMask;
- data[2] = devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.ui_Read].ul_OldCounterLatchValue;
-
-		/***************************/
-		/* Increment the read FIFO */
-		/***************************/
-
-		devpriv->s_InterruptParameters.ui_Read = (devpriv->s_InterruptParameters.ui_Read + 1) % APCI1710_SAVE_INTERRUPT;
-
- return insn->n;
- }
-
- if (b_ModulNbr < 4) {
- /***********************/
- /* Test if tor counter */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_TOR_COUNTER) {
- /**********************************/
- /* Test the tor counter selection */
- /**********************************/
-
- if (b_TorCounter <= 1) {
- /***********************************/
- /* Test if tor counter initialised */
- /***********************************/
-
- dw_Status = inl(devpriv->s_BoardInfos.
- ui_Address + 8 + (16 * b_TorCounter) +
- (64 * b_ModulNbr));
-
- /*******************************/
- /* Test if counter initialised */
- /*******************************/
-
- if (dw_Status & 0x10) {
- /***************************/
- /* Test if counter enabled */
- /***************************/
-
- if (dw_Status & 0x1) {
-
- switch (b_ReadType) {
-
- case APCI1710_TOR_GETPROGRESSSTATUS:
- /*******************/
- /* Read the status */
- /*******************/
-
-					dw_Status = inl(devpriv->s_BoardInfos.ui_Address + 4 + (16 * b_TorCounter) + (64 * b_ModulNbr));
-
-					dw_Status = dw_Status & 0xF;
-
-					/*****************/
-					/* Test if start */
-					/*****************/
-
-					if (dw_Status & 1) {
-						if (dw_Status & 2) {
-							if (dw_Status & 4) {
-								/************************/
-								/* Tor counter overflow */
-								/************************/
-
-								*pb_TorCounterStatus = 3;
-							} else {
-								/***********************/
-								/* Tor counter stopped */
-								/***********************/
-
-								*pb_TorCounterStatus = 2;
-							}
-						} else {
-							/***********************/
-							/* Tor counter started */
-							/***********************/
-
-							*pb_TorCounterStatus = 1;
-						}
-					} else {
-						/***************************/
-						/* Tor counter not started */
-						/***************************/
-
-						*pb_TorCounterStatus = 0;
-					}
- break;
-
- case APCI1710_TOR_GETCOUNTERVALUE:
-
- /*****************************/
-					/* Test the timeout parameter */
- /*****************************/
-
-					if (ui_TimeOut <= 65535UL) {
-						for (;;) {
-							/*******************/
-							/* Read the status */
-							/*******************/
-
-							dw_Status = inl(devpriv->s_BoardInfos.ui_Address + 4 + (16 * b_TorCounter) + (64 * b_ModulNbr));
-
-							/********************/
-							/* Test if overflow */
-							/********************/
-
-							if ((dw_Status & 4) == 4) {
-								/*********************/
-								/* Overflow occurred */
-								/*********************/
-
-								*pb_TorCounterStatus = 3;
-
-								/******************/
-								/* Read the value */
-								/******************/
-
-								*pul_TorCounterValue = inl(devpriv->s_BoardInfos.ui_Address + 0 + (16 * b_TorCounter) + (64 * b_ModulNbr));
-								break;
-							} /* if ((dw_Status & 4) == 4) */
-							else {
-								/*******************************/
-								/* Test if measurement stopped */
-								/*******************************/
-
-								if ((dw_Status & 2) == 2) {
-									/**************************/
-									/* A stop signal occurred */
-									/**************************/
-
-									*pb_TorCounterStatus = 2;
-
-									/******************/
-									/* Read the value */
-									/******************/
-
-									*pul_TorCounterValue = inl(devpriv->s_BoardInfos.ui_Address + 0 + (16 * b_TorCounter) + (64 * b_ModulNbr));
-									break;
-								} /* if ((dw_Status & 2) == 2) */
-								else {
-									/*******************************/
-									/* Test if measurement started */
-									/*******************************/
-
-									if ((dw_Status & 1) == 1) {
-										/***************************/
-										/* A start signal occurred */
-										/***************************/
-
-										*pb_TorCounterStatus = 1;
-									} /* if ((dw_Status & 1) == 1) */
-									else {
-										/***************************/
-										/* Measurement not started */
-										/***************************/
-
-										*pb_TorCounterStatus = 0;
-									} /* if ((dw_Status & 1) == 1) */
-								} /* if ((dw_Status & 2) == 2) */
-							} /* if ((dw_Status & 4) == 4) */
-
-							if (dw_TimeOut == ui_TimeOut) {
-								/********************/
-								/* Timeout occurred */
-								/********************/
-
-								break;
-							} else {
-								/*************************/
-								/* Increment the timeout */
-								/*************************/
-
-								dw_TimeOut = dw_TimeOut + 1;
-								mdelay(1000);
-							}
-						} /* for (;;) */
-
-						/****************************/
-						/* Test if timeout occurred */
-						/****************************/
-
-						if ((*pb_TorCounterStatus != 3) && (dw_TimeOut == ui_TimeOut) && (ui_TimeOut != 0)) {
-							/********************/
-							/* Timeout occurred */
-							/********************/
-
-							*pb_TorCounterStatus = 4;
-						}
- } else {
- /******************************/
- /* Timeout parameter is wrong */
- /******************************/
-
- DPRINTK("Timeout parameter is wrong\n");
-						i_ReturnValue = -7;
- }
- break;
-
- default:
- printk("Inputs wrong\n");
- } /* switch end */
- } /* if (dw_Status & 0x1) */
- else {
- /***************************/
- /* Tor counter not enabled */
- /***************************/
-
- DPRINTK("Tor counter not enabled\n");
- i_ReturnValue = -6;
- } /* if (dw_Status & 0x1) */
- } else {
- /*******************************/
- /* Tor counter not initialised */
- /*******************************/
-
- DPRINTK("Tor counter not initialised\n");
- i_ReturnValue = -5;
- }
- } /* if (b_TorCounter <= 1) */
- else {
- /**********************************/
- /* Tor counter selection is wrong */
- /**********************************/
-
- DPRINTK("Tor counter selection is wrong\n");
- i_ReturnValue = -4;
- } /* if (b_TorCounter <= 1) */
- } else {
- /******************************************/
- /* The module is not a tor counter module */
- /******************************************/
-
- DPRINTK("The module is not a tor counter module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
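-
-/*
- * Editor's note (hedged sketch, not from the original sources): callers of
- * the handler above pass the read type, counter and timeout in data[0..2];
- * on return data[0] holds the progress status (0..4 as documented) and
- * data[1] the latched counter value.  The array below is only an
- * illustration; the timeout unit is roughly seconds, given the mdelay(1000)
- * per poll iteration above.
- */
-#if 0	/* illustration only */
-	unsigned int read_data[3] = {
-		APCI1710_TOR_GETCOUNTERVALUE,	/* data[0]: read type          */
-		0,				/* data[1]: tor counter 0 or 1 */
-		5,				/* data[2]: timeout (~seconds) */
-	};
-	/* after the call: read_data[0] = status, read_data[1] = value */
-#endif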
diff --git a/drivers/staging/comedi/drivers/addi-data/APCI1710_Ttl.c b/drivers/staging/comedi/drivers/addi-data/APCI1710_Ttl.c
deleted file mode 100644
index fb56360444e..00000000000
--- a/drivers/staging/comedi/drivers/addi-data/APCI1710_Ttl.c
+++ /dev/null
@@ -1,1044 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
-	Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-@endverbatim
-*/
-/*
-
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-----------------------------------------------------------------------+
- | Project : API APCI1710 | Compiler : gcc |
- | Module name : TTL.C | Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-----------------------------------------------------------------------+
- | Description : APCI-1710 TTL I/O module |
- | |
- | |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +-----------------------------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | 13/05/98 | S. Weber | TTL digital input / output implementation |
- |----------|-----------|------------------------------------------------|
- | 08/05/00 | Guinot C | - 0400/0228 All Function in RING 0 |
- | | | available |
- +-----------------------------------------------------------------------+
- | | | |
- | | | |
- +-----------------------------------------------------------------------+
-*/
-
-#define APCI1710_TTL_INIT 0
-#define APCI1710_TTL_INITDIRECTION 1
-
-#define APCI1710_TTL_READCHANNEL 0
-#define APCI1710_TTL_READPORT 1
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_InitTTLIODirection |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_PortAMode, |
-| unsigned char_ b_PortBMode, |
-| unsigned char_ b_PortCMode, |
-| unsigned char_ b_PortDMode) |
-+----------------------------------------------------------------------------+
-| Task               :                                                       |
-|   APCI1710_TTL_INIT (using defaults) : Configure the TTL I/O operating     |
-|       mode of the selected module (b_ModulNbr). You must call this         |
-|       function before you call any other function that accesses the TTL.   |
-|   APCI1710_TTL_INITDIRECTION (user inputs for direction)                   |
-
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710|
-| unsigned char_ b_ModulNbr : Module number to |
-| configure (0 to 3)
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_InitType = (unsigned char) data[0];
- b_PortAMode = (unsigned char) data[1];
- b_PortBMode = (unsigned char) data[2];
- b_PortCMode = (unsigned char) data[3];
- b_PortDMode = (unsigned char) data[4];|
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a TTL module |
-| -4: Function not available for this version |
-| -5: Port A mode selection is wrong |
-| -6: Port B mode selection is wrong |
-| -7: Port C mode selection is wrong |
-| -8: Port D mode selection is wrong |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnConfigInitTTLIO(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned char b_ModulNbr;
- unsigned char b_InitType;
- unsigned char b_PortAMode;
- unsigned char b_PortBMode;
- unsigned char b_PortCMode;
- unsigned char b_PortDMode;
-
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- b_InitType = (unsigned char) data[0];
- i_ReturnValue = insn->n;
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /**************************/
- /* Test if TTL I/O module */
- /**************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_TTL_IO) {
- switch (b_InitType) {
- case APCI1710_TTL_INIT:
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.b_TTLInit = 1;
-
- /***************************/
- /* Set TTL port A to input */
- /***************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.b_PortConfiguration[0] = 0;
-
- /***************************/
- /* Set TTL port B to input */
- /***************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.b_PortConfiguration[1] = 0;
-
- /***************************/
- /* Set TTL port C to input */
- /***************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.b_PortConfiguration[2] = 0;
-
- /****************************/
- /* Set TTL port D to output */
- /****************************/
-
- devpriv->s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.b_PortConfiguration[3] = 1;
-
- /*************************/
- /* Set the configuration */
- /*************************/
-
- outl(0x8,
- devpriv->s_BoardInfos.ui_Address + 20 +
- (64 * b_ModulNbr));
- break;
-
- case APCI1710_TTL_INITDIRECTION:
-
- b_PortAMode = (unsigned char) data[1];
- b_PortBMode = (unsigned char) data[2];
- b_PortCMode = (unsigned char) data[3];
- b_PortDMode = (unsigned char) data[4];
-
- /********************/
- /* Test the version */
- /********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration
- [b_ModulNbr] & 0xFFFF) >=
- 0x3230) {
- /************************/
- /* Test the port A mode */
- /************************/
-
- if ((b_PortAMode == 0)
- || (b_PortAMode == 1)) {
- /************************/
- /* Test the port B mode */
- /************************/
-
- if ((b_PortBMode == 0)
- || (b_PortBMode == 1)) {
- /************************/
- /* Test the port C mode */
- /************************/
-
- if ((b_PortCMode == 0)
- || (b_PortCMode
- == 1)) {
- /************************/
- /* Test the port D mode */
- /************************/
-
- if ((b_PortDMode == 0) || (b_PortDMode == 1)) {
-								devpriv->s_ModuleInfo[b_ModulNbr].s_TTLIOInfo.b_TTLInit = 1;
-
-								/***********************/
-								/* Set TTL port A mode */
-								/***********************/
-
-								devpriv->s_ModuleInfo[b_ModulNbr].s_TTLIOInfo.b_PortConfiguration[0] = b_PortAMode;
-
-								/***********************/
-								/* Set TTL port B mode */
-								/***********************/
-
-								devpriv->s_ModuleInfo[b_ModulNbr].s_TTLIOInfo.b_PortConfiguration[1] = b_PortBMode;
-
-								/***********************/
-								/* Set TTL port C mode */
-								/***********************/
-
-								devpriv->s_ModuleInfo[b_ModulNbr].s_TTLIOInfo.b_PortConfiguration[2] = b_PortCMode;
-
-								/***********************/
-								/* Set TTL port D mode */
-								/***********************/
-
-								devpriv->s_ModuleInfo[b_ModulNbr].s_TTLIOInfo.b_PortConfiguration[3] = b_PortDMode;
-
-								/*************************/
-								/* Set the configuration */
-								/*************************/
-
-								outl((b_PortAMode << 0) | (b_PortBMode << 1) | (b_PortCMode << 2) | (b_PortDMode << 3), devpriv->s_BoardInfos.ui_Address + 20 + (64 * b_ModulNbr));
-							} else {
-								/**********************************/
-								/* Port D mode selection is wrong */
-								/**********************************/
-
-								DPRINTK("Port D mode selection is wrong\n");
-								i_ReturnValue = -8;
-							}
- }
- } else {
- /**********************************/
- /* Port C mode selection is wrong */
- /**********************************/
-
- DPRINTK("Port C mode selection is wrong\n");
-							i_ReturnValue = -7;
- }
- } else {
- /**********************************/
- /* Port B mode selection is wrong */
- /**********************************/
-
- DPRINTK("Port B mode selection is wrong\n");
- i_ReturnValue = -6;
- }
- } else {
- /**********************************/
- /* Port A mode selection is wrong */
- /**********************************/
-
- DPRINTK("Port A mode selection is wrong\n");
- i_ReturnValue = -5;
- }
- } else {
- /*******************************************/
- /* Function not available for this version */
- /*******************************************/
-
- DPRINTK("Function not available for this version\n");
- i_ReturnValue = -4;
- }
- break;
-
- DPRINTK("\n");
- default:
- printk("Bad Config Type\n");
- } /* switch end */
- } else {
- /**********************************/
- /* The module is not a TTL module */
- /**********************************/
-
- DPRINTK("The module is not a TTL module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
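-
-/*
- * Editor's note (hedged sketch, not from the original sources): the config
- * handler above either applies the default direction (ports A-C input,
- * port D output) or takes one bit per port from data[1..4] and writes the
- * packed value (A<<0 | B<<1 | C<<2 | D<<3) to register offset 20 of the
- * module.  The array below is only an illustration of that layout.
- */
-#if 0	/* illustration only */
-	unsigned int ttl_config[5] = {
-		APCI1710_TTL_INITDIRECTION,	/* data[0]: init type          */
-		0,				/* data[1]: port A, 0 = input  */
-		0,				/* data[2]: port B, 0 = input  */
-		1,				/* data[3]: port C, 1 = output */
-		1,				/* data[4]: port D, 1 = output */
-	};
-#endif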
-
-/*
-+----------------------------------------------------------------------------+
-| INPUT FUNCTIONS |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_ReadTTLIOChannelValue |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_SelectedPort, |
-| unsigned char_ b_InputChannel, |
-| unsigned char *_ pb_ChannelStatus) |
-+----------------------------------------------------------------------------+
-| Task : Read the status from selected TTL digital input |
-| (b_InputChannel)
-+----------------------------------------------------------------------------+
-| Task : Read the status from digital input port |
-| (b_SelectedPort) from selected TTL module (b_ModulNbr) |
-+----------------------------------------------------------------------------+
-
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710|
-| unsigned char_ b_ModulNbr : Module number to |
-|                                           configure (0 to 3)               |
-| unsigned char_ b_SelectedPort, : Selection from TTL I/O |
-| port (0 to 2) |
-| 0 : Port A selection |
-| 1 : Port B selection |
-| 2 : Port C selection |
-| 3 : Port D selection |
-| unsigned char_ b_InputChannel : Selection from digital |
-| input ( 0 to 2)
-APCI1710_TTL_READCHANNEL
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_SelectedPort= CR_RANGE(insn->chanspec);
- b_InputChannel= CR_CHAN(insn->chanspec);
- b_ReadType = (unsigned char) data[0];
-
- APCI1710_TTL_READPORT|
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_SelectedPort= CR_RANGE(insn->chanspec);
- b_ReadType = (unsigned char) data[0];
-
-+----------------------------------------------------------------------------+
-| Output Parameters : data[0]
-
- unsigned char *_ pb_ChannelStatus : Digital input channel |
-| status |
-|                                           0 : Channel is not active        |
-|                                           1 : Channel is active            |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a TTL module |
-| -4: The selected TTL input port is wrong |
-| -5: The selected TTL digital input is wrong |
-| -6: TTL I/O not initialised |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnBitsReadTTLIO(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg;
- unsigned char b_ModulNbr;
- unsigned char b_SelectedPort;
- unsigned char b_InputChannel;
- unsigned char b_ReadType;
- unsigned char *pb_ChannelStatus;
- unsigned char *pb_PortValue;
-
- i_ReturnValue = insn->n;
- b_ReadType = (unsigned char) data[0];
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_SelectedPort = CR_RANGE(insn->chanspec);
- b_InputChannel = CR_CHAN(insn->chanspec);
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /**************************/
- /* Test if TTL I/O module */
- /**************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_TTL_IO) {
- switch (b_ReadType) {
-
- case APCI1710_TTL_READCHANNEL:
- pb_ChannelStatus = (unsigned char *) &data[0];
- /********************************/
- /* Test the TTL I/O port number */
- /********************************/
-
- if (((b_SelectedPort <= 2)
- && ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration
- [b_ModulNbr] &
- 0xFFFF) ==
- 0x3130))
- || ((b_SelectedPort <= 3)
- && ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration
- [b_ModulNbr] &
- 0xFFFF) >=
- 0x3230))) {
- /******************************************/
-				/* Test the digital input channel number  */
- /******************************************/
-
- if (((b_InputChannel <= 7)
- && (b_SelectedPort < 3))
- || ((b_InputChannel <= 1)
- && (b_SelectedPort ==
- 3))) {
- /******************************************/
- /* Test if the TTL I/O module initialised */
- /******************************************/
-
- if (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_TTLIOInfo.b_TTLInit ==
- 1) {
- /***********************************/
- /* Test if TTL port used for input */
- /***********************************/
-
- if (((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) == 0x3130) || (((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) >= 0x3230) && (devpriv->s_ModuleInfo[b_ModulNbr].s_TTLIOInfo.b_PortConfiguration[b_SelectedPort] == 0))) {
- /**************************/
- /* Read all digital input */
- /**************************/
-
-						dw_StatusReg = inl(devpriv->s_BoardInfos.ui_Address + (64 * b_ModulNbr));
-
-						*pb_ChannelStatus = (unsigned char) ((dw_StatusReg >> (8 * b_SelectedPort)) >> b_InputChannel) & 1;
- } else {
- /*******************************/
- /* Selected TTL I/O port error */
- /*******************************/
-
- DPRINTK("Selected TTL I/O port error\n");
-						i_ReturnValue = -4;
- }
- } else {
- /***************************/
- /* TTL I/O not initialised */
- /***************************/
-
- DPRINTK("TTL I/O not initialised\n");
- i_ReturnValue = -6;
- }
- } else {
- /********************************/
- /* Selected digital input error */
- /********************************/
-
- DPRINTK("Selected digital input error\n");
- i_ReturnValue = -5;
- }
- } else {
- /*******************************/
- /* Selected TTL I/O port error */
- /*******************************/
-
- DPRINTK("Selected TTL I/O port error\n");
- i_ReturnValue = -4;
- }
- break;
-
- case APCI1710_TTL_READPORT:
- pb_PortValue = (unsigned char *) &data[0];
- /********************************/
- /* Test the TTL I/O port number */
- /********************************/
-
- if (((b_SelectedPort <= 2)
- && ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration
- [b_ModulNbr] &
- 0xFFFF) ==
- 0x3130))
- || ((b_SelectedPort <= 3)
- && ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration
- [b_ModulNbr] &
- 0xFFFF) >=
- 0x3230))) {
- /******************************************/
- /* Test if the TTL I/O module initialised */
- /******************************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.b_TTLInit == 1) {
- /***********************************/
- /* Test if TTL port used for input */
- /***********************************/
-
-					if (((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) == 0x3130)
-					    || (((devpriv->s_BoardInfos.dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF) >= 0x3230) && (devpriv->s_ModuleInfo[b_ModulNbr].s_TTLIOInfo.b_PortConfiguration[b_SelectedPort] == 0))) {
- /**************************/
- /* Read all digital input */
- /**************************/
-
- dw_StatusReg =
- inl(devpriv->
- s_BoardInfos.
- ui_Address +
- (64 * b_ModulNbr));
-
- *pb_PortValue =
- (unsigned char) (
- (dw_StatusReg >>
- (8 * b_SelectedPort)) & 0xFF);
- } else {
- /*******************************/
- /* Selected TTL I/O port error */
- /*******************************/
-
- DPRINTK("Selected TTL I/O port error\n");
- i_ReturnValue = -4;
- }
- } else {
- /***************************/
- /* TTL I/O not initialised */
- /***************************/
-
- DPRINTK("TTL I/O not initialised\n");
- i_ReturnValue = -5;
- }
- } else {
- /*******************************/
- /* Selected TTL I/O port error */
- /*******************************/
-
- DPRINTK("Selected TTL I/O port error\n");
- i_ReturnValue = -4;
- }
- break;
-
- default:
- printk("Bad ReadType\n");
-
- } /* End Switch */
- } else {
- /**********************************/
- /* The module is not a TTL module */
- /**********************************/
-
- DPRINTK("The module is not a TTL module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
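-
-/*
- * Editor's note (hedged sketch, not from the original sources): the bits
- * handler above takes the module in CR_AREF, the port in CR_RANGE and the
- * channel in CR_CHAN of the chanspec, with data[0] selecting the read type
- * and receiving the result.  The packing below is only an illustration.
- */
-#if 0	/* illustration only */
-	unsigned int chanspec = CR_PACK(2 /* channel */, 1 /* port B */,
-					0 /* module */);
-	unsigned int read_data[1] = { APCI1710_TTL_READCHANNEL };
-	/* after the call: read_data[0] = channel status (0 or 1) */
-#endif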
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : int i_APCI1710_InsnReadTTLIOAllPortValue(comedi_device
-*dev,struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task : Read the status from all digital input ports |
-| (port A, port B and port C) from selected TTL |
-| module (b_ModulNbr) |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710|
-| unsigned char_ b_ModulNbr : Module number to |
-| configure (0 to 3) |
-+----------------------------------------------------------------------------+
-| Output Parameters : PULONG_ pul_PortValue : Digital TTL inputs port |
-| status |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a TTL module |
-| -4: TTL I/O not initialised |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnReadTTLIOAllPortValue(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg;
- unsigned char b_ModulNbr;
- unsigned int *pul_PortValue;
-
- b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec);
- i_ReturnValue = insn->n;
- pul_PortValue = (unsigned int *) &data[0];
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /**************************/
- /* Test if TTL I/O module */
- /**************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_TTL_IO) {
- /******************************************/
- /* Test if the TTL I/O module initialised */
- /******************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.b_TTLInit == 1) {
- /**************************/
- /* Read all digital input */
- /**************************/
-
- dw_StatusReg = inl(devpriv->s_BoardInfos.
- ui_Address + (64 * b_ModulNbr));
-
- /**********************/
- /* Test if TTL Rev1.0 */
- /**********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration
- [b_ModulNbr] & 0xFFFF) ==
- 0x3130) {
- *pul_PortValue =
- dw_StatusReg & 0xFFFFFFUL;
- } else {
- /**************************************/
- /* Test if port A not used for output */
- /**************************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.
- b_PortConfiguration[0] == 1) {
- *pul_PortValue =
- dw_StatusReg &
- 0x3FFFF00UL;
- }
-
- /**************************************/
- /* Test if port B not used for output */
- /**************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.
- b_PortConfiguration[1] == 1) {
- *pul_PortValue =
- dw_StatusReg &
- 0x3FF00FFUL;
- }
-
- /**************************************/
- /* Test if port C not used for output */
- /**************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.
- b_PortConfiguration[2] == 1) {
- *pul_PortValue =
- dw_StatusReg &
- 0x300FFFFUL;
- }
-
- /**************************************/
- /* Test if port D not used for output */
- /**************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.
- b_PortConfiguration[3] == 1) {
- *pul_PortValue =
- dw_StatusReg &
- 0xFFFFFFUL;
- }
- }
- } else {
- /***************************/
- /* TTL I/O not initialised */
- /***************************/
- DPRINTK("TTL I/O not initialised\n");
- i_ReturnValue = -5;
- }
- } else {
- /**********************************/
- /* The module is not a TTL module */
- /**********************************/
- DPRINTK("The module is not a TTL module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| OUTPUT FUNCTIONS |
-+----------------------------------------------------------------------------+
-*/
-
-/*
-+----------------------------------------------------------------------------+
-| Function Name : _INT_ i_APCI1710_SetTTLIOChlOn |
-| (unsigned char_ b_BoardHandle, |
-| unsigned char_ b_ModulNbr, |
-| unsigned char_ b_OutputChannel)
-int i_APCI1710_InsnWriteSetTTLIOChlOnOff(struct comedi_device *dev,struct comedi_subdevice *s,
- struct comedi_insn *insn,unsigned int *data) |
-+----------------------------------------------------------------------------+
-| Task              : Sets or resets the output which has been passed with the |
-| parameter b_Channel. Setting an output means setting |
-|                     an output high.                                          |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 |
-| unsigned char_ b_ModulNbr : Selected module number (0 to 3)|
-| unsigned char_ b_OutputChannel : Selection from digital output |
-| channel (0 or 1) |
-| 0 : PD0 |
-| 1 : PD1 |
-| 2 to 9 : PA |
-| 10 to 17: PB |
-| 18 to 25: PC |
-
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_OutputChannel= CR_CHAN(insn->chanspec);
- ui_State = data[0]; /* ON or OFF */
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0: No error |
-| -1: The handle parameter of the board is wrong |
-| -2: The module parameter is wrong |
-| -3: The module is not a TTL I/O module |
-| -4: The selected digital output is wrong |
-|                     -5: TTL I/O not initialised, see function               |
-|                         "i_APCI1710_InitTTLIO"                              |
-+----------------------------------------------------------------------------+
-*/
-
-static int i_APCI1710_InsnWriteSetTTLIOChlOnOff(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
-{
- struct addi_private *devpriv = dev->private;
- int i_ReturnValue = 0;
- unsigned int dw_StatusReg = 0;
- unsigned char b_ModulNbr;
- unsigned char b_OutputChannel;
- unsigned int ui_State;
-
- i_ReturnValue = insn->n;
- b_ModulNbr = CR_AREF(insn->chanspec);
- b_OutputChannel = CR_CHAN(insn->chanspec);
- ui_State = data[0]; /* ON or OFF */
-
- /**************************/
- /* Test the module number */
- /**************************/
-
- if (b_ModulNbr < 4) {
- /**************************/
- /* Test if TTL I/O module */
- /**************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModulNbr] &
- 0xFFFF0000UL) == APCI1710_TTL_IO) {
- /******************************************/
- /* Test if the TTL I/O module initialised */
- /******************************************/
-
- if (devpriv->s_ModuleInfo[b_ModulNbr].
- s_TTLIOInfo.b_TTLInit == 1) {
- /***********************************/
- /* Test the TTL I/O channel number */
- /***********************************/
-
- if (((b_OutputChannel <= 1)
- && ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration
- [b_ModulNbr] &
- 0xFFFF) ==
- 0x3130))
- || ((b_OutputChannel <= 25)
- && ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration
- [b_ModulNbr] &
- 0xFFFF) >=
- 0x3230))) {
- /****************************************************/
- /* Test if the selected channel is a output channel */
- /****************************************************/
-
- if (((b_OutputChannel <= 1)
- && (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_TTLIOInfo.
- b_PortConfiguration
- [3] == 1))
- || ((b_OutputChannel >= 2)
- && (b_OutputChannel <=
- 9)
- && (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_TTLIOInfo.
- b_PortConfiguration
- [0] == 1))
- || ((b_OutputChannel >= 10)
- && (b_OutputChannel <=
- 17)
- && (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_TTLIOInfo.
- b_PortConfiguration
- [1] == 1))
- || ((b_OutputChannel >= 18)
- && (b_OutputChannel <=
- 25)
- && (devpriv->
- s_ModuleInfo
- [b_ModulNbr].
- s_TTLIOInfo.
- b_PortConfiguration
- [2] == 1))) {
- /************************/
- /* Test if PD0 selected */
- /************************/
-
- if (b_OutputChannel == 0) {
-
- outl(ui_State,
- devpriv->
- s_BoardInfos.
- ui_Address +
- (64 * b_ModulNbr));
- } else {
- /************************/
- /* Test if PD1 selected */
- /************************/
-
- if (b_OutputChannel ==
- 1) {
-
- outl(ui_State,
- devpriv->
- s_BoardInfos.
- ui_Address
- + 4 +
- (64 * b_ModulNbr));
- } else {
- b_OutputChannel
- =
- b_OutputChannel
- - 2;
-
- /********************/
- /* Read all channel */
- /********************/
-
- dw_StatusReg =
- inl
- (devpriv->
- s_BoardInfos.
- ui_Address
- +
- (64 * b_ModulNbr));
- if (ui_State) /* ON */
- {
- dw_StatusReg
- =
- (dw_StatusReg
- >>
- ((b_OutputChannel / 8) * 8)) & 0xFF;
- dw_StatusReg
- =
- dw_StatusReg
- |
- (1
- <<
- (b_OutputChannel
- %
- 8));
- } else /* Off */
- {
- dw_StatusReg
- =
- (dw_StatusReg
- >>
- ((b_OutputChannel / 8) * 8)) & 0xFF;
- dw_StatusReg
- =
- dw_StatusReg
- &
- (0xFF
- -
- (1 << (b_OutputChannel % 8)));
-
- }
-
- /****************************/
- /* Set the new output value */
- /****************************/
-
- outl(dw_StatusReg, devpriv->s_BoardInfos.ui_Address + 8 + ((b_OutputChannel / 8) * 4) + (64 * b_ModulNbr));
- }
- }
- } else {
- /************************************/
- /* The selected TTL output is wrong */
- /************************************/
-
- DPRINTK(" The selected TTL output is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /************************************/
- /* The selected TTL output is wrong */
- /************************************/
-
- DPRINTK("The selected TTL output is wrong\n");
- i_ReturnValue = -4;
- }
- } else {
- /***************************/
- /* TTL I/O not initialised */
- /***************************/
-
- DPRINTK("TTL I/O not initialised\n");
- i_ReturnValue = -5;
- }
- } else {
- /**************************************/
- /* The module is not a TTL I/O module */
- /**************************************/
-
- DPRINTK("The module is not a TTL I/O module\n");
- i_ReturnValue = -3;
- }
- } else {
- /***********************/
- /* Module number error */
- /***********************/
-
- DPRINTK("Module number error\n");
- i_ReturnValue = -2;
- }
-
- return i_ReturnValue;
-}
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_common.c b/drivers/staging/comedi/drivers/addi-data/addi_common.c
index f25e0085219..63dff7729ea 100644
--- a/drivers/staging/comedi/drivers/addi-data/addi_common.c
+++ b/drivers/staging/comedi/drivers/addi-data/addi_common.c
@@ -85,10 +85,9 @@ static int addi_auto_attach(struct comedi_device *dev,
dev->board_name = this_board->pc_DriverName;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_common.h b/drivers/staging/comedi/drivers/addi-data/addi_common.h
index f1be5ade996..dfd1e666cc1 100644
--- a/drivers/staging/comedi/drivers/addi-data/addi_common.h
+++ b/drivers/staging/comedi/drivers/addi-data/addi_common.h
@@ -113,150 +113,6 @@ struct addi_board {
struct comedi_insn *, unsigned int *);
};
-/* MODULE INFO STRUCTURE */
-
-union str_ModuleInfo {
- /* Incremental counter infos */
- struct {
- union {
- struct {
- unsigned char b_ModeRegister1;
- unsigned char b_ModeRegister2;
- unsigned char b_ModeRegister3;
- unsigned char b_ModeRegister4;
- } s_ByteModeRegister;
- unsigned int dw_ModeRegister1_2_3_4;
- } s_ModeRegister;
-
- struct {
- unsigned int b_IndexInit:1;
- unsigned int b_CounterInit:1;
- unsigned int b_ReferenceInit:1;
- unsigned int b_IndexInterruptOccur:1;
- unsigned int b_CompareLogicInit:1;
- unsigned int b_FrequencyMeasurementInit:1;
- unsigned int b_FrequencyMeasurementEnable:1;
- } s_InitFlag;
-
- } s_SiemensCounterInfo;
-
- /* SSI infos */
- struct {
- unsigned char b_SSIProfile;
- unsigned char b_PositionTurnLength;
- unsigned char b_TurnCptLength;
- unsigned char b_SSIInit;
- } s_SSICounterInfo;
-
- /* TTL I/O infos */
- struct {
- unsigned char b_TTLInit;
- unsigned char b_PortConfiguration[4];
- } s_TTLIOInfo;
-
- /* Digital I/O infos */
- struct {
- unsigned char b_DigitalInit;
- unsigned char b_ChannelAMode;
- unsigned char b_ChannelBMode;
- unsigned char b_OutputMemoryEnabled;
- unsigned int dw_OutputMemory;
- } s_DigitalIOInfo;
-
- /*********************/
- /* 82X54 timer infos */
- /*********************/
-
- struct {
- struct {
- unsigned char b_82X54Init;
- unsigned char b_InputClockSelection;
- unsigned char b_InputClockLevel;
- unsigned char b_OutputLevel;
- unsigned char b_HardwareGateLevel;
- unsigned int dw_ConfigurationWord;
- } s_82X54TimerInfo[3];
- unsigned char b_InterruptMask;
- } s_82X54ModuleInfo;
-
- /*********************/
- /* Chronometer infos */
- /*********************/
-
- struct {
- unsigned char b_ChronoInit;
- unsigned char b_InterruptMask;
- unsigned char b_PCIInputClock;
- unsigned char b_TimingUnit;
- unsigned char b_CycleMode;
- double d_TimingInterval;
- unsigned int dw_ConfigReg;
- } s_ChronoModuleInfo;
-
- /***********************/
- /* Pulse encoder infos */
- /***********************/
-
- struct {
- struct {
- unsigned char b_PulseEncoderInit;
- } s_PulseEncoderInfo[4];
- unsigned int dw_SetRegister;
- unsigned int dw_ControlRegister;
- unsigned int dw_StatusRegister;
- } s_PulseEncoderModuleInfo;
-
-	/* Tor counter infos */
- struct {
- struct {
- unsigned char b_TorCounterInit;
- unsigned char b_TimingUnit;
- unsigned char b_InterruptEnable;
- double d_TimingInterval;
- unsigned int ul_RealTimingInterval;
- } s_TorCounterInfo[2];
- unsigned char b_PCIInputClock;
- } s_TorCounterModuleInfo;
-
- /* PWM infos */
- struct {
- struct {
- unsigned char b_PWMInit;
- unsigned char b_TimingUnit;
- unsigned char b_InterruptEnable;
- double d_LowTiming;
- double d_HighTiming;
- unsigned int ul_RealLowTiming;
- unsigned int ul_RealHighTiming;
- } s_PWMInfo[2];
- unsigned char b_ClockSelection;
- } s_PWMModuleInfo;
-
- /* ETM infos */
- struct {
- struct {
- unsigned char b_ETMEnable;
- unsigned char b_ETMInterrupt;
- } s_ETMInfo[2];
- unsigned char b_ETMInit;
- unsigned char b_TimingUnit;
- unsigned char b_ClockSelection;
- double d_TimingInterval;
- unsigned int ul_Timing;
- } s_ETMModuleInfo;
-
- /* CDA infos */
- struct {
- unsigned char b_CDAEnable;
- unsigned char b_CDAInterrupt;
- unsigned char b_CDAInit;
- unsigned char b_FctSelection;
- unsigned char b_CDAReadFIFOOverflow;
- } s_CDAModuleInfo;
-
-};
-
-/* Private structure for the addi_apci3120 driver */
struct addi_private {
int iobase;
int i_IobaseAmcc; /* base+size for AMCC chip */
@@ -299,31 +155,6 @@ struct addi_private {
/* Pointer to the current process */
struct task_struct *tsk_Current;
- /* Hardware board infos for 1710 */
- struct {
- unsigned int ui_Address; /* Board address */
- unsigned int ui_FlashAddress;
- unsigned char b_InterruptNbr; /* Board interrupt number */
- unsigned char b_SlotNumber; /* PCI slot number */
- unsigned char b_BoardVersion;
- unsigned int dw_MolduleConfiguration[4]; /* Module config */
- } s_BoardInfos;
-
- /* Interrupt infos */
- struct {
- unsigned int ul_InterruptOccur; /* 0 : No interrupt occur */
- /* > 0 : Interrupt occur */
- unsigned int ui_Read; /* Read FIFO */
- unsigned int ui_Write; /* Write FIFO */
- struct {
- unsigned char b_OldModuleMask;
- unsigned int ul_OldInterruptMask; /* Interrupt mask */
- unsigned int ul_OldCounterLatchValue; /* Interrupt counter value */
- } s_FIFOInterruptParameters[APCI1710_SAVE_INTERRUPT];
- } s_InterruptParameters;
-
- union str_ModuleInfo s_ModuleInfo[4];
-
/* Parameters read from EEPROM overriding static board info */
struct {
int i_NbrAiChannel; /* num of A/D chans */
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_eeprom.c b/drivers/staging/comedi/drivers/addi-data/addi_eeprom.c
index dc031c494a2..aafc172f3a9 100644
--- a/drivers/staging/comedi/drivers/addi-data/addi_eeprom.c
+++ b/drivers/staging/comedi/drivers/addi-data/addi_eeprom.c
@@ -22,6 +22,8 @@
* for more details.
*/
+#include <linux/delay.h>
+
#define NVRAM_USER_DATA_START 0x100
#define NVCMD_BEGIN_READ (0x7 << 5) /* nvRam begin read command */
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_APCI1710.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_APCI1710.c
deleted file mode 100644
index b1a7ec1035e..00000000000
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_APCI1710.c
+++ /dev/null
@@ -1,1314 +0,0 @@
-/**
-@verbatim
-
-Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
-
- ADDI-DATA GmbH
- Dieselstrasse 3
- D-77833 Ottersweier
-	Tel: +49(0)7223/9493-0
- Fax: +49(0)7223/9493-92
- http://www.addi-data.com
- info@addi-data.com
-
-This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version.
-
-This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
-
-@endverbatim
-*/
-/*
- +-----------------------------------------------------------------------+
- | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier |
- +-----------------------------------------------------------------------+
- | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com |
- | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com |
- +-------------------------------+---------------------------------------+
- | Project : APCI-1710 | Compiler : GCC |
- | Module name : hwdrv_apci1710.c| Version : 2.96 |
- +-------------------------------+---------------------------------------+
- | Project manager: Eric Stolz | Date : 02/12/2002 |
- +-------------------------------+---------------------------------------+
- | Description : Hardware Layer Access For APCI-1710 |
- +-----------------------------------------------------------------------+
- | UPDATES |
- +----------+-----------+------------------------------------------------+
- | Date | Author | Description of updates |
- +----------+-----------+------------------------------------------------+
- | | | |
- | | | |
- | | | |
- +----------+-----------+------------------------------------------------+
-*/
-
-#define COMEDI_SUBD_TTLIO 11 /* Digital Input Output But TTL */
-#define COMEDI_SUBD_PWM 12 /* Pulse width Measurement */
-#define COMEDI_SUBD_SSI 13 /* Synchronous serial interface */
-#define COMEDI_SUBD_TOR 14 /* Tor counter */
-#define COMEDI_SUBD_CHRONO 15 /* Chrono meter */
-#define COMEDI_SUBD_PULSEENCODER 16 /* Pulse Encoder INP CPT */
-#define COMEDI_SUBD_INCREMENTALCOUNTER 17 /* Incremental Counter */
-
-#define APCI1710_BOARD_NAME "apci1710"
-#define APCI1710_BOARD_DEVICE_ID 0x818F
-#define APCI1710_ADDRESS_RANGE 256
-#define APCI1710_CONFIG_ADDRESS_RANGE 8
-#define APCI1710_INCREMENTAL_COUNTER 0x53430000UL
-#define APCI1710_SSI_COUNTER 0x53490000UL
-#define APCI1710_TTL_IO 0x544C0000UL
-#define APCI1710_DIGITAL_IO 0x44490000UL
-#define APCI1710_82X54_TIMER 0x49430000UL
-#define APCI1710_CHRONOMETER 0x43480000UL
-#define APCI1710_PULSE_ENCODER 0x495A0000UL
-#define APCI1710_TOR_COUNTER 0x544F0000UL
-#define APCI1710_PWM 0x50570000UL
-#define APCI1710_ETM 0x45540000UL
-#define APCI1710_CDA 0x43440000UL
-#define APCI1710_DISABLE 0
-#define APCI1710_ENABLE 1
-#define APCI1710_SYNCHRONOUS_MODE 1
-#define APCI1710_ASYNCHRONOUS_MODE 0
-
-#include "APCI1710_Inp_cpt.c"
-
-#include "APCI1710_Ssi.c"
-#include "APCI1710_Tor.c"
-#include "APCI1710_Ttl.c"
-#include "APCI1710_Dig_io.c"
-#include "APCI1710_82x54.c"
-#include "APCI1710_Chrono.c"
-#include "APCI1710_Pwm.c"
-#include "APCI1710_INCCPT.c"
-
-static const struct comedi_lrange range_apci1710_ttl = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1)
- }
-};
-
-static const struct comedi_lrange range_apci1710_ssi = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1)
- }
-};
-
-static const struct comedi_lrange range_apci1710_inccpt = {
- 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2),
- BIP_RANGE(1)
- }
-};
-
-static void i_ADDI_AttachPCI1710(struct comedi_device *dev)
-{
- struct comedi_subdevice *s;
- int ret = 0;
- int n_subdevices = 9;
-
- ret = comedi_alloc_subdevices(dev, n_subdevices);
- if (ret)
- return;
-
- /* Allocate and Initialise Timer Subdevice Structures */
- s = &dev->subdevices[0];
-
- s->type = COMEDI_SUBD_TIMER;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 3;
- s->maxdata = 0;
- s->len_chanlist = 3;
- s->range_table = &range_digital;
- s->insn_write = i_APCI1710_InsnWriteEnableDisableTimer;
- s->insn_read = i_APCI1710_InsnReadAllTimerValue;
- s->insn_config = i_APCI1710_InsnConfigInitTimer;
- s->insn_bits = i_APCI1710_InsnBitsTimer;
-
- /* Allocate and Initialise DIO Subdevice Structures */
- s = &dev->subdevices[1];
-
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags =
- SDF_WRITEABLE | SDF_READABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 7;
- s->maxdata = 1;
- s->len_chanlist = 7;
- s->range_table = &range_digital;
- s->insn_config = i_APCI1710_InsnConfigDigitalIO;
- s->insn_read = i_APCI1710_InsnReadDigitalIOChlValue;
- s->insn_bits = i_APCI1710_InsnBitsDigitalIOPortOnOff;
- s->insn_write = i_APCI1710_InsnWriteDigitalIOChlOnOff;
-
- /* Allocate and Initialise Chrono Subdevice Structures */
- s = &dev->subdevices[2];
-
- s->type = COMEDI_SUBD_CHRONO;
- s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 4;
- s->maxdata = 0;
- s->len_chanlist = 4;
- s->range_table = &range_digital;
- s->insn_write = i_APCI1710_InsnWriteEnableDisableChrono;
- s->insn_read = i_APCI1710_InsnReadChrono;
- s->insn_config = i_APCI1710_InsnConfigInitChrono;
- s->insn_bits = i_APCI1710_InsnBitsChronoDigitalIO;
-
- /* Allocate and Initialise PWM Subdevice Structures */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_PWM;
- s->subdev_flags =
- SDF_WRITEABLE | SDF_READABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 3;
- s->maxdata = 1;
- s->len_chanlist = 3;
- s->range_table = &range_digital;
- s->io_bits = 0; /* all bits input */
- s->insn_config = i_APCI1710_InsnConfigPWM;
- s->insn_read = i_APCI1710_InsnReadGetPWMStatus;
- s->insn_write = i_APCI1710_InsnWritePWM;
- s->insn_bits = i_APCI1710_InsnBitsReadPWMInterrupt;
-
- /* Allocate and Initialise TTLIO Subdevice Structures */
- s = &dev->subdevices[4];
- s->type = COMEDI_SUBD_TTLIO;
- s->subdev_flags =
- SDF_WRITEABLE | SDF_READABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 8;
- s->maxdata = 1;
- s->len_chanlist = 8;
- s->range_table = &range_apci1710_ttl; /* to pass arguments in range */
- s->insn_config = i_APCI1710_InsnConfigInitTTLIO;
- s->insn_bits = i_APCI1710_InsnBitsReadTTLIO;
- s->insn_write = i_APCI1710_InsnWriteSetTTLIOChlOnOff;
- s->insn_read = i_APCI1710_InsnReadTTLIOAllPortValue;
-
- /* Allocate and Initialise TOR Subdevice Structures */
- s = &dev->subdevices[5];
- s->type = COMEDI_SUBD_TOR;
- s->subdev_flags =
- SDF_WRITEABLE | SDF_READABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 8;
- s->maxdata = 1;
- s->len_chanlist = 8;
- s->range_table = &range_digital;
- s->io_bits = 0; /* all bits input */
- s->insn_config = i_APCI1710_InsnConfigInitTorCounter;
- s->insn_read = i_APCI1710_InsnReadGetTorCounterInitialisation;
- s->insn_write = i_APCI1710_InsnWriteEnableDisableTorCounter;
- s->insn_bits = i_APCI1710_InsnBitsGetTorCounterProgressStatusAndValue;
-
- /* Allocate and Initialise SSI Subdevice Structures */
- s = &dev->subdevices[6];
- s->type = COMEDI_SUBD_SSI;
- s->subdev_flags =
- SDF_WRITEABLE | SDF_READABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 4;
- s->maxdata = 1;
- s->len_chanlist = 4;
- s->range_table = &range_apci1710_ssi;
- s->insn_config = i_APCI1710_InsnConfigInitSSI;
- s->insn_read = i_APCI1710_InsnReadSSIValue;
- s->insn_bits = i_APCI1710_InsnBitsSSIDigitalIO;
-
- /* Allocate and Initialise PULSEENCODER Subdevice Structures */
- s = &dev->subdevices[7];
- s->type = COMEDI_SUBD_PULSEENCODER;
- s->subdev_flags =
- SDF_WRITEABLE | SDF_READABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 4;
- s->maxdata = 1;
- s->len_chanlist = 4;
- s->range_table = &range_digital;
- s->insn_config = i_APCI1710_InsnConfigInitPulseEncoder;
- s->insn_write = i_APCI1710_InsnWriteEnableDisablePulseEncoder;
- s->insn_bits = i_APCI1710_InsnBitsReadWritePulseEncoder;
- s->insn_read = i_APCI1710_InsnReadInterruptPulseEncoder;
-
- /* Allocate and Initialise INCREMENTALCOUNTER Subdevice Structures */
- s = &dev->subdevices[8];
- s->type = COMEDI_SUBD_INCREMENTALCOUNTER;
- s->subdev_flags =
- SDF_WRITEABLE | SDF_READABLE | SDF_GROUND | SDF_COMMON;
- s->n_chan = 500;
- s->maxdata = 1;
- s->len_chanlist = 500;
- s->range_table = &range_apci1710_inccpt;
- s->insn_config = i_APCI1710_InsnConfigINCCPT;
- s->insn_write = i_APCI1710_InsnWriteINCCPT;
- s->insn_read = i_APCI1710_InsnReadINCCPT;
- s->insn_bits = i_APCI1710_InsnBitsINCCPT;
-}
-
-static int i_APCI1710_Reset(struct comedi_device *dev)
-{
- struct addi_private *devpriv = dev->private;
- int ret;
- unsigned int dw_Dummy;
-
- /*********************************/
- /* Read all module configuration */
- /*********************************/
- ret = inl(devpriv->s_BoardInfos.ui_Address + 60);
- devpriv->s_BoardInfos.dw_MolduleConfiguration[0] = ret;
-
- ret = inl(devpriv->s_BoardInfos.ui_Address + 124);
- devpriv->s_BoardInfos.dw_MolduleConfiguration[1] = ret;
-
- ret = inl(devpriv->s_BoardInfos.ui_Address + 188);
- devpriv->s_BoardInfos.dw_MolduleConfiguration[2] = ret;
-
- ret = inl(devpriv->s_BoardInfos.ui_Address + 252);
- devpriv->s_BoardInfos.dw_MolduleConfiguration[3] = ret;
-
- /* outl(0x80808082,devpriv->s_BoardInfos.ui_Address+0x60); */
- outl(0x83838383, devpriv->s_BoardInfos.ui_Address + 0x60);
-
- devpriv->s_BoardInfos.b_BoardVersion = 1;
-
- /* Enable the interrupt for the controller */
- dw_Dummy = inl(devpriv->s_BoardInfos.ui_Address + 0x38);
- outl(dw_Dummy | 0x2000, devpriv->s_BoardInfos.ui_Address + 0x38);
-
- return 0;
-}
-
-/*
-+----------------------------------------------------------------------------+
-| Function's Name : __void__ v_APCI1710_InterruptFunction |
-| (unsigned char b_Interrupt, __CPPARGS) |
-+----------------------------------------------------------------------------+
-| Task : APCI-1710 interrupt function |
-+----------------------------------------------------------------------------+
-| Input Parameters : unsigned char b_Interrupt : Interrupt number |
-+----------------------------------------------------------------------------+
-| Output Parameters : - |
-+----------------------------------------------------------------------------+
-| Return Value : 0 : OK |
-| -1 : Error |
-+----------------------------------------------------------------------------+
-*/
-
-static void v_APCI1710_Interrupt(int irq, void *d)
-{
- struct comedi_device *dev = d;
- struct addi_private *devpriv = dev->private;
- unsigned char b_ModuleCpt = 0;
- unsigned char b_InterruptFlag = 0;
- unsigned char b_PWMCpt = 0;
- unsigned char b_TorCounterCpt = 0;
- unsigned char b_PulseIncoderCpt = 0;
- unsigned int ui_16BitValue;
- unsigned int ul_InterruptLatchReg = 0;
- unsigned int ul_LatchRegisterValue = 0;
- unsigned int ul_82X54InterruptStatus;
- unsigned int ul_StatusRegister;
-
- union str_ModuleInfo *ps_ModuleInfo;
-
- printk("APCI1710 Interrupt\n");
- for (b_ModuleCpt = 0; b_ModuleCpt < 4; b_ModuleCpt++, ps_ModuleInfo++) {
-
- /**************************/
- /* 1199/0225 to 0100/0226 */
- /**************************/
- ps_ModuleInfo = &devpriv->s_ModuleInfo[b_ModuleCpt];
-
- /***********************/
- /* Test if 82X54 timer */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModuleCpt] &
- 0xFFFF0000UL) == APCI1710_82X54_TIMER) {
-
- /* printk("TIMER Interrupt Occurred\n"); */
- ul_82X54InterruptStatus = inl(devpriv->s_BoardInfos.
- ui_Address + 12 + (64 * b_ModuleCpt));
-
- /***************************/
- /* Test if interrupt occur */
- /***************************/
-
- if ((ul_82X54InterruptStatus & ps_ModuleInfo->
- s_82X54ModuleInfo.
- b_InterruptMask) != 0) {
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.
- ui_Write].
- ul_OldInterruptMask =
- (ul_82X54InterruptStatus &
- ps_ModuleInfo->s_82X54ModuleInfo.
- b_InterruptMask) << 4;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.
- ui_Write].
- b_OldModuleMask = 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.
- ui_Write].ul_OldCounterLatchValue = 0;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write + 1) % APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-                                /* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_Current, 0);
-
- } /* if ((ul_82X54InterruptStatus & 0x7) != 0) */
- } /* 82X54 timer */
-
- /***************************/
- /* Test if increm. counter */
- /***************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModuleCpt] &
- 0xFFFF0000UL) == APCI1710_INCREMENTAL_COUNTER) {
-
- ul_InterruptLatchReg = inl(devpriv->s_BoardInfos.
- ui_Address + (64 * b_ModuleCpt));
-
- /*********************/
- /* Test if interrupt */
- /*********************/
-
- if ((ul_InterruptLatchReg & 0x22) && (ps_ModuleInfo->
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 & 0x80)) {
- /************************************/
- /* Test if strobe latch I interrupt */
- /************************************/
-
- if (ul_InterruptLatchReg & 2) {
- ul_LatchRegisterValue =
- inl(devpriv->s_BoardInfos.
- ui_Address + 4 +
- (64 * b_ModuleCpt));
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].ul_OldInterruptMask =
- 1UL;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].b_OldModuleMask =
- 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].
- ul_OldCounterLatchValue =
- ul_LatchRegisterValue;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* 0899/0224 to 1199/0225 */
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write +
- 1) % APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-                                        /* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_Current,
- 0);
-
- }
-
- /*************************************/
- /* Test if strobe latch II interrupt */
- /*************************************/
-
- if (ul_InterruptLatchReg & 0x20) {
-
- ul_LatchRegisterValue =
- inl(devpriv->s_BoardInfos.
- ui_Address + 8 +
- (64 * b_ModuleCpt));
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].ul_OldInterruptMask =
- 2UL;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].b_OldModuleMask =
- 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].
- ul_OldCounterLatchValue =
- ul_LatchRegisterValue;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* 0899/0224 to 1199/0225 */
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write +
- 1) % APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-                                        /* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_Current,
- 0);
-
- }
- }
-
- ul_InterruptLatchReg = inl(devpriv->s_BoardInfos.
- ui_Address + 24 + (64 * b_ModuleCpt));
-
- /***************************/
- /* Test if index interrupt */
- /***************************/
-
- if (ul_InterruptLatchReg & 0x8) {
- ps_ModuleInfo->
- s_SiemensCounterInfo.
- s_InitFlag.b_IndexInterruptOccur = 1;
-
- if (ps_ModuleInfo->
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister2 &
- APCI1710_INDEX_AUTO_MODE) {
-
- outl(ps_ModuleInfo->
- s_SiemensCounterInfo.
- s_ModeRegister.
- dw_ModeRegister1_2_3_4,
- devpriv->s_BoardInfos.
- ui_Address + 20 +
- (64 * b_ModuleCpt));
- }
-
- /*****************************/
- /* Test if interrupt enabled */
- /*****************************/
-
- if ((ps_ModuleInfo->
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 &
- APCI1710_ENABLE_INDEX_INT) ==
- APCI1710_ENABLE_INDEX_INT) {
- devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].ul_OldInterruptMask =
- 4UL;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].b_OldModuleMask =
- 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].
- ul_OldCounterLatchValue =
- ul_LatchRegisterValue;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* 0899/0224 to 1199/0225 */
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write +
- 1) % APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-                                        /* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_Current,
- 0);
-
- }
- }
-
- /*****************************/
- /* Test if compare interrupt */
- /*****************************/
-
- if (ul_InterruptLatchReg & 0x10) {
- /*****************************/
- /* Test if interrupt enabled */
- /*****************************/
-
- if ((ps_ModuleInfo->
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister3 &
- APCI1710_ENABLE_COMPARE_INT) ==
- APCI1710_ENABLE_COMPARE_INT) {
- devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].ul_OldInterruptMask =
- 8UL;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].b_OldModuleMask =
- 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].
- ul_OldCounterLatchValue =
- ul_LatchRegisterValue;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* 0899/0224 to 1199/0225 */
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write +
- 1) % APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-                                        /* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_Current,
- 0);
-
- }
- }
-
- /*******************************************/
- /* Test if frequency measurement interrupt */
- /*******************************************/
-
- if (ul_InterruptLatchReg & 0x20) {
- /*******************/
- /* Read the status */
- /*******************/
-
- ul_StatusRegister = inl(devpriv->s_BoardInfos.
- ui_Address + 32 + (64 * b_ModuleCpt));
-
- /******************/
- /* Read the value */
- /******************/
-
- ul_LatchRegisterValue =
- inl(devpriv->s_BoardInfos.ui_Address +
- 28 + (64 * b_ModuleCpt));
-
- switch ((ul_StatusRegister >> 1) & 3) {
- case 0:
- /*************************/
- /* Test the counter mode */
- /*************************/
-
- if ((devpriv->s_ModuleInfo[b_ModuleCpt].
- s_SiemensCounterInfo.
- s_ModeRegister.
- s_ByteModeRegister.
- b_ModeRegister1 &
- APCI1710_16BIT_COUNTER)
- == APCI1710_16BIT_COUNTER) {
- /****************************************/
- /* Test if 16-bit counter 1 pulse occur */
- /****************************************/
-
- if ((ul_LatchRegisterValue &
- 0xFFFFU) != 0) {
- ui_16BitValue =
- (unsigned int)
- ul_LatchRegisterValue
- & 0xFFFFU;
- ul_LatchRegisterValue =
- (ul_LatchRegisterValue
- & 0xFFFF0000UL)
- | (0xFFFFU -
- ui_16BitValue);
- }
-
- /****************************************/
- /* Test if 16-bit counter 2 pulse occur */
- /****************************************/
-
- if ((ul_LatchRegisterValue &
- 0xFFFF0000UL) !=
- 0) {
- ui_16BitValue =
- (unsigned int) (
- (ul_LatchRegisterValue
- >> 16) &
- 0xFFFFU);
- ul_LatchRegisterValue =
- (ul_LatchRegisterValue
- & 0xFFFFUL) |
- ((0xFFFFU -
- ui_16BitValue)
- << 16);
- }
- } else {
- if (ul_LatchRegisterValue != 0) {
- ul_LatchRegisterValue =
- 0xFFFFFFFFUL -
- ul_LatchRegisterValue;
- }
- }
- break;
-
- case 1:
- /****************************************/
- /* Test if 16-bit counter 2 pulse occur */
- /****************************************/
-
- if ((ul_LatchRegisterValue &
- 0xFFFF0000UL) != 0) {
- ui_16BitValue =
- (unsigned int) (
- (ul_LatchRegisterValue
- >> 16) &
- 0xFFFFU);
- ul_LatchRegisterValue =
- (ul_LatchRegisterValue &
- 0xFFFFUL) | ((0xFFFFU -
- ui_16BitValue)
- << 16);
- }
- break;
-
- case 2:
- /****************************************/
- /* Test if 16-bit counter 1 pulse occur */
- /****************************************/
-
- if ((ul_LatchRegisterValue & 0xFFFFU) !=
- 0) {
- ui_16BitValue =
- (unsigned int)
- ul_LatchRegisterValue &
- 0xFFFFU;
- ul_LatchRegisterValue =
- (ul_LatchRegisterValue &
- 0xFFFF0000UL) | (0xFFFFU
- - ui_16BitValue);
- }
- break;
- }
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.
- ui_Write].
- ul_OldInterruptMask = 0x10000UL;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.
- ui_Write].
- b_OldModuleMask = 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters[devpriv->
- s_InterruptParameters.
- ui_Write].
- ul_OldCounterLatchValue =
- ul_LatchRegisterValue;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* 0899/0224 to 1199/0225 */
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write + 1) % APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-                                /* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_Current, 0);
-
- }
- } /* Incremental counter */
-
- /***************/
- /* Test if CDA */
- /***************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModuleCpt] &
- 0xFFFF0000UL) == APCI1710_CDA) {
- /******************************************/
- /* Test if CDA enable and functionality 0 */
- /******************************************/
-
- if ((devpriv->s_ModuleInfo[b_ModuleCpt].
- s_CDAModuleInfo.
- b_CDAEnable == APCI1710_ENABLE)
- && (devpriv->s_ModuleInfo[b_ModuleCpt].
- s_CDAModuleInfo.b_FctSelection == 0)) {
- /****************************/
- /* Get the interrupt status */
- /****************************/
-
- ul_StatusRegister = inl(devpriv->s_BoardInfos.
- ui_Address + 16 + (64 * b_ModuleCpt));
- /***************************/
- /* Test if interrupt occur */
- /***************************/
-
- if (ul_StatusRegister & 1) {
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].ul_OldInterruptMask =
- 0x80000UL;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].b_OldModuleMask =
- 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].
- ul_OldCounterLatchValue = 0;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write +
- 1) % APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-
-                                        /* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_Current,
- 0);
-
- } /* if (ul_StatusRegister & 1) */
-
- }
- } /* CDA */
-
- /***********************/
- /* Test if PWM counter */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModuleCpt] &
- 0xFFFF0000UL) == APCI1710_PWM) {
- for (b_PWMCpt = 0; b_PWMCpt < 2; b_PWMCpt++) {
- /*************************************/
- /* Test if PWM interrupt initialised */
- /*************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModuleCpt].
- s_PWMModuleInfo.
- s_PWMInfo[b_PWMCpt].
- b_InterruptEnable == APCI1710_ENABLE) {
- /*****************************/
- /* Read the interrupt status */
- /*****************************/
-
- ul_StatusRegister =
- inl(devpriv->s_BoardInfos.
- ui_Address + 16 +
- (20 * b_PWMCpt) +
- (64 * b_ModuleCpt));
-
- /***************************/
- /* Test if interrupt occur */
- /***************************/
-
- if (ul_StatusRegister & 0x1) {
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->
- s_InterruptParameters.
- ui_Write].
- ul_OldInterruptMask =
- 0x4000UL << b_PWMCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->
- s_InterruptParameters.
- ui_Write].
- b_OldModuleMask =
- 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write +
- 1) %
- APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-                                                /* Send a signal from kernel to user space */
- send_sig(SIGIO,
- devpriv->tsk_Current,
- 0);
-
- } /* if (ul_StatusRegister & 0x1) */
- } /* if (APCI1710_ENABLE) */
-                        } /* for (b_PWMCpt = 0; b_PWMCpt < 2; b_PWMCpt++) */
- } /* PWM counter */
-
- /***********************/
- /* Test if tor counter */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModuleCpt] &
- 0xFFFF0000UL) == APCI1710_TOR_COUNTER) {
- for (b_TorCounterCpt = 0; b_TorCounterCpt < 2;
- b_TorCounterCpt++) {
- /*************************************/
- /* Test if tor interrupt initialised */
- /*************************************/
-
- if (devpriv->
- s_ModuleInfo[b_ModuleCpt].
- s_TorCounterModuleInfo.
- s_TorCounterInfo[b_TorCounterCpt].
- b_InterruptEnable == APCI1710_ENABLE) {
- /*****************************/
- /* Read the interrupt status */
- /*****************************/
-
- ul_StatusRegister =
- inl(devpriv->s_BoardInfos.
- ui_Address + 12 +
- (16 * b_TorCounterCpt) +
- (64 * b_ModuleCpt));
-
- /***************************/
- /* Test if interrupt occur */
- /***************************/
-
- if (ul_StatusRegister & 0x1) {
- /******************************/
- /* Read the tor counter value */
- /******************************/
-
- ul_LatchRegisterValue =
- inl(devpriv->
- s_BoardInfos.
- ui_Address + 0 +
- (16 * b_TorCounterCpt) +
- (64 * b_ModuleCpt));
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->
- s_InterruptParameters.
- ui_Write].
- ul_OldInterruptMask =
- 0x1000UL <<
- b_TorCounterCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->
- s_InterruptParameters.
- ui_Write].
- b_OldModuleMask =
- 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->
- s_InterruptParameters.
- ui_Write].
- ul_OldCounterLatchValue
- = ul_LatchRegisterValue;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write +
- 1) %
- APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-
-                                                /* Send a signal from kernel to user space */
- send_sig(SIGIO,
- devpriv->tsk_Current,
- 0);
- } /* if (ul_StatusRegister & 0x1) */
- } /* if (APCI1710_ENABLE) */
-                        } /* for (b_TorCounterCpt = 0; b_TorCounterCpt < 2; b_TorCounterCpt++) */
- } /* Tor counter */
-
- /***********************/
- /* Test if chronometer */
- /***********************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModuleCpt] &
- 0xFFFF0000UL) == APCI1710_CHRONOMETER) {
-
- /* printk("APCI1710 Chrono Interrupt\n"); */
- /*****************************/
- /* Read the interrupt status */
- /*****************************/
-
- ul_InterruptLatchReg = inl(devpriv->s_BoardInfos.
- ui_Address + 12 + (64 * b_ModuleCpt));
-
- /***************************/
- /* Test if interrupt occur */
- /***************************/
-
- if ((ul_InterruptLatchReg & 0x8) == 0x8) {
- /****************************/
- /* Clear the interrupt flag */
- /****************************/
-
- outl(0, devpriv->s_BoardInfos.
- ui_Address + 32 + (64 * b_ModuleCpt));
-
- /***************************/
- /* Test if continuous mode */
- /***************************/
-
- if (ps_ModuleInfo->
- s_ChronoModuleInfo.
- b_CycleMode == APCI1710_ENABLE) {
- /********************/
- /* Clear the status */
- /********************/
-
- outl(0, devpriv->s_BoardInfos.
- ui_Address + 36 +
- (64 * b_ModuleCpt));
- }
-
- /*************************/
- /* Read the timing value */
- /*************************/
-
- ul_LatchRegisterValue =
- inl(devpriv->s_BoardInfos.ui_Address +
- 4 + (64 * b_ModuleCpt));
-
- /*****************************/
- /* Test if interrupt enabled */
- /*****************************/
-
- if (ps_ModuleInfo->
- s_ChronoModuleInfo.b_InterruptMask) {
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].ul_OldInterruptMask =
- 0x80;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].b_OldModuleMask =
- 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->s_InterruptParameters.
- ui_Write].
- ul_OldCounterLatchValue =
- ul_LatchRegisterValue;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write +
- 1) % APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-                                        /* Send a signal from kernel to user space */
- send_sig(SIGIO, devpriv->tsk_Current,
- 0);
-
- }
- }
- } /* Chronometer */
-
- /*************************/
- /* Test if pulse encoder */
- /*************************/
-
- if ((devpriv->s_BoardInfos.
- dw_MolduleConfiguration[b_ModuleCpt] &
- 0xFFFF0000UL) == APCI1710_PULSE_ENCODER) {
- /****************************/
- /* Read the status register */
- /****************************/
-
- ul_StatusRegister = inl(devpriv->s_BoardInfos.
- ui_Address + 20 + (64 * b_ModuleCpt));
-
- if (ul_StatusRegister & 0xF) {
- for (b_PulseIncoderCpt = 0;
- b_PulseIncoderCpt < 4;
- b_PulseIncoderCpt++) {
- /*************************************/
- /* Test if pulse encoder initialised */
- /*************************************/
-
- if ((ps_ModuleInfo->
- s_PulseEncoderModuleInfo.
- s_PulseEncoderInfo
- [b_PulseIncoderCpt].
- b_PulseEncoderInit == 1)
- && (((ps_ModuleInfo->s_PulseEncoderModuleInfo.dw_SetRegister >> b_PulseIncoderCpt) & 1) == 1) && (((ul_StatusRegister >> (b_PulseIncoderCpt)) & 1) == 1)) {
- devpriv->s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->
- s_InterruptParameters.
- ui_Write].
- ul_OldInterruptMask =
- 0x100UL <<
- b_PulseIncoderCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->
- s_InterruptParameters.
- ui_Write].
- b_OldModuleMask =
- 1 << b_ModuleCpt;
-
- devpriv->
- s_InterruptParameters.
- s_FIFOInterruptParameters
- [devpriv->
- s_InterruptParameters.
- ui_Write].
- ul_OldCounterLatchValue
- = ul_LatchRegisterValue;
-
- devpriv->
- s_InterruptParameters.
- ul_InterruptOccur++;
-
- /****************************/
- /* 0899/0224 to 1199/0225 */
- /****************************/
- /* Increment the write FIFO */
- /****************************/
-
- devpriv->
- s_InterruptParameters.
- ui_Write = (devpriv->
- s_InterruptParameters.
- ui_Write +
- 1) %
- APCI1710_SAVE_INTERRUPT;
-
- b_InterruptFlag = 1;
-
- /**********************/
- /* Call user function */
- /**********************/
-                                                /* Send a signal from kernel to user space */
- send_sig(SIGIO,
- devpriv->tsk_Current,
- 0);
-
- }
- }
- }
- } /* pulse encoder */
-
- }
- return;
-
-}
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
index a89e505c8a3..1449b92403e 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3120.c
@@ -40,6 +40,8 @@ This program is distributed in the hope that it will be useful, but WITHOUT ANY
+----------+-----------+------------------------------------------------+
*/
+#include <linux/delay.h>
+
/*
* ADDON RELATED ADDITIONS
*/
diff --git a/drivers/staging/comedi/drivers/addi_apci_035.c b/drivers/staging/comedi/drivers/addi_apci_035.c
index 43c2c10a7c3..8d229b2f097 100644
--- a/drivers/staging/comedi/drivers/addi_apci_035.c
+++ b/drivers/staging/comedi/drivers/addi_apci_035.c
@@ -1,3 +1,4 @@
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/addi_apci_1032.c b/drivers/staging/comedi/drivers/addi_apci_1032.c
index 8a93542faed..34ab0679e99 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1032.c
@@ -22,6 +22,7 @@
* more details.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -289,10 +290,9 @@ static int apci1032_auto_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/addi_apci_1500.c b/drivers/staging/comedi/drivers/addi_apci_1500.c
index b52cfe01e6c..ae9ded63dce 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1500.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1500.c
@@ -1,3 +1,4 @@
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/addi_apci_1516.c b/drivers/staging/comedi/drivers/addi_apci_1516.c
index b626738bb73..08674c18cf4 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1516.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1516.c
@@ -22,6 +22,7 @@
* more details.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -136,10 +137,9 @@ static int apci1516_auto_attach(struct comedi_device *dev,
dev->board_ptr = this_board;
dev->board_name = this_board->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/addi_apci_1564.c b/drivers/staging/comedi/drivers/addi_apci_1564.c
index 22bace62210..c5717d63e16 100644
--- a/drivers/staging/comedi/drivers/addi_apci_1564.c
+++ b/drivers/staging/comedi/drivers/addi_apci_1564.c
@@ -1,3 +1,4 @@
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/addi_apci_16xx.c b/drivers/staging/comedi/drivers/addi_apci_16xx.c
index 1f7bed9a3f7..96523744b8d 100644
--- a/drivers/staging/comedi/drivers/addi_apci_16xx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_16xx.c
@@ -22,6 +22,7 @@
* more details.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -59,36 +60,22 @@ static int apci16xx_insn_config(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- unsigned int chan_mask = 1 << CR_CHAN(insn->chanspec);
- unsigned int bits;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
+ int ret;
- /*
- * Each 8-bit "port" is configurable as either input or
- * output. Changing the configuration of any channel in
- * a port changes the entire port.
- */
- if (chan_mask & 0x000000ff)
- bits = 0x000000ff;
- else if (chan_mask & 0x0000ff00)
- bits = 0x0000ff00;
- else if (chan_mask & 0x00ff0000)
- bits = 0x00ff0000;
+ if (chan < 8)
+ mask = 0x000000ff;
+ else if (chan < 16)
+ mask = 0x0000ff00;
+ else if (chan < 24)
+ mask = 0x00ff0000;
else
- bits = 0xff000000;
-
- switch (data[0]) {
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~bits;
- break;
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= bits;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & bits) ? COMEDI_INPUT : COMEDI_OUTPUT;
- return insn->n;
- default:
- return -EINVAL;
- }
+ mask = 0xff000000;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
outl(s->io_bits, dev->iobase + APCI16XX_DIR_REG(s->index));
diff --git a/drivers/staging/comedi/drivers/addi_apci_1710.c b/drivers/staging/comedi/drivers/addi_apci_1710.c
deleted file mode 100644
index c9e6471eb06..00000000000
--- a/drivers/staging/comedi/drivers/addi_apci_1710.c
+++ /dev/null
@@ -1,99 +0,0 @@
-#include <linux/pci.h>
-
-#include <asm/i387.h>
-
-#include "../comedidev.h"
-#include "comedi_fc.h"
-#include "amcc_s5933.h"
-
-#include "addi-data/addi_common.h"
-
-static void fpu_begin(void)
-{
- kernel_fpu_begin();
-}
-
-static void fpu_end(void)
-{
- kernel_fpu_end();
-}
-
-#include "addi-data/addi_eeprom.c"
-#include "addi-data/hwdrv_APCI1710.c"
-
-static irqreturn_t v_ADDI_Interrupt(int irq, void *d)
-{
- v_APCI1710_Interrupt(irq, d);
- return IRQ_RETVAL(1);
-}
-
-static int apci1710_auto_attach(struct comedi_device *dev,
- unsigned long context_unused)
-{
- struct pci_dev *pcidev = comedi_to_pci_dev(dev);
- struct addi_private *devpriv;
- struct comedi_subdevice *s;
- int ret;
-
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
- if (!devpriv)
- return -ENOMEM;
- dev->private = devpriv;
-
- ret = comedi_pci_enable(dev);
- if (ret)
- return ret;
- devpriv->s_BoardInfos.ui_Address = pci_resource_start(pcidev, 2);
-
- if (pcidev->irq > 0) {
- ret = request_irq(pcidev->irq, v_ADDI_Interrupt, IRQF_SHARED,
- dev->board_name, dev);
- if (ret == 0)
- dev->irq = pcidev->irq;
- }
-
- i_ADDI_AttachPCI1710(dev);
-
- i_APCI1710_Reset(dev);
- return 0;
-}
-
-static void apci1710_detach(struct comedi_device *dev)
-{
- if (dev->iobase)
- i_APCI1710_Reset(dev);
- if (dev->irq)
- free_irq(dev->irq, dev);
- comedi_pci_disable(dev);
-}
-
-static struct comedi_driver apci1710_driver = {
- .driver_name = "addi_apci_1710",
- .module = THIS_MODULE,
- .auto_attach = apci1710_auto_attach,
- .detach = apci1710_detach,
-};
-
-static int apci1710_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
-{
- return comedi_pci_auto_config(dev, &apci1710_driver, id->driver_data);
-}
-
-static DEFINE_PCI_DEVICE_TABLE(apci1710_pci_table) = {
- { PCI_DEVICE(PCI_VENDOR_ID_AMCC, APCI1710_BOARD_DEVICE_ID) },
- { 0 }
-};
-MODULE_DEVICE_TABLE(pci, apci1710_pci_table);
-
-static struct pci_driver apci1710_pci_driver = {
- .name = "addi_apci_1710",
- .id_table = apci1710_pci_table,
- .probe = apci1710_pci_probe,
- .remove = comedi_pci_auto_unconfig,
-};
-module_comedi_pci_driver(apci1710_driver, apci1710_pci_driver);
-
-MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/addi_apci_2032.c b/drivers/staging/comedi/drivers/addi_apci_2032.c
index 89ead8eb3c7..6b0ea16ff54 100644
--- a/drivers/staging/comedi/drivers/addi_apci_2032.c
+++ b/drivers/staging/comedi/drivers/addi_apci_2032.c
@@ -22,8 +22,10 @@
* more details.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
+#include <linux/slab.h>
#include "../comedidev.h"
#include "addi_watchdog.h"
diff --git a/drivers/staging/comedi/drivers/addi_apci_2200.c b/drivers/staging/comedi/drivers/addi_apci_2200.c
index ca1bd92ecb1..92ac8ece849 100644
--- a/drivers/staging/comedi/drivers/addi_apci_2200.c
+++ b/drivers/staging/comedi/drivers/addi_apci_2200.c
@@ -22,6 +22,7 @@
* more details.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/addi_apci_3120.c b/drivers/staging/comedi/drivers/addi_apci_3120.c
index 61452848510..d804957018a 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3120.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3120.c
@@ -1,3 +1,4 @@
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -65,10 +66,9 @@ static int apci3120_auto_attach(struct comedi_device *dev,
dev->board_ptr = this_board;
dev->board_name = this_board->pc_DriverName;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
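
The kzalloc()/dev->private pairs removed throughout these hunks are replaced by the comedi core helper comedi_alloc_devpriv(), which allocates the private data, stores it in dev->private and leaves the freeing to the core on detach. A minimal sketch of a converted attach routine, using hypothetical names (struct my_private, my_auto_attach) rather than code from any of the patched drivers:

#include "../comedidev.h"

struct my_private {
	unsigned int counter;	/* hypothetical per-device state */
};

static int my_auto_attach(struct comedi_device *dev, unsigned long context)
{
	struct my_private *devpriv;

	/*
	 * Replaces the open-coded sequence
	 *	devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
	 *	dev->private = devpriv;
	 * seen in the surrounding hunks.
	 */
	devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
	if (!devpriv)
		return -ENOMEM;

	return comedi_pci_enable(dev);
}
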
diff --git a/drivers/staging/comedi/drivers/addi_apci_3200.c b/drivers/staging/comedi/drivers/addi_apci_3200.c
index 17b540d3c6a..1213d5aa6be 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3200.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3200.c
@@ -1,3 +1,4 @@
+#include <linux/module.h>
#include <linux/pci.h>
#include <asm/i387.h>
diff --git a/drivers/staging/comedi/drivers/addi_apci_3501.c b/drivers/staging/comedi/drivers/addi_apci_3501.c
index f9b63689a12..d9650ffb7d2 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3501.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3501.c
@@ -22,6 +22,7 @@
* more details.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
@@ -332,10 +333,9 @@ static int apci3501_auto_attach(struct comedi_device *dev,
int ao_n_chan;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/addi_apci_3xxx.c b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
index 5b37cbf9228..cf5dd10eaf9 100644
--- a/drivers/staging/comedi/drivers/addi_apci_3xxx.c
+++ b/drivers/staging/comedi/drivers/addi_apci_3xxx.c
@@ -22,6 +22,7 @@
* more details.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -685,38 +686,28 @@ static int apci3xxx_dio_insn_config(struct comedi_device *dev,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask = 1 << chan;
- unsigned int bits;
+ unsigned int mask;
+ int ret;
/*
* Port 0 (channels 0-7) are always inputs
* Port 1 (channels 8-15) are always outputs
* Port 2 (channels 16-23) are programmable i/o
- *
- * Changing any channel in port 2 changes the entire port.
*/
- if (mask & 0xff0000)
- bits = 0xff0000;
- else
- bits = 0;
-
- switch (data[0]) {
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~bits;
- break;
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= bits;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & bits) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- default:
- return -EINVAL;
+ if (chan < 16) {
+ if (data[0] != INSN_CONFIG_DIO_QUERY)
+ return -EINVAL;
+ } else {
+ /* changing any channel in port 2 changes the entire port */
+ mask = 0xff0000;
}
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
+
/* update port 2 configuration */
- if (bits)
- outl((s->io_bits >> 24) & 0xff, dev->iobase + 224);
+ outl((s->io_bits >> 24) & 0xff, dev->iobase + 224);
return insn->n;
}
@@ -801,10 +792,9 @@ static int apci3xxx_auto_attach(struct comedi_device *dev,
dev->board_ptr = board;
dev->board_name = board->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
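
The DIO insn_config rewrites in this and the neighbouring drivers all funnel through comedi_dio_insn_config(dev, s, insn, data, mask), which handles INSN_CONFIG_DIO_INPUT, INSN_CONFIG_DIO_OUTPUT and INSN_CONFIG_DIO_QUERY and updates s->io_bits; passing a mask of 0 (as the cb_pcidas64 hunk further down does) restricts the change to the addressed channel. A minimal sketch of a converted callback, assuming two 8-bit ports that switch direction as a group and a made-up direction register (MY_DIO_DIR_REG):

#define MY_DIO_DIR_REG	0x03	/* hypothetical register offset */

static int my_dio_insn_config(struct comedi_device *dev,
			      struct comedi_subdevice *s,
			      struct comedi_insn *insn,
			      unsigned int *data)
{
	unsigned int chan = CR_CHAN(insn->chanspec);
	unsigned int mask;
	unsigned int val;
	int ret;

	/* channels 0-7 and 8-15 each share one direction setting */
	if (chan < 8)
		mask = 0x00ff;
	else
		mask = 0xff00;

	ret = comedi_dio_insn_config(dev, s, insn, data, mask);
	if (ret)
		return ret;

	/* hypothetical register: bit 0 = port A input, bit 1 = port B input */
	val = 0;
	if (!(s->io_bits & 0x00ff))
		val |= 0x01;
	if (!(s->io_bits & 0xff00))
		val |= 0x02;
	outw(val, dev->iobase + MY_DIO_DIR_REG);

	return insn->n;
}
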
diff --git a/drivers/staging/comedi/drivers/addi_watchdog.c b/drivers/staging/comedi/drivers/addi_watchdog.c
index 7b21acc9392..23031feaa09 100644
--- a/drivers/staging/comedi/drivers/addi_watchdog.c
+++ b/drivers/staging/comedi/drivers/addi_watchdog.c
@@ -18,6 +18,7 @@
* GNU General Public License for more details.
*/
+#include <linux/module.h>
#include "../comedidev.h"
#include "addi_watchdog.h"
diff --git a/drivers/staging/comedi/drivers/adl_pci6208.c b/drivers/staging/comedi/drivers/adl_pci6208.c
index b5e4e53f737..a67ad57cefc 100644
--- a/drivers/staging/comedi/drivers/adl_pci6208.c
+++ b/drivers/staging/comedi/drivers/adl_pci6208.c
@@ -38,6 +38,7 @@ References:
- adl_pci9118.c
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -172,10 +173,9 @@ static int pci6208_auto_attach(struct comedi_device *dev,
dev->board_ptr = boardinfo;
dev->board_name = boardinfo->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/adl_pci7x3x.c b/drivers/staging/comedi/drivers/adl_pci7x3x.c
index 0d9243a5f49..81b7203f824 100644
--- a/drivers/staging/comedi/drivers/adl_pci7x3x.c
+++ b/drivers/staging/comedi/drivers/adl_pci7x3x.c
@@ -44,6 +44,7 @@ driver.
Configuration Options: not applicable, uses comedi PCI auto config
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/adl_pci8164.c b/drivers/staging/comedi/drivers/adl_pci8164.c
index 0b591b0b550..b3d009285ed 100644
--- a/drivers/staging/comedi/drivers/adl_pci8164.c
+++ b/drivers/staging/comedi/drivers/adl_pci8164.c
@@ -27,6 +27,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c
index af51c746004..78cea193504 100644
--- a/drivers/staging/comedi/drivers/adl_pci9111.c
+++ b/drivers/staging/comedi/drivers/adl_pci9111.c
@@ -64,6 +64,7 @@ TODO:
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -855,10 +856,9 @@ static int pci9111_auto_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- dev_private = kzalloc(sizeof(*dev_private), GFP_KERNEL);
+ dev_private = comedi_alloc_devpriv(dev, sizeof(*dev_private));
if (!dev_private)
return -ENOMEM;
- dev->private = dev_private;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/adl_pci9118.c b/drivers/staging/comedi/drivers/adl_pci9118.c
index cb4ef2dcbf0..22196ada036 100644
--- a/drivers/staging/comedi/drivers/adl_pci9118.c
+++ b/drivers/staging/comedi/drivers/adl_pci9118.c
@@ -77,6 +77,7 @@ Configuration options:
* manual attachment.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/gfp.h>
@@ -2140,10 +2141,9 @@ static int pci9118_attach(struct comedi_device *dev,
softsshdelay = it->options[4];
hw_err_mask = it->options[5];
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
pcidev = pci9118_find_pci(dev, it);
if (!pcidev)
@@ -2160,10 +2160,9 @@ static int pci9118_auto_attach(struct comedi_device *dev,
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct pci9118_private *devpriv;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
dev->board_ptr = pci9118_find_boardinfo(pcidev);
if (dev->board_ptr == NULL) {
diff --git a/drivers/staging/comedi/drivers/adq12b.c b/drivers/staging/comedi/drivers/adq12b.c
index d187a7bf0a5..cdf5ba26c59 100644
--- a/drivers/staging/comedi/drivers/adq12b.c
+++ b/drivers/staging/comedi/drivers/adq12b.c
@@ -73,6 +73,9 @@ If you do not specify any options, they will default to
*/
+#include <linux/module.h>
+#include <linux/delay.h>
+
#include "../comedidev.h"
/* address scheme (page 2.17 of the manual) */
@@ -214,10 +217,9 @@ static int adq12b_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
devpriv->unipolar = it->options[1];
devpriv->differential = it->options[2];
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index f847bbc175e..f84df46d326 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -41,6 +41,7 @@ Configuration options:
device will be used.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -1233,10 +1234,9 @@ static int pci1710_auto_attach(struct comedi_device *dev,
dev->board_ptr = this_board;
dev->board_name = this_board->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/adv_pci1723.c b/drivers/staging/comedi/drivers/adv_pci1723.c
index 8430a27ec1b..b793d6987b8 100644
--- a/drivers/staging/comedi/drivers/adv_pci1723.c
+++ b/drivers/staging/comedi/drivers/adv_pci1723.c
@@ -43,6 +43,7 @@ TODO:
3. Implement calibration.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -179,38 +180,29 @@ static int pci1723_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
+ unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int mask;
- unsigned int bits;
- unsigned short dio_mode;
+ unsigned short mode;
+ int ret;
- mask = 1 << CR_CHAN(insn->chanspec);
- if (mask & 0x00FF)
- bits = 0x00FF;
+ if (chan < 8)
+ mask = 0x00ff;
else
- bits = 0xFF00;
+ mask = 0xff00;
- switch (data[0]) {
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~bits;
- break;
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= bits;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & bits) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- default:
- return -EINVAL;
- }
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
/* update hardware DIO mode */
- dio_mode = 0x0000; /* low byte output, high byte output */
- if ((s->io_bits & 0x00FF) == 0)
- dio_mode |= 0x0001; /* low byte input */
- if ((s->io_bits & 0xFF00) == 0)
- dio_mode |= 0x0002; /* high byte input */
- outw(dio_mode, dev->iobase + PCI1723_DIGITAL_IO_PORT_SET);
- return 1;
+ mode = 0x0000; /* assume output */
+ if (!(s->io_bits & 0x00ff))
+ mode |= 0x0001; /* low byte input */
+ if (!(s->io_bits & 0xff00))
+ mode |= 0x0002; /* high byte input */
+ outw(mode, dev->iobase + PCI1723_DIGITAL_IO_PORT_SET);
+
+ return insn->n;
}
/*
@@ -237,10 +229,9 @@ static int pci1723_auto_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/adv_pci1724.c b/drivers/staging/comedi/drivers/adv_pci1724.c
index da7462e01fa..009a3039fc4 100644
--- a/drivers/staging/comedi/drivers/adv_pci1724.c
+++ b/drivers/staging/comedi/drivers/adv_pci1724.c
@@ -52,6 +52,8 @@ supported PCI devices are configured as comedi devices automatically.
*/
+#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -123,10 +125,6 @@ static const struct comedi_lrange ao_ranges_1724 = { 4,
}
};
-static const struct comedi_lrange *const ao_range_list_1724[NUM_AO_CHANNELS] = {
- [0 ... NUM_AO_CHANNELS - 1] = &ao_ranges_1724,
-};
-
/* this structure is for data unique to this hardware driver. */
struct adv_pci1724_private {
int ao_value[NUM_AO_CHANNELS];
@@ -306,7 +304,7 @@ static int setup_subdevices(struct comedi_device *dev)
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_GROUND;
s->n_chan = NUM_AO_CHANNELS;
s->maxdata = 0x3fff;
- s->range_table_list = ao_range_list_1724;
+ s->range_table = &ao_ranges_1724;
s->insn_read = ao_readback_insn;
s->insn_write = ao_winsn;
@@ -340,10 +338,9 @@ static int adv_pci1724_auto_attach(struct comedi_device *dev,
int retval;
unsigned int board_id;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
/* init software copies of output values to indicate we don't know
* what the output value is since it has never been written. */
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index 8e6ec75bd29..f091fa0d304 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -29,6 +29,7 @@ Configuration options:
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
@@ -1107,10 +1108,9 @@ static int pci_dio_auto_attach(struct comedi_device *dev,
dev->board_ptr = this_board;
dev->board_name = this_board->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/aio_aio12_8.c b/drivers/staging/comedi/drivers/aio_aio12_8.c
index 279dfe8951f..abb28498b58 100644
--- a/drivers/staging/comedi/drivers/aio_aio12_8.c
+++ b/drivers/staging/comedi/drivers/aio_aio12_8.c
@@ -35,8 +35,8 @@ Notes:
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include "8255.h"
/*
@@ -202,10 +202,9 @@ static int aio_aio12_8_attach(struct comedi_device *dev,
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
diff --git a/drivers/staging/comedi/drivers/aio_iiro_16.c b/drivers/staging/comedi/drivers/aio_iiro_16.c
index 029834d0ff1..afe87cc8976 100644
--- a/drivers/staging/comedi/drivers/aio_iiro_16.c
+++ b/drivers/staging/comedi/drivers/aio_iiro_16.c
@@ -30,8 +30,8 @@ Configuration Options:
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#define AIO_IIRO_16_SIZE 0x08
#define AIO_IIRO_16_RELAY_0_7 0x00
diff --git a/drivers/staging/comedi/drivers/amplc_dio200.c b/drivers/staging/comedi/drivers/amplc_dio200.c
index e2478105ac1..dc1dee79fc1 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200.c
@@ -192,8 +192,7 @@
* order they appear in the channel list.
*/
-#include <linux/slab.h>
-
+#include <linux/module.h>
#include "../comedidev.h"
#include "amplc_dio200.h"
@@ -272,10 +271,9 @@ static int dio200_attach(struct comedi_device *dev, struct comedi_devconfig *it)
irq = it->options[1];
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_request_region(dev, it->options[0], thisboard->mainsize);
if (ret)
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_common.c b/drivers/staging/comedi/drivers/amplc_dio200_common.c
index 649fc69724f..c1f723e8614 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200_common.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200_common.c
@@ -19,8 +19,8 @@
GNU General Public License for more details.
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
#include "../comedidev.h"
@@ -976,34 +976,26 @@ static int dio200_subdev_8255_config(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
+ unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int mask;
- unsigned int bits;
-
- mask = 1 << CR_CHAN(insn->chanspec);
- if (mask & 0x0000ff)
- bits = 0x0000ff;
- else if (mask & 0x00ff00)
- bits = 0x00ff00;
- else if (mask & 0x0f0000)
- bits = 0x0f0000;
+ int ret;
+
+ if (chan < 8)
+ mask = 0x0000ff;
+ else if (chan < 16)
+ mask = 0x00ff00;
+ else if (chan < 20)
+ mask = 0x0f0000;
else
- bits = 0xf00000;
- switch (data[0]) {
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~bits;
- break;
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= bits;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & bits) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- }
+ mask = 0xf00000;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
+
dio200_subdev_8255_set_dir(dev, s);
- return 1;
+
+ return insn->n;
}
/*
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_pci.c b/drivers/staging/comedi/drivers/amplc_dio200_pci.c
index d7d9f5cc3ab..a810a241644 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200_pci.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200_pci.c
@@ -220,9 +220,9 @@
* order they appear in the channel list.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
#include "../comedidev.h"
@@ -380,10 +380,9 @@ static int dio200_pci_auto_attach(struct comedi_device *dev,
dev_info(dev->class_dev, "%s: attach pci %s (%s)\n",
dev->driver->driver_name, pci_name(pci_dev), dev->board_name);
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/amplc_pc236.c b/drivers/staging/comedi/drivers/amplc_pc236.c
index 4e889b82cbf..98075f999c9 100644
--- a/drivers/staging/comedi/drivers/amplc_pc236.c
+++ b/drivers/staging/comedi/drivers/amplc_pc236.c
@@ -47,6 +47,7 @@ the IRQ jumper. If no interrupt is connected, then subdevice 1 is
unused.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -467,10 +468,9 @@ static int pc236_attach(struct comedi_device *dev, struct comedi_devconfig *it)
struct pc236_private *devpriv;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
/* Process options according to bus type. */
if (is_isa_board(thisboard)) {
@@ -510,10 +510,9 @@ static int pc236_auto_attach(struct comedi_device *dev,
dev_info(dev->class_dev, PC236_DRIVER_NAME ": attach pci %s\n",
pci_name(pci_dev));
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
dev->board_ptr = pc236_find_pci_board(pci_dev);
if (dev->board_ptr == NULL) {
diff --git a/drivers/staging/comedi/drivers/amplc_pc263.c b/drivers/staging/comedi/drivers/amplc_pc263.c
index 6546095e7a4..e7108045f55 100644
--- a/drivers/staging/comedi/drivers/amplc_pc263.c
+++ b/drivers/staging/comedi/drivers/amplc_pc263.c
@@ -33,6 +33,7 @@ connected to a reed-relay. Relay contacts are closed when output is 1.
The state of the outputs can be read.
*/
+#include <linux/module.h>
#include "../comedidev.h"
#define PC263_DRIVER_NAME "amplc_pc263"
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
index f1e36f08b10..179de53a86f 100644
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ b/drivers/staging/comedi/drivers/amplc_pci224.c
@@ -98,6 +98,7 @@ Caveats:
correctly.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
@@ -1419,10 +1420,9 @@ static int pci224_attach(struct comedi_device *dev, struct comedi_devconfig *it)
dev_info(dev->class_dev, DRIVER_NAME ": attach\n");
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
pci_dev = pci224_find_pci_dev(dev, it);
if (!pci_dev)
@@ -1440,10 +1440,9 @@ pci224_auto_attach(struct comedi_device *dev, unsigned long context_unused)
dev_info(dev->class_dev, DRIVER_NAME ": attach pci %s\n",
pci_name(pci_dev));
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
dev->board_ptr = pci224_find_pci_board(pci_dev);
if (dev->board_ptr == NULL) {
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 846d6448fa4..43059c25d5e 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -184,6 +184,7 @@ Support for PCI230+/260+, more triggered scan functionality, and workarounds
for (or detection of) various hardware problems added by Ian Abbott.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -2615,10 +2616,9 @@ static int pci230_alloc_private(struct comedi_device *dev)
{
struct pci230_private *devpriv;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
spin_lock_init(&devpriv->isr_spinlock);
spin_lock_init(&devpriv->res_spinlock);
diff --git a/drivers/staging/comedi/drivers/amplc_pci263.c b/drivers/staging/comedi/drivers/amplc_pci263.c
index 4da900cc584..145bb48f618 100644
--- a/drivers/staging/comedi/drivers/amplc_pci263.c
+++ b/drivers/staging/comedi/drivers/amplc_pci263.c
@@ -32,6 +32,7 @@ connected to a reed-relay. Relay contacts are closed when output is 1.
The state of the outputs can be read.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/c6xdigio.c b/drivers/staging/comedi/drivers/c6xdigio.c
index 929218a3597..217aa19cdc3 100644
--- a/drivers/staging/comedi/drivers/c6xdigio.c
+++ b/drivers/staging/comedi/drivers/c6xdigio.c
@@ -35,8 +35,6 @@ http://robot0.ge.uiuc.edu/~spong/mecha/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/timer.h>
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index ae9a2082b5a..0ce93da7084 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -34,8 +34,8 @@ Status: experimental
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
#include <linux/delay.h>
#include "../comedidev.h"
@@ -341,33 +341,22 @@ static int das16cs_dio_insn_bits(struct comedi_device *dev,
static int das16cs_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct das16cs_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec);
- int bits;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
+ int ret;
if (chan < 4)
- bits = 0x0f;
+ mask = 0x0f;
else
- bits = 0xf0;
+ mask = 0xf0;
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= bits;
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= bits;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- break;
- }
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
devpriv->status2 &= ~0x00c0;
devpriv->status2 |= (s->io_bits & 0xf0) ? 0x0080 : 0;
@@ -420,10 +409,9 @@ static int das16cs_auto_attach(struct comedi_device *dev,
return ret;
dev->irq = link->irq;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_alloc_subdevices(dev, 3);
if (ret)
diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
index 58bca184bf2..41d89ee7fa3 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas.c
@@ -61,6 +61,7 @@ TODO:
analog triggering on 1602 series
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -1444,10 +1445,9 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
dev->board_ptr = thisboard;
dev->board_name = thisboard->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index 43c0bf58771..388dbd7a5d2 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -82,6 +82,7 @@ TODO:
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -3509,31 +3510,20 @@ static int do_wbits(struct comedi_device *dev, struct comedi_subdevice *s,
static int dio_60xx_config_insn(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct pcidas64_private *devpriv = dev->private;
- unsigned int mask;
-
- mask = 1 << CR_CHAN(insn->chanspec);
+ int ret;
- switch (data[0]) {
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~mask;
- break;
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= mask;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & mask) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return 2;
- default:
- return -EINVAL;
- }
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
writeb(s->io_bits,
devpriv->dio_counter_iobase + DIO_DIRECTION_60XX_REG);
- return 1;
+ return insn->n;
}
static int dio_60xx_wbits(struct comedi_device *dev, struct comedi_subdevice *s,
@@ -4034,10 +4024,9 @@ static int auto_attach(struct comedi_device *dev,
return -ENODEV;
dev->board_ptr = thisboard;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
retval = comedi_pci_enable(dev);
if (retval)
diff --git a/drivers/staging/comedi/drivers/cb_pcidda.c b/drivers/staging/comedi/drivers/cb_pcidda.c
index 2d3e920e598..94f11582027 100644
--- a/drivers/staging/comedi/drivers/cb_pcidda.c
+++ b/drivers/staging/comedi/drivers/cb_pcidda.c
@@ -37,6 +37,7 @@
* Only simple analog output writing is supported.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -348,10 +349,9 @@ static int cb_pcidda_auto_attach(struct comedi_device *dev,
dev->board_ptr = thisboard;
dev->board_name = thisboard->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
index 8b5c198862a..30520d4c16a 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdas.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdas.c
@@ -35,8 +35,8 @@ No interrupts, multi channel or FIFO AI, although the card looks like it could s
See http://www.mccdaq.com/PDFs/Manuals/pcim-das1602-16.pdf for more details.
*/
+#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/delay.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
@@ -210,10 +210,9 @@ static int cb_pcimdas_auto_attach(struct comedi_device *dev,
unsigned long iobase_8255;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/cb_pcimdda.c b/drivers/staging/comedi/drivers/cb_pcimdda.c
index 406cba8cba8..edf17b63096 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdda.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdda.c
@@ -74,6 +74,7 @@ Configuration Options: not applicable, uses PCI auto config
-Calin Culianu <calin@ajvar.org>
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -156,10 +157,9 @@ static int cb_pcimdda_auto_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/comedi_bond.c b/drivers/staging/comedi/drivers/comedi_bond.c
index 1a51866be6f..51a59e5b8ec 100644
--- a/drivers/staging/comedi/drivers/comedi_bond.c
+++ b/drivers/staging/comedi/drivers/comedi_bond.c
@@ -1,130 +1,131 @@
/*
- comedi/drivers/comedi_bond.c
- A Comedi driver to 'bond' or merge multiple drivers and devices as one.
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- Copyright (C) 2005 Calin A. Culianu <calin@ajvar.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: comedi_bond
-Description: A driver to 'bond' (merge) multiple subdevices from multiple
- devices together as one.
-Devices:
-Author: ds
-Updated: Mon, 10 Oct 00:18:25 -0500
-Status: works
-
-This driver allows you to 'bond' (merge) multiple comedi subdevices
-(coming from possibly difference boards and/or drivers) together. For
-example, if you had a board with 2 different DIO subdevices, and
-another with 1 DIO subdevice, you could 'bond' them with this driver
-so that they look like one big fat DIO subdevice. This makes writing
-applications slightly easier as you don't have to worry about managing
-different subdevices in the application -- you just worry about
-indexing one linear array of channel id's.
-
-Right now only DIO subdevices are supported as that's the personal itch
-I am scratching with this driver. If you want to add support for AI and AO
-subdevs, go right on ahead and do so!
-
-Commands aren't supported -- although it would be cool if they were.
-
-Configuration Options:
- List of comedi-minors to bond. All subdevices of the same type
- within each minor will be concatenated together in the order given here.
-*/
+ * comedi_bond.c
+ * A Comedi driver to 'bond' or merge multiple drivers and devices as one.
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2005 Calin A. Culianu <calin@ajvar.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+/*
+ * Driver: comedi_bond
+ * Description: A driver to 'bond' (merge) multiple subdevices from multiple
+ * devices together as one.
+ * Devices:
+ * Author: ds
+ * Updated: Mon, 10 Oct 00:18:25 -0500
+ * Status: works
+ *
+ * This driver allows you to 'bond' (merge) multiple comedi subdevices
+ * (coming from possibly difference boards and/or drivers) together. For
+ * example, if you had a board with 2 different DIO subdevices, and
+ * another with 1 DIO subdevice, you could 'bond' them with this driver
+ * so that they look like one big fat DIO subdevice. This makes writing
+ * applications slightly easier as you don't have to worry about managing
+ * different subdevices in the application -- you just worry about
+ * indexing one linear array of channel id's.
+ *
+ * Right now only DIO subdevices are supported as that's the personal itch
+ * I am scratching with this driver. If you want to add support for AI and AO
+ * subdevs, go right on ahead and do so!
+ *
+ * Commands aren't supported -- although it would be cool if they were.
+ *
+ * Configuration Options:
+ * List of comedi-minors to bond. All subdevices of the same type
+ * within each minor will be concatenated together in the order given here.
+ */
+
+#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include "../comedi.h"
#include "../comedilib.h"
#include "../comedidev.h"
-/* The maxiumum number of channels per subdevice. */
-#define MAX_CHANS 256
-
-struct BondedDevice {
+struct bonded_device {
struct comedi_device *dev;
unsigned minor;
unsigned subdev;
- unsigned subdev_type;
unsigned nchans;
- unsigned chanid_offset; /* The offset into our unified linear
- channel-id's of chanid 0 on this
- subdevice. */
};
-/* this structure is for data unique to this hardware driver. If
- several hardware drivers keep similar information in this structure,
- feel free to suggest moving the variable to the struct comedi_device struct. */
struct comedi_bond_private {
# define MAX_BOARD_NAME 256
char name[MAX_BOARD_NAME];
- struct BondedDevice **devs;
+ struct bonded_device **devs;
unsigned ndevs;
- struct BondedDevice *chanIdDevMap[MAX_CHANS];
unsigned nchans;
};
-/* DIO devices are slightly special. Although it is possible to
- * implement the insn_read/insn_write interface, it is much more
- * useful to applications if you implement the insn_bits interface.
- * This allows packed reading/writing of the DIO channels. The
- * comedi core can convert between insn_bits and insn_read/write */
static int bonding_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn, unsigned int *data)
{
struct comedi_bond_private *devpriv = dev->private;
-#define LSAMPL_BITS (sizeof(unsigned int)*8)
- unsigned nchans = LSAMPL_BITS, num_done = 0, i;
-
- if (devpriv->nchans < nchans)
- nchans = devpriv->nchans;
-
- /* The insn data is a mask in data[0] and the new data
- * in data[1], each channel cooresponding to a bit. */
- for (i = 0; num_done < nchans && i < devpriv->ndevs; ++i) {
- struct BondedDevice *bdev = devpriv->devs[i];
- /* Grab the channel mask and data of only the bits corresponding
- to this subdevice.. need to shift them to zero position of
- course. */
- /* Bits corresponding to this subdev. */
- unsigned int subdevMask = ((1 << bdev->nchans) - 1);
- unsigned int writeMask, dataBits;
-
- /* Argh, we have >= LSAMPL_BITS chans.. take all bits */
- if (bdev->nchans >= LSAMPL_BITS)
- subdevMask = (unsigned int)(-1);
-
- writeMask = (data[0] >> num_done) & subdevMask;
- dataBits = (data[1] >> num_done) & subdevMask;
-
- /* Read/Write the new digital lines */
- if (comedi_dio_bitfield(bdev->dev, bdev->subdev, writeMask,
- &dataBits) != 2)
- return -EINVAL;
-
- /* Make room for the new bits in data[1], the return value */
- data[1] &= ~(subdevMask << num_done);
- /* Put the bits in the return value */
- data[1] |= (dataBits & subdevMask) << num_done;
- /* Save the new bits to the saved state.. */
- s->state = data[1];
-
- num_done += bdev->nchans;
- }
+ unsigned int n_left, n_done, base_chan;
+ unsigned int write_mask, data_bits;
+ struct bonded_device **devs;
+
+ write_mask = data[0];
+ data_bits = data[1];
+ base_chan = CR_CHAN(insn->chanspec);
+ /* do a maximum of 32 channels, starting from base_chan. */
+ n_left = devpriv->nchans - base_chan;
+ if (n_left > 32)
+ n_left = 32;
+
+ n_done = 0;
+ devs = devpriv->devs;
+ do {
+ struct bonded_device *bdev = *devs++;
+
+ if (base_chan < bdev->nchans) {
+ /* base channel falls within bonded device */
+ unsigned int b_chans, b_mask, b_write_mask, b_data_bits;
+ int ret;
+
+ /*
+ * Get num channels to do for bonded device and set
+ * up mask and data bits for bonded device.
+ */
+ b_chans = bdev->nchans - base_chan;
+ if (b_chans > n_left)
+ b_chans = n_left;
+ b_mask = (1U << b_chans) - 1;
+ b_write_mask = (write_mask >> n_done) & b_mask;
+ b_data_bits = (data_bits >> n_done) & b_mask;
+ /* Read/Write the new digital lines. */
+ ret = comedi_dio_bitfield2(bdev->dev, bdev->subdev,
+ b_write_mask, &b_data_bits,
+ base_chan);
+ if (ret < 0)
+ return ret;
+ /* Place read bits into data[1]. */
+ data[1] &= ~(b_mask << n_done);
+ data[1] |= (b_data_bits & b_mask) << n_done;
+ /*
+ * Set up for following bonded device (if still have
+ * channels to read/write).
+ */
+ base_chan = 0;
+ n_done += b_chans;
+ n_left -= b_chans;
+ } else {
+ /* Skip bonded devices before base channel. */
+ base_chan -= bdev->nchans;
+ }
+ } while (n_left);
return insn->n;
}
@@ -134,99 +135,91 @@ static int bonding_dio_insn_config(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
struct comedi_bond_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec), ret, io_bits = s->io_bits;
- unsigned int io;
- struct BondedDevice *bdev;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int ret;
+ struct bonded_device *bdev;
+ struct bonded_device **devs;
- if (chan < 0 || chan >= devpriv->nchans)
- return -EINVAL;
- bdev = devpriv->chanIdDevMap[chan];
+ /*
+ * Locate bonded subdevice and adjust channel.
+ */
+ devs = devpriv->devs;
+ for (bdev = *devs++; chan >= bdev->nchans; bdev = *devs++)
+ chan -= bdev->nchans;
- /* The input or output configuration of each digital line is
+ /*
+ * The input or output configuration of each digital line is
* configured by a special insn_config instruction. chanspec
* contains the channel to be changed, and data[0] contains the
- * value COMEDI_INPUT or COMEDI_OUTPUT. */
+ * configuration instruction INSN_CONFIG_DIO_OUTPUT,
+ * INSN_CONFIG_DIO_INPUT or INSN_CONFIG_DIO_QUERY.
+ *
+ * Note that INSN_CONFIG_DIO_OUTPUT == COMEDI_OUTPUT,
+ * and INSN_CONFIG_DIO_INPUT == COMEDI_INPUT. This is deliberate ;)
+ */
switch (data[0]) {
case INSN_CONFIG_DIO_OUTPUT:
- io = COMEDI_OUTPUT; /* is this really necessary? */
- io_bits |= 1 << chan;
- break;
case INSN_CONFIG_DIO_INPUT:
- io = COMEDI_INPUT; /* is this really necessary? */
- io_bits &= ~(1 << chan);
+ ret = comedi_dio_config(bdev->dev, bdev->subdev, chan, data[0]);
break;
case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
+ ret = comedi_dio_get_config(bdev->dev, bdev->subdev, chan,
+ &data[1]);
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
break;
}
- /* 'real' channel id for this subdev.. */
- chan -= bdev->chanid_offset;
- ret = comedi_dio_config(bdev->dev, bdev->subdev, chan, io);
- if (ret != 1)
- return -EINVAL;
- /* Finally, save the new io_bits values since we didn't get
- an error above. */
- s->io_bits = io_bits;
- return insn->n;
+ if (ret >= 0)
+ ret = insn->n;
+ return ret;
}
-static void *Realloc(const void *oldmem, size_t newlen, size_t oldlen)
-{
- void *newmem = kmalloc(newlen, GFP_KERNEL);
-
- if (newmem && oldmem)
- memcpy(newmem, oldmem, min(oldlen, newlen));
- kfree(oldmem);
- return newmem;
-}
-
-static int doDevConfig(struct comedi_device *dev, struct comedi_devconfig *it)
+static int do_dev_config(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct comedi_bond_private *devpriv = dev->private;
+ DECLARE_BITMAP(devs_opened, COMEDI_NUM_BOARD_MINORS);
int i;
- struct comedi_device *devs_opened[COMEDI_NUM_BOARD_MINORS];
- memset(devs_opened, 0, sizeof(devs_opened));
+ memset(&devs_opened, 0, sizeof(devs_opened));
devpriv->name[0] = 0;
- /* Loop through all comedi devices specified on the command-line,
- building our device list */
+ /*
+ * Loop through all comedi devices specified on the command-line,
+ * building our device list.
+ */
for (i = 0; i < COMEDI_NDEVCONFOPTS && (!i || it->options[i]); ++i) {
- char file[] = "/dev/comediXXXXXX";
+ char file[sizeof("/dev/comediXXXXXX")];
int minor = it->options[i];
struct comedi_device *d;
- int sdev = -1, nchans, tmp;
- struct BondedDevice *bdev = NULL;
+ int sdev = -1, nchans;
+ struct bonded_device *bdev;
+ struct bonded_device **devs;
if (minor < 0 || minor >= COMEDI_NUM_BOARD_MINORS) {
dev_err(dev->class_dev,
"Minor %d is invalid!\n", minor);
- return 0;
+ return -EINVAL;
}
if (minor == dev->minor) {
dev_err(dev->class_dev,
"Cannot bond this driver to itself!\n");
- return 0;
+ return -EINVAL;
}
- if (devs_opened[minor]) {
+ if (test_and_set_bit(minor, devs_opened)) {
dev_err(dev->class_dev,
"Minor %d specified more than once!\n", minor);
- return 0;
+ return -EINVAL;
}
snprintf(file, sizeof(file), "/dev/comedi%u", minor);
file[sizeof(file) - 1] = 0;
- d = devs_opened[minor] = comedi_open(file);
+ d = comedi_open(file);
if (!d) {
dev_err(dev->class_dev,
"Minor %u could not be opened\n", minor);
- return 0;
+ return -ENODEV;
}
/* Do DIO, as that's all we support now.. */
@@ -237,45 +230,41 @@ static int doDevConfig(struct comedi_device *dev, struct comedi_devconfig *it)
dev_err(dev->class_dev,
"comedi_get_n_channels() returned %d on minor %u subdev %d!\n",
nchans, minor, sdev);
- return 0;
+ return -EINVAL;
}
bdev = kmalloc(sizeof(*bdev), GFP_KERNEL);
if (!bdev)
- return 0;
+ return -ENOMEM;
bdev->dev = d;
bdev->minor = minor;
bdev->subdev = sdev;
- bdev->subdev_type = COMEDI_SUBD_DIO;
bdev->nchans = nchans;
- bdev->chanid_offset = devpriv->nchans;
+ devpriv->nchans += nchans;
- /* map channel id's to BondedDevice * pointer.. */
- while (nchans--)
- devpriv->chanIdDevMap[devpriv->nchans++] = bdev;
-
- /* Now put bdev pointer at end of devpriv->devs array
- * list.. */
+ /*
+ * Now put bdev pointer at end of devpriv->devs array
+ * list..
+ */
/* ergh.. ugly.. we need to realloc :( */
- tmp = devpriv->ndevs * sizeof(bdev);
- devpriv->devs =
- Realloc(devpriv->devs,
- ++devpriv->ndevs * sizeof(bdev), tmp);
- if (!devpriv->devs) {
+ devs = krealloc(devpriv->devs,
+ (devpriv->ndevs + 1) * sizeof(*devs),
+ GFP_KERNEL);
+ if (!devs) {
dev_err(dev->class_dev,
"Could not allocate memory. Out of memory?\n");
- return 0;
+ return -ENOMEM;
}
-
- devpriv->devs[devpriv->ndevs - 1] = bdev;
+ devpriv->devs = devs;
+ devpriv->devs[devpriv->ndevs++] = bdev;
{
- /** Append dev:subdev to devpriv->name */
+ /* Append dev:subdev to devpriv->name */
char buf[20];
int left =
MAX_BOARD_NAME - strlen(devpriv->name) - 1;
- snprintf(buf, sizeof(buf), "%d:%d ", dev->minor,
- bdev->subdev);
+ snprintf(buf, sizeof(buf), "%d:%d ",
+ bdev->minor, bdev->subdev);
buf[sizeof(buf) - 1] = 0;
strncat(devpriv->name, buf, left);
}
@@ -285,10 +274,10 @@ static int doDevConfig(struct comedi_device *dev, struct comedi_devconfig *it)
if (!devpriv->nchans) {
dev_err(dev->class_dev, "No channels found!\n");
- return 0;
+ return -EINVAL;
}
- return 1;
+ return 0;
}
static int bonding_attach(struct comedi_device *dev,
@@ -298,16 +287,16 @@ static int bonding_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
/*
* Setup our bonding from config params.. sets up our private struct..
*/
- if (!doDevConfig(dev, it))
- return -EINVAL;
+ ret = do_dev_config(dev, it);
+ if (ret)
+ return ret;
dev->board_name = devpriv->name;
@@ -329,31 +318,29 @@ static int bonding_attach(struct comedi_device *dev,
dev->driver->driver_name, dev->board_name,
devpriv->nchans, devpriv->ndevs);
- return 1;
+ return 0;
}
static void bonding_detach(struct comedi_device *dev)
{
struct comedi_bond_private *devpriv = dev->private;
- unsigned long devs_closed = 0;
- if (devpriv) {
- while (devpriv->ndevs-- && devpriv->devs) {
- struct BondedDevice *bdev;
+ if (devpriv && devpriv->devs) {
+ DECLARE_BITMAP(devs_closed, COMEDI_NUM_BOARD_MINORS);
+
+ memset(&devs_closed, 0, sizeof(devs_closed));
+ while (devpriv->ndevs--) {
+ struct bonded_device *bdev;
bdev = devpriv->devs[devpriv->ndevs];
if (!bdev)
continue;
- if (!(devs_closed & (0x1 << bdev->minor))) {
+ if (!test_and_set_bit(bdev->minor, devs_closed))
comedi_close(bdev->dev);
- devs_closed |= (0x1 << bdev->minor);
- }
kfree(bdev);
}
kfree(devpriv->devs);
devpriv->devs = NULL;
- kfree(devpriv);
- dev->private = NULL;
}
}
@@ -366,7 +353,5 @@ static struct comedi_driver bonding_driver = {
module_comedi_driver(bonding_driver);
MODULE_AUTHOR("Calin A. Culianu");
-MODULE_DESCRIPTION("comedi_bond: A driver for COMEDI to bond multiple COMEDI "
- "devices together as one. In the words of John Lennon: "
- "'And the world will live as one...'");
+MODULE_DESCRIPTION("comedi_bond: A driver for COMEDI to bond multiple COMEDI devices together as one.");
MODULE_LICENSE("GPL");
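
Since the bonded board still presents one linear DIO subdevice, the rewritten driver is driven from user space the same way as before. A hedged sketch using the comedilib API (device paths, minors and channel numbers are illustrative), assuming boards on minors 0 and 1 were bonded onto minor 3 with something along the lines of "comedi_config /dev/comedi3 comedi_bond 0,1":

#include <stdio.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi3");
	unsigned int bits = 0x01;	/* drive bonded channel 0 high */

	if (!dev)
		return 1;

	/* channel ids index one linear array spanning all bonded subdevices */
	comedi_dio_config(dev, 0, 0, COMEDI_OUTPUT);
	comedi_dio_bitfield2(dev, 0, 0x01, &bits, 0);
	printf("readback: 0x%x\n", bits);

	comedi_close(dev);
	return 0;
}
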
diff --git a/drivers/staging/comedi/drivers/comedi_fc.c b/drivers/staging/comedi/drivers/comedi_fc.c
index b3d89c82d08..26d9dbcf8bd 100644
--- a/drivers/staging/comedi/drivers/comedi_fc.c
+++ b/drivers/staging/comedi/drivers/comedi_fc.c
@@ -19,6 +19,7 @@
GNU General Public License for more details.
*/
+#include <linux/module.h>
#include "../comedidev.h"
#include "comedi_fc.h"
diff --git a/drivers/staging/comedi/drivers/comedi_parport.c b/drivers/staging/comedi/drivers/comedi_parport.c
index 772a8f5f0c1..f28a15f0274 100644
--- a/drivers/staging/comedi/drivers/comedi_parport.c
+++ b/drivers/staging/comedi/drivers/comedi_parport.c
@@ -76,9 +76,9 @@ pin, which can be used to wake up tasks.
or http://www.linux-magazin.de/ausgabe/1999/10/IO/io.html
*/
+#include <linux/module.h>
#include "../comedidev.h"
#include <linux/interrupt.h>
-#include <linux/ioport.h>
#include "comedi_fc.h"
@@ -279,10 +279,9 @@ static int parport_attach(struct comedi_device *dev,
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
s = &dev->subdevices[0];
s->type = COMEDI_SUBD_DIO;
diff --git a/drivers/staging/comedi/drivers/comedi_test.c b/drivers/staging/comedi/drivers/comedi_test.c
index 907e7a3822f..16c07802107 100644
--- a/drivers/staging/comedi/drivers/comedi_test.c
+++ b/drivers/staging/comedi/drivers/comedi_test.c
@@ -45,6 +45,7 @@ zero volts).
*/
+#include <linux/module.h>
#include "../comedidev.h"
#include <asm/div64.h>
@@ -379,10 +380,9 @@ static int waveform_attach(struct comedi_device *dev,
int i;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
/* set default amplitude and period */
if (amplitude <= 0)
diff --git a/drivers/staging/comedi/drivers/contec_pci_dio.c b/drivers/staging/comedi/drivers/contec_pci_dio.c
index 0fb9027dde2..e781716bf35 100644
--- a/drivers/staging/comedi/drivers/contec_pci_dio.c
+++ b/drivers/staging/comedi/drivers/contec_pci_dio.c
@@ -25,6 +25,7 @@ Status: works
Configuration Options: not applicable, uses comedi PCI auto config
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index 44c912b48b6..de920ccff40 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -102,6 +102,7 @@ Configuration options: not applicable, uses PCI auto config
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -683,10 +684,9 @@ static int daqboard2000_auto_attach(struct comedi_device *dev,
dev->board_ptr = board;
dev->board_name = board->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
result = comedi_pci_enable(dev);
if (result)
diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
index 2e7e3e20239..5f669709501 100644
--- a/drivers/staging/comedi/drivers/das08.c
+++ b/drivers/staging/comedi/drivers/das08.c
@@ -33,7 +33,7 @@
* cheap das08 hardware doesn't really support them.
*/
-#include <linux/delay.h>
+#include <linux/module.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index 885fb179c9b..f3ccc2ce6d4 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -39,8 +39,7 @@ Options (for pcm-das08):
Command support does not exist, but could be added for this board.
*/
-#include <linux/delay.h>
-#include <linux/slab.h>
+#include <linux/module.h>
#include "../comedidev.h"
@@ -78,10 +77,9 @@ static int das08_cs_auto_attach(struct comedi_device *dev,
return ret;
iobase = link->resource[0]->start;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
return das08_common_attach(dev, iobase);
}
diff --git a/drivers/staging/comedi/drivers/das08_isa.c b/drivers/staging/comedi/drivers/das08_isa.c
index 21a94389b8b..4fb03d3852d 100644
--- a/drivers/staging/comedi/drivers/das08_isa.c
+++ b/drivers/staging/comedi/drivers/das08_isa.c
@@ -43,6 +43,7 @@
* [0] - base io address
*/
+#include <linux/module.h>
#include "../comedidev.h"
#include "das08.h"
@@ -177,10 +178,9 @@ static int das08_isa_attach(struct comedi_device *dev,
struct das08_private_struct *devpriv;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_request_region(dev, it->options[0], thisboard->iosize);
if (ret)
diff --git a/drivers/staging/comedi/drivers/das08_pci.c b/drivers/staging/comedi/drivers/das08_pci.c
index 9c5d234e063..3a6d3725b25 100644
--- a/drivers/staging/comedi/drivers/das08_pci.c
+++ b/drivers/staging/comedi/drivers/das08_pci.c
@@ -31,6 +31,7 @@
* Configuration Options: not applicable, uses PCI auto config
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -59,10 +60,9 @@ static int das08_pci_auto_attach(struct comedi_device *dev,
struct das08_private_struct *devpriv;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
/* The das08 driver needs the board_ptr */
dev->board_ptr = &das08_pci_boards[0];
diff --git a/drivers/staging/comedi/drivers/das16.c b/drivers/staging/comedi/drivers/das16.c
index dbec3ba9954..1b0793f33b9 100644
--- a/drivers/staging/comedi/drivers/das16.c
+++ b/drivers/staging/comedi/drivers/das16.c
@@ -1,80 +1,89 @@
/*
- comedi/drivers/das16.c
- DAS16 driver
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
- Copyright (C) 2000 Chris R. Baugher <baugher@enteract.com>
- Copyright (C) 2001,2002 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: das16
-Description: DAS16 compatible boards
-Author: Sam Moore, Warren Jasper, ds, Chris Baugher, Frank Hess, Roman Fietze
-Devices: [Keithley Metrabyte] DAS-16 (das-16), DAS-16G (das-16g),
- DAS-16F (das-16f), DAS-1201 (das-1201), DAS-1202 (das-1202),
- DAS-1401 (das-1401), DAS-1402 (das-1402), DAS-1601 (das-1601),
- DAS-1602 (das-1602),
- [ComputerBoards] PC104-DAS16/JR (pc104-das16jr),
- PC104-DAS16JR/16 (pc104-das16jr/16),
- CIO-DAS16JR/16 (cio-das16jr/16),
- CIO-DAS16/JR (cio-das16/jr), CIO-DAS1401/12 (cio-das1401/12),
- CIO-DAS1402/12 (cio-das1402/12), CIO-DAS1402/16 (cio-das1402/16),
- CIO-DAS1601/12 (cio-das1601/12), CIO-DAS1602/12 (cio-das1602/12),
- CIO-DAS1602/16 (cio-das1602/16), CIO-DAS16/330 (cio-das16/330)
-Status: works
-Updated: 2003-10-12
-
-A rewrite of the das16 and das1600 drivers.
-Options:
- [0] - base io address
- [1] - irq (does nothing, irq is not used anymore)
- [2] - dma (optional, required for comedi_command support)
- [3] - master clock speed in MHz (optional, 1 or 10, ignored if
- board can probe clock, defaults to 1)
- [4] - analog input range lowest voltage in microvolts (optional,
- only useful if your board does not have software
- programmable gain)
- [5] - analog input range highest voltage in microvolts (optional,
- only useful if board does not have software programmable
- gain)
- [6] - analog output range lowest voltage in microvolts (optional)
- [7] - analog output range highest voltage in microvolts (optional)
- [8] - use timer mode for DMA. Timer mode is needed e.g. for
- buggy DMA controllers in NS CS5530A (Geode Companion), and for
- 'jr' cards that lack a hardware fifo. This option is no
- longer needed, since timer mode is _always_ used.
-
-Passing a zero for an option is the same as leaving it unspecified.
+ * das16.c
+ * DAS16 driver
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2000 Chris R. Baugher <baugher@enteract.com>
+ * Copyright (C) 2001,2002 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
-*/
/*
+ * Driver: das16
+ * Description: DAS16 compatible boards
+ * Author: Sam Moore, Warren Jasper, ds, Chris Baugher, Frank Hess, Roman Fietze
+ * Devices: (Keithley Metrabyte) DAS-16 [das-16]
+ * (Keithley Metrabyte) DAS-16G [das-16g]
+ * (Keithley Metrabyte) DAS-16F [das-16f]
+ * (Keithley Metrabyte) DAS-1201 [das-1201]
+ * (Keithley Metrabyte) DAS-1202 [das-1202]
+ * (Keithley Metrabyte) DAS-1401 [das-1401]
+ * (Keithley Metrabyte) DAS-1402 [das-1402]
+ * (Keithley Metrabyte) DAS-1601 [das-1601]
+ * (Keithley Metrabyte) DAS-1602 [das-1602]
+ * (ComputerBoards) PC104-DAS16/JR [pc104-das16jr]
+ * (ComputerBoards) PC104-DAS16JR/16 [pc104-das16jr/16]
+ * (ComputerBoards) CIO-DAS16 [cio-das16]
+ * (ComputerBoards) CIO-DAS16F [cio-das16/f]
+ * (ComputerBoards) CIO-DAS16/JR [cio-das16/jr]
+ * (ComputerBoards) CIO-DAS16JR/16 [cio-das16jr/16]
+ * (ComputerBoards) CIO-DAS1401/12 [cio-das1401/12]
+ * (ComputerBoards) CIO-DAS1402/12 [cio-das1402/12]
+ * (ComputerBoards) CIO-DAS1402/16 [cio-das1402/16]
+ * (ComputerBoards) CIO-DAS1601/12 [cio-das1601/12]
+ * (ComputerBoards) CIO-DAS1602/12 [cio-das1602/12]
+ * (ComputerBoards) CIO-DAS1602/16 [cio-das1602/16]
+ * (ComputerBoards) CIO-DAS16/330 [cio-das16/330]
+ * Status: works
+ * Updated: 2003-10-12
+ *
+ * A rewrite of the das16 and das1600 drivers.
+ *
+ * Options:
+ * [0] - base io address
+ * [1] - irq (does nothing, irq is not used anymore)
+ * [2] - dma channel (optional, required for comedi_command support)
+ * [3] - master clock speed in MHz (optional, 1 or 10, ignored if
+ * board can probe clock, defaults to 1)
+ * [4] - analog input range lowest voltage in microvolts (optional,
+ * only useful if your board does not have software
+ * programmable gain)
+ * [5] - analog input range highest voltage in microvolts (optional,
+ * only useful if board does not have software programmable
+ * gain)
+ * [6] - analog output range lowest voltage in microvolts (optional)
+ * [7] - analog output range highest voltage in microvolts (optional)
+ *
+ * Passing a zero for an option is the same as leaving it unspecified.
+ */
-Testing and debugging help provided by Daniel Koch.
-
-Keithley Manuals:
- 2309.PDF (das16)
- 4919.PDF (das1400, 1600)
- 4922.PDF (das-1400)
- 4923.PDF (das1200, 1400, 1600)
-
-Computer boards manuals also available from their website
-www.measurementcomputing.com
-
-*/
+/*
+ * Testing and debugging help provided by Daniel Koch.
+ *
+ * Keithley Manuals:
+ * 2309.PDF (das16)
+ * 4919.PDF (das1400, 1600)
+ * 4922.PDF (das-1400)
+ * 4923.PDF (das1200, 1400, 1600)
+ *
+ * Computer boards manuals also available from their website
+ * www.measurementcomputing.com
+ */
-#include <linux/pci.h>
+#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
#include <linux/interrupt.h>
#include <asm/dma.h>
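
The bracketed option numbers in the rewritten header comment are indices into it->options[], as passed in by comedi_config. A rough sketch of how an attach routine typically consumes such options (the function name and the 0x10 region size are illustrative, not the actual das16 code):

static int my_isa_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	int ret;

	/* [0] base io address; 0x10 is a made-up I/O region size */
	ret = comedi_request_region(dev, it->options[0], 0x10);
	if (ret)
		return ret;

	/*
	 * [2] dma channel and [3] master clock speed would be taken from
	 * it->options[2] and it->options[3] here; a zero option means
	 * "unspecified", as noted in the header comment above.
	 */
	return 0;
}
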
@@ -85,214 +94,112 @@ www.measurementcomputing.com
#include "8255.h"
#include "comedi_fc.h"
-#undef DEBUG
-/* #define DEBUG */
-
-#ifdef DEBUG
-#define DEBUG_PRINT(format, args...) \
- printk(KERN_DEBUG "das16: " format, ## args)
-#else
-#define DEBUG_PRINT(format, args...)
-#endif
-
-#define DAS16_SIZE 20 /* number of ioports */
#define DAS16_DMA_SIZE 0xff00 /* size in bytes of allocated dma buffer */
/*
- cio-das16.pdf
-
- "das16"
- "das16/f"
-
- 0 a/d bits 0-3 start 12 bit
- 1 a/d bits 4-11 unused
- 2 mux read mux set
- 3 di 4 bit do 4 bit
- 4 unused ao0_lsb
- 5 unused ao0_msb
- 6 unused ao1_lsb
- 7 unused ao1_msb
- 8 status eoc uni/bip interrupt reset
- 9 dma, int, trig ctrl set dma, int
- a pacer control unused
- b reserved reserved
- cdef 8254
- 0123 8255
-
-*/
-
-/*
- cio-das16jr.pdf
-
- "das16jr"
-
- 0 a/d bits 0-3 start 12 bit
- 1 a/d bits 4-11 unused
- 2 mux read mux set
- 3 di 4 bit do 4 bit
- 4567 unused unused
- 8 status eoc uni/bip interrupt reset
- 9 dma, int, trig ctrl set dma, int
- a pacer control unused
- b gain status gain control
- cdef 8254
-
-*/
-
-/*
- cio-das16jr_16.pdf
-
- "das16jr_16"
-
- 0 a/d bits 0-7 start 16 bit
- 1 a/d bits 8-15 unused
- 2 mux read mux set
- 3 di 4 bit do 4 bit
- 4567 unused unused
- 8 status eoc uni/bip interrupt reset
- 9 dma, int, trig ctrl set dma, int
- a pacer control unused
- b gain status gain control
- cdef 8254
-
-*/
-/*
- cio-das160x-1x.pdf
-
- "das1601/12"
- "das1602/12"
- "das1602/16"
-
- 0 a/d bits 0-3 start 12 bit
- 1 a/d bits 4-11 unused
- 2 mux read mux set
- 3 di 4 bit do 4 bit
- 4 unused ao0_lsb
- 5 unused ao0_msb
- 6 unused ao1_lsb
- 7 unused ao1_msb
- 8 status eoc uni/bip interrupt reset
- 9 dma, int, trig ctrl set dma, int
- a pacer control unused
- b gain status gain control
- cdef 8254
- 400 8255
- 404 unused conversion enable
- 405 unused burst enable
- 406 unused das1600 enable
- 407 status
-
-*/
-
-/* size in bytes of a sample from board */
-static const int sample_size = 2;
-
-#define DAS16_TRIG 0
-#define DAS16_AI_LSB 0
-#define DAS16_AI_MSB 1
-#define DAS16_MUX 2
-#define DAS16_DIO 3
-#define DAS16_AO_LSB(x) ((x) ? 6 : 4)
-#define DAS16_AO_MSB(x) ((x) ? 7 : 5)
-#define DAS16_STATUS 8
-#define BUSY (1<<7)
-#define UNIPOLAR (1<<6)
-#define DAS16_MUXBIT (1<<5)
-#define DAS16_INT (1<<4)
-#define DAS16_CONTROL 9
-#define DAS16_INTE (1<<7)
-#define DAS16_IRQ(x) (((x) & 0x7) << 4)
-#define DMA_ENABLE (1<<2)
-#define PACING_MASK 0x3
-#define INT_PACER 0x03
-#define EXT_PACER 0x02
-#define DAS16_SOFT 0x00
-#define DAS16_PACER 0x0A
-#define DAS16_CTR0 (1<<1)
-#define DAS16_TRIG0 (1<<0)
-#define BURST_LEN_BITS(x) (((x) & 0xf) << 4)
-#define DAS16_GAIN 0x0B
-#define DAS16_CNTR0_DATA 0x0C
-#define DAS16_CNTR1_DATA 0x0D
-#define DAS16_CNTR2_DATA 0x0E
-#define DAS16_CNTR_CONTROL 0x0F
-#define DAS16_TERM_CNT 0x00
-#define DAS16_ONE_SHOT 0x02
-#define DAS16_RATE_GEN 0x04
-#define DAS16_CNTR_LSB_MSB 0x30
-#define DAS16_CNTR0 0x00
-#define DAS16_CNTR1 0x40
-#define DAS16_CNTR2 0x80
-
-#define DAS1600_CONV 0x404
-#define DAS1600_CONV_DISABLE 0x40
-#define DAS1600_BURST 0x405
-#define DAS1600_BURST_VAL 0x40
-#define DAS1600_ENABLE 0x406
-#define DAS1600_ENABLE_VAL 0x40
-#define DAS1600_STATUS_B 0x407
-#define DAS1600_BME 0x40
-#define DAS1600_ME 0x20
-#define DAS1600_CD 0x10
-#define DAS1600_WS 0x02
-#define DAS1600_CLK_10MHZ 0x01
-
-static const struct comedi_lrange range_das1x01_bip = { 4, {
- BIP_RANGE(10),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.01),
- }
+ * Register I/O map
+ */
+#define DAS16_TRIG_REG 0x00
+#define DAS16_AI_LSB_REG 0x00
+#define DAS16_AI_MSB_REG 0x01
+#define DAS16_MUX_REG 0x02
+#define DAS16_DIO_REG 0x03
+#define DAS16_AO_LSB_REG(x) ((x) ? 0x06 : 0x04)
+#define DAS16_AO_MSB_REG(x) ((x) ? 0x07 : 0x05)
+#define DAS16_STATUS_REG 0x08
+#define DAS16_STATUS_BUSY (1 << 7)
+#define DAS16_STATUS_UNIPOLAR (1 << 6)
+#define DAS16_STATUS_MUXBIT (1 << 5)
+#define DAS16_STATUS_INT (1 << 4)
+#define DAS16_CTRL_REG 0x09
+#define DAS16_CTRL_INTE (1 << 7)
+#define DAS16_CTRL_IRQ(x) (((x) & 0x7) << 4)
+#define DAS16_CTRL_DMAE (1 << 2)
+#define DAS16_CTRL_PACING_MASK (3 << 0)
+#define DAS16_CTRL_INT_PACER (3 << 0)
+#define DAS16_CTRL_EXT_PACER (2 << 0)
+#define DAS16_CTRL_SOFT_PACER (0 << 0)
+#define DAS16_PACER_REG 0x0a
+#define DAS16_PACER_BURST_LEN(x) (((x) & 0xf) << 4)
+#define DAS16_PACER_CTR0 (1 << 1)
+#define DAS16_PACER_TRIG0 (1 << 0)
+#define DAS16_GAIN_REG 0x0b
+#define DAS16_TIMER_BASE_REG 0x0c /* to 0x0f */
+
+#define DAS1600_CONV_REG 0x404
+#define DAS1600_CONV_DISABLE (1 << 6)
+#define DAS1600_BURST_REG 0x405
+#define DAS1600_BURST_VAL (1 << 6)
+#define DAS1600_ENABLE_REG 0x406
+#define DAS1600_ENABLE_VAL (1 << 6)
+#define DAS1600_STATUS_REG 0x407
+#define DAS1600_STATUS_BME (1 << 6)
+#define DAS1600_STATUS_ME (1 << 5)
+#define DAS1600_STATUS_CD (1 << 4)
+#define DAS1600_STATUS_WS (1 << 1)
+#define DAS1600_STATUS_CLK_10MHZ (1 << 0)
+
+static const struct comedi_lrange range_das1x01_bip = {
+ 4, {
+ BIP_RANGE(10),
+ BIP_RANGE(1),
+ BIP_RANGE(0.1),
+ BIP_RANGE(0.01)
+ }
};
-static const struct comedi_lrange range_das1x01_unip = { 4, {
- UNI_RANGE(10),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.01),
- }
+static const struct comedi_lrange range_das1x01_unip = {
+ 4, {
+ UNI_RANGE(10),
+ UNI_RANGE(1),
+ UNI_RANGE(0.1),
+ UNI_RANGE(0.01)
+ }
};
-static const struct comedi_lrange range_das1x02_bip = { 4, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- }
+static const struct comedi_lrange range_das1x02_bip = {
+ 4, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25)
+ }
};
-static const struct comedi_lrange range_das1x02_unip = { 4, {
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- }
+static const struct comedi_lrange range_das1x02_unip = {
+ 4, {
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
-static const struct comedi_lrange range_das16jr = { 9, {
- /* also used by 16/330 */
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- BIP_RANGE(0.625),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- }
+static const struct comedi_lrange range_das16jr = {
+ 9, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ BIP_RANGE(0.625),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
-static const struct comedi_lrange range_das16jr_16 = { 8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25),
- }
+static const struct comedi_lrange range_das16jr_16 = {
+ 8, {
+ BIP_RANGE(10),
+ BIP_RANGE(5),
+ BIP_RANGE(2.5),
+ BIP_RANGE(1.25),
+ UNI_RANGE(10),
+ UNI_RANGE(5),
+ UNI_RANGE(2.5),
+ UNI_RANGE(1.25)
+ }
};
static const int das16jr_gainlist[] = { 8, 0, 1, 2, 3, 4, 5, 6, 7 };
@@ -330,30 +237,211 @@ static const struct comedi_lrange *const das16_ai_bip_lranges[] = {
&range_das1x02_bip,
};
-struct munge_info {
- uint8_t byte;
- unsigned have_byte:1;
-};
-
struct das16_board {
const char *name;
- void *ai;
- unsigned int ai_nbits;
+ unsigned int ai_maxdata;
unsigned int ai_speed; /* max conversion speed in nanosec */
unsigned int ai_pg;
- void *ao;
- unsigned int ao_nbits;
- void *di;
- void *do_;
+ unsigned int has_ao:1;
+ unsigned int has_8255:1;
unsigned int i8255_offset;
- unsigned int i8254_offset;
unsigned int size;
unsigned int id;
};
-#define DAS16_TIMEOUT 1000
+static const struct das16_board das16_boards[] = {
+ {
+ .name = "das-16",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 15000,
+ .ai_pg = das16_pg_none,
+ .has_ao = 1,
+ .has_8255 = 1,
+ .i8255_offset = 0x10,
+ .size = 0x14,
+ .id = 0x00,
+ }, {
+ .name = "das-16g",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 15000,
+ .ai_pg = das16_pg_none,
+ .has_ao = 1,
+ .has_8255 = 1,
+ .i8255_offset = 0x10,
+ .size = 0x14,
+ .id = 0x00,
+ }, {
+ .name = "das-16f",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 8500,
+ .ai_pg = das16_pg_none,
+ .has_ao = 1,
+ .has_8255 = 1,
+ .i8255_offset = 0x10,
+ .size = 0x14,
+ .id = 0x00,
+ }, {
+ .name = "cio-das16",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 20000,
+ .ai_pg = das16_pg_none,
+ .has_ao = 1,
+ .has_8255 = 1,
+ .i8255_offset = 0x10,
+ .size = 0x14,
+ .id = 0x80,
+ }, {
+ .name = "cio-das16/f",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_none,
+ .has_ao = 1,
+ .has_8255 = 1,
+ .i8255_offset = 0x10,
+ .size = 0x14,
+ .id = 0x80,
+ }, {
+ .name = "cio-das16/jr",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 7692,
+ .ai_pg = das16_pg_16jr,
+ .size = 0x10,
+ .id = 0x00,
+ }, {
+ .name = "pc104-das16jr",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 3300,
+ .ai_pg = das16_pg_16jr,
+ .size = 0x10,
+ .id = 0x00,
+ }, {
+ .name = "cio-das16jr/16",
+ .ai_maxdata = 0xffff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_16jr_16,
+ .size = 0x10,
+ .id = 0x00,
+ }, {
+ .name = "pc104-das16jr/16",
+ .ai_maxdata = 0xffff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_16jr_16,
+ .size = 0x10,
+ .id = 0x00,
+ }, {
+ .name = "das-1201",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 20000,
+ .ai_pg = das16_pg_none,
+ .has_8255 = 1,
+ .i8255_offset = 0x400,
+ .size = 0x408,
+ .id = 0x20,
+ }, {
+ .name = "das-1202",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_none,
+ .has_8255 = 1,
+ .i8255_offset = 0x400,
+ .size = 0x408,
+ .id = 0x20,
+ }, {
+ .name = "das-1401",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_1601,
+ .size = 0x408,
+ .id = 0xc0,
+ }, {
+ .name = "das-1402",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_1602,
+ .size = 0x408,
+ .id = 0xc0,
+ }, {
+ .name = "das-1601",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_1601,
+ .has_ao = 1,
+ .has_8255 = 1,
+ .i8255_offset = 0x400,
+ .size = 0x408,
+ .id = 0xc0,
+ }, {
+ .name = "das-1602",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_1602,
+ .has_ao = 1,
+ .has_8255 = 1,
+ .i8255_offset = 0x400,
+ .size = 0x408,
+ .id = 0xc0,
+ }, {
+ .name = "cio-das1401/12",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 6250,
+ .ai_pg = das16_pg_1601,
+ .size = 0x408,
+ .id = 0xc0,
+ }, {
+ .name = "cio-das1402/12",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 6250,
+ .ai_pg = das16_pg_1602,
+ .size = 0x408,
+ .id = 0xc0,
+ }, {
+ .name = "cio-das1402/16",
+ .ai_maxdata = 0xffff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_1602,
+ .size = 0x408,
+ .id = 0xc0,
+ }, {
+ .name = "cio-das1601/12",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 6250,
+ .ai_pg = das16_pg_1601,
+ .has_ao = 1,
+ .has_8255 = 1,
+ .i8255_offset = 0x400,
+ .size = 0x408,
+ .id = 0xc0,
+ }, {
+ .name = "cio-das1602/12",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_1602,
+ .has_ao = 1,
+ .has_8255 = 1,
+ .i8255_offset = 0x400,
+ .size = 0x408,
+ .id = 0xc0,
+ }, {
+ .name = "cio-das1602/16",
+ .ai_maxdata = 0xffff,
+ .ai_speed = 10000,
+ .ai_pg = das16_pg_1602,
+ .has_ao = 1,
+ .has_8255 = 1,
+ .i8255_offset = 0x400,
+ .size = 0x408,
+ .id = 0xc0,
+ }, {
+ .name = "cio-das16/330",
+ .ai_maxdata = 0x0fff,
+ .ai_speed = 3030,
+ .ai_pg = das16_pg_16jr,
+ .size = 0x14,
+ .id = 0xf0,
+ },
+};
/* Period for the timer interrupt in jiffies. This is a function rather
 * than a constant so it copes with the possibility of dynamic HZ patches. */
@@ -363,34 +451,155 @@ static inline int timer_period(void)
}
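The body of timer_period() falls outside this hunk; purely to illustrate what the comment above describes, a definition along these lines (the divisor here is an assumption, not necessarily the driver's actual value) gives a roughly fixed polling interval regardless of the kernel's HZ setting:

static inline int timer_period(void)
{
	/* poll the DMA buffer about every 50 ms, independent of HZ */
	return HZ / 20;
}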
struct das16_private_struct {
- unsigned int ai_unipolar; /* unipolar flag */
- unsigned int ai_singleended; /* single ended flag */
- unsigned int clockbase; /* master clock speed in ns */
- volatile unsigned int control_state; /* dma, interrupt and trigger control bits */
- volatile unsigned long adc_byte_count; /* number of bytes remaining */
- /* divisor dividing master clock to get conversion frequency */
- unsigned int divisor1;
- /* divisor dividing master clock to get conversion frequency */
- unsigned int divisor2;
- unsigned int dma_chan; /* dma channel */
- uint16_t *dma_buffer[2];
- dma_addr_t dma_buffer_addr[2];
- unsigned int current_buffer;
- volatile unsigned int dma_transfer_size; /* target number of bytes to transfer per dma shot */
- /**
- * user-defined analog input and output ranges
- * defined from config options
- */
- struct comedi_lrange *user_ai_range_table;
- struct comedi_lrange *user_ao_range_table;
-
- struct timer_list timer; /* for timed interrupt */
- volatile short timer_running;
- volatile short timer_mode; /* true if using timer mode */
-
- unsigned long extra_iobase;
+ unsigned int clockbase;
+ unsigned int ctrl_reg;
+ unsigned long adc_byte_count;
+ unsigned int divisor1;
+ unsigned int divisor2;
+ unsigned int dma_chan;
+ uint16_t *dma_buffer[2];
+ dma_addr_t dma_buffer_addr[2];
+ unsigned int current_buffer;
+ unsigned int dma_transfer_size;
+ struct comedi_lrange *user_ai_range_table;
+ struct comedi_lrange *user_ao_range_table;
+ struct timer_list timer;
+ short timer_running;
+ unsigned long extra_iobase;
+ unsigned int can_burst:1;
};
+static void das16_ai_enable(struct comedi_device *dev,
+ unsigned int mode, unsigned int src)
+{
+ struct das16_private_struct *devpriv = dev->private;
+
+ devpriv->ctrl_reg &= ~(DAS16_CTRL_INTE |
+ DAS16_CTRL_DMAE |
+ DAS16_CTRL_PACING_MASK);
+ devpriv->ctrl_reg |= mode;
+
+ if (src == TRIG_EXT)
+ devpriv->ctrl_reg |= DAS16_CTRL_EXT_PACER;
+ else
+ devpriv->ctrl_reg |= DAS16_CTRL_INT_PACER;
+ outb(devpriv->ctrl_reg, dev->iobase + DAS16_CTRL_REG);
+}
+
+static void das16_ai_disable(struct comedi_device *dev)
+{
+ struct das16_private_struct *devpriv = dev->private;
+
+ /* disable interrupts, dma and pacer clocked conversions */
+ devpriv->ctrl_reg &= ~(DAS16_CTRL_INTE |
+ DAS16_CTRL_DMAE |
+ DAS16_CTRL_PACING_MASK);
+ outb(devpriv->ctrl_reg, dev->iobase + DAS16_CTRL_REG);
+}
+
+/*
+ * The pc104-das16jr (at least) has problems if the dma transfer is
+ * interrupted in the middle of transferring a 16 bit sample, so this
+ * function takes care to get an even transfer count after disabling
+ * the dma channel.
+ */
+static int disable_dma_on_even(struct comedi_device *dev)
+{
+ struct das16_private_struct *devpriv = dev->private;
+ int residue;
+ int i;
+ static const int disable_limit = 100;
+ static const int enable_timeout = 100;
+
+ disable_dma(devpriv->dma_chan);
+ residue = get_dma_residue(devpriv->dma_chan);
+ for (i = 0; i < disable_limit && (residue % 2); ++i) {
+ int j;
+ enable_dma(devpriv->dma_chan);
+ for (j = 0; j < enable_timeout; ++j) {
+ int new_residue;
+ udelay(2);
+ new_residue = get_dma_residue(devpriv->dma_chan);
+ if (new_residue != residue)
+ break;
+ }
+ disable_dma(devpriv->dma_chan);
+ residue = get_dma_residue(devpriv->dma_chan);
+ }
+ if (i == disable_limit) {
+ dev_err(dev->class_dev,
+ "failed to get an even dma transfer, could be trouble\n");
+ }
+ return residue;
+}
+
+static void das16_interrupt(struct comedi_device *dev)
+{
+ struct das16_private_struct *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+ unsigned long spin_flags;
+ unsigned long dma_flags;
+ int num_bytes, residue;
+ int buffer_index;
+
+ spin_lock_irqsave(&dev->spinlock, spin_flags);
+ if (!(devpriv->ctrl_reg & DAS16_CTRL_DMAE)) {
+ spin_unlock_irqrestore(&dev->spinlock, spin_flags);
+ return;
+ }
+
+ dma_flags = claim_dma_lock();
+ clear_dma_ff(devpriv->dma_chan);
+ residue = disable_dma_on_even(dev);
+
+ /* figure out how many points to read */
+ if (residue > devpriv->dma_transfer_size) {
+ dev_err(dev->class_dev, "residue > transfer size!\n");
+ async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
+ num_bytes = 0;
+ } else
+ num_bytes = devpriv->dma_transfer_size - residue;
+
+ if (cmd->stop_src == TRIG_COUNT &&
+ num_bytes >= devpriv->adc_byte_count) {
+ num_bytes = devpriv->adc_byte_count;
+ async->events |= COMEDI_CB_EOA;
+ }
+
+ buffer_index = devpriv->current_buffer;
+ devpriv->current_buffer = (devpriv->current_buffer + 1) % 2;
+ devpriv->adc_byte_count -= num_bytes;
+
+ /* re-enable dma */
+ if ((async->events & COMEDI_CB_EOA) == 0) {
+ set_dma_addr(devpriv->dma_chan,
+ devpriv->dma_buffer_addr[devpriv->current_buffer]);
+ set_dma_count(devpriv->dma_chan, devpriv->dma_transfer_size);
+ enable_dma(devpriv->dma_chan);
+ }
+ release_dma_lock(dma_flags);
+
+ spin_unlock_irqrestore(&dev->spinlock, spin_flags);
+
+ cfc_write_array_to_buffer(s,
+ devpriv->dma_buffer[buffer_index], num_bytes);
+
+ cfc_handle_events(dev, s);
+}
+
+static void das16_timer_interrupt(unsigned long arg)
+{
+ struct comedi_device *dev = (struct comedi_device *)arg;
+ struct das16_private_struct *devpriv = dev->private;
+
+ das16_interrupt(dev);
+
+ if (devpriv->timer_running)
+ mod_timer(&devpriv->timer, jiffies + timer_period());
+}
+
static int das16_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
@@ -405,15 +614,13 @@ static int das16_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
mask = TRIG_FOLLOW;
- /* if board supports burst mode */
- if (board->size > 0x400)
+ if (devpriv->can_burst)
mask |= TRIG_TIMER | TRIG_EXT;
err |= cfc_check_trigger_src(&cmd->scan_begin_src, mask);
tmp = cmd->convert_src;
mask = TRIG_TIMER | TRIG_EXT;
- /* if board supports burst mode */
- if (board->size > 0x400)
+ if (devpriv->can_burst)
mask |= TRIG_NOW;
err |= cfc_check_trigger_src(&cmd->convert_src, mask);
@@ -469,9 +676,9 @@ static int das16_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int tmp = cmd->scan_begin_arg;
/* set divisors, correct timing arguments */
i8253_cascade_ns_to_timer_2div(devpriv->clockbase,
- &(devpriv->divisor1),
- &(devpriv->divisor2),
- &(cmd->scan_begin_arg),
+ &devpriv->divisor1,
+ &devpriv->divisor2,
+ &cmd->scan_begin_arg,
cmd->flags & TRIG_ROUND_MASK);
err += (tmp != cmd->scan_begin_arg);
}
@@ -479,9 +686,9 @@ static int das16_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
unsigned int tmp = cmd->convert_arg;
/* set divisors, correct timing arguments */
i8253_cascade_ns_to_timer_2div(devpriv->clockbase,
- &(devpriv->divisor1),
- &(devpriv->divisor2),
- &(cmd->convert_arg),
+ &devpriv->divisor1,
+ &devpriv->divisor2,
+ &cmd->convert_arg,
cmd->flags & TRIG_ROUND_MASK);
err += (tmp != cmd->convert_arg);
}
@@ -495,16 +702,13 @@ static int das16_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
for (i = 1; i < cmd->chanlist_len; i++) {
if (CR_CHAN(cmd->chanlist[i]) !=
(start_chan + i) % s->n_chan) {
- comedi_error(dev,
- "entries in chanlist must be "
- "consecutive channels, "
- "counting upwards\n");
+ dev_err(dev->class_dev,
+ "entries in chanlist must be consecutive channels, counting upwards\n");
err++;
}
if (CR_RANGE(cmd->chanlist[i]) != gain) {
- comedi_error(dev,
- "entries in chanlist must all "
- "have the same gain\n");
+ dev_err(dev->class_dev,
+ "entries in chanlist must all have the same gain\n");
err++;
}
}
@@ -515,61 +719,21 @@ static int das16_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
return 0;
}
-/* utility function that suggests a dma transfer size in bytes */
-static unsigned int das16_suggest_transfer_size(struct comedi_device *dev,
- const struct comedi_cmd *cmd)
-{
- struct das16_private_struct *devpriv = dev->private;
- unsigned int size;
- unsigned int freq;
-
- /* if we are using timer interrupt, we don't care how long it
- * will take to complete transfer since it will be interrupted
- * by timer interrupt */
- if (devpriv->timer_mode)
- return DAS16_DMA_SIZE;
-
- /* otherwise, we are relying on dma terminal count interrupt,
- * so pick a reasonable size */
- if (cmd->convert_src == TRIG_TIMER)
- freq = 1000000000 / cmd->convert_arg;
- else if (cmd->scan_begin_src == TRIG_TIMER)
- freq = (1000000000 / cmd->scan_begin_arg) * cmd->chanlist_len;
- /* return some default value */
- else
- freq = 0xffffffff;
-
- if (cmd->flags & TRIG_WAKE_EOS) {
- size = sample_size * cmd->chanlist_len;
- } else {
- /* make buffer fill in no more than 1/3 second */
- size = (freq / 3) * sample_size;
- }
-
- /* set a minimum and maximum size allowed */
- if (size > DAS16_DMA_SIZE)
- size = DAS16_DMA_SIZE - DAS16_DMA_SIZE % sample_size;
- else if (size < sample_size)
- size = sample_size;
-
- if (cmd->stop_src == TRIG_COUNT && size > devpriv->adc_byte_count)
- size = devpriv->adc_byte_count;
-
- return size;
-}
-
static unsigned int das16_set_pacer(struct comedi_device *dev, unsigned int ns,
int rounding_flags)
{
struct das16_private_struct *devpriv = dev->private;
+ unsigned long timer_base = dev->iobase + DAS16_TIMER_BASE_REG;
- i8253_cascade_ns_to_timer_2div(devpriv->clockbase, &(devpriv->divisor1),
- &(devpriv->divisor2), &ns,
+ i8253_cascade_ns_to_timer_2div(devpriv->clockbase,
+ &devpriv->divisor1,
+ &devpriv->divisor2,
+ &ns,
rounding_flags & TRIG_ROUND_MASK);
/* Write the values of ctr1 and ctr2 into counters 1 and 2 */
- i8254_load(dev->iobase + DAS16_CNTR0_DATA, 0, 1, devpriv->divisor1, 2);
- i8254_load(dev->iobase + DAS16_CNTR0_DATA, 0, 2, devpriv->divisor2, 2);
+ i8254_load(timer_base, 0, 1, devpriv->divisor1, 2);
+ i8254_load(timer_base, 0, 2, devpriv->divisor2, 2);
return ns;
}
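A worked example of the cascaded pacer, with numbers chosen only for illustration: with the 1 MHz master clock, devpriv->clockbase is 1000 ns per timer tick, so a requested conversion period of 1 ms can be produced by divisor1 = 100 and divisor2 = 10, since 1000 ns * 100 * 10 = 1,000,000 ns. i8253_cascade_ns_to_timer_2div() picks the divisor pair closest to the requested period under the given rounding flags, and das16_set_pacer() returns the period that was actually programmed.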
@@ -584,30 +748,22 @@ static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
unsigned long flags;
int range;
- if (devpriv->dma_chan == 0 || (dev->irq == 0
- && devpriv->timer_mode == 0)) {
- comedi_error(dev,
- "irq (or use of 'timer mode') dma required to "
- "execute comedi_cmd");
- return -1;
- }
if (cmd->flags & TRIG_RT) {
- comedi_error(dev, "isa dma transfers cannot be performed with "
- "TRIG_RT, aborting");
+ dev_err(dev->class_dev,
+ "isa dma transfers cannot be performed with TRIG_RT, aborting\n");
return -1;
}
devpriv->adc_byte_count =
cmd->stop_arg * cmd->chanlist_len * sizeof(uint16_t);
- /* disable conversions for das1600 mode */
- if (board->size > 0x400)
- outb(DAS1600_CONV_DISABLE, dev->iobase + DAS1600_CONV);
+ if (devpriv->can_burst)
+ outb(DAS1600_CONV_DISABLE, dev->iobase + DAS1600_CONV_REG);
/* set scan limits */
byte = CR_CHAN(cmd->chanlist[0]);
byte |= CR_CHAN(cmd->chanlist[cmd->chanlist_len - 1]) << 4;
- outb(byte, dev->iobase + DAS16_MUX);
+ outb(byte, dev->iobase + DAS16_MUX_REG);
/* set gain (this is also burst rate register but according to
* computer boards manual, burst rate does nothing, even on
@@ -615,28 +771,27 @@ static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
if (board->ai_pg != das16_pg_none) {
range = CR_RANGE(cmd->chanlist[0]);
outb((das16_gainlists[board->ai_pg])[range],
- dev->iobase + DAS16_GAIN);
+ dev->iobase + DAS16_GAIN_REG);
}
/* set counter mode and counts */
cmd->convert_arg =
das16_set_pacer(dev, cmd->convert_arg,
cmd->flags & TRIG_ROUND_MASK);
- DEBUG_PRINT("pacer period: %d ns\n", cmd->convert_arg);
/* enable counters */
byte = 0;
- /* Enable burst mode if appropriate. */
- if (board->size > 0x400) {
+ if (devpriv->can_burst) {
if (cmd->convert_src == TRIG_NOW) {
- outb(DAS1600_BURST_VAL, dev->iobase + DAS1600_BURST);
+ outb(DAS1600_BURST_VAL,
+ dev->iobase + DAS1600_BURST_REG);
/* set burst length */
- byte |= BURST_LEN_BITS(cmd->chanlist_len - 1);
+ byte |= DAS16_PACER_BURST_LEN(cmd->chanlist_len - 1);
} else {
- outb(0, dev->iobase + DAS1600_BURST);
+ outb(0, dev->iobase + DAS1600_BURST_REG);
}
}
- outb(byte, dev->iobase + DAS16_PACER);
+ outb(byte, dev->iobase + DAS16_PACER_REG);
/* set up dma transfer */
flags = claim_dma_lock();
@@ -647,465 +802,220 @@ static int das16_cmd_exec(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->current_buffer = 0;
set_dma_addr(devpriv->dma_chan,
devpriv->dma_buffer_addr[devpriv->current_buffer]);
- /* set appropriate size of transfer */
- devpriv->dma_transfer_size = das16_suggest_transfer_size(dev, cmd);
+ devpriv->dma_transfer_size = DAS16_DMA_SIZE;
set_dma_count(devpriv->dma_chan, devpriv->dma_transfer_size);
enable_dma(devpriv->dma_chan);
release_dma_lock(flags);
/* set up interrupt */
- if (devpriv->timer_mode) {
- devpriv->timer_running = 1;
- devpriv->timer.expires = jiffies + timer_period();
- add_timer(&devpriv->timer);
- devpriv->control_state &= ~DAS16_INTE;
- } else {
- /* clear interrupt bit */
- outb(0x00, dev->iobase + DAS16_STATUS);
- /* enable interrupts */
- devpriv->control_state |= DAS16_INTE;
- }
- devpriv->control_state |= DMA_ENABLE;
- devpriv->control_state &= ~PACING_MASK;
- if (cmd->convert_src == TRIG_EXT)
- devpriv->control_state |= EXT_PACER;
- else
- devpriv->control_state |= INT_PACER;
- outb(devpriv->control_state, dev->iobase + DAS16_CONTROL);
+ devpriv->timer_running = 1;
+ devpriv->timer.expires = jiffies + timer_period();
+ add_timer(&devpriv->timer);
- /* Enable conversions if using das1600 mode */
- if (board->size > 0x400)
- outb(0, dev->iobase + DAS1600_CONV);
+ das16_ai_enable(dev, DAS16_CTRL_DMAE, cmd->convert_src);
+ if (devpriv->can_burst)
+ outb(0, dev->iobase + DAS1600_CONV_REG);
return 0;
}
static int das16_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
{
- const struct das16_board *board = comedi_board(dev);
struct das16_private_struct *devpriv = dev->private;
unsigned long flags;
spin_lock_irqsave(&dev->spinlock, flags);
- /* disable interrupts, dma and pacer clocked conversions */
- devpriv->control_state &= ~DAS16_INTE & ~PACING_MASK & ~DMA_ENABLE;
- outb(devpriv->control_state, dev->iobase + DAS16_CONTROL);
- if (devpriv->dma_chan)
- disable_dma(devpriv->dma_chan);
+
+ das16_ai_disable(dev);
+ disable_dma(devpriv->dma_chan);
/* disable SW timer */
- if (devpriv->timer_mode && devpriv->timer_running) {
+ if (devpriv->timer_running) {
devpriv->timer_running = 0;
del_timer(&devpriv->timer);
}
- /* disable burst mode */
- if (board->size > 0x400)
- outb(0, dev->iobase + DAS1600_BURST);
-
+ if (devpriv->can_burst)
+ outb(0, dev->iobase + DAS1600_BURST_REG);
spin_unlock_irqrestore(&dev->spinlock, flags);
return 0;
}
-static void das16_reset(struct comedi_device *dev)
+static void das16_ai_munge(struct comedi_device *dev,
+ struct comedi_subdevice *s, void *array,
+ unsigned int num_bytes,
+ unsigned int start_chan_index)
{
- outb(0, dev->iobase + DAS16_STATUS);
- outb(0, dev->iobase + DAS16_CONTROL);
- outb(0, dev->iobase + DAS16_PACER);
- outb(0, dev->iobase + DAS16_CNTR_CONTROL);
+ unsigned int i, num_samples = num_bytes / sizeof(short);
+ short *data = array;
+
+ for (i = 0; i < num_samples; i++) {
+ data[i] = le16_to_cpu(data[i]);
+ if (s->maxdata == 0x0fff)
+ data[i] >>= 4;
+ data[i] &= s->maxdata;
+ }
}
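A concrete example of the munge for a 12-bit board (s->maxdata == 0x0fff): a raw sample of 0x5a40 read from the DMA buffer becomes 0x05a4 after the byte-order fix, the 4-bit right shift and the mask. On 16-bit boards only the byte-order conversion and the (no-op) mask with 0xffff are applied.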
-static int das16_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int das16_ai_wait_for_conv(struct comedi_device *dev,
+ unsigned int timeout)
+{
+ unsigned int status;
+ int i;
+
+ for (i = 0; i < timeout; i++) {
+ status = inb(dev->iobase + DAS16_STATUS_REG);
+ if (!(status & DAS16_STATUS_BUSY))
+ return 0;
+ }
+ return -ETIME;
+}
+
+static int das16_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
const struct das16_board *board = comedi_board(dev);
- struct das16_private_struct *devpriv = dev->private;
- int i, n;
- int range;
- int chan;
- int msb, lsb;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ unsigned int val;
+ int ret;
+ int i;
- /* disable interrupts and pacing */
- devpriv->control_state &= ~DAS16_INTE & ~DMA_ENABLE & ~PACING_MASK;
- outb(devpriv->control_state, dev->iobase + DAS16_CONTROL);
+ das16_ai_disable(dev);
/* set multiplexer */
- chan = CR_CHAN(insn->chanspec);
- chan |= CR_CHAN(insn->chanspec) << 4;
- outb(chan, dev->iobase + DAS16_MUX);
+ outb(chan | (chan << 4), dev->iobase + DAS16_MUX_REG);
/* set gain */
if (board->ai_pg != das16_pg_none) {
- range = CR_RANGE(insn->chanspec);
outb((das16_gainlists[board->ai_pg])[range],
- dev->iobase + DAS16_GAIN);
+ dev->iobase + DAS16_GAIN_REG);
}
- for (n = 0; n < insn->n; n++) {
+ for (i = 0; i < insn->n; i++) {
/* trigger conversion */
- outb_p(0, dev->iobase + DAS16_TRIG);
-
- for (i = 0; i < DAS16_TIMEOUT; i++) {
- if (!(inb(dev->iobase + DAS16_STATUS) & BUSY))
- break;
- }
- if (i == DAS16_TIMEOUT) {
- printk("das16: timeout\n");
- return -ETIME;
- }
- msb = inb(dev->iobase + DAS16_AI_MSB);
- lsb = inb(dev->iobase + DAS16_AI_LSB);
- if (board->ai_nbits == 12)
- data[n] = ((lsb >> 4) & 0xf) | (msb << 4);
- else
- data[n] = lsb | (msb << 8);
-
- }
-
- return n;
-}
-
-static int das16_di_rbits(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- unsigned int bits;
-
- bits = inb(dev->iobase + DAS16_DIO) & 0xf;
- data[1] = bits;
- data[0] = 0;
-
- return insn->n;
-}
+ outb_p(0, dev->iobase + DAS16_TRIG_REG);
-static int das16_do_wbits(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
-{
- unsigned int wbits;
+ ret = das16_ai_wait_for_conv(dev, 1000);
+ if (ret)
+ return ret;
- /* only set bits that have been masked */
- data[0] &= 0xf;
- wbits = s->state;
- /* zero bits that have been masked */
- wbits &= ~data[0];
- /* set masked bits */
- wbits |= data[0] & data[1];
- s->state = wbits;
- data[1] = wbits;
+ val = inb(dev->iobase + DAS16_AI_MSB_REG) << 8;
+ val |= inb(dev->iobase + DAS16_AI_LSB_REG);
+ if (s->maxdata == 0x0fff)
+ val >>= 4;
+ val &= s->maxdata;
- outb(s->state, dev->iobase + DAS16_DIO);
+ data[i] = val;
+ }
return insn->n;
}
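For completeness, a minimal userspace sketch of exercising this insn_read path through comedilib; the device file, channel and range index are assumptions, not values taken from the patch:

#include <stdio.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev;
	lsampl_t sample;
	int ret;

	dev = comedi_open("/dev/comedi0");	/* assumed device file */
	if (!dev) {
		comedi_perror("comedi_open");
		return 1;
	}

	/* subdevice 0 is AI; read channel 5 with range index 0, single-ended */
	ret = comedi_data_read(dev, 0, 5, 0, AREF_GROUND, &sample);
	if (ret < 0)
		comedi_perror("comedi_data_read");
	else
		printf("raw sample: %u\n", sample);

	comedi_close(dev);
	return ret < 0 ? 1 : 0;
}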
-static int das16_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int das16_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- const struct das16_board *board = comedi_board(dev);
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val;
int i;
- int lsb, msb;
- int chan;
-
- chan = CR_CHAN(insn->chanspec);
for (i = 0; i < insn->n; i++) {
- if (board->ao_nbits == 12) {
- lsb = (data[i] << 4) & 0xff;
- msb = (data[i] >> 4) & 0xff;
- } else {
- lsb = data[i] & 0xff;
- msb = (data[i] >> 8) & 0xff;
- }
- outb(lsb, dev->iobase + DAS16_AO_LSB(chan));
- outb(msb, dev->iobase + DAS16_AO_MSB(chan));
- }
-
- return i;
-}
-
-/* the pc104-das16jr (at least) has problems if the dma
- transfer is interrupted in the middle of transferring
- a 16 bit sample, so this function takes care to get
- an even transfer count after disabling dma
- channel.
-*/
-static int disable_dma_on_even(struct comedi_device *dev)
-{
- struct das16_private_struct *devpriv = dev->private;
- int residue;
- int i;
- static const int disable_limit = 100;
- static const int enable_timeout = 100;
+ val = data[i];
+ val <<= 4;
- disable_dma(devpriv->dma_chan);
- residue = get_dma_residue(devpriv->dma_chan);
- for (i = 0; i < disable_limit && (residue % 2); ++i) {
- int j;
- enable_dma(devpriv->dma_chan);
- for (j = 0; j < enable_timeout; ++j) {
- int new_residue;
- udelay(2);
- new_residue = get_dma_residue(devpriv->dma_chan);
- if (new_residue != residue)
- break;
- }
- disable_dma(devpriv->dma_chan);
- residue = get_dma_residue(devpriv->dma_chan);
- }
- if (i == disable_limit) {
- comedi_error(dev, "failed to get an even dma transfer, "
- "could be trouble.");
+ outb(val & 0xff, dev->iobase + DAS16_AO_LSB_REG(chan));
+ outb((val >> 8) & 0xff, dev->iobase + DAS16_AO_MSB_REG(chan));
}
- return residue;
+
+ return insn->n;
}
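The matching userspace call for the analog output path, again only a hedged sketch (subdevice 1 is the AO subdevice set up in das16_attach(); the channel and range index are assumptions):

#include <comedilib.h>

/* write mid-scale (2048 of a 12-bit full scale) to AO channel 0 */
int das16_ao_example(comedi_t *dev)
{
	return comedi_data_write(dev, 1, 0, 0, AREF_GROUND, 2048);
}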
-static void das16_interrupt(struct comedi_device *dev)
+static int das16_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- const struct das16_board *board = comedi_board(dev);
- struct das16_private_struct *devpriv = dev->private;
- unsigned long dma_flags, spin_flags;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async;
- struct comedi_cmd *cmd;
- int num_bytes, residue;
- int buffer_index;
-
- if (!dev->attached) {
- comedi_error(dev, "premature interrupt");
- return;
- }
- /* initialize async here to make sure it is not NULL */
- async = s->async;
- cmd = &async->cmd;
-
- if (devpriv->dma_chan == 0) {
- comedi_error(dev, "interrupt with no dma channel?");
- return;
- }
-
- spin_lock_irqsave(&dev->spinlock, spin_flags);
- if ((devpriv->control_state & DMA_ENABLE) == 0) {
- spin_unlock_irqrestore(&dev->spinlock, spin_flags);
- DEBUG_PRINT("interrupt while dma disabled?\n");
- return;
- }
-
- dma_flags = claim_dma_lock();
- clear_dma_ff(devpriv->dma_chan);
- residue = disable_dma_on_even(dev);
-
- /* figure out how many points to read */
- if (residue > devpriv->dma_transfer_size) {
- comedi_error(dev, "residue > transfer size!\n");
- async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;
- num_bytes = 0;
- } else
- num_bytes = devpriv->dma_transfer_size - residue;
+ data[1] = inb(dev->iobase + DAS16_DIO_REG) & 0xf;
- if (cmd->stop_src == TRIG_COUNT &&
- num_bytes >= devpriv->adc_byte_count) {
- num_bytes = devpriv->adc_byte_count;
- async->events |= COMEDI_CB_EOA;
- }
-
- buffer_index = devpriv->current_buffer;
- devpriv->current_buffer = (devpriv->current_buffer + 1) % 2;
- devpriv->adc_byte_count -= num_bytes;
-
- /* figure out how many bytes for next transfer */
- if (cmd->stop_src == TRIG_COUNT && devpriv->timer_mode == 0 &&
- devpriv->dma_transfer_size > devpriv->adc_byte_count)
- devpriv->dma_transfer_size = devpriv->adc_byte_count;
-
- /* re-enable dma */
- if ((async->events & COMEDI_CB_EOA) == 0) {
- set_dma_addr(devpriv->dma_chan,
- devpriv->dma_buffer_addr[devpriv->current_buffer]);
- set_dma_count(devpriv->dma_chan, devpriv->dma_transfer_size);
- enable_dma(devpriv->dma_chan);
- /* reenable conversions for das1600 mode, (stupid hardware) */
- if (board->size > 0x400 && devpriv->timer_mode == 0)
- outb(0x00, dev->iobase + DAS1600_CONV);
-
- }
- release_dma_lock(dma_flags);
-
- spin_unlock_irqrestore(&dev->spinlock, spin_flags);
-
- cfc_write_array_to_buffer(s,
- devpriv->dma_buffer[buffer_index], num_bytes);
-
- cfc_handle_events(dev, s);
+ return insn->n;
}
-static irqreturn_t das16_dma_interrupt(int irq, void *d)
+static int das16_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int status;
- struct comedi_device *dev = d;
+ unsigned int mask = data[0];
+ unsigned int bits = data[1];
- status = inb(dev->iobase + DAS16_STATUS);
+ if (mask) {
+ s->state &= ~mask;
+ s->state |= (bits & mask);
- if ((status & DAS16_INT) == 0) {
- DEBUG_PRINT("spurious interrupt\n");
- return IRQ_NONE;
+ outb(s->state, dev->iobase + DAS16_DIO_REG);
}
- /* clear interrupt */
- outb(0x00, dev->iobase + DAS16_STATUS);
- das16_interrupt(dev);
- return IRQ_HANDLED;
-}
-
-static void das16_timer_interrupt(unsigned long arg)
-{
- struct comedi_device *dev = (struct comedi_device *)arg;
- struct das16_private_struct *devpriv = dev->private;
-
- das16_interrupt(dev);
-
- if (devpriv->timer_running)
- mod_timer(&devpriv->timer, jiffies + timer_period());
-}
+ data[1] = s->state;
-static void reg_dump(struct comedi_device *dev)
-{
- DEBUG_PRINT("********DAS1600 REGISTER DUMP********\n");
- DEBUG_PRINT("DAS16_MUX: %x\n", inb(dev->iobase + DAS16_MUX));
- DEBUG_PRINT("DAS16_DIO: %x\n", inb(dev->iobase + DAS16_DIO));
- DEBUG_PRINT("DAS16_STATUS: %x\n", inb(dev->iobase + DAS16_STATUS));
- DEBUG_PRINT("DAS16_CONTROL: %x\n", inb(dev->iobase + DAS16_CONTROL));
- DEBUG_PRINT("DAS16_PACER: %x\n", inb(dev->iobase + DAS16_PACER));
- DEBUG_PRINT("DAS16_GAIN: %x\n", inb(dev->iobase + DAS16_GAIN));
- DEBUG_PRINT("DAS16_CNTR_CONTROL: %x\n",
- inb(dev->iobase + DAS16_CNTR_CONTROL));
- DEBUG_PRINT("DAS1600_CONV: %x\n", inb(dev->iobase + DAS1600_CONV));
- DEBUG_PRINT("DAS1600_BURST: %x\n", inb(dev->iobase + DAS1600_BURST));
- DEBUG_PRINT("DAS1600_ENABLE: %x\n", inb(dev->iobase + DAS1600_ENABLE));
- DEBUG_PRINT("DAS1600_STATUS_B: %x\n",
- inb(dev->iobase + DAS1600_STATUS_B));
+ return insn->n;
}
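A hedged userspace sketch of the mask/bits semantics above: data[0] selects which of the four output lines to touch and data[1] supplies their new values. With comedilib this maps onto comedi_dio_bitfield2() (subdevice 3 is the DO subdevice created in das16_attach(); the base channel of 0 is an assumption):

#include <comedilib.h>

/* drive DO0 and DO2 high while leaving DO1 and DO3 untouched */
int das16_do_example(comedi_t *dev)
{
	unsigned int bits = 0x5;	/* new values for the masked lines */

	return comedi_dio_bitfield2(dev, 3, 0x5, &bits, 0);
}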
static int das16_probe(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct das16_board *board = comedi_board(dev);
- struct das16_private_struct *devpriv = dev->private;
- int status;
int diobits;
- /* status is available on all boards */
-
- status = inb(dev->iobase + DAS16_STATUS);
-
- if ((status & UNIPOLAR))
- devpriv->ai_unipolar = 1;
- else
- devpriv->ai_unipolar = 0;
-
-
- if ((status & DAS16_MUXBIT))
- devpriv->ai_singleended = 1;
- else
- devpriv->ai_singleended = 0;
-
-
	/* the upper nibble of the DIO register identifies the board */
-
- diobits = inb(dev->iobase + DAS16_DIO) & 0xf0;
-
- printk(KERN_INFO " id bits are 0x%02x\n", diobits);
+ diobits = inb(dev->iobase + DAS16_DIO_REG) & 0xf0;
if (board->id != diobits) {
- printk(KERN_INFO " requested board's id bits are 0x%x (ignore)\n",
- board->id);
- }
-
- return 0;
-}
-
-static int das1600_mode_detect(struct comedi_device *dev)
-{
- struct das16_private_struct *devpriv = dev->private;
- int status = 0;
-
- status = inb(dev->iobase + DAS1600_STATUS_B);
-
- if (status & DAS1600_CLK_10MHZ) {
- devpriv->clockbase = 100;
- printk(KERN_INFO " 10MHz pacer clock\n");
- } else {
- devpriv->clockbase = 1000;
- printk(KERN_INFO " 1MHz pacer clock\n");
+ dev_err(dev->class_dev,
+ "requested board's id bits are incorrect (0x%x != 0x%x)\n",
+ board->id, diobits);
+ return -EINVAL;
}
- reg_dump(dev);
-
return 0;
}
-static void das16_ai_munge(struct comedi_device *dev,
- struct comedi_subdevice *s, void *array,
- unsigned int num_bytes,
- unsigned int start_chan_index)
+static void das16_reset(struct comedi_device *dev)
{
- const struct das16_board *board = comedi_board(dev);
- unsigned int i, num_samples = num_bytes / sizeof(short);
- short *data = array;
-
- for (i = 0; i < num_samples; i++) {
- data[i] = le16_to_cpu(data[i]);
- if (board->ai_nbits == 12)
- data[i] = (data[i] >> 4) & 0xfff;
-
- }
+ outb(0, dev->iobase + DAS16_STATUS_REG);
+ outb(0, dev->iobase + DAS16_CTRL_REG);
+ outb(0, dev->iobase + DAS16_PACER_REG);
+ outb(0, dev->iobase + DAS16_TIMER_BASE_REG + i8254_control_reg);
}
-/*
- *
- * Options list:
- * 0 I/O base
- * 1 IRQ
- * 2 DMA
- * 3 Clock speed (in MHz)
- */
static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct das16_board *board = comedi_board(dev);
struct das16_private_struct *devpriv;
struct comedi_subdevice *s;
+ struct comedi_lrange *lrange;
+ struct comedi_krange *krange;
+ unsigned int dma_chan = it->options[2];
+ unsigned int status;
int ret;
- unsigned int irq;
- unsigned int dma_chan;
- int timer_mode;
- unsigned long flags;
- struct comedi_krange *user_ai_range, *user_ao_range;
-
-#if 0
- irq = it->options[1];
- timer_mode = it->options[8];
-#endif
- /* always use time_mode since using irq can drop samples while
- * waiting for dma done interrupt (due to hardware limitations) */
- irq = 0;
- timer_mode = 1;
- if (timer_mode)
- irq = 0;
/* check that clock setting is valid */
if (it->options[3]) {
if (it->options[3] != 0 &&
it->options[3] != 1 && it->options[3] != 10) {
- printk
- ("\n Invalid option. Master clock must be set "
- "to 1 or 10 (MHz)\n");
+ dev_err(dev->class_dev,
+ "Invalid option. Master clock must be set to 1 or 10 (MHz)\n");
return -EINVAL;
}
}
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
if (board->size < 0x400) {
ret = comedi_request_region(dev, it->options[0], board->size);
@@ -1121,207 +1031,183 @@ static int das16_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
devpriv->extra_iobase = dev->iobase + 0x400;
+ devpriv->can_burst = 1;
}
/* probe id bits to make sure they are consistent */
- if (das16_probe(dev, it)) {
- printk(KERN_ERR " id bits do not match selected board, aborting\n");
+ if (das16_probe(dev, it))
return -EINVAL;
- }
/* get master clock speed */
- if (board->size < 0x400) {
+ if (devpriv->can_burst) {
+ status = inb(dev->iobase + DAS1600_STATUS_REG);
+
+ if (status & DAS1600_STATUS_CLK_10MHZ)
+ devpriv->clockbase = 100;
+ else
+ devpriv->clockbase = 1000;
+ } else {
if (it->options[3])
devpriv->clockbase = 1000 / it->options[3];
else
devpriv->clockbase = 1000; /* 1 MHz default */
- } else {
- das1600_mode_detect(dev);
- }
-
- /* now for the irq */
- if (irq > 1 && irq < 8) {
- ret = request_irq(irq, das16_dma_interrupt, 0,
- dev->board_name, dev);
-
- if (ret < 0)
- return ret;
- dev->irq = irq;
- printk(KERN_INFO " ( irq = %u )", irq);
- } else if (irq == 0) {
- printk(" ( no irq )");
- } else {
- printk(" invalid irq\n");
- return -EINVAL;
}
- /* initialize dma */
- dma_chan = it->options[2];
+ /* initialize dma */
if (dma_chan == 1 || dma_chan == 3) {
- /* allocate dma buffers */
+ unsigned long flags;
int i;
- for (i = 0; i < 2; i++) {
- devpriv->dma_buffer[i] = pci_alloc_consistent(
- NULL, DAS16_DMA_SIZE,
- &devpriv->dma_buffer_addr[i]);
- if (devpriv->dma_buffer[i] == NULL)
- return -ENOMEM;
- }
if (request_dma(dma_chan, dev->board_name)) {
- printk(KERN_ERR " failed to allocate dma channel %i\n",
- dma_chan);
+ dev_err(dev->class_dev,
+ "failed to request dma channel %i\n",
+ dma_chan);
return -EINVAL;
}
devpriv->dma_chan = dma_chan;
+
+ /* allocate dma buffers */
+ for (i = 0; i < 2; i++) {
+ void *p;
+
+ p = pci_alloc_consistent(NULL, DAS16_DMA_SIZE,
+ &devpriv->dma_buffer_addr[i]);
+ if (!p)
+ return -ENOMEM;
+ devpriv->dma_buffer[i] = p;
+ }
+
flags = claim_dma_lock();
disable_dma(devpriv->dma_chan);
set_dma_mode(devpriv->dma_chan, DMA_MODE_READ);
release_dma_lock(flags);
- printk(KERN_INFO " ( dma = %u)\n", dma_chan);
- } else if (dma_chan == 0) {
- printk(KERN_INFO " ( no dma )\n");
- } else {
- printk(KERN_ERR " invalid dma channel\n");
- return -EINVAL;
+
+ init_timer(&devpriv->timer);
+ devpriv->timer.function = das16_timer_interrupt;
+ devpriv->timer.data = (unsigned long)dev;
}
- /* get any user-defined input range */
+ /* get any user-defined input range */
if (board->ai_pg == das16_pg_none &&
(it->options[4] || it->options[5])) {
- /* allocate single-range range table */
- devpriv->user_ai_range_table =
- kmalloc(sizeof(struct comedi_lrange) +
- sizeof(struct comedi_krange), GFP_KERNEL);
- /* initialize ai range */
- devpriv->user_ai_range_table->length = 1;
- user_ai_range = devpriv->user_ai_range_table->range;
- user_ai_range->min = it->options[4];
- user_ai_range->max = it->options[5];
- user_ai_range->flags = UNIT_volt;
- }
- /* get any user-defined output range */
- if (it->options[6] || it->options[7]) {
- /* allocate single-range range table */
- devpriv->user_ao_range_table =
- kmalloc(sizeof(struct comedi_lrange) +
- sizeof(struct comedi_krange), GFP_KERNEL);
- /* initialize ao range */
- devpriv->user_ao_range_table->length = 1;
- user_ao_range = devpriv->user_ao_range_table->range;
- user_ao_range->min = it->options[6];
- user_ao_range->max = it->options[7];
- user_ao_range->flags = UNIT_volt;
+ /* allocate single-range range table */
+ lrange = kzalloc(sizeof(*lrange) + sizeof(*krange), GFP_KERNEL);
+ if (!lrange)
+ return -ENOMEM;
+
+ /* initialize ai range */
+ devpriv->user_ai_range_table = lrange;
+ lrange->length = 1;
+ krange = devpriv->user_ai_range_table->range;
+ krange->min = it->options[4];
+ krange->max = it->options[5];
+ krange->flags = UNIT_volt;
}
- if (timer_mode) {
- init_timer(&(devpriv->timer));
- devpriv->timer.function = das16_timer_interrupt;
- devpriv->timer.data = (unsigned long)dev;
+ /* get any user-defined output range */
+ if (it->options[6] || it->options[7]) {
+ /* allocate single-range range table */
+ lrange = kzalloc(sizeof(*lrange) + sizeof(*krange), GFP_KERNEL);
+ if (!lrange)
+ return -ENOMEM;
+
+ /* initialize ao range */
+ devpriv->user_ao_range_table = lrange;
+ lrange->length = 1;
+ krange = devpriv->user_ao_range_table->range;
+ krange->min = it->options[6];
+ krange->max = it->options[7];
+ krange->flags = UNIT_volt;
}
- devpriv->timer_mode = timer_mode ? 1 : 0;
- ret = comedi_alloc_subdevices(dev, 5);
+ ret = comedi_alloc_subdevices(dev, 4 + board->has_8255);
if (ret)
return ret;
+ status = inb(dev->iobase + DAS16_STATUS_REG);
+
+ /* Analog Input subdevice */
s = &dev->subdevices[0];
- dev->read_subdev = s;
- /* ai */
- if (board->ai) {
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
- if (devpriv->ai_singleended) {
- s->n_chan = 16;
- s->len_chanlist = 16;
- s->subdev_flags |= SDF_GROUND;
- } else {
- s->n_chan = 8;
- s->len_chanlist = 8;
- s->subdev_flags |= SDF_DIFF;
- }
- s->maxdata = (1 << board->ai_nbits) - 1;
- if (devpriv->user_ai_range_table) { /* user defined ai range */
- s->range_table = devpriv->user_ai_range_table;
- } else if (devpriv->ai_unipolar) {
- s->range_table = das16_ai_uni_lranges[board->ai_pg];
- } else {
- s->range_table = das16_ai_bip_lranges[board->ai_pg];
- }
- s->insn_read = board->ai;
- s->do_cmdtest = das16_cmd_test;
- s->do_cmd = das16_cmd_exec;
- s->cancel = das16_cancel;
- s->munge = das16_ai_munge;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE;
+ if (status & DAS16_STATUS_MUXBIT) {
+ s->subdev_flags |= SDF_GROUND;
+ s->n_chan = 16;
} else {
- s->type = COMEDI_SUBD_UNUSED;
+ s->subdev_flags |= SDF_DIFF;
+ s->n_chan = 8;
}
-
- s = &dev->subdevices[1];
- /* ao */
- if (board->ao) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->maxdata = (1 << board->ao_nbits) - 1;
- /* user defined ao range */
- if (devpriv->user_ao_range_table)
- s->range_table = devpriv->user_ao_range_table;
- else
- s->range_table = &range_unknown;
-
- s->insn_write = board->ao;
+ s->len_chanlist = s->n_chan;
+ s->maxdata = board->ai_maxdata;
+ if (devpriv->user_ai_range_table) { /* user defined ai range */
+ s->range_table = devpriv->user_ai_range_table;
+ } else if (status & DAS16_STATUS_UNIPOLAR) {
+ s->range_table = das16_ai_uni_lranges[board->ai_pg];
} else {
- s->type = COMEDI_SUBD_UNUSED;
+ s->range_table = das16_ai_bip_lranges[board->ai_pg];
}
-
- s = &dev->subdevices[2];
- /* di */
- if (board->di) {
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = board->di;
- } else {
- s->type = COMEDI_SUBD_UNUSED;
+ s->insn_read = das16_ai_insn_read;
+ if (devpriv->dma_chan) {
+ dev->read_subdev = s;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->do_cmdtest = das16_cmd_test;
+ s->do_cmd = das16_cmd_exec;
+ s->cancel = das16_cancel;
+ s->munge = das16_ai_munge;
}
- s = &dev->subdevices[3];
- /* do */
- if (board->do_) {
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = board->do_;
- /* initialize digital output lines */
- outb(s->state, dev->iobase + DAS16_DIO);
+ /* Analog Output subdevice */
+ s = &dev->subdevices[1];
+ if (board->has_ao) {
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 2;
+ s->maxdata = 0x0fff;
+		/* fall back to range_unknown if no user ao range was given */
+		s->range_table = devpriv->user_ao_range_table ?
+				 devpriv->user_ao_range_table :
+				 &range_unknown;
+ s->insn_write = das16_ao_insn_write;
} else {
- s->type = COMEDI_SUBD_UNUSED;
+ s->type = COMEDI_SUBD_UNUSED;
}
- s = &dev->subdevices[4];
- /* 8255 */
- if (board->i8255_offset != 0) {
- subdev_8255_init(dev, s, NULL, (dev->iobase +
- board->i8255_offset));
- } else {
- s->type = COMEDI_SUBD_UNUSED;
+ /* Digital Input subdevice */
+ s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 4;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = das16_di_insn_bits;
+
+ /* Digital Output subdevice */
+ s = &dev->subdevices[3];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 4;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = das16_do_insn_bits;
+
+ /* initialize digital output lines */
+ outb(s->state, dev->iobase + DAS16_DIO_REG);
+
+ /* 8255 Digital I/O subdevice */
+ if (board->has_8255) {
+ s = &dev->subdevices[4];
+ ret = subdev_8255_init(dev, s, NULL,
+ dev->iobase + board->i8255_offset);
+ if (ret)
+ return ret;
}
das16_reset(dev);
/* set the interrupt level */
- devpriv->control_state = DAS16_IRQ(dev->irq);
- outb(devpriv->control_state, dev->iobase + DAS16_CONTROL);
-
- /* turn on das1600 mode if available */
- if (board->size > 0x400) {
- outb(DAS1600_ENABLE_VAL, dev->iobase + DAS1600_ENABLE);
- outb(0, dev->iobase + DAS1600_CONV);
- outb(0, dev->iobase + DAS1600_BURST);
+ devpriv->ctrl_reg = DAS16_CTRL_IRQ(dev->irq);
+ outb(devpriv->ctrl_reg, dev->iobase + DAS16_CTRL_REG);
+
+ if (devpriv->can_burst) {
+ outb(DAS1600_ENABLE_VAL, dev->iobase + DAS1600_ENABLE_REG);
+ outb(0, dev->iobase + DAS1600_CONV_REG);
+ outb(0, dev->iobase + DAS1600_BURST_REG);
}
return 0;
@@ -1331,10 +1217,12 @@ static void das16_detach(struct comedi_device *dev)
{
const struct das16_board *board = comedi_board(dev);
struct das16_private_struct *devpriv = dev->private;
+ int i;
- das16_reset(dev);
if (devpriv) {
- int i;
+ if (dev->iobase)
+ das16_reset(dev);
+
for (i = 0; i < 2; i++) {
if (devpriv->dma_buffer[i])
pci_free_consistent(NULL, DAS16_DMA_SIZE,
@@ -1346,312 +1234,15 @@ static void das16_detach(struct comedi_device *dev)
free_dma(devpriv->dma_chan);
kfree(devpriv->user_ai_range_table);
kfree(devpriv->user_ao_range_table);
+
+ if (devpriv->extra_iobase)
+ release_region(devpriv->extra_iobase,
+ board->size & 0x3ff);
}
- if (devpriv->extra_iobase)
- release_region(devpriv->extra_iobase, board->size & 0x3ff);
+
comedi_legacy_detach(dev);
}
-static const struct das16_board das16_boards[] = {
- {
- .name = "das-16",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 15000,
- .ai_pg = das16_pg_none,
- .ao = das16_ao_winsn,
- .ao_nbits = 12,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x10,
- .i8254_offset = 0x0c,
- .size = 0x14,
- .id = 0x00,
- }, {
- .name = "das-16g",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 15000,
- .ai_pg = das16_pg_none,
- .ao = das16_ao_winsn,
- .ao_nbits = 12,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x10,
- .i8254_offset = 0x0c,
- .size = 0x14,
- .id = 0x00,
- }, {
- .name = "das-16f",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 8500,
- .ai_pg = das16_pg_none,
- .ao = das16_ao_winsn,
- .ao_nbits = 12,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x10,
- .i8254_offset = 0x0c,
- .size = 0x14,
- .id = 0x00,
- }, {
- .name = "cio-das16",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 20000,
- .ai_pg = das16_pg_none,
- .ao = das16_ao_winsn,
- .ao_nbits = 12,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x10,
- .i8254_offset = 0x0c,
- .size = 0x14,
- .id = 0x80,
- }, {
- .name = "cio-das16/f",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 10000,
- .ai_pg = das16_pg_none,
- .ao = das16_ao_winsn,
- .ao_nbits = 12,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x10,
- .i8254_offset = 0x0c,
- .size = 0x14,
- .id = 0x80,
- }, {
- .name = "cio-das16/jr",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 7692,
- .ai_pg = das16_pg_16jr,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0,
- .i8254_offset = 0x0c,
- .size = 0x10,
- .id = 0x00,
- }, {
- .name = "pc104-das16jr",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 3300,
- .ai_pg = das16_pg_16jr,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0,
- .i8254_offset = 0x0c,
- .size = 0x10,
- .id = 0x00,
- }, {
- .name = "cio-das16jr/16",
- .ai = das16_ai_rinsn,
- .ai_nbits = 16,
- .ai_speed = 10000,
- .ai_pg = das16_pg_16jr_16,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0,
- .i8254_offset = 0x0c,
- .size = 0x10,
- .id = 0x00,
- }, {
- .name = "pc104-das16jr/16",
- .ai = das16_ai_rinsn,
- .ai_nbits = 16,
- .ai_speed = 10000,
- .ai_pg = das16_pg_16jr_16,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0,
- .i8254_offset = 0x0c,
- .size = 0x10,
- .id = 0x00,
- }, {
- .name = "das-1201",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 20000,
- .ai_pg = das16_pg_none,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x400,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0x20,
- }, {
- .name = "das-1202",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 10000,
- .ai_pg = das16_pg_none,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x400,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0x20,
- }, {
- .name = "das-1401",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 10000,
- .ai_pg = das16_pg_1601,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x0,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "das-1402",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 10000,
- .ai_pg = das16_pg_1602,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x0,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "das-1601",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 10000,
- .ai_pg = das16_pg_1601,
- .ao = das16_ao_winsn,
- .ao_nbits = 12,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x400,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "das-1602",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 10000,
- .ai_pg = das16_pg_1602,
- .ao = das16_ao_winsn,
- .ao_nbits = 12,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x400,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "cio-das1401/12",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 6250,
- .ai_pg = das16_pg_1601,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "cio-das1402/12",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 6250,
- .ai_pg = das16_pg_1602,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "cio-das1402/16",
- .ai = das16_ai_rinsn,
- .ai_nbits = 16,
- .ai_speed = 10000,
- .ai_pg = das16_pg_1602,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "cio-das1601/12",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 6250,
- .ai_pg = das16_pg_1601,
- .ao = das16_ao_winsn,
- .ao_nbits = 12,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x400,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "cio-das1602/12",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 10000,
- .ai_pg = das16_pg_1602,
- .ao = das16_ao_winsn,
- .ao_nbits = 12,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x400,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "cio-das1602/16",
- .ai = das16_ai_rinsn,
- .ai_nbits = 16,
- .ai_speed = 10000,
- .ai_pg = das16_pg_1602,
- .ao = das16_ao_winsn,
- .ao_nbits = 12,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0x400,
- .i8254_offset = 0x0c,
- .size = 0x408,
- .id = 0xc0,
- }, {
- .name = "cio-das16/330",
- .ai = das16_ai_rinsn,
- .ai_nbits = 12,
- .ai_speed = 3030,
- .ai_pg = das16_pg_16jr,
- .ao = NULL,
- .di = das16_di_rbits,
- .do_ = das16_do_wbits,
- .i8255_offset = 0,
- .i8254_offset = 0x0c,
- .size = 0x14,
- .id = 0xf0,
- },
-};
-
static struct comedi_driver das16_driver = {
.driver_name = "das16",
.module = THIS_MODULE,
@@ -1664,5 +1255,5 @@ static struct comedi_driver das16_driver = {
module_comedi_driver(das16_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for DAS16 compatible boards");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/das16m1.c b/drivers/staging/comedi/drivers/das16m1.c
index 0b33808c3a7..b943c449b69 100644
--- a/drivers/staging/comedi/drivers/das16m1.c
+++ b/drivers/staging/comedi/drivers/das16m1.c
@@ -52,7 +52,7 @@ Options:
irq can be omitted, although the cmd interface will not work without it.
*/
-#include <linux/ioport.h>
+#include <linux/module.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
@@ -567,10 +567,9 @@ static int das16m1_attach(struct comedi_device *dev,
int ret;
unsigned int irq;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_request_region(dev, it->options[0], DAS16M1_SIZE);
if (ret)
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index 23b4a661eb1..5b300294d32 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -94,12 +94,12 @@ TODO:
read insn for analog out
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include <asm/dma.h>
#include "8253.h"
@@ -1511,10 +1511,9 @@ static int das1800_attach(struct comedi_device *dev,
int board;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_request_region(dev, it->options[0], DAS1800_SIZE);
if (ret)
diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
index f0530778bb3..fb25cb84703 100644
--- a/drivers/staging/comedi/drivers/das6402.c
+++ b/drivers/staging/comedi/drivers/das6402.c
@@ -33,11 +33,10 @@ Devices: [Keithley Metrabyte] DAS6402 (das6402)
This driver has suffered bitrot.
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
#define DAS6402_SIZE 16
#define N_WORDS (3000*64)
@@ -294,10 +293,9 @@ static int das6402_attach(struct comedi_device *dev,
dev->irq = irq;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_alloc_subdevices(dev, 1);
if (ret)
diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c
index 091cd911b38..11e16114e4e 100644
--- a/drivers/staging/comedi/drivers/das800.c
+++ b/drivers/staging/comedi/drivers/das800.c
@@ -56,10 +56,10 @@ cmd triggers supported:
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include <linux/delay.h>
#include "8253.h"
@@ -700,10 +700,9 @@ static int das800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
int board;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_request_region(dev, it->options[0], DAS800_SIZE);
if (ret)
diff --git a/drivers/staging/comedi/drivers/dmm32at.c b/drivers/staging/comedi/drivers/dmm32at.c
index e29847d73b4..118a4fd129f 100644
--- a/drivers/staging/comedi/drivers/dmm32at.c
+++ b/drivers/staging/comedi/drivers/dmm32at.c
@@ -32,9 +32,10 @@ Configuration Options:
comedi_config /dev/comedi0 dmm32at baseaddr,irq
*/
+#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include "comedi_fc.h"
@@ -647,31 +648,34 @@ static int dmm32at_dio_insn_bits(struct comedi_device *dev,
static int dmm32at_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct dmm32at_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
unsigned char chanbit;
- int chan = CR_CHAN(insn->chanspec);
-
- if (insn->n != 1)
- return -EINVAL;
+ int ret;
- if (chan < 8)
+ if (chan < 8) {
+ mask = 0x0000ff;
chanbit = DMM32AT_DIRA;
- else if (chan < 16)
+ } else if (chan < 16) {
+ mask = 0x00ff00;
chanbit = DMM32AT_DIRB;
- else if (chan < 20)
+ } else if (chan < 20) {
+ mask = 0x0f0000;
chanbit = DMM32AT_DIRCL;
- else
+ } else {
+ mask = 0xf00000;
chanbit = DMM32AT_DIRCH;
+ }
- /* The input or output configuration of each digital line is
- * configured by a special insn_config instruction. chanspec
- * contains the channel to be changed, and data[0] contains the
- * value COMEDI_INPUT or COMEDI_OUTPUT. */
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
- /* if output clear the bit, otherwise set it */
- if (data[0] == COMEDI_OUTPUT)
+ if (data[0] == INSN_CONFIG_DIO_OUTPUT)
devpriv->dio_config &= ~chanbit;
else
devpriv->dio_config |= chanbit;
@@ -680,7 +684,7 @@ static int dmm32at_dio_insn_config(struct comedi_device *dev,
/* set the DIO's to the new configuration setting */
outb(devpriv->dio_config, dev->iobase + DMM32AT_DIOCONF);
- return 1;
+ return insn->n;
}
static int dmm32at_attach(struct comedi_device *dev,
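
The dmm32at conversion above is the common DIO (*insn_config) rewrite in this series: the driver only computes the mask of channels that switch direction together and leaves the bookkeeping to comedi_dio_insn_config(). A minimal sketch with hypothetical names; the helper is assumed, from how these hunks use it, to update s->io_bits, answer INSN_CONFIG_DIO_QUERY, and return a value the caller simply propagates when nonzero:

static int example_dio_insn_config(struct comedi_device *dev,
                                   struct comedi_subdevice *s,
                                   struct comedi_insn *insn,
                                   unsigned int *data)
{
        unsigned int chan = CR_CHAN(insn->chanspec);
        unsigned int mask;
        int ret;

        /* all channels in the same 8-bit port change direction together */
        if (chan < 8)
                mask = 0x000000ff;
        else if (chan < 16)
                mask = 0x0000ff00;
        else if (chan < 24)
                mask = 0x00ff0000;
        else
                mask = 0xff000000;

        ret = comedi_dio_insn_config(dev, s, insn, data, mask);
        if (ret)
                return ret;

        /* program the hardware from the updated s->io_bits here */

        return insn->n;
}
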
@@ -753,10 +757,9 @@ static int dmm32at_attach(struct comedi_device *dev,
dev->irq = irq;
}
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_alloc_subdevices(dev, 3);
if (ret)
diff --git a/drivers/staging/comedi/drivers/dt2801.c b/drivers/staging/comedi/drivers/dt2801.c
index 8f5006d70da..38918a1198a 100644
--- a/drivers/staging/comedi/drivers/dt2801.c
+++ b/drivers/staging/comedi/drivers/dt2801.c
@@ -29,9 +29,9 @@ Configuration options:
[5] - D/A 1 range (same choices)
*/
+#include <linux/module.h>
#include "../comedidev.h"
#include <linux/delay.h>
-#include <linux/ioport.h>
#define DT2801_TIMEOUT 1000
@@ -551,32 +551,19 @@ static int dt2801_dio_insn_bits(struct comedi_device *dev,
static int dt2801_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int which = 0;
+ int ret;
- if (s == &dev->subdevices[3])
- which = 1;
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0xff);
+ if (ret)
+ return ret;
- /* configure */
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits = 0xff;
- dt2801_writecmd(dev, DT_C_SET_DIGOUT);
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits = 0;
- dt2801_writecmd(dev, DT_C_SET_DIGIN);
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = s->io_bits ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- default:
- return -EINVAL;
- }
- dt2801_writedata(dev, which);
+ dt2801_writecmd(dev, s->io_bits ? DT_C_SET_DIGOUT : DT_C_SET_DIGIN);
+ dt2801_writedata(dev, (s == &dev->subdevices[3]) ? 1 : 0);
- return 1;
+ return insn->n;
}
/*
@@ -627,10 +614,9 @@ havetype:
if (ret)
goto out;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
dev->board_name = board->name;
diff --git a/drivers/staging/comedi/drivers/dt2811.c b/drivers/staging/comedi/drivers/dt2811.c
index 5348cdae408..a41a5716f35 100644
--- a/drivers/staging/comedi/drivers/dt2811.c
+++ b/drivers/staging/comedi/drivers/dt2811.c
@@ -41,11 +41,10 @@ Configuration options:
[4] - D/A 1 range (same choices)
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
static const struct comedi_lrange range_dt2811_pgh_ai_5_unipolar = {
4, {
RANGE(0, 5),
@@ -450,10 +449,9 @@ static int dt2811_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
switch (it->options[2]) {
case 0:
diff --git a/drivers/staging/comedi/drivers/dt2814.c b/drivers/staging/comedi/drivers/dt2814.c
index 87e9749c4be..6514b9e0055 100644
--- a/drivers/staging/comedi/drivers/dt2814.c
+++ b/drivers/staging/comedi/drivers/dt2814.c
@@ -34,10 +34,10 @@ a power of 10, from 1 to 10^7, of which only 3 or 4 are useful. In
addition, the clock does not seem to be very accurate.
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include <linux/delay.h>
#include "comedi_fc.h"
@@ -298,10 +298,9 @@ static int dt2814_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
s = &dev->subdevices[0];
dev->read_subdev = s;
diff --git a/drivers/staging/comedi/drivers/dt2815.c b/drivers/staging/comedi/drivers/dt2815.c
index 0fcd4fe7acd..34040f0175e 100644
--- a/drivers/staging/comedi/drivers/dt2815.c
+++ b/drivers/staging/comedi/drivers/dt2815.c
@@ -51,9 +51,9 @@ Configuration options:
[12] - Analog output 7 range configuration (same options)
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include <linux/delay.h>
#define DT2815_SIZE 2
@@ -165,10 +165,9 @@ static int dt2815_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
s = &dev->subdevices[0];
/* ao subdevice */
diff --git a/drivers/staging/comedi/drivers/dt2817.c b/drivers/staging/comedi/drivers/dt2817.c
index 2f46be715f7..f4a8529239b 100644
--- a/drivers/staging/comedi/drivers/dt2817.c
+++ b/drivers/staging/comedi/drivers/dt2817.c
@@ -33,10 +33,9 @@ Configuration options:
[0] - I/O port base base address
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
#define DT2817_SIZE 5
#define DT2817_CR 0
@@ -44,28 +43,26 @@ Configuration options:
static int dt2817_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int mask;
- int chan;
- int oe = 0;
-
- if (insn->n != 1)
- return -EINVAL;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int oe = 0;
+ unsigned int mask;
+ int ret;
- chan = CR_CHAN(insn->chanspec);
if (chan < 8)
- mask = 0xff;
+ mask = 0x000000ff;
else if (chan < 16)
- mask = 0xff00;
+ mask = 0x0000ff00;
else if (chan < 24)
- mask = 0xff0000;
+ mask = 0x00ff0000;
else
mask = 0xff000000;
- if (data[0])
- s->io_bits |= mask;
- else
- s->io_bits &= ~mask;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
if (s->io_bits & 0x000000ff)
oe |= 0x1;
@@ -78,7 +75,7 @@ static int dt2817_dio_insn_config(struct comedi_device *dev,
outb(oe, dev->iobase + DT2817_CR);
- return 1;
+ return insn->n;
}
static int dt2817_dio_insn_bits(struct comedi_device *dev,
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index c1950e3b19a..da3ee859bdb 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -51,13 +51,16 @@ Notes:
be fixed to check for this situation and return an error.
*/
+#include <linux/module.h>
#include "../comedidev.h"
+#include <linux/delay.h>
#include <linux/gfp.h>
-#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+
#include <asm/dma.h>
+
#include "comedi_fc.h"
#define DEBUG
@@ -264,8 +267,9 @@ struct dt282x_private {
} \
udelay(5); \
} \
- if (_i) \
+ if (_i) { \
b \
+ } \
} while (0)
static int prep_ai_dma(struct comedi_device *dev, int chan, int size);
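
The dt282x macro change above adds braces around the statement argument 'b'. A small illustration of why, using hypothetical names; without the braces a multi-statement argument leaves everything after its first statement outside the if:

#define ON_TIMEOUT_BAD(b)       do { if (timed_out) b } while (0)
#define ON_TIMEOUT_GOOD(b)      do { if (timed_out) { b } } while (0)

/*
 * ON_TIMEOUT_BAD(cnt++; ret = -ETIME;)  expands so that ret = -ETIME runs
 * unconditionally; ON_TIMEOUT_GOOD() keeps both statements guarded by
 * timed_out.
 */
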
@@ -978,29 +982,32 @@ static int dt282x_dio_insn_bits(struct comedi_device *dev,
static int dt282x_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct dt282x_private *devpriv = dev->private;
- int mask;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
+ int ret;
- mask = (CR_CHAN(insn->chanspec) < 8) ? 0x00ff : 0xff00;
- if (data[0])
- s->io_bits |= mask;
+ if (chan < 8)
+ mask = 0x00ff;
else
- s->io_bits &= ~mask;
+ mask = 0xff00;
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
+
+ devpriv->dacsr &= ~(DT2821_LBOE | DT2821_HBOE);
if (s->io_bits & 0x00ff)
devpriv->dacsr |= DT2821_LBOE;
- else
- devpriv->dacsr &= ~DT2821_LBOE;
if (s->io_bits & 0xff00)
devpriv->dacsr |= DT2821_HBOE;
- else
- devpriv->dacsr &= ~DT2821_HBOE;
outw(devpriv->dacsr, dev->iobase + DT2821_DACSR);
- return 1;
+ return insn->n;
}
static const struct comedi_lrange *const ai_range_table[] = {
@@ -1188,10 +1195,9 @@ static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
#endif
}
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = dt282x_grab_dma(dev, it->options[opt_dma1],
it->options[opt_dma2]);
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 01a2f889d5b..64ef87598b6 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -50,6 +50,7 @@ AO commands are not supported.
#define DEBUG 1
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -641,32 +642,23 @@ static void dt3k_dio_config(struct comedi_device *dev, int bits)
static int dt3k_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int mask;
-
- mask = (CR_CHAN(insn->chanspec) < 4) ? 0x0f : 0xf0;
-
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= mask;
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~mask;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (s->
- io_bits & (1 << CR_CHAN(insn->chanspec))) ? COMEDI_OUTPUT :
- COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- break;
- }
- mask = (s->io_bits & 0x01) | ((s->io_bits & 0x10) >> 3);
- dt3k_dio_config(dev, mask);
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
+ int ret;
+
+ if (chan < 4)
+ mask = 0x0f;
+ else
+ mask = 0xf0;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
+
+ dt3k_dio_config(dev, (s->io_bits & 0x01) | ((s->io_bits & 0x10) >> 3));
return insn->n;
}
@@ -722,10 +714,9 @@ static int dt3000_auto_attach(struct comedi_device *dev,
dev->board_ptr = this_board;
dev->board_name = this_board->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret < 0)
diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c
index 6c60949d919..b5e6f33dc21 100644
--- a/drivers/staging/comedi/drivers/dt9812.c
+++ b/drivers/staging/comedi/drivers/dt9812.c
@@ -39,10 +39,9 @@ for my needs.
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
@@ -188,8 +187,8 @@ enum {
};
struct dt9812_flash_data {
- u16 numbytes;
- u16 address;
+ __le16 numbytes;
+ __le16 address;
};
#define DT9812_MAX_NUM_MULTI_BYTE_RDS \
@@ -230,7 +229,7 @@ struct dt9812_rmw_multi {
};
struct dt9812_usb_cmd {
- u32 cmd;
+ __le32 cmd;
union {
struct dt9812_flash_data flash_data_info;
struct dt9812_read_multi read_multi_info;
@@ -707,8 +706,9 @@ static int dt9812_reset_device(struct comedi_device *dev)
u32 serial;
u16 vendor;
u16 product;
- u16 tmp16;
u8 tmp8;
+ __le16 tmp16;
+ __le32 tmp32;
int ret;
int i;
@@ -731,19 +731,19 @@ static int dt9812_reset_device(struct comedi_device *dev)
}
}
- ret = dt9812_read_info(dev, 1, &vendor, sizeof(vendor));
+ ret = dt9812_read_info(dev, 1, &tmp16, sizeof(tmp16));
if (ret) {
dev_err(dev->class_dev, "failed to read vendor id\n");
return ret;
}
- vendor = le16_to_cpu(vendor);
+ vendor = le16_to_cpu(tmp16);
- ret = dt9812_read_info(dev, 3, &product, sizeof(product));
+ ret = dt9812_read_info(dev, 3, &tmp16, sizeof(tmp16));
if (ret) {
dev_err(dev->class_dev, "failed to read product id\n");
return ret;
}
- product = le16_to_cpu(product);
+ product = le16_to_cpu(tmp16);
ret = dt9812_read_info(dev, 5, &tmp16, sizeof(tmp16));
if (ret) {
@@ -752,12 +752,12 @@ static int dt9812_reset_device(struct comedi_device *dev)
}
devpriv->device = le16_to_cpu(tmp16);
- ret = dt9812_read_info(dev, 7, &serial, sizeof(serial));
+ ret = dt9812_read_info(dev, 7, &tmp32, sizeof(tmp32));
if (ret) {
dev_err(dev->class_dev, "failed to read serial number\n");
return ret;
}
- serial = le32_to_cpu(serial);
+ serial = le32_to_cpu(tmp32);
/* let the user know what node this device is now attached to */
dev_info(dev->class_dev, "USB DT9812 (%4.4x.%4.4x.%4.4x) #0x%8.8x\n",
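
The dt9812 changes above are endianness annotations: fields carrying little-endian wire data become __le16/__le32 and are read into matching temporaries before le16_to_cpu()/le32_to_cpu(). A minimal sketch with a hypothetical packed message layout, assuming the usual kernel byteorder helpers:

struct example_msg {                    /* hypothetical wire format */
        __le16 vendor;
        __le32 serial;
} __packed;

static void example_decode(const struct example_msg *msg,
                           u16 *vendor, u32 *serial)
{
        *vendor = le16_to_cpu(msg->vendor);     /* wire order -> CPU order */
        *serial = le32_to_cpu(msg->serial);
}
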
@@ -781,10 +781,9 @@ static int dt9812_auto_attach(struct comedi_device *dev,
bool is_unipolar;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
sema_init(&devpriv->sem, 1);
usb_set_intfdata(intf, devpriv);
diff --git a/drivers/staging/comedi/drivers/dyna_pci10xx.c b/drivers/staging/comedi/drivers/dyna_pci10xx.c
index e14dd3ae9ec..fd525f499f2 100644
--- a/drivers/staging/comedi/drivers/dyna_pci10xx.c
+++ b/drivers/staging/comedi/drivers/dyna_pci10xx.c
@@ -33,6 +33,8 @@
their cards in their manuals.
*/
+#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/mutex.h>
@@ -183,10 +185,9 @@ static int dyna_pci10xx_auto_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/fl512.c b/drivers/staging/comedi/drivers/fl512.c
index ff6f0bd7c86..8d70f64b157 100644
--- a/drivers/staging/comedi/drivers/fl512.c
+++ b/drivers/staging/comedi/drivers/fl512.c
@@ -18,10 +18,10 @@ Configuration options:
#define DEBUG 0
+#include <linux/module.h>
#include "../comedidev.h"
#include <linux/delay.h>
-#include <linux/ioport.h>
#define FL512_SIZE 16 /* the size of the used memory */
struct fl512_private {
@@ -118,10 +118,9 @@ static int fl512_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_alloc_subdevices(dev, 2);
if (ret)
diff --git a/drivers/staging/comedi/drivers/gsc_hpdi.c b/drivers/staging/comedi/drivers/gsc_hpdi.c
index 2fceff93867..559bf558353 100644
--- a/drivers/staging/comedi/drivers/gsc_hpdi.c
+++ b/drivers/staging/comedi/drivers/gsc_hpdi.c
@@ -42,6 +42,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -223,37 +224,26 @@ struct hpdi_private {
volatile uint32_t bits[24];
/* number of bytes at which to generate COMEDI_CB_BLOCK events */
volatile unsigned int block_size;
- unsigned dio_config_output:1;
};
static int dio_config_insn(struct comedi_device *dev,
- struct comedi_subdevice *s, struct comedi_insn *insn,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
unsigned int *data)
{
- struct hpdi_private *devpriv = dev->private;
+ int ret;
switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- devpriv->dio_config_output = 1;
- return insn->n;
- break;
- case INSN_CONFIG_DIO_INPUT:
- devpriv->dio_config_output = 0;
- return insn->n;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- devpriv->dio_config_output ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
case INSN_CONFIG_BLOCK_SIZE:
return dio_config_block_size(dev, data);
- break;
default:
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0xffffffff);
+ if (ret)
+ return ret;
break;
}
- return -EINVAL;
+ return insn->n;
}
static void disable_plx_interrupts(struct comedi_device *dev)
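
The gsc_hpdi (*insn_config) rewrite above keeps the driver-specific INSN_CONFIG_BLOCK_SIZE case and routes everything else through comedi_dio_insn_config(), dropping the private direction flag in favour of s->io_bits. A minimal sketch with hypothetical names:

static int example_config_block_size(struct comedi_device *dev,
                                     unsigned int *data);  /* stands in for dio_config_block_size() */

static int example_dio_insn_config(struct comedi_device *dev,
                                   struct comedi_subdevice *s,
                                   struct comedi_insn *insn,
                                   unsigned int *data)
{
        int ret;

        switch (data[0]) {
        case INSN_CONFIG_BLOCK_SIZE:
                /* driver-specific instruction handled locally */
                return example_config_block_size(dev, data);
        default:
                /* INPUT/OUTPUT/QUERY handled by the core helper */
                ret = comedi_dio_insn_config(dev, s, insn, data, 0xffffffff);
                if (ret)
                        return ret;
                break;
        }

        return insn->n;
}
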
@@ -483,10 +473,9 @@ static int hpdi_auto_attach(struct comedi_device *dev,
dev->board_ptr = thisboard;
dev->board_name = thisboard->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
retval = comedi_pci_enable(dev);
if (retval)
@@ -673,9 +662,7 @@ static int di_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
static int hpdi_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
- struct hpdi_private *devpriv = dev->private;
-
- if (devpriv->dio_config_output)
+ if (s->io_bits)
return -EINVAL;
else
return di_cmd_test(dev, s, cmd);
@@ -746,9 +733,7 @@ static int di_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
static int hpdi_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
- struct hpdi_private *devpriv = dev->private;
-
- if (devpriv->dio_config_output)
+ if (s->io_bits)
return -EINVAL;
else
return di_cmd(dev, s);
diff --git a/drivers/staging/comedi/drivers/icp_multi.c b/drivers/staging/comedi/drivers/icp_multi.c
index a11e015dc03..3889d23292d 100644
--- a/drivers/staging/comedi/drivers/icp_multi.c
+++ b/drivers/staging/comedi/drivers/icp_multi.c
@@ -42,6 +42,7 @@ There are 4 x 12-bit Analogue Outputs. Ranges : 5V, 10V, +/-5V, +/-10V
Configuration options: not applicable, uses PCI auto config
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -495,10 +496,9 @@ static int icp_multi_auto_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/ii_pci20kc.c b/drivers/staging/comedi/drivers/ii_pci20kc.c
index ee7537daf47..5c3a318b464 100644
--- a/drivers/staging/comedi/drivers/ii_pci20kc.c
+++ b/drivers/staging/comedi/drivers/ii_pci20kc.c
@@ -1,662 +1,537 @@
/*
- * comedi/drivers/ii_pci20kc.c
- * Driver for Intelligent Instruments PCI-20001C carrier board
- * and modules.
+ * ii_pci20kc.c
+ * Driver for Intelligent Instruments PCI-20001C carrier board and modules.
*
- * Copyright (C) 2000 Markus Kempf <kempf@matsci.uni-sb.de>
- * with suggestions from David Schleef
- * 16.06.2000
- *
- * Linux device driver for COMEDI
- * Intelligent Instrumentation
- * PCI-20001 C-2A Carrier Board
- * PCI-20341 M-1A 16-Bit analog input module
- * - differential
- * - range (-5V - +5V)
- * - 16 bit
- * PCI-20006 M-2 16-Bit analog output module
- * - ranges (-10V - +10V) (0V - +10V) (-5V - +5V)
- * - 16 bit
- *
- * only ONE PCI-20341 module possible
- * only ONE PCI-20006 module possible
- * no extern trigger implemented
- *
- * NOT WORKING (but soon) only 4 on-board differential channels supported
- * NOT WORKING (but soon) only ONE di-port and ONE do-port supported
- * instead of 4 digital ports
- * di-port == Port 0
- * do-port == Port 1
- *
- * The state of this driver is only a starting point for a complete
- * COMEDI-driver. The final driver should support all features of the
- * carrier board and modules.
+ * Copyright (C) 2000 Markus Kempf <kempf@matsci.uni-sb.de>
+ * with suggestions from David Schleef 16.06.2000
+ */
+
+/*
+ * Driver: ii_pci20kc
+ * Description: Intelligent Instruments PCI-20001C carrier board
+ * Devices: [Intelligent Instrumentation] PCI-20001C (ii_pci20kc)
+ * Author: Markus Kempf <kempf@matsci.uni-sb.de>
+ * Status: works
*
- * The test configuration:
+ * Supports the PCI-20001C-1a and PCI-20001C-2a carrier boards. The
+ * -2a version has 32 on-board DIO channels. Three add-on modules
+ * can be added to the carrier board for additional functionality.
*
- * kernel 2.2.14 with RTAI v1.2 and patch-2.2.14rthal2
- * COMEDI 0.7.45
- * COMEDILIB 0.7.9
+ * Supported add-on modules:
+ * PCI-20006M-1 1 channel, 16-bit analog output module
+ * PCI-20006M-2 2 channel, 16-bit analog output module
+ * PCI-20341M-1A 4 channel, 16-bit analog input module
*
+ * Options:
+ * 0 Board base address
+ * 1 IRQ (not used)
*/
-/*
-Driver: ii_pci20kc
-Description: Intelligent Instruments PCI-20001C carrier board
-Author: Markus Kempf <kempf@matsci.uni-sb.de>
-Devices: [Intelligent Instrumentation] PCI-20001C (ii_pci20kc)
-Status: works
-
-Supports the PCI-20001 C-2a Carrier board, and could probably support
-the other carrier boards with small modifications. Modules supported
-are:
- PCI-20006 M-2 16-bit analog output module
- PCI-20341 M-1A 16-bit analog input module
-
-Options:
- 0 Board base address
- 1 IRQ
- 2 first option for module 1
- 3 second option for module 1
- 4 first option for module 2
- 5 second option for module 2
- 6 first option for module 3
- 7 second option for module 3
-
-options for PCI-20006M:
- first: Analog output channel 0 range configuration
- 0 bipolar 10 (-10V -- +10V)
- 1 unipolar 10 (0V -- +10V)
- 2 bipolar 5 (-5V -- 5V)
- second: Analog output channel 1 range configuration
-
-options for PCI-20341M:
- first: Analog input gain configuration
- 0 1
- 1 10
- 2 100
- 3 200
-*/
+#include <linux/module.h>
#include "../comedidev.h"
-#define PCI20000_ID 0x1d
-#define PCI20341_ID 0x77
-#define PCI20006_ID 0xe3
-#define PCI20xxx_EMPTY_ID 0xff
-
-#define PCI20000_OFFSET 0x100
-#define PCI20000_MODULES 3
-
-#define PCI20000_DIO_0 0x80
-#define PCI20000_DIO_1 0x81
-#define PCI20000_DIO_2 0xc0
-#define PCI20000_DIO_3 0xc1
-#define PCI20000_DIO_CONTROL_01 0x83 /* port 0, 1 control */
-#define PCI20000_DIO_CONTROL_23 0xc3 /* port 2, 3 control */
-#define PCI20000_DIO_BUFFER 0x82 /* buffer direction & enable */
-#define PCI20000_DIO_EOC 0xef /* even port, control output */
-#define PCI20000_DIO_OOC 0xfd /* odd port, control output */
-#define PCI20000_DIO_EIC 0x90 /* even port, control input */
-#define PCI20000_DIO_OIC 0x82 /* odd port, control input */
-#define DIO_CAND 0x12 /* and bit 1 & 4 of control */
-#define DIO_BE 0x01 /* buffer: port enable */
-#define DIO_BO 0x04 /* buffer: output */
-#define DIO_BI 0x05 /* buffer: input */
-#define DIO_PS_0 0x00 /* buffer: port shift 0 */
-#define DIO_PS_1 0x01 /* buffer: port shift 1 */
-#define DIO_PS_2 0x04 /* buffer: port shift 2 */
-#define DIO_PS_3 0x05 /* buffer: port shift 3 */
-
-#define PCI20006_LCHAN0 0x0d
-#define PCI20006_STROBE0 0x0b
-#define PCI20006_LCHAN1 0x15
-#define PCI20006_STROBE1 0x13
-
-#define PCI20341_INIT 0x04
-#define PCI20341_REPMODE 0x00 /* single shot mode */
-#define PCI20341_PACER 0x00 /* Hardware Pacer disabled */
-#define PCI20341_CHAN_NR 0x04 /* number of input channels */
-#define PCI20341_CONFIG_REG 0x10
-#define PCI20341_MOD_STATUS 0x01
-#define PCI20341_OPT_REG 0x11
-#define PCI20341_SET_TIME_REG 0x15
-#define PCI20341_LCHAN_ADDR_REG 0x13
-#define PCI20341_CHAN_LIST 0x80
-#define PCI20341_CC_RESET 0x1b
-#define PCI20341_CHAN_RESET 0x19
-#define PCI20341_SOFT_PACER 0x04
-#define PCI20341_STATUS_REG 0x12
-#define PCI20341_LDATA 0x02
-#define PCI20341_DAISY_CHAIN 0x20 /* On-board inputs only */
-#define PCI20341_MUX 0x04 /* Enable on-board MUX */
-#define PCI20341_SCANLIST 0x80 /* Channel/Gain Scan List */
-
-union pci20xxx_subdev_private {
- void __iomem *iobase;
- struct {
- void __iomem *iobase;
- const struct comedi_lrange *ao_range_list[2];
- /* range of channels of ao module */
- unsigned int last_data[2];
- } pci20006;
- struct {
- void __iomem *iobase;
- int timebase;
- int settling_time;
- int ai_gain;
- } pci20341;
+/*
+ * Register I/O map
+ */
+#define II20K_MOD_OFFSET 0x100
+#define II20K_ID_REG 0x00
+#define II20K_ID_MOD1_EMPTY (1 << 7)
+#define II20K_ID_MOD2_EMPTY (1 << 6)
+#define II20K_ID_MOD3_EMPTY (1 << 5)
+#define II20K_ID_MASK 0x1f
+#define II20K_ID_PCI20001C_1A 0x1b /* no on-board DIO */
+#define II20K_ID_PCI20001C_2A 0x1d /* on-board DIO */
+#define II20K_MOD_STATUS_REG 0x40
+#define II20K_MOD_STATUS_IRQ_MOD1 (1 << 7)
+#define II20K_MOD_STATUS_IRQ_MOD2 (1 << 6)
+#define II20K_MOD_STATUS_IRQ_MOD3 (1 << 5)
+#define II20K_DIO0_REG 0x80
+#define II20K_DIO1_REG 0x81
+#define II20K_DIR_ENA_REG 0x82
+#define II20K_DIR_DIO3_OUT (1 << 7)
+#define II20K_DIR_DIO2_OUT (1 << 6)
+#define II20K_BUF_DISAB_DIO3 (1 << 5)
+#define II20K_BUF_DISAB_DIO2 (1 << 4)
+#define II20K_DIR_DIO1_OUT (1 << 3)
+#define II20K_DIR_DIO0_OUT (1 << 2)
+#define II20K_BUF_DISAB_DIO1 (1 << 1)
+#define II20K_BUF_DISAB_DIO0 (1 << 0)
+#define II20K_CTRL01_REG 0x83
+#define II20K_CTRL01_SET (1 << 7)
+#define II20K_CTRL01_DIO0_IN (1 << 4)
+#define II20K_CTRL01_DIO1_IN (1 << 1)
+#define II20K_DIO2_REG 0xc0
+#define II20K_DIO3_REG 0xc1
+#define II20K_CTRL23_REG 0xc3
+#define II20K_CTRL23_SET (1 << 7)
+#define II20K_CTRL23_DIO2_IN (1 << 4)
+#define II20K_CTRL23_DIO3_IN (1 << 1)
+
+#define II20K_ID_PCI20006M_1 0xe2 /* 1 AO channels */
+#define II20K_ID_PCI20006M_2 0xe3 /* 2 AO channels */
+#define II20K_AO_STRB_REG(x) (0x0b + ((x) * 0x08))
+#define II20K_AO_LSB_REG(x) (0x0d + ((x) * 0x08))
+#define II20K_AO_MSB_REG(x) (0x0e + ((x) * 0x08))
+#define II20K_AO_STRB_BOTH_REG 0x1b
+
+#define II20K_ID_PCI20341M_1 0x77 /* 4 AI channels */
+#define II20K_AI_STATUS_CMD_REG 0x01
+#define II20K_AI_STATUS_CMD_BUSY (1 << 7)
+#define II20K_AI_STATUS_CMD_HW_ENA (1 << 1)
+#define II20K_AI_STATUS_CMD_EXT_START (1 << 0)
+#define II20K_AI_LSB_REG 0x02
+#define II20K_AI_MSB_REG 0x03
+#define II20K_AI_PACER_RESET_REG 0x04
+#define II20K_AI_16BIT_DATA_REG 0x06
+#define II20K_AI_CONF_REG 0x10
+#define II20K_AI_CONF_ENA (1 << 2)
+#define II20K_AI_OPT_REG 0x11
+#define II20K_AI_OPT_TRIG_ENA (1 << 5)
+#define II20K_AI_OPT_TRIG_INV (1 << 4)
+#define II20K_AI_OPT_TIMEBASE(x) (((x) & 0x3) << 1)
+#define II20K_AI_OPT_BURST_MODE (1 << 0)
+#define II20K_AI_STATUS_REG 0x12
+#define II20K_AI_STATUS_INT (1 << 7)
+#define II20K_AI_STATUS_TRIG (1 << 6)
+#define II20K_AI_STATUS_TRIG_ENA (1 << 5)
+#define II20K_AI_STATUS_PACER_ERR (1 << 2)
+#define II20K_AI_STATUS_DATA_ERR (1 << 1)
+#define II20K_AI_STATUS_SET_TIME_ERR (1 << 0)
+#define II20K_AI_LAST_CHAN_ADDR_REG 0x13
+#define II20K_AI_CUR_ADDR_REG 0x14
+#define II20K_AI_SET_TIME_REG 0x15
+#define II20K_AI_DELAY_LSB_REG 0x16
+#define II20K_AI_DELAY_MSB_REG 0x17
+#define II20K_AI_CHAN_ADV_REG 0x18
+#define II20K_AI_CHAN_RESET_REG 0x19
+#define II20K_AI_START_TRIG_REG 0x1a
+#define II20K_AI_COUNT_RESET_REG 0x1b
+#define II20K_AI_CHANLIST_REG 0x80
+#define II20K_AI_CHANLIST_ONBOARD_ONLY (1 << 5)
+#define II20K_AI_CHANLIST_GAIN(x) (((x) & 0x3) << 3)
+#define II20K_AI_CHANLIST_MUX_ENA (1 << 2)
+#define II20K_AI_CHANLIST_CHAN(x) (((x) & 0x3) << 0)
+#define II20K_AI_CHANLIST_LEN 0x80
+
+/* the AO range is set by jumpers on the 20006M module */
+static const struct comedi_lrange ii20k_ao_ranges = {
+ 3, {
+ BIP_RANGE(5), /* Chan 0 - W1/W3 in Chan 1 - W2/W4 in */
+ UNI_RANGE(10), /* Chan 0 - W1/W3 out Chan 1 - W2/W4 in */
+ BIP_RANGE(10) /* Chan 0 - W1/W3 in Chan 1 - W2/W4 out */
+ }
};
-struct pci20xxx_private {
-
- void __iomem *ioaddr;
- union pci20xxx_subdev_private subdev_private[PCI20000_MODULES];
+static const struct comedi_lrange ii20k_ai_ranges = {
+ 4, {
+ BIP_RANGE(5), /* gain 1 */
+ BIP_RANGE(0.5), /* gain 10 */
+ BIP_RANGE(0.05), /* gain 100 */
+ BIP_RANGE(0.025) /* gain 200 */
+ },
};
-#define CHAN (CR_CHAN(it->chanlist[0]))
+struct ii20k_ao_private {
+ unsigned int last_data[2];
+};
-static int pci20006_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int opt0, int opt1);
-static int pci20341_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int opt0, int opt1);
-static int pci20xxx_dio_init(struct comedi_device *dev,
- struct comedi_subdevice *s);
+struct ii20k_private {
+ void __iomem *ioaddr;
+};
-/*
- options[0] Board base address
- options[1] IRQ
- options[2] first option for module 1
- options[3] second option for module 1
- options[4] first option for module 2
- options[5] second option for module 2
- options[6] first option for module 3
- options[7] second option for module 3
-
- options for PCI-20341M:
- first Analog input gain configuration
- 0 == 1
- 1 == 10
- 2 == 100
- 3 == 200
-
- options for PCI-20006M:
- first Analog output channel 0 range configuration
- 0 == bipolar 10 (-10V -- +10V)
- 1 == unipolar 10V (0V -- +10V)
- 2 == bipolar 5V (-5V -- +5V)
- second Analog output channel 1 range configuration
- 0 == bipolar 10 (-10V -- +10V)
- 1 == unipolar 10V (0V -- +10V)
- 2 == bipolar 5V (-5V -- +5V)
-*/
-static int pci20xxx_attach(struct comedi_device *dev,
- struct comedi_devconfig *it)
+static void __iomem *ii20k_module_iobase(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
- struct pci20xxx_private *devpriv;
- unsigned char i;
- int ret;
- int id;
- struct comedi_subdevice *s;
- union pci20xxx_subdev_private *sdp;
-
- ret = comedi_alloc_subdevices(dev, 1 + PCI20000_MODULES);
- if (ret)
- return ret;
+ struct ii20k_private *devpriv = dev->private;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
- if (!devpriv)
- return -ENOMEM;
- dev->private = devpriv;
-
- devpriv->ioaddr = (void __iomem *)(unsigned long)it->options[0];
+ return devpriv->ioaddr + (s->index + 1) * II20K_MOD_OFFSET;
+}
- /* Check PCI-20001 C-2A Carrier Board ID */
- if ((readb(devpriv->ioaddr) & PCI20000_ID) != PCI20000_ID) {
- dev_warn(dev->class_dev,
- "PCI-20001 C-2A Carrier Board at base=0x%p not found !\n",
- devpriv->ioaddr);
- return -EINVAL;
- }
- dev_info(dev->class_dev, "PCI-20001 C-2A at base=0x%p\n",
- devpriv->ioaddr);
-
- for (i = 0; i < PCI20000_MODULES; i++) {
- s = &dev->subdevices[i];
- id = readb(devpriv->ioaddr + (i + 1) * PCI20000_OFFSET);
- s->private = devpriv->subdev_private + i;
- sdp = s->private;
- switch (id) {
- case PCI20006_ID:
- sdp->pci20006.iobase =
- devpriv->ioaddr + (i + 1) * PCI20000_OFFSET;
- pci20006_init(dev, s, it->options[2 * i + 2],
- it->options[2 * i + 3]);
- dev_info(dev->class_dev,
- "PCI-20006 module in slot %d\n", i + 1);
- break;
- case PCI20341_ID:
- sdp->pci20341.iobase =
- devpriv->ioaddr + (i + 1) * PCI20000_OFFSET;
- pci20341_init(dev, s, it->options[2 * i + 2],
- it->options[2 * i + 3]);
- dev_info(dev->class_dev,
- "PCI-20341 module in slot %d\n", i + 1);
- break;
- default:
- dev_warn(dev->class_dev,
- "unknown module code 0x%02x in slot %d: module disabled\n",
- id, i); /* XXX this looks like a bug! i + 1 ?? */
- /* fall through */
- case PCI20xxx_EMPTY_ID:
- s->type = COMEDI_SUBD_UNUSED;
- break;
- }
- }
+static int ii20k_ao_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ struct ii20k_ao_private *ao_spriv = s->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int i;
- /* initialize struct pci20xxx_private */
- pci20xxx_dio_init(dev, &dev->subdevices[PCI20000_MODULES]);
+ for (i = 0; i < insn->n; i++)
+ data[i] = ao_spriv->last_data[chan];
- return 1;
+ return insn->n;
}
-static void pci20xxx_detach(struct comedi_device *dev)
+static int ii20k_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- /* Nothing to cleanup */
-}
+ struct ii20k_ao_private *ao_spriv = s->private;
+ void __iomem *iobase = ii20k_module_iobase(dev, s);
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val = ao_spriv->last_data[chan];
+ int i;
-/* pci20006m */
+ for (i = 0; i < insn->n; i++) {
+ val = data[i];
-static int pci20006_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
-static int pci20006_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
+ /* munge data */
+ val += ((s->maxdata + 1) >> 1);
+ val &= s->maxdata;
-static const struct comedi_lrange *pci20006_range_list[] = {
- &range_bipolar10,
- &range_unipolar10,
- &range_bipolar5,
-};
+ writeb(val & 0xff, iobase + II20K_AO_LSB_REG(chan));
+ writeb((val >> 8) & 0xff, iobase + II20K_AO_MSB_REG(chan));
+ writeb(0x00, iobase + II20K_AO_STRB_REG(chan));
+ }
-static int pci20006_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int opt0, int opt1)
-{
- union pci20xxx_subdev_private *sdp = s->private;
-
- if (opt0 < 0 || opt0 > 2)
- opt0 = 0;
- if (opt1 < 0 || opt1 > 2)
- opt1 = 0;
-
- sdp->pci20006.ao_range_list[0] = pci20006_range_list[opt0];
- sdp->pci20006.ao_range_list[1] = pci20006_range_list[opt1];
-
- /* ao subdevice */
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 2;
- s->len_chanlist = 2;
- s->insn_read = pci20006_insn_read;
- s->insn_write = pci20006_insn_write;
- s->maxdata = 0xffff;
- s->range_table_list = sdp->pci20006.ao_range_list;
- return 0;
+ ao_spriv->last_data[chan] = val;
+
+ return insn->n;
}
-static int pci20006_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int ii20k_ai_wait_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ int timeout)
{
- union pci20xxx_subdev_private *sdp = s->private;
+ void __iomem *iobase = ii20k_module_iobase(dev, s);
+ unsigned char status;
- data[0] = sdp->pci20006.last_data[CR_CHAN(insn->chanspec)];
+ do {
+ status = readb(iobase + II20K_AI_STATUS_REG);
+ if ((status & II20K_AI_STATUS_INT) == 0)
+ return 0;
+ } while (timeout--);
- return 1;
+ return -ETIME;
}
-static int pci20006_insn_write(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static void ii20k_ai_setup(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int chanspec)
{
- union pci20xxx_subdev_private *sdp = s->private;
- int hi, lo;
- unsigned int boarddata;
-
- sdp->pci20006.last_data[CR_CHAN(insn->chanspec)] = data[0];
- boarddata = (((unsigned int)data[0] + 0x8000) & 0xffff);
- /* comedi-data -> board-data */
- lo = (boarddata & 0xff);
- hi = ((boarddata >> 8) & 0xff);
-
- switch (CR_CHAN(insn->chanspec)) {
- case 0:
- writeb(lo, sdp->iobase + PCI20006_LCHAN0);
- writeb(hi, sdp->iobase + PCI20006_LCHAN0 + 1);
- writeb(0x00, sdp->iobase + PCI20006_STROBE0);
- break;
- case 1:
- writeb(lo, sdp->iobase + PCI20006_LCHAN1);
- writeb(hi, sdp->iobase + PCI20006_LCHAN1 + 1);
- writeb(0x00, sdp->iobase + PCI20006_STROBE1);
- break;
- default:
- dev_warn(dev->class_dev, "ao channel Error!\n");
- return -EINVAL;
- }
-
- return 1;
-}
+ void __iomem *iobase = ii20k_module_iobase(dev, s);
+ unsigned int chan = CR_CHAN(chanspec);
+ unsigned int range = CR_RANGE(chanspec);
+ unsigned char val;
-/* PCI20341M */
+ /* initialize module */
+ writeb(II20K_AI_CONF_ENA, iobase + II20K_AI_CONF_REG);
-static int pci20341_insn_read(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
+ /* software conversion */
+ writeb(0, iobase + II20K_AI_STATUS_CMD_REG);
-static const int pci20341_timebase[] = { 0x00, 0x00, 0x00, 0x04 };
-static const int pci20341_settling_time[] = { 0x58, 0x58, 0x93, 0x99 };
+ /* set the time base for the settling time counter based on the gain */
+ val = (range < 3) ? II20K_AI_OPT_TIMEBASE(0) : II20K_AI_OPT_TIMEBASE(2);
+ writeb(val, iobase + II20K_AI_OPT_REG);
-static const struct comedi_lrange range_bipolar0_5 = {
- 1,
- {BIP_RANGE(0.5)}
-};
+ /* set the settling time counter based on the gain */
+ val = (range < 2) ? 0x58 : (range < 3) ? 0x93 : 0x99;
+ writeb(val, iobase + II20K_AI_SET_TIME_REG);
-static const struct comedi_lrange range_bipolar0_05 = {
- 1,
- {BIP_RANGE(0.05)}
-};
+ /* set number of input channels */
+ writeb(1, iobase + II20K_AI_LAST_CHAN_ADDR_REG);
-static const struct comedi_lrange range_bipolar0_025 = {
- 1,
- {BIP_RANGE(0.025)}
-};
+ /* set the channel list byte */
+ val = II20K_AI_CHANLIST_ONBOARD_ONLY |
+ II20K_AI_CHANLIST_MUX_ENA |
+ II20K_AI_CHANLIST_GAIN(range) |
+ II20K_AI_CHANLIST_CHAN(chan);
+ writeb(val, iobase + II20K_AI_CHANLIST_REG);
-static const struct comedi_lrange *const pci20341_ranges[] = {
- &range_bipolar5,
- &range_bipolar0_5,
- &range_bipolar0_05,
- &range_bipolar0_025,
-};
+ /* reset settling time counter and trigger delay counter */
+ writeb(0, iobase + II20K_AI_COUNT_RESET_REG);
-static int pci20341_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int opt0, int opt1)
-{
- union pci20xxx_subdev_private *sdp = s->private;
- int option;
-
- /* options handling */
- if (opt0 < 0 || opt0 > 3)
- opt0 = 0;
- sdp->pci20341.timebase = pci20341_timebase[opt0];
- sdp->pci20341.settling_time = pci20341_settling_time[opt0];
-
- /* ai subdevice */
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = PCI20341_CHAN_NR;
- s->len_chanlist = PCI20341_SCANLIST;
- s->insn_read = pci20341_insn_read;
- s->maxdata = 0xffff;
- s->range_table = pci20341_ranges[opt0];
-
- /* depends on gain, trigger, repetition mode */
- option = sdp->pci20341.timebase | PCI20341_REPMODE;
-
- /* initialize Module */
- writeb(PCI20341_INIT, sdp->iobase + PCI20341_CONFIG_REG);
- /* set Pacer */
- writeb(PCI20341_PACER, sdp->iobase + PCI20341_MOD_STATUS);
- /* option register */
- writeb(option, sdp->iobase + PCI20341_OPT_REG);
- /* settling time counter */
- writeb(sdp->pci20341.settling_time,
- sdp->iobase + PCI20341_SET_TIME_REG);
- /* trigger not implemented */
- return 0;
+ /* reset channel scanner */
+ writeb(0, iobase + II20K_AI_CHAN_RESET_REG);
}
-static int pci20341_insn_read(struct comedi_device *dev,
+static int ii20k_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- union pci20xxx_subdev_private *sdp = s->private;
- unsigned int i = 0, j = 0;
- int lo, hi;
- unsigned char eoc; /* end of conversion */
- unsigned int clb; /* channel list byte */
- unsigned int boarddata;
-
- /* write number of input channels */
- writeb(1, sdp->iobase + PCI20341_LCHAN_ADDR_REG);
- clb = PCI20341_DAISY_CHAIN | PCI20341_MUX | (sdp->pci20341.ai_gain << 3)
- | CR_CHAN(insn->chanspec);
- writeb(clb, sdp->iobase + PCI20341_CHAN_LIST);
+ void __iomem *iobase = ii20k_module_iobase(dev, s);
+ int ret;
+ int i;
- /* reset settling time counter and trigger delay counter */
- writeb(0x00, sdp->iobase + PCI20341_CC_RESET);
+ ii20k_ai_setup(dev, s, insn->chanspec);
- writeb(0x00, sdp->iobase + PCI20341_CHAN_RESET);
+ for (i = 0; i < insn->n; i++) {
+ unsigned int val;
- /* generate Pacer */
+ /* generate a software start convert signal */
+ readb(iobase + II20K_AI_PACER_RESET_REG);
- for (i = 0; i < insn->n; i++) {
- /* data polling isn't the niciest way to get the data, I know,
- * but there are only 6 cycles (mean) and it is easier than
- * the whole interrupt stuff
- */
- j = 0;
- /* generate Pacer */
- readb(sdp->iobase + PCI20341_SOFT_PACER);
-
- eoc = readb(sdp->iobase + PCI20341_STATUS_REG);
- /* poll Interrupt Flag */
- while ((eoc < 0x80) && j < 100) {
- j++;
- eoc = readb(sdp->iobase + PCI20341_STATUS_REG);
- }
- if (j >= 100) {
- dev_warn(dev->class_dev,
- "AI interrupt channel %i polling exit !\n", i);
- return -EINVAL;
- }
- lo = readb(sdp->iobase + PCI20341_LDATA);
- hi = readb(sdp->iobase + PCI20341_LDATA + 1);
- boarddata = lo + 0x100 * hi;
-
- /* board-data -> comedi-data */
- data[i] = (short)((boarddata + 0x8000) & 0xffff);
- }
+ ret = ii20k_ai_wait_eoc(dev, s, 100);
+ if (ret)
+ return ret;
- return i;
-}
+ val = readb(iobase + II20K_AI_LSB_REG);
+ val |= (readb(iobase + II20K_AI_MSB_REG) << 8);
+
+ /* munge two's complement data */
+ val += ((s->maxdata + 1) >> 1);
+ val &= s->maxdata;
-/* native DIO */
+ data[i] = val;
+ }
-static void pci20xxx_dio_config(struct comedi_device *dev,
- struct comedi_subdevice *s);
-static int pci20xxx_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data);
-static int pci20xxx_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data);
+ return insn->n;
+}
-/* initialize struct pci20xxx_private */
-static int pci20xxx_dio_init(struct comedi_device *dev,
+static void ii20k_dio_config(struct comedi_device *dev,
struct comedi_subdevice *s)
{
+ struct ii20k_private *devpriv = dev->private;
+ unsigned char ctrl01 = 0;
+ unsigned char ctrl23 = 0;
+ unsigned char dir_ena = 0;
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 32;
- s->insn_bits = pci20xxx_dio_insn_bits;
- s->insn_config = pci20xxx_dio_insn_config;
- s->maxdata = 1;
- s->len_chanlist = 32;
- s->range_table = &range_digital;
- s->io_bits = 0;
+ /* port 0 - channels 0-7 */
+ if (s->io_bits & 0x000000ff) {
+ /* output port */
+ ctrl01 &= ~II20K_CTRL01_DIO0_IN;
+ dir_ena &= ~II20K_BUF_DISAB_DIO0;
+ dir_ena |= II20K_DIR_DIO0_OUT;
+ } else {
+ /* input port */
+ ctrl01 |= II20K_CTRL01_DIO0_IN;
+ dir_ena &= ~II20K_DIR_DIO0_OUT;
+ }
- /* digital I/O lines default to input on board reset. */
- pci20xxx_dio_config(dev, s);
+ /* port 1 - channels 8-15 */
+ if (s->io_bits & 0x0000ff00) {
+ /* output port */
+ ctrl01 &= ~II20K_CTRL01_DIO1_IN;
+ dir_ena &= ~II20K_BUF_DISAB_DIO1;
+ dir_ena |= II20K_DIR_DIO1_OUT;
+ } else {
+ /* input port */
+ ctrl01 |= II20K_CTRL01_DIO1_IN;
+ dir_ena &= ~II20K_DIR_DIO1_OUT;
+ }
- return 0;
+ /* port 2 - channels 16-23 */
+ if (s->io_bits & 0x00ff0000) {
+ /* output port */
+ ctrl23 &= ~II20K_CTRL23_DIO2_IN;
+ dir_ena &= ~II20K_BUF_DISAB_DIO2;
+ dir_ena |= II20K_DIR_DIO2_OUT;
+ } else {
+ /* input port */
+ ctrl23 |= II20K_CTRL23_DIO2_IN;
+ dir_ena &= ~II20K_DIR_DIO2_OUT;
+ }
+
+ /* port 3 - channels 24-31 */
+ if (s->io_bits & 0xff000000) {
+ /* output port */
+ ctrl23 &= ~II20K_CTRL23_DIO3_IN;
+ dir_ena &= ~II20K_BUF_DISAB_DIO3;
+ dir_ena |= II20K_DIR_DIO3_OUT;
+ } else {
+ /* input port */
+ ctrl23 |= II20K_CTRL23_DIO3_IN;
+ dir_ena &= ~II20K_DIR_DIO3_OUT;
+ }
+
+ ctrl23 |= II20K_CTRL01_SET;
+ ctrl23 |= II20K_CTRL23_SET;
+
+ /* order is important */
+ writeb(ctrl01, devpriv->ioaddr + II20K_CTRL01_REG);
+ writeb(ctrl23, devpriv->ioaddr + II20K_CTRL23_REG);
+ writeb(dir_ena, devpriv->ioaddr + II20K_DIR_ENA_REG);
}
-static int pci20xxx_dio_insn_config(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int ii20k_dio_insn_config(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int mask, bits;
-
- mask = 1 << CR_CHAN(insn->chanspec);
- if (mask & 0x000000ff)
- bits = 0x000000ff;
- else if (mask & 0x0000ff00)
- bits = 0x0000ff00;
- else if (mask & 0x00ff0000)
- bits = 0x00ff0000;
- else
- bits = 0xff000000;
- if (data[0])
- s->io_bits |= bits;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
+ int ret;
+
+ if (chan < 8)
+ mask = 0x000000ff;
+ else if (chan < 16)
+ mask = 0x0000ff00;
+ else if (chan < 24)
+ mask = 0x00ff0000;
else
- s->io_bits &= ~bits;
- pci20xxx_dio_config(dev, s);
+ mask = 0xff000000;
- return 1;
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
+
+ ii20k_dio_config(dev, s);
+
+ return insn->n;
}
-static int pci20xxx_dio_insn_bits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int ii20k_dio_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct pci20xxx_private *devpriv = dev->private;
- unsigned int mask = data[0];
-
- s->state &= ~mask;
- s->state |= (mask & data[1]);
-
- mask &= s->io_bits;
- if (mask & 0x000000ff)
- writeb((s->state >> 0) & 0xff,
- devpriv->ioaddr + PCI20000_DIO_0);
- if (mask & 0x0000ff00)
- writeb((s->state >> 8) & 0xff,
- devpriv->ioaddr + PCI20000_DIO_1);
- if (mask & 0x00ff0000)
- writeb((s->state >> 16) & 0xff,
- devpriv->ioaddr + PCI20000_DIO_2);
- if (mask & 0xff000000)
- writeb((s->state >> 24) & 0xff,
- devpriv->ioaddr + PCI20000_DIO_3);
-
- data[1] = readb(devpriv->ioaddr + PCI20000_DIO_0);
- data[1] |= readb(devpriv->ioaddr + PCI20000_DIO_1) << 8;
- data[1] |= readb(devpriv->ioaddr + PCI20000_DIO_2) << 16;
- data[1] |= readb(devpriv->ioaddr + PCI20000_DIO_3) << 24;
+ struct ii20k_private *devpriv = dev->private;
+ unsigned int mask = data[0] & s->io_bits; /* outputs only */
+ unsigned int bits = data[1];
+
+ if (mask) {
+ s->state &= ~mask;
+ s->state |= (bits & mask);
+
+ if (mask & 0x000000ff)
+ writeb((s->state >> 0) & 0xff,
+ devpriv->ioaddr + II20K_DIO0_REG);
+ if (mask & 0x0000ff00)
+ writeb((s->state >> 8) & 0xff,
+ devpriv->ioaddr + II20K_DIO1_REG);
+ if (mask & 0x00ff0000)
+ writeb((s->state >> 16) & 0xff,
+ devpriv->ioaddr + II20K_DIO2_REG);
+ if (mask & 0xff000000)
+ writeb((s->state >> 24) & 0xff,
+ devpriv->ioaddr + II20K_DIO3_REG);
+ }
+
+ data[1] = readb(devpriv->ioaddr + II20K_DIO0_REG);
+ data[1] |= readb(devpriv->ioaddr + II20K_DIO1_REG) << 8;
+ data[1] |= readb(devpriv->ioaddr + II20K_DIO2_REG) << 16;
+ data[1] |= readb(devpriv->ioaddr + II20K_DIO3_REG) << 24;
return insn->n;
}
-static void pci20xxx_dio_config(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int ii20k_init_module(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+ struct ii20k_ao_private *ao_spriv;
+ void __iomem *iobase = ii20k_module_iobase(dev, s);
+ unsigned char id;
+
+ id = readb(iobase + II20K_ID_REG);
+ switch (id) {
+ case II20K_ID_PCI20006M_1:
+ case II20K_ID_PCI20006M_2:
+ ao_spriv = comedi_alloc_spriv(s, sizeof(*ao_spriv));
+ if (!ao_spriv)
+ return -ENOMEM;
+
+ /* Analog Output subdevice */
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = (id == II20K_ID_PCI20006M_2) ? 2 : 1;
+ s->maxdata = 0xffff;
+ s->range_table = &ii20k_ao_ranges;
+ s->insn_read = ii20k_ao_insn_read;
+ s->insn_write = ii20k_ao_insn_write;
+ break;
+ case II20K_ID_PCI20341M_1:
+ /* Analog Input subdevice */
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_DIFF;
+ s->n_chan = 4;
+ s->maxdata = 0xffff;
+ s->range_table = &ii20k_ai_ranges;
+ s->insn_read = ii20k_ai_insn_read;
+ break;
+ default:
+ s->type = COMEDI_SUBD_UNUSED;
+ break;
+ }
+
+ return 0;
+}
+
+static int ii20k_attach(struct comedi_device *dev,
+ struct comedi_devconfig *it)
{
- struct pci20xxx_private *devpriv = dev->private;
- unsigned char control_01;
- unsigned char control_23;
- unsigned char buffer;
+ struct ii20k_private *devpriv;
+ struct comedi_subdevice *s;
+ unsigned char id;
+ bool has_dio;
+ int ret;
- control_01 = readb(devpriv->ioaddr + PCI20000_DIO_CONTROL_01);
- control_23 = readb(devpriv->ioaddr + PCI20000_DIO_CONTROL_23);
- buffer = readb(devpriv->ioaddr + PCI20000_DIO_BUFFER);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
+ if (!devpriv)
+ return -ENOMEM;
- if (s->io_bits & 0x000000ff) {
- /* output port 0 */
- control_01 &= PCI20000_DIO_EOC;
- buffer = (buffer & (~(DIO_BE << DIO_PS_0))) | (DIO_BO <<
- DIO_PS_0);
- } else {
- /* input port 0 */
- control_01 = (control_01 & DIO_CAND) | PCI20000_DIO_EIC;
- buffer = (buffer & (~(DIO_BI << DIO_PS_0)));
+ devpriv->ioaddr = (void __iomem *)(unsigned long)it->options[0];
+
+ id = readb(devpriv->ioaddr + II20K_ID_REG);
+ switch (id & II20K_ID_MASK) {
+ case II20K_ID_PCI20001C_1A:
+ break;
+ case II20K_ID_PCI20001C_2A:
+ has_dio = true;
+ break;
+ default:
+ return -ENODEV;
}
- if (s->io_bits & 0x0000ff00) {
- /* output port 1 */
- control_01 &= PCI20000_DIO_OOC;
- buffer = (buffer & (~(DIO_BE << DIO_PS_1))) | (DIO_BO <<
- DIO_PS_1);
+
+ ret = comedi_alloc_subdevices(dev, 4);
+ if (ret)
+ return ret;
+
+ s = &dev->subdevices[0];
+ if (id & II20K_ID_MOD1_EMPTY) {
+ s->type = COMEDI_SUBD_UNUSED;
} else {
- /* input port 1 */
- control_01 = (control_01 & DIO_CAND) | PCI20000_DIO_OIC;
- buffer = (buffer & (~(DIO_BI << DIO_PS_1)));
+ ret = ii20k_init_module(dev, s);
+ if (ret)
+ return ret;
}
- if (s->io_bits & 0x00ff0000) {
- /* output port 2 */
- control_23 &= PCI20000_DIO_EOC;
- buffer = (buffer & (~(DIO_BE << DIO_PS_2))) | (DIO_BO <<
- DIO_PS_2);
+
+ s = &dev->subdevices[1];
+ if (id & II20K_ID_MOD2_EMPTY) {
+ s->type = COMEDI_SUBD_UNUSED;
} else {
- /* input port 2 */
- control_23 = (control_23 & DIO_CAND) | PCI20000_DIO_EIC;
- buffer = (buffer & (~(DIO_BI << DIO_PS_2)));
+ ret = ii20k_init_module(dev, s);
+ if (ret)
+ return ret;
}
- if (s->io_bits & 0xff000000) {
- /* output port 3 */
- control_23 &= PCI20000_DIO_OOC;
- buffer = (buffer & (~(DIO_BE << DIO_PS_3))) | (DIO_BO <<
- DIO_PS_3);
+
+ s = &dev->subdevices[2];
+ if (id & II20K_ID_MOD3_EMPTY) {
+ s->type = COMEDI_SUBD_UNUSED;
} else {
- /* input port 3 */
- control_23 = (control_23 & DIO_CAND) | PCI20000_DIO_OIC;
- buffer = (buffer & (~(DIO_BI << DIO_PS_3)));
+ ret = ii20k_init_module(dev, s);
+ if (ret)
+ return ret;
}
- writeb(control_01, devpriv->ioaddr + PCI20000_DIO_CONTROL_01);
- writeb(control_23, devpriv->ioaddr + PCI20000_DIO_CONTROL_23);
- writeb(buffer, devpriv->ioaddr + PCI20000_DIO_BUFFER);
-}
-#if 0
-static void pci20xxx_do(struct comedi_device *dev, struct comedi_subdevice *s)
-{
- struct pci20xxx_private *devpriv = dev->private;
-
- /* XXX if the channel is configured for input, does this
- do bad things? */
- /* XXX it would be a good idea to only update the registers
- that _need_ to be updated. This requires changes to
- comedi, however. */
- writeb((s->state >> 0) & 0xff, devpriv->ioaddr + PCI20000_DIO_0);
- writeb((s->state >> 8) & 0xff, devpriv->ioaddr + PCI20000_DIO_1);
- writeb((s->state >> 16) & 0xff, devpriv->ioaddr + PCI20000_DIO_2);
- writeb((s->state >> 24) & 0xff, devpriv->ioaddr + PCI20000_DIO_3);
-}
-
-static unsigned int pci20xxx_di(struct comedi_device *dev,
- struct comedi_subdevice *s)
-{
- struct pci20xxx_private *devpriv = dev->private;
- unsigned int bits;
-
- /* XXX same note as above */
- bits = readb(devpriv->ioaddr + PCI20000_DIO_0);
- bits |= readb(devpriv->ioaddr + PCI20000_DIO_1) << 8;
- bits |= readb(devpriv->ioaddr + PCI20000_DIO_2) << 16;
- bits |= readb(devpriv->ioaddr + PCI20000_DIO_3) << 24;
+ /* Digital I/O subdevice */
+ s = &dev->subdevices[3];
+ if (has_dio) {
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = 32;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = ii20k_dio_insn_bits;
+ s->insn_config = ii20k_dio_insn_config;
+
+ /* default all channels to input */
+ ii20k_dio_config(dev, s);
+ } else {
+ s->type = COMEDI_SUBD_UNUSED;
+ }
- return bits;
+ return 0;
}
-#endif
-static struct comedi_driver pci20xxx_driver = {
+static struct comedi_driver ii20k_driver = {
.driver_name = "ii_pci20kc",
.module = THIS_MODULE,
- .attach = pci20xxx_attach,
- .detach = pci20xxx_detach,
+ .attach = ii20k_attach,
+ .detach = comedi_legacy_detach,
};
-module_comedi_driver(pci20xxx_driver);
+module_comedi_driver(ii20k_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
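
Both ii20k_ao_insn_write() and ii20k_ai_insn_read() in the rewrite above "munge" samples by adding half of full scale and masking with s->maxdata, converting between the module's two's-complement codes and comedi's unsigned offset-binary values (the operation is its own inverse). A small standalone check of the 16-bit case:

#include <stdio.h>

static unsigned int munge(unsigned int val, unsigned int maxdata)
{
        val += (maxdata + 1) >> 1;      /* shift mid-scale to the zero code */
        return val & maxdata;           /* wrap into the 16-bit range */
}

int main(void)
{
        unsigned int maxdata = 0xffff;

        printf("0x%04x\n", munge(0x8000, maxdata));     /* most negative code -> 0x0000 */
        printf("0x%04x\n", munge(0x0000, maxdata));     /* mid-scale -> 0x8000 */
        return 0;
}
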
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index 94609f4aa4c..b52d58e5de2 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -38,6 +38,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ctype.h>
@@ -638,10 +639,9 @@ static int jr3_pci_auto_attach(struct comedi_device *dev,
return -EINVAL;
}
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
init_timer(&devpriv->timer);
switch (pcidev->device) {
diff --git a/drivers/staging/comedi/drivers/ke_counter.c b/drivers/staging/comedi/drivers/ke_counter.c
index f10cf10e5fe..15589f62a61 100644
--- a/drivers/staging/comedi/drivers/ke_counter.c
+++ b/drivers/staging/comedi/drivers/ke_counter.c
@@ -29,6 +29,7 @@ This driver is a simple driver to read the counter values from
Kolter Electronic PCI Counter Card.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/me4000.c b/drivers/staging/comedi/drivers/me4000.c
index c2308fd24d6..8f4afadab76 100644
--- a/drivers/staging/comedi/drivers/me4000.c
+++ b/drivers/staging/comedi/drivers/me4000.c
@@ -40,6 +40,7 @@ broken.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -1357,98 +1358,57 @@ static int me4000_dio_insn_bits(struct comedi_device *dev,
static int me4000_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- unsigned long tmp;
- int chan = CR_CHAN(insn->chanspec);
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
+ unsigned int tmp;
+ int ret;
- switch (data[0]) {
- default:
- return -EINVAL;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- case INSN_CONFIG_DIO_INPUT:
- case INSN_CONFIG_DIO_OUTPUT:
- break;
- }
+ if (chan < 8)
+ mask = 0x000000ff;
+ else if (chan < 16)
+ mask = 0x0000ff00;
+ else if (chan < 24)
+ mask = 0x00ff0000;
+ else
+ mask = 0xff000000;
- /*
- * The input or output configuration of each digital line is
- * configured by a special insn_config instruction. chanspec
- * contains the channel to be changed, and data[0] contains the
- * value INSN_CONFIG_DIO_INPUT or INSN_CONFIG_DIO_OUTPUT.
- * On the ME-4000 it is only possible to switch port wise (8 bit)
- */
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
tmp = inl(dev->iobase + ME4000_DIO_CTRL_REG);
+ tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_0 | ME4000_DIO_CTRL_BIT_MODE_1 |
+ ME4000_DIO_CTRL_BIT_MODE_2 | ME4000_DIO_CTRL_BIT_MODE_3 |
+ ME4000_DIO_CTRL_BIT_MODE_4 | ME4000_DIO_CTRL_BIT_MODE_5 |
+ ME4000_DIO_CTRL_BIT_MODE_6 | ME4000_DIO_CTRL_BIT_MODE_7);
+ if (s->io_bits & 0x000000ff)
+ tmp |= ME4000_DIO_CTRL_BIT_MODE_0;
+ if (s->io_bits & 0x0000ff00)
+ tmp |= ME4000_DIO_CTRL_BIT_MODE_2;
+ if (s->io_bits & 0x00ff0000)
+ tmp |= ME4000_DIO_CTRL_BIT_MODE_4;
+ if (s->io_bits & 0xff000000)
+ tmp |= ME4000_DIO_CTRL_BIT_MODE_6;
- if (data[0] == INSN_CONFIG_DIO_OUTPUT) {
- if (chan < 8) {
- s->io_bits |= 0xFF;
- tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_0 |
- ME4000_DIO_CTRL_BIT_MODE_1);
- tmp |= ME4000_DIO_CTRL_BIT_MODE_0;
- } else if (chan < 16) {
- /*
- * Chech for optoisolated ME-4000 version.
- * If one the first port is a fixed output
- * port and the second is a fixed input port.
- */
- if (!inl(dev->iobase + ME4000_DIO_DIR_REG))
- return -ENODEV;
-
- s->io_bits |= 0xFF00;
- tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_2 |
- ME4000_DIO_CTRL_BIT_MODE_3);
- tmp |= ME4000_DIO_CTRL_BIT_MODE_2;
- } else if (chan < 24) {
- s->io_bits |= 0xFF0000;
- tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_4 |
- ME4000_DIO_CTRL_BIT_MODE_5);
- tmp |= ME4000_DIO_CTRL_BIT_MODE_4;
- } else if (chan < 32) {
- s->io_bits |= 0xFF000000;
- tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_6 |
- ME4000_DIO_CTRL_BIT_MODE_7);
- tmp |= ME4000_DIO_CTRL_BIT_MODE_6;
- } else {
- return -EINVAL;
- }
- } else {
- if (chan < 8) {
- /*
- * Chech for optoisolated ME-4000 version.
- * If one the first port is a fixed output
- * port and the second is a fixed input port.
- */
- if (!inl(dev->iobase + ME4000_DIO_DIR_REG))
- return -ENODEV;
-
- s->io_bits &= ~0xFF;
- tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_0 |
- ME4000_DIO_CTRL_BIT_MODE_1);
- } else if (chan < 16) {
- s->io_bits &= ~0xFF00;
- tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_2 |
- ME4000_DIO_CTRL_BIT_MODE_3);
- } else if (chan < 24) {
- s->io_bits &= ~0xFF0000;
- tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_4 |
- ME4000_DIO_CTRL_BIT_MODE_5);
- } else if (chan < 32) {
- s->io_bits &= ~0xFF000000;
- tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_6 |
- ME4000_DIO_CTRL_BIT_MODE_7);
- } else {
- return -EINVAL;
- }
+ /*
+ * Check for optoisolated ME-4000 version.
+ * If so, the first port is a fixed output
+ * port and the second is a fixed input port.
+ */
+ if (inl(dev->iobase + ME4000_DIO_DIR_REG)) {
+ s->io_bits |= 0x000000ff;
+ s->io_bits &= ~0x0000ff00;
+ tmp |= ME4000_DIO_CTRL_BIT_MODE_0;
+ tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_2 |
+ ME4000_DIO_CTRL_BIT_MODE_3);
}
outl(tmp, dev->iobase + ME4000_DIO_CTRL_REG);
- return 1;
+ return insn->n;
}
/*=============================================================================
@@ -1544,10 +1504,9 @@ static int me4000_auto_attach(struct comedi_device *dev,
dev->board_ptr = thisboard;
dev->board_name = thisboard->name;
- info = kzalloc(sizeof(*info), GFP_KERNEL);
+ info = comedi_alloc_devpriv(dev, sizeof(*info));
if (!info)
return -ENOMEM;
- dev->private = info;
result = comedi_pci_enable(dev);
if (result)
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c
index 7533ece3670..a6f6d4a4658 100644
--- a/drivers/staging/comedi/drivers/me_daq.c
+++ b/drivers/staging/comedi/drivers/me_daq.c
@@ -30,6 +30,7 @@
* Analog Input, Analog Output, Digital I/O
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
@@ -185,38 +186,30 @@ static int me_dio_insn_config(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- struct me_private_data *dev_private = dev->private;
- unsigned int mask = 1 << CR_CHAN(insn->chanspec);
- unsigned int bits;
- unsigned int port;
+ struct me_private_data *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
+ int ret;
- if (mask & 0x0000ffff) {
- bits = 0x0000ffff;
- port = ENABLE_PORT_A;
- } else {
- bits = 0xffff0000;
- port = ENABLE_PORT_B;
- }
+ if (chan < 16)
+ mask = 0x0000ffff;
+ else
+ mask = 0xffff0000;
- switch (data[0]) {
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~bits;
- dev_private->control_2 &= ~port;
- break;
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= bits;
- dev_private->control_2 |= port;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & bits) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- }
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
- /* Update the port configuration */
- writew(dev_private->control_2, dev_private->me_regbase + ME_CONTROL_2);
+ if (s->io_bits & 0x0000ffff)
+ devpriv->control_2 |= ENABLE_PORT_A;
+ else
+ devpriv->control_2 &= ~ENABLE_PORT_A;
+ if (s->io_bits & 0xffff0000)
+ devpriv->control_2 |= ENABLE_PORT_B;
+ else
+ devpriv->control_2 &= ~ENABLE_PORT_B;
+
+ writew(devpriv->control_2, devpriv->me_regbase + ME_CONTROL_2);
return insn->n;
}
@@ -490,10 +483,9 @@ static int me_auto_attach(struct comedi_device *dev,
dev->board_ptr = board;
dev->board_name = board->name;
- dev_private = kzalloc(sizeof(*dev_private), GFP_KERNEL);
+ dev_private = comedi_alloc_devpriv(dev, sizeof(*dev_private));
if (!dev_private)
return -ENOMEM;
- dev->private = dev_private;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index 12c34db61d6..35cb4ace797 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -46,6 +46,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/mite.h b/drivers/staging/comedi/drivers/mite.h
index d4487e888e6..8423b8bf338 100644
--- a/drivers/staging/comedi/drivers/mite.h
+++ b/drivers/staging/comedi/drivers/mite.h
@@ -21,6 +21,7 @@
#include <linux/pci.h>
#include <linux/log2.h>
+#include <linux/slab.h>
#include "../comedidev.h"
/* #define DEBUG_MITE */
diff --git a/drivers/staging/comedi/drivers/mpc624.c b/drivers/staging/comedi/drivers/mpc624.c
index 713842ad6ff..acbaeee6250 100644
--- a/drivers/staging/comedi/drivers/mpc624.c
+++ b/drivers/staging/comedi/drivers/mpc624.c
@@ -51,9 +51,9 @@ Configuration Options:
1 -10.1V .. +10.1V
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include <linux/delay.h>
/* Consecutive I/O port addresses */
@@ -286,10 +286,9 @@ static int mpc624_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
switch (it->options[1]) {
case 0:
diff --git a/drivers/staging/comedi/drivers/multiq3.c b/drivers/staging/comedi/drivers/multiq3.c
index 5ecd1b1666f..9d75ea4e201 100644
--- a/drivers/staging/comedi/drivers/multiq3.c
+++ b/drivers/staging/comedi/drivers/multiq3.c
@@ -24,11 +24,10 @@ Devices: [Quanser Consulting] MultiQ-3 (multiq3)
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
#define MULTIQ3_SIZE 16
/*
@@ -232,10 +231,9 @@ static int multiq3_attach(struct comedi_device *dev,
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
s = &dev->subdevices[0];
/* ai subdevice */
diff --git a/drivers/staging/comedi/drivers/ni_6527.c b/drivers/staging/comedi/drivers/ni_6527.c
index 903c2ef5dd9..c2745f201f2 100644
--- a/drivers/staging/comedi/drivers/ni_6527.c
+++ b/drivers/staging/comedi/drivers/ni_6527.c
@@ -36,6 +36,7 @@ Updated: Sat, 25 Jan 2003 13:24:40 -0800
#define DEBUG 1
#define DEBUG_FLAGS
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -335,10 +336,9 @@ static int ni6527_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
devpriv->mite = mite_alloc(pcidev);
if (!devpriv->mite)
diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c
index 42a78de4731..3ba4c5712df 100644
--- a/drivers/staging/comedi/drivers/ni_65xx.c
+++ b/drivers/staging/comedi/drivers/ni_65xx.c
@@ -46,9 +46,9 @@ except maybe the 6514.
#define DEBUG 1
#define DEBUG_FLAGS
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
#include "../comedidev.h"
@@ -591,10 +591,9 @@ static int ni_65xx_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
devpriv->mite = mite_alloc(pcidev);
if (!devpriv->mite)
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index a9e000461ec..3607336dafe 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -34,6 +34,7 @@
* DAQ 6601/6602 User Manual (NI 322137B-01)
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
@@ -929,10 +930,9 @@ static int ni_660x_allocate_private(struct comedi_device *dev)
struct ni_660x_private *devpriv;
unsigned i;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
spin_lock_init(&devpriv->mite_channel_lock);
spin_lock_init(&devpriv->interrupt_lock);
diff --git a/drivers/staging/comedi/drivers/ni_670x.c b/drivers/staging/comedi/drivers/ni_670x.c
index 1a185b9c529..e2926ce3fb2 100644
--- a/drivers/staging/comedi/drivers/ni_670x.c
+++ b/drivers/staging/comedi/drivers/ni_670x.c
@@ -36,9 +36,9 @@ Commands are not supported.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
#include "../comedidev.h"
@@ -158,27 +158,16 @@ static int ni_670x_dio_insn_bits(struct comedi_device *dev,
static int ni_670x_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct ni_670x_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec);
+ int ret;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= 1 << chan;
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~(1 << chan);
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- break;
- }
writel(s->io_bits, devpriv->mite->daq_io_addr + DIO_PORT0_DIR_OFFSET);
return insn->n;
@@ -205,10 +194,9 @@ static int ni_670x_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
devpriv->mite = mite_alloc(pcidev);
if (!devpriv->mite)
diff --git a/drivers/staging/comedi/drivers/ni_at_a2150.c b/drivers/staging/comedi/drivers/ni_at_a2150.c
index 7ea5aa32e9d..2512ce8dfca 100644
--- a/drivers/staging/comedi/drivers/ni_at_a2150.c
+++ b/drivers/staging/comedi/drivers/ni_at_a2150.c
@@ -58,12 +58,14 @@ TRIG_WAKE_EOS
*/
+#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include <linux/io.h>
+
#include <asm/dma.h>
#include "8253.h"
@@ -719,10 +721,9 @@ static int a2150_attach(struct comedi_device *dev, struct comedi_devconfig *it)
int i;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_request_region(dev, it->options[0], A2150_SIZE);
if (ret)
diff --git a/drivers/staging/comedi/drivers/ni_at_ao.c b/drivers/staging/comedi/drivers/ni_at_ao.c
index e080053c697..b9122fd835e 100644
--- a/drivers/staging/comedi/drivers/ni_at_ao.c
+++ b/drivers/staging/comedi/drivers/ni_at_ao.c
@@ -36,10 +36,9 @@ Configuration options:
* document 320379.pdf.
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
/* board registers */
/* registers with _2_ are accessed when GRP2WR is set in CFG1 */
@@ -249,42 +248,35 @@ static int atao_dio_insn_bits(struct comedi_device *dev,
static int atao_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct atao_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec);
- unsigned int mask, bit;
-
- /* The input or output configuration of each digital line is
- * configured by a special insn_config instruction. chanspec
- * contains the channel to be changed, and data[0] contains the
- * value COMEDI_INPUT or COMEDI_OUTPUT. */
-
- mask = (chan < 4) ? 0x0f : 0xf0;
- bit = (chan < 4) ? DOUTEN1 : DOUTEN2;
-
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= mask;
- devpriv->cfg3 |= bit;
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~mask;
- devpriv->cfg3 &= ~bit;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- break;
- }
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
+ int ret;
+
+ if (chan < 4)
+ mask = 0x0f;
+ else
+ mask = 0xf0;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
+
+ if (s->io_bits & 0x0f)
+ devpriv->cfg3 |= DOUTEN1;
+ else
+ devpriv->cfg3 &= ~DOUTEN1;
+ if (s->io_bits & 0xf0)
+ devpriv->cfg3 |= DOUTEN2;
+ else
+ devpriv->cfg3 &= ~DOUTEN2;
outw(devpriv->cfg3, dev->iobase + ATAO_CFG3);
- return 1;
+ return insn->n;
}
/*
@@ -341,10 +333,9 @@ static int atao_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
diff --git a/drivers/staging/comedi/drivers/ni_atmio.c b/drivers/staging/comedi/drivers/ni_atmio.c
index 713edd55a91..856c73d8b7c 100644
--- a/drivers/staging/comedi/drivers/ni_atmio.c
+++ b/drivers/staging/comedi/drivers/ni_atmio.c
@@ -89,10 +89,10 @@ are not supported.
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
-#include <linux/delay.h>
#include <linux/isapnp.h>
#include "ni_stc.h"
diff --git a/drivers/staging/comedi/drivers/ni_atmio16d.c b/drivers/staging/comedi/drivers/ni_atmio16d.c
index da7396f9429..bb3491f5ad2 100644
--- a/drivers/staging/comedi/drivers/ni_atmio16d.c
+++ b/drivers/staging/comedi/drivers/ni_atmio16d.c
@@ -30,11 +30,10 @@ Devices: [National Instruments] AT-MIO-16 (atmio16), AT-MIO-16D (atmio16d)
*
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
#include "comedi_fc.h"
#include "8255.h"
@@ -577,15 +576,19 @@ static int atmio16d_dio_insn_config(struct comedi_device *dev,
unsigned int *data)
{
struct atmio16d_private *devpriv = dev->private;
- int i;
- int mask;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
+ int ret;
+
+ if (chan < 4)
+ mask = 0x0f;
+ else
+ mask = 0xf0;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
- for (i = 0; i < insn->n; i++) {
- mask = (CR_CHAN(insn->chanspec) < 4) ? 0x0f : 0xf0;
- s->io_bits &= ~mask;
- if (data[i])
- s->io_bits |= mask;
- }
devpriv->com_reg_2_state &= ~(COMREG2_DOUTEN0 | COMREG2_DOUTEN1);
if (s->io_bits & 0x0f)
devpriv->com_reg_2_state |= COMREG2_DOUTEN0;
@@ -593,7 +596,7 @@ static int atmio16d_dio_insn_config(struct comedi_device *dev,
devpriv->com_reg_2_state |= COMREG2_DOUTEN1;
outw(devpriv->com_reg_2_state, dev->iobase + COM_REG_2);
- return i;
+ return insn->n;
}
/*
@@ -645,10 +648,9 @@ static int atmio16d_attach(struct comedi_device *dev,
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
/* reset the atmio16d hardware */
reset_atmio16d(dev);
diff --git a/drivers/staging/comedi/drivers/ni_daq_700.c b/drivers/staging/comedi/drivers/ni_daq_700.c
index 3c50e31ecc6..404f83de276 100644
--- a/drivers/staging/comedi/drivers/ni_daq_700.c
+++ b/drivers/staging/comedi/drivers/ni_daq_700.c
@@ -45,9 +45,9 @@ Manuals: Register level: http://www.ni.com/pdf/manuals/340698.pdf
User Manual: http://www.ni.com/pdf/manuals/320676d.pdf
*/
-#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
#include "../comedidev.h"
@@ -90,21 +90,17 @@ static int daq700_dio_insn_bits(struct comedi_device *dev,
static int daq700_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- unsigned int chan = 1 << CR_CHAN(insn->chanspec);
-
- switch (data[0]) {
- case INSN_CONFIG_DIO_INPUT:
- break;
- case INSN_CONFIG_DIO_OUTPUT:
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & chan) ? COMEDI_OUTPUT : COMEDI_INPUT;
- break;
- default:
- return -EINVAL;
- }
+ int ret;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
+
+ /* The DIO channels are not configurable; fix the io_bits */
+ s->io_bits = 0x00ff;
return insn->n;
}
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
index d3d4eb9356a..335ea34fa57 100644
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c
@@ -31,6 +31,7 @@ This is just a wrapper around the 8255.o driver to properly handle
the PCMCIA interface.
*/
+#include <linux/module.h>
#include "../comedidev.h"
#include <pcmcia/cistpl.h>
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index f161e70b3a0..1add114dc0b 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -57,6 +57,7 @@
* 320502b (lab-pc+)
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
@@ -64,79 +65,12 @@
#include "../comedidev.h"
-#include <asm/dma.h>
-
#include "8253.h"
#include "8255.h"
#include "comedi_fc.h"
#include "ni_labpc.h"
-
-/*
- * Register map (all registers are 8-bit)
- */
-#define STAT1_REG 0x00 /* R: Status 1 reg */
-#define STAT1_DAVAIL (1 << 0)
-#define STAT1_OVERRUN (1 << 1)
-#define STAT1_OVERFLOW (1 << 2)
-#define STAT1_CNTINT (1 << 3)
-#define STAT1_GATA0 (1 << 5)
-#define STAT1_EXTGATA0 (1 << 6)
-#define CMD1_REG 0x00 /* W: Command 1 reg */
-#define CMD1_MA(x) (((x) & 0x7) << 0)
-#define CMD1_TWOSCMP (1 << 3)
-#define CMD1_GAIN(x) (((x) & 0x7) << 4)
-#define CMD1_SCANEN (1 << 7)
-#define CMD2_REG 0x01 /* W: Command 2 reg */
-#define CMD2_PRETRIG (1 << 0)
-#define CMD2_HWTRIG (1 << 1)
-#define CMD2_SWTRIG (1 << 2)
-#define CMD2_TBSEL (1 << 3)
-#define CMD2_2SDAC0 (1 << 4)
-#define CMD2_2SDAC1 (1 << 5)
-#define CMD2_LDAC(x) (1 << (6 + (x)))
-#define CMD3_REG 0x02 /* W: Command 3 reg */
-#define CMD3_DMAEN (1 << 0)
-#define CMD3_DIOINTEN (1 << 1)
-#define CMD3_DMATCINTEN (1 << 2)
-#define CMD3_CNTINTEN (1 << 3)
-#define CMD3_ERRINTEN (1 << 4)
-#define CMD3_FIFOINTEN (1 << 5)
-#define ADC_START_CONVERT_REG 0x03 /* W: Start Convert reg */
-#define DAC_LSB_REG(x) (0x04 + 2 * (x)) /* W: DAC0/1 LSB reg */
-#define DAC_MSB_REG(x) (0x05 + 2 * (x)) /* W: DAC0/1 MSB reg */
-#define ADC_FIFO_CLEAR_REG 0x08 /* W: A/D FIFO Clear reg */
-#define ADC_FIFO_REG 0x0a /* R: A/D FIFO reg */
-#define DMATC_CLEAR_REG 0x0a /* W: DMA Interrupt Clear reg */
-#define TIMER_CLEAR_REG 0x0c /* W: Timer Interrupt Clear reg */
-#define CMD6_REG 0x0e /* W: Command 6 reg */
-#define CMD6_NRSE (1 << 0)
-#define CMD6_ADCUNI (1 << 1)
-#define CMD6_DACUNI(x) (1 << (2 + (x)))
-#define CMD6_HFINTEN (1 << 5)
-#define CMD6_DQINTEN (1 << 6)
-#define CMD6_SCANUP (1 << 7)
-#define CMD4_REG 0x0f /* W: Command 3 reg */
-#define CMD4_INTSCAN (1 << 0)
-#define CMD4_EOIRCV (1 << 1)
-#define CMD4_ECLKDRV (1 << 2)
-#define CMD4_SEDIFF (1 << 3)
-#define CMD4_ECLKRCV (1 << 4)
-#define DIO_BASE_REG 0x10 /* R/W: 8255 DIO base reg */
-#define COUNTER_A_BASE_REG 0x14 /* R/W: 8253 Counter A base reg */
-#define COUNTER_B_BASE_REG 0x18 /* R/W: 8253 Counter B base reg */
-#define CMD5_REG 0x1c /* W: Command 5 reg */
-#define CMD5_WRTPRT (1 << 2)
-#define CMD5_DITHEREN (1 << 3)
-#define CMD5_CALDACLD (1 << 4)
-#define CMD5_SCLK (1 << 5)
-#define CMD5_SDATA (1 << 6)
-#define CMD5_EEPROMCS (1 << 7)
-#define STAT2_REG 0x1d /* R: Status 2 reg */
-#define STAT2_PROMOUT (1 << 0)
-#define STAT2_OUTA1 (1 << 1)
-#define STAT2_FIFONHF (1 << 2)
-#define INTERVAL_COUNT_REG 0x1e /* W: Interval Counter Data reg */
-#define INTERVAL_STROBE_REG 0x1f /* W: Interval Counter Strobe reg */
+#include "ni_labpc_regs.h"
+#include "ni_labpc_isadma.h"
#define LABPC_SIZE 0x20 /* size of ISA io region */
#define LABPC_TIMER_BASE 500 /* 2 MHz master clock */
@@ -239,11 +173,6 @@ static const struct labpc_boardinfo labpc_boards[] = {
};
#endif
-/* size in bytes of dma buffer */
-static const int dma_buffer_size = 0xff00;
-/* 2 bytes per sample */
-static const int sample_size = 2;
-
static int labpc_counter_load(struct comedi_device *dev,
unsigned long base_address,
unsigned int counter_number,
@@ -451,32 +380,6 @@ static int labpc_ai_insn_read(struct comedi_device *dev,
return insn->n;
}
-#ifdef CONFIG_ISA_DMA_API
-/* utility function that suggests a dma transfer size in bytes */
-static unsigned int labpc_suggest_transfer_size(const struct comedi_cmd *cmd)
-{
- unsigned int size;
- unsigned int freq;
-
- if (cmd->convert_src == TRIG_TIMER)
- freq = 1000000000 / cmd->convert_arg;
- /* return some default value */
- else
- freq = 0xffffffff;
-
- /* make buffer fill in no more than 1/3 second */
- size = (freq / 3) * sample_size;
-
- /* set a minimum and maximum size allowed */
- if (size > dma_buffer_size)
- size = dma_buffer_size - dma_buffer_size % sample_size;
- else if (size < sample_size)
- size = sample_size;
-
- return size;
-}
-#endif
-
static bool labpc_use_continuous_mode(const struct comedi_cmd *cmd,
enum scan_mode mode)
{
@@ -869,25 +772,20 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
return ret;
}
-#ifdef CONFIG_ISA_DMA_API
- /* figure out what method we will use to transfer data */
- if (devpriv->dma_chan && /* need a dma channel allocated */
- /*
- * dma unsafe at RT priority,
- * and too much setup time for TRIG_WAKE_EOS for
- */
- (cmd->flags & (TRIG_WAKE_EOS | TRIG_RT)) == 0) {
+ /* figure out what method we will use to transfer data */
+ if (labpc_have_dma_chan(dev) &&
+ /* dma unsafe at RT priority,
+ * and too much setup time for TRIG_WAKE_EOS */
+ (cmd->flags & (TRIG_WAKE_EOS | TRIG_RT)) == 0)
xfer = isa_dma_transfer;
- /* pc-plus has no fifo-half full interrupt */
- } else
-#endif
- if (board->is_labpc1200 &&
- /* wake-end-of-scan should interrupt on fifo not empty */
- (cmd->flags & TRIG_WAKE_EOS) == 0 &&
- /* make sure we are taking more than just a few points */
- (cmd->stop_src != TRIG_COUNT || devpriv->count > 256)) {
+ else if (/* pc-plus has no fifo-half full interrupt */
+ board->is_labpc1200 &&
+ /* wake-end-of-scan should interrupt on fifo not empty */
+ (cmd->flags & TRIG_WAKE_EOS) == 0 &&
+ /* make sure we are taking more than just a few points */
+ (cmd->stop_src != TRIG_COUNT || devpriv->count > 256))
xfer = fifo_half_full_transfer;
- } else
+ else
xfer = fifo_not_empty_transfer;
devpriv->current_transfer = xfer;
@@ -952,40 +850,14 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
labpc_clear_adc_fifo(dev);
-#ifdef CONFIG_ISA_DMA_API
- /* set up dma transfer */
- if (xfer == isa_dma_transfer) {
- unsigned long irq_flags;
-
- irq_flags = claim_dma_lock();
- disable_dma(devpriv->dma_chan);
- /* clear flip-flop to make sure 2-byte registers for
- * count and address get set correctly */
- clear_dma_ff(devpriv->dma_chan);
- set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
- /* set appropriate size of transfer */
- devpriv->dma_transfer_size = labpc_suggest_transfer_size(cmd);
- if (cmd->stop_src == TRIG_COUNT &&
- devpriv->count * sample_size < devpriv->dma_transfer_size) {
- devpriv->dma_transfer_size =
- devpriv->count * sample_size;
- }
- set_dma_count(devpriv->dma_chan, devpriv->dma_transfer_size);
- enable_dma(devpriv->dma_chan);
- release_dma_lock(irq_flags);
- /* enable board's dma */
- devpriv->cmd3 |= (CMD3_DMAEN | CMD3_DMATCINTEN);
- } else
- devpriv->cmd3 &= ~(CMD3_DMAEN | CMD3_DMATCINTEN);
-#endif
+ if (xfer == isa_dma_transfer)
+ labpc_setup_dma(dev, s);
/* enable error interrupts */
devpriv->cmd3 |= CMD3_ERRINTEN;
/* enable fifo not empty interrupt? */
if (xfer == fifo_not_empty_transfer)
devpriv->cmd3 |= CMD3_FIFOINTEN;
- else
- devpriv->cmd3 &= ~CMD3_FIFOINTEN;
devpriv->write_byte(devpriv->cmd3, dev->iobase + CMD3_REG);
/* setup any external triggering/pacing (cmd4 register) */
@@ -1026,74 +898,6 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
return 0;
}
-#ifdef CONFIG_ISA_DMA_API
-static void labpc_drain_dma(struct comedi_device *dev)
-{
- struct labpc_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->read_subdev;
- struct comedi_async *async = s->async;
- int status;
- unsigned long flags;
- unsigned int max_points, num_points, residue, leftover;
- int i;
-
- status = devpriv->stat1;
-
- flags = claim_dma_lock();
- disable_dma(devpriv->dma_chan);
- /* clear flip-flop to make sure 2-byte registers for
- * count and address get set correctly */
- clear_dma_ff(devpriv->dma_chan);
-
- /* figure out how many points to read */
- max_points = devpriv->dma_transfer_size / sample_size;
- /* residue is the number of points left to be done on the dma
- * transfer. It should always be zero at this point unless
- * the stop_src is set to external triggering.
- */
- residue = get_dma_residue(devpriv->dma_chan) / sample_size;
- num_points = max_points - residue;
- if (devpriv->count < num_points && async->cmd.stop_src == TRIG_COUNT)
- num_points = devpriv->count;
-
- /* figure out how many points will be stored next time */
- leftover = 0;
- if (async->cmd.stop_src != TRIG_COUNT) {
- leftover = devpriv->dma_transfer_size / sample_size;
- } else if (devpriv->count > num_points) {
- leftover = devpriv->count - num_points;
- if (leftover > max_points)
- leftover = max_points;
- }
-
- /* write data to comedi buffer */
- for (i = 0; i < num_points; i++)
- cfc_write_to_buffer(s, devpriv->dma_buffer[i]);
-
- if (async->cmd.stop_src == TRIG_COUNT)
- devpriv->count -= num_points;
-
- /* set address and count for next transfer */
- set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
- set_dma_count(devpriv->dma_chan, leftover * sample_size);
- release_dma_lock(flags);
-
- async->events |= COMEDI_CB_BLOCK;
-}
-
-static void handle_isa_dma(struct comedi_device *dev)
-{
- struct labpc_private *devpriv = dev->private;
-
- labpc_drain_dma(dev);
-
- enable_dma(devpriv->dma_chan);
-
- /* clear dma tc interrupt */
- devpriv->write_byte(0x1, dev->iobase + DMATC_CLEAR_REG);
-}
-#endif
-
/* read all available samples from ai fifo */
static int labpc_drain_fifo(struct comedi_device *dev)
{
@@ -1130,12 +934,10 @@ static int labpc_drain_fifo(struct comedi_device *dev)
* when acquisition is terminated by stop_src == TRIG_EXT). */
static void labpc_drain_dregs(struct comedi_device *dev)
{
-#ifdef CONFIG_ISA_DMA_API
struct labpc_private *devpriv = dev->private;
if (devpriv->current_transfer == isa_dma_transfer)
labpc_drain_dma(dev);
-#endif
labpc_drain_fifo(dev);
}
@@ -1180,18 +982,9 @@ static irqreturn_t labpc_interrupt(int irq, void *d)
return IRQ_HANDLED;
}
-#ifdef CONFIG_ISA_DMA_API
- if (devpriv->current_transfer == isa_dma_transfer) {
- /*
- * if a dma terminal count of external stop trigger
- * has occurred
- */
- if (devpriv->stat1 & STAT1_GATA0 ||
- (board->is_labpc1200 && devpriv->stat2 & STAT2_OUTA1)) {
- handle_isa_dma(dev);
- }
- } else
-#endif
+ if (devpriv->current_transfer == isa_dma_transfer)
+ labpc_handle_dma_status(dev);
+ else
labpc_drain_fifo(dev);
if (devpriv->stat1 & STAT1_CNTINT) {
@@ -1697,10 +1490,9 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
unsigned int dma_chan = it->options[2];
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_request_region(dev, it->options[0], LABPC_SIZE);
if (ret)
@@ -1710,29 +1502,8 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
-#ifdef CONFIG_ISA_DMA_API
- if (dev->irq && (dma_chan == 1 || dma_chan == 3)) {
- devpriv->dma_buffer = kmalloc(dma_buffer_size,
- GFP_KERNEL | GFP_DMA);
- if (devpriv->dma_buffer) {
- ret = request_dma(dma_chan, dev->board_name);
- if (ret == 0) {
- unsigned long dma_flags;
-
- devpriv->dma_chan = dma_chan;
- devpriv->dma_addr =
- virt_to_bus(devpriv->dma_buffer);
-
- dma_flags = claim_dma_lock();
- disable_dma(devpriv->dma_chan);
- set_dma_mode(devpriv->dma_chan, DMA_MODE_READ);
- release_dma_lock(dma_flags);
- } else {
- kfree(devpriv->dma_buffer);
- }
- }
- }
-#endif
+ if (dev->irq)
+ labpc_init_dma_chan(dev, dma_chan);
return 0;
}
@@ -1741,11 +1512,9 @@ static void labpc_detach(struct comedi_device *dev)
{
struct labpc_private *devpriv = dev->private;
- if (devpriv) {
- kfree(devpriv->dma_buffer);
- if (devpriv->dma_chan)
- free_dma(devpriv->dma_chan);
- }
+ if (devpriv)
+ labpc_free_dma_chan(dev);
+
comedi_legacy_detach(dev);
}
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c
index ce67f4bbb1f..0a8b3223f74 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c
@@ -53,10 +53,10 @@ NI manuals:
*/
+#include <linux/module.h>
#include "../comedidev.h"
#include <linux/delay.h>
-#include <linux/slab.h>
#include "8253.h"
#include "8255.h"
@@ -96,10 +96,9 @@ static int labpc_auto_attach(struct comedi_device *dev,
if (!link->irq)
return -EINVAL;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
return labpc_common_attach(dev, link->irq, IRQF_SHARED);
}
diff --git a/drivers/staging/comedi/drivers/ni_labpc_isadma.c b/drivers/staging/comedi/drivers/ni_labpc_isadma.c
new file mode 100644
index 00000000000..2149596830a
--- /dev/null
+++ b/drivers/staging/comedi/drivers/ni_labpc_isadma.c
@@ -0,0 +1,226 @@
+/*
+ * comedi/drivers/ni_labpc_isadma.c
+ * ISA DMA support for National Instruments Lab-PC series boards and
+ * compatibles.
+ *
+ * Extracted from ni_labpc.c:
+ * Copyright (C) 2001-2003 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "../comedidev.h"
+
+#include <asm/dma.h>
+
+#include "comedi_fc.h"
+#include "ni_labpc.h"
+#include "ni_labpc_regs.h"
+#include "ni_labpc_isadma.h"
+
+/* size in bytes of dma buffer */
+static const int dma_buffer_size = 0xff00;
+/* 2 bytes per sample */
+static const int sample_size = 2;
+
+/* utility function that suggests a dma transfer size in bytes */
+static unsigned int labpc_suggest_transfer_size(const struct comedi_cmd *cmd)
+{
+ unsigned int size;
+ unsigned int freq;
+
+ if (cmd->convert_src == TRIG_TIMER)
+ freq = 1000000000 / cmd->convert_arg;
+ else
+ /* return some default value */
+ freq = 0xffffffff;
+
+ /* make buffer fill in no more than 1/3 second */
+ size = (freq / 3) * sample_size;
+
+ /* set a minimum and maximum size allowed */
+ if (size > dma_buffer_size)
+ size = dma_buffer_size - dma_buffer_size % sample_size;
+ else if (size < sample_size)
+ size = sample_size;
+
+ return size;
+}
+
+void labpc_setup_dma(struct comedi_device *dev, struct comedi_subdevice *s)
+{
+ struct labpc_private *devpriv = dev->private;
+ struct comedi_cmd *cmd = &s->async->cmd;
+ unsigned long irq_flags;
+
+ irq_flags = claim_dma_lock();
+ disable_dma(devpriv->dma_chan);
+ /* clear flip-flop to make sure 2-byte registers for
+ * count and address get set correctly */
+ clear_dma_ff(devpriv->dma_chan);
+ set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
+ /* set appropriate size of transfer */
+ devpriv->dma_transfer_size = labpc_suggest_transfer_size(cmd);
+ if (cmd->stop_src == TRIG_COUNT &&
+ devpriv->count * sample_size < devpriv->dma_transfer_size)
+ devpriv->dma_transfer_size = devpriv->count * sample_size;
+ set_dma_count(devpriv->dma_chan, devpriv->dma_transfer_size);
+ enable_dma(devpriv->dma_chan);
+ release_dma_lock(irq_flags);
+ /* set CMD3 bits for caller to enable DMA and interrupt */
+ devpriv->cmd3 |= (CMD3_DMAEN | CMD3_DMATCINTEN);
+}
+EXPORT_SYMBOL_GPL(labpc_setup_dma);
+
+void labpc_drain_dma(struct comedi_device *dev)
+{
+ struct labpc_private *devpriv = dev->private;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct comedi_async *async = s->async;
+ int status;
+ unsigned long flags;
+ unsigned int max_points, num_points, residue, leftover;
+ int i;
+
+ status = devpriv->stat1;
+
+ flags = claim_dma_lock();
+ disable_dma(devpriv->dma_chan);
+ /* clear flip-flop to make sure 2-byte registers for
+ * count and address get set correctly */
+ clear_dma_ff(devpriv->dma_chan);
+
+ /* figure out how many points to read */
+ max_points = devpriv->dma_transfer_size / sample_size;
+ /* residue is the number of points left to be done on the dma
+ * transfer. It should always be zero at this point unless
+ * the stop_src is set to external triggering.
+ */
+ residue = get_dma_residue(devpriv->dma_chan) / sample_size;
+ num_points = max_points - residue;
+ if (devpriv->count < num_points && async->cmd.stop_src == TRIG_COUNT)
+ num_points = devpriv->count;
+
+ /* figure out how many points will be stored next time */
+ leftover = 0;
+ if (async->cmd.stop_src != TRIG_COUNT) {
+ leftover = devpriv->dma_transfer_size / sample_size;
+ } else if (devpriv->count > num_points) {
+ leftover = devpriv->count - num_points;
+ if (leftover > max_points)
+ leftover = max_points;
+ }
+
+ /* write data to comedi buffer */
+ for (i = 0; i < num_points; i++)
+ cfc_write_to_buffer(s, devpriv->dma_buffer[i]);
+
+ if (async->cmd.stop_src == TRIG_COUNT)
+ devpriv->count -= num_points;
+
+ /* set address and count for next transfer */
+ set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
+ set_dma_count(devpriv->dma_chan, leftover * sample_size);
+ release_dma_lock(flags);
+
+ async->events |= COMEDI_CB_BLOCK;
+}
+EXPORT_SYMBOL_GPL(labpc_drain_dma);
+
+static void handle_isa_dma(struct comedi_device *dev)
+{
+ struct labpc_private *devpriv = dev->private;
+
+ labpc_drain_dma(dev);
+
+ enable_dma(devpriv->dma_chan);
+
+ /* clear dma tc interrupt */
+ devpriv->write_byte(0x1, dev->iobase + DMATC_CLEAR_REG);
+}
+
+void labpc_handle_dma_status(struct comedi_device *dev)
+{
+ const struct labpc_boardinfo *board = comedi_board(dev);
+ struct labpc_private *devpriv = dev->private;
+
+ /*
+ * if a dma terminal count or external stop trigger
+ * has occurred
+ */
+ if (devpriv->stat1 & STAT1_GATA0 ||
+ (board->is_labpc1200 && devpriv->stat2 & STAT2_OUTA1))
+ handle_isa_dma(dev);
+}
+EXPORT_SYMBOL_GPL(labpc_handle_dma_status);
+
+int labpc_init_dma_chan(struct comedi_device *dev, unsigned int dma_chan)
+{
+ struct labpc_private *devpriv = dev->private;
+ void *dma_buffer;
+ unsigned long dma_flags;
+ int ret;
+
+ if (dma_chan != 1 && dma_chan != 3)
+ return -EINVAL;
+
+ dma_buffer = kmalloc(dma_buffer_size, GFP_KERNEL | GFP_DMA);
+ if (!dma_buffer)
+ return -ENOMEM;
+
+ ret = request_dma(dma_chan, dev->board_name);
+ if (ret) {
+ kfree(dma_buffer);
+ return ret;
+ }
+
+ devpriv->dma_buffer = dma_buffer;
+ devpriv->dma_chan = dma_chan;
+ devpriv->dma_addr = virt_to_bus(devpriv->dma_buffer);
+
+ dma_flags = claim_dma_lock();
+ disable_dma(devpriv->dma_chan);
+ set_dma_mode(devpriv->dma_chan, DMA_MODE_READ);
+ release_dma_lock(dma_flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(labpc_init_dma_chan);
+
+void labpc_free_dma_chan(struct comedi_device *dev)
+{
+ struct labpc_private *devpriv = dev->private;
+
+ kfree(devpriv->dma_buffer);
+ devpriv->dma_buffer = NULL;
+ if (devpriv->dma_chan) {
+ free_dma(devpriv->dma_chan);
+ devpriv->dma_chan = 0;
+ }
+}
+EXPORT_SYMBOL_GPL(labpc_free_dma_chan);
+
+static int __init ni_labpc_isadma_init_module(void)
+{
+ return 0;
+}
+module_init(ni_labpc_isadma_init_module);
+
+static void __exit ni_labpc_isadma_cleanup_module(void)
+{
+}
+module_exit(ni_labpc_isadma_cleanup_module);
+
+MODULE_AUTHOR("Comedi http://www.comedi.org");
+MODULE_DESCRIPTION("Comedi NI Lab-PC ISA DMA support");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_labpc_isadma.h b/drivers/staging/comedi/drivers/ni_labpc_isadma.h
new file mode 100644
index 00000000000..771af4bd5a7
--- /dev/null
+++ b/drivers/staging/comedi/drivers/ni_labpc_isadma.h
@@ -0,0 +1,57 @@
+/*
+ * ni_labpc ISA DMA support.
+*/
+
+#ifndef _NI_LABPC_ISADMA_H
+#define _NI_LABPC_ISADMA_H
+
+#define NI_LABPC_HAVE_ISA_DMA IS_ENABLED(CONFIG_COMEDI_NI_LABPC_ISADMA)
+
+#if NI_LABPC_HAVE_ISA_DMA
+
+static inline bool labpc_have_dma_chan(struct comedi_device *dev)
+{
+ struct labpc_private *devpriv = dev->private;
+
+ return (bool)devpriv->dma_chan;
+}
+
+int labpc_init_dma_chan(struct comedi_device *dev, unsigned int dma_chan);
+void labpc_free_dma_chan(struct comedi_device *dev);
+void labpc_setup_dma(struct comedi_device *dev, struct comedi_subdevice *s);
+void labpc_drain_dma(struct comedi_device *dev);
+void labpc_handle_dma_status(struct comedi_device *dev);
+
+#else
+
+static inline bool labpc_have_dma_chan(struct comedi_device *dev)
+{
+ return false;
+}
+
+static inline int labpc_init_dma_chan(struct comedi_device *dev,
+ unsigned int dma_chan)
+{
+ return -ENOTSUPP;
+}
+
+static inline void labpc_free_dma_chan(struct comedi_device *dev)
+{
+}
+
+static inline void labpc_setup_dma(struct comedi_device *dev,
+ struct comedi_subdevice *s)
+{
+}
+
+static inline void labpc_drain_dma(struct comedi_device *dev)
+{
+}
+
+static inline void labpc_handle_dma_status(struct comedi_device *dev)
+{
+}
+
+#endif
+
+#endif /* _NI_LABPC_ISADMA_H */
diff --git a/drivers/staging/comedi/drivers/ni_labpc_pci.c b/drivers/staging/comedi/drivers/ni_labpc_pci.c
index 6c79237b2b5..8be681fca90 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_pci.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_pci.c
@@ -29,8 +29,8 @@
* 340914a (pci-1200)
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/slab.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -72,10 +72,9 @@ static int labpc_pci_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
devpriv->mite = mite_alloc(pcidev);
if (!devpriv->mite)
diff --git a/drivers/staging/comedi/drivers/ni_labpc_regs.h b/drivers/staging/comedi/drivers/ni_labpc_regs.h
new file mode 100644
index 00000000000..2a274a3e4e7
--- /dev/null
+++ b/drivers/staging/comedi/drivers/ni_labpc_regs.h
@@ -0,0 +1,75 @@
+/*
+ * ni_labpc register definitions.
+*/
+
+#ifndef _NI_LABPC_REGS_H
+#define _NI_LABPC_REGS_H
+
+/*
+ * Register map (all registers are 8-bit)
+ */
+#define STAT1_REG 0x00 /* R: Status 1 reg */
+#define STAT1_DAVAIL (1 << 0)
+#define STAT1_OVERRUN (1 << 1)
+#define STAT1_OVERFLOW (1 << 2)
+#define STAT1_CNTINT (1 << 3)
+#define STAT1_GATA0 (1 << 5)
+#define STAT1_EXTGATA0 (1 << 6)
+#define CMD1_REG 0x00 /* W: Command 1 reg */
+#define CMD1_MA(x) (((x) & 0x7) << 0)
+#define CMD1_TWOSCMP (1 << 3)
+#define CMD1_GAIN(x) (((x) & 0x7) << 4)
+#define CMD1_SCANEN (1 << 7)
+#define CMD2_REG 0x01 /* W: Command 2 reg */
+#define CMD2_PRETRIG (1 << 0)
+#define CMD2_HWTRIG (1 << 1)
+#define CMD2_SWTRIG (1 << 2)
+#define CMD2_TBSEL (1 << 3)
+#define CMD2_2SDAC0 (1 << 4)
+#define CMD2_2SDAC1 (1 << 5)
+#define CMD2_LDAC(x) (1 << (6 + (x)))
+#define CMD3_REG 0x02 /* W: Command 3 reg */
+#define CMD3_DMAEN (1 << 0)
+#define CMD3_DIOINTEN (1 << 1)
+#define CMD3_DMATCINTEN (1 << 2)
+#define CMD3_CNTINTEN (1 << 3)
+#define CMD3_ERRINTEN (1 << 4)
+#define CMD3_FIFOINTEN (1 << 5)
+#define ADC_START_CONVERT_REG 0x03 /* W: Start Convert reg */
+#define DAC_LSB_REG(x) (0x04 + 2 * (x)) /* W: DAC0/1 LSB reg */
+#define DAC_MSB_REG(x) (0x05 + 2 * (x)) /* W: DAC0/1 MSB reg */
+#define ADC_FIFO_CLEAR_REG 0x08 /* W: A/D FIFO Clear reg */
+#define ADC_FIFO_REG 0x0a /* R: A/D FIFO reg */
+#define DMATC_CLEAR_REG 0x0a /* W: DMA Interrupt Clear reg */
+#define TIMER_CLEAR_REG 0x0c /* W: Timer Interrupt Clear reg */
+#define CMD6_REG 0x0e /* W: Command 6 reg */
+#define CMD6_NRSE (1 << 0)
+#define CMD6_ADCUNI (1 << 1)
+#define CMD6_DACUNI(x) (1 << (2 + (x)))
+#define CMD6_HFINTEN (1 << 5)
+#define CMD6_DQINTEN (1 << 6)
+#define CMD6_SCANUP (1 << 7)
+#define CMD4_REG 0x0f /* W: Command 4 reg */
+#define CMD4_INTSCAN (1 << 0)
+#define CMD4_EOIRCV (1 << 1)
+#define CMD4_ECLKDRV (1 << 2)
+#define CMD4_SEDIFF (1 << 3)
+#define CMD4_ECLKRCV (1 << 4)
+#define DIO_BASE_REG 0x10 /* R/W: 8255 DIO base reg */
+#define COUNTER_A_BASE_REG 0x14 /* R/W: 8253 Counter A base reg */
+#define COUNTER_B_BASE_REG 0x18 /* R/W: 8253 Counter B base reg */
+#define CMD5_REG 0x1c /* W: Command 5 reg */
+#define CMD5_WRTPRT (1 << 2)
+#define CMD5_DITHEREN (1 << 3)
+#define CMD5_CALDACLD (1 << 4)
+#define CMD5_SCLK (1 << 5)
+#define CMD5_SDATA (1 << 6)
+#define CMD5_EEPROMCS (1 << 7)
+#define STAT2_REG 0x1d /* R: Status 2 reg */
+#define STAT2_PROMOUT (1 << 0)
+#define STAT2_OUTA1 (1 << 1)
+#define STAT2_FIFONHF (1 << 2)
+#define INTERVAL_COUNT_REG 0x1e /* W: Interval Counter Data reg */
+#define INTERVAL_STROBE_REG 0x1f /* W: Interval Counter Strobe reg */
+
+#endif /* _NI_LABPC_REGS_H */
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index 3e9f544e67f..4e02770e834 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -58,6 +58,7 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
+#include <linux/delay.h>
#include "8255.h"
#include "mite.h"
#include "comedi_fc.h"
@@ -3527,37 +3528,21 @@ static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s)
static int ni_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct ni_private *devpriv = dev->private;
+ int ret;
-#ifdef DEBUG_DIO
- printk("ni_dio_insn_config() chan=%d io=%d\n",
- CR_CHAN(insn->chanspec), data[0]);
-#endif
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= 1 << CR_CHAN(insn->chanspec);
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~(1 << CR_CHAN(insn->chanspec));
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (s->
- io_bits & (1 << CR_CHAN(insn->chanspec))) ? COMEDI_OUTPUT :
- COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- }
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
devpriv->dio_control &= ~DIO_Pins_Dir_Mask;
devpriv->dio_control |= DIO_Pins_Dir(s->io_bits);
devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register);
- return 1;
+ return insn->n;
}
static int ni_dio_insn_bits(struct comedi_device *dev,
@@ -3595,32 +3580,15 @@ static int ni_m_series_dio_insn_config(struct comedi_device *dev,
unsigned int *data)
{
struct ni_private *devpriv __maybe_unused = dev->private;
+ int ret;
-#ifdef DEBUG_DIO
- printk("ni_m_series_dio_insn_config() chan=%d io=%d\n",
- CR_CHAN(insn->chanspec), data[0]);
-#endif
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= 1 << CR_CHAN(insn->chanspec);
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~(1 << CR_CHAN(insn->chanspec));
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (s->
- io_bits & (1 << CR_CHAN(insn->chanspec))) ? COMEDI_OUTPUT :
- COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- }
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
ni_writel(s->io_bits, M_Offset_DIO_Direction);
- return 1;
+ return insn->n;
}
static int ni_m_series_dio_insn_bits(struct comedi_device *dev,
@@ -4363,10 +4331,9 @@ static int ni_alloc_private(struct comedi_device *dev)
{
struct ni_private *devpriv;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
spin_lock_init(&devpriv->window_lock);
spin_lock_init(&devpriv->soft_reg_copy_lock);
diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c
index f813f576367..229a273f201 100644
--- a/drivers/staging/comedi/drivers/ni_mio_cs.c
+++ b/drivers/staging/comedi/drivers/ni_mio_cs.c
@@ -36,6 +36,7 @@ See the notes in the ni_atmio.o driver.
*/
+#include <linux/module.h>
#include "../comedidev.h"
#include <linux/delay.h>
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 5b2f72e102e..fad81bc97b6 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -50,6 +50,7 @@ comedi_nonfree_firmware tarball available from http://www.comedi.org
/* #define DEBUG 1 */
/* #define DEBUG_FLAGS */
+#include <linux/module.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
@@ -639,32 +640,19 @@ static void debug_int(struct comedi_device *dev)
static int ni_pcidio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
struct nidio96_private *devpriv = dev->private;
+ int ret;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
- if (insn->n != 1)
- return -EINVAL;
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= 1 << CR_CHAN(insn->chanspec);
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~(1 << CR_CHAN(insn->chanspec));
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (s->
- io_bits & (1 << CR_CHAN(insn->chanspec))) ? COMEDI_OUTPUT :
- COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- }
writel(s->io_bits, devpriv->mite->daq_io_addr + Port_Pin_Directions(0));
- return 1;
+ return insn->n;
}
static int ni_pcidio_insn_bits(struct comedi_device *dev,
@@ -1108,10 +1096,9 @@ static int nidio_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
spin_lock_init(&devpriv->mite_channel_lock);
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 35681ba1f36..536be83af54 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -106,6 +106,7 @@ Bugs:
*/
+#include <linux/module.h>
#include <linux/delay.h>
#include "../comedidev.h"
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index f2cf76d15d7..9b120c77d83 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -44,6 +44,9 @@ TODO:
Support use of both banks X and Y
*/
+#include <linux/module.h>
+#include <linux/slab.h>
+
#include "ni_tio_internal.h"
static uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter,
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
index cff50bc45bc..45691efefd0 100644
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
@@ -44,6 +44,7 @@ TODO:
Support use of both banks X and Y
*/
+#include <linux/module.h>
#include "comedi_fc.h"
#include "ni_tio_internal.h"
#include "mite.h"
diff --git a/drivers/staging/comedi/drivers/pcl711.c b/drivers/staging/comedi/drivers/pcl711.c
index 7abf3f74144..e859f85a8e1 100644
--- a/drivers/staging/comedi/drivers/pcl711.c
+++ b/drivers/staging/comedi/drivers/pcl711.c
@@ -53,10 +53,10 @@ supported.
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include <linux/delay.h>
#include "comedi_fc.h"
@@ -474,10 +474,9 @@ static int pcl711_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
s = &dev->subdevices[0];
/* AI subdevice */
diff --git a/drivers/staging/comedi/drivers/pcl724.c b/drivers/staging/comedi/drivers/pcl724.c
index cea657c7801..8af13e790ad 100644
--- a/drivers/staging/comedi/drivers/pcl724.c
+++ b/drivers/staging/comedi/drivers/pcl724.c
@@ -1,6 +1,6 @@
/*
* pcl724.c
- * Comedi driver for 8255 based ISA DIO boards
+ * Comedi driver for 8255 based ISA and PC/104 DIO boards
*
* Michal Dobes <dobes@tesnet.cz>
*/
@@ -14,6 +14,7 @@
* (ADLink) ACL-7122 [acl7122]
* (ADLink) ACL-7124 [acl7124]
* (ADLink) PET-48DIO [pet48dio]
+ * (WinSystems) PCM-IO48 [pcmio48]
* Author: Michal Dobes <dobes@tesnet.cz>
* Status: untested
*
@@ -25,11 +26,9 @@
* 1, 96: 96 DIO configuration
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-#include <linux/delay.h>
-
#include "8255.h"
#define SIZE_8255 4
@@ -70,6 +69,10 @@ static const struct pcl724_board boardtypes[] = {
.io_range = 0x02,
.is_pet48 = 1,
.numofports = 2, /* 48 DIO channels */
+ }, {
+ .name = "pcmio48",
+ .io_range = 0x08,
+ .numofports = 2, /* 48 DIO channels */
},
};
@@ -148,5 +151,5 @@ static struct comedi_driver pcl724_driver = {
module_comedi_driver(pcl724_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi driver for 8255 based ISA DIO boards");
+MODULE_DESCRIPTION("Comedi driver for 8255 based ISA and PC/104 DIO boards");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/pcl726.c b/drivers/staging/comedi/drivers/pcl726.c
index 893f012a1b7..a4d0bcc31e5 100644
--- a/drivers/staging/comedi/drivers/pcl726.c
+++ b/drivers/staging/comedi/drivers/pcl726.c
@@ -62,10 +62,9 @@ Interrupts are not supported.
their web page. (http://www.cir.com/)
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
#undef ACL6126_IRQ /* no interrupt support (yet) */
#define PCL726_SIZE 16
@@ -229,10 +228,9 @@ static int pcl726_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
for (i = 0; i < 12; i++) {
devpriv->bipolar[i] = 0;
diff --git a/drivers/staging/comedi/drivers/pcl730.c b/drivers/staging/comedi/drivers/pcl730.c
index 862e75fd68f..2a659f23ecd 100644
--- a/drivers/staging/comedi/drivers/pcl730.c
+++ b/drivers/staging/comedi/drivers/pcl730.c
@@ -27,10 +27,9 @@
* The ACL-7130 card has an 8254 timer/counter not supported by this driver.
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
/*
* Register map
*
diff --git a/drivers/staging/comedi/drivers/pcl812.c b/drivers/staging/comedi/drivers/pcl812.c
index cd02786702c..03a098900d3 100644
--- a/drivers/staging/comedi/drivers/pcl812.c
+++ b/drivers/staging/comedi/drivers/pcl812.c
@@ -108,12 +108,12 @@
* 3= 20V unipolar inputs
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include "../comedidev.h"
#include <linux/delay.h>
-#include <linux/ioport.h>
#include <linux/io.h>
#include <asm/dma.h>
@@ -1110,10 +1110,9 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
irq = 0;
if (board->IRQbits != 0) { /* board support IRQ */
@@ -1405,6 +1404,7 @@ no_dma:
if (it->options[3] > 0)
/* we use external trigger */
devpriv->use_ext_trg = 1;
+ break;
case boardA821:
devpriv->max_812_ai_mode0_rangewait = 1;
devpriv->mode_reg_int = (irq << 4) & 0xf0;
diff --git a/drivers/staging/comedi/drivers/pcl816.c b/drivers/staging/comedi/drivers/pcl816.c
index 91bd2071f57..f0313496259 100644
--- a/drivers/staging/comedi/drivers/pcl816.c
+++ b/drivers/staging/comedi/drivers/pcl816.c
@@ -32,9 +32,9 @@ Configuration Options:
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -922,10 +922,9 @@ static int pcl816_attach(struct comedi_device *dev, struct comedi_devconfig *it)
return -EIO;
}
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
/* grab our IRQ */
irq = 0;
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index 91cb1bd6717..a52ba82ff0e 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -98,7 +98,7 @@ A word or two about DMA. The driver supports DMA operations in two ways:
*/
-#include <linux/ioport.h>
+#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -1227,10 +1227,9 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
unsigned long pages;
struct comedi_subdevice *s;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
devpriv->io_range = board->io_range;
if ((board->fifo) && (it->options[2] == -1)) {
diff --git a/drivers/staging/comedi/drivers/pcm3724.c b/drivers/staging/comedi/drivers/pcm3724.c
index 5a9cd38e15f..cc1dc7f66e5 100644
--- a/drivers/staging/comedi/drivers/pcm3724.c
+++ b/drivers/staging/comedi/drivers/pcm3724.c
@@ -28,11 +28,9 @@ Copy/pasted/hacked from pcm724.c
* struct comedi_insn
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-#include <linux/delay.h>
-
#include "8255.h"
#define PCM3724_SIZE 16
@@ -186,39 +184,30 @@ static void enable_chan(struct comedi_device *dev, struct comedi_subdevice *s,
/* overriding the 8255 insn config */
static int subdev_3724_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int mask;
- unsigned int bits;
-
- mask = 1 << CR_CHAN(insn->chanspec);
- if (mask & 0x0000ff)
- bits = 0x0000ff;
- else if (mask & 0x00ff00)
- bits = 0x00ff00;
- else if (mask & 0x0f0000)
- bits = 0x0f0000;
+ int ret;
+
+ if (chan < 8)
+ mask = 0x0000ff;
+ else if (chan < 16)
+ mask = 0x00ff00;
+ else if (chan < 20)
+ mask = 0x0f0000;
else
- bits = 0xf00000;
-
- switch (data[0]) {
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~bits;
- break;
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= bits;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & bits) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- }
+ mask = 0xf00000;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
do_3724_config(dev, s, insn->chanspec);
enable_chan(dev, s, insn->chanspec);
- return 1;
+
+ return insn->n;
}
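
Every insn_config rewrite in this series funnels the INSN_CONFIG_DIO_{INPUT,OUTPUT,QUERY} boilerplate through comedi_dio_insn_config(), whose body is not shown in this patch. A hypothetical sketch of the behaviour the call sites rely on, with a non-zero mask grouping channels the way pcm3724 does above:

/*
 * Sketch only, inferred from the call sites: a return of 0 means the
 * direction changed and the caller must program the hardware, insn->n
 * means the request was fully handled (the query case), and a negative
 * value is an error.
 */
static int example_dio_insn_config(struct comedi_device *dev,
				   struct comedi_subdevice *s,
				   struct comedi_insn *insn,
				   unsigned int *data,
				   unsigned int mask)
{
	unsigned int chan_mask = 1 << CR_CHAN(insn->chanspec);

	if (!mask)
		mask = chan_mask;	/* default: just the addressed channel */

	switch (data[0]) {
	case INSN_CONFIG_DIO_INPUT:
		s->io_bits &= ~mask;
		break;
	case INSN_CONFIG_DIO_OUTPUT:
		s->io_bits |= mask;
		break;
	case INSN_CONFIG_DIO_QUERY:
		data[1] = (s->io_bits & mask) ? COMEDI_OUTPUT : COMEDI_INPUT;
		return insn->n;
	default:
		return -EINVAL;
	}

	return 0;
}
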
static int pcm3724_attach(struct comedi_device *dev,
@@ -228,10 +217,9 @@ static int pcm3724_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret, i;
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = comedi_alloc_devpriv(dev, sizeof(*priv));
if (!priv)
return -ENOMEM;
- dev->private = priv;
ret = comedi_request_region(dev, it->options[0], PCM3724_SIZE);
if (ret)
diff --git a/drivers/staging/comedi/drivers/pcmad.c b/drivers/staging/comedi/drivers/pcmad.c
index d5c728dc619..423f23676d2 100644
--- a/drivers/staging/comedi/drivers/pcmad.c
+++ b/drivers/staging/comedi/drivers/pcmad.c
@@ -38,6 +38,7 @@
* 1 = two's complement (+-10V input range)
*/
+#include <linux/module.h>
#include "../comedidev.h"
#define PCMAD_STATUS 0
diff --git a/drivers/staging/comedi/drivers/pcmda12.c b/drivers/staging/comedi/drivers/pcmda12.c
index 774a63dfe04..1c7a135c91d 100644
--- a/drivers/staging/comedi/drivers/pcmda12.c
+++ b/drivers/staging/comedi/drivers/pcmda12.c
@@ -48,6 +48,7 @@
* [1] - Do Simultaneous Xfer (see description)
*/
+#include <linux/module.h>
#include "../comedidev.h"
/* AI range is not configurable, it's set by jumpers on the board */
@@ -138,10 +139,9 @@ static int pcmda12_attach(struct comedi_device *dev,
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
devpriv->simultaneous_xfer_mode = it->options[1];
diff --git a/drivers/staging/comedi/drivers/pcmmio.c b/drivers/staging/comedi/drivers/pcmmio.c
index 9f76b1f5998..574443df42d 100644
--- a/drivers/staging/comedi/drivers/pcmmio.c
+++ b/drivers/staging/comedi/drivers/pcmmio.c
@@ -72,6 +72,7 @@ Configuration Options:
leave out if you don't need this feature)
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
@@ -309,68 +310,27 @@ static int pcmmio_dio_insn_bits(struct comedi_device *dev,
return insn->n;
}
-/* The input or output configuration of each digital line is
- * configured by a special insn_config instruction. chanspec
- * contains the channel to be changed, and data[0] contains the
- * value COMEDI_INPUT or COMEDI_OUTPUT. */
static int pcmmio_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int chan = CR_CHAN(insn->chanspec), byte_no = chan / 8, bit_no =
- chan % 8;
- unsigned long ioaddr;
- unsigned char byte;
-
- /* Compute ioaddr for this channel */
- ioaddr = subpriv->iobases[byte_no];
-
- /* NOTE:
- writing a 0 an IO channel's bit sets the channel to INPUT
- and pulls the line high as well
-
- writing a 1 to an IO channel's bit pulls the line low
-
- All channels are implicitly always in OUTPUT mode -- but when
- they are high they can be considered to be in INPUT mode..
-
- Thus, we only force channels low if the config request was INPUT,
- otherwise we do nothing to the hardware. */
-
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- /* save to io_bits -- don't actually do anything since
- all input channels are also output channels... */
- s->io_bits |= 1 << chan;
- break;
- case INSN_CONFIG_DIO_INPUT:
- /* write a 0 to the actual register representing the channel
- to set it to 'input'. 0 means "float high". */
- byte = inb(ioaddr);
- byte &= ~(1 << bit_no);
- /**< set input channel to '0' */
-
- /*
- * write out byte -- this is the only time we actually affect
- * the hardware as all channels are implicitly output
- * -- but input channels are set to float-high
- */
- outb(byte, ioaddr);
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int byte_no = chan / 8;
+ int bit_no = chan % 8;
+ int ret;
- /* save to io_bits */
- s->io_bits &= ~(1 << chan);
- break;
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
- case INSN_CONFIG_DIO_QUERY:
- /* retrieve from shadow register */
- data[1] =
- (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
+ if (data[0] == INSN_CONFIG_DIO_INPUT) {
+ unsigned long ioaddr = subpriv->iobases[byte_no];
+ unsigned char val;
- default:
- return -EINVAL;
- break;
+ val = inb(ioaddr);
+ val &= ~(1 << bit_no);
+ outb(val, ioaddr);
}
return insn->n;
@@ -1039,10 +999,9 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
for (asic = 0; asic < MAX_ASICS; ++asic) {
devpriv->asics[asic].num = asic;
@@ -1197,12 +1156,13 @@ static void pcmmio_detach(struct comedi_device *dev)
struct pcmmio_private *devpriv = dev->private;
int i;
- for (i = 0; i < MAX_ASICS; ++i) {
- if (devpriv && devpriv->asics[i].irq)
- free_irq(devpriv->asics[i].irq, dev);
- }
- if (devpriv && devpriv->sprivs)
+ if (devpriv) {
+ for (i = 0; i < MAX_ASICS; ++i) {
+ if (devpriv->asics[i].irq)
+ free_irq(devpriv->asics[i].irq, dev);
+ }
kfree(devpriv->sprivs);
+ }
comedi_legacy_detach(dev);
}
diff --git a/drivers/staging/comedi/drivers/pcmuio.c b/drivers/staging/comedi/drivers/pcmuio.c
index c43b6334cea..67e2bb1d66f 100644
--- a/drivers/staging/comedi/drivers/pcmuio.c
+++ b/drivers/staging/comedi/drivers/pcmuio.c
@@ -73,6 +73,7 @@
* can be the same as first irq!)
*/
+#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
@@ -232,27 +233,19 @@ static int pcmuio_dio_insn_bits(struct comedi_device *dev,
static int pcmuio_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- unsigned int chan_mask = 1 << CR_CHAN(insn->chanspec);
int asic = s->index / 2;
int port = (s->index % 2) ? 3 : 0;
+ int ret;
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= chan_mask;
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~chan_mask;
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
+
+ if (data[0] == INSN_CONFIG_DIO_INPUT)
pcmuio_write(dev, s->io_bits, asic, 0, port);
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & chan_mask) ? COMEDI_OUTPUT : COMEDI_INPUT;
- break;
- default:
- return -EINVAL;
- break;
- }
return insn->n;
}
@@ -609,10 +602,9 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
for (asic = 0; asic < PCMUIO_MAX_ASICS; ++asic)
spin_lock_init(&devpriv->asics[asic].spinlock);
@@ -680,12 +672,13 @@ static void pcmuio_detach(struct comedi_device *dev)
struct pcmuio_private *devpriv = dev->private;
int i;
- for (i = 0; i < PCMUIO_MAX_ASICS; ++i) {
- if (devpriv->asics[i].irq)
- free_irq(devpriv->asics[i].irq, dev);
- }
- if (devpriv && devpriv->sprivs)
+ if (devpriv) {
+ for (i = 0; i < PCMUIO_MAX_ASICS; ++i) {
+ if (devpriv->asics[i].irq)
+ free_irq(devpriv->asics[i].irq, dev);
+ }
kfree(devpriv->sprivs);
+ }
comedi_legacy_detach(dev);
}
diff --git a/drivers/staging/comedi/drivers/poc.c b/drivers/staging/comedi/drivers/poc.c
index 005fbefae29..2ae4ee15704 100644
--- a/drivers/staging/comedi/drivers/poc.c
+++ b/drivers/staging/comedi/drivers/poc.c
@@ -30,10 +30,9 @@ Configuration options:
[0] - I/O port base
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
struct boarddef_struct {
const char *name;
unsigned int iosize;
@@ -109,10 +108,9 @@ static int poc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
/* analog output subdevice */
s = &dev->subdevices[0];
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index e092ce87722..9775d3622a6 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -47,6 +47,7 @@ Status: works
Devices: [Quatech] DAQP-208 (daqp), DAQP-308
*/
+#include <linux/module.h>
#include "../comedidev.h"
#include <linux/semaphore.h>
@@ -715,10 +716,9 @@ static int daqp_auto_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
link->config_flags |= CONF_AUTO_SET_IO | CONF_ENABLE_IRQ;
ret = comedi_pcmcia_enable(dev, NULL);
diff --git a/drivers/staging/comedi/drivers/rtd520.c b/drivers/staging/comedi/drivers/rtd520.c
index 9b93a1fc4a5..93c980c62a2 100644
--- a/drivers/staging/comedi/drivers/rtd520.c
+++ b/drivers/staging/comedi/drivers/rtd520.c
@@ -95,6 +95,7 @@
* Digital-IO and Analog-Out only support instruction mode.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -1237,23 +1238,11 @@ static int rtd_dio_insn_config(struct comedi_device *dev,
unsigned int *data)
{
struct rtd_private *devpriv = dev->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask = 1 << chan;
+ int ret;
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= mask;
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~mask;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & mask) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- }
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
/* TODO support digital match interrupts and strobes */
@@ -1338,10 +1327,9 @@ static int rtd_auto_attach(struct comedi_device *dev,
dev->board_ptr = board;
dev->board_name = board->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/rti800.c b/drivers/staging/comedi/drivers/rti800.c
index f698c7fc572..cbb4ba5b852 100644
--- a/drivers/staging/comedi/drivers/rti800.c
+++ b/drivers/staging/comedi/drivers/rti800.c
@@ -49,11 +49,11 @@
* [8] - DAC 1 encoding (same as DAC 0)
*/
+#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/interrupt.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
/*
* Register map
*/
@@ -298,10 +298,9 @@ static int rti800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
inb(dev->iobase + RTI800_ADCHI);
outb(0, dev->iobase + RTI800_CLRFLAGS);
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
devpriv->adc_2comp = (it->options[4] == 0);
devpriv->dac_2comp[0] = (it->options[6] == 0);
diff --git a/drivers/staging/comedi/drivers/rti802.c b/drivers/staging/comedi/drivers/rti802.c
index 9e744505548..a3fa2a4baef 100644
--- a/drivers/staging/comedi/drivers/rti802.c
+++ b/drivers/staging/comedi/drivers/rti802.c
@@ -32,10 +32,9 @@ Configuration Options:
[17] - dac#7 ...
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-
#define RTI802_SIZE 4
#define RTI802_SELECT 0
@@ -93,10 +92,9 @@ static int rti802_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_alloc_subdevices(dev, 1);
if (ret)
diff --git a/drivers/staging/comedi/drivers/s526.c b/drivers/staging/comedi/drivers/s526.c
index e1587e58a73..d629463b85a 100644
--- a/drivers/staging/comedi/drivers/s526.c
+++ b/drivers/staging/comedi/drivers/s526.c
@@ -36,8 +36,8 @@ comedi_config /dev/comedi0 s526 0x2C0,0x3
*/
+#include <linux/module.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
#include <asm/byteorder.h>
#define S526_SIZE 64
@@ -515,32 +515,35 @@ static int s526_dio_insn_bits(struct comedi_device *dev,
static int s526_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
- int group, mask;
+ unsigned int mask;
+ int ret;
+
+ if (chan < 4)
+ mask = 0x0f;
+ else
+ mask = 0xf0;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, mask);
+ if (ret)
+ return ret;
+
+ /* bit 10/11 set the group 1/2's mode */
+ if (s->io_bits & 0x0f)
+ s->state |= (1 << 10);
+ else
+ s->state &= ~(1 << 10);
+ if (s->io_bits & 0xf0)
+ s->state |= (1 << 11);
+ else
+ s->state &= ~(1 << 11);
- group = chan >> 2;
- mask = 0xF << (group << 2);
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- /* bit 10/11 set the group 1/2's mode */
- s->state |= 1 << (group + 10);
- s->io_bits |= mask;
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->state &= ~(1 << (group + 10)); /* 1 is output, 0 is input. */
- s->io_bits &= ~mask;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & mask) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- default:
- return -EINVAL;
- }
outw(s->state, dev->iobase + REG_DIO);
- return 1;
+ return insn->n;
}
static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
@@ -553,10 +556,9 @@ static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_alloc_subdevices(dev, 4);
if (ret)
diff --git a/drivers/staging/comedi/drivers/s626.c b/drivers/staging/comedi/drivers/s626.c
index 48c4b70b736..d22b95dcb9b 100644
--- a/drivers/staging/comedi/drivers/s626.c
+++ b/drivers/staging/comedi/drivers/s626.c
@@ -59,6 +59,8 @@ INSN_CONFIG instructions:
comedi_do_insn(cf,&insn); //executing configuration
*/
+#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
@@ -1660,24 +1662,12 @@ static int s626_dio_insn_config(struct comedi_device *dev,
unsigned int *data)
{
unsigned long group = (unsigned long)s->private;
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask = 1 << chan;
+ int ret;
+
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
- switch (data[0]) {
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & mask) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- case COMEDI_INPUT:
- s->io_bits &= ~mask;
- break;
- case COMEDI_OUTPUT:
- s->io_bits |= mask;
- break;
- default:
- return -EINVAL;
- break;
- }
DEBIwrite(dev, LP_WRDOUT(group), s->io_bits);
return insn->n;
@@ -2585,10 +2575,9 @@ static int s626_auto_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
ret = comedi_pci_enable(dev);
if (ret)
diff --git a/drivers/staging/comedi/drivers/s626.h b/drivers/staging/comedi/drivers/s626.h
index d2756b83b62..a85e6bdcad0 100644
--- a/drivers/staging/comedi/drivers/s626.h
+++ b/drivers/staging/comedi/drivers/s626.h
@@ -65,8 +65,6 @@
#define FALSE (0)
#endif
-#include <linux/slab.h>
-
#define S626_SIZE 0x0200
#define DMABUF_SIZE 4096 /* 4k pages */
diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c
index b4f5fe35b0f..441813ffb17 100644
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ b/drivers/staging/comedi/drivers/serial2002.c
@@ -26,10 +26,10 @@ Status: in development
*/
+#include <linux/module.h>
#include "../comedidev.h"
#include <linux/delay.h>
-#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
@@ -719,10 +719,9 @@ static int serial2002_attach(struct comedi_device *dev,
struct comedi_subdevice *s;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
devpriv->port = it->options[0];
devpriv->speed = it->options[1];
diff --git a/drivers/staging/comedi/drivers/skel.c b/drivers/staging/comedi/drivers/skel.c
index 06aee302bbc..9e964950a56 100644
--- a/drivers/staging/comedi/drivers/skel.c
+++ b/drivers/staging/comedi/drivers/skel.c
@@ -67,6 +67,7 @@ Configuration Options:
* options that are used with comedi_config.
*/
+#include <linux/module.h>
#include <linux/pci.h>
#include "../comedidev.h"
@@ -361,31 +362,27 @@ static int skel_dio_insn_bits(struct comedi_device *dev,
static int skel_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int chan = CR_CHAN(insn->chanspec);
+ int ret;
- /* The input or output configuration of each digital line is
- * configured by a special insn_config instruction. chanspec
- * contains the channel to be changed, and data[0] contains the
- * value COMEDI_INPUT or COMEDI_OUTPUT. */
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= 1 << chan;
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~(1 << chan);
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- break;
- }
- /* outw(s->io_bits,dev->iobase + SKEL_DIO_CONFIG); */
+ /*
+ * The input or output configuration of each digital line is
+ * configured by special insn_config instructions.
+ *
+ * chanspec contains the channel to be changed
+ * data[0] contains the instruction to perform on the channel
+ *
+ * Normally the core-provided comedi_dio_insn_config() function
+ * can be used to handle the boilerplate.
+ */
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
+
+ /* Update the hardware to the new configuration */
+ /* outw(s->io_bits, dev->iobase + SKEL_DIO_CONFIG); */
return insn->n;
}
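
As the pcm3724 and s526 hunks above show, drivers whose lines are switched in banks pass a non-zero mask describing the whole bank; passing 0, as here and in the pcmmio, pcmuio, rtd520 and s626 conversions above, configures just the channel addressed by insn->chanspec.
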
@@ -484,10 +481,9 @@ static int skel_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* dev->board_name = thisboard->name; */
/* Allocate the private data */
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
/*
* Supported boards are usually either auto-attached via the
@@ -558,10 +554,9 @@ static int skel_auto_attach(struct comedi_device *dev,
dev->board_name = thisboard->name;
/* Allocate the private data */
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
/* Enable the PCI device. */
ret = comedi_pci_enable(dev);
diff --git a/drivers/staging/comedi/drivers/ssv_dnp.c b/drivers/staging/comedi/drivers/ssv_dnp.c
index 45c661cbdbb..11758a515c1 100644
--- a/drivers/staging/comedi/drivers/ssv_dnp.c
+++ b/drivers/staging/comedi/drivers/ssv_dnp.c
@@ -26,6 +26,7 @@ Status: unknown
/* include files ----------------------------------------------------------- */
+#include <linux/module.h>
#include "../comedidev.h"
/* Some global definitions: the registers of the DNP ----------------------- */
@@ -92,68 +93,48 @@ static int dnp_dio_insn_bits(struct comedi_device *dev,
}
-/* ------------------------------------------------------------------------- */
-/* Configure the direction of the bidirectional digital i/o pins. chanspec */
-/* contains the channel to be changed and data[0] contains either */
-/* COMEDI_INPUT or COMEDI_OUTPUT. */
-/* ------------------------------------------------------------------------- */
-
static int dnp_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int mask;
+ unsigned int val;
+ int ret;
- u8 register_buffer;
-
- /* reduces chanspec to lower 16 bits */
- int chan = CR_CHAN(insn->chanspec);
-
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- case INSN_CONFIG_DIO_INPUT:
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (inb(CSCDR) & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
- return insn->n;
- break;
- default:
- return -EINVAL;
- break;
- }
- /* Test: which port does the channel belong to? */
-
- /* We have to pay attention with port C: this is the meaning of PCMR: */
- /* Bit in PCMR: 7 6 5 4 3 2 1 0 */
- /* Corresponding port C pin: d 3 d 2 d 1 d 0 d= don't touch */
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
- if ((chan >= 0) && (chan <= 7)) {
- /* this is port A */
+ if (chan < 8) { /* Port A */
+ mask = 1 << chan;
outb(PAMR, CSCIR);
- } else if ((chan >= 8) && (chan <= 15)) {
- /* this is port B */
- chan -= 8;
+ } else if (chan < 16) { /* Port B */
+ mask = 1 << (chan - 8);
outb(PBMR, CSCIR);
- } else if ((chan >= 16) && (chan <= 19)) {
- /* this is port C; multiplication with 2 brings bits into */
- /* correct position for PCMR! */
- chan -= 16;
- chan *= 2;
+ } else { /* Port C */
+ /*
+ * We have to pay special attention to port C.
+ * This is the meaning of PCMR:
+ * Bit in PCMR: 7 6 5 4 3 2 1 0
+ * Corresponding port C pin: d 3 d 2 d 1 d 0 d= don't touch
+ *
+ * Multiplication by 2 brings bits into correct position
+ * for PCMR!
+ */
+ mask = 1 << ((chan - 16) * 2);
outb(PCMR, CSCIR);
- } else {
- return -EINVAL;
}
- /* read 'old' direction of the port and set bits (out=1, in=0) */
- register_buffer = inb(CSCDR);
+ val = inb(CSCDR);
if (data[0] == COMEDI_OUTPUT)
- register_buffer |= (1 << chan);
+ val |= mask;
else
- register_buffer &= ~(1 << chan);
-
- outb(register_buffer, CSCDR);
+ val &= ~mask;
+ outb(val, CSCDR);
- return 1;
+ return insn->n;
}
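
For the port C arm above the mask lands on every other PCMR bit: channel 18, for example, gives (18 - 16) * 2 = 4, so bit 4 is flipped, matching the "d 3 d 2 d 1 d 0" layout quoted in the comment. A hypothetical helper restating the whole mapping (illustration only, not part of the patch):

static unsigned int dnp_example_dir_bit(unsigned int chan)
{
	if (chan < 8)				/* port A: PAMR bits 0..7 */
		return 1 << chan;
	if (chan < 16)				/* port B: PBMR bits 0..7 */
		return 1 << (chan - 8);
	return 1 << ((chan - 16) * 2);		/* port C: PCMR bits 0, 2, 4, 6 */
}
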
diff --git a/drivers/staging/comedi/drivers/unioxx5.c b/drivers/staging/comedi/drivers/unioxx5.c
index c9201d821fb..93eec2fc254 100644
--- a/drivers/staging/comedi/drivers/unioxx5.c
+++ b/drivers/staging/comedi/drivers/unioxx5.c
@@ -40,9 +40,9 @@ Devices: [Fastwel] UNIOxx-5 (unioxx5),
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/delay.h>
#include "../comedidev.h"
-#include <linux/ioport.h>
-#include <linux/slab.h>
#define DRIVER_NAME "unioxx5"
#define UNIOXX5_SIZE 0x10
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index 279e5bd493f..701ad1a6939 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -78,9 +78,6 @@ sampling rate. If you sample two channels you get 4kHz and so on.
*
*/
-/* generates loads of debug info */
-/* #define NOISY_DUX_DEBUGBUG */
-
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -94,42 +91,29 @@ sampling rate. If you sample two channels you get 4kHz and so on.
#include "comedi_fc.h"
-/* timeout for the USB-transfer in ms*/
-#define BULK_TIMEOUT 1000
-
-/* constants for "firmware" upload and download */
-#define FIRMWARE "usbdux_firmware.bin"
-#define USBDUXSUB_FIRMWARE 0xA0
-#define VENDOR_DIR_IN 0xC0
-#define VENDOR_DIR_OUT 0x40
-
-/* internal addresses of the 8051 processor */
-#define USBDUXSUB_CPUCS 0xE600
-
-/*
- * the minor device number, major is 180 only for debugging purposes and to
- * upload special firmware (programming the eeprom etc) which is not compatible
- * with the comedi framwork
- */
-#define USBDUXSUB_MINOR 32
-
-/* max lenghth of the transfer-buffer for software upload */
-#define TB_LEN 0x2000
-
-/* Input endpoint number: ISO/IRQ */
-#define ISOINEP 6
-
-/* Output endpoint number: ISO/IRQ */
-#define ISOOUTEP 2
-
-/* This EP sends DUX commands to USBDUX */
-#define COMMAND_OUT_EP 1
-
-/* This EP receives the DUX commands from USBDUX */
-#define COMMAND_IN_EP 8
-
-/* Output endpoint for PWM */
-#define PWM_EP 4
+/* constants for firmware upload and download */
+#define USBDUX_FIRMWARE "usbdux_firmware.bin"
+#define USBDUX_FIRMWARE_MAX_LEN 0x2000
+#define USBDUX_FIRMWARE_CMD 0xa0
+#define VENDOR_DIR_IN 0xc0
+#define VENDOR_DIR_OUT 0x40
+#define USBDUX_CPU_CS 0xe600
+
+/* usbdux bulk transfer commands */
+#define USBDUX_CMD_MULT_AI 0
+#define USBDUX_CMD_AO 1
+#define USBDUX_CMD_DIO_CFG 2
+#define USBDUX_CMD_DIO_BITS 3
+#define USBDUX_CMD_SINGLE_AI 4
+#define USBDUX_CMD_TIMER_RD 5
+#define USBDUX_CMD_TIMER_WR 6
+#define USBDUX_CMD_PWM_ON 7
+#define USBDUX_CMD_PWM_OFF 8
+
+#define USBDUX_NUM_AO_CHAN 4
+
+/* timeout for the USB-transfer in ms */
+#define BULK_TIMEOUT 1000
/* 300Hz max frequ under PWM */
#define MIN_PWM_PERIOD ((long)(1E9/300))
@@ -137,9 +121,6 @@ sampling rate. If you sample two channels you get 4kHz and so on.
/* Default PWM frequency */
#define PWM_DEFAULT_PERIOD ((long)(1E9/100))
-/* Number of channels */
-#define NUMCHANNELS 8
-
/* Size of one A/D value */
#define SIZEADIN ((sizeof(int16_t)))
@@ -152,9 +133,6 @@ sampling rate. If you sample two channels you get 4kHz and so on.
/* 16 bytes. */
#define SIZEINSNBUF 16
-/* Number of DA channels */
-#define NUMOUTCHANNELS 8
-
/* size of one value for the D/A converter: channel and value */
#define SIZEDAOUT ((sizeof(int8_t)+sizeof(int16_t)))
@@ -185,101 +163,56 @@ sampling rate. If you sample two channels you get 4kHz and so on.
/* must have more buffers due to buggy USB ctr */
#define NUMOFOUTBUFFERSHIGH 10
-/* Total number of usbdux devices */
-#define NUMUSBDUX 16
-
-/* Analogue in subdevice */
-#define SUBDEV_AD 0
-
-/* Analogue out subdevice */
-#define SUBDEV_DA 1
-
-/* Digital I/O */
-#define SUBDEV_DIO 2
-
-/* counter */
-#define SUBDEV_COUNTER 3
-
-/* timer aka pwm output */
-#define SUBDEV_PWM 4
-
/* number of retries to get the right dux command */
#define RETRIES 10
-/**************************************************/
-/* comedi constants */
-static const struct comedi_lrange range_usbdux_ai_range = { 4, {
- BIP_RANGE
- (4.096),
- BIP_RANGE(4.096
- / 2),
- UNI_RANGE
- (4.096),
- UNI_RANGE(4.096
- / 2)
- }
+static const struct comedi_lrange range_usbdux_ai_range = {
+ 4, {
+ BIP_RANGE(4.096),
+ BIP_RANGE(4.096 / 2),
+ UNI_RANGE(4.096),
+ UNI_RANGE(4.096 / 2)
+ }
};
-static const struct comedi_lrange range_usbdux_ao_range = { 2, {
- BIP_RANGE
- (4.096),
- UNI_RANGE
- (4.096),
- }
+static const struct comedi_lrange range_usbdux_ao_range = {
+ 2, {
+ BIP_RANGE(4.096),
+ UNI_RANGE(4.096)
+ }
};
-/*
- * private structure of one subdevice
- */
-
-/*
- * This is the structure which holds all the data of
- * this driver one sub device just now: A/D
- */
-struct usbduxsub {
- /* attached? */
- int attached;
- /* is it associated with a subdevice? */
- int probed;
- /* pointer to the usb-device */
- struct usb_device *usbdev;
+struct usbdux_private {
/* actual number of in-buffers */
- int num_in_buffers;
+ int n_ai_urbs;
/* actual number of out-buffers */
- int num_out_buffers;
+ int n_ao_urbs;
/* ISO-transfer handling: buffers */
- struct urb **urb_in;
- struct urb **urb_out;
+ struct urb **ai_urbs;
+ struct urb **ao_urbs;
/* pwm-transfer handling */
- struct urb *urb_pwm;
+ struct urb *pwm_urb;
/* PWM period */
unsigned int pwm_period;
/* PWM internal delay for the GPIF in the FX2 */
- int8_t pwn_delay;
+ int8_t pwm_delay;
/* size of the PWM buffer which holds the bit pattern */
- int size_pwm_buf;
+ int pwm_buf_sz;
/* input buffer for the ISO-transfer */
- int16_t *in_buffer;
+ int16_t *in_buf;
/* input buffer for single insn */
- int16_t *insn_buffer;
- /* output buffer for single DA outputs */
- int16_t *out_buffer;
- /* interface number */
- int ifnum;
- /* interface structure in 2.6 */
- struct usb_interface *interface;
- /* comedi device for the interrupt context */
- struct comedi_device *comedidev;
- /* is it USB_SPEED_HIGH or not? */
- short int high_speed;
- /* asynchronous command is running */
- short int ai_cmd_running;
- short int ao_cmd_running;
- /* pwm is running */
- short int pwm_cmd_running;
- /* continous acquisition */
- short int ai_continous;
- short int ao_continous;
+ int16_t *insn_buf;
+
+ int8_t ao_chanlist[USBDUX_NUM_AO_CHAN];
+ unsigned int ao_readback[USBDUX_NUM_AO_CHAN];
+
+ unsigned int high_speed:1;
+ unsigned int ai_cmd_running:1;
+ unsigned int ai_continous:1;
+ unsigned int ao_cmd_running:1;
+ unsigned int ao_continous:1;
+ unsigned int pwm_cmd_running:1;
+
/* number of samples to acquire */
int ai_sample_count;
int ao_sample_count;
@@ -291,132 +224,62 @@ struct usbduxsub {
unsigned int ao_counter;
/* interval in frames/uframes */
unsigned int ai_interval;
- /* D/A commands */
- int8_t *dac_commands;
/* commands */
int8_t *dux_commands;
struct semaphore sem;
};
-/*
- * The pointer to the private usb-data of the driver is also the private data
- * for the comedi-device. This has to be global as the usb subsystem needs
- * global variables. The other reason is that this structure must be there
- * _before_ any comedi command is issued. The usb subsystem must be initialised
- * before comedi can access it.
- */
-static struct usbduxsub usbduxsub[NUMUSBDUX];
-
-static DEFINE_SEMAPHORE(start_stop_sem);
-
-/*
- * Stops the data acquision
- * It should be safe to call this function from any context
- */
-static int usbduxsub_unlink_inurbs(struct usbduxsub *usbduxsub_tmp)
+static void usbdux_unlink_urbs(struct urb **urbs, int num_urbs)
{
- int i = 0;
- int err = 0;
+ int i;
- if (usbduxsub_tmp && usbduxsub_tmp->urb_in) {
- for (i = 0; i < usbduxsub_tmp->num_in_buffers; i++) {
- if (usbduxsub_tmp->urb_in[i]) {
- /* We wait here until all transfers have been
- * cancelled. */
- usb_kill_urb(usbduxsub_tmp->urb_in[i]);
- }
- dev_dbg(&usbduxsub_tmp->interface->dev,
- "comedi: usbdux: unlinked InURB %d, err=%d\n",
- i, err);
- }
- }
- return err;
+ for (i = 0; i < num_urbs; i++)
+ usb_kill_urb(urbs[i]);
}
-/*
- * This will stop a running acquisition operation
- * Is called from within this driver from both the
- * interrupt context and from comedi
- */
-static int usbdux_ai_stop(struct usbduxsub *this_usbduxsub, int do_unlink)
+static void usbdux_ai_stop(struct comedi_device *dev, int do_unlink)
{
- int ret = 0;
-
- if (!this_usbduxsub) {
- pr_err("comedi?: usbdux_ai_stop: this_usbduxsub=NULL!\n");
- return -EFAULT;
- }
- dev_dbg(&this_usbduxsub->interface->dev, "comedi: usbdux_ai_stop\n");
+ struct usbdux_private *devpriv = dev->private;
- if (do_unlink) {
- /* stop aquistion */
- ret = usbduxsub_unlink_inurbs(this_usbduxsub);
- }
+ if (do_unlink && devpriv->ai_urbs)
+ usbdux_unlink_urbs(devpriv->ai_urbs, devpriv->n_ai_urbs);
- this_usbduxsub->ai_cmd_running = 0;
-
- return ret;
+ devpriv->ai_cmd_running = 0;
}
-/*
- * This will cancel a running acquisition operation.
- * This is called by comedi but never from inside the driver.
- */
static int usbdux_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct usbduxsub *this_usbduxsub;
- int res = 0;
-
- /* force unlink of all urbs */
- this_usbduxsub = dev->private;
- if (!this_usbduxsub)
- return -EFAULT;
-
- dev_dbg(&this_usbduxsub->interface->dev, "comedi: usbdux_ai_cancel\n");
+ struct usbdux_private *devpriv = dev->private;
/* prevent other CPUs from submitting new commands just now */
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
+ down(&devpriv->sem);
/* unlink only if the urb really has been submitted */
- res = usbdux_ai_stop(this_usbduxsub, this_usbduxsub->ai_cmd_running);
- up(&this_usbduxsub->sem);
- return res;
+ usbdux_ai_stop(dev, devpriv->ai_cmd_running);
+ up(&devpriv->sem);
+
+ return 0;
}
/* analogue IN - interrupt service routine */
static void usbduxsub_ai_isoc_irq(struct urb *urb)
{
+ struct comedi_device *dev = urb->context;
+ struct comedi_subdevice *s = dev->read_subdev;
+ struct usbdux_private *devpriv = dev->private;
int i, err, n;
- struct usbduxsub *this_usbduxsub;
- struct comedi_device *this_comedidev;
- struct comedi_subdevice *s;
-
- /* the context variable points to the subdevice */
- this_comedidev = urb->context;
- /* the private structure of the subdevice is struct usbduxsub */
- this_usbduxsub = this_comedidev->private;
- /* subdevice which is the AD converter */
- s = &this_comedidev->subdevices[SUBDEV_AD];
/* first we test if something unusual has just happened */
switch (urb->status) {
case 0:
/* copy the result in the transfer buffer */
- memcpy(this_usbduxsub->in_buffer,
- urb->transfer_buffer, SIZEINBUF);
+ memcpy(devpriv->in_buf, urb->transfer_buffer, SIZEINBUF);
break;
case -EILSEQ:
/* error in the ISOchronous data */
/* we don't copy the data into the transfer buffer */
/* and recycle the last data byte */
- dev_dbg(&urb->dev->dev,
- "comedi%d: usbdux: CRC error in ISO IN stream.\n",
- this_usbduxsub->comedidev->minor);
-
+ dev_dbg(dev->class_dev, "CRC error in ISO IN stream\n");
break;
case -ECONNRESET:
@@ -424,29 +287,27 @@ static void usbduxsub_ai_isoc_irq(struct urb *urb)
case -ESHUTDOWN:
case -ECONNABORTED:
/* happens after an unlink command */
- if (this_usbduxsub->ai_cmd_running) {
- /* we are still running a command */
- /* tell this comedi */
+ if (devpriv->ai_cmd_running) {
s->async->events |= COMEDI_CB_EOA;
s->async->events |= COMEDI_CB_ERROR;
- comedi_event(this_usbduxsub->comedidev, s);
+ comedi_event(dev, s);
/* stop the transfer w/o unlink */
- usbdux_ai_stop(this_usbduxsub, 0);
+ usbdux_ai_stop(dev, 0);
}
return;
default:
/* a real error on the bus */
/* pass error to comedi if we are really running a command */
- if (this_usbduxsub->ai_cmd_running) {
- dev_err(&urb->dev->dev,
- "Non-zero urb status received in ai intr "
- "context: %d\n", urb->status);
+ if (devpriv->ai_cmd_running) {
+ dev_err(dev->class_dev,
+ "Non-zero urb status received in ai intr context: %d\n",
+ urb->status);
s->async->events |= COMEDI_CB_EOA;
s->async->events |= COMEDI_CB_ERROR;
- comedi_event(this_usbduxsub->comedidev, s);
+ comedi_event(dev, s);
/* don't do an unlink here */
- usbdux_ai_stop(this_usbduxsub, 0);
+ usbdux_ai_stop(dev, 0);
}
return;
}
@@ -455,7 +316,7 @@ static void usbduxsub_ai_isoc_irq(struct urb *urb)
* at this point we are reasonably sure that nothing dodgy has happened
* are we running a command?
*/
- if (unlikely((!(this_usbduxsub->ai_cmd_running)))) {
+ if (unlikely(!devpriv->ai_cmd_running)) {
/*
* not running a command, do not continue execution if no
* asynchronous command is running in particular not resubmit
@@ -463,144 +324,101 @@ static void usbduxsub_ai_isoc_irq(struct urb *urb)
return;
}
- urb->dev = this_usbduxsub->usbdev;
+ urb->dev = comedi_to_usb_dev(dev);
/* resubmit the urb */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (unlikely(err < 0)) {
- dev_err(&urb->dev->dev,
- "comedi_: urb resubmit failed in int-context! err=%d\n",
- err);
+ dev_err(dev->class_dev,
+ "urb resubmit failed in int-context! err=%d\n", err);
if (err == -EL2NSYNC)
- dev_err(&urb->dev->dev,
- "buggy USB host controller or bug in IRQ "
- "handler!\n");
+ dev_err(dev->class_dev,
+ "buggy USB host controller or bug in IRQ handler!\n");
s->async->events |= COMEDI_CB_EOA;
s->async->events |= COMEDI_CB_ERROR;
- comedi_event(this_usbduxsub->comedidev, s);
+ comedi_event(dev, s);
/* don't do an unlink here */
- usbdux_ai_stop(this_usbduxsub, 0);
+ usbdux_ai_stop(dev, 0);
return;
}
- this_usbduxsub->ai_counter--;
- if (likely(this_usbduxsub->ai_counter > 0))
+ devpriv->ai_counter--;
+ if (likely(devpriv->ai_counter > 0))
return;
/* timer zero, transfer measurements to comedi */
- this_usbduxsub->ai_counter = this_usbduxsub->ai_timer;
+ devpriv->ai_counter = devpriv->ai_timer;
/* test, if we transmit only a fixed number of samples */
- if (!(this_usbduxsub->ai_continous)) {
+ if (!devpriv->ai_continous) {
/* not continuous, fixed number of samples */
- this_usbduxsub->ai_sample_count--;
+ devpriv->ai_sample_count--;
/* all samples received? */
- if (this_usbduxsub->ai_sample_count < 0) {
+ if (devpriv->ai_sample_count < 0) {
/* prevent a resubmit next time */
- usbdux_ai_stop(this_usbduxsub, 0);
+ usbdux_ai_stop(dev, 0);
/* say comedi that the acquistion is over */
s->async->events |= COMEDI_CB_EOA;
- comedi_event(this_usbduxsub->comedidev, s);
+ comedi_event(dev, s);
return;
}
}
/* get the data from the USB bus and hand it over to comedi */
n = s->async->cmd.chanlist_len;
for (i = 0; i < n; i++) {
+ unsigned int range = CR_RANGE(s->async->cmd.chanlist[i]);
+ int16_t val = le16_to_cpu(devpriv->in_buf[i]);
+
+ /* bipolar data is two's-complement */
+ if (comedi_range_is_bipolar(s, range))
+ val ^= ((s->maxdata + 1) >> 1);
+
/* transfer data */
- if (CR_RANGE(s->async->cmd.chanlist[i]) <= 1) {
- err = comedi_buf_put
- (s->async,
- le16_to_cpu(this_usbduxsub->in_buffer[i]) ^ 0x800);
- } else {
- err = comedi_buf_put
- (s->async,
- le16_to_cpu(this_usbduxsub->in_buffer[i]));
- }
+ err = comedi_buf_put(s->async, val);
if (unlikely(err == 0)) {
/* buffer overflow */
- usbdux_ai_stop(this_usbduxsub, 0);
+ usbdux_ai_stop(dev, 0);
return;
}
}
/* tell comedi that data is there */
s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
- comedi_event(this_usbduxsub->comedidev, s);
-}
-
-static int usbduxsub_unlink_outurbs(struct usbduxsub *usbduxsub_tmp)
-{
- int i = 0;
- int err = 0;
-
- if (usbduxsub_tmp && usbduxsub_tmp->urb_out) {
- for (i = 0; i < usbduxsub_tmp->num_out_buffers; i++) {
- if (usbduxsub_tmp->urb_out[i])
- usb_kill_urb(usbduxsub_tmp->urb_out[i]);
-
- dev_dbg(&usbduxsub_tmp->interface->dev,
- "comedi: usbdux: unlinked OutURB %d: res=%d\n",
- i, err);
- }
- }
- return err;
+ comedi_event(dev, s);
}
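
The conversion added above, val ^= ((s->maxdata + 1) >> 1), turns the converter's two's-complement bipolar sample into the unsigned offset-binary value comedi expects; the same step reappears in usbdux_ai_insn_read() further down. A standalone sketch of the arithmetic, assuming the board's 12-bit resolution (maxdata = 0x0fff):

/* Illustration only: mirrors the val ^= ((s->maxdata + 1) >> 1) step. */
static unsigned short example_bipolar_to_comedi(short raw, unsigned int maxdata)
{
	unsigned int half = (maxdata + 1) >> 1;	/* 0x800 for a 12-bit ADC */

	/*
	 * Two's-complement -0x800..0x7ff becomes 0x000..0xfff:
	 * raw = -2048 -> 0x000, raw = 0 -> 0x800, raw = 2047 -> 0xfff.
	 */
	return ((unsigned short)raw ^ half) & maxdata;
}
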
-/* This will cancel a running acquisition operation
- * in any context.
- */
-static int usbdux_ao_stop(struct usbduxsub *this_usbduxsub, int do_unlink)
+static void usbdux_ao_stop(struct comedi_device *dev, int do_unlink)
{
- int ret = 0;
-
- if (!this_usbduxsub)
- return -EFAULT;
- dev_dbg(&this_usbduxsub->interface->dev, "comedi: usbdux_ao_cancel\n");
-
- if (do_unlink)
- ret = usbduxsub_unlink_outurbs(this_usbduxsub);
+ struct usbdux_private *devpriv = dev->private;
- this_usbduxsub->ao_cmd_running = 0;
+ if (do_unlink && devpriv->ao_urbs)
+ usbdux_unlink_urbs(devpriv->ao_urbs, devpriv->n_ao_urbs);
- return ret;
+ devpriv->ao_cmd_running = 0;
}
-/* force unlink, is called by comedi */
static int usbdux_ao_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct usbduxsub *this_usbduxsub = dev->private;
- int res = 0;
-
- if (!this_usbduxsub)
- return -EFAULT;
+ struct usbdux_private *devpriv = dev->private;
/* prevent other CPUs from submitting a command just now */
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
+ down(&devpriv->sem);
/* unlink only if it is really running */
- res = usbdux_ao_stop(this_usbduxsub, this_usbduxsub->ao_cmd_running);
- up(&this_usbduxsub->sem);
- return res;
+ usbdux_ao_stop(dev, devpriv->ao_cmd_running);
+ up(&devpriv->sem);
+
+ return 0;
}
static void usbduxsub_ao_isoc_irq(struct urb *urb)
{
- int i, ret;
+ struct comedi_device *dev = urb->context;
+ struct comedi_subdevice *s = dev->write_subdev;
+ struct usbdux_private *devpriv = dev->private;
int8_t *datap;
- struct usbduxsub *this_usbduxsub;
- struct comedi_device *this_comedidev;
- struct comedi_subdevice *s;
-
- /* the context variable points to the subdevice */
- this_comedidev = urb->context;
- /* the private structure of the subdevice is struct usbduxsub */
- this_usbduxsub = this_comedidev->private;
-
- s = &this_comedidev->subdevices[SUBDEV_DA];
+ int len;
+ int ret;
+ int i;
switch (urb->status) {
case 0:
@@ -613,246 +431,131 @@ static void usbduxsub_ao_isoc_irq(struct urb *urb)
case -ECONNABORTED:
/* after an unlink command, unplug, ... etc */
/* no unlink needed here. Already shutting down. */
- if (this_usbduxsub->ao_cmd_running) {
+ if (devpriv->ao_cmd_running) {
s->async->events |= COMEDI_CB_EOA;
- comedi_event(this_usbduxsub->comedidev, s);
- usbdux_ao_stop(this_usbduxsub, 0);
+ comedi_event(dev, s);
+ usbdux_ao_stop(dev, 0);
}
return;
default:
/* a real error */
- if (this_usbduxsub->ao_cmd_running) {
- dev_err(&urb->dev->dev,
- "comedi_: Non-zero urb status received in ao "
- "intr context: %d\n", urb->status);
+ if (devpriv->ao_cmd_running) {
+ dev_err(dev->class_dev,
+ "Non-zero urb status received in ao intr context: %d\n",
+ urb->status);
s->async->events |= COMEDI_CB_ERROR;
s->async->events |= COMEDI_CB_EOA;
- comedi_event(this_usbduxsub->comedidev, s);
+ comedi_event(dev, s);
/* we do an unlink if we are in the high speed mode */
- usbdux_ao_stop(this_usbduxsub, 0);
+ usbdux_ao_stop(dev, 0);
}
return;
}
/* are we actually running? */
- if (!(this_usbduxsub->ao_cmd_running))
+ if (!devpriv->ao_cmd_running)
return;
/* normal operation: executing a command in this subdevice */
- this_usbduxsub->ao_counter--;
- if ((int)this_usbduxsub->ao_counter <= 0) {
+ devpriv->ao_counter--;
+ if ((int)devpriv->ao_counter <= 0) {
/* timer zero */
- this_usbduxsub->ao_counter = this_usbduxsub->ao_timer;
+ devpriv->ao_counter = devpriv->ao_timer;
/* handle non continous acquisition */
- if (!(this_usbduxsub->ao_continous)) {
+ if (!devpriv->ao_continous) {
/* fixed number of samples */
- this_usbduxsub->ao_sample_count--;
- if (this_usbduxsub->ao_sample_count < 0) {
+ devpriv->ao_sample_count--;
+ if (devpriv->ao_sample_count < 0) {
/* all samples transmitted */
- usbdux_ao_stop(this_usbduxsub, 0);
+ usbdux_ao_stop(dev, 0);
s->async->events |= COMEDI_CB_EOA;
- comedi_event(this_usbduxsub->comedidev, s);
+ comedi_event(dev, s);
/* no resubmit of the urb */
return;
}
}
+
/* transmit data to the USB bus */
- ((uint8_t *) (urb->transfer_buffer))[0] =
- s->async->cmd.chanlist_len;
+ datap = urb->transfer_buffer;
+ len = s->async->cmd.chanlist_len;
+ *datap++ = len;
for (i = 0; i < s->async->cmd.chanlist_len; i++) {
- short temp;
- if (i >= NUMOUTCHANNELS)
- break;
+ unsigned int chan = devpriv->ao_chanlist[i];
+ short val;
- /* pointer to the DA */
- datap =
- (&(((int8_t *) urb->transfer_buffer)[i * 3 + 1]));
- /* get the data from comedi */
- ret = comedi_buf_get(s->async, &temp);
- datap[0] = temp;
- datap[1] = temp >> 8;
- datap[2] = this_usbduxsub->dac_commands[i];
- /* printk("data[0]=%x, data[1]=%x, data[2]=%x\n", */
- /* datap[0],datap[1],datap[2]); */
+ ret = comedi_buf_get(s->async, &val);
if (ret < 0) {
- dev_err(&urb->dev->dev,
- "comedi: buffer underflow\n");
- s->async->events |= COMEDI_CB_EOA;
- s->async->events |= COMEDI_CB_OVERFLOW;
+ dev_err(dev->class_dev, "buffer underflow\n");
+ s->async->events |= (COMEDI_CB_EOA |
+ COMEDI_CB_OVERFLOW);
}
- /* transmit data to comedi */
+ /* pointer to the DA */
+ *datap++ = val & 0xff;
+ *datap++ = (val >> 8) & 0xff;
+ *datap++ = chan;
+ devpriv->ao_readback[chan] = val;
+
s->async->events |= COMEDI_CB_BLOCK;
- comedi_event(this_usbduxsub->comedidev, s);
+ comedi_event(dev, s);
}
}
urb->transfer_buffer_length = SIZEOUTBUF;
- urb->dev = this_usbduxsub->usbdev;
+ urb->dev = comedi_to_usb_dev(dev);
urb->status = 0;
- if (this_usbduxsub->ao_cmd_running) {
- if (this_usbduxsub->high_speed) {
- /* uframes */
- urb->interval = 8;
- } else {
- /* frames */
- urb->interval = 1;
- }
+ if (devpriv->ao_cmd_running) {
+ if (devpriv->high_speed)
+ urb->interval = 8; /* uframes */
+ else
+ urb->interval = 1; /* frames */
urb->number_of_packets = 1;
urb->iso_frame_desc[0].offset = 0;
urb->iso_frame_desc[0].length = SIZEOUTBUF;
urb->iso_frame_desc[0].status = 0;
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret < 0) {
- dev_err(&urb->dev->dev,
- "comedi_: ao urb resubm failed in int-cont. "
- "ret=%d", ret);
+ dev_err(dev->class_dev,
+ "ao urb resubm failed in int-cont. ret=%d",
+ ret);
if (ret == EL2NSYNC)
- dev_err(&urb->dev->dev,
- "buggy USB host controller or bug in "
- "IRQ handling!\n");
+ dev_err(dev->class_dev,
+ "buggy USB host controller or bug in IRQ handling!\n");
s->async->events |= COMEDI_CB_EOA;
s->async->events |= COMEDI_CB_ERROR;
- comedi_event(this_usbduxsub->comedidev, s);
+ comedi_event(dev, s);
/* don't do an unlink here */
- usbdux_ao_stop(this_usbduxsub, 0);
+ usbdux_ao_stop(dev, 0);
}
}
}
-#define FIRMWARE_MAX_LEN 0x2000
-
-static int usbdux_firmware_upload(struct comedi_device *dev,
- const u8 *data, size_t size,
- unsigned long context)
+static int usbdux_submit_urbs(struct comedi_device *dev,
+ struct urb **urbs, int num_urbs,
+ int input_urb)
{
- struct usbduxsub *usbduxsub = dev->private;
- struct usb_device *usb = usbduxsub->usbdev;
- uint8_t *buf;
- uint8_t *tmp;
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbdux_private *devpriv = dev->private;
+ struct urb *urb;
int ret;
-
- if (!data)
- return 0;
-
- if (size > FIRMWARE_MAX_LEN) {
- dev_err(&usbduxsub->interface->dev,
- "usbdux firmware binary it too large for FX2.\n");
- return -ENOMEM;
- }
-
- /* we generate a local buffer for the firmware */
- buf = kmemdup(data, size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- /* we need a malloc'ed buffer for usb_control_msg() */
- tmp = kmalloc(1, GFP_KERNEL);
- if (!tmp) {
- kfree(buf);
- return -ENOMEM;
- }
-
- /* stop the current firmware on the device */
- *tmp = 1; /* 7f92 to one */
- ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
- USBDUXSUB_FIRMWARE,
- VENDOR_DIR_OUT,
- USBDUXSUB_CPUCS, 0x0000,
- tmp, 1,
- BULK_TIMEOUT);
- if (ret < 0) {
- dev_err(&usbduxsub->interface->dev,
- "comedi_: can not stop firmware\n");
- goto done;
- }
-
- /* upload the new firmware to the device */
- ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
- USBDUXSUB_FIRMWARE,
- VENDOR_DIR_OUT,
- 0, 0x0000,
- buf, size,
- BULK_TIMEOUT);
- if (ret < 0) {
- dev_err(&usbduxsub->interface->dev,
- "comedi_: firmware upload failed\n");
- goto done;
- }
-
- /* start the new firmware on the device */
- *tmp = 0; /* 7f92 to zero */
- ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
- USBDUXSUB_FIRMWARE,
- VENDOR_DIR_OUT,
- USBDUXSUB_CPUCS, 0x0000,
- tmp, 1,
- BULK_TIMEOUT);
- if (ret < 0)
- dev_err(&usbduxsub->interface->dev,
- "comedi_: can not start firmware\n");
-
-done:
- kfree(tmp);
- kfree(buf);
- return ret;
-}
-
-static int usbduxsub_submit_inurbs(struct usbduxsub *usbduxsub)
-{
- int i, err_flag;
-
- if (!usbduxsub)
- return -EFAULT;
+ int i;
/* Submit all URBs and start the transfer on the bus */
- for (i = 0; i < usbduxsub->num_in_buffers; i++) {
- /* in case of a resubmission after an unlink... */
- usbduxsub->urb_in[i]->interval = usbduxsub->ai_interval;
- usbduxsub->urb_in[i]->context = usbduxsub->comedidev;
- usbduxsub->urb_in[i]->dev = usbduxsub->usbdev;
- usbduxsub->urb_in[i]->status = 0;
- usbduxsub->urb_in[i]->transfer_flags = URB_ISO_ASAP;
- dev_dbg(&usbduxsub->interface->dev,
- "comedi%d: submitting in-urb[%d]: %p,%p intv=%d\n",
- usbduxsub->comedidev->minor, i,
- (usbduxsub->urb_in[i]->context),
- (usbduxsub->urb_in[i]->dev),
- (usbduxsub->urb_in[i]->interval));
- err_flag = usb_submit_urb(usbduxsub->urb_in[i], GFP_ATOMIC);
- if (err_flag) {
- dev_err(&usbduxsub->interface->dev,
- "comedi_: ai: usb_submit_urb(%d) error %d\n",
- i, err_flag);
- return err_flag;
- }
- }
- return 0;
-}
+ for (i = 0; i < num_urbs; i++) {
+ urb = urbs[i];
-static int usbduxsub_submit_outurbs(struct usbduxsub *usbduxsub)
-{
- int i, err_flag;
-
- if (!usbduxsub)
- return -EFAULT;
-
- for (i = 0; i < usbduxsub->num_out_buffers; i++) {
- dev_dbg(&usbduxsub->interface->dev,
- "comedi_: submitting out-urb[%d]\n", i);
/* in case of a resubmission after an unlink... */
- usbduxsub->urb_out[i]->context = usbduxsub->comedidev;
- usbduxsub->urb_out[i]->dev = usbduxsub->usbdev;
- usbduxsub->urb_out[i]->status = 0;
- usbduxsub->urb_out[i]->transfer_flags = URB_ISO_ASAP;
- err_flag = usb_submit_urb(usbduxsub->urb_out[i], GFP_ATOMIC);
- if (err_flag) {
- dev_err(&usbduxsub->interface->dev,
- "comedi_: ao: usb_submit_urb(%d) error %d\n",
- i, err_flag);
- return err_flag;
- }
+ if (input_urb)
+ urb->interval = devpriv->ai_interval;
+ urb->context = dev;
+ urb->dev = usb;
+ urb->status = 0;
+ urb->transfer_flags = URB_ISO_ASAP;
+
+ ret = usb_submit_urb(urb, GFP_ATOMIC);
+ if (ret)
+ return ret;
}
return 0;
}
@@ -860,13 +563,10 @@ static int usbduxsub_submit_outurbs(struct usbduxsub *usbduxsub)
static int usbdux_ai_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
- struct usbduxsub *this_usbduxsub = dev->private;
+ struct usbdux_private *this_usbduxsub = dev->private;
int err = 0, i;
unsigned int tmp_timer;
- if (!(this_usbduxsub->probed))
- return -ENODEV;
-
/* Step 1 : check if triggers are trivially valid */
err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT);
@@ -956,221 +656,143 @@ static int8_t create_adc_command(unsigned int chan, int range)
return (chan << 4) | ((p == 1) << 2) | ((r == 1) << 3);
}
-/* bulk transfers to usbdux */
+static int send_dux_commands(struct comedi_device *dev, int cmd_type)
+{
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbdux_private *devpriv = dev->private;
+ int nsent;
-#define SENDADCOMMANDS 0
-#define SENDDACOMMANDS 1
-#define SENDDIOCONFIGCOMMAND 2
-#define SENDDIOBITSCOMMAND 3
-#define SENDSINGLEAD 4
-#define READCOUNTERCOMMAND 5
-#define WRITECOUNTERCOMMAND 6
-#define SENDPWMON 7
-#define SENDPWMOFF 8
+ devpriv->dux_commands[0] = cmd_type;
-static int send_dux_commands(struct usbduxsub *this_usbduxsub, int cmd_type)
-{
- int result, nsent;
-
- this_usbduxsub->dux_commands[0] = cmd_type;
-#ifdef NOISY_DUX_DEBUGBUG
- printk(KERN_DEBUG "comedi%d: usbdux: dux_commands: ",
- this_usbduxsub->comedidev->minor);
- for (result = 0; result < SIZEOFDUXBUFFER; result++)
- printk(" %02x", this_usbduxsub->dux_commands[result]);
- printk("\n");
-#endif
- result = usb_bulk_msg(this_usbduxsub->usbdev,
- usb_sndbulkpipe(this_usbduxsub->usbdev,
- COMMAND_OUT_EP),
- this_usbduxsub->dux_commands, SIZEOFDUXBUFFER,
- &nsent, BULK_TIMEOUT);
- if (result < 0)
- dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
- "could not transmit dux_command to the usb-device, "
- "err=%d\n", this_usbduxsub->comedidev->minor, result);
-
- return result;
+ return usb_bulk_msg(usb, usb_sndbulkpipe(usb, 1),
+ devpriv->dux_commands, SIZEOFDUXBUFFER,
+ &nsent, BULK_TIMEOUT);
}
-static int receive_dux_commands(struct usbduxsub *this_usbduxsub, int command)
+static int receive_dux_commands(struct comedi_device *dev, int command)
{
- int result = (-EFAULT);
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbdux_private *devpriv = dev->private;
+ int ret;
int nrec;
int i;
for (i = 0; i < RETRIES; i++) {
- result = usb_bulk_msg(this_usbduxsub->usbdev,
- usb_rcvbulkpipe(this_usbduxsub->usbdev,
- COMMAND_IN_EP),
- this_usbduxsub->insn_buffer, SIZEINSNBUF,
+ ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, 8),
+ devpriv->insn_buf, SIZEINSNBUF,
&nrec, BULK_TIMEOUT);
- if (result < 0) {
- dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
- "insn: USB error %d while receiving DUX command"
- "\n", this_usbduxsub->comedidev->minor, result);
- return result;
- }
- if (le16_to_cpu(this_usbduxsub->insn_buffer[0]) == command)
- return result;
+ if (ret < 0)
+ return ret;
+ if (le16_to_cpu(devpriv->insn_buf[0]) == command)
+ return ret;
}
- /* this is only reached if the data has been requested a couple of
- * times */
- dev_err(&this_usbduxsub->interface->dev, "comedi%d: insn: "
- "wrong data returned from firmware: want cmd %d, got cmd %d.\n",
- this_usbduxsub->comedidev->minor, command,
- le16_to_cpu(this_usbduxsub->insn_buffer[0]));
+ /* command not received */
return -EFAULT;
}
static int usbdux_ai_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s, unsigned int trignum)
+ struct comedi_subdevice *s,
+ unsigned int trignum)
{
- int ret;
- struct usbduxsub *this_usbduxsub = dev->private;
- if (!this_usbduxsub)
- return -EFAULT;
+ struct usbdux_private *devpriv = dev->private;
+ int ret = -EINVAL;
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux_ai_inttrig\n", dev->minor);
-
- if (trignum != 0) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux_ai_inttrig: invalid trignum\n",
- dev->minor);
- up(&this_usbduxsub->sem);
- return -EINVAL;
- }
- if (!(this_usbduxsub->ai_cmd_running)) {
- this_usbduxsub->ai_cmd_running = 1;
- ret = usbduxsub_submit_inurbs(this_usbduxsub);
+ down(&devpriv->sem);
+
+ if (trignum != 0)
+ goto ai_trig_exit;
+
+ if (!devpriv->ai_cmd_running) {
+ devpriv->ai_cmd_running = 1;
+ ret = usbdux_submit_urbs(dev, devpriv->ai_urbs,
+ devpriv->n_ai_urbs, 1);
if (ret < 0) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux_ai_inttrig: "
- "urbSubmit: err=%d\n", dev->minor, ret);
- this_usbduxsub->ai_cmd_running = 0;
- up(&this_usbduxsub->sem);
- return ret;
+ devpriv->ai_cmd_running = 0;
+ goto ai_trig_exit;
}
s->async->inttrig = NULL;
} else {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: ai_inttrig but acqu is already running\n",
- dev->minor);
+ ret = -EBUSY;
}
- up(&this_usbduxsub->sem);
- return 1;
+
+ai_trig_exit:
+ up(&devpriv->sem);
+ return ret;
}
static int usbdux_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
+ struct usbdux_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int chan, range;
- int i, ret;
- struct usbduxsub *this_usbduxsub = dev->private;
- int result;
-
- if (!this_usbduxsub)
- return -EFAULT;
-
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux_ai_cmd\n", dev->minor);
+ int len = cmd->chanlist_len;
+ int ret = -EBUSY;
+ int i;
/* block other CPUs from starting an ai_cmd */
- down(&this_usbduxsub->sem);
+ down(&devpriv->sem);
+
+ if (devpriv->ai_cmd_running)
+ goto ai_cmd_exit;
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
- if (this_usbduxsub->ai_cmd_running) {
- dev_err(&this_usbduxsub->interface->dev, "comedi%d: "
- "ai_cmd not possible. Another ai_cmd is running.\n",
- dev->minor);
- up(&this_usbduxsub->sem);
- return -EBUSY;
- }
/* set current channel of the running acquisition to zero */
s->async->cur_chan = 0;
- this_usbduxsub->dux_commands[1] = cmd->chanlist_len;
- for (i = 0; i < cmd->chanlist_len; ++i) {
- chan = CR_CHAN(cmd->chanlist[i]);
- range = CR_RANGE(cmd->chanlist[i]);
- if (i >= NUMCHANNELS) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: channel list too long\n",
- dev->minor);
- break;
- }
- this_usbduxsub->dux_commands[i + 2] =
- create_adc_command(chan, range);
- }
-
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi %d: sending commands to the usb device: size=%u\n",
- dev->minor, NUMCHANNELS);
+ devpriv->dux_commands[1] = len;
+ for (i = 0; i < len; ++i) {
+ unsigned int chan = CR_CHAN(cmd->chanlist[i]);
+ unsigned int range = CR_RANGE(cmd->chanlist[i]);
- result = send_dux_commands(this_usbduxsub, SENDADCOMMANDS);
- if (result < 0) {
- up(&this_usbduxsub->sem);
- return result;
+ devpriv->dux_commands[i + 2] = create_adc_command(chan, range);
}
- if (this_usbduxsub->high_speed) {
+ ret = send_dux_commands(dev, USBDUX_CMD_MULT_AI);
+ if (ret < 0)
+ goto ai_cmd_exit;
+
+ if (devpriv->high_speed) {
/*
* every channel gets a time window of 125us. Thus, if we
* sample all 8 channels we need 1ms. If we sample only one
* channel we need only 125us
*/
- this_usbduxsub->ai_interval = 1;
+ devpriv->ai_interval = 1;
/* find a power of 2 for the interval */
- while ((this_usbduxsub->ai_interval) < (cmd->chanlist_len)) {
- this_usbduxsub->ai_interval =
- (this_usbduxsub->ai_interval) * 2;
- }
- this_usbduxsub->ai_timer = cmd->scan_begin_arg / (125000 *
- (this_usbduxsub->
- ai_interval));
+ while (devpriv->ai_interval < len)
+ devpriv->ai_interval *= 2;
+
+ devpriv->ai_timer = cmd->scan_begin_arg /
+ (125000 * devpriv->ai_interval);
} else {
/* interval always 1ms */
- this_usbduxsub->ai_interval = 1;
- this_usbduxsub->ai_timer = cmd->scan_begin_arg / 1000000;
+ devpriv->ai_interval = 1;
+ devpriv->ai_timer = cmd->scan_begin_arg / 1000000;
}
- if (this_usbduxsub->ai_timer < 1) {
- dev_err(&this_usbduxsub->interface->dev, "comedi%d: ai_cmd: "
- "timer=%d, scan_begin_arg=%d. "
- "Not properly tested by cmdtest?\n", dev->minor,
- this_usbduxsub->ai_timer, cmd->scan_begin_arg);
- up(&this_usbduxsub->sem);
- return -EINVAL;
+ if (devpriv->ai_timer < 1) {
+ ret = -EINVAL;
+ goto ai_cmd_exit;
}
- this_usbduxsub->ai_counter = this_usbduxsub->ai_timer;
+
+ devpriv->ai_counter = devpriv->ai_timer;
if (cmd->stop_src == TRIG_COUNT) {
/* data arrives as one packet */
- this_usbduxsub->ai_sample_count = cmd->stop_arg;
- this_usbduxsub->ai_continous = 0;
+ devpriv->ai_sample_count = cmd->stop_arg;
+ devpriv->ai_continous = 0;
} else {
/* continous acquisition */
- this_usbduxsub->ai_continous = 1;
- this_usbduxsub->ai_sample_count = 0;
+ devpriv->ai_continous = 1;
+ devpriv->ai_sample_count = 0;
}
if (cmd->start_src == TRIG_NOW) {
/* enable this acquisition operation */
- this_usbduxsub->ai_cmd_running = 1;
- ret = usbduxsub_submit_inurbs(this_usbduxsub);
+ devpriv->ai_cmd_running = 1;
+ ret = usbdux_submit_urbs(dev, devpriv->ai_urbs,
+ devpriv->n_ai_urbs, 1);
if (ret < 0) {
- this_usbduxsub->ai_cmd_running = 0;
+ devpriv->ai_cmd_running = 0;
/* fixme: unlink here?? */
- up(&this_usbduxsub->sem);
- return ret;
+ goto ai_cmd_exit;
}
s->async->inttrig = NULL;
} else {
@@ -1179,202 +801,156 @@ static int usbdux_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* wait for an internal signal */
s->async->inttrig = usbdux_ai_inttrig;
}
- up(&this_usbduxsub->sem);
- return 0;
+
+ai_cmd_exit:
+ up(&devpriv->sem);
+
+ return ret;
}
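
In the high-speed branch above, the ISO transfer interval is rounded up to the next power of two that covers the channel list, and ai_timer counts how many 125 us * ai_interval slots fit into scan_begin_arg (which comedi passes in nanoseconds). A small standalone sketch of that arithmetic; the channel count and scan period are example values, not taken from the driver:

#include <stdio.h>

int main(void)
{
	unsigned int chanlist_len = 3;		/* example channel count */
	unsigned int scan_begin_arg = 1000000;	/* example: 1 ms scan, in ns */
	unsigned int ai_interval = 1;
	unsigned int ai_timer;

	/* round the ISO interval up to a power of two >= chanlist_len */
	while (ai_interval < chanlist_len)
		ai_interval *= 2;

	/* number of 125 us * ai_interval slots per scan */
	ai_timer = scan_begin_arg / (125000 * ai_interval);

	printf("ai_interval=%u ai_timer=%u\n", ai_interval, ai_timer);
	return 0;
}
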
/* Mode 0 is used to get a single conversion on demand */
static int usbdux_ai_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ struct usbdux_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int range = CR_RANGE(insn->chanspec);
+ unsigned int val;
+ int ret = -EBUSY;
int i;
- unsigned int one = 0;
- int chan, range;
- int err;
- struct usbduxsub *this_usbduxsub = dev->private;
-
- if (!this_usbduxsub)
- return 0;
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: ai_insn_read, insn->n=%d, insn->subdev=%d\n",
- dev->minor, insn->n, insn->subdev);
+ down(&devpriv->sem);
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
- if (this_usbduxsub->ai_cmd_running) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: ai_insn_read not possible. "
- "Async Command is running.\n", dev->minor);
- up(&this_usbduxsub->sem);
- return 0;
- }
+ if (devpriv->ai_cmd_running)
+ goto ai_read_exit;
- /* sample one channel */
- chan = CR_CHAN(insn->chanspec);
- range = CR_RANGE(insn->chanspec);
/* set command for the first channel */
- this_usbduxsub->dux_commands[1] = create_adc_command(chan, range);
+ devpriv->dux_commands[1] = create_adc_command(chan, range);
/* adc commands */
- err = send_dux_commands(this_usbduxsub, SENDSINGLEAD);
- if (err < 0) {
- up(&this_usbduxsub->sem);
- return err;
- }
+ ret = send_dux_commands(dev, USBDUX_CMD_SINGLE_AI);
+ if (ret < 0)
+ goto ai_read_exit;
for (i = 0; i < insn->n; i++) {
- err = receive_dux_commands(this_usbduxsub, SENDSINGLEAD);
- if (err < 0) {
- up(&this_usbduxsub->sem);
- return 0;
- }
- one = le16_to_cpu(this_usbduxsub->insn_buffer[1]);
- if (CR_RANGE(insn->chanspec) <= 1)
- one = one ^ 0x800;
+ ret = receive_dux_commands(dev, USBDUX_CMD_SINGLE_AI);
+ if (ret < 0)
+ goto ai_read_exit;
+
+ val = le16_to_cpu(devpriv->insn_buf[1]);
- data[i] = one;
+ /* bipolar data is two's-complement */
+ if (comedi_range_is_bipolar(s, range))
+ val ^= ((s->maxdata + 1) >> 1);
+
+ data[i] = val;
}
- up(&this_usbduxsub->sem);
- return i;
-}
-/************************************/
-/* analog out */
+ai_read_exit:
+ up(&devpriv->sem);
+
+ return ret ? ret : insn->n;
+}
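
usbdux_ai_insn_read() above converts bipolar samples from two's-complement to the straight-binary 0..maxdata range comedi expects by XOR-ing with half of (maxdata + 1). A standalone sketch with a few example raw values; the 12-bit maxdata matches the A/D resolution set elsewhere in the diff:

#include <stdio.h>

int main(void)
{
	unsigned int maxdata = 0x0fff;	/* 12-bit converter */
	unsigned int raw[] = { 0x800, 0xfff, 0x000, 0x7ff };
	unsigned int i;

	for (i = 0; i < sizeof(raw) / sizeof(raw[0]); i++) {
		/* two's-complement -> straight binary, as in the driver */
		unsigned int val = raw[i] ^ ((maxdata + 1) >> 1);

		printf("raw=0x%03x -> comedi=0x%03x\n", raw[i], val);
	}
	return 0;
}
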
static int usbdux_ao_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
+ struct usbdux_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
int i;
- int chan = CR_CHAN(insn->chanspec);
- struct usbduxsub *this_usbduxsub = dev->private;
-
- if (!this_usbduxsub)
- return -EFAULT;
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
+ down(&devpriv->sem);
for (i = 0; i < insn->n; i++)
- data[i] = this_usbduxsub->out_buffer[chan];
+ data[i] = devpriv->ao_readback[chan];
+ up(&devpriv->sem);
- up(&this_usbduxsub->sem);
- return i;
+ return insn->n;
}
static int usbdux_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int i, err;
- int chan = CR_CHAN(insn->chanspec);
- struct usbduxsub *this_usbduxsub = dev->private;
+ struct usbdux_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int val = devpriv->ao_readback[chan];
+ int16_t *p = (int16_t *)&devpriv->dux_commands[2];
+ int ret = -EBUSY;
+ int i;
- if (!this_usbduxsub)
- return -EFAULT;
+ down(&devpriv->sem);
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: ao_insn_write\n", dev->minor);
+ if (devpriv->ao_cmd_running)
+ goto ao_write_exit;
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
- if (this_usbduxsub->ao_cmd_running) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: ao_insn_write: "
- "ERROR: asynchronous ao_cmd is running\n", dev->minor);
- up(&this_usbduxsub->sem);
- return 0;
- }
+ /* number of channels: 1 */
+ devpriv->dux_commands[1] = 1;
+ /* channel number */
+ devpriv->dux_commands[4] = chan << 6;
for (i = 0; i < insn->n; i++) {
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: ao_insn_write: data[chan=%d,i=%d]=%d\n",
- dev->minor, chan, i, data[i]);
+ val = data[i];
- /* number of channels: 1 */
- this_usbduxsub->dux_commands[1] = 1;
/* one 16 bit value */
- *((int16_t *) (this_usbduxsub->dux_commands + 2)) =
- cpu_to_le16(data[i]);
- this_usbduxsub->out_buffer[chan] = data[i];
- /* channel number */
- this_usbduxsub->dux_commands[4] = (chan << 6);
- err = send_dux_commands(this_usbduxsub, SENDDACOMMANDS);
- if (err < 0) {
- up(&this_usbduxsub->sem);
- return err;
- }
+ *p = cpu_to_le16(val);
+
+ ret = send_dux_commands(dev, USBDUX_CMD_AO);
+ if (ret < 0)
+ goto ao_write_exit;
}
- up(&this_usbduxsub->sem);
+ devpriv->ao_readback[chan] = val;
+
+ao_write_exit:
+ up(&devpriv->sem);
- return i;
+ return ret ? ret : insn->n;
}
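
The write path above packs one 16-bit sample little-endian at offset 2 of the command buffer and places the channel number, shifted into the top bits, at offset 4. A standalone sketch of that byte layout; the channel and sample values are examples only:

#include <stdio.h>

int main(void)
{
	unsigned char cmd[8] = { 0 };	/* stand-in for dux_commands */
	unsigned int chan = 2;		/* example channel */
	unsigned int val = 0x0abc;	/* example 12-bit sample */

	cmd[1] = 1;			/* number of channels: 1 */
	cmd[2] = val & 0xff;		/* 16-bit value, low byte first */
	cmd[3] = (val >> 8) & 0xff;
	cmd[4] = chan << 6;		/* channel number in the top bits */

	printf("%02x %02x %02x %02x %02x\n",
	       cmd[0], cmd[1], cmd[2], cmd[3], cmd[4]);
	return 0;
}
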
static int usbdux_ao_inttrig(struct comedi_device *dev,
- struct comedi_subdevice *s, unsigned int trignum)
+ struct comedi_subdevice *s,
+ unsigned int trignum)
{
- int ret;
- struct usbduxsub *this_usbduxsub = dev->private;
+ struct usbdux_private *devpriv = dev->private;
+ int ret = -EINVAL;
- if (!this_usbduxsub)
- return -EFAULT;
+ down(&devpriv->sem);
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
- if (trignum != 0) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux_ao_inttrig: invalid trignum\n",
- dev->minor);
- up(&this_usbduxsub->sem);
- return -EINVAL;
- }
- if (!(this_usbduxsub->ao_cmd_running)) {
- this_usbduxsub->ao_cmd_running = 1;
- ret = usbduxsub_submit_outurbs(this_usbduxsub);
+ if (trignum != 0)
+ goto ao_trig_exit;
+
+ if (!devpriv->ao_cmd_running) {
+ devpriv->ao_cmd_running = 1;
+ ret = usbdux_submit_urbs(dev, devpriv->ao_urbs,
+ devpriv->n_ao_urbs, 0);
if (ret < 0) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux_ao_inttrig: submitURB: "
- "err=%d\n", dev->minor, ret);
- this_usbduxsub->ao_cmd_running = 0;
- up(&this_usbduxsub->sem);
- return ret;
+ devpriv->ao_cmd_running = 0;
+ goto ao_trig_exit;
}
s->async->inttrig = NULL;
} else {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: ao_inttrig but acqu is already running.\n",
- dev->minor);
+ ret = -EBUSY;
}
- up(&this_usbduxsub->sem);
- return 1;
+
+ao_trig_exit:
+ up(&devpriv->sem);
+ return ret;
}
static int usbdux_ao_cmdtest(struct comedi_device *dev,
struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
- struct usbduxsub *this_usbduxsub = dev->private;
+ struct usbdux_private *this_usbduxsub = dev->private;
int err = 0;
unsigned int flags;
if (!this_usbduxsub)
return -EFAULT;
- if (!(this_usbduxsub->probed))
- return -ENODEV;
-
/* Step 1 : check if triggers are trivially valid */
err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT);
@@ -1451,99 +1027,72 @@ static int usbdux_ao_cmdtest(struct comedi_device *dev,
static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
+ struct usbdux_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
- unsigned int chan, gain;
- int i, ret;
- struct usbduxsub *this_usbduxsub = dev->private;
+ int ret = -EBUSY;
+ int i;
- if (!this_usbduxsub)
- return -EFAULT;
+ down(&devpriv->sem);
- down(&this_usbduxsub->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: %s\n", dev->minor, __func__);
+ if (devpriv->ao_cmd_running)
+ goto ao_cmd_exit;
/* set current channel of the running acquisition to zero */
s->async->cur_chan = 0;
+
for (i = 0; i < cmd->chanlist_len; ++i) {
- chan = CR_CHAN(cmd->chanlist[i]);
- gain = CR_RANGE(cmd->chanlist[i]);
- if (i >= NUMOUTCHANNELS) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: %s: channel list too long\n",
- dev->minor, __func__);
- break;
- }
- this_usbduxsub->dac_commands[i] = (chan << 6);
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: dac command for ch %d is %x\n",
- dev->minor, i, this_usbduxsub->dac_commands[i]);
+ unsigned int chan = CR_CHAN(cmd->chanlist[i]);
+
+ devpriv->ao_chanlist[i] = chan << 6;
}
/* we count in steps of 1ms (125us) */
/* 125us mode not used yet */
- if (0) { /* (this_usbduxsub->high_speed) */
+ if (0) { /* (devpriv->high_speed) */
/* 125us */
/* timing of the conversion itself: every 125 us */
- this_usbduxsub->ao_timer = cmd->convert_arg / 125000;
+ devpriv->ao_timer = cmd->convert_arg / 125000;
} else {
/* 1ms */
/* timing of the scan: we get all channels at once */
- this_usbduxsub->ao_timer = cmd->scan_begin_arg / 1000000;
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: scan_begin_src=%d, scan_begin_arg=%d, "
- "convert_src=%d, convert_arg=%d\n", dev->minor,
- cmd->scan_begin_src, cmd->scan_begin_arg,
- cmd->convert_src, cmd->convert_arg);
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: ao_timer=%d (ms)\n",
- dev->minor, this_usbduxsub->ao_timer);
- if (this_usbduxsub->ao_timer < 1) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: usbdux: ao_timer=%d, "
- "scan_begin_arg=%d. "
- "Not properly tested by cmdtest?\n",
- dev->minor, this_usbduxsub->ao_timer,
- cmd->scan_begin_arg);
- up(&this_usbduxsub->sem);
- return -EINVAL;
+ devpriv->ao_timer = cmd->scan_begin_arg / 1000000;
+ if (devpriv->ao_timer < 1) {
+ ret = -EINVAL;
+ goto ao_cmd_exit;
}
}
- this_usbduxsub->ao_counter = this_usbduxsub->ao_timer;
+
+ devpriv->ao_counter = devpriv->ao_timer;
if (cmd->stop_src == TRIG_COUNT) {
/* not continuous */
/* counter */
/* high speed also scans everything at once */
- if (0) { /* (this_usbduxsub->high_speed) */
- this_usbduxsub->ao_sample_count =
- (cmd->stop_arg) * (cmd->scan_end_arg);
+ if (0) { /* (devpriv->high_speed) */
+ devpriv->ao_sample_count = cmd->stop_arg *
+ cmd->scan_end_arg;
} else {
/* there's no scan as the scan has been */
/* perf inside the FX2 */
/* data arrives as one packet */
- this_usbduxsub->ao_sample_count = cmd->stop_arg;
+ devpriv->ao_sample_count = cmd->stop_arg;
}
- this_usbduxsub->ao_continous = 0;
+ devpriv->ao_continous = 0;
} else {
/* continous acquisition */
- this_usbduxsub->ao_continous = 1;
- this_usbduxsub->ao_sample_count = 0;
+ devpriv->ao_continous = 1;
+ devpriv->ao_sample_count = 0;
}
if (cmd->start_src == TRIG_NOW) {
/* enable this acquisition operation */
- this_usbduxsub->ao_cmd_running = 1;
- ret = usbduxsub_submit_outurbs(this_usbduxsub);
+ devpriv->ao_cmd_running = 1;
+ ret = usbdux_submit_urbs(dev, devpriv->ao_urbs,
+ devpriv->n_ao_urbs, 0);
if (ret < 0) {
- this_usbduxsub->ao_cmd_running = 0;
+ devpriv->ao_cmd_running = 0;
/* fixme: unlink here?? */
- up(&this_usbduxsub->sem);
- return ret;
+ goto ao_cmd_exit;
}
s->async->inttrig = NULL;
} else {
@@ -1553,149 +1102,123 @@ static int usbdux_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
s->async->inttrig = usbdux_ao_inttrig;
}
- up(&this_usbduxsub->sem);
- return 0;
+ao_cmd_exit:
+ up(&devpriv->sem);
+
+ return ret;
}
static int usbdux_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- int chan = CR_CHAN(insn->chanspec);
+ int ret;
- /* The input or output configuration of each digital line is
- * configured by a special insn_config instruction. chanspec
- * contains the channel to be changed, and data[0] contains the
- * value COMEDI_INPUT or COMEDI_OUTPUT. */
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= 1 << chan; /* 1 means Out */
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~(1 << chan);
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] =
- (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT;
- break;
- default:
- return -EINVAL;
- break;
- }
- /* we don't tell the firmware here as it would take 8 frames */
- /* to submit the information. We do it in the insn_bits. */
+ /*
+ * We don't tell the firmware here as it would take 8 frames
+ * to submit the information. We do it in the insn_bits.
+ */
return insn->n;
}
static int usbdux_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct usbduxsub *this_usbduxsub = dev->private;
- int err;
+ struct usbdux_private *devpriv = dev->private;
+ unsigned int mask = data[0];
+ unsigned int bits = data[1];
+ int ret;
- if (!this_usbduxsub)
- return -EFAULT;
+ down(&devpriv->sem);
- down(&this_usbduxsub->sem);
+ s->state &= ~mask;
+ s->state |= (bits & mask);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
+ devpriv->dux_commands[1] = s->io_bits;
+ devpriv->dux_commands[2] = s->state;
- /* The insn data is a mask in data[0] and the new data
- * in data[1], each channel cooresponding to a bit. */
- s->state &= ~data[0];
- s->state |= data[0] & data[1];
- this_usbduxsub->dux_commands[1] = s->io_bits;
- this_usbduxsub->dux_commands[2] = s->state;
-
- /* This command also tells the firmware to return */
- /* the digital input lines */
- err = send_dux_commands(this_usbduxsub, SENDDIOBITSCOMMAND);
- if (err < 0) {
- up(&this_usbduxsub->sem);
- return err;
- }
- err = receive_dux_commands(this_usbduxsub, SENDDIOBITSCOMMAND);
- if (err < 0) {
- up(&this_usbduxsub->sem);
- return err;
- }
+ /*
+ * This command also tells the firmware to return
+ * the digital input lines.
+ */
+ ret = send_dux_commands(dev, USBDUX_CMD_DIO_BITS);
+ if (ret < 0)
+ goto dio_exit;
+ ret = receive_dux_commands(dev, USBDUX_CMD_DIO_BITS);
+ if (ret < 0)
+ goto dio_exit;
- data[1] = le16_to_cpu(this_usbduxsub->insn_buffer[1]);
- up(&this_usbduxsub->sem);
- return insn->n;
+ data[1] = le16_to_cpu(devpriv->insn_buf[1]);
+
+dio_exit:
+ up(&devpriv->sem);
+
+ return ret ? ret : insn->n;
}
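
The state update in usbdux_dio_insn_bits() follows the usual comedi convention: data[0] is a mask of channels to change and data[1] carries their new values, so unmasked outputs keep their previous state. A standalone sketch with example values:

#include <stdio.h>

int main(void)
{
	unsigned int state = 0xa5;	/* example current output state */
	unsigned int mask = 0x0f;	/* data[0]: channels to update */
	unsigned int bits = 0x03;	/* data[1]: their new values */

	state &= ~mask;
	state |= bits & mask;

	printf("new state = 0x%02x\n", state);	/* prints 0xa3 */
	return 0;
}
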
-/* reads the 4 counters, only two are used just now */
static int usbdux_counter_read(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct usbduxsub *this_usbduxsub = dev->private;
- int chan = insn->chanspec;
- int err;
-
- if (!this_usbduxsub)
- return -EFAULT;
+ struct usbdux_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int ret = 0;
+ int i;
- down(&this_usbduxsub->sem);
+ down(&devpriv->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
+ for (i = 0; i < insn->n; i++) {
+ ret = send_dux_commands(dev, USBDUX_CMD_TIMER_RD);
+ if (ret < 0)
+ goto counter_read_exit;
+ ret = receive_dux_commands(dev, USBDUX_CMD_TIMER_RD);
+ if (ret < 0)
+ goto counter_read_exit;
- err = send_dux_commands(this_usbduxsub, READCOUNTERCOMMAND);
- if (err < 0) {
- up(&this_usbduxsub->sem);
- return err;
+ data[i] = le16_to_cpu(devpriv->insn_buf[chan + 1]);
}
- err = receive_dux_commands(this_usbduxsub, READCOUNTERCOMMAND);
- if (err < 0) {
- up(&this_usbduxsub->sem);
- return err;
- }
+counter_read_exit:
+ up(&devpriv->sem);
- data[0] = le16_to_cpu(this_usbduxsub->insn_buffer[chan + 1]);
- up(&this_usbduxsub->sem);
- return 1;
+ return ret ? ret : insn->n;
}
static int usbdux_counter_write(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct usbduxsub *this_usbduxsub = dev->private;
- int err;
-
- if (!this_usbduxsub)
- return -EFAULT;
+ struct usbdux_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ int16_t *p = (int16_t *)&devpriv->dux_commands[2];
+ int ret = 0;
+ int i;
- down(&this_usbduxsub->sem);
+ down(&devpriv->sem);
- if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
- }
+ devpriv->dux_commands[1] = chan;
- this_usbduxsub->dux_commands[1] = insn->chanspec;
- *((int16_t *) (this_usbduxsub->dux_commands + 2)) = cpu_to_le16(*data);
+ for (i = 0; i < insn->n; i++) {
+ *p = cpu_to_le16(data[i]);
- err = send_dux_commands(this_usbduxsub, WRITECOUNTERCOMMAND);
- if (err < 0) {
- up(&this_usbduxsub->sem);
- return err;
+ ret = send_dux_commands(dev, USBDUX_CMD_TIMER_WR);
+ if (ret < 0)
+ break;
}
- up(&this_usbduxsub->sem);
+ up(&devpriv->sem);
- return 1;
+ return ret ? ret : insn->n;
}
static int usbdux_counter_config(struct comedi_device *dev,
@@ -1706,73 +1229,43 @@ static int usbdux_counter_config(struct comedi_device *dev,
return 2;
}
-/***********************************/
-/* PWM */
-
-static int usbduxsub_unlink_pwm_urbs(struct usbduxsub *usbduxsub_tmp)
+static void usbduxsub_unlink_pwm_urbs(struct comedi_device *dev)
{
- int err = 0;
+ struct usbdux_private *devpriv = dev->private;
- if (usbduxsub_tmp && usbduxsub_tmp->urb_pwm) {
- if (usbduxsub_tmp->urb_pwm)
- usb_kill_urb(usbduxsub_tmp->urb_pwm);
- dev_dbg(&usbduxsub_tmp->interface->dev,
- "comedi: unlinked PwmURB: res=%d\n", err);
- }
- return err;
+ usb_kill_urb(devpriv->pwm_urb);
}
-/* This cancels a running acquisition operation
- * in any context.
- */
-static int usbdux_pwm_stop(struct usbduxsub *this_usbduxsub, int do_unlink)
+static void usbdux_pwm_stop(struct comedi_device *dev, int do_unlink)
{
- int ret = 0;
-
- if (!this_usbduxsub)
- return -EFAULT;
+ struct usbdux_private *devpriv = dev->private;
- dev_dbg(&this_usbduxsub->interface->dev, "comedi: %s\n", __func__);
if (do_unlink)
- ret = usbduxsub_unlink_pwm_urbs(this_usbduxsub);
-
- this_usbduxsub->pwm_cmd_running = 0;
+ usbduxsub_unlink_pwm_urbs(dev);
- return ret;
+ devpriv->pwm_cmd_running = 0;
}
-/* force unlink - is called by comedi */
static int usbdux_pwm_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- struct usbduxsub *this_usbduxsub = dev->private;
- int res = 0;
+ struct usbdux_private *devpriv = dev->private;
+ int ret;
+ down(&devpriv->sem);
/* unlink only if it is really running */
- res = usbdux_pwm_stop(this_usbduxsub, this_usbduxsub->pwm_cmd_running);
+ usbdux_pwm_stop(dev, devpriv->pwm_cmd_running);
+ ret = send_dux_commands(dev, USBDUX_CMD_PWM_OFF);
+ up(&devpriv->sem);
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi %d: sending pwm off command to the usb device.\n",
- dev->minor);
-
- return send_dux_commands(this_usbduxsub, SENDPWMOFF);
+ return ret;
}
static void usbduxsub_pwm_irq(struct urb *urb)
{
+ struct comedi_device *dev = urb->context;
+ struct usbdux_private *devpriv = dev->private;
int ret;
- struct usbduxsub *this_usbduxsub;
- struct comedi_device *this_comedidev;
- struct comedi_subdevice *s;
-
- /* printk(KERN_DEBUG "PWM: IRQ\n"); */
-
- /* the context variable points to the subdevice */
- this_comedidev = urb->context;
- /* the private structure of the subdevice is struct usbduxsub */
- this_usbduxsub = this_comedidev->private;
-
- s = &this_comedidev->subdevices[SUBDEV_DA];
switch (urb->status) {
case 0:
@@ -1787,220 +1280,171 @@ static void usbduxsub_pwm_irq(struct urb *urb)
* after an unlink command, unplug, ... etc
* no unlink needed here. Already shutting down.
*/
- if (this_usbduxsub->pwm_cmd_running)
- usbdux_pwm_stop(this_usbduxsub, 0);
+ if (devpriv->pwm_cmd_running)
+ usbdux_pwm_stop(dev, 0);
return;
default:
/* a real error */
- if (this_usbduxsub->pwm_cmd_running) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi_: Non-zero urb status received in "
- "pwm intr context: %d\n", urb->status);
- usbdux_pwm_stop(this_usbduxsub, 0);
+ if (devpriv->pwm_cmd_running) {
+ dev_err(dev->class_dev,
+ "Non-zero urb status received in pwm intr context: %d\n",
+ urb->status);
+ usbdux_pwm_stop(dev, 0);
}
return;
}
/* are we actually running? */
- if (!(this_usbduxsub->pwm_cmd_running))
+ if (!devpriv->pwm_cmd_running)
return;
- urb->transfer_buffer_length = this_usbduxsub->size_pwm_buf;
- urb->dev = this_usbduxsub->usbdev;
+ urb->transfer_buffer_length = devpriv->pwm_buf_sz;
+ urb->dev = comedi_to_usb_dev(dev);
urb->status = 0;
- if (this_usbduxsub->pwm_cmd_running) {
+ if (devpriv->pwm_cmd_running) {
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret < 0) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi_: pwm urb resubm failed in int-cont. "
- "ret=%d", ret);
+ dev_err(dev->class_dev,
+ "pwm urb resubm failed in int-cont. ret=%d",
+ ret);
if (ret == EL2NSYNC)
- dev_err(&this_usbduxsub->interface->dev,
- "buggy USB host controller or bug in "
- "IRQ handling!\n");
+ dev_err(dev->class_dev,
+ "buggy USB host controller or bug in IRQ handling!\n");
/* don't do an unlink here */
- usbdux_pwm_stop(this_usbduxsub, 0);
+ usbdux_pwm_stop(dev, 0);
}
}
}
-static int usbduxsub_submit_pwm_urbs(struct usbduxsub *usbduxsub)
+static int usbduxsub_submit_pwm_urbs(struct comedi_device *dev)
{
- int err_flag;
-
- if (!usbduxsub)
- return -EFAULT;
-
- dev_dbg(&usbduxsub->interface->dev, "comedi_: submitting pwm-urb\n");
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbdux_private *devpriv = dev->private;
+ struct urb *urb = devpriv->pwm_urb;
/* in case of a resubmission after an unlink... */
- usb_fill_bulk_urb(usbduxsub->urb_pwm,
- usbduxsub->usbdev,
- usb_sndbulkpipe(usbduxsub->usbdev, PWM_EP),
- usbduxsub->urb_pwm->transfer_buffer,
- usbduxsub->size_pwm_buf, usbduxsub_pwm_irq,
- usbduxsub->comedidev);
-
- err_flag = usb_submit_urb(usbduxsub->urb_pwm, GFP_ATOMIC);
- if (err_flag) {
- dev_err(&usbduxsub->interface->dev,
- "comedi_: usbdux: pwm: usb_submit_urb error %d\n",
- err_flag);
- return err_flag;
- }
- return 0;
+ usb_fill_bulk_urb(urb, usb, usb_sndbulkpipe(usb, 4),
+ urb->transfer_buffer,
+ devpriv->pwm_buf_sz,
+ usbduxsub_pwm_irq,
+ dev);
+
+ return usb_submit_urb(urb, GFP_ATOMIC);
}
static int usbdux_pwm_period(struct comedi_device *dev,
- struct comedi_subdevice *s, unsigned int period)
+ struct comedi_subdevice *s,
+ unsigned int period)
{
- struct usbduxsub *this_usbduxsub = dev->private;
+ struct usbdux_private *devpriv = dev->private;
int fx2delay = 255;
if (period < MIN_PWM_PERIOD) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: illegal period setting for pwm.\n",
- dev->minor);
return -EAGAIN;
} else {
- fx2delay = period / ((int)(6 * 512 * (1.0 / 0.033))) - 6;
- if (fx2delay > 255) {
- dev_err(&this_usbduxsub->interface->dev,
- "comedi%d: period %d for pwm is too low.\n",
- dev->minor, period);
+ fx2delay = (period / (6 * 512 * 1000 / 33)) - 6;
+ if (fx2delay > 255)
return -EAGAIN;
- }
}
- this_usbduxsub->pwn_delay = fx2delay;
- this_usbduxsub->pwm_period = period;
- dev_dbg(&this_usbduxsub->interface->dev, "%s: frequ=%d, period=%d\n",
- __func__, period, fx2delay);
+ devpriv->pwm_delay = fx2delay;
+ devpriv->pwm_period = period;
+
return 0;
}
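
usbdux_pwm_period() above derives the FX2 delay byte from the requested period. The integer constant 6 * 512 * 1000 / 33 reproduces the old floating-point expression 6 * 512 / 0.033, and the period appears to be given in nanoseconds (MIN_PWM_PERIOD is defined as 1E9/300). A standalone sketch of the same arithmetic; the example period is illustrative only:

#include <stdio.h>

int main(void)
{
	int period = 3333333;	/* example: ~300 Hz, i.e. 1e9/300 ns */
	int fx2delay = (period / (6 * 512 * 1000 / 33)) - 6;

	if (fx2delay < 0 || fx2delay > 255)
		printf("period out of range for the FX2 delay byte\n");
	else
		printf("fx2delay = %d\n", fx2delay);
	return 0;
}
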
-/* is called from insn so there's no need to do all the sanity checks */
static int usbdux_pwm_start(struct comedi_device *dev,
struct comedi_subdevice *s)
{
- int ret, i;
- struct usbduxsub *this_usbduxsub = dev->private;
+ struct usbdux_private *devpriv = dev->private;
+ int ret = 0;
- dev_dbg(&this_usbduxsub->interface->dev, "comedi%d: %s\n",
- dev->minor, __func__);
+ down(&devpriv->sem);
- if (this_usbduxsub->pwm_cmd_running) {
- /* already running */
- return 0;
- }
+ if (devpriv->pwm_cmd_running)
+ goto pwm_start_exit;
- this_usbduxsub->dux_commands[1] = ((int8_t) this_usbduxsub->pwn_delay);
- ret = send_dux_commands(this_usbduxsub, SENDPWMON);
+ devpriv->dux_commands[1] = devpriv->pwm_delay;
+ ret = send_dux_commands(dev, USBDUX_CMD_PWM_ON);
if (ret < 0)
- return ret;
+ goto pwm_start_exit;
/* initialise the buffer */
- for (i = 0; i < this_usbduxsub->size_pwm_buf; i++)
- ((char *)(this_usbduxsub->urb_pwm->transfer_buffer))[i] = 0;
+ memset(devpriv->pwm_urb->transfer_buffer, 0, devpriv->pwm_buf_sz);
- this_usbduxsub->pwm_cmd_running = 1;
- ret = usbduxsub_submit_pwm_urbs(this_usbduxsub);
- if (ret < 0) {
- this_usbduxsub->pwm_cmd_running = 0;
- return ret;
- }
- return 0;
+ devpriv->pwm_cmd_running = 1;
+ ret = usbduxsub_submit_pwm_urbs(dev);
+ if (ret < 0)
+ devpriv->pwm_cmd_running = 0;
+
+pwm_start_exit:
+ up(&devpriv->sem);
+
+ return ret;
}
-/* generates the bit pattern for PWM with the optional sign bit */
-static int usbdux_pwm_pattern(struct comedi_device *dev,
- struct comedi_subdevice *s, int channel,
- unsigned int value, unsigned int sign)
+static void usbdux_pwm_pattern(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int chan,
+ unsigned int value,
+ unsigned int sign)
{
- struct usbduxsub *this_usbduxsub = dev->private;
- int i, szbuf;
- char *p_buf;
- char pwm_mask;
- char sgn_mask;
- char c;
-
- if (!this_usbduxsub)
- return -EFAULT;
+ struct usbdux_private *devpriv = dev->private;
+ char pwm_mask = (1 << chan); /* DIO bit for the PWM data */
+ char sgn_mask = (16 << chan); /* DIO bit for the sign */
+ char *buf = (char *)(devpriv->pwm_urb->transfer_buffer);
+ int szbuf = devpriv->pwm_buf_sz;
+ int i;
- /* this is the DIO bit which carries the PWM data */
- pwm_mask = (1 << channel);
- /* this is the DIO bit which carries the optional direction bit */
- sgn_mask = (16 << channel);
- /* this is the buffer which will be filled with the with bit */
- /* pattern for one period */
- szbuf = this_usbduxsub->size_pwm_buf;
- p_buf = (char *)(this_usbduxsub->urb_pwm->transfer_buffer);
for (i = 0; i < szbuf; i++) {
- c = *p_buf;
- /* reset bits */
- c = c & (~pwm_mask);
- /* set the bit as long as the index is lower than the value */
+ char c = *buf;
+
+ c &= ~pwm_mask;
if (i < value)
- c = c | pwm_mask;
- /* set the optional sign bit for a relay */
- if (!sign) {
- /* positive value */
- c = c & (~sgn_mask);
- } else {
- /* negative value */
- c = c | sgn_mask;
- }
- *(p_buf++) = c;
+ c |= pwm_mask;
+ if (!sign)
+ c &= ~sgn_mask;
+ else
+ c |= sgn_mask;
+ *buf++ = c;
}
- return 1;
}
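
usbdux_pwm_pattern() above fills one buffer pass so that the channel's data bit is high for the first positions up to the requested value (the duty cycle), while the optional sign bit, four bit positions higher, is held constant over the whole buffer. A standalone sketch with a small example buffer; the buffer size, channel, value and sign are examples only:

#include <stdio.h>

int main(void)
{
	unsigned char buf[16] = { 0 };	/* stand-in for the PWM URB buffer */
	unsigned int chan = 1;		/* example channel */
	unsigned int value = 5;		/* example duty cycle, in buffer slots */
	unsigned int sign = 1;		/* example sign/direction bit */
	unsigned char pwm_mask = 1 << chan;	/* data bit for this channel */
	unsigned char sgn_mask = 16 << chan;	/* sign bit, 4 positions up */
	unsigned int i;

	for (i = 0; i < sizeof(buf); i++) {
		buf[i] &= ~pwm_mask;
		if (i < value)
			buf[i] |= pwm_mask;
		if (sign)
			buf[i] |= sgn_mask;
		else
			buf[i] &= ~sgn_mask;
		printf("%02x ", buf[i]);
	}
	printf("\n");
	return 0;
}
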
static int usbdux_pwm_write(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct usbduxsub *this_usbduxsub = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
- if (!this_usbduxsub)
- return -EFAULT;
-
- if ((insn->n) != 1) {
- /*
- * doesn't make sense to have more than one value here because
- * it would just overwrite the PWM buffer a couple of times
- */
+ /*
+ * It doesn't make sense to support more than one value here
+ * because it would just overwrite the PWM buffer.
+ */
+ if (insn->n != 1)
return -EINVAL;
- }
/*
- * the sign is set via a special INSN only, this gives us 8 bits for
- * normal operation
- * relay sign 0 by default
+ * The sign is set via a special INSN only, this gives us 8 bits
+ * for normal operation, sign is 0 by default.
*/
- return usbdux_pwm_pattern(dev, s, CR_CHAN(insn->chanspec), data[0], 0);
-}
+ usbdux_pwm_pattern(dev, s, chan, data[0], 0);
-static int usbdux_pwm_read(struct comedi_device *x1,
- struct comedi_subdevice *x2, struct comedi_insn *x3,
- unsigned int *x4)
-{
- /* not needed */
- return -EINVAL;
-};
+ return insn->n;
+}
-/* switches on/off PWM */
static int usbdux_pwm_config(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct usbduxsub *this_usbduxsub = dev->private;
+ struct usbdux_private *devpriv = dev->private;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+
switch (data[0]) {
case INSN_CONFIG_ARM:
- /* switch it on */
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: %s: pwm on\n", dev->minor, __func__);
/*
* if not zero the PWM is limited to a certain time which is
* not supported here
@@ -2009,33 +1453,22 @@ static int usbdux_pwm_config(struct comedi_device *dev,
return -EINVAL;
return usbdux_pwm_start(dev, s);
case INSN_CONFIG_DISARM:
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: %s: pwm off\n", dev->minor, __func__);
return usbdux_pwm_cancel(dev, s);
case INSN_CONFIG_GET_PWM_STATUS:
- /*
- * to check if the USB transmission has failed or in case PWM
- * was limited to n cycles to check if it has terminated
- */
- data[1] = this_usbduxsub->pwm_cmd_running;
+ data[1] = devpriv->pwm_cmd_running;
return 0;
case INSN_CONFIG_PWM_SET_PERIOD:
- dev_dbg(&this_usbduxsub->interface->dev,
- "comedi%d: %s: setting period\n", dev->minor, __func__);
return usbdux_pwm_period(dev, s, data[1]);
case INSN_CONFIG_PWM_GET_PERIOD:
- data[1] = this_usbduxsub->pwm_period;
+ data[1] = devpriv->pwm_period;
return 0;
case INSN_CONFIG_PWM_SET_H_BRIDGE:
- /* value in the first byte and the sign in the second for a
- relay */
- return usbdux_pwm_pattern(dev, s,
- /* the channel number */
- CR_CHAN(insn->chanspec),
- /* actual PWM data */
- data[1],
- /* just a sign */
- (data[2] != 0));
+ /*
+ * data[1] = value
+ * data[2] = sign (for a relay)
+ */
+ usbdux_pwm_pattern(dev, s, chan, data[1], (data[2] != 0));
+ return 0;
case INSN_CONFIG_PWM_GET_H_BRIDGE:
/* values are not kept in this driver, nothing to return here */
return -EINVAL;
@@ -2043,253 +1476,331 @@ static int usbdux_pwm_config(struct comedi_device *dev,
return -EINVAL;
}
-/* end of PWM */
-/*****************************************************************/
-
-static void tidy_up(struct usbduxsub *usbduxsub_tmp)
+static int usbdux_firmware_upload(struct comedi_device *dev,
+ const u8 *data, size_t size,
+ unsigned long context)
{
- int i;
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ uint8_t *buf;
+ uint8_t *tmp;
+ int ret;
- if (!usbduxsub_tmp)
- return;
- dev_dbg(&usbduxsub_tmp->interface->dev, "comedi_: tiding up\n");
+ if (!data)
+ return 0;
- /* shows the usb subsystem that the driver is down */
- if (usbduxsub_tmp->interface)
- usb_set_intfdata(usbduxsub_tmp->interface, NULL);
+ if (size > USBDUX_FIRMWARE_MAX_LEN) {
+ dev_err(dev->class_dev,
+ "usbdux firmware binary it too large for FX2.\n");
+ return -ENOMEM;
+ }
- usbduxsub_tmp->probed = 0;
+ /* we generate a local buffer for the firmware */
+ buf = kmemdup(data, size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
- if (usbduxsub_tmp->urb_in) {
- if (usbduxsub_tmp->ai_cmd_running) {
- usbduxsub_tmp->ai_cmd_running = 0;
- usbduxsub_unlink_inurbs(usbduxsub_tmp);
- }
- for (i = 0; i < usbduxsub_tmp->num_in_buffers; i++) {
- kfree(usbduxsub_tmp->urb_in[i]->transfer_buffer);
- usbduxsub_tmp->urb_in[i]->transfer_buffer = NULL;
- usb_kill_urb(usbduxsub_tmp->urb_in[i]);
- usb_free_urb(usbduxsub_tmp->urb_in[i]);
- usbduxsub_tmp->urb_in[i] = NULL;
- }
- kfree(usbduxsub_tmp->urb_in);
- usbduxsub_tmp->urb_in = NULL;
+ /* we need a malloc'ed buffer for usb_control_msg() */
+ tmp = kmalloc(1, GFP_KERNEL);
+ if (!tmp) {
+ kfree(buf);
+ return -ENOMEM;
}
- if (usbduxsub_tmp->urb_out) {
- if (usbduxsub_tmp->ao_cmd_running) {
- usbduxsub_tmp->ao_cmd_running = 0;
- usbduxsub_unlink_outurbs(usbduxsub_tmp);
- }
- for (i = 0; i < usbduxsub_tmp->num_out_buffers; i++) {
- kfree(usbduxsub_tmp->urb_out[i]->transfer_buffer);
- usbduxsub_tmp->urb_out[i]->transfer_buffer = NULL;
- if (usbduxsub_tmp->urb_out[i]) {
- usb_kill_urb(usbduxsub_tmp->urb_out[i]);
- usb_free_urb(usbduxsub_tmp->urb_out[i]);
- usbduxsub_tmp->urb_out[i] = NULL;
- }
- }
- kfree(usbduxsub_tmp->urb_out);
- usbduxsub_tmp->urb_out = NULL;
+
+ /* stop the current firmware on the device */
+ *tmp = 1; /* 7f92 to one */
+ ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
+ USBDUX_FIRMWARE_CMD,
+ VENDOR_DIR_OUT,
+ USBDUX_CPU_CS, 0x0000,
+ tmp, 1,
+ BULK_TIMEOUT);
+ if (ret < 0) {
+ dev_err(dev->class_dev, "can not stop firmware\n");
+ goto done;
}
- if (usbduxsub_tmp->urb_pwm) {
- if (usbduxsub_tmp->pwm_cmd_running) {
- usbduxsub_tmp->pwm_cmd_running = 0;
- usbduxsub_unlink_pwm_urbs(usbduxsub_tmp);
- }
- kfree(usbduxsub_tmp->urb_pwm->transfer_buffer);
- usbduxsub_tmp->urb_pwm->transfer_buffer = NULL;
- usb_kill_urb(usbduxsub_tmp->urb_pwm);
- usb_free_urb(usbduxsub_tmp->urb_pwm);
- usbduxsub_tmp->urb_pwm = NULL;
+
+ /* upload the new firmware to the device */
+ ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
+ USBDUX_FIRMWARE_CMD,
+ VENDOR_DIR_OUT,
+ 0, 0x0000,
+ buf, size,
+ BULK_TIMEOUT);
+ if (ret < 0) {
+ dev_err(dev->class_dev, "firmware upload failed\n");
+ goto done;
}
- kfree(usbduxsub_tmp->in_buffer);
- usbduxsub_tmp->in_buffer = NULL;
- kfree(usbduxsub_tmp->insn_buffer);
- usbduxsub_tmp->insn_buffer = NULL;
- kfree(usbduxsub_tmp->out_buffer);
- usbduxsub_tmp->out_buffer = NULL;
- kfree(usbduxsub_tmp->dac_commands);
- usbduxsub_tmp->dac_commands = NULL;
- kfree(usbduxsub_tmp->dux_commands);
- usbduxsub_tmp->dux_commands = NULL;
- usbduxsub_tmp->ai_cmd_running = 0;
- usbduxsub_tmp->ao_cmd_running = 0;
- usbduxsub_tmp->pwm_cmd_running = 0;
+
+ /* start the new firmware on the device */
+ *tmp = 0; /* 7f92 to zero */
+ ret = usb_control_msg(usb, usb_sndctrlpipe(usb, 0),
+ USBDUX_FIRMWARE_CMD,
+ VENDOR_DIR_OUT,
+ USBDUX_CPU_CS, 0x0000,
+ tmp, 1,
+ BULK_TIMEOUT);
+ if (ret < 0)
+ dev_err(dev->class_dev, "can not start firmware\n");
+
+done:
+ kfree(tmp);
+ kfree(buf);
+ return ret;
}
-static int usbdux_attach_common(struct comedi_device *dev,
- struct usbduxsub *udev)
+static int usbdux_alloc_usb_buffers(struct comedi_device *dev)
{
- int ret;
- struct comedi_subdevice *s = NULL;
- int n_subdevs;
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbdux_private *devpriv = dev->private;
+ struct urb *urb;
+ int i;
- down(&udev->sem);
- /* pointer back to the corresponding comedi device */
- udev->comedidev = dev;
+ devpriv->dux_commands = kzalloc(SIZEOFDUXBUFFER, GFP_KERNEL);
+ devpriv->in_buf = kzalloc(SIZEINBUF, GFP_KERNEL);
+ devpriv->insn_buf = kzalloc(SIZEINSNBUF, GFP_KERNEL);
+ devpriv->ai_urbs = kcalloc(devpriv->n_ai_urbs, sizeof(void *),
+ GFP_KERNEL);
+ devpriv->ao_urbs = kcalloc(devpriv->n_ao_urbs, sizeof(void *),
+ GFP_KERNEL);
+ if (!devpriv->dux_commands || !devpriv->in_buf || !devpriv->insn_buf ||
+ !devpriv->ai_urbs || !devpriv->ao_urbs)
+ return -ENOMEM;
- /* set number of subdevices */
- if (udev->high_speed) {
- /* with pwm */
- n_subdevs = 5;
- } else {
- /* without pwm */
- n_subdevs = 4;
- }
+ for (i = 0; i < devpriv->n_ai_urbs; i++) {
+ /* one frame: 1ms */
+ urb = usb_alloc_urb(1, GFP_KERNEL);
+ if (!urb)
+ return -ENOMEM;
+ devpriv->ai_urbs[i] = urb;
+
+ urb->dev = usb;
+ urb->context = dev;
+ urb->pipe = usb_rcvisocpipe(usb, 6);
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->transfer_buffer = kzalloc(SIZEINBUF, GFP_KERNEL);
+ if (!urb->transfer_buffer)
+ return -ENOMEM;
- ret = comedi_alloc_subdevices(dev, n_subdevs);
- if (ret) {
- up(&udev->sem);
- return ret;
+ urb->complete = usbduxsub_ai_isoc_irq;
+ urb->number_of_packets = 1;
+ urb->transfer_buffer_length = SIZEINBUF;
+ urb->iso_frame_desc[0].offset = 0;
+ urb->iso_frame_desc[0].length = SIZEINBUF;
}
- /* private structure is also simply the usb-structure */
- dev->private = udev;
+ for (i = 0; i < devpriv->n_ao_urbs; i++) {
+ /* one frame: 1ms */
+ urb = usb_alloc_urb(1, GFP_KERNEL);
+ if (!urb)
+ return -ENOMEM;
+ devpriv->ao_urbs[i] = urb;
+
+ urb->dev = usb;
+ urb->context = dev;
+ urb->pipe = usb_sndisocpipe(usb, 2);
+ urb->transfer_flags = URB_ISO_ASAP;
+ urb->transfer_buffer = kzalloc(SIZEOUTBUF, GFP_KERNEL);
+ if (!urb->transfer_buffer)
+ return -ENOMEM;
- /* the first subdevice is the A/D converter */
- s = &dev->subdevices[SUBDEV_AD];
- /* the URBs get the comedi subdevice */
- /* which is responsible for reading */
- /* this is the subdevice which reads data */
- dev->read_subdev = s;
- /* the subdevice receives as private structure the */
- /* usb-structure */
- s->private = NULL;
- /* analog input */
- s->type = COMEDI_SUBD_AI;
- /* readable and ref is to ground */
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ;
- /* 8 channels */
- s->n_chan = 8;
- /* length of the channellist */
- s->len_chanlist = 8;
- /* callback functions */
- s->insn_read = usbdux_ai_insn_read;
- s->do_cmdtest = usbdux_ai_cmdtest;
- s->do_cmd = usbdux_ai_cmd;
- s->cancel = usbdux_ai_cancel;
- /* max value from the A/D converter (12bit) */
- s->maxdata = 0xfff;
- /* range table to convert to physical units */
- s->range_table = (&range_usbdux_ai_range);
-
- /* analog out */
- s = &dev->subdevices[SUBDEV_DA];
- /* analog out */
- s->type = COMEDI_SUBD_AO;
- /* backward pointer */
- dev->write_subdev = s;
- /* the subdevice receives as private structure the */
- /* usb-structure */
- s->private = NULL;
- /* are writable */
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_CMD_WRITE;
- /* 4 channels */
- s->n_chan = 4;
- /* length of the channellist */
- s->len_chanlist = 4;
- /* 12 bit resolution */
- s->maxdata = 0x0fff;
- /* bipolar range */
- s->range_table = (&range_usbdux_ao_range);
- /* callback */
- s->do_cmdtest = usbdux_ao_cmdtest;
- s->do_cmd = usbdux_ao_cmd;
- s->cancel = usbdux_ao_cancel;
- s->insn_read = usbdux_ao_insn_read;
- s->insn_write = usbdux_ao_insn_write;
-
- /* digital I/O */
- s = &dev->subdevices[SUBDEV_DIO];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 8;
- s->maxdata = 1;
- s->range_table = (&range_digital);
- s->insn_bits = usbdux_dio_insn_bits;
- s->insn_config = usbdux_dio_insn_config;
- /* we don't use it */
- s->private = NULL;
-
- /* counter */
- s = &dev->subdevices[SUBDEV_COUNTER];
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
- s->n_chan = 4;
- s->maxdata = 0xFFFF;
- s->insn_read = usbdux_counter_read;
- s->insn_write = usbdux_counter_write;
- s->insn_config = usbdux_counter_config;
-
- if (udev->high_speed) {
- /* timer / pwm */
- s = &dev->subdevices[SUBDEV_PWM];
- s->type = COMEDI_SUBD_PWM;
- s->subdev_flags = SDF_WRITABLE | SDF_PWM_HBRIDGE;
- s->n_chan = 8;
- /* this defines the max duty cycle resolution */
- s->maxdata = udev->size_pwm_buf;
- s->insn_write = usbdux_pwm_write;
- s->insn_read = usbdux_pwm_read;
- s->insn_config = usbdux_pwm_config;
- usbdux_pwm_period(dev, s, PWM_DEFAULT_PERIOD);
+ urb->complete = usbduxsub_ao_isoc_irq;
+ urb->number_of_packets = 1;
+ urb->transfer_buffer_length = SIZEOUTBUF;
+ urb->iso_frame_desc[0].offset = 0;
+ urb->iso_frame_desc[0].length = SIZEOUTBUF;
+ if (devpriv->high_speed)
+ urb->interval = 8; /* uframes */
+ else
+ urb->interval = 1; /* frames */
}
- /* finally decide that it's attached */
- udev->attached = 1;
- up(&udev->sem);
+ /* pwm */
+ if (devpriv->pwm_buf_sz) {
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ return -ENOMEM;
+ devpriv->pwm_urb = urb;
- dev_info(&udev->interface->dev, "comedi%d: attached to usbdux.\n",
- dev->minor);
+ /* max bulk ep size in high speed */
+ urb->transfer_buffer = kzalloc(devpriv->pwm_buf_sz,
+ GFP_KERNEL);
+ if (!urb->transfer_buffer)
+ return -ENOMEM;
+ }
return 0;
}
+static void usbdux_free_usb_buffers(struct comedi_device *dev)
+{
+ struct usbdux_private *devpriv = dev->private;
+ struct urb *urb;
+ int i;
+
+ urb = devpriv->pwm_urb;
+ if (urb) {
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ }
+ if (devpriv->ao_urbs) {
+ for (i = 0; i < devpriv->n_ao_urbs; i++) {
+ urb = devpriv->ao_urbs[i];
+ if (urb) {
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ }
+ }
+ kfree(devpriv->ao_urbs);
+ }
+ if (devpriv->ai_urbs) {
+ for (i = 0; i < devpriv->n_ai_urbs; i++) {
+ urb = devpriv->ai_urbs[i];
+ if (urb) {
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ }
+ }
+ kfree(devpriv->ai_urbs);
+ }
+ kfree(devpriv->insn_buf);
+ kfree(devpriv->in_buf);
+ kfree(devpriv->dux_commands);
+}
+
static int usbdux_auto_attach(struct comedi_device *dev,
unsigned long context_unused)
{
- struct usb_interface *uinterf = comedi_to_usb_interface(dev);
- struct usbduxsub *this_usbduxsub = usb_get_intfdata(uinterf);
- struct usb_device *usb = usbduxsub->usbdev;
+ struct usb_interface *intf = comedi_to_usb_interface(dev);
+ struct usb_device *usb = comedi_to_usb_dev(dev);
+ struct usbdux_private *devpriv;
+ struct comedi_subdevice *s;
int ret;
- dev->private = this_usbduxsub; /* This is temporary... */
- ret = comedi_load_firmware(dev, &usb->dev, FIRMWARE,
- usbdux_firmware_upload, 0);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
+ if (!devpriv)
+ return -ENOMEM;
+
+ sema_init(&devpriv->sem, 1);
+
+ usb_set_intfdata(intf, devpriv);
+
+ devpriv->high_speed = (usb->speed == USB_SPEED_HIGH);
+ if (devpriv->high_speed) {
+ devpriv->n_ai_urbs = NUMOFINBUFFERSHIGH;
+ devpriv->n_ao_urbs = NUMOFOUTBUFFERSHIGH;
+ devpriv->pwm_buf_sz = 512;
+ } else {
+ devpriv->n_ai_urbs = NUMOFINBUFFERSFULL;
+ devpriv->n_ao_urbs = NUMOFOUTBUFFERSFULL;
+ }
+
+ ret = usbdux_alloc_usb_buffers(dev);
+ if (ret)
+ return ret;
+
+ /* setting to alternate setting 3: enabling iso ep and bulk ep. */
+ ret = usb_set_interface(usb, intf->altsetting->desc.bInterfaceNumber,
+ 3);
if (ret < 0) {
- dev->private = NULL;
+ dev_err(dev->class_dev,
+ "could not set alternate setting 3 in high speed\n");
return ret;
}
- dev->private = NULL;
+ ret = comedi_load_firmware(dev, &usb->dev, USBDUX_FIRMWARE,
+ usbdux_firmware_upload, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = comedi_alloc_subdevices(dev, (devpriv->high_speed) ? 5 : 4);
+ if (ret)
+ return ret;
- down(&start_stop_sem);
- if (!this_usbduxsub || !this_usbduxsub->probed) {
- dev_err(dev->class_dev,
- "usbdux: error: auto_attach failed, not connected\n");
- ret = -ENODEV;
- } else if (this_usbduxsub->attached) {
- dev_err(dev->class_dev,
- "error: auto_attach failed, already attached\n");
- ret = -ENODEV;
- } else
- ret = usbdux_attach_common(dev, this_usbduxsub);
- up(&start_stop_sem);
- return ret;
+ /* Analog Input subdevice */
+ s = &dev->subdevices[0];
+ dev->read_subdev = s;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ;
+ s->n_chan = 8;
+ s->maxdata = 0x0fff;
+ s->len_chanlist = 8;
+ s->range_table = &range_usbdux_ai_range;
+ s->insn_read = usbdux_ai_insn_read;
+ s->do_cmdtest = usbdux_ai_cmdtest;
+ s->do_cmd = usbdux_ai_cmd;
+ s->cancel = usbdux_ai_cancel;
+
+ /* Analog Output subdevice */
+ s = &dev->subdevices[1];
+ dev->write_subdev = s;
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_CMD_WRITE;
+ s->n_chan = USBDUX_NUM_AO_CHAN;
+ s->maxdata = 0x0fff;
+ s->len_chanlist = s->n_chan;
+ s->range_table = &range_usbdux_ao_range;
+ s->do_cmdtest = usbdux_ao_cmdtest;
+ s->do_cmd = usbdux_ao_cmd;
+ s->cancel = usbdux_ao_cancel;
+ s->insn_read = usbdux_ao_insn_read;
+ s->insn_write = usbdux_ao_insn_write;
+
+ /* Digital I/O subdevice */
+ s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = 8;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = usbdux_dio_insn_bits;
+ s->insn_config = usbdux_dio_insn_config;
+
+ /* Counter subdevice */
+ s = &dev->subdevices[3];
+ s->type = COMEDI_SUBD_COUNTER;
+ s->subdev_flags = SDF_WRITABLE | SDF_READABLE;
+ s->n_chan = 4;
+ s->maxdata = 0xffff;
+ s->insn_read = usbdux_counter_read;
+ s->insn_write = usbdux_counter_write;
+ s->insn_config = usbdux_counter_config;
+
+ if (devpriv->high_speed) {
+ /* PWM subdevice */
+ s = &dev->subdevices[4];
+ s->type = COMEDI_SUBD_PWM;
+ s->subdev_flags = SDF_WRITABLE | SDF_PWM_HBRIDGE;
+ s->n_chan = 8;
+ s->maxdata = devpriv->pwm_buf_sz;
+ s->insn_write = usbdux_pwm_write;
+ s->insn_config = usbdux_pwm_config;
+
+ usbdux_pwm_period(dev, s, PWM_DEFAULT_PERIOD);
+ }
+
+ return 0;
}
static void usbdux_detach(struct comedi_device *dev)
{
- struct usbduxsub *usb = dev->private;
-
- if (usb) {
- down(&usb->sem);
- dev->private = NULL;
- usb->attached = 0;
- usb->comedidev = NULL;
- up(&usb->sem);
- }
+ struct usb_interface *intf = comedi_to_usb_interface(dev);
+ struct usbdux_private *devpriv = dev->private;
+
+ usb_set_intfdata(intf, NULL);
+
+ if (!devpriv)
+ return;
+
+ down(&devpriv->sem);
+
+ /* force unlink all urbs */
+ usbdux_pwm_stop(dev, 1);
+ usbdux_ao_stop(dev, 1);
+ usbdux_ai_stop(dev, 1);
+
+ usbdux_free_usb_buffers(dev);
+
+ up(&devpriv->sem);
}
static struct comedi_driver usbdux_driver = {
@@ -2299,253 +1810,10 @@ static struct comedi_driver usbdux_driver = {
.detach = usbdux_detach,
};
-static int usbdux_usb_probe(struct usb_interface *uinterf,
+static int usbdux_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct usb_device *udev = interface_to_usbdev(uinterf);
- struct device *dev = &uinterf->dev;
- int i;
- int index;
-
- dev_dbg(dev, "comedi_: usbdux_: "
- "finding a free structure for the usb-device\n");
-
- down(&start_stop_sem);
- /* look for a free place in the usbdux array */
- index = -1;
- for (i = 0; i < NUMUSBDUX; i++) {
- if (!(usbduxsub[i].probed)) {
- index = i;
- break;
- }
- }
-
- /* no more space */
- if (index == -1) {
- dev_err(dev, "Too many usbdux-devices connected.\n");
- up(&start_stop_sem);
- return -EMFILE;
- }
- dev_dbg(dev, "comedi_: usbdux: "
- "usbduxsub[%d] is ready to connect to comedi.\n", index);
-
- sema_init(&(usbduxsub[index].sem), 1);
- /* save a pointer to the usb device */
- usbduxsub[index].usbdev = udev;
-
- /* 2.6: save the interface itself */
- usbduxsub[index].interface = uinterf;
- /* get the interface number from the interface */
- usbduxsub[index].ifnum = uinterf->altsetting->desc.bInterfaceNumber;
- /* hand the private data over to the usb subsystem */
- /* will be needed for disconnect */
- usb_set_intfdata(uinterf, &(usbduxsub[index]));
-
- dev_dbg(dev, "comedi_: usbdux: ifnum=%d\n", usbduxsub[index].ifnum);
-
- /* test if it is high speed (USB 2.0) */
- usbduxsub[index].high_speed =
- (usbduxsub[index].usbdev->speed == USB_SPEED_HIGH);
-
- /* create space for the commands of the DA converter */
- usbduxsub[index].dac_commands = kzalloc(NUMOUTCHANNELS, GFP_KERNEL);
- if (!usbduxsub[index].dac_commands) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- /* create space for the commands going to the usb device */
- usbduxsub[index].dux_commands = kzalloc(SIZEOFDUXBUFFER, GFP_KERNEL);
- if (!usbduxsub[index].dux_commands) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- /* create space for the in buffer and set it to zero */
- usbduxsub[index].in_buffer = kzalloc(SIZEINBUF, GFP_KERNEL);
- if (!(usbduxsub[index].in_buffer)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- /* create space of the instruction buffer */
- usbduxsub[index].insn_buffer = kzalloc(SIZEINSNBUF, GFP_KERNEL);
- if (!(usbduxsub[index].insn_buffer)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- /* create space for the outbuffer */
- usbduxsub[index].out_buffer = kzalloc(SIZEOUTBUF, GFP_KERNEL);
- if (!(usbduxsub[index].out_buffer)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- /* setting to alternate setting 3: enabling iso ep and bulk ep. */
- i = usb_set_interface(usbduxsub[index].usbdev,
- usbduxsub[index].ifnum, 3);
- if (i < 0) {
- dev_err(dev, "comedi_: usbdux%d: "
- "could not set alternate setting 3 in high speed.\n",
- index);
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENODEV;
- }
- if (usbduxsub[index].high_speed)
- usbduxsub[index].num_in_buffers = NUMOFINBUFFERSHIGH;
- else
- usbduxsub[index].num_in_buffers = NUMOFINBUFFERSFULL;
-
- usbduxsub[index].urb_in =
- kcalloc(usbduxsub[index].num_in_buffers, sizeof(struct urb *),
- GFP_KERNEL);
- if (!(usbduxsub[index].urb_in)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- for (i = 0; i < usbduxsub[index].num_in_buffers; i++) {
- /* one frame: 1ms */
- usbduxsub[index].urb_in[i] = usb_alloc_urb(1, GFP_KERNEL);
- if (usbduxsub[index].urb_in[i] == NULL) {
- dev_err(dev, "comedi_: usbdux%d: "
- "Could not alloc. urb(%d)\n", index, i);
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- usbduxsub[index].urb_in[i]->dev = usbduxsub[index].usbdev;
- /* will be filled later with a pointer to the comedi-device */
- /* and ONLY then the urb should be submitted */
- usbduxsub[index].urb_in[i]->context = NULL;
- usbduxsub[index].urb_in[i]->pipe =
- usb_rcvisocpipe(usbduxsub[index].usbdev, ISOINEP);
- usbduxsub[index].urb_in[i]->transfer_flags = URB_ISO_ASAP;
- usbduxsub[index].urb_in[i]->transfer_buffer =
- kzalloc(SIZEINBUF, GFP_KERNEL);
- if (!(usbduxsub[index].urb_in[i]->transfer_buffer)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- usbduxsub[index].urb_in[i]->complete = usbduxsub_ai_isoc_irq;
- usbduxsub[index].urb_in[i]->number_of_packets = 1;
- usbduxsub[index].urb_in[i]->transfer_buffer_length = SIZEINBUF;
- usbduxsub[index].urb_in[i]->iso_frame_desc[0].offset = 0;
- usbduxsub[index].urb_in[i]->iso_frame_desc[0].length = SIZEINBUF;
- }
-
- /* out */
- if (usbduxsub[index].high_speed)
- usbduxsub[index].num_out_buffers = NUMOFOUTBUFFERSHIGH;
- else
- usbduxsub[index].num_out_buffers = NUMOFOUTBUFFERSFULL;
-
- usbduxsub[index].urb_out =
- kcalloc(usbduxsub[index].num_out_buffers, sizeof(struct urb *),
- GFP_KERNEL);
- if (!(usbduxsub[index].urb_out)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- for (i = 0; i < usbduxsub[index].num_out_buffers; i++) {
- /* one frame: 1ms */
- usbduxsub[index].urb_out[i] = usb_alloc_urb(1, GFP_KERNEL);
- if (usbduxsub[index].urb_out[i] == NULL) {
- dev_err(dev, "comedi_: usbdux%d: "
- "Could not alloc. urb(%d)\n", index, i);
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- usbduxsub[index].urb_out[i]->dev = usbduxsub[index].usbdev;
- /* will be filled later with a pointer to the comedi-device */
- /* and ONLY then the urb should be submitted */
- usbduxsub[index].urb_out[i]->context = NULL;
- usbduxsub[index].urb_out[i]->pipe =
- usb_sndisocpipe(usbduxsub[index].usbdev, ISOOUTEP);
- usbduxsub[index].urb_out[i]->transfer_flags = URB_ISO_ASAP;
- usbduxsub[index].urb_out[i]->transfer_buffer =
- kzalloc(SIZEOUTBUF, GFP_KERNEL);
- if (!(usbduxsub[index].urb_out[i]->transfer_buffer)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- usbduxsub[index].urb_out[i]->complete = usbduxsub_ao_isoc_irq;
- usbduxsub[index].urb_out[i]->number_of_packets = 1;
- usbduxsub[index].urb_out[i]->transfer_buffer_length = SIZEOUTBUF;
- usbduxsub[index].urb_out[i]->iso_frame_desc[0].offset = 0;
- usbduxsub[index].urb_out[i]->iso_frame_desc[0].length =
- SIZEOUTBUF;
- if (usbduxsub[index].high_speed) {
- /* uframes */
- usbduxsub[index].urb_out[i]->interval = 8;
- } else {
- /* frames */
- usbduxsub[index].urb_out[i]->interval = 1;
- }
- }
-
- /* pwm */
- if (usbduxsub[index].high_speed) {
- /* max bulk ep size in high speed */
- usbduxsub[index].size_pwm_buf = 512;
- usbduxsub[index].urb_pwm = usb_alloc_urb(0, GFP_KERNEL);
- if (usbduxsub[index].urb_pwm == NULL) {
- dev_err(dev, "comedi_: usbdux%d: "
- "Could not alloc. pwm urb\n", index);
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- usbduxsub[index].urb_pwm->transfer_buffer =
- kzalloc(usbduxsub[index].size_pwm_buf, GFP_KERNEL);
- if (!(usbduxsub[index].urb_pwm->transfer_buffer)) {
- tidy_up(&(usbduxsub[index]));
- up(&start_stop_sem);
- return -ENOMEM;
- }
- } else {
- usbduxsub[index].urb_pwm = NULL;
- usbduxsub[index].size_pwm_buf = 0;
- }
-
- usbduxsub[index].ai_cmd_running = 0;
- usbduxsub[index].ao_cmd_running = 0;
- usbduxsub[index].pwm_cmd_running = 0;
-
- /* we've reached the bottom of the function */
- usbduxsub[index].probed = 1;
- up(&start_stop_sem);
-
- return comedi_usb_auto_config(uinterf, &usbdux_driver, 0);
-}
-
-static void usbdux_usb_disconnect(struct usb_interface *intf)
-{
- struct usbduxsub *usbduxsub_tmp = usb_get_intfdata(intf);
- struct usb_device *udev = interface_to_usbdev(intf);
-
- if (!usbduxsub_tmp) {
- dev_err(&intf->dev,
- "comedi_: disconnect called with null pointer.\n");
- return;
- }
- if (usbduxsub_tmp->usbdev != udev) {
- dev_err(&intf->dev, "comedi_: BUG! called with wrong ptr!!!\n");
- return;
- }
- comedi_usb_auto_unconfig(intf);
- down(&start_stop_sem);
- down(&usbduxsub_tmp->sem);
- tidy_up(usbduxsub_tmp);
- up(&usbduxsub_tmp->sem);
- up(&start_stop_sem);
- dev_dbg(&intf->dev, "comedi_: disconnected from the usb\n");
+ return comedi_usb_auto_config(intf, &usbdux_driver, 0);
}
static const struct usb_device_id usbdux_usb_table[] = {
@@ -2553,13 +1821,12 @@ static const struct usb_device_id usbdux_usb_table[] = {
{ USB_DEVICE(0x13d8, 0x0002) },
{ }
};
-
MODULE_DEVICE_TABLE(usb, usbdux_usb_table);
static struct usb_driver usbdux_usb_driver = {
.name = "usbdux",
.probe = usbdux_usb_probe,
- .disconnect = usbdux_usb_disconnect,
+ .disconnect = comedi_usb_auto_unconfig,
.id_table = usbdux_usb_table,
};
module_comedi_usb_driver(usbdux_driver, usbdux_usb_driver);
@@ -2567,4 +1834,4 @@ module_comedi_usb_driver(usbdux_driver, usbdux_usb_driver);
MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com");
MODULE_DESCRIPTION("Stirling/ITL USB-DUX -- Bernd.Porr@f2s.com");
MODULE_LICENSE("GPL");
-MODULE_FIRMWARE(FIRMWARE);
+MODULE_FIRMWARE(USBDUX_FIRMWARE);
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index 27898c44e54..9707dd1239c 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -1061,10 +1061,9 @@ static int usbduxfast_auto_attach(struct comedi_device *dev,
return -ENODEV;
}
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
sema_init(&devpriv->sem, 1);
usb_set_intfdata(intf, devpriv);
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index 898c3c45040..c47f4087568 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -66,13 +66,6 @@
/* internal addresses of the 8051 processor */
#define USBDUXSUB_CPUCS 0xE600
-/* USB endpoints */
-#define USBDUXSIGMA_CMD_OUT_EP 1 /* command output */
-#define USBDUXSIGMA_ISO_OUT_EP 2 /* analog output ISO/IRQ */
-#define USBDUXSIGMA_PWM_OUT_EP 4 /* pwm output */
-#define USBDUXSIGMA_ISO_IN_EP 6 /* analog input ISO/IRQ */
-#define USBDUXSIGMA_CMD_IN_EP 8 /* command input */
-
/* 300Hz max frequ under PWM */
#define MIN_PWM_PERIOD ((long)(1E9/300))
@@ -168,6 +161,7 @@ struct usbduxsigma_private {
/* input buffer for single insn */
int8_t *insn_buf;
+ int8_t ao_chanlist[USBDUXSIGMA_NUM_AO_CHAN];
unsigned int ao_readback[USBDUXSIGMA_NUM_AO_CHAN];
unsigned high_speed:1;
@@ -188,25 +182,25 @@ struct usbduxsigma_private {
unsigned int ao_counter;
/* interval in frames/uframes */
unsigned int ai_interval;
- /* D/A commands */
- uint8_t *dac_commands;
/* commands */
uint8_t *dux_commands;
struct semaphore sem;
};
+static void usbduxsigma_unlink_urbs(struct urb **urbs, int num_urbs)
+{
+ int i;
+
+ for (i = 0; i < num_urbs; i++)
+ usb_kill_urb(urbs[i]);
+}
+
static void usbduxsigma_ai_stop(struct comedi_device *dev, int do_unlink)
{
struct usbduxsigma_private *devpriv = dev->private;
- if (do_unlink) {
- int i;
-
- for (i = 0; i < devpriv->n_ai_urbs; i++) {
- if (devpriv->ai_urbs[i])
- usb_kill_urb(devpriv->ai_urbs[i]);
- }
- }
+ if (do_unlink && devpriv->ai_urbs)
+ usbduxsigma_unlink_urbs(devpriv->ai_urbs, devpriv->n_ai_urbs);
devpriv->ai_cmd_running = 0;
}
@@ -342,14 +336,8 @@ static void usbduxsigma_ao_stop(struct comedi_device *dev, int do_unlink)
{
struct usbduxsigma_private *devpriv = dev->private;
- if (do_unlink) {
- int i;
-
- for (i = 0; i < devpriv->n_ao_urbs; i++) {
- if (devpriv->ao_urbs[i])
- usb_kill_urb(devpriv->ao_urbs[i]);
- }
- }
+ if (do_unlink && devpriv->ao_urbs)
+ usbduxsigma_unlink_urbs(devpriv->ao_urbs, devpriv->n_ao_urbs);
devpriv->ao_cmd_running = 0;
}
@@ -432,7 +420,7 @@ static void usbduxsigma_ao_urb_complete(struct urb *urb)
len = s->async->cmd.chanlist_len;
*datap++ = len;
for (i = 0; i < len; i++) {
- unsigned int chan = devpriv->dac_commands[i];
+ unsigned int chan = devpriv->ao_chanlist[i];
short val;
ret = comedi_buf_get(s->async, &val);
@@ -643,7 +631,7 @@ static int usbbuxsigma_send_cmd(struct comedi_device *dev, int cmd_type)
devpriv->dux_commands[0] = cmd_type;
- return usb_bulk_msg(usb, usb_sndbulkpipe(usb, USBDUXSIGMA_CMD_OUT_EP),
+ return usb_bulk_msg(usb, usb_sndbulkpipe(usb, 1),
devpriv->dux_commands, SIZEOFDUXBUFFER,
&nsent, BULK_TIMEOUT);
}
@@ -657,8 +645,7 @@ static int usbduxsigma_receive_cmd(struct comedi_device *dev, int command)
int i;
for (i = 0; i < RETRIES; i++) {
- ret = usb_bulk_msg(usb,
- usb_rcvbulkpipe(usb, USBDUXSIGMA_CMD_IN_EP),
+ ret = usb_bulk_msg(usb, usb_rcvbulkpipe(usb, 8),
devpriv->insn_buf, SIZEINSNBUF,
&nrec, BULK_TIMEOUT);
if (ret < 0)
@@ -686,13 +673,14 @@ static int usbduxsigma_ai_inttrig(struct comedi_device *dev,
down(&devpriv->sem);
if (!devpriv->ai_cmd_running) {
+ devpriv->ai_cmd_running = 1;
ret = usbduxsigma_submit_urbs(dev, devpriv->ai_urbs,
devpriv->n_ai_urbs, 1);
if (ret < 0) {
+ devpriv->ai_cmd_running = 0;
up(&devpriv->sem);
return ret;
}
- devpriv->ai_cmd_running = 1;
s->async->inttrig = NULL;
}
up(&devpriv->sem);
@@ -740,14 +728,15 @@ static int usbduxsigma_ai_cmd(struct comedi_device *dev,
if (cmd->start_src == TRIG_NOW) {
/* enable this acquisition operation */
+ devpriv->ai_cmd_running = 1;
ret = usbduxsigma_submit_urbs(dev, devpriv->ai_urbs,
devpriv->n_ai_urbs, 1);
if (ret < 0) {
+ devpriv->ai_cmd_running = 0;
up(&devpriv->sem);
return ret;
}
s->async->inttrig = NULL;
- devpriv->ai_cmd_running = 1;
} else { /* TRIG_INT */
/* wait for an internal signal and submit the urbs later */
s->async->inttrig = usbduxsigma_ai_inttrig;
@@ -876,13 +865,14 @@ static int usbduxsigma_ao_inttrig(struct comedi_device *dev,
down(&devpriv->sem);
if (!devpriv->ao_cmd_running) {
+ devpriv->ao_cmd_running = 1;
ret = usbduxsigma_submit_urbs(dev, devpriv->ao_urbs,
devpriv->n_ao_urbs, 0);
if (ret < 0) {
+ devpriv->ao_cmd_running = 0;
up(&devpriv->sem);
return ret;
}
- devpriv->ao_cmd_running = 1;
s->async->inttrig = NULL;
}
up(&devpriv->sem);
@@ -1020,20 +1010,21 @@ static int usbduxsigma_ao_cmd(struct comedi_device *dev,
/* set current channel of the running acquisition to zero */
s->async->cur_chan = 0;
for (i = 0; i < cmd->chanlist_len; ++i)
- devpriv->dac_commands[i] = CR_CHAN(cmd->chanlist[i]);
+ devpriv->ao_chanlist[i] = CR_CHAN(cmd->chanlist[i]);
devpriv->ao_counter = devpriv->ao_timer;
if (cmd->start_src == TRIG_NOW) {
/* enable this acquisition operation */
+ devpriv->ao_cmd_running = 1;
ret = usbduxsigma_submit_urbs(dev, devpriv->ao_urbs,
devpriv->n_ao_urbs, 0);
if (ret < 0) {
+ devpriv->ao_cmd_running = 0;
up(&devpriv->sem);
return ret;
}
s->async->inttrig = NULL;
- devpriv->ao_cmd_running = 1;
} else { /* TRIG_INT */
/* wait for an internal signal and submit the urbs later */
s->async->inttrig = usbduxsigma_ao_inttrig;
@@ -1049,23 +1040,11 @@ static int usbduxsigma_dio_insn_config(struct comedi_device *dev,
struct comedi_insn *insn,
unsigned int *data)
{
- unsigned int chan = CR_CHAN(insn->chanspec);
- unsigned int mask = 1 << chan;
+ int ret;
- switch (data[0]) {
- case INSN_CONFIG_DIO_OUTPUT:
- s->io_bits |= mask;
- break;
- case INSN_CONFIG_DIO_INPUT:
- s->io_bits &= ~mask;
- break;
- case INSN_CONFIG_DIO_QUERY:
- data[1] = (s->io_bits & mask) ? COMEDI_OUTPUT : COMEDI_INPUT;
- break;
- default:
- return -EINVAL;
- break;
- }
+ ret = comedi_dio_insn_config(dev, s, insn, data, 0);
+ if (ret)
+ return ret;
/*
* We don't tell the firmware here as it would take 8 frames
@@ -1194,8 +1173,7 @@ static int usbduxsigma_submit_pwm_urb(struct comedi_device *dev)
struct urb *urb = devpriv->pwm_urb;
/* in case of a resubmission after an unlink... */
- usb_fill_bulk_urb(urb,
- usb, usb_sndbulkpipe(usb, USBDUXSIGMA_PWM_OUT_EP),
+ usb_fill_bulk_urb(urb, usb, usb_sndbulkpipe(usb, 4),
urb->transfer_buffer, devpriv->pwm_buf_sz,
usbduxsigma_pwm_urb_complete, dev);
@@ -1237,19 +1215,21 @@ static int usbduxsigma_pwm_start(struct comedi_device *dev,
memset(devpriv->pwm_urb->transfer_buffer, 0, devpriv->pwm_buf_sz);
+ devpriv->pwm_cmd_running = 1;
ret = usbduxsigma_submit_pwm_urb(dev);
- if (ret < 0)
+ if (ret < 0) {
+ devpriv->pwm_cmd_running = 0;
return ret;
- devpriv->pwm_cmd_running = 1;
+ }
return 0;
}
-static int usbduxsigma_pwm_pattern(struct comedi_device *dev,
- struct comedi_subdevice *s,
- unsigned int chan,
- unsigned int value,
- unsigned int sign)
+static void usbduxsigma_pwm_pattern(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned int chan,
+ unsigned int value,
+ unsigned int sign)
{
struct usbduxsigma_private *devpriv = dev->private;
char pwm_mask = (1 << chan); /* DIO bit for the PWM data */
@@ -1270,7 +1250,6 @@ static int usbduxsigma_pwm_pattern(struct comedi_device *dev,
c |= sgn_mask;
*buf++ = c;
}
- return 1;
}
static int usbduxsigma_pwm_write(struct comedi_device *dev,
@@ -1291,7 +1270,9 @@ static int usbduxsigma_pwm_write(struct comedi_device *dev,
* The sign is set via a special INSN only, this gives us 8 bits
* for normal operation, sign is 0 by default.
*/
- return usbduxsigma_pwm_pattern(dev, s, chan, data[0], 0);
+ usbduxsigma_pwm_pattern(dev, s, chan, data[0], 0);
+
+ return insn->n;
}
static int usbduxsigma_pwm_config(struct comedi_device *dev,
@@ -1326,8 +1307,8 @@ static int usbduxsigma_pwm_config(struct comedi_device *dev,
* data[1] = value
* data[2] = sign (for a relay)
*/
- return usbduxsigma_pwm_pattern(dev, s, chan,
- data[1], (data[2] != 0));
+ usbduxsigma_pwm_pattern(dev, s, chan, data[1], (data[2] != 0));
+ return 0;
case INSN_CONFIG_PWM_GET_H_BRIDGE:
/* values are not kept in this driver, nothing to return */
return -EINVAL;
@@ -1386,90 +1367,6 @@ static int usbduxsigma_getstatusinfo(struct comedi_device *dev, int chan)
return (int)val;
}
-static int usbduxsigma_attach_common(struct comedi_device *dev)
-{
- struct usbduxsigma_private *devpriv = dev->private;
- struct comedi_subdevice *s;
- int n_subdevs;
- int offset;
- int ret;
-
- down(&devpriv->sem);
-
- if (devpriv->high_speed)
- n_subdevs = 4; /* with pwm */
- else
- n_subdevs = 3; /* without pwm */
- ret = comedi_alloc_subdevices(dev, n_subdevs);
- if (ret) {
- up(&devpriv->sem);
- return ret;
- }
-
- /* Analog Input subdevice */
- s = &dev->subdevices[0];
- dev->read_subdev = s;
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ | SDF_LSAMPL;
- s->n_chan = NUMCHANNELS;
- s->len_chanlist = NUMCHANNELS;
- s->maxdata = 0x00ffffff;
- s->range_table = &usbduxsigma_ai_range;
- s->insn_read = usbduxsigma_ai_insn_read;
- s->do_cmdtest = usbduxsigma_ai_cmdtest;
- s->do_cmd = usbduxsigma_ai_cmd;
- s->cancel = usbduxsigma_ai_cancel;
-
- /* Analog Output subdevice */
- s = &dev->subdevices[1];
- dev->write_subdev = s;
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_CMD_WRITE;
- s->n_chan = USBDUXSIGMA_NUM_AO_CHAN;
- s->len_chanlist = s->n_chan;
- s->maxdata = 0x00ff;
- s->range_table = &range_unipolar2_5;
- s->insn_write = usbduxsigma_ao_insn_write;
- s->insn_read = usbduxsigma_ao_insn_read;
- s->do_cmdtest = usbduxsigma_ao_cmdtest;
- s->do_cmd = usbduxsigma_ao_cmd;
- s->cancel = usbduxsigma_ao_cancel;
-
- /* Digital I/O subdevice */
- s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = 24;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = usbduxsigma_dio_insn_bits;
- s->insn_config = usbduxsigma_dio_insn_config;
-
- if (devpriv->high_speed) {
- /* Timer / pwm subdevice */
- s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_PWM;
- s->subdev_flags = SDF_WRITABLE | SDF_PWM_HBRIDGE;
- s->n_chan = 8;
- s->maxdata = devpriv->pwm_buf_sz;
- s->insn_write = usbduxsigma_pwm_write;
- s->insn_config = usbduxsigma_pwm_config;
-
- usbduxsigma_pwm_period(dev, s, PWM_DEFAULT_PERIOD);
- }
-
- up(&devpriv->sem);
-
- offset = usbduxsigma_getstatusinfo(dev, 0);
- if (offset < 0)
- dev_err(dev->class_dev,
- "Communication to USBDUXSIGMA failed! Check firmware and cabling\n");
-
- dev_info(dev->class_dev, "attached, ADC_zero = %x\n", offset);
-
- return 0;
-}
-
static int usbduxsigma_firmware_upload(struct comedi_device *dev,
const u8 *data, size_t size,
unsigned long context)
@@ -1548,7 +1445,6 @@ static int usbduxsigma_alloc_usb_buffers(struct comedi_device *dev)
struct urb *urb;
int i;
- devpriv->dac_commands = kzalloc(NUMOUTCHANNELS, GFP_KERNEL);
devpriv->dux_commands = kzalloc(SIZEOFDUXBUFFER, GFP_KERNEL);
devpriv->in_buf = kzalloc(SIZEINBUF, GFP_KERNEL);
devpriv->insn_buf = kzalloc(SIZEINSNBUF, GFP_KERNEL);
@@ -1556,8 +1452,7 @@ static int usbduxsigma_alloc_usb_buffers(struct comedi_device *dev)
GFP_KERNEL);
devpriv->ao_urbs = kcalloc(devpriv->n_ao_urbs, sizeof(*urb),
GFP_KERNEL);
- if (!devpriv->dac_commands || !devpriv->dux_commands ||
- !devpriv->in_buf || !devpriv->insn_buf ||
+ if (!devpriv->dux_commands || !devpriv->in_buf || !devpriv->insn_buf ||
!devpriv->ai_urbs || !devpriv->ao_urbs)
return -ENOMEM;
@@ -1571,7 +1466,7 @@ static int usbduxsigma_alloc_usb_buffers(struct comedi_device *dev)
/* will be filled later with a pointer to the comedi-device */
/* and ONLY then the urb should be submitted */
urb->context = NULL;
- urb->pipe = usb_rcvisocpipe(usb, USBDUXSIGMA_ISO_IN_EP);
+ urb->pipe = usb_rcvisocpipe(usb, 6);
urb->transfer_flags = URB_ISO_ASAP;
urb->transfer_buffer = kzalloc(SIZEINBUF, GFP_KERNEL);
if (!urb->transfer_buffer)
@@ -1593,7 +1488,7 @@ static int usbduxsigma_alloc_usb_buffers(struct comedi_device *dev)
/* will be filled later with a pointer to the comedi-device */
/* and ONLY then the urb should be submitted */
urb->context = NULL;
- urb->pipe = usb_sndisocpipe(usb, USBDUXSIGMA_ISO_OUT_EP);
+ urb->pipe = usb_sndisocpipe(usb, 2);
urb->transfer_flags = URB_ISO_ASAP;
urb->transfer_buffer = kzalloc(SIZEOUTBUF, GFP_KERNEL);
if (!urb->transfer_buffer)
@@ -1609,19 +1504,16 @@ static int usbduxsigma_alloc_usb_buffers(struct comedi_device *dev)
urb->interval = 1; /* frames */
}
- if (devpriv->high_speed) {
- /* max bulk ep size in high speed */
- devpriv->pwm_buf_sz = 512;
+ if (devpriv->pwm_buf_sz) {
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
return -ENOMEM;
devpriv->pwm_urb = urb;
- urb->transfer_buffer = kzalloc(devpriv->pwm_buf_sz, GFP_KERNEL);
+
+ urb->transfer_buffer = kzalloc(devpriv->pwm_buf_sz,
+ GFP_KERNEL);
if (!urb->transfer_buffer)
return -ENOMEM;
- } else {
- devpriv->pwm_urb = NULL;
- devpriv->pwm_buf_sz = 0;
}
return 0;
@@ -1633,11 +1525,6 @@ static void usbduxsigma_free_usb_buffers(struct comedi_device *dev)
struct urb *urb;
int i;
- /* force unlink all urbs */
- usbduxsigma_ai_stop(dev, 1);
- usbduxsigma_ao_stop(dev, 1);
- usbduxsigma_pwm_stop(dev, 1);
-
urb = devpriv->pwm_urb;
if (urb) {
kfree(urb->transfer_buffer);
@@ -1666,7 +1553,6 @@ static void usbduxsigma_free_usb_buffers(struct comedi_device *dev)
kfree(devpriv->insn_buf);
kfree(devpriv->in_buf);
kfree(devpriv->dux_commands);
- kfree(devpriv->dac_commands);
}
static int usbduxsigma_auto_attach(struct comedi_device *dev,
@@ -1675,29 +1561,23 @@ static int usbduxsigma_auto_attach(struct comedi_device *dev,
struct usb_interface *intf = comedi_to_usb_interface(dev);
struct usb_device *usb = comedi_to_usb_dev(dev);
struct usbduxsigma_private *devpriv;
+ struct comedi_subdevice *s;
+ int offset;
int ret;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
sema_init(&devpriv->sem, 1);
- usb_set_intfdata(intf, devpriv);
- ret = usb_set_interface(usb,
- intf->altsetting->desc.bInterfaceNumber, 3);
- if (ret < 0) {
- dev_err(dev->class_dev,
- "could not set alternate setting 3 in high speed\n");
- return -ENODEV;
- }
+ usb_set_intfdata(intf, devpriv);
- /* test if it is high speed (USB 2.0) */
devpriv->high_speed = (usb->speed == USB_SPEED_HIGH);
if (devpriv->high_speed) {
devpriv->n_ai_urbs = NUMOFINBUFFERSHIGH;
devpriv->n_ao_urbs = NUMOFOUTBUFFERSHIGH;
+ devpriv->pwm_buf_sz = 512;
} else {
devpriv->n_ai_urbs = NUMOFINBUFFERSFULL;
devpriv->n_ao_urbs = NUMOFOUTBUFFERSFULL;
@@ -1707,12 +1587,84 @@ static int usbduxsigma_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
+ /* setting to alternate setting 3: enabling iso ep and bulk ep. */
+ ret = usb_set_interface(usb, intf->altsetting->desc.bInterfaceNumber,
+ 3);
+ if (ret < 0) {
+ dev_err(dev->class_dev,
+ "could not set alternate setting 3 in high speed\n");
+ return ret;
+ }
+
ret = comedi_load_firmware(dev, &usb->dev, FIRMWARE,
usbduxsigma_firmware_upload, 0);
if (ret)
return ret;
- return usbduxsigma_attach_common(dev);
+ ret = comedi_alloc_subdevices(dev, (devpriv->high_speed) ? 4 : 3);
+ if (ret)
+ return ret;
+
+ /* Analog Input subdevice */
+ s = &dev->subdevices[0];
+ dev->read_subdev = s;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ | SDF_LSAMPL;
+ s->n_chan = NUMCHANNELS;
+ s->len_chanlist = NUMCHANNELS;
+ s->maxdata = 0x00ffffff;
+ s->range_table = &usbduxsigma_ai_range;
+ s->insn_read = usbduxsigma_ai_insn_read;
+ s->do_cmdtest = usbduxsigma_ai_cmdtest;
+ s->do_cmd = usbduxsigma_ai_cmd;
+ s->cancel = usbduxsigma_ai_cancel;
+
+ /* Analog Output subdevice */
+ s = &dev->subdevices[1];
+ dev->write_subdev = s;
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE | SDF_GROUND | SDF_CMD_WRITE;
+ s->n_chan = USBDUXSIGMA_NUM_AO_CHAN;
+ s->len_chanlist = s->n_chan;
+ s->maxdata = 0x00ff;
+ s->range_table = &range_unipolar2_5;
+ s->insn_write = usbduxsigma_ao_insn_write;
+ s->insn_read = usbduxsigma_ao_insn_read;
+ s->do_cmdtest = usbduxsigma_ao_cmdtest;
+ s->do_cmd = usbduxsigma_ao_cmd;
+ s->cancel = usbduxsigma_ao_cancel;
+
+ /* Digital I/O subdevice */
+ s = &dev->subdevices[2];
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = 24;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = usbduxsigma_dio_insn_bits;
+ s->insn_config = usbduxsigma_dio_insn_config;
+
+ if (devpriv->high_speed) {
+ /* Timer / pwm subdevice */
+ s = &dev->subdevices[3];
+ s->type = COMEDI_SUBD_PWM;
+ s->subdev_flags = SDF_WRITABLE | SDF_PWM_HBRIDGE;
+ s->n_chan = 8;
+ s->maxdata = devpriv->pwm_buf_sz;
+ s->insn_write = usbduxsigma_pwm_write;
+ s->insn_config = usbduxsigma_pwm_config;
+
+ usbduxsigma_pwm_period(dev, s, PWM_DEFAULT_PERIOD);
+ }
+
+ offset = usbduxsigma_getstatusinfo(dev, 0);
+ if (offset < 0)
+ dev_err(dev->class_dev,
+ "Communication to USBDUXSIGMA failed! Check firmware and cabling\n");
+
+ dev_info(dev->class_dev, "attached, ADC_zero = %x\n", offset);
+
+ return 0;
}
static void usbduxsigma_detach(struct comedi_device *dev)
@@ -1720,13 +1672,20 @@ static void usbduxsigma_detach(struct comedi_device *dev)
struct usb_interface *intf = comedi_to_usb_interface(dev);
struct usbduxsigma_private *devpriv = dev->private;
+ usb_set_intfdata(intf, NULL);
+
if (!devpriv)
return;
- usb_set_intfdata(intf, NULL);
-
down(&devpriv->sem);
+
+ /* force unlink all urbs */
+ usbduxsigma_ai_stop(dev, 1);
+ usbduxsigma_ao_stop(dev, 1);
+ usbduxsigma_pwm_stop(dev, 1);
+
usbduxsigma_free_usb_buffers(dev);
+
up(&devpriv->sem);
}
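
The usbduxsigma hunks above also reorder how the ai/ao/pwm "cmd_running" flags are set around URB submission. Purely as an illustration (not part of the patch, and with hypothetical struct and function names), the pattern is roughly the following: raise the flag before submitting, because a completion handler can fire and test it before usb_submit_urb() returns, and lower it again if submission fails.

#include <linux/usb.h>
#include <linux/slab.h>

/* Illustration only -- hypothetical names, simplified from the driver. */
struct example_private {
	struct urb **urbs;		/* ISO URBs prepared at attach time */
	int n_urbs;
	unsigned cmd_running:1;
};

static int example_start_acquisition(struct example_private *devpriv)
{
	int i, ret;

	devpriv->cmd_running = 1;	/* visible to URB completion handlers */
	for (i = 0; i < devpriv->n_urbs; i++) {
		ret = usb_submit_urb(devpriv->urbs[i], GFP_ATOMIC);
		if (ret < 0) {
			devpriv->cmd_running = 0;	/* undo on failure */
			return ret;
		}
	}
	return 0;
}
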
diff --git a/drivers/staging/comedi/drivers/vmk80xx.c b/drivers/staging/comedi/drivers/vmk80xx.c
index 0ab04c0dd41..06efa16b9af 100644
--- a/drivers/staging/comedi/drivers/vmk80xx.c
+++ b/drivers/staging/comedi/drivers/vmk80xx.c
@@ -875,10 +875,9 @@ static int vmk80xx_auto_attach(struct comedi_device *dev,
dev->board_ptr = boardinfo;
dev->board_name = boardinfo->name;
- devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
+ devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
- dev->private = devpriv;
devpriv->model = boardinfo->model;
diff --git a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
index da8988c6bf5..cd60677a3ed 100644
--- a/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
+++ b/drivers/staging/comedi/kcomedilib/kcomedilib_main.c
@@ -22,8 +22,6 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fcntl.h>
-#include <linux/delay.h>
-#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/io.h>
@@ -125,6 +123,27 @@ error:
return ret;
}
+int comedi_dio_get_config(struct comedi_device *dev, unsigned int subdev,
+ unsigned int chan, unsigned int *io)
+{
+ struct comedi_insn insn;
+ unsigned int data[2];
+ int ret;
+
+ memset(&insn, 0, sizeof(insn));
+ insn.insn = INSN_CONFIG;
+ insn.n = 2;
+ insn.subdev = subdev;
+ insn.chanspec = CR_PACK(chan, 0, 0);
+ data[0] = INSN_CONFIG_DIO_QUERY;
+ data[1] = 0;
+ ret = comedi_do_insn(dev, &insn, data);
+ if (ret >= 0)
+ *io = data[1];
+ return ret;
+}
+EXPORT_SYMBOL_GPL(comedi_dio_get_config);
+
int comedi_dio_config(struct comedi_device *dev, unsigned int subdev,
unsigned int chan, unsigned int io)
{
@@ -140,28 +159,53 @@ int comedi_dio_config(struct comedi_device *dev, unsigned int subdev,
}
EXPORT_SYMBOL_GPL(comedi_dio_config);
-int comedi_dio_bitfield(struct comedi_device *dev, unsigned int subdev,
- unsigned int mask, unsigned int *bits)
+int comedi_dio_bitfield2(struct comedi_device *dev, unsigned int subdev,
+ unsigned int mask, unsigned int *bits,
+ unsigned int base_channel)
{
struct comedi_insn insn;
unsigned int data[2];
+ unsigned int n_chan;
+ unsigned int shift;
int ret;
+ if (subdev >= dev->n_subdevices)
+ return -EINVAL;
+
+ base_channel = CR_CHAN(base_channel);
+ n_chan = comedi_get_n_channels(dev, subdev);
+ if (base_channel >= n_chan)
+ return -EINVAL;
+
memset(&insn, 0, sizeof(insn));
insn.insn = INSN_BITS;
+ insn.chanspec = base_channel;
insn.n = 2;
insn.subdev = subdev;
data[0] = mask;
data[1] = *bits;
- ret = comedi_do_insn(dev, &insn, data);
-
- *bits = data[1];
+ /*
+ * Most drivers ignore the base channel in insn->chanspec.
+ * Fix this here if the subdevice has <= 32 channels.
+ */
+ if (n_chan <= 32) {
+ shift = base_channel;
+ if (shift) {
+ insn.chanspec = 0;
+ data[0] <<= shift;
+ data[1] <<= shift;
+ }
+ } else {
+ shift = 0;
+ }
+ ret = comedi_do_insn(dev, &insn, data);
+ *bits = data[1] >> shift;
return ret;
}
-EXPORT_SYMBOL_GPL(comedi_dio_bitfield);
+EXPORT_SYMBOL_GPL(comedi_dio_bitfield2);
int comedi_find_subdevice_by_type(struct comedi_device *dev, int type,
unsigned int subd)
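
The kcomedilib changes above export two reworked DIO helpers, comedi_dio_get_config() and comedi_dio_bitfield2(). As a rough usage sketch only (the consumer function below is hypothetical, and it assumes the file lives alongside other kcomedilib users such as comedi_bond, hence the staging-relative includes), a caller can query a channel's direction and then update a group of channels relative to a base channel, letting the helper perform the <=32-channel shift described in the comment above:

/* Hypothetical kcomedilib consumer -- illustration only, not part of the patch. */
#include "../comedi.h"
#include "../comedilib.h"
#include "../comedidev.h"

static int example_update_dio(struct comedi_device *dev, unsigned int subdev)
{
	unsigned int io;
	unsigned int mask = 0x0f;	/* touch four channels... */
	unsigned int bits = 0x05;	/* ...writing this pattern */
	int ret;

	/* direction of channel 3: COMEDI_INPUT or COMEDI_OUTPUT */
	ret = comedi_dio_get_config(dev, subdev, 3, &io);
	if (ret < 0)
		return ret;
	if (io != COMEDI_OUTPUT)
		return -EINVAL;

	/*
	 * mask/bits are relative to base channel 8; for subdevices with
	 * <= 32 channels the helper shifts them into place itself.
	 */
	return comedi_dio_bitfield2(dev, subdev, mask, &bits, 8);
}
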
diff --git a/drivers/staging/comedi/proc.c b/drivers/staging/comedi/proc.c
index 8ee94424bc8..ade00035d3b 100644
--- a/drivers/staging/comedi/proc.c
+++ b/drivers/staging/comedi/proc.c
@@ -55,6 +55,7 @@ static int comedi_read(struct seq_file *m, void *v)
if (!devices_q)
seq_puts(m, "no devices\n");
+ mutex_lock(&comedi_drivers_list_lock);
for (driv = comedi_drivers; driv; driv = driv->next) {
seq_printf(m, "%s:\n", driv->driver_name);
for (i = 0; i < driv->num_names; i++)
@@ -65,6 +66,7 @@ static int comedi_read(struct seq_file *m, void *v)
if (!driv->num_names)
seq_printf(m, " %s\n", driv->driver_name);
}
+ mutex_unlock(&comedi_drivers_list_lock);
return 0;
}
diff --git a/drivers/staging/comedi/range.c b/drivers/staging/comedi/range.c
index 1f20332cc45..8fde55495d3 100644
--- a/drivers/staging/comedi/range.c
+++ b/drivers/staging/comedi/range.c
@@ -127,38 +127,35 @@ static int aref_invalid(struct comedi_subdevice *s, unsigned int chanspec)
return 1;
}
-/*
- This function checks each element in a channel/gain list to make
- make sure it is valid.
+/**
+ * comedi_check_chanlist() - Validate each element in a chanlist.
+ * @s: comedi_subdevice struct
+ * @n: number of elements in the chanlist
+ * @chanlist: the chanlist to validate
*/
int comedi_check_chanlist(struct comedi_subdevice *s, int n,
unsigned int *chanlist)
{
struct comedi_device *dev = s->device;
- int i;
- int chan;
+ unsigned int chanspec;
+ int chan, range_len, i;
- if (s->range_table) {
- for (i = 0; i < n; i++)
- if (CR_CHAN(chanlist[i]) >= s->n_chan ||
- CR_RANGE(chanlist[i]) >= s->range_table->length
- || aref_invalid(s, chanlist[i])) {
- dev_warn(dev->class_dev,
- "bad chanlist[%d]=0x%08x in_chan=%d range length=%d\n",
- i, chanlist[i], s->n_chan,
- s->range_table->length);
- return -EINVAL;
- }
- } else if (s->range_table_list) {
+ if (s->range_table || s->range_table_list) {
for (i = 0; i < n; i++) {
- chan = CR_CHAN(chanlist[i]);
+ chanspec = chanlist[i];
+ chan = CR_CHAN(chanspec);
+ if (s->range_table)
+ range_len = s->range_table->length;
+ else if (s->range_table_list && chan < s->n_chan)
+ range_len = s->range_table_list[chan]->length;
+ else
+ range_len = 0;
if (chan >= s->n_chan ||
- CR_RANGE(chanlist[i]) >=
- s->range_table_list[chan]->length
- || aref_invalid(s, chanlist[i])) {
+ CR_RANGE(chanspec) >= range_len ||
+ aref_invalid(s, chanspec)) {
dev_warn(dev->class_dev,
- "bad chanlist[%d]=0x%08x\n",
- i, chanlist[i]);
+ "bad chanlist[%d]=0x%08x chan=%d range length=%d\n",
+ i, chanspec, chan, range_len);
return -EINVAL;
}
}
diff --git a/drivers/staging/crystalhd/crystalhd_cmds.c b/drivers/staging/crystalhd/crystalhd_cmds.c
index 3ab502b8c3b..07a2f24d0d4 100644
--- a/drivers/staging/crystalhd/crystalhd_cmds.c
+++ b/drivers/staging/crystalhd/crystalhd_cmds.c
@@ -94,8 +94,7 @@ static enum BC_STATUS bc_cproc_notify_mode(struct crystalhd_cmd *ctx,
for (i = 0; i < BC_LINK_MAX_OPENS; i++) {
if (ctx->user[i].mode == DTS_DIAG_MODE ||
ctx->user[i].mode == DTS_PLAYBACK_MODE) {
- BCMLOG_ERR("multiple playback sessions are not "
- "supported..\n");
+ BCMLOG_ERR("multiple playback sessions are not supported..\n");
return BC_STS_ERR_USAGE;
}
}
diff --git a/drivers/staging/crystalhd/crystalhd_hw.c b/drivers/staging/crystalhd/crystalhd_hw.c
index 0c8cb329420..5845e899ee8 100644
--- a/drivers/staging/crystalhd/crystalhd_hw.c
+++ b/drivers/staging/crystalhd/crystalhd_hw.c
@@ -1061,7 +1061,7 @@ static void cpy_pib_to_app(struct c011_pib *src_pib,
dst_pib->aspect_ratio = src_pib->ppb.aspect_ratio;
dst_pib->colour_primaries = src_pib->ppb.colour_primaries;
dst_pib->picture_meta_payload = src_pib->ppb.picture_meta_payload;
- dst_pib->frame_rate = src_pib->resolution ;
+ dst_pib->frame_rate = src_pib->resolution;
return;
}
@@ -1553,11 +1553,10 @@ static void crystalhd_rx_isr(struct crystalhd_hw *hw, uint32_t intr_sts)
crystalhd_get_dnsz(hw, i, &y_dn_sz, &uv_dn_sz);
/* FIXME: jarod: this is where
my mini pci-e card is tripping up */
- BCMLOG(BCMLOG_DBG, "list_index:%x rx[%d] Y:%x "
- "UV:%x Int:%x YDnSz:%x UVDnSz:%x\n",
+ BCMLOG(BCMLOG_DBG, "list_index:%x rx[%d] Y:%x UV:%x Int:%x YDnSz:%x UVDnSz:%x\n",
i, hw->stats.rx_errors, y_err_sts,
uv_err_sts, intr_sts, y_dn_sz,
- uv_dn_sz);
+ uv_dn_sz);
hw->rx_list_sts[i] = sts_free;
comp_sts = BC_STS_ERROR;
break;
diff --git a/drivers/staging/crystalhd/crystalhd_lnx.c b/drivers/staging/crystalhd/crystalhd_lnx.c
index c1f6163cdeb..b17fbf8181c 100644
--- a/drivers/staging/crystalhd/crystalhd_lnx.c
+++ b/drivers/staging/crystalhd/crystalhd_lnx.c
@@ -545,8 +545,7 @@ static int chd_dec_pci_probe(struct pci_dev *pdev,
int rc;
enum BC_STATUS sts = BC_STS_SUCCESS;
- BCMLOG(BCMLOG_DBG, "PCI_INFO: Vendor:0x%04x Device:0x%04x "
- "s_vendor:0x%04x s_device: 0x%04x\n",
+ BCMLOG(BCMLOG_DBG, "PCI_INFO: Vendor:0x%04x Device:0x%04x s_vendor:0x%04x s_device: 0x%04x\n",
pdev->vendor, pdev->device, pdev->subsystem_vendor,
pdev->subsystem_device);
diff --git a/drivers/staging/crystalhd/crystalhd_misc.h b/drivers/staging/crystalhd/crystalhd_misc.h
index 4dae3a797e9..aa736c8855d 100644
--- a/drivers/staging/crystalhd/crystalhd_misc.h
+++ b/drivers/staging/crystalhd/crystalhd_misc.h
@@ -177,8 +177,8 @@ extern enum BC_STATUS crystalhd_map_dio(struct crystalhd_adp *, void *,
extern enum BC_STATUS crystalhd_unmap_dio(struct crystalhd_adp *,
struct crystalhd_dio_req*);
-#define crystalhd_get_sgle_paddr(_dio, _ix) (cpu_to_le64(sg_dma_address(&_dio->sg[_ix])))
-#define crystalhd_get_sgle_len(_dio, _ix) (cpu_to_le32(sg_dma_len(&_dio->sg[_ix])))
+#define crystalhd_get_sgle_paddr(_dio, _ix) (sg_dma_address(&_dio->sg[_ix]))
+#define crystalhd_get_sgle_len(_dio, _ix) (sg_dma_len(&_dio->sg[_ix]))
/*================ General Purpose Queues ==================*/
extern enum BC_STATUS crystalhd_create_dioq(struct crystalhd_adp *,
diff --git a/drivers/staging/cxt1e1/Makefile b/drivers/staging/cxt1e1/Makefile
index b9ccb765025..2f217e9daac 100644
--- a/drivers/staging/cxt1e1/Makefile
+++ b/drivers/staging/cxt1e1/Makefile
@@ -2,7 +2,6 @@ obj-$(CONFIG_CXT1E1) += cxt1e1.o
ccflags-y := -DSBE_PMCC4_ENABLE
ccflags-y += -DSBE_ISR_TASKLET
-ccflags-y += -DSBE_INCLUDE_SYMBOLS
cxt1e1-y := \
ossiRelease.o \
diff --git a/drivers/staging/cxt1e1/comet.c b/drivers/staging/cxt1e1/comet.c
index fabfd779c66..d71aea54181 100644
--- a/drivers/staging/cxt1e1/comet.c
+++ b/drivers/staging/cxt1e1/comet.c
@@ -22,22 +22,15 @@
#include "comet.h"
#include "comet_tables.h"
-#ifdef SBE_INCLUDE_SYMBOLS
-#define STATIC
-#else
-#define STATIC static
-#endif
-
-
extern int cxt1e1_log_level;
#define COMET_NUM_SAMPLES 24 /* Number of entries in the waveform table */
#define COMET_NUM_UNITS 5 /* Number of points per entry in table */
/* forward references */
-STATIC void SetPwrLevel(comet_t *comet);
-STATIC void WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table);
-STATIC void WrtXmtWaveformTbl(ci_t *ci, comet_t *comet, u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS]);
+static void SetPwrLevel(comet_t *comet);
+static void WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table);
+static void WrtXmtWaveformTbl(ci_t *ci, comet_t *comet, u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS]);
void *TWV_table[12] = {
@@ -407,7 +400,7 @@ void init_comet(void *ci, comet_t *comet, u_int32_t port_mode, int clockmaster,
** Write the data to the Pulse Waveform Storage Data register.
** Returns: Nothing
*/
-STATIC void
+static void
WrtXmtWaveform(ci_t *ci, comet_t *comet, u_int32_t sample, u_int32_t unit, u_int8_t data)
{
u_int8_t WaveformAddr;
@@ -425,7 +418,7 @@ WrtXmtWaveform(ci_t *ci, comet_t *comet, u_int32_t sample, u_int32_t unit, u_int
** for driving the transmitter DAC.
** Returns: Nothing
*/
-STATIC void
+static void
WrtXmtWaveformTbl(ci_t *ci, comet_t *comet,
u_int8_t table[COMET_NUM_SAMPLES][COMET_NUM_UNITS])
{
@@ -452,7 +445,7 @@ WrtXmtWaveformTbl(ci_t *ci, comet_t *comet,
** is coded with early setup of indirect address.
*/
-STATIC void
+static void
WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table)
{
u_int32_t ramaddr;
@@ -516,7 +509,7 @@ WrtRcvEqualizerTbl(ci_t *ci, comet_t *comet, u_int32_t *table)
** Returns: Nothing
*/
-STATIC void
+static void
SetPwrLevel(comet_t *comet)
{
volatile u_int32_t temp;
@@ -558,7 +551,7 @@ SetPwrLevel(comet_t *comet)
** Returns: Nothing
*/
#if 0
-STATIC void
+static void
SetCometOps(comet_t *comet)
{
volatile u_int8_t rd_value;
diff --git a/drivers/staging/cxt1e1/functions.c b/drivers/staging/cxt1e1/functions.c
index 6167dc57457..d021b312ffa 100644
--- a/drivers/staging/cxt1e1/functions.c
+++ b/drivers/staging/cxt1e1/functions.c
@@ -24,13 +24,6 @@
#include "libsbew.h"
#include "pmcc4.h"
-
-#ifdef SBE_INCLUDE_SYMBOLS
-#define STATIC
-#else
-#define STATIC static
-#endif
-
#if defined(CONFIG_SBE_HDLC_V7) || defined(CONFIG_SBE_WAN256T3_HDLC_V7) || \
defined(CONFIG_SBE_HDLC_V7_MODULE) || defined(CONFIG_SBE_WAN256T3_HDLC_V7_MODULE)
#define _v7_hdlc_ 1
@@ -111,7 +104,7 @@ pci_flush_write (ci_t *ci)
}
-STATIC void
+static void
watchdog_func (unsigned long arg)
{
struct watchdog *wd = (void *) arg;
diff --git a/drivers/staging/cxt1e1/hwprobe.c b/drivers/staging/cxt1e1/hwprobe.c
index 110c252d38d..53e923701ae 100644
--- a/drivers/staging/cxt1e1/hwprobe.c
+++ b/drivers/staging/cxt1e1/hwprobe.c
@@ -31,12 +31,6 @@
#include "sbeproc.h"
#endif
-#ifdef SBE_INCLUDE_SYMBOLS
-#define STATIC
-#else
-#define STATIC static
-#endif
-
extern int cxt1e1_log_level;
extern int error_flag;
extern int drvr_state;
@@ -221,7 +215,7 @@ cleanup_devs (void)
}
-STATIC int __init
+static int __init
c4_hdw_init (struct pci_dev *pdev, int found)
{
hdw_info_t *hi;
diff --git a/drivers/staging/cxt1e1/linux.c b/drivers/staging/cxt1e1/linux.c
index e5889ef190a..142691c8d8d 100644
--- a/drivers/staging/cxt1e1/linux.c
+++ b/drivers/staging/cxt1e1/linux.c
@@ -52,12 +52,6 @@
/*****************************************************************************************/
-#ifdef SBE_INCLUDE_SYMBOLS
-#define STATIC
-#else
-#define STATIC static
-#endif
-
#define CHANNAME "hdlc"
/*******************************************************************/
@@ -285,7 +279,7 @@ void_open (struct net_device *ndev)
}
-STATIC int
+static int
chan_open (struct net_device *ndev)
{
hdlc_device *hdlc = dev_to_hdlc (ndev);
@@ -305,7 +299,7 @@ chan_open (struct net_device *ndev)
}
-STATIC int
+static int
chan_close (struct net_device *ndev)
{
hdlc_device *hdlc = dev_to_hdlc (ndev);
@@ -319,14 +313,14 @@ chan_close (struct net_device *ndev)
}
-STATIC int
+static int
chan_dev_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
{
return hdlc_ioctl (dev, ifr, cmd);
}
-STATIC int
+static int
chan_attach_noop (struct net_device *ndev, unsigned short foo_1, unsigned short foo_2)
{
return 0; /* our driver has nothing to do here, show's
@@ -334,7 +328,7 @@ chan_attach_noop (struct net_device *ndev, unsigned short foo_1, unsigned short
}
-STATIC struct net_device_stats *
+static struct net_device_stats *
chan_get_stats (struct net_device *ndev)
{
mch_t *ch;
@@ -394,7 +388,7 @@ get_ci_by_dev (struct net_device *ndev)
}
-STATIC int
+static int
c4_linux_xmit (struct sk_buff *skb, struct net_device *ndev)
{
const struct c4_priv *priv;
@@ -416,7 +410,7 @@ static const struct net_device_ops chan_ops = {
.ndo_get_stats = chan_get_stats,
};
-STATIC struct net_device *
+static struct net_device *
create_chan (struct net_device *ndev, ci_t *ci,
struct sbecom_chan_param *cp)
{
@@ -509,7 +503,7 @@ create_chan (struct net_device *ndev, ci_t *ci,
/* the idea here is to get port information and pass it back (using pointer) */
-STATIC status_t
+static status_t
do_get_port (struct net_device *ndev, void *data)
{
int ret;
@@ -534,7 +528,7 @@ do_get_port (struct net_device *ndev, void *data)
}
/* this function copys the user data and then calls the real action function */
-STATIC status_t
+static status_t
do_set_port (struct net_device *ndev, void *data)
{
ci_t *ci; /* ci stands for card information */
@@ -556,7 +550,7 @@ do_set_port (struct net_device *ndev, void *data)
}
/* work the port loopback mode as per directed */
-STATIC status_t
+static status_t
do_port_loop (struct net_device *ndev, void *data)
{
struct sbecom_port_param pp;
@@ -571,7 +565,7 @@ do_port_loop (struct net_device *ndev, void *data)
}
/* set the specified register with the given value / or just read it */
-STATIC status_t
+static status_t
do_framer_rw (struct net_device *ndev, void *data)
{
struct sbecom_port_param pp;
@@ -592,7 +586,7 @@ do_framer_rw (struct net_device *ndev, void *data)
}
/* set the specified register with the given value / or just read it */
-STATIC status_t
+static status_t
do_pld_rw (struct net_device *ndev, void *data)
{
struct sbecom_port_param pp;
@@ -613,7 +607,7 @@ do_pld_rw (struct net_device *ndev, void *data)
}
/* set the specified register with the given value / or just read it */
-STATIC status_t
+static status_t
do_musycc_rw (struct net_device *ndev, void *data)
{
struct c4_musycc_param mp;
@@ -633,7 +627,7 @@ do_musycc_rw (struct net_device *ndev, void *data)
return 0;
}
-STATIC status_t
+static status_t
do_get_chan (struct net_device *ndev, void *data)
{
struct sbecom_chan_param cp;
@@ -651,7 +645,7 @@ do_get_chan (struct net_device *ndev, void *data)
return 0;
}
-STATIC status_t
+static status_t
do_set_chan (struct net_device *ndev, void *data)
{
struct sbecom_chan_param cp;
@@ -672,7 +666,7 @@ do_set_chan (struct net_device *ndev, void *data)
}
}
-STATIC status_t
+static status_t
do_create_chan (struct net_device *ndev, void *data)
{
ci_t *ci;
@@ -699,7 +693,7 @@ do_create_chan (struct net_device *ndev, void *data)
return ret;
}
-STATIC status_t
+static status_t
do_get_chan_stats (struct net_device *ndev, void *data)
{
struct c4_chan_stats_wrap ccs;
@@ -720,7 +714,7 @@ do_get_chan_stats (struct net_device *ndev, void *data)
return -EFAULT;
return 0;
}
-STATIC status_t
+static status_t
do_set_loglevel (struct net_device *ndev, void *data)
{
unsigned int cxt1e1_log_level;
@@ -731,7 +725,7 @@ do_set_loglevel (struct net_device *ndev, void *data)
return 0;
}
-STATIC status_t
+static status_t
do_deluser (struct net_device *ndev, int lockit)
{
if (ndev->flags & IFF_UP)
@@ -826,7 +820,7 @@ do_reset_chan_stats (struct net_device *musycc_dev, void *data)
return mkret (c4_del_chan_stats (cp.channum));
}
-STATIC status_t
+static status_t
c4_ioctl (struct net_device *ndev, struct ifreq *ifr, int cmd)
{
ci_t *ci;
@@ -1102,7 +1096,7 @@ c4_add_dev (hdw_info_t *hi, int brdno, unsigned long f0, unsigned long f1,
return ndev;
}
-STATIC int __init
+static int __init
c4_mod_init (void)
{
int rtn;
@@ -1144,7 +1138,7 @@ c4_mod_init (void)
* do_deluser()
*/
-STATIC void __exit
+static void __exit
cleanup_hdlc (void)
{
hdw_info_t *hi;
@@ -1168,7 +1162,7 @@ cleanup_hdlc (void)
}
-STATIC void __exit
+static void __exit
c4_mod_remove (void)
{
cleanup_hdlc(); /* delete any missed channels */
diff --git a/drivers/staging/cxt1e1/musycc.c b/drivers/staging/cxt1e1/musycc.c
index 1037086d00a..52b6d7f5fd4 100644
--- a/drivers/staging/cxt1e1/musycc.c
+++ b/drivers/staging/cxt1e1/musycc.c
@@ -35,12 +35,6 @@ unsigned int max_bh = 0;
#include "pmcc4.h"
#include "musycc.h"
-#ifdef SBE_INCLUDE_SYMBOLS
-#define STATIC
-#else
-#define STATIC static
-#endif
-
#define sd_find_chan(ci,ch) c4_find_chan(ch)
@@ -65,7 +59,6 @@ void c4_wk_chan_restart(mch_t *);
void musycc_bh_tx_eom(mpi_t *, int);
int musycc_chan_up(ci_t *, int);
status_t __init musycc_init(ci_t *);
-STATIC void __init musycc_init_port(mpi_t *);
void musycc_intr_bh_tasklet(ci_t *);
void musycc_serv_req(mpi_t *, u_int32_t);
void musycc_update_timeslots(mpi_t *);
@@ -73,7 +66,7 @@ void musycc_update_timeslots(mpi_t *);
/*******************************************************************/
#if 1
-STATIC int
+static int
musycc_dump_rxbuffer_ring(mch_t *ch, int lockit)
{
struct mdesc *m;
@@ -139,7 +132,7 @@ musycc_dump_rxbuffer_ring(mch_t *ch, int lockit)
#endif
#if 1
-STATIC int
+static int
musycc_dump_txbuffer_ring(mch_t *ch, int lockit)
{
struct mdesc *m;
@@ -702,7 +695,7 @@ musycc_chan_proto(int proto)
}
#ifdef SBE_WAN256T3_ENABLE
-STATIC void __init
+static void __init
musycc_init_port(mpi_t *pi)
{
pci_write_32((u_int32_t *) &pi->reg->gbp, OS_vtophys(pi->regram));
@@ -1009,7 +1002,7 @@ musycc_bh_tx_eom(mpi_t *pi, int gchan)
}
-STATIC void
+static void
musycc_bh_rx_eom(mpi_t *pi, int gchan)
{
mch_t *ch;
diff --git a/drivers/staging/cxt1e1/pmc93x6_eeprom.c b/drivers/staging/cxt1e1/pmc93x6_eeprom.c
index 62b12fb45fc..137b63cb553 100644
--- a/drivers/staging/cxt1e1/pmc93x6_eeprom.c
+++ b/drivers/staging/cxt1e1/pmc93x6_eeprom.c
@@ -34,13 +34,6 @@
#define FALSE 0
#endif
-#ifdef SBE_INCLUDE_SYMBOLS
-#define STATIC
-#else
-#define STATIC static
-#endif
-
-
/*------------------------------------------------------------------------
* EEPROM address definitions
*------------------------------------------------------------------------
@@ -120,7 +113,7 @@ short mfg_template[sizeof (FLD_TYPE2)] =
* (the MSB becomes the LSB etc.).
*/
-STATIC void
+static void
BuildByteReverse (void)
{
long half; /* Used to build by powers to 2 */
@@ -141,7 +134,7 @@ BuildByteReverse (void)
*------------------------------------------------------------------------
*/
-STATIC void
+static void
eeprom_delay (void)
{
int timeout;
@@ -224,7 +217,7 @@ eeprom_get_byte (long addr)
* Issue the EEPROM command to disable writes.
*/
-STATIC void
+static void
disable_pmc_eeprom (long addr)
{
eeprom_put_byte (addr, EPROM_EWDS, SIZE_ADDR_OP);
@@ -241,7 +234,7 @@ disable_pmc_eeprom (long addr)
* Issue the EEPROM command to enable writes.
*/
-STATIC void
+static void
enable_pmc_eeprom (long addr)
{
eeprom_put_byte (addr, EPROM_EWEN, SIZE_ADDR_OP);
diff --git a/drivers/staging/cxt1e1/pmcc4_drv.c b/drivers/staging/cxt1e1/pmcc4_drv.c
index 32d7a216a41..2383c609bf3 100644
--- a/drivers/staging/cxt1e1/pmcc4_drv.c
+++ b/drivers/staging/cxt1e1/pmcc4_drv.c
@@ -39,13 +39,6 @@
#include "comet.h"
#include "sbe_bid.h"
-#ifdef SBE_INCLUDE_SYMBOLS
-#define STATIC
-#else
-#define STATIC static
-#endif
-
-
#define KERN_WARN KERN_WARNING
/* forward references */
@@ -458,7 +451,7 @@ checkPorts (ci_t *ci)
}
-STATIC void
+static void
c4_watchdog (ci_t *ci)
{
if (drvr_state != SBE_DRVR_AVAILABLE)
@@ -1184,7 +1177,7 @@ c4_get_chan_stats (int channum, struct sbecom_chan_stats *p)
return 0;
}
-STATIC int
+static int
c4_fifo_alloc (mpi_t *pi, int chan, int *len)
{
int i, l = 0, start = 0, max = 0, maxstart = 0;
diff --git a/drivers/staging/cxt1e1/sbeid.c b/drivers/staging/cxt1e1/sbeid.c
index 0f9bd5f8136..791993fec96 100644
--- a/drivers/staging/cxt1e1/sbeid.c
+++ b/drivers/staging/cxt1e1/sbeid.c
@@ -19,13 +19,6 @@
#include "pmcc4.h"
#include "sbe_bid.h"
-#ifdef SBE_INCLUDE_SYMBOLS
-#define STATIC
-#else
-#define STATIC static
-#endif
-
-
char *
sbeid_get_bdname (ci_t *ci)
{
diff --git a/drivers/staging/dgap/Kconfig b/drivers/staging/dgap/Kconfig
new file mode 100644
index 00000000000..31f1d7533ee
--- /dev/null
+++ b/drivers/staging/dgap/Kconfig
@@ -0,0 +1,6 @@
+config DGAP
+ tristate "Digi EPCA PCI products"
+ default n
+ depends on TTY
+ ---help---
+ Driver for the Digi International EPCA PCI based product line
diff --git a/drivers/staging/dgap/Makefile b/drivers/staging/dgap/Makefile
new file mode 100644
index 00000000000..9f1fce157c7
--- /dev/null
+++ b/drivers/staging/dgap/Makefile
@@ -0,0 +1,9 @@
+EXTRA_CFLAGS += -DDG_NAME=\"dgap-1.3-16\" -DDG_PART=\"40002347_C\"
+
+obj-$(CONFIG_DGAP) += dgap.o
+
+
+dgap-objs := dgap_driver.o dgap_fep5.o \
+ dgap_parse.o dgap_trace.o \
+ dgap_tty.o dgap_sysfs.o
+
diff --git a/drivers/staging/dgap/dgap_conf.h b/drivers/staging/dgap/dgap_conf.h
new file mode 100644
index 00000000000..88097013ed0
--- /dev/null
+++ b/drivers/staging/dgap/dgap_conf.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *****************************************************************************
+ *
+ * dgap_conf.h - Header file for installations and parse files.
+ *
+ * $Id: dgap_conf.h,v 1.1 2009/10/23 14:01:57 markh Exp $
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef _DGAP_CONF_H
+#define _DGAP_CONF_H
+
+#define NULLNODE 0 /* header node, not used */
+#define BNODE 1 /* Board node */
+#define LNODE 2 /* Line node */
+#define CNODE 3 /* Concentrator node */
+#define MNODE 4 /* EBI Module node */
+#define TNODE 5 /* tty name prefix node */
+#define CUNODE 6 /* cu name prefix (non-SCO) */
+#define PNODE 7 /* trans. print prefix node */
+#define JNODE 8 /* maJor number node */
+#define ANODE 9 /* altpin */
+#define TSNODE 10 /* tty structure size */
+#define CSNODE 11 /* channel structure size */
+#define BSNODE 12 /* board structure size */
+#define USNODE 13 /* unit schedule structure size */
+#define FSNODE 14 /* f2200 structure size */
+#define VSNODE 15 /* size of VPIX structures */
+#define INTRNODE 16 /* enable interrupt */
+
+/* Enumeration of tokens */
+#define BEGIN 1
+#define END 2
+#define BOARD 10
+
+#define EPCFS 11 /* start of EPC family definitions */
+#define ICX 11
+#define MCX 13
+#define PCX 14
+#define IEPC 15
+#define EEPC 16
+#define MEPC 17
+#define IPCM 18
+#define EPCM 19
+#define MPCM 20
+#define PEPC 21
+#define PPCM 22
+#ifdef CP
+#define ICP 23
+#define ECP 24
+#define MCP 25
+#endif
+#define EPCFE 25 /* end of EPC family definitions */
+#define PC2E 26
+#define PC4E 27
+#define PC4E8K 28
+#define PC8E 29
+#define PC8E8K 30
+#define PC16E 31
+#define MC2E8K 34
+#define MC4E8K 35
+#define MC8E8K 36
+
+#define AVANFS 42 /* start of Avanstar family definitions */
+#define A8P 42
+#define A16P 43
+#define AVANFE 43 /* end of Avanstar family definitions */
+
+#define DA2000FS 44 /* start of AccelePort 2000 family definitions */
+#define DA22 44 /* AccelePort 2002 */
+#define DA24 45 /* AccelePort 2004 */
+#define DA28 46 /* AccelePort 2008 */
+#define DA216 47 /* AccelePort 2016 */
+#define DAR4 48 /* AccelePort RAS 4 port */
+#define DAR8 49 /* AccelePort RAS 8 port */
+#define DDR24 50 /* DataFire RAS 24 port */
+#define DDR30 51 /* DataFire RAS 30 port */
+#define DDR48 52 /* DataFire RAS 48 port */
+#define DDR60 53 /* DataFire RAS 60 port */
+#define DA2000FE 53 /* end of AccelePort 2000/RAS family definitions */
+
+#define PCXRFS 106 /* start of PCXR family definitions */
+#define APORT4 106
+#define APORT8 107
+#define PAPORT4 108
+#define PAPORT8 109
+#define APORT4_920I 110
+#define APORT8_920I 111
+#define APORT4_920P 112
+#define APORT8_920P 113
+#define APORT2_920P 114
+#define PCXRFE 117 /* end of PCXR family definitions */
+
+#define LINE 82
+#ifdef T1
+#define T1M 83
+#define E1M 84
+#endif
+#define CONC 64
+#define CX 65
+#define EPC 66
+#define MOD 67
+#define PORTS 68
+#define METHOD 69
+#define CUSTOM 70
+#define BASIC 71
+#define STATUS 72
+#define MODEM 73
+/* The following tokens can appear in multiple places */
+#define SPEED 74
+#define NPORTS 75
+#define ID 76
+#define CABLE 77
+#define CONNECT 78
+#define IO 79
+#define MEM 80
+#define DPSZ 81
+
+#define TTYN 90
+#define CU 91
+#define PRINT 92
+#define XPRINT 93
+#define CMAJOR 94
+#define ALTPIN 95
+#define STARTO 96
+#define USEINTR 97
+#define PCIINFO 98
+
+#define TTSIZ 100
+#define CHSIZ 101
+#define BSSIZ 102
+#define UNTSIZ 103
+#define F2SIZ 104
+#define VPSIZ 105
+
+#define TOTAL_BOARD 2
+#define CURRENT_BRD 4
+#define BOARD_TYPE 6
+#define IO_ADDRESS 8
+#define MEM_ADDRESS 10
+
+#define FIELDS_PER_PAGE 18
+
+#define TB_FIELD 1
+#define CB_FIELD 3
+#define BT_FIELD 5
+#define IO_FIELD 7
+#define ID_FIELD 8
+#define ME_FIELD 9
+#define TTY_FIELD 11
+#define CU_FIELD 13
+#define PR_FIELD 15
+#define MPR_FIELD 17
+
+#define MAX_FIELD 512
+
+#define INIT 0
+#define NITEMS 128
+#define MAX_ITEM 512
+
+#define DSCRINST 1
+#define DSCRNUM 3
+#define ALTPINQ 5
+#define SSAVE 7
+
+#define DSCR "32"
+#define ONETONINE "123456789"
+#define ALL "1234567890"
+
+
+struct cnode {
+ struct cnode *next;
+ int type;
+ int numbrd;
+
+ union {
+ struct {
+ char type; /* Board Type */
+ short port; /* I/O Address */
+ char *portstr; /* I/O Address in string */
+ long addr; /* Memory Address */
+ char *addrstr; /* Memory Address in string */
+ long pcibus; /* PCI BUS */
+ char *pcibusstr; /* PCI BUS in string */
+ long pcislot; /* PCI SLOT */
+ char *pcislotstr; /* PCI SLOT in string */
+ char nport; /* Number of Ports */
+ char *id; /* tty id */
+ int start; /* start of tty counting */
+ char *method; /* Install method */
+ char v_type;
+ char v_port;
+ char v_addr;
+ char v_pcibus;
+ char v_pcislot;
+ char v_nport;
+ char v_id;
+ char v_start;
+ char v_method;
+ char line1;
+ char line2;
+ char conc1; /* total concs in line1 */
+ char conc2; /* total concs in line2 */
+ char module1; /* total modules for line1 */
+ char module2; /* total modules for line2 */
+ char *status; /* config status */
+ char *dimstatus; /* Y/N */
+ int status_index; /* field pointer */
+ } board;
+
+ struct {
+ char *cable;
+ char v_cable;
+ char speed;
+ char v_speed;
+ } line;
+
+ struct {
+ char type;
+ char *connect;
+ char speed;
+ char nport;
+ char *id;
+ char *idstr;
+ int start;
+ char v_type;
+ char v_connect;
+ char v_speed;
+ char v_nport;
+ char v_id;
+ char v_start;
+ } conc;
+
+ struct {
+ char type;
+ char nport;
+ char *id;
+ char *idstr;
+ int start;
+ char v_type;
+ char v_nport;
+ char v_id;
+ char v_start;
+ } module;
+
+ char *ttyname;
+
+ char *cuname;
+
+ char *printname;
+
+ int majornumber;
+
+ int altpin;
+
+ int ttysize;
+
+ int chsize;
+
+ int bssize;
+
+ int unsize;
+
+ int f2size;
+
+ int vpixsize;
+
+ int useintr;
+ } u;
+};
+
+#endif
diff --git a/drivers/staging/dgap/dgap_downld.h b/drivers/staging/dgap/dgap_downld.h
new file mode 100644
index 00000000000..f79e65cd1d5
--- /dev/null
+++ b/drivers/staging/dgap/dgap_downld.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * $Id: dgap_downld.h,v 1.1 2009/10/23 14:01:57 markh Exp $
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ */
+
+/*
+** downld.h
+** - describes the interface between the user level download process
+** and the concentrator download driver.
+*/
+
+#ifndef _DGAP_DOWNLD_H_
+#define _DGAP_DOWNLD_H_
+
+
+struct fepimg {
+ int type; /* board type */
+ int len; /* length of image */
+ char fepimage[1]; /* beginning of image */
+};
+
+struct downldio {
+ unsigned int req_type; /* FEP or concentrator */
+ unsigned int bdid; /* opaque board identifier */
+ union {
+ struct downld_t dl; /* download structure */
+ struct fepimg fi; /* fep/bios image structure */
+ } image;
+};
+
+#define DIGI_DLREQ_GET (('d'<<8) | 220)
+#define DIGI_DLREQ_SET (('d'<<8) | 221)
+
+#define DIGI_DL_NUKE (('d'<<8) | 222) /* Not really a dl request, but
+ dangerous enuff to not put in
+ digi.h */
+/* Packed bits of intarg for DIGI_DL_NUKE */
+#define DIGI_NUKE_RESET_ALL (1 << 31)
+#define DIGI_NUKE_INHIBIT_POLLER (1 << 30)
+#define DIGI_NUKE_BRD_NUMB 0x0f
+
+
+
+#define DLREQ_BIOS 0
+#define DLREQ_FEP 1
+#define DLREQ_CONC 2
+#define DLREQ_CONFIG 3
+#define DLREQ_DEVCREATE 4
+
+#endif
diff --git a/drivers/staging/dgap/dgap_driver.c b/drivers/staging/dgap/dgap_driver.c
new file mode 100644
index 00000000000..724a685753d
--- /dev/null
+++ b/drivers/staging/dgap/dgap_driver.c
@@ -0,0 +1,1048 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ * $Id: dgap_driver.c,v 1.3 2011/06/21 10:35:16 markh Exp $
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h> /* For udelay */
+#include <linux/slab.h>
+#include <asm/uaccess.h> /* For copy_from_user/copy_to_user */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+#include <linux/sched.h>
+#endif
+
+#include "dgap_driver.h"
+#include "dgap_pci.h"
+#include "dgap_fep5.h"
+#include "dgap_tty.h"
+#include "dgap_conf.h"
+#include "dgap_parse.h"
+#include "dgap_trace.h"
+#include "dgap_sysfs.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Digi International, http://www.digi.com");
+MODULE_DESCRIPTION("Driver for the Digi International EPCA PCI based product line");
+MODULE_SUPPORTED_DEVICE("dgap");
+
+/*
+ * insmod command line overrideable parameters
+ *
+ * NOTE: we use a set of macros to create the variables, which allows
+ * us to specify the variable type, name, initial value, and description.
+ */
+PARM_INT(debug, 0x00, 0644, "Driver debugging level");
+PARM_INT(rawreadok, 1, 0644, "Bypass flip buffers on input");
+PARM_INT(trcbuf_size, 0x100000, 0644, "Debugging trace buffer size.");
+
+
+/**************************************************************************
+ *
+ * protos for this file
+ *
+ */
+
+static int dgap_start(void);
+static void dgap_init_globals(void);
+static int dgap_found_board(struct pci_dev *pdev, int id);
+static void dgap_cleanup_board(struct board_t *brd);
+static void dgap_poll_handler(ulong dummy);
+static int dgap_init_pci(void);
+static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void dgap_remove_one(struct pci_dev *dev);
+static int dgap_probe1(struct pci_dev *pdev, int card_type);
+static void dgap_mbuf(struct board_t *brd, const char *fmt, ...);
+static int dgap_do_remap(struct board_t *brd);
+static irqreturn_t dgap_intr(int irq, void *voidbrd);
+
+/* Driver load/unload functions */
+int dgap_init_module(void);
+void dgap_cleanup_module(void);
+
+module_init(dgap_init_module);
+module_exit(dgap_cleanup_module);
+
+
+/*
+ * File operations permitted on Control/Management major.
+ */
+static struct file_operations DgapBoardFops =
+{
+ .owner = THIS_MODULE,
+};
+
+
+/*
+ * Globals
+ */
+uint dgap_NumBoards;
+struct board_t *dgap_Board[MAXBOARDS];
+DEFINE_SPINLOCK(dgap_global_lock);
+ulong dgap_poll_counter;
+char *dgap_config_buf;
+int dgap_driver_state = DRIVER_INITIALIZED;
+DEFINE_SPINLOCK(dgap_dl_lock);
+wait_queue_head_t dgap_dl_wait;
+int dgap_dl_action;
+int dgap_poll_tick = 20; /* Poll interval - 20 ms */
+
+/*
+ * Static vars.
+ */
+static int dgap_Major_Control_Registered = FALSE;
+static uint dgap_driver_start = FALSE;
+
+static struct class * dgap_class;
+
+/*
+ * Poller stuff
+ */
+static DEFINE_SPINLOCK(dgap_poll_lock); /* Poll scheduling lock */
+static ulong dgap_poll_time; /* Time of next poll */
+static uint dgap_poll_stop; /* Used to tell poller to stop */
+static struct timer_list dgap_poll_timer;
+
+
+static struct pci_device_id dgap_pci_tbl[] = {
+ { DIGI_VID, PCI_DEVICE_XEM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { DIGI_VID, PCI_DEVICE_CX_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { DIGI_VID, PCI_DEVICE_CX_IBM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+ { DIGI_VID, PCI_DEVICE_EPCJ_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+ { DIGI_VID, PCI_DEVICE_920_2_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+ { DIGI_VID, PCI_DEVICE_920_4_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+ { DIGI_VID, PCI_DEVICE_920_8_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+ { DIGI_VID, PCI_DEVICE_XR_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
+ { DIGI_VID, PCI_DEVICE_XRJ_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
+ { DIGI_VID, PCI_DEVICE_XR_422_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
+ { DIGI_VID, PCI_DEVICE_XR_IBM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
+ { DIGI_VID, PCI_DEVICE_XR_SAIP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
+ { DIGI_VID, PCI_DEVICE_XR_BULL_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
+ { DIGI_VID, PCI_DEVICE_920_8_HP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 13 },
+ { DIGI_VID, PCI_DEVICE_XEM_HP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 },
+ {0,} /* 0 terminated list. */
+};
+MODULE_DEVICE_TABLE(pci, dgap_pci_tbl);
+
+
+/*
+ * A generic list of Product names, PCI Vendor ID, and PCI Device ID.
+ */
+struct board_id {
+ uint config_type;
+ uchar *name;
+ uint maxports;
+ uint dpatype;
+};
+
+static struct board_id dgap_Ids[] =
+{
+ { PPCM, PCI_DEVICE_XEM_NAME, 64, (T_PCXM | T_PCLITE | T_PCIBUS) },
+ { PCX, PCI_DEVICE_CX_NAME, 128, (T_CX | T_PCIBUS) },
+ { PCX, PCI_DEVICE_CX_IBM_NAME, 128, (T_CX | T_PCIBUS) },
+ { PEPC, PCI_DEVICE_EPCJ_NAME, 224, (T_EPC | T_PCIBUS) },
+ { APORT2_920P, PCI_DEVICE_920_2_NAME, 2, (T_PCXR | T_PCLITE | T_PCIBUS) },
+ { APORT4_920P, PCI_DEVICE_920_4_NAME, 4, (T_PCXR | T_PCLITE | T_PCIBUS) },
+ { APORT8_920P, PCI_DEVICE_920_8_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
+ { PAPORT8, PCI_DEVICE_XR_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
+ { PAPORT8, PCI_DEVICE_XRJ_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
+ { PAPORT8, PCI_DEVICE_XR_422_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
+ { PAPORT8, PCI_DEVICE_XR_IBM_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
+ { PAPORT8, PCI_DEVICE_XR_SAIP_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
+ { PAPORT8, PCI_DEVICE_XR_BULL_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
+ { APORT8_920P, PCI_DEVICE_920_8_HP_NAME, 8, (T_PCXR | T_PCLITE | T_PCIBUS) },
+ { PPCM, PCI_DEVICE_XEM_HP_NAME, 64, (T_PCXM | T_PCLITE | T_PCIBUS) },
+ {0,} /* 0 terminated list. */
+};
+
+static struct pci_driver dgap_driver = {
+ .name = "dgap",
+ .probe = dgap_init_one,
+ .id_table = dgap_pci_tbl,
+ .remove = dgap_remove_one,
+};
+
+
+char *dgap_state_text[] = {
+ "Board Failed",
+ "Configuration for board not found.\n\t\t\tRun mpi to configure board.",
+ "Board Found",
+ "Need Reset",
+ "Finished Reset",
+ "Need Config",
+ "Finished Config",
+ "Need Device Creation",
+ "Requested Device Creation",
+ "Finished Device Creation",
+ "Need BIOS Load",
+ "Requested BIOS",
+ "Doing BIOS Load",
+ "Finished BIOS Load",
+ "Need FEP Load",
+ "Requested FEP",
+ "Doing FEP Load",
+ "Finished FEP Load",
+ "Requested PROC creation",
+ "Finished PROC creation",
+ "Board READY",
+};
+
+char *dgap_driver_state_text[] = {
+ "Driver Initialized",
+ "Driver needs configuration load.",
+ "Driver requested configuration from download daemon.",
+ "Driver Ready."
+};
+
+
+
+/************************************************************************
+ *
+ * Driver load/unload functions
+ *
+ ************************************************************************/
+
+/*
+ * init_module()
+ *
+ * Module load. This is where it all starts.
+ */
+int dgap_init_module(void)
+{
+ int rc = 0;
+
+ APR(("%s, Digi International Part Number %s\n", DG_NAME, DG_PART));
+
+ /*
+ * Initialize global stuff
+ */
+ rc = dgap_start();
+
+ if (rc < 0) {
+ return(rc);
+ }
+
+ /*
+ * Find and configure all the cards
+ */
+ rc = dgap_init_pci();
+
+ /*
+ * If something went wrong in the scan, bail out of driver.
+ */
+ if (rc < 0) {
+ /* Only unregister the pci driver if it was actually registered. */
+ if (dgap_NumBoards)
+ pci_unregister_driver(&dgap_driver);
+ else
+ printk("WARNING: dgap driver load failed. No DGAP boards found.\n");
+
+ dgap_cleanup_module();
+ }
+ else {
+ dgap_create_driver_sysfiles(&dgap_driver);
+ }
+
+ DPR_INIT(("Finished init_module. Returning %d\n", rc));
+ return (rc);
+}
+
+
+/*
+ * Start of driver.
+ */
+static int dgap_start(void)
+{
+ int rc = 0;
+ unsigned long flags;
+
+ if (dgap_driver_start == FALSE) {
+
+ dgap_driver_start = TRUE;
+
+ /* make sure that the globals are init'd before we do anything else */
+ dgap_init_globals();
+
+ dgap_NumBoards = 0;
+
+ APR(("For the tools package or updated drivers please visit http://www.digi.com\n"));
+
+ /*
+ * Register our base character device into the kernel.
+ * This allows the download daemon to connect to the downld device
+ * before any of the boards are init'ed.
+ */
+ if (!dgap_Major_Control_Registered) {
+ /*
+ * Register management/dpa devices
+ */
+ rc = register_chrdev(DIGI_DGAP_MAJOR, "dgap", &DgapBoardFops);
+ if (rc < 0) {
+ APR(("Can't register dgap driver device (%d)\n", rc));
+ return (rc);
+ }
+
+ dgap_class = class_create(THIS_MODULE, "dgap_mgmt");
+ device_create(dgap_class, NULL,
+ MKDEV(DIGI_DGAP_MAJOR, 0),
+ NULL, "dgap_mgmt");
+ device_create(dgap_class, NULL,
+ MKDEV(DIGI_DGAP_MAJOR, 1),
+ NULL, "dgap_downld");
+ dgap_Major_Control_Registered = TRUE;
+ }
+
+ /*
+ * Init any global tty stuff.
+ */
+ rc = dgap_tty_preinit();
+
+ if (rc < 0) {
+ APR(("tty preinit - not enough memory (%d)\n", rc));
+ return(rc);
+ }
+
+ /* Start the poller */
+ DGAP_LOCK(dgap_poll_lock, flags);
+ init_timer(&dgap_poll_timer);
+ dgap_poll_timer.function = dgap_poll_handler;
+ dgap_poll_timer.data = 0;
+ dgap_poll_time = jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
+ dgap_poll_timer.expires = dgap_poll_time;
+ DGAP_UNLOCK(dgap_poll_lock, flags);
+
+ add_timer(&dgap_poll_timer);
+
+ dgap_driver_state = DRIVER_NEED_CONFIG_LOAD;
+ }
+
+ return (rc);
+}
+
+
+/*
+ * Register the PCI driver; boards are counted as the probe routine finds them.
+ */
+static int dgap_init_pci(void)
+{
+ return pci_register_driver(&dgap_driver);
+}
+
+
+/* returns 0 on success, or negative on error */
+static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int rc;
+
+ /* wake up and enable device */
+ rc = pci_enable_device(pdev);
+
+ if (rc < 0) {
+ rc = -EIO;
+ } else {
+ rc = dgap_probe1(pdev, ent->driver_data);
+ if (rc == 0) {
+ dgap_NumBoards++;
+ DPR_INIT(("Incrementing numboards to %d\n", dgap_NumBoards));
+ }
+ }
+ return rc;
+}
+
+
+static int dgap_probe1(struct pci_dev *pdev, int card_type)
+{
+ return dgap_found_board(pdev, card_type);
+}
+
+
+static void dgap_remove_one(struct pci_dev *dev)
+{
+ /* Do Nothing */
+}
+
+
+/*
+ * dgap_cleanup_module()
+ *
+ * Module unload. This is where it all ends.
+ */
+void dgap_cleanup_module(void)
+{
+ int i;
+ ulong lock_flags;
+
+ DGAP_LOCK(dgap_poll_lock, lock_flags);
+ dgap_poll_stop = 1;
+ DGAP_UNLOCK(dgap_poll_lock, lock_flags);
+
+ /* Turn off poller right away. */
+ del_timer_sync( &dgap_poll_timer);
+
+ dgap_remove_driver_sysfiles(&dgap_driver);
+
+
+ if (dgap_Major_Control_Registered) {
+ device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 0));
+ device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 1));
+ class_destroy(dgap_class);
+ unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
+ }
+
+ if (dgap_config_buf)
+ kfree(dgap_config_buf);
+
+ for (i = 0; i < dgap_NumBoards; ++i) {
+ dgap_remove_ports_sysfiles(dgap_Board[i]);
+ dgap_tty_uninit(dgap_Board[i]);
+ dgap_cleanup_board(dgap_Board[i]);
+ }
+
+ dgap_tty_post_uninit();
+
+#if defined(DGAP_TRACER)
+ /* last thing, make sure we release the tracebuffer */
+ dgap_tracer_free();
+#endif
+ if (dgap_NumBoards)
+ pci_unregister_driver(&dgap_driver);
+}
+
+
+/*
+ * dgap_cleanup_board()
+ *
+ * Free all the memory associated with a board
+ */
+static void dgap_cleanup_board(struct board_t *brd)
+{
+ int i = 0;
+
+ if(!brd || brd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ if (brd->intr_used && brd->irq)
+ free_irq(brd->irq, brd);
+
+ tasklet_kill(&brd->helper_tasklet);
+
+ if (brd->re_map_port) {
+ release_mem_region(brd->membase + 0x200000, 0x200000);
+ iounmap(brd->re_map_port);
+ brd->re_map_port = NULL;
+ }
+
+ if (brd->re_map_membase) {
+ release_mem_region(brd->membase, 0x200000);
+ iounmap(brd->re_map_membase);
+ brd->re_map_membase = NULL;
+ }
+
+ if (brd->msgbuf_head) {
+ unsigned long flags;
+
+ DGAP_LOCK(dgap_global_lock, flags);
+ brd->msgbuf = NULL;
+ printk("%s", brd->msgbuf_head);
+ kfree(brd->msgbuf_head);
+ brd->msgbuf_head = NULL;
+ DGAP_UNLOCK(dgap_global_lock, flags);
+ }
+
+ /* Free all allocated channels structs */
+ for (i = 0; i < MAXPORTS ; i++) {
+ if (brd->channels[i]) {
+ kfree(brd->channels[i]);
+ brd->channels[i] = NULL;
+ }
+ }
+
+ if (brd->flipbuf)
+ kfree(brd->flipbuf);
+ if (brd->flipflagbuf)
+ kfree(brd->flipflagbuf);
+
+ dgap_Board[brd->boardnum] = NULL;
+
+ kfree(brd);
+}
+
+
+/*
+ * dgap_found_board()
+ *
+ * A board has been found, init it.
+ */
+static int dgap_found_board(struct pci_dev *pdev, int id)
+{
+ struct board_t *brd;
+ unsigned int pci_irq;
+ int i = 0;
+ unsigned long flags;
+
+ /* get the board structure and prep it */
+ brd = dgap_Board[dgap_NumBoards] =
+ (struct board_t *) dgap_driver_kzmalloc(sizeof(struct board_t), GFP_KERNEL);
+ if (!brd) {
+ APR(("memory allocation for board structure failed\n"));
+ return(-ENOMEM);
+ }
+
+ /* make a temporary message buffer for the boot messages */
+ brd->msgbuf = brd->msgbuf_head =
+ (char *) dgap_driver_kzmalloc(sizeof(char) * 8192, GFP_KERNEL);
+ if(!brd->msgbuf) {
+ kfree(brd);
+ APR(("memory allocation for board msgbuf failed\n"));
+ return(-ENOMEM);
+ }
+
+ /* store the info for the board we've found */
+ brd->magic = DGAP_BOARD_MAGIC;
+ brd->boardnum = dgap_NumBoards;
+ brd->firstminor = 0;
+ brd->vendor = dgap_pci_tbl[id].vendor;
+ brd->device = dgap_pci_tbl[id].device;
+ brd->pdev = pdev;
+ brd->pci_bus = pdev->bus->number;
+ brd->pci_slot = PCI_SLOT(pdev->devfn);
+ brd->name = dgap_Ids[id].name;
+ brd->maxports = dgap_Ids[id].maxports;
+ brd->type = dgap_Ids[id].config_type;
+ brd->dpatype = dgap_Ids[id].dpatype;
+ brd->dpastatus = BD_NOFEP;
+ init_waitqueue_head(&brd->state_wait);
+
+ DGAP_SPINLOCK_INIT(brd->bd_lock);
+
+ brd->state = BOARD_FOUND;
+ brd->runwait = 0;
+ brd->inhibit_poller = FALSE;
+ brd->wait_for_bios = 0;
+ brd->wait_for_fep = 0;
+
+ for (i = 0; i < MAXPORTS; i++) {
+ brd->channels[i] = NULL;
+ }
+
+ /* store which card & revision we have */
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &brd->subvendor);
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &brd->subdevice);
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &brd->rev);
+
+ pci_irq = pdev->irq;
+ brd->irq = pci_irq;
+
+ /* get the PCI Base Address Registers */
+
+ /* Xr Jupiter and EPC use BAR 2 */
+ if (brd->device == PCI_DEVICE_XRJ_DID || brd->device == PCI_DEVICE_EPCJ_DID) {
+ brd->membase = pci_resource_start(pdev, 2);
+ brd->membase_end = pci_resource_end(pdev, 2);
+ }
+ /* Everyone else uses BAR 0 */
+ else {
+ brd->membase = pci_resource_start(pdev, 0);
+ brd->membase_end = pci_resource_end(pdev, 0);
+ }
+
+ if (!brd->membase) {
+ APR(("card has no PCI IO resources, failing board.\n"));
+ return -ENODEV;
+ }
+
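+ /*
+ * Strip the PCI BAR flag bits to get the real base address:
+ * an I/O space BAR carries flags in its low 2 bits, a memory
+ * space BAR in its low 4 bits.
+ */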
+ if (brd->membase & 1)
+ brd->membase &= ~3;
+ else
+ brd->membase &= ~15;
+
+ /*
+ * On the PCI boards, there is no IO space allocated
+ * The I/O registers will be in the first 3 bytes of the
+ * upper 2MB of the 4MB memory space. The board memory
+ * will be mapped into the low 2MB of the 4MB memory space
+ */
+ brd->port = brd->membase + PCI_IO_OFFSET;
+ brd->port_end = brd->port + PCI_IO_SIZE;
+
+
+ /*
+ * Special initialization for non-PLX boards
+ */
+ if (brd->device != PCI_DEVICE_XRJ_DID && brd->device != PCI_DEVICE_EPCJ_DID) {
+ unsigned short cmd;
+
+ pci_write_config_byte(pdev, 0x40, 0);
+ pci_write_config_byte(pdev, 0x46, 0);
+
+ /* Limit burst length to 2 doubleword transactions */
+ pci_write_config_byte(pdev, 0x42, 1);
+
+ /*
+ * Enable IO and mem if not already done.
+ * This was needed for support on Itanium.
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+ }
+
+ /* init our poll helper tasklet */
+ tasklet_init(&brd->helper_tasklet, dgap_poll_tasklet, (unsigned long) brd);
+
+ /* Log the information about the board */
+ dgap_mbuf(brd, DRVSTR": board %d: %s (rev %d), irq %d\n",
+ dgap_NumBoards, brd->name, brd->rev, brd->irq);
+
+ DPR_INIT(("dgap_scan(%d) - printing out the msgbuf\n", i));
+ DGAP_LOCK(dgap_global_lock, flags);
+ brd->msgbuf = NULL;
+ printk("%s", brd->msgbuf_head);
+ kfree(brd->msgbuf_head);
+ brd->msgbuf_head = NULL;
+ DGAP_UNLOCK(dgap_global_lock, flags);
+
+ i = dgap_do_remap(brd);
+ if (i)
+ brd->state = BOARD_FAILED;
+ else
+ brd->state = NEED_RESET;
+
+ return(0);
+}
+
+
+int dgap_finalize_board_init(struct board_t *brd)
+{
+ int rc;
+
+ DPR_INIT(("dgap_finalize_board_init() - start\n"));
+
+ if (!brd || brd->magic != DGAP_BOARD_MAGIC)
+ return(-ENODEV);
+
+ DPR_INIT(("dgap_finalize_board_init() - start #2\n"));
+
+ brd->use_interrupts = dgap_config_get_useintr(brd);
+
+ /*
+ * Set up our interrupt handler if we are set to do interrupts.
+ */
+ if (brd->use_interrupts && brd->irq) {
+
+ rc = request_irq(brd->irq, dgap_intr, IRQF_SHARED, "DGAP", brd);
+
+ if (rc) {
+ dgap_mbuf(brd, DRVSTR": Failed to hook IRQ %d. Board will work in poll mode.\n",
+ brd->irq);
+ brd->intr_used = 0;
+ }
+ else
+ brd->intr_used = 1;
+ } else {
+ brd->intr_used = 0;
+ }
+
+ return(0);
+}
+
+
+/*
+ * Remap PCI memory.
+ */
+static int dgap_do_remap(struct board_t *brd)
+{
+ if (!brd || brd->magic != DGAP_BOARD_MAGIC)
+ return -ENXIO;
+
+ if (!request_mem_region(brd->membase, 0x200000, "dgap")) {
+ APR(("dgap: mem_region %lx already in use.\n", brd->membase));
+ return -ENOMEM;
+ }
+
+ if (!request_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000, "dgap")) {
+ APR(("dgap: mem_region IO %lx already in use.\n",
+ brd->membase + PCI_IO_OFFSET));
+ release_mem_region(brd->membase, 0x200000);
+ return -ENOMEM;
+ }
+
+ brd->re_map_membase = ioremap(brd->membase, 0x200000);
+ if (!brd->re_map_membase) {
+ APR(("dgap: ioremap mem %lx cannot be mapped.\n", brd->membase));
+ release_mem_region(brd->membase, 0x200000);
+ release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
+ return -ENOMEM;
+ }
+
+ brd->re_map_port = ioremap((brd->membase + PCI_IO_OFFSET), 0x200000);
+ if (!brd->re_map_port) {
+ release_mem_region(brd->membase, 0x200000);
+ release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
+ iounmap(brd->re_map_membase);
+ APR(("dgap: ioremap IO mem %lx cannot be mapped.\n",
+ brd->membase + PCI_IO_OFFSET));
+ return -ENOMEM;
+ }
+
+ DPR_INIT(("remapped io: 0x%p remapped mem: 0x%p\n",
+ brd->re_map_port, brd->re_map_membase));
+ return 0;
+}
+
+
+/*****************************************************************************
+*
+* Function:
+*
+* dgap_poll_handler
+*
+* Author:
+*
+* Scott H Kilau
+*
+* Parameters:
+*
+* dummy -- ignored
+*
+* Return Values:
+*
+* none
+*
+* Description:
+*
+* As each timer expires, it determines (a) whether the "transmit"
+* waiter needs to be woken up, and (b) whether the poller needs to
+* be rescheduled.
+*
+******************************************************************************/
+
+static void dgap_poll_handler(ulong dummy)
+{
+ int i;
+ struct board_t *brd;
+ unsigned long lock_flags;
+ unsigned long lock_flags2;
+ ulong new_time;
+
+ dgap_poll_counter++;
+
+
+ /*
+ * If driver needs the config file still,
+ * keep trying to wake up the downloader to
+ * send us the file.
+ */
+ if (dgap_driver_state == DRIVER_NEED_CONFIG_LOAD) {
+ /*
+ * Signal downloader, it's got some work to do.
+ */
+ DGAP_LOCK(dgap_dl_lock, lock_flags2);
+ if (dgap_dl_action != 1) {
+ dgap_dl_action = 1;
+ wake_up_interruptible(&dgap_dl_wait);
+ }
+ DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
+ goto schedule_poller;
+ }
+ /*
+ * Do not start the board state machine until
+ * driver tells us it's up and running, and has
+ * everything it needs.
+ */
+ else if (dgap_driver_state != DRIVER_READY) {
+ goto schedule_poller;
+ }
+
+ /*
+ * If we have just 1 board, or the system is not SMP,
+ * then use the typical old style poller.
+ * Otherwise, use our new tasklet based poller, which should
+ * speed things up for multiple boards.
+ */
+ if ( (dgap_NumBoards == 1) || (num_online_cpus() <= 1) ) {
+ for (i = 0; i < dgap_NumBoards; i++) {
+
+ brd = dgap_Board[i];
+
+ if (brd->state == BOARD_FAILED) {
+ continue;
+ }
+ if (!brd->intr_running) {
+ /* Call the real board poller directly */
+ dgap_poll_tasklet((unsigned long) brd);
+ }
+ }
+ }
+ else {
+ /* Go thru each board, kicking off a tasklet for each if needed */
+ for (i = 0; i < dgap_NumBoards; i++) {
+ brd = dgap_Board[i];
+
+ /*
+ * Attempt to grab the board lock.
+ *
+ * If we can't get it, no big deal, the next poll will get it.
+ * Basically, I just really don't want to spin in here, because I want
+ * to kick off my tasklets as fast as I can, and then get out of the poller.
+ */
+ if (!spin_trylock(&brd->bd_lock)) {
+ continue;
+ }
+
+ /* If board is in a failed state, don't bother scheduling a tasklet */
+ if (brd->state == BOARD_FAILED) {
+ spin_unlock(&brd->bd_lock);
+ continue;
+ }
+
+ /* Schedule a poll helper task */
+ if (!brd->intr_running) {
+ tasklet_schedule(&brd->helper_tasklet);
+ }
+
+ /*
+ * Can't do DGAP_UNLOCK here, as we don't have
+ * lock_flags because we did a trylock above.
+ */
+ spin_unlock(&brd->bd_lock);
+ }
+ }
+
+schedule_poller:
+
+ /*
+ * Schedule ourself back at the nominal wakeup interval.
+ */
+ DGAP_LOCK(dgap_poll_lock, lock_flags );
+ dgap_poll_time += dgap_jiffies_from_ms(dgap_poll_tick);
+
+ new_time = dgap_poll_time - jiffies;
+
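+ /*
+ * If the next expiry has drifted more than two poll ticks away
+ * from now, resynchronize it to the current jiffies.
+ */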
+ if ((ulong) new_time >= 2 * dgap_poll_tick) {
+ dgap_poll_time = jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
+ }
+
+ dgap_poll_timer.function = dgap_poll_handler;
+ dgap_poll_timer.data = 0;
+ dgap_poll_timer.expires = dgap_poll_time;
+ DGAP_UNLOCK(dgap_poll_lock, lock_flags );
+
+ if (!dgap_poll_stop)
+ add_timer(&dgap_poll_timer);
+}
+
+
+
+
+/*
+ * dgap_intr()
+ *
+ * Driver interrupt handler.
+ */
+static irqreturn_t dgap_intr(int irq, void *voidbrd)
+{
+ struct board_t *brd = (struct board_t *) voidbrd;
+
+ if (!brd) {
+ APR(("Received interrupt (%d) with null board associated\n", irq));
+ return IRQ_NONE;
+ }
+
+ /*
+ * Check to make sure it's for us.
+ */
+ if (brd->magic != DGAP_BOARD_MAGIC) {
+ APR(("Received interrupt (%d) with a board pointer that wasn't ours!\n", irq));
+ return IRQ_NONE;
+ }
+
+ brd->intr_count++;
+
+ /*
+ * Schedule tasklet to run at a better time.
+ */
+ tasklet_schedule(&brd->helper_tasklet);
+ return IRQ_HANDLED;
+}
+
+
+/*
+ * dgap_init_globals()
+ *
+ * This is where we initialize the globals from the static insmod
+ * configuration variables. These are declared near the head of
+ * this file.
+ */
+static void dgap_init_globals(void)
+{
+ int i = 0;
+
+ dgap_rawreadok = rawreadok;
+ dgap_trcbuf_size = trcbuf_size;
+ dgap_debug = debug;
+
+ for (i = 0; i < MAXBOARDS; i++) {
+ dgap_Board[i] = NULL;
+ }
+
+ init_timer( &dgap_poll_timer );
+
+ init_waitqueue_head(&dgap_dl_wait);
+ dgap_dl_action = 0;
+}
+
+
+/************************************************************************
+ *
+ * Utility functions
+ *
+ ************************************************************************/
+
+
+/*
+ * dgap_driver_kzmalloc()
+ *
+ * Malloc and clear memory.
+ */
+void *dgap_driver_kzmalloc(size_t size, int priority)
+{
+ void *p = kmalloc(size, priority);
+ if(p)
+ memset(p, 0, size);
+ return(p);
+}
+
+
+/*
+ * dgap_mbuf()
+ *
+ * Used to print to the message buffer during board init.
+ */
+static void dgap_mbuf(struct board_t *brd, const char *fmt, ...) {
+ va_list ap;
+ char buf[1024];
+ int i;
+ unsigned long flags;
+
+ DGAP_LOCK(dgap_global_lock, flags);
+
+ /* Format buf using fmt and arguments contained in ap. */
+ va_start(ap, fmt);
+ i = vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+
+ DPR((buf));
+
+ if (!brd || !brd->msgbuf) {
+ printk("%s", buf);
+ DGAP_UNLOCK(dgap_global_lock, flags);
+ return;
+ }
+
+ memcpy(brd->msgbuf, buf, strlen(buf));
+ brd->msgbuf += strlen(buf);
+ *brd->msgbuf = 0;
+
+ DGAP_UNLOCK(dgap_global_lock, flags);
+}
+
+
+/*
+ * dgap_ms_sleep()
+ *
+ * Put the driver to sleep for x ms.
+ *
+ * Returns 0 if timed out, !0 (showing signal) if interrupted by a signal.
+ */
+int dgap_ms_sleep(ulong ms)
+{
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout((ms * HZ) / 1000);
+ return (signal_pending(current));
+}
+
+
+
+/*
+ * dgap_ioctl_name() : Returns a text version of each ioctl value.
+ */
+char *dgap_ioctl_name(int cmd)
+{
+ switch(cmd) {
+
+ case TCGETA: return("TCGETA");
+ case TCGETS: return("TCGETS");
+ case TCSETA: return("TCSETA");
+ case TCSETS: return("TCSETS");
+ case TCSETAW: return("TCSETAW");
+ case TCSETSW: return("TCSETSW");
+ case TCSETAF: return("TCSETAF");
+ case TCSETSF: return("TCSETSF");
+ case TCSBRK: return("TCSBRK");
+ case TCXONC: return("TCXONC");
+ case TCFLSH: return("TCFLSH");
+ case TIOCGSID: return("TIOCGSID");
+
+ case TIOCGETD: return("TIOCGETD");
+ case TIOCSETD: return("TIOCSETD");
+ case TIOCGWINSZ: return("TIOCGWINSZ");
+ case TIOCSWINSZ: return("TIOCSWINSZ");
+
+ case TIOCMGET: return("TIOCMGET");
+ case TIOCMSET: return("TIOCMSET");
+ case TIOCMBIS: return("TIOCMBIS");
+ case TIOCMBIC: return("TIOCMBIC");
+
+ /* from digi.h */
+ case DIGI_SETA: return("DIGI_SETA");
+ case DIGI_SETAW: return("DIGI_SETAW");
+ case DIGI_SETAF: return("DIGI_SETAF");
+ case DIGI_SETFLOW: return("DIGI_SETFLOW");
+ case DIGI_SETAFLOW: return("DIGI_SETAFLOW");
+ case DIGI_GETFLOW: return("DIGI_GETFLOW");
+ case DIGI_GETAFLOW: return("DIGI_GETAFLOW");
+ case DIGI_GETA: return("DIGI_GETA");
+ case DIGI_GEDELAY: return("DIGI_GEDELAY");
+ case DIGI_SEDELAY: return("DIGI_SEDELAY");
+ case DIGI_GETCUSTOMBAUD: return("DIGI_GETCUSTOMBAUD");
+ case DIGI_SETCUSTOMBAUD: return("DIGI_SETCUSTOMBAUD");
+ case TIOCMODG: return("TIOCMODG");
+ case TIOCMODS: return("TIOCMODS");
+ case TIOCSDTR: return("TIOCSDTR");
+ case TIOCCDTR: return("TIOCCDTR");
+
+ default: return("unknown");
+ }
+}
diff --git a/drivers/staging/dgap/dgap_driver.h b/drivers/staging/dgap/dgap_driver.h
new file mode 100644
index 00000000000..b1cf489a729
--- /dev/null
+++ b/drivers/staging/dgap/dgap_driver.h
@@ -0,0 +1,618 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ *************************************************************************
+ *
+ * Driver includes
+ *
+ *************************************************************************/
+
+#ifndef __DGAP_DRIVER_H
+#define __DGAP_DRIVER_H
+
+#include <linux/version.h> /* To get the current Linux version */
+#include <linux/types.h> /* To pick up the various Linux types */
+#include <linux/tty.h> /* To pick up the various tty structs/defines */
+#include <linux/interrupt.h> /* For irqreturn_t type */
+
+#include "dgap_types.h" /* Additional types needed by the Digi header files */
+#include "digi.h" /* Digi specific ioctl header */
+#include "dgap_kcompat.h" /* Kernel 2.4/2.6 compat includes */
+#include "dgap_sysfs.h" /* Support for SYSFS */
+
+/*************************************************************************
+ *
+ * Driver defines
+ *
+ *************************************************************************/
+
+/*
+ * Driver identification, error and debugging statements
+ *
+ * In theory, you can change all occurrences of "digi" in the next
+ * three lines, and the driver printk's will all automagically change.
+ *
+ * APR((fmt, args, ...)); Always prints message
+ * DPR((fmt, args, ...)); Only prints if DGAP_TRACER is defined at
+ * compile time and dgap_debug!=0
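+ *
+ * Typical call (note the double parentheses), e.g. with a board number
+ * and name: APR(("board %d: %s\n", boardnum, name)); -- the DRVSTR
+ * "dgap: " prefix is added automatically.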
+ */
+#define PROCSTR "dgap" /* /proc entries */
+#define DEVSTR "/dev/dg/dgap" /* /dev entries */
+#define DRVSTR "dgap" /* Driver name string
+ * displayed by APR */
+#define APR(args) do { PRINTF_TO_KMEM(args); printk(DRVSTR": "); printk args; \
+ } while (0)
+#define RAPR(args) do { PRINTF_TO_KMEM(args); printk args; } while (0)
+
+#define TRC_TO_CONSOLE 1
+
+/*
+ * Debugging levels can be set using the debug insmod variable
+ * They can also be compiled out completely.
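+ *
+ * For example, loading with "insmod dgap debug=0x09" (with DGAP_TRACER
+ * compiled in) turns on the DBG_INIT and DBG_OPEN traces below.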
+ */
+
+#define DBG_INIT (dgap_debug & 0x01)
+#define DBG_BASIC (dgap_debug & 0x02)
+#define DBG_CORE (dgap_debug & 0x04)
+
+#define DBG_OPEN (dgap_debug & 0x08)
+#define DBG_CLOSE (dgap_debug & 0x10)
+#define DBG_READ (dgap_debug & 0x20)
+#define DBG_WRITE (dgap_debug & 0x40)
+
+#define DBG_IOCTL (dgap_debug & 0x80)
+
+#define DBG_PROC (dgap_debug & 0x100)
+#define DBG_PARAM (dgap_debug & 0x200)
+#define DBG_PSCAN (dgap_debug & 0x400)
+#define DBG_EVENT (dgap_debug & 0x800)
+
+#define DBG_DRAIN (dgap_debug & 0x1000)
+#define DBG_CARR (dgap_debug & 0x2000)
+
+#define DBG_MGMT (dgap_debug & 0x4000)
+
+
+#if defined(DGAP_TRACER)
+
+# if defined(TRC_TO_KMEM)
+/* Choose one: */
+# define TRC_ON_OVERFLOW_WRAP_AROUND
+# undef TRC_ON_OVERFLOW_SHIFT_BUFFER
+# endif //TRC_TO_KMEM
+
+# define TRC_MAXMSG 1024
+# define TRC_OVERFLOW "(OVERFLOW)"
+# define TRC_DTRC "/usr/bin/dtrc"
+
+#if defined TRC_TO_CONSOLE
+#define PRINTF_TO_CONSOLE(args) { printk(DRVSTR": "); printk args; }
+#else //!defined TRC_TO_CONSOLE
+#define PRINTF_TO_CONSOLE(args)
+#endif
+
+#if defined TRC_TO_KMEM
+#define PRINTF_TO_KMEM(args) dgap_tracef args
+#else //!defined TRC_TO_KMEM
+#define PRINTF_TO_KMEM(args)
+#endif
+
+#define TRC(args) { PRINTF_TO_KMEM(args); PRINTF_TO_CONSOLE(args) }
+
+# define DPR_INIT(ARGS) if (DBG_INIT) TRC(ARGS)
+# define DPR_BASIC(ARGS) if (DBG_BASIC) TRC(ARGS)
+# define DPR_CORE(ARGS) if (DBG_CORE) TRC(ARGS)
+# define DPR_OPEN(ARGS) if (DBG_OPEN) TRC(ARGS)
+# define DPR_CLOSE(ARGS) if (DBG_CLOSE) TRC(ARGS)
+# define DPR_READ(ARGS) if (DBG_READ) TRC(ARGS)
+# define DPR_WRITE(ARGS) if (DBG_WRITE) TRC(ARGS)
+# define DPR_IOCTL(ARGS) if (DBG_IOCTL) TRC(ARGS)
+# define DPR_PROC(ARGS) if (DBG_PROC) TRC(ARGS)
+# define DPR_PARAM(ARGS) if (DBG_PARAM) TRC(ARGS)
+# define DPR_PSCAN(ARGS) if (DBG_PSCAN) TRC(ARGS)
+# define DPR_EVENT(ARGS) if (DBG_EVENT) TRC(ARGS)
+# define DPR_DRAIN(ARGS) if (DBG_DRAIN) TRC(ARGS)
+# define DPR_CARR(ARGS) if (DBG_CARR) TRC(ARGS)
+# define DPR_MGMT(ARGS) if (DBG_MGMT) TRC(ARGS)
+
+# define DPR(ARGS) if (dgap_debug) TRC(ARGS)
+# define P(X) dgap_tracef(#X "=%p\n", X)
+# define X(X) dgap_tracef(#X "=%x\n", X)
+
+#else//!defined DGAP_TRACER
+
+#define PRINTF_TO_KMEM(args)
+# define TRC(ARGS)
+# define DPR_INIT(ARGS)
+# define DPR_BASIC(ARGS)
+# define DPR_CORE(ARGS)
+# define DPR_OPEN(ARGS)
+# define DPR_CLOSE(ARGS)
+# define DPR_READ(ARGS)
+# define DPR_WRITE(ARGS)
+# define DPR_IOCTL(ARGS)
+# define DPR_PROC(ARGS)
+# define DPR_PARAM(ARGS)
+# define DPR_PSCAN(ARGS)
+# define DPR_EVENT(ARGS)
+# define DPR_DRAIN(ARGS)
+# define DPR_CARR(ARGS)
+# define DPR_MGMT(ARGS)
+
+# define DPR(args)
+
+#endif//DGAP_TRACER
+
+/* Number of boards we support at once. */
+#define MAXBOARDS 32
+#define MAXPORTS 224
+#define MAXTTYNAMELEN 200
+
+/* Our 3 magic numbers for our board, channel and unit structs */
+#define DGAP_BOARD_MAGIC 0x5c6df104
+#define DGAP_CHANNEL_MAGIC 0x6c6df104
+#define DGAP_UNIT_MAGIC 0x7c6df104
+
+/* Serial port types */
+#define DGAP_SERIAL 0
+#define DGAP_PRINT 1
+
+#define SERIAL_TYPE_NORMAL 1
+
+/* 4 extra for alignment play space */
+#define WRITEBUFLEN ((4096) + 4)
+#define MYFLIPLEN N_TTY_BUF_SIZE
+
+#define SBREAK_TIME 0x25
+#define U2BSIZE 0x400
+
+#define dgap_jiffies_from_ms(a) (((a) * HZ) / 1000)
+
+/*
+ * Our major for the mgmt devices.
+ *
+ * We can use 22, because Digi was allocated 22 and 23 for the epca driver.
+ * 22 has become free now that the "cu" devices have
+ * been removed from 2.6.
+ * Also, this *IS* the epca driver, just PCI only now.
+ */
+#ifndef DIGI_DGAP_MAJOR
+# define DIGI_DGAP_MAJOR 22
+#endif
+
+/*
+ * The parameters we use to define the periods of the moving averages.
+ */
+#define MA_PERIOD (HZ / 10)
+#define SMA_DUR (1 * HZ)
+#define EMA_DUR (1 * HZ)
+#define SMA_NPERIODS (SMA_DUR / MA_PERIOD)
+#define EMA_NPERIODS (EMA_DUR / MA_PERIOD)
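+/*
+ * With MA_PERIOD at HZ/10, both SMA_NPERIODS and EMA_NPERIODS work out
+ * to 10 sample periods per one-second average.
+ */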
+
+/*
+ * Define a local default termios struct. All ports will be created
+ * with this termios initially. This is the same structure that is defined
+ * as the default in tty_io.c with the same settings overridden as in serial.c
+ *
+ * In short, this should match the internal serial ports' defaults.
+ */
+#define DEFAULT_IFLAGS (ICRNL | IXON)
+#define DEFAULT_OFLAGS (OPOST | ONLCR)
+#define DEFAULT_CFLAGS (B9600 | CS8 | CREAD | HUPCL | CLOCAL)
+#define DEFAULT_LFLAGS (ISIG | ICANON | ECHO | ECHOE | ECHOK | \
+ ECHOCTL | ECHOKE | IEXTEN)
+
+#ifndef _POSIX_VDISABLE
+#define _POSIX_VDISABLE '\0'
+#endif
+
+#define SNIFF_MAX 65536 /* Sniff buffer size (2^n) */
+#define SNIFF_MASK (SNIFF_MAX - 1) /* Sniff wrap mask */
+
+#define VPDSIZE (512)
+
+/*
+ * Lock function/defines.
+ * Makes spotting lock/unlock locations easier.
+ */
+# define DGAP_SPINLOCK_INIT(x) spin_lock_init(&(x))
+# define DGAP_LOCK(x,y) spin_lock_irqsave(&(x), y)
+# define DGAP_UNLOCK(x,y) spin_unlock_irqrestore(&(x), y)
+# define DGAP_TRYLOCK(x,y) spin_trylock(&(x))
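+/*
+ * Note that DGAP_TRYLOCK() ignores its flags argument, so a successful
+ * trylock must be released with a plain spin_unlock(), as the poller does.
+ */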
+
+/*
+ * All the possible states the driver can be while being loaded.
+ */
+enum {
+ DRIVER_INITIALIZED = 0,
+ DRIVER_NEED_CONFIG_LOAD,
+ DRIVER_REQUESTED_CONFIG,
+ DRIVER_READY
+};
+
+/*
+ * All the possible states the board can be while booting up.
+ */
+enum {
+ BOARD_FAILED = 0,
+ CONFIG_NOT_FOUND,
+ BOARD_FOUND,
+ NEED_RESET,
+ FINISHED_RESET,
+ NEED_CONFIG,
+ FINISHED_CONFIG,
+ NEED_DEVICE_CREATION,
+ REQUESTED_DEVICE_CREATION,
+ FINISHED_DEVICE_CREATION,
+ NEED_BIOS_LOAD,
+ REQUESTED_BIOS,
+ WAIT_BIOS_LOAD,
+ FINISHED_BIOS_LOAD,
+ NEED_FEP_LOAD,
+ REQUESTED_FEP,
+ WAIT_FEP_LOAD,
+ FINISHED_FEP_LOAD,
+ NEED_PROC_CREATION,
+ FINISHED_PROC_CREATION,
+ BOARD_READY
+};
+
+/*
+ * All the possible states that a requested concentrator image can be in.
+ */
+enum {
+ NO_PENDING_CONCENTRATOR_REQUESTS = 0,
+ NEED_CONCENTRATOR,
+ REQUESTED_CONCENTRATOR
+};
+
+extern char *dgap_state_text[];
+extern char *dgap_driver_state_text[];
+
+
+/*
+ * Modem line constants are defined as macros because DSR and
+ * DCD are swappable using the ditty altpin option.
+ */
+#define D_CD(ch) ch->ch_cd /* Carrier detect */
+#define D_DSR(ch) ch->ch_dsr /* Data set ready */
+#define D_RTS(ch) DM_RTS /* Request to send */
+#define D_CTS(ch) DM_CTS /* Clear to send */
+#define D_RI(ch) DM_RI /* Ring indicator */
+#define D_DTR(ch) DM_DTR /* Data terminal ready */
+
+
+/*************************************************************************
+ *
+ * Structures and closely related defines.
+ *
+ *************************************************************************/
+
+
+/*
+ * A structure to hold a statistics counter. We also
+ * compute moving averages for this counter.
+ */
+struct macounter
+{
+ u32 cnt; /* Total count */
+ ulong accum; /* Accumulator per period */
+ ulong sma; /* Simple moving average */
+ ulong ema; /* Exponential moving average */
+};
+
+
+/************************************************************************
+ * Device flag definitions for bd_flags.
+ ************************************************************************/
+#define BD_FEP5PLUS 0x0001 /* Supports FEP5 Plus commands */
+#define BD_HAS_VPD 0x0002 /* Board has VPD info available */
+
+
+/*
+ * Per-board information
+ */
+struct board_t
+{
+ int magic; /* Board Magic number. */
+ int boardnum; /* Board number: 0-3 */
+ int firstminor; /* First minor, e.g. 0, 30, 60 */
+
+ int type; /* Type of board */
+ char *name; /* Product Name */
+ struct pci_dev *pdev; /* Pointer to the pci_dev struct */
+ u16 vendor; /* PCI vendor ID */
+ u16 device; /* PCI device ID */
+ u16 subvendor; /* PCI subsystem vendor ID */
+ u16 subdevice; /* PCI subsystem device ID */
+ uchar rev; /* PCI revision ID */
+ uint pci_bus; /* PCI bus value */
+ uint pci_slot; /* PCI slot value */
+ u16 maxports; /* MAX ports this board can handle */
+ uchar vpd[VPDSIZE]; /* VPD of board, if found */
+ u32 bd_flags; /* Board flags */
+
+ spinlock_t bd_lock; /* Used to protect board */
+
+ u32 state; /* State of card. */
+ wait_queue_head_t state_wait; /* Place to sleep on for state change */
+
+ struct tasklet_struct helper_tasklet; /* Poll helper tasklet */
+
+ u32 wait_for_bios;
+ u32 wait_for_fep;
+
+ struct cnode * bd_config; /* Config of board */
+
+ u16 nasync; /* Number of ports on card */
+
+ u32 use_interrupts; /* Should we be interrupt driven? */
+ ulong irq; /* Interrupt request number */
+ ulong intr_count; /* Count of interrupts */
+ u32 intr_used; /* Non-zero if using interrupts */
+ u32 intr_running; /* Non-zero if FEP knows it's doing interrupts */
+
+ ulong port; /* Start of base io port of the card */
+ ulong port_end; /* End of base io port of the card */
+ ulong membase; /* Start of base memory of the card */
+ ulong membase_end; /* End of base memory of the card */
+
+ uchar *re_map_port; /* Remapped io port of the card */
+ uchar *re_map_membase;/* Remapped memory of the card */
+
+ uchar runwait; /* # Processes waiting for FEP */
+ uchar inhibit_poller; /* Tells the poller to leave us alone */
+
+ struct channel_t *channels[MAXPORTS]; /* array of pointers to our channels. */
+
+ struct tty_driver *SerialDriver;
+ char SerialName[200];
+ struct tty_driver *PrintDriver;
+ char PrintName[200];
+
+ u32 dgap_Major_Serial_Registered;
+ u32 dgap_Major_TransparentPrint_Registered;
+
+ u32 dgap_Serial_Major;
+ u32 dgap_TransparentPrint_Major;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ u32 TtyRefCnt;
+#endif
+
+ struct bs_t *bd_bs; /* Base structure pointer */
+
+ char *flipbuf; /* Our flip buffer, alloced if board is found */
+ char *flipflagbuf; /* Our flip flag buffer, alloced if board is found */
+
+ u16 dpatype; /* The board "type", as defined by DPA */
+ u16 dpastatus; /* The board "status", as defined by DPA */
+ wait_queue_head_t kme_wait; /* Needed for DPA support */
+
+ u32 conc_dl_status; /* Status of any pending conc download */
+ /*
+ * Mgmt data.
+ */
+ char *msgbuf_head;
+ char *msgbuf;
+};
+
+
+
+/************************************************************************
+ * Unit flag definitions for un_flags.
+ ************************************************************************/
+#define UN_ISOPEN 0x0001 /* Device is open */
+#define UN_CLOSING 0x0002 /* Line is being closed */
+#define UN_IMM 0x0004 /* Service immediately */
+#define UN_BUSY 0x0008 /* Some work this channel */
+#define UN_BREAKI 0x0010 /* Input break received */
+#define UN_PWAIT 0x0020 /* Printer waiting for terminal */
+#define UN_TIME 0x0040 /* Waiting on time */
+#define UN_EMPTY 0x0080 /* Waiting output queue empty */
+#define UN_LOW 0x0100 /* Waiting output low water mark*/
+#define UN_EXCL_OPEN 0x0200 /* Open for exclusive use */
+#define UN_WOPEN 0x0400 /* Device waiting for open */
+#define UN_WIOCTL 0x0800 /* Device waiting for ioctl */
+#define UN_HANGUP 0x8000 /* Carrier lost */
+
+struct device;
+
+/************************************************************************
+ * Structure for terminal or printer unit.
+ ************************************************************************/
+struct un_t {
+ int magic; /* Unit Magic Number. */
+ struct channel_t *un_ch;
+ u32 un_time;
+ u32 un_type;
+ u32 un_open_count; /* Counter of opens to port */
+ struct tty_struct *un_tty;/* Pointer to unit tty structure */
+ u32 un_flags; /* Unit flags */
+ wait_queue_head_t un_flags_wait; /* Place to sleep to wait on unit */
+ u32 un_dev; /* Minor device number */
+ tcflag_t un_oflag; /* oflags being done on board */
+ tcflag_t un_lflag; /* lflags being done on board */
+ struct device *un_sysfs;
+};
+
+
+/************************************************************************
+ * Device flag definitions for ch_flags.
+ ************************************************************************/
+#define CH_PRON 0x0001 /* Printer on string */
+#define CH_OUT 0x0002 /* Dial-out device open */
+#define CH_STOP 0x0004 /* Output is stopped */
+#define CH_STOPI 0x0008 /* Input is stopped */
+#define CH_CD 0x0010 /* Carrier is present */
+#define CH_FCAR 0x0020 /* Carrier forced on */
+
+#define CH_RXBLOCK 0x0080 /* Enable rx blocked flag */
+#define CH_WLOW 0x0100 /* Term waiting low event */
+#define CH_WEMPTY 0x0200 /* Term waiting empty event */
+#define CH_RENABLE 0x0400 /* Buffer just emptied */
+#define CH_RACTIVE 0x0800 /* Process active in xxread() */
+#define CH_RWAIT 0x1000 /* Process waiting in xxread() */
+#define CH_BAUD0 0x2000 /* Used for checking B0 transitions */
+#define CH_HANGUP 0x8000 /* Hangup received */
+
+/*
+ * Definitions for ch_sniff_flags
+ */
+#define SNIFF_OPEN 0x1
+#define SNIFF_WAIT_DATA 0x2
+#define SNIFF_WAIT_SPACE 0x4
+
+
+/************************************************************************
+ * Channel information structure.
+ ************************************************************************/
+struct channel_t {
+ int magic; /* Channel Magic Number */
+ struct bs_t *ch_bs; /* Base structure pointer */
+ struct cm_t *ch_cm; /* Command queue pointer */
+ struct board_t *ch_bd; /* Board structure pointer */
+ unsigned char *ch_vaddr; /* FEP memory origin */
+ unsigned char *ch_taddr; /* Write buffer origin */
+ unsigned char *ch_raddr; /* Read buffer origin */
+ struct digi_t ch_digi; /* Transparent Print structure */
+ struct un_t ch_tun; /* Terminal unit info */
+ struct un_t ch_pun; /* Printer unit info */
+
+ spinlock_t ch_lock; /* provide for serialization */
+ wait_queue_head_t ch_flags_wait;
+
+ u32 pscan_state;
+ uchar pscan_savechar;
+
+ u32 ch_portnum; /* Port number, 0 offset. */
+ u32 ch_open_count; /* open count */
+ u32 ch_flags; /* Channel flags */
+
+
+ u32 ch_close_delay; /* How long we should drop RTS/DTR for */
+
+ u32 ch_cpstime; /* Time for CPS calculations */
+
+ tcflag_t ch_c_iflag; /* channel iflags */
+ tcflag_t ch_c_cflag; /* channel cflags */
+ tcflag_t ch_c_oflag; /* channel oflags */
+ tcflag_t ch_c_lflag; /* channel lflags */
+
+ u16 ch_fepiflag; /* FEP tty iflags */
+ u16 ch_fepcflag; /* FEP tty cflags */
+ u16 ch_fepoflag; /* FEP tty oflags */
+ u16 ch_wopen; /* Waiting for open process cnt */
+ u16 ch_tstart; /* Transmit buffer start */
+ u16 ch_tsize; /* Transmit buffer size */
+ u16 ch_rstart; /* Receive buffer start */
+ u16 ch_rsize; /* Receive buffer size */
+ u16 ch_rdelay; /* Receive delay time */
+
+ u16 ch_tlw; /* Our currently set low water mark */
+
+ u16 ch_cook; /* Output character mask */
+
+ uchar ch_card; /* Card channel is on */
+ uchar ch_stopc; /* Stop character */
+ uchar ch_startc; /* Start character */
+
+ uchar ch_mostat; /* FEP output modem status */
+ uchar ch_mistat; /* FEP input modem status */
+ uchar ch_mforce; /* Modem values to be forced */
+ uchar ch_mval; /* Force values */
+ uchar ch_fepstopc; /* FEP stop character */
+ uchar ch_fepstartc; /* FEP start character */
+
+ uchar ch_astopc; /* Auxiliary Stop character */
+ uchar ch_astartc; /* Auxiliary Start character */
+ uchar ch_fepastopc; /* Auxiliary FEP stop char */
+ uchar ch_fepastartc; /* Auxiliary FEP start char */
+
+ uchar ch_hflow; /* FEP hardware handshake */
+ uchar ch_dsr; /* stores real dsr value */
+ uchar ch_cd; /* stores real cd value */
+ uchar ch_tx_win; /* channel tx buffer window */
+ uchar ch_rx_win; /* channel rx buffer window */
+ uint ch_custom_speed; /* Custom baud, if set */
+ uint ch_baud_info; /* Current baud info for /proc output */
+ ulong ch_rxcount; /* total of data received so far */
+ ulong ch_txcount; /* total of data transmitted so far */
+ ulong ch_err_parity; /* Count of parity errors on channel */
+ ulong ch_err_frame; /* Count of framing errors on channel */
+ ulong ch_err_break; /* Count of breaks on channel */
+ ulong ch_err_overrun; /* Count of overruns on channel */
+
+ uint ch_sniff_in;
+ uint ch_sniff_out;
+ char *ch_sniff_buf; /* Sniff buffer for proc */
+ ulong ch_sniff_flags; /* Channel flags */
+ wait_queue_head_t ch_sniff_wait;
+};
+
+
+/*************************************************************************
+ *
+ * Prototypes for non-static functions used in more than one module
+ *
+ *************************************************************************/
+
+extern int dgap_ms_sleep(ulong ms);
+extern void *dgap_driver_kzmalloc(size_t size, int priority);
+extern char *dgap_ioctl_name(int cmd);
+extern void dgap_do_bios_load(struct board_t *brd, uchar __user *ubios, int len);
+extern void dgap_do_fep_load(struct board_t *brd, uchar __user *ufep, int len);
+extern void dgap_do_conc_load(struct board_t *brd, uchar *uaddr, int len);
+extern void dgap_do_config_load(uchar __user *uaddr, int len);
+extern int dgap_after_config_loaded(void);
+extern int dgap_finalize_board_init(struct board_t *brd);
+
+/*
+ * Our Global Variables.
+ */
+extern int dgap_driver_state; /* The state of the driver */
+extern int dgap_debug; /* Debug variable */
+extern int dgap_rawreadok; /* Set if user wants rawreads */
+extern int dgap_poll_tick; /* Poll interval - 20 ms */
+extern spinlock_t dgap_global_lock; /* Driver global spinlock */
+extern uint dgap_NumBoards; /* Total number of boards */
+extern struct board_t *dgap_Board[MAXBOARDS]; /* Array of board structs */
+extern ulong dgap_poll_counter; /* Times the poller has run */
+extern char *dgap_config_buf; /* The config file buffer */
+extern spinlock_t dgap_dl_lock; /* Downloader spinlock */
+extern wait_queue_head_t dgap_dl_wait; /* Wait queue for downloader */
+extern int dgap_dl_action; /* Action flag for downloader */
+extern int dgap_registerttyswithsysfs; /* Should we register the */
+ /* ttys with sysfs or not */
+
+/*
+ * Global functions declared in dgap_fep5.c, which must be hidden from
+ * user space programs.
+ */
+extern void dgap_poll_tasklet(unsigned long data);
+extern void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint ncmds);
+extern void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds);
+extern void dgap_wmove(struct channel_t *ch, char *buf, uint cnt);
+extern int dgap_param(struct tty_struct *tty);
+extern void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf, unsigned char *fbuf, int *len);
+extern uint dgap_get_custom_baud(struct channel_t *ch);
+extern void dgap_firmware_reset_port(struct channel_t *ch);
+
+#endif
diff --git a/drivers/staging/dgap/dgap_fep5.c b/drivers/staging/dgap/dgap_fep5.c
new file mode 100644
index 00000000000..4464f02c957
--- /dev/null
+++ b/drivers/staging/dgap/dgap_fep5.c
@@ -0,0 +1,1953 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ * $Id: dgap_fep5.c,v 1.2 2011/06/21 10:35:40 markh Exp $
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/delay.h> /* For udelay */
+#include <asm/uaccess.h> /* For copy_from_user/copy_to_user */
+#include <linux/tty.h>
+#include <linux/tty_flip.h> /* For tty_schedule_flip */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+#include <linux/sched.h>
+#endif
+
+#include "dgap_driver.h"
+#include "dgap_pci.h"
+#include "dgap_fep5.h"
+#include "dgap_tty.h"
+#include "dgap_conf.h"
+#include "dgap_parse.h"
+#include "dgap_trace.h"
+
+/*
+ * Our function prototypes
+ */
+static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds);
+static int dgap_event(struct board_t *bd);
+
+/*
+ * internal variables
+ */
+static uint dgap_count = 500;
+
+
+/*
+ * Loads the dgap.conf config file from the user.
+ */
+void dgap_do_config_load(uchar __user *uaddr, int len)
+{
+ int orig_len = len;
+ char *to_addr;
+ uchar __user *from_addr = uaddr;
+ char buf[U2BSIZE];
+ int n;
+
+ to_addr = dgap_config_buf = dgap_driver_kzmalloc(len + 1, GFP_ATOMIC);
+ if (!dgap_config_buf) {
+ DPR_INIT(("dgap_do_config_load - unable to allocate memory for file\n"));
+ dgap_driver_state = DRIVER_NEED_CONFIG_LOAD;
+ return;
+ }
+
+ n = U2BSIZE;
+ while (len) {
+
+ if (n > len)
+ n = len;
+
+ if (copy_from_user(buf, from_addr, n))
+ return;
+
+ /* Copy data from buffer to kernel memory */
+ memcpy(to_addr, buf, n);
+
+ /* increment counts */
+ len -= n;
+ to_addr += n;
+ from_addr += n;
+ n = U2BSIZE;
+ }
+
+ dgap_config_buf[orig_len] = '\0';
+
+ to_addr = dgap_config_buf;
+ dgap_parsefile(&to_addr, TRUE);
+
+ DPR_INIT(("dgap_config_load() finish\n"));
+
+ return;
+}
+
+
+int dgap_after_config_loaded(void)
+{
+ int i = 0;
+ int rc = 0;
+
+ /*
+ * Register our ttys, now that we have the config loaded.
+ */
+ for (i = 0; i < dgap_NumBoards; ++i) {
+
+ /*
+ * Initialize KME waitqueues...
+ */
+ init_waitqueue_head(&(dgap_Board[i]->kme_wait));
+
+ /*
+ * allocate flip buffer for board.
+ */
+ dgap_Board[i]->flipbuf = dgap_driver_kzmalloc(MYFLIPLEN, GFP_ATOMIC);
+ dgap_Board[i]->flipflagbuf = dgap_driver_kzmalloc(MYFLIPLEN, GFP_ATOMIC);
+ }
+
+ return (rc);
+}
+
+
+
+/*=======================================================================
+ *
+ * usertoboard - copy from user space to board space.
+ *
+ *=======================================================================*/
+static int dgap_usertoboard(struct board_t *brd, char *to_addr, char __user *from_addr, int len)
+{
+ char buf[U2BSIZE];
+ int n = U2BSIZE;
+
+ if (!brd || brd->magic != DGAP_BOARD_MAGIC)
+ return(-EFAULT);
+
+ while (len) {
+ if (n > len)
+ n = len;
+
+ if (copy_from_user(buf, from_addr, n)) {
+ return(-EFAULT);
+ }
+
+ /* Copy data from buffer to card memory */
+ memcpy_toio(to_addr, buf, n);
+
+ /* increment counts */
+ len -= n;
+ to_addr += n;
+ from_addr += n;
+ n = U2BSIZE;
+ }
+ return(0);
+}
+
+
+/*
+ * Copies the BIOS code from the user to the board,
+ * and starts the BIOS running.
+ */
+void dgap_do_bios_load(struct board_t *brd, uchar __user *ubios, int len)
+{
+ uchar *addr;
+ uint offset;
+ int i;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
+
+ DPR_INIT(("dgap_do_bios_load() start\n"));
+
+ addr = brd->re_map_membase;
+
+ /*
+ * clear POST area
+ */
+ for (i = 0; i < 16; i++)
+ writeb(0, addr + POSTAREA + i);
+
+ /*
+ * Download bios
+ */
+ offset = 0x1000;
+ if (dgap_usertoboard(brd, addr + offset, ubios, len) < 0) {
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ return;
+ }
+
+ writel(0x0bf00401, addr);
+ writel(0, (addr + 4));
+
+ /* Clear the reset, and change states. */
+ writeb(FEPCLR, brd->re_map_port);
+ brd->state = WAIT_BIOS_LOAD;
+}
+
+
+/*
+ * Checks to see if the BIOS completed running on the card.
+ */
+static void dgap_do_wait_for_bios(struct board_t *brd)
+{
+ uchar *addr;
+ u16 word;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
+
+ addr = brd->re_map_membase;
+ word = readw(addr + POSTAREA);
+
+ /* Check to see if BIOS thinks board is good. (GD). */
+ if (word == *(u16 *) "GD") {
+ DPR_INIT(("GOT GD in memory, moving states.\n"));
+ brd->state = FINISHED_BIOS_LOAD;
+ return;
+ }
+
+ /* Give up on board after too long of time taken */
+ if (brd->wait_for_bios++ > 5000) {
+ u16 err1 = readw(addr + SEQUENCE);
+ u16 err2 = readw(addr + ERROR);
+ APR(("***WARNING*** %s failed diagnostics. Error #(%x,%x).\n",
+ brd->name, err1, err2));
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ }
+}
+
+
+/*
+ * Copies the FEP code from the user to the board,
+ * and starts the FEP running.
+ */
+void dgap_do_fep_load(struct board_t *brd, uchar __user *ufep, int len)
+{
+ uchar *addr;
+ uint offset;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
+
+ addr = brd->re_map_membase;
+
+ DPR_INIT(("dgap_do_fep_load() for board %s : start\n", brd->name));
+
+ /*
+ * Download FEP
+ */
+ offset = 0x1000;
+ if (dgap_usertoboard(brd, addr + offset, ufep, len) < 0) {
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ return;
+ }
+
+ /*
+ * If board is a concentrator product, we need to give
+ * it its config string describing how the concentrators look.
+ */
+ if ((brd->type == PCX) || (brd->type == PEPC)) {
+ uchar string[100];
+ uchar *config, *xconfig;
+ int i = 0;
+
+ xconfig = dgap_create_config_string(brd, string);
+
+ /* Write string to board memory */
+ config = addr + CONFIG;
+ for (; i < CONFIGSIZE; i++, config++, xconfig++) {
+ writeb(*xconfig, config);
+ if ((*xconfig & 0xff) == 0xff)
+ break;
+ }
+ }
+
+ writel(0xbfc01004, (addr + 0xc34));
+ writel(0x3, (addr + 0xc30));
+
+ /* change states. */
+ brd->state = WAIT_FEP_LOAD;
+
+ DPR_INIT(("dgap_do_fep_load() for board %s : finish\n", brd->name));
+
+}
+
+
+/*
+ * Waits for the FEP to report that it's ready for us to use.
+ */
+static void dgap_do_wait_for_fep(struct board_t *brd)
+{
+ uchar *addr;
+ u16 word;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
+
+ addr = brd->re_map_membase;
+
+ DPR_INIT(("dgap_do_wait_for_fep() for board %s : start. addr: %p\n", brd->name, addr));
+
+ word = readw(addr + FEPSTAT);
+
+ /* Check to see if FEP is up and running now. */
+ if (word == *(u16 *) "OS") {
+ DPR_INIT(("GOT OS in memory for board %s, moving states.\n", brd->name));
+ brd->state = FINISHED_FEP_LOAD;
+
+ /*
+ * Check to see if the board can support FEP5+ commands.
+ */
+ word = readw(addr + FEP5_PLUS);
+ if (word == *(u16 *) "5A") {
+ DPR_INIT(("GOT 5A in memory for board %s, board supports extended FEP5 commands.\n", brd->name));
+ brd->bd_flags |= BD_FEP5PLUS;
+ }
+
+ return;
+ }
+
+ /* Give up on board after too long of time taken */
+ if (brd->wait_for_fep++ > 5000) {
+ u16 err1 = readw(addr + SEQUENCE);
+ u16 err2 = readw(addr + ERROR);
+ APR(("***WARNING*** FEPOS for %s not functioning. Error #(%x,%x).\n",
+ brd->name, err1, err2));
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ }
+
+ DPR_INIT(("dgap_do_wait_for_fep() for board %s : finish\n", brd->name));
+}
+
+
+/*
+ * Physically forces the FEP5 card to reset itself.
+ */
+static void dgap_do_reset_board(struct board_t *brd)
+{
+ uchar check;
+ u32 check1;
+ u32 check2;
+ int i = 0;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase || !brd->re_map_port) {
+ DPR_INIT(("dgap_do_reset_board() start. bad values. brd: %p mem: %p io: %p\n",
+ brd, brd ? brd->re_map_membase : 0, brd ? brd->re_map_port : 0));
+ return;
+ }
+
+ DPR_INIT(("dgap_do_reset_board() start. io: %p\n", brd->re_map_port));
+
+ /* FEPRST does not vary among supported boards */
+ writeb(FEPRST, brd->re_map_port);
+
+ for (i = 0; i <= 1000; i++) {
+ check = readb(brd->re_map_port) & 0xe;
+ if (check == FEPRST)
+ break;
+ udelay(10);
+
+ }
+ if (i > 1000) {
+ APR(("*** WARNING *** Board not resetting... Failing board.\n"));
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ goto failed;
+ }
+
+ /*
+ * Make sure there really is memory out there.
+ */
+ writel(0xa55a3cc3, (brd->re_map_membase + LOWMEM));
+ writel(0x5aa5c33c, (brd->re_map_membase + HIGHMEM));
+ check1 = readl(brd->re_map_membase + LOWMEM);
+ check2 = readl(brd->re_map_membase + HIGHMEM);
+
+ if ((check1 != 0xa55a3cc3) || (check2 != 0x5aa5c33c)) {
+ APR(("*** Warning *** No memory at %p for board.\n", brd->re_map_membase));
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ goto failed;
+ }
+
+ if (brd->state != BOARD_FAILED)
+ brd->state = FINISHED_RESET;
+
+failed:
+ DPR_INIT(("dgap_do_reset_board() finish\n"));
+}
+
+
+/*
+ * Sends a concentrator image into the FEP5 board.
+ */
+void dgap_do_conc_load(struct board_t *brd, uchar *uaddr, int len)
+{
+ char *vaddr;
+ u16 offset = 0;
+ struct downld_t *to_dp;
+
+ if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
+ return;
+
+ vaddr = brd->re_map_membase;
+
+ offset = readw((u16 *) (vaddr + DOWNREQ));
+ to_dp = (struct downld_t *) (vaddr + (int) offset);
+
+ /*
+ * The image was already read into kernel space,
+ * we do NOT need a user space read here
+ */
+ memcpy_toio((char *) to_dp, uaddr, sizeof(struct downld_t));
+
+ /* Tell card we have data for it */
+ writew(0, vaddr + (DOWNREQ));
+
+ brd->conc_dl_status = NO_PENDING_CONCENTRATOR_REQUESTS;
+}
+
+
+#define EXPANSION_ROM_SIZE (64 * 1024)
+#define FEP5_ROM_MAGIC (0xFEFFFFFF)
+
+static void dgap_get_vpd(struct board_t *brd)
+{
+ u32 magic;
+ u32 base_offset;
+ u16 rom_offset;
+ u16 vpd_offset;
+ u16 image_length;
+ u16 i;
+ uchar byte1;
+ uchar byte2;
+
+ /*
+ * Poke the magic number at the PCI Rom Address location.
+ * If VPD is supported, the value read from that address
+ * will be non-zero.
+ */
+ magic = FEP5_ROM_MAGIC;
+ pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
+ pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
+
+ /* VPD not supported, bail */
+ if (!magic)
+ return;
+
+ /*
+ * To get to the OTPROM memory, we have to send the board's base
+ * address or'ed with 1 into the PCI Rom Address location.
+ */
+ magic = brd->membase | 0x01;
+ pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
+ pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);
+
+ byte1 = readb(brd->re_map_membase);
+ byte2 = readb(brd->re_map_membase + 1);
+
+ /*
+ * If the board correctly swapped to the OTPROM memory,
+ * the first 2 bytes (header) should be 0x55, 0xAA
+ */
+ if (byte1 == 0x55 && byte2 == 0xAA) {
+
+ base_offset = 0;
+
+ /*
+ * We have to run through all the OTPROM memory looking
+ * for the VPD offset.
+ */
+ while (base_offset <= EXPANSION_ROM_SIZE) {
+
+ /*
+ * Lots of magic numbers here.
+ *
+ * The VPD offset is located inside the ROM Data Structure.
+ * We also have to remember the length of each
+ * ROM Data Structure, so we can "hop" to the next
+ * entry if the VPD isn't in the current
+ * ROM Data Structure.
+ */
+ rom_offset = readw(brd->re_map_membase + base_offset + 0x18);
+ image_length = readw(brd->re_map_membase + rom_offset + 0x10) * 512;
+ vpd_offset = readw(brd->re_map_membase + rom_offset + 0x08);
+
+ /* Found the VPD entry */
+ if (vpd_offset)
+ break;
+
+ /* We didn't find a VPD entry, go to next ROM entry. */
+ base_offset += image_length;
+
+ byte1 = readb(brd->re_map_membase + base_offset);
+ byte2 = readb(brd->re_map_membase + base_offset + 1);
+
+ /*
+ * If the new ROM offset doesn't have 0x55, 0xAA
+ * as its header, we have run out of ROM.
+ */
+ if (byte1 != 0x55 || byte2 != 0xAA)
+ break;
+ }
+
+ /*
+ * If we have a VPD offset, then mark the board
+ * as having a valid VPD, and copy VPDSIZE (512) bytes of
+ * that VPD to the buffer we have in our board structure.
+ */
+ if (vpd_offset) {
+ brd->bd_flags |= BD_HAS_VPD;
+ for (i = 0; i < VPDSIZE; i++)
+ brd->vpd[i] = readb(brd->re_map_membase + vpd_offset + i);
+ }
+ }
+
+ /*
+ * We MUST poke the magic number at the PCI Rom Address location again.
+ * This makes the card report the regular board memory back to us,
+ * rather than the OTPROM memory.
+ */
+ magic = FEP5_ROM_MAGIC;
+ pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
+}
+
+
+/*
+ * Our board poller function.
+ */
+void dgap_poll_tasklet(unsigned long data)
+{
+ struct board_t *bd = (struct board_t *) data;
+ ulong lock_flags;
+ ulong lock_flags2;
+ char *vaddr;
+ u16 head, tail;
+ u16 *chk_addr;
+ u16 check = 0;
+
+ if (!bd || (bd->magic != DGAP_BOARD_MAGIC)) {
+ APR(("dgap_poll_tasklet() - NULL or bad bd.\n"));
+ return;
+ }
+
+ if (bd->inhibit_poller)
+ return;
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+
+ vaddr = bd->re_map_membase;
+
+ /*
+ * If board is ready, parse deeper to see if there is anything to do.
+ */
+ if (bd->state == BOARD_READY) {
+
+ struct ev_t *eaddr = NULL;
+
+ if (!bd->re_map_membase) {
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return;
+ }
+ if (!bd->re_map_port) {
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return;
+ }
+
+ if (!bd->nasync) {
+ goto out;
+ }
+
+ /*
+ * If this is a CX or EPCX, we need to see if the firmware
+ * is requesting a concentrator image from us.
+ */
+ if ((bd->type == PCX) || (bd->type == PEPC)) {
+ chk_addr = (u16 *) (vaddr + DOWNREQ);
+ check = readw(chk_addr);
+ /* Nonzero if FEP is requesting concentrator image. */
+ if (check) {
+ if (bd->conc_dl_status == NO_PENDING_CONCENTRATOR_REQUESTS)
+ bd->conc_dl_status = NEED_CONCENTRATOR;
+ /*
+ * Signal downloader, it's got some work to do.
+ */
+ DGAP_LOCK(dgap_dl_lock, lock_flags2);
+ if (dgap_dl_action != 1) {
+ dgap_dl_action = 1;
+ wake_up_interruptible(&dgap_dl_wait);
+ }
+ DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
+
+ }
+ }
+
+ eaddr = (struct ev_t *) (vaddr + EVBUF);
+
+ /* Get our head and tail */
+ head = readw(&(eaddr->ev_head));
+ tail = readw(&(eaddr->ev_tail));
+
+ /*
+ * If there is an event pending, go service it.
+ */
+ if (head != tail) {
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ dgap_event(bd);
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ }
+
+out:
+ /*
+ * If board is doing interrupts, ACK the interrupt.
+ */
+ if (bd && bd->intr_running) {
+ readb(bd->re_map_port + 2);
+ }
+
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return;
+ }
+
+ /* Our state machine to get the board up and running */
+
+ /* Reset board */
+ if (bd->state == NEED_RESET) {
+
+ /* Get VPD info */
+ dgap_get_vpd(bd);
+
+ dgap_do_reset_board(bd);
+ }
+
+ /* Move to next state */
+ if (bd->state == FINISHED_RESET) {
+ bd->state = NEED_CONFIG;
+ }
+
+ if (bd->state == NEED_CONFIG) {
+ /*
+ * Match this board to a config the user created for us.
+ */
+ bd->bd_config = dgap_find_config(bd->type, bd->pci_bus, bd->pci_slot);
+
+ /*
+ * Because the 4 port Xr products share the same PCI ID
+ * as the 8 port Xr products, if we receive a NULL config
+ * back, and this is a PAPORT8 board, retry with a
+ * PAPORT4 attempt as well.
+ */
+ if (bd->type == PAPORT8 && !bd->bd_config) {
+ bd->bd_config = dgap_find_config(PAPORT4, bd->pci_bus, bd->pci_slot);
+ }
+
+ /*
+ * Register the ttys (if any) into the kernel.
+ */
+ if (bd->bd_config) {
+ bd->state = FINISHED_CONFIG;
+ }
+ else {
+ bd->state = CONFIG_NOT_FOUND;
+ }
+ }
+
+ /* Move to next state */
+ if (bd->state == FINISHED_CONFIG) {
+ bd->state = NEED_DEVICE_CREATION;
+ }
+
+ /* Move to next state */
+ if (bd->state == NEED_DEVICE_CREATION) {
+ /*
+ * Signal downloader, it's got some work to do.
+ */
+ DGAP_LOCK(dgap_dl_lock, lock_flags2);
+ if (dgap_dl_action != 1) {
+ dgap_dl_action = 1;
+ wake_up_interruptible(&dgap_dl_wait);
+ }
+ DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
+ }
+
+ /* Move to next state */
+ if (bd->state == FINISHED_DEVICE_CREATION) {
+ bd->state = NEED_BIOS_LOAD;
+ }
+
+ /* Move to next state */
+ if (bd->state == NEED_BIOS_LOAD) {
+ /*
+ * Signal downloader, it's got some work to do.
+ */
+ DGAP_LOCK(dgap_dl_lock, lock_flags2);
+ if (dgap_dl_action != 1) {
+ dgap_dl_action = 1;
+ wake_up_interruptible(&dgap_dl_wait);
+ }
+ DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
+ }
+
+ /* Wait for BIOS to test board... */
+ if (bd->state == WAIT_BIOS_LOAD) {
+ dgap_do_wait_for_bios(bd);
+ }
+
+ /* Move to next state */
+ if (bd->state == FINISHED_BIOS_LOAD) {
+ bd->state = NEED_FEP_LOAD;
+
+ /*
+ * Signal downloader, it's got some work to do.
+ */
+ DGAP_LOCK(dgap_dl_lock, lock_flags2);
+ if (dgap_dl_action != 1) {
+ dgap_dl_action = 1;
+ wake_up_interruptible(&dgap_dl_wait);
+ }
+ DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
+ }
+
+ /* Wait for FEP to load on board... */
+ if (bd->state == WAIT_FEP_LOAD) {
+ dgap_do_wait_for_fep(bd);
+ }
+
+
+ /* Move to next state */
+ if (bd->state == FINISHED_FEP_LOAD) {
+
+ /*
+ * Do tty device initialization.
+ */
+ int rc = dgap_tty_init(bd);
+
+ if (rc < 0) {
+ dgap_tty_uninit(bd);
+ APR(("Can't init tty devices (%d)\n", rc));
+ bd->state = BOARD_FAILED;
+ bd->dpastatus = BD_NOFEP;
+ }
+ else {
+ bd->state = NEED_PROC_CREATION;
+
+ /*
+ * Signal downloader, it's got some work to do.
+ */
+ DGAP_LOCK(dgap_dl_lock, lock_flags2);
+ if (dgap_dl_action != 1) {
+ dgap_dl_action = 1;
+ wake_up_interruptible(&dgap_dl_wait);
+ }
+ DGAP_UNLOCK(dgap_dl_lock, lock_flags2);
+ }
+ }
+
+ /* Move to next state */
+ if (bd->state == FINISHED_PROC_CREATION) {
+
+ bd->state = BOARD_READY;
+ bd->dpastatus = BD_RUNNING;
+
+ /*
+ * If user requested the board to run in interrupt mode,
+ * go and set it up on the board.
+ */
+ if (bd->intr_used) {
+ writew(1, (bd->re_map_membase + ENABLE_INTR));
+ /*
+ * Tell the board to poll the UARTS as fast as possible.
+ */
+ writew(FEPPOLL_MIN, (bd->re_map_membase + FEPPOLL));
+ bd->intr_running = 1;
+ }
+
+ /* Wake up anyone waiting for board state to change to ready */
+ wake_up_interruptible(&bd->state_wait);
+ }
+
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+}
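+
+/*
+ * For reference, the bring-up sequence this poll routine (together with
+ * the downloader thread it wakes) walks a board through is roughly:
+ *
+ *   NEED_RESET -> FINISHED_RESET -> NEED_CONFIG -> FINISHED_CONFIG ->
+ *   NEED_DEVICE_CREATION -> FINISHED_DEVICE_CREATION -> NEED_BIOS_LOAD ->
+ *   WAIT_BIOS_LOAD -> FINISHED_BIOS_LOAD -> NEED_FEP_LOAD ->
+ *   WAIT_FEP_LOAD -> FINISHED_FEP_LOAD -> NEED_PROC_CREATION ->
+ *   FINISHED_PROC_CREATION -> BOARD_READY
+ *
+ * The FINISHED_* states are entered elsewhere (reset/BIOS/FEP completion
+ * and the downloader), so this is a sketch of the flow rather than a
+ * complete list of transitions.  A missing config parks the board in
+ * CONFIG_NOT_FOUND, and any hard error in BOARD_FAILED.
+ */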
+
+
+/*=======================================================================
+ *
+ * dgap_cmdb - Sends a 2 byte command to the FEP.
+ *
+ * ch - Pointer to channel structure.
+ * cmd - Command to be sent.
+ * byte1 - Integer containing first byte to be sent.
+ * byte2 - Integer containing second byte to be sent.
+ * ncmds - Wait until ncmds or fewer cmds are left
+ * in the cmd buffer before returning.
+ *
+ *=======================================================================*/
+void dgap_cmdb(struct channel_t *ch, uchar cmd, uchar byte1, uchar byte2, uint ncmds)
+{
+ char *vaddr = NULL;
+ struct cm_t *cm_addr = NULL;
+ uint count;
+ uint n;
+ u16 head;
+ u16 tail;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check if board is still alive.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED) {
+ DPR_CORE(("%s:%d board is in failed state.\n", __FILE__, __LINE__));
+ return;
+ }
+
+ /*
+ * Make sure the pointers are in range before
+ * writing to the FEP memory.
+ */
+ vaddr = ch->ch_bd->re_map_membase;
+
+ if (!vaddr)
+ return;
+
+ cm_addr = (struct cm_t *) (vaddr + CMDBUF);
+ head = readw(&(cm_addr->cm_head));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+ if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
+ DPR_CORE(("%s:%d pointers out of range, failing board!\n", __FILE__, __LINE__));
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+
+ /*
+ * Put the data in the circular command buffer.
+ */
+ writeb(cmd, (char *) (vaddr + head + CMDSTART + 0));
+ writeb((uchar) ch->ch_portnum, (char *) (vaddr + head + CMDSTART + 1));
+ writeb(byte1, (char *) (vaddr + head + CMDSTART + 2));
+ writeb(byte2, (char *) (vaddr + head + CMDSTART + 3));
+
+ head = (head + 4) & (CMDMAX - CMDSTART - 4);
+
+ writew(head, &(cm_addr->cm_head));
+
+ /*
+ * Wait if necessary before updating the head
+ * pointer to limit the number of outstanding
+ * commands to the FEP. If the time spent waiting
+ * is outlandish, declare the FEP dead.
+ */
+ for (count = dgap_count ;;) {
+
+ head = readw(&(cm_addr->cm_head));
+ tail = readw(&(cm_addr->cm_tail));
+
+ n = (head - tail) & (CMDMAX - CMDSTART - 4);
+
+ if (n <= ncmds * sizeof(struct cm_t))
+ break;
+
+ if (--count == 0) {
+ DPR_CORE(("%s:%d failing board.\n",__FILE__, __LINE__));
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+ udelay(10);
+ }
+}
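+
+/*
+ * Each command occupies one 4-byte slot in the circular command buffer:
+ *
+ *   offset 0: command opcode          offset 2: first data byte
+ *   offset 1: channel (port) number   offset 3: second data byte
+ *
+ * Illustrative call only (the arguments mirror the SFLOWC use in
+ * dgap_param() below), queueing a "set flow control chars" command:
+ *
+ *   dgap_cmdb(ch, SFLOWC, ch->ch_fepstartc, ch->ch_fepstopc, 0);
+ *
+ * An ncmds of 0 means "wait until the FEP has drained the buffer".
+ */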
+
+
+/*=======================================================================
+ *
+ * dgap_cmdw - Sends a 1 word command to the FEP.
+ *
+ * ch - Pointer to channel structure.
+ * cmd - Command to be sent.
+ * word - Integer containing word to be sent.
+ * ncmds - Wait until ncmds or fewer cmds are left
+ * in the cmd buffer before returning.
+ *
+ *=======================================================================*/
+void dgap_cmdw(struct channel_t *ch, uchar cmd, u16 word, uint ncmds)
+{
+ char *vaddr = NULL;
+ struct cm_t *cm_addr = NULL;
+ uint count;
+ uint n;
+ u16 head;
+ u16 tail;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check if board is still alive.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED) {
+ DPR_CORE(("%s:%d board is failed!\n", __FILE__, __LINE__));
+ return;
+ }
+
+ /*
+ * Make sure the pointers are in range before
+ * writing to the FEP memory.
+ */
+ vaddr = ch->ch_bd->re_map_membase;
+ if (!vaddr)
+ return;
+
+ cm_addr = (struct cm_t *) (vaddr + CMDBUF);
+ head = readw(&(cm_addr->cm_head));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+ if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
+ DPR_CORE(("%s:%d Pointers out of range. Failing board.\n",__FILE__, __LINE__));
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+
+ /*
+ * Put the data in the circular command buffer.
+ */
+ writeb(cmd, (char *) (vaddr + head + CMDSTART + 0));
+ writeb((uchar) ch->ch_portnum, (char *) (vaddr + head + CMDSTART + 1));
+ writew((u16) word, (char *) (vaddr + head + CMDSTART + 2));
+
+ head = (head + 4) & (CMDMAX - CMDSTART - 4);
+
+ writew(head, &(cm_addr->cm_head));
+
+ /*
+ * Wait if necessary before updating the head
+ * pointer to limit the number of outstanding
+ * commands to the FEP. If the time spent waiting
+ * is outlandish, declare the FEP dead.
+ */
+ for (count = dgap_count ;;) {
+
+ head = readw(&(cm_addr->cm_head));
+ tail = readw(&(cm_addr->cm_tail));
+
+ n = (head - tail) & (CMDMAX - CMDSTART - 4);
+
+ if (n <= ncmds * sizeof(struct cm_t))
+ break;
+
+ if (--count == 0) {
+ DPR_CORE(("%s:%d Failing board.\n",__FILE__, __LINE__));
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+ udelay(10);
+ }
+}
+
+
+
+/*=======================================================================
+ *
+ * dgap_cmdw_ext - Sends an extended word command to the FEP.
+ *
+ * ch - Pointer to channel structure.
+ * cmd - Command to be sent.
+ * word - Integer containing word to be sent.
+ * ncmds - Wait until ncmds or fewer cmds are left
+ * in the cmd buffer before returning.
+ *
+ *=======================================================================*/
+static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds)
+{
+ char *vaddr = NULL;
+ struct cm_t *cm_addr = NULL;
+ uint count;
+ uint n;
+ u16 head;
+ u16 tail;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check if board is still alive.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED) {
+ DPR_CORE(("%s:%d board is failed!\n", __FILE__, __LINE__));
+ return;
+ }
+
+ /*
+ * Make sure the pointers are in range before
+ * writing to the FEP memory.
+ */
+ vaddr = ch->ch_bd->re_map_membase;
+ if (!vaddr)
+ return;
+
+ cm_addr = (struct cm_t *) (vaddr + CMDBUF);
+ head = readw(&(cm_addr->cm_head));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+ if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
+ DPR_CORE(("%s:%d Pointers out of range. Failing board.\n",__FILE__, __LINE__));
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+
+ /*
+ * Put the data in the circular command buffer.
+ */
+
+ /* Write an FF to tell the FEP that we want an extended command */
+ writeb((uchar) 0xff, (char *) (vaddr + head + CMDSTART + 0));
+
+ writeb((uchar) ch->ch_portnum, (uchar *) (vaddr + head + CMDSTART + 1));
+ writew((u16) cmd, (char *) (vaddr + head + CMDSTART + 2));
+
+ /*
+ * If the second part of the command won't fit,
+ * put it at the beginning of the circular buffer.
+ */
+ if (((head + 4) >= ((CMDMAX - CMDSTART)) || (head & 03))) {
+ writew((u16) word, (char *) (vaddr + CMDSTART));
+ } else {
+ writew((u16) word, (char *) (vaddr + head + CMDSTART + 4));
+ }
+
+ head = (head + 8) & (CMDMAX - CMDSTART - 4);
+
+ writew(head, &(cm_addr->cm_head));
+
+ /*
+ * Wait if necessary before updating the head
+ * pointer to limit the number of outstanding
+ * commands to the FEP. If the time spent waiting
+ * is outlandish, declare the FEP dead.
+ */
+ for (count = dgap_count ;;) {
+
+ head = readw(&(cm_addr->cm_head));
+ tail = readw(&(cm_addr->cm_tail));
+
+ n = (head - tail) & (CMDMAX - CMDSTART - 4);
+
+ if (n <= ncmds * sizeof(struct cm_t))
+ break;
+
+ if (--count == 0) {
+ DPR_CORE(("%s:%d Failing board.\n",__FILE__, __LINE__));
+ ch->ch_bd->state = BOARD_FAILED;
+ return;
+ }
+ udelay(10);
+ }
+}
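+
+/*
+ * Extended commands use two 4-byte slots: the first carries the 0xff
+ * marker, the channel number and the 16-bit extended opcode; the second
+ * carries the 16-bit argument (placed back at CMDSTART when it would not
+ * fit before the wrap point).  Illustrative call only, mirroring the
+ * custom-speed request made from dgap_param() below:
+ *
+ *   dgap_cmdw_ext(ch, 0xff01, ch->ch_custom_speed, 0);
+ */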
+
+
+/*=======================================================================
+ *
+ * dgap_wmove - Write data to FEP buffer.
+ *
+ * ch - Pointer to channel structure.
+ * buf - Pointer to characters to be moved.
+ * cnt - Number of characters to move.
+ *
+ *=======================================================================*/
+void dgap_wmove(struct channel_t *ch, char *buf, uint cnt)
+{
+ int n;
+ char *taddr;
+ struct bs_t *bs;
+ u16 head;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * Check parameters.
+ */
+ bs = ch->ch_bs;
+ head = readw(&(bs->tx_head));
+
+ /*
+ * If pointers are out of range, just return.
+ */
+ if ((cnt > ch->ch_tsize) || (unsigned)(head - ch->ch_tstart) >= ch->ch_tsize) {
+ DPR_CORE(("%s:%d pointer out of range", __FILE__, __LINE__));
+ return;
+ }
+
+ /*
+ * If the write wraps over the top of the circular buffer,
+ * move the portion up to the wrap point, and reset the
+ * pointers to the bottom.
+ */
+ n = ch->ch_tstart + ch->ch_tsize - head;
+
+ if (cnt >= n) {
+ cnt -= n;
+ taddr = ch->ch_taddr + head;
+ memcpy_toio(taddr, buf, n);
+ head = ch->ch_tstart;
+ buf += n;
+ }
+
+ /*
+ * Move rest of data.
+ */
+ taddr = ch->ch_taddr + head;
+ n = cnt;
+ memcpy_toio(taddr, buf, n);
+ head += cnt;
+
+ writew(head, &(bs->tx_head));
+}
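+
+/*
+ * Worked example with illustrative numbers only: with ch_tstart = 0,
+ * ch_tsize = 0x400 and head = 0x3f0, a 0x20 byte write copies
+ * n = 0x400 - 0x3f0 = 0x10 bytes at offset 0x3f0, wraps head back to
+ * ch_tstart, then copies the remaining 0x10 bytes at offset 0, leaving
+ * tx_head at 0x10.
+ */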
+
+/*
+ * Retrieves the current custom baud rate from FEP memory
+ * and returns it to the caller.
+ * Returns 0 on error.
+ */
+uint dgap_get_custom_baud(struct channel_t *ch)
+{
+ uchar *vaddr;
+ ulong offset = 0;
+ uint value = 0;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) {
+ return (0);
+ }
+
+ if (!ch->ch_bd || ch->ch_bd->magic != DGAP_BOARD_MAGIC) {
+ return (0);
+ }
+
+ if (!(ch->ch_bd->bd_flags & BD_FEP5PLUS))
+ return (0);
+
+ vaddr = ch->ch_bd->re_map_membase;
+
+ if (!vaddr)
+ return (0);
+
+ /*
+ * Read from FEP memory what the FEP
+ * believes the custom baud rate is.
+ */
+ offset = ((((*(unsigned short *)(vaddr + ECS_SEG)) << 4) +
+ (ch->ch_portnum * 0x28) + LINE_SPEED));
+
+ value = readw(vaddr + offset);
+ return (value);
+}
+
+
+/*
+ * Calls the firmware to reset this channel.
+ */
+void dgap_firmware_reset_port(struct channel_t *ch)
+{
+ dgap_cmdb(ch, CHRESET, 0, 0, 0);
+
+ /*
+ * Now that the channel is reset, we need to make sure
+ * all the current settings get reapplied to the port
+ * in the firmware.
+ *
+ * So we will set the driver's cache of firmware
+ * settings all to 0, and then call param.
+ */
+ ch->ch_fepiflag = 0;
+ ch->ch_fepcflag = 0;
+ ch->ch_fepoflag = 0;
+ ch->ch_fepstartc = 0;
+ ch->ch_fepstopc = 0;
+ ch->ch_fepastartc = 0;
+ ch->ch_fepastopc = 0;
+ ch->ch_mostat = 0;
+ ch->ch_hflow = 0;
+}
+
+
+/*=======================================================================
+ *
+ * dgap_param - Set Digi parameters.
+ *
+ * struct tty_struct * - TTY for port.
+ *
+ *=======================================================================*/
+int dgap_param(struct tty_struct *tty)
+{
+ struct ktermios *ts;
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct bs_t *bs;
+ struct un_t *un;
+ u16 head;
+ u16 cflag;
+ u16 iflag;
+ uchar mval;
+ uchar hflow;
+
+ if (!tty || tty->magic != TTY_MAGIC) {
+ return (-ENXIO);
+ }
+
+ un = (struct un_t *) tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC) {
+ return (-ENXIO);
+ }
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) {
+ return (-ENXIO);
+ }
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC) {
+ return (-ENXIO);
+ }
+
+ bs = ch->ch_bs;
+ if (bs == 0) {
+ return (-ENXIO);
+ }
+
+ DPR_PARAM(("param start: tdev: %x cflags: %x oflags: %x iflags: %x\n",
+ ch->ch_tun.un_dev, ch->ch_c_cflag, ch->ch_c_oflag, ch->ch_c_iflag));
+
+ ts = &tty->termios;
+
+ /*
+ * If baud rate is zero, flush queues, and set mval to drop DTR.
+ */
+ if ((ch->ch_c_cflag & (CBAUD)) == 0) {
+
+ /* flush rx */
+ head = readw(&(ch->ch_bs->rx_head));
+ writew(head, &(ch->ch_bs->rx_tail));
+
+ /* flush tx */
+ head = readw(&(ch->ch_bs->tx_head));
+ writew(head, &(ch->ch_bs->tx_tail));
+
+ ch->ch_flags |= (CH_BAUD0);
+
+ /* Drop RTS and DTR */
+ ch->ch_mval &= ~(D_RTS(ch)|D_DTR(ch));
+ mval = D_DTR(ch) | D_RTS(ch);
+ ch->ch_baud_info = 0;
+
+ } else if (ch->ch_custom_speed && (bd->bd_flags & BD_FEP5PLUS)) {
+ /*
+ * Tell the fep to do the command
+ */
+
+ DPR_PARAM(("param: Want %d speed\n", ch->ch_custom_speed));
+
+ dgap_cmdw_ext(ch, 0xff01, ch->ch_custom_speed, 0);
+
+ /*
+ * Now read back from FEP memory what the FEP
+ * believes the custom baud rate is.
+ */
+ ch->ch_baud_info = ch->ch_custom_speed = dgap_get_custom_baud(ch);
+
+ DPR_PARAM(("param: Got %d speed\n", ch->ch_custom_speed));
+
+ /* Handle transition from B0 */
+ if (ch->ch_flags & CH_BAUD0) {
+ ch->ch_flags &= ~(CH_BAUD0);
+ ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
+ }
+ mval = D_DTR(ch) | D_RTS(ch);
+
+ } else {
+ /*
+ * Set baud rate, character size, and parity.
+ */
+
+
+ int iindex = 0;
+ int jindex = 0;
+ int baud = 0;
+
+ ulong bauds[4][16] = {
+ { /* slowbaud */
+ 0, 50, 75, 110,
+ 134, 150, 200, 300,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* slowbaud & CBAUDEX */
+ 0, 57600, 115200, 230400,
+ 460800, 150, 200, 921600,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* fastbaud */
+ 0, 57600, 76800, 115200,
+ 14400, 57600, 230400, 76800,
+ 115200, 230400, 28800, 460800,
+ 921600, 9600, 19200, 38400 },
+ { /* fastbaud & CBAUDEX */
+ 0, 57600, 115200, 230400,
+ 460800, 150, 200, 921600,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 }
+ };
+
+ /* Only use the TXPrint baud rate if the terminal unit is NOT open */
+ if (!(ch->ch_tun.un_flags & UN_ISOPEN) && (un->un_type == DGAP_PRINT))
+ baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
+ else
+ baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
+
+ if (ch->ch_c_cflag & CBAUDEX)
+ iindex = 1;
+
+ if (ch->ch_digi.digi_flags & DIGI_FAST)
+ iindex += 2;
+
+ jindex = baud;
+
+ if ((iindex >= 0) && (iindex < 4) && (jindex >= 0) && (jindex < 16)) {
+ baud = bauds[iindex][jindex];
+ } else {
+ DPR_IOCTL(("baud indices were out of range (%d)(%d)",
+ iindex, jindex));
+ baud = 0;
+ }
+
+ if (baud == 0)
+ baud = 9600;
+
+ ch->ch_baud_info = baud;
+
+
+ /*
+ * CBAUD has bit position 0x1000 set these days to indicate Linux
+ * baud rate remap.
+ * We use a different bit assignment for high speed. Clear this
+ * bit out while grabbing the parts of "cflag" we want.
+ */
+ cflag = ch->ch_c_cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB | CSTOPB | CSIZE);
+
+ /*
+ * HUPCL bit is used by FEP to indicate fast baud
+ * table is to be used.
+ */
+ if ((ch->ch_digi.digi_flags & DIGI_FAST) || (ch->ch_c_cflag & CBAUDEX))
+ cflag |= HUPCL;
+
+
+ if ((ch->ch_c_cflag & CBAUDEX) && !(ch->ch_digi.digi_flags & DIGI_FAST)) {
+ /*
+ * The below code is trying to guarantee that only baud rates
+ * 115200, 230400, 460800, 921600 are remapped. We use exclusive or
+ * because the various baud rates share common bit positions
+ * and therefore can't be tested for easily.
+ */
+ tcflag_t tcflag = (ch->ch_c_cflag & CBAUD) | CBAUDEX;
+ int baudpart = 0;
+
+ /* Map high speed requests to index into FEP's baud table */
+ switch (tcflag) {
+ case B57600 :
+ baudpart = 1;
+ break;
+#ifdef B76800
+ case B76800 :
+ baudpart = 2;
+ break;
+#endif
+ case B115200 :
+ baudpart = 3;
+ break;
+ case B230400 :
+ baudpart = 9;
+ break;
+ case B460800 :
+ baudpart = 11;
+ break;
+#ifdef B921600
+ case B921600 :
+ baudpart = 12;
+ break;
+#endif
+ default:
+ baudpart = 0;
+ }
+
+ if (baudpart)
+ cflag = (cflag & ~(CBAUD | CBAUDEX)) | baudpart;
+ }
+
+ cflag &= 0xffff;
+
+ if (cflag != ch->ch_fepcflag) {
+ ch->ch_fepcflag = (u16) (cflag & 0xffff);
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdw(ch, SCFLAG, (u16) cflag, 0);
+ }
+
+ /* Handle transition from B0 */
+ if (ch->ch_flags & CH_BAUD0) {
+ ch->ch_flags &= ~(CH_BAUD0);
+ ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
+ }
+ mval = D_DTR(ch) | D_RTS(ch);
+ }
+
+ /*
+ * Get input flags.
+ */
+ iflag = ch->ch_c_iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK | INPCK | ISTRIP | IXON | IXANY | IXOFF);
+
+ if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE)) {
+ iflag &= ~(IXON | IXOFF);
+ ch->ch_c_iflag &= ~(IXON | IXOFF);
+ }
+
+ /*
+ * Only the IBM Xr card can switch between
+ * 232 and 422 modes on the fly
+ */
+ if (bd->device == PCI_DEVICE_XR_IBM_DID) {
+ if (ch->ch_digi.digi_flags & DIGI_422)
+ dgap_cmdb(ch, SCOMMODE, MODE_422, 0, 0);
+ else
+ dgap_cmdb(ch, SCOMMODE, MODE_232, 0, 0);
+ }
+
+ if (ch->ch_digi.digi_flags & DIGI_ALTPIN)
+ iflag |= IALTPIN ;
+
+ if (iflag != ch->ch_fepiflag) {
+ ch->ch_fepiflag = iflag;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdw(ch, SIFLAG, (u16) ch->ch_fepiflag, 0);
+ }
+
+ /*
+ * Select hardware handshaking.
+ */
+ hflow = 0;
+
+ if (ch->ch_c_cflag & CRTSCTS) {
+ hflow |= (D_RTS(ch) | D_CTS(ch));
+ }
+ if (ch->ch_digi.digi_flags & RTSPACE)
+ hflow |= D_RTS(ch);
+ if (ch->ch_digi.digi_flags & DTRPACE)
+ hflow |= D_DTR(ch);
+ if (ch->ch_digi.digi_flags & CTSPACE)
+ hflow |= D_CTS(ch);
+ if (ch->ch_digi.digi_flags & DSRPACE)
+ hflow |= D_DSR(ch);
+ if (ch->ch_digi.digi_flags & DCDPACE)
+ hflow |= D_CD(ch);
+
+ if (hflow != ch->ch_hflow) {
+ ch->ch_hflow = hflow;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SHFLOW, (uchar) hflow, 0xff, 0);
+ }
+
+
+ /*
+ * Set RTS and/or DTR Toggle if needed, but only if product is FEP5+ based.
+ */
+ if (bd->bd_flags & BD_FEP5PLUS) {
+ u16 hflow2 = 0;
+ if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
+ hflow2 |= (D_RTS(ch));
+ }
+ if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
+ hflow2 |= (D_DTR(ch));
+ }
+
+ dgap_cmdw_ext(ch, 0xff03, hflow2, 0);
+ }
+
+ /*
+ * Set modem control lines.
+ */
+
+ mval ^= ch->ch_mforce & (mval ^ ch->ch_mval);
+
+ DPR_PARAM(("dgap_param: mval: %x ch_mforce: %x ch_mval: %x ch_mostat: %x\n",
+ mval, ch->ch_mforce, ch->ch_mval, ch->ch_mostat));
+
+ if (ch->ch_mostat ^ mval) {
+ ch->ch_mostat = mval;
+
+ /* Okay to have channel and board locks held calling this */
+ DPR_PARAM(("dgap_param: Sending SMODEM mval: %x\n", mval));
+ dgap_cmdb(ch, SMODEM, (uchar) mval, D_RTS(ch)|D_DTR(ch), 0);
+ }
+
+ /*
+ * Read modem signals, and then call carrier function.
+ */
+ ch->ch_mistat = readb(&(bs->m_stat));
+ dgap_carrier(ch);
+
+ /*
+ * Set the start and stop characters.
+ */
+ if (ch->ch_startc != ch->ch_fepstartc || ch->ch_stopc != ch->ch_fepstopc) {
+ ch->ch_fepstartc = ch->ch_startc;
+ ch->ch_fepstopc = ch->ch_stopc;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SFLOWC, ch->ch_fepstartc, ch->ch_fepstopc, 0);
+ }
+
+ /*
+ * Set the Auxiliary start and stop characters.
+ */
+ if (ch->ch_astartc != ch->ch_fepastartc || ch->ch_astopc != ch->ch_fepastopc) {
+ ch->ch_fepastartc = ch->ch_astartc;
+ ch->ch_fepastopc = ch->ch_astopc;
+
+ /* Okay to have channel and board locks held calling this */
+ dgap_cmdb(ch, SAFLOWC, ch->ch_fepastartc, ch->ch_fepastopc, 0);
+ }
+
+ DPR_PARAM(("param finish\n"));
+
+ return (0);
+}
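+
+/*
+ * Baud lookup example (standard Linux cflag values assumed): B9600 is
+ * 13, so with neither CBAUDEX nor DIGI_FAST set the table walk above
+ * yields bauds[0][13] == 9600.  Selecting DIGI_FAST moves the lookup to
+ * the fastbaud row, so e.g. bauds[2][3] remaps the nominal 110 baud
+ * slot to 115200.
+ */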
+
+
+/*
+ * dgap_parity_scan()
+ *
+ * Convert the FEP5 way of reporting parity errors and breaks into
+ * the Linux line discipline way.
+ */
+void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf, unsigned char *fbuf, int *len)
+{
+ int l = *len;
+ int count = 0;
+ unsigned char *in, *cout, *fout;
+ unsigned char c;
+
+ in = cbuf;
+ cout = cbuf;
+ fout = fbuf;
+
+ DPR_PSCAN(("dgap_parity_scan start\n"));
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ while (l--) {
+ c = *in++;
+ switch (ch->pscan_state) {
+ default:
+ /* reset to sanity and fall through */
+ ch->pscan_state = 0;
+
+ case 0:
+ /* No FF seen yet */
+ if (c == (unsigned char) '\377') {
+ /* delete this character from stream */
+ ch->pscan_state = 1;
+ } else {
+ *cout++ = c;
+ *fout++ = TTY_NORMAL;
+ count += 1;
+ }
+ break;
+
+ case 1:
+ /* first FF seen */
+ if (c == (unsigned char) '\377') {
+ /* doubled ff, transform to single ff */
+ *cout++ = c;
+ *fout++ = TTY_NORMAL;
+ count += 1;
+ ch->pscan_state = 0;
+ } else {
+ /* save value for examination in next state */
+ ch->pscan_savechar = c;
+ ch->pscan_state = 2;
+ }
+ break;
+
+ case 2:
+ /* third character of ff sequence */
+
+ *cout++ = c;
+
+ if (ch->pscan_savechar == 0x0) {
+
+ if (c == 0x0) {
+ DPR_PSCAN(("dgap_parity_scan in 3rd char of ff seq. c: %x setting break.\n", c));
+ ch->ch_err_break++;
+ *fout++ = TTY_BREAK;
+ }
+ else {
+ DPR_PSCAN(("dgap_parity_scan in 3rd char of ff seq. c: %x setting parity.\n", c));
+ ch->ch_err_parity++;
+ *fout++ = TTY_PARITY;
+ }
+ }
+ else {
+ DPR_PSCAN(("%s:%d Logic Error.\n", __FILE__, __LINE__));
+ }
+
+ count += 1;
+ ch->pscan_state = 0;
+ }
+ }
+ *len = count;
+ DPR_PSCAN(("dgap_parity_scan finish\n"));
+}
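+
+/*
+ * The FEP5 in-band escape sequences decoded above are, for reference:
+ *
+ *   ff ff      -> a single literal 0xff data byte
+ *   ff 00 00   -> a line break                 (flag byte TTY_BREAK)
+ *   ff 00 <c>  -> character <c> received with a parity error
+ *                                              (flag byte TTY_PARITY)
+ *
+ * Everything else is passed through unchanged with TTY_NORMAL flags.
+ */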
+
+
+
+
+/*=======================================================================
+ *
+ * dgap_event - FEP to host event processing routine.
+ *
+ * bd - Board of current event.
+ *
+ *=======================================================================*/
+static int dgap_event(struct board_t *bd)
+{
+ struct channel_t *ch;
+ ulong lock_flags;
+ ulong lock_flags2;
+ struct bs_t *bs;
+ uchar *event;
+ uchar *vaddr = NULL;
+ struct ev_t *eaddr = NULL;
+ uint head;
+ uint tail;
+ int port;
+ int reason;
+ int modem;
+ int b1;
+
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (-ENXIO);
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+
+ vaddr = bd->re_map_membase;
+
+ if (!vaddr) {
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return (-ENXIO);
+ }
+
+ eaddr = (struct ev_t *) (vaddr + EVBUF);
+
+ /* Get our head and tail */
+ head = readw(&(eaddr->ev_head));
+ tail = readw(&(eaddr->ev_tail));
+
+ /*
+ * Forget it if pointers out of range.
+ */
+
+ if (head >= EVMAX - EVSTART || tail >= EVMAX - EVSTART ||
+ (head | tail) & 03) {
+ DPR_EVENT(("should be calling xxfail %d\n", __LINE__));
+ /* Let go of board lock */
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return (-ENXIO);
+ }
+
+ /*
+ * Loop to process all the events in the buffer.
+ */
+ while (tail != head) {
+
+ /*
+ * Get interrupt information.
+ */
+
+ event = bd->re_map_membase + tail + EVSTART;
+
+ port = event[0];
+ reason = event[1];
+ modem = event[2];
+ b1 = event[3];
+
+ DPR_EVENT(("event: jiffies: %ld port: %d reason: %x modem: %x\n",
+ jiffies, port, reason, modem));
+
+ /*
+ * Make sure the interrupt is valid.
+ */
+ if ( port >= bd->nasync) {
+ goto next;
+ }
+
+ if (!(reason & (IFMODEM | IFBREAK | IFTLW | IFTEM | IFDATA))) {
+ goto next;
+ }
+
+ ch = bd->channels[port];
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC) {
+ goto next;
+ }
+
+ /*
+ * If we have made it here, the event was valid.
+ * Lock down the channel.
+ */
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ bs = ch->ch_bs;
+
+ if (!bs) {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ goto next;
+ }
+
+ /*
+ * Process received data.
+ */
+ if (reason & IFDATA) {
+
+ /*
+ * ALL LOCKS *MUST* BE DROPPED BEFORE CALLING INPUT!
+ * input could send some data to ld, which in turn
+ * could do a callback to one of our other functions.
+ */
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ dgap_input(ch);
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ if (ch->ch_flags & CH_RACTIVE)
+ ch->ch_flags |= CH_RENABLE;
+ else
+ writeb(1, &(bs->idata));
+
+ if (ch->ch_flags & CH_RWAIT) {
+ ch->ch_flags &= ~CH_RWAIT;
+
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+ }
+ }
+
+ /*
+ * Process Modem change signals.
+ */
+ if (reason & IFMODEM) {
+ ch->ch_mistat = modem;
+ dgap_carrier(ch);
+ }
+
+ /*
+ * Process break.
+ */
+ if (reason & IFBREAK) {
+
+ DPR_EVENT(("got IFBREAK\n"));
+
+ if (ch->ch_tun.un_tty) {
+ /* A break has been indicated */
+ ch->ch_err_break++;
+ tty_buffer_request_room(ch->ch_tun.un_tty->port, 1);
+ tty_insert_flip_char(ch->ch_tun.un_tty->port, 0, TTY_BREAK);
+ tty_flip_buffer_push(ch->ch_tun.un_tty->port);
+ }
+ }
+
+ /*
+ * Process Transmit low.
+ */
+ if (reason & IFTLW) {
+
+ DPR_EVENT(("event: got low event\n"));
+
+ if (ch->ch_tun.un_flags & UN_LOW) {
+ ch->ch_tun.un_flags &= ~UN_LOW;
+
+ if (ch->ch_tun.un_flags & UN_ISOPEN) {
+ if ((ch->ch_tun.un_tty->flags &
+ (1 << TTY_DO_WRITE_WAKEUP)) &&
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
+ ch->ch_tun.un_tty->ldisc->ops->write_wakeup)
+#else
+ ch->ch_tun.un_tty->ldisc.ops->write_wakeup)
+#endif
+ {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
+ (ch->ch_tun.un_tty->ldisc->ops->write_wakeup)(ch->ch_tun.un_tty);
+#else
+ (ch->ch_tun.un_tty->ldisc.ops->write_wakeup)(ch->ch_tun.un_tty);
+#endif
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+ }
+ wake_up_interruptible(&ch->ch_tun.un_tty->write_wait);
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+
+ DPR_EVENT(("event: Got low event. jiffies: %lu\n", jiffies));
+ }
+ }
+
+ if (ch->ch_pun.un_flags & UN_LOW) {
+ ch->ch_pun.un_flags &= ~UN_LOW;
+ if (ch->ch_pun.un_flags & UN_ISOPEN) {
+ if ((ch->ch_pun.un_tty->flags &
+ (1 << TTY_DO_WRITE_WAKEUP)) &&
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
+ ch->ch_pun.un_tty->ldisc->ops->write_wakeup)
+#else
+ ch->ch_pun.un_tty->ldisc.ops->write_wakeup)
+#endif
+ {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
+ (ch->ch_pun.un_tty->ldisc->ops->write_wakeup)(ch->ch_pun.un_tty);
+#else
+ (ch->ch_pun.un_tty->ldisc.ops->write_wakeup)(ch->ch_pun.un_tty);
+#endif
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+ }
+ wake_up_interruptible(&ch->ch_pun.un_tty->write_wait);
+ wake_up_interruptible(&ch->ch_pun.un_flags_wait);
+ }
+ }
+
+ if (ch->ch_flags & CH_WLOW) {
+ ch->ch_flags &= ~CH_WLOW;
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+ }
+
+ /*
+ * Process Transmit empty.
+ */
+ if (reason & IFTEM) {
+ DPR_EVENT(("event: got empty event\n"));
+
+ if (ch->ch_tun.un_flags & UN_EMPTY) {
+ ch->ch_tun.un_flags &= ~UN_EMPTY;
+ if (ch->ch_tun.un_flags & UN_ISOPEN) {
+ if ((ch->ch_tun.un_tty->flags &
+ (1 << TTY_DO_WRITE_WAKEUP)) &&
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
+ ch->ch_tun.un_tty->ldisc->ops->write_wakeup)
+#else
+ ch->ch_tun.un_tty->ldisc.ops->write_wakeup)
+#endif
+ {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
+ (ch->ch_tun.un_tty->ldisc->ops->write_wakeup)(ch->ch_tun.un_tty);
+#else
+ (ch->ch_tun.un_tty->ldisc.ops->write_wakeup)(ch->ch_tun.un_tty);
+#endif
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+ }
+ wake_up_interruptible(&ch->ch_tun.un_tty->write_wait);
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+ }
+ }
+
+ if (ch->ch_pun.un_flags & UN_EMPTY) {
+ ch->ch_pun.un_flags &= ~UN_EMPTY;
+ if (ch->ch_pun.un_flags & UN_ISOPEN) {
+ if ((ch->ch_pun.un_tty->flags &
+ (1 << TTY_DO_WRITE_WAKEUP)) &&
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
+ ch->ch_pun.un_tty->ldisc->ops->write_wakeup)
+#else
+ ch->ch_pun.un_tty->ldisc.ops->write_wakeup)
+#endif
+ {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31)
+ (ch->ch_pun.un_tty->ldisc->ops->write_wakeup)(ch->ch_pun.un_tty);
+#else
+ (ch->ch_pun.un_tty->ldisc.ops->write_wakeup)(ch->ch_pun.un_tty);
+#endif
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+ }
+ wake_up_interruptible(&ch->ch_pun.un_tty->write_wait);
+ wake_up_interruptible(&ch->ch_pun.un_flags_wait);
+ }
+ }
+
+
+ if (ch->ch_flags & CH_WEMPTY) {
+ ch->ch_flags &= ~CH_WEMPTY;
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+ }
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+
+next:
+ tail = (tail + 4) & (EVMAX - EVSTART - 4);
+ }
+
+ writew(tail, &(eaddr->ev_tail));
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ return (0);
+}
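+
+/*
+ * Each entry in the circular event buffer is a 4-byte record:
+ *
+ *   event[0] = port number
+ *   event[1] = reason bitmask (IFDATA, IFMODEM, IFBREAK, IFTLW, IFTEM)
+ *   event[2] = current modem status
+ *   event[3] = unused here (read into b1 above)
+ */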
diff --git a/drivers/staging/dgap/dgap_fep5.h b/drivers/staging/dgap/dgap_fep5.h
new file mode 100644
index 00000000000..3a12ba5e3c2
--- /dev/null
+++ b/drivers/staging/dgap/dgap_fep5.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ ************************************************************************
+ *** FEP Version 5 dependent definitions
+ ************************************************************************/
+
+#ifndef __DGAP_FEP5_H
+#define __DGAP_FEP5_H
+
+/************************************************************************
+ * FEP memory offsets
+ ************************************************************************/
+#define START 0x0004L /* Execution start address */
+
+#define CMDBUF 0x0d10L /* Command (cm_t) structure offset */
+#define CMDSTART 0x0400L /* Start of command buffer */
+#define CMDMAX 0x0800L /* End of command buffer */
+
+#define EVBUF 0x0d18L /* Event (ev_t) structure */
+#define EVSTART 0x0800L /* Start of event buffer */
+#define EVMAX 0x0c00L /* End of event buffer */
+#define FEP5_PLUS 0x0E40 /* ASCII '5' and ASCII 'A' are here */
+#define ECS_SEG 0x0E44 /* Segment of the extended channel structure */
+#define LINE_SPEED 0x10 /* Offset into ECS_SEG for line speed */
+ /* if the fep has extended capabilities */
+
+/* BIOS MAGIC SPOTS */
+#define ERROR 0x0C14L /* BIOS error code */
+#define SEQUENCE 0x0C12L /* BIOS sequence indicator */
+#define POSTAREA 0x0C00L /* POST complete message area */
+
+/* FEP MAGIC SPOTS */
+#define FEPSTAT POSTAREA /* OS here when FEP comes up */
+#define NCHAN 0x0C02L /* number of ports FEP sees */
+#define PANIC 0x0C10L /* PANIC area for FEP */
+#define KMEMEM 0x0C30L /* Memory for KME use */
+#define CONFIG 0x0CD0L /* Concentrator configuration info */
+#define CONFIGSIZE 0x0030 /* configuration info size */
+#define DOWNREQ 0x0D00 /* Download request buffer pointer */
+
+#define CHANBUF 0x1000L /* Async channel (bs_t) structs */
+#define FEPOSSIZE 0x1FFF /* 8K FEPOS */
+
+#define XEMPORTS 0xC02 /*
+ * Offset in board memory where FEP5 stores
+ * how many ports it has detected.
+ * NOTE: FEP5 reports 64 ports when the user
+ * has the cable in EBI OUT instead of EBI IN.
+ */
+
+#define FEPCLR 0x00
+#define FEPMEM 0x02
+#define FEPRST 0x04
+#define FEPINT 0x08
+#define FEPMASK 0x0e
+#define FEPWIN 0x80
+
+#define LOWMEM 0x0100
+#define HIGHMEM 0x7f00
+
+#define FEPTIMEOUT 200000
+
+#define ENABLE_INTR 0x0e04 /* Enable interrupts flag */
+#define FEPPOLL_MIN 1 /* minimum of 1 millisecond */
+#define FEPPOLL_MAX 20 /* maximum of 20 milliseconds */
+#define FEPPOLL 0x0c26 /* Fep event poll interval */
+
+#define IALTPIN 0x0080 /* Input flag to swap DSR <-> DCD */
+
+/************************************************************************
+ * Command structure definition.
+ ************************************************************************/
+struct cm_t {
+ volatile unsigned short cm_head; /* Command buffer head offset */
+ volatile unsigned short cm_tail; /* Command buffer tail offset */
+ volatile unsigned short cm_start; /* start offset of buffer */
+ volatile unsigned short cm_max; /* last offset of buffer */
+};
+
+/************************************************************************
+ * Event structure definition.
+ ************************************************************************/
+struct ev_t {
+ volatile unsigned short ev_head; /* Event buffer head offset */
+ volatile unsigned short ev_tail; /* Event buffer tail offset */
+ volatile unsigned short ev_start; /* start offset of buffer */
+ volatile unsigned short ev_max; /* last offset of buffer */
+};
+
+/************************************************************************
+ * Download buffer structure.
+ ************************************************************************/
+struct downld_t {
+ uchar dl_type; /* Header */
+ uchar dl_seq; /* Download sequence */
+ ushort dl_srev; /* Software revision number */
+ ushort dl_lrev; /* Low revision number */
+ ushort dl_hrev; /* High revision number */
+ ushort dl_seg; /* Start segment address */
+ ushort dl_size; /* Number of bytes to download */
+ uchar dl_data[1024]; /* Download data */
+};
+
+/************************************************************************
+ * Per channel buffer structure
+ ************************************************************************
+ * Base Structure Entries Usage Meanings to Host *
+ * *
+ * W = read write R = read only *
+ * C = changed by commands only *
+ * U = unknown (may be changed w/o notice) *
+ ************************************************************************/
+struct bs_t {
+ volatile unsigned short tp_jmp; /* Transmit poll jump */
+ volatile unsigned short tc_jmp; /* Cooked procedure jump */
+ volatile unsigned short ri_jmp; /* Not currently used */
+ volatile unsigned short rp_jmp; /* Receive poll jump */
+
+ volatile unsigned short tx_seg; /* W Tx segment */
+ volatile unsigned short tx_head; /* W Tx buffer head offset */
+ volatile unsigned short tx_tail; /* R Tx buffer tail offset */
+ volatile unsigned short tx_max; /* W Tx buffer size - 1 */
+
+ volatile unsigned short rx_seg; /* W Rx segment */
+ volatile unsigned short rx_head; /* W Rx buffer head offset */
+ volatile unsigned short rx_tail; /* R Rx buffer tail offset */
+ volatile unsigned short rx_max; /* W Rx buffer size - 1 */
+
+ volatile unsigned short tx_lw; /* W Tx buffer low water mark */
+ volatile unsigned short rx_lw; /* W Rx buffer low water mark */
+ volatile unsigned short rx_hw; /* W Rx buffer high water mark */
+ volatile unsigned short incr; /* W Increment to next channel */
+
+ volatile unsigned short fepdev; /* U SCC device base address */
+ volatile unsigned short edelay; /* W Exception delay */
+ volatile unsigned short blen; /* W Break length */
+ volatile unsigned short btime; /* U Break complete time */
+
+ volatile unsigned short iflag; /* C UNIX input flags */
+ volatile unsigned short oflag; /* C UNIX output flags */
+ volatile unsigned short cflag; /* C UNIX control flags */
+ volatile unsigned short wfill[13]; /* U Reserved for expansion */
+
+ volatile unsigned char num; /* U Channel number */
+ volatile unsigned char ract; /* U Receiver active counter */
+ volatile unsigned char bstat; /* U Break status bits */
+ volatile unsigned char tbusy; /* W Transmit busy */
+ volatile unsigned char iempty; /* W Transmit empty event enable */
+ volatile unsigned char ilow; /* W Transmit low-water event enable */
+ volatile unsigned char idata; /* W Receive data interrupt enable */
+ volatile unsigned char eflag; /* U Host event flags */
+
+ volatile unsigned char tflag; /* U Transmit flags */
+ volatile unsigned char rflag; /* U Receive flags */
+ volatile unsigned char xmask; /* U Transmit ready flags */
+ volatile unsigned char xval; /* U Transmit ready value */
+ volatile unsigned char m_stat; /* RC Modem status bits */
+ volatile unsigned char m_change; /* U Modem bits which changed */
+ volatile unsigned char m_int; /* W Modem interrupt enable bits */
+ volatile unsigned char m_last; /* U Last modem status */
+
+ volatile unsigned char mtran; /* C Unreported modem trans */
+ volatile unsigned char orun; /* C Buffer overrun occurred */
+ volatile unsigned char astartc; /* W Auxiliary Xon char */
+ volatile unsigned char astopc; /* W Auxiliary Xoff char */
+ volatile unsigned char startc; /* W Xon character */
+ volatile unsigned char stopc; /* W Xoff character */
+ volatile unsigned char vnextc; /* W Vnext character */
+ volatile unsigned char hflow; /* C Software flow control */
+
+ volatile unsigned char fillc; /* U Delay Fill character */
+ volatile unsigned char ochar; /* U Saved output character */
+ volatile unsigned char omask; /* U Output character mask */
+
+ volatile unsigned char bfill[13]; /* U Reserved for expansion */
+
+ volatile unsigned char scc[16]; /* U SCC registers */
+};
+
+
+/************************************************************************
+ * FEP supported functions
+ ************************************************************************/
+#define SRLOW 0xe0 /* Set receive low water */
+#define SRHIGH 0xe1 /* Set receive high water */
+#define FLUSHTX 0xe2 /* Flush transmit buffer */
+#define PAUSETX 0xe3 /* Pause data transmission */
+#define RESUMETX 0xe4 /* Resume data transmission */
+#define SMINT 0xe5 /* Set Modem Interrupt */
+#define SAFLOWC 0xe6 /* Set Aux. flow control chars */
+#define SBREAK 0xe8 /* Send break */
+#define SMODEM 0xe9 /* Set 8530 modem control lines */
+#define SIFLAG 0xea /* Set UNIX iflags */
+#define SFLOWC 0xeb /* Set flow control characters */
+#define STLOW 0xec /* Set transmit low water mark */
+#define RPAUSE 0xee /* Pause receive */
+#define RRESUME 0xef /* Resume receive */
+#define CHRESET 0xf0 /* Reset Channel */
+#define BUFSETALL 0xf2 /* Set Tx & Rx buffer size avail*/
+#define SOFLAG 0xf3 /* Set UNIX oflags */
+#define SHFLOW 0xf4 /* Set hardware handshake */
+#define SCFLAG 0xf5 /* Set UNIX cflags */
+#define SVNEXT 0xf6 /* Set VNEXT character */
+#define SPINTFC 0xfc /* Reserved */
+#define SCOMMODE 0xfd /* Set RS232/422 mode */
+
+
+/************************************************************************
+ * Modes for SCOMMODE
+ ************************************************************************/
+#define MODE_232 0x00
+#define MODE_422 0x01
+
+
+/************************************************************************
+ * Event flags.
+ ************************************************************************/
+#define IFBREAK 0x01 /* Break received */
+#define IFTLW 0x02 /* Transmit low water */
+#define IFTEM 0x04 /* Transmitter empty */
+#define IFDATA 0x08 /* Receive data present */
+#define IFMODEM 0x20 /* Modem status change */
+
+/************************************************************************
+ * Modem flags
+ ************************************************************************/
+# define DM_RTS 0x02 /* Request to send */
+# define DM_CD 0x80 /* Carrier detect */
+# define DM_DSR 0x20 /* Data set ready */
+# define DM_CTS 0x10 /* Clear to send */
+# define DM_RI 0x40 /* Ring indicator */
+# define DM_DTR 0x01 /* Data terminal ready */
+
+
+#endif
diff --git a/drivers/staging/dgap/dgap_kcompat.h b/drivers/staging/dgap/dgap_kcompat.h
new file mode 100644
index 00000000000..8ebf4b7373b
--- /dev/null
+++ b/drivers/staging/dgap/dgap_kcompat.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2004 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ *************************************************************************
+ *
+ * This file is intended to contain all the kernel "differences" between the
+ * various kernels that we support.
+ *
+ *************************************************************************/
+
+#ifndef __DGAP_KCOMPAT_H
+#define __DGAP_KCOMPAT_H
+
+# ifndef KERNEL_VERSION
+# define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
+# endif
+
+
+#if !defined(TTY_FLIPBUF_SIZE)
+# define TTY_FLIPBUF_SIZE 512
+#endif
+
+
+/* Sparse stuff */
+# ifndef __user
+# define __user
+# define __kernel
+# define __safe
+# define __force
+# define __chk_user_ptr(x) (void)0
+# endif
+
+
+# define PARM_STR(VAR, INIT, PERM, DESC) \
+ static char *VAR = INIT; \
+ char *dgap_##VAR; \
+ module_param(VAR, charp, PERM); \
+ MODULE_PARM_DESC(VAR, DESC);
+
+# define PARM_INT(VAR, INIT, PERM, DESC) \
+ static int VAR = INIT; \
+ int dgap_##VAR; \
+ module_param(VAR, int, PERM); \
+ MODULE_PARM_DESC(VAR, DESC);
+
+# define PARM_ULONG(VAR, INIT, PERM, DESC) \
+ static ulong VAR = INIT; \
+ ulong dgap_##VAR; \
+ module_param(VAR, long, PERM); \
+ MODULE_PARM_DESC(VAR, DESC);
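+
+/*
+ * Illustrative expansion only; the parameter name below is hypothetical
+ * and the real module parameters are declared in the driver proper:
+ *
+ *   PARM_INT(trcbuf_size, 0x100000, 0644, "Debug trace buffer size");
+ *
+ * declares a static "trcbuf_size" int plus an exported "dgap_trcbuf_size",
+ * registers it with module_param() and attaches the MODULE_PARM_DESC()
+ * description string.
+ */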
+
+
+
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
+
+
+
+
+/* NOTHING YET */
+
+
+
+
+# else
+
+
+
+# error "this driver does not support anything below the 2.6.27 kernel series."
+
+
+
+# endif
+
+#endif /* ! __DGAP_KCOMPAT_H */
diff --git a/drivers/staging/dgap/dgap_parse.c b/drivers/staging/dgap/dgap_parse.c
new file mode 100644
index 00000000000..5497e6de060
--- /dev/null
+++ b/drivers/staging/dgap/dgap_parse.c
@@ -0,0 +1,1371 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ *
+ *****************************************************************************
+ *
+ * dgap_parse.c - Parses the configuration information from the input file.
+ *
+ * $Id: dgap_parse.c,v 1.1 2009/10/23 14:01:57 markh Exp $
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+
+#include "dgap_types.h"
+#include "dgap_fep5.h"
+#include "dgap_driver.h"
+#include "dgap_conf.h"
+
+
+/*
+ * Function prototypes.
+ */
+static int dgap_gettok(char **in, struct cnode *p);
+static char *dgap_getword(char **in);
+static char *dgap_savestring(char *s);
+static struct cnode *dgap_newnode(int t);
+static int dgap_checknode(struct cnode *p);
+static void dgap_err(char *s);
+
+/*
+ * Our needed internal static variables...
+ */
+static struct cnode dgap_head;
+#define MAXCWORD 200
+static char dgap_cword[MAXCWORD];
+
+struct toklist {
+ int token;
+ char *string;
+};
+
+static struct toklist dgap_tlist[] = {
+ { BEGIN, "config_begin" },
+ { END, "config_end" },
+ { BOARD, "board" },
+ { PCX, "Digi_AccelePort_C/X_PCI" }, /* C/X_PCI */
+ { PEPC, "Digi_AccelePort_EPC/X_PCI" }, /* EPC/X_PCI */
+ { PPCM, "Digi_AccelePort_Xem_PCI" }, /* PCI/Xem */
+ { APORT2_920P, "Digi_AccelePort_2r_920_PCI" },
+ { APORT4_920P, "Digi_AccelePort_4r_920_PCI" },
+ { APORT8_920P, "Digi_AccelePort_8r_920_PCI" },
+ { PAPORT4, "Digi_AccelePort_4r_PCI(EIA-232/RS-422)" },
+ { PAPORT8, "Digi_AccelePort_8r_PCI(EIA-232/RS-422)" },
+ { IO, "io" },
+ { PCIINFO, "pciinfo" },
+ { LINE, "line" },
+ { CONC, "conc" },
+ { CONC, "concentrator" },
+ { CX, "cx" },
+ { CX, "ccon" },
+ { EPC, "epccon" },
+ { EPC, "epc" },
+ { MOD, "module" },
+ { ID, "id" },
+ { STARTO, "start" },
+ { SPEED, "speed" },
+ { CABLE, "cable" },
+ { CONNECT, "connect" },
+ { METHOD, "method" },
+ { STATUS, "status" },
+ { CUSTOM, "Custom" },
+ { BASIC, "Basic" },
+ { MEM, "mem" },
+ { MEM, "memory" },
+ { PORTS, "ports" },
+ { MODEM, "modem" },
+ { NPORTS, "nports" },
+ { TTYN, "ttyname" },
+ { CU, "cuname" },
+ { PRINT, "prname" },
+ { CMAJOR, "major" },
+ { ALTPIN, "altpin" },
+ { USEINTR, "useintr" },
+ { TTSIZ, "ttysize" },
+ { CHSIZ, "chsize" },
+ { BSSIZ, "boardsize" },
+ { UNTSIZ, "schedsize" },
+ { F2SIZ, "f2200size" },
+ { VPSIZ, "vpixsize" },
+ { 0, NULL }
+};
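+
+/*
+ * A minimal sketch of the config file this parser consumes, built from
+ * tokens in the table above (the exact layout shipped with the Digi
+ * tools may differ; this is for orientation only):
+ *
+ *   config_begin
+ *   board    Digi_AccelePort_8r_920_PCI
+ *   pciinfo  0 1
+ *   nports   8
+ *   ttyname  ttyG
+ *   useintr  0
+ *   config_end
+ */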
+
+
+/*
+ * Parse a configuration file read into memory as a string.
+ */
+int dgap_parsefile(char **in, int Remove)
+{
+ struct cnode *p, *brd, *line, *conc;
+ int rc;
+ char *s = NULL, *s2 = NULL;
+ int linecnt = 0;
+
+ p = &dgap_head;
+ brd = line = conc = NULL;
+
+ /* perhaps we are adding to an existing list? */
+ while (p->next != NULL) {
+ p = p->next;
+ }
+
+ /* file must start with a BEGIN */
+ while ( (rc = dgap_gettok(in,p)) != BEGIN ) {
+ if (rc == 0) {
+ dgap_err("unexpected EOF");
+ return(-1);
+ }
+ }
+
+ for (; ; ) {
+ rc = dgap_gettok(in,p);
+ if (rc == 0) {
+ dgap_err("unexpected EOF");
+ return(-1);
+ }
+
+ switch (rc) {
+ case 0:
+ dgap_err("unexpected end of file");
+ return(-1);
+
+ case BEGIN: /* should only be 1 begin */
+ dgap_err("unexpected config_begin\n");
+ return(-1);
+
+ case END:
+ return(0);
+
+ case BOARD: /* board info */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(BNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+
+ p->u.board.status = dgap_savestring("No");
+ line = conc = NULL;
+ brd = p;
+ linecnt = -1;
+ break;
+
+ case APORT2_920P: /* AccelePort_2 */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_2r_920 string");
+ return(-1);
+ }
+ p->u.board.type = APORT2_920P;
+ p->u.board.v_type = 1;
+ DPR_INIT(("Adding Digi_2r_920 PCI to config...\n"));
+ break;
+
+ case APORT4_920P: /* AccelePort_4 */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_4r_920 string");
+ return(-1);
+ }
+ p->u.board.type = APORT4_920P;
+ p->u.board.v_type = 1;
+ DPR_INIT(("Adding Digi_4r_920 PCI to config...\n"));
+ break;
+
+ case APORT8_920P: /* AccelePort_8 */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_8r_920 string");
+ return(-1);
+ }
+ p->u.board.type = APORT8_920P;
+ p->u.board.v_type = 1;
+ DPR_INIT(("Adding Digi_8r_920 PCI to config...\n"));
+ break;
+
+ case PAPORT4: /* AccelePort_4 PCI */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_4r(PCI) string");
+ return(-1);
+ }
+ p->u.board.type = PAPORT4;
+ p->u.board.v_type = 1;
+ DPR_INIT(("Adding Digi_4r PCI to config...\n"));
+ break;
+
+ case PAPORT8: /* AccelePort_8 PCI */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_8r string");
+ return(-1);
+ }
+ p->u.board.type = PAPORT8;
+ p->u.board.v_type = 1;
+ DPR_INIT(("Adding Digi_8r PCI to config...\n"));
+ break;
+
+ case PCX: /* PCI C/X */
+ if (p->type != BNODE) {
+ dgap_err("unexpected Digi_C/X_(PCI) string");
+ return(-1);
+ }
+ p->u.board.type = PCX;
+ p->u.board.v_type = 1;
+ p->u.board.conc1 = 0;
+ p->u.board.conc2 = 0;
+ p->u.board.module1 = 0;
+ p->u.board.module2 = 0;
+ DPR_INIT(("Adding PCI C/X to config...\n"));
+ break;
+
+ case PEPC: /* PCI EPC/X */
+ if (p->type != BNODE) {
+ dgap_err("unexpected \"Digi_EPC/X_(PCI)\" string");
+ return(-1);
+ }
+ p->u.board.type = PEPC;
+ p->u.board.v_type = 1;
+ p->u.board.conc1 = 0;
+ p->u.board.conc2 = 0;
+ p->u.board.module1 = 0;
+ p->u.board.module2 = 0;
+ DPR_INIT(("Adding PCI EPC/X to config...\n"));
+ break;
+
+ case PPCM: /* PCI/Xem */
+ if (p->type != BNODE) {
+ dgap_err("unexpected PCI/Xem string");
+ return(-1);
+ }
+ p->u.board.type = PPCM;
+ p->u.board.v_type = 1;
+ p->u.board.conc1 = 0;
+ p->u.board.conc2 = 0;
+ DPR_INIT(("Adding PCI XEM to config...\n"));
+ break;
+
+ case IO: /* i/o port */
+ if (p->type != BNODE) {
+ dgap_err("IO port only vaild for boards");
+ return(-1);
+ }
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.board.portstr = dgap_savestring(s);
+ p->u.board.port = (short)simple_strtol(s, &s2, 0);
+ if ((short)strlen(s) > (short)(s2 - s)) {
+ dgap_err("bad number for IO port");
+ return(-1);
+ }
+ p->u.board.v_port = 1;
+ DPR_INIT(("Adding IO (%s) to config...\n", s));
+ break;
+
+ case MEM: /* memory address */
+ if (p->type != BNODE) {
+ dgap_err("memory address only vaild for boards");
+ return(-1);
+ }
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.board.addrstr = dgap_savestring(s);
+ p->u.board.addr = simple_strtoul(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for memory address");
+ return(-1);
+ }
+ p->u.board.v_addr = 1;
+ DPR_INIT(("Adding MEM (%s) to config...\n", s));
+ break;
+
+ case PCIINFO: /* pci information */
+ if (p->type != BNODE) {
+ dgap_err("memory address only vaild for boards");
+ return(-1);
+ }
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.board.pcibusstr = dgap_savestring(s);
+ p->u.board.pcibus = simple_strtoul(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for pci bus");
+ return(-1);
+ }
+ p->u.board.v_pcibus = 1;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.board.pcislotstr = dgap_savestring(s);
+ p->u.board.pcislot = simple_strtoul(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for pci slot");
+ return(-1);
+ }
+ p->u.board.v_pcislot = 1;
+
+ DPR_INIT(("Adding PCIINFO (%s %s) to config...\n", p->u.board.pcibusstr,
+ p->u.board.pcislotstr));
+ break;
+
+ case METHOD:
+ if (p->type != BNODE) {
+ dgap_err("install method only vaild for boards");
+ return(-1);
+ }
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.board.method = dgap_savestring(s);
+ p->u.board.v_method = 1;
+ DPR_INIT(("Adding METHOD (%s) to config...\n", s));
+ break;
+
+ case STATUS:
+ if (p->type != BNODE) {
+ dgap_err("config status only vaild for boards");
+ return(-1);
+ }
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.board.status = dgap_savestring(s);
+ DPR_INIT(("Adding STATUS (%s) to config...\n", s));
+ break;
+
+ case NPORTS: /* number of ports */
+ if (p->type == BNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.board.nport = (char)simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for number of ports");
+ return(-1);
+ }
+ p->u.board.v_nport = 1;
+ } else if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.conc.nport = (char)simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for number of ports");
+ return(-1);
+ }
+ p->u.conc.v_nport = 1;
+ } else if (p->type == MNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.module.nport = (char)simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for number of ports");
+ return(-1);
+ }
+ p->u.module.v_nport = 1;
+ } else {
+ dgap_err("nports only valid for concentrators or modules");
+ return(-1);
+ }
+ DPR_INIT(("Adding NPORTS (%s) to config...\n", s));
+ break;
+
+ case ID: /* letter ID used in tty name */
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+
+ p->u.board.status = dgap_savestring(s);
+
+ if (p->type == CNODE) {
+ p->u.conc.id = dgap_savestring(s);
+ p->u.conc.v_id = 1;
+ } else if (p->type == MNODE) {
+ p->u.module.id = dgap_savestring(s);
+ p->u.module.v_id = 1;
+ } else {
+ dgap_err("id only valid for concentrators or modules");
+ return(-1);
+ }
+ DPR_INIT(("Adding ID (%s) to config...\n", s));
+ break;
+
+ case STARTO: /* start offset of ID */
+ if (p->type == BNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.board.start = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for start of tty count");
+ return(-1);
+ }
+ p->u.board.v_start = 1;
+ } else if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.conc.start = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for start of tty count");
+ return(-1);
+ }
+ p->u.conc.v_start = 1;
+ } else if (p->type == MNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.module.start = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for start of tty count");
+ return(-1);
+ }
+ p->u.module.v_start = 1;
+ } else {
+ dgap_err("start only valid for concentrators or modules");
+ return(-1);
+ }
+ DPR_INIT(("Adding START (%s) to config...\n", s));
+ break;
+
+ case TTYN: /* tty name prefix */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(TNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ if ( (s = dgap_getword(in)) == NULL ) {
+ dgap_err("unexpeced end of file");
+ return(-1);
+ }
+ if ( (p->u.ttyname = dgap_savestring(s)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ DPR_INIT(("Adding TTY (%s) to config...\n", s));
+ break;
+
+ case CU: /* cu name prefix */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(CUNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ if ( (s = dgap_getword(in)) == NULL ) {
+ dgap_err("unexpeced end of file");
+ return(-1);
+ }
+ if ( (p->u.cuname = dgap_savestring(s)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ DPR_INIT(("Adding CU (%s) to config...\n", s));
+ break;
+
+ case LINE: /* line information */
+ if (dgap_checknode(p))
+ return(-1);
+ if (brd == NULL) {
+ dgap_err("must specify board before line info");
+ return(-1);
+ }
+ switch (brd->u.board.type) {
+ case PPCM:
+ dgap_err("line not vaild for PC/em");
+ return(-1);
+ }
+ if ( (p->next = dgap_newnode(LNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ conc = NULL;
+ line = p;
+ linecnt++;
+ DPR_INIT(("Adding LINE to config...\n"));
+ break;
+
+ case CONC: /* concentrator information */
+ if (dgap_checknode(p))
+ return(-1);
+ if (line == NULL) {
+ dgap_err("must specify line info before concentrator");
+ return(-1);
+ }
+ if ( (p->next = dgap_newnode(CNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ conc = p;
+ if (linecnt)
+ brd->u.board.conc2++;
+ else
+ brd->u.board.conc1++;
+
+ DPR_INIT(("Adding CONC to config...\n"));
+ break;
+
+ case CX: /* c/x type concentrator */
+ if (p->type != CNODE) {
+ dgap_err("cx only valid for concentrators");
+ return(-1);
+ }
+ p->u.conc.type = CX;
+ p->u.conc.v_type = 1;
+ DPR_INIT(("Adding CX to config...\n"));
+ break;
+
+ case EPC: /* epc type concentrator */
+ if (p->type != CNODE) {
+ dgap_err("cx only valid for concentrators");
+ return(-1);
+ }
+ p->u.conc.type = EPC;
+ p->u.conc.v_type = 1;
+ DPR_INIT(("Adding EPC to config...\n"));
+ break;
+
+ case MOD: /* EBI module */
+ if (dgap_checknode(p))
+ return(-1);
+ if (brd == NULL) {
+ dgap_err("must specify board info before EBI modules");
+ return(-1);
+ }
+ switch (brd->u.board.type) {
+ case PPCM:
+ linecnt = 0;
+ break;
+ default:
+ if (conc == NULL) {
+ dgap_err("must specify concentrator info before EBI module");
+ return(-1);
+ }
+ }
+ if ( (p->next = dgap_newnode(MNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ if (linecnt)
+ brd->u.board.module2++;
+ else
+ brd->u.board.module1++;
+
+ DPR_INIT(("Adding MOD to config...\n"));
+ break;
+
+ case PORTS: /* ports type EBI module */
+ if (p->type != MNODE) {
+ dgap_err("ports only valid for EBI modules");
+ return(-1);
+ }
+ p->u.module.type = PORTS;
+ p->u.module.v_type = 1;
+ DPR_INIT(("Adding PORTS to config...\n"));
+ break;
+
+ case MODEM: /* modem type EBI module */
+ if (p->type != MNODE) {
+ dgap_err("modem only valid for modem modules");
+ return(-1);
+ }
+ p->u.module.type = MODEM;
+ p->u.module.v_type = 1;
+ DPR_INIT(("Adding MODEM to config...\n"));
+ break;
+
+ case CABLE:
+ if (p->type == LNODE) {
+ if ((s = dgap_getword(in)) == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.line.cable = dgap_savestring(s);
+ p->u.line.v_cable = 1;
+ }
+ DPR_INIT(("Adding CABLE (%s) to config...\n", s));
+ break;
+
+ case SPEED: /* sync line speed indication */
+ if (p->type == LNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.line.speed = (char)simple_strtol(s, &s2, 0);
+ if ((short)strlen(s) > (short)(s2 - s)) {
+ dgap_err("bad number for line speed");
+ return(-1);
+ }
+ p->u.line.v_speed = 1;
+ } else if (p->type == CNODE) {
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.conc.speed = (char)simple_strtol(s, &s2, 0);
+ if ((short)strlen(s) > (short)(s2 - s)) {
+ dgap_err("bad number for line speed");
+ return(-1);
+ }
+ p->u.conc.v_speed = 1;
+ } else {
+ dgap_err("speed valid only for lines or concentrators.");
+ return(-1);
+ }
+ DPR_INIT(("Adding SPEED (%s) to config...\n", s));
+ break;
+
+ case CONNECT:
+ if (p->type == CNODE) {
+ if ((s = dgap_getword(in)) == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.conc.connect = dgap_savestring(s);
+ p->u.conc.v_connect = 1;
+ }
+ DPR_INIT(("Adding CONNECT (%s) to config...\n", s));
+ break;
+ case PRINT: /* transparent print name prefix */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(PNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ if ( (s = dgap_getword(in)) == NULL ) {
+ dgap_err("unexpeced end of file");
+ return(-1);
+ }
+ if ( (p->u.printname = dgap_savestring(s)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ DPR_INIT(("Adding PRINT (%s) to config...\n", s));
+ break;
+
+ case CMAJOR: /* major number */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(JNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.majornumber = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for major number");
+ return(-1);
+ }
+ DPR_INIT(("Adding CMAJOR (%s) to config...\n", s));
+ break;
+
+ case ALTPIN: /* altpin setting */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(ANODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.altpin = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for altpin");
+ return(-1);
+ }
+ DPR_INIT(("Adding ALTPIN (%s) to config...\n", s));
+ break;
+
+ case USEINTR: /* enable interrupt setting */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(INTRNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.useintr = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for useintr");
+ return(-1);
+ }
+ DPR_INIT(("Adding USEINTR (%s) to config...\n", s));
+ break;
+
+ case TTSIZ: /* size of tty structure */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(TSNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.ttysize = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for ttysize");
+ return(-1);
+ }
+ DPR_INIT(("Adding TTSIZ (%s) to config...\n", s));
+ break;
+
+ case CHSIZ: /* channel structure size */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(CSNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.chsize = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for chsize");
+ return(-1);
+ }
+ DPR_INIT(("Adding CHSIZE (%s) to config...\n", s));
+ break;
+
+ case BSSIZ: /* board structure size */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(BSNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.bssize = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for bssize");
+ return(-1);
+ }
+ DPR_INIT(("Adding BSSIZ (%s) to config...\n", s));
+ break;
+
+ case UNTSIZ: /* sched structure size */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(USNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.unsize = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for schedsize");
+ return(-1);
+ }
+ DPR_INIT(("Adding UNTSIZ (%s) to config...\n", s));
+ break;
+
+ case F2SIZ: /* f2200 structure size */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(FSNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.f2size = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for f2200size");
+ return(-1);
+ }
+ DPR_INIT(("Adding F2SIZ (%s) to config...\n", s));
+ break;
+
+ case VPSIZ: /* vpix structure size */
+ if (dgap_checknode(p))
+ return(-1);
+ if ( (p->next = dgap_newnode(VSNODE)) == NULL ) {
+ dgap_err("out of memory");
+ return(-1);
+ }
+ p = p->next;
+ s = dgap_getword(in);
+ if (s == NULL) {
+ dgap_err("unexpected end of file");
+ return(-1);
+ }
+ p->u.vpixsize = simple_strtol(s, &s2, 0);
+ if ((int)strlen(s) > (int)(s2 - s)) {
+ dgap_err("bad number for vpixsize");
+ return(-1);
+ }
+ DPR_INIT(("Adding VPSIZ (%s) to config...\n", s));
+ break;
+ }
+ }
+}
+
+
+/*
+ * dgap_sindex: much like index(), but it looks for a match of any character in
+ * the group and returns a pointer to that character. If the first character of
+ * the group is a '^', this instead matches the first occurrence of a character
+ * not in the group.
+ */
+static char *dgap_sindex (char *string, char *group)
+{
+ char *ptr;
+
+ if (!string || !group)
+ return (char *) NULL;
+
+ if (*group == '^') {
+ group++;
+ for (; *string; string++) {
+ for (ptr = group; *ptr; ptr++) {
+ if (*ptr == *string)
+ break;
+ }
+ if (*ptr == '\0')
+ return string;
+ }
+ }
+ else {
+ for (; *string; string++) {
+ for (ptr = group; *ptr; ptr++) {
+ if (*ptr == *string)
+ return string;
+ }
+ }
+ }
+
+ return (char *) NULL;
+}
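
A minimal usage sketch of the matching rules described above (illustration only, assuming it sits alongside the static helpers in this file): without the '^' prefix dgap_sindex() returns the first character that is in the group, with it the first character that is not.

	/* Illustration only: hypothetical helper, not part of the patch. */
	static char *dgap_sindex_example(void)
	{
		char line[] = "  board\tPPCM";
		char *word, *sep;

		/* '^' group: first character NOT in " \t\n" -> the 'b' of "board" */
		word = dgap_sindex(line, "^ \t\n");

		/* plain group: first character IN " \t\n" -> the '\t' after "board" */
		sep = dgap_sindex(word, " \t\n");

		return sep;
	}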
+
+
+/*
+ * Get a token from the input file; return 0 if end of file is reached
+ */
+static int dgap_gettok(char **in, struct cnode *p)
+{
+ char *w;
+ struct toklist *t;
+
+ if (strstr(dgap_cword, "boar")) {
+ w = dgap_getword(in);
+ snprintf(dgap_cword, MAXCWORD, "%s", w);
+ for (t = dgap_tlist; t->token != 0; t++) {
+ if ( !strcmp(w, t->string)) {
+ return(t->token);
+ }
+ }
+ dgap_err("board !!type not specified");
+ return(1);
+ }
+ else {
+ while ( (w = dgap_getword(in)) != NULL ) {
+ snprintf(dgap_cword, MAXCWORD, "%s", w);
+ for (t = dgap_tlist; t->token != 0; t++) {
+ if ( !strcmp(w, t->string) )
+ return(t->token);
+ }
+ }
+ return(0);
+ }
+}
+
+
+/*
+ * get a word from the input stream; words are separated by whitespace.
+ */
+static char *dgap_getword(char **in)
+{
+ char *ret_ptr = *in;
+
+ char *ptr = dgap_sindex(*in, " \t\n");
+
+ /* If no word found, return null */
+ if (!ptr)
+ return NULL;
+
+ /* Mark new location for our buffer */
+ *ptr = '\0';
+ *in = ptr + 1;
+
+ /* Eat any extra spaces/tabs/newlines that might be present */
+ while (*in && **in && ((**in == ' ') || (**in == '\t') || (**in == '\n'))) {
+ **in = '\0';
+ *in = *in + 1;
+ }
+
+ return ret_ptr;
+}
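
A sketch of the calling convention (illustration only): dgap_getword() tokenizes the buffer in place, NUL-terminating each word and advancing the caller's pointer past any trailing whitespace. Note that the final word is only returned if it is followed by whitespace, since NULL is returned as soon as no separator character is found.

	/* Illustration only: walk a writable config image word by word. */
	static void dgap_getword_example(void)
	{
		char conf[] = "board\ttype PPCM\n";
		char *in = conf;
		char *w;

		/* w successively points at "board", "type" and "PPCM". */
		while ((w = dgap_getword(&in)) != NULL)
			;
	}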
+
+
+/*
+ * print a configuration parse error message.
+ */
+static void dgap_err(char *s)
+{
+ printk("DGAP: parse: %s\n", s);
+}
+
+
+/*
+ * allocate a new configuration node of type t
+ */
+static struct cnode *dgap_newnode(int t)
+{
+ struct cnode *n;
+ if ( (n = (struct cnode *) kmalloc(sizeof(struct cnode ), GFP_ATOMIC) ) != NULL) {
+ memset( (char *)n, 0, sizeof(struct cnode ) );
+ n->type = t;
+ }
+ return(n);
+}
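
The kmalloc()/memset() pair above amounts to a single zeroing allocation; an equivalent sketch using kzalloc() from <linux/slab.h> (illustration only, behavior unchanged):

	static struct cnode *dgap_newnode_sketch(int t)
	{
		struct cnode *n = kzalloc(sizeof(*n), GFP_ATOMIC);

		if (n)
			n->type = t;
		return n;
	}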
+
+
+/*
+ * dgap_checknode: see if all the necessary info has been supplied for a node
+ * before creating the next node.
+ */
+static int dgap_checknode(struct cnode *p)
+{
+ switch (p->type) {
+ case BNODE:
+ if (p->u.board.v_type == 0) {
+ dgap_err("board type !not specified");
+ return(1);
+ }
+
+ return(0);
+
+ case LNODE:
+ if (p->u.line.v_speed == 0) {
+ dgap_err("line speed not specified");
+ return(1);
+ }
+ return(0);
+
+ case CNODE:
+ if (p->u.conc.v_type == 0) {
+ dgap_err("concentrator type not specified");
+ return(1);
+ }
+ if (p->u.conc.v_speed == 0) {
+ dgap_err("concentrator line speed not specified");
+ return(1);
+ }
+ if (p->u.conc.v_nport == 0) {
+ dgap_err("number of ports on concentrator not specified");
+ return(1);
+ }
+ if (p->u.conc.v_id == 0) {
+ dgap_err("concentrator id letter not specified");
+ return(1);
+ }
+ return(0);
+
+ case MNODE:
+ if (p->u.module.v_type == 0) {
+ dgap_err("EBI module type not specified");
+ return(1);
+ }
+ if (p->u.module.v_nport == 0) {
+ dgap_err("number of ports on EBI module not specified");
+ return(1);
+ }
+ if (p->u.module.v_id == 0) {
+ dgap_err("EBI module id letter not specified");
+ return(1);
+ }
+ return(0);
+ }
+ return(0);
+}
+
+/*
+ * save a copy of a string in newly allocated kernel memory
+ */
+static char *dgap_savestring(char *s)
+{
+ char *p;
+ if ( (p = kmalloc(strlen(s) + 1, GFP_ATOMIC) ) != NULL) {
+ strcpy(p, s);
+ }
+ return(p);
+}
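
dgap_savestring() duplicates what kstrdup() from <linux/string.h> already provides (kstrdup() additionally tolerates a NULL argument); an equivalent sketch, illustration only:

	static char *dgap_savestring_sketch(char *s)
	{
		return kstrdup(s, GFP_ATOMIC);
	}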
+
+
+/*
+ * Given a board pointer, returns whether we should use interrupts or not.
+ */
+uint dgap_config_get_useintr(struct board_t *bd)
+{
+ struct cnode *p = NULL;
+
+ if (!bd)
+ return(0);
+
+ for (p = bd->bd_config; p; p = p->next) {
+ switch (p->type) {
+ case INTRNODE:
+ /*
+ * check for pcxr types.
+ */
+ return p->u.useintr;
+ default:
+ break;
+ }
+ }
+
+ /* If not found, then don't turn on interrupts. */
+ return 0;
+}
+
+
+/*
+ * Given a board pointer, returns whether we turn on altpin or not.
+ */
+uint dgap_config_get_altpin(struct board_t *bd)
+{
+ struct cnode *p = NULL;
+
+ if (!bd)
+ return(0);
+
+ for (p = bd->bd_config; p; p = p->next) {
+ switch (p->type) {
+ case ANODE:
+ /*
+ * check for pcxr types.
+ */
+ return p->u.altpin;
+ default:
+ break;
+ }
+ }
+
+ /* If not found, then don't turn on altpin. */
+ return 0;
+}
+
+
+
+/*
+ * Given a specific type of board: if found, detaches that board's chain of
+ * config nodes from the main list and returns the first occurrence.
+ */
+struct cnode *dgap_find_config(int type, int bus, int slot)
+{
+ struct cnode *p, *prev = NULL, *prev2 = NULL, *found = NULL;
+
+ p = &dgap_head;
+
+ while (p->next != NULL) {
+ prev = p;
+ p = p->next;
+
+ if (p->type == BNODE) {
+
+ if (p->u.board.type == type) {
+
+ if (p->u.board.v_pcibus && p->u.board.pcibus != bus) {
+ DPR(("Found matching board, but wrong bus position. System says bus %d, we want bus %ld\n",
+ bus, p->u.board.pcibus));
+ continue;
+ }
+ if (p->u.board.v_pcislot && p->u.board.pcislot != slot) {
+ DPR_INIT(("Found matching board, but wrong slot position. System says slot %d, we want slot %ld\n",
+ slot, p->u.board.pcislot));
+ continue;
+ }
+
+ DPR_INIT(("Matched type in config file\n"));
+
+ found = p;
+ /*
+ * Keep walking through the list until we find the next board.
+ */
+ while (p->next != NULL) {
+ prev2 = p;
+ p = p->next;
+ if (p->type == BNODE) {
+
+ /*
+ * Mark the end of our 1 board chain of configs.
+ */
+ prev2->next = NULL;
+
+ /*
+ * Link the "next" board to the previous board,
+ * effectively "unlinking" our board from the main config.
+ */
+ prev->next = p;
+
+ return found;
+ }
+ }
+ /*
+ * It must be the last board in the list.
+ */
+ prev->next = NULL;
+ return found;
+ }
+ }
+ }
+ return NULL;
+}
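
The list surgery above is easier to follow on a concrete shape (illustration only, assuming a parsed config containing two boards):

	/*
	 * Before:  head -> B1 -> t1 -> c1 -> B2 -> t2 -> ...
	 * dgap_find_config(type-of-B1, ...) returns B1 carrying the private
	 * chain  B1 -> t1 -> c1 -> NULL,  and relinks the main list to
	 * head -> B2 -> t2 -> ...   If B1 is the last board in the list,
	 * the main list is simply truncated just before it.
	 */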
+
+/*
+ * Given a board pointer, walks the config list, counting up
+ * all the ports the user specified should be on the board.
+ * (This does NOT mean they are all actually present right now, though.)
+ */
+uint dgap_config_get_number_of_ports(struct board_t *bd)
+{
+ int count = 0;
+ struct cnode *p = NULL;
+
+ if (!bd)
+ return(0);
+
+ for (p = bd->bd_config; p; p = p->next) {
+
+ switch (p->type) {
+ case BNODE:
+ /*
+ * check for pcxr types.
+ */
+ if (p->u.board.type > EPCFE)
+ count += p->u.board.nport;
+ break;
+ case CNODE:
+ count += p->u.conc.nport;
+ break;
+ case MNODE:
+ count += p->u.module.nport;
+ break;
+ }
+ }
+ return (count);
+}
+
+char *dgap_create_config_string(struct board_t *bd, char *string)
+{
+ char *ptr = string;
+ struct cnode *p = NULL;
+ struct cnode *q = NULL;
+ int speed;
+
+ if (!bd) {
+ *ptr = 0xff;
+ return string;
+ }
+
+ for (p = bd->bd_config; p; p = p->next) {
+
+ switch (p->type) {
+ case LNODE:
+ *ptr = '\0';
+ ptr++;
+ *ptr = p->u.line.speed;
+ ptr++;
+ break;
+ case CNODE:
+ /*
+ * Because the EPC/con concentrators can have EM modules
+ * hanging off of them, we have to walk ahead in the list
+ * and keep adding the number of ports on each EM to the config.
+ * UGH!
+ */
+ speed = p->u.conc.speed;
+ q = p->next;
+ if ((q != NULL) && (q->type == MNODE) ) {
+ *ptr = (p->u.conc.nport + 0x80);
+ ptr++;
+ p = q;
+ while ((q->next != NULL) && (q->next->type) == MNODE) {
+ *ptr = (q->u.module.nport + 0x80);
+ ptr++;
+ p = q;
+ q = q->next;
+ }
+ *ptr = q->u.module.nport;
+ ptr++;
+ } else {
+ *ptr = p->u.conc.nport;
+ ptr++;
+ }
+
+ *ptr = speed;
+ ptr++;
+ break;
+ }
+ }
+
+ *ptr = 0xff;
+ return string;
+}
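
Spelled out from the loop above (illustration only), the buffer handed back to the caller is a compact byte stream with one record per line or concentrator, terminated by 0xff:

	/*
	 * LNODE:                 0x00, <line speed>
	 * CNODE, no EM modules:  <conc nport>, <conc speed>
	 * CNODE with EM modules: <conc nport | 0x80>, <EM nport | 0x80> ...,
	 *                        <last EM nport>, <conc speed>
	 * end of string:         0xff
	 */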
+
+
+
+char *dgap_get_config_letters(struct board_t *bd, char *string)
+{
+ int found = FALSE;
+ char *ptr = string;
+ struct cnode *cptr = NULL;
+ int len = 0;
+ int left = MAXTTYNAMELEN;
+
+ if (!bd) {
+ return "<NULL>";
+ }
+
+ for (cptr = bd->bd_config; cptr; cptr = cptr->next) {
+
+ if ((cptr->type == BNODE) &&
+ ((cptr->u.board.type == APORT2_920P) || (cptr->u.board.type == APORT4_920P) ||
+ (cptr->u.board.type == APORT8_920P) || (cptr->u.board.type == PAPORT4) ||
+ (cptr->u.board.type == PAPORT8))) {
+
+ found = TRUE;
+ }
+
+ if (cptr->type == TNODE && found == TRUE) {
+ char *ptr1;
+ if (strstr(cptr->u.ttyname, "tty")) {
+ ptr1 = cptr->u.ttyname;
+ ptr1 += 3;
+ }
+ else {
+ ptr1 = cptr->u.ttyname;
+ }
+ if (ptr1) {
+ len = snprintf(ptr, left, "%s", ptr1);
+ left -= len;
+ ptr += len;
+ if (left <= 0)
+ break;
+ }
+ }
+
+ if (cptr->type == CNODE) {
+ if (cptr->u.conc.id) {
+ len = snprintf(ptr, left, "%s", cptr->u.conc.id);
+ left -= len;
+ ptr += len;
+ if (left <= 0)
+ break;
+ }
+ }
+
+ if (cptr->type == MNODE) {
+ if (cptr->u.module.id) {
+ len = snprintf(ptr, left, "%s", cptr->u.module.id);
+ left -= len;
+ ptr += len;
+ if (left <= 0)
+ break;
+ }
+ }
+ }
+
+ return string;
+}
diff --git a/drivers/staging/dgap/dgap_parse.h b/drivers/staging/dgap/dgap_parse.h
new file mode 100644
index 00000000000..8128c47343c
--- /dev/null
+++ b/drivers/staging/dgap/dgap_parse.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef _DGAP_PARSE_H
+#define _DGAP_PARSE_H
+
+#include "dgap_driver.h"
+
+extern int dgap_parsefile(char **in, int Remove);
+extern struct cnode *dgap_find_config(int type, int bus, int slot);
+extern uint dgap_config_get_number_of_ports(struct board_t *bd);
+extern char *dgap_create_config_string(struct board_t *bd, char *string);
+extern char *dgap_get_config_letters(struct board_t *bd, char *string);
+extern uint dgap_config_get_useintr(struct board_t *bd);
+extern uint dgap_config_get_altpin(struct board_t *bd);
+
+#endif
diff --git a/drivers/staging/dgap/dgap_pci.h b/drivers/staging/dgap/dgap_pci.h
new file mode 100644
index 00000000000..05ed374f08e
--- /dev/null
+++ b/drivers/staging/dgap/dgap_pci.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+/* $Id: dgap_pci.h,v 1.1 2009/10/23 14:01:57 markh Exp $ */
+
+#ifndef __DGAP_PCI_H
+#define __DGAP_PCI_H
+
+#define PCIMAX 32 /* maximum number of PCI boards */
+
+#define DIGI_VID 0x114F
+
+#define PCI_DEVICE_EPC_DID 0x0002
+#define PCI_DEVICE_XEM_DID 0x0004
+#define PCI_DEVICE_XR_DID 0x0005
+#define PCI_DEVICE_CX_DID 0x0006
+#define PCI_DEVICE_XRJ_DID 0x0009 /* PLX-based Xr adapter */
+#define PCI_DEVICE_XR_IBM_DID 0x0011 /* IBM 8-port Async Adapter */
+#define PCI_DEVICE_XR_BULL_DID 0x0013 /* BULL 8-port Async Adapter */
+#define PCI_DEVICE_XR_SAIP_DID 0x001c /* SAIP card - Xr adapter */
+#define PCI_DEVICE_XR_422_DID 0x0012 /* Xr-422 */
+#define PCI_DEVICE_920_2_DID 0x0034 /* XR-Plus 920 K, 2 port */
+#define PCI_DEVICE_920_4_DID 0x0026 /* XR-Plus 920 K, 4 port */
+#define PCI_DEVICE_920_8_DID 0x0027 /* XR-Plus 920 K, 8 port */
+#define PCI_DEVICE_EPCJ_DID 0x000a /* PLX 9060 chip for PCI */
+#define PCI_DEVICE_CX_IBM_DID 0x001b /* IBM 128-port Async Adapter */
+#define PCI_DEVICE_920_8_HP_DID 0x0058 /* HP XR-Plus 920 K, 8 port */
+#define PCI_DEVICE_XEM_HP_DID 0x0059 /* HP Xem PCI */
+
+#define PCI_DEVICE_XEM_NAME "AccelePort XEM"
+#define PCI_DEVICE_CX_NAME "AccelePort CX"
+#define PCI_DEVICE_XR_NAME "AccelePort Xr"
+#define PCI_DEVICE_XRJ_NAME "AccelePort Xr (PLX)"
+#define PCI_DEVICE_XR_SAIP_NAME "AccelePort Xr (SAIP)"
+#define PCI_DEVICE_920_2_NAME "AccelePort Xr920 2 port"
+#define PCI_DEVICE_920_4_NAME "AccelePort Xr920 4 port"
+#define PCI_DEVICE_920_8_NAME "AccelePort Xr920 8 port"
+#define PCI_DEVICE_XR_422_NAME "AccelePort Xr 422"
+#define PCI_DEVICE_EPCJ_NAME "AccelePort EPC (PLX)"
+#define PCI_DEVICE_XR_BULL_NAME "AccelePort Xr (BULL)"
+#define PCI_DEVICE_XR_IBM_NAME "AccelePort Xr (IBM)"
+#define PCI_DEVICE_CX_IBM_NAME "AccelePort CX (IBM)"
+#define PCI_DEVICE_920_8_HP_NAME "AccelePort Xr920 8 port (HP)"
+#define PCI_DEVICE_XEM_HP_NAME "AccelePort XEM (HP)"
+
+
+/*
+ * On the PCI boards, there is no I/O space allocated.
+ * The I/O registers will be in the first 3 bytes of the
+ * upper 2MB of the 4MB memory space. The board memory
+ * will be mapped into the low 2MB of the 4MB memory space.
+ */
+
+/* Potential location of PCI BIOS from E0000 to FFFFF */
+#define PCI_BIOS_SIZE 0x00020000
+
+/* Size of Memory and I/O for PCI (4MB) */
+#define PCI_RAM_SIZE 0x00400000
+
+/* Size of Memory (2MB) */
+#define PCI_MEM_SIZE 0x00200000
+
+/* Max PCI Window Size (2MB) */
+#define PCI_WIN_SIZE 0x00200000
+
+#define PCI_WIN_SHIFT 21 /* 21 bits max */
+
+/* Offset of I/O in Memory (2MB) */
+#define PCI_IO_OFFSET 0x00200000
+
+/* Size of IO (2MB) */
+#define PCI_IO_SIZE 0x00200000
+
+#endif
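
A hedged sketch of how these constants fit together (the membase parameter is an assumption for illustration, not taken from this header): the 4MB PCI window is split into a low 2MB of board memory and a high 2MB of I/O registers, so the I/O window starts PCI_IO_OFFSET bytes above the mapped memory base.

	/* Illustration only: derive the I/O window from a mapped 4MB BAR. */
	static u8 __iomem *dgap_pci_io_base_sketch(u8 __iomem *membase)
	{
		return membase + PCI_IO_OFFSET;	/* low 2MB = RAM, high 2MB = I/O */
	}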
diff --git a/drivers/staging/dgap/dgap_sysfs.c b/drivers/staging/dgap/dgap_sysfs.c
new file mode 100644
index 00000000000..94da06fcf7e
--- /dev/null
+++ b/drivers/staging/dgap/dgap_sysfs.c
@@ -0,0 +1,793 @@
+/*
+ * Copyright 2004 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ *
+ *
+ * $Id: dgap_sysfs.c,v 1.1 2009/10/23 14:01:57 markh Exp $
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/serial_reg.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+
+#include "dgap_driver.h"
+#include "dgap_conf.h"
+#include "dgap_parse.h"
+
+
+static ssize_t dgap_driver_version_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
+}
+static DRIVER_ATTR(version, S_IRUSR, dgap_driver_version_show, NULL);
+
+
+static ssize_t dgap_driver_boards_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", dgap_NumBoards);
+}
+static DRIVER_ATTR(boards, S_IRUSR, dgap_driver_boards_show, NULL);
+
+
+static ssize_t dgap_driver_maxboards_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
+}
+static DRIVER_ATTR(maxboards, S_IRUSR, dgap_driver_maxboards_show, NULL);
+
+
+static ssize_t dgap_driver_pollcounter_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%ld\n", dgap_poll_counter);
+}
+static DRIVER_ATTR(pollcounter, S_IRUSR, dgap_driver_pollcounter_show, NULL);
+
+
+static ssize_t dgap_driver_state_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", dgap_driver_state_text[dgap_driver_state]);
+}
+static DRIVER_ATTR(state, S_IRUSR, dgap_driver_state_show, NULL);
+
+
+static ssize_t dgap_driver_debug_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", dgap_debug);
+}
+
+static ssize_t dgap_driver_debug_store(struct device_driver *ddp, const char *buf, size_t count)
+{
+ sscanf(buf, "0x%x\n", &dgap_debug);
+ return count;
+}
+static DRIVER_ATTR(debug, (S_IRUSR | S_IWUSR), dgap_driver_debug_show, dgap_driver_debug_store);
+
+
+static ssize_t dgap_driver_rawreadok_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", dgap_rawreadok);
+}
+
+static ssize_t dgap_driver_rawreadok_store(struct device_driver *ddp, const char *buf, size_t count)
+{
+ sscanf(buf, "0x%x\n", &dgap_rawreadok);
+ return count;
+}
+static DRIVER_ATTR(rawreadok, (S_IRUSR | S_IWUSR), dgap_driver_rawreadok_show, dgap_driver_rawreadok_store);
+
+
+static ssize_t dgap_driver_pollrate_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%dms\n", dgap_poll_tick);
+}
+
+static ssize_t dgap_driver_pollrate_store(struct device_driver *ddp, const char *buf, size_t count)
+{
+ sscanf(buf, "%d\n", &dgap_poll_tick);
+ return count;
+}
+static DRIVER_ATTR(pollrate, (S_IRUSR | S_IWUSR), dgap_driver_pollrate_show, dgap_driver_pollrate_store);
+
+
+void dgap_create_driver_sysfiles(struct pci_driver *dgap_driver)
+{
+ int rc = 0;
+ struct device_driver *driverfs = &dgap_driver->driver;
+
+ rc |= driver_create_file(driverfs, &driver_attr_version);
+ rc |= driver_create_file(driverfs, &driver_attr_boards);
+ rc |= driver_create_file(driverfs, &driver_attr_maxboards);
+ rc |= driver_create_file(driverfs, &driver_attr_debug);
+ rc |= driver_create_file(driverfs, &driver_attr_rawreadok);
+ rc |= driver_create_file(driverfs, &driver_attr_pollrate);
+ rc |= driver_create_file(driverfs, &driver_attr_pollcounter);
+ rc |= driver_create_file(driverfs, &driver_attr_state);
+ if (rc) {
+ printk(KERN_ERR "DGAP: sysfs driver_create_file failed!\n");
+ }
+}
+
+
+void dgap_remove_driver_sysfiles(struct pci_driver *dgap_driver)
+{
+ struct device_driver *driverfs = &dgap_driver->driver;
+ driver_remove_file(driverfs, &driver_attr_version);
+ driver_remove_file(driverfs, &driver_attr_boards);
+ driver_remove_file(driverfs, &driver_attr_maxboards);
+ driver_remove_file(driverfs, &driver_attr_debug);
+ driver_remove_file(driverfs, &driver_attr_rawreadok);
+ driver_remove_file(driverfs, &driver_attr_pollrate);
+ driver_remove_file(driverfs, &driver_attr_pollcounter);
+ driver_remove_file(driverfs, &driver_attr_state);
+}
+
+
+#define DGAP_VERIFY_BOARD(p, bd) \
+ if (!p) \
+ return (0); \
+ \
+ bd = dev_get_drvdata(p); \
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC) \
+ return (0); \
+ if (bd->state != BOARD_READY) \
+ return (0); \
+
+
+static ssize_t dgap_ports_state_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGAP_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d %s\n", bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_open_count ? "Open" : "Closed");
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_state, S_IRUSR, dgap_ports_state_show, NULL);
+
+
+static ssize_t dgap_ports_baud_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGAP_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d %d\n", bd->channels[i]->ch_portnum, bd->channels[i]->ch_baud_info);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_baud, S_IRUSR, dgap_ports_baud_show, NULL);
+
+
+static ssize_t dgap_ports_msignals_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGAP_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ if (bd->channels[i]->ch_open_count) {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d %s %s %s %s %s %s\n", bd->channels[i]->ch_portnum,
+ (bd->channels[i]->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
+ (bd->channels[i]->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
+ (bd->channels[i]->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
+ (bd->channels[i]->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
+ (bd->channels[i]->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
+ (bd->channels[i]->ch_mistat & UART_MSR_RI) ? "RI" : "");
+ } else {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d\n", bd->channels[i]->ch_portnum);
+ }
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_msignals, S_IRUSR, dgap_ports_msignals_show, NULL);
+
+
+static ssize_t dgap_ports_iflag_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGAP_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_iflag);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_iflag, S_IRUSR, dgap_ports_iflag_show, NULL);
+
+
+static ssize_t dgap_ports_cflag_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGAP_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_cflag);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_cflag, S_IRUSR, dgap_ports_cflag_show, NULL);
+
+
+static ssize_t dgap_ports_oflag_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGAP_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_oflag);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_oflag, S_IRUSR, dgap_ports_oflag_show, NULL);
+
+
+static ssize_t dgap_ports_lflag_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGAP_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_lflag);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_lflag, S_IRUSR, dgap_ports_lflag_show, NULL);
+
+
+static ssize_t dgap_ports_digi_flag_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGAP_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_digi.digi_flags);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_digi_flag, S_IRUSR, dgap_ports_digi_flag_show, NULL);
+
+
+static ssize_t dgap_ports_rxcount_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGAP_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_rxcount);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_rxcount, S_IRUSR, dgap_ports_rxcount_show, NULL);
+
+
+static ssize_t dgap_ports_txcount_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGAP_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_txcount);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_txcount, S_IRUSR, dgap_ports_txcount_show, NULL);
+
+
+/* This function creates the sysfs files that export each signal status
+ * to sysfs; each value is put in a separate file.
+ */
+void dgap_create_ports_sysfiles(struct board_t *bd)
+{
+ int rc = 0;
+
+ dev_set_drvdata(&bd->pdev->dev, bd);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_state);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_baud);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
+ if (rc) {
+ printk(KERN_ERR "DGAP: sysfs device_create_file failed!\n");
+ }
+}
+
+
+/* removes all the sys files created for that port */
+void dgap_remove_ports_sysfiles(struct board_t *bd)
+{
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_state);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_baud);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
+}
+
+
+static ssize_t dgap_tty_state_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%s", un->un_open_count ? "Open" : "Closed");
+}
+static DEVICE_ATTR(state, S_IRUSR, dgap_tty_state_show, NULL);
+
+
+static ssize_t dgap_tty_baud_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_baud_info);
+}
+static DEVICE_ATTR(baud, S_IRUSR, dgap_tty_baud_show, NULL);
+
+
+static ssize_t dgap_tty_msignals_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ if (ch->ch_open_count) {
+ return snprintf(buf, PAGE_SIZE, "%s %s %s %s %s %s\n",
+ (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
+ (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
+ (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
+ (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
+ (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
+ (ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
+ }
+ return 0;
+}
+static DEVICE_ATTR(msignals, S_IRUSR, dgap_tty_msignals_show, NULL);
+
+
+static ssize_t dgap_tty_iflag_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag);
+}
+static DEVICE_ATTR(iflag, S_IRUSR, dgap_tty_iflag_show, NULL);
+
+
+static ssize_t dgap_tty_cflag_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag);
+}
+static DEVICE_ATTR(cflag, S_IRUSR, dgap_tty_cflag_show, NULL);
+
+
+static ssize_t dgap_tty_oflag_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag);
+}
+static DEVICE_ATTR(oflag, S_IRUSR, dgap_tty_oflag_show, NULL);
+
+
+static ssize_t dgap_tty_lflag_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag);
+}
+static DEVICE_ATTR(lflag, S_IRUSR, dgap_tty_lflag_show, NULL);
+
+
+static ssize_t dgap_tty_digi_flag_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags);
+}
+static DEVICE_ATTR(digi_flag, S_IRUSR, dgap_tty_digi_flag_show, NULL);
+
+
+static ssize_t dgap_tty_rxcount_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount);
+}
+static DEVICE_ATTR(rxcount, S_IRUSR, dgap_tty_rxcount_show, NULL);
+
+
+static ssize_t dgap_tty_txcount_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount);
+}
+static DEVICE_ATTR(txcount, S_IRUSR, dgap_tty_txcount_show, NULL);
+
+
+static ssize_t dgap_tty_name_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int cn;
+ int bn;
+ struct cnode *cptr = NULL;
+ int found = FALSE;
+ int ncount = 0;
+ int starto = 0;
+ int i = 0;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ bn = bd->boardnum;
+ cn = ch->ch_portnum;
+
+ for (cptr = bd->bd_config; cptr; cptr = cptr->next) {
+
+ if ((cptr->type == BNODE) &&
+ ((cptr->u.board.type == APORT2_920P) || (cptr->u.board.type == APORT4_920P) ||
+ (cptr->u.board.type == APORT8_920P) || (cptr->u.board.type == PAPORT4) ||
+ (cptr->u.board.type == PAPORT8))) {
+
+ found = TRUE;
+ if (cptr->u.board.v_start)
+ starto = cptr->u.board.start;
+ else
+ starto = 1;
+ }
+
+ if (cptr->type == TNODE && found == TRUE) {
+ char *ptr1;
+ if (strstr(cptr->u.ttyname, "tty")) {
+ ptr1 = cptr->u.ttyname;
+ ptr1 += 3;
+ }
+ else {
+ ptr1 = cptr->u.ttyname;
+ }
+
+ for (i = 0; i < dgap_config_get_number_of_ports(bd); i++) {
+ if (cn == i) {
+ return snprintf(buf, PAGE_SIZE, "%s%s%02d\n",
+ (un->un_type == DGAP_PRINT) ? "pr" : "tty",
+ ptr1, i + starto);
+ }
+ }
+ }
+
+ if (cptr->type == CNODE) {
+
+ for (i = 0; i < cptr->u.conc.nport; i++) {
+ if (cn == (i + ncount)) {
+
+ return snprintf(buf, PAGE_SIZE, "%s%s%02d\n",
+ (un->un_type == DGAP_PRINT) ? "pr" : "tty",
+ cptr->u.conc.id,
+ i + (cptr->u.conc.v_start ? cptr->u.conc.start : 1));
+ }
+ }
+
+ ncount += cptr->u.conc.nport;
+ }
+
+ if (cptr->type == MNODE) {
+
+ for (i = 0; i < cptr->u.module.nport; i++) {
+ if (cn == (i + ncount)) {
+ return snprintf(buf, PAGE_SIZE, "%s%s%02d\n",
+ (un->un_type == DGAP_PRINT) ? "pr" : "tty",
+ cptr->u.module.id,
+ i + (cptr->u.module.v_start ? cptr->u.module.start : 1));
+ }
+ }
+
+ ncount += cptr->u.module.nport;
+
+ }
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%s_dgap_%d_%d\n",
+ (un->un_type == DGAP_PRINT) ? "pr" : "tty", bn, cn);
+
+}
+static DEVICE_ATTR(custom_name, S_IRUSR, dgap_tty_name_show, NULL);
+
+
+static struct attribute *dgap_sysfs_tty_entries[] = {
+ &dev_attr_state.attr,
+ &dev_attr_baud.attr,
+ &dev_attr_msignals.attr,
+ &dev_attr_iflag.attr,
+ &dev_attr_cflag.attr,
+ &dev_attr_oflag.attr,
+ &dev_attr_lflag.attr,
+ &dev_attr_digi_flag.attr,
+ &dev_attr_rxcount.attr,
+ &dev_attr_txcount.attr,
+ &dev_attr_custom_name.attr,
+ NULL
+};
+
+
+static struct attribute_group dgap_tty_attribute_group = {
+ .name = NULL,
+ .attrs = dgap_sysfs_tty_entries,
+};
+
+
+
+
+void dgap_create_tty_sysfs(struct un_t *un, struct device *c)
+{
+ int ret;
+
+ ret = sysfs_create_group(&c->kobj, &dgap_tty_attribute_group);
+ if (ret) {
+ printk(KERN_ERR "dgap: failed to create sysfs tty device attributes.\n");
+ sysfs_remove_group(&c->kobj, &dgap_tty_attribute_group);
+ return;
+ }
+
+ dev_set_drvdata(c, un);
+
+}
+
+
+void dgap_remove_tty_sysfs(struct device *c)
+{
+ sysfs_remove_group(&c->kobj, &dgap_tty_attribute_group);
+}
diff --git a/drivers/staging/dgap/dgap_sysfs.h b/drivers/staging/dgap/dgap_sysfs.h
new file mode 100644
index 00000000000..dde690eec5c
--- /dev/null
+++ b/drivers/staging/dgap/dgap_sysfs.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef __DGAP_SYSFS_H
+#define __DGAP_SYSFS_H
+
+#include "dgap_driver.h"
+
+#include <linux/device.h>
+
+struct board_t;
+struct channel_t;
+struct un_t;
+struct pci_driver;
+struct class_device;
+
+extern void dgap_create_ports_sysfiles(struct board_t *bd);
+extern void dgap_remove_ports_sysfiles(struct board_t *bd);
+
+extern void dgap_create_driver_sysfiles(struct pci_driver *);
+extern void dgap_remove_driver_sysfiles(struct pci_driver *);
+
+extern int dgap_tty_class_init(void);
+extern int dgap_tty_class_destroy(void);
+
+extern void dgap_create_tty_sysfs(struct un_t *un, struct device *c);
+extern void dgap_remove_tty_sysfs(struct device *c);
+
+
+#endif
diff --git a/drivers/staging/dgap/dgap_trace.c b/drivers/staging/dgap/dgap_trace.c
new file mode 100644
index 00000000000..0f9a9569ea2
--- /dev/null
+++ b/drivers/staging/dgap/dgap_trace.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ */
+
+/* $Id: dgap_trace.c,v 1.1 2009/10/23 14:01:57 markh Exp $ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h> /* For jiffies, task states */
+#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
+#include <linux/vmalloc.h>
+
+#include "dgap_driver.h"
+
+#define TRC_TO_CONSOLE 1
+
+/* file level globals */
+static char *dgap_trcbuf; /* the ringbuffer */
+
+#if defined(TRC_TO_KMEM)
+static int dgap_trcbufi = 0; /* index of the tilde marking the current end of the trace */
+#endif
+
+extern int dgap_trcbuf_size; /* size of the ringbuffer */
+
+#if defined(TRC_TO_KMEM)
+static DEFINE_SPINLOCK(dgap_tracef_lock);
+#endif
+
+#if 0
+
+#if !defined(TRC_TO_KMEM) && !defined(TRC_TO_CONSOLE)
+void dgap_tracef(const char *fmt, ...)
+{
+ return;
+}
+
+#else /* !defined(TRC_TO_KMEM) && !defined(TRC_TO_CONSOLE) */
+
+void dgap_tracef(const char *fmt, ...)
+{
+ va_list ap;
+ char buf[TRC_MAXMSG+1];
+ size_t lenbuf;
+ int i;
+ static int failed = FALSE;
+# if defined(TRC_TO_KMEM)
+ unsigned long flags;
+#endif
+
+ if(failed)
+ return;
+# if defined(TRC_TO_KMEM)
+ DGAP_LOCK(dgap_tracef_lock, flags);
+#endif
+
+ /* Format buf using fmt and arguments contained in ap. */
+ va_start(ap, fmt);
+ i = vsprintf(buf, fmt, ap);
+ va_end(ap);
+ lenbuf = strlen(buf);
+
+# if defined(TRC_TO_KMEM)
+ {
+ static int initd=0;
+
+ /*
+ * Now, in addition to (or instead of) printing this stuff out
+ * (which is a buffered operation), also tuck it away into a
+ * corner of memory which can be examined post-crash in kdb.
+ */
+ if (!initd) {
+ dgap_trcbuf = (char *) vmalloc(dgap_trcbuf_size);
+ if(!dgap_trcbuf) {
+ failed = TRUE;
+ printk("dgap: tracing init failed!\n");
+ return;
+ }
+
+ memset(dgap_trcbuf, '\0', dgap_trcbuf_size);
+ dgap_trcbufi = 0;
+ initd++;
+
+ printk("dgap: tracing enabled - " TRC_DTRC
+ " 0x%lx 0x%x\n",
+ (unsigned long)dgap_trcbuf,
+ dgap_trcbuf_size);
+ }
+
+# if defined(TRC_ON_OVERFLOW_WRAP_AROUND)
+ /*
+ * This is the less CPU-intensive way to do things. We simply
+ * wrap around before we fall off the end of the buffer. A
+ * tilde (~) demarcates the current end of the trace.
+ *
+ * This method should be used if you are concerned about race
+ * conditions as it is less likely to affect the timing of
+ * things.
+ */
+
+ if (dgap_trcbufi + lenbuf >= dgap_trcbuf_size) {
+ /* We are wrapping, so wipe out the last tilde. */
+ dgap_trcbuf[dgap_trcbufi] = '\0';
+ /* put the new string at the beginning of the buffer */
+ dgap_trcbufi = 0;
+ }
+
+ strcpy(&dgap_trcbuf[dgap_trcbufi], buf);
+ dgap_trcbufi += lenbuf;
+ dgap_trcbuf[dgap_trcbufi] = '~';
+
+# elif defined(TRC_ON_OVERFLOW_SHIFT_BUFFER)
+ /*
+ * This is the more CPU-intensive way to do things. If we
+ * venture into the last 1/8 of the buffer, we shift the
+ * last 7/8 of the buffer forward, wiping out the first 1/8.
+ * Advantage: No wrap-around, only truncation from the
+ * beginning.
+ *
+ * This method should not be used if you are concerned about
+ * timing changes affecting the behaviour of the driver (ie,
+ * race conditions).
+ */
+ strcpy(&dgap_trcbuf[dgap_trcbufi], buf);
+ dgap_trcbufi += lenbuf;
+ dgap_trcbuf[dgap_trcbufi] = '~';
+ dgap_trcbuf[dgap_trcbufi+1] = '\0';
+
+ /* If we're near the end of the trace buffer... */
+ if (dgap_trcbufi > (dgap_trcbuf_size/8)*7) {
+ /* Wipe out the first eighth to make some more room. */
+ strcpy(dgap_trcbuf, &dgap_trcbuf[dgap_trcbuf_size/8]);
+ dgap_trcbufi = strlen(dgap_trcbuf)-1;
+ /* Plop overflow message at the top of the buffer. */
+ bcopy(TRC_OVERFLOW, dgap_trcbuf, strlen(TRC_OVERFLOW));
+ }
+# else
+# error "TRC_ON_OVERFLOW_WRAP_AROUND or TRC_ON_OVERFLOW_SHIFT_BUFFER?"
+# endif
+ }
+ DGAP_UNLOCK(dgap_tracef_lock, flags);
+
+# endif /* defined(TRC_TO_KMEM) */
+}
+
+#endif /* !defined(TRC_TO_KMEM) && !defined(TRC_TO_CONSOLE) */
+
+#endif
+
+/*
+ * dgap_tracer_free()
+ *
+ * Free the trace ring buffer, if one was ever allocated.
+ */
+void dgap_tracer_free(void)
+{
+ if(dgap_trcbuf)
+ vfree(dgap_trcbuf);
+}
diff --git a/drivers/staging/dgap/dgap_trace.h b/drivers/staging/dgap/dgap_trace.h
new file mode 100644
index 00000000000..b21f46198e7
--- /dev/null
+++ b/drivers/staging/dgap/dgap_trace.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ *****************************************************************************
+ * Header file for dgap_trace.c
+ *
+ * $Id: dgap_trace.h,v 1.1 2009/10/23 14:01:57 markh Exp $
+ */
+
+#ifndef __DGAP_TRACE_H
+#define __DGAP_TRACE_H
+
+#include "dgap_driver.h"
+
+void dgap_tracef(const char *fmt, ...);
+void dgap_tracer_free(void);
+
+#endif
+
diff --git a/drivers/staging/dgap/dgap_tty.c b/drivers/staging/dgap/dgap_tty.c
new file mode 100644
index 00000000000..b906db30b61
--- /dev/null
+++ b/drivers/staging/dgap/dgap_tty.c
@@ -0,0 +1,3597 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ */
+
+/************************************************************************
+ *
+ * This file implements the tty driver functionality for the
+ * FEP5 based product lines.
+ *
+ ************************************************************************
+ *
+ * $Id: dgap_tty.c,v 1.3 2011/06/23 12:11:31 markh Exp $
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/sched.h> /* For jiffies, task states */
+#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_reg.h>
+#include <linux/slab.h>
+#include <linux/delay.h> /* For udelay */
+#include <asm/uaccess.h> /* For copy_from_user/copy_to_user */
+#include <asm/io.h> /* For read[bwl]/write[bwl] */
+#include <linux/pci.h>
+
+#include "dgap_driver.h"
+#include "dgap_tty.h"
+#include "dgap_types.h"
+#include "dgap_fep5.h"
+#include "dgap_parse.h"
+#include "dgap_conf.h"
+#include "dgap_sysfs.h"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
+#define init_MUTEX(sem) sema_init(sem, 1)
+#define DECLARE_MUTEX(name) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+#endif
+
+/*
+ * internal variables
+ */
+static struct board_t *dgap_BoardsByMajor[256];
+static uchar *dgap_TmpWriteBuf = NULL;
+static DECLARE_MUTEX(dgap_TmpWriteSem);
+
+/*
+ * Default transparent print information.
+ */
+static struct digi_t dgap_digi_init = {
+ .digi_flags = DIGI_COOK, /* Flags */
+ .digi_maxcps = 100, /* Max CPS */
+ .digi_maxchar = 50, /* Max chars in print queue */
+ .digi_bufsize = 100, /* Printer buffer size */
+ .digi_onlen = 4, /* size of printer on string */
+ .digi_offlen = 4, /* size of printer off string */
+ .digi_onstr = "\033[5i", /* ANSI printer on string ] */
+ .digi_offstr = "\033[4i", /* ANSI printer off string ] */
+ .digi_term = "ansi" /* default terminal type */
+};
+
+
+/*
+ * Define a local default termios struct. All ports will be created
+ * with this termios initially.
+ *
+ * This defines a raw port at 9600 baud, 8 data bits, no parity,
+ * 1 stop bit.
+ */
+
+static struct ktermios DgapDefaultTermios =
+{
+ .c_iflag = (DEFAULT_IFLAGS), /* iflags */
+ .c_oflag = (DEFAULT_OFLAGS), /* oflags */
+ .c_cflag = (DEFAULT_CFLAGS), /* cflags */
+ .c_lflag = (DEFAULT_LFLAGS), /* lflags */
+ .c_cc = INIT_C_CC,
+ .c_line = 0,
+};
+
+/* Our function prototypes */
+static int dgap_tty_open(struct tty_struct *tty, struct file *file);
+static void dgap_tty_close(struct tty_struct *tty, struct file *file);
+static int dgap_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch);
+static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
+static int dgap_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retinfo);
+static int dgap_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_info);
+static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo);
+static int dgap_tty_digisetedelay(struct tty_struct *tty, int __user *new_info);
+static int dgap_tty_write_room(struct tty_struct* tty);
+static int dgap_tty_chars_in_buffer(struct tty_struct* tty);
+static void dgap_tty_start(struct tty_struct *tty);
+static void dgap_tty_stop(struct tty_struct *tty);
+static void dgap_tty_throttle(struct tty_struct *tty);
+static void dgap_tty_unthrottle(struct tty_struct *tty);
+static void dgap_tty_flush_chars(struct tty_struct *tty);
+static void dgap_tty_flush_buffer(struct tty_struct *tty);
+static void dgap_tty_hangup(struct tty_struct *tty);
+static int dgap_wait_for_drain(struct tty_struct *tty);
+static int dgap_set_modem_info(struct tty_struct *tty, unsigned int command, unsigned int __user *value);
+static int dgap_get_modem_info(struct channel_t *ch, unsigned int __user *value);
+static int dgap_tty_digisetcustombaud(struct tty_struct *tty, int __user *new_info);
+static int dgap_tty_digigetcustombaud(struct tty_struct *tty, int __user *retinfo);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+static int dgap_tty_tiocmget(struct tty_struct *tty);
+static int dgap_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear);
+#else
+static int dgap_tty_tiocmget(struct tty_struct *tty, struct file *file);
+static int dgap_tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear);
+#endif
+static int dgap_tty_send_break(struct tty_struct *tty, int msec);
+static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout);
+static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf, int count);
+static void dgap_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios);
+static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c);
+static void dgap_tty_send_xchar(struct tty_struct *tty, char ch);
+
+static const struct tty_operations dgap_tty_ops = {
+ .open = dgap_tty_open,
+ .close = dgap_tty_close,
+ .write = dgap_tty_write,
+ .write_room = dgap_tty_write_room,
+ .flush_buffer = dgap_tty_flush_buffer,
+ .chars_in_buffer = dgap_tty_chars_in_buffer,
+ .flush_chars = dgap_tty_flush_chars,
+ .ioctl = dgap_tty_ioctl,
+ .set_termios = dgap_tty_set_termios,
+ .stop = dgap_tty_stop,
+ .start = dgap_tty_start,
+ .throttle = dgap_tty_throttle,
+ .unthrottle = dgap_tty_unthrottle,
+ .hangup = dgap_tty_hangup,
+ .put_char = dgap_tty_put_char,
+ .tiocmget = dgap_tty_tiocmget,
+ .tiocmset = dgap_tty_tiocmset,
+ .break_ctl = dgap_tty_send_break,
+ .wait_until_sent = dgap_tty_wait_until_sent,
+ .send_xchar = dgap_tty_send_xchar
+};
+
+
+
+
+
+/************************************************************************
+ *
+ * TTY Initialization/Cleanup Functions
+ *
+ ************************************************************************/
+
+/*
+ * dgap_tty_preinit()
+ *
+ * Initialize any global tty related data before we download any boards.
+ */
+int dgap_tty_preinit(void)
+{
+ unsigned long flags;
+
+ DGAP_LOCK(dgap_global_lock, flags);
+
+ /*
+ * Allocate a buffer for doing the copy from user space to
+ * kernel space in dgap_tty_write(). We only use one buffer and
+ * control access to it with a semaphore. If we are paging, we
+ * are already in trouble so one buffer won't hurt much anyway.
+ */
+ dgap_TmpWriteBuf = kmalloc(WRITEBUFLEN, GFP_ATOMIC);
+
+ if (!dgap_TmpWriteBuf) {
+ DGAP_UNLOCK(dgap_global_lock, flags);
+ DPR_INIT(("unable to allocate tmp write buf"));
+ return (-ENOMEM);
+ }
+
+ DGAP_UNLOCK(dgap_global_lock, flags);
+ return(0);
+}
+
+
+/*
+ * dgap_tty_register()
+ *
+ * Init the tty subsystem for this board.
+ */
+int dgap_tty_register(struct board_t *brd)
+{
+ int rc = 0;
+
+ DPR_INIT(("tty_register start"));
+
+ brd->SerialDriver = alloc_tty_driver(MAXPORTS);
+
+ snprintf(brd->SerialName, MAXTTYNAMELEN, "tty_dgap_%d_", brd->boardnum);
+ brd->SerialDriver->name = brd->SerialName;
+ brd->SerialDriver->name_base = 0;
+ brd->SerialDriver->major = 0;
+ brd->SerialDriver->minor_start = 0;
+ brd->SerialDriver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->SerialDriver->subtype = SERIAL_TYPE_NORMAL;
+ brd->SerialDriver->init_termios = DgapDefaultTermios;
+ brd->SerialDriver->driver_name = DRVSTR;
+ brd->SerialDriver->flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
+
+ /* The kernel wants space to store pointers to tty_structs */
+ brd->SerialDriver->ttys = dgap_driver_kzmalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
+ if (!brd->SerialDriver->ttys)
+ return(-ENOMEM);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ brd->SerialDriver->refcount = brd->TtyRefCnt;
+#endif
+
+ /*
+ * Entry points for driver. Called by the kernel from
+ * tty_io.c and n_tty.c.
+ */
+ tty_set_operations(brd->SerialDriver, &dgap_tty_ops);
+
+ /*
+ * If we're doing transparent print, we have to do all of the above
+ * again, separately so we don't get the LD confused about what major
+ * we are when we get into the dgap_tty_open() routine.
+ */
+ brd->PrintDriver = alloc_tty_driver(MAXPORTS);
+
+ snprintf(brd->PrintName, MAXTTYNAMELEN, "pr_dgap_%d_", brd->boardnum);
+ brd->PrintDriver->name = brd->PrintName;
+ brd->PrintDriver->name_base = 0;
+ brd->PrintDriver->major = 0;
+ brd->PrintDriver->minor_start = 0;
+ brd->PrintDriver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->PrintDriver->subtype = SERIAL_TYPE_NORMAL;
+ brd->PrintDriver->init_termios = DgapDefaultTermios;
+ brd->PrintDriver->driver_name = DRVSTR;
+ brd->PrintDriver->flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
+
+ /* The kernel wants space to store pointers to tty_structs */
+ brd->PrintDriver->ttys = dgap_driver_kzmalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
+ if (!brd->PrintDriver->ttys)
+ return(-ENOMEM);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ brd->PrintDriver->refcount = brd->TtyRefCnt;
+#endif
+
+ /*
+ * Entry points for driver. Called by the kernel from
+ * tty_io.c and n_tty.c.
+ */
+ tty_set_operations(brd->PrintDriver, &dgap_tty_ops);
+
+ if (!brd->dgap_Major_Serial_Registered) {
+ /* Register tty devices */
+ rc = tty_register_driver(brd->SerialDriver);
+ if (rc < 0) {
+ APR(("Can't register tty device (%d)\n", rc));
+ return(rc);
+ }
+ brd->dgap_Major_Serial_Registered = TRUE;
+ dgap_BoardsByMajor[brd->SerialDriver->major] = brd;
+ brd->dgap_Serial_Major = brd->SerialDriver->major;
+ }
+
+ if (!brd->dgap_Major_TransparentPrint_Registered) {
+ /* Register Transparent Print devices */
+ rc = tty_register_driver(brd->PrintDriver);
+ if (rc < 0) {
+ APR(("Can't register Transparent Print device (%d)\n", rc));
+ return(rc);
+ }
+ brd->dgap_Major_TransparentPrint_Registered = TRUE;
+ dgap_BoardsByMajor[brd->PrintDriver->major] = brd;
+ brd->dgap_TransparentPrint_Major = brd->PrintDriver->major;
+ }
+
+ DPR_INIT(("DGAP REGISTER TTY: MAJORS: %d %d\n", brd->SerialDriver->major,
+ brd->PrintDriver->major));
+
+ return (rc);
+}
+
+
+/*
+ * dgap_tty_init()
+ *
+ * Init the tty subsystem. Called once per board after board has been
+ * downloaded and init'ed.
+ */
+int dgap_tty_init(struct board_t *brd)
+{
+ int i;
+ int tlw;
+ uint true_count = 0;
+ uchar *vaddr;
+ uchar modem = 0;
+ struct channel_t *ch;
+ struct bs_t *bs;
+ struct cm_t *cm;
+
+ if (!brd)
+ return (-ENXIO);
+
+ DPR_INIT(("dgap_tty_init start\n"));
+
+ /*
+ * Initialize board structure elements.
+ */
+
+ vaddr = brd->re_map_membase;
+ true_count = readw((vaddr + NCHAN));
+
+ brd->nasync = dgap_config_get_number_of_ports(brd);
+
+ if (!brd->nasync) {
+ brd->nasync = brd->maxports;
+ }
+
+ if (brd->nasync > brd->maxports) {
+ brd->nasync = brd->maxports;
+ }
+
+ if (true_count != brd->nasync) {
+ if ((brd->type == PPCM) && ((true_count == 64) || (true_count == 0))) {
+ APR(("***WARNING**** %s configured for %d ports, has %d ports.\nPlease make SURE the EBI cable running from the card\nto each EM module is plugged into EBI IN!\n",
+ brd->name, brd->nasync, true_count));
+ }
+ else {
+ APR(("***WARNING**** %s configured for %d ports, has %d ports.\n",
+ brd->name, brd->nasync, true_count));
+ }
+
+ brd->nasync = true_count;
+
+ /* If no ports, don't bother going any further */
+ if (!brd->nasync) {
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ return(-ENXIO);
+ }
+ }
+
+ /*
+ * Allocate channel memory that might not have been allocated
+ * when the driver was first loaded.
+ */
+ for (i = 0; i < brd->nasync; i++) {
+ if (!brd->channels[i]) {
+ brd->channels[i] = dgap_driver_kzmalloc(sizeof(struct channel_t), GFP_ATOMIC);
+ if (!brd->channels[i]) {
+ DPR_CORE(("%s:%d Unable to allocate memory for channel struct\n",
+ __FILE__, __LINE__));
+ }
+ }
+ }
+
+ ch = brd->channels[0];
+ vaddr = brd->re_map_membase;
+
+ bs = (struct bs_t *) ((ulong) vaddr + CHANBUF);
+ cm = (struct cm_t *) ((ulong) vaddr + CMDBUF);
+
+ brd->bd_bs = bs;
+
+ /* Set up channel variables */
+ for (i = 0; i < brd->nasync; i++, bs++) {
+
+ ch = brd->channels[i];
+ if (!ch)
+ continue;
+
+ DGAP_SPINLOCK_INIT(ch->ch_lock);
+
+ /* Store all our magic numbers */
+ ch->magic = DGAP_CHANNEL_MAGIC;
+ ch->ch_tun.magic = DGAP_UNIT_MAGIC;
+ ch->ch_tun.un_type = DGAP_SERIAL;
+ ch->ch_tun.un_ch = ch;
+ ch->ch_tun.un_dev = i;
+
+ ch->ch_pun.magic = DGAP_UNIT_MAGIC;
+ ch->ch_pun.un_type = DGAP_PRINT;
+ ch->ch_pun.un_ch = ch;
+ ch->ch_pun.un_dev = i;
+
+ ch->ch_vaddr = vaddr;
+ ch->ch_bs = bs;
+ ch->ch_cm = cm;
+ ch->ch_bd = brd;
+ ch->ch_portnum = i;
+ ch->ch_digi = dgap_digi_init;
+
+ /*
+ * Set up digi dsr and dcd bits based on altpin flag.
+ */
+ if (dgap_config_get_altpin(brd)) {
+ ch->ch_dsr = DM_CD;
+ ch->ch_cd = DM_DSR;
+ ch->ch_digi.digi_flags |= DIGI_ALTPIN;
+ }
+ else {
+ ch->ch_cd = DM_CD;
+ ch->ch_dsr = DM_DSR;
+ }
+
+ ch->ch_taddr = vaddr + ((ch->ch_bs->tx_seg) << 4);
+ ch->ch_raddr = vaddr + ((ch->ch_bs->rx_seg) << 4);
+ ch->ch_tx_win = 0;
+ ch->ch_rx_win = 0;
+ ch->ch_tsize = readw(&(ch->ch_bs->tx_max)) + 1;
+ ch->ch_rsize = readw(&(ch->ch_bs->rx_max)) + 1;
+ ch->ch_tstart = 0;
+ ch->ch_rstart = 0;
+
+ /* .25 second delay */
+ ch->ch_close_delay = 250;
+
+ /*
+ * Set queue water marks, interrupt mask,
+ * and general tty parameters.
+ */
+ ch->ch_tlw = tlw = ch->ch_tsize >= 2000 ? ((ch->ch_tsize * 5) / 8) : ch->ch_tsize / 2;
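+
+ /*
+ * Worked example (illustrative sizes): a 4096-byte transmit
+ * buffer gets a low-water mark of 4096 * 5 / 8 = 2560, while a
+ * 1024-byte buffer falls below the 2000-byte cutoff and gets
+ * 1024 / 2 = 512.
+ */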
+
+ dgap_cmdw(ch, STLOW, tlw, 0);
+
+ dgap_cmdw(ch, SRLOW, ch->ch_rsize / 2, 0);
+
+ dgap_cmdw(ch, SRHIGH, 7 * ch->ch_rsize / 8, 0);
+
+ ch->ch_mistat = readb(&(ch->ch_bs->m_stat));
+
+ init_waitqueue_head(&ch->ch_flags_wait);
+ init_waitqueue_head(&ch->ch_tun.un_flags_wait);
+ init_waitqueue_head(&ch->ch_pun.un_flags_wait);
+ init_waitqueue_head(&ch->ch_sniff_wait);
+
+ /* Turn on all modem interrupts for now */
+ modem = (DM_CD | DM_DSR | DM_CTS | DM_RI);
+ writeb(modem, &(ch->ch_bs->m_int));
+
+ /*
+ * Set edelay to 0 if interrupts are turned on,
+ * otherwise set edelay to the usual 100.
+ */
+ if (brd->intr_used)
+ writew(0, &(ch->ch_bs->edelay));
+ else
+ writew(100, &(ch->ch_bs->edelay));
+
+ writeb(1, &(ch->ch_bs->idata));
+ }
+
+
+ DPR_INIT(("dgap_tty_init finish\n"));
+
+ return (0);
+}
+
+
+/*
+ * dgap_tty_post_uninit()
+ *
+ * Uninitialize any global tty-related data.
+ */
+void dgap_tty_post_uninit(void)
+{
+ if (dgap_TmpWriteBuf) {
+ kfree(dgap_TmpWriteBuf);
+ dgap_TmpWriteBuf = NULL;
+ }
+}
+
+
+/*
+ * dgap_tty_uninit()
+ *
+ * Uninitialize the TTY portion of this driver. Free all memory and
+ * resources.
+ */
+void dgap_tty_uninit(struct board_t *brd)
+{
+ int i = 0;
+
+ if (brd->dgap_Major_Serial_Registered) {
+ dgap_BoardsByMajor[brd->SerialDriver->major] = NULL;
+ brd->dgap_Serial_Major = 0;
+ for (i = 0; i < brd->nasync; i++) {
+ dgap_remove_tty_sysfs(brd->channels[i]->ch_tun.un_sysfs);
+ tty_unregister_device(brd->SerialDriver, i);
+ }
+ tty_unregister_driver(brd->SerialDriver);
+ if (brd->SerialDriver->ttys) {
+ kfree(brd->SerialDriver->ttys);
+ brd->SerialDriver->ttys = NULL;
+ }
+ put_tty_driver(brd->SerialDriver);
+ brd->dgap_Major_Serial_Registered = FALSE;
+ }
+
+ if (brd->dgap_Major_TransparentPrint_Registered) {
+ dgap_BoardsByMajor[brd->PrintDriver->major] = NULL;
+ brd->dgap_TransparentPrint_Major = 0;
+ for (i = 0; i < brd->nasync; i++) {
+ dgap_remove_tty_sysfs(brd->channels[i]->ch_pun.un_sysfs);
+ tty_unregister_device(brd->PrintDriver, i);
+ }
+ tty_unregister_driver(brd->PrintDriver);
+ if (brd->PrintDriver->ttys) {
+ kfree(brd->PrintDriver->ttys);
+ brd->PrintDriver->ttys = NULL;
+ }
+ put_tty_driver(brd->PrintDriver);
+ brd->dgap_Major_TransparentPrint_Registered = FALSE;
+ }
+}
+
+
+#define TMPBUFLEN (1024)
+
+/*
+ * dgap_sniff_nowait_nolock() - Dump data out to the "sniff" buffer if the
+ * proc sniff file is opened...
+ */
+static void dgap_sniff_nowait_nolock(struct channel_t *ch, uchar *text, uchar *buf, int len)
+{
+ struct timeval tv;
+ int n;
+ int r;
+ int nbuf;
+ int i;
+ int tmpbuflen;
+ char tmpbuf[TMPBUFLEN];
+ char *p = tmpbuf;
+ int too_much_data;
+
+ /* Leave if sniff not open */
+ if (!(ch->ch_sniff_flags & SNIFF_OPEN))
+ return;
+
+ do_gettimeofday(&tv);
+
+ /* Create our header for data dump */
+ p += sprintf(p, "<%ld %ld><%s><", tv.tv_sec, tv.tv_usec, text);
+ tmpbuflen = p - tmpbuf;
+
+ do {
+ too_much_data = 0;
+
+ for (i = 0; i < len && tmpbuflen < (TMPBUFLEN - 4); i++) {
+ p += sprintf(p, "%02x ", *buf);
+ buf++;
+ tmpbuflen = p - tmpbuf;
+ }
+
+ if (tmpbuflen < (TMPBUFLEN - 4)) {
+ if (i > 0)
+ p += sprintf(p - 1, "%s\n", ">");
+ else
+ p += sprintf(p, "%s\n", ">");
+ } else {
+ too_much_data = 1;
+ len -= i;
+ }
+
+ nbuf = strlen(tmpbuf);
+ p = tmpbuf;
+
+ /*
+ * Loop while data remains.
+ */
+ while (nbuf > 0 && ch->ch_sniff_buf != 0) {
+ /*
+ * Determine the amount of available space left in the
+ * buffer. If there's none, wait until some appears.
+ */
+ n = (ch->ch_sniff_out - ch->ch_sniff_in - 1) & SNIFF_MASK;
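+
+ /*
+ * Worked example (illustrative sizes, assuming SNIFF_MASK is
+ * SNIFF_MAX - 1 with SNIFF_MAX a power of two): with
+ * SNIFF_MAX = 4096, out = 100 and in = 4000,
+ * (100 - 4000 - 1) & 4095 = 195 bytes of room remain.
+ */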
+
+ /*
+ * If there is no space left to write to in our sniff buffer,
+ * we have no choice but to drop the data.
+ * We *cannot* sleep here waiting for space, because this
+ * function was probably called by the interrupt/timer routines!
+ */
+ if (n == 0) {
+ return;
+ }
+
+ /*
+ * Copy as much data as will fit.
+ */
+
+ if (n > nbuf)
+ n = nbuf;
+
+ r = SNIFF_MAX - ch->ch_sniff_in;
+
+ if (r <= n) {
+ memcpy(ch->ch_sniff_buf + ch->ch_sniff_in, p, r);
+
+ n -= r;
+ ch->ch_sniff_in = 0;
+ p += r;
+ nbuf -= r;
+ }
+
+ memcpy(ch->ch_sniff_buf + ch->ch_sniff_in, p, n);
+
+ ch->ch_sniff_in += n;
+ p += n;
+ nbuf -= n;
+
+ /*
+ * Wakeup any thread waiting for data
+ */
+ if (ch->ch_sniff_flags & SNIFF_WAIT_DATA) {
+ ch->ch_sniff_flags &= ~SNIFF_WAIT_DATA;
+ wake_up_interruptible(&ch->ch_sniff_wait);
+ }
+ }
+
+ /*
+ * If the user sent us too much data to push into our tmpbuf,
+ * we need to keep looping around on all the data.
+ */
+ if (too_much_data) {
+ p = tmpbuf;
+ tmpbuflen = 0;
+ }
+
+ } while (too_much_data);
+}
+
+
+/*=======================================================================
+ *
+ * dgap_input - Process received data.
+ *
+ * ch - Pointer to channel structure.
+ *
+ *=======================================================================*/
+
+void dgap_input(struct channel_t *ch)
+{
+ struct board_t *bd;
+ struct bs_t *bs;
+ struct tty_struct *tp;
+ struct tty_ldisc *ld;
+ uint rmask;
+ uint head;
+ uint tail;
+ int data_len;
+ ulong lock_flags;
+ ulong lock_flags2;
+ int flip_len;
+ int len = 0;
+ int n = 0;
+ uchar *buf;
+ uchar tmpchar;
+ int s = 0;
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ tp = ch->ch_tun.un_tty;
+
+ bs = ch->ch_bs;
+ if (!bs) {
+ return;
+ }
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ DPR_READ(("dgap_input start\n"));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ /*
+ * Figure the number of characters in the buffer.
+ * Exit immediately if none.
+ */
+
+ rmask = ch->ch_rsize - 1;
+
+ head = readw(&(bs->rx_head));
+ head &= rmask;
+ tail = readw(&(bs->rx_tail));
+ tail &= rmask;
+
+ data_len = (head - tail) & rmask;
+
+ if (data_len == 0) {
+ writeb(1, &(bs->idata));
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ DPR_READ(("No data on port %d\n", ch->ch_portnum));
+ return;
+ }
+
+ /*
+ * If the device is not open, or CREAD is off, flush
+ * input data and return immediately.
+ */
+ if ((bd->state != BOARD_READY) || !tp || (tp->magic != TTY_MAGIC) ||
+ !(ch->ch_tun.un_flags & UN_ISOPEN) || !(tp->termios.c_cflag & CREAD) ||
+ (ch->ch_tun.un_flags & UN_CLOSING)) {
+
+ DPR_READ(("input. dropping %d bytes on port %d...\n", data_len, ch->ch_portnum));
+ DPR_READ(("input. tp: %p tp->magic: %x MAGIC:%x ch flags: %x\n",
+ tp, tp ? tp->magic : 0, TTY_MAGIC, ch->ch_tun.un_flags));
+ writew(head, &(bs->rx_tail));
+ writeb(1, &(bs->idata));
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return;
+ }
+
+ /*
+ * If we are throttled, simply don't read any data.
+ */
+ if (ch->ch_flags & CH_RXBLOCK) {
+ writeb(1, &(bs->idata));
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ DPR_READ(("Port %d throttled, not reading any data. head: %x tail: %x\n",
+ ch->ch_portnum, head, tail));
+ return;
+ }
+
+ /*
+ * Ignore overruns.
+ */
+ tmpchar = readb(&(bs->orun));
+ if (tmpchar) {
+ ch->ch_err_overrun++;
+ writeb(0, &(bs->orun));
+ }
+
+ DPR_READ(("dgap_input start 2\n"));
+
+ /* Decide how much data we can send into the tty layer */
+ flip_len = TTY_FLIPBUF_SIZE;
+
+ /* Chop down the length, if needed */
+ len = min(data_len, flip_len);
+ len = min(len, (N_TTY_BUF_SIZE - 1));
+
+ ld = tty_ldisc_ref(tp);
+
+#ifdef TTY_DONT_FLIP
+ /*
+ * If the DONT_FLIP flag is on, don't flush our buffer, and act
+ * like the ld doesn't have any space to put the data right now.
+ */
+ if (test_bit(TTY_DONT_FLIP, &tp->flags))
+ len = 0;
+#endif
+
+ /*
+ * If we were unable to get a reference to the ld,
+ * don't flush our buffer, and act like the ld doesn't
+ * have any space to put the data right now.
+ */
+ if (!ld) {
+ len = 0;
+ } else {
+ /*
+ * If ld doesn't have a pointer to a receive_buf function,
+ * flush the data, then act like the ld doesn't have any
+ * space to put the data right now.
+ */
+ if (!ld->ops->receive_buf) {
+ writew(head, &(bs->rx_tail));
+ len = 0;
+ }
+ }
+
+ if (len <= 0) {
+ writeb(1, &(bs->idata));
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ DPR_READ(("dgap_input 1 - finish\n"));
+ if (ld)
+ tty_ldisc_deref(ld);
+ return;
+ }
+
+ buf = ch->ch_bd->flipbuf;
+ n = len;
+
+ /*
+ * n now contains the maximum amount of data we can copy,
+ * bounded either by our buffer size or the amount
+ * of data the card actually has pending...
+ */
+ while (n) {
+
+ s = ((head >= tail) ? head : ch->ch_rsize) - tail;
+ s = min(s, n);
+
+ if (s <= 0)
+ break;
+
+ memcpy_fromio(buf, (char *) ch->ch_raddr + tail, s);
+ dgap_sniff_nowait_nolock(ch, "USER READ", buf, s);
+
+ tail += s;
+ buf += s;
+
+ n -= s;
+ /* Flip queue if needed */
+ tail &= rmask;
+ }
+
+ writew(tail, &(bs->rx_tail));
+ writeb(1, &(bs->idata));
+ ch->ch_rxcount += len;
+
+ /*
+ * If we are completely raw, we don't need to go through a lot
+ * of the tty layers that exist.
+ * In this case, we take the shortest and fastest route we
+ * can to relay the data to the user.
+ *
+ * On the other hand, if we are not raw, we need to go through
+ * the tty layer, whose API is better defined.
+ */
+ if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
+ dgap_parity_scan(ch, ch->ch_bd->flipbuf, ch->ch_bd->flipflagbuf, &len);
+
+ len = tty_buffer_request_room(tp->port, len);
+ tty_insert_flip_string_flags(tp->port, ch->ch_bd->flipbuf,
+ ch->ch_bd->flipflagbuf, len);
+ }
+ else {
+ len = tty_buffer_request_room(tp->port, len);
+ tty_insert_flip_string(tp->port, ch->ch_bd->flipbuf, len);
+ }
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ /* Tell the tty layer it's okay to "eat" the data now */
+ tty_flip_buffer_push(tp->port);
+
+ if (ld)
+ tty_ldisc_deref(ld);
+
+ DPR_READ(("dgap_input - finish\n"));
+}
+
+
+/************************************************************************
+ * Determines when CARRIER changes state and takes appropriate
+ * action.
+ ************************************************************************/
+void dgap_carrier(struct channel_t *ch)
+{
+ struct board_t *bd;
+
+ int virt_carrier = 0;
+ int phys_carrier = 0;
+
+ DPR_CARR(("dgap_carrier called...\n"));
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ /* Make sure altpin is always set correctly */
+ if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
+ ch->ch_dsr = DM_CD;
+ ch->ch_cd = DM_DSR;
+ }
+ else {
+ ch->ch_dsr = DM_DSR;
+ ch->ch_cd = DM_CD;
+ }
+
+ if (ch->ch_mistat & D_CD(ch)) {
+ DPR_CARR(("mistat: %x D_CD: %x\n", ch->ch_mistat, D_CD(ch)));
+ phys_carrier = 1;
+ }
+
+ if (ch->ch_digi.digi_flags & DIGI_FORCEDCD) {
+ virt_carrier = 1;
+ }
+
+ if (ch->ch_c_cflag & CLOCAL) {
+ virt_carrier = 1;
+ }
+
+
+ DPR_CARR(("DCD: physical: %d virt: %d\n", phys_carrier, virt_carrier));
+
+ /*
+ * Test for a VIRTUAL carrier transition to HIGH.
+ */
+ if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {
+
+ /*
+ * When carrier rises, wake any threads waiting
+ * for carrier in the open routine.
+ */
+
+ DPR_CARR(("carrier: virt DCD rose\n"));
+
+ if (waitqueue_active(&(ch->ch_flags_wait)))
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+
+ /*
+ * Test for a PHYSICAL carrier transition to HIGH.
+ */
+ if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {
+
+ /*
+ * When carrier rises, wake any threads waiting
+ * for carrier in the open routine.
+ */
+
+ DPR_CARR(("carrier: physical DCD rose\n"));
+
+ if (waitqueue_active(&(ch->ch_flags_wait)))
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+
+ /*
+ * Test for a PHYSICAL transition to low, so long as we aren't
+ * currently ignoring physical transitions (which is what "virtual
+ * carrier" indicates).
+ *
+ * The transition of the virtual carrier to low really doesn't
+ * matter... it really only means "ignore carrier state", not
+ * "make pretend that carrier is there".
+ */
+ if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0) &&
+ (phys_carrier == 0))
+ {
+
+ /*
+ * When carrier drops:
+ *
+ * Drop carrier on all open units.
+ *
+ * Flush queues, waking up any task waiting in the
+ * line discipline.
+ *
+ * Send a hangup to the control terminal.
+ *
+ * Enable all select calls.
+ */
+ if (waitqueue_active(&(ch->ch_flags_wait)))
+ wake_up_interruptible(&ch->ch_flags_wait);
+
+ if (ch->ch_tun.un_open_count > 0) {
+ DPR_CARR(("Sending tty hangup\n"));
+ tty_hangup(ch->ch_tun.un_tty);
+ }
+
+ if (ch->ch_pun.un_open_count > 0) {
+ DPR_CARR(("Sending pr hangup\n"));
+ tty_hangup(ch->ch_pun.un_tty);
+ }
+ }
+
+ /*
+ * Make sure that our cached values reflect the current reality.
+ */
+ if (virt_carrier == 1)
+ ch->ch_flags |= CH_FCAR;
+ else
+ ch->ch_flags &= ~CH_FCAR;
+
+ if (phys_carrier == 1)
+ ch->ch_flags |= CH_CD;
+ else
+ ch->ch_flags &= ~CH_CD;
+}
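+
+/*
+ * Informal summary of the transitions handled above, derived from the
+ * checks in this function:
+ *
+ *   - virtual carrier rising (CLOCAL/FORCEDCD set): wake open() sleepers.
+ *   - physical carrier rising: wake open() sleepers.
+ *   - physical carrier falling while virtual carrier is low and CH_CD
+ *     was set: wake sleepers and hang up any open tty/print units.
+ *
+ * A falling virtual carrier on its own never causes a hangup; it only
+ * means "stop ignoring the physical carrier".
+ */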
+
+
+/************************************************************************
+ *
+ * TTY Entry points and helper functions
+ *
+ ************************************************************************/
+
+/*
+ * dgap_tty_open()
+ *
+ */
+static int dgap_tty_open(struct tty_struct *tty, struct file *file)
+{
+ struct board_t *brd;
+ struct channel_t *ch;
+ struct un_t *un;
+ struct bs_t *bs;
+ uint major = 0;
+ uint minor = 0;
+ int rc = 0;
+ ulong lock_flags;
+ ulong lock_flags2;
+ u16 head;
+
+ rc = 0;
+
+ major = MAJOR(tty_devnum(tty));
+ minor = MINOR(tty_devnum(tty));
+
+ if (major > 255) {
+ return -ENXIO;
+ }
+
+ /* Get board pointer from our array of majors we have allocated */
+ brd = dgap_BoardsByMajor[major];
+ if (!brd) {
+ return -ENXIO;
+ }
+
+ /*
+ * If the board is not yet up to a state of READY, go to
+ * sleep waiting for it to become ready, or for the user to cancel the open.
+ */
+ rc = wait_event_interruptible(brd->state_wait,
+ (brd->state & BOARD_READY));
+
+ if (rc) {
+ return rc;
+ }
+
+ DGAP_LOCK(brd->bd_lock, lock_flags);
+
+ /* The wait above should guarantee this cannot happen */
+ if (brd->state != BOARD_READY) {
+ DGAP_UNLOCK(brd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ /* If the opened minor is at or beyond our number of ports, bail. */
+ if (minor >= brd->nasync) {
+ DGAP_UNLOCK(brd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ ch = brd->channels[minor];
+ if (!ch) {
+ DGAP_UNLOCK(brd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ /* Grab channel lock */
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ /* Figure out our type */
+ if (major == brd->dgap_Serial_Major) {
+ un = &brd->channels[minor]->ch_tun;
+ un->un_type = DGAP_SERIAL;
+ }
+ else if (major == brd->dgap_TransparentPrint_Major) {
+ un = &brd->channels[minor]->ch_pun;
+ un->un_type = DGAP_PRINT;
+ }
+ else {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(brd->bd_lock, lock_flags);
+ DPR_OPEN(("%d Unknown TYPE!\n", __LINE__));
+ return -ENXIO;
+ }
+
+ /* Store our unit into driver_data, so we always have it available. */
+ tty->driver_data = un;
+
+ DPR_OPEN(("Open called. MAJOR: %d MINOR:%d unit: %p NAME: %s\n",
+ MAJOR(tty_devnum(tty)), MINOR(tty_devnum(tty)), un, brd->name));
+
+ /*
+ * Error if channel info pointer is 0.
+ */
+ if ((bs = ch->ch_bs) == 0) {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(brd->bd_lock, lock_flags);
+ DPR_OPEN(("%d BS is 0!\n", __LINE__));
+ return -ENXIO;
+ }
+
+ DPR_OPEN(("%d: tflag=%x pflag=%x\n", __LINE__, ch->ch_tun.un_flags, ch->ch_pun.un_flags));
+
+ /*
+ * Initialize tty's
+ */
+ if (!(un->un_flags & UN_ISOPEN)) {
+ /* Store important variables. */
+ un->un_tty = tty;
+
+ /* Maybe do something here to the TTY struct as well? */
+ }
+
+ /*
+ * Initialize if neither terminal nor printer is open.
+ */
+ if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
+
+ DPR_OPEN(("dgap_open: initializing channel in open...\n"));
+
+ ch->ch_mforce = 0;
+ ch->ch_mval = 0;
+
+ /*
+ * Flush input queue.
+ */
+ head = readw(&(bs->rx_head));
+ writew(head, &(bs->rx_tail));
+
+ ch->ch_flags = 0;
+ ch->pscan_state = 0;
+ ch->pscan_savechar = 0;
+
+ ch->ch_c_cflag = tty->termios.c_cflag;
+ ch->ch_c_iflag = tty->termios.c_iflag;
+ ch->ch_c_oflag = tty->termios.c_oflag;
+ ch->ch_c_lflag = tty->termios.c_lflag;
+ ch->ch_startc = tty->termios.c_cc[VSTART];
+ ch->ch_stopc = tty->termios.c_cc[VSTOP];
+
+ /* TODO: flush our TTY struct here? */
+ }
+
+ dgap_carrier(ch);
+ /*
+ * Run param in case we changed anything
+ */
+ dgap_param(tty);
+
+ /*
+ * follow protocol for opening port
+ */
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(brd->bd_lock, lock_flags);
+
+ rc = dgap_block_til_ready(tty, file, ch);
+
+ if (!un->un_tty) {
+ return -ENODEV;
+ }
+
+ if (rc) {
+ DPR_OPEN(("dgap_tty_open returning after dgap_block_til_ready "
+ "with %d\n", rc));
+ }
+
+ /* No going back now, increment our unit and channel counters */
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_open_count++;
+ un->un_open_count++;
+ un->un_flags |= (UN_ISOPEN);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_OPEN(("dgap_tty_open finished\n"));
+ return (rc);
+}
+
+
+/*
+ * dgap_block_til_ready()
+ *
+ * Wait for DCD, if needed.
+ */
+static int dgap_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch)
+{
+ int retval = 0;
+ struct un_t *un = NULL;
+ ulong lock_flags;
+ uint old_flags = 0;
+ int sleep_on_un_flags = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC || !file || !ch || ch->magic != DGAP_CHANNEL_MAGIC) {
+ return (-ENXIO);
+ }
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC) {
+ return (-ENXIO);
+ }
+
+ DPR_OPEN(("dgap_block_til_ready - before block.\n"));
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_wopen++;
+
+ /* Loop forever */
+ while (1) {
+
+ sleep_on_un_flags = 0;
+
+ /*
+ * If board has failed somehow during our sleep, bail with error.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED) {
+ retval = -ENXIO;
+ break;
+ }
+
+ /* If tty was hung up, break out of loop and set error. */
+ if (tty_hung_up_p(file)) {
+ retval = -EAGAIN;
+ break;
+ }
+
+ /*
+ * If either unit is in the middle of the fragile part of close,
+ * we just cannot touch the channel safely.
+ * Go back to sleep, knowing that when the channel can be
+ * touched safely, the close routine will signal the
+ * ch_flags_wait waitqueue to wake us back up.
+ */
+ if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_CLOSING)) {
+
+ /*
+ * Our conditions to leave cleanly and happily:
+ * 1) NONBLOCKING on the tty is set.
+ * 2) CLOCAL is set.
+ * 3) DCD (fake or real) is active.
+ */
+
+ if (file->f_flags & O_NONBLOCK) {
+ break;
+ }
+
+ if (tty->flags & (1 << TTY_IO_ERROR)) {
+ break;
+ }
+
+ if (ch->ch_flags & CH_CD) {
+ DPR_OPEN(("%d: ch_flags: %x\n", __LINE__, ch->ch_flags));
+ break;
+ }
+
+ if (ch->ch_flags & CH_FCAR) {
+ DPR_OPEN(("%d: ch_flags: %x\n", __LINE__, ch->ch_flags));
+ break;
+ }
+ }
+ else {
+ sleep_on_un_flags = 1;
+ }
+
+ /*
+ * If there is a signal pending, the user probably
+ * interrupted (ctrl-c) us.
+ * Leave loop with error set.
+ */
+ if (signal_pending(current)) {
+ DPR_OPEN(("%d: signal pending...\n", __LINE__));
+ retval = -ERESTARTSYS;
+ break;
+ }
+
+ DPR_OPEN(("dgap_block_til_ready - blocking.\n"));
+
+ /*
+ * Store the flags before we let go of channel lock
+ */
+ if (sleep_on_un_flags)
+ old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags;
+ else
+ old_flags = ch->ch_flags;
+
+ /*
+ * Let go of channel lock before calling schedule.
+ * Our poller will get any FEP events and wake us up when DCD
+ * eventually goes active.
+ */
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_OPEN(("Going to sleep on %s flags...\n",
+ (sleep_on_un_flags ? "un" : "ch")));
+
+ /*
+ * Wait for something in the flags to change from the current value.
+ */
+ if (sleep_on_un_flags) {
+ retval = wait_event_interruptible(un->un_flags_wait,
+ (old_flags != (ch->ch_tun.un_flags | ch->ch_pun.un_flags)));
+ }
+ else {
+ retval = wait_event_interruptible(ch->ch_flags_wait,
+ (old_flags != ch->ch_flags));
+ }
+
+ DPR_OPEN(("After sleep... retval: %x\n", retval));
+
+ /*
+ * We got woken up for some reason.
+ * Before looping around, grab our channel lock.
+ */
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+ }
+
+ ch->ch_wopen--;
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_OPEN(("dgap_block_til_ready - after blocking.\n"));
+
+ if (retval) {
+ DPR_OPEN(("dgap_block_til_ready - done. error. retval: %x\n", retval));
+ return(retval);
+ }
+
+ DPR_OPEN(("dgap_block_til_ready - done no error. jiffies: %lu\n", jiffies));
+
+ return(0);
+}
+
+
+/*
+ * dgap_tty_hangup()
+ *
+ * Hangup the port. Like a close, but don't wait for output to drain.
+ */
+static void dgap_tty_hangup(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ DPR_CLOSE(("dgap_hangup called. ch->ch_open_count: %d un->un_open_count: %d\n",
+ ch->ch_open_count, un->un_open_count));
+
+ /* flush the transmit queues */
+ dgap_tty_flush_buffer(tty);
+
+ DPR_CLOSE(("dgap_hangup finished. ch->ch_open_count: %d un->un_open_count: %d\n",
+ ch->ch_open_count, un->un_open_count));
+}
+
+
+
+/*
+ * dgap_tty_close()
+ *
+ */
+static void dgap_tty_close(struct tty_struct *tty, struct file *file)
+{
+ struct ktermios *ts;
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ int rc = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ ts = &tty->termios;
+
+ DPR_CLOSE(("Close called\n"));
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+
+ /*
+ * Determine if this is the last close or not - and if we agree about
+ * which type of close it is with the Line Discipline
+ */
+ if ((tty->count == 1) && (un->un_open_count != 1)) {
+ /*
+ * Uh, oh. tty->count is 1, which means that the tty
+ * structure will be freed. un_open_count should always
+ * be one in these conditions. If it's greater than
+ * one, we've got real problems, since it means the
+ * serial port won't be shutdown.
+ */
+ APR(("tty->count is 1, un open count is %d\n", un->un_open_count));
+ un->un_open_count = 1;
+ }
+
+ if (--un->un_open_count < 0) {
+ APR(("bad serial port open count of %d\n", un->un_open_count));
+ un->un_open_count = 0;
+ }
+
+ ch->ch_open_count--;
+
+ if (ch->ch_open_count && un->un_open_count) {
+ DPR_CLOSE(("dgap_tty_close: not last close ch: %d un:%d\n",
+ ch->ch_open_count, un->un_open_count));
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ /* OK, it's the last close on the unit */
+ DPR_CLOSE(("dgap_tty_close - last close on unit procedures\n"));
+
+ un->un_flags |= UN_CLOSING;
+
+ tty->closing = 1;
+
+ /*
+ * Only officially close channel if count is 0 and
+ * DIGI_PRINTER bit is not set.
+ */
+ if ((ch->ch_open_count == 0) && !(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
+
+ ch->ch_flags &= ~(CH_RXBLOCK);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ /* wait for output to drain */
+ /* This will also return if we take an interrupt */
+
+ DPR_CLOSE(("Calling wait_for_drain\n"));
+ rc = dgap_wait_for_drain(tty);
+ DPR_CLOSE(("After calling wait_for_drain\n"));
+
+ if (rc) {
+ DPR_BASIC(("dgap_tty_close - bad return: %d ", rc));
+ }
+
+ dgap_tty_flush_buffer(tty);
+ tty_ldisc_flush(tty);
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+
+ tty->closing = 0;
+
+ /*
+ * If we have HUPCL set, lower DTR and RTS
+ */
+ if (ch->ch_c_cflag & HUPCL ) {
+ DPR_CLOSE(("Close. HUPCL set, dropping DTR/RTS\n"));
+ ch->ch_mostat &= ~(D_RTS(ch)|D_DTR(ch));
+ dgap_cmdb( ch, SMODEM, 0, D_DTR(ch)|D_RTS(ch), 0 );
+
+ /*
+ * Go to sleep to ensure RTS/DTR
+ * have been dropped for modems to see it.
+ */
+ if (ch->ch_close_delay) {
+ DPR_CLOSE(("Close. Sleeping for RTS/DTR drop\n"));
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+ dgap_ms_sleep(ch->ch_close_delay);
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+
+ DPR_CLOSE(("Close. After sleeping for RTS/DTR drop\n"));
+ }
+ }
+
+ ch->pscan_state = 0;
+ ch->pscan_savechar = 0;
+ ch->ch_baud_info = 0;
+
+ }
+
+ /*
+ * turn off print device when closing print device.
+ */
+ if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON) ) {
+ dgap_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ ch->ch_flags &= ~CH_PRON;
+ }
+
+ un->un_tty = NULL;
+ un->un_flags &= ~(UN_ISOPEN | UN_CLOSING);
+ tty->driver_data = NULL;
+
+ DPR_CLOSE(("Close. Doing wakeups\n"));
+ wake_up_interruptible(&ch->ch_flags_wait);
+ wake_up_interruptible(&un->un_flags_wait);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_BASIC(("dgap_tty_close - complete\n"));
+}
+
+
+/*
+ * dgap_tty_chars_in_buffer()
+ *
+ * Return number of characters that have not been transmitted yet.
+ *
+ * This routine is used by the line discipline to determine if there
+ * is data waiting to be transmitted/drained/flushed or not.
+ */
+static int dgap_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct board_t *bd = NULL;
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+ struct bs_t *bs = NULL;
+ uchar tbusy;
+ uint chars = 0;
+ u16 thead, ttail, tmask, chead, ctail;
+ ulong lock_flags = 0;
+ ulong lock_flags2 = 0;
+
+ if (tty == NULL)
+ return(0);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (0);
+
+ bs = ch->ch_bs;
+ if (!bs)
+ return (0);
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ tmask = (ch->ch_tsize - 1);
+
+ /* Get Transmit queue pointers */
+ thead = readw(&(bs->tx_head)) & tmask;
+ ttail = readw(&(bs->tx_tail)) & tmask;
+
+ /* Get tbusy flag */
+ tbusy = readb(&(bs->tbusy));
+
+ /* Get Command queue pointers */
+ chead = readw(&(ch->ch_cm->cm_head));
+ ctail = readw(&(ch->ch_cm->cm_tail));
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ /*
+ * The only way we know for sure if there is no pending
+ * data left to be transferred, is if:
+ * 1) Transmit head and tail are equal (empty).
+ * 2) Command queue head and tail are equal (empty).
+ * 3) The "TBUSY" flag is 0. (Transmitter not busy).
+ */
+
+ if ((ttail == thead) && (tbusy == 0) && (chead == ctail)) {
+ chars = 0;
+ }
+ else {
+ if (thead >= ttail)
+ chars = thead - ttail;
+ else
+ chars = thead - ttail + ch->ch_tsize;
+ /*
+ * Fudge factor here.
+ * If chars is zero, we know that the command queue had
+ * something in it or tbusy was set. Because we cannot
+ * be sure if there is still some data to be transmitted,
+ * let's lie and tell the ld we have 1 byte left.
+ */
+ if (chars == 0) {
+ /*
+ * If TBUSY is still set, and our tx buffers are empty,
+ * force the firmware to send me another wakeup after
+ * TBUSY has been cleared.
+ */
+ if (tbusy != 0) {
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+ un->un_flags |= UN_EMPTY;
+ writeb(1, &(bs->iempty));
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+ }
+ chars = 1;
+ }
+ }
+
+ DPR_WRITE(("dgap_tty_chars_in_buffer. Port: %x - %d (head: %d tail: %d tsize: %d)\n",
+ ch->ch_portnum, chars, thead, ttail, ch->ch_tsize));
+ return(chars);
+}
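+
+/*
+ * Worked example (illustrative sizes): with ch_tsize = 1024, thead = 10
+ * and ttail = 1000, thead - ttail is negative, so 1024 is added back and
+ * 34 characters are reported as still waiting to be transmitted.
+ */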
+
+
+static int dgap_wait_for_drain(struct tty_struct *tty)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ struct bs_t *bs;
+ int ret = -EIO;
+ uint count = 1;
+ ulong lock_flags = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return ret;
+
+ bs = ch->ch_bs;
+ if (!bs)
+ return ret;
+
+ ret = 0;
+
+ DPR_DRAIN(("dgap_wait_for_drain start\n"));
+
+ /* Loop until data is drained */
+ while (count != 0) {
+
+ count = dgap_tty_chars_in_buffer(tty);
+
+ if (count == 0)
+ break;
+
+ /* Set flag waiting for drain */
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+ un->un_flags |= UN_EMPTY;
+ writeb(1, &(bs->iempty));
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ /* Go to sleep till we get woken up */
+ ret = wait_event_interruptible(un->un_flags_wait, ((un->un_flags & UN_EMPTY) == 0));
+ /* If ret is non-zero, user ctrl-c'ed us */
+ if (ret) {
+ break;
+ }
+ }
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+ un->un_flags &= ~(UN_EMPTY);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_DRAIN(("dgap_wait_for_drain finish\n"));
+ return (ret);
+}
+
+
+/*
+ * dgap_maxcps_room
+ *
+ * Reduces bytes_available to the max number of characters
+ * that can be sent currently given the maxcps value, and
+ * returns the new bytes_available. This only affects printer
+ * output.
+ */
+static int dgap_maxcps_room(struct tty_struct *tty, int bytes_available)
+{
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+
+ if (tty == NULL)
+ return (bytes_available);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (bytes_available);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (bytes_available);
+
+ /*
+ * If it's not the Transparent print device, return
+ * the full data amount.
+ */
+ if (un->un_type != DGAP_PRINT)
+ return (bytes_available);
+
+ if (ch->ch_digi.digi_maxcps > 0 && ch->ch_digi.digi_bufsize > 0 ) {
+ int cps_limit = 0;
+ unsigned long current_time = jiffies;
+ unsigned long buffer_time = current_time +
+ (HZ * ch->ch_digi.digi_bufsize) / ch->ch_digi.digi_maxcps;
+
+ if (ch->ch_cpstime < current_time) {
+ /* buffer is empty */
+ ch->ch_cpstime = current_time; /* reset ch_cpstime */
+ cps_limit = ch->ch_digi.digi_bufsize;
+ }
+ else if (ch->ch_cpstime < buffer_time) {
+ /* still room in the buffer */
+ cps_limit = ((buffer_time - ch->ch_cpstime) * ch->ch_digi.digi_maxcps) / HZ;
+ }
+ else {
+ /* no room in the buffer */
+ cps_limit = 0;
+ }
+
+ bytes_available = min(cps_limit, bytes_available);
+ }
+
+ return (bytes_available);
+}
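+
+/*
+ * Worked example of the maxcps math above (illustrative values): with
+ * HZ = 100, digi_maxcps = 100 and digi_bufsize = 100, buffer_time is
+ * current_time + 100 ticks.  If ch_cpstime sits 40 ticks in the future,
+ * cps_limit = ((buffer_time - ch_cpstime) * 100) / 100 = 60, so at most
+ * 60 more bytes are accepted for the printer right now.
+ */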
+
+
+static inline void dgap_set_firmware_event(struct un_t *un, unsigned int event)
+{
+ struct channel_t *ch = NULL;
+ struct bs_t *bs = NULL;
+
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+ bs = ch->ch_bs;
+ if (!bs)
+ return;
+
+ if ((event & UN_LOW) != 0) {
+ if ((un->un_flags & UN_LOW) == 0) {
+ un->un_flags |= UN_LOW;
+ writeb(1, &(bs->ilow));
+ }
+ }
+ if ((event & UN_EMPTY) != 0) {
+ if ((un->un_flags & UN_EMPTY) == 0) {
+ un->un_flags |= UN_EMPTY;
+ writeb(1, &(bs->iempty));
+ }
+ }
+}
+
+
+/*
+ * dgap_tty_write_room()
+ *
+ * Return space available in Tx buffer
+ */
+static int dgap_tty_write_room(struct tty_struct *tty)
+{
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+ struct bs_t *bs = NULL;
+ u16 head, tail, tmask;
+ int ret = 0;
+ ulong lock_flags = 0;
+
+ if (tty == NULL || dgap_TmpWriteBuf == NULL)
+ return(0);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (0);
+
+ bs = ch->ch_bs;
+ if (!bs)
+ return (0);
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+
+ tmask = ch->ch_tsize - 1;
+ head = readw(&(bs->tx_head)) & tmask;
+ tail = readw(&(bs->tx_tail)) & tmask;
+
+ if ((ret = tail - head - 1) < 0)
+ ret += ch->ch_tsize;
+
+ /* Limit printer to maxcps */
+ ret = dgap_maxcps_room(tty, ret);
+
+ /*
+ * If we are printer device, leave space for
+ * possibly both the on and off strings.
+ */
+ if (un->un_type == DGAP_PRINT) {
+ if (!(ch->ch_flags & CH_PRON))
+ ret -= ch->ch_digi.digi_onlen;
+ ret -= ch->ch_digi.digi_offlen;
+ }
+ else {
+ if (ch->ch_flags & CH_PRON)
+ ret -= ch->ch_digi.digi_offlen;
+ }
+
+ if (ret < 0)
+ ret = 0;
+
+ /*
+ * Schedule FEP to wake us up if needed.
+ *
+ * TODO: This might be overkill...
+ * Do we really need to schedule callbacks from the FEP
+ * in every case? Can we get smarter based on ret?
+ */
+ dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_WRITE(("dgap_tty_write_room - %d tail: %d head: %d\n", ret, tail, head));
+
+ return(ret);
+}
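+
+/*
+ * Worked example (illustrative sizes): with ch_tsize = 1024, head = 100
+ * and tail = 50, tail - head - 1 = -51, so 1024 is added back and 973
+ * bytes of FEP transmit space are reported.  A transparent-print unit
+ * that has not yet sent its "printer on" string additionally reserves
+ * digi_onlen + digi_offlen bytes (4 + 4 with the defaults above).
+ */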
+
+
+/*
+ * dgap_tty_put_char()
+ *
+ * Put a character into ch->ch_buf
+ *
+ * - used by the line discipline for OPOST processing
+ */
+static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c)
+{
+ /*
+ * Simply call dgap_tty_write().
+ */
+ DPR_WRITE(("dgap_tty_put_char called\n"));
+ dgap_tty_write(tty, &c, 1);
+ return 1;
+}
+
+
+/*
+ * dgap_tty_write()
+ *
+ * Take data from the user or kernel and send it out to the FEP.
+ * In here exists all the Transparent Print magic as well.
+ */
+static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+ struct bs_t *bs = NULL;
+ char *vaddr = NULL;
+ u16 head, tail, tmask, remain;
+ int bufcount = 0, n = 0;
+ int orig_count = 0;
+ ulong lock_flags;
+ int from_user = 0;
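+
+ /*
+ * Note: from_user stays 0 in this entry point (the tty core always
+ * hands us a kernel buffer), so the copy_from_user()/semaphore path
+ * below is effectively never taken here.
+ */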
+
+ if (tty == NULL || dgap_TmpWriteBuf == NULL)
+ return(0);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (0);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return(0);
+
+ bs = ch->ch_bs;
+ if (!bs)
+ return(0);
+
+ if (!count)
+ return(0);
+
+ DPR_WRITE(("dgap_tty_write: Port: %x tty=%p user=%d len=%d\n",
+ ch->ch_portnum, tty, from_user, count));
+
+ /*
+ * Store original amount of characters passed in.
+ * This helps to figure out if we should ask the FEP
+ * to send us an event when it has more space available.
+ */
+ orig_count = count;
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+
+ /* Get our space available for the channel from the board */
+ tmask = ch->ch_tsize - 1;
+ head = readw(&(bs->tx_head)) & tmask;
+ tail = readw(&(bs->tx_tail)) & tmask;
+
+ if ((bufcount = tail - head - 1) < 0)
+ bufcount += ch->ch_tsize;
+
+ DPR_WRITE(("%d: bufcount: %x count: %x tail: %x head: %x tmask: %x\n",
+ __LINE__, bufcount, count, tail, head, tmask));
+
+ /*
+ * Limit printer output to maxcps overall, with bursts allowed
+ * up to bufsize characters.
+ */
+ bufcount = dgap_maxcps_room(tty, bufcount);
+
+ /*
+ * Take minimum of what the user wants to send, and the
+ * space available in the FEP buffer.
+ */
+ count = min(count, bufcount);
+
+ /*
+ * Bail if no space left.
+ */
+ if (count <= 0) {
+ dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+ return(0);
+ }
+
+ /*
+ * Output the printer ON string, if we are in terminal mode, but
+ * need to be in printer mode.
+ */
+ if ((un->un_type == DGAP_PRINT) && !(ch->ch_flags & CH_PRON)) {
+ dgap_wmove(ch, ch->ch_digi.digi_onstr,
+ (int) ch->ch_digi.digi_onlen);
+ head = readw(&(bs->tx_head)) & tmask;
+ ch->ch_flags |= CH_PRON;
+ }
+
+ /*
+ * On the other hand, output the printer OFF string, if we are
+ * currently in printer mode, but need to output to the terminal.
+ */
+ if ((un->un_type != DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
+ dgap_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ head = readw(&(bs->tx_head)) & tmask;
+ ch->ch_flags &= ~CH_PRON;
+ }
+
+ /*
+ * If there is nothing left to copy, or we can't handle any more data, leave.
+ */
+ if (count <= 0) {
+ dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+ return(0);
+ }
+
+ if (from_user) {
+
+ count = min(count, WRITEBUFLEN);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ /*
+ * If data is coming from user space, copy it into a temporary
+ * buffer so we don't get swapped out while doing the copy to
+ * the board.
+ */
+ /* we're allowed to block if it's from_user */
+ if (down_interruptible(&dgap_TmpWriteSem)) {
+ return (-EINTR);
+ }
+
+ if (copy_from_user(dgap_TmpWriteBuf, (const uchar __user *) buf, count)) {
+ up(&dgap_TmpWriteSem);
+ printk("Write: Copy from user failed!\n");
+ return -EFAULT;
+ }
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+
+ buf = dgap_TmpWriteBuf;
+ }
+
+ n = count;
+
+ /*
+ * If the write wraps over the top of the circular buffer,
+ * move the portion up to the wrap point, and reset the
+ * pointers to the bottom.
+ */
+ remain = ch->ch_tstart + ch->ch_tsize - head;
+
+ if (n >= remain) {
+ n -= remain;
+ vaddr = ch->ch_taddr + head;
+
+ memcpy_toio(vaddr, (uchar *) buf, remain);
+ dgap_sniff_nowait_nolock(ch, "USER WRITE", (uchar *) buf, remain);
+
+ head = ch->ch_tstart;
+ buf += remain;
+ }
+
+ if (n > 0) {
+
+ /*
+ * Move rest of data.
+ */
+ vaddr = ch->ch_taddr + head;
+ remain = n;
+
+ memcpy_toio(vaddr, (uchar *) buf, remain);
+ dgap_sniff_nowait_nolock(ch, "USER WRITE", (uchar *) buf, remain);
+
+ head += remain;
+
+ }
+
+ if (count) {
+ ch->ch_txcount += count;
+ head &= tmask;
+ writew(head, &(bs->tx_head));
+ }
+
+
+ dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
+
+ /*
+ * If this is the print device, and the
+ * printer is still on, we need to turn it
+ * off before going idle. If the buffer is
+ * non-empty, wait until it goes empty.
+ * Otherwise turn it off right now.
+ */
+ if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
+ tail = readw(&(bs->tx_tail)) & tmask;
+
+ if (tail != head) {
+ un->un_flags |= UN_EMPTY;
+ writeb(1, &(bs->iempty));
+ }
+ else {
+ dgap_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ head = readw(&(bs->tx_head)) & tmask;
+ ch->ch_flags &= ~CH_PRON;
+ }
+ }
+
+ /* Update printer buffer empty time. */
+ if ((un->un_type == DGAP_PRINT) && (ch->ch_digi.digi_maxcps > 0)
+ && (ch->ch_digi.digi_bufsize > 0)) {
+ ch->ch_cpstime += (HZ * count) / ch->ch_digi.digi_maxcps;
+ }
+
+ if (from_user) {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+ up(&dgap_TmpWriteSem);
+ }
+ else {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+ }
+
+ DPR_WRITE(("Write finished - Write %d bytes of %d.\n", count, orig_count));
+
+ return (count);
+}
+
+
+
+/*
+ * Return modem signals to ld.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+static int dgap_tty_tiocmget(struct tty_struct *tty)
+#else
+static int dgap_tty_tiocmget(struct tty_struct *tty, struct file *file)
+#endif
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ int result = -EIO;
+ uchar mstat = 0;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return result;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return result;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return result;
+
+ DPR_IOCTL(("dgap_tty_tiocmget start\n"));
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+
+ mstat = readb(&(ch->ch_bs->m_stat));
+ /* Append any outbound signals that might be pending... */
+ mstat |= ch->ch_mostat;
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ result = 0;
+
+ if (mstat & D_DTR(ch))
+ result |= TIOCM_DTR;
+ if (mstat & D_RTS(ch))
+ result |= TIOCM_RTS;
+ if (mstat & D_CTS(ch))
+ result |= TIOCM_CTS;
+ if (mstat & D_DSR(ch))
+ result |= TIOCM_DSR;
+ if (mstat & D_RI(ch))
+ result |= TIOCM_RI;
+ if (mstat & D_CD(ch))
+ result |= TIOCM_CD;
+
+ DPR_IOCTL(("dgap_tty_tiocmget finish\n"));
+
+ return result;
+}
+
+
+/*
+ * dgap_tty_tiocmset()
+ *
+ * Set modem signals, called by ld.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+static int dgap_tty_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+#else
+static int dgap_tty_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear)
+#endif
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int ret = -EIO;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return ret;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return ret;
+
+ DPR_IOCTL(("dgap_tty_tiocmset start\n"));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ if (set & TIOCM_RTS) {
+ ch->ch_mforce |= D_RTS(ch);
+ ch->ch_mval |= D_RTS(ch);
+ }
+
+ if (set & TIOCM_DTR) {
+ ch->ch_mforce |= D_DTR(ch);
+ ch->ch_mval |= D_DTR(ch);
+ }
+
+ if (clear & TIOCM_RTS) {
+ ch->ch_mforce |= D_RTS(ch);
+ ch->ch_mval &= ~(D_RTS(ch));
+ }
+
+ if (clear & TIOCM_DTR) {
+ ch->ch_mforce |= D_DTR(ch);
+ ch->ch_mval &= ~(D_DTR(ch));
+ }
+
+ dgap_param(tty);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_tiocmset finish\n"));
+
+ return (0);
+}
+
+
+
+/*
+ * dgap_tty_send_break()
+ *
+ * Send a Break, called by ld.
+ */
+static int dgap_tty_send_break(struct tty_struct *tty, int msec)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int ret = -EIO;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return ret;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return ret;
+
+ switch (msec) {
+ case -1:
+ msec = 0xFFFF;
+ break;
+ case 0:
+ msec = 1;
+ break;
+ default:
+ msec /= 10;
+ break;
+ }
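+
+ /*
+ * The SBREAK command presumably takes the break duration in 10 ms
+ * ticks (hence the division by 10 above), with 0xFFFF requesting an
+ * indefinite break; this is inferred from the conversion, not from
+ * firmware documentation.
+ */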
+
+ DPR_IOCTL(("dgap_tty_send_break start 1. %lx\n", jiffies));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+#if 0
+ dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
+#endif
+ dgap_cmdw(ch, SBREAK, (u16) msec, 0);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_send_break finish\n"));
+
+ return (0);
+}
+
+
+
+
+/*
+ * dgap_tty_wait_until_sent()
+ *
+ * wait until data has been transmitted, called by ld.
+ */
+static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ int rc;
+ rc = dgap_wait_for_drain(tty);
+ if (rc) {
+ DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
+ return;
+ }
+ return;
+}
+
+
+
+/*
+ * dgap_tty_send_xchar()
+ *
+ * Send a high priority character, called by ld.
+ */
+static void dgap_tty_send_xchar(struct tty_struct *tty, char c)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgap_tty_send_xchar start 1. %lx\n", jiffies));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ /*
+ * This is technically what we should do.
+ * However, the NIST tests specifically want
+ * to see each XON or XOFF character that it
+ * sends, so let's just send each character
+ * by hand...
+ */
+#if 0
+ if (c == STOP_CHAR(tty)) {
+ dgap_cmdw(ch, RPAUSE, 0, 0);
+ }
+ else if (c == START_CHAR(tty)) {
+ dgap_cmdw(ch, RRESUME, 0, 0);
+ }
+ else {
+ dgap_wmove(ch, &c, 1);
+ }
+#else
+ dgap_wmove(ch, &c, 1);
+#endif
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_send_xchar finish\n"));
+
+ return;
+}
+
+
+
+
+/*
+ * Return modem signals to ld.
+ */
+static int dgap_get_modem_info(struct channel_t *ch, unsigned int __user *value)
+{
+ int result = 0;
+ uchar mstat = 0;
+ ulong lock_flags;
+ int rc = 0;
+
+ DPR_IOCTL(("dgap_get_modem_info start\n"));
+
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return(-ENXIO);
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+
+ mstat = readb(&(ch->ch_bs->m_stat));
+ /* Append any outbound signals that might be pending... */
+ mstat |= ch->ch_mostat;
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ result = 0;
+
+ if (mstat & D_DTR(ch))
+ result |= TIOCM_DTR;
+ if (mstat & D_RTS(ch))
+ result |= TIOCM_RTS;
+ if (mstat & D_CTS(ch))
+ result |= TIOCM_CTS;
+ if (mstat & D_DSR(ch))
+ result |= TIOCM_DSR;
+ if (mstat & D_RI(ch))
+ result |= TIOCM_RI;
+ if (mstat & D_CD(ch))
+ result |= TIOCM_CD;
+
+ rc = put_user(result, value);
+
+ DPR_IOCTL(("dgap_get_modem_info finish\n"));
+ return(rc);
+}
+
+
+/*
+ * dgap_set_modem_info()
+ *
+ * Set modem signals, called by ld.
+ */
+static int dgap_set_modem_info(struct tty_struct *tty, unsigned int command, unsigned int __user *value)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int ret = -ENXIO;
+ unsigned int arg = 0;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return ret;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return ret;
+
+ DPR_IOCTL(("dgap_set_modem_info() start\n"));
+
+ ret = get_user(arg, value);
+ if (ret) {
+ DPR_IOCTL(("dgap_set_modem_info %d ret: %x. finished.\n", __LINE__, ret));
+ return(ret);
+ }
+
+ DPR_IOCTL(("dgap_set_modem_info: command: %x arg: %x\n", command, arg));
+
+ switch (command) {
+ case TIOCMBIS:
+ if (arg & TIOCM_RTS) {
+ ch->ch_mforce |= D_RTS(ch);
+ ch->ch_mval |= D_RTS(ch);
+ }
+
+ if (arg & TIOCM_DTR) {
+ ch->ch_mforce |= D_DTR(ch);
+ ch->ch_mval |= D_DTR(ch);
+ }
+
+ break;
+
+ case TIOCMBIC:
+ if (arg & TIOCM_RTS) {
+ ch->ch_mforce |= D_RTS(ch);
+ ch->ch_mval &= ~(D_RTS(ch));
+ }
+
+ if (arg & TIOCM_DTR) {
+ ch->ch_mforce |= D_DTR(ch);
+ ch->ch_mval &= ~(D_DTR(ch));
+ }
+
+ break;
+
+ case TIOCMSET:
+ ch->ch_mforce = D_DTR(ch)|D_RTS(ch);
+
+ if (arg & TIOCM_RTS) {
+ ch->ch_mval |= D_RTS(ch);
+ }
+ else {
+ ch->ch_mval &= ~(D_RTS(ch));
+ }
+
+ if (arg & TIOCM_DTR) {
+ ch->ch_mval |= (D_DTR(ch));
+ }
+ else {
+ ch->ch_mval &= ~(D_DTR(ch));
+ }
+
+ break;
+
+ default:
+ return(-EINVAL);
+ }
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ dgap_param(tty);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_set_modem_info finish\n"));
+
+ return (0);
+}
+
+
+/*
+ * dgap_tty_digigeta()
+ *
+ * Ioctl to get the information for ditty.
+ */
+static int dgap_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retinfo)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ struct digi_t tmp;
+ ulong lock_flags;
+
+ if (!retinfo)
+ return (-EFAULT);
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return (-EFAULT);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (-EFAULT);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (-EFAULT);
+
+ memset(&tmp, 0, sizeof(tmp));
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+ memcpy(&tmp, &ch->ch_digi, sizeof(tmp));
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return (-EFAULT);
+
+ return (0);
+}
+
+
+/*
+ * dgap_tty_digiseta()
+ *
+ * Ioctl to set the information for ditty.
+ */
+static int dgap_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_info)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ struct digi_t new_digi;
+ ulong lock_flags = 0;
+ unsigned long lock_flags2;
+
+ DPR_IOCTL(("DIGI_SETA start\n"));
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return (-EFAULT);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (-EFAULT);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (-EFAULT);
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (-EFAULT);
+
+ if (copy_from_user(&new_digi, new_info, sizeof(struct digi_t))) {
+ DPR_IOCTL(("DIGI_SETA failed copy_from_user\n"));
+ return(-EFAULT);
+ }
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ memcpy(&ch->ch_digi, &new_digi, sizeof(struct digi_t));
+
+ if (ch->ch_digi.digi_maxcps < 1)
+ ch->ch_digi.digi_maxcps = 1;
+
+ if (ch->ch_digi.digi_maxcps > 10000)
+ ch->ch_digi.digi_maxcps = 10000;
+
+ if (ch->ch_digi.digi_bufsize < 10)
+ ch->ch_digi.digi_bufsize = 10;
+
+ if (ch->ch_digi.digi_maxchar < 1)
+ ch->ch_digi.digi_maxchar = 1;
+
+ if (ch->ch_digi.digi_maxchar > ch->ch_digi.digi_bufsize)
+ ch->ch_digi.digi_maxchar = ch->ch_digi.digi_bufsize;
+
+ if (ch->ch_digi.digi_onlen > DIGI_PLEN)
+ ch->ch_digi.digi_onlen = DIGI_PLEN;
+
+ if (ch->ch_digi.digi_offlen > DIGI_PLEN)
+ ch->ch_digi.digi_offlen = DIGI_PLEN;
+
+ dgap_param(tty);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("DIGI_SETA finish\n"));
+
+ return(0);
+}
+
+
+/*
+ * dgap_tty_digigetedelay()
+ *
+ * Ioctl to get the current edelay setting.
+ */
+static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ int tmp;
+ ulong lock_flags;
+
+ if (!retinfo)
+ return (-EFAULT);
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return (-EFAULT);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (-EFAULT);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (-EFAULT);
+
+ memset(&tmp, 0, sizeof(tmp));
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+ tmp = readw(&(ch->ch_bs->edelay));
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return (-EFAULT);
+
+ return (0);
+}
+
+
+/*
+ * dgap_tty_digisetedelay()
+ *
+ * Ioctl to set the EDELAY setting.
+ */
+static int dgap_tty_digisetedelay(struct tty_struct *tty, int __user *new_info)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int new_digi;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+	DPR_IOCTL(("DIGI_SETEDELAY start\n"));
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return (-EFAULT);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (-EFAULT);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (-EFAULT);
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (-EFAULT);
+
+ if (copy_from_user(&new_digi, new_info, sizeof(int))) {
+ DPR_IOCTL(("DIGI_SETEDELAY failed copy_from_user\n"));
+ return(-EFAULT);
+ }
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ writew((u16) new_digi, &(ch->ch_bs->edelay));
+
+ dgap_param(tty);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+	DPR_IOCTL(("DIGI_SETEDELAY finish\n"));
+
+ return(0);
+}
+
+
+/*
+ * dgap_tty_digigetcustombaud()
+ *
+ * Ioctl to get the current custom baud rate setting.
+ */
+static int dgap_tty_digigetcustombaud(struct tty_struct *tty, int __user *retinfo)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ int tmp;
+ ulong lock_flags;
+
+ if (!retinfo)
+ return (-EFAULT);
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return (-EFAULT);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (-EFAULT);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (-EFAULT);
+
+ memset(&tmp, 0, sizeof(tmp));
+
+ DGAP_LOCK(ch->ch_lock, lock_flags);
+ tmp = dgap_get_custom_baud(ch);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("DIGI_GETCUSTOMBAUD. Returning %d\n", tmp));
+
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return (-EFAULT);
+
+ return (0);
+}
+
+
+/*
+ * dgap_tty_digisetcustombaud()
+ *
+ * Ioctl to set the custom baud rate setting
+ */
+static int dgap_tty_digisetcustombaud(struct tty_struct *tty, int __user *new_info)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ uint new_rate;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ DPR_IOCTL(("DIGI_SETCUSTOMBAUD start\n"));
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return (-EFAULT);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (-EFAULT);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (-EFAULT);
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (-EFAULT);
+
+
+ if (copy_from_user(&new_rate, new_info, sizeof(unsigned int))) {
+ DPR_IOCTL(("DIGI_SETCUSTOMBAUD failed copy_from_user\n"));
+ return(-EFAULT);
+ }
+
+ if (bd->bd_flags & BD_FEP5PLUS) {
+
+ DPR_IOCTL(("DIGI_SETCUSTOMBAUD. Setting %d\n", new_rate));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ ch->ch_custom_speed = new_rate;
+
+ dgap_param(tty);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ }
+
+ DPR_IOCTL(("DIGI_SETCUSTOMBAUD finish\n"));
+
+ return(0);
+}
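+
+/*
+ * Minimal userspace sketch for the custom baud ioctls above (assumes an
+ * already-open tty fd on a FEP5+ board; error handling trimmed):
+ *
+ *	#include <sys/ioctl.h>
+ *	#include "digi.h"		// DIGI_SETCUSTOMBAUD / DIGI_GETCUSTOMBAUD
+ *
+ *	int rate = 460800;		// example rate
+ *
+ *	ioctl(fd, DIGI_SETCUSTOMBAUD, &rate);
+ *	ioctl(fd, DIGI_GETCUSTOMBAUD, &rate);	// read the value back
+ */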
+
+
+/*
+ * dgap_set_termios()
+ */
+static void dgap_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ unsigned long lock_flags;
+ unsigned long lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ ch->ch_c_cflag = tty->termios.c_cflag;
+ ch->ch_c_iflag = tty->termios.c_iflag;
+ ch->ch_c_oflag = tty->termios.c_oflag;
+ ch->ch_c_lflag = tty->termios.c_lflag;
+ ch->ch_startc = tty->termios.c_cc[VSTART];
+ ch->ch_stopc = tty->termios.c_cc[VSTOP];
+
+ dgap_carrier(ch);
+ dgap_param(tty);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+}
+
+
+static void dgap_tty_throttle(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgap_tty_throttle start\n"));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ ch->ch_flags |= (CH_RXBLOCK);
+#if 1
+ dgap_cmdw(ch, RPAUSE, 0, 0);
+#endif
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_throttle finish\n"));
+}
+
+
+static void dgap_tty_unthrottle(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgap_tty_unthrottle start\n"));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ ch->ch_flags &= ~(CH_RXBLOCK);
+
+#if 1
+ dgap_cmdw(ch, RRESUME, 0, 0);
+#endif
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_unthrottle finish\n"));
+}
+
+
+static void dgap_tty_start(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgap_tty_start start\n"));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ dgap_cmdw(ch, RESUMETX, 0, 0);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_start finish\n"));
+}
+
+
+static void dgap_tty_stop(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgap_tty_stop start\n"));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ dgap_cmdw(ch, PAUSETX, 0, 0);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_stop finish\n"));
+}
+
+
+/*
+ * dgap_tty_flush_chars()
+ *
+ * Flush the cook buffer
+ *
+ * Note to self, and any other poor souls who venture here:
+ *
+ * flush in this case DOES NOT mean dispose of the data.
+ * instead, it means "stop buffering and send it if you
+ * haven't already." Just guess how I figured that out... SRW 2-Jun-98
+ *
+ * It is also always called in interrupt context - JAR 8-Sept-99
+ */
+static void dgap_tty_flush_chars(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgap_tty_flush_chars start\n"));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ /* TODO: Do something here */
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_flush_chars finish\n"));
+}
+
+
+
+/*
+ * dgap_tty_flush_buffer()
+ *
+ * Flush Tx buffer (make in == out)
+ */
+static void dgap_tty_flush_buffer(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ ulong lock_flags2;
+ u16 head = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgap_tty_flush_buffer on port: %d start\n", ch->ch_portnum));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ ch->ch_flags &= ~CH_STOP;
+ head = readw(&(ch->ch_bs->tx_head));
+ dgap_cmdw(ch, FLUSHTX, (u16) head, 0);
+ dgap_cmdw(ch, RESUMETX, 0, 0);
+ if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+ }
+ if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_pun.un_flags_wait);
+ }
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ if (waitqueue_active(&tty->write_wait))
+ wake_up_interruptible(&tty->write_wait);
+ tty_wakeup(tty);
+
+ DPR_IOCTL(("dgap_tty_flush_buffer finish\n"));
+}
+
+
+
+/*****************************************************************************
+ *
+ * The IOCTL function and all of its helpers
+ *
+ *****************************************************************************/
+
+/*
+ * dgap_tty_ioctl()
+ *
+ * The usual assortment of ioctl's
+ */
+static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int rc;
+ u16 head = 0;
+ ulong lock_flags = 0;
+ ulong lock_flags2 = 0;
+ void __user *uarg = (void __user *) arg;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return (-ENODEV);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGAP_UNIT_MAGIC)
+ return (-ENODEV);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
+ return (-ENODEV);
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGAP_BOARD_MAGIC)
+ return (-ENODEV);
+
+ DPR_IOCTL(("dgap_tty_ioctl start on port %d - cmd %s (%x), arg %lx\n",
+ ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ if (un->un_open_count <= 0) {
+ DPR_BASIC(("dgap_tty_ioctl - unit not open.\n"));
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(-EIO);
+ }
+
+ switch (cmd) {
+
+ /* Here are all the standard ioctl's that we MUST implement */
+
+ case TCSBRK:
+ /*
+ * TCSBRK is SVID version: non-zero arg --> no break
+ * this behaviour is exploited by tcdrain().
+ *
+ * According to POSIX.1 spec (7.2.2.1.2) breaks should be
+ * between 0.25 and 0.5 seconds so we'll ask for something
+ * in the middle: 0.375 seconds.
+ */
+ rc = tty_check_change(tty);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ if (rc) {
+ return(rc);
+ }
+
+ rc = dgap_wait_for_drain(tty);
+
+ if (rc) {
+ DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ if(((cmd == TCSBRK) && (!arg)) || (cmd == TCSBRKP)) {
+ dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
+ }
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
+
+ return(0);
+
+
+ case TCSBRKP:
+		/*
+		 * Support for the POSIX tcsendbreak() call.
+		 *
+ * According to POSIX.1 spec (7.2.2.1.2) breaks should be
+ * between 0.25 and 0.5 seconds so we'll ask for something
+ * in the middle: 0.375 seconds.
+ */
+ rc = tty_check_change(tty);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ if (rc) {
+ return(rc);
+ }
+
+ rc = dgap_wait_for_drain(tty);
+ if (rc) {
+ DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
+
+ return(0);
+
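+	/*
+	 * From userspace both cases above are normally reached through the
+	 * termios helpers; a minimal sketch (assumes an already-open fd):
+	 *
+	 *	tcdrain(fd);		// TCSBRK with a non-zero arg, drain only
+	 *	tcsendbreak(fd, 0);	// ~0.375 second break, as described above
+	 */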
+ case TIOCSBRK:
+ /*
+ * FEP5 doesn't support turning on a break unconditionally.
+ * The FEP5 device will stop sending a break automatically
+ * after the specified time value that was sent when turning on
+ * the break.
+ */
+ rc = tty_check_change(tty);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ if (rc) {
+ return(rc);
+ }
+
+ rc = dgap_wait_for_drain(tty);
+ if (rc) {
+ DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+
+ dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
+
+ return 0;
+
+ case TIOCCBRK:
+ /*
+ * FEP5 doesn't support turning off a break unconditionally.
+ * The FEP5 device will stop sending a break automatically
+ * after the specified time value that was sent when turning on
+ * the break.
+ */
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return 0;
+
+ case TIOCGSOFTCAR:
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ rc = put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *) arg);
+ return(rc);
+
+ case TIOCSSOFTCAR:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ rc = get_user(arg, (unsigned long __user *) arg);
+ if (rc)
+ return(rc);
+
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+ tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) | (arg ? CLOCAL : 0));
+ dgap_param(tty);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ return(0);
+
+ case TIOCMGET:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(dgap_get_modem_info(ch, uarg));
+
+ case TIOCMBIS:
+ case TIOCMBIC:
+ case TIOCMSET:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(dgap_set_modem_info(tty, cmd, uarg));
+
+ /*
+ * Here are any additional ioctl's that we want to implement
+ */
+
+ case TCFLSH:
+ /*
+ * The linux tty driver doesn't have a flush
+ * input routine for the driver, assuming all backed
+ * up data is in the line disc. buffers. However,
+ * we all know that's not the case. Here, we
+ * act on the ioctl, but then lie and say we didn't
+ * so the line discipline will process the flush
+ * also.
+ */
+ rc = tty_check_change(tty);
+ if (rc) {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(rc);
+ }
+
+ if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) {
+ if (!(un->un_type == DGAP_PRINT)) {
+ head = readw(&(ch->ch_bs->rx_head));
+ writew(head, &(ch->ch_bs->rx_tail));
+ writeb(0, &(ch->ch_bs->orun));
+ }
+ }
+
+ if ((arg == TCOFLUSH) || (arg == TCIOFLUSH)) {
+ ch->ch_flags &= ~CH_STOP;
+ head = readw(&(ch->ch_bs->tx_head));
+ dgap_cmdw(ch, FLUSHTX, (u16) head, 0 );
+ dgap_cmdw(ch, RESUMETX, 0, 0);
+ if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+ }
+ if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_pun.un_flags_wait);
+ }
+ if (waitqueue_active(&tty->write_wait))
+ wake_up_interruptible(&tty->write_wait);
+
+ /* Can't hold any locks when calling tty_wakeup! */
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ tty_wakeup(tty);
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+ }
+
+ /* pretend we didn't recognize this IOCTL */
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_ioctl (LINE:%d) finish on port %d - cmd %s (%x), arg %lx\n",
+ __LINE__, ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
+
+ return(-ENOIOCTLCMD);
+
+ case TCSETSF:
+ case TCSETSW:
+ /*
+ * The linux tty driver doesn't have a flush
+ * input routine for the driver, assuming all backed
+ * up data is in the line disc. buffers. However,
+ * we all know that's not the case. Here, we
+ * act on the ioctl, but then lie and say we didn't
+ * so the line discipline will process the flush
+ * also.
+ */
+ if (cmd == TCSETSF) {
+ /* flush rx */
+ ch->ch_flags &= ~CH_STOP;
+ head = readw(&(ch->ch_bs->rx_head));
+ writew(head, &(ch->ch_bs->rx_tail));
+ }
+
+ /* now wait for all the output to drain */
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ rc = dgap_wait_for_drain(tty);
+ if (rc) {
+ DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+
+ DPR_IOCTL(("dgap_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ ch->ch_portnum, dgap_ioctl_name(cmd), cmd, arg));
+
+ /* pretend we didn't recognize this */
+ return(-ENOIOCTLCMD);
+
+ case TCSETAW:
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ rc = dgap_wait_for_drain(tty);
+ if (rc) {
+ DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+
+ /* pretend we didn't recognize this */
+ return(-ENOIOCTLCMD);
+
+ case TCXONC:
+ /*
+ * The Linux Line Discipline (LD) would do this for us if we
+ * let it, but we have the special firmware options to do this
+ * the "right way" regardless of hardware or software flow
+		 * control so we'll do it ourselves instead of letting the LD
+ * do it.
+ */
+ rc = tty_check_change(tty);
+ if (rc) {
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(rc);
+ }
+
+ DPR_IOCTL(("dgap_ioctl - in TCXONC - %d\n", cmd));
+ switch (arg) {
+
+ case TCOON:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ dgap_tty_start(tty);
+ return(0);
+ case TCOOFF:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ dgap_tty_stop(tty);
+ return(0);
+ case TCION:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ /* Make the ld do it */
+ return(-ENOIOCTLCMD);
+ case TCIOFF:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ /* Make the ld do it */
+ return(-ENOIOCTLCMD);
+ default:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(-EINVAL);
+ }
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(-ENOIOCTLCMD);
+
+ case DIGI_GETA:
+ /* get information for ditty */
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(dgap_tty_digigeta(tty, uarg));
+
+ case DIGI_SETAW:
+ case DIGI_SETAF:
+
+ /* set information for ditty */
+ if (cmd == (DIGI_SETAW)) {
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ rc = dgap_wait_for_drain(tty);
+ if (rc) {
+ DPR_IOCTL(("dgap_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+ DGAP_LOCK(bd->bd_lock, lock_flags);
+ DGAP_LOCK(ch->ch_lock, lock_flags2);
+ }
+ else {
+ tty_ldisc_flush(tty);
+ }
+ /* fall thru */
+
+ case DIGI_SETA:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(dgap_tty_digiseta(tty, uarg));
+
+ case DIGI_GEDELAY:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(dgap_tty_digigetedelay(tty, uarg));
+
+ case DIGI_SEDELAY:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(dgap_tty_digisetedelay(tty, uarg));
+
+ case DIGI_GETCUSTOMBAUD:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(dgap_tty_digigetcustombaud(tty, uarg));
+
+ case DIGI_SETCUSTOMBAUD:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return(dgap_tty_digisetcustombaud(tty, uarg));
+
+ case DIGI_RESET_PORT:
+ dgap_firmware_reset_port(ch);
+ dgap_param(tty);
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+ return 0;
+
+ default:
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_ioctl - in default\n"));
+ DPR_IOCTL(("dgap_tty_ioctl end - cmd %s (%x), arg %lx\n",
+ dgap_ioctl_name(cmd), cmd, arg));
+
+ return(-ENOIOCTLCMD);
+ }
+
+ DGAP_UNLOCK(ch->ch_lock, lock_flags2);
+ DGAP_UNLOCK(bd->bd_lock, lock_flags);
+
+ DPR_IOCTL(("dgap_tty_ioctl end - cmd %s (%x), arg %lx\n",
+ dgap_ioctl_name(cmd), cmd, arg));
+
+ return(0);
+}
diff --git a/drivers/staging/dgap/dgap_tty.h b/drivers/staging/dgap/dgap_tty.h
new file mode 100644
index 00000000000..464a460b6be
--- /dev/null
+++ b/drivers/staging/dgap/dgap_tty.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef __DGAP_TTY_H
+#define __DGAP_TTY_H
+
+#include "dgap_driver.h"
+
+int dgap_tty_register(struct board_t *brd);
+
+int dgap_tty_preinit(void);
+int dgap_tty_init(struct board_t *);
+
+void dgap_tty_post_uninit(void);
+void dgap_tty_uninit(struct board_t *);
+
+void dgap_carrier(struct channel_t *ch);
+void dgap_input(struct channel_t *ch);
+
+
+#endif
diff --git a/drivers/staging/dgap/dgap_types.h b/drivers/staging/dgap/dgap_types.h
new file mode 100644
index 00000000000..eca38c7f359
--- /dev/null
+++ b/drivers/staging/dgap/dgap_types.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef __DGAP_TYPES_H
+#define __DGAP_TYPES_H
+
+#ifndef TRUE
+# define TRUE 1
+#endif
+
+#ifndef FALSE
+# define FALSE 0
+#endif
+
+/* Required for our shared headers! */
+typedef unsigned char uchar;
+
+#endif
diff --git a/drivers/staging/dgap/digi.h b/drivers/staging/dgap/digi.h
new file mode 100644
index 00000000000..651e2e5e93c
--- /dev/null
+++ b/drivers/staging/dgap/digi.h
@@ -0,0 +1,376 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * $Id: digi.h,v 1.1 2009/10/23 14:01:57 markh Exp $
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef __DIGI_H
+#define __DIGI_H
+
+/************************************************************************
+ *** Definitions for Digi ditty(1) command.
+ ************************************************************************/
+
+
+/*
+ * Copyright (c) 1988-96 Digi International Inc., All Rights Reserved.
+ */
+
+/************************************************************************
+ * This module provides application access to special Digi
+ * serial line enhancements which are not standard UNIX(tm) features.
+ ************************************************************************/
+
+#if !defined(TIOCMODG)
+
+#define TIOCMODG ('d'<<8) | 250 /* get modem ctrl state */
+#define TIOCMODS ('d'<<8) | 251 /* set modem ctrl state */
+
+#ifndef TIOCM_LE
+#define TIOCM_LE 0x01 /* line enable */
+#define TIOCM_DTR 0x02 /* data terminal ready */
+#define TIOCM_RTS 0x04 /* request to send */
+#define TIOCM_ST 0x08 /* secondary transmit */
+#define TIOCM_SR 0x10 /* secondary receive */
+#define TIOCM_CTS 0x20 /* clear to send */
+#define TIOCM_CAR 0x40 /* carrier detect */
+#define TIOCM_RNG 0x80 /* ring indicator */
+#define TIOCM_DSR 0x100 /* data set ready */
+#define TIOCM_RI TIOCM_RNG /* ring (alternate) */
+#define TIOCM_CD TIOCM_CAR /* carrier detect (alt) */
+#endif
+
+#endif
+
+#if !defined(TIOCMSET)
+#define TIOCMSET ('d'<<8) | 252 /* set modem ctrl state */
+#define	TIOCMGET	('d'<<8) | 253		/* get modem ctrl state	*/
+#endif
+
+#if !defined(TIOCMBIC)
+#define TIOCMBIC ('d'<<8) | 254 /* set modem ctrl state */
+#define TIOCMBIS ('d'<<8) | 255 /* set modem ctrl state */
+#endif
+
+
+#if !defined(TIOCSDTR)
+#define TIOCSDTR ('e'<<8) | 0 /* set DTR */
+#define TIOCCDTR ('e'<<8) | 1 /* clear DTR */
+#endif
+
+/************************************************************************
+ * Ioctl command arguments for DIGI parameters.
+ ************************************************************************/
+#define DIGI_GETA ('e'<<8) | 94 /* Read params */
+
+#define DIGI_SETA ('e'<<8) | 95 /* Set params */
+#define DIGI_SETAW ('e'<<8) | 96 /* Drain & set params */
+#define DIGI_SETAF ('e'<<8) | 97 /* Drain, flush & set params */
+
+#define DIGI_KME ('e'<<8) | 98 /* Read/Write Host */
+ /* Adapter Memory */
+
+#define DIGI_GETFLOW ('e'<<8) | 99 /* Get startc/stopc flow */
+ /* control characters */
+#define DIGI_SETFLOW ('e'<<8) | 100 /* Set startc/stopc flow */
+ /* control characters */
+#define DIGI_GETAFLOW ('e'<<8) | 101 /* Get Aux. startc/stopc */
+ /* flow control chars */
+#define DIGI_SETAFLOW ('e'<<8) | 102 /* Set Aux. startc/stopc */
+ /* flow control chars */
+
+#define DIGI_GEDELAY ('d'<<8) | 246 /* Get edelay */
+#define DIGI_SEDELAY ('d'<<8) | 247 /* Set edelay */
+
+struct digiflow_t {
+ unsigned char startc; /* flow cntl start char */
+ unsigned char stopc; /* flow cntl stop char */
+};
+
+
+#ifdef FLOW_2200
+#define F2200_GETA ('e'<<8) | 104 /* Get 2x36 flow cntl flags */
+#define F2200_SETAW ('e'<<8) | 105 /* Set 2x36 flow cntl flags */
+#define F2200_MASK 0x03 /* 2200 flow cntl bit mask */
+#define FCNTL_2200 0x01 /* 2x36 terminal flow cntl */
+#define PCNTL_2200 0x02 /* 2x36 printer flow cntl */
+#define F2200_XON 0xf8
+#define P2200_XON 0xf9
+#define F2200_XOFF 0xfa
+#define P2200_XOFF 0xfb
+
+#define FXOFF_MASK 0x03 /* 2200 flow status mask */
+#define RCVD_FXOFF 0x01 /* 2x36 Terminal XOFF rcvd */
+#define RCVD_PXOFF 0x02 /* 2x36 Printer XOFF rcvd */
+#endif
+
+/************************************************************************
+ * Values for digi_flags
+ ************************************************************************/
+#define DIGI_IXON 0x0001 /* Handle IXON in the FEP */
+#define DIGI_FAST 0x0002 /* Fast baud rates */
+#define RTSPACE 0x0004 /* RTS input flow control */
+#define CTSPACE 0x0008 /* CTS output flow control */
+#define DSRPACE 0x0010 /* DSR output flow control */
+#define DCDPACE 0x0020 /* DCD output flow control */
+#define DTRPACE 0x0040 /* DTR input flow control */
+#define DIGI_COOK 0x0080 /* Cooked processing done in FEP */
+#define DIGI_FORCEDCD 0x0100 /* Force carrier */
+#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */
+#define DIGI_AIXON 0x0400 /* Aux flow control in fep */
+#define DIGI_PRINTER 0x0800 /* Hold port open for flow cntrl*/
+#define DIGI_PP_INPUT 0x1000 /* Change parallel port to input*/
+#define DIGI_DTR_TOGGLE 0x2000 /* Support DTR Toggle */
+#define DIGI_422 0x4000 /* for 422/232 selectable panel */
+#define DIGI_RTS_TOGGLE 0x8000 /* Support RTS Toggle */
+
+/************************************************************************
+ * These options are not supported on the comxi.
+ ************************************************************************/
+#define DIGI_COMXI (DIGI_FAST|DIGI_COOK|DSRPACE|DCDPACE|DTRPACE)
+
+#define DIGI_PLEN 28 /* String length */
+#define DIGI_TSIZ 10 /* Terminal string len */
+
+/************************************************************************
+ * Structure used with ioctl commands for DIGI parameters.
+ ************************************************************************/
+struct digi_t {
+ unsigned short digi_flags; /* Flags (see above) */
+ unsigned short digi_maxcps; /* Max printer CPS */
+ unsigned short digi_maxchar; /* Max chars in print queue */
+ unsigned short digi_bufsize; /* Buffer size */
+ unsigned char digi_onlen; /* Length of ON string */
+ unsigned char digi_offlen; /* Length of OFF string */
+ char digi_onstr[DIGI_PLEN]; /* Printer on string */
+ char digi_offstr[DIGI_PLEN]; /* Printer off string */
+ char digi_term[DIGI_TSIZ]; /* terminal string */
+};
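+
+/*
+ * Minimal usage sketch (assumes an already-open tty fd on a Digi port;
+ * error handling trimmed): read the ditty parameters, force carrier,
+ * then write them back after output drains.
+ *
+ *	struct digi_t dp;
+ *
+ *	ioctl(fd, DIGI_GETA, &dp);
+ *	dp.digi_flags |= DIGI_FORCEDCD;
+ *	ioctl(fd, DIGI_SETAW, &dp);	// drain, then set
+ */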
+
+/************************************************************************
+ * KME definitions and structures.
+ ************************************************************************/
+#define RW_IDLE 0 /* Operation complete */
+#define RW_READ 1 /* Read Concentrator Memory */
+#define RW_WRITE 2 /* Write Concentrator Memory */
+
+struct rw_t {
+ unsigned char rw_req; /* Request type */
+ unsigned char rw_board; /* Host Adapter board number */
+ unsigned char rw_conc; /* Concentrator number */
+ unsigned char rw_reserved; /* Reserved for expansion */
+ unsigned long rw_addr; /* Address in concentrator */
+ unsigned short rw_size; /* Read/write request length */
+ unsigned char rw_data[128]; /* Data to read/write */
+};
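+
+/*
+ * Sketch of a DIGI_KME read request (the fd, address and length below are
+ * assumptions for the example only): read 64 bytes of concentrator memory
+ * from board 0, concentrator 1.
+ *
+ *	struct rw_t rw;
+ *
+ *	memset(&rw, 0, sizeof(rw));
+ *	rw.rw_req   = RW_READ;
+ *	rw.rw_board = 0;
+ *	rw.rw_conc  = 1;
+ *	rw.rw_addr  = 0x1000;		// example address
+ *	rw.rw_size  = 64;
+ *	ioctl(fd, DIGI_KME, &rw);	// on success rw_data[] holds the bytes
+ */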
+
+/***********************************************************************
+ * Shrink Buffer and Board Information definitions and structures.
+ ************************************************************************/
+ /* Board type return codes */
+#define PCXI_TYPE 1 /* Board type at the designated port is a PC/Xi */
+#define PCXM_TYPE 2 /* Board type at the designated port is a PC/Xm */
+#define PCXE_TYPE 3 /* Board type at the designated port is a PC/Xe */
+#define MCXI_TYPE 4 /* Board type at the designated port is a MC/Xi */
+#define COMXI_TYPE 5 /* Board type at the designated port is a COM/Xi */
+
+ /* Non-Zero Result codes. */
+#define RESULT_NOBDFND	1	/* No Digi product configured/installed at that port */
+#define RESULT_NODESCT 2 /* A memory descriptor was not obtainable */
+#define RESULT_NOOSSIG 3 /* FEP/OS signature was not detected on the board */
+#define RESULT_TOOSML 4 /* Too small an area to shrink. */
+#define RESULT_NOCHAN 5 /* Channel structure for the board was not found */
+
+struct shrink_buf_struct {
+ unsigned long shrink_buf_vaddr; /* Virtual address of board */
+ unsigned long shrink_buf_phys; /* Physical address of board */
+ unsigned long shrink_buf_bseg; /* Amount of board memory */
+	unsigned long	shrink_buf_hseg;	/* '186 Beginning of Dual-Port	*/
+
+	unsigned long	shrink_buf_lseg;	/* '186 Beginning of freed memory */
+	unsigned long	shrink_buf_mseg;	/* Linear address from start of
+						   dual-port where freed memory
+						   begins, host viewpoint. */
+
+ unsigned long shrink_buf_bdparam; /* Parameter for xxmemon and
+ xxmemoff */
+
+ unsigned long shrink_buf_reserva; /* Reserved */
+ unsigned long shrink_buf_reservb; /* Reserved */
+ unsigned long shrink_buf_reservc; /* Reserved */
+ unsigned long shrink_buf_reservd; /* Reserved */
+
+ unsigned char shrink_buf_result; /* Reason for call failing
+ Zero is Good return */
+ unsigned char shrink_buf_init; /* Non-Zero if it caused an
+ xxinit call. */
+
+ unsigned char shrink_buf_anports; /* Number of async ports */
+ unsigned char shrink_buf_snports; /* Number of sync ports */
+ unsigned char shrink_buf_type; /* Board type 1 = PC/Xi,
+ 2 = PC/Xm,
+ 3 = PC/Xe
+ 4 = MC/Xi
+ 5 = COMX/i */
+ unsigned char shrink_buf_card; /* Card number */
+
+};
+
+/************************************************************************
+ * Structure to get driver status information
+ ************************************************************************/
+struct digi_dinfo {
+ unsigned long dinfo_nboards; /* # boards configured */
+ char dinfo_reserved[12]; /* for future expansion */
+ char dinfo_version[16]; /* driver version */
+};
+
+#define DIGI_GETDD ('d'<<8) | 248 /* get driver info */
+
+/************************************************************************
+ * Structure used with ioctl commands for per-board information
+ *
+ * physsize and memsize differ when board has "windowed" memory
+ ************************************************************************/
+struct digi_info {
+ unsigned long info_bdnum; /* Board number (0 based) */
+ unsigned long info_ioport; /* io port address */
+ unsigned long info_physaddr; /* memory address */
+ unsigned long info_physsize; /* Size of host mem window */
+ unsigned long info_memsize; /* Amount of dual-port mem */
+ /* on board */
+ unsigned short info_bdtype; /* Board type */
+ unsigned short info_nports; /* number of ports */
+ char info_bdstate; /* board state */
+ char info_reserved[7]; /* for future expansion */
+};
+
+#define DIGI_GETBD ('d'<<8) | 249 /* get board info */
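+
+/*
+ * Sketch of a board enumeration loop (assumes the fd is on a node that
+ * accepts these ioctls and that info_bdnum selects the board to query;
+ * error handling trimmed):
+ *
+ *	struct digi_dinfo dd;
+ *	struct digi_info  bi;
+ *	unsigned long b;
+ *
+ *	ioctl(fd, DIGI_GETDD, &dd);		// dd.dinfo_nboards, dd.dinfo_version
+ *	for (b = 0; b < dd.dinfo_nboards; b++) {
+ *		bi.info_bdnum = b;
+ *		ioctl(fd, DIGI_GETBD, &bi);	// bi.info_nports, bi.info_bdstate, ...
+ *	}
+ */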
+
+struct digi_stat {
+ unsigned int info_chan; /* Channel number (0 based) */
+ unsigned int info_brd; /* Board number (0 based) */
+ unsigned long info_cflag; /* cflag for channel */
+ unsigned long info_iflag; /* iflag for channel */
+ unsigned long info_oflag; /* oflag for channel */
+ unsigned long info_mstat; /* mstat for channel */
+ unsigned long info_tx_data; /* tx_data for channel */
+ unsigned long info_rx_data; /* rx_data for channel */
+ unsigned long info_hflow; /* hflow for channel */
+ unsigned long info_reserved[8]; /* for future expansion */
+};
+
+#define DIGI_GETSTAT ('d'<<8) | 244		/* get channel status */
+/************************************************************************
+ *
+ * Structure used with ioctl commands for per-channel information
+ *
+ ************************************************************************/
+struct digi_ch {
+ unsigned long info_bdnum; /* Board number (0 based) */
+ unsigned long info_channel; /* Channel index number */
+ unsigned long info_ch_cflag; /* Channel cflag */
+ unsigned long info_ch_iflag; /* Channel iflag */
+ unsigned long info_ch_oflag; /* Channel oflag */
+ unsigned long info_chsize; /* Channel structure size */
+ unsigned long info_sleep_stat; /* sleep status */
+ dev_t info_dev; /* device number */
+ unsigned char info_initstate; /* Channel init state */
+ unsigned char info_running; /* Channel running state */
+ long reserved[8]; /* reserved for future use */
+};
+
+/*
+* This structure is used with the DIGI_FEPCMD ioctl to
+* tell the driver which port to send the command for.
+*/
+struct digi_cmd {
+ int cmd;
+ int word;
+ int ncmds;
+ int chan; /* channel index (zero based) */
+ int bdid; /* board index (zero based) */
+};
+
+/*
+* info_sleep_stat defines
+*/
+#define INFO_RUNWAIT 0x0001
+#define INFO_WOPEN 0x0002
+#define INFO_TTIOW 0x0004
+#define INFO_CH_RWAIT 0x0008
+#define INFO_CH_WEMPTY 0x0010
+#define INFO_CH_WLOW 0x0020
+#define INFO_XXBUF_BUSY 0x0040
+
+#define DIGI_GETCH	('d'<<8) | 245		/* get channel info */
+
+/* Board type definitions */
+
+#define SUBTYPE 0007
+#define T_PCXI 0000
+#define T_PCXM 0001
+#define T_PCXE 0002
+#define T_PCXR 0003
+#define T_SP 0004
+#define T_SP_PLUS 0005
+# define T_HERC 0000
+# define T_HOU 0001
+# define T_LON 0002
+# define T_CHA 0003
+#define FAMILY 0070
+#define T_COMXI 0000
+#define T_PCXX 0010
+#define T_CX 0020
+#define T_EPC 0030
+#define T_PCLITE 0040
+#define T_SPXX 0050
+#define T_AVXX 0060
+#define T_DXB 0070
+#define T_A2K_4_8 0070
+#define BUSTYPE 0700
+#define T_ISABUS 0000
+#define T_MCBUS 0100
+#define T_EISABUS 0200
+#define T_PCIBUS 0400
+
+/* Board State Definitions */
+
+#define BD_RUNNING 0x0
+#define BD_REASON 0x7f
+#define BD_NOTFOUND 0x1
+#define BD_NOIOPORT 0x2
+#define BD_NOMEM 0x3
+#define BD_NOBIOS 0x4
+#define BD_NOFEP 0x5
+#define BD_FAILED 0x6
+#define BD_ALLOCATED 0x7
+#define BD_TRIBOOT 0x8
+#define BD_BADKME 0x80
+
+#define DIGI_LOOPBACK ('d'<<8) | 252 /* Enable/disable UART internal loopback */
+#define DIGI_SPOLL ('d'<<8) | 254 /* change poller rate */
+
+#define DIGI_SETCUSTOMBAUD _IOW('e', 106, int) /* Set integer baud rate */
+#define DIGI_GETCUSTOMBAUD _IOR('e', 107, int) /* Get integer baud rate */
+#define DIGI_RESET_PORT ('e'<<8) | 93 /* Reset port */
+
+#endif /* DIGI_H */
diff --git a/drivers/staging/dgap/downld.c b/drivers/staging/dgap/downld.c
new file mode 100644
index 00000000000..57dfd6bafcf
--- /dev/null
+++ b/drivers/staging/dgap/downld.c
@@ -0,0 +1,798 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * $Id: downld.c,v 1.6 2009/01/14 14:10:54 markh Exp $
+ */
+
+/*
+** downld.c
+**
+** This is the daemon that sends the fep, bios, and concentrator images
+** from user space to the driver.
+** BUGS:
+** If the file changes in the middle of the download, you probably
+** will get what you deserve.
+**
+*/
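+
+/*
+** Typical invocation (program and device node names are examples only;
+** firmware images default to /lib/firmware/dgap/ unless listed on the
+** command line):
+**
+**	downld /dev/dgap/downld
+**	downld -d /dev/dgap/downld /lib/firmware/dgap/xrfep.bin   (debug: stay in foreground)
+*/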
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/errno.h>
+
+#include "dgap_types.h"
+#include "digi.h"
+#include "dgap_fep5.h"
+
+#include "dgap_downld.h"
+
+#include <string.h>
+#include <malloc.h>
+#include <stddef.h>
+#include <unistd.h>
+
+char *pgm;
+void	myperror(char *s);
+
+/*
+** This structure is used to keep track of the different images available
+** to give to the driver.  It is arranged so that the things that are
+** constants or that have defaults come first in the structure to simplify
+** the table of initializers.
+*/
+struct image_info {
+ short type; /* bios, fep, conc */
+ short family; /* boards this applies to */
+ short subtype; /* subtype */
+ int len; /* size of image */
+ char *image; /* ioctl struct + image */
+ char *name;
+ char *fname; /* filename of binary (i.e. "asfep.bin") */
+ char *pathname; /* pathname to this binary ("/etc/dgap/xrfep.bin"); */
+ time_t mtime; /* Last modification time */
+};
+
+#define IBIOS 0
+#define IFEP 1
+#define ICONC 2
+#define ICONFIG 3
+#define IBAD 4
+
+#define DEFAULT_LOC "/lib/firmware/dgap/"
+
+struct image_info *image_list;
+int nimages, count;
+
+struct image_info images[] = {
+{IBIOS, T_EPC, SUBTYPE, 0, NULL, "EPC/X", "fxbios.bin", DEFAULT_LOC "fxbios.bin", 0 },
+{IFEP, T_EPC, SUBTYPE, 0, NULL, "EPC/X", "fxfep.bin", DEFAULT_LOC "fxfep.bin", 0 },
+{ICONC, T_EPC, SUBTYPE, 0, NULL, "EPC/X", "fxcon.bin", DEFAULT_LOC "fxcon.bin", 0 },
+
+{IBIOS, T_CX, SUBTYPE, 0, NULL, "C/X", "cxbios.bin", DEFAULT_LOC "cxbios.bin", 0 },
+{IFEP, T_CX, SUBTYPE, 0, NULL, "C/X", "cxhost.bin", DEFAULT_LOC "cxhost.bin", 0 },
+
+{IBIOS, T_CX, T_PCIBUS, 0, NULL, "C/X PCI", "cxpbios.bin", DEFAULT_LOC "cxpbios.bin", 0 },
+{IFEP, T_CX, T_PCIBUS, 0, NULL, "C/X PCI", "cxpfep.bin", DEFAULT_LOC "cxpfep.bin", 0 },
+
+{ICONC, T_CX, SUBTYPE, 0, NULL, "C/X", "cxcon.bin", DEFAULT_LOC "cxcon.bin", 0 },
+{ICONC, T_CX, SUBTYPE, 0, NULL, "C/X", "ibmcxcon.bin", DEFAULT_LOC "ibmcxcon.bin", 0 },
+{ICONC, T_CX, SUBTYPE, 0, NULL, "C/X", "ibmencon.bin", DEFAULT_LOC "ibmencon.bin", 0 },
+
+{IBIOS, FAMILY, T_PCXR, 0, NULL, "PCXR", "xrbios.bin", DEFAULT_LOC "xrbios.bin", 0 },
+{IFEP, FAMILY, T_PCXR, 0, NULL, "PCXR", "xrfep.bin", DEFAULT_LOC "xrfep.bin", 0 },
+
+{IBIOS, T_PCLITE, SUBTYPE, 0, NULL, "X/em", "sxbios.bin", DEFAULT_LOC "sxbios.bin", 0 },
+{IFEP, T_PCLITE, SUBTYPE, 0, NULL, "X/em", "sxfep.bin", DEFAULT_LOC "sxfep.bin", 0 },
+
+{IBIOS, T_EPC, T_PCIBUS, 0, NULL, "PCI", "pcibios.bin", DEFAULT_LOC "pcibios.bin", 0 },
+{IFEP, T_EPC, T_PCIBUS, 0, NULL, "PCI", "pcifep.bin", DEFAULT_LOC "pcifep.bin", 0 },
+{ICONFIG, 0, 0, 0, NULL, NULL, "dgap.conf", "/etc/dgap.conf", 0 },
+
+/* IBAD/NULL entry indicating end-of-table */
+
+{IBAD, 0, 0, 0, NULL, NULL, NULL, NULL, 0 }
+
+} ;
+
+int errorprint = 1;
+int nodldprint = 1;
+int debugflag;
+int fd;
+
+struct downld_t *ip; /* Image pointer in current image */
+struct downld_t *dp; /* conc. download */
+
+
+/*
+ * The same for either the FEP or the BIOS.
+ * Append the downldio header, issue the ioctl, then free
+ * the buffer. Not horribly CPU efficient, but quite RAM efficient.
+ */
+
+void squirt(int req_type, int bdid, struct image_info *ii)
+{
+ struct downldio *dliop;
+ int size_buf;
+ int sfd;
+ struct stat sb;
+
+ /*
+ * If this binary comes from a file, stat it to see how
+ * large it is. Yes, we intentionally do this each
+	 * time, because the binary may change between loads.
+ */
+
+ if (ii->pathname) {
+ sfd = open(ii->pathname, O_RDONLY);
+
+ if (sfd < 0 ) {
+ myperror(ii->pathname);
+ goto squirt_end;
+ }
+
+ if (fstat(sfd, &sb) == -1 ) {
+ myperror(ii->pathname);
+ goto squirt_end;
+ }
+
+ ii->len = sb.st_size ;
+ }
+
+ size_buf = ii->len + sizeof(struct downldio);
+
+ /*
+ * This buffer will be freed at the end of this function. It is
+	 * not persistent and needs to stay around only long enough for the
+	 * download to happen.
+ */
+ dliop = (struct downldio *) malloc(size_buf);
+
+ if (dliop == NULL) {
+ fprintf(stderr,"%s: can't get %d bytes of memory; aborting\n",
+ pgm, size_buf);
+ exit (1);
+ }
+
+ /* Now, stick the image in fepimage. This can come from either
+ * the compiled-in image or from the filesystem.
+ */
+ if (ii->pathname)
+ read(sfd, dliop->image.fi.fepimage, ii->len);
+ else
+ memcpy(dliop ->image.fi.fepimage, ii->image, ii->len);
+
+ dliop->req_type = req_type;
+ dliop->bdid = bdid;
+
+ dliop->image.fi.len = ii->len;
+
+ if (debugflag)
+ printf("sending %d bytes of %s %s from %s\n",
+ ii->len,
+ (ii->type == IFEP) ? "FEP" : (ii->type == IBIOS) ? "BIOS" : "CONFIG",
+ ii->name ? ii->name : "",
+ (ii->pathname) ? ii->pathname : "internal image" );
+
+ if (ioctl(fd, DIGI_DLREQ_SET, (char *) dliop) == -1) {
+ if(errorprint) {
+ fprintf(stderr,
+ "%s: warning - download ioctl failed\n",pgm);
+ errorprint = 0;
+ }
+ sleep(2);
+ }
+
+squirt_end:
+
+ if (ii->pathname) {
+ close(sfd);
+ }
+ free(dliop);
+}
+
+
+/*
+ * See if we need to reload the download image in core
+ *
+ */
+void consider_file_rescan(struct image_info *ii)
+{
+ int sfd ;
+ int len ;
+ struct stat sb;
+
+ /* This operation only makes sense when we're working from a file */
+
+ if (ii->pathname) {
+
+ sfd = open (ii->pathname, O_RDONLY) ;
+ if (sfd < 0 ) {
+ myperror(ii->pathname);
+ exit(1) ;
+ }
+
+ if( fstat(sfd,&sb) == -1 ) {
+ myperror(ii->pathname);
+ exit(1);
+ }
+
+ /* If the file hasn't changed since we last did this,
+ * and we have not done a free() on the image, bail
+ */
+ if (ii->image && (sb.st_mtime == ii->mtime))
+ goto end_rescan;
+
+ ii->len = len = sb.st_size ;
+
+ /* Record the timestamp of the file */
+ ii->mtime = sb.st_mtime;
+
+ /* image should be NULL unless there is an image malloced
+ * in already. Before we malloc again, make sure we don't
+ * have a memory leak.
+ */
+ if ( ii->image ) {
+ free( ii->image );
+ /* ii->image = NULL; */ /* not necessary */
+ }
+
+ /* This image will be kept only long enough for the
+ * download to happen. After sending the last block,
+ * it will be freed
+ */
+ ii->image = malloc(len) ;
+
+ if (ii->image == NULL) {
+ fprintf(stderr,
+ "%s: can't get %d bytes of memory; aborting\n",
+ pgm, len);
+ exit (1);
+ }
+
+ if (read(sfd, ii->image, len) < len) {
+ fprintf(stderr,"%s: read error on %s; aborting\n",
+ pgm, ii->pathname);
+ exit (1);
+ }
+
+end_rescan:
+ close(sfd);
+
+ }
+}
+
+/*
+ * Scan for images to match the driver requests
+ */
+
+struct image_info * find_conc_image()
+{
+ int x ;
+ struct image_info *i = NULL ;
+
+ for ( x = 0; x < nimages; x++ ) {
+ i=&image_list[x];
+
+ if(i->type != ICONC)
+ continue;
+
+ consider_file_rescan(i) ;
+
+ ip = (struct downld_t *) image_list[x].image;
+ if (ip == NULL) continue;
+
+ /*
+ * When I removed Clusterport, I kept only the code that I
+ * was SURE wasn't ClusterPort. We may not need the next two
+ * lines of code.
+ */
+ if ((dp->dl_type != 'P' ) && ( ip->dl_srev == dp->dl_srev ))
+ return i;
+ }
+ return NULL ;
+}
+
+
+int main(int argc, char **argv)
+{
+ struct downldio dlio;
+ int offset, bsize;
+ int x;
+ char *down, *image, *fname;
+ struct image_info *ii;
+
+ pgm = argv[0];
+ dp = &dlio.image.dl; /* conc. download */
+
+ while((argc > 2) && !strcmp(argv[1],"-d")) {
+ debugflag++ ;
+ argc-- ;
+ argv++ ;
+ }
+
+ if(argc < 2) {
+ fprintf(stderr,
+ "usage: %s download-device [image-file] ...\n",
+ pgm);
+ exit(1);
+ }
+
+
+
+ /*
+ * Daemonize, unless debugging is turned on.
+ */
+ if (debugflag == 0) {
+ switch (fork())
+ {
+ case 0:
+ break;
+
+ case -1:
+ return 1;
+
+ default:
+ return 0;
+ }
+
+ setsid();
+
+ /*
+ * The child no longer needs "stdin", "stdout", or "stderr",
+ * and should not block processes waiting for them to close.
+ */
+ fclose(stdin);
+ fclose(stdout);
+ fclose(stderr);
+
+ }
+
+ while (1) {
+ if( (fd = open(argv[1], O_RDWR)) == -1 ) {
+ sleep(1);
+ }
+ else
+ break;
+ }
+
+ /*
+ ** create a list of images to search through when trying to match
+ ** requests from the driver. Put images from the command line in
+ ** the list before built in images so that the command line images
+ ** can override the built in ones.
+ */
+
+ /* allocate space for the list */
+
+ nimages = argc - 2;
+
+ /* count the number of default list entries */
+
+ for (count = 0; images[count].type != IBAD; ++count) ;
+
+ nimages += count;
+
+ /* Really should just remove the variable "image_list".... robertl */
+ image_list = images ;
+
+ /* get the images from the command line */
+ for(x = 2; x < argc; x++) {
+ int xx;
+
+ /*
+ * strip off any leading path information for
+ * determining file type
+ */
+ if( (fname = strrchr(argv[x],'/')) == NULL)
+ fname = argv[x];
+ else
+ fname++; /* skip the slash */
+
+ for (xx = 0; xx < count; xx++) {
+ if (strcmp(fname, images[xx].fname) == 0 ) {
+ images[xx].pathname = argv[x];
+
+ /* image should be NULL until */
+ /* space is malloced */
+ images[xx].image = NULL ;
+ }
+ }
+ }
+
+ sleep(3);
+
+ /*
+ ** Endless loop: get a request from the fep, and service that request.
+ */
+ for(;;) {
+ /* get the request */
+ if (debugflag)
+ printf("b4 get ioctl...");
+
+ if (ioctl(fd,DIGI_DLREQ_GET, &dlio) == -1 ) {
+ if (errorprint) {
+ fprintf(stderr,
+ "%s: warning - download ioctl failed\n",
+ pgm);
+ errorprint = 0;
+ }
+ sleep(2);
+ } else {
+ if (debugflag)
+ printf("dlio.req_type is %d bd %d\n",
+ dlio.req_type,dlio.bdid);
+
+ switch(dlio.req_type) {
+ case DLREQ_BIOS:
+ /*
+ ** find the bios image for this type
+ */
+ for ( x = 0; x < nimages; x++ ) {
+ if(image_list[x].type != IBIOS)
+ continue;
+
+ if ((dlio.image.fi.type & FAMILY) ==
+ image_list[x].family) {
+
+ if ( image_list[x].family == T_CX ) {
+ if ((dlio.image.fi.type & BUSTYPE)
+ == T_PCIBUS ) {
+ if ( image_list[x].subtype
+ == T_PCIBUS )
+ break;
+ }
+ else {
+ break;
+ }
+ }
+ else if ( image_list[x].family == T_EPC ) {
+ /* If subtype of image is T_PCIBUS, it is */
+ /* a PCI EPC image, so the board must */
+ /* have bus type T_PCIBUS to match */
+ if ((dlio.image.fi.type & BUSTYPE)
+ == T_PCIBUS ) {
+ if ( image_list[x].subtype
+ == T_PCIBUS )
+ break;
+ }
+ else {
+ /* NON PCI EPC doesn't use PCI image */
+ if ( image_list[x].subtype
+ != T_PCIBUS )
+ break;
+ }
+ }
+ else
+ break;
+ }
+ else if ((dlio.image.fi.type & SUBTYPE) == image_list[x].subtype) {
+ /* PCXR board will break out of the loop here */
+ if ( image_list[x].subtype == T_PCXR ) {
+ break;
+ }
+ }
+ }
+
+ if ( x >= nimages) {
+ /*
+ ** no valid images exist
+ */
+ if(nodldprint) {
+ fprintf(stderr,
+ "%s: cannot find correct BIOS image\n",
+ pgm);
+ nodldprint = 0;
+ }
+ dlio.image.fi.type = -1;
+ if (ioctl(fd, DIGI_DLREQ_SET, &dlio) == -1) {
+ if (errorprint) {
+ fprintf(stderr,
+ "%s: warning - download ioctl failed\n",
+ pgm);
+ errorprint = 0;
+ }
+ sleep(2);
+ }
+ break;
+ }
+ squirt(dlio.req_type, dlio.bdid, &image_list[x]);
+ break ;
+
+ case DLREQ_FEP:
+ /*
+ ** find the fep image for this type
+ */
+ for ( x = 0; x < nimages; x++ ) {
+ if(image_list[x].type != IFEP)
+ continue;
+ if( (dlio.image.fi.type & FAMILY) ==
+ image_list[x].family ) {
+ if ( image_list[x].family == T_CX ) {
+ /* C/X PCI board */
+ if ((dlio.image.fi.type & BUSTYPE)
+ == T_PCIBUS ) {
+ if ( image_list[x].subtype
+ == T_PCIBUS )
+ break;
+ }
+ else {
+ /* Regular CX */
+ break;
+ }
+ }
+ else if ( image_list[x].family == T_EPC ) {
+ /* If subtype of image is T_PCIBUS, it is */
+ /* a PCI EPC image, so the board must */
+ /* have bus type T_PCIBUS to match */
+ if ((dlio.image.fi.type & BUSTYPE)
+ == T_PCIBUS ) {
+ if ( image_list[x].subtype
+ == T_PCIBUS )
+ break;
+ }
+ else {
+ /* NON PCI EPC doesn't use PCI image */
+ if ( image_list[x].subtype
+ != T_PCIBUS )
+ break;
+ }
+ }
+ else
+ break;
+ }
+ else if ((dlio.image.fi.type & SUBTYPE) == image_list[x].subtype) {
+ /* PCXR board will break out of the loop here */
+ if ( image_list[x].subtype == T_PCXR ) {
+ break;
+ }
+ }
+ }
+
+ if ( x >= nimages) {
+ /*
+ ** no valid images exist
+ */
+ if(nodldprint) {
+ fprintf(stderr,
+ "%s: cannot find correct FEP image\n",
+ pgm);
+ nodldprint = 0;
+ }
+ dlio.image.fi.type=-1;
+ if( ioctl(fd,DIGI_DLREQ_SET,&dlio) == -1 ) {
+ if(errorprint) {
+ fprintf(stderr,
+ "%s: warning - download ioctl failed\n",
+ pgm);
+ errorprint=0;
+ }
+ sleep(2);
+ }
+ break;
+ }
+ squirt(dlio.req_type, dlio.bdid, &image_list[x]);
+ break;
+
+ case DLREQ_DEVCREATE:
+ {
+ char string[1024];
+#if 0
+ sprintf(string, "%s /proc/dgap/%d/mknod", DEFSHELL, dlio.bdid);
+#endif
+ sprintf(string, "%s /usr/sbin/dgap_updatedevs %d", DEFSHELL, dlio.bdid);
+ system(string);
+
+ if (debugflag)
+ printf("Created Devices.\n");
+ if (ioctl(fd, DIGI_DLREQ_SET, &dlio) == -1 ) {
+ if(errorprint) {
+ fprintf(stderr, "%s: warning - DEVCREATE ioctl failed\n",pgm);
+ errorprint = 0;
+ }
+ sleep(2);
+ }
+ if (debugflag)
+ printf("After ioctl set - Created Device.\n");
+ }
+
+ break;
+
+ case DLREQ_CONFIG:
+ for ( x = 0; x < nimages; x++ ) {
+ if(image_list[x].type != ICONFIG)
+ continue;
+ else
+ break;
+ }
+
+ if ( x >= nimages) {
+ /*
+ ** no valid images exist
+ */
+ if(nodldprint) {
+ fprintf(stderr,
+ "%s: cannot find correct CONFIG image\n",
+ pgm);
+ nodldprint = 0;
+ }
+ dlio.image.fi.type=-1;
+ if (ioctl(fd, DIGI_DLREQ_SET, &dlio) == -1 ) {
+ if(errorprint) {
+ fprintf(stderr,
+ "%s: warning - download ioctl failed\n",
+ pgm);
+ errorprint=0;
+ }
+ sleep(2);
+ }
+ break;
+ }
+
+ squirt(dlio.req_type, dlio.bdid, &image_list[x]);
+ break;
+
+ case DLREQ_CONC:
+ /*
+ ** find the image needed for this download
+ */
+ if ( dp->dl_seq == 0 ) {
+ /*
+ ** find image for hardware rev range
+ */
+ for ( x = 0; x < nimages; x++ ) {
+ ii=&image_list[x];
+
+ if(image_list[x].type != ICONC)
+ continue;
+
+ consider_file_rescan(ii) ;
+
+ ip = (struct downld_t *) image_list[x].image;
+ if (ip == NULL) continue;
+
+ /*
+ * When I removed Clusterport, I kept only the
+ * code that I was SURE wasn't ClusterPort.
+ * We may not need the next four lines of code.
+ */
+
+ if ((dp->dl_type != 'P' ) &&
+ (ip->dl_lrev <= dp->dl_lrev ) &&
+ ( dp->dl_lrev <= ip->dl_hrev))
+ break;
+ }
+
+ if ( x >= nimages ) {
+ /*
+ ** No valid images exist
+ */
+ if(nodldprint) {
+ fprintf(stderr,
+ "%s: cannot find correct download image %d\n",
+ pgm, dp->dl_lrev);
+ nodldprint=0;
+ }
+ continue;
+ }
+
+ } else {
+ /*
+ ** find image version required
+ */
+ if ((ii = find_conc_image()) == NULL ) {
+ /*
+ ** No valid images exist
+ */
+ fprintf(stderr,
+ "%s: can't find rest of download image??\n",
+ pgm);
+ continue;
+ }
+ }
+
+ /*
+ ** download block of image
+ */
+
+ offset = 1024 * dp->dl_seq;
+
+ /*
+ ** test if block requested within image
+ */
+ if ( offset < ii->len ) {
+
+ /*
+ ** if it is, determine block size, set segment,
+ ** set size, set pointers, and copy block
+ */
+ if (( bsize = ii->len - offset ) > 1024 )
+ bsize = 1024;
+
+ /*
+ ** copy image version info to download area
+ */
+ dp->dl_srev = ip->dl_srev;
+ dp->dl_lrev = ip->dl_lrev;
+ dp->dl_hrev = ip->dl_hrev;
+
+ dp->dl_seg = (64 * dp->dl_seq) + ip->dl_seg;
+ dp->dl_size = bsize;
+
+ down = (char *)&dp->dl_data[0];
+ image = (char *)((char *)ip + offset);
+
+ memcpy(down, image, bsize);
+ }
+ else {
+ /*
+ ** Image has been downloaded, set segment and
+ ** size to indicate no more blocks
+ */
+ dp->dl_seg = ip->dl_seg;
+ dp->dl_size = 0;
+
+ /* Now, we can release the concentrator */
+ /* image from memory if we're running */
+ /* from filesystem images */
+
+ if (ii->pathname)
+ if (ii->image) {
+ free(ii->image);
+ ii->image = NULL ;
+ }
+ }
+
+ if (debugflag)
+ printf(
+ "sending conc dl section %d to %s from %s\n",
+ dp->dl_seq, ii->name,
+ ii->pathname ? ii->pathname : "Internal Image");
+
+ if (ioctl(fd, DIGI_DLREQ_SET, &dlio) == -1 ) {
+ if (errorprint) {
+ fprintf(stderr,
+ "%s: warning - download ioctl failed\n",
+ pgm);
+ errorprint=0;
+ }
+ sleep(2);
+ }
+ break;
+ } /* switch */
+ }
+ if (debugflag > 1) {
+ printf("pausing: "); fflush(stdout);
+ fflush(stdin);
+ while(getchar() != '\n');
+ printf("continuing\n");
+ }
+ }
+}
+
+/*
+** myperror()
+**
+** Same as normal perror(), but places the program name at the beginning
+** of the message.
+*/
+void myperror(char *s)
+{
+ fprintf(stderr,"%s: %s: %s.\n",pgm, s, strerror(errno));
+}
diff --git a/drivers/staging/dgnc/Kconfig b/drivers/staging/dgnc/Kconfig
new file mode 100644
index 00000000000..032c2a79523
--- /dev/null
+++ b/drivers/staging/dgnc/Kconfig
@@ -0,0 +1,6 @@
+config DGNC
+ tristate "Digi Neo and Classic PCI Products"
+ default n
+ depends on TTY && PCI
+ ---help---
+ Driver for the Digi International Neo and Classic PCI based product line.
diff --git a/drivers/staging/dgnc/Makefile b/drivers/staging/dgnc/Makefile
new file mode 100644
index 00000000000..888c4334236
--- /dev/null
+++ b/drivers/staging/dgnc/Makefile
@@ -0,0 +1,7 @@
+EXTRA_CFLAGS += -DDG_NAME=\"dgnc-1.3-16\" -DDG_PART=\"40002369_F\"
+
+obj-$(CONFIG_DGNC) += dgnc.o
+
+dgnc-objs := dgnc_cls.o dgnc_driver.o\
+ dgnc_mgmt.o dgnc_neo.o\
+ dgnc_trace.o dgnc_tty.o dgnc_sysfs.o
diff --git a/drivers/staging/dgnc/TODO b/drivers/staging/dgnc/TODO
new file mode 100644
index 00000000000..1ff2d1874aa
--- /dev/null
+++ b/drivers/staging/dgnc/TODO
@@ -0,0 +1,17 @@
+* remove kzalloc casts
+* checkpatch fixes
+* sparse fixes
+* fix use of sizeof(). Example: replace sizeof(struct board_t)
+  with sizeof(*brd) and remove sizeof(char)
+* change name of board_t to dgnc_board
+* split chained assignments into two assignments on two lines;
+  don't use two equals signs in one statement
+* remove unnecessary comments
+* remove unnecessary error messages. Example: kzalloc() prints its
+  own error message, so adding an extra one is redundant.
+* use goto statements for error handling when appropriate
+* there is a lot of unnecessary code in the driver. It was
+  originally a standalone driver. Remove unneeded code.
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
+Cc: Lidza Louina <lidza.louina@gmail.com>
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
new file mode 100644
index 00000000000..117e1580824
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_cls.c
@@ -0,0 +1,1409 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h> /* For jiffies, task states */
+#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
+#include <linux/delay.h> /* For udelay */
+#include <asm/io.h> /* For read[bwl]/write[bwl] */
+#include <linux/serial.h> /* For struct async_serial */
+#include <linux/serial_reg.h> /* For the various UART offsets */
+#include <linux/pci.h>
+
+#include "dgnc_driver.h" /* Driver main header file */
+#include "dgnc_cls.h"
+#include "dgnc_tty.h"
+#include "dgnc_trace.h"
+
+static inline void cls_parse_isr(struct board_t *brd, uint port);
+static inline void cls_clear_break(struct channel_t *ch, int force);
+static inline void cls_set_cts_flow_control(struct channel_t *ch);
+static inline void cls_set_rts_flow_control(struct channel_t *ch);
+static inline void cls_set_ixon_flow_control(struct channel_t *ch);
+static inline void cls_set_ixoff_flow_control(struct channel_t *ch);
+static inline void cls_set_no_output_flow_control(struct channel_t *ch);
+static inline void cls_set_no_input_flow_control(struct channel_t *ch);
+static void cls_parse_modem(struct channel_t *ch, uchar signals);
+static void cls_tasklet(unsigned long data);
+static void cls_vpd(struct board_t *brd);
+static void cls_uart_init(struct channel_t *ch);
+static void cls_uart_off(struct channel_t *ch);
+static int cls_drain(struct tty_struct *tty, uint seconds);
+static void cls_param(struct tty_struct *tty);
+static void cls_assert_modem_signals(struct channel_t *ch);
+static void cls_flush_uart_write(struct channel_t *ch);
+static void cls_flush_uart_read(struct channel_t *ch);
+static void cls_disable_receiver(struct channel_t *ch);
+static void cls_enable_receiver(struct channel_t *ch);
+static void cls_send_break(struct channel_t *ch, int msecs);
+static void cls_send_start_character(struct channel_t *ch);
+static void cls_send_stop_character(struct channel_t *ch);
+static void cls_copy_data_from_uart_to_queue(struct channel_t *ch);
+static void cls_copy_data_from_queue_to_uart(struct channel_t *ch);
+static uint cls_get_uart_bytes_left(struct channel_t *ch);
+static void cls_send_immediate_char(struct channel_t *ch, unsigned char);
+static irqreturn_t cls_intr(int irq, void *voidbrd);
+
+struct board_ops dgnc_cls_ops = {
+ .tasklet = cls_tasklet,
+ .intr = cls_intr,
+ .uart_init = cls_uart_init,
+ .uart_off = cls_uart_off,
+ .drain = cls_drain,
+ .param = cls_param,
+ .vpd = cls_vpd,
+ .assert_modem_signals = cls_assert_modem_signals,
+ .flush_uart_write = cls_flush_uart_write,
+ .flush_uart_read = cls_flush_uart_read,
+ .disable_receiver = cls_disable_receiver,
+ .enable_receiver = cls_enable_receiver,
+ .send_break = cls_send_break,
+ .send_start_character = cls_send_start_character,
+ .send_stop_character = cls_send_stop_character,
+ .copy_data_from_queue_to_uart = cls_copy_data_from_queue_to_uart,
+ .get_uart_bytes_left = cls_get_uart_bytes_left,
+ .send_immediate_char = cls_send_immediate_char
+};
+
+
+static inline void cls_set_cts_flow_control(struct channel_t *ch)
+{
+ uchar lcrb = readb(&ch->ch_cls_uart->lcr);
+ uchar ier = readb(&ch->ch_cls_uart->ier);
+ uchar isr_fcr = 0;
+
+ DPR_PARAM(("Setting CTSFLOW\n"));
+
+ /*
+ * The Enhanced Register Set may only be accessed when
+ * the Line Control Register is set to 0xBFh.
+ */
+ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
+
+ isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Turn on CTS flow control, turn off IXON flow control */
+ isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_CTSDSR);
+ isr_fcr &= ~(UART_EXAR654_EFR_IXON);
+
+ writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
+
+ /* Write old LCR value back out, which turns enhanced access off */
+ writeb(lcrb, &ch->ch_cls_uart->lcr);
+
+ /* Enable interrupts for CTS flow, turn off interrupts for received XOFF chars */
+ ier |= (UART_EXAR654_IER_CTSDSR);
+ ier &= ~(UART_EXAR654_IER_XOFF);
+ writeb(ier, &ch->ch_cls_uart->ier);
+
+ /* Set the usual FIFO values */
+ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_56 |
+ UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
+ &ch->ch_cls_uart->isr_fcr);
+
+ ch->ch_t_tlevel = 16;
+
+}
+
+
+static inline void cls_set_ixon_flow_control(struct channel_t *ch)
+{
+ uchar lcrb = readb(&ch->ch_cls_uart->lcr);
+ uchar ier = readb(&ch->ch_cls_uart->ier);
+ uchar isr_fcr = 0;
+
+ DPR_PARAM(("Setting IXON FLOW\n"));
+
+ /*
+ * The Enhanced Register Set may only be accessed when
+ * the Line Control Register is set to 0xBFh.
+ */
+ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
+
+ isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Turn on IXON flow control, turn off CTS flow control */
+ isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_IXON);
+ isr_fcr &= ~(UART_EXAR654_EFR_CTSDSR);
+
+ writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
+
+ /* Now set our current start/stop chars while in enhanced mode */
+ writeb(ch->ch_startc, &ch->ch_cls_uart->mcr);
+ writeb(0, &ch->ch_cls_uart->lsr);
+ writeb(ch->ch_stopc, &ch->ch_cls_uart->msr);
+ writeb(0, &ch->ch_cls_uart->spr);
+
+ /* Write old LCR value back out, which turns enhanced access off */
+ writeb(lcrb, &ch->ch_cls_uart->lcr);
+
+ /* Disable interrupts for CTS flow, turn on interrupts for received XOFF chars */
+ ier &= ~(UART_EXAR654_IER_CTSDSR);
+ ier |= (UART_EXAR654_IER_XOFF);
+ writeb(ier, &ch->ch_cls_uart->ier);
+
+ /* Set the usual FIFO values */
+ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 |
+ UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
+ &ch->ch_cls_uart->isr_fcr);
+
+}
+
+
+static inline void cls_set_no_output_flow_control(struct channel_t *ch)
+{
+ uchar lcrb = readb(&ch->ch_cls_uart->lcr);
+ uchar ier = readb(&ch->ch_cls_uart->ier);
+ uchar isr_fcr = 0;
+
+ DPR_PARAM(("Unsetting Output FLOW\n"));
+
+ /*
+ * The Enhanced Register Set may only be accessed when
+ * the Line Control Register is set to 0xBFh.
+ */
+ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
+
+ isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Turn off IXON flow control, turn off CTS flow control */
+ isr_fcr |= (UART_EXAR654_EFR_ECB);
+ isr_fcr &= ~(UART_EXAR654_EFR_CTSDSR | UART_EXAR654_EFR_IXON);
+
+ writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
+
+ /* Write old LCR value back out, which turns enhanced access off */
+ writeb(lcrb, &ch->ch_cls_uart->lcr);
+
+ /* Disable interrupts for CTS flow, turn off interrupts for received XOFF chars */
+ ier &= ~(UART_EXAR654_IER_CTSDSR);
+ ier &= ~(UART_EXAR654_IER_XOFF);
+ writeb(ier, &ch->ch_cls_uart->ier);
+
+ /* Set the usual FIFO values */
+ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 |
+ UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
+ &ch->ch_cls_uart->isr_fcr);
+
+ ch->ch_r_watermark = 0;
+ ch->ch_t_tlevel = 16;
+ ch->ch_r_tlevel = 16;
+
+}
+
+
+static inline void cls_set_rts_flow_control(struct channel_t *ch)
+{
+ uchar lcrb = readb(&ch->ch_cls_uart->lcr);
+ uchar ier = readb(&ch->ch_cls_uart->ier);
+ uchar isr_fcr = 0;
+
+ DPR_PARAM(("Setting RTSFLOW\n"));
+
+ /*
+ * The Enhanced Register Set may only be accessed when
+ * the Line Control Register is set to 0xBFh.
+ */
+ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
+
+ isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Turn on RTS flow control, turn off IXOFF flow control */
+ isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_RTSDTR);
+ isr_fcr &= ~(UART_EXAR654_EFR_IXOFF);
+
+ writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
+
+ /* Write old LCR value back out, which turns enhanced access off */
+ writeb(lcrb, &ch->ch_cls_uart->lcr);
+
+ /* Enable interrupts for RTS flow */
+ ier |= (UART_EXAR654_IER_RTSDTR);
+ writeb(ier, &ch->ch_cls_uart->ier);
+
+ /* Set the usual FIFO values */
+ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_56 |
+ UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
+ &ch->ch_cls_uart->isr_fcr);
+
+
+ ch->ch_r_watermark = 4;
+ ch->ch_r_tlevel = 8;
+
+}
+
+
+static inline void cls_set_ixoff_flow_control(struct channel_t *ch)
+{
+ uchar lcrb = readb(&ch->ch_cls_uart->lcr);
+ uchar ier = readb(&ch->ch_cls_uart->ier);
+ uchar isr_fcr = 0;
+
+ DPR_PARAM(("Setting IXOFF FLOW\n"));
+
+ /*
+ * The Enhanced Register Set may only be accessed when
+ * the Line Control Register is set to 0xBFh.
+ */
+ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
+
+ isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Turn on IXOFF flow control, turn off RTS flow control */
+ isr_fcr |= (UART_EXAR654_EFR_ECB | UART_EXAR654_EFR_IXOFF);
+ isr_fcr &= ~(UART_EXAR654_EFR_RTSDTR);
+
+ writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
+
+ /* Now set our current start/stop chars while in enhanced mode */
+ writeb(ch->ch_startc, &ch->ch_cls_uart->mcr);
+ writeb(0, &ch->ch_cls_uart->lsr);
+ writeb(ch->ch_stopc, &ch->ch_cls_uart->msr);
+ writeb(0, &ch->ch_cls_uart->spr);
+
+ /* Write old LCR value back out, which turns enhanced access off */
+ writeb(lcrb, &ch->ch_cls_uart->lcr);
+
+ /* Disable interrupts for RTS flow */
+ ier &= ~(UART_EXAR654_IER_RTSDTR);
+ writeb(ier, &ch->ch_cls_uart->ier);
+
+ /* Set the usual FIFO values */
+ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 |
+ UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
+ &ch->ch_cls_uart->isr_fcr);
+
+}
+
+
+static inline void cls_set_no_input_flow_control(struct channel_t *ch)
+{
+ uchar lcrb = readb(&ch->ch_cls_uart->lcr);
+ uchar ier = readb(&ch->ch_cls_uart->ier);
+ uchar isr_fcr = 0;
+
+ DPR_PARAM(("Unsetting Input FLOW\n"));
+
+ /*
+ * The Enhanced Register Set may only be accessed when
+ * the Line Control Register is set to 0xBFh.
+ */
+ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
+
+ isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Turn off IXOFF flow control, turn off RTS flow control */
+ isr_fcr |= (UART_EXAR654_EFR_ECB);
+ isr_fcr &= ~(UART_EXAR654_EFR_RTSDTR | UART_EXAR654_EFR_IXOFF);
+
+ writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
+
+ /* Write old LCR value back out, which turns enhanced access off */
+ writeb(lcrb, &ch->ch_cls_uart->lcr);
+
+ /* Disable interrupts for RTS flow */
+ ier &= ~(UART_EXAR654_IER_RTSDTR);
+ writeb(ier, &ch->ch_cls_uart->ier);
+
+ /* Set the usual FIFO values */
+ writeb((UART_FCR_ENABLE_FIFO), &ch->ch_cls_uart->isr_fcr);
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_16654_FCR_RXTRIGGER_16 |
+ UART_16654_FCR_TXTRIGGER_16 | UART_FCR_CLEAR_RCVR),
+ &ch->ch_cls_uart->isr_fcr);
+
+ ch->ch_t_tlevel = 16;
+ ch->ch_r_tlevel = 16;
+
+}
+
+
+/*
+ * cls_clear_break.
+ * Determines whether it's time to shut off the break condition.
+ *
+ * No locks are assumed to be held when calling this function.
+ * The channel lock is acquired and released in this function.
+ */
+static inline void cls_clear_break(struct channel_t *ch, int force)
+{
+ ulong lock_flags;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /* Bail if we aren't currently sending a break. */
+ if (!ch->ch_stop_sending_break) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ /* Turn break off, and unset some variables */
+ if (ch->ch_flags & CH_BREAK_SENDING) {
+ if ((jiffies >= ch->ch_stop_sending_break) || force) {
+ uchar temp = readb(&ch->ch_cls_uart->lcr);
+ writeb((temp & ~UART_LCR_SBC), &ch->ch_cls_uart->lcr);
+ ch->ch_flags &= ~(CH_BREAK_SENDING);
+ ch->ch_stop_sending_break = 0;
+ DPR_IOCTL(("Finishing UART_LCR_SBC! finished: %lx\n", jiffies));
+ }
+ }
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+}
+
+
+/* Parse the ISR register for the specific port */
+static inline void cls_parse_isr(struct board_t *brd, uint port)
+{
+ struct channel_t *ch;
+ uchar isr = 0;
+ ulong lock_flags;
+
+ /*
+ * No need to verify board pointer, it was already
+ * verified in the interrupt routine.
+ */
+
+ if (port > brd->nasync)
+ return;
+
+ ch = brd->channels[port];
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ /* Here we try to figure out what caused the interrupt to happen */
+ while (1) {
+
+ isr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Bail if no pending interrupt on port */
+ if (isr & UART_IIR_NO_INT) {
+ break;
+ }
+
+ DPR_INTR(("%s:%d port: %x isr: %x\n", __FILE__, __LINE__, port, isr));
+
+ /* Receive Interrupt pending */
+ if (isr & (UART_IIR_RDI | UART_IIR_RDI_TIMEOUT)) {
+ /* Read data from uart -> queue */
+ brd->intr_rx++;
+ ch->ch_intr_rx++;
+ cls_copy_data_from_uart_to_queue(ch);
+ dgnc_check_queue_flow_control(ch);
+ }
+
+ /* Transmit Hold register empty pending */
+ if (isr & UART_IIR_THRI) {
+ /* Transfer data (if any) from Write Queue -> UART. */
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ brd->intr_tx++;
+ ch->ch_intr_tx++;
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ cls_copy_data_from_queue_to_uart(ch);
+ }
+
+ /* Received Xoff signal/Special character */
+ if (isr & UART_IIR_XOFF) {
+ /* Empty */
+ }
+
+ /* CTS/RTS change of state */
+ if (isr & UART_IIR_CTSRTS) {
+ brd->intr_modem++;
+ ch->ch_intr_modem++;
+ /*
+ * Don't need to do anything, the cls_parse_modem
+ * below will grab the updated modem signals.
+ */
+ }
+
+ /* Parse any modem signal changes */
+ DPR_INTR(("MOD_STAT: sending to parse_modem_sigs\n"));
+ cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr));
+ }
+}
+
+
+/*
+ * cls_param()
+ * Send any/all changes to the line to the UART.
+ */
+static void cls_param(struct tty_struct *tty)
+{
+ uchar lcr = 0;
+ uchar uart_lcr = 0;
+ uchar ier = 0;
+ uchar uart_ier = 0;
+ uint baud = 9600;
+ int quot = 0;
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!tty || tty->magic != TTY_MAGIC) {
+ return;
+ }
+
+ un = (struct un_t *) tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC) {
+ return;
+ }
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ return;
+ }
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC) {
+ return;
+ }
+
+ DPR_PARAM(("param start: tdev: %x cflags: %x oflags: %x iflags: %x\n",
+ ch->ch_tun.un_dev, ch->ch_c_cflag, ch->ch_c_oflag, ch->ch_c_iflag));
+
+ /*
+ * If baud rate is zero, flush queues, and set mval to drop DTR.
+ */
+ if ((ch->ch_c_cflag & (CBAUD)) == 0) {
+ ch->ch_r_head = ch->ch_r_tail = 0;
+ ch->ch_e_head = ch->ch_e_tail = 0;
+ ch->ch_w_head = ch->ch_w_tail = 0;
+
+ cls_flush_uart_write(ch);
+ cls_flush_uart_read(ch);
+
+ /* The baudrate is B0 so all modem lines are to be dropped. */
+ ch->ch_flags |= (CH_BAUD0);
+ ch->ch_mostat &= ~(UART_MCR_RTS | UART_MCR_DTR);
+ cls_assert_modem_signals(ch);
+ ch->ch_old_baud = 0;
+ return;
+ } else if (ch->ch_custom_speed) {
+
+ baud = ch->ch_custom_speed;
+ /* Handle transition from B0 */
+ if (ch->ch_flags & CH_BAUD0) {
+ ch->ch_flags &= ~(CH_BAUD0);
+
+ /*
+ * Bring back up RTS and DTR...
+ * Also handle RTS or DTR toggle if set.
+ */
+ if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_RTS);
+ if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_DTR);
+ }
+
+ } else {
+ int iindex = 0;
+ int jindex = 0;
+
+ ulong bauds[4][16] = {
+ { /* slowbaud */
+ 0, 50, 75, 110,
+ 134, 150, 200, 300,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* slowbaud & CBAUDEX */
+ 0, 57600, 115200, 230400,
+ 460800, 150, 200, 921600,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* fastbaud */
+ 0, 57600, 76800, 115200,
+ 131657, 153600, 230400, 460800,
+ 921600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* fastbaud & CBAUDEX */
+ 0, 57600, 115200, 230400,
+ 460800, 150, 200, 921600,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 }
+ };
+
+ /* Only use the TXPrint baud rate if the terminal unit is NOT open */
+ if (!(ch->ch_tun.un_flags & UN_ISOPEN) && (un->un_type == DGNC_PRINT))
+ baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
+ else
+ baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
+
+ if (ch->ch_c_cflag & CBAUDEX)
+ iindex = 1;
+
+ if (ch->ch_digi.digi_flags & DIGI_FAST)
+ iindex += 2;
+
+ jindex = baud;
+
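+ /* iindex picks the table row (CBAUDEX adds 1, DIGI_FAST adds 2); jindex is the CBAUD code used as the column. */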
+ if ((iindex >= 0) && (iindex < 4) && (jindex >= 0) && (jindex < 16)) {
+ baud = bauds[iindex][jindex];
+ } else {
+ DPR_IOCTL(("baud indices were out of range (%d)(%d)",
+ iindex, jindex));
+ baud = 0;
+ }
+
+ if (baud == 0)
+ baud = 9600;
+
+ /* Handle transition from B0 */
+ if (ch->ch_flags & CH_BAUD0) {
+ ch->ch_flags &= ~(CH_BAUD0);
+
+ /*
+ * Bring back up RTS and DTR...
+ * Also handle RTS or DTR toggle if set.
+ */
+ if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_RTS);
+ if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_DTR);
+ }
+ }
+
+ if (ch->ch_c_cflag & PARENB) {
+ lcr |= UART_LCR_PARITY;
+ }
+
+ if (!(ch->ch_c_cflag & PARODD)) {
+ lcr |= UART_LCR_EPAR;
+ }
+
+ /*
+ * Not all platforms support mark/space parity,
+ * so this will hide behind an ifdef.
+ */
+#ifdef CMSPAR
+ if (ch->ch_c_cflag & CMSPAR)
+ lcr |= UART_LCR_SPAR;
+#endif
+
+ if (ch->ch_c_cflag & CSTOPB)
+ lcr |= UART_LCR_STOP;
+
+ switch (ch->ch_c_cflag & CSIZE) {
+ case CS5:
+ lcr |= UART_LCR_WLEN5;
+ break;
+ case CS6:
+ lcr |= UART_LCR_WLEN6;
+ break;
+ case CS7:
+ lcr |= UART_LCR_WLEN7;
+ break;
+ case CS8:
+ default:
+ lcr |= UART_LCR_WLEN8;
+ break;
+ }
+
+ ier = uart_ier = readb(&ch->ch_cls_uart->ier);
+ uart_lcr = readb(&ch->ch_cls_uart->lcr);
+
+ if (baud == 0)
+ baud = 9600;
+
+ quot = ch->ch_bd->bd_dividend / baud;
+
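+ /* Program the divisor only when the baud changes: with DLAB set, the txrx and ier offsets act as the low and high divisor latch bytes. */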
+ if (quot != 0 && ch->ch_old_baud != baud) {
+ ch->ch_old_baud = baud;
+ writeb(UART_LCR_DLAB, &ch->ch_cls_uart->lcr);
+ writeb((quot & 0xff), &ch->ch_cls_uart->txrx);
+ writeb((quot >> 8), &ch->ch_cls_uart->ier);
+ writeb(lcr, &ch->ch_cls_uart->lcr);
+ }
+
+ if (uart_lcr != lcr)
+ writeb(lcr, &ch->ch_cls_uart->lcr);
+
+ if (ch->ch_c_cflag & CREAD) {
+ ier |= (UART_IER_RDI | UART_IER_RLSI);
+ }
+ else {
+ ier &= ~(UART_IER_RDI | UART_IER_RLSI);
+ }
+
+ /*
+ * Have the UART interrupt on modem signal changes ONLY when
+ * we are in hardware flow control mode, or CLOCAL/FORCEDCD is not set.
+ */
+ if ((ch->ch_digi.digi_flags & CTSPACE) || (ch->ch_digi.digi_flags & RTSPACE) ||
+ (ch->ch_c_cflag & CRTSCTS) || !(ch->ch_digi.digi_flags & DIGI_FORCEDCD) ||
+ !(ch->ch_c_cflag & CLOCAL))
+ {
+ ier |= UART_IER_MSI;
+ }
+ else {
+ ier &= ~UART_IER_MSI;
+ }
+
+ ier |= UART_IER_THRI;
+
+ if (ier != uart_ier)
+ writeb(ier, &ch->ch_cls_uart->ier);
+
+ if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) {
+ cls_set_cts_flow_control(ch);
+ }
+ else if (ch->ch_c_iflag & IXON) {
+ /* If start/stop is set to disable, then we should disable flow control */
+ if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE))
+ cls_set_no_output_flow_control(ch);
+ else
+ cls_set_ixon_flow_control(ch);
+ }
+ else {
+ cls_set_no_output_flow_control(ch);
+ }
+
+ if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) {
+ cls_set_rts_flow_control(ch);
+ }
+ else if (ch->ch_c_iflag & IXOFF) {
+ /* If start/stop is set to disable, then we should disable flow control */
+ if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE))
+ cls_set_no_input_flow_control(ch);
+ else
+ cls_set_ixoff_flow_control(ch);
+ }
+ else {
+ cls_set_no_input_flow_control(ch);
+ }
+
+ cls_assert_modem_signals(ch);
+
+ /* Get current status of the modem signals now */
+ cls_parse_modem(ch, readb(&ch->ch_cls_uart->msr));
+}
+
+
+/*
+ * Our board poller function.
+ */
+static void cls_tasklet(unsigned long data)
+{
+ struct board_t *bd = (struct board_t *) data;
+ struct channel_t *ch;
+ ulong lock_flags;
+ int i;
+ int state = 0;
+ int ports = 0;
+
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC) {
+ APR(("poll_tasklet() - NULL or bad bd.\n"));
+ return;
+ }
+
+ /* Cache a couple board values */
+ DGNC_LOCK(bd->bd_lock, lock_flags);
+ state = bd->state;
+ ports = bd->nasync;
+ DGNC_UNLOCK(bd->bd_lock, lock_flags);
+
+ /*
+ * Do NOT allow the interrupt routine to read the intr registers
+ * Until we release this lock.
+ */
+ DGNC_LOCK(bd->bd_intr_lock, lock_flags);
+
+ /*
+ * If board is ready, parse deeper to see if there is anything to do.
+ */
+ if ((state == BOARD_READY) && (ports > 0)) {
+
+ /* Loop on each port */
+ for (i = 0; i < ports; i++) {
+ ch = bd->channels[i];
+ if (!ch)
+ continue;
+
+ /*
+ * NOTE: Remember you CANNOT hold any channel
+ * locks when calling input.
+ * During input processing, it's possible we
+ * will call ld, which might do callbacks back
+ * into us.
+ */
+ dgnc_input(ch);
+
+ /*
+ * Channel lock is grabbed and then released
+ * inside this routine.
+ */
+ cls_copy_data_from_queue_to_uart(ch);
+ dgnc_wakeup_writes(ch);
+
+ /*
+ * Check carrier function.
+ */
+ dgnc_carrier(ch);
+
+ /*
+ * The timing check of turning off the break is done
+ * inside clear_break()
+ */
+ if (ch->ch_stop_sending_break)
+ cls_clear_break(ch, 0);
+ }
+ }
+
+ DGNC_UNLOCK(bd->bd_intr_lock, lock_flags);
+
+}
+
+
+/*
+ * cls_intr()
+ *
+ * Classic specific interrupt handler.
+ */
+static irqreturn_t cls_intr(int irq, void *voidbrd)
+{
+ struct board_t *brd = (struct board_t *) voidbrd;
+ uint i = 0;
+ uchar poll_reg;
+ unsigned long lock_flags;
+
+ if (!brd) {
+ APR(("Received interrupt (%d) with null board associated\n", irq));
+ return IRQ_NONE;
+ }
+
+ /*
+ * Check to make sure it's for us.
+ */
+ if (brd->magic != DGNC_BOARD_MAGIC) {
+ APR(("Received interrupt (%d) with a board pointer that wasn't ours!\n", irq));
+ return IRQ_NONE;
+ }
+
+ DGNC_LOCK(brd->bd_intr_lock, lock_flags);
+
+ brd->intr_count++;
+
+ /*
+ * Check the board's global interrupt offset to see if we
+ * actually do have an interrupt pending for us.
+ */
+ poll_reg = readb(brd->re_map_membase + UART_CLASSIC_POLL_ADDR_OFFSET);
+
+ /* If 0, no interrupts pending */
+ if (!poll_reg) {
+ DPR_INTR(("Kernel interrupted to me, but no pending interrupts...\n"));
+ DGNC_UNLOCK(brd->bd_intr_lock, lock_flags);
+ return IRQ_NONE;
+ }
+
+ DPR_INTR(("%s:%d poll_reg: %x\n", __FILE__, __LINE__, poll_reg));
+
+ /* Parse each port to find out what caused the interrupt */
+ for (i = 0; i < brd->nasync; i++) {
+ cls_parse_isr(brd, i);
+ }
+
+ /*
+ * Schedule the tasklet to do more in-depth servicing at a better time.
+ */
+ tasklet_schedule(&brd->helper_tasklet);
+
+ DGNC_UNLOCK(brd->bd_intr_lock, lock_flags);
+
+ DPR_INTR(("dgnc_intr finish.\n"));
+ return IRQ_HANDLED;
+}
+
+
+static void cls_disable_receiver(struct channel_t *ch)
+{
+ uchar tmp = readb(&ch->ch_cls_uart->ier);
+ tmp &= ~(UART_IER_RDI);
+ writeb(tmp, &ch->ch_cls_uart->ier);
+}
+
+
+static void cls_enable_receiver(struct channel_t *ch)
+{
+ uchar tmp = readb(&ch->ch_cls_uart->ier);
+ tmp |= (UART_IER_RDI);
+ writeb(tmp, &ch->ch_cls_uart->ier);
+}
+
+
+static void cls_copy_data_from_uart_to_queue(struct channel_t *ch)
+{
+ int qleft = 0;
+ uchar linestatus = 0;
+ uchar error_mask = 0;
+ ushort head;
+ ushort tail;
+ ulong lock_flags;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /* cache head and tail of queue */
+ head = ch->ch_r_head;
+ tail = ch->ch_r_tail;
+
+ /* Store how much space we have left in the queue */
+ if ((qleft = tail - head - 1) < 0)
+ qleft += RQUEUEMASK + 1;
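+ /* tail - head - 1 goes negative when head has wrapped past tail; adding the queue size (RQUEUEMASK + 1) corrects it. */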
+
+ /*
+ * Create a mask to determine whether we should
+ * insert the character (if any) into our queue.
+ */
+ if (ch->ch_c_iflag & IGNBRK)
+ error_mask |= UART_LSR_BI;
+
+ while (1) {
+ linestatus = readb(&ch->ch_cls_uart->lsr);
+
+ if (!(linestatus & (UART_LSR_DR)))
+ break;
+
+ /*
+ * Discard character if we are ignoring the error mask.
+ */
+ if (linestatus & error_mask) {
+ uchar discard;
+ linestatus = 0;
+ discard = readb(&ch->ch_cls_uart->txrx);
+ continue;
+ }
+
+ /*
+ * If our queue is full, we have no choice but to drop some data.
+ * The assumption is that HWFLOW or SWFLOW should have stopped
+ * things way way before we got to this point.
+ *
+ * I decided that I wanted to ditch the oldest data first;
+ * I hope that's okay with everyone? Yes? Good.
+ */
+ while (qleft < 1) {
+ DPR_READ(("Queue full, dropping DATA:%x LSR:%x\n",
+ ch->ch_rqueue[tail], ch->ch_equeue[tail]));
+
+ ch->ch_r_tail = tail = (tail + 1) & RQUEUEMASK;
+ ch->ch_err_overrun++;
+ qleft++;
+ }
+
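+ /* The data byte and its LSR error bits (break/parity/framing) share the same index in the rqueue/equeue pair. */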
+ ch->ch_equeue[head] = linestatus & (UART_LSR_BI | UART_LSR_PE | UART_LSR_FE);
+ ch->ch_rqueue[head] = readb(&ch->ch_cls_uart->txrx);
+ dgnc_sniff_nowait_nolock(ch, "UART READ", ch->ch_rqueue + head, 1);
+
+ qleft--;
+
+ DPR_READ(("DATA/LSR pair: %x %x\n", ch->ch_rqueue[head], ch->ch_equeue[head]));
+
+ if (ch->ch_equeue[head] & UART_LSR_PE)
+ ch->ch_err_parity++;
+ if (ch->ch_equeue[head] & UART_LSR_BI)
+ ch->ch_err_break++;
+ if (ch->ch_equeue[head] & UART_LSR_FE)
+ ch->ch_err_frame++;
+
+ /* Add to, and flip head if needed */
+ head = (head + 1) & RQUEUEMASK;
+ ch->ch_rxcount++;
+ }
+
+ /*
+ * Write new final heads to channel structure.
+ */
+ ch->ch_r_head = head & RQUEUEMASK;
+ ch->ch_e_head = head & EQUEUEMASK;
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+}
+
+
+/*
+ * This function basically goes to sleep for the given number of seconds, or until
+ * it gets signalled that the port has fully drained.
+ */
+static int cls_drain(struct tty_struct *tty, uint seconds)
+{
+ ulong lock_flags;
+ struct channel_t *ch;
+ struct un_t *un;
+ int rc = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC) {
+ return (-ENXIO);
+ }
+
+ un = (struct un_t *) tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC) {
+ return (-ENXIO);
+ }
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ return (-ENXIO);
+ }
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ un->un_flags |= UN_EMPTY;
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ /*
+ * NOTE: Do something with time passed in.
+ */
+ rc = wait_event_interruptible(un->un_flags_wait, ((un->un_flags & UN_EMPTY) == 0));
+
+ /* If ret is non-zero, user ctrl-c'ed us */
+ if (rc)
+ DPR_IOCTL(("%d Drain - User ctrl c'ed\n", __LINE__));
+
+ return (rc);
+}
+
+
+/* Channel lock MUST be held before calling this function! */
+static void cls_flush_uart_write(struct channel_t *ch)
+{
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ return;
+ }
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT), &ch->ch_cls_uart->isr_fcr);
+ udelay(10);
+
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+}
+
+
+/* Channel lock MUST be held before calling this function! */
+static void cls_flush_uart_read(struct channel_t *ch)
+{
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ return;
+ }
+
+ /*
+ * For complete POSIX compatibility, we should be purging the
+ * read FIFO in the UART here.
+ *
+ * However, doing the statement below also incorrectly flushes
+ * write data as well as just basically trashing the FIFO.
+ *
+ * I believe this is a BUG in this UART.
+ * So for now, we will leave the code #ifdef'ed out...
+ */
+#if 0
+ writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR), &ch->ch_cls_uart->isr_fcr);
+#endif
+ udelay(10);
+}
+
+
+static void cls_copy_data_from_queue_to_uart(struct channel_t *ch)
+{
+ ushort head;
+ ushort tail;
+ int n;
+ int qlen;
+ uint len_written = 0;
+ ulong lock_flags;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /* No data to write to the UART */
+ if (ch->ch_w_tail == ch->ch_w_head) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ /* If port is "stopped", don't send any data to the UART */
+ if ((ch->ch_flags & CH_FORCED_STOP) || (ch->ch_flags & CH_BREAK_SENDING)) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM))) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
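+ /* Assume we can safely hand the UART at most 32 bytes per pass. */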
+ n = 32;
+
+ /* cache head and tail of queue */
+ head = ch->ch_w_head & WQUEUEMASK;
+ tail = ch->ch_w_tail & WQUEUEMASK;
+ qlen = (head - tail) & WQUEUEMASK;
+
+ /* Find minimum of the FIFO space, versus queue length */
+ n = min(n, qlen);
+
+ while (n > 0) {
+
+ /*
+ * If RTS Toggle mode is on, turn on RTS now if not already set,
+ * and make sure we get an event when the data transfer has completed.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
+ if (!(ch->ch_mostat & UART_MCR_RTS)) {
+ ch->ch_mostat |= (UART_MCR_RTS);
+ cls_assert_modem_signals(ch);
+ }
+ ch->ch_tun.un_flags |= (UN_EMPTY);
+ }
+
+ /*
+ * If DTR Toggle mode is on, turn on DTR now if not already set,
+ * and make sure we get an event when the data transfer has completed.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
+ if (!(ch->ch_mostat & UART_MCR_DTR)) {
+ ch->ch_mostat |= (UART_MCR_DTR);
+ cls_assert_modem_signals(ch);
+ }
+ ch->ch_tun.un_flags |= (UN_EMPTY);
+ }
+ writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_cls_uart->txrx);
+ dgnc_sniff_nowait_nolock(ch, "UART WRITE", ch->ch_wqueue + ch->ch_w_tail, 1);
+ DPR_WRITE(("Tx data: %x\n", ch->ch_wqueue[ch->ch_w_tail]));
+ ch->ch_w_tail++;
+ ch->ch_w_tail &= WQUEUEMASK;
+ ch->ch_txcount++;
+ len_written++;
+ n--;
+ }
+
+ if (len_written > 0)
+ ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ return;
+}
+
+
+static void cls_parse_modem(struct channel_t *ch, uchar signals)
+{
+ volatile uchar msignals = signals;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DPR_MSIGS(("cls_parse_modem: port: %d signals: %d\n", ch->ch_portnum, msignals));
+
+ /*
+ * Do altpin switching. Altpin switches DCD and DSR.
+ * This probably breaks DSRPACE, so we should be more clever here.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
+ uchar mswap = signals;
+ if (mswap & UART_MSR_DDCD) {
+ msignals &= ~UART_MSR_DDCD;
+ msignals |= UART_MSR_DDSR;
+ }
+ if (mswap & UART_MSR_DDSR) {
+ msignals &= ~UART_MSR_DDSR;
+ msignals |= UART_MSR_DDCD;
+ }
+ if (mswap & UART_MSR_DCD) {
+ msignals &= ~UART_MSR_DCD;
+ msignals |= UART_MSR_DSR;
+ }
+ if (mswap & UART_MSR_DSR) {
+ msignals &= ~UART_MSR_DSR;
+ msignals |= UART_MSR_DCD;
+ }
+ }
+
+ /* Scrub off the lower bits. They signify deltas, which I don't care about */
+ signals &= 0xf0;
+
+ if (msignals & UART_MSR_DCD)
+ ch->ch_mistat |= UART_MSR_DCD;
+ else
+ ch->ch_mistat &= ~UART_MSR_DCD;
+
+ if (msignals & UART_MSR_DSR)
+ ch->ch_mistat |= UART_MSR_DSR;
+ else
+ ch->ch_mistat &= ~UART_MSR_DSR;
+
+ if (msignals & UART_MSR_RI)
+ ch->ch_mistat |= UART_MSR_RI;
+ else
+ ch->ch_mistat &= ~UART_MSR_RI;
+
+ if (msignals & UART_MSR_CTS)
+ ch->ch_mistat |= UART_MSR_CTS;
+ else
+ ch->ch_mistat &= ~UART_MSR_CTS;
+
+
+ DPR_MSIGS(("Port: %d DTR: %d RTS: %d CTS: %d DSR: %d " "RI: %d CD: %d\n",
+ ch->ch_portnum,
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_DTR),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_RTS),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_CTS),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DSR),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_RI),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DCD)));
+}
+
+
+/* Make the UART raise any of the output signals we want up */
+static void cls_assert_modem_signals(struct channel_t *ch)
+{
+ uchar out;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ out = ch->ch_mostat;
+
+ if (ch->ch_flags & CH_LOOPBACK)
+ out |= UART_MCR_LOOP;
+
+ writeb(out, &ch->ch_cls_uart->mcr);
+
+ /* Give time for the UART to actually drop the signals */
+ udelay(10);
+}
+
+
+static void cls_send_start_character(struct channel_t *ch)
+{
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ if (ch->ch_startc != _POSIX_VDISABLE) {
+ ch->ch_xon_sends++;
+ writeb(ch->ch_startc, &ch->ch_cls_uart->txrx);
+ }
+}
+
+
+static void cls_send_stop_character(struct channel_t *ch)
+{
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ if (ch->ch_stopc != _POSIX_VDISABLE) {
+ ch->ch_xoff_sends++;
+ writeb(ch->ch_stopc, &ch->ch_cls_uart->txrx);
+ }
+}
+
+
+/* Inits UART */
+static void cls_uart_init(struct channel_t *ch)
+{
+ uchar lcrb = readb(&ch->ch_cls_uart->lcr);
+ uchar isr_fcr = 0;
+
+ writeb(0, &ch->ch_cls_uart->ier);
+
+ /*
+ * The Enhanced Register Set may only be accessed when
+ * the Line Control Register is set to 0xBFh.
+ */
+ writeb(UART_EXAR654_ENHANCED_REGISTER_SET, &ch->ch_cls_uart->lcr);
+
+ isr_fcr = readb(&ch->ch_cls_uart->isr_fcr);
+
+ /* Turn on Enhanced/Extended controls */
+ isr_fcr |= (UART_EXAR654_EFR_ECB);
+
+ writeb(isr_fcr, &ch->ch_cls_uart->isr_fcr);
+
+ /* Write old LCR value back out, which turns enhanced access off */
+ writeb(lcrb, &ch->ch_cls_uart->lcr);
+
+ /* Clear out UART and FIFO */
+ readb(&ch->ch_cls_uart->txrx);
+
+ writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT), &ch->ch_cls_uart->isr_fcr);
+ udelay(10);
+
+ ch->ch_flags |= (CH_FIFO_ENABLED | CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+
+ readb(&ch->ch_cls_uart->lsr);
+ readb(&ch->ch_cls_uart->msr);
+}
+
+
+/*
+ * Turns off UART.
+ */
+static void cls_uart_off(struct channel_t *ch)
+{
+ writeb(0, &ch->ch_cls_uart->ier);
+}
+
+
+/*
+ * cls_get_uart_bytes_left.
+ * Returns 0 if nothing is left in the FIFO, returns 1 otherwise.
+ *
+ * The channel lock MUST be held by the calling function.
+ */
+static uint cls_get_uart_bytes_left(struct channel_t *ch)
+{
+ uchar left = 0;
+ uchar lsr = 0;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return 0;
+
+ lsr = readb(&ch->ch_cls_uart->lsr);
+
+ /* Determine whether the Transmitter is empty or not */
+ if (!(lsr & UART_LSR_TEMT)) {
+ if (ch->ch_flags & CH_TX_FIFO_EMPTY) {
+ tasklet_schedule(&ch->ch_bd->helper_tasklet);
+ }
+ left = 1;
+ }
+ else {
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ left = 0;
+ }
+
+ return left;
+}
+
+
+/*
+ * cls_send_break.
+ * Starts sending a break through the UART.
+ *
+ * The channel lock MUST be held by the calling function.
+ */
+static void cls_send_break(struct channel_t *ch, int msecs)
+{
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ /*
+ * If we receive a time of 0, this means turn off the break.
+ */
+ if (msecs == 0) {
+ /* Turn break off, and unset some variables */
+ if (ch->ch_flags & CH_BREAK_SENDING) {
+ uchar temp = readb(&ch->ch_cls_uart->lcr);
+ writeb((temp & ~UART_LCR_SBC), &ch->ch_cls_uart->lcr);
+ ch->ch_flags &= ~(CH_BREAK_SENDING);
+ ch->ch_stop_sending_break = 0;
+ DPR_IOCTL(("Finishing UART_LCR_SBC! finished: %lx\n", jiffies));
+ }
+ return;
+ }
+
+ /*
+ * Set the time we should stop sending the break.
+ * If we are already sending a break, toss away the existing
+ * time to stop, and use this new value instead.
+ */
+ ch->ch_stop_sending_break = jiffies + dgnc_jiffies_from_ms(msecs);
+
+ /* Tell the UART to start sending the break */
+ if (!(ch->ch_flags & CH_BREAK_SENDING)) {
+ uchar temp = readb(&ch->ch_cls_uart->lcr);
+ writeb((temp | UART_LCR_SBC), &ch->ch_cls_uart->lcr);
+ ch->ch_flags |= (CH_BREAK_SENDING);
+ DPR_IOCTL(("Port %d. Starting UART_LCR_SBC! start: %lx should end: %lx\n",
+ ch->ch_portnum, jiffies, ch->ch_stop_sending_break));
+ }
+}
+
+
+/*
+ * cls_send_immediate_char.
+ * Sends a specific character as soon as possible to the UART,
+ * jumping over any bytes that might be in the write queue.
+ *
+ * The channel lock MUST be held by the calling function.
+ */
+static void cls_send_immediate_char(struct channel_t *ch, unsigned char c)
+{
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ writeb(c, &ch->ch_cls_uart->txrx);
+}
+
+static void cls_vpd(struct board_t *brd)
+{
+ ulong vpdbase; /* Start of io base of the card */
+ u8 __iomem *re_map_vpdbase;/* Remapped memory of the card */
+ int i = 0;
+
+
+ vpdbase = pci_resource_start(brd->pdev, 3);
+
+ /* No VPD */
+ if (!vpdbase)
+ return;
+
+ re_map_vpdbase = ioremap(vpdbase, 0x400);
+
+ if (!re_map_vpdbase)
+ return;
+
+ /* Store the VPD into our buffer */
+ for (i = 0; i < 0x40; i++) {
+ brd->vpd[i] = readb(re_map_vpdbase + i);
+ printk("%x ", brd->vpd[i]);
+ }
+ printk("\n");
+
+ if (re_map_vpdbase)
+ iounmap(re_map_vpdbase);
+}
+
diff --git a/drivers/staging/dgnc/dgnc_cls.h b/drivers/staging/dgnc/dgnc_cls.h
new file mode 100644
index 00000000000..ffe8535a84a
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_cls.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ */
+
+#ifndef __DGNC_CLS_H
+#define __DGNC_CLS_H
+
+#include "dgnc_types.h"
+
+
+/************************************************************************
+ * Per channel/port Classic UART structure *
+ ************************************************************************
+ * Base Structure Entries Usage Meanings to Host *
+ * *
+ * W = read write R = read only *
+ * U = Unused. *
+ ************************************************************************/
+
+struct cls_uart_struct {
+ u8 txrx; /* WR RHR/THR - Holding Reg */
+ u8 ier; /* WR IER - Interrupt Enable Reg */
+ u8 isr_fcr; /* WR ISR/FCR - Interrupt Status Reg/Fifo Control Reg */
+ u8 lcr; /* WR LCR - Line Control Reg */
+ u8 mcr; /* WR MCR - Modem Control Reg */
+ u8 lsr; /* WR LSR - Line Status Reg */
+ u8 msr; /* WR MSR - Modem Status Reg */
+ u8 spr; /* WR SPR - Scratch Pad Reg */
+};
+
+/* Where to read the interrupt register (8bits) */
+#define UART_CLASSIC_POLL_ADDR_OFFSET 0x40
+
+#define UART_EXAR654_ENHANCED_REGISTER_SET 0xBF
+
+#define UART_16654_FCR_TXTRIGGER_8 0x0
+#define UART_16654_FCR_TXTRIGGER_16 0x10
+#define UART_16654_FCR_TXTRIGGER_32 0x20
+#define UART_16654_FCR_TXTRIGGER_56 0x30
+
+#define UART_16654_FCR_RXTRIGGER_8 0x0
+#define UART_16654_FCR_RXTRIGGER_16 0x40
+#define UART_16654_FCR_RXTRIGGER_56 0x80
+#define UART_16654_FCR_RXTRIGGER_60 0xC0
+
+#define UART_IIR_XOFF 0x10 /* Received Xoff signal/Special character */
+#define UART_IIR_CTSRTS 0x20 /* Received CTS/RTS change of state */
+#define UART_IIR_RDI_TIMEOUT 0x0C /* Receiver data TIMEOUT */
+
+/*
+ * These are the EXTENDED definitions for the Exar 654's Interrupt
+ * Enable Register.
+ */
+#define UART_EXAR654_EFR_ECB 0x10 /* Enhanced control bit */
+#define UART_EXAR654_EFR_IXON 0x2 /* Receiver compares Xon1/Xoff1 */
+#define UART_EXAR654_EFR_IXOFF 0x8 /* Transmit Xon1/Xoff1 */
+#define UART_EXAR654_EFR_RTSDTR 0x40 /* Auto RTS/DTR Flow Control Enable */
+#define UART_EXAR654_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow Control Enable */
+
+#define UART_EXAR654_XOFF_DETECT 0x1 /* Indicates whether chip saw an incoming XOFF char */
+#define UART_EXAR654_XON_DETECT 0x2 /* Indicates whether chip saw an incoming XON char */
+
+#define UART_EXAR654_IER_XOFF 0x20 /* Xoff Interrupt Enable */
+#define UART_EXAR654_IER_RTSDTR 0x40 /* Output Interrupt Enable */
+#define UART_EXAR654_IER_CTSDSR 0x80 /* Input Interrupt Enable */
+
+/*
+ * Our Global Variables
+ */
+extern struct board_ops dgnc_cls_ops;
+
+#endif
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
new file mode 100644
index 00000000000..f8c1e22585d
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -0,0 +1,958 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+#include <linux/sched.h>
+#endif
+
+#include "dgnc_driver.h"
+#include "dgnc_pci.h"
+#include "dpacompat.h"
+#include "dgnc_mgmt.h"
+#include "dgnc_tty.h"
+#include "dgnc_trace.h"
+#include "dgnc_cls.h"
+#include "dgnc_neo.h"
+#include "dgnc_sysfs.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Digi International, http://www.digi.com");
+MODULE_DESCRIPTION("Driver for the Digi International Neo and Classic PCI based product line");
+MODULE_SUPPORTED_DEVICE("dgnc");
+
+/*
+ * insmod command line overrideable parameters
+ *
+ * NOTE: we use a set of macros to create the variables, which allows
+ * us to specify the variable type, name, initial value, and description.
+ */
+PARM_INT(debug, 0x00, 0644, "Driver debugging level");
+PARM_INT(rawreadok, 1, 0644, "Bypass flip buffers on input");
+PARM_INT(trcbuf_size, 0x100000, 0644, "Debugging trace buffer size.");
+
+/**************************************************************************
+ *
+ * protos for this file
+ *
+ */
+static int dgnc_start(void);
+static int dgnc_finalize_board_init(struct board_t *brd);
+static void dgnc_init_globals(void);
+static int dgnc_found_board(struct pci_dev *pdev, int id);
+static void dgnc_cleanup_board(struct board_t *brd);
+static void dgnc_poll_handler(ulong dummy);
+static int dgnc_init_pci(void);
+static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void dgnc_remove_one(struct pci_dev *dev);
+static int dgnc_probe1(struct pci_dev *pdev, int card_type);
+static void dgnc_do_remap(struct board_t *brd);
+
+/* Driver load/unload functions */
+int dgnc_init_module(void);
+void dgnc_cleanup_module(void);
+
+module_init(dgnc_init_module);
+module_exit(dgnc_cleanup_module);
+
+
+/*
+ * File operations permitted on Control/Management major.
+ */
+static struct file_operations dgnc_BoardFops =
+{
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = dgnc_mgmt_ioctl,
+ .open = dgnc_mgmt_open,
+ .release = dgnc_mgmt_close
+};
+
+
+/*
+ * Globals
+ */
+uint dgnc_NumBoards;
+struct board_t *dgnc_Board[MAXBOARDS];
+DEFINE_SPINLOCK(dgnc_global_lock);
+int dgnc_driver_state = DRIVER_INITIALIZED;
+ulong dgnc_poll_counter;
+uint dgnc_Major;
+int dgnc_poll_tick = 20; /* Poll interval - 20 ms */
+
+/*
+ * Static vars.
+ */
+static uint dgnc_Major_Control_Registered = FALSE;
+static uint dgnc_driver_start = FALSE;
+
+static struct class *dgnc_class;
+
+/*
+ * Poller stuff
+ */
+static DEFINE_SPINLOCK(dgnc_poll_lock); /* Poll scheduling lock */
+static ulong dgnc_poll_time; /* Time of next poll */
+static uint dgnc_poll_stop; /* Used to tell poller to stop */
+static struct timer_list dgnc_poll_timer;
+
+
+static struct pci_device_id dgnc_pci_tbl[] = {
+ { DIGI_VID, PCI_DEVICE_CLASSIC_4_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { DIGI_VID, PCI_DEVICE_CLASSIC_4_422_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { DIGI_VID, PCI_DEVICE_CLASSIC_8_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+ { DIGI_VID, PCI_DEVICE_CLASSIC_8_422_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+ { DIGI_VID, PCI_DEVICE_NEO_4_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+ { DIGI_VID, PCI_DEVICE_NEO_8_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+ { DIGI_VID, PCI_DEVICE_NEO_2DB9_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+ { DIGI_VID, PCI_DEVICE_NEO_2DB9PRI_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
+ { DIGI_VID, PCI_DEVICE_NEO_2RJ45_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
+ { DIGI_VID, PCI_DEVICE_NEO_2RJ45PRI_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
+ { DIGI_VID, PCI_DEVICE_NEO_1_422_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
+ { DIGI_VID, PCI_DEVICE_NEO_1_422_485_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
+ { DIGI_VID, PCI_DEVICE_NEO_2_422_485_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
+ { DIGI_VID, PCI_DEVICE_NEO_EXPRESS_8_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 13 },
+ { DIGI_VID, PCI_DEVICE_NEO_EXPRESS_4_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 },
+ { DIGI_VID, PCI_DEVICE_NEO_EXPRESS_4RJ45_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15 },
+ { DIGI_VID, PCI_DEVICE_NEO_EXPRESS_8RJ45_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 16 },
+ {0,} /* 0 terminated list. */
+};
+MODULE_DEVICE_TABLE(pci, dgnc_pci_tbl);
+
+struct board_id {
+ uchar *name;
+ uint maxports;
+ unsigned int is_pci_express;
+};
+
+static struct board_id dgnc_Ids[] =
+{
+ { PCI_DEVICE_CLASSIC_4_PCI_NAME, 4, 0 },
+ { PCI_DEVICE_CLASSIC_4_422_PCI_NAME, 4, 0 },
+ { PCI_DEVICE_CLASSIC_8_PCI_NAME, 8, 0 },
+ { PCI_DEVICE_CLASSIC_8_422_PCI_NAME, 8, 0 },
+ { PCI_DEVICE_NEO_4_PCI_NAME, 4, 0 },
+ { PCI_DEVICE_NEO_8_PCI_NAME, 8, 0 },
+ { PCI_DEVICE_NEO_2DB9_PCI_NAME, 2, 0 },
+ { PCI_DEVICE_NEO_2DB9PRI_PCI_NAME, 2, 0 },
+ { PCI_DEVICE_NEO_2RJ45_PCI_NAME, 2, 0 },
+ { PCI_DEVICE_NEO_2RJ45PRI_PCI_NAME, 2, 0 },
+ { PCI_DEVICE_NEO_1_422_PCI_NAME, 1, 0 },
+ { PCI_DEVICE_NEO_1_422_485_PCI_NAME, 1, 0 },
+ { PCI_DEVICE_NEO_2_422_485_PCI_NAME, 2, 0 },
+ { PCI_DEVICE_NEO_EXPRESS_8_PCI_NAME, 8, 1 },
+ { PCI_DEVICE_NEO_EXPRESS_4_PCI_NAME, 4, 1 },
+ { PCI_DEVICE_NEO_EXPRESS_4RJ45_PCI_NAME, 4, 1 },
+ { PCI_DEVICE_NEO_EXPRESS_8RJ45_PCI_NAME, 8, 1 },
+ { NULL, 0, 0 }
+};
+
+static struct pci_driver dgnc_driver = {
+ .name = "dgnc",
+ .probe = dgnc_init_one,
+ .id_table = dgnc_pci_tbl,
+ .remove = dgnc_remove_one,
+};
+
+
+char *dgnc_state_text[] = {
+ "Board Failed",
+ "Board Found",
+ "Board READY",
+};
+
+char *dgnc_driver_state_text[] = {
+ "Driver Initialized",
+ "Driver Ready."
+};
+
+
+
+/************************************************************************
+ *
+ * Driver load/unload functions
+ *
+ ************************************************************************/
+
+
+/*
+ * init_module()
+ *
+ * Module load. This is where it all starts.
+ */
+int dgnc_init_module(void)
+{
+ int rc = 0;
+
+ APR(("%s, Digi International Part Number %s\n", DG_NAME, DG_PART));
+
+ /*
+ * Initialize global stuff
+ */
+ rc = dgnc_start();
+
+ if (rc < 0) {
+ return(rc);
+ }
+
+ /*
+ * Find and configure all the cards
+ */
+ rc = dgnc_init_pci();
+
+ /*
+ * If something went wrong in the scan, bail out of driver.
+ */
+ if (rc < 0) {
+ /* Only unregister the pci driver if it was actually registered. */
+ if (dgnc_NumBoards)
+ pci_unregister_driver(&dgnc_driver);
+ else
+ printk("WARNING: dgnc driver load failed. No Digi Neo or Classic boards found.\n");
+
+ dgnc_cleanup_module();
+ }
+ else {
+ dgnc_create_driver_sysfiles(&dgnc_driver);
+ }
+
+ DPR_INIT(("Finished init_module. Returning %d\n", rc));
+ return (rc);
+}
+
+
+/*
+ * Start of driver.
+ */
+static int dgnc_start(void)
+{
+ int rc = 0;
+ unsigned long flags;
+
+ if (dgnc_driver_start == FALSE) {
+
+ dgnc_driver_start = TRUE;
+
+ /* make sure that the globals are init'd before we do anything else */
+ dgnc_init_globals();
+
+ dgnc_NumBoards = 0;
+
+ APR(("For the tools package or updated drivers please visit http://www.digi.com\n"));
+
+ /*
+ * Register our base character device into the kernel.
+ * This allows the download daemon to connect to the downld device
+ * before any of the boards are init'ed.
+ */
+ if (!dgnc_Major_Control_Registered) {
+ /*
+ * Register management/dpa devices
+ */
+ rc = register_chrdev(0, "dgnc", &dgnc_BoardFops);
+ if (rc <= 0) {
+ APR(("Can't register dgnc driver device (%d)\n", rc));
+ rc = -ENXIO;
+ return(rc);
+ }
+ dgnc_Major = rc;
+
+ dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt");
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ device_create_drvdata(dgnc_class, NULL,
+ MKDEV(dgnc_Major, 0),
+ NULL, "dgnc_mgmt");
+#else
+ device_create(dgnc_class, NULL,
+ MKDEV(dgnc_Major, 0),
+ NULL, "dgnc_mgmt");
+#endif
+
+ dgnc_Major_Control_Registered = TRUE;
+ }
+
+ /*
+ * Init any global tty stuff.
+ */
+ rc = dgnc_tty_preinit();
+
+ if (rc < 0) {
+ APR(("tty preinit - not enough memory (%d)\n", rc));
+ return(rc);
+ }
+
+ /* Start the poller */
+ DGNC_LOCK(dgnc_poll_lock, flags);
+ init_timer(&dgnc_poll_timer);
+ dgnc_poll_timer.function = dgnc_poll_handler;
+ dgnc_poll_timer.data = 0;
+ dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick);
+ dgnc_poll_timer.expires = dgnc_poll_time;
+ DGNC_UNLOCK(dgnc_poll_lock, flags);
+
+ add_timer(&dgnc_poll_timer);
+
+ dgnc_driver_state = DRIVER_READY;
+ }
+
+ return(rc);
+}
+
+/*
+ * Register pci driver, and return how many boards we have.
+ */
+static int dgnc_init_pci(void)
+{
+ return pci_register_driver(&dgnc_driver);
+}
+
+
+/* returns count (>= 0), or negative on error */
+static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int rc;
+
+ /* wake up and enable device */
+ rc = pci_enable_device(pdev);
+
+ if (rc < 0) {
+ rc = -EIO;
+ } else {
+ rc = dgnc_probe1(pdev, ent->driver_data);
+ if (rc == 0) {
+ dgnc_NumBoards++;
+ DPR_INIT(("Incrementing numboards to %d\n", dgnc_NumBoards));
+ }
+ }
+ return rc;
+}
+
+static int dgnc_probe1(struct pci_dev *pdev, int card_type)
+{
+ return dgnc_found_board(pdev, card_type);
+}
+
+
+static void dgnc_remove_one(struct pci_dev *dev)
+{
+ /* Do Nothing */
+}
+
+/*
+ * dgnc_cleanup_module()
+ *
+ * Module unload. This is where it all ends.
+ */
+void dgnc_cleanup_module(void)
+{
+ int i;
+ ulong lock_flags;
+
+ DGNC_LOCK(dgnc_poll_lock, lock_flags);
+ dgnc_poll_stop = 1;
+ DGNC_UNLOCK(dgnc_poll_lock, lock_flags);
+
+ /* Turn off poller right away. */
+ del_timer_sync(&dgnc_poll_timer);
+
+ dgnc_remove_driver_sysfiles(&dgnc_driver);
+
+ if (dgnc_Major_Control_Registered) {
+ device_destroy(dgnc_class, MKDEV(dgnc_Major, 0));
+ class_destroy(dgnc_class);
+ unregister_chrdev(dgnc_Major, "dgnc");
+ }
+
+ for (i = 0; i < dgnc_NumBoards; ++i) {
+ dgnc_remove_ports_sysfiles(dgnc_Board[i]);
+ dgnc_tty_uninit(dgnc_Board[i]);
+ dgnc_cleanup_board(dgnc_Board[i]);
+ }
+
+ dgnc_tty_post_uninit();
+
+#if defined(DGNC_TRACER)
+ /* last thing, make sure we release the tracebuffer */
+ dgnc_tracer_free();
+#endif
+ if (dgnc_NumBoards)
+ pci_unregister_driver(&dgnc_driver);
+}
+
+
+/*
+ * dgnc_cleanup_board()
+ *
+ * Free all the memory associated with a board
+ */
+static void dgnc_cleanup_board(struct board_t *brd)
+{
+ int i = 0;
+
+ if(!brd || brd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ switch (brd->device) {
+ case PCI_DEVICE_CLASSIC_4_DID:
+ case PCI_DEVICE_CLASSIC_8_DID:
+ case PCI_DEVICE_CLASSIC_4_422_DID:
+ case PCI_DEVICE_CLASSIC_8_422_DID:
+
+ /* Tell card not to interrupt anymore. */
+ outb(0, brd->iobase + 0x4c);
+ break;
+
+ default:
+ break;
+ }
+
+ if (brd->irq)
+ free_irq(brd->irq, brd);
+
+ tasklet_kill(&brd->helper_tasklet);
+
+ if (brd->re_map_membase) {
+ iounmap(brd->re_map_membase);
+ brd->re_map_membase = NULL;
+ }
+
+ if (brd->msgbuf_head) {
+ unsigned long flags;
+
+ DGNC_LOCK(dgnc_global_lock, flags);
+ brd->msgbuf = NULL;
+ printk(brd->msgbuf_head);
+ kfree(brd->msgbuf_head);
+ brd->msgbuf_head = NULL;
+ DGNC_UNLOCK(dgnc_global_lock, flags);
+ }
+
+ /* Free all allocated channels structs */
+ for (i = 0; i < MAXPORTS ; i++) {
+ if (brd->channels[i]) {
+ if (brd->channels[i]->ch_rqueue)
+ kfree(brd->channels[i]->ch_rqueue);
+ if (brd->channels[i]->ch_equeue)
+ kfree(brd->channels[i]->ch_equeue);
+ if (brd->channels[i]->ch_wqueue)
+ kfree(brd->channels[i]->ch_wqueue);
+
+ kfree(brd->channels[i]);
+ brd->channels[i] = NULL;
+ }
+ }
+
+ if (brd->flipbuf)
+ kfree(brd->flipbuf);
+
+ dgnc_Board[brd->boardnum] = NULL;
+
+ kfree(brd);
+}
+
+
+/*
+ * dgnc_found_board()
+ *
+ * A board has been found, init it.
+ */
+static int dgnc_found_board(struct pci_dev *pdev, int id)
+{
+ struct board_t *brd;
+ unsigned int pci_irq;
+ int i = 0;
+ int rc = 0;
+ unsigned long flags;
+
+ /* get the board structure and prep it */
+ brd = dgnc_Board[dgnc_NumBoards] =
+ (struct board_t *) kzalloc(sizeof(struct board_t), GFP_KERNEL);
+ if (!brd) {
+ APR(("memory allocation for board structure failed\n"));
+ return(-ENOMEM);
+ }
+
+ /* make a temporary message buffer for the boot messages */
+ brd->msgbuf = brd->msgbuf_head =
+ (char *) kzalloc(sizeof(char) * 8192, GFP_KERNEL);
+ if (!brd->msgbuf) {
+ kfree(brd);
+ APR(("memory allocation for board msgbuf failed\n"));
+ return(-ENOMEM);
+ }
+
+ /* store the info for the board we've found */
+ brd->magic = DGNC_BOARD_MAGIC;
+ brd->boardnum = dgnc_NumBoards;
+ brd->vendor = dgnc_pci_tbl[id].vendor;
+ brd->device = dgnc_pci_tbl[id].device;
+ brd->pdev = pdev;
+ brd->pci_bus = pdev->bus->number;
+ brd->pci_slot = PCI_SLOT(pdev->devfn);
+ brd->name = dgnc_Ids[id].name;
+ brd->maxports = dgnc_Ids[id].maxports;
+ if (dgnc_Ids[id].is_pci_express)
+ brd->bd_flags |= BD_IS_PCI_EXPRESS;
+ brd->dpastatus = BD_NOFEP;
+ init_waitqueue_head(&brd->state_wait);
+
+ DGNC_SPINLOCK_INIT(brd->bd_lock);
+ DGNC_SPINLOCK_INIT(brd->bd_intr_lock);
+
+ brd->state = BOARD_FOUND;
+
+ for (i = 0; i < MAXPORTS; i++) {
+ brd->channels[i] = NULL;
+ }
+
+ /* store which card & revision we have */
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &brd->subvendor);
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &brd->subdevice);
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &brd->rev);
+
+ pci_irq = pdev->irq;
+ brd->irq = pci_irq;
+
+
+ switch (brd->device) {
+
+ case PCI_DEVICE_CLASSIC_4_DID:
+ case PCI_DEVICE_CLASSIC_8_DID:
+ case PCI_DEVICE_CLASSIC_4_422_DID:
+ case PCI_DEVICE_CLASSIC_8_422_DID:
+
+ brd->dpatype = T_CLASSIC | T_PCIBUS;
+
+ DPR_INIT(("dgnc_found_board - Classic.\n"));
+
+ /*
+ * For PCI ClassicBoards
+ * PCI Local Address (i.e. "resource" number) space
+ * 0 PLX Memory Mapped Config
+ * 1 PLX I/O Mapped Config
+ * 2 I/O Mapped UARTs and Status
+ * 3 Memory Mapped VPD
+ * 4 Memory Mapped UARTs and Status
+ */
+
+
+ /* get the PCI Base Address Registers */
+ brd->membase = pci_resource_start(pdev, 4);
+
+ if (!brd->membase) {
+ APR(("card has no PCI IO resources, failing board.\n"));
+ return -ENODEV;
+ }
+
+ brd->membase_end = pci_resource_end(pdev, 4);
+
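+ /*
+ * Strip any PCI BAR flag bits that may remain: bit 0 set would
+ * indicate an I/O BAR (mask the low 2 bits), clear a memory BAR
+ * (mask the low 4 bits). pci_resource_start() normally returns
+ * the address already masked, so this is defensive.
+ */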
+ if (brd->membase & 1)
+ brd->membase &= ~3;
+ else
+ brd->membase &= ~15;
+
+ brd->iobase = pci_resource_start(pdev, 1);
+ brd->iobase_end = pci_resource_end(pdev, 1);
+ brd->iobase = ((unsigned int) (brd->iobase)) & 0xFFFE;
+
+ /* Assign the board_ops struct */
+ brd->bd_ops = &dgnc_cls_ops;
+
+ brd->bd_uart_offset = 0x8;
+ brd->bd_dividend = 921600;
+
+ dgnc_do_remap(brd);
+
+ /* Get and store the board VPD, if it exists */
+ brd->bd_ops->vpd(brd);
+
+ /*
+ * Enable Local Interrupt 1 (0x1),
+ * Local Interrupt 1 Polarity Active high (0x2),
+ * Enable PCI interrupt (0x40)
+ */
+ outb(0x43, brd->iobase + 0x4c);
+
+ break;
+
+
+ case PCI_DEVICE_NEO_4_DID:
+ case PCI_DEVICE_NEO_8_DID:
+ case PCI_DEVICE_NEO_2DB9_DID:
+ case PCI_DEVICE_NEO_2DB9PRI_DID:
+ case PCI_DEVICE_NEO_2RJ45_DID:
+ case PCI_DEVICE_NEO_2RJ45PRI_DID:
+ case PCI_DEVICE_NEO_1_422_DID:
+ case PCI_DEVICE_NEO_1_422_485_DID:
+ case PCI_DEVICE_NEO_2_422_485_DID:
+ case PCI_DEVICE_NEO_EXPRESS_8_DID:
+ case PCI_DEVICE_NEO_EXPRESS_4_DID:
+ case PCI_DEVICE_NEO_EXPRESS_4RJ45_DID:
+ case PCI_DEVICE_NEO_EXPRESS_8RJ45_DID:
+
+ /*
+ * This chip is set up 100% when we get to it.
+ * No need to enable global interrupts or anything.
+ */
+ if (brd->bd_flags & BD_IS_PCI_EXPRESS)
+ brd->dpatype = T_NEO_EXPRESS | T_PCIBUS;
+ else
+ brd->dpatype = T_NEO | T_PCIBUS;
+
+ DPR_INIT(("dgnc_found_board - NEO.\n"));
+
+ /* get the PCI Base Address Registers */
+ brd->membase = pci_resource_start(pdev, 0);
+ brd->membase_end = pci_resource_end(pdev, 0);
+
+ if (brd->membase & 1)
+ brd->membase &= ~3;
+ else
+ brd->membase &= ~15;
+
+ /* Assign the board_ops struct */
+ brd->bd_ops = &dgnc_neo_ops;
+
+ brd->bd_uart_offset = 0x200;
+ brd->bd_dividend = 921600;
+
+ dgnc_do_remap(brd);
+
+ if (brd->re_map_membase) {
+
+ /* After remap is complete, we need to read and store the dvid */
+ brd->dvid = readb(brd->re_map_membase + 0x8D);
+
+ /* Get and store the board VPD, if it exists */
+ brd->bd_ops->vpd(brd);
+ }
+ break;
+
+ default:
+ APR(("Did not find any compatible Neo or Classic PCI boards in system.\n"));
+ return (-ENXIO);
+
+ }
+
+ /*
+ * Do tty device initialization.
+ */
+
+ rc = dgnc_tty_register(brd);
+ if (rc < 0) {
+ dgnc_tty_uninit(brd);
+ APR(("Can't register tty devices (%d)\n", rc));
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ goto failed;
+ }
+
+ rc = dgnc_finalize_board_init(brd);
+ if (rc < 0) {
+ APR(("Can't finalize board init (%d)\n", rc));
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+
+ goto failed;
+ }
+
+ rc = dgnc_tty_init(brd);
+ if (rc < 0) {
+ dgnc_tty_uninit(brd);
+ APR(("Can't init tty devices (%d)\n", rc));
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+
+ goto failed;
+ }
+
+ brd->state = BOARD_READY;
+ brd->dpastatus = BD_RUNNING;
+
+ dgnc_create_ports_sysfiles(brd);
+
+ /* init our poll helper tasklet */
+ tasklet_init(&brd->helper_tasklet, brd->bd_ops->tasklet, (unsigned long) brd);
+
+ DPR_INIT(("dgnc_scan(%d) - printing out the msgbuf\n", i));
+ DGNC_LOCK(dgnc_global_lock, flags);
+ brd->msgbuf = NULL;
+ printk("%s", brd->msgbuf_head);
+ kfree(brd->msgbuf_head);
+ brd->msgbuf_head = NULL;
+ DGNC_UNLOCK(dgnc_global_lock, flags);
+
+ /*
+ * allocate flip buffer for board.
+ *
+ * Okay to malloc with GFP_KERNEL, we are not in interrupt
+ * context, and there are no locks held.
+ */
+ brd->flipbuf = kzalloc(MYFLIPLEN, GFP_KERNEL);
+
+ wake_up_interruptible(&brd->state_wait);
+
+ return(0);
+
+failed:
+
+ return (-ENXIO);
+
+}
+
+
+static int dgnc_finalize_board_init(struct board_t *brd)
+{
+ int rc = 0;
+
+ DPR_INIT(("dgnc_finalize_board_init() - start\n"));
+
+ if (!brd || brd->magic != DGNC_BOARD_MAGIC)
+ return(-ENODEV);
+
+ DPR_INIT(("dgnc_finalize_board_init() - start #2\n"));
+
+ if (brd->irq) {
+ rc = request_irq(brd->irq, brd->bd_ops->intr, IRQF_SHARED, "DGNC", brd);
+
+ if (rc) {
+ printk("Failed to hook IRQ %d\n",brd->irq);
+ brd->state = BOARD_FAILED;
+ brd->dpastatus = BD_NOFEP;
+ rc = -ENODEV;
+ } else {
+ DPR_INIT(("Requested and received usage of IRQ %d\n", brd->irq));
+ }
+ }
+ return(rc);
+}
+
+/*
+ * Remap PCI memory.
+ */
+static void dgnc_do_remap(struct board_t *brd)
+{
+
+ if (!brd || brd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ brd->re_map_membase = ioremap(brd->membase, 0x1000);
+
+ DPR_INIT(("remapped mem: 0x%p\n", brd->re_map_membase));
+}
+
+
+/*****************************************************************************
+*
+* Function:
+*
+* dgnc_poll_handler
+*
+* Author:
+*
+* Scott H Kilau
+*
+* Parameters:
+*
+* dummy -- ignored
+*
+* Return Values:
+*
+* none
+*
+* Description:
+*
+* As each timer expires, it determines (a) whether the "transmit"
+* waiter needs to be woken up, and (b) whether the poller needs to
+* be rescheduled.
+*
+******************************************************************************/
+
+static void dgnc_poll_handler(ulong dummy)
+{
+ struct board_t *brd;
+ unsigned long lock_flags;
+ int i;
+ unsigned long new_time;
+
+ dgnc_poll_counter++;
+
+ /*
+ * Do not start the board state machine until
+ * the driver tells us it's up and running, and has
+ * everything it needs.
+ */
+ if (dgnc_driver_state != DRIVER_READY) {
+ goto schedule_poller;
+ }
+
+ /* Go thru each board, kicking off a tasklet for each if needed */
+ for (i = 0; i < dgnc_NumBoards; i++) {
+ brd = dgnc_Board[i];
+
+ DGNC_LOCK(brd->bd_lock, lock_flags);
+
+ /* If board is in a failed state, don't bother scheduling a tasklet */
+ if (brd->state == BOARD_FAILED) {
+ DGNC_UNLOCK(brd->bd_lock, lock_flags);
+ continue;
+ }
+
+ /* Schedule a poll helper task */
+ tasklet_schedule(&brd->helper_tasklet);
+
+ DGNC_UNLOCK(brd->bd_lock, lock_flags);
+ }
+
+schedule_poller:
+
+ /*
+ * Schedule ourself back at the nominal wakeup interval.
+ */
+ DGNC_LOCK(dgnc_poll_lock, lock_flags);
+ dgnc_poll_time += dgnc_jiffies_from_ms(dgnc_poll_tick);
+
+ new_time = dgnc_poll_time - jiffies;
+
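+ /*
+ * If the next scheduled wakeup has drifted too far ahead
+ * (or jiffies has wrapped), resynchronize to one poll tick
+ * from now instead.
+ */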
+ if ((ulong) new_time >= 2 * dgnc_poll_tick) {
+ dgnc_poll_time = jiffies + dgnc_jiffies_from_ms(dgnc_poll_tick);
+ }
+
+ init_timer(&dgnc_poll_timer);
+ dgnc_poll_timer.function = dgnc_poll_handler;
+ dgnc_poll_timer.data = 0;
+ dgnc_poll_timer.expires = dgnc_poll_time;
+ DGNC_UNLOCK(dgnc_poll_lock, lock_flags);
+
+ if (!dgnc_poll_stop)
+ add_timer(&dgnc_poll_timer);
+}
+
+/*
+ * dgnc_init_globals()
+ *
+ * This is where we initialize the globals from the static insmod
+ * configuration variables. These are declared near the head of
+ * this file.
+ */
+static void dgnc_init_globals(void)
+{
+ int i = 0;
+
+ dgnc_rawreadok = rawreadok;
+ dgnc_trcbuf_size = trcbuf_size;
+ dgnc_debug = debug;
+
+ for (i = 0; i < MAXBOARDS; i++) {
+ dgnc_Board[i] = NULL;
+ }
+
+ init_timer(&dgnc_poll_timer);
+}
+
+
+/************************************************************************
+ *
+ * Utility functions
+ *
+ ************************************************************************/
+
+/*
+ * dgnc_ms_sleep()
+ *
+ * Put the driver to sleep for x milliseconds.
+ *
+ * Returns 0 if it timed out, non-zero if it was interrupted by a signal.
+ */
+int dgnc_ms_sleep(ulong ms)
+{
+ current->state = TASK_INTERRUPTIBLE;
+ schedule_timeout((ms * HZ) / 1000);
+ return (signal_pending(current));
+}
+
+
+
+/*
+ * dgnc_ioctl_name() : Returns a text version of each ioctl value.
+ */
+char *dgnc_ioctl_name(int cmd)
+{
+ switch (cmd) {
+
+ case TCGETA: return("TCGETA");
+ case TCGETS: return("TCGETS");
+ case TCSETA: return("TCSETA");
+ case TCSETS: return("TCSETS");
+ case TCSETAW: return("TCSETAW");
+ case TCSETSW: return("TCSETSW");
+ case TCSETAF: return("TCSETAF");
+ case TCSETSF: return("TCSETSF");
+ case TCSBRK: return("TCSBRK");
+ case TCXONC: return("TCXONC");
+ case TCFLSH: return("TCFLSH");
+ case TIOCGSID: return("TIOCGSID");
+
+ case TIOCGETD: return("TIOCGETD");
+ case TIOCSETD: return("TIOCSETD");
+ case TIOCGWINSZ: return("TIOCGWINSZ");
+ case TIOCSWINSZ: return("TIOCSWINSZ");
+
+ case TIOCMGET: return("TIOCMGET");
+ case TIOCMSET: return("TIOCMSET");
+ case TIOCMBIS: return("TIOCMBIS");
+ case TIOCMBIC: return("TIOCMBIC");
+
+ /* from digi.h */
+ case DIGI_SETA: return("DIGI_SETA");
+ case DIGI_SETAW: return("DIGI_SETAW");
+ case DIGI_SETAF: return("DIGI_SETAF");
+ case DIGI_SETFLOW: return("DIGI_SETFLOW");
+ case DIGI_SETAFLOW: return("DIGI_SETAFLOW");
+ case DIGI_GETFLOW: return("DIGI_GETFLOW");
+ case DIGI_GETAFLOW: return("DIGI_GETAFLOW");
+ case DIGI_GETA: return("DIGI_GETA");
+ case DIGI_GEDELAY: return("DIGI_GEDELAY");
+ case DIGI_SEDELAY: return("DIGI_SEDELAY");
+ case DIGI_GETCUSTOMBAUD: return("DIGI_GETCUSTOMBAUD");
+ case DIGI_SETCUSTOMBAUD: return("DIGI_SETCUSTOMBAUD");
+ case TIOCMODG: return("TIOCMODG");
+ case TIOCMODS: return("TIOCMODS");
+ case TIOCSDTR: return("TIOCSDTR");
+ case TIOCCDTR: return("TIOCCDTR");
+
+ default: return("unknown");
+ }
+}
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
new file mode 100644
index 00000000000..218b15dccb7
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -0,0 +1,563 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ *************************************************************************
+ *
+ * Driver includes
+ *
+ *************************************************************************/
+
+#ifndef __DGNC_DRIVER_H
+#define __DGNC_DRIVER_H
+
+#include <linux/types.h> /* To pick up the various Linux types */
+#include <linux/tty.h> /* To pick up the various tty structs/defines */
+#include <linux/interrupt.h> /* For irqreturn_t type */
+
+#include "dgnc_types.h" /* Additional types needed by the Digi header files */
+#include "digi.h" /* Digi specific ioctl header */
+#include "dgnc_kcompat.h" /* Kernel 2.4/2.6 compat includes */
+#include "dgnc_sysfs.h" /* Support for SYSFS */
+
+/*************************************************************************
+ *
+ * Driver defines
+ *
+ *************************************************************************/
+
+/*
+ * Driver identification, error and debugging statements
+ *
+ * In theory, you can change all occurrences of "digi" in the next
+ * three lines, and the driver printk's will all automagically change.
+ *
+ * APR((fmt, args, ...)); Always prints message
+ * DPR((fmt, args, ...)); Only prints if DGNC_TRACER is defined at
+ * compile time and dgnc_debug!=0
+ */
+#define PROCSTR "dgnc" /* /proc entries */
+#define DEVSTR "/dev/dg/dgnc" /* /dev entries */
+#define DRVSTR "dgnc" /* Driver name string
+ * displayed by APR */
+#define APR(args) do { PRINTF_TO_KMEM(args); printk(DRVSTR": "); printk args; \
+ } while (0)
+#define RAPR(args) do { PRINTF_TO_KMEM(args); printk args; } while (0)
+
+#define TRC_TO_CONSOLE 1
+
+/*
+ * Debugging levels can be set using debug insmod variable
+ * They can also be compiled out completely.
+ */
+
+#define DBG_INIT (dgnc_debug & 0x01)
+#define DBG_BASIC (dgnc_debug & 0x02)
+#define DBG_CORE (dgnc_debug & 0x04)
+
+#define DBG_OPEN (dgnc_debug & 0x08)
+#define DBG_CLOSE (dgnc_debug & 0x10)
+#define DBG_READ (dgnc_debug & 0x20)
+#define DBG_WRITE (dgnc_debug & 0x40)
+
+#define DBG_IOCTL (dgnc_debug & 0x80)
+
+#define DBG_PROC (dgnc_debug & 0x100)
+#define DBG_PARAM (dgnc_debug & 0x200)
+#define DBG_PSCAN (dgnc_debug & 0x400)
+#define DBG_EVENT (dgnc_debug & 0x800)
+
+#define DBG_DRAIN (dgnc_debug & 0x1000)
+#define DBG_MSIGS (dgnc_debug & 0x2000)
+
+#define DBG_MGMT (dgnc_debug & 0x4000)
+#define DBG_INTR (dgnc_debug & 0x8000)
+
+#define DBG_CARR (dgnc_debug & 0x10000)
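+
+/*
+ * Example: loading the driver with debug=0x09 turns on DBG_INIT
+ * and DBG_OPEN tracing (when the driver is built with DGNC_TRACER).
+ */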
+
+
+#if defined(DGNC_TRACER)
+
+# if defined(TRC_TO_KMEM)
+/* Choose one: */
+# define TRC_ON_OVERFLOW_WRAP_AROUND
+# undef TRC_ON_OVERFLOW_SHIFT_BUFFER
+# endif //TRC_TO_KMEM
+
+# define TRC_MAXMSG 1024
+# define TRC_OVERFLOW "(OVERFLOW)"
+# define TRC_DTRC "/usr/bin/dtrc"
+
+#if defined TRC_TO_CONSOLE
+#define PRINTF_TO_CONSOLE(args) { printk(DRVSTR": "); printk args; }
+#else //!defined TRC_TO_CONSOLE
+#define PRINTF_TO_CONSOLE(args)
+#endif
+
+#if defined TRC_TO_KMEM
+#define PRINTF_TO_KMEM(args) dgnc_tracef args
+#else //!defined TRC_TO_KMEM
+#define PRINTF_TO_KMEM(args)
+#endif
+
+#define TRC(args) { PRINTF_TO_KMEM(args); PRINTF_TO_CONSOLE(args) }
+
+# define DPR_INIT(ARGS) if (DBG_INIT) TRC(ARGS)
+# define DPR_BASIC(ARGS) if (DBG_BASIC) TRC(ARGS)
+# define DPR_CORE(ARGS) if (DBG_CORE) TRC(ARGS)
+# define DPR_OPEN(ARGS) if (DBG_OPEN) TRC(ARGS)
+# define DPR_CLOSE(ARGS) if (DBG_CLOSE) TRC(ARGS)
+# define DPR_READ(ARGS) if (DBG_READ) TRC(ARGS)
+# define DPR_WRITE(ARGS) if (DBG_WRITE) TRC(ARGS)
+# define DPR_IOCTL(ARGS) if (DBG_IOCTL) TRC(ARGS)
+# define DPR_PROC(ARGS) if (DBG_PROC) TRC(ARGS)
+# define DPR_PARAM(ARGS) if (DBG_PARAM) TRC(ARGS)
+# define DPR_PSCAN(ARGS) if (DBG_PSCAN) TRC(ARGS)
+# define DPR_EVENT(ARGS) if (DBG_EVENT) TRC(ARGS)
+# define DPR_DRAIN(ARGS) if (DBG_DRAIN) TRC(ARGS)
+# define DPR_CARR(ARGS) if (DBG_CARR) TRC(ARGS)
+# define DPR_MGMT(ARGS) if (DBG_MGMT) TRC(ARGS)
+# define DPR_INTR(ARGS) if (DBG_INTR) TRC(ARGS)
+# define DPR_MSIGS(ARGS) if (DBG_MSIGS) TRC(ARGS)
+
+# define DPR(ARGS) if (dgnc_debug) TRC(ARGS)
+# define P(X) dgnc_tracef(#X "=%p\n", X)
+# define X(X) dgnc_tracef(#X "=%x\n", X)
+
+#else//!defined DGNC_TRACER
+
+#define PRINTF_TO_KMEM(args)
+# define TRC(ARGS)
+# define DPR_INIT(ARGS)
+# define DPR_BASIC(ARGS)
+# define DPR_CORE(ARGS)
+# define DPR_OPEN(ARGS)
+# define DPR_CLOSE(ARGS)
+# define DPR_READ(ARGS)
+# define DPR_WRITE(ARGS)
+# define DPR_IOCTL(ARGS)
+# define DPR_PROC(ARGS)
+# define DPR_PARAM(ARGS)
+# define DPR_PSCAN(ARGS)
+# define DPR_EVENT(ARGS)
+# define DPR_DRAIN(ARGS)
+# define DPR_CARR(ARGS)
+# define DPR_MGMT(ARGS)
+# define DPR_INTR(ARGS)
+# define DPR_MSIGS(ARGS)
+
+# define DPR(args)
+
+#endif//DGNC_TRACER
+
+/* Number of boards we support at once. */
+#define MAXBOARDS 20
+#define MAXPORTS 8
+#define MAXTTYNAMELEN 200
+
+/* Our 3 magic numbers for our board, channel and unit structs */
+#define DGNC_BOARD_MAGIC 0x5c6df104
+#define DGNC_CHANNEL_MAGIC 0x6c6df104
+#define DGNC_UNIT_MAGIC 0x7c6df104
+
+/* Serial port types */
+#define DGNC_SERIAL 0
+#define DGNC_PRINT 1
+
+#define SERIAL_TYPE_NORMAL 1
+
+#define PORT_NUM(dev) ((dev) & 0x7f)
+#define IS_PRINT(dev) (((dev) & 0xff) >= 0x80)
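+
+/*
+ * Example: minor number 0x83 maps to port 3 of the transparent print
+ * unit (bit 7 set), while minor 0x03 is the serial unit for the same port.
+ */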
+
+/* MAX number of stop characters we will send when our read queue is getting full */
+#define MAX_STOPS_SENT 5
+
+/* 4 extra for alignment play space */
+#define WRITEBUFLEN ((4096) + 4)
+#define MYFLIPLEN N_TTY_BUF_SIZE
+
+#define dgnc_jiffies_from_ms(a) (((a) * HZ) / 1000)
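+
+/*
+ * Example: with HZ=1000, dgnc_jiffies_from_ms(20) evaluates to 20 jiffies;
+ * with HZ=250 it evaluates to 5.
+ */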
+
+/*
+ * Define a local default termios struct. All ports will be created
+ * with this termios initially. This is the same structure that is defined
+ * as the default in tty_io.c with the same settings overriden as in serial.c
+ *
+ * In short, this should match the internal serial ports' defaults.
+ */
+#define DEFAULT_IFLAGS (ICRNL | IXON)
+#define DEFAULT_OFLAGS (OPOST | ONLCR)
+#define DEFAULT_CFLAGS (B9600 | CS8 | CREAD | HUPCL | CLOCAL)
+#define DEFAULT_LFLAGS (ISIG | ICANON | ECHO | ECHOE | ECHOK | \
+ ECHOCTL | ECHOKE | IEXTEN)
+
+#ifndef _POSIX_VDISABLE
+#define _POSIX_VDISABLE '\0'
+#endif
+
+#define SNIFF_MAX 65536 /* Sniff buffer size (2^n) */
+#define SNIFF_MASK (SNIFF_MAX - 1) /* Sniff wrap mask */
+
+/*
+ * Lock function/defines.
+ * Makes spotting lock/unlock locations easier.
+ */
+# define DGNC_SPINLOCK_INIT(x) spin_lock_init(&(x))
+# define DGNC_LOCK(x,y) spin_lock_irqsave(&(x), y)
+# define DGNC_UNLOCK(x,y) spin_unlock_irqrestore(&(x), y)
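+
+/*
+ * Typical usage:
+ *
+ * ulong flags;
+ * DGNC_LOCK(ch->ch_lock, flags);
+ * ... critical section ...
+ * DGNC_UNLOCK(ch->ch_lock, flags);
+ */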
+
+/*
+ * All the possible states the driver can be while being loaded.
+ */
+enum {
+ DRIVER_INITIALIZED = 0,
+ DRIVER_READY
+};
+
+/*
+ * All the possible states the board can be while booting up.
+ */
+enum {
+ BOARD_FAILED = 0,
+ BOARD_FOUND,
+ BOARD_READY
+};
+
+
+/*************************************************************************
+ *
+ * Structures and closely related defines.
+ *
+ *************************************************************************/
+
+struct board_t;
+struct channel_t;
+
+/************************************************************************
+ * Per board operations structure *
+ ************************************************************************/
+struct board_ops {
+ void (*tasklet) (unsigned long data);
+ irqreturn_t (*intr) (int irq, void *voidbrd);
+ void (*uart_init) (struct channel_t *ch);
+ void (*uart_off) (struct channel_t *ch);
+ int (*drain) (struct tty_struct *tty, uint seconds);
+ void (*param) (struct tty_struct *tty);
+ void (*vpd) (struct board_t *brd);
+ void (*assert_modem_signals) (struct channel_t *ch);
+ void (*flush_uart_write) (struct channel_t *ch);
+ void (*flush_uart_read) (struct channel_t *ch);
+ void (*disable_receiver) (struct channel_t *ch);
+ void (*enable_receiver) (struct channel_t *ch);
+ void (*send_break) (struct channel_t *ch, int);
+ void (*send_start_character) (struct channel_t *ch);
+ void (*send_stop_character) (struct channel_t *ch);
+ void (*copy_data_from_queue_to_uart) (struct channel_t *ch);
+ uint (*get_uart_bytes_left) (struct channel_t *ch);
+ void (*send_immediate_char) (struct channel_t *ch, unsigned char);
+};
+
+/************************************************************************
+ * Device flag definitions for bd_flags.
+ ************************************************************************/
+#define BD_IS_PCI_EXPRESS 0x0001 /* Is a PCI Express board */
+
+
+/*
+ * Per-board information
+ */
+struct board_t {
+ int magic; /* Board Magic number. */
+ int boardnum; /* Board number: 0 - (MAXBOARDS-1) */
+
+ int type; /* Type of board */
+ char *name; /* Product Name */
+ struct pci_dev *pdev; /* Pointer to the pci_dev struct */
+ unsigned long bd_flags; /* Board flags */
+ u16 vendor; /* PCI vendor ID */
+ u16 device; /* PCI device ID */
+ u16 subvendor; /* PCI subsystem vendor ID */
+ u16 subdevice; /* PCI subsystem device ID */
+ uchar rev; /* PCI revision ID */
+ uint pci_bus; /* PCI bus value */
+ uint pci_slot; /* PCI slot value */
+ uint maxports; /* MAX ports this board can handle */
+ uchar dvid; /* Board specific device id */
+ uchar vpd[128]; /* VPD of board, if found */
+ uchar serial_num[20]; /* Serial number of board, if found in VPD */
+
+ spinlock_t bd_lock; /* Used to protect board */
+
+ spinlock_t bd_intr_lock; /* Used to protect the poller tasklet and
+ * the interrupt routine from each other.
+ */
+
+ uint state; /* State of card. */
+ wait_queue_head_t state_wait; /* Place to sleep on for state change */
+
+ struct tasklet_struct helper_tasklet; /* Poll helper tasklet */
+
+ uint nasync; /* Number of ports on card */
+
+ uint irq; /* Interrupt request number */
+ ulong intr_count; /* Count of interrupts */
+ ulong intr_modem; /* Count of interrupts */
+ ulong intr_tx; /* Count of interrupts */
+ ulong intr_rx; /* Count of interrupts */
+
+ ulong membase; /* Start of base memory of the card */
+ ulong membase_end; /* End of base memory of the card */
+
+ u8 __iomem *re_map_membase;/* Remapped memory of the card */
+
+ ulong iobase; /* Start of io base of the card */
+ ulong iobase_end; /* End of io base of the card */
+
+ uint bd_uart_offset; /* Space between each UART */
+
+ struct channel_t *channels[MAXPORTS]; /* array of pointers to our channels. */
+
+ struct tty_driver SerialDriver;
+ char SerialName[200];
+ struct tty_driver PrintDriver;
+ char PrintName[200];
+
+ uint dgnc_Major_Serial_Registered;
+ uint dgnc_Major_TransparentPrint_Registered;
+
+ uint dgnc_Serial_Major;
+ uint dgnc_TransparentPrint_Major;
+
+ uint TtyRefCnt;
+
+ char *flipbuf; /* Our flip buffer, alloced if board is found */
+
+ u16 dpatype; /* The board "type", as defined by DPA */
+ u16 dpastatus; /* The board "status", as defined by DPA */
+
+ /*
+ * Mgmt data.
+ */
+ char *msgbuf_head;
+ char *msgbuf;
+
+ uint bd_dividend; /* Board/UARTs specific dividend */
+
+ struct board_ops *bd_ops;
+
+ /* /proc/<board> entries */
+ struct proc_dir_entry *proc_entry_pointer;
+ struct dgnc_proc_entry *dgnc_board_table;
+
+};
+
+
+/************************************************************************
+ * Unit flag definitions for un_flags.
+ ************************************************************************/
+#define UN_ISOPEN 0x0001 /* Device is open */
+#define UN_CLOSING 0x0002 /* Line is being closed */
+#define UN_IMM 0x0004 /* Service immediately */
+#define UN_BUSY 0x0008 /* Some work pending on this channel */
+#define UN_BREAKI 0x0010 /* Input break received */
+#define UN_PWAIT 0x0020 /* Printer waiting for terminal */
+#define UN_TIME 0x0040 /* Waiting on time */
+#define UN_EMPTY 0x0080 /* Waiting output queue empty */
+#define UN_LOW 0x0100 /* Waiting output low water mark*/
+#define UN_EXCL_OPEN 0x0200 /* Open for exclusive use */
+#define UN_WOPEN 0x0400 /* Device waiting for open */
+#define UN_WIOCTL 0x0800 /* Device waiting for ioctl */
+#define UN_HANGUP 0x8000 /* Carrier lost */
+
+struct device;
+
+/************************************************************************
+ * Structure for terminal or printer unit.
+ ************************************************************************/
+struct un_t {
+ int magic; /* Unit Magic Number. */
+ struct channel_t *un_ch;
+ ulong un_time;
+ uint un_type;
+ uint un_open_count; /* Counter of opens to port */
+ struct tty_struct *un_tty;/* Pointer to unit tty structure */
+ uint un_flags; /* Unit flags */
+ wait_queue_head_t un_flags_wait; /* Place to sleep to wait on unit */
+ uint un_dev; /* Minor device number */
+ struct device *un_sysfs;
+};
+
+
+/************************************************************************
+ * Device flag definitions for ch_flags.
+ ************************************************************************/
+#define CH_PRON 0x0001 /* Printer on string */
+#define CH_STOP 0x0002 /* Output is stopped */
+#define CH_STOPI 0x0004 /* Input is stopped */
+#define CH_CD 0x0008 /* Carrier is present */
+#define CH_FCAR 0x0010 /* Carrier forced on */
+#define CH_HANGUP 0x0020 /* Hangup received */
+
+#define CH_RECEIVER_OFF 0x0040 /* Receiver is off */
+#define CH_OPENING 0x0080 /* Port in fragile open state */
+#define CH_CLOSING 0x0100 /* Port in fragile close state */
+#define CH_FIFO_ENABLED 0x0200 /* Port has FIFOs enabled */
+#define CH_TX_FIFO_EMPTY 0x0400 /* TX Fifo is completely empty */
+#define CH_TX_FIFO_LWM 0x0800 /* TX Fifo is below Low Water */
+#define CH_BREAK_SENDING 0x1000 /* Break is being sent */
+#define CH_LOOPBACK 0x2000 /* Channel is in loopback mode */
+#define CH_FLIPBUF_IN_USE 0x4000 /* Channel's flipbuf is in use */
+#define CH_BAUD0 0x08000 /* Used for checking B0 transitions */
+#define CH_FORCED_STOP 0x20000 /* Output is forcibly stopped */
+#define CH_FORCED_STOPI 0x40000 /* Input is forcibly stopped */
+
+/*
+ * Definitions for ch_sniff_flags
+ */
+#define SNIFF_OPEN 0x1
+#define SNIFF_WAIT_DATA 0x2
+#define SNIFF_WAIT_SPACE 0x4
+
+
+/* Our Read/Error/Write queue sizes */
+#define RQUEUEMASK 0x1FFF /* 8 K - 1 */
+#define EQUEUEMASK 0x1FFF /* 8 K - 1 */
+#define WQUEUEMASK 0x0FFF /* 4 K - 1 */
+#define RQUEUESIZE (RQUEUEMASK + 1)
+#define EQUEUESIZE RQUEUESIZE
+#define WQUEUESIZE (WQUEUEMASK + 1)
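+
+/*
+ * The head/tail indices into these queues are meant to be wrapped with
+ * the matching mask (e.g. ch_r_head & RQUEUEMASK), giving power-of-two
+ * ring buffers: 8 KB for the read and error queues, 4 KB for write.
+ */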
+
+
+/************************************************************************
+ * Channel information structure.
+ ************************************************************************/
+struct channel_t {
+ int magic; /* Channel Magic Number */
+ struct board_t *ch_bd; /* Board structure pointer */
+ struct digi_t ch_digi; /* Transparent Print structure */
+ struct un_t ch_tun; /* Terminal unit info */
+ struct un_t ch_pun; /* Printer unit info */
+
+ spinlock_t ch_lock; /* provide for serialization */
+ wait_queue_head_t ch_flags_wait;
+
+ uint ch_portnum; /* Port number, 0 offset. */
+ uint ch_open_count; /* open count */
+ uint ch_flags; /* Channel flags */
+
+ ulong ch_close_delay; /* How long we should drop RTS/DTR for */
+
+ ulong ch_cpstime; /* Time for CPS calculations */
+
+ tcflag_t ch_c_iflag; /* channel iflags */
+ tcflag_t ch_c_cflag; /* channel cflags */
+ tcflag_t ch_c_oflag; /* channel oflags */
+ tcflag_t ch_c_lflag; /* channel lflags */
+ uchar ch_stopc; /* Stop character */
+ uchar ch_startc; /* Start character */
+
+ uint ch_old_baud; /* Cache of the current baud */
+ uint ch_custom_speed;/* Custom baud, if set */
+
+ uint ch_wopen; /* Waiting for open process cnt */
+
+ uchar ch_mostat; /* FEP output modem status */
+ uchar ch_mistat; /* FEP input modem status */
+
+ struct neo_uart_struct __iomem *ch_neo_uart; /* Pointer to the "mapped" UART struct */
+ struct cls_uart_struct __iomem *ch_cls_uart; /* Pointer to the "mapped" UART struct */
+
+ uchar ch_cached_lsr; /* Cached value of the LSR register */
+
+ uchar *ch_rqueue; /* Our read queue buffer - malloc'ed */
+ ushort ch_r_head; /* Head location of the read queue */
+ ushort ch_r_tail; /* Tail location of the read queue */
+
+ uchar *ch_equeue; /* Our error queue buffer - malloc'ed */
+ ushort ch_e_head; /* Head location of the error queue */
+ ushort ch_e_tail; /* Tail location of the error queue */
+
+ uchar *ch_wqueue; /* Our write queue buffer - malloc'ed */
+ ushort ch_w_head; /* Head location of the write queue */
+ ushort ch_w_tail; /* Tail location of the write queue */
+
+ ulong ch_rxcount; /* total of data received so far */
+ ulong ch_txcount; /* total of data transmitted so far */
+
+ uchar ch_r_tlevel; /* Receive Trigger level */
+ uchar ch_t_tlevel; /* Transmit Trigger level */
+
+ uchar ch_r_watermark; /* Receive Watermark */
+
+ ulong ch_stop_sending_break; /* Time we should STOP sending a break */
+
+ uint ch_stops_sent; /* How many times I have sent a stop character
+ * to try to stop the other guy sending.
+ */
+ ulong ch_err_parity; /* Count of parity errors on channel */
+ ulong ch_err_frame; /* Count of framing errors on channel */
+ ulong ch_err_break; /* Count of breaks on channel */
+ ulong ch_err_overrun; /* Count of overruns on channel */
+
+ ulong ch_xon_sends; /* Count of xons transmitted */
+ ulong ch_xoff_sends; /* Count of xoffs transmitted */
+
+ ulong ch_intr_modem; /* Count of interrupts */
+ ulong ch_intr_tx; /* Count of interrupts */
+ ulong ch_intr_rx; /* Count of interrupts */
+
+
+ /* /proc/<board>/<channel> entries */
+ struct proc_dir_entry *proc_entry_pointer;
+ struct dgnc_proc_entry *dgnc_channel_table;
+
+ uint ch_sniff_in;
+ uint ch_sniff_out;
+ char *ch_sniff_buf; /* Sniff buffer for proc */
+ ulong ch_sniff_flags; /* Channel flags */
+ wait_queue_head_t ch_sniff_wait;
+};
+
+
+/*************************************************************************
+ *
+ * Prototypes for non-static functions used in more than one module
+ *
+ *************************************************************************/
+
+extern int dgnc_ms_sleep(ulong ms);
+extern char *dgnc_ioctl_name(int cmd);
+
+/*
+ * Our Global Variables.
+ */
+extern int dgnc_driver_state; /* The state of the driver */
+extern uint dgnc_Major; /* Our driver/mgmt major */
+extern int dgnc_debug; /* Debug variable */
+extern int dgnc_rawreadok; /* Set if user wants rawreads */
+extern int dgnc_poll_tick; /* Poll interval - 20 ms */
+extern int dgnc_trcbuf_size; /* Size of the ringbuffer */
+extern spinlock_t dgnc_global_lock; /* Driver global spinlock */
+extern uint dgnc_NumBoards; /* Total number of boards */
+extern struct board_t *dgnc_Board[MAXBOARDS]; /* Array of board structs */
+extern ulong dgnc_poll_counter; /* Times the poller has run */
+extern char *dgnc_state_text[]; /* Array of state text */
+extern char *dgnc_driver_state_text[];/* Array of driver state text */
+
+#endif
diff --git a/drivers/staging/dgnc/dgnc_kcompat.h b/drivers/staging/dgnc/dgnc_kcompat.h
new file mode 100644
index 00000000000..00f589a13ab
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_kcompat.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright 2004 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ *************************************************************************
+ *
+ * This file is intended to contain all the kernel "differences" between the
+ * various kernels that we support.
+ *
+ *************************************************************************/
+
+#ifndef __DGNC_KCOMPAT_H
+#define __DGNC_KCOMPAT_H
+
+#include <linux/version.h>
+
+# ifndef KERNEL_VERSION
+# define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
+# endif
+
+
+#if !defined(TTY_FLIPBUF_SIZE)
+# define TTY_FLIPBUF_SIZE 512
+#endif
+
+
+/* Sparse stuff */
+# ifndef __user
+# define __user
+# define __kernel
+# define __safe
+# define __force
+# define __chk_user_ptr(x) (void)0
+# endif
+
+
+# define PARM_STR(VAR, INIT, PERM, DESC) \
+ static char *VAR = INIT; \
+ char *dgnc_##VAR; \
+ module_param(VAR, charp, PERM); \
+ MODULE_PARM_DESC(VAR, DESC);
+
+# define PARM_INT(VAR, INIT, PERM, DESC) \
+ static int VAR = INIT; \
+ int dgnc_##VAR; \
+ module_param(VAR, int, PERM); \
+ MODULE_PARM_DESC(VAR, DESC);
+
+# define PARM_ULONG(VAR, INIT, PERM, DESC) \
+ static ulong VAR = INIT; \
+ ulong dgnc_##VAR; \
+ module_param(VAR, long, PERM); \
+ MODULE_PARM_DESC(VAR, DESC);
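+
+/*
+ * Example: PARM_INT(debug, 0x00, 0644, "Driver debugging level") expands
+ * to a static module parameter "debug" plus a driver-global copy named
+ * dgnc_debug (the values shown here are illustrative).
+ */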
+
+
+
+
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
+
+
+
+/* NOTHING YET */
+
+
+
+# else
+
+
+
+# error "this driver does not support anything below the 2.6.27 kernel series."
+
+
+
+# endif
+
+#endif /* ! __DGNC_KCOMPAT_H */
diff --git a/drivers/staging/dgnc/dgnc_mgmt.c b/drivers/staging/dgnc/dgnc_mgmt.c
new file mode 100644
index 00000000000..c4629d7c80b
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_mgmt.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ */
+
+/************************************************************************
+ *
+ * This file implements the mgmt functionality for the
+ * Neo and ClassicBoard based product lines.
+ *
+ ************************************************************************
+ */
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/sched.h> /* For jiffies, task states */
+#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
+#include <linux/serial_reg.h>
+#include <linux/termios.h>
+#include <asm/uaccess.h> /* For copy_from_user/copy_to_user */
+
+#include "dgnc_driver.h"
+#include "dgnc_pci.h"
+#include "dgnc_kcompat.h" /* Kernel 2.4/2.6 compat includes */
+#include "dgnc_mgmt.h"
+#include "dpacompat.h"
+
+
+/* Our "in use" variables, to enforce 1 open only */
+static int dgnc_mgmt_in_use[MAXMGMTDEVICES];
+
+
+/*
+ * dgnc_mgmt_open()
+ *
+ * Open the mgmt/downld/dpa device
+ */
+int dgnc_mgmt_open(struct inode *inode, struct file *file)
+{
+ unsigned long lock_flags;
+ unsigned int minor = iminor(inode);
+
+ DPR_MGMT(("dgnc_mgmt_open start.\n"));
+
+ DGNC_LOCK(dgnc_global_lock, lock_flags);
+
+ /* mgmt device */
+ if (minor < MAXMGMTDEVICES) {
+ /* Only allow 1 open at a time on mgmt device */
+ if (dgnc_mgmt_in_use[minor]) {
+ DGNC_UNLOCK(dgnc_global_lock, lock_flags);
+ return (-EBUSY);
+ }
+ dgnc_mgmt_in_use[minor]++;
+ }
+ else {
+ DGNC_UNLOCK(dgnc_global_lock, lock_flags);
+ return (-ENXIO);
+ }
+
+ DGNC_UNLOCK(dgnc_global_lock, lock_flags);
+
+ DPR_MGMT(("dgnc_mgmt_open finish.\n"));
+
+ return 0;
+}
+
+
+/*
+ * dgnc_mgmt_close()
+ *
+ * Close the mgmt/dpa device
+ */
+int dgnc_mgmt_close(struct inode *inode, struct file *file)
+{
+ unsigned long lock_flags;
+ unsigned int minor = iminor(inode);
+
+ DPR_MGMT(("dgnc_mgmt_close start.\n"));
+
+ DGNC_LOCK(dgnc_global_lock, lock_flags);
+
+ /* mgmt device */
+ if (minor < MAXMGMTDEVICES) {
+ if (dgnc_mgmt_in_use[minor]) {
+ dgnc_mgmt_in_use[minor] = 0;
+ }
+ }
+ DGNC_UNLOCK(dgnc_global_lock, lock_flags);
+
+ DPR_MGMT(("dgnc_mgmt_close finish.\n"));
+
+ return 0;
+}
+
+
+/*
+ * dgnc_mgmt_ioctl()
+ *
+ * ioctl the mgmt/dpa device
+ */
+
+long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ unsigned long lock_flags;
+ void __user *uarg = (void __user *) arg;
+
+ DPR_MGMT(("dgnc_mgmt_ioctl start.\n"));
+
+ switch (cmd) {
+
+ case DIGI_GETDD:
+ {
+ /*
+ * This returns the total number of boards
+ * in the system, as well as driver version
+ * and has space for a reserved entry
+ */
+ struct digi_dinfo ddi;
+
+ DGNC_LOCK(dgnc_global_lock, lock_flags);
+
+ ddi.dinfo_nboards = dgnc_NumBoards;
+ sprintf(ddi.dinfo_version, "%s", DG_PART);
+
+ DGNC_UNLOCK(dgnc_global_lock, lock_flags);
+
+ DPR_MGMT(("DIGI_GETDD returning numboards: %d version: %s\n",
+ ddi.dinfo_nboards, ddi.dinfo_version));
+
+ if (copy_to_user(uarg, &ddi, sizeof (ddi)))
+ return(-EFAULT);
+
+ break;
+ }
+
+ case DIGI_GETBD:
+ {
+ int brd;
+
+ struct digi_info di;
+
+ if (copy_from_user(&brd, uarg, sizeof(int))) {
+ return(-EFAULT);
+ }
+
+ DPR_MGMT(("DIGI_GETBD asking about board: %d\n", brd));
+
+ if ((brd < 0) || (brd >= dgnc_NumBoards) || (dgnc_NumBoards == 0))
+ return (-ENODEV);
+
+ memset(&di, 0, sizeof(di));
+
+ di.info_bdnum = brd;
+
+ DGNC_LOCK(dgnc_Board[brd]->bd_lock, lock_flags);
+
+ di.info_bdtype = dgnc_Board[brd]->dpatype;
+ di.info_bdstate = dgnc_Board[brd]->dpastatus;
+ di.info_ioport = 0;
+ di.info_physaddr = (ulong) dgnc_Board[brd]->membase;
+ di.info_physsize = (ulong) dgnc_Board[brd]->membase_end - dgnc_Board[brd]->membase;
+ if (dgnc_Board[brd]->state != BOARD_FAILED)
+ di.info_nports = dgnc_Board[brd]->nasync;
+ else
+ di.info_nports = 0;
+
+ DGNC_UNLOCK(dgnc_Board[brd]->bd_lock, lock_flags);
+
+ DPR_MGMT(("DIGI_GETBD returning type: %x state: %x ports: %x size: %x\n",
+ di.info_bdtype, di.info_bdstate, di.info_nports, di.info_physsize));
+
+ if (copy_to_user(uarg, &di, sizeof (di)))
+ return (-EFAULT);
+
+ break;
+ }
+
+ case DIGI_GET_NI_INFO:
+ {
+ struct channel_t *ch;
+ struct ni_info ni;
+ uchar mstat = 0;
+ uint board = 0;
+ uint channel = 0;
+
+ if (copy_from_user(&ni, uarg, sizeof(struct ni_info))) {
+ return(-EFAULT);
+ }
+
+ DPR_MGMT(("DIGI_GETBD asking about board: %d channel: %d\n",
+ ni.board, ni.channel));
+
+ board = ni.board;
+ channel = ni.channel;
+
+ /* Verify boundaries on board */
+ if ((board < 0) || (board >= dgnc_NumBoards) || (dgnc_NumBoards == 0))
+ return (-ENODEV);
+
+ /* Verify boundaries on channel */
+ if ((channel < 0) || (channel >= dgnc_Board[board]->nasync))
+ return (-ENODEV);
+
+ ch = dgnc_Board[board]->channels[channel];
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (-ENODEV);
+
+ memset(&ni, 0, sizeof(ni));
+ ni.board = board;
+ ni.channel = channel;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ mstat = (ch->ch_mostat | ch->ch_mistat);
+
+ if (mstat & UART_MCR_DTR) {
+ ni.mstat |= TIOCM_DTR;
+ ni.dtr = TIOCM_DTR;
+ }
+ if (mstat & UART_MCR_RTS) {
+ ni.mstat |= TIOCM_RTS;
+ ni.rts = TIOCM_RTS;
+ }
+ if (mstat & UART_MSR_CTS) {
+ ni.mstat |= TIOCM_CTS;
+ ni.cts = TIOCM_CTS;
+ }
+ if (mstat & UART_MSR_RI) {
+ ni.mstat |= TIOCM_RI;
+ ni.ri = TIOCM_RI;
+ }
+ if (mstat & UART_MSR_DCD) {
+ ni.mstat |= TIOCM_CD;
+ ni.dcd = TIOCM_CD;
+ }
+ if (mstat & UART_MSR_DSR)
+ ni.mstat |= TIOCM_DSR;
+
+ ni.iflag = ch->ch_c_iflag;
+ ni.oflag = ch->ch_c_oflag;
+ ni.cflag = ch->ch_c_cflag;
+ ni.lflag = ch->ch_c_lflag;
+
+ if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS)
+ ni.hflow = 1;
+ else
+ ni.hflow = 0;
+
+ if ((ch->ch_flags & CH_STOPI) || (ch->ch_flags & CH_FORCED_STOPI))
+ ni.recv_stopped = 1;
+ else
+ ni.recv_stopped = 0;
+
+ if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_FORCED_STOP))
+ ni.xmit_stopped = 1;
+ else
+ ni.xmit_stopped = 0;
+
+ ni.curtx = ch->ch_txcount;
+ ni.currx = ch->ch_rxcount;
+
+ ni.baud = ch->ch_old_baud;
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ if (copy_to_user(uarg, &ni, sizeof(ni)))
+ return (-EFAULT);
+
+ break;
+ }
+
+
+ }
+
+ DPR_MGMT(("dgnc_mgmt_ioctl finish.\n"));
+
+ return 0;
+}
diff --git a/drivers/staging/dgnc/dgnc_mgmt.h b/drivers/staging/dgnc/dgnc_mgmt.h
new file mode 100644
index 00000000000..567f687b18d
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_mgmt.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef __DGNC_MGMT_H
+#define __DGNC_MGMT_H
+
+#define MAXMGMTDEVICES 8
+
+int dgnc_mgmt_open(struct inode *inode, struct file *file);
+int dgnc_mgmt_close(struct inode *inode, struct file *file);
+long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+#endif
+
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
new file mode 100644
index 00000000000..8b9e09a83f7
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_neo.c
@@ -0,0 +1,1974 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/sched.h> /* For jiffies, task states */
+#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
+#include <linux/delay.h> /* For udelay */
+#include <asm/io.h> /* For read[bwl]/write[bwl] */
+#include <linux/serial.h> /* For struct async_serial */
+#include <linux/serial_reg.h> /* For the various UART offsets */
+
+#include "dgnc_driver.h" /* Driver main header file */
+#include "dgnc_neo.h" /* Our header file */
+#include "dgnc_tty.h"
+#include "dgnc_trace.h"
+
+static inline void neo_parse_lsr(struct board_t *brd, uint port);
+static inline void neo_parse_isr(struct board_t *brd, uint port);
+static void neo_copy_data_from_uart_to_queue(struct channel_t *ch);
+static inline void neo_clear_break(struct channel_t *ch, int force);
+static inline void neo_set_cts_flow_control(struct channel_t *ch);
+static inline void neo_set_rts_flow_control(struct channel_t *ch);
+static inline void neo_set_ixon_flow_control(struct channel_t *ch);
+static inline void neo_set_ixoff_flow_control(struct channel_t *ch);
+static inline void neo_set_no_output_flow_control(struct channel_t *ch);
+static inline void neo_set_no_input_flow_control(struct channel_t *ch);
+static inline void neo_set_new_start_stop_chars(struct channel_t *ch);
+static void neo_parse_modem(struct channel_t *ch, uchar signals);
+static void neo_tasklet(unsigned long data);
+static void neo_vpd(struct board_t *brd);
+static void neo_uart_init(struct channel_t *ch);
+static void neo_uart_off(struct channel_t *ch);
+static int neo_drain(struct tty_struct *tty, uint seconds);
+static void neo_param(struct tty_struct *tty);
+static void neo_assert_modem_signals(struct channel_t *ch);
+static void neo_flush_uart_write(struct channel_t *ch);
+static void neo_flush_uart_read(struct channel_t *ch);
+static void neo_disable_receiver(struct channel_t *ch);
+static void neo_enable_receiver(struct channel_t *ch);
+static void neo_send_break(struct channel_t *ch, int msecs);
+static void neo_send_start_character(struct channel_t *ch);
+static void neo_send_stop_character(struct channel_t *ch);
+static void neo_copy_data_from_queue_to_uart(struct channel_t *ch);
+static uint neo_get_uart_bytes_left(struct channel_t *ch);
+static void neo_send_immediate_char(struct channel_t *ch, unsigned char c);
+static irqreturn_t neo_intr(int irq, void *voidbrd);
+
+
+struct board_ops dgnc_neo_ops = {
+ .tasklet = neo_tasklet,
+ .intr = neo_intr,
+ .uart_init = neo_uart_init,
+ .uart_off = neo_uart_off,
+ .drain = neo_drain,
+ .param = neo_param,
+ .vpd = neo_vpd,
+ .assert_modem_signals = neo_assert_modem_signals,
+ .flush_uart_write = neo_flush_uart_write,
+ .flush_uart_read = neo_flush_uart_read,
+ .disable_receiver = neo_disable_receiver,
+ .enable_receiver = neo_enable_receiver,
+ .send_break = neo_send_break,
+ .send_start_character = neo_send_start_character,
+ .send_stop_character = neo_send_stop_character,
+ .copy_data_from_queue_to_uart = neo_copy_data_from_queue_to_uart,
+ .get_uart_bytes_left = neo_get_uart_bytes_left,
+ .send_immediate_char = neo_send_immediate_char
+};
+
+static uint dgnc_offset_table[8] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
+
+
+/*
+ * This function allows calls to ensure that all outstanding
+ * PCI writes have been completed, by doing a PCI read against
+ * a non-destructive, read-only location on the Neo card.
+ *
+ * In this case, we are reading the DVID (Read-only Device Identification)
+ * value of the Neo card.
+ */
+static inline void neo_pci_posting_flush(struct board_t *bd)
+{
+ readb(bd->re_map_membase + 0x8D);
+}
+
+static inline void neo_set_cts_flow_control(struct channel_t *ch)
+{
+ uchar ier = readb(&ch->ch_neo_uart->ier);
+ uchar efr = readb(&ch->ch_neo_uart->efr);
+
+
+ DPR_PARAM(("Setting CTSFLOW\n"));
+
+ /* Turn on auto CTS flow control */
+#if 1
+ ier |= (UART_17158_IER_CTSDSR);
+#else
+ ier &= ~(UART_17158_IER_CTSDSR);
+#endif
+
+ efr |= (UART_17158_EFR_ECB | UART_17158_EFR_CTSDSR);
+
+ /* Turn off auto Xon flow control */
+ efr &= ~(UART_17158_EFR_IXON);
+
+ /* Why? Because Exar's spec says we have to zero it out before setting it */
+ writeb(0, &ch->ch_neo_uart->efr);
+
+ /* Turn on UART enhanced bits */
+ writeb(efr, &ch->ch_neo_uart->efr);
+
+ /* Turn on table D, with 8 char hi/low watermarks */
+ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr);
+
+ /* Feed the UART our trigger levels */
+ writeb(8, &ch->ch_neo_uart->tfifo);
+ ch->ch_t_tlevel = 8;
+
+ writeb(ier, &ch->ch_neo_uart->ier);
+
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+static inline void neo_set_rts_flow_control(struct channel_t *ch)
+{
+ uchar ier = readb(&ch->ch_neo_uart->ier);
+ uchar efr = readb(&ch->ch_neo_uart->efr);
+
+ DPR_PARAM(("Setting RTSFLOW\n"));
+
+ /* Turn on auto RTS flow control */
+#if 1
+ ier |= (UART_17158_IER_RTSDTR);
+#else
+ ier &= ~(UART_17158_IER_RTSDTR);
+#endif
+ efr |= (UART_17158_EFR_ECB | UART_17158_EFR_RTSDTR);
+
+ /* Turn off auto Xoff flow control */
+ ier &= ~(UART_17158_IER_XOFF);
+ efr &= ~(UART_17158_EFR_IXOFF);
+
+ /* Why? Because Exar's spec says we have to zero it out before setting it */
+ writeb(0, &ch->ch_neo_uart->efr);
+
+ /* Turn on UART enhanced bits */
+ writeb(efr, &ch->ch_neo_uart->efr);
+
+ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr);
+ ch->ch_r_watermark = 4;
+
+ writeb(32, &ch->ch_neo_uart->rfifo);
+ ch->ch_r_tlevel = 32;
+
+ writeb(ier, &ch->ch_neo_uart->ier);
+
+ /*
+ * From the Neo UART spec sheet:
+ * The auto RTS/DTR function must be started by asserting
+ * the RTS/DTR# output pin (MCR bit 0 or 1) to logic 1 after
+ * it is enabled.
+ */
+ ch->ch_mostat |= (UART_MCR_RTS);
+
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+static inline void neo_set_ixon_flow_control(struct channel_t *ch)
+{
+ uchar ier = readb(&ch->ch_neo_uart->ier);
+ uchar efr = readb(&ch->ch_neo_uart->efr);
+
+ DPR_PARAM(("Setting IXON FLOW\n"));
+
+ /* Turn off auto CTS flow control */
+ ier &= ~(UART_17158_IER_CTSDSR);
+ efr &= ~(UART_17158_EFR_CTSDSR);
+
+ /* Turn on auto Xon flow control */
+ efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXON);
+
+ /* Why? Because Exar's spec says we have to zero it out before setting it */
+ writeb(0, &ch->ch_neo_uart->efr);
+
+ /* Turn on UART enhanced bits */
+ writeb(efr, &ch->ch_neo_uart->efr);
+
+ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+ ch->ch_r_watermark = 4;
+
+ writeb(32, &ch->ch_neo_uart->rfifo);
+ ch->ch_r_tlevel = 32;
+
+ /* Tell UART what start/stop chars it should be looking for */
+ writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1);
+ writeb(0, &ch->ch_neo_uart->xonchar2);
+
+ writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1);
+ writeb(0, &ch->ch_neo_uart->xoffchar2);
+
+ writeb(ier, &ch->ch_neo_uart->ier);
+
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+static inline void neo_set_ixoff_flow_control(struct channel_t *ch)
+{
+ uchar ier = readb(&ch->ch_neo_uart->ier);
+ uchar efr = readb(&ch->ch_neo_uart->efr);
+
+ DPR_PARAM(("Setting IXOFF FLOW\n"));
+
+ /* Turn off auto RTS flow control */
+ ier &= ~(UART_17158_IER_RTSDTR);
+ efr &= ~(UART_17158_EFR_RTSDTR);
+
+ /* Turn on auto Xoff flow control */
+ ier |= (UART_17158_IER_XOFF);
+ efr |= (UART_17158_EFR_ECB | UART_17158_EFR_IXOFF);
+
+ /* Why? Because Exar's spec says we have to zero it out before setting it */
+ writeb(0, &ch->ch_neo_uart->efr);
+
+ /* Turn on UART enhanced bits */
+ writeb(efr, &ch->ch_neo_uart->efr);
+
+ /* Turn on table D, with 8 char hi/low watermarks */
+ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+
+ writeb(8, &ch->ch_neo_uart->tfifo);
+ ch->ch_t_tlevel = 8;
+
+ /* Tell UART what start/stop chars it should be looking for */
+ writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1);
+ writeb(0, &ch->ch_neo_uart->xonchar2);
+
+ writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1);
+ writeb(0, &ch->ch_neo_uart->xoffchar2);
+
+ writeb(ier, &ch->ch_neo_uart->ier);
+
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+static inline void neo_set_no_input_flow_control(struct channel_t *ch)
+{
+ uchar ier = readb(&ch->ch_neo_uart->ier);
+ uchar efr = readb(&ch->ch_neo_uart->efr);
+
+ DPR_PARAM(("Unsetting Input FLOW\n"));
+
+ /* Turn off auto RTS flow control */
+ ier &= ~(UART_17158_IER_RTSDTR);
+ efr &= ~(UART_17158_EFR_RTSDTR);
+
+ /* Turn off auto Xoff flow control */
+ ier &= ~(UART_17158_IER_XOFF);
+ if (ch->ch_c_iflag & IXON)
+ efr &= ~(UART_17158_EFR_IXOFF);
+ else
+ efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXOFF);
+
+
+ /* Why? Because Exar's spec says we have to zero it out before setting it */
+ writeb(0, &ch->ch_neo_uart->efr);
+
+ /* Turn on UART enhanced bits */
+ writeb(efr, &ch->ch_neo_uart->efr);
+
+ /* Turn on table D, with 8 char hi/low watermarks */
+ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+
+ ch->ch_r_watermark = 0;
+
+ writeb(16, &ch->ch_neo_uart->tfifo);
+ ch->ch_t_tlevel = 16;
+
+ writeb(16, &ch->ch_neo_uart->rfifo);
+ ch->ch_r_tlevel = 16;
+
+ writeb(ier, &ch->ch_neo_uart->ier);
+
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+static inline void neo_set_no_output_flow_control(struct channel_t *ch)
+{
+ uchar ier = readb(&ch->ch_neo_uart->ier);
+ uchar efr = readb(&ch->ch_neo_uart->efr);
+
+ DPR_PARAM(("Unsetting Output FLOW\n"));
+
+ /* Turn off auto CTS flow control */
+ ier &= ~(UART_17158_IER_CTSDSR);
+ efr &= ~(UART_17158_EFR_CTSDSR);
+
+ /* Turn off auto Xon flow control */
+ if (ch->ch_c_iflag & IXOFF)
+ efr &= ~(UART_17158_EFR_IXON);
+ else
+ efr &= ~(UART_17158_EFR_ECB | UART_17158_EFR_IXON);
+
+ /* Why? Because Exar's spec says we have to zero it out before setting it */
+ writeb(0, &ch->ch_neo_uart->efr);
+
+ /* Turn on UART enhanced bits */
+ writeb(efr, &ch->ch_neo_uart->efr);
+
+ /* Turn on table D, with 8 char hi/low watermarks */
+ writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+
+ ch->ch_r_watermark = 0;
+
+ writeb(16, &ch->ch_neo_uart->tfifo);
+ ch->ch_t_tlevel = 16;
+
+ writeb(16, &ch->ch_neo_uart->rfifo);
+ ch->ch_r_tlevel = 16;
+
+ writeb(ier, &ch->ch_neo_uart->ier);
+
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+/* change UARTs start/stop chars */
+static inline void neo_set_new_start_stop_chars(struct channel_t *ch)
+{
+
+ /* if hardware flow control is set, then skip this whole thing */
+ if (ch->ch_digi.digi_flags & (CTSPACE | RTSPACE) || ch->ch_c_cflag & CRTSCTS)
+ return;
+
+ DPR_PARAM(("In new start stop chars\n"));
+
+ /* Tell UART what start/stop chars it should be looking for */
+ writeb(ch->ch_startc, &ch->ch_neo_uart->xonchar1);
+ writeb(0, &ch->ch_neo_uart->xonchar2);
+
+ writeb(ch->ch_stopc, &ch->ch_neo_uart->xoffchar1);
+ writeb(0, &ch->ch_neo_uart->xoffchar2);
+
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+/*
+ * No locks are assumed to be held when calling this function.
+ */
+static inline void neo_clear_break(struct channel_t *ch, int force)
+{
+ ulong lock_flags;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /* Bail if we aren't currently sending a break. */
+ if (!ch->ch_stop_sending_break) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ /* Turn break off, and unset some variables */
+ if (ch->ch_flags & CH_BREAK_SENDING) {
+ if ((jiffies >= ch->ch_stop_sending_break) || force) {
+ uchar temp = readb(&ch->ch_neo_uart->lcr);
+ writeb((temp & ~UART_LCR_SBC), &ch->ch_neo_uart->lcr);
+ neo_pci_posting_flush(ch->ch_bd);
+ ch->ch_flags &= ~(CH_BREAK_SENDING);
+ ch->ch_stop_sending_break = 0;
+ DPR_IOCTL(("Finishing UART_LCR_SBC! finished: %lx\n", jiffies));
+ }
+ }
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+}
+
+
+/*
+ * Parse the ISR register.
+ */
+static inline void neo_parse_isr(struct board_t *brd, uint port)
+{
+ struct channel_t *ch;
+ uchar isr;
+ uchar cause;
+ ulong lock_flags;
+
+ if (!brd || brd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ if (port > brd->maxports)
+ return;
+
+ ch = brd->channels[port];
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ /* Here we try to figure out what caused the interrupt to happen */
+ while (1) {
+
+ isr = readb(&ch->ch_neo_uart->isr_fcr);
+
+ /* Bail if no pending interrupt */
+ if (isr & UART_IIR_NO_INT) {
+ break;
+ }
+
+ /*
+ * Yank off the upper 2 bits, which just show that the FIFOs are enabled.
+ */
+ isr &= ~(UART_17158_IIR_FIFO_ENABLED);
+
+ DPR_INTR(("%s:%d isr: %x\n", __FILE__, __LINE__, isr));
+
+ if (isr & (UART_17158_IIR_RDI_TIMEOUT | UART_IIR_RDI)) {
+ /* Read data from uart -> queue */
+ brd->intr_rx++;
+ ch->ch_intr_rx++;
+ neo_copy_data_from_uart_to_queue(ch);
+
+ /* Call our tty layer to enforce queue flow control if needed. */
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ dgnc_check_queue_flow_control(ch);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ }
+
+ if (isr & UART_IIR_THRI) {
+ brd->intr_tx++;
+ ch->ch_intr_tx++;
+ /* Transfer data (if any) from Write Queue -> UART. */
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ neo_copy_data_from_queue_to_uart(ch);
+ }
+
+ if (isr & UART_17158_IIR_XONXOFF) {
+ cause = readb(&ch->ch_neo_uart->xoffchar1);
+
+ DPR_INTR(("Port %d. Got ISR_XONXOFF: cause:%x\n", port, cause));
+
+ /*
+ * Since the UART detected either an XON or
+ * XOFF match, we need to figure out which
+ * one it was, so we can suspend or resume data flow.
+ */
+ if (cause == UART_17158_XON_DETECT) {
+ /* Is output stopped right now, if so, resume it */
+ if (brd->channels[port]->ch_flags & CH_STOP) {
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_flags &= ~(CH_STOP);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ }
+ DPR_INTR(("Port %d. XON detected in incoming data\n", port));
+ }
+ else if (cause == UART_17158_XOFF_DETECT) {
+ if (!(brd->channels[port]->ch_flags & CH_STOP)) {
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_flags |= CH_STOP;
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ DPR_INTR(("Setting CH_STOP\n"));
+ }
+ DPR_INTR(("Port: %d. XOFF detected in incoming data\n", port));
+ }
+ }
+
+ if (isr & UART_17158_IIR_HWFLOW_STATE_CHANGE) {
+ /*
+ * If we get here, this means the hardware is doing auto flow control.
+ * Check to see whether RTS/DTR or CTS/DSR caused this interrupt.
+ */
+ brd->intr_modem++;
+ ch->ch_intr_modem++;
+ cause = readb(&ch->ch_neo_uart->mcr);
+ /* Which pin is doing auto flow? RTS or DTR? */
+ if ((cause & 0x4) == 0) {
+ if (cause & UART_MCR_RTS) {
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_mostat |= UART_MCR_RTS;
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ }
+ else {
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_mostat &= ~(UART_MCR_RTS);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ }
+ } else {
+ if (cause & UART_MCR_DTR) {
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_mostat |= UART_MCR_DTR;
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ }
+ else {
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_mostat &= ~(UART_MCR_DTR);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ }
+ }
+ }
+
+ /* Parse any modem signal changes */
+ DPR_INTR(("MOD_STAT: sending to parse_modem_sigs\n"));
+ neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
+ }
+}
+
+
+static inline void neo_parse_lsr(struct board_t *brd, uint port)
+{
+ struct channel_t *ch;
+ int linestatus;
+ ulong lock_flags;
+
+ if (!brd)
+ return;
+
+ if (brd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ if (port > brd->maxports)
+ return;
+
+ ch = brd->channels[port];
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ linestatus = readb(&ch->ch_neo_uart->lsr);
+
+ DPR_INTR(("%s:%d port: %d linestatus: %x\n", __FILE__, __LINE__, port, linestatus));
+
+ ch->ch_cached_lsr |= linestatus;
+
+ if (ch->ch_cached_lsr & UART_LSR_DR) {
+ brd->intr_rx++;
+ ch->ch_intr_rx++;
+ /* Read data from uart -> queue */
+ neo_copy_data_from_uart_to_queue(ch);
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ dgnc_check_queue_flow_control(ch);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ }
+
+ /*
+ * This is a special flag. It indicates that at least 1
+ * RX error (parity, framing, or break) has happened.
+ * Mark this in our struct, which will tell me that I have
+ * to do the special RX+LSR read for this FIFO load.
+ */
+ if (linestatus & UART_17158_RX_FIFO_DATA_ERROR) {
+ DPR_INTR(("%s:%d Port: %d Got an RX error, need to parse LSR\n",
+ __FILE__, __LINE__, port));
+ }
+
+ /*
+ * The next 3 tests should *NOT* happen, as the above test
+ * should encapsulate all 3... At least, that's what Exar says.
+ */
+
+ if (linestatus & UART_LSR_PE) {
+ ch->ch_err_parity++;
+ DPR_INTR(("%s:%d Port: %d. PAR ERR!\n", __FILE__, __LINE__, port));
+ }
+
+ if (linestatus & UART_LSR_FE) {
+ ch->ch_err_frame++;
+ DPR_INTR(("%s:%d Port: %d. FRM ERR!\n", __FILE__, __LINE__, port));
+ }
+
+ if (linestatus & UART_LSR_BI) {
+ ch->ch_err_break++;
+ DPR_INTR(("%s:%d Port: %d. BRK INTR!\n", __FILE__, __LINE__, port));
+ }
+
+ if (linestatus & UART_LSR_OE) {
+ /*
+ * Rx overruns. Exar says that an overrun will NOT corrupt
+ * the FIFO. It will just replace the holding register
+ * with this new data byte. So basically just ignore this.
+ * Probably we should eventually have an overrun stat in our driver...
+ */
+ ch->ch_err_overrun++;
+ DPR_INTR(("%s:%d Port: %d. Rx Overrun!\n", __FILE__, __LINE__, port));
+ }
+
+ if (linestatus & UART_LSR_THRE) {
+ brd->intr_tx++;
+ ch->ch_intr_tx++;
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ /* Transfer data (if any) from Write Queue -> UART. */
+ neo_copy_data_from_queue_to_uart(ch);
+ }
+ else if (linestatus & UART_17158_TX_AND_FIFO_CLR) {
+ brd->intr_tx++;
+ ch->ch_intr_tx++;
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ /* Transfer data (if any) from Write Queue -> UART. */
+ neo_copy_data_from_queue_to_uart(ch);
+ }
+}
+
+
+/*
+ * neo_param()
+ * Send any/all changes to the line to the UART.
+ */
+static void neo_param(struct tty_struct *tty)
+{
+ uchar lcr = 0;
+ uchar uart_lcr = 0;
+ uchar ier = 0;
+ uchar uart_ier = 0;
+ uint baud = 9600;
+ int quot = 0;
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!tty || tty->magic != TTY_MAGIC) {
+ return;
+ }
+
+ un = (struct un_t *) tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC) {
+ return;
+ }
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ return;
+ }
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC) {
+ return;
+ }
+
+ DPR_PARAM(("param start: tdev: %x cflags: %x oflags: %x iflags: %x\n",
+ ch->ch_tun.un_dev, ch->ch_c_cflag, ch->ch_c_oflag, ch->ch_c_iflag));
+
+ /*
+ * If the baud rate is zero, flush the queues and drop the modem lines (RTS/DTR).
+ */
+ if ((ch->ch_c_cflag & (CBAUD)) == 0) {
+ ch->ch_r_head = ch->ch_r_tail = 0;
+ ch->ch_e_head = ch->ch_e_tail = 0;
+ ch->ch_w_head = ch->ch_w_tail = 0;
+
+ neo_flush_uart_write(ch);
+ neo_flush_uart_read(ch);
+
+ /* The baudrate is B0 so all modem lines are to be dropped. */
+ ch->ch_flags |= (CH_BAUD0);
+ ch->ch_mostat &= ~(UART_MCR_RTS | UART_MCR_DTR);
+ neo_assert_modem_signals(ch);
+ ch->ch_old_baud = 0;
+ return;
+
+ } else if (ch->ch_custom_speed) {
+
+ baud = ch->ch_custom_speed;
+ /* Handle transition from B0 */
+ if (ch->ch_flags & CH_BAUD0) {
+ ch->ch_flags &= ~(CH_BAUD0);
+
+ /*
+ * Bring back up RTS and DTR...
+ * Also handle RTS or DTR toggle if set.
+ */
+ if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_RTS);
+ if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_DTR);
+ }
+ } else {
+ int iindex = 0;
+ int jindex = 0;
+
+ ulong bauds[4][16] = {
+ { /* slowbaud */
+ 0, 50, 75, 110,
+ 134, 150, 200, 300,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* slowbaud & CBAUDEX */
+ 0, 57600, 115200, 230400,
+ 460800, 150, 200, 921600,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* fastbaud */
+ 0, 57600, 76800, 115200,
+ 131657, 153600, 230400, 460800,
+ 921600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 },
+ { /* fastbaud & CBAUDEX */
+ 0, 57600, 115200, 230400,
+ 460800, 150, 200, 921600,
+ 600, 1200, 1800, 2400,
+ 4800, 9600, 19200, 38400 }
+ };
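+ /*
+ * Illustrative lookup (derived from the table above): B9600 has a
+ * CBAUD index of 13, so with neither CBAUDEX nor DIGI_FAST set the
+ * code below ends up at bauds[0][13] == 9600.
+ */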
+
+ /* Only use the TXPrint baud rate if the terminal unit is NOT open */
+ if (!(ch->ch_tun.un_flags & UN_ISOPEN) && (un->un_type == DGNC_PRINT))
+ baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
+ else
+ baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
+
+ if (ch->ch_c_cflag & CBAUDEX)
+ iindex = 1;
+
+ if (ch->ch_digi.digi_flags & DIGI_FAST)
+ iindex += 2;
+
+ jindex = baud;
+
+ if ((iindex >= 0) && (iindex < 4) && (jindex >= 0) && (jindex < 16)) {
+ baud = bauds[iindex][jindex];
+ } else {
+ DPR_IOCTL(("baud indices were out of range (%d)(%d)",
+ iindex, jindex));
+ baud = 0;
+ }
+
+ if (baud == 0)
+ baud = 9600;
+
+ /* Handle transition from B0 */
+ if (ch->ch_flags & CH_BAUD0) {
+ ch->ch_flags &= ~(CH_BAUD0);
+
+ /*
+ * Bring back up RTS and DTR...
+ * Also handle RTS or DTR toggle if set.
+ */
+ if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_RTS);
+ if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_DTR);
+ }
+ }
+
+ if (ch->ch_c_cflag & PARENB) {
+ lcr |= UART_LCR_PARITY;
+ }
+
+ if (!(ch->ch_c_cflag & PARODD)) {
+ lcr |= UART_LCR_EPAR;
+ }
+
+ /*
+ * Not all platforms support mark/space parity,
+ * so this will hide behind an ifdef.
+ */
+#ifdef CMSPAR
+ if (ch->ch_c_cflag & CMSPAR)
+ lcr |= UART_LCR_SPAR;
+#endif
+
+ if (ch->ch_c_cflag & CSTOPB)
+ lcr |= UART_LCR_STOP;
+
+ switch (ch->ch_c_cflag & CSIZE) {
+ case CS5:
+ lcr |= UART_LCR_WLEN5;
+ break;
+ case CS6:
+ lcr |= UART_LCR_WLEN6;
+ break;
+ case CS7:
+ lcr |= UART_LCR_WLEN7;
+ break;
+ case CS8:
+ default:
+ lcr |= UART_LCR_WLEN8;
+ break;
+ }
+
+ ier = uart_ier = readb(&ch->ch_neo_uart->ier);
+ uart_lcr = readb(&ch->ch_neo_uart->lcr);
+
+ if (baud == 0)
+ baud = 9600;
+
+ quot = ch->ch_bd->bd_dividend / baud;
+
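+ /*
+ * Standard 16x50 divisor programming: with UART_LCR_DLAB set, the txrx
+ * and ier offsets act as the divisor latch low/high bytes; writing the
+ * real lcr value afterwards clears DLAB again.
+ */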
+ if (quot != 0 && ch->ch_old_baud != baud) {
+ ch->ch_old_baud = baud;
+ writeb(UART_LCR_DLAB, &ch->ch_neo_uart->lcr);
+ writeb((quot & 0xff), &ch->ch_neo_uart->txrx);
+ writeb((quot >> 8), &ch->ch_neo_uart->ier);
+ writeb(lcr, &ch->ch_neo_uart->lcr);
+ }
+
+ if (uart_lcr != lcr)
+ writeb(lcr, &ch->ch_neo_uart->lcr);
+
+ if (ch->ch_c_cflag & CREAD) {
+ ier |= (UART_IER_RDI | UART_IER_RLSI);
+ }
+ else {
+ ier &= ~(UART_IER_RDI | UART_IER_RLSI);
+ }
+
+ /*
+ * Have the UART interrupt on modem signal changes ONLY when
+ * we are in hardware flow control mode, or CLOCAL/FORCEDCD is not set.
+ */
+ if ((ch->ch_digi.digi_flags & CTSPACE) || (ch->ch_digi.digi_flags & RTSPACE) ||
+ (ch->ch_c_cflag & CRTSCTS) || !(ch->ch_digi.digi_flags & DIGI_FORCEDCD) ||
+ !(ch->ch_c_cflag & CLOCAL))
+ {
+ ier |= UART_IER_MSI;
+ }
+ else {
+ ier &= ~UART_IER_MSI;
+ }
+
+ ier |= UART_IER_THRI;
+
+ if (ier != uart_ier)
+ writeb(ier, &ch->ch_neo_uart->ier);
+
+ /* Set new start/stop chars */
+ neo_set_new_start_stop_chars(ch);
+
+ if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) {
+ neo_set_cts_flow_control(ch);
+ }
+ else if (ch->ch_c_iflag & IXON) {
+ /* If start/stop is set to disable, then we should disable flow control */
+ if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE))
+ neo_set_no_output_flow_control(ch);
+ else
+ neo_set_ixon_flow_control(ch);
+ }
+ else {
+ neo_set_no_output_flow_control(ch);
+ }
+
+ if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) {
+ neo_set_rts_flow_control(ch);
+ }
+ else if (ch->ch_c_iflag & IXOFF) {
+ /* If start/stop is set to disable, then we should disable flow control */
+ if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE))
+ neo_set_no_input_flow_control(ch);
+ else
+ neo_set_ixoff_flow_control(ch);
+ }
+ else {
+ neo_set_no_input_flow_control(ch);
+ }
+
+ /*
+ * Adjust the RX FIFO Trigger level if baud is less than 9600.
+ * Not exactly elegant, but this is needed because of the Exar chip's
+ * delay on firing off the RX FIFO interrupt on slower baud rates.
+ */
+ if (baud < 9600) {
+ writeb(1, &ch->ch_neo_uart->rfifo);
+ ch->ch_r_tlevel = 1;
+ }
+
+ neo_assert_modem_signals(ch);
+
+ /* Get current status of the modem signals now */
+ neo_parse_modem(ch, readb(&ch->ch_neo_uart->msr));
+}
+
+
+/*
+ * Our board poller function.
+ */
+static void neo_tasklet(unsigned long data)
+{
+ struct board_t *bd = (struct board_t *) data;
+ struct channel_t *ch;
+ ulong lock_flags;
+ int i;
+ int state = 0;
+ int ports = 0;
+
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC) {
+ APR(("poll_tasklet() - NULL or bad bd.\n"));
+ return;
+ }
+
+ /* Cache a couple board values */
+ DGNC_LOCK(bd->bd_lock, lock_flags);
+ state = bd->state;
+ ports = bd->nasync;
+ DGNC_UNLOCK(bd->bd_lock, lock_flags);
+
+ /*
+ * Do NOT allow the interrupt routine to read the intr registers
+ * until we release this lock.
+ */
+ DGNC_LOCK(bd->bd_intr_lock, lock_flags);
+
+ /*
+ * If board is ready, parse deeper to see if there is anything to do.
+ */
+ if ((state == BOARD_READY) && (ports > 0)) {
+ /* Loop on each port */
+ for (i = 0; i < ports; i++) {
+ ch = bd->channels[i];
+
+ /* Just being careful... */
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ continue;
+
+ /*
+ * NOTE: Remember you CANNOT hold any channel
+ * locks when calling the input routine.
+ *
+ * During input processing, it's possible we
+ * will call the Linux line discipline, which might, in turn,
+ * do a callback right back into us, resulting
+ * in us trying to grab the channel lock twice!
+ */
+ dgnc_input(ch);
+
+ /*
+ * Channel lock is grabbed and then released
+ * inside both of these routines, but neither
+ * calls anything else that could call back into us.
+ */
+ neo_copy_data_from_queue_to_uart(ch);
+ dgnc_wakeup_writes(ch);
+
+ /*
+ * Call the carrier function, in case something
+ * has changed.
+ */
+ dgnc_carrier(ch);
+
+ /*
+ * Check to see if we need to turn off a sending break.
+ * The timing check is done inside clear_break()
+ */
+ if (ch->ch_stop_sending_break)
+ neo_clear_break(ch, 0);
+ }
+ }
+
+ /* Allow interrupt routine to access the interrupt register again */
+ DGNC_UNLOCK(bd->bd_intr_lock, lock_flags);
+
+}
+
+
+/*
+ * dgnc_neo_intr()
+ *
+ * Neo specific interrupt handler.
+ */
+static irqreturn_t neo_intr(int irq, void *voidbrd)
+{
+ struct board_t *brd = (struct board_t *) voidbrd;
+ struct channel_t *ch;
+ int port = 0;
+ int type = 0;
+ int current_port;
+ u32 tmp;
+ u32 uart_poll;
+ unsigned long lock_flags;
+ unsigned long lock_flags2;
+
+ if (!brd) {
+ APR(("Received interrupt (%d) with null board associated\n", irq));
+ return IRQ_NONE;
+ }
+
+ /*
+ * Check to make sure it's for us.
+ */
+ if (brd->magic != DGNC_BOARD_MAGIC) {
+ APR(("Received interrupt (%d) with a board pointer that wasn't ours!\n", irq));
+ return IRQ_NONE;
+ }
+
+ brd->intr_count++;
+
+ /* Lock out the slow poller from running on this board. */
+ DGNC_LOCK(brd->bd_intr_lock, lock_flags);
+
+ /*
+ * Read in "extended" IRQ information from the 32bit Neo register.
+ * Bits 0-7: What port triggered the interrupt.
+ * Bits 8-31: Each 3 bits indicate what type of interrupt occurred.
+ */
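+ /*
+ * Worked example (using the type codes from dgnc_neo.h): uart_poll ==
+ * 0x00000301 means port 0 raised the interrupt (bit 0 set) and its
+ * 3-bit type field, (0x301 >> 8) & 0x7 == 0x3, is UART_17158_TXRDY.
+ */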
+ uart_poll = readl(brd->re_map_membase + UART_17158_POLL_ADDR_OFFSET);
+
+ DPR_INTR(("%s:%d uart_poll: %x\n", __FILE__, __LINE__, uart_poll));
+
+ /*
+ * If 0, no interrupts pending.
+ * This can happen if the IRQ is shared among a couple Neo/Classic boards.
+ */
+ if (!uart_poll) {
+ DPR_INTR(("Kernel interrupted to me, but no pending interrupts...\n"));
+ DGNC_UNLOCK(brd->bd_intr_lock, lock_flags);
+ return IRQ_NONE;
+ }
+
+ /* At this point, we have at least SOMETHING to service, dig further... */
+
+ current_port = 0;
+
+ /* Loop on each port */
+ while ((uart_poll & 0xff) != 0) {
+
+ tmp = uart_poll;
+
+ /* Check current port to see if it has interrupt pending */
+ if ((tmp & dgnc_offset_table[current_port]) != 0) {
+ port = current_port;
+ type = tmp >> (8 + (port * 3));
+ type &= 0x7;
+ } else {
+ current_port++;
+ continue;
+ }
+
+ DPR_INTR(("%s:%d port: %x type: %x\n", __FILE__, __LINE__, port, type));
+
+ /* Remove this port + type from uart_poll */
+ uart_poll &= ~(dgnc_offset_table[port]);
+
+ if (!type) {
+ /* If no type, just ignore it, and move onto next port */
+ DPR_INTR(("Interrupt with no type! port: %d\n", port));
+ continue;
+ }
+
+ /* Switch on type of interrupt we have */
+ switch (type) {
+
+ case UART_17158_RXRDY_TIMEOUT:
+ /*
+ * RXRDY Time-out is cleared by reading data in the
+ * RX FIFO until it falls below the trigger level.
+ */
+
+ /* Verify the port is in range. */
+ if (port > brd->nasync)
+ continue;
+
+ ch = brd->channels[port];
+ neo_copy_data_from_uart_to_queue(ch);
+
+ /* Call our tty layer to enforce queue flow control if needed. */
+ DGNC_LOCK(ch->ch_lock, lock_flags2);
+ dgnc_check_queue_flow_control(ch);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags2);
+
+ continue;
+
+ case UART_17158_RX_LINE_STATUS:
+ /*
+ * RXRDY and RX LINE Status (logic OR of LSR[4:1])
+ */
+ neo_parse_lsr(brd, port);
+ continue;
+
+ case UART_17158_TXRDY:
+ /*
+ * TXRDY interrupt clears after reading ISR register for the UART channel.
+ */
+
+ /*
+ * Yes, this is odd...
+ * Why would I check EVERY possible type of
+ * interrupt, when we know it's TXRDY???
+ * Because for some reason, even though we got triggered for TXRDY,
+ * it seems to be occasionally wrong. Instead of TX, which
+ * it should be, I was getting things like RXRDY too. Weird.
+ */
+ neo_parse_isr(brd, port);
+ continue;
+
+ case UART_17158_MSR:
+ /*
+ * MSR or flow control was seen.
+ */
+ neo_parse_isr(brd, port);
+ continue;
+
+ default:
+ /*
+ * The UART triggered us with a bogus interrupt type.
+ * It appears the Exar chip, when REALLY bogged down, will throw
+ * these once in a while.
+ * It's harmless; just ignore it and move on.
+ */
+ DPR_INTR(("%s:%d Unknown Interrupt type: %x\n", __FILE__, __LINE__, type));
+ continue;
+ }
+ }
+
+ /*
+ * Schedule the tasklet to do more in-depth servicing at a better time.
+ */
+ tasklet_schedule(&brd->helper_tasklet);
+
+ DGNC_UNLOCK(brd->bd_intr_lock, lock_flags);
+
+ DPR_INTR(("dgnc_intr finish.\n"));
+ return IRQ_HANDLED;
+}
+
+
+/*
+ * Neo specific way of turning off the receiver.
+ * Used as a way to enforce queue flow control when in
+ * hardware flow control mode.
+ */
+static void neo_disable_receiver(struct channel_t *ch)
+{
+ uchar tmp = readb(&ch->ch_neo_uart->ier);
+ tmp &= ~(UART_IER_RDI);
+ writeb(tmp, &ch->ch_neo_uart->ier);
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+/*
+ * Neo specific way of turning on the receiver.
+ * Used as a way to un-enforce queue flow control when in
+ * hardware flow control mode.
+ */
+static void neo_enable_receiver(struct channel_t *ch)
+{
+ uchar tmp = readb(&ch->ch_neo_uart->ier);
+ tmp |= (UART_IER_RDI);
+ writeb(tmp, &ch->ch_neo_uart->ier);
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
+{
+ int qleft = 0;
+ uchar linestatus = 0;
+ uchar error_mask = 0;
+ int n = 0;
+ int total = 0;
+ ushort head;
+ ushort tail;
+ ulong lock_flags;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /* cache head and tail of queue */
+ head = ch->ch_r_head & RQUEUEMASK;
+ tail = ch->ch_r_tail & RQUEUEMASK;
+
+ /* Get our cached LSR */
+ linestatus = ch->ch_cached_lsr;
+ ch->ch_cached_lsr = 0;
+
+ /* Store how much space we have left in the queue */
+ if ((qleft = tail - head - 1) < 0)
+ qleft += RQUEUEMASK + 1;
+
+ /*
+ * If the UART is not in FIFO mode, force the FIFO copy to
+ * NOT be run, by setting total to 0.
+ *
+ * On the other hand, if the UART IS in FIFO mode, then ask
+ * the UART to give us an approximation of data it has RX'ed.
+ */
+ if (!(ch->ch_flags & CH_FIFO_ENABLED))
+ total = 0;
+ else {
+ total = readb(&ch->ch_neo_uart->rfifo);
+
+ /*
+ * EXAR chip bug - RX FIFO COUNT - Fudge factor.
+ *
+ * This resolves a problem/bug with the Exar chip that sometimes
+ * returns a bogus value in the rfifo register.
+ * The count can be anywhere from 0-3 bytes "off".
+ * Bizarre, but true.
+ */
+ if ((ch->ch_bd->dvid & 0xf0) >= UART_XR17E158_DVID) {
+ total -= 1;
+ }
+ else {
+ total -= 3;
+ }
+ }
+
+
+ /*
+ * Finally, bound the copy to make sure we don't overflow
+ * our own queue...
+ * The byte-by-byte copy loop below this one will
+ * deal with the queue overflow possibility.
+ */
+ total = min(total, qleft);
+
+ while (total > 0) {
+
+ /*
+ * Grab the linestatus register; we need to check
+ * whether there are any errors in the FIFO.
+ */
+ linestatus = readb(&ch->ch_neo_uart->lsr);
+
+ /*
+ * Break out if there is a FIFO error somewhere.
+ * This will allow us to go byte by byte down below,
+ * finding the exact location of the error.
+ */
+ if (linestatus & UART_17158_RX_FIFO_DATA_ERROR)
+ break;
+
+ /* Make sure we don't go over the end of our queue */
+ n = min(((uint) total), (RQUEUESIZE - (uint) head));
+
+ /*
+ * Cut down n even further if needed, this is to fix
+ * a problem with memcpy_fromio() with the Neo on the
+ * IBM pSeries platform.
+ * 15 bytes max appears to be the magic number.
+ */
+ n = min((uint) n, (uint) 12);
+
+ /*
+ * Since we are grabbing the linestatus register, which
+ * will reset some bits after our read, we need to ensure
+ * we don't miss our TX FIFO empties.
+ */
+ if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR)) {
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ }
+
+ linestatus = 0;
+
+ /* Copy data from uart to the queue */
+ memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, n);
+ dgnc_sniff_nowait_nolock(ch, "UART READ", ch->ch_rqueue + head, n);
+
+ /*
+ * Since RX_FIFO_DATA_ERROR was 0, we are guaranteed
+ * that all the data currently in the FIFO is free of
+ * breaks and parity/frame/overrun errors.
+ */
+ memset(ch->ch_equeue + head, 0, n);
+
+ /* Add to and flip head if needed */
+ head = (head + n) & RQUEUEMASK;
+ total -= n;
+ qleft -= n;
+ ch->ch_rxcount += n;
+ }
+
+ /*
+ * Create a mask to determine whether we should
+ * insert the character (if any) into our queue.
+ */
+ if (ch->ch_c_iflag & IGNBRK)
+ error_mask |= UART_LSR_BI;
+
+ /*
+ * Now cleanup any leftover bytes still in the UART.
+ * Also deal with any possible queue overflow here as well.
+ */
+ while (1) {
+
+ /*
+ * It's possible we have a linestatus from the loop above
+ * this, so we "OR" on any extra bits.
+ */
+ linestatus |= readb(&ch->ch_neo_uart->lsr);
+
+ /*
+ * If the chip tells us there is no more data pending to
+ * be read, we can then leave.
+ * But before we do, cache the linestatus, just in case.
+ */
+ if (!(linestatus & UART_LSR_DR)) {
+ ch->ch_cached_lsr = linestatus;
+ break;
+ }
+
+ /* No need to store this bit */
+ linestatus &= ~UART_LSR_DR;
+
+ /*
+ * Since we are grabbing the linestatus register, which
+ * will reset some bits after our read, we need to ensure
+ * we don't miss our TX FIFO empties.
+ */
+ if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR)) {
+ linestatus &= ~(UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR);
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ }
+
+ /*
+ * Discard character if we are ignoring the error mask.
+ */
+ if (linestatus & error_mask) {
+ uchar discard;
+ linestatus = 0;
+ memcpy_fromio(&discard, &ch->ch_neo_uart->txrxburst, 1);
+ continue;
+ }
+
+ /*
+ * If our queue is full, we have no choice but to drop some data.
+ * The assumption is that HWFLOW or SWFLOW should have stopped
+ * things way, way before we got to this point.
+ *
+ * I decided that I wanted to ditch the oldest data first,
+ * I hope that's okay with everyone? Yes? Good.
+ */
+ while (qleft < 1) {
+ DPR_READ(("Queue full, dropping DATA:%x LSR:%x\n",
+ ch->ch_rqueue[tail], ch->ch_equeue[tail]));
+
+ ch->ch_r_tail = tail = (tail + 1) & RQUEUEMASK;
+ ch->ch_err_overrun++;
+ qleft++;
+ }
+
+ memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, 1);
+ ch->ch_equeue[head] = (uchar) linestatus;
+ dgnc_sniff_nowait_nolock(ch, "UART READ", ch->ch_rqueue + head, 1);
+
+ DPR_READ(("DATA/LSR pair: %x %x\n", ch->ch_rqueue[head], ch->ch_equeue[head]));
+
+ /* Ditch any remaining linestatus value. */
+ linestatus = 0;
+
+ /* Add to and flip head if needed */
+ head = (head + 1) & RQUEUEMASK;
+
+ qleft--;
+ ch->ch_rxcount++;
+ }
+
+ /*
+ * Write new final heads to channel structure.
+ */
+ ch->ch_r_head = head & RQUEUEMASK;
+ ch->ch_e_head = head & EQUEUEMASK;
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+}
+
+
+/*
+ * This function basically goes to sleep for 'seconds', or until
+ * it gets signalled that the port has fully drained.
+ */
+static int neo_drain(struct tty_struct *tty, uint seconds)
+{
+ ulong lock_flags;
+ struct channel_t *ch;
+ struct un_t *un;
+ int rc = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC) {
+ return (-ENXIO);
+ }
+
+ un = (struct un_t *) tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC) {
+ return (-ENXIO);
+ }
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ return (-ENXIO);
+ }
+
+ DPR_IOCTL(("%d Drain wait started.\n", __LINE__));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ un->un_flags |= UN_EMPTY;
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ /*
+ * Go to sleep waiting for the tty layer to wake me back up when
+ * the empty flag goes away.
+ *
+ * NOTE: TODO: Do something with time passed in.
+ */
+ rc = wait_event_interruptible(un->un_flags_wait, ((un->un_flags & UN_EMPTY) == 0));
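+ /* Note: wait_event_interruptible() returns -ERESTARTSYS if a signal interrupted the wait. */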
+
+ /* If rc is non-zero, the user ctrl-c'ed us */
+ if (rc) {
+ DPR_IOCTL(("%d Drain - User ctrl c'ed\n", __LINE__));
+ }
+ else {
+ DPR_IOCTL(("%d Drain wait finished.\n", __LINE__));
+ }
+
+ return (rc);
+}
+
+
+/*
+ * Flush the WRITE FIFO on the Neo.
+ *
+ * NOTE: Channel lock MUST be held before calling this function!
+ */
+static void neo_flush_uart_write(struct channel_t *ch)
+{
+ uchar tmp = 0;
+ int i = 0;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ return;
+ }
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr);
+ neo_pci_posting_flush(ch->ch_bd);
+
+ for (i = 0; i < 10; i++) {
+
+ /* Check to see if the UART feels it completely flushed the FIFO. */
+ tmp = readb(&ch->ch_neo_uart->isr_fcr);
+ if (tmp & 4) {
+ DPR_IOCTL(("Still flushing TX UART... i: %d\n", i));
+ udelay(10);
+ }
+ else
+ break;
+ }
+
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+}
+
+
+/*
+ * Flush the READ FIFO on the Neo.
+ *
+ * NOTE: Channel lock MUST be held before calling this function!
+ */
+static void neo_flush_uart_read(struct channel_t *ch)
+{
+ uchar tmp = 0;
+ int i = 0;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ return;
+ }
+
+ writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR), &ch->ch_neo_uart->isr_fcr);
+ neo_pci_posting_flush(ch->ch_bd);
+
+ for (i = 0; i < 10; i++) {
+
+ /* Check to see if the UART feels it completely flushed the FIFO. */
+ tmp = readb(&ch->ch_neo_uart->isr_fcr);
+ if (tmp & 2) {
+ DPR_IOCTL(("Still flushing RX UART... i: %d\n", i));
+ udelay(10);
+ }
+ else
+ break;
+ }
+}
+
+
+static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
+{
+ ushort head;
+ ushort tail;
+ int n;
+ int s;
+ int qlen;
+ uint len_written = 0;
+ ulong lock_flags;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /* No data to write to the UART */
+ if (ch->ch_w_tail == ch->ch_w_head) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ /* If port is "stopped", don't send any data to the UART */
+ if ((ch->ch_flags & CH_FORCED_STOP) || (ch->ch_flags & CH_BREAK_SENDING)) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ /*
+ * If FIFOs are disabled, send data directly to the txrx register.
+ */
+ if (!(ch->ch_flags & CH_FIFO_ENABLED)) {
+ uchar lsrbits = readb(&ch->ch_neo_uart->lsr);
+
+ /* Cache the LSR bits for later parsing */
+ ch->ch_cached_lsr |= lsrbits;
+ if (ch->ch_cached_lsr & UART_LSR_THRE) {
+ ch->ch_cached_lsr &= ~(UART_LSR_THRE);
+
+ /*
+ * If RTS Toggle mode is on, turn on RTS now if not already set,
+ * and make sure we get an event when the data transfer has completed.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
+ if (!(ch->ch_mostat & UART_MCR_RTS)) {
+ ch->ch_mostat |= (UART_MCR_RTS);
+ neo_assert_modem_signals(ch);
+ }
+ ch->ch_tun.un_flags |= (UN_EMPTY);
+ }
+ /*
+ * If DTR Toggle mode is on, turn on DTR now if not already set,
+ * and make sure we get an event when the data transfer has completed.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
+ if (!(ch->ch_mostat & UART_MCR_DTR)) {
+ ch->ch_mostat |= (UART_MCR_DTR);
+ neo_assert_modem_signals(ch);
+ }
+ ch->ch_tun.un_flags |= (UN_EMPTY);
+ }
+
+ writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_neo_uart->txrx);
+ DPR_WRITE(("Tx data: %x\n", ch->ch_wqueue[ch->ch_w_head]));
+ ch->ch_w_tail++;
+ ch->ch_w_tail &= WQUEUEMASK;
+ ch->ch_txcount++;
+ }
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ /*
+ * We have to do it this way, because of the EXAR TXFIFO count bug.
+ */
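+ /*
+ * On the older parts (dvid below UART_XR17E158_DVID) the reported TX
+ * FIFO count isn't trusted: we only refill once the FIFO has drained to
+ * the trigger level and compute the free space from that level. On the
+ * XR17E158 family the tfifo register is used directly.
+ */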
+ if ((ch->ch_bd->dvid & 0xf0) < UART_XR17E158_DVID) {
+ if (!(ch->ch_flags & (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM))) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ len_written = 0;
+
+ n = readb(&ch->ch_neo_uart->tfifo);
+
+ if ((unsigned int) n > ch->ch_t_tlevel) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ n = UART_17158_TX_FIFOSIZE - ch->ch_t_tlevel;
+ }
+ else {
+ n = UART_17158_TX_FIFOSIZE - readb(&ch->ch_neo_uart->tfifo);
+ }
+
+ /* cache head and tail of queue */
+ head = ch->ch_w_head & WQUEUEMASK;
+ tail = ch->ch_w_tail & WQUEUEMASK;
+ qlen = (head - tail) & WQUEUEMASK;
+
+ /* Find the minimum of the FIFO space and the queue length */
+ n = min(n, qlen);
+
+ while (n > 0) {
+
+ s = ((head >= tail) ? head : WQUEUESIZE) - tail;
+ s = min(s, n);
+
+ if (s <= 0)
+ break;
+
+ /*
+ * If RTS Toggle mode is on, turn on RTS now if not already set,
+ * and make sure we get an event when the data transfer has completed.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
+ if (!(ch->ch_mostat & UART_MCR_RTS)) {
+ ch->ch_mostat |= (UART_MCR_RTS);
+ neo_assert_modem_signals(ch);
+ }
+ ch->ch_tun.un_flags |= (UN_EMPTY);
+ }
+
+ /*
+ * If DTR Toggle mode is on, turn on DTR now if not already set,
+ * and make sure we get an event when the data transfer has completed.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
+ if (!(ch->ch_mostat & UART_MCR_DTR)) {
+ ch->ch_mostat |= (UART_MCR_DTR);
+ neo_assert_modem_signals(ch);
+ }
+ ch->ch_tun.un_flags |= (UN_EMPTY);
+ }
+
+ memcpy_toio(&ch->ch_neo_uart->txrxburst, ch->ch_wqueue + tail, s);
+ dgnc_sniff_nowait_nolock(ch, "UART WRITE", ch->ch_wqueue + tail, s);
+
+ /* Add and flip queue if needed */
+ tail = (tail + s) & WQUEUEMASK;
+ n -= s;
+ ch->ch_txcount += s;
+ len_written += s;
+ }
+
+ /* Update the final tail */
+ ch->ch_w_tail = tail & WQUEUEMASK;
+
+ if (len_written > 0) {
+ neo_pci_posting_flush(ch->ch_bd);
+ ch->ch_flags &= ~(CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ }
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+}
+
+
+static void neo_parse_modem(struct channel_t *ch, uchar signals)
+{
+ volatile uchar msignals = signals;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DPR_MSIGS(("neo_parse_modem: port: %d msignals: %x\n", ch->ch_portnum, msignals));
+
+ /*
+ * Do altpin switching. Altpin switches DCD and DSR.
+ * This probably breaks DSRPACE, so we should be more clever here.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
+ uchar mswap = msignals;
+
+ if (mswap & UART_MSR_DDCD) {
+ msignals &= ~UART_MSR_DDCD;
+ msignals |= UART_MSR_DDSR;
+ }
+ if (mswap & UART_MSR_DDSR) {
+ msignals &= ~UART_MSR_DDSR;
+ msignals |= UART_MSR_DDCD;
+ }
+ if (mswap & UART_MSR_DCD) {
+ msignals &= ~UART_MSR_DCD;
+ msignals |= UART_MSR_DSR;
+ }
+ if (mswap & UART_MSR_DSR) {
+ msignals &= ~UART_MSR_DSR;
+ msignals |= UART_MSR_DCD;
+ }
+ }
+
+ /* Scrub off the lower bits. They signify deltas, which I don't care about */
+ msignals &= 0xf0;
+
+ if (msignals & UART_MSR_DCD)
+ ch->ch_mistat |= UART_MSR_DCD;
+ else
+ ch->ch_mistat &= ~UART_MSR_DCD;
+
+ if (msignals & UART_MSR_DSR)
+ ch->ch_mistat |= UART_MSR_DSR;
+ else
+ ch->ch_mistat &= ~UART_MSR_DSR;
+
+ if (msignals & UART_MSR_RI)
+ ch->ch_mistat |= UART_MSR_RI;
+ else
+ ch->ch_mistat &= ~UART_MSR_RI;
+
+ if (msignals & UART_MSR_CTS)
+ ch->ch_mistat |= UART_MSR_CTS;
+ else
+ ch->ch_mistat &= ~UART_MSR_CTS;
+
+ DPR_MSIGS(("Port: %d DTR: %d RTS: %d CTS: %d DSR: %d " "RI: %d CD: %d\n",
+ ch->ch_portnum,
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_DTR),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MCR_RTS),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_CTS),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DSR),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_RI),
+ !!((ch->ch_mistat | ch->ch_mostat) & UART_MSR_DCD)));
+}
+
+
+/* Make the UART raise or drop the output signals to match ch_mostat */
+static void neo_assert_modem_signals(struct channel_t *ch)
+{
+ uchar out;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ out = ch->ch_mostat;
+
+ if (ch->ch_flags & CH_LOOPBACK)
+ out |= UART_MCR_LOOP;
+
+ writeb(out, &ch->ch_neo_uart->mcr);
+ neo_pci_posting_flush(ch->ch_bd);
+
+ /* Give time for the UART to actually raise/drop the signals */
+ udelay(10);
+}
+
+
+static void neo_send_start_character(struct channel_t *ch)
+{
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ if (ch->ch_startc != _POSIX_VDISABLE) {
+ ch->ch_xon_sends++;
+ writeb(ch->ch_startc, &ch->ch_neo_uart->txrx);
+ neo_pci_posting_flush(ch->ch_bd);
+ udelay(10);
+ }
+}
+
+
+static void neo_send_stop_character(struct channel_t *ch)
+{
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ if (ch->ch_stopc != _POSIX_VDISABLE) {
+ ch->ch_xoff_sends++;
+ writeb(ch->ch_stopc, &ch->ch_neo_uart->txrx);
+ neo_pci_posting_flush(ch->ch_bd);
+ udelay(10);
+ }
+}
+
+
+/*
+ * neo_uart_init
+ */
+static void neo_uart_init(struct channel_t *ch)
+{
+
+ writeb(0, &ch->ch_neo_uart->ier);
+ writeb(0, &ch->ch_neo_uart->efr);
+ writeb(UART_EFR_ECB, &ch->ch_neo_uart->efr);
+
+
+ /* Clear out UART and FIFO */
+ readb(&ch->ch_neo_uart->txrx);
+ writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr);
+ readb(&ch->ch_neo_uart->lsr);
+ readb(&ch->ch_neo_uart->msr);
+
+ ch->ch_flags |= CH_FIFO_ENABLED;
+
+ /* Assert any signals we want up */
+ writeb(ch->ch_mostat, &ch->ch_neo_uart->mcr);
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+/*
+ * Make the UART completely turn off.
+ */
+static void neo_uart_off(struct channel_t *ch)
+{
+ /* Turn off UART enhanced bits */
+ writeb(0, &ch->ch_neo_uart->efr);
+
+ /* Stop all interrupts from occurring. */
+ writeb(0, &ch->ch_neo_uart->ier);
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+static uint neo_get_uart_bytes_left(struct channel_t *ch)
+{
+ uchar left = 0;
+ uchar lsr = readb(&ch->ch_neo_uart->lsr);
+
+ /* We must cache the LSR as some of the bits get reset once read... */
+ ch->ch_cached_lsr |= lsr;
+
+ /* Determine whether the Transmitter is empty or not */
+ if (!(lsr & UART_LSR_TEMT)) {
+ if (ch->ch_flags & CH_TX_FIFO_EMPTY) {
+ tasklet_schedule(&ch->ch_bd->helper_tasklet);
+ }
+ left = 1;
+ } else {
+ ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
+ left = 0;
+ }
+
+ return left;
+}
+
+
+/* Channel lock MUST be held by the calling function! */
+static void neo_send_break(struct channel_t *ch, int msecs)
+{
+ /*
+ * If we receive a time of 0, this means turn off the break.
+ */
+ if (msecs == 0) {
+ if (ch->ch_flags & CH_BREAK_SENDING) {
+ uchar temp = readb(&ch->ch_neo_uart->lcr);
+ writeb((temp & ~UART_LCR_SBC), &ch->ch_neo_uart->lcr);
+ neo_pci_posting_flush(ch->ch_bd);
+ ch->ch_flags &= ~(CH_BREAK_SENDING);
+ ch->ch_stop_sending_break = 0;
+ DPR_IOCTL(("Finishing UART_LCR_SBC! finished: %lx\n", jiffies));
+ }
+ return;
+ }
+
+ /*
+ * Set the time we should stop sending the break.
+ * If we are already sending a break, toss away the existing
+ * time to stop, and use this new value instead.
+ */
+ ch->ch_stop_sending_break = jiffies + dgnc_jiffies_from_ms(msecs);
+
+ /* Tell the UART to start sending the break */
+ if (!(ch->ch_flags & CH_BREAK_SENDING)) {
+ uchar temp = readb(&ch->ch_neo_uart->lcr);
+ writeb((temp | UART_LCR_SBC), &ch->ch_neo_uart->lcr);
+ neo_pci_posting_flush(ch->ch_bd);
+ ch->ch_flags |= (CH_BREAK_SENDING);
+ DPR_IOCTL(("Port %d. Starting UART_LCR_SBC! start: %lx should end: %lx\n",
+ ch->ch_portnum, jiffies, ch->ch_stop_sending_break));
+ }
+}
+
+
+/*
+ * neo_send_immediate_char.
+ *
+ * Sends a specific character as soon as possible to the UART,
+ * jumping over any bytes that might be in the write queue.
+ *
+ * The channel lock MUST be held by the calling function.
+ */
+static void neo_send_immediate_char(struct channel_t *ch, unsigned char c)
+{
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ writeb(c, &ch->ch_neo_uart->txrx);
+ neo_pci_posting_flush(ch->ch_bd);
+}
+
+
+static unsigned int neo_read_eeprom(unsigned char __iomem *base, unsigned int address)
+{
+ unsigned int enable;
+ unsigned int bits;
+ unsigned int databit;
+ unsigned int val;
+
+ /* enable chip select */
+ writeb(NEO_EECS, base + NEO_EEREG);
+ /* READ */
+ enable = (address | 0x180);
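+ /*
+ * 0x180 prepends the Microwire-style start bit (1) and READ opcode (10)
+ * to the 6-bit word address, giving the 9 bits clocked out below
+ * (this looks like a 93C46-class EEPROM in 16-bit organization).
+ */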
+
+ for (bits = 9; bits--; ) {
+ databit = (enable & (1 << bits)) ? NEO_EEDI : 0;
+ /* Set read address */
+ writeb(databit | NEO_EECS, base + NEO_EEREG);
+ writeb(databit | NEO_EECS | NEO_EECK, base + NEO_EEREG);
+ }
+
+ val = 0;
+
+ for (bits = 17; bits--; ) {
+ /* clock to EEPROM */
+ writeb(NEO_EECS, base + NEO_EEREG);
+ writeb(NEO_EECS | NEO_EECK, base + NEO_EEREG);
+ val <<= 1;
+ /* read EEPROM */
+ if (readb(base + NEO_EEREG) & NEO_EEDO)
+ val |= 1;
+ }
+
+ /* clock falling edge */
+ writeb(NEO_EECS, base + NEO_EEREG);
+
+ /* drop chip select */
+ writeb(0x00, base + NEO_EEREG);
+
+ return val;
+}
+
+
+static void neo_vpd(struct board_t *brd)
+{
+ unsigned int i = 0;
+ unsigned int a;
+
+ if (!brd || brd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ if (!brd->re_map_membase)
+ return;
+
+ /* Store the VPD into our buffer */
+ for (i = 0; i < NEO_VPD_IMAGESIZE; i++) {
+ a = neo_read_eeprom(brd->re_map_membase, i);
+ brd->vpd[i*2] = a & 0xff;
+ brd->vpd[(i*2)+1] = (a >> 8) & 0xff;
+ }
+
+ if (((brd->vpd[0x08] != 0x82) /* long resource name tag */
+ && (brd->vpd[0x10] != 0x82)) /* long resource name tag (PCI-66 files)*/
+ || (brd->vpd[0x7F] != 0x78)) /* small resource end tag */
+ {
+ memset(brd->vpd, '\0', NEO_VPD_IMAGESIZE);
+ }
+ else {
+ /* Search for the serial number */
+ for (i = 0; i < NEO_VPD_IMAGESIZE * 2; i++) {
+ if (brd->vpd[i] == 'S' && brd->vpd[i + 1] == 'N') {
+ strncpy(brd->serial_num, &(brd->vpd[i + 3]), 9);
+ }
+ }
+ }
+}
diff --git a/drivers/staging/dgnc/dgnc_neo.h b/drivers/staging/dgnc/dgnc_neo.h
new file mode 100644
index 00000000000..7ec5710a434
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_neo.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ */
+
+#ifndef __DGNC_NEO_H
+#define __DGNC_NEO_H
+
+#include "dgnc_types.h"
+#include "dgnc_driver.h"
+
+/************************************************************************
+ * Per channel/port NEO UART structure *
+ ************************************************************************
+ * Base Structure Entries Usage Meanings to Host *
+ * *
+ * W = read write R = read only *
+ * U = Unused. *
+ ************************************************************************/
+
+struct neo_uart_struct {
+ u8 txrx; /* WR RHR/THR - Holding Reg */
+ u8 ier; /* WR IER - Interrupt Enable Reg */
+ u8 isr_fcr; /* WR ISR/FCR - Interrupt Status Reg/Fifo Control Reg */
+ u8 lcr; /* WR LCR - Line Control Reg */
+ u8 mcr; /* WR MCR - Modem Control Reg */
+ u8 lsr; /* WR LSR - Line Status Reg */
+ u8 msr; /* WR MSR - Modem Status Reg */
+ u8 spr; /* WR SPR - Scratch Pad Reg */
+ u8 fctr; /* WR FCTR - Feature Control Reg */
+ u8 efr; /* WR EFR - Enhanced Function Reg */
+ u8 tfifo; /* WR TXCNT/TXTRG - Transmit FIFO Reg */
+ u8 rfifo; /* WR RXCNT/RXTRG - Receive FIFO Reg */
+ u8 xoffchar1; /* WR XOFF 1 - XOff Character 1 Reg */
+ u8 xoffchar2; /* WR XOFF 2 - XOff Character 2 Reg */
+ u8 xonchar1; /* WR XON 1 - Xon Character 1 Reg */
+ u8 xonchar2; /* WR XON 2 - XOn Character 2 Reg */
+
+ u8 reserved1[0x2ff - 0x200]; /* U Reserved by Exar */
+ u8 txrxburst[64]; /* RW 64 bytes of RX/TX FIFO Data */
+ u8 reserved2[0x37f - 0x340]; /* U Reserved by Exar */
+ u8 rxburst_with_errors[64]; /* R 64 bytes of RX FIFO Data + LSR */
+};
+
+/* Where to read the extended interrupt register (32bits instead of 8bits) */
+#define UART_17158_POLL_ADDR_OFFSET 0x80
+
+/* These are the current DVIDs of the Neo boards */
+#define UART_XR17C158_DVID 0x20
+#define UART_XR17D158_DVID 0x20
+#define UART_XR17E158_DVID 0x40
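+/*
+ * dgnc_neo.c compares (board dvid & 0xf0) against UART_XR17E158_DVID to
+ * decide whether the TX/RX FIFO count registers can be trusted (see the
+ * fudge factors in the copy_data routines).
+ */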
+
+#define NEO_EECK 0x10 /* Clock */
+#define NEO_EECS 0x20 /* Chip Select */
+#define NEO_EEDI 0x40 /* Data In is an Output Pin */
+#define NEO_EEDO 0x80 /* Data Out is an Input Pin */
+#define NEO_EEREG 0x8E /* offset to EEPROM control reg */
+
+
+#define NEO_VPD_IMAGESIZE 0x40 /* size of image to read from EEPROM in words */
+#define NEO_VPD_IMAGEBYTES (NEO_VPD_IMAGESIZE * 2)
+
+/*
+ * These are the redefinitions for the FCTR on the XR17C158, since
+ * Exar made them different from their earlier design (XR16C854).
+ */
+
+/* These are only applicable when table D is selected */
+#define UART_17158_FCTR_RTS_NODELAY 0x00
+#define UART_17158_FCTR_RTS_4DELAY 0x01
+#define UART_17158_FCTR_RTS_6DELAY 0x02
+#define UART_17158_FCTR_RTS_8DELAY 0x03
+#define UART_17158_FCTR_RTS_12DELAY 0x12
+#define UART_17158_FCTR_RTS_16DELAY 0x05
+#define UART_17158_FCTR_RTS_20DELAY 0x13
+#define UART_17158_FCTR_RTS_24DELAY 0x06
+#define UART_17158_FCTR_RTS_28DELAY 0x14
+#define UART_17158_FCTR_RTS_32DELAY 0x07
+#define UART_17158_FCTR_RTS_36DELAY 0x16
+#define UART_17158_FCTR_RTS_40DELAY 0x08
+#define UART_17158_FCTR_RTS_44DELAY 0x09
+#define UART_17158_FCTR_RTS_48DELAY 0x10
+#define UART_17158_FCTR_RTS_52DELAY 0x11
+
+#define UART_17158_FCTR_RTS_IRDA 0x10
+#define UART_17158_FCTR_RS485 0x20
+#define UART_17158_FCTR_TRGA 0x00
+#define UART_17158_FCTR_TRGB 0x40
+#define UART_17158_FCTR_TRGC 0x80
+#define UART_17158_FCTR_TRGD 0xC0
+
+/* 17158 trigger table selects.. */
+#define UART_17158_FCTR_BIT6 0x40
+#define UART_17158_FCTR_BIT7 0x80
+
+/* 17158 TX/RX memmapped buffer offsets */
+#define UART_17158_RX_FIFOSIZE 64
+#define UART_17158_TX_FIFOSIZE 64
+
+/* 17158 Extended IIR's */
+#define UART_17158_IIR_RDI_TIMEOUT 0x0C /* Receiver data TIMEOUT */
+#define UART_17158_IIR_XONXOFF 0x10 /* Received an XON/XOFF char */
+#define UART_17158_IIR_HWFLOW_STATE_CHANGE 0x20 /* CTS/DSR or RTS/DTR state change */
+#define UART_17158_IIR_FIFO_ENABLED 0xC0 /* 16550 FIFOs are Enabled */
+
+/*
+ * These are the extended interrupts that get sent
+ * back to us from the UART's 32bit interrupt register
+ */
+#define UART_17158_RX_LINE_STATUS 0x1 /* RX Ready */
+#define UART_17158_RXRDY_TIMEOUT 0x2 /* RX Ready Timeout */
+#define UART_17158_TXRDY 0x3 /* TX Ready */
+#define UART_17158_MSR 0x4 /* Modem State Change */
+#define UART_17158_TX_AND_FIFO_CLR 0x40 /* Transmitter Holding Reg Empty */
+#define UART_17158_RX_FIFO_DATA_ERROR 0x80 /* UART detected an RX FIFO Data error */
+
+/*
+ * These are the EXTENDED definitions for the 17C158's Interrupt
+ * Enable Register.
+ */
+#define UART_17158_EFR_ECB 0x10 /* Enhanced control bit */
+#define UART_17158_EFR_IXON 0x2 /* Receiver compares Xon1/Xoff1 */
+#define UART_17158_EFR_IXOFF 0x8 /* Transmit Xon1/Xoff1 */
+#define UART_17158_EFR_RTSDTR 0x40 /* Auto RTS/DTR Flow Control Enable */
+#define UART_17158_EFR_CTSDSR 0x80 /* Auto CTS/DSR Flow Control Enable */
+
+#define UART_17158_XOFF_DETECT 0x1 /* Indicates whether chip saw an incoming XOFF char */
+#define UART_17158_XON_DETECT 0x2 /* Indicates whether chip saw an incoming XON char */
+
+#define UART_17158_IER_RSVD1 0x10 /* Reserved by Exar */
+#define UART_17158_IER_XOFF 0x20 /* Xoff Interrupt Enable */
+#define UART_17158_IER_RTSDTR 0x40 /* Output Interrupt Enable */
+#define UART_17158_IER_CTSDSR 0x80 /* Input Interrupt Enable */
+
+/*
+ * Our Global Variables
+ */
+extern struct board_ops dgnc_neo_ops;
+
+#endif
diff --git a/drivers/staging/dgnc/dgnc_pci.h b/drivers/staging/dgnc/dgnc_pci.h
new file mode 100644
index 00000000000..5b6f76d98aa
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_pci.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef __DGNC_PCI_H
+#define __DGNC_PCI_H
+
+#define PCIMAX 32 /* maximum number of PCI boards */
+
+#define DIGI_VID 0x114F
+
+#define PCI_DEVICE_CLASSIC_4_DID 0x0028
+#define PCI_DEVICE_CLASSIC_8_DID 0x0029
+#define PCI_DEVICE_CLASSIC_4_422_DID 0x00D0
+#define PCI_DEVICE_CLASSIC_8_422_DID 0x00D1
+#define PCI_DEVICE_NEO_4_DID 0x00B0
+#define PCI_DEVICE_NEO_8_DID 0x00B1
+#define PCI_DEVICE_NEO_2DB9_DID 0x00C8
+#define PCI_DEVICE_NEO_2DB9PRI_DID 0x00C9
+#define PCI_DEVICE_NEO_2RJ45_DID 0x00CA
+#define PCI_DEVICE_NEO_2RJ45PRI_DID 0x00CB
+#define PCI_DEVICE_NEO_1_422_DID 0x00CC
+#define PCI_DEVICE_NEO_1_422_485_DID 0x00CD
+#define PCI_DEVICE_NEO_2_422_485_DID 0x00CE
+#define PCI_DEVICE_NEO_EXPRESS_8_DID 0x00F0
+#define PCI_DEVICE_NEO_EXPRESS_4_DID 0x00F1
+#define PCI_DEVICE_NEO_EXPRESS_4RJ45_DID 0x00F2
+#define PCI_DEVICE_NEO_EXPRESS_8RJ45_DID 0x00F3
+#define PCI_DEVICE_NEO_EXPRESS_4_IBM_DID 0x00F4
+
+#define PCI_DEVICE_CLASSIC_4_PCI_NAME "ClassicBoard 4 PCI"
+#define PCI_DEVICE_CLASSIC_8_PCI_NAME "ClassicBoard 8 PCI"
+#define PCI_DEVICE_CLASSIC_4_422_PCI_NAME "ClassicBoard 4 422 PCI"
+#define PCI_DEVICE_CLASSIC_8_422_PCI_NAME "ClassicBoard 8 422 PCI"
+#define PCI_DEVICE_NEO_4_PCI_NAME "Neo 4 PCI"
+#define PCI_DEVICE_NEO_8_PCI_NAME "Neo 8 PCI"
+#define PCI_DEVICE_NEO_2DB9_PCI_NAME "Neo 2 - DB9 Universal PCI"
+#define PCI_DEVICE_NEO_2DB9PRI_PCI_NAME "Neo 2 - DB9 Universal PCI - Powered Ring Indicator"
+#define PCI_DEVICE_NEO_2RJ45_PCI_NAME "Neo 2 - RJ45 Universal PCI"
+#define PCI_DEVICE_NEO_2RJ45PRI_PCI_NAME "Neo 2 - RJ45 Universal PCI - Powered Ring Indicator"
+#define PCI_DEVICE_NEO_1_422_PCI_NAME "Neo 1 422 PCI"
+#define PCI_DEVICE_NEO_1_422_485_PCI_NAME "Neo 1 422/485 PCI"
+#define PCI_DEVICE_NEO_2_422_485_PCI_NAME "Neo 2 422/485 PCI"
+
+#define PCI_DEVICE_NEO_EXPRESS_8_PCI_NAME "Neo 8 PCI Express"
+#define PCI_DEVICE_NEO_EXPRESS_4_PCI_NAME "Neo 4 PCI Express"
+#define PCI_DEVICE_NEO_EXPRESS_4RJ45_PCI_NAME "Neo 4 PCI Express RJ45"
+#define PCI_DEVICE_NEO_EXPRESS_8RJ45_PCI_NAME "Neo 8 PCI Express RJ45"
+#define PCI_DEVICE_NEO_EXPRESS_4_IBM_PCI_NAME "Neo 4 PCI Express IBM"
+
+
+/* Size of Memory and I/O for PCI (4 K) */
+#define PCI_RAM_SIZE 0x1000
+
+/* Size of Memory (2MB) */
+#define PCI_MEM_SIZE 0x1000
+
+#endif
diff --git a/drivers/staging/dgnc/dgnc_sysfs.c b/drivers/staging/dgnc/dgnc_sysfs.c
new file mode 100644
index 00000000000..0ea6c800280
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_sysfs.c
@@ -0,0 +1,756 @@
+/*
+ * Copyright 2004 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/serial_reg.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+
+#include "dgnc_driver.h"
+#include "dgnc_mgmt.h"
+
+
+static ssize_t dgnc_driver_version_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
+}
+static DRIVER_ATTR(version, S_IRUSR, dgnc_driver_version_show, NULL);
+
+
+static ssize_t dgnc_driver_boards_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", dgnc_NumBoards);
+}
+static DRIVER_ATTR(boards, S_IRUSR, dgnc_driver_boards_show, NULL);
+
+
+static ssize_t dgnc_driver_maxboards_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
+}
+static DRIVER_ATTR(maxboards, S_IRUSR, dgnc_driver_maxboards_show, NULL);
+
+
+static ssize_t dgnc_driver_pollcounter_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%ld\n", dgnc_poll_counter);
+}
+static DRIVER_ATTR(pollcounter, S_IRUSR, dgnc_driver_pollcounter_show, NULL);
+
+
+static ssize_t dgnc_driver_state_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%s\n", dgnc_driver_state_text[dgnc_driver_state]);
+}
+static DRIVER_ATTR(state, S_IRUSR, dgnc_driver_state_show, NULL);
+
+
+static ssize_t dgnc_driver_debug_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", dgnc_debug);
+}
+
+static ssize_t dgnc_driver_debug_store(struct device_driver *ddp, const char *buf, size_t count)
+{
+ sscanf(buf, "0x%x\n", &dgnc_debug);
+ return count;
+}
+static DRIVER_ATTR(debug, (S_IRUSR | S_IWUSR), dgnc_driver_debug_show, dgnc_driver_debug_store);
+
+
+static ssize_t dgnc_driver_rawreadok_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%x\n", dgnc_rawreadok);
+}
+
+static ssize_t dgnc_driver_rawreadok_store(struct device_driver *ddp, const char *buf, size_t count)
+{
+ sscanf(buf, "0x%x\n", &dgnc_rawreadok);
+ return count;
+}
+static DRIVER_ATTR(rawreadok, (S_IRUSR | S_IWUSR), dgnc_driver_rawreadok_show, dgnc_driver_rawreadok_store);
+
+
+static ssize_t dgnc_driver_pollrate_show(struct device_driver *ddp, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%dms\n", dgnc_poll_tick);
+}
+
+static ssize_t dgnc_driver_pollrate_store(struct device_driver *ddp, const char *buf, size_t count)
+{
+ sscanf(buf, "%d\n", &dgnc_poll_tick);
+ return count;
+}
+static DRIVER_ATTR(pollrate, (S_IRUSR | S_IWUSR), dgnc_driver_pollrate_show, dgnc_driver_pollrate_store);
+
+
+void dgnc_create_driver_sysfiles(struct pci_driver *dgnc_driver)
+{
+ int rc = 0;
+ struct device_driver *driverfs = &dgnc_driver->driver;
+
+ rc |= driver_create_file(driverfs, &driver_attr_version);
+ rc |= driver_create_file(driverfs, &driver_attr_boards);
+ rc |= driver_create_file(driverfs, &driver_attr_maxboards);
+ rc |= driver_create_file(driverfs, &driver_attr_debug);
+ rc |= driver_create_file(driverfs, &driver_attr_rawreadok);
+ rc |= driver_create_file(driverfs, &driver_attr_pollrate);
+ rc |= driver_create_file(driverfs, &driver_attr_pollcounter);
+ rc |= driver_create_file(driverfs, &driver_attr_state);
+ if (rc) {
+ printk(KERN_ERR "DGNC: sysfs driver_create_file failed!\n");
+ }
+}
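+/*
+ * These driver attributes show up under the dgnc driver's sysfs directory
+ * (typically /sys/bus/pci/drivers/dgnc/, depending on the name the PCI
+ * driver is registered with); e.g. reading "pollrate" there reports the
+ * current poll tick in ms.
+ */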
+
+
+void dgnc_remove_driver_sysfiles(struct pci_driver *dgnc_driver)
+{
+ struct device_driver *driverfs = &dgnc_driver->driver;
+ driver_remove_file(driverfs, &driver_attr_version);
+ driver_remove_file(driverfs, &driver_attr_boards);
+ driver_remove_file(driverfs, &driver_attr_maxboards);
+ driver_remove_file(driverfs, &driver_attr_debug);
+ driver_remove_file(driverfs, &driver_attr_rawreadok);
+ driver_remove_file(driverfs, &driver_attr_pollrate);
+ driver_remove_file(driverfs, &driver_attr_pollcounter);
+ driver_remove_file(driverfs, &driver_attr_state);
+}
+
+
+#define DGNC_VERIFY_BOARD(p, bd) \
+ if (!p) \
+ return (0); \
+ \
+ bd = dev_get_drvdata(p); \
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC) \
+ return (0); \
+ if (bd->state != BOARD_READY) \
+ return (0); \
+
+
+
+static ssize_t dgnc_vpd_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ count += sprintf(buf + count, "\n 0 1 2 3 4 5 6 7 8 9 A B C D E F");
+ for (i = 0; i < 0x40 * 2; i++) {
+ if (!(i % 16))
+ count += sprintf(buf + count, "\n%04X ", i * 2);
+ count += sprintf(buf + count, "%02X ", bd->vpd[i]);
+ }
+ count += sprintf(buf + count, "\n");
+
+ return count;
+}
+static DEVICE_ATTR(vpd, S_IRUSR, dgnc_vpd_show, NULL);
+
+static ssize_t dgnc_serial_number_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ if (bd->serial_num[0] == '\0')
+ count += sprintf(buf + count, "<UNKNOWN>\n");
+ else
+ count += sprintf(buf + count, "%s\n", bd->serial_num);
+
+ return count;
+}
+static DEVICE_ATTR(serial_number, S_IRUSR, dgnc_serial_number_show, NULL);
+
+
+static ssize_t dgnc_ports_state_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d %s\n", bd->channels[i]->ch_portnum,
+ bd->channels[i]->ch_open_count ? "Open" : "Closed");
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_state, S_IRUSR, dgnc_ports_state_show, NULL);
+
+
+static ssize_t dgnc_ports_baud_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d %d\n", bd->channels[i]->ch_portnum, bd->channels[i]->ch_old_baud);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_baud, S_IRUSR, dgnc_ports_baud_show, NULL);
+
+
+static ssize_t dgnc_ports_msignals_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ if (bd->channels[i]->ch_open_count) {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d %s %s %s %s %s %s\n", bd->channels[i]->ch_portnum,
+ (bd->channels[i]->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
+ (bd->channels[i]->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
+ (bd->channels[i]->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
+ (bd->channels[i]->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
+ (bd->channels[i]->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
+ (bd->channels[i]->ch_mistat & UART_MSR_RI) ? "RI" : "");
+ } else {
+ count += snprintf(buf + count, PAGE_SIZE - count,
+ "%d\n", bd->channels[i]->ch_portnum);
+ }
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_msignals, S_IRUSR, dgnc_ports_msignals_show, NULL);
+
+
+static ssize_t dgnc_ports_iflag_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_iflag);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_iflag, S_IRUSR, dgnc_ports_iflag_show, NULL);
+
+
+static ssize_t dgnc_ports_cflag_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_cflag);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_cflag, S_IRUSR, dgnc_ports_cflag_show, NULL);
+
+
+static ssize_t dgnc_ports_oflag_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_oflag);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_oflag, S_IRUSR, dgnc_ports_oflag_show, NULL);
+
+
+static ssize_t dgnc_ports_lflag_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_c_lflag);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_lflag, S_IRUSR, dgnc_ports_lflag_show, NULL);
+
+
+static ssize_t dgnc_ports_digi_flag_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_digi.digi_flags);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_digi_flag, S_IRUSR, dgnc_ports_digi_flag_show, NULL);
+
+
+static ssize_t dgnc_ports_rxcount_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_rxcount);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_rxcount, S_IRUSR, dgnc_ports_rxcount_show, NULL);
+
+
+static ssize_t dgnc_ports_txcount_show(struct device *p, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ int count = 0;
+ int i = 0;
+
+ DGNC_VERIFY_BOARD(p, bd);
+
+ for (i = 0; i < bd->nasync; i++) {
+ count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
+ bd->channels[i]->ch_portnum, bd->channels[i]->ch_txcount);
+ }
+ return count;
+}
+static DEVICE_ATTR(ports_txcount, S_IRUSR, dgnc_ports_txcount_show, NULL);
+
+
+/* This function creates the sysfs files that export each signal's status
+ * to sysfs; each value is placed in a separate file.
+ */
+void dgnc_create_ports_sysfiles(struct board_t *bd)
+{
+ int rc = 0;
+
+ dev_set_drvdata(&bd->pdev->dev, bd);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_state);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_baud);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_vpd);
+ rc |= device_create_file(&(bd->pdev->dev), &dev_attr_serial_number);
+ if (rc) {
+ printk(KERN_ERR "DGNC: sysfs device_create_file failed!\n");
+ }
+}
+
+
+/* Removes all the sysfs files created for the board's ports. */
+void dgnc_remove_ports_sysfiles(struct board_t *bd)
+{
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_state);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_baud);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_vpd);
+ device_remove_file(&(bd->pdev->dev), &dev_attr_serial_number);
+}
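
All of the attributes above hang off the board's PCI device, so they can be read with
ordinary userspace code. A minimal standalone sketch (not part of the patch; the program
and the example sysfs path are only illustrative, and the real path depends on where the
board's PCI device is bound):

#include <stdio.h>

int main(int argc, char **argv)
{
	static const char *names[] = {
		"ports_state", "ports_baud", "ports_msignals",
		"ports_iflag", "ports_cflag", "ports_oflag",
		"ports_lflag", "ports_digi_flag",
		"ports_rxcount", "ports_txcount",
	};
	char path[256], line[256];
	unsigned int i;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <sysfs device dir>\n", argv[0]);
		return 1;
	}

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		FILE *f;

		snprintf(path, sizeof(path), "%s/%s", argv[1], names[i]);
		f = fopen(path, "r");
		if (!f)
			continue;	/* attribute not present */
		printf("== %s ==\n", names[i]);
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
	}
	return 0;
}

Run, for example, as ./dump_ports /sys/bus/pci/devices/0000:04:00.0 (an assumed device
path); each attribute prints one line per port, as produced by the show routines above.
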
+
+
+static ssize_t dgnc_tty_state_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%s", un->un_open_count ? "Open" : "Closed");
+}
+static DEVICE_ATTR(state, S_IRUSR, dgnc_tty_state_show, NULL);
+
+
+static ssize_t dgnc_tty_baud_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_old_baud);
+}
+static DEVICE_ATTR(baud, S_IRUSR, dgnc_tty_baud_show, NULL);
+
+
+static ssize_t dgnc_tty_msignals_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ if (ch->ch_open_count) {
+ return snprintf(buf, PAGE_SIZE, "%s %s %s %s %s %s\n",
+ (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
+ (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
+ (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
+ (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
+ (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
+ (ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
+ }
+ return 0;
+}
+static DEVICE_ATTR(msignals, S_IRUSR, dgnc_tty_msignals_show, NULL);
+
+
+static ssize_t dgnc_tty_iflag_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag);
+}
+static DEVICE_ATTR(iflag, S_IRUSR, dgnc_tty_iflag_show, NULL);
+
+
+static ssize_t dgnc_tty_cflag_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag);
+}
+static DEVICE_ATTR(cflag, S_IRUSR, dgnc_tty_cflag_show, NULL);
+
+
+static ssize_t dgnc_tty_oflag_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag);
+}
+static DEVICE_ATTR(oflag, S_IRUSR, dgnc_tty_oflag_show, NULL);
+
+
+static ssize_t dgnc_tty_lflag_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag);
+}
+static DEVICE_ATTR(lflag, S_IRUSR, dgnc_tty_lflag_show, NULL);
+
+
+static ssize_t dgnc_tty_digi_flag_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags);
+}
+static DEVICE_ATTR(digi_flag, S_IRUSR, dgnc_tty_digi_flag_show, NULL);
+
+
+static ssize_t dgnc_tty_rxcount_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount);
+}
+static DEVICE_ATTR(rxcount, S_IRUSR, dgnc_tty_rxcount_show, NULL);
+
+
+static ssize_t dgnc_tty_txcount_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount);
+}
+static DEVICE_ATTR(txcount, S_IRUSR, dgnc_tty_txcount_show, NULL);
+
+
+static ssize_t dgnc_tty_name_show(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+
+ if (!d)
+ return (0);
+ un = (struct un_t *) dev_get_drvdata(d);
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (0);
+ if (bd->state != BOARD_READY)
+ return (0);
+
+ return snprintf(buf, PAGE_SIZE, "%sn%d%c\n",
+ (un->un_type == DGNC_PRINT) ? "pr" : "tty",
+ bd->boardnum + 1, 'a' + ch->ch_portnum);
+}
+static DEVICE_ATTR(custom_name, S_IRUSR, dgnc_tty_name_show, NULL);
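
For reference, the custom_name attribute above encodes the unit type, the 1-based board
number, and a letter for the port into one short string. A small standalone sketch of the
same formatting, using made-up board and port values:

#include <stdio.h>

/* Mirrors the "%sn%d%c" format used by dgnc_tty_name_show(): "tty" or "pr",
 * the letter 'n', the 1-based board number, and a letter for the port
 * ('a' for port 0, 'b' for port 1, ...).
 */
static void print_custom_name(int is_print, int boardnum, int portnum)
{
	printf("%sn%d%c\n", is_print ? "pr" : "tty",
	       boardnum + 1, 'a' + portnum);
}

int main(void)
{
	print_custom_name(0, 0, 0);	/* ttyn1a */
	print_custom_name(1, 1, 2);	/* prn2c  */
	return 0;
}
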
+
+
+static struct attribute *dgnc_sysfs_tty_entries[] = {
+ &dev_attr_state.attr,
+ &dev_attr_baud.attr,
+ &dev_attr_msignals.attr,
+ &dev_attr_iflag.attr,
+ &dev_attr_cflag.attr,
+ &dev_attr_oflag.attr,
+ &dev_attr_lflag.attr,
+ &dev_attr_digi_flag.attr,
+ &dev_attr_rxcount.attr,
+ &dev_attr_txcount.attr,
+ &dev_attr_custom_name.attr,
+ NULL
+};
+
+
+static struct attribute_group dgnc_tty_attribute_group = {
+ .name = NULL,
+ .attrs = dgnc_sysfs_tty_entries,
+};
+
+
+void dgnc_create_tty_sysfs(struct un_t *un, struct device *c)
+{
+ int ret;
+
+ ret = sysfs_create_group(&c->kobj, &dgnc_tty_attribute_group);
+ if (ret) {
+ printk(KERN_ERR "dgnc: failed to create sysfs tty device attributes.\n");
+ sysfs_remove_group(&c->kobj, &dgnc_tty_attribute_group);
+ return;
+ }
+
+ dev_set_drvdata(c, un);
+
+}
+
+
+void dgnc_remove_tty_sysfs(struct device *c)
+{
+ sysfs_remove_group(&c->kobj, &dgnc_tty_attribute_group);
+}
+
diff --git a/drivers/staging/dgnc/dgnc_sysfs.h b/drivers/staging/dgnc/dgnc_sysfs.h
new file mode 100644
index 00000000000..4b87ce1cc7a
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_sysfs.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef __DGNC_SYSFS_H
+#define __DGNC_SYSFS_H
+
+#include "dgnc_driver.h"
+
+#include <linux/device.h>
+
+struct board_t;
+struct channel_t;
+struct un_t;
+struct pci_driver;
+struct class_device;
+
+extern void dgnc_create_ports_sysfiles(struct board_t *bd);
+extern void dgnc_remove_ports_sysfiles(struct board_t *bd);
+
+extern void dgnc_create_driver_sysfiles(struct pci_driver *);
+extern void dgnc_remove_driver_sysfiles(struct pci_driver *);
+
+extern int dgnc_tty_class_init(void);
+extern int dgnc_tty_class_destroy(void);
+
+extern void dgnc_create_tty_sysfs(struct un_t *un, struct device *c);
+extern void dgnc_remove_tty_sysfs(struct device *c);
+
+
+
+#endif
diff --git a/drivers/staging/dgnc/dgnc_trace.c b/drivers/staging/dgnc/dgnc_trace.c
new file mode 100644
index 00000000000..a98b7d4255c
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_trace.c
@@ -0,0 +1,184 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h> /* For jiffies, task states */
+#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
+#include <linux/vmalloc.h>
+
+#include "dgnc_driver.h"
+
+#define TRC_TO_CONSOLE 1
+
+/* file level globals */
+static char *dgnc_trcbuf; /* the ringbuffer */
+
+#if defined(TRC_TO_KMEM)
+static int dgnc_trcbufi = 0;		/* index of the tilde marking the end of the trace */
+#endif
+
+#if defined(TRC_TO_KMEM)
+static DEFINE_SPINLOCK(dgnc_tracef_lock);
+#endif
+
+
+#if 0
+
+#if !defined(TRC_TO_KMEM) && !defined(TRC_TO_CONSOLE)
+
+void dgnc_tracef(const char *fmt, ...)
+{
+ return;
+}
+
+#else /* !defined(TRC_TO_KMEM) && !defined(TRC_TO_CONSOLE) */
+
+void dgnc_tracef(const char *fmt, ...)
+{
+ va_list ap;
+ char buf[TRC_MAXMSG+1];
+ size_t lenbuf;
+ int i;
+ static int failed = FALSE;
+# if defined(TRC_TO_KMEM)
+ unsigned long flags;
+#endif
+
+ if(failed)
+ return;
+# if defined(TRC_TO_KMEM)
+ DGNC_LOCK(dgnc_tracef_lock, flags);
+#endif
+
+ /* Format buf using fmt and arguments contained in ap. */
+ va_start(ap, fmt);
+ i = vsprintf(buf, fmt, ap);
+ va_end(ap);
+ lenbuf = strlen(buf);
+
+# if defined(TRC_TO_KMEM)
+ {
+ static int initd=0;
+
+ /*
+ * Now, in addition to (or instead of) printing this stuff out
+ * (which is a buffered operation), also tuck it away into a
+ * corner of memory which can be examined post-crash in kdb.
+ */
+ if (!initd) {
+ dgnc_trcbuf = (char *) vmalloc(dgnc_trcbuf_size);
+ if(!dgnc_trcbuf) {
+ failed = TRUE;
+ printk("dgnc: tracing init failed!\n");
+ return;
+ }
+
+ memset(dgnc_trcbuf, '\0', dgnc_trcbuf_size);
+ dgnc_trcbufi = 0;
+ initd++;
+
+ printk("dgnc: tracing enabled - " TRC_DTRC
+ " 0x%lx 0x%x\n",
+ (unsigned long)dgnc_trcbuf,
+ dgnc_trcbuf_size);
+ }
+
+# if defined(TRC_ON_OVERFLOW_WRAP_AROUND)
+ /*
+ * This is the less CPU-intensive way to do things. We simply
+ * wrap around before we fall off the end of the buffer. A
+ * tilde (~) demarcates the current end of the trace.
+ *
+ * This method should be used if you are concerned about race
+ * conditions as it is less likely to affect the timing of
+ * things.
+ */
+
+ if (dgnc_trcbufi + lenbuf >= dgnc_trcbuf_size) {
+ /* We are wrapping, so wipe out the last tilde. */
+ dgnc_trcbuf[dgnc_trcbufi] = '\0';
+ /* put the new string at the beginning of the buffer */
+ dgnc_trcbufi = 0;
+ }
+
+ strcpy(&dgnc_trcbuf[dgnc_trcbufi], buf);
+ dgnc_trcbufi += lenbuf;
+ dgnc_trcbuf[dgnc_trcbufi] = '~';
+
+# elif defined(TRC_ON_OVERFLOW_SHIFT_BUFFER)
+ /*
+ * This is the more CPU-intensive way to do things. If we
+ * venture into the last 1/8 of the buffer, we shift the
+ * last 7/8 of the buffer forward, wiping out the first 1/8.
+ * Advantage: No wrap-around, only truncation from the
+ * beginning.
+ *
+ * This method should not be used if you are concerned about
+ * timing changes affecting the behaviour of the driver (ie,
+ * race conditions).
+ */
+ strcpy(&dgnc_trcbuf[dgnc_trcbufi], buf);
+ dgnc_trcbufi += lenbuf;
+ dgnc_trcbuf[dgnc_trcbufi] = '~';
+ dgnc_trcbuf[dgnc_trcbufi+1] = '\0';
+
+ /* If we're near the end of the trace buffer... */
+ if (dgnc_trcbufi > (dgnc_trcbuf_size/8)*7) {
+ /* Wipe out the first eighth to make some more room. */
+ strcpy(dgnc_trcbuf, &dgnc_trcbuf[dgnc_trcbuf_size/8]);
+ dgnc_trcbufi = strlen(dgnc_trcbuf)-1;
+ /* Plop overflow message at the top of the buffer. */
+ bcopy(TRC_OVERFLOW, dgnc_trcbuf, strlen(TRC_OVERFLOW));
+ }
+# else
+# error "TRC_ON_OVERFLOW_WRAP_AROUND or TRC_ON_OVERFLOW_SHIFT_BUFFER?"
+# endif
+ }
+ DGNC_UNLOCK(dgnc_tracef_lock, flags);
+
+# endif /* defined(TRC_TO_KMEM) */
+}
+
+#endif /* !defined(TRC_TO_KMEM) && !defined(TRC_TO_CONSOLE) */
+
+#endif
+
+
+/*
+ * dgnc_tracer_free()
+ *
+ * Free the trace buffer, if one was ever allocated.
+ */
+void dgnc_tracer_free(void)
+{
+ if(dgnc_trcbuf)
+ vfree(dgnc_trcbuf);
+}
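
The (currently "#if 0"-ed) tracer keeps its messages in a fixed buffer whose current end
is marked with a tilde, wrapping back to the start when a message would run off the end.
A minimal userspace sketch of that TRC_ON_OVERFLOW_WRAP_AROUND scheme, with a made-up
buffer size:

#include <stdio.h>
#include <string.h>

#define TRCBUF_SIZE 64			/* made-up size for the demo */

static char trcbuf[TRCBUF_SIZE];
static int trcbufi;			/* index of the tilde at the end */

static void trace(const char *msg)
{
	size_t len = strlen(msg);

	if (len >= TRCBUF_SIZE - 2)
		return;				/* message too big to store */
	if (trcbufi + len >= TRCBUF_SIZE - 1) {
		trcbuf[trcbufi] = '\0';		/* wipe the old end marker */
		trcbufi = 0;			/* wrap to the beginning */
	}
	memcpy(&trcbuf[trcbufi], msg, len);
	trcbufi += len;
	trcbuf[trcbufi] = '~';			/* tilde marks the current end */
	trcbuf[trcbufi + 1] = '\0';
}

int main(void)
{
	trace("open port 0\n");
	trace("write 16 bytes\n");
	trace("close port 0\n");
	printf("%s\n", trcbuf);			/* prints the trace, ending in '~' */
	return 0;
}
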
diff --git a/drivers/staging/dgnc/dgnc_trace.h b/drivers/staging/dgnc/dgnc_trace.h
new file mode 100644
index 00000000000..efed88a627d
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_trace.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ *
+ *****************************************************************************
+ * Header file for dgnc_trace.c
+ *
+ */
+
+#ifndef __DGNC_TRACE_H
+#define __DGNC_TRACE_H
+
+#include "dgnc_driver.h"
+
+#if 0
+
+# if !defined(TRC_TO_KMEM) && !defined(TRC_TO_CONSOLE)
+ void dgnc_tracef(const char *fmt, ...);
+# else
+ void dgnc_tracef(const char *fmt, ...);
+# endif
+
+#endif
+
+void dgnc_tracer_free(void);
+
+#endif
+
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
new file mode 100644
index 00000000000..a7bb6bceb9e
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -0,0 +1,3544 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * NOTE TO LINUX KERNEL HACKERS: DO NOT REFORMAT THIS CODE!
+ *
+ * This is shared code between Digi's CVS archive and the
+ * Linux Kernel sources.
+ * Changing the source just for reformatting needlessly breaks
+ * our CVS diff history.
+ *
+ * Send any bug fixes/changes to: Eng.Linux at digi dot com.
+ * Thank you.
+ */
+
+/************************************************************************
+ *
+ * This file implements the tty driver functionality for the
+ * Neo and ClassicBoard PCI based product lines.
+ *
+ ************************************************************************
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/sched.h> /* For jiffies, task states */
+#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_reg.h>
+#include <linux/slab.h>
+#include <linux/delay.h> /* For udelay */
+#include <asm/uaccess.h> /* For copy_from_user/copy_to_user */
+#include <linux/pci.h>
+
+#include "dgnc_driver.h"
+#include "dgnc_tty.h"
+#include "dgnc_types.h"
+#include "dgnc_trace.h"
+#include "dgnc_neo.h"
+#include "dgnc_cls.h"
+#include "dpacompat.h"
+#include "dgnc_sysfs.h"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)
+#define init_MUTEX(sem) sema_init(sem, 1)
+#define DECLARE_MUTEX(name) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+#endif
+
+/*
+ * internal variables
+ */
+static struct board_t *dgnc_BoardsByMajor[256];
+static uchar *dgnc_TmpWriteBuf = NULL;
+static DECLARE_MUTEX(dgnc_TmpWriteSem);
+
+/*
+ * Default transparent print information.
+ */
+static struct digi_t dgnc_digi_init = {
+ .digi_flags = DIGI_COOK, /* Flags */
+ .digi_maxcps = 100, /* Max CPS */
+ .digi_maxchar = 50, /* Max chars in print queue */
+ .digi_bufsize = 100, /* Printer buffer size */
+ .digi_onlen = 4, /* size of printer on string */
+ .digi_offlen = 4, /* size of printer off string */
+ .digi_onstr = "\033[5i", /* ANSI printer on string ] */
+ .digi_offstr = "\033[4i", /* ANSI printer off string ] */
+ .digi_term = "ansi" /* default terminal type */
+};
+
+
+/*
+ * Define a local default termios struct. All ports will be created
+ * with this termios initially.
+ *
+ * This defines a raw port at 9600 baud, 8 data bits, no parity,
+ * 1 stop bit.
+ */
+static struct ktermios DgncDefaultTermios =
+{
+ .c_iflag = (DEFAULT_IFLAGS), /* iflags */
+ .c_oflag = (DEFAULT_OFLAGS), /* oflags */
+ .c_cflag = (DEFAULT_CFLAGS), /* cflags */
+ .c_lflag = (DEFAULT_LFLAGS), /* lflags */
+ .c_cc = INIT_C_CC,
+ .c_line = 0,
+};
+
+
+/* Our function prototypes */
+static int dgnc_tty_open(struct tty_struct *tty, struct file *file);
+static void dgnc_tty_close(struct tty_struct *tty, struct file *file);
+static int dgnc_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch);
+static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg);
+static int dgnc_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retinfo);
+static int dgnc_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_info);
+static int dgnc_tty_write_room(struct tty_struct *tty);
+static int dgnc_tty_put_char(struct tty_struct *tty, unsigned char c);
+static int dgnc_tty_chars_in_buffer(struct tty_struct *tty);
+static void dgnc_tty_start(struct tty_struct *tty);
+static void dgnc_tty_stop(struct tty_struct *tty);
+static void dgnc_tty_throttle(struct tty_struct *tty);
+static void dgnc_tty_unthrottle(struct tty_struct *tty);
+static void dgnc_tty_flush_chars(struct tty_struct *tty);
+static void dgnc_tty_flush_buffer(struct tty_struct *tty);
+static void dgnc_tty_hangup(struct tty_struct *tty);
+static int dgnc_set_modem_info(struct tty_struct *tty, unsigned int command, unsigned int __user *value);
+static int dgnc_get_modem_info(struct channel_t *ch, unsigned int __user *value);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+static int dgnc_tty_tiocmget(struct tty_struct *tty);
+static int dgnc_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear);
+#else
+static int dgnc_tty_tiocmget(struct tty_struct *tty, struct file *file);
+static int dgnc_tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear);
+#endif
+static int dgnc_tty_send_break(struct tty_struct *tty, int msec);
+static void dgnc_tty_wait_until_sent(struct tty_struct *tty, int timeout);
+static int dgnc_tty_write(struct tty_struct *tty, const unsigned char *buf, int count);
+static void dgnc_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios);
+static void dgnc_tty_send_xchar(struct tty_struct *tty, char ch);
+
+
+static const struct tty_operations dgnc_tty_ops = {
+ .open = dgnc_tty_open,
+ .close = dgnc_tty_close,
+ .write = dgnc_tty_write,
+ .write_room = dgnc_tty_write_room,
+ .flush_buffer = dgnc_tty_flush_buffer,
+ .chars_in_buffer = dgnc_tty_chars_in_buffer,
+ .flush_chars = dgnc_tty_flush_chars,
+ .ioctl = dgnc_tty_ioctl,
+ .set_termios = dgnc_tty_set_termios,
+ .stop = dgnc_tty_stop,
+ .start = dgnc_tty_start,
+ .throttle = dgnc_tty_throttle,
+ .unthrottle = dgnc_tty_unthrottle,
+ .hangup = dgnc_tty_hangup,
+ .put_char = dgnc_tty_put_char,
+ .tiocmget = dgnc_tty_tiocmget,
+ .tiocmset = dgnc_tty_tiocmset,
+ .break_ctl = dgnc_tty_send_break,
+ .wait_until_sent = dgnc_tty_wait_until_sent,
+ .send_xchar = dgnc_tty_send_xchar
+};
+
+/************************************************************************
+ *
+ * TTY Initialization/Cleanup Functions
+ *
+ ************************************************************************/
+
+/*
+ * dgnc_tty_preinit()
+ *
+ * Initialize any global tty related data before we download any boards.
+ */
+int dgnc_tty_preinit(void)
+{
+ /*
+ * Allocate a buffer for doing the copy from user space to
+ * kernel space in dgnc_write(). We only use one buffer and
+ * control access to it with a semaphore. If we are paging, we
+ * are already in trouble so one buffer won't hurt much anyway.
+ *
+ * We are okay to sleep in the malloc, as this routine
+ * is only called during module load, (not in interrupt context),
+ * and with no locks held.
+ */
+ dgnc_TmpWriteBuf = kmalloc(WRITEBUFLEN, GFP_KERNEL);
+
+ if (!dgnc_TmpWriteBuf) {
+ DPR_INIT(("unable to allocate tmp write buf"));
+ return (-ENOMEM);
+ }
+
+ return(0);
+}
+
+
+/*
+ * dgnc_tty_register()
+ *
+ * Init the tty subsystem for this board.
+ */
+int dgnc_tty_register(struct board_t *brd)
+{
+ int rc = 0;
+
+ DPR_INIT(("tty_register start\n"));
+
+ memset(&brd->SerialDriver, 0, sizeof(struct tty_driver));
+ memset(&brd->PrintDriver, 0, sizeof(struct tty_driver));
+
+ brd->SerialDriver.magic = TTY_DRIVER_MAGIC;
+
+ snprintf(brd->SerialName, MAXTTYNAMELEN, "tty_dgnc_%d_", brd->boardnum);
+
+ brd->SerialDriver.name = brd->SerialName;
+ brd->SerialDriver.name_base = 0;
+ brd->SerialDriver.major = 0;
+ brd->SerialDriver.minor_start = 0;
+ brd->SerialDriver.num = brd->maxports;
+ brd->SerialDriver.type = TTY_DRIVER_TYPE_SERIAL;
+ brd->SerialDriver.subtype = SERIAL_TYPE_NORMAL;
+ brd->SerialDriver.init_termios = DgncDefaultTermios;
+ brd->SerialDriver.driver_name = DRVSTR;
+ brd->SerialDriver.flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
+
+ /*
+ * The kernel wants space to store pointers to
+ * tty_struct's and termios's.
+ */
+ brd->SerialDriver.ttys = kzalloc(brd->maxports * sizeof(struct tty_struct *), GFP_KERNEL);
+ if (!brd->SerialDriver.ttys)
+ return(-ENOMEM);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ brd->SerialDriver.refcount = brd->TtyRefCnt;
+#else
+ kref_init(&brd->SerialDriver.kref);
+#endif
+
+ brd->SerialDriver.termios = kzalloc(brd->maxports * sizeof(struct ktermios *), GFP_KERNEL);
+ if (!brd->SerialDriver.termios)
+ return(-ENOMEM);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
+ brd->SerialDriver.termios_locked = kzalloc(brd->maxports * sizeof(struct ktermios *), GFP_KERNEL);
+ if (!brd->SerialDriver.termios_locked)
+ return(-ENOMEM);
+#endif
+ /*
+ * Entry points for driver. Called by the kernel from
+ * tty_io.c and n_tty.c.
+ */
+ tty_set_operations(&brd->SerialDriver, &dgnc_tty_ops);
+
+ if (!brd->dgnc_Major_Serial_Registered) {
+ /* Register tty devices */
+ rc = tty_register_driver(&brd->SerialDriver);
+ if (rc < 0) {
+ APR(("Can't register tty device (%d)\n", rc));
+ return(rc);
+ }
+ brd->dgnc_Major_Serial_Registered = TRUE;
+ }
+
+ /*
+ * If we're doing transparent print, we have to do all of the above
+	 * again, separately, so we don't get the LD confused about what major
+ * we are when we get into the dgnc_tty_open() routine.
+ */
+ brd->PrintDriver.magic = TTY_DRIVER_MAGIC;
+ snprintf(brd->PrintName, MAXTTYNAMELEN, "pr_dgnc_%d_", brd->boardnum);
+
+ brd->PrintDriver.name = brd->PrintName;
+ brd->PrintDriver.name_base = 0;
+ brd->PrintDriver.major = brd->SerialDriver.major;
+ brd->PrintDriver.minor_start = 0x80;
+ brd->PrintDriver.num = brd->maxports;
+ brd->PrintDriver.type = TTY_DRIVER_TYPE_SERIAL;
+ brd->PrintDriver.subtype = SERIAL_TYPE_NORMAL;
+ brd->PrintDriver.init_termios = DgncDefaultTermios;
+ brd->PrintDriver.driver_name = DRVSTR;
+ brd->PrintDriver.flags = (TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_HARDWARE_BREAK);
+
+ /*
+ * The kernel wants space to store pointers to
+	 * tty_struct's and termios's. Must be kept separate from
+ * the Serial Driver so we don't get confused
+ */
+ brd->PrintDriver.ttys = kzalloc(brd->maxports * sizeof(struct tty_struct *), GFP_KERNEL);
+ if (!brd->PrintDriver.ttys)
+ return(-ENOMEM);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
+ brd->PrintDriver.refcount = brd->TtyRefCnt;
+#else
+ kref_init(&brd->PrintDriver.kref);
+#endif
+
+ brd->PrintDriver.termios = kzalloc(brd->maxports * sizeof(struct ktermios *), GFP_KERNEL);
+ if (!brd->PrintDriver.termios)
+ return(-ENOMEM);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0)
+ brd->PrintDriver.termios_locked = kzalloc(brd->maxports * sizeof(struct ktermios *), GFP_KERNEL);
+ if (!brd->PrintDriver.termios_locked)
+ return(-ENOMEM);
+#endif
+
+ /*
+ * Entry points for driver. Called by the kernel from
+ * tty_io.c and n_tty.c.
+ */
+ tty_set_operations(&brd->PrintDriver, &dgnc_tty_ops);
+
+ if (!brd->dgnc_Major_TransparentPrint_Registered) {
+ /* Register Transparent Print devices */
+ rc = tty_register_driver(&brd->PrintDriver);
+ if (rc < 0) {
+ APR(("Can't register Transparent Print device (%d)\n", rc));
+ return(rc);
+ }
+ brd->dgnc_Major_TransparentPrint_Registered = TRUE;
+ }
+
+ dgnc_BoardsByMajor[brd->SerialDriver.major] = brd;
+ brd->dgnc_Serial_Major = brd->SerialDriver.major;
+ brd->dgnc_TransparentPrint_Major = brd->PrintDriver.major;
+
+ DPR_INIT(("DGNC REGISTER TTY: MAJOR: %d\n", brd->SerialDriver.major));
+
+ return (rc);
+}
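
Note that the transparent-print driver shares the serial driver's major and starts at
minor 0x80. A standalone sketch of how a minor number could be decoded back into a unit
under that assumption (the real PORT_NUM()/IS_PRINT() macros live in a driver header that
is not part of this hunk):

#include <stdio.h>

/* Assumes print units simply occupy minors 128 and up, matching the
 * PrintDriver's minor_start of 0x80 and ch_pun.un_dev = i + 128 below.
 */
static void describe_minor(unsigned int minor)
{
	int is_print = (minor & 0x80) != 0;
	unsigned int port = minor & 0x7f;

	printf("minor %u -> %s unit, port %u\n",
	       minor, is_print ? "transparent print" : "serial", port);
}

int main(void)
{
	describe_minor(0);	/* serial, port 0 */
	describe_minor(3);	/* serial, port 3 */
	describe_minor(131);	/* print,  port 3 */
	return 0;
}
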
+
+
+/*
+ * dgnc_tty_init()
+ *
+ * Init the tty subsystem. Called once per board after board has been
+ * downloaded and init'ed.
+ */
+int dgnc_tty_init(struct board_t *brd)
+{
+ int i;
+ void __iomem *vaddr;
+ struct channel_t *ch;
+
+ if (!brd)
+ return (-ENXIO);
+
+ DPR_INIT(("dgnc_tty_init start\n"));
+
+ /*
+ * Initialize board structure elements.
+ */
+
+ vaddr = brd->re_map_membase;
+
+ brd->nasync = brd->maxports;
+
+ /*
+ * Allocate channel memory that might not have been allocated
+ * when the driver was first loaded.
+ */
+ for (i = 0; i < brd->nasync; i++) {
+ if (!brd->channels[i]) {
+
+ /*
+ * Okay to malloc with GFP_KERNEL, we are not at
+ * interrupt context, and there are no locks held.
+ */
+ brd->channels[i] = kzalloc(sizeof(struct channel_t), GFP_KERNEL);
+ if (!brd->channels[i]) {
+ DPR_CORE(("%s:%d Unable to allocate memory for channel struct\n",
+ __FILE__, __LINE__));
+ }
+ }
+ }
+
+ ch = brd->channels[0];
+ vaddr = brd->re_map_membase;
+
+ /* Set up channel variables */
+ for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {
+
+ if (!brd->channels[i])
+ continue;
+
+ DGNC_SPINLOCK_INIT(ch->ch_lock);
+
+ /* Store all our magic numbers */
+ ch->magic = DGNC_CHANNEL_MAGIC;
+ ch->ch_tun.magic = DGNC_UNIT_MAGIC;
+ ch->ch_tun.un_ch = ch;
+ ch->ch_tun.un_type = DGNC_SERIAL;
+ ch->ch_tun.un_dev = i;
+
+ ch->ch_pun.magic = DGNC_UNIT_MAGIC;
+ ch->ch_pun.un_ch = ch;
+ ch->ch_pun.un_type = DGNC_PRINT;
+ ch->ch_pun.un_dev = i + 128;
+
+ if (brd->bd_uart_offset == 0x200)
+ ch->ch_neo_uart = vaddr + (brd->bd_uart_offset * i);
+ else
+ ch->ch_cls_uart = vaddr + (brd->bd_uart_offset * i);
+
+ ch->ch_bd = brd;
+ ch->ch_portnum = i;
+ ch->ch_digi = dgnc_digi_init;
+
+ /* .25 second delay */
+ ch->ch_close_delay = 250;
+
+ init_waitqueue_head(&ch->ch_flags_wait);
+ init_waitqueue_head(&ch->ch_tun.un_flags_wait);
+ init_waitqueue_head(&ch->ch_pun.un_flags_wait);
+ init_waitqueue_head(&ch->ch_sniff_wait);
+
+ {
+ struct device *classp;
+ classp = tty_register_device(&brd->SerialDriver, i,
+ &(ch->ch_bd->pdev->dev));
+ ch->ch_tun.un_sysfs = classp;
+ dgnc_create_tty_sysfs(&ch->ch_tun, classp);
+
+ classp = tty_register_device(&brd->PrintDriver, i,
+ &(ch->ch_bd->pdev->dev));
+ ch->ch_pun.un_sysfs = classp;
+ dgnc_create_tty_sysfs(&ch->ch_pun, classp);
+ }
+
+ }
+
+ DPR_INIT(("dgnc_tty_init finish\n"));
+
+ return (0);
+}
+
+
+/*
+ * dgnc_tty_post_uninit()
+ *
+ * UnInitialize any global tty related data.
+ */
+void dgnc_tty_post_uninit(void)
+{
+ if (dgnc_TmpWriteBuf) {
+ kfree(dgnc_TmpWriteBuf);
+ dgnc_TmpWriteBuf = NULL;
+ }
+}
+
+
+/*
+ * dgnc_tty_uninit()
+ *
+ * Uninitialize the TTY portion of this driver. Free all memory and
+ * resources.
+ */
+void dgnc_tty_uninit(struct board_t *brd)
+{
+ int i = 0;
+
+ if (brd->dgnc_Major_Serial_Registered) {
+ dgnc_BoardsByMajor[brd->SerialDriver.major] = NULL;
+ brd->dgnc_Serial_Major = 0;
+ for (i = 0; i < brd->nasync; i++) {
+ dgnc_remove_tty_sysfs(brd->channels[i]->ch_tun.un_sysfs);
+ tty_unregister_device(&brd->SerialDriver, i);
+ }
+ tty_unregister_driver(&brd->SerialDriver);
+ brd->dgnc_Major_Serial_Registered = FALSE;
+ }
+
+ if (brd->dgnc_Major_TransparentPrint_Registered) {
+ dgnc_BoardsByMajor[brd->PrintDriver.major] = NULL;
+ brd->dgnc_TransparentPrint_Major = 0;
+ for (i = 0; i < brd->nasync; i++) {
+ dgnc_remove_tty_sysfs(brd->channels[i]->ch_pun.un_sysfs);
+ tty_unregister_device(&brd->PrintDriver, i);
+ }
+ tty_unregister_driver(&brd->PrintDriver);
+ brd->dgnc_Major_TransparentPrint_Registered = FALSE;
+ }
+
+ if (brd->SerialDriver.ttys) {
+ kfree(brd->SerialDriver.ttys);
+ brd->SerialDriver.ttys = NULL;
+ }
+ if (brd->PrintDriver.ttys) {
+ kfree(brd->PrintDriver.ttys);
+ brd->PrintDriver.ttys = NULL;
+ }
+}
+
+
+#define TMPBUFLEN (1024)
+
+/*
+ * dgnc_sniff_nowait_nolock - Dump data out to the "sniff" buffer if the
+ * proc sniff file is opened...
+ */
+void dgnc_sniff_nowait_nolock(struct channel_t *ch, uchar *text, uchar *buf, int len)
+{
+ struct timeval tv;
+ int n;
+ int r;
+ int nbuf;
+ int i;
+ int tmpbuflen;
+ char tmpbuf[TMPBUFLEN];
+ char *p = tmpbuf;
+ int too_much_data;
+
+ /* Leave if sniff not open */
+ if (!(ch->ch_sniff_flags & SNIFF_OPEN))
+ return;
+
+ do_gettimeofday(&tv);
+
+ /* Create our header for data dump */
+ p += sprintf(p, "<%ld %ld><%s><", tv.tv_sec, tv.tv_usec, text);
+ tmpbuflen = p - tmpbuf;
+
+ do {
+ too_much_data = 0;
+
+ for (i = 0; i < len && tmpbuflen < (TMPBUFLEN - 4); i++) {
+ p += sprintf(p, "%02x ", *buf);
+ buf++;
+ tmpbuflen = p - tmpbuf;
+ }
+
+ if (tmpbuflen < (TMPBUFLEN - 4)) {
+ if (i > 0)
+ p += sprintf(p - 1, "%s\n", ">");
+ else
+ p += sprintf(p, "%s\n", ">");
+ } else {
+ too_much_data = 1;
+ len -= i;
+ }
+
+ nbuf = strlen(tmpbuf);
+ p = tmpbuf;
+
+ /*
+ * Loop while data remains.
+ */
+ while (nbuf > 0 && ch->ch_sniff_buf != 0) {
+ /*
+ * Determine the amount of available space left in the
+ * buffer. If there's none, wait until some appears.
+ */
+ n = (ch->ch_sniff_out - ch->ch_sniff_in - 1) & SNIFF_MASK;
+
+ /*
+ * If there is no space left to write to in our sniff buffer,
+ * we have no choice but to drop the data.
+ * We *cannot* sleep here waiting for space, because this
+ * function was probably called by the interrupt/timer routines!
+ */
+ if (n == 0) {
+ return;
+ }
+
+ /*
+ * Copy as much data as will fit.
+ */
+
+ if (n > nbuf)
+ n = nbuf;
+
+ r = SNIFF_MAX - ch->ch_sniff_in;
+
+ if (r <= n) {
+ memcpy(ch->ch_sniff_buf + ch->ch_sniff_in, p, r);
+
+ n -= r;
+ ch->ch_sniff_in = 0;
+ p += r;
+ nbuf -= r;
+ }
+
+ memcpy(ch->ch_sniff_buf + ch->ch_sniff_in, p, n);
+
+ ch->ch_sniff_in += n;
+ p += n;
+ nbuf -= n;
+
+ /*
+ * Wakeup any thread waiting for data
+ */
+ if (ch->ch_sniff_flags & SNIFF_WAIT_DATA) {
+ ch->ch_sniff_flags &= ~SNIFF_WAIT_DATA;
+ wake_up_interruptible(&ch->ch_sniff_wait);
+ }
+ }
+
+ /*
+ * If the user sent us too much data to push into our tmpbuf,
+ * we need to keep looping around on all the data.
+ */
+ if (too_much_data) {
+ p = tmpbuf;
+ tmpbuflen = 0;
+ }
+
+ } while (too_much_data);
+}
+
+
+/*=======================================================================
+ *
+ * dgnc_wmove - Write data to transmit queue.
+ *
+ * ch - Pointer to channel structure.
+ *	buf	- Pointer to characters to be moved.
+ * n - Number of characters to move.
+ *
+ *=======================================================================*/
+static void dgnc_wmove(struct channel_t *ch, char *buf, uint n)
+{
+ int remain;
+ uint head;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ head = ch->ch_w_head & WQUEUEMASK;
+
+ /*
+ * If the write wraps over the top of the circular buffer,
+ * move the portion up to the wrap point, and reset the
+ * pointers to the bottom.
+ */
+ remain = WQUEUESIZE - head;
+
+ if (n >= remain) {
+ n -= remain;
+ memcpy(ch->ch_wqueue + head, buf, remain);
+ head = 0;
+ buf += remain;
+ }
+
+ if (n > 0) {
+ /*
+ * Move rest of data.
+ */
+ remain = n;
+ memcpy(ch->ch_wqueue + head, buf, remain);
+ head += remain;
+ }
+
+ head &= WQUEUEMASK;
+ ch->ch_w_head = head;
+}
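
dgnc_wmove() fills a circular queue with at most two memcpy() calls: one up to the wrap
point and one starting again at the bottom. A standalone userspace sketch of the same
wrap logic, assuming a power-of-two queue size so the head mask works (QSIZE here is
made up):

#include <stdio.h>
#include <string.h>

#define QSIZE 16
#define QMASK (QSIZE - 1)

static char queue[QSIZE];
static unsigned int q_head;

static void wmove(const char *buf, unsigned int n)
{
	unsigned int head = q_head & QMASK;
	unsigned int remain = QSIZE - head;

	if (n >= remain) {		/* copy up to the wrap point ... */
		memcpy(queue + head, buf, remain);
		buf += remain;
		n -= remain;
		head = 0;		/* ... then restart at the bottom */
	}
	if (n > 0) {
		memcpy(queue + head, buf, n);
		head += n;
	}
	q_head = head & QMASK;
}

int main(void)
{
	wmove("0123456789", 10);
	wmove("ABCDEFGH", 8);		/* wraps over the top of the queue */
	printf("%.*s\n", QSIZE, queue);	/* GH23456789ABCDEF */
	return 0;
}

As in the driver, the sketch assumes the caller has already verified there is enough free
space before copying.
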
+
+
+
+
+/*=======================================================================
+ *
+ * dgnc_input - Process received data.
+ *
+ * ch - Pointer to channel structure.
+ *
+ *=======================================================================*/
+void dgnc_input(struct channel_t *ch)
+{
+ struct board_t *bd;
+ struct tty_struct *tp;
+ struct tty_ldisc *ld;
+ uint rmask;
+ ushort head;
+ ushort tail;
+ int data_len;
+ ulong lock_flags;
+ int flip_len;
+ int len = 0;
+ int n = 0;
+ int s = 0;
+ int i = 0;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ tp = ch->ch_tun.un_tty;
+
+ bd = ch->ch_bd;
+ if(!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /*
+ * Figure the number of characters in the buffer.
+ * Exit immediately if none.
+ */
+ rmask = RQUEUEMASK;
+ head = ch->ch_r_head & rmask;
+ tail = ch->ch_r_tail & rmask;
+ data_len = (head - tail) & rmask;
+
+ if (data_len == 0) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ DPR_READ(("dgnc_input start\n"));
+
+ /*
+ * If the device is not open, or CREAD is off,
+ * flush input data and return immediately.
+ */
+ if (!tp || (tp->magic != TTY_MAGIC) || !(ch->ch_tun.un_flags & UN_ISOPEN) ||
+ !(tp->termios.c_cflag & CREAD) || (ch->ch_tun.un_flags & UN_CLOSING)) {
+
+ DPR_READ(("input. dropping %d bytes on port %d...\n", data_len, ch->ch_portnum));
+ DPR_READ(("input. tp: %p tp->magic: %x MAGIC:%x ch flags: %x\n",
+ tp, tp ? tp->magic : 0, TTY_MAGIC, ch->ch_tun.un_flags));
+
+ ch->ch_r_head = tail;
+
+ /* Force queue flow control to be released, if needed */
+ dgnc_check_queue_flow_control(ch);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ /*
+ * If we are throttled, simply don't read any data.
+ */
+ if (ch->ch_flags & CH_FORCED_STOPI) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ DPR_READ(("Port %d throttled, not reading any data. head: %x tail: %x\n",
+ ch->ch_portnum, head, tail));
+ return;
+ }
+
+ DPR_READ(("dgnc_input start 2\n"));
+
+ flip_len = TTY_FLIPBUF_SIZE;
+
+ /* Chop down the length, if needed */
+ len = min(data_len, flip_len);
+ len = min(len, (N_TTY_BUF_SIZE - 1));
+
+ ld = tty_ldisc_ref(tp);
+
+#ifdef TTY_DONT_FLIP
+ /*
+ * If the DONT_FLIP flag is on, don't flush our buffer, and act
+ * like the ld doesn't have any space to put the data right now.
+ */
+ if (test_bit(TTY_DONT_FLIP, &tp->flags))
+ len = 0;
+#endif
+
+ /*
+ * If we were unable to get a reference to the ld,
+ * don't flush our buffer, and act like the ld doesn't
+ * have any space to put the data right now.
+ */
+ if (!ld) {
+ len = 0;
+ } else {
+ /*
+ * If ld doesn't have a pointer to a receive_buf function,
+ * flush the data, then act like the ld doesn't have any
+ * space to put the data right now.
+ */
+ if (!ld->ops->receive_buf) {
+ ch->ch_r_head = ch->ch_r_tail;
+ len = 0;
+ }
+ }
+
+ if (len <= 0) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ if (ld)
+ tty_ldisc_deref(ld);
+ return;
+ }
+
+ /*
+ * The tty layer in the kernel has changed in 2.6.16+.
+ *
+ * The flip buffers in the tty structure are no longer exposed,
+ * and probably will be going away eventually.
+ *
+ * If we are completely raw, we don't need to go through a lot
+ * of the tty layers that exist.
+ * In this case, we take the shortest and fastest route we
+ * can to relay the data to the user.
+ *
+ * On the other hand, if we are not raw, we need to go through
+	 * the new 2.6.16+ tty layer, which has a better-defined API.
+ */
+ len = tty_buffer_request_room(tp->port, len);
+ n = len;
+
+ /*
+	 * n now contains the maximum amount of data we can copy,
+ * bounded either by how much the Linux tty layer can handle,
+ * or the amount of data the card actually has pending...
+ */
+ while (n) {
+ s = ((head >= tail) ? head : RQUEUESIZE) - tail;
+ s = min(s, n);
+
+ if (s <= 0)
+ break;
+
+ /*
+ * If conditions are such that ld needs to see all
+ * UART errors, we will have to walk each character
+ * and error byte and send them to the buffer one at
+ * a time.
+ */
+ if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
+ for (i = 0; i < s; i++) {
+ if (*(ch->ch_equeue + tail + i) & UART_LSR_BI)
+ tty_insert_flip_char(tp->port, *(ch->ch_rqueue + tail + i), TTY_BREAK);
+ else if (*(ch->ch_equeue + tail + i) & UART_LSR_PE)
+ tty_insert_flip_char(tp->port, *(ch->ch_rqueue + tail + i), TTY_PARITY);
+ else if (*(ch->ch_equeue + tail + i) & UART_LSR_FE)
+ tty_insert_flip_char(tp->port, *(ch->ch_rqueue + tail + i), TTY_FRAME);
+ else
+ tty_insert_flip_char(tp->port, *(ch->ch_rqueue + tail + i), TTY_NORMAL);
+ }
+ }
+ else {
+ tty_insert_flip_string(tp->port, ch->ch_rqueue + tail, s);
+ }
+
+ dgnc_sniff_nowait_nolock(ch, "USER READ", ch->ch_rqueue + tail, s);
+
+ tail += s;
+ n -= s;
+ /* Flip queue if needed */
+ tail &= rmask;
+ }
+
+ ch->ch_r_tail = tail & rmask;
+ ch->ch_e_tail = tail & rmask;
+ dgnc_check_queue_flow_control(ch);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+	/* Tell the tty layer it's okay to "eat" the data now */
+ tty_flip_buffer_push(tp->port);
+
+ if (ld)
+ tty_ldisc_deref(ld);
+
+ DPR_READ(("dgnc_input - finish\n"));
+}
+
+
+/************************************************************************
+ * Determines when CARRIER changes state and takes appropriate
+ * action.
+ ************************************************************************/
+void dgnc_carrier(struct channel_t *ch)
+{
+ struct board_t *bd;
+
+ int virt_carrier = 0;
+ int phys_carrier = 0;
+
+ DPR_CARR(("dgnc_carrier called...\n"));
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ if (ch->ch_mistat & UART_MSR_DCD) {
+ DPR_CARR(("mistat: %x D_CD: %x\n", ch->ch_mistat, ch->ch_mistat & UART_MSR_DCD));
+ phys_carrier = 1;
+ }
+
+ if (ch->ch_digi.digi_flags & DIGI_FORCEDCD) {
+ virt_carrier = 1;
+ }
+
+ if (ch->ch_c_cflag & CLOCAL) {
+ virt_carrier = 1;
+ }
+
+
+ DPR_CARR(("DCD: physical: %d virt: %d\n", phys_carrier, virt_carrier));
+
+ /*
+ * Test for a VIRTUAL carrier transition to HIGH.
+ */
+ if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {
+
+ /*
+ * When carrier rises, wake any threads waiting
+ * for carrier in the open routine.
+ */
+
+ DPR_CARR(("carrier: virt DCD rose\n"));
+
+ if (waitqueue_active(&(ch->ch_flags_wait)))
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+
+ /*
+ * Test for a PHYSICAL carrier transition to HIGH.
+ */
+ if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {
+
+ /*
+ * When carrier rises, wake any threads waiting
+ * for carrier in the open routine.
+ */
+
+ DPR_CARR(("carrier: physical DCD rose\n"));
+
+ if (waitqueue_active(&(ch->ch_flags_wait)))
+ wake_up_interruptible(&ch->ch_flags_wait);
+ }
+
+ /*
+ * Test for a PHYSICAL transition to low, so long as we aren't
+ * currently ignoring physical transitions (which is what "virtual
+ * carrier" indicates).
+ *
+ * The transition of the virtual carrier to low really doesn't
+ * matter... it really only means "ignore carrier state", not
+ * "make pretend that carrier is there".
+ */
+ if ((virt_carrier == 0) && ((ch->ch_flags & CH_CD) != 0) &&
+ (phys_carrier == 0))
+ {
+
+ /*
+ * When carrier drops:
+ *
+ * Drop carrier on all open units.
+ *
+ * Flush queues, waking up any task waiting in the
+ * line discipline.
+ *
+ * Send a hangup to the control terminal.
+ *
+ * Enable all select calls.
+ */
+ if (waitqueue_active(&(ch->ch_flags_wait)))
+ wake_up_interruptible(&ch->ch_flags_wait);
+
+ if (ch->ch_tun.un_open_count > 0) {
+ DPR_CARR(("Sending tty hangup\n"));
+ tty_hangup(ch->ch_tun.un_tty);
+ }
+
+ if (ch->ch_pun.un_open_count > 0) {
+ DPR_CARR(("Sending pr hangup\n"));
+ tty_hangup(ch->ch_pun.un_tty);
+ }
+ }
+
+ /*
+ * Make sure that our cached values reflect the current reality.
+ */
+ if (virt_carrier == 1)
+ ch->ch_flags |= CH_FCAR;
+ else
+ ch->ch_flags &= ~CH_FCAR;
+
+ if (phys_carrier == 1)
+ ch->ch_flags |= CH_CD;
+ else
+ ch->ch_flags &= ~CH_CD;
+}
+
+/*
+ * Assign the custom baud rate to the channel structure
+ */
+static void dgnc_set_custom_speed(struct channel_t *ch, uint newrate)
+{
+ int testdiv;
+ int testrate_high;
+ int testrate_low;
+ int deltahigh;
+ int deltalow;
+
+ if (newrate < 0)
+ newrate = 0;
+
+ /*
+ * Since the divisor is stored in a 16-bit integer, we make sure
+ * we don't allow any rates smaller than a 16-bit integer would allow.
+ * And of course, rates above the dividend won't fly.
+ */
+ if (newrate && newrate < ((ch->ch_bd->bd_dividend / 0xFFFF) + 1))
+ newrate = ((ch->ch_bd->bd_dividend / 0xFFFF) + 1);
+
+ if (newrate && newrate > ch->ch_bd->bd_dividend)
+ newrate = ch->ch_bd->bd_dividend;
+
+ while (newrate > 0) {
+ testdiv = ch->ch_bd->bd_dividend / newrate;
+
+ /*
+ * If we try to figure out what rate the board would use
+ * with the test divisor, it will be either equal or higher
+ * than the requested baud rate. If we then determine the
+ * rate with a divisor one higher, we will get the next lower
+ * supported rate below the requested.
+ */
+ testrate_high = ch->ch_bd->bd_dividend / testdiv;
+ testrate_low = ch->ch_bd->bd_dividend / (testdiv + 1);
+
+ /*
+ * If the rate for the requested divisor is correct, just
+ * use it and be done.
+ */
+ if (testrate_high == newrate )
+ break;
+
+ /*
+ * Otherwise, pick the rate that is closer (i.e. whichever rate
+ * has a smaller delta).
+ */
+ deltahigh = testrate_high - newrate;
+ deltalow = newrate - testrate_low;
+
+ if (deltahigh < deltalow) {
+ newrate = testrate_high;
+ } else {
+ newrate = testrate_low;
+ }
+
+ break;
+ }
+
+ ch->ch_custom_speed = newrate;
+
+ return;
+}
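
The selection above compares the rate produced by the computed divisor with the rate
produced by the next larger divisor and keeps whichever is closer to the request. A
standalone worked example of that arithmetic, using 921600 as an assumed, board-specific
dividend:

#include <stdio.h>

static unsigned int closest_rate(unsigned int dividend, unsigned int newrate)
{
	unsigned int testdiv, rate_high, rate_low;

	if (newrate == 0)
		return 0;
	if (newrate < dividend / 0xFFFF + 1)	/* divisor must fit in 16 bits */
		newrate = dividend / 0xFFFF + 1;
	if (newrate > dividend)			/* can't exceed the dividend */
		newrate = dividend;

	testdiv = dividend / newrate;
	rate_high = dividend / testdiv;		/* >= requested rate */
	rate_low = dividend / (testdiv + 1);	/* next lower supported rate */

	if (rate_high == newrate)
		return rate_high;
	return (rate_high - newrate < newrate - rate_low) ? rate_high : rate_low;
}

int main(void)
{
	/* 250000 is not exactly reachable: 921600/3 = 307200 and
	 * 921600/4 = 230400; 230400 is the closer of the two. */
	printf("%u\n", closest_rate(921600, 250000));
	return 0;
}
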
+
+
+void dgnc_check_queue_flow_control(struct channel_t *ch)
+{
+ int qleft = 0;
+
+ /* Store how much space we have left in the queue */
+ if ((qleft = ch->ch_r_tail - ch->ch_r_head - 1) < 0)
+ qleft += RQUEUEMASK + 1;
+
+ /*
+ * Check to see if we should enforce flow control on our queue because
+	 * the ld (or user) isn't reading data out of our queue fast enough.
+ *
+ * NOTE: This is done based on what the current flow control of the
+ * port is set for.
+ *
+ * 1) HWFLOW (RTS) - Turn off the UART's Receive interrupt.
+ * This will cause the UART's FIFO to back up, and force
+ * the RTS signal to be dropped.
+ * 2) SWFLOW (IXOFF) - Keep trying to send a stop character to
+ * the other side, in hopes it will stop sending data to us.
+ * 3) NONE - Nothing we can do. We will simply drop any extra data
+ * that gets sent into us when the queue fills up.
+ */
+ if (qleft < 256) {
+ /* HWFLOW */
+ if (ch->ch_digi.digi_flags & CTSPACE || ch->ch_c_cflag & CRTSCTS) {
+ if(!(ch->ch_flags & CH_RECEIVER_OFF)) {
+ ch->ch_bd->bd_ops->disable_receiver(ch);
+ ch->ch_flags |= (CH_RECEIVER_OFF);
+ DPR_READ(("Internal queue hit hilevel mark (%d)! Turning off interrupts.\n",
+ qleft));
+ }
+ }
+ /* SWFLOW */
+ else if (ch->ch_c_iflag & IXOFF) {
+ if (ch->ch_stops_sent <= MAX_STOPS_SENT) {
+ ch->ch_bd->bd_ops->send_stop_character(ch);
+ ch->ch_stops_sent++;
+ DPR_READ(("Sending stop char! Times sent: %x\n", ch->ch_stops_sent));
+ }
+ }
+ /* No FLOW */
+ else {
+ /* Empty... Can't do anything about the impending overflow... */
+ }
+ }
+
+ /*
+	 * Check to see if we should stop enforcing flow control because the
+	 * ld (or user) has finally read enough data out of our queue.
+ *
+ * NOTE: This is done based on what the current flow control of the
+ * port is set for.
+ *
+ * 1) HWFLOW (RTS) - Turn back on the UART's Receive interrupt.
+ * This will cause the UART's FIFO to raise RTS back up,
+ * which will allow the other side to start sending data again.
+ * 2) SWFLOW (IXOFF) - Send a start character to
+ * the other side, so it will start sending data to us again.
+ * 3) NONE - Do nothing. Since we didn't do anything to turn off the
+ * other side, we don't need to do anything now.
+ */
+ if (qleft > (RQUEUESIZE / 2)) {
+ /* HWFLOW */
+ if (ch->ch_digi.digi_flags & RTSPACE || ch->ch_c_cflag & CRTSCTS) {
+ if (ch->ch_flags & CH_RECEIVER_OFF) {
+ ch->ch_bd->bd_ops->enable_receiver(ch);
+ ch->ch_flags &= ~(CH_RECEIVER_OFF);
+ DPR_READ(("Internal queue hit lowlevel mark (%d)! Turning on interrupts.\n",
+ qleft));
+ }
+ }
+ /* SWFLOW */
+ else if (ch->ch_c_iflag & IXOFF && ch->ch_stops_sent) {
+ ch->ch_stops_sent = 0;
+ ch->ch_bd->bd_ops->send_start_character(ch);
+ DPR_READ(("Sending start char!\n"));
+ }
+ /* No FLOW */
+ else {
+ /* Nothing needed. */
+ }
+ }
+}
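
The space calculation and the two thresholds (enforce when fewer than 256 bytes are free,
release once more than half the queue is free) give the flow control some hysteresis. A
standalone sketch of that arithmetic, with an assumed queue size:

#include <stdio.h>

#define RQUEUESIZE 8192			/* example power-of-two size */
#define RQUEUEMASK (RQUEUESIZE - 1)

/* Same free-space computation as the driver: tail - head - 1, wrapped. */
static int queue_space_left(unsigned int head, unsigned int tail)
{
	int qleft = (int)tail - (int)head - 1;

	if (qleft < 0)
		qleft += RQUEUEMASK + 1;
	return qleft;
}

int main(void)
{
	unsigned int head = 8100, tail = 100;	/* nearly full queue */
	int qleft = queue_space_left(head, tail);

	printf("space left: %d\n", qleft);
	if (qleft < 256)
		printf("-> enforce flow control (drop RTS / send XOFF)\n");
	else if (qleft > RQUEUESIZE / 2)
		printf("-> release flow control (raise RTS / send XON)\n");
	return 0;
}
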
+
+
+void dgnc_wakeup_writes(struct channel_t *ch)
+{
+ int qlen = 0;
+ ulong lock_flags;
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /*
+ * If channel now has space, wake up anyone waiting on the condition.
+ */
+ if ((qlen = ch->ch_w_head - ch->ch_w_tail) < 0)
+ qlen += WQUEUESIZE;
+
+ if (qlen >= (WQUEUESIZE - 256)) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ if (ch->ch_tun.un_flags & UN_ISOPEN) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
+ if ((ch->ch_tun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+ ch->ch_tun.un_tty->ldisc->ops->write_wakeup)
+ {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ (ch->ch_tun.un_tty->ldisc->ops->write_wakeup)(ch->ch_tun.un_tty);
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ }
+#else
+ if ((ch->ch_tun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+ ch->ch_tun.un_tty->ldisc.ops->write_wakeup)
+ {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ (ch->ch_tun.un_tty->ldisc.ops->write_wakeup)(ch->ch_tun.un_tty);
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ }
+#endif
+
+ wake_up_interruptible(&ch->ch_tun.un_tty->write_wait);
+
+ /*
+ * If unit is set to wait until empty, check to make sure
+ * the queue AND FIFO are both empty.
+ */
+ if (ch->ch_tun.un_flags & UN_EMPTY) {
+ if ((qlen == 0) && (ch->ch_bd->bd_ops->get_uart_bytes_left(ch) == 0)) {
+ ch->ch_tun.un_flags &= ~(UN_EMPTY);
+
+ /*
+ * If RTS Toggle mode is on, whenever
+ * the queue and UART is empty, keep RTS low.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) {
+ ch->ch_mostat &= ~(UART_MCR_RTS);
+ ch->ch_bd->bd_ops->assert_modem_signals(ch);
+ }
+
+ /*
+ * If DTR Toggle mode is on, whenever
+ * the queue and UART is empty, keep DTR low.
+ */
+ if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) {
+ ch->ch_mostat &= ~(UART_MCR_DTR);
+ ch->ch_bd->bd_ops->assert_modem_signals(ch);
+ }
+ }
+ }
+
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+ }
+
+ if (ch->ch_pun.un_flags & UN_ISOPEN) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)
+ if ((ch->ch_pun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+ ch->ch_pun.un_tty->ldisc->ops->write_wakeup)
+ {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ (ch->ch_pun.un_tty->ldisc->ops->write_wakeup)(ch->ch_pun.un_tty);
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ }
+#else
+ if ((ch->ch_pun.un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
+ ch->ch_pun.un_tty->ldisc.ops->write_wakeup)
+ {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ (ch->ch_pun.un_tty->ldisc.ops->write_wakeup)(ch->ch_pun.un_tty);
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ }
+#endif
+
+ wake_up_interruptible(&ch->ch_pun.un_tty->write_wait);
+
+ /*
+ * If unit is set to wait until empty, check to make sure
+ * the queue AND FIFO are both empty.
+ */
+ if (ch->ch_pun.un_flags & UN_EMPTY) {
+ if ((qlen == 0) && (ch->ch_bd->bd_ops->get_uart_bytes_left(ch) == 0)) {
+ ch->ch_pun.un_flags &= ~(UN_EMPTY);
+ }
+ }
+
+ wake_up_interruptible(&ch->ch_pun.un_flags_wait);
+ }
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+}
+
+
+
+/************************************************************************
+ *
+ * TTY Entry points and helper functions
+ *
+ ************************************************************************/
+
+/*
+ * dgnc_tty_open()
+ *
+ */
+static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
+{
+ struct board_t *brd;
+ struct channel_t *ch;
+ struct un_t *un;
+ uint major = 0;
+ uint minor = 0;
+ int rc = 0;
+ ulong lock_flags;
+
+ rc = 0;
+
+ major = MAJOR(tty_devnum(tty));
+ minor = MINOR(tty_devnum(tty));
+
+ if (major > 255) {
+ return -ENXIO;
+ }
+
+ /* Get board pointer from our array of majors we have allocated */
+ brd = dgnc_BoardsByMajor[major];
+ if (!brd) {
+ return -ENXIO;
+ }
+
+ /*
+ * If board is not yet up to a state of READY, go to
+ * sleep waiting for it to happen or they cancel the open.
+ */
+ rc = wait_event_interruptible(brd->state_wait,
+ (brd->state & BOARD_READY));
+
+ if (rc) {
+ return rc;
+ }
+
+ DGNC_LOCK(brd->bd_lock, lock_flags);
+
+ /* If opened device is greater than our number of ports, bail. */
+ if (PORT_NUM(minor) > brd->nasync) {
+ DGNC_UNLOCK(brd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ ch = brd->channels[PORT_NUM(minor)];
+ if (!ch) {
+ DGNC_UNLOCK(brd->bd_lock, lock_flags);
+ return -ENXIO;
+ }
+
+ /* Drop board lock */
+ DGNC_UNLOCK(brd->bd_lock, lock_flags);
+
+ /* Grab channel lock */
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /* Figure out our type */
+ if (!IS_PRINT(minor)) {
+ un = &brd->channels[PORT_NUM(minor)]->ch_tun;
+ un->un_type = DGNC_SERIAL;
+ }
+ else if (IS_PRINT(minor)) {
+ un = &brd->channels[PORT_NUM(minor)]->ch_pun;
+ un->un_type = DGNC_PRINT;
+ }
+ else {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ DPR_OPEN(("%d Unknown TYPE!\n", __LINE__));
+ return -ENXIO;
+ }
+
+ /*
+ * If the port is still in a previous open, and in a state
+ * where we simply cannot safely keep going, wait until the
+ * state clears.
+ */
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ rc = wait_event_interruptible(ch->ch_flags_wait, ((ch->ch_flags & CH_OPENING) == 0));
+
+ /* If ret is non-zero, user ctrl-c'ed us */
+ if (rc) {
+ DPR_OPEN(("%d User ctrl c'ed\n", __LINE__));
+ return -EINTR;
+ }
+
+ /*
+ * If either unit is in the middle of the fragile part of close,
+ * we just cannot touch the channel safely.
+ * Go to sleep, knowing that when the channel can be
+ * touched safely, the close routine will signal the
+ * ch_flags_wait to wake us back up.
+ */
+ rc = wait_event_interruptible(ch->ch_flags_wait,
+ (((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_CLOSING) == 0));
+
+ /* If ret is non-zero, user ctrl-c'ed us */
+ if (rc) {
+ DPR_OPEN(("%d User ctrl c'ed\n", __LINE__));
+ return -EINTR;
+ }
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+
+ /* Store our unit into driver_data, so we always have it available. */
+ tty->driver_data = un;
+
+ DPR_OPEN(("Open called. MAJOR: %d MINOR:%d PORT_NUM: %x unit: %p NAME: %s\n",
+ MAJOR(tty_devnum(tty)), MINOR(tty_devnum(tty)), PORT_NUM(minor), un, brd->name));
+
+ DPR_OPEN(("%d: tflag=%x pflag=%x\n", __LINE__, ch->ch_tun.un_flags, ch->ch_pun.un_flags));
+
+ /*
+ * Initialize tty's
+ */
+ if (!(un->un_flags & UN_ISOPEN)) {
+ /* Store important variables. */
+ un->un_tty = tty;
+
+ /* Maybe do something here to the TTY struct as well? */
+ }
+
+
+ /*
+ * Allocate channel buffers for read/write/error.
+ * Set flag, so we don't get trounced on.
+ */
+ ch->ch_flags |= (CH_OPENING);
+
+ /* Drop locks, as malloc with GFP_KERNEL can sleep */
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ if (!ch->ch_rqueue)
+ ch->ch_rqueue = kzalloc(RQUEUESIZE, GFP_KERNEL);
+ if (!ch->ch_equeue)
+ ch->ch_equeue = kzalloc(EQUEUESIZE, GFP_KERNEL);
+ if (!ch->ch_wqueue)
+ ch->ch_wqueue = kzalloc(WQUEUESIZE, GFP_KERNEL);
+ /* Bail if any queue failed to allocate, clearing CH_OPENING first. */
+ if (!ch->ch_rqueue || !ch->ch_equeue || !ch->ch_wqueue) {
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_flags &= ~(CH_OPENING);
+ wake_up_interruptible(&ch->ch_flags_wait);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return -ENOMEM;
+ }
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_flags &= ~(CH_OPENING);
+ wake_up_interruptible(&ch->ch_flags_wait);
+
+ /*
+ * Initialize if neither the terminal nor the printer unit is open.
+ */
+ if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
+
+ DPR_OPEN(("dgnc_open: initializing channel in open...\n"));
+
+ /*
+ * Flush input queues.
+ */
+ ch->ch_r_head = ch->ch_r_tail = 0;
+ ch->ch_e_head = ch->ch_e_tail = 0;
+ ch->ch_w_head = ch->ch_w_tail = 0;
+
+ brd->bd_ops->flush_uart_write(ch);
+ brd->bd_ops->flush_uart_read(ch);
+
+ ch->ch_flags = 0;
+ ch->ch_cached_lsr = 0;
+ ch->ch_stop_sending_break = 0;
+ ch->ch_stops_sent = 0;
+
+ ch->ch_c_cflag = tty->termios.c_cflag;
+ ch->ch_c_iflag = tty->termios.c_iflag;
+ ch->ch_c_oflag = tty->termios.c_oflag;
+ ch->ch_c_lflag = tty->termios.c_lflag;
+ ch->ch_startc = tty->termios.c_cc[VSTART];
+ ch->ch_stopc = tty->termios.c_cc[VSTOP];
+
+ /*
+ * Bring up RTS and DTR...
+ * Also handle RTS or DTR toggle if set.
+ */
+ if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_RTS);
+ if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_DTR);
+
+ /* Tell UART to init itself */
+ brd->bd_ops->uart_init(ch);
+ }
+
+ /*
+ * Run param in case we changed anything
+ */
+ brd->bd_ops->param(tty);
+
+ dgnc_carrier(ch);
+
+ /*
+ * follow protocol for opening port
+ */
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ rc = dgnc_block_til_ready(tty, file, ch);
+
+ if (rc) {
+ DPR_OPEN(("dgnc_tty_open returning after dgnc_block_til_ready "
+ "with %d\n", rc));
+ }
+
+ /* No going back now, increment our unit and channel counters */
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_open_count++;
+ un->un_open_count++;
+ un->un_flags |= (UN_ISOPEN);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_OPEN(("dgnc_tty_open finished\n"));
+ return (rc);
+}
+
+
+/*
+ * dgnc_block_til_ready()
+ *
+ * Wait for DCD, if needed.
+ */
+static int dgnc_block_til_ready(struct tty_struct *tty, struct file *file, struct channel_t *ch)
+{
+ int retval = 0;
+ struct un_t *un = NULL;
+ ulong lock_flags;
+ uint old_flags = 0;
+ int sleep_on_un_flags = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC || !file || !ch || ch->magic != DGNC_CHANNEL_MAGIC) {
+ return (-ENXIO);
+ }
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC) {
+ return (-ENXIO);
+ }
+
+ DPR_OPEN(("dgnc_block_til_ready - before block.\n"));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
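+ /*
+ * Count ourselves as an open blocked waiting on the channel; this is
+ * undone once we fall out of the wait loop below.
+ */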
+ ch->ch_wopen++;
+
+ /* Loop forever */
+ while (1) {
+
+ sleep_on_un_flags = 0;
+
+ /*
+ * If board has failed somehow during our sleep, bail with error.
+ */
+ if (ch->ch_bd->state == BOARD_FAILED) {
+ retval = -ENXIO;
+ break;
+ }
+
+ /* If tty was hung up, break out of loop and set error. */
+ if (tty_hung_up_p(file)) {
+ retval = -EAGAIN;
+ break;
+ }
+
+ /*
+ * If either unit is in the middle of the fragile part of close,
+ * we just cannot touch the channel safely.
+ * Go back to sleep, knowing that when the channel can be
+ * touched safely, the close routine will signal the
+ * ch_wait_flags to wake us back up.
+ */
+ if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_CLOSING)) {
+
+ /*
+ * Our conditions to leave cleanly and happily:
+ * 1) NONBLOCKING on the tty is set.
+ * 2) CLOCAL is set.
+ * 3) DCD (fake or real) is active.
+ */
+
+ if (file->f_flags & O_NONBLOCK) {
+ break;
+ }
+
+ if (tty->flags & (1 << TTY_IO_ERROR)) {
+ retval = -EIO;
+ break;
+ }
+
+ if (ch->ch_flags & CH_CD) {
+ DPR_OPEN(("%d: ch_flags: %x\n", __LINE__, ch->ch_flags));
+ break;
+ }
+
+ if (ch->ch_flags & CH_FCAR) {
+ DPR_OPEN(("%d: ch_flags: %x\n", __LINE__, ch->ch_flags));
+ break;
+ }
+ }
+ else {
+ sleep_on_un_flags = 1;
+ }
+
+ /*
+ * If there is a signal pending, the user probably
+ * interrupted (ctrl-c) us.
+ * Leave loop with error set.
+ */
+ if (signal_pending(current)) {
+ DPR_OPEN(("%d: signal pending...\n", __LINE__));
+ retval = -ERESTARTSYS;
+ break;
+ }
+
+ DPR_OPEN(("dgnc_block_til_ready - blocking.\n"));
+
+ /*
+ * Store the flags before we let go of channel lock
+ */
+ if (sleep_on_un_flags)
+ old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags;
+ else
+ old_flags = ch->ch_flags;
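+
+ /*
+ * The waits below return when the watched flag word differs from this
+ * snapshot, so any relevant state change wakes us up to re-evaluate.
+ */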
+
+ /*
+ * Let go of channel lock before calling schedule.
+ * Our poller will get any FEP events and wake us up when DCD
+ * eventually goes active.
+ */
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_OPEN(("Going to sleep on %s flags...\n",
+ (sleep_on_un_flags ? "un" : "ch")));
+
+ /*
+ * Wait for something in the flags to change from the current value.
+ */
+ if (sleep_on_un_flags) {
+ retval = wait_event_interruptible(un->un_flags_wait,
+ (old_flags != (ch->ch_tun.un_flags | ch->ch_pun.un_flags)));
+ }
+ else {
+ retval = wait_event_interruptible(ch->ch_flags_wait,
+ (old_flags != ch->ch_flags));
+ }
+
+ DPR_OPEN(("After sleep... retval: %x\n", retval));
+
+ /*
+ * We got woken up for some reason.
+ * Before looping around, grab our channel lock.
+ */
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ }
+
+ ch->ch_wopen--;
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_OPEN(("dgnc_block_til_ready - after blocking.\n"));
+
+ if (retval) {
+ DPR_OPEN(("dgnc_block_til_ready - done. error. retval: %x\n", retval));
+ return(retval);
+ }
+
+ DPR_OPEN(("dgnc_block_til_ready - done no error. jiffies: %lu\n", jiffies));
+
+ return(0);
+}
+
+
+/*
+ * dgnc_tty_hangup()
+ *
+ * Hangup the port. Like a close, but don't wait for output to drain.
+ */
+static void dgnc_tty_hangup(struct tty_struct *tty)
+{
+ struct un_t *un;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ DPR_CLOSE(("dgnc_hangup called. ch->ch_open_count: %d un->un_open_count: %d\n",
+ un->un_ch->ch_open_count, un->un_open_count));
+
+ /* flush the transmit queues */
+ dgnc_tty_flush_buffer(tty);
+
+ DPR_CLOSE(("dgnc_hangup finished. ch->ch_open_count: %d un->un_open_count: %d\n",
+ un->un_ch->ch_open_count, un->un_open_count));
+}
+
+
+/*
+ * dgnc_tty_close()
+ *
+ */
+static void dgnc_tty_close(struct tty_struct *tty, struct file *file)
+{
+ struct ktermios *ts;
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+ int rc = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ ts = &tty->termios;
+
+ DPR_CLOSE(("Close called\n"));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /*
+ * Determine if this is the last close or not - and if we agree about
+ * which type of close it is with the Line Discipline
+ */
+ if ((tty->count == 1) && (un->un_open_count != 1)) {
+ /*
+ * Uh, oh. tty->count is 1, which means that the tty
+ * structure will be freed. un_open_count should always
+ * be one in these conditions. If it's greater than
+ * one, we've got real problems, since it means the
+ * serial port won't be shutdown.
+ */
+ APR(("tty->count is 1, un open count is %d\n", un->un_open_count));
+ un->un_open_count = 1;
+ }
+
+ if (--un->un_open_count < 0) {
+ APR(("bad serial port open count of %d\n", un->un_open_count));
+ un->un_open_count = 0;
+ }
+
+ ch->ch_open_count--;
+
+ if (ch->ch_open_count && un->un_open_count) {
+ DPR_CLOSE(("dgnc_tty_close: not last close ch: %d un:%d\n",
+ ch->ch_open_count, un->un_open_count));
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return;
+ }
+
+ /* OK, it's the last close on the unit */
+ DPR_CLOSE(("dgnc_tty_close - last close on unit procedures\n"));
+
+ un->un_flags |= UN_CLOSING;
+
+ tty->closing = 1;
+
+
+ /*
+ * Only officially close channel if count is 0 and
+ * DIGI_PRINTER bit is not set.
+ */
+ if ((ch->ch_open_count == 0) && !(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
+
+ ch->ch_flags &= ~(CH_STOPI | CH_FORCED_STOPI);
+
+ /*
+ * turn off print device when closing print device.
+ */
+ if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON) ) {
+ dgnc_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ ch->ch_flags &= ~CH_PRON;
+ }
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ /* wait for output to drain */
+ /* This will also return if we take an interrupt */
+
+ DPR_CLOSE(("Calling wait_for_drain\n"));
+ rc = bd->bd_ops->drain(tty, 0);
+
+ DPR_CLOSE(("After calling wait_for_drain\n"));
+
+ if (rc) {
+ DPR_BASIC(("dgnc_tty_close - bad return: %d ", rc));
+ }
+
+ dgnc_tty_flush_buffer(tty);
+ tty_ldisc_flush(tty);
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ tty->closing = 0;
+
+ /*
+ * If we have HUPCL set, lower DTR and RTS
+ */
+ if (ch->ch_c_cflag & HUPCL) {
+ DPR_CLOSE(("Close. HUPCL set, dropping DTR/RTS\n"));
+
+ /* Drop RTS/DTR */
+ ch->ch_mostat &= ~(UART_MCR_DTR | UART_MCR_RTS);
+ bd->bd_ops->assert_modem_signals(ch);
+
+ /*
+ * Go to sleep to ensure RTS/DTR
+ * have been dropped for modems to see it.
+ */
+ if (ch->ch_close_delay) {
+ DPR_CLOSE(("Close. Sleeping for RTS/DTR drop\n"));
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ dgnc_ms_sleep(ch->ch_close_delay);
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ DPR_CLOSE(("Close. After sleeping for RTS/DTR drop\n"));
+ }
+ }
+
+ ch->ch_old_baud = 0;
+
+ /* Turn off UART interrupts for this port */
+ ch->ch_bd->bd_ops->uart_off(ch);
+ }
+ else {
+ /*
+ * turn off print device when closing print device.
+ */
+ if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON) ) {
+ dgnc_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ ch->ch_flags &= ~CH_PRON;
+ }
+ }
+
+ un->un_tty = NULL;
+ un->un_flags &= ~(UN_ISOPEN | UN_CLOSING);
+
+ DPR_CLOSE(("Close. Doing wakeups\n"));
+ wake_up_interruptible(&ch->ch_flags_wait);
+ wake_up_interruptible(&un->un_flags_wait);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_BASIC(("dgnc_tty_close - complete\n"));
+}
+
+
+/*
+ * dgnc_tty_chars_in_buffer()
+ *
+ * Return number of characters that have not been transmitted yet.
+ *
+ * This routine is used by the line discipline to determine if there
+ * is data waiting to be transmitted/drained/flushed or not.
+ */
+static int dgnc_tty_chars_in_buffer(struct tty_struct *tty)
+{
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+ ushort thead;
+ ushort ttail;
+ uint tmask;
+ uint chars = 0;
+ ulong lock_flags = 0;
+
+ if (tty == NULL)
+ return(0);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ tmask = WQUEUEMASK;
+ thead = ch->ch_w_head & tmask;
+ ttail = ch->ch_w_tail & tmask;
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
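+ /*
+ * The head/tail indexes were snapshotted under the channel lock above,
+ * so the occupancy math below (distance from tail to head, modulo
+ * WQUEUESIZE) can be done without holding the lock.
+ */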
+ if (ttail == thead) {
+ chars = 0;
+ } else {
+ if (thead >= ttail)
+ chars = thead - ttail;
+ else
+ chars = thead - ttail + WQUEUESIZE;
+ }
+
+ DPR_WRITE(("dgnc_tty_chars_in_buffer. Port: %x - %d (head: %d tail: %d)\n",
+ ch->ch_portnum, chars, thead, ttail));
+
+ return(chars);
+}
+
+
+/*
+ * dgnc_maxcps_room
+ *
+ * Reduces bytes_available to the max number of characters
+ * that can be sent currently given the maxcps value, and
+ * returns the new bytes_available. This only affects printer
+ * output.
+ */
+static int dgnc_maxcps_room(struct tty_struct *tty, int bytes_available)
+{
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+
+ if (!tty)
+ return (bytes_available);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (bytes_available);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (bytes_available);
+
+ /*
+ * If it's not the Transparent print device, return
+ * the full data amount.
+ */
+ if (un->un_type != DGNC_PRINT)
+ return (bytes_available);
+
+ if (ch->ch_digi.digi_maxcps > 0 && ch->ch_digi.digi_bufsize > 0 ) {
+ int cps_limit = 0;
+ unsigned long current_time = jiffies;
+ unsigned long buffer_time = current_time +
+ (HZ * ch->ch_digi.digi_bufsize) / ch->ch_digi.digi_maxcps;
+
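+ /*
+ * ch_cpstime is the jiffies value at which everything already queued
+ * to the printer will have drained at digi_maxcps; buffer_time is the
+ * latest that drain time may be while still holding no more than
+ * digi_bufsize characters.
+ */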
+ if (ch->ch_cpstime < current_time) {
+ /* buffer is empty */
+ ch->ch_cpstime = current_time; /* reset ch_cpstime */
+ cps_limit = ch->ch_digi.digi_bufsize;
+ }
+ else if (ch->ch_cpstime < buffer_time) {
+ /* still room in the buffer */
+ cps_limit = ((buffer_time - ch->ch_cpstime) * ch->ch_digi.digi_maxcps) / HZ;
+ }
+ else {
+ /* no room in the buffer */
+ cps_limit = 0;
+ }
+
+ bytes_available = min(cps_limit, bytes_available);
+ }
+
+ return (bytes_available);
+}
+
+
+/*
+ * dgnc_tty_write_room()
+ *
+ * Return space available in Tx buffer
+ */
+static int dgnc_tty_write_room(struct tty_struct *tty)
+{
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+ ushort head;
+ ushort tail;
+ ushort tmask;
+ int ret = 0;
+ ulong lock_flags = 0;
+
+ if (tty == NULL || dgnc_TmpWriteBuf == NULL)
+ return(0);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (0);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (0);
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ tmask = WQUEUEMASK;
+ head = (ch->ch_w_head) & tmask;
+ tail = (ch->ch_w_tail) & tmask;
+
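+ /*
+ * One slot is always left unused so that head == tail means "empty";
+ * free space is therefore (tail - head - 1), modulo WQUEUESIZE.
+ */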
+ if ((ret = tail - head - 1) < 0)
+ ret += WQUEUESIZE;
+
+ /* Limit printer to maxcps */
+ ret = dgnc_maxcps_room(tty, ret);
+
+ /*
+ * If we are printer device, leave space for
+ * possibly both the on and off strings.
+ */
+ if (un->un_type == DGNC_PRINT) {
+ if (!(ch->ch_flags & CH_PRON))
+ ret -= ch->ch_digi.digi_onlen;
+ ret -= ch->ch_digi.digi_offlen;
+ }
+ else {
+ if (ch->ch_flags & CH_PRON)
+ ret -= ch->ch_digi.digi_offlen;
+ }
+
+ if (ret < 0)
+ ret = 0;
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_WRITE(("dgnc_tty_write_room - %d tail: %d head: %d\n", ret, tail, head));
+
+ return(ret);
+}
+
+
+/*
+ * dgnc_tty_put_char()
+ *
+ * Put a character into ch->ch_buf
+ *
+ * - used by the line discipline for OPOST processing
+ */
+static int dgnc_tty_put_char(struct tty_struct *tty, unsigned char c)
+{
+ /*
+ * Simply call tty_write.
+ */
+ DPR_WRITE(("dgnc_tty_put_char called\n"));
+ dgnc_tty_write(tty, &c, 1);
+ return 1;
+}
+
+
+/*
+ * dgnc_tty_write()
+ *
+ * Take data from the user or kernel and send it out to the FEP.
+ * In here exists all the Transparent Print magic as well.
+ */
+static int dgnc_tty_write(struct tty_struct *tty,
+ const unsigned char *buf, int count)
+{
+ struct channel_t *ch = NULL;
+ struct un_t *un = NULL;
+ int bufcount = 0, n = 0;
+ int orig_count = 0;
+ ulong lock_flags;
+ ushort head;
+ ushort tail;
+ ushort tmask;
+ uint remain;
+ int from_user = 0;
+
+ if (tty == NULL || dgnc_TmpWriteBuf == NULL)
+ return(0);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return(0);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return(0);
+
+ if (!count)
+ return(0);
+
+ DPR_WRITE(("dgnc_tty_write: Port: %x tty=%p user=%d len=%d\n",
+ ch->ch_portnum, tty, from_user, count));
+
+ /*
+ * Store original amount of characters passed in.
+ * This helps to figure out if we should ask the FEP
+ * to send us an event when it has more space available.
+ */
+ orig_count = count;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /* Get our space available for the channel from the board */
+ tmask = WQUEUEMASK;
+ head = (ch->ch_w_head) & tmask;
+ tail = (ch->ch_w_tail) & tmask;
+
+ if ((bufcount = tail - head - 1) < 0)
+ bufcount += WQUEUESIZE;
+
+ DPR_WRITE(("%d: bufcount: %x count: %x tail: %x head: %x tmask: %x\n",
+ __LINE__, bufcount, count, tail, head, tmask));
+
+ /*
+ * Limit printer output to maxcps overall, with bursts allowed
+ * up to bufsize characters.
+ */
+ bufcount = dgnc_maxcps_room(tty, bufcount);
+
+ /*
+ * Take minimum of what the user wants to send, and the
+ * space available in the FEP buffer.
+ */
+ count = min(count, bufcount);
+
+ /*
+ * Bail if no space left.
+ */
+ if (count <= 0) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(0);
+ }
+
+ /*
+ * Output the printer ON string, if we are in terminal mode, but
+ * need to be in printer mode.
+ */
+ if ((un->un_type == DGNC_PRINT) && !(ch->ch_flags & CH_PRON)) {
+ dgnc_wmove(ch, ch->ch_digi.digi_onstr,
+ (int) ch->ch_digi.digi_onlen);
+ head = (ch->ch_w_head) & tmask;
+ ch->ch_flags |= CH_PRON;
+ }
+
+ /*
+ * On the other hand, output the printer OFF string, if we are
+ * currently in printer mode, but need to output to the terminal.
+ */
+ if ((un->un_type != DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
+ dgnc_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ head = (ch->ch_w_head) & tmask;
+ ch->ch_flags &= ~CH_PRON;
+ }
+
+ /*
+ * If there is nothing left to copy, or I can't handle any more data, leave.
+ */
+ if (count <= 0) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(0);
+ }
+
+ if (from_user) {
+
+ count = min(count, WRITEBUFLEN);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ /*
+ * If data is coming from user space, copy it into a temporary
+ * buffer so we don't get swapped out while doing the copy to
+ * the board.
+ */
+ /* we're allowed to block if it's from_user */
+ if (down_interruptible(&dgnc_TmpWriteSem)) {
+ return (-EINTR);
+ }
+
+ /*
+ * copy_from_user() returns the number
+ * of bytes that could *NOT* be copied.
+ */
+ count -= copy_from_user(dgnc_TmpWriteBuf, (const uchar __user *) buf, count);
+
+ if (!count) {
+ up(&dgnc_TmpWriteSem);
+ return(-EFAULT);
+ }
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ buf = dgnc_TmpWriteBuf;
+
+ }
+
+ n = count;
+
+ /*
+ * If the write wraps over the top of the circular buffer,
+ * move the portion up to the wrap point, and reset the
+ * pointers to the bottom.
+ */
+ remain = WQUEUESIZE - head;
+
+ if (n >= remain) {
+ n -= remain;
+ memcpy(ch->ch_wqueue + head, buf, remain);
+ dgnc_sniff_nowait_nolock(ch, "USER WRITE", ch->ch_wqueue + head, remain);
+ head = 0;
+ buf += remain;
+ }
+
+ if (n > 0) {
+ /*
+ * Move rest of data.
+ */
+ remain = n;
+ memcpy(ch->ch_wqueue + head, buf, remain);
+ dgnc_sniff_nowait_nolock(ch, "USER WRITE", ch->ch_wqueue + head, remain);
+ head += remain;
+ }
+
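+ /*
+ * Publish the new head index (wrapped back into range) now that the
+ * bytes are in the queue.
+ */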
+ if (count) {
+ head &= tmask;
+ ch->ch_w_head = head;
+ }
+
+#if 0
+ /*
+ * If this is the print device, and the
+ * printer is still on, we need to turn it
+ * off before going idle.
+ */
+ if (count == orig_count) {
+ if ((un->un_type == DGNC_PRINT) && (ch->ch_flags & CH_PRON)) {
+ head &= tmask;
+ ch->ch_w_head = head;
+ dgnc_wmove(ch, ch->ch_digi.digi_offstr,
+ (int) ch->ch_digi.digi_offlen);
+ head = (ch->ch_w_head) & tmask;
+ ch->ch_flags &= ~CH_PRON;
+ }
+ }
+#endif
+
+ /* Update printer buffer empty time. */
+ if ((un->un_type == DGNC_PRINT) && (ch->ch_digi.digi_maxcps > 0)
+ && (ch->ch_digi.digi_bufsize > 0)) {
+ ch->ch_cpstime += (HZ * count) / ch->ch_digi.digi_maxcps;
+ }
+
+ if (from_user) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ up(&dgnc_TmpWriteSem);
+ } else {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ }
+
+ DPR_WRITE(("Write finished - Write %d bytes of %d.\n", count, orig_count));
+
+ if (count) {
+ /*
+ * Channel lock is grabbed and then released
+ * inside this routine.
+ */
+ ch->ch_bd->bd_ops->copy_data_from_queue_to_uart(ch);
+ }
+
+ return (count);
+}
+
+
+/*
+ * Return modem signals to ld.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+static int dgnc_tty_tiocmget(struct tty_struct *tty)
+#else
+static int dgnc_tty_tiocmget(struct tty_struct *tty, struct file *file)
+#endif
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ int result = -EIO;
+ uchar mstat = 0;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return result;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return result;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return result;
+
+ DPR_IOCTL(("dgnc_tty_tiocmget start\n"));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
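+ /*
+ * Combine the cached output (ch_mostat) and input (ch_mistat) modem
+ * bits; they are translated into TIOCM_* flags below.
+ */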
+ mstat = (ch->ch_mostat | ch->ch_mistat);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ result = 0;
+
+ if (mstat & UART_MCR_DTR)
+ result |= TIOCM_DTR;
+ if (mstat & UART_MCR_RTS)
+ result |= TIOCM_RTS;
+ if (mstat & UART_MSR_CTS)
+ result |= TIOCM_CTS;
+ if (mstat & UART_MSR_DSR)
+ result |= TIOCM_DSR;
+ if (mstat & UART_MSR_RI)
+ result |= TIOCM_RI;
+ if (mstat & UART_MSR_DCD)
+ result |= TIOCM_CD;
+
+ DPR_IOCTL(("dgnc_tty_tiocmget finish\n"));
+
+ return result;
+}
+
+
+/*
+ * dgnc_tty_tiocmset()
+ *
+ * Set modem signals, called by ld.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39)
+static int dgnc_tty_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
+#else
+static int dgnc_tty_tiocmset(struct tty_struct *tty, struct file *file,
+ unsigned int set, unsigned int clear)
+#endif
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int ret = -EIO;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return ret;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return ret;
+
+ DPR_IOCTL(("dgnc_tty_tiocmset start\n"));
+
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ if (set & TIOCM_RTS) {
+ ch->ch_mostat |= UART_MCR_RTS;
+ }
+
+ if (set & TIOCM_DTR) {
+ ch->ch_mostat |= UART_MCR_DTR;
+ }
+
+ if (clear & TIOCM_RTS) {
+ ch->ch_mostat &= ~(UART_MCR_RTS);
+ }
+
+ if (clear & TIOCM_DTR) {
+ ch->ch_mostat &= ~(UART_MCR_DTR);
+ }
+
+ ch->ch_bd->bd_ops->assert_modem_signals(ch);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_tiocmset finish\n"));
+
+ return (0);
+}
+
+
+/*
+ * dgnc_tty_send_break()
+ *
+ * Send a Break, called by ld.
+ */
+static int dgnc_tty_send_break(struct tty_struct *tty, int msec)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int ret = -EIO;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return ret;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return ret;
+
+ switch (msec) {
+ case -1:
+ msec = 0xFFFF;
+ break;
+ case 0:
+ msec = 0;
+ break;
+ default:
+ break;
+ }
+
+ DPR_IOCTL(("dgnc_tty_send_break start 1. %lx\n", jiffies));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_bd->bd_ops->send_break(ch, msec);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_send_break finish\n"));
+
+ return (0);
+
+}
+
+
+/*
+ * dgnc_tty_wait_until_sent()
+ *
+ * wait until data has been transmitted, called by ld.
+ */
+static void dgnc_tty_wait_until_sent(struct tty_struct *tty, int timeout)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int rc;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ rc = bd->bd_ops->drain(tty, 0);
+ if (rc)
+ DPR_IOCTL(("dgnc_tty_wait_until_sent - bad return: %d\n", rc));
+}
+
+
+/*
+ * dgnc_send_xchar()
+ *
+ * send a high priority character, called by ld.
+ */
+static void dgnc_tty_send_xchar(struct tty_struct *tty, char c)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgnc_tty_send_xchar start\n"));
+ printk("dgnc_tty_send_xchar start\n");
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ bd->bd_ops->send_immediate_char(ch, c);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_send_xchar finish\n"));
+ printk("dgnc_tty_send_xchar finish\n");
+ return;
+}
+
+
+
+
+/*
+ * Return modem signals to ld.
+ */
+static inline int dgnc_get_mstat(struct channel_t *ch)
+{
+ unsigned char mstat;
+ int result = -EIO;
+ ulong lock_flags;
+
+ DPR_IOCTL(("dgnc_getmstat start\n"));
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return(-ENXIO);
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ mstat = (ch->ch_mostat | ch->ch_mistat);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ result = 0;
+
+ if (mstat & UART_MCR_DTR)
+ result |= TIOCM_DTR;
+ if (mstat & UART_MCR_RTS)
+ result |= TIOCM_RTS;
+ if (mstat & UART_MSR_CTS)
+ result |= TIOCM_CTS;
+ if (mstat & UART_MSR_DSR)
+ result |= TIOCM_DSR;
+ if (mstat & UART_MSR_RI)
+ result |= TIOCM_RI;
+ if (mstat & UART_MSR_DCD)
+ result |= TIOCM_CD;
+
+ DPR_IOCTL(("dgnc_getmstat finish\n"));
+
+ return(result);
+}
+
+
+
+/*
+ * Return modem signals to ld.
+ */
+static int dgnc_get_modem_info(struct channel_t *ch, unsigned int __user *value)
+{
+ int result;
+ int rc;
+
+ DPR_IOCTL(("dgnc_get_modem_info start\n"));
+
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return(-ENXIO);
+
+ result = dgnc_get_mstat(ch);
+
+ if (result < 0)
+ return (-ENXIO);
+
+ rc = put_user(result, value);
+
+ DPR_IOCTL(("dgnc_get_modem_info finish\n"));
+ return(rc);
+}
+
+
+/*
+ * dgnc_set_modem_info()
+ *
+ * Set modem signals, called by ld.
+ */
+static int dgnc_set_modem_info(struct tty_struct *tty, unsigned int command, unsigned int __user *value)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int ret = -ENXIO;
+ unsigned int arg = 0;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return ret;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return ret;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return ret;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return ret;
+
+ ret = 0;
+
+ DPR_IOCTL(("dgnc_set_modem_info() start\n"));
+
+ ret = get_user(arg, value);
+ if (ret)
+ return(ret);
+
+ switch (command) {
+ case TIOCMBIS:
+ if (arg & TIOCM_RTS) {
+ ch->ch_mostat |= UART_MCR_RTS;
+ }
+
+ if (arg & TIOCM_DTR) {
+ ch->ch_mostat |= UART_MCR_DTR;
+ }
+
+ break;
+
+ case TIOCMBIC:
+ if (arg & TIOCM_RTS) {
+ ch->ch_mostat &= ~(UART_MCR_RTS);
+ }
+
+ if (arg & TIOCM_DTR) {
+ ch->ch_mostat &= ~(UART_MCR_DTR);
+ }
+
+ break;
+
+ case TIOCMSET:
+
+ if (arg & TIOCM_RTS) {
+ ch->ch_mostat |= UART_MCR_RTS;
+ }
+ else {
+ ch->ch_mostat &= ~(UART_MCR_RTS);
+ }
+
+ if (arg & TIOCM_DTR) {
+ ch->ch_mostat |= UART_MCR_DTR;
+ }
+ else {
+ ch->ch_mostat &= ~(UART_MCR_DTR);
+ }
+
+ break;
+
+ default:
+ return(-EINVAL);
+ }
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_bd->bd_ops->assert_modem_signals(ch);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_set_modem_info finish\n"));
+
+ return (0);
+}
+
+
+/*
+ * dgnc_tty_digigeta()
+ *
+ * Ioctl to get the information for ditty.
+ */
+static int dgnc_tty_digigeta(struct tty_struct *tty, struct digi_t __user *retinfo)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ struct digi_t tmp;
+ ulong lock_flags;
+
+ if (!retinfo)
+ return (-EFAULT);
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return (-EFAULT);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (-EFAULT);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (-EFAULT);
+
+ memset(&tmp, 0, sizeof(tmp));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ memcpy(&tmp, &ch->ch_digi, sizeof(tmp));
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
+ return (-EFAULT);
+
+ return (0);
+}
+
+
+/*
+ * dgnc_tty_digiseta()
+ *
+ * Ioctl to set the information for ditty.
+ */
+static int dgnc_tty_digiseta(struct tty_struct *tty, struct digi_t __user *new_info)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ struct digi_t new_digi;
+ ulong lock_flags;
+
+ DPR_IOCTL(("DIGI_SETA start\n"));
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return (-EFAULT);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (-EFAULT);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (-EFAULT);
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (-EFAULT);
+
+ if (copy_from_user(&new_digi, new_info, sizeof(struct digi_t))) {
+ DPR_IOCTL(("DIGI_SETA failed copy_from_user\n"));
+ return(-EFAULT);
+ }
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /*
+ * Handle transitions to and from RTS Toggle.
+ */
+ if (!(ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) && (new_digi.digi_flags & DIGI_RTS_TOGGLE))
+ ch->ch_mostat &= ~(UART_MCR_RTS);
+ if ((ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE) && !(new_digi.digi_flags & DIGI_RTS_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_RTS);
+
+ /*
+ * Handle transitions to and from DTR Toggle.
+ */
+ if (!(ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) && (new_digi.digi_flags & DIGI_DTR_TOGGLE))
+ ch->ch_mostat &= ~(UART_MCR_DTR);
+ if ((ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE) && !(new_digi.digi_flags & DIGI_DTR_TOGGLE))
+ ch->ch_mostat |= (UART_MCR_DTR);
+
+ memcpy(&ch->ch_digi, &new_digi, sizeof(struct digi_t));
+
+ if (ch->ch_digi.digi_maxcps < 1)
+ ch->ch_digi.digi_maxcps = 1;
+
+ if (ch->ch_digi.digi_maxcps > 10000)
+ ch->ch_digi.digi_maxcps = 10000;
+
+ if (ch->ch_digi.digi_bufsize < 10)
+ ch->ch_digi.digi_bufsize = 10;
+
+ if (ch->ch_digi.digi_maxchar < 1)
+ ch->ch_digi.digi_maxchar = 1;
+
+ if (ch->ch_digi.digi_maxchar > ch->ch_digi.digi_bufsize)
+ ch->ch_digi.digi_maxchar = ch->ch_digi.digi_bufsize;
+
+ if (ch->ch_digi.digi_onlen > DIGI_PLEN)
+ ch->ch_digi.digi_onlen = DIGI_PLEN;
+
+ if (ch->ch_digi.digi_offlen > DIGI_PLEN)
+ ch->ch_digi.digi_offlen = DIGI_PLEN;
+
+ ch->ch_bd->bd_ops->param(tty);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("DIGI_SETA finish\n"));
+
+ return(0);
+}
+
+
+/*
+ * dgnc_set_termios()
+ */
+static void dgnc_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ unsigned long lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_c_cflag = tty->termios.c_cflag;
+ ch->ch_c_iflag = tty->termios.c_iflag;
+ ch->ch_c_oflag = tty->termios.c_oflag;
+ ch->ch_c_lflag = tty->termios.c_lflag;
+ ch->ch_startc = tty->termios.c_cc[VSTART];
+ ch->ch_stopc = tty->termios.c_cc[VSTOP];
+
+ ch->ch_bd->bd_ops->param(tty);
+ dgnc_carrier(ch);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+}
+
+
+static void dgnc_tty_throttle(struct tty_struct *tty)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags = 0;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgnc_tty_throttle start\n"));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_flags |= (CH_FORCED_STOPI);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_throttle finish\n"));
+}
+
+
+static void dgnc_tty_unthrottle(struct tty_struct *tty)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgnc_tty_unthrottle start\n"));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_flags &= ~(CH_FORCED_STOPI);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_unthrottle finish\n"));
+}
+
+
+static void dgnc_tty_start(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgcn_tty_start start\n"));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_flags &= ~(CH_FORCED_STOP);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_start finish\n"));
+}
+
+
+static void dgnc_tty_stop(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgnc_tty_stop start\n"));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_flags |= (CH_FORCED_STOP);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_stop finish\n"));
+}
+
+
+/*
+ * dgnc_tty_flush_chars()
+ *
+ * Flush the cook buffer
+ *
+ * Note to self, and any other poor souls who venture here:
+ *
+ * flush in this case DOES NOT mean dispose of the data.
+ * instead, it means "stop buffering and send it if you
+ * haven't already." Just guess how I figured that out... SRW 2-Jun-98
+ *
+ * It is also always called in interrupt context - JAR 8-Sept-99
+ */
+static void dgnc_tty_flush_chars(struct tty_struct *tty)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgnc_tty_flush_chars start\n"));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /* Do something maybe here */
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_flush_chars finish\n"));
+}
+
+
+
+/*
+ * dgnc_tty_flush_buffer()
+ *
+ * Flush Tx buffer (make in == out)
+ */
+static void dgnc_tty_flush_buffer(struct tty_struct *tty)
+{
+ struct channel_t *ch;
+ struct un_t *un;
+ ulong lock_flags;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return;
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return;
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return;
+
+ DPR_IOCTL(("dgnc_tty_flush_buffer on port: %d start\n", ch->ch_portnum));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_flags &= ~CH_STOP;
+
+ /* Flush our write queue */
+ ch->ch_w_head = ch->ch_w_tail;
+
+ /* Flush UARTs transmit FIFO */
+ ch->ch_bd->bd_ops->flush_uart_write(ch);
+
+ if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+ }
+ if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_pun.un_flags_wait);
+ }
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_flush_buffer finish\n"));
+}
+
+
+
+/*****************************************************************************
+ *
+ * The IOCTL function and all of its helpers
+ *
+ *****************************************************************************/
+
+/*
+ * dgnc_tty_ioctl()
+ *
+ * The usual assortment of ioctl's
+ */
+static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
+ unsigned long arg)
+{
+ struct board_t *bd;
+ struct channel_t *ch;
+ struct un_t *un;
+ int rc;
+ ulong lock_flags;
+ void __user *uarg = (void __user *) arg;
+
+ if (!tty || tty->magic != TTY_MAGIC)
+ return (-ENODEV);
+
+ un = tty->driver_data;
+ if (!un || un->magic != DGNC_UNIT_MAGIC)
+ return (-ENODEV);
+
+ ch = un->un_ch;
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
+ return (-ENODEV);
+
+ bd = ch->ch_bd;
+ if (!bd || bd->magic != DGNC_BOARD_MAGIC)
+ return (-ENODEV);
+
+ DPR_IOCTL(("dgnc_tty_ioctl start on port %d - cmd %s (%x), arg %lx\n",
+ ch->ch_portnum, dgnc_ioctl_name(cmd), cmd, arg));
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ if (un->un_open_count <= 0) {
+ DPR_BASIC(("dgnc_tty_ioctl - unit not open.\n"));
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(-EIO);
+ }
+
+ switch (cmd) {
+
+ /* Here are all the standard ioctl's that we MUST implement */
+
+ case TCSBRK:
+ /*
+ * TCSBRK is SVID version: non-zero arg --> no break
+ * this behaviour is exploited by tcdrain().
+ *
+ * According to the POSIX.1 spec (7.2.2.1.2) breaks should be
+ * between 0.25 and 0.5 seconds, so we send a 250 ms break,
+ * the low end of that range.
+ */
+ rc = tty_check_change(tty);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ if (rc) {
+ return(rc);
+ }
+
+ rc = ch->ch_bd->bd_ops->drain(tty, 0);
+
+ if (rc) {
+ DPR_IOCTL(("dgnc_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ if (!arg) {
+ ch->ch_bd->bd_ops->send_break(ch, 250);
+ }
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ ch->ch_portnum, dgnc_ioctl_name(cmd), cmd, arg));
+
+ return(0);
+
+
+ case TCSBRKP:
+ /* support for POSIX tcsendbreak()
+ * According to the POSIX.1 spec (7.2.2.1.2) breaks should be
+ * between 0.25 and 0.5 seconds, so we send a 250 ms break,
+ * the low end of that range.
+ */
+ rc = tty_check_change(tty);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ if (rc) {
+ return(rc);
+ }
+
+ rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ if (rc) {
+ DPR_IOCTL(("dgnc_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_bd->bd_ops->send_break(ch, 250);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ ch->ch_portnum, dgnc_ioctl_name(cmd), cmd, arg));
+
+ return(0);
+
+ case TIOCSBRK:
+ rc = tty_check_change(tty);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ if (rc) {
+ return(rc);
+ }
+
+ rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ if (rc) {
+ DPR_IOCTL(("dgnc_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ ch->ch_bd->bd_ops->send_break(ch, 250);
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ ch->ch_portnum, dgnc_ioctl_name(cmd), cmd, arg));
+
+ return(0);
+
+ case TIOCCBRK:
+ /* Do Nothing */
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return 0;
+
+ case TIOCGSOFTCAR:
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ rc = put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *) arg);
+ return(rc);
+
+ case TIOCSSOFTCAR:
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ rc = get_user(arg, (unsigned long __user *) arg);
+ if (rc)
+ return(rc);
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) | (arg ? CLOCAL : 0));
+ ch->ch_bd->bd_ops->param(tty);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ return(0);
+
+ case TIOCMGET:
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(dgnc_get_modem_info(ch, uarg));
+
+ case TIOCMBIS:
+ case TIOCMBIC:
+ case TIOCMSET:
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(dgnc_set_modem_info(tty, cmd, uarg));
+
+ /*
+ * Here are any additional ioctl's that we want to implement
+ */
+
+ case TCFLSH:
+ /*
+ * The linux tty driver doesn't have a flush
+ * input routine for the driver, assuming all backed
+ * up data is in the line disc. buffers. However,
+ * we all know that's not the case. Here, we
+ * act on the ioctl, but then lie and say we didn't
+ * so the line discipline will process the flush
+ * also.
+ */
+ rc = tty_check_change(tty);
+ if (rc) {
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(rc);
+ }
+
+ if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) {
+ ch->ch_r_head = ch->ch_r_tail;
+ ch->ch_bd->bd_ops->flush_uart_read(ch);
+ /* Force queue flow control to be released, if needed */
+ dgnc_check_queue_flow_control(ch);
+ }
+
+ if ((arg == TCOFLUSH) || (arg == TCIOFLUSH)) {
+ if (!(un->un_type == DGNC_PRINT)) {
+ ch->ch_w_head = ch->ch_w_tail;
+ ch->ch_bd->bd_ops->flush_uart_write(ch);
+
+ if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_tun.un_flags_wait);
+ }
+
+ if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
+ ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
+ wake_up_interruptible(&ch->ch_pun.un_flags_wait);
+ }
+
+ }
+ }
+
+ /* pretend we didn't recognize this IOCTL */
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(-ENOIOCTLCMD);
+ case TCSETSF:
+ case TCSETSW:
+ /*
+ * The linux tty driver doesn't have a flush
+ * input routine for the driver, assuming all backed
+ * up data is in the line disc. buffers. However,
+ * we all know that's not the case. Here, we
+ * act on the ioctl, but then lie and say we didn't
+ * so the line discipline will process the flush
+ * also.
+ */
+ if (cmd == TCSETSF) {
+ /* flush rx */
+ ch->ch_flags &= ~CH_STOP;
+ ch->ch_r_head = ch->ch_r_tail;
+ ch->ch_bd->bd_ops->flush_uart_read(ch);
+ /* Force queue flow control to be released, if needed */
+ dgnc_check_queue_flow_control(ch);
+ }
+
+ /* now wait for all the output to drain */
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ if (rc) {
+ DPR_IOCTL(("dgnc_tty_ioctl - bad return: %d\n", rc));
+ return(-EINTR);
+ }
+
+ DPR_IOCTL(("dgnc_tty_ioctl finish on port %d - cmd %s (%x), arg %lx\n",
+ ch->ch_portnum, dgnc_ioctl_name(cmd), cmd, arg));
+
+ /* pretend we didn't recognize this */
+ return(-ENOIOCTLCMD);
+
+ case TCSETAW:
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ if (rc) {
+ DPR_IOCTL(("dgnc_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+
+ /* pretend we didn't recognize this */
+ return(-ENOIOCTLCMD);
+
+ case TCXONC:
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ /* Make the ld do it */
+ return(-ENOIOCTLCMD);
+
+ case DIGI_GETA:
+ /* get information for ditty */
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(dgnc_tty_digigeta(tty, uarg));
+
+ case DIGI_SETAW:
+ case DIGI_SETAF:
+
+ /* set information for ditty */
+ if (cmd == (DIGI_SETAW)) {
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ if (rc) {
+ DPR_IOCTL(("dgnc_tty_ioctl - bad return: %d ", rc));
+ return(-EINTR);
+ }
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ }
+ else {
+ tty_ldisc_flush(tty);
+ }
+ /* fall thru */
+
+ case DIGI_SETA:
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(dgnc_tty_digiseta(tty, uarg));
+
+ case DIGI_LOOPBACK:
+ {
+ uint loopback = 0;
+ /* Let go of locks when accessing user space, could sleep */
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ rc = get_user(loopback, (unsigned int __user *) arg);
+ if (rc)
+ return(rc);
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /* Enable/disable internal loopback for this port */
+ if (loopback)
+ ch->ch_flags |= CH_LOOPBACK;
+ else
+ ch->ch_flags &= ~(CH_LOOPBACK);
+
+ ch->ch_bd->bd_ops->param(tty);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(0);
+ }
+
+ case DIGI_GETCUSTOMBAUD:
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ rc = put_user(ch->ch_custom_speed, (unsigned int __user *) arg);
+ return(rc);
+
+ case DIGI_SETCUSTOMBAUD:
+ {
+ uint new_rate;
+ /* Let go of locks when accessing user space, could sleep */
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ rc = get_user(new_rate, (unsigned int __user *) arg);
+ if (rc)
+ return(rc);
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ dgnc_set_custom_speed(ch, new_rate);
+ ch->ch_bd->bd_ops->param(tty);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(0);
+ }
+
+ /*
+ * This ioctl allows insertion of a character into the front
+ * of any pending data to be transmitted.
+ *
+ * This ioctl is to satisfy the "Send Character Immediate"
+ * call that the RealPort protocol spec requires.
+ */
+ case DIGI_REALPORT_SENDIMMEDIATE:
+ {
+ unsigned char c;
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ rc = get_user(c, (unsigned char __user *) arg);
+ if (rc)
+ return(rc);
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+ ch->ch_bd->bd_ops->send_immediate_char(ch, c);
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ return(0);
+ }
+
+ /*
+ * This ioctl returns all the current counts for the port.
+ *
+ * This ioctl is to satisfy the "Line Error Counters"
+ * call that the RealPort protocol spec requires.
+ */
+ case DIGI_REALPORT_GETCOUNTERS:
+ {
+ struct digi_getcounter buf;
+
+ buf.norun = ch->ch_err_overrun;
+ buf.noflow = 0; /* The driver doesn't keep this stat */
+ buf.nframe = ch->ch_err_frame;
+ buf.nparity = ch->ch_err_parity;
+ buf.nbreak = ch->ch_err_break;
+ buf.rbytes = ch->ch_rxcount;
+ buf.tbytes = ch->ch_txcount;
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ if (copy_to_user(uarg, &buf, sizeof(struct digi_getcounter))) {
+ return (-EFAULT);
+ }
+ return(0);
+ }
+
+ /*
+ * This ioctl returns all current events.
+ *
+ * This ioctl is to satisfy the "Event Reporting"
+ * call that the RealPort protocol spec requires.
+ */
+ case DIGI_REALPORT_GETEVENTS:
+ {
+ unsigned int events = 0;
+
+ /* NOTE: MORE EVENTS NEEDS TO BE ADDED HERE */
+ if (ch->ch_flags & CH_BREAK_SENDING)
+ events |= EV_TXB;
+ if ((ch->ch_flags & CH_STOP) || (ch->ch_flags & CH_FORCED_STOP)) {
+ events |= (EV_OPU | EV_OPS);
+ }
+ if ((ch->ch_flags & CH_STOPI) || (ch->ch_flags & CH_FORCED_STOPI)) {
+ events |= (EV_IPU | EV_IPS);
+ }
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+ rc = put_user(events, (unsigned int __user *) arg);
+ return(rc);
+ }
+
+ /*
+ * This ioctl returns TOUT and TIN counters based
+ * upon the values passed in by the RealPort Server.
+ * It also passes back whether the UART Transmitter is
+ * empty as well.
+ */
+ case DIGI_REALPORT_GETBUFFERS:
+ {
+ struct digi_getbuffer buf;
+ int tdist;
+ int count;
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ /*
+ * Get data from user first.
+ */
+ if (copy_from_user(&buf, uarg, sizeof(struct digi_getbuffer))) {
+ return (-EFAULT);
+ }
+
+ DGNC_LOCK(ch->ch_lock, lock_flags);
+
+ /*
+ * Figure out how much data is in our RX and TX queues.
+ */
+ buf.rxbuf = (ch->ch_r_head - ch->ch_r_tail) & RQUEUEMASK;
+ buf.txbuf = (ch->ch_w_head - ch->ch_w_tail) & WQUEUEMASK;
+
+ /*
+ * Is the UART empty? Add that value to what's in our TX queue.
+ */
+ count = buf.txbuf + ch->ch_bd->bd_ops->get_uart_bytes_left(ch);
+
+ /*
+ * Figure out how much data the RealPort Server believes should
+ * be in our TX queue.
+ */
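+ /* Mask to 16 bits so the distance stays correct when the counters wrap. */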
+ tdist = (buf.tIn - buf.tOut) & 0xffff;
+
+ /*
+ * If we have more data than the RealPort Server believes we
+ * should have, reduce our count to its amount.
+ *
+ * This count difference CAN happen because the Linux LD can
+ * insert more characters into our queue for OPOST processing
+ * that the RealPort Server doesn't know about.
+ */
+ if (buf.txbuf > tdist) {
+ buf.txbuf = tdist;
+ }
+
+ /*
+ * Report whether our queue and UART TX are completely empty.
+ */
+ if (count) {
+ buf.txdone = 0;
+ } else {
+ buf.txdone = 1;
+ }
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ if (copy_to_user(uarg, &buf, sizeof(struct digi_getbuffer))) {
+ return (-EFAULT);
+ }
+ return(0);
+ }
+ default:
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_ioctl - in default\n"));
+ DPR_IOCTL(("dgnc_tty_ioctl end - cmd %s (%x), arg %lx\n",
+ dgnc_ioctl_name(cmd), cmd, arg));
+
+ return(-ENOIOCTLCMD);
+ }
+
+ DGNC_UNLOCK(ch->ch_lock, lock_flags);
+
+ DPR_IOCTL(("dgnc_tty_ioctl end - cmd %s (%x), arg %lx\n",
+ dgnc_ioctl_name(cmd), cmd, arg));
+
+ return(0);
+}
diff --git a/drivers/staging/dgnc/dgnc_tty.h b/drivers/staging/dgnc/dgnc_tty.h
new file mode 100644
index 00000000000..deb388d2f4c
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_tty.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef __DGNC_TTY_H
+#define __DGNC_TTY_H
+
+#include "dgnc_driver.h"
+
+int dgnc_tty_register(struct board_t *brd);
+
+int dgnc_tty_preinit(void);
+int dgnc_tty_init(struct board_t *);
+
+void dgnc_tty_post_uninit(void);
+void dgnc_tty_uninit(struct board_t *);
+
+void dgnc_input(struct channel_t *ch);
+void dgnc_carrier(struct channel_t *ch);
+void dgnc_wakeup_writes(struct channel_t *ch);
+void dgnc_check_queue_flow_control(struct channel_t *ch);
+
+void dgnc_sniff_nowait_nolock(struct channel_t *ch, uchar *text, uchar *buf, int nbuf);
+
+#endif
diff --git a/drivers/staging/dgnc/dgnc_types.h b/drivers/staging/dgnc/dgnc_types.h
new file mode 100644
index 00000000000..4fa358535f8
--- /dev/null
+++ b/drivers/staging/dgnc/dgnc_types.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef __DGNC_TYPES_H
+#define __DGNC_TYPES_H
+
+#ifndef TRUE
+# define TRUE 1
+#endif
+
+#ifndef FALSE
+# define FALSE 0
+#endif
+
+/* Required for our shared headers! */
+typedef unsigned char uchar;
+
+#endif
diff --git a/drivers/staging/dgnc/digi.h b/drivers/staging/dgnc/digi.h
new file mode 100644
index 00000000000..eb6e3712572
--- /dev/null
+++ b/drivers/staging/dgnc/digi.h
@@ -0,0 +1,416 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+#ifndef __DIGI_H
+#define __DIGI_H
+
+/************************************************************************
+ *** Definitions for Digi ditty(1) command.
+ ************************************************************************/
+
+
+/*
+ * Copyright (c) 1988-96 Digi International Inc., All Rights Reserved.
+ */
+
+/************************************************************************
+ * This module provides application access to special Digi
+ * serial line enhancements which are not standard UNIX(tm) features.
+ ************************************************************************/
+
+#if !defined(TIOCMODG)
+
+#define TIOCMODG ('d'<<8) | 250 /* get modem ctrl state */
+#define TIOCMODS ('d'<<8) | 251 /* set modem ctrl state */
+
+#ifndef TIOCM_LE
+#define TIOCM_LE 0x01 /* line enable */
+#define TIOCM_DTR 0x02 /* data terminal ready */
+#define TIOCM_RTS 0x04 /* request to send */
+#define TIOCM_ST 0x08 /* secondary transmit */
+#define TIOCM_SR 0x10 /* secondary receive */
+#define TIOCM_CTS 0x20 /* clear to send */
+#define TIOCM_CAR 0x40 /* carrier detect */
+#define TIOCM_RNG 0x80 /* ring indicator */
+#define TIOCM_DSR 0x100 /* data set ready */
+#define TIOCM_RI TIOCM_RNG /* ring (alternate) */
+#define TIOCM_CD TIOCM_CAR /* carrier detect (alt) */
+#endif
+
+#endif
+
+#if !defined(TIOCMSET)
+#define TIOCMSET ('d'<<8) | 252 /* set modem ctrl state */
+#define TIOCMGET ('d'<<8) | 253 /* get modem ctrl state */
+#endif
+
+#if !defined(TIOCMBIC)
+#define TIOCMBIC ('d'<<8) | 254 /* clear modem ctrl bits */
+#define TIOCMBIS ('d'<<8) | 255 /* set modem ctrl bits */
+#endif
+
+
+#if !defined(TIOCSDTR)
+#define TIOCSDTR ('e'<<8) | 0 /* set DTR */
+#define TIOCCDTR ('e'<<8) | 1 /* clear DTR */
+#endif
+
+/************************************************************************
+ * Ioctl command arguments for DIGI parameters.
+ ************************************************************************/
+#define DIGI_GETA ('e'<<8) | 94 /* Read params */
+
+#define DIGI_SETA ('e'<<8) | 95 /* Set params */
+#define DIGI_SETAW ('e'<<8) | 96 /* Drain & set params */
+#define DIGI_SETAF ('e'<<8) | 97 /* Drain, flush & set params */
+
+#define DIGI_KME ('e'<<8) | 98 /* Read/Write Host */
+ /* Adapter Memory */
+
+#define DIGI_GETFLOW ('e'<<8) | 99 /* Get startc/stopc flow */
+ /* control characters */
+#define DIGI_SETFLOW ('e'<<8) | 100 /* Set startc/stopc flow */
+ /* control characters */
+#define DIGI_GETAFLOW ('e'<<8) | 101 /* Get Aux. startc/stopc */
+ /* flow control chars */
+#define DIGI_SETAFLOW ('e'<<8) | 102 /* Set Aux. startc/stopc */
+ /* flow control chars */
+
+#define DIGI_GEDELAY ('d'<<8) | 246 /* Get edelay */
+#define DIGI_SEDELAY ('d'<<8) | 247 /* Set edelay */
+
+struct digiflow_t {
+ unsigned char startc; /* flow cntl start char */
+ unsigned char stopc; /* flow cntl stop char */
+};
+
+
+#ifdef FLOW_2200
+#define F2200_GETA ('e'<<8) | 104 /* Get 2x36 flow cntl flags */
+#define F2200_SETAW ('e'<<8) | 105 /* Set 2x36 flow cntl flags */
+#define F2200_MASK 0x03 /* 2200 flow cntl bit mask */
+#define FCNTL_2200 0x01 /* 2x36 terminal flow cntl */
+#define PCNTL_2200 0x02 /* 2x36 printer flow cntl */
+#define F2200_XON 0xf8
+#define P2200_XON 0xf9
+#define F2200_XOFF 0xfa
+#define P2200_XOFF 0xfb
+
+#define FXOFF_MASK 0x03 /* 2200 flow status mask */
+#define RCVD_FXOFF 0x01 /* 2x36 Terminal XOFF rcvd */
+#define RCVD_PXOFF 0x02 /* 2x36 Printer XOFF rcvd */
+#endif
+
+/************************************************************************
+ * Values for digi_flags
+ ************************************************************************/
+#define DIGI_IXON 0x0001 /* Handle IXON in the FEP */
+#define DIGI_FAST 0x0002 /* Fast baud rates */
+#define RTSPACE 0x0004 /* RTS input flow control */
+#define CTSPACE 0x0008 /* CTS output flow control */
+#define DSRPACE 0x0010 /* DSR output flow control */
+#define DCDPACE 0x0020 /* DCD output flow control */
+#define DTRPACE 0x0040 /* DTR input flow control */
+#define DIGI_COOK 0x0080 /* Cooked processing done in FEP */
+#define DIGI_FORCEDCD 0x0100 /* Force carrier */
+#define DIGI_ALTPIN 0x0200 /* Alternate RJ-45 pin config */
+#define DIGI_AIXON 0x0400 /* Aux flow control in fep */
+#define DIGI_PRINTER 0x0800 /* Hold port open for flow cntrl*/
+#define DIGI_PP_INPUT 0x1000 /* Change parallel port to input*/
+#define DIGI_DTR_TOGGLE 0x2000 /* Support DTR Toggle */
+#define DIGI_422 0x4000 /* for 422/232 selectable panel */
+#define DIGI_RTS_TOGGLE 0x8000 /* Support RTS Toggle */
+
+/************************************************************************
+ * These options are not supported on the comxi.
+ ************************************************************************/
+#define DIGI_COMXI (DIGI_FAST|DIGI_COOK|DSRPACE|DCDPACE|DTRPACE)
+
+#define DIGI_PLEN 28 /* String length */
+#define DIGI_TSIZ 10 /* Terminal string len */
+
+/************************************************************************
+ * Structure used with ioctl commands for DIGI parameters.
+ ************************************************************************/
+struct digi_t {
+ unsigned short digi_flags; /* Flags (see above) */
+ unsigned short digi_maxcps; /* Max printer CPS */
+ unsigned short digi_maxchar; /* Max chars in print queue */
+ unsigned short digi_bufsize; /* Buffer size */
+ unsigned char digi_onlen; /* Length of ON string */
+ unsigned char digi_offlen; /* Length of OFF string */
+ char digi_onstr[DIGI_PLEN]; /* Printer on string */
+ char digi_offstr[DIGI_PLEN]; /* Printer off string */
+ char digi_term[DIGI_TSIZ]; /* terminal string */
+};
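+
+/*
+ * Editor's example (illustrative only, not part of this header): a minimal
+ * userspace sketch of how the DIGI_GETA/DIGI_SETAW ioctls and struct digi_t
+ * above might be used on an already-open Digi/RealPort tty fd.
+ */
+#if 0	/* usage sketch only, never compiled */
+#include <sys/ioctl.h>
+
+static int digi_enable_rtscts(int fd)
+{
+	struct digi_t d;
+
+	if (ioctl(fd, DIGI_GETA, &d) < 0)	/* read current params */
+		return -1;
+	d.digi_flags |= RTSPACE | CTSPACE;	/* RTS/CTS hardware flow control */
+	return ioctl(fd, DIGI_SETAW, &d);	/* drain output, then apply */
+}
+#endif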
+
+/************************************************************************
+ * KME definitions and structures.
+ ************************************************************************/
+#define RW_IDLE 0 /* Operation complete */
+#define RW_READ 1 /* Read Concentrator Memory */
+#define RW_WRITE 2 /* Write Concentrator Memory */
+
+struct rw_t {
+ unsigned char rw_req; /* Request type */
+ unsigned char rw_board; /* Host Adapter board number */
+ unsigned char rw_conc; /* Concentrator number */
+ unsigned char rw_reserved; /* Reserved for expansion */
+ unsigned int rw_addr; /* Address in concentrator */
+ unsigned short rw_size; /* Read/write request length */
+ unsigned char rw_data[128]; /* Data to read/write */
+};
+
+/***********************************************************************
+ * Shrink Buffer and Board Information definitions and structures.
+ *
+ ************************************************************************/
+ /* Board type return codes */
+#define PCXI_TYPE 1 /* Board type at the designated port is a PC/Xi */
+#define PCXM_TYPE 2 /* Board type at the designated port is a PC/Xm */
+#define PCXE_TYPE 3 /* Board type at the designated port is a PC/Xe */
+#define MCXI_TYPE 4 /* Board type at the designated port is a MC/Xi */
+#define COMXI_TYPE 5 /* Board type at the designated port is a COM/Xi */
+
+ /* Non-Zero Result codes. */
+#define RESULT_NOBDFND 1 /* A Digi product at that port is not configured/installed */
+#define RESULT_NODESCT 2 /* A memory descriptor was not obtainable */
+#define RESULT_NOOSSIG 3 /* FEP/OS signature was not detected on the board */
+#define RESULT_TOOSML 4 /* Too small an area to shrink. */
+#define RESULT_NOCHAN 5 /* Channel structure for the board was not found */
+
+struct shrink_buf_struct {
+ unsigned int shrink_buf_vaddr; /* Virtual address of board */
+ unsigned int shrink_buf_phys; /* Physical address of board */
+ unsigned int shrink_buf_bseg; /* Amount of board memory */
+ unsigned int shrink_buf_hseg; /* '186 Beginning of Dual-Port */
+
+ unsigned int shrink_buf_lseg; /* '186 Beginning of freed memory */
+ unsigned int shrink_buf_mseg; /* Linear address from start of
+ dual-port where freed memory
+ begins, host viewpoint. */
+
+ unsigned int shrink_buf_bdparam; /* Parameter for xxmemon and
+ xxmemoff */
+
+ unsigned int shrink_buf_reserva; /* Reserved */
+ unsigned int shrink_buf_reservb; /* Reserved */
+ unsigned int shrink_buf_reservc; /* Reserved */
+ unsigned int shrink_buf_reservd; /* Reserved */
+
+ unsigned char shrink_buf_result; /* Reason for call failing;
+ zero is a good return */
+ unsigned char shrink_buf_init; /* Non-Zero if it caused an
+ xxinit call. */
+
+ unsigned char shrink_buf_anports; /* Number of async ports */
+ unsigned char shrink_buf_snports; /* Number of sync ports */
+ unsigned char shrink_buf_type; /* Board type 1 = PC/Xi,
+ 2 = PC/Xm,
+ 3 = PC/Xe
+ 4 = MC/Xi
+ 5 = COMX/i */
+ unsigned char shrink_buf_card; /* Card number */
+
+};
+
+/************************************************************************
+ * Structure to get driver status information
+ ************************************************************************/
+struct digi_dinfo {
+ unsigned int dinfo_nboards; /* # boards configured */
+ char dinfo_reserved[12]; /* for future expansion */
+ char dinfo_version[16]; /* driver version */
+};
+
+#define DIGI_GETDD ('d'<<8) | 248 /* get driver info */
+
+/************************************************************************
+ * Structure used with ioctl commands for per-board information
+ *
+ * physsize and memsize differ when board has "windowed" memory
+ ************************************************************************/
+struct digi_info {
+ unsigned int info_bdnum; /* Board number (0 based) */
+ unsigned int info_ioport; /* io port address */
+ unsigned int info_physaddr; /* memory address */
+ unsigned int info_physsize; /* Size of host mem window */
+ unsigned int info_memsize; /* Amount of dual-port mem */
+ /* on board */
+ unsigned short info_bdtype; /* Board type */
+ unsigned short info_nports; /* number of ports */
+ char info_bdstate; /* board state */
+ char info_reserved[7]; /* for future expansion */
+};
+
+#define DIGI_GETBD ('d'<<8) | 249 /* get board info */
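+
+/*
+ * Editor's example (illustrative only, not part of this header): querying
+ * driver-wide and per-board information with the DIGI_GETDD/DIGI_GETBD
+ * ioctls above.  Passing the board index in via info_bdnum is an assumption
+ * of this sketch; consult the driver for the exact convention.
+ */
+#if 0	/* usage sketch only, never compiled */
+#include <sys/ioctl.h>
+
+static int digi_count_ports(int fd)
+{
+	struct digi_dinfo di;
+	struct digi_info bi;
+	unsigned int i, nports = 0;
+
+	if (ioctl(fd, DIGI_GETDD, &di) < 0)	/* driver version + board count */
+		return -1;
+	for (i = 0; i < di.dinfo_nboards; i++) {
+		bi.info_bdnum = i;		/* assumed board-selection field */
+		if (ioctl(fd, DIGI_GETBD, &bi) < 0)
+			return -1;
+		nports += bi.info_nports;
+	}
+	return nports;
+}
+#endif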
+
+struct digi_stat {
+ unsigned int info_chan; /* Channel number (0 based) */
+ unsigned int info_brd; /* Board number (0 based) */
+ unsigned int info_cflag; /* cflag for channel */
+ unsigned int info_iflag; /* iflag for channel */
+ unsigned int info_oflag; /* oflag for channel */
+ unsigned int info_mstat; /* mstat for channel */
+ unsigned int info_tx_data; /* tx_data for channel */
+ unsigned int info_rx_data; /* rx_data for channel */
+ unsigned int info_hflow; /* hflow for channel */
+ unsigned int info_reserved[8]; /* for future expansion */
+};
+
+#define DIGI_GETSTAT ('d'<<8) | 244 /* get channel status */
+/************************************************************************
+ *
+ * Structure used with ioctl commands for per-channel information
+ *
+ ************************************************************************/
+struct digi_ch {
+ unsigned int info_bdnum; /* Board number (0 based) */
+ unsigned int info_channel; /* Channel index number */
+ unsigned int info_ch_cflag; /* Channel cflag */
+ unsigned int info_ch_iflag; /* Channel iflag */
+ unsigned int info_ch_oflag; /* Channel oflag */
+ unsigned int info_chsize; /* Channel structure size */
+ unsigned int info_sleep_stat; /* sleep status */
+ dev_t info_dev; /* device number */
+ unsigned char info_initstate; /* Channel init state */
+ unsigned char info_running; /* Channel running state */
+ int reserved[8]; /* reserved for future use */
+};
+
+/*
+* This structure is used with the DIGI_FEPCMD ioctl to
+* tell the driver which port to send the command to.
+*/
+struct digi_cmd {
+ int cmd;
+ int word;
+ int ncmds;
+ int chan; /* channel index (zero based) */
+ int bdid; /* board index (zero based) */
+};
+
+
+struct digi_getbuffer /* Struct for holding buffer use counts */
+{
+ unsigned long tIn;
+ unsigned long tOut;
+ unsigned long rxbuf;
+ unsigned long txbuf;
+ unsigned long txdone;
+};
+
+struct digi_getcounter {
+ unsigned long norun; /* number of UART overrun errors */
+ unsigned long noflow; /* number of buffer overflow errors */
+ unsigned long nframe; /* number of framing errors */
+ unsigned long nparity; /* number of parity errors */
+ unsigned long nbreak; /* number of breaks received */
+ unsigned long rbytes; /* number of received bytes */
+ unsigned long tbytes; /* number of bytes transmitted fully */
+};
+
+/*
+* info_sleep_stat defines
+*/
+#define INFO_RUNWAIT 0x0001
+#define INFO_WOPEN 0x0002
+#define INFO_TTIOW 0x0004
+#define INFO_CH_RWAIT 0x0008
+#define INFO_CH_WEMPTY 0x0010
+#define INFO_CH_WLOW 0x0020
+#define INFO_XXBUF_BUSY 0x0040
+
+#define DIGI_GETCH ('d'<<8) | 245 /* get channel info */
+
+/* Board type definitions */
+
+#define SUBTYPE 0007
+#define T_PCXI 0000
+#define T_PCXM 0001
+#define T_PCXE 0002
+#define T_PCXR 0003
+#define T_SP 0004
+#define T_SP_PLUS 0005
+# define T_HERC 0000
+# define T_HOU 0001
+# define T_LON 0002
+# define T_CHA 0003
+#define FAMILY 0070
+#define T_COMXI 0000
+#define T_PCXX 0010
+#define T_CX 0020
+#define T_EPC 0030
+#define T_PCLITE 0040
+#define T_SPXX 0050
+#define T_AVXX 0060
+#define T_DXB 0070
+#define T_A2K_4_8 0070
+#define BUSTYPE 0700
+#define T_ISABUS 0000
+#define T_MCBUS 0100
+#define T_EISABUS 0200
+#define T_PCIBUS 0400
+
+/* Board State Definitions */
+
+#define BD_RUNNING 0x0
+#define BD_REASON 0x7f
+#define BD_NOTFOUND 0x1
+#define BD_NOIOPORT 0x2
+#define BD_NOMEM 0x3
+#define BD_NOBIOS 0x4
+#define BD_NOFEP 0x5
+#define BD_FAILED 0x6
+#define BD_ALLOCATED 0x7
+#define BD_TRIBOOT 0x8
+#define BD_BADKME 0x80
+
+#define DIGI_SPOLL ('d'<<8) | 254 /* change poller rate */
+
+#define DIGI_SETCUSTOMBAUD _IOW('e', 106, int) /* Set integer baud rate */
+#define DIGI_GETCUSTOMBAUD _IOR('e', 107, int) /* Get integer baud rate */
+
+#define DIGI_REALPORT_GETBUFFERS ('e'<<8 ) | 108
+#define DIGI_REALPORT_SENDIMMEDIATE ('e'<<8 ) | 109
+#define DIGI_REALPORT_GETCOUNTERS ('e'<<8 ) | 110
+#define DIGI_REALPORT_GETEVENTS ('e'<<8 ) | 111
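+
+/*
+ * Editor's example (illustrative only, not part of this header): setting a
+ * non-standard line rate through the _IOW/_IOR-encoded custom baud ioctls
+ * defined above, on an already-open tty fd.
+ */
+#if 0	/* usage sketch only, never compiled */
+#include <sys/ioctl.h>
+
+static int digi_set_custom_baud(int fd, int baud)
+{
+	if (ioctl(fd, DIGI_SETCUSTOMBAUD, &baud) < 0)	/* request rate */
+		return -1;
+	return ioctl(fd, DIGI_GETCUSTOMBAUD, &baud);	/* read back what was set */
+}
+#endif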
+
+#define EV_OPU 0x0001 //!<Output paused by client
+#define EV_OPS 0x0002 //!<Output paused by regular sw flowctrl
+#define EV_OPX 0x0004 //!<Output paused by extra sw flowctrl
+#define EV_OPH 0x0008 //!<Output paused by hw flowctrl
+#define EV_OPT 0x0800 //!<Output paused for RTS Toggle predelay
+
+#define EV_IPU 0x0010 //!<Input paused unconditionally by user
+#define EV_IPS 0x0020 //!<Input paused by high/low water marks
+//#define EV_IPH 0x0040 //!<Input paused w/ hardware
+#define EV_IPA 0x0400 //!<Input paused by pattern alarm module
+
+#define EV_TXB 0x0040 //!<Transmit break pending
+#define EV_TXI 0x0080 //!<Transmit immediate pending
+#define EV_TXF 0x0100 //!<Transmit flowctrl char pending
+#define EV_RXB 0x0200 //!<Break received
+
+#define EV_OPALL 0x080f //!<Output pause flags
+#define EV_IPALL 0x0430 //!<Input pause flags
+
+#endif /* DIGI_H */
diff --git a/drivers/staging/dgnc/dpacompat.h b/drivers/staging/dgnc/dpacompat.h
new file mode 100644
index 00000000000..f96963b9843
--- /dev/null
+++ b/drivers/staging/dgnc/dpacompat.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2003 Digi International (www.digi.com)
+ * Scott H Kilau <Scott_Kilau at digi dot com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
+ * implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ * PURPOSE. See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * NOTE: THIS IS A SHARED HEADER. DO NOT CHANGE CODING STYLE!!!
+ */
+
+
+/*
+ * This structure holds data needed for the intelligent <--> nonintelligent
+ * DPA translation
+ */
+struct ni_info {
+ int board;
+ int channel;
+ int dtr;
+ int rts;
+ int cts;
+ int dsr;
+ int ri;
+ int dcd;
+ int curtx;
+ int currx;
+ unsigned short iflag;
+ unsigned short oflag;
+ unsigned short cflag;
+ unsigned short lflag;
+
+ unsigned int mstat;
+ unsigned char hflow;
+
+ unsigned char xmit_stopped;
+ unsigned char recv_stopped;
+
+ unsigned int baud;
+};
+
+#define RW_READ 1
+#define RW_WRITE 2
+#define DIGI_KME ('e'<<8) | 98 /* Read/Write Host */
+
+#define SUBTYPE 0007
+#define T_PCXI 0000
+#define T_PCXEM 0001
+#define T_PCXE 0002
+#define T_PCXR 0003
+#define T_SP 0004
+#define T_SP_PLUS 0005
+
+#define T_HERC 0000
+#define T_HOU 0001
+#define T_LON 0002
+#define T_CHA 0003
+
+#define T_NEO 0000
+#define T_NEO_EXPRESS 0001
+#define T_CLASSIC 0002
+
+#define FAMILY 0070
+#define T_COMXI 0000
+#define T_NI 0000
+#define T_PCXX 0010
+#define T_CX 0020
+#define T_EPC 0030
+#define T_PCLITE 0040
+#define T_SPXX 0050
+#define T_AVXX 0060
+#define T_DXB 0070
+#define T_A2K_4_8 0070
+
+#define BUSTYPE 0700
+#define T_ISABUS 0000
+#define T_MCBUS 0100
+#define T_EISABUS 0200
+#define T_PCIBUS 0400
+
+/* Board State Definitions */
+
+#define BD_RUNNING 0x0
+#define BD_REASON 0x7f
+#define BD_NOTFOUND 0x1
+#define BD_NOIOPORT 0x2
+#define BD_NOMEM 0x3
+#define BD_NOBIOS 0x4
+#define BD_NOFEP 0x5
+#define BD_FAILED 0x6
+#define BD_ALLOCATED 0x7
+#define BD_TRIBOOT 0x8
+#define BD_BADKME 0x80
+
+#define DIGI_AIXON 0x0400 /* Aux flow control in fep */
+
+/* Ioctls needed for dpa operation */
+
+#define DIGI_GETDD ('d'<<8) | 248 /* get driver info */
+#define DIGI_GETBD ('d'<<8) | 249 /* get board info */
+#define DIGI_GET_NI_INFO ('d'<<8) | 250 /* nonintelligent state info */
+
+/* Other special ioctls */
+#define DIGI_TIMERIRQ ('d'<<8) | 251 /* Enable/disable RS_TIMER use */
+#define DIGI_LOOPBACK ('d'<<8) | 252 /* Enable/disable UART internal loopback */
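+
+/*
+ * Editor's example (illustrative only, not part of this header): reading the
+ * nonintelligent-mode line state with the DIGI_GET_NI_INFO ioctl into the
+ * ni_info structure above, on an already-open fd.
+ */
+#if 0	/* usage sketch only, never compiled */
+#include <sys/ioctl.h>
+
+static int dpa_read_line_state(int fd, struct ni_info *ni)
+{
+	return ioctl(fd, DIGI_GET_NI_INFO, ni);	/* fills dtr/rts/cts/dsr/... */
+}
+#endif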
diff --git a/drivers/staging/dgrp/dgrp_driver.c b/drivers/staging/dgrp/dgrp_driver.c
index e456dc6cb36..08eedf0867e 100644
--- a/drivers/staging/dgrp/dgrp_driver.c
+++ b/drivers/staging/dgrp/dgrp_driver.c
@@ -52,19 +52,12 @@ MODULE_PARM_DESC(register_prdevices, "Turn on/off registering transparent print
module_param_named(pollrate, dgrp_poll_tick, int, 0644);
MODULE_PARM_DESC(pollrate, "Poll interval in ms");
-/* Driver load/unload functions */
-static int dgrp_init_module(void);
-static void dgrp_cleanup_module(void);
-
-module_init(dgrp_init_module);
-module_exit(dgrp_cleanup_module);
-
/*
* init_module()
*
* Module load. This is where it all starts.
*/
-static int dgrp_init_module(void)
+static int __init dgrp_init_module(void)
{
int ret;
@@ -89,7 +82,7 @@ static int dgrp_init_module(void)
/*
* Module unload. This is where it all ends.
*/
-static void dgrp_cleanup_module(void)
+static void __exit dgrp_cleanup_module(void)
{
struct nd_struct *nd, *next;
@@ -108,3 +101,6 @@ static void dgrp_cleanup_module(void)
kfree(nd);
}
}
+
+module_init(dgrp_init_module);
+module_exit(dgrp_cleanup_module);
diff --git a/drivers/staging/dgrp/dgrp_tty.c b/drivers/staging/dgrp/dgrp_tty.c
index 654f6010b47..0d52de3729c 100644
--- a/drivers/staging/dgrp/dgrp_tty.c
+++ b/drivers/staging/dgrp/dgrp_tty.c
@@ -1120,7 +1120,9 @@ static void dgrp_tty_close(struct tty_struct *tty, struct file *file)
if (!sent_printer_offstr)
dgrp_tty_flush_buffer(tty);
+ spin_unlock_irqrestore(&nd->nd_lock, lock_flags);
tty_ldisc_flush(tty);
+ spin_lock_irqsave(&nd->nd_lock, lock_flags);
break;
}
diff --git a/drivers/staging/dwc2/Kconfig b/drivers/staging/dwc2/Kconfig
index d15d9d58e5a..be947d67384 100644
--- a/drivers/staging/dwc2/Kconfig
+++ b/drivers/staging/dwc2/Kconfig
@@ -1,7 +1,6 @@
config USB_DWC2
tristate "DesignWare USB2 DRD Core Support"
depends on USB
- depends on VIRT_TO_BUS
help
Say Y or M here if your system has a Dual Role HighSpeed
USB controller based on the DesignWare HSOTG IP Core.
diff --git a/drivers/staging/dwc2/core.c b/drivers/staging/dwc2/core.c
index e3a0e770301..06dae67a9d6 100644
--- a/drivers/staging/dwc2/core.c
+++ b/drivers/staging/dwc2/core.c
@@ -90,12 +90,10 @@ static void dwc2_enable_common_interrupts(struct dwc2_hsotg *hsotg)
*/
static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
{
- u32 hs_phy_type = hsotg->hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK;
- u32 fs_phy_type = hsotg->hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK;
u32 hcfg, val;
- if ((hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
- fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
+ if ((hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
+ hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
hsotg->core_params->ulpi_fs_ls > 0) ||
hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
/* Full speed PHY */
@@ -108,7 +106,7 @@ static void dwc2_init_fs_ls_pclk_sel(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val);
hcfg = readl(hsotg->regs + HCFG);
hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
- hcfg |= val;
+ hcfg |= val << HCFG_FSLSPCLKSEL_SHIFT;
writel(hcfg, hsotg->regs + HCFG);
}
@@ -245,7 +243,7 @@ static void dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
static void dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
{
- u32 usbcfg, hs_phy_type, fs_phy_type;
+ u32 usbcfg;
if (hsotg->core_params->speed == DWC2_SPEED_PARAM_FULL &&
hsotg->core_params->phy_type == DWC2_PHY_TYPE_PARAM_FS) {
@@ -256,11 +254,8 @@ static void dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
dwc2_hs_phy_init(hsotg, select_phy);
}
- hs_phy_type = hsotg->hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK;
- fs_phy_type = hsotg->hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK;
-
- if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
- fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
+ if (hsotg->hw_params.hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI &&
+ hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED &&
hsotg->core_params->ulpi_fs_ls > 0) {
dev_dbg(hsotg->dev, "Setting ULPI FSLS\n");
usbcfg = readl(hsotg->regs + GUSBCFG);
@@ -277,20 +272,20 @@ static void dwc2_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
{
- u32 ahbcfg = 0;
+ u32 ahbcfg = readl(hsotg->regs + GAHBCFG);
- switch (hsotg->hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) {
+ switch (hsotg->hw_params.arch) {
case GHWCFG2_EXT_DMA_ARCH:
dev_err(hsotg->dev, "External DMA Mode not supported\n");
return -EINVAL;
case GHWCFG2_INT_DMA_ARCH:
dev_dbg(hsotg->dev, "Internal DMA Mode\n");
- /*
- * Old value was GAHBCFG_HBSTLEN_INCR - done for
- * Host mode ISOC in issue fix - vahrama
- */
- ahbcfg |= GAHBCFG_HBSTLEN_INCR4;
+ if (hsotg->core_params->ahbcfg != -1) {
+ ahbcfg &= GAHBCFG_CTRL_MASK;
+ ahbcfg |= hsotg->core_params->ahbcfg &
+ ~GAHBCFG_CTRL_MASK;
+ }
break;
case GHWCFG2_SLAVE_ONLY_ARCH:
@@ -313,9 +308,6 @@ static int dwc2_gahbcfg_init(struct dwc2_hsotg *hsotg)
hsotg->core_params->dma_desc_enable = 0;
}
- if (hsotg->core_params->ahb_single > 0)
- ahbcfg |= GAHBCFG_AHB_SINGLE;
-
if (hsotg->core_params->dma_enable > 0)
ahbcfg |= GAHBCFG_DMA_EN;
@@ -331,7 +323,7 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
usbcfg = readl(hsotg->regs + GUSBCFG);
usbcfg &= ~(GUSBCFG_HNPCAP | GUSBCFG_SRPCAP);
- switch (hsotg->hwcfg2 & GHWCFG2_OP_MODE_MASK) {
+ switch (hsotg->hw_params.op_mode) {
case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
if (hsotg->core_params->otg_cap ==
DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
@@ -392,21 +384,6 @@ int dwc2_core_init(struct dwc2_hsotg *hsotg, bool select_phy, int irq)
/* Reset the Controller */
dwc2_core_reset(hsotg);
- dev_dbg(hsotg->dev, "num_dev_perio_in_ep=%d\n",
- hsotg->hwcfg4 >> GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT &
- GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK >>
- GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT);
-
- hsotg->total_fifo_size = hsotg->hwcfg3 >> GHWCFG3_DFIFO_DEPTH_SHIFT &
- GHWCFG3_DFIFO_DEPTH_MASK >> GHWCFG3_DFIFO_DEPTH_SHIFT;
- hsotg->rx_fifo_size = readl(hsotg->regs + GRXFSIZ);
- hsotg->nperio_tx_fifo_size =
- readl(hsotg->regs + GNPTXFSIZ) >> 16 & 0xffff;
-
- dev_dbg(hsotg->dev, "Total FIFO SZ=%d\n", hsotg->total_fifo_size);
- dev_dbg(hsotg->dev, "RxFIFO SZ=%d\n", hsotg->rx_fifo_size);
- dev_dbg(hsotg->dev, "NP TxFIFO SZ=%d\n", hsotg->nperio_tx_fifo_size);
-
/*
* This needs to happen in FS mode before any other programming occurs
*/
@@ -504,22 +481,18 @@ void dwc2_disable_host_interrupts(struct dwc2_hsotg *hsotg)
static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *params = hsotg->core_params;
- u32 rxfsiz, nptxfsiz, ptxfsiz, hptxfsiz, dfifocfg;
+ u32 nptxfsiz, hptxfsiz, dfifocfg, grxfsiz;
if (!params->enable_dynamic_fifo)
return;
- dev_dbg(hsotg->dev, "Total FIFO Size=%d\n", hsotg->total_fifo_size);
- dev_dbg(hsotg->dev, "Rx FIFO Size=%d\n", params->host_rx_fifo_size);
- dev_dbg(hsotg->dev, "NP Tx FIFO Size=%d\n",
- params->host_nperio_tx_fifo_size);
- dev_dbg(hsotg->dev, "P Tx FIFO Size=%d\n",
- params->host_perio_tx_fifo_size);
-
/* Rx FIFO */
- dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n",
- readl(hsotg->regs + GRXFSIZ));
- writel(params->host_rx_fifo_size, hsotg->regs + GRXFSIZ);
+ grxfsiz = readl(hsotg->regs + GRXFSIZ);
+ dev_dbg(hsotg->dev, "initial grxfsiz=%08x\n", grxfsiz);
+ grxfsiz &= ~GRXFSIZ_DEPTH_MASK;
+ grxfsiz |= params->host_rx_fifo_size <<
+ GRXFSIZ_DEPTH_SHIFT & GRXFSIZ_DEPTH_MASK;
+ writel(grxfsiz, hsotg->regs + GRXFSIZ);
dev_dbg(hsotg->dev, "new grxfsiz=%08x\n", readl(hsotg->regs + GRXFSIZ));
/* Non-periodic Tx FIFO */
@@ -536,27 +509,26 @@ static void dwc2_config_fifos(struct dwc2_hsotg *hsotg)
/* Periodic Tx FIFO */
dev_dbg(hsotg->dev, "initial hptxfsiz=%08x\n",
readl(hsotg->regs + HPTXFSIZ));
- ptxfsiz = params->host_perio_tx_fifo_size <<
- FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
- ptxfsiz |= (params->host_rx_fifo_size +
- params->host_nperio_tx_fifo_size) <<
- FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
- writel(ptxfsiz, hsotg->regs + HPTXFSIZ);
+ hptxfsiz = params->host_perio_tx_fifo_size <<
+ FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK;
+ hptxfsiz |= (params->host_rx_fifo_size +
+ params->host_nperio_tx_fifo_size) <<
+ FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK;
+ writel(hptxfsiz, hsotg->regs + HPTXFSIZ);
dev_dbg(hsotg->dev, "new hptxfsiz=%08x\n",
readl(hsotg->regs + HPTXFSIZ));
if (hsotg->core_params->en_multiple_tx_fifo > 0 &&
- hsotg->snpsid <= DWC2_CORE_REV_2_94a) {
+ hsotg->hw_params.snpsid <= DWC2_CORE_REV_2_94a) {
/*
* Global DFIFOCFG calculation for Host mode -
* include RxFIFO, NPTXFIFO and HPTXFIFO
*/
dfifocfg = readl(hsotg->regs + GDFIFOCFG);
- rxfsiz = readl(hsotg->regs + GRXFSIZ) & 0x0000ffff;
- nptxfsiz = readl(hsotg->regs + GNPTXFSIZ) >> 16 & 0xffff;
- hptxfsiz = readl(hsotg->regs + HPTXFSIZ) >> 16 & 0xffff;
dfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK;
- dfifocfg |= (rxfsiz + nptxfsiz + hptxfsiz) <<
+ dfifocfg |= (params->host_rx_fifo_size +
+ params->host_nperio_tx_fifo_size +
+ params->host_perio_tx_fifo_size) <<
GDFIFOCFG_EPINFOBASE_SHIFT &
GDFIFOCFG_EPINFOBASE_MASK;
writel(dfifocfg, hsotg->regs + GDFIFOCFG);
@@ -602,10 +574,9 @@ void dwc2_core_host_init(struct dwc2_hsotg *hsotg)
}
if (hsotg->core_params->dma_desc_enable > 0) {
- u32 op_mode = hsotg->hwcfg2 & GHWCFG2_OP_MODE_MASK;
-
- if (hsotg->snpsid < DWC2_CORE_REV_2_90a ||
- !(hsotg->hwcfg4 & GHWCFG4_DESC_DMA) ||
+ u32 op_mode = hsotg->hw_params.op_mode;
+ if (hsotg->hw_params.snpsid < DWC2_CORE_REV_2_90a ||
+ !hsotg->hw_params.dma_desc_enable ||
op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE ||
op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE ||
op_mode == GHWCFG2_OP_MODE_UNDEFINED) {
@@ -883,26 +854,20 @@ void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
dev_vdbg(hsotg->dev, "set HCCHAR(%d) to %08x\n",
hc_num, hcchar);
- dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__, hc_num);
+ dev_vdbg(hsotg->dev, "%s: Channel %d\n",
+ __func__, hc_num);
dev_vdbg(hsotg->dev, " Dev Addr: %d\n",
- hcchar >> HCCHAR_DEVADDR_SHIFT &
- HCCHAR_DEVADDR_MASK >> HCCHAR_DEVADDR_SHIFT);
+ chan->dev_addr);
dev_vdbg(hsotg->dev, " Ep Num: %d\n",
- hcchar >> HCCHAR_EPNUM_SHIFT &
- HCCHAR_EPNUM_MASK >> HCCHAR_EPNUM_SHIFT);
+ chan->ep_num);
dev_vdbg(hsotg->dev, " Is In: %d\n",
- !!(hcchar & HCCHAR_EPDIR));
+ chan->ep_is_in);
dev_vdbg(hsotg->dev, " Is Low Speed: %d\n",
- !!(hcchar & HCCHAR_LSPDDEV));
+ chan->speed == USB_SPEED_LOW);
dev_vdbg(hsotg->dev, " Ep Type: %d\n",
- hcchar >> HCCHAR_EPTYPE_SHIFT &
- HCCHAR_EPTYPE_MASK >> HCCHAR_EPTYPE_SHIFT);
+ chan->ep_type);
dev_vdbg(hsotg->dev, " Max Pkt: %d\n",
- hcchar >> HCCHAR_MPS_SHIFT &
- HCCHAR_MPS_MASK >> HCCHAR_MPS_SHIFT);
- dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
- hcchar >> HCCHAR_MULTICNT_SHIFT &
- HCCHAR_MULTICNT_MASK >> HCCHAR_MULTICNT_SHIFT);
+ chan->max_packet);
}
/* Program the HCSPLT register for SPLITs */
@@ -932,8 +897,7 @@ void dwc2_hc_init(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
dev_vdbg(hsotg->dev, " is_in %d\n",
chan->ep_is_in);
dev_vdbg(hsotg->dev, " Max Pkt %d\n",
- hcchar >> HCCHAR_MPS_SHIFT &
- HCCHAR_MPS_MASK >> HCCHAR_MPS_SHIFT);
+ chan->max_packet);
dev_vdbg(hsotg->dev, " xferlen %d\n",
chan->xfer_len);
}
@@ -1382,14 +1346,14 @@ void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
dev_vdbg(hsotg->dev, "%s: Channel %d\n", __func__,
chan->hc_num);
dev_vdbg(hsotg->dev, " Xfer Size: %d\n",
- hctsiz >> TSIZ_XFERSIZE_SHIFT &
- TSIZ_XFERSIZE_MASK >> TSIZ_XFERSIZE_SHIFT);
+ (hctsiz & TSIZ_XFERSIZE_MASK) >>
+ TSIZ_XFERSIZE_SHIFT);
dev_vdbg(hsotg->dev, " Num Pkts: %d\n",
- hctsiz >> TSIZ_PKTCNT_SHIFT &
- TSIZ_PKTCNT_MASK >> TSIZ_PKTCNT_SHIFT);
+ (hctsiz & TSIZ_PKTCNT_MASK) >>
+ TSIZ_PKTCNT_SHIFT);
dev_vdbg(hsotg->dev, " Start PID: %d\n",
- hctsiz >> TSIZ_SC_MC_PID_SHIFT &
- TSIZ_SC_MC_PID_MASK >> TSIZ_SC_MC_PID_SHIFT);
+ (hctsiz & TSIZ_SC_MC_PID_MASK) >>
+ TSIZ_SC_MC_PID_SHIFT);
}
if (hsotg->core_params->dma_enable > 0) {
@@ -1433,8 +1397,8 @@ void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
- hcchar >> HCCHAR_MULTICNT_SHIFT &
- HCCHAR_MULTICNT_MASK >> HCCHAR_MULTICNT_SHIFT);
+ (hcchar & HCCHAR_MULTICNT_MASK) >>
+ HCCHAR_MULTICNT_SHIFT);
writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
if (dbg_hc(chan))
@@ -1522,8 +1486,8 @@ void dwc2_hc_start_transfer_ddma(struct dwc2_hsotg *hsotg,
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, " Multi Cnt: %d\n",
- hcchar >> HCCHAR_MULTICNT_SHIFT &
- HCCHAR_MULTICNT_MASK >> HCCHAR_MULTICNT_SHIFT);
+ (hcchar & HCCHAR_MULTICNT_MASK) >>
+ HCCHAR_MULTICNT_SHIFT);
writel(hcchar, hsotg->regs + HCCHAR(chan->hc_num));
if (dbg_hc(chan))
@@ -1658,18 +1622,16 @@ void dwc2_hc_do_ping(struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan)
u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
{
u32 usbcfg;
- u32 hwcfg2;
u32 hprt0;
int clock = 60; /* default value */
usbcfg = readl(hsotg->regs + GUSBCFG);
- hwcfg2 = readl(hsotg->regs + GHWCFG2);
hprt0 = readl(hsotg->regs + HPRT0);
if (!(usbcfg & GUSBCFG_PHYSEL) && (usbcfg & GUSBCFG_ULPI_UTMI_SEL) &&
!(usbcfg & GUSBCFG_PHYIF16))
clock = 60;
- if ((usbcfg & GUSBCFG_PHYSEL) && (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) ==
+ if ((usbcfg & GUSBCFG_PHYSEL) && hsotg->hw_params.fs_phy_type ==
GHWCFG2_FS_PHY_TYPE_SHARED_ULPI)
clock = 48;
if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL) && !(usbcfg & GUSBCFG_PHYSEL) &&
@@ -1682,14 +1644,13 @@ u32 dwc2_calc_frame_interval(struct dwc2_hsotg *hsotg)
!(usbcfg & GUSBCFG_ULPI_UTMI_SEL) && (usbcfg & GUSBCFG_PHYIF16))
clock = 48;
if ((usbcfg & GUSBCFG_PHYSEL) && !(usbcfg & GUSBCFG_PHYIF16) &&
- (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) ==
- GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
+ hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_SHARED_UTMI)
clock = 48;
- if ((usbcfg & GUSBCFG_PHYSEL) && (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) ==
- GHWCFG2_FS_PHY_TYPE_DEDICATED)
+ if ((usbcfg & GUSBCFG_PHYSEL) &&
+ hsotg->hw_params.fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED)
clock = 48;
- if ((hprt0 & HPRT0_SPD_MASK) == HPRT0_SPD_HIGH_SPEED)
+ if ((hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT == HPRT0_SPD_HIGH_SPEED)
/* High speed case */
return 125 * clock;
else
@@ -1958,17 +1919,14 @@ int dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
int retval = 0;
- u32 op_mode;
-
- op_mode = hsotg->hwcfg2 & GHWCFG2_OP_MODE_MASK;
switch (val) {
case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
- if (op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
+ if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
valid = 0;
break;
case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
- switch (op_mode) {
+ switch (hsotg->hw_params.op_mode) {
case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
@@ -1992,7 +1950,7 @@ int dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for otg_cap parameter. Check HW configuration.\n",
val);
- switch (op_mode) {
+ switch (hsotg->hw_params.op_mode) {
case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
break;
@@ -2018,8 +1976,7 @@ int dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
int valid = 1;
int retval = 0;
- if (val > 0 && (hsotg->hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) ==
- GHWCFG2_SLAVE_ONLY_ARCH)
+ if (val > 0 && hsotg->hw_params.arch == GHWCFG2_SLAVE_ONLY_ARCH)
valid = 0;
if (val < 0)
valid = 0;
@@ -2029,8 +1986,7 @@ int dwc2_set_param_dma_enable(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for dma_enable parameter. Check HW configuration.\n",
val);
- val = (hsotg->hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) !=
- GHWCFG2_SLAVE_ONLY_ARCH;
+ val = hsotg->hw_params.arch != GHWCFG2_SLAVE_ONLY_ARCH;
dev_dbg(hsotg->dev, "Setting dma_enable to %d\n", val);
retval = -EINVAL;
}
@@ -2045,7 +2001,7 @@ int dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
int retval = 0;
if (val > 0 && (hsotg->core_params->dma_enable <= 0 ||
- !(hsotg->hwcfg4 & GHWCFG4_DESC_DMA)))
+ !hsotg->hw_params.dma_desc_enable))
valid = 0;
if (val < 0)
valid = 0;
@@ -2056,7 +2012,7 @@ int dwc2_set_param_dma_desc_enable(struct dwc2_hsotg *hsotg, int val)
"%d invalid for dma_desc_enable parameter. Check HW configuration.\n",
val);
val = (hsotg->core_params->dma_enable > 0 &&
- (hsotg->hwcfg4 & GHWCFG4_DESC_DMA));
+ hsotg->hw_params.dma_desc_enable);
dev_dbg(hsotg->dev, "Setting dma_desc_enable to %d\n", val);
retval = -EINVAL;
}
@@ -2092,7 +2048,7 @@ int dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
int valid = 1;
int retval = 0;
- if (val > 0 && !(hsotg->hwcfg2 & GHWCFG2_DYNAMIC_FIFO))
+ if (val > 0 && !hsotg->hw_params.enable_dynamic_fifo)
valid = 0;
if (val < 0)
valid = 0;
@@ -2102,7 +2058,7 @@ int dwc2_set_param_enable_dynamic_fifo(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n",
val);
- val = !!(hsotg->hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
+ val = hsotg->hw_params.enable_dynamic_fifo;
dev_dbg(hsotg->dev, "Setting enable_dynamic_fifo to %d\n", val);
retval = -EINVAL;
}
@@ -2116,7 +2072,7 @@ int dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
int valid = 1;
int retval = 0;
- if (val < 16 || val > readl(hsotg->regs + GRXFSIZ))
+ if (val < 16 || val > hsotg->hw_params.host_rx_fifo_size)
valid = 0;
if (!valid) {
@@ -2124,7 +2080,7 @@ int dwc2_set_param_host_rx_fifo_size(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for host_rx_fifo_size. Check HW configuration.\n",
val);
- val = readl(hsotg->regs + GRXFSIZ);
+ val = hsotg->hw_params.host_rx_fifo_size;
dev_dbg(hsotg->dev, "Setting host_rx_fifo_size to %d\n", val);
retval = -EINVAL;
}
@@ -2138,7 +2094,7 @@ int dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
int valid = 1;
int retval = 0;
- if (val < 16 || val > (readl(hsotg->regs + GNPTXFSIZ) >> 16 & 0xffff))
+ if (val < 16 || val > hsotg->hw_params.host_nperio_tx_fifo_size)
valid = 0;
if (!valid) {
@@ -2146,7 +2102,7 @@ int dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n",
val);
- val = readl(hsotg->regs + GNPTXFSIZ) >> 16 & 0xffff;
+ val = hsotg->hw_params.host_nperio_tx_fifo_size;
dev_dbg(hsotg->dev, "Setting host_nperio_tx_fifo_size to %d\n",
val);
retval = -EINVAL;
@@ -2161,7 +2117,7 @@ int dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
int valid = 1;
int retval = 0;
- if (val < 16 || val > (hsotg->hptxfsiz >> 16))
+ if (val < 16 || val > hsotg->hw_params.host_perio_tx_fifo_size)
valid = 0;
if (!valid) {
@@ -2169,7 +2125,7 @@ int dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n",
val);
- val = hsotg->hptxfsiz >> 16;
+ val = hsotg->hw_params.host_perio_tx_fifo_size;
dev_dbg(hsotg->dev, "Setting host_perio_tx_fifo_size to %d\n",
val);
retval = -EINVAL;
@@ -2183,11 +2139,8 @@ int dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
int retval = 0;
- int width = hsotg->hwcfg3 >> GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT &
- GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK >>
- GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
- if (val < 2047 || val >= (1 << (width + 11)))
+ if (val < 2047 || val > hsotg->hw_params.max_transfer_size)
valid = 0;
if (!valid) {
@@ -2195,7 +2148,7 @@ int dwc2_set_param_max_transfer_size(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for max_transfer_size. Check HW configuration.\n",
val);
- val = (1 << (width + 11)) - 1;
+ val = hsotg->hw_params.max_transfer_size;
dev_dbg(hsotg->dev, "Setting max_transfer_size to %d\n", val);
retval = -EINVAL;
}
@@ -2208,11 +2161,8 @@ int dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
int retval = 0;
- int width = hsotg->hwcfg3 >> GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT &
- GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK >>
- GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
- if (val < 15 || val > (1 << (width + 4)))
+ if (val < 15 || val > hsotg->hw_params.max_packet_count)
valid = 0;
if (!valid) {
@@ -2220,7 +2170,7 @@ int dwc2_set_param_max_packet_count(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for max_packet_count. Check HW configuration.\n",
val);
- val = (1 << (width + 4)) - 1;
+ val = hsotg->hw_params.max_packet_count;
dev_dbg(hsotg->dev, "Setting max_packet_count to %d\n", val);
retval = -EINVAL;
}
@@ -2233,10 +2183,8 @@ int dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
{
int valid = 1;
int retval = 0;
- int num_chan = hsotg->hwcfg2 >> GHWCFG2_NUM_HOST_CHAN_SHIFT &
- GHWCFG2_NUM_HOST_CHAN_MASK >> GHWCFG2_NUM_HOST_CHAN_SHIFT;
- if (val < 1 || val > num_chan + 1)
+ if (val < 1 || val > hsotg->hw_params.host_channels)
valid = 0;
if (!valid) {
@@ -2244,7 +2192,7 @@ int dwc2_set_param_host_channels(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for host_channels. Check HW configuration.\n",
val);
- val = num_chan + 1;
+ val = hsotg->hw_params.host_channels;
dev_dbg(hsotg->dev, "Setting host_channels to %d\n", val);
retval = -EINVAL;
}
@@ -2257,8 +2205,7 @@ int dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
{
#ifndef NO_FS_PHY_HW_CHECKS
int valid = 0;
- u32 hs_phy_type;
- u32 fs_phy_type;
+ u32 hs_phy_type, fs_phy_type;
#endif
int retval = 0;
@@ -2279,9 +2226,8 @@ int dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg, int val)
}
#ifndef NO_FS_PHY_HW_CHECKS
- hs_phy_type = hsotg->hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK;
- fs_phy_type = hsotg->hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK;
-
+ hs_phy_type = hsotg->hw_params.hs_phy_type;
+ fs_phy_type = hsotg->hw_params.fs_phy_type;
if (val == DWC2_PHY_TYPE_PARAM_UTMI &&
(hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI ||
hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI))
@@ -2430,14 +2376,29 @@ int dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hsotg *hsotg, int val)
int dwc2_set_param_phy_utmi_width(struct dwc2_hsotg *hsotg, int val)
{
+ int valid = 0;
int retval = 0;
- if (DWC2_PARAM_TEST(val, 8, 8) && DWC2_PARAM_TEST(val, 16, 16)) {
+ switch (hsotg->hw_params.utmi_phy_data_width) {
+ case GHWCFG4_UTMI_PHY_DATA_WIDTH_8:
+ valid = (val == 8);
+ break;
+ case GHWCFG4_UTMI_PHY_DATA_WIDTH_16:
+ valid = (val == 16);
+ break;
+ case GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16:
+ valid = (val == 8 || val == 16);
+ break;
+ }
+
+ if (!valid) {
if (val >= 0) {
- dev_err(hsotg->dev, "Wrong value for phy_utmi_width\n");
- dev_err(hsotg->dev, "phy_utmi_width must be 8 or 16\n");
+ dev_err(hsotg->dev,
+ "%d invalid for phy_utmi_width. Check HW configuration.\n",
+ val);
}
- val = 8;
+ val = (hsotg->hw_params.utmi_phy_data_width ==
+ GHWCFG4_UTMI_PHY_DATA_WIDTH_8) ? 8 : 16;
dev_dbg(hsotg->dev, "Setting phy_utmi_width to %d\n", val);
retval = -EINVAL;
}
@@ -2505,7 +2466,7 @@ int dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
}
#ifndef NO_FS_PHY_HW_CHECKS
- if (val == 1 && !(hsotg->hwcfg3 & GHWCFG3_I2C))
+ if (val == 1 && !(hsotg->hw_params.i2c_enable))
valid = 0;
if (!valid) {
@@ -2513,7 +2474,7 @@ int dwc2_set_param_i2c_enable(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for i2c_enable. Check HW configuration.\n",
val);
- val = !!(hsotg->hwcfg3 & GHWCFG3_I2C);
+ val = hsotg->hw_params.i2c_enable;
dev_dbg(hsotg->dev, "Setting i2c_enable to %d\n", val);
retval = -EINVAL;
}
@@ -2538,7 +2499,7 @@ int dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
valid = 0;
}
- if (val == 1 && !(hsotg->hwcfg4 & GHWCFG4_DED_FIFO_EN))
+ if (val == 1 && !hsotg->hw_params.en_multiple_tx_fifo)
valid = 0;
if (!valid) {
@@ -2546,7 +2507,7 @@ int dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
val);
- val = !!(hsotg->hwcfg4 & GHWCFG4_DED_FIFO_EN);
+ val = hsotg->hw_params.en_multiple_tx_fifo;
dev_dbg(hsotg->dev, "Setting en_multiple_tx_fifo to %d\n", val);
retval = -EINVAL;
}
@@ -2569,7 +2530,7 @@ int dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
valid = 0;
}
- if (val == 1 && hsotg->snpsid < DWC2_CORE_REV_2_92a)
+ if (val == 1 && hsotg->hw_params.snpsid < DWC2_CORE_REV_2_92a)
valid = 0;
if (!valid) {
@@ -2577,7 +2538,7 @@ int dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
dev_err(hsotg->dev,
"%d invalid for parameter reload_ctl. Check HW configuration.\n",
val);
- val = hsotg->snpsid >= DWC2_CORE_REV_2_92a;
+ val = hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_92a;
dev_dbg(hsotg->dev, "Setting reload_ctl to %d\n", val);
retval = -EINVAL;
}
@@ -2586,35 +2547,14 @@ int dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val)
return retval;
}
-int dwc2_set_param_ahb_single(struct dwc2_hsotg *hsotg, int val)
+int dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val)
{
- int valid = 1;
- int retval = 0;
-
- if (DWC2_PARAM_TEST(val, 0, 1)) {
- if (val >= 0) {
- dev_err(hsotg->dev,
- "'%d' invalid for parameter ahb_single\n", val);
- dev_err(hsotg->dev, "ahb_single must be 0 or 1\n");
- }
- valid = 0;
- }
-
- if (val > 0 && hsotg->snpsid < DWC2_CORE_REV_2_94a)
- valid = 0;
-
- if (!valid) {
- if (val >= 0)
- dev_err(hsotg->dev,
- "%d invalid for parameter ahb_single. Check HW configuration.\n",
- val);
- val = 0;
- dev_dbg(hsotg->dev, "Setting ahb_single to %d\n", val);
- retval = -EINVAL;
- }
-
- hsotg->core_params->ahb_single = val;
- return retval;
+ if (val != -1)
+ hsotg->core_params->ahbcfg = val;
+ else
+ hsotg->core_params->ahbcfg = GAHBCFG_HBSTLEN_INCR4 <<
+ GAHBCFG_HBSTLEN_SHIFT;
+ return 0;
}
int dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
@@ -2637,6 +2577,165 @@ int dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val)
return retval;
}
+/**
+ * During device initialization, read various hardware configuration
+ * registers and interpret the contents.
+ */
+int dwc2_get_hwparams(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_hw_params *hw = &hsotg->hw_params;
+ unsigned width;
+ u32 hwcfg1, hwcfg2, hwcfg3, hwcfg4;
+ u32 hptxfsiz, grxfsiz, gnptxfsiz;
+ u32 gusbcfg;
+
+ /*
+ * Attempt to ensure this device is really a DWC_otg Controller.
+ * Read and verify the GSNPSID register contents. The value should be
+ * 0x4f542xxx or 0x4f543xxx, which corresponds to either "OT2" or "OT3",
+ * as in "OTG version 2.xx" or "OTG version 3.xx".
+ */
+ hw->snpsid = readl(hsotg->regs + GSNPSID);
+ if ((hw->snpsid & 0xfffff000) != 0x4f542000 &&
+ (hw->snpsid & 0xfffff000) != 0x4f543000) {
+ dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n",
+ hw->snpsid);
+ return -ENODEV;
+ }
+
+ dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x (snpsid=%x)\n",
+ hw->snpsid >> 12 & 0xf, hw->snpsid >> 8 & 0xf,
+ hw->snpsid >> 4 & 0xf, hw->snpsid & 0xf, hw->snpsid);
+
+ hwcfg1 = readl(hsotg->regs + GHWCFG1);
+ hwcfg2 = readl(hsotg->regs + GHWCFG2);
+ hwcfg3 = readl(hsotg->regs + GHWCFG3);
+ hwcfg4 = readl(hsotg->regs + GHWCFG4);
+ gnptxfsiz = readl(hsotg->regs + GNPTXFSIZ);
+ grxfsiz = readl(hsotg->regs + GRXFSIZ);
+
+ dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hwcfg1);
+ dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hwcfg2);
+ dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hwcfg3);
+ dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hwcfg4);
+ dev_dbg(hsotg->dev, "gnptxfsiz=%08x\n", gnptxfsiz);
+ dev_dbg(hsotg->dev, "grxfsiz=%08x\n", grxfsiz);
+
+ /* Force host mode to get HPTXFSIZ exact power on value */
+ gusbcfg = readl(hsotg->regs + GUSBCFG);
+ gusbcfg |= GUSBCFG_FORCEHOSTMODE;
+ writel(gusbcfg, hsotg->regs + GUSBCFG);
+ usleep_range(100000, 150000);
+
+ hptxfsiz = readl(hsotg->regs + HPTXFSIZ);
+ dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hptxfsiz);
+ gusbcfg = readl(hsotg->regs + GUSBCFG);
+ gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
+ writel(gusbcfg, hsotg->regs + GUSBCFG);
+ usleep_range(100000, 150000);
+
+ /* hwcfg2 */
+ hw->op_mode = (hwcfg2 & GHWCFG2_OP_MODE_MASK) >>
+ GHWCFG2_OP_MODE_SHIFT;
+ hw->arch = (hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) >>
+ GHWCFG2_ARCHITECTURE_SHIFT;
+ hw->enable_dynamic_fifo = !!(hwcfg2 & GHWCFG2_DYNAMIC_FIFO);
+ hw->host_channels = 1 + ((hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
+ GHWCFG2_NUM_HOST_CHAN_SHIFT);
+ hw->hs_phy_type = (hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) >>
+ GHWCFG2_HS_PHY_TYPE_SHIFT;
+ hw->fs_phy_type = (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) >>
+ GHWCFG2_FS_PHY_TYPE_SHIFT;
+ hw->num_dev_ep = (hwcfg2 & GHWCFG2_NUM_DEV_EP_MASK) >>
+ GHWCFG2_NUM_DEV_EP_SHIFT;
+ hw->nperio_tx_q_depth =
+ (hwcfg2 & GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK) >>
+ GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT << 1;
+ hw->host_perio_tx_q_depth =
+ (hwcfg2 & GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK) >>
+ GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT << 1;
+ hw->dev_token_q_depth =
+ (hwcfg2 & GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK) >>
+ GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT;
+
+ /* hwcfg3 */
+ width = (hwcfg3 & GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK) >>
+ GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT;
+ hw->max_transfer_size = (1 << (width + 11)) - 1;
+ width = (hwcfg3 & GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK) >>
+ GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT;
+ hw->max_packet_count = (1 << (width + 4)) - 1;
+ hw->i2c_enable = !!(hwcfg3 & GHWCFG3_I2C);
+ hw->total_fifo_size = (hwcfg3 & GHWCFG3_DFIFO_DEPTH_MASK) >>
+ GHWCFG3_DFIFO_DEPTH_SHIFT;
+
+ /* hwcfg4 */
+ hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN);
+ hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >>
+ GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT;
+ hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA);
+ hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ);
+ hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >>
+ GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT;
+
+ /* fifo sizes */
+ hw->host_rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >>
+ GRXFSIZ_DEPTH_SHIFT;
+ hw->host_nperio_tx_fifo_size = (gnptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+ FIFOSIZE_DEPTH_SHIFT;
+ hw->host_perio_tx_fifo_size = (hptxfsiz & FIFOSIZE_DEPTH_MASK) >>
+ FIFOSIZE_DEPTH_SHIFT;
+
+ dev_dbg(hsotg->dev, "Detected values from hardware:\n");
+ dev_dbg(hsotg->dev, " op_mode=%d\n",
+ hw->op_mode);
+ dev_dbg(hsotg->dev, " arch=%d\n",
+ hw->arch);
+ dev_dbg(hsotg->dev, " dma_desc_enable=%d\n",
+ hw->dma_desc_enable);
+ dev_dbg(hsotg->dev, " power_optimized=%d\n",
+ hw->power_optimized);
+ dev_dbg(hsotg->dev, " i2c_enable=%d\n",
+ hw->i2c_enable);
+ dev_dbg(hsotg->dev, " hs_phy_type=%d\n",
+ hw->hs_phy_type);
+ dev_dbg(hsotg->dev, " fs_phy_type=%d\n",
+ hw->fs_phy_type);
+ dev_dbg(hsotg->dev, " utmi_phy_data_wdith=%d\n",
+ hw->utmi_phy_data_width);
+ dev_dbg(hsotg->dev, " num_dev_ep=%d\n",
+ hw->num_dev_ep);
+ dev_dbg(hsotg->dev, " num_dev_perio_in_ep=%d\n",
+ hw->num_dev_perio_in_ep);
+ dev_dbg(hsotg->dev, " host_channels=%d\n",
+ hw->host_channels);
+ dev_dbg(hsotg->dev, " max_transfer_size=%d\n",
+ hw->max_transfer_size);
+ dev_dbg(hsotg->dev, " max_packet_count=%d\n",
+ hw->max_packet_count);
+ dev_dbg(hsotg->dev, " nperio_tx_q_depth=0x%0x\n",
+ hw->nperio_tx_q_depth);
+ dev_dbg(hsotg->dev, " host_perio_tx_q_depth=0x%0x\n",
+ hw->host_perio_tx_q_depth);
+ dev_dbg(hsotg->dev, " dev_token_q_depth=0x%0x\n",
+ hw->dev_token_q_depth);
+ dev_dbg(hsotg->dev, " enable_dynamic_fifo=%d\n",
+ hw->enable_dynamic_fifo);
+ dev_dbg(hsotg->dev, " en_multiple_tx_fifo=%d\n",
+ hw->en_multiple_tx_fifo);
+ dev_dbg(hsotg->dev, " total_fifo_size=%d\n",
+ hw->total_fifo_size);
+ dev_dbg(hsotg->dev, " host_rx_fifo_size=%d\n",
+ hw->host_rx_fifo_size);
+ dev_dbg(hsotg->dev, " host_nperio_tx_fifo_size=%d\n",
+ hw->host_nperio_tx_fifo_size);
+ dev_dbg(hsotg->dev, " host_perio_tx_fifo_size=%d\n",
+ hw->host_perio_tx_fifo_size);
+ dev_dbg(hsotg->dev, "\n");
+
+ return 0;
+}
+
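+/*
+ * Editor's note (illustrative only, not part of this patch): the register-
+ * field idiom used above and throughout this series is "mask first, then
+ * shift", i.e. (reg & FIELD_MASK) >> FIELD_SHIFT, which yields the raw field
+ * value.  A hypothetical helper capturing it:
+ */
+#if 0
+static inline u32 dwc2_get_field(u32 reg, u32 mask, unsigned int shift)
+{
+	/* e.g. dwc2_get_field(hwcfg2, GHWCFG2_OP_MODE_MASK, GHWCFG2_OP_MODE_SHIFT) */
+	return (reg & mask) >> shift;
+}
+#endif
+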
/*
* This function is called during module intialization to pass module parameters
* for the DWC_otg core. It returns non-0 if any parameters are invalid.
@@ -2681,7 +2780,7 @@ int dwc2_set_parameters(struct dwc2_hsotg *hsotg,
retval |= dwc2_set_param_en_multiple_tx_fifo(hsotg,
params->en_multiple_tx_fifo);
retval |= dwc2_set_param_reload_ctl(hsotg, params->reload_ctl);
- retval |= dwc2_set_param_ahb_single(hsotg, params->ahb_single);
+ retval |= dwc2_set_param_ahbcfg(hsotg, params->ahbcfg);
retval |= dwc2_set_param_otg_ver(hsotg, params->otg_ver);
return retval;
diff --git a/drivers/staging/dwc2/core.h b/drivers/staging/dwc2/core.h
index fc075a7c1de..9102f66d011 100644
--- a/drivers/staging/dwc2/core.h
+++ b/drivers/staging/dwc2/core.h
@@ -68,16 +68,18 @@ enum dwc2_lx_state {
/**
* struct dwc2_core_params - Parameters for configuring the core
*
- * @otg_cap: Specifies the OTG capabilities. The driver will
- * automatically detect the value for this parameter if
- * none is specified.
- * 0 - HNP and SRP capable (default)
+ * @otg_cap: Specifies the OTG capabilities.
+ * 0 - HNP and SRP capable
* 1 - SRP Only capable
- * 2 - No HNP/SRP capable
+ * 2 - No HNP/SRP capable (always available)
+ * Defaults to best available option (0, 1, then 2)
+ * @otg_ver: OTG version supported
+ * 0 - 1.3 (default)
+ * 1 - 2.0
* @dma_enable: Specifies whether to use slave or DMA mode for accessing
* the data FIFOs. The driver will automatically detect the
* value for this parameter if none is specified.
- * 0 - Slave
+ * 0 - Slave (always available)
* 1 - DMA (default, if available)
* @dma_desc_enable: When DMA mode is enabled, specifies whether to use
* address DMA mode or descriptor DMA mode for accessing
@@ -88,39 +90,47 @@ enum dwc2_lx_state {
* @speed: Specifies the maximum speed of operation in host and
* device mode. The actual speed depends on the speed of
* the attached device and the value of phy_type.
- * 0 - High Speed (default)
+ * 0 - High Speed
+ * (default when phy_type is UTMI+ or ULPI)
* 1 - Full Speed
- * @host_support_fs_ls_low_power: Specifies whether low power mode is supported
- * when attached to a Full Speed or Low Speed device in
- * host mode.
- * 0 - Don't support low power mode (default)
- * 1 - Support low power mode
- * @host_ls_low_power_phy_clk: Specifies the PHY clock rate in low power mode
- * when connected to a Low Speed device in host mode. This
- * parameter is applicable only if
- * host_support_fs_ls_low_power is enabled. If phy_type is
- * set to FS then defaults to 6 MHZ otherwise 48 MHZ.
- * 0 - 48 MHz
- * 1 - 6 MHz
+ * (default when phy_type is Full Speed)
* @enable_dynamic_fifo: 0 - Use coreConsultant-specified FIFO size parameters
- * 1 - Allow dynamic FIFO sizing (default)
+ * 1 - Allow dynamic FIFO sizing (default, if available)
+ * @en_multiple_tx_fifo: Specifies whether dedicated per-endpoint transmit FIFOs
+ * are enabled
* @host_rx_fifo_size: Number of 4-byte words in the Rx FIFO in host mode when
* dynamic FIFO sizing is enabled
- * 16 to 32768 (default 1024)
+ * 16 to 32768
+ * Actual maximum value is autodetected and also
+ * the default.
* @host_nperio_tx_fifo_size: Number of 4-byte words in the non-periodic Tx FIFO
* in host mode when dynamic FIFO sizing is enabled
- * 16 to 32768 (default 1024)
+ * 16 to 32768
+ * Actual maximum value is autodetected and also
+ * the default.
* @host_perio_tx_fifo_size: Number of 4-byte words in the periodic Tx FIFO in
* host mode when dynamic FIFO sizing is enabled
- * 16 to 32768 (default 1024)
+ * 16 to 32768
+ * Actual maximum value is autodetected and also
+ * the default.
* @max_transfer_size: The maximum transfer size supported, in bytes
- * 2047 to 65,535 (default 65,535)
+ * 2047 to 65,535
+ * Actual maximum value is autodetected and also
+ * the default.
* @max_packet_count: The maximum number of packets in a transfer
- * 15 to 511 (default 511)
+ * 15 to 511
+ * Actual maximum value is autodetected and also
+ * the default.
* @host_channels: The number of host channel registers to use
- * 1 to 16 (default 12)
+ * 1 to 16
+ * Actual maximum value is autodetected and also
+ * the default.
* @phy_type: Specifies the type of PHY interface to use. By default,
* the driver will automatically detect the phy_type.
+ * 0 - Full Speed Phy
+ * 1 - UTMI+ Phy
+ * 2 - ULPI Phy
+ * Defaults to best available option (2, 1, then 0)
* @phy_utmi_width: Specifies the UTMI+ Data Width (in bits). This parameter
* is applicable for a phy_type of UTMI+ or ULPI. (For a
* ULPI phy_type, this parameter indicates the data width
@@ -129,7 +139,7 @@ enum dwc2_lx_state {
* parameter was set to "8 and 16 bits", meaning that the
* core has been configured to work at either data path
* width.
- * 8 or 16 (default 16)
+ * 8 or 16 (default 16 if available)
* @phy_ulpi_ddr: Specifies whether the ULPI operates at double or single
* data rate. This parameter is only applicable if phy_type
* is ULPI.
@@ -139,27 +149,51 @@ enum dwc2_lx_state {
* data bus
* @phy_ulpi_ext_vbus: For a ULPI phy, specifies whether to use the internal or
* external supply to drive the VBus
+ * 0 - Internal supply (default)
+ * 1 - External supply
* @i2c_enable: Specifies whether to use the I2Cinterface for a full
* speed PHY. This parameter is only applicable if phy_type
* is FS.
* 0 - No (default)
* 1 - Yes
- * @ulpi_fs_ls: True to make ULPI phy operate in FS/LS mode only
- * @ts_dline: True to enable Term Select Dline pulsing
- * @en_multiple_tx_fifo: Specifies whether dedicated per-endpoint transmit FIFOs
- * are enabled
- * @reload_ctl: True to allow dynamic reloading of HFIR register during
- * runtime
- * @ahb_single: This bit enables SINGLE transfers for remainder data in
- * a transfer for DMA mode of operation.
- * 0 - remainder data will be sent using INCR burst size
- * 1 - remainder data will be sent using SINGLE burst size
- * @otg_ver: OTG version supported
- * 0 - 1.3
- * 1 - 2.0
+ * @ulpi_fs_ls: Make ULPI phy operate in FS/LS mode only
+ * 0 - No (default)
+ * 1 - Yes
+ * @host_support_fs_ls_low_power: Specifies whether low power mode is supported
+ * when attached to a Full Speed or Low Speed device in
+ * host mode.
+ * 0 - Don't support low power mode (default)
+ * 1 - Support low power mode
+ * @host_ls_low_power_phy_clk: Specifies the PHY clock rate in low power mode
+ * when connected to a Low Speed device in host
+ * mode. This parameter is applicable only if
+ * host_support_fs_ls_low_power is enabled.
+ * 0 - 48 MHz
+ * (default when phy_type is UTMI+ or ULPI)
+ * 1 - 6 MHz
+ * (default when phy_type is Full Speed)
+ * @ts_dline: Enable Term Select Dline pulsing
+ * 0 - No (default)
+ * 1 - Yes
+ * @reload_ctl: Allow dynamic reloading of HFIR register during runtime
+ * 0 - No (default for core < 2.92a)
+ * 1 - Yes (default for core >= 2.92a)
+ * @ahbcfg: This field allows the default value of the GAHBCFG
+ * register to be overridden
+ * -1 - GAHBCFG value will be set to 0x06
+ * (INCR4, default)
+ * all others - GAHBCFG value will be overridden with
+ * this value
+ * Not all bits can be controlled like this, the
+ * bits defined by GAHBCFG_CTRL_MASK are controlled
+ * by the driver and are ignored in this
+ * configuration value.
*
* The following parameters may be specified when starting the module. These
- * parameters define how the DWC_otg controller should be configured.
+ * parameters define how the DWC_otg controller should be configured. A
+ * value of -1 (or any other out of range value) for any parameter means
+ * to read the value from hardware (if possible) or use the builtin
+ * default described above.
*/
struct dwc2_core_params {
/*
@@ -189,7 +223,85 @@ struct dwc2_core_params {
int host_ls_low_power_phy_clk;
int ts_dline;
int reload_ctl;
- int ahb_single;
+ int ahbcfg;
+};
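+
+/*
+ * Editor's example (illustrative only, not part of this patch): per the
+ * kernel-doc above, -1 in a dwc2_core_params field means "autodetect from
+ * hardware or use the built-in default".  A platform might start from
+ * autodetect values and override only what it knows; only a few fields are
+ * shown and the structure name is hypothetical.
+ */
+#if 0
+static const struct dwc2_core_params params_mostly_autodetect = {
+	.otg_cap		= -1,
+	.dma_enable		= -1,
+	.speed			= -1,
+	.phy_type		= -1,
+	.host_rx_fifo_size	= -1,
+	.ahbcfg			= -1,	/* GAHBCFG then defaults to INCR4 */
+};
+#endif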
+
+/**
+ * struct dwc2_hw_params - Autodetected parameters.
+ *
+ * These parameters are the various parameters read from hardware
+ * registers during initialization. They typically contain the best
+ * supported or maximum value that can be configured in the
+ * corresponding dwc2_core_params value.
+ *
+ * The values that are not in dwc2_core_params are documented below.
+ *
+ * @op_mode Mode of Operation
+ * 0 - HNP- and SRP-Capable OTG (Host & Device)
+ * 1 - SRP-Capable OTG (Host & Device)
+ * 2 - Non-HNP and Non-SRP Capable OTG (Host & Device)
+ * 3 - SRP-Capable Device
+ * 4 - Non-OTG Device
+ * 5 - SRP-Capable Host
+ * 6 - Non-OTG Host
+ * @arch Architecture
+ * 0 - Slave only
+ * 1 - External DMA
+ * 2 - Internal DMA
+ * @power_optimized Are power optimizations enabled?
+ * @num_dev_ep Number of device endpoints available
+ * @num_dev_perio_in_ep Number of device periodic IN endpoints
+ * available
+ * @dev_token_q_depth Device Mode IN Token Sequence Learning Queue
+ * Depth
+ * 0 to 30
+ * @host_perio_tx_q_depth
+ * Host Mode Periodic Request Queue Depth
+ * 2, 4 or 8
+ * @nperio_tx_q_depth
+ * Non-Periodic Request Queue Depth
+ * 2, 4 or 8
+ * @hs_phy_type High-speed PHY interface type
+ * 0 - High-speed interface not supported
+ * 1 - UTMI+
+ * 2 - ULPI
+ * 3 - UTMI+ and ULPI
+ * @fs_phy_type Full-speed PHY interface type
+ * 0 - Full speed interface not supported
+ * 1 - Dedicated full speed interface
+ * 2 - FS pins shared with UTMI+ pins
+ * 3 - FS pins shared with ULPI pins
+ * @total_fifo_size: Total internal RAM for FIFOs (bytes)
+ * @utmi_phy_data_width UTMI+ PHY data width
+ * 0 - 8 bits
+ * 1 - 16 bits
+ * 2 - 8 or 16 bits
+ * @snpsid: Value from SNPSID register
+ */
+struct dwc2_hw_params {
+ unsigned op_mode:3;
+ unsigned arch:2;
+ unsigned dma_desc_enable:1;
+ unsigned enable_dynamic_fifo:1;
+ unsigned en_multiple_tx_fifo:1;
+ unsigned host_rx_fifo_size:16;
+ unsigned host_nperio_tx_fifo_size:16;
+ unsigned host_perio_tx_fifo_size:16;
+ unsigned nperio_tx_q_depth:3;
+ unsigned host_perio_tx_q_depth:3;
+ unsigned dev_token_q_depth:5;
+ unsigned max_transfer_size:26;
+ unsigned max_packet_count:11;
+ unsigned host_channels:4;
+ unsigned hs_phy_type:2;
+ unsigned fs_phy_type:2;
+ unsigned i2c_enable:1;
+ unsigned num_dev_ep:4;
+ unsigned num_dev_perio_in_ep:4;
+ unsigned total_fifo_size:16;
+ unsigned power_optimized:1;
+ unsigned utmi_phy_data_width:2;
+ u32 snpsid;
};
/**
@@ -199,15 +311,8 @@ struct dwc2_core_params {
* @dev: The struct device pointer
* @regs: Pointer to controller regs
* @core_params: Parameters that define how the core should be configured
- * @hwcfg1: Hardware Configuration - stored here for convenience
- * @hwcfg2: Hardware Configuration - stored here for convenience
- * @hwcfg3: Hardware Configuration - stored here for convenience
- * @hwcfg4: Hardware Configuration - stored here for convenience
- * @hptxfsiz: Hardware Configuration - stored here for convenience
- * @snpsid: Value from SNPSID register
- * @total_fifo_size: Total internal RAM for FIFOs (bytes)
- * @rx_fifo_size: Size of Rx FIFO (bytes)
- * @nperio_tx_fifo_size: Size of Non-periodic Tx FIFO (Bytes)
+ * @hw_params: Parameters that were autodetected from the
+ * hardware registers
* @op_state: The operational State, during transitions (a_host=>
* a_peripheral and b_device=>b_host) this may not match
* the core, but allows the software to determine
@@ -295,16 +400,10 @@ struct dwc2_core_params {
struct dwc2_hsotg {
struct device *dev;
void __iomem *regs;
+ /** Params detected from hardware */
+ struct dwc2_hw_params hw_params;
+ /** Params to actually use */
struct dwc2_core_params *core_params;
- u32 hwcfg1;
- u32 hwcfg2;
- u32 hwcfg3;
- u32 hwcfg4;
- u32 hptxfsiz;
- u32 snpsid;
- u16 total_fifo_size;
- u16 rx_fifo_size;
- u16 nperio_tx_fifo_size;
enum usb_otg_state op_state;
unsigned int queuing_high_bandwidth:1;
@@ -643,7 +742,7 @@ extern int dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hsotg *hsotg,
extern int dwc2_set_param_reload_ctl(struct dwc2_hsotg *hsotg, int val);
-extern int dwc2_set_param_ahb_single(struct dwc2_hsotg *hsotg, int val);
+extern int dwc2_set_param_ahbcfg(struct dwc2_hsotg *hsotg, int val);
extern int dwc2_set_param_otg_ver(struct dwc2_hsotg *hsotg, int val);
diff --git a/drivers/staging/dwc2/core_intr.c b/drivers/staging/dwc2/core_intr.c
index 98c51bba662..07cfa2f6aa2 100644
--- a/drivers/staging/dwc2/core_intr.c
+++ b/drivers/staging/dwc2/core_intr.c
@@ -166,7 +166,7 @@ static void dwc2_handle_otg_intr(struct dwc2_hsotg *hsotg)
* WA for 3.00a- HW is not setting cur_mode, even sometimes
* this does not help
*/
- if (hsotg->snpsid >= DWC2_CORE_REV_3_00a)
+ if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_3_00a)
udelay(100);
if (gotgctl & GOTGCTL_HSTNEGSCS) {
if (dwc2_is_host_mode(hsotg)) {
@@ -380,7 +380,7 @@ static void dwc2_handle_usb_suspend_intr(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev,
"DSTS.Suspend Status=%d HWCFG4.Power Optimize=%d\n",
!!(dsts & DSTS_SUSPSTS),
- !!(hsotg->hwcfg4 & GHWCFG4_POWER_OPTIMIZ));
+ hsotg->hw_params.power_optimized);
} else {
if (hsotg->op_state == OTG_STATE_A_PERIPHERAL) {
dev_dbg(hsotg->dev, "a_peripheral->a_host\n");
diff --git a/drivers/staging/dwc2/hcd.c b/drivers/staging/dwc2/hcd.c
index 2ed54b172a3..da0d35cc33c 100644
--- a/drivers/staging/dwc2/hcd.c
+++ b/drivers/staging/dwc2/hcd.c
@@ -134,11 +134,8 @@ static void dwc2_kill_urbs_in_qh_list(struct dwc2_hsotg *hsotg,
list_for_each_entry_safe(qh, qh_tmp, qh_list, qh_list_entry) {
list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
qtd_list_entry) {
- if (qtd->urb != NULL) {
- dwc2_host_complete(hsotg, qtd->urb->priv,
- qtd->urb, -ETIMEDOUT);
- dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
- }
+ dwc2_host_complete(hsotg, qtd, -ETIMEDOUT);
+ dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
}
}
}
@@ -421,6 +418,8 @@ static int dwc2_hcd_urb_dequeue(struct dwc2_hsotg *hsotg,
return -EINVAL;
}
+ urb->priv = NULL;
+
if (urb_qtd->in_process && qh->channel) {
dwc2_dump_channel_info(hsotg, qh->channel);
@@ -1006,10 +1005,10 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
dev_vdbg(hsotg->dev, "Queue periodic transactions\n");
tx_status = readl(hsotg->regs + HPTXSTS);
- qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
- TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
- fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
- TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+ qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
+ TXSTS_QSPCAVAIL_SHIFT;
+ fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
+ TXSTS_FSPCAVAIL_SHIFT;
if (dbg_perio()) {
dev_vdbg(hsotg->dev, " P Tx Req Queue Space Avail (before queue): %d\n",
@@ -1021,7 +1020,9 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
qh_ptr = hsotg->periodic_sched_assigned.next;
while (qh_ptr != &hsotg->periodic_sched_assigned) {
tx_status = readl(hsotg->regs + HPTXSTS);
- if ((tx_status & TXSTS_QSPCAVAIL_MASK) == 0) {
+ qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
+ TXSTS_QSPCAVAIL_SHIFT;
+ if (qspcavail == 0) {
no_queue_space = 1;
break;
}
@@ -1047,8 +1048,8 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
qh->channel->multi_count > 1)
hsotg->queuing_high_bandwidth = 1;
- fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
- TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+ fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
+ TXSTS_FSPCAVAIL_SHIFT;
status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);
if (status < 0) {
no_fifo_space = 1;
@@ -1079,10 +1080,10 @@ static void dwc2_process_periodic_channels(struct dwc2_hsotg *hsotg)
if (hsotg->core_params->dma_enable <= 0) {
tx_status = readl(hsotg->regs + HPTXSTS);
- qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
- TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
- fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
- TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+ qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
+ TXSTS_QSPCAVAIL_SHIFT;
+ fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
+ TXSTS_FSPCAVAIL_SHIFT;
if (dbg_perio()) {
dev_vdbg(hsotg->dev,
" P Tx Req Queue Space Avail (after queue): %d\n",
@@ -1144,10 +1145,10 @@ static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
dev_vdbg(hsotg->dev, "Queue non-periodic transactions\n");
tx_status = readl(hsotg->regs + GNPTXSTS);
- qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
- TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
- fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
- TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+ qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
+ TXSTS_QSPCAVAIL_SHIFT;
+ fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
+ TXSTS_FSPCAVAIL_SHIFT;
dev_vdbg(hsotg->dev, " NP Tx Req Queue Space Avail (before queue): %d\n",
qspcavail);
dev_vdbg(hsotg->dev, " NP Tx FIFO Space Avail (before queue): %d\n",
@@ -1167,8 +1168,8 @@ static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
*/
do {
tx_status = readl(hsotg->regs + GNPTXSTS);
- qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
- TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
+ qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
+ TXSTS_QSPCAVAIL_SHIFT;
if (hsotg->core_params->dma_enable <= 0 && qspcavail == 0) {
no_queue_space = 1;
break;
@@ -1183,8 +1184,8 @@ static void dwc2_process_non_periodic_channels(struct dwc2_hsotg *hsotg)
if (qh->tt_buffer_dirty)
goto next;
- fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
- TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+ fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
+ TXSTS_FSPCAVAIL_SHIFT;
status = dwc2_queue_transaction(hsotg, qh->channel, fspcavail);
if (status > 0) {
@@ -1204,10 +1205,10 @@ next:
if (hsotg->core_params->dma_enable <= 0) {
tx_status = readl(hsotg->regs + GNPTXSTS);
- qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
- TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
- fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
- TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+ qspcavail = (tx_status & TXSTS_QSPCAVAIL_MASK) >>
+ TXSTS_QSPCAVAIL_SHIFT;
+ fspcavail = (tx_status & TXSTS_FSPCAVAIL_MASK) >>
+ TXSTS_FSPCAVAIL_SHIFT;
dev_vdbg(hsotg->dev,
" NP Tx Req Queue Space Avail (after queue): %d\n",
qspcavail);
@@ -1613,7 +1614,7 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
if (hprt0 & HPRT0_PWR)
port_status |= USB_PORT_STAT_POWER;
- speed = hprt0 & HPRT0_SPD_MASK;
+ speed = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
if (speed == HPRT0_SPD_HIGH_SPEED)
port_status |= USB_PORT_STAT_HIGH_SPEED;
else if (speed == HPRT0_SPD_LOW_SPEED)
@@ -1762,11 +1763,9 @@ int dwc2_hcd_get_frame_number(struct dwc2_hsotg *hsotg)
#ifdef DWC2_DEBUG_SOF
dev_vdbg(hsotg->dev, "DWC OTG HCD GET FRAME NUMBER %d\n",
- hfnum >> HFNUM_FRNUM_SHIFT &
- HFNUM_FRNUM_MASK >> HFNUM_FRNUM_SHIFT);
+ (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT);
#endif
- return hfnum >> HFNUM_FRNUM_SHIFT &
- HFNUM_FRNUM_MASK >> HFNUM_FRNUM_SHIFT;
+ return (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT;
}
int dwc2_hcd_is_b_host(struct dwc2_hsotg *hsotg)
@@ -1917,18 +1916,14 @@ void dwc2_hcd_dump_state(struct dwc2_hsotg *hsotg)
dev_dbg(hsotg->dev, " periodic_usecs: %d\n", hsotg->periodic_usecs);
np_tx_status = readl(hsotg->regs + GNPTXSTS);
dev_dbg(hsotg->dev, " NP Tx Req Queue Space Avail: %d\n",
- np_tx_status >> TXSTS_QSPCAVAIL_SHIFT &
- TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT);
+ (np_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT);
dev_dbg(hsotg->dev, " NP Tx FIFO Space Avail: %d\n",
- np_tx_status >> TXSTS_FSPCAVAIL_SHIFT &
- TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT);
+ (np_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT);
p_tx_status = readl(hsotg->regs + HPTXSTS);
dev_dbg(hsotg->dev, " P Tx Req Queue Space Avail: %d\n",
- p_tx_status >> TXSTS_QSPCAVAIL_SHIFT &
- TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT);
+ (p_tx_status & TXSTS_QSPCAVAIL_MASK) >> TXSTS_QSPCAVAIL_SHIFT);
dev_dbg(hsotg->dev, " P Tx FIFO Space Avail: %d\n",
- p_tx_status >> TXSTS_FSPCAVAIL_SHIFT &
- TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT);
+ (p_tx_status & TXSTS_FSPCAVAIL_MASK) >> TXSTS_FSPCAVAIL_SHIFT);
dwc2_hcd_dump_frrem(hsotg);
dwc2_dump_global_registers(hsotg);
dwc2_dump_host_registers(hsotg);
@@ -2088,23 +2083,29 @@ static void dwc2_free_bus_bandwidth(struct usb_hcd *hcd, u16 bw,
*
* Must be called with interrupt disabled and spinlock held
*/
-void dwc2_host_complete(struct dwc2_hsotg *hsotg, void *context,
- struct dwc2_hcd_urb *dwc2_urb, int status)
+void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
+ int status)
{
- struct urb *urb = context;
+ struct urb *urb;
int i;
- if (!urb) {
- dev_dbg(hsotg->dev, "## %s: context is NULL ##\n", __func__);
+ if (!qtd) {
+ dev_dbg(hsotg->dev, "## %s: qtd is NULL ##\n", __func__);
return;
}
- if (!dwc2_urb) {
- dev_dbg(hsotg->dev, "## %s: dwc2_urb is NULL ##\n", __func__);
+ if (!qtd->urb) {
+ dev_dbg(hsotg->dev, "## %s: qtd->urb is NULL ##\n", __func__);
+ return;
+ }
+
+ urb = qtd->urb->priv;
+ if (!urb) {
+ dev_dbg(hsotg->dev, "## %s: urb->priv is NULL ##\n", __func__);
return;
}
- urb->actual_length = dwc2_hcd_urb_get_actual_length(dwc2_urb);
+ urb->actual_length = dwc2_hcd_urb_get_actual_length(qtd->urb);
if (dbg_urb(urb))
dev_vdbg(hsotg->dev,
@@ -2121,18 +2122,17 @@ void dwc2_host_complete(struct dwc2_hsotg *hsotg, void *context,
}
if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
- urb->error_count = dwc2_hcd_urb_get_error_count(dwc2_urb);
+ urb->error_count = dwc2_hcd_urb_get_error_count(qtd->urb);
for (i = 0; i < urb->number_of_packets; ++i) {
urb->iso_frame_desc[i].actual_length =
dwc2_hcd_urb_get_iso_desc_actual_length(
- dwc2_urb, i);
+ qtd->urb, i);
urb->iso_frame_desc[i].status =
- dwc2_hcd_urb_get_iso_desc_status(dwc2_urb, i);
+ dwc2_hcd_urb_get_iso_desc_status(qtd->urb, i);
}
}
urb->status = status;
- urb->hcpriv = NULL;
if (!status) {
if ((urb->transfer_flags & URB_SHORT_NOT_OK) &&
urb->actual_length < urb->transfer_buffer_length)
@@ -2149,7 +2149,10 @@ void dwc2_host_complete(struct dwc2_hsotg *hsotg, void *context,
urb);
}
- kfree(dwc2_urb);
+ usb_hcd_unlink_urb_from_ep(dwc2_hsotg_to_hcd(hsotg), urb);
+ urb->hcpriv = NULL;
+ kfree(qtd->urb);
+ qtd->urb = NULL;
spin_unlock(&hsotg->lock);
usb_hcd_giveback_urb(dwc2_hsotg_to_hcd(hsotg), urb, status);
@@ -2337,8 +2340,8 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
struct usb_host_endpoint *ep = urb->ep;
struct dwc2_hcd_urb *dwc2_urb;
int i;
+ int retval;
int alloc_bandwidth = 0;
- int retval = 0;
u8 ep_type = 0;
u32 tflags = 0;
void *buf;
@@ -2389,14 +2392,15 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
!(usb_pipein(urb->pipe))));
buf = urb->transfer_buffer;
+
if (hcd->self.uses_dma) {
- /*
- * Calculate virtual address from physical address, because
- * some class driver may not fill transfer_buffer.
- * In Buffer DMA mode virtual address is used, when handling
- * non-DWORD aligned buffers.
- */
- buf = bus_to_virt(urb->transfer_dma);
+ if (!buf && (urb->transfer_dma & 3)) {
+ dev_err(hsotg->dev,
+ "%s: unaligned transfer with no transfer_buffer",
+ __func__);
+ retval = -EINVAL;
+ goto fail1;
+ }
}
if (!(urb->transfer_flags & URB_NO_INTERRUPT))
@@ -2420,21 +2424,36 @@ static int _dwc2_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
urb->iso_frame_desc[i].length);
urb->hcpriv = dwc2_urb;
- retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, &ep->hcpriv,
- mem_flags);
- if (retval) {
- urb->hcpriv = NULL;
- kfree(dwc2_urb);
- } else {
- if (alloc_bandwidth) {
- spin_lock_irqsave(&hsotg->lock, flags);
- dwc2_allocate_bus_bandwidth(hcd,
- dwc2_hcd_get_ep_bandwidth(hsotg, ep),
- urb);
- spin_unlock_irqrestore(&hsotg->lock, flags);
- }
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ retval = usb_hcd_link_urb_to_ep(hcd, urb);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ if (retval)
+ goto fail1;
+
+ retval = dwc2_hcd_urb_enqueue(hsotg, dwc2_urb, &ep->hcpriv, mem_flags);
+ if (retval)
+ goto fail2;
+
+ if (alloc_bandwidth) {
+ spin_lock_irqsave(&hsotg->lock, flags);
+ dwc2_allocate_bus_bandwidth(hcd,
+ dwc2_hcd_get_ep_bandwidth(hsotg, ep),
+ urb);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
}
+ return 0;
+
+fail2:
+ spin_lock_irqsave(&hsotg->lock, flags);
+ dwc2_urb->priv = NULL;
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+fail1:
+ urb->hcpriv = NULL;
+ kfree(dwc2_urb);
+
return retval;
}
@@ -2445,7 +2464,7 @@ static int _dwc2_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
int status)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
- int rc = 0;
+ int rc;
unsigned long flags;
dev_dbg(hsotg->dev, "DWC OTG HCD URB Dequeue\n");
@@ -2453,6 +2472,10 @@ static int _dwc2_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
spin_lock_irqsave(&hsotg->lock, flags);
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (rc)
+ goto out;
+
if (!urb->hcpriv) {
dev_dbg(hsotg->dev, "## urb->hcpriv is NULL ##\n");
goto out;
@@ -2460,6 +2483,8 @@ static int _dwc2_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
rc = dwc2_hcd_urb_dequeue(hsotg, urb->hcpriv);
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+
kfree(urb->hcpriv);
urb->hcpriv = NULL;
@@ -2653,7 +2678,7 @@ static void dwc2_hcd_free(struct dwc2_hsotg *hsotg)
writel(ahbcfg, hsotg->regs + GAHBCFG);
writel(0, hsotg->regs + GINTMSK);
- if (hsotg->snpsid >= DWC2_CORE_REV_3_00a) {
+ if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_3_00a) {
dctl = readl(hsotg->regs + DCTL);
dctl |= DCTL_SFTDISCON;
writel(dctl, hsotg->regs + DCTL);
@@ -2690,7 +2715,7 @@ void dwc2_set_all_params(struct dwc2_core_params *params, int value)
int i;
for (i = 0; i < size; i++)
- p[i] = -1;
+ p[i] = value;
}
EXPORT_SYMBOL_GPL(dwc2_set_all_params);
@@ -2705,79 +2730,22 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq,
{
struct usb_hcd *hcd;
struct dwc2_host_chan *channel;
- u32 snpsid, gusbcfg, hcfg;
+ u32 hcfg;
int i, num_channels;
- int retval = -ENOMEM;
+ int retval;
dev_dbg(hsotg->dev, "DWC OTG HCD INIT\n");
- /*
- * Attempt to ensure this device is really a DWC_otg Controller.
- * Read and verify the GSNPSID register contents. The value should be
- * 0x45f42xxx or 0x45f43xxx, which corresponds to either "OT2" or "OT3",
- * as in "OTG version 2.xx" or "OTG version 3.xx".
- */
- snpsid = readl(hsotg->regs + GSNPSID);
- if ((snpsid & 0xfffff000) != 0x4f542000 &&
- (snpsid & 0xfffff000) != 0x4f543000) {
- dev_err(hsotg->dev, "Bad value for GSNPSID: 0x%08x\n", snpsid);
- retval = -ENODEV;
- goto error1;
- }
+ /* Detect config values from hardware */
+ retval = dwc2_get_hwparams(hsotg);
- /*
- * Store the contents of the hardware configuration registers here for
- * easy access later
- */
- hsotg->hwcfg1 = readl(hsotg->regs + GHWCFG1);
- hsotg->hwcfg2 = readl(hsotg->regs + GHWCFG2);
- hsotg->hwcfg3 = readl(hsotg->regs + GHWCFG3);
- hsotg->hwcfg4 = readl(hsotg->regs + GHWCFG4);
-
- dev_dbg(hsotg->dev, "hwcfg1=%08x\n", hsotg->hwcfg1);
- dev_dbg(hsotg->dev, "hwcfg2=%08x\n", hsotg->hwcfg2);
- dev_dbg(hsotg->dev, "hwcfg3=%08x\n", hsotg->hwcfg3);
- dev_dbg(hsotg->dev, "hwcfg4=%08x\n", hsotg->hwcfg4);
-
- /* Force host mode to get HPTXFSIZ exact power on value */
- gusbcfg = readl(hsotg->regs + GUSBCFG);
- gusbcfg |= GUSBCFG_FORCEHOSTMODE;
- writel(gusbcfg, hsotg->regs + GUSBCFG);
- usleep_range(100000, 150000);
-
- hsotg->hptxfsiz = readl(hsotg->regs + HPTXFSIZ);
- dev_dbg(hsotg->dev, "hptxfsiz=%08x\n", hsotg->hptxfsiz);
- gusbcfg = readl(hsotg->regs + GUSBCFG);
- gusbcfg &= ~GUSBCFG_FORCEHOSTMODE;
- writel(gusbcfg, hsotg->regs + GUSBCFG);
- usleep_range(100000, 150000);
+ if (retval)
+ return retval;
+
+ retval = -ENOMEM;
hcfg = readl(hsotg->regs + HCFG);
dev_dbg(hsotg->dev, "hcfg=%08x\n", hcfg);
- dev_dbg(hsotg->dev, "op_mode=%0x\n",
- hsotg->hwcfg2 >> GHWCFG2_OP_MODE_SHIFT &
- GHWCFG2_OP_MODE_MASK >> GHWCFG2_OP_MODE_SHIFT);
- dev_dbg(hsotg->dev, "arch=%0x\n",
- hsotg->hwcfg2 >> GHWCFG2_ARCHITECTURE_SHIFT &
- GHWCFG2_ARCHITECTURE_MASK >> GHWCFG2_ARCHITECTURE_SHIFT);
- dev_dbg(hsotg->dev, "num_dev_ep=%d\n",
- hsotg->hwcfg2 >> GHWCFG2_NUM_DEV_EP_SHIFT &
- GHWCFG2_NUM_DEV_EP_MASK >> GHWCFG2_NUM_DEV_EP_SHIFT);
- dev_dbg(hsotg->dev, "max_host_chan=%d\n",
- hsotg->hwcfg2 >> GHWCFG2_NUM_HOST_CHAN_SHIFT &
- GHWCFG2_NUM_HOST_CHAN_MASK >> GHWCFG2_NUM_HOST_CHAN_SHIFT);
- dev_dbg(hsotg->dev, "nonperio_tx_q_depth=0x%0x\n",
- hsotg->hwcfg2 >> GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT &
- GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK >>
- GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT);
- dev_dbg(hsotg->dev, "host_perio_tx_q_depth=0x%0x\n",
- hsotg->hwcfg2 >> GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT &
- GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK >>
- GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT);
- dev_dbg(hsotg->dev, "dev_token_q_depth=0x%0x\n",
- hsotg->hwcfg2 >> GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT &
- GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK >>
- GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT);
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
hsotg->frame_num_array = kzalloc(sizeof(*hsotg->frame_num_array) *
@@ -2801,22 +2769,30 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq,
/* Validate parameter values */
dwc2_set_parameters(hsotg, params);
+ /* Check if the bus driver or platform code has setup a dma_mask */
+ if (hsotg->core_params->dma_enable > 0 &&
+ hsotg->dev->dma_mask == NULL) {
+ dev_warn(hsotg->dev,
+ "dma_mask not set, disabling DMA\n");
+ hsotg->core_params->dma_enable = 0;
+ hsotg->core_params->dma_desc_enable = 0;
+ }
+
/* Set device flags indicating whether the HCD supports DMA */
if (hsotg->core_params->dma_enable > 0) {
if (dma_set_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
dev_warn(hsotg->dev, "can't set DMA mask\n");
- if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(31)) < 0)
- dev_warn(hsotg->dev,
- "can't enable workaround for >2GB RAM\n");
- } else {
- dma_set_mask(hsotg->dev, 0);
- dma_set_coherent_mask(hsotg->dev, 0);
+ if (dma_set_coherent_mask(hsotg->dev, DMA_BIT_MASK(32)) < 0)
+ dev_warn(hsotg->dev, "can't set coherent DMA mask\n");
}
hcd = usb_create_hcd(&dwc2_hc_driver, hsotg->dev, dev_name(hsotg->dev));
if (!hcd)
goto error1;
+ if (hsotg->core_params->dma_enable <= 0)
+ hcd->self.uses_dma = 0;
+
hcd->has_tt = 1;
spin_lock_init(&hsotg->lock);
@@ -2843,11 +2819,6 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq,
}
INIT_WORK(&hsotg->wf_otg, dwc2_conn_id_status_change);
- hsotg->snpsid = readl(hsotg->regs + GSNPSID);
- dev_dbg(hsotg->dev, "Core Release: %1x.%1x%1x%1x\n",
- hsotg->snpsid >> 12 & 0xf, hsotg->snpsid >> 8 & 0xf,
- hsotg->snpsid >> 4 & 0xf, hsotg->snpsid & 0xf);
-
setup_timer(&hsotg->wkp_timer, dwc2_wakeup_detected,
(unsigned long)hsotg);
@@ -2922,8 +2893,6 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg, int irq,
if (retval < 0)
goto error3;
- dwc2_dump_global_registers(hsotg);
- dwc2_dump_host_registers(hsotg);
dwc2_hcd_dump_state(hsotg);
dwc2_enable_global_interrupts(hsotg);
diff --git a/drivers/staging/dwc2/hcd.h b/drivers/staging/dwc2/hcd.h
index cf6c055aec8..cc0a1170831 100644
--- a/drivers/staging/dwc2/hcd.h
+++ b/drivers/staging/dwc2/hcd.h
@@ -122,11 +122,11 @@ struct dwc2_host_chan {
unsigned ep_type:2;
unsigned max_packet:11;
unsigned data_pid_start:2;
-#define DWC2_HC_PID_DATA0 (TSIZ_SC_MC_PID_DATA0 >> TSIZ_SC_MC_PID_SHIFT)
-#define DWC2_HC_PID_DATA2 (TSIZ_SC_MC_PID_DATA2 >> TSIZ_SC_MC_PID_SHIFT)
-#define DWC2_HC_PID_DATA1 (TSIZ_SC_MC_PID_DATA1 >> TSIZ_SC_MC_PID_SHIFT)
-#define DWC2_HC_PID_MDATA (TSIZ_SC_MC_PID_MDATA >> TSIZ_SC_MC_PID_SHIFT)
-#define DWC2_HC_PID_SETUP (TSIZ_SC_MC_PID_SETUP >> TSIZ_SC_MC_PID_SHIFT)
+#define DWC2_HC_PID_DATA0 TSIZ_SC_MC_PID_DATA0
+#define DWC2_HC_PID_DATA2 TSIZ_SC_MC_PID_DATA2
+#define DWC2_HC_PID_DATA1 TSIZ_SC_MC_PID_DATA1
+#define DWC2_HC_PID_MDATA TSIZ_SC_MC_PID_MDATA
+#define DWC2_HC_PID_SETUP TSIZ_SC_MC_PID_SETUP
unsigned multi_count:2;
@@ -146,10 +146,10 @@ struct dwc2_host_chan {
u8 hub_addr;
u8 hub_port;
u8 xact_pos;
-#define DWC2_HCSPLT_XACTPOS_MID (HCSPLT_XACTPOS_MID >> HCSPLT_XACTPOS_SHIFT)
-#define DWC2_HCSPLT_XACTPOS_END (HCSPLT_XACTPOS_END >> HCSPLT_XACTPOS_SHIFT)
-#define DWC2_HCSPLT_XACTPOS_BEGIN (HCSPLT_XACTPOS_BEGIN >> HCSPLT_XACTPOS_SHIFT)
-#define DWC2_HCSPLT_XACTPOS_ALL (HCSPLT_XACTPOS_ALL >> HCSPLT_XACTPOS_SHIFT)
+#define DWC2_HCSPLT_XACTPOS_MID HCSPLT_XACTPOS_MID
+#define DWC2_HCSPLT_XACTPOS_END HCSPLT_XACTPOS_END
+#define DWC2_HCSPLT_XACTPOS_BEGIN HCSPLT_XACTPOS_BEGIN
+#define DWC2_HCSPLT_XACTPOS_ALL HCSPLT_XACTPOS_ALL
u8 requests;
u8 schinfo;
@@ -232,16 +232,19 @@ enum dwc2_transaction_type {
* - DWC2_HC_PID_DATA1
* @ping_state: Ping state
* @do_split: Full/low speed endpoint on high-speed hub requires split
- * @qtd_list: List of QTDs for this QH
- * @channel: Host channel currently processing transfers for this QH
+ * @td_first: Index of first activated isochronous transfer descriptor
+ * @td_last: Index of last activated isochronous transfer descriptor
* @usecs: Bandwidth in microseconds per (micro)frame
* @interval: Interval between transfers in (micro)frames
- * @sched_frame: (micro)frame to initialize a periodic transfer.
+ * @sched_frame: (Micro)frame to initialize a periodic transfer.
* The transfer executes in the following (micro)frame.
* @start_split_frame: (Micro)frame at which last start split was initialized
+ * @ntd: Actual number of transfer descriptors in a list
* @dw_align_buf: Used instead of original buffer if its physical address
* is not dword-aligned
* @dw_align_buf_dma: DMA address for align_buf
+ * @qtd_list: List of QTDs for this QH
+ * @channel: Host channel currently processing transfers for this QH
* @qh_list_entry: Entry for QH in either the periodic or non-periodic
* schedule
* @desc_list: List of transfer descriptors
@@ -249,9 +252,6 @@ enum dwc2_transaction_type {
* @n_bytes: Xfer Bytes array. Each element corresponds to a transfer
* descriptor and indicates original XferSize value for the
* descriptor
- * @ntd: Actual number of transfer descriptors in a list
- * @td_first: Index of first activated isochronous transfer descriptor
- * @td_last: Index of last activated isochronous transfer descriptor
* @tt_buffer_dirty True if clear_tt_buffer_complete is pending
*
* A Queue Head (QH) holds the static characteristics of an endpoint and
@@ -266,21 +266,21 @@ struct dwc2_qh {
u8 data_toggle;
u8 ping_state;
u8 do_split;
- struct list_head qtd_list;
- struct dwc2_host_chan *channel;
+ u8 td_first;
+ u8 td_last;
u16 usecs;
u16 interval;
u16 sched_frame;
u16 start_split_frame;
+ u16 ntd;
u8 *dw_align_buf;
dma_addr_t dw_align_buf_dma;
+ struct list_head qtd_list;
+ struct dwc2_host_chan *channel;
struct list_head qh_list_entry;
struct dwc2_hcd_dma_desc *desc_list;
dma_addr_t desc_list_dma;
u32 *n_bytes;
- u16 ntd;
- u8 td_first;
- u8 td_last;
unsigned tt_buffer_dirty:1;
};
@@ -453,6 +453,7 @@ extern void dwc2_hcd_remove(struct dwc2_hsotg *hsotg);
extern int dwc2_set_parameters(struct dwc2_hsotg *hsotg,
const struct dwc2_core_params *params);
extern void dwc2_set_all_params(struct dwc2_core_params *params, int value);
+extern int dwc2_get_hwparams(struct dwc2_hsotg *hsotg);
/* Transaction Execution Functions */
extern enum dwc2_transaction_type dwc2_hcd_select_transactions(
@@ -716,8 +717,8 @@ extern void dwc2_host_disconnect(struct dwc2_hsotg *hsotg);
extern void dwc2_host_hub_info(struct dwc2_hsotg *hsotg, void *context,
int *hub_addr, int *hub_port);
extern int dwc2_host_get_speed(struct dwc2_hsotg *hsotg, void *context);
-extern void dwc2_host_complete(struct dwc2_hsotg *hsotg, void *context,
- struct dwc2_hcd_urb *dwc2_urb, int status);
+extern void dwc2_host_complete(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
+ int status);
#ifdef DEBUG
/*
diff --git a/drivers/staging/dwc2/hcd_ddma.c b/drivers/staging/dwc2/hcd_ddma.c
index 5c0fd273a7b..69070f4442a 100644
--- a/drivers/staging/dwc2/hcd_ddma.c
+++ b/drivers/staging/dwc2/hcd_ddma.c
@@ -800,11 +800,14 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
u16 remain = 0;
int rc = 0;
+ if (!qtd->urb)
+ return -EINVAL;
+
frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
if (chan->ep_is_in)
- remain = dma_desc->status >> HOST_DMA_ISOC_NBYTES_SHIFT &
- HOST_DMA_ISOC_NBYTES_MASK >> HOST_DMA_ISOC_NBYTES_SHIFT;
+ remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
+ HOST_DMA_ISOC_NBYTES_SHIFT;
if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
/*
@@ -826,7 +829,7 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
* urb->status is not used for isoc transfers here. The
* individual frame_desc status are used instead.
*/
- dwc2_host_complete(hsotg, qtd->urb->priv, qtd->urb, 0);
+ dwc2_host_complete(hsotg, qtd, 0);
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
/*
@@ -884,13 +887,16 @@ static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
qtd_list_entry) {
- for (idx = 0; idx < qtd->urb->packet_count; idx++) {
- frame_desc = &qtd->urb->iso_descs[idx];
- frame_desc->status = err;
+ if (qtd->urb) {
+ for (idx = 0; idx < qtd->urb->packet_count;
+ idx++) {
+ frame_desc = &qtd->urb->iso_descs[idx];
+ frame_desc->status = err;
+ }
+
+ dwc2_host_complete(hsotg, qtd, err);
}
- dwc2_host_complete(hsotg, qtd->urb->priv, qtd->urb,
- err);
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
}
@@ -929,8 +935,8 @@ static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
u16 remain = 0;
if (chan->ep_is_in)
- remain = dma_desc->status >> HOST_DMA_NBYTES_SHIFT &
- HOST_DMA_NBYTES_MASK >> HOST_DMA_NBYTES_SHIFT;
+ remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
+ HOST_DMA_NBYTES_SHIFT;
dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);
@@ -1015,6 +1021,9 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
dev_vdbg(hsotg->dev, "%s()\n", __func__);
+ if (!urb)
+ return -EINVAL;
+
dma_desc = &qh->desc_list[desc_num];
n_bytes = qh->n_bytes[desc_num];
dev_vdbg(hsotg->dev,
@@ -1024,7 +1033,7 @@ static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
halt_status, n_bytes,
xfer_done);
if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
- dwc2_host_complete(hsotg, urb->priv, urb, urb->status);
+ dwc2_host_complete(hsotg, qtd, urb->status);
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
failed, *xfer_done, urb->status);
diff --git a/drivers/staging/dwc2/hcd_intr.c b/drivers/staging/dwc2/hcd_intr.c
index e75dccb3b80..e143f69939f 100644
--- a/drivers/staging/dwc2/hcd_intr.c
+++ b/drivers/staging/dwc2/hcd_intr.c
@@ -89,15 +89,20 @@ static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
{
struct urb *usb_urb;
- if (!chan->qh || !qtd->urb)
+ if (!chan->qh)
+ return;
+
+ if (chan->qh->dev_speed == USB_SPEED_HIGH)
+ return;
+
+ if (!qtd->urb)
return;
usb_urb = qtd->urb->priv;
- if (!usb_urb || !usb_urb->dev)
+ if (!usb_urb || !usb_urb->dev || !usb_urb->dev->tt)
return;
- if (chan->qh->dev_speed != USB_SPEED_HIGH &&
- qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
+ if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
chan->qh->tt_buffer_dirty = 1;
if (usb_hub_clear_tt_buffer(usb_urb))
/* Clear failed; let's hope things work anyway */
@@ -160,19 +165,16 @@ static void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n");
grxsts = readl(hsotg->regs + GRXSTSP);
- chnum = grxsts >> GRXSTS_HCHNUM_SHIFT &
- GRXSTS_HCHNUM_MASK >> GRXSTS_HCHNUM_SHIFT;
+ chnum = (grxsts & GRXSTS_HCHNUM_MASK) >> GRXSTS_HCHNUM_SHIFT;
chan = hsotg->hc_ptr_array[chnum];
if (!chan) {
dev_err(hsotg->dev, "Unable to get corresponding channel\n");
return;
}
- bcnt = grxsts >> GRXSTS_BYTECNT_SHIFT &
- GRXSTS_BYTECNT_MASK >> GRXSTS_BYTECNT_SHIFT;
- dpid = grxsts >> GRXSTS_DPID_SHIFT &
- GRXSTS_DPID_MASK >> GRXSTS_DPID_SHIFT;
- pktsts = grxsts & GRXSTS_PKTSTS_MASK;
+ bcnt = (grxsts & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
+ dpid = (grxsts & GRXSTS_DPID_MASK) >> GRXSTS_DPID_SHIFT;
+ pktsts = (grxsts & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT;
/* Packet Status */
if (dbg_perio()) {
@@ -180,9 +182,7 @@ static void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
dev_vdbg(hsotg->dev, " Count = %d\n", bcnt);
dev_vdbg(hsotg->dev, " DPID = %d, chan.dpid = %d\n", dpid,
chan->data_pid_start);
- dev_vdbg(hsotg->dev, " PStatus = %d\n",
- pktsts >> GRXSTS_PKTSTS_SHIFT &
- GRXSTS_PKTSTS_MASK >> GRXSTS_PKTSTS_SHIFT);
+ dev_vdbg(hsotg->dev, " PStatus = %d\n", pktsts);
}
switch (pktsts) {
@@ -261,7 +261,7 @@ static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
}
usbcfg = readl(hsotg->regs + GUSBCFG);
- prtspd = hprt0 & HPRT0_SPD_MASK;
+ prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) {
/* Low power */
@@ -273,7 +273,8 @@ static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
}
hcfg = readl(hsotg->regs + HCFG);
- fslspclksel = hcfg & HCFG_FSLSPCLKSEL_MASK;
+ fslspclksel = (hcfg & HCFG_FSLSPCLKSEL_MASK) >>
+ HCFG_FSLSPCLKSEL_SHIFT;
if (prtspd == HPRT0_SPD_LOW_SPEED &&
params->host_ls_low_power_phy_clk ==
@@ -282,8 +283,9 @@ static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
dev_vdbg(hsotg->dev,
"FS_PHY programming HCFG to 6 MHz\n");
if (fslspclksel != HCFG_FSLSPCLKSEL_6_MHZ) {
+ fslspclksel = HCFG_FSLSPCLKSEL_6_MHZ;
hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
- hcfg |= HCFG_FSLSPCLKSEL_6_MHZ;
+ hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
writel(hcfg, hsotg->regs + HCFG);
do_reset = 1;
}
@@ -292,8 +294,9 @@ static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
dev_vdbg(hsotg->dev,
"FS_PHY programming HCFG to 48 MHz\n");
if (fslspclksel != HCFG_FSLSPCLKSEL_48_MHZ) {
+ fslspclksel = HCFG_FSLSPCLKSEL_48_MHZ;
hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
- hcfg |= HCFG_FSLSPCLKSEL_48_MHZ;
+ hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
writel(hcfg, hsotg->regs + HCFG);
do_reset = 1;
}
@@ -406,8 +409,8 @@ static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
if (halt_status == DWC2_HC_XFER_COMPLETE) {
if (chan->ep_is_in) {
- count = hctsiz >> TSIZ_XFERSIZE_SHIFT &
- TSIZ_XFERSIZE_MASK >> TSIZ_XFERSIZE_SHIFT;
+ count = (hctsiz & TSIZ_XFERSIZE_MASK) >>
+ TSIZ_XFERSIZE_SHIFT;
length = chan->xfer_len - count;
if (short_read != NULL)
*short_read = (count != 0);
@@ -426,8 +429,7 @@ static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
* hctsiz.xfersize field because that reflects the number of
* bytes transferred via the AHB, not the USB).
*/
- count = hctsiz >> TSIZ_PKTCNT_SHIFT &
- TSIZ_PKTCNT_MASK >> TSIZ_PKTCNT_SHIFT;
+ count = (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT;
length = (chan->start_pkt_count - count) * chan->max_packet;
}
@@ -462,7 +464,7 @@ static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
/* Non DWORD-aligned buffer case handling */
if (chan->align_buf && xfer_length && chan->ep_is_in) {
- dev_dbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
+ dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
dma_sync_single_for_cpu(hsotg->dev, urb->dma, urb->length,
DMA_FROM_DEVICE);
memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf,
@@ -490,8 +492,7 @@ static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
__func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
dev_vdbg(hsotg->dev, " chan->xfer_len %d\n", chan->xfer_len);
dev_vdbg(hsotg->dev, " hctsiz.xfersize %d\n",
- hctsiz >> TSIZ_XFERSIZE_SHIFT &
- TSIZ_XFERSIZE_MASK >> TSIZ_XFERSIZE_SHIFT);
+ (hctsiz & TSIZ_XFERSIZE_MASK) >> TSIZ_XFERSIZE_SHIFT);
dev_vdbg(hsotg->dev, " urb->transfer_buffer_length %d\n", urb->length);
dev_vdbg(hsotg->dev, " urb->actual_length %d\n", urb->actual_length);
dev_vdbg(hsotg->dev, " short_read %d, xfer_done %d\n", short_read,
@@ -510,7 +511,7 @@ void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
struct dwc2_qtd *qtd)
{
u32 hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
- u32 pid = hctsiz & TSIZ_SC_MC_PID_MASK;
+ u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
if (pid == TSIZ_SC_MC_PID_DATA0)
@@ -557,8 +558,8 @@ static enum dwc2_halt_status dwc2_update_isoc_urb_state(
/* Non DWORD-aligned buffer case handling */
if (chan->align_buf && frame_desc->actual_length &&
chan->ep_is_in) {
- dev_dbg(hsotg->dev, "%s(): non-aligned buffer\n",
- __func__);
+ dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
+ __func__);
dma_sync_single_for_cpu(hsotg->dev, urb->dma,
urb->length, DMA_FROM_DEVICE);
memcpy(urb->buf + frame_desc->offset +
@@ -591,8 +592,8 @@ static enum dwc2_halt_status dwc2_update_isoc_urb_state(
/* Non DWORD-aligned buffer case handling */
if (chan->align_buf && frame_desc->actual_length &&
chan->ep_is_in) {
- dev_dbg(hsotg->dev, "%s(): non-aligned buffer\n",
- __func__);
+ dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
+ __func__);
dma_sync_single_for_cpu(hsotg->dev, urb->dma,
urb->length, DMA_FROM_DEVICE);
memcpy(urb->buf + frame_desc->offset +
@@ -623,7 +624,7 @@ static enum dwc2_halt_status dwc2_update_isoc_urb_state(
* urb->status is not used for isoc transfers. The individual
* frame_desc statuses are used instead.
*/
- dwc2_host_complete(hsotg, urb->priv, urb, 0);
+ dwc2_host_complete(hsotg, qtd, 0);
halt_status = DWC2_HC_XFER_URB_COMPLETE;
} else {
halt_status = DWC2_HC_XFER_COMPLETE;
@@ -714,11 +715,7 @@ static void dwc2_release_channel(struct dwc2_hsotg *hsotg,
dev_vdbg(hsotg->dev,
" Complete URB with transaction error\n");
free_qtd = 1;
- if (qtd->urb) {
- qtd->urb->status = -EPROTO;
- dwc2_host_complete(hsotg, qtd->urb->priv,
- qtd->urb, -EPROTO);
- }
+ dwc2_host_complete(hsotg, qtd, -EPROTO);
}
break;
case DWC2_HC_XFER_URB_DEQUEUE:
@@ -731,11 +728,7 @@ static void dwc2_release_channel(struct dwc2_hsotg *hsotg,
case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
dev_vdbg(hsotg->dev, " Complete URB with I/O error\n");
free_qtd = 1;
- if (qtd && qtd->urb) {
- qtd->urb->status = -EIO;
- dwc2_host_complete(hsotg, qtd->urb->priv, qtd->urb,
- -EIO);
- }
+ dwc2_host_complete(hsotg, qtd, -EIO);
break;
case DWC2_HC_XFER_NO_HALT_STATUS:
default:
@@ -938,7 +931,7 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
frame_desc->actual_length += len;
if (chan->align_buf && len) {
- dev_dbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
+ dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
dma_sync_single_for_cpu(hsotg->dev, qtd->urb->dma,
qtd->urb->length, DMA_FROM_DEVICE);
memcpy(qtd->urb->buf + frame_desc->offset +
@@ -957,7 +950,7 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
}
if (qtd->isoc_frame_index == qtd->urb->packet_count) {
- dwc2_host_complete(hsotg, qtd->urb->priv, qtd->urb, 0);
+ dwc2_host_complete(hsotg, qtd, 0);
dwc2_release_channel(hsotg, chan, qtd,
DWC2_HC_XFER_URB_COMPLETE);
} else {
@@ -1040,7 +1033,7 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
dev_vdbg(hsotg->dev, " Control transfer complete\n");
if (urb->status == -EINPROGRESS)
urb->status = 0;
- dwc2_host_complete(hsotg, urb->priv, urb, urb->status);
+ dwc2_host_complete(hsotg, qtd, urb->status);
halt_status = DWC2_HC_XFER_URB_COMPLETE;
break;
}
@@ -1053,7 +1046,7 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
qtd);
if (urb_xfer_done) {
- dwc2_host_complete(hsotg, urb->priv, urb, urb->status);
+ dwc2_host_complete(hsotg, qtd, urb->status);
halt_status = DWC2_HC_XFER_URB_COMPLETE;
} else {
halt_status = DWC2_HC_XFER_COMPLETE;
@@ -1073,11 +1066,10 @@ static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
* interrupt
*/
if (urb_xfer_done) {
- dwc2_host_complete(hsotg, urb->priv, urb,
- urb->status);
- halt_status = DWC2_HC_XFER_URB_COMPLETE;
+ dwc2_host_complete(hsotg, qtd, urb->status);
+ halt_status = DWC2_HC_XFER_URB_COMPLETE;
} else {
- halt_status = DWC2_HC_XFER_COMPLETE;
+ halt_status = DWC2_HC_XFER_COMPLETE;
}
dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
@@ -1123,11 +1115,11 @@ static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
goto handle_stall_halt;
if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
- dwc2_host_complete(hsotg, urb->priv, urb, -EPIPE);
+ dwc2_host_complete(hsotg, qtd, -EPIPE);
if (pipe_type == USB_ENDPOINT_XFER_BULK ||
pipe_type == USB_ENDPOINT_XFER_INT) {
- dwc2_host_complete(hsotg, urb->priv, urb, -EPIPE);
+ dwc2_host_complete(hsotg, qtd, -EPIPE);
/*
* USB protocol requires resetting the data toggle for bulk
* and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
@@ -1168,7 +1160,7 @@ static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
/* Non DWORD-aligned buffer case handling */
if (chan->align_buf && xfer_length && chan->ep_is_in) {
- dev_dbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
+ dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
dma_sync_single_for_cpu(hsotg->dev, urb->dma, urb->length,
DMA_FROM_DEVICE);
memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf,
@@ -1185,8 +1177,7 @@ static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
dev_vdbg(hsotg->dev, " chan->start_pkt_count %d\n",
chan->start_pkt_count);
dev_vdbg(hsotg->dev, " hctsiz.pktcnt %d\n",
- hctsiz >> TSIZ_PKTCNT_SHIFT &
- TSIZ_PKTCNT_MASK >> TSIZ_PKTCNT_SHIFT);
+ (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT);
dev_vdbg(hsotg->dev, " chan->max_packet %d\n", chan->max_packet);
dev_vdbg(hsotg->dev, " bytes_transferred %d\n",
xfer_length);
@@ -1372,10 +1363,10 @@ static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
hsotg->core_params->dma_enable > 0) {
qtd->complete_split = 0;
qtd->isoc_split_offset = 0;
+ qtd->isoc_frame_index++;
if (qtd->urb &&
- ++qtd->isoc_frame_index == qtd->urb->packet_count) {
- dwc2_host_complete(hsotg, qtd->urb->priv,
- qtd->urb, 0);
+ qtd->isoc_frame_index == qtd->urb->packet_count) {
+ dwc2_host_complete(hsotg, qtd, 0);
dwc2_release_channel(hsotg, chan, qtd,
DWC2_HC_XFER_URB_COMPLETE);
} else {
@@ -1445,16 +1436,16 @@ static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Babble Error--\n",
chnum);
+ dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
+
if (hsotg->core_params->dma_desc_enable > 0) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
DWC2_HC_XFER_BABBLE_ERR);
- goto handle_babble_done;
+ goto disable_int;
}
if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
- if (qtd->urb)
- dwc2_host_complete(hsotg, qtd->urb->priv, qtd->urb,
- -EOVERFLOW);
+ dwc2_host_complete(hsotg, qtd, -EOVERFLOW);
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_BABBLE_ERR);
} else {
enum dwc2_halt_status halt_status;
@@ -1464,8 +1455,7 @@ static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
dwc2_halt_channel(hsotg, chan, qtd, halt_status);
}
-handle_babble_done:
- dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
+disable_int:
disable_hc_int(hsotg, chnum, HCINTMSK_BBLERR);
}
@@ -1490,6 +1480,8 @@ static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
if (!urb)
goto handle_ahberr_halt;
+ dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
+
hcchar = readl(hsotg->regs + HCCHAR(chnum));
hcsplt = readl(hsotg->regs + HCSPLT(chnum));
hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
@@ -1557,7 +1549,7 @@ static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
goto handle_ahberr_done;
}
- dwc2_host_complete(hsotg, urb->priv, urb, -EIO);
+ dwc2_host_complete(hsotg, qtd, -EIO);
handle_ahberr_halt:
/*
@@ -1567,7 +1559,6 @@ handle_ahberr_halt:
dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR);
handle_ahberr_done:
- dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR);
}
@@ -1582,6 +1573,8 @@ static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
dev_dbg(hsotg->dev,
"--Host Channel %d Interrupt: Transaction Error--\n", chnum);
+ dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
+
if (hsotg->core_params->dma_desc_enable > 0) {
dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
DWC2_HC_XFER_XACT_ERR);
@@ -1625,7 +1618,6 @@ static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
}
handle_xacterr_done:
- dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
disable_hc_int(hsotg, chnum, HCINTMSK_XACTERR);
}
@@ -1643,6 +1635,8 @@ static void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Frame Overrun--\n",
chnum);
+ dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
+
switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_BULK:
@@ -1657,7 +1651,6 @@ static void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
break;
}
- dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
disable_hc_int(hsotg, chnum, HCINTMSK_FRMOVRUN);
}
@@ -1766,7 +1759,7 @@ static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
* For core with OUT NAK enhancement, the flow for high-speed
* CONTROL/BULK OUT is handled a little differently
*/
- if (hsotg->snpsid >= DWC2_CORE_REV_2_71a) {
+ if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_71a) {
if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in &&
(chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
chan->ep_type == USB_ENDPOINT_XFER_BULK)) {
diff --git a/drivers/staging/dwc2/hcd_queue.c b/drivers/staging/dwc2/hcd_queue.c
index b36f783dd3e..b1980ef28fa 100644
--- a/drivers/staging/dwc2/hcd_queue.c
+++ b/drivers/staging/dwc2/hcd_queue.c
@@ -116,7 +116,7 @@ static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
qh->interval = 8;
#endif
hprt = readl(hsotg->regs + HPRT0);
- prtspd = hprt & HPRT0_SPD_MASK;
+ prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
if (prtspd == HPRT0_SPD_HIGH_SPEED &&
(dev_speed == USB_SPEED_LOW ||
dev_speed == USB_SPEED_FULL)) {
@@ -197,6 +197,9 @@ static struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
{
struct dwc2_qh *qh;
+ if (!urb->priv)
+ return NULL;
+
/* Allocate memory */
qh = kzalloc(sizeof(*qh), mem_flags);
if (!qh)
@@ -638,7 +641,7 @@ int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
struct dwc2_hcd_urb *urb = qtd->urb;
unsigned long flags;
int allocated = 0;
- int retval = 0;
+ int retval;
/*
* Get the QH which holds the QTD-list to insert to. Create QH if it
@@ -652,8 +655,19 @@ int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
}
spin_lock_irqsave(&hsotg->lock, flags);
+
retval = dwc2_hcd_qh_add(hsotg, *qh);
- if (retval && allocated) {
+ if (retval)
+ goto fail;
+
+ qtd->qh = *qh;
+ list_add_tail(&qtd->qtd_list_entry, &(*qh)->qtd_list);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ return 0;
+
+fail:
+ if (allocated) {
struct dwc2_qtd *qtd2, *qtd2_tmp;
struct dwc2_qh *qh_tmp = *qh;
@@ -668,8 +682,6 @@ int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
spin_unlock_irqrestore(&hsotg->lock, flags);
dwc2_hcd_qh_free(hsotg, qh_tmp);
} else {
- qtd->qh = *qh;
- list_add_tail(&qtd->qtd_list_entry, &(*qh)->qtd_list);
spin_unlock_irqrestore(&hsotg->lock, flags);
}
diff --git a/drivers/staging/dwc2/hw.h b/drivers/staging/dwc2/hw.h
index 382a1d74865..9c92a3c7588 100644
--- a/drivers/staging/dwc2/hw.h
+++ b/drivers/staging/dwc2/hw.h
@@ -72,12 +72,16 @@
#define GAHBCFG_DMA_EN (1 << 5)
#define GAHBCFG_HBSTLEN_MASK (0xf << 1)
#define GAHBCFG_HBSTLEN_SHIFT 1
-#define GAHBCFG_HBSTLEN_SINGLE (0 << 1)
-#define GAHBCFG_HBSTLEN_INCR (1 << 1)
-#define GAHBCFG_HBSTLEN_INCR4 (3 << 1)
-#define GAHBCFG_HBSTLEN_INCR8 (5 << 1)
-#define GAHBCFG_HBSTLEN_INCR16 (7 << 1)
+#define GAHBCFG_HBSTLEN_SINGLE 0
+#define GAHBCFG_HBSTLEN_INCR 1
+#define GAHBCFG_HBSTLEN_INCR4 3
+#define GAHBCFG_HBSTLEN_INCR8 5
+#define GAHBCFG_HBSTLEN_INCR16 7
#define GAHBCFG_GLBL_INTR_EN (1 << 0)
+#define GAHBCFG_CTRL_MASK (GAHBCFG_P_TXF_EMP_LVL | \
+ GAHBCFG_NP_TXF_EMP_LVL | \
+ GAHBCFG_DMA_EN | \
+ GAHBCFG_GLBL_INTR_EN)
#define GUSBCFG HSOTG_REG(0x00C)
#define GUSBCFG_FORCEDEVMODE (1 << 30)
@@ -165,15 +169,15 @@
#define GRXSTS_FN_SHIFT 25
#define GRXSTS_PKTSTS_MASK (0xf << 17)
#define GRXSTS_PKTSTS_SHIFT 17
-#define GRXSTS_PKTSTS_GLOBALOUTNAK (1 << 17)
-#define GRXSTS_PKTSTS_OUTRX (2 << 17)
-#define GRXSTS_PKTSTS_HCHIN (2 << 17)
-#define GRXSTS_PKTSTS_OUTDONE (3 << 17)
-#define GRXSTS_PKTSTS_HCHIN_XFER_COMP (3 << 17)
-#define GRXSTS_PKTSTS_SETUPDONE (4 << 17)
-#define GRXSTS_PKTSTS_DATATOGGLEERR (5 << 17)
-#define GRXSTS_PKTSTS_SETUPRX (6 << 17)
-#define GRXSTS_PKTSTS_HCHHALTED (7 << 17)
+#define GRXSTS_PKTSTS_GLOBALOUTNAK 1
+#define GRXSTS_PKTSTS_OUTRX 2
+#define GRXSTS_PKTSTS_HCHIN 2
+#define GRXSTS_PKTSTS_OUTDONE 3
+#define GRXSTS_PKTSTS_HCHIN_XFER_COMP 3
+#define GRXSTS_PKTSTS_SETUPDONE 4
+#define GRXSTS_PKTSTS_DATATOGGLEERR 5
+#define GRXSTS_PKTSTS_SETUPRX 6
+#define GRXSTS_PKTSTS_HCHHALTED 7
#define GRXSTS_HCHNUM_MASK (0xf << 0)
#define GRXSTS_HCHNUM_SHIFT 0
#define GRXSTS_DPID_MASK (0x3 << 15)
@@ -184,16 +188,11 @@
#define GRXSTS_EPNUM_SHIFT 0
#define GRXFSIZ HSOTG_REG(0x024)
+#define GRXFSIZ_DEPTH_MASK (0xffff << 0)
+#define GRXFSIZ_DEPTH_SHIFT 0
#define GNPTXFSIZ HSOTG_REG(0x028)
-#define GNPTXFSIZ_NP_TXF_DEP_MASK (0xffff << 16)
-#define GNPTXFSIZ_NP_TXF_DEP_SHIFT 16
-#define GNPTXFSIZ_NP_TXF_DEP_LIMIT 0xffff
-#define GNPTXFSIZ_NP_TXF_DEP(_x) ((_x) << 16)
-#define GNPTXFSIZ_NP_TXF_ST_ADDR_MASK (0xffff << 0)
-#define GNPTXFSIZ_NP_TXF_ST_ADDR_SHIFT 0
-#define GNPTXFSIZ_NP_TXF_ST_ADDR_LIMIT 0xffff
-#define GNPTXFSIZ_NP_TXF_ST_ADDR(_x) ((_x) << 0)
+/* Use FIFOSIZE_* constants to access this register */
#define GNPTXSTS HSOTG_REG(0x02C)
#define GNPTXSTS_NP_TXQ_TOP_MASK (0x7f << 24)
@@ -244,32 +243,32 @@
#define GHWCFG2_NUM_DEV_EP_SHIFT 10
#define GHWCFG2_FS_PHY_TYPE_MASK (0x3 << 8)
#define GHWCFG2_FS_PHY_TYPE_SHIFT 8
-#define GHWCFG2_FS_PHY_TYPE_NOT_SUPPORTED (0 << 8)
-#define GHWCFG2_FS_PHY_TYPE_DEDICATED (1 << 8)
-#define GHWCFG2_FS_PHY_TYPE_SHARED_UTMI (2 << 8)
-#define GHWCFG2_FS_PHY_TYPE_SHARED_ULPI (3 << 8)
+#define GHWCFG2_FS_PHY_TYPE_NOT_SUPPORTED 0
+#define GHWCFG2_FS_PHY_TYPE_DEDICATED 1
+#define GHWCFG2_FS_PHY_TYPE_SHARED_UTMI 2
+#define GHWCFG2_FS_PHY_TYPE_SHARED_ULPI 3
#define GHWCFG2_HS_PHY_TYPE_MASK (0x3 << 6)
#define GHWCFG2_HS_PHY_TYPE_SHIFT 6
-#define GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED (0 << 6)
-#define GHWCFG2_HS_PHY_TYPE_UTMI (1 << 6)
-#define GHWCFG2_HS_PHY_TYPE_ULPI (2 << 6)
-#define GHWCFG2_HS_PHY_TYPE_UTMI_ULPI (3 << 6)
+#define GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED 0
+#define GHWCFG2_HS_PHY_TYPE_UTMI 1
+#define GHWCFG2_HS_PHY_TYPE_ULPI 2
+#define GHWCFG2_HS_PHY_TYPE_UTMI_ULPI 3
#define GHWCFG2_POINT2POINT (1 << 5)
#define GHWCFG2_ARCHITECTURE_MASK (0x3 << 3)
#define GHWCFG2_ARCHITECTURE_SHIFT 3
-#define GHWCFG2_SLAVE_ONLY_ARCH (0 << 3)
-#define GHWCFG2_EXT_DMA_ARCH (1 << 3)
-#define GHWCFG2_INT_DMA_ARCH (2 << 3)
+#define GHWCFG2_SLAVE_ONLY_ARCH 0
+#define GHWCFG2_EXT_DMA_ARCH 1
+#define GHWCFG2_INT_DMA_ARCH 2
#define GHWCFG2_OP_MODE_MASK (0x7 << 0)
#define GHWCFG2_OP_MODE_SHIFT 0
-#define GHWCFG2_OP_MODE_HNP_SRP_CAPABLE (0 << 0)
-#define GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE (1 << 0)
-#define GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE (2 << 0)
-#define GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE (3 << 0)
-#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE (4 << 0)
-#define GHWCFG2_OP_MODE_SRP_CAPABLE_HOST (5 << 0)
-#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST (6 << 0)
-#define GHWCFG2_OP_MODE_UNDEFINED (7 << 0)
+#define GHWCFG2_OP_MODE_HNP_SRP_CAPABLE 0
+#define GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE 1
+#define GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE 2
+#define GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE 3
+#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE 4
+#define GHWCFG2_OP_MODE_SRP_CAPABLE_HOST 5
+#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST 6
+#define GHWCFG2_OP_MODE_UNDEFINED 7
#define GHWCFG3 HSOTG_REG(0x004c)
#define GHWCFG3_DFIFO_DEPTH_MASK (0xffff << 16)
@@ -303,6 +302,9 @@
#define GHWCFG4_NUM_DEV_MODE_CTRL_EP_SHIFT 16
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK (0x3 << 14)
#define GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT 14
+#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8 0
+#define GHWCFG4_UTMI_PHY_DATA_WIDTH_16 1
+#define GHWCFG4_UTMI_PHY_DATA_WIDTH_8_OR_16 2
#define GHWCFG4_XHIBER (1 << 7)
#define GHWCFG4_HIBER (1 << 6)
#define GHWCFG4_MIN_AHB_FREQ (1 << 5)
@@ -391,16 +393,12 @@
#define ADPCTL_PRB_DSCHRG_SHIFT 0
#define HPTXFSIZ HSOTG_REG(0x100)
+/* Use FIFOSIZE_* constants to access this register */
#define DPTXFSIZN(_a) HSOTG_REG(0x104 + (((_a) - 1) * 4))
-#define DPTXFSIZN_DP_TXF_SIZE_MASK (0xffff << 16)
-#define DPTXFSIZN_DP_TXF_SIZE_SHIFT 16
-#define DPTXFSIZN_DP_TXF_SIZE_GET(_v) (((_v) >> 16) & 0xffff)
-#define DPTXFSIZN_DP_TXF_SIZE_LIMIT 0xffff
-#define DPTXFSIZN_DP_TXF_SIZE(_x) ((_x) << 16)
-#define DPTXFSIZN_DP_TXF_ST_ADDR_MASK (0xffff << 0)
-#define DPTXFSIZN_DP_TXF_ST_ADDR_SHIFT 0
+/* Use FIFOSIZE_* constants to access this register */
+/* These apply to the GNPTXFSIZ, HPTXFSIZ and DPTXFSIZN registers */
#define FIFOSIZE_DEPTH_MASK (0xffff << 16)
#define FIFOSIZE_DEPTH_SHIFT 16
#define FIFOSIZE_STARTADDR_MASK (0xffff << 0)
@@ -424,10 +422,10 @@
#define DCFG_NZ_STS_OUT_HSHK (1 << 2)
#define DCFG_DEVSPD_MASK (0x3 << 0)
#define DCFG_DEVSPD_SHIFT 0
-#define DCFG_DEVSPD_HS (0 << 0)
-#define DCFG_DEVSPD_FS (1 << 0)
-#define DCFG_DEVSPD_LS (2 << 0)
-#define DCFG_DEVSPD_FS48 (3 << 0)
+#define DCFG_DEVSPD_HS 0
+#define DCFG_DEVSPD_FS 1
+#define DCFG_DEVSPD_LS 2
+#define DCFG_DEVSPD_FS48 3
#define DCTL HSOTG_REG(0x804)
#define DCTL_PWRONPRGDONE (1 << 11)
@@ -450,10 +448,10 @@
#define DSTS_ERRATICERR (1 << 3)
#define DSTS_ENUMSPD_MASK (0x3 << 1)
#define DSTS_ENUMSPD_SHIFT 1
-#define DSTS_ENUMSPD_HS (0 << 1)
-#define DSTS_ENUMSPD_FS (1 << 1)
-#define DSTS_ENUMSPD_LS (2 << 1)
-#define DSTS_ENUMSPD_FS48 (3 << 1)
+#define DSTS_ENUMSPD_HS 0
+#define DSTS_ENUMSPD_FS 1
+#define DSTS_ENUMSPD_LS 2
+#define DSTS_ENUMSPD_FS48 3
#define DSTS_SUSPSTS (1 << 0)
#define DIEPMSK HSOTG_REG(0x810)
@@ -501,10 +499,10 @@
*/
#define D0EPCTL_MPS_MASK (0x3 << 0)
#define D0EPCTL_MPS_SHIFT 0
-#define D0EPCTL_MPS_64 (0 << 0)
-#define D0EPCTL_MPS_32 (1 << 0)
-#define D0EPCTL_MPS_16 (2 << 0)
-#define D0EPCTL_MPS_8 (3 << 0)
+#define D0EPCTL_MPS_64 0
+#define D0EPCTL_MPS_32 1
+#define D0EPCTL_MPS_16 2
+#define D0EPCTL_MPS_8 3
#define DXEPCTL_EPENA (1 << 31)
#define DXEPCTL_EPDIS (1 << 30)
@@ -522,10 +520,10 @@
#define DXEPCTL_SNP (1 << 20)
#define DXEPCTL_EPTYPE_MASK (0x3 << 18)
#define DXEPCTL_EPTYPE_SHIFT 18
-#define DXEPCTL_EPTYPE_CONTROL (0 << 18)
-#define DXEPCTL_EPTYPE_ISO (1 << 18)
-#define DXEPCTL_EPTYPE_BULK (2 << 18)
-#define DXEPCTL_EPTYPE_INTTERUPT (3 << 18)
+#define DXEPCTL_EPTYPE_CONTROL 0
+#define DXEPCTL_EPTYPE_ISO 1
+#define DXEPCTL_EPTYPE_BULK 2
+#define DXEPCTL_EPTYPE_INTTERUPT 3
#define DXEPCTL_NAKSTS (1 << 17)
#define DXEPCTL_DPID (1 << 16)
#define DXEPCTL_EOFRNUM (1 << 16)
@@ -645,9 +643,9 @@
#define HCFG_FSLSSUPP (1 << 2)
#define HCFG_FSLSPCLKSEL_MASK (0x3 << 0)
#define HCFG_FSLSPCLKSEL_SHIFT 0
-#define HCFG_FSLSPCLKSEL_30_60_MHZ (0 << 0)
-#define HCFG_FSLSPCLKSEL_48_MHZ (1 << 0)
-#define HCFG_FSLSPCLKSEL_6_MHZ (2 << 0)
+#define HCFG_FSLSPCLKSEL_30_60_MHZ 0
+#define HCFG_FSLSPCLKSEL_48_MHZ 1
+#define HCFG_FSLSPCLKSEL_6_MHZ 2
#define HFIR HSOTG_REG(0x0404)
#define HFIR_FRINT_MASK (0xffff << 0)
@@ -680,9 +678,9 @@
#define HPRT0 HSOTG_REG(0x0440)
#define HPRT0_SPD_MASK (0x3 << 17)
#define HPRT0_SPD_SHIFT 17
-#define HPRT0_SPD_HIGH_SPEED (0 << 17)
-#define HPRT0_SPD_FULL_SPEED (1 << 17)
-#define HPRT0_SPD_LOW_SPEED (2 << 17)
+#define HPRT0_SPD_HIGH_SPEED 0
+#define HPRT0_SPD_FULL_SPEED 1
+#define HPRT0_SPD_LOW_SPEED 2
#define HPRT0_TSTCTL_MASK (0xf << 13)
#define HPRT0_TSTCTL_SHIFT 13
#define HPRT0_PWR (1 << 12)
@@ -720,10 +718,10 @@
#define HCSPLT_COMPSPLT (1 << 16)
#define HCSPLT_XACTPOS_MASK (0x3 << 14)
#define HCSPLT_XACTPOS_SHIFT 14
-#define HCSPLT_XACTPOS_MID (0 << 14)
-#define HCSPLT_XACTPOS_END (1 << 14)
-#define HCSPLT_XACTPOS_BEGIN (2 << 14)
-#define HCSPLT_XACTPOS_ALL (3 << 14)
+#define HCSPLT_XACTPOS_MID 0
+#define HCSPLT_XACTPOS_END 1
+#define HCSPLT_XACTPOS_BEGIN 2
+#define HCSPLT_XACTPOS_ALL 3
#define HCSPLT_HUBADDR_MASK (0x7f << 7)
#define HCSPLT_HUBADDR_SHIFT 7
#define HCSPLT_PRTADDR_MASK (0x7f << 0)
@@ -751,11 +749,11 @@
#define TSIZ_DOPNG (1 << 31)
#define TSIZ_SC_MC_PID_MASK (0x3 << 29)
#define TSIZ_SC_MC_PID_SHIFT 29
-#define TSIZ_SC_MC_PID_DATA0 (0 << 29)
-#define TSIZ_SC_MC_PID_DATA2 (1 << 29)
-#define TSIZ_SC_MC_PID_DATA1 (2 << 29)
-#define TSIZ_SC_MC_PID_MDATA (3 << 29)
-#define TSIZ_SC_MC_PID_SETUP (3 << 29)
+#define TSIZ_SC_MC_PID_DATA0 0
+#define TSIZ_SC_MC_PID_DATA2 1
+#define TSIZ_SC_MC_PID_DATA1 2
+#define TSIZ_SC_MC_PID_MDATA 3
+#define TSIZ_SC_MC_PID_SETUP 3
#define TSIZ_PKTCNT_MASK (0x3ff << 19)
#define TSIZ_PKTCNT_SHIFT 19
#define TSIZ_NTD_MASK (0xff << 8)
diff --git a/drivers/staging/dwc2/pci.c b/drivers/staging/dwc2/pci.c
index 3ca54d6782f..9020260d5df 100644
--- a/drivers/staging/dwc2/pci.c
+++ b/drivers/staging/dwc2/pci.c
@@ -74,7 +74,7 @@ static const struct dwc2_core_params dwc2_module_params = {
.max_packet_count = 511,
.host_channels = -1,
.phy_type = -1,
- .phy_utmi_width = 16, /* 16 bits - NOT DETECTABLE */
+ .phy_utmi_width = -1,
.phy_ulpi_ddr = -1,
.phy_ulpi_ext_vbus = -1,
.i2c_enable = -1,
@@ -83,7 +83,7 @@ static const struct dwc2_core_params dwc2_module_params = {
.host_ls_low_power_phy_clk = -1,
.ts_dline = -1,
.reload_ctl = -1,
- .ahb_single = -1,
+ .ahbcfg = -1,
};
/**
diff --git a/drivers/staging/et131x/README b/drivers/staging/et131x/README
index 05ad0850166..9272a24ae61 100644
--- a/drivers/staging/et131x/README
+++ b/drivers/staging/et131x/README
@@ -8,7 +8,6 @@ Note, the powermanagement options were removed from the vendor provided
driver as they did not build properly at the time.
TODO:
- - some rx packets have CRC/code/frame errors
- Look at reducing the number of spinlocks
- Simplify code in nic_rx_pkts(), when determining multicast_pkts_rcvd
- Implement NAPI support
diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c
index 5590ebf1da1..817f837b240 100644
--- a/drivers/staging/frontier/alphatrack.c
+++ b/drivers/staging/frontier/alphatrack.c
@@ -827,11 +827,11 @@ static void usb_alphatrack_disconnect(struct usb_interface *intf)
mutex_unlock(&dev->mtx);
usb_alphatrack_delete(dev);
} else {
+ atomic_set(&dev->writes_pending, 0);
dev->intf = NULL;
mutex_unlock(&dev->mtx);
}
- atomic_set(&dev->writes_pending, 0);
mutex_unlock(&disconnect_mutex);
dev_info(&intf->dev, "Alphatrack Surface #%d now disconnected\n",
diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c
index 6cbf9c7c1d3..074b0e5bcc6 100644
--- a/drivers/staging/frontier/tranzport.c
+++ b/drivers/staging/frontier/tranzport.c
@@ -177,24 +177,24 @@ static void usb_tranzport_abort_transfers(struct usb_tranzport *dev)
}
#define show_int(value) \
- static ssize_t show_##value(struct device *dev, \
+ static ssize_t value##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
struct usb_tranzport *t = usb_get_intfdata(intf); \
return sprintf(buf, "%d\n", t->value); \
} \
- static DEVICE_ATTR(value, S_IRUGO, show_##value, NULL);
+ static DEVICE_ATTR_RO(value)
#define show_set_int(value) \
- static ssize_t show_##value(struct device *dev, \
+ static ssize_t value##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
struct usb_tranzport *t = usb_get_intfdata(intf); \
return sprintf(buf, "%d\n", t->value); \
} \
- static ssize_t set_##value(struct device *dev, \
+ static ssize_t value##_store(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
@@ -206,7 +206,7 @@ static void usb_tranzport_abort_transfers(struct usb_tranzport *dev)
t->value = temp; \
return count; \
} \
- static DEVICE_ATTR(value, S_IWUSR | S_IRUGO, show_##value, set_##value);
+ static DEVICE_ATTR_RW(value)
show_int(enable);
show_int(offline);
diff --git a/drivers/staging/gdm724x/Kconfig b/drivers/staging/gdm724x/Kconfig
new file mode 100644
index 00000000000..0a1f090bbf3
--- /dev/null
+++ b/drivers/staging/gdm724x/Kconfig
@@ -0,0 +1,15 @@
+#
+# GCT GDM724x LTE driver configuration
+#
+
+config LTE_GDM724X
+ tristate "GCT GDM724x LTE support"
+ depends on NET && USB && TTY && m
+ help
+ This driver supports GCT GDM724x LTE chip-based USB modem devices.
+ It exposes 4 network devices to be used per PDN and 2 tty devices to be
+ used for AT commands and DM monitoring applications.
+ The modules will be called gdmulte.ko and gdmtty.ko.
+
+ GCT-ATCx can be used for AT commands
+ GCT-DMx can be used for LTE protocol monitoring
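Since the symbol depends on "m", the driver can only be built as modules; a minimal sketch of the matching .config fragment (assuming the NET, USB and TTY prerequisites are already enabled):

	CONFIG_LTE_GDM724X=m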
diff --git a/drivers/staging/gdm724x/Makefile b/drivers/staging/gdm724x/Makefile
new file mode 100644
index 00000000000..ba7f11a6a09
--- /dev/null
+++ b/drivers/staging/gdm724x/Makefile
@@ -0,0 +1,7 @@
+obj-$(CONFIG_LTE_GDM724X) := gdmulte.o
+gdmulte-y += gdm_lte.o netlink_k.o
+gdmulte-y += gdm_usb.o gdm_endian.o
+
+obj-$(CONFIG_LTE_GDM724X) += gdmtty.o
+gdmtty-y := gdm_tty.o gdm_mux.o
+
diff --git a/drivers/staging/gdm724x/TODO b/drivers/staging/gdm724x/TODO
new file mode 100644
index 00000000000..b2b571ecb06
--- /dev/null
+++ b/drivers/staging/gdm724x/TODO
@@ -0,0 +1,16 @@
+TODO:
+- Clean up coding style to meet kernel standards (80-column limit, netdev_err)
+- Remove test for host endian
+- Remove confusing macros (endian, hci_send, sdu_send, rcv_with_cb)
+- Fix every instance of a function returning -1
+- Check for skb->len in gdm_lte_emulate_arp()
+- Use ALIGN() macro for dummy_cnt in up_to_host()
+- Error handling in init_usb()
+- Explain reason for multiples of 512 bytes in alloc_tx_struct()
+- Review use of atomic allocation for tx structs
+- No error checking for alloc_tx_struct in do_tx()
+- fix up static tty port allocation to be dynamic
+
+Patches to:
+ Jonathan Kim <jonathankim@gctsemi.com>
+ Dean ahn <deanahn@gctsemi.com>
diff --git a/drivers/staging/gdm724x/gdm_endian.c b/drivers/staging/gdm724x/gdm_endian.c
new file mode 100644
index 00000000000..f6cc90ae9ba
--- /dev/null
+++ b/drivers/staging/gdm724x/gdm_endian.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include "gdm_endian.h"
+
+void gdm_set_endian(struct gdm_endian *ed, u8 dev_endian)
+{
+ u8 a[2] = {0x12, 0x34};
+ u8 b[2] = {0, };
+ u16 c = 0x1234;
+
+ if (dev_endian == ENDIANNESS_BIG)
+ ed->dev_ed = ENDIANNESS_BIG;
+ else
+ ed->dev_ed = ENDIANNESS_LITTLE;
+
+ memcpy(b, &c, 2);
+
+ if (a[0] != b[0])
+ ed->host_ed = ENDIANNESS_LITTLE;
+ else
+ ed->host_ed = ENDIANNESS_BIG;
+
+}
+
+u16 gdm_cpu_to_dev16(struct gdm_endian *ed, u16 x)
+{
+ if (ed->dev_ed == ed->host_ed)
+ return x;
+
+ return Endian16_Swap(x);
+}
+
+u16 gdm_dev16_to_cpu(struct gdm_endian *ed, u16 x)
+{
+ if (ed->dev_ed == ed->host_ed)
+ return x;
+
+ return Endian16_Swap(x);
+}
+
+u32 gdm_cpu_to_dev32(struct gdm_endian *ed, u32 x)
+{
+ if (ed->dev_ed == ed->host_ed)
+ return x;
+
+ return Endian32_Swap(x);
+}
+
+u32 gdm_dev32_to_cpu(struct gdm_endian *ed, u32 x)
+{
+ if (ed->dev_ed == ed->host_ed)
+ return x;
+
+ return Endian32_Swap(x);
+}
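The gdm_set_endian() routine above probes the host's byte order at run time with a memcpy trick, and the TODO file already asks for that test to be removed. A minimal sketch of what that could look like with the kernel's compile-time byte-order macros is shown below; it assumes the usual convention that asm/byteorder.h defines exactly one of __BIG_ENDIAN/__LITTLE_ENDIAN, reuses the ENDIANNESS_* values from gdm_endian.h, and uses a hypothetical function name.

#include <asm/byteorder.h>	/* defines __BIG_ENDIAN or __LITTLE_ENDIAN */
#include "gdm_endian.h"

/* Hypothetical replacement for the run-time probe in gdm_set_endian(). */
static void gdm_set_endian_static(struct gdm_endian *ed, u8 dev_endian)
{
	ed->dev_ed = (dev_endian == ENDIANNESS_BIG) ?
			ENDIANNESS_BIG : ENDIANNESS_LITTLE;

#ifdef __BIG_ENDIAN
	ed->host_ed = ENDIANNESS_BIG;
#else
	ed->host_ed = ENDIANNESS_LITTLE;
#endif
}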
diff --git a/drivers/staging/gdm724x/gdm_endian.h b/drivers/staging/gdm724x/gdm_endian.h
new file mode 100644
index 00000000000..9b2531ff908
--- /dev/null
+++ b/drivers/staging/gdm724x/gdm_endian.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __GDM_ENDIAN_H__
+#define __GDM_ENDIAN_H__
+
+#include <linux/types.h>
+
+#define Endian16_Swap(value) \
+ ((((u16)((value) & 0x00FF)) << 8) | \
+ (((u16)((value) & 0xFF00)) >> 8))
+
+#define Endian32_Swap(value) \
+ ((((u32)((value) & 0x000000FF)) << 24) | \
+ (((u32)((value) & 0x0000FF00)) << 8) | \
+ (((u32)((value) & 0x00FF0000)) >> 8) | \
+ (((u32)((value) & 0xFF000000)) >> 24))
+
+enum {
+ ENDIANNESS_MIN = 0,
+ ENDIANNESS_UNKNOWN,
+ ENDIANNESS_LITTLE,
+ ENDIANNESS_BIG,
+ ENDIANNESS_MIDDLE,
+ ENDIANNESS_MAX
+};
+
+struct gdm_endian {
+ u8 dev_ed;
+ u8 host_ed;
+};
+
+void gdm_set_endian(struct gdm_endian *ed, u8 dev_endian);
+u16 gdm_cpu_to_dev16(struct gdm_endian *ed, u16 x);
+u16 gdm_dev16_to_cpu(struct gdm_endian *ed, u16 x);
+u32 gdm_cpu_to_dev32(struct gdm_endian *ed, u32 x);
+u32 gdm_dev32_to_cpu(struct gdm_endian *ed, u32 x);
+
+#endif /* __GDM_ENDIAN_H__ */
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
new file mode 100644
index 00000000000..bc0d510fb0a
--- /dev/null
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -0,0 +1,877 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/in6.h>
+#include <linux/tcp.h>
+#include <linux/icmp.h>
+#include <linux/icmpv6.h>
+#include <linux/uaccess.h>
+#include <net/ndisc.h>
+
+#include "gdm_lte.h"
+#include "netlink_k.h"
+#include "hci.h"
+#include "hci_packet.h"
+#include "gdm_endian.h"
+
+/*
+ * Netlink protocol number
+ */
+#define NETLINK_LTE 30
+
+/*
+ * Default MTU Size
+ */
+#define DEFAULT_MTU_SIZE 1500
+
+#define gdm_dev_endian(n) (\
+ n->phy_dev->get_endian(n->phy_dev->priv_dev))
+
+#define gdm_lte_hci_send(n, d, l) (\
+ n->phy_dev->send_hci_func(n->phy_dev->priv_dev, d, l, NULL, NULL))
+
+#define gdm_lte_sdu_send(n, d, l, c, b, i, t) (\
+ n->phy_dev->send_sdu_func(n->phy_dev->priv_dev, d, l, n->pdn_table.dft_eps_id, 0, c, b, i, t))
+
+#define gdm_lte_rcv_with_cb(n, c, b, e) (\
+ n->rcv_func(n->priv_dev, c, b, e))
+
+#define IP_VERSION_4 4
+#define IP_VERSION_6 6
+
+static struct {
+ int ref_cnt;
+ struct sock *sock;
+} lte_event;
+
+static struct device_type wwan_type = {
+ .name = "wwan",
+};
+
+static int gdm_lte_open(struct net_device *dev)
+{
+ netif_start_queue(dev);
+ return 0;
+}
+
+static int gdm_lte_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static int gdm_lte_set_config(struct net_device *dev, struct ifmap *map)
+{
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+ return 0;
+}
+
+static void tx_complete(void *arg)
+{
+ struct nic *nic = arg;
+
+ if (netif_queue_stopped(nic->netdev))
+ netif_wake_queue(nic->netdev);
+}
+
+static int gdm_lte_rx(struct sk_buff *skb, struct nic *nic, int nic_type)
+{
+ int ret;
+
+ ret = netif_rx_ni(skb);
+ if (ret == NET_RX_DROP) {
+ nic->stats.rx_dropped++;
+ } else {
+ nic->stats.rx_packets++;
+ nic->stats.rx_bytes += skb->len + ETH_HLEN;
+ }
+
+ return 0;
+}
+
+int gdm_lte_emulate_arp(struct sk_buff *skb_in, u32 nic_type)
+{
+ struct nic *nic = netdev_priv(skb_in->dev);
+ struct sk_buff *skb_out;
+ struct ethhdr eth;
+ struct vlan_ethhdr vlan_eth;
+ struct arphdr *arp_in;
+ struct arphdr *arp_out;
+ struct arpdata {
+ u8 ar_sha[ETH_ALEN];
+ u8 ar_sip[4];
+ u8 ar_tha[ETH_ALEN];
+ u8 ar_tip[4];
+ };
+ struct arpdata *arp_data_in;
+ struct arpdata *arp_data_out;
+ u8 arp_temp[60];
+ void *mac_header_data;
+ u32 mac_header_len;
+
+ /* Format the mac header so that it can be put to skb */
+ if (ntohs(((struct ethhdr *)skb_in->data)->h_proto) == ETH_P_8021Q) {
+ memcpy(&vlan_eth, skb_in->data, sizeof(struct vlan_ethhdr));
+ mac_header_data = &vlan_eth;
+ mac_header_len = VLAN_ETH_HLEN;
+ } else {
+ memcpy(&eth, skb_in->data, sizeof(struct ethhdr));
+ mac_header_data = &eth;
+ mac_header_len = ETH_HLEN;
+ }
+
+ /* Get the pointer of the original request */
+ arp_in = (struct arphdr *)(skb_in->data + mac_header_len);
+ arp_data_in = (struct arpdata *)(skb_in->data + mac_header_len + sizeof(struct arphdr));
+
+ /* Get the pointer of the outgoing response */
+ arp_out = (struct arphdr *)arp_temp;
+ arp_data_out = (struct arpdata *)(arp_temp + sizeof(struct arphdr));
+
+ /* Copy the arp header */
+ memcpy(arp_out, arp_in, sizeof(struct arphdr));
+ arp_out->ar_op = htons(ARPOP_REPLY);
+
+ /* Copy the arp payload: based on 2 bytes of mac and fill the IP */
+ arp_data_out->ar_sha[0] = arp_data_in->ar_sha[0];
+ arp_data_out->ar_sha[1] = arp_data_in->ar_sha[1];
+ memcpy(&arp_data_out->ar_sha[2], &arp_data_in->ar_tip[0], 4);
+ memcpy(&arp_data_out->ar_sip[0], &arp_data_in->ar_tip[0], 4);
+ memcpy(&arp_data_out->ar_tha[0], &arp_data_in->ar_sha[0], 6);
+ memcpy(&arp_data_out->ar_tip[0], &arp_data_in->ar_sip[0], 4);
+
+ /* Fill the destination mac with source mac of the received packet */
+ memcpy(mac_header_data, mac_header_data + ETH_ALEN, ETH_ALEN);
+ /* Fill the source mac with nic's source mac */
+ memcpy(mac_header_data + ETH_ALEN, nic->src_mac_addr, ETH_ALEN);
+
+ /* Alloc skb and reserve align */
+ skb_out = dev_alloc_skb(skb_in->len);
+ if (!skb_out)
+ return -ENOMEM;
+ skb_reserve(skb_out, NET_IP_ALIGN);
+
+ memcpy(skb_put(skb_out, mac_header_len), mac_header_data, mac_header_len);
+ memcpy(skb_put(skb_out, sizeof(struct arphdr)), arp_out, sizeof(struct arphdr));
+ memcpy(skb_put(skb_out, sizeof(struct arpdata)), arp_data_out, sizeof(struct arpdata));
+
+ skb_out->protocol = ((struct ethhdr *)mac_header_data)->h_proto;
+ skb_out->dev = skb_in->dev;
+ skb_reset_mac_header(skb_out);
+ skb_pull(skb_out, ETH_HLEN);
+
+ gdm_lte_rx(skb_out, nic, nic_type);
+
+ return 0;
+}
+
+int icmp6_checksum(struct ipv6hdr *ipv6, u16 *ptr, int len)
+{
+ unsigned short *w = ptr;
+ int sum = 0;
+ int i;
+
+ union {
+ struct {
+ u8 ph_src[16];
+ u8 ph_dst[16];
+ u32 ph_len;
+ u8 ph_zero[3];
+ u8 ph_nxt;
+ } ph __packed;
+ u16 pa[20];
+ } pseudo_header;
+
+ memset(&pseudo_header, 0, sizeof(pseudo_header));
+ memcpy(&pseudo_header.ph.ph_src, &ipv6->saddr.in6_u.u6_addr8, 16);
+ memcpy(&pseudo_header.ph.ph_dst, &ipv6->daddr.in6_u.u6_addr8, 16);
+ pseudo_header.ph.ph_len = ipv6->payload_len;
+ pseudo_header.ph.ph_nxt = ipv6->nexthdr;
+
+ w = (u16 *)&pseudo_header;
+ for (i = 0; i < sizeof(pseudo_header.pa) / sizeof(pseudo_header.pa[0]); i++)
+ sum += pseudo_header.pa[i];
+
+ w = ptr;
+ while (len > 1) {
+ sum += *w++;
+ len -= 2;
+ }
+
+ sum = (sum >> 16) + (sum & 0xFFFF);
+ sum += (sum >> 16);
+ sum = ~sum & 0xffff;
+
+ return sum;
+}
+
+int gdm_lte_emulate_ndp(struct sk_buff *skb_in, u32 nic_type)
+{
+ struct nic *nic = netdev_priv(skb_in->dev);
+ struct sk_buff *skb_out;
+ struct ethhdr eth;
+ struct vlan_ethhdr vlan_eth;
+ struct neighbour_advertisement {
+ u8 target_address[16];
+ u8 type;
+ u8 length;
+ u8 link_layer_address[6];
+ };
+ struct neighbour_advertisement na;
+ struct neighbour_solicitation {
+ u8 target_address[16];
+ };
+ struct neighbour_solicitation *ns;
+ struct ipv6hdr *ipv6_in;
+ struct ipv6hdr ipv6_out;
+ struct icmp6hdr *icmp6_in;
+ struct icmp6hdr icmp6_out;
+
+ void *mac_header_data;
+ u32 mac_header_len;
+
+ /* Format the mac header so that it can be put to skb */
+ if (ntohs(((struct ethhdr *)skb_in->data)->h_proto) == ETH_P_8021Q) {
+ memcpy(&vlan_eth, skb_in->data, sizeof(struct vlan_ethhdr));
+ if (ntohs(vlan_eth.h_vlan_encapsulated_proto) != ETH_P_IPV6)
+ return -1;
+ mac_header_data = &vlan_eth;
+ mac_header_len = VLAN_ETH_HLEN;
+ } else {
+ memcpy(&eth, skb_in->data, sizeof(struct ethhdr));
+ if (ntohs(eth.h_proto) != ETH_P_IPV6)
+ return -1;
+ mac_header_data = &eth;
+ mac_header_len = ETH_HLEN;
+ }
+
+ /* Check if this is IPv6 ICMP packet */
+ ipv6_in = (struct ipv6hdr *)(skb_in->data + mac_header_len);
+ if (ipv6_in->version != 6 || ipv6_in->nexthdr != IPPROTO_ICMPV6)
+ return -1;
+
+ /* Check if this is NDP packet */
+ icmp6_in = (struct icmp6hdr *)(skb_in->data + mac_header_len + sizeof(struct ipv6hdr));
+ if (icmp6_in->icmp6_type == NDISC_ROUTER_SOLICITATION) { /* Check RS */
+ return -1;
+ } else if (icmp6_in->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) { /* Check NS */
+ u8 icmp_na[sizeof(struct icmp6hdr) + sizeof(struct neighbour_advertisement)];
+ u8 zero_addr8[16] = {0,};
+
+ if (memcmp(ipv6_in->saddr.in6_u.u6_addr8, zero_addr8, 16) == 0)
+ /* Duplicate Address Detection: Source IP is all zero */
+ return 0;
+
+ icmp6_out.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
+ icmp6_out.icmp6_code = 0;
+ icmp6_out.icmp6_cksum = 0;
+ icmp6_out.icmp6_dataun.un_data32[0] = htonl(0x60000000); /* R=0, S=1, O=1 */
+
+ ns = (struct neighbour_solicitation *)(skb_in->data + mac_header_len + sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr));
+ memcpy(&na.target_address, ns->target_address, 16);
+ na.type = 0x02;
+ na.length = 1;
+ na.link_layer_address[0] = 0x00;
+ na.link_layer_address[1] = 0x0a;
+ na.link_layer_address[2] = 0x3b;
+ na.link_layer_address[3] = 0xaf;
+ na.link_layer_address[4] = 0x63;
+ na.link_layer_address[5] = 0xc7;
+
+ memcpy(&ipv6_out, ipv6_in, sizeof(struct ipv6hdr));
+ memcpy(ipv6_out.saddr.in6_u.u6_addr8, &na.target_address, 16);
+ memcpy(ipv6_out.daddr.in6_u.u6_addr8, ipv6_in->saddr.in6_u.u6_addr8, 16);
+ ipv6_out.payload_len = htons(sizeof(struct icmp6hdr) + sizeof(struct neighbour_advertisement));
+
+ memcpy(icmp_na, &icmp6_out, sizeof(struct icmp6hdr));
+ memcpy(icmp_na + sizeof(struct icmp6hdr), &na, sizeof(struct neighbour_advertisement));
+
+ icmp6_out.icmp6_cksum = icmp6_checksum(&ipv6_out, (u16 *)icmp_na, sizeof(icmp_na));
+ } else {
+ return -1;
+ }
+
+ /* Fill the destination mac with source mac of the received packet */
+ memcpy(mac_header_data, mac_header_data + ETH_ALEN, ETH_ALEN);
+ /* Fill the source mac with nic's source mac */
+ memcpy(mac_header_data + ETH_ALEN, nic->src_mac_addr, ETH_ALEN);
+
+ /* Alloc skb and reserve align */
+ skb_out = dev_alloc_skb(skb_in->len);
+ if (!skb_out)
+ return -ENOMEM;
+ skb_reserve(skb_out, NET_IP_ALIGN);
+
+ memcpy(skb_put(skb_out, mac_header_len), mac_header_data, mac_header_len);
+ memcpy(skb_put(skb_out, sizeof(struct ipv6hdr)), &ipv6_out, sizeof(struct ipv6hdr));
+ memcpy(skb_put(skb_out, sizeof(struct icmp6hdr)), &icmp6_out, sizeof(struct icmp6hdr));
+ memcpy(skb_put(skb_out, sizeof(struct neighbour_advertisement)), &na, sizeof(struct neighbour_advertisement));
+
+ skb_out->protocol = ((struct ethhdr *)mac_header_data)->h_proto;
+ skb_out->dev = skb_in->dev;
+ skb_reset_mac_header(skb_out);
+ skb_pull(skb_out, ETH_HLEN);
+
+ gdm_lte_rx(skb_out, nic, nic_type);
+
+ return 0;
+}
+
+static s32 gdm_lte_tx_nic_type(struct net_device *dev, struct sk_buff *skb)
+{
+ struct nic *nic = netdev_priv(dev);
+ struct ethhdr *eth;
+ struct vlan_ethhdr *vlan_eth;
+ struct iphdr *ip;
+ struct ipv6hdr *ipv6;
+ int mac_proto;
+ void *network_data;
+ u32 nic_type = 0;
+
+ /* NIC TYPE is based on the nic_id of this net_device */
+ nic_type = 0x00000010 | nic->nic_id;
+
+ /* Get ethernet protocol */
+ eth = (struct ethhdr *)skb->data;
+ if (ntohs(eth->h_proto) == ETH_P_8021Q) {
+ vlan_eth = (struct vlan_ethhdr *)skb->data;
+ mac_proto = ntohs(vlan_eth->h_vlan_encapsulated_proto);
+ network_data = skb->data + VLAN_ETH_HLEN;
+ nic_type |= NIC_TYPE_F_VLAN;
+ } else {
+ mac_proto = ntohs(eth->h_proto);
+ network_data = skb->data + ETH_HLEN;
+ }
+
+ /* Process packet for nic type */
+ switch (mac_proto) {
+ case ETH_P_ARP:
+ nic_type |= NIC_TYPE_ARP;
+ break;
+ case ETH_P_IP:
+ nic_type |= NIC_TYPE_F_IPV4;
+ ip = (struct iphdr *)network_data;
+
+ /* Check DHCPv4 */
+ if (ip->protocol == IPPROTO_UDP) {
+ struct udphdr *udp = (struct udphdr *)(network_data + sizeof(struct iphdr));
+ if (ntohs(udp->dest) == 67 || ntohs(udp->dest) == 68)
+ nic_type |= NIC_TYPE_F_DHCP;
+ }
+ break;
+ case ETH_P_IPV6:
+ nic_type |= NIC_TYPE_F_IPV6;
+ ipv6 = (struct ipv6hdr *)network_data;
+
+ if (ipv6->nexthdr == IPPROTO_ICMPV6) /* Check NDP request */ {
+ struct icmp6hdr *icmp6 = (struct icmp6hdr *)(network_data + sizeof(struct ipv6hdr));
+ if (/*icmp6->icmp6_type == NDISC_ROUTER_SOLICITATION || */
+ icmp6->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION)
+ nic_type |= NIC_TYPE_ICMPV6;
+ } else if (ipv6->nexthdr == IPPROTO_UDP) /* Check DHCPv6 */ {
+ struct udphdr *udp = (struct udphdr *)(network_data + sizeof(struct ipv6hdr));
+ if (ntohs(udp->dest) == 546 || ntohs(udp->dest) == 547)
+ nic_type |= NIC_TYPE_F_DHCP;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return nic_type;
+}
+
+static int gdm_lte_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct nic *nic = netdev_priv(dev);
+ u32 nic_type;
+ void *data_buf;
+ int data_len;
+ int idx;
+ int ret = 0;
+
+ nic_type = gdm_lte_tx_nic_type(dev, skb);
+ if (nic_type == 0) {
+ netdev_err(dev, "tx - invalid nic_type\n");
+ return -1;
+ }
+
+ if (nic_type & NIC_TYPE_ARP) {
+ if (gdm_lte_emulate_arp(skb, nic_type) == 0) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+ }
+
+ if (nic_type & NIC_TYPE_ICMPV6) {
+ if (gdm_lte_emulate_ndp(skb, nic_type) == 0) {
+ dev_kfree_skb(skb);
+ return 0;
+ }
+ }
+
+ /*
+ * Remove the VLAN tag (byte shift) if there is one.
+ * For ARP this breaks the offset, since vlan_ethhdr+4 is treated as
+ * ethhdr; however, that is not a problem because the response starts
+ * from arp_hdr and the ethhdr is created by this driver based on the
+ * NIC MAC address.
+ */
+ if (nic_type & NIC_TYPE_F_VLAN) {
+ struct vlan_ethhdr *vlan_eth = (struct vlan_ethhdr *)skb->data;
+ nic->vlan_id = ntohs(vlan_eth->h_vlan_TCI) & VLAN_VID_MASK;
+ data_buf = skb->data + (VLAN_ETH_HLEN - ETH_HLEN);
+ data_len = skb->len - (VLAN_ETH_HLEN - ETH_HLEN);
+ } else {
+ nic->vlan_id = 0;
+ data_buf = skb->data;
+ data_len = skb->len;
+ }
+
+ /* If it is an ICMPv6 packet, clear all the other bits: for backward compatibility with the firmware */
+ if (nic_type & NIC_TYPE_ICMPV6)
+ nic_type = NIC_TYPE_ICMPV6;
+
+ /* If it is not a DHCP packet, clear all the flag bits: original NIC, otherwise the special flag (IPVX | DHCP) */
+ if (!(nic_type & NIC_TYPE_F_DHCP))
+ nic_type &= NIC_TYPE_MASK;
+
+ sscanf(dev->name, "lte%d", &idx);
+
+ ret = gdm_lte_sdu_send(nic,
+ data_buf,
+ data_len,
+ tx_complete,
+ nic,
+ idx,
+ nic_type);
+
+ if (ret == TX_NO_BUFFER || ret == TX_NO_SPC) {
+ netif_stop_queue(dev);
+ if (ret == TX_NO_BUFFER)
+ ret = 0;
+ else
+ ret = -ENOSPC;
+ } else if (ret == TX_NO_DEV) {
+ ret = -ENODEV;
+ }
+
+ /* Updates tx stats */
+ if (ret) {
+ nic->stats.tx_dropped++;
+ } else {
+ nic->stats.tx_packets++;
+ nic->stats.tx_bytes += data_len;
+ }
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static struct net_device_stats *gdm_lte_stats(struct net_device *dev)
+{
+ struct nic *nic = netdev_priv(dev);
+ return &nic->stats;
+}
+
+static int gdm_lte_event_send(struct net_device *dev, char *buf, int len)
+{
+ struct nic *nic = netdev_priv(dev);
+ struct hci_packet *hci = (struct hci_packet *)buf;
+ int idx;
+
+ sscanf(dev->name, "lte%d", &idx);
+
+ return netlink_send(lte_event.sock, idx, 0, buf,
+ gdm_dev16_to_cpu(gdm_dev_endian(nic), hci->len) + HCI_HEADER_SIZE);
+}
+
+static void gdm_lte_event_rcv(struct net_device *dev, u16 type, void *msg, int len)
+{
+ struct nic *nic = netdev_priv(dev);
+
+ gdm_lte_hci_send(nic, msg, len);
+}
+
+int gdm_lte_event_init(void)
+{
+ if (lte_event.ref_cnt == 0)
+ lte_event.sock = netlink_init(NETLINK_LTE, gdm_lte_event_rcv);
+
+ if (lte_event.sock) {
+ lte_event.ref_cnt++;
+ return 0;
+ }
+
+ pr_err("event init failed\n");
+ return -1;
+}
+
+void gdm_lte_event_exit(void)
+{
+ if (lte_event.sock && --lte_event.ref_cnt == 0) {
+ netlink_exit(lte_event.sock);
+ lte_event.sock = NULL;
+ }
+}
+
+static u8 find_dev_index(u32 nic_type)
+{
+ u8 index;
+
+ index = (u8)(nic_type & 0x0000000f);
+ if (index > MAX_NIC_TYPE)
+ index = 0;
+
+ return index;
+}
+
+static void gdm_lte_netif_rx(struct net_device *dev, char *buf, int len, int flagged_nic_type)
+{
+ u32 nic_type;
+ struct nic *nic;
+ struct sk_buff *skb;
+ struct ethhdr eth;
+ struct vlan_ethhdr vlan_eth;
+ void *mac_header_data;
+ u32 mac_header_len;
+ char ip_version = 0;
+
+ nic_type = flagged_nic_type & NIC_TYPE_MASK;
+ nic = netdev_priv(dev);
+
+ if (flagged_nic_type & NIC_TYPE_F_DHCP) {
+ /* Change the destination mac address with the one requested the IP */
+ if (flagged_nic_type & NIC_TYPE_F_IPV4) {
+ struct dhcp_packet {
+ u8 op; /* BOOTREQUEST or BOOTREPLY */
+ u8 htype; /* hardware address type. 1 = 10mb ethernet */
+ u8 hlen; /* hardware address length */
+ u8 hops; /* used by relay agents only */
+ u32 xid; /* unique id */
+ u16 secs; /* elapsed since client began acquisition/renewal */
+ u16 flags; /* only one flag so far: */
+ #define BROADCAST_FLAG 0x8000 /* "I need broadcast replies" */
+ u32 ciaddr; /* client IP (if client is in BOUND, RENEW or REBINDING state) */
+ u32 yiaddr; /* 'your' (client) IP address */
+ /* IP address of next server to use in bootstrap, returned in DHCPOFFER, DHCPACK by server */
+ u32 siaddr_nip;
+ u32 gateway_nip; /* relay agent IP address */
+ u8 chaddr[16]; /* link-layer client hardware address (MAC) */
+ u8 sname[64]; /* server host name (ASCIZ) */
+ u8 file[128]; /* boot file name (ASCIZ) */
+ u32 cookie; /* fixed first four option bytes (99,130,83,99 dec) */
+ } __packed;
+ void *addr = buf + sizeof(struct iphdr) + sizeof(struct udphdr) + offsetof(struct dhcp_packet, chaddr);
+ memcpy(nic->dest_mac_addr, addr, ETH_ALEN);
+ }
+ }
+
+ if (nic->vlan_id > 0) {
+ mac_header_data = (void *)&vlan_eth;
+ mac_header_len = VLAN_ETH_HLEN;
+ } else {
+ mac_header_data = (void *)&eth;
+ mac_header_len = ETH_HLEN;
+ }
+
+ /* Format the data so that it can be put to skb */
+ memcpy(mac_header_data, nic->dest_mac_addr, ETH_ALEN);
+ memcpy(mac_header_data + ETH_ALEN, nic->src_mac_addr, ETH_ALEN);
+
+ vlan_eth.h_vlan_TCI = htons(nic->vlan_id);
+ vlan_eth.h_vlan_proto = htons(ETH_P_8021Q);
+
+ if (nic_type == NIC_TYPE_ARP) {
+ /* Should be response: Only happens because there was a request from the host */
+ eth.h_proto = htons(ETH_P_ARP);
+ vlan_eth.h_vlan_encapsulated_proto = htons(ETH_P_ARP);
+ } else {
+ ip_version = buf[0] >> 4;
+ if (ip_version == IP_VERSION_4) {
+ eth.h_proto = htons(ETH_P_IP);
+ vlan_eth.h_vlan_encapsulated_proto = htons(ETH_P_IP);
+ } else if (ip_version == IP_VERSION_6) {
+ eth.h_proto = htons(ETH_P_IPV6);
+ vlan_eth.h_vlan_encapsulated_proto = htons(ETH_P_IPV6);
+ } else {
+ netdev_err(dev, "Unknown IP version %d\n", ip_version);
+ return;
+ }
+ }
+
+ /* Alloc skb and reserve align */
+ skb = dev_alloc_skb(len + mac_header_len + NET_IP_ALIGN);
+ if (!skb)
+ return;
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ memcpy(skb_put(skb, mac_header_len), mac_header_data, mac_header_len);
+ memcpy(skb_put(skb, len), buf, len);
+
+ skb->protocol = ((struct ethhdr *)mac_header_data)->h_proto;
+ skb->dev = dev;
+ skb_reset_mac_header(skb);
+ skb_pull(skb, ETH_HLEN);
+
+ gdm_lte_rx(skb, nic, nic_type);
+}
+
+static void gdm_lte_multi_sdu_pkt(struct phy_dev *phy_dev, char *buf, int len)
+{
+ struct net_device *dev;
+ struct multi_sdu *multi_sdu = (struct multi_sdu *)buf;
+ struct sdu *sdu = NULL;
+ u8 *data = (u8 *)multi_sdu->data;
+ u16 i = 0;
+ u16 num_packet;
+ u16 hci_len;
+ u16 cmd_evt;
+ u32 nic_type;
+ u8 index;
+
+ hci_len = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), multi_sdu->len);
+ num_packet = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), multi_sdu->num_packet);
+
+ for (i = 0; i < num_packet; i++) {
+ sdu = (struct sdu *)data;
+
+ cmd_evt = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), sdu->cmd_evt);
+ hci_len = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), sdu->len);
+ nic_type = gdm_dev32_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), sdu->nic_type);
+
+ if (cmd_evt != LTE_RX_SDU) {
+ pr_err("rx sdu wrong hci %04x\n", cmd_evt);
+ return;
+ }
+ if (hci_len < 12) {
+ pr_err("rx sdu invalid len %d\n", hci_len);
+ return;
+ }
+
+ index = find_dev_index(nic_type);
+ if (index < MAX_NIC_TYPE) {
+ dev = phy_dev->dev[index];
+ gdm_lte_netif_rx(dev, (char *)sdu->data, (int)(hci_len-12), nic_type);
+ } else {
+ pr_err("rx sdu invalid nic_type :%x\n", nic_type);
+ }
+
+ data += ((hci_len+3) & 0xfffc) + HCI_HEADER_SIZE;
+ }
+}
+
+static void gdm_lte_pdn_table(struct net_device *dev, char *buf, int len)
+{
+ struct nic *nic = netdev_priv(dev);
+ struct hci_pdn_table_ind *pdn_table = (struct hci_pdn_table_ind *)buf;
+
+ if (pdn_table->activate) {
+ nic->pdn_table.activate = pdn_table->activate;
+ nic->pdn_table.dft_eps_id = gdm_dev32_to_cpu(gdm_dev_endian(nic), pdn_table->dft_eps_id);
+ nic->pdn_table.nic_type = gdm_dev32_to_cpu(gdm_dev_endian(nic), pdn_table->nic_type);
+
+ netdev_info(dev, "pdn activated, nic_type=0x%x\n",
+ nic->pdn_table.nic_type);
+ } else {
+ memset(&nic->pdn_table, 0x00, sizeof(struct pdn_table));
+ netdev_info(dev, "pdn deactivated\n");
+ }
+}
+
+static int gdm_lte_receive_pkt(struct phy_dev *phy_dev, char *buf, int len)
+{
+ struct hci_packet *hci = (struct hci_packet *)buf;
+ struct hci_pdn_table_ind *pdn_table = (struct hci_pdn_table_ind *)buf;
+ struct sdu *sdu;
+ struct net_device *dev;
+ int ret = 0;
+ u16 cmd_evt;
+ u32 nic_type;
+ u8 index;
+
+ if (!len)
+ return ret;
+
+ cmd_evt = gdm_dev16_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), hci->cmd_evt);
+
+ dev = phy_dev->dev[0];
+ if (dev == NULL)
+ return 0;
+
+ switch (cmd_evt) {
+ case LTE_RX_SDU:
+ sdu = (struct sdu *)hci->data;
+ nic_type = gdm_dev32_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), sdu->nic_type);
+ index = find_dev_index(nic_type);
+ dev = phy_dev->dev[index];
+ gdm_lte_netif_rx(dev, hci->data, len, nic_type);
+ break;
+ case LTE_RX_MULTI_SDU:
+ gdm_lte_multi_sdu_pkt(phy_dev, buf, len);
+ break;
+ case LTE_LINK_ON_OFF_INDICATION:
+ netdev_info(dev, "link %s\n",
+ ((struct hci_connect_ind *)buf)->connect
+ ? "on" : "off");
+ break;
+ case LTE_PDN_TABLE_IND:
+ pdn_table = (struct hci_pdn_table_ind *)buf;
+ nic_type = gdm_dev32_to_cpu(phy_dev->get_endian(phy_dev->priv_dev), pdn_table->nic_type);
+ index = find_dev_index(nic_type);
+ dev = phy_dev->dev[index];
+ gdm_lte_pdn_table(dev, buf, len);
+ /* Fall through */
+ default:
+ ret = gdm_lte_event_send(dev, buf, len);
+ break;
+ }
+
+ return ret;
+}
+
+static int rx_complete(void *arg, void *data, int len, int context)
+{
+ struct phy_dev *phy_dev = (struct phy_dev *)arg;
+
+ return gdm_lte_receive_pkt(phy_dev, (char *)data, len);
+}
+
+void start_rx_proc(struct phy_dev *phy_dev)
+{
+ int i;
+
+ for (i = 0; i < MAX_RX_SUBMIT_COUNT; i++)
+ gdm_lte_rcv_with_cb(phy_dev, rx_complete, phy_dev, USB_COMPLETE);
+}
+
+static struct net_device_ops gdm_netdev_ops = {
+ .ndo_open = gdm_lte_open,
+ .ndo_stop = gdm_lte_close,
+ .ndo_set_config = gdm_lte_set_config,
+ .ndo_start_xmit = gdm_lte_tx,
+ .ndo_get_stats = gdm_lte_stats,
+};
+
+static u8 gdm_lte_macaddr[ETH_ALEN] = {0x00, 0x0a, 0x3b, 0x00, 0x00, 0x00};
+
+static void form_mac_address(u8 *dev_addr, u8 *nic_src, u8 *nic_dest, u8 *mac_address, u8 index)
+{
+ /* Form the dev_addr */
+ if (!mac_address)
+ memcpy(dev_addr, gdm_lte_macaddr, ETH_ALEN);
+ else
+ memcpy(dev_addr, mac_address, ETH_ALEN);
+
+ /* The last byte of the mac address should be less than or equal to 0xFC */
+ dev_addr[ETH_ALEN-1] += index;
+
+ /* Create random nic src and copy the first 3 bytes to be the same as dev_addr */
+ random_ether_addr(nic_src);
+ memcpy(nic_src, dev_addr, 3);
+
+ /* Copy the nic_dest from dev_addr*/
+ memcpy(nic_dest, dev_addr, ETH_ALEN);
+}
+
+static void validate_mac_address(u8 *mac_address)
+{
+ /* if zero address or multicast bit set, restore the default value */
+ if (is_zero_ether_addr(mac_address) || (mac_address[0] & 0x01)) {
+ pr_err("MAC invalid, restoring default\n");
+ memcpy(mac_address, gdm_lte_macaddr, 6);
+ }
+}
+
+int register_lte_device(struct phy_dev *phy_dev, struct device *dev, u8 *mac_address)
+{
+ struct nic *nic;
+ struct net_device *net;
+ char pdn_dev_name[16];
+ int ret = 0;
+ u8 index;
+
+ validate_mac_address(mac_address);
+
+ for (index = 0; index < MAX_NIC_TYPE; index++) {
+ /* Create device name lteXpdnX */
+ sprintf(pdn_dev_name, "lte%%dpdn%d", index);
+
+ /* Allocate netdev */
+ net = alloc_netdev(sizeof(struct nic), pdn_dev_name, ether_setup);
+ if (net == NULL) {
+ pr_err("alloc_netdev failed\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ net->netdev_ops = &gdm_netdev_ops;
+ net->flags &= ~IFF_MULTICAST;
+ net->mtu = DEFAULT_MTU_SIZE;
+
+ nic = netdev_priv(net);
+ memset(nic, 0, sizeof(struct nic));
+ nic->netdev = net;
+ nic->phy_dev = phy_dev;
+ nic->nic_id = index;
+
+ form_mac_address(
+ net->dev_addr,
+ nic->src_mac_addr,
+ nic->dest_mac_addr,
+ mac_address,
+ index);
+
+ SET_NETDEV_DEV(net, dev);
+ SET_NETDEV_DEVTYPE(net, &wwan_type);
+
+ ret = register_netdev(net);
+ if (ret)
+ goto err;
+
+ netif_carrier_on(net);
+
+ phy_dev->dev[index] = net;
+ }
+
+ return 0;
+
+err:
+ unregister_lte_device(phy_dev);
+
+ return ret;
+}
+
+void unregister_lte_device(struct phy_dev *phy_dev)
+{
+ struct net_device *net;
+ int index;
+
+ for (index = 0; index < MAX_NIC_TYPE; index++) {
+ net = phy_dev->dev[index];
+ if (net == NULL)
+ continue;
+
+ unregister_netdev(net);
+ free_netdev(net);
+ }
+}
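For reference, the icmp6_checksum() helper defined earlier in this file builds the IPv6 pseudo-header and folds the one's-complement sum by hand. A hedged sketch of the same computation using the kernel's existing checksum helpers (csum_partial() and csum_ipv6_magic()) follows; the function name is hypothetical, a contiguous ICMPv6 payload is assumed, and note that this variant returns a ready-to-store __sum16 rather than the host-order int used above.

#include <linux/ipv6.h>
#include <linux/in6.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

/* Hypothetical equivalent of icmp6_checksum() built on csum_ipv6_magic(). */
static __sum16 icmp6_checksum_sketch(struct ipv6hdr *ipv6, void *icmp, int len)
{
	__wsum payload_sum = csum_partial(icmp, len, 0);

	/* pseudo-header (saddr, daddr, length, next header) + payload sum */
	return csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr,
			       len, IPPROTO_ICMPV6, payload_sum);
}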
diff --git a/drivers/staging/gdm724x/gdm_lte.h b/drivers/staging/gdm724x/gdm_lte.h
new file mode 100644
index 00000000000..9287d310d8e
--- /dev/null
+++ b/drivers/staging/gdm724x/gdm_lte.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _GDM_LTE_H_
+#define _GDM_LTE_H_
+
+#include <linux/netdevice.h>
+#include <linux/types.h>
+
+#include "gdm_endian.h"
+
+#define MAX_NIC_TYPE 4
+#define MAX_RX_SUBMIT_COUNT 3
+#define DRIVER_VERSION "3.7.17.0"
+
+enum TX_ERROR_CODE {
+ TX_NO_ERROR = 0,
+ TX_NO_DEV,
+ TX_NO_SPC,
+ TX_NO_BUFFER,
+};
+
+enum CALLBACK_CONTEXT {
+ KERNEL_THREAD = 0,
+ USB_COMPLETE,
+};
+
+struct pdn_table {
+ u8 activate;
+ u32 dft_eps_id;
+ u32 nic_type;
+} __packed;
+
+struct nic;
+
+struct phy_dev {
+ void *priv_dev;
+ struct net_device *dev[MAX_NIC_TYPE];
+ int (*send_hci_func)(void *priv_dev, void *data, int len,
+ void (*cb)(void *cb_data), void *cb_data);
+ int (*send_sdu_func)(void *priv_dev, void *data, int len,
+ unsigned int dftEpsId, unsigned int epsId,
+ void (*cb)(void *cb_data), void *cb_data,
+ int dev_idx, int nic_type);
+ int (*rcv_func)(void *priv_dev,
+ int (*cb)(void *cb_data, void *data, int len,
+ int context),
+ void *cb_data, int context);
+ struct gdm_endian *(*get_endian)(void *priv_dev);
+};
+
+struct nic {
+ struct net_device *netdev;
+ struct phy_dev *phy_dev;
+ struct net_device_stats stats;
+ struct pdn_table pdn_table;
+ u8 dest_mac_addr[ETH_ALEN];
+ u8 src_mac_addr[ETH_ALEN];
+ u32 nic_id;
+ u16 vlan_id;
+};
+
+int gdm_lte_event_init(void);
+void gdm_lte_event_exit(void);
+
+void start_rx_proc(struct phy_dev *phy_dev);
+int register_lte_device(struct phy_dev *phy_dev, struct device *dev,
+ u8 *mac_address);
+void unregister_lte_device(struct phy_dev *phy_dev);
+
+#endif /* _GDM_LTE_H_ */
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
new file mode 100644
index 00000000000..5b1ef4000d0
--- /dev/null
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -0,0 +1,690 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/usb.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/slab.h>
+#include <linux/usb/cdc.h>
+
+#include "gdm_mux.h"
+
+struct workqueue_struct *mux_rx_wq;
+
+static u16 packet_type[TTY_MAX_COUNT] = {0xF011, 0xF010};
+
+#define USB_DEVICE_CDC_DATA(vid, pid) \
+ .match_flags = \
+ USB_DEVICE_ID_MATCH_DEVICE |\
+ USB_DEVICE_ID_MATCH_INT_CLASS |\
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
+ .idVendor = vid,\
+ .idProduct = pid,\
+ .bInterfaceClass = USB_CLASS_COMM,\
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_ACM
+
+static const struct usb_device_id id_table[] = {
+ { USB_DEVICE_CDC_DATA(0x1076, 0x8000) }, /* GCT GDM7240 */
+ { USB_DEVICE_CDC_DATA(0x1076, 0x8f00) }, /* GCT GDM7243 */
+ { USB_DEVICE_CDC_DATA(0x1076, 0x9000) }, /* GCT GDM7243 */
+ { USB_DEVICE_CDC_DATA(0x1d74, 0x2300) }, /* LGIT Phoenix */
+ {}
+};
+
+
+MODULE_DEVICE_TABLE(usb, id_table);
+
+int packet_type_to_index(u16 packetType)
+{
+ int i;
+
+ for (i = 0; i < TTY_MAX_COUNT; i++) {
+ if (packet_type[i] == packetType)
+ return i;
+ }
+
+ return -1;
+}
+
+static struct mux_tx *alloc_mux_tx(int len)
+{
+ struct mux_tx *t = NULL;
+
+ t = kzalloc(sizeof(struct mux_tx), GFP_ATOMIC);
+ if (!t)
+ return NULL;
+
+ t->urb = usb_alloc_urb(0, GFP_ATOMIC);
+ t->buf = kmalloc(MUX_TX_MAX_SIZE, GFP_ATOMIC);
+ if (!t->urb || !t->buf) {
+ usb_free_urb(t->urb);
+ kfree(t->buf);
+ kfree(t);
+ return NULL;
+ }
+
+ return t;
+}
+
+static void free_mux_tx(struct mux_tx *t)
+{
+ if (t) {
+ usb_free_urb(t->urb);
+ kfree(t->buf);
+ kfree(t);
+ }
+}
+
+static struct mux_rx *alloc_mux_rx(void)
+{
+ struct mux_rx *r = NULL;
+
+ r = kzalloc(sizeof(struct mux_rx), GFP_ATOMIC);
+ if (!r)
+ return NULL;
+
+ r->urb = usb_alloc_urb(0, GFP_ATOMIC);
+ r->buf = kmalloc(MUX_RX_MAX_SIZE, GFP_ATOMIC);
+ if (!r->urb || !r->buf) {
+ usb_free_urb(r->urb);
+ kfree(r->buf);
+ kfree(r);
+ return NULL;
+ }
+
+ return r;
+}
+
+static void free_mux_rx(struct mux_rx *r)
+{
+ if (r) {
+ usb_free_urb(r->urb);
+ kfree(r->buf);
+ kfree(r);
+ }
+}
+
+static struct mux_rx *get_rx_struct(struct rx_cxt *rx)
+{
+ struct mux_rx *r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rx->free_list_lock, flags);
+
+ if (list_empty(&rx->rx_free_list)) {
+ spin_unlock_irqrestore(&rx->free_list_lock, flags);
+ return NULL;
+ }
+
+ r = list_entry(rx->rx_free_list.prev, struct mux_rx, free_list);
+ list_del(&r->free_list);
+
+ spin_unlock_irqrestore(&rx->free_list_lock, flags);
+
+ return r;
+}
+
+static void put_rx_struct(struct rx_cxt *rx, struct mux_rx *r)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rx->free_list_lock, flags);
+ list_add_tail(&r->free_list, &rx->rx_free_list);
+ spin_unlock_irqrestore(&rx->free_list_lock, flags);
+}
+
+
+static int up_to_host(struct mux_rx *r)
+{
+ struct mux_dev *mux_dev = (struct mux_dev *)r->mux_dev;
+ struct mux_pkt_header *mux_header;
+ unsigned int start_flag;
+ unsigned int payload_size;
+ unsigned short packet_type;
+ int remain;
+ int dummy_cnt;
+ u32 packet_size_sum = r->offset;
+ int index;
+ int ret = TO_HOST_INVALID_PACKET;
+ int len = r->len;
+
+ while (1) {
+ mux_header = (struct mux_pkt_header *)(r->buf + packet_size_sum);
+ start_flag = __le32_to_cpu(mux_header->start_flag);
+ payload_size = __le32_to_cpu(mux_header->payload_size);
+ packet_type = __le16_to_cpu(mux_header->packet_type);
+
+ if (start_flag != START_FLAG) {
+ pr_err("invalid START_FLAG %x\n", start_flag);
+ break;
+ }
+
+ remain = (MUX_HEADER_SIZE + payload_size) % 4;
+ dummy_cnt = remain ? (4-remain) : 0;
+
+ if (len - packet_size_sum <
+ MUX_HEADER_SIZE + payload_size + dummy_cnt) {
+ pr_err("invalid payload : %d %d %04x\n",
+ payload_size, len, packet_type);
+ break;
+ }
+
+ index = packet_type_to_index(packet_type);
+ if (index < 0) {
+ pr_err("invalid index %d\n", index);
+ break;
+ }
+
+ ret = r->callback(mux_header->data,
+ payload_size,
+ index,
+ mux_dev->tty_dev,
+ RECV_PACKET_PROCESS_CONTINUE
+ );
+ if (ret == TO_HOST_BUFFER_REQUEST_FAIL) {
+ r->offset += packet_size_sum;
+ break;
+ }
+
+ packet_size_sum += MUX_HEADER_SIZE + payload_size + dummy_cnt;
+ if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) {
+ ret = r->callback(NULL,
+ 0,
+ index,
+ mux_dev->tty_dev,
+ RECV_PACKET_PROCESS_COMPLETE
+ );
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void do_rx(struct work_struct *work)
+{
+ struct mux_dev *mux_dev =
+ container_of(work, struct mux_dev , work_rx.work);
+ struct mux_rx *r;
+ struct rx_cxt *rx = (struct rx_cxt *)&mux_dev->rx;
+ unsigned long flags;
+ int ret = 0;
+
+ while (1) {
+ spin_lock_irqsave(&rx->to_host_lock, flags);
+ if (list_empty(&rx->to_host_list)) {
+ spin_unlock_irqrestore(&rx->to_host_lock, flags);
+ break;
+ }
+ r = list_entry(rx->to_host_list.next, struct mux_rx, to_host_list);
+ list_del(&r->to_host_list);
+ spin_unlock_irqrestore(&rx->to_host_lock, flags);
+
+ ret = up_to_host(r);
+ if (ret == TO_HOST_BUFFER_REQUEST_FAIL)
+ pr_err("failed to send mux data to host\n");
+ else
+ put_rx_struct(rx, r);
+ }
+}
+
+static void remove_rx_submit_list(struct mux_rx *r, struct rx_cxt *rx)
+{
+ unsigned long flags;
+ struct mux_rx *r_remove, *r_remove_next;
+
+ spin_lock_irqsave(&rx->submit_list_lock, flags);
+ list_for_each_entry_safe(r_remove, r_remove_next, &rx->rx_submit_list, rx_submit_list) {
+ if (r == r_remove)
+ list_del(&r->rx_submit_list);
+ }
+ spin_unlock_irqrestore(&rx->submit_list_lock, flags);
+}
+
+static void gdm_mux_rcv_complete(struct urb *urb)
+{
+ struct mux_rx *r = urb->context;
+ struct mux_dev *mux_dev = (struct mux_dev *)r->mux_dev;
+ struct rx_cxt *rx = &mux_dev->rx;
+ unsigned long flags;
+
+ remove_rx_submit_list(r, rx);
+
+ if (urb->status) {
+ if (mux_dev->usb_state == PM_NORMAL)
+ pr_err("%s: urb status error %d\n",
+ __func__, urb->status);
+ put_rx_struct(rx, r);
+ } else {
+ r->len = r->urb->actual_length;
+ spin_lock_irqsave(&rx->to_host_lock, flags);
+ list_add_tail(&r->to_host_list, &rx->to_host_list);
+ queue_work(mux_rx_wq, &mux_dev->work_rx.work);
+ spin_unlock_irqrestore(&rx->to_host_lock, flags);
+ }
+}
+
+static int gdm_mux_recv(void *priv_dev,
+ int (*cb)(void *data, int len, int tty_index, struct tty_dev *tty_dev, int complete)
+ )
+{
+ struct mux_dev *mux_dev = priv_dev;
+ struct usb_device *usbdev = mux_dev->usbdev;
+ struct mux_rx *r;
+ struct rx_cxt *rx = &mux_dev->rx;
+ unsigned long flags;
+ int ret;
+
+ if (!usbdev) {
+ pr_err("device is disconnected\n");
+ return -ENODEV;
+ }
+
+ r = get_rx_struct(rx);
+ if (!r) {
+ pr_err("get_rx_struct fail\n");
+ return -ENOMEM;
+ }
+
+ r->offset = 0;
+ r->mux_dev = (void *)mux_dev;
+ r->callback = cb;
+ mux_dev->rx_cb = cb;
+
+ usb_fill_bulk_urb(r->urb,
+ usbdev,
+ usb_rcvbulkpipe(usbdev, 0x86),
+ r->buf,
+ MUX_RX_MAX_SIZE,
+ gdm_mux_rcv_complete,
+ r);
+
+ spin_lock_irqsave(&rx->submit_list_lock, flags);
+ list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
+ spin_unlock_irqrestore(&rx->submit_list_lock, flags);
+
+ ret = usb_submit_urb(r->urb, GFP_KERNEL);
+
+ if (ret) {
+ spin_lock_irqsave(&rx->submit_list_lock, flags);
+ list_del(&r->rx_submit_list);
+ spin_unlock_irqrestore(&rx->submit_list_lock, flags);
+
+ put_rx_struct(rx, r);
+
+ pr_err("usb_submit_urb ret=%d\n", ret);
+ }
+
+ usb_mark_last_busy(usbdev);
+
+ return ret;
+}
+
+static void gdm_mux_send_complete(struct urb *urb)
+{
+ struct mux_tx *t = urb->context;
+
+ if (urb->status == -ECONNRESET) {
+ pr_info("CONNRESET\n");
+ free_mux_tx(t);
+ return;
+ }
+
+ if (t->callback)
+ t->callback(t->cb_data);
+
+ free_mux_tx(t);
+}
+
+static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
+ void (*cb)(void *data), void *cb_data)
+{
+ struct mux_dev *mux_dev = priv_dev;
+ struct usb_device *usbdev = mux_dev->usbdev;
+ struct mux_pkt_header *mux_header;
+ struct mux_tx *t = NULL;
+ static u32 seq_num = 1;
+ int remain;
+ int dummy_cnt;
+ int total_len;
+ int ret;
+ unsigned long flags;
+
+ if (mux_dev->usb_state == PM_SUSPEND) {
+ ret = usb_autopm_get_interface(mux_dev->intf);
+ if (!ret)
+ usb_autopm_put_interface(mux_dev->intf);
+ }
+
+ spin_lock_irqsave(&mux_dev->write_lock, flags);
+
+ remain = (MUX_HEADER_SIZE + len) % 4;
+ dummy_cnt = remain ? (4 - remain) : 0;
+
+ total_len = len + MUX_HEADER_SIZE + dummy_cnt;
+
+ t = alloc_mux_tx(total_len);
+ if (!t) {
+ pr_err("alloc_mux_tx fail\n");
+ spin_unlock_irqrestore(&mux_dev->write_lock, flags);
+ return -ENOMEM;
+ }
+
+ mux_header = (struct mux_pkt_header *)t->buf;
+ mux_header->start_flag = __cpu_to_le32(START_FLAG);
+ mux_header->seq_num = __cpu_to_le32(seq_num++);
+ mux_header->payload_size = __cpu_to_le32((u32)len);
+ mux_header->packet_type = __cpu_to_le16(packet_type[tty_index]);
+
+ memcpy(t->buf+MUX_HEADER_SIZE, data, len);
+ memset(t->buf+MUX_HEADER_SIZE+len, 0, dummy_cnt);
+
+ t->len = total_len;
+ t->callback = cb;
+ t->cb_data = cb_data;
+
+ usb_fill_bulk_urb(t->urb,
+ usbdev,
+ usb_sndbulkpipe(usbdev, 5),
+ t->buf,
+ total_len,
+ gdm_mux_send_complete,
+ t);
+
+ ret = usb_submit_urb(t->urb, GFP_ATOMIC);
+
+ spin_unlock_irqrestore(&mux_dev->write_lock, flags);
+
+ if (ret)
+ pr_err("usb_submit_urb Error: %d\n", ret);
+
+ usb_mark_last_busy(usbdev);
+
+ return ret;
+}
+
+static int gdm_mux_send_control(void *priv_dev, int request, int value, void *buf, int len)
+{
+ struct mux_dev *mux_dev = priv_dev;
+ struct usb_device *usbdev = mux_dev->usbdev;
+ int ret;
+
+ ret = usb_control_msg(usbdev,
+ usb_sndctrlpipe(usbdev, 0),
+ request,
+ USB_RT_ACM,
+ value,
+ 2,
+ buf,
+ len,
+ 5000
+ );
+
+ if (ret < 0)
+ pr_err("usb_control_msg error: %d\n", ret);
+
+ return ret < 0 ? ret : 0;
+}
+
+static void release_usb(struct mux_dev *mux_dev)
+{
+ struct rx_cxt *rx = &mux_dev->rx;
+ struct mux_rx *r, *r_next;
+ unsigned long flags;
+
+ cancel_delayed_work(&mux_dev->work_rx);
+
+ spin_lock_irqsave(&rx->submit_list_lock, flags);
+ list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list) {
+ spin_unlock_irqrestore(&rx->submit_list_lock, flags);
+ usb_kill_urb(r->urb);
+ spin_lock_irqsave(&rx->submit_list_lock, flags);
+ }
+ spin_unlock_irqrestore(&rx->submit_list_lock, flags);
+
+ spin_lock_irqsave(&rx->free_list_lock, flags);
+ list_for_each_entry_safe(r, r_next, &rx->rx_free_list, free_list) {
+ list_del(&r->free_list);
+ free_mux_rx(r);
+ }
+ spin_unlock_irqrestore(&rx->free_list_lock, flags);
+
+ spin_lock_irqsave(&rx->to_host_lock, flags);
+ list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
+ if (r->mux_dev == (void *)mux_dev) {
+ list_del(&r->to_host_list);
+ free_mux_rx(r);
+ }
+ }
+ spin_unlock_irqrestore(&rx->to_host_lock, flags);
+}
+
+
+static int init_usb(struct mux_dev *mux_dev)
+{
+ struct mux_rx *r;
+ struct rx_cxt *rx = &mux_dev->rx;
+ int ret = 0;
+ int i;
+
+ spin_lock_init(&mux_dev->write_lock);
+ INIT_LIST_HEAD(&rx->to_host_list);
+ INIT_LIST_HEAD(&rx->rx_submit_list);
+ INIT_LIST_HEAD(&rx->rx_free_list);
+ spin_lock_init(&rx->to_host_lock);
+ spin_lock_init(&rx->submit_list_lock);
+ spin_lock_init(&rx->free_list_lock);
+
+ for (i = 0; i < MAX_ISSUE_NUM * 2; i++) {
+ r = alloc_mux_rx();
+ if (r == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ list_add(&r->free_list, &rx->rx_free_list);
+ }
+
+ INIT_DELAYED_WORK(&mux_dev->work_rx, do_rx);
+
+ return ret;
+}
+
+static int gdm_mux_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+ struct mux_dev *mux_dev;
+ struct tty_dev *tty_dev;
+ u16 idVendor, idProduct;
+ int bInterfaceNumber;
+ int ret;
+ int i;
+ struct usb_device *usbdev = interface_to_usbdev(intf);
+ bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;
+
+ idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
+ idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);
+
+ pr_info("mux vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);
+
+ if (bInterfaceNumber != 2)
+ return -ENODEV;
+
+ mux_dev = kzalloc(sizeof(struct mux_dev), GFP_KERNEL);
+ if (!mux_dev)
+ return -ENOMEM;
+
+ tty_dev = kzalloc(sizeof(struct tty_dev), GFP_KERNEL);
+ if (!tty_dev) {
+ ret = -ENOMEM;
+ goto err_free_mux;
+ }
+
+ mux_dev->usbdev = usbdev;
+ mux_dev->control_intf = intf;
+
+ ret = init_usb(mux_dev);
+ if (ret)
+ goto err_free_tty;
+
+ tty_dev->priv_dev = (void *)mux_dev;
+ tty_dev->send_func = gdm_mux_send;
+ tty_dev->recv_func = gdm_mux_recv;
+ tty_dev->send_control = gdm_mux_send_control;
+
+ ret = register_lte_tty_device(tty_dev, &intf->dev);
+ if (ret)
+ goto err_unregister_tty;
+
+ for (i = 0; i < TTY_MAX_COUNT; i++)
+ mux_dev->tty_dev = tty_dev;
+
+ mux_dev->intf = intf;
+ mux_dev->usb_state = PM_NORMAL;
+
+ usb_get_dev(usbdev);
+ usb_set_intfdata(intf, tty_dev);
+
+ return 0;
+
+err_unregister_tty:
+ unregister_lte_tty_device(tty_dev);
+ release_usb(mux_dev);
+err_free_tty:
+ kfree(tty_dev);
+err_free_mux:
+ kfree(mux_dev);
+
+ return ret;
+}
+
+static void gdm_mux_disconnect(struct usb_interface *intf)
+{
+ struct tty_dev *tty_dev;
+ struct mux_dev *mux_dev;
+ struct usb_device *usbdev = interface_to_usbdev(intf);
+
+ tty_dev = usb_get_intfdata(intf);
+
+ mux_dev = tty_dev->priv_dev;
+
+ release_usb(mux_dev);
+ unregister_lte_tty_device(tty_dev);
+
+ kfree(mux_dev);
+ kfree(tty_dev);
+
+ usb_put_dev(usbdev);
+}
+
+static int gdm_mux_suspend(struct usb_interface *intf, pm_message_t pm_msg)
+{
+ struct tty_dev *tty_dev;
+ struct mux_dev *mux_dev;
+ struct rx_cxt *rx;
+ struct mux_rx *r, *r_next;
+ unsigned long flags;
+
+ tty_dev = usb_get_intfdata(intf);
+ mux_dev = tty_dev->priv_dev;
+ rx = &mux_dev->rx;
+
+ if (mux_dev->usb_state != PM_NORMAL) {
+ pr_err("usb suspend - invalid state\n");
+ return -1;
+ }
+
+ mux_dev->usb_state = PM_SUSPEND;
+
+
+ spin_lock_irqsave(&rx->submit_list_lock, flags);
+ list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list) {
+ spin_unlock_irqrestore(&rx->submit_list_lock, flags);
+ usb_kill_urb(r->urb);
+ spin_lock_irqsave(&rx->submit_list_lock, flags);
+ }
+ spin_unlock_irqrestore(&rx->submit_list_lock, flags);
+
+ return 0;
+}
+
+static int gdm_mux_resume(struct usb_interface *intf)
+{
+ struct tty_dev *tty_dev;
+ struct mux_dev *mux_dev;
+ u8 i;
+
+ tty_dev = usb_get_intfdata(intf);
+ mux_dev = tty_dev->priv_dev;
+
+ if (mux_dev->usb_state != PM_SUSPEND) {
+ pr_err("usb resume - invalid state\n");
+ return -1;
+ }
+
+ mux_dev->usb_state = PM_NORMAL;
+
+ for (i = 0; i < MAX_ISSUE_NUM; i++)
+ gdm_mux_recv(mux_dev, mux_dev->rx_cb);
+
+ return 0;
+}
+
+static struct usb_driver gdm_mux_driver = {
+ .name = "gdm_mux",
+ .probe = gdm_mux_probe,
+ .disconnect = gdm_mux_disconnect,
+ .id_table = id_table,
+ .supports_autosuspend = 1,
+ .suspend = gdm_mux_suspend,
+ .resume = gdm_mux_resume,
+ .reset_resume = gdm_mux_resume,
+};
+
+static int __init gdm_usb_mux_init(void)
+{
+
+ mux_rx_wq = create_workqueue("mux_rx_wq");
+ if (mux_rx_wq == NULL) {
+ pr_err("work queue create fail\n");
+ return -1;
+ }
+
+ register_lte_tty_driver();
+
+ return usb_register(&gdm_mux_driver);
+}
+
+static void __exit gdm_usb_mux_exit(void)
+{
+ unregister_lte_tty_driver();
+
+ if (mux_rx_wq) {
+ flush_workqueue(mux_rx_wq);
+ destroy_workqueue(mux_rx_wq);
+ }
+
+ usb_deregister(&gdm_mux_driver);
+}
+
+module_init(gdm_usb_mux_init);
+module_exit(gdm_usb_mux_exit);
+
+MODULE_DESCRIPTION("GCT LTE TTY Device Driver");
+MODULE_LICENSE("GPL");
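Both up_to_host() and gdm_mux_send() above pad each mux packet so that header plus payload is a multiple of 4 bytes, computing dummy_cnt from an open-coded remainder; the TODO file suggests using ALIGN() for this. A minimal sketch of that item is shown below, with a hypothetical helper name and MUX_HEADER_SIZE taken from gdm_mux.h.

#include <linux/kernel.h>	/* ALIGN() */
#include "gdm_mux.h"		/* MUX_HEADER_SIZE */

/* Hypothetical helper: number of padding bytes to reach 4-byte alignment. */
static inline int mux_dummy_cnt(int payload_size)
{
	int raw = MUX_HEADER_SIZE + payload_size;

	/* ALIGN(x, 4) rounds x up to the next multiple of 4 */
	return ALIGN(raw, 4) - raw;
}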
diff --git a/drivers/staging/gdm724x/gdm_mux.h b/drivers/staging/gdm724x/gdm_mux.h
new file mode 100644
index 00000000000..0163b243d3e
--- /dev/null
+++ b/drivers/staging/gdm724x/gdm_mux.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _GDM_MUX_H_
+#define _GDM_MUX_H_
+
+#include <linux/types.h>
+#include <linux/usb.h>
+#include <linux/list.h>
+
+#include "gdm_tty.h"
+
+#define PM_NORMAL 0
+#define PM_SUSPEND 1
+
+#define USB_RT_ACM (USB_TYPE_CLASS | USB_RECIP_INTERFACE)
+
+#define START_FLAG 0xA512485A
+#define MUX_HEADER_SIZE 14
+#define MUX_TX_MAX_SIZE (1024*10)
+#define MUX_RX_MAX_SIZE (1024*30)
+#define AT_PKT_TYPE 0xF011
+#define DM_PKT_TYPE 0xF010
+
+#define RETRY_TIMER 30 /* msec */
+
+struct mux_pkt_header {
+ unsigned int start_flag;
+ unsigned int seq_num;
+ unsigned int payload_size;
+ unsigned short packet_type;
+ unsigned char data[0];
+};
+
+struct mux_tx {
+ struct urb *urb;
+ u8 *buf;
+ int len;
+ void (*callback)(void *cb_data);
+ void *cb_data;
+};
+
+struct mux_rx {
+ struct list_head free_list;
+ struct list_head rx_submit_list;
+ struct list_head to_host_list;
+ struct urb *urb;
+ u8 *buf;
+ void *mux_dev;
+ u32 offset;
+ u32 len;
+ int (*callback)(void *data,
+ int len,
+ int tty_index,
+ struct tty_dev *tty_dev,
+ int complete);
+};
+
+struct rx_cxt {
+ struct list_head to_host_list;
+ struct list_head rx_submit_list;
+ struct list_head rx_free_list;
+ spinlock_t to_host_lock;
+ spinlock_t submit_list_lock;
+ spinlock_t free_list_lock;
+};
+
+struct mux_dev {
+ struct usb_device *usbdev;
+ struct usb_interface *control_intf;
+ struct usb_interface *data_intf;
+ struct rx_cxt rx;
+ struct delayed_work work_rx;
+ struct usb_interface *intf;
+ int usb_state;
+ int (*rx_cb)(void *data,
+ int len,
+ int tty_index,
+ struct tty_dev *tty_dev,
+ int complete);
+ spinlock_t write_lock;
+ struct tty_dev *tty_dev;
+};
+
+#endif /* _GDM_MUX_H_ */
diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
new file mode 100644
index 00000000000..0247a2055e8
--- /dev/null
+++ b/drivers/staging/gdm724x/gdm_tty.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb/cdc.h>
+#include <linux/serial.h>
+#include "gdm_tty.h"
+
+#define GDM_TTY_MAJOR 0
+#define GDM_TTY_MINOR 32
+
+#define ACM_CTRL_DTR 0x01
+#define ACM_CTRL_RTS 0x02
+#define ACM_CTRL_DSR 0x02
+#define ACM_CTRL_RI 0x08
+#define ACM_CTRL_DCD 0x01
+
+#define WRITE_SIZE 2048
+
+#define MUX_TX_MAX_SIZE 2048
+
+#define gdm_tty_send(n, d, l, i, c, b) (\
+ n->tty_dev->send_func(n->tty_dev->priv_dev, d, l, i, c, b))
+#define gdm_tty_recv(n, c) (\
+ n->tty_dev->recv_func(n->tty_dev->priv_dev, c))
+#define gdm_tty_send_control(n, r, v, d, l) (\
+ n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
+
+#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
+
+static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
+static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
+static DEFINE_MUTEX(gdm_table_lock);
+
+static char *DRIVER_STRING[TTY_MAX_COUNT] = {"GCTATC", "GCTDM"};
+static char *DEVICE_STRING[TTY_MAX_COUNT] = {"GCT-ATC", "GCT-DM"};
+
+static void gdm_port_destruct(struct tty_port *port)
+{
+ struct gdm *gdm = container_of(port, struct gdm, port);
+
+ mutex_lock(&gdm_table_lock);
+ gdm_table[gdm->index][gdm->minor] = NULL;
+ mutex_unlock(&gdm_table_lock);
+
+ kfree(gdm);
+}
+
+static struct tty_port_operations gdm_port_ops = {
+ .destruct = gdm_port_destruct,
+};
+
+static int gdm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ struct gdm *gdm = NULL;
+ int ret;
+ int i;
+ int j;
+
+ j = GDM_TTY_MINOR;
+ for (i = 0; i < TTY_MAX_COUNT; i++) {
+ if (!strcmp(tty->driver->driver_name, DRIVER_STRING[i])) {
+ j = tty->index;
+ break;
+ }
+ }
+
+ if (j == GDM_TTY_MINOR)
+ return -ENODEV;
+
+ mutex_lock(&gdm_table_lock);
+ gdm = gdm_table[i][j];
+ if (gdm == NULL) {
+ mutex_unlock(&gdm_table_lock);
+ return -ENODEV;
+ }
+
+ tty_port_get(&gdm->port);
+
+ ret = tty_standard_install(driver, tty);
+ if (ret) {
+ tty_port_put(&gdm->port);
+ mutex_unlock(&gdm_table_lock);
+ return ret;
+ }
+
+ tty->driver_data = gdm;
+ mutex_unlock(&gdm_table_lock);
+
+ return 0;
+}
+
+static int gdm_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct gdm *gdm = tty->driver_data;
+ return tty_port_open(&gdm->port, tty, filp);
+}
+
+static void gdm_tty_cleanup(struct tty_struct *tty)
+{
+ struct gdm *gdm = tty->driver_data;
+ tty_port_put(&gdm->port);
+}
+
+static void gdm_tty_hangup(struct tty_struct *tty)
+{
+ struct gdm *gdm = tty->driver_data;
+ tty_port_hangup(&gdm->port);
+}
+
+static void gdm_tty_close(struct tty_struct *tty, struct file *filp)
+{
+ struct gdm *gdm = tty->driver_data;
+ tty_port_close(&gdm->port, tty, filp);
+}
+
+static int gdm_tty_recv_complete(void *data,
+ int len,
+ int index,
+ struct tty_dev *tty_dev,
+ int complete)
+{
+ struct gdm *gdm = tty_dev->gdm[index];
+ if (!GDM_TTY_READY(gdm)) {
+ if (complete == RECV_PACKET_PROCESS_COMPLETE)
+ gdm_tty_recv(gdm, gdm_tty_recv_complete);
+ return TO_HOST_PORT_CLOSE;
+ }
+
+ if (data && len) {
+ if (tty_buffer_request_room(&gdm->port, len) == len) {
+ tty_insert_flip_string(&gdm->port, data, len);
+ tty_flip_buffer_push(&gdm->port);
+ } else {
+ return TO_HOST_BUFFER_REQUEST_FAIL;
+ }
+ }
+
+ if (complete == RECV_PACKET_PROCESS_COMPLETE)
+ gdm_tty_recv(gdm, gdm_tty_recv_complete);
+
+ return 0;
+}
+
+static void gdm_tty_send_complete(void *arg)
+{
+ struct gdm *gdm = (struct gdm *)arg;
+
+ if (!GDM_TTY_READY(gdm))
+ return;
+
+ tty_port_tty_wakeup(&gdm->port);
+}
+
+static int gdm_tty_write(struct tty_struct *tty, const unsigned char *buf, int len)
+{
+ struct gdm *gdm = tty->driver_data;
+ int remain = len;
+ int sent_len = 0;
+ int sending_len = 0;
+
+ if (!GDM_TTY_READY(gdm))
+ return -ENODEV;
+
+ if (!len)
+ return 0;
+
+ while (1) {
+ sending_len = remain > MUX_TX_MAX_SIZE ? MUX_TX_MAX_SIZE : remain;
+ gdm_tty_send(gdm,
+ (void *)(buf+sent_len),
+ sending_len,
+ gdm->index,
+ gdm_tty_send_complete,
+ gdm
+ );
+ sent_len += sending_len;
+ remain -= sending_len;
+ if (remain <= 0)
+ break;
+ }
+
+ return len;
+}
+
+static int gdm_tty_write_room(struct tty_struct *tty)
+{
+ struct gdm *gdm = tty->driver_data;
+
+ if (!GDM_TTY_READY(gdm))
+ return -ENODEV;
+
+ return WRITE_SIZE;
+}
+
+int register_lte_tty_device(struct tty_dev *tty_dev, struct device *device)
+{
+ struct gdm *gdm;
+ int i;
+ int j;
+
+ for (i = 0; i < TTY_MAX_COUNT; i++) {
+
+ gdm = kmalloc(sizeof(struct gdm), GFP_KERNEL);
+ if (!gdm)
+ return -ENOMEM;
+
+ mutex_lock(&gdm_table_lock);
+ for (j = 0; j < GDM_TTY_MINOR; j++) {
+ if (!gdm_table[i][j])
+ break;
+ }
+
+ if (j == GDM_TTY_MINOR) {
+ kfree(gdm);
+ mutex_unlock(&gdm_table_lock);
+ return -EINVAL;
+ }
+
+ gdm_table[i][j] = gdm;
+ mutex_unlock(&gdm_table_lock);
+
+ tty_dev->gdm[i] = gdm;
+ tty_port_init(&gdm->port);
+
+ gdm->port.ops = &gdm_port_ops;
+ gdm->index = i;
+ gdm->minor = j;
+ gdm->tty_dev = tty_dev;
+
+ tty_port_register_device(&gdm->port, gdm_driver[i], gdm->minor, device);
+ }
+
+ for (i = 0; i < MAX_ISSUE_NUM; i++)
+ gdm_tty_recv(gdm, gdm_tty_recv_complete);
+
+ return 0;
+}
+
+void unregister_lte_tty_device(struct tty_dev *tty_dev)
+{
+ struct gdm *gdm;
+ struct tty_struct *tty;
+ int i;
+
+ for (i = 0; i < TTY_MAX_COUNT; i++) {
+ gdm = tty_dev->gdm[i];
+ if (!gdm)
+ continue;
+
+ mutex_lock(&gdm_table_lock);
+ gdm_table[gdm->index][gdm->minor] = NULL;
+ mutex_unlock(&gdm_table_lock);
+
+ tty = tty_port_tty_get(&gdm->port);
+ if (tty) {
+ tty_vhangup(tty);
+ tty_kref_put(tty);
+ }
+
+ tty_unregister_device(gdm_driver[i], gdm->minor);
+ tty_port_put(&gdm->port);
+ }
+}
+
+static const struct tty_operations gdm_tty_ops = {
+ .install = gdm_tty_install,
+ .open = gdm_tty_open,
+ .close = gdm_tty_close,
+ .cleanup = gdm_tty_cleanup,
+ .hangup = gdm_tty_hangup,
+ .write = gdm_tty_write,
+ .write_room = gdm_tty_write_room,
+};
+
+int register_lte_tty_driver(void)
+{
+ struct tty_driver *tty_driver;
+ int i;
+ int ret;
+
+ for (i = 0; i < TTY_MAX_COUNT; i++) {
+ tty_driver = alloc_tty_driver(GDM_TTY_MINOR);
+ if (!tty_driver)
+ return -ENOMEM;
+
+ tty_driver->owner = THIS_MODULE;
+ tty_driver->driver_name = DRIVER_STRING[i];
+ tty_driver->name = DEVICE_STRING[i];
+ tty_driver->major = GDM_TTY_MAJOR;
+ tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ tty_driver->subtype = SERIAL_TYPE_NORMAL;
+ tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ tty_driver->init_termios = tty_std_termios;
+ tty_driver->init_termios.c_cflag = B9600 | CS8 | HUPCL | CLOCAL;
+ tty_driver->init_termios.c_lflag = ISIG | ICANON | IEXTEN;
+ tty_set_operations(tty_driver, &gdm_tty_ops);
+
+ ret = tty_register_driver(tty_driver);
+ if (ret) {
+ put_tty_driver(tty_driver);
+ return ret;
+ }
+
+ gdm_driver[i] = tty_driver;
+ }
+
+ return 0;
+}
+
+void unregister_lte_tty_driver(void)
+{
+ struct tty_driver *tty_driver;
+ int i;
+
+ for (i = 0; i < TTY_MAX_COUNT; i++) {
+ tty_driver = gdm_driver[i];
+ if (tty_driver) {
+ tty_unregister_driver(tty_driver);
+ put_tty_driver(tty_driver);
+ }
+ }
+}
+
diff --git a/drivers/staging/gdm724x/gdm_tty.h b/drivers/staging/gdm724x/gdm_tty.h
new file mode 100644
index 00000000000..297438b4ddc
--- /dev/null
+++ b/drivers/staging/gdm724x/gdm_tty.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _GDM_TTY_H_
+#define _GDM_TTY_H_
+
+#include <linux/types.h>
+#include <linux/tty.h>
+
+
+#define TTY_MAX_COUNT 2
+
+#define MAX_ISSUE_NUM 3
+
+enum TO_HOST_RESULT {
+ TO_HOST_BUFFER_REQUEST_FAIL = 1,
+ TO_HOST_PORT_CLOSE = 2,
+ TO_HOST_INVALID_PACKET = 3,
+};
+
+enum RECV_PACKET_PROCESS {
+ RECV_PACKET_PROCESS_COMPLETE = 0,
+ RECV_PACKET_PROCESS_CONTINUE = 1,
+};
+
+struct gdm {
+ struct tty_dev *tty_dev;
+ struct tty_port port;
+ unsigned int index;
+ unsigned int minor;
+};
+
+struct tty_dev {
+ void *priv_dev;
+ int (*send_func)(void *priv_dev,
+ void *data,
+ int len,
+ int tty_index,
+ void (*cb)(void *cb_data),
+ void *cb_data);
+ int (*recv_func)(void *priv_dev,
+ int (*cb)(void *data,
+ int len,
+ int tty_index,
+ struct tty_dev *tty_dev,
+ int complete));
+ int (*send_control)(void *priv_dev,
+ int request,
+ int value,
+ void *data,
+ int len);
+ struct gdm *gdm[2];
+};
+
+int register_lte_tty_driver(void);
+void unregister_lte_tty_driver(void);
+int register_lte_tty_device(struct tty_dev *tty_dev, struct device *dev);
+void unregister_lte_tty_device(struct tty_dev *tty_dev);
+
+#endif /* _GDM_TTY_H_ */
diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
new file mode 100644
index 00000000000..bdc96370e43
--- /dev/null
+++ b/drivers/staging/gdm724x/gdm_usb.c
@@ -0,0 +1,1049 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/usb.h>
+#include <linux/sched.h>
+#include <linux/kthread.h>
+#include <linux/usb/cdc.h>
+#include <linux/wait.h>
+#include <linux/if_ether.h>
+#include <linux/pm_runtime.h>
+
+#include "gdm_usb.h"
+#include "gdm_lte.h"
+#include "hci.h"
+#include "hci_packet.h"
+#include "gdm_endian.h"
+
+#define USB_DEVICE_CDC_DATA(vid, pid) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_CLASS | USB_DEVICE_ID_MATCH_INT_SUBCLASS,\
+ .idVendor = vid,\
+ .idProduct = pid,\
+ .bInterfaceClass = USB_CLASS_COMM,\
+ .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET
+
+#define USB_DEVICE_MASS_DATA(vid, pid) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO,\
+ .idVendor = vid,\
+ .idProduct = pid,\
+ .bInterfaceSubClass = USB_SC_SCSI, \
+ .bInterfaceClass = USB_CLASS_MASS_STORAGE,\
+ .bInterfaceProtocol = USB_PR_BULK
+
+static const struct usb_device_id id_table[] = {
+ { USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7240) }, /* GCT GDM7240 */
+ { USB_DEVICE_CDC_DATA(VID_GCT, PID_GDM7243) }, /* GCT GDM7243 */
+ { }
+};
+
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct workqueue_struct *usb_tx_wq;
+static struct workqueue_struct *usb_rx_wq;
+
+static void do_tx(struct work_struct *work);
+static void do_rx(struct work_struct *work);
+
+static int gdm_usb_recv(void *priv_dev,
+ int (*cb)(void *cb_data, void *data, int len, int context),
+ void *cb_data,
+ int context);
+
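+/* Query the modem's MAC address with an LTE_GET_INFORMATION HCI command. */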
+static int request_mac_address(struct lte_udev *udev)
+{
+ u8 buf[16] = {0,};
+ struct hci_packet *hci = (struct hci_packet *)buf;
+ struct usb_device *usbdev = udev->usbdev;
+ int actual;
+ int ret = -1;
+
+ hci->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_GET_INFORMATION);
+ hci->len = gdm_cpu_to_dev16(&udev->gdm_ed, 1);
+ hci->data[0] = MAC_ADDRESS;
+
+ ret = usb_bulk_msg(usbdev, usb_sndbulkpipe(usbdev, 2), buf, 5,
+ &actual, 1000);
+
+ udev->request_mac_addr = 1;
+
+ return ret;
+}
+
+static struct usb_tx *alloc_tx_struct(int len)
+{
+ struct usb_tx *t = NULL;
+ int ret = 0;
+
+ t = kmalloc(sizeof(struct usb_tx), GFP_ATOMIC);
+ if (!t) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ memset(t, 0, sizeof(struct usb_tx));
+
+ t->urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!(len % 512))
+ len++;
+
+ t->buf = kmalloc(len, GFP_ATOMIC);
+ if (!t->urb || !t->buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+out:
+ if (ret < 0) {
+ if (t) {
+ usb_free_urb(t->urb);
+ kfree(t->buf);
+ kfree(t);
+ }
+ return NULL;
+ }
+
+ return t;
+}
+
+static struct usb_tx_sdu *alloc_tx_sdu_struct(void)
+{
+ struct usb_tx_sdu *t_sdu = NULL;
+ int ret = 0;
+
+
+ t_sdu = kmalloc(sizeof(struct usb_tx_sdu), GFP_ATOMIC);
+ if (!t_sdu) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ memset(t_sdu, 0, sizeof(struct usb_tx_sdu));
+
+ t_sdu->buf = kmalloc(SDU_BUF_SIZE, GFP_ATOMIC);
+ if (!t_sdu->buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+out:
+
+ if (ret < 0) {
+ if (t_sdu) {
+ kfree(t_sdu->buf);
+ kfree(t_sdu);
+ }
+ return NULL;
+ }
+
+ return t_sdu;
+}
+
+static void free_tx_struct(struct usb_tx *t)
+{
+ if (t) {
+ usb_free_urb(t->urb);
+ kfree(t->buf);
+ kfree(t);
+ }
+}
+
+static void free_tx_sdu_struct(struct usb_tx_sdu *t_sdu)
+{
+ if (t_sdu) {
+ kfree(t_sdu->buf);
+ kfree(t_sdu);
+ }
+}
+
+static struct usb_tx_sdu *get_tx_sdu_struct(struct tx_cxt *tx, int *no_spc)
+{
+ struct usb_tx_sdu *t_sdu;
+
+ if (list_empty(&tx->free_list))
+ return NULL;
+
+ t_sdu = list_entry(tx->free_list.next, struct usb_tx_sdu, list);
+ list_del(&t_sdu->list);
+
+ tx->avail_count--;
+
+ *no_spc = list_empty(&tx->free_list) ? 1 : 0;
+
+ return t_sdu;
+}
+
+static void put_tx_struct(struct tx_cxt *tx, struct usb_tx_sdu *t_sdu)
+{
+ list_add_tail(&t_sdu->list, &tx->free_list);
+ tx->avail_count++;
+}
+
+static struct usb_rx *alloc_rx_struct(void)
+{
+ struct usb_rx *r = NULL;
+ int ret = 0;
+
+ r = kmalloc(sizeof(struct usb_rx), GFP_ATOMIC);
+ if (!r) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ r->urb = usb_alloc_urb(0, GFP_ATOMIC);
+ r->buf = kmalloc(RX_BUF_SIZE, GFP_ATOMIC);
+ if (!r->urb || !r->buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+out:
+
+ if (ret < 0) {
+ if (r) {
+ usb_free_urb(r->urb);
+ kfree(r->buf);
+ kfree(r);
+ }
+ return NULL;
+ }
+
+ return r;
+}
+
+static void free_rx_struct(struct usb_rx *r)
+{
+ if (r) {
+ usb_free_urb(r->urb);
+ kfree(r->buf);
+ kfree(r);
+ }
+}
+
+static struct usb_rx *get_rx_struct(struct rx_cxt *rx, int *no_spc)
+{
+ struct usb_rx *r;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rx->rx_lock, flags);
+
+ if (list_empty(&rx->free_list)) {
+ spin_unlock_irqrestore(&rx->rx_lock, flags);
+ return NULL;
+ }
+
+ r = list_entry(rx->free_list.next, struct usb_rx, free_list);
+ list_del(&r->free_list);
+
+ rx->avail_count--;
+
+ *no_spc = list_empty(&rx->free_list) ? 1 : 0;
+
+ spin_unlock_irqrestore(&rx->rx_lock, flags);
+
+ return r;
+}
+
+static void put_rx_struct(struct rx_cxt *rx, struct usb_rx *r)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rx->rx_lock, flags);
+
+ list_add_tail(&r->free_list, &rx->free_list);
+ rx->avail_count++;
+
+ spin_unlock_irqrestore(&rx->rx_lock, flags);
+}
+
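+/* Drop every queued TX/RX buffer and kill any receive URBs still in flight. */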
+static void release_usb(struct lte_udev *udev)
+{
+ struct rx_cxt *rx = &udev->rx;
+ struct tx_cxt *tx = &udev->tx;
+ struct usb_tx *t, *t_next;
+ struct usb_rx *r, *r_next;
+ struct usb_tx_sdu *t_sdu, *t_sdu_next;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tx->lock, flags);
+ list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->sdu_list, list) {
+ list_del(&t_sdu->list);
+ free_tx_sdu_struct(t_sdu);
+ }
+
+ list_for_each_entry_safe(t, t_next, &tx->hci_list, list) {
+ list_del(&t->list);
+ free_tx_struct(t);
+ }
+
+ list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->free_list, list) {
+ list_del(&t_sdu->list);
+ free_tx_sdu_struct(t_sdu);
+ }
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ spin_lock_irqsave(&rx->submit_lock, flags);
+ list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list) {
+ spin_unlock_irqrestore(&rx->submit_lock, flags);
+ usb_kill_urb(r->urb);
+ spin_lock_irqsave(&rx->submit_lock, flags);
+ }
+ spin_unlock_irqrestore(&rx->submit_lock, flags);
+
+ spin_lock_irqsave(&rx->rx_lock, flags);
+ list_for_each_entry_safe(r, r_next, &rx->free_list, free_list) {
+ list_del(&r->free_list);
+ free_rx_struct(r);
+ }
+ spin_unlock_irqrestore(&rx->rx_lock, flags);
+
+ spin_lock_irqsave(&rx->to_host_lock, flags);
+ list_for_each_entry_safe(r, r_next, &rx->to_host_list, to_host_list) {
+ if (r->index == (void *)udev) {
+ list_del(&r->to_host_list);
+ free_rx_struct(r);
+ }
+ }
+ spin_unlock_irqrestore(&rx->to_host_lock, flags);
+}
+
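+/* Pre-allocate the TX SDU and RX buffer pools and set up the work items. */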
+static int init_usb(struct lte_udev *udev)
+{
+ int ret = 0;
+ int i;
+ struct tx_cxt *tx = &udev->tx;
+ struct rx_cxt *rx = &udev->rx;
+ struct usb_tx_sdu *t_sdu = NULL;
+ struct usb_rx *r = NULL;
+
+ udev->send_complete = 1;
+ udev->tx_stop = 0;
+ udev->request_mac_addr = 0;
+ udev->usb_state = PM_NORMAL;
+
+ INIT_LIST_HEAD(&tx->sdu_list);
+ INIT_LIST_HEAD(&tx->hci_list);
+ INIT_LIST_HEAD(&tx->free_list);
+ INIT_LIST_HEAD(&rx->rx_submit_list);
+ INIT_LIST_HEAD(&rx->free_list);
+ INIT_LIST_HEAD(&rx->to_host_list);
+ spin_lock_init(&tx->lock);
+ spin_lock_init(&rx->rx_lock);
+ spin_lock_init(&rx->submit_lock);
+ spin_lock_init(&rx->to_host_lock);
+
+ tx->avail_count = 0;
+ rx->avail_count = 0;
+
+ udev->rx_cb = NULL;
+
+ for (i = 0; i < MAX_NUM_SDU_BUF; i++) {
+ t_sdu = alloc_tx_sdu_struct();
+ if (t_sdu == NULL) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ list_add(&t_sdu->list, &tx->free_list);
+ tx->avail_count++;
+ }
+
+ for (i = 0; i < MAX_RX_SUBMIT_COUNT*2; i++) {
+ r = alloc_rx_struct();
+ if (r == NULL) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ list_add(&r->free_list, &rx->free_list);
+ rx->avail_count++;
+ }
+ INIT_DELAYED_WORK(&udev->work_tx, do_tx);
+ INIT_DELAYED_WORK(&udev->work_rx, do_rx);
+ return 0;
+fail:
+ return ret;
+}
+
+static int set_mac_address(u8 *data, void *arg)
+{
+ struct phy_dev *phy_dev = (struct phy_dev *)arg;
+ struct lte_udev *udev = phy_dev->priv_dev;
+ struct tlv *tlv = (struct tlv *)data;
+ u8 mac_address[ETH_ALEN] = {0, };
+
+ if (tlv->type == MAC_ADDRESS && udev->request_mac_addr) {
+ memcpy(mac_address, tlv->data, tlv->len);
+
+ if (register_lte_device(phy_dev, &udev->intf->dev, mac_address) < 0)
+ pr_err("register lte device failed\n");
+
+ udev->request_mac_addr = 0;
+
+ return 1;
+ }
+
+ return 0;
+}
+
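+/*
+ * RX work handler: drain the to_host list, dispatch each completed HCI
+ * packet to its callback (the MAC address reply is consumed here) and
+ * resubmit a receive URB for every buffer returned to the pool.
+ */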
+static void do_rx(struct work_struct *work)
+{
+ struct lte_udev *udev = container_of(work, struct lte_udev, work_rx.work);
+ struct rx_cxt *rx = &udev->rx;
+ struct usb_rx *r;
+ struct hci_packet *hci;
+ struct phy_dev *phy_dev;
+ u16 cmd_evt;
+ int ret;
+ unsigned long flags;
+
+ while (1) {
+ spin_lock_irqsave(&rx->to_host_lock, flags);
+ if (list_empty(&rx->to_host_list)) {
+ spin_unlock_irqrestore(&rx->to_host_lock, flags);
+ break;
+ }
+ r = list_entry(rx->to_host_list.next, struct usb_rx, to_host_list);
+ list_del(&r->to_host_list);
+ spin_unlock_irqrestore(&rx->to_host_lock, flags);
+
+ phy_dev = (struct phy_dev *)r->cb_data;
+ udev = (struct lte_udev *)phy_dev->priv_dev;
+ hci = (struct hci_packet *)r->buf;
+ cmd_evt = gdm_dev16_to_cpu(&udev->gdm_ed, hci->cmd_evt);
+
+ switch (cmd_evt) {
+ case LTE_GET_INFORMATION_RESULT:
+ if (set_mac_address(hci->data, r->cb_data) == 0) {
+ ret = r->callback(r->cb_data,
+ r->buf,
+ r->urb->actual_length,
+ KERNEL_THREAD);
+ }
+ break;
+
+ default:
+ if (r->callback) {
+ ret = r->callback(r->cb_data,
+ r->buf,
+ r->urb->actual_length,
+ KERNEL_THREAD);
+
+ if (ret == -EAGAIN)
+ pr_err("failed to send received data\n");
+ }
+ break;
+ }
+
+ put_rx_struct(rx, r);
+
+ gdm_usb_recv(udev,
+ r->callback,
+ r->cb_data,
+ USB_COMPLETE);
+ }
+}
+
+static void remove_rx_submit_list(struct usb_rx *r, struct rx_cxt *rx)
+{
+ unsigned long flags;
+ struct usb_rx *r_remove, *r_remove_next;
+
+ spin_lock_irqsave(&rx->submit_lock, flags);
+ list_for_each_entry_safe(r_remove, r_remove_next, &rx->rx_submit_list, rx_submit_list) {
+ if (r == r_remove) {
+ list_del(&r->rx_submit_list);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&rx->submit_lock, flags);
+}
+
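+/* Bulk-in completion: queue good buffers for the RX work, recycle the rest. */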
+static void gdm_usb_rcv_complete(struct urb *urb)
+{
+ struct usb_rx *r = urb->context;
+ struct rx_cxt *rx = r->rx;
+ unsigned long flags;
+ struct lte_udev *udev = container_of(r->rx, struct lte_udev, rx);
+ struct usb_device *usbdev = udev->usbdev;
+
+ remove_rx_submit_list(r, rx);
+
+ if (!urb->status && r->callback) {
+ spin_lock_irqsave(&rx->to_host_lock, flags);
+ list_add_tail(&r->to_host_list, &rx->to_host_list);
+ queue_work(usb_rx_wq, &udev->work_rx.work);
+ spin_unlock_irqrestore(&rx->to_host_lock, flags);
+ } else {
+ if (urb->status && udev->usb_state == PM_NORMAL)
+ pr_err("%s: urb status error %d\n",
+ __func__, urb->status);
+
+ put_rx_struct(rx, r);
+ }
+
+ usb_mark_last_busy(usbdev);
+}
+
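+/* Submit a bulk-in URB on endpoint 0x83 and track it on the submit list. */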
+static int gdm_usb_recv(void *priv_dev,
+ int (*cb)(void *cb_data, void *data, int len, int context),
+ void *cb_data,
+ int context)
+{
+ struct lte_udev *udev = priv_dev;
+ struct usb_device *usbdev = udev->usbdev;
+ struct rx_cxt *rx = &udev->rx;
+ struct usb_rx *r;
+ int no_spc;
+ int ret;
+ unsigned long flags;
+
+ if (!udev->usbdev) {
+ pr_err("invalid device\n");
+ return -ENODEV;
+ }
+
+ r = get_rx_struct(rx, &no_spc);
+ if (!r) {
+ pr_err("Out of Memory\n");
+ return -ENOMEM;
+ }
+
+ udev->rx_cb = cb;
+ r->callback = cb;
+ r->cb_data = cb_data;
+ r->index = (void *)udev;
+ r->rx = rx;
+
+ usb_fill_bulk_urb(r->urb,
+ usbdev,
+ usb_rcvbulkpipe(usbdev, 0x83),
+ r->buf,
+ RX_BUF_SIZE,
+ gdm_usb_rcv_complete,
+ r);
+
+ spin_lock_irqsave(&rx->submit_lock, flags);
+ list_add_tail(&r->rx_submit_list, &rx->rx_submit_list);
+ spin_unlock_irqrestore(&rx->submit_lock, flags);
+
+ if (context == KERNEL_THREAD)
+ ret = usb_submit_urb(r->urb, GFP_KERNEL);
+ else
+ ret = usb_submit_urb(r->urb, GFP_ATOMIC);
+
+ if (ret) {
+ spin_lock_irqsave(&rx->submit_lock, flags);
+ list_del(&r->rx_submit_list);
+ spin_unlock_irqrestore(&rx->submit_lock, flags);
+
+ pr_err("usb_submit_urb failed (%p)\n", r);
+ put_rx_struct(rx, r);
+ }
+
+ return ret;
+}
+
+static void gdm_usb_send_complete(struct urb *urb)
+{
+ struct usb_tx *t = urb->context;
+ struct tx_cxt *tx = t->tx;
+ struct lte_udev *udev = container_of(tx, struct lte_udev, tx);
+ unsigned long flags;
+
+ if (urb->status == -ECONNRESET) {
+ pr_info("CONNRESET\n");
+ return;
+ }
+
+ if (t->callback)
+ t->callback(t->cb_data);
+
+ free_tx_struct(t);
+
+ spin_lock_irqsave(&tx->lock, flags);
+ udev->send_complete = 1;
+ queue_work(usb_tx_wq, &udev->work_tx.work);
+ spin_unlock_irqrestore(&tx->lock, flags);
+}
+
+static int send_tx_packet(struct usb_device *usbdev, struct usb_tx *t, u32 len)
+{
+ int ret = 0;
+
+ if (!(len%512))
+ len++;
+
+ usb_fill_bulk_urb(t->urb,
+ usbdev,
+ usb_sndbulkpipe(usbdev, 2),
+ t->buf,
+ len,
+ gdm_usb_send_complete,
+ t);
+
+ ret = usb_submit_urb(t->urb, GFP_ATOMIC);
+
+ if (ret)
+ pr_err("usb_submit_urb failed: %d\n", ret);
+
+ usb_mark_last_busy(usbdev);
+
+ return ret;
+}
+
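+/*
+ * Pack queued SDUs into a single LTE_TX_MULTI_SDU frame. Each SDU is copied
+ * into the send buffer and padded to a 4-byte boundary; aggregation stops at
+ * MAX_PACKET_IN_MULTI_SDU packets or MAX_SDU_SIZE bytes. Returns the total
+ * frame length to transmit.
+ */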
+static u32 packet_aggregation(struct lte_udev *udev, u8 *send_buf)
+{
+ struct tx_cxt *tx = &udev->tx;
+ struct usb_tx_sdu *t_sdu = NULL;
+ struct multi_sdu *multi_sdu = (struct multi_sdu *)send_buf;
+ u16 send_len = 0;
+ u16 num_packet = 0;
+ unsigned long flags;
+
+ multi_sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_MULTI_SDU);
+
+ while (num_packet < MAX_PACKET_IN_MULTI_SDU) {
+ spin_lock_irqsave(&tx->lock, flags);
+ if (list_empty(&tx->sdu_list)) {
+ spin_unlock_irqrestore(&tx->lock, flags);
+ break;
+ }
+
+ t_sdu = list_entry(tx->sdu_list.next, struct usb_tx_sdu, list);
+ if (send_len + t_sdu->len > MAX_SDU_SIZE) {
+ spin_unlock_irqrestore(&tx->lock, flags);
+ break;
+ }
+
+ list_del(&t_sdu->list);
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ memcpy(multi_sdu->data + send_len, t_sdu->buf, t_sdu->len);
+
+ send_len += (t_sdu->len + 3) & 0xfffc;
+ num_packet++;
+
+ if (tx->avail_count > 10)
+ t_sdu->callback(t_sdu->cb_data);
+
+ spin_lock_irqsave(&tx->lock, flags);
+ put_tx_struct(tx, t_sdu);
+ spin_unlock_irqrestore(&tx->lock, flags);
+ }
+
+ multi_sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
+ multi_sdu->num_packet = gdm_cpu_to_dev16(&udev->gdm_ed, num_packet);
+
+ return send_len + offsetof(struct multi_sdu, data);
+}
+
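+/*
+ * TX work handler: send a pending HCI packet if one is queued, otherwise
+ * aggregate queued SDUs into one bulk transfer. Only a single transfer is
+ * in flight at a time, gated by send_complete.
+ */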
+static void do_tx(struct work_struct *work)
+{
+ struct lte_udev *udev = container_of(work, struct lte_udev, work_tx.work);
+ struct usb_device *usbdev = udev->usbdev;
+ struct tx_cxt *tx = &udev->tx;
+ struct usb_tx *t = NULL;
+ int is_send = 0;
+ u32 len = 0;
+ unsigned long flags;
+
+ if (!usb_autopm_get_interface(udev->intf))
+ usb_autopm_put_interface(udev->intf);
+
+ if (udev->usb_state == PM_SUSPEND)
+ return;
+
+ spin_lock_irqsave(&tx->lock, flags);
+ if (!udev->send_complete) {
+ spin_unlock_irqrestore(&tx->lock, flags);
+ return;
+ } else {
+ udev->send_complete = 0;
+ }
+
+ if (!list_empty(&tx->hci_list)) {
+ t = list_entry(tx->hci_list.next, struct usb_tx, list);
+ list_del(&t->list);
+ len = t->len;
+ t->is_sdu = 0;
+ is_send = 1;
+ } else if (!list_empty(&tx->sdu_list)) {
+ if (udev->tx_stop) {
+ udev->send_complete = 1;
+ spin_unlock_irqrestore(&tx->lock, flags);
+ return;
+ }
+
+ t = alloc_tx_struct(TX_BUF_SIZE);
+ if (!t) {
+ udev->send_complete = 1;
+ spin_unlock_irqrestore(&tx->lock, flags);
+ return;
+ }
+ t->callback = NULL;
+ t->tx = tx;
+ t->is_sdu = 1;
+ is_send = 1;
+ }
+
+ if (!is_send) {
+ udev->send_complete = 1;
+ spin_unlock_irqrestore(&tx->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ if (t->is_sdu)
+ len = packet_aggregation(udev, t->buf);
+
+ if (send_tx_packet(usbdev, t, len)) {
+ pr_err("send_tx_packet failed\n");
+ t->callback = NULL;
+ gdm_usb_send_complete(t->urb);
+ }
+}
+
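+/*
+ * Queue one network SDU: wrap the payload in an LTE_TX_SDU header (the
+ * Ethernet header is stripped for non-ARP frames) and schedule the TX work.
+ * Returns TX_NO_BUFFER when this SDU took the last free buffer.
+ */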
+#define SDU_PARAM_LEN 12
+static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
+ unsigned int dftEpsId, unsigned int epsId,
+ void (*cb)(void *data), void *cb_data,
+ int dev_idx, int nic_type)
+{
+ struct lte_udev *udev = priv_dev;
+ struct tx_cxt *tx = &udev->tx;
+ struct usb_tx_sdu *t_sdu;
+ struct sdu *sdu = NULL;
+ unsigned long flags;
+ int no_spc = 0;
+ u16 send_len;
+
+ if (!udev->usbdev) {
+ pr_err("sdu send - invalid device\n");
+ return TX_NO_DEV;
+ }
+
+ spin_lock_irqsave(&tx->lock, flags);
+ t_sdu = get_tx_sdu_struct(tx, &no_spc);
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ if (t_sdu == NULL) {
+ pr_err("sdu send - free list empty\n");
+ return TX_NO_SPC;
+ }
+
+ sdu = (struct sdu *)t_sdu->buf;
+ sdu->cmd_evt = gdm_cpu_to_dev16(&udev->gdm_ed, LTE_TX_SDU);
+ if (nic_type == NIC_TYPE_ARP) {
+ send_len = len + SDU_PARAM_LEN;
+ memcpy(sdu->data, data, len);
+ } else {
+ send_len = len - ETH_HLEN;
+ send_len += SDU_PARAM_LEN;
+ memcpy(sdu->data, data+ETH_HLEN, len-ETH_HLEN);
+ }
+
+ sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
+ sdu->dftEpsId = gdm_cpu_to_dev32(&udev->gdm_ed, dftEpsId);
+ sdu->bearer_ID = gdm_cpu_to_dev32(&udev->gdm_ed, epsId);
+ sdu->nic_type = gdm_cpu_to_dev32(&udev->gdm_ed, nic_type);
+
+ t_sdu->len = send_len + HCI_HEADER_SIZE;
+ t_sdu->callback = cb;
+ t_sdu->cb_data = cb_data;
+
+ spin_lock_irqsave(&tx->lock, flags);
+ list_add_tail(&t_sdu->list, &tx->sdu_list);
+ queue_work(usb_tx_wq, &udev->work_tx.work);
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ if (no_spc)
+ return TX_NO_BUFFER;
+
+ return 0;
+}
+
+static int gdm_usb_hci_send(void *priv_dev, void *data, int len,
+ void (*cb)(void *data), void *cb_data)
+{
+ struct lte_udev *udev = priv_dev;
+ struct tx_cxt *tx = &udev->tx;
+ struct usb_tx *t;
+ unsigned long flags;
+
+ if (!udev->usbdev) {
+ pr_err("hci send - invalid device\n");
+ return -ENODEV;
+ }
+
+ t = alloc_tx_struct(len);
+ if (t == NULL) {
+ pr_err("hci_send - out of memory\n");
+ return -ENOMEM;
+ }
+
+ memcpy(t->buf, data, len);
+ t->callback = cb;
+ t->cb_data = cb_data;
+ t->len = len;
+ t->tx = tx;
+ t->is_sdu = 0;
+
+ spin_lock_irqsave(&tx->lock, flags);
+ list_add_tail(&t->list, &tx->hci_list);
+ queue_work(usb_tx_wq, &udev->work_tx.work);
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ return 0;
+}
+
+static struct gdm_endian *gdm_usb_get_endian(void *priv_dev)
+{
+ struct lte_udev *udev = priv_dev;
+
+ return &udev->gdm_ed;
+}
+
+static int gdm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
+{
+ int ret = 0;
+ struct phy_dev *phy_dev = NULL;
+ struct lte_udev *udev = NULL;
+ u16 idVendor, idProduct;
+ int bInterfaceNumber;
+ struct usb_device *usbdev = interface_to_usbdev(intf);
+
+ bInterfaceNumber = intf->cur_altsetting->desc.bInterfaceNumber;
+ idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
+ idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);
+
+ pr_info("net vid = 0x%04x pid = 0x%04x\n", idVendor, idProduct);
+
+ if (bInterfaceNumber > NETWORK_INTERFACE) {
+ pr_info("not a network device\n");
+ return -ENODEV;
+ }
+
+ phy_dev = kmalloc(sizeof(struct phy_dev), GFP_ATOMIC);
+ if (!phy_dev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ udev = kmalloc(sizeof(struct lte_udev), GFP_ATOMIC);
+ if (!udev) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memset(phy_dev, 0, sizeof(struct phy_dev));
+ memset(udev, 0, sizeof(struct lte_udev));
+
+ phy_dev->priv_dev = (void *)udev;
+ phy_dev->send_hci_func = gdm_usb_hci_send;
+ phy_dev->send_sdu_func = gdm_usb_sdu_send;
+ phy_dev->rcv_func = gdm_usb_recv;
+ phy_dev->get_endian = gdm_usb_get_endian;
+
+ udev->usbdev = usbdev;
+ ret = init_usb(udev);
+ if (ret < 0) {
+ pr_err("init_usb func failed\n");
+ goto out;
+ }
+ udev->intf = intf;
+
+ intf->needs_remote_wakeup = 1;
+ usb_enable_autosuspend(usbdev);
+ pm_runtime_set_autosuspend_delay(&usbdev->dev, AUTO_SUSPEND_TIMER);
+
+ /* Devices listed here use big-endian HCI fields; all others default to little endian */
+ if (idProduct == PID_GDM7243)
+ gdm_set_endian(&udev->gdm_ed, ENDIANNESS_BIG);
+ else
+ gdm_set_endian(&udev->gdm_ed, ENDIANNESS_LITTLE);
+
+ ret = request_mac_address(udev);
+ if (ret < 0) {
+ pr_err("request Mac address failed\n");
+ goto out;
+ }
+
+ start_rx_proc(phy_dev);
+out:
+
+ if (ret < 0) {
+ kfree(phy_dev);
+ if (udev) {
+ release_usb(udev);
+ kfree(udev);
+ }
+ return ret;
+ }
+
+ usb_get_dev(usbdev);
+ usb_set_intfdata(intf, phy_dev);
+
+ return 0;
+}
+
+static void gdm_usb_disconnect(struct usb_interface *intf)
+{
+ struct phy_dev *phy_dev;
+ struct lte_udev *udev;
+ u16 idVendor, idProduct;
+ struct usb_device *usbdev;
+ usbdev = interface_to_usbdev(intf);
+
+ idVendor = __le16_to_cpu(usbdev->descriptor.idVendor);
+ idProduct = __le16_to_cpu(usbdev->descriptor.idProduct);
+
+ phy_dev = usb_get_intfdata(intf);
+
+ udev = phy_dev->priv_dev;
+ unregister_lte_device(phy_dev);
+
+ release_usb(udev);
+
+ kfree(udev);
+ udev = NULL;
+
+ kfree(phy_dev);
+ phy_dev = NULL;
+
+ usb_put_dev(usbdev);
+}
+
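+/* Suspend hook: mark the device suspended and kill all in-flight RX URBs. */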
+static int gdm_usb_suspend(struct usb_interface *intf, pm_message_t pm_msg)
+{
+ struct phy_dev *phy_dev;
+ struct lte_udev *udev;
+ struct rx_cxt *rx;
+ struct usb_rx *r;
+ struct usb_rx *r_next;
+ unsigned long flags;
+
+ phy_dev = usb_get_intfdata(intf);
+ udev = phy_dev->priv_dev;
+ rx = &udev->rx;
+ if (udev->usb_state != PM_NORMAL) {
+ pr_err("usb suspend - invalid state\n");
+ return -1;
+ }
+
+ udev->usb_state = PM_SUSPEND;
+
+ spin_lock_irqsave(&rx->submit_lock, flags);
+ list_for_each_entry_safe(r, r_next, &rx->rx_submit_list, rx_submit_list) {
+ spin_unlock_irqrestore(&rx->submit_lock, flags);
+ usb_kill_urb(r->urb);
+ spin_lock_irqsave(&rx->submit_lock, flags);
+ }
+ spin_unlock_irqrestore(&rx->submit_lock, flags);
+
+ return 0;
+}
+
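+/* Resume hook: refill the RX pipeline and kick the TX work queue. */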
+static int gdm_usb_resume(struct usb_interface *intf)
+{
+ struct phy_dev *phy_dev;
+ struct lte_udev *udev;
+ struct tx_cxt *tx;
+ struct rx_cxt *rx;
+ unsigned long flags;
+ int issue_count;
+ int i;
+
+ phy_dev = usb_get_intfdata(intf);
+ udev = phy_dev->priv_dev;
+ rx = &udev->rx;
+
+ if (udev->usb_state != PM_SUSPEND) {
+ pr_err("usb resume - invalid state\n");
+ return -1;
+ }
+ udev->usb_state = PM_NORMAL;
+
+ spin_lock_irqsave(&rx->rx_lock, flags);
+ issue_count = rx->avail_count - MAX_RX_SUBMIT_COUNT;
+ spin_unlock_irqrestore(&rx->rx_lock, flags);
+
+ if (issue_count >= 0) {
+ for (i = 0; i < issue_count; i++)
+ gdm_usb_recv(phy_dev->priv_dev,
+ udev->rx_cb,
+ phy_dev,
+ USB_COMPLETE);
+ }
+
+ tx = &udev->tx;
+ spin_lock_irqsave(&tx->lock, flags);
+ queue_work(usb_tx_wq, &udev->work_tx.work);
+ spin_unlock_irqrestore(&tx->lock, flags);
+
+ return 0;
+}
+
+static struct usb_driver gdm_usb_lte_driver = {
+ .name = "gdm_lte",
+ .probe = gdm_usb_probe,
+ .disconnect = gdm_usb_disconnect,
+ .id_table = id_table,
+ .supports_autosuspend = 1,
+ .suspend = gdm_usb_suspend,
+ .resume = gdm_usb_resume,
+ .reset_resume = gdm_usb_resume,
+};
+
+static int __init gdm_usb_lte_init(void)
+{
+ if (gdm_lte_event_init() < 0) {
+ pr_err("error creating event\n");
+ return -1;
+ }
+
+ usb_tx_wq = create_workqueue("usb_tx_wq");
+ if (!usb_tx_wq)
+ return -ENOMEM;
+
+ usb_rx_wq = create_workqueue("usb_rx_wq");
+ if (!usb_rx_wq) {
+ destroy_workqueue(usb_tx_wq);
+ return -ENOMEM;
+ }
+
+ return usb_register(&gdm_usb_lte_driver);
+}
+
+static void __exit gdm_usb_lte_exit(void)
+{
+ gdm_lte_event_exit();
+
+ usb_deregister(&gdm_usb_lte_driver);
+
+ if (usb_tx_wq) {
+ flush_workqueue(usb_tx_wq);
+ destroy_workqueue(usb_tx_wq);
+ }
+
+ if (usb_rx_wq) {
+ flush_workqueue(usb_rx_wq);
+ destroy_workqueue(usb_rx_wq);
+ }
+}
+
+module_init(gdm_usb_lte_init);
+module_exit(gdm_usb_lte_exit);
+
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_DESCRIPTION("GCT LTE USB Device Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/gdm724x/gdm_usb.h b/drivers/staging/gdm724x/gdm_usb.h
new file mode 100644
index 00000000000..e6486e71a42
--- /dev/null
+++ b/drivers/staging/gdm724x/gdm_usb.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _GDM_USB_H_
+#define _GDM_USB_H_
+
+#include <linux/types.h>
+#include <linux/usb.h>
+#include <linux/list.h>
+#include <linux/time.h>
+
+#include "gdm_endian.h"
+#include "hci_packet.h"
+
+#define PM_NORMAL 0
+#define PM_SUSPEND 1
+#define AUTO_SUSPEND_TIMER 5000 /* ms */
+
+#define RX_BUF_SIZE (1024*32)
+#define TX_BUF_SIZE (1024*32)
+#define SDU_BUF_SIZE 2048
+#define MAX_SDU_SIZE (1024*30)
+#define MAX_PACKET_IN_MULTI_SDU 256
+
+#define VID_GCT 0x1076
+#define PID_GDM7240 0x8000
+#define PID_GDM7243 0x9000
+
+#define NETWORK_INTERFACE 1
+#define USB_SC_SCSI 0x06
+#define USB_PR_BULK 0x50
+
+#define MAX_NUM_SDU_BUF 64
+
+struct usb_tx {
+ struct list_head list;
+ struct urb *urb;
+ u8 *buf;
+ u32 len;
+ void (*callback)(void *cb_data);
+ void *cb_data;
+ struct tx_cxt *tx;
+ u8 is_sdu;
+};
+
+struct usb_tx_sdu {
+ struct list_head list;
+ u8 *buf;
+ u32 len;
+ void (*callback)(void *cb_data);
+ void *cb_data;
+};
+
+struct usb_rx {
+ struct list_head to_host_list;
+ struct list_head free_list;
+ struct list_head rx_submit_list;
+ struct rx_cxt *rx;
+ struct urb *urb;
+ u8 *buf;
+ int (*callback)(void *cb_data, void *data, int len, int context);
+ void *cb_data;
+ void *index;
+};
+
+struct tx_cxt {
+ struct list_head sdu_list;
+ struct list_head hci_list;
+ struct list_head free_list;
+ u32 avail_count;
+ spinlock_t lock;
+};
+
+struct rx_cxt {
+ struct list_head to_host_list;
+ struct list_head rx_submit_list;
+ struct list_head free_list;
+ u32 avail_count;
+ spinlock_t to_host_lock;
+ spinlock_t rx_lock;
+ spinlock_t submit_lock;
+};
+
+struct lte_udev {
+ struct usb_device *usbdev;
+ struct gdm_endian gdm_ed;
+ struct tx_cxt tx;
+ struct rx_cxt rx;
+ struct delayed_work work_tx;
+ struct delayed_work work_rx;
+ u8 send_complete;
+ u8 tx_stop;
+ struct usb_interface *intf;
+ int (*rx_cb)(void *cb_data, void *data, int len, int context);
+ int usb_state;
+ u8 request_mac_addr;
+};
+
+#endif /* _GDM_USB_H_ */
diff --git a/drivers/staging/gdm724x/hci.h b/drivers/staging/gdm724x/hci.h
new file mode 100644
index 00000000000..9a591b0db51
--- /dev/null
+++ b/drivers/staging/gdm724x/hci.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _HCI_H_
+#define _HCI_H_
+
+#define LTE_GET_INFORMATION 0x3002
+#define LTE_GET_INFORMATION_RESULT 0xB003
+ #define MAC_ADDRESS 0xA2
+
+#define LTE_LINK_ON_OFF_INDICATION 0xB133
+#define LTE_PDN_TABLE_IND 0xB143
+
+#define LTE_TX_SDU 0x3200
+#define LTE_RX_SDU 0xB201
+#define LTE_TX_MULTI_SDU 0x3202
+#define LTE_RX_MULTI_SDU 0xB203
+
+#define LTE_DL_SDU_FLOW_CONTROL 0x3305
+#define LTE_UL_SDU_FLOW_CONTROL 0xB306
+
+#define LTE_AT_CMD_TO_DEVICE 0x3307
+#define LTE_AT_CMD_FROM_DEVICE 0xB308
+
+#define LTE_SDIO_DM_SEND_PKT 0x3312
+#define LTE_SDIO_DM_RECV_PKT 0xB313
+
+#define LTE_NV_RESTORE_REQUEST 0xB30C
+#define LTE_NV_RESTORE_RESPONSE 0x330D
+#define LTE_NV_SAVE_REQUEST 0xB30E
+ #define NV_TYPE_LTE_INFO 0x00
+ #define NV_TYPE_BOARD_CONFIG 0x01
+ #define NV_TYPE_RF_CAL 0x02
+ #define NV_TYPE_TEMP 0x03
+ #define NV_TYPE_NET_INFO 0x04
+ #define NV_TYPE_SAFETY_INFO 0x05
+ #define NV_TYPE_CDMA_CAL 0x06
+ #define NV_TYPE_VENDOR 0x07
+ #define NV_TYPE_ALL 0xff
+#define LTE_NV_SAVE_RESPONSE 0x330F
+
+#define LTE_AT_CMD_TO_DEVICE_EXT 0x3323
+#define LTE_AT_CMD_FROM_DEVICE_EXT 0xB324
+
+#endif /* _HCI_H_ */
diff --git a/drivers/staging/gdm724x/hci_packet.h b/drivers/staging/gdm724x/hci_packet.h
new file mode 100644
index 00000000000..7fba8a687fa
--- /dev/null
+++ b/drivers/staging/gdm724x/hci_packet.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _HCI_PACKET_H_
+#define _HCI_PACKET_H_
+
+#define HCI_HEADER_SIZE 4
+
+/*
+ * The NIC type definition:
+ * For backward compatibility, the lower 16 bits are used as before.
+ * Lower 16 bits: NIC_TYPE values
+ * Upper 16 bits: NIC_TYPE flags
+ */
+#define NIC_TYPE_NIC0 0x00000010
+#define NIC_TYPE_NIC1 0x00000011
+#define NIC_TYPE_NIC2 0x00000012
+#define NIC_TYPE_NIC3 0x00000013
+#define NIC_TYPE_ARP 0x00000100
+#define NIC_TYPE_ICMPV6 0x00000200
+#define NIC_TYPE_MASK 0x0000FFFF
+#define NIC_TYPE_F_IPV4 0x00010000
+#define NIC_TYPE_F_IPV6 0x00020000
+#define NIC_TYPE_F_DHCP 0x00040000
+#define NIC_TYPE_F_NDP 0x00080000
+#define NIC_TYPE_F_VLAN 0x00100000
+
+struct hci_packet {
+ u16 cmd_evt;
+ u16 len;
+ u8 data[0];
+} __packed;
+
+struct tlv {
+ u8 type;
+ u8 len;
+ u8 *data[1];
+} __packed;
+
+struct sdu_header {
+ u16 cmd_evt;
+ u16 len;
+ u32 dftEpsId;
+ u32 bearer_ID;
+ u32 nic_type;
+} __packed;
+
+struct sdu {
+ u16 cmd_evt;
+ u16 len;
+ u32 dftEpsId;
+ u32 bearer_ID;
+ u32 nic_type;
+ u8 data[0];
+} __packed;
+
+struct multi_sdu {
+ u16 cmd_evt;
+ u16 len;
+ u16 num_packet;
+ u16 reserved;
+ u8 data[0];
+} __packed;
+
+struct hci_pdn_table_ind {
+ u16 cmd_evt;
+ u16 len;
+ u8 activate;
+ u32 dft_eps_id;
+ u32 nic_type;
+ u8 pdn_type;
+ u8 ipv4_addr[4];
+ u8 ipv6_intf_id[8];
+} __packed;
+
+struct hci_connect_ind {
+ u16 cmd_evt;
+ u16 len;
+ u32 connect;
+} __packed;
+
+#endif /* _HCI_PACKET_H_ */
diff --git a/drivers/staging/gdm724x/netlink_k.c b/drivers/staging/gdm724x/netlink_k.c
new file mode 100644
index 00000000000..77fc64e2842
--- /dev/null
+++ b/drivers/staging/gdm724x/netlink_k.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/export.h>
+#include <linux/etherdevice.h>
+#include <linux/netlink.h>
+#include <asm/byteorder.h>
+#include <net/sock.h>
+
+#include "netlink_k.h"
+
+#if defined(DEFINE_MUTEX)
+static DEFINE_MUTEX(netlink_mutex);
+#else
+static struct semaphore netlink_mutex;
+#define mutex_lock(x) down(x)
+#define mutex_unlock(x) up(x)
+#endif
+
+#define ND_MAX_GROUP 30
+#define ND_IFINDEX_LEN sizeof(int)
+#define ND_NLMSG_SPACE(len) (NLMSG_SPACE(len) + ND_IFINDEX_LEN)
+#define ND_NLMSG_DATA(nlh) ((void *)((char *)NLMSG_DATA(nlh) + ND_IFINDEX_LEN))
+#define ND_NLMSG_S_LEN(len) (len+ND_IFINDEX_LEN)
+#define ND_NLMSG_R_LEN(nlh) (nlh->nlmsg_len-ND_IFINDEX_LEN)
+#define ND_NLMSG_IFIDX(nlh) NLMSG_DATA(nlh)
+#define ND_MAX_MSG_LEN (1024 * 32)
+
+static void (*rcv_cb)(struct net_device *dev, u16 type, void *msg, int len);
+
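+/*
+ * Handle a message from user space: the payload starts with the interface
+ * index followed by the data, which is dispatched to rcv_cb with the
+ * matching net_device.
+ */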
+static void netlink_rcv_cb(struct sk_buff *skb)
+{
+ struct nlmsghdr *nlh;
+ struct net_device *dev;
+ u32 mlen;
+ void *msg;
+ int ifindex;
+
+ if (!rcv_cb) {
+ pr_err("nl cb - unregistered\n");
+ return;
+ }
+
+ if (skb->len < NLMSG_SPACE(0)) {
+ pr_err("nl cb - invalid skb length\n");
+ return;
+ }
+
+ nlh = (struct nlmsghdr *)skb->data;
+
+ if (skb->len < nlh->nlmsg_len || nlh->nlmsg_len > ND_MAX_MSG_LEN) {
+ pr_err("nl cb - invalid length (%d,%d)\n",
+ skb->len, nlh->nlmsg_len);
+ return;
+ }
+
+ memcpy(&ifindex, ND_NLMSG_IFIDX(nlh), ND_IFINDEX_LEN);
+ msg = ND_NLMSG_DATA(nlh);
+ mlen = ND_NLMSG_R_LEN(nlh);
+
+ dev = dev_get_by_index(&init_net, ifindex);
+ if (dev) {
+ rcv_cb(dev, nlh->nlmsg_type, msg, mlen);
+ dev_put(dev);
+ } else {
+ pr_err("nl cb - dev (%d) not found\n", ifindex);
+ }
+}
+
+static void netlink_rcv(struct sk_buff *skb)
+{
+ mutex_lock(&netlink_mutex);
+ netlink_rcv_cb(skb);
+ mutex_unlock(&netlink_mutex);
+}
+
+struct sock *netlink_init(int unit,
+ void (*cb)(struct net_device *dev, u16 type, void *msg, int len))
+{
+ struct sock *sock;
+ struct netlink_kernel_cfg cfg = {
+ .input = netlink_rcv,
+ };
+
+#if !defined(DEFINE_MUTEX)
+ init_MUTEX(&netlink_mutex);
+#endif
+
+ sock = netlink_kernel_create(&init_net, unit, &cfg);
+
+ if (sock)
+ rcv_cb = cb;
+
+ return sock;
+}
+
+void netlink_exit(struct sock *sock)
+{
+ sock_release(sock->sk_socket);
+}
+
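+/*
+ * Broadcast a message to the given netlink multicast group; returns the
+ * payload length on success or a negative error code.
+ */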
+int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
+{
+ static u32 seq;
+ struct sk_buff *skb = NULL;
+ struct nlmsghdr *nlh;
+ int ret = 0;
+
+ if (group > ND_MAX_GROUP)
+ return -EINVAL;
+
+ if (!netlink_has_listeners(sock, group+1))
+ return -ESRCH;
+
+ skb = alloc_skb(NLMSG_SPACE(len), GFP_ATOMIC);
+ if (!skb)
+ return -ENOMEM;
+
+ seq++;
+
+ nlh = nlmsg_put(skb, 0, seq, type, len, 0);
+ memcpy(NLMSG_DATA(nlh), msg, len);
+ NETLINK_CB(skb).portid = 0;
+ NETLINK_CB(skb).dst_group = 0;
+
+ ret = netlink_broadcast(sock, skb, 0, group+1, GFP_ATOMIC);
+ if (!ret)
+ return len;
+
+ if (ret != -ESRCH)
+ pr_err("nl broadcast g=%d, t=%d, l=%d, r=%d\n",
+ group, type, len, ret);
+ else if (netlink_has_listeners(sock, group+1))
+ return -EAGAIN;
+
+ return ret;
+}
diff --git a/drivers/staging/gdm724x/netlink_k.h b/drivers/staging/gdm724x/netlink_k.h
new file mode 100644
index 00000000000..589486d7671
--- /dev/null
+++ b/drivers/staging/gdm724x/netlink_k.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2012 GCT Semiconductor, Inc. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _NETLINK_K_H
+#define _NETLINK_K_H
+
+#include <linux/netdevice.h>
+#include <net/sock.h>
+
+struct sock *netlink_init(int unit,
+ void (*cb)(struct net_device *dev, u16 type, void *msg, int len));
+void netlink_exit(struct sock *sock);
+int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len);
+
+#endif /* _NETLINK_K_H */
diff --git a/drivers/staging/gdm72xx/gdm_qos.c b/drivers/staging/gdm72xx/gdm_qos.c
index b795353e834..cc3692439a5 100644
--- a/drivers/staging/gdm72xx/gdm_qos.c
+++ b/drivers/staging/gdm72xx/gdm_qos.c
@@ -250,8 +250,8 @@ static void send_qos_list(struct nic *nic, struct list_head *head)
list_for_each_entry_safe(entry, n, head, list) {
list_del(&entry->list);
- free_qos_entry(entry);
gdm_wimax_send_tx(entry->skb, entry->dev);
+ free_qos_entry(entry);
}
}
diff --git a/drivers/staging/iio/Documentation/device.txt b/drivers/staging/iio/Documentation/device.txt
index ea08d621337..8be32e5a0af 100644
--- a/drivers/staging/iio/Documentation/device.txt
+++ b/drivers/staging/iio/Documentation/device.txt
@@ -56,7 +56,7 @@ Then fill in the following:
- indio_dev->modes:
Specify whether direct access and / or ring buffer access is supported.
-- indio_dev->ring:
+- indio_dev->buffer:
An optional associated buffer.
- indio_dev->pollfunc:
Poll function related elements. This controls what occurs when a trigger
@@ -67,7 +67,7 @@ Then fill in the following:
- indio_dev->num_channels:
How many channels are there?
-Once these are set up, a call to iio_device_register(indio_dev),
+Once these are set up, a call to iio_device_register(indio_dev)
will register the device with the iio core.
Worth noting here is that, if a ring buffer is to be used, it can be
diff --git a/drivers/staging/iio/accel/adis16201_core.c b/drivers/staging/iio/accel/adis16201_core.c
index ab8ec7af88b..2105576fa77 100644
--- a/drivers/staging/iio/accel/adis16201_core.c
+++ b/drivers/staging/iio/accel/adis16201_core.c
@@ -182,11 +182,10 @@ static int adis16201_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
/* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
+
st = iio_priv(indio_dev);
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
@@ -201,10 +200,10 @@ static int adis16201_probe(struct spi_device *spi)
ret = adis_init(st, indio_dev, spi, &adis16201_data);
if (ret)
- goto error_free_dev;
+ return ret;
ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
if (ret)
- goto error_free_dev;
+ return ret;
/* Get the device into a sane initial state */
ret = adis_initial_startup(st);
@@ -218,9 +217,6 @@ static int adis16201_probe(struct spi_device *spi)
error_cleanup_buffer_trigger:
adis_cleanup_buffer_and_trigger(st, indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -231,7 +227,6 @@ static int adis16201_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
adis_cleanup_buffer_and_trigger(st, indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c
index b08ac8fdeee..409a28ed904 100644
--- a/drivers/staging/iio/accel/adis16203_core.c
+++ b/drivers/staging/iio/accel/adis16203_core.c
@@ -148,11 +148,9 @@ static int adis16203_probe(struct spi_device *spi)
struct adis *st;
/* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
@@ -166,11 +164,11 @@ static int adis16203_probe(struct spi_device *spi)
ret = adis_init(st, indio_dev, spi, &adis16203_data);
if (ret)
- goto error_free_dev;
+ return ret;
ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
if (ret)
- goto error_free_dev;
+ return ret;
/* Get the device into a sane initial state */
ret = adis_initial_startup(st);
@@ -185,9 +183,6 @@ static int adis16203_probe(struct spi_device *spi)
error_cleanup_buffer_trigger:
adis_cleanup_buffer_and_trigger(st, indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -198,7 +193,6 @@ static int adis16203_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
adis_cleanup_buffer_and_trigger(st, indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c
index 792ec25a50d..b8ea76857cd 100644
--- a/drivers/staging/iio/accel/adis16204_core.c
+++ b/drivers/staging/iio/accel/adis16204_core.c
@@ -187,11 +187,9 @@ static int adis16204_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
/* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
@@ -205,11 +203,11 @@ static int adis16204_probe(struct spi_device *spi)
ret = adis_init(st, indio_dev, spi, &adis16204_data);
if (ret)
- goto error_free_dev;
+ return ret;
ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
if (ret)
- goto error_free_dev;
+ return ret;
/* Get the device into a sane initial state */
ret = adis_initial_startup(st);
@@ -223,9 +221,6 @@ static int adis16204_probe(struct spi_device *spi)
error_cleanup_buffer_trigger:
adis_cleanup_buffer_and_trigger(st, indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -236,7 +231,6 @@ static int adis16204_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
adis_cleanup_buffer_and_trigger(st, indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c
index 323c169d699..4492e51d888 100644
--- a/drivers/staging/iio/accel/adis16209_core.c
+++ b/drivers/staging/iio/accel/adis16209_core.c
@@ -183,11 +183,9 @@ static int adis16209_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
/* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
@@ -201,10 +199,10 @@ static int adis16209_probe(struct spi_device *spi)
ret = adis_init(st, indio_dev, spi, &adis16209_data);
if (ret)
- goto error_free_dev;
+ return ret;
ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
if (ret)
- goto error_free_dev;
+ return ret;
/* Get the device into a sane initial state */
ret = adis_initial_startup(st);
@@ -218,9 +216,6 @@ static int adis16209_probe(struct spi_device *spi)
error_cleanup_buffer_trigger:
adis_cleanup_buffer_and_trigger(st, indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -231,7 +226,6 @@ static int adis16209_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
adis_cleanup_buffer_and_trigger(st, indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
index 0e72f795ed0..5c289614357 100644
--- a/drivers/staging/iio/accel/adis16220_core.c
+++ b/drivers/staging/iio/accel/adis16220_core.c
@@ -428,11 +428,9 @@ static int adis16220_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
/* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
/* this is only used for removal purposes */
@@ -447,7 +445,7 @@ static int adis16220_probe(struct spi_device *spi)
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &accel_bin);
if (ret)
@@ -478,9 +476,6 @@ error_rm_accel_bin:
sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
error_unregister_dev:
iio_device_unregister(indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -492,7 +487,6 @@ static int adis16220_remove(struct spi_device *spi)
sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
iio_device_unregister(indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c
index fd1f0fd0fba..3a303a03d02 100644
--- a/drivers/staging/iio/accel/adis16240_core.c
+++ b/drivers/staging/iio/accel/adis16240_core.c
@@ -236,11 +236,9 @@ static int adis16240_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
/* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
@@ -254,10 +252,10 @@ static int adis16240_probe(struct spi_device *spi)
ret = adis_init(st, indio_dev, spi, &adis16240_data);
if (ret)
- goto error_free_dev;
+ return ret;
ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
if (ret)
- goto error_free_dev;
+ return ret;
/* Get the device into a sane initial state */
ret = adis_initial_startup(st);
@@ -270,9 +268,6 @@ static int adis16240_probe(struct spi_device *spi)
error_cleanup_buffer_trigger:
adis_cleanup_buffer_and_trigger(st, indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -283,7 +278,6 @@ static int adis16240_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
adis_cleanup_buffer_and_trigger(st, indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
index 8ed75a94f46..bb852dc9c98 100644
--- a/drivers/staging/iio/accel/lis3l02dq_core.c
+++ b/drivers/staging/iio/accel/lis3l02dq_core.c
@@ -668,11 +668,9 @@ static int lis3l02dq_probe(struct spi_device *spi)
struct lis3l02dq_state *st;
struct iio_dev *indio_dev;
- indio_dev = iio_device_alloc(sizeof *st);
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
@@ -690,7 +688,7 @@ static int lis3l02dq_probe(struct spi_device *spi)
ret = lis3l02dq_configure_buffer(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
ret = iio_buffer_register(indio_dev,
lis3l02dq_channels,
@@ -736,9 +734,6 @@ error_uninitialize_buffer:
iio_buffer_unregister(indio_dev);
error_unreg_buffer_funcs:
lis3l02dq_unconfigure_buffer(indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
return ret;
}
@@ -786,8 +781,6 @@ static int lis3l02dq_remove(struct spi_device *spi)
iio_buffer_unregister(indio_dev);
lis3l02dq_unconfigure_buffer(indio_dev);
- iio_device_free(indio_dev);
-
return 0;
}
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index 32950ad9485..48a25ba290f 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -1135,11 +1135,9 @@ static int sca3000_probe(struct spi_device *spi)
struct sca3000_state *st;
struct iio_dev *indio_dev;
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
st = iio_priv(indio_dev);
spi_set_drvdata(spi, indio_dev);
@@ -1162,7 +1160,7 @@ static int sca3000_probe(struct spi_device *spi)
sca3000_configure_ring(indio_dev);
ret = iio_device_register(indio_dev);
if (ret < 0)
- goto error_free_dev;
+ return ret;
ret = iio_buffer_register(indio_dev,
sca3000_channels,
@@ -1198,10 +1196,6 @@ error_unregister_ring:
iio_buffer_unregister(indio_dev);
error_unregister_dev:
iio_device_unregister(indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-
-error_ret:
return ret;
}
@@ -1235,7 +1229,6 @@ static int sca3000_remove(struct spi_device *spi)
iio_device_unregister(indio_dev);
iio_buffer_unregister(indio_dev);
sca3000_unconfigure_ring(indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index 9f52a285792..a08c1736458 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -225,6 +225,9 @@ struct mxs_lradc {
#define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4))
#define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4)
+#define LRADC_RESOLUTION 12
+#define LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1)
+
/*
* Raw I/O operations
*/
@@ -540,9 +543,10 @@ static int mxs_lradc_ts_register(struct mxs_lradc *lradc)
__set_bit(EV_ABS, input->evbit);
__set_bit(EV_KEY, input->evbit);
__set_bit(BTN_TOUCH, input->keybit);
- input_set_abs_params(input, ABS_X, 0, LRADC_CH_VALUE_MASK, 0, 0);
- input_set_abs_params(input, ABS_Y, 0, LRADC_CH_VALUE_MASK, 0, 0);
- input_set_abs_params(input, ABS_PRESSURE, 0, LRADC_CH_VALUE_MASK, 0, 0);
+ input_set_abs_params(input, ABS_X, 0, LRADC_SINGLE_SAMPLE_MASK, 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, LRADC_SINGLE_SAMPLE_MASK, 0, 0);
+ input_set_abs_params(input, ABS_PRESSURE, 0, LRADC_SINGLE_SAMPLE_MASK,
+ 0, 0);
lradc->ts_input = input;
input_set_drvdata(input, lradc);
@@ -817,7 +821,7 @@ static const struct iio_buffer_setup_ops mxs_lradc_buffer_ops = {
.channel = (idx), \
.scan_type = { \
.sign = 'u', \
- .realbits = 18, \
+ .realbits = LRADC_RESOLUTION, \
.storagebits = 32, \
}, \
}
@@ -841,14 +845,16 @@ static const struct iio_chan_spec mxs_lradc_chan_spec[] = {
MXS_ADC_CHAN(15, IIO_VOLTAGE), /* VDD5V */
};
-static void mxs_lradc_hw_init(struct mxs_lradc *lradc)
+static int mxs_lradc_hw_init(struct mxs_lradc *lradc)
{
/* The ADC always uses DELAY CHANNEL 0. */
const uint32_t adc_cfg =
(1 << (LRADC_DELAY_TRIGGER_DELAYS_OFFSET + 0)) |
(LRADC_DELAY_TIMER_PER << LRADC_DELAY_DELAY_OFFSET);
- stmp_reset_block(lradc->base);
+ int ret = stmp_reset_block(lradc->base);
+ if (ret)
+ return ret;
/* Configure DELAY CHANNEL 0 for generic ADC sampling. */
writel(adc_cfg, lradc->base + LRADC_DELAY(0));
@@ -869,6 +875,8 @@ static void mxs_lradc_hw_init(struct mxs_lradc *lradc)
/* Start internal temperature sensing. */
writel(0, lradc->base + LRADC_CTRL2);
+
+ return 0;
}
static void mxs_lradc_hw_stop(struct mxs_lradc *lradc)
@@ -905,7 +913,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
int i;
/* Allocate the IIO device. */
- iio = iio_device_alloc(sizeof(*lradc));
+ iio = devm_iio_device_alloc(dev, sizeof(*lradc));
if (!iio) {
dev_err(dev, "Failed to allocate IIO device\n");
return -ENOMEM;
@@ -917,10 +925,8 @@ static int mxs_lradc_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
lradc->dev = &pdev->dev;
lradc->base = devm_ioremap_resource(dev, iores);
- if (IS_ERR(lradc->base)) {
- ret = PTR_ERR(lradc->base);
- goto err_addr;
- }
+ if (IS_ERR(lradc->base))
+ return PTR_ERR(lradc->base);
INIT_WORK(&lradc->ts_work, mxs_lradc_ts_work);
@@ -940,16 +946,14 @@ static int mxs_lradc_probe(struct platform_device *pdev)
/* Grab all IRQ sources */
for (i = 0; i < of_cfg->irq_count; i++) {
lradc->irq[i] = platform_get_irq(pdev, i);
- if (lradc->irq[i] < 0) {
- ret = -EINVAL;
- goto err_addr;
- }
+ if (lradc->irq[i] < 0)
+ return -EINVAL;
ret = devm_request_irq(dev, lradc->irq[i],
mxs_lradc_handle_irq, 0,
of_cfg->irq_name[i], iio);
if (ret)
- goto err_addr;
+ return ret;
}
platform_set_drvdata(pdev, iio);
@@ -969,14 +973,16 @@ static int mxs_lradc_probe(struct platform_device *pdev)
&mxs_lradc_trigger_handler,
&mxs_lradc_buffer_ops);
if (ret)
- goto err_addr;
+ return ret;
ret = mxs_lradc_trigger_init(iio);
if (ret)
goto err_trig;
/* Configure the hardware. */
- mxs_lradc_hw_init(lradc);
+ ret = mxs_lradc_hw_init(lradc);
+ if (ret)
+ goto err_dev;
/* Register the touchscreen input device. */
ret = mxs_lradc_ts_register(lradc);
@@ -998,8 +1004,6 @@ err_dev:
mxs_lradc_trigger_remove(iio);
err_trig:
iio_triggered_buffer_cleanup(iio);
-err_addr:
- iio_device_free(iio);
return ret;
}
@@ -1015,7 +1019,6 @@ static int mxs_lradc_remove(struct platform_device *pdev)
iio_device_unregister(iio);
iio_triggered_buffer_cleanup(iio);
mxs_lradc_trigger_remove(iio);
- iio_device_free(iio);
return 0;
}
diff --git a/drivers/staging/iio/adc/spear_adc.c b/drivers/staging/iio/adc/spear_adc.c
index 736219c3030..20f2d555e7c 100644
--- a/drivers/staging/iio/adc/spear_adc.c
+++ b/drivers/staging/iio/adc/spear_adc.c
@@ -300,11 +300,10 @@ static int spear_adc_probe(struct platform_device *pdev)
int ret = -ENODEV;
int irq;
- iodev = iio_device_alloc(sizeof(struct spear_adc_info));
+ iodev = devm_iio_device_alloc(dev, sizeof(struct spear_adc_info));
if (!iodev) {
dev_err(dev, "failed allocating iio device\n");
- ret = -ENOMEM;
- goto errout1;
+ return -ENOMEM;
}
info = iio_priv(iodev);
@@ -318,8 +317,7 @@ static int spear_adc_probe(struct platform_device *pdev)
info->adc_base_spear6xx = of_iomap(np, 0);
if (!info->adc_base_spear6xx) {
dev_err(dev, "failed mapping memory\n");
- ret = -ENOMEM;
- goto errout2;
+ return -ENOMEM;
}
info->adc_base_spear3xx =
(struct adc_regs_spear3xx *)info->adc_base_spear6xx;
@@ -327,33 +325,33 @@ static int spear_adc_probe(struct platform_device *pdev)
info->clk = clk_get(dev, NULL);
if (IS_ERR(info->clk)) {
dev_err(dev, "failed getting clock\n");
- goto errout3;
+ goto errout1;
}
ret = clk_prepare_enable(info->clk);
if (ret) {
dev_err(dev, "failed enabling clock\n");
- goto errout4;
+ goto errout2;
}
irq = platform_get_irq(pdev, 0);
if ((irq < 0) || (irq >= NR_IRQS)) {
dev_err(dev, "failed getting interrupt resource\n");
ret = -EINVAL;
- goto errout5;
+ goto errout3;
}
ret = devm_request_irq(dev, irq, spear_adc_isr, 0, MOD_NAME, info);
if (ret < 0) {
dev_err(dev, "failed requesting interrupt\n");
- goto errout5;
+ goto errout3;
}
if (of_property_read_u32(np, "sampling-frequency",
&info->sampling_freq)) {
dev_err(dev, "sampling-frequency missing in DT\n");
ret = -EINVAL;
- goto errout5;
+ goto errout3;
}
/*
@@ -383,21 +381,18 @@ static int spear_adc_probe(struct platform_device *pdev)
ret = iio_device_register(iodev);
if (ret)
- goto errout5;
+ goto errout3;
dev_info(dev, "SPEAR ADC driver loaded, IRQ %d\n", irq);
return 0;
-errout5:
- clk_disable_unprepare(info->clk);
-errout4:
- clk_put(info->clk);
errout3:
- iounmap(info->adc_base_spear6xx);
+ clk_disable_unprepare(info->clk);
errout2:
- iio_device_free(iodev);
+ clk_put(info->clk);
errout1:
+ iounmap(info->adc_base_spear6xx);
return ret;
}
@@ -410,7 +405,6 @@ static int spear_adc_remove(struct platform_device *pdev)
clk_disable_unprepare(info->clk);
clk_put(info->clk);
iounmap(info->adc_base_spear6xx);
- iio_device_free(iodev);
return 0;
}
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 506b5a7d96c..1e1356825d6 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -551,31 +551,6 @@ static IIO_DEVICE_ATTR(enable_smbus_timeout, S_IRUGO | S_IWUSR,
adt7316_store_enable_smbus_timeout,
0);
-
-static ssize_t adt7316_store_reset(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *dev_info = dev_to_iio_dev(dev);
- struct adt7316_chip_info *chip = iio_priv(dev_info);
- u8 config2;
- int ret;
-
- config2 = chip->config2 | ADT7316_RESET;
-
- ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG2, config2);
- if (ret)
- return -EIO;
-
- return len;
-}
-
-static IIO_DEVICE_ATTR(reset, S_IWUSR,
- NULL,
- adt7316_store_reset,
- 0);
-
static ssize_t adt7316_show_powerdown(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -1675,7 +1650,6 @@ static IIO_DEVICE_ATTR(bus_type, S_IRUGO, adt7316_show_bus_type, NULL, 0);
static struct attribute *adt7316_attributes[] = {
&iio_dev_attr_all_modes.dev_attr.attr,
&iio_dev_attr_mode.dev_attr.attr,
- &iio_dev_attr_reset.dev_attr.attr,
&iio_dev_attr_enabled.dev_attr.attr,
&iio_dev_attr_ad_channel.dev_attr.attr,
&iio_dev_attr_all_ad_channels.dev_attr.attr,
@@ -1719,7 +1693,6 @@ static struct attribute *adt7516_attributes[] = {
&iio_dev_attr_all_modes.dev_attr.attr,
&iio_dev_attr_mode.dev_attr.attr,
&iio_dev_attr_select_ex_temp.dev_attr.attr,
- &iio_dev_attr_reset.dev_attr.attr,
&iio_dev_attr_enabled.dev_attr.attr,
&iio_dev_attr_ad_channel.dev_attr.attr,
&iio_dev_attr_all_ad_channels.dev_attr.attr,
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index 687dd2c9143..f4a0341cc70 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -558,11 +558,9 @@ static int ad7150_probe(struct i2c_client *client,
struct ad7150_chip_info *chip;
struct iio_dev *indio_dev;
- indio_dev = iio_device_alloc(sizeof(*chip));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
chip = iio_priv(indio_dev);
mutex_init(&chip->state_lock);
/* this is only used for device removal purposes */
@@ -581,7 +579,7 @@ static int ad7150_probe(struct i2c_client *client,
indio_dev->modes = INDIO_DIRECT_MODE;
if (client->irq) {
- ret = request_threaded_irq(client->irq,
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
NULL,
&ad7150_event_handler,
IRQF_TRIGGER_RISING |
@@ -590,11 +588,11 @@ static int ad7150_probe(struct i2c_client *client,
"ad7150_irq1",
indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
}
if (client->dev.platform_data) {
- ret = request_threaded_irq(*(unsigned int *)
+ ret = devm_request_threaded_irq(&client->dev, *(unsigned int *)
client->dev.platform_data,
NULL,
&ad7150_event_handler,
@@ -604,28 +602,17 @@ static int ad7150_probe(struct i2c_client *client,
"ad7150_irq2",
indio_dev);
if (ret)
- goto error_free_irq;
+ return ret;
}
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_irq2;
+ return ret;
dev_info(&client->dev, "%s capacitive sensor registered,irq: %d\n",
id->name, client->irq);
return 0;
-error_free_irq2:
- if (client->dev.platform_data)
- free_irq(*(unsigned int *)client->dev.platform_data,
- indio_dev);
-error_free_irq:
- if (client->irq)
- free_irq(client->irq, indio_dev);
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
- return ret;
}
static int ad7150_remove(struct i2c_client *client)
@@ -633,13 +620,6 @@ static int ad7150_remove(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
- if (client->irq)
- free_irq(client->irq, indio_dev);
-
- if (client->dev.platform_data)
- free_irq(*(unsigned int *)client->dev.platform_data, indio_dev);
-
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
index 1d7c5283a85..f2c309d1eb5 100644
--- a/drivers/staging/iio/cdc/ad7152.c
+++ b/drivers/staging/iio/cdc/ad7152.c
@@ -481,11 +481,9 @@ static int ad7152_probe(struct i2c_client *client,
struct ad7152_chip_info *chip;
struct iio_dev *indio_dev;
- indio_dev = iio_device_alloc(sizeof(*chip));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
chip = iio_priv(indio_dev);
/* this is only used for device removal purposes */
i2c_set_clientdata(client, indio_dev);
@@ -506,16 +504,11 @@ static int ad7152_probe(struct i2c_client *client,
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
dev_err(&client->dev, "%s capacitive sensor registered\n", id->name);
return 0;
-
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
- return ret;
}
static int ad7152_remove(struct i2c_client *client)
@@ -523,7 +516,6 @@ static int ad7152_remove(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index 94f9ca726d1..75a533bce02 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -699,11 +699,9 @@ static int ad7746_probe(struct i2c_client *client,
int ret = 0;
unsigned char regval = 0;
- indio_dev = iio_device_alloc(sizeof(*chip));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
+ if (!indio_dev)
+ return -ENOMEM;
chip = iio_priv(indio_dev);
/* this is only used for device removal purposes */
i2c_set_clientdata(client, indio_dev);
@@ -748,20 +746,15 @@ static int ad7746_probe(struct i2c_client *client,
ret = i2c_smbus_write_byte_data(chip->client,
AD7746_REG_EXC_SETUP, regval);
if (ret < 0)
- goto error_free_dev;
+ return ret;
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
dev_info(&client->dev, "%s capacitive sensor registered\n", id->name);
return 0;
-
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
- return ret;
}
static int ad7746_remove(struct i2c_client *client)
@@ -769,7 +762,6 @@ static int ad7746_remove(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/gyro/Kconfig b/drivers/staging/iio/gyro/Kconfig
index b4333715536..88b199bb292 100644
--- a/drivers/staging/iio/gyro/Kconfig
+++ b/drivers/staging/iio/gyro/Kconfig
@@ -10,16 +10,4 @@ config ADIS16060
Say yes here to build support for Analog Devices adis16060 wide bandwidth
yaw rate gyroscope with SPI.
-config ADIS16260
- tristate "Analog Devices ADIS16260 Digital Gyroscope Sensor SPI driver"
- depends on SPI
- select IIO_ADIS_LIB
- select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
- help
- Say yes here to build support for Analog Devices ADIS16260 ADIS16265
- ADIS16250 ADIS16255 and ADIS16251 programmable digital gyroscope sensors.
-
- This driver can also be built as a module. If so, the module
- will be called adis16260.
-
endmenu
diff --git a/drivers/staging/iio/gyro/Makefile b/drivers/staging/iio/gyro/Makefile
index 975f95b141d..cf22d6d55e2 100644
--- a/drivers/staging/iio/gyro/Makefile
+++ b/drivers/staging/iio/gyro/Makefile
@@ -4,6 +4,3 @@
adis16060-y := adis16060_core.o
obj-$(CONFIG_ADIS16060) += adis16060.o
-
-adis16260-y := adis16260_core.o
-obj-$(CONFIG_ADIS16260) += adis16260.o
diff --git a/drivers/staging/iio/gyro/adis16060_core.c b/drivers/staging/iio/gyro/adis16060_core.c
index c67d3a832ae..6d3d771154f 100644
--- a/drivers/staging/iio/gyro/adis16060_core.c
+++ b/drivers/staging/iio/gyro/adis16060_core.c
@@ -151,11 +151,9 @@ static int adis16060_r_probe(struct spi_device *spi)
struct iio_dev *indio_dev;
/* setup the industrialio driver allocated elements */
- indio_dev = iio_device_alloc(sizeof(*st));
- if (indio_dev == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
+ indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
+ if (!indio_dev)
+ return -ENOMEM;
/* this is only used for removal purposes */
spi_set_drvdata(spi, indio_dev);
st = iio_priv(indio_dev);
@@ -171,23 +169,16 @@ static int adis16060_r_probe(struct spi_device *spi)
ret = iio_device_register(indio_dev);
if (ret)
- goto error_free_dev;
+ return ret;
adis16060_iio_dev = indio_dev;
return 0;
-
-error_free_dev:
- iio_device_free(indio_dev);
-error_ret:
- return ret;
}
/* fixme, confirm ordering in this function */
static int adis16060_r_remove(struct spi_device *spi)
{
iio_device_unregister(spi_get_drvdata(spi));
- iio_device_free(spi_get_drvdata(spi));
-
return 0;
}
diff --git a/drivers/staging/iio/gyro/adis16260.h b/drivers/staging/iio/gyro/adis16260.h
deleted file mode 100644
index df3c0b7e954..00000000000
--- a/drivers/staging/iio/gyro/adis16260.h
+++ /dev/null
@@ -1,98 +0,0 @@
-#ifndef SPI_ADIS16260_H_
-#define SPI_ADIS16260_H_
-
-#include "adis16260_platform_data.h"
-#include <linux/iio/imu/adis.h>
-
-#define ADIS16260_STARTUP_DELAY 220 /* ms */
-
-#define ADIS16260_FLASH_CNT 0x00 /* Flash memory write count */
-#define ADIS16260_SUPPLY_OUT 0x02 /* Power supply measurement */
-#define ADIS16260_GYRO_OUT 0x04 /* X-axis gyroscope output */
-#define ADIS16260_AUX_ADC 0x0A /* analog input channel measurement */
-#define ADIS16260_TEMP_OUT 0x0C /* internal temperature measurement */
-#define ADIS16260_ANGL_OUT 0x0E /* angle displacement */
-#define ADIS16260_GYRO_OFF 0x14 /* Calibration, offset/bias adjustment */
-#define ADIS16260_GYRO_SCALE 0x16 /* Calibration, scale adjustment */
-#define ADIS16260_ALM_MAG1 0x20 /* Alarm 1 magnitude/polarity setting */
-#define ADIS16260_ALM_MAG2 0x22 /* Alarm 2 magnitude/polarity setting */
-#define ADIS16260_ALM_SMPL1 0x24 /* Alarm 1 dynamic rate of change setting */
-#define ADIS16260_ALM_SMPL2 0x26 /* Alarm 2 dynamic rate of change setting */
-#define ADIS16260_ALM_CTRL 0x28 /* Alarm control */
-#define ADIS16260_AUX_DAC 0x30 /* Auxiliary DAC data */
-#define ADIS16260_GPIO_CTRL 0x32 /* Control, digital I/O line */
-#define ADIS16260_MSC_CTRL 0x34 /* Control, data ready, self-test settings */
-#define ADIS16260_SMPL_PRD 0x36 /* Control, internal sample rate */
-#define ADIS16260_SENS_AVG 0x38 /* Control, dynamic range, filtering */
-#define ADIS16260_SLP_CNT 0x3A /* Control, sleep mode initiation */
-#define ADIS16260_DIAG_STAT 0x3C /* Diagnostic, error flags */
-#define ADIS16260_GLOB_CMD 0x3E /* Control, global commands */
-#define ADIS16260_LOT_ID1 0x52 /* Lot Identification Code 1 */
-#define ADIS16260_LOT_ID2 0x54 /* Lot Identification Code 2 */
-#define ADIS16260_PROD_ID 0x56 /* Product identifier;
- * convert to decimal = 16,265/16,260 */
-#define ADIS16260_SERIAL_NUM 0x58 /* Serial number */
-
-#define ADIS16260_ERROR_ACTIVE (1<<14)
-#define ADIS16260_NEW_DATA (1<<15)
-
-/* MSC_CTRL */
-#define ADIS16260_MSC_CTRL_MEM_TEST (1<<11)
-/* Internal self-test enable */
-#define ADIS16260_MSC_CTRL_INT_SELF_TEST (1<<10)
-#define ADIS16260_MSC_CTRL_NEG_SELF_TEST (1<<9)
-#define ADIS16260_MSC_CTRL_POS_SELF_TEST (1<<8)
-#define ADIS16260_MSC_CTRL_DATA_RDY_EN (1<<2)
-#define ADIS16260_MSC_CTRL_DATA_RDY_POL_HIGH (1<<1)
-#define ADIS16260_MSC_CTRL_DATA_RDY_DIO2 (1<<0)
-
-/* SMPL_PRD */
-/* Time base (tB): 0 = 1.953 ms, 1 = 60.54 ms */
-#define ADIS16260_SMPL_PRD_TIME_BASE (1<<7)
-#define ADIS16260_SMPL_PRD_DIV_MASK 0x7F
-
-/* SLP_CNT */
-#define ADIS16260_SLP_CNT_POWER_OFF 0x80
-
-/* DIAG_STAT */
-#define ADIS16260_DIAG_STAT_ALARM2 (1<<9)
-#define ADIS16260_DIAG_STAT_ALARM1 (1<<8)
-#define ADIS16260_DIAG_STAT_FLASH_CHK_BIT 6
-#define ADIS16260_DIAG_STAT_SELF_TEST_BIT 5
-#define ADIS16260_DIAG_STAT_OVERFLOW_BIT 4
-#define ADIS16260_DIAG_STAT_SPI_FAIL_BIT 3
-#define ADIS16260_DIAG_STAT_FLASH_UPT_BIT 2
-#define ADIS16260_DIAG_STAT_POWER_HIGH_BIT 1
-#define ADIS16260_DIAG_STAT_POWER_LOW_BIT 0
-
-/* GLOB_CMD */
-#define ADIS16260_GLOB_CMD_SW_RESET (1<<7)
-#define ADIS16260_GLOB_CMD_FLASH_UPD (1<<3)
-#define ADIS16260_GLOB_CMD_DAC_LATCH (1<<2)
-#define ADIS16260_GLOB_CMD_FAC_CALIB (1<<1)
-#define ADIS16260_GLOB_CMD_AUTO_NULL (1<<0)
-
-#define ADIS16260_SPI_SLOW (u32)(300 * 1000)
-#define ADIS16260_SPI_BURST (u32)(1000 * 1000)
-#define ADIS16260_SPI_FAST (u32)(2000 * 1000)
-
-/**
- * struct adis16260_state - device instance specific data
- * @negate: negate the scale parameter
- **/
-struct adis16260_state {
- unsigned negate:1;
- struct adis adis;
-};
-
-/* At the moment triggers are only used for ring buffer
- * filling. This may change!
- */
-
-#define ADIS16260_SCAN_GYRO 0
-#define ADIS16260_SCAN_SUPPLY 1
-#define ADIS16260_SCAN_AUX_ADC 2
-#define ADIS16260_SCAN_TEMP 3
-#define ADIS16260_SCAN_ANGL 4
-
-#endif /* SPI_ADIS16260_H_ */
diff --git a/drivers/staging/iio/gyro/adis16260_platform_data.h b/drivers/staging/iio/gyro/adis16260_platform_data.h
deleted file mode 100644
index 12802e97be9..00000000000
--- a/drivers/staging/iio/gyro/adis16260_platform_data.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * ADIS16260 Programmable Digital Gyroscope Sensor Driver Platform Data
- *
- * Based on adis16255.h Matthia Brugger <m_brugger&web.de>
- *
- * Copyright (C) 2010 Fraunhofer Institute for Integrated Circuits
- *
- * Licensed under the GPL-2 or later.
- */
-
-/**
- * struct adis16260_platform_data - instance specific data
- * @direction: x y or z
- * @negate: flag to indicate value should be inverted.
- **/
-struct adis16260_platform_data {
- char direction;
- unsigned negate:1;
-};
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index 82478a59e42..351936c3efd 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -550,11 +550,10 @@ static int isl29018_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
int err;
- indio_dev = iio_device_alloc(sizeof(*chip));
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
if (indio_dev == NULL) {
dev_err(&client->dev, "iio allocation fails\n");
- err = -ENOMEM;
- goto exit;
+ return -ENOMEM;
}
chip = iio_priv(indio_dev);
@@ -572,12 +571,12 @@ static int isl29018_probe(struct i2c_client *client,
if (IS_ERR(chip->regmap)) {
err = PTR_ERR(chip->regmap);
dev_err(chip->dev, "regmap initialization failed: %d\n", err);
- goto exit;
+ return err;
}
err = isl29018_chip_init(chip);
if (err)
- goto exit_iio_free;
+ return err;
indio_dev->info = &isl29108_info;
indio_dev->channels = isl29018_channels;
@@ -588,14 +587,10 @@ static int isl29018_probe(struct i2c_client *client,
err = iio_device_register(indio_dev);
if (err) {
dev_err(&client->dev, "iio registration fails\n");
- goto exit_iio_free;
+ return err;
}
return 0;
-exit_iio_free:
- iio_device_free(indio_dev);
-exit:
- return err;
}
static int isl29018_remove(struct i2c_client *client)
@@ -604,7 +599,6 @@ static int isl29018_remove(struct i2c_client *client)
dev_dbg(&client->dev, "%s()\n", __func__);
iio_device_unregister(indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/light/isl29028.c b/drivers/staging/iio/light/isl29028.c
index 8bb0d03627f..6014625920b 100644
--- a/drivers/staging/iio/light/isl29028.c
+++ b/drivers/staging/iio/light/isl29028.c
@@ -482,7 +482,7 @@ static int isl29028_probe(struct i2c_client *client,
struct iio_dev *indio_dev;
int ret;
- indio_dev = iio_device_alloc(sizeof(*chip));
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
if (!indio_dev) {
dev_err(&client->dev, "iio allocation fails\n");
return -ENOMEM;
@@ -498,13 +498,13 @@ static int isl29028_probe(struct i2c_client *client,
if (IS_ERR(chip->regmap)) {
ret = PTR_ERR(chip->regmap);
dev_err(chip->dev, "regmap initialization failed: %d\n", ret);
- goto exit_iio_free;
+ return ret;
}
ret = isl29028_chip_init(chip);
if (ret < 0) {
dev_err(chip->dev, "chip initialization failed: %d\n", ret);
- goto exit_iio_free;
+ return ret;
}
indio_dev->info = &isl29028_info;
@@ -517,13 +517,9 @@ static int isl29028_probe(struct i2c_client *client,
if (ret < 0) {
dev_err(chip->dev, "iio registration fails with error %d\n",
ret);
- goto exit_iio_free;
+ return ret;
}
return 0;
-
-exit_iio_free:
- iio_device_free(indio_dev);
- return ret;
}
static int isl29028_remove(struct i2c_client *client)
@@ -531,7 +527,6 @@ static int isl29028_remove(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
- iio_device_free(indio_dev);
return 0;
}
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index 86c6bf9d5dd..d2748c329ea 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -20,12 +20,10 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/types.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/delay.h>
#define HMC5843_CONFIG_REG_A 0x00
#define HMC5843_CONFIG_REG_B 0x01
@@ -42,9 +40,6 @@
#define HMC5883_DATA_OUT_Y_MSB_REG 0x07
#define HMC5883_DATA_OUT_Y_LSB_REG 0x08
#define HMC5843_STATUS_REG 0x09
-#define HMC5843_ID_REG_A 0x0A
-#define HMC5843_ID_REG_B 0x0B
-#define HMC5843_ID_REG_C 0x0C
enum hmc5843_ids {
HMC5843_ID,
@@ -53,14 +48,6 @@ enum hmc5843_ids {
};
/*
- * Beware: identification of the HMC5883 is still "H43";
- * I2C address is also unchanged
- */
-#define HMC5843_ID_REG_LENGTH 0x03
-#define HMC5843_ID_STRING "H43"
-#define HMC5843_I2C_ADDRESS 0x1E
-
-/*
* Range gain settings in (+-)Ga
* Beware: HMC5843 and HMC5883 have different recommended sensor field
* ranges; default corresponds to +-1.0 Ga and +-1.3 Ga, respectively
@@ -185,14 +172,9 @@ static const char * const hmc5883_regval_to_sample_freq[] = {
"0.75", "1.5", "3", "7.5", "15", "30", "75",
};
-/* Addresses to scan: 0x1E */
-static const unsigned short normal_i2c[] = { HMC5843_I2C_ADDRESS,
- I2C_CLIENT_END };
-
/* Describe chip variants */
struct hmc5843_chip_info {
const struct iio_chan_spec *channels;
- int num_channels;
const char * const *regval_to_sample_freq;
const int *regval_to_input_field_mga;
const int *regval_to_nanoscale;
@@ -225,18 +207,29 @@ static int hmc5843_read_measurement(struct iio_dev *indio_dev,
struct i2c_client *client = to_i2c_client(indio_dev->dev.parent);
struct hmc5843_data *data = iio_priv(indio_dev);
s32 result;
+ int tries = 150;
mutex_lock(&data->lock);
- result = i2c_smbus_read_byte_data(client, HMC5843_STATUS_REG);
- while (!(result & HMC5843_DATA_READY))
- result = i2c_smbus_read_byte_data(client, HMC5843_STATUS_REG);
+ while (tries-- > 0) {
+ result = i2c_smbus_read_byte_data(client,
+ HMC5843_STATUS_REG);
+ if (result & HMC5843_DATA_READY)
+ break;
+ msleep(20);
+ }
+
+ if (tries < 0) {
+ dev_err(&client->dev, "data not ready\n");
+ mutex_unlock(&data->lock);
+ return -EIO;
+ }
- result = i2c_smbus_read_word_data(client, address);
+ result = i2c_smbus_read_word_swapped(client, address);
mutex_unlock(&data->lock);
if (result < 0)
return -EINVAL;
- *val = (s16)swab16((u16)result);
+ *val = result;
return IIO_VAL_INT;
}
@@ -559,14 +552,14 @@ static int hmc5843_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
-#define HMC5843_CHANNEL(axis, add) \
+#define HMC5843_CHANNEL(axis, addr) \
{ \
.type = IIO_MAGN, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .address = add \
+ .address = addr \
}
static const struct iio_chan_spec hmc5843_channels[] = {
@@ -597,7 +590,6 @@ static const struct attribute_group hmc5843_group = {
static const struct hmc5843_chip_info hmc5843_chip_info_tbl[] = {
[HMC5843_ID] = {
.channels = hmc5843_channels,
- .num_channels = ARRAY_SIZE(hmc5843_channels),
.regval_to_sample_freq = hmc5843_regval_to_sample_freq,
.regval_to_input_field_mga =
hmc5843_regval_to_input_field_mga,
@@ -605,7 +597,6 @@ static const struct hmc5843_chip_info hmc5843_chip_info_tbl[] = {
},
[HMC5883_ID] = {
.channels = hmc5883_channels,
- .num_channels = ARRAY_SIZE(hmc5883_channels),
.regval_to_sample_freq = hmc5883_regval_to_sample_freq,
.regval_to_input_field_mga =
hmc5883_regval_to_input_field_mga,
@@ -613,7 +604,6 @@ static const struct hmc5843_chip_info hmc5843_chip_info_tbl[] = {
},
[HMC5883L_ID] = {
.channels = hmc5883_channels,
- .num_channels = ARRAY_SIZE(hmc5883_channels),
.regval_to_sample_freq = hmc5883_regval_to_sample_freq,
.regval_to_input_field_mga =
hmc5883l_regval_to_input_field_mga,
@@ -621,25 +611,6 @@ static const struct hmc5843_chip_info hmc5843_chip_info_tbl[] = {
},
};
-static int hmc5843_detect(struct i2c_client *client,
- struct i2c_board_info *info)
-{
- unsigned char id_str[HMC5843_ID_REG_LENGTH];
-
- if (client->addr != HMC5843_I2C_ADDRESS)
- return -ENODEV;
-
- if (i2c_smbus_read_i2c_block_data(client, HMC5843_ID_REG_A,
- HMC5843_ID_REG_LENGTH, id_str)
- != HMC5843_ID_REG_LENGTH)
- return -ENODEV;
-
- if (0 != strncmp(id_str, HMC5843_ID_STRING, HMC5843_ID_REG_LENGTH))
- return -ENODEV;
-
- return 0;
-}
-
/* Called when we have found a new HMC58X3 */
static void hmc5843_init_client(struct i2c_client *client,
const struct i2c_device_id *id)
@@ -649,7 +620,7 @@ static void hmc5843_init_client(struct i2c_client *client,
data->variant = &hmc5843_chip_info_tbl[id->driver_data];
indio_dev->channels = data->variant->channels;
- indio_dev->num_channels = data->variant->num_channels;
+ indio_dev->num_channels = 3;
hmc5843_set_meas_conf(client, data->meas_conf);
hmc5843_set_rate(client, data->rate);
hmc5843_configure(client, data->operating_mode);
@@ -756,8 +727,6 @@ static struct i2c_driver hmc5843_driver = {
.id_table = hmc5843_id,
.probe = hmc5843_probe,
.remove = hmc5843_remove,
- .detect = hmc5843_detect,
- .address_list = normal_i2c,
};
module_i2c_driver(hmc5843_driver);
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index e5943e2287c..74025fbae67 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -225,21 +225,6 @@ static int ade7753_reset(struct device *dev)
return ade7753_spi_write_reg_16(dev, ADE7753_MODE, val);
}
-static ssize_t ade7753_write_reset(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- if (len < 1)
- return -1;
- switch (buf[0]) {
- case '1':
- case 'y':
- case 'Y':
- return ade7753_reset(dev);
- }
- return -1;
-}
-
static IIO_DEV_ATTR_AENERGY(ade7753_read_24bit, ADE7753_AENERGY);
static IIO_DEV_ATTR_LAENERGY(ade7753_read_24bit, ADE7753_LAENERGY);
static IIO_DEV_ATTR_VAENERGY(ade7753_read_24bit, ADE7753_VAENERGY);
@@ -458,8 +443,6 @@ static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
ade7753_read_frequency,
ade7753_write_frequency);
-static IIO_DEV_ATTR_RESET(ade7753_write_reset);
-
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("27900 14000 7000 3500");
static struct attribute *ade7753_attributes[] = {
@@ -468,7 +451,6 @@ static struct attribute *ade7753_attributes[] = {
&iio_const_attr_in_temp_scale.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_reset.dev_attr.attr,
&iio_dev_attr_phcal.dev_attr.attr,
&iio_dev_attr_cfden.dev_attr.attr,
&iio_dev_attr_aenergy.dev_attr.attr,
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index 7b6503bf9a7..f649ebe55a0 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -224,22 +224,6 @@ static int ade7754_reset(struct device *dev)
return ade7754_spi_write_reg_8(dev, ADE7754_OPMODE, val);
}
-
-static ssize_t ade7754_write_reset(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- if (len < 1)
- return -1;
- switch (buf[0]) {
- case '1':
- case 'y':
- case 'Y':
- return ade7754_reset(dev);
- }
- return -1;
-}
-
static IIO_DEV_ATTR_AENERGY(ade7754_read_24bit, ADE7754_AENERGY);
static IIO_DEV_ATTR_LAENERGY(ade7754_read_24bit, ADE7754_LAENERGY);
static IIO_DEV_ATTR_VAENERGY(ade7754_read_24bit, ADE7754_VAENERGY);
@@ -478,8 +462,6 @@ static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
ade7754_read_frequency,
ade7754_write_frequency);
-static IIO_DEV_ATTR_RESET(ade7754_write_reset);
-
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("26000 13000 65000 33000");
static struct attribute *ade7754_attributes[] = {
@@ -488,7 +470,6 @@ static struct attribute *ade7754_attributes[] = {
&iio_const_attr_in_temp_scale.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_reset.dev_attr.attr,
&iio_dev_attr_aenergy.dev_attr.attr,
&iio_dev_attr_laenergy.dev_attr.attr,
&iio_dev_attr_vaenergy.dev_attr.attr,
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index 8f5bcfab356..6005d4aab0c 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -313,21 +313,6 @@ static int ade7758_reset(struct device *dev)
return ret;
}
-static ssize_t ade7758_write_reset(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- if (len < 1)
- return -1;
- switch (buf[0]) {
- case '1':
- case 'y':
- case 'Y':
- return ade7758_reset(dev);
- }
- return len;
-}
-
static IIO_DEV_ATTR_VPEAK(S_IWUSR | S_IRUGO,
ade7758_read_8bit,
ade7758_write_8bit,
@@ -591,8 +576,6 @@ static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
ade7758_read_frequency,
ade7758_write_frequency);
-static IIO_DEV_ATTR_RESET(ade7758_write_reset);
-
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("26040 13020 6510 3255");
static struct attribute *ade7758_attributes[] = {
@@ -601,7 +584,6 @@ static struct attribute *ade7758_attributes[] = {
&iio_const_attr_in_temp_scale.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_reset.dev_attr.attr,
&iio_dev_attr_awatthr.dev_attr.attr,
&iio_dev_attr_bwatthr.dev_attr.attr,
&iio_dev_attr_cwatthr.dev_attr.attr,
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
index b29e2d5d993..7d5db717557 100644
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -54,7 +54,7 @@ out:
return ret;
}
-/* Whilst this makes a lot of calls to iio_sw_ring functions - it is to device
+/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too device
* specific to be rolled into the core.
*/
static irqreturn_t ade7758_trigger_handler(int irq, void *p)
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index 17dc373e108..d214ac4932c 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -229,21 +229,6 @@ static int ade7759_reset(struct device *dev)
return ret;
}
-static ssize_t ade7759_write_reset(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- if (len < 1)
- return -1;
- switch (buf[0]) {
- case '1':
- case 'y':
- case 'Y':
- return ade7759_reset(dev);
- }
- return -1;
-}
-
static IIO_DEV_ATTR_AENERGY(ade7759_read_40bit, ADE7759_AENERGY);
static IIO_DEV_ATTR_CFDEN(S_IWUSR | S_IRUGO,
ade7759_read_16bit,
@@ -418,8 +403,6 @@ static IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO,
ade7759_read_frequency,
ade7759_write_frequency);
-static IIO_DEV_ATTR_RESET(ade7759_write_reset);
-
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("27900 14000 7000 3500");
static struct attribute *ade7759_attributes[] = {
@@ -428,7 +411,6 @@ static struct attribute *ade7759_attributes[] = {
&iio_const_attr_in_temp_scale.dev_attr.attr,
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_reset.dev_attr.attr,
&iio_dev_attr_phcal.dev_attr.attr,
&iio_dev_attr_cfden.dev_attr.attr,
&iio_dev_attr_aenergy.dev_attr.attr,
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
index c642da84842..e8379c0f117 100644
--- a/drivers/staging/iio/meter/ade7854.c
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -186,22 +186,6 @@ static int ade7854_reset(struct device *dev)
return st->write_reg_16(dev, ADE7854_CONFIG, val);
}
-
-static ssize_t ade7854_write_reset(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- if (len < 1)
- return -1;
- switch (buf[0]) {
- case '1':
- case 'y':
- case 'Y':
- return ade7854_reset(dev);
- }
- return -1;
-}
-
static IIO_DEV_ATTR_AIGAIN(S_IWUSR | S_IRUGO,
ade7854_read_24bit,
ade7854_write_24bit,
@@ -468,8 +452,6 @@ err_ret:
return ret;
}
-static IIO_DEV_ATTR_RESET(ade7854_write_reset);
-
static IIO_CONST_ATTR_SAMP_FREQ_AVAIL("8000");
static IIO_CONST_ATTR(name, "ade7854");
@@ -515,7 +497,6 @@ static struct attribute *ade7854_attributes[] = {
&iio_dev_attr_bvahr.dev_attr.attr,
&iio_dev_attr_cvahr.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
- &iio_dev_attr_reset.dev_attr.attr,
&iio_const_attr_name.dev_attr.attr,
&iio_dev_attr_vpeak.dev_attr.attr,
&iio_dev_attr_ipeak.dev_attr.attr,
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index 0d3356d4b7d..dcdadbbcf7e 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -192,21 +192,6 @@ static inline int ad2s1210_soft_reset(struct ad2s1210_state *st)
return ad2s1210_config_write(st, 0x0);
}
-static ssize_t ad2s1210_store_softreset(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct ad2s1210_state *st = iio_priv(dev_to_iio_dev(dev));
- int ret;
-
- mutex_lock(&st->lock);
- ret = ad2s1210_soft_reset(st);
- mutex_unlock(&st->lock);
-
- return ret < 0 ? ret : len;
-}
-
static ssize_t ad2s1210_show_fclkin(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -536,8 +521,6 @@ error_ret:
return ret;
}
-static IIO_DEVICE_ATTR(reset, S_IWUSR,
- NULL, ad2s1210_store_softreset, 0);
static IIO_DEVICE_ATTR(fclkin, S_IRUGO | S_IWUSR,
ad2s1210_show_fclkin, ad2s1210_store_fclkin, 0);
static IIO_DEVICE_ATTR(fexcit, S_IRUGO | S_IWUSR,
@@ -587,7 +570,6 @@ static const struct iio_chan_spec ad2s1210_channels[] = {
};
static struct attribute *ad2s1210_attributes[] = {
- &iio_dev_attr_reset.dev_attr.attr,
&iio_dev_attr_fclkin.dev_attr.attr,
&iio_dev_attr_fexcit.dev_attr.attr,
&iio_dev_attr_control.dev_attr.attr,
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig
index 22339059837..394254f7d6b 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/staging/imx-drm/Kconfig
@@ -15,7 +15,7 @@ config DRM_IMX_FB_HELPER
help
The DRM framework can provide a legacy /dev/fb0 framebuffer
for your device. This is necessary to get a framebuffer console
- and also for appplications using the legacy framebuffer API
+ and also for applications using the legacy framebuffer API
config DRM_IMX_PARALLEL_DISPLAY
tristate "Support for parallel displays"
@@ -32,8 +32,7 @@ config DRM_IMX_TVE
config DRM_IMX_LDB
tristate "Support for LVDS displays"
- depends on DRM_IMX
- select OF_VIDEOMODE
+ depends on DRM_IMX && MFD_SYSCON
help
Choose this to enable the internal LVDS Display Bridge (LDB)
found on i.MX53 and i.MX6 processors.
diff --git a/drivers/staging/imx-drm/TODO b/drivers/staging/imx-drm/TODO
index f80641528f7..9cfa2a7efdc 100644
--- a/drivers/staging/imx-drm/TODO
+++ b/drivers/staging/imx-drm/TODO
@@ -10,7 +10,6 @@ TODO:
Missing features (not necessarily for moving out of staging):
- Add KMS plane support for CRTC driver
-- Add LDB (LVDS Display Bridge) support
- Add i.MX6 HDMI support
- Add support for IC (Image converter)
- Add support for CSI (CMOS Sensor interface)
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 9854a1daf60..47c5888461f 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -69,28 +69,20 @@ struct imx_drm_connector {
struct module *owner;
};
-static int imx_drm_driver_firstopen(struct drm_device *drm)
-{
- if (!imx_drm_device_get())
- return -EINVAL;
-
- return 0;
-}
-
static void imx_drm_driver_lastclose(struct drm_device *drm)
{
struct imx_drm_device *imxdrm = drm->dev_private;
if (imxdrm->fbhelper)
drm_fbdev_cma_restore_mode(imxdrm->fbhelper);
-
- imx_drm_device_put();
}
static int imx_drm_driver_unload(struct drm_device *drm)
{
struct imx_drm_device *imxdrm = drm->dev_private;
+ imx_drm_device_put();
+
drm_mode_config_cleanup(imxdrm->drm);
drm_kms_helper_poll_fini(imxdrm->drm);
@@ -207,7 +199,6 @@ static const struct file_operations imx_drm_driver_fops = {
.unlocked_ioctl = drm_ioctl,
.mmap = drm_gem_cma_mmap,
.poll = drm_poll,
- .fasync = drm_fasync,
.read = drm_read,
.llseek = noop_llseek,
};
@@ -226,8 +217,6 @@ struct drm_device *imx_drm_device_get(void)
struct imx_drm_connector *con;
struct imx_drm_crtc *crtc;
- mutex_lock(&imxdrm->mutex);
-
list_for_each_entry(enc, &imxdrm->encoder_list, list) {
if (!try_module_get(enc->owner)) {
dev_err(imxdrm->dev, "could not get module %s\n",
@@ -254,8 +243,6 @@ struct drm_device *imx_drm_device_get(void)
imxdrm->references++;
- mutex_unlock(&imxdrm->mutex);
-
return imxdrm->drm;
unwind_crtc:
@@ -447,6 +434,9 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
*/
imxdrm->drm->vblank_disable_allowed = 1;
+ if (!imx_drm_device_get())
+ ret = -EINVAL;
+
ret = 0;
err_init:
@@ -678,6 +668,7 @@ found:
return i;
}
+EXPORT_SYMBOL_GPL(imx_drm_encoder_get_mux_id);
/*
* imx_drm_remove_encoder - remove an encoder
@@ -783,7 +774,7 @@ int imx_drm_remove_connector(struct imx_drm_connector *imx_drm_connector)
}
EXPORT_SYMBOL_GPL(imx_drm_remove_connector);
-static struct drm_ioctl_desc imx_drm_ioctls[] = {
+static const struct drm_ioctl_desc imx_drm_ioctls[] = {
/* none so far */
};
@@ -791,13 +782,12 @@ static struct drm_driver imx_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM,
.load = imx_drm_driver_load,
.unload = imx_drm_driver_unload,
- .firstopen = imx_drm_driver_firstopen,
.lastclose = imx_drm_driver_lastclose,
.gem_free_object = drm_gem_cma_free_object,
.gem_vm_ops = &drm_gem_cma_vm_ops,
.dumb_create = drm_gem_cma_dumb_create,
.dumb_map_offset = drm_gem_cma_dumb_map_offset,
- .dumb_destroy = drm_gem_cma_dumb_destroy,
+ .dumb_destroy = drm_gem_dumb_destroy,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = imx_drm_enable_vblank,
diff --git a/drivers/staging/imx-drm/imx-ldb.c b/drivers/staging/imx-drm/imx-ldb.c
index 8af7f3b40ba..af733ea4856 100644
--- a/drivers/staging/imx-drm/imx-ldb.c
+++ b/drivers/staging/imx-drm/imx-ldb.c
@@ -497,7 +497,7 @@ static int imx_ldb_probe(struct platform_device *pdev)
imx_ldb->ldb_ctrl |= LDB_SPLIT_MODE_EN;
/*
- * There are three diferent possible clock mux configurations:
+ * There are three different possible clock mux configurations:
* i.MX53: ipu1_di0_sel, ipu1_di1_sel
* i.MX6q: ipu1_di0_sel, ipu1_di1_sel, ipu2_di0_sel, ipu2_di1_sel
* i.MX6dl: ipu1_di0_sel, ipu1_di1_sel, lcdif_sel
@@ -623,3 +623,4 @@ module_platform_driver(imx_ldb_driver);
MODULE_DESCRIPTION("i.MX LVDS driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c
index a56797d88ed..33d6525cf99 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/staging/imx-drm/imx-tve.c
@@ -21,7 +21,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/module.h>
-#include <linux/of_i2c.h>
+#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
@@ -131,12 +131,14 @@ struct imx_tve {
};
static void tve_lock(void *__tve)
+__acquires(&tve->lock)
{
struct imx_tve *tve = __tve;
spin_lock(&tve->lock);
}
static void tve_unlock(void *__tve)
+__releases(&tve->lock)
{
struct imx_tve *tve = __tve;
spin_unlock(&tve->lock);
@@ -164,7 +166,10 @@ static void tve_enable(struct imx_tve *tve)
regmap_write(tve->regmap, TVE_INT_CONT_REG, 0);
else
regmap_write(tve->regmap, TVE_INT_CONT_REG,
- TVE_CD_SM_IEN | TVE_CD_LM_IEN | TVE_CD_MON_END_IEN);
+ TVE_CD_SM_IEN |
+ TVE_CD_LM_IEN |
+ TVE_CD_MON_END_IEN);
+
spin_unlock_irqrestore(&tve->enable_lock, flags);
}
@@ -465,7 +470,9 @@ static int clk_tve_di_set_rate(struct clk_hw *hw, unsigned long rate,
else
val = TVE_DAC_FULL_RATE;
- ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, TVE_DAC_SAMP_RATE_MASK, val);
+ ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
+ TVE_DAC_SAMP_RATE_MASK, val);
+
if (ret < 0) {
dev_err(tve->dev, "failed to set divider: %d\n", ret);
return ret;
@@ -609,13 +616,17 @@ static int imx_tve_probe(struct platform_device *pdev)
}
if (tve->mode == TVE_MODE_VGA) {
- ret = of_property_read_u32(np, "fsl,hsync-pin", &tve->hsync_pin);
+ ret = of_property_read_u32(np, "fsl,hsync-pin",
+ &tve->hsync_pin);
+
if (ret < 0) {
dev_err(&pdev->dev, "failed to get vsync pin\n");
return ret;
}
- ret |= of_property_read_u32(np, "fsl,vsync-pin", &tve->vsync_pin);
+ ret |= of_property_read_u32(np, "fsl,vsync-pin",
+ &tve->vsync_pin);
+
if (ret < 0) {
dev_err(&pdev->dev, "failed to get vsync pin\n");
return ret;
@@ -623,11 +634,6 @@ static int imx_tve_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "failed to get memory region\n");
- return -ENOENT;
- }
-
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -743,3 +749,4 @@ module_platform_driver(imx_tve_driver);
MODULE_DESCRIPTION("i.MX Television Encoder driver");
MODULE_AUTHOR("Philipp Zabel, Pengutronix");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx-tve");
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
index e35d0bf03c7..ba464e5d9f1 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
@@ -1075,21 +1075,23 @@ static int ipu_probe(struct platform_device *pdev)
ipu->cpmem_base = devm_ioremap(&pdev->dev,
ipu_base + devtype->cpmem_ofs, PAGE_SIZE);
- if (!ipu->cm_reg || !ipu->idmac_reg || !ipu->cpmem_base) {
- ret = -ENOMEM;
- goto failed_ioremap;
- }
+ if (!ipu->cm_reg || !ipu->idmac_reg || !ipu->cpmem_base)
+ return -ENOMEM;
ipu->clk = devm_clk_get(&pdev->dev, "bus");
if (IS_ERR(ipu->clk)) {
ret = PTR_ERR(ipu->clk);
dev_err(&pdev->dev, "clk_get failed with %d", ret);
- goto failed_clk_get;
+ return ret;
}
platform_set_drvdata(pdev, ipu);
- clk_prepare_enable(ipu->clk);
+ ret = clk_prepare_enable(ipu->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret);
+ return ret;
+ }
ipu->dev = &pdev->dev;
ipu->irq_sync = irq_sync;
@@ -1134,8 +1136,6 @@ out_failed_reset:
ipu_irq_exit(ipu);
out_failed_irq:
clk_disable_unprepare(ipu->clk);
-failed_clk_get:
-failed_ioremap:
return ret;
}
@@ -1163,6 +1163,7 @@ static struct platform_driver imx_ipu_driver = {
module_platform_driver(imx_ipu_driver);
+MODULE_ALIAS("platform:imx-ipuv3");
MODULE_DESCRIPTION("i.MX IPU v3 driver");
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-dc.c b/drivers/staging/imx-drm/ipu-v3/ipu-dc.c
index 59f03f9aefc..21bf1c80652 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-dc.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-dc.c
@@ -161,14 +161,15 @@ int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
u32 pixel_fmt, u32 width)
{
struct ipu_dc_priv *priv = dc->priv;
- u32 reg = 0, map;
+ u32 reg = 0;
+ int map;
dc->di = ipu_di_get_num(di);
map = ipu_pixfmt_to_map(pixel_fmt);
if (map < 0) {
dev_dbg(priv->dev, "IPU_DISP: No MAP\n");
- return -EINVAL;
+ return map;
}
if (interlaced) {
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-di.c b/drivers/staging/imx-drm/ipu-v3/ipu-di.c
index 0b6806e2069..948a49b289e 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-di.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-di.c
@@ -654,7 +654,9 @@ EXPORT_SYMBOL_GPL(ipu_di_init_sync_panel);
int ipu_di_enable(struct ipu_di *di)
{
- clk_prepare_enable(di->clk_di_pixel);
+ int ret = clk_prepare_enable(di->clk_di_pixel);
+ if (ret)
+ return ret;
ipu_module_enable(di->ipu, di->module);
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-dp.c b/drivers/staging/imx-drm/ipu-v3/ipu-dp.c
index 113b046c022..231afd6c60f 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-dp.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-dp.c
@@ -46,6 +46,8 @@
#define DP_COM_CONF_CSC_DEF_BG (2 << 8)
#define DP_COM_CONF_CSC_DEF_BOTH (1 << 8)
+#define IPUV3_NUM_FLOWS 3
+
struct ipu_dp_priv;
struct ipu_dp {
@@ -67,7 +69,7 @@ struct ipu_dp_priv {
struct ipu_soc *ipu;
struct device *dev;
void __iomem *base;
- struct ipu_flow flow[3];
+ struct ipu_flow flow[IPUV3_NUM_FLOWS];
struct mutex mutex;
int use_count;
};
@@ -280,7 +282,7 @@ struct ipu_dp *ipu_dp_get(struct ipu_soc *ipu, unsigned int flow)
struct ipu_dp_priv *priv = ipu->dp_priv;
struct ipu_dp *dp;
- if (flow > 5)
+ if ((flow >> 1) >= IPUV3_NUM_FLOWS)
return ERR_PTR(-EINVAL);
if (flow & 1)
@@ -309,19 +311,20 @@ int ipu_dp_init(struct ipu_soc *ipu, struct device *dev, unsigned long base)
int i;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
priv->dev = dev;
priv->ipu = ipu;
ipu->dp_priv = priv;
priv->base = devm_ioremap(dev, base, PAGE_SIZE);
- if (!priv->base) {
+ if (!priv->base)
return -ENOMEM;
- }
mutex_init(&priv->mutex);
- for (i = 0; i < 3; i++) {
+ for (i = 0; i < IPUV3_NUM_FLOWS; i++) {
priv->flow[i].foreground.foreground = 1;
priv->flow[i].base = priv->base + ipu_dp_flow_base[i];
priv->flow[i].priv = priv;
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/staging/imx-drm/ipuv3-crtc.c
index 9176a8171e6..6fd37a7453e 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/staging/imx-drm/ipuv3-crtc.c
@@ -129,7 +129,8 @@ static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode)
static int ipu_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event,
+ uint32_t page_flip_flags)
{
struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
int ret;
@@ -562,3 +563,4 @@ module_platform_driver(ipu_drm_driver);
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx-ipuv3-crtc");
diff --git a/drivers/staging/imx-drm/parallel-display.c b/drivers/staging/imx-drm/parallel-display.c
index cea9f14fff4..24aa9beedcf 100644
--- a/drivers/staging/imx-drm/parallel-display.c
+++ b/drivers/staging/imx-drm/parallel-display.c
@@ -272,3 +272,4 @@ module_platform_driver(imx_pd_driver);
MODULE_DESCRIPTION("i.MX parallel display driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx-parallel-display");
diff --git a/drivers/staging/keucr/scsiglue.c b/drivers/staging/keucr/scsiglue.c
index afb00d84679..ac3d34dcc43 100644
--- a/drivers/staging/keucr/scsiglue.c
+++ b/drivers/staging/keucr/scsiglue.c
@@ -289,10 +289,7 @@ US_DO_ALL_FLAGS
***********************************************************************/
/* Output routine for the sysfs max_sectors file */
-/*
- * show_max_sectors()
- */
-static ssize_t show_max_sectors(struct device *dev,
+static ssize_t max_sectors_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
@@ -302,10 +299,7 @@ static ssize_t show_max_sectors(struct device *dev,
}
/* Input routine for the sysfs max_sectors file */
-/*
- * store_max_sectors()
- */
-static ssize_t store_max_sectors(struct device *dev,
+static ssize_t max_sectors_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -319,9 +313,8 @@ static ssize_t store_max_sectors(struct device *dev,
}
return -EINVAL;
}
+static DEVICE_ATTR_RW(max_sectors);
-static DEVICE_ATTR(max_sectors, S_IRUGO | S_IWUSR, show_max_sectors,
- store_max_sectors);
static struct device_attribute *sysfs_device_attr_list[] = {
&dev_attr_max_sectors, NULL,
};
diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
index 6252aca8286..471c10c116e 100644
--- a/drivers/staging/line6/driver.c
+++ b/drivers/staging/line6/driver.c
@@ -568,15 +568,6 @@ ssize_t line6_nop_read(struct device *dev, struct device_attribute *attr,
}
/*
- No operation (i.e., unsupported).
-*/
-ssize_t line6_nop_write(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return count;
-}
-
-/*
Generic destructor.
*/
static void line6_destruct(struct usb_interface *interface)
diff --git a/drivers/staging/line6/driver.h b/drivers/staging/line6/driver.h
index a8341f9fdb9..34ae95e7e51 100644
--- a/drivers/staging/line6/driver.h
+++ b/drivers/staging/line6/driver.h
@@ -190,9 +190,6 @@ extern char *line6_alloc_sysex_buffer(struct usb_line6 *line6, int code1,
int code2, int size);
extern ssize_t line6_nop_read(struct device *dev,
struct device_attribute *attr, char *buf);
-extern ssize_t line6_nop_write(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count);
extern int line6_read_data(struct usb_line6 *line6, int address, void *data,
size_t datalen);
extern int line6_read_serial_number(struct usb_line6 *line6,
diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
index 0dd08ef5139..6a0648cd03a 100644
--- a/drivers/staging/line6/pcm.c
+++ b/drivers/staging/line6/pcm.c
@@ -34,8 +34,8 @@ static struct snd_line6_pcm *dev2pcm(struct device *dev)
/*
"read" request on "impulse_volume" special file.
*/
-static ssize_t pcm_get_impulse_volume(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t impulse_volume_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", dev2pcm(dev)->impulse_volume);
}
@@ -43,9 +43,9 @@ static ssize_t pcm_get_impulse_volume(struct device *dev,
/*
"write" request on "impulse_volume" special file.
*/
-static ssize_t pcm_set_impulse_volume(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t impulse_volume_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct snd_line6_pcm *line6pcm = dev2pcm(dev);
int value;
@@ -64,12 +64,13 @@ static ssize_t pcm_set_impulse_volume(struct device *dev,
return count;
}
+static DEVICE_ATTR_RW(impulse_volume);
/*
"read" request on "impulse_period" special file.
*/
-static ssize_t pcm_get_impulse_period(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t impulse_period_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", dev2pcm(dev)->impulse_period);
}
@@ -77,9 +78,9 @@ static ssize_t pcm_get_impulse_period(struct device *dev,
/*
"write" request on "impulse_period" special file.
*/
-static ssize_t pcm_set_impulse_period(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t impulse_period_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
int value;
int ret;
@@ -91,11 +92,7 @@ static ssize_t pcm_set_impulse_period(struct device *dev,
dev2pcm(dev)->impulse_period = value;
return count;
}
-
-static DEVICE_ATTR(impulse_volume, S_IWUSR | S_IRUGO, pcm_get_impulse_volume,
- pcm_set_impulse_volume);
-static DEVICE_ATTR(impulse_period, S_IWUSR | S_IRUGO, pcm_get_impulse_period,
- pcm_set_impulse_period);
+static DEVICE_ATTR_RW(impulse_period);
#endif
diff --git a/drivers/staging/line6/pod.c b/drivers/staging/line6/pod.c
index 699b2172506..f4e95a614e3 100644
--- a/drivers/staging/line6/pod.c
+++ b/drivers/staging/line6/pod.c
@@ -192,8 +192,8 @@ static int pod_set_system_param_int(struct usb_line6_pod *pod, int value,
/*
"read" request on "serial_number" special file.
*/
-static ssize_t pod_get_serial_number(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t serial_number_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
@@ -203,9 +203,8 @@ static ssize_t pod_get_serial_number(struct device *dev,
/*
"read" request on "firmware_version" special file.
*/
-static ssize_t pod_get_firmware_version(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
@@ -216,8 +215,8 @@ static ssize_t pod_get_firmware_version(struct device *dev,
/*
"read" request on "device_id" special file.
*/
-static ssize_t pod_get_device_id(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t device_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
@@ -274,11 +273,9 @@ static void pod_startup4(struct work_struct *work)
}
/* POD special files: */
-static DEVICE_ATTR(device_id, S_IRUGO, pod_get_device_id, line6_nop_write);
-static DEVICE_ATTR(firmware_version, S_IRUGO, pod_get_firmware_version,
- line6_nop_write);
-static DEVICE_ATTR(serial_number, S_IRUGO, pod_get_serial_number,
- line6_nop_write);
+static DEVICE_ATTR_RO(device_id);
+static DEVICE_ATTR_RO(firmware_version);
+static DEVICE_ATTR_RO(serial_number);
/* control info callback */
static int snd_pod_control_monitor_info(struct snd_kcontrol *kcontrol,
diff --git a/drivers/staging/lustre/Makefile b/drivers/staging/lustre/Makefile
index 26162893fd2..fb0e0faf076 100644
--- a/drivers/staging/lustre/Makefile
+++ b/drivers/staging/lustre/Makefile
@@ -1,4 +1,4 @@
subdir-ccflags-y := -I$(src)/include/
-obj-$(CONFIG_LUSTRE_FS) += lustre/
obj-$(CONFIG_LNET) += lnet/
+obj-$(CONFIG_LUSTRE_FS) += lustre/
diff --git a/drivers/staging/lustre/include/linux/libcfs/bitmap.h b/drivers/staging/lustre/include/linux/libcfs/bitmap.h
index 3f1c37b4bb7..f3d4a896a75 100644
--- a/drivers/staging/lustre/include/linux/libcfs/bitmap.h
+++ b/drivers/staging/lustre/include/linux/libcfs/bitmap.h
@@ -52,11 +52,11 @@ cfs_bitmap_t *CFS_ALLOCATE_BITMAP(int size)
OBD_ALLOC(ptr, CFS_BITMAP_SIZE(size));
if (ptr == NULL)
- RETURN(ptr);
+ return ptr;
ptr->size = size;
- RETURN (ptr);
+ return ptr;
}
#define CFS_FREE_BITMAP(ptr) OBD_FREE(ptr, CFS_BITMAP_SIZE(ptr->size))
diff --git a/drivers/staging/lustre/include/linux/libcfs/curproc.h b/drivers/staging/lustre/include/linux/libcfs/curproc.h
index 90d7ce630e9..de8e35b796a 100644
--- a/drivers/staging/lustre/include/linux/libcfs/curproc.h
+++ b/drivers/staging/lustre/include/linux/libcfs/curproc.h
@@ -49,8 +49,6 @@
* Implemented in portals/include/libcfs/<os>/
*/
int cfs_curproc_groups_nr(void);
-int current_is_in_group(gid_t group);
-void cfs_curproc_groups_dump(gid_t *array, int size);
/*
* Plus, platform-specific constant
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index 1ab1f2be9aa..687dbab2c4e 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -80,26 +80,10 @@ static inline int __is_po2(unsigned long long val)
#define LERRCHKSUM(hexnum) (((hexnum) & 0xf) ^ ((hexnum) >> 4 & 0xf) ^ \
((hexnum) >> 8 & 0xf))
-
-/*
- * Some (nomina odiosa sunt) platforms define NULL as naked 0. This confuses
- * Lustre RETURN(NULL) macro.
- */
-#if defined(NULL)
-#undef NULL
-#endif
-
-#define NULL ((void *)0)
-
#define LUSTRE_SRV_LNET_PID LUSTRE_LNET_PID
-
#include <linux/list.h>
-#ifndef cfs_for_each_possible_cpu
-# error cfs_for_each_possible_cpu is not supported by kernel!
-#endif
-
/* libcfs tcpip */
int libcfs_ipif_query(char *name, int *up, __u32 *ip, __u32 *mask);
int libcfs_ipif_enumerate(char ***names);
@@ -117,31 +101,6 @@ int libcfs_sock_write(socket_t *sock, void *buffer, int nob, int timeout);
int libcfs_sock_read(socket_t *sock, void *buffer, int nob, int timeout);
void libcfs_sock_release(socket_t *sock);
-/* libcfs watchdogs */
-struct lc_watchdog;
-
-/* Add a watchdog which fires after "time" milliseconds of delay. You have to
- * touch it once to enable it. */
-struct lc_watchdog *lc_watchdog_add(int time,
- void (*cb)(pid_t pid, void *),
- void *data);
-
-/* Enables a watchdog and resets its timer. */
-void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout);
-#define CFS_GET_TIMEOUT(svc) (max_t(int, obd_timeout, \
- AT_OFF ? 0 : at_get(&svc->srv_at_estimate)) * \
- svc->srv_watchdog_factor)
-
-/* Disable a watchdog; touch it to restart it. */
-void lc_watchdog_disable(struct lc_watchdog *lcw);
-
-/* Clean up the watchdog */
-void lc_watchdog_delete(struct lc_watchdog *lcw);
-
-/* Dump a debug log */
-void lc_watchdog_dumplog(pid_t pid, void *data);
-
-
/* need both kernel and user-land acceptor */
#define LNET_ACCEPTOR_MIN_RESERVED_PORT 512
#define LNET_ACCEPTOR_MAX_RESERVED_PORT 1023
@@ -149,11 +108,6 @@ void lc_watchdog_dumplog(pid_t pid, void *data);
/*
* libcfs pseudo device operations
*
- * struct psdev_t and
- * misc_register() and
- * misc_deregister() are declared in
- * libcfs/<os>/<os>-prim.h
- *
* It's just draft now.
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
index 6ae7415a3b9..c87efb49ebc 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
@@ -75,11 +75,19 @@
#ifndef __LIBCFS_CPU_H__
#define __LIBCFS_CPU_H__
-#ifndef HAVE_LIBCFS_CPT
-
-typedef unsigned long cpumask_t;
-typedef unsigned long nodemask_t;
+/* any CPU partition */
+#define CFS_CPT_ANY (-1)
+#ifdef CONFIG_SMP
+/**
+ * return cpumask of CPU partition \a cpt
+ */
+cpumask_t *cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt);
+/**
+ * print string information of cpt-table
+ */
+int cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len);
+#else /* !CONFIG_SMP */
struct cfs_cpt_table {
/* # of CPU partitions */
int ctb_nparts;
@@ -91,10 +99,18 @@ struct cfs_cpt_table {
__u64 ctb_version;
};
-#endif /* !HAVE_LIBCFS_CPT */
+static inline cpumask_t *
+cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt)
+{
+ return NULL;
+}
-/* any CPU partition */
-#define CFS_CPT_ANY (-1)
+static inline int
+cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len)
+{
+ return 0;
+}
+#endif /* CONFIG_SMP */
extern struct cfs_cpt_table *cfs_cpt_table;
@@ -107,10 +123,6 @@ void cfs_cpt_table_free(struct cfs_cpt_table *cptab);
*/
struct cfs_cpt_table *cfs_cpt_table_alloc(unsigned int ncpt);
/**
- * print string information of cpt-table
- */
-int cfs_cpt_table_print(struct cfs_cpt_table *cptab, char *buf, int len);
-/**
* return total number of CPU partitions in \a cptab
*/
int
@@ -124,10 +136,6 @@ int cfs_cpt_weight(struct cfs_cpt_table *cptab, int cpt);
*/
int cfs_cpt_online(struct cfs_cpt_table *cptab, int cpt);
/**
- * return cpumask of CPU partition \a cpt
- */
-cpumask_t *cfs_cpt_cpumask(struct cfs_cpt_table *cptab, int cpt);
-/**
* return nodemask of CPU partition \a cpt
*/
nodemask_t *cfs_cpt_nodemask(struct cfs_cpt_table *cptab, int cpt);
@@ -200,14 +208,6 @@ int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt);
#define cfs_cpt_for_each(i, cptab) \
for (i = 0; i < cfs_cpt_number(cptab); i++)
-#ifndef __read_mostly
-# define __read_mostly
-#endif
-
-#ifndef ____cacheline_aligned
-#define ____cacheline_aligned
-#endif
-
int cfs_cpu_init(void);
void cfs_cpu_fini(void);
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
index 64ca62f0cc9..776e9c0e48c 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
@@ -136,13 +136,13 @@ int cfs_crypto_hash_digest(unsigned char alg,
/* cfs crypto hash descriptor */
struct cfs_crypto_hash_desc;
-/** Allocate and initialize desriptor for hash algorithm.
+/** Allocate and initialize descriptor for hash algorithm.
* @param alg algorithm id
* @param key initial value for algorithm, if it is NULL,
* default initial value should be used.
* @param key_len len of initial value
* @returns pointer to descriptor of hash instance
- * @retval ERR_PTR(error) when errors occured.
+ * @retval ERR_PTR(error) when errors occurred.
*/
struct cfs_crypto_hash_desc*
cfs_crypto_hash_init(unsigned char alg,
@@ -175,7 +175,7 @@ int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *desc, const void *buf,
* @param desc hash descriptor
* @param hash buffer pointer to store hash digest
* @param hash_len pointer to hash buffer size, if NULL
- * destory hash descriptor
+ * destroy hash descriptor
* @returns status of operation
* @retval -ENOSPC if hash is NULL, or *hash_len less than
* digest size
@@ -195,7 +195,7 @@ int cfs_crypto_register(void);
void cfs_crypto_unregister(void);
/** Return hash speed in Mbytes per second for valid hash algorithm
- * identifier. If test was unsuccessfull -1 would be return.
+ * identifier. If the test is unsuccessful, -1 is returned.
*/
int cfs_crypto_hash_speed(unsigned char hash_alg);
#endif
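
The descriptor comments above lay out the usual init/update/final flow. A minimal sketch of that flow, assuming the finalizer is cfs_crypto_hash_final(desc, hash, hash_len) as the surrounding declarations suggest (the helper name and buffer parameters are placeholders):

	/* Sketch: digest a buffer with the algorithm's default initial
	 * value (key == NULL).  Per the comment above, calling the
	 * finalizer with a NULL hash_len only destroys the descriptor. */
	static int example_digest(unsigned char alg, const void *buf,
				  unsigned int buf_len, unsigned char *hash,
				  unsigned int *hash_len)
	{
		struct cfs_crypto_hash_desc *desc;
		int rc;

		desc = cfs_crypto_hash_init(alg, NULL, 0);
		if (IS_ERR(desc))
			return PTR_ERR(desc);

		rc = cfs_crypto_hash_update(desc, buf, buf_len);
		if (rc == 0)
			return cfs_crypto_hash_final(desc, hash, hash_len);

		cfs_crypto_hash_final(desc, NULL, NULL);	/* free on error */
		return rc;
	}
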
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
index dd8ac2f52c9..e6439d19f3e 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -262,74 +262,6 @@ do { \
} while (0)
-/*
- * if rc == NULL, we need to code as RETURN((void *)NULL), otherwise
- * there will be a warning in osx.
- */
-#if defined(__GNUC__)
-
-long libcfs_log_return(struct libcfs_debug_msg_data *, long rc);
-#if BITS_PER_LONG > 32
-#define RETURN(rc) \
-do { \
- EXIT_NESTING; \
- if (cfs_cdebug_show(D_TRACE, DEBUG_SUBSYSTEM)) { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_TRACE, NULL); \
- return (typeof(rc))libcfs_log_return(&msgdata, \
- (long)(rc)); \
- } \
- \
- return (rc); \
-} while (0)
-#else /* BITS_PER_LONG == 32 */
-/* We need an on-stack variable, because we cannot case a 32-bit pointer
- * directly to (long long) without generating a complier warning/error, yet
- * casting directly to (long) will truncate 64-bit return values. The log
- * values will print as 32-bit values, but they always have been. LU-1436
- */
-#define RETURN(rc) \
-do { \
- EXIT_NESTING; \
- if (cfs_cdebug_show(D_TRACE, DEBUG_SUBSYSTEM)) { \
- typeof(rc) __rc = (rc); \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_TRACE, NULL); \
- libcfs_log_return(&msgdata, (long_ptr_t)__rc); \
- return __rc; \
- } \
- \
- return (rc); \
-} while (0)
-#endif /* BITS_PER_LONG > 32 */
-
-#elif defined(_MSC_VER)
-#define RETURN(rc) \
-do { \
- CDEBUG(D_TRACE, "Process leaving.\n"); \
- EXIT_NESTING; \
- return (rc); \
-} while (0)
-#else
-# error "Unkown compiler"
-#endif /* __GNUC__ */
-
-#define ENTRY \
-ENTRY_NESTING; \
-do { \
- CDEBUG(D_TRACE, "Process entered\n"); \
-} while (0)
-
-#define EXIT \
-do { \
- CDEBUG(D_TRACE, "Process leaving\n"); \
- EXIT_NESTING; \
-} while(0)
-
-#define RETURN_EXIT \
-do { \
- EXIT; \
- return; \
-} while (0)
-
extern int libcfs_debug_msg(struct libcfs_debug_msg_data *msgdata,
const char *format1, ...)
__attribute__ ((format (printf, 2, 3)));
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
index f6361b3f0a0..98f5be243c8 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
@@ -720,7 +720,7 @@ __u64 cfs_hash_size_get(cfs_hash_t *hs);
/*
* Rehash - Theta is calculated to be the average chained
- * hash depth assuming a perfectly uniform hash funcion.
+ * hash depth assuming a perfectly uniform hash function.
*/
void cfs_hash_rehash_cancel_locked(cfs_hash_t *hs);
void cfs_hash_rehash_cancel(cfs_hash_t *hs);
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
index 9c40ed904da..e6e417aeefd 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
@@ -40,10 +40,6 @@
#ifndef __LIBCFS_PRIM_H__
#define __LIBCFS_PRIM_H__
-#ifndef EXPORT_SYMBOL
-# define EXPORT_SYMBOL(s)
-#endif
-
/*
* Schedule
*/
@@ -53,20 +49,20 @@ void cfs_pause(cfs_duration_t ticks);
* Timer
*/
typedef void (cfs_timer_func_t)(ulong_ptr_t);
-void schedule_timeout_and_set_state(cfs_task_state_t, int64_t);
+void schedule_timeout_and_set_state(long, int64_t);
void init_waitqueue_entry_current(wait_queue_t *link);
-int64_t waitq_timedwait(wait_queue_t *, cfs_task_state_t, int64_t);
-void waitq_wait(wait_queue_t *, cfs_task_state_t);
+int64_t waitq_timedwait(wait_queue_t *, long, int64_t);
+void waitq_wait(wait_queue_t *, long);
void add_wait_queue_exclusive_head(wait_queue_head_t *, wait_queue_t *);
-void cfs_init_timer(timer_list_t *t);
-void cfs_timer_init(timer_list_t *t, cfs_timer_func_t *func, void *arg);
-void cfs_timer_done(timer_list_t *t);
-void cfs_timer_arm(timer_list_t *t, cfs_time_t deadline);
-void cfs_timer_disarm(timer_list_t *t);
-int cfs_timer_is_armed(timer_list_t *t);
-cfs_time_t cfs_timer_deadline(timer_list_t *t);
+void cfs_init_timer(struct timer_list *t);
+void cfs_timer_init(struct timer_list *t, cfs_timer_func_t *func, void *arg);
+void cfs_timer_done(struct timer_list *t);
+void cfs_timer_arm(struct timer_list *t, cfs_time_t deadline);
+void cfs_timer_disarm(struct timer_list *t);
+int cfs_timer_is_armed(struct timer_list *t);
+cfs_time_t cfs_timer_deadline(struct timer_list *t);
/*
* Memory
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index 056caa46712..d0d942ced01 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -210,7 +210,6 @@ do { \
#define ntohs(x) ___ntohs(x)
#endif
-void libcfs_debug_dumpstack(task_t *tsk);
void libcfs_run_upcall(char **argv);
void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *);
void libcfs_debug_dumplog(void);
@@ -230,7 +229,7 @@ void libcfs_debug_set_level(unsigned int debug_level);
*/
void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
/*
- * destory per-cpu-partition variable
+ * destroy per-cpu-partition variable
*/
void cfs_percpt_free(void *vars);
int cfs_percpt_number(void *vars);
@@ -456,10 +455,6 @@ int cfs_match_nid(lnet_nid_t nid, struct list_head *list);
/* logical equivalence */
#define equi(a, b) (!!(a) == !!(b))
-#ifndef CFS_CURRENT_TIME
-# define CFS_CURRENT_TIME time(0)
-#endif
-
/* --------------------------------------------------------------------
* Light-weight trace
* Support for temporary event tracing with minimal Heisenberg effect.
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/kp30.h b/drivers/staging/lustre/include/linux/libcfs/linux/kp30.h
index 4b7ae1c5bd3..c204b677796 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/kp30.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/kp30.h
@@ -55,7 +55,6 @@
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/module.h>
-#include <linux/version.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <linux/rwsem.h>
@@ -74,34 +73,6 @@
#include <linux/libcfs/linux/portals_compat25.h>
-#define prepare_work(wq,cb,cbdata) \
-do { \
- INIT_WORK((wq), (void *)(cb)); \
-} while (0)
-
-#define cfs_get_work_data(type,field,data) container_of(data,type,field)
-
-
-#define our_recalc_sigpending(current) recalc_sigpending()
-#define strtok(a,b) strpbrk(a, b)
-#define work_struct_t struct work_struct
-
-#ifdef CONFIG_SMP
-#else
-#endif
-
-
-#define SEM_COUNT(sem) ((sem)->count)
-
-
-/* ------------------------------------------------------------------- */
-
-#define PORTAL_SYMBOL_REGISTER(x)
-#define PORTAL_SYMBOL_UNREGISTER(x)
-
-
-
-
/******************************************************************************/
/* Module parameter support */
#define CFS_MODULE_PARM(name, t, type, perm, desc) \
@@ -111,26 +82,6 @@ do { \
#define CFS_SYSFS_MODULE_PARM 1 /* module parameters accessible via sysfs */
/******************************************************************************/
-
-#if (__GNUC__)
-/* Use the special GNU C __attribute__ hack to have the compiler check the
- * printf style argument string against the actual argument count and
- * types.
- */
-#ifdef printf
-# warning printf has been defined as a macro...
-# undef printf
-#endif
-
-#endif /* __GNUC__ */
-
-# define fprintf(a, format, b...) CDEBUG(D_OTHER, format , ## b)
-# define printf(format, b...) CDEBUG(D_OTHER, format , ## b)
-# define time(a) CURRENT_TIME
-
-# define cfs_num_present_cpus() num_present_cpus()
-
-/******************************************************************************/
/* Light-weight trace
* Support for temporary event tracing with minimal Heisenberg effect. */
#define LWT_SUPPORT 0
@@ -236,9 +187,13 @@ extern int lwt_snapshot (cfs_cycles_t *now, int *ncpu, int *total_size,
# endif
#endif
-# define LI_POISON ((int)0x5a5a5a5a5a5a5a5a)
-# define LL_POISON ((long)0x5a5a5a5a5a5a5a5a)
-# define LP_POISON ((void *)(long)0x5a5a5a5a5a5a5a5a)
+# define LI_POISON 0x5a5a5a5a
+#if BITS_PER_LONG > 32
+# define LL_POISON 0x5a5a5a5a5a5a5a5aL
+#else
+# define LL_POISON 0x5a5a5a5aL
+#endif
+# define LP_POISON ((void *)LL_POISON)
/* this is a bit chunky */
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
index 292a3ba1fb9..60ecaf63f9f 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
@@ -97,9 +97,6 @@ do { \
/* initial pid */
#define LUSTRE_LNET_PID 12345
-#define ENTRY_NESTING_SUPPORT (1)
-#define ENTRY_NESTING do {;} while (0)
-#define EXIT_NESTING do {;} while (0)
#define __current_nesting_level() (0)
/**
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
index 224371c92f7..8dd354d5160 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
@@ -46,8 +46,6 @@
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/topology.h>
-#include <linux/version.h>
-
#ifdef CONFIG_SMP
@@ -81,15 +79,8 @@ struct cfs_cpt_table {
nodemask_t *ctb_nodemask;
};
-void cfs_cpu_core_siblings(int cpu, cpumask_t *mask);
-void cfs_cpu_ht_siblings(int cpu, cpumask_t *mask);
-void cfs_node_to_cpumask(int node, cpumask_t *mask);
-int cfs_cpu_core_nsiblings(int cpu);
-int cfs_cpu_ht_nsiblings(int cpu);
-
/**
* comment out definitions for compatible layer
- * #define CFS_CPU_NR NR_CPUS
*
* typedef cpumask_t cfs_cpumask_t;
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-lock.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-lock.h
index 6fbcbf3ab0d..d6e00f92e4a 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-lock.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-lock.h
@@ -52,7 +52,7 @@
* IMPORTANT !!!!!!!!
*
* All locks' declaration are not guaranteed to be initialized,
- * Althought some of they are initialized in Linux. All locks
+ * although some of them are initialized in Linux. All locks
* declared by CFS_DECL_* should be initialized explicitly.
*/
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
index 042a2bc432b..63efb7b456c 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
@@ -63,9 +63,9 @@
#if BITS_PER_LONG == 32
/* limit to lowmem on 32-bit systems */
#define NUM_CACHEPAGES \
- min(num_physpages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4)
+ min(totalram_pages, 1UL << (30 - PAGE_CACHE_SHIFT) * 3 / 4)
#else
-#define NUM_CACHEPAGES num_physpages
+#define NUM_CACHEPAGES totalram_pages
#endif
/*
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-prim.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-prim.h
index a4963a8dfdd..1ec4ca1a6e3 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-prim.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-prim.h
@@ -49,7 +49,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/timer.h>
@@ -64,43 +63,12 @@
#include <linux/libcfs/linux/linux-time.h>
-
-/*
- * CPU
- */
-#ifdef for_each_possible_cpu
-#define cfs_for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
-#elif defined(for_each_cpu)
-#define cfs_for_each_possible_cpu(cpu) for_each_cpu(cpu)
-#endif
-
-#ifdef NR_CPUS
-#else
-#define NR_CPUS 1
-#endif
-
-/*
- * cache
- */
-
-/*
- * IRQs
- */
-
-
-/*
- * Pseudo device register
- */
-typedef struct miscdevice psdev_t;
-
/*
* Sysctl register
*/
typedef struct ctl_table ctl_table_t;
typedef struct ctl_table_header ctl_table_header_t;
-#define cfs_register_sysctl_table(t, a) register_sysctl_table(t)
-
#define DECLARE_PROC_HANDLER(name) \
static int \
LL_PROC_PROTO(name) \
@@ -112,130 +80,4 @@ LL_PROC_PROTO(name) \
__##name); \
}
-/*
- * Symbol register
- */
-#define cfs_symbol_register(s, p) do {} while(0)
-#define cfs_symbol_unregister(s) do {} while(0)
-#define cfs_symbol_get(s) symbol_get(s)
-#define cfs_symbol_put(s) symbol_put(s)
-
-typedef struct module module_t;
-
-/*
- * Proc file system APIs
- */
-typedef struct proc_dir_entry proc_dir_entry_t;
-
-/*
- * Wait Queue
- */
-
-
-typedef long cfs_task_state_t;
-
-#define CFS_DECL_WAITQ(wq) DECLARE_WAIT_QUEUE_HEAD(wq)
-
-/*
- * Task struct
- */
-typedef struct task_struct task_t;
-#define DECL_JOURNAL_DATA void *journal_info
-#define PUSH_JOURNAL do { \
- journal_info = current->journal_info; \
- current->journal_info = NULL; \
- } while(0)
-#define POP_JOURNAL do { \
- current->journal_info = journal_info; \
- } while(0)
-
-/* Module interfaces */
-#define cfs_module(name, version, init, fini) \
- module_init(init); \
- module_exit(fini)
-
-/*
- * Signal
- */
-
-/*
- * Timer
- */
-typedef struct timer_list timer_list_t;
-
-
-#ifndef wait_event_timeout /* Only for RHEL3 2.4.21 kernel */
-#define __wait_event_timeout(wq, condition, timeout, ret) \
-do { \
- int __ret = 0; \
- if (!(condition)) { \
- wait_queue_t __wait; \
- unsigned long expire; \
- \
- init_waitqueue_entry(&__wait, current); \
- expire = timeout + jiffies; \
- add_wait_queue(&wq, &__wait); \
- for (;;) { \
- set_current_state(TASK_UNINTERRUPTIBLE); \
- if (condition) \
- break; \
- if (jiffies > expire) { \
- ret = jiffies - expire; \
- break; \
- } \
- schedule_timeout(timeout); \
- } \
- current->state = TASK_RUNNING; \
- remove_wait_queue(&wq, &__wait); \
- } \
-} while (0)
-/*
- retval == 0; condition met; we're good.
- retval > 0; timed out.
-*/
-#define cfs_waitq_wait_event_timeout(wq, condition, timeout, ret) \
-do { \
- ret = 0; \
- if (!(condition)) \
- __wait_event_timeout(wq, condition, timeout, ret); \
-} while (0)
-#else
-#define cfs_waitq_wait_event_timeout(wq, condition, timeout, ret) \
- ret = wait_event_timeout(wq, condition, timeout)
-#endif
-
-#define cfs_waitq_wait_event_interruptible_timeout(wq, c, timeout, ret) \
- ret = wait_event_interruptible_timeout(wq, c, timeout)
-
-/*
- * atomic
- */
-
-
-#define cfs_atomic_add_unless(atom, a, u) atomic_add_unless(atom, a, u)
-#define cfs_atomic_cmpxchg(atom, old, nv) atomic_cmpxchg(atom, old, nv)
-
-/*
- * membar
- */
-
-
-/*
- * interrupt
- */
-
-
-/*
- * might_sleep
- */
-
-/*
- * group_info
- */
-typedef struct group_info group_info_t;
-
-
-/*
- * Random bytes
- */
#endif
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-tcpip.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-tcpip.h
index 687f33f4e8a..7a8d006903b 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-tcpip.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-tcpip.h
@@ -48,21 +48,6 @@
#include <net/sock.h>
-#ifndef HIPQUAD
-// XXX Should just kill all users
-#if defined(__LITTLE_ENDIAN)
-#define HIPQUAD(addr) \
- ((unsigned char *)&addr)[3], \
- ((unsigned char *)&addr)[2], \
- ((unsigned char *)&addr)[1], \
- ((unsigned char *)&addr)[0]
-#elif defined(__BIG_ENDIAN)
-#define HIPQUAD NIPQUAD
-#else
-#error "Please fix asm/byteorder.h"
-#endif /* __LITTLE_ENDIAN */
-#endif
-
typedef struct socket socket_t;
#define SOCK_SNDBUF(so) ((so)->sk->sk_sndbuf)
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
index 4a48b914b42..a386d1b1286 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
@@ -91,7 +91,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/time.h>
#include <asm/div64.h>
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/portals_compat25.h b/drivers/staging/lustre/include/linux/libcfs/linux/portals_compat25.h
index 132a4bec357..fe4c63fb40a 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/portals_compat25.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/portals_compat25.h
@@ -95,20 +95,5 @@ int proc_call_handler(void *data, int write,
loff_t *ppos, void *buffer, size_t *lenp,
int (*handler)(void *data, int write,
loff_t pos, void *buffer, int len));
-/*
- * CPU
- */
-#ifdef for_each_possible_cpu
-#define cfs_for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
-#elif defined(for_each_cpu)
-#define cfs_for_each_possible_cpu(cpu) for_each_cpu(cpu)
-#endif
-
-#ifdef NR_CPUS
-#else
-#define NR_CPUS 1
-#endif
-
-#define cfs_register_sysctl_table(t, a) register_sysctl_table(t)
#endif /* _PORTALS_COMPAT_H */
diff --git a/drivers/staging/lustre/include/linux/libcfs/lucache.h b/drivers/staging/lustre/include/linux/libcfs/lucache.h
index 7ae36fc88d7..9668b397f0f 100644
--- a/drivers/staging/lustre/include/linux/libcfs/lucache.h
+++ b/drivers/staging/lustre/include/linux/libcfs/lucache.h
@@ -77,7 +77,7 @@ struct md_identity {
struct upcall_cache_entry *mi_uc_entry;
uid_t mi_uid;
gid_t mi_gid;
- group_info_t *mi_ginfo;
+ struct group_info *mi_ginfo;
int mi_nperms;
struct md_perm *mi_perms;
};
diff --git a/drivers/staging/lustre/include/linux/libcfs/params_tree.h b/drivers/staging/lustre/include/linux/libcfs/params_tree.h
index 3f18a446703..78a2c4ed4d6 100644
--- a/drivers/staging/lustre/include/linux/libcfs/params_tree.h
+++ b/drivers/staging/lustre/include/linux/libcfs/params_tree.h
@@ -54,7 +54,6 @@ typedef struct proc_inode cfs_proc_inode_t;
typedef struct seq_file cfs_seq_file_t;
typedef struct seq_operations cfs_seq_ops_t;
typedef struct file_operations cfs_param_file_ops_t;
-typedef module_t *cfs_param_module_t;
typedef struct proc_dir_entry cfs_param_dentry_t;
typedef struct poll_table_struct cfs_poll_table_t;
#define CFS_PARAM_MODULE THIS_MODULE
@@ -115,11 +114,10 @@ typedef struct cfs_seq_operations {
int (*show) (cfs_seq_file_t *m, void *v);
} cfs_seq_ops_t;
-typedef void *cfs_param_module_t;
typedef void *cfs_poll_table_t;
typedef struct cfs_param_file_ops {
- cfs_param_module_t owner;
+ struct module *owner;
int (*open) (cfs_inode_t *, struct file *);
loff_t (*llseek)(struct file *, loff_t, int);
int (*release) (cfs_inode_t *, cfs_param_file_t *);
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
index 86428d4b993..e579e7ed507 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-types.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h
@@ -181,11 +181,11 @@ typedef struct lnet_msg {
lnet_nid_t msg_from;
__u32 msg_type;
- /* commited for sending */
+ /* committed for sending */
unsigned int msg_tx_committed:1;
/* CPT # this message committed for sending */
unsigned int msg_tx_cpt:15;
- /* commited for receiving */
+ /* committed for receiving */
unsigned int msg_rx_committed:1;
/* CPT # this message committed for receiving */
unsigned int msg_rx_cpt:15;
@@ -619,7 +619,7 @@ typedef struct lnet_portal {
unsigned int ptl_index; /* portal ID, reserved */
/* flags on this portal: lazy, unique... */
unsigned int ptl_options;
- /* list of messags which are stealing buffer */
+ /* list of messages which are stealing buffer */
struct list_head ptl_msg_stealing;
/* messages blocking for MD */
struct list_head ptl_msg_delayed;
diff --git a/drivers/staging/lustre/include/linux/lnet/lnetst.h b/drivers/staging/lustre/include/linux/lnet/lnetst.h
index d90f94e9460..e060599314d 100644
--- a/drivers/staging/lustre/include/linux/lnet/lnetst.h
+++ b/drivers/staging/lustre/include/linux/lnet/lnetst.h
@@ -59,7 +59,7 @@
#define LSTIO_SESSION_INFO 0xC03 /* query session */
#define LSTIO_GROUP_ADD 0xC10 /* add group */
#define LSTIO_GROUP_LIST 0xC11 /* list all groups in session */
-#define LSTIO_GROUP_INFO 0xC12 /* query defailt infomation of specified group */
+#define LSTIO_GROUP_INFO 0xC12 /* query default information of specified group */
#define LSTIO_GROUP_DEL 0xC13 /* delete group */
#define LSTIO_NODES_ADD 0xC14 /* add nodes to specified group */
#define LSTIO_GROUP_UPDATE 0xC15 /* update group */
diff --git a/drivers/staging/lustre/include/linux/lnet/ptllnd.h b/drivers/staging/lustre/include/linux/lnet/ptllnd.h
index fc1ce8ed1f8..564f5d3a9b4 100644
--- a/drivers/staging/lustre/include/linux/lnet/ptllnd.h
+++ b/drivers/staging/lustre/include/linux/lnet/ptllnd.h
@@ -68,7 +68,7 @@
/* Can compare handles directly on Cray Portals */
#define PtlHandleIsEqual(a,b) ((a) == (b))
-/* Diffrent error types on Cray Portals*/
+/* Different error types on Cray Portals */
#define ptl_err_t ptl_ni_fail_t
/*
@@ -76,7 +76,7 @@
* maximum is limited only by memory and size of the
* int parameters (2^31-1).
* Lustre only really require that the underyling
- * implemenation to support at least LNET_MAX_IOV,
+ * implementation to support at least LNET_MAX_IOV,
* so for Cray portals we can safely just use that
* value here.
*
diff --git a/drivers/staging/lustre/lnet/Makefile b/drivers/staging/lustre/lnet/Makefile
index 374212b1555..f6f03e304d8 100644
--- a/drivers/staging/lustre/lnet/Makefile
+++ b/drivers/staging/lustre/lnet/Makefile
@@ -1 +1 @@
-obj-$(CONFIG_LNET) := klnds/ lnet/ selftest/
+obj-$(CONFIG_LNET) += lnet/ klnds/ selftest/
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 29a97943e4c..86397f96b03 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -702,6 +702,8 @@ kiblnd_get_completion_vector(kib_conn_t *conn, int cpt)
return 0;
mask = cfs_cpt_cpumask(lnet_cpt_table(), cpt);
+ if (mask == NULL)
+ return 0;
/* hash NID to CPU id in this partition... */
off = do_div(nid, cpus_weight(*mask));
@@ -2574,8 +2576,8 @@ kiblnd_dev_need_failover(kib_dev_t *dev)
rc = rdma_resolve_addr(cmid, (struct sockaddr *)&srcaddr,
(struct sockaddr *)&dstaddr, 1);
if (rc != 0 || cmid->device == NULL) {
- CERROR("Failed to bind %s:%u.%u.%u.%u to device(%p): %d\n",
- dev->ibd_ifname, HIPQUAD(dev->ibd_ifip),
+ CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
+ dev->ibd_ifname, &dev->ibd_ifip,
cmid->device, rc);
rdma_destroy_id(cmid);
return rc;
@@ -2647,8 +2649,8 @@ kiblnd_dev_failover(kib_dev_t *dev)
/* Bind to failover device or port */
rc = rdma_bind_addr(cmid, (struct sockaddr *)&addr);
if (rc != 0 || cmid->device == NULL) {
- CERROR("Failed to bind %s:%u.%u.%u.%u to device(%p): %d\n",
- dev->ibd_ifname, HIPQUAD(dev->ibd_ifip),
+ CERROR("Failed to bind %s:%pI4h to device(%p): %d\n",
+ dev->ibd_ifname, &dev->ibd_ifip,
cmid->device, rc);
rdma_destroy_id(cmid);
goto out;
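
The CERROR()/CDEBUG() conversions in this file (and in the socklnd changes below) drop the HIPQUAD() quad-dotted expansion in favour of the kernel's %pI4h format extension, which prints an IPv4 address from a pointer to a host-byte-order 32-bit value. A minimal sketch with hypothetical names:

	/* Sketch: %pI4h consumes a pointer to a host-order address, so
	 * "&ip" replaces the four byte arguments that HIPQUAD(ip) used
	 * to expand for a "%u.%u.%u.%u" format. */
	#include <linux/printk.h>
	#include <linux/types.h>

	static void example_log_peer(__u32 ip, int port)
	{
		pr_info("peer %pI4h:%d\n", &ip, port);
	}
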
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index e4626bf82fc..938df0cf8c6 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -53,7 +53,6 @@
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/file.h>
-#include <linux/stat.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/sysctl.h>
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index cc6232126dd..086ca3d7241 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -1319,9 +1319,9 @@ kiblnd_connect_peer (kib_peer_t *peer)
}
LASSERT (cmid->device != NULL);
- CDEBUG(D_NET, "%s: connection bound to %s:%u.%u.%u.%u:%s\n",
+ CDEBUG(D_NET, "%s: connection bound to %s:%pI4h:%s\n",
libcfs_nid2str(peer->ibp_nid), dev->ibd_ifname,
- HIPQUAD(dev->ibd_ifip), cmid->device->name);
+ &dev->ibd_ifip, cmid->device->name);
return;
@@ -1802,7 +1802,7 @@ kiblnd_recv (lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg, int delayed,
int
kiblnd_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- task_t *task = kthread_run(fn, arg, name);
+ struct task_struct *task = kthread_run(fn, arg, name);
if (IS_ERR(task))
return PTR_ERR(task);
@@ -2209,8 +2209,8 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
if (*kiblnd_tunables.kib_require_priv_port &&
ntohs(peer_addr->sin_port) >= PROT_SOCK) {
__u32 ip = ntohl(peer_addr->sin_addr.s_addr);
- CERROR("Peer's port (%u.%u.%u.%u:%hu) is not privileged\n",
- HIPQUAD(ip), ntohs(peer_addr->sin_port));
+ CERROR("Peer's port (%pI4h:%hu) is not privileged\n",
+ &ip, ntohs(peer_addr->sin_port));
goto failed;
}
@@ -2254,11 +2254,11 @@ kiblnd_passive_connect (struct rdma_cm_id *cmid, void *priv, int priv_nob)
if (ni == NULL || /* no matching net */
ni->ni_nid != reqmsg->ibm_dstnid || /* right NET, wrong NID! */
net->ibn_dev != ibdev) { /* wrong device */
- CERROR("Can't accept %s on %s (%s:%d:%u.%u.%u.%u): "
+ CERROR("Can't accept %s on %s (%s:%d:%pI4h): "
"bad dst nid %s\n", libcfs_nid2str(nid),
ni == NULL ? "NA" : libcfs_nid2str(ni->ni_nid),
ibdev->ibd_ifname, ibdev->ibd_nnets,
- HIPQUAD(ibdev->ibd_ifip),
+ &ibdev->ibd_ifip,
libcfs_nid2str(reqmsg->ibm_dstnid));
goto failed;
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index e21028b7230..92dc5672e2d 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -404,7 +404,7 @@ kiblnd_sysctl_init (void)
sizeof(ipif_basename_space));
kiblnd_tunables.kib_sysctl =
- cfs_register_sysctl_table(kiblnd_top_ctl_table, 0);
+ register_sysctl_table(kiblnd_top_ctl_table);
if (kiblnd_tunables.kib_sysctl == NULL)
CWARN("Can't setup /proc tunables\n");
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index c826bf9d49a..6825b452e5f 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -334,17 +334,17 @@ ksocknal_associate_route_conn_locked(ksock_route_t *route, ksock_conn_t *conn)
if (route->ksnr_myipaddr != conn->ksnc_myipaddr) {
if (route->ksnr_myipaddr == 0) {
/* route wasn't bound locally yet (the initial route) */
- CDEBUG(D_NET, "Binding %s %u.%u.%u.%u to %u.%u.%u.%u\n",
+ CDEBUG(D_NET, "Binding %s %pI4h to %pI4h\n",
libcfs_id2str(peer->ksnp_id),
- HIPQUAD(route->ksnr_ipaddr),
- HIPQUAD(conn->ksnc_myipaddr));
+ &route->ksnr_ipaddr,
+ &conn->ksnc_myipaddr);
} else {
- CDEBUG(D_NET, "Rebinding %s %u.%u.%u.%u from "
- "%u.%u.%u.%u to %u.%u.%u.%u\n",
+ CDEBUG(D_NET, "Rebinding %s %pI4h from "
+ "%pI4h to %pI4h\n",
libcfs_id2str(peer->ksnp_id),
- HIPQUAD(route->ksnr_ipaddr),
- HIPQUAD(route->ksnr_myipaddr),
- HIPQUAD(conn->ksnc_myipaddr));
+ &route->ksnr_ipaddr,
+ &route->ksnr_myipaddr,
+ &conn->ksnc_myipaddr);
iface = ksocknal_ip2iface(route->ksnr_peer->ksnp_ni,
route->ksnr_myipaddr);
@@ -384,9 +384,9 @@ ksocknal_add_route_locked (ksock_peer_t *peer, ksock_route_t *route)
route2 = list_entry(tmp, ksock_route_t, ksnr_list);
if (route2->ksnr_ipaddr == route->ksnr_ipaddr) {
- CERROR ("Duplicate route %s %u.%u.%u.%u\n",
+ CERROR("Duplicate route %s %pI4h\n",
libcfs_id2str(peer->ksnp_id),
- HIPQUAD(route->ksnr_ipaddr));
+ &route->ksnr_ipaddr);
LBUG();
}
}
@@ -982,8 +982,8 @@ ksocknal_accept (lnet_ni_t *ni, socket_t *sock)
LIBCFS_ALLOC(cr, sizeof(*cr));
if (cr == NULL) {
LCONSOLE_ERROR_MSG(0x12f, "Dropping connection request from "
- "%u.%u.%u.%u: memory exhausted\n",
- HIPQUAD(peer_ip));
+ "%pI4h: memory exhausted\n",
+ &peer_ip);
return -ENOMEM;
}
@@ -1236,10 +1236,10 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
* code below probably isn't going to work. */
if (active &&
route->ksnr_ipaddr != conn->ksnc_ipaddr) {
- CERROR("Route %s %u.%u.%u.%u connected to %u.%u.%u.%u\n",
+ CERROR("Route %s %pI4h connected to %pI4h\n",
libcfs_id2str(peer->ksnp_id),
- HIPQUAD(route->ksnr_ipaddr),
- HIPQUAD(conn->ksnc_ipaddr));
+ &route->ksnr_ipaddr,
+ &conn->ksnc_ipaddr);
}
/* Search for a route corresponding to the new connection and
@@ -1297,10 +1297,10 @@ ksocknal_create_conn (lnet_ni_t *ni, ksock_route_t *route,
* socket callbacks.
*/
- CDEBUG(D_NET, "New conn %s p %d.x %u.%u.%u.%u -> %u.%u.%u.%u/%d"
+ CDEBUG(D_NET, "New conn %s p %d.x %pI4h -> %pI4h/%d"
" incarnation:"LPD64" sched[%d:%d]\n",
libcfs_id2str(peerid), conn->ksnc_proto->pro_version,
- HIPQUAD(conn->ksnc_myipaddr), HIPQUAD(conn->ksnc_ipaddr),
+ &conn->ksnc_myipaddr, &conn->ksnc_ipaddr,
conn->ksnc_port, incarnation, cpt,
(int)(sched - &sched->kss_info->ksi_scheds[0]));
@@ -1441,7 +1441,7 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
conn->ksnc_route = NULL;
-#if 0 /* irrelevent with only eager routes */
+#if 0 /* irrelevant with only eager routes */
/* make route least favourite */
list_del (&route->ksnr_list);
list_add_tail (&route->ksnr_list, &peer->ksnp_routes);
@@ -1496,7 +1496,7 @@ ksocknal_peer_failed (ksock_peer_t *peer)
/* There has been a connection failure or comms error; but I'll only
* tell LNET I think the peer is dead if it's to another kernel and
- * there are no connections or connection attempts in existance. */
+ * there are no connections or connection attempts in existence. */
read_lock(&ksocknal_data.ksnd_global_lock);
@@ -1648,10 +1648,10 @@ ksocknal_destroy_conn (ksock_conn_t *conn)
last_rcv = conn->ksnc_rx_deadline -
cfs_time_seconds(*ksocknal_tunables.ksnd_timeout);
CERROR("Completing partial receive from %s[%d]"
- ", ip %d.%d.%d.%d:%d, with error, wanted: %d, left: %d, "
+ ", ip %pI4h:%d, with error, wanted: %d, left: %d, "
"last alive is %ld secs ago\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id), conn->ksnc_type,
- HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port,
+ &conn->ksnc_ipaddr, conn->ksnc_port,
conn->ksnc_rx_nob_wanted, conn->ksnc_rx_nob_left,
cfs_duration_sec(cfs_time_sub(cfs_time_current(),
last_rcv)));
@@ -1661,25 +1661,25 @@ ksocknal_destroy_conn (ksock_conn_t *conn)
case SOCKNAL_RX_LNET_HEADER:
if (conn->ksnc_rx_started)
CERROR("Incomplete receive of lnet header from %s"
- ", ip %d.%d.%d.%d:%d, with error, protocol: %d.x.\n",
+ ", ip %pI4h:%d, with error, protocol: %d.x.\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
- HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port,
+ &conn->ksnc_ipaddr, conn->ksnc_port,
conn->ksnc_proto->pro_version);
break;
case SOCKNAL_RX_KSM_HEADER:
if (conn->ksnc_rx_started)
CERROR("Incomplete receive of ksock message from %s"
- ", ip %d.%d.%d.%d:%d, with error, protocol: %d.x.\n",
+ ", ip %pI4h:%d, with error, protocol: %d.x.\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
- HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port,
+ &conn->ksnc_ipaddr, conn->ksnc_port,
conn->ksnc_proto->pro_version);
break;
case SOCKNAL_RX_SLOP:
if (conn->ksnc_rx_started)
CERROR("Incomplete receive of slops from %s"
- ", ip %d.%d.%d.%d:%d, with error\n",
+ ", ip %pI4h:%d, with error\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
- HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port);
+ &conn->ksnc_ipaddr, conn->ksnc_port);
break;
default:
LBUG ();
@@ -2358,7 +2358,7 @@ ksocknal_new_incarnation (void)
/* The incarnation number is the time this module loaded and it
* identifies this particular instance of the socknal. Hopefully
* we won't be able to reboot more frequently than 1MHz for the
- * forseeable future :) */
+ * foreseeable future :) */
do_gettimeofday(&tv);
@@ -2898,5 +2898,7 @@ ksocknal_module_init (void)
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Kernel TCP Socket LND v3.0.0");
MODULE_LICENSE("GPL");
+MODULE_VERSION("3.0.0");
-cfs_module(ksocknal, "3.0.0", ksocknal_module_init, ksocknal_module_fini);
+module_init(ksocknal_module_init);
+module_exit(ksocknal_module_fini);
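
The removed cfs_module() wrapper (dropped from linux-prim.h above) expanded to plain module_init()/module_exit(); the conversion here also records the version explicitly with MODULE_VERSION(). The equivalent boilerplate for a hypothetical module:

	#include <linux/init.h>
	#include <linux/module.h>

	static int __init example_init(void)
	{
		return 0;	/* nonzero would fail the module load */
	}

	static void __exit example_fini(void)
	{
	}

	MODULE_LICENSE("GPL");
	MODULE_VERSION("3.0.0");
	module_init(example_init);
	module_exit(example_fini);
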
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index ad5e2410423..2c581b7fa8a 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -343,7 +343,6 @@ ksocknal_receive (ksock_conn_t *conn)
* Caller checks ksnc_rx_nob_wanted to determine
* progress/completion. */
int rc;
- ENTRY;
if (ksocknal_data.ksnd_stall_rx != 0) {
cfs_pause(cfs_time_seconds (ksocknal_data.ksnd_stall_rx));
@@ -381,7 +380,7 @@ ksocknal_receive (ksock_conn_t *conn)
}
ksocknal_connsock_decref(conn);
- RETURN (rc);
+ return rc;
}
void
@@ -389,7 +388,6 @@ ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
{
lnet_msg_t *lnetmsg = tx->tx_lnetmsg;
int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO;
- ENTRY;
LASSERT(ni != NULL || tx->tx_conn != NULL);
@@ -402,8 +400,6 @@ ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx)
ksocknal_free_tx (tx);
if (lnetmsg != NULL) /* KSOCK_MSG_NOOP go without lnetmsg */
lnet_finalize (ni, lnetmsg, rc);
-
- EXIT;
}
void
@@ -553,21 +549,21 @@ ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx)
if (!conn->ksnc_closing) {
switch (rc) {
case -ECONNRESET:
- LCONSOLE_WARN("Host %u.%u.%u.%u reset our connection "
+ LCONSOLE_WARN("Host %pI4h reset our connection "
"while we were sending data; it may have "
"rebooted.\n",
- HIPQUAD(conn->ksnc_ipaddr));
+ &conn->ksnc_ipaddr);
break;
default:
LCONSOLE_WARN("There was an unexpected network error "
- "while writing to %u.%u.%u.%u: %d.\n",
- HIPQUAD(conn->ksnc_ipaddr), rc);
+ "while writing to %pI4h: %d.\n",
+ &conn->ksnc_ipaddr, rc);
break;
}
CDEBUG(D_NET, "[%p] Error %d on write to %s"
- " ip %d.%d.%d.%d:%d\n", conn, rc,
+ " ip %pI4h:%d\n", conn, rc,
libcfs_id2str(conn->ksnc_peer->ksnp_id),
- HIPQUAD(conn->ksnc_ipaddr),
+ &conn->ksnc_ipaddr,
conn->ksnc_port);
}
@@ -700,9 +696,9 @@ ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn)
* ksnc_sock... */
LASSERT(!conn->ksnc_closing);
- CDEBUG (D_NET, "Sending to %s ip %d.%d.%d.%d:%d\n",
+ CDEBUG(D_NET, "Sending to %s ip %pI4h:%d\n",
libcfs_id2str(conn->ksnc_peer->ksnp_id),
- HIPQUAD(conn->ksnc_ipaddr),
+ &conn->ksnc_ipaddr,
conn->ksnc_port);
ksocknal_tx_prep(conn, tx);
@@ -801,9 +797,9 @@ ksocknal_find_connectable_route_locked (ksock_peer_t *peer)
if (!(route->ksnr_retry_interval == 0 || /* first attempt */
cfs_time_aftereq(now, route->ksnr_timeout))) {
CDEBUG(D_NET,
- "Too soon to retry route %u.%u.%u.%u "
+ "Too soon to retry route %pI4h "
"(cnted %d, interval %ld, %ld secs later)\n",
- HIPQUAD(route->ksnr_ipaddr),
+ &route->ksnr_ipaddr,
route->ksnr_connected,
route->ksnr_retry_interval,
cfs_duration_sec(route->ksnr_timeout - now));
@@ -1009,7 +1005,7 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
int
ksocknal_thread_start(int (*fn)(void *arg), void *arg, char *name)
{
- task_t *task = kthread_run(fn, arg, name);
+ struct task_struct *task = kthread_run(fn, arg, name);
if (IS_ERR(task))
return PTR_ERR(task);
@@ -1120,7 +1116,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
/* NB: sched lock NOT held */
- /* SOCKNAL_RX_LNET_HEADER is here for backward compatability */
+ /* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
@@ -1133,17 +1129,17 @@ ksocknal_process_receive (ksock_conn_t *conn)
LASSERT (rc != -EAGAIN);
if (rc == 0)
- CDEBUG (D_NET, "[%p] EOF from %s"
- " ip %d.%d.%d.%d:%d\n", conn,
+ CDEBUG(D_NET, "[%p] EOF from %s"
+ " ip %pI4h:%d\n", conn,
libcfs_id2str(conn->ksnc_peer->ksnp_id),
- HIPQUAD(conn->ksnc_ipaddr),
+ &conn->ksnc_ipaddr,
conn->ksnc_port);
else if (!conn->ksnc_closing)
- CERROR ("[%p] Error %d on read from %s"
- " ip %d.%d.%d.%d:%d\n",
+ CERROR("[%p] Error %d on read from %s"
+ " ip %pI4h:%d\n",
conn, rc,
libcfs_id2str(conn->ksnc_peer->ksnp_id),
- HIPQUAD(conn->ksnc_ipaddr),
+ &conn->ksnc_ipaddr,
conn->ksnc_port);
/* it's not an error if conn is being closed */
@@ -1562,7 +1558,6 @@ int ksocknal_scheduler(void *arg)
void ksocknal_read_callback (ksock_conn_t *conn)
{
ksock_sched_t *sched;
- ENTRY;
sched = conn->ksnc_scheduler;
@@ -1580,8 +1575,6 @@ void ksocknal_read_callback (ksock_conn_t *conn)
wake_up (&sched->kss_waitq);
}
spin_unlock_bh(&sched->kss_lock);
-
- EXIT;
}
/*
@@ -1591,7 +1584,6 @@ void ksocknal_read_callback (ksock_conn_t *conn)
void ksocknal_write_callback (ksock_conn_t *conn)
{
ksock_sched_t *sched;
- ENTRY;
sched = conn->ksnc_scheduler;
@@ -1611,8 +1603,6 @@ void ksocknal_write_callback (ksock_conn_t *conn)
}
spin_unlock_bh(&sched->kss_lock);
-
- EXIT;
}
ksock_proto_t *
@@ -1722,8 +1712,8 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
rc = libcfs_sock_read(sock, &hello->kshm_magic, sizeof (hello->kshm_magic), timeout);
if (rc != 0) {
- CERROR ("Error %d reading HELLO from %u.%u.%u.%u\n",
- rc, HIPQUAD(conn->ksnc_ipaddr));
+ CERROR("Error %d reading HELLO from %pI4h\n",
+ rc, &conn->ksnc_ipaddr);
LASSERT (rc < 0);
return rc;
}
@@ -1732,18 +1722,18 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
hello->kshm_magic != __swab32(LNET_PROTO_MAGIC) &&
hello->kshm_magic != le32_to_cpu (LNET_PROTO_TCP_MAGIC)) {
/* Unexpected magic! */
- CERROR ("Bad magic(1) %#08x (%#08x expected) from "
- "%u.%u.%u.%u\n", __cpu_to_le32 (hello->kshm_magic),
+ CERROR("Bad magic(1) %#08x (%#08x expected) from "
+ "%pI4h\n", __cpu_to_le32 (hello->kshm_magic),
LNET_PROTO_TCP_MAGIC,
- HIPQUAD(conn->ksnc_ipaddr));
+ &conn->ksnc_ipaddr);
return -EPROTO;
}
rc = libcfs_sock_read(sock, &hello->kshm_version,
sizeof(hello->kshm_version), timeout);
if (rc != 0) {
- CERROR ("Error %d reading HELLO from %u.%u.%u.%u\n",
- rc, HIPQUAD(conn->ksnc_ipaddr));
+ CERROR("Error %d reading HELLO from %pI4h\n",
+ rc, &conn->ksnc_ipaddr);
LASSERT (rc < 0);
return rc;
}
@@ -1763,10 +1753,10 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
ksocknal_send_hello(ni, conn, ni->ni_nid, hello);
}
- CERROR ("Unknown protocol version (%d.x expected)"
- " from %u.%u.%u.%u\n",
+ CERROR("Unknown protocol version (%d.x expected)"
+ " from %pI4h\n",
conn->ksnc_proto->pro_version,
- HIPQUAD(conn->ksnc_ipaddr));
+ &conn->ksnc_ipaddr);
return -EPROTO;
}
@@ -1777,8 +1767,8 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
/* receive the rest of hello message anyway */
rc = conn->ksnc_proto->pro_recv_hello(conn, hello, timeout);
if (rc != 0) {
- CERROR("Error %d reading or checking hello from from %u.%u.%u.%u\n",
- rc, HIPQUAD(conn->ksnc_ipaddr));
+ CERROR("Error %d reading or checking hello from from %pI4h\n",
+ rc, &conn->ksnc_ipaddr);
LASSERT (rc < 0);
return rc;
}
@@ -1787,7 +1777,7 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
if (hello->kshm_src_nid == LNET_NID_ANY) {
CERROR("Expecting a HELLO hdr with a NID, but got LNET_NID_ANY"
- "from %u.%u.%u.%u\n", HIPQUAD(conn->ksnc_ipaddr));
+ "from %pI4h\n", &conn->ksnc_ipaddr);
return -EPROTO;
}
@@ -1807,9 +1797,9 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
/* peer determines type */
conn->ksnc_type = ksocknal_invert_type(hello->kshm_ctype);
if (conn->ksnc_type == SOCKLND_CONN_NONE) {
- CERROR ("Unexpected type %d from %s ip %u.%u.%u.%u\n",
+ CERROR("Unexpected type %d from %s ip %pI4h\n",
hello->kshm_ctype, libcfs_id2str(*peerid),
- HIPQUAD(conn->ksnc_ipaddr));
+ &conn->ksnc_ipaddr);
return -EPROTO;
}
@@ -1819,11 +1809,11 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
if (peerid->pid != recv_id.pid ||
peerid->nid != recv_id.nid) {
LCONSOLE_ERROR_MSG(0x130, "Connected successfully to %s on host"
- " %u.%u.%u.%u, but they claimed they were "
+ " %pI4h, but they claimed they were "
"%s; please check your Lustre "
"configuration.\n",
libcfs_id2str(*peerid),
- HIPQUAD(conn->ksnc_ipaddr),
+ &conn->ksnc_ipaddr,
libcfs_id2str(recv_id));
return -EPROTO;
}
@@ -1834,9 +1824,9 @@ ksocknal_recv_hello (lnet_ni_t *ni, ksock_conn_t *conn,
}
if (ksocknal_invert_type(hello->kshm_ctype) != conn->ksnc_type) {
- CERROR ("Mismatched types: me %d, %s ip %u.%u.%u.%u %d\n",
+ CERROR("Mismatched types: me %d, %s ip %pI4h %d\n",
conn->ksnc_type, libcfs_id2str(*peerid),
- HIPQUAD(conn->ksnc_ipaddr),
+ &conn->ksnc_ipaddr,
hello->kshm_ctype);
return -EPROTO;
}
@@ -1995,7 +1985,7 @@ ksocknal_connect (ksock_route_t *route)
list_splice_init(&peer->ksnp_tx_queue, &zombies);
}
-#if 0 /* irrelevent with only eager routes */
+#if 0 /* irrelevant with only eager routes */
if (!route->ksnr_deleted) {
/* make this route least-favourite for re-selection */
list_del(&route->ksnr_list);
@@ -2208,8 +2198,8 @@ ksocknal_connd (void *arg)
/* consecutive retry */
if (cons_retry++ > SOCKNAL_INSANITY_RECONN) {
CWARN("massive consecutive "
- "re-connecting to %u.%u.%u.%u\n",
- HIPQUAD(route->ksnr_ipaddr));
+ "re-connecting to %pI4h\n",
+ &route->ksnr_ipaddr);
cons_retry = 0;
}
} else {
@@ -2274,26 +2264,26 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
switch (error) {
case ECONNRESET:
CNETERR("A connection with %s "
- "(%u.%u.%u.%u:%d) was reset; "
+ "(%pI4h:%d) was reset; "
"it may have rebooted.\n",
libcfs_id2str(peer->ksnp_id),
- HIPQUAD(conn->ksnc_ipaddr),
+ &conn->ksnc_ipaddr,
conn->ksnc_port);
break;
case ETIMEDOUT:
CNETERR("A connection with %s "
- "(%u.%u.%u.%u:%d) timed out; the "
+ "(%pI4h:%d) timed out; the "
"network or node may be down.\n",
libcfs_id2str(peer->ksnp_id),
- HIPQUAD(conn->ksnc_ipaddr),
+ &conn->ksnc_ipaddr,
conn->ksnc_port);
break;
default:
CNETERR("An unexpected network error %d "
"occurred with %s "
- "(%u.%u.%u.%u:%d\n", error,
+ "(%pI4h:%d\n", error,
libcfs_id2str(peer->ksnp_id),
- HIPQUAD(conn->ksnc_ipaddr),
+ &conn->ksnc_ipaddr,
conn->ksnc_port);
break;
}
@@ -2306,10 +2296,10 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
conn->ksnc_rx_deadline)) {
/* Timed out incomplete incoming message */
ksocknal_conn_addref(conn);
- CNETERR("Timeout receiving from %s (%u.%u.%u.%u:%d), "
+ CNETERR("Timeout receiving from %s (%pI4h:%d), "
"state %d wanted %d left %d\n",
libcfs_id2str(peer->ksnp_id),
- HIPQUAD(conn->ksnc_ipaddr),
+ &conn->ksnc_ipaddr,
conn->ksnc_port,
conn->ksnc_rx_state,
conn->ksnc_rx_nob_wanted,
@@ -2324,10 +2314,10 @@ ksocknal_find_timed_out_conn (ksock_peer_t *peer)
/* Timed out messages queued for sending or
* buffered in the socket's send buffer */
ksocknal_conn_addref(conn);
- CNETERR("Timeout sending data to %s (%u.%u.%u.%u:%d) "
+ CNETERR("Timeout sending data to %s (%pI4h:%d) "
"the network or that node may be down.\n",
libcfs_id2str(peer->ksnp_id),
- HIPQUAD(conn->ksnc_ipaddr),
+ &conn->ksnc_ipaddr,
conn->ksnc_port);
return (conn);
}
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
index 3e08fe2d148..a1c6a519bf5 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.c
@@ -316,7 +316,7 @@ ksocknal_lib_tunables_init ()
*ksocknal_tunables.ksnd_zc_recv_min_nfrags = LNET_MAX_IOV;
ksocknal_tunables.ksnd_sysctl =
- cfs_register_sysctl_table(ksocknal_top_ctl_table, 0);
+ register_sysctl_table(ksocknal_top_ctl_table);
if (ksocknal_tunables.ksnd_sysctl == NULL)
CWARN("Can't setup /proc tunables\n");
@@ -325,20 +325,20 @@ ksocknal_lib_tunables_init ()
}
void
-ksocknal_lib_tunables_fini ()
+ksocknal_lib_tunables_fini(void)
{
if (ksocknal_tunables.ksnd_sysctl != NULL)
unregister_sysctl_table(ksocknal_tunables.ksnd_sysctl);
}
#else
int
-ksocknal_lib_tunables_init ()
+ksocknal_lib_tunables_init(void)
{
return 0;
}
void
-ksocknal_lib_tunables_fini ()
+ksocknal_lib_tunables_fini(void)
{
}
#endif /* # if CONFIG_SYSCTL && !CFS_SYSFS_MODULE_PARM */
@@ -964,7 +964,6 @@ static void
ksocknal_data_ready (struct sock *sk, int n)
{
ksock_conn_t *conn;
- ENTRY;
/* interleave correctly with closing sockets... */
LASSERT(!in_irq());
@@ -978,8 +977,6 @@ ksocknal_data_ready (struct sock *sk, int n)
ksocknal_read_callback(conn);
read_unlock(&ksocknal_data.ksnd_global_lock);
-
- EXIT;
}
static void
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h
index 3c135786dc1..1cfc1b168be 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib-linux.h
@@ -41,7 +41,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/stat.h>
@@ -58,11 +57,9 @@
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/file.h>
-#include <linux/stat.h>
#include <linux/list.h>
#include <linux/kmod.h>
#include <linux/sysctl.h>
-#include <asm/uaccess.h>
#include <asm/div64.h>
#include <linux/syscalls.h>
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
index ec57179f8d2..71205e2015c 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_proto.c
@@ -218,7 +218,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
if (tx->tx_msg.ksm_zc_cookies[0] > tx->tx_msg.ksm_zc_cookies[1]) {
__u64 tmp = 0;
- /* two seperated cookies: (a+2, a) or (a+1, a) */
+ /* two separated cookies: (a+2, a) or (a+1, a) */
LASSERT (tx->tx_msg.ksm_zc_cookies[0] -
tx->tx_msg.ksm_zc_cookies[1] <= 2);
@@ -496,8 +496,8 @@ ksocknal_send_hello_v1 (ksock_conn_t *conn, ksock_hello_msg_t *hello)
rc = libcfs_sock_write(sock, hdr, sizeof(*hdr),lnet_acceptor_timeout());
if (rc != 0) {
- CNETERR("Error %d sending HELLO hdr to %u.%u.%u.%u/%d\n",
- rc, HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port);
+ CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
+ rc, &conn->ksnc_ipaddr, conn->ksnc_port);
goto out;
}
@@ -513,8 +513,8 @@ ksocknal_send_hello_v1 (ksock_conn_t *conn, ksock_hello_msg_t *hello)
lnet_acceptor_timeout());
if (rc != 0) {
CNETERR("Error %d sending HELLO payload (%d)"
- " to %u.%u.%u.%u/%d\n", rc, hello->kshm_nips,
- HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port);
+ " to %pI4h/%d\n", rc, hello->kshm_nips,
+ &conn->ksnc_ipaddr, conn->ksnc_port);
}
out:
LIBCFS_FREE(hdr, sizeof(*hdr));
@@ -545,8 +545,8 @@ ksocknal_send_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello)
lnet_acceptor_timeout());
if (rc != 0) {
- CNETERR("Error %d sending HELLO hdr to %u.%u.%u.%u/%d\n",
- rc, HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port);
+ CNETERR("Error %d sending HELLO hdr to %pI4h/%d\n",
+ rc, &conn->ksnc_ipaddr, conn->ksnc_port);
return rc;
}
@@ -558,8 +558,8 @@ ksocknal_send_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello)
lnet_acceptor_timeout());
if (rc != 0) {
CNETERR("Error %d sending HELLO payload (%d)"
- " to %u.%u.%u.%u/%d\n", rc, hello->kshm_nips,
- HIPQUAD(conn->ksnc_ipaddr), conn->ksnc_port);
+ " to %pI4h/%d\n", rc, hello->kshm_nips,
+ &conn->ksnc_ipaddr, conn->ksnc_port);
}
return rc;
@@ -583,18 +583,18 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,int timeout)
sizeof (*hdr) - offsetof (lnet_hdr_t, src_nid),
timeout);
if (rc != 0) {
- CERROR ("Error %d reading rest of HELLO hdr from %u.%u.%u.%u\n",
- rc, HIPQUAD(conn->ksnc_ipaddr));
+ CERROR("Error %d reading rest of HELLO hdr from %pI4h\n",
+ rc, &conn->ksnc_ipaddr);
LASSERT (rc < 0 && rc != -EALREADY);
goto out;
}
/* ...and check we got what we expected */
if (hdr->type != cpu_to_le32 (LNET_MSG_HELLO)) {
- CERROR ("Expecting a HELLO hdr,"
- " but got type %d from %u.%u.%u.%u\n",
+ CERROR("Expecting a HELLO hdr,"
+ " but got type %d from %pI4h\n",
le32_to_cpu (hdr->type),
- HIPQUAD(conn->ksnc_ipaddr));
+ &conn->ksnc_ipaddr);
rc = -EPROTO;
goto out;
}
@@ -607,8 +607,8 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,int timeout)
sizeof (__u32);
if (hello->kshm_nips > LNET_MAX_INTERFACES) {
- CERROR("Bad nips %d from ip %u.%u.%u.%u\n",
- hello->kshm_nips, HIPQUAD(conn->ksnc_ipaddr));
+ CERROR("Bad nips %d from ip %pI4h\n",
+ hello->kshm_nips, &conn->ksnc_ipaddr);
rc = -EPROTO;
goto out;
}
@@ -619,9 +619,9 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,int timeout)
rc = libcfs_sock_read(sock, hello->kshm_ips,
hello->kshm_nips * sizeof(__u32), timeout);
if (rc != 0) {
- CERROR ("Error %d reading IPs from ip %u.%u.%u.%u\n",
- rc, HIPQUAD(conn->ksnc_ipaddr));
- LASSERT (rc < 0 && rc != -EALREADY);
+ CERROR("Error %d reading IPs from ip %pI4h\n",
+ rc, &conn->ksnc_ipaddr);
+ LASSERT(rc < 0 && rc != -EALREADY);
goto out;
}
@@ -629,8 +629,8 @@ ksocknal_recv_hello_v1(ksock_conn_t *conn, ksock_hello_msg_t *hello,int timeout)
hello->kshm_ips[i] = __le32_to_cpu(hello->kshm_ips[i]);
if (hello->kshm_ips[i] == 0) {
- CERROR("Zero IP[%d] from ip %u.%u.%u.%u\n",
- i, HIPQUAD(conn->ksnc_ipaddr));
+ CERROR("Zero IP[%d] from ip %pI4h\n",
+ i, &conn->ksnc_ipaddr);
rc = -EPROTO;
break;
}
@@ -658,9 +658,9 @@ ksocknal_recv_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeou
offsetof(ksock_hello_msg_t, kshm_src_nid),
timeout);
if (rc != 0) {
- CERROR ("Error %d reading HELLO from %u.%u.%u.%u\n",
- rc, HIPQUAD(conn->ksnc_ipaddr));
- LASSERT (rc < 0 && rc != -EALREADY);
+ CERROR("Error %d reading HELLO from %pI4h\n",
+ rc, &conn->ksnc_ipaddr);
+ LASSERT(rc < 0 && rc != -EALREADY);
return rc;
}
@@ -676,8 +676,8 @@ ksocknal_recv_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeou
}
if (hello->kshm_nips > LNET_MAX_INTERFACES) {
- CERROR("Bad nips %d from ip %u.%u.%u.%u\n",
- hello->kshm_nips, HIPQUAD(conn->ksnc_ipaddr));
+ CERROR("Bad nips %d from ip %pI4h\n",
+ hello->kshm_nips, &conn->ksnc_ipaddr);
return -EPROTO;
}
@@ -687,9 +687,9 @@ ksocknal_recv_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeou
rc = libcfs_sock_read(sock, hello->kshm_ips,
hello->kshm_nips * sizeof(__u32), timeout);
if (rc != 0) {
- CERROR ("Error %d reading IPs from ip %u.%u.%u.%u\n",
- rc, HIPQUAD(conn->ksnc_ipaddr));
- LASSERT (rc < 0 && rc != -EALREADY);
+ CERROR("Error %d reading IPs from ip %pI4h\n",
+ rc, &conn->ksnc_ipaddr);
+ LASSERT(rc < 0 && rc != -EALREADY);
return rc;
}
@@ -698,8 +698,8 @@ ksocknal_recv_hello_v2 (ksock_conn_t *conn, ksock_hello_msg_t *hello, int timeou
__swab32s(&hello->kshm_ips[i]);
if (hello->kshm_ips[i] == 0) {
- CERROR("Zero IP[%d] from ip %u.%u.%u.%u\n",
- i, HIPQUAD(conn->ksnc_ipaddr));
+ CERROR("Zero IP[%d] from ip %pI4h\n",
+ i, &conn->ksnc_ipaddr);
return -EPROTO;
}
}
diff --git a/drivers/staging/lustre/lnet/lnet/Makefile b/drivers/staging/lustre/lnet/lnet/Makefile
index 1bd9ef77420..b815fe12b10 100644
--- a/drivers/staging/lustre/lnet/lnet/Makefile
+++ b/drivers/staging/lustre/lnet/lnet/Makefile
@@ -1,6 +1,6 @@
obj-$(CONFIG_LNET) += lnet.o
-lnet-y := api-errno.o api-ni.o config.o lib-me.o lib-msg.o lib-eq.o \
+lnet-y := api-ni.o config.o lib-me.o lib-msg.o lib-eq.o \
lib-md.o lib-ptl.o lib-move.o module.o lo.o router.o \
router_proc.o acceptor.o peer.o
diff --git a/drivers/staging/lustre/lnet/lnet/acceptor.c b/drivers/staging/lustre/lnet/lnet/acceptor.c
index 81ef28bbcba..bb15bde0704 100644
--- a/drivers/staging/lustre/lnet/lnet/acceptor.c
+++ b/drivers/staging/lustre/lnet/lnet/acceptor.c
@@ -101,52 +101,52 @@ lnet_connect_console_error (int rc, lnet_nid_t peer_nid,
switch (rc) {
/* "normal" errors */
case -ECONNREFUSED:
- CNETERR("Connection to %s at host %u.%u.%u.%u on port %d was "
+ CNETERR("Connection to %s at host %pI4h on port %d was "
"refused: check that Lustre is running on that node.\n",
libcfs_nid2str(peer_nid),
- HIPQUAD(peer_ip), peer_port);
+ &peer_ip, peer_port);
break;
case -EHOSTUNREACH:
case -ENETUNREACH:
- CNETERR("Connection to %s at host %u.%u.%u.%u "
+ CNETERR("Connection to %s at host %pI4h "
"was unreachable: the network or that node may "
"be down, or Lustre may be misconfigured.\n",
- libcfs_nid2str(peer_nid), HIPQUAD(peer_ip));
+ libcfs_nid2str(peer_nid), &peer_ip);
break;
case -ETIMEDOUT:
- CNETERR("Connection to %s at host %u.%u.%u.%u on "
+ CNETERR("Connection to %s at host %pI4h on "
"port %d took too long: that node may be hung "
"or experiencing high load.\n",
libcfs_nid2str(peer_nid),
- HIPQUAD(peer_ip), peer_port);
+ &peer_ip, peer_port);
break;
case -ECONNRESET:
- LCONSOLE_ERROR_MSG(0x11b, "Connection to %s at host %u.%u.%u.%u"
+ LCONSOLE_ERROR_MSG(0x11b, "Connection to %s at host %pI4h"
" on port %d was reset: "
"is it running a compatible version of "
"Lustre and is %s one of its NIDs?\n",
libcfs_nid2str(peer_nid),
- HIPQUAD(peer_ip), peer_port,
+ &peer_ip, peer_port,
libcfs_nid2str(peer_nid));
break;
case -EPROTO:
LCONSOLE_ERROR_MSG(0x11c, "Protocol error connecting to %s at "
- "host %u.%u.%u.%u on port %d: is it running "
+ "host %pI4h on port %d: is it running "
"a compatible version of Lustre?\n",
libcfs_nid2str(peer_nid),
- HIPQUAD(peer_ip), peer_port);
+ &peer_ip, peer_port);
break;
case -EADDRINUSE:
LCONSOLE_ERROR_MSG(0x11d, "No privileged ports available to "
- "connect to %s at host %u.%u.%u.%u on port "
+ "connect to %s at host %pI4h on port "
"%d\n", libcfs_nid2str(peer_nid),
- HIPQUAD(peer_ip), peer_port);
+ &peer_ip, peer_port);
break;
default:
LCONSOLE_ERROR_MSG(0x11e, "Unexpected error %d connecting to %s"
- " at host %u.%u.%u.%u on port %d\n", rc,
+ " at host %pI4h on port %d\n", rc,
libcfs_nid2str(peer_nid),
- HIPQUAD(peer_ip), peer_port);
+ &peer_ip, peer_port);
break;
}
}
@@ -253,8 +253,8 @@ lnet_accept(socket_t *sock, __u32 magic)
if (rc != 0)
CERROR("Error sending magic+version in response"
- "to LNET magic from %u.%u.%u.%u: %d\n",
- HIPQUAD(peer_ip), rc);
+ "to LNET magic from %pI4h: %d\n",
+ &peer_ip, rc);
return -EPROTO;
}
@@ -265,9 +265,9 @@ lnet_accept(socket_t *sock, __u32 magic)
else
str = "unrecognised";
- LCONSOLE_ERROR_MSG(0x11f, "Refusing connection from %u.%u.%u.%u"
+ LCONSOLE_ERROR_MSG(0x11f, "Refusing connection from %pI4h"
" magic %08x: %s acceptor protocol\n",
- HIPQUAD(peer_ip), magic, str);
+ &peer_ip, magic, str);
return -EPROTO;
}
@@ -278,7 +278,7 @@ lnet_accept(socket_t *sock, __u32 magic)
accept_timeout);
if (rc != 0) {
CERROR("Error %d reading connection request version from "
- "%u.%u.%u.%u\n", rc, HIPQUAD(peer_ip));
+ "%pI4h\n", rc, &peer_ip);
return -EIO;
}
@@ -301,8 +301,8 @@ lnet_accept(socket_t *sock, __u32 magic)
if (rc != 0)
CERROR("Error sending magic+version in response"
- "to version %d from %u.%u.%u.%u: %d\n",
- peer_version, HIPQUAD(peer_ip), rc);
+ "to version %d from %pI4h: %d\n",
+ peer_version, &peer_ip, rc);
return -EPROTO;
}
@@ -312,7 +312,7 @@ lnet_accept(socket_t *sock, __u32 magic)
accept_timeout);
if (rc != 0) {
CERROR("Error %d reading connection request from "
- "%u.%u.%u.%u\n", rc, HIPQUAD(peer_ip));
+ "%pI4h\n", rc, &peer_ip);
return -EIO;
}
@@ -324,23 +324,23 @@ lnet_accept(socket_t *sock, __u32 magic)
ni->ni_nid != cr.acr_nid) { /* right NET, wrong NID! */
if (ni != NULL)
lnet_ni_decref(ni);
- LCONSOLE_ERROR_MSG(0x120, "Refusing connection from %u.%u.%u.%u"
+ LCONSOLE_ERROR_MSG(0x120, "Refusing connection from %pI4h"
" for %s: No matching NI\n",
- HIPQUAD(peer_ip), libcfs_nid2str(cr.acr_nid));
+ &peer_ip, libcfs_nid2str(cr.acr_nid));
return -EPERM;
}
if (ni->ni_lnd->lnd_accept == NULL) {
/* This catches a request for the loopback LND */
lnet_ni_decref(ni);
- LCONSOLE_ERROR_MSG(0x121, "Refusing connection from %u.%u.%u.%u"
+ LCONSOLE_ERROR_MSG(0x121, "Refusing connection from %pI4h"
" for %s: NI doesn not accept IP connections\n",
- HIPQUAD(peer_ip), libcfs_nid2str(cr.acr_nid));
+ &peer_ip, libcfs_nid2str(cr.acr_nid));
return -EPERM;
}
- CDEBUG(D_NET, "Accept %s from %u.%u.%u.%u\n",
- libcfs_nid2str(cr.acr_nid), HIPQUAD(peer_ip));
+ CDEBUG(D_NET, "Accept %s from %pI4h\n",
+ libcfs_nid2str(cr.acr_nid), &peer_ip);
rc = ni->ni_lnd->lnd_accept(ni, sock);
@@ -410,9 +410,9 @@ lnet_acceptor(void *arg)
}
if (secure && peer_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
- CERROR("Refusing connection from %u.%u.%u.%u: "
+ CERROR("Refusing connection from %pI4h: "
"insecure port %d\n",
- HIPQUAD(peer_ip), peer_port);
+ &peer_ip, peer_port);
goto failed;
}
@@ -420,7 +420,7 @@ lnet_acceptor(void *arg)
accept_timeout);
if (rc != 0) {
CERROR("Error %d reading connection request from "
- "%u.%u.%u.%u\n", rc, HIPQUAD(peer_ip));
+ "%pI4h\n", rc, &peer_ip);
goto failed;
}
diff --git a/drivers/staging/lustre/lnet/lnet/api-errno.c b/drivers/staging/lustre/lnet/lnet/api-errno.c
deleted file mode 100644
index 695b27265e2..00000000000
--- a/drivers/staging/lustre/lnet/lnet/api-errno.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lnet/lnet/api-errno.c
- *
- * Instantiate the string table of errors
- */
-
-/* If you change these, you must update the number table in portals/errno.h */
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index e88bee36249..160a4292c6c 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -1371,7 +1371,7 @@ EXPORT_SYMBOL(LNetNIInit);
* \return always 0 for current implementation.
*/
int
-LNetNIFini()
+LNetNIFini(void)
{
LNET_MUTEX_LOCK(&the_lnet.ln_api_mutex);
@@ -1541,7 +1541,10 @@ LNetGetId(unsigned int index, lnet_process_id_t *id)
int rc = -ENOENT;
LASSERT(the_lnet.ln_init);
- LASSERT(the_lnet.ln_refcount > 0);
+
+ /* LNetNI initialization failed? */
+ if (the_lnet.ln_refcount == 0)
+ return rc;
cpt = lnet_net_lock_current();
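
Besides the error-path change in LNetGetId() (returning -ENOENT instead of asserting when LNet initialization failed), the api-ni.c hunks give LNetNIFini() an explicit (void) parameter list. In C, an empty list in a declaration leaves the argument types unspecified, while (void) is a real prototype for zero arguments; a short illustrative pair with hypothetical names:

	int takes_anything();		/* old style: call sites are not type-checked */
	int takes_nothing(void);	/* prototype: exactly zero arguments, checked */
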
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
index 78297a7d94e..4ce68d3b093 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
@@ -244,11 +244,10 @@ lnet_eq_dequeue_event(lnet_eq_t *eq, lnet_event_t *ev)
int new_index = eq->eq_deq_seq & (eq->eq_size - 1);
lnet_event_t *new_event = &eq->eq_events[new_index];
int rc;
- ENTRY;
/* must called with lnet_eq_wait_lock hold */
if (LNET_SEQ_GT(eq->eq_deq_seq, new_event->sequence))
- RETURN(0);
+ return 0;
/* We've got a new event... */
*ev = *new_event;
@@ -268,7 +267,7 @@ lnet_eq_dequeue_event(lnet_eq_t *eq, lnet_event_t *ev)
}
eq->eq_deq_seq = new_event->sequence + 1;
- RETURN(rc);
+ return rc;
}
/**
@@ -400,13 +399,12 @@ LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
int wait = 1;
int rc;
int i;
- ENTRY;
LASSERT (the_lnet.ln_init);
LASSERT (the_lnet.ln_refcount > 0);
if (neq < 1)
- RETURN(-ENOENT);
+ return -ENOENT;
lnet_eq_wait_lock();
@@ -416,14 +414,14 @@ LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
if (eq == NULL) {
lnet_eq_wait_unlock();
- RETURN(-ENOENT);
+ return -ENOENT;
}
rc = lnet_eq_dequeue_event(eq, event);
if (rc != 0) {
lnet_eq_wait_unlock();
*which = i;
- RETURN(rc);
+ return rc;
}
}
@@ -443,5 +441,5 @@ LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
}
lnet_eq_wait_unlock();
- RETURN(0);
+ return 0;
}
diff --git a/drivers/staging/lustre/lnet/lnet/lib-msg.c b/drivers/staging/lustre/lnet/lnet/lib-msg.c
index 8f3a50bd5f6..61ae88be6f0 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-msg.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-msg.c
@@ -45,8 +45,6 @@
void
lnet_build_unlink_event (lnet_libmd_t *md, lnet_event_t *ev)
{
- ENTRY;
-
memset(ev, 0, sizeof(*ev));
ev->status = 0;
@@ -54,7 +52,6 @@ lnet_build_unlink_event (lnet_libmd_t *md, lnet_event_t *ev)
ev->type = LNET_EVENT_UNLINK;
lnet_md_deconstruct(md, &ev->md);
lnet_md2handle(&ev->md_handle, md);
- EXIT;
}
/*
@@ -319,7 +316,7 @@ lnet_msg_attach_md(lnet_msg_t *msg, lnet_libmd_t *md,
LASSERT(!msg->msg_routing);
msg->msg_md = md;
- if (msg->msg_receiving) { /* commited for receiving */
+ if (msg->msg_receiving) { /* committed for receiving */
msg->msg_offset = offset;
msg->msg_wanted = mlen;
}
@@ -395,7 +392,7 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
* NB: message is committed for sending, we should return
* on success because LND will finalize this message later.
*
- * Also, there is possibility that message is commited for
+ * Also, there is possibility that message is committed for
* sending and also failed before delivering to LND,
* i.e: ENOMEM, in that case we can't fall through either
* because CPT for sending can be different with CPT for
@@ -417,7 +414,7 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
* NB: message is committed for sending, we should return
* on success because LND will finalize this message later.
*
- * Also, there is possibility that message is commited for
+ * Also, there is possibility that message is committed for
* sending and also failed before delivering to LND,
* i.e: ENOMEM, in that case we can't fall through either:
* - The rule is message must decommit for sending first if
@@ -477,14 +474,14 @@ lnet_finalize (lnet_ni_t *ni, lnet_msg_t *msg, int status)
again:
rc = 0;
if (!msg->msg_tx_committed && !msg->msg_rx_committed) {
- /* not commited to network yet */
+ /* not committed to network yet */
LASSERT(!msg->msg_onactivelist);
lnet_msg_free(msg);
return;
}
/*
- * NB: routed message can be commited for both receiving and sending,
+ * NB: routed message can be committed for both receiving and sending,
* we should finalize in LIFO order and keep counters correct.
* (finalize sending first then finalize receiving)
*/
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index c8323854580..afb81755cba 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -114,14 +114,13 @@ int
init_lnet(void)
{
int rc;
- ENTRY;
mutex_init(&lnet_config_mutex);
rc = LNetInit();
if (rc != 0) {
CERROR("LNetInit: error %d\n", rc);
- RETURN(rc);
+ return rc;
}
rc = libcfs_register_ioctl(&lnet_ioctl_handler);
@@ -133,7 +132,7 @@ init_lnet(void)
(void) kthread_run(lnet_configure, NULL, "lnet_initd");
}
- RETURN(0);
+ return 0;
}
void
@@ -150,5 +149,7 @@ fini_lnet(void)
MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
MODULE_DESCRIPTION("Portals v3.1");
MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0.0");
-cfs_module(lnet, "1.0.0", init_lnet, fini_lnet);
+module_init(init_lnet);
+module_exit(fini_lnet);
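
The module.c hunk drops the custom cfs_module() wrapper in favour of the standard kernel boilerplate: MODULE_VERSION() carries the version string and module_init()/module_exit() register the entry points. A minimal sketch of the same pattern for a hypothetical module:

	#include <linux/module.h>

	static int __init example_init(void)
	{
		return 0;		/* a nonzero return would abort module load */
	}

	static void __exit example_exit(void)
	{
	}

	MODULE_LICENSE("GPL");
	MODULE_VERSION("1.0.0");
	module_init(example_init);
	module_exit(example_exit);
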
diff --git a/drivers/staging/lustre/lnet/lnet/router_proc.c b/drivers/staging/lustre/lnet/lnet/router_proc.c
index 3084b0c7598..931f6ca25dc 100644
--- a/drivers/staging/lustre/lnet/lnet/router_proc.c
+++ b/drivers/staging/lustre/lnet/lnet/router_proc.c
@@ -920,7 +920,7 @@ lnet_proc_init(void)
{
#ifdef CONFIG_SYSCTL
if (lnet_table_header == NULL)
- lnet_table_header = cfs_register_sysctl_table(top_table, 0);
+ lnet_table_header = register_sysctl_table(top_table);
#endif
}
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index 3bb6fbe23f7..ef5064e0055 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -361,7 +361,7 @@ brw_server_rpc_done (srpc_server_rpc_t *rpc)
blk->bk_sink ? "from" : "to",
libcfs_id2str(rpc->srpc_peer), rpc->srpc_status);
else
- CDEBUG (D_NET, "Transfered %d pages bulk data %s %s\n",
+ CDEBUG (D_NET, "Transferred %d pages bulk data %s %s\n",
blk->bk_niov, blk->bk_sink ? "from" : "to",
libcfs_id2str(rpc->srpc_peer));
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 446de0e4672..cbce662b987 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -1356,7 +1356,7 @@ lstcon_rpc_cleanup_wait(void)
lst_wait_until((atomic_read(&console_session.ses_rpc_counter) == 0),
console_session.ses_rpc_lock,
- "Network is not accessable or target is down, "
+ "Network is not accessible or target is down, "
"waiting for %d console RPCs to being recycled\n",
atomic_read(&console_session.ses_rpc_counter));
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index 78e8d046726..09e4700af64 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -1773,7 +1773,7 @@ lstcon_session_info(lst_sid_t *sid_up, int *key_up, unsigned *featp,
}
int
-lstcon_session_end()
+lstcon_session_end(void)
{
lstcon_rpc_trans_t *trans;
lstcon_group_t *grp;
diff --git a/drivers/staging/lustre/lnet/selftest/module.c b/drivers/staging/lustre/lnet/selftest/module.c
index 5257e5630a0..6dd4309dc5e 100644
--- a/drivers/staging/lustre/lnet/selftest/module.c
+++ b/drivers/staging/lustre/lnet/selftest/module.c
@@ -165,5 +165,7 @@ error:
MODULE_DESCRIPTION("LNet Selftest");
MODULE_LICENSE("GPL");
+MODULE_VERSION("0.9.0");
-cfs_module(lnet, "0.9.0", lnet_selftest_init, lnet_selftest_fini);
+module_init(lnet_selftest_init);
+module_exit(lnet_selftest_fini);
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index bc1f38b8048..7659a26676b 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -661,8 +661,10 @@ srpc_finish_service(struct srpc_service *sv)
cfs_percpt_for_each(scd, i, sv->sv_cpt_data) {
spin_lock(&scd->scd_lock);
- if (!swi_deschedule_workitem(&scd->scd_buf_wi))
+ if (!swi_deschedule_workitem(&scd->scd_buf_wi)) {
+ spin_unlock(&scd->scd_lock);
return 0;
+ }
if (scd->scd_buf_nposted > 0) {
CDEBUG(D_NET, "waiting for %d posted buffers to unlink",
@@ -1115,7 +1117,7 @@ srpc_del_client_rpc_timer (srpc_client_rpc_t *rpc)
if (rpc->crpc_timeout == 0)
return;
- /* timer sucessfully defused */
+ /* timer successfully defused */
if (stt_del_timer(&rpc->crpc_timer))
return;
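
Note that the srpc_finish_service() hunk above is a behavioural fix, not just cleanup: the early "return 0" taken when swi_deschedule_workitem() fails previously left scd_lock held. A sketch of the lock-balancing rule it enforces, as a hypothetical helper:

	#include <linux/spinlock.h>

	static int example_try_stop(spinlock_t *lock, bool busy)
	{
		spin_lock(lock);
		if (busy) {
			spin_unlock(lock);	/* drop the lock on every exit path */
			return 0;
		}
		/* ... normal teardown under the lock ... */
		spin_unlock(lock);
		return 1;
	}
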
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index 2c078550277..3bf4afb42ff 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -195,7 +195,7 @@ stt_timer_main (void *arg)
int
stt_start_timer_thread (void)
{
- task_t *task;
+ struct task_struct *task;
LASSERT(!stt_data.stt_shuttingdown);
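
The timer.c hunk replaces the libcfs task_t typedef with the native struct task_struct, which is what kthread_run() actually returns. A hypothetical sketch of that usage:

	#include <linux/kthread.h>
	#include <linux/err.h>

	static int example_start_thread(int (*fn)(void *), void *arg)
	{
		struct task_struct *task;

		task = kthread_run(fn, arg, "example_thread");
		if (IS_ERR(task))
			return PTR_ERR(task);	/* thread failed to start */
		return 0;
	}
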
diff --git a/drivers/staging/lustre/lustre/Kconfig b/drivers/staging/lustre/lustre/Kconfig
index e0eb8303a50..4e898e49186 100644
--- a/drivers/staging/lustre/lustre/Kconfig
+++ b/drivers/staging/lustre/lustre/Kconfig
@@ -1,6 +1,6 @@
config LUSTRE_FS
tristate "Lustre file system client support"
- depends on STAGING && INET && BROKEN
+ depends on INET && m
select LNET
select CRYPTO
select CRYPTO_CRC32
@@ -43,9 +43,18 @@ config LUSTRE_OBD_MAX_IOCTL_BUFFER
config LUSTRE_DEBUG_EXPENSIVE_CHECK
bool "Enable Lustre DEBUG checks"
depends on LUSTRE_FS
- default false
help
This option is mainly for debug purpose. It enables Lustre code to do
expensive checks that may have a performance impact.
Use with caution. If unsure, say N.
+
+config LUSTRE_TRANSLATE_ERRNOS
+ bool
+ depends on LUSTRE_FS && !X86
+ default y
+
+config LUSTRE_LLITE_LLOOP
+ bool "Lustre virtual block device"
+ depends on LUSTRE_FS && BLOCK
+ default m
diff --git a/drivers/staging/lustre/lustre/Makefile b/drivers/staging/lustre/lustre/Makefile
index 3fb94fc1206..d1eb0bdef06 100644
--- a/drivers/staging/lustre/lustre/Makefile
+++ b/drivers/staging/lustre/lustre/Makefile
@@ -1,2 +1,2 @@
-obj-$(CONFIG_LUSTRE_FS) := fid/ lvfs/ obdclass/ ptlrpc/ obdecho/ mgc/ lov/ \
- osc/ mdc/ lmv/ llite/ fld/ libcfs/
+obj-$(CONFIG_LUSTRE_FS) += libcfs/ lvfs/ obdclass/ ptlrpc/ fld/ osc/ mgc/ \
+ fid/ lov/ mdc/ lmv/ llite/ obdecho/
diff --git a/drivers/staging/lustre/lustre/fid/Makefile b/drivers/staging/lustre/lustre/fid/Makefile
index b8d6d21b39f..ed21bea162b 100644
--- a/drivers/staging/lustre/lustre/fid/Makefile
+++ b/drivers/staging/lustre/lustre/fid/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_LUSTRE_FS) += fid.o
-fid-y := fid_handler.o fid_store.o fid_request.o lproc_fid.o fid_lib.o
+fid-y := fid_request.o lproc_fid.o fid_lib.o
ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/fid/fid_handler.c b/drivers/staging/lustre/lustre/fid/fid_handler.c
deleted file mode 100644
index bbbb3cfe57b..00000000000
--- a/drivers/staging/lustre/lustre/fid/fid_handler.c
+++ /dev/null
@@ -1,661 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/fid/fid_handler.c
- *
- * Lustre Sequence Manager
- *
- * Author: Yury Umanets <umka@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_FID
-
-# include <linux/libcfs/libcfs.h>
-# include <linux/module.h>
-
-#include <obd.h>
-#include <obd_class.h>
-#include <dt_object.h>
-#include <md_object.h>
-#include <obd_support.h>
-#include <lustre_req_layout.h>
-#include <lustre_fid.h>
-#include "fid_internal.h"
-
-int client_fid_init(struct obd_device *obd,
- struct obd_export *exp, enum lu_cli_type type)
-{
- struct client_obd *cli = &obd->u.cli;
- char *prefix;
- int rc;
- ENTRY;
-
- OBD_ALLOC_PTR(cli->cl_seq);
- if (cli->cl_seq == NULL)
- RETURN(-ENOMEM);
-
- OBD_ALLOC(prefix, MAX_OBD_NAME + 5);
- if (prefix == NULL)
- GOTO(out_free_seq, rc = -ENOMEM);
-
- snprintf(prefix, MAX_OBD_NAME + 5, "cli-%s", obd->obd_name);
-
- /* Init client side sequence-manager */
- rc = seq_client_init(cli->cl_seq, exp, type, prefix, NULL);
- OBD_FREE(prefix, MAX_OBD_NAME + 5);
- if (rc)
- GOTO(out_free_seq, rc);
-
- RETURN(rc);
-out_free_seq:
- OBD_FREE_PTR(cli->cl_seq);
- cli->cl_seq = NULL;
- return rc;
-}
-EXPORT_SYMBOL(client_fid_init);
-
-int client_fid_fini(struct obd_device *obd)
-{
- struct client_obd *cli = &obd->u.cli;
- ENTRY;
-
- if (cli->cl_seq != NULL) {
- seq_client_fini(cli->cl_seq);
- OBD_FREE_PTR(cli->cl_seq);
- cli->cl_seq = NULL;
- }
-
- RETURN(0);
-}
-EXPORT_SYMBOL(client_fid_fini);
-
-static void seq_server_proc_fini(struct lu_server_seq *seq);
-
-/* Assigns client to sequence controller node. */
-int seq_server_set_cli(struct lu_server_seq *seq,
- struct lu_client_seq *cli,
- const struct lu_env *env)
-{
- int rc = 0;
- ENTRY;
-
- /*
- * Ask client for new range, assign that range to ->seq_space and write
- * seq state to backing store should be atomic.
- */
- mutex_lock(&seq->lss_mutex);
-
- if (cli == NULL) {
- CDEBUG(D_INFO, "%s: Detached sequence client %s\n",
- seq->lss_name, cli->lcs_name);
- seq->lss_cli = cli;
- GOTO(out_up, rc = 0);
- }
-
- if (seq->lss_cli != NULL) {
- CDEBUG(D_HA, "%s: Sequence controller is already "
- "assigned\n", seq->lss_name);
- GOTO(out_up, rc = -EEXIST);
- }
-
- CDEBUG(D_INFO, "%s: Attached sequence controller %s\n",
- seq->lss_name, cli->lcs_name);
-
- seq->lss_cli = cli;
- cli->lcs_space.lsr_index = seq->lss_site->ss_node_id;
- EXIT;
-out_up:
- mutex_unlock(&seq->lss_mutex);
- return rc;
-}
-EXPORT_SYMBOL(seq_server_set_cli);
-/*
- * allocate \a w units of sequence from range \a from.
- */
-static inline void range_alloc(struct lu_seq_range *to,
- struct lu_seq_range *from,
- __u64 width)
-{
- width = min(range_space(from), width);
- to->lsr_start = from->lsr_start;
- to->lsr_end = from->lsr_start + width;
- from->lsr_start += width;
-}
-
-/**
- * On controller node, allocate new super sequence for regular sequence server.
- * As this super sequence controller, this node suppose to maintain fld
- * and update index.
- * \a out range always has currect mds node number of requester.
- */
-
-static int __seq_server_alloc_super(struct lu_server_seq *seq,
- struct lu_seq_range *out,
- const struct lu_env *env)
-{
- struct lu_seq_range *space = &seq->lss_space;
- int rc;
- ENTRY;
-
- LASSERT(range_is_sane(space));
-
- if (range_is_exhausted(space)) {
- CERROR("%s: Sequences space is exhausted\n",
- seq->lss_name);
- RETURN(-ENOSPC);
- } else {
- range_alloc(out, space, seq->lss_width);
- }
-
- rc = seq_store_update(env, seq, out, 1 /* sync */);
-
- LCONSOLE_INFO("%s: super-sequence allocation rc = %d " DRANGE"\n",
- seq->lss_name, rc, PRANGE(out));
-
- RETURN(rc);
-}
-
-int seq_server_alloc_super(struct lu_server_seq *seq,
- struct lu_seq_range *out,
- const struct lu_env *env)
-{
- int rc;
- ENTRY;
-
- mutex_lock(&seq->lss_mutex);
- rc = __seq_server_alloc_super(seq, out, env);
- mutex_unlock(&seq->lss_mutex);
-
- RETURN(rc);
-}
-
-static int __seq_set_init(const struct lu_env *env,
- struct lu_server_seq *seq)
-{
- struct lu_seq_range *space = &seq->lss_space;
- int rc;
-
- range_alloc(&seq->lss_lowater_set, space, seq->lss_set_width);
- range_alloc(&seq->lss_hiwater_set, space, seq->lss_set_width);
-
- rc = seq_store_update(env, seq, NULL, 1);
-
- return rc;
-}
-
-/*
- * This function implements new seq allocation algorithm using async
- * updates to seq file on disk. ref bug 18857 for details.
- * there are four variable to keep track of this process
- *
- * lss_space; - available lss_space
- * lss_lowater_set; - lu_seq_range for all seqs before barrier, i.e. safe to use
- * lss_hiwater_set; - lu_seq_range after barrier, i.e. allocated but may be
- * not yet committed
- *
- * when lss_lowater_set reaches the end it is replaced with hiwater one and
- * a write operation is initiated to allocate new hiwater range.
- * if last seq write opearion is still not commited, current operation is
- * flaged as sync write op.
- */
-static int range_alloc_set(const struct lu_env *env,
- struct lu_seq_range *out,
- struct lu_server_seq *seq)
-{
- struct lu_seq_range *space = &seq->lss_space;
- struct lu_seq_range *loset = &seq->lss_lowater_set;
- struct lu_seq_range *hiset = &seq->lss_hiwater_set;
- int rc = 0;
-
- if (range_is_zero(loset))
- __seq_set_init(env, seq);
-
- if (OBD_FAIL_CHECK(OBD_FAIL_SEQ_ALLOC)) /* exhaust set */
- loset->lsr_start = loset->lsr_end;
-
- if (range_is_exhausted(loset)) {
- /* reached high water mark. */
- struct lu_device *dev = seq->lss_site->ss_lu->ls_top_dev;
- int obd_num_clients = dev->ld_obd->obd_num_exports;
- __u64 set_sz;
-
- /* calculate new seq width based on number of clients */
- set_sz = max(seq->lss_set_width,
- obd_num_clients * seq->lss_width);
- set_sz = min(range_space(space), set_sz);
-
- /* Switch to hiwater range now */
- *loset = *hiset;
- /* allocate new hiwater range */
- range_alloc(hiset, space, set_sz);
-
- /* update ondisk seq with new *space */
- rc = seq_store_update(env, seq, NULL, seq->lss_need_sync);
- }
-
- LASSERTF(!range_is_exhausted(loset) || range_is_sane(loset),
- DRANGE"\n", PRANGE(loset));
-
- if (rc == 0)
- range_alloc(out, loset, seq->lss_width);
-
- RETURN(rc);
-}
-
-static int __seq_server_alloc_meta(struct lu_server_seq *seq,
- struct lu_seq_range *out,
- const struct lu_env *env)
-{
- struct lu_seq_range *space = &seq->lss_space;
- int rc = 0;
-
- ENTRY;
-
- LASSERT(range_is_sane(space));
-
- /* Check if available space ends and allocate new super seq */
- if (range_is_exhausted(space)) {
- if (!seq->lss_cli) {
- CERROR("%s: No sequence controller is attached.\n",
- seq->lss_name);
- RETURN(-ENODEV);
- }
-
- rc = seq_client_alloc_super(seq->lss_cli, env);
- if (rc) {
- CERROR("%s: Can't allocate super-sequence, rc %d\n",
- seq->lss_name, rc);
- RETURN(rc);
- }
-
- /* Saving new range to allocation space. */
- *space = seq->lss_cli->lcs_space;
- LASSERT(range_is_sane(space));
- }
-
- rc = range_alloc_set(env, out, seq);
- if (rc != 0) {
- CERROR("%s: Allocated meta-sequence failed: rc = %d\n",
- seq->lss_name, rc);
- RETURN(rc);
- }
-
- CDEBUG(D_INFO, "%s: Allocated meta-sequence " DRANGE"\n",
- seq->lss_name, PRANGE(out));
-
- RETURN(rc);
-}
-
-int seq_server_alloc_meta(struct lu_server_seq *seq,
- struct lu_seq_range *out,
- const struct lu_env *env)
-{
- int rc;
- ENTRY;
-
- mutex_lock(&seq->lss_mutex);
- rc = __seq_server_alloc_meta(seq, out, env);
- mutex_unlock(&seq->lss_mutex);
-
- RETURN(rc);
-}
-EXPORT_SYMBOL(seq_server_alloc_meta);
-
-static int seq_server_handle(struct lu_site *site,
- const struct lu_env *env,
- __u32 opc, struct lu_seq_range *out)
-{
- int rc;
- struct seq_server_site *ss_site;
- ENTRY;
-
- ss_site = lu_site2seq(site);
-
- switch (opc) {
- case SEQ_ALLOC_META:
- if (!ss_site->ss_server_seq) {
- CERROR("Sequence server is not "
- "initialized\n");
- RETURN(-EINVAL);
- }
- rc = seq_server_alloc_meta(ss_site->ss_server_seq, out, env);
- break;
- case SEQ_ALLOC_SUPER:
- if (!ss_site->ss_control_seq) {
- CERROR("Sequence controller is not "
- "initialized\n");
- RETURN(-EINVAL);
- }
- rc = seq_server_alloc_super(ss_site->ss_control_seq, out, env);
- break;
- default:
- rc = -EINVAL;
- break;
- }
-
- RETURN(rc);
-}
-
-static int seq_req_handle(struct ptlrpc_request *req,
- const struct lu_env *env,
- struct seq_thread_info *info)
-{
- struct lu_seq_range *out, *tmp;
- struct lu_site *site;
- int rc = -EPROTO;
- __u32 *opc;
- ENTRY;
-
- LASSERT(!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY));
- site = req->rq_export->exp_obd->obd_lu_dev->ld_site;
- LASSERT(site != NULL);
-
- rc = req_capsule_server_pack(info->sti_pill);
- if (rc)
- RETURN(err_serious(rc));
-
- opc = req_capsule_client_get(info->sti_pill, &RMF_SEQ_OPC);
- if (opc != NULL) {
- out = req_capsule_server_get(info->sti_pill, &RMF_SEQ_RANGE);
- if (out == NULL)
- RETURN(err_serious(-EPROTO));
-
- tmp = req_capsule_client_get(info->sti_pill, &RMF_SEQ_RANGE);
-
- /* seq client passed mdt id, we need to pass that using out
- * range parameter */
-
- out->lsr_index = tmp->lsr_index;
- out->lsr_flags = tmp->lsr_flags;
- rc = seq_server_handle(site, env, *opc, out);
- } else
- rc = err_serious(-EPROTO);
-
- RETURN(rc);
-}
-
-/* context key constructor/destructor: seq_key_init, seq_key_fini */
-LU_KEY_INIT_FINI(seq, struct seq_thread_info);
-
-/* context key: seq_thread_key */
-LU_CONTEXT_KEY_DEFINE(seq, LCT_MD_THREAD | LCT_DT_THREAD);
-
-static void seq_thread_info_init(struct ptlrpc_request *req,
- struct seq_thread_info *info)
-{
- info->sti_pill = &req->rq_pill;
- /* Init request capsule */
- req_capsule_init(info->sti_pill, req, RCL_SERVER);
- req_capsule_set(info->sti_pill, &RQF_SEQ_QUERY);
-}
-
-static void seq_thread_info_fini(struct seq_thread_info *info)
-{
- req_capsule_fini(info->sti_pill);
-}
-
-int seq_handle(struct ptlrpc_request *req)
-{
- const struct lu_env *env;
- struct seq_thread_info *info;
- int rc;
-
- env = req->rq_svc_thread->t_env;
- LASSERT(env != NULL);
-
- info = lu_context_key_get(&env->le_ctx, &seq_thread_key);
- LASSERT(info != NULL);
-
- seq_thread_info_init(req, info);
- rc = seq_req_handle(req, env, info);
- /* XXX: we don't need replay but MDT assign transno in any case,
- * remove it manually before reply*/
- lustre_msg_set_transno(req->rq_repmsg, 0);
- seq_thread_info_fini(info);
-
- return rc;
-}
-EXPORT_SYMBOL(seq_handle);
-
-/*
- * Entry point for handling FLD RPCs called from MDT.
- */
-int seq_query(struct com_thread_info *info)
-{
- return seq_handle(info->cti_pill->rc_req);
-}
-EXPORT_SYMBOL(seq_query);
-
-
-#ifdef LPROCFS
-static int seq_server_proc_init(struct lu_server_seq *seq)
-{
- int rc;
- ENTRY;
-
- seq->lss_proc_dir = lprocfs_register(seq->lss_name,
- seq_type_proc_dir,
- NULL, NULL);
- if (IS_ERR(seq->lss_proc_dir)) {
- rc = PTR_ERR(seq->lss_proc_dir);
- RETURN(rc);
- }
-
- rc = lprocfs_add_vars(seq->lss_proc_dir,
- seq_server_proc_list, seq);
- if (rc) {
- CERROR("%s: Can't init sequence manager "
- "proc, rc %d\n", seq->lss_name, rc);
- GOTO(out_cleanup, rc);
- }
-
- RETURN(0);
-
-out_cleanup:
- seq_server_proc_fini(seq);
- return rc;
-}
-
-static void seq_server_proc_fini(struct lu_server_seq *seq)
-{
- ENTRY;
- if (seq->lss_proc_dir != NULL) {
- if (!IS_ERR(seq->lss_proc_dir))
- lprocfs_remove(&seq->lss_proc_dir);
- seq->lss_proc_dir = NULL;
- }
- EXIT;
-}
-#else
-static int seq_server_proc_init(struct lu_server_seq *seq)
-{
- return 0;
-}
-
-static void seq_server_proc_fini(struct lu_server_seq *seq)
-{
- return;
-}
-#endif
-
-
-int seq_server_init(struct lu_server_seq *seq,
- struct dt_device *dev,
- const char *prefix,
- enum lu_mgr_type type,
- struct seq_server_site *ss,
- const struct lu_env *env)
-{
- int rc, is_srv = (type == LUSTRE_SEQ_SERVER);
- ENTRY;
-
- LASSERT(dev != NULL);
- LASSERT(prefix != NULL);
- LASSERT(ss != NULL);
- LASSERT(ss->ss_lu != NULL);
-
- seq->lss_cli = NULL;
- seq->lss_type = type;
- seq->lss_site = ss;
- range_init(&seq->lss_space);
-
- range_init(&seq->lss_lowater_set);
- range_init(&seq->lss_hiwater_set);
- seq->lss_set_width = LUSTRE_SEQ_BATCH_WIDTH;
-
- mutex_init(&seq->lss_mutex);
-
- seq->lss_width = is_srv ?
- LUSTRE_SEQ_META_WIDTH : LUSTRE_SEQ_SUPER_WIDTH;
-
- snprintf(seq->lss_name, sizeof(seq->lss_name),
- "%s-%s", (is_srv ? "srv" : "ctl"), prefix);
-
- rc = seq_store_init(seq, env, dev);
- if (rc)
- GOTO(out, rc);
- /* Request backing store for saved sequence info. */
- rc = seq_store_read(seq, env);
- if (rc == -ENODATA) {
-
- /* Nothing is read, init by default value. */
- seq->lss_space = is_srv ?
- LUSTRE_SEQ_ZERO_RANGE:
- LUSTRE_SEQ_SPACE_RANGE;
-
- LASSERT(ss != NULL);
- seq->lss_space.lsr_index = ss->ss_node_id;
- LCONSOLE_INFO("%s: No data found "
- "on store. Initialize space\n",
- seq->lss_name);
-
- rc = seq_store_update(env, seq, NULL, 0);
- if (rc) {
- CERROR("%s: Can't write space data, "
- "rc %d\n", seq->lss_name, rc);
- }
- } else if (rc) {
- CERROR("%s: Can't read space data, rc %d\n",
- seq->lss_name, rc);
- GOTO(out, rc);
- }
-
- if (is_srv) {
- LASSERT(range_is_sane(&seq->lss_space));
- } else {
- LASSERT(!range_is_zero(&seq->lss_space) &&
- range_is_sane(&seq->lss_space));
- }
-
- rc = seq_server_proc_init(seq);
- if (rc)
- GOTO(out, rc);
-
- EXIT;
-out:
- if (rc)
- seq_server_fini(seq, env);
- return rc;
-}
-EXPORT_SYMBOL(seq_server_init);
-
-void seq_server_fini(struct lu_server_seq *seq,
- const struct lu_env *env)
-{
- ENTRY;
-
- seq_server_proc_fini(seq);
- seq_store_fini(seq, env);
-
- EXIT;
-}
-EXPORT_SYMBOL(seq_server_fini);
-
-int seq_site_fini(const struct lu_env *env, struct seq_server_site *ss)
-{
- if (ss == NULL)
- RETURN(0);
-
- if (ss->ss_server_seq) {
- seq_server_fini(ss->ss_server_seq, env);
- OBD_FREE_PTR(ss->ss_server_seq);
- ss->ss_server_seq = NULL;
- }
-
- if (ss->ss_control_seq) {
- seq_server_fini(ss->ss_control_seq, env);
- OBD_FREE_PTR(ss->ss_control_seq);
- ss->ss_control_seq = NULL;
- }
-
- if (ss->ss_client_seq) {
- seq_client_fini(ss->ss_client_seq);
- OBD_FREE_PTR(ss->ss_client_seq);
- ss->ss_client_seq = NULL;
- }
-
- RETURN(0);
-}
-EXPORT_SYMBOL(seq_site_fini);
-
-proc_dir_entry_t *seq_type_proc_dir = NULL;
-
-static int __init fid_mod_init(void)
-{
- seq_type_proc_dir = lprocfs_register(LUSTRE_SEQ_NAME,
- proc_lustre_root,
- NULL, NULL);
- if (IS_ERR(seq_type_proc_dir))
- return PTR_ERR(seq_type_proc_dir);
-
- LU_CONTEXT_KEY_INIT(&seq_thread_key);
- lu_context_key_register(&seq_thread_key);
- return 0;
-}
-
-static void __exit fid_mod_exit(void)
-{
- lu_context_key_degister(&seq_thread_key);
- if (seq_type_proc_dir != NULL && !IS_ERR(seq_type_proc_dir)) {
- lprocfs_remove(&seq_type_proc_dir);
- seq_type_proc_dir = NULL;
- }
-}
-
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre FID Module");
-MODULE_LICENSE("GPL");
-
-cfs_module(fid, "0.1.0", fid_mod_init, fid_mod_exit);
diff --git a/drivers/staging/lustre/lustre/fid/fid_internal.h b/drivers/staging/lustre/lustre/fid/fid_internal.h
index 407a7435583..1dbe46be0f4 100644
--- a/drivers/staging/lustre/lustre/fid/fid_internal.h
+++ b/drivers/staging/lustre/lustre/fid/fid_internal.h
@@ -41,44 +41,16 @@
#define __FID_INTERNAL_H
#include <lustre/lustre_idl.h>
-#include <dt_object.h>
-
#include <linux/libcfs/libcfs.h>
-struct seq_thread_info {
- struct req_capsule *sti_pill;
- struct lu_seq_range sti_space;
- struct lu_buf sti_buf;
-};
-
-enum {
- SEQ_TXN_STORE_CREDITS = 20
-};
-
-extern struct lu_context_key seq_thread_key;
-
+/* Functions used internally in module. */
int seq_client_alloc_super(struct lu_client_seq *seq,
const struct lu_env *env);
-/* Store API functions. */
-int seq_store_init(struct lu_server_seq *seq,
- const struct lu_env *env,
- struct dt_device *dt);
-void seq_store_fini(struct lu_server_seq *seq,
- const struct lu_env *env);
-
-int seq_store_read(struct lu_server_seq *seq,
- const struct lu_env *env);
-
-int seq_store_update(const struct lu_env *env, struct lu_server_seq *seq,
- struct lu_seq_range *out, int sync);
-
-#ifdef LPROCFS
-extern struct lprocfs_vars seq_server_proc_list[];
+# ifdef LPROCFS
extern struct lprocfs_vars seq_client_proc_list[];
-#endif
-
+# endif
-extern proc_dir_entry_t *seq_type_proc_dir;
+extern struct proc_dir_entry *seq_type_proc_dir;
#endif /* __FID_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/fid/fid_lib.c b/drivers/staging/lustre/lustre/fid/fid_lib.c
index eaff51a555f..f03afdec027 100644
--- a/drivers/staging/lustre/lustre/fid/fid_lib.c
+++ b/drivers/staging/lustre/lustre/fid/fid_lib.c
@@ -43,11 +43,9 @@
#define DEBUG_SUBSYSTEM S_FID
-# include <linux/libcfs/libcfs.h>
-# include <linux/module.h>
-
-#include <obd.h>
-#include <lu_object.h>
+#include <linux/libcfs/libcfs.h>
+#include <linux/module.h>
+#include <lustre/lustre_idl.h>
#include <lustre_fid.h>
/**
@@ -56,9 +54,9 @@
*
* Fid namespace:
* <pre>
- * Normal FID: seq:64 [2^33,2^64-1] oid:32 ver:32
- * IGIF : 0:32, ino:32 gen:32 0:32
- * IDIF : 0:31, 1:1, ost-index:16, objd:48 0:32
+ * Normal FID: seq:64 [2^33,2^64-1] oid:32 ver:32
+ * IGIF : 0:32, ino:32 gen:32 0:32
+ * IDIF : 0:31, 1:1, ost-index:16, objd:48 0:32
* </pre>
*
* The first 0x400 sequences of normal FID are reserved for special purpose.
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index fcaaca7e2e0..66007b57018 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -27,7 +27,7 @@
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2013, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
@@ -42,15 +42,12 @@
#define DEBUG_SUBSYSTEM S_FID
-# include <linux/libcfs/libcfs.h>
-# include <linux/module.h>
+#include <linux/libcfs/libcfs.h>
+#include <linux/module.h>
#include <obd.h>
#include <obd_class.h>
-#include <dt_object.h>
-#include <md_object.h>
#include <obd_support.h>
-#include <lustre_req_layout.h>
#include <lustre_fid.h>
/* mdc RPC locks */
#include <lustre_mdc.h>
@@ -63,15 +60,14 @@ static int seq_client_rpc(struct lu_client_seq *seq,
struct obd_export *exp = seq->lcs_exp;
struct ptlrpc_request *req;
struct lu_seq_range *out, *in;
- __u32 *op;
- unsigned int debug_mask;
- int rc;
- ENTRY;
+ __u32 *op;
+ unsigned int debug_mask;
+ int rc;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
LUSTRE_MDS_VERSION, SEQ_QUERY);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
/* Init operation code */
op = req_capsule_client_get(&req->rq_pill, &RMF_SEQ_OPC);
@@ -137,7 +133,6 @@ static int seq_client_rpc(struct lu_client_seq *seq,
CDEBUG_LIMIT(debug_mask, "%s: Allocated %s-sequence "DRANGE"]\n",
seq->lcs_name, opcname, PRANGE(output));
- EXIT;
out_req:
ptlrpc_req_finished(req);
return rc;
@@ -148,27 +143,24 @@ int seq_client_alloc_super(struct lu_client_seq *seq,
const struct lu_env *env)
{
int rc;
- ENTRY;
mutex_lock(&seq->lcs_mutex);
if (seq->lcs_srv) {
- LASSERT(env != NULL);
- rc = seq_server_alloc_super(seq->lcs_srv, &seq->lcs_space,
- env);
+ rc = 0;
} else {
/* Check whether the connection to seq controller has been
* setup (lcs_exp != NULL) */
if (seq->lcs_exp == NULL) {
mutex_unlock(&seq->lcs_mutex);
- RETURN(-EINPROGRESS);
+ return -EINPROGRESS;
}
rc = seq_client_rpc(seq, &seq->lcs_space,
SEQ_ALLOC_SUPER, "super");
}
mutex_unlock(&seq->lcs_mutex);
- RETURN(rc);
+ return rc;
}
/* Request sequence-controller node to allocate new meta-sequence. */
@@ -176,11 +168,9 @@ static int seq_client_alloc_meta(const struct lu_env *env,
struct lu_client_seq *seq)
{
int rc;
- ENTRY;
if (seq->lcs_srv) {
- LASSERT(env != NULL);
- rc = seq_server_alloc_meta(seq->lcs_srv, &seq->lcs_space, env);
+ rc = 0;
} else {
do {
/* If meta server return -EINPROGRESS or EAGAIN,
@@ -191,7 +181,8 @@ static int seq_client_alloc_meta(const struct lu_env *env,
SEQ_ALLOC_META, "meta");
} while (rc == -EINPROGRESS || rc == -EAGAIN);
}
- RETURN(rc);
+
+ return rc;
}
/* Allocate new sequence for client. */
@@ -199,7 +190,6 @@ static int seq_client_alloc_seq(const struct lu_env *env,
struct lu_client_seq *seq, seqno_t *seqnr)
{
int rc;
- ENTRY;
LASSERT(range_is_sane(&seq->lcs_space));
@@ -208,7 +198,7 @@ static int seq_client_alloc_seq(const struct lu_env *env,
if (rc) {
CERROR("%s: Can't allocate new meta-sequence,"
"rc %d\n", seq->lcs_name, rc);
- RETURN(rc);
+ return rc;
} else {
CDEBUG(D_INFO, "%s: New range - "DRANGE"\n",
seq->lcs_name, PRANGE(&seq->lcs_space));
@@ -224,7 +214,7 @@ static int seq_client_alloc_seq(const struct lu_env *env,
CDEBUG(D_INFO, "%s: Allocated sequence ["LPX64"]\n", seq->lcs_name,
*seqnr);
- RETURN(rc);
+ return rc;
}
static int seq_fid_alloc_prep(struct lu_client_seq *seq,
@@ -312,7 +302,6 @@ int seq_client_alloc_fid(const struct lu_env *env,
{
wait_queue_t link;
int rc;
- ENTRY;
LASSERT(seq != NULL);
LASSERT(fid != NULL);
@@ -344,7 +333,7 @@ int seq_client_alloc_fid(const struct lu_env *env,
"rc %d\n", seq->lcs_name, rc);
seq_fid_alloc_fini(seq);
mutex_unlock(&seq->lcs_mutex);
- RETURN(rc);
+ return rc;
}
CDEBUG(D_INFO, "%s: Switch to sequence "
@@ -368,7 +357,7 @@ int seq_client_alloc_fid(const struct lu_env *env,
mutex_unlock(&seq->lcs_mutex);
CDEBUG(D_INFO, "%s: Allocated FID "DFID"\n", seq->lcs_name, PFID(fid));
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(seq_client_alloc_fid);
@@ -409,13 +398,21 @@ void seq_client_flush(struct lu_client_seq *seq)
}
EXPORT_SYMBOL(seq_client_flush);
-static void seq_client_proc_fini(struct lu_client_seq *seq);
-
+static void seq_client_proc_fini(struct lu_client_seq *seq)
+{
#ifdef LPROCFS
+ if (seq->lcs_proc_dir) {
+ if (!IS_ERR(seq->lcs_proc_dir))
+ lprocfs_remove(&seq->lcs_proc_dir);
+ seq->lcs_proc_dir = NULL;
+ }
+#endif /* LPROCFS */
+}
+
static int seq_client_proc_init(struct lu_client_seq *seq)
{
+#ifdef LPROCFS
int rc;
- ENTRY;
seq->lcs_proc_dir = lprocfs_register(seq->lcs_name,
seq_type_proc_dir,
@@ -425,7 +422,7 @@ static int seq_client_proc_init(struct lu_client_seq *seq)
CERROR("%s: LProcFS failed in seq-init\n",
seq->lcs_name);
rc = PTR_ERR(seq->lcs_proc_dir);
- RETURN(rc);
+ return rc;
}
rc = lprocfs_add_vars(seq->lcs_proc_dir,
@@ -436,34 +433,16 @@ static int seq_client_proc_init(struct lu_client_seq *seq)
GOTO(out_cleanup, rc);
}
- RETURN(0);
+ return 0;
out_cleanup:
seq_client_proc_fini(seq);
return rc;
-}
-static void seq_client_proc_fini(struct lu_client_seq *seq)
-{
- ENTRY;
- if (seq->lcs_proc_dir) {
- if (!IS_ERR(seq->lcs_proc_dir))
- lprocfs_remove(&seq->lcs_proc_dir);
- seq->lcs_proc_dir = NULL;
- }
- EXIT;
-}
-#else
-static int seq_client_proc_init(struct lu_client_seq *seq)
-{
+#else /* LPROCFS */
return 0;
-}
-
-static void seq_client_proc_fini(struct lu_client_seq *seq)
-{
- return;
-}
#endif
+}
int seq_client_init(struct lu_client_seq *seq,
struct obd_export *exp,
@@ -472,7 +451,6 @@ int seq_client_init(struct lu_client_seq *seq,
struct lu_server_seq *srv)
{
int rc;
- ENTRY;
LASSERT(seq != NULL);
LASSERT(prefix != NULL);
@@ -501,14 +479,12 @@ int seq_client_init(struct lu_client_seq *seq,
rc = seq_client_proc_init(seq);
if (rc)
seq_client_fini(seq);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(seq_client_init);
void seq_client_fini(struct lu_client_seq *seq)
{
- ENTRY;
-
seq_client_proc_fini(seq);
if (seq->lcs_exp != NULL) {
@@ -517,6 +493,78 @@ void seq_client_fini(struct lu_client_seq *seq)
}
seq->lcs_srv = NULL;
- EXIT;
}
EXPORT_SYMBOL(seq_client_fini);
+
+int client_fid_init(struct obd_device *obd,
+ struct obd_export *exp, enum lu_cli_type type)
+{
+ struct client_obd *cli = &obd->u.cli;
+ char *prefix;
+ int rc;
+
+ OBD_ALLOC_PTR(cli->cl_seq);
+ if (cli->cl_seq == NULL)
+ return -ENOMEM;
+
+ OBD_ALLOC(prefix, MAX_OBD_NAME + 5);
+ if (prefix == NULL)
+ GOTO(out_free_seq, rc = -ENOMEM);
+
+ snprintf(prefix, MAX_OBD_NAME + 5, "cli-%s", obd->obd_name);
+
+ /* Init client side sequence-manager */
+ rc = seq_client_init(cli->cl_seq, exp, type, prefix, NULL);
+ OBD_FREE(prefix, MAX_OBD_NAME + 5);
+ if (rc)
+ GOTO(out_free_seq, rc);
+
+ return rc;
+out_free_seq:
+ OBD_FREE_PTR(cli->cl_seq);
+ cli->cl_seq = NULL;
+ return rc;
+}
+EXPORT_SYMBOL(client_fid_init);
+
+int client_fid_fini(struct obd_device *obd)
+{
+ struct client_obd *cli = &obd->u.cli;
+
+ if (cli->cl_seq != NULL) {
+ seq_client_fini(cli->cl_seq);
+ OBD_FREE_PTR(cli->cl_seq);
+ cli->cl_seq = NULL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(client_fid_fini);
+
+struct proc_dir_entry *seq_type_proc_dir;
+
+static int __init fid_mod_init(void)
+{
+ seq_type_proc_dir = lprocfs_register(LUSTRE_SEQ_NAME,
+ proc_lustre_root,
+ NULL, NULL);
+ if (IS_ERR(seq_type_proc_dir))
+ return PTR_ERR(seq_type_proc_dir);
+ return 0;
+}
+
+static void __exit fid_mod_exit(void)
+{
+ if (seq_type_proc_dir != NULL && !IS_ERR(seq_type_proc_dir)) {
+ lprocfs_remove(&seq_type_proc_dir);
+ seq_type_proc_dir = NULL;
+ }
+}
+
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Lustre FID Module");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.1.0");
+
+module_init(fid_mod_init);
+module_exit(fid_mod_exit);
diff --git a/drivers/staging/lustre/lustre/fid/fid_store.c b/drivers/staging/lustre/lustre/fid/fid_store.c
deleted file mode 100644
index a90e6e37d68..00000000000
--- a/drivers/staging/lustre/lustre/fid/fid_store.c
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2013, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/fid/fid_store.c
- *
- * Lustre Sequence Manager
- *
- * Author: Yury Umanets <umka@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_FID
-
-# include <linux/libcfs/libcfs.h>
-# include <linux/module.h>
-
-#include <obd.h>
-#include <obd_class.h>
-#include <dt_object.h>
-#include <md_object.h>
-#include <obd_support.h>
-#include <lustre_req_layout.h>
-#include <lustre_fid.h>
-#include "fid_internal.h"
-
-
-static struct lu_buf *seq_store_buf(struct seq_thread_info *info)
-{
- struct lu_buf *buf;
-
- buf = &info->sti_buf;
- buf->lb_buf = &info->sti_space;
- buf->lb_len = sizeof(info->sti_space);
- return buf;
-}
-
-struct seq_update_callback {
- struct dt_txn_commit_cb suc_cb;
- struct lu_server_seq *suc_seq;
-};
-
-void seq_update_cb(struct lu_env *env, struct thandle *th,
- struct dt_txn_commit_cb *cb, int err)
-{
- struct seq_update_callback *ccb;
-
- ccb = container_of0(cb, struct seq_update_callback, suc_cb);
-
- LASSERT(ccb->suc_seq != NULL);
-
- ccb->suc_seq->lss_need_sync = 0;
- OBD_FREE_PTR(ccb);
-}
-
-int seq_update_cb_add(struct thandle *th, struct lu_server_seq *seq)
-{
- struct seq_update_callback *ccb;
- struct dt_txn_commit_cb *dcb;
- int rc;
-
- OBD_ALLOC_PTR(ccb);
- if (ccb == NULL)
- return -ENOMEM;
-
- ccb->suc_seq = seq;
- seq->lss_need_sync = 1;
-
- dcb = &ccb->suc_cb;
- dcb->dcb_func = seq_update_cb;
- INIT_LIST_HEAD(&dcb->dcb_linkage);
- strncpy(dcb->dcb_name, "seq_update_cb", MAX_COMMIT_CB_STR_LEN);
- dcb->dcb_name[MAX_COMMIT_CB_STR_LEN - 1] = '\0';
-
- rc = dt_trans_cb_add(th, dcb);
- if (rc)
- OBD_FREE_PTR(ccb);
- return rc;
-}
-
-/* This function implies that caller takes care about locking. */
-int seq_store_update(const struct lu_env *env, struct lu_server_seq *seq,
- struct lu_seq_range *out, int sync)
-{
- struct dt_device *dt_dev = lu2dt_dev(seq->lss_obj->do_lu.lo_dev);
- struct seq_thread_info *info;
- struct thandle *th;
- loff_t pos = 0;
- int rc;
-
- info = lu_context_key_get(&env->le_ctx, &seq_thread_key);
- LASSERT(info != NULL);
-
- th = dt_trans_create(env, dt_dev);
- if (IS_ERR(th))
- RETURN(PTR_ERR(th));
-
- rc = dt_declare_record_write(env, seq->lss_obj,
- sizeof(struct lu_seq_range), 0, th);
- if (rc)
- GOTO(exit, rc);
-
- if (out != NULL) {
- rc = fld_declare_server_create(env,
- seq->lss_site->ss_server_fld,
- out, th);
- if (rc)
- GOTO(exit, rc);
- }
-
- rc = dt_trans_start_local(env, dt_dev, th);
- if (rc)
- GOTO(exit, rc);
-
- /* Store ranges in le format. */
- range_cpu_to_le(&info->sti_space, &seq->lss_space);
-
- rc = dt_record_write(env, seq->lss_obj, seq_store_buf(info), &pos, th);
- if (rc) {
- CERROR("%s: Can't write space data, rc %d\n",
- seq->lss_name, rc);
- GOTO(exit, rc);
- } else if (out != NULL) {
- rc = fld_server_create(env, seq->lss_site->ss_server_fld, out,
- th);
- if (rc) {
- CERROR("%s: Can't Update fld database, rc %d\n",
- seq->lss_name, rc);
- GOTO(exit, rc);
- }
- }
- /* next sequence update will need sync until this update is committed
- * in case of sync operation this is not needed obviously */
- if (!sync)
- /* if callback can't be added then sync always */
- sync = !!seq_update_cb_add(th, seq);
-
- th->th_sync |= sync;
-exit:
- dt_trans_stop(env, dt_dev, th);
- return rc;
-}
-
-/*
- * This function implies that caller takes care about locking or locking is not
- * needed (init time).
- */
-int seq_store_read(struct lu_server_seq *seq,
- const struct lu_env *env)
-{
- struct seq_thread_info *info;
- loff_t pos = 0;
- int rc;
- ENTRY;
-
- info = lu_context_key_get(&env->le_ctx, &seq_thread_key);
- LASSERT(info != NULL);
-
- rc = seq->lss_obj->do_body_ops->dbo_read(env, seq->lss_obj,
- seq_store_buf(info),
- &pos, BYPASS_CAPA);
-
- if (rc == sizeof(info->sti_space)) {
- range_le_to_cpu(&seq->lss_space, &info->sti_space);
- CDEBUG(D_INFO, "%s: Space - "DRANGE"\n",
- seq->lss_name, PRANGE(&seq->lss_space));
- rc = 0;
- } else if (rc == 0) {
- rc = -ENODATA;
- } else if (rc > 0) {
- CERROR("%s: Read only %d bytes of %d\n", seq->lss_name,
- rc, (int)sizeof(info->sti_space));
- rc = -EIO;
- }
-
- RETURN(rc);
-}
-
-int seq_store_init(struct lu_server_seq *seq,
- const struct lu_env *env,
- struct dt_device *dt)
-{
- struct dt_object *dt_obj;
- struct lu_fid fid;
- struct lu_attr attr;
- struct dt_object_format dof;
- const char *name;
- int rc;
- ENTRY;
-
- name = seq->lss_type == LUSTRE_SEQ_SERVER ?
- LUSTRE_SEQ_SRV_NAME : LUSTRE_SEQ_CTL_NAME;
-
- if (seq->lss_type == LUSTRE_SEQ_SERVER)
- lu_local_obj_fid(&fid, FID_SEQ_SRV_OID);
- else
- lu_local_obj_fid(&fid, FID_SEQ_CTL_OID);
-
- memset(&attr, 0, sizeof(attr));
- attr.la_valid = LA_MODE;
- attr.la_mode = S_IFREG | 0666;
- dof.dof_type = DFT_REGULAR;
-
- dt_obj = dt_find_or_create(env, dt, &fid, &dof, &attr);
- if (!IS_ERR(dt_obj)) {
- seq->lss_obj = dt_obj;
- rc = 0;
- } else {
- CERROR("%s: Can't find \"%s\" obj %d\n",
- seq->lss_name, name, (int)PTR_ERR(dt_obj));
- rc = PTR_ERR(dt_obj);
- }
-
- RETURN(rc);
-}
-
-void seq_store_fini(struct lu_server_seq *seq,
- const struct lu_env *env)
-{
- ENTRY;
-
- if (seq->lss_obj != NULL) {
- if (!IS_ERR(seq->lss_obj))
- lu_object_put(env, &seq->lss_obj->do_lu);
- seq->lss_obj = NULL;
- }
-
- EXIT;
-}
diff --git a/drivers/staging/lustre/lustre/fid/lproc_fid.c b/drivers/staging/lustre/lustre/fid/lproc_fid.c
index af817a867f8..294070da9d4 100644
--- a/drivers/staging/lustre/lustre/fid/lproc_fid.c
+++ b/drivers/staging/lustre/lustre/fid/lproc_fid.c
@@ -65,7 +65,6 @@ lprocfs_fid_write_common(const char *buffer, unsigned long count,
{
struct lu_seq_range tmp;
int rc;
- ENTRY;
LASSERT(range != NULL);
@@ -73,9 +72,9 @@ lprocfs_fid_write_common(const char *buffer, unsigned long count,
(long long unsigned *)&tmp.lsr_start,
(long long unsigned *)&tmp.lsr_end);
if (rc != 2 || !range_is_sane(&tmp) || range_is_zero(&tmp))
- RETURN(-EINVAL);
+ return -EINVAL;
*range = tmp;
- RETURN(0);
+ return 0;
}
/* Client side procfs stuff */
@@ -85,7 +84,6 @@ lprocfs_fid_space_seq_write(struct file *file, const char *buffer,
{
struct lu_client_seq *seq = ((struct seq_file *)file->private_data)->private;
int rc;
- ENTRY;
LASSERT(seq != NULL);
@@ -99,7 +97,7 @@ lprocfs_fid_space_seq_write(struct file *file, const char *buffer,
mutex_unlock(&seq->lcs_mutex);
- RETURN(count);
+ return count;
}
static int
@@ -107,7 +105,6 @@ lprocfs_fid_space_seq_show(struct seq_file *m, void *unused)
{
struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
int rc;
- ENTRY;
LASSERT(seq != NULL);
@@ -115,7 +112,7 @@ lprocfs_fid_space_seq_show(struct seq_file *m, void *unused)
rc = seq_printf(m, "["LPX64" - "LPX64"]:%x:%s\n", PRANGE(&seq->lcs_space));
mutex_unlock(&seq->lcs_mutex);
- RETURN(rc);
+ return rc;
}
static ssize_t
@@ -125,13 +122,12 @@ lprocfs_fid_width_seq_write(struct file *file, const char *buffer,
struct lu_client_seq *seq = ((struct seq_file *)file->private_data)->private;
__u64 max;
int rc, val;
- ENTRY;
LASSERT(seq != NULL);
rc = lprocfs_write_helper(buffer, count, &val);
if (rc)
- RETURN(rc);
+ return rc;
mutex_lock(&seq->lcs_mutex);
if (seq->lcs_type == LUSTRE_SEQ_DATA)
@@ -150,7 +146,7 @@ lprocfs_fid_width_seq_write(struct file *file, const char *buffer,
mutex_unlock(&seq->lcs_mutex);
- RETURN(count);
+ return count;
}
static int
@@ -158,7 +154,6 @@ lprocfs_fid_width_seq_show(struct seq_file *m, void *unused)
{
struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
int rc;
- ENTRY;
LASSERT(seq != NULL);
@@ -166,7 +161,7 @@ lprocfs_fid_width_seq_show(struct seq_file *m, void *unused)
rc = seq_printf(m, LPU64"\n", seq->lcs_width);
mutex_unlock(&seq->lcs_mutex);
- RETURN(rc);
+ return rc;
}
static int
@@ -174,7 +169,6 @@ lprocfs_fid_fid_seq_show(struct seq_file *m, void *unused)
{
struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
int rc;
- ENTRY;
LASSERT(seq != NULL);
@@ -182,7 +176,7 @@ lprocfs_fid_fid_seq_show(struct seq_file *m, void *unused)
rc = seq_printf(m, DFID"\n", PFID(&seq->lcs_fid));
mutex_unlock(&seq->lcs_mutex);
- RETURN(rc);
+ return rc;
}
static int
@@ -191,7 +185,6 @@ lprocfs_fid_server_seq_show(struct seq_file *m, void *unused)
struct lu_client_seq *seq = (struct lu_client_seq *)m->private;
struct client_obd *cli;
int rc;
- ENTRY;
LASSERT(seq != NULL);
@@ -201,12 +194,9 @@ lprocfs_fid_server_seq_show(struct seq_file *m, void *unused)
} else {
rc = seq_printf(m, "%s\n", seq->lcs_srv->lss_name);
}
- RETURN(rc);
+ return rc;
}
-struct lprocfs_vars seq_server_proc_list[] = {
-};
-
LPROC_SEQ_FOPS(lprocfs_fid_space);
LPROC_SEQ_FOPS(lprocfs_fid_width);
LPROC_SEQ_FOPS_RO(lprocfs_fid_server);
diff --git a/drivers/staging/lustre/lustre/fld/Makefile b/drivers/staging/lustre/lustre/fld/Makefile
index e7f2881a1d9..90d46d84fbb 100644
--- a/drivers/staging/lustre/lustre/fld/Makefile
+++ b/drivers/staging/lustre/lustre/fld/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_LUSTRE_FS) += fld.o
-fld-y := fld_handler.o fld_request.o fld_cache.o fld_index.o lproc_fld.o
+fld-y := fld_request.o fld_cache.o lproc_fld.o
ccflags-y := -I$(src)/../include
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index 347f2ae83bc..25099cbe37e 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -45,7 +45,6 @@
# include <linux/libcfs/libcfs.h>
# include <linux/module.h>
-# include <linux/jbd.h>
# include <asm/div64.h>
#include <obd.h>
@@ -67,14 +66,13 @@ struct fld_cache *fld_cache_init(const char *name,
int cache_size, int cache_threshold)
{
struct fld_cache *cache;
- ENTRY;
LASSERT(name != NULL);
LASSERT(cache_threshold < cache_size);
OBD_ALLOC_PTR(cache);
if (cache == NULL)
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&cache->fci_entries_head);
INIT_LIST_HEAD(&cache->fci_lru);
@@ -94,7 +92,7 @@ struct fld_cache *fld_cache_init(const char *name,
CDEBUG(D_INFO, "%s: FLD cache - Size: %d, Threshold: %d\n",
cache->fci_name, cache_size, cache_threshold);
- RETURN(cache);
+ return cache;
}
/**
@@ -103,7 +101,6 @@ struct fld_cache *fld_cache_init(const char *name,
void fld_cache_fini(struct fld_cache *cache)
{
__u64 pct;
- ENTRY;
LASSERT(cache != NULL);
fld_cache_flush(cache);
@@ -121,8 +118,6 @@ void fld_cache_fini(struct fld_cache *cache)
CDEBUG(D_INFO, " Cache hits: "LPU64"%%\n", pct);
OBD_FREE_PTR(cache);
-
- EXIT;
}
/**
@@ -147,7 +142,6 @@ static void fld_fix_new_list(struct fld_cache *cache)
struct lu_seq_range *c_range;
struct lu_seq_range *n_range;
struct list_head *head = &cache->fci_entries_head;
- ENTRY;
restart_fixup:
@@ -200,8 +194,6 @@ restart_fixup:
c_range->lsr_end == n_range->lsr_end)
fld_cache_entry_delete(cache, f_curr);
}
-
- EXIT;
}
/**
@@ -227,12 +219,11 @@ static int fld_cache_shrink(struct fld_cache *cache)
struct fld_cache_entry *flde;
struct list_head *curr;
int num = 0;
- ENTRY;
LASSERT(cache != NULL);
if (cache->fci_cache_count < cache->fci_cache_size)
- RETURN(0);
+ return 0;
curr = cache->fci_lru.prev;
@@ -248,7 +239,7 @@ static int fld_cache_shrink(struct fld_cache *cache)
CDEBUG(D_INFO, "%s: FLD cache - Shrunk by "
"%d entries\n", cache->fci_name, num);
- RETURN(0);
+ return 0;
}
/**
@@ -256,14 +247,10 @@ static int fld_cache_shrink(struct fld_cache *cache)
*/
void fld_cache_flush(struct fld_cache *cache)
{
- ENTRY;
-
write_lock(&cache->fci_lock);
cache->fci_cache_size = 0;
fld_cache_shrink(cache);
write_unlock(&cache->fci_lock);
-
- EXIT;
}
/**
@@ -280,11 +267,9 @@ void fld_cache_punch_hole(struct fld_cache *cache,
const seqno_t new_end = range->lsr_end;
struct fld_cache_entry *fldt;
- ENTRY;
OBD_ALLOC_GFP(fldt, sizeof *fldt, GFP_ATOMIC);
if (!fldt) {
OBD_FREE_PTR(f_new);
- EXIT;
/* overlap is not allowed, so dont mess up list. */
return;
}
@@ -307,7 +292,6 @@ void fld_cache_punch_hole(struct fld_cache *cache,
fld_cache_entry_add(cache, fldt, &f_new->fce_list);
/* no need to fixup */
- EXIT;
}
/**
@@ -383,10 +367,10 @@ struct fld_cache_entry
OBD_ALLOC_PTR(f_new);
if (!f_new)
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
f_new->fce_range = *range;
- RETURN(f_new);
+ return f_new;
}
/**
@@ -405,7 +389,6 @@ int fld_cache_insert_nolock(struct fld_cache *cache,
const seqno_t new_start = f_new->fce_range.lsr_start;
const seqno_t new_end = f_new->fce_range.lsr_end;
__u32 new_flags = f_new->fce_range.lsr_flags;
- ENTRY;
/*
* Duplicate entries are eliminated in insert op.
@@ -441,7 +424,7 @@ int fld_cache_insert_nolock(struct fld_cache *cache,
/* Add new entry to cache and lru list. */
fld_cache_entry_add(cache, f_new, prev);
out:
- RETURN(0);
+ return 0;
}
int fld_cache_insert(struct fld_cache *cache,
@@ -452,7 +435,7 @@ int fld_cache_insert(struct fld_cache *cache,
flde = fld_cache_entry_create(range);
if (IS_ERR(flde))
- RETURN(PTR_ERR(flde));
+ return PTR_ERR(flde);
write_lock(&cache->fci_lock);
rc = fld_cache_insert_nolock(cache, flde);
@@ -460,7 +443,7 @@ int fld_cache_insert(struct fld_cache *cache,
if (rc)
OBD_FREE_PTR(flde);
- RETURN(rc);
+ return rc;
}
void fld_cache_delete_nolock(struct fld_cache *cache,
@@ -512,7 +495,7 @@ struct fld_cache_entry
}
}
- RETURN(got);
+ return got;
}
/**
@@ -522,12 +505,11 @@ struct fld_cache_entry
*fld_cache_entry_lookup(struct fld_cache *cache, struct lu_seq_range *range)
{
struct fld_cache_entry *got = NULL;
- ENTRY;
read_lock(&cache->fci_lock);
got = fld_cache_entry_lookup_nolock(cache, range);
read_unlock(&cache->fci_lock);
- RETURN(got);
+ return got;
}
/**
@@ -539,7 +521,6 @@ int fld_cache_lookup(struct fld_cache *cache,
struct fld_cache_entry *flde;
struct fld_cache_entry *prev = NULL;
struct list_head *head;
- ENTRY;
read_lock(&cache->fci_lock);
head = &cache->fci_entries_head;
@@ -558,9 +539,9 @@ int fld_cache_lookup(struct fld_cache *cache,
cache->fci_stat.fst_cache++;
read_unlock(&cache->fci_lock);
- RETURN(0);
+ return 0;
}
}
read_unlock(&cache->fci_lock);
- RETURN(-ENOENT);
+ return -ENOENT;
}
diff --git a/drivers/staging/lustre/lustre/fld/fld_handler.c b/drivers/staging/lustre/lustre/fld/fld_handler.c
deleted file mode 100644
index d2707ae4ad5..00000000000
--- a/drivers/staging/lustre/lustre/fld/fld_handler.c
+++ /dev/null
@@ -1,447 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2013, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/fld/fld_handler.c
- *
- * FLD (Fids Location Database)
- *
- * Author: Yury Umanets <umka@clusterfs.com>
- * Author: WangDi <wangdi@clusterfs.com>
- * Author: Pravin Shelar <pravin.shelar@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_FLD
-
-# include <linux/libcfs/libcfs.h>
-# include <linux/module.h>
-# include <linux/jbd.h>
-# include <asm/div64.h>
-
-#include <obd.h>
-#include <obd_class.h>
-#include <lustre_ver.h>
-#include <obd_support.h>
-#include <lprocfs_status.h>
-
-#include <md_object.h>
-#include <lustre_fid.h>
-#include <lustre_req_layout.h>
-#include "fld_internal.h"
-#include <lustre_fid.h>
-
-
-/* context key constructor/destructor: fld_key_init, fld_key_fini */
-LU_KEY_INIT_FINI(fld, struct fld_thread_info);
-
-/* context key: fld_thread_key */
-LU_CONTEXT_KEY_DEFINE(fld, LCT_MD_THREAD | LCT_DT_THREAD | LCT_MG_THREAD);
-
-proc_dir_entry_t *fld_type_proc_dir = NULL;
-
-static int __init fld_mod_init(void)
-{
- fld_type_proc_dir = lprocfs_register(LUSTRE_FLD_NAME,
- proc_lustre_root,
- NULL, NULL);
- if (IS_ERR(fld_type_proc_dir))
- return PTR_ERR(fld_type_proc_dir);
-
- LU_CONTEXT_KEY_INIT(&fld_thread_key);
- lu_context_key_register(&fld_thread_key);
- return 0;
-}
-
-static void __exit fld_mod_exit(void)
-{
- lu_context_key_degister(&fld_thread_key);
- if (fld_type_proc_dir != NULL && !IS_ERR(fld_type_proc_dir)) {
- lprocfs_remove(&fld_type_proc_dir);
- fld_type_proc_dir = NULL;
- }
-}
-
-int fld_declare_server_create(const struct lu_env *env,
- struct lu_server_fld *fld,
- struct lu_seq_range *range,
- struct thandle *th)
-{
- int rc;
-
- rc = fld_declare_index_create(env, fld, range, th);
- RETURN(rc);
-}
-EXPORT_SYMBOL(fld_declare_server_create);
-
-/**
- * Insert FLD index entry and update FLD cache.
- *
- * This function is called from the sequence allocator when a super-sequence
- * is granted to a server.
- */
-int fld_server_create(const struct lu_env *env, struct lu_server_fld *fld,
- struct lu_seq_range *range, struct thandle *th)
-{
- int rc;
-
- mutex_lock(&fld->lsf_lock);
- rc = fld_index_create(env, fld, range, th);
- mutex_unlock(&fld->lsf_lock);
-
- RETURN(rc);
-}
-EXPORT_SYMBOL(fld_server_create);
-
-/**
- * Lookup mds by seq, returns a range for given seq.
- *
- * If that entry is not cached in fld cache, request is sent to super
- * sequence controller node (MDT0). All other MDT[1...N] and client
- * cache fld entries, but this cache is not persistent.
- */
-int fld_server_lookup(const struct lu_env *env, struct lu_server_fld *fld,
- seqno_t seq, struct lu_seq_range *range)
-{
- struct lu_seq_range *erange;
- struct fld_thread_info *info;
- int rc;
- ENTRY;
-
- info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
- LASSERT(info != NULL);
- erange = &info->fti_lrange;
-
- /* Lookup it in the cache. */
- rc = fld_cache_lookup(fld->lsf_cache, seq, erange);
- if (rc == 0) {
- if (unlikely(fld_range_type(erange) != fld_range_type(range) &&
- !fld_range_is_any(range))) {
- CERROR("%s: FLD cache range "DRANGE" does not match"
- "requested flag %x: rc = %d\n", fld->lsf_name,
- PRANGE(erange), range->lsr_flags, -EIO);
- RETURN(-EIO);
- }
- *range = *erange;
- RETURN(0);
- }
-
- if (fld->lsf_obj) {
- /* On server side, all entries should be in cache.
- * If we can not find it in cache, just return error */
- CERROR("%s: Cannot find sequence "LPX64": rc = %d\n",
- fld->lsf_name, seq, -EIO);
- RETURN(-EIO);
- } else {
- LASSERT(fld->lsf_control_exp);
- /* send request to mdt0 i.e. super seq. controller.
- * This is temporary solution, long term solution is fld
- * replication on all mdt servers.
- */
- range->lsr_start = seq;
- rc = fld_client_rpc(fld->lsf_control_exp,
- range, FLD_LOOKUP);
- if (rc == 0)
- fld_cache_insert(fld->lsf_cache, range);
- }
- RETURN(rc);
-}
-EXPORT_SYMBOL(fld_server_lookup);
-
-/**
- * All MDT server handle fld lookup operation. But only MDT0 has fld index.
- * if entry is not found in cache we need to forward lookup request to MDT0
- */
-
-static int fld_server_handle(struct lu_server_fld *fld,
- const struct lu_env *env,
- __u32 opc, struct lu_seq_range *range,
- struct fld_thread_info *info)
-{
- int rc;
- ENTRY;
-
- switch (opc) {
- case FLD_LOOKUP:
- rc = fld_server_lookup(env, fld, range->lsr_start, range);
- break;
- default:
- rc = -EINVAL;
- break;
- }
-
- CDEBUG(D_INFO, "%s: FLD req handle: error %d (opc: %d, range: "
- DRANGE"\n", fld->lsf_name, rc, opc, PRANGE(range));
-
- RETURN(rc);
-
-}
-
-static int fld_req_handle(struct ptlrpc_request *req,
- struct fld_thread_info *info)
-{
- struct obd_export *exp = req->rq_export;
- struct lu_site *site = exp->exp_obd->obd_lu_dev->ld_site;
- struct lu_seq_range *in;
- struct lu_seq_range *out;
- int rc;
- __u32 *opc;
- ENTRY;
-
- rc = req_capsule_server_pack(info->fti_pill);
- if (rc)
- RETURN(err_serious(rc));
-
- opc = req_capsule_client_get(info->fti_pill, &RMF_FLD_OPC);
- if (opc != NULL) {
- in = req_capsule_client_get(info->fti_pill, &RMF_FLD_MDFLD);
- if (in == NULL)
- RETURN(err_serious(-EPROTO));
- out = req_capsule_server_get(info->fti_pill, &RMF_FLD_MDFLD);
- if (out == NULL)
- RETURN(err_serious(-EPROTO));
- *out = *in;
-
- /* For old 2.0 client, the 'lsr_flags' is uninitialized.
- * Set it as 'LU_SEQ_RANGE_MDT' by default. */
- if (!(exp_connect_flags(exp) & OBD_CONNECT_64BITHASH) &&
- !(exp_connect_flags(exp) & OBD_CONNECT_MDS_MDS) &&
- !(exp_connect_flags(exp) & OBD_CONNECT_LIGHTWEIGHT) &&
- !exp->exp_libclient)
- fld_range_set_mdt(out);
-
- rc = fld_server_handle(lu_site2seq(site)->ss_server_fld,
- req->rq_svc_thread->t_env,
- *opc, out, info);
- } else {
- rc = err_serious(-EPROTO);
- }
-
- RETURN(rc);
-}
-
-static void fld_thread_info_init(struct ptlrpc_request *req,
- struct fld_thread_info *info)
-{
- info->fti_pill = &req->rq_pill;
- /* Init request capsule. */
- req_capsule_init(info->fti_pill, req, RCL_SERVER);
- req_capsule_set(info->fti_pill, &RQF_FLD_QUERY);
-}
-
-static void fld_thread_info_fini(struct fld_thread_info *info)
-{
- req_capsule_fini(info->fti_pill);
-}
-
-static int fld_handle(struct ptlrpc_request *req)
-{
- struct fld_thread_info *info;
- const struct lu_env *env;
- int rc;
-
- env = req->rq_svc_thread->t_env;
- LASSERT(env != NULL);
-
- info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
- LASSERT(info != NULL);
-
- fld_thread_info_init(req, info);
- rc = fld_req_handle(req, info);
- fld_thread_info_fini(info);
-
- return rc;
-}
-
-/*
- * Entry point for handling FLD RPCs called from MDT.
- */
-int fld_query(struct com_thread_info *info)
-{
- return fld_handle(info->cti_pill->rc_req);
-}
-EXPORT_SYMBOL(fld_query);
-
-/*
- * Returns true, if fid is local to this server node.
- *
- * WARNING: this function is *not* guaranteed to return false if fid is
- * remote: it makes an educated conservative guess only.
- *
- * fid_is_local() is supposed to be used in assertion checks only.
- */
-int fid_is_local(const struct lu_env *env,
- struct lu_site *site, const struct lu_fid *fid)
-{
- int result;
- struct seq_server_site *ss_site;
- struct lu_seq_range *range;
- struct fld_thread_info *info;
- ENTRY;
-
- info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
- range = &info->fti_lrange;
-
- result = 1; /* conservatively assume fid is local */
- ss_site = lu_site2seq(site);
- if (ss_site->ss_client_fld != NULL) {
- int rc;
-
- rc = fld_cache_lookup(ss_site->ss_client_fld->lcf_cache,
- fid_seq(fid), range);
- if (rc == 0)
- result = (range->lsr_index == ss_site->ss_node_id);
- }
- return result;
-}
-EXPORT_SYMBOL(fid_is_local);
-
-static void fld_server_proc_fini(struct lu_server_fld *fld);
-
-#ifdef LPROCFS
-static int fld_server_proc_init(struct lu_server_fld *fld)
-{
- int rc = 0;
- ENTRY;
-
- fld->lsf_proc_dir = lprocfs_register(fld->lsf_name,
- fld_type_proc_dir,
- fld_server_proc_list, fld);
- if (IS_ERR(fld->lsf_proc_dir)) {
- rc = PTR_ERR(fld->lsf_proc_dir);
- RETURN(rc);
- }
-
- rc = lprocfs_seq_create(fld->lsf_proc_dir, "fldb", 0444,
- &fld_proc_seq_fops, fld);
- if (rc) {
- lprocfs_remove(&fld->lsf_proc_dir);
- fld->lsf_proc_dir = NULL;
- }
-
- RETURN(rc);
-}
-
-static void fld_server_proc_fini(struct lu_server_fld *fld)
-{
- ENTRY;
- if (fld->lsf_proc_dir != NULL) {
- if (!IS_ERR(fld->lsf_proc_dir))
- lprocfs_remove(&fld->lsf_proc_dir);
- fld->lsf_proc_dir = NULL;
- }
- EXIT;
-}
-#else
-static int fld_server_proc_init(struct lu_server_fld *fld)
-{
- return 0;
-}
-
-static void fld_server_proc_fini(struct lu_server_fld *fld)
-{
- return;
-}
-#endif
-
-int fld_server_init(const struct lu_env *env, struct lu_server_fld *fld,
- struct dt_device *dt, const char *prefix, int mds_node_id,
- int type)
-{
- int cache_size, cache_threshold;
- int rc;
- ENTRY;
-
- snprintf(fld->lsf_name, sizeof(fld->lsf_name),
- "srv-%s", prefix);
-
- cache_size = FLD_SERVER_CACHE_SIZE /
- sizeof(struct fld_cache_entry);
-
- cache_threshold = cache_size *
- FLD_SERVER_CACHE_THRESHOLD / 100;
-
- mutex_init(&fld->lsf_lock);
- fld->lsf_cache = fld_cache_init(fld->lsf_name,
- cache_size, cache_threshold);
- if (IS_ERR(fld->lsf_cache)) {
- rc = PTR_ERR(fld->lsf_cache);
- fld->lsf_cache = NULL;
- GOTO(out, rc);
- }
-
- if (!mds_node_id && type == LU_SEQ_RANGE_MDT) {
- rc = fld_index_init(env, fld, dt);
- if (rc)
- GOTO(out, rc);
- } else {
- fld->lsf_obj = NULL;
- }
-
- rc = fld_server_proc_init(fld);
- if (rc)
- GOTO(out, rc);
-
- fld->lsf_control_exp = NULL;
-
- GOTO(out, rc);
-
-out:
- if (rc)
- fld_server_fini(env, fld);
- return rc;
-}
-EXPORT_SYMBOL(fld_server_init);
-
-void fld_server_fini(const struct lu_env *env, struct lu_server_fld *fld)
-{
- ENTRY;
-
- fld_server_proc_fini(fld);
- fld_index_fini(env, fld);
-
- if (fld->lsf_cache != NULL) {
- if (!IS_ERR(fld->lsf_cache))
- fld_cache_fini(fld->lsf_cache);
- fld->lsf_cache = NULL;
- }
-
- EXIT;
-}
-EXPORT_SYMBOL(fld_server_fini);
-
-MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
-MODULE_DESCRIPTION("Lustre FLD");
-MODULE_LICENSE("GPL");
-
-cfs_module(mdd, "0.1.0", fld_mod_init, fld_mod_exit);
diff --git a/drivers/staging/lustre/lustre/fld/fld_index.c b/drivers/staging/lustre/lustre/fld/fld_index.c
deleted file mode 100644
index ec68a54c23b..00000000000
--- a/drivers/staging/lustre/lustre/fld/fld_index.c
+++ /dev/null
@@ -1,426 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2013, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * lustre/fld/fld_index.c
- *
- * Author: WangDi <wangdi@clusterfs.com>
- * Author: Yury Umanets <umka@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_FLD
-
-# include <linux/libcfs/libcfs.h>
-# include <linux/module.h>
-# include <linux/jbd.h>
-
-#include <obd.h>
-#include <obd_class.h>
-#include <lustre_ver.h>
-#include <obd_support.h>
-#include <lprocfs_status.h>
-
-#include <dt_object.h>
-#include <md_object.h>
-#include <lustre_mdc.h>
-#include <lustre_fid.h>
-#include <lustre_fld.h>
-#include "fld_internal.h"
-
-const char fld_index_name[] = "fld";
-
-static const struct lu_seq_range IGIF_FLD_RANGE = {
- .lsr_start = FID_SEQ_IGIF,
- .lsr_end = FID_SEQ_IGIF_MAX + 1,
- .lsr_index = 0,
- .lsr_flags = LU_SEQ_RANGE_MDT
-};
-
-static const struct lu_seq_range DOT_LUSTRE_FLD_RANGE = {
- .lsr_start = FID_SEQ_DOT_LUSTRE,
- .lsr_end = FID_SEQ_DOT_LUSTRE + 1,
- .lsr_index = 0,
- .lsr_flags = LU_SEQ_RANGE_MDT
-};
-
-static const struct lu_seq_range ROOT_FLD_RANGE = {
- .lsr_start = FID_SEQ_ROOT,
- .lsr_end = FID_SEQ_ROOT + 1,
- .lsr_index = 0,
- .lsr_flags = LU_SEQ_RANGE_MDT
-};
-
-const struct dt_index_features fld_index_features = {
- .dif_flags = DT_IND_UPDATE,
- .dif_keysize_min = sizeof(seqno_t),
- .dif_keysize_max = sizeof(seqno_t),
- .dif_recsize_min = sizeof(struct lu_seq_range),
- .dif_recsize_max = sizeof(struct lu_seq_range),
- .dif_ptrsize = 4
-};
-
-extern struct lu_context_key fld_thread_key;
-
-int fld_declare_index_create(const struct lu_env *env,
- struct lu_server_fld *fld,
- const struct lu_seq_range *new_range,
- struct thandle *th)
-{
- struct lu_seq_range *tmp;
- struct lu_seq_range *range;
- struct fld_thread_info *info;
- int rc = 0;
-
- ENTRY;
-
- info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
- range = &info->fti_lrange;
- tmp = &info->fti_irange;
- memset(range, 0, sizeof(*range));
-
- rc = fld_index_lookup(env, fld, new_range->lsr_start, range);
- if (rc == 0) {
- /* In case of duplicate entry, the location must be same */
- LASSERT((range_compare_loc(new_range, range) == 0));
- GOTO(out, rc = -EEXIST);
- }
-
- if (rc != -ENOENT) {
- CERROR("%s: lookup range "DRANGE" error: rc = %d\n",
- fld->lsf_name, PRANGE(range), rc);
- GOTO(out, rc);
- }
-
- /* Check for merge case, since the fld entry can only be increamental,
- * so we will only check whether it can be merged from the left. */
- if (new_range->lsr_start == range->lsr_end && range->lsr_end != 0 &&
- range_compare_loc(new_range, range) == 0) {
- range_cpu_to_be(tmp, range);
- rc = dt_declare_delete(env, fld->lsf_obj,
- (struct dt_key *)&tmp->lsr_start, th);
- if (rc) {
- CERROR("%s: declare record "DRANGE" failed: rc = %d\n",
- fld->lsf_name, PRANGE(range), rc);
- GOTO(out, rc);
- }
- memcpy(tmp, new_range, sizeof(*new_range));
- tmp->lsr_start = range->lsr_start;
- } else {
- memcpy(tmp, new_range, sizeof(*new_range));
- }
-
- range_cpu_to_be(tmp, tmp);
- rc = dt_declare_insert(env, fld->lsf_obj, (struct dt_rec *)tmp,
- (struct dt_key *)&tmp->lsr_start, th);
-out:
- RETURN(rc);
-}
-
-/**
- * insert range in fld store.
- *
- * \param range range to be inserted
- * \param th transaction for this operation as it could compound
- * transaction.
- *
- * \retval 0 success
- * \retval -ve error
- *
- * The whole fld index insertion is protected by seq->lss_mutex (see
- * seq_server_alloc_super), i.e. only one thread will access fldb each
- * time, so we do not need worry the fld file and cache will being
- * changed between declare and create.
- * Because the fld entry can only be increamental, so we will only check
- * whether it can be merged from the left.
- **/
-int fld_index_create(const struct lu_env *env, struct lu_server_fld *fld,
- const struct lu_seq_range *new_range, struct thandle *th)
-{
- struct lu_seq_range *range;
- struct lu_seq_range *tmp;
- struct fld_thread_info *info;
- int rc = 0;
- int deleted = 0;
- struct fld_cache_entry *flde;
- ENTRY;
-
- info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
-
- LASSERT(mutex_is_locked(&fld->lsf_lock));
-
- range = &info->fti_lrange;
- memset(range, 0, sizeof(*range));
- tmp = &info->fti_irange;
- rc = fld_index_lookup(env, fld, new_range->lsr_start, range);
- if (rc != -ENOENT) {
- rc = rc == 0 ? -EEXIST : rc;
- GOTO(out, rc);
- }
-
- if (new_range->lsr_start == range->lsr_end && range->lsr_end != 0 &&
- range_compare_loc(new_range, range) == 0) {
- range_cpu_to_be(tmp, range);
- rc = dt_delete(env, fld->lsf_obj,
- (struct dt_key *)&tmp->lsr_start, th,
- BYPASS_CAPA);
- if (rc != 0)
- GOTO(out, rc);
- memcpy(tmp, new_range, sizeof(*new_range));
- tmp->lsr_start = range->lsr_start;
- deleted = 1;
- } else {
- memcpy(tmp, new_range, sizeof(*new_range));
- }
-
- range_cpu_to_be(tmp, tmp);
- rc = dt_insert(env, fld->lsf_obj, (struct dt_rec *)tmp,
- (struct dt_key *)&tmp->lsr_start, th, BYPASS_CAPA, 1);
- if (rc != 0) {
- CERROR("%s: insert range "DRANGE" failed: rc = %d\n",
- fld->lsf_name, PRANGE(new_range), rc);
- GOTO(out, rc);
- }
-
- flde = fld_cache_entry_create(new_range);
- if (IS_ERR(flde))
- GOTO(out, rc = PTR_ERR(flde));
-
- write_lock(&fld->lsf_cache->fci_lock);
- if (deleted)
- fld_cache_delete_nolock(fld->lsf_cache, new_range);
- rc = fld_cache_insert_nolock(fld->lsf_cache, flde);
- write_unlock(&fld->lsf_cache->fci_lock);
- if (rc)
- OBD_FREE_PTR(flde);
-out:
- RETURN(rc);
-}
-
-/**
- * lookup range for a seq passed. note here we only care about the start/end,
- * caller should handle the attached location data (flags, index).
- *
- * \param seq seq for lookup.
- * \param range result of lookup.
- *
- * \retval 0 found, \a range is the matched range;
- * \retval -ENOENT not found, \a range is the left-side range;
- * \retval -ve other error;
- */
-int fld_index_lookup(const struct lu_env *env, struct lu_server_fld *fld,
- seqno_t seq, struct lu_seq_range *range)
-{
- struct lu_seq_range *fld_rec;
- struct fld_thread_info *info;
- int rc;
-
- ENTRY;
-
- info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
- fld_rec = &info->fti_rec;
-
- rc = fld_cache_lookup(fld->lsf_cache, seq, fld_rec);
- if (rc == 0) {
- *range = *fld_rec;
- if (range_within(range, seq))
- rc = 0;
- else
- rc = -ENOENT;
- }
-
- CDEBUG(D_INFO, "%s: lookup seq = "LPX64" range : "DRANGE" rc = %d\n",
- fld->lsf_name, seq, PRANGE(range), rc);
-
- RETURN(rc);
-}
-
-int fld_insert_entry(const struct lu_env *env,
- struct lu_server_fld *fld,
- const struct lu_seq_range *range)
-{
- struct thandle *th;
- int rc;
- ENTRY;
-
- th = dt_trans_create(env, lu2dt_dev(fld->lsf_obj->do_lu.lo_dev));
- if (IS_ERR(th))
- RETURN(PTR_ERR(th));
-
- rc = fld_declare_index_create(env, fld, range, th);
- if (rc != 0) {
- if (rc == -EEXIST)
- rc = 0;
- GOTO(out, rc);
- }
-
- rc = dt_trans_start_local(env, lu2dt_dev(fld->lsf_obj->do_lu.lo_dev),
- th);
- if (rc)
- GOTO(out, rc);
-
- rc = fld_index_create(env, fld, range, th);
- if (rc == -EEXIST)
- rc = 0;
-out:
- dt_trans_stop(env, lu2dt_dev(fld->lsf_obj->do_lu.lo_dev), th);
- RETURN(rc);
-}
-EXPORT_SYMBOL(fld_insert_entry);
-
-static int fld_insert_special_entries(const struct lu_env *env,
- struct lu_server_fld *fld)
-{
- int rc;
-
- rc = fld_insert_entry(env, fld, &IGIF_FLD_RANGE);
- if (rc != 0)
- RETURN(rc);
-
- rc = fld_insert_entry(env, fld, &DOT_LUSTRE_FLD_RANGE);
- if (rc != 0)
- RETURN(rc);
-
- rc = fld_insert_entry(env, fld, &ROOT_FLD_RANGE);
-
- RETURN(rc);
-}
-
-int fld_index_init(const struct lu_env *env, struct lu_server_fld *fld,
- struct dt_device *dt)
-{
- struct dt_object *dt_obj = NULL;
- struct lu_fid fid;
- struct lu_attr *attr = NULL;
- struct lu_seq_range *range = NULL;
- struct fld_thread_info *info;
- struct dt_object_format dof;
- struct dt_it *it;
- const struct dt_it_ops *iops;
- int rc;
- ENTRY;
-
- info = lu_context_key_get(&env->le_ctx, &fld_thread_key);
- LASSERT(info != NULL);
-
- lu_local_obj_fid(&fid, FLD_INDEX_OID);
- OBD_ALLOC_PTR(attr);
- if (attr == NULL)
- RETURN(-ENOMEM);
-
- memset(attr, 0, sizeof(*attr));
- attr->la_valid = LA_MODE;
- attr->la_mode = S_IFREG | 0666;
- dof.dof_type = DFT_INDEX;
- dof.u.dof_idx.di_feat = &fld_index_features;
-
- dt_obj = dt_find_or_create(env, dt, &fid, &dof, attr);
- if (IS_ERR(dt_obj)) {
- rc = PTR_ERR(dt_obj);
- CERROR("%s: Can't find \"%s\" obj %d\n", fld->lsf_name,
- fld_index_name, rc);
- dt_obj = NULL;
- GOTO(out, rc);
- }
-
- fld->lsf_obj = dt_obj;
- rc = dt_obj->do_ops->do_index_try(env, dt_obj, &fld_index_features);
- if (rc != 0) {
- CERROR("%s: File \"%s\" is not an index: rc = %d!\n",
- fld->lsf_name, fld_index_name, rc);
- GOTO(out, rc);
- }
-
- range = &info->fti_rec;
- /* Load fld entry to cache */
- iops = &dt_obj->do_index_ops->dio_it;
- it = iops->init(env, dt_obj, 0, NULL);
- if (IS_ERR(it))
- GOTO(out, rc = PTR_ERR(it));
-
- rc = iops->load(env, it, 0);
- if (rc < 0)
- GOTO(out_it_fini, rc);
-
- if (rc > 0) {
- /* Load FLD entry into server cache */
- do {
- rc = iops->rec(env, it, (struct dt_rec *)range, 0);
- if (rc != 0)
- GOTO(out_it_put, rc);
- LASSERT(range != NULL);
- range_be_to_cpu(range, range);
- rc = fld_cache_insert(fld->lsf_cache, range);
- if (rc != 0)
- GOTO(out_it_put, rc);
- rc = iops->next(env, it);
- } while (rc == 0);
- }
-
- /* Note: fld_insert_entry will detect whether these
- * special entries already exist inside FLDB */
- mutex_lock(&fld->lsf_lock);
- rc = fld_insert_special_entries(env, fld);
- mutex_unlock(&fld->lsf_lock);
- if (rc != 0) {
- CERROR("%s: insert special entries failed!: rc = %d\n",
- fld->lsf_name, rc);
- GOTO(out_it_put, rc);
- }
-
-out_it_put:
- iops->put(env, it);
-out_it_fini:
- iops->fini(env, it);
-out:
- if (attr != NULL)
- OBD_FREE_PTR(attr);
-
- if (rc != 0) {
- if (dt_obj != NULL)
- lu_object_put(env, &dt_obj->do_lu);
- fld->lsf_obj = NULL;
- }
- RETURN(rc);
-}
-
-void fld_index_fini(const struct lu_env *env, struct lu_server_fld *fld)
-{
- ENTRY;
- if (fld->lsf_obj != NULL) {
- if (!IS_ERR(fld->lsf_obj))
- lu_object_put(env, &fld->lsf_obj->do_lu);
- fld->lsf_obj = NULL;
- }
- EXIT;
-}
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index 9fa9e01cdb6..56686b138ac 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -139,38 +139,10 @@ enum {
extern struct lu_fld_hash fld_hash[];
-
-struct fld_thread_info {
- struct req_capsule *fti_pill;
- __u64 fti_key;
- struct lu_seq_range fti_rec;
- struct lu_seq_range fti_lrange;
- struct lu_seq_range fti_irange;
-};
-
-extern struct lu_context_key fld_thread_key;
-
-int fld_index_init(const struct lu_env *env, struct lu_server_fld *fld,
- struct dt_device *dt);
-
-void fld_index_fini(const struct lu_env *env, struct lu_server_fld *fld);
-
-int fld_declare_index_create(const struct lu_env *env,
- struct lu_server_fld *fld,
- const struct lu_seq_range *new,
- struct thandle *th);
-
-int fld_index_create(const struct lu_env *env, struct lu_server_fld *fld,
- const struct lu_seq_range *new, struct thandle *th);
-
-int fld_index_lookup(const struct lu_env *env, struct lu_server_fld *fld,
- seqno_t seq, struct lu_seq_range *range);
-
int fld_client_rpc(struct obd_export *exp,
struct lu_seq_range *range, __u32 fld_op);
#ifdef LPROCFS
-extern struct lprocfs_vars fld_server_proc_list[];
extern struct lprocfs_vars fld_client_proc_list[];
#endif
@@ -218,6 +190,5 @@ fld_target_name(struct lu_fld_target *tar)
return (const char *)tar->ft_exp->exp_obd->obd_name;
}
-extern proc_dir_entry_t *fld_type_proc_dir;
-extern struct file_operations fld_proc_seq_fops;
+extern struct proc_dir_entry *fld_type_proc_dir;
#endif /* __FLD_INTERNAL_H */
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index e9f07398b68..078e98bda68 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -44,7 +44,6 @@
# include <linux/libcfs/libcfs.h>
# include <linux/module.h>
-# include <linux/jbd.h>
# include <asm/div64.h>
#include <obd.h>
@@ -60,16 +59,18 @@
#include <lustre_mdc.h>
#include "fld_internal.h"
+struct lu_context_key fld_thread_key;
+
/* TODO: these 3 functions are copies of flow-control code from mdc_lib.c
* It should be common thing. The same about mdc RPC lock */
static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
{
int rc;
- ENTRY;
+
client_obd_list_lock(&cli->cl_loi_list_lock);
rc = list_empty(&mcw->mcw_entry);
client_obd_list_unlock(&cli->cl_loi_list_lock);
- RETURN(rc);
+ return rc;
};
static void fld_enter_request(struct client_obd *cli)
@@ -123,7 +124,6 @@ fld_rrb_scan(struct lu_client_fld *fld, seqno_t seq)
{
struct lu_fld_target *target;
int hash;
- ENTRY;
/* Because almost all of special sequence located in MDT0,
* it should go to index 0 directly, instead of calculating
@@ -137,7 +137,7 @@ fld_rrb_scan(struct lu_client_fld *fld, seqno_t seq)
list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
if (target->ft_idx == hash)
- RETURN(target);
+ return target;
}
CERROR("%s: Can't find target by hash %d (seq "LPX64"). "
@@ -161,7 +161,7 @@ fld_rrb_scan(struct lu_client_fld *fld, seqno_t seq)
* LBUG() to catch this situation.
*/
LBUG();
- RETURN(NULL);
+ return NULL;
}
struct lu_fld_hash fld_hash[] = {
@@ -179,7 +179,6 @@ static struct lu_fld_target *
fld_client_get_target(struct lu_client_fld *fld, seqno_t seq)
{
struct lu_fld_target *target;
- ENTRY;
LASSERT(fld->lcf_hash != NULL);
@@ -193,7 +192,7 @@ fld_client_get_target(struct lu_client_fld *fld, seqno_t seq)
target->ft_idx, seq);
}
- RETURN(target);
+ return target;
}
/*
@@ -205,7 +204,6 @@ int fld_client_add_target(struct lu_client_fld *fld,
{
const char *name;
struct lu_fld_target *target, *tmp;
- ENTRY;
LASSERT(tar != NULL);
name = fld_target_name(tar);
@@ -216,7 +214,7 @@ int fld_client_add_target(struct lu_client_fld *fld,
CERROR("%s: Attempt to add target %s (idx "LPU64") "
"on fly - skip it\n", fld->lcf_name, name,
tar->ft_idx);
- RETURN(0);
+ return 0;
} else {
CDEBUG(D_INFO, "%s: Adding target %s (idx "
LPU64")\n", fld->lcf_name, name, tar->ft_idx);
@@ -224,7 +222,7 @@ int fld_client_add_target(struct lu_client_fld *fld,
OBD_ALLOC_PTR(target);
if (target == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
spin_lock(&fld->lcf_lock);
list_for_each_entry(tmp, &fld->lcf_targets, ft_chain) {
@@ -233,7 +231,7 @@ int fld_client_add_target(struct lu_client_fld *fld,
OBD_FREE_PTR(target);
CERROR("Target %s exists in FLD and known as %s:#"LPU64"\n",
name, fld_target_name(tmp), tmp->ft_idx);
- RETURN(-EEXIST);
+ return -EEXIST;
}
}
@@ -249,7 +247,7 @@ int fld_client_add_target(struct lu_client_fld *fld,
fld->lcf_count++;
spin_unlock(&fld->lcf_lock);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(fld_client_add_target);
@@ -257,7 +255,6 @@ EXPORT_SYMBOL(fld_client_add_target);
int fld_client_del_target(struct lu_client_fld *fld, __u64 idx)
{
struct lu_fld_target *target, *tmp;
- ENTRY;
spin_lock(&fld->lcf_lock);
list_for_each_entry_safe(target, tmp,
@@ -271,19 +268,20 @@ int fld_client_del_target(struct lu_client_fld *fld, __u64 idx)
class_export_put(target->ft_exp);
OBD_FREE_PTR(target);
- RETURN(0);
+ return 0;
}
}
spin_unlock(&fld->lcf_lock);
- RETURN(-ENOENT);
+ return -ENOENT;
}
EXPORT_SYMBOL(fld_client_del_target);
#ifdef LPROCFS
+struct proc_dir_entry *fld_type_proc_dir = NULL;
+
static int fld_client_proc_init(struct lu_client_fld *fld)
{
int rc;
- ENTRY;
fld->lcf_proc_dir = lprocfs_register(fld->lcf_name,
fld_type_proc_dir,
@@ -293,7 +291,7 @@ static int fld_client_proc_init(struct lu_client_fld *fld)
CERROR("%s: LProcFS failed in fld-init\n",
fld->lcf_name);
rc = PTR_ERR(fld->lcf_proc_dir);
- RETURN(rc);
+ return rc;
}
rc = lprocfs_add_vars(fld->lcf_proc_dir,
@@ -304,7 +302,7 @@ static int fld_client_proc_init(struct lu_client_fld *fld)
GOTO(out_cleanup, rc);
}
- RETURN(0);
+ return 0;
out_cleanup:
fld_client_proc_fini(fld);
@@ -313,13 +311,11 @@ out_cleanup:
void fld_client_proc_fini(struct lu_client_fld *fld)
{
- ENTRY;
if (fld->lcf_proc_dir) {
if (!IS_ERR(fld->lcf_proc_dir))
lprocfs_remove(&fld->lcf_proc_dir);
fld->lcf_proc_dir = NULL;
}
- EXIT;
}
#else
static int fld_client_proc_init(struct lu_client_fld *fld)
@@ -345,7 +341,6 @@ int fld_client_init(struct lu_client_fld *fld,
{
int cache_size, cache_threshold;
int rc;
- ENTRY;
LASSERT(fld != NULL);
@@ -355,7 +350,7 @@ int fld_client_init(struct lu_client_fld *fld,
if (!hash_is_sane(hash)) {
CERROR("%s: Wrong hash function %#x\n",
fld->lcf_name, hash);
- RETURN(-EINVAL);
+ return -EINVAL;
}
fld->lcf_count = 0;
@@ -381,7 +376,6 @@ int fld_client_init(struct lu_client_fld *fld,
rc = fld_client_proc_init(fld);
if (rc)
GOTO(out, rc);
- EXIT;
out:
if (rc)
fld_client_fini(fld);
@@ -395,7 +389,6 @@ EXPORT_SYMBOL(fld_client_init);
void fld_client_fini(struct lu_client_fld *fld)
{
struct lu_fld_target *target, *tmp;
- ENTRY;
spin_lock(&fld->lcf_lock);
list_for_each_entry_safe(target, tmp,
@@ -413,8 +406,6 @@ void fld_client_fini(struct lu_client_fld *fld)
fld_cache_fini(fld->lcf_cache);
fld->lcf_cache = NULL;
}
-
- EXIT;
}
EXPORT_SYMBOL(fld_client_fini);
@@ -426,7 +417,6 @@ int fld_client_rpc(struct obd_export *exp,
__u32 *op;
int rc;
struct obd_import *imp;
- ENTRY;
LASSERT(exp != NULL);
@@ -434,7 +424,7 @@ int fld_client_rpc(struct obd_export *exp,
req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_QUERY, LUSTRE_MDS_VERSION,
FLD_QUERY);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
op = req_capsule_client_get(&req->rq_pill, &RMF_FLD_OPC);
*op = fld_op;
@@ -464,7 +454,6 @@ int fld_client_rpc(struct obd_export *exp,
if (prange == NULL)
GOTO(out_req, rc = -EFAULT);
*range = *prange;
- EXIT;
out_req:
ptlrpc_req_finished(req);
return rc;
@@ -476,14 +465,13 @@ int fld_client_lookup(struct lu_client_fld *fld, seqno_t seq, mdsno_t *mds,
struct lu_seq_range res = { 0 };
struct lu_fld_target *target;
int rc;
- ENTRY;
fld->lcf_flags |= LUSTRE_FLD_RUN;
rc = fld_cache_lookup(fld->lcf_cache, seq, &res);
if (rc == 0) {
*mds = res.lsr_index;
- RETURN(0);
+ return 0;
}
/* Can not find it in the cache */
@@ -496,19 +484,14 @@ int fld_client_lookup(struct lu_client_fld *fld, seqno_t seq, mdsno_t *mds,
res.lsr_start = seq;
fld_range_set_type(&res, flags);
- if (target->ft_srv != NULL) {
- LASSERT(env != NULL);
- rc = fld_server_lookup(env, target->ft_srv, seq, &res);
- } else {
- rc = fld_client_rpc(target->ft_exp, &res, FLD_LOOKUP);
- }
+ rc = fld_client_rpc(target->ft_exp, &res, FLD_LOOKUP);
if (rc == 0) {
*mds = res.lsr_index;
fld_cache_insert(fld->lcf_cache, &res);
}
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(fld_client_lookup);
@@ -517,3 +500,32 @@ void fld_client_flush(struct lu_client_fld *fld)
fld_cache_flush(fld->lcf_cache);
}
EXPORT_SYMBOL(fld_client_flush);
+
+static int __init fld_mod_init(void)
+{
+ fld_type_proc_dir = lprocfs_register(LUSTRE_FLD_NAME,
+ proc_lustre_root,
+ NULL, NULL);
+ if (IS_ERR(fld_type_proc_dir))
+ return PTR_ERR(fld_type_proc_dir);
+
+ LU_CONTEXT_KEY_INIT(&fld_thread_key);
+ lu_context_key_register(&fld_thread_key);
+ return 0;
+}
+
+static void __exit fld_mod_exit(void)
+{
+ lu_context_key_degister(&fld_thread_key);
+ if (fld_type_proc_dir != NULL && !IS_ERR(fld_type_proc_dir)) {
+ lprocfs_remove(&fld_type_proc_dir);
+ fld_type_proc_dir = NULL;
+ }
+}
+
+MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
+MODULE_DESCRIPTION("Lustre FLD");
+MODULE_LICENSE("GPL");
+
+module_init(fld_mod_init)
+module_exit(fld_mod_exit)
diff --git a/drivers/staging/lustre/lustre/fld/lproc_fld.c b/drivers/staging/lustre/lustre/fld/lproc_fld.c
index c1bd80339e6..052f7d51a07 100644
--- a/drivers/staging/lustre/lustre/fld/lproc_fld.c
+++ b/drivers/staging/lustre/lustre/fld/lproc_fld.c
@@ -62,7 +62,6 @@ fld_proc_targets_seq_show(struct seq_file *m, void *unused)
{
struct lu_client_fld *fld = (struct lu_client_fld *)m->private;
struct lu_fld_target *target;
- ENTRY;
LASSERT(fld != NULL);
@@ -72,14 +71,13 @@ fld_proc_targets_seq_show(struct seq_file *m, void *unused)
seq_printf(m, "%s\n", fld_target_name(target));
spin_unlock(&fld->lcf_lock);
- RETURN(0);
+ return 0;
}
static int
fld_proc_hash_seq_show(struct seq_file *m, void *unused)
{
struct lu_client_fld *fld = (struct lu_client_fld *)m->private;
- ENTRY;
LASSERT(fld != NULL);
@@ -87,7 +85,7 @@ fld_proc_hash_seq_show(struct seq_file *m, void *unused)
seq_printf(m, "%s\n", fld->lcf_hash->fh_name);
spin_unlock(&fld->lcf_lock);
- RETURN(0);
+ return 0;
}
static ssize_t
@@ -97,7 +95,6 @@ fld_proc_hash_seq_write(struct file *file, const char *buffer,
struct lu_client_fld *fld = ((struct seq_file *)file->private_data)->private;
struct lu_fld_hash *hash = NULL;
int i;
- ENTRY;
LASSERT(fld != NULL);
@@ -120,7 +117,7 @@ fld_proc_hash_seq_write(struct file *file, const char *buffer,
fld->lcf_name, hash->fh_name);
}
- RETURN(count);
+ return count;
}
static ssize_t
@@ -128,7 +125,6 @@ fld_proc_cache_flush_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
struct lu_client_fld *fld = file->private_data;
- ENTRY;
LASSERT(fld != NULL);
@@ -136,7 +132,7 @@ fld_proc_cache_flush_write(struct file *file, const char __user *buffer,
CDEBUG(D_INFO, "%s: Lookup cache is flushed\n", fld->lcf_name);
- RETURN(count);
+ return count;
}
static int fld_proc_cache_flush_open(struct inode *inode, struct file *file)
@@ -158,202 +154,6 @@ struct file_operations fld_proc_cache_flush_fops = {
.release = fld_proc_cache_flush_release,
};
-struct fld_seq_param {
- struct lu_env fsp_env;
- struct dt_it *fsp_it;
- struct lu_server_fld *fsp_fld;
- unsigned int fsp_stop:1;
-};
-
-static void *fldb_seq_start(struct seq_file *p, loff_t *pos)
-{
- struct fld_seq_param *param = p->private;
- struct lu_server_fld *fld;
- struct dt_object *obj;
- const struct dt_it_ops *iops;
-
- if (param == NULL || param->fsp_stop)
- return NULL;
-
- fld = param->fsp_fld;
- obj = fld->lsf_obj;
- LASSERT(obj != NULL);
- iops = &obj->do_index_ops->dio_it;
-
- iops->load(&param->fsp_env, param->fsp_it, *pos);
-
- *pos = be64_to_cpu(*(__u64 *)iops->key(&param->fsp_env, param->fsp_it));
- return param;
-}
-
-static void fldb_seq_stop(struct seq_file *p, void *v)
-{
- struct fld_seq_param *param = p->private;
- const struct dt_it_ops *iops;
- struct lu_server_fld *fld;
- struct dt_object *obj;
-
- if (param == NULL)
- return;
-
- fld = param->fsp_fld;
- obj = fld->lsf_obj;
- LASSERT(obj != NULL);
- iops = &obj->do_index_ops->dio_it;
-
- iops->put(&param->fsp_env, param->fsp_it);
-}
-
-static void *fldb_seq_next(struct seq_file *p, void *v, loff_t *pos)
-{
- struct fld_seq_param *param = p->private;
- struct lu_server_fld *fld;
- struct dt_object *obj;
- const struct dt_it_ops *iops;
- int rc;
-
- if (param == NULL || param->fsp_stop)
- return NULL;
-
- fld = param->fsp_fld;
- obj = fld->lsf_obj;
- LASSERT(obj != NULL);
- iops = &obj->do_index_ops->dio_it;
-
- rc = iops->next(&param->fsp_env, param->fsp_it);
- if (rc > 0) {
- param->fsp_stop = 1;
- return NULL;
- }
-
- *pos = be64_to_cpu(*(__u64 *)iops->key(&param->fsp_env, param->fsp_it));
- return param;
-}
-
-static int fldb_seq_show(struct seq_file *p, void *v)
-{
- struct fld_seq_param *param = p->private;
- struct lu_server_fld *fld;
- struct dt_object *obj;
- const struct dt_it_ops *iops;
- struct fld_thread_info *info;
- struct lu_seq_range *fld_rec;
- int rc;
-
- if (param == NULL || param->fsp_stop)
- return 0;
-
- fld = param->fsp_fld;
- obj = fld->lsf_obj;
- LASSERT(obj != NULL);
- iops = &obj->do_index_ops->dio_it;
-
- info = lu_context_key_get(&param->fsp_env.le_ctx,
- &fld_thread_key);
- fld_rec = &info->fti_rec;
- rc = iops->rec(&param->fsp_env, param->fsp_it,
- (struct dt_rec *)fld_rec, 0);
- if (rc != 0) {
- CERROR("%s:read record error: rc %d\n",
- fld->lsf_name, rc);
- } else if (fld_rec->lsr_start != 0) {
- range_be_to_cpu(fld_rec, fld_rec);
- rc = seq_printf(p, DRANGE"\n", PRANGE(fld_rec));
- }
-
- return rc;
-}
-
-struct seq_operations fldb_sops = {
- .start = fldb_seq_start,
- .stop = fldb_seq_stop,
- .next = fldb_seq_next,
- .show = fldb_seq_show,
-};
-
-static int fldb_seq_open(struct inode *inode, struct file *file)
-{
- struct seq_file *seq;
- struct lu_server_fld *fld = (struct lu_server_fld *)PDE_DATA(inode);
- struct dt_object *obj;
- const struct dt_it_ops *iops;
- struct fld_seq_param *param = NULL;
- int env_init = 0;
- int rc;
-
- rc = seq_open(file, &fldb_sops);
- if (rc)
- GOTO(out, rc);
-
- obj = fld->lsf_obj;
- if (obj == NULL) {
- seq = file->private_data;
- seq->private = NULL;
- return 0;
- }
-
- OBD_ALLOC_PTR(param);
- if (param == NULL)
- GOTO(out, rc = -ENOMEM);
-
- rc = lu_env_init(&param->fsp_env, LCT_MD_THREAD);
- if (rc != 0)
- GOTO(out, rc);
-
- env_init = 1;
- iops = &obj->do_index_ops->dio_it;
- param->fsp_it = iops->init(&param->fsp_env, obj, 0, NULL);
- if (IS_ERR(param->fsp_it))
- GOTO(out, rc = PTR_ERR(param->fsp_it));
-
- param->fsp_fld = fld;
- param->fsp_stop = 0;
-
- seq = file->private_data;
- seq->private = param;
-out:
- if (rc != 0) {
- if (env_init == 1)
- lu_env_fini(&param->fsp_env);
- if (param != NULL)
- OBD_FREE_PTR(param);
- }
- return rc;
-}
-
-static int fldb_seq_release(struct inode *inode, struct file *file)
-{
- struct seq_file *seq = file->private_data;
- struct fld_seq_param *param;
- struct lu_server_fld *fld;
- struct dt_object *obj;
- const struct dt_it_ops *iops;
-
- param = seq->private;
- if (param == NULL) {
- lprocfs_seq_release(inode, file);
- return 0;
- }
-
- fld = param->fsp_fld;
- obj = fld->lsf_obj;
- LASSERT(obj != NULL);
- iops = &obj->do_index_ops->dio_it;
-
- LASSERT(iops != NULL);
- LASSERT(obj != NULL);
- LASSERT(param->fsp_it != NULL);
- iops->fini(&param->fsp_env, param->fsp_it);
- lu_env_fini(&param->fsp_env);
- OBD_FREE_PTR(param);
- lprocfs_seq_release(inode, file);
-
- return 0;
-}
-
-struct lprocfs_vars fld_server_proc_list[] = {
- { NULL }};
-
LPROC_SEQ_FOPS_RO(fld_proc_targets);
LPROC_SEQ_FOPS(fld_proc_hash);
@@ -363,11 +163,4 @@ struct lprocfs_vars fld_client_proc_list[] = {
{ "cache_flush", &fld_proc_cache_flush_fops },
{ NULL }};
-struct file_operations fld_proc_seq_fops = {
- .owner = THIS_MODULE,
- .open = fldb_seq_open,
- .read = seq_read,
- .release = fldb_seq_release,
-};
-
-#endif
+#endif /* LPROCFS */
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index 4bb68801d3a..edb40afe66f 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -758,7 +758,7 @@ struct cl_page {
/**
* Debug information, the task is owning the page.
*/
- task_t *cp_task;
+ struct task_struct *cp_task;
/**
* Owning IO request in cl_page_state::CPS_PAGEOUT and
* cl_page_state::CPS_PAGEIN states. This field is maintained only in
@@ -768,11 +768,11 @@ struct cl_page {
/** List of references to this page, for debugging. */
struct lu_ref cp_reference;
/** Link to an object, for debugging. */
- struct lu_ref_link *cp_obj_ref;
+ struct lu_ref_link cp_obj_ref;
/** Link to a queue, for debugging. */
- struct lu_ref_link *cp_queue_ref;
+ struct lu_ref_link cp_queue_ref;
/** Per-page flags from enum cl_page_flags. Protected by a VM lock. */
- unsigned cp_flags;
+ unsigned cp_flags;
/** Assigned if doing a sync_io */
struct cl_sync_io *cp_sync_io;
};
@@ -1576,13 +1576,13 @@ struct cl_lock {
* \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
*/
struct mutex cll_guard;
- task_t *cll_guarder;
+ struct task_struct *cll_guarder;
int cll_depth;
/**
* the owner for INTRANSIT state
*/
- task_t *cll_intransit_owner;
+ struct task_struct *cll_intransit_owner;
int cll_error;
/**
* Number of holds on a lock. A hold prevents a lock from being
@@ -1625,7 +1625,7 @@ struct cl_lock {
/**
* A reference for cl_lock::cll_descr::cld_obj. For debugging.
*/
- struct lu_ref_link *cll_obj_ref;
+ struct lu_ref_link cll_obj_ref;
#ifdef CONFIG_LOCKDEP
/* "dep_map" name is assumed by lockdep.h macros. */
struct lockdep_map dep_map;
@@ -1869,7 +1869,7 @@ do { \
struct cl_page_list {
unsigned pl_nr;
struct list_head pl_pages;
- task_t *pl_owner;
+ struct task_struct *pl_owner;
};
/**
@@ -2517,7 +2517,7 @@ struct cl_req_obj {
/** object itself */
struct cl_object *ro_obj;
/** reference to cl_req_obj::ro_obj. For debugging. */
- struct lu_ref_link *ro_obj_ref;
+ struct lu_ref_link ro_obj_ref;
/* something else? Number of pages for a given object? */
};
diff --git a/drivers/staging/lustre/lustre/include/linux/lprocfs_status.h b/drivers/staging/lustre/lustre/include/linux/lprocfs_status.h
index 586692272d7..4bcc4dcca3d 100644
--- a/drivers/staging/lustre/lustre/include/linux/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/linux/lprocfs_status.h
@@ -48,7 +48,6 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/version.h>
#include <linux/smp.h>
#include <linux/rwsem.h>
#include <linux/libcfs/libcfs.h>
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
index dff04688945..9243dfab43d 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
@@ -87,22 +87,6 @@ static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
#define LTIME_S(time) (time.tv_sec)
-#define ll_permission(inode,mask,nd) inode_permission(inode,mask)
-
-# define ll_generic_permission(inode, mask, flags, check_acl) \
- generic_permission(inode, mask)
-
-#define ll_blkdev_put(a, b) blkdev_put(a, b)
-
-#define ll_dentry_open(a,b,c) dentry_open(a,b,c)
-
-#define ll_vfs_symlink(dir, dentry, mnt, path, mode) \
- vfs_symlink(dir, dentry, path)
-
-
-#define ll_generic_file_llseek_size(file, offset, origin, maxbytes, eof) \
- generic_file_llseek_size(file, offset, origin, maxbytes, eof);
-
/* inode_dio_wait(i) use as-is for write lock */
# define inode_dio_write_done(i) do {} while (0) /* for write unlock */
# define inode_dio_read(i) atomic_inc(&(i)->i_dio_count)
@@ -111,88 +95,10 @@ static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
#define TREE_READ_LOCK_IRQ(mapping) spin_lock_irq(&(mapping)->tree_lock)
#define TREE_READ_UNLOCK_IRQ(mapping) spin_unlock_irq(&(mapping)->tree_lock)
-static inline
-int ll_unregister_blkdev(unsigned int dev, const char *name)
-{
- unregister_blkdev(dev, name);
- return 0;
-}
-
-#define ll_invalidate_bdev(a,b) invalidate_bdev((a))
-
#ifndef FS_HAS_FIEMAP
#define FS_HAS_FIEMAP (0)
#endif
-
-
-/* add a lustre compatible layer for crypto API */
-#include <linux/crypto.h>
-#define ll_crypto_hash crypto_hash
-#define ll_crypto_cipher crypto_blkcipher
-#define ll_crypto_alloc_hash(name, type, mask) crypto_alloc_hash(name, type, mask)
-#define ll_crypto_hash_setkey(tfm, key, keylen) crypto_hash_setkey(tfm, key, keylen)
-#define ll_crypto_hash_init(desc) crypto_hash_init(desc)
-#define ll_crypto_hash_update(desc, sl, bytes) crypto_hash_update(desc, sl, bytes)
-#define ll_crypto_hash_final(desc, out) crypto_hash_final(desc, out)
-#define ll_crypto_blkcipher_setkey(tfm, key, keylen) \
- crypto_blkcipher_setkey(tfm, key, keylen)
-#define ll_crypto_blkcipher_set_iv(tfm, src, len) \
- crypto_blkcipher_set_iv(tfm, src, len)
-#define ll_crypto_blkcipher_get_iv(tfm, dst, len) \
- crypto_blkcipher_get_iv(tfm, dst, len)
-#define ll_crypto_blkcipher_encrypt(desc, dst, src, bytes) \
- crypto_blkcipher_encrypt(desc, dst, src, bytes)
-#define ll_crypto_blkcipher_decrypt(desc, dst, src, bytes) \
- crypto_blkcipher_decrypt(desc, dst, src, bytes)
-#define ll_crypto_blkcipher_encrypt_iv(desc, dst, src, bytes) \
- crypto_blkcipher_encrypt_iv(desc, dst, src, bytes)
-#define ll_crypto_blkcipher_decrypt_iv(desc, dst, src, bytes) \
- crypto_blkcipher_decrypt_iv(desc, dst, src, bytes)
-
-static inline
-struct ll_crypto_cipher *ll_crypto_alloc_blkcipher(const char *name,
- u32 type, u32 mask)
-{
- struct ll_crypto_cipher *rtn = crypto_alloc_blkcipher(name, type, mask);
-
- return (rtn == NULL ? ERR_PTR(-ENOMEM) : rtn);
-}
-
-static inline int ll_crypto_hmac(struct ll_crypto_hash *tfm,
- u8 *key, unsigned int *keylen,
- struct scatterlist *sg,
- unsigned int size, u8 *result)
-{
- struct hash_desc desc;
- int rv;
- desc.tfm = tfm;
- desc.flags = 0;
- rv = crypto_hash_setkey(desc.tfm, key, *keylen);
- if (rv) {
- CERROR("failed to hash setkey: %d\n", rv);
- return rv;
- }
- return crypto_hash_digest(&desc, sg, size, result);
-}
-static inline
-unsigned int ll_crypto_tfm_alg_max_keysize(struct crypto_blkcipher *tfm)
-{
- return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.max_keysize;
-}
-static inline
-unsigned int ll_crypto_tfm_alg_min_keysize(struct crypto_blkcipher *tfm)
-{
- return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.min_keysize;
-}
-
-#define ll_crypto_hash_blocksize(tfm) crypto_hash_blocksize(tfm)
-#define ll_crypto_hash_digestsize(tfm) crypto_hash_digestsize(tfm)
-#define ll_crypto_blkcipher_ivsize(tfm) crypto_blkcipher_ivsize(tfm)
-#define ll_crypto_blkcipher_blocksize(tfm) crypto_blkcipher_blocksize(tfm)
-#define ll_crypto_free_hash(tfm) crypto_free_hash(tfm)
-#define ll_crypto_free_blkcipher(tfm) crypto_free_blkcipher(tfm)
-
#define ll_vfs_rmdir(dir,entry,mnt) vfs_rmdir(dir,entry)
#define ll_vfs_mkdir(inode,dir,mnt,mode) vfs_mkdir(inode,dir,mode)
#define ll_vfs_link(old,mnt,dir,new,mnt1) vfs_link(old,dir,new)
@@ -202,12 +108,6 @@ unsigned int ll_crypto_tfm_alg_min_keysize(struct crypto_blkcipher *tfm)
#define ll_vfs_rename(old,old_dir,mnt,new,new_dir,mnt1) \
vfs_rename(old,old_dir,new,new_dir)
-#ifdef for_each_possible_cpu
-#define cfs_for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
-#elif defined(for_each_cpu)
-#define cfs_for_each_possible_cpu(cpu) for_each_cpu(cpu)
-#endif
-
#define cfs_bio_io_error(a,b) bio_io_error((a))
#define cfs_bio_endio(a,b,c) bio_endio((a),(c))
@@ -266,9 +166,6 @@ static inline int ll_quota_off(struct super_block *sb, int off, int remount)
#define queue_max_phys_segments(rq) queue_max_segments(rq)
#define queue_max_hw_segments(rq) queue_max_segments(rq)
-#define ll_kmap_atomic(a, b) kmap_atomic(a)
-#define ll_kunmap_atomic(a, b) kunmap_atomic(a)
-
#define ll_d_hlist_node hlist_node
#define ll_d_hlist_empty(list) hlist_empty(list)
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_fsfilt.h b/drivers/staging/lustre/lustre/include/linux/lustre_fsfilt.h
index 6c726095738..4da6e372e00 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_fsfilt.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_fsfilt.h
@@ -54,7 +54,7 @@ typedef void (*fsfilt_cb_t)(struct obd_device *obd, __u64 last_rcvd,
struct fsfilt_operations {
struct list_head fs_list;
- module_t *fs_owner;
+ struct module *fs_owner;
char *fs_type;
char *(* fs_getlabel)(struct super_block *sb);
void *(* fs_start)(struct inode *inode, int op, void *desc_private,
@@ -145,16 +145,6 @@ static inline int fsfilt_commit(struct obd_device *obd, struct inode *inode,
return rc;
}
-static inline int fsfilt_map_inode_pages(struct obd_device *obd,
- struct inode *inode,
- struct page **page, int pages,
- unsigned long *blocks,
- int create, struct mutex *mutex)
-{
- return obd->obd_fsops->fs_map_inode_pages(inode, page, pages, blocks,
- create, mutex);
-}
-
static inline int fsfilt_read_record(struct obd_device *obd, struct file *file,
void *buf, loff_t size, loff_t *offs)
{
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_handles.h b/drivers/staging/lustre/lustre/include/linux/lustre_handles.h
index ecf18405125..459b2380600 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_handles.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_handles.h
@@ -42,7 +42,6 @@
#include <asm/types.h>
#include <asm/atomic.h>
#include <linux/list.h>
-#include <linux/version.h>
#include <linux/spinlock.h>
#include <linux/types.h>
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_lib.h b/drivers/staging/lustre/lustre/include/linux/lustre_lib.h
index b2f755acadf..57f3b01d1a3 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_lib.h
@@ -53,15 +53,13 @@
# include <linux/lustre_common.h>
#ifndef LP_POISON
+# define LI_POISON 0x5a5a5a5a
#if BITS_PER_LONG > 32
-# define LI_POISON ((int)0x5a5a5a5a5a5a5a5a)
-# define LL_POISON ((long)0x5a5a5a5a5a5a5a5a)
-# define LP_POISON ((void *)(long)0x5a5a5a5a5a5a5a5a)
+# define LL_POISON 0x5a5a5a5a5a5a5a5aL
#else
-# define LI_POISON ((int)0x5a5a5a5a)
-# define LL_POISON ((long)0x5a5a5a5a)
-# define LP_POISON ((void *)(long)0x5a5a5a5a)
+# define LL_POISON 0x5a5a5a5aL
#endif
+# define LP_POISON ((void *)LL_POISON)
#endif
/* This macro is only for compatibility reasons with older Linux Lustre user
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_lite.h b/drivers/staging/lustre/lustre/include/linux/lustre_lite.h
index c95dff900b5..9e5df8dabe8 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_lite.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_lite.h
@@ -40,8 +40,6 @@
#endif
-#include <linux/version.h>
-
#include <asm/statfs.h>
#include <linux/fs.h>
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_net.h b/drivers/staging/lustre/lustre/include/linux/lustre_net.h
index 2d7c425d701..05de4d87db9 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_net.h
@@ -39,7 +39,6 @@
#error Do not #include this file directly. #include <lustre_net.h> instead
#endif
-#include <linux/version.h>
#include <linux/workqueue.h>
/* XXX Liang: should be moved to other header instead of here */
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
index a8e9c0c8ffd..a260e99a444 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_patchless_compat.h
@@ -53,7 +53,7 @@ truncate_complete_page(struct address_space *mapping, struct page *page)
return;
if (PagePrivate(page))
- page->mapping->a_ops->invalidatepage(page, 0);
+ page->mapping->a_ops->invalidatepage(page, 0, PAGE_CACHE_SIZE);
cancel_dirty_page(page, PAGE_SIZE);
ClearPageMappedToDisk(page);
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_quota.h b/drivers/staging/lustre/lustre/include/linux/lustre_quota.h
index 421866b004c..a39505014c8 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_quota.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_quota.h
@@ -39,7 +39,6 @@
#error Do not #include this file directly. #include <lustre_quota.h> instead
#endif
-#include <linux/version.h>
#include <linux/fs.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
diff --git a/drivers/staging/lustre/lustre/include/linux/lustre_user.h b/drivers/staging/lustre/lustre/include/linux/lustre_user.h
index ebaf92977f7..9cc2849f3f8 100644
--- a/drivers/staging/lustre/lustre/include/linux/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/linux/lustre_user.h
@@ -41,7 +41,6 @@
#ifndef _LINUX_LUSTRE_USER_H
#define _LINUX_LUSTRE_USER_H
-# include <linux/version.h>
# include <linux/quota.h>
/*
@@ -53,15 +52,19 @@
#include <linux/string.h>
-#if defined(__x86_64__) || defined(__ia64__) || defined(__ppc64__) || \
- defined(__craynv) || defined (__mips64__) || defined(__powerpc64__)
-typedef struct stat lstat_t;
-#define lstat_f lstat
-#define HAVE_LOV_USER_MDS_DATA
-#else
+/*
+ * We need to always use 64bit version because the structure
+ * is shared across entire cluster where 32bit and 64bit machines
+ * are co-existing.
+ */
+#if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64)
typedef struct stat64 lstat_t;
#define lstat_f lstat64
-#define HAVE_LOV_USER_MDS_DATA
+#else
+typedef struct stat lstat_t;
+#define lstat_f lstat
#endif
+#define HAVE_LOV_USER_MDS_DATA
+
#endif /* _LUSTRE_USER_H */
diff --git a/drivers/staging/lustre/lustre/include/linux/lvfs.h b/drivers/staging/lustre/lustre/include/linux/lvfs.h
index eb59ac7d594..e61f1b87f82 100644
--- a/drivers/staging/lustre/lustre/include/linux/lvfs.h
+++ b/drivers/staging/lustre/lustre/include/linux/lvfs.h
@@ -54,10 +54,10 @@
/* simple.c */
struct lvfs_ucred {
- __u32 luc_uid;
- __u32 luc_gid;
- __u32 luc_fsuid;
- __u32 luc_fsgid;
+ kuid_t luc_uid;
+ kgid_t luc_gid;
+ kuid_t luc_fsuid;
+ kgid_t luc_fsgid;
kernel_cap_t luc_cap;
__u32 luc_umask;
struct group_info *luc_ginfo;
diff --git a/drivers/staging/lustre/lustre/include/linux/obd.h b/drivers/staging/lustre/lustre/include/linux/obd.h
index 2c36c0d19d0..01a50265239 100644
--- a/drivers/staging/lustre/lustre/include/linux/obd.h
+++ b/drivers/staging/lustre/lustre/include/linux/obd.h
@@ -93,11 +93,8 @@ static inline void __client_obd_list_lock(client_obd_lock_t *lock,
lock, task->comm, task->pid,
lock->func, lock->line,
(jiffies - lock->time) / HZ);
- LCONSOLE_WARN("====== for process holding the "
- "lock =====\n");
- libcfs_debug_dumpstack(task);
LCONSOLE_WARN("====== for current process =====\n");
- libcfs_debug_dumpstack(NULL);
+ dump_stack();
LCONSOLE_WARN("====== end =======\n");
cfs_pause(1000 * HZ);
}
diff --git a/drivers/staging/lustre/lustre/include/lprocfs_status.h b/drivers/staging/lustre/lustre/include/lprocfs_status.h
index 55f182205d7..56b05728f61 100644
--- a/drivers/staging/lustre/lustre/include/lprocfs_status.h
+++ b/drivers/staging/lustre/lustre/include/lprocfs_status.h
@@ -345,7 +345,7 @@ enum {
#define EXTRA_FIRST_OPC LDLM_GLIMPSE_ENQUEUE
/* class_obd.c */
-extern proc_dir_entry_t *proc_lustre_root;
+extern struct proc_dir_entry *proc_lustre_root;
struct obd_device;
struct obd_histogram;
@@ -370,18 +370,6 @@ static inline void s2dhms(struct dhms *ts, time_t secs)
#define JOBSTATS_DISABLE "disable"
#define JOBSTATS_PROCNAME_UID "procname_uid"
-typedef void (*cntr_init_callback)(struct lprocfs_stats *stats);
-
-struct obd_job_stats {
- cfs_hash_t *ojs_hash;
- struct list_head ojs_list;
- rwlock_t ojs_lock; /* protect the obj_list */
- cntr_init_callback ojs_cntr_init_fn;
- int ojs_cntr_num;
- int ojs_cleanup_interval;
- time_t ojs_last_cleanup;
-};
-
#ifdef LPROCFS
extern int lprocfs_stats_alloc_one(struct lprocfs_stats *stats,
@@ -562,11 +550,11 @@ extern void lprocfs_free_md_stats(struct obd_device *obddev);
struct obd_export;
struct nid_stat;
extern int lprocfs_add_clear_entry(struct obd_device * obd,
- proc_dir_entry_t *entry);
+ struct proc_dir_entry *entry);
extern int lprocfs_exp_setup(struct obd_export *exp,
lnet_nid_t *peer_nid, int *newnid);
extern int lprocfs_exp_cleanup(struct obd_export *exp);
-extern proc_dir_entry_t *lprocfs_add_simple(struct proc_dir_entry *root,
+extern struct proc_dir_entry *lprocfs_add_simple(struct proc_dir_entry *root,
char *name,
void *data,
struct file_operations *fops);
@@ -579,27 +567,27 @@ lprocfs_nid_stats_clear_write(struct file *file, const char *buffer,
unsigned long count, void *data);
extern int lprocfs_nid_stats_clear_read(struct seq_file *m, void *data);
-extern int lprocfs_register_stats(proc_dir_entry_t *root, const char *name,
+extern int lprocfs_register_stats(struct proc_dir_entry *root, const char *name,
struct lprocfs_stats *stats);
/* lprocfs_status.c */
-extern int lprocfs_add_vars(proc_dir_entry_t *root,
+extern int lprocfs_add_vars(struct proc_dir_entry *root,
struct lprocfs_vars *var,
void *data);
-extern proc_dir_entry_t *lprocfs_register(const char *name,
- proc_dir_entry_t *parent,
+extern struct proc_dir_entry *lprocfs_register(const char *name,
+ struct proc_dir_entry *parent,
struct lprocfs_vars *list,
void *data);
-extern void lprocfs_remove(proc_dir_entry_t **root);
+extern void lprocfs_remove(struct proc_dir_entry **root);
extern void lprocfs_remove_proc_entry(const char *name,
struct proc_dir_entry *parent);
extern int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list);
extern int lprocfs_obd_cleanup(struct obd_device *obd);
-extern int lprocfs_seq_create(proc_dir_entry_t *parent, const char *name,
+extern int lprocfs_seq_create(struct proc_dir_entry *parent, const char *name,
umode_t mode,
const struct file_operations *seq_fops,
void *data);
@@ -663,8 +651,8 @@ extern int lprocfs_write_u64_helper(const char *buffer, unsigned long count,
extern int lprocfs_write_frac_u64_helper(const char *buffer,
unsigned long count,
__u64 *val, int mult);
-char *lprocfs_find_named_value(const char *buffer, const char *name,
- unsigned long *count);
+extern char *lprocfs_find_named_value(const char *buffer, const char *name,
+ size_t *count);
void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value);
void lprocfs_oh_tally_log2(struct obd_histogram *oh, unsigned int value);
void lprocfs_oh_clear(struct obd_histogram *oh);
@@ -748,16 +736,6 @@ struct file_operations name##_fops = { \
.release = lprocfs_single_release, \
};
-/* lprocfs_jobstats.c */
-int lprocfs_job_stats_log(struct obd_device *obd, char *jobid,
- int event, long amount);
-void lprocfs_job_stats_fini(struct obd_device *obd);
-int lprocfs_job_stats_init(struct obd_device *obd, int cntr_num,
- cntr_init_callback fn);
-int lprocfs_rd_job_interval(struct seq_file *m, void *data);
-int lprocfs_wr_job_interval(struct file *file, const char *buffer,
- unsigned long count, void *data);
-
/* lproc_ptlrpc.c */
struct ptlrpc_request;
extern void target_print_req(void *seq_file, struct ptlrpc_request *req);
@@ -826,9 +804,6 @@ extern int lprocfs_quota_rd_qs_factor(char *page, char **start, loff_t off,
extern int lprocfs_quota_wr_qs_factor(struct file *file,
const char *buffer,
unsigned long count, void *data);
-
-
-
#else
/* LPROCFS is not defined */
@@ -863,7 +838,7 @@ static inline void lprocfs_clear_stats(struct lprocfs_stats *stats)
{ return; }
static inline void lprocfs_free_stats(struct lprocfs_stats **stats)
{ return; }
-static inline int lprocfs_register_stats(proc_dir_entry_t *root,
+static inline int lprocfs_register_stats(struct proc_dir_entry *root,
const char *name,
struct lprocfs_stats *stats)
{ return 0; }
@@ -894,7 +869,7 @@ static inline int lprocfs_exp_setup(struct obd_export *exp,lnet_nid_t *peer_nid,
{ return 0; }
static inline int lprocfs_exp_cleanup(struct obd_export *exp)
{ return 0; }
-static inline proc_dir_entry_t *
+static inline struct proc_dir_entry *
lprocfs_add_simple(struct proc_dir_entry *root, char *name,
void *data, struct file_operations *fops)
{return 0; }
@@ -912,15 +887,15 @@ static inline
int lprocfs_nid_stats_clear_read(struct seq_file *m, void *data)
{ return 0; }
-static inline proc_dir_entry_t *
-lprocfs_register(const char *name, proc_dir_entry_t *parent,
+static inline struct proc_dir_entry *
+lprocfs_register(const char *name, struct proc_dir_entry *parent,
struct lprocfs_vars *list, void *data)
{ return NULL; }
-static inline int lprocfs_add_vars(proc_dir_entry_t *root,
+static inline int lprocfs_add_vars(struct proc_dir_entry *root,
struct lprocfs_vars *var,
void *data)
{ return 0; }
-static inline void lprocfs_remove(proc_dir_entry_t **root)
+static inline void lprocfs_remove(struct proc_dir_entry **root)
{ return; }
static inline void lprocfs_remove_proc_entry(const char *name,
struct proc_dir_entry *parent)
@@ -1021,20 +996,6 @@ __u64 lprocfs_stats_collector(struct lprocfs_stats *stats, int idx,
#define LPROC_SEQ_FOPS_RW_TYPE(name, type)
#define LPROC_SEQ_FOPS_WR_ONLY(name, type)
-/* lprocfs_jobstats.c */
-static inline
-int lprocfs_job_stats_log(struct obd_device *obd, char *jobid, int event,
- long amount)
-{ return 0; }
-static inline
-void lprocfs_job_stats_fini(struct obd_device *obd)
-{ return; }
-static inline
-int lprocfs_job_stats_init(struct obd_device *obd, int cntr_num,
- cntr_init_callback fn)
-{ return 0; }
-
-
/* lproc_ptlrpc.c */
#define target_print_req NULL
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index d40ad81b4eb..fa31be886ef 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -496,7 +496,7 @@ struct lu_object {
/**
* Link to the device, for debugging.
*/
- struct lu_ref_link *lo_dev_ref;
+ struct lu_ref_link lo_dev_ref;
};
enum lu_object_header_flags {
@@ -665,6 +665,11 @@ lu_site_bkt_from_fid(struct lu_site *site, struct lu_fid *fid)
return cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);
}
+static inline struct seq_server_site *lu_site2seq(const struct lu_site *s)
+{
+ return s->ld_seq_site;
+}
+
/** \name ctors
* Constructors/destructors.
* @{
@@ -868,11 +873,19 @@ static inline __u32 lu_object_attr(const struct lu_object *o)
return o->lo_header->loh_attr;
}
-static inline struct lu_ref_link *lu_object_ref_add(struct lu_object *o,
- const char *scope,
- const void *source)
+static inline void lu_object_ref_add(struct lu_object *o,
+ const char *scope,
+ const void *source)
+{
+ lu_ref_add(&o->lo_header->loh_reference, scope, source);
+}
+
+static inline void lu_object_ref_add_at(struct lu_object *o,
+ struct lu_ref_link *link,
+ const char *scope,
+ const void *source)
{
- return lu_ref_add(&o->lo_header->loh_reference, scope, source);
+ lu_ref_add_at(&o->lo_header->loh_reference, link, scope, source);
}
static inline void lu_object_ref_del(struct lu_object *o,
@@ -1118,7 +1131,7 @@ struct lu_context_key {
/**
* Internal implementation detail: module for this key.
*/
- module_t *lct_owner;
+ struct module *lct_owner;
/**
* References to this key. For debugging.
*/
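The hunks above change lu_object_ref_add() to return void and introduce lu_object_ref_add_at(), which records a reference through a caller-owned struct lu_ref_link (the lo_dev_ref field is now embedded rather than a pointer). A minimal sketch of the new calling pattern; the function name and scope string below are illustrative only:

static void example_take_ref(struct lu_object *o, struct lu_ref_link *link)
{
	/* plain style: reference identified only by scope/source */
	lu_object_ref_add(o, "example-scope", o);

	/* link style: reference recorded through a caller-owned link, such
	 * as the lo_dev_ref field now embedded in struct lu_object */
	lu_object_ref_add_at(o, link, "example-scope", o);
}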
diff --git a/drivers/staging/lustre/lustre/include/lu_ref.h b/drivers/staging/lustre/lustre/include/lu_ref.h
index 624c19be152..50a2a7f786d 100644
--- a/drivers/staging/lustre/lustre/include/lu_ref.h
+++ b/drivers/staging/lustre/lustre/include/lu_ref.h
@@ -108,7 +108,12 @@
*/
-struct lu_ref {};
+/*
+ * Dummy data structures/functions so this compiles for now.
+ * They need to be reimplemented with kref.
+ */
+struct lu_ref {};
+struct lu_ref_link {};
static inline void lu_ref_init(struct lu_ref *ref)
{
@@ -132,6 +137,13 @@ static inline struct lu_ref_link *lu_ref_add_atomic(struct lu_ref *ref,
return NULL;
}
+static inline void lu_ref_add_at(struct lu_ref *ref,
+ struct lu_ref_link *link,
+ const char *scope,
+ const void *source)
+{
+}
+
static inline void lu_ref_del(struct lu_ref *ref, const char *scope,
const void *source)
{
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_errno.h b/drivers/staging/lustre/lustre/include/lustre/lustre_errno.h
new file mode 100644
index 00000000000..2870487dd28
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_errno.h
@@ -0,0 +1,215 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.txt
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (C) 2011 FUJITSU LIMITED. All rights reserved.
+ *
+ * Copyright (c) 2013, Intel Corporation.
+ */
+
+#ifndef LUSTRE_ERRNO_H
+#define LUSTRE_ERRNO_H
+
+/*
+ * Only "network" errnos, which are defined below, are allowed on wire (or on
+ * disk). Generic routines exist to help translate between these and a subset
+ * of the "host" errnos. Some host errnos (e.g., EDEADLOCK) are intentionally
+ * left out. See also the comment on lustre_errno_hton_mapping[].
+ *
+ * To maintain compatibility with existing x86 clients and servers, each of
+ * these network errnos has the same numerical value as its corresponding host
+ * errno on x86.
+ */
+#define LUSTRE_EPERM 1 /* Operation not permitted */
+#define LUSTRE_ENOENT 2 /* No such file or directory */
+#define LUSTRE_ESRCH 3 /* No such process */
+#define LUSTRE_EINTR 4 /* Interrupted system call */
+#define LUSTRE_EIO 5 /* I/O error */
+#define LUSTRE_ENXIO 6 /* No such device or address */
+#define LUSTRE_E2BIG 7 /* Argument list too long */
+#define LUSTRE_ENOEXEC 8 /* Exec format error */
+#define LUSTRE_EBADF 9 /* Bad file number */
+#define LUSTRE_ECHILD 10 /* No child processes */
+#define LUSTRE_EAGAIN 11 /* Try again */
+#define LUSTRE_ENOMEM 12 /* Out of memory */
+#define LUSTRE_EACCES 13 /* Permission denied */
+#define LUSTRE_EFAULT 14 /* Bad address */
+#define LUSTRE_ENOTBLK 15 /* Block device required */
+#define LUSTRE_EBUSY 16 /* Device or resource busy */
+#define LUSTRE_EEXIST 17 /* File exists */
+#define LUSTRE_EXDEV 18 /* Cross-device link */
+#define LUSTRE_ENODEV 19 /* No such device */
+#define LUSTRE_ENOTDIR 20 /* Not a directory */
+#define LUSTRE_EISDIR 21 /* Is a directory */
+#define LUSTRE_EINVAL 22 /* Invalid argument */
+#define LUSTRE_ENFILE 23 /* File table overflow */
+#define LUSTRE_EMFILE 24 /* Too many open files */
+#define LUSTRE_ENOTTY 25 /* Not a typewriter */
+#define LUSTRE_ETXTBSY 26 /* Text file busy */
+#define LUSTRE_EFBIG 27 /* File too large */
+#define LUSTRE_ENOSPC 28 /* No space left on device */
+#define LUSTRE_ESPIPE 29 /* Illegal seek */
+#define LUSTRE_EROFS 30 /* Read-only file system */
+#define LUSTRE_EMLINK 31 /* Too many links */
+#define LUSTRE_EPIPE 32 /* Broken pipe */
+#define LUSTRE_EDOM 33 /* Math argument out of domain of
+ func */
+#define LUSTRE_ERANGE 34 /* Math result not representable */
+#define LUSTRE_EDEADLK 35 /* Resource deadlock would occur */
+#define LUSTRE_ENAMETOOLONG 36 /* File name too long */
+#define LUSTRE_ENOLCK 37 /* No record locks available */
+#define LUSTRE_ENOSYS 38 /* Function not implemented */
+#define LUSTRE_ENOTEMPTY 39 /* Directory not empty */
+#define LUSTRE_ELOOP 40 /* Too many symbolic links
+ encountered */
+#define LUSTRE_ENOMSG 42 /* No message of desired type */
+#define LUSTRE_EIDRM 43 /* Identifier removed */
+#define LUSTRE_ECHRNG 44 /* Channel number out of range */
+#define LUSTRE_EL2NSYNC 45 /* Level 2 not synchronized */
+#define LUSTRE_EL3HLT 46 /* Level 3 halted */
+#define LUSTRE_EL3RST 47 /* Level 3 reset */
+#define LUSTRE_ELNRNG 48 /* Link number out of range */
+#define LUSTRE_EUNATCH 49 /* Protocol driver not attached */
+#define LUSTRE_ENOCSI 50 /* No CSI structure available */
+#define LUSTRE_EL2HLT 51 /* Level 2 halted */
+#define LUSTRE_EBADE 52 /* Invalid exchange */
+#define LUSTRE_EBADR 53 /* Invalid request descriptor */
+#define LUSTRE_EXFULL 54 /* Exchange full */
+#define LUSTRE_ENOANO 55 /* No anode */
+#define LUSTRE_EBADRQC 56 /* Invalid request code */
+#define LUSTRE_EBADSLT 57 /* Invalid slot */
+#define LUSTRE_EBFONT 59 /* Bad font file format */
+#define LUSTRE_ENOSTR 60 /* Device not a stream */
+#define LUSTRE_ENODATA 61 /* No data available */
+#define LUSTRE_ETIME 62 /* Timer expired */
+#define LUSTRE_ENOSR 63 /* Out of streams resources */
+#define LUSTRE_ENONET 64 /* Machine is not on the network */
+#define LUSTRE_ENOPKG 65 /* Package not installed */
+#define LUSTRE_EREMOTE 66 /* Object is remote */
+#define LUSTRE_ENOLINK 67 /* Link has been severed */
+#define LUSTRE_EADV 68 /* Advertise error */
+#define LUSTRE_ESRMNT 69 /* Srmount error */
+#define LUSTRE_ECOMM 70 /* Communication error on send */
+#define LUSTRE_EPROTO 71 /* Protocol error */
+#define LUSTRE_EMULTIHOP 72 /* Multihop attempted */
+#define LUSTRE_EDOTDOT 73 /* RFS specific error */
+#define LUSTRE_EBADMSG 74 /* Not a data message */
+#define LUSTRE_EOVERFLOW 75 /* Value too large for defined data
+ type */
+#define LUSTRE_ENOTUNIQ 76 /* Name not unique on network */
+#define LUSTRE_EBADFD 77 /* File descriptor in bad state */
+#define LUSTRE_EREMCHG 78 /* Remote address changed */
+#define LUSTRE_ELIBACC 79 /* Can not access a needed shared
+ library */
+#define LUSTRE_ELIBBAD 80 /* Accessing a corrupted shared
+ library */
+#define LUSTRE_ELIBSCN 81 /* .lib section in a.out corrupted */
+#define LUSTRE_ELIBMAX 82 /* Attempting to link in too many shared
+ libraries */
+#define LUSTRE_ELIBEXEC 83 /* Cannot exec a shared library
+ directly */
+#define LUSTRE_EILSEQ 84 /* Illegal byte sequence */
+#define LUSTRE_ERESTART 85 /* Interrupted system call should be
+ restarted */
+#define LUSTRE_ESTRPIPE 86 /* Streams pipe error */
+#define LUSTRE_EUSERS 87 /* Too many users */
+#define LUSTRE_ENOTSOCK 88 /* Socket operation on non-socket */
+#define LUSTRE_EDESTADDRREQ 89 /* Destination address required */
+#define LUSTRE_EMSGSIZE 90 /* Message too long */
+#define LUSTRE_EPROTOTYPE 91 /* Protocol wrong type for socket */
+#define LUSTRE_ENOPROTOOPT 92 /* Protocol not available */
+#define LUSTRE_EPROTONOSUPPORT 93 /* Protocol not supported */
+#define LUSTRE_ESOCKTNOSUPPORT 94 /* Socket type not supported */
+#define LUSTRE_EOPNOTSUPP 95 /* Operation not supported on transport
+ endpoint */
+#define LUSTRE_EPFNOSUPPORT 96 /* Protocol family not supported */
+#define LUSTRE_EAFNOSUPPORT 97 /* Address family not supported by
+ protocol */
+#define LUSTRE_EADDRINUSE 98 /* Address already in use */
+#define LUSTRE_EADDRNOTAVAIL 99 /* Cannot assign requested address */
+#define LUSTRE_ENETDOWN 100 /* Network is down */
+#define LUSTRE_ENETUNREACH 101 /* Network is unreachable */
+#define LUSTRE_ENETRESET 102 /* Network dropped connection because of
+ reset */
+#define LUSTRE_ECONNABORTED 103 /* Software caused connection abort */
+#define LUSTRE_ECONNRESET 104 /* Connection reset by peer */
+#define LUSTRE_ENOBUFS 105 /* No buffer space available */
+#define LUSTRE_EISCONN 106 /* Transport endpoint is already
+ connected */
+#define LUSTRE_ENOTCONN 107 /* Transport endpoint is not
+ connected */
+#define LUSTRE_ESHUTDOWN 108 /* Cannot send after transport endpoint
+ shutdown */
+#define LUSTRE_ETOOMANYREFS 109 /* Too many references: cannot splice */
+#define LUSTRE_ETIMEDOUT 110 /* Connection timed out */
+#define LUSTRE_ECONNREFUSED 111 /* Connection refused */
+#define LUSTRE_EHOSTDOWN 112 /* Host is down */
+#define LUSTRE_EHOSTUNREACH 113 /* No route to host */
+#define LUSTRE_EALREADY 114 /* Operation already in progress */
+#define LUSTRE_EINPROGRESS 115 /* Operation now in progress */
+#define LUSTRE_ESTALE 116 /* Stale NFS file handle */
+#define LUSTRE_EUCLEAN 117 /* Structure needs cleaning */
+#define LUSTRE_ENOTNAM 118 /* Not a XENIX named type file */
+#define LUSTRE_ENAVAIL 119 /* No XENIX semaphores available */
+#define LUSTRE_EISNAM 120 /* Is a named type file */
+#define LUSTRE_EREMOTEIO 121 /* Remote I/O error */
+#define LUSTRE_EDQUOT 122 /* Quota exceeded */
+#define LUSTRE_ENOMEDIUM 123 /* No medium found */
+#define LUSTRE_EMEDIUMTYPE 124 /* Wrong medium type */
+#define LUSTRE_ECANCELED 125 /* Operation Canceled */
+#define LUSTRE_ENOKEY 126 /* Required key not available */
+#define LUSTRE_EKEYEXPIRED 127 /* Key has expired */
+#define LUSTRE_EKEYREVOKED 128 /* Key has been revoked */
+#define LUSTRE_EKEYREJECTED 129 /* Key was rejected by service */
+#define LUSTRE_EOWNERDEAD 130 /* Owner died */
+#define LUSTRE_ENOTRECOVERABLE 131 /* State not recoverable */
+#define LUSTRE_ERESTARTSYS 512
+#define LUSTRE_ERESTARTNOINTR 513
+#define LUSTRE_ERESTARTNOHAND 514 /* restart if no handler.. */
+#define LUSTRE_ENOIOCTLCMD 515 /* No ioctl command */
+#define LUSTRE_ERESTART_RESTARTBLOCK 516 /* restart by calling
+ sys_restart_syscall */
+#define LUSTRE_EBADHANDLE 521 /* Illegal NFS file handle */
+#define LUSTRE_ENOTSYNC 522 /* Update synchronization mismatch */
+#define LUSTRE_EBADCOOKIE 523 /* Cookie is stale */
+#define LUSTRE_ENOTSUPP 524 /* Operation is not supported */
+#define LUSTRE_ETOOSMALL 525 /* Buffer or request is too small */
+#define LUSTRE_ESERVERFAULT 526 /* An untranslatable error occurred */
+#define LUSTRE_EBADTYPE 527 /* Type not supported by server */
+#define LUSTRE_EJUKEBOX 528 /* Request initiated, but will not
+ complete before timeout */
+#define LUSTRE_EIOCBQUEUED 529 /* iocb queued, will get completion
+ event */
+#define LUSTRE_EIOCBRETRY 530 /* iocb queued, will trigger a retry */
+
+/*
+ * Translations are optimized away on x86. Host errnos that shouldn't be put
+ * on wire could leak through as a result. Do not count on this side effect.
+ */
+#ifdef CONFIG_LUSTRE_TRANSLATE_ERRNOS
+unsigned int lustre_errno_hton(unsigned int h);
+unsigned int lustre_errno_ntoh(unsigned int n);
+#else
+#define lustre_errno_hton(h) (h)
+#define lustre_errno_ntoh(n) (n)
+#endif
+
+#endif /* LUSTRE_ERRNO_H */
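The header above only declares the translation entry points. A hedged sketch of how a sender and a receiver might use them, assuming the usual kernel convention of negative host return codes and positive on-wire values; the helper names are hypothetical:

#include <lustre/lustre_errno.h>

/* hypothetical send path: host errno -> network errno before it goes on
 * the wire (a pass-through on x86, where the macros above are no-ops) */
static __u32 example_pack_rc(int host_rc)
{
	return lustre_errno_hton((unsigned int)(-host_rc));
}

/* hypothetical receive path: network errno -> host errno */
static int example_unpack_rc(__u32 wire_rc)
{
	return -(int)lustre_errno_ntoh(wire_rc);
}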
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 8825460f12a..984235ccd3a 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -98,6 +98,8 @@
/* Defn's shared with user-space. */
#include <lustre/lustre_user.h>
+#include <lustre/lustre_errno.h>
+
/*
* GENERAL STUFF
*/
@@ -911,7 +913,7 @@ static inline int lu_fid_cmp(const struct lu_fid *f0,
__diff_normalize(fid_ver(f0), fid_ver(f1));
}
-static inline void ostid_cpu_to_le(struct ost_id *src_oi,
+static inline void ostid_cpu_to_le(const struct ost_id *src_oi,
struct ost_id *dst_oi)
{
if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
@@ -922,7 +924,7 @@ static inline void ostid_cpu_to_le(struct ost_id *src_oi,
}
}
-static inline void ostid_le_to_cpu(struct ost_id *src_oi,
+static inline void ostid_le_to_cpu(const struct ost_id *src_oi,
struct ost_id *dst_oi)
{
if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
@@ -1544,10 +1546,16 @@ enum obdo_flags {
#define LOV_MAGIC_V1_DEF 0x0CD10BD0
#define LOV_MAGIC_V3_DEF 0x0CD30BD0
-#define LOV_PATTERN_RAID0 0x001 /* stripes are used round-robin */
-#define LOV_PATTERN_RAID1 0x002 /* stripes are mirrors of each other */
-#define LOV_PATTERN_FIRST 0x100 /* first stripe is not in round-robin */
-#define LOV_PATTERN_CMOBD 0x200
+#define LOV_PATTERN_RAID0 0x001 /* stripes are used round-robin */
+#define LOV_PATTERN_RAID1 0x002 /* stripes are mirrors of each other */
+#define LOV_PATTERN_FIRST 0x100 /* first stripe is not in round-robin */
+#define LOV_PATTERN_CMOBD 0x200
+
+#define LOV_PATTERN_F_MASK 0xffff0000
+#define LOV_PATTERN_F_RELEASED 0x80000000 /* HSM released file */
+
+#define lov_pattern(pattern) (pattern & ~LOV_PATTERN_F_MASK)
+#define lov_pattern_flags(pattern) (pattern & LOV_PATTERN_F_MASK)
#define lov_ost_data lov_ost_data_v1
struct lov_ost_data_v1 { /* per-stripe data structure (little-endian)*/
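The new lov_pattern()/lov_pattern_flags() helpers split the 32-bit pattern word into a base layout and flag bits. A small, hypothetical helper built on them (the function name is illustrative):

/* true when the flag half of the pattern word marks the file as released
 * to HSM; lov_pattern() strips these bits and leaves the base layout,
 * e.g. LOV_PATTERN_RAID0 */
static inline bool example_pattern_is_released(__u32 lmm_pattern)
{
	return (lov_pattern_flags(lmm_pattern) & LOV_PATTERN_F_RELEASED) != 0;
}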
@@ -1662,6 +1670,17 @@ struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */
struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
};
+static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
+{
+ if (lmm_magic == LOV_MAGIC_V3)
+ return sizeof(struct lov_mds_md_v3) +
+ stripes * sizeof(struct lov_ost_data_v1);
+ else
+ return sizeof(struct lov_mds_md_v1) +
+ stripes * sizeof(struct lov_ost_data_v1);
+}
+
+
#define OBD_MD_FLID (0x00000001ULL) /* object ID */
#define OBD_MD_FLATIME (0x00000002ULL) /* access time */
#define OBD_MD_FLMTIME (0x00000004ULL) /* data modification time */
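lov_mds_md_size(), added above, turns a stripe count and magic into the byte size of the LOV EA; the user-space variant lov_user_md_size() added further below follows the same shape. A hedged usage sketch (the wrapper name is hypothetical; LOV_MAGIC_V1/LOV_MAGIC_V3 are the existing magics the function itself switches on):

/* bytes of LOV EA needed for a file striped over "stripes" OSTs */
static inline __u32 example_lov_ea_bytes(__u16 stripes, bool v3)
{
	return lov_mds_md_size(stripes, v3 ? LOV_MAGIC_V3 : LOV_MAGIC_V1);
}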
@@ -2671,6 +2690,10 @@ struct ldlm_res_id {
__u64 name[RES_NAME_SIZE];
};
+#define DLDLMRES "["LPX64":"LPX64":"LPX64"]."LPX64i
+#define PLDLMRES(res) (res)->lr_name.name[0], (res)->lr_name.name[1], \
+ (res)->lr_name.name[2], (res)->lr_name.name[3]
+
extern void lustre_swab_ldlm_res_id (struct ldlm_res_id *id);
static inline int ldlm_res_eq(const struct ldlm_res_id *res0,
@@ -2963,6 +2986,7 @@ typedef enum {
/* LLOG_JOIN_REC = LLOG_OP_MAGIC | 0x50000, obsolete 1.8.0 */
CHANGELOG_REC = LLOG_OP_MAGIC | 0x60000,
CHANGELOG_USER_REC = LLOG_OP_MAGIC | 0x70000,
+ HSM_AGENT_REC = LLOG_OP_MAGIC | 0x80000,
LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539,
LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b,
} llog_op_type;
@@ -3082,6 +3106,52 @@ struct llog_changelog_user_rec {
struct llog_rec_tail cur_tail;
} __attribute__((packed));
+enum agent_req_status {
+ ARS_WAITING,
+ ARS_STARTED,
+ ARS_FAILED,
+ ARS_CANCELED,
+ ARS_SUCCEED,
+};
+
+static inline char *agent_req_status2name(enum agent_req_status ars)
+{
+ switch (ars) {
+ case ARS_WAITING:
+ return "WAITING";
+ case ARS_STARTED:
+ return "STARTED";
+ case ARS_FAILED:
+ return "FAILED";
+ case ARS_CANCELED:
+ return "CANCELED";
+ case ARS_SUCCEED:
+ return "SUCCEED";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static inline bool agent_req_in_final_state(enum agent_req_status ars)
+{
+ return ((ars == ARS_SUCCEED) || (ars == ARS_FAILED) ||
+ (ars == ARS_CANCELED));
+}
+
+struct llog_agent_req_rec {
+ struct llog_rec_hdr arr_hdr; /**< record header */
+ __u32 arr_status; /**< status of the request */
+ /* must match enum
+ * agent_req_status */
+ __u32 arr_archive_id; /**< backend archive number */
+ __u64 arr_flags; /**< req flags */
+ __u64 arr_compound_id; /**< compound cookie */
+ __u64 arr_req_create; /**< req. creation time */
+ __u64 arr_req_change; /**< req. status change time */
+ struct hsm_action_item arr_hai; /**< req. to the agent */
+ struct llog_rec_tail arr_tail; /**< record tail for_sizeof_only */
+} __attribute__((packed));
+
/* Old llog gen for compatibility */
struct llog_gen {
__u64 mnt_cnt;
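agent_req_status2name() and agent_req_in_final_state(), added above, give a printable status and a terminal-state test for HSM agent request records. A hedged reporting sketch; the function name and message format are illustrative:

static void example_report_agent_req(const struct llog_agent_req_rec *rec)
{
	enum agent_req_status ars = (enum agent_req_status)rec->arr_status;

	printk(KERN_DEBUG "HSM req "LPX64" archive %u: %s%s\n",
	       rec->arr_compound_id, rec->arr_archive_id,
	       agent_req_status2name(ars),
	       agent_req_in_final_state(ars) ? " (final)" : "");
}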
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 7e9f57507f0..c7bd4473a1d 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -347,6 +347,16 @@ struct lov_user_md_v3 { /* LOV EA user data (host-endian) */
struct lov_user_ost_data_v1 lmm_objects[0]; /* per-stripe data */
} __attribute__((packed));
+static inline __u32 lov_user_md_size(__u16 stripes, __u32 lmm_magic)
+{
+ if (lmm_magic == LOV_USER_MAGIC_V3)
+ return sizeof(struct lov_user_md_v3) +
+ stripes * sizeof(struct lov_user_ost_data_v1);
+ else
+ return sizeof(struct lov_user_md_v1) +
+ stripes * sizeof(struct lov_user_ost_data_v1);
+}
+
/* Compile with -D_LARGEFILE64_SOURCE or -D_GNU_SOURCE (or #define) to
* use this. It is unsafe to #define those values in this header as it
* is possible the application has already #included <sys/stat.h>. */
@@ -462,6 +472,8 @@ static inline void obd_uuid2fsname(char *buf, char *uuid, int buflen)
/* printf display format
e.g. printf("file FID is "DFID"\n", PFID(fid)); */
+#define FID_NOBRACE_LEN 40
+#define FID_LEN (FID_NOBRACE_LEN + 2)
#define DFID_NOBRACE LPX64":0x%x:0x%x"
#define DFID "["DFID_NOBRACE"]"
#define PFID(fid) \
diff --git a/drivers/staging/lustre/lustre/include/lustre_cfg.h b/drivers/staging/lustre/lustre/include/lustre_cfg.h
index f12429f3821..e14a5f674e8 100644
--- a/drivers/staging/lustre/lustre/include/lustre_cfg.h
+++ b/drivers/staging/lustre/lustre/include/lustre_cfg.h
@@ -211,13 +211,12 @@ static inline int lustre_cfg_len(__u32 bufcount, __u32 *buflens)
{
int i;
int len;
- ENTRY;
len = LCFG_HDR_SIZE(bufcount);
for (i = 0; i < bufcount; i++)
len += cfs_size_round(buflens[i]);
- RETURN(cfs_size_round(len));
+ return cfs_size_round(len);
}
@@ -230,12 +229,10 @@ static inline struct lustre_cfg *lustre_cfg_new(int cmd,
char *ptr;
int i;
- ENTRY;
-
OBD_ALLOC(lcfg, lustre_cfg_len(bufs->lcfg_bufcount,
bufs->lcfg_buflen));
if (!lcfg)
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
lcfg->lcfg_version = LUSTRE_CFG_VERSION;
lcfg->lcfg_command = cmd;
@@ -246,7 +243,7 @@ static inline struct lustre_cfg *lustre_cfg_new(int cmd,
lcfg->lcfg_buflens[i] = bufs->lcfg_buflen[i];
LOGL((char *)bufs->lcfg_buf[i], bufs->lcfg_buflen[i], ptr);
}
- RETURN(lcfg);
+ return lcfg;
}
static inline void lustre_cfg_free(struct lustre_cfg *lcfg)
@@ -256,44 +253,39 @@ static inline void lustre_cfg_free(struct lustre_cfg *lcfg)
len = lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens);
OBD_FREE(lcfg, len);
- EXIT;
return;
}
static inline int lustre_cfg_sanity_check(void *buf, int len)
{
struct lustre_cfg *lcfg = (struct lustre_cfg *)buf;
- ENTRY;
+
if (!lcfg)
- RETURN(-EINVAL);
+ return -EINVAL;
/* check that the first bits of the struct are valid */
if (len < LCFG_HDR_SIZE(0))
- RETURN(-EINVAL);
+ return -EINVAL;
if (lcfg->lcfg_version != LUSTRE_CFG_VERSION)
- RETURN(-EINVAL);
+ return -EINVAL;
if (lcfg->lcfg_bufcount >= LUSTRE_CFG_MAX_BUFCOUNT)
- RETURN(-EINVAL);
+ return -EINVAL;
/* check that the buflens are valid */
if (len < LCFG_HDR_SIZE(lcfg->lcfg_bufcount))
- RETURN(-EINVAL);
+ return -EINVAL;
/* make sure all the pointers point inside the data */
if (len < lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens))
- RETURN(-EINVAL);
+ return -EINVAL;
- RETURN(0);
+ return 0;
}
#include <lustre/lustre_user.h>
-#ifndef INVALID_UID
-#define INVALID_UID (-1)
-#endif
-
/** @} cfg */
#endif // _LUSTRE_CFG_H
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
index 8db6086ea4e..9228b165b25 100644
--- a/drivers/staging/lustre/lustre/include/lustre_disk.h
+++ b/drivers/staging/lustre/lustre/include/lustre_disk.h
@@ -53,20 +53,21 @@
/****************** on-disk files *********************/
-#define MDT_LOGS_DIR "LOGS" /* COMPAT_146 */
-#define MOUNT_CONFIGS_DIR "CONFIGS"
-#define CONFIGS_FILE "mountdata"
+#define MDT_LOGS_DIR "LOGS" /* COMPAT_146 */
+#define MOUNT_CONFIGS_DIR "CONFIGS"
+#define CONFIGS_FILE "mountdata"
/** Persistent mount data are stored on the disk in this file. */
-#define MOUNT_DATA_FILE MOUNT_CONFIGS_DIR"/"CONFIGS_FILE
-#define LAST_RCVD "last_rcvd"
-#define LOV_OBJID "lov_objid"
+#define MOUNT_DATA_FILE MOUNT_CONFIGS_DIR"/"CONFIGS_FILE
+#define LAST_RCVD "last_rcvd"
+#define LOV_OBJID "lov_objid"
#define LOV_OBJSEQ "lov_objseq"
-#define HEALTH_CHECK "health_check"
-#define CAPA_KEYS "capa_keys"
-#define CHANGELOG_USERS "changelog_users"
-#define MGS_NIDTBL_DIR "NIDTBL_VERSIONS"
-#define QMT_DIR "quota_master"
-#define QSD_DIR "quota_slave"
+#define HEALTH_CHECK "health_check"
+#define CAPA_KEYS "capa_keys"
+#define CHANGELOG_USERS "changelog_users"
+#define MGS_NIDTBL_DIR "NIDTBL_VERSIONS"
+#define QMT_DIR "quota_master"
+#define QSD_DIR "quota_slave"
+#define HSM_ACTIONS "hsm_actions"
/****************** persistent mount data *********************/
@@ -226,21 +227,22 @@ struct lustre_mount_data {
char *lmd_osd_type; /* OSD type */
};
-#define LMD_FLG_SERVER 0x0001 /* Mounting a server */
-#define LMD_FLG_CLIENT 0x0002 /* Mounting a client */
-#define LMD_FLG_ABORT_RECOV 0x0008 /* Abort recovery */
-#define LMD_FLG_NOSVC 0x0010 /* Only start MGS/MGC for servers,
- no other services */
-#define LMD_FLG_NOMGS 0x0020 /* Only start target for servers, reusing
- existing MGS services */
-#define LMD_FLG_WRITECONF 0x0040 /* Rewrite config log */
-#define LMD_FLG_NOIR 0x0080 /* NO imperative recovery */
-#define LMD_FLG_NOSCRUB 0x0100 /* Do not trigger scrub automatically */
-#define LMD_FLG_MGS 0x0200 /* Also start MGS along with server */
-#define LMD_FLG_IAM 0x0400 /* IAM dir */
-#define LMD_FLG_NO_PRIMNODE 0x0800 /* all nodes are service nodes */
-#define LMD_FLG_VIRGIN 0x1000 /* the service registers first time */
-#define LMD_FLG_UPDATE 0x2000 /* update parameters */
+#define LMD_FLG_SERVER 0x0001 /* Mounting a server */
+#define LMD_FLG_CLIENT 0x0002 /* Mounting a client */
+#define LMD_FLG_ABORT_RECOV 0x0008 /* Abort recovery */
+#define LMD_FLG_NOSVC 0x0010 /* Only start MGS/MGC for servers,
+ no other services */
+#define LMD_FLG_NOMGS 0x0020 /* Only start target for servers, reusing
+ existing MGS services */
+#define LMD_FLG_WRITECONF 0x0040 /* Rewrite config log */
+#define LMD_FLG_NOIR 0x0080 /* NO imperative recovery */
+#define LMD_FLG_NOSCRUB 0x0100 /* Do not trigger scrub automatically */
+#define LMD_FLG_MGS 0x0200 /* Also start MGS along with server */
+#define LMD_FLG_IAM 0x0400 /* IAM dir */
+#define LMD_FLG_NO_PRIMNODE 0x0800 /* all nodes are service nodes */
+#define LMD_FLG_VIRGIN 0x1000 /* the service registers first time */
+#define LMD_FLG_UPDATE 0x2000 /* update parameters */
+#define LMD_FLG_HSM 0x4000 /* Start coordinator */
#define lmd_is_client(x) ((x)->lmd_flags & LMD_FLG_CLIENT)
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index 317f928fc15..7020d9cd9eb 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -57,6 +57,8 @@
#include <interval_tree.h> /* for interval_node{}, ldlm_extent */
#include <lu_ref.h>
+#include "lustre_dlm_flags.h"
+
struct obd_ops;
struct obd_device;
@@ -96,161 +98,6 @@ typedef enum {
} ldlm_side_t;
/**
- * Declaration of flags sent through the wire.
- **/
-#define LDLM_FL_LOCK_CHANGED 0x000001 /* extent, mode, or resource changed */
-
-/**
- * If the server returns one of these flags, then the lock was put on that list.
- * If the client sends one of these flags (during recovery ONLY!), it wants the
- * lock added to the specified list, no questions asked.
- */
-#define LDLM_FL_BLOCK_GRANTED 0x000002
-#define LDLM_FL_BLOCK_CONV 0x000004
-#define LDLM_FL_BLOCK_WAIT 0x000008
-
-/* Used to be LDLM_FL_CBPENDING 0x000010 moved to non-wire flags */
-
-#define LDLM_FL_AST_SENT 0x000020 /* blocking or cancel packet was
- * queued for sending. */
-/* Used to be LDLM_FL_WAIT_NOREPROC 0x000040 moved to non-wire flags */
-/* Used to be LDLM_FL_CANCEL 0x000080 moved to non-wire flags */
-
-/**
- * Lock is being replayed. This could probably be implied by the fact that one
- * of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous.
- */
-#define LDLM_FL_REPLAY 0x000100
-
-#define LDLM_FL_INTENT_ONLY 0x000200 /* Don't grant lock, just do intent. */
-
-/* Used to be LDLM_FL_LOCAL_ONLY 0x000400 moved to non-wire flags */
-/* Used to be LDLM_FL_FAILED 0x000800 moved to non-wire flags */
-
-#define LDLM_FL_HAS_INTENT 0x001000 /* lock request has intent */
-
-/* Used to be LDLM_FL_CANCELING 0x002000 moved to non-wire flags */
-/* Used to be LDLM_FL_LOCAL 0x004000 moved to non-wire flags */
-
-#define LDLM_FL_DISCARD_DATA 0x010000 /* discard (no writeback) on cancel */
-
-#define LDLM_FL_NO_TIMEOUT 0x020000 /* Blocked by group lock - wait
- * indefinitely */
-
-/** file & record locking */
-#define LDLM_FL_BLOCK_NOWAIT 0x040000 /* Server told not to wait if blocked.
- * For AGL, OST will not send glimpse
- * callback. */
-#define LDLM_FL_TEST_LOCK 0x080000 // return blocking lock
-
-/* Used to be LDLM_FL_LVB_READY 0x100000 moved to non-wire flags */
-/* Used to be LDLM_FL_KMS_IGNORE 0x200000 moved to non-wire flags */
-/* Used to be LDLM_FL_NO_LRU 0x400000 moved to non-wire flags */
-
-/* Immediatelly cancel such locks when they block some other locks. Send
- * cancel notification to original lock holder, but expect no reply. This is
- * for clients (like liblustre) that cannot be expected to reliably response
- * to blocking AST. */
-#define LDLM_FL_CANCEL_ON_BLOCK 0x800000
-
-/* Flags flags inherited from parent lock when doing intents. */
-#define LDLM_INHERIT_FLAGS (LDLM_FL_CANCEL_ON_BLOCK)
-
-/* Used to be LDLM_FL_CP_REQD 0x1000000 moved to non-wire flags */
-/* Used to be LDLM_FL_CLEANED 0x2000000 moved to non-wire flags */
-/* Used to be LDLM_FL_ATOMIC_CB 0x4000000 moved to non-wire flags */
-/* Used to be LDLM_FL_BL_AST 0x10000000 moved to non-wire flags */
-/* Used to be LDLM_FL_BL_DONE 0x20000000 moved to non-wire flags */
-
-/* measure lock contention and return -EUSERS if locking contention is high */
-#define LDLM_FL_DENY_ON_CONTENTION 0x40000000
-
-/* These are flags that are mapped into the flags and ASTs of blocking locks */
-#define LDLM_AST_DISCARD_DATA 0x80000000 /* Add FL_DISCARD to blocking ASTs */
-
-/* Flags sent in AST lock_flags to be mapped into the receiving lock. */
-#define LDLM_AST_FLAGS (LDLM_FL_DISCARD_DATA)
-
-/*
- * --------------------------------------------------------------------------
- * NOTE! Starting from this point, that is, LDLM_FL_* flags with values above
- * 0x80000000 will not be sent over the wire.
- * --------------------------------------------------------------------------
- */
-
-/**
- * Declaration of flags not sent through the wire.
- **/
-
-/**
- * Used for marking lock as a target for -EINTR while cp_ast sleep
- * emulation + race with upcoming bl_ast.
- */
-#define LDLM_FL_FAIL_LOC 0x100000000ULL
-
-/**
- * Used while processing the unused list to know that we have already
- * handled this lock and decided to skip it.
- */
-#define LDLM_FL_SKIPPED 0x200000000ULL
-/* this lock is being destroyed */
-#define LDLM_FL_CBPENDING 0x400000000ULL
-/* not a real flag, not saved in lock */
-#define LDLM_FL_WAIT_NOREPROC 0x800000000ULL
-/* cancellation callback already run */
-#define LDLM_FL_CANCEL 0x1000000000ULL
-#define LDLM_FL_LOCAL_ONLY 0x2000000000ULL
-/* don't run the cancel callback under ldlm_cli_cancel_unused */
-#define LDLM_FL_FAILED 0x4000000000ULL
-/* lock cancel has already been sent */
-#define LDLM_FL_CANCELING 0x8000000000ULL
-/* local lock (ie, no srv/cli split) */
-#define LDLM_FL_LOCAL 0x10000000000ULL
-/* XXX FIXME: This is being added to b_size as a low-risk fix to the fact that
- * the LVB filling happens _after_ the lock has been granted, so another thread
- * can match it before the LVB has been updated. As a dirty hack, we set
- * LDLM_FL_LVB_READY only after we've done the LVB poop.
- * this is only needed on LOV/OSC now, where LVB is actually used and callers
- * must set it in input flags.
- *
- * The proper fix is to do the granting inside of the completion AST, which can
- * be replaced with a LVB-aware wrapping function for OSC locks. That change is
- * pretty high-risk, though, and would need a lot more testing. */
-#define LDLM_FL_LVB_READY 0x20000000000ULL
-/* A lock contributes to the known minimum size (KMS) calculation until it has
- * finished the part of its cancelation that performs write back on its dirty
- * pages. It can remain on the granted list during this whole time. Threads
- * racing to update the KMS after performing their writeback need to know to
- * exclude each other's locks from the calculation as they walk the granted
- * list. */
-#define LDLM_FL_KMS_IGNORE 0x40000000000ULL
-/* completion AST to be executed */
-#define LDLM_FL_CP_REQD 0x80000000000ULL
-/* cleanup_resource has already handled the lock */
-#define LDLM_FL_CLEANED 0x100000000000ULL
-/* optimization hint: LDLM can run blocking callback from current context
- * w/o involving separate thread. in order to decrease cs rate */
-#define LDLM_FL_ATOMIC_CB 0x200000000000ULL
-
-/* It may happen that a client initiates two operations, e.g. unlink and
- * mkdir, such that the server sends a blocking AST for conflicting
- * locks to this client for the first operation, whereas the second
- * operation has canceled this lock and is waiting for rpc_lock which is
- * taken by the first operation. LDLM_FL_BL_AST is set by
- * ldlm_callback_handler() in the lock to prevent the Early Lock Cancel
- * (ELC) code from cancelling it.
- *
- * LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock
- * cache is dropped to let ldlm_callback_handler() return EINVAL to the
- * server. It is used when ELC RPC is already prepared and is waiting
- * for rpc_lock, too late to send a separate CANCEL RPC. */
-#define LDLM_FL_BL_AST 0x400000000000ULL
-#define LDLM_FL_BL_DONE 0x800000000000ULL
-/* Don't put lock into the LRU list, so that it is not canceled due to aging.
- * Used by MGC locks, they are cancelled only at unmount or by callback. */
-#define LDLM_FL_NO_LRU 0x1000000000000ULL
-
-/**
* The blocking callback is overloaded to perform two functions. These flags
* indicate which operation should be performed.
*/
@@ -388,7 +235,7 @@ struct ldlm_pool_ops {
*/
struct ldlm_pool {
/** Pool proc directory. */
- proc_dir_entry_t *pl_proc_dir;
+ struct proc_dir_entry *pl_proc_dir;
/** Pool name, must be long enough to hold compound proc entry name. */
char pl_name[100];
/** Lock for protecting SLV/CLV updates. */
@@ -720,8 +567,6 @@ typedef int (*ldlm_completion_callback)(struct ldlm_lock *lock, __u64 flags,
void *data);
/** Type for glimpse callback function of a lock. */
typedef int (*ldlm_glimpse_callback)(struct ldlm_lock *lock, void *data);
-/** Type for weight callback function of a lock. */
-typedef unsigned long (*ldlm_weigh_callback)(struct ldlm_lock *lock);
/** Work list for sending GL ASTs to multiple locks. */
struct ldlm_glimpse_work {
@@ -890,9 +735,6 @@ struct ldlm_lock {
*/
ldlm_glimpse_callback l_glimpse_ast;
- /** XXX apparently unused "weight" handler. To be removed? */
- ldlm_weigh_callback l_weigh_ast;
-
/**
* Lock export.
* This is a pointer to actual client export for locks that were granted
@@ -919,11 +761,11 @@ struct ldlm_lock {
ldlm_policy_data_t l_policy_data;
/**
- * Lock state flags.
- * Like whenever we receive any blocking requests for this lock, etc.
- * Protected by lr_lock.
+ * Lock state flags. Protected by lr_lock.
+ * \see lustre_dlm_flags.h where the bits are defined.
*/
__u64 l_flags;
+
/**
* Lock r/w usage counters.
* Protected by lr_lock.
@@ -952,34 +794,6 @@ struct ldlm_lock {
/** Originally requested extent for the extent lock. */
struct ldlm_extent l_req_extent;
- unsigned int l_failed:1,
- /**
- * Set for locks that were removed from class hash table and will be
- * destroyed when last reference to them is released. Set by
- * ldlm_lock_destroy_internal().
- *
- * Protected by lock and resource locks.
- */
- l_destroyed:1,
- /*
- * it's set in lock_res_and_lock() and unset in unlock_res_and_lock().
- *
- * NB: compared with check_res_locked(), checking this bit is cheaper.
- * Also, spin_is_locked() is deprecated for kernel code; one reason is
- * because it works only for SMP so user needs to add extra macros like
- * LASSERT_SPIN_LOCKED for uniprocessor kernels.
- */
- l_res_locked:1,
- /*
- * It's set once we call ldlm_add_waiting_lock_res_locked()
- * to start the lock-timeout timer and it will never be reset.
- *
- * Protected by lock_res_and_lock().
- */
- l_waited:1,
- /** Flag whether this is a server namespace lock. */
- l_ns_srv:1;
-
/*
* Client-side-only members.
*/
@@ -1230,7 +1044,6 @@ struct ldlm_enqueue_info {
void *ei_cb_bl; /** blocking lock callback */
void *ei_cb_cp; /** lock completion callback */
void *ei_cb_gl; /** lock glimpse callback */
- void *ei_cb_wg; /** lock weigh callback */
void *ei_cbdata; /** Data to be passed into callbacks. */
};
@@ -1328,7 +1141,6 @@ struct ldlm_callback_suite {
ldlm_completion_callback lcs_completion;
ldlm_blocking_callback lcs_blocking;
ldlm_glimpse_callback lcs_glimpse;
- ldlm_weigh_callback lcs_weigh;
};
/* ldlm_lockd.c */
@@ -1471,8 +1283,6 @@ void ldlm_namespace_free(struct ldlm_namespace *ns,
struct obd_import *imp, int force);
void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client);
void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client);
-void ldlm_namespace_move_locked(struct ldlm_namespace *ns, ldlm_side_t client);
-struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client);
void ldlm_namespace_get(struct ldlm_namespace *ns);
void ldlm_namespace_put(struct ldlm_namespace *ns);
int ldlm_proc_setup(void);
@@ -1645,7 +1455,7 @@ void unlock_res_and_lock(struct ldlm_lock *lock);
* There are not used outside of ldlm.
* @{
*/
-void ldlm_pools_recalc(ldlm_side_t client);
+int ldlm_pools_recalc(ldlm_side_t client);
int ldlm_pools_init(void);
void ldlm_pools_fini(void);
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
new file mode 100644
index 00000000000..8c34d9d4d25
--- /dev/null
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
@@ -0,0 +1,460 @@
+/* -*- buffer-read-only: t -*- vi: set ro:
+ *
+ * DO NOT EDIT THIS FILE (lustre_dlm_flags.h)
+ *
+ * It has been AutoGen-ed
+ * From the definitions lustre_dlm_flags.def
+ * and the template file lustre_dlm_flags.tpl
+ *
+ * lustre is free software: you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * lustre is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * See the GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+/**
+ * \file lustre_dlm_flags.h
+ * The flags and collections of flags (masks) for \see struct ldlm_lock.
+ * This file is derived from flag definitions in lustre_dlm_flags.def.
+ * The format is defined in the lustre_dlm_flags.tpl template file.
+ *
+ * \addtogroup LDLM Lustre Distributed Lock Manager
+ * @{
+ *
+ * \name flags
+ * The flags and collections of flags (masks) for \see struct ldlm_lock.
+ * @{
+ */
+#ifndef LDLM_ALL_FLAGS_MASK
+
+/** l_flags bits marked as "all_flags" bits */
+#define LDLM_FL_ALL_FLAGS_MASK 0x007FFFFFC08F132FULL
+
+/** l_flags bits marked as "ast" bits */
+#define LDLM_FL_AST_MASK 0x0000000080000000ULL
+
+/** l_flags bits marked as "blocked" bits */
+#define LDLM_FL_BLOCKED_MASK 0x000000000000000EULL
+
+/** l_flags bits marked as "gone" bits */
+#define LDLM_FL_GONE_MASK 0x0006004000000000ULL
+
+/** l_flags bits marked as "hide_lock" bits */
+#define LDLM_FL_HIDE_LOCK_MASK 0x0000206400000000ULL
+
+/** l_flags bits marked as "inherit" bits */
+#define LDLM_FL_INHERIT_MASK 0x0000000000800000ULL
+
+/** l_flags bits marked as "local_only" bits */
+#define LDLM_FL_LOCAL_ONLY_MASK 0x007FFFFF00000000ULL
+
+/** l_flags bits marked as "on_wire" bits */
+#define LDLM_FL_ON_WIRE_MASK 0x00000000C08F132FULL
+
+/** extent, mode, or resource changed */
+#define LDLM_FL_LOCK_CHANGED 0x0000000000000001ULL // bit 0
+#define ldlm_is_lock_changed(_l) LDLM_TEST_FLAG(( _l), 1ULL << 0)
+#define ldlm_set_lock_changed(_l) LDLM_SET_FLAG(( _l), 1ULL << 0)
+#define ldlm_clear_lock_changed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 0)
+
+/**
+ * Server placed lock on granted list, or a recovering client wants the
+ * lock added to the granted list, no questions asked. */
+#define LDLM_FL_BLOCK_GRANTED 0x0000000000000002ULL // bit 1
+#define ldlm_is_block_granted(_l) LDLM_TEST_FLAG(( _l), 1ULL << 1)
+#define ldlm_set_block_granted(_l) LDLM_SET_FLAG(( _l), 1ULL << 1)
+#define ldlm_clear_block_granted(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 1)
+
+/**
+ * Server placed lock on conv list, or a recovering client wants the lock
+ * added to the conv list, no questions asked. */
+#define LDLM_FL_BLOCK_CONV 0x0000000000000004ULL // bit 2
+#define ldlm_is_block_conv(_l) LDLM_TEST_FLAG(( _l), 1ULL << 2)
+#define ldlm_set_block_conv(_l) LDLM_SET_FLAG(( _l), 1ULL << 2)
+#define ldlm_clear_block_conv(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 2)
+
+/**
+ * Server placed lock on wait list, or a recovering client wants the lock
+ * added to the wait list, no questions asked. */
+#define LDLM_FL_BLOCK_WAIT 0x0000000000000008ULL // bit 3
+#define ldlm_is_block_wait(_l) LDLM_TEST_FLAG(( _l), 1ULL << 3)
+#define ldlm_set_block_wait(_l) LDLM_SET_FLAG(( _l), 1ULL << 3)
+#define ldlm_clear_block_wait(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 3)
+
+/** blocking or cancel packet was queued for sending. */
+#define LDLM_FL_AST_SENT 0x0000000000000020ULL // bit 5
+#define ldlm_is_ast_sent(_l) LDLM_TEST_FLAG(( _l), 1ULL << 5)
+#define ldlm_set_ast_sent(_l) LDLM_SET_FLAG(( _l), 1ULL << 5)
+#define ldlm_clear_ast_sent(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 5)
+
+/**
+ * Lock is being replayed. This could probably be implied by the fact that
+ * one of BLOCK_{GRANTED,CONV,WAIT} is set, but that is pretty dangerous. */
+#define LDLM_FL_REPLAY 0x0000000000000100ULL // bit 8
+#define ldlm_is_replay(_l) LDLM_TEST_FLAG(( _l), 1ULL << 8)
+#define ldlm_set_replay(_l) LDLM_SET_FLAG(( _l), 1ULL << 8)
+#define ldlm_clear_replay(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 8)
+
+/** Don't grant lock, just do intent. */
+#define LDLM_FL_INTENT_ONLY 0x0000000000000200ULL // bit 9
+#define ldlm_is_intent_only(_l) LDLM_TEST_FLAG(( _l), 1ULL << 9)
+#define ldlm_set_intent_only(_l) LDLM_SET_FLAG(( _l), 1ULL << 9)
+#define ldlm_clear_intent_only(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 9)
+
+/** lock request has intent */
+#define LDLM_FL_HAS_INTENT 0x0000000000001000ULL // bit 12
+#define ldlm_is_has_intent(_l) LDLM_TEST_FLAG(( _l), 1ULL << 12)
+#define ldlm_set_has_intent(_l) LDLM_SET_FLAG(( _l), 1ULL << 12)
+#define ldlm_clear_has_intent(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 12)
+
+/** discard (no writeback) on cancel */
+#define LDLM_FL_DISCARD_DATA 0x0000000000010000ULL // bit 16
+#define ldlm_is_discard_data(_l) LDLM_TEST_FLAG(( _l), 1ULL << 16)
+#define ldlm_set_discard_data(_l) LDLM_SET_FLAG(( _l), 1ULL << 16)
+#define ldlm_clear_discard_data(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 16)
+
+/** Blocked by group lock - wait indefinitely */
+#define LDLM_FL_NO_TIMEOUT 0x0000000000020000ULL // bit 17
+#define ldlm_is_no_timeout(_l) LDLM_TEST_FLAG(( _l), 1ULL << 17)
+#define ldlm_set_no_timeout(_l) LDLM_SET_FLAG(( _l), 1ULL << 17)
+#define ldlm_clear_no_timeout(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 17)
+
+/**
+ * Server told not to wait if blocked. For AGL, OST will not send glimpse
+ * callback. */
+#define LDLM_FL_BLOCK_NOWAIT 0x0000000000040000ULL // bit 18
+#define ldlm_is_block_nowait(_l) LDLM_TEST_FLAG(( _l), 1ULL << 18)
+#define ldlm_set_block_nowait(_l) LDLM_SET_FLAG(( _l), 1ULL << 18)
+#define ldlm_clear_block_nowait(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 18)
+
+/** return blocking lock */
+#define LDLM_FL_TEST_LOCK 0x0000000000080000ULL // bit 19
+#define ldlm_is_test_lock(_l) LDLM_TEST_FLAG(( _l), 1ULL << 19)
+#define ldlm_set_test_lock(_l) LDLM_SET_FLAG(( _l), 1ULL << 19)
+#define ldlm_clear_test_lock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 19)
+
+/**
+ * Immediately cancel such locks when they block some other locks. Send
+ * cancel notification to the original lock holder, but expect no reply.
+ * This is for clients (like liblustre) that cannot be expected to
+ * reliably respond to blocking ASTs. */
+#define LDLM_FL_CANCEL_ON_BLOCK 0x0000000000800000ULL // bit 23
+#define ldlm_is_cancel_on_block(_l) LDLM_TEST_FLAG(( _l), 1ULL << 23)
+#define ldlm_set_cancel_on_block(_l) LDLM_SET_FLAG(( _l), 1ULL << 23)
+#define ldlm_clear_cancel_on_block(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 23)
+
+/**
+ * measure lock contention and return -EUSERS if locking contention is high */
+#define LDLM_FL_DENY_ON_CONTENTION 0x0000000040000000ULL // bit 30
+#define ldlm_is_deny_on_contention(_l) LDLM_TEST_FLAG(( _l), 1ULL << 30)
+#define ldlm_set_deny_on_contention(_l) LDLM_SET_FLAG(( _l), 1ULL << 30)
+#define ldlm_clear_deny_on_contention(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 30)
+
+/**
+ * These are flags that are mapped into the flags and ASTs of blocking
+ * locks. Add FL_DISCARD to blocking ASTs. */
+#define LDLM_FL_AST_DISCARD_DATA 0x0000000080000000ULL // bit 31
+#define ldlm_is_ast_discard_data(_l) LDLM_TEST_FLAG(( _l), 1ULL << 31)
+#define ldlm_set_ast_discard_data(_l) LDLM_SET_FLAG(( _l), 1ULL << 31)
+#define ldlm_clear_ast_discard_data(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 31)
+
+/**
+ * Used for marking lock as a target for -EINTR while cp_ast sleep emulation
+ * + race with upcoming bl_ast. */
+#define LDLM_FL_FAIL_LOC 0x0000000100000000ULL // bit 32
+#define ldlm_is_fail_loc(_l) LDLM_TEST_FLAG(( _l), 1ULL << 32)
+#define ldlm_set_fail_loc(_l) LDLM_SET_FLAG(( _l), 1ULL << 32)
+#define ldlm_clear_fail_loc(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 32)
+
+/**
+ * Used while processing the unused list to know that we have already
+ * handled this lock and decided to skip it. */
+#define LDLM_FL_SKIPPED 0x0000000200000000ULL // bit 33
+#define ldlm_is_skipped(_l) LDLM_TEST_FLAG(( _l), 1ULL << 33)
+#define ldlm_set_skipped(_l) LDLM_SET_FLAG(( _l), 1ULL << 33)
+#define ldlm_clear_skipped(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 33)
+
+/** this lock is being destroyed */
+#define LDLM_FL_CBPENDING 0x0000000400000000ULL // bit 34
+#define ldlm_is_cbpending(_l) LDLM_TEST_FLAG(( _l), 1ULL << 34)
+#define ldlm_set_cbpending(_l) LDLM_SET_FLAG(( _l), 1ULL << 34)
+#define ldlm_clear_cbpending(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 34)
+
+/** not a real flag, not saved in lock */
+#define LDLM_FL_WAIT_NOREPROC 0x0000000800000000ULL // bit 35
+#define ldlm_is_wait_noreproc(_l) LDLM_TEST_FLAG(( _l), 1ULL << 35)
+#define ldlm_set_wait_noreproc(_l) LDLM_SET_FLAG(( _l), 1ULL << 35)
+#define ldlm_clear_wait_noreproc(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 35)
+
+/** cancellation callback already run */
+#define LDLM_FL_CANCEL 0x0000001000000000ULL // bit 36
+#define ldlm_is_cancel(_l) LDLM_TEST_FLAG(( _l), 1ULL << 36)
+#define ldlm_set_cancel(_l) LDLM_SET_FLAG(( _l), 1ULL << 36)
+#define ldlm_clear_cancel(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 36)
+
+/** whatever it might mean */
+#define LDLM_FL_LOCAL_ONLY 0x0000002000000000ULL // bit 37
+#define ldlm_is_local_only(_l) LDLM_TEST_FLAG(( _l), 1ULL << 37)
+#define ldlm_set_local_only(_l) LDLM_SET_FLAG(( _l), 1ULL << 37)
+#define ldlm_clear_local_only(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 37)
+
+/** don't run the cancel callback under ldlm_cli_cancel_unused */
+#define LDLM_FL_FAILED 0x0000004000000000ULL // bit 38
+#define ldlm_is_failed(_l) LDLM_TEST_FLAG(( _l), 1ULL << 38)
+#define ldlm_set_failed(_l) LDLM_SET_FLAG(( _l), 1ULL << 38)
+#define ldlm_clear_failed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 38)
+
+/** lock cancel has already been sent */
+#define LDLM_FL_CANCELING 0x0000008000000000ULL // bit 39
+#define ldlm_is_canceling(_l) LDLM_TEST_FLAG(( _l), 1ULL << 39)
+#define ldlm_set_canceling(_l) LDLM_SET_FLAG(( _l), 1ULL << 39)
+#define ldlm_clear_canceling(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 39)
+
+/** local lock (ie, no srv/cli split) */
+#define LDLM_FL_LOCAL 0x0000010000000000ULL // bit 40
+#define ldlm_is_local(_l) LDLM_TEST_FLAG(( _l), 1ULL << 40)
+#define ldlm_set_local(_l) LDLM_SET_FLAG(( _l), 1ULL << 40)
+#define ldlm_clear_local(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 40)
+
+/**
+ * XXX FIXME: This is being added to b_size as a low-risk fix to the
+ * fact that the LVB filling happens _after_ the lock has been granted,
+ * so another thread can match it before the LVB has been updated. As a
+ * dirty hack, we set LDLM_FL_LVB_READY only after we've done the LVB poop.
+ * this is only needed on LOV/OSC now, where LVB is actually used and
+ * callers must set it in input flags.
+ *
+ * The proper fix is to do the granting inside of the completion AST,
+ * which can be replaced with a LVB-aware wrapping function for OSC locks.
+ * That change is pretty high-risk, though, and would need a lot more
+ * testing. */
+#define LDLM_FL_LVB_READY 0x0000020000000000ULL // bit 41
+#define ldlm_is_lvb_ready(_l) LDLM_TEST_FLAG(( _l), 1ULL << 41)
+#define ldlm_set_lvb_ready(_l) LDLM_SET_FLAG(( _l), 1ULL << 41)
+#define ldlm_clear_lvb_ready(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 41)
+
+/**
+ * A lock contributes to the known minimum size (KMS) calculation until it
+ * has finished the part of its cancelation that performs write back on its
+ * dirty pages. It can remain on the granted list during this whole time.
+ * Threads racing to update the KMS after performing their writeback need
+ * to know to exclude each other's locks from the calculation as they walk
+ * the granted list. */
+#define LDLM_FL_KMS_IGNORE 0x0000040000000000ULL // bit 42
+#define ldlm_is_kms_ignore(_l) LDLM_TEST_FLAG(( _l), 1ULL << 42)
+#define ldlm_set_kms_ignore(_l) LDLM_SET_FLAG(( _l), 1ULL << 42)
+#define ldlm_clear_kms_ignore(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 42)
+
+/** completion AST to be executed */
+#define LDLM_FL_CP_REQD 0x0000080000000000ULL // bit 43
+#define ldlm_is_cp_reqd(_l) LDLM_TEST_FLAG(( _l), 1ULL << 43)
+#define ldlm_set_cp_reqd(_l) LDLM_SET_FLAG(( _l), 1ULL << 43)
+#define ldlm_clear_cp_reqd(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 43)
+
+/** cleanup_resource has already handled the lock */
+#define LDLM_FL_CLEANED 0x0000100000000000ULL // bit 44
+#define ldlm_is_cleaned(_l) LDLM_TEST_FLAG(( _l), 1ULL << 44)
+#define ldlm_set_cleaned(_l) LDLM_SET_FLAG(( _l), 1ULL << 44)
+#define ldlm_clear_cleaned(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 44)
+
+/**
+ * Optimization hint: LDLM can run the blocking callback from the current
+ * context, without involving a separate thread, to decrease the
+ * context-switch rate. */
+#define LDLM_FL_ATOMIC_CB 0x0000200000000000ULL // bit 45
+#define ldlm_is_atomic_cb(_l) LDLM_TEST_FLAG(( _l), 1ULL << 45)
+#define ldlm_set_atomic_cb(_l) LDLM_SET_FLAG(( _l), 1ULL << 45)
+#define ldlm_clear_atomic_cb(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 45)
+
+/**
+ * It may happen that a client initiates two operations, e.g. unlink and
+ * mkdir, such that the server sends a blocking AST for conflicting locks
+ * to this client for the first operation, whereas the second operation
+ * has canceled this lock and is waiting for rpc_lock which is taken by
+ * the first operation. LDLM_FL_BL_AST is set by ldlm_callback_handler() in
+ * the lock to prevent the Early Lock Cancel (ELC) code from cancelling it.
+ *
+ * LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock cache is
+ * dropped to let ldlm_callback_handler() return EINVAL to the server. It
+ * is used when ELC RPC is already prepared and is waiting for rpc_lock,
+ * too late to send a separate CANCEL RPC. */
+#define LDLM_FL_BL_AST 0x0000400000000000ULL // bit 46
+#define ldlm_is_bl_ast(_l) LDLM_TEST_FLAG(( _l), 1ULL << 46)
+#define ldlm_set_bl_ast(_l) LDLM_SET_FLAG(( _l), 1ULL << 46)
+#define ldlm_clear_bl_ast(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 46)
+
+/** set by ldlm_cancel_callback() when the lock cache is dropped; see LDLM_FL_BL_AST above */
+#define LDLM_FL_BL_DONE 0x0000800000000000ULL // bit 47
+#define ldlm_is_bl_done(_l) LDLM_TEST_FLAG(( _l), 1ULL << 47)
+#define ldlm_set_bl_done(_l) LDLM_SET_FLAG(( _l), 1ULL << 47)
+#define ldlm_clear_bl_done(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 47)
+
+/**
+ * Don't put lock into the LRU list, so that it is not canceled due
+ * to aging. Used by MGC locks, they are cancelled only at unmount or
+ * by callback. */
+#define LDLM_FL_NO_LRU 0x0001000000000000ULL // bit 48
+#define ldlm_is_no_lru(_l) LDLM_TEST_FLAG(( _l), 1ULL << 48)
+#define ldlm_set_no_lru(_l) LDLM_SET_FLAG(( _l), 1ULL << 48)
+#define ldlm_clear_no_lru(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 48)
+
+/**
+ * Set for locks that failed and where the server has been notified.
+ *
+ * Protected by lock and resource locks. */
+#define LDLM_FL_FAIL_NOTIFIED 0x0002000000000000ULL // bit 49
+#define ldlm_is_fail_notified(_l) LDLM_TEST_FLAG(( _l), 1ULL << 49)
+#define ldlm_set_fail_notified(_l) LDLM_SET_FLAG(( _l), 1ULL << 49)
+#define ldlm_clear_fail_notified(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 49)
+
+/**
+ * Set for locks that were removed from class hash table and will
+ * be destroyed when last reference to them is released. Set by
+ * ldlm_lock_destroy_internal().
+ *
+ * Protected by lock and resource locks. */
+#define LDLM_FL_DESTROYED 0x0004000000000000ULL // bit 50
+#define ldlm_is_destroyed(_l) LDLM_TEST_FLAG(( _l), 1ULL << 50)
+#define ldlm_set_destroyed(_l) LDLM_SET_FLAG(( _l), 1ULL << 50)
+#define ldlm_clear_destroyed(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 50)
+
+/** flag whether this is a server namespace lock */
+#define LDLM_FL_SERVER_LOCK 0x0008000000000000ULL // bit 51
+#define ldlm_is_server_lock(_l) LDLM_TEST_FLAG(( _l), 1ULL << 51)
+#define ldlm_set_server_lock(_l) LDLM_SET_FLAG(( _l), 1ULL << 51)
+#define ldlm_clear_server_lock(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 51)
+
+/**
+ * It's set in lock_res_and_lock() and unset in unlock_res_and_lock().
+ *
+ * NB: compared with check_res_locked(), checking this bit is cheaper.
+ * Also, spin_is_locked() is deprecated for kernel code; one reason is
+ * that it works only for SMP, so users need to add extra macros like
+ * LASSERT_SPIN_LOCKED for uniprocessor kernels. */
+#define LDLM_FL_RES_LOCKED 0x0010000000000000ULL // bit 52
+#define ldlm_is_res_locked(_l) LDLM_TEST_FLAG(( _l), 1ULL << 52)
+#define ldlm_set_res_locked(_l) LDLM_SET_FLAG(( _l), 1ULL << 52)
+#define ldlm_clear_res_locked(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 52)
+
+/**
+ * It's set once we call ldlm_add_waiting_lock_res_locked() to start the
+ * lock-timeout timer and it will never be reset.
+ *
+ * Protected by lock and resource locks. */
+#define LDLM_FL_WAITED 0x0020000000000000ULL // bit 53
+#define ldlm_is_waited(_l) LDLM_TEST_FLAG(( _l), 1ULL << 53)
+#define ldlm_set_waited(_l) LDLM_SET_FLAG(( _l), 1ULL << 53)
+#define ldlm_clear_waited(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 53)
+
+/** Flag whether this is a server namespace lock. */
+#define LDLM_FL_NS_SRV 0x0040000000000000ULL // bit 54
+#define ldlm_is_ns_srv(_l) LDLM_TEST_FLAG(( _l), 1ULL << 54)
+#define ldlm_set_ns_srv(_l) LDLM_SET_FLAG(( _l), 1ULL << 54)
+#define ldlm_clear_ns_srv(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 54)
+
+/** test for ldlm_lock flag bit set */
+#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)
+
+/** set a ldlm_lock flag bit */
+#define LDLM_SET_FLAG(_l, _b) ((_l)->l_flags |= (_b))
+
+/** clear a ldlm_lock flag bit */
+#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b))
+
+/** Mask of flags inherited from parent lock when doing intents. */
+#define LDLM_INHERIT_FLAGS LDLM_FL_INHERIT_MASK
+
+/** Mask of Flags sent in AST lock_flags to map into the receiving lock. */
+#define LDLM_AST_FLAGS LDLM_FL_AST_MASK
+
+/** @} subgroup */
+/** @} group */
+#ifdef WIRESHARK_COMPILE
+static int hf_lustre_ldlm_fl_lock_changed = -1;
+static int hf_lustre_ldlm_fl_block_granted = -1;
+static int hf_lustre_ldlm_fl_block_conv = -1;
+static int hf_lustre_ldlm_fl_block_wait = -1;
+static int hf_lustre_ldlm_fl_ast_sent = -1;
+static int hf_lustre_ldlm_fl_replay = -1;
+static int hf_lustre_ldlm_fl_intent_only = -1;
+static int hf_lustre_ldlm_fl_has_intent = -1;
+static int hf_lustre_ldlm_fl_discard_data = -1;
+static int hf_lustre_ldlm_fl_no_timeout = -1;
+static int hf_lustre_ldlm_fl_block_nowait = -1;
+static int hf_lustre_ldlm_fl_test_lock = -1;
+static int hf_lustre_ldlm_fl_cancel_on_block = -1;
+static int hf_lustre_ldlm_fl_deny_on_contention = -1;
+static int hf_lustre_ldlm_fl_ast_discard_data = -1;
+static int hf_lustre_ldlm_fl_fail_loc = -1;
+static int hf_lustre_ldlm_fl_skipped = -1;
+static int hf_lustre_ldlm_fl_cbpending = -1;
+static int hf_lustre_ldlm_fl_wait_noreproc = -1;
+static int hf_lustre_ldlm_fl_cancel = -1;
+static int hf_lustre_ldlm_fl_local_only = -1;
+static int hf_lustre_ldlm_fl_failed = -1;
+static int hf_lustre_ldlm_fl_canceling = -1;
+static int hf_lustre_ldlm_fl_local = -1;
+static int hf_lustre_ldlm_fl_lvb_ready = -1;
+static int hf_lustre_ldlm_fl_kms_ignore = -1;
+static int hf_lustre_ldlm_fl_cp_reqd = -1;
+static int hf_lustre_ldlm_fl_cleaned = -1;
+static int hf_lustre_ldlm_fl_atomic_cb = -1;
+static int hf_lustre_ldlm_fl_bl_ast = -1;
+static int hf_lustre_ldlm_fl_bl_done = -1;
+static int hf_lustre_ldlm_fl_no_lru = -1;
+static int hf_lustre_ldlm_fl_fail_notified = -1;
+static int hf_lustre_ldlm_fl_destroyed = -1;
+static int hf_lustre_ldlm_fl_server_lock = -1;
+static int hf_lustre_ldlm_fl_res_locked = -1;
+static int hf_lustre_ldlm_fl_waited = -1;
+static int hf_lustre_ldlm_fl_ns_srv = -1;
+
+const value_string lustre_ldlm_flags_vals[] = {
+ {LDLM_FL_LOCK_CHANGED, "LDLM_FL_LOCK_CHANGED"},
+ {LDLM_FL_BLOCK_GRANTED, "LDLM_FL_BLOCK_GRANTED"},
+ {LDLM_FL_BLOCK_CONV, "LDLM_FL_BLOCK_CONV"},
+ {LDLM_FL_BLOCK_WAIT, "LDLM_FL_BLOCK_WAIT"},
+ {LDLM_FL_AST_SENT, "LDLM_FL_AST_SENT"},
+ {LDLM_FL_REPLAY, "LDLM_FL_REPLAY"},
+ {LDLM_FL_INTENT_ONLY, "LDLM_FL_INTENT_ONLY"},
+ {LDLM_FL_HAS_INTENT, "LDLM_FL_HAS_INTENT"},
+ {LDLM_FL_DISCARD_DATA, "LDLM_FL_DISCARD_DATA"},
+ {LDLM_FL_NO_TIMEOUT, "LDLM_FL_NO_TIMEOUT"},
+ {LDLM_FL_BLOCK_NOWAIT, "LDLM_FL_BLOCK_NOWAIT"},
+ {LDLM_FL_TEST_LOCK, "LDLM_FL_TEST_LOCK"},
+ {LDLM_FL_CANCEL_ON_BLOCK, "LDLM_FL_CANCEL_ON_BLOCK"},
+ {LDLM_FL_DENY_ON_CONTENTION, "LDLM_FL_DENY_ON_CONTENTION"},
+ {LDLM_FL_AST_DISCARD_DATA, "LDLM_FL_AST_DISCARD_DATA"},
+ {LDLM_FL_FAIL_LOC, "LDLM_FL_FAIL_LOC"},
+ {LDLM_FL_SKIPPED, "LDLM_FL_SKIPPED"},
+ {LDLM_FL_CBPENDING, "LDLM_FL_CBPENDING"},
+ {LDLM_FL_WAIT_NOREPROC, "LDLM_FL_WAIT_NOREPROC"},
+ {LDLM_FL_CANCEL, "LDLM_FL_CANCEL"},
+ {LDLM_FL_LOCAL_ONLY, "LDLM_FL_LOCAL_ONLY"},
+ {LDLM_FL_FAILED, "LDLM_FL_FAILED"},
+ {LDLM_FL_CANCELING, "LDLM_FL_CANCELING"},
+ {LDLM_FL_LOCAL, "LDLM_FL_LOCAL"},
+ {LDLM_FL_LVB_READY, "LDLM_FL_LVB_READY"},
+ {LDLM_FL_KMS_IGNORE, "LDLM_FL_KMS_IGNORE"},
+ {LDLM_FL_CP_REQD, "LDLM_FL_CP_REQD"},
+ {LDLM_FL_CLEANED, "LDLM_FL_CLEANED"},
+ {LDLM_FL_ATOMIC_CB, "LDLM_FL_ATOMIC_CB"},
+ {LDLM_FL_BL_AST, "LDLM_FL_BL_AST"},
+ {LDLM_FL_BL_DONE, "LDLM_FL_BL_DONE"},
+ {LDLM_FL_NO_LRU, "LDLM_FL_NO_LRU"},
+ {LDLM_FL_FAIL_NOTIFIED, "LDLM_FL_FAIL_NOTIFIED"},
+ {LDLM_FL_DESTROYED, "LDLM_FL_DESTROYED"},
+ {LDLM_FL_SERVER_LOCK, "LDLM_FL_SERVER_LOCK"},
+ {LDLM_FL_RES_LOCKED, "LDLM_FL_RES_LOCKED"},
+ {LDLM_FL_WAITED, "LDLM_FL_WAITED"},
+ {LDLM_FL_NS_SRV, "LDLM_FL_NS_SRV"},
+ { 0, NULL }
+};
+#endif /* WIRESHARK_COMPILE */
+#endif /* LDLM_ALL_FLAGS_MASK */
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index 7d20cba0728..d9d5814e318 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -38,8 +38,8 @@
* Author: Yury Umanets <umka@clusterfs.com>
*/
-#ifndef __LINUX_FID_H
-#define __LINUX_FID_H
+#ifndef __LUSTRE_FID_H
+#define __LUSTRE_FID_H
/** \defgroup fid fid
*
@@ -154,13 +154,12 @@
#include <linux/libcfs/libcfs.h>
#include <lustre/lustre_idl.h>
-#include <lustre_req_layout.h>
-#include <lustre_mdt.h>
-#include <obd.h>
-
+struct lu_env;
struct lu_site;
struct lu_context;
+struct obd_device;
+struct obd_export;
/* Whole sequences space range and zero range definitions */
extern const struct lu_seq_range LUSTRE_SEQ_SPACE_RANGE;
@@ -320,6 +319,12 @@ static inline void lu_last_id_fid(struct lu_fid *fid, __u64 seq)
fid->f_ver = 0;
}
+/* seq client type */
+enum lu_cli_type {
+ LUSTRE_SEQ_METADATA = 1,
+ LUSTRE_SEQ_DATA
+};
+
enum lu_mgr_type {
LUSTRE_SEQ_SERVER,
LUSTRE_SEQ_CONTROLLER
@@ -341,7 +346,7 @@ struct lu_client_seq {
struct lu_seq_range lcs_space;
/* Seq related proc */
- proc_dir_entry_t *lcs_proc_dir;
+ struct proc_dir_entry *lcs_proc_dir;
/* This holds last allocated fid in last obtained seq */
struct lu_fid lcs_fid;
@@ -388,7 +393,7 @@ struct lu_server_seq {
struct dt_object *lss_obj;
/* Seq related proc */
- proc_dir_entry_t *lss_proc_dir;
+ struct proc_dir_entry *lss_proc_dir;
/* LUSTRE_SEQ_SERVER or LUSTRE_SEQ_CONTROLLER */
enum lu_mgr_type lss_type;
@@ -426,10 +431,14 @@ struct lu_server_seq {
struct seq_server_site *lss_site;
};
+struct com_thread_info;
int seq_query(struct com_thread_info *info);
+
+struct ptlrpc_request;
int seq_handle(struct ptlrpc_request *req);
/* Server methods */
+
int seq_server_init(struct lu_server_seq *seq,
struct dt_device *dev,
const char *prefix,
@@ -472,6 +481,7 @@ int seq_site_fini(const struct lu_env *env, struct seq_server_site *ss);
int fid_is_local(const struct lu_env *env,
struct lu_site *site, const struct lu_fid *fid);
+enum lu_cli_type;
int client_fid_init(struct obd_device *obd, struct obd_export *exp,
enum lu_cli_type type);
int client_fid_fini(struct obd_device *obd);
@@ -488,74 +498,75 @@ struct ldlm_namespace;
* renaming name[2,3] fields that need to be used for the quota identifier.
*/
static inline struct ldlm_res_id *
-fid_build_reg_res_name(const struct lu_fid *f,
- struct ldlm_res_id *name)
+fid_build_reg_res_name(const struct lu_fid *fid, struct ldlm_res_id *res)
{
- memset(name, 0, sizeof *name);
- name->name[LUSTRE_RES_ID_SEQ_OFF] = fid_seq(f);
- name->name[LUSTRE_RES_ID_VER_OID_OFF] = fid_ver_oid(f);
- return name;
+ memset(res, 0, sizeof(*res));
+ res->name[LUSTRE_RES_ID_SEQ_OFF] = fid_seq(fid);
+ res->name[LUSTRE_RES_ID_VER_OID_OFF] = fid_ver_oid(fid);
+
+ return res;
+}
+
+/*
+ * Return true if resource is for object identified by FID.
+ */
+static inline int fid_res_name_eq(const struct lu_fid *fid,
+ const struct ldlm_res_id *res)
+{
+ return res->name[LUSTRE_RES_ID_SEQ_OFF] == fid_seq(fid) &&
+ res->name[LUSTRE_RES_ID_VER_OID_OFF] == fid_ver_oid(fid);
+}
+
+/*
+ * Extract FID from LDLM resource. Reverse of fid_build_reg_res_name().
+ */
+static inline struct lu_fid *
+fid_extract_from_res_name(struct lu_fid *fid, const struct ldlm_res_id *res)
+{
+ fid->f_seq = res->name[LUSTRE_RES_ID_SEQ_OFF];
+ fid->f_oid = (__u32)(res->name[LUSTRE_RES_ID_VER_OID_OFF]);
+ fid->f_ver = (__u32)(res->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
+ LASSERT(fid_res_name_eq(fid, res));
+
+ return fid;
}
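/*
 * Illustrative sketch, not part of this patch: the resource name packs
 * fid_seq() into name[LUSTRE_RES_ID_SEQ_OFF] and fid_ver_oid() into
 * name[LUSTRE_RES_ID_VER_OID_OFF], so build and extract round-trip
 * losslessly.  example_fid_res_round_trip() is a hypothetical helper.
 */
static inline void example_fid_res_round_trip(const struct lu_fid *fid)
{
	struct ldlm_res_id res;
	struct lu_fid copy;

	fid_build_reg_res_name(fid, &res);	/* FID -> resource name */
	fid_extract_from_res_name(&copy, &res);	/* resource name -> FID */
	LASSERT(fid_res_name_eq(&copy, &res));
}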
/*
* Build (DLM) resource identifier from global quota FID and quota ID.
*/
static inline struct ldlm_res_id *
-fid_build_quota_resid(const struct lu_fid *glb_fid, union lquota_id *qid,
+fid_build_quota_res_name(const struct lu_fid *glb_fid, union lquota_id *qid,
struct ldlm_res_id *res)
{
fid_build_reg_res_name(glb_fid, res);
res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF] = fid_seq(&qid->qid_fid);
res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF] = fid_ver_oid(&qid->qid_fid);
+
return res;
}
/*
* Extract global FID and quota ID from resource name
*/
-static inline void fid_extract_quota_resid(struct ldlm_res_id *res,
- struct lu_fid *glb_fid,
- union lquota_id *qid)
+static inline void fid_extract_from_quota_res(struct lu_fid *glb_fid,
+ union lquota_id *qid,
+ const struct ldlm_res_id *res)
{
- glb_fid->f_seq = res->name[LUSTRE_RES_ID_SEQ_OFF];
- glb_fid->f_oid = (__u32)res->name[LUSTRE_RES_ID_VER_OID_OFF];
- glb_fid->f_ver = (__u32)(res->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32);
-
+ fid_extract_from_res_name(glb_fid, res);
qid->qid_fid.f_seq = res->name[LUSTRE_RES_ID_QUOTA_SEQ_OFF];
qid->qid_fid.f_oid = (__u32)res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF];
qid->qid_fid.f_ver =
(__u32)(res->name[LUSTRE_RES_ID_QUOTA_VER_OID_OFF] >> 32);
}
-/*
- * Return true if resource is for object identified by fid.
- */
-static inline int fid_res_name_eq(const struct lu_fid *f,
- const struct ldlm_res_id *name)
-{
- return name->name[LUSTRE_RES_ID_SEQ_OFF] == fid_seq(f) &&
- name->name[LUSTRE_RES_ID_VER_OID_OFF] == fid_ver_oid(f);
-}
-
-/* reverse function of fid_build_reg_res_name() */
-static inline void fid_build_from_res_name(struct lu_fid *f,
- const struct ldlm_res_id *name)
-{
- fid_zero(f);
- f->f_seq = name->name[LUSTRE_RES_ID_SEQ_OFF];
- f->f_oid = name->name[LUSTRE_RES_ID_VER_OID_OFF] & 0xffffffff;
- f->f_ver = name->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32;
- LASSERT(fid_res_name_eq(f, name));
-}
-
static inline struct ldlm_res_id *
-fid_build_pdo_res_name(const struct lu_fid *f,
- unsigned int hash,
- struct ldlm_res_id *name)
+fid_build_pdo_res_name(const struct lu_fid *fid, unsigned int hash,
+ struct ldlm_res_id *res)
{
- fid_build_reg_res_name(f, name);
- name->name[LUSTRE_RES_ID_HSH_OFF] = hash;
- return name;
+ fid_build_reg_res_name(fid, res);
+ res->name[LUSTRE_RES_ID_HSH_OFF] = hash;
+
+ return res;
}
/**
@@ -584,7 +595,7 @@ static inline void ostid_build_res_name(struct ost_id *oi,
name->name[LUSTRE_RES_ID_SEQ_OFF] = ostid_id(oi);
name->name[LUSTRE_RES_ID_VER_OID_OFF] = ostid_seq(oi);
} else {
- fid_build_reg_res_name((struct lu_fid *)oi, name);
+ fid_build_reg_res_name(&oi->oi_fid, name);
}
}
@@ -597,7 +608,7 @@ static inline void ostid_res_name_to_id(struct ost_id *oi,
ostid_set_id(oi, name->name[LUSTRE_RES_ID_SEQ_OFF]);
} else {
/* new resid */
- fid_build_from_res_name((struct lu_fid *)oi, name);
+ fid_extract_from_res_name(&oi->oi_fid, name);
}
}
@@ -644,7 +655,7 @@ static inline void ost_fid_from_resid(struct lu_fid *fid,
ostid_to_fid(fid, &oi, 0);
} else {
/* new resid */
- fid_build_from_res_name(fid, name);
+ fid_extract_from_res_name(fid, name);
}
}
@@ -666,14 +677,14 @@ static inline __u64 fid_flatten(const struct lu_fid *fid)
if (fid_is_igif(fid)) {
ino = lu_igif_ino(fid);
- RETURN(ino);
+ return ino;
}
seq = fid_seq(fid);
ino = (seq << 24) + ((seq >> 24) & 0xffffff0000ULL) + fid_oid(fid);
- RETURN(ino ? ino : fid_oid(fid));
+ return ino ? ino : fid_oid(fid);
}
static inline __u32 fid_hash(const struct lu_fid *f, int bits)
@@ -692,7 +703,7 @@ static inline __u32 fid_flatten32(const struct lu_fid *fid)
if (fid_is_igif(fid)) {
ino = lu_igif_ino(fid);
- RETURN(ino);
+ return ino;
}
seq = fid_seq(fid) - FID_SEQ_START;
@@ -706,7 +717,7 @@ static inline __u32 fid_flatten32(const struct lu_fid *fid)
(seq >> (64 - (40-8)) & 0xffffff00) +
(fid_oid(fid) & 0xff000fff) + ((fid_oid(fid) & 0x00fff000) << 8);
- RETURN(ino ? ino : fid_oid(fid));
+ return ino ? ino : fid_oid(fid);
}
static inline int lu_fid_diff(struct lu_fid *fid1, struct lu_fid *fid2)
@@ -759,4 +770,4 @@ static inline void range_be_to_cpu(struct lu_seq_range *dst, const struct lu_seq
/** @} fid */
-#endif /* __LINUX_FID_H */
+#endif /* __LUSTRE_FID_H */
diff --git a/drivers/staging/lustre/lustre/include/lustre_fld.h b/drivers/staging/lustre/lustre/include/lustre_fld.h
index 11e034a65b1..550fff58745 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fld.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fld.h
@@ -43,9 +43,6 @@
*/
#include <lustre/lustre_idl.h>
-#include <lustre_mdt.h>
-#include <dt_object.h>
-
#include <linux/libcfs/libcfs.h>
struct lu_client_fld;
@@ -75,7 +72,7 @@ struct lu_fld_target {
struct lu_server_fld {
/**
* Fld dir proc entry. */
- proc_dir_entry_t *lsf_proc_dir;
+ struct proc_dir_entry *lsf_proc_dir;
/**
* /fld file object device */
@@ -103,7 +100,7 @@ struct lu_server_fld {
struct lu_client_fld {
/**
* Client side proc entry. */
- proc_dir_entry_t *lcf_proc_dir;
+ struct proc_dir_entry *lcf_proc_dir;
/**
* List of exports client FLD knows about. */
@@ -129,47 +126,9 @@ struct lu_client_fld {
* Client fld proc entry name. */
char lcf_name[80];
- const struct lu_context *lcf_ctx;
-
int lcf_flags;
};
-/**
- * number of blocks to reserve for particular operations. Should be function of
- * ... something. Stub for now.
- */
-enum {
- /* one insert operation can involve two delete and one insert */
- FLD_TXN_INDEX_INSERT_CREDITS = 60,
- FLD_TXN_INDEX_DELETE_CREDITS = 20,
-};
-
-int fld_query(struct com_thread_info *info);
-
-/* Server methods */
-int fld_server_init(const struct lu_env *env, struct lu_server_fld *fld,
- struct dt_device *dt, const char *prefix, int mds_node_id,
- int type);
-
-void fld_server_fini(const struct lu_env *env, struct lu_server_fld *fld);
-
-int fld_declare_server_create(const struct lu_env *env,
- struct lu_server_fld *fld,
- struct lu_seq_range *new,
- struct thandle *th);
-
-int fld_server_create(const struct lu_env *env,
- struct lu_server_fld *fld,
- struct lu_seq_range *add_range,
- struct thandle *th);
-
-int fld_insert_entry(const struct lu_env *env,
- struct lu_server_fld *fld,
- const struct lu_seq_range *range);
-
-int fld_server_lookup(const struct lu_env *env, struct lu_server_fld *fld,
- seqno_t seq, struct lu_seq_range *range);
-
/* Client methods */
int fld_client_init(struct lu_client_fld *fld,
const char *prefix, int hash);
diff --git a/drivers/staging/lustre/lustre/include/lustre_idmap.h b/drivers/staging/lustre/lustre/include/lustre_idmap.h
index 084bdd6ab4d..2da859691d6 100644
--- a/drivers/staging/lustre/lustre/include/lustre_idmap.h
+++ b/drivers/staging/lustre/lustre/include/lustre_idmap.h
@@ -80,8 +80,8 @@ struct lustre_idmap_table {
struct lu_ucred;
-extern void lustre_groups_from_list(group_info_t *ginfo, gid_t *glist);
-extern void lustre_groups_sort(group_info_t *group_info);
+extern void lustre_groups_from_list(struct group_info *ginfo, gid_t *glist);
+extern void lustre_groups_sort(struct group_info *group_info);
extern int lustre_in_group_p(struct lu_ucred *mu, gid_t grp);
extern int lustre_idmap_add(struct lustre_idmap_table *t,
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
index 3a5dd6a94c0..67259eb43cd 100644
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ b/drivers/staging/lustre/lustre/include/lustre_import.h
@@ -336,9 +336,11 @@ static inline unsigned int at_timeout2est(unsigned int val)
}
static inline void at_reset(struct adaptive_timeout *at, int val) {
+ spin_lock(&at->at_lock);
at->at_current = val;
at->at_worst_ever = val;
at->at_worst_time = cfs_time_current_sec();
+ spin_unlock(&at->at_lock);
}
static inline void at_init(struct adaptive_timeout *at, int val, int flags) {
memset(at, 0, sizeof(*at));
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index bdfc5391c6d..5e11107d4c6 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -96,7 +96,7 @@ void statfs_unpack(struct kstatfs *sfs, struct obd_statfs *osfs);
/* l_lock.c */
struct lustre_lock {
int l_depth;
- task_t *l_owner;
+ struct task_struct *l_owner;
struct semaphore l_sem;
spinlock_t l_spin;
};
@@ -260,10 +260,7 @@ int obd_ioctl_popdata(void *arg, void *data, int len);
static inline void obd_ioctl_freedata(char *buf, int len)
{
- ENTRY;
-
OBD_FREE_LARGE(buf, len);
- EXIT;
return;
}
diff --git a/drivers/staging/lustre/lustre/include/lustre_log.h b/drivers/staging/lustre/lustre/include/lustre_log.h
index 714ab378e43..721aa05dff3 100644
--- a/drivers/staging/lustre/lustre/include/lustre_log.h
+++ b/drivers/staging/lustre/lustre/include/lustre_log.h
@@ -469,16 +469,14 @@ static inline int llog_destroy(const struct lu_env *env,
struct llog_operations *lop;
int rc;
- ENTRY;
-
rc = llog_handle2ops(handle, &lop);
if (rc)
- RETURN(rc);
+ return rc;
if (lop->lop_destroy == NULL)
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
rc = lop->lop_destroy(env, handle);
- RETURN(rc);
+ return rc;
}
static inline int llog_next_block(const struct lu_env *env,
@@ -489,17 +487,15 @@ static inline int llog_next_block(const struct lu_env *env,
struct llog_operations *lop;
int rc;
- ENTRY;
-
rc = llog_handle2ops(loghandle, &lop);
if (rc)
- RETURN(rc);
+ return rc;
if (lop->lop_next_block == NULL)
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
rc = lop->lop_next_block(env, loghandle, cur_idx, next_idx,
cur_offset, buf, len);
- RETURN(rc);
+ return rc;
}
static inline int llog_prev_block(const struct lu_env *env,
@@ -509,16 +505,14 @@ static inline int llog_prev_block(const struct lu_env *env,
struct llog_operations *lop;
int rc;
- ENTRY;
-
rc = llog_handle2ops(loghandle, &lop);
if (rc)
- RETURN(rc);
+ return rc;
if (lop->lop_prev_block == NULL)
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
rc = lop->lop_prev_block(env, loghandle, prev_idx, buf, len);
- RETURN(rc);
+ return rc;
}
static inline int llog_connect(struct llog_ctxt *ctxt,
@@ -528,16 +522,14 @@ static inline int llog_connect(struct llog_ctxt *ctxt,
struct llog_operations *lop;
int rc;
- ENTRY;
-
rc = llog_obd2ops(ctxt, &lop);
if (rc)
- RETURN(rc);
+ return rc;
if (lop->lop_connect == NULL)
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
rc = lop->lop_connect(ctxt, logid, gen, uuid);
- RETURN(rc);
+ return rc;
}
/* llog.c */
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index fb1561a809b..19000259a5e 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -84,9 +84,8 @@ static inline void mdc_init_rpc_lock(struct mdc_rpc_lock *lck)
static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck,
struct lookup_intent *it)
{
- ENTRY;
-
- if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP))
+ if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
+ it->it_op == IT_LAYOUT))
return;
/* This would normally block until the existing request finishes.
@@ -123,8 +122,9 @@ static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck,
static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck,
struct lookup_intent *it)
{
- if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP))
- goto out;
+ if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
+ it->it_op == IT_LAYOUT))
+ return;
if (lck->rpcl_it == MDC_FAKE_RPCL_IT) { /* OBD_FAIL_MDC_RPCS_SEM */
mutex_lock(&lck->rpcl_mutex);
@@ -141,8 +141,6 @@ static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck,
}
mutex_unlock(&lck->rpcl_mutex);
- out:
- EXIT;
}
static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdt.h b/drivers/staging/lustre/lustre/include/lustre_mdt.h
deleted file mode 100644
index dba26a6cfa3..00000000000
--- a/drivers/staging/lustre/lustre/include/lustre_mdt.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __LINUX_MDT_H
-#define __LINUX_MDT_H
-
-/** \defgroup mdt mdt
- *
- * @{
- */
-
-#include <lustre/lustre_idl.h>
-#include <lustre_req_layout.h>
-#include <md_object.h>
-#include <dt_object.h>
-#include <linux/libcfs/libcfs.h>
-
-/*
- * Common thread info for mdt, seq and fld
- */
-struct com_thread_info {
- /*
- * for req-layout interface.
- */
- struct req_capsule *cti_pill;
-};
-
-enum {
- ESERIOUS = 0x0001000
-};
-
-static inline int err_serious(int rc)
-{
- LASSERT(rc < 0);
- LASSERT(-rc < ESERIOUS);
- return -(-rc | ESERIOUS);
-}
-
-static inline int clear_serious(int rc)
-{
- if (rc < 0)
- rc = -(-rc & ~ESERIOUS);
- return rc;
-}
-
-static inline int is_serious(int rc)
-{
- return (rc < 0 && -rc & ESERIOUS);
-}
-
-/** @} mdt */
-
-#endif
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index 293dd90e5b6..e947002fae0 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -1136,7 +1136,7 @@ struct ptlrpc_nrs_pol_conf {
* different module to the one the NRS framework is held within
* (currently ptlrpc), should set this field to THIS_MODULE.
*/
- module_t *nc_owner;
+ struct module *nc_owner;
/**
* Policy registration flags; a bitmask of \e nrs_policy_flags
*/
@@ -1211,7 +1211,7 @@ struct ptlrpc_nrs_pol_desc {
* then unregistration and lprocfs operations will be properly
* serialized.
*/
- module_t *pd_owner;
+ struct module *pd_owner;
/**
* Bitmask of \e nrs_policy_flags
*/
@@ -2322,8 +2322,13 @@ struct ptlrpc_thread {
pid_t t_pid;
/**
* put watchdog in the structure per thread b=14840
+ *
+ * The Lustre watchdog is removed for the client in the hope
+ * that a generic watchdog can be merged into the kernel.
+ * When that happens, we should add the field below back.
+ *
+ * struct lc_watchdog *t_watchdog;
*/
- struct lc_watchdog *t_watchdog;
/**
* the svc this thread belonged to b=18582
*/
@@ -2484,7 +2489,7 @@ struct ptlrpc_service {
/** limit of threads number for each partition */
int srv_nthrs_cpt_limit;
/** Root of /proc dir tree for this service */
- proc_dir_entry_t *srv_procroot;
+ struct proc_dir_entry *srv_procroot;
/** Pointer to statistic data for this service */
struct lprocfs_stats *srv_stats;
/** # hp per lp reqs to handle */
@@ -2631,7 +2636,7 @@ struct ptlrpc_service_part {
/** reqs waiting for replies */
struct ptlrpc_at_array scp_at_array;
/** early reply timer */
- timer_list_t scp_at_timer;
+ struct timer_list scp_at_timer;
/** debug */
cfs_time_t scp_at_checktime;
/** check early replies */
@@ -3161,6 +3166,38 @@ lustre_shrink_reply(struct ptlrpc_request *req, int segment,
req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment,
newlen, move_data);
}
+
+#ifdef CONFIG_LUSTRE_TRANSLATE_ERRNOS
+
+static inline int ptlrpc_status_hton(int h)
+{
+ /*
+ * Positive errnos must be network errnos, such as LUSTRE_EDEADLK,
+ * ELDLM_LOCK_ABORTED, etc.
+ */
+ if (h < 0)
+ return -lustre_errno_hton(-h);
+ else
+ return h;
+}
+
+static inline int ptlrpc_status_ntoh(int n)
+{
+ /*
+ * See the comment in ptlrpc_status_hton().
+ */
+ if (n < 0)
+ return -lustre_errno_ntoh(-n);
+ else
+ return n;
+}
+
+#else
+
+#define ptlrpc_status_hton(h) (h)
+#define ptlrpc_status_ntoh(n) (n)
+
+#endif
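/*
 * Illustrative sketch, not part of this patch: with
 * CONFIG_LUSTRE_TRANSLATE_ERRNOS enabled, a reply handler would translate a
 * host errno to its wire form before sending and back again on receipt;
 * without it, both macros are the identity.  example_wire_status() and the
 * -EDEADLK value are used only for illustration.
 */
static inline int example_wire_status(void)
{
	int wire = ptlrpc_status_hton(-EDEADLK);	/* host -> network errno */

	return ptlrpc_status_ntoh(wire);		/* network -> host errno */
}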
/** @} */
/** Change request phase of \a req to \a new_phase */
diff --git a/drivers/staging/lustre/lustre/include/lustre_quota.h b/drivers/staging/lustre/lustre/include/lustre_quota.h
index 1c3041f5004..71b5d97e034 100644
--- a/drivers/staging/lustre/lustre/include/lustre_quota.h
+++ b/drivers/staging/lustre/lustre/include/lustre_quota.h
@@ -168,7 +168,7 @@ struct qsd_instance;
* enforcement. Arguments are documented where each function is defined. */
struct qsd_instance *qsd_init(const struct lu_env *, char *, struct dt_device *,
- proc_dir_entry_t *);
+ struct proc_dir_entry *);
int qsd_prepare(const struct lu_env *, struct qsd_instance *);
int qsd_start(const struct lu_env *, struct qsd_instance *);
void qsd_fini(const struct lu_env *, struct qsd_instance *);
diff --git a/drivers/staging/lustre/lustre/include/lustre_sec.h b/drivers/staging/lustre/lustre/include/lustre_sec.h
index 9e0908e1c4d..70b8b133a5c 100644
--- a/drivers/staging/lustre/lustre/include/lustre_sec.h
+++ b/drivers/staging/lustre/lustre/include/lustre_sec.h
@@ -796,7 +796,7 @@ struct ptlrpc_sec_sops {
};
struct ptlrpc_sec_policy {
- module_t *sp_owner;
+ struct module *sp_owner;
char *sp_name;
__u16 sp_policy; /* policy number */
struct ptlrpc_sec_cops *sp_cops; /* client ops */
diff --git a/drivers/staging/lustre/lustre/include/md_object.h b/drivers/staging/lustre/lustre/include/md_object.h
index 92d6420b21d..daf93afe3fe 100644
--- a/drivers/staging/lustre/lustre/include/md_object.h
+++ b/drivers/staging/lustre/lustre/include/md_object.h
@@ -503,11 +503,6 @@ static inline struct md_device *md_obj2dev(const struct md_object *o)
return container_of0(o->mo_lu.lo_dev, struct md_device, md_lu_dev);
}
-static inline struct seq_server_site *lu_site2seq(const struct lu_site *s)
-{
- return s->ld_seq_site;
-}
-
static inline int md_device_init(struct md_device *md, struct lu_device_type *t)
{
return lu_device_init(&md->md_lu_dev, t);
@@ -876,7 +871,7 @@ struct lu_ucred {
__u32 uc_suppgids[2];
cfs_cap_t uc_cap;
__u32 uc_umask;
- group_info_t *uc_ginfo;
+ struct group_info *uc_ginfo;
struct md_identity *uc_identity;
};
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index 0a251fdfe16..a6122559d55 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -49,15 +49,14 @@
#define IOC_MDC_MAX_NR 50
#include <lustre/lustre_idl.h>
-#include <lu_ref.h>
#include <lustre_lib.h>
+#include <linux/libcfs/bitmap.h>
+#include <lu_ref.h>
#include <lustre_export.h>
+#include <lustre_fid.h>
#include <lustre_fld.h>
#include <lustre_capa.h>
-#include <linux/libcfs/bitmap.h>
-
-
#define MAX_OBD_DEVICES 8192
struct osc_async_rc {
@@ -119,6 +118,20 @@ struct lov_stripe_md {
#define lsm_stripe_count lsm_wire.lw_stripe_count
#define lsm_pool_name lsm_wire.lw_pool_name
+static inline bool lsm_is_released(struct lov_stripe_md *lsm)
+{
+ return !!(lsm->lsm_pattern & LOV_PATTERN_F_RELEASED);
+}
+
+static inline bool lsm_has_objects(struct lov_stripe_md *lsm)
+{
+ if (lsm == NULL)
+ return false;
+ if (lsm_is_released(lsm))
+ return false;
+ return true;
+}
+
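/*
 * Illustrative sketch, not part of this patch: callers can use the new
 * helper as a guard before touching OST objects of a possibly released
 * file (one with no objects behind it).  example_count_stripes() is a
 * hypothetical helper; lsm_stripe_count is the accessor defined above.
 */
static inline int example_count_stripes(struct lov_stripe_md *lsm)
{
	return lsm_has_objects(lsm) ? lsm->lsm_stripe_count : 0;
}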
struct obd_info;
typedef int (*obd_enqueue_update_f)(void *cookie, int rc);
@@ -225,7 +238,7 @@ struct obd_type {
struct list_head typ_chain;
struct obd_ops *typ_dt_ops;
struct md_ops *typ_md_ops;
- proc_dir_entry_t *typ_procroot;
+ struct proc_dir_entry *typ_procroot;
char *typ_name;
int typ_refcnt;
struct lu_device_type *typ_lu;
@@ -239,30 +252,6 @@ struct brw_page {
obd_flag flag;
};
-/* Individual type definitions */
-
-struct ost_server_data;
-
-struct osd_properties {
- size_t osd_max_ea_size;
-};
-
-#define OBT_MAGIC 0xBDDECEAE
-/* hold common fields for "target" device */
-struct obd_device_target {
- __u32 obt_magic;
- __u32 obt_instance;
- struct super_block *obt_sb;
- /** last_rcvd file */
- struct file *obt_rcvd_filp;
- __u64 obt_mount_count;
- struct rw_semaphore obt_rwsem;
- struct vfsmount *obt_vfsmnt;
- struct file *obt_health_check_filp;
- struct osd_properties obt_osd_properties;
- struct obd_job_stats obt_jobstats;
-};
-
/* llog contexts */
enum llog_ctxt_id {
LLOG_CONFIG_ORIG_CTXT = 0,
@@ -277,100 +266,13 @@ enum llog_ctxt_id {
LLOG_TEST_REPL_CTXT,
LLOG_LOVEA_ORIG_CTXT,
LLOG_LOVEA_REPL_CTXT,
- LLOG_CHANGELOG_ORIG_CTXT, /**< changelog generation on mdd */
- LLOG_CHANGELOG_REPL_CTXT, /**< changelog access on clients */
- LLOG_CHANGELOG_USER_ORIG_CTXT, /**< for multiple changelog consumers */
+ LLOG_CHANGELOG_ORIG_CTXT, /**< changelog generation on mdd */
+ LLOG_CHANGELOG_REPL_CTXT, /**< changelog access on clients */
+ LLOG_CHANGELOG_USER_ORIG_CTXT, /**< for multiple changelog consumers */
+ LLOG_AGENT_ORIG_CTXT, /**< agent requests generation on cdt */
LLOG_MAX_CTXTS
};
-#define FILTER_SUBDIR_COUNT 32 /* set to zero for no subdirs */
-
-struct filter_subdirs {
- struct dentry *dentry[FILTER_SUBDIR_COUNT];
-};
-
-
-struct filter_ext {
- __u64 fe_start;
- __u64 fe_end;
-};
-
-struct filter_obd {
- /* NB this field MUST be first */
- struct obd_device_target fo_obt;
- const char *fo_fstype;
-
- int fo_group_count;
- struct dentry *fo_dentry_O;
- struct dentry **fo_dentry_O_groups;
- struct filter_subdirs *fo_dentry_O_sub;
- struct mutex fo_init_lock; /* group initialization lock*/
- int fo_committed_group;
-
- spinlock_t fo_objidlock; /* protect fo_lastobjid */
-
- unsigned long fo_destroys_in_progress;
- struct mutex fo_create_locks[FILTER_SUBDIR_COUNT];
-
- struct list_head fo_export_list;
- int fo_subdir_count;
-
- obd_size fo_tot_dirty; /* protected by obd_osfs_lock */
- obd_size fo_tot_granted; /* all values in bytes */
- obd_size fo_tot_pending;
- int fo_tot_granted_clients;
-
- obd_size fo_readcache_max_filesize;
- spinlock_t fo_flags_lock;
- unsigned int fo_read_cache:1, /**< enable read-only cache */
- fo_writethrough_cache:1,/**< read cache writes */
- fo_mds_ost_sync:1, /**< MDS-OST orphan recovery*/
- fo_raid_degraded:1;/**< RAID device degraded */
-
- struct obd_import *fo_mdc_imp;
- struct obd_uuid fo_mdc_uuid;
- struct lustre_handle fo_mdc_conn;
- struct file **fo_last_objid_files;
- __u64 *fo_last_objids; /* last created objid for groups,
- * protected by fo_objidlock */
-
- struct mutex fo_alloc_lock;
-
- atomic_t fo_r_in_flight;
- atomic_t fo_w_in_flight;
-
- /*
- * per-filter pool of kiobuf's allocated by filter_common_setup() and
- * torn down by filter_cleanup().
- *
- * This pool contains kiobuf used by
- * filter_{prep,commit}rw_{read,write}() and is shared by all OST
- * threads.
- *
- * Locking: protected by internal lock of cfs_hash, pool can be
- * found from this hash table by t_id of ptlrpc_thread.
- */
- struct cfs_hash *fo_iobuf_hash;
-
- struct brw_stats fo_filter_stats;
-
- int fo_fmd_max_num; /* per exp filter_mod_data */
- int fo_fmd_max_age; /* jiffies to fmd expiry */
- unsigned long fo_syncjournal:1, /* sync journal on writes */
- fo_sync_lock_cancel:2;/* sync on lock cancel */
-
-
- /* sptlrpc stuff */
- rwlock_t fo_sptlrpc_lock;
- struct sptlrpc_rule_set fo_sptlrpc_rset;
-
- /* capability related */
- unsigned int fo_fl_oss_capa;
- struct list_head fo_capa_keys;
- struct hlist_head *fo_capa_hash;
- int fo_sec_level;
-};
-
struct timeout_item {
enum timeout_event ti_event;
cfs_time_t ti_timeout;
@@ -536,25 +438,6 @@ struct obd_id_info {
obd_id *data;
};
-/* */
-
-struct echo_obd {
- struct obd_device_target eo_obt;
- struct obdo eo_oa;
- spinlock_t eo_lock;
- __u64 eo_lastino;
- struct lustre_handle eo_nl_lock;
- atomic_t eo_prep;
-};
-
-struct ost_obd {
- struct ptlrpc_service *ost_service;
- struct ptlrpc_service *ost_create_service;
- struct ptlrpc_service *ost_io_service;
- struct ptlrpc_service *ost_seq_service;
- struct mutex ost_health_mutex;
-};
-
struct echo_client_obd {
struct obd_export *ec_exp; /* the local connection to osc/lov */
spinlock_t ec_lock;
@@ -654,7 +537,7 @@ struct pool_desc {
struct lov_qos_rr pool_rr; /* round robin qos */
struct hlist_node pool_hash; /* access by poolname */
struct list_head pool_list; /* serial access */
- proc_dir_entry_t *pool_proc_entry; /* file in /proc */
+ struct proc_dir_entry *pool_proc_entry; /* file in /proc */
struct obd_device *pool_lobd; /* obd of the lov/lod to which
* this pool belongs */
};
@@ -675,7 +558,7 @@ struct lov_obd {
int lov_pool_count;
cfs_hash_t *lov_pools_hash_body; /* used for key access */
struct list_head lov_pool_list; /* used for sequential access */
- proc_dir_entry_t *lov_pool_proc_entry;
+ struct proc_dir_entry *lov_pool_proc_entry;
enum lustre_sec_part lov_sp_me;
/* Cached LRU pages from upper layer */
@@ -1017,7 +900,7 @@ struct obd_device {
int obd_requests_queued_for_recovery;
wait_queue_head_t obd_next_transno_waitq;
/* protected by obd_recovery_task_lock */
- timer_list_t obd_recovery_timer;
+ struct timer_list obd_recovery_timer;
time_t obd_recovery_start; /* seconds */
time_t obd_recovery_end; /* seconds, for lprocfs_status */
int obd_recovery_time_hard;
@@ -1036,12 +919,8 @@ struct obd_device {
int obd_recovery_stage;
union {
- struct obd_device_target obt;
- struct filter_obd filter;
struct client_obd cli;
- struct ost_obd ost;
struct echo_client_obd echo_client;
- struct echo_obd echo;
struct lov_obd lov;
struct lmv_obd lmv;
} u;
@@ -1052,10 +931,10 @@ struct obd_device {
unsigned int md_cntr_base;
struct lprocfs_stats *md_stats;
- proc_dir_entry_t *obd_proc_entry;
+ struct proc_dir_entry *obd_proc_entry;
void *obd_proc_private; /* type private PDEs */
- proc_dir_entry_t *obd_proc_exports_entry;
- proc_dir_entry_t *obd_svc_procroot;
+ struct proc_dir_entry *obd_proc_exports_entry;
+ struct proc_dir_entry *obd_svc_procroot;
struct lprocfs_stats *obd_svc_stats;
atomic_t obd_evict_inprogress;
wait_queue_head_t obd_evict_inprogress_waitq;
@@ -1218,12 +1097,6 @@ typedef int (* md_enqueue_cb_t)(struct ptlrpc_request *req,
struct md_enqueue_info *minfo,
int rc);
-/* seq client type */
-enum lu_cli_type {
- LUSTRE_SEQ_METADATA = 1,
- LUSTRE_SEQ_DATA
-};
-
struct md_enqueue_info {
struct md_op_data mi_data;
struct lookup_intent mi_it;
@@ -1235,7 +1108,7 @@ struct md_enqueue_info {
};
struct obd_ops {
- module_t *o_owner;
+ struct module *o_owner;
int (*o_iocontrol)(unsigned int cmd, struct obd_export *exp, int len,
void *karg, void *uarg);
int (*o_get_info)(const struct lu_env *env, struct obd_export *,
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index de5c5853647..983718fe1e5 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -326,7 +326,7 @@ void obdo_le_to_cpu(struct obdo *dobdo, struct obdo *sobdo);
do { \
if (!(obd)) { \
CERROR("NULL device\n"); \
- RETURN(-ENODEV); \
+ return -ENODEV; \
} \
} while (0)
@@ -337,7 +337,7 @@ do { \
if (!(obd)->obd_set_up || (obd)->obd_stopping) { \
CERROR("Device %d not setup\n", \
(obd)->obd_minor); \
- RETURN(-ENODEV); \
+ return -ENODEV; \
} \
} while (0)
@@ -424,7 +424,7 @@ do { \
if (err) \
CERROR("md_" #op ": dev %s/%d no operation\n", \
obd->obd_name, obd->obd_minor); \
- RETURN(err); \
+ return err; \
} \
} while (0)
@@ -432,17 +432,17 @@ do { \
do { \
if ((exp) == NULL) { \
CERROR("obd_" #op ": NULL export\n"); \
- RETURN(-ENODEV); \
+ return -ENODEV; \
} \
if ((exp)->exp_obd == NULL || !OBT((exp)->exp_obd)) { \
CERROR("obd_" #op ": cleaned up obd\n"); \
- RETURN(-EOPNOTSUPP); \
+ return -EOPNOTSUPP; \
} \
if (!OBT((exp)->exp_obd) || !MDP((exp)->exp_obd, op)) { \
CERROR("obd_" #op ": dev %s/%d no operation\n", \
(exp)->exp_obd->obd_name, \
(exp)->exp_obd->obd_minor); \
- RETURN(-EOPNOTSUPP); \
+ return -EOPNOTSUPP; \
} \
} while (0)
@@ -453,7 +453,7 @@ do { \
if (err) \
CERROR("obd_" #op ": dev %d no operation\n", \
obd->obd_minor); \
- RETURN(err); \
+ return err; \
} \
} while (0)
@@ -461,16 +461,16 @@ do { \
do { \
if ((exp) == NULL) { \
CERROR("obd_" #op ": NULL export\n"); \
- RETURN(-ENODEV); \
+ return -ENODEV; \
} \
if ((exp)->exp_obd == NULL || !OBT((exp)->exp_obd)) { \
CERROR("obd_" #op ": cleaned up obd\n"); \
- RETURN(-EOPNOTSUPP); \
+ return -EOPNOTSUPP; \
} \
if (!OBT((exp)->exp_obd) || !OBP((exp)->exp_obd, op)) { \
CERROR("obd_" #op ": dev %d no operation\n", \
(exp)->exp_obd->obd_minor); \
- RETURN(-EOPNOTSUPP); \
+ return -EOPNOTSUPP; \
} \
} while (0)
@@ -480,7 +480,7 @@ do { \
if (err) \
CERROR("lop_" #op ": dev %d no operation\n", \
ctxt->loc_obd->obd_minor); \
- RETURN(err); \
+ return err; \
} \
} while (0)
@@ -495,14 +495,13 @@ static inline int obd_get_info(const struct lu_env *env,
struct lov_stripe_md *lsm)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, get_info);
EXP_COUNTER_INCREMENT(exp, get_info);
rc = OBP(exp->exp_obd, get_info)(env, exp, keylen, key, vallen, val,
lsm);
- RETURN(rc);
+ return rc;
}
static inline int obd_set_info_async(const struct lu_env *env,
@@ -511,14 +510,13 @@ static inline int obd_set_info_async(const struct lu_env *env,
struct ptlrpc_request_set *set)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, set_info_async);
EXP_COUNTER_INCREMENT(exp, set_info_async);
rc = OBP(exp->exp_obd, set_info_async)(env, exp, keylen, key, vallen,
val, set);
- RETURN(rc);
+ return rc;
}
/*
@@ -547,7 +545,6 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg)
{
int rc;
DECLARE_LU_VARS(ldt, d);
- ENTRY;
ldt = obd->obd_type->typ_lu;
if (ldt != NULL) {
@@ -577,7 +574,7 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg)
OBD_COUNTER_INCREMENT(obd, setup);
rc = OBP(obd, setup)(obd, cfg);
}
- RETURN(rc);
+ return rc;
}
static inline int obd_precleanup(struct obd_device *obd,
@@ -585,7 +582,6 @@ static inline int obd_precleanup(struct obd_device *obd,
{
int rc;
DECLARE_LU_VARS(ldt, d);
- ENTRY;
OBD_CHECK_DEV(obd);
ldt = obd->obd_type->typ_lu;
@@ -605,14 +601,13 @@ static inline int obd_precleanup(struct obd_device *obd,
OBD_COUNTER_INCREMENT(obd, precleanup);
rc = OBP(obd, precleanup)(obd, cleanup_stage);
- RETURN(rc);
+ return rc;
}
static inline int obd_cleanup(struct obd_device *obd)
{
int rc;
DECLARE_LU_VARS(ldt, d);
- ENTRY;
OBD_CHECK_DEV(obd);
@@ -632,13 +627,11 @@ static inline int obd_cleanup(struct obd_device *obd)
OBD_COUNTER_INCREMENT(obd, cleanup);
rc = OBP(obd, cleanup)(obd);
- RETURN(rc);
+ return rc;
}
static inline void obd_cleanup_client_import(struct obd_device *obd)
{
- ENTRY;
-
/* If we set up but never connected, the
client import will not have been cleaned. */
down_write(&obd->u.cli.cl_sem);
@@ -656,8 +649,6 @@ static inline void obd_cleanup_client_import(struct obd_device *obd)
obd->u.cli.cl_import = NULL;
}
up_write(&obd->u.cli.cl_sem);
-
- EXIT;
}
static inline int
@@ -665,7 +656,6 @@ obd_process_config(struct obd_device *obd, int datalen, void *data)
{
int rc;
DECLARE_LU_VARS(ldt, d);
- ENTRY;
OBD_CHECK_DEV(obd);
@@ -687,7 +677,7 @@ obd_process_config(struct obd_device *obd, int datalen, void *data)
OBD_COUNTER_INCREMENT(obd, process_config);
obd->obd_process_conf = 0;
- RETURN(rc);
+ return rc;
}
/* Pack an in-memory MD struct for storage on disk.
@@ -702,13 +692,12 @@ static inline int obd_packmd(struct obd_export *exp,
struct lov_stripe_md *mem_src)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, packmd);
EXP_COUNTER_INCREMENT(exp, packmd);
rc = OBP(exp->exp_obd, packmd)(exp, disk_tgt, mem_src);
- RETURN(rc);
+ return rc;
}
static inline int obd_size_diskmd(struct obd_export *exp,
@@ -757,13 +746,12 @@ static inline int obd_unpackmd(struct obd_export *exp,
int disk_len)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, unpackmd);
EXP_COUNTER_INCREMENT(exp, unpackmd);
rc = OBP(exp->exp_obd, unpackmd)(exp, mem_tgt, disk_src, disk_len);
- RETURN(rc);
+ return rc;
}
/* helper functions */
@@ -790,13 +778,12 @@ static inline int obd_free_memmd(struct obd_export *exp,
static inline int obd_precreate(struct obd_export *exp)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, precreate);
OBD_COUNTER_INCREMENT(exp->exp_obd, precreate);
rc = OBP(exp->exp_obd, precreate)(exp);
- RETURN(rc);
+ return rc;
}
static inline int obd_create_async(struct obd_export *exp,
@@ -805,13 +792,12 @@ static inline int obd_create_async(struct obd_export *exp,
struct obd_trans_info *oti)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, create_async);
EXP_COUNTER_INCREMENT(exp, create_async);
rc = OBP(exp->exp_obd, create_async)(exp, oinfo, ea, oti);
- RETURN(rc);
+ return rc;
}
static inline int obd_create(const struct lu_env *env, struct obd_export *exp,
@@ -819,13 +805,12 @@ static inline int obd_create(const struct lu_env *env, struct obd_export *exp,
struct obd_trans_info *oti)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, create);
EXP_COUNTER_INCREMENT(exp, create);
rc = OBP(exp->exp_obd, create)(env, exp, obdo, ea, oti);
- RETURN(rc);
+ return rc;
}
static inline int obd_destroy(const struct lu_env *env, struct obd_export *exp,
@@ -834,26 +819,24 @@ static inline int obd_destroy(const struct lu_env *env, struct obd_export *exp,
struct obd_export *md_exp, void *capa)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, destroy);
EXP_COUNTER_INCREMENT(exp, destroy);
rc = OBP(exp->exp_obd, destroy)(env, exp, obdo, ea, oti, md_exp, capa);
- RETURN(rc);
+ return rc;
}
static inline int obd_getattr(const struct lu_env *env, struct obd_export *exp,
struct obd_info *oinfo)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, getattr);
EXP_COUNTER_INCREMENT(exp, getattr);
rc = OBP(exp->exp_obd, getattr)(env, exp, oinfo);
- RETURN(rc);
+ return rc;
}
static inline int obd_getattr_async(struct obd_export *exp,
@@ -861,13 +844,12 @@ static inline int obd_getattr_async(struct obd_export *exp,
struct ptlrpc_request_set *set)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, getattr_async);
EXP_COUNTER_INCREMENT(exp, getattr_async);
rc = OBP(exp->exp_obd, getattr_async)(exp, oinfo, set);
- RETURN(rc);
+ return rc;
}
static inline int obd_setattr(const struct lu_env *env, struct obd_export *exp,
@@ -875,13 +857,12 @@ static inline int obd_setattr(const struct lu_env *env, struct obd_export *exp,
struct obd_trans_info *oti)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, setattr);
EXP_COUNTER_INCREMENT(exp, setattr);
rc = OBP(exp->exp_obd, setattr)(env, exp, oinfo, oti);
- RETURN(rc);
+ return rc;
}
/* This performs all the requests set init/wait/destroy actions. */
@@ -891,20 +872,19 @@ static inline int obd_setattr_rqset(struct obd_export *exp,
{
struct ptlrpc_request_set *set = NULL;
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, setattr_async);
EXP_COUNTER_INCREMENT(exp, setattr_async);
set = ptlrpc_prep_set();
if (set == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set);
if (rc == 0)
rc = ptlrpc_set_wait(set);
ptlrpc_set_destroy(set);
- RETURN(rc);
+ return rc;
}
/* This adds all the requests into @set if @set != NULL, otherwise
@@ -915,13 +895,12 @@ static inline int obd_setattr_async(struct obd_export *exp,
struct ptlrpc_request_set *set)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, setattr_async);
EXP_COUNTER_INCREMENT(exp, setattr_async);
rc = OBP(exp->exp_obd, setattr_async)(exp, oinfo, oti, set);
- RETURN(rc);
+ return rc;
}
static inline int obd_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
@@ -929,40 +908,37 @@ static inline int obd_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
{
struct obd_device *obd = imp->imp_obd;
int rc;
- ENTRY;
OBD_CHECK_DEV_ACTIVE(obd);
OBD_CHECK_DT_OP(obd, add_conn, -EOPNOTSUPP);
OBD_COUNTER_INCREMENT(obd, add_conn);
rc = OBP(obd, add_conn)(imp, uuid, priority);
- RETURN(rc);
+ return rc;
}
static inline int obd_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
{
struct obd_device *obd = imp->imp_obd;
int rc;
- ENTRY;
OBD_CHECK_DEV_ACTIVE(obd);
OBD_CHECK_DT_OP(obd, del_conn, -EOPNOTSUPP);
OBD_COUNTER_INCREMENT(obd, del_conn);
rc = OBP(obd, del_conn)(imp, uuid);
- RETURN(rc);
+ return rc;
}
static inline struct obd_uuid *obd_get_uuid(struct obd_export *exp)
{
struct obd_uuid *uuid;
- ENTRY;
OBD_CHECK_DT_OP(exp->exp_obd, get_uuid, NULL);
EXP_COUNTER_INCREMENT(exp, get_uuid);
uuid = OBP(exp->exp_obd, get_uuid)(exp);
- RETURN(uuid);
+ return uuid;
}
/** Create a new /a exp on device /a obd for the uuid /a cluuid
@@ -979,7 +955,6 @@ static inline int obd_connect(const struct lu_env *env,
int rc;
__u64 ocf = data ? data->ocd_connect_flags : 0; /* for post-condition
* check */
- ENTRY;
OBD_CHECK_DEV_ACTIVE(obd);
OBD_CHECK_DT_OP(obd, connect, -EOPNOTSUPP);
@@ -989,7 +964,7 @@ static inline int obd_connect(const struct lu_env *env,
/* check that only subset is granted */
LASSERT(ergo(data != NULL, (data->ocd_connect_flags & ocf) ==
data->ocd_connect_flags));
- RETURN(rc);
+ return rc;
}
static inline int obd_reconnect(const struct lu_env *env,
@@ -1003,8 +978,6 @@ static inline int obd_reconnect(const struct lu_env *env,
__u64 ocf = d ? d->ocd_connect_flags : 0; /* for post-condition
* check */
- ENTRY;
-
OBD_CHECK_DEV_ACTIVE(obd);
OBD_CHECK_DT_OP(obd, reconnect, 0);
OBD_COUNTER_INCREMENT(obd, reconnect);
@@ -1013,44 +986,41 @@ static inline int obd_reconnect(const struct lu_env *env,
/* check that only subset is granted */
LASSERT(ergo(d != NULL,
(d->ocd_connect_flags & ocf) == d->ocd_connect_flags));
- RETURN(rc);
+ return rc;
}
static inline int obd_disconnect(struct obd_export *exp)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, disconnect);
EXP_COUNTER_INCREMENT(exp, disconnect);
rc = OBP(exp->exp_obd, disconnect)(exp);
- RETURN(rc);
+ return rc;
}
static inline int obd_fid_init(struct obd_device *obd, struct obd_export *exp,
enum lu_cli_type type)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(obd, fid_init, 0);
OBD_COUNTER_INCREMENT(obd, fid_init);
rc = OBP(obd, fid_init)(obd, exp, type);
- RETURN(rc);
+ return rc;
}
static inline int obd_fid_fini(struct obd_device *obd)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(obd, fid_fini, 0);
OBD_COUNTER_INCREMENT(obd, fid_fini);
rc = OBP(obd, fid_fini)(obd);
- RETURN(rc);
+ return rc;
}
static inline int obd_fid_alloc(struct obd_export *exp,
@@ -1058,113 +1028,101 @@ static inline int obd_fid_alloc(struct obd_export *exp,
struct md_op_data *op_data)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, fid_alloc);
EXP_COUNTER_INCREMENT(exp, fid_alloc);
rc = OBP(exp->exp_obd, fid_alloc)(exp, fid, op_data);
- RETURN(rc);
+ return rc;
}
static inline int obd_ping(const struct lu_env *env, struct obd_export *exp)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(exp->exp_obd, ping, 0);
EXP_COUNTER_INCREMENT(exp, ping);
rc = OBP(exp->exp_obd, ping)(env, exp);
- RETURN(rc);
+ return rc;
}
static inline int obd_pool_new(struct obd_device *obd, char *poolname)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(obd, pool_new, -EOPNOTSUPP);
OBD_COUNTER_INCREMENT(obd, pool_new);
rc = OBP(obd, pool_new)(obd, poolname);
- RETURN(rc);
+ return rc;
}
static inline int obd_pool_del(struct obd_device *obd, char *poolname)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(obd, pool_del, -EOPNOTSUPP);
OBD_COUNTER_INCREMENT(obd, pool_del);
rc = OBP(obd, pool_del)(obd, poolname);
- RETURN(rc);
+ return rc;
}
static inline int obd_pool_add(struct obd_device *obd, char *poolname, char *ostname)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(obd, pool_add, -EOPNOTSUPP);
OBD_COUNTER_INCREMENT(obd, pool_add);
rc = OBP(obd, pool_add)(obd, poolname, ostname);
- RETURN(rc);
+ return rc;
}
static inline int obd_pool_rem(struct obd_device *obd, char *poolname, char *ostname)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(obd, pool_rem, -EOPNOTSUPP);
OBD_COUNTER_INCREMENT(obd, pool_rem);
rc = OBP(obd, pool_rem)(obd, poolname, ostname);
- RETURN(rc);
+ return rc;
}
static inline void obd_getref(struct obd_device *obd)
{
- ENTRY;
if (OBT(obd) && OBP(obd, getref)) {
OBD_COUNTER_INCREMENT(obd, getref);
OBP(obd, getref)(obd);
}
- EXIT;
}
static inline void obd_putref(struct obd_device *obd)
{
- ENTRY;
if (OBT(obd) && OBP(obd, putref)) {
OBD_COUNTER_INCREMENT(obd, putref);
OBP(obd, putref)(obd);
}
- EXIT;
}
static inline int obd_init_export(struct obd_export *exp)
{
int rc = 0;
- ENTRY;
if ((exp)->exp_obd != NULL && OBT((exp)->exp_obd) &&
OBP((exp)->exp_obd, init_export))
rc = OBP(exp->exp_obd, init_export)(exp);
- RETURN(rc);
+ return rc;
}
static inline int obd_destroy_export(struct obd_export *exp)
{
- ENTRY;
if ((exp)->exp_obd != NULL && OBT((exp)->exp_obd) &&
OBP((exp)->exp_obd, destroy_export))
OBP(exp->exp_obd, destroy_export)(exp);
- RETURN(0);
+ return 0;
}
static inline int obd_extent_calc(struct obd_export *exp,
@@ -1172,10 +1130,10 @@ static inline int obd_extent_calc(struct obd_export *exp,
int cmd, obd_off *offset)
{
int rc;
- ENTRY;
+
EXP_CHECK_DT_OP(exp, extent_calc);
rc = OBP(exp->exp_obd, extent_calc)(exp, md, cmd, offset);
- RETURN(rc);
+ return rc;
}
static inline struct dentry *
@@ -1198,10 +1156,9 @@ static inline int obd_statfs_async(struct obd_export *exp,
{
int rc = 0;
struct obd_device *obd;
- ENTRY;
if (exp == NULL || exp->exp_obd == NULL)
- RETURN(-EINVAL);
+ return -EINVAL;
obd = exp->exp_obd;
OBD_CHECK_DT_OP(obd, statfs, -EOPNOTSUPP);
@@ -1224,7 +1181,7 @@ static inline int obd_statfs_async(struct obd_export *exp,
if (oinfo->oi_cb_up)
oinfo->oi_cb_up(oinfo, 0);
}
- RETURN(rc);
+ return rc;
}
static inline int obd_statfs_rqset(struct obd_export *exp,
@@ -1234,11 +1191,10 @@ static inline int obd_statfs_rqset(struct obd_export *exp,
struct ptlrpc_request_set *set = NULL;
struct obd_info oinfo = { { { 0 } } };
int rc = 0;
- ENTRY;
set = ptlrpc_prep_set();
if (set == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
oinfo.oi_osfs = osfs;
oinfo.oi_flags = flags;
@@ -1246,7 +1202,7 @@ static inline int obd_statfs_rqset(struct obd_export *exp,
if (rc == 0)
rc = ptlrpc_set_wait(set);
ptlrpc_set_destroy(set);
- RETURN(rc);
+ return rc;
}
/* @max_age is the oldest time in jiffies that we accept using a cached data.
@@ -1258,10 +1214,9 @@ static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp,
{
int rc = 0;
struct obd_device *obd = exp->exp_obd;
- ENTRY;
if (obd == NULL)
- RETURN(-EINVAL);
+ return -EINVAL;
OBD_CHECK_DT_OP(obd, statfs, -EOPNOTSUPP);
OBD_COUNTER_INCREMENT(obd, statfs);
@@ -1286,7 +1241,7 @@ static inline int obd_statfs(const struct lu_env *env, struct obd_export *exp,
memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
spin_unlock(&obd->obd_osfs_lock);
}
- RETURN(rc);
+ return rc;
}
static inline int obd_sync_rqset(struct obd_export *exp, struct obd_info *oinfo,
@@ -1294,20 +1249,19 @@ static inline int obd_sync_rqset(struct obd_export *exp, struct obd_info *oinfo,
{
struct ptlrpc_request_set *set = NULL;
int rc;
- ENTRY;
OBD_CHECK_DT_OP(exp->exp_obd, sync, -EOPNOTSUPP);
EXP_COUNTER_INCREMENT(exp, sync);
set = ptlrpc_prep_set();
if (set == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
rc = OBP(exp->exp_obd, sync)(NULL, exp, oinfo, start, end, set);
if (rc == 0)
rc = ptlrpc_set_wait(set);
ptlrpc_set_destroy(set);
- RETURN(rc);
+ return rc;
}
static inline int obd_sync(const struct lu_env *env, struct obd_export *exp,
@@ -1315,13 +1269,12 @@ static inline int obd_sync(const struct lu_env *env, struct obd_export *exp,
struct ptlrpc_request_set *set)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(exp->exp_obd, sync, -EOPNOTSUPP);
EXP_COUNTER_INCREMENT(exp, sync);
rc = OBP(exp->exp_obd, sync)(env, exp, oinfo, start, end, set);
- RETURN(rc);
+ return rc;
}
static inline int obd_punch_rqset(struct obd_export *exp,
@@ -1330,20 +1283,19 @@ static inline int obd_punch_rqset(struct obd_export *exp,
{
struct ptlrpc_request_set *set = NULL;
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, punch);
EXP_COUNTER_INCREMENT(exp, punch);
set = ptlrpc_prep_set();
if (set == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
rc = OBP(exp->exp_obd, punch)(NULL, exp, oinfo, oti, set);
if (rc == 0)
rc = ptlrpc_set_wait(set);
ptlrpc_set_destroy(set);
- RETURN(rc);
+ return rc;
}
static inline int obd_punch(const struct lu_env *env, struct obd_export *exp,
@@ -1351,13 +1303,12 @@ static inline int obd_punch(const struct lu_env *env, struct obd_export *exp,
struct ptlrpc_request_set *rqset)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, punch);
EXP_COUNTER_INCREMENT(exp, punch);
rc = OBP(exp->exp_obd, punch)(env, exp, oinfo, oti, rqset);
- RETURN(rc);
+ return rc;
}
static inline int obd_brw(int cmd, struct obd_export *exp,
@@ -1365,7 +1316,6 @@ static inline int obd_brw(int cmd, struct obd_export *exp,
struct brw_page *pg, struct obd_trans_info *oti)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, brw);
EXP_COUNTER_INCREMENT(exp, brw);
@@ -1377,7 +1327,7 @@ static inline int obd_brw(int cmd, struct obd_export *exp,
}
rc = OBP(exp->exp_obd, brw)(cmd, exp, oinfo, oa_bufs, pg, oti);
- RETURN(rc);
+ return rc;
}
static inline int obd_preprw(const struct lu_env *env, int cmd,
@@ -1389,14 +1339,13 @@ static inline int obd_preprw(const struct lu_env *env, int cmd,
struct lustre_capa *capa)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, preprw);
EXP_COUNTER_INCREMENT(exp, preprw);
rc = OBP(exp->exp_obd, preprw)(env, cmd, exp, oa, objcount, obj, remote,
pages, local, oti, capa);
- RETURN(rc);
+ return rc;
}
static inline int obd_commitrw(const struct lu_env *env, int cmd,
@@ -1406,14 +1355,12 @@ static inline int obd_commitrw(const struct lu_env *env, int cmd,
struct niobuf_local *local,
struct obd_trans_info *oti, int rc)
{
- ENTRY;
-
EXP_CHECK_DT_OP(exp, commitrw);
EXP_COUNTER_INCREMENT(exp, commitrw);
rc = OBP(exp->exp_obd, commitrw)(env, cmd, exp, oa, objcount, obj,
rnb, pages, local, oti, rc);
- RETURN(rc);
+ return rc;
}
static inline int obd_merge_lvb(struct obd_export *exp,
@@ -1421,13 +1368,12 @@ static inline int obd_merge_lvb(struct obd_export *exp,
struct ost_lvb *lvb, int kms_only)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, merge_lvb);
EXP_COUNTER_INCREMENT(exp, merge_lvb);
rc = OBP(exp->exp_obd, merge_lvb)(exp, lsm, lvb, kms_only);
- RETURN(rc);
+ return rc;
}
static inline int obd_adjust_kms(struct obd_export *exp,
@@ -1435,26 +1381,24 @@ static inline int obd_adjust_kms(struct obd_export *exp,
int shrink)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, adjust_kms);
EXP_COUNTER_INCREMENT(exp, adjust_kms);
rc = OBP(exp->exp_obd, adjust_kms)(exp, lsm, size, shrink);
- RETURN(rc);
+ return rc;
}
static inline int obd_iocontrol(unsigned int cmd, struct obd_export *exp,
int len, void *karg, void *uarg)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, iocontrol);
EXP_COUNTER_INCREMENT(exp, iocontrol);
rc = OBP(exp->exp_obd, iocontrol)(cmd, exp, len, karg, uarg);
- RETURN(rc);
+ return rc;
}
static inline int obd_enqueue_rqset(struct obd_export *exp,
@@ -1463,20 +1407,19 @@ static inline int obd_enqueue_rqset(struct obd_export *exp,
{
struct ptlrpc_request_set *set = NULL;
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, enqueue);
EXP_COUNTER_INCREMENT(exp, enqueue);
set = ptlrpc_prep_set();
if (set == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
rc = OBP(exp->exp_obd, enqueue)(exp, oinfo, einfo, set);
if (rc == 0)
rc = ptlrpc_set_wait(set);
ptlrpc_set_destroy(set);
- RETURN(rc);
+ return rc;
}
static inline int obd_enqueue(struct obd_export *exp,
@@ -1485,13 +1428,12 @@ static inline int obd_enqueue(struct obd_export *exp,
struct ptlrpc_request_set *set)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, enqueue);
EXP_COUNTER_INCREMENT(exp, enqueue);
rc = OBP(exp->exp_obd, enqueue)(exp, oinfo, einfo, set);
- RETURN(rc);
+ return rc;
}
static inline int obd_change_cbdata(struct obd_export *exp,
@@ -1499,13 +1441,12 @@ static inline int obd_change_cbdata(struct obd_export *exp,
ldlm_iterator_t it, void *data)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, change_cbdata);
EXP_COUNTER_INCREMENT(exp, change_cbdata);
rc = OBP(exp->exp_obd, change_cbdata)(exp, lsm, it, data);
- RETURN(rc);
+ return rc;
}
static inline int obd_find_cbdata(struct obd_export *exp,
@@ -1513,13 +1454,12 @@ static inline int obd_find_cbdata(struct obd_export *exp,
ldlm_iterator_t it, void *data)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, find_cbdata);
EXP_COUNTER_INCREMENT(exp, find_cbdata);
rc = OBP(exp->exp_obd, find_cbdata)(exp, lsm, it, data);
- RETURN(rc);
+ return rc;
}
static inline int obd_cancel(struct obd_export *exp,
@@ -1527,13 +1467,12 @@ static inline int obd_cancel(struct obd_export *exp,
struct lustre_handle *lockh)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, cancel);
EXP_COUNTER_INCREMENT(exp, cancel);
rc = OBP(exp->exp_obd, cancel)(exp, ea, mode, lockh);
- RETURN(rc);
+ return rc;
}
static inline int obd_cancel_unused(struct obd_export *exp,
@@ -1542,13 +1481,12 @@ static inline int obd_cancel_unused(struct obd_export *exp,
void *opaque)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, cancel_unused);
EXP_COUNTER_INCREMENT(exp, cancel_unused);
rc = OBP(exp->exp_obd, cancel_unused)(exp, ea, flags, opaque);
- RETURN(rc);
+ return rc;
}
static inline int obd_pin(struct obd_export *exp, const struct lu_fid *fid,
@@ -1556,26 +1494,24 @@ static inline int obd_pin(struct obd_export *exp, const struct lu_fid *fid,
int flag)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, pin);
EXP_COUNTER_INCREMENT(exp, pin);
rc = OBP(exp->exp_obd, pin)(exp, fid, oc, handle, flag);
- RETURN(rc);
+ return rc;
}
static inline int obd_unpin(struct obd_export *exp,
struct obd_client_handle *handle, int flag)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, unpin);
EXP_COUNTER_INCREMENT(exp, unpin);
rc = OBP(exp->exp_obd, unpin)(exp, handle, flag);
- RETURN(rc);
+ return rc;
}
@@ -1583,30 +1519,26 @@ static inline void obd_import_event(struct obd_device *obd,
struct obd_import *imp,
enum obd_import_event event)
{
- ENTRY;
if (!obd) {
CERROR("NULL device\n");
- EXIT;
return;
}
if (obd->obd_set_up && OBP(obd, import_event)) {
OBD_COUNTER_INCREMENT(obd, import_event);
OBP(obd, import_event)(obd, imp, event);
}
- EXIT;
}
static inline int obd_llog_connect(struct obd_export *exp,
struct llogd_conn_body *body)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(exp->exp_obd, llog_connect, 0);
EXP_COUNTER_INCREMENT(exp, llog_connect);
rc = OBP(exp->exp_obd, llog_connect)(exp, body);
- RETURN(rc);
+ return rc;
}
@@ -1616,7 +1548,7 @@ static inline int obd_notify(struct obd_device *obd,
void *data)
{
int rc;
- ENTRY;
+
OBD_CHECK_DEV(obd);
/* the check for async_recov is a complete hack - I'm hereby
@@ -1625,17 +1557,17 @@ static inline int obd_notify(struct obd_device *obd,
by this point, and it needs to get them to execute mds_postrecov. */
if (!obd->obd_set_up && !obd->obd_async_recov) {
CDEBUG(D_HA, "obd %s not set up\n", obd->obd_name);
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (!OBP(obd, notify)) {
CDEBUG(D_HA, "obd %s has no notify handler\n", obd->obd_name);
- RETURN(-ENOSYS);
+ return -ENOSYS;
}
OBD_COUNTER_INCREMENT(obd, notify);
rc = OBP(obd, notify)(obd, watched, ev, data);
- RETURN(rc);
+ return rc;
}
static inline int obd_notify_observer(struct obd_device *observer,
@@ -1669,26 +1601,24 @@ static inline int obd_quotacheck(struct obd_export *exp,
struct obd_quotactl *oqctl)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, quotacheck);
EXP_COUNTER_INCREMENT(exp, quotacheck);
rc = OBP(exp->exp_obd, quotacheck)(exp->exp_obd, exp, oqctl);
- RETURN(rc);
+ return rc;
}
static inline int obd_quotactl(struct obd_export *exp,
struct obd_quotactl *oqctl)
{
int rc;
- ENTRY;
EXP_CHECK_DT_OP(exp, quotactl);
EXP_COUNTER_INCREMENT(exp, quotactl);
rc = OBP(exp->exp_obd, quotactl)(exp->exp_obd, exp, oqctl);
- RETURN(rc);
+ return rc;
}
static inline int obd_health_check(const struct lu_env *env,
@@ -1702,56 +1632,52 @@ static inline int obd_health_check(const struct lu_env *env,
* <0 on error
*/
int rc;
- ENTRY;
/* don't use EXP_CHECK_DT_OP, because NULL method is normal here */
if (obd == NULL || !OBT(obd)) {
CERROR("cleaned up obd\n");
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
if (!obd->obd_set_up || obd->obd_stopping)
- RETURN(0);
+ return 0;
if (!OBP(obd, health_check))
- RETURN(0);
+ return 0;
rc = OBP(obd, health_check)(env, obd);
- RETURN(rc);
+ return rc;
}
static inline int obd_register_observer(struct obd_device *obd,
struct obd_device *observer)
{
- ENTRY;
OBD_CHECK_DEV(obd);
down_write(&obd->obd_observer_link_sem);
if (obd->obd_observer && observer) {
up_write(&obd->obd_observer_link_sem);
- RETURN(-EALREADY);
+ return -EALREADY;
}
obd->obd_observer = observer;
up_write(&obd->obd_observer_link_sem);
- RETURN(0);
+ return 0;
}
static inline int obd_pin_observer(struct obd_device *obd,
struct obd_device **observer)
{
- ENTRY;
down_read(&obd->obd_observer_link_sem);
if (!obd->obd_observer) {
*observer = NULL;
up_read(&obd->obd_observer_link_sem);
- RETURN(-ENOENT);
+ return -ENOENT;
}
*observer = obd->obd_observer;
- RETURN(0);
+ return 0;
}
static inline int obd_unpin_observer(struct obd_device *obd)
{
- ENTRY;
up_read(&obd->obd_observer_link_sem);
- RETURN(0);
+ return 0;
}
#if 0
@@ -1760,52 +1686,48 @@ static inline int obd_register_page_removal_cb(struct obd_export *exp,
obd_pin_extent_cb pin_cb)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(exp->exp_obd, register_page_removal_cb, 0);
OBD_COUNTER_INCREMENT(exp->exp_obd, register_page_removal_cb);
rc = OBP(exp->exp_obd, register_page_removal_cb)(exp, cb, pin_cb);
- RETURN(rc);
+ return rc;
}
static inline int obd_unregister_page_removal_cb(struct obd_export *exp,
obd_page_removal_cb_t cb)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(exp->exp_obd, unregister_page_removal_cb, 0);
OBD_COUNTER_INCREMENT(exp->exp_obd, unregister_page_removal_cb);
rc = OBP(exp->exp_obd, unregister_page_removal_cb)(exp, cb);
- RETURN(rc);
+ return rc;
}
static inline int obd_register_lock_cancel_cb(struct obd_export *exp,
obd_lock_cancel_cb cb)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(exp->exp_obd, register_lock_cancel_cb, 0);
OBD_COUNTER_INCREMENT(exp->exp_obd, register_lock_cancel_cb);
rc = OBP(exp->exp_obd, register_lock_cancel_cb)(exp, cb);
- RETURN(rc);
+ return rc;
}
static inline int obd_unregister_lock_cancel_cb(struct obd_export *exp,
obd_lock_cancel_cb cb)
{
int rc;
- ENTRY;
OBD_CHECK_DT_OP(exp->exp_obd, unregister_lock_cancel_cb, 0);
OBD_COUNTER_INCREMENT(exp->exp_obd, unregister_lock_cancel_cb);
rc = OBP(exp->exp_obd, unregister_lock_cancel_cb)(exp, cb);
- RETURN(rc);
+ return rc;
}
#endif
@@ -1814,34 +1736,33 @@ static inline int md_getstatus(struct obd_export *exp,
struct lu_fid *fid, struct obd_capa **pc)
{
int rc;
- ENTRY;
EXP_CHECK_MD_OP(exp, getstatus);
EXP_MD_COUNTER_INCREMENT(exp, getstatus);
rc = MDP(exp->exp_obd, getstatus)(exp, fid, pc);
- RETURN(rc);
+ return rc;
}
static inline int md_getattr(struct obd_export *exp, struct md_op_data *op_data,
struct ptlrpc_request **request)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, getattr);
EXP_MD_COUNTER_INCREMENT(exp, getattr);
rc = MDP(exp->exp_obd, getattr)(exp, op_data, request);
- RETURN(rc);
+ return rc;
}
static inline int md_null_inode(struct obd_export *exp,
const struct lu_fid *fid)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, null_inode);
EXP_MD_COUNTER_INCREMENT(exp, null_inode);
rc = MDP(exp->exp_obd, null_inode)(exp, fid);
- RETURN(rc);
+ return rc;
}
static inline int md_find_cbdata(struct obd_export *exp,
@@ -1849,11 +1770,11 @@ static inline int md_find_cbdata(struct obd_export *exp,
ldlm_iterator_t it, void *data)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, find_cbdata);
EXP_MD_COUNTER_INCREMENT(exp, find_cbdata);
rc = MDP(exp->exp_obd, find_cbdata)(exp, fid, it, data);
- RETURN(rc);
+ return rc;
}
static inline int md_close(struct obd_export *exp, struct md_op_data *op_data,
@@ -1861,11 +1782,11 @@ static inline int md_close(struct obd_export *exp, struct md_op_data *op_data,
struct ptlrpc_request **request)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, close);
EXP_MD_COUNTER_INCREMENT(exp, close);
rc = MDP(exp->exp_obd, close)(exp, op_data, mod, request);
- RETURN(rc);
+ return rc;
}
static inline int md_create(struct obd_export *exp, struct md_op_data *op_data,
@@ -1874,12 +1795,12 @@ static inline int md_create(struct obd_export *exp, struct md_op_data *op_data,
struct ptlrpc_request **request)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, create);
EXP_MD_COUNTER_INCREMENT(exp, create);
rc = MDP(exp->exp_obd, create)(exp, op_data, data, datalen, mode,
uid, gid, cap_effective, rdev, request);
- RETURN(rc);
+ return rc;
}
static inline int md_done_writing(struct obd_export *exp,
@@ -1887,11 +1808,11 @@ static inline int md_done_writing(struct obd_export *exp,
struct md_open_data *mod)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, done_writing);
EXP_MD_COUNTER_INCREMENT(exp, done_writing);
rc = MDP(exp->exp_obd, done_writing)(exp, op_data, mod);
- RETURN(rc);
+ return rc;
}
static inline int md_enqueue(struct obd_export *exp,
@@ -1904,12 +1825,12 @@ static inline int md_enqueue(struct obd_export *exp,
int extra_lock_flags)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, enqueue);
EXP_MD_COUNTER_INCREMENT(exp, enqueue);
rc = MDP(exp->exp_obd, enqueue)(exp, einfo, it, op_data, lockh,
lmm, lmmsize, req, extra_lock_flags);
- RETURN(rc);
+ return rc;
}
static inline int md_getattr_name(struct obd_export *exp,
@@ -1917,11 +1838,11 @@ static inline int md_getattr_name(struct obd_export *exp,
struct ptlrpc_request **request)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, getattr_name);
EXP_MD_COUNTER_INCREMENT(exp, getattr_name);
rc = MDP(exp->exp_obd, getattr_name)(exp, op_data, request);
- RETURN(rc);
+ return rc;
}
static inline int md_intent_lock(struct obd_export *exp,
@@ -1932,24 +1853,24 @@ static inline int md_intent_lock(struct obd_export *exp,
__u64 extra_lock_flags)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, intent_lock);
EXP_MD_COUNTER_INCREMENT(exp, intent_lock);
rc = MDP(exp->exp_obd, intent_lock)(exp, op_data, lmm, lmmsize,
it, lookup_flags, reqp, cb_blocking,
extra_lock_flags);
- RETURN(rc);
+ return rc;
}
static inline int md_link(struct obd_export *exp, struct md_op_data *op_data,
struct ptlrpc_request **request)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, link);
EXP_MD_COUNTER_INCREMENT(exp, link);
rc = MDP(exp->exp_obd, link)(exp, op_data, request);
- RETURN(rc);
+ return rc;
}
static inline int md_rename(struct obd_export *exp, struct md_op_data *op_data,
@@ -1957,12 +1878,12 @@ static inline int md_rename(struct obd_export *exp, struct md_op_data *op_data,
int newlen, struct ptlrpc_request **request)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, rename);
EXP_MD_COUNTER_INCREMENT(exp, rename);
rc = MDP(exp->exp_obd, rename)(exp, op_data, old, oldlen, new,
newlen, request);
- RETURN(rc);
+ return rc;
}
static inline int md_is_subdir(struct obd_export *exp,
@@ -1971,11 +1892,11 @@ static inline int md_is_subdir(struct obd_export *exp,
struct ptlrpc_request **request)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, is_subdir);
EXP_MD_COUNTER_INCREMENT(exp, is_subdir);
rc = MDP(exp->exp_obd, is_subdir)(exp, pfid, cfid, request);
- RETURN(rc);
+ return rc;
}
static inline int md_setattr(struct obd_export *exp, struct md_op_data *op_data,
@@ -1984,23 +1905,23 @@ static inline int md_setattr(struct obd_export *exp, struct md_op_data *op_data,
struct md_open_data **mod)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, setattr);
EXP_MD_COUNTER_INCREMENT(exp, setattr);
rc = MDP(exp->exp_obd, setattr)(exp, op_data, ea, ealen,
ea2, ea2len, request, mod);
- RETURN(rc);
+ return rc;
}
static inline int md_sync(struct obd_export *exp, const struct lu_fid *fid,
struct obd_capa *oc, struct ptlrpc_request **request)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, sync);
EXP_MD_COUNTER_INCREMENT(exp, sync);
rc = MDP(exp->exp_obd, sync)(exp, fid, oc, request);
- RETURN(rc);
+ return rc;
}
static inline int md_readpage(struct obd_export *exp, struct md_op_data *opdata,
@@ -2008,22 +1929,22 @@ static inline int md_readpage(struct obd_export *exp, struct md_op_data *opdata,
struct ptlrpc_request **request)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, readpage);
EXP_MD_COUNTER_INCREMENT(exp, readpage);
rc = MDP(exp->exp_obd, readpage)(exp, opdata, pages, request);
- RETURN(rc);
+ return rc;
}
static inline int md_unlink(struct obd_export *exp, struct md_op_data *op_data,
struct ptlrpc_request **request)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, unlink);
EXP_MD_COUNTER_INCREMENT(exp, unlink);
rc = MDP(exp->exp_obd, unlink)(exp, op_data, request);
- RETURN(rc);
+ return rc;
}
static inline int md_get_lustre_md(struct obd_export *exp,
@@ -2032,19 +1953,17 @@ static inline int md_get_lustre_md(struct obd_export *exp,
struct obd_export *md_exp,
struct lustre_md *md)
{
- ENTRY;
EXP_CHECK_MD_OP(exp, get_lustre_md);
EXP_MD_COUNTER_INCREMENT(exp, get_lustre_md);
- RETURN(MDP(exp->exp_obd, get_lustre_md)(exp, req, dt_exp, md_exp, md));
+ return MDP(exp->exp_obd, get_lustre_md)(exp, req, dt_exp, md_exp, md);
}
static inline int md_free_lustre_md(struct obd_export *exp,
struct lustre_md *md)
{
- ENTRY;
EXP_CHECK_MD_OP(exp, free_lustre_md);
EXP_MD_COUNTER_INCREMENT(exp, free_lustre_md);
- RETURN(MDP(exp->exp_obd, free_lustre_md)(exp, md));
+ return MDP(exp->exp_obd, free_lustre_md)(exp, md);
}
static inline int md_setxattr(struct obd_export *exp,
@@ -2054,12 +1973,11 @@ static inline int md_setxattr(struct obd_export *exp,
int output_size, int flags, __u32 suppgid,
struct ptlrpc_request **request)
{
- ENTRY;
EXP_CHECK_MD_OP(exp, setxattr);
EXP_MD_COUNTER_INCREMENT(exp, setxattr);
- RETURN(MDP(exp->exp_obd, setxattr)(exp, fid, oc, valid, name, input,
+ return MDP(exp->exp_obd, setxattr)(exp, fid, oc, valid, name, input,
input_size, output_size, flags,
- suppgid, request));
+ suppgid, request);
}
static inline int md_getxattr(struct obd_export *exp,
@@ -2069,40 +1987,36 @@ static inline int md_getxattr(struct obd_export *exp,
int output_size, int flags,
struct ptlrpc_request **request)
{
- ENTRY;
EXP_CHECK_MD_OP(exp, getxattr);
EXP_MD_COUNTER_INCREMENT(exp, getxattr);
- RETURN(MDP(exp->exp_obd, getxattr)(exp, fid, oc, valid, name, input,
+ return MDP(exp->exp_obd, getxattr)(exp, fid, oc, valid, name, input,
input_size, output_size, flags,
- request));
+ request);
}
static inline int md_set_open_replay_data(struct obd_export *exp,
struct obd_client_handle *och,
struct ptlrpc_request *open_req)
{
- ENTRY;
EXP_CHECK_MD_OP(exp, set_open_replay_data);
EXP_MD_COUNTER_INCREMENT(exp, set_open_replay_data);
- RETURN(MDP(exp->exp_obd, set_open_replay_data)(exp, och, open_req));
+ return MDP(exp->exp_obd, set_open_replay_data)(exp, och, open_req);
}
static inline int md_clear_open_replay_data(struct obd_export *exp,
struct obd_client_handle *och)
{
- ENTRY;
EXP_CHECK_MD_OP(exp, clear_open_replay_data);
EXP_MD_COUNTER_INCREMENT(exp, clear_open_replay_data);
- RETURN(MDP(exp->exp_obd, clear_open_replay_data)(exp, och));
+ return MDP(exp->exp_obd, clear_open_replay_data)(exp, och);
}
static inline int md_set_lock_data(struct obd_export *exp,
__u64 *lockh, void *data, __u64 *bits)
{
- ENTRY;
EXP_CHECK_MD_OP(exp, set_lock_data);
EXP_MD_COUNTER_INCREMENT(exp, set_lock_data);
- RETURN(MDP(exp->exp_obd, set_lock_data)(exp, lockh, data, bits));
+ return MDP(exp->exp_obd, set_lock_data)(exp, lockh, data, bits);
}
static inline int md_cancel_unused(struct obd_export *exp,
@@ -2113,14 +2027,13 @@ static inline int md_cancel_unused(struct obd_export *exp,
void *opaque)
{
int rc;
- ENTRY;
EXP_CHECK_MD_OP(exp, cancel_unused);
EXP_MD_COUNTER_INCREMENT(exp, cancel_unused);
rc = MDP(exp->exp_obd, cancel_unused)(exp, fid, policy, mode,
flags, opaque);
- RETURN(rc);
+ return rc;
}
static inline ldlm_mode_t md_lock_match(struct obd_export *exp, __u64 flags,
@@ -2130,21 +2043,19 @@ static inline ldlm_mode_t md_lock_match(struct obd_export *exp, __u64 flags,
ldlm_mode_t mode,
struct lustre_handle *lockh)
{
- ENTRY;
EXP_CHECK_MD_OP(exp, lock_match);
EXP_MD_COUNTER_INCREMENT(exp, lock_match);
- RETURN(MDP(exp->exp_obd, lock_match)(exp, flags, fid, type,
- policy, mode, lockh));
+ return MDP(exp->exp_obd, lock_match)(exp, flags, fid, type,
+ policy, mode, lockh);
}
static inline int md_init_ea_size(struct obd_export *exp, int easize,
int def_asize, int cookiesize)
{
- ENTRY;
EXP_CHECK_MD_OP(exp, init_ea_size);
EXP_MD_COUNTER_INCREMENT(exp, init_ea_size);
- RETURN(MDP(exp->exp_obd, init_ea_size)(exp, easize, def_asize,
- cookiesize));
+ return MDP(exp->exp_obd, init_ea_size)(exp, easize, def_asize,
+ cookiesize);
}
static inline int md_get_remote_perm(struct obd_export *exp,
@@ -2152,22 +2063,21 @@ static inline int md_get_remote_perm(struct obd_export *exp,
struct obd_capa *oc, __u32 suppgid,
struct ptlrpc_request **request)
{
- ENTRY;
EXP_CHECK_MD_OP(exp, get_remote_perm);
EXP_MD_COUNTER_INCREMENT(exp, get_remote_perm);
- RETURN(MDP(exp->exp_obd, get_remote_perm)(exp, fid, oc, suppgid,
- request));
+ return MDP(exp->exp_obd, get_remote_perm)(exp, fid, oc, suppgid,
+ request);
}
static inline int md_renew_capa(struct obd_export *exp, struct obd_capa *ocapa,
renew_capa_cb_t cb)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, renew_capa);
EXP_MD_COUNTER_INCREMENT(exp, renew_capa);
rc = MDP(exp->exp_obd, renew_capa)(exp, ocapa, cb);
- RETURN(rc);
+ return rc;
}
static inline int md_unpack_capa(struct obd_export *exp,
@@ -2176,11 +2086,11 @@ static inline int md_unpack_capa(struct obd_export *exp,
struct obd_capa **oc)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, unpack_capa);
EXP_MD_COUNTER_INCREMENT(exp, unpack_capa);
rc = MDP(exp->exp_obd, unpack_capa)(exp, req, field, oc);
- RETURN(rc);
+ return rc;
}
static inline int md_intent_getattr_async(struct obd_export *exp,
@@ -2188,11 +2098,11 @@ static inline int md_intent_getattr_async(struct obd_export *exp,
struct ldlm_enqueue_info *einfo)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, intent_getattr_async);
EXP_MD_COUNTER_INCREMENT(exp, intent_getattr_async);
rc = MDP(exp->exp_obd, intent_getattr_async)(exp, minfo, einfo);
- RETURN(rc);
+ return rc;
}
static inline int md_revalidate_lock(struct obd_export *exp,
@@ -2200,11 +2110,11 @@ static inline int md_revalidate_lock(struct obd_export *exp,
struct lu_fid *fid, __u64 *bits)
{
int rc;
- ENTRY;
+
EXP_CHECK_MD_OP(exp, revalidate_lock);
EXP_MD_COUNTER_INCREMENT(exp, revalidate_lock);
rc = MDP(exp->exp_obd, revalidate_lock)(exp, it, fid, bits);
- RETURN(rc);
+ return rc;
}
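
The obd.h hunks above all follow one mechanical pattern: the libcfs ENTRY/EXIT/RETURN tracing macros are removed and each wrapper returns its result directly. Roughly, those macros emitted function entry/exit messages through the libcfs debug machinery before returning. A minimal standalone sketch of the before/after shape, with the macro bodies reduced to printf() stand-ins (the definitions here are illustrative assumptions, not the real libcfs ones):

/* Userspace approximation of the old pattern: ENTRY/RETURN stand in for the
 * libcfs tracing macros that the hunks above delete. */
#include <stdio.h>

#define ENTRY      printf("enter %s\n", __func__)
#define RETURN(rc) do { printf("leave %s\n", __func__); return (rc); } while (0)

static int old_style(int x)
{
        ENTRY;
        if (x < 0)
                RETURN(-1);
        RETURN(x * 2);
}

/* New pattern after the conversion: identical control flow, plain returns. */
static int new_style(int x)
{
        if (x < 0)
                return -1;
        return x * 2;
}

int main(void)
{
        printf("%d %d\n", old_style(3), new_style(3));
        return 0;
}

The observable behaviour of each wrapper is unchanged; only the per-call tracing side effect disappears.
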
diff --git a/drivers/staging/lustre/lustre/include/obd_lov.h b/drivers/staging/lustre/lustre/include/obd_lov.h
index d82f3341d0a..235718b366d 100644
--- a/drivers/staging/lustre/lustre/include/obd_lov.h
+++ b/drivers/staging/lustre/lustre/include/obd_lov.h
@@ -44,16 +44,6 @@ static inline int lov_stripe_md_size(__u16 stripes)
return sizeof(struct lov_stripe_md) + stripes*sizeof(struct lov_oinfo*);
}
-static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
-{
- if (lmm_magic == LOV_MAGIC_V3)
- return sizeof(struct lov_mds_md_v3) +
- stripes * sizeof(struct lov_ost_data_v1);
- else
- return sizeof(struct lov_mds_md_v1) +
- stripes * sizeof(struct lov_ost_data_v1);
-}
-
struct lov_version_size {
__u32 lvs_magic;
size_t lvs_lmm_size;
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index b5d40afc359..03e6133ef50 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -470,6 +470,7 @@ int obd_alloc_fail(const void *ptr, const char *name, const char *type,
#define OBD_FAIL_LFSCK_DELAY3 0x1602
#define OBD_FAIL_LFSCK_LINKEA_CRASH 0x1603
#define OBD_FAIL_LFSCK_LINKEA_MORE 0x1604
+#define OBD_FAIL_LFSCK_LINKEA_MORE2 0x1605
#define OBD_FAIL_LFSCK_FATAL1 0x1608
#define OBD_FAIL_LFSCK_FATAL2 0x1609
#define OBD_FAIL_LFSCK_CRASH 0x160a
diff --git a/drivers/staging/lustre/lustre/lclient/glimpse.c b/drivers/staging/lustre/lustre/lclient/glimpse.c
index 7f3974be1f9..7bbca4bf6b8 100644
--- a/drivers/staging/lustre/lustre/lclient/glimpse.c
+++ b/drivers/staging/lustre/lustre/lclient/glimpse.c
@@ -93,7 +93,6 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
struct cl_lock *lock;
int result;
- ENTRY;
result = 0;
if (!(lli->lli_flags & LLIF_MDS_SIZE_LOCK)) {
CDEBUG(D_DLMTRACE, "Glimpsing inode "DFID"\n", PFID(fid));
@@ -131,10 +130,10 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
cio->cui_glimpse = 0;
if (lock == NULL)
- RETURN(0);
+ return 0;
if (IS_ERR(lock))
- RETURN(PTR_ERR(lock));
+ return PTR_ERR(lock);
LASSERT(agl == 0);
result = cl_wait(env, lock);
@@ -159,7 +158,7 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
}
}
- RETURN(result);
+ return result;
}
static int cl_io_get(struct inode *inode, struct lu_env **envout,
@@ -203,8 +202,6 @@ int cl_glimpse_size0(struct inode *inode, int agl)
int result;
int refcheck;
- ENTRY;
-
result = cl_io_get(inode, &env, &io, &refcheck);
if (result > 0) {
again:
@@ -226,7 +223,7 @@ int cl_glimpse_size0(struct inode *inode, int agl)
goto again;
cl_env_put(env, &refcheck);
}
- RETURN(result);
+ return result;
}
int cl_local_size(struct inode *inode)
@@ -240,14 +237,12 @@ int cl_local_size(struct inode *inode)
int result;
int refcheck;
- ENTRY;
-
if (!cl_i2info(inode)->lli_has_smd)
- RETURN(0);
+ return 0;
result = cl_io_get(inode, &env, &io, &refcheck);
if (result <= 0)
- RETURN(result);
+ return result;
clob = io->ci_obj;
result = cl_io_init(env, io, CIT_MISC, clob);
@@ -270,5 +265,5 @@ int cl_local_size(struct inode *inode)
}
cl_io_fini(env, io);
cl_env_put(env, &refcheck);
- RETURN(result);
+ return result;
}
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
index 4a0166687f0..8ff38c64b7a 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
@@ -169,7 +169,6 @@ int ccc_device_init(const struct lu_env *env, struct lu_device *d,
{
struct ccc_device *vdv;
int rc;
- ENTRY;
vdv = lu2ccc_dev(d);
vdv->cdv_next = lu2cl_dev(next);
@@ -182,7 +181,7 @@ int ccc_device_init(const struct lu_env *env, struct lu_device *d,
lu_device_get(next);
lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
}
- RETURN(rc);
+ return rc;
}
struct lu_device *ccc_device_fini(const struct lu_env *env,
@@ -201,11 +200,10 @@ struct lu_device *ccc_device_alloc(const struct lu_env *env,
struct lu_device *lud;
struct cl_site *site;
int rc;
- ENTRY;
OBD_ALLOC_PTR(vdv);
if (vdv == NULL)
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
lud = &vdv->cdv_cl.cd_lu_dev;
cl_device_init(&vdv->cdv_cl, t);
@@ -228,7 +226,7 @@ struct lu_device *ccc_device_alloc(const struct lu_env *env,
ccc_device_free(env, lud);
lud = ERR_PTR(rc);
}
- RETURN(lud);
+ return lud;
}
struct lu_device *ccc_device_free(const struct lu_env *env,
@@ -418,7 +416,6 @@ int ccc_object_glimpse(const struct lu_env *env,
{
struct inode *inode = ccc_object_inode(obj);
- ENTRY;
lvb->lvb_mtime = cl_inode_mtime(inode);
lvb->lvb_atime = cl_inode_atime(inode);
lvb->lvb_ctime = cl_inode_ctime(inode);
@@ -429,7 +426,7 @@ int ccc_object_glimpse(const struct lu_env *env,
*/
if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
lvb->lvb_blocks = dirty_cnt(inode);
- RETURN(0);
+ return 0;
}
@@ -479,8 +476,6 @@ int ccc_page_is_under_lock(const struct lu_env *env,
int result;
- ENTRY;
-
if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
io->ci_type == CIT_FAULT) {
if (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
@@ -495,7 +490,7 @@ int ccc_page_is_under_lock(const struct lu_env *env,
}
} else
result = 0;
- RETURN(result);
+ return result;
}
int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice)
@@ -559,9 +554,8 @@ int ccc_transient_page_prep(const struct lu_env *env,
const struct cl_page_slice *slice,
struct cl_io *unused)
{
- ENTRY;
/* transient page should always be sent. */
- RETURN(0);
+ return 0;
}
/*****************************************************************************
@@ -623,7 +617,6 @@ int ccc_lock_fits_into(const struct lu_env *env,
const struct ccc_io *cio = ccc_env_io(env);
int result;
- ENTRY;
/*
* Work around DLM peculiarity: it assumes that glimpse
* (LDLM_FL_HAS_INTENT) lock is always LCK_PR, and returns reads lock
@@ -642,7 +635,7 @@ int ccc_lock_fits_into(const struct lu_env *env,
result = lock->cll_state >= CLS_ENQUEUED;
else
result = 1;
- RETURN(result);
+ return result;
}
/**
@@ -655,7 +648,6 @@ void ccc_lock_state(const struct lu_env *env,
enum cl_lock_state state)
{
struct cl_lock *lock = slice->cls_lock;
- ENTRY;
/*
* Refresh inode attributes when the lock is moving into CLS_HELD
@@ -682,7 +674,6 @@ void ccc_lock_state(const struct lu_env *env,
lock->cll_descr.cld_end == CL_PAGE_EOF)
cl_merge_lvb(env, inode);
}
- EXIT;
}
/*****************************************************************************
@@ -707,7 +698,6 @@ int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
struct cl_object *obj = io->ci_obj;
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- ENTRY;
CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
@@ -725,7 +715,7 @@ int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
descr->cld_enq_flags = enqflags;
cl_io_lock_add(env, io, &cio->cui_link);
- RETURN(0);
+ return 0;
}
void ccc_io_update_iov(const struct lu_env *env,
@@ -986,11 +976,9 @@ int cl_setattr_ost(struct inode *inode, const struct iattr *attr,
int result;
int refcheck;
- ENTRY;
-
env = cl_env_get(&refcheck);
if (IS_ERR(env))
- RETURN(PTR_ERR(env));
+ return PTR_ERR(env);
io = ccc_env_thread_io(env);
io->ci_obj = cl_i2info(inode)->lli_clob;
@@ -1019,7 +1007,7 @@ again:
if (unlikely(io->ci_need_restart))
goto again;
cl_env_put(env, &refcheck);
- RETURN(result);
+ return result;
}
/*****************************************************************************
@@ -1166,7 +1154,7 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
* locked by I_NEW bit.
*/
lli->lli_clob = clob;
- lli->lli_has_smd = md->lsm != NULL;
+ lli->lli_has_smd = lsm_has_objects(md->lsm);
lu_object_ref_add(&clob->co_lu, "inode", inode);
} else
result = PTR_ERR(clob);
@@ -1284,9 +1272,9 @@ __u16 ll_dirent_type_get(struct lu_dirent *ent)
__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
{
if (BITS_PER_LONG == 32 || api32)
- RETURN(fid_flatten32(fid));
+ return fid_flatten32(fid);
else
- RETURN(fid_flatten(fid));
+ return fid_flatten(fid);
}
/**
@@ -1295,15 +1283,14 @@ __u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
__u32 cl_fid_build_gen(const struct lu_fid *fid)
{
__u32 gen;
- ENTRY;
if (fid_is_igif(fid)) {
gen = lu_igif_gen(fid);
- RETURN(gen);
+ return gen;
}
gen = (fid_flatten(fid) >> 32);
- RETURN(gen);
+ return gen;
}
/* lsm is unreliable after hsm implementation as layout can be changed at
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c b/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
index 8ecbef92753..2b4dbeebcd5 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
+++ b/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
@@ -57,12 +57,11 @@ int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp)
int rc, easize, def_easize, cookiesize;
struct lov_desc desc;
__u16 stripes;
- ENTRY;
rc = obd_get_info(NULL, dt_exp, sizeof(KEY_LOVDESC), KEY_LOVDESC,
&valsize, &desc, NULL);
if (rc)
- RETURN(rc);
+ return rc;
stripes = min(desc.ld_tgt_count, (__u32)LOV_MAX_STRIPE_COUNT);
lsm.lsm_stripe_count = stripes;
@@ -77,7 +76,7 @@ int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp)
easize, cookiesize);
rc = md_init_ea_size(md_exp, easize, def_easize, cookiesize);
- RETURN(rc);
+ return rc;
}
/**
@@ -95,7 +94,6 @@ int cl_ocd_update(struct obd_device *host,
__u64 flags;
int result;
- ENTRY;
if (!strcmp(watched->obd_type->typ_name, LUSTRE_OSC_NAME)) {
cli = &watched->u.cli;
lco = owner;
@@ -116,7 +114,7 @@ int cl_ocd_update(struct obd_device *host,
watched->obd_name);
result = -EINVAL;
}
- RETURN(result);
+ return result;
}
#define GROUPLOCK_SCOPE "grouplock"
diff --git a/drivers/staging/lustre/lustre/ldlm/interval_tree.c b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
index ce90c7e3c48..c65b13c800f 100644
--- a/drivers/staging/lustre/lustre/ldlm/interval_tree.c
+++ b/drivers/staging/lustre/lustre/ldlm/interval_tree.c
@@ -133,53 +133,45 @@ for (node = interval_last(root); node != NULL; \
static struct interval_node *interval_first(struct interval_node *node)
{
- ENTRY;
-
if (!node)
- RETURN(NULL);
+ return NULL;
while (node->in_left)
node = node->in_left;
- RETURN(node);
+ return node;
}
static struct interval_node *interval_last(struct interval_node *node)
{
- ENTRY;
-
if (!node)
- RETURN(NULL);
+ return NULL;
while (node->in_right)
node = node->in_right;
- RETURN(node);
+ return node;
}
static struct interval_node *interval_next(struct interval_node *node)
{
- ENTRY;
-
if (!node)
- RETURN(NULL);
+ return NULL;
if (node->in_right)
- RETURN(interval_first(node->in_right));
+ return interval_first(node->in_right);
while (node->in_parent && node_is_right_child(node))
node = node->in_parent;
- RETURN(node->in_parent);
+ return node->in_parent;
}
static struct interval_node *interval_prev(struct interval_node *node)
{
- ENTRY;
-
if (!node)
- RETURN(NULL);
+ return NULL;
if (node->in_left)
- RETURN(interval_last(node->in_left));
+ return interval_last(node->in_left);
while (node->in_parent && node_is_left_child(node))
node = node->in_parent;
- RETURN(node->in_parent);
+ return node->in_parent;
}
enum interval_iter interval_iterate(struct interval_node *root,
@@ -188,7 +180,6 @@ enum interval_iter interval_iterate(struct interval_node *root,
{
struct interval_node *node;
enum interval_iter rc = INTERVAL_ITER_CONT;
- ENTRY;
interval_for_each(node, root) {
rc = func(node, data);
@@ -196,7 +187,7 @@ enum interval_iter interval_iterate(struct interval_node *root,
break;
}
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(interval_iterate);
@@ -206,7 +197,6 @@ enum interval_iter interval_iterate_reverse(struct interval_node *root,
{
struct interval_node *node;
enum interval_iter rc = INTERVAL_ITER_CONT;
- ENTRY;
interval_for_each_reverse(node, root) {
rc = func(node, data);
@@ -214,7 +204,7 @@ enum interval_iter interval_iterate_reverse(struct interval_node *root,
break;
}
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(interval_iterate_reverse);
@@ -225,7 +215,6 @@ struct interval_node *interval_find(struct interval_node *root,
{
struct interval_node *walk = root;
int rc;
- ENTRY;
while (walk) {
rc = extent_compare(ex, &walk->in_extent);
@@ -237,7 +226,7 @@ struct interval_node *interval_find(struct interval_node *root,
walk = walk->in_right;
}
- RETURN(walk);
+ return walk;
}
EXPORT_SYMBOL(interval_find);
@@ -326,7 +315,6 @@ static void interval_insert_color(struct interval_node *node,
struct interval_node **root)
{
struct interval_node *parent, *gparent;
- ENTRY;
while ((parent = node->in_parent) && node_is_red(parent)) {
gparent = parent->in_parent;
@@ -373,7 +361,6 @@ static void interval_insert_color(struct interval_node *node,
}
(*root)->in_color = INTERVAL_BLACK;
- EXIT;
}
struct interval_node *interval_insert(struct interval_node *node,
@@ -381,14 +368,13 @@ struct interval_node *interval_insert(struct interval_node *node,
{
struct interval_node **p, *parent = NULL;
- ENTRY;
LASSERT(!interval_is_intree(node));
p = root;
while (*p) {
parent = *p;
if (node_equal(parent, node))
- RETURN(parent);
+ return parent;
/* max_high field must be updated after each iteration */
if (parent->in_max_high < interval_high(node))
@@ -409,7 +395,7 @@ struct interval_node *interval_insert(struct interval_node *node,
interval_insert_color(node, root);
node->in_intree = 1;
- RETURN(NULL);
+ return NULL;
}
EXPORT_SYMBOL(interval_insert);
@@ -423,7 +409,6 @@ static void interval_erase_color(struct interval_node *node,
struct interval_node **root)
{
struct interval_node *tmp;
- ENTRY;
while (node_is_black_or_0(node) && node != *root) {
if (parent->in_left == node) {
@@ -490,7 +475,6 @@ static void interval_erase_color(struct interval_node *node,
}
if (node)
node->in_color = INTERVAL_BLACK;
- EXIT;
}
/*
@@ -501,7 +485,6 @@ static void update_maxhigh(struct interval_node *node,
__u64 old_maxhigh)
{
__u64 left_max, right_max;
- ENTRY;
while (node) {
left_max = node->in_left ? node->in_left->in_max_high : 0;
@@ -513,7 +496,6 @@ static void update_maxhigh(struct interval_node *node,
break;
node = node->in_parent;
}
- EXIT;
}
void interval_erase(struct interval_node *node,
@@ -521,7 +503,6 @@ void interval_erase(struct interval_node *node,
{
struct interval_node *child, *parent;
int color;
- ENTRY;
LASSERT(interval_is_intree(node));
node->in_intree = 0;
@@ -586,7 +567,6 @@ void interval_erase(struct interval_node *node,
color:
if (color == INTERVAL_BLACK)
interval_erase_color(child, parent, root);
- EXIT;
}
EXPORT_SYMBOL(interval_erase);
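
interval_first()/interval_last()/interval_next()/interval_prev() above are the usual in-order traversal helpers for a binary search tree with parent pointers; the hunks only strip the tracing macros around them. A self-contained sketch of the successor walk that interval_next() performs (simplified node type; the field names are assumptions for the example, not the Lustre ones):

#include <stdio.h>

struct node {
        struct node *left, *right, *parent;
        int key;
};

static struct node *subtree_first(struct node *n)
{
        while (n && n->left)
                n = n->left;
        return n;
}

static struct node *successor(struct node *n)
{
        if (!n)
                return NULL;
        if (n->right)
                return subtree_first(n->right);        /* leftmost of right subtree */
        while (n->parent && n == n->parent->right)     /* climb until we leave a left child */
                n = n->parent;
        return n->parent;
}

int main(void)
{
        /* tiny tree: b(2) with children a(1) and c(3) */
        struct node a = { .key = 1 }, b = { .key = 2 }, c = { .key = 3 };

        b.left = &a;   b.right = &c;
        a.parent = &b; c.parent = &b;

        for (struct node *n = subtree_first(&b); n; n = successor(n))
                printf("%d ", n->key);
        printf("\n");
        return 0;
}

Run, the loop prints the keys in ascending order, which is how interval_for_each() visits the tree.
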
diff --git a/drivers/staging/lustre/lustre/ldlm/l_lock.c b/drivers/staging/lustre/lustre/ldlm/l_lock.c
index 853409aa945..32f4d52b536 100644
--- a/drivers/staging/lustre/lustre/ldlm/l_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/l_lock.c
@@ -51,12 +51,12 @@
struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock)
{
/* on server-side resource of lock doesn't change */
- if (!lock->l_ns_srv)
+ if ((lock->l_flags & LDLM_FL_NS_SRV) == 0)
spin_lock(&lock->l_lock);
lock_res(lock->l_resource);
- lock->l_res_locked = 1;
+ lock->l_flags |= LDLM_FL_RES_LOCKED;
return lock->l_resource;
}
EXPORT_SYMBOL(lock_res_and_lock);
@@ -67,10 +67,10 @@ EXPORT_SYMBOL(lock_res_and_lock);
void unlock_res_and_lock(struct ldlm_lock *lock)
{
/* on server-side resource of lock doesn't change */
- lock->l_res_locked = 0;
+ lock->l_flags &= ~LDLM_FL_RES_LOCKED;
unlock_res(lock->l_resource);
- if (!lock->l_ns_srv)
+ if ((lock->l_flags & LDLM_FL_NS_SRV) == 0)
spin_unlock(&lock->l_lock);
}
EXPORT_SYMBOL(unlock_res_and_lock);
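
The l_lock.c hunk above is part of a broader conversion in this series: single-bit members such as l_ns_srv, l_res_locked and l_destroyed are folded into the existing l_flags word and tested or updated through named LDLM_FL_* masks. A minimal userspace sketch of that pattern (the flag values and struct below are invented for illustration, not the real LDLM_FL_* bits):

#include <stdio.h>

#define FL_NS_SRV      (1ULL << 0)
#define FL_RES_LOCKED  (1ULL << 1)
#define FL_DESTROYED   (1ULL << 2)

struct lock { unsigned long long flags; };

int main(void)
{
        struct lock lk = { 0 };

        lk.flags |= FL_RES_LOCKED;               /* was: lk.res_locked = 1 */
        if ((lk.flags & FL_NS_SRV) == 0)         /* was: if (!lk.ns_srv)   */
                printf("client-side lock\n");
        lk.flags &= ~FL_RES_LOCKED;              /* was: lk.res_locked = 0 */
        return 0;
}
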
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
index f7432f78e39..7e316637369 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -72,7 +72,6 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
struct list_head *tmp;
struct ldlm_lock *lck;
__u64 kms = 0;
- ENTRY;
/* don't let another thread in ldlm_extent_shift_kms race in
* just after we finish and take our lock into account in its
@@ -86,7 +85,7 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
continue;
if (lck->l_policy_data.l_extent.end >= old_kms)
- RETURN(old_kms);
+ return old_kms;
/* This extent _has_ to be smaller than old_kms (checked above)
* so kms can only ever be smaller or the same as old_kms. */
@@ -95,7 +94,7 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
}
LASSERTF(kms <= old_kms, "kms "LPU64" old_kms "LPU64"\n", kms, old_kms);
- RETURN(kms);
+ return kms;
}
EXPORT_SYMBOL(ldlm_extent_shift_kms);
@@ -103,16 +102,15 @@ struct kmem_cache *ldlm_interval_slab;
struct ldlm_interval *ldlm_interval_alloc(struct ldlm_lock *lock)
{
struct ldlm_interval *node;
- ENTRY;
LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, __GFP_IO);
if (node == NULL)
- RETURN(NULL);
+ return NULL;
INIT_LIST_HEAD(&node->li_group);
ldlm_interval_attach(node, lock);
- RETURN(node);
+ return node;
}
void ldlm_interval_free(struct ldlm_interval *node)
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index f100a84bde7..c68ed276633 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -142,8 +142,6 @@ static inline void ldlm_flock_blocking_unlink(struct ldlm_lock *req)
static inline void
ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
{
- ENTRY;
-
LDLM_DEBUG(lock, "ldlm_flock_destroy(mode: %d, flags: 0x%llx)",
mode, flags);
@@ -162,7 +160,6 @@ ldlm_flock_destroy(struct ldlm_lock *lock, ldlm_mode_t mode, __u64 flags)
}
ldlm_lock_destroy_nolock(lock);
- EXIT;
}
/**
@@ -198,6 +195,7 @@ ldlm_flock_deadlock(struct ldlm_lock *req, struct ldlm_lock *bl_lock)
if (lock == NULL)
break;
+ LASSERT(req != lock);
flock = &lock->l_policy_data.l_flock;
LASSERT(flock->owner == bl_owner);
bl_owner = flock->blocking_owner;
@@ -253,7 +251,6 @@ ldlm_process_flock_lock(struct ldlm_lock *req, __u64 *flags, int first_enq,
int splitted = 0;
const struct ldlm_callback_suite null_cbs = { NULL };
int rc;
- ENTRY;
CDEBUG(D_DLMTRACE, "flags %#llx owner "LPU64" pid %u mode %u start "
LPU64" end "LPU64"\n", *flags,
@@ -308,12 +305,12 @@ reprocess:
continue;
if (!first_enq)
- RETURN(LDLM_ITER_CONTINUE);
+ return LDLM_ITER_CONTINUE;
if (*flags & LDLM_FL_BLOCK_NOWAIT) {
ldlm_flock_destroy(req, mode, *flags);
*err = -EAGAIN;
- RETURN(LDLM_ITER_STOP);
+ return LDLM_ITER_STOP;
}
if (*flags & LDLM_FL_TEST_LOCK) {
@@ -326,24 +323,27 @@ reprocess:
req->l_policy_data.l_flock.end =
lock->l_policy_data.l_flock.end;
*flags |= LDLM_FL_LOCK_CHANGED;
- RETURN(LDLM_ITER_STOP);
- }
-
- if (ldlm_flock_deadlock(req, lock)) {
- ldlm_flock_destroy(req, mode, *flags);
- *err = -EDEADLK;
- RETURN(LDLM_ITER_STOP);
+ return LDLM_ITER_STOP;
}
+ /* add lock to blocking list before deadlock
+ * check to prevent race */
rc = ldlm_flock_blocking_link(req, lock);
if (rc) {
ldlm_flock_destroy(req, mode, *flags);
*err = rc;
- RETURN(LDLM_ITER_STOP);
+ return LDLM_ITER_STOP;
+ }
+ if (ldlm_flock_deadlock(req, lock)) {
+ ldlm_flock_blocking_unlink(req);
+ ldlm_flock_destroy(req, mode, *flags);
+ *err = -EDEADLK;
+ return LDLM_ITER_STOP;
}
+
ldlm_resource_add_lock(res, &res->lr_waiting, req);
*flags |= LDLM_FL_BLOCK_GRANTED;
- RETURN(LDLM_ITER_STOP);
+ return LDLM_ITER_STOP;
}
}
@@ -351,7 +351,7 @@ reprocess:
ldlm_flock_destroy(req, mode, *flags);
req->l_req_mode = LCK_NL;
*flags |= LDLM_FL_LOCK_CHANGED;
- RETURN(LDLM_ITER_STOP);
+ return LDLM_ITER_STOP;
}
/* In case we had slept on this lock request take it off of the
@@ -463,7 +463,7 @@ reprocess:
ldlm_flock_destroy(req, lock->l_granted_mode,
*flags);
*err = -ENOLCK;
- RETURN(LDLM_ITER_STOP);
+ return LDLM_ITER_STOP;
}
goto reprocess;
}
@@ -530,7 +530,7 @@ reprocess:
ldlm_flock_destroy(req, mode, *flags);
ldlm_resource_dump(D_INFO, res);
- RETURN(LDLM_ITER_CONTINUE);
+ return LDLM_ITER_CONTINUE;
}
struct ldlm_flock_wait_data {
@@ -542,7 +542,6 @@ static void
ldlm_flock_interrupted_wait(void *data)
{
struct ldlm_lock *lock;
- ENTRY;
lock = ((struct ldlm_flock_wait_data *)data)->fwd_lock;
@@ -553,8 +552,6 @@ ldlm_flock_interrupted_wait(void *data)
/* client side - set flag to prevent lock from being put on LRU list */
lock->l_flags |= LDLM_FL_CBPENDING;
unlock_res_and_lock(lock);
-
- EXIT;
}
/**
@@ -577,7 +574,6 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
struct l_wait_info lwi;
ldlm_error_t err;
int rc = 0;
- ENTRY;
CDEBUG(D_DLMTRACE, "flags: 0x%llx data: %p getlk: %p\n",
flags, data, getlk);
@@ -595,7 +591,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
/* Need to wake up the waiter if we were evicted */
wake_up(&lock->l_waitq);
- RETURN(0);
+ return 0;
}
LASSERT(flags != LDLM_FL_WAIT_NOREPROC);
@@ -607,7 +603,7 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
goto granted;
/* CP AST RPC: lock get granted, wake it up */
wake_up(&lock->l_waitq);
- RETURN(0);
+ return 0;
}
LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
@@ -633,26 +629,26 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
if (rc) {
LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
rc);
- RETURN(rc);
+ return rc;
}
granted:
OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
- if (lock->l_destroyed) {
+ if (lock->l_flags & LDLM_FL_DESTROYED) {
LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
- RETURN(0);
+ return 0;
}
if (lock->l_flags & LDLM_FL_FAILED) {
LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
- RETURN(-EIO);
+ return -EIO;
}
if (rc) {
LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
rc);
- RETURN(rc);
+ return rc;
}
LDLM_DEBUG(lock, "client-side enqueue granted");
@@ -694,15 +690,13 @@ granted:
ldlm_process_flock_lock(lock, &noreproc, 1, &err, NULL);
}
unlock_res_and_lock(lock);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ldlm_flock_completion_ast);
int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
void *data, int flag)
{
- ENTRY;
-
LASSERT(lock);
LASSERT(flag == LDLM_CB_CANCELING);
@@ -710,7 +704,7 @@ int ldlm_flock_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
lock_res_and_lock(lock);
ldlm_flock_blocking_unlink(lock);
unlock_res_and_lock(lock);
- RETURN(0);
+ return 0;
}
void ldlm_flock_policy_wire18_to_local(const ldlm_wire_policy_data_t *wpolicy,
@@ -831,19 +825,17 @@ int ldlm_init_flock_export(struct obd_export *exp)
&ldlm_export_flock_ops,
CFS_HASH_DEFAULT | CFS_HASH_NBLK_CHANGE);
if (!exp->exp_flock_hash)
- RETURN(-ENOMEM);
+ return -ENOMEM;
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ldlm_init_flock_export);
void ldlm_destroy_flock_export(struct obd_export *exp)
{
- ENTRY;
if (exp->exp_flock_hash) {
cfs_hash_putref(exp->exp_flock_hash);
exp->exp_flock_hash = NULL;
}
- EXIT;
}
EXPORT_SYMBOL(ldlm_destroy_flock_export);
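
ldlm_flock_deadlock() walks the chain of blocking owners and reports a deadlock when the chain loops back to the requesting owner; the reordered hunk above now links the request into the blocking list before running that walk and unlinks it again if a deadlock is found, per the added comment about the race. A simplified standalone sketch of the cycle walk (the type and fields are assumptions; the real code resolves blocking owners through an export hash rather than direct pointers):

#include <stdbool.h>

struct flock_req {
        unsigned long owner;            /* lock owner id */
        struct flock_req *blocked_by;   /* request this one waits on */
};

static bool would_deadlock(const struct flock_req *req)
{
        const struct flock_req *bl = req->blocked_by;

        while (bl) {
                if (bl->owner == req->owner)
                        return true;    /* chain loops back to the requester */
                bl = bl->blocked_by;
        }
        return false;
}

int main(void)
{
        struct flock_req a = { .owner = 1 };
        struct flock_req b = { .owner = 2 };

        a.blocked_by = &b;
        b.blocked_by = &a;              /* 1 waits on 2, 2 waits on 1 */

        return would_deadlock(&a) ? 0 : 1;
}
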
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index 141a957462f..8cd79633dea 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -36,23 +36,46 @@
#define MAX_STRING_SIZE 128
-extern atomic_t ldlm_srv_namespace_nr;
-extern atomic_t ldlm_cli_namespace_nr;
+extern int ldlm_srv_namespace_nr;
+extern int ldlm_cli_namespace_nr;
extern struct mutex ldlm_srv_namespace_lock;
extern struct list_head ldlm_srv_namespace_list;
extern struct mutex ldlm_cli_namespace_lock;
-extern struct list_head ldlm_cli_namespace_list;
+extern struct list_head ldlm_cli_active_namespace_list;
+extern struct list_head ldlm_cli_inactive_namespace_list;
-static inline atomic_t *ldlm_namespace_nr(ldlm_side_t client)
+static inline int ldlm_namespace_nr_read(ldlm_side_t client)
{
return client == LDLM_NAMESPACE_SERVER ?
- &ldlm_srv_namespace_nr : &ldlm_cli_namespace_nr;
+ ldlm_srv_namespace_nr : ldlm_cli_namespace_nr;
+}
+
+static inline void ldlm_namespace_nr_inc(ldlm_side_t client)
+{
+ if (client == LDLM_NAMESPACE_SERVER)
+ ldlm_srv_namespace_nr++;
+ else
+ ldlm_cli_namespace_nr++;
+}
+
+static inline void ldlm_namespace_nr_dec(ldlm_side_t client)
+{
+ if (client == LDLM_NAMESPACE_SERVER)
+ ldlm_srv_namespace_nr--;
+ else
+ ldlm_cli_namespace_nr--;
}
static inline struct list_head *ldlm_namespace_list(ldlm_side_t client)
{
return client == LDLM_NAMESPACE_SERVER ?
- &ldlm_srv_namespace_list : &ldlm_cli_namespace_list;
+ &ldlm_srv_namespace_list : &ldlm_cli_active_namespace_list;
+}
+
+static inline struct list_head *ldlm_namespace_inactive_list(ldlm_side_t client)
+{
+ return client == LDLM_NAMESPACE_SERVER ?
+ &ldlm_srv_namespace_list : &ldlm_cli_inactive_namespace_list;
}
static inline struct mutex *ldlm_namespace_lock(ldlm_side_t client)
@@ -61,6 +84,16 @@ static inline struct mutex *ldlm_namespace_lock(ldlm_side_t client)
&ldlm_srv_namespace_lock : &ldlm_cli_namespace_lock;
}
+/* ns_bref is the number of resources in this namespace */
+static inline int ldlm_ns_empty(struct ldlm_namespace *ns)
+{
+ return atomic_read(&ns->ns_bref) == 0;
+}
+
+void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *, ldlm_side_t);
+void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *, ldlm_side_t);
+struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t);
+
/* ldlm_request.c */
/* Cancel lru flag, it indicates we cancel aged locks. */
enum {
@@ -159,8 +192,8 @@ void ldlm_destroy_flock_export(struct obd_export *exp);
void l_check_ns_lock(struct ldlm_namespace *ns);
void l_check_no_ns_lock(struct ldlm_namespace *ns);
-extern proc_dir_entry_t *ldlm_svc_proc_dir;
-extern proc_dir_entry_t *ldlm_type_proc_dir;
+extern struct proc_dir_entry *ldlm_svc_proc_dir;
+extern struct proc_dir_entry *ldlm_type_proc_dir;
struct ldlm_state {
struct ptlrpc_service *ldlm_cb_service;
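
The header change above replaces the atomic_t namespace counters with plain ints plus explicit inc/dec/read helpers; that is only safe if callers already serialize on ldlm_namespace_lock(), which is presumably the case here and makes the atomic redundant. A small userspace sketch of the same idea, a plain counter protected by the mutex its callers already hold (the names are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ns_lock = PTHREAD_MUTEX_INITIALIZER;
static int ns_nr;

static void ns_register(void)
{
        pthread_mutex_lock(&ns_lock);
        ns_nr++;                        /* protected by ns_lock, no atomic needed */
        pthread_mutex_unlock(&ns_lock);
}

static void ns_unregister(void)
{
        pthread_mutex_lock(&ns_lock);
        ns_nr--;
        pthread_mutex_unlock(&ns_lock);
}

int main(void)
{
        ns_register();
        ns_register();
        ns_unregister();
        printf("%d namespace(s)\n", ns_nr);     /* prints 1 */
        return 0;
}
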
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index 42df53072dc..1a8c0d7005c 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -60,17 +60,16 @@ static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
struct ptlrpc_connection *ptlrpc_conn;
struct obd_import_conn *imp_conn = NULL, *item;
int rc = 0;
- ENTRY;
if (!create && !priority) {
CDEBUG(D_HA, "Nothing to do\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
ptlrpc_conn = ptlrpc_uuid_to_connection(uuid);
if (!ptlrpc_conn) {
CDEBUG(D_HA, "can't find connection %s\n", uuid->uuid);
- RETURN (-ENOENT);
+ return -ENOENT;
}
if (create) {
@@ -115,13 +114,13 @@ static int import_set_conn(struct obd_import *imp, struct obd_uuid *uuid,
}
spin_unlock(&imp->imp_lock);
- RETURN(0);
+ return 0;
out_free:
if (imp_conn)
OBD_FREE(imp_conn, sizeof(*imp_conn));
out_put:
ptlrpc_connection_put(ptlrpc_conn);
- RETURN(rc);
+ return rc;
}
int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid)
@@ -141,7 +140,6 @@ int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid)
struct obd_import_conn *imp_conn;
struct obd_export *dlmexp;
int rc = -ENOENT;
- ENTRY;
spin_lock(&imp->imp_lock);
if (list_empty(&imp->imp_conn_list)) {
@@ -187,7 +185,7 @@ out:
spin_unlock(&imp->imp_lock);
if (rc == -ENOENT)
CERROR("connection %s not found\n", uuid->uuid);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(client_import_del_conn);
@@ -200,7 +198,6 @@ int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
{
struct obd_import_conn *conn;
int rc = -ENOENT;
- ENTRY;
spin_lock(&imp->imp_lock);
list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
@@ -212,7 +209,7 @@ int client_import_find_conn(struct obd_import *imp, lnet_nid_t peer,
}
}
spin_unlock(&imp->imp_lock);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(client_import_find_conn);
@@ -267,7 +264,6 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
ldlm_ns_type_t ns_type = LDLM_NS_TYPE_UNKNOWN;
int rc;
char *cli_name = lustre_cfg_buf(lcfg, 0);
- ENTRY;
/* In a more perfect world, we would hang a ptlrpc_client off of
* obd_type and just use the values from there. */
@@ -305,27 +301,27 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
} else {
CERROR("unknown client OBD type \"%s\", can't setup\n",
name);
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
CERROR("requires a TARGET UUID\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (LUSTRE_CFG_BUFLEN(lcfg, 1) > 37) {
CERROR("client UUID must be less than 38 characters\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (LUSTRE_CFG_BUFLEN(lcfg, 2) < 1) {
CERROR("setup requires a SERVER UUID\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (LUSTRE_CFG_BUFLEN(lcfg, 2) > 37) {
CERROR("target UUID must be less than 38 characters\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
init_rwsem(&cli->cl_sem);
@@ -339,8 +335,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
cli->cl_avail_grant = 0;
/* FIXME: Should limit this for the sum of all cl_dirty_max. */
cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
- if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > num_physpages / 8)
- cli->cl_dirty_max = num_physpages << (PAGE_CACHE_SHIFT - 3);
+ if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > totalram_pages / 8)
+ cli->cl_dirty_max = totalram_pages << (PAGE_CACHE_SHIFT - 3);
INIT_LIST_HEAD(&cli->cl_cache_waiters);
INIT_LIST_HEAD(&cli->cl_loi_ready_list);
INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
@@ -388,11 +384,11 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
if (!strcmp(name, LUSTRE_MDC_NAME)) {
cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
- } else if (num_physpages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) {
+ } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) {
cli->cl_max_rpcs_in_flight = 2;
- } else if (num_physpages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) {
+ } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) {
cli->cl_max_rpcs_in_flight = 3;
- } else if (num_physpages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) {
+ } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) {
cli->cl_max_rpcs_in_flight = 4;
} else {
if (osc_on_mdt(obddev->obd_name))
@@ -452,29 +448,27 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
cli->cl_qchk_stat = CL_NOT_QUOTACHECKED;
- RETURN(rc);
+ return rc;
err_import:
class_destroy_import(imp);
err_ldlm:
ldlm_put_ref();
err:
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(client_obd_setup);
int client_obd_cleanup(struct obd_device *obddev)
{
- ENTRY;
-
ldlm_namespace_free_post(obddev->obd_namespace);
obddev->obd_namespace = NULL;
LASSERT(obddev->u.cli.cl_import == NULL);
ldlm_put_ref();
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(client_obd_cleanup);
@@ -489,7 +483,6 @@ int client_connect_import(const struct lu_env *env,
struct obd_connect_data *ocd;
struct lustre_handle conn = { 0 };
int rc;
- ENTRY;
*exp = NULL;
down_write(&cli->cl_sem);
@@ -532,8 +525,6 @@ int client_connect_import(const struct lu_env *env,
ptlrpc_pinger_add_import(imp);
- EXIT;
-
if (rc) {
out_ldlm:
cli->cl_conn_count--;
@@ -553,12 +544,11 @@ int client_disconnect_export(struct obd_export *exp)
struct client_obd *cli;
struct obd_import *imp;
int rc = 0, err;
- ENTRY;
if (!obd) {
CERROR("invalid export for disconnect: exp %p cookie "LPX64"\n",
exp, exp ? exp->exp_handle.h_cookie : -1);
- RETURN(-EINVAL);
+ return -EINVAL;
}
cli = &obd->u.cli;
@@ -605,8 +595,6 @@ int client_disconnect_export(struct obd_export *exp)
ptlrpc_invalidate_import(imp);
- EXIT;
-
out_disconnect:
/* Use server style - class_disconnect should be always called for
* o_disconnect. */
@@ -616,7 +604,7 @@ out_disconnect:
up_write(&cli->cl_sem);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(client_disconnect_export);
@@ -627,7 +615,6 @@ EXPORT_SYMBOL(client_disconnect_export);
int target_pack_pool_reply(struct ptlrpc_request *req)
{
struct obd_device *obd;
- ENTRY;
/* Check that we still have all structures alive as this may
* be some late RPC at shutdown time. */
@@ -635,7 +622,7 @@ int target_pack_pool_reply(struct ptlrpc_request *req)
!exp_connect_lru_resize(req->rq_export))) {
lustre_msg_set_slv(req->rq_repmsg, 0);
lustre_msg_set_limit(req->rq_repmsg, 0);
- RETURN(0);
+ return 0;
}
/* OBD is alive here as export is alive, which we checked above. */
@@ -646,7 +633,7 @@ int target_pack_pool_reply(struct ptlrpc_request *req)
lustre_msg_set_limit(req->rq_repmsg, obd->obd_pool_limit);
read_unlock(&obd->obd_pool_lock);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(target_pack_pool_reply);
@@ -674,10 +661,8 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
int netrc;
struct ptlrpc_reply_state *rs;
struct obd_export *exp;
- ENTRY;
if (req->rq_no_reply) {
- EXIT;
return;
}
@@ -686,7 +671,6 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
if (rs == NULL || !rs->rs_difficult) {
/* no notifiers */
target_send_reply_msg (req, rc, fail_id);
- EXIT;
return;
}
@@ -757,19 +741,18 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
}
spin_unlock(&rs->rs_lock);
spin_unlock(&svcpt->scp_rep_lock);
- EXIT;
}
EXPORT_SYMBOL(target_send_reply);
ldlm_mode_t lck_compat_array[] = {
- [LCK_EX] LCK_COMPAT_EX,
- [LCK_PW] LCK_COMPAT_PW,
- [LCK_PR] LCK_COMPAT_PR,
- [LCK_CW] LCK_COMPAT_CW,
- [LCK_CR] LCK_COMPAT_CR,
- [LCK_NL] LCK_COMPAT_NL,
- [LCK_GROUP] LCK_COMPAT_GROUP,
- [LCK_COS] LCK_COMPAT_COS,
+ [LCK_EX] = LCK_COMPAT_EX,
+ [LCK_PW] = LCK_COMPAT_PW,
+ [LCK_PR] = LCK_COMPAT_PR,
+ [LCK_CW] = LCK_COMPAT_CW,
+ [LCK_CR] = LCK_COMPAT_CR,
+ [LCK_NL] = LCK_COMPAT_NL,
+ [LCK_GROUP] = LCK_COMPAT_GROUP,
+ [LCK_COS] = LCK_COMPAT_COS,
};
/**
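
The lck_compat_array hunk just above, like the lock-name tables in the ldlm_lock.c diff that follows, drops the obsolete GCC initializer form "[index] value" in favour of standard C99 designated initializers "[index] = value". A self-contained illustration (the enum and strings are made up for the example):

#include <stdio.h>

enum mode { M_NONE, M_EX, M_PR, M_NR };

static const char *mode_name[M_NR] = {
        [M_NONE] = "--",                /* C99 form; "[M_NONE] \"--\"" is a GCC-only relic */
        [M_EX]   = "EX",
        [M_PR]   = "PR",
};

int main(void)
{
        printf("%s %s\n", mode_name[M_EX], mode_name[M_PR]);
        return 0;
}
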
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index 33b76a1e5de..6133b3f3471 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -49,45 +49,45 @@
/* lock types */
char *ldlm_lockname[] = {
- [0] "--",
- [LCK_EX] "EX",
- [LCK_PW] "PW",
- [LCK_PR] "PR",
- [LCK_CW] "CW",
- [LCK_CR] "CR",
- [LCK_NL] "NL",
- [LCK_GROUP] "GROUP",
- [LCK_COS] "COS"
+ [0] = "--",
+ [LCK_EX] = "EX",
+ [LCK_PW] = "PW",
+ [LCK_PR] = "PR",
+ [LCK_CW] = "CW",
+ [LCK_CR] = "CR",
+ [LCK_NL] = "NL",
+ [LCK_GROUP] = "GROUP",
+ [LCK_COS] = "COS",
};
EXPORT_SYMBOL(ldlm_lockname);
char *ldlm_typename[] = {
- [LDLM_PLAIN] "PLN",
- [LDLM_EXTENT] "EXT",
- [LDLM_FLOCK] "FLK",
- [LDLM_IBITS] "IBT",
+ [LDLM_PLAIN] = "PLN",
+ [LDLM_EXTENT] = "EXT",
+ [LDLM_FLOCK] = "FLK",
+ [LDLM_IBITS] = "IBT",
};
EXPORT_SYMBOL(ldlm_typename);
static ldlm_policy_wire_to_local_t ldlm_policy_wire18_to_local[] = {
- [LDLM_PLAIN - LDLM_MIN_TYPE] ldlm_plain_policy_wire_to_local,
- [LDLM_EXTENT - LDLM_MIN_TYPE] ldlm_extent_policy_wire_to_local,
- [LDLM_FLOCK - LDLM_MIN_TYPE] ldlm_flock_policy_wire18_to_local,
- [LDLM_IBITS - LDLM_MIN_TYPE] ldlm_ibits_policy_wire_to_local,
+ [LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local,
+ [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local,
+ [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire18_to_local,
+ [LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local,
};
static ldlm_policy_wire_to_local_t ldlm_policy_wire21_to_local[] = {
- [LDLM_PLAIN - LDLM_MIN_TYPE] ldlm_plain_policy_wire_to_local,
- [LDLM_EXTENT - LDLM_MIN_TYPE] ldlm_extent_policy_wire_to_local,
- [LDLM_FLOCK - LDLM_MIN_TYPE] ldlm_flock_policy_wire21_to_local,
- [LDLM_IBITS - LDLM_MIN_TYPE] ldlm_ibits_policy_wire_to_local,
+ [LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_wire_to_local,
+ [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_wire_to_local,
+ [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_wire21_to_local,
+ [LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_wire_to_local,
};
static ldlm_policy_local_to_wire_t ldlm_policy_local_to_wire[] = {
- [LDLM_PLAIN - LDLM_MIN_TYPE] ldlm_plain_policy_local_to_wire,
- [LDLM_EXTENT - LDLM_MIN_TYPE] ldlm_extent_policy_local_to_wire,
- [LDLM_FLOCK - LDLM_MIN_TYPE] ldlm_flock_policy_local_to_wire,
- [LDLM_IBITS - LDLM_MIN_TYPE] ldlm_ibits_policy_local_to_wire,
+ [LDLM_PLAIN - LDLM_MIN_TYPE] = ldlm_plain_policy_local_to_wire,
+ [LDLM_EXTENT - LDLM_MIN_TYPE] = ldlm_extent_policy_local_to_wire,
+ [LDLM_FLOCK - LDLM_MIN_TYPE] = ldlm_flock_policy_local_to_wire,
+ [LDLM_IBITS - LDLM_MIN_TYPE] = ldlm_ibits_policy_local_to_wire,
};
/**
@@ -188,8 +188,6 @@ EXPORT_SYMBOL(ldlm_lock_get);
*/
void ldlm_lock_put(struct ldlm_lock *lock)
{
- ENTRY;
-
LASSERT(lock->l_resource != LP_POISON);
LASSERT(atomic_read(&lock->l_refc) > 0);
if (atomic_dec_and_test(&lock->l_refc)) {
@@ -199,7 +197,7 @@ void ldlm_lock_put(struct ldlm_lock *lock)
"final lock_put on destroyed lock, freeing it.");
res = lock->l_resource;
- LASSERT(lock->l_destroyed);
+ LASSERT(lock->l_flags & LDLM_FL_DESTROYED);
LASSERT(list_empty(&lock->l_res_link));
LASSERT(list_empty(&lock->l_pending_chain));
@@ -220,8 +218,6 @@ void ldlm_lock_put(struct ldlm_lock *lock)
lu_ref_fini(&lock->l_reference);
OBD_FREE_RCU(lock, sizeof(*lock), &lock->l_handle);
}
-
- EXIT;
}
EXPORT_SYMBOL(ldlm_lock_put);
@@ -253,16 +249,14 @@ int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
int rc;
- ENTRY;
- if (lock->l_ns_srv) {
+ if (lock->l_flags & LDLM_FL_NS_SRV) {
LASSERT(list_empty(&lock->l_lru));
- RETURN(0);
+ return 0;
}
spin_lock(&ns->ns_lock);
rc = ldlm_lock_remove_from_lru_nolock(lock);
spin_unlock(&ns->ns_lock);
- EXIT;
return rc;
}
@@ -289,11 +283,9 @@ void ldlm_lock_add_to_lru(struct ldlm_lock *lock)
{
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
- ENTRY;
spin_lock(&ns->ns_lock);
ldlm_lock_add_to_lru_nolock(lock);
spin_unlock(&ns->ns_lock);
- EXIT;
}
/**
@@ -304,10 +296,8 @@ void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
{
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
- ENTRY;
- if (lock->l_ns_srv) {
+ if (lock->l_flags & LDLM_FL_NS_SRV) {
LASSERT(list_empty(&lock->l_lru));
- EXIT;
return;
}
@@ -317,7 +307,6 @@ void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
ldlm_lock_add_to_lru_nolock(lock);
}
spin_unlock(&ns->ns_lock);
- EXIT;
}
/**
@@ -341,8 +330,6 @@ void ldlm_lock_touch_in_lru(struct ldlm_lock *lock)
*/
int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
{
- ENTRY;
-
if (lock->l_readers || lock->l_writers) {
LDLM_ERROR(lock, "lock still has references");
LBUG();
@@ -353,12 +340,11 @@ int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
LBUG();
}
- if (lock->l_destroyed) {
+ if (lock->l_flags & LDLM_FL_DESTROYED) {
LASSERT(list_empty(&lock->l_lru));
- EXIT;
return 0;
}
- lock->l_destroyed = 1;
+ lock->l_flags |= LDLM_FL_DESTROYED;
if (lock->l_export && lock->l_export->exp_lock_hash) {
/* NB: it's safe to call cfs_hash_del() even lock isn't
@@ -383,7 +369,6 @@ int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
if (lock->l_export && lock->l_completion_ast)
lock->l_completion_ast(lock, 0);
#endif
- EXIT;
return 1;
}
@@ -393,7 +378,7 @@ int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
void ldlm_lock_destroy(struct ldlm_lock *lock)
{
int first;
- ENTRY;
+
lock_res_and_lock(lock);
first = ldlm_lock_destroy_internal(lock);
unlock_res_and_lock(lock);
@@ -403,7 +388,6 @@ void ldlm_lock_destroy(struct ldlm_lock *lock)
lu_ref_del(&lock->l_reference, "hash", lock);
LDLM_LOCK_RELEASE(lock);
}
- EXIT;
}
/**
@@ -412,14 +396,13 @@ void ldlm_lock_destroy(struct ldlm_lock *lock)
void ldlm_lock_destroy_nolock(struct ldlm_lock *lock)
{
int first;
- ENTRY;
+
first = ldlm_lock_destroy_internal(lock);
/* drop reference from hashtable only for first destroy */
if (first) {
lu_ref_del(&lock->l_reference, "hash", lock);
LDLM_LOCK_RELEASE(lock);
}
- EXIT;
}
/* this is called by portals_handle2object with the handle lock taken */
@@ -450,14 +433,13 @@ struct portals_handle_ops lock_handle_ops = {
static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
{
struct ldlm_lock *lock;
- ENTRY;
if (resource == NULL)
LBUG();
OBD_SLAB_ALLOC_PTR_GFP(lock, ldlm_lock_slab, __GFP_IO);
if (lock == NULL)
- RETURN(NULL);
+ return NULL;
spin_lock_init(&lock->l_lock);
lock->l_resource = resource;
@@ -493,7 +475,7 @@ static struct ldlm_lock *ldlm_lock_new(struct ldlm_resource *resource)
#endif
INIT_LIST_HEAD(&lock->l_exp_list);
- RETURN(lock);
+ return lock;
}
/**
@@ -507,7 +489,6 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
struct ldlm_resource *oldres = lock->l_resource;
struct ldlm_resource *newres;
int type;
- ENTRY;
LASSERT(ns_is_client(ns));
@@ -516,7 +497,7 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
sizeof(lock->l_resource->lr_name)) == 0) {
/* Nothing to do */
unlock_res_and_lock(lock);
- RETURN(0);
+ return 0;
}
LASSERT(new_resid->name[0] != 0);
@@ -529,7 +510,7 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
newres = ldlm_resource_get(ns, NULL, new_resid, type, 1);
if (newres == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
lu_ref_add(&newres->lr_reference, "lock", lock);
/*
@@ -557,7 +538,7 @@ int ldlm_lock_change_resource(struct ldlm_namespace *ns, struct ldlm_lock *lock,
lu_ref_del(&oldres->lr_reference, "lock", lock);
ldlm_resource_putref(oldres);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ldlm_lock_change_resource);
@@ -586,19 +567,18 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
__u64 flags)
{
struct ldlm_lock *lock;
- ENTRY;
LASSERT(handle);
lock = class_handle2object(handle->cookie);
if (lock == NULL)
- RETURN(NULL);
+ return NULL;
/* It's unlikely but possible that someone marked the lock as
* destroyed after we did handle2object on it */
- if (flags == 0 && !lock->l_destroyed) {
+ if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED)== 0)) {
lu_ref_add(&lock->l_reference, "handle", current);
- RETURN(lock);
+ return lock;
}
lock_res_and_lock(lock);
@@ -606,24 +586,24 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
LASSERT(lock->l_resource != NULL);
lu_ref_add_atomic(&lock->l_reference, "handle", current);
- if (unlikely(lock->l_destroyed)) {
+ if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) {
unlock_res_and_lock(lock);
CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
LDLM_LOCK_PUT(lock);
- RETURN(NULL);
+ return NULL;
}
if (flags && (lock->l_flags & flags)) {
unlock_res_and_lock(lock);
LDLM_LOCK_PUT(lock);
- RETURN(NULL);
+ return NULL;
}
if (flags)
lock->l_flags |= flags;
unlock_res_and_lock(lock);
- RETURN(lock);
+ return lock;
}
EXPORT_SYMBOL(__ldlm_handle2lock);
/** @} ldlm_handles */
@@ -695,7 +675,7 @@ void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
lock->l_flags |= LDLM_FL_AST_SENT;
/* If the enqueuing client said so, tell the AST recipient to
* discard dirty data, rather than writing back. */
- if (new->l_flags & LDLM_AST_DISCARD_DATA)
+ if (new->l_flags & LDLM_FL_AST_DISCARD_DATA)
lock->l_flags |= LDLM_FL_DISCARD_DATA;
LASSERT(list_empty(&lock->l_bl_ast));
list_add(&lock->l_bl_ast, work_list);
@@ -728,13 +708,11 @@ void ldlm_add_cp_work_item(struct ldlm_lock *lock, struct list_head *work_list)
void ldlm_add_ast_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
struct list_head *work_list)
{
- ENTRY;
check_res_locked(lock->l_resource);
if (new)
ldlm_add_bl_work_item(lock, new, work_list);
else
ldlm_add_cp_work_item(lock, work_list);
- EXIT;
}
/**
@@ -853,7 +831,6 @@ void ldlm_lock_decref_internal_nolock(struct ldlm_lock *lock, __u32 mode)
void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
{
struct ldlm_namespace *ns;
- ENTRY;
lock_res_and_lock(lock);
@@ -873,7 +850,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
(lock->l_flags & LDLM_FL_CBPENDING)) {
/* If we received a blocked AST and this was the last reference,
* run the callback. */
- if (lock->l_ns_srv && lock->l_export)
+ if ((lock->l_flags & LDLM_FL_NS_SRV) && lock->l_export)
CERROR("FL_CBPENDING set on non-local lock--just a "
"warning\n");
@@ -914,8 +891,6 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
LDLM_DEBUG(lock, "do not add lock into lru list");
unlock_res_and_lock(lock);
}
-
- EXIT;
}
/**
@@ -940,7 +915,6 @@ EXPORT_SYMBOL(ldlm_lock_decref);
void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
{
struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);
- ENTRY;
LASSERT(lock != NULL);
@@ -979,7 +953,6 @@ static void search_granted_lock(struct list_head *queue,
{
struct list_head *tmp;
struct ldlm_lock *lock, *mode_end, *policy_end;
- ENTRY;
list_for_each(tmp, queue) {
lock = list_entry(tmp, struct ldlm_lock, l_res_link);
@@ -999,7 +972,6 @@ static void search_granted_lock(struct list_head *queue,
prev->res_link = &mode_end->l_res_link;
prev->mode_link = &mode_end->l_sl_mode;
prev->policy_link = &req->l_sl_policy;
- EXIT;
return;
} else if (lock->l_resource->lr_type == LDLM_IBITS) {
for (;;) {
@@ -1018,7 +990,6 @@ static void search_granted_lock(struct list_head *queue,
&policy_end->l_sl_mode;
prev->policy_link =
&policy_end->l_sl_policy;
- EXIT;
return;
}
@@ -1037,7 +1008,6 @@ static void search_granted_lock(struct list_head *queue,
prev->res_link = &mode_end->l_res_link;
prev->mode_link = &mode_end->l_sl_mode;
prev->policy_link = &req->l_sl_policy;
- EXIT;
return;
} else {
LDLM_ERROR(lock,"is not LDLM_PLAIN or LDLM_IBITS lock");
@@ -1050,7 +1020,6 @@ static void search_granted_lock(struct list_head *queue,
prev->res_link = queue->prev;
prev->mode_link = &req->l_sl_mode;
prev->policy_link = &req->l_sl_policy;
- EXIT;
return;
}
@@ -1062,14 +1031,13 @@ static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
struct sl_insert_point *prev)
{
struct ldlm_resource *res = lock->l_resource;
- ENTRY;
check_res_locked(res);
ldlm_resource_dump(D_INFO, res);
LDLM_DEBUG(lock, "About to add lock:");
- if (lock->l_destroyed) {
+ if (lock->l_flags & LDLM_FL_DESTROYED) {
CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
return;
}
@@ -1088,8 +1056,6 @@ static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
list_add(&lock->l_sl_mode, prev->mode_link);
if (&lock->l_sl_policy != prev->policy_link)
list_add(&lock->l_sl_policy, prev->policy_link);
-
- EXIT;
}
/**
@@ -1099,13 +1065,11 @@ static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock)
{
struct sl_insert_point prev;
- ENTRY;
LASSERT(lock->l_req_mode == lock->l_granted_mode);
search_granted_lock(&lock->l_resource->lr_granted, lock, &prev);
ldlm_granted_list_add_lock(lock, &prev);
- EXIT;
}
/**
@@ -1122,7 +1086,6 @@ static void ldlm_grant_lock_with_skiplist(struct ldlm_lock *lock)
void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
{
struct ldlm_resource *res = lock->l_resource;
- ENTRY;
check_res_locked(res);
@@ -1141,7 +1104,6 @@ void ldlm_grant_lock(struct ldlm_lock *lock, struct list_head *work_list)
ldlm_add_ast_work_item(lock, NULL, work_list);
ldlm_pool_add(&ldlm_res_to_ns(res)->ns_pool, lock);
- EXIT;
}
/**
@@ -1203,9 +1165,7 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
policy->l_inodebits.bits))
continue;
- if (!unref &&
- (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED ||
- lock->l_failed))
+ if (!unref && (lock->l_flags & LDLM_FL_GONE_MASK))
continue;
if ((flags & LDLM_FL_LOCAL_ONLY) &&
@@ -1227,8 +1187,8 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
void ldlm_lock_fail_match_locked(struct ldlm_lock *lock)
{
- if (!lock->l_failed) {
- lock->l_failed = 1;
+ if ((lock->l_flags & LDLM_FL_FAIL_NOTIFIED) == 0) {
+ lock->l_flags |= LDLM_FL_FAIL_NOTIFIED;
wake_up_all(&lock->l_waitq);
}
}
@@ -1306,7 +1266,6 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
struct ldlm_resource *res;
struct ldlm_lock *lock, *old_lock = NULL;
int rc = 0;
- ENTRY;
if (ns == NULL) {
old_lock = ldlm_handle2lock(lockh);
@@ -1321,7 +1280,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
res = ldlm_resource_get(ns, NULL, res_id, type, 0);
if (res == NULL) {
LASSERT(old_lock == NULL);
- RETURN(0);
+ return 0;
}
LDLM_RESOURCE_ADDREF(res);
@@ -1342,7 +1301,6 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
if (lock != NULL)
GOTO(out, rc = 1);
- EXIT;
out:
unlock_res(res);
LDLM_RESOURCE_DELREF(res);
@@ -1352,6 +1310,8 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
ldlm_lock2handle(lock, lockh);
if ((flags & LDLM_FL_LVB_READY) &&
(!(lock->l_flags & LDLM_FL_LVB_READY))) {
+ __u64 wait_flags = LDLM_FL_LVB_READY |
+ LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
struct l_wait_info lwi;
if (lock->l_completion_ast) {
int err = lock->l_completion_ast(lock,
@@ -1373,8 +1333,7 @@ ldlm_mode_t ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
/* XXX FIXME see comment on CAN_MATCH in lustre_dlm.h */
l_wait_event(lock->l_waitq,
- lock->l_flags & LDLM_FL_LVB_READY ||
- lock->l_destroyed || lock->l_failed,
+ lock->l_flags & wait_flags,
&lwi);
if (!(lock->l_flags & LDLM_FL_LVB_READY)) {
if (flags & LDLM_FL_TEST_LOCK)
@@ -1426,13 +1385,11 @@ ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
{
struct ldlm_lock *lock;
ldlm_mode_t mode = 0;
- ENTRY;
lock = ldlm_handle2lock(lockh);
if (lock != NULL) {
lock_res_and_lock(lock);
- if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED ||
- lock->l_failed)
+ if (lock->l_flags & LDLM_FL_GONE_MASK)
GOTO(out, mode);
if (lock->l_flags & LDLM_FL_CBPENDING &&
@@ -1445,8 +1402,6 @@ ldlm_mode_t ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
ldlm_lock_addref_internal_nolock(lock, mode);
}
- EXIT;
-
out:
if (lock != NULL) {
unlock_res_and_lock(lock);
@@ -1461,7 +1416,6 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
enum req_location loc, void *data, int size)
{
void *lvb;
- ENTRY;
LASSERT(data != NULL);
LASSERT(size >= 0);
@@ -1479,7 +1433,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
lustre_swab_ost_lvb);
if (unlikely(lvb == NULL)) {
LDLM_ERROR(lock, "no LVB");
- RETURN(-EPROTO);
+ return -EPROTO;
}
memcpy(data, lvb, size);
@@ -1496,7 +1450,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
lustre_swab_ost_lvb_v1);
if (unlikely(lvb == NULL)) {
LDLM_ERROR(lock, "no LVB");
- RETURN(-EPROTO);
+ return -EPROTO;
}
memcpy(data, lvb, size);
@@ -1506,7 +1460,7 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
} else {
LDLM_ERROR(lock, "Replied unexpected ost LVB size %d",
size);
- RETURN(-EINVAL);
+ return -EINVAL;
}
break;
case LVB_T_LQUOTA:
@@ -1521,14 +1475,14 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
lustre_swab_lquota_lvb);
if (unlikely(lvb == NULL)) {
LDLM_ERROR(lock, "no LVB");
- RETURN(-EPROTO);
+ return -EPROTO;
}
memcpy(data, lvb, size);
} else {
LDLM_ERROR(lock, "Replied unexpected lquota LVB size %d",
size);
- RETURN(-EINVAL);
+ return -EINVAL;
}
break;
case LVB_T_LAYOUT:
@@ -1541,18 +1495,18 @@ int ldlm_fill_lvb(struct ldlm_lock *lock, struct req_capsule *pill,
lvb = req_capsule_server_get(pill, &RMF_DLM_LVB);
if (unlikely(lvb == NULL)) {
LDLM_ERROR(lock, "no LVB");
- RETURN(-EPROTO);
+ return -EPROTO;
}
memcpy(data, lvb, size);
break;
default:
LDLM_ERROR(lock, "Unknown LVB type: %d\n", lock->l_lvb_type);
- libcfs_debug_dumpstack(NULL);
- RETURN(-EINVAL);
+ dump_stack();
+ return -EINVAL;
}
- RETURN(0);
+ return 0;
}
/**
@@ -1569,26 +1523,25 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
{
struct ldlm_lock *lock;
struct ldlm_resource *res;
- ENTRY;
res = ldlm_resource_get(ns, NULL, res_id, type, 1);
if (res == NULL)
- RETURN(NULL);
+ return NULL;
lock = ldlm_lock_new(res);
if (lock == NULL)
- RETURN(NULL);
+ return NULL;
lock->l_req_mode = mode;
lock->l_ast_data = data;
lock->l_pid = current_pid();
- lock->l_ns_srv = !!ns_is_server(ns);
+ if (ns_is_server(ns))
+ lock->l_flags |= LDLM_FL_NS_SRV;
if (cbs) {
lock->l_blocking_ast = cbs->lcs_blocking;
lock->l_completion_ast = cbs->lcs_completion;
lock->l_glimpse_ast = cbs->lcs_glimpse;
- lock->l_weigh_ast = cbs->lcs_weigh;
}
lock->l_tree_node = NULL;
@@ -1609,7 +1562,7 @@ struct ldlm_lock *ldlm_lock_create(struct ldlm_namespace *ns,
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_NEW_LOCK))
GOTO(out, 0);
- RETURN(lock);
+ return lock;
out:
ldlm_lock_destroy(lock);
@@ -1636,7 +1589,6 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
int local = ns_is_client(ldlm_res_to_ns(res));
ldlm_error_t rc = ELDLM_OK;
struct ldlm_interval *node = NULL;
- ENTRY;
lock->l_last_activity = cfs_time_current_sec();
/* policies are not executed on the client or during replay */
@@ -1654,11 +1606,11 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
LDLM_LOCK_RELEASE(lock);
}
*flags |= LDLM_FL_LOCK_CHANGED;
- RETURN(0);
+ return 0;
} else if (rc != ELDLM_OK ||
(rc == ELDLM_OK && (*flags & LDLM_FL_INTENT_ONLY))) {
ldlm_lock_destroy(lock);
- RETURN(rc);
+ return rc;
}
}
@@ -1693,7 +1645,7 @@ ldlm_error_t ldlm_lock_enqueue(struct ldlm_namespace *ns,
/* Some flags from the enqueue want to make it into the AST, via the
* lock's l_flags. */
- lock->l_flags |= *flags & LDLM_AST_DISCARD_DATA;
+ lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA;
/* This distinction between local lock trees is very important; a client
* namespace only has information about locks taken by that client, and
@@ -1738,10 +1690,9 @@ ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
struct ldlm_lock_desc d;
int rc;
struct ldlm_lock *lock;
- ENTRY;
if (list_empty(arg->list))
- RETURN(-ENOENT);
+ return -ENOENT;
lock = list_entry(arg->list->next, struct ldlm_lock, l_bl_ast);
@@ -1762,7 +1713,7 @@ ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
lock->l_blocking_lock = NULL;
LDLM_LOCK_RELEASE(lock);
- RETURN(rc);
+ return rc;
}
/**
@@ -1775,10 +1726,9 @@ ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
int rc = 0;
struct ldlm_lock *lock;
ldlm_completion_callback completion_callback;
- ENTRY;
if (list_empty(arg->list))
- RETURN(-ENOENT);
+ return -ENOENT;
lock = list_entry(arg->list->next, struct ldlm_lock, l_cp_ast);
@@ -1807,7 +1757,7 @@ ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
rc = completion_callback(lock, 0, (void *)arg);
LDLM_LOCK_RELEASE(lock);
- RETURN(rc);
+ return rc;
}
/**
@@ -1820,10 +1770,9 @@ ldlm_work_revoke_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
struct ldlm_lock_desc desc;
int rc;
struct ldlm_lock *lock;
- ENTRY;
if (list_empty(arg->list))
- RETURN(-ENOENT);
+ return -ENOENT;
lock = list_entry(arg->list->next, struct ldlm_lock, l_rk_ast);
list_del_init(&lock->l_rk_ast);
@@ -1836,7 +1785,7 @@ ldlm_work_revoke_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
rc = lock->l_blocking_ast(lock, &desc, (void*)arg, LDLM_CB_BLOCKING);
LDLM_LOCK_RELEASE(lock);
- RETURN(rc);
+ return rc;
}
/**
@@ -1848,10 +1797,9 @@ int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
struct ldlm_glimpse_work *gl_work;
struct ldlm_lock *lock;
int rc = 0;
- ENTRY;
if (list_empty(arg->list))
- RETURN(-ENOENT);
+ return -ENOENT;
gl_work = list_entry(arg->list->next, struct ldlm_glimpse_work,
gl_list);
@@ -1871,7 +1819,7 @@ int ldlm_work_gl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
if ((gl_work->gl_flags & LDLM_GL_WORK_NOFREE) == 0)
OBD_FREE_PTR(gl_work);
- RETURN(rc);
+ return rc;
}
/**
@@ -1888,11 +1836,11 @@ int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
int rc;
if (list_empty(rpc_list))
- RETURN(0);
+ return 0;
OBD_ALLOC_PTR(arg);
if (arg == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
atomic_set(&arg->restart, 0);
arg->list = rpc_list;
@@ -1960,13 +1908,10 @@ static int ldlm_reprocess_res(cfs_hash_t *hs, cfs_hash_bd_t *bd,
*/
void ldlm_reprocess_all_ns(struct ldlm_namespace *ns)
{
- ENTRY;
-
if (ns != NULL) {
cfs_hash_for_each_nolock(ns->ns_rs_hash,
ldlm_reprocess_res, NULL);
}
- EXIT;
}
EXPORT_SYMBOL(ldlm_reprocess_all_ns);
@@ -1982,13 +1927,11 @@ void ldlm_reprocess_all(struct ldlm_resource *res)
{
LIST_HEAD(rpc_list);
- ENTRY;
if (!ns_is_client(ldlm_res_to_ns(res))) {
CERROR("This is client-side-only module, cannot handle "
"LDLM_NAMESPACE_SERVER resource type lock.\n");
LBUG();
}
- EXIT;
}
/**
@@ -2032,7 +1975,6 @@ void ldlm_lock_cancel(struct ldlm_lock *lock)
{
struct ldlm_resource *res;
struct ldlm_namespace *ns;
- ENTRY;
lock_res_and_lock(lock);
@@ -2046,15 +1988,15 @@ void ldlm_lock_cancel(struct ldlm_lock *lock)
LBUG();
}
- if (lock->l_waited)
+ if (lock->l_flags & LDLM_FL_WAITED)
ldlm_del_waiting_lock(lock);
/* Releases cancel callback. */
ldlm_cancel_callback(lock);
/* Yes, second time, just in case it was added again while we were
- running with no res lock in ldlm_cancel_callback */
- if (lock->l_waited)
+ * running with no res lock in ldlm_cancel_callback */
+ if (lock->l_flags & LDLM_FL_WAITED)
ldlm_del_waiting_lock(lock);
ldlm_resource_unlink_lock(lock);
@@ -2067,8 +2009,6 @@ void ldlm_lock_cancel(struct ldlm_lock *lock)
* if not to zero out lock->l_granted_mode */
lock->l_granted_mode = LCK_MINMODE;
unlock_res_and_lock(lock);
-
- EXIT;
}
EXPORT_SYMBOL(ldlm_lock_cancel);
@@ -2079,7 +2019,6 @@ int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
{
struct ldlm_lock *lock = ldlm_handle2lock(lockh);
int rc = -EINVAL;
- ENTRY;
if (lock) {
if (lock->l_ast_data == NULL)
@@ -2088,7 +2027,7 @@ int ldlm_lock_set_data(struct lustre_handle *lockh, void *data)
rc = 0;
LDLM_LOCK_PUT(lock);
}
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(ldlm_lock_set_data);
@@ -2160,8 +2099,6 @@ void ldlm_cancel_locks_for_export(struct obd_export *exp)
*/
void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode)
{
- ENTRY;
-
LASSERT(lock->l_granted_mode & (LCK_PW | LCK_EX));
LASSERT(new_mode == LCK_COS);
@@ -2177,8 +2114,6 @@ void ldlm_lock_downgrade(struct ldlm_lock *lock, int new_mode)
ldlm_grant_lock(lock, NULL);
unlock_res_and_lock(lock);
ldlm_reprocess_all(lock->l_resource);
-
- EXIT;
}
EXPORT_SYMBOL(ldlm_lock_downgrade);
@@ -2197,19 +2132,19 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
struct ldlm_namespace *ns;
int granted = 0;
struct ldlm_interval *node;
- ENTRY;
/* Just return if mode is unchanged. */
if (new_mode == lock->l_granted_mode) {
*flags |= LDLM_FL_BLOCK_GRANTED;
- RETURN(lock->l_resource);
+ return lock->l_resource;
}
/* I can't check the type of lock here because the bitlock of lock
* is not held here, so do the allocation blindly. -jay */
OBD_SLAB_ALLOC_PTR_GFP(node, ldlm_interval_slab, __GFP_IO);
- if (node == NULL) /* Actually, this causes EDEADLOCK to be returned */
- RETURN(NULL);
+ if (node == NULL)
+ /* Actually, this causes EDEADLOCK to be returned */
+ return NULL;
LASSERTF((new_mode == LCK_PW && lock->l_granted_mode == LCK_PR),
"new_mode %u, granted %u\n", new_mode, lock->l_granted_mode);
@@ -2268,7 +2203,7 @@ struct ldlm_resource *ldlm_lock_convert(struct ldlm_lock *lock, int new_mode,
ldlm_run_ast_work(ns, &rpc_list, LDLM_WORK_CP_AST);
if (node)
OBD_SLAB_FREE(node, ldlm_interval_slab, sizeof(*node));
- RETURN(res);
+ return res;
}
EXPORT_SYMBOL(ldlm_lock_convert);
@@ -2337,91 +2272,90 @@ void _ldlm_lock_debug(struct ldlm_lock *lock,
switch (resource->lr_type) {
case LDLM_EXTENT:
libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
- "res: "LPU64"/"LPU64" rrc: %d type: %s ["LPU64"->"LPU64
- "] (req "LPU64"->"LPU64") flags: "LPX64" nid: %s remote:"
- " "LPX64" expref: %d pid: %u timeout: %lu lvb_type: %d\n",
- ldlm_lock_to_ns_name(lock), lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- resource->lr_name.name[0],
- resource->lr_name.name[1],
- atomic_read(&resource->lr_refcount),
- ldlm_typename[resource->lr_type],
- lock->l_policy_data.l_extent.start,
- lock->l_policy_data.l_extent.end,
- lock->l_req_extent.start, lock->l_req_extent.end,
- lock->l_flags, nid, lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
+ " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
+ "res: "DLDLMRES" rrc: %d type: %s ["LPU64"->"LPU64"] "
+ "(req "LPU64"->"LPU64") flags: "LPX64" nid: %s remote: "
+ LPX64" expref: %d pid: %u timeout: %lu lvb_type: %d\n",
+ ldlm_lock_to_ns_name(lock), lock,
+ lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_readers, lock->l_writers,
+ ldlm_lockname[lock->l_granted_mode],
+ ldlm_lockname[lock->l_req_mode],
+ PLDLMRES(resource),
+ atomic_read(&resource->lr_refcount),
+ ldlm_typename[resource->lr_type],
+ lock->l_policy_data.l_extent.start,
+ lock->l_policy_data.l_extent.end,
+ lock->l_req_extent.start, lock->l_req_extent.end,
+ lock->l_flags, nid, lock->l_remote_handle.cookie,
+ exp ? atomic_read(&exp->exp_refcount) : -99,
+ lock->l_pid, lock->l_callback_timeout,
+ lock->l_lvb_type);
break;
case LDLM_FLOCK:
libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
- "res: "LPU64"/"LPU64" rrc: %d type: %s pid: %d "
- "["LPU64"->"LPU64"] flags: "LPX64" nid: %s remote: "LPX64
- " expref: %d pid: %u timeout: %lu\n",
- ldlm_lock_to_ns_name(lock), lock,
- lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- resource->lr_name.name[0],
- resource->lr_name.name[1],
- atomic_read(&resource->lr_refcount),
- ldlm_typename[resource->lr_type],
- lock->l_policy_data.l_flock.pid,
- lock->l_policy_data.l_flock.start,
- lock->l_policy_data.l_flock.end,
- lock->l_flags, nid, lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout);
+ " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
+ "res: "DLDLMRES" rrc: %d type: %s pid: %d "
+ "["LPU64"->"LPU64"] flags: "LPX64" nid: %s "
+ "remote: "LPX64" expref: %d pid: %u timeout: %lu\n",
+ ldlm_lock_to_ns_name(lock), lock,
+ lock->l_handle.h_cookie, atomic_read(&lock->l_refc),
+ lock->l_readers, lock->l_writers,
+ ldlm_lockname[lock->l_granted_mode],
+ ldlm_lockname[lock->l_req_mode],
+ PLDLMRES(resource),
+ atomic_read(&resource->lr_refcount),
+ ldlm_typename[resource->lr_type],
+ lock->l_policy_data.l_flock.pid,
+ lock->l_policy_data.l_flock.start,
+ lock->l_policy_data.l_flock.end,
+ lock->l_flags, nid, lock->l_remote_handle.cookie,
+ exp ? atomic_read(&exp->exp_refcount) : -99,
+ lock->l_pid, lock->l_callback_timeout);
break;
case LDLM_IBITS:
libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
- "res: "LPU64"/"LPU64" bits "LPX64" rrc: %d type: %s "
- "flags: "LPX64" nid: %s remote: "LPX64" expref: %d "
- "pid: %u timeout: %lu lvb_type: %d\n",
- ldlm_lock_to_ns_name(lock),
- lock, lock->l_handle.h_cookie,
- atomic_read (&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- resource->lr_name.name[0],
- resource->lr_name.name[1],
- lock->l_policy_data.l_inodebits.bits,
- atomic_read(&resource->lr_refcount),
- ldlm_typename[resource->lr_type],
- lock->l_flags, nid, lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
+ " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
+ "res: "DLDLMRES" bits "LPX64" rrc: %d type: %s "
+ "flags: "LPX64" nid: %s remote: "LPX64" expref: %d "
+ "pid: %u timeout: %lu lvb_type: %d\n",
+ ldlm_lock_to_ns_name(lock),
+ lock, lock->l_handle.h_cookie,
+ atomic_read(&lock->l_refc),
+ lock->l_readers, lock->l_writers,
+ ldlm_lockname[lock->l_granted_mode],
+ ldlm_lockname[lock->l_req_mode],
+ PLDLMRES(resource),
+ lock->l_policy_data.l_inodebits.bits,
+ atomic_read(&resource->lr_refcount),
+ ldlm_typename[resource->lr_type],
+ lock->l_flags, nid, lock->l_remote_handle.cookie,
+ exp ? atomic_read(&exp->exp_refcount) : -99,
+ lock->l_pid, lock->l_callback_timeout,
+ lock->l_lvb_type);
break;
default:
libcfs_debug_vmsg2(msgdata, fmt, args,
- " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
- "res: "LPU64"/"LPU64" rrc: %d type: %s flags: "LPX64" "
- "nid: %s remote: "LPX64" expref: %d pid: %u timeout: %lu"
- "lvb_type: %d\n",
- ldlm_lock_to_ns_name(lock),
- lock, lock->l_handle.h_cookie,
- atomic_read (&lock->l_refc),
- lock->l_readers, lock->l_writers,
- ldlm_lockname[lock->l_granted_mode],
- ldlm_lockname[lock->l_req_mode],
- resource->lr_name.name[0],
- resource->lr_name.name[1],
- atomic_read(&resource->lr_refcount),
- ldlm_typename[resource->lr_type],
- lock->l_flags, nid, lock->l_remote_handle.cookie,
- exp ? atomic_read(&exp->exp_refcount) : -99,
- lock->l_pid, lock->l_callback_timeout, lock->l_lvb_type);
+ " ns: %s lock: %p/"LPX64" lrc: %d/%d,%d mode: %s/%s "
+ "res: "DLDLMRES" rrc: %d type: %s flags: "LPX64" "
+ "nid: %s remote: "LPX64" expref: %d pid: %u "
+ "timeout: %lu lvb_type: %d\n",
+ ldlm_lock_to_ns_name(lock),
+ lock, lock->l_handle.h_cookie,
+ atomic_read(&lock->l_refc),
+ lock->l_readers, lock->l_writers,
+ ldlm_lockname[lock->l_granted_mode],
+ ldlm_lockname[lock->l_req_mode],
+ PLDLMRES(resource),
+ atomic_read(&resource->lr_refcount),
+ ldlm_typename[resource->lr_type],
+ lock->l_flags, nid, lock->l_remote_handle.cookie,
+ exp ? atomic_read(&exp->exp_refcount) : -99,
+ lock->l_pid, lock->l_callback_timeout,
+ lock->l_lvb_type);
break;
}
va_end(args);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index 324d5e4286d..3916bda3004 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -127,12 +127,12 @@ struct ldlm_bl_work_item {
int ldlm_del_waiting_lock(struct ldlm_lock *lock)
{
- RETURN(0);
+ return 0;
}
int ldlm_refresh_waiting_lock(struct ldlm_lock *lock, int timeout)
{
- RETURN(0);
+ return 0;
}
@@ -146,7 +146,6 @@ void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
{
int do_ast;
- ENTRY;
LDLM_DEBUG(lock, "client blocking AST callback handler");
@@ -172,7 +171,6 @@ void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
LDLM_DEBUG(lock, "client blocking callback handler END");
LDLM_LOCK_RELEASE(lock);
- EXIT;
}
/**
@@ -188,7 +186,6 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
int lvb_len;
LIST_HEAD(ast_list);
int rc = 0;
- ENTRY;
LDLM_DEBUG(lock, "client completion callback handler START");
@@ -198,7 +195,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
schedule_timeout_and_set_state(
TASK_INTERRUPTIBLE, to);
if (lock->l_granted_mode == lock->l_req_mode ||
- lock->l_destroyed)
+ lock->l_flags & LDLM_FL_DESTROYED)
break;
}
}
@@ -238,7 +235,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
}
lock_res_and_lock(lock);
- if (lock->l_destroyed ||
+ if ((lock->l_flags & LDLM_FL_DESTROYED) ||
lock->l_granted_mode == lock->l_req_mode) {
/* bug 11300: the lock has already been granted */
unlock_res_and_lock(lock);
@@ -332,7 +329,6 @@ static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
struct ldlm_lock *lock)
{
int rc = -ENOSYS;
- ENTRY;
LDLM_DEBUG(lock, "client glimpse AST callback handler");
@@ -356,12 +352,10 @@ static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
if (ldlm_bl_to_thread_lock(ns, NULL, lock))
ldlm_handle_bl_callback(ns, NULL, lock);
- EXIT;
return;
}
unlock_res_and_lock(lock);
LDLM_LOCK_RELEASE(lock);
- EXIT;
}
static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
@@ -382,7 +376,6 @@ static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
ldlm_cancel_flags_t cancel_flags)
{
struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
- ENTRY;
spin_lock(&blp->blp_lock);
if (blwi->blwi_lock &&
@@ -402,7 +395,7 @@ static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
if (!(cancel_flags & LCF_ASYNC))
wait_for_completion(&blwi->blwi_comp);
- RETURN(0);
+ return 0;
}
static inline void init_blwi(struct ldlm_bl_work_item *blwi,
@@ -446,20 +439,18 @@ static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
struct list_head *cancels, int count,
ldlm_cancel_flags_t cancel_flags)
{
- ENTRY;
-
if (cancels && count == 0)
- RETURN(0);
+ return 0;
if (cancel_flags & LCF_ASYNC) {
struct ldlm_bl_work_item *blwi;
OBD_ALLOC(blwi, sizeof(*blwi));
if (blwi == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
init_blwi(blwi, ns, ld, cancels, count, lock, cancel_flags);
- RETURN(__ldlm_bl_to_thread(blwi, cancel_flags));
+ return __ldlm_bl_to_thread(blwi, cancel_flags);
} else {
/* if it is synchronous call do minimum mem alloc, as it could
* be triggered from kernel shrinker
@@ -468,7 +459,7 @@ static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
memset(&blwi, 0, sizeof(blwi));
init_blwi(&blwi, ns, ld, cancels, count, lock, cancel_flags);
- RETURN(__ldlm_bl_to_thread(&blwi, cancel_flags));
+ return __ldlm_bl_to_thread(&blwi, cancel_flags);
}
}
@@ -494,7 +485,6 @@ static int ldlm_handle_setinfo(struct ptlrpc_request *req)
void *val;
int keylen, vallen;
int rc = -ENOSYS;
- ENTRY;
DEBUG_REQ(D_HSM, req, "%s: handle setinfo\n", obd->obd_name);
@@ -503,14 +493,14 @@ static int ldlm_handle_setinfo(struct ptlrpc_request *req)
key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
if (key == NULL) {
DEBUG_REQ(D_IOCTL, req, "no set_info key");
- RETURN(-EFAULT);
+ return -EFAULT;
}
keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY,
RCL_CLIENT);
val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
if (val == NULL) {
DEBUG_REQ(D_IOCTL, req, "no set_info val");
- RETURN(-EFAULT);
+ return -EFAULT;
}
vallen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_VAL,
RCL_CLIENT);
@@ -552,9 +542,11 @@ static int ldlm_handle_qc_callback(struct ptlrpc_request *req)
oqctl = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
if (oqctl == NULL) {
CERROR("Can't unpack obd_quotactl\n");
- RETURN(-EPROTO);
+ return -EPROTO;
}
+ oqctl->qc_stat = ptlrpc_status_ntoh(oqctl->qc_stat);
+
cli->cl_qchk_stat = oqctl->qc_stat;
return 0;
}
@@ -566,7 +558,6 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
struct ldlm_request *dlm_req;
struct ldlm_lock *lock;
int rc;
- ENTRY;
/* Requests arrive in sender's byte order. The ptlrpc service
* handler has already checked and, if necessary, byte-swapped the
@@ -575,7 +566,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
/* do nothing for sec context finalize */
if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
- RETURN(0);
+ return 0;
req_capsule_init(&req->rq_pill, req, RCL_SERVER);
@@ -583,7 +574,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
rc = ldlm_callback_reply(req, -ENOTCONN);
ldlm_callback_errmsg(req, "Operate on unconnected server",
rc, NULL);
- RETURN(0);
+ return 0;
}
LASSERT(req->rq_export != NULL);
@@ -592,71 +583,71 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
switch (lustre_msg_get_opc(req->rq_reqmsg)) {
case LDLM_BL_CALLBACK:
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
- RETURN(0);
+ return 0;
break;
case LDLM_CP_CALLBACK:
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK_NET))
- RETURN(0);
+ return 0;
break;
case LDLM_GL_CALLBACK:
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK_NET))
- RETURN(0);
+ return 0;
break;
case LDLM_SET_INFO:
rc = ldlm_handle_setinfo(req);
ldlm_callback_reply(req, rc);
- RETURN(0);
+ return 0;
case OBD_LOG_CANCEL: /* remove this eventually - for 1.4.0 compat */
CERROR("shouldn't be handling OBD_LOG_CANCEL on DLM thread\n");
req_capsule_set(&req->rq_pill, &RQF_LOG_CANCEL);
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_NET))
- RETURN(0);
+ return 0;
rc = llog_origin_handle_cancel(req);
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOG_CANCEL_REP))
- RETURN(0);
+ return 0;
ldlm_callback_reply(req, rc);
- RETURN(0);
+ return 0;
case LLOG_ORIGIN_HANDLE_CREATE:
req_capsule_set(&req->rq_pill, &RQF_LLOG_ORIGIN_HANDLE_CREATE);
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
- RETURN(0);
+ return 0;
rc = llog_origin_handle_open(req);
ldlm_callback_reply(req, rc);
- RETURN(0);
+ return 0;
case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
req_capsule_set(&req->rq_pill,
&RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK);
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
- RETURN(0);
+ return 0;
rc = llog_origin_handle_next_block(req);
ldlm_callback_reply(req, rc);
- RETURN(0);
+ return 0;
case LLOG_ORIGIN_HANDLE_READ_HEADER:
req_capsule_set(&req->rq_pill,
&RQF_LLOG_ORIGIN_HANDLE_READ_HEADER);
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
- RETURN(0);
+ return 0;
rc = llog_origin_handle_read_header(req);
ldlm_callback_reply(req, rc);
- RETURN(0);
+ return 0;
case LLOG_ORIGIN_HANDLE_CLOSE:
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_LOGD_NET))
- RETURN(0);
+ return 0;
rc = llog_origin_handle_close(req);
ldlm_callback_reply(req, rc);
- RETURN(0);
+ return 0;
case OBD_QC_CALLBACK:
req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
- RETURN(0);
+ return 0;
rc = ldlm_handle_qc_callback(req);
ldlm_callback_reply(req, rc);
- RETURN(0);
+ return 0;
default:
CERROR("unknown opcode %u\n",
lustre_msg_get_opc(req->rq_reqmsg));
ldlm_callback_reply(req, -EPROTO);
- RETURN(0);
+ return 0;
}
ns = req->rq_export->exp_obd->obd_namespace;
@@ -669,7 +660,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
rc = ldlm_callback_reply(req, -EPROTO);
ldlm_callback_errmsg(req, "Operate without parameter", rc,
NULL);
- RETURN(0);
+ return 0;
}
/* Force a known safe race, send a cancel to the server for a lock
@@ -688,7 +679,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
rc = ldlm_callback_reply(req, -EINVAL);
ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
&dlm_req->lock_handle[0]);
- RETURN(0);
+ return 0;
}
if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
@@ -715,7 +706,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
rc = ldlm_callback_reply(req, -EINVAL);
ldlm_callback_errmsg(req, "Operate on stale lock", rc,
&dlm_req->lock_handle[0]);
- RETURN(0);
+ return 0;
}
/* BL_AST locks are not needed in LRU.
* Let ldlm_cancel_lru() be fast. */
@@ -761,7 +752,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
LBUG(); /* checked above */
}
- RETURN(0);
+ return 0;
}
@@ -805,7 +796,7 @@ static int ldlm_bl_thread_main(void *arg);
static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
{
struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
- task_t *task;
+ struct task_struct *task;
init_completion(&bltd.bltd_comp);
bltd.bltd_num = atomic_read(&blp->blp_num_threads);
@@ -832,7 +823,6 @@ static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
static int ldlm_bl_thread_main(void *arg)
{
struct ldlm_bl_pool *blp;
- ENTRY;
{
struct ldlm_bl_thread_data *bltd = arg;
@@ -904,7 +894,7 @@ static int ldlm_bl_thread_main(void *arg)
atomic_dec(&blp->blp_busy_threads);
atomic_dec(&blp->blp_num_threads);
complete(&blp->blp_comp);
- RETURN(0);
+ return 0;
}
@@ -914,7 +904,7 @@ static int ldlm_cleanup(void);
int ldlm_get_ref(void)
{
int rc = 0;
- ENTRY;
+
mutex_lock(&ldlm_ref_mutex);
if (++ldlm_refcount == 1) {
rc = ldlm_setup();
@@ -923,13 +913,12 @@ int ldlm_get_ref(void)
}
mutex_unlock(&ldlm_ref_mutex);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(ldlm_get_ref);
void ldlm_put_ref(void)
{
- ENTRY;
mutex_lock(&ldlm_ref_mutex);
if (ldlm_refcount == 1) {
int rc = ldlm_cleanup();
@@ -941,8 +930,6 @@ void ldlm_put_ref(void)
ldlm_refcount--;
}
mutex_unlock(&ldlm_ref_mutex);
-
- EXIT;
}
EXPORT_SYMBOL(ldlm_put_ref);
@@ -1016,8 +1003,6 @@ static cfs_hash_ops_t ldlm_export_lock_ops = {
int ldlm_init_export(struct obd_export *exp)
{
- ENTRY;
-
exp->exp_lock_hash =
cfs_hash_create(obd_uuid2str(&exp->exp_client_uuid),
HASH_EXP_LOCK_CUR_BITS,
@@ -1029,20 +1014,18 @@ int ldlm_init_export(struct obd_export *exp)
CFS_HASH_NBLK_CHANGE);
if (!exp->exp_lock_hash)
- RETURN(-ENOMEM);
+ return -ENOMEM;
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ldlm_init_export);
void ldlm_destroy_export(struct obd_export *exp)
{
- ENTRY;
cfs_hash_putref(exp->exp_lock_hash);
exp->exp_lock_hash = NULL;
ldlm_destroy_flock_export(exp);
- EXIT;
}
EXPORT_SYMBOL(ldlm_destroy_export);
@@ -1052,14 +1035,13 @@ static int ldlm_setup(void)
struct ldlm_bl_pool *blp = NULL;
int rc = 0;
int i;
- ENTRY;
if (ldlm_state != NULL)
- RETURN(-EALREADY);
+ return -EALREADY;
OBD_ALLOC(ldlm_state, sizeof(*ldlm_state));
if (ldlm_state == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
#ifdef LPROCFS
rc = ldlm_proc_setup();
@@ -1139,23 +1121,21 @@ static int ldlm_setup(void)
CERROR("Failed to initialize LDLM pools: %d\n", rc);
GOTO(out, rc);
}
- RETURN(0);
+ return 0;
out:
ldlm_cleanup();
- RETURN(rc);
+ return rc;
}
static int ldlm_cleanup(void)
{
- ENTRY;
-
if (!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
CERROR("ldlm still has namespaces; clean these up first.\n");
ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
- RETURN(-EBUSY);
+ return -EBUSY;
}
ldlm_pools_fini();
@@ -1188,7 +1168,7 @@ static int ldlm_cleanup(void)
OBD_FREE(ldlm_state, sizeof(*ldlm_state));
ldlm_state = NULL;
- RETURN(0);
+ return 0;
}
int ldlm_init(void)
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index b3b60288e5f..454027d68d5 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -142,7 +142,7 @@
*/
#define LDLM_POOL_SLV_SHIFT (10)
-extern proc_dir_entry_t *ldlm_ns_proc_dir;
+extern struct proc_dir_entry *ldlm_ns_proc_dir;
static inline __u64 dru(__u64 val, __u32 shift, int round_up)
{
@@ -335,17 +335,16 @@ static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
{
time_t recalc_interval_sec;
- ENTRY;
recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
if (recalc_interval_sec < pl->pl_recalc_period)
- RETURN(0);
+ return 0;
spin_lock(&pl->pl_lock);
recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
if (recalc_interval_sec < pl->pl_recalc_period) {
spin_unlock(&pl->pl_lock);
- RETURN(0);
+ return 0;
}
/*
* Recalc SLV after last period. This should be done
@@ -367,7 +366,7 @@ static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
recalc_interval_sec);
spin_unlock(&pl->pl_lock);
- RETURN(0);
+ return 0;
}
/**
@@ -394,7 +393,7 @@ static int ldlm_srv_pool_shrink(struct ldlm_pool *pl,
* and can't cancel anything. Let's catch this race.
*/
if (atomic_read(&pl->pl_granted) == 0)
- RETURN(0);
+ return 0;
spin_lock(&pl->pl_lock);
@@ -473,11 +472,10 @@ static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
{
time_t recalc_interval_sec;
- ENTRY;
recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
if (recalc_interval_sec < pl->pl_recalc_period)
- RETURN(0);
+ return 0;
spin_lock(&pl->pl_lock);
/*
@@ -486,7 +484,7 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
if (recalc_interval_sec < pl->pl_recalc_period) {
spin_unlock(&pl->pl_lock);
- RETURN(0);
+ return 0;
}
/*
@@ -503,7 +501,7 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
* Do not cancel locks in case lru resize is disabled for this ns.
*/
if (!ns_connect_lru_resize(ldlm_pl2ns(pl)))
- RETURN(0);
+ return 0;
/*
* In the time of canceling locks on client we do not need to maintain
@@ -511,8 +509,7 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
* It may be called when SLV has changed much, this is why we do not
* take into account pl->pl_recalc_time here.
*/
- RETURN(ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC,
- LDLM_CANCEL_LRUR));
+ return ldlm_cancel_lru(ldlm_pl2ns(pl), 0, LCF_ASYNC, LDLM_CANCEL_LRUR);
}
/**
@@ -532,7 +529,7 @@ static int ldlm_cli_pool_shrink(struct ldlm_pool *pl,
* Do not cancel locks in case lru resize is disabled for this ns.
*/
if (!ns_connect_lru_resize(ns))
- RETURN(0);
+ return 0;
/*
* Make sure that pool knows last SLV and Limit from obd.
@@ -578,7 +575,6 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
goto recalc;
spin_lock(&pl->pl_lock);
- recalc_interval_sec = cfs_time_current_sec() - pl->pl_recalc_time;
if (recalc_interval_sec > 0) {
/*
* Update pool statistics every 1s.
@@ -598,12 +594,12 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
count = pl->pl_ops->po_recalc(pl);
lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
count);
- return count;
}
+ recalc_interval_sec = pl->pl_recalc_time - cfs_time_current_sec() +
+ pl->pl_recalc_period;
- return 0;
+ return recalc_interval_sec;
}
-EXPORT_SYMBOL(ldlm_pool_recalc);
/**
* Pool shrink wrapper. Will call either client or server pool recalc callback
@@ -734,11 +730,10 @@ static int ldlm_pool_proc_init(struct ldlm_pool *pl)
struct lprocfs_vars pool_vars[2];
char *var_name = NULL;
int rc = 0;
- ENTRY;
OBD_ALLOC(var_name, MAX_STRING_SIZE + 1);
if (!var_name)
- RETURN(-ENOMEM);
+ return -ENOMEM;
parent_ns_proc = ns->ns_proc_dir_entry;
if (parent_ns_proc == NULL) {
@@ -751,6 +746,7 @@ static int ldlm_pool_proc_init(struct ldlm_pool *pl)
if (IS_ERR(pl->pl_proc_dir)) {
CERROR("LProcFS failed in ldlm-pool-init\n");
rc = PTR_ERR(pl->pl_proc_dir);
+ pl->pl_proc_dir = NULL;
GOTO(out_free_name, rc);
}
@@ -813,7 +809,6 @@ static int ldlm_pool_proc_init(struct ldlm_pool *pl)
"recalc_timing", "sec");
rc = lprocfs_register_stats(pl->pl_proc_dir, "stats", pl->pl_stats);
- EXIT;
out_free_name:
OBD_FREE(var_name, MAX_STRING_SIZE + 1);
return rc;
@@ -835,7 +830,6 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
int idx, ldlm_side_t client)
{
int rc;
- ENTRY;
spin_lock_init(&pl->pl_lock);
atomic_set(&pl->pl_granted, 0);
@@ -863,17 +857,16 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
pl->pl_client_lock_volume = 0;
rc = ldlm_pool_proc_init(pl);
if (rc)
- RETURN(rc);
+ return rc;
CDEBUG(D_DLMTRACE, "Lock pool %s is initialized\n", pl->pl_name);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(ldlm_pool_init);
void ldlm_pool_fini(struct ldlm_pool *pl)
{
- ENTRY;
ldlm_pool_proc_fini(pl);
/*
@@ -882,7 +875,6 @@ void ldlm_pool_fini(struct ldlm_pool *pl)
* any abnormal using cases.
*/
POISON(pl, 0x5a, sizeof(*pl));
- EXIT;
}
EXPORT_SYMBOL(ldlm_pool_fini);
@@ -1039,6 +1031,7 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
{
int total = 0, cached = 0, nr_ns;
struct ldlm_namespace *ns;
+ struct ldlm_namespace *ns_old = NULL; /* loop detection */
void *cookie;
if (client == LDLM_NAMESPACE_CLIENT && nr != 0 &&
@@ -1053,7 +1046,7 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
/*
* Find out how many resources we may release.
*/
- for (nr_ns = atomic_read(ldlm_namespace_nr(client));
+ for (nr_ns = ldlm_namespace_nr_read(client);
nr_ns > 0; nr_ns--)
{
mutex_lock(ldlm_namespace_lock(client));
@@ -1063,8 +1056,23 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
return 0;
}
ns = ldlm_namespace_first_locked(client);
+
+ if (ns == ns_old) {
+ mutex_unlock(ldlm_namespace_lock(client));
+ break;
+ }
+
+ if (ldlm_ns_empty(ns)) {
+ ldlm_namespace_move_to_inactive_locked(ns, client);
+ mutex_unlock(ldlm_namespace_lock(client));
+ continue;
+ }
+
+ if (ns_old == NULL)
+ ns_old = ns;
+
ldlm_namespace_get(ns);
- ldlm_namespace_move_locked(ns, client);
+ ldlm_namespace_move_to_active_locked(ns, client);
mutex_unlock(ldlm_namespace_lock(client));
total += ldlm_pool_shrink(&ns->ns_pool, 0, gfp_mask);
ldlm_namespace_put(ns);
@@ -1078,7 +1086,7 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
/*
* Shrink at least ldlm_namespace_nr(client) namespaces.
*/
- for (nr_ns = atomic_read(ldlm_namespace_nr(client));
+ for (nr_ns = ldlm_namespace_nr_read(client) - nr_ns;
nr_ns > 0; nr_ns--)
{
int cancel, nr_locks;
@@ -1099,7 +1107,7 @@ static int ldlm_pools_shrink(ldlm_side_t client, int nr,
}
ns = ldlm_namespace_first_locked(client);
ldlm_namespace_get(ns);
- ldlm_namespace_move_locked(ns, client);
+ ldlm_namespace_move_to_active_locked(ns, client);
mutex_unlock(ldlm_namespace_lock(client));
nr_locks = ldlm_pool_granted(&ns->ns_pool);
@@ -1128,11 +1136,13 @@ static int ldlm_pools_cli_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
shrink_param(sc, gfp_mask));
}
-void ldlm_pools_recalc(ldlm_side_t client)
+int ldlm_pools_recalc(ldlm_side_t client)
{
__u32 nr_l = 0, nr_p = 0, l;
struct ldlm_namespace *ns;
+ struct ldlm_namespace *ns_old = NULL;
int nr, equal = 0;
+ int time = 50; /* seconds of sleep if no active namespaces */
/*
* No need to setup pool limit for client pools.
@@ -1190,16 +1200,14 @@ void ldlm_pools_recalc(ldlm_side_t client)
* for _all_ pools.
*/
l = LDLM_POOL_HOST_L /
- atomic_read(
- ldlm_namespace_nr(client));
+ ldlm_namespace_nr_read(client);
} else {
/*
* All the rest of greedy pools will have
* all locks in equal parts.
*/
l = (LDLM_POOL_HOST_L - nr_l) /
- (atomic_read(
- ldlm_namespace_nr(client)) -
+ (ldlm_namespace_nr_read(client) -
nr_p);
}
ldlm_pool_setup(&ns->ns_pool, l);
@@ -1210,7 +1218,7 @@ void ldlm_pools_recalc(ldlm_side_t client)
/*
* Recalc at least ldlm_namespace_nr(client) namespaces.
*/
- for (nr = atomic_read(ldlm_namespace_nr(client)); nr > 0; nr--) {
+ for (nr = ldlm_namespace_nr_read(client); nr > 0; nr--) {
int skip;
/*
* Lock the list, get first @ns in the list, getref, move it
@@ -1226,6 +1234,30 @@ void ldlm_pools_recalc(ldlm_side_t client)
}
ns = ldlm_namespace_first_locked(client);
+ if (ns_old == ns) { /* Full pass complete */
+ mutex_unlock(ldlm_namespace_lock(client));
+ break;
+ }
+
+ /* We got an empty namespace, need to move it back to inactive
+ * list.
+ * The race with parallel resource creation is fine:
+ * - If they do namespace_get before our check, we fail the
+ * check and they move this item to the end of the list anyway
+ * - If we do the check and then they do namespace_get, then
+ * we move the namespace to inactive and they will move
+ * it back to active (synchronised by the lock, so no clash
+ * there).
+ */
+ if (ldlm_ns_empty(ns)) {
+ ldlm_namespace_move_to_inactive_locked(ns, client);
+ mutex_unlock(ldlm_namespace_lock(client));
+ continue;
+ }
+
+ if (ns_old == NULL)
+ ns_old = ns;
+
spin_lock(&ns->ns_lock);
/*
* skip ns which is being freed, and we don't want to increase
@@ -1239,24 +1271,29 @@ void ldlm_pools_recalc(ldlm_side_t client)
}
spin_unlock(&ns->ns_lock);
- ldlm_namespace_move_locked(ns, client);
+ ldlm_namespace_move_to_active_locked(ns, client);
mutex_unlock(ldlm_namespace_lock(client));
/*
* After setup is done - recalc the pool.
*/
if (!skip) {
- ldlm_pool_recalc(&ns->ns_pool);
+ int ttime = ldlm_pool_recalc(&ns->ns_pool);
+
+ if (ttime < time)
+ time = ttime;
+
ldlm_namespace_put(ns);
}
}
+ return time;
}
EXPORT_SYMBOL(ldlm_pools_recalc);
static int ldlm_pools_thread_main(void *arg)
{
struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
- ENTRY;
+ int s_time, c_time;
thread_set_flags(thread, SVC_RUNNING);
wake_up(&thread->t_ctl_waitq);
@@ -1270,14 +1307,14 @@ static int ldlm_pools_thread_main(void *arg)
/*
* Recal all pools on this tick.
*/
- ldlm_pools_recalc(LDLM_NAMESPACE_SERVER);
- ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);
+ s_time = ldlm_pools_recalc(LDLM_NAMESPACE_SERVER);
+ c_time = ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);
/*
* Wait until the next check time, or until we're
* stopped.
*/
- lwi = LWI_TIMEOUT(cfs_time_seconds(LDLM_POOLS_THREAD_PERIOD),
+ lwi = LWI_TIMEOUT(cfs_time_seconds(min(s_time, c_time)),
NULL, NULL);
l_wait_event(thread->t_ctl_waitq,
thread_is_stopping(thread) ||
@@ -1302,15 +1339,14 @@ static int ldlm_pools_thread_main(void *arg)
static int ldlm_pools_thread_start(void)
{
struct l_wait_info lwi = { 0 };
- task_t *task;
- ENTRY;
+ struct task_struct *task;
if (ldlm_pools_thread != NULL)
- RETURN(-EALREADY);
+ return -EALREADY;
OBD_ALLOC_PTR(ldlm_pools_thread);
if (ldlm_pools_thread == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
init_completion(&ldlm_pools_comp);
init_waitqueue_head(&ldlm_pools_thread->t_ctl_waitq);
@@ -1321,19 +1357,16 @@ static int ldlm_pools_thread_start(void)
CERROR("Can't start pool thread, error %ld\n", PTR_ERR(task));
OBD_FREE(ldlm_pools_thread, sizeof(*ldlm_pools_thread));
ldlm_pools_thread = NULL;
- RETURN(PTR_ERR(task));
+ return PTR_ERR(task);
}
l_wait_event(ldlm_pools_thread->t_ctl_waitq,
thread_is_running(ldlm_pools_thread), &lwi);
- RETURN(0);
+ return 0;
}
static void ldlm_pools_thread_stop(void)
{
- ENTRY;
-
if (ldlm_pools_thread == NULL) {
- EXIT;
return;
}
@@ -1348,13 +1381,11 @@ static void ldlm_pools_thread_stop(void)
wait_for_completion(&ldlm_pools_comp);
OBD_FREE_PTR(ldlm_pools_thread);
ldlm_pools_thread = NULL;
- EXIT;
}
int ldlm_pools_init(void)
{
int rc;
- ENTRY;
rc = ldlm_pools_thread_start();
if (rc == 0) {
@@ -1365,7 +1396,7 @@ int ldlm_pools_init(void)
set_shrinker(DEFAULT_SEEKS,
ldlm_pools_cli_shrink);
}
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(ldlm_pools_init);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index 1a690edaba0..21cb523ac4a 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -94,12 +94,11 @@ int ldlm_expired_completion_wait(void *data)
struct obd_import *imp;
struct obd_device *obd;
- ENTRY;
if (lock->l_conn_export == NULL) {
static cfs_time_t next_dump = 0, last_dump = 0;
if (ptlrpc_check_suspend())
- RETURN(0);
+ return 0;
LCONSOLE_WARN("lock timed out (enqueued at "CFS_TIME_T", "
CFS_DURATION_T"s ago)\n",
@@ -120,7 +119,7 @@ int ldlm_expired_completion_wait(void *data)
if (last_dump == 0)
libcfs_debug_dumplog();
}
- RETURN(0);
+ return 0;
}
obd = lock->l_conn_export->exp_obd;
@@ -132,7 +131,7 @@ int ldlm_expired_completion_wait(void *data)
cfs_time_sub(cfs_time_current_sec(), lock->l_last_activity),
obd2cli_tgt(obd), imp->imp_connection->c_remote_uuid.uuid);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ldlm_expired_completion_wait);
@@ -160,7 +159,7 @@ static int ldlm_completion_tail(struct ldlm_lock *lock)
long delay;
int result;
- if (lock->l_destroyed || lock->l_flags & LDLM_FL_FAILED) {
+ if (lock->l_flags & (LDLM_FL_DESTROYED | LDLM_FL_FAILED)) {
LDLM_DEBUG(lock, "client-side enqueue: destroyed");
result = -EIO;
} else {
@@ -184,23 +183,21 @@ static int ldlm_completion_tail(struct ldlm_lock *lock)
*/
int ldlm_completion_ast_async(struct ldlm_lock *lock, __u64 flags, void *data)
{
- ENTRY;
-
if (flags == LDLM_FL_WAIT_NOREPROC) {
LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
- RETURN(0);
+ return 0;
}
if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
LDLM_FL_BLOCK_CONV))) {
wake_up(&lock->l_waitq);
- RETURN(ldlm_completion_tail(lock));
+ return ldlm_completion_tail(lock);
}
LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
"going forward");
ldlm_reprocess_all(lock->l_resource);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ldlm_completion_ast_async);
@@ -234,7 +231,6 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
struct l_wait_info lwi;
__u32 timeout;
int rc = 0;
- ENTRY;
if (flags == LDLM_FL_WAIT_NOREPROC) {
LDLM_DEBUG(lock, "client-side enqueue waiting on pending lock");
@@ -244,7 +240,7 @@ int ldlm_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
if (!(flags & (LDLM_FL_BLOCK_WAIT | LDLM_FL_BLOCK_GRANTED |
LDLM_FL_BLOCK_CONV))) {
wake_up(&lock->l_waitq);
- RETURN(0);
+ return 0;
}
LDLM_DEBUG(lock, "client-side enqueue returned a blocked lock, "
@@ -295,10 +291,10 @@ noreproc:
if (rc) {
LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
rc);
- RETURN(rc);
+ return rc;
}
- RETURN(ldlm_completion_tail(lock));
+ return ldlm_completion_tail(lock);
}
EXPORT_SYMBOL(ldlm_completion_ast);
@@ -316,7 +312,6 @@ EXPORT_SYMBOL(ldlm_completion_ast);
int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock)
{
int do_ast;
- ENTRY;
lock->l_flags |= LDLM_FL_CBPENDING;
do_ast = (!lock->l_readers && !lock->l_writers);
@@ -335,7 +330,7 @@ int ldlm_blocking_ast_nocheck(struct ldlm_lock *lock)
LDLM_DEBUG(lock, "Lock still has references, will be "
"cancelled later");
}
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ldlm_blocking_ast_nocheck);
@@ -355,11 +350,9 @@ EXPORT_SYMBOL(ldlm_blocking_ast_nocheck);
int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
void *data, int flag)
{
- ENTRY;
-
if (flag == LDLM_CB_CANCELING) {
/* Don't need to do anything here. */
- RETURN(0);
+ return 0;
}
lock_res_and_lock(lock);
@@ -370,9 +363,9 @@ int ldlm_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
* early, if so. */
if (lock->l_blocking_ast != ldlm_blocking_ast) {
unlock_res_and_lock(lock);
- RETURN(0);
+ return 0;
}
- RETURN(ldlm_blocking_ast_nocheck(lock));
+ return ldlm_blocking_ast_nocheck(lock);
}
EXPORT_SYMBOL(ldlm_blocking_ast);
@@ -424,7 +417,6 @@ int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
.lcs_blocking = blocking,
.lcs_glimpse = glimpse,
};
- ENTRY;
LASSERT(!(*flags & LDLM_FL_REPLAY));
if (unlikely(ns_is_client(ns))) {
@@ -464,7 +456,6 @@ int ldlm_cli_enqueue_local(struct ldlm_namespace *ns,
lock->l_completion_ast(lock, *flags, NULL);
LDLM_DEBUG(lock, "client-side local enqueue handler, new lock created");
- EXIT;
out:
LDLM_LOCK_RELEASE(lock);
out_nolock:
@@ -530,13 +521,12 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
struct ldlm_reply *reply;
int cleanup_phase = 1;
int size = 0;
- ENTRY;
lock = ldlm_handle2lock(lockh);
/* ldlm_cli_enqueue is holding a reference on this lock. */
if (!lock) {
LASSERT(type == LDLM_FLOCK);
- RETURN(-ENOLCK);
+ return -ENOLCK;
}
LASSERTF(ergo(lvb_len != 0, lvb_len == lock->l_lvb_len),
@@ -698,7 +688,6 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
}
LDLM_DEBUG(lock, "client-side enqueue END");
- EXIT;
cleanup:
if (cleanup_phase == 1 && rc)
failed_lock_cleanup(ns, lock, mode);
@@ -763,7 +752,6 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
int flags, avail, to_free, pack = 0;
LIST_HEAD(head);
int rc;
- ENTRY;
if (cancels == NULL)
cancels = &head;
@@ -794,7 +782,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
rc = ptlrpc_request_pack(req, version, opc);
if (rc) {
ldlm_lock_list_put(cancels, l_bl_ast, count);
- RETURN(rc);
+ return rc;
}
if (ns_connect_cancelset(ns)) {
@@ -814,7 +802,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
} else {
ldlm_lock_list_put(cancels, l_bl_ast, count);
}
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ldlm_prep_elc_req);
@@ -830,21 +818,20 @@ struct ptlrpc_request *ldlm_enqueue_pack(struct obd_export *exp, int lvb_len)
{
struct ptlrpc_request *req;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE);
if (req == NULL)
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
if (rc) {
ptlrpc_request_free(req);
- RETURN(ERR_PTR(rc));
+ return ERR_PTR(rc);
}
req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, lvb_len);
ptlrpc_request_set_replen(req);
- RETURN(req);
+ return req;
}
EXPORT_SYMBOL(ldlm_enqueue_pack);
@@ -872,7 +859,6 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
int req_passed_in = 1;
int rc, err;
struct ptlrpc_request *req;
- ENTRY;
LASSERT(exp != NULL);
@@ -888,15 +874,14 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
} else {
const struct ldlm_callback_suite cbs = {
.lcs_completion = einfo->ei_cb_cp,
- .lcs_blocking = einfo->ei_cb_bl,
- .lcs_glimpse = einfo->ei_cb_gl,
- .lcs_weigh = einfo->ei_cb_wg
+ .lcs_blocking = einfo->ei_cb_bl,
+ .lcs_glimpse = einfo->ei_cb_gl
};
lock = ldlm_lock_create(ns, res_id, einfo->ei_type,
einfo->ei_mode, &cbs, einfo->ei_cbdata,
lvb_len, lvb_type);
if (lock == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
/* for the local lock, add the reference */
ldlm_lock_addref_internal(lock, einfo->ei_mode);
ldlm_lock2handle(lock, lockh);
@@ -937,7 +922,7 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
if (req == NULL) {
failed_lock_cleanup(ns, lock, einfo->ei_mode);
LDLM_LOCK_RELEASE(lock);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
req_passed_in = 0;
if (reqp)
@@ -978,7 +963,7 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
if (async) {
LASSERT(reqp != NULL);
- RETURN(0);
+ return 0;
}
LDLM_DEBUG(lock, "sending request");
@@ -1002,7 +987,7 @@ int ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp,
*reqp = NULL;
}
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(ldlm_cli_enqueue);
@@ -1011,7 +996,7 @@ static int ldlm_cli_convert_local(struct ldlm_lock *lock, int new_mode,
{
struct ldlm_resource *res;
int rc;
- ENTRY;
+
if (ns_is_client(ldlm_lock_to_ns(lock))) {
CERROR("Trying to cancel local lock\n");
LBUG();
@@ -1023,11 +1008,11 @@ static int ldlm_cli_convert_local(struct ldlm_lock *lock, int new_mode,
ldlm_reprocess_all(res);
rc = 0;
} else {
- rc = EDEADLOCK;
+ rc = LUSTRE_EDEADLK;
}
LDLM_DEBUG(lock, "client-side local convert handler END");
LDLM_LOCK_PUT(lock);
- RETURN(rc);
+ return rc;
}
/* FIXME: one of ldlm_cli_convert or the server side should reject attempted
@@ -1042,17 +1027,16 @@ int ldlm_cli_convert(struct lustre_handle *lockh, int new_mode, __u32 *flags)
struct ldlm_resource *res;
struct ptlrpc_request *req;
int rc;
- ENTRY;
lock = ldlm_handle2lock(lockh);
if (!lock) {
LBUG();
- RETURN(-EINVAL);
+ return -EINVAL;
}
*flags = 0;
if (lock->l_conn_export == NULL)
- RETURN(ldlm_cli_convert_local(lock, new_mode, flags));
+ return ldlm_cli_convert_local(lock, new_mode, flags);
LDLM_DEBUG(lock, "client-side convert");
@@ -1061,7 +1045,7 @@ int ldlm_cli_convert(struct lustre_handle *lockh, int new_mode, __u32 *flags)
LDLM_CONVERT);
if (req == NULL) {
LDLM_LOCK_PUT(lock);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
body = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
@@ -1095,9 +1079,8 @@ int ldlm_cli_convert(struct lustre_handle *lockh, int new_mode, __u32 *flags)
GOTO(out, rc);
}
} else {
- rc = EDEADLOCK;
+ rc = LUSTRE_EDEADLK;
}
- EXIT;
out:
LDLM_LOCK_PUT(lock);
ptlrpc_req_finished(req);
@@ -1115,7 +1098,6 @@ EXPORT_SYMBOL(ldlm_cli_convert);
static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
{
__u64 rc = LDLM_FL_LOCAL_ONLY;
- ENTRY;
if (lock->l_conn_export) {
bool local_only;
@@ -1147,7 +1129,7 @@ static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
ldlm_reprocess_all(lock->l_resource);
}
- RETURN(rc);
+ return rc;
}
/**
@@ -1159,7 +1141,6 @@ static void ldlm_cancel_pack(struct ptlrpc_request *req,
struct ldlm_request *dlm;
struct ldlm_lock *lock;
int max, packed = 0;
- ENTRY;
dlm = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
LASSERT(dlm != NULL);
@@ -1184,7 +1165,6 @@ static void ldlm_cancel_pack(struct ptlrpc_request *req,
packed++;
}
CDEBUG(D_DLMTRACE, "%d locks packed\n", packed);
- EXIT;
}
/**
@@ -1197,7 +1177,6 @@ int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
struct obd_import *imp;
int free, sent = 0;
int rc = 0;
- ENTRY;
LASSERT(exp != NULL);
LASSERT(count > 0);
@@ -1205,7 +1184,7 @@ int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
CFS_FAIL_TIMEOUT(OBD_FAIL_LDLM_PAUSE_CANCEL, cfs_fail_val);
if (CFS_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_RACE))
- RETURN(count);
+ return count;
free = ldlm_format_handles_avail(class_exp2cliimp(exp),
&RQF_LDLM_CANCEL, RCL_CLIENT, 0);
@@ -1217,7 +1196,7 @@ int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
if (imp == NULL || imp->imp_invalid) {
CDEBUG(D_DLMTRACE,
"skipping cancel on invalid import %p\n", imp);
- RETURN(count);
+ return count;
}
req = ptlrpc_request_alloc(imp, &RQF_LDLM_CANCEL);
@@ -1248,7 +1227,7 @@ int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
} else {
rc = ptlrpc_queue_wait(req);
}
- if (rc == ESTALE) {
+ if (rc == LUSTRE_ESTALE) {
CDEBUG(D_DLMTRACE, "client/server (nid %s) "
"out of sync -- not fatal\n",
libcfs_nid2str(req->rq_import->
@@ -1270,7 +1249,6 @@ int ldlm_cli_cancel_req(struct obd_export *exp, struct list_head *cancels,
}
ptlrpc_req_finished(req);
- EXIT;
out:
return sent ? sent : rc;
}
@@ -1290,14 +1268,14 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req)
struct obd_device *obd;
__u64 new_slv;
__u32 new_limit;
- ENTRY;
+
if (unlikely(!req->rq_import || !req->rq_import->imp_obd ||
!imp_connect_lru_resize(req->rq_import)))
{
/*
* Do nothing for corner cases.
*/
- RETURN(0);
+ return 0;
}
/* In some cases RPC may contain SLV and limit zeroed out. This
@@ -1311,7 +1289,7 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req)
"(SLV: "LPU64", Limit: %u)",
lustre_msg_get_slv(req->rq_repmsg),
lustre_msg_get_limit(req->rq_repmsg));
- RETURN(0);
+ return 0;
}
new_limit = lustre_msg_get_limit(req->rq_repmsg);
@@ -1328,7 +1306,7 @@ int ldlm_cli_update_pool(struct ptlrpc_request *req)
obd->obd_pool_limit = new_limit;
write_unlock(&obd->obd_pool_lock);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ldlm_cli_update_pool);
@@ -1346,19 +1324,18 @@ int ldlm_cli_cancel(struct lustre_handle *lockh,
struct ldlm_namespace *ns;
struct ldlm_lock *lock;
LIST_HEAD(cancels);
- ENTRY;
/* concurrent cancels on the same handle can happen */
lock = ldlm_handle2lock_long(lockh, LDLM_FL_CANCELING);
if (lock == NULL) {
LDLM_DEBUG_NOLOCK("lock is already being destroyed\n");
- RETURN(0);
+ return 0;
}
rc = ldlm_cli_cancel_local(lock);
if (rc == LDLM_FL_LOCAL_ONLY) {
LDLM_LOCK_RELEASE(lock);
- RETURN(0);
+ return 0;
}
/* Even if the lock is marked as LDLM_FL_BL_AST, this is a LDLM_CANCEL
* RPC which goes to canceld portal, so we can cancel other LRU locks
@@ -1380,7 +1357,7 @@ int ldlm_cli_cancel(struct lustre_handle *lockh,
LCF_BL_AST, flags);
}
ldlm_cli_cancel_list(&cancels, count, NULL, cancel_flags);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ldlm_cli_cancel);
@@ -1430,7 +1407,7 @@ int ldlm_cli_cancel_list_local(struct list_head *cancels, int count,
ldlm_cli_cancel_list(&head, bl_ast, NULL, 0);
}
- RETURN(count);
+ return count;
}
EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
@@ -1462,7 +1439,7 @@ static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns,
}
unlock_res_and_lock(lock);
- RETURN(result);
+ return result;
}
/**
@@ -1631,7 +1608,6 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, struct list_head *ca
ldlm_cancel_lru_policy_t pf;
struct ldlm_lock *lock, *next;
int added = 0, unused, remained;
- ENTRY;
spin_lock(&ns->ns_lock);
unused = ns->ns_nr_unused;
@@ -1754,7 +1730,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns, struct list_head *ca
unused--;
}
spin_unlock(&ns->ns_lock);
- RETURN(added);
+ return added;
}
int ldlm_cancel_lru_local(struct ldlm_namespace *ns, struct list_head *cancels,
@@ -1782,16 +1758,15 @@ int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
{
LIST_HEAD(cancels);
int count, rc;
- ENTRY;
/* Just prepare the list of locks, do not actually cancel them yet.
* Locks are cancelled later in a separate thread. */
count = ldlm_prepare_lru_list(ns, &cancels, nr, 0, flags);
rc = ldlm_bl_to_thread_list(ns, NULL, &cancels, count, cancel_flags);
if (rc == 0)
- RETURN(count);
+ return count;
- RETURN(0);
+ return 0;
}
/**
@@ -1807,7 +1782,6 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
{
struct ldlm_lock *lock;
int count = 0;
- ENTRY;
lock_res(res);
list_for_each_entry(lock, &res->lr_granted, l_res_link) {
@@ -1848,7 +1822,7 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
}
unlock_res(res);
- RETURN(ldlm_cli_cancel_list_local(cancels, count, cancel_flags));
+ return ldlm_cli_cancel_list_local(cancels, count, cancel_flags);
}
EXPORT_SYMBOL(ldlm_cancel_resource_local);
@@ -1867,10 +1841,9 @@ int ldlm_cli_cancel_list(struct list_head *cancels, int count,
{
struct ldlm_lock *lock;
int res = 0;
- ENTRY;
if (list_empty(cancels) || count == 0)
- RETURN(0);
+ return 0;
/* XXX: requests (both batched and not) could be sent in parallel.
* Usually it is enough to have just 1 RPC, but it is possible that
@@ -1906,7 +1879,7 @@ int ldlm_cli_cancel_list(struct list_head *cancels, int count,
ldlm_lock_list_put(cancels, l_bl_ast, res);
}
LASSERT(count == 0);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ldlm_cli_cancel_list);
@@ -1926,13 +1899,12 @@ int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
LIST_HEAD(cancels);
int count;
int rc;
- ENTRY;
res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
if (res == NULL) {
/* This is not a problem. */
CDEBUG(D_INFO, "No resource "LPU64"\n", res_id->name[0]);
- RETURN(0);
+ return 0;
}
LDLM_RESOURCE_ADDREF(res);
@@ -1944,7 +1916,7 @@ int ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns,
LDLM_RESOURCE_DELREF(res);
ldlm_resource_putref(res);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ldlm_cli_cancel_unused_resource);
@@ -1986,19 +1958,17 @@ int ldlm_cli_cancel_unused(struct ldlm_namespace *ns,
.lc_opaque = opaque,
};
- ENTRY;
-
if (ns == NULL)
- RETURN(ELDLM_OK);
+ return ELDLM_OK;
if (res_id != NULL) {
- RETURN(ldlm_cli_cancel_unused_resource(ns, res_id, NULL,
+ return ldlm_cli_cancel_unused_resource(ns, res_id, NULL,
LCK_MINMODE, flags,
- opaque));
+ opaque);
} else {
cfs_hash_for_each_nolock(ns->ns_rs_hash,
ldlm_cli_hash_cancel_unused, &arg);
- RETURN(ELDLM_OK);
+ return ELDLM_OK;
}
}
EXPORT_SYMBOL(ldlm_cli_cancel_unused);
@@ -2012,10 +1982,8 @@ int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
struct ldlm_lock *lock;
int rc = LDLM_ITER_CONTINUE;
- ENTRY;
-
if (!res)
- RETURN(LDLM_ITER_CONTINUE);
+ return LDLM_ITER_CONTINUE;
lock_res(res);
list_for_each_safe(tmp, next, &res->lr_granted) {
@@ -2040,7 +2008,7 @@ int ldlm_resource_foreach(struct ldlm_resource *res, ldlm_iterator_t iter,
}
out:
unlock_res(res);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(ldlm_resource_foreach);
@@ -2069,7 +2037,10 @@ void ldlm_namespace_foreach(struct ldlm_namespace *ns,
ldlm_iterator_t iter, void *closure)
{
- struct iter_helper_data helper = { iter: iter, closure: closure };
+ struct iter_helper_data helper = {
+ .iter = iter,
+ .closure = closure,
+ };
cfs_hash_for_each_nolock(ns->ns_rs_hash,
ldlm_res_iter_helper, &helper);
@@ -2088,7 +2059,6 @@ int ldlm_resource_iterate(struct ldlm_namespace *ns,
{
struct ldlm_resource *res;
int rc;
- ENTRY;
if (ns == NULL) {
CERROR("must pass in namespace\n");
@@ -2097,13 +2067,13 @@ int ldlm_resource_iterate(struct ldlm_namespace *ns,
res = ldlm_resource_get(ns, NULL, res_id, 0, 0);
if (res == NULL)
- RETURN(0);
+ return 0;
LDLM_RESOURCE_ADDREF(res);
rc = ldlm_resource_foreach(res, iter, data);
LDLM_RESOURCE_DELREF(res);
ldlm_resource_putref(res);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(ldlm_resource_iterate);
@@ -2137,7 +2107,6 @@ static int replay_lock_interpret(const struct lu_env *env,
struct ldlm_reply *reply;
struct obd_export *exp;
- ENTRY;
atomic_dec(&req->rq_import->imp_replay_inflight);
if (rc != ELDLM_OK)
GOTO(out, rc);
@@ -2178,7 +2147,7 @@ out:
if (rc != ELDLM_OK)
ptlrpc_connect_import(req->rq_import);
- RETURN(rc);
+ return rc;
}
static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
@@ -2187,13 +2156,11 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
struct ldlm_async_args *aa;
struct ldlm_request *body;
int flags;
- ENTRY;
-
/* Bug 11974: Do not replay a lock which is actively being canceled */
if (lock->l_flags & LDLM_FL_CANCELING) {
LDLM_DEBUG(lock, "Not replaying canceled lock:");
- RETURN(0);
+ return 0;
}
/* If this is reply-less callback lock, we cannot replay it, since
@@ -2202,7 +2169,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
LDLM_DEBUG(lock, "Not replaying reply-less lock:");
ldlm_lock_cancel(lock);
- RETURN(0);
+ return 0;
}
/*
@@ -2231,7 +2198,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
req = ptlrpc_request_alloc_pack(imp, &RQF_LDLM_ENQUEUE,
LUSTRE_DLM_VERSION, LDLM_ENQUEUE);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
/* We're part of recovery, so don't wait for it. */
req->rq_send_state = LUSTRE_IMP_REPLAY_LOCKS;
@@ -2261,7 +2228,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
req->rq_interpret_reply = (ptlrpc_interpterer_t)replay_lock_interpret;
ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
- RETURN(0);
+ return 0;
}
/**
@@ -2300,13 +2267,11 @@ int ldlm_replay_locks(struct obd_import *imp)
struct ldlm_lock *lock, *next;
int rc = 0;
- ENTRY;
-
LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
/* don't replay locks if import failed recovery */
if (imp->imp_vbr_failed)
- RETURN(0);
+ return 0;
/* ensure this doesn't fall to 0 before all have been queued */
atomic_inc(&imp->imp_replay_inflight);
@@ -2328,6 +2293,6 @@ int ldlm_replay_locks(struct obd_import *imp)
atomic_dec(&imp->imp_replay_inflight);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(ldlm_replay_locks);
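The ldlm_request.c hunks above follow a single pattern: the Lustre ENTRY/EXIT/RETURN tracing macros are dropped in favour of bare C returns (the libcfs_log_return() helper they leaned on is deleted later in this series, in libcfs/debug.c). As a rough sketch of what is being removed -- the exact macro bodies are assumptions here, not quoted from the tree -- the old macros only wrapped function entry and exit in a D_TRACE debug message:

/* Assumed shape of the macros this series removes: */
#define ENTRY	CDEBUG(D_TRACE, "Process entered\n")
#define EXIT	CDEBUG(D_TRACE, "Process leaving\n")
#define RETURN(rc)					\
do {							\
	CDEBUG(D_TRACE, "Process leaving\n");		\
	return (rc);					\
} while (0)

/* After the conversion the same exit point is simply: */
	return rc;	/* no trace message is emitted any more */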
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index 9052dc5e7ad..208751a154b 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -48,18 +48,23 @@
struct kmem_cache *ldlm_resource_slab, *ldlm_lock_slab;
-atomic_t ldlm_srv_namespace_nr = ATOMIC_INIT(0);
-atomic_t ldlm_cli_namespace_nr = ATOMIC_INIT(0);
+int ldlm_srv_namespace_nr = 0;
+int ldlm_cli_namespace_nr = 0;
struct mutex ldlm_srv_namespace_lock;
LIST_HEAD(ldlm_srv_namespace_list);
struct mutex ldlm_cli_namespace_lock;
-LIST_HEAD(ldlm_cli_namespace_list);
+/* Client Namespaces that have active resources in them.
+ * Once all resources go away, ldlm_poold moves such namespaces to the
+ * inactive list */
+LIST_HEAD(ldlm_cli_active_namespace_list);
+/* Client namespaces that don't have any locks in them */
+LIST_HEAD(ldlm_cli_inactive_namespace_list);
-proc_dir_entry_t *ldlm_type_proc_dir = NULL;
-proc_dir_entry_t *ldlm_ns_proc_dir = NULL;
-proc_dir_entry_t *ldlm_svc_proc_dir = NULL;
+struct proc_dir_entry *ldlm_type_proc_dir = NULL;
+struct proc_dir_entry *ldlm_ns_proc_dir = NULL;
+struct proc_dir_entry *ldlm_svc_proc_dir = NULL;
extern unsigned int ldlm_cancel_unused_locks_before_replay;
@@ -73,7 +78,7 @@ static ssize_t lprocfs_wr_dump_ns(struct file *file, const char *buffer,
{
ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
- RETURN(count);
+ return count;
}
LPROC_SEQ_FOPS_WR_ONLY(ldlm, dump_ns);
@@ -90,7 +95,6 @@ int ldlm_proc_setup(void)
{ "cancel_unused_locks_before_replay", &ldlm_rw_uint_fops,
&ldlm_cancel_unused_locks_before_replay },
{ NULL }};
- ENTRY;
LASSERT(ldlm_ns_proc_dir == NULL);
ldlm_type_proc_dir = lprocfs_register(OBD_LDLM_DEVICENAME,
@@ -122,7 +126,7 @@ int ldlm_proc_setup(void)
rc = lprocfs_add_vars(ldlm_type_proc_dir, list, NULL);
- RETURN(0);
+ return 0;
err_ns:
lprocfs_remove(&ldlm_ns_proc_dir);
@@ -132,7 +136,7 @@ err:
ldlm_svc_proc_dir = NULL;
ldlm_type_proc_dir = NULL;
ldlm_ns_proc_dir = NULL;
- RETURN(rc);
+ return rc;
}
void ldlm_proc_cleanup(void)
@@ -325,7 +329,7 @@ int ldlm_namespace_proc_register(struct ldlm_namespace *ns)
{
struct lprocfs_vars lock_vars[2];
char lock_name[MAX_STRING_SIZE + 1];
- proc_dir_entry_t *ns_pde;
+ struct proc_dir_entry *ns_pde;
LASSERT(ns != NULL);
LASSERT(ns->ns_rs_hash != NULL);
@@ -563,14 +567,13 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
cfs_hash_bd_t bd;
int idx;
int rc;
- ENTRY;
LASSERT(obd != NULL);
rc = ldlm_get_ref();
if (rc) {
CERROR("ldlm_get_ref failed: %d\n", rc);
- RETURN(NULL);
+ return NULL;
}
for (idx = 0;;idx++) {
@@ -636,7 +639,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
GOTO(out_hash, rc);
}
- idx = atomic_read(ldlm_namespace_nr(client));
+ idx = ldlm_namespace_nr_read(client);
rc = ldlm_pool_init(&ns->ns_pool, ns, idx, client);
if (rc) {
CERROR("Can't initialize lock pool, rc %d\n", rc);
@@ -644,7 +647,7 @@ struct ldlm_namespace *ldlm_namespace_new(struct obd_device *obd, char *name,
}
ldlm_namespace_register(ns, client);
- RETURN(ns);
+ return ns;
out_proc:
ldlm_namespace_proc_unregister(ns);
ldlm_namespace_cleanup(ns, 0);
@@ -654,7 +657,7 @@ out_ns:
OBD_FREE_PTR(ns);
out_ref:
ldlm_put_ref();
- RETURN(NULL);
+ return NULL;
}
EXPORT_SYMBOL(ldlm_namespace_new);
@@ -803,8 +806,6 @@ EXPORT_SYMBOL(ldlm_namespace_cleanup);
*/
static int __ldlm_namespace_free(struct ldlm_namespace *ns, int force)
{
- ENTRY;
-
/* At shutdown time, don't call the cancellation callback */
ldlm_namespace_cleanup(ns, force ? LDLM_FL_LOCAL_ONLY : 0);
@@ -836,13 +837,13 @@ force_wait:
"with %d resources in use, (rc=%d)\n",
ldlm_ns_name(ns),
atomic_read(&ns->ns_bref), rc);
- RETURN(ELDLM_NAMESPACE_EXISTS);
+ return ELDLM_NAMESPACE_EXISTS;
}
CDEBUG(D_DLMTRACE, "dlm namespace %s free done waiting\n",
ldlm_ns_name(ns));
}
- RETURN(ELDLM_OK);
+ return ELDLM_OK;
}
/**
@@ -859,9 +860,8 @@ void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
int force)
{
int rc;
- ENTRY;
+
if (!ns) {
- EXIT;
return;
}
@@ -886,7 +886,6 @@ void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
rc = __ldlm_namespace_free(ns, 1);
LASSERT(rc == 0);
}
- EXIT;
}
/**
@@ -896,9 +895,7 @@ void ldlm_namespace_free_prior(struct ldlm_namespace *ns,
*/
void ldlm_namespace_free_post(struct ldlm_namespace *ns)
{
- ENTRY;
if (!ns) {
- EXIT;
return;
}
@@ -917,7 +914,6 @@ void ldlm_namespace_free_post(struct ldlm_namespace *ns)
LASSERT(list_empty(&ns->ns_list_chain));
OBD_FREE_PTR(ns);
ldlm_put_ref();
- EXIT;
}
/**
@@ -953,6 +949,12 @@ void ldlm_namespace_get(struct ldlm_namespace *ns)
}
EXPORT_SYMBOL(ldlm_namespace_get);
+/* This is only for callers that care about refcount */
+int ldlm_namespace_get_return(struct ldlm_namespace *ns)
+{
+ return atomic_inc_return(&ns->ns_bref);
+}
+
void ldlm_namespace_put(struct ldlm_namespace *ns)
{
if (atomic_dec_and_lock(&ns->ns_bref, &ns->ns_lock)) {
@@ -967,8 +969,8 @@ void ldlm_namespace_register(struct ldlm_namespace *ns, ldlm_side_t client)
{
mutex_lock(ldlm_namespace_lock(client));
LASSERT(list_empty(&ns->ns_list_chain));
- list_add(&ns->ns_list_chain, ldlm_namespace_list(client));
- atomic_inc(ldlm_namespace_nr(client));
+ list_add(&ns->ns_list_chain, ldlm_namespace_inactive_list(client));
+ ldlm_namespace_nr_inc(client);
mutex_unlock(ldlm_namespace_lock(client));
}
@@ -981,12 +983,13 @@ void ldlm_namespace_unregister(struct ldlm_namespace *ns, ldlm_side_t client)
* using list_empty(&ns->ns_list_chain). This is why it is
* important to use list_del_init() here. */
list_del_init(&ns->ns_list_chain);
- atomic_dec(ldlm_namespace_nr(client));
+ ldlm_namespace_nr_dec(client);
mutex_unlock(ldlm_namespace_lock(client));
}
/** Should be called with ldlm_namespace_lock(client) taken. */
-void ldlm_namespace_move_locked(struct ldlm_namespace *ns, ldlm_side_t client)
+void ldlm_namespace_move_to_active_locked(struct ldlm_namespace *ns,
+ ldlm_side_t client)
{
LASSERT(!list_empty(&ns->ns_list_chain));
LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
@@ -994,6 +997,16 @@ void ldlm_namespace_move_locked(struct ldlm_namespace *ns, ldlm_side_t client)
}
/** Should be called with ldlm_namespace_lock(client) taken. */
+void ldlm_namespace_move_to_inactive_locked(struct ldlm_namespace *ns,
+ ldlm_side_t client)
+{
+ LASSERT(!list_empty(&ns->ns_list_chain));
+ LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
+ list_move_tail(&ns->ns_list_chain,
+ ldlm_namespace_inactive_list(client));
+}
+
+/** Should be called with ldlm_namespace_lock(client) taken. */
struct ldlm_namespace *ldlm_namespace_first_locked(ldlm_side_t client)
{
LASSERT(mutex_is_locked(ldlm_namespace_lock(client)));
@@ -1049,6 +1062,7 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
struct ldlm_resource *res;
cfs_hash_bd_t bd;
__u64 version;
+ int ns_refcount = 0;
LASSERT(ns != NULL);
LASSERT(parent == NULL);
@@ -1119,7 +1133,7 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
/* We won! Let's add the resource. */
cfs_hash_bd_add_locked(ns->ns_rs_hash, &bd, &res->lr_hash);
if (cfs_hash_bd_count_get(&bd) == 1)
- ldlm_namespace_get(ns);
+ ns_refcount = ldlm_namespace_get_return(ns);
cfs_hash_bd_unlock(ns->ns_rs_hash, &bd, 1);
if (ns->ns_lvbo && ns->ns_lvbo->lvbo_init) {
@@ -1128,8 +1142,9 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CREATE_RESOURCE, 2);
rc = ns->ns_lvbo->lvbo_init(res);
if (rc < 0) {
- CERROR("lvbo_init failed for resource "
- LPU64": rc %d\n", name->name[0], rc);
+ CERROR("%s: lvbo_init failed for resource "LPX64":"
+ LPX64": rc = %d\n", ns->ns_obd->obd_name,
+ name->name[0], name->name[1], rc);
if (res->lr_lvb_data) {
OBD_FREE(res->lr_lvb_data, res->lr_lvb_len);
res->lr_lvb_data = NULL;
@@ -1144,6 +1159,16 @@ ldlm_resource_get(struct ldlm_namespace *ns, struct ldlm_resource *parent,
/* We create resource with locked lr_lvb_mutex. */
mutex_unlock(&res->lr_lvb_mutex);
+ /* Let's see if we happened to be the very first resource in this
+ * namespace. If so, and this is a client namespace, we need to move
+ * the namespace into the active namespaces list to be patrolled by
+ * the ldlm_poold. */
+ if (ns_is_client(ns) && ns_refcount == 1) {
+ mutex_lock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
+ ldlm_namespace_move_to_active_locked(ns, LDLM_NAMESPACE_CLIENT);
+ mutex_unlock(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
+ }
+
return res;
}
EXPORT_SYMBOL(ldlm_resource_get);
@@ -1249,7 +1274,7 @@ void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
LDLM_DEBUG(lock, "About to add this lock:\n");
- if (lock->l_destroyed) {
+ if (lock->l_flags & LDLM_FL_DESTROYED) {
CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
return;
}
@@ -1274,7 +1299,7 @@ void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
ldlm_resource_dump(D_INFO, res);
LDLM_DEBUG(new, "About to insert this lock after %p:\n", original);
- if (new->l_destroyed) {
+ if (new->l_flags & LDLM_FL_DESTROYED) {
CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
goto out;
}
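The ldlm_resource.c changes above split the single client namespace list into active and inactive lists and add ldlm_namespace_get_return(), so that ldlm_resource_get() can detect the 0 -> 1 reference transition and move a namespace onto the active list for ldlm_poold to patrol. A minimal sketch of that idiom, with illustrative names (only atomic_inc_return() is the real kernel API):

#include <linux/atomic.h>

/* Return the post-increment count so the caller can see the
 * 0 -> 1 transition without a separate, racy atomic_read(). */
static int example_ref_get(atomic_t *ref)
{
	return atomic_inc_return(ref);
}

static void example_on_first_resource(atomic_t *ref)
{
	if (example_ref_get(ref) == 1) {
		/* Only the thread that created the very first resource
		 * gets here; take the namespace-list mutex and
		 * list_move_tail() the namespace onto the active list,
		 * as the real code does under
		 * ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT). */
	}
}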
diff --git a/drivers/staging/lustre/lustre/libcfs/Makefile b/drivers/staging/lustre/lustre/libcfs/Makefile
index bf5c563dcac..6e489d7aaa8 100644
--- a/drivers/staging/lustre/lustre/libcfs/Makefile
+++ b/drivers/staging/lustre/lustre/libcfs/Makefile
@@ -11,7 +11,7 @@ libcfs-linux-objs += linux-crypto-adler.o
libcfs-linux-objs := $(addprefix linux/,$(libcfs-linux-objs))
libcfs-all-objs := debug.o fail.o nidstrings.o module.o tracefile.o \
- watchdog.o libcfs_string.o hash.o kernel_user_comm.o \
+ libcfs_string.o hash.o kernel_user_comm.o \
prng.o workitem.o upcall_cache.o libcfs_cpu.o \
libcfs_mem.o libcfs_lock.o
diff --git a/drivers/staging/lustre/lustre/libcfs/debug.c b/drivers/staging/lustre/lustre/libcfs/debug.c
index 5a87b083207..9b9c45116ee 100644
--- a/drivers/staging/lustre/lustre/libcfs/debug.c
+++ b/drivers/staging/lustre/lustre/libcfs/debug.c
@@ -335,9 +335,10 @@ libcfs_debug_str2mask(int *mask, const char *str, int is_subsys)
*/
void libcfs_debug_dumplog_internal(void *arg)
{
- DECL_JOURNAL_DATA;
+ void *journal_info;
- PUSH_JOURNAL;
+ journal_info = current->journal_info;
+ current->journal_info = NULL;
if (strncmp(libcfs_debug_file_path_arr, "NONE", 4) != 0) {
snprintf(debug_file_name, sizeof(debug_file_name) - 1,
@@ -348,7 +349,8 @@ void libcfs_debug_dumplog_internal(void *arg)
cfs_tracefile_dump_all_pages(debug_file_name);
libcfs_run_debug_log_upcall(debug_file_name);
}
- POP_JOURNAL;
+
+ current->journal_info = journal_info;
}
int libcfs_debug_dumplog_thread(void *arg)
@@ -361,8 +363,7 @@ int libcfs_debug_dumplog_thread(void *arg)
void libcfs_debug_dumplog(void)
{
wait_queue_t wait;
- task_t *dumper;
- ENTRY;
+ struct task_struct *dumper;
/* we're being careful to ensure that the kernel thread is
* able to set our state to running as it exits before we
@@ -459,14 +460,6 @@ void libcfs_debug_set_level(unsigned int debug_level)
EXPORT_SYMBOL(libcfs_debug_set_level);
-long libcfs_log_return(struct libcfs_debug_msg_data *msgdata, long rc)
-{
- libcfs_debug_msg(msgdata, "Process leaving (rc=%lu : %ld : %lx)\n",
- rc, rc, rc);
- return rc;
-}
-EXPORT_SYMBOL(libcfs_log_return);
-
void libcfs_log_goto(struct libcfs_debug_msg_data *msgdata, const char *label,
long_ptr_t rc)
{
diff --git a/drivers/staging/lustre/lustre/libcfs/hash.c b/drivers/staging/lustre/lustre/libcfs/hash.c
index 98c76dfac3d..0dd12c8c91b 100644
--- a/drivers/staging/lustre/lustre/libcfs/hash.c
+++ b/drivers/staging/lustre/lustre/libcfs/hash.c
@@ -1026,8 +1026,6 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
cfs_hash_t *hs;
int len;
- ENTRY;
-
CLASSERT(CFS_HASH_THETA_BITS < 15);
LASSERT(name != NULL);
@@ -1055,7 +1053,7 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
LIBCFS_ALLOC(hs, offsetof(cfs_hash_t, hs_name[len]));
if (hs == NULL)
- RETURN(NULL);
+ return NULL;
strncpy(hs->hs_name, name, len);
hs->hs_name[len - 1] = '\0';
@@ -1087,7 +1085,7 @@ cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
return hs;
LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[len]));
- RETURN(NULL);
+ return NULL;
}
EXPORT_SYMBOL(cfs_hash_create);
@@ -1101,7 +1099,6 @@ cfs_hash_destroy(cfs_hash_t *hs)
struct hlist_node *pos;
cfs_hash_bd_t bd;
int i;
- ENTRY;
LASSERT(hs != NULL);
LASSERT(!cfs_hash_is_exiting(hs) &&
@@ -1152,8 +1149,6 @@ cfs_hash_destroy(cfs_hash_t *hs)
i = cfs_hash_with_bigname(hs) ?
CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[i]));
-
- EXIT;
}
cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs)
@@ -1449,7 +1444,6 @@ cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func,
int excl = !!remove_safe;
int loop = 0;
int i;
- ENTRY;
cfs_hash_for_each_enter(hs);
@@ -1489,7 +1483,7 @@ cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func,
cfs_hash_unlock(hs, 0);
cfs_hash_for_each_exit(hs);
- RETURN(count);
+ return count;
}
typedef struct {
@@ -1594,7 +1588,6 @@ cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data)
int stop_on_change;
int rc;
int i;
- ENTRY;
stop_on_change = cfs_hash_with_rehash_key(hs) ||
!cfs_hash_with_no_itemref(hs) ||
@@ -1649,23 +1642,21 @@ int
cfs_hash_for_each_nolock(cfs_hash_t *hs,
cfs_hash_for_each_cb_t func, void *data)
{
- ENTRY;
-
if (cfs_hash_with_no_lock(hs) ||
cfs_hash_with_rehash_key(hs) ||
!cfs_hash_with_no_itemref(hs))
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
if (CFS_HOP(hs, get) == NULL ||
(CFS_HOP(hs, put) == NULL &&
CFS_HOP(hs, put_locked) == NULL))
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
cfs_hash_for_each_enter(hs);
cfs_hash_for_each_relax(hs, func, data);
cfs_hash_for_each_exit(hs);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(cfs_hash_for_each_nolock);
@@ -1685,7 +1676,6 @@ cfs_hash_for_each_empty(cfs_hash_t *hs,
cfs_hash_for_each_cb_t func, void *data)
{
unsigned i = 0;
- ENTRY;
if (cfs_hash_with_no_lock(hs))
return -EOPNOTSUPP;
@@ -1701,7 +1691,7 @@ cfs_hash_for_each_empty(cfs_hash_t *hs,
hs->hs_name, i++);
}
cfs_hash_for_each_exit(hs);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(cfs_hash_for_each_empty);
diff --git a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c b/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
index d6d3b2e0f30..74a0db5c154 100644
--- a/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
+++ b/drivers/staging/lustre/lustre/libcfs/kernel_user_comm.c
@@ -246,10 +246,9 @@ EXPORT_SYMBOL(libcfs_kkuc_group_add);
int libcfs_kkuc_group_rem(int uid, int group)
{
struct kkuc_reg *reg, *next;
- ENTRY;
if (kkuc_groups[group].next == NULL)
- RETURN(0);
+ return 0;
if (uid == 0) {
/* Broadcast a shutdown message */
@@ -275,7 +274,7 @@ int libcfs_kkuc_group_rem(int uid, int group)
}
up_write(&kg_sem);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(libcfs_kkuc_group_rem);
@@ -284,7 +283,6 @@ int libcfs_kkuc_group_put(int group, void *payload)
struct kkuc_reg *reg;
int rc = 0;
int one_success = 0;
- ENTRY;
down_read(&kg_sem);
list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
@@ -305,7 +303,7 @@ int libcfs_kkuc_group_put(int group, void *payload)
if (one_success)
rc = 0;
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(libcfs_kkuc_group_put);
@@ -320,16 +318,15 @@ int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
{
struct kkuc_reg *reg;
int rc = 0;
- ENTRY;
if (group > KUC_GRP_MAX) {
CDEBUG(D_WARNING, "Kernelcomm: bad group %d\n", group);
- RETURN(-EINVAL);
+ return -EINVAL;
}
/* no link for this group */
if (kkuc_groups[group].next == NULL)
- RETURN(0);
+ return 0;
down_read(&kg_sem);
list_for_each_entry(reg, &kkuc_groups[group], kr_chain) {
@@ -339,7 +336,7 @@ int libcfs_kkuc_group_foreach(int group, libcfs_kkuc_cb_t cb_func,
}
up_read(&kg_sem);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(libcfs_kkuc_group_foreach);
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c b/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
index 8e88eb59dd5..1fb37008cda 100644
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
+++ b/drivers/staging/lustre/lustre/libcfs/libcfs_cpu.c
@@ -33,9 +33,6 @@
* Author: liang@whamcloud.com
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_LNET
#include <linux/libcfs/libcfs.h>
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c b/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c
index 8d6c4adf2ee..a2ce4c0eb3d 100644
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c
+++ b/drivers/staging/lustre/lustre/libcfs/libcfs_lock.c
@@ -30,9 +30,6 @@
* Author: liang@whamcloud.com
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_LNET
#include <linux/libcfs/libcfs.h>
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c b/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c
index 87913730348..feab537c728 100644
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c
+++ b/drivers/staging/lustre/lustre/libcfs/libcfs_mem.c
@@ -31,9 +31,6 @@
* Author: liang@whamcloud.com
*/
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
#define DEBUG_SUBSYSTEM S_LNET
#include <linux/libcfs/libcfs.h>
diff --git a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c b/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
index 9edccc99683..922debd0a41 100644
--- a/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
+++ b/drivers/staging/lustre/lustre/libcfs/libcfs_string.c
@@ -69,7 +69,6 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
const char *debugstr;
char op = 0;
int newmask = minmask, i, len, found = 0;
- ENTRY;
/* <str> must be a list of tokens separated by whitespace
* and optionally an operator ('+' or '-'). If an operator
@@ -132,54 +131,6 @@ int cfs_str2mask(const char *str, const char *(*bit2str)(int bit),
}
EXPORT_SYMBOL(cfs_str2mask);
-/* Duplicate a string in a platform-independent way */
-char *cfs_strdup(const char *str, u_int32_t flags)
-{
- size_t lenz; /* length of str + zero byte */
- char *dup_str;
-
- lenz = strlen(str) + 1;
-
- dup_str = kmalloc(lenz, flags);
- if (dup_str == NULL)
- return NULL;
-
- memcpy(dup_str, str, lenz);
-
- return dup_str;
-}
-EXPORT_SYMBOL(cfs_strdup);
-
-/**
- * cfs_{v}snprintf() return the actual size that is printed rather than
- * the size that would be printed in standard functions.
- */
-/* safe vsnprintf */
-int cfs_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
-{
- int i;
-
- LASSERT(size > 0);
- i = vsnprintf(buf, size, fmt, args);
-
- return (i >= size ? size - 1 : i);
-}
-EXPORT_SYMBOL(cfs_vsnprintf);
-
-/* safe snprintf */
-int cfs_snprintf(char *buf, size_t size, const char *fmt, ...)
-{
- va_list args;
- int i;
-
- va_start(args, fmt);
- i = cfs_vsnprintf(buf, size, fmt, args);
- va_end(args);
-
- return i;
-}
-EXPORT_SYMBOL(cfs_snprintf);
-
/* get the first string out of @str */
char *cfs_firststr(char *str, size_t size)
{
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
index 95142d19097..00ab8fdc105 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
@@ -76,62 +76,22 @@ struct cfs_cpt_data {
static struct cfs_cpt_data cpt_data;
-void
-cfs_cpu_core_siblings(int cpu, cpumask_t *mask)
+static void cfs_cpu_core_siblings(int cpu, cpumask_t *mask)
{
/* return cpumask of cores in the same socket */
cpumask_copy(mask, topology_core_cpumask(cpu));
}
-EXPORT_SYMBOL(cfs_cpu_core_siblings);
-
-/* return number of cores in the same socket of \a cpu */
-int
-cfs_cpu_core_nsiblings(int cpu)
-{
- int num;
-
- down(&cpt_data.cpt_mutex);
-
- cfs_cpu_core_siblings(cpu, cpt_data.cpt_cpumask);
- num = cpus_weight(*cpt_data.cpt_cpumask);
-
- up(&cpt_data.cpt_mutex);
-
- return num;
-}
-EXPORT_SYMBOL(cfs_cpu_core_nsiblings);
/* return cpumask of HTs in the same core */
-void
-cfs_cpu_ht_siblings(int cpu, cpumask_t *mask)
+static void cfs_cpu_ht_siblings(int cpu, cpumask_t *mask)
{
cpumask_copy(mask, topology_thread_cpumask(cpu));
}
-EXPORT_SYMBOL(cfs_cpu_ht_siblings);
-/* return number of HTs in the same core of \a cpu */
-int
-cfs_cpu_ht_nsiblings(int cpu)
-{
- int num;
-
- down(&cpt_data.cpt_mutex);
-
- cfs_cpu_ht_siblings(cpu, cpt_data.cpt_cpumask);
- num = cpus_weight(*cpt_data.cpt_cpumask);
-
- up(&cpt_data.cpt_mutex);
-
- return num;
-}
-EXPORT_SYMBOL(cfs_cpu_ht_nsiblings);
-
-void
-cfs_node_to_cpumask(int node, cpumask_t *mask)
+static void cfs_node_to_cpumask(int node, cpumask_t *mask)
{
cpumask_copy(mask, cpumask_of_node(node));
}
-EXPORT_SYMBOL(cfs_node_to_cpumask);
void
cfs_cpt_table_free(struct cfs_cpt_table *cptab)
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
index 8e35777b4da..b6c79bc177a 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-crypto.c
@@ -274,6 +274,8 @@ static int adler32;
int cfs_crypto_register(void)
{
+ request_module("crc32c");
+
adler32 = cfs_crypto_adler32_register();
/* check all algorithms and do performance test */
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
index f236510a2f3..ea9e9490031 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-curproc.c
@@ -65,20 +65,6 @@ int cfs_curproc_groups_nr(void)
return nr;
}
-void cfs_curproc_groups_dump(gid_t *array, int size)
-{
- task_lock(current);
- size = min_t(int, size, current_cred()->group_info->ngroups);
- memcpy(array, current_cred()->group_info->blocks[0], size * sizeof(__u32));
- task_unlock(current);
-}
-
-
-int current_is_in_group(gid_t gid)
-{
- return in_group_p(gid);
-}
-
/* Currently all the CFS_CAP_* defines match CAP_* ones. */
#define cfs_cap_pack(cap) (cap)
#define cfs_cap_unpack(cap) (cap)
@@ -226,16 +212,15 @@ int cfs_get_environ(const char *key, char *value, int *val_len)
int key_len = strlen(key);
unsigned long addr;
int rc;
- ENTRY;
buffer = kmalloc(buf_len, GFP_USER);
if (!buffer)
- RETURN(-ENOMEM);
+ return -ENOMEM;
mm = get_task_mm(current);
if (!mm) {
kfree(buffer);
- RETURN(-EINVAL);
+ return -EINVAL;
}
/* Avoid deadlocks on mmap_sem if called from sys_mmap_pgoff(),
@@ -318,8 +303,6 @@ out:
EXPORT_SYMBOL(cfs_get_environ);
EXPORT_SYMBOL(cfs_curproc_groups_nr);
-EXPORT_SYMBOL(cfs_curproc_groups_dump);
-EXPORT_SYMBOL(current_is_in_group);
EXPORT_SYMBOL(cfs_cap_raise);
EXPORT_SYMBOL(cfs_cap_lower);
EXPORT_SYMBOL(cfs_cap_raised);
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c
index e2c195b8dd5..ab1e7316847 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-debug.c
@@ -48,14 +48,10 @@
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/interrupt.h>
-#include <asm/uaccess.h>
#include <linux/completion.h>
-
#include <linux/fs.h>
-#include <linux/stat.h>
#include <asm/uaccess.h>
#include <linux/miscdevice.h>
-#include <linux/version.h>
# define DEBUG_SUBSYSTEM S_LNET
@@ -82,7 +78,6 @@ void libcfs_run_debug_log_upcall(char *file)
"HOME=/",
"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
NULL};
- ENTRY;
argv[0] = lnet_debug_log_upcall;
@@ -100,8 +95,6 @@ void libcfs_run_debug_log_upcall(char *file)
CDEBUG(D_HA, "Invoked LNET debug log upcall %s %s\n",
argv[0], argv[1]);
}
-
- EXIT;
}
void libcfs_run_upcall(char **argv)
@@ -112,7 +105,6 @@ void libcfs_run_upcall(char **argv)
"HOME=/",
"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
NULL};
- ENTRY;
argv[0] = lnet_upcall;
argc = 1;
@@ -145,7 +137,6 @@ void libcfs_run_lbug_upcall(struct libcfs_debug_msg_data *msgdata)
char *argv[6];
char buf[32];
- ENTRY;
snprintf (buf, sizeof buf, "%d", msgdata->msg_line);
argv[1] = "LBUG";
@@ -168,7 +159,7 @@ void lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
/* not reached */
}
- libcfs_debug_dumpstack(NULL);
+ dump_stack();
if (!libcfs_panic_on_lbug)
libcfs_debug_dumplog();
libcfs_run_lbug_upcall(msgdata);
@@ -179,54 +170,6 @@ void lbug_with_loc(struct libcfs_debug_msg_data *msgdata)
schedule();
}
-
-#include <linux/nmi.h>
-#include <asm/stacktrace.h>
-
-
-static int print_trace_stack(void *data, char *name)
-{
- printk(" <%s> ", name);
- return 0;
-}
-
-# define RELIABLE reliable
-# define DUMP_TRACE_CONST const
-static void print_trace_address(void *data, unsigned long addr, int reliable)
-{
- char fmt[32];
- touch_nmi_watchdog();
- sprintf(fmt, " [<%016lx>] %s%%s\n", addr, RELIABLE ? "": "? ");
- __print_symbol(fmt, addr);
-}
-
-static DUMP_TRACE_CONST struct stacktrace_ops print_trace_ops = {
- .stack = print_trace_stack,
- .address = print_trace_address,
- .walk_stack = print_context_stack,
-};
-
-void libcfs_debug_dumpstack(struct task_struct *tsk)
-{
- /* dump_stack() */
- /* show_trace() */
- if (tsk == NULL)
- tsk = current;
- printk("Pid: %d, comm: %.20s\n", tsk->pid, tsk->comm);
- /* show_trace_log_lvl() */
- printk("\nCall Trace:\n");
- dump_trace(tsk, NULL, NULL,
- 0,
- &print_trace_ops, NULL);
- printk("\n");
-}
-
-task_t *libcfs_current(void)
-{
- CWARN("current task struct is %p\n", current);
- return current;
-}
-
static int panic_notifier(struct notifier_block *self, unsigned long unused1,
void *unused2)
{
@@ -240,9 +183,9 @@ static int panic_notifier(struct notifier_block *self, unsigned long unused1,
}
static struct notifier_block libcfs_panic_notifier = {
- notifier_call : panic_notifier,
- next : NULL,
- priority : 10000
+ .notifier_call = panic_notifier,
+ .next = NULL,
+ .priority = 10000,
};
void libcfs_register_panic_notifier(void)
@@ -255,10 +198,6 @@ void libcfs_unregister_panic_notifier(void)
atomic_notifier_chain_unregister(&panic_notifier_list, &libcfs_panic_notifier);
}
-EXPORT_SYMBOL(libcfs_debug_dumpstack);
-EXPORT_SYMBOL(libcfs_current);
-
-
EXPORT_SYMBOL(libcfs_run_upcall);
EXPORT_SYMBOL(libcfs_run_lbug_upcall);
EXPORT_SYMBOL(lbug_with_loc);
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
index 2c7d4a3d660..55296a3591d 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-module.c
@@ -45,38 +45,37 @@ int libcfs_ioctl_getdata(char *buf, char *end, void *arg)
struct libcfs_ioctl_hdr *hdr;
struct libcfs_ioctl_data *data;
int err;
- ENTRY;
hdr = (struct libcfs_ioctl_hdr *)buf;
data = (struct libcfs_ioctl_data *)buf;
err = copy_from_user(buf, (void *)arg, sizeof(*hdr));
if (err)
- RETURN(err);
+ return err;
if (hdr->ioc_version != LIBCFS_IOCTL_VERSION) {
CERROR("PORTALS: version mismatch kernel vs application\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (hdr->ioc_len + buf >= end) {
CERROR("PORTALS: user buffer exceeds kernel buffer\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (hdr->ioc_len < sizeof(struct libcfs_ioctl_data)) {
CERROR("PORTALS: user buffer too small for ioctl\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
err = copy_from_user(buf, (void *)arg, hdr->ioc_len);
if (err)
- RETURN(err);
+ return err;
if (libcfs_ioctl_is_invalid(data)) {
CERROR("PORTALS: ioctl not correctly formatted\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (data->ioc_inllen1)
@@ -86,7 +85,7 @@ int libcfs_ioctl_getdata(char *buf, char *end, void *arg)
data->ioc_inlbuf2 = &data->ioc_bulk[0] +
cfs_size_round(data->ioc_inllen1);
- RETURN(0);
+ return 0;
}
int libcfs_ioctl_popdata(void *arg, void *data, int size)
@@ -137,7 +136,7 @@ static long libcfs_ioctl(struct file *file,
struct cfs_psdev_file pfile;
int rc = 0;
- if (current_fsuid() != 0)
+ if (!capable(CAP_SYS_ADMIN))
return -EACCES;
if ( _IOC_TYPE(cmd) != IOC_LIBCFS_TYPE ||
@@ -171,13 +170,13 @@ static long libcfs_ioctl(struct file *file,
}
static struct file_operations libcfs_fops = {
- unlocked_ioctl: libcfs_ioctl,
- open : libcfs_psdev_open,
- release : libcfs_psdev_release
+ .unlocked_ioctl = libcfs_ioctl,
+ .open = libcfs_psdev_open,
+ .release = libcfs_psdev_release,
};
-psdev_t libcfs_dev = {
- LNET_MINOR,
- "lnet",
- &libcfs_fops
+struct miscdevice libcfs_dev = {
+ .minor = LNET_MINOR,
+ .name = "lnet",
+ .fops = &libcfs_fops,
};
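The linux-debug.c and linux-module.c hunks above also modernise the struct initializers: the GCC-only `field : value` form (and the positional psdev_t initializer) become C99 designated initializers, and the character-device wrapper becomes a plain struct miscdevice. A minimal sketch of the resulting pattern -- the device name, fops contents, and the registration calls are illustrative assumptions, not part of these hunks:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>

static const struct file_operations example_fops = {
	.owner		= THIS_MODULE,
	/* .unlocked_ioctl, .open, .release handlers would go here */
};

static struct miscdevice example_dev = {
	.minor	= MISC_DYNAMIC_MINOR,	/* libcfs keeps its fixed LNET_MINOR */
	.name	= "example",
	.fops	= &example_fops,
};

/* Typical lifecycle: misc_register(&example_dev) at module init,
 * misc_deregister(&example_dev) at module exit. */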
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
index b652a79a481..cc9829ffbdc 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-prim.c
@@ -81,22 +81,21 @@ add_wait_queue_exclusive_head(wait_queue_head_t *waitq, wait_queue_t *link)
EXPORT_SYMBOL(add_wait_queue_exclusive_head);
void
-waitq_wait(wait_queue_t *link, cfs_task_state_t state)
+waitq_wait(wait_queue_t *link, long state)
{
schedule();
}
EXPORT_SYMBOL(waitq_wait);
int64_t
-waitq_timedwait(wait_queue_t *link, cfs_task_state_t state,
- int64_t timeout)
+waitq_timedwait(wait_queue_t *link, long state, int64_t timeout)
{
return schedule_timeout(timeout);
}
EXPORT_SYMBOL(waitq_timedwait);
void
-schedule_timeout_and_set_state(cfs_task_state_t state, int64_t timeout)
+schedule_timeout_and_set_state(long state, int64_t timeout)
{
set_current_state(state);
schedule_timeout(timeout);
@@ -112,13 +111,13 @@ cfs_pause(cfs_duration_t ticks)
}
EXPORT_SYMBOL(cfs_pause);
-void cfs_init_timer(timer_list_t *t)
+void cfs_init_timer(struct timer_list *t)
{
init_timer(t);
}
EXPORT_SYMBOL(cfs_init_timer);
-void cfs_timer_init(timer_list_t *t, cfs_timer_func_t *func, void *arg)
+void cfs_timer_init(struct timer_list *t, cfs_timer_func_t *func, void *arg)
{
init_timer(t);
t->function = func;
@@ -126,31 +125,31 @@ void cfs_timer_init(timer_list_t *t, cfs_timer_func_t *func, void *arg)
}
EXPORT_SYMBOL(cfs_timer_init);
-void cfs_timer_done(timer_list_t *t)
+void cfs_timer_done(struct timer_list *t)
{
return;
}
EXPORT_SYMBOL(cfs_timer_done);
-void cfs_timer_arm(timer_list_t *t, cfs_time_t deadline)
+void cfs_timer_arm(struct timer_list *t, cfs_time_t deadline)
{
mod_timer(t, deadline);
}
EXPORT_SYMBOL(cfs_timer_arm);
-void cfs_timer_disarm(timer_list_t *t)
+void cfs_timer_disarm(struct timer_list *t)
{
del_timer(t);
}
EXPORT_SYMBOL(cfs_timer_disarm);
-int cfs_timer_is_armed(timer_list_t *t)
+int cfs_timer_is_armed(struct timer_list *t)
{
return timer_pending(t);
}
EXPORT_SYMBOL(cfs_timer_is_armed);
-cfs_time_t cfs_timer_deadline(timer_list_t *t)
+cfs_time_t cfs_timer_deadline(struct timer_list *t)
{
return t->expires;
}
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
index 522b28e99e4..fc6c9774948 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
@@ -54,9 +54,7 @@
#include <linux/fs.h>
#include <linux/file.h>
-#include <linux/stat.h>
#include <linux/list.h>
-#include <asm/uaccess.h>
#include <linux/proc_fs.h>
#include <linux/sysctl.h>
@@ -564,7 +562,7 @@ int insert_proc(void)
{
#ifdef CONFIG_SYSCTL
if (lnet_table_header == NULL)
- lnet_table_header = cfs_register_sysctl_table(top_table, 0);
+ lnet_table_header = register_sysctl_table(top_table);
#endif
return 0;
}
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
index 855c7e87d96..e6069d78af6 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-tcpip.c
@@ -36,7 +36,6 @@
#define DEBUG_SUBSYSTEM S_LNET
#include <linux/libcfs/libcfs.h>
-#include <linux/libcfs/libcfs.h>
#include <linux/if.h>
#include <linux/in.h>
@@ -641,8 +640,8 @@ libcfs_sock_connect (struct socket **sockp, int *fatal,
*fatal = !(rc == -EADDRNOTAVAIL);
CDEBUG_LIMIT(*fatal ? D_NETERROR : D_NET,
- "Error %d connecting %u.%u.%u.%u/%d -> %u.%u.%u.%u/%d\n", rc,
- HIPQUAD(local_ip), local_port, HIPQUAD(peer_ip), peer_port);
+ "Error %d connecting %pI4h/%d -> %pI4h/%d\n", rc,
+ &local_ip, local_port, &peer_ip, peer_port);
sock_release(*sockp);
return rc;
diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c
index 6f563436a25..162beee24a7 100644
--- a/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c
+++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-tracefile.c
@@ -51,7 +51,7 @@ char *cfs_trace_console_buffers[NR_CPUS][CFS_TCD_TYPE_MAX];
struct rw_semaphore cfs_tracefile_sem;
-int cfs_tracefile_init_arch()
+int cfs_tracefile_init_arch(void)
{
int i;
int j;
@@ -96,7 +96,7 @@ out:
return -ENOMEM;
}
-void cfs_tracefile_fini_arch()
+void cfs_tracefile_fini_arch(void)
{
int i;
int j;
@@ -116,27 +116,27 @@ void cfs_tracefile_fini_arch()
fini_rwsem(&cfs_tracefile_sem);
}
-void cfs_tracefile_read_lock()
+void cfs_tracefile_read_lock(void)
{
down_read(&cfs_tracefile_sem);
}
-void cfs_tracefile_read_unlock()
+void cfs_tracefile_read_unlock(void)
{
up_read(&cfs_tracefile_sem);
}
-void cfs_tracefile_write_lock()
+void cfs_tracefile_write_lock(void)
{
down_write(&cfs_tracefile_sem);
}
-void cfs_tracefile_write_unlock()
+void cfs_tracefile_write_unlock(void)
{
up_write(&cfs_tracefile_sem);
}
-cfs_trace_buf_type_t cfs_trace_buf_idx_get()
+cfs_trace_buf_type_t cfs_trace_buf_idx_get(void)
{
if (in_irq())
return CFS_TCD_TYPE_IRQ;
@@ -269,7 +269,7 @@ void cfs_print_to_console(struct ptldebug_header *hdr, int mask,
int cfs_trace_max_debug_mb(void)
{
- int total_mb = (num_physpages >> (20 - PAGE_SHIFT));
+ int total_mb = (totalram_pages >> (20 - PAGE_SHIFT));
return MAX(512, (total_mb * 80)/100);
}
diff --git a/drivers/staging/lustre/lustre/libcfs/module.c b/drivers/staging/lustre/lustre/libcfs/module.c
index 3372537c6f3..f3108c7f818 100644
--- a/drivers/staging/lustre/lustre/libcfs/module.c
+++ b/drivers/staging/lustre/lustre/libcfs/module.c
@@ -155,7 +155,6 @@ kportal_memhog_alloc (struct libcfs_device_userstate *ldu, int npages, int flags
static int libcfs_psdev_open(unsigned long flags, void *args)
{
struct libcfs_device_userstate *ldu;
- ENTRY;
try_module_get(THIS_MODULE);
@@ -166,14 +165,13 @@ static int libcfs_psdev_open(unsigned long flags, void *args)
}
*(struct libcfs_device_userstate **)args = ldu;
- RETURN(0);
+ return 0;
}
/* called when closing /dev/device */
static int libcfs_psdev_release(unsigned long flags, void *args)
{
struct libcfs_device_userstate *ldu;
- ENTRY;
ldu = (struct libcfs_device_userstate *)args;
if (ldu != NULL) {
@@ -182,7 +180,7 @@ static int libcfs_psdev_release(unsigned long flags, void *args)
}
module_put(THIS_MODULE);
- RETURN(0);
+ return 0;
}
static struct rw_semaphore ioctl_list_sem;
@@ -222,12 +220,11 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile,unsigned long cmd,
void *arg, struct libcfs_ioctl_data *data)
{
int err = -EINVAL;
- ENTRY;
switch (cmd) {
case IOC_LIBCFS_CLEAR_DEBUG:
libcfs_debug_clear_buffer();
- RETURN(0);
+ return 0;
/*
* case IOC_LIBCFS_PANIC:
* Handled in arch/cfs_module.c
@@ -235,9 +232,9 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile,unsigned long cmd,
case IOC_LIBCFS_MARK_DEBUG:
if (data->ioc_inlbuf1 == NULL ||
data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0')
- RETURN(-EINVAL);
+ return -EINVAL;
libcfs_debug_mark_buffer(data->ioc_inlbuf1);
- RETURN(0);
+ return 0;
#if LWT_SUPPORT
case IOC_LIBCFS_LWT_CONTROL:
err = lwt_control ((data->ioc_flags & 1) != 0,
@@ -301,7 +298,7 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile,unsigned long cmd,
ping(data);
symbol_put(kping_client);
}
- RETURN(0);
+ return 0;
}
default: {
@@ -322,7 +319,7 @@ static int libcfs_ioctl_int(struct cfs_psdev_file *pfile,unsigned long cmd,
}
}
- RETURN(err);
+ return err;
}
static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd, void *arg)
@@ -330,11 +327,10 @@ static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd, void *a
char *buf;
struct libcfs_ioctl_data *data;
int err = 0;
- ENTRY;
LIBCFS_ALLOC_GFP(buf, 1024, GFP_IOFS);
if (buf == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
/* 'cmd' and permissions get checked in our arch-specific caller */
if (libcfs_ioctl_getdata(buf, buf + 800, (void *)arg)) {
@@ -347,7 +343,7 @@ static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd, void *a
out:
LIBCFS_FREE(buf, 1024);
- RETURN(err);
+ return err;
}
@@ -365,7 +361,7 @@ MODULE_AUTHOR("Peter J. Braam <braam@clusterfs.com>");
MODULE_DESCRIPTION("Portals v3.1");
MODULE_LICENSE("GPL");
-extern psdev_t libcfs_dev;
+extern struct miscdevice libcfs_dev;
extern struct rw_semaphore cfs_tracefile_sem;
extern struct mutex cfs_trace_thread_mutex;
extern struct cfs_wi_sched *cfs_sched_rehash;
@@ -495,4 +491,6 @@ static void exit_libcfs_module(void)
libcfs_arch_cleanup();
}
-cfs_module(libcfs, "1.0.0", init_libcfs_module, exit_libcfs_module);
+MODULE_VERSION("1.0.0");
+module_init(init_libcfs_module);
+module_exit(exit_libcfs_module);
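The final module.c hunk drops the cfs_module() wrapper in favour of the standard kernel module macros it was hiding. For reference, a minimal self-contained module using the same three macros; the names and version string here are illustrative:

#include <linux/module.h>
#include <linux/init.h>

static int __init example_init(void)
{
	return 0;	/* 0 means the module loaded successfully */
}

static void __exit example_exit(void)
{
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
module_init(example_init);
module_exit(example_exit);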
diff --git a/drivers/staging/lustre/lustre/libcfs/nidstrings.c b/drivers/staging/lustre/lustre/libcfs/nidstrings.c
index ccfd1078a90..99c9e9d2493 100644
--- a/drivers/staging/lustre/lustre/libcfs/nidstrings.c
+++ b/drivers/staging/lustre/lustre/libcfs/nidstrings.c
@@ -785,7 +785,6 @@ cfs_parse_nidlist(char *str, int len, struct list_head *nidlist)
struct cfs_lstr src;
struct cfs_lstr res;
int rc;
- ENTRY;
src.ls_str = str;
src.ls_len = len;
@@ -794,15 +793,15 @@ cfs_parse_nidlist(char *str, int len, struct list_head *nidlist)
rc = cfs_gettok(&src, ' ', &res);
if (rc == 0) {
cfs_free_nidlist(nidlist);
- RETURN(0);
+ return 0;
}
rc = parse_nidrange(&res, nidlist);
if (rc == 0) {
cfs_free_nidlist(nidlist);
- RETURN(0);
+ return 0;
}
}
- RETURN(1);
+ return 1;
}
/*
@@ -834,7 +833,6 @@ int cfs_match_nid(lnet_nid_t nid, struct list_head *nidlist)
{
struct nidrange *nr;
struct addrrange *ar;
- ENTRY;
list_for_each_entry(nr, nidlist, nr_link) {
if (nr->nr_netstrfns->nf_type != LNET_NETTYP(LNET_NIDNET(nid)))
@@ -842,13 +840,13 @@ int cfs_match_nid(lnet_nid_t nid, struct list_head *nidlist)
if (nr->nr_netnum != LNET_NETNUM(LNET_NIDNET(nid)))
continue;
if (nr->nr_all)
- RETURN(1);
+ return 1;
list_for_each_entry(ar, &nr->nr_addrranges, ar_link)
if (nr->nr_netstrfns->nf_match_addr(LNET_NIDADDR(nid),
&ar->ar_numaddr_ranges))
- RETURN(1);
+ return 1;
}
- RETURN(0);
+ return 0;
}
diff --git a/drivers/staging/lustre/lustre/libcfs/tracefile.c b/drivers/staging/lustre/lustre/libcfs/tracefile.c
index 439e71dfae3..357f40079ae 100644
--- a/drivers/staging/lustre/lustre/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lustre/libcfs/tracefile.c
@@ -275,12 +275,9 @@ int libcfs_debug_vmsg2(struct libcfs_debug_msg_data *msgdata,
int i;
int remain;
int mask = msgdata->msg_mask;
- char *file = (char *)msgdata->msg_file;
+ const char *file = kbasename(msgdata->msg_file);
cfs_debug_limit_state_t *cdls = msgdata->msg_cdls;
- if (strchr(file, '/'))
- file = strrchr(file, '/') + 1;
-
tcd = cfs_trace_get_tcd();
/* cfs_trace_get_tcd() grabs a lock, which disables preemption and
@@ -529,7 +526,7 @@ static void collect_pages_on_all_cpus(struct page_collection *pc)
int i, cpu;
spin_lock(&pc->pc_lock);
- cfs_for_each_possible_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
cfs_tcd_for_each_type_lock(tcd, i, cpu) {
list_splice_init(&tcd->tcd_pages, &pc->pc_pages);
tcd->tcd_cur_pages = 0;
@@ -562,7 +559,7 @@ static void put_pages_back_on_all_cpus(struct page_collection *pc)
int i, cpu;
spin_lock(&pc->pc_lock);
- cfs_for_each_possible_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
cfs_tcd_for_each_type_lock(tcd, i, cpu) {
cur_head = tcd->tcd_pages.next;
@@ -630,7 +627,7 @@ static void put_pages_on_daemon_list(struct page_collection *pc)
struct cfs_trace_cpu_data *tcd;
int i, cpu;
- cfs_for_each_possible_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
cfs_tcd_for_each_type_lock(tcd, i, cpu)
put_pages_on_tcd_daemon_list(pc, tcd);
}
@@ -1159,7 +1156,7 @@ static void trace_cleanup_on_all_cpus(void)
struct cfs_trace_page *tmp;
int i, cpu;
- cfs_for_each_possible_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
cfs_tcd_for_each_type_lock(tcd, i, cpu) {
tcd->tcd_shutting_down = 1;
diff --git a/drivers/staging/lustre/lustre/libcfs/upcall_cache.c b/drivers/staging/lustre/lustre/libcfs/upcall_cache.c
index 18c68c3493b..245b46f0dd9 100644
--- a/drivers/staging/lustre/lustre/libcfs/upcall_cache.c
+++ b/drivers/staging/lustre/lustre/libcfs/upcall_cache.c
@@ -152,7 +152,6 @@ struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
struct list_head *head;
wait_queue_t wait;
int rc, found;
- ENTRY;
LASSERT(cache);
@@ -176,7 +175,7 @@ find_again:
new = alloc_entry(cache, key, args);
if (!new) {
CERROR("fail to alloc entry\n");
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
}
goto find_again;
} else {
@@ -266,17 +265,14 @@ find_again:
/* Now we know it's good */
out:
spin_unlock(&cache->uc_lock);
- RETURN(entry);
+ return entry;
}
EXPORT_SYMBOL(upcall_cache_get_entry);
void upcall_cache_put_entry(struct upcall_cache *cache,
struct upcall_cache_entry *entry)
{
- ENTRY;
-
if (!entry) {
- EXIT;
return;
}
@@ -284,7 +280,6 @@ void upcall_cache_put_entry(struct upcall_cache *cache,
spin_lock(&cache->uc_lock);
put_entry(cache, entry);
spin_unlock(&cache->uc_lock);
- EXIT;
}
EXPORT_SYMBOL(upcall_cache_put_entry);
@@ -294,7 +289,6 @@ int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
struct upcall_cache_entry *entry = NULL;
struct list_head *head;
int found = 0, rc = 0;
- ENTRY;
LASSERT(cache);
@@ -314,7 +308,7 @@ int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
cache->uc_name, key);
/* haven't found, it's possible */
spin_unlock(&cache->uc_lock);
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (err) {
@@ -356,7 +350,7 @@ out:
wake_up_all(&entry->ue_waitq);
put_entry(cache, entry);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(upcall_cache_downcall);
@@ -364,7 +358,6 @@ static void cache_flush(struct upcall_cache *cache, int force)
{
struct upcall_cache_entry *entry, *next;
int i;
- ENTRY;
spin_lock(&cache->uc_lock);
for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
@@ -379,7 +372,6 @@ static void cache_flush(struct upcall_cache *cache, int force)
}
}
spin_unlock(&cache->uc_lock);
- EXIT;
}
void upcall_cache_flush_idle(struct upcall_cache *cache)
@@ -399,7 +391,6 @@ void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
struct list_head *head;
struct upcall_cache_entry *entry;
int found = 0;
- ENTRY;
head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
@@ -431,11 +422,10 @@ struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
{
struct upcall_cache *cache;
int i;
- ENTRY;
LIBCFS_ALLOC(cache, sizeof(*cache));
if (!cache)
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
spin_lock_init(&cache->uc_lock);
rwlock_init(&cache->uc_upcall_rwlock);
@@ -448,7 +438,7 @@ struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
cache->uc_acquire_expire = 30;
cache->uc_ops = ops;
- RETURN(cache);
+ return cache;
}
EXPORT_SYMBOL(upcall_cache_init);
diff --git a/drivers/staging/lustre/lustre/libcfs/watchdog.c b/drivers/staging/lustre/lustre/libcfs/watchdog.c
deleted file mode 100644
index 7c385ada3e1..00000000000
--- a/drivers/staging/lustre/lustre/libcfs/watchdog.c
+++ /dev/null
@@ -1,516 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/libcfs/watchdog.c
- *
- * Author: Jacob Berkman <jacob@clusterfs.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LNET
-
-#include <linux/libcfs/libcfs.h>
-#include "tracefile.h"
-
-struct lc_watchdog {
- spinlock_t lcw_lock; /* check or change lcw_list */
- int lcw_refcount; /* must hold lcw_pending_timers_lock */
- timer_list_t lcw_timer; /* kernel timer */
- struct list_head lcw_list; /* chain on pending list */
- cfs_time_t lcw_last_touched; /* last touched stamp */
- task_t *lcw_task; /* owner task */
- void (*lcw_callback)(pid_t, void *);
- void *lcw_data;
-
- pid_t lcw_pid;
-
- enum {
- LC_WATCHDOG_DISABLED,
- LC_WATCHDOG_ENABLED,
- LC_WATCHDOG_EXPIRED
- } lcw_state;
-};
-
-#ifdef WITH_WATCHDOG
-/*
- * The dispatcher will complete lcw_start_completion when it starts,
- * and lcw_stop_completion when it exits.
- * Wake lcw_event_waitq to signal timer callback dispatches.
- */
-static struct completion lcw_start_completion;
-static struct completion lcw_stop_completion;
-static wait_queue_head_t lcw_event_waitq;
-
-/*
- * Set this and wake lcw_event_waitq to stop the dispatcher.
- */
-enum {
- LCW_FLAG_STOP = 0
-};
-static unsigned long lcw_flags = 0;
-
-/*
- * Number of outstanding watchdogs.
- * When it hits 1, we start the dispatcher.
- * When it hits 0, we stop the dispatcher.
- */
-static __u32 lcw_refcount = 0;
-static DEFINE_MUTEX(lcw_refcount_mutex);
-
-/*
- * List of timers that have fired that need their callbacks run by the
- * dispatcher.
- */
-/* BH lock! */
-static DEFINE_SPINLOCK(lcw_pending_timers_lock);
-static struct list_head lcw_pending_timers = LIST_HEAD_INIT(lcw_pending_timers);
-
-/* Last time a watchdog expired */
-static cfs_time_t lcw_last_watchdog_time;
-static int lcw_recent_watchdog_count;
-
-static void
-lcw_dump(struct lc_watchdog *lcw)
-{
- ENTRY;
- rcu_read_lock();
- if (lcw->lcw_task == NULL) {
- LCONSOLE_WARN("Process " LPPID " was not found in the task "
- "list; watchdog callback may be incomplete\n",
- (int)lcw->lcw_pid);
- } else {
- libcfs_debug_dumpstack(lcw->lcw_task);
- }
-
- rcu_read_unlock();
- EXIT;
-}
-
-static void lcw_cb(ulong_ptr_t data)
-{
- struct lc_watchdog *lcw = (struct lc_watchdog *)data;
- ENTRY;
-
- if (lcw->lcw_state != LC_WATCHDOG_ENABLED) {
- EXIT;
- return;
- }
-
- lcw->lcw_state = LC_WATCHDOG_EXPIRED;
-
- spin_lock_bh(&lcw->lcw_lock);
- LASSERT(list_empty(&lcw->lcw_list));
-
- spin_lock_bh(&lcw_pending_timers_lock);
- lcw->lcw_refcount++; /* +1 for pending list */
- list_add(&lcw->lcw_list, &lcw_pending_timers);
- wake_up(&lcw_event_waitq);
-
- spin_unlock_bh(&lcw_pending_timers_lock);
- spin_unlock_bh(&lcw->lcw_lock);
- EXIT;
-}
-
-static int is_watchdog_fired(void)
-{
- int rc;
-
- if (test_bit(LCW_FLAG_STOP, &lcw_flags))
- return 1;
-
- spin_lock_bh(&lcw_pending_timers_lock);
- rc = !list_empty(&lcw_pending_timers);
- spin_unlock_bh(&lcw_pending_timers_lock);
- return rc;
-}
-
-static void lcw_dump_stack(struct lc_watchdog *lcw)
-{
- cfs_time_t current_time;
- cfs_duration_t delta_time;
- struct timeval timediff;
-
- current_time = cfs_time_current();
- delta_time = cfs_time_sub(current_time, lcw->lcw_last_touched);
- cfs_duration_usec(delta_time, &timediff);
-
- /*
- * Check to see if we should throttle the watchdog timer to avoid
- * too many dumps going to the console thus triggering an NMI.
- */
- delta_time = cfs_duration_sec(cfs_time_sub(current_time,
- lcw_last_watchdog_time));
-
- if (delta_time < libcfs_watchdog_ratelimit &&
- lcw_recent_watchdog_count > 3) {
- LCONSOLE_WARN("Service thread pid %u was inactive for "
- "%lu.%.02lus. Watchdog stack traces are limited "
- "to 3 per %d seconds, skipping this one.\n",
- (int)lcw->lcw_pid,
- timediff.tv_sec,
- timediff.tv_usec / 10000,
- libcfs_watchdog_ratelimit);
- } else {
- if (delta_time < libcfs_watchdog_ratelimit) {
- lcw_recent_watchdog_count++;
- } else {
- memcpy(&lcw_last_watchdog_time, &current_time,
- sizeof(current_time));
- lcw_recent_watchdog_count = 0;
- }
-
- LCONSOLE_WARN("Service thread pid %u was inactive for "
- "%lu.%.02lus. The thread might be hung, or it "
- "might only be slow and will resume later. "
- "Dumping the stack trace for debugging purposes:"
- "\n",
- (int)lcw->lcw_pid,
- timediff.tv_sec,
- timediff.tv_usec / 10000);
- lcw_dump(lcw);
- }
-}
-
-static int lcw_dispatch_main(void *data)
-{
- int rc = 0;
- struct lc_watchdog *lcw;
- LIST_HEAD (zombies);
-
- ENTRY;
-
- complete(&lcw_start_completion);
-
- while (1) {
- int dumplog = 1;
-
- cfs_wait_event_interruptible(lcw_event_waitq,
- is_watchdog_fired(), rc);
- CDEBUG(D_INFO, "Watchdog got woken up...\n");
- if (test_bit(LCW_FLAG_STOP, &lcw_flags)) {
- CDEBUG(D_INFO, "LCW_FLAG_STOP set, shutting down...\n");
-
- spin_lock_bh(&lcw_pending_timers_lock);
- rc = !list_empty(&lcw_pending_timers);
- spin_unlock_bh(&lcw_pending_timers_lock);
- if (rc) {
- CERROR("pending timers list was not empty at "
- "time of watchdog dispatch shutdown\n");
- }
- break;
- }
-
- spin_lock_bh(&lcw_pending_timers_lock);
- while (!list_empty(&lcw_pending_timers)) {
- int is_dumplog;
-
- lcw = list_entry(lcw_pending_timers.next,
- struct lc_watchdog, lcw_list);
- /* +1 ref for callback to make sure lwc wouldn't be
- * deleted after releasing lcw_pending_timers_lock */
- lcw->lcw_refcount++;
- spin_unlock_bh(&lcw_pending_timers_lock);
-
- /* lock ordering */
- spin_lock_bh(&lcw->lcw_lock);
- spin_lock_bh(&lcw_pending_timers_lock);
-
- if (list_empty(&lcw->lcw_list)) {
- /* already removed from pending list */
- lcw->lcw_refcount--; /* -1 ref for callback */
- if (lcw->lcw_refcount == 0)
- list_add(&lcw->lcw_list, &zombies);
- spin_unlock_bh(&lcw->lcw_lock);
- /* still hold lcw_pending_timers_lock */
- continue;
- }
-
- list_del_init(&lcw->lcw_list);
- lcw->lcw_refcount--; /* -1 ref for pending list */
-
- spin_unlock_bh(&lcw_pending_timers_lock);
- spin_unlock_bh(&lcw->lcw_lock);
-
- CDEBUG(D_INFO, "found lcw for pid " LPPID "\n",
- lcw->lcw_pid);
- lcw_dump_stack(lcw);
-
- is_dumplog = lcw->lcw_callback == lc_watchdog_dumplog;
- if (lcw->lcw_state != LC_WATCHDOG_DISABLED &&
- (dumplog || !is_dumplog)) {
- lcw->lcw_callback(lcw->lcw_pid, lcw->lcw_data);
- if (dumplog && is_dumplog)
- dumplog = 0;
- }
-
- spin_lock_bh(&lcw_pending_timers_lock);
- lcw->lcw_refcount--; /* -1 ref for callback */
- if (lcw->lcw_refcount == 0)
- list_add(&lcw->lcw_list, &zombies);
- }
- spin_unlock_bh(&lcw_pending_timers_lock);
-
- while (!list_empty(&zombies)) {
- lcw = list_entry(lcw_pending_timers.next,
- struct lc_watchdog, lcw_list);
- list_del(&lcw->lcw_list);
- LIBCFS_FREE(lcw, sizeof(*lcw));
- }
- }
-
- complete(&lcw_stop_completion);
-
- RETURN(rc);
-}
-
-static void lcw_dispatch_start(void)
-{
- task_t *task;
-
- ENTRY;
- LASSERT(lcw_refcount == 1);
-
- init_completion(&lcw_stop_completion);
- init_completion(&lcw_start_completion);
- init_waitqueue_head(&lcw_event_waitq);
-
- CDEBUG(D_INFO, "starting dispatch thread\n");
- task = kthread_run(lcw_dispatch_main, NULL, "lc_watchdogd");
- if (IS_ERR(task)) {
- CERROR("error spawning watchdog dispatch thread: %ld\n",
- PTR_ERR(task));
- EXIT;
- return;
- }
- wait_for_completion(&lcw_start_completion);
- CDEBUG(D_INFO, "watchdog dispatcher initialization complete.\n");
-
- EXIT;
-}
-
-static void lcw_dispatch_stop(void)
-{
- ENTRY;
- LASSERT(lcw_refcount == 0);
-
- CDEBUG(D_INFO, "trying to stop watchdog dispatcher.\n");
-
- set_bit(LCW_FLAG_STOP, &lcw_flags);
- wake_up(&lcw_event_waitq);
-
- wait_for_completion(&lcw_stop_completion);
-
- CDEBUG(D_INFO, "watchdog dispatcher has shut down.\n");
-
- EXIT;
-}
-
-struct lc_watchdog *lc_watchdog_add(int timeout,
- void (*callback)(pid_t, void *),
- void *data)
-{
- struct lc_watchdog *lcw = NULL;
- ENTRY;
-
- LIBCFS_ALLOC(lcw, sizeof(*lcw));
- if (lcw == NULL) {
- CDEBUG(D_INFO, "Could not allocate new lc_watchdog\n");
- RETURN(ERR_PTR(-ENOMEM));
- }
-
- spin_lock_init(&lcw->lcw_lock);
- lcw->lcw_refcount = 1; /* refcount for owner */
- lcw->lcw_task = current;
- lcw->lcw_pid = current_pid();
- lcw->lcw_callback = (callback != NULL) ? callback : lc_watchdog_dumplog;
- lcw->lcw_data = data;
- lcw->lcw_state = LC_WATCHDOG_DISABLED;
-
- INIT_LIST_HEAD(&lcw->lcw_list);
- cfs_timer_init(&lcw->lcw_timer, lcw_cb, lcw);
-
- mutex_lock(&lcw_refcount_mutex);
- if (++lcw_refcount == 1)
- lcw_dispatch_start();
- mutex_unlock(&lcw_refcount_mutex);
-
- /* Keep this working in case we enable them by default */
- if (lcw->lcw_state == LC_WATCHDOG_ENABLED) {
- lcw->lcw_last_touched = cfs_time_current();
- cfs_timer_arm(&lcw->lcw_timer, cfs_time_seconds(timeout) +
- cfs_time_current());
- }
-
- RETURN(lcw);
-}
-EXPORT_SYMBOL(lc_watchdog_add);
-
-static void lcw_update_time(struct lc_watchdog *lcw, const char *message)
-{
- cfs_time_t newtime = cfs_time_current();;
-
- if (lcw->lcw_state == LC_WATCHDOG_EXPIRED) {
- struct timeval timediff;
- cfs_time_t delta_time = cfs_time_sub(newtime,
- lcw->lcw_last_touched);
- cfs_duration_usec(delta_time, &timediff);
-
- LCONSOLE_WARN("Service thread pid %u %s after %lu.%.02lus. "
- "This indicates the system was overloaded (too "
- "many service threads, or there were not enough "
- "hardware resources).\n",
- lcw->lcw_pid,
- message,
- timediff.tv_sec,
- timediff.tv_usec / 10000);
- }
- lcw->lcw_last_touched = newtime;
-}
-
-static void lc_watchdog_del_pending(struct lc_watchdog *lcw)
-{
- spin_lock_bh(&lcw->lcw_lock);
- if (unlikely(!list_empty(&lcw->lcw_list))) {
- spin_lock_bh(&lcw_pending_timers_lock);
- list_del_init(&lcw->lcw_list);
- lcw->lcw_refcount--; /* -1 ref for pending list */
- spin_unlock_bh(&lcw_pending_timers_lock);
- }
-
- spin_unlock_bh(&lcw->lcw_lock);
-}
-
-void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout)
-{
- ENTRY;
- LASSERT(lcw != NULL);
-
- lc_watchdog_del_pending(lcw);
-
- lcw_update_time(lcw, "resumed");
- lcw->lcw_state = LC_WATCHDOG_ENABLED;
-
- cfs_timer_arm(&lcw->lcw_timer, cfs_time_current() +
- cfs_time_seconds(timeout));
-
- EXIT;
-}
-EXPORT_SYMBOL(lc_watchdog_touch);
-
-void lc_watchdog_disable(struct lc_watchdog *lcw)
-{
- ENTRY;
- LASSERT(lcw != NULL);
-
- lc_watchdog_del_pending(lcw);
-
- lcw_update_time(lcw, "completed");
- lcw->lcw_state = LC_WATCHDOG_DISABLED;
-
- EXIT;
-}
-EXPORT_SYMBOL(lc_watchdog_disable);
-
-void lc_watchdog_delete(struct lc_watchdog *lcw)
-{
- int dead;
-
- ENTRY;
- LASSERT(lcw != NULL);
-
- cfs_timer_disarm(&lcw->lcw_timer);
-
- lcw_update_time(lcw, "stopped");
-
- spin_lock_bh(&lcw->lcw_lock);
- spin_lock_bh(&lcw_pending_timers_lock);
- if (unlikely(!list_empty(&lcw->lcw_list))) {
- list_del_init(&lcw->lcw_list);
- lcw->lcw_refcount--; /* -1 ref for pending list */
- }
-
- lcw->lcw_refcount--; /* -1 ref for owner */
- dead = lcw->lcw_refcount == 0;
- spin_unlock_bh(&lcw_pending_timers_lock);
- spin_unlock_bh(&lcw->lcw_lock);
-
- if (dead)
- LIBCFS_FREE(lcw, sizeof(*lcw));
-
- mutex_lock(&lcw_refcount_mutex);
- if (--lcw_refcount == 0)
- lcw_dispatch_stop();
- mutex_unlock(&lcw_refcount_mutex);
-
- EXIT;
-}
-EXPORT_SYMBOL(lc_watchdog_delete);
-
-/*
- * Provided watchdog handlers
- */
-
-void lc_watchdog_dumplog(pid_t pid, void *data)
-{
- libcfs_debug_dumplog_internal((void *)((long_ptr_t)pid));
-}
-EXPORT_SYMBOL(lc_watchdog_dumplog);
-
-#else /* !defined(WITH_WATCHDOG) */
-
-struct lc_watchdog *lc_watchdog_add(int timeout,
- void (*callback)(pid_t pid, void *),
- void *data)
-{
- static struct lc_watchdog watchdog;
- return &watchdog;
-}
-EXPORT_SYMBOL(lc_watchdog_add);
-
-void lc_watchdog_touch(struct lc_watchdog *lcw, int timeout)
-{
-}
-EXPORT_SYMBOL(lc_watchdog_touch);
-
-void lc_watchdog_disable(struct lc_watchdog *lcw)
-{
-}
-EXPORT_SYMBOL(lc_watchdog_disable);
-
-void lc_watchdog_delete(struct lc_watchdog *lcw)
-{
-}
-EXPORT_SYMBOL(lc_watchdog_delete);
-
-#endif
diff --git a/drivers/staging/lustre/lustre/libcfs/workitem.c b/drivers/staging/lustre/lustre/libcfs/workitem.c
index b533666c190..462172d1a75 100644
--- a/drivers/staging/lustre/lustre/libcfs/workitem.c
+++ b/drivers/staging/lustre/lustre/libcfs/workitem.c
@@ -376,7 +376,8 @@ cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab,
rc = 0;
while (nthrs > 0) {
char name[16];
- task_t *task;
+ struct task_struct *task;
+
spin_lock(&cfs_wi_data.wi_glock);
while (sched->ws_starting > 0) {
spin_unlock(&cfs_wi_data.wi_glock);
diff --git a/drivers/staging/lustre/lustre/llite/Makefile b/drivers/staging/lustre/lustre/llite/Makefile
index dff0c0486e7..f493e074000 100644
--- a/drivers/staging/lustre/lustre/llite/Makefile
+++ b/drivers/staging/lustre/lustre/llite/Makefile
@@ -1,5 +1,5 @@
obj-$(CONFIG_LUSTRE_FS) += lustre.o
-obj-$(CONFIG_LUSTRE_FS) += llite_lloop.o
+obj-$(CONFIG_LUSTRE_LLITE_LLOOP) += llite_lloop.o
lustre-y := dcache.o dir.o file.o llite_close.o llite_lib.o llite_nfs.o \
rw.o lproc_llite.o namei.o symlink.o llite_mmap.o \
xattr.o remote_perm.o llite_rmtacl.o llite_capa.o \
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index ff0d085077c..e7629be3973 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -59,11 +59,11 @@ static void free_dentry_data(struct rcu_head *head)
static void ll_release(struct dentry *de)
{
struct ll_dentry_data *lld;
- ENTRY;
+
LASSERT(de != NULL);
lld = ll_d2d(de);
if (lld == NULL) /* NFS copies the de->d_op methods (bug 4655) */
- RETURN_EXIT;
+ return;
if (lld->lld_it) {
ll_intent_release(lld->lld_it);
@@ -73,8 +73,6 @@ static void ll_release(struct dentry *de)
LASSERT(lld->lld_mnt_count == 0);
de->d_fsdata = NULL;
call_rcu(&lld->lld_rcu_head, free_dentry_data);
-
- EXIT;
}
/* Compare if two dentries are the same. Don't match if the existing dentry
@@ -84,17 +82,14 @@ static void ll_release(struct dentry *de)
* an AST before calling d_revalidate_it(). The dentry still exists (marked
* INVALID) so d_lookup() matches it, but we have no lock on it (so
* lock_match() fails) and we spin around real_lookup(). */
-int ll_dcompare(const struct dentry *parent, const struct inode *pinode,
- const struct dentry *dentry, const struct inode *inode,
+int ll_dcompare(const struct dentry *parent, const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *name)
{
- ENTRY;
-
if (len != name->len)
- RETURN(1);
+ return 1;
if (memcmp(str, name->name, len))
- RETURN(1);
+ return 1;
CDEBUG(D_DENTRY, "found name %.*s(%p) flags %#x refc %d\n",
name->len, name->name, dentry, dentry->d_flags,
@@ -102,12 +97,12 @@ int ll_dcompare(const struct dentry *parent, const struct inode *pinode,
/* mountpoint is always valid */
if (d_mountpoint((struct dentry *)dentry))
- RETURN(0);
+ return 0;
if (d_lustre_invalid(dentry))
- RETURN(1);
+ return 1;
- RETURN(0);
+ return 0;
}
static inline int return_if_equal(struct ldlm_lock *lock, void *data)
@@ -128,22 +123,21 @@ static int find_cbdata(struct inode *inode)
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct lov_stripe_md *lsm;
int rc = 0;
- ENTRY;
LASSERT(inode);
rc = md_find_cbdata(sbi->ll_md_exp, ll_inode2fid(inode),
return_if_equal, NULL);
if (rc != 0)
- RETURN(rc);
+ return rc;
lsm = ccc_inode_lsm_get(inode);
if (lsm == NULL)
- RETURN(rc);
+ return rc;
rc = obd_find_cbdata(sbi->ll_dt_exp, lsm, return_if_equal, NULL);
ccc_inode_lsm_put(inode, lsm);
- RETURN(rc);
+ return rc;
}
/**
@@ -155,7 +149,6 @@ static int find_cbdata(struct inode *inode)
*/
static int ll_ddelete(const struct dentry *de)
{
- ENTRY;
LASSERT(de);
CDEBUG(D_DENTRY, "%s dentry %.*s (%p, parent %p, inode %p) %s%s\n",
@@ -179,13 +172,12 @@ static int ll_ddelete(const struct dentry *de)
#endif
if (d_lustre_invalid((struct dentry *)de))
- RETURN(1);
- RETURN(0);
+ return 1;
+ return 0;
}
static int ll_set_dd(struct dentry *de)
{
- ENTRY;
LASSERT(de != NULL);
CDEBUG(D_DENTRY, "ldd on dentry %.*s (%p) parent %p inode %p refc %d\n",
@@ -204,11 +196,11 @@ static int ll_set_dd(struct dentry *de)
OBD_FREE_PTR(lld);
spin_unlock(&de->d_lock);
} else {
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
}
- RETURN(0);
+ return 0;
}
int ll_dops_init(struct dentry *de, int block, int init_sa)
@@ -260,8 +252,6 @@ void ll_intent_drop_lock(struct lookup_intent *it)
void ll_intent_release(struct lookup_intent *it)
{
- ENTRY;
-
CDEBUG(D_INFO, "intent %p released\n", it);
ll_intent_drop_lock(it);
/* We are still holding extra reference on a request, need to free it */
@@ -275,14 +265,12 @@ void ll_intent_release(struct lookup_intent *it)
it->d.lustre.it_disposition = 0;
it->d.lustre.it_data = NULL;
- EXIT;
}
void ll_invalidate_aliases(struct inode *inode)
{
struct dentry *dentry;
struct ll_d_hlist_node *p;
- ENTRY;
LASSERT(inode != NULL);
@@ -296,18 +284,17 @@ void ll_invalidate_aliases(struct inode *inode)
dentry->d_name.name, dentry, dentry->d_parent,
dentry->d_inode, dentry->d_flags);
- if (dentry->d_name.len == 1 && dentry->d_name.name[0] == '/') {
- CERROR("called on root (?) dentry=%p, inode=%p "
- "ino=%lu\n", dentry, inode, inode->i_ino);
+ if (unlikely(dentry == dentry->d_sb->s_root)) {
+ CERROR("%s: called on root dentry=%p, fid="DFID"\n",
+ ll_get_fsname(dentry->d_sb, NULL, 0),
+ dentry, PFID(ll_inode2fid(inode)));
lustre_dump_dentry(dentry, 1);
- libcfs_debug_dumpstack(NULL);
+ dump_stack();
}
d_lustre_invalidate(dentry, 0);
}
ll_unlock_dcache(inode);
-
- EXIT;
}
int ll_revalidate_it_finish(struct ptlrpc_request *request,
@@ -315,17 +302,16 @@ int ll_revalidate_it_finish(struct ptlrpc_request *request,
struct dentry *de)
{
int rc = 0;
- ENTRY;
if (!request)
- RETURN(0);
+ return 0;
if (it_disposition(it, DISP_LOOKUP_NEG))
- RETURN(-ENOENT);
+ return -ENOENT;
rc = ll_prep_inode(&de->d_inode, request, NULL, it);
- RETURN(rc);
+ return rc;
}
void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry)
@@ -370,7 +356,6 @@ int ll_revalidate_it(struct dentry *de, int lookup_flags,
struct inode *parent = de->d_parent->d_inode;
int rc;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%s,intent=%s\n", de->d_name.name,
LL_IT2STR(it));
@@ -383,10 +368,10 @@ int ll_revalidate_it(struct dentry *de, int lookup_flags,
away this negative dentry and actually do the request to
kernel to create whatever needs to be created (if possible)*/
if (it && (it->it_op & IT_CREAT))
- RETURN(0);
+ return 0;
if (d_lustre_invalid(de))
- RETURN(0);
+ return 0;
ibits = MDS_INODELOCK_UPDATE;
rc = ll_have_md_lock(parent, &ibits, LCK_MINMODE);
@@ -413,7 +398,7 @@ int ll_revalidate_it(struct dentry *de, int lookup_flags,
LASSERT(it);
if (it->it_op == IT_LOOKUP && !d_lustre_invalid(de))
- RETURN(1);
+ return 1;
if (it->it_op == IT_OPEN) {
struct inode *inode = de->d_inode;
@@ -460,7 +445,7 @@ int ll_revalidate_it(struct dentry *de, int lookup_flags,
if it would be, we'll reopen the open request to
MDS later during file open path */
mutex_unlock(&lli->lli_och_mutex);
- RETURN(1);
+ return 1;
} else {
mutex_unlock(&lli->lli_och_mutex);
}
@@ -479,7 +464,7 @@ do_lock:
de->d_name.name, de->d_name.len,
0, LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ return PTR_ERR(op_data);
if (!IS_POSIXACL(parent) || !exp_connect_umask(exp))
it->it_create_mode &= ~current_umask();
@@ -566,7 +551,7 @@ out:
mark:
if (it != NULL && it->it_op == IT_GETATTR && rc > 0)
ll_statahead_mark(parent, de);
- RETURN(rc);
+ return rc;
/*
* This part is here to combat evil-evil race in real_lookup on 2.6
@@ -598,7 +583,7 @@ do_lookup:
LUSTRE_OPC_CREATE :
LUSTRE_OPC_ANY), NULL);
if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ return PTR_ERR(op_data);
rc = md_intent_lock(exp, op_data, NULL, 0, it, 0, &req,
ll_md_blocking_ast, 0);
@@ -639,14 +624,13 @@ int ll_revalidate_nd(struct dentry *dentry, unsigned int flags)
struct inode *parent = dentry->d_parent->d_inode;
int unplug = 0;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%s,flags=%u\n",
dentry->d_name.name, flags);
if (!(flags & (LOOKUP_PARENT|LOOKUP_OPEN|LOOKUP_CREATE)) &&
ll_need_statahead(parent, dentry) > 0) {
if (flags & LOOKUP_RCU)
- RETURN(-ECHILD);
+ return -ECHILD;
if (dentry->d_inode == NULL)
unplug = 1;
@@ -654,7 +638,7 @@ int ll_revalidate_nd(struct dentry *dentry, unsigned int flags)
ll_statahead_mark(parent, dentry);
}
- RETURN(1);
+ return 1;
}
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 23c61fe8196..09844be5eec 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -41,14 +41,13 @@
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
-#include <linux/version.h>
#include <asm/uaccess.h>
#include <linux/buffer_head.h> // for wait_on_buffer
#include <linux/pagevec.h>
+#include <linux/prefetch.h>
#define DEBUG_SUBSYSTEM S_LLITE
-#include <lustre/lustre_idl.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_lib.h>
@@ -158,7 +157,6 @@ static int ll_dir_filler(void *_hash, struct page *page0)
int npages;
int i;
int rc;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) hash "LPU64"\n",
inode->i_ino, inode->i_generation, inode, hash);
@@ -239,7 +237,6 @@ static int ll_dir_filler(void *_hash, struct page *page0)
if (page_pool != &page0)
OBD_FREE(page_pool, sizeof(struct page *) * max_pages);
- EXIT;
return rc;
}
@@ -355,15 +352,12 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
rc = md_lock_match(ll_i2sbi(dir)->ll_md_exp, LDLM_FL_BLOCK_GRANTED,
ll_inode2fid(dir), LDLM_IBITS, &policy, mode, &lockh);
if (!rc) {
- struct ldlm_enqueue_info einfo = {.ei_type = LDLM_IBITS,
- .ei_mode = mode,
- .ei_cb_bl =
- ll_md_blocking_ast,
- .ei_cb_cp =
- ldlm_completion_ast,
- .ei_cb_gl = NULL,
- .ei_cb_wg = NULL,
- .ei_cbdata = NULL};
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = LDLM_IBITS,
+ .ei_mode = mode,
+ .ei_cb_bl = ll_md_blocking_ast,
+ .ei_cb_cp = ldlm_completion_ast,
+ };
struct lookup_intent it = { .it_op = IT_READDIR };
struct ptlrpc_request *request;
struct md_op_data *op_data;
@@ -482,19 +476,17 @@ fail:
goto out_unlock;
}
-int ll_dir_read(struct inode *inode, __u64 *_pos, void *cookie,
- filldir_t filldir)
+int ll_dir_read(struct inode *inode, struct dir_context *ctx)
{
struct ll_inode_info *info = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
- __u64 pos = *_pos;
+ __u64 pos = ctx->pos;
int api32 = ll_need_32bit_api(sbi);
int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
struct page *page;
struct ll_dir_chain chain;
int done = 0;
int rc = 0;
- ENTRY;
ll_dir_chain_init(&chain);
@@ -547,12 +539,14 @@ int ll_dir_read(struct inode *inode, __u64 *_pos, void *cookie,
fid_le_to_cpu(&fid, &ent->lde_fid);
ino = cl_fid_build_ino(&fid, api32);
type = ll_dirent_type_get(ent);
+ ctx->pos = lhash;
/* For 'll_nfs_get_name_filldir()', it will try
* to access the 'ent' through its 'lde_name',
- * so the parameter 'name' for 'filldir()' must
- * be part of the 'ent'. */
- done = filldir(cookie, ent->lde_name, namelen,
- lhash, ino, type);
+ * so the parameter 'name' for 'ctx->actor()'
+ * must be part of the 'ent'.
+ */
+ done = !dir_emit(ctx, ent->lde_name,
+ namelen, ino, type);
}
next = le64_to_cpu(dp->ldp_hash_end);
if (!done) {
@@ -593,56 +587,49 @@ int ll_dir_read(struct inode *inode, __u64 *_pos, void *cookie,
}
}
- *_pos = pos;
+ ctx->pos = pos;
ll_dir_chain_fini(&chain);
- RETURN(rc);
+ return rc;
}
-static int ll_readdir(struct file *filp, void *cookie, filldir_t filldir)
+static int ll_readdir(struct file *filp, struct dir_context *ctx)
{
struct inode *inode = filp->f_dentry->d_inode;
struct ll_file_data *lfd = LUSTRE_FPRIVATE(filp);
struct ll_sb_info *sbi = ll_i2sbi(inode);
- __u64 pos = lfd->lfd_pos;
int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
int api32 = ll_need_32bit_api(sbi);
int rc;
- struct path path;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) pos %lu/%llu "
" 32bit_api %d\n", inode->i_ino, inode->i_generation,
- inode, (unsigned long)pos, i_size_read(inode), api32);
+ inode, (unsigned long)lfd->lfd_pos, i_size_read(inode), api32);
- if (pos == MDS_DIR_END_OFF)
+ if (lfd->lfd_pos == MDS_DIR_END_OFF)
/*
* end-of-file.
*/
GOTO(out, rc = 0);
- rc = ll_dir_read(inode, &pos, cookie, filldir);
- lfd->lfd_pos = pos;
- if (pos == MDS_DIR_END_OFF) {
+ ctx->pos = lfd->lfd_pos;
+ rc = ll_dir_read(inode, ctx);
+ lfd->lfd_pos = ctx->pos;
+ if (ctx->pos == MDS_DIR_END_OFF) {
if (api32)
- filp->f_pos = LL_DIR_END_OFF_32BIT;
+ ctx->pos = LL_DIR_END_OFF_32BIT;
else
- filp->f_pos = LL_DIR_END_OFF;
+ ctx->pos = LL_DIR_END_OFF;
} else {
if (api32 && hash64)
- filp->f_pos = pos >> 32;
- else
- filp->f_pos = pos;
+ ctx->pos >>= 32;
}
filp->f_version = inode->i_version;
- path.mnt = filp->f_path.mnt;
- path.dentry = filp->f_dentry;
- touch_atime(&path);
out:
if (!rc)
ll_stats_ops_tally(sbi, LPROC_LL_READDIR, 1);
- RETURN(rc);
+ return rc;
}
int ll_send_mgc_param(struct obd_export *mgc, char *string)
@@ -673,8 +660,6 @@ int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
int mode;
int err;
- ENTRY;
-
mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask) | S_IFDIR;
op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
strlen(filename), mode, LUSTRE_OPC_MKDIR,
@@ -684,7 +669,8 @@ int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
op_data->op_cli_flags |= CLI_SET_MEA;
err = md_create(sbi->ll_md_exp, op_data, lump, sizeof(*lump), mode,
- current_fsuid(), current_fsgid(),
+ from_kuid(&init_user_ns, current_fsuid()),
+ from_kgid(&init_user_ns, current_fsgid()),
cfs_curproc_cap_pack(), 0, &request);
ll_finish_md_op_data(op_data);
if (err)
@@ -704,7 +690,6 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
struct lustre_sb_info *lsi = s2lsi(inode->i_sb);
struct obd_device *mgc = lsi->lsi_mgc;
int lum_size;
- ENTRY;
if (lump != NULL) {
/*
@@ -731,7 +716,7 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
" %#08x != %#08x nor %#08x\n",
lump->lmm_magic, LOV_USER_MAGIC_V1,
LOV_USER_MAGIC_V3);
- RETURN(-EINVAL);
+ return -EINVAL;
}
}
} else {
@@ -741,7 +726,7 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ return PTR_ERR(op_data);
if (lump != NULL && lump->lmm_magic == cpu_to_le32(LMV_USER_MAGIC))
op_data->op_cli_flags |= CLI_SET_MEA;
@@ -797,7 +782,7 @@ end:
if (param != NULL)
OBD_FREE(param, MGS_PARAM_MAXLEN);
}
- RETURN(rc);
+ return rc;
}
int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
@@ -812,13 +797,13 @@ int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
rc = ll_get_max_mdsize(sbi, &lmmsize);
if (rc)
- RETURN(rc);
+ return rc;
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL,
0, lmmsize, LUSTRE_OPC_ANY,
NULL);
if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ return PTR_ERR(op_data);
op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
rc = md_getattr(sbi->ll_md_exp, op_data, &req);
@@ -878,12 +863,11 @@ int ll_get_mdt_idx(struct inode *inode)
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct md_op_data *op_data;
int rc, mdtidx;
- ENTRY;
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0,
0, LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ return PTR_ERR(op_data);
op_data->op_flags |= MF_GET_MDT_IDX;
rc = md_getattr(sbi->ll_md_exp, op_data, NULL);
@@ -891,7 +875,7 @@ int ll_get_mdt_idx(struct inode *inode)
ll_finish_md_op_data(op_data);
if (rc < 0) {
CDEBUG(D_INFO, "md_getattr_name: %d\n", rc);
- RETURN(rc);
+ return rc;
}
return mdtidx;
}
@@ -912,7 +896,6 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct hsm_progress_kernel hpk;
int rc;
- ENTRY;
/* Forge a hsm_progress based on data from copy. */
hpk.hpk_fid = copy->hc_hai.hai_fid;
@@ -962,7 +945,7 @@ progress:
rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
&hpk, NULL);
- RETURN(rc);
+ return rc;
}
/**
@@ -985,7 +968,6 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct hsm_progress_kernel hpk;
int rc;
- ENTRY;
/* If you modify the logic here, also check llapi_hsm_copy_end(). */
/* Take care: copy->hc_hai.hai_action, len, gid and data are not
@@ -1062,7 +1044,7 @@ progress:
rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
&hpk, NULL);
- RETURN(rc);
+ return rc;
}
@@ -1090,7 +1072,6 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
int id = qctl->qc_id;
int valid = qctl->qc_valid;
int rc = 0;
- ENTRY;
switch (cmd) {
case LUSTRE_Q_INVALIDATE:
@@ -1101,32 +1082,34 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
case Q_SETINFO:
if (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
sbi->ll_flags & LL_SBI_RMT_CLIENT)
- RETURN(-EPERM);
+ return -EPERM;
break;
case Q_GETQUOTA:
- if (((type == USRQUOTA && current_euid() != id) ||
- (type == GRPQUOTA && !in_egroup_p(id))) &&
+ if (((type == USRQUOTA &&
+ uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
+ (type == GRPQUOTA &&
+ !in_egroup_p(make_kgid(&init_user_ns, id)))) &&
(!cfs_capable(CFS_CAP_SYS_ADMIN) ||
sbi->ll_flags & LL_SBI_RMT_CLIENT))
- RETURN(-EPERM);
+ return -EPERM;
break;
case Q_GETINFO:
break;
default:
CERROR("unsupported quotactl op: %#x\n", cmd);
- RETURN(-ENOTTY);
+ return -ENOTTY;
}
if (valid != QC_GENERAL) {
if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
if (cmd == Q_GETINFO)
qctl->qc_cmd = Q_GETOINFO;
else if (cmd == Q_GETQUOTA)
qctl->qc_cmd = Q_GETOQUOTA;
else
- RETURN(-EINVAL);
+ return -EINVAL;
switch (valid) {
case QC_MDTIDX:
@@ -1151,7 +1134,7 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
}
if (rc)
- RETURN(rc);
+ return rc;
qctl->qc_cmd = cmd;
} else {
@@ -1159,7 +1142,7 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
OBD_ALLOC_PTR(oqctl);
if (oqctl == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
QCTL_COPY(oqctl, qctl);
rc = obd_quotactl(sbi->ll_md_exp, oqctl);
@@ -1169,7 +1152,7 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
obd_quotactl(sbi->ll_md_exp, oqctl);
}
OBD_FREE_PTR(oqctl);
- RETURN(rc);
+ return rc;
}
/* If QIF_SPACE is not set, client should collect the
* space usage from OSSs by itself */
@@ -1216,7 +1199,7 @@ out:
OBD_FREE_PTR(oqctl);
}
- RETURN(rc);
+ return rc;
}
static char *
@@ -1249,7 +1232,6 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct obd_ioctl_data *data;
int rc = 0;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), cmd=%#x\n",
inode->i_ino, inode->i_generation, inode, cmd);
@@ -1262,10 +1244,10 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
switch(cmd) {
case FSFILT_IOC_GETFLAGS:
case FSFILT_IOC_SETFLAGS:
- RETURN(ll_iocontrol(inode, file, cmd, arg));
+ return ll_iocontrol(inode, file, cmd, arg);
case FSFILT_IOC_GETVERSION_OLD:
case FSFILT_IOC_GETVERSION:
- RETURN(put_user(inode->i_generation, (int *)arg));
+ return put_user(inode->i_generation, (int *)arg);
/* We need to special case any other ioctls we want to handle,
* to send them to the MDS/OST as appropriate and to properly
* network encode the arg field.
@@ -1277,10 +1259,10 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
mdtidx = ll_get_mdt_idx(inode);
if (mdtidx < 0)
- RETURN(mdtidx);
+ return mdtidx;
if (put_user((int)mdtidx, (int*)arg))
- RETURN(-EFAULT);
+ return -EFAULT;
return 0;
}
@@ -1293,7 +1275,7 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
rc = obd_ioctl_getdata(&buf, &len, (void *)arg);
if (rc)
- RETURN(rc);
+ return rc;
data = (void *)buf;
filename = data->ioc_inlbuf1;
@@ -1317,7 +1299,6 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
GOTO(out_free, rc);
}
ptlrpc_req_finished(request);
- EXIT;
out_free:
obd_ioctl_freedata(buf, len);
return rc;
@@ -1333,7 +1314,7 @@ out_free:
rc = obd_ioctl_getdata(&buf, &len, (void *)arg);
if (rc)
- RETURN(rc);
+ return rc;
data = (void *)buf;
if (data->ioc_inlbuf1 == NULL || data->ioc_inlbuf2 == NULL ||
@@ -1364,7 +1345,7 @@ out_free:
rc = ll_dir_setdirstripe(inode, lum, filename);
lmv_out_free:
obd_ioctl_freedata(buf, len);
- RETURN(rc);
+ return rc;
}
case LL_IOC_LOV_SETSTRIPE: {
@@ -1380,11 +1361,11 @@ lmv_out_free:
sizeof(lumv3p->lmm_objects[0]));
/* first try with v1 which is smaller than v3 */
if (copy_from_user(lumv1, lumv1p, sizeof(*lumv1)))
- RETURN(-EFAULT);
+ return -EFAULT;
if ((lumv1->lmm_magic == LOV_USER_MAGIC_V3) ) {
if (copy_from_user(&lumv3, lumv3p, sizeof(lumv3)))
- RETURN(-EFAULT);
+ return -EFAULT;
}
if (inode->i_sb->s_root == file->f_dentry)
@@ -1393,7 +1374,7 @@ lmv_out_free:
/* in v1 and v3 cases lumv1 points to data */
rc = ll_dir_setstripe(inode, lumv1, set_default);
- RETURN(rc);
+ return rc;
}
case LL_IOC_LMV_GETSTRIPE: {
struct lmv_user_md *lump = (struct lmv_user_md *)arg;
@@ -1404,10 +1385,10 @@ lmv_out_free:
int mdtindex;
if (copy_from_user(&lum, lump, sizeof(struct lmv_user_md)))
- RETURN(-EFAULT);
+ return -EFAULT;
if (lum.lum_magic != LMV_MAGIC_V1)
- RETURN(-EINVAL);
+ return -EINVAL;
lum_size = lmv_user_md_size(1, LMV_MAGIC_V1);
OBD_ALLOC(tmp, lum_size);
@@ -1430,7 +1411,7 @@ lmv_out_free:
free_lmv:
if (tmp)
OBD_FREE(tmp, lum_size);
- RETURN(rc);
+ return rc;
}
case LL_IOC_REMOVE_ENTRY: {
char *filename = NULL;
@@ -1447,7 +1428,7 @@ free_lmv:
filename = ll_getname((const char *)arg);
if (IS_ERR(filename))
- RETURN(PTR_ERR(filename));
+ return PTR_ERR(filename);
namelen = strlen(filename);
if (namelen < 1)
@@ -1457,12 +1438,12 @@ free_lmv:
out_rmdir:
if (filename)
ll_putname(filename);
- RETURN(rc);
+ return rc;
}
case LL_IOC_LOV_SWAP_LAYOUTS:
- RETURN(-EPERM);
+ return -EPERM;
case LL_IOC_OBD_STATFS:
- RETURN(ll_obd_statfs(inode, (void *)arg));
+ return ll_obd_statfs(inode, (void *)arg);
case LL_IOC_LOV_GETSTRIPE:
case LL_IOC_MDC_GETINFO:
case IOC_MDC_GETFILEINFO:
@@ -1478,7 +1459,7 @@ out_rmdir:
cmd == IOC_MDC_GETFILESTRIPE) {
filename = ll_getname((const char *)arg);
if (IS_ERR(filename))
- RETURN(PTR_ERR(filename));
+ return PTR_ERR(filename);
rc = ll_lov_getstripe_ea_info(inode, filename, &lmm,
&lmmsize, &request);
@@ -1539,7 +1520,6 @@ out_rmdir:
GOTO(out_req, rc = -EFAULT);
}
- EXIT;
out_req:
ptlrpc_req_finished(request);
if (filename)
@@ -1559,9 +1539,11 @@ out_rmdir:
rc = ll_get_max_mdsize(sbi, &lmmsize);
if (rc)
- RETURN(rc);
+ return rc;
OBD_ALLOC_LARGE(lmm, lmmsize);
+ if (lmm == NULL)
+ return -ENOMEM;
if (copy_from_user(lmm, lum, lmmsize))
GOTO(free_lmm, rc = -EFAULT);
@@ -1602,7 +1584,6 @@ out_rmdir:
if (copy_to_user(&lumd->lmd_st, &st, sizeof(st)))
GOTO(free_lsm, rc = -EFAULT);
- EXIT;
free_lsm:
obd_free_memmd(sbi->ll_dt_exp, &lsm);
free_lmm:
@@ -1610,7 +1591,7 @@ out_rmdir:
return rc;
}
case OBD_IOC_LLOG_CATINFO: {
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
case OBD_IOC_QUOTACHECK: {
struct obd_quotactl *oqctl;
@@ -1618,11 +1599,11 @@ out_rmdir:
if (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
sbi->ll_flags & LL_SBI_RMT_CLIENT)
- RETURN(-EPERM);
+ return -EPERM;
OBD_ALLOC_PTR(oqctl);
if (!oqctl)
- RETURN(-ENOMEM);
+ return -ENOMEM;
oqctl->qc_type = arg;
rc = obd_quotacheck(sbi->ll_md_exp, oqctl);
if (rc < 0) {
@@ -1642,11 +1623,11 @@ out_rmdir:
if (!cfs_capable(CFS_CAP_SYS_ADMIN) ||
sbi->ll_flags & LL_SBI_RMT_CLIENT)
- RETURN(-EPERM);
+ return -EPERM;
OBD_ALLOC_PTR(check);
if (!check)
- RETURN(-ENOMEM);
+ return -ENOMEM;
rc = obd_iocontrol(cmd, sbi->ll_md_exp, 0, (void *)check,
NULL);
@@ -1669,7 +1650,7 @@ out_rmdir:
}
out_poll:
OBD_FREE_PTR(check);
- RETURN(rc);
+ return rc;
}
#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 7, 50, 0)
case LL_IOC_QUOTACTL_18: {
@@ -1680,7 +1661,7 @@ out_rmdir:
OBD_ALLOC_PTR(qctl_18);
if (!qctl_18)
- RETURN(-ENOMEM);
+ return -ENOMEM;
OBD_ALLOC_PTR(qctl_20);
if (!qctl_20)
@@ -1720,7 +1701,7 @@ out_rmdir:
OBD_FREE_PTR(qctl_20);
out_quotactl_18:
OBD_FREE_PTR(qctl_18);
- RETURN(rc);
+ return rc;
}
#else
#warning "remove old LL_IOC_QUOTACTL_18 compatibility code"
@@ -1730,7 +1711,7 @@ out_rmdir:
OBD_ALLOC_PTR(qctl);
if (!qctl)
- RETURN(-ENOMEM);
+ return -ENOMEM;
if (copy_from_user(qctl, (void *)arg, sizeof(*qctl)))
GOTO(out_quotactl, rc = -EFAULT);
@@ -1742,13 +1723,13 @@ out_rmdir:
out_quotactl:
OBD_FREE_PTR(qctl);
- RETURN(rc);
+ return rc;
}
case OBD_IOC_GETDTNAME:
case OBD_IOC_GETMDNAME:
- RETURN(ll_get_obd_name(inode, cmd, arg));
+ return ll_get_obd_name(inode, cmd, arg);
case LL_IOC_FLUSHCTX:
- RETURN(ll_flush_ctx(inode));
+ return ll_flush_ctx(inode);
#ifdef CONFIG_FS_POSIX_ACL
case LL_IOC_RMTACL: {
if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
@@ -1759,9 +1740,9 @@ out_rmdir:
rc = rct_add(&sbi->ll_rct, current_pid(), arg);
if (!rc)
fd->fd_flags |= LL_FILE_RMTACL;
- RETURN(rc);
+ return rc;
} else
- RETURN(0);
+ return 0;
}
#endif
case LL_IOC_GETOBDCOUNT: {
@@ -1769,7 +1750,7 @@ out_rmdir:
struct obd_export *exp;
if (copy_from_user(&count, (int *)arg, sizeof(int)))
- RETURN(-EFAULT);
+ return -EFAULT;
/* get ost count when count is zero, get mdt count otherwise */
exp = count ? sbi->ll_md_exp : sbi->ll_dt_exp;
@@ -1778,41 +1759,41 @@ out_rmdir:
KEY_TGT_COUNT, &vallen, &count, NULL);
if (rc) {
CERROR("get target count failed: %d\n", rc);
- RETURN(rc);
+ return rc;
}
if (copy_to_user((int *)arg, &count, sizeof(int)))
- RETURN(-EFAULT);
+ return -EFAULT;
- RETURN(0);
+ return 0;
}
case LL_IOC_PATH2FID:
if (copy_to_user((void *)arg, ll_inode2fid(inode),
sizeof(struct lu_fid)))
- RETURN(-EFAULT);
- RETURN(0);
+ return -EFAULT;
+ return 0;
case LL_IOC_GET_CONNECT_FLAGS: {
- RETURN(obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL, (void*)arg));
+ return obd_iocontrol(cmd, sbi->ll_md_exp, 0, NULL, (void*)arg);
}
case OBD_IOC_CHANGELOG_SEND:
case OBD_IOC_CHANGELOG_CLEAR:
rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg,
sizeof(struct ioc_changelog));
- RETURN(rc);
+ return rc;
case OBD_IOC_FID2PATH:
- RETURN(ll_fid2path(inode, (void *)arg));
+ return ll_fid2path(inode, (void *)arg);
case LL_IOC_HSM_REQUEST: {
struct hsm_user_request *hur;
int totalsize;
OBD_ALLOC_PTR(hur);
if (hur == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
/* We don't know the true size yet; copy the fixed-size part */
if (copy_from_user(hur, (void *)arg, sizeof(*hur))) {
OBD_FREE_PTR(hur);
- RETURN(-EFAULT);
+ return -EFAULT;
}
/* Compute the whole struct size */
@@ -1820,12 +1801,12 @@ out_rmdir:
OBD_FREE_PTR(hur);
OBD_ALLOC_LARGE(hur, totalsize);
if (hur == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
/* Copy the whole struct */
if (copy_from_user(hur, (void *)arg, totalsize)) {
OBD_FREE_LARGE(hur, totalsize);
- RETURN(-EFAULT);
+ return -EFAULT;
}
rc = obd_iocontrol(cmd, ll_i2mdexp(inode), totalsize,
@@ -1833,14 +1814,14 @@ out_rmdir:
OBD_FREE_LARGE(hur, totalsize);
- RETURN(rc);
+ return rc;
}
case LL_IOC_HSM_PROGRESS: {
struct hsm_progress_kernel hpk;
struct hsm_progress hp;
if (copy_from_user(&hp, (void *)arg, sizeof(hp)))
- RETURN(-EFAULT);
+ return -EFAULT;
hpk.hpk_fid = hp.hp_fid;
hpk.hpk_cookie = hp.hp_cookie;
@@ -1853,12 +1834,12 @@ out_rmdir:
* reported to Lustre root */
rc = obd_iocontrol(cmd, sbi->ll_md_exp, sizeof(hpk), &hpk,
NULL);
- RETURN(rc);
+ return rc;
}
case LL_IOC_HSM_CT_START:
rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void *)arg,
sizeof(struct lustre_kernelcomm));
- RETURN(rc);
+ return rc;
case LL_IOC_HSM_COPY_START: {
struct hsm_copy *copy;
@@ -1866,10 +1847,10 @@ out_rmdir:
OBD_ALLOC_PTR(copy);
if (copy == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
if (copy_from_user(copy, (char *)arg, sizeof(*copy))) {
OBD_FREE_PTR(copy);
- RETURN(-EFAULT);
+ return -EFAULT;
}
rc = ll_ioc_copy_start(inode->i_sb, copy);
@@ -1877,7 +1858,7 @@ out_rmdir:
rc = -EFAULT;
OBD_FREE_PTR(copy);
- RETURN(rc);
+ return rc;
}
case LL_IOC_HSM_COPY_END: {
struct hsm_copy *copy;
@@ -1885,10 +1866,10 @@ out_rmdir:
OBD_ALLOC_PTR(copy);
if (copy == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
if (copy_from_user(copy, (char *)arg, sizeof(*copy))) {
OBD_FREE_PTR(copy);
- RETURN(-EFAULT);
+ return -EFAULT;
}
rc = ll_ioc_copy_end(inode->i_sb, copy);
@@ -1896,11 +1877,10 @@ out_rmdir:
rc = -EFAULT;
OBD_FREE_PTR(copy);
- RETURN(rc);
+ return rc;
}
default:
- RETURN(obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL,
- (void *)arg));
+ return obd_iocontrol(cmd, sbi->ll_dt_exp, 0, NULL, (void *)arg);
}
}
@@ -1911,7 +1891,6 @@ static loff_t ll_dir_seek(struct file *file, loff_t offset, int origin)
struct ll_sb_info *sbi = ll_i2sbi(inode);
int api32 = ll_need_32bit_api(sbi);
loff_t ret = -EINVAL;
- ENTRY;
mutex_lock(&inode->i_mutex);
switch (origin) {
@@ -1957,14 +1936,12 @@ out:
int ll_dir_open(struct inode *inode, struct file *file)
{
- ENTRY;
- RETURN(ll_file_open(inode, file));
+ return ll_file_open(inode, file);
}
int ll_dir_release(struct inode *inode, struct file *file)
{
- ENTRY;
- RETURN(ll_file_release(inode, file));
+ return ll_file_release(inode, file);
}
struct file_operations ll_dir_operations = {
@@ -1972,7 +1949,7 @@ struct file_operations ll_dir_operations = {
.open = ll_dir_open,
.release = ll_dir_release,
.read = generic_read_dir,
- .readdir = ll_readdir,
+ .iterate = ll_readdir,
.unlocked_ioctl = ll_dir_ioctl,
.fsync = ll_fsync,
};
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index ed1e3f7b4e5..253f02688f4 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -55,6 +55,8 @@ struct ll_file_data *ll_file_data_get(void)
struct ll_file_data *fd;
OBD_SLAB_ALLOC_PTR_GFP(fd, ll_file_data_slab, __GFP_IO);
+ if (fd == NULL)
+ return NULL;
fd->fd_write_failed = false;
return fd;
}
@@ -93,8 +95,6 @@ void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
static void ll_prepare_close(struct inode *inode, struct md_op_data *op_data,
struct obd_client_handle *och)
{
- ENTRY;
-
op_data->op_attr.ia_valid = ATTR_MODE | ATTR_ATIME | ATTR_ATIME_SET |
ATTR_MTIME | ATTR_MTIME_SET |
ATTR_CTIME | ATTR_CTIME_SET;
@@ -111,7 +111,6 @@ out:
ll_pack_inode2opdata(inode, op_data, &och->och_fh);
ll_prep_md_op_data(op_data, inode, NULL, NULL,
0, 0, LUSTRE_OPC_ANY, NULL);
- EXIT;
}
static int ll_close_inode_openhandle(struct obd_export *md_exp,
@@ -124,7 +123,6 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp,
struct obd_device *obd = class_exp2obd(exp);
int epoch_close = 1;
int rc;
- ENTRY;
if (obd == NULL) {
/*
@@ -178,9 +176,7 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp,
inode->i_ino, rc);
}
- EXIT;
out:
-
if (exp_connect_som(exp) && !epoch_close &&
S_ISREG(inode->i_mode) && (och->och_flags & FMODE_WRITE)) {
ll_queue_done_writing(inode, LLIF_DONE_WRITING);
@@ -202,7 +198,6 @@ int ll_md_real_close(struct inode *inode, int flags)
struct obd_client_handle *och;
__u64 *och_usecount;
int rc = 0;
- ENTRY;
if (flags & FMODE_WRITE) {
och_p = &lli->lli_mds_write_och;
@@ -220,7 +215,7 @@ int ll_md_real_close(struct inode *inode, int flags)
if (*och_usecount) { /* There are still users of this handle, so
skip freeing it. */
mutex_unlock(&lli->lli_och_mutex);
- RETURN(0);
+ return 0;
}
och=*och_p;
*och_p = NULL;
@@ -232,7 +227,7 @@ int ll_md_real_close(struct inode *inode, int flags)
inode, och);
}
- RETURN(rc);
+ return rc;
}
int ll_md_close(struct obd_export *md_exp, struct inode *inode,
@@ -241,7 +236,6 @@ int ll_md_close(struct obd_export *md_exp, struct inode *inode,
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
struct ll_inode_info *lli = ll_i2info(inode);
int rc = 0;
- ENTRY;
/* clear group lock, if present */
if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
@@ -287,7 +281,7 @@ int ll_md_close(struct obd_export *md_exp, struct inode *inode,
ll_file_data_put(fd);
ll_capa_close(inode);
- RETURN(rc);
+ return rc;
}
/* While this returns an error code, fput() the caller does not, so we need
@@ -301,7 +295,6 @@ int ll_file_release(struct inode *inode, struct file *file)
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ll_inode_info *lli = ll_i2info(inode);
int rc;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
inode->i_generation, inode);
@@ -335,7 +328,7 @@ int ll_file_release(struct inode *inode, struct file *file)
if (inode->i_sb->s_root == file->f_dentry) {
LUSTRE_FPRIVATE(file) = NULL;
ll_file_data_put(fd);
- RETURN(0);
+ return 0;
}
if (!S_ISDIR(inode->i_mode)) {
@@ -348,7 +341,7 @@ int ll_file_release(struct inode *inode, struct file *file)
if (CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_DUMP_LOG, cfs_fail_val))
libcfs_debug_dumplog();
- RETURN(rc);
+ return rc;
}
static int ll_intent_file_open(struct file *file, void *lmm,
@@ -362,10 +355,9 @@ static int ll_intent_file_open(struct file *file, void *lmm,
struct ptlrpc_request *req;
__u32 opc = LUSTRE_OPC_ANY;
int rc;
- ENTRY;
if (!parent)
- RETURN(-ENOENT);
+ return -ENOENT;
/* Usually we come here only for NFSD, and we want open lock.
But we can also get here with pre 2.6.15 patchless kernels, and in
@@ -386,7 +378,7 @@ static int ll_intent_file_open(struct file *file, void *lmm,
file->f_dentry->d_inode, name, len,
O_RDWR, opc, NULL);
if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ return PTR_ERR(op_data);
itp->it_flags |= MDS_OPEN_BY_FID;
rc = md_intent_lock(sbi->ll_md_exp, op_data, lmm, lmmsize, itp,
@@ -422,7 +414,7 @@ out:
it_clear_disposition(itp, DISP_ENQ_COMPLETE);
ll_intent_drop_lock(itp);
- RETURN(rc);
+ return rc;
}
/**
@@ -464,7 +456,6 @@ int ll_local_open(struct file *file, struct lookup_intent *it,
{
struct inode *inode = file->f_dentry->d_inode;
struct ll_inode_info *lli = ll_i2info(inode);
- ENTRY;
LASSERT(!LUSTRE_FPRIVATE(file));
@@ -477,7 +468,7 @@ int ll_local_open(struct file *file, struct lookup_intent *it,
rc = ll_och_fill(ll_i2sbi(inode)->ll_md_exp, lli, it, och);
if (rc)
- RETURN(rc);
+ return rc;
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
if ((it->it_flags & FMODE_WRITE) &&
@@ -489,7 +480,7 @@ int ll_local_open(struct file *file, struct lookup_intent *it,
LUSTRE_FPRIVATE(file) = fd;
ll_readahead_init(inode, &fd->fd_ras);
fd->fd_omode = it->it_flags;
- RETURN(0);
+ return 0;
}
/* Open a file, and (for the very first open) create objects on the OSTs at
@@ -514,7 +505,6 @@ int ll_file_open(struct inode *inode, struct file *file)
__u64 *och_usecount = NULL;
struct ll_file_data *fd;
int rc = 0, opendir_set = 0;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), flags %o\n", inode->i_ino,
inode->i_generation, inode, file->f_flags);
@@ -524,7 +514,7 @@ int ll_file_open(struct inode *inode, struct file *file)
fd = ll_file_data_get();
if (fd == NULL)
- GOTO(out_och_free, rc = -ENOMEM);
+ GOTO(out_openerr, rc = -ENOMEM);
fd->fd_file = file;
if (S_ISDIR(inode->i_mode)) {
@@ -540,7 +530,7 @@ int ll_file_open(struct inode *inode, struct file *file)
if (inode->i_sb->s_root == file->f_dentry) {
LUSTRE_FPRIVATE(file) = fd;
- RETURN(0);
+ return 0;
}
if (!it || !it->d.lustre.it_disposition) {
@@ -700,8 +690,6 @@ static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
struct obd_info oinfo = { { { 0 } } };
int rc;
- ENTRY;
-
LASSERT(lsm != NULL);
oinfo.oi_md = lsm;
@@ -736,7 +724,7 @@ static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
OBD_MD_FLATIME | OBD_MD_FLMTIME |
OBD_MD_FLCTIME | OBD_MD_FLSIZE |
OBD_MD_FLDATAVERSION);
- RETURN(rc);
+ return rc;
}
/**
@@ -749,7 +737,6 @@ int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
struct obd_capa *capa = ll_mdscapa_get(inode);
struct lov_stripe_md *lsm;
int rc;
- ENTRY;
lsm = ccc_inode_lsm_get(inode);
rc = ll_lsm_getattr(lsm, ll_i2dtexp(inode),
@@ -765,7 +752,7 @@ int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
(unsigned long)ll_inode_blksize(inode));
}
ccc_inode_lsm_put(inode, lsm);
- RETURN(rc);
+ return rc;
}
int ll_merge_lvb(const struct lu_env *env, struct inode *inode)
@@ -776,8 +763,6 @@ int ll_merge_lvb(const struct lu_env *env, struct inode *inode)
struct ost_lvb lvb;
int rc = 0;
- ENTRY;
-
ll_inode_size_lock(inode);
/* merge timestamps the most recently obtained from mds with
timestamps obtained from osts */
@@ -810,7 +795,7 @@ int ll_merge_lvb(const struct lu_env *env, struct inode *inode)
}
ll_inode_size_unlock(inode);
- RETURN(rc);
+ return rc;
}
int ll_glimpse_ioctl(struct ll_sb_info *sbi, struct lov_stripe_md *lsm,
@@ -860,7 +845,6 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
struct cl_io *io;
ssize_t result;
- ENTRY;
restart:
io = ccc_env_thread_io(env);
@@ -986,15 +970,14 @@ static ssize_t ll_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
size_t count;
ssize_t result;
int refcheck;
- ENTRY;
result = ll_file_get_iov_count(iov, &nr_segs, &count);
if (result)
- RETURN(result);
+ return result;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
- RETURN(PTR_ERR(env));
+ return PTR_ERR(env);
args = vvp_env_args(env, IO_NORMAL);
args->u.normal.via_iov = (struct iovec *)iov;
@@ -1004,7 +987,7 @@ static ssize_t ll_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_READ,
&iocb->ki_pos, count);
cl_env_put(env, &refcheck);
- RETURN(result);
+ return result;
}
static ssize_t ll_file_read(struct file *file, char *buf, size_t count,
@@ -1015,11 +998,10 @@ static ssize_t ll_file_read(struct file *file, char *buf, size_t count,
struct kiocb *kiocb;
ssize_t result;
int refcheck;
- ENTRY;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
- RETURN(PTR_ERR(env));
+ return PTR_ERR(env);
local_iov = &vvp_env_info(env)->vti_local_iov;
kiocb = &vvp_env_info(env)->vti_kiocb;
@@ -1033,7 +1015,7 @@ static ssize_t ll_file_read(struct file *file, char *buf, size_t count,
*ppos = kiocb->ki_pos;
cl_env_put(env, &refcheck);
- RETURN(result);
+ return result;
}
/*
@@ -1047,15 +1029,14 @@ static ssize_t ll_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
size_t count;
ssize_t result;
int refcheck;
- ENTRY;
result = ll_file_get_iov_count(iov, &nr_segs, &count);
if (result)
- RETURN(result);
+ return result;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
- RETURN(PTR_ERR(env));
+ return PTR_ERR(env);
args = vvp_env_args(env, IO_NORMAL);
args->u.normal.via_iov = (struct iovec *)iov;
@@ -1065,7 +1046,7 @@ static ssize_t ll_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
result = ll_file_io_generic(env, args, iocb->ki_filp, CIT_WRITE,
&iocb->ki_pos, count);
cl_env_put(env, &refcheck);
- RETURN(result);
+ return result;
}
static ssize_t ll_file_write(struct file *file, const char *buf, size_t count,
@@ -1076,11 +1057,10 @@ static ssize_t ll_file_write(struct file *file, const char *buf, size_t count,
struct kiocb *kiocb;
ssize_t result;
int refcheck;
- ENTRY;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
- RETURN(PTR_ERR(env));
+ return PTR_ERR(env);
local_iov = &vvp_env_info(env)->vti_local_iov;
kiocb = &vvp_env_info(env)->vti_kiocb;
@@ -1094,7 +1074,7 @@ static ssize_t ll_file_write(struct file *file, const char *buf, size_t count,
*ppos = kiocb->ki_pos;
cl_env_put(env, &refcheck);
- RETURN(result);
+ return result;
}
@@ -1110,11 +1090,10 @@ static ssize_t ll_file_splice_read(struct file *in_file, loff_t *ppos,
struct vvp_io_args *args;
ssize_t result;
int refcheck;
- ENTRY;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
- RETURN(PTR_ERR(env));
+ return PTR_ERR(env);
args = vvp_env_args(env, IO_SPLICE);
args->u.splice.via_pipe = pipe;
@@ -1122,7 +1101,7 @@ static ssize_t ll_file_splice_read(struct file *in_file, loff_t *ppos,
result = ll_file_io_generic(env, args, in_file, CIT_READ, ppos, count);
cl_env_put(env, &refcheck);
- RETURN(result);
+ return result;
}
static int ll_lov_recreate(struct inode *inode, struct ost_id *oi,
@@ -1134,14 +1113,13 @@ static int ll_lov_recreate(struct inode *inode, struct ost_id *oi,
int lsm_size;
int rc = 0;
struct lov_stripe_md *lsm = NULL, *lsm2;
- ENTRY;
OBDO_ALLOC(oa);
if (oa == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
lsm = ccc_inode_lsm_get(inode);
- if (lsm == NULL)
+ if (!lsm_has_objects(lsm))
GOTO(out, rc = -ENOENT);
lsm_size = sizeof(*lsm) + (sizeof(struct lov_oinfo) *
@@ -1175,18 +1153,17 @@ static int ll_lov_recreate_obj(struct inode *inode, unsigned long arg)
{
struct ll_recreate_obj ucreat;
struct ost_id oi;
- ENTRY;
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
- RETURN(-EPERM);
+ return -EPERM;
if (copy_from_user(&ucreat, (struct ll_recreate_obj *)arg,
sizeof(ucreat)))
- RETURN(-EFAULT);
+ return -EFAULT;
ostid_set_seq_mdt0(&oi);
ostid_set_id(&oi, ucreat.lrc_id);
- RETURN(ll_lov_recreate(inode, &oi, ucreat.lrc_ost_idx));
+ return ll_lov_recreate(inode, &oi, ucreat.lrc_ost_idx);
}
static int ll_lov_recreate_fid(struct inode *inode, unsigned long arg)
@@ -1194,17 +1171,16 @@ static int ll_lov_recreate_fid(struct inode *inode, unsigned long arg)
struct lu_fid fid;
struct ost_id oi;
obd_count ost_idx;
- ENTRY;
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
- RETURN(-EPERM);
+ return -EPERM;
if (copy_from_user(&fid, (struct lu_fid *)arg, sizeof(fid)))
- RETURN(-EFAULT);
+ return -EFAULT;
fid_to_ostid(&fid, &oi);
ost_idx = (fid_seq(&fid) >> 16) & 0xffff;
- RETURN(ll_lov_recreate(inode, &oi, ost_idx));
+ return ll_lov_recreate(inode, &oi, ost_idx);
}
int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
@@ -1213,14 +1189,13 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
struct lov_stripe_md *lsm = NULL;
struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
int rc = 0;
- ENTRY;
lsm = ccc_inode_lsm_get(inode);
if (lsm != NULL) {
ccc_inode_lsm_put(inode, lsm);
CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n",
inode->i_ino);
- RETURN(-EEXIST);
+ return -EEXIST;
}
ll_inode_size_lock(inode);
@@ -1237,7 +1212,7 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct file *file,
ll_inode_size_unlock(inode);
ll_intent_release(&oit);
ccc_inode_lsm_put(inode, lsm);
- RETURN(rc);
+ return rc;
out_req_free:
ptlrpc_req_finished((struct ptlrpc_request *) oit.d.lustre.it_data);
goto out;
@@ -1256,13 +1231,13 @@ int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
rc = ll_get_max_mdsize(sbi, &lmmsize);
if (rc)
- RETURN(rc);
+ return rc;
op_data = ll_prep_md_op_data(NULL, inode, NULL, filename,
strlen(filename), lmmsize,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ return PTR_ERR(op_data);
op_data->op_valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA;
rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
@@ -1297,6 +1272,12 @@ int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
* passing it to userspace.
*/
if (LOV_MAGIC != cpu_to_le32(LOV_MAGIC)) {
+ int stripe_count;
+
+ stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
+ if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_RELEASED)
+ stripe_count = 0;
+
/* if function called for directory - we should
* avoid swab not existent lsm objects */
if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V1)) {
@@ -1304,13 +1285,13 @@ int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
if (S_ISREG(body->mode))
lustre_swab_lov_user_md_objects(
((struct lov_user_md_v1 *)lmm)->lmm_objects,
- ((struct lov_user_md_v1 *)lmm)->lmm_stripe_count);
+ stripe_count);
} else if (lmm->lmm_magic == cpu_to_le32(LOV_MAGIC_V3)) {
lustre_swab_lov_user_md_v3((struct lov_user_md_v3 *)lmm);
if (S_ISREG(body->mode))
lustre_swab_lov_user_md_objects(
((struct lov_user_md_v3 *)lmm)->lmm_objects,
- ((struct lov_user_md_v3 *)lmm)->lmm_stripe_count);
+ stripe_count);
}
}
@@ -1329,24 +1310,23 @@ static int ll_lov_setea(struct inode *inode, struct file *file,
int lum_size = sizeof(struct lov_user_md) +
sizeof(struct lov_user_ost_data);
int rc;
- ENTRY;
if (!cfs_capable(CFS_CAP_SYS_ADMIN))
- RETURN(-EPERM);
+ return -EPERM;
OBD_ALLOC_LARGE(lump, lum_size);
if (lump == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
if (copy_from_user(lump, (struct lov_user_md *)arg, lum_size)) {
OBD_FREE_LARGE(lump, lum_size);
- RETURN(-EFAULT);
+ return -EFAULT;
}
rc = ll_lov_setstripe_ea_info(inode, file, flags, lump, lum_size);
OBD_FREE_LARGE(lump, lum_size);
- RETURN(rc);
+ return rc;
}
static int ll_lov_setstripe(struct inode *inode, struct file *file,
@@ -1358,17 +1338,16 @@ static int ll_lov_setstripe(struct inode *inode, struct file *file,
struct lov_user_md_v3 *lumv3p = (struct lov_user_md_v3 *)arg;
int lum_size, rc;
int flags = FMODE_WRITE;
- ENTRY;
/* first try with v1 which is smaller than v3 */
lum_size = sizeof(struct lov_user_md_v1);
if (copy_from_user(lumv1, lumv1p, lum_size))
- RETURN(-EFAULT);
+ return -EFAULT;
if (lumv1->lmm_magic == LOV_USER_MAGIC_V3) {
lum_size = sizeof(struct lov_user_md_v3);
if (copy_from_user(&lumv3, lumv3p, lum_size))
- RETURN(-EFAULT);
+ return -EFAULT;
}
rc = ll_lov_setstripe_ea_info(inode, file, flags, lumv1, lum_size);
@@ -1384,21 +1363,20 @@ static int ll_lov_setstripe(struct inode *inode, struct file *file,
0, lsm, (void *)arg);
ccc_inode_lsm_put(inode, lsm);
}
- RETURN(rc);
+ return rc;
}
static int ll_lov_getstripe(struct inode *inode, unsigned long arg)
{
struct lov_stripe_md *lsm;
int rc = -ENODATA;
- ENTRY;
lsm = ccc_inode_lsm_get(inode);
if (lsm != NULL)
rc = obd_iocontrol(LL_IOC_LOV_GETSTRIPE, ll_i2dtexp(inode), 0,
lsm, (void *)arg);
ccc_inode_lsm_put(inode, lsm);
- RETURN(rc);
+ return rc;
}
int ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
@@ -1407,17 +1385,16 @@ int ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
struct ccc_grouplock grouplock;
int rc;
- ENTRY;
if (ll_file_nolock(file))
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
spin_lock(&lli->lli_lock);
if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
CWARN("group lock already existed with gid %lu\n",
fd->fd_grouplock.cg_gid);
spin_unlock(&lli->lli_lock);
- RETURN(-EINVAL);
+ return -EINVAL;
}
LASSERT(fd->fd_grouplock.cg_lock == NULL);
spin_unlock(&lli->lli_lock);
@@ -1425,14 +1402,14 @@ int ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
rc = cl_get_grouplock(cl_i2info(inode)->lli_clob,
arg, (file->f_flags & O_NONBLOCK), &grouplock);
if (rc)
- RETURN(rc);
+ return rc;
spin_lock(&lli->lli_lock);
if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
spin_unlock(&lli->lli_lock);
CERROR("another thread just won the race\n");
cl_put_grouplock(&grouplock);
- RETURN(-EINVAL);
+ return -EINVAL;
}
fd->fd_flags |= LL_FILE_GROUP_LOCKED;
@@ -1440,7 +1417,7 @@ int ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
spin_unlock(&lli->lli_lock);
CDEBUG(D_INFO, "group lock %lu obtained\n", arg);
- RETURN(0);
+ return 0;
}
int ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg)
@@ -1448,13 +1425,12 @@ int ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg)
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
struct ccc_grouplock grouplock;
- ENTRY;
spin_lock(&lli->lli_lock);
if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
spin_unlock(&lli->lli_lock);
CWARN("no group lock held\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
LASSERT(fd->fd_grouplock.cg_lock != NULL);
@@ -1462,7 +1438,7 @@ int ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg)
CWARN("group lock %lu doesn't match current id %lu\n",
arg, fd->fd_grouplock.cg_gid);
spin_unlock(&lli->lli_lock);
- RETURN(-EINVAL);
+ return -EINVAL;
}
grouplock = fd->fd_grouplock;
@@ -1472,7 +1448,7 @@ int ll_put_grouplock(struct inode *inode, struct file *file, unsigned long arg)
cl_put_grouplock(&grouplock);
CDEBUG(D_INFO, "group lock %lu released\n", arg);
- RETURN(0);
+ return 0;
}
/**
@@ -1489,17 +1465,16 @@ int ll_release_openhandle(struct dentry *dentry, struct lookup_intent *it)
struct inode *inode = dentry->d_inode;
struct obd_client_handle *och;
int rc;
- ENTRY;
LASSERT(inode);
/* Root ? Do nothing. */
if (dentry->d_inode->i_sb->s_root == dentry)
- RETURN(0);
+ return 0;
/* No open handle to close? Move away */
if (!it_disposition(it, DISP_OPEN_OPEN))
- RETURN(0);
+ return 0;
LASSERT(it_open_error(DISP_OPEN_OPEN, it) == 0);
@@ -1518,7 +1493,7 @@ int ll_release_openhandle(struct dentry *dentry, struct lookup_intent *it)
ptlrpc_req_finished(it->d.lustre.it_data);
it_clear_disposition(it, DISP_ENQ_OPEN_REF);
}
- RETURN(rc);
+ return rc;
}
/**
@@ -1533,7 +1508,6 @@ int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
struct ll_fiemap_info_key fm_key = { .name = KEY_FIEMAP, };
int vallen = num_bytes;
int rc;
- ENTRY;
/* Checks for fiemap flags */
if (fiemap->fm_flags & ~LUSTRE_FIEMAP_FLAGS_COMPAT) {
@@ -1579,7 +1553,7 @@ int ll_do_fiemap(struct inode *inode, struct ll_user_fiemap *fiemap,
out:
ccc_inode_lsm_put(inode, lsm);
- RETURN(rc);
+ return rc;
}
int ll_fid2path(struct inode *inode, void *arg)
@@ -1587,26 +1561,25 @@ int ll_fid2path(struct inode *inode, void *arg)
struct obd_export *exp = ll_i2mdexp(inode);
struct getinfo_fid2path *gfout, *gfin;
int outsize, rc;
- ENTRY;
if (!cfs_capable(CFS_CAP_DAC_READ_SEARCH) &&
!(ll_i2sbi(inode)->ll_flags & LL_SBI_USER_FID2PATH))
- RETURN(-EPERM);
+ return -EPERM;
/* Need to get the buflen */
OBD_ALLOC_PTR(gfin);
if (gfin == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
if (copy_from_user(gfin, arg, sizeof(*gfin))) {
OBD_FREE_PTR(gfin);
- RETURN(-EFAULT);
+ return -EFAULT;
}
outsize = sizeof(*gfout) + gfin->gf_pathlen;
OBD_ALLOC(gfout, outsize);
if (gfout == NULL) {
OBD_FREE_PTR(gfin);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
memcpy(gfout, gfin, sizeof(*gfout));
OBD_FREE_PTR(gfin);
@@ -1621,7 +1594,7 @@ int ll_fid2path(struct inode *inode, void *arg)
gf_free:
OBD_FREE(gfout, outsize);
- RETURN(rc);
+ return rc;
}
static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg)
@@ -1635,13 +1608,13 @@ static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg)
* required fiemap buffer */
if (get_user(extent_count,
&((struct ll_user_fiemap __user *)arg)->fm_extent_count))
- RETURN(-EFAULT);
+ return -EFAULT;
num_bytes = sizeof(*fiemap_s) + (extent_count *
sizeof(struct ll_fiemap_extent));
OBD_ALLOC_LARGE(fiemap_s, num_bytes);
if (fiemap_s == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
/* get the fiemap value */
if (copy_from_user(fiemap_s, (struct ll_user_fiemap __user *)arg,
@@ -1673,7 +1646,7 @@ static int ll_ioctl_fiemap(struct inode *inode, unsigned long arg)
error:
OBD_FREE_LARGE(fiemap_s, num_bytes);
- RETURN(rc);
+ return rc;
}
/*
@@ -1692,24 +1665,21 @@ int ll_data_version(struct inode *inode, __u64 *data_version,
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct obdo *obdo = NULL;
int rc;
- ENTRY;
/* If no stripe, we consider version is 0. */
lsm = ccc_inode_lsm_get(inode);
- if (lsm == NULL) {
+ if (!lsm_has_objects(lsm)) {
*data_version = 0;
CDEBUG(D_INODE, "No object for inode\n");
- RETURN(0);
+ GOTO(out, rc = 0);
}
OBD_ALLOC_PTR(obdo);
- if (obdo == NULL) {
- ccc_inode_lsm_put(inode, lsm);
- RETURN(-ENOMEM);
- }
+ if (obdo == NULL)
+ GOTO(out, rc = -ENOMEM);
rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, NULL, obdo, 0, extent_lock);
- if (!rc) {
+ if (rc == 0) {
if (!(obdo->o_valid & OBD_MD_FLDATAVERSION))
rc = -EOPNOTSUPP;
else
@@ -1717,9 +1687,9 @@ int ll_data_version(struct inode *inode, __u64 *data_version,
}
OBD_FREE_PTR(obdo);
+out:
ccc_inode_lsm_put(inode, lsm);
-
- RETURN(rc);
+ return rc;
}
struct ll_swap_stack {
@@ -1741,7 +1711,7 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
OBD_ALLOC_PTR(llss);
if (llss == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
llss->inode1 = file1->f_dentry->d_inode;
llss->inode2 = file2->f_dentry->d_inode;
@@ -1749,8 +1719,8 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
if (!S_ISREG(llss->inode2->i_mode))
GOTO(free, rc = -EINVAL);
- if (ll_permission(llss->inode1, MAY_WRITE, NULL) ||
- ll_permission(llss->inode2, MAY_WRITE, NULL))
+ if (inode_permission(llss->inode1, MAY_WRITE) ||
+ inode_permission(llss->inode2, MAY_WRITE))
GOTO(free, rc = -EPERM);
if (llss->inode2->i_sb != llss->inode1->i_sb)
@@ -1830,12 +1800,12 @@ static int ll_swap_layouts(struct file *file1, struct file *file2,
rc = -ENOMEM;
op_data = ll_prep_md_op_data(NULL, llss->inode1, llss->inode2, NULL, 0,
0, LUSTRE_OPC_ANY, &msl);
- if (op_data != NULL) {
- rc = obd_iocontrol(LL_IOC_LOV_SWAP_LAYOUTS,
- ll_i2mdexp(llss->inode1),
- sizeof(*op_data), op_data, NULL);
- ll_finish_md_op_data(op_data);
- }
+ if (IS_ERR(op_data))
+ GOTO(free, rc = PTR_ERR(op_data));
+
+ rc = obd_iocontrol(LL_IOC_LOV_SWAP_LAYOUTS, ll_i2mdexp(llss->inode1),
+ sizeof(*op_data), op_data, NULL);
+ ll_finish_md_op_data(op_data);
putgl:
if (gid != 0) {
@@ -1880,7 +1850,7 @@ free:
if (llss != NULL)
OBD_FREE_PTR(llss);
- RETURN(rc);
+ return rc;
}
long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -1888,7 +1858,6 @@ long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct inode *inode = file->f_dentry->d_inode;
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
int flags, rc;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),cmd=%x\n", inode->i_ino,
inode->i_generation, inode, cmd);
@@ -1896,7 +1865,7 @@ long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
/* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
- RETURN(-ENOTTY);
+ return -ENOTTY;
switch(cmd) {
case LL_IOC_GETFLAGS:
@@ -1909,66 +1878,66 @@ long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
* not abused, and to handle any flag side effects.
*/
if (get_user(flags, (int *) arg))
- RETURN(-EFAULT);
+ return -EFAULT;
if (cmd == LL_IOC_SETFLAGS) {
if ((flags & LL_FILE_IGNORE_LOCK) &&
!(file->f_flags & O_DIRECT)) {
CERROR("%s: unable to disable locking on "
"non-O_DIRECT file\n", current->comm);
- RETURN(-EINVAL);
+ return -EINVAL;
}
fd->fd_flags |= flags;
} else {
fd->fd_flags &= ~flags;
}
- RETURN(0);
+ return 0;
case LL_IOC_LOV_SETSTRIPE:
- RETURN(ll_lov_setstripe(inode, file, arg));
+ return ll_lov_setstripe(inode, file, arg);
case LL_IOC_LOV_SETEA:
- RETURN(ll_lov_setea(inode, file, arg));
+ return ll_lov_setea(inode, file, arg);
case LL_IOC_LOV_SWAP_LAYOUTS: {
struct file *file2;
struct lustre_swap_layouts lsl;
if (copy_from_user(&lsl, (char *)arg,
sizeof(struct lustre_swap_layouts)))
- RETURN(-EFAULT);
+ return -EFAULT;
if ((file->f_flags & O_ACCMODE) == 0) /* O_RDONLY */
- RETURN(-EPERM);
+ return -EPERM;
file2 = fget(lsl.sl_fd);
if (file2 == NULL)
- RETURN(-EBADF);
+ return -EBADF;
rc = -EPERM;
if ((file2->f_flags & O_ACCMODE) != 0) /* O_WRONLY or O_RDWR */
rc = ll_swap_layouts(file, file2, &lsl);
fput(file2);
- RETURN(rc);
+ return rc;
}
case LL_IOC_LOV_GETSTRIPE:
- RETURN(ll_lov_getstripe(inode, arg));
+ return ll_lov_getstripe(inode, arg);
case LL_IOC_RECREATE_OBJ:
- RETURN(ll_lov_recreate_obj(inode, arg));
+ return ll_lov_recreate_obj(inode, arg);
case LL_IOC_RECREATE_FID:
- RETURN(ll_lov_recreate_fid(inode, arg));
+ return ll_lov_recreate_fid(inode, arg);
case FSFILT_IOC_FIEMAP:
- RETURN(ll_ioctl_fiemap(inode, arg));
+ return ll_ioctl_fiemap(inode, arg);
case FSFILT_IOC_GETFLAGS:
case FSFILT_IOC_SETFLAGS:
- RETURN(ll_iocontrol(inode, file, cmd, arg));
+ return ll_iocontrol(inode, file, cmd, arg);
case FSFILT_IOC_GETVERSION_OLD:
case FSFILT_IOC_GETVERSION:
- RETURN(put_user(inode->i_generation, (int *)arg));
+ return put_user(inode->i_generation, (int *)arg);
case LL_IOC_GROUP_LOCK:
- RETURN(ll_get_grouplock(inode, file, arg));
+ return ll_get_grouplock(inode, file, arg);
case LL_IOC_GROUP_UNLOCK:
- RETURN(ll_put_grouplock(inode, file, arg));
+ return ll_put_grouplock(inode, file, arg);
case IOC_OBD_STATFS:
- RETURN(ll_obd_statfs(inode, (void *)arg));
+ return ll_obd_statfs(inode, (void *)arg);
/* We need to special case any other ioctls we want to handle,
* to send them to the MDS/OST as appropriate and to properly
@@ -1977,30 +1946,30 @@ long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case FSFILT_IOC_SETVERSION:
*/
case LL_IOC_FLUSHCTX:
- RETURN(ll_flush_ctx(inode));
+ return ll_flush_ctx(inode);
case LL_IOC_PATH2FID: {
if (copy_to_user((void *)arg, ll_inode2fid(inode),
sizeof(struct lu_fid)))
- RETURN(-EFAULT);
+ return -EFAULT;
- RETURN(0);
+ return 0;
}
case OBD_IOC_FID2PATH:
- RETURN(ll_fid2path(inode, (void *)arg));
+ return ll_fid2path(inode, (void *)arg);
case LL_IOC_DATA_VERSION: {
struct ioc_data_version idv;
int rc;
if (copy_from_user(&idv, (char *)arg, sizeof(idv)))
- RETURN(-EFAULT);
+ return -EFAULT;
rc = ll_data_version(inode, &idv.idv_version,
!(idv.idv_flags & LL_DV_NOFLUSH));
if (rc == 0 && copy_to_user((char *) arg, &idv, sizeof(idv)))
- RETURN(-EFAULT);
+ return -EFAULT;
- RETURN(rc);
+ return rc;
}
case LL_IOC_GET_MDTIDX: {
@@ -2008,16 +1977,16 @@ long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
mdtidx = ll_get_mdt_idx(inode);
if (mdtidx < 0)
- RETURN(mdtidx);
+ return mdtidx;
if (put_user((int)mdtidx, (int*)arg))
- RETURN(-EFAULT);
+ return -EFAULT;
- RETURN(0);
+ return 0;
}
case OBD_IOC_GETDTNAME:
case OBD_IOC_GETMDNAME:
- RETURN(ll_get_obd_name(inode, cmd, arg));
+ return ll_get_obd_name(inode, cmd, arg);
case LL_IOC_HSM_STATE_GET: {
struct md_op_data *op_data;
struct hsm_user_state *hus;
@@ -2025,13 +1994,13 @@ long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
OBD_ALLOC_PTR(hus);
if (hus == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
LUSTRE_OPC_ANY, hus);
- if (op_data == NULL) {
+ if (IS_ERR(op_data)) {
OBD_FREE_PTR(hus);
- RETURN(-ENOMEM);
+ return PTR_ERR(op_data);
}
rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
@@ -2042,7 +2011,7 @@ long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ll_finish_md_op_data(op_data);
OBD_FREE_PTR(hus);
- RETURN(rc);
+ return rc;
}
case LL_IOC_HSM_STATE_SET: {
struct md_op_data *op_data;
@@ -2051,10 +2020,10 @@ long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
OBD_ALLOC_PTR(hss);
if (hss == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
if (copy_from_user(hss, (char *)arg, sizeof(*hss))) {
OBD_FREE_PTR(hss);
- RETURN(-EFAULT);
+ return -EFAULT;
}
/* Non-root users are forbidden to set or clear flags which are
@@ -2062,14 +2031,14 @@ long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (((hss->hss_setmask | hss->hss_clearmask) & ~HSM_USER_MASK)
&& !cfs_capable(CFS_CAP_SYS_ADMIN)) {
OBD_FREE_PTR(hss);
- RETURN(-EPERM);
+ return -EPERM;
}
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
LUSTRE_OPC_ANY, hss);
- if (op_data == NULL) {
+ if (IS_ERR(op_data)) {
OBD_FREE_PTR(hss);
- RETURN(-ENOMEM);
+ return PTR_ERR(op_data);
}
rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
@@ -2078,7 +2047,7 @@ long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ll_finish_md_op_data(op_data);
OBD_FREE_PTR(hss);
- RETURN(rc);
+ return rc;
}
case LL_IOC_HSM_ACTION: {
struct md_op_data *op_data;
@@ -2087,13 +2056,13 @@ long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
OBD_ALLOC_PTR(hca);
if (hca == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
LUSTRE_OPC_ANY, hca);
- if (op_data == NULL) {
+ if (IS_ERR(op_data)) {
OBD_FREE_PTR(hca);
- RETURN(-ENOMEM);
+ return PTR_ERR(op_data);
}
rc = obd_iocontrol(cmd, ll_i2mdexp(inode), sizeof(*op_data),
@@ -2104,17 +2073,17 @@ long ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ll_finish_md_op_data(op_data);
OBD_FREE_PTR(hca);
- RETURN(rc);
+ return rc;
}
default: {
int err;
if (LLIOC_STOP ==
ll_iocontrol_call(inode, file, cmd, arg, &err))
- RETURN(err);
+ return err;
- RETURN(obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL,
- (void *)arg));
+ return obd_iocontrol(cmd, ll_i2dtexp(inode), 0, NULL,
+ (void *)arg);
}
}
}
@@ -2125,7 +2094,6 @@ loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
struct inode *inode = file->f_dentry->d_inode;
loff_t retval, eof = 0;
- ENTRY;
retval = offset + ((origin == SEEK_END) ? i_size_read(inode) :
(origin == SEEK_CUR) ? file->f_pos : 0);
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), to=%llu=%#llx(%d)\n",
@@ -2136,13 +2104,13 @@ loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
if (origin == SEEK_END || origin == SEEK_HOLE || origin == SEEK_DATA) {
retval = ll_glimpse_size(inode);
if (retval != 0)
- RETURN(retval);
+ return retval;
eof = i_size_read(inode);
}
- retval = ll_generic_file_llseek_size(file, offset, origin,
+ retval = generic_file_llseek_size(file, offset, origin,
ll_file_maxbytes(inode), eof);
- RETURN(retval);
+ return retval;
}
int ll_flush(struct file *file, fl_owner_t id)
@@ -2184,15 +2152,14 @@ int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
struct obd_capa *capa = NULL;
struct cl_fsync_io *fio;
int result;
- ENTRY;
if (mode != CL_FSYNC_NONE && mode != CL_FSYNC_LOCAL &&
mode != CL_FSYNC_DISCARD && mode != CL_FSYNC_ALL)
- RETURN(-EINVAL);
+ return -EINVAL;
env = cl_env_nested_get(&nest);
if (IS_ERR(env))
- RETURN(PTR_ERR(env));
+ return PTR_ERR(env);
capa = ll_osscapa_get(inode, CAPA_OPC_OSS_WRITE);
@@ -2220,7 +2187,7 @@ int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
capa_put(capa);
- RETURN(result);
+ return result;
}
/*
@@ -2237,7 +2204,6 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
struct ptlrpc_request *req;
struct obd_capa *oc;
int rc, err;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
inode->i_generation, inode);
@@ -2281,23 +2247,24 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
}
mutex_unlock(&inode->i_mutex);
- RETURN(rc);
+ return rc;
}
int ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
{
struct inode *inode = file->f_dentry->d_inode;
struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ldlm_enqueue_info einfo = { .ei_type = LDLM_FLOCK,
- .ei_cb_cp =ldlm_flock_completion_ast,
- .ei_cbdata = file_lock };
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = LDLM_FLOCK,
+ .ei_cb_cp = ldlm_flock_completion_ast,
+ .ei_cbdata = file_lock,
+ };
struct md_op_data *op_data;
struct lustre_handle lockh = {0};
ldlm_policy_data_t flock = {{0}};
int flags = 0;
int rc;
int rc2 = 0;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu file_lock=%p\n",
inode->i_ino, file_lock);
@@ -2315,7 +2282,7 @@ int ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
flock.l_flock.start = file_lock->fl_start;
flock.l_flock.end = file_lock->fl_end;
} else {
- RETURN(-EINVAL);
+ return -EINVAL;
}
flock.l_flock.pid = file_lock->fl_pid;
@@ -2350,7 +2317,7 @@ int ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
default:
CDEBUG(D_INFO, "Unknown fcntl lock type: %d\n",
file_lock->fl_type);
- RETURN (-ENOTSUPP);
+ return -ENOTSUPP;
}
switch (cmd) {
@@ -2377,13 +2344,13 @@ int ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
break;
default:
CERROR("unknown fcntl lock command: %d\n", cmd);
- RETURN (-EINVAL);
+ return -EINVAL;
}
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ return PTR_ERR(op_data);
CDEBUG(D_DLMTRACE, "inode=%lu, pid=%u, flags=%#x, mode=%u, "
"start="LPU64", end="LPU64"\n", inode->i_ino, flock.l_flock.pid,
@@ -2409,14 +2376,12 @@ int ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
ll_finish_md_op_data(op_data);
- RETURN(rc);
+ return rc;
}
int ll_file_noflock(struct file *file, int cmd, struct file_lock *file_lock)
{
- ENTRY;
-
- RETURN(-ENOSYS);
+ return -ENOSYS;
}
/**
@@ -2438,17 +2403,16 @@ int ll_have_md_lock(struct inode *inode, __u64 *bits, ldlm_mode_t l_req_mode)
struct lu_fid *fid;
__u64 flags;
int i;
- ENTRY;
if (!inode)
- RETURN(0);
+ return 0;
fid = &ll_i2info(inode)->lli_fid;
CDEBUG(D_INFO, "trying to match res "DFID" mode %s\n", PFID(fid),
ldlm_lockname[mode]);
flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_CBPENDING | LDLM_FL_TEST_LOCK;
- for (i = 0; i < MDS_INODELOCK_MAXSHIFT && *bits != 0; i++) {
+ for (i = 0; i <= MDS_INODELOCK_MAXSHIFT && *bits != 0; i++) {
policy.l_inodebits.bits = *bits & (1 << i);
if (policy.l_inodebits.bits == 0)
continue;
@@ -2467,7 +2431,7 @@ int ll_have_md_lock(struct inode *inode, __u64 *bits, ldlm_mode_t l_req_mode)
}
}
}
- RETURN(*bits == 0);
+ return *bits == 0;
}
ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
@@ -2476,7 +2440,6 @@ ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
ldlm_policy_data_t policy = { .l_inodebits = {bits}};
struct lu_fid *fid;
ldlm_mode_t rc;
- ENTRY;
fid = &ll_i2info(inode)->lli_fid;
CDEBUG(D_INFO, "trying to match res "DFID"\n", PFID(fid));
@@ -2484,7 +2447,7 @@ ldlm_mode_t ll_take_md_lock(struct inode *inode, __u64 bits,
rc = md_lock_match(ll_i2mdexp(inode), LDLM_FL_BLOCK_GRANTED|flags,
fid, LDLM_IBITS, &policy,
LCK_CR|LCK_CW|LCK_PR|LCK_PW, lockh);
- RETURN(rc);
+ return rc;
}
static int ll_inode_revalidate_fini(struct inode *inode, int rc)
@@ -2513,7 +2476,6 @@ int __ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it,
struct ptlrpc_request *req = NULL;
struct obd_export *exp;
int rc = 0;
- ENTRY;
LASSERT(inode != NULL);
@@ -2537,7 +2499,7 @@ int __ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it,
dentry->d_inode, NULL, 0, 0,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ return PTR_ERR(op_data);
oit.it_create_mode |= M_CHECK_STALE;
rc = md_intent_lock(exp, op_data, NULL, 0,
@@ -2575,7 +2537,7 @@ int __ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it,
if (S_ISREG(inode->i_mode)) {
rc = ll_get_max_mdsize(sbi, &ealen);
if (rc)
- RETURN(rc);
+ return rc;
valid |= OBD_MD_FLEASIZE | OBD_MD_FLMODEASIZE;
}
@@ -2583,7 +2545,7 @@ int __ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it,
0, ealen, LUSTRE_OPC_ANY,
NULL);
if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ return PTR_ERR(op_data);
op_data->op_valid = valid;
/* Once OBD_CONNECT_ATTRFID is not supported, we can't find one
@@ -2593,7 +2555,7 @@ int __ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it,
ll_finish_md_op_data(op_data);
if (rc) {
rc = ll_inode_revalidate_fini(inode, rc);
- RETURN(rc);
+ return rc;
}
rc = ll_prep_inode(&inode, req, NULL, NULL);
@@ -2608,11 +2570,10 @@ int ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it,
{
struct inode *inode = dentry->d_inode;
int rc;
- ENTRY;
rc = __ll_inode_revalidate_it(dentry, it, ibits);
if (rc != 0)
- RETURN(rc);
+ return rc;
/* if object isn't regular file, don't validate size */
if (!S_ISREG(inode->i_mode)) {
@@ -2622,7 +2583,7 @@ int ll_inode_revalidate_it(struct dentry *dentry, struct lookup_intent *it,
} else {
rc = ll_glimpse_size(inode);
}
- RETURN(rc);
+ return rc;
}
int ll_getattr_it(struct vfsmount *mnt, struct dentry *de,
@@ -2672,21 +2633,19 @@ struct posix_acl * ll_get_acl(struct inode *inode, int type)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct posix_acl *acl = NULL;
- ENTRY;
spin_lock(&lli->lli_lock);
/* VFS' acl_permission_check->check_acl will release the refcount */
acl = posix_acl_dup(lli->lli_posix_acl);
spin_unlock(&lli->lli_lock);
- RETURN(acl);
+ return acl;
}
int ll_inode_permission(struct inode *inode, int mask)
{
int rc = 0;
- ENTRY;
#ifdef MAY_NOT_BLOCK
if (mask & MAY_NOT_BLOCK)
@@ -2702,7 +2661,7 @@ int ll_inode_permission(struct inode *inode, int mask)
rc = __ll_inode_revalidate_it(inode->i_sb->s_root, &it,
MDS_INODELOCK_LOOKUP);
if (rc)
- RETURN(rc);
+ return rc;
}
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), inode mode %x mask %o\n",
@@ -2712,9 +2671,9 @@ int ll_inode_permission(struct inode *inode, int mask)
return lustre_check_remote_perm(inode, mask);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_INODE_PERM, 1);
- rc = ll_generic_permission(inode, mask, flags, ll_check_acl);
+ rc = generic_permission(inode, mask);
- RETURN(rc);
+ return rc;
}
#define READ_METHOD aio_read
@@ -2806,16 +2765,15 @@ void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd)
{
unsigned int size;
struct llioc_data *in_data = NULL;
- ENTRY;
if (cb == NULL || cmd == NULL ||
count > LLIOC_MAX_CMD || count < 0)
- RETURN(NULL);
+ return NULL;
size = sizeof(*in_data) + count * sizeof(unsigned int);
OBD_ALLOC(in_data, size);
if (in_data == NULL)
- RETURN(NULL);
+ return NULL;
memset(in_data, 0, sizeof(*in_data));
in_data->iocd_size = size;
@@ -2827,7 +2785,7 @@ void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd)
list_add_tail(&in_data->iocd_list, &llioc.ioc_head);
up_write(&llioc.ioc_sem);
- RETURN(in_data);
+ return in_data;
}
void ll_iocontrol_unregister(void *magic)
@@ -2890,14 +2848,13 @@ int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
struct cl_env_nest nest;
struct lu_env *env;
int result;
- ENTRY;
if (lli->lli_clob == NULL)
- RETURN(0);
+ return 0;
env = cl_env_nested_get(&nest);
if (IS_ERR(env))
- RETURN(PTR_ERR(env));
+ return PTR_ERR(env);
result = cl_conf_set(env, lli->lli_clob, conf);
cl_env_nested_put(&nest, env);
@@ -2915,7 +2872,7 @@ int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf)
ldlm_lock_allow_match(lock);
}
}
- RETURN(result);
+ return result;
}
/* Fetch layout from MDT with getxattr request, if it's not ready yet */
@@ -2930,10 +2887,13 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
void *lmm;
int lmmsize;
int rc;
- ENTRY;
- if (lock->l_lvb_data != NULL)
- RETURN(0);
+ CDEBUG(D_INODE, DFID" LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n",
+ PFID(ll_inode2fid(inode)), !!(lock->l_flags & LDLM_FL_LVB_READY),
+ lock->l_lvb_data, lock->l_lvb_len);
+
+ if ((lock->l_lvb_data != NULL) && (lock->l_flags & LDLM_FL_LVB_READY))
+ return 0;
/* if layout lock was granted right away, the layout is returned
* within DLM_LVB of dlm reply; otherwise if the lock was ever
@@ -2948,7 +2908,7 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
lmmsize, 0, &req);
capa_put(oc);
if (rc < 0)
- RETURN(rc);
+ return rc;
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
if (body == NULL || body->eadatasize > lmmsize)
@@ -2968,16 +2928,12 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
memcpy(lvbdata, lmm, lmmsize);
lock_res_and_lock(lock);
- if (lock->l_lvb_data == NULL) {
- lock->l_lvb_data = lvbdata;
- lock->l_lvb_len = lmmsize;
- lvbdata = NULL;
- }
- unlock_res_and_lock(lock);
+ if (lock->l_lvb_data != NULL)
+ OBD_FREE_LARGE(lock->l_lvb_data, lock->l_lvb_len);
- if (lvbdata != NULL)
- OBD_FREE_LARGE(lvbdata, lmmsize);
- EXIT;
+ lock->l_lvb_data = lvbdata;
+ lock->l_lvb_len = lmmsize;
+ unlock_res_and_lock(lock);
out:
ptlrpc_req_finished(req);
@@ -2999,7 +2955,6 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode,
int rc = 0;
bool lvb_ready;
bool wait_layout = false;
- ENTRY;
LASSERT(lustre_handle_is_used(lockh));
@@ -3008,7 +2963,7 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode,
LASSERT(ldlm_has_layout(lock));
LDLM_DEBUG(lock, "File %p/"DFID" being reconfigured: %d.\n",
- inode, PFID(&lli->lli_fid), reconf);
+ inode, PFID(&lli->lli_fid), reconf);
/* in case this is a caching lock and reinstate with new inode */
md_set_lock_data(sbi->ll_md_exp, &lockh->cookie, inode, NULL);
@@ -3068,7 +3023,6 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, ldlm_mode_t mode,
/* refresh layout failed, need to wait */
wait_layout = rc == -EBUSY;
- EXIT;
out:
LDLM_LOCK_PUT(lock);
@@ -3090,7 +3044,7 @@ out:
CDEBUG(D_INODE, "file: "DFID" waiting layout return: %d.\n",
PFID(&lli->lli_fid), rc);
}
- RETURN(rc);
+ return rc;
}
/**
@@ -3114,17 +3068,17 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen)
struct lookup_intent it;
struct lustre_handle lockh;
ldlm_mode_t mode;
- struct ldlm_enqueue_info einfo = { .ei_type = LDLM_IBITS,
- .ei_mode = LCK_CR,
- .ei_cb_bl = ll_md_blocking_ast,
- .ei_cb_cp = ldlm_completion_ast,
- .ei_cbdata = NULL };
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = LDLM_IBITS,
+ .ei_mode = LCK_CR,
+ .ei_cb_bl = ll_md_blocking_ast,
+ .ei_cb_cp = ldlm_completion_ast,
+ };
int rc;
- ENTRY;
*gen = lli->lli_layout_gen;
if (!(sbi->ll_flags & LL_SBI_LAYOUT_LOCK))
- RETURN(0);
+ return 0;
/* sanity checks */
LASSERT(fid_is_sane(ll_inode2fid(inode)));
@@ -3136,7 +3090,7 @@ int ll_layout_refresh(struct inode *inode, __u32 *gen)
if (mode != 0) { /* hit cached lock */
rc = ll_layout_lock_set(&lockh, mode, inode, gen, false);
if (rc == 0)
- RETURN(0);
+ return 0;
/* better hold lli_layout_mutex to try again otherwise
* it will have starvation problem. */
@@ -3154,14 +3108,14 @@ again:
goto again;
mutex_unlock(&lli->lli_layout_mutex);
- RETURN(rc);
+ return rc;
}
op_data = ll_prep_md_op_data(NULL, inode, inode, NULL,
0, 0, LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data)) {
mutex_unlock(&lli->lli_layout_mutex);
- RETURN(PTR_ERR(op_data));
+ return PTR_ERR(op_data);
}
/* have to enqueue one */
@@ -3194,5 +3148,5 @@ again:
}
mutex_unlock(&lli->lli_layout_mutex);
- RETURN(rc);
+ return rc;
}
diff --git a/drivers/staging/lustre/lustre/llite/llite_capa.c b/drivers/staging/lustre/lustre/llite/llite_capa.c
index b6fd9593325..edd512b20ee 100644
--- a/drivers/staging/lustre/lustre/llite/llite_capa.c
+++ b/drivers/staging/lustre/lustre/llite/llite_capa.c
@@ -41,7 +41,6 @@
#define DEBUG_SUBSYSTEM S_LLITE
#include <linux/fs.h>
-#include <linux/version.h>
#include <asm/uaccess.h>
#include <linux/file.h>
#include <linux/kmod.h>
@@ -171,7 +170,6 @@ static int capa_thread_main(void *unused)
struct inode *inode = NULL;
struct l_wait_info lwi = { 0 };
int rc;
- ENTRY;
thread_set_flags(&ll_capa_thread, SVC_RUNNING);
wake_up(&ll_capa_thread.t_ctl_waitq);
@@ -281,7 +279,7 @@ static int capa_thread_main(void *unused)
thread_set_flags(&ll_capa_thread, SVC_STOPPED);
wake_up(&ll_capa_thread.t_ctl_waitq);
- RETURN(0);
+ return 0;
}
void ll_capa_timer_callback(unsigned long unused)
@@ -291,8 +289,7 @@ void ll_capa_timer_callback(unsigned long unused)
int ll_capa_thread_start(void)
{
- task_t *task;
- ENTRY;
+ struct task_struct *task;
init_waitqueue_head(&ll_capa_thread.t_ctl_waitq);
@@ -300,12 +297,12 @@ int ll_capa_thread_start(void)
if (IS_ERR(task)) {
CERROR("cannot start expired capa thread: rc %ld\n",
PTR_ERR(task));
- RETURN(PTR_ERR(task));
+ return PTR_ERR(task);
}
wait_event(ll_capa_thread.t_ctl_waitq,
thread_is_running(&ll_capa_thread));
- RETURN(0);
+ return 0;
}
void ll_capa_thread_stop(void)
@@ -322,10 +319,8 @@ struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc)
struct obd_capa *ocapa;
int found = 0;
- ENTRY;
-
if ((ll_i2sbi(inode)->ll_flags & LL_SBI_OSS_CAPA) == 0)
- RETURN(NULL);
+ return NULL;
LASSERT(opc == CAPA_OPC_OSS_WRITE || opc == CAPA_OPC_OSS_RW ||
opc == CAPA_OPC_OSS_TRUNC);
@@ -369,7 +364,7 @@ struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc)
}
spin_unlock(&capa_lock);
- RETURN(ocapa);
+ return ocapa;
}
EXPORT_SYMBOL(ll_osscapa_get);
@@ -377,12 +372,11 @@ struct obd_capa *ll_mdscapa_get(struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct obd_capa *ocapa;
- ENTRY;
LASSERT(inode != NULL);
if ((ll_i2sbi(inode)->ll_flags & LL_SBI_MDS_CAPA) == 0)
- RETURN(NULL);
+ return NULL;
spin_lock(&capa_lock);
ocapa = capa_get(lli->lli_mds_capa);
@@ -392,7 +386,7 @@ struct obd_capa *ll_mdscapa_get(struct inode *inode)
atomic_set(&ll_capa_debug, 0);
}
- RETURN(ocapa);
+ return ocapa;
}
static struct obd_capa *do_add_mds_capa(struct inode *inode,
@@ -525,7 +519,6 @@ int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa)
{
struct inode *inode = ocapa->u.cli.inode;
int rc = 0;
- ENTRY;
LASSERT(ocapa);
@@ -561,7 +554,7 @@ int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa)
capa_put(ocapa);
iput(inode);
- RETURN(rc);
+ return rc;
}
spin_lock(&ocapa->c_lock);
@@ -575,7 +568,6 @@ int ll_update_capa(struct obd_capa *ocapa, struct lustre_capa *capa)
if (capa_for_oss(capa))
inode_add_oss_capa(inode, ocapa);
DEBUG_CAPA(D_SEC, capa, "renew");
- EXIT;
retry:
list_del_init(&ocapa->c_list);
sort_add_capa(ocapa, ll_capa_list);
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
index 00b2b38d4c9..1f5825c87a7 100644
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ b/drivers/staging/lustre/lustre/llite/llite_close.c
@@ -50,14 +50,12 @@ void vvp_write_pending(struct ccc_object *club, struct ccc_page *page)
{
struct ll_inode_info *lli = ll_i2info(club->cob_inode);
- ENTRY;
spin_lock(&lli->lli_lock);
lli->lli_flags |= LLIF_SOM_DIRTY;
if (page != NULL && list_empty(&page->cpg_pending_linkage))
list_add(&page->cpg_pending_linkage,
&club->cob_pending_list);
spin_unlock(&lli->lli_lock);
- EXIT;
}
/** records that a write has completed */
@@ -66,7 +64,6 @@ void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
struct ll_inode_info *lli = ll_i2info(club->cob_inode);
int rc = 0;
- ENTRY;
spin_lock(&lli->lli_lock);
if (page != NULL && !list_empty(&page->cpg_pending_linkage)) {
list_del_init(&page->cpg_pending_linkage);
@@ -75,7 +72,6 @@ void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
spin_unlock(&lli->lli_lock);
if (rc)
ll_queue_done_writing(club->cob_inode, 0);
- EXIT;
}
/** Queues DONE_WRITING if
@@ -85,7 +81,6 @@ void ll_queue_done_writing(struct inode *inode, unsigned long flags)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
- ENTRY;
spin_lock(&lli->lli_lock);
lli->lli_flags |= flags;
@@ -119,14 +114,12 @@ void ll_queue_done_writing(struct inode *inode, unsigned long flags)
spin_unlock(&lcq->lcq_lock);
}
spin_unlock(&lli->lli_lock);
- EXIT;
}
/** Pack SOM attributes info @opdata for CLOSE, DONE_WRITING rpc. */
void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
{
struct ll_inode_info *lli = ll_i2info(inode);
- ENTRY;
op_data->op_flags |= MF_SOM_CHANGE;
/* Check if Size-on-MDS attributes are valid. */
@@ -140,7 +133,6 @@ void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
op_data->op_attr.ia_valid |= ATTR_MTIME_SET | ATTR_CTIME_SET |
ATTR_ATIME_SET | ATTR_SIZE | ATTR_BLOCKS;
}
- EXIT;
}
/** Closes ioepoch and packs Size-on-MDS attribute if needed into @op_data. */
@@ -149,7 +141,6 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
- ENTRY;
spin_lock(&lli->lli_lock);
if (!(list_empty(&club->cob_pending_list))) {
@@ -209,7 +200,6 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
spin_unlock(&lli->lli_lock);
ll_done_writing_attr(inode, op_data);
- EXIT;
out:
return;
}
@@ -225,7 +215,6 @@ int ll_som_update(struct inode *inode, struct md_op_data *op_data)
__u32 old_flags;
struct obdo *oa;
int rc;
- ENTRY;
LASSERT(op_data != NULL);
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
@@ -236,7 +225,7 @@ int ll_som_update(struct inode *inode, struct md_op_data *op_data)
OBDO_ALLOC(oa);
if (!oa) {
CERROR("can't allocate memory for Size-on-MDS update.\n");
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
old_flags = op_data->op_flags;
@@ -266,7 +255,7 @@ int ll_som_update(struct inode *inode, struct md_op_data *op_data)
ptlrpc_req_finished(request);
OBDO_FREE(oa);
- RETURN(rc);
+ return rc;
}
/**
@@ -293,14 +282,12 @@ static void ll_done_writing(struct inode *inode)
struct obd_client_handle *och = NULL;
struct md_op_data *op_data;
int rc;
- ENTRY;
LASSERT(exp_connect_som(ll_i2mdexp(inode)));
OBD_ALLOC_PTR(op_data);
if (op_data == NULL) {
CERROR("can't allocate op_data\n");
- EXIT;
return;
}
@@ -324,7 +311,6 @@ out:
md_clear_open_replay_data(ll_i2sbi(inode)->ll_md_exp, och);
OBD_FREE_PTR(och);
}
- EXIT;
}
static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
@@ -347,7 +333,6 @@ static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
static int ll_close_thread(void *arg)
{
struct ll_close_queue *lcq = arg;
- ENTRY;
complete(&lcq->lcq_comp);
@@ -371,13 +356,13 @@ static int ll_close_thread(void *arg)
CDEBUG(D_INFO, "ll_close exiting\n");
complete(&lcq->lcq_comp);
- RETURN(0);
+ return 0;
}
int ll_close_thread_start(struct ll_close_queue **lcq_ret)
{
struct ll_close_queue *lcq;
- task_t *task;
+ struct task_struct *task;
if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CLOSE_THREAD))
return -EINTR;
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 5227c5c4ebe..47e443d90fe 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -90,6 +90,7 @@ extern struct file_operations ll_pgcache_seq_fops;
#define REMOTE_PERM_HASHSIZE 16
struct ll_getname_data {
+ struct dir_context ctx;
char *lgd_name; /* points to a buffer with NAME_MAX+1 size */
struct lu_fid lgd_fid; /* target fid we are looking for */
int lgd_found; /* inode matched? */
@@ -438,14 +439,6 @@ struct rmtacl_ctl_table {
#define EE_HASHES 32
-struct eacl_entry {
- struct list_head ee_list;
- pid_t ee_key; /* hash key */
- struct lu_fid ee_fid;
- int ee_type; /* ACL type for ACCESS or DEFAULT */
- ext_acl_xattr_header *ee_acl;
-};
-
struct eacl_table {
spinlock_t et_lock;
struct list_head et_entries[EE_HASHES];
@@ -512,6 +505,7 @@ struct ll_sb_info {
* clustred nfs */
struct rmtacl_ctl_table ll_rct;
struct eacl_table ll_et;
+ __kernel_fsid_t ll_fsid;
};
#define LL_DEFAULT_MAX_RW_CHUNK (32 * 1024 * 1024)
@@ -687,8 +681,7 @@ extern struct file_operations ll_dir_operations;
extern struct inode_operations ll_dir_inode_operations;
struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
struct ll_dir_chain *chain);
-int ll_dir_read(struct inode *inode, __u64 *_pos, void *cookie,
- filldir_t filldir);
+int ll_dir_read(struct inode *inode, struct dir_context *ctx);
int ll_get_mdt_idx(struct inode *inode);
/* llite/namei.c */
@@ -792,8 +785,7 @@ void ll_intent_release(struct lookup_intent *);
void ll_invalidate_aliases(struct inode *);
void ll_frob_intent(struct lookup_intent **itp, struct lookup_intent *deft);
void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry);
-int ll_dcompare(const struct dentry *parent, const struct inode *pinode,
- const struct dentry *dentry, const struct inode *inode,
+int ll_dcompare(const struct dentry *parent, const struct dentry *dentry,
unsigned int len, const char *str, const struct qstr *d_name);
int ll_revalidate_it_finish(struct ptlrpc_request *request,
struct lookup_intent *it, struct dentry *de);
@@ -842,6 +834,7 @@ char *ll_get_fsname(struct super_block *sb, char *buf, int buflen);
/* llite/llite_nfs.c */
extern struct export_operations lustre_export_operations;
__u32 get_uuid2int(const char *name, int len);
+void get_uuid2fsid(const char *name, int len, __kernel_fsid_t *fsid);
struct inode *search_inode_for_lustre(struct super_block *sb,
const struct lu_fid *fid);
@@ -1129,7 +1122,7 @@ int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm);
int lustre_check_remote_perm(struct inode *inode, int mask);
/* llite/llite_capa.c */
-extern timer_list_t ll_capa_timer;
+extern struct timer_list ll_capa_timer;
int ll_capa_thread_start(void);
void ll_capa_thread_stop(void);
@@ -1168,6 +1161,14 @@ void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which);
/* llite/llite_rmtacl.c */
#ifdef CONFIG_FS_POSIX_ACL
+struct eacl_entry {
+ struct list_head ee_list;
+ pid_t ee_key; /* hash key */
+ struct lu_fid ee_fid;
+ int ee_type; /* ACL type for ACCESS or DEFAULT */
+ ext_acl_xattr_header *ee_acl;
+};
+
obd_valid rce_ops2valid(int ops);
struct rmtacl_ctl_entry *rct_search(struct rmtacl_ctl_table *rct, pid_t key);
int rct_add(struct rmtacl_ctl_table *rct, pid_t key, int ops);
@@ -1183,6 +1184,11 @@ struct eacl_entry *et_search_del(struct eacl_table *et, pid_t key,
void et_search_free(struct eacl_table *et, pid_t key);
void et_init(struct eacl_table *et);
void et_fini(struct eacl_table *et);
+#else
+static inline obd_valid rce_ops2valid(int ops)
+{
+ return 0;
+}
#endif
/* statahead.c */
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index afae8010623..b868c2bd58d 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -42,7 +42,6 @@
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/mm.h>
#include <lustre_lite.h>
@@ -79,11 +78,10 @@ static struct ll_sb_info *ll_init_sbi(void)
struct sysinfo si;
class_uuid_t uuid;
int i;
- ENTRY;
OBD_ALLOC(sbi, sizeof(*sbi));
if (!sbi)
- RETURN(NULL);
+ return NULL;
spin_lock_init(&sbi->ll_lock);
mutex_init(&sbi->ll_lco.lco_lock);
@@ -141,13 +139,12 @@ static struct ll_sb_info *ll_init_sbi(void)
atomic_set(&sbi->ll_agl_total, 0);
sbi->ll_flags |= LL_SBI_AGL_ENABLED;
- RETURN(sbi);
+ return sbi;
}
void ll_free_sbi(struct super_block *sb)
{
struct ll_sb_info *sbi = ll_s2sbi(sb);
- ENTRY;
if (sbi != NULL) {
spin_lock(&ll_sb_lock);
@@ -155,7 +152,6 @@ void ll_free_sbi(struct super_block *sb)
spin_unlock(&ll_sb_lock);
OBD_FREE(sbi, sizeof(*sbi));
}
- EXIT;
}
static struct dentry_operations ll_d_root_ops = {
@@ -178,22 +174,21 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
struct lustre_md lmd;
obd_valid valid;
int size, err, checksum;
- ENTRY;
obd = class_name2obd(md);
if (!obd) {
CERROR("MD %s: not setup or attached\n", md);
- RETURN(-EINVAL);
+ return -EINVAL;
}
OBD_ALLOC_PTR(data);
if (data == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
OBD_ALLOC_PTR(osfs);
if (osfs == NULL) {
OBD_FREE_PTR(data);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
if (proc_lustre_fs_root) {
@@ -583,15 +578,17 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
/* s_dev is also used in lt_compare() to compare two fs, but that is
* only a node-local comparison. */
uuid = obd_get_uuid(sbi->ll_md_exp);
- if (uuid != NULL)
+ if (uuid != NULL) {
sb->s_dev = get_uuid2int(uuid->uuid, strlen(uuid->uuid));
+ get_uuid2fsid(uuid->uuid, strlen(uuid->uuid), &sbi->ll_fsid);
+ }
if (data != NULL)
OBD_FREE_PTR(data);
if (osfs != NULL)
OBD_FREE_PTR(osfs);
- RETURN(err);
+ return err;
out_root:
if (root)
iput(root);
@@ -627,7 +624,7 @@ int ll_get_max_mdsize(struct ll_sb_info *sbi, int *lmmsize)
if (rc)
CERROR("Get max mdsize error rc %d \n", rc);
- RETURN(rc);
+ return rc;
}
void ll_dump_inode(struct inode *inode)
@@ -676,7 +673,6 @@ void lustre_dump_dentry(struct dentry *dentry, int recur)
void client_common_put_super(struct super_block *sb)
{
struct ll_sb_info *sbi = ll_s2sbi(sb);
- ENTRY;
#ifdef CONFIG_FS_POSIX_ACL
if (sbi->ll_flags & LL_SBI_RMT_CLIENT) {
@@ -703,16 +699,12 @@ void client_common_put_super(struct super_block *sb)
obd_fid_fini(sbi->ll_md_exp->exp_obd);
obd_disconnect(sbi->ll_md_exp);
sbi->ll_md_exp = NULL;
-
- EXIT;
}
void ll_kill_super(struct super_block *sb)
{
struct ll_sb_info *sbi;
- ENTRY;
-
/* not init sb ?*/
if (!(sb->s_flags & MS_ACTIVE))
return;
@@ -725,31 +717,29 @@ void ll_kill_super(struct super_block *sb)
sb->s_dev = sbi->ll_sdev_orig;
sbi->ll_umounting = 1;
}
- EXIT;
}
char *ll_read_opt(const char *opt, char *data)
{
char *value;
char *retval;
- ENTRY;
CDEBUG(D_SUPER, "option: %s, data %s\n", opt, data);
if (strncmp(opt, data, strlen(opt)))
- RETURN(NULL);
+ return NULL;
if ((value = strchr(data, '=')) == NULL)
- RETURN(NULL);
+ return NULL;
value++;
OBD_ALLOC(retval, strlen(value) + 1);
if (!retval) {
CERROR("out of memory!\n");
- RETURN(NULL);
+ return NULL;
}
memcpy(retval, value, strlen(value)+1);
CDEBUG(D_SUPER, "Assigned option: %s, value %s\n", opt, retval);
- RETURN(retval);
+ return retval;
}
static inline int ll_set_opt(const char *opt, char *data, int fl)
@@ -765,10 +755,9 @@ static int ll_options(char *options, int *flags)
{
int tmp;
char *s1 = options, *s2;
- ENTRY;
if (!options)
- RETURN(0);
+ return 0;
CDEBUG(D_CONFIG, "Parsing opts %s\n", options);
@@ -891,7 +880,7 @@ static int ll_options(char *options, int *flags)
}
LCONSOLE_ERROR_MSG(0x152, "Unknown option '%s', won't mount.\n",
s1);
- RETURN(-EINVAL);
+ return -EINVAL;
next:
/* Find next opt */
@@ -900,7 +889,7 @@ next:
break;
s1 = s2 + 1;
}
- RETURN(0);
+ return 0;
}
void ll_lli_init(struct ll_inode_info *lli)
@@ -977,13 +966,12 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
/* %p for void* in printf needs 16+2 characters: 0xffffffffffffffff */
const int instlen = sizeof(cfg->cfg_instance) * 2 + 2;
int err;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op: sb %p\n", sb);
OBD_ALLOC_PTR(cfg);
if (cfg == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
try_module_get(THIS_MODULE);
@@ -992,7 +980,7 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
if (!sbi) {
module_put(THIS_MODULE);
OBD_FREE_PTR(cfg);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
err = ll_options(lsi->lsi_lmd->lmd_opts, &sbi->ll_flags);
@@ -1058,7 +1046,7 @@ out_free:
LCONSOLE_WARN("Mounted %s\n", profilenm);
OBD_FREE_PTR(cfg);
- RETURN(err);
+ return err;
} /* ll_fill_super */
void ll_put_super(struct super_block *sb)
@@ -1069,7 +1057,6 @@ void ll_put_super(struct super_block *sb)
struct ll_sb_info *sbi = ll_s2sbi(sb);
char *profilenm = get_profile_name(sb);
int next, force = 1;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
@@ -1121,8 +1108,6 @@ void ll_put_super(struct super_block *sb)
lustre_common_put_super(sb);
module_put(THIS_MODULE);
-
- EXIT;
} /* client_put_super */
struct inode *ll_inode_from_resource_lock(struct ldlm_lock *lock)
@@ -1176,7 +1161,6 @@ void ll_clear_inode(struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
inode->i_generation, inode);
@@ -1188,7 +1172,9 @@ void ll_clear_inode(struct inode *inode)
LASSERT(lli->lli_opendir_pid == 0);
}
+ spin_lock(&lli->lli_lock);
ll_i2info(inode)->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
+ spin_unlock(&lli->lli_lock);
md_null_inode(sbi->ll_md_exp, ll_inode2fid(inode));
LASSERT(!lli->lli_open_fd_write_count);
@@ -1235,8 +1221,6 @@ void ll_clear_inode(struct inode *inode)
*/
cl_inode_fini(inode);
lli->lli_has_smd = false;
-
- EXIT;
}
int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
@@ -1247,12 +1231,11 @@ int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ptlrpc_request *request = NULL;
int rc, ia_valid;
- ENTRY;
op_data = ll_prep_md_op_data(op_data, inode, NULL, NULL, 0, 0,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ return PTR_ERR(op_data);
rc = md_setattr(sbi->ll_md_exp, op_data, NULL, 0, NULL, 0,
&request, mod);
@@ -1272,14 +1255,14 @@ int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
} else if (rc != -EPERM && rc != -EACCES && rc != -ETXTBSY) {
CERROR("md_setattr fails: rc = %d\n", rc);
}
- RETURN(rc);
+ return rc;
}
rc = md_get_lustre_md(sbi->ll_md_exp, request, sbi->ll_dt_exp,
sbi->ll_md_exp, &md);
if (rc) {
ptlrpc_req_finished(request);
- RETURN(rc);
+ return rc;
}
ia_valid = op_data->op_attr.ia_valid;
@@ -1296,7 +1279,7 @@ int ll_md_setattr(struct dentry *dentry, struct md_op_data *op_data,
ll_update_inode(inode, &md);
ptlrpc_req_finished(request);
- RETURN(rc);
+ return rc;
}
/* Close IO epoch and send Size-on-MDS attribute update. */
@@ -1306,11 +1289,10 @@ static int ll_setattr_done_writing(struct inode *inode,
{
struct ll_inode_info *lli = ll_i2info(inode);
int rc = 0;
- ENTRY;
LASSERT(op_data != NULL);
if (!S_ISREG(inode->i_mode))
- RETURN(0);
+ return 0;
CDEBUG(D_INODE, "Epoch "LPU64" closed on "DFID" for truncate\n",
op_data->op_ioepoch, PFID(&lli->lli_fid));
@@ -1328,7 +1310,7 @@ static int ll_setattr_done_writing(struct inode *inode,
CERROR("inode %lu mdc truncate failed: rc = %d\n",
inode->i_ino, rc);
}
- RETURN(rc);
+ return rc;
}
static int ll_setattr_ost(struct inode *inode, struct iattr *attr)
@@ -1372,7 +1354,6 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr)
struct md_op_data *op_data = NULL;
struct md_open_data *mod = NULL;
int rc = 0, rc1 = 0;
- ENTRY;
CDEBUG(D_VFSTRACE, "%s: setattr inode %p/fid:"DFID" from %llu to %llu, "
"valid %x\n", ll_get_fsname(inode->i_sb, NULL, 0), inode,
@@ -1383,7 +1364,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr)
/* Check new size against VFS/VM file size limit and rlimit */
rc = inode_newsize_ok(inode, attr->ia_size);
if (rc)
- RETURN(rc);
+ return rc;
/* The maximum Lustre file size is variable, based on the
* OST maximum object size and number of stripes. This
@@ -1392,7 +1373,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr)
CDEBUG(D_INODE,"file "DFID" too large %llu > "LPU64"\n",
PFID(&lli->lli_fid), attr->ia_size,
ll_file_maxbytes(inode));
- RETURN(-EFBIG);
+ return -EFBIG;
}
attr->ia_valid |= ATTR_MTIME | ATTR_CTIME;
@@ -1400,24 +1381,24 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr)
/* POSIX: check before ATTR_*TIME_SET set (from inode_change_ok) */
if (attr->ia_valid & TIMES_SET_FLAGS) {
- if (current_fsuid() != inode->i_uid &&
+ if ((!uid_eq(current_fsuid(), inode->i_uid)) &&
!cfs_capable(CFS_CAP_FOWNER))
- RETURN(-EPERM);
+ return -EPERM;
}
/* We mark all of the fields "set" so MDS/OST does not re-set them */
if (attr->ia_valid & ATTR_CTIME) {
- attr->ia_ctime = CFS_CURRENT_TIME;
+ attr->ia_ctime = CURRENT_TIME;
attr->ia_valid |= ATTR_CTIME_SET;
}
if (!(attr->ia_valid & ATTR_ATIME_SET) &&
(attr->ia_valid & ATTR_ATIME)) {
- attr->ia_atime = CFS_CURRENT_TIME;
+ attr->ia_atime = CURRENT_TIME;
attr->ia_valid |= ATTR_ATIME_SET;
}
if (!(attr->ia_valid & ATTR_MTIME_SET) &&
(attr->ia_valid & ATTR_MTIME)) {
- attr->ia_mtime = CFS_CURRENT_TIME;
+ attr->ia_mtime = CURRENT_TIME;
attr->ia_valid |= ATTR_MTIME_SET;
}
@@ -1439,7 +1420,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr)
OBD_ALLOC_PTR(op_data);
if (op_data == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
if (!S_ISDIR(inode->i_mode)) {
if (attr->ia_valid & ATTR_SIZE)
@@ -1480,7 +1461,6 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr)
* setting times to past, but it is necessary due to possible
* time de-synchronization between MDT inode and OST objects */
rc = ll_setattr_ost(inode, attr);
- EXIT;
out:
if (op_data) {
if (op_data->op_ioepoch) {
@@ -1537,12 +1517,11 @@ int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct obd_statfs obd_osfs;
int rc;
- ENTRY;
rc = obd_statfs(NULL, sbi->ll_md_exp, osfs, max_age, flags);
if (rc) {
CERROR("md_statfs fails: rc = %d\n", rc);
- RETURN(rc);
+ return rc;
}
osfs->os_type = sb->s_magic;
@@ -1556,7 +1535,7 @@ int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
rc = obd_statfs_rqset(sbi->ll_dt_exp, &obd_osfs, max_age, flags);
if (rc) {
CERROR("obd_statfs fails: rc = %d\n", rc);
- RETURN(rc);
+ return rc;
}
CDEBUG(D_SUPER, "OSC blocks "LPU64"/"LPU64" objects "LPU64"/"LPU64"\n",
@@ -1578,7 +1557,7 @@ int ll_statfs_internal(struct super_block *sb, struct obd_statfs *osfs,
osfs->os_ffree = obd_osfs.os_ffree;
}
- RETURN(rc);
+ return rc;
}
int ll_statfs(struct dentry *de, struct kstatfs *sfs)
{
@@ -1615,7 +1594,7 @@ int ll_statfs(struct dentry *de, struct kstatfs *sfs)
sfs->f_blocks = osfs.os_blocks;
sfs->f_bfree = osfs.os_bfree;
sfs->f_bavail = osfs.os_bavail;
-
+ sfs->f_fsid = ll_s2sbi(sb)->ll_fsid;
return 0;
}
@@ -1707,9 +1686,9 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
inode->i_blkbits = inode->i_sb->s_blocksize_bits;
}
if (body->valid & OBD_MD_FLUID)
- inode->i_uid = body->uid;
+ inode->i_uid = make_kuid(&init_user_ns, body->uid);
if (body->valid & OBD_MD_FLGID)
- inode->i_gid = body->gid;
+ inode->i_gid = make_kgid(&init_user_ns, body->gid);
if (body->valid & OBD_MD_FLFLAGS)
inode->i_flags = ll_ext_to_inode_flags(body->flags);
if (body->valid & OBD_MD_FLNLINK)
@@ -1755,7 +1734,9 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
/* Use old size assignment to avoid
* deadlock bz14138 & bz14326 */
i_size_write(inode, body->size);
+ spin_lock(&lli->lli_lock);
lli->lli_flags |= LLIF_MDS_SIZE_LOCK;
+ spin_unlock(&lli->lli_lock);
}
ldlm_lock_decref(&lockh, mode);
}
@@ -1786,7 +1767,6 @@ void ll_read_inode2(struct inode *inode, void *opaque)
{
struct lustre_md *md = opaque;
struct ll_inode_info *lli = ll_i2info(inode);
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
PFID(&lli->lli_fid), inode);
@@ -1814,28 +1794,22 @@ void ll_read_inode2(struct inode *inode, void *opaque)
inode->i_op = &ll_file_inode_operations;
inode->i_fop = sbi->ll_fop;
inode->i_mapping->a_ops = (struct address_space_operations *)&ll_aops;
- EXIT;
} else if (S_ISDIR(inode->i_mode)) {
inode->i_op = &ll_dir_inode_operations;
inode->i_fop = &ll_dir_operations;
- EXIT;
} else if (S_ISLNK(inode->i_mode)) {
inode->i_op = &ll_fast_symlink_inode_operations;
- EXIT;
} else {
inode->i_op = &ll_special_inode_operations;
init_special_inode(inode, inode->i_mode,
inode->i_rdev);
-
- EXIT;
}
}
void ll_delete_inode(struct inode *inode)
{
struct cl_inode_info *lli = cl_i2info(inode);
- ENTRY;
if (S_ISREG(inode->i_mode) && lli->lli_clob != NULL)
/* discard all dirty pages before truncating them, required by
@@ -1859,8 +1833,6 @@ void ll_delete_inode(struct inode *inode)
ll_clear_inode(inode);
clear_inode(inode);
-
- EXIT;
}
int ll_iocontrol(struct inode *inode, struct file *file,
@@ -1869,7 +1841,6 @@ int ll_iocontrol(struct inode *inode, struct file *file,
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct ptlrpc_request *req = NULL;
int rc, flags = 0;
- ENTRY;
switch(cmd) {
case FSFILT_IOC_GETFLAGS: {
@@ -1880,14 +1851,14 @@ int ll_iocontrol(struct inode *inode, struct file *file,
0, 0, LUSTRE_OPC_ANY,
NULL);
if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ return PTR_ERR(op_data);
op_data->op_valid = OBD_MD_FLFLAGS;
rc = md_getattr(sbi->ll_md_exp, op_data, &req);
ll_finish_md_op_data(op_data);
if (rc) {
CERROR("failure %d inode %lu\n", rc, inode->i_ino);
- RETURN(-abs(rc));
+ return -abs(rc);
}
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
@@ -1896,7 +1867,7 @@ int ll_iocontrol(struct inode *inode, struct file *file,
ptlrpc_req_finished(req);
- RETURN(put_user(flags, (int *)arg));
+ return put_user(flags, (int *)arg);
}
case FSFILT_IOC_SETFLAGS: {
struct lov_stripe_md *lsm;
@@ -1904,12 +1875,12 @@ int ll_iocontrol(struct inode *inode, struct file *file,
struct md_op_data *op_data;
if (get_user(flags, (int *)arg))
- RETURN(-EFAULT);
+ return -EFAULT;
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ return PTR_ERR(op_data);
((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = flags;
op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
@@ -1918,18 +1889,20 @@ int ll_iocontrol(struct inode *inode, struct file *file,
ll_finish_md_op_data(op_data);
ptlrpc_req_finished(req);
if (rc)
- RETURN(rc);
+ return rc;
inode->i_flags = ll_ext_to_inode_flags(flags);
lsm = ccc_inode_lsm_get(inode);
- if (lsm == NULL)
- RETURN(0);
+ if (!lsm_has_objects(lsm)) {
+ ccc_inode_lsm_put(inode, lsm);
+ return 0;
+ }
OBDO_ALLOC(oinfo.oi_oa);
if (!oinfo.oi_oa) {
ccc_inode_lsm_put(inode, lsm);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
oinfo.oi_md = lsm;
oinfo.oi_oa->o_oi = lsm->lsm_oi;
@@ -1946,20 +1919,21 @@ int ll_iocontrol(struct inode *inode, struct file *file,
if (rc && rc != -EPERM && rc != -EACCES)
CERROR("osc_setattr_async fails: rc = %d\n", rc);
- RETURN(rc);
+ return rc;
}
default:
- RETURN(-ENOSYS);
+ return -ENOSYS;
}
- RETURN(0);
+ return 0;
}
int ll_flush_ctx(struct inode *inode)
{
struct ll_sb_info *sbi = ll_i2sbi(inode);
- CDEBUG(D_SEC, "flush context for user %d\n", current_uid());
+ CDEBUG(D_SEC, "flush context for user %d\n",
+ from_kuid(&init_user_ns, current_uid()));
obd_set_info_async(NULL, sbi->ll_md_exp,
sizeof(KEY_FLUSH_CTX), KEY_FLUSH_CTX,
@@ -1976,8 +1950,6 @@ void ll_umount_begin(struct super_block *sb)
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct obd_device *obd;
struct obd_ioctl_data *ioc_data;
- ENTRY;
-
CDEBUG(D_VFSTRACE, "VFS Op: superblock %p count %d active %d\n", sb,
sb->s_count, atomic_read(&sb->s_active));
@@ -1986,7 +1958,6 @@ void ll_umount_begin(struct super_block *sb)
if (obd == NULL) {
CERROR("Invalid MDC connection handle "LPX64"\n",
sbi->ll_md_exp->exp_handle.h_cookie);
- EXIT;
return;
}
obd->obd_force = 1;
@@ -1995,7 +1966,6 @@ void ll_umount_begin(struct super_block *sb)
if (obd == NULL) {
CERROR("Invalid LOV connection handle "LPX64"\n",
sbi->ll_dt_exp->exp_handle.h_cookie);
- EXIT;
return;
}
obd->obd_force = 1;
@@ -2016,8 +1986,6 @@ void ll_umount_begin(struct super_block *sb)
* schedule() and sleep one second if needed, and hope.
*/
schedule();
-
- EXIT;
}
int ll_remount_fs(struct super_block *sb, int *flags, char *data)
@@ -2058,14 +2026,13 @@ int ll_prep_inode(struct inode **inode, struct ptlrpc_request *req,
struct ll_sb_info *sbi = NULL;
struct lustre_md md;
int rc;
- ENTRY;
LASSERT(*inode || sb);
sbi = sb ? ll_s2sbi(sb) : ll_i2sbi(*inode);
rc = md_get_lustre_md(sbi->ll_md_exp, req, sbi->ll_dt_exp,
sbi->ll_md_exp, &md);
if (rc)
- RETURN(rc);
+ return rc;
if (*inode) {
ll_update_inode(*inode, &md);
@@ -2127,7 +2094,7 @@ out:
if (md.lsm != NULL)
obd_free_memmd(sbi->ll_dt_exp, &md.lsm);
md_free_lustre_md(sbi->ll_md_exp, &md);
- RETURN(rc);
+ return rc;
}
int ll_obd_statfs(struct inode *inode, void *arg)
@@ -2238,8 +2205,8 @@ struct md_op_data * ll_prep_md_op_data(struct md_op_data *op_data,
op_data->op_namelen = namelen;
op_data->op_mode = mode;
op_data->op_mod_time = cfs_time_current_sec();
- op_data->op_fsuid = current_fsuid();
- op_data->op_fsgid = current_fsgid();
+ op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
+ op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
op_data->op_cap = cfs_curproc_cap_pack();
op_data->op_bias = 0;
op_data->op_cli_flags = 0;
@@ -2303,7 +2270,7 @@ int ll_show_options(struct seq_file *seq, struct dentry *dentry)
if (sbi->ll_flags & LL_SBI_USER_FID2PATH)
seq_puts(seq, ",user_fid2path");
- RETURN(0);
+ return 0;
}
/**
@@ -2313,23 +2280,22 @@ int ll_get_obd_name(struct inode *inode, unsigned int cmd, unsigned long arg)
{
struct ll_sb_info *sbi = ll_i2sbi(inode);
struct obd_device *obd;
- ENTRY;
if (cmd == OBD_IOC_GETDTNAME)
obd = class_exp2obd(sbi->ll_dt_exp);
else if (cmd == OBD_IOC_GETMDNAME)
obd = class_exp2obd(sbi->ll_md_exp);
else
- RETURN(-EINVAL);
+ return -EINVAL;
if (!obd)
- RETURN(-ENOENT);
+ return -ENOENT;
if (copy_to_user((void *)arg, obd->obd_name,
strlen(obd->obd_name) + 1))
- RETURN(-EFAULT);
+ return -EFAULT;
- RETURN(0);
+ return 0;
}
/**
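Note: the hunks above are largely mechanical — the Lustre ENTRY/EXIT/RETURN tracing macros give way to plain return statements, and raw uid/gid reads go through the user-namespace helpers. A minimal sketch of the id-conversion half, assuming the numeric ids are destined for the wire protocol (the helper name is invented, not from the tree):

#include <linux/cred.h>
#include <linux/uidgid.h>
#include <linux/user_namespace.h>
#include <linux/types.h>

/* Hypothetical helper: record the caller's fsuid/fsgid for an RPC.
 * from_kuid()/from_kgid() translate the kernel-internal kuid_t/kgid_t
 * back to numeric ids relative to &init_user_ns. */
static void demo_fill_ids(__u32 *fsuid, __u32 *fsgid)
{
	*fsuid = from_kuid(&init_user_ns, current_fsuid());
	*fsgid = from_kgid(&init_user_ns, current_fsgid());
}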
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index d9590d85634..caed6423e4e 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -40,13 +40,9 @@
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/unistd.h>
-#include <linux/version.h>
#include <asm/uaccess.h>
#include <linux/fs.h>
-#include <linux/stat.h>
-#include <asm/uaccess.h>
-#include <linux/mm.h>
#include <linux/pagemap.h>
#define DEBUG_SUBSYSTEM S_LLITE
@@ -74,7 +70,6 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
size_t count)
{
struct vm_area_struct *vma, *ret = NULL;
- ENTRY;
/* mmap_sem must have been held by caller. */
LASSERT(!down_write_trylock(&mm->mmap_sem));
@@ -87,7 +82,7 @@ struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
break;
}
}
- RETURN(ret);
+ return ret;
}
/**
@@ -107,16 +102,16 @@ struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
struct cl_env_nest *nest,
pgoff_t index, unsigned long *ra_flags)
{
- struct file *file = vma->vm_file;
- struct inode *inode = file->f_dentry->d_inode;
- struct cl_io *io;
- struct cl_fault_io *fio;
- struct lu_env *env;
- ENTRY;
+ struct file *file = vma->vm_file;
+ struct inode *inode = file->f_dentry->d_inode;
+ struct cl_io *io;
+ struct cl_fault_io *fio;
+ struct lu_env *env;
+ int rc;
*env_ret = NULL;
if (ll_file_nolock(file))
- RETURN(ERR_PTR(-EOPNOTSUPP));
+ return ERR_PTR(-EOPNOTSUPP);
/*
* page fault can be called when lustre IO is
@@ -127,7 +122,7 @@ struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
*/
env = cl_env_nested_get(nest);
if (IS_ERR(env))
- RETURN(ERR_PTR(-EINVAL));
+ return ERR_PTR(-EINVAL);
*env_ret = env;
@@ -152,17 +147,22 @@ struct cl_io *ll_fault_io_init(struct vm_area_struct *vma,
CDEBUG(D_MMAP, "vm_flags: %lx (%lu %d)\n", vma->vm_flags,
fio->ft_index, fio->ft_executable);
- if (cl_io_init(env, io, CIT_FAULT, io->ci_obj) == 0) {
+ rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
+ if (rc == 0) {
struct ccc_io *cio = ccc_env_io(env);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
LASSERT(cio->cui_cl.cis_io == io);
- /* mmap lock must be MANDATORY
- * it has to cache pages. */
+ /* mmap lock must be MANDATORY because it has
+ * to cache pages. */
io->ci_lockreq = CILR_MANDATORY;
-
- cio->cui_fd = fd;
+ cio->cui_fd = fd;
+ } else {
+ LASSERT(rc < 0);
+ cl_io_fini(env, io);
+ cl_env_nested_put(nest, env);
+ io = ERR_PTR(rc);
}
return io;
@@ -180,7 +180,6 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
sigset_t set;
struct inode *inode;
struct ll_inode_info *lli;
- ENTRY;
LASSERT(vmpage != NULL);
@@ -190,7 +189,7 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
result = io->ci_result;
if (result < 0)
- GOTO(out, result);
+ GOTO(out_io, result);
io->u.ci_fault.ft_mkwrite = 1;
io->u.ci_fault.ft_writable = 1;
@@ -250,16 +249,15 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
spin_unlock(&lli->lli_lock);
}
}
- EXIT;
-out:
+out_io:
cl_io_fini(env, io);
cl_env_nested_put(&nest, env);
-
+out:
CDEBUG(D_MMAP, "%s mkwrite with %d\n", current->comm, result);
-
LASSERT(ergo(result == 0, PageLocked(vmpage)));
- return(result);
+
+ return result;
}
@@ -304,11 +302,10 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
struct cl_env_nest nest;
int result;
int fault_ret = 0;
- ENTRY;
io = ll_fault_io_init(vma, &env, &nest, vmf->pgoff, &ra_flags);
if (IS_ERR(io))
- RETURN(to_fault_error(PTR_ERR(io)));
+ return to_fault_error(PTR_ERR(io));
result = io->ci_result;
if (result == 0) {
@@ -335,7 +332,7 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
CDEBUG(D_MMAP, "%s fault %d/%d\n",
current->comm, fault_ret, result);
- RETURN(fault_ret);
+ return fault_ret;
}
static int ll_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -431,11 +428,9 @@ static void ll_vm_open(struct vm_area_struct * vma)
struct inode *inode = vma->vm_file->f_dentry->d_inode;
struct ccc_object *vob = cl_inode2ccc(inode);
- ENTRY;
LASSERT(vma->vm_file);
LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
atomic_inc(&vob->cob_mmap_cnt);
- EXIT;
}
/**
@@ -446,11 +441,9 @@ static void ll_vm_close(struct vm_area_struct *vma)
struct inode *inode = vma->vm_file->f_dentry->d_inode;
struct ccc_object *vob = cl_inode2ccc(inode);
- ENTRY;
LASSERT(vma->vm_file);
atomic_dec(&vob->cob_mmap_cnt);
LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
- EXIT;
}
@@ -466,7 +459,6 @@ static inline unsigned long file_to_user(struct vm_area_struct *vma, __u64 byte)
int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
{
int rc = -ENOENT;
- ENTRY;
LASSERTF(last > first, "last "LPU64" first "LPU64"\n", last, first);
if (mapping_mapped(mapping)) {
@@ -475,7 +467,7 @@ int ll_teardown_mmaps(struct address_space *mapping, __u64 first, __u64 last)
last - first + 1, 0);
}
- RETURN(rc);
+ return rc;
}
static struct vm_operations_struct ll_file_vm_ops = {
@@ -489,10 +481,9 @@ int ll_file_mmap(struct file *file, struct vm_area_struct * vma)
{
struct inode *inode = file->f_dentry->d_inode;
int rc;
- ENTRY;
if (ll_file_nolock(file))
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_MAP, 1);
rc = generic_file_mmap(file, vma);
@@ -503,5 +494,5 @@ int ll_file_mmap(struct file *file, struct vm_area_struct * vma)
rc = ll_glimpse_size(inode);
}
- RETURN(rc);
+ return rc;
}
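Note: beyond the macro cleanup, the llite_mmap.c change teaches ll_fault_io_init() to check the cl_io_init() result and, on failure, to release the io and the nested environment before handing back an ERR_PTR. The general shape of that error path, as a self-contained sketch with kzalloc/kfree standing in for the real setup and teardown:

#include <linux/err.h>
#include <linux/slab.h>

struct demo_ctx { int ready; };

/* Placeholder for the second-stage setup (cl_io_init() in the real code);
 * assume it can fail with a negative errno. */
static int demo_setup(struct demo_ctx *ctx) { ctx->ready = 1; return 0; }

static struct demo_ctx *demo_get_ctx(void)
{
	struct demo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	int rc;

	if (!ctx)
		return ERR_PTR(-ENOMEM);

	rc = demo_setup(ctx);
	if (rc) {		/* undo partial init before reporting the error */
		kfree(ctx);
		return ERR_PTR(rc);
	}
	return ctx;		/* callers test the result with IS_ERR() */
}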
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index 28cc41e9058..1767c741fb7 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -58,6 +58,22 @@ __u32 get_uuid2int(const char *name, int len)
return (key0 << 1);
}
+void get_uuid2fsid(const char *name, int len, __kernel_fsid_t *fsid)
+{
+ __u64 key = 0, key0 = 0x12a3fe2d, key1 = 0x37abe8f9;
+
+ while (len--) {
+ key = key1 + (key0 ^ (*name++ * 7152373));
+ if (key & 0x8000000000000000ULL)
+ key -= 0x7fffffffffffffffULL;
+ key1 = key0;
+ key0 = key;
+ }
+
+ fsid->val[0] = key;
+ fsid->val[1] = key >> 32;
+}
+
static int ll_nfs_test_inode(struct inode *inode, void *opaque)
{
return lu_fid_eq(&ll_i2info(inode)->lli_fid,
@@ -75,17 +91,16 @@ struct inode *search_inode_for_lustre(struct super_block *sb,
ll_need_32bit_api(sbi));
struct md_op_data *op_data;
int rc;
- ENTRY;
CDEBUG(D_INFO, "searching inode for:(%lu,"DFID")\n", hash, PFID(fid));
inode = ilookup5(sb, hash, ll_nfs_test_inode, (void *)fid);
if (inode)
- RETURN(inode);
+ return inode;
rc = ll_get_max_mdsize(sbi, &eadatalen);
if (rc)
- RETURN(ERR_PTR(rc));
+ return ERR_PTR(rc);
/* Because inode is NULL, ll_prep_md_op_data can not
* be used here. So we allocate op_data ourselves */
@@ -103,14 +118,14 @@ struct inode *search_inode_for_lustre(struct super_block *sb,
if (rc) {
CERROR("can't get object attrs, fid "DFID", rc %d\n",
PFID(fid), rc);
- RETURN(ERR_PTR(rc));
+ return ERR_PTR(rc);
}
rc = ll_prep_inode(&inode, req, sb, NULL);
ptlrpc_req_finished(req);
if (rc)
- RETURN(ERR_PTR(rc));
+ return ERR_PTR(rc);
- RETURN(inode);
+ return inode;
}
struct lustre_nfs_fid {
@@ -123,20 +138,19 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren
{
struct inode *inode;
struct dentry *result;
- ENTRY;
CDEBUG(D_INFO, "Get dentry for fid: "DFID"\n", PFID(fid));
if (!fid_is_sane(fid))
- RETURN(ERR_PTR(-ESTALE));
+ return ERR_PTR(-ESTALE);
inode = search_inode_for_lustre(sb, fid);
if (IS_ERR(inode))
- RETURN(ERR_PTR(PTR_ERR(inode)));
+ return ERR_CAST(inode);
if (is_bad_inode(inode)) {
/* we didn't find the right inode.. */
iput(inode);
- RETURN(ERR_PTR(-ESTALE));
+ return ERR_PTR(-ESTALE);
}
/**
@@ -154,11 +168,11 @@ ll_iget_for_nfs(struct super_block *sb, struct lu_fid *fid, struct lu_fid *paren
result = d_obtain_alias(inode);
if (IS_ERR(result))
- RETURN(result);
+ return result;
ll_dops_init(result, 1, 0);
- RETURN(result);
+ return result;
}
#define LUSTRE_NFS_FID 0x97
@@ -176,20 +190,19 @@ static int ll_encode_fh(struct inode *inode, __u32 *fh, int *plen,
struct inode *parent)
{
struct lustre_nfs_fid *nfs_fid = (void *)fh;
- ENTRY;
CDEBUG(D_INFO, "encoding for (%lu,"DFID") maxlen=%d minlen=%d\n",
inode->i_ino, PFID(ll_inode2fid(inode)), *plen,
(int)sizeof(struct lustre_nfs_fid));
if (*plen < sizeof(struct lustre_nfs_fid) / 4)
- RETURN(255);
+ return 255;
nfs_fid->lnf_child = *ll_inode2fid(inode);
nfs_fid->lnf_parent = *ll_inode2fid(parent);
*plen = sizeof(struct lustre_nfs_fid) / 4;
- RETURN(LUSTRE_NFS_FID);
+ return LUSTRE_NFS_FID;
}
static int ll_nfs_get_name_filldir(void *cookie, const char *name, int namelen,
@@ -214,10 +227,12 @@ static int ll_get_name(struct dentry *dentry, char *name,
struct dentry *child)
{
struct inode *dir = dentry->d_inode;
- struct ll_getname_data lgd;
- __u64 offset = 0;
int rc;
- ENTRY;
+ struct ll_getname_data lgd = {
+ .lgd_name = name,
+ .lgd_fid = ll_i2info(child->d_inode)->lli_fid,
+ .ctx.actor = ll_nfs_get_name_filldir,
+ };
if (!dir || !S_ISDIR(dir->i_mode))
GOTO(out, rc = -ENOTDIR);
@@ -225,17 +240,11 @@ static int ll_get_name(struct dentry *dentry, char *name,
if (!dir->i_fop)
GOTO(out, rc = -EINVAL);
- lgd.lgd_name = name;
- lgd.lgd_fid = ll_i2info(child->d_inode)->lli_fid;
- lgd.lgd_found = 0;
-
mutex_lock(&dir->i_mutex);
- rc = ll_dir_read(dir, &offset, &lgd, ll_nfs_get_name_filldir);
+ rc = ll_dir_read(dir, &lgd.ctx);
mutex_unlock(&dir->i_mutex);
if (!rc && !lgd.lgd_found)
rc = -ENOENT;
- EXIT;
-
out:
return rc;
}
@@ -246,9 +255,9 @@ static struct dentry *ll_fh_to_dentry(struct super_block *sb, struct fid *fid,
struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid;
if (fh_type != LUSTRE_NFS_FID)
- RETURN(ERR_PTR(-EPROTO));
+ return ERR_PTR(-EPROTO);
- RETURN(ll_iget_for_nfs(sb, &nfs_fid->lnf_child, &nfs_fid->lnf_parent));
+ return ll_iget_for_nfs(sb, &nfs_fid->lnf_child, &nfs_fid->lnf_parent);
}
static struct dentry *ll_fh_to_parent(struct super_block *sb, struct fid *fid,
@@ -257,9 +266,9 @@ static struct dentry *ll_fh_to_parent(struct super_block *sb, struct fid *fid,
struct lustre_nfs_fid *nfs_fid = (struct lustre_nfs_fid *)fid;
if (fh_type != LUSTRE_NFS_FID)
- RETURN(ERR_PTR(-EPROTO));
+ return ERR_PTR(-EPROTO);
- RETURN(ll_iget_for_nfs(sb, &nfs_fid->lnf_parent, NULL));
+ return ll_iget_for_nfs(sb, &nfs_fid->lnf_parent, NULL);
}
static struct dentry *ll_get_parent(struct dentry *dchild)
@@ -273,7 +282,6 @@ static struct dentry *ll_get_parent(struct dentry *dchild)
struct md_op_data *op_data;
int rc;
int lmmsize;
- ENTRY;
LASSERT(dir && S_ISDIR(dir->i_mode));
@@ -284,19 +292,19 @@ static struct dentry *ll_get_parent(struct dentry *dchild)
rc = ll_get_max_mdsize(sbi, &lmmsize);
if (rc != 0)
- RETURN(ERR_PTR(rc));
+ return ERR_PTR(rc);
op_data = ll_prep_md_op_data(NULL, dir, NULL, dotdot,
strlen(dotdot), lmmsize,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
- RETURN((void *)op_data);
+ return (void *)op_data;
rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
ll_finish_md_op_data(op_data);
if (rc) {
CERROR("failure %d inode %lu get parent\n", rc, dir->i_ino);
- RETURN(ERR_PTR(rc));
+ return ERR_PTR(rc);
}
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
LASSERT(body->valid & OBD_MD_FLID);
@@ -307,7 +315,7 @@ static struct dentry *ll_get_parent(struct dentry *dchild)
result = ll_iget_for_nfs(dir->i_sb, &body->fid1, NULL);
ptlrpc_req_finished(req);
- RETURN(result);
+ return result;
}
struct export_operations lustre_export_operations = {
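Note: two idioms recur in the llite_nfs.c hunks — ERR_CAST() replaces ERR_PTR(PTR_ERR(...)) when an error pointer only changes type, and ll_get_name() now initialises a struct dir_context actor up front instead of filling the fields by hand. The ERR_CAST() idiom in isolation, with the lookup passed in as a placeholder callback:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/dcache.h>

/* demo_lookup is a stand-in for search_inode_for_lustre(); on failure its
 * error pointer is forwarded as a dentry pointer without the
 * ERR_PTR(PTR_ERR(...)) round trip. */
static struct dentry *demo_get_dentry(struct super_block *sb,
				      struct inode *(*demo_lookup)(struct super_block *))
{
	struct inode *inode = demo_lookup(sb);

	if (IS_ERR(inode))
		return ERR_CAST(inode);
	return d_obtain_alias(inode);
}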
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index 9d4c17ea880..2340458b8a0 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -99,7 +99,6 @@
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
-#include <linux/swap.h>
#include <linux/pagevec.h>
#include <asm/uaccess.h>
@@ -574,7 +573,7 @@ static int loop_clr_fd(struct lloop_device *lo, struct block_device *bdev,
lo->lo_offset = 0;
lo->lo_sizelimit = 0;
lo->lo_flags = 0;
- ll_invalidate_bdev(bdev, 0);
+ invalidate_bdev(bdev);
set_capacity(disks[lo->lo_number], 0);
bd_set_size(bdev, 0);
mapping_set_gfp_mask(filp->f_mapping, gfp);
@@ -618,7 +617,7 @@ static int lo_ioctl(struct block_device *bdev, fmode_t mode,
case LL_IOC_LLOOP_DETACH: {
err = loop_clr_fd(lo, bdev, 2);
if (err == 0)
- ll_blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
+ blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
break;
}
@@ -713,7 +712,7 @@ static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
err = loop_set_fd(lo, NULL, bdev, file);
if (err) {
fput(file);
- ll_blkdev_put(bdev, 0);
+ blkdev_put(bdev, 0);
}
break;
@@ -737,7 +736,7 @@ static enum llioc_iter lloop_ioctl(struct inode *unused, struct file *file,
bdev = lo->lo_device;
err = loop_clr_fd(lo, bdev, 1);
if (err == 0)
- ll_blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
+ blkdev_put(bdev, 0); /* grabbed in LLOOP_ATTACH */
break;
}
@@ -849,10 +848,8 @@ static void lloop_exit(void)
blk_cleanup_queue(loop_dev[i].lo_queue);
put_disk(disks[i]);
}
- if (ll_unregister_blkdev(lloop_major, "lloop"))
- CWARN("lloop: cannot unregister blkdev\n");
- else
- CDEBUG(D_CONFIG, "unregistered lloop major %d\n", lloop_major);
+
+ unregister_blkdev(lloop_major, "lloop");
OBD_FREE(disks, max_loop * sizeof(*disks));
OBD_FREE(loop_dev, max_loop * sizeof(*loop_dev));
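Note: the lloop.c hunks drop the ll_* block-layer wrappers in favour of the stock calls; since unregister_blkdev() returns nothing useful, the old warning branch disappears with them. A hedged sketch of the teardown order (the helper itself is not from the tree; "lloop" and the 0 mode argument mirror the diff):

#include <linux/fs.h>
#include <linux/blkdev.h>

static void demo_loop_teardown(struct block_device *bdev, int major)
{
	invalidate_bdev(bdev);		   /* drop the device's cached pages */
	blkdev_put(bdev, 0);		   /* balance the reference taken at attach */
	unregister_blkdev(major, "lloop"); /* returns void, nothing to check */
}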
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 6a82505c793..d4d3c17547c 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -35,7 +35,6 @@
*/
#define DEBUG_SUBSYSTEM S_LLITE
-#include <linux/version.h>
#include <lustre_lite.h>
#include <lprocfs_status.h>
#include <linux/seq_file.h>
@@ -243,9 +242,9 @@ static ssize_t ll_max_readahead_mb_seq_write(struct file *file, const char *buff
if (rc)
return rc;
- if (pages_number < 0 || pages_number > num_physpages / 2) {
+ if (pages_number < 0 || pages_number > totalram_pages / 2) {
CERROR("can't set file readahead more than %lu MB\n",
- num_physpages >> (20 - PAGE_CACHE_SHIFT + 1)); /*1/2 of RAM*/
+ totalram_pages >> (20 - PAGE_CACHE_SHIFT + 1)); /*1/2 of RAM*/
return -ERANGE;
}
@@ -380,23 +379,22 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file, const char *buffer,
int mult, rc, pages_number;
int diff = 0;
int nrpages = 0;
- ENTRY;
mult = 1 << (20 - PAGE_CACHE_SHIFT);
buffer = lprocfs_find_named_value(buffer, "max_cached_mb:", &count);
rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
if (rc)
- RETURN(rc);
+ return rc;
- if (pages_number < 0 || pages_number > num_physpages) {
+ if (pages_number < 0 || pages_number > totalram_pages) {
CERROR("%s: can't set max cache more than %lu MB\n",
ll_get_fsname(sb, NULL, 0),
- num_physpages >> (20 - PAGE_CACHE_SHIFT));
- RETURN(-ERANGE);
+ totalram_pages >> (20 - PAGE_CACHE_SHIFT));
+ return -ERANGE;
}
if (sbi->ll_dt_exp == NULL)
- RETURN(-ENODEV);
+ return -ENODEV;
spin_lock(&sbi->ll_lock);
diff = pages_number - cache->ccc_lru_max;
@@ -421,7 +419,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file, const char *buffer,
break;
nv = ov > diff ? ov - diff : 0;
- rc = cfs_atomic_cmpxchg(&cache->ccc_lru_left, ov, nv);
+ rc = atomic_cmpxchg(&cache->ccc_lru_left, ov, nv);
if (likely(ov == rc)) {
diff -= ov - nv;
nrpages += ov - nv;
@@ -822,7 +820,8 @@ void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count)
sbi->ll_stats_track_id == current->parent->pid)
lprocfs_counter_add(sbi->ll_stats, op, count);
else if (sbi->ll_stats_track_type == STATS_TRACK_GID &&
- sbi->ll_stats_track_id == current_gid())
+ sbi->ll_stats_track_id ==
+ from_kgid(&init_user_ns, current_gid()))
lprocfs_counter_add(sbi->ll_stats, op, count);
}
EXPORT_SYMBOL(ll_stats_ops_tally);
@@ -852,10 +851,9 @@ int lprocfs_register_mountpoint(struct proc_dir_entry *parent,
struct lustre_sb_info *lsi = s2lsi(sb);
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct obd_device *obd;
- proc_dir_entry_t *dir;
+ struct proc_dir_entry *dir;
char name[MAX_STRING_SIZE + 1], *ptr;
int err, id, len, rc;
- ENTRY;
memset(lvars, 0, sizeof(lvars));
@@ -880,7 +878,7 @@ int lprocfs_register_mountpoint(struct proc_dir_entry *parent,
if (IS_ERR(sbi->ll_proc_root)) {
err = PTR_ERR(sbi->ll_proc_root);
sbi->ll_proc_root = NULL;
- RETURN(err);
+ return err;
}
rc = lprocfs_seq_create(sbi->ll_proc_root, "dump_page_cache", 0444,
@@ -994,7 +992,7 @@ out:
lprocfs_free_stats(&sbi->ll_ra_stats);
lprocfs_free_stats(&sbi->ll_stats);
}
- RETURN(err);
+ return err;
}
void lprocfs_unregister_mountpoint(struct ll_sb_info *sbi)
@@ -1302,8 +1300,9 @@ static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
/* We stored the discontiguous offsets here; print them first */
for(i = 0; i < LL_OFFSET_HIST_MAX; i++) {
if (offset[i].rw_pid != 0)
- seq_printf(seq,"%3c %10d %14Lu %14Lu %17lu %17lu %14Lu",
- offset[i].rw_op ? 'W' : 'R',
+ seq_printf(seq,
+ "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu",
+ offset[i].rw_op == READ ? 'R' : 'W',
offset[i].rw_pid,
offset[i].rw_range_start,
offset[i].rw_range_end,
@@ -1314,8 +1313,9 @@ static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
/* Then print the current offsets for each process */
for(i = 0; i < LL_PROCESS_HIST_MAX; i++) {
if (process[i].rw_pid != 0)
- seq_printf(seq,"%3c %10d %14Lu %14Lu %17lu %17lu %14Lu",
- process[i].rw_op ? 'W' : 'R',
+ seq_printf(seq,
+ "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu",
+ process[i].rw_op == READ ? 'R' : 'W',
process[i].rw_pid,
process[i].rw_range_start,
process[i].rw_last_file_pos,
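Note: in lproc_llite.c the size limits move from the deprecated num_physpages to totalram_pages and the cmpxchg loses its cfs_ prefix. The bound check amounts to the following (the half-of-RAM policy is the driver's own; the helper name is made up):

#include <linux/mm.h>
#include <linux/errno.h>

/* Refuse a readahead budget above half of RAM, expressed in pages. */
static int demo_check_ra_pages(long pages_number)
{
	if (pages_number < 0 || pages_number > totalram_pages / 2)
		return -ERANGE;
	return 0;
}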
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index ff8f63de561..34815b550e7 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -77,11 +77,9 @@ static int ll_d_mountpoint(struct dentry *dparent, struct dentry *dchild,
int ll_unlock(__u32 mode, struct lustre_handle *lockh)
{
- ENTRY;
-
ldlm_lock_decref(lockh, mode);
- RETURN(0);
+ return 0;
}
@@ -139,7 +137,6 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash,
struct lustre_md *md)
{
struct inode *inode;
- ENTRY;
LASSERT(hash != 0);
inode = iget5_locked(sb, hash, ll_test_inode, ll_set_inode, md);
@@ -169,7 +166,7 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash,
CDEBUG(D_VFSTRACE, "got inode: %p for "DFID"\n",
inode, PFID(&md->body->fid1));
}
- RETURN(inode);
+ return inode;
}
static void ll_invalidate_negative_children(struct inode *dir)
@@ -200,7 +197,6 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
{
int rc;
struct lustre_handle lockh;
- ENTRY;
switch (flag) {
case LDLM_CB_BLOCKING:
@@ -208,7 +204,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
rc = ldlm_cli_cancel(&lockh, LCF_ASYNC);
if (rc < 0) {
CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
- RETURN(rc);
+ return rc;
}
break;
case LDLM_CB_CANCELING: {
@@ -275,8 +271,11 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
CDEBUG(D_INODE, "invaliding layout %d.\n", rc);
}
- if (bits & MDS_INODELOCK_UPDATE)
+ if (bits & MDS_INODELOCK_UPDATE) {
+ spin_lock(&lli->lli_lock);
lli->lli_flags &= ~LLIF_MDS_SIZE_LOCK;
+ spin_unlock(&lli->lli_lock);
+ }
if (S_ISDIR(inode->i_mode) &&
(bits & MDS_INODELOCK_UPDATE)) {
@@ -297,13 +296,13 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
LBUG();
}
- RETURN(0);
+ return 0;
}
__u32 ll_i2suppgid(struct inode *i)
{
- if (current_is_in_group(i->i_gid))
- return (__u32)i->i_gid;
+ if (in_group_p(i->i_gid))
+ return (__u32)from_kgid(&init_user_ns, i->i_gid);
else
return (__u32)(-1);
}
@@ -430,7 +429,6 @@ int ll_lookup_it_finish(struct ptlrpc_request *request,
struct inode *inode = NULL;
__u64 bits = 0;
int rc;
- ENTRY;
/* NB 1 request reference will be taken away by ll_intent_lock()
* when I return */
@@ -439,7 +437,7 @@ int ll_lookup_it_finish(struct ptlrpc_request *request,
if (!it_disposition(it, DISP_LOOKUP_NEG)) {
rc = ll_prep_inode(&inode, request, (*de)->d_sb, it);
if (rc)
- RETURN(rc);
+ return rc;
ll_set_lock_data(ll_i2sbi(parent)->ll_md_exp, inode, it, &bits);
@@ -480,7 +478,7 @@ int ll_lookup_it_finish(struct ptlrpc_request *request,
}
}
- RETURN(0);
+ return 0;
}
static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
@@ -493,10 +491,9 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
struct it_cb_data icbd;
__u32 opc;
int rc;
- ENTRY;
if (dentry->d_name.len > ll_i2sbi(parent)->ll_namelen)
- RETURN(ERR_PTR(-ENAMETOOLONG));
+ return ERR_PTR(-ENAMETOOLONG);
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p),intent=%s\n",
dentry->d_name.len, dentry->d_name.name, parent->i_ino,
@@ -514,7 +511,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
rc = ll_inode_revalidate_it(parent->i_sb->s_root, it,
MDS_INODELOCK_LOOKUP);
if (rc)
- RETURN(ERR_PTR(rc));
+ return ERR_PTR(rc);
}
if (it->it_op == IT_GETATTR) {
@@ -539,7 +536,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
dentry->d_name.len, lookup_flags, opc,
NULL);
if (IS_ERR(op_data))
- RETURN((void *)op_data);
+ return (void *)op_data;
/* enforce umask if acl disabled or MDS doesn't support umask */
if (!IS_POSIXACL(parent) || !exp_connect_umask(ll_i2mdexp(parent)))
@@ -618,7 +615,6 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
struct dentry *de;
long long lookup_flags = LOOKUP_OPEN;
int rc = 0;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p),file %p,"
"open_flags %x,mode %x opened %d\n",
@@ -627,7 +623,7 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
OBD_ALLOC(it, sizeof(*it));
if (!it)
- RETURN(-ENOMEM);
+ return -ENOMEM;
it->it_op = IT_OPEN;
if (mode) {
@@ -686,7 +682,7 @@ out_release:
ll_intent_release(it);
OBD_FREE(it, sizeof(*it));
- RETURN(rc);
+ return rc;
}
@@ -700,7 +696,6 @@ static struct inode *ll_create_node(struct inode *dir, const char *name,
struct ptlrpc_request *request = NULL;
struct ll_sb_info *sbi = ll_i2sbi(dir);
int rc;
- ENTRY;
LASSERT(it && it->d.lustre.it_disposition);
@@ -719,7 +714,6 @@ static struct inode *ll_create_node(struct inode *dir, const char *name,
CDEBUG(D_DLMTRACE, "setting l_ast_data to inode %p (%lu/%u)\n",
inode, inode->i_ino, inode->i_generation);
ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
- EXIT;
out:
ptlrpc_req_finished(request);
return inode;
@@ -744,7 +738,6 @@ static int ll_create_it(struct inode *dir, struct dentry *dentry, int mode,
{
struct inode *inode;
int rc = 0;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p),intent=%s\n",
dentry->d_name.len, dentry->d_name.name, dir->i_ino,
@@ -752,18 +745,18 @@ static int ll_create_it(struct inode *dir, struct dentry *dentry, int mode,
rc = it_open_error(DISP_OPEN_CREATE, it);
if (rc)
- RETURN(rc);
+ return rc;
inode = ll_create_node(dir, dentry->d_name.name, dentry->d_name.len,
NULL, 0, mode, 0, it);
if (IS_ERR(inode))
- RETURN(PTR_ERR(inode));
+ return PTR_ERR(inode);
if (filename_is_volatile(dentry->d_name.name, dentry->d_name.len, NULL))
ll_i2info(inode)->lli_volatile = true;
d_instantiate(dentry, inode);
- RETURN(0);
+ return 0;
}
static void ll_update_times(struct ptlrpc_request *request,
@@ -795,7 +788,6 @@ static int ll_new_node(struct inode *dir, struct qstr *name,
int tgt_len = 0;
int err;
- ENTRY;
if (unlikely(tgt != NULL))
tgt_len = strlen(tgt) + 1;
@@ -805,7 +797,8 @@ static int ll_new_node(struct inode *dir, struct qstr *name,
GOTO(err_exit, err = PTR_ERR(op_data));
err = md_create(sbi->ll_md_exp, op_data, tgt, tgt_len, mode,
- current_fsuid(), current_fsgid(),
+ from_kuid(&init_user_ns, current_fsuid()),
+ from_kgid(&init_user_ns, current_fsgid()),
cfs_curproc_cap_pack(), rdev, &request);
ll_finish_md_op_data(op_data);
if (err)
@@ -820,7 +813,6 @@ static int ll_new_node(struct inode *dir, struct qstr *name,
d_instantiate(dchild, inode);
}
- EXIT;
err_exit:
ptlrpc_req_finished(request);
@@ -831,7 +823,6 @@ static int ll_mknod_generic(struct inode *dir, struct qstr *name, int mode,
unsigned rdev, struct dentry *dchild)
{
int err;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p) mode %o dev %x\n",
name->len, name->name, dir->i_ino, dir->i_generation, dir,
@@ -861,7 +852,7 @@ static int ll_mknod_generic(struct inode *dir, struct qstr *name, int mode,
if (!err)
ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_MKNOD, 1);
- RETURN(err);
+ return err;
}
/*
@@ -891,7 +882,6 @@ static int ll_symlink_generic(struct inode *dir, struct qstr *name,
const char *tgt, struct dentry *dchild)
{
int err;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p),target=%.*s\n",
name->len, name->name, dir->i_ino, dir->i_generation,
@@ -903,7 +893,7 @@ static int ll_symlink_generic(struct inode *dir, struct qstr *name,
if (!err)
ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_SYMLINK, 1);
- RETURN(err);
+ return err;
}
static int ll_link_generic(struct inode *src, struct inode *dir,
@@ -914,7 +904,6 @@ static int ll_link_generic(struct inode *src, struct inode *dir,
struct md_op_data *op_data;
int err;
- ENTRY;
CDEBUG(D_VFSTRACE,
"VFS Op: inode=%lu/%u(%p), dir=%lu/%u(%p), target=%.*s\n",
src->i_ino, src->i_generation, src, dir->i_ino,
@@ -923,7 +912,7 @@ static int ll_link_generic(struct inode *src, struct inode *dir,
op_data = ll_prep_md_op_data(NULL, src, dir, name->name, name->len,
0, LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ return PTR_ERR(op_data);
err = md_link(sbi->ll_md_exp, op_data, &request);
ll_finish_md_op_data(op_data);
@@ -932,10 +921,9 @@ static int ll_link_generic(struct inode *src, struct inode *dir,
ll_update_times(request, dir);
ll_stats_ops_tally(sbi, LPROC_LL_LINK, 1);
- EXIT;
out:
ptlrpc_req_finished(request);
- RETURN(err);
+ return err;
}
static int ll_mkdir_generic(struct inode *dir, struct qstr *name,
@@ -943,7 +931,6 @@ static int ll_mkdir_generic(struct inode *dir, struct qstr *name,
{
int err;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p)\n",
name->len, name->name, dir->i_ino, dir->i_generation, dir);
@@ -956,7 +943,7 @@ static int ll_mkdir_generic(struct inode *dir, struct qstr *name,
if (!err)
ll_stats_ops_tally(ll_i2sbi(dir), LPROC_LL_MKDIR, 1);
- RETURN(err);
+ return err;
}
/* Try to find the child dentry by its name.
@@ -981,18 +968,17 @@ static int ll_rmdir_generic(struct inode *dir, struct dentry *dparent,
struct ptlrpc_request *request = NULL;
struct md_op_data *op_data;
int rc;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p)\n",
name->len, name->name, dir->i_ino, dir->i_generation, dir);
if (unlikely(ll_d_mountpoint(dparent, dchild, name)))
- RETURN(-EBUSY);
+ return -EBUSY;
op_data = ll_prep_md_op_data(NULL, dir, NULL, name->name, name->len,
S_IFDIR, LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ return PTR_ERR(op_data);
ll_get_child_fid(dir, name, &op_data->op_fid3);
op_data->op_fid2 = op_data->op_fid3;
@@ -1004,7 +990,7 @@ static int ll_rmdir_generic(struct inode *dir, struct dentry *dparent,
}
ptlrpc_req_finished(request);
- RETURN(rc);
+ return rc;
}
/**
@@ -1015,7 +1001,6 @@ int ll_rmdir_entry(struct inode *dir, char *name, int namelen)
struct ptlrpc_request *request = NULL;
struct md_op_data *op_data;
int rc;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p)\n",
namelen, name, dir->i_ino, dir->i_generation, dir);
@@ -1023,7 +1008,7 @@ int ll_rmdir_entry(struct inode *dir, char *name, int namelen)
op_data = ll_prep_md_op_data(NULL, dir, NULL, name, strlen(name),
S_IFDIR, LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ return PTR_ERR(op_data);
op_data->op_cli_flags |= CLI_RM_ENTRY;
rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
ll_finish_md_op_data(op_data);
@@ -1033,7 +1018,7 @@ int ll_rmdir_entry(struct inode *dir, char *name, int namelen)
}
ptlrpc_req_finished(request);
- RETURN(rc);
+ return rc;
}
int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir)
@@ -1045,12 +1030,11 @@ int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir)
struct obdo *oa;
struct obd_capa *oc = NULL;
int rc;
- ENTRY;
/* req is swabbed so this is safe */
body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
if (!(body->valid & OBD_MD_FLEASIZE))
- RETURN(0);
+ return 0;
if (body->eadatasize == 0) {
CERROR("OBD_MD_FLEASIZE set but eadatasize zero\n");
@@ -1122,7 +1106,6 @@ static int ll_unlink_generic(struct inode *dir, struct dentry *dparent,
struct ptlrpc_request *request = NULL;
struct md_op_data *op_data;
int rc;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:name=%.*s,dir=%lu/%u(%p)\n",
name->len, name->name, dir->i_ino, dir->i_generation, dir);
@@ -1131,12 +1114,12 @@ static int ll_unlink_generic(struct inode *dir, struct dentry *dparent,
* just check it as vfs_unlink does.
*/
if (unlikely(ll_d_mountpoint(dparent, dchild, name)))
- RETURN(-EBUSY);
+ return -EBUSY;
op_data = ll_prep_md_op_data(NULL, dir, NULL, name->name,
name->len, 0, LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ return PTR_ERR(op_data);
ll_get_child_fid(dir, name, &op_data->op_fid3);
op_data->op_fid2 = op_data->op_fid3;
@@ -1151,7 +1134,7 @@ static int ll_unlink_generic(struct inode *dir, struct dentry *dparent,
rc = ll_objects_destroy(request, dir);
out:
ptlrpc_req_finished(request);
- RETURN(rc);
+ return rc;
}
static int ll_rename_generic(struct inode *src, struct dentry *src_dparent,
@@ -1163,7 +1146,7 @@ static int ll_rename_generic(struct inode *src, struct dentry *src_dparent,
struct ll_sb_info *sbi = ll_i2sbi(src);
struct md_op_data *op_data;
int err;
- ENTRY;
+
CDEBUG(D_VFSTRACE,"VFS Op:oldname=%.*s,src_dir=%lu/%u(%p),newname=%.*s,"
"tgt_dir=%lu/%u(%p)\n", src_name->len, src_name->name,
src->i_ino, src->i_generation, src, tgt_name->len,
@@ -1171,12 +1154,12 @@ static int ll_rename_generic(struct inode *src, struct dentry *src_dparent,
if (unlikely(ll_d_mountpoint(src_dparent, src_dchild, src_name) ||
ll_d_mountpoint(tgt_dparent, tgt_dchild, tgt_name)))
- RETURN(-EBUSY);
+ return -EBUSY;
op_data = ll_prep_md_op_data(NULL, src, tgt, NULL, 0, 0,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ return PTR_ERR(op_data);
ll_get_child_fid(src, src_name, &op_data->op_fid3);
ll_get_child_fid(tgt, tgt_name, &op_data->op_fid4);
@@ -1193,7 +1176,7 @@ static int ll_rename_generic(struct inode *src, struct dentry *src_dparent,
ptlrpc_req_finished(request);
- RETURN(err);
+ return err;
}
static int ll_mknod(struct inode *dir, struct dentry *dchild, ll_umode_t mode,
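Note: namei.c picks up two substantive fixes on top of the macro removal — LLIF_MDS_SIZE_LOCK is now cleared under lli->lli_lock, and ll_i2suppgid() relies on in_group_p()/from_kgid() instead of comparing raw gids. The latter, restated compactly as a standalone sketch:

#include <linux/fs.h>
#include <linux/cred.h>
#include <linux/uidgid.h>
#include <linux/user_namespace.h>
#include <linux/types.h>

/* Same logic as ll_i2suppgid() in the hunk above: expose the inode's gid as
 * a supplementary group only if the caller is a member, else -1. */
static __u32 demo_i2suppgid(struct inode *i)
{
	if (in_group_p(i->i_gid))
		return (__u32)from_kgid(&init_user_ns, i->i_gid);
	return (__u32)(-1);
}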
diff --git a/drivers/staging/lustre/lustre/llite/remote_perm.c b/drivers/staging/lustre/lustre/llite/remote_perm.c
index 68b2dc4a7b6..dedd56ae1e6 100644
--- a/drivers/staging/lustre/lustre/llite/remote_perm.c
+++ b/drivers/staging/lustre/lustre/llite/remote_perm.c
@@ -45,7 +45,6 @@
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <lustre_lite.h>
#include <lustre_ha.h>
@@ -124,22 +123,22 @@ static int do_check_remote_perm(struct ll_inode_info *lli, int mask)
struct hlist_head *head;
struct ll_remote_perm *lrp;
int found = 0, rc;
- ENTRY;
if (!lli->lli_remote_perms)
- RETURN(-ENOENT);
+ return -ENOENT;
- head = lli->lli_remote_perms + remote_perm_hashfunc(current_uid());
+ head = lli->lli_remote_perms +
+ remote_perm_hashfunc(from_kuid(&init_user_ns, current_uid()));
spin_lock(&lli->lli_lock);
hlist_for_each_entry(lrp, head, lrp_list) {
- if (lrp->lrp_uid != current_uid())
+ if (lrp->lrp_uid != from_kuid(&init_user_ns, current_uid()))
continue;
- if (lrp->lrp_gid != current_gid())
+ if (lrp->lrp_gid != from_kgid(&init_user_ns, current_gid()))
continue;
- if (lrp->lrp_fsuid != current_fsuid())
+ if (lrp->lrp_fsuid != from_kuid(&init_user_ns, current_fsuid()))
continue;
- if (lrp->lrp_fsgid != current_fsgid())
+ if (lrp->lrp_fsgid != from_kgid(&init_user_ns, current_fsgid()))
continue;
found = 1;
break;
@@ -163,7 +162,6 @@ int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm)
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_remote_perm *lrp = NULL, *tmp = NULL;
struct hlist_head *head, *perm_hash = NULL;
- ENTRY;
LASSERT(ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT);
@@ -178,7 +176,7 @@ int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm)
perm->rp_uid, perm->rp_gid, perm->rp_fsuid,
perm->rp_fsgid, current->uid, current->gid,
current->fsuid, current->fsgid);
- RETURN(-EAGAIN);
+ return -EAGAIN;
}
#endif
@@ -186,7 +184,7 @@ int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm)
perm_hash = alloc_rmtperm_hash();
if (perm_hash == NULL) {
CERROR("alloc lli_remote_perms failed!\n");
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
}
@@ -220,7 +218,7 @@ again:
lrp = alloc_ll_remote_perm();
if (!lrp) {
CERROR("alloc memory for ll_remote_perm failed!\n");
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
spin_lock(&lli->lli_lock);
goto again;
@@ -241,7 +239,7 @@ again:
lrp, lrp->lrp_uid, lrp->lrp_gid, lrp->lrp_fsuid, lrp->lrp_fsgid,
lrp->lrp_access_perm);
- RETURN(0);
+ return 0;
}
int lustre_check_remote_perm(struct inode *inode, int mask)
@@ -253,7 +251,6 @@ int lustre_check_remote_perm(struct inode *inode, int mask)
struct obd_capa *oc;
cfs_time_t save;
int i = 0, rc;
- ENTRY;
do {
save = lli->lli_rmtperm_time;
@@ -304,7 +301,7 @@ int lustre_check_remote_perm(struct inode *inode, int mask)
req = NULL;
} while (1);
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
}
#if 0 /* NB: remote perms can't be freed in ll_mdc_blocking_ast of UPDATE lock,
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index fac11788901..ae0dc441d1d 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -48,9 +48,6 @@
#include <asm/uaccess.h>
#include <linux/fs.h>
-#include <linux/stat.h>
-#include <asm/uaccess.h>
-#include <linux/mm.h>
#include <linux/pagemap.h>
/* current_is_kswapd() */
#include <linux/swap.h>
@@ -110,7 +107,7 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
env = cl_env_get(&refcheck);
if (IS_ERR(env))
- return ERR_PTR(PTR_ERR(env));
+ return ERR_CAST(env);
lcc = &vvp_env_info(env)->vti_io_ctx;
memset(lcc, 0, sizeof(*lcc));
@@ -132,7 +129,7 @@ static struct ll_cl_context *ll_cl_init(struct file *file,
* add dirty pages into cache during truncate */
CERROR("Proc %s is dirting page w/o inode lock, this"
"will break truncate.\n", current->comm);
- libcfs_debug_dumpstack(NULL);
+ dump_stack();
LBUG();
return ERR_PTR(-EIO);
}
@@ -228,7 +225,6 @@ int ll_prepare_write(struct file *file, struct page *vmpage, unsigned from,
{
struct ll_cl_context *lcc;
int result;
- ENTRY;
lcc = ll_cl_init(file, vmpage, 1);
if (!IS_ERR(lcc)) {
@@ -256,7 +252,7 @@ int ll_prepare_write(struct file *file, struct page *vmpage, unsigned from,
} else {
result = PTR_ERR(lcc);
}
- RETURN(result);
+ return result;
}
int ll_commit_write(struct file *file, struct page *vmpage, unsigned from,
@@ -267,7 +263,6 @@ int ll_commit_write(struct file *file, struct page *vmpage, unsigned from,
struct cl_io *io;
struct cl_page *page;
int result = 0;
- ENTRY;
lcc = ll_cl_get();
env = lcc->lcc_env;
@@ -287,7 +282,7 @@ int ll_commit_write(struct file *file, struct page *vmpage, unsigned from,
lu_ref_del(&page->cp_reference, "prepare_write", current);
cl_page_put(env, page);
ll_cl_fini(lcc);
- RETURN(result);
+ return result;
}
struct obd_capa *cl_capa_lookup(struct inode *inode, enum cl_req_type crt)
@@ -325,7 +320,6 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
{
struct ll_ra_info *ra = &sbi->ll_ra_info;
long ret;
- ENTRY;
/* If read-ahead pages left are less than 1M, do not do read-ahead,
* otherwise it will form small read RPC(< 1M), which hurt server
@@ -357,7 +351,7 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
}
out:
- RETURN(ret);
+ return ret;
}
void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len)
@@ -468,8 +462,6 @@ static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
struct ccc_page *cp;
int rc;
- ENTRY;
-
rc = 0;
cl_page_assume(env, io, page);
lu_ref_add(&page->cp_reference, "ra", current);
@@ -491,7 +483,7 @@ static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
}
lu_ref_del(&page->cp_reference, "ra", current);
cl_page_put(env, page);
- RETURN(rc);
+ return rc;
}
/**
@@ -516,8 +508,6 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
int rc = 0;
const char *msg = NULL;
- ENTRY;
-
gfp_mask = GFP_HIGHUSER & ~__GFP_WAIT;
#ifdef __GFP_NOWARN
gfp_mask |= __GFP_NOWARN;
@@ -554,7 +544,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
ll_ra_stats_inc(mapping, which);
CDEBUG(D_READA, "%s\n", msg);
}
- RETURN(rc);
+ return rc;
}
#define RIA_DEBUG(ria) \
@@ -722,7 +712,6 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
struct cl_object *clob;
int ret = 0;
__u64 kms;
- ENTRY;
inode = mapping->host;
lli = ll_i2info(inode);
@@ -735,11 +724,11 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
cl_object_attr_unlock(clob);
if (ret != 0)
- RETURN(ret);
+ return ret;
kms = attr->cat_kms;
if (kms == 0) {
ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
- RETURN(0);
+ return 0;
}
spin_lock(&ras->ras_lock);
@@ -797,11 +786,11 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
if (end == 0) {
ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
- RETURN(0);
+ return 0;
}
len = ria_page_count(ria);
if (len == 0)
- RETURN(0);
+ return 0;
reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len);
if (reserved < len)
@@ -840,7 +829,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
spin_unlock(&ras->ras_lock);
}
- RETURN(ret);
+ return ret;
}
static void ras_set_start(struct inode *inode, struct ll_readahead_state *ras,
@@ -999,7 +988,6 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
{
struct ll_ra_info *ra = &sbi->ll_ra_info;
int zero = 0, stride_detect = 0, ra_miss = 0;
- ENTRY;
spin_lock(&ras->ras_lock);
@@ -1135,7 +1123,6 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
if ((ras->ras_consecutive_requests > 1 || stride_detect) &&
!ras->ras_request_index)
ras_increase_window(inode, ras, ra);
- EXIT;
out_unlock:
RAS_CDEBUG(ras);
ras->ras_request_index++;
@@ -1155,7 +1142,6 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
bool redirtied = false;
bool unlocked = false;
int result;
- ENTRY;
LASSERT(PageLocked(vmpage));
LASSERT(!PageWriteback(vmpage));
@@ -1247,7 +1233,6 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
int range_whole = 0;
int result;
int ignore_layout = 0;
- ENTRY;
if (wbc->range_cyclic) {
start = mapping->writeback_index << PAGE_CACHE_SHIFT;
@@ -1281,14 +1266,13 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
end = i_size_read(inode);
mapping->writeback_index = (end >> PAGE_CACHE_SHIFT) + 1;
}
- RETURN(result);
+ return result;
}
int ll_readpage(struct file *file, struct page *vmpage)
{
struct ll_cl_context *lcc;
int result;
- ENTRY;
lcc = ll_cl_init(file, vmpage, 0);
if (!IS_ERR(lcc)) {
@@ -1310,5 +1294,5 @@ int ll_readpage(struct file *file, struct page *vmpage)
unlock_page(vmpage);
result = PTR_ERR(lcc);
}
- RETURN(result);
+ return result;
}
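Note: rw.c also swaps libcfs_debug_dumpstack(NULL) for the kernel's dump_stack() ahead of the LBUG(), and lets ll_cl_init() use ERR_CAST(). A generic version of that fatal check, not the driver's exact code:

#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/types.h>

static void demo_assert_inode_locked(bool locked, const char *who)
{
	if (!locked) {
		pr_err("%s dirtied a page without the inode lock\n", who);
		dump_stack();	/* same backtrace the libcfs dumper used to print */
		BUG();
	}
}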
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 27e4e64bc1e..96c29ad2fc8 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -51,9 +51,6 @@
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
-#include <linux/stat.h>
-#include <asm/uaccess.h>
-#include <linux/mm.h>
#include <linux/pagemap.h>
#define DEBUG_SUBSYSTEM S_LLITE
@@ -72,7 +69,8 @@
* aligned truncate). Lustre leaves partially truncated page in the cache,
* relying on struct inode::i_size to limit further accesses.
*/
-static void ll_invalidatepage(struct page *vmpage, unsigned long offset)
+static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
+ unsigned int length)
{
struct inode *inode;
struct lu_env *env;
@@ -89,7 +87,7 @@ static void ll_invalidatepage(struct page *vmpage, unsigned long offset)
* below because they are run with page locked and all our io is
* happening with locked page too
*/
- if (offset == 0) {
+ if (offset == 0 && length == PAGE_CACHE_SIZE) {
env = cl_env_get(&refcheck);
if (!IS_ERR(env)) {
inode = vmpage->mapping->host;
@@ -182,7 +180,7 @@ static int ll_set_page_dirty(struct page *vmpage)
*/
vvp_write_pending(obj, cpg);
#endif
- RETURN(__set_page_dirty_nobuffers(vmpage));
+ return __set_page_dirty_nobuffers(vmpage);
}
#define MAX_DIRECTIO_SIZE 2*1024*1024*1024UL
@@ -249,7 +247,6 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
long page_size = cl_page_size(obj);
bool do_io;
int io_pages = 0;
- ENTRY;
queue = &io->ci_queue;
cl_2queue_init(queue);
@@ -286,11 +283,11 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
src_page = (rw == WRITE) ? pages[i] : vmpage;
dst_page = (rw == WRITE) ? vmpage : pages[i];
- src = ll_kmap_atomic(src_page, KM_USER0);
- dst = ll_kmap_atomic(dst_page, KM_USER1);
+ src = kmap_atomic(src_page);
+ dst = kmap_atomic(dst_page);
memcpy(dst, src, min(page_size, size));
- ll_kunmap_atomic(dst, KM_USER1);
- ll_kunmap_atomic(src, KM_USER0);
+ kunmap_atomic(dst);
+ kunmap_atomic(src);
/* make sure page will be added to the transfer by
* cl_io_submit()->...->vvp_page_prep_write(). */
@@ -335,7 +332,7 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
cl_2queue_discard(env, io, queue);
cl_2queue_disown(env, io, queue);
cl_2queue_fini(env, queue);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(ll_direct_rw_pages);
@@ -383,14 +380,13 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
unsigned long seg = 0;
long size = MAX_DIO_SIZE;
int refcheck;
- ENTRY;
if (!lli->lli_has_smd)
- RETURN(-EBADF);
+ return -EBADF;
/* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
if ((file_offset & ~CFS_PAGE_MASK) || (count & ~CFS_PAGE_MASK))
- RETURN(-EINVAL);
+ return -EINVAL;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), size=%lu (max %lu), "
"offset=%lld=%llx, pages %lu (max %lu)\n",
@@ -402,7 +398,7 @@ static ssize_t ll_direct_IO_26(int rw, struct kiocb *iocb,
for (seg = 0; seg < nr_segs; seg++) {
if (((unsigned long)iov[seg].iov_base & ~CFS_PAGE_MASK) ||
(iov[seg].iov_len & ~CFS_PAGE_MASK))
- RETURN(-EINVAL);
+ return -EINVAL;
}
env = cl_env_get(&refcheck);
@@ -495,7 +491,7 @@ out:
}
cl_env_put(env, &refcheck);
- RETURN(tot_bytes ? : result);
+ return tot_bytes ? : result;
}
static int ll_write_begin(struct file *file, struct address_space *mapping,
@@ -506,11 +502,10 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
struct page *page;
int rc;
unsigned from = pos & (PAGE_CACHE_SIZE - 1);
- ENTRY;
page = grab_cache_page_write_begin(mapping, index, flags);
if (!page)
- RETURN(-ENOMEM);
+ return -ENOMEM;
*pagep = page;
@@ -519,7 +514,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
unlock_page(page);
page_cache_release(page);
}
- RETURN(rc);
+ return rc;
}
static int ll_write_end(struct file *file, struct address_space *mapping,
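Note: two API migrations surface in rw26.c — ->invalidatepage() now receives an (offset, length) pair, so a full-page invalidation is detected as offset == 0 && length == PAGE_CACHE_SIZE, and kmap_atomic()/kunmap_atomic() drop the obsolete KM_USER* slot arguments. A purely illustrative page copy in the new style:

#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/mm_types.h>

/* Copy len bytes between two pages through temporary atomic mappings;
 * unmap in the reverse order of mapping. */
static void demo_copy_page_bytes(struct page *dst_page, struct page *src_page,
				 size_t len)
{
	void *src = kmap_atomic(src_page);
	void *dst = kmap_atomic(dst_page);

	memcpy(dst, src, len);
	kunmap_atomic(dst);
	kunmap_atomic(src);
}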
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index 7747f8f2079..8eaa38e91b9 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -200,12 +200,11 @@ ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index,
struct ll_sa_entry *entry;
int entry_size;
char *dname;
- ENTRY;
entry_size = sizeof(struct ll_sa_entry) + (len & ~3) + 4;
OBD_ALLOC(entry, entry_size);
if (unlikely(entry == NULL))
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
CDEBUG(D_READA, "alloc sa entry %.*s(%p) index "LPU64"\n",
len, name, entry, index);
@@ -254,7 +253,7 @@ ll_sa_entry_alloc(struct ll_statahead_info *sai, __u64 index,
atomic_inc(&sai->sai_cache_count);
- RETURN(entry);
+ return entry;
}
/*
@@ -465,11 +464,10 @@ static struct ll_statahead_info *ll_sai_alloc(void)
{
struct ll_statahead_info *sai;
int i;
- ENTRY;
OBD_ALLOC_PTR(sai);
if (!sai)
- RETURN(NULL);
+ return NULL;
atomic_set(&sai->sai_refcount, 1);
@@ -496,7 +494,7 @@ static struct ll_statahead_info *ll_sai_alloc(void)
}
atomic_set(&sai->sai_cache_count, 0);
- RETURN(sai);
+ return sai;
}
static inline struct ll_statahead_info *
@@ -510,7 +508,6 @@ static void ll_sai_put(struct ll_statahead_info *sai)
{
struct inode *inode = sai->sai_inode;
struct ll_inode_info *lli = ll_i2info(inode);
- ENTRY;
if (atomic_dec_and_lock(&sai->sai_refcount, &lli->lli_sa_lock)) {
struct ll_sa_entry *entry, *next;
@@ -519,7 +516,7 @@ static void ll_sai_put(struct ll_statahead_info *sai)
/* It is a race case; the interpret callback just holds
* a reference count */
spin_unlock(&lli->lli_sa_lock);
- RETURN_EXIT;
+ return;
}
LASSERT(lli->lli_opendir_key == NULL);
@@ -550,8 +547,6 @@ static void ll_sai_put(struct ll_statahead_info *sai)
iput(inode);
OBD_FREE_PTR(sai);
}
-
- EXIT;
}
/* Do NOT forget to drop inode refcount when into sai_entries_agl. */
@@ -560,7 +555,6 @@ static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
struct ll_inode_info *lli = ll_i2info(inode);
__u64 index = lli->lli_agl_index;
int rc;
- ENTRY;
LASSERT(list_empty(&lli->lli_agl_list));
@@ -568,7 +562,7 @@ static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
if (is_omitted_entry(sai, index + 1)) {
lli->lli_agl_index = 0;
iput(inode);
- RETURN_EXIT;
+ return;
}
/* Someone is in glimpse (sync or async), do nothing. */
@@ -576,7 +570,7 @@ static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
if (rc == 0) {
lli->lli_agl_index = 0;
iput(inode);
- RETURN_EXIT;
+ return;
}
/*
@@ -597,7 +591,7 @@ static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
up_write(&lli->lli_glimpse_sem);
lli->lli_agl_index = 0;
iput(inode);
- RETURN_EXIT;
+ return;
}
CDEBUG(D_READA, "Handling (init) async glimpse: inode = "
@@ -613,8 +607,6 @@ static void ll_agl_trigger(struct inode *inode, struct ll_statahead_info *sai)
PFID(&lli->lli_fid), index, rc);
iput(inode);
-
- EXIT;
}
static void ll_post_statahead(struct ll_statahead_info *sai)
@@ -628,12 +620,11 @@ static void ll_post_statahead(struct ll_statahead_info *sai)
struct ptlrpc_request *req;
struct mdt_body *body;
int rc = 0;
- ENTRY;
spin_lock(&lli->lli_sa_lock);
if (unlikely(sa_received_empty(sai))) {
spin_unlock(&lli->lli_sa_lock);
- RETURN_EXIT;
+ return;
}
entry = sa_first_received_entry(sai);
atomic_inc(&entry->se_refcount);
@@ -690,8 +681,6 @@ static void ll_post_statahead(struct ll_statahead_info *sai)
if (agl_should_run(sai, child))
ll_agl_add(sai, child, entry->se_index);
- EXIT;
-
out:
/* The "ll_sa_entry_to_stated()" will drop related ldlm ibits lock
* reference count by calling "ll_intent_drop_lock()" in spite of the
@@ -713,7 +702,6 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
struct ll_statahead_info *sai = NULL;
struct ll_sa_entry *entry;
int wakeup;
- ENTRY;
if (it_disposition(it, DISP_LOOKUP_NEG))
rc = -ENOENT;
@@ -763,8 +751,6 @@ static int ll_statahead_interpret(struct ptlrpc_request *req,
wake_up(&sai->sai_thread.t_ctl_waitq);
}
- EXIT;
-
out:
if (rc != 0) {
ll_intent_release(it);
@@ -852,11 +838,10 @@ static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry)
struct ldlm_enqueue_info *einfo;
struct obd_capa *capas[2];
int rc;
- ENTRY;
rc = sa_args_init(dir, NULL, entry, &minfo, &einfo, capas);
if (rc)
- RETURN(rc);
+ return rc;
rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
if (!rc) {
@@ -866,7 +851,7 @@ static int do_sa_lookup(struct inode *dir, struct ll_sa_entry *entry)
sa_args_fini(minfo, einfo);
}
- RETURN(rc);
+ return rc;
}
/**
@@ -885,30 +870,29 @@ static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
struct ldlm_enqueue_info *einfo;
struct obd_capa *capas[2];
int rc;
- ENTRY;
if (unlikely(inode == NULL))
- RETURN(1);
+ return 1;
if (d_mountpoint(dentry))
- RETURN(1);
+ return 1;
if (unlikely(dentry == dentry->d_sb->s_root))
- RETURN(1);
+ return 1;
entry->se_inode = igrab(inode);
rc = md_revalidate_lock(ll_i2mdexp(dir), &it, ll_inode2fid(inode),NULL);
if (rc == 1) {
entry->se_handle = it.d.lustre.it_lock_handle;
ll_intent_release(&it);
- RETURN(1);
+ return 1;
}
rc = sa_args_init(dir, inode, entry, &minfo, &einfo, capas);
if (rc) {
entry->se_inode = NULL;
iput(inode);
- RETURN(rc);
+ return rc;
}
rc = md_intent_getattr_async(ll_i2mdexp(dir), minfo, einfo);
@@ -921,7 +905,7 @@ static int do_sa_revalidate(struct inode *dir, struct ll_sa_entry *entry,
sa_args_fini(minfo, einfo);
}
- RETURN(rc);
+ return rc;
}
static void ll_statahead_one(struct dentry *parent, const char* entry_name,
@@ -934,12 +918,11 @@ static void ll_statahead_one(struct dentry *parent, const char* entry_name,
struct ll_sa_entry *entry;
int rc;
int rc1;
- ENTRY;
entry = ll_sa_entry_alloc(sai, sai->sai_index, entry_name,
entry_name_len);
if (IS_ERR(entry))
- RETURN_EXIT;
+ return;
dentry = d_lookup(parent, &entry->se_qstr);
if (!dentry) {
@@ -965,8 +948,6 @@ static void ll_statahead_one(struct dentry *parent, const char* entry_name,
sai->sai_index++;
/* drop one refcount on entry by ll_sa_entry_alloc */
ll_sa_entry_put(sai, entry);
-
- EXIT;
}
static int ll_agl_thread(void *arg)
@@ -979,7 +960,6 @@ static int ll_agl_thread(void *arg)
struct ll_statahead_info *sai = ll_sai_get(plli->lli_sai);
struct ptlrpc_thread *thread = &sai->sai_agl_thread;
struct l_wait_info lwi = { 0 };
- ENTRY;
CDEBUG(D_READA, "agl thread started: [pid %d] [parent %.*s]\n",
current_pid(), parent->d_name.len, parent->d_name.name);
@@ -1029,7 +1009,7 @@ static int ll_agl_thread(void *arg)
ll_sai_put(sai);
CDEBUG(D_READA, "agl thread stopped: [pid %d] [parent %.*s]\n",
current_pid(), parent->d_name.len, parent->d_name.name);
- RETURN(0);
+ return 0;
}
static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
@@ -1037,8 +1017,7 @@ static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
struct ptlrpc_thread *thread = &sai->sai_agl_thread;
struct l_wait_info lwi = { 0 };
struct ll_inode_info *plli;
- task_t *task;
- ENTRY;
+ struct task_struct *task;
CDEBUG(D_READA, "start agl thread: [pid %d] [parent %.*s]\n",
current_pid(), parent->d_name.len, parent->d_name.name);
@@ -1049,13 +1028,12 @@ static void ll_start_agl(struct dentry *parent, struct ll_statahead_info *sai)
if (IS_ERR(task)) {
CERROR("can't start ll_agl thread, rc: %ld\n", PTR_ERR(task));
thread_set_flags(thread, SVC_STOPPED);
- RETURN_EXIT;
+ return;
}
l_wait_event(thread->t_ctl_waitq,
thread_is_running(thread) || thread_is_stopped(thread),
&lwi);
- EXIT;
}
static int ll_statahead_thread(void *arg)
@@ -1074,7 +1052,6 @@ static int ll_statahead_thread(void *arg)
int rc = 0;
struct ll_dir_chain chain;
struct l_wait_info lwi = { 0 };
- ENTRY;
CDEBUG(D_READA, "statahead thread started: [pid %d] [parent %.*s]\n",
current_pid(), parent->d_name.len, parent->d_name.name);
@@ -1257,7 +1234,6 @@ do_it:
*/
}
}
- EXIT;
out:
if (sai->sai_agl_valid) {
@@ -1369,7 +1345,6 @@ static int is_first_dirent(struct inode *dir, struct dentry *dentry)
__u64 pos = 0;
int dot_de;
int rc = LS_NONE_FIRST_DE;
- ENTRY;
ll_dir_chain_init(&chain);
page = ll_get_dir_page(dir, pos, &chain);
@@ -1468,7 +1443,6 @@ static int is_first_dirent(struct inode *dir, struct dentry *dentry)
ll_release_page(page, 1);
}
}
- EXIT;
out:
ll_dir_chain_fini(&chain);
@@ -1481,7 +1455,6 @@ ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
struct ptlrpc_thread *thread = &sai->sai_thread;
struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode);
int hit;
- ENTRY;
if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC)
hit = 1;
@@ -1516,8 +1489,6 @@ ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
if (!thread_is_stopped(thread))
wake_up(&thread->t_ctl_waitq);
-
- EXIT;
}
/**
@@ -1540,7 +1511,6 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
struct l_wait_info lwi = { 0 };
int rc = 0;
struct ll_inode_info *plli;
- ENTRY;
LASSERT(lli->lli_opendir_pid == current_pid());
@@ -1550,7 +1520,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
list_empty(&sai->sai_entries_stated))) {
/* to release resource */
ll_stop_statahead(dir, lli->lli_opendir_key);
- RETURN(-EAGAIN);
+ return -EAGAIN;
}
if ((*dentryp)->d_name.name[0] == '.') {
@@ -1576,14 +1546,14 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
* "sai_ls_all" enabled as above.
*/
sai->sai_miss_hidden++;
- RETURN(-EAGAIN);
+ return -EAGAIN;
}
}
entry = ll_sa_entry_get_byname(sai, &(*dentryp)->d_name);
if (entry == NULL || only_unplug) {
ll_sai_unplug(sai, entry);
- RETURN(entry ? 1 : -EAGAIN);
+ return entry ? 1 : -EAGAIN;
}
/* if statahead is busy in readdir, help it do post-work */
@@ -1602,7 +1572,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
&lwi);
if (rc < 0) {
ll_sai_unplug(sai, entry);
- RETURN(-EAGAIN);
+ return -EAGAIN;
}
}
@@ -1632,7 +1602,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
inode->i_ino,
inode->i_generation);
ll_sai_unplug(sai, entry);
- RETURN(-ESTALE);
+ return -ESTALE;
} else {
iput(inode);
}
@@ -1646,7 +1616,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
}
ll_sai_unplug(sai, entry);
- RETURN(rc);
+ return rc;
}
/* I am the "lli_opendir_pid" owner, only me can set "lli_sai". */
@@ -1698,7 +1668,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
thread_set_flags(&sai->sai_agl_thread, SVC_STOPPED);
ll_sai_put(sai);
LASSERT(lli->lli_sai == NULL);
- RETURN(-EAGAIN);
+ return -EAGAIN;
}
l_wait_event(thread->t_ctl_waitq,
@@ -1709,7 +1679,7 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
* We don't stat-ahead for the first dirent since we are already in
* lookup.
*/
- RETURN(-EAGAIN);
+ return -EAGAIN;
out:
if (sai != NULL)
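Note: statahead.c additionally replaces the private task_t typedef with struct task_struct for the thread created via kthread_run(). The standard shape of that call, with a placeholder thread function and name:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

static int demo_thread_fn(void *data)
{
	/* Idle loop only; a real thread would do statahead work here. */
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static int demo_start_thread(void *data)
{
	struct task_struct *task = kthread_run(demo_thread_fn, data, "ll_demo");

	if (IS_ERR(task))
		return PTR_ERR(task);	/* kthread_run() never returns NULL */
	return 0;
}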
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
index 82c14a993cc..0beaf4e76b4 100644
--- a/drivers/staging/lustre/lustre/llite/super25.c
+++ b/drivers/staging/lustre/lustre/llite/super25.c
@@ -38,7 +38,6 @@
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <lustre_lite.h>
#include <lustre_ha.h>
#include <lustre_dlm.h>
@@ -214,7 +213,7 @@ static void __exit exit_lustre_lite(void)
ll_remote_perm_cachep = NULL;
kmem_cache_destroy(ll_file_data_slab);
- if (proc_lustre_fs_root)
+ if (proc_lustre_fs_root && !IS_ERR(proc_lustre_fs_root))
lprocfs_remove(&proc_lustre_fs_root);
}
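Note: the super25.c hunk hardens module exit — proc_lustre_fs_root may hold an ERR_PTR from a failed registration, so it is tested with IS_ERR() before lprocfs_remove(). The same guard in generic form, with an invented release callback standing in for lprocfs_remove():

#include <linux/err.h>
#include <linux/proc_fs.h>

/* The root may be NULL (never registered) or an ERR_PTR (failed register);
 * only a real entry must be torn down. */
static void demo_proc_cleanup(struct proc_dir_entry *root,
			      void (*demo_release)(struct proc_dir_entry *))
{
	if (root && !IS_ERR(root))
		demo_release(root);
}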
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index 5260e989a4e..ab06891f7fc 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -37,7 +37,6 @@
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/stat.h>
-#include <linux/version.h>
#define DEBUG_SUBSYSTEM S_LLITE
#include <lustre_lite.h>
@@ -51,7 +50,6 @@ static int ll_readlink_internal(struct inode *inode,
int rc, symlen = i_size_read(inode) + 1;
struct mdt_body *body;
struct md_op_data *op_data;
- ENTRY;
*request = NULL;
@@ -65,13 +63,13 @@ static int ll_readlink_internal(struct inode *inode,
CDEBUG(D_INODE, "using cached symlink %s%.*s, len = %d\n",
print_limit < symlen ? "..." : "", print_limit,
(*symname) + symlen - print_limit, symlen);
- RETURN(0);
+ return 0;
}
op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, symlen,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
- RETURN(PTR_ERR(op_data));
+ return PTR_ERR(op_data);
op_data->op_valid = OBD_MD_LINKNAME;
rc = md_getattr(sbi->ll_md_exp, op_data, request);
@@ -111,10 +109,10 @@ static int ll_readlink_internal(struct inode *inode,
memcpy(lli->lli_symlink_name, *symname, symlen);
*symname = lli->lli_symlink_name;
}
- RETURN(0);
+ return 0;
failed:
- RETURN (rc);
+ return rc;
}
static int ll_readlink(struct dentry *dentry, char *buffer, int buflen)
@@ -123,7 +121,6 @@ static int ll_readlink(struct dentry *dentry, char *buffer, int buflen)
struct ptlrpc_request *request;
char *symname;
int rc;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op\n");
@@ -136,7 +133,7 @@ static int ll_readlink(struct dentry *dentry, char *buffer, int buflen)
out:
ptlrpc_req_finished(request);
ll_inode_size_unlock(inode);
- RETURN(rc);
+ return rc;
}
static void *ll_follow_link(struct dentry *dentry, struct nameidata *nd)
@@ -145,7 +142,6 @@ static void *ll_follow_link(struct dentry *dentry, struct nameidata *nd)
struct ptlrpc_request *request = NULL;
int rc;
char *symname;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op\n");
/* Limit the recursive symlink depth to 5 instead of default
@@ -170,7 +166,7 @@ static void *ll_follow_link(struct dentry *dentry, struct nameidata *nd)
/* symname may contain a pointer to the request message buffer,
* we delay request releasing until ll_put_link then.
*/
- RETURN(request);
+ return request;
}
static void ll_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index 9254b990d31..be125b98b7f 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -213,7 +213,7 @@ int cl_sb_init(struct super_block *sb)
cl_env_put(env, &refcheck);
} else
rc = PTR_ERR(env);
- RETURN(rc);
+ return rc;
}
int cl_sb_fini(struct super_block *sb)
@@ -224,7 +224,6 @@ int cl_sb_fini(struct super_block *sb)
int refcheck;
int result;
- ENTRY;
sbi = ll_s2sbi(sb);
env = cl_env_get(&refcheck);
if (!IS_ERR(env)) {
@@ -247,7 +246,7 @@ int cl_sb_fini(struct super_block *sb)
* automatically when last device is destroyed).
*/
lu_types_stop();
- RETURN(result);
+ return result;
}
/****************************************************************************
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index eb964acad45..3ff664ce750 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -176,19 +176,18 @@ static int vvp_mmap_locks(const struct lu_env *env,
unsigned long seg;
ssize_t count;
int result;
- ENTRY;
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
if (!cl_is_normalio(env, io))
- RETURN(0);
+ return 0;
if (vio->cui_iov == NULL) /* nfs or loop back device write */
- RETURN(0);
+ return 0;
/* No MM (e.g. NFS)? No vmas too. */
if (mm == NULL)
- RETURN(0);
+ return 0;
for (seg = 0; seg < vio->cui_nrsegs; seg++) {
const struct iovec *iv = &vio->cui_iov[seg];
@@ -234,7 +233,7 @@ static int vvp_mmap_locks(const struct lu_env *env,
descr->cld_end);
if (result < 0)
- RETURN(result);
+ return result;
if (vma->vm_end - addr >= count)
break;
@@ -244,7 +243,7 @@ static int vvp_mmap_locks(const struct lu_env *env,
}
up_read(&mm->mmap_sem);
}
- RETURN(0);
+ return 0;
}
static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
@@ -255,7 +254,6 @@ static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
int ast_flags = 0;
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
- ENTRY;
ccc_io_update_iov(env, cio, io);
@@ -264,7 +262,7 @@ static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
result = vvp_mmap_locks(env, cio, io);
if (result == 0)
result = ccc_io_one_lock(env, io, ast_flags, mode, start, end);
- RETURN(result);
+ return result;
}
static int vvp_io_read_lock(const struct lu_env *env,
@@ -274,7 +272,6 @@ static int vvp_io_read_lock(const struct lu_env *env,
struct ll_inode_info *lli = ll_i2info(ccc_object_inode(io->ci_obj));
int result;
- ENTRY;
/* XXX: Layer violation, we shouldn't see lsm at llite level. */
if (lli->lli_has_smd) /* lsm-less file doesn't need to lock */
result = vvp_io_rw_lock(env, io, CLM_READ,
@@ -283,7 +280,7 @@ static int vvp_io_read_lock(const struct lu_env *env,
io->u.ci_rd.rd.crw_count - 1);
else
result = 0;
- RETURN(result);
+ return result;
}
static int vvp_io_fault_lock(const struct lu_env *env,
@@ -407,13 +404,15 @@ static int vvp_io_setattr_start(const struct lu_env *env,
{
struct cl_io *io = ios->cis_io;
struct inode *inode = ccc_object_inode(io->ci_obj);
+ int result = 0;
mutex_lock(&inode->i_mutex);
if (cl_io_is_trunc(io))
- return vvp_io_setattr_trunc(env, ios, inode,
- io->u.ci_setattr.sa_attr.lvb_size);
- else
- return vvp_io_setattr_time(env, ios);
+ result = vvp_io_setattr_trunc(env, ios, inode,
+ io->u.ci_setattr.sa_attr.lvb_size);
+ if (result == 0)
+ result = vvp_io_setattr_time(env, ios);
+ return result;
}
static void vvp_io_setattr_end(const struct lu_env *env,
@@ -525,7 +524,7 @@ out:
io->ci_continue = 0;
io->ci_nob += result;
ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
- cio->cui_fd, pos, result, 0);
+ cio->cui_fd, pos, result, READ);
result = 0;
}
return result;
@@ -554,8 +553,6 @@ static int vvp_io_write_start(const struct lu_env *env,
loff_t pos = io->u.ci_wr.wr.crw_pos;
size_t cnt = io->u.ci_wr.wr.crw_count;
- ENTRY;
-
if (!can_populate_pages(env, io, inode))
return 0;
@@ -580,10 +577,10 @@ static int vvp_io_write_start(const struct lu_env *env,
io->ci_continue = 0;
io->ci_nob += result;
ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
- cio->cui_fd, pos, result, 0);
+ cio->cui_fd, pos, result, WRITE);
result = 0;
}
- RETURN(result);
+ return result;
}
static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
@@ -767,7 +764,6 @@ static int vvp_io_fault_start(const struct lu_env *env,
lu_ref_add(&page->cp_reference, "fault", io);
fio->ft_page = page;
- EXIT;
out:
/* return unlocked vmpage to avoid deadlocking */
@@ -805,8 +801,6 @@ static int vvp_io_read_page(const struct lu_env *env,
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
LASSERT(slice->cpl_obj == obj);
- ENTRY;
-
if (sbi->ll_ra_info.ra_max_pages_per_file &&
sbi->ll_ra_info.ra_max_pages)
ras_update(sbi, inode, ras, page->cp_index,
@@ -819,7 +813,7 @@ static int vvp_io_read_page(const struct lu_env *env,
rc == -ENODATA ? "without a lock" :
"match failed", rc);
if (rc != -ENODATA)
- RETURN(rc);
+ return rc;
}
if (cp->cpg_defer_uptodate) {
@@ -836,7 +830,7 @@ static int vvp_io_read_page(const struct lu_env *env,
ll_readahead(env, io, ras,
vmpage->mapping, &queue->c2_qin, fd->fd_flags);
- RETURN(0);
+ return 0;
}
static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
@@ -887,10 +881,10 @@ static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
* purposes here we can treat it like i_size.
*/
if (attr->cat_kms <= offset) {
- char *kaddr = ll_kmap_atomic(cp->cpg_page, KM_USER0);
+ char *kaddr = kmap_atomic(cp->cpg_page);
memset(kaddr, 0, cl_page_size(obj));
- ll_kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
} else if (cp->cpg_defer_uptodate)
cp->cpg_ra_used = 1;
else
@@ -921,8 +915,6 @@ static int vvp_io_prepare_write(const struct lu_env *env,
int result;
- ENTRY;
-
LINVRNT(cl_page_is_vmlocked(env, pg));
LASSERT(vmpage->mapping->host == ccc_object_inode(obj));
@@ -942,7 +934,7 @@ static int vvp_io_prepare_write(const struct lu_env *env,
pg, cp, from, to);
} else
CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n");
- RETURN(result);
+ return result;
}
static int vvp_io_commit_write(const struct lu_env *env,
@@ -963,12 +955,10 @@ static int vvp_io_commit_write(const struct lu_env *env,
int tallyop;
loff_t size;
- ENTRY;
-
LINVRNT(cl_page_is_vmlocked(env, pg));
LASSERT(vmpage->mapping->host == inode);
- LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "commiting page write\n");
+ LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "committing page write\n");
CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);
/*
@@ -1067,7 +1057,7 @@ static int vvp_io_commit_write(const struct lu_env *env,
cl_page_discard(env, io, pg);
}
ll_inode_size_unlock(inode);
- RETURN(result);
+ return result;
}
static const struct cl_io_operations vvp_io_ops = {
@@ -1120,7 +1110,6 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
int result;
CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- ENTRY;
CL_IO_SLICE_CLEAN(cio, cui_cl);
cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
@@ -1174,7 +1163,7 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
PFID(lu_object_fid(&obj->co_lu)), result);
}
- RETURN(result);
+ return result;
}
static struct vvp_io *cl2vvp_io(const struct lu_env *env,
diff --git a/drivers/staging/lustre/lustre/llite/vvp_lock.c b/drivers/staging/lustre/lustre/llite/vvp_lock.c
index 9b8712bccd9..e16b31e4ff7 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_lock.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_lock.c
@@ -63,8 +63,7 @@ static unsigned long vvp_lock_weigh(const struct lu_env *env,
{
struct ccc_object *cob = cl2ccc(slice->cls_obj);
- ENTRY;
- RETURN(atomic_read(&cob->cob_mmap_cnt) > 0 ? ~0UL >> 2 : 0);
+ return atomic_read(&cob->cob_mmap_cnt) > 0 ? ~0UL >> 2 : 0;
}
static const struct cl_lock_operations vvp_lock_ops = {
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
index 01edc5b63e1..33173fce478 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_object.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_object.c
@@ -91,8 +91,8 @@ static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
attr->cat_atime = LTIME_S(inode->i_atime);
attr->cat_ctime = LTIME_S(inode->i_ctime);
attr->cat_blocks = inode->i_blocks;
- attr->cat_uid = inode->i_uid;
- attr->cat_gid = inode->i_gid;
+ attr->cat_uid = from_kuid(&init_user_ns, inode->i_uid);
+ attr->cat_gid = from_kgid(&init_user_ns, inode->i_gid);
/* KMS is not known by this layer */
return 0; /* layers below have to fill in the rest */
}
@@ -103,9 +103,9 @@ static int vvp_attr_set(const struct lu_env *env, struct cl_object *obj,
struct inode *inode = ccc_object_inode(obj);
if (valid & CAT_UID)
- inode->i_uid = attr->cat_uid;
+ inode->i_uid = make_kuid(&init_user_ns, attr->cat_uid);
if (valid & CAT_GID)
- inode->i_gid = attr->cat_gid;
+ inode->i_gid = make_kgid(&init_user_ns, attr->cat_gid);
if (valid & CAT_ATIME)
LTIME_S(inode->i_atime) = attr->cat_atime;
if (valid & CAT_MTIME)
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 4568e69bb9f..1c02c128e0e 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -218,9 +218,8 @@ static int vvp_page_prep_read(const struct lu_env *env,
const struct cl_page_slice *slice,
struct cl_io *unused)
{
- ENTRY;
/* Skip the page already marked as PG_uptodate. */
- RETURN(PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0);
+ return PageUptodate(cl2vm_page(slice)) ? -EALREADY : 0;
}
static int vvp_page_prep_write(const struct lu_env *env,
@@ -274,7 +273,6 @@ static void vvp_page_completion_read(const struct lu_env *env,
struct page *vmpage = cp->cpg_page;
struct cl_page *page = cl_page_top(slice->cpl_page);
struct inode *inode = ccc_object_inode(page->cp_obj);
- ENTRY;
LASSERT(PageLocked(vmpage));
CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);
@@ -290,8 +288,6 @@ static void vvp_page_completion_read(const struct lu_env *env,
if (page->cp_sync_io == NULL)
unlock_page(vmpage);
-
- EXIT;
}
static void vvp_page_completion_write(const struct lu_env *env,
@@ -301,7 +297,6 @@ static void vvp_page_completion_write(const struct lu_env *env,
struct ccc_page *cp = cl2ccc_page(slice);
struct cl_page *pg = slice->cpl_page;
struct page *vmpage = cp->cpg_page;
- ENTRY;
LASSERT(ergo(pg->cp_sync_io != NULL, PageLocked(vmpage)));
LASSERT(PageWriteback(vmpage));
@@ -329,7 +324,6 @@ static void vvp_page_completion_write(const struct lu_env *env,
vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret);
end_page_writeback(vmpage);
- EXIT;
}
/**
@@ -372,7 +366,7 @@ static int vvp_page_make_ready(const struct lu_env *env,
LBUG();
}
unlock_page(vmpage);
- RETURN(result);
+ return result;
}
static int vvp_page_print(const struct lu_env *env,
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index 4176264984b..bcf86bac30a 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -112,31 +112,32 @@ int ll_setxattr_common(struct inode *inode, const char *name,
struct ptlrpc_request *req;
int xattr_type, rc;
struct obd_capa *oc;
+#ifdef CONFIG_FS_POSIX_ACL
posix_acl_xattr_header *new_value = NULL;
struct rmtacl_ctl_entry *rce = NULL;
ext_acl_xattr_header *acl = NULL;
+#endif
const char *pv = value;
- ENTRY;
xattr_type = get_xattr_type(name);
rc = xattr_type_filter(sbi, xattr_type);
if (rc)
- RETURN(rc);
+ return rc;
/* b10667: ignore lustre special xattr for now */
if ((xattr_type == XATTR_TRUSTED_T && strcmp(name, "trusted.lov") == 0) ||
(xattr_type == XATTR_LUSTRE_T && strcmp(name, "lustre.lov") == 0))
- RETURN(0);
+ return 0;
/* b15587: ignore security.capability xattr for now */
if ((xattr_type == XATTR_SECURITY_T &&
strcmp(name, "security.capability") == 0))
- RETURN(0);
+ return 0;
/* LU-549: Disable security.selinux when selinux is disabled */
if (xattr_type == XATTR_SECURITY_T && !selinux_is_enabled() &&
strcmp(name, "security.selinux") == 0)
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
#ifdef CONFIG_FS_POSIX_ACL
if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
@@ -146,7 +147,7 @@ int ll_setxattr_common(struct inode *inode, const char *name,
if (rce == NULL ||
(rce->rce_ops != RMT_LSETFACL &&
rce->rce_ops != RMT_RSETFACL))
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
if (rce->rce_ops == RMT_LSETFACL) {
struct eacl_entry *ee;
@@ -160,7 +161,7 @@ int ll_setxattr_common(struct inode *inode, const char *name,
size, ee->ee_acl);
if (IS_ERR(acl)) {
ee_free(ee);
- RETURN(PTR_ERR(acl));
+ return PTR_ERR(acl);
}
size = CFS_ACL_XATTR_SIZE(\
le32_to_cpu(acl->a_count), \
@@ -173,11 +174,11 @@ int ll_setxattr_common(struct inode *inode, const char *name,
(posix_acl_xattr_header *)value,
size, &new_value);
if (unlikely(size < 0))
- RETURN(size);
+ return size;
pv = (const char *)new_value;
} else
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
valid |= rce_ops2valid(rce->rce_ops);
}
@@ -199,11 +200,11 @@ int ll_setxattr_common(struct inode *inode, const char *name,
"it is not supported on the server\n");
sbi->ll_flags &= ~LL_SBI_USER_XATTR;
}
- RETURN(rc);
+ return rc;
}
ptlrpc_req_finished(req);
- RETURN(0);
+ return 0;
}
int ll_setxattr(struct dentry *dentry, const char *name,
@@ -285,7 +286,6 @@ int ll_getxattr_common(struct inode *inode, const char *name,
void *xdata;
struct obd_capa *oc;
struct rmtacl_ctl_entry *rce = NULL;
- ENTRY;
CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
inode->i_ino, inode->i_generation, inode);
@@ -302,17 +302,17 @@ int ll_getxattr_common(struct inode *inode, const char *name,
xattr_type = get_xattr_type(name);
rc = xattr_type_filter(sbi, xattr_type);
if (rc)
- RETURN(rc);
+ return rc;
/* b15587: ignore security.capability xattr for now */
if ((xattr_type == XATTR_SECURITY_T &&
strcmp(name, "security.capability") == 0))
- RETURN(-ENODATA);
+ return -ENODATA;
/* LU-549: Disable security.selinux when selinux is disabled */
if (xattr_type == XATTR_SECURITY_T && !selinux_is_enabled() &&
strcmp(name, "security.selinux") == 0)
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
#ifdef CONFIG_FS_POSIX_ACL
if (sbi->ll_flags & LL_SBI_RMT_CLIENT &&
@@ -324,7 +324,7 @@ int ll_getxattr_common(struct inode *inode, const char *name,
rce->rce_ops != RMT_LGETFACL &&
rce->rce_ops != RMT_RSETFACL &&
rce->rce_ops != RMT_RGETFACL))
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
/* posix acl is under protection of LOOKUP lock. when calling to this,
@@ -341,14 +341,14 @@ int ll_getxattr_common(struct inode *inode, const char *name,
spin_unlock(&lli->lli_lock);
if (!acl)
- RETURN(-ENODATA);
+ return -ENODATA;
rc = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
posix_acl_release(acl);
- RETURN(rc);
+ return rc;
}
if (xattr_type == XATTR_ACL_DEFAULT_T && !S_ISDIR(inode->i_mode))
- RETURN(-ENODATA);
+ return -ENODATA;
#endif
do_getxattr:
@@ -363,7 +363,7 @@ do_getxattr:
"it is not supported on the server\n");
sbi->ll_flags &= ~LL_SBI_USER_XATTR;
}
- RETURN(rc);
+ return rc;
}
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
@@ -413,7 +413,6 @@ do_getxattr:
memcpy(buffer, xdata, body->eadatasize);
rc = body->eadatasize;
}
- EXIT;
out:
ptlrpc_req_finished(req);
return rc;
@@ -562,7 +561,12 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
const size_t name_len = sizeof("lov") - 1;
const size_t total_len = prefix_len + name_len + 1;
- if (buffer && (rc + total_len) <= size) {
+ if (((rc + total_len) > size) && (buffer != NULL)) {
+ ptlrpc_req_finished(request);
+ return -ERANGE;
+ }
+
+ if (buffer != NULL) {
buffer += rc;
memcpy(buffer, XATTR_LUSTRE_PREFIX, prefix_len);
memcpy(buffer + prefix_len, "lov", name_len);
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_fld.c b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
index a4805aefa68..0b2d38d1362 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_fld.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_fld.c
@@ -38,7 +38,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/slab.h>
#include <linux/pagemap.h>
#include <asm/div64.h>
#include <linux/seq_file.h>
@@ -58,8 +57,6 @@ int lmv_fld_lookup(struct lmv_obd *lmv,
mdsno_t *mds)
{
int rc;
- ENTRY;
-
/* FIXME: Currently ZFS still use local seq for ROOT unfortunately, and
* this fid_is_local check should be removed once LU-2240 is fixed */
@@ -72,7 +69,7 @@ int lmv_fld_lookup(struct lmv_obd *lmv,
if (rc) {
CERROR("Error while looking for mds number. Seq "LPX64
", err = %d\n", fid_seq(fid), rc);
- RETURN(rc);
+ return rc;
}
CDEBUG(D_INODE, "FLD lookup got mds #%x for fid="DFID"\n",
@@ -84,5 +81,5 @@ int lmv_fld_lookup(struct lmv_obd *lmv,
PFID(fid));
rc = -EINVAL;
}
- RETURN(rc);
+ return rc;
}
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_intent.c b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
index 7eefab5ef5d..511b3b4b699 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_intent.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_intent.c
@@ -38,7 +38,6 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/slab.h>
#include <linux/pagemap.h>
#include <asm/div64.h>
#include <linux/seq_file.h>
@@ -70,11 +69,10 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm,
struct mdt_body *body;
int pmode;
int rc = 0;
- ENTRY;
body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
if (body == NULL)
- RETURN(-EPROTO);
+ return -EPROTO;
LASSERT((body->valid & OBD_MD_MDS));
@@ -142,7 +140,6 @@ static int lmv_intent_remote(struct obd_export *exp, void *lmm,
it->d.lustre.it_lock_handle = plock.cookie;
it->d.lustre.it_lock_mode = pmode;
- EXIT;
out_free_op_data:
OBD_FREE_PTR(op_data);
out:
@@ -169,11 +166,10 @@ int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
struct lmv_tgt_desc *tgt;
struct mdt_body *body;
int rc;
- ENTRY;
tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
/* If it is ready to open the file by FID, do not need
* allocate FID at all, otherwise it will confuse MDT */
@@ -186,7 +182,7 @@ int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
op_data->op_fid3 = op_data->op_fid2;
rc = lmv_fid_alloc(exp, &op_data->op_fid2, op_data);
if (rc != 0)
- RETURN(rc);
+ return rc;
}
CDEBUG(D_INODE, "OPEN_INTENT with fid1="DFID", fid2="DFID","
@@ -196,7 +192,7 @@ int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
rc = md_intent_lock(tgt->ltd_exp, op_data, lmm, lmmsize, it, flags,
reqp, cb_blocking, extra_lock_flags);
if (rc != 0)
- RETURN(rc);
+ return rc;
/*
* Nothing is found, do not access body->fid1 as it is zero and thus
* pointless.
@@ -204,16 +200,16 @@ int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
if ((it->d.lustre.it_disposition & DISP_LOOKUP_NEG) &&
!(it->d.lustre.it_disposition & DISP_OPEN_CREATE) &&
!(it->d.lustre.it_disposition & DISP_OPEN_OPEN))
- RETURN(rc);
+ return rc;
body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
if (body == NULL)
- RETURN(-EPROTO);
+ return -EPROTO;
/*
* Not cross-ref case, just get out of here.
*/
if (likely(!(body->valid & OBD_MD_MDS)))
- RETURN(0);
+ return 0;
/*
* Okay, MDS has returned success. Probably name has been resolved in
@@ -233,10 +229,10 @@ int lmv_intent_open(struct obd_export *exp, struct md_op_data *op_data,
"%*s: %d\n", LL_IT2STR(it), PFID(&op_data->op_fid2),
PFID(&op_data->op_fid1), op_data->op_namelen,
op_data->op_name, rc);
- RETURN(rc);
+ return rc;
}
- RETURN(rc);
+ return rc;
}
/*
@@ -253,11 +249,10 @@ int lmv_intent_lookup(struct obd_export *exp, struct md_op_data *op_data,
struct lmv_tgt_desc *tgt = NULL;
struct mdt_body *body;
int rc = 0;
- ENTRY;
tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
if (!fid_is_sane(&op_data->op_fid2))
fid_zero(&op_data->op_fid2);
@@ -274,7 +269,7 @@ int lmv_intent_lookup(struct obd_export *exp, struct md_op_data *op_data,
flags, reqp, cb_blocking, extra_lock_flags);
if (rc < 0 || *reqp == NULL)
- RETURN(rc);
+ return rc;
/*
* MDS has returned success. Probably name has been resolved in
@@ -282,15 +277,15 @@ int lmv_intent_lookup(struct obd_export *exp, struct md_op_data *op_data,
*/
body = req_capsule_server_get(&(*reqp)->rq_pill, &RMF_MDT_BODY);
if (body == NULL)
- RETURN(-EPROTO);
+ return -EPROTO;
/* Not cross-ref case, just get out of here. */
if (likely(!(body->valid & OBD_MD_MDS)))
- RETURN(0);
+ return 0;
rc = lmv_intent_remote(exp, lmm, lmmsize, it, NULL, flags, reqp,
cb_blocking, extra_lock_flags);
- RETURN(rc);
+ return rc;
}
int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
@@ -301,7 +296,6 @@ int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
{
struct obd_device *obd = exp->exp_obd;
int rc;
- ENTRY;
LASSERT(it != NULL);
LASSERT(fid_is_sane(&op_data->op_fid1));
@@ -312,7 +306,7 @@ int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
if (it->it_op & (IT_LOOKUP | IT_GETATTR | IT_LAYOUT))
rc = lmv_intent_lookup(exp, op_data, lmm, lmmsize, it,
@@ -324,5 +318,5 @@ int lmv_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
extra_lock_flags);
else
LBUG();
- RETURN(rc);
+ return rc;
}
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 1eebfbf3487..c2866046fc3 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -38,12 +38,12 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include <linux/seq_file.h>
#include <linux/namei.h>
+#include <asm/uaccess.h>
#include <lustre/lustre_idl.h>
#include <obd_support.h>
@@ -80,7 +80,6 @@ static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid,
struct obd_device *obd;
int i;
int rc = 0;
- ENTRY;
CDEBUG(D_INFO, "Searching in lmv %p for uuid %s (activate=%d)\n",
lmv, uuid->uuid, activate);
@@ -119,7 +118,6 @@ static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid,
CDEBUG(D_INFO, "Marking OBD %p %sactive\n", obd,
activate ? "" : "in");
lmv_activate_target(lmv, tgt, activate);
- EXIT;
out_lmv_lock:
spin_unlock(&lmv->lmv_lock);
@@ -140,13 +138,12 @@ static int lmv_notify(struct obd_device *obd, struct obd_device *watched,
struct lmv_obd *lmv = &obd->u.lmv;
struct obd_uuid *uuid;
int rc = 0;
- ENTRY;
if (strcmp(watched->obd_type->typ_name, LUSTRE_MDC_NAME)) {
CERROR("unexpected notification of %s %s!\n",
watched->obd_type->typ_name,
watched->obd_name);
- RETURN(-EINVAL);
+ return -EINVAL;
}
uuid = &watched->u.cli.cl_target_uuid;
@@ -161,7 +158,7 @@ static int lmv_notify(struct obd_device *obd, struct obd_device *watched,
CERROR("%sactivation of %s failed: %d\n",
ev == OBD_NOTIFY_ACTIVE ? "" : "de",
uuid->uuid, rc);
- RETURN(rc);
+ return rc;
}
} else if (ev == OBD_NOTIFY_OCD) {
conn_data = &watched->u.cli.cl_import->imp_connect_data;
@@ -186,7 +183,7 @@ static int lmv_notify(struct obd_device *obd, struct obd_device *watched,
if (obd->obd_observer)
rc = obd_notify(obd->obd_observer, watched, ev, data);
- RETURN(rc);
+ return rc;
}
/**
@@ -202,7 +199,6 @@ static int lmv_connect(const struct lu_env *env,
struct lmv_obd *lmv = &obd->u.lmv;
struct lustre_handle conn = { 0 };
int rc = 0;
- ENTRY;
/*
* We don't want to actually do the underlying connections more than
@@ -211,13 +207,13 @@ static int lmv_connect(const struct lu_env *env,
lmv->refcount++;
if (lmv->refcount > 1) {
*exp = NULL;
- RETURN(0);
+ return 0;
}
rc = class_connect(&conn, obd, cluuid);
if (rc) {
CERROR("class_connection() returned %d\n", rc);
- RETURN(rc);
+ return rc;
}
*exp = class_conn2export(&conn);
@@ -257,7 +253,7 @@ static int lmv_connect(const struct lu_env *env,
obd->obd_proc_private = NULL;
}
- RETURN(rc);
+ return rc;
}
static void lmv_set_timeouts(struct obd_device *obd)
@@ -291,7 +287,6 @@ static int lmv_init_ea_size(struct obd_export *exp, int easize,
int i;
int rc = 0;
int change = 0;
- ENTRY;
if (lmv->max_easize < easize) {
lmv->max_easize = easize;
@@ -306,10 +301,10 @@ static int lmv_init_ea_size(struct obd_export *exp, int easize,
change = 1;
}
if (change == 0)
- RETURN(0);
+ return 0;
if (lmv->connected == 0)
- RETURN(0);
+ return 0;
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
if (lmv->tgts[i] == NULL ||
@@ -327,7 +322,7 @@ static int lmv_init_ea_size(struct obd_export *exp, int easize,
break;
}
}
- RETURN(rc);
+ return rc;
}
#define MAX_STRING_SIZE 128
@@ -342,13 +337,12 @@ int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
struct obd_export *mdc_exp;
struct lu_fld_target target;
int rc;
- ENTRY;
mdc_obd = class_find_client_obd(&tgt->ltd_uuid, LUSTRE_MDC_NAME,
&obd->obd_uuid);
if (!mdc_obd) {
CERROR("target %s not attached\n", tgt->ltd_uuid.uuid);
- RETURN(-EINVAL);
+ return -EINVAL;
}
CDEBUG(D_CONFIG, "connect to %s(%s) - %s, %s FOR %s\n",
@@ -358,14 +352,14 @@ int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
if (!mdc_obd->obd_set_up) {
CERROR("target %s is not set up\n", tgt->ltd_uuid.uuid);
- RETURN(-EINVAL);
+ return -EINVAL;
}
rc = obd_connect(NULL, &mdc_exp, mdc_obd, &lmv_mdc_uuid,
&lmv->conn_data, NULL);
if (rc) {
CERROR("target %s connect error %d\n", tgt->ltd_uuid.uuid, rc);
- RETURN(rc);
+ return rc;
}
/*
@@ -373,7 +367,7 @@ int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
*/
rc = obd_fid_init(mdc_obd, mdc_exp, LUSTRE_SEQ_METADATA);
if (rc)
- RETURN(rc);
+ return rc;
target.ft_srv = NULL;
target.ft_exp = mdc_exp;
@@ -386,7 +380,7 @@ int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
obd_disconnect(mdc_exp);
CERROR("target %s register_observer error %d\n",
tgt->ltd_uuid.uuid, rc);
- RETURN(rc);
+ return rc;
}
if (obd->obd_observer) {
@@ -398,7 +392,7 @@ int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
(void *)(tgt - lmv->tgts[0]));
if (rc) {
obd_disconnect(mdc_exp);
- RETURN(rc);
+ return rc;
}
}
@@ -433,7 +427,7 @@ int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
obd->obd_proc_private = NULL;
}
}
- RETURN(0);
+ return 0;
}
static void lmv_del_target(struct lmv_obd *lmv, int index)
@@ -452,7 +446,6 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc = 0;
- ENTRY;
CDEBUG(D_CONFIG, "Target uuid: %s. index %d\n", uuidp->uuid, index);
@@ -467,7 +460,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
lmv_init_unlock(lmv);
CERROR("%s: Target %s not attached: rc = %d\n",
obd->obd_name, uuidp->uuid, -EINVAL);
- RETURN(-EINVAL);
+ return -EINVAL;
}
}
@@ -477,7 +470,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
" rc = %d\n", obd->obd_name,
obd_uuid2str(&tgt->ltd_uuid), index, -EEXIST);
lmv_init_unlock(lmv);
- RETURN(-EEXIST);
+ return -EEXIST;
}
if (index >= lmv->tgts_size) {
@@ -491,7 +484,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
OBD_ALLOC(newtgts, sizeof(*newtgts) * newsize);
if (newtgts == NULL) {
lmv_init_unlock(lmv);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
if (lmv->tgts_size) {
@@ -514,7 +507,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
OBD_ALLOC_PTR(tgt);
if (!tgt) {
lmv_init_unlock(lmv);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
mutex_init(&tgt->ltd_fid_mutex);
@@ -541,7 +534,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
}
lmv_init_unlock(lmv);
- RETURN(rc);
+ return rc;
}
int lmv_check_connect(struct obd_device *obd)
@@ -551,21 +544,20 @@ int lmv_check_connect(struct obd_device *obd)
int i;
int rc;
int easize;
- ENTRY;
if (lmv->connected)
- RETURN(0);
+ return 0;
lmv_init_lock(lmv);
if (lmv->connected) {
lmv_init_unlock(lmv);
- RETURN(0);
+ return 0;
}
if (lmv->desc.ld_tgt_count == 0) {
lmv_init_unlock(lmv);
CERROR("%s: no targets configured.\n", obd->obd_name);
- RETURN(-EINVAL);
+ return -EINVAL;
}
CDEBUG(D_CONFIG, "Time to connect %s to %s\n",
@@ -588,7 +580,7 @@ int lmv_check_connect(struct obd_device *obd)
easize = lmv_get_easize(lmv);
lmv_init_ea_size(obd->obd_self_export, easize, 0, 0);
lmv_init_unlock(lmv);
- RETURN(0);
+ return 0;
out_disc:
while (i-- > 0) {
@@ -609,7 +601,7 @@ int lmv_check_connect(struct obd_device *obd)
}
class_disconnect(lmv->exp);
lmv_init_unlock(lmv);
- RETURN(rc);
+ return rc;
}
static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
@@ -618,7 +610,6 @@ static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
struct lmv_obd *lmv = &obd->u.lmv;
struct obd_device *mdc_obd;
int rc;
- ENTRY;
LASSERT(tgt != NULL);
LASSERT(obd != NULL);
@@ -654,7 +645,7 @@ static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
lmv_activate_target(lmv, tgt, 0);
tgt->ltd_exp = NULL;
- RETURN(0);
+ return 0;
}
static int lmv_disconnect(struct obd_export *exp)
@@ -663,7 +654,6 @@ static int lmv_disconnect(struct obd_export *exp)
struct lmv_obd *lmv = &obd->u.lmv;
int rc;
int i;
- ENTRY;
if (!lmv->tgts)
goto out_local;
@@ -683,7 +673,7 @@ static int lmv_disconnect(struct obd_export *exp)
}
if (obd->obd_proc_private)
- lprocfs_remove((proc_dir_entry_t **)&obd->obd_proc_private);
+ lprocfs_remove((struct proc_dir_entry **)&obd->obd_proc_private);
else
CERROR("/proc/fs/lustre/%s/%s/target_obds missing\n",
obd->obd_type->typ_name, obd->obd_name);
@@ -698,7 +688,7 @@ out_local:
rc = class_disconnect(exp);
if (lmv->refcount == 0)
lmv->connected = 0;
- RETURN(rc);
+ return rc;
}
static int lmv_fid2path(struct obd_export *exp, int len, void *karg, void *uarg)
@@ -714,7 +704,7 @@ static int lmv_fid2path(struct obd_export *exp, int len, void *karg, void *uarg)
gf = (struct getinfo_fid2path *)karg;
tgt = lmv_find_target(lmv, &gf->gf_fid);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
repeat_fid2path:
rc = obd_iocontrol(OBD_IOC_FID2PATH, tgt->ltd_exp, len, gf, uarg);
@@ -780,9 +770,126 @@ repeat_fid2path:
out_fid2path:
if (remote_gf != NULL)
OBD_FREE(remote_gf, remote_gf_size);
- RETURN(rc);
+ return rc;
+}
+
+static int lmv_hsm_req_count(struct lmv_obd *lmv,
+ const struct hsm_user_request *hur,
+ const struct lmv_tgt_desc *tgt_mds)
+{
+ int i, nr = 0;
+ struct lmv_tgt_desc *curr_tgt;
+
+ /* count how many requests must be sent to the given target */
+ for (i = 0; i < hur->hur_request.hr_itemcount; i++) {
+ curr_tgt = lmv_find_target(lmv, &hur->hur_user_item[i].hui_fid);
+ if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid))
+ nr++;
+ }
+ return nr;
+}
+
+static void lmv_hsm_req_build(struct lmv_obd *lmv,
+ struct hsm_user_request *hur_in,
+ const struct lmv_tgt_desc *tgt_mds,
+ struct hsm_user_request *hur_out)
+{
+ int i, nr_out;
+ struct lmv_tgt_desc *curr_tgt;
+
+ /* build the hsm_user_request for the given target */
+ hur_out->hur_request = hur_in->hur_request;
+ nr_out = 0;
+ for (i = 0; i < hur_in->hur_request.hr_itemcount; i++) {
+ curr_tgt = lmv_find_target(lmv,
+ &hur_in->hur_user_item[i].hui_fid);
+ if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid)) {
+ hur_out->hur_user_item[nr_out] =
+ hur_in->hur_user_item[i];
+ nr_out++;
+ }
+ }
+ hur_out->hur_request.hr_itemcount = nr_out;
+ memcpy(hur_data(hur_out), hur_data(hur_in),
+ hur_in->hur_request.hr_data_len);
+}
+
+static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len,
+ struct lustre_kernelcomm *lk, void *uarg)
+{
+ int i, rc = 0;
+
+ /* unregister request (call from llapi_hsm_copytool_fini) */
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ /* best effort: try to clean as much as possible
+ * (continue on error) */
+ obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, lk, uarg);
+ }
+
+ /* Whatever the result, remove copytool from kuc groups.
+ * Unreached coordinators will get EPIPE on next requests
+ * and will unregister automatically.
+ */
+ rc = libcfs_kkuc_group_rem(lk->lk_uid, lk->lk_group);
+ return rc;
}
+static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len,
+ struct lustre_kernelcomm *lk, void *uarg)
+{
+ struct file *filp;
+ int i, j, err;
+ int rc = 0;
+ bool any_set = false;
+
+ /* All or nothing: try to register with every MDS.
+ * On failure, unregister from the MDSs already registered,
+ * except if the failure is due to an inactive target. */
+ for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp,
+ len, lk, uarg);
+ if (err) {
+ if (lmv->tgts[i]->ltd_active) {
+ /* permanent error */
+ CERROR("error: iocontrol MDC %s on MDT"
+ "idx %d cmd %x: err = %d\n",
+ lmv->tgts[i]->ltd_uuid.uuid,
+ i, cmd, err);
+ rc = err;
+ lk->lk_flags |= LK_FLG_STOP;
+ /* unregister from previous MDS */
+ for (j = 0; j < i; j++)
+ obd_iocontrol(cmd,
+ lmv->tgts[j]->ltd_exp,
+ len, lk, uarg);
+ return rc;
+ }
+ /* else: transient error.
+ * kuc will register to the missing MDT
+ * when it is back */
+ } else {
+ any_set = true;
+ }
+ }
+
+ if (!any_set)
+ /* no registration done: return error */
+ return -ENOTCONN;
+
+ /* at least one registration done, with no failure */
+ filp = fget(lk->lk_wfd);
+ if (filp == NULL)
+ return -EBADF;
+
+ rc = libcfs_kkuc_group_add(filp, lk->lk_uid, lk->lk_group, lk->lk_data);
+ if (rc != 0)
+ fput(filp);
+ return rc;
+}
+
+
+
+
static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
int len, void *karg, void *uarg)
{
@@ -792,10 +899,9 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
int rc = 0;
int set = 0;
int count = lmv->desc.ld_tgt_count;
- ENTRY;
if (count == 0)
- RETURN(-ENOTTY);
+ return -ENOTTY;
switch (cmd) {
case IOC_OBD_STATFS: {
@@ -806,31 +912,31 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
memcpy(&index, data->ioc_inlbuf2, sizeof(__u32));
if ((index >= count))
- RETURN(-ENODEV);
+ return -ENODEV;
if (lmv->tgts[index] == NULL ||
lmv->tgts[index]->ltd_active == 0)
- RETURN(-ENODATA);
+ return -ENODATA;
mdc_obd = class_exp2obd(lmv->tgts[index]->ltd_exp);
if (!mdc_obd)
- RETURN(-EINVAL);
+ return -EINVAL;
/* copy UUID */
if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd),
min((int) data->ioc_plen2,
(int) sizeof(struct obd_uuid))))
- RETURN(-EFAULT);
+ return -EFAULT;
rc = obd_statfs(NULL, lmv->tgts[index]->ltd_exp, &stat_buf,
cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
0);
if (rc)
- RETURN(rc);
+ return rc;
if (copy_to_user(data->ioc_pbuf1, &stat_buf,
min((int) data->ioc_plen1,
(int) sizeof(stat_buf))))
- RETURN(-EFAULT);
+ return -EFAULT;
break;
}
case OBD_IOC_QUOTACTL: {
@@ -840,11 +946,11 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
if (qctl->qc_valid == QC_MDTIDX) {
if (qctl->qc_idx < 0 || count <= qctl->qc_idx)
- RETURN(-EINVAL);
+ return -EINVAL;
tgt = lmv->tgts[qctl->qc_idx];
if (tgt == NULL || tgt->ltd_exp == NULL)
- RETURN(-EINVAL);
+ return -EINVAL;
} else if (qctl->qc_valid == QC_UUID) {
for (i = 0; i < count; i++) {
tgt = lmv->tgts[i];
@@ -855,21 +961,21 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
continue;
if (tgt->ltd_exp == NULL)
- RETURN(-EINVAL);
+ return -EINVAL;
break;
}
} else {
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (i >= count)
- RETURN(-EAGAIN);
+ return -EAGAIN;
LASSERT(tgt && tgt->ltd_exp);
OBD_ALLOC_PTR(oqctl);
if (!oqctl)
- RETURN(-ENOMEM);
+ return -ENOMEM;
QCTL_COPY(oqctl, qctl);
rc = obd_quotactl(tgt->ltd_exp, oqctl);
@@ -886,19 +992,19 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
struct ioc_changelog *icc = karg;
if (icc->icc_mdtindex >= count)
- RETURN(-ENODEV);
+ return -ENODEV;
if (lmv->tgts[icc->icc_mdtindex] == NULL ||
lmv->tgts[icc->icc_mdtindex]->ltd_exp == NULL ||
lmv->tgts[icc->icc_mdtindex]->ltd_active == 0)
- RETURN(-ENODEV);
+ return -ENODEV;
rc = obd_iocontrol(cmd, lmv->tgts[icc->icc_mdtindex]->ltd_exp,
sizeof(*icc), icc, NULL);
break;
}
case LL_IOC_GET_CONNECT_FLAGS: {
if (lmv->tgts[0] == NULL)
- RETURN(-ENODATA);
+ return -ENODATA;
rc = obd_iocontrol(cmd, lmv->tgts[0]->ltd_exp, len, karg, uarg);
break;
}
@@ -908,29 +1014,107 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
}
case LL_IOC_HSM_STATE_GET:
case LL_IOC_HSM_STATE_SET:
- case LL_IOC_HSM_ACTION:
+ case LL_IOC_HSM_ACTION: {
+ struct md_op_data *op_data = karg;
+ struct lmv_tgt_desc *tgt;
+
+ tgt = lmv_find_target(lmv, &op_data->op_fid1);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+
+ if (tgt->ltd_exp == NULL)
+ return -EINVAL;
+
+ rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
+ break;
+ }
+ case LL_IOC_HSM_PROGRESS: {
+ const struct hsm_progress_kernel *hpk = karg;
+ struct lmv_tgt_desc *tgt;
+
+ tgt = lmv_find_target(lmv, &hpk->hpk_fid);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+ rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
+ break;
+ }
+ case LL_IOC_HSM_REQUEST: {
+ struct hsm_user_request *hur = karg;
+ struct lmv_tgt_desc *tgt;
+ unsigned int reqcount = hur->hur_request.hr_itemcount;
+
+ if (reqcount == 0)
+ return 0;
+
+ /* if the request is about a single fid
+ * or if there is a single MDS, no need to split
+ * the request. */
+ if (reqcount == 1 || count == 1) {
+ tgt = lmv_find_target(lmv,
+ &hur->hur_user_item[0].hui_fid);
+ if (IS_ERR(tgt))
+ return PTR_ERR(tgt);
+ rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
+ } else {
+ /* split fid list to their respective MDS */
+ for (i = 0; i < count; i++) {
+ unsigned int nr, reqlen;
+ int rc1;
+ struct hsm_user_request *req;
+
+ nr = lmv_hsm_req_count(lmv, hur, lmv->tgts[i]);
+ if (nr == 0) /* nothing for this MDS */
+ continue;
+
+ /* build a request with fids for this MDS */
+ reqlen = offsetof(typeof(*hur),
+ hur_user_item[nr])
+ + hur->hur_request.hr_data_len;
+ OBD_ALLOC_LARGE(req, reqlen);
+ if (req == NULL)
+ return -ENOMEM;
+
+ lmv_hsm_req_build(lmv, hur, lmv->tgts[i], req);
+
+ rc1 = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp,
+ reqlen, req, uarg);
+ if (rc1 != 0 && rc == 0)
+ rc = rc1;
+ OBD_FREE_LARGE(req, reqlen);
+ }
+ }
+ break;
+ }
case LL_IOC_LOV_SWAP_LAYOUTS: {
struct md_op_data *op_data = karg;
struct lmv_tgt_desc *tgt1, *tgt2;
tgt1 = lmv_find_target(lmv, &op_data->op_fid1);
if (IS_ERR(tgt1))
- RETURN(PTR_ERR(tgt1));
+ return PTR_ERR(tgt1);
tgt2 = lmv_find_target(lmv, &op_data->op_fid2);
if (IS_ERR(tgt2))
- RETURN(PTR_ERR(tgt2));
+ return PTR_ERR(tgt2);
if ((tgt1->ltd_exp == NULL) || (tgt2->ltd_exp == NULL))
- RETURN(-EINVAL);
+ return -EINVAL;
/* only files on same MDT can have their layouts swapped */
if (tgt1->ltd_idx != tgt2->ltd_idx)
- RETURN(-EPERM);
+ return -EPERM;
rc = obd_iocontrol(cmd, tgt1->ltd_exp, len, karg, uarg);
break;
}
+ case LL_IOC_HSM_CT_START: {
+ struct lustre_kernelcomm *lk = karg;
+ if (lk->lk_flags & LK_FLG_STOP)
+ rc = lmv_hsm_ct_unregister(lmv, cmd, len, lk, uarg);
+ else
+ rc = lmv_hsm_ct_register(lmv, cmd, len, lk, uarg);
+ break;
+ }
default:
for (i = 0; i < count; i++) {
struct obd_device *mdc_obd;
@@ -946,7 +1130,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len,
karg, uarg);
if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) {
- RETURN(err);
+ return err;
} else if (err) {
if (lmv->tgts[i]->ltd_active) {
CERROR("error: iocontrol MDC %s on MDT"
@@ -962,7 +1146,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
if (!set && !rc)
rc = -EIO;
}
- RETURN(rc);
+ return rc;
}
#if 0
@@ -1018,13 +1202,12 @@ static int lmv_placement_policy(struct obd_device *obd,
mdsno_t *mds)
{
struct lmv_obd *lmv = &obd->u.lmv;
- ENTRY;
LASSERT(mds != NULL);
if (lmv->desc.ld_tgt_count == 1) {
*mds = 0;
- RETURN(0);
+ return 0;
}
/**
@@ -1042,17 +1225,17 @@ static int lmv_placement_policy(struct obd_device *obd,
" rc = %d\n", obd->obd_name,
lum->lum_stripe_offset,
lmv->desc.ld_tgt_count, -ERANGE);
- RETURN(-ERANGE);
+ return -ERANGE;
}
*mds = lum->lum_stripe_offset;
- RETURN(0);
+ return 0;
}
}
/* Allocate new fid on target according to operation type and parent
* home mds. */
*mds = op_data->op_mds;
- RETURN(0);
+ return 0;
}
int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid,
@@ -1060,11 +1243,10 @@ int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid,
{
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
tgt = lmv_get_target(lmv, mds);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
/*
* New seq alloc and FLD setup should be atomic. Otherwise we may find
@@ -1084,7 +1266,6 @@ int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid,
rc = 0;
}
- EXIT;
out:
mutex_unlock(&tgt->ltd_fid_mutex);
return rc;
@@ -1097,7 +1278,6 @@ int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
struct lmv_obd *lmv = &obd->u.lmv;
mdsno_t mds = 0;
int rc;
- ENTRY;
LASSERT(op_data != NULL);
LASSERT(fid != NULL);
@@ -1106,16 +1286,16 @@ int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
if (rc) {
CERROR("Can't get target for allocating fid, "
"rc %d\n", rc);
- RETURN(rc);
+ return rc;
}
rc = __lmv_fid_alloc(lmv, fid, mds);
if (rc) {
CERROR("Can't alloc new fid, rc %d\n", rc);
- RETURN(rc);
+ return rc;
}
- RETURN(rc);
+ return rc;
}
static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
@@ -1124,23 +1304,22 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
struct lprocfs_static_vars lvars;
struct lmv_desc *desc;
int rc;
- ENTRY;
if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
CERROR("LMV setup requires a descriptor\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
desc = (struct lmv_desc *)lustre_cfg_buf(lcfg, 1);
if (sizeof(*desc) > LUSTRE_CFG_BUFLEN(lcfg, 1)) {
CERROR("Lmv descriptor size wrong: %d > %d\n",
(int)sizeof(*desc), LUSTRE_CFG_BUFLEN(lcfg, 1));
- RETURN(-EINVAL);
+ return -EINVAL;
}
OBD_ALLOC(lmv->tgts, sizeof(*lmv->tgts) * 32);
if (lmv->tgts == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
lmv->tgts_size = 32;
obd_str2uuid(&lmv->desc.ld_uuid, desc->ld_uuid.uuid);
@@ -1173,7 +1352,7 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
GOTO(out, rc);
}
- RETURN(0);
+ return 0;
out:
return rc;
@@ -1182,7 +1361,6 @@ out:
static int lmv_cleanup(struct obd_device *obd)
{
struct lmv_obd *lmv = &obd->u.lmv;
- ENTRY;
fld_client_fini(&lmv->lmv_fld);
if (lmv->tgts != NULL) {
@@ -1195,7 +1373,7 @@ static int lmv_cleanup(struct obd_device *obd)
OBD_FREE(lmv->tgts, sizeof(*lmv->tgts) * lmv->tgts_size);
lmv->tgts_size = 0;
}
- RETURN(0);
+ return 0;
}
static int lmv_process_config(struct obd_device *obd, obd_count len, void *buf)
@@ -1205,7 +1383,6 @@ static int lmv_process_config(struct obd_device *obd, obd_count len, void *buf)
int gen;
__u32 index;
int rc;
- ENTRY;
switch (lcfg->lcfg_command) {
case LCFG_ADD_MDC:
@@ -1227,7 +1404,7 @@ static int lmv_process_config(struct obd_device *obd, obd_count len, void *buf)
GOTO(out, rc = -EINVAL);
}
out:
- RETURN(rc);
+ return rc;
}
static int lmv_statfs(const struct lu_env *env, struct obd_export *exp,
@@ -1238,15 +1415,14 @@ static int lmv_statfs(const struct lu_env *env, struct obd_export *exp,
struct obd_statfs *temp;
int rc = 0;
int i;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
OBD_ALLOC(temp, sizeof(*temp));
if (temp == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
@@ -1279,7 +1455,6 @@ static int lmv_statfs(const struct lu_env *env, struct obd_export *exp,
}
}
- EXIT;
out_free_temp:
OBD_FREE(temp, sizeof(*temp));
return rc;
@@ -1292,14 +1467,13 @@ static int lmv_getstatus(struct obd_export *exp,
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
rc = md_getstatus(lmv->tgts[0]->ltd_exp, fid, pc);
- RETURN(rc);
+ return rc;
}
static int lmv_getxattr(struct obd_export *exp, const struct lu_fid *fid,
@@ -1311,20 +1485,19 @@ static int lmv_getxattr(struct obd_export *exp, const struct lu_fid *fid,
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
tgt = lmv_find_target(lmv, fid);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
rc = md_getxattr(tgt->ltd_exp, fid, oc, valid, name, input,
input_size, output_size, flags, request);
- RETURN(rc);
+ return rc;
}
static int lmv_setxattr(struct obd_export *exp, const struct lu_fid *fid,
@@ -1337,21 +1510,20 @@ static int lmv_setxattr(struct obd_export *exp, const struct lu_fid *fid,
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
tgt = lmv_find_target(lmv, fid);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
rc = md_setxattr(tgt->ltd_exp, fid, oc, valid, name, input,
input_size, output_size, flags, suppgid,
request);
- RETURN(rc);
+ return rc;
}
static int lmv_getattr(struct obd_export *exp, struct md_op_data *op_data,
@@ -1361,24 +1533,23 @@ static int lmv_getattr(struct obd_export *exp, struct md_op_data *op_data,
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
tgt = lmv_find_target(lmv, &op_data->op_fid1);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
if (op_data->op_flags & MF_GET_MDT_IDX) {
op_data->op_mds = tgt->ltd_idx;
- RETURN(0);
+ return 0;
}
rc = md_getattr(tgt->ltd_exp, op_data, request);
- RETURN(rc);
+ return rc;
}
static int lmv_null_inode(struct obd_export *exp, const struct lu_fid *fid)
@@ -1387,11 +1558,10 @@ static int lmv_null_inode(struct obd_export *exp, const struct lu_fid *fid)
struct lmv_obd *lmv = &obd->u.lmv;
int i;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
@@ -1406,7 +1576,7 @@ static int lmv_null_inode(struct obd_export *exp, const struct lu_fid *fid)
md_null_inode(lmv->tgts[i]->ltd_exp, fid);
}
- RETURN(0);
+ return 0;
}
static int lmv_find_cbdata(struct obd_export *exp, const struct lu_fid *fid,
@@ -1416,11 +1586,10 @@ static int lmv_find_cbdata(struct obd_export *exp, const struct lu_fid *fid,
struct lmv_obd *lmv = &obd->u.lmv;
int i;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
@@ -1434,10 +1603,10 @@ static int lmv_find_cbdata(struct obd_export *exp, const struct lu_fid *fid,
continue;
rc = md_find_cbdata(lmv->tgts[i]->ltd_exp, fid, it, data);
if (rc)
- RETURN(rc);
+ return rc;
}
- RETURN(rc);
+ return rc;
}
@@ -1448,19 +1617,18 @@ static int lmv_close(struct obd_export *exp, struct md_op_data *op_data,
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
tgt = lmv_find_target(lmv, &op_data->op_fid1);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
CDEBUG(D_INODE, "CLOSE "DFID"\n", PFID(&op_data->op_fid1));
rc = md_close(tgt->ltd_exp, op_data, mod, request);
- RETURN(rc);
+ return rc;
}
struct lmv_tgt_desc
@@ -1487,22 +1655,21 @@ int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
if (!lmv->desc.ld_active_tgt_count)
- RETURN(-EIO);
+ return -EIO;
tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
rc = lmv_fid_alloc(exp, &op_data->op_fid2, op_data);
if (rc)
- RETURN(rc);
+ return rc;
CDEBUG(D_INODE, "CREATE '%*s' on "DFID" -> mds #%x\n",
op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
@@ -1514,10 +1681,10 @@ int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
if (rc == 0) {
if (*request == NULL)
- RETURN(rc);
+ return rc;
CDEBUG(D_INODE, "Created - "DFID"\n", PFID(&op_data->op_fid2));
}
- RETURN(rc);
+ return rc;
}
static int lmv_done_writing(struct obd_export *exp,
@@ -1528,18 +1695,17 @@ static int lmv_done_writing(struct obd_export *exp,
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
tgt = lmv_find_target(lmv, &op_data->op_fid1);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
rc = md_done_writing(tgt->ltd_exp, op_data, mod);
- RETURN(rc);
+ return rc;
}
static int
@@ -1558,13 +1724,12 @@ lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
struct mdt_body *body;
int rc = 0;
int pmode;
- ENTRY;
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
LASSERT(body != NULL);
if (!(body->valid & OBD_MD_MDS))
- RETURN(0);
+ return 0;
CDEBUG(D_INODE, "REMOTE_ENQUEUE '%s' on "DFID" -> "DFID"\n",
LL_IT2STR(it), PFID(&op_data->op_fid1), PFID(&body->fid1));
@@ -1596,7 +1761,6 @@ lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
rc = md_enqueue(tgt->ltd_exp, einfo, it, rdata, lockh,
lmm, lmmsize, NULL, extra_lock_flags);
OBD_FREE_PTR(rdata);
- EXIT;
out:
ldlm_lock_decref(&plock, pmode);
return rc;
@@ -1612,18 +1776,17 @@ lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
CDEBUG(D_INODE, "ENQUEUE '%s' on "DFID"\n",
LL_IT2STR(it), PFID(&op_data->op_fid1));
tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
CDEBUG(D_INODE, "ENQUEUE '%s' on "DFID" -> mds #%d\n",
LL_IT2STR(it), PFID(&op_data->op_fid1), tgt->ltd_idx);
@@ -1635,7 +1798,7 @@ lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
rc = lmv_enqueue_remote(exp, einfo, it, op_data, lockh,
lmm, lmmsize, extra_lock_flags);
}
- RETURN(rc);
+ return rc;
}
static int
@@ -1648,15 +1811,14 @@ lmv_getattr_name(struct obd_export *exp,struct md_op_data *op_data,
struct lmv_tgt_desc *tgt;
struct mdt_body *body;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
CDEBUG(D_INODE, "GETATTR_NAME for %*s on "DFID" -> mds #%d\n",
op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
@@ -1664,7 +1826,7 @@ lmv_getattr_name(struct obd_export *exp,struct md_op_data *op_data,
rc = md_getattr_name(tgt->ltd_exp, op_data, request);
if (rc != 0)
- RETURN(rc);
+ return rc;
body = req_capsule_server_get(&(*request)->rq_pill,
&RMF_MDT_BODY);
@@ -1678,7 +1840,7 @@ lmv_getattr_name(struct obd_export *exp,struct md_op_data *op_data,
tgt = lmv_find_target(lmv, &rid);
if (IS_ERR(tgt)) {
ptlrpc_req_finished(*request);
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
}
op_data->op_fid1 = rid;
@@ -1690,7 +1852,7 @@ lmv_getattr_name(struct obd_export *exp,struct md_op_data *op_data,
*request = req;
}
- RETURN(rc);
+ return rc;
}
#define md_op_data_fid(op_data, fl) \
@@ -1709,14 +1871,13 @@ static int lmv_early_cancel(struct obd_export *exp, struct md_op_data *op_data,
struct lmv_tgt_desc *tgt;
ldlm_policy_data_t policy = {{0}};
int rc = 0;
- ENTRY;
if (!fid_is_sane(fid))
- RETURN(0);
+ return 0;
tgt = lmv_find_target(lmv, fid);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
if (tgt->ltd_idx != op_tgt) {
CDEBUG(D_INODE, "EARLY_CANCEL on "DFID"\n", PFID(fid));
@@ -1731,7 +1892,7 @@ static int lmv_early_cancel(struct obd_export *exp, struct md_op_data *op_data,
rc = 0;
}
- RETURN(rc);
+ return rc;
}
/*
@@ -1745,11 +1906,10 @@ static int lmv_link(struct obd_export *exp, struct md_op_data *op_data,
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
LASSERT(op_data->op_namelen != 0);
@@ -1757,12 +1917,12 @@ static int lmv_link(struct obd_export *exp, struct md_op_data *op_data,
PFID(&op_data->op_fid2), op_data->op_namelen,
op_data->op_name, PFID(&op_data->op_fid1));
- op_data->op_fsuid = current_fsuid();
- op_data->op_fsgid = current_fsgid();
+ op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
+ op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
op_data->op_cap = cfs_curproc_cap_pack();
tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid2);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
/*
* Cancel UPDATE lock on child (fid1).
@@ -1771,11 +1931,11 @@ static int lmv_link(struct obd_export *exp, struct md_op_data *op_data,
rc = lmv_early_cancel(exp, op_data, tgt->ltd_idx, LCK_EX,
MDS_INODELOCK_UPDATE, MF_MDC_CANCEL_FID1);
if (rc != 0)
- RETURN(rc);
+ return rc;
rc = md_link(tgt->ltd_exp, op_data, request);
- RETURN(rc);
+ return rc;
}
static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
@@ -1787,7 +1947,6 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
struct lmv_tgt_desc *src_tgt;
struct lmv_tgt_desc *tgt_tgt;
int rc;
- ENTRY;
LASSERT(oldlen != 0);
@@ -1797,18 +1956,18 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
- op_data->op_fsuid = current_fsuid();
- op_data->op_fsgid = current_fsgid();
+ op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
+ op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
op_data->op_cap = cfs_curproc_cap_pack();
src_tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
if (IS_ERR(src_tgt))
- RETURN(PTR_ERR(src_tgt));
+ return PTR_ERR(src_tgt);
tgt_tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid2);
if (IS_ERR(tgt_tgt))
- RETURN(PTR_ERR(tgt_tgt));
+ return PTR_ERR(tgt_tgt);
/*
* LOOKUP lock on src child (fid3) should also be cancelled for
* src_tgt in mdc_rename.
@@ -1843,7 +2002,7 @@ static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
if (rc == 0)
rc = md_rename(src_tgt->ltd_exp, op_data, old, oldlen,
new, newlen, request);
- RETURN(rc);
+ return rc;
}
static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
@@ -1855,11 +2014,10 @@ static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc = 0;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
CDEBUG(D_INODE, "SETATTR for "DFID", valid 0x%x\n",
PFID(&op_data->op_fid1), op_data->op_attr.ia_valid);
@@ -1867,12 +2025,12 @@ static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
op_data->op_flags |= MF_MDC_CANCEL_FID1;
tgt = lmv_find_target(lmv, &op_data->op_fid1);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
rc = md_setattr(tgt->ltd_exp, op_data, ea, ealen, ea2,
ea2len, request, mod);
- RETURN(rc);
+ return rc;
}
static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
@@ -1882,18 +2040,17 @@ static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
tgt = lmv_find_target(lmv, fid);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
rc = md_sync(tgt->ltd_exp, fid, oc, request);
- RETURN(rc);
+ return rc;
}
/*
@@ -1959,7 +2116,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
__u64 hash_end = dp->ldp_hash_end;
__u32 flags = dp->ldp_flags;
- for (; nlupgs > 1; nlupgs--) {
+ while (--nlupgs > 0) {
ent = lu_dirent_start(dp);
for (end_dirent = ent; ent != NULL;
end_dirent = ent, ent = lu_dirent_next(ent));
@@ -1993,6 +2150,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
kunmap(pages[i]);
}
+ LASSERTF(nlupgs == 0, "left = %d", nlupgs);
}
#else
#define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
@@ -2008,22 +2166,21 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
int ncfspgs; /* pages read in PAGE_CACHE_SIZE */
int nlupgs; /* pages read in LU_PAGE_SIZE */
struct lmv_tgt_desc *tgt;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
CDEBUG(D_INODE, "READPAGE at "LPX64" from "DFID"\n",
offset, PFID(&op_data->op_fid1));
tgt = lmv_find_target(lmv, &op_data->op_fid1);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
rc = md_readpage(tgt->ltd_exp, op_data, pages, request);
if (rc != 0)
- RETURN(rc);
+ return rc;
ncfspgs = ((*request)->rq_bulk->bd_nob_transferred + PAGE_CACHE_SIZE - 1)
>> PAGE_CACHE_SHIFT;
@@ -2036,7 +2193,7 @@ static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
lmv_adjust_dirpages(pages, ncfspgs, nlupgs);
- RETURN(rc);
+ return rc;
}
static int lmv_unlink(struct obd_export *exp, struct md_op_data *op_data,
@@ -2047,11 +2204,10 @@ static int lmv_unlink(struct obd_export *exp, struct md_op_data *op_data,
struct lmv_tgt_desc *tgt = NULL;
struct mdt_body *body;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
retry:
/* Send unlink requests to the MDT where the child is located */
if (likely(!fid_is_zero(&op_data->op_fid2)))
@@ -2059,10 +2215,10 @@ retry:
else
tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
- op_data->op_fsuid = current_fsuid();
- op_data->op_fsgid = current_fsgid();
+ op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
+ op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
op_data->op_cap = cfs_curproc_cap_pack();
/*
@@ -2081,22 +2237,22 @@ retry:
MDS_INODELOCK_FULL, MF_MDC_CANCEL_FID3);
if (rc != 0)
- RETURN(rc);
+ return rc;
CDEBUG(D_INODE, "unlink with fid="DFID"/"DFID" -> mds #%d\n",
PFID(&op_data->op_fid1), PFID(&op_data->op_fid2), tgt->ltd_idx);
rc = md_unlink(tgt->ltd_exp, op_data, request);
if (rc != 0 && rc != -EREMOTE)
- RETURN(rc);
+ return rc;
body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
if (body == NULL)
- RETURN(-EPROTO);
+ return -EPROTO;
/* Not cross-ref case, just get out of here. */
if (likely(!(body->valid & OBD_MD_MDS)))
- RETURN(0);
+ return 0;
CDEBUG(D_INODE, "%s: try unlink to another MDT for "DFID"\n",
exp->exp_obd->obd_name, PFID(&body->fid1));
@@ -2144,7 +2300,7 @@ static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
default:
break;
}
- RETURN(rc);
+ return rc;
}
static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
@@ -2154,13 +2310,12 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
struct obd_device *obd;
struct lmv_obd *lmv;
int rc = 0;
- ENTRY;
obd = class_exp2obd(exp);
if (obd == NULL) {
CDEBUG(D_IOCTL, "Invalid client cookie "LPX64"\n",
exp->exp_handle.h_cookie);
- RETURN(-EINVAL);
+ return -EINVAL;
}
lmv = &obd->u.lmv;
@@ -2170,7 +2325,7 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
LASSERT(*vallen == sizeof(__u32));
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
@@ -2183,13 +2338,13 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
if (!obd_get_info(env, tgt->ltd_exp, keylen, key,
vallen, val, NULL))
- RETURN(0);
+ return 0;
}
- RETURN(-EINVAL);
+ return -EINVAL;
} else if (KEY_IS(KEY_MAX_EASIZE) || KEY_IS(KEY_CONN_DATA)) {
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
/*
* Forwarding this request to first MDS, it should know LOV
@@ -2199,14 +2354,14 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
vallen, val, NULL);
if (!rc && KEY_IS(KEY_CONN_DATA))
exp->exp_connect_data = *(struct obd_connect_data *)val;
- RETURN(rc);
+ return rc;
} else if (KEY_IS(KEY_TGT_COUNT)) {
*((int *)val) = lmv->desc.ld_tgt_count;
- RETURN(0);
+ return 0;
}
CDEBUG(D_IOCTL, "Invalid key\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
@@ -2217,13 +2372,12 @@ int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
struct obd_device *obd;
struct lmv_obd *lmv;
int rc = 0;
- ENTRY;
obd = class_exp2obd(exp);
if (obd == NULL) {
CDEBUG(D_IOCTL, "Invalid client cookie "LPX64"\n",
exp->exp_handle.h_cookie);
- RETURN(-EINVAL);
+ return -EINVAL;
}
lmv = &obd->u.lmv;
@@ -2242,10 +2396,10 @@ int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
rc = err;
}
- RETURN(rc);
+ return rc;
}
- RETURN(-EINVAL);
+ return -EINVAL;
}
int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
@@ -2257,33 +2411,32 @@ int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
struct lmv_stripe_md *lsmp;
int mea_size;
int i;
- ENTRY;
mea_size = lmv_get_easize(lmv);
if (!lmmp)
- RETURN(mea_size);
+ return mea_size;
if (*lmmp && !lsm) {
OBD_FREE_LARGE(*lmmp, mea_size);
*lmmp = NULL;
- RETURN(0);
+ return 0;
}
if (*lmmp == NULL) {
OBD_ALLOC_LARGE(*lmmp, mea_size);
if (*lmmp == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
if (!lsm)
- RETURN(mea_size);
+ return mea_size;
lsmp = (struct lmv_stripe_md *)lsm;
meap = (struct lmv_stripe_md *)*lmmp;
if (lsmp->mea_magic != MEA_MAGIC_LAST_CHAR &&
lsmp->mea_magic != MEA_MAGIC_ALL_CHARS)
- RETURN(-EINVAL);
+ return -EINVAL;
meap->mea_magic = cpu_to_le32(lsmp->mea_magic);
meap->mea_count = cpu_to_le32(lsmp->mea_count);
@@ -2294,7 +2447,7 @@ int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
fid_cpu_to_le(&meap->mea_ids[i], &lsmp->mea_ids[i]);
}
- RETURN(mea_size);
+ return mea_size;
}
int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
@@ -2307,7 +2460,6 @@ int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
int mea_size;
int i;
__u32 magic;
- ENTRY;
mea_size = lmv_get_easize(lmv);
if (lsmp == NULL)
@@ -2316,17 +2468,17 @@ int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
if (*lsmp != NULL && lmm == NULL) {
OBD_FREE_LARGE(*tmea, mea_size);
*lsmp = NULL;
- RETURN(0);
+ return 0;
}
LASSERT(mea_size == lmm_size);
OBD_ALLOC_LARGE(*tmea, mea_size);
if (*tmea == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
if (!lmm)
- RETURN(mea_size);
+ return mea_size;
if (mea->mea_magic == MEA_MAGIC_LAST_CHAR ||
mea->mea_magic == MEA_MAGIC_ALL_CHARS ||
@@ -2349,7 +2501,7 @@ int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
(*tmea)->mea_ids[i] = mea->mea_ids[i];
fid_le_to_cpu(&(*tmea)->mea_ids[i], &(*tmea)->mea_ids[i]);
}
- RETURN(mea_size);
+ return mea_size;
}
static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
@@ -2361,7 +2513,6 @@ static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
int rc = 0;
int err;
int i;
- ENTRY;
LASSERT(fid != NULL);
@@ -2375,7 +2526,7 @@ static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
if (!rc)
rc = err;
}
- RETURN(rc);
+ return rc;
}
int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
@@ -2383,10 +2534,9 @@ int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
{
struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
int rc;
- ENTRY;
rc = md_set_lock_data(lmv->tgts[0]->ltd_exp, lockh, data, bits);
- RETURN(rc);
+ return rc;
}
ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags,
@@ -2398,7 +2548,6 @@ ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags,
struct lmv_obd *lmv = &obd->u.lmv;
ldlm_mode_t rc;
int i;
- ENTRY;
CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid));
@@ -2417,10 +2566,10 @@ ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags,
rc = md_lock_match(lmv->tgts[i]->ltd_exp, flags, fid,
type, policy, mode, lockh);
if (rc)
- RETURN(rc);
+ return rc;
}
- RETURN(0);
+ return 0;
}
int lmv_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req,
@@ -2436,11 +2585,10 @@ int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
- ENTRY;
if (md->mea)
obd_free_memmd(exp, (void *)&md->mea);
- RETURN(md_free_lustre_md(lmv->tgts[0]->ltd_exp, md));
+ return md_free_lustre_md(lmv->tgts[0]->ltd_exp, md);
}
int lmv_set_open_replay_data(struct obd_export *exp,
@@ -2450,13 +2598,12 @@ int lmv_set_open_replay_data(struct obd_export *exp,
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
- ENTRY;
tgt = lmv_find_target(lmv, &och->och_fid);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
- RETURN(md_set_open_replay_data(tgt->ltd_exp, och, open_req));
+ return md_set_open_replay_data(tgt->ltd_exp, och, open_req);
}
int lmv_clear_open_replay_data(struct obd_export *exp,
@@ -2465,13 +2612,12 @@ int lmv_clear_open_replay_data(struct obd_export *exp,
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
- ENTRY;
tgt = lmv_find_target(lmv, &och->och_fid);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
- RETURN(md_clear_open_replay_data(tgt->ltd_exp, och));
+ return md_clear_open_replay_data(tgt->ltd_exp, och);
}
static int lmv_get_remote_perm(struct obd_export *exp,
@@ -2483,18 +2629,17 @@ static int lmv_get_remote_perm(struct obd_export *exp,
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
tgt = lmv_find_target(lmv, fid);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
rc = md_get_remote_perm(tgt->ltd_exp, fid, oc, suppgid, request);
- RETURN(rc);
+ return rc;
}
static int lmv_renew_capa(struct obd_export *exp, struct obd_capa *oc,
@@ -2504,18 +2649,17 @@ static int lmv_renew_capa(struct obd_export *exp, struct obd_capa *oc,
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
tgt = lmv_find_target(lmv, &oc->c_capa.lc_fid);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
rc = md_renew_capa(tgt->ltd_exp, oc, cb);
- RETURN(rc);
+ return rc;
}
int lmv_unpack_capa(struct obd_export *exp, struct ptlrpc_request *req,
@@ -2535,18 +2679,17 @@ int lmv_intent_getattr_async(struct obd_export *exp,
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt = NULL;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
tgt = lmv_find_target(lmv, &op_data->op_fid1);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
rc = md_intent_getattr_async(tgt->ltd_exp, minfo, einfo);
- RETURN(rc);
+ return rc;
}
int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
@@ -2556,18 +2699,17 @@ int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int rc;
- ENTRY;
rc = lmv_check_connect(obd);
if (rc)
- RETURN(rc);
+ return rc;
tgt = lmv_find_target(lmv, fid);
if (IS_ERR(tgt))
- RETURN(PTR_ERR(tgt));
+ return PTR_ERR(tgt);
rc = md_revalidate_lock(tgt->ltd_exp, it, fid, bits);
- RETURN(rc);
+ return rc;
}
/**
@@ -2583,16 +2725,15 @@ int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
struct lmv_tgt_desc *tgt = lmv->tgts[0];
int rc = 0, i;
__u64 curspace, curinodes;
- ENTRY;
if (!lmv->desc.ld_tgt_count || !tgt->ltd_active) {
CERROR("master lmv inactive\n");
- RETURN(-EIO);
+ return -EIO;
}
if (oqctl->qc_cmd != Q_GETOQUOTA) {
rc = obd_quotactl(tgt->ltd_exp, oqctl);
- RETURN(rc);
+ return rc;
}
curspace = curinodes = 0;
@@ -2620,7 +2761,7 @@ int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
oqctl->qc_dqblk.dqb_curspace = curspace;
oqctl->qc_dqblk.dqb_curinodes = curinodes;
- RETURN(rc);
+ return rc;
}
int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp,
@@ -2630,14 +2771,13 @@ int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp,
struct lmv_obd *lmv = &obd->u.lmv;
struct lmv_tgt_desc *tgt;
int i, rc = 0;
- ENTRY;
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
int err;
tgt = lmv->tgts[i];
if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active) {
CERROR("lmv idx %d inactive\n", i);
- RETURN(-EIO);
+ return -EIO;
}
err = obd_quotacheck(tgt->ltd_exp, oqctl);
@@ -2645,7 +2785,7 @@ int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp,
rc = err;
}
- RETURN(rc);
+ return rc;
}
struct obd_ops lmv_obd_ops = {
diff --git a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
index d1c45b583cb..edb5a3a99d5 100644
--- a/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
+++ b/drivers/staging/lustre/lustre/lmv/lproc_lmv.c
@@ -36,7 +36,6 @@
#define DEBUG_SUBSYSTEM S_CLASS
-#include <linux/version.h>
#include <linux/seq_file.h>
#include <asm/statfs.h>
#include <lprocfs_status.h>
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index 28801b8b5fd..33d9ce68fed 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -162,10 +162,9 @@ struct lov_device {
* Layout type.
*/
enum lov_layout_type {
- /** empty file without body */
- LLT_EMPTY,
- /** striped file */
- LLT_RAID0,
+ LLT_EMPTY, /** empty file without body (mknod + truncate) */
+ LLT_RAID0, /** striped file */
+ LLT_RELEASED, /** file with no objects (data in HSM) */
LLT_NR
};
@@ -255,12 +254,14 @@ struct lov_object {
} raid0;
struct lov_layout_state_empty {
} empty;
+ struct lov_layout_state_released {
+ } released;
} u;
/**
* Thread that acquired lov_object::lo_type_guard in an exclusive
* mode.
*/
- task_t *lo_owner;
+ struct task_struct *lo_owner;
};
/**
@@ -582,6 +583,8 @@ int lov_io_init_raid0 (const struct lu_env *env, struct cl_object *obj,
struct cl_io *io);
int lov_io_init_empty (const struct lu_env *env, struct cl_object *obj,
struct cl_io *io);
+int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io);
void lov_lock_unlink (const struct lu_env *env, struct lov_lock_link *link,
struct lovsub_lock *sub);
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
index f94f8d9d33d..a4006ef46ad 100644
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lov_dev.c
@@ -122,10 +122,8 @@ static void lov_req_completion(const struct lu_env *env,
{
struct lov_req *lr;
- ENTRY;
lr = cl2lov_req(slice);
OBD_SLAB_FREE_PTR(lr, lov_req_kmem);
- EXIT;
}
static const struct cl_req_operations lov_req_ops = {
@@ -200,7 +198,7 @@ static struct lu_device *lov_device_fini(const struct lu_env *env,
LASSERT(ld->ld_lov != NULL);
if (ld->ld_target == NULL)
- RETURN(NULL);
+ return NULL;
lov_foreach_target(ld, i) {
struct lovsub_device *lsd;
@@ -211,7 +209,7 @@ static struct lu_device *lov_device_fini(const struct lu_env *env,
ld->ld_target[i] = NULL;
}
}
- RETURN(NULL);
+ return NULL;
}
static int lov_device_init(const struct lu_env *env, struct lu_device *d,
@@ -223,7 +221,7 @@ static int lov_device_init(const struct lu_env *env, struct lu_device *d,
LASSERT(d->ld_site != NULL);
if (ld->ld_target == NULL)
- RETURN(rc);
+ return rc;
lov_foreach_target(ld, i) {
struct lovsub_device *lsd;
@@ -251,7 +249,7 @@ static int lov_device_init(const struct lu_env *env, struct lu_device *d,
else
ld->ld_flags |= LOV_DEV_INITIALIZED;
- RETURN(rc);
+ return rc;
}
static int lov_req_init(const struct lu_env *env, struct cl_device *dev,
@@ -260,14 +258,13 @@ static int lov_req_init(const struct lu_env *env, struct cl_device *dev,
struct lov_req *lr;
int result;
- ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(lr, lov_req_kmem, __GFP_IO);
if (lr != NULL) {
cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops);
result = 0;
} else
result = -ENOMEM;
- RETURN(result);
+ return result;
}
static const struct cl_device_operations lov_cl_ops = {
@@ -311,13 +308,11 @@ static void lov_cl_del_target(const struct lu_env *env, struct lu_device *dev,
__u32 index)
{
struct lov_device *ld = lu2lov_dev(dev);
- ENTRY;
if (ld->ld_target[index] != NULL) {
cl_stack_fini(env, lovsub2cl_dev(ld->ld_target[index]));
ld->ld_target[index] = NULL;
}
- EXIT;
}
static struct lov_device_emerg **lov_emerg_alloc(int nr)
@@ -360,7 +355,6 @@ static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev)
__u32 tgt_size;
__u32 sub_size;
- ENTRY;
result = 0;
tgt_size = dev->ld_lov->lov_tgt_size;
sub_size = dev->ld_target_nr;
@@ -371,7 +365,7 @@ static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev)
emerg = lov_emerg_alloc(tgt_size);
if (IS_ERR(emerg))
- RETURN(PTR_ERR(emerg));
+ return PTR_ERR(emerg);
OBD_ALLOC(newd, tgt_size * sz);
if (newd != NULL) {
@@ -392,7 +386,7 @@ static int lov_expand_targets(const struct lu_env *env, struct lov_device *dev)
result = -ENOMEM;
}
}
- RETURN(result);
+ return result;
}
static int lov_cl_add_target(const struct lu_env *env, struct lu_device *dev,
@@ -404,7 +398,6 @@ static int lov_cl_add_target(const struct lu_env *env, struct lu_device *dev,
struct lovsub_device *lsd;
struct cl_device *cl;
int rc;
- ENTRY;
obd_getref(obd);
@@ -414,7 +407,7 @@ static int lov_cl_add_target(const struct lu_env *env, struct lu_device *dev,
if (!tgt->ltd_obd->obd_set_up) {
CERROR("Target %s not set up\n", obd_uuid2str(&tgt->ltd_uuid));
- RETURN(-EINVAL);
+ return -EINVAL;
}
rc = lov_expand_targets(env, ld);
@@ -436,7 +429,7 @@ static int lov_cl_add_target(const struct lu_env *env, struct lu_device *dev,
}
}
obd_putref(obd);
- RETURN(rc);
+ return rc;
}
static int lov_process_config(const struct lu_env *env,
@@ -466,7 +459,7 @@ static int lov_process_config(const struct lu_env *env,
}
}
obd_putref(obd);
- RETURN(rc);
+ return rc;
}
static const struct lu_device_operations lov_lu_ops = {
@@ -485,7 +478,7 @@ static struct lu_device *lov_device_alloc(const struct lu_env *env,
OBD_ALLOC_PTR(ld);
if (ld == NULL)
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
cl_device_init(&ld->ld_cl, t);
d = lov2lu_dev(ld);
@@ -501,11 +494,11 @@ static struct lu_device *lov_device_alloc(const struct lu_env *env,
rc = lov_setup(obd, cfg);
if (rc) {
lov_device_free(env, d);
- RETURN(ERR_PTR(rc));
+ return ERR_PTR(rc);
}
ld->ld_lov = &obd->u.lov;
- RETURN(d);
+ return d;
}
static const struct lu_device_type_operations lov_device_type_ops = {
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
index 340dbcf829e..e6c60151dc6 100644
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ b/drivers/staging/lustre/lustre/lov/lov_ea.c
@@ -57,7 +57,7 @@ struct lovea_unpack_args {
static int lsm_lmm_verify_common(struct lov_mds_md *lmm, int lmm_bytes,
__u16 stripe_count)
{
- if (stripe_count == 0 || stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
+ if (stripe_count > LOV_V1_INSANE_STRIPE_COUNT) {
CERROR("bad stripe count %d\n", stripe_count);
lov_dump_lmm_common(D_WARNING, lmm);
return -EINVAL;
@@ -69,7 +69,7 @@ static int lsm_lmm_verify_common(struct lov_mds_md *lmm, int lmm_bytes,
return -EINVAL;
}
- if (lmm->lmm_pattern != cpu_to_le32(LOV_PATTERN_RAID0)) {
+ if (lov_pattern(le32_to_cpu(lmm->lmm_pattern)) != LOV_PATTERN_RAID0) {
CERROR("bad striping pattern\n");
lov_dump_lmm_common(D_WARNING, lmm);
return -EINVAL;
@@ -197,6 +197,8 @@ static int lsm_lmm_verify_v1(struct lov_mds_md_v1 *lmm, int lmm_bytes,
}
*stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
+ if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_RELEASED)
+ *stripe_count = 0;
if (lmm_bytes < lov_mds_md_size(*stripe_count, LOV_MAGIC_V1)) {
CERROR("LOV EA V1 too small: %d, need %d\n",
@@ -213,11 +215,14 @@ int lsm_unpackmd_v1(struct lov_obd *lov, struct lov_stripe_md *lsm,
{
struct lov_oinfo *loi;
int i;
+ int stripe_count;
__u64 stripe_maxbytes = OBD_OBJECT_EOF;
lsm_unpackmd_common(lsm, lmm);
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
+ stripe_count = lsm_is_released(lsm) ? 0 : lsm->lsm_stripe_count;
+
+ for (i = 0; i < stripe_count; i++) {
/* XXX LOV STACKING call down to osc_unpackmd() */
loi = lsm->lsm_oinfo[i];
ostid_le_to_cpu(&lmm->lmm_objects[i].l_ost_oi, &loi->loi_oi);
@@ -240,6 +245,8 @@ int lsm_unpackmd_v1(struct lov_obd *lov, struct lov_stripe_md *lsm,
}
lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count;
+ if (lsm->lsm_stripe_count == 0)
+ lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count;
return 0;
}
@@ -267,6 +274,8 @@ static int lsm_lmm_verify_v3(struct lov_mds_md *lmmv1, int lmm_bytes,
}
*stripe_count = le16_to_cpu(lmm->lmm_stripe_count);
+ if (le32_to_cpu(lmm->lmm_pattern) & LOV_PATTERN_F_RELEASED)
+ *stripe_count = 0;
if (lmm_bytes < lov_mds_md_size(*stripe_count, LOV_MAGIC_V3)) {
CERROR("LOV EA V3 too small: %d, need %d\n",
@@ -285,18 +294,22 @@ int lsm_unpackmd_v3(struct lov_obd *lov, struct lov_stripe_md *lsm,
struct lov_mds_md_v3 *lmm;
struct lov_oinfo *loi;
int i;
+ int stripe_count;
__u64 stripe_maxbytes = OBD_OBJECT_EOF;
int cplen = 0;
lmm = (struct lov_mds_md_v3 *)lmmv1;
lsm_unpackmd_common(lsm, (struct lov_mds_md_v1 *)lmm);
+
+ stripe_count = lsm_is_released(lsm) ? 0 : lsm->lsm_stripe_count;
+
cplen = strlcpy(lsm->lsm_pool_name, lmm->lmm_pool_name,
sizeof(lsm->lsm_pool_name));
if (cplen >= sizeof(lsm->lsm_pool_name))
return -E2BIG;
- for (i = 0; i < lsm->lsm_stripe_count; i++) {
+ for (i = 0; i < stripe_count; i++) {
/* XXX LOV STACKING call down to osc_unpackmd() */
loi = lsm->lsm_oinfo[i];
ostid_le_to_cpu(&lmm->lmm_objects[i].l_ost_oi, &loi->loi_oi);
@@ -319,6 +332,8 @@ int lsm_unpackmd_v3(struct lov_obd *lov, struct lov_stripe_md *lsm,
}
lsm->lsm_maxbytes = stripe_maxbytes * lsm->lsm_stripe_count;
+ if (lsm->lsm_stripe_count == 0)
+ lsm->lsm_maxbytes = stripe_maxbytes * lov->desc.ld_tgt_count;
return 0;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index 1a87abdf095..b611aa4e9dc 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -59,7 +59,6 @@ static inline void lov_sub_exit(struct lov_io_sub *sub)
static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio,
struct lov_io_sub *sub)
{
- ENTRY;
if (sub->sub_io != NULL) {
if (sub->sub_io_initialized) {
lov_sub_enter(sub);
@@ -79,7 +78,6 @@ static void lov_io_sub_fini(const struct lu_env *env, struct lov_io *lio,
cl_env_put(sub->sub_env, &sub->sub_refcheck);
sub->sub_env = NULL;
}
- EXIT;
}
static void lov_io_sub_inherit(struct cl_io *io, struct lov_io *lio,
@@ -149,7 +147,6 @@ static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio,
LASSERT(sub->sub_io == NULL);
LASSERT(sub->sub_env == NULL);
LASSERT(sub->sub_stripe < lio->lis_stripe_count);
- ENTRY;
result = 0;
sub->sub_io_initialized = 0;
@@ -210,7 +207,7 @@ static int lov_io_sub_init(const struct lu_env *env, struct lov_io *lio,
}
if (result != 0)
lov_io_sub_fini(env, lio, sub);
- RETURN(result);
+ return result;
}
struct lov_io_sub *lov_sub_get(const struct lu_env *env,
@@ -220,7 +217,6 @@ struct lov_io_sub *lov_sub_get(const struct lu_env *env,
struct lov_io_sub *sub = &lio->lis_subs[stripe];
LASSERT(stripe < lio->lis_stripe_count);
- ENTRY;
if (!sub->sub_io_initialized) {
sub->sub_stripe = stripe;
@@ -231,7 +227,7 @@ struct lov_io_sub *lov_sub_get(const struct lu_env *env,
lov_sub_enter(sub);
else
sub = ERR_PTR(rc);
- RETURN(sub);
+ return sub;
}
void lov_sub_put(struct lov_io_sub *sub)
@@ -249,12 +245,11 @@ static int lov_page_stripe(const struct cl_page *page)
{
struct lovsub_object *subobj;
- ENTRY;
subobj = lu2lovsub(
lu_object_locate(page->cp_child->cp_obj->co_lu.lo_header,
&lovsub_device_type));
LASSERT(subobj != NULL);
- RETURN(subobj->lso_index);
+ return subobj->lso_index;
}
struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
@@ -268,10 +263,9 @@ struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
LASSERT(cl2lov(slice->cpl_obj) == lio->lis_object);
LASSERT(lsm != NULL);
LASSERT(lio->lis_nr_subios > 0);
- ENTRY;
stripe = lov_page_stripe(page);
- RETURN(lov_sub_get(env, lio, stripe));
+ return lov_sub_get(env, lio, stripe);
}
@@ -282,7 +276,6 @@ static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
int result;
LASSERT(lio->lis_object != NULL);
- ENTRY;
/*
* Need to be optimized, we can't afford to allocate a piece of memory
@@ -297,14 +290,12 @@ static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
result = 0;
} else
result = -ENOMEM;
- RETURN(result);
+ return result;
}
static void lov_io_slice_init(struct lov_io *lio,
struct lov_object *obj, struct cl_io *io)
{
- ENTRY;
-
io->ci_result = 0;
lio->lis_object = obj;
@@ -353,8 +344,6 @@ static void lov_io_slice_init(struct lov_io *lio,
default:
LBUG();
}
-
- EXIT;
}
static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
@@ -363,7 +352,6 @@ static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
struct lov_object *lov = cl2lov(ios->cis_obj);
int i;
- ENTRY;
if (lio->lis_subs != NULL) {
for (i = 0; i < lio->lis_nr_subios; i++)
lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
@@ -375,7 +363,6 @@ static void lov_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
LASSERT(atomic_read(&lov->lo_active_ios) > 0);
if (atomic_dec_and_test(&lov->lo_active_ios))
wake_up_all(&lov->lo_waitq);
- EXIT;
}
static obd_off lov_offset_mod(obd_off val, int delta)
@@ -397,7 +384,6 @@ static int lov_io_iter_init(const struct lu_env *env,
int stripe;
int rc = 0;
- ENTRY;
endpos = lov_offset_mod(lio->lis_endpos, -1);
for (stripe = 0; stripe < lio->lis_stripe_count; stripe++) {
if (!lov_stripe_intersects(lsm, stripe, lio->lis_pos,
@@ -421,7 +407,7 @@ static int lov_io_iter_init(const struct lu_env *env,
else
break;
}
- RETURN(rc);
+ return rc;
}
static int lov_io_rw_iter_init(const struct lu_env *env,
@@ -430,12 +416,11 @@ static int lov_io_rw_iter_init(const struct lu_env *env,
struct lov_io *lio = cl2lov_io(env, ios);
struct cl_io *io = ios->cis_io;
struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
- loff_t start = io->u.ci_rw.crw_pos;
+ __u64 start = io->u.ci_rw.crw_pos;
loff_t next;
unsigned long ssize = lsm->lsm_stripe_size;
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
- ENTRY;
/* fast path for common case. */
if (lio->lis_nr_subios != 1 && !cl_io_is_append(io)) {
@@ -458,7 +443,7 @@ static int lov_io_rw_iter_init(const struct lu_env *env,
* XXX The following call should be optimized: we know, that
* [lio->lis_pos, lio->lis_endpos) intersects with exactly one stripe.
*/
- RETURN(lov_io_iter_init(env, ios));
+ return lov_io_iter_init(env, ios);
}
static int lov_io_call(const struct lu_env *env, struct lov_io *lio,
@@ -468,7 +453,6 @@ static int lov_io_call(const struct lu_env *env, struct lov_io *lio,
struct lov_io_sub *sub;
int rc = 0;
- ENTRY;
list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
lov_sub_enter(sub);
rc = iofunc(sub->sub_env, sub->sub_io);
@@ -479,24 +463,21 @@ static int lov_io_call(const struct lu_env *env, struct lov_io *lio,
if (parent->ci_result == 0)
parent->ci_result = sub->sub_io->ci_result;
}
- RETURN(rc);
+ return rc;
}
static int lov_io_lock(const struct lu_env *env, const struct cl_io_slice *ios)
{
- ENTRY;
- RETURN(lov_io_call(env, cl2lov_io(env, ios), cl_io_lock));
+ return lov_io_call(env, cl2lov_io(env, ios), cl_io_lock);
}
static int lov_io_start(const struct lu_env *env, const struct cl_io_slice *ios)
{
- ENTRY;
- RETURN(lov_io_call(env, cl2lov_io(env, ios), cl_io_start));
+ return lov_io_call(env, cl2lov_io(env, ios), cl_io_start);
}
static int lov_io_end_wrapper(const struct lu_env *env, struct cl_io *io)
{
- ENTRY;
/*
* It's possible that lov_io_start() wasn't called against this
* sub-io, either because previous sub-io failed, or upper layer
@@ -506,19 +487,19 @@ static int lov_io_end_wrapper(const struct lu_env *env, struct cl_io *io)
cl_io_end(env, io);
else
io->ci_state = CIS_IO_FINISHED;
- RETURN(0);
+ return 0;
}
static int lov_io_iter_fini_wrapper(const struct lu_env *env, struct cl_io *io)
{
cl_io_iter_fini(env, io);
- RETURN(0);
+ return 0;
}
static int lov_io_unlock_wrapper(const struct lu_env *env, struct cl_io *io)
{
cl_io_unlock(env, io);
- RETURN(0);
+ return 0;
}
static void lov_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
@@ -535,12 +516,10 @@ static void lov_io_iter_fini(const struct lu_env *env,
struct lov_io *lio = cl2lov_io(env, ios);
int rc;
- ENTRY;
rc = lov_io_call(env, lio, lov_io_iter_fini_wrapper);
LASSERT(rc == 0);
while (!list_empty(&lio->lis_active))
list_del_init(lio->lis_active.next);
- EXIT;
}
static void lov_io_unlock(const struct lu_env *env,
@@ -548,10 +527,8 @@ static void lov_io_unlock(const struct lu_env *env,
{
int rc;
- ENTRY;
rc = lov_io_call(env, cl2lov_io(env, ios), lov_io_unlock_wrapper);
LASSERT(rc == 0);
- EXIT;
}
@@ -596,7 +573,7 @@ static int lov_io_submit(const struct lu_env *env,
int rc = 0;
int alloc =
!(current->flags & PF_MEMALLOC);
- ENTRY;
+
if (lio->lis_active_subios == 1) {
int idx = lio->lis_single_subio_index;
struct lov_io_sub *sub;
@@ -608,7 +585,7 @@ static int lov_io_submit(const struct lu_env *env,
rc = cl_io_submit_rw(sub->sub_env, sub->sub_io,
crt, queue);
lov_sub_put(sub);
- RETURN(rc);
+ return rc;
}
LASSERT(lio->lis_subs != NULL);
@@ -616,7 +593,7 @@ static int lov_io_submit(const struct lu_env *env,
OBD_ALLOC_LARGE(stripes_qin,
sizeof(*stripes_qin) * lio->lis_nr_subios);
if (stripes_qin == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
for (stripe = 0; stripe < lio->lis_nr_subios; stripe++)
cl_page_list_init(&stripes_qin[stripe]);
@@ -682,7 +659,7 @@ static int lov_io_submit(const struct lu_env *env,
mutex_unlock(&ld->ld_mutex);
}
- RETURN(rc);
+ return rc;
#undef QIN
}
@@ -696,7 +673,6 @@ static int lov_io_prepare_write(const struct lu_env *env,
struct lov_io_sub *sub;
int result;
- ENTRY;
sub = lov_page_subio(env, lio, slice);
if (!IS_ERR(sub)) {
result = cl_io_prepare_write(sub->sub_env, sub->sub_io,
@@ -704,7 +680,7 @@ static int lov_io_prepare_write(const struct lu_env *env,
lov_sub_put(sub);
} else
result = PTR_ERR(sub);
- RETURN(result);
+ return result;
}
static int lov_io_commit_write(const struct lu_env *env,
@@ -717,7 +693,6 @@ static int lov_io_commit_write(const struct lu_env *env,
struct lov_io_sub *sub;
int result;
- ENTRY;
sub = lov_page_subio(env, lio, slice);
if (!IS_ERR(sub)) {
result = cl_io_commit_write(sub->sub_env, sub->sub_io,
@@ -725,7 +700,7 @@ static int lov_io_commit_write(const struct lu_env *env,
lov_sub_put(sub);
} else
result = PTR_ERR(sub);
- RETURN(result);
+ return result;
}
static int lov_io_fault_start(const struct lu_env *env,
@@ -735,13 +710,12 @@ static int lov_io_fault_start(const struct lu_env *env,
struct lov_io *lio;
struct lov_io_sub *sub;
- ENTRY;
fio = &ios->cis_io->u.ci_fault;
lio = cl2lov_io(env, ios);
sub = lov_sub_get(env, lio, lov_page_stripe(fio->ft_page));
sub->sub_io->u.ci_fault.ft_nob = fio->ft_nob;
lov_sub_put(sub);
- RETURN(lov_io_start(env, ios));
+ return lov_io_start(env, ios);
}
static void lov_io_fsync_end(const struct lu_env *env,
@@ -750,7 +724,6 @@ static void lov_io_fsync_end(const struct lu_env *env,
struct lov_io *lio = cl2lov_io(env, ios);
struct lov_io_sub *sub;
unsigned int *written = &ios->cis_io->u.ci_fsync.fi_nr_written;
- ENTRY;
*written = 0;
list_for_each_entry(sub, &lio->lis_active, sub_linkage) {
@@ -763,7 +736,6 @@ static void lov_io_fsync_end(const struct lu_env *env,
if (subio->ci_result == 0)
*written += subio->u.ci_fsync.fi_nr_written;
}
- RETURN_EXIT;
}
static const struct cl_io_operations lov_io_ops = {
@@ -839,11 +811,9 @@ static void lov_empty_io_fini(const struct lu_env *env,
const struct cl_io_slice *ios)
{
struct lov_object *lov = cl2lov(ios->cis_obj);
- ENTRY;
if (atomic_dec_and_test(&lov->lo_active_ios))
wake_up_all(&lov->lo_waitq);
- EXIT;
}
static void lov_empty_impossible(const struct lu_env *env,
@@ -913,7 +883,6 @@ int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj,
struct lov_io *lio = lov_env_io(env);
struct lov_object *lov = cl2lov(obj);
- ENTRY;
INIT_LIST_HEAD(&lio->lis_active);
lov_io_slice_init(lio, lov, io);
if (io->ci_result == 0) {
@@ -923,7 +892,7 @@ int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj,
atomic_inc(&lov->lo_active_ios);
}
}
- RETURN(io->ci_result);
+ return io->ci_result;
}
int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
@@ -932,7 +901,6 @@ int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
struct lov_object *lov = cl2lov(obj);
struct lov_io *lio = lov_env_io(env);
int result;
- ENTRY;
lio->lis_object = lov;
switch (io->ci_type) {
@@ -961,7 +929,40 @@ int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
}
io->ci_result = result < 0 ? result : 0;
- RETURN(result != 0);
+ return result != 0;
}
+int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io)
+{
+ struct lov_object *lov = cl2lov(obj);
+ struct lov_io *lio = lov_env_io(env);
+ int result;
+
+ LASSERT(lov->lo_lsm != NULL);
+ lio->lis_object = lov;
+
+ switch (io->ci_type) {
+ default:
+ LASSERTF(0, "invalid type %d\n", io->ci_type);
+ case CIT_MISC:
+ case CIT_FSYNC:
+ result = +1;
+ break;
+ case CIT_SETATTR:
+ case CIT_READ:
+ case CIT_WRITE:
+ case CIT_FAULT:
+ /* TODO: need to restore the file. */
+ result = -EBADF;
+ break;
+ }
+ if (result == 0) {
+ cl_io_slice_add(io, &lio->lis_cl, obj, &lov_empty_io_ops);
+ atomic_inc(&lov->lo_active_ios);
+ }
+
+ io->ci_result = result < 0 ? result : 0;
+ return result != 0;
+}
/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c
index bdf3334e0c9..ec297e87c2a 100644
--- a/drivers/staging/lustre/lustre/lov/lov_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lov_lock.c
@@ -110,7 +110,6 @@ static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
LASSERT(cl_lock_is_mutexed(parent));
LASSERT(cl_lock_is_mutexed(sublock));
- ENTRY;
lsl = cl2sub_lock(sublock);
/*
@@ -132,7 +131,6 @@ static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
rc = lov_sublock_modify(env, lck, lsl, &sublock->cll_descr, idx);
LASSERT(rc == 0); /* there is no way this can fail, currently */
- EXIT;
}
static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
@@ -145,7 +143,6 @@ static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
struct lov_lock_link *link;
LASSERT(idx < lck->lls_nr);
- ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, __GFP_IO);
if (link != NULL) {
@@ -179,7 +176,7 @@ static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
} else
sublock = ERR_PTR(-ENOMEM);
- RETURN(sublock);
+ return sublock;
}
static void lov_sublock_unlock(const struct lu_env *env,
@@ -187,11 +184,9 @@ static void lov_sublock_unlock(const struct lu_env *env,
struct cl_lock_closure *closure,
struct lov_sublock_env *subenv)
{
- ENTRY;
lov_sublock_env_put(subenv);
lsl->lss_active = NULL;
cl_lock_disclosure(env, closure);
- EXIT;
}
static int lov_sublock_lock(const struct lu_env *env,
@@ -203,7 +198,6 @@ static int lov_sublock_lock(const struct lu_env *env,
struct lovsub_lock *sublock;
struct cl_lock *child;
int result = 0;
- ENTRY;
LASSERT(list_empty(&closure->clc_list));
@@ -243,7 +237,7 @@ static int lov_sublock_lock(const struct lu_env *env,
}
}
}
- RETURN(result);
+ return result;
}
/**
@@ -267,8 +261,6 @@ static int lov_subresult(int result, int rc)
int result_rank;
int rc_rank;
- ENTRY;
-
LASSERTF(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT,
"result = %d", result);
LASSERTF(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT,
@@ -281,7 +273,7 @@ static int lov_subresult(int result, int rc)
if (result_rank < rc_rank)
result = rc;
- RETURN(result);
+ return result;
}
/**
@@ -307,8 +299,6 @@ static int lov_lock_sub_init(const struct lu_env *env,
struct lov_layout_raid0 *r0 = lov_r0(loo);
struct cl_lock *parent = lck->lls_cl.cls_lock;
- ENTRY;
-
lck->lls_orig = parent->cll_descr;
file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
file_end = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;
@@ -325,7 +315,7 @@ static int lov_lock_sub_init(const struct lu_env *env,
LASSERT(nr > 0);
OBD_ALLOC_LARGE(lck->lls_sub, nr * sizeof lck->lls_sub[0]);
if (lck->lls_sub == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
lck->lls_nr = nr;
/*
@@ -396,7 +386,7 @@ static int lov_lock_sub_init(const struct lu_env *env,
* because enqueue will create them anyway. Main duty of this function
* is to fill in sub-lock descriptions in a race free manner.
*/
- RETURN(result);
+ return result;
}
static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
@@ -405,7 +395,6 @@ static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
struct cl_lock *parent = lck->lls_cl.cls_lock;
LASSERT(cl_lock_is_mutexed(parent));
- ENTRY;
if (lck->lls_sub[i].sub_flags & LSF_HELD) {
struct cl_lock *sublock;
@@ -442,7 +431,7 @@ static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
* sub-lock is destroyed.
*/
}
- RETURN(rc);
+ return rc;
}
static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
@@ -451,7 +440,6 @@ static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
struct cl_lock *parent = lck->lls_cl.cls_lock;
LASSERT(cl_lock_is_mutexed(parent));
- ENTRY;
if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
struct cl_lock *sublock;
@@ -468,7 +456,6 @@ static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
cl_lock_user_add(env, sublock);
cl_lock_put(env, sublock);
}
- EXIT;
}
static void lov_lock_fini(const struct lu_env *env,
@@ -477,7 +464,6 @@ static void lov_lock_fini(const struct lu_env *env,
struct lov_lock *lck;
int i;
- ENTRY;
lck = cl2lov_lock(slice);
LASSERT(lck->lls_nr_filled == 0);
if (lck->lls_sub != NULL) {
@@ -491,7 +477,6 @@ static void lov_lock_fini(const struct lu_env *env,
lck->lls_nr * sizeof lck->lls_sub[0]);
}
OBD_SLAB_FREE_PTR(lck, lov_lock_kmem);
- EXIT;
}
static int lov_lock_enqueue_wait(const struct lu_env *env,
@@ -500,14 +485,13 @@ static int lov_lock_enqueue_wait(const struct lu_env *env,
{
struct cl_lock *lock = lck->lls_cl.cls_lock;
int result;
- ENTRY;
LASSERT(cl_lock_is_mutexed(lock));
cl_lock_mutex_put(env, lock);
result = cl_lock_enqueue_wait(env, sublock, 0);
cl_lock_mutex_get(env, lock);
- RETURN(result ?: CLO_REPEAT);
+ return result ?: CLO_REPEAT;
}
/**
@@ -522,7 +506,6 @@ static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
struct cl_io *io, __u32 enqflags, int last)
{
int result;
- ENTRY;
/* first, try to enqueue a sub-lock ... */
result = cl_enqueue_try(env, sublock, io, enqflags);
@@ -541,7 +524,7 @@ static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
if ((result == CLO_WAIT) && (sublock->cll_state <= CLS_HELD) &&
(enqflags & CEF_ASYNC) && (!last || (enqflags & CEF_AGL)))
result = 0;
- RETURN(result);
+ return result;
}
/**
@@ -600,8 +583,6 @@ static int lov_lock_enqueue(const struct lu_env *env,
int result;
enum cl_lock_state minstate;
- ENTRY;
-
for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
int rc;
struct lovsub_lock *sub;
@@ -680,7 +661,7 @@ static int lov_lock_enqueue(const struct lu_env *env,
break;
}
cl_lock_closure_fini(closure);
- RETURN(result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT);
+ return result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT;
}
static int lov_lock_unuse(const struct lu_env *env,
@@ -691,8 +672,6 @@ static int lov_lock_unuse(const struct lu_env *env,
int i;
int result;
- ENTRY;
-
for (result = 0, i = 0; i < lck->lls_nr; ++i) {
int rc;
struct lovsub_lock *sub;
@@ -728,7 +707,7 @@ static int lov_lock_unuse(const struct lu_env *env,
result = -ESTALE;
}
cl_lock_closure_fini(closure);
- RETURN(result);
+ return result;
}
@@ -740,8 +719,6 @@ static void lov_lock_cancel(const struct lu_env *env,
int i;
int result;
- ENTRY;
-
for (result = 0, i = 0; i < lck->lls_nr; ++i) {
int rc;
struct lovsub_lock *sub;
@@ -802,8 +779,6 @@ static int lov_lock_wait(const struct lu_env *env,
int result;
int i;
- ENTRY;
-
again:
for (result = 0, minstate = CLS_FREEING, i = 0, reenqueued = 0;
i < lck->lls_nr; ++i) {
@@ -839,7 +814,7 @@ again:
if (result == 0 && reenqueued != 0)
goto again;
cl_lock_closure_fini(closure);
- RETURN(result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT);
+ return result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT;
}
static int lov_lock_use(const struct lu_env *env,
@@ -851,7 +826,6 @@ static int lov_lock_use(const struct lu_env *env,
int i;
LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
- ENTRY;
for (result = 0, i = 0; i < lck->lls_nr; ++i) {
int rc;
@@ -908,7 +882,7 @@ static int lov_lock_use(const struct lu_env *env,
result = -ESTALE;
}
cl_lock_closure_fini(closure);
- RETURN(result);
+ return result;
}
#if 0
@@ -1016,8 +990,6 @@ static int lov_lock_fits_into(const struct lu_env *env,
LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
LASSERT(lov->lls_nr > 0);
- ENTRY;
-
/* for top lock, it's necessary to match enq flags otherwise it will
* run into problem if a sublock is missing and reenqueue. */
if (need->cld_enq_flags != lov->lls_orig.cld_enq_flags)
@@ -1055,7 +1027,7 @@ static int lov_lock_fits_into(const struct lu_env *env,
PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
result);
- RETURN(result);
+ return result;
}
void lov_lock_unlink(const struct lu_env *env,
@@ -1066,7 +1038,6 @@ void lov_lock_unlink(const struct lu_env *env,
LASSERT(cl_lock_is_mutexed(parent));
LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
- ENTRY;
list_del_init(&link->lll_list);
LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
@@ -1077,7 +1048,6 @@ void lov_lock_unlink(const struct lu_env *env,
lu_ref_del(&parent->cll_reference, "lov-child", sub->lss_cl.cls_lock);
cl_lock_put(env, parent);
OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
- EXIT;
}
struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
@@ -1087,13 +1057,12 @@ struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
struct lov_lock_link *scan;
LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
- ENTRY;
list_for_each_entry(scan, &sub->lss_parents, lll_list) {
if (scan->lll_super == lck)
- RETURN(scan);
+ return scan;
}
- RETURN(NULL);
+ return NULL;
}
/**
@@ -1120,7 +1089,6 @@ static void lov_lock_delete(const struct lu_env *env,
int i;
LASSERT(slice->cls_lock->cll_state == CLS_FREEING);
- ENTRY;
for (i = 0; i < lck->lls_nr; ++i) {
struct lov_lock_sub *lls = &lck->lls_sub[i];
@@ -1150,7 +1118,6 @@ static void lov_lock_delete(const struct lu_env *env,
}
cl_lock_closure_fini(closure);
- EXIT;
}
static int lov_lock_print(const struct lu_env *env, void *cookie,
@@ -1192,14 +1159,13 @@ int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
struct lov_lock *lck;
int result;
- ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, __GFP_IO);
if (lck != NULL) {
cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
result = lov_lock_sub_init(env, lck, io);
} else
result = -ENOMEM;
- RETURN(result);
+ return result;
}
static void lov_empty_lock_fini(const struct lu_env *env,
@@ -1228,14 +1194,13 @@ int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
struct lov_lock *lck;
int result = -ENOMEM;
- ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, __GFP_IO);
if (lck != NULL) {
cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
lck->lls_orig = lock->cll_descr;
result = 0;
}
- RETURN(result);
+ return result;
}
static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
diff --git a/drivers/staging/lustre/lustre/lov/lov_log.c b/drivers/staging/lustre/lustre/lov/lov_log.c
index 63b7f8d3182..3eedd935d1b 100644
--- a/drivers/staging/lustre/lustre/lov/lov_log.c
+++ b/drivers/staging/lustre/lustre/lov/lov_log.c
@@ -71,7 +71,6 @@ static int lov_llog_origin_add(const struct lu_env *env,
struct obd_device *obd = ctxt->loc_obd;
struct lov_obd *lov = &obd->u.lov;
int i, rc = 0, cookies = 0;
- ENTRY;
LASSERTF(logcookies && numcookies >= lsm->lsm_stripe_count,
"logcookies %p, numcookies %d lsm->lsm_stripe_count %d \n",
@@ -118,7 +117,7 @@ static int lov_llog_origin_add(const struct lu_env *env,
/* Note that rc is always 1 if llog_obd_add was successful */
cookies += rc;
}
- RETURN(cookies);
+ return cookies;
}
static int lov_llog_origin_connect(struct llog_ctxt *ctxt,
@@ -129,7 +128,6 @@ static int lov_llog_origin_connect(struct llog_ctxt *ctxt,
struct obd_device *obd = ctxt->loc_obd;
struct lov_obd *lov = &obd->u.lov;
int i, rc = 0, err = 0;
- ENTRY;
obd_getref(obd);
for (i = 0; i < lov->desc.ld_tgt_count; i++) {
@@ -154,7 +152,7 @@ static int lov_llog_origin_connect(struct llog_ctxt *ctxt,
}
obd_putref(obd);
- RETURN(err);
+ return err;
}
/* the replicators commit callback */
@@ -167,7 +165,6 @@ static int lov_llog_repl_cancel(const struct lu_env *env,
struct lov_obd *lov;
struct obd_device *obd = ctxt->loc_obd;
int rc = 0, i;
- ENTRY;
LASSERT(lsm != NULL);
LASSERT(count == lsm->lsm_stripe_count);
@@ -194,7 +191,7 @@ static int lov_llog_repl_cancel(const struct lu_env *env,
}
}
obd_putref(obd);
- RETURN(rc);
+ return rc;
}
static struct llog_operations lov_mds_ost_orig_logops = {
@@ -212,13 +209,12 @@ int lov_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
struct lov_obd *lov = &obd->u.lov;
struct obd_device *child;
int i, rc = 0;
- ENTRY;
LASSERT(olg == &obd->obd_olg);
rc = llog_setup(NULL, obd, olg, LLOG_MDS_OST_ORIG_CTXT, disk_obd,
&lov_mds_ost_orig_logops);
if (rc)
- RETURN(rc);
+ return rc;
rc = llog_setup(NULL, obd, olg, LLOG_SIZE_REPL_CTXT, disk_obd,
&lov_size_repl_logops);
@@ -261,8 +257,6 @@ int lov_llog_finish(struct obd_device *obd, int count)
{
struct llog_ctxt *ctxt;
- ENTRY;
-
/* cleanup our llogs only if the ctxts have been setup
* (client lov doesn't setup, mds lov does). */
ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
@@ -274,5 +268,5 @@ int lov_llog_finish(struct obd_device *obd, int count)
llog_cleanup(NULL, ctxt);
/* lov->tgt llogs are cleaned during osc_cleanup. */
- RETURN(0);
+ return 0;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index ddbac122026..d204fedea34 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -109,7 +109,7 @@ int lov_merge_lvb_kms(struct lov_stripe_md *lsm,
lvb->lvb_mtime = current_mtime;
lvb->lvb_atime = current_atime;
lvb->lvb_ctime = current_ctime;
- RETURN(rc);
+ return rc;
}
/** Merge the lock value block(&lvb) attributes from each of the stripes in a
@@ -127,7 +127,6 @@ int lov_merge_lvb(struct obd_export *exp,
int rc;
__u64 kms;
- ENTRY;
lov_stripe_lock(lsm);
rc = lov_merge_lvb_kms(lsm, lvb, &kms);
lov_stripe_unlock(lsm);
@@ -137,7 +136,7 @@ int lov_merge_lvb(struct obd_export *exp,
CDEBUG(D_INODE, "merged for ID "DOSTID" s="LPU64" m="LPU64" a="LPU64
" c="LPU64" b="LPU64"\n", POSTID(&lsm->lsm_oi), lvb->lvb_size,
lvb->lvb_mtime, lvb->lvb_atime, lvb->lvb_ctime, lvb->lvb_blocks);
- RETURN(rc);
+ return rc;
}
/* Must be called under the lov_stripe_lock() */
@@ -147,7 +146,6 @@ int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
struct lov_oinfo *loi;
int stripe = 0;
__u64 kms;
- ENTRY;
LASSERT(spin_is_locked(&lsm->lsm_lock));
LASSERT(lsm->lsm_lock_owner == current_pid());
@@ -162,7 +160,7 @@ int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
loi->loi_kms, kms);
loi_kms_set(loi, loi->loi_lvb.lvb_size = kms);
}
- RETURN(0);
+ return 0;
}
if (size > 0)
@@ -175,7 +173,7 @@ int lov_adjust_kms(struct obd_export *exp, struct lov_stripe_md *lsm,
if (kms > loi->loi_kms)
loi_kms_set(loi, kms);
- RETURN(0);
+ return 0;
}
void lov_merge_attrs(struct obdo *tgt, struct obdo *src, obd_valid valid,
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index ef7ff091f04..0b47aba1332 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -131,19 +131,18 @@ int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
struct obd_device *tgt_obd;
static struct obd_uuid lov_osc_uuid = { "LOV_OSC_UUID" };
struct obd_import *imp;
- proc_dir_entry_t *lov_proc_dir;
+ struct proc_dir_entry *lov_proc_dir;
int rc;
- ENTRY;
if (!lov->lov_tgts[index])
- RETURN(-EINVAL);
+ return -EINVAL;
tgt_uuid = &lov->lov_tgts[index]->ltd_uuid;
tgt_obd = lov->lov_tgts[index]->ltd_obd;
if (!tgt_obd->obd_set_up) {
CERROR("Target %s not set up\n", obd_uuid2str(tgt_uuid));
- RETURN(-EINVAL);
+ return -EINVAL;
}
/* override the sp_me from lov */
@@ -168,14 +167,14 @@ int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
if (rc) {
CERROR("Target %s register_observer error %d\n",
obd_uuid2str(tgt_uuid), rc);
- RETURN(rc);
+ return rc;
}
if (imp->imp_invalid) {
CDEBUG(D_CONFIG, "not connecting OSC %s; administratively "
"disabled\n", obd_uuid2str(tgt_uuid));
- RETURN(0);
+ return 0;
}
rc = obd_connect(NULL, &lov->lov_tgts[index]->ltd_exp, tgt_obd,
@@ -183,7 +182,7 @@ int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
if (rc || !lov->lov_tgts[index]->ltd_exp) {
CERROR("Target %s connect error %d\n",
obd_uuid2str(tgt_uuid), rc);
- RETURN(-ENODEV);
+ return -ENODEV;
}
lov->lov_tgts[index]->ltd_reap = 0;
@@ -194,7 +193,7 @@ int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
lov_proc_dir = obd->obd_proc_private;
if (lov_proc_dir) {
struct obd_device *osc_obd = lov->lov_tgts[index]->ltd_exp->exp_obd;
- proc_dir_entry_t *osc_symlink;
+ struct proc_dir_entry *osc_symlink;
LASSERT(osc_obd != NULL);
LASSERT(osc_obd->obd_magic == OBD_DEVICE_MAGIC);
@@ -215,7 +214,7 @@ int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
}
}
- RETURN(0);
+ return 0;
}
static int lov_connect(const struct lu_env *env,
@@ -227,13 +226,12 @@ static int lov_connect(const struct lu_env *env,
struct lov_tgt_desc *tgt;
struct lustre_handle conn;
int i, rc;
- ENTRY;
CDEBUG(D_CONFIG, "connect #%d\n", lov->lov_connects);
rc = class_connect(&conn, obd, cluuid);
if (rc)
- RETURN(rc);
+ return rc;
*exp = class_conn2export(&conn);
@@ -270,16 +268,15 @@ static int lov_connect(const struct lu_env *env,
}
obd_putref(obd);
- RETURN(0);
+ return 0;
}
static int lov_disconnect_obd(struct obd_device *obd, struct lov_tgt_desc *tgt)
{
- proc_dir_entry_t *lov_proc_dir;
+ struct proc_dir_entry *lov_proc_dir;
struct lov_obd *lov = &obd->u.lov;
struct obd_device *osc_obd;
int rc;
- ENTRY;
osc_obd = class_exp2obd(tgt->ltd_exp);
CDEBUG(D_CONFIG, "%s: disconnecting target %s\n",
@@ -315,7 +312,7 @@ static int lov_disconnect_obd(struct obd_device *obd, struct lov_tgt_desc *tgt)
}
tgt->ltd_exp = NULL;
- RETURN(0);
+ return 0;
}
static int lov_disconnect(struct obd_export *exp)
@@ -323,7 +320,6 @@ static int lov_disconnect(struct obd_export *exp)
struct obd_device *obd = class_exp2obd(exp);
struct lov_obd *lov = &obd->u.lov;
int i, rc;
- ENTRY;
if (!lov->lov_tgts)
goto out;
@@ -350,7 +346,7 @@ static int lov_disconnect(struct obd_export *exp)
out:
rc = class_disconnect(exp); /* bz 9811 */
- RETURN(rc);
+ return rc;
}
/* Error codes:
@@ -366,7 +362,6 @@ static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid,
struct lov_obd *lov = &obd->u.lov;
struct lov_tgt_desc *tgt;
int index, activate, active;
- ENTRY;
CDEBUG(D_INFO, "Searching in lov %p for uuid %s event(%d)\n",
lov, uuid->uuid, ev);
@@ -438,7 +433,7 @@ static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid,
out:
obd_putref(obd);
- RETURN(index);
+ return index;
}
static int lov_notify(struct obd_device *obd, struct obd_device *watched,
@@ -446,12 +441,11 @@ static int lov_notify(struct obd_device *obd, struct obd_device *watched,
{
int rc = 0;
struct lov_obd *lov = &obd->u.lov;
- ENTRY;
down_read(&lov->lov_notify_lock);
if (!lov->lov_connects) {
up_read(&lov->lov_notify_lock);
- RETURN(rc);
+ return rc;
}
if (ev == OBD_NOTIFY_ACTIVE || ev == OBD_NOTIFY_INACTIVE ||
@@ -465,7 +459,7 @@ static int lov_notify(struct obd_device *obd, struct obd_device *watched,
CERROR("unexpected notification of %s %s!\n",
watched->obd_type->typ_name,
watched->obd_name);
- RETURN(-EINVAL);
+ return -EINVAL;
}
uuid = &watched->u.cli.cl_target_uuid;
@@ -477,7 +471,7 @@ static int lov_notify(struct obd_device *obd, struct obd_device *watched,
up_read(&lov->lov_notify_lock);
CERROR("event(%d) of %s failed: %d\n", ev,
obd_uuid2str(uuid), rc);
- RETURN(rc);
+ return rc;
}
/* active event should be pass lov target index as data */
data = &rc;
@@ -520,7 +514,7 @@ static int lov_notify(struct obd_device *obd, struct obd_device *watched,
}
up_read(&lov->lov_notify_lock);
- RETURN(rc);
+ return rc;
}
static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
@@ -530,7 +524,6 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
struct lov_tgt_desc *tgt;
struct obd_device *tgt_obd;
int rc;
- ENTRY;
CDEBUG(D_CONFIG, "uuid:%s idx:%d gen:%d active:%d\n",
uuidp->uuid, index, gen, active);
@@ -538,13 +531,13 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
if (gen <= 0) {
CERROR("request to add OBD %s with invalid generation: %d\n",
uuidp->uuid, gen);
- RETURN(-EINVAL);
+ return -EINVAL;
}
tgt_obd = class_find_client_obd(uuidp, LUSTRE_OSC_NAME,
&obd->obd_uuid);
if (tgt_obd == NULL)
- RETURN(-EINVAL);
+ return -EINVAL;
mutex_lock(&lov->lov_lock);
@@ -553,7 +546,7 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
CERROR("UUID %s already assigned at LOV target index %d\n",
obd_uuid2str(&tgt->ltd_uuid), index);
mutex_unlock(&lov->lov_lock);
- RETURN(-EEXIST);
+ return -EEXIST;
}
if (index >= lov->lov_tgt_size) {
@@ -567,7 +560,7 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
OBD_ALLOC(newtgts, sizeof(*newtgts) * newsize);
if (newtgts == NULL) {
mutex_unlock(&lov->lov_lock);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
if (lov->lov_tgt_size) {
@@ -590,14 +583,14 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
OBD_ALLOC_PTR(tgt);
if (!tgt) {
mutex_unlock(&lov->lov_lock);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
rc = lov_ost_pool_add(&lov->lov_packed, index, lov->lov_tgt_size);
if (rc) {
mutex_unlock(&lov->lov_lock);
OBD_FREE_PTR(tgt);
- RETURN(rc);
+ return rc;
}
tgt->ltd_uuid = *uuidp;
@@ -621,7 +614,7 @@ static int lov_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
/* lov_connect hasn't been called yet. We'll do the
lov_connect_obd on this target when that fn first runs,
because we don't know the connect flags yet. */
- RETURN(0);
+ return 0;
}
obd_getref(obd);
@@ -654,7 +647,7 @@ out:
lov_del_target(obd, index, 0, 0);
}
obd_putref(obd);
- RETURN(rc);
+ return rc;
}
/* Schedule a target for deletion */
@@ -664,12 +657,11 @@ int lov_del_target(struct obd_device *obd, __u32 index,
struct lov_obd *lov = &obd->u.lov;
int count = lov->desc.ld_tgt_count;
int rc = 0;
- ENTRY;
if (index >= count) {
CERROR("LOV target index %d >= number of LOV OBDs %d.\n",
index, count);
- RETURN(-EINVAL);
+ return -EINVAL;
}
/* to make sure there's no ongoing lov_notify() now */
@@ -700,7 +692,7 @@ out:
obd_putref(obd);
up_write(&lov->lov_notify_lock);
- RETURN(rc);
+ return rc;
}
static void __lov_del_obd(struct obd_device *obd, struct lov_tgt_desc *tgt)
@@ -780,11 +772,10 @@ int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
struct lov_desc *desc;
struct lov_obd *lov = &obd->u.lov;
int rc;
- ENTRY;
if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
CERROR("LOV setup requires a descriptor\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
desc = (struct lov_desc *)lustre_cfg_buf(lcfg, 1);
@@ -792,7 +783,7 @@ int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
if (sizeof(*desc) > LUSTRE_CFG_BUFLEN(lcfg, 1)) {
CERROR("descriptor size wrong: %d > %d\n",
(int)sizeof(*desc), LUSTRE_CFG_BUFLEN(lcfg, 1));
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (desc->ld_magic != LOV_DESC_MAGIC) {
@@ -803,7 +794,7 @@ int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
} else {
CERROR("%s: Bad lov desc magic: %#x\n",
obd->obd_name, desc->ld_magic);
- RETURN(-EINVAL);
+ return -EINVAL;
}
}
@@ -836,11 +827,11 @@ int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
lprocfs_obd_setup(obd, lvars.obd_vars);
#ifdef LPROCFS
{
- int rc;
+ int rc1;
- rc = lprocfs_seq_create(obd->obd_proc_entry, "target_obd",
+ rc1 = lprocfs_seq_create(obd->obd_proc_entry, "target_obd",
0444, &lov_proc_target_fops, obd);
- if (rc)
+ if (rc1)
CWARN("Error adding the target_obd file\n");
}
#endif
@@ -848,7 +839,7 @@ int lov_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
obd->obd_proc_entry,
NULL, NULL);
- RETURN(0);
+ return 0;
out:
return rc;
@@ -859,8 +850,6 @@ static int lov_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
int rc = 0;
struct lov_obd *lov = &obd->u.lov;
- ENTRY;
-
switch (stage) {
case OBD_CLEANUP_EARLY: {
int i;
@@ -878,7 +867,7 @@ static int lov_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
CERROR("failed to cleanup llogging subsystems\n");
break;
}
- RETURN(rc);
+ return rc;
}
static int lov_cleanup(struct obd_device *obd)
@@ -886,7 +875,6 @@ static int lov_cleanup(struct obd_device *obd)
struct lov_obd *lov = &obd->u.lov;
struct list_head *pos, *tmp;
struct pool_desc *pool;
- ENTRY;
list_for_each_safe(pos, tmp, &lov->lov_pool_list) {
pool = list_entry(pos, struct pool_desc, pool_list);
@@ -925,7 +913,7 @@ static int lov_cleanup(struct obd_device *obd)
lov->lov_tgt_size);
lov->lov_tgt_size = 0;
}
- RETURN(0);
+ return 0;
}
int lov_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg,
@@ -934,7 +922,6 @@ int lov_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg,
struct obd_uuid obd_uuid;
int cmd;
int rc = 0;
- ENTRY;
switch(cmd = lcfg->lcfg_command) {
case LCFG_LOV_ADD_OBD:
@@ -990,7 +977,7 @@ int lov_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg,
}
}
out:
- RETURN(rc);
+ return rc;
}
static int lov_recreate(struct obd_export *exp, struct obdo *src_oa,
@@ -1000,14 +987,13 @@ static int lov_recreate(struct obd_export *exp, struct obdo *src_oa,
struct lov_obd *lov = &exp->exp_obd->u.lov;
unsigned ost_idx;
int rc, i;
- ENTRY;
LASSERT(src_oa->o_valid & OBD_MD_FLFLAGS &&
src_oa->o_flags & OBD_FL_RECREATE_OBJS);
OBD_ALLOC(obj_mdp, sizeof(*obj_mdp));
if (obj_mdp == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
ost_idx = src_oa->o_nlink;
lsm = *ea;
@@ -1032,7 +1018,7 @@ static int lov_recreate(struct obd_export *exp, struct obdo *src_oa,
src_oa, &obj_mdp, oti);
out:
OBD_FREE(obj_mdp, sizeof(*obj_mdp));
- RETURN(rc);
+ return rc;
}
/* the LOV expects oa->o_id to be set to the LOV object id */
@@ -1042,11 +1028,10 @@ static int lov_create(const struct lu_env *env, struct obd_export *exp,
{
struct lov_obd *lov;
int rc = 0;
- ENTRY;
LASSERT(ea != NULL);
if (exp == NULL)
- RETURN(-EINVAL);
+ return -EINVAL;
if ((src_oa->o_valid & OBD_MD_FLFLAGS) &&
src_oa->o_flags == OBD_FL_DELORPHAN) {
@@ -1056,7 +1041,7 @@ static int lov_create(const struct lu_env *env, struct obd_export *exp,
lov = &exp->exp_obd->u.lov;
if (!lov->desc.ld_active_tgt_count)
- RETURN(-EIO);
+ return -EIO;
obd_getref(exp->exp_obd);
/* Recreate a specific object id at the given OST index */
@@ -1066,7 +1051,7 @@ static int lov_create(const struct lu_env *env, struct obd_export *exp,
}
obd_putref(exp->exp_obd);
- RETURN(rc);
+ return rc;
}
#define ASSERT_LSM_MAGIC(lsmp) \
@@ -1088,12 +1073,11 @@ static int lov_destroy(const struct lu_env *env, struct obd_export *exp,
struct list_head *pos;
struct lov_obd *lov;
int rc = 0, err = 0;
- ENTRY;
ASSERT_LSM_MAGIC(lsm);
if (!exp || !exp->exp_obd)
- RETURN(-ENODEV);
+ return -ENODEV;
if (oa->o_valid & OBD_MD_FLCOOKIE) {
LASSERT(oti);
@@ -1133,7 +1117,7 @@ static int lov_destroy(const struct lu_env *env, struct obd_export *exp,
err = lov_fini_destroy_set(set);
out:
obd_putref(exp->exp_obd);
- RETURN(rc ? rc : err);
+ return rc ? rc : err;
}
static int lov_getattr(const struct lu_env *env, struct obd_export *exp,
@@ -1144,19 +1128,18 @@ static int lov_getattr(const struct lu_env *env, struct obd_export *exp,
struct list_head *pos;
struct lov_obd *lov;
int err = 0, rc = 0;
- ENTRY;
LASSERT(oinfo);
ASSERT_LSM_MAGIC(oinfo->oi_md);
if (!exp || !exp->exp_obd)
- RETURN(-ENODEV);
+ return -ENODEV;
lov = &exp->exp_obd->u.lov;
rc = lov_prep_getattr_set(exp, oinfo, &set);
if (rc)
- RETURN(rc);
+ return rc;
list_for_each (pos, &set->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
@@ -1182,7 +1165,7 @@ static int lov_getattr(const struct lu_env *env, struct obd_export *exp,
rc = lov_fini_getattr_set(set);
if (err)
rc = err;
- RETURN(rc);
+ return rc;
}
static int lov_getattr_interpret(struct ptlrpc_request_set *rqset,
@@ -1190,13 +1173,12 @@ static int lov_getattr_interpret(struct ptlrpc_request_set *rqset,
{
struct lov_request_set *lovset = (struct lov_request_set *)data;
int err;
- ENTRY;
/* don't do attribute merge if this async op failed */
if (rc)
atomic_set(&lovset->set_completes, 0);
err = lov_fini_getattr_set(lovset);
- RETURN(rc ? rc : err);
+ return rc ? rc : err;
}
static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
@@ -1207,19 +1189,18 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
struct list_head *pos;
struct lov_request *req;
int rc = 0, err;
- ENTRY;
LASSERT(oinfo);
ASSERT_LSM_MAGIC(oinfo->oi_md);
if (!exp || !exp->exp_obd)
- RETURN(-ENODEV);
+ return -ENODEV;
lov = &exp->exp_obd->u.lov;
rc = lov_prep_getattr_set(exp, oinfo, &lovset);
if (rc)
- RETURN(rc);
+ return rc;
CDEBUG(D_INFO, "objid "DOSTID": %ux%u byte stripes\n",
POSTID(&oinfo->oi_md->lsm_oi), oinfo->oi_md->lsm_stripe_count,
@@ -1249,13 +1230,13 @@ static int lov_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
LASSERT (rqset->set_interpret == NULL);
rqset->set_interpret = lov_getattr_interpret;
rqset->set_arg = (void *)lovset;
- RETURN(rc);
+ return rc;
}
out:
if (rc)
atomic_set(&lovset->set_completes, 0);
err = lov_fini_getattr_set(lovset);
- RETURN(rc ? rc : err);
+ return rc ? rc : err;
}
static int lov_setattr(const struct lu_env *env, struct obd_export *exp,
@@ -1266,13 +1247,12 @@ static int lov_setattr(const struct lu_env *env, struct obd_export *exp,
struct list_head *pos;
struct lov_request *req;
int err = 0, rc = 0;
- ENTRY;
LASSERT(oinfo);
ASSERT_LSM_MAGIC(oinfo->oi_md);
if (!exp || !exp->exp_obd)
- RETURN(-ENODEV);
+ return -ENODEV;
/* for now, we only expect the following updates here */
LASSERT(!(oinfo->oi_oa->o_valid & ~(OBD_MD_FLID | OBD_MD_FLTYPE |
@@ -1285,7 +1265,7 @@ static int lov_setattr(const struct lu_env *env, struct obd_export *exp,
lov = &exp->exp_obd->u.lov;
rc = lov_prep_setattr_set(exp, oinfo, oti, &set);
if (rc)
- RETURN(rc);
+ return rc;
list_for_each (pos, &set->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
@@ -1307,7 +1287,7 @@ static int lov_setattr(const struct lu_env *env, struct obd_export *exp,
err = lov_fini_setattr_set(set);
if (!rc)
rc = err;
- RETURN(rc);
+ return rc;
}
static int lov_setattr_interpret(struct ptlrpc_request_set *rqset,
@@ -1315,12 +1295,11 @@ static int lov_setattr_interpret(struct ptlrpc_request_set *rqset,
{
struct lov_request_set *lovset = (struct lov_request_set *)data;
int err;
- ENTRY;
if (rc)
atomic_set(&lovset->set_completes, 0);
err = lov_fini_setattr_set(lovset);
- RETURN(rc ? rc : err);
+ return rc ? rc : err;
}
/* If @oti is given, the request goes from MDS and responses from OSTs are not
@@ -1334,7 +1313,6 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
struct list_head *pos;
struct lov_obd *lov;
int rc = 0;
- ENTRY;
LASSERT(oinfo);
ASSERT_LSM_MAGIC(oinfo->oi_md);
@@ -1344,12 +1322,12 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
}
if (!exp || !exp->exp_obd)
- RETURN(-ENODEV);
+ return -ENODEV;
lov = &exp->exp_obd->u.lov;
rc = lov_prep_setattr_set(exp, oinfo, oti, &set);
if (rc)
- RETURN(rc);
+ return rc;
CDEBUG(D_INFO, "objid "DOSTID": %ux%u byte stripes\n",
POSTID(&oinfo->oi_md->lsm_oi),
@@ -1384,14 +1362,14 @@ static int lov_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
if (rc)
atomic_set(&set->set_completes, 0);
err = lov_fini_setattr_set(set);
- RETURN(rc ? rc : err);
+ return rc ? rc : err;
}
LASSERT(rqset->set_interpret == NULL);
rqset->set_interpret = lov_setattr_interpret;
rqset->set_arg = (void *)set;
- RETURN(0);
+ return 0;
}
static int lov_punch_interpret(struct ptlrpc_request_set *rqset,
@@ -1399,12 +1377,11 @@ static int lov_punch_interpret(struct ptlrpc_request_set *rqset,
{
struct lov_request_set *lovset = (struct lov_request_set *)data;
int err;
- ENTRY;
if (rc)
atomic_set(&lovset->set_completes, 0);
err = lov_fini_punch_set(lovset);
- RETURN(rc ? rc : err);
+ return rc ? rc : err;
}
/* FIXME: maybe we'll just make one node the authoritative attribute node, then
@@ -1419,18 +1396,17 @@ static int lov_punch(const struct lu_env *env, struct obd_export *exp,
struct list_head *pos;
struct lov_request *req;
int rc = 0;
- ENTRY;
LASSERT(oinfo);
ASSERT_LSM_MAGIC(oinfo->oi_md);
if (!exp || !exp->exp_obd)
- RETURN(-ENODEV);
+ return -ENODEV;
lov = &exp->exp_obd->u.lov;
rc = lov_prep_punch_set(exp, oinfo, oti, &set);
if (rc)
- RETURN(rc);
+ return rc;
list_for_each (pos, &set->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
@@ -1450,14 +1426,14 @@ static int lov_punch(const struct lu_env *env, struct obd_export *exp,
if (rc || list_empty(&rqset->set_requests)) {
int err;
err = lov_fini_punch_set(set);
- RETURN(rc ? rc : err);
+ return rc ? rc : err;
}
LASSERT(rqset->set_interpret == NULL);
rqset->set_interpret = lov_punch_interpret;
rqset->set_arg = (void *)set;
- RETURN(0);
+ return 0;
}
static int lov_sync_interpret(struct ptlrpc_request_set *rqset,
@@ -1465,12 +1441,11 @@ static int lov_sync_interpret(struct ptlrpc_request_set *rqset,
{
struct lov_request_set *lovset = data;
int err;
- ENTRY;
if (rc)
atomic_set(&lovset->set_completes, 0);
err = lov_fini_sync_set(lovset);
- RETURN(rc ?: err);
+ return rc ?: err;
}
static int lov_sync(const struct lu_env *env, struct obd_export *exp,
@@ -1482,18 +1457,17 @@ static int lov_sync(const struct lu_env *env, struct obd_export *exp,
struct list_head *pos;
struct lov_request *req;
int rc = 0;
- ENTRY;
ASSERT_LSM_MAGIC(oinfo->oi_md);
LASSERT(rqset != NULL);
if (!exp->exp_obd)
- RETURN(-ENODEV);
+ return -ENODEV;
lov = &exp->exp_obd->u.lov;
rc = lov_prep_sync_set(exp, oinfo, start, end, &set);
if (rc)
- RETURN(rc);
+ return rc;
CDEBUG(D_INFO, "fsync objid "DOSTID" ["LPX64", "LPX64"]\n",
POSTID(&set->set_oi->oi_oa->o_oi), start, end);
@@ -1519,14 +1493,14 @@ static int lov_sync(const struct lu_env *env, struct obd_export *exp,
if (rc || list_empty(&rqset->set_requests)) {
int err = lov_fini_sync_set(set);
- RETURN(rc ?: err);
+ return rc ?: err;
}
LASSERT(rqset->set_interpret == NULL);
rqset->set_interpret = lov_sync_interpret;
rqset->set_arg = (void *)set;
- RETURN(0);
+ return 0;
}
static int lov_brw_check(struct lov_obd *lov, struct obd_info *lov_oinfo,
@@ -1571,18 +1545,17 @@ static int lov_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
struct list_head *pos;
struct lov_obd *lov = &exp->exp_obd->u.lov;
int err, rc = 0;
- ENTRY;
ASSERT_LSM_MAGIC(oinfo->oi_md);
if (cmd == OBD_BRW_CHECK) {
rc = lov_brw_check(lov, oinfo, oa_bufs, pga);
- RETURN(rc);
+ return rc;
}
rc = lov_prep_brw_set(exp, oinfo, oa_bufs, pga, oti, &set);
if (rc)
- RETURN(rc);
+ return rc;
list_for_each (pos, &set->set_list) {
struct obd_export *sub_exp;
@@ -1601,16 +1574,16 @@ static int lov_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
err = lov_fini_brw_set(set);
if (!rc)
rc = err;
- RETURN(rc);
+ return rc;
}
static int lov_enqueue_interpret(struct ptlrpc_request_set *rqset,
void *data, int rc)
{
struct lov_request_set *lovset = (struct lov_request_set *)data;
- ENTRY;
+
rc = lov_fini_enqueue_set(lovset, lovset->set_ei->ei_mode, rc, rqset);
- RETURN(rc);
+ return rc;
}
static int lov_enqueue(struct obd_export *exp, struct obd_info *oinfo,
@@ -1623,7 +1596,6 @@ static int lov_enqueue(struct obd_export *exp, struct obd_info *oinfo,
struct list_head *pos;
struct lov_obd *lov;
ldlm_error_t rc;
- ENTRY;
LASSERT(oinfo);
ASSERT_LSM_MAGIC(oinfo->oi_md);
@@ -1633,12 +1605,12 @@ static int lov_enqueue(struct obd_export *exp, struct obd_info *oinfo,
LASSERT((oinfo->oi_flags & LDLM_FL_REPLAY) == 0);
if (!exp || !exp->exp_obd)
- RETURN(-ENODEV);
+ return -ENODEV;
lov = &exp->exp_obd->u.lov;
rc = lov_prep_enqueue_set(exp, oinfo, einfo, &set);
if (rc)
- RETURN(rc);
+ return rc;
list_for_each (pos, &set->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
@@ -1654,11 +1626,11 @@ static int lov_enqueue(struct obd_export *exp, struct obd_info *oinfo,
LASSERT(rqset->set_interpret == NULL);
rqset->set_interpret = lov_enqueue_interpret;
rqset->set_arg = (void *)set;
- RETURN(rc);
+ return rc;
}
out:
rc = lov_fini_enqueue_set(set, mode, rc, rqset);
- RETURN(rc);
+ return rc;
}
static int lov_change_cbdata(struct obd_export *exp,
@@ -1667,12 +1639,11 @@ static int lov_change_cbdata(struct obd_export *exp,
{
struct lov_obd *lov;
int rc = 0, i;
- ENTRY;
ASSERT_LSM_MAGIC(lsm);
if (!exp || !exp->exp_obd)
- RETURN(-ENODEV);
+ return -ENODEV;
lov = &exp->exp_obd->u.lov;
for (i = 0; i < lsm->lsm_stripe_count; i++) {
@@ -1689,7 +1660,7 @@ static int lov_change_cbdata(struct obd_export *exp,
rc = obd_change_cbdata(lov->lov_tgts[loi->loi_ost_idx]->ltd_exp,
&submd, it, data);
}
- RETURN(rc);
+ return rc;
}
/* find any ldlm lock of the inode in lov
@@ -1702,12 +1673,11 @@ static int lov_find_cbdata(struct obd_export *exp,
{
struct lov_obd *lov;
int rc = 0, i;
- ENTRY;
ASSERT_LSM_MAGIC(lsm);
if (!exp || !exp->exp_obd)
- RETURN(-ENODEV);
+ return -ENODEV;
lov = &exp->exp_obd->u.lov;
for (i = 0; i < lsm->lsm_stripe_count; i++) {
@@ -1723,9 +1693,9 @@ static int lov_find_cbdata(struct obd_export *exp,
rc = obd_find_cbdata(lov->lov_tgts[loi->loi_ost_idx]->ltd_exp,
&submd, it, data);
if (rc != 0)
- RETURN(rc);
+ return rc;
}
- RETURN(rc);
+ return rc;
}
static int lov_cancel(struct obd_export *exp, struct lov_stripe_md *lsm,
@@ -1738,18 +1708,17 @@ static int lov_cancel(struct obd_export *exp, struct lov_stripe_md *lsm,
struct lov_obd *lov;
struct lustre_handle *lov_lockhp;
int err = 0, rc = 0;
- ENTRY;
ASSERT_LSM_MAGIC(lsm);
if (!exp || !exp->exp_obd)
- RETURN(-ENODEV);
+ return -ENODEV;
LASSERT(lockh);
lov = &exp->exp_obd->u.lov;
rc = lov_prep_cancel_set(exp, &oinfo, lsm, mode, lockh, &set);
if (rc)
- RETURN(rc);
+ return rc;
list_for_each(pos, &set->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
@@ -1769,7 +1738,7 @@ static int lov_cancel(struct obd_export *exp, struct lov_stripe_md *lsm,
}
lov_fini_cancel_set(set);
- RETURN(err);
+ return err;
}
static int lov_cancel_unused(struct obd_export *exp,
@@ -1778,10 +1747,9 @@ static int lov_cancel_unused(struct obd_export *exp,
{
struct lov_obd *lov;
int rc = 0, i;
- ENTRY;
if (!exp || !exp->exp_obd)
- RETURN(-ENODEV);
+ return -ENODEV;
lov = &exp->exp_obd->u.lov;
if (lsm == NULL) {
@@ -1795,7 +1763,7 @@ static int lov_cancel_unused(struct obd_export *exp,
if (!rc)
rc = err;
}
- RETURN(rc);
+ return rc;
}
ASSERT_LSM_MAGIC(lsm);
@@ -1827,20 +1795,19 @@ static int lov_cancel_unused(struct obd_export *exp,
rc = err;
}
}
- RETURN(rc);
+ return rc;
}
int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc)
{
struct lov_request_set *lovset = (struct lov_request_set *)data;
int err;
- ENTRY;
if (rc)
atomic_set(&lovset->set_completes, 0);
err = lov_fini_statfs_set(lovset);
- RETURN(rc ? rc : err);
+ return rc ? rc : err;
}
static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo,
@@ -1852,7 +1819,6 @@ static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo,
struct list_head *pos;
struct lov_obd *lov;
int rc = 0;
- ENTRY;
LASSERT(oinfo != NULL);
LASSERT(oinfo->oi_osfs != NULL);
@@ -1860,7 +1826,7 @@ static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo,
lov = &obd->u.lov;
rc = lov_prep_statfs_set(obd, oinfo, &set);
if (rc)
- RETURN(rc);
+ return rc;
list_for_each (pos, &set->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
@@ -1875,13 +1841,13 @@ static int lov_statfs_async(struct obd_export *exp, struct obd_info *oinfo,
if (rc)
atomic_set(&set->set_completes, 0);
err = lov_fini_statfs_set(set);
- RETURN(rc ? rc : err);
+ return rc ? rc : err;
}
LASSERT(rqset->set_interpret == NULL);
rqset->set_interpret = lov_statfs_interpret;
rqset->set_arg = (void *)set;
- RETURN(0);
+ return 0;
}
static int lov_statfs(const struct lu_env *env, struct obd_export *exp,
@@ -1890,14 +1856,12 @@ static int lov_statfs(const struct lu_env *env, struct obd_export *exp,
struct ptlrpc_request_set *set = NULL;
struct obd_info oinfo = { { { 0 } } };
int rc = 0;
- ENTRY;
-
/* for obdclass we forbid using obd_statfs_rqset, but prefer using async
* statfs requests */
set = ptlrpc_prep_set();
if (set == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
oinfo.oi_osfs = osfs;
oinfo.oi_flags = flags;
@@ -1906,7 +1870,7 @@ static int lov_statfs(const struct lu_env *env, struct obd_export *exp,
rc = ptlrpc_set_wait(set);
ptlrpc_set_destroy(set);
- RETURN(rc);
+ return rc;
}
static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
@@ -1916,7 +1880,6 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
struct lov_obd *lov = &obddev->u.lov;
int i = 0, rc = 0, count = lov->desc.ld_tgt_count;
struct obd_uuid *uuidp;
- ENTRY;
switch (cmd) {
case IOC_OBD_STATFS: {
@@ -1928,23 +1891,23 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
memcpy(&index, data->ioc_inlbuf2, sizeof(__u32));
if ((index >= count))
- RETURN(-ENODEV);
+ return -ENODEV;
if (!lov->lov_tgts[index])
/* Try again with the next index */
- RETURN(-EAGAIN);
+ return -EAGAIN;
if (!lov->lov_tgts[index]->ltd_active)
- RETURN(-ENODATA);
+ return -ENODATA;
osc_obd = class_exp2obd(lov->lov_tgts[index]->ltd_exp);
if (!osc_obd)
- RETURN(-EINVAL);
+ return -EINVAL;
/* copy UUID */
if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(osc_obd),
min((int) data->ioc_plen2,
(int) sizeof(struct obd_uuid))))
- RETURN(-EFAULT);
+ return -EFAULT;
flags = uarg ? *(__u32*)uarg : 0;
/* got statfs data */
@@ -1952,11 +1915,11 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
flags);
if (rc)
- RETURN(rc);
+ return rc;
if (copy_to_user(data->ioc_pbuf1, &stat_buf,
min((int) data->ioc_plen1,
(int) sizeof(stat_buf))))
- RETURN(-EFAULT);
+ return -EFAULT;
break;
}
case OBD_IOC_LOV_GET_CONFIG: {
@@ -1967,23 +1930,23 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
len = 0;
if (obd_ioctl_getdata(&buf, &len, (void *)uarg))
- RETURN(-EINVAL);
+ return -EINVAL;
data = (struct obd_ioctl_data *)buf;
if (sizeof(*desc) > data->ioc_inllen1) {
obd_ioctl_freedata(buf, len);
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (sizeof(uuidp->uuid) * count > data->ioc_inllen2) {
obd_ioctl_freedata(buf, len);
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (sizeof(__u32) * count > data->ioc_inllen3) {
obd_ioctl_freedata(buf, len);
- RETURN(-EINVAL);
+ return -EINVAL;
}
desc = (struct lov_desc *)data->ioc_inlbuf1;
@@ -2020,11 +1983,11 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
if (qctl->qc_valid == QC_OSTIDX) {
if (qctl->qc_idx < 0 || count <= qctl->qc_idx)
- RETURN(-EINVAL);
+ return -EINVAL;
tgt = lov->lov_tgts[qctl->qc_idx];
if (!tgt || !tgt->ltd_exp)
- RETURN(-EINVAL);
+ return -EINVAL;
} else if (qctl->qc_valid == QC_UUID) {
for (i = 0; i < count; i++) {
tgt = lov->lov_tgts[i];
@@ -2034,21 +1997,21 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
continue;
if (tgt->ltd_exp == NULL)
- RETURN(-EINVAL);
+ return -EINVAL;
break;
}
} else {
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (i >= count)
- RETURN(-EAGAIN);
+ return -EAGAIN;
LASSERT(tgt && tgt->ltd_exp);
OBD_ALLOC_PTR(oqctl);
if (!oqctl)
- RETURN(-ENOMEM);
+ return -ENOMEM;
QCTL_COPY(oqctl, qctl);
rc = obd_quotactl(tgt->ltd_exp, oqctl);
@@ -2064,7 +2027,7 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
int set = 0;
if (count == 0)
- RETURN(-ENOTTY);
+ return -ENOTTY;
for (i = 0; i < count; i++) {
int err;
@@ -2081,7 +2044,7 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
err = obd_iocontrol(cmd, lov->lov_tgts[i]->ltd_exp,
len, karg, uarg);
if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) {
- RETURN(err);
+ return err;
} else if (err) {
if (lov->lov_tgts[i]->ltd_active) {
CDEBUG(err == -ENOTTY ?
@@ -2102,7 +2065,7 @@ static int lov_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
}
}
- RETURN(rc);
+ return rc;
}
#define FIEMAP_BUFFER_SIZE 4096
@@ -2259,7 +2222,7 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
int cur_stripe = 0, cur_stripe_wrap = 0, stripe_count;
unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
- if (lsm == NULL)
+ if (!lsm_has_objects(lsm))
GOTO(out, rc = 0);
if (fiemap_count_to_size(fm_key->fiemap.fm_extent_count) < buffer_size)
@@ -2469,10 +2432,9 @@ static int lov_get_info(const struct lu_env *env, struct obd_export *exp,
struct obd_device *obddev = class_exp2obd(exp);
struct lov_obd *lov = &obddev->u.lov;
int i, rc;
- ENTRY;
if (!vallen || !val)
- RETURN(-EFAULT);
+ return -EFAULT;
obd_getref(obddev);
@@ -2553,7 +2515,7 @@ static int lov_get_info(const struct lu_env *env, struct obd_export *exp,
out:
obd_putref(obddev);
- RETURN(rc);
+ return rc;
}
static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
@@ -2568,14 +2530,13 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
unsigned incr, check_uuid,
do_inactive, no_set;
unsigned next_id = 0, mds_con = 0, capa = 0;
- ENTRY;
incr = check_uuid = do_inactive = no_set = 0;
if (set == NULL) {
no_set = 1;
set = ptlrpc_prep_set();
if (!set)
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
obd_getref(obddev);
@@ -2667,7 +2628,7 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
rc = err;
ptlrpc_set_destroy(set);
}
- RETURN(rc);
+ return rc;
}
static int lov_extent_calc(struct obd_export *exp, struct lov_stripe_md *lsm,
@@ -2691,7 +2652,7 @@ static int lov_extent_calc(struct obd_export *exp, struct lov_stripe_md *lsm,
LBUG();
}
- RETURN(0);
+ return 0;
}
void lov_stripe_lock(struct lov_stripe_md *md)
@@ -2719,7 +2680,6 @@ static int lov_quotactl(struct obd_device *obd, struct obd_export *exp,
__u64 curspace = 0;
__u64 bhardlimit = 0;
int i, rc = 0;
- ENTRY;
if (oqctl->qc_cmd != LUSTRE_Q_QUOTAON &&
oqctl->qc_cmd != LUSTRE_Q_QUOTAOFF &&
@@ -2728,7 +2688,7 @@ static int lov_quotactl(struct obd_device *obd, struct obd_export *exp,
oqctl->qc_cmd != LUSTRE_Q_SETQUOTA &&
oqctl->qc_cmd != Q_FINVALIDATE) {
CERROR("bad quota opc %x for lov obd", oqctl->qc_cmd);
- RETURN(-EFAULT);
+ return -EFAULT;
}
/* for lov tgt */
@@ -2770,7 +2730,7 @@ static int lov_quotactl(struct obd_device *obd, struct obd_export *exp,
oqctl->qc_dqblk.dqb_curspace = curspace;
oqctl->qc_dqblk.dqb_bhardlimit = bhardlimit;
}
- RETURN(rc);
+ return rc;
}
static int lov_quotacheck(struct obd_device *obd, struct obd_export *exp,
@@ -2778,7 +2738,6 @@ static int lov_quotacheck(struct obd_device *obd, struct obd_export *exp,
{
struct lov_obd *lov = &obd->u.lov;
int i, rc = 0;
- ENTRY;
obd_getref(obd);
@@ -2814,7 +2773,7 @@ static int lov_quotacheck(struct obd_device *obd, struct obd_export *exp,
out:
obd_putref(obd);
- RETURN(rc);
+ return rc;
}
struct obd_ops lov_obd_ops = {
@@ -2870,7 +2829,6 @@ int __init lov_init(void)
{
struct lprocfs_static_vars lvars = { 0 };
int rc;
- ENTRY;
/* print an address of _any_ initialized kernel symbol from this
* module, to allow debugging with gdb that doesn't support data
@@ -2898,7 +2856,7 @@ int __init lov_init(void)
lu_kmem_fini(lov_caches);
}
- RETURN(rc);
+ return rc;
}
static void /*__exit*/ lov_exit(void)
@@ -2912,5 +2870,7 @@ static void /*__exit*/ lov_exit(void)
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Logical Object Volume OBD driver");
MODULE_LICENSE("GPL");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
-cfs_module(lov, LUSTRE_VERSION_STRING, lov_init, lov_exit);
+module_init(lov_init);
+module_exit(lov_exit);
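The bulk of the hunks above mechanically replace the libcfs ENTRY/EXIT/RETURN
tracing macros with plain C returns. As a rough sketch (simplified here; the
real libcfs definitions carry extra bookkeeping), the macros amount to trace
logging wrapped around an ordinary return, which is why dropping them does not
change control flow:

/* Simplified sketch of the removed macros, not the exact libcfs code. */
#define ENTRY		CDEBUG(D_TRACE, "Process entered\n")
#define EXIT		CDEBUG(D_TRACE, "Process leaving\n")
#define RETURN(rc)						\
do {								\
	typeof(rc) __rc = (rc);					\
	EXIT;							\
	return __rc;						\
} while (0)

The last hunk of lov_obd.c also drops the Lustre-private cfs_module() wrapper
in favor of the standard kernel registration macros; a minimal sketch of that
pattern, with hypothetical example_init/example_exit names, is:

#include <linux/init.h>
#include <linux/module.h>

static int __init example_init(void)
{
	return 0;	/* register the obd type, procfs entries, ... */
}

static void __exit example_exit(void)
{
	/* tear down in reverse order of init */
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(example_init);
module_exit(example_exit);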
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index aa8ae80e812..84e55ce3ccd 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -116,10 +116,9 @@ static struct cl_object *lov_sub_find(const struct lu_env *env,
{
struct lu_object *o;
- ENTRY;
o = lu_object_find_at(env, cl2lu_dev(dev), fid, &conf->coc_lu);
LASSERT(ergo(!IS_ERR(o), o->lo_dev->ld_type == &lovsub_device_type));
- RETURN(lu2cl(o));
+ return lu2cl(o);
}
static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
@@ -204,8 +203,6 @@ static int lov_init_raid0(const struct lu_env *env,
struct lu_fid *ofid = &lti->lti_fid;
struct lov_layout_raid0 *r0 = &state->raid0;
- ENTRY;
-
if (lsm->lsm_magic != LOV_MAGIC_V1 && lsm->lsm_magic != LOV_MAGIC_V3) {
dump_lsm(D_ERROR, lsm);
LASSERTF(0, "magic mismatch, expected %d/%d, actual %d.\n",
@@ -255,13 +252,28 @@ static int lov_init_raid0(const struct lu_env *env,
} else
result = -ENOMEM;
out:
- RETURN(result);
+ return result;
+}
+
+static int lov_init_released(const struct lu_env *env,
+ struct lov_device *dev, struct lov_object *lov,
+ const struct cl_object_conf *conf,
+ union lov_layout_state *state)
+{
+ struct lov_stripe_md *lsm = conf->u.coc_md->lsm;
+
+ LASSERT(lsm != NULL);
+ LASSERT(lsm_is_released(lsm));
+ LASSERT(lov->lo_lsm == NULL);
+
+ lov->lo_lsm = lsm_addref(lsm);
+ return 0;
}
static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
union lov_layout_state *state)
{
- LASSERT(lov->lo_type == LLT_EMPTY);
+ LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
lov_layout_wait(env, lov);
@@ -323,8 +335,6 @@ static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
struct lov_stripe_md *lsm = lov->lo_lsm;
int i;
- ENTRY;
-
dump_lsm(D_INODE, lsm);
lov_layout_wait(env, lov);
@@ -343,20 +353,19 @@ static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
}
}
cl_object_prune(env, &lov->lo_cl);
- RETURN(0);
+ return 0;
}
static void lov_fini_empty(const struct lu_env *env, struct lov_object *lov,
union lov_layout_state *state)
{
- LASSERT(lov->lo_type == LLT_EMPTY);
+ LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
}
static void lov_fini_raid0(const struct lu_env *env, struct lov_object *lov,
union lov_layout_state *state)
{
struct lov_layout_raid0 *r0 = &state->raid0;
- ENTRY;
if (r0->lo_sub != NULL) {
OBD_FREE_LARGE(r0->lo_sub, r0->lo_nr * sizeof r0->lo_sub[0]);
@@ -365,8 +374,13 @@ static void lov_fini_raid0(const struct lu_env *env, struct lov_object *lov,
dump_lsm(D_INODE, lov->lo_lsm);
lov_free_memmd(&lov->lo_lsm);
+}
- EXIT;
+static void lov_fini_released(const struct lu_env *env, struct lov_object *lov,
+ union lov_layout_state *state)
+{
+ dump_lsm(D_INODE, lov->lo_lsm);
+ lov_free_memmd(&lov->lo_lsm);
}
static int lov_print_empty(const struct lu_env *env, void *cookie,
@@ -400,6 +414,13 @@ static int lov_print_raid0(const struct lu_env *env, void *cookie,
return 0;
}
+static int lov_print_released(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct lu_object *o)
+{
+ (*p)(env, cookie, "released\n");
+ return 0;
+}
+
/**
* Implements cl_object_operations::coo_attr_get() method for an object
* without stripes (LLT_EMPTY layout type).
@@ -422,8 +443,6 @@ static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj,
struct cl_attr *lov_attr = &r0->lo_attr;
int result = 0;
- ENTRY;
-
/* this is called w/o holding type guard mutex, so it must be inside
* an ongoing IO otherwise lsm may be replaced.
* LU-2117: it turns out there exists one exception. For mmaped files,
@@ -478,7 +497,7 @@ static int lov_attr_get_raid0(const struct lu_env *env, struct cl_object *obj,
if (attr->cat_mtime < lov_attr->cat_mtime)
attr->cat_mtime = lov_attr->cat_mtime;
}
- RETURN(result);
+ return result;
}
const static struct lov_layout_operations lov_dispatch[] = {
@@ -503,10 +522,20 @@ const static struct lov_layout_operations lov_dispatch[] = {
.llo_lock_init = lov_lock_init_raid0,
.llo_io_init = lov_io_init_raid0,
.llo_getattr = lov_attr_get_raid0
+ },
+ [LLT_RELEASED] = {
+ .llo_init = lov_init_released,
+ .llo_delete = lov_delete_empty,
+ .llo_fini = lov_fini_released,
+ .llo_install = lov_install_empty,
+ .llo_print = lov_print_released,
+ .llo_page_init = lov_page_init_empty,
+ .llo_lock_init = lov_lock_init_empty,
+ .llo_io_init = lov_io_init_released,
+ .llo_getattr = lov_attr_get_empty
}
};
-
/**
* Performs a double-dispatch based on the layout type of an object.
*/
@@ -520,6 +549,18 @@ const static struct lov_layout_operations lov_dispatch[] = {
lov_dispatch[__llt].op(__VA_ARGS__); \
})
+/**
+ * Return lov_layout_type associated with a given lsm
+ */
+enum lov_layout_type lov_type(struct lov_stripe_md *lsm)
+{
+ if (lsm == NULL)
+ return LLT_EMPTY;
+ if (lsm_is_released(lsm))
+ return LLT_RELEASED;
+ return LLT_RAID0;
+}
+
static inline void lov_conf_freeze(struct lov_object *lov)
{
if (lov->lo_owner != current)
@@ -581,7 +622,6 @@ static void lov_conf_unlock(struct lov_object *lov)
static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
{
struct l_wait_info lwi = { 0 };
- ENTRY;
while (atomic_read(&lov->lo_active_ios) > 0) {
CDEBUG(D_INODE, "file:"DFID" wait for active IO, now: %d.\n",
@@ -591,7 +631,7 @@ static int lov_layout_wait(const struct lu_env *env, struct lov_object *lov)
l_wait_event(lov->lo_waitq,
atomic_read(&lov->lo_active_ios) == 0, &lwi);
}
- RETURN(0);
+ return 0;
}
static int lov_layout_change(const struct lu_env *unused,
@@ -608,19 +648,18 @@ static int lov_layout_change(const struct lu_env *unused,
void *cookie;
struct lu_env *env;
int refcheck;
- ENTRY;
LASSERT(0 <= lov->lo_type && lov->lo_type < ARRAY_SIZE(lov_dispatch));
- if (conf->u.coc_md != NULL && conf->u.coc_md->lsm != NULL)
- llt = LLT_RAID0; /* only raid0 is supported. */
+ if (conf->u.coc_md != NULL)
+ llt = lov_type(conf->u.coc_md->lsm);
LASSERT(0 <= llt && llt < ARRAY_SIZE(lov_dispatch));
cookie = cl_env_reenter();
env = cl_env_get(&refcheck);
if (IS_ERR(env)) {
cl_env_reexit(cookie);
- RETURN(PTR_ERR(env));
+ return PTR_ERR(env);
}
old_ops = &lov_dispatch[lov->lo_type];
@@ -650,7 +689,7 @@ static int lov_layout_change(const struct lu_env *unused,
cl_env_put(env, &refcheck);
cl_env_reexit(cookie);
- RETURN(result);
+ return result;
}
/*****************************************************************************
@@ -658,7 +697,6 @@ static int lov_layout_change(const struct lu_env *unused,
* Lov object operations.
*
*/
-
int lov_object_init(const struct lu_env *env, struct lu_object *obj,
const struct lu_object_conf *conf)
{
@@ -669,7 +707,6 @@ int lov_object_init(const struct lu_env *env, struct lu_object *obj,
const struct lov_layout_operations *ops;
int result;
- ENTRY;
init_rwsem(&lov->lo_type_guard);
atomic_set(&lov->lo_active_ios, 0);
init_waitqueue_head(&lov->lo_waitq);
@@ -677,21 +714,20 @@ int lov_object_init(const struct lu_env *env, struct lu_object *obj,
cl_object_page_init(lu2cl(obj), sizeof(struct lov_page));
/* no locking is necessary, as object is being created */
- lov->lo_type = cconf->u.coc_md->lsm != NULL ? LLT_RAID0 : LLT_EMPTY;
+ lov->lo_type = lov_type(cconf->u.coc_md->lsm);
ops = &lov_dispatch[lov->lo_type];
result = ops->llo_init(env, dev, lov, cconf, set);
if (result == 0)
ops->llo_install(env, lov, set);
- RETURN(result);
+ return result;
}
static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
const struct cl_object_conf *conf)
{
- struct lov_stripe_md *lsm = NULL;
- struct lov_object *lov = cl2lov(obj);
- int result = 0;
- ENTRY;
+ struct lov_stripe_md *lsm = NULL;
+ struct lov_object *lov = cl2lov(obj);
+ int result = 0;
lov_conf_lock(lov);
if (conf->coc_opc == OBJECT_CONF_INVALIDATE) {
@@ -728,31 +764,26 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
}
lov->lo_layout_invalid = lov_layout_change(env, lov, conf);
- EXIT;
out:
lov_conf_unlock(lov);
- RETURN(result);
+ return result;
}
static void lov_object_delete(const struct lu_env *env, struct lu_object *obj)
{
struct lov_object *lov = lu2lov(obj);
- ENTRY;
LOV_2DISPATCH_VOID(lov, llo_delete, env, lov, &lov->u);
- EXIT;
}
static void lov_object_free(const struct lu_env *env, struct lu_object *obj)
{
struct lov_object *lov = lu2lov(obj);
- ENTRY;
LOV_2DISPATCH_VOID(lov, llo_fini, env, lov, &lov->u);
lu_object_fini(obj);
OBD_SLAB_FREE_PTR(lov, lov_object_kmem);
- EXIT;
}
static int lov_object_print(const struct lu_env *env, void *cookie,
@@ -835,7 +866,6 @@ struct lu_object *lov_object_alloc(const struct lu_env *env,
struct lov_object *lov;
struct lu_object *obj;
- ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(lov, lov_object_kmem, __GFP_IO);
if (lov != NULL) {
obj = lov2lu(lov);
@@ -850,7 +880,7 @@ struct lu_object *lov_object_alloc(const struct lu_env *env,
obj->lo_ops = &lov_lu_obj_ops;
} else
obj = NULL;
- RETURN(obj);
+ return obj;
}
struct lov_stripe_md *lov_lsm_addref(struct lov_object *lov)
@@ -906,7 +936,6 @@ int lov_read_and_clear_async_rc(struct cl_object *clob)
{
struct lu_object *luobj;
int rc = 0;
- ENTRY;
luobj = lu_object_locate(&cl_object_header(clob)->coh_lu,
&lov_device_type);
@@ -928,6 +957,7 @@ int lov_read_and_clear_async_rc(struct cl_object *clob)
loi->loi_ar.ar_rc = 0;
}
}
+ case LLT_RELEASED:
case LLT_EMPTY:
break;
default:
@@ -935,7 +965,7 @@ int lov_read_and_clear_async_rc(struct cl_object *clob)
}
lov_conf_thaw(lov);
}
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(lov_read_and_clear_async_rc);
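The lov_object.c hunks above add an LLT_RELEASED layout type and pick the
dispatch slot through the new lov_type() helper instead of hard-coding RAID0.
lsm_is_released() is used but not defined in these hunks; presumably it just
tests the released flag in the stripe pattern, along the lines of this sketch
(assumed definition, not taken from the patch):

static inline bool lsm_is_released(struct lov_stripe_md *lsm)
{
	return !!(lsm->lsm_pattern & LOV_PATTERN_F_RELEASED);
}

/* lov_type() then maps an lsm onto a lov_dispatch[] slot:
 *   lsm == NULL           -> LLT_EMPTY    (no layout attached yet)
 *   lsm_is_released(lsm)  -> LLT_RELEASED (layout with no OST objects)
 *   otherwise             -> LLT_RAID0    (striped layout)
 */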
diff --git a/drivers/staging/lustre/lustre/lov/lov_offset.c b/drivers/staging/lustre/lustre/lov/lov_offset.c
index f62b7e53b66..04863a7c5e0 100644
--- a/drivers/staging/lustre/lustre/lov/lov_offset.c
+++ b/drivers/staging/lustre/lustre/lov/lov_offset.c
@@ -52,10 +52,9 @@ obd_size lov_stripe_size(struct lov_stripe_md *lsm, obd_size ost_size,
obd_off swidth;
obd_size lov_size;
int magic = lsm->lsm_magic;
- ENTRY;
if (ost_size == 0)
- RETURN(0);
+ return 0;
LASSERT(lsm_op_find(magic) != NULL);
lsm_op_find(magic)->lsm_stripe_by_index(lsm, &stripeno, NULL, &swidth);
@@ -67,7 +66,7 @@ obd_size lov_stripe_size(struct lov_stripe_md *lsm, obd_size ost_size,
else
lov_size = (ost_size - 1) * swidth + (stripeno + 1) * ssize;
- RETURN(lov_size);
+ return lov_size;
}
/* we have an offset in file backed by an lov and want to find out where
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 492948aad68..55ec26778f8 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -143,7 +143,6 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
int lmm_size, lmm_magic;
int i;
int cplen = 0;
- ENTRY;
if (lsm) {
lmm_magic = lsm->lsm_magic;
@@ -159,7 +158,7 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
(lmm_magic != LOV_MAGIC_V3)) {
CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
- RETURN(-EINVAL);
+ return -EINVAL;
}
@@ -168,10 +167,12 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
* to the actual number of OSTs in this filesystem. */
if (!lmmp) {
stripe_count = lov_get_stripecnt(lov, lmm_magic,
- lsm->lsm_stripe_count);
+ lsm->lsm_stripe_count);
lsm->lsm_stripe_count = stripe_count;
- } else {
+ } else if (!lsm_is_released(lsm)) {
stripe_count = lsm->lsm_stripe_count;
+ } else {
+ stripe_count = 0;
}
} else {
/* No need to allocate more than maximum supported stripes.
@@ -188,20 +189,20 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
if (!lmmp)
- RETURN(lmm_size);
+ return lmm_size;
if (*lmmp && !lsm) {
stripe_count = le16_to_cpu((*lmmp)->lmm_stripe_count);
lmm_size = lov_mds_md_size(stripe_count, lmm_magic);
OBD_FREE_LARGE(*lmmp, lmm_size);
*lmmp = NULL;
- RETURN(0);
+ return 0;
}
if (!*lmmp) {
OBD_ALLOC_LARGE(*lmmp, lmm_size);
if (!*lmmp)
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
CDEBUG(D_INFO, "lov_packmd: LOV_MAGIC 0x%08X, lmm_size = %d \n",
@@ -215,7 +216,7 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
lmmv1->lmm_magic = cpu_to_le32(LOV_MAGIC_V1);
if (!lsm)
- RETURN(lmm_size);
+ return lmm_size;
/* lmmv1 and lmmv3 point to the same struct and have the
* same first fields
@@ -229,7 +230,7 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
cplen = strlcpy(lmmv3->lmm_pool_name, lsm->lsm_pool_name,
sizeof(lmmv3->lmm_pool_name));
if (cplen >= sizeof(lmmv3->lmm_pool_name))
- RETURN(-E2BIG);
+ return -E2BIG;
lmm_objects = lmmv3->lmm_objects;
} else {
lmm_objects = lmmv1->lmm_objects;
@@ -246,7 +247,7 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
lmm_objects[i].l_ost_idx = cpu_to_le32(loi->loi_ost_idx);
}
- RETURN(lmm_size);
+ return lmm_size;
}
/* Find the max stripecount we should use */
@@ -307,14 +308,13 @@ int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
int pattern, int magic)
{
int i, lsm_size;
- ENTRY;
CDEBUG(D_INFO, "alloc lsm, stripe_count %d\n", stripe_count);
*lsmp = lsm_alloc_plain(stripe_count, &lsm_size);
if (!*lsmp) {
CERROR("can't allocate lsmp stripe_count %d\n", stripe_count);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
atomic_set(&(*lsmp)->lsm_refc, 1);
@@ -325,12 +325,13 @@ int lov_alloc_memmd(struct lov_stripe_md **lsmp, __u16 stripe_count,
(*lsmp)->lsm_pattern = pattern;
(*lsmp)->lsm_pool_name[0] = '\0';
(*lsmp)->lsm_layout_gen = 0;
- (*lsmp)->lsm_oinfo[0]->loi_ost_idx = ~0;
+ if (stripe_count > 0)
+ (*lsmp)->lsm_oinfo[0]->loi_ost_idx = ~0;
for (i = 0; i < stripe_count; i++)
loi_init((*lsmp)->lsm_oinfo[i]);
- RETURN(lsm_size);
+ return lsm_size;
}
int lov_free_memmd(struct lov_stripe_md **lsmp)
@@ -359,13 +360,13 @@ int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
int rc = 0, lsm_size;
__u16 stripe_count;
__u32 magic;
- ENTRY;
+ __u32 pattern;
/* If passed an MDS struct use values from there, otherwise defaults */
if (lmm) {
rc = lov_verify_lmm(lmm, lmm_bytes, &stripe_count);
if (rc)
- RETURN(rc);
+ return rc;
magic = le32_to_cpu(lmm->lmm_magic);
} else {
magic = LOV_MAGIC;
@@ -376,31 +377,31 @@ int lov_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
if (!lsmp) {
/* XXX LOV STACKING call into osc for sizes */
LBUG();
- RETURN(lov_stripe_md_size(stripe_count));
+ return lov_stripe_md_size(stripe_count);
}
/* If we are passed an allocated struct but nothing to unpack, free */
if (*lsmp && !lmm) {
lov_free_memmd(lsmp);
- RETURN(0);
+ return 0;
}
- lsm_size = lov_alloc_memmd(lsmp, stripe_count, LOV_PATTERN_RAID0,
- magic);
+ pattern = le32_to_cpu(lmm->lmm_pattern);
+ lsm_size = lov_alloc_memmd(lsmp, stripe_count, pattern, magic);
if (lsm_size < 0)
- RETURN(lsm_size);
+ return lsm_size;
/* If we are passed a pointer but nothing to unpack, we only alloc */
if (!lmm)
- RETURN(lsm_size);
+ return lsm_size;
LASSERT(lsm_op_find(magic) != NULL);
rc = lsm_op_find(magic)->lsm_unpackmd(lov, *lsmp, lmm);
if (rc) {
lov_free_memmd(lsmp);
- RETURN(rc);
+ return rc;
}
- RETURN(lsm_size);
+ return lsm_size;
}
static int __lov_setstripe(struct obd_export *exp, int max_lmm_size,
@@ -416,11 +417,10 @@ static int __lov_setstripe(struct obd_export *exp, int max_lmm_size,
__u16 stripe_count;
int rc;
int cplen = 0;
- ENTRY;
rc = lov_lum_swab_if_needed(lumv3, &lmm_magic, lump);
if (rc)
- RETURN(rc);
+ return rc;
/* in the rest of the tests, as *lumv1 and lumv3 have the same
* fields, we use lumv1 to avoid code duplication */
@@ -430,10 +430,10 @@ static int __lov_setstripe(struct obd_export *exp, int max_lmm_size,
lov->desc.ld_pattern : LOV_PATTERN_RAID0;
}
- if (lumv1->lmm_pattern != LOV_PATTERN_RAID0) {
+ if (lov_pattern(lumv1->lmm_pattern) != LOV_PATTERN_RAID0) {
CDEBUG(D_IOCTL, "bad userland stripe pattern: %#x\n",
lumv1->lmm_pattern);
- RETURN(-EINVAL);
+ return -EINVAL;
}
/* 64kB is the largest common page size we see (ia64), and matches the
@@ -449,7 +449,7 @@ static int __lov_setstripe(struct obd_export *exp, int max_lmm_size,
(typeof(lumv1->lmm_stripe_offset))(-1))) {
CDEBUG(D_IOCTL, "stripe offset %u > number of OSTs %u\n",
lumv1->lmm_stripe_offset, lov->desc.ld_tgt_count);
- RETURN(-EINVAL);
+ return -EINVAL;
}
stripe_count = lov_get_stripecnt(lov, lmm_magic,
lumv1->lmm_stripe_count);
@@ -479,7 +479,7 @@ static int __lov_setstripe(struct obd_export *exp, int max_lmm_size,
lumv3->lmm_stripe_offset, pool);
if (rc < 0) {
lov_pool_putref(pool);
- RETURN(-EINVAL);
+ return -EINVAL;
}
}
@@ -490,6 +490,9 @@ static int __lov_setstripe(struct obd_export *exp, int max_lmm_size,
}
}
+ if (lumv1->lmm_pattern & LOV_PATTERN_F_RELEASED)
+ stripe_count = 0;
+
rc = lov_alloc_memmd(lsmp, stripe_count, lumv1->lmm_pattern, lmm_magic);
if (rc >= 0) {
@@ -505,7 +508,7 @@ static int __lov_setstripe(struct obd_export *exp, int max_lmm_size,
rc = 0;
}
- RETURN(rc);
+ return rc;
}
/* Configure object striping information on a new file.
@@ -526,7 +529,7 @@ int lov_setstripe(struct obd_export *exp, int max_lmm_size,
rc = __lov_setstripe(exp, max_lmm_size, lsmp, lump);
set_fs(seg);
- RETURN(rc);
+ return rc;
}
int lov_setea(struct obd_export *exp, struct lov_stripe_md **lsmp,
@@ -539,8 +542,6 @@ int lov_setea(struct obd_export *exp, struct lov_stripe_md **lsmp,
obd_id last_id = 0;
struct lov_user_ost_data_v1 *lmm_objects;
- ENTRY;
-
if (lump->lmm_magic == LOV_USER_MAGIC_V3)
lmm_objects = ((struct lov_user_md_v3 *)lump)->lmm_objects;
else
@@ -552,26 +553,26 @@ int lov_setea(struct obd_export *exp, struct lov_stripe_md **lsmp,
rc = obd_get_info(NULL, oexp, sizeof(KEY_LAST_ID), KEY_LAST_ID,
&len, &last_id, NULL);
if (rc)
- RETURN(rc);
+ return rc;
if (ostid_id(&lmm_objects[i].l_ost_oi) > last_id) {
CERROR("Setting EA for object > than last id on"
" ost idx %d "DOSTID" > "LPD64" \n",
lmm_objects[i].l_ost_idx,
POSTID(&lmm_objects[i].l_ost_oi), last_id);
- RETURN(-EINVAL);
+ return -EINVAL;
}
}
rc = lov_setstripe(exp, 0, lsmp, lump);
if (rc)
- RETURN(rc);
+ return rc;
for (i = 0; i < lump->lmm_stripe_count; i++) {
(*lsmp)->lsm_oinfo[i]->loi_ost_idx =
lmm_objects[i].l_ost_idx;
(*lsmp)->lsm_oinfo[i]->loi_oi = lmm_objects[i].l_ost_oi;
}
- RETURN(0);
+ return 0;
}
@@ -593,10 +594,9 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
int rc, lmm_size;
int lum_size;
mm_segment_t seg;
- ENTRY;
if (!lsm)
- RETURN(-ENODATA);
+ return -ENODATA;
/*
* "Switch to kernel segment" to allow copying from kernel space by
@@ -674,5 +674,5 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
obd_free_diskmd(exp, &lmmk);
out_set:
set_fs(seg);
- RETURN(rc);
+ return rc;
}
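The lov_pack.c hunks teach the pack/unpack paths about released layouts: a
released lsm packs zero stripe objects, __lov_setstripe() forces stripe_count
to 0 when LOV_PATTERN_F_RELEASED is set, and lov_alloc_memmd() no longer
touches lsm_oinfo[0] for a zero-stripe layout. The lov_pattern() helper used
in the RAID0 check is not shown here; presumably it masks the flag bits out of
the pattern, roughly as in this sketch (assumed values and definitions):

#define LOV_PATTERN_F_MASK	0xffff0000
#define lov_pattern(p)		((p) & ~LOV_PATTERN_F_MASK)

/* With the released flag set, lov_pattern(lmm_pattern) still compares
 * equal to LOV_PATTERN_RAID0, while the stripe count carried in the
 * in-memory lsm is zero -- hence the "if (stripe_count > 0)" guard
 * added in lov_alloc_memmd(). */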
diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c
index 65790d68472..674e61781c2 100644
--- a/drivers/staging/lustre/lustre/lov/lov_page.c
+++ b/drivers/staging/lustre/lustre/lov/lov_page.c
@@ -69,7 +69,6 @@ static void lov_page_fini(const struct lu_env *env,
struct cl_page *sub = lov_sub_page(slice);
LINVRNT(lov_page_invariant(slice));
- ENTRY;
if (sub != NULL) {
LASSERT(sub->cp_state == CPS_FREEING);
@@ -78,7 +77,6 @@ static void lov_page_fini(const struct lu_env *env,
slice->cpl_page->cp_child = NULL;
cl_page_put(env, sub);
}
- EXIT;
}
static int lov_page_own(const struct lu_env *env,
@@ -90,7 +88,6 @@ static int lov_page_own(const struct lu_env *env,
LINVRNT(lov_page_invariant(slice));
LINVRNT(!cl2lov_page(slice)->lps_invalid);
- ENTRY;
sub = lov_page_subio(env, lio, slice);
if (!IS_ERR(sub)) {
@@ -98,7 +95,7 @@ static int lov_page_own(const struct lu_env *env,
lov_sub_put(sub);
} else
LBUG(); /* Arrgh */
- RETURN(0);
+ return 0;
}
static void lov_page_assume(const struct lu_env *env,
@@ -117,7 +114,6 @@ static int lov_page_cache_add(const struct lu_env *env,
LINVRNT(lov_page_invariant(slice));
LINVRNT(!cl2lov_page(slice)->lps_invalid);
- ENTRY;
sub = lov_page_subio(env, lio, slice);
if (!IS_ERR(sub)) {
@@ -128,7 +124,7 @@ static int lov_page_cache_add(const struct lu_env *env,
rc = PTR_ERR(sub);
CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page, "rc = %d\n", rc);
}
- RETURN(rc);
+ return rc;
}
static int lov_page_print(const struct lu_env *env,
@@ -172,7 +168,6 @@ int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
obd_off suboff;
int stripe;
int rc;
- ENTRY;
offset = cl_offset(obj, page->cp_index);
stripe = lov_stripe_number(loo->lo_lsm, offset);
@@ -205,7 +200,6 @@ int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
LASSERT(0);
}
- EXIT;
out:
return rc;
}
@@ -221,14 +215,13 @@ int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
{
struct lov_page *lpg = cl_object_page_slice(obj, page);
void *addr;
- ENTRY;
cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_empty_page_ops);
addr = kmap(vmpage);
memset(addr, 0, cl_page_size(obj));
kunmap(vmpage);
cl_page_export(env, page, 1);
- RETURN(0);
+ return 0;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index a96f90880c6..dd3c07d5c4d 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -68,7 +68,6 @@ void lov_pool_putref(struct pool_desc *pool)
lov_ost_pool_free(&(pool->pool_rr.lqr_pool));
lov_ost_pool_free(&(pool->pool_obds));
OBD_FREE_PTR(pool);
- EXIT;
}
}
@@ -322,8 +321,6 @@ void lov_dump_pool(int level, struct pool_desc *pool)
#define LOV_POOL_INIT_COUNT 2
int lov_ost_pool_init(struct ost_pool *op, unsigned int count)
{
- ENTRY;
-
if (count == 0)
count = LOV_POOL_INIT_COUNT;
op->op_array = NULL;
@@ -333,9 +330,8 @@ int lov_ost_pool_init(struct ost_pool *op, unsigned int count)
OBD_ALLOC(op->op_array, op->op_size * sizeof(op->op_array[0]));
if (op->op_array == NULL) {
op->op_size = 0;
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
- EXIT;
return 0;
}
@@ -366,7 +362,6 @@ int lov_ost_pool_extend(struct ost_pool *op, unsigned int min_count)
int lov_ost_pool_add(struct ost_pool *op, __u32 idx, unsigned int min_count)
{
int rc = 0, i;
- ENTRY;
down_write(&op->op_rw_sem);
@@ -382,7 +377,6 @@ int lov_ost_pool_add(struct ost_pool *op, __u32 idx, unsigned int min_count)
/* ost not found we add it */
op->op_array[op->op_count] = idx;
op->op_count++;
- EXIT;
out:
up_write(&op->op_rw_sem);
return rc;
@@ -391,7 +385,6 @@ out:
int lov_ost_pool_remove(struct ost_pool *op, __u32 idx)
{
int i;
- ENTRY;
down_write(&op->op_rw_sem);
@@ -401,21 +394,18 @@ int lov_ost_pool_remove(struct ost_pool *op, __u32 idx)
(op->op_count - i - 1) * sizeof(op->op_array[0]));
op->op_count--;
up_write(&op->op_rw_sem);
- EXIT;
return 0;
}
}
up_write(&op->op_rw_sem);
- RETURN(-EINVAL);
+ return -EINVAL;
}
int lov_ost_pool_free(struct ost_pool *op)
{
- ENTRY;
-
if (op->op_size == 0)
- RETURN(0);
+ return 0;
down_write(&op->op_rw_sem);
@@ -425,7 +415,7 @@ int lov_ost_pool_free(struct ost_pool *op)
op->op_size = 0;
up_write(&op->op_rw_sem);
- RETURN(0);
+ return 0;
}
@@ -434,16 +424,15 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
struct lov_obd *lov;
struct pool_desc *new_pool;
int rc;
- ENTRY;
lov = &(obd->u.lov);
if (strlen(poolname) > LOV_MAXPOOLNAME)
- RETURN(-ENAMETOOLONG);
+ return -ENAMETOOLONG;
OBD_ALLOC_PTR(new_pool);
if (new_pool == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
strncpy(new_pool->pool_name, poolname, LOV_MAXPOOLNAME);
new_pool->pool_name[LOV_MAXPOOLNAME] = '\0';
@@ -492,7 +481,7 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
CDEBUG(D_CONFIG, LOV_POOLNAMEF" is pool #%d\n",
poolname, lov->lov_pool_count);
- RETURN(0);
+ return 0;
out_err:
spin_lock(&obd->obd_dev_lock);
@@ -513,14 +502,13 @@ int lov_pool_del(struct obd_device *obd, char *poolname)
{
struct lov_obd *lov;
struct pool_desc *pool;
- ENTRY;
lov = &(obd->u.lov);
/* lookup and kill hash reference */
pool = cfs_hash_del_key(lov->lov_pools_hash_body, poolname);
if (pool == NULL)
- RETURN(-ENOENT);
+ return -ENOENT;
if (pool->pool_proc_entry != NULL) {
CDEBUG(D_INFO, "proc entry %p\n", pool->pool_proc_entry);
@@ -536,7 +524,7 @@ int lov_pool_del(struct obd_device *obd, char *poolname)
/* release last reference */
lov_pool_putref(pool);
- RETURN(0);
+ return 0;
}
@@ -547,13 +535,12 @@ int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname)
struct pool_desc *pool;
unsigned int lov_idx;
int rc;
- ENTRY;
lov = &(obd->u.lov);
pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
if (pool == NULL)
- RETURN(-ENOENT);
+ return -ENOENT;
obd_str2uuid(&ost_uuid, ostname);
@@ -580,7 +567,6 @@ int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname)
CDEBUG(D_CONFIG, "Added %s to "LOV_POOLNAMEF" as member %d\n",
ostname, poolname, pool_tgt_count(pool));
- EXIT;
out:
obd_putref(obd);
lov_pool_putref(pool);
@@ -594,13 +580,12 @@ int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname)
struct pool_desc *pool;
unsigned int lov_idx;
int rc = 0;
- ENTRY;
lov = &(obd->u.lov);
pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
if (pool == NULL)
- RETURN(-ENOENT);
+ return -ENOENT;
obd_str2uuid(&ost_uuid, ostname);
@@ -626,7 +611,6 @@ int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname)
CDEBUG(D_CONFIG, "%s removed from "LOV_POOLNAMEF"\n", ostname,
poolname);
- EXIT;
out:
obd_putref(obd);
lov_pool_putref(pool);
@@ -636,7 +620,6 @@ out:
int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool)
{
int i, rc;
- ENTRY;
/* caller may not have a ref on pool if it got the pool
* without calling lov_find_pool() (e.g. go through the lov pool
@@ -651,7 +634,6 @@ int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool)
GOTO(out, rc = 0);
}
rc = -ENOENT;
- EXIT;
out:
up_read(&pool_tgt_rw_sem(pool));
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index 13f1637bc70..61e6d0b46c9 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -60,7 +60,6 @@ static void lov_init_set(struct lov_request_set *set)
void lov_finish_set(struct lov_request_set *set)
{
struct list_head *pos, *n;
- ENTRY;
LASSERT(set);
list_for_each_safe(pos, n, &set->set_list) {
@@ -87,7 +86,6 @@ void lov_finish_set(struct lov_request_set *set)
lov_llh_put(set->set_lockh);
OBD_FREE(set, sizeof(*set));
- EXIT;
}
int lov_set_finished(struct lov_request_set *set, int idempotent)
@@ -122,7 +120,6 @@ int lov_update_common_set(struct lov_request_set *set,
struct lov_request *req, int rc)
{
struct lov_obd *lov = &set->set_exp->exp_obd->u.lov;
- ENTRY;
lov_update_set(set, req, rc);
@@ -132,7 +129,7 @@ int lov_update_common_set(struct lov_request_set *set,
rc = 0;
/* FIXME in raid1 regime, should return 0 */
- RETURN(rc);
+ return rc;
}
void lov_set_add_req(struct lov_request *req, struct lov_request_set *set)
@@ -232,7 +229,6 @@ int lov_update_enqueue_set(struct lov_request *req, __u32 mode, int rc)
struct lustre_handle *lov_lockhp;
struct obd_info *oi = set->set_oi;
struct lov_oinfo *loi;
- ENTRY;
LASSERT(oi != NULL);
@@ -254,7 +250,7 @@ int lov_update_enqueue_set(struct lov_request *req, __u32 mode, int rc)
req->rq_idx, &oi->oi_md->lsm_oi, rc);
lov_stripe_unlock(oi->oi_md);
lov_update_set(set, req, rc);
- RETURN(rc);
+ return rc;
}
/* The callback for osc_enqueue that updates lov info for every OSC request. */
@@ -275,11 +271,10 @@ static int enqueue_done(struct lov_request_set *set, __u32 mode)
struct lov_obd *lov = &set->set_exp->exp_obd->u.lov;
int completes = atomic_read(&set->set_completes);
int rc = 0;
- ENTRY;
/* enqueue/match success, just return */
if (completes && completes == atomic_read(&set->set_success))
- RETURN(0);
+ return 0;
/* cancel enqueued/matched locks */
list_for_each_entry(req, &set->set_list, rq_link) {
@@ -305,17 +300,16 @@ static int enqueue_done(struct lov_request_set *set, __u32 mode)
}
if (set->set_lockh)
lov_llh_put(set->set_lockh);
- RETURN(rc);
+ return rc;
}
int lov_fini_enqueue_set(struct lov_request_set *set, __u32 mode, int rc,
struct ptlrpc_request_set *rqset)
{
int ret = 0;
- ENTRY;
if (set == NULL)
- RETURN(0);
+ return 0;
LASSERT(set->set_exp);
/* Do enqueue_done only for sync requests and if any request
* succeeded. */
@@ -328,7 +322,7 @@ int lov_fini_enqueue_set(struct lov_request_set *set, __u32 mode, int rc,
lov_put_reqset(set);
- RETURN(rc ? rc : ret);
+ return rc ? rc : ret;
}
static void lov_llh_addref(void *llhp)
@@ -369,11 +363,10 @@ int lov_prep_enqueue_set(struct obd_export *exp, struct obd_info *oinfo,
struct lov_obd *lov = &exp->exp_obd->u.lov;
struct lov_request_set *set;
int i, rc = 0;
- ENTRY;
OBD_ALLOC(set, sizeof(*set));
if (set == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
lov_init_set(set);
set->set_exp = exp;
@@ -445,19 +438,18 @@ int lov_prep_enqueue_set(struct obd_export *exp, struct obd_info *oinfo,
if (!set->set_count)
GOTO(out_set, rc = -EIO);
*reqset = set;
- RETURN(0);
+ return 0;
out_set:
lov_fini_enqueue_set(set, einfo->ei_mode, rc, NULL);
- RETURN(rc);
+ return rc;
}
int lov_fini_match_set(struct lov_request_set *set, __u32 mode, int flags)
{
int rc = 0;
- ENTRY;
if (set == NULL)
- RETURN(0);
+ return 0;
LASSERT(set->set_exp);
rc = enqueue_done(set, mode);
if ((set->set_count == atomic_read(&set->set_success)) &&
@@ -466,7 +458,7 @@ int lov_fini_match_set(struct lov_request_set *set, __u32 mode, int flags)
lov_put_reqset(set);
- RETURN(rc);
+ return rc;
}
int lov_prep_match_set(struct obd_export *exp, struct obd_info *oinfo,
@@ -477,11 +469,10 @@ int lov_prep_match_set(struct obd_export *exp, struct obd_info *oinfo,
struct lov_obd *lov = &exp->exp_obd->u.lov;
struct lov_request_set *set;
int i, rc = 0;
- ENTRY;
OBD_ALLOC(set, sizeof(*set));
if (set == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
lov_init_set(set);
set->set_exp = exp;
@@ -535,19 +526,18 @@ int lov_prep_match_set(struct obd_export *exp, struct obd_info *oinfo,
if (!set->set_count)
GOTO(out_set, rc = -EIO);
*reqset = set;
- RETURN(rc);
+ return rc;
out_set:
lov_fini_match_set(set, mode, 0);
- RETURN(rc);
+ return rc;
}
int lov_fini_cancel_set(struct lov_request_set *set)
{
int rc = 0;
- ENTRY;
if (set == NULL)
- RETURN(0);
+ return 0;
LASSERT(set->set_exp);
if (set->set_lockh)
@@ -555,7 +545,7 @@ int lov_fini_cancel_set(struct lov_request_set *set)
lov_put_reqset(set);
- RETURN(rc);
+ return rc;
}
int lov_prep_cancel_set(struct obd_export *exp, struct obd_info *oinfo,
@@ -565,11 +555,10 @@ int lov_prep_cancel_set(struct obd_export *exp, struct obd_info *oinfo,
{
struct lov_request_set *set;
int i, rc = 0;
- ENTRY;
OBD_ALLOC(set, sizeof(*set));
if (set == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
lov_init_set(set);
set->set_exp = exp;
@@ -617,10 +606,10 @@ int lov_prep_cancel_set(struct obd_export *exp, struct obd_info *oinfo,
if (!set->set_count)
GOTO(out_set, rc = -EIO);
*reqset = set;
- RETURN(rc);
+ return rc;
out_set:
lov_fini_cancel_set(set);
- RETURN(rc);
+ return rc;
}
static int common_attr_done(struct lov_request_set *set)
{
@@ -628,15 +617,14 @@ static int common_attr_done(struct lov_request_set *set)
struct lov_request *req;
struct obdo *tmp_oa;
int rc = 0, attrset = 0;
- ENTRY;
LASSERT(set->set_oi != NULL);
if (set->set_oi->oi_oa == NULL)
- RETURN(0);
+ return 0;
if (!atomic_read(&set->set_success))
- RETURN(-EIO);
+ return -EIO;
OBDO_ALLOC(tmp_oa);
if (tmp_oa == NULL)
@@ -670,7 +658,7 @@ static int common_attr_done(struct lov_request_set *set)
out:
if (tmp_oa)
OBDO_FREE(tmp_oa);
- RETURN(rc);
+ return rc;
}
@@ -680,7 +668,6 @@ static int brw_done(struct lov_request_set *set)
struct lov_oinfo *loi = NULL;
struct list_head *pos;
struct lov_request *req;
- ENTRY;
list_for_each (pos, &set->set_list) {
req = list_entry(pos, struct lov_request, rq_link);
@@ -694,16 +681,15 @@ static int brw_done(struct lov_request_set *set)
loi->loi_lvb.lvb_blocks = req->rq_oi.oi_oa->o_blocks;
}
- RETURN(0);
+ return 0;
}
int lov_fini_brw_set(struct lov_request_set *set)
{
int rc = 0;
- ENTRY;
if (set == NULL)
- RETURN(0);
+ return 0;
LASSERT(set->set_exp);
if (atomic_read(&set->set_completes)) {
rc = brw_done(set);
@@ -711,7 +697,7 @@ int lov_fini_brw_set(struct lov_request_set *set)
}
lov_put_reqset(set);
- RETURN(rc);
+ return rc;
}
int lov_prep_brw_set(struct obd_export *exp, struct obd_info *oinfo,
@@ -727,11 +713,10 @@ int lov_prep_brw_set(struct obd_export *exp, struct obd_info *oinfo,
struct lov_request_set *set;
struct lov_obd *lov = &exp->exp_obd->u.lov;
int rc = 0, i, shift;
- ENTRY;
OBD_ALLOC(set, sizeof(*set));
if (set == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
lov_init_set(set);
set->set_exp = exp;
@@ -832,23 +817,22 @@ out:
else
lov_fini_brw_set(set);
- RETURN(rc);
+ return rc;
}
int lov_fini_getattr_set(struct lov_request_set *set)
{
int rc = 0;
- ENTRY;
if (set == NULL)
- RETURN(0);
+ return 0;
LASSERT(set->set_exp);
if (atomic_read(&set->set_completes))
rc = common_attr_done(set);
lov_put_reqset(set);
- RETURN(rc);
+ return rc;
}
/* The callback for osc_getattr_async that finalizes a request info when a
@@ -867,11 +851,10 @@ int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo,
struct lov_request_set *set;
struct lov_obd *lov = &exp->exp_obd->u.lov;
int rc = 0, i;
- ENTRY;
OBD_ALLOC(set, sizeof(*set));
if (set == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
lov_init_set(set);
set->set_exp = exp;
@@ -913,18 +896,16 @@ int lov_prep_getattr_set(struct obd_export *exp, struct obd_info *oinfo,
if (!set->set_count)
GOTO(out_set, rc = -EIO);
*reqset = set;
- RETURN(rc);
+ return rc;
out_set:
lov_fini_getattr_set(set);
- RETURN(rc);
+ return rc;
}
int lov_fini_destroy_set(struct lov_request_set *set)
{
- ENTRY;
-
if (set == NULL)
- RETURN(0);
+ return 0;
LASSERT(set->set_exp);
if (atomic_read(&set->set_completes)) {
/* FIXME update qos data here */
@@ -932,7 +913,7 @@ int lov_fini_destroy_set(struct lov_request_set *set)
lov_put_reqset(set);
- RETURN(0);
+ return 0;
}
int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo,
@@ -943,11 +924,10 @@ int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo,
struct lov_request_set *set;
struct lov_obd *lov = &exp->exp_obd->u.lov;
int rc = 0, i;
- ENTRY;
OBD_ALLOC(set, sizeof(*set));
if (set == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
lov_init_set(set);
set->set_exp = exp;
@@ -987,19 +967,18 @@ int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo,
if (!set->set_count)
GOTO(out_set, rc = -EIO);
*reqset = set;
- RETURN(rc);
+ return rc;
out_set:
lov_fini_destroy_set(set);
- RETURN(rc);
+ return rc;
}
int lov_fini_setattr_set(struct lov_request_set *set)
{
int rc = 0;
- ENTRY;
if (set == NULL)
- RETURN(0);
+ return 0;
LASSERT(set->set_exp);
if (atomic_read(&set->set_completes)) {
rc = common_attr_done(set);
@@ -1007,7 +986,7 @@ int lov_fini_setattr_set(struct lov_request_set *set)
}
lov_put_reqset(set);
- RETURN(rc);
+ return rc;
}
int lov_update_setattr_set(struct lov_request_set *set,
@@ -1015,7 +994,6 @@ int lov_update_setattr_set(struct lov_request_set *set,
{
struct lov_obd *lov = &req->rq_rqset->set_exp->exp_obd->u.lov;
struct lov_stripe_md *lsm = req->rq_rqset->set_oi->oi_md;
- ENTRY;
lov_update_set(set, req, rc);
@@ -1036,7 +1014,7 @@ int lov_update_setattr_set(struct lov_request_set *set,
req->rq_oi.oi_oa->o_atime;
}
- RETURN(rc);
+ return rc;
}
/* The callback for osc_setattr_async that finalizes a request info when a
@@ -1056,11 +1034,10 @@ int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
struct lov_request_set *set;
struct lov_obd *lov = &exp->exp_obd->u.lov;
int rc = 0, i;
- ENTRY;
OBD_ALLOC(set, sizeof(*set));
if (set == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
lov_init_set(set);
set->set_exp = exp;
@@ -1113,19 +1090,18 @@ int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
if (!set->set_count)
GOTO(out_set, rc = -EIO);
*reqset = set;
- RETURN(rc);
+ return rc;
out_set:
lov_fini_setattr_set(set);
- RETURN(rc);
+ return rc;
}
int lov_fini_punch_set(struct lov_request_set *set)
{
int rc = 0;
- ENTRY;
if (set == NULL)
- RETURN(0);
+ return 0;
LASSERT(set->set_exp);
if (atomic_read(&set->set_completes)) {
rc = -EIO;
@@ -1136,7 +1112,7 @@ int lov_fini_punch_set(struct lov_request_set *set)
lov_put_reqset(set);
- RETURN(rc);
+ return rc;
}
int lov_update_punch_set(struct lov_request_set *set,
@@ -1144,7 +1120,6 @@ int lov_update_punch_set(struct lov_request_set *set,
{
struct lov_obd *lov = &req->rq_rqset->set_exp->exp_obd->u.lov;
struct lov_stripe_md *lsm = req->rq_rqset->set_oi->oi_md;
- ENTRY;
lov_update_set(set, req, rc);
@@ -1162,7 +1137,7 @@ int lov_update_punch_set(struct lov_request_set *set,
lov_stripe_unlock(lsm);
}
- RETURN(rc);
+ return rc;
}
/* The callback for osc_punch that finalizes a request info when a response
@@ -1182,11 +1157,10 @@ int lov_prep_punch_set(struct obd_export *exp, struct obd_info *oinfo,
struct lov_request_set *set;
struct lov_obd *lov = &exp->exp_obd->u.lov;
int rc = 0, i;
- ENTRY;
OBD_ALLOC(set, sizeof(*set));
if (set == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
lov_init_set(set);
set->set_oi = oinfo;
@@ -1238,19 +1212,18 @@ int lov_prep_punch_set(struct obd_export *exp, struct obd_info *oinfo,
if (!set->set_count)
GOTO(out_set, rc = -EIO);
*reqset = set;
- RETURN(rc);
+ return rc;
out_set:
lov_fini_punch_set(set);
- RETURN(rc);
+ return rc;
}
int lov_fini_sync_set(struct lov_request_set *set)
{
int rc = 0;
- ENTRY;
if (set == NULL)
- RETURN(0);
+ return 0;
LASSERT(set->set_exp);
if (atomic_read(&set->set_completes)) {
if (!atomic_read(&set->set_success))
@@ -1260,7 +1233,7 @@ int lov_fini_sync_set(struct lov_request_set *set)
lov_put_reqset(set);
- RETURN(rc);
+ return rc;
}
/* The callback for osc_sync that finalizes a request info when a
@@ -1281,11 +1254,10 @@ int lov_prep_sync_set(struct obd_export *exp, struct obd_info *oinfo,
struct lov_request_set *set;
struct lov_obd *lov = &exp->exp_obd->u.lov;
int rc = 0, i;
- ENTRY;
OBD_ALLOC_PTR(set);
if (set == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
lov_init_set(set);
set->set_exp = exp;
@@ -1330,10 +1302,10 @@ int lov_prep_sync_set(struct obd_export *exp, struct obd_info *oinfo,
if (!set->set_count)
GOTO(out_set, rc = -EIO);
*reqset = set;
- RETURN(rc);
+ return rc;
out_set:
lov_fini_sync_set(set);
- RETURN(rc);
+ return rc;
}
#define LOV_U64_MAX ((__u64)~0ULL)
@@ -1347,8 +1319,6 @@ out_set:
int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs,int success)
{
- ENTRY;
-
if (success) {
__u32 expected_stripes = lov_get_stripecnt(&obd->u.lov,
LOV_MAGIC, 0);
@@ -1361,26 +1331,25 @@ int lov_fini_statfs(struct obd_device *obd, struct obd_statfs *osfs,int success)
memcpy(&obd->obd_osfs, osfs, sizeof(*osfs));
obd->obd_osfs_age = cfs_time_current_64();
spin_unlock(&obd->obd_osfs_lock);
- RETURN(0);
+ return 0;
}
- RETURN(-EIO);
+ return -EIO;
}
int lov_fini_statfs_set(struct lov_request_set *set)
{
int rc = 0;
- ENTRY;
if (set == NULL)
- RETURN(0);
+ return 0;
if (atomic_read(&set->set_completes)) {
rc = lov_fini_statfs(set->set_obd, set->set_oi->oi_osfs,
atomic_read(&set->set_success));
}
lov_put_reqset(set);
- RETURN(rc);
+ return rc;
}
void lov_update_statfs(struct obd_statfs *osfs, struct obd_statfs *lov_sfs,
@@ -1450,7 +1419,6 @@ static int cb_statfs_update(void *cookie, int rc)
struct lov_tgt_desc *tgt;
struct obd_device *lovobd, *tgtobd;
int success;
- ENTRY;
lovreq = container_of(oinfo, struct lov_request, rq_oi);
set = lovreq->rq_rqset;
@@ -1488,7 +1456,7 @@ out:
atomic_read(&set->set_success));
}
- RETURN(0);
+ return 0;
}
int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
@@ -1497,11 +1465,10 @@ int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
struct lov_request_set *set;
struct lov_obd *lov = &obd->u.lov;
int rc = 0, i;
- ENTRY;
OBD_ALLOC(set, sizeof(*set));
if (set == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
lov_init_set(set);
set->set_obd = obd;
@@ -1544,8 +1511,8 @@ int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
if (!set->set_count)
GOTO(out_set, rc = -EIO);
*reqset = set;
- RETURN(rc);
+ return rc;
out_set:
lov_fini_statfs_set(set);
- RETURN(rc);
+ return rc;
}
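
Nearly every hunk above (and in the files that follow) is the same mechanical conversion: Lustre's ENTRY/EXIT/RETURN debug-trace macros are dropped and the wrapped value is returned directly, with no change in control flow. A minimal before/after sketch of the pattern; the macro bodies, types and helpers here are illustrative stand-ins, not taken from the patch (the real macros do roughly this kind of entry/exit trace logging):

#include <linux/printk.h>

struct demo_set;                        /* illustrative type and helper only */
void demo_put_set(struct demo_set *set);

/* Stand-ins for the removed macros. */
#define ENTRY       pr_debug("enter %s\n", __func__)
#define RETURN(rc)  do { pr_debug("leave %s\n", __func__); return (rc); } while (0)

/* Before: every exit path goes through the trace macro. */
int demo_fini_set_old(struct demo_set *set)
{
        int rc = 0;
        ENTRY;
        if (set == NULL)
                RETURN(0);
        demo_put_set(set);
        RETURN(rc);
}

/* After: the tracing wrappers are gone, the logic is untouched. */
int demo_fini_set_new(struct demo_set *set)
{
        int rc = 0;

        if (set == NULL)
                return 0;
        demo_put_set(set);
        return rc;
}
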
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
index 204ecd0b863..998ea1cbc7b 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
@@ -55,10 +55,8 @@ static void lovsub_req_completion(const struct lu_env *env,
{
struct lovsub_req *lsr;
- ENTRY;
lsr = cl2lovsub_req(slice);
OBD_SLAB_FREE_PTR(lsr, lovsub_req_kmem);
- EXIT;
}
/**
@@ -73,14 +71,12 @@ static void lovsub_req_attr_set(const struct lu_env *env,
{
struct lovsub_object *subobj;
- ENTRY;
subobj = cl2lovsub(obj);
/*
* There is no OBD_MD_* flag for obdo::o_stripe_idx, so set it
* unconditionally. It never changes anyway.
*/
attr->cra_oa->o_stripe_idx = subobj->lso_index;
- EXIT;
}
static const struct cl_req_operations lovsub_req_ops = {
@@ -101,20 +97,19 @@ static int lovsub_device_init(const struct lu_env *env, struct lu_device *d,
struct lu_device_type *ldt;
int rc;
- ENTRY;
next->ld_site = d->ld_site;
ldt = next->ld_type;
LASSERT(ldt != NULL);
rc = ldt->ldt_ops->ldto_device_init(env, next, ldt->ldt_name, NULL);
if (rc) {
next->ld_site = NULL;
- RETURN(rc);
+ return rc;
}
lu_device_get(next);
lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
lsd->acid_next = lu2cl_dev(next);
- RETURN(rc);
+ return rc;
}
static struct lu_device *lovsub_device_fini(const struct lu_env *env,
@@ -123,12 +118,11 @@ static struct lu_device *lovsub_device_fini(const struct lu_env *env,
struct lu_device *next;
struct lovsub_device *lsd;
- ENTRY;
lsd = lu2lovsub_dev(d);
next = cl2lu_dev(lsd->acid_next);
lsd->acid_super = NULL;
lsd->acid_next = NULL;
- RETURN(next);
+ return next;
}
static struct lu_device *lovsub_device_free(const struct lu_env *env,
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_lock.c b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
index 03bab17ccc6..80305aa6171 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
@@ -57,35 +57,29 @@ static void lovsub_lock_fini(const struct lu_env *env,
{
struct lovsub_lock *lsl;
- ENTRY;
lsl = cl2lovsub_lock(slice);
LASSERT(list_empty(&lsl->lss_parents));
OBD_SLAB_FREE_PTR(lsl, lovsub_lock_kmem);
- EXIT;
}
static void lovsub_parent_lock(const struct lu_env *env, struct lov_lock *lov)
{
struct cl_lock *parent;
- ENTRY;
parent = lov->lls_cl.cls_lock;
cl_lock_get(parent);
lu_ref_add(&parent->cll_reference, "lovsub-parent", current);
cl_lock_mutex_get(env, parent);
- EXIT;
}
static void lovsub_parent_unlock(const struct lu_env *env, struct lov_lock *lov)
{
struct cl_lock *parent;
- ENTRY;
parent = lov->lls_cl.cls_lock;
cl_lock_mutex_put(env, lov->lls_cl.cls_lock);
lu_ref_del(&parent->cll_reference, "lovsub-parent", current);
cl_lock_put(env, parent);
- EXIT;
}
/**
@@ -101,7 +95,6 @@ static void lovsub_lock_state(const struct lu_env *env,
struct lov_lock_link *scan;
LASSERT(cl_lock_is_mutexed(slice->cls_lock));
- ENTRY;
list_for_each_entry(scan, &sub->lss_parents, lll_list) {
struct lov_lock *lov = scan->lll_super;
@@ -113,7 +106,6 @@ static void lovsub_lock_state(const struct lu_env *env,
lovsub_parent_unlock(env, lov);
}
}
- EXIT;
}
/**
@@ -127,8 +119,6 @@ static unsigned long lovsub_lock_weigh(const struct lu_env *env,
struct lov_lock *lov;
unsigned long dumbbell;
- ENTRY;
-
LASSERT(cl_lock_is_mutexed(slice->cls_lock));
if (!list_empty(&lock->lss_parents)) {
@@ -146,7 +136,7 @@ static unsigned long lovsub_lock_weigh(const struct lu_env *env,
} else
dumbbell = 0;
- RETURN(dumbbell);
+ return dumbbell;
}
/**
@@ -162,7 +152,6 @@ static void lovsub_lock_descr_map(const struct cl_lock_descr *in,
pgoff_t start;
pgoff_t end;
- ENTRY;
start = in->cld_start;
end = in->cld_end;
@@ -184,7 +173,6 @@ static void lovsub_lock_descr_map(const struct cl_lock_descr *in,
}
out->cld_start = start;
out->cld_end = end;
- EXIT;
}
/**
@@ -241,8 +229,6 @@ static int lovsub_lock_modify(const struct lu_env *env,
struct lov_lock *lov;
int result = 0;
- ENTRY;
-
LASSERT(cl_lock_mode_match(d->cld_mode,
s->cls_lock->cll_descr.cld_mode));
list_for_each_entry(scan, &lock->lss_parents, lll_list) {
@@ -254,7 +240,7 @@ static int lovsub_lock_modify(const struct lu_env *env,
lovsub_parent_unlock(env, lov);
result = result ?: rc;
}
- RETURN(result);
+ return result;
}
static int lovsub_lock_closure(const struct lu_env *env,
@@ -267,7 +253,6 @@ static int lovsub_lock_closure(const struct lu_env *env,
int result;
LASSERT(cl_lock_is_mutexed(slice->cls_lock));
- ENTRY;
sub = cl2lovsub_lock(slice);
result = 0;
@@ -278,7 +263,7 @@ static int lovsub_lock_closure(const struct lu_env *env,
if (result != 0)
break;
}
- RETURN(result);
+ return result;
}
/**
@@ -290,11 +275,10 @@ static int lovsub_lock_delete_one(const struct lu_env *env,
{
struct cl_lock *parent;
int result;
- ENTRY;
parent = lov->lls_cl.cls_lock;
if (parent->cll_error)
- RETURN(0);
+ return 0;
result = 0;
switch (parent->cll_state) {
@@ -386,7 +370,7 @@ static int lovsub_lock_delete_one(const struct lu_env *env,
break;
}
- RETURN(result);
+ return result;
}
/**
@@ -403,7 +387,6 @@ static void lovsub_lock_delete(const struct lu_env *env,
LASSERT(cl_lock_is_mutexed(child));
- ENTRY;
/*
* Destruction of a sub-lock might take multiple iterations, because
* when the last sub-lock of a given top-lock is deleted, top-lock is
@@ -434,7 +417,6 @@ static void lovsub_lock_delete(const struct lu_env *env,
}
}
} while (restart);
- EXIT;
}
static int lovsub_lock_print(const struct lu_env *env, void *cookie,
@@ -471,7 +453,6 @@ int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
struct lovsub_lock *lsk;
int result;
- ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(lsk, lovsub_lock_kmem, __GFP_IO);
if (lsk != NULL) {
INIT_LIST_HEAD(&lsk->lss_parents);
@@ -479,7 +460,7 @@ int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
result = 0;
} else
result = -ENOMEM;
- RETURN(result);
+ return result;
}
/** @} lov */
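
The lovsub_parent_lock()/lovsub_parent_unlock() pair above follows a common ordering: take a reference on the parent lock before acquiring its mutex, and drop that reference only after the mutex has been released, so the parent cannot be freed while it is held. A generic sketch of the same discipline with stock kernel primitives; all names here are illustrative, not from the patch:

#include <linux/kref.h>
#include <linux/mutex.h>

struct demo_parent {
        struct kref     ref;
        struct mutex    lock;
};

static void demo_parent_release(struct kref *kref)
{
        /* Last reference gone; a real implementation would free here. */
}

static void demo_parent_lock(struct demo_parent *p)
{
        kref_get(&p->ref);              /* keep p alive while we hold it */
        mutex_lock(&p->lock);
}

static void demo_parent_unlock(struct demo_parent *p)
{
        mutex_unlock(&p->lock);
        kref_put(&p->ref, demo_parent_release);  /* may be the final put */
}
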
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c
index 1b83d9081c4..89760b3bf3f 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_object.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c
@@ -61,7 +61,6 @@ int lovsub_object_init(const struct lu_env *env, struct lu_object *obj,
int result;
- ENTRY;
under = &dev->acid_next->cd_lu_dev;
below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
if (below != NULL) {
@@ -70,7 +69,7 @@ int lovsub_object_init(const struct lu_env *env, struct lu_object *obj,
result = 0;
} else
result = -ENOMEM;
- RETURN(result);
+ return result;
}
@@ -78,7 +77,6 @@ static void lovsub_object_free(const struct lu_env *env, struct lu_object *obj)
{
struct lovsub_object *los = lu2lovsub(obj);
struct lov_object *lov = los->lso_super;
- ENTRY;
/* We can't assume lov was assigned here, because of the shadow
* object handling in lu_object_find.
@@ -94,7 +92,6 @@ static void lovsub_object_free(const struct lu_env *env, struct lu_object *obj)
lu_object_fini(obj);
lu_object_header_fini(&los->lso_header.coh_lu);
OBD_SLAB_FREE_PTR(los, lovsub_object_kmem);
- EXIT;
}
static int lovsub_object_print(const struct lu_env *env, void *cookie,
@@ -110,9 +107,8 @@ static int lovsub_attr_set(const struct lu_env *env, struct cl_object *obj,
{
struct lov_object *lov = cl2lovsub(obj)->lso_super;
- ENTRY;
lov_r0(lov)->lo_attr_valid = 0;
- RETURN(0);
+ return 0;
}
static int lovsub_object_glimpse(const struct lu_env *env,
@@ -121,8 +117,7 @@ static int lovsub_object_glimpse(const struct lu_env *env,
{
struct lovsub_object *los = cl2lovsub(obj);
- ENTRY;
- RETURN(cl_object_glimpse(env, &los->lso_super->lo_cl, lvb));
+ return cl_object_glimpse(env, &los->lso_super->lo_cl, lvb);
}
@@ -150,7 +145,6 @@ struct lu_object *lovsub_object_alloc(const struct lu_env *env,
struct lovsub_object *los;
struct lu_object *obj;
- ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(los, lovsub_object_kmem, __GFP_IO);
if (los != NULL) {
struct cl_object_header *hdr;
@@ -164,7 +158,7 @@ struct lu_object *lovsub_object_alloc(const struct lu_env *env,
obj->lo_ops = &lovsub_lu_obj_ops;
} else
obj = NULL;
- RETURN(obj);
+ return obj;
}
/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_page.c b/drivers/staging/lustre/lustre/lov/lovsub_page.c
index bc9e683968d..3f00ce9677b 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_page.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_page.c
@@ -63,10 +63,9 @@ int lovsub_page_init(const struct lu_env *env, struct cl_object *obj,
struct cl_page *page, struct page *unused)
{
struct lovsub_page *lsb = cl_object_page_slice(obj, page);
- ENTRY;
cl_page_slice_add(page, &lsb->lsb_cl, obj, &lovsub_page_ops);
- RETURN(0);
+ return 0;
}
/** @} lov */
diff --git a/drivers/staging/lustre/lustre/lov/lproc_lov.c b/drivers/staging/lustre/lustre/lov/lproc_lov.c
index 5b2c0d88add..15744e13a3f 100644
--- a/drivers/staging/lustre/lustre/lov/lproc_lov.c
+++ b/drivers/staging/lustre/lustre/lov/lproc_lov.c
@@ -35,7 +35,6 @@
*/
#define DEBUG_SUBSYSTEM S_CLASS
-#include <linux/version.h>
#include <asm/statfs.h>
#include <lprocfs_status.h>
#include <obd_class.h>
diff --git a/drivers/staging/lustre/lustre/lvfs/fsfilt.c b/drivers/staging/lustre/lustre/lvfs/fsfilt.c
index 064445cbdb5..e86df7356cb 100644
--- a/drivers/staging/lustre/lustre/lvfs/fsfilt.c
+++ b/drivers/staging/lustre/lustre/lvfs/fsfilt.c
@@ -35,7 +35,6 @@
#define DEBUG_SUBSYSTEM S_FILTER
#include <linux/fs.h>
-#include <linux/jbd.h>
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/slab.h>
@@ -68,7 +67,7 @@ int fsfilt_register_ops(struct fsfilt_operations *fs_ops)
CERROR("different operations for type %s\n",
fs_ops->fs_type);
/* unlock fsfilt_types list */
- RETURN(-EEXIST);
+ return -EEXIST;
}
} else {
try_module_get(THIS_MODULE);
@@ -120,7 +119,7 @@ struct fsfilt_operations *fsfilt_get_ops(const char *type)
if (rc) {
CERROR("Can't find %s interface\n", name);
- RETURN(ERR_PTR(rc < 0 ? rc : -rc));
+ return ERR_PTR(rc < 0 ? rc : -rc);
/* unlock fsfilt_types list */
}
}
diff --git a/drivers/staging/lustre/lustre/lvfs/fsfilt_ext3.c b/drivers/staging/lustre/lustre/lvfs/fsfilt_ext3.c
index c1e99b37572..ee75994003e 100644
--- a/drivers/staging/lustre/lustre/lvfs/fsfilt_ext3.c
+++ b/drivers/staging/lustre/lustre/lvfs/fsfilt_ext3.c
@@ -48,7 +48,6 @@
#include <ldiskfs/ldiskfs_config.h>
#include <ext4/ext4.h>
#include <ext4/ext4_jbd2.h>
-#include <linux/version.h>
#include <linux/bitops.h>
#include <linux/quota.h>
diff --git a/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c b/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
index e70d8fe9988..18e1b47a1d6 100644
--- a/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
+++ b/drivers/staging/lustre/lustre/lvfs/lvfs_linux.c
@@ -40,16 +40,12 @@
#define DEBUG_SUBSYSTEM S_FILTER
-#include <linux/version.h>
#include <linux/fs.h>
#include <asm/unistd.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
-#include <linux/version.h>
#include <linux/libcfs/libcfs.h>
-#include <lustre_fsfilt.h>
-#include <obd.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/lustre_compat25.h>
@@ -207,7 +203,6 @@ int lustre_rename(struct dentry *dir, struct vfsmount *mnt,
{
struct dentry *dchild_old, *dchild_new;
int err = 0;
- ENTRY;
ASSERT_KERNEL_CTXT("kernel doing rename outside kernel context\n");
CDEBUG(D_INODE, "renaming file %.*s to %.*s\n",
@@ -215,7 +210,7 @@ int lustre_rename(struct dentry *dir, struct vfsmount *mnt,
dchild_old = ll_lookup_one_len(oldname, dir, strlen(oldname));
if (IS_ERR(dchild_old))
- RETURN(PTR_ERR(dchild_old));
+ return PTR_ERR(dchild_old);
if (!dchild_old->d_inode)
GOTO(put_old, err = -ENOENT);
@@ -230,7 +225,7 @@ int lustre_rename(struct dentry *dir, struct vfsmount *mnt,
dput(dchild_new);
put_old:
dput(dchild_old);
- RETURN(err);
+ return err;
}
EXPORT_SYMBOL(lustre_rename);
@@ -242,7 +237,7 @@ struct l_file *l_dentry_open(struct lvfs_run_ctxt *ctxt, struct l_dentry *de,
.dentry = de,
.mnt = ctxt->pwdmnt,
};
- return ll_dentry_open(&path, flags, current_cred());
+ return dentry_open(&path, flags, current_cred());
}
EXPORT_SYMBOL(l_dentry_open);
@@ -255,7 +250,7 @@ __s64 lprocfs_read_helper(struct lprocfs_counter *lc,
__s64 ret = 0;
if (lc == NULL || header == NULL)
- RETURN(0);
+ return 0;
switch (field) {
case LPROCFS_FIELDS_FLAGS_CONFIG:
@@ -285,7 +280,7 @@ __s64 lprocfs_read_helper(struct lprocfs_counter *lc,
break;
};
- RETURN(ret);
+ return ret;
}
EXPORT_SYMBOL(lprocfs_read_helper);
#endif /* LPROCFS */
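
Apart from the include and macro cleanup, the lvfs_linux.c hunks above change l_dentry_open() to call the kernel's own dentry_open(), which takes a struct path (dentry plus vfsmount) and the opener's credentials, instead of the old ll_dentry_open() wrapper. A condensed sketch of that call; the dentry/mount pair is assumed to be valid and referenced, and the helper name is illustrative:

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/cred.h>

static struct file *demo_open(struct dentry *de, struct vfsmount *mnt,
                              int flags)
{
        struct path path = {
                .dentry = de,
                .mnt    = mnt,
        };

        /* Returns a struct file * on success or an ERR_PTR() on failure,
         * so callers check the result with IS_ERR()/PTR_ERR(). */
        return dentry_open(&path, flags, current_cred());
}
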
diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
index 6592478e9b6..e0b8f186625 100644
--- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
+++ b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
@@ -35,7 +35,6 @@
*/
#define DEBUG_SUBSYSTEM S_CLASS
-#include <linux/version.h>
#include <linux/vfs.h>
#include <obd_class.h>
#include <lprocfs_status.h>
@@ -93,14 +92,13 @@ static ssize_t mdc_kuc_write(struct file *file, const char *buffer,
struct hsm_action_item *hai;
int len;
int fd, rc;
- ENTRY;
rc = lprocfs_write_helper(buffer, count, &fd);
if (rc)
- RETURN(rc);
+ return rc;
if (fd < 0)
- RETURN(-ERANGE);
+ return -ERANGE;
CWARN("message to fd %d\n", fd);
len = sizeof(*lh) + sizeof(*hal) + MTI_NAME_MAXLEN +
@@ -141,8 +139,8 @@ static ssize_t mdc_kuc_write(struct file *file, const char *buffer,
}
OBD_FREE(lh, len);
if (rc < 0)
- RETURN(rc);
- RETURN(count);
+ return rc;
+ return count;
}
struct file_operations mdc_kuc_fops = {
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index e789aed98de..b2de4780367 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -45,10 +45,10 @@ static void __mdc_pack_body(struct mdt_body *b, __u32 suppgid)
LASSERT (b != NULL);
b->suppgid = suppgid;
- b->uid = current_uid();
- b->gid = current_gid();
- b->fsuid = current_fsuid();
- b->fsgid = current_fsgid();
+ b->uid = from_kuid(&init_user_ns, current_uid());
+ b->gid = from_kgid(&init_user_ns, current_gid());
+ b->fsuid = from_kuid(&init_user_ns, current_fsuid());
+ b->fsgid = from_kgid(&init_user_ns, current_fsgid());
b->capability = cfs_curproc_cap_pack();
}
@@ -219,8 +219,8 @@ void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
/* XXX do something about time, uid, gid */
rec->cr_opcode = REINT_OPEN;
- rec->cr_fsuid = current_fsuid();
- rec->cr_fsgid = current_fsgid();
+ rec->cr_fsuid = from_kuid(&init_user_ns, current_fsuid());
+ rec->cr_fsgid = from_kgid(&init_user_ns, current_fsgid());
rec->cr_cap = cfs_curproc_cap_pack();
if (op_data != NULL) {
rec->cr_fid1 = op_data->op_fid1;
@@ -299,16 +299,16 @@ static void mdc_setattr_pack_rec(struct mdt_rec_setattr *rec,
struct md_op_data *op_data)
{
rec->sa_opcode = REINT_SETATTR;
- rec->sa_fsuid = current_fsuid();
- rec->sa_fsgid = current_fsgid();
+ rec->sa_fsuid = from_kuid(&init_user_ns, current_fsuid());
+ rec->sa_fsgid = from_kgid(&init_user_ns, current_fsgid());
rec->sa_cap = cfs_curproc_cap_pack();
rec->sa_suppgid = -1;
rec->sa_fid = op_data->op_fid1;
rec->sa_valid = attr_pack(op_data->op_attr.ia_valid);
rec->sa_mode = op_data->op_attr.ia_mode;
- rec->sa_uid = op_data->op_attr.ia_uid;
- rec->sa_gid = op_data->op_attr.ia_gid;
+ rec->sa_uid = from_kuid(&init_user_ns, op_data->op_attr.ia_uid);
+ rec->sa_gid = from_kgid(&init_user_ns, op_data->op_attr.ia_gid);
rec->sa_size = op_data->op_attr.ia_size;
rec->sa_blocks = op_data->op_attr_blocks;
rec->sa_atime = LTIME_S(op_data->op_attr.ia_atime);
@@ -316,8 +316,9 @@ static void mdc_setattr_pack_rec(struct mdt_rec_setattr *rec,
rec->sa_ctime = LTIME_S(op_data->op_attr.ia_ctime);
rec->sa_attr_flags = ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags;
if ((op_data->op_attr.ia_valid & ATTR_GID) &&
- current_is_in_group(op_data->op_attr.ia_gid))
- rec->sa_suppgid = op_data->op_attr.ia_gid;
+ in_group_p(op_data->op_attr.ia_gid))
+ rec->sa_suppgid =
+ from_kgid(&init_user_ns, op_data->op_attr.ia_gid);
else
rec->sa_suppgid = op_data->op_suppgids[0];
@@ -504,11 +505,11 @@ void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data)
static int mdc_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
{
int rc;
- ENTRY;
+
client_obd_list_lock(&cli->cl_loi_list_lock);
rc = list_empty(&mcw->mcw_entry);
client_obd_list_unlock(&cli->cl_loi_list_lock);
- RETURN(rc);
+ return rc;
};
/* We record requests in flight in cli->cl_r_in_flight here.
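
The __mdc_pack_body(), mdc_open_pack() and mdc_setattr_pack_rec() hunks above all make the same substitution: the kernel-internal kuid_t/kgid_t values from current_fsuid()/current_fsgid() (and from the kuid_t/kgid_t fields of struct iattr) are translated to plain numeric ids through the initial user namespace with from_kuid()/from_kgid() before being stored in the wire records. A condensed sketch of that conversion against a hypothetical wire structure:

#include <linux/types.h>
#include <linux/cred.h>
#include <linux/uidgid.h>
#include <linux/user_namespace.h>

struct demo_wire_body {                 /* stand-in for mdt_body's id fields */
        __u32 uid, gid, fsuid, fsgid;
};

static void demo_pack_ids(struct demo_wire_body *b)
{
        /* kuid_t/kgid_t are opaque; from_kuid()/from_kgid() map them to the
         * numeric ids as seen from &init_user_ns, which is what goes on the
         * wire. */
        b->uid   = from_kuid(&init_user_ns, current_uid());
        b->gid   = from_kgid(&init_user_ns, current_gid());
        b->fsuid = from_kuid(&init_user_ns, current_fsuid());
        b->fsgid = from_kgid(&init_user_ns, current_fsgid());
}
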
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index 1cc90b635fb..fb5a9959bf7 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -115,13 +115,12 @@ int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
{
struct ldlm_lock *lock;
struct inode *new_inode = data;
- ENTRY;
if(bits)
*bits = 0;
if (!*lockh)
- RETURN(0);
+ return 0;
lock = ldlm_handle2lock((struct lustre_handle *)lockh);
@@ -144,7 +143,7 @@ int mdc_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
unlock_res_and_lock(lock);
LDLM_LOCK_PUT(lock);
- RETURN(0);
+ return 0;
}
ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags,
@@ -154,12 +153,11 @@ ldlm_mode_t mdc_lock_match(struct obd_export *exp, __u64 flags,
{
struct ldlm_res_id res_id;
ldlm_mode_t rc;
- ENTRY;
fid_build_reg_res_name(fid, &res_id);
rc = ldlm_lock_match(class_exp2obd(exp)->obd_namespace, flags,
&res_id, type, policy, mode, lockh, 0);
- RETURN(rc);
+ return rc;
}
int mdc_cancel_unused(struct obd_export *exp,
@@ -173,12 +171,10 @@ int mdc_cancel_unused(struct obd_export *exp,
struct obd_device *obd = class_exp2obd(exp);
int rc;
- ENTRY;
-
fid_build_reg_res_name(fid, &res_id);
rc = ldlm_cli_cancel_unused_resource(obd->obd_namespace, &res_id,
policy, mode, flags, opaque);
- RETURN(rc);
+ return rc;
}
int mdc_null_inode(struct obd_export *exp,
@@ -187,7 +183,6 @@ int mdc_null_inode(struct obd_export *exp,
struct ldlm_res_id res_id;
struct ldlm_resource *res;
struct ldlm_namespace *ns = class_exp2obd(exp)->obd_namespace;
- ENTRY;
LASSERTF(ns != NULL, "no namespace passed\n");
@@ -195,14 +190,14 @@ int mdc_null_inode(struct obd_export *exp,
res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
if(res == NULL)
- RETURN(0);
+ return 0;
lock_res(res);
res->lr_lvb_inode = NULL;
unlock_res(res);
ldlm_resource_putref(res);
- RETURN(0);
+ return 0;
}
/* find any ldlm lock of the inode in mdc
@@ -215,16 +210,15 @@ int mdc_find_cbdata(struct obd_export *exp,
{
struct ldlm_res_id res_id;
int rc = 0;
- ENTRY;
fid_build_reg_res_name((struct lu_fid*)fid, &res_id);
rc = ldlm_resource_iterate(class_exp2obd(exp)->obd_namespace, &res_id,
it, data);
if (rc == LDLM_ITER_STOP)
- RETURN(1);
+ return 1;
else if (rc == LDLM_ITER_CONTINUE)
- RETURN(0);
- RETURN(rc);
+ return 0;
+ return rc;
}
static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc)
@@ -281,7 +275,6 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
int count = 0;
int mode;
int rc;
- ENTRY;
it->it_create_mode = (it->it_create_mode & ~S_IFMT) | S_IFREG;
@@ -314,7 +307,7 @@ static struct ptlrpc_request *mdc_intent_open_pack(struct obd_export *exp,
&RQF_LDLM_INTENT_OPEN);
if (req == NULL) {
ldlm_lock_list_put(&cancels, l_bl_ast, count);
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
}
/* parent capability */
@@ -362,12 +355,11 @@ static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp,
struct obd_device *obddev = class_exp2obd(exp);
struct ldlm_intent *lit;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_LDLM_INTENT_UNLINK);
if (req == NULL)
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
@@ -376,7 +368,7 @@ static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp,
rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
if (rc) {
ptlrpc_request_free(req);
- RETURN(ERR_PTR(rc));
+ return ERR_PTR(rc);
}
/* pack the intent */
@@ -391,7 +383,7 @@ static struct ptlrpc_request *mdc_intent_unlink_pack(struct obd_export *exp,
req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
obddev->u.cli.cl_max_mds_cookiesize);
ptlrpc_request_set_replen(req);
- RETURN(req);
+ return req;
}
static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp,
@@ -407,12 +399,11 @@ static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp,
OBD_MD_FLRMTPERM : OBD_MD_FLACL);
struct ldlm_intent *lit;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_LDLM_INTENT_GETATTR);
if (req == NULL)
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
@@ -421,7 +412,7 @@ static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp,
rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
if (rc) {
ptlrpc_request_free(req);
- RETURN(ERR_PTR(rc));
+ return ERR_PTR(rc);
}
/* pack the intent */
@@ -438,7 +429,7 @@ static struct ptlrpc_request *mdc_intent_getattr_pack(struct obd_export *exp,
req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER,
sizeof(struct mdt_remote_perm));
ptlrpc_request_set_replen(req);
- RETURN(req);
+ return req;
}
static struct ptlrpc_request *mdc_intent_layout_pack(struct obd_export *exp,
@@ -450,18 +441,17 @@ static struct ptlrpc_request *mdc_intent_layout_pack(struct obd_export *exp,
struct ldlm_intent *lit;
struct layout_intent *layout;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_LDLM_INTENT_LAYOUT);
if (req == NULL)
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, 0);
rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
if (rc) {
ptlrpc_request_free(req);
- RETURN(ERR_PTR(rc));
+ return ERR_PTR(rc);
}
/* pack the intent */
@@ -477,7 +467,7 @@ static struct ptlrpc_request *mdc_intent_layout_pack(struct obd_export *exp,
req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
obd->u.cli.cl_max_mds_easize);
ptlrpc_request_set_replen(req);
- RETURN(req);
+ return req;
}
static struct ptlrpc_request *
@@ -485,21 +475,20 @@ mdc_enqueue_pack(struct obd_export *exp, int lvb_len)
{
struct ptlrpc_request *req;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE);
if (req == NULL)
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
if (rc) {
ptlrpc_request_free(req);
- RETURN(ERR_PTR(rc));
+ return ERR_PTR(rc);
}
req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, lvb_len);
ptlrpc_request_set_replen(req);
- RETURN(req);
+ return req;
}
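
All of the mdc_intent_*_pack() helpers above share one shape: allocate the request, and on any later setup failure free it again and return the errno encoded in the pointer via ERR_PTR(), so mdc_enqueue() can tell success from failure with IS_ERR()/PTR_ERR(). A stripped-down sketch of that error-pointer convention; the request type and the failing setup step are hypothetical:

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct demo_req { int payload; };

static struct demo_req *demo_pack(int fail_setup)
{
        struct demo_req *req;
        int rc;

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (req == NULL)
                return ERR_PTR(-ENOMEM);

        rc = fail_setup ? -EPROTO : 0;  /* stands in for the prep step */
        if (rc) {
                kfree(req);
                return ERR_PTR(rc);
        }
        return req;
}

/* Caller side, mirroring mdc_enqueue():
 *
 *      req = demo_pack(0);
 *      if (IS_ERR(req))
 *              return PTR_ERR(req);
 */
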
static int mdc_finish_enqueue(struct obd_export *exp,
@@ -516,7 +505,6 @@ static int mdc_finish_enqueue(struct obd_export *exp,
struct ldlm_lock *lock;
void *lvb_data = NULL;
int lvb_len = 0;
- ENTRY;
LASSERT(rc >= 0);
/* Similarly, if we're going to replay this request, we don't want to
@@ -579,7 +567,7 @@ static int mdc_finish_enqueue(struct obd_export *exp,
body = req_capsule_server_get(pill, &RMF_MDT_BODY);
if (body == NULL) {
CERROR ("Can't swab mdt_body\n");
- RETURN (-EPROTO);
+ return -EPROTO;
}
if (it_disposition(it, DISP_OPEN_OPEN) &&
@@ -605,7 +593,7 @@ static int mdc_finish_enqueue(struct obd_export *exp,
eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
body->eadatasize);
if (eadata == NULL)
- RETURN(-EPROTO);
+ return -EPROTO;
/* save lvb data and length in case this is for layout
* lock */
@@ -649,14 +637,14 @@ static int mdc_finish_enqueue(struct obd_export *exp,
perm = req_capsule_server_swab_get(pill, &RMF_ACL,
lustre_swab_mdt_remote_perm);
if (perm == NULL)
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (body->valid & OBD_MD_FLMDSCAPA) {
struct lustre_capa *capa, *p;
capa = req_capsule_server_get(pill, &RMF_CAPA1);
if (capa == NULL)
- RETURN(-EPROTO);
+ return -EPROTO;
if (it->it_op & IT_OPEN) {
/* client fid capa will be checked in replay */
@@ -670,7 +658,7 @@ static int mdc_finish_enqueue(struct obd_export *exp,
capa = req_capsule_server_get(pill, &RMF_CAPA2);
if (capa == NULL)
- RETURN(-EPROTO);
+ return -EPROTO;
}
} else if (it->it_op & IT_LAYOUT) {
/* maybe the lock was granted right away and layout
@@ -680,7 +668,7 @@ static int mdc_finish_enqueue(struct obd_export *exp,
lvb_data = req_capsule_server_sized_get(pill,
&RMF_DLM_LVB, lvb_len);
if (lvb_data == NULL)
- RETURN(-EPROTO);
+ return -EPROTO;
}
}
@@ -695,7 +683,7 @@ static int mdc_finish_enqueue(struct obd_export *exp,
OBD_ALLOC_LARGE(lmm, lvb_len);
if (lmm == NULL) {
LDLM_LOCK_PUT(lock);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
memcpy(lmm, lvb_data, lvb_len);
@@ -713,7 +701,7 @@ static int mdc_finish_enqueue(struct obd_export *exp,
if (lock != NULL)
LDLM_LOCK_PUT(lock);
- RETURN(rc);
+ return rc;
}
/* We always reserve enough space in the reply packet for a stripe MD, because
@@ -738,7 +726,6 @@ int mdc_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
int generation, resends = 0;
struct ldlm_reply *lockrep;
enum lvb_type lvb_type = 0;
- ENTRY;
LASSERTF(!it || einfo->ei_type == LDLM_IBITS, "lock type %d\n",
einfo->ei_type);
@@ -780,17 +767,17 @@ resend:
req = mdc_enqueue_pack(exp, 0);
} else if (it->it_op & IT_LAYOUT) {
if (!imp_connect_lvb_type(class_exp2cliimp(exp)))
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
req = mdc_intent_layout_pack(exp, it, op_data);
lvb_type = LVB_T_LAYOUT;
} else {
LBUG();
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (IS_ERR(req))
- RETURN(PTR_ERR(req));
+ return PTR_ERR(req);
if (req != NULL && it && it->it_op & IT_CREAT)
/* ask ptlrpc not to resend on EINPROGRESS since we have our own
@@ -813,7 +800,7 @@ resend:
mdc_put_rpc_lock(obddev->u.cli.cl_rpc_lock, it);
mdc_clear_replay_flag(req, 0);
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
}
}
@@ -823,8 +810,14 @@ resend:
/* For flock requests we immediately return without further
delay and let the caller deal with the rest, since the rest of
this function's metadata processing makes no sense for flock
- requests anyway */
- RETURN(rc);
+ requests anyway. But if communication with the server failed
+ (ETIMEDOUT) or the request was interrupted by a signal (EINTR),
+ we cannot rely on the caller to retry; this matters mainly for
+ F_UNLCK requests (explicit, or generated by the kernel to clean
+ up a task's flocks on exit), which must not be dropped */
+ if ((rc == -EINTR) || (rc == -ETIMEDOUT))
+ goto resend;
+ return rc;
}
mdc_exit_request(&obddev->u.cli);
@@ -834,12 +827,15 @@ resend:
CERROR("ldlm_cli_enqueue: %d\n", rc);
mdc_clear_replay_flag(req, rc);
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
}
lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
LASSERT(lockrep != NULL);
+ lockrep->lock_policy_res2 =
+ ptlrpc_status_ntoh(lockrep->lock_policy_res2);
+
/* Retry the create infinitely when we get -EINPROGRESS from
* server. This is required by the new quota design. */
if (it && it->it_op & IT_CREAT &&
@@ -856,7 +852,7 @@ resend:
goto resend;
} else {
CDEBUG(D_HA, "resend cross eviction\n");
- RETURN(-EIO);
+ return -EIO;
}
}
@@ -868,7 +864,7 @@ resend:
}
ptlrpc_req_finished(req);
}
- RETURN(rc);
+ return rc;
}
static int mdc_finish_intent_lock(struct obd_export *exp,
@@ -882,7 +878,6 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
struct ldlm_lock *lock;
int rc;
-
LASSERT(request != NULL);
LASSERT(request != LP_POISON);
LASSERT(request->rq_repmsg != LP_POISON);
@@ -891,11 +886,11 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
/* The server failed before it even started executing the
* intent, i.e. because it couldn't unpack the request. */
LASSERT(it->d.lustre.it_status != 0);
- RETURN(it->d.lustre.it_status);
+ return it->d.lustre.it_status;
}
rc = it_open_error(DISP_IT_EXECD, it);
if (rc)
- RETURN(rc);
+ return rc;
mdt_body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
LASSERT(mdt_body != NULL); /* mdc_enqueue checked */
@@ -917,13 +912,13 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
CDEBUG(D_DENTRY, "Found stale data "DFID"("DFID")/"DFID
"\n", PFID(&op_data->op_fid2),
PFID(&op_data->op_fid2), PFID(&mdt_body->fid1));
- RETURN(-ESTALE);
+ return -ESTALE;
}
}
rc = it_open_error(DISP_LOOKUP_EXECD, it);
if (rc)
- RETURN(rc);
+ return rc;
/* keep requests around for the multiple phases of the call
* this shows the DISP_XX must guarantee we make it into the call
@@ -984,7 +979,7 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
CDEBUG(D_DENTRY,"D_IT dentry %.*s intent: %s status %d disp %x rc %d\n",
op_data->op_namelen, op_data->op_name, ldlm_it2str(it->it_op),
it->d.lustre.it_status, it->d.lustre.it_disposition, rc);
- RETURN(rc);
+ return rc;
}
int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
@@ -997,7 +992,6 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
struct lustre_handle lockh;
ldlm_policy_data_t policy;
ldlm_mode_t mode;
- ENTRY;
if (it->d.lustre.it_lock_handle) {
lockh.cookie = it->d.lustre.it_lock_handle;
@@ -1029,7 +1023,7 @@ int mdc_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
it->d.lustre.it_lock_mode = 0;
}
- RETURN(!!mode);
+ return !!mode;
}
/*
@@ -1067,7 +1061,7 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
{
struct lustre_handle lockh;
int rc = 0;
- ENTRY;
+
LASSERT(it);
CDEBUG(D_DLMTRACE, "(name: %.*s,"DFID") in obj "DFID
@@ -1087,7 +1081,7 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
/* Only return failure if it was not GETATTR by cfid
(from inode_revalidate) */
if (rc || op_data->op_namelen != 0)
- RETURN(rc);
+ return rc;
}
/* lookup_it may be called only after revalidate_it has run, because
@@ -1099,22 +1093,25 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
* this and use the request from revalidate. In this case, revalidate
* never dropped its reference, so the refcounts are all OK */
if (!it_disposition(it, DISP_ENQ_COMPLETE)) {
- struct ldlm_enqueue_info einfo =
- { LDLM_IBITS, it_to_lock_mode(it), cb_blocking,
- ldlm_completion_ast, NULL, NULL, NULL };
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = LDLM_IBITS,
+ .ei_mode = it_to_lock_mode(it),
+ .ei_cb_bl = cb_blocking,
+ .ei_cb_cp = ldlm_completion_ast,
+ };
/* For case if upper layer did not alloc fid, do it now. */
if (!fid_is_sane(&op_data->op_fid2) && it->it_op & IT_CREAT) {
rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
if (rc < 0) {
CERROR("Can't alloc new fid, rc %d\n", rc);
- RETURN(rc);
+ return rc;
}
}
rc = mdc_enqueue(exp, &einfo, it, op_data, &lockh,
lmm, lmmsize, NULL, extra_lock_flags);
if (rc < 0)
- RETURN(rc);
+ return rc;
} else if (!fid_is_sane(&op_data->op_fid2) ||
!(it->it_create_mode & M_CHECK_STALE)) {
/* DISP_ENQ_COMPLETE set means there is extra reference on
@@ -1125,7 +1122,7 @@ int mdc_intent_lock(struct obd_export *exp, struct md_op_data *op_data,
}
*reqp = it->d.lustre.it_data;
rc = mdc_finish_intent_lock(exp, *reqp, op_data, it, &lockh);
- RETURN(rc);
+ return rc;
}
static int mdc_intent_getattr_async_interpret(const struct lu_env *env,
@@ -1139,8 +1136,8 @@ static int mdc_intent_getattr_async_interpret(const struct lu_env *env,
struct lookup_intent *it;
struct lustre_handle *lockh;
struct obd_device *obddev;
+ struct ldlm_reply *lockrep;
__u64 flags = LDLM_FL_HAS_INTENT;
- ENTRY;
it = &minfo->mi_it;
lockh = &minfo->mi_lockh;
@@ -1159,12 +1156,17 @@ static int mdc_intent_getattr_async_interpret(const struct lu_env *env,
GOTO(out, rc);
}
+ lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
+ LASSERT(lockrep != NULL);
+
+ lockrep->lock_policy_res2 =
+ ptlrpc_status_ntoh(lockrep->lock_policy_res2);
+
rc = mdc_finish_enqueue(exp, req, einfo, it, lockh, rc);
if (rc)
GOTO(out, rc);
rc = mdc_finish_intent_lock(exp, req, &minfo->mi_data, it, lockh);
- EXIT;
out:
OBD_FREE_PTR(einfo);
@@ -1191,7 +1193,6 @@ int mdc_intent_getattr_async(struct obd_export *exp,
};
int rc = 0;
__u64 flags = LDLM_FL_HAS_INTENT;
- ENTRY;
CDEBUG(D_DLMTRACE,"name: %.*s in inode "DFID", intent: %s flags %#o\n",
op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
@@ -1200,12 +1201,12 @@ int mdc_intent_getattr_async(struct obd_export *exp,
fid_build_reg_res_name(&op_data->op_fid1, &res_id);
req = mdc_intent_getattr_pack(exp, it, op_data);
if (!req)
- RETURN(-ENOMEM);
+ return -ENOMEM;
rc = mdc_enter_request(&obddev->u.cli);
if (rc != 0) {
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
}
rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, &policy, &flags, NULL,
@@ -1213,7 +1214,7 @@ int mdc_intent_getattr_async(struct obd_export *exp,
if (rc < 0) {
mdc_exit_request(&obddev->u.cli);
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
}
CLASSERT(sizeof(*ga) <= sizeof(req->rq_async_args));
@@ -1225,5 +1226,5 @@ int mdc_intent_getattr_async(struct obd_export *exp,
req->rq_interpret_reply = mdc_intent_getattr_async_interpret;
ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
- RETURN(0);
+ return 0;
}
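
One of the non-mechanical changes in mdc_locks.c above converts the on-stack struct ldlm_enqueue_info in mdc_intent_lock() from a positional initializer to C99 designated initializers, so each callback is bound to a named field and everything left unmentioned defaults to zero/NULL. The difference on a cut-down, purely illustrative struct:

#include <linux/stddef.h>

static int demo_blocking(void)   { return 0; }
static int demo_completion(void) { return 0; }

struct demo_enqueue_info {
        int type;
        int mode;
        int (*cb_bl)(void);
        int (*cb_cp)(void);
        int (*cb_gl)(void);
        void *cbdata;
};

/* Positional form: every slot up to the last used one must appear in
 * declaration order, including the trailing NULLs. */
static struct demo_enqueue_info old_style =
        { 1, 2, demo_blocking, demo_completion, NULL, NULL };

/* Designated form: only the meaningful fields are named; the rest are
 * implicitly zero/NULL. */
static struct demo_enqueue_info new_style = {
        .type  = 1,
        .mode  = 2,
        .cb_bl = demo_blocking,
        .cb_cp = demo_completion,
};
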
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_reint.c b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
index 5e25a07c52b..9f3a345f34e 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_reint.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_reint.c
@@ -75,7 +75,6 @@ int mdc_resource_get_unused(struct obd_export *exp, struct lu_fid *fid,
struct ldlm_res_id res_id;
struct ldlm_resource *res;
int count;
- ENTRY;
/* Return, i.e. cancel nothing, only if ELC is supported (flag in
* export) but disabled through procfs (flag in NS).
@@ -84,13 +83,13 @@ int mdc_resource_get_unused(struct obd_export *exp, struct lu_fid *fid,
* when we still want to cancel locks in advance and just cancel them
* locally, without sending any RPC. */
if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
- RETURN(0);
+ return 0;
fid_build_reg_res_name(fid, &res_id);
res = ldlm_resource_get(exp->exp_obd->obd_namespace,
NULL, &res_id, 0, 0);
if (res == NULL)
- RETURN(0);
+ return 0;
LDLM_RESOURCE_ADDREF(res);
/* Initialize ibits lock policy. */
policy.l_inodebits.bits = bits;
@@ -98,7 +97,7 @@ int mdc_resource_get_unused(struct obd_export *exp, struct lu_fid *fid,
mode, 0, 0, NULL);
LDLM_RESOURCE_DELREF(res);
ldlm_resource_putref(res);
- RETURN(count);
+ return count;
}
int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
@@ -111,7 +110,6 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
struct obd_device *obd = exp->exp_obd;
int count = 0, rc;
__u64 bits;
- ENTRY;
LASSERT(op_data != NULL);
@@ -127,7 +125,7 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
&RQF_MDS_REINT_SETATTR);
if (req == NULL) {
ldlm_lock_list_put(&cancels, l_bl_ast, count);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
if ((op_data->op_flags & (MF_SOM_CHANGE | MF_EPOCH_OPEN)) == 0)
@@ -140,7 +138,7 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
rpc_lock = obd->u.cli.cl_rpc_lock;
@@ -203,7 +201,7 @@ int mdc_setattr(struct obd_export *exp, struct md_op_data *op_data,
obd_mod_put(*mod);
req->rq_commit_cb(req);
}
- RETURN(rc);
+ return rc;
}
int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
@@ -217,7 +215,6 @@ int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
struct obd_import *import = exp->exp_obd->u.cli.cl_import;
int generation = import->imp_generation;
LIST_HEAD(cancels);
- ENTRY;
/* For case if upper layer did not alloc fid, do it now. */
if (!fid_is_sane(&op_data->op_fid2)) {
@@ -228,7 +225,7 @@ int mdc_create(struct obd_export *exp, struct md_op_data *op_data,
rc = mdc_fid_alloc(exp, &op_data->op_fid2, op_data);
if (rc < 0) {
CERROR("Can't alloc new fid, rc %d\n", rc);
- RETURN(rc);
+ return rc;
}
}
@@ -244,7 +241,7 @@ rebuild:
&RQF_MDS_REINT_CREATE_RMT_ACL);
if (req == NULL) {
ldlm_lock_list_put(&cancels, l_bl_ast, count);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
@@ -255,7 +252,7 @@ rebuild:
rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
/*
@@ -298,7 +295,7 @@ rebuild:
goto rebuild;
} else {
CDEBUG(D_HA, "resend cross eviction\n");
- RETURN(-EIO);
+ return -EIO;
}
} else if (rc == 0) {
struct mdt_body *body;
@@ -315,7 +312,7 @@ rebuild:
}
*request = req;
- RETURN(rc);
+ return rc;
}
int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
@@ -325,7 +322,6 @@ int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
struct obd_device *obd = class_exp2obd(exp);
struct ptlrpc_request *req = *request;
int count = 0, rc;
- ENTRY;
LASSERT(req == NULL);
@@ -345,7 +341,7 @@ int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
&RQF_MDS_REINT_UNLINK);
if (req == NULL) {
ldlm_lock_list_put(&cancels, l_bl_ast, count);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
@@ -354,7 +350,7 @@ int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
mdc_unlink_pack(req, op_data);
@@ -370,7 +366,7 @@ int mdc_unlink(struct obd_export *exp, struct md_op_data *op_data,
rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL);
if (rc == -ERESTARTSYS)
rc = 0;
- RETURN(rc);
+ return rc;
}
int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
@@ -380,7 +376,6 @@ int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
struct obd_device *obd = exp->exp_obd;
struct ptlrpc_request *req;
int count = 0, rc;
- ENTRY;
if ((op_data->op_flags & MF_MDC_CANCEL_FID2) &&
(fid_is_sane(&op_data->op_fid2)))
@@ -396,7 +391,7 @@ int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_LINK);
if (req == NULL) {
ldlm_lock_list_put(&cancels, l_bl_ast, count);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
mdc_set_capa_size(req, &RMF_CAPA2, op_data->op_capa2);
@@ -406,7 +401,7 @@ int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
mdc_link_pack(req, op_data);
@@ -417,7 +412,7 @@ int mdc_link(struct obd_export *exp, struct md_op_data *op_data,
if (rc == -ERESTARTSYS)
rc = 0;
- RETURN(rc);
+ return rc;
}
int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
@@ -428,7 +423,6 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
struct obd_device *obd = exp->exp_obd;
struct ptlrpc_request *req;
int count = 0, rc;
- ENTRY;
if ((op_data->op_flags & MF_MDC_CANCEL_FID1) &&
(fid_is_sane(&op_data->op_fid1)))
@@ -455,7 +449,7 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
&RQF_MDS_REINT_RENAME);
if (req == NULL) {
ldlm_lock_list_put(&cancels, l_bl_ast, count);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
@@ -466,7 +460,7 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
if (exp_connect_cancelset(exp) && req)
@@ -485,5 +479,5 @@ int mdc_rename(struct obd_export *exp, struct md_op_data *op_data,
if (rc == -ERESTARTSYS)
rc = 0;
- RETURN(rc);
+ return rc;
}
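
The retry logic visible in the mdc_create() hunks above (and echoed in the mdc_enqueue() hunks earlier) samples the import generation before building the request; on -EINPROGRESS it rebuilds and resends only while that generation is unchanged, because a bumped generation means the client was evicted and reconnected in between, in which case it gives up with -EIO. A schematic of that guard with illustrative types; demo_send() stands in for building and sending the RPC:

#include <linux/errno.h>

struct demo_import { int generation; };

static int demo_send(struct demo_import *imp)
{
        /* A real implementation would build and queue the RPC; the demo
         * just reports success. */
        (void)imp;
        return 0;
}

static int demo_create_with_retry(struct demo_import *imp)
{
        int generation = imp->generation;       /* connection instance at entry */
        int rc;

rebuild:
        rc = demo_send(imp);
        if (rc == -EINPROGRESS) {
                if (generation == imp->generation)
                        goto rebuild;           /* same connection: retry */
                return -EIO;                    /* evicted meanwhile: give up */
        }
        return rc;
}
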
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index 3cf9d8d3f2e..ed3a7a05557 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -65,21 +65,20 @@ int mdc_unpack_capa(struct obd_export *exp, struct ptlrpc_request *req,
{
struct lustre_capa *capa;
struct obd_capa *c;
- ENTRY;
/* swabbed already in mdc_enqueue */
capa = req_capsule_server_get(&req->rq_pill, field);
if (capa == NULL)
- RETURN(-EPROTO);
+ return -EPROTO;
c = alloc_capa(CAPA_SITE_CLIENT);
if (IS_ERR(c)) {
CDEBUG(D_INFO, "alloc capa failed!\n");
- RETURN(PTR_ERR(c));
+ return PTR_ERR(c);
} else {
c->c_capa = *capa;
*oc = c;
- RETURN(0);
+ return 0;
}
}
@@ -109,12 +108,11 @@ static int send_getstatus(struct obd_import *imp, struct lu_fid *rootfid,
struct ptlrpc_request *req;
struct mdt_body *body;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_GETSTATUS,
LUSTRE_MDS_VERSION, MDS_GETSTATUS);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
mdc_pack_body(req, NULL, NULL, 0, 0, -1, 0);
lustre_msg_add_flags(req->rq_reqmsg, msg_flags);
@@ -141,7 +139,6 @@ static int send_getstatus(struct obd_import *imp, struct lu_fid *rootfid,
"root fid="DFID", last_committed="LPU64"\n",
PFID(rootfid),
lustre_msg_get_last_committed(req->rq_repmsg));
- EXIT;
out:
ptlrpc_req_finished(req);
return rc;
@@ -172,17 +169,16 @@ static int mdc_getattr_common(struct obd_export *exp,
struct mdt_body *body;
void *eadata;
int rc;
- ENTRY;
/* Request message already built. */
rc = ptlrpc_queue_wait(req);
if (rc != 0)
- RETURN(rc);
+ return rc;
/* sanity check for the reply */
body = req_capsule_server_get(pill, &RMF_MDT_BODY);
if (body == NULL)
- RETURN(-EPROTO);
+ return -EPROTO;
CDEBUG(D_NET, "mode: %o\n", body->mode);
@@ -192,7 +188,7 @@ static int mdc_getattr_common(struct obd_export *exp,
eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
body->eadatasize);
if (eadata == NULL)
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (body->valid & OBD_MD_FLRMTPERM) {
@@ -202,17 +198,17 @@ static int mdc_getattr_common(struct obd_export *exp,
perm = req_capsule_server_swab_get(pill, &RMF_ACL,
lustre_swab_mdt_remote_perm);
if (perm == NULL)
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (body->valid & OBD_MD_FLMDSCAPA) {
struct lustre_capa *capa;
capa = req_capsule_server_get(pill, &RMF_CAPA1);
if (capa == NULL)
- RETURN(-EPROTO);
+ return -EPROTO;
}
- RETURN(0);
+ return 0;
}
int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
@@ -220,24 +216,23 @@ int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
{
struct ptlrpc_request *req;
int rc;
- ENTRY;
/* Single MDS without an LMV case */
if (op_data->op_flags & MF_GET_MDT_IDX) {
op_data->op_mds = 0;
- RETURN(0);
+ return 0;
}
*request = NULL;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1,
@@ -257,7 +252,7 @@ int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
ptlrpc_req_finished(req);
else
*request = req;
- RETURN(rc);
+ return rc;
}
int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
@@ -265,13 +260,12 @@ int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
{
struct ptlrpc_request *req;
int rc;
- ENTRY;
*request = NULL;
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_MDS_GETATTR_NAME);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
@@ -280,7 +274,7 @@ int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR_NAME);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1,
@@ -303,7 +297,7 @@ int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
ptlrpc_req_finished(req);
else
*request = req;
- RETURN(rc);
+ return rc;
}
static int mdc_is_subdir(struct obd_export *exp,
@@ -314,14 +308,12 @@ static int mdc_is_subdir(struct obd_export *exp,
struct ptlrpc_request *req;
int rc;
- ENTRY;
-
*request = NULL;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_MDS_IS_SUBDIR, LUSTRE_MDS_VERSION,
MDS_IS_SUBDIR);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
mdc_is_subdir_pack(req, pfid, cfid, 0);
ptlrpc_request_set_replen(req);
@@ -331,7 +323,7 @@ static int mdc_is_subdir(struct obd_export *exp,
ptlrpc_req_finished(req);
else
*request = req;
- RETURN(rc);
+ return rc;
}
static int mdc_xattr_common(struct obd_export *exp,const struct req_format *fmt,
@@ -345,12 +337,11 @@ static int mdc_xattr_common(struct obd_export *exp,const struct req_format *fmt,
int xattr_namelen = 0;
char *tmp;
int rc;
- ENTRY;
*request = NULL;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), fmt);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
mdc_set_capa_size(req, &RMF_CAPA1, oc);
if (xattr_name) {
@@ -367,7 +358,7 @@ static int mdc_xattr_common(struct obd_export *exp,const struct req_format *fmt,
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, opcode);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
if (opcode == MDS_REINT) {
@@ -377,12 +368,8 @@ static int mdc_xattr_common(struct obd_export *exp,const struct req_format *fmt,
sizeof(struct mdt_rec_reint));
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
rec->sx_opcode = REINT_SETXATTR;
- /* TODO:
- * cfs_curproc_fs{u,g}id() should replace
- * current->fs{u,g}id for portability.
- */
- rec->sx_fsuid = current_fsuid();
- rec->sx_fsgid = current_fsgid();
+ rec->sx_fsuid = from_kuid(&init_user_ns, current_fsuid());
+ rec->sx_fsgid = from_kgid(&init_user_ns, current_fsgid());
rec->sx_cap = cfs_curproc_cap_pack();
rec->sx_suppgid1 = suppgid;
rec->sx_suppgid2 = -1;
@@ -424,7 +411,7 @@ static int mdc_xattr_common(struct obd_export *exp,const struct req_format *fmt,
ptlrpc_req_finished(req);
else
*request = req;
- RETURN(rc);
+ return rc;
}
int mdc_setxattr(struct obd_export *exp, const struct lu_fid *fid,
@@ -457,32 +444,31 @@ static int mdc_unpack_acl(struct ptlrpc_request *req, struct lustre_md *md)
struct posix_acl *acl;
void *buf;
int rc;
- ENTRY;
if (!body->aclsize)
- RETURN(0);
+ return 0;
buf = req_capsule_server_sized_get(pill, &RMF_ACL, body->aclsize);
if (!buf)
- RETURN(-EPROTO);
+ return -EPROTO;
acl = posix_acl_from_xattr(&init_user_ns, buf, body->aclsize);
if (IS_ERR(acl)) {
rc = PTR_ERR(acl);
CERROR("convert xattr to acl: %d\n", rc);
- RETURN(rc);
+ return rc;
}
rc = posix_acl_valid(acl);
if (rc) {
CERROR("validate acl: %d\n", rc);
posix_acl_release(acl);
- RETURN(rc);
+ return rc;
}
md->posix_acl = acl;
- RETURN(0);
+ return 0;
}
#else
#define mdc_unpack_acl(req, md) 0
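
mdc_unpack_acl() above converts the raw ACL extended-attribute blob from the reply buffer into an in-kernel struct posix_acl and validates it before caching it on the lustre_md. The same three steps, condensed into a standalone sketch; the buffer and size are assumed to come from the reply, and posix_acl_valid() is called in the single-argument form used in the hunk above:

#include <linux/err.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/user_namespace.h>

static int demo_unpack_acl(const void *buf, size_t size,
                           struct posix_acl **out)
{
        struct posix_acl *acl;
        int rc;

        acl = posix_acl_from_xattr(&init_user_ns, buf, size);
        if (IS_ERR(acl))
                return PTR_ERR(acl);            /* malformed xattr payload */

        rc = posix_acl_valid(acl);              /* sane entries and ordering? */
        if (rc) {
                posix_acl_release(acl);
                return rc;
        }

        *out = acl;                             /* caller now owns the reference */
        return 0;
}
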
@@ -494,7 +480,6 @@ int mdc_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req,
{
struct req_capsule *pill = &req->rq_pill;
int rc;
- ENTRY;
LASSERT(md);
memset(md, 0, sizeof(*md));
@@ -546,7 +531,7 @@ int mdc_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req,
if (md->body->eadatasize == 0) {
CDEBUG(D_INFO, "OBD_MD_FLDIREA is set, "
"but eadatasize 0\n");
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (md->body->valid & OBD_MD_MEA) {
lmvsize = md->body->eadatasize;
@@ -611,7 +596,6 @@ int mdc_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req,
md->oss_capa = oc;
}
- EXIT;
out:
if (rc) {
if (md->oss_capa) {
@@ -633,8 +617,7 @@ out:
int mdc_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
{
- ENTRY;
- RETURN(0);
+ return 0;
}
/**
@@ -648,12 +631,10 @@ void mdc_replay_open(struct ptlrpc_request *req)
struct obd_client_handle *och;
struct lustre_handle old;
struct mdt_body *body;
- ENTRY;
if (mod == NULL) {
DEBUG_REQ(D_ERROR, req,
"Can't properly replay without open data.");
- EXIT;
return;
}
@@ -687,7 +668,6 @@ void mdc_replay_open(struct ptlrpc_request *req)
DEBUG_REQ(D_HA, close_req, "updating close body with new fh");
epoch->handle = body->handle;
}
- EXIT;
}
void mdc_commit_open(struct ptlrpc_request *req)
@@ -726,10 +706,9 @@ int mdc_set_open_replay_data(struct obd_export *exp,
struct mdt_rec_create *rec;
struct mdt_body *body;
struct obd_import *imp = open_req->rq_import;
- ENTRY;
if (!open_req->rq_replay)
- RETURN(0);
+ return 0;
rec = req_capsule_client_get(&open_req->rq_pill, &RMF_REC_REINT);
body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
@@ -744,7 +723,7 @@ int mdc_set_open_replay_data(struct obd_export *exp,
if (mod == NULL) {
DEBUG_REQ(D_ERROR, open_req,
"Can't allocate md_open_data");
- RETURN(0);
+ return 0;
}
/**
@@ -776,21 +755,20 @@ int mdc_set_open_replay_data(struct obd_export *exp,
}
DEBUG_REQ(D_RPCTRACE, open_req, "Set up open replay data");
- RETURN(0);
+ return 0;
}
int mdc_clear_open_replay_data(struct obd_export *exp,
struct obd_client_handle *och)
{
struct md_open_data *mod = och->och_mod;
- ENTRY;
/**
* It is possible to not have \var mod in a case of eviction between
* lookup and ll_file_open().
**/
if (mod == NULL)
- RETURN(0);
+ return 0;
LASSERT(mod != LP_POISON);
@@ -798,7 +776,7 @@ int mdc_clear_open_replay_data(struct obd_export *exp,
och->och_mod = NULL;
obd_mod_put(mod);
- RETURN(0);
+ return 0;
}
/* Prepares the request for the replay by the given reply */
@@ -823,19 +801,18 @@ int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
struct obd_device *obd = class_exp2obd(exp);
struct ptlrpc_request *req;
int rc;
- ENTRY;
*request = NULL;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_CLOSE);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_CLOSE);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
/* To avoid a livelock (bug 7034), we need to send CLOSE RPCs to a
@@ -916,7 +893,7 @@ int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
}
*request = req;
mdc_close_handle_reply(req, op_data, rc);
- RETURN(rc);
+ return rc;
}
int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
@@ -925,18 +902,17 @@ int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
struct obd_device *obd = class_exp2obd(exp);
struct ptlrpc_request *req;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_MDS_DONE_WRITING);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_DONE_WRITING);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
if (mod != NULL) {
@@ -983,7 +959,7 @@ int mdc_done_writing(struct obd_export *exp, struct md_op_data *op_data,
mdc_close_handle_reply(req, op_data, rc);
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
}
@@ -997,7 +973,6 @@ int mdc_readpage(struct obd_export *exp, struct md_op_data *op_data,
int resends = 0;
struct l_wait_info lwi;
int rc;
- ENTRY;
*request = NULL;
init_waitqueue_head(&waitq);
@@ -1005,14 +980,14 @@ int mdc_readpage(struct obd_export *exp, struct md_op_data *op_data,
restart_bulk:
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_READPAGE);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_READPAGE);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
req->rq_request_portal = MDS_READPAGE_PORTAL;
@@ -1022,7 +997,7 @@ restart_bulk:
MDS_BULK_PORTAL);
if (desc == NULL) {
ptlrpc_request_free(req);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
/* NB req now owns desc and will free it when it gets freed */
@@ -1038,12 +1013,12 @@ restart_bulk:
if (rc) {
ptlrpc_req_finished(req);
if (rc != -ETIMEDOUT)
- RETURN(rc);
+ return rc;
resends++;
if (!client_should_resend(resends, &exp->exp_obd->u.cli)) {
CERROR("too many resend retries, returning error\n");
- RETURN(-EIO);
+ return -EIO;
}
lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(resends), NULL, NULL, NULL);
l_wait_event(waitq, 0, &lwi);
@@ -1055,7 +1030,7 @@ restart_bulk:
req->rq_bulk->bd_nob_transferred);
if (rc < 0) {
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
}
if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
@@ -1063,11 +1038,11 @@ restart_bulk:
req->rq_bulk->bd_nob_transferred,
PAGE_CACHE_SIZE * op_data->op_npages);
ptlrpc_req_finished(req);
- RETURN(-EPROTO);
+ return -EPROTO;
}
*request = req;
- RETURN(0);
+ return 0;
}
static int mdc_statfs(const struct lu_env *env,
@@ -1079,7 +1054,6 @@ static int mdc_statfs(const struct lu_env *env,
struct obd_statfs *msfs;
struct obd_import *imp = NULL;
int rc;
- ENTRY;
/*
* Since the request might also come from lprocfs, we need
@@ -1090,7 +1064,7 @@ static int mdc_statfs(const struct lu_env *env,
imp = class_import_get(obd->u.cli.cl_import);
up_read(&obd->u.cli.cl_sem);
if (!imp)
- RETURN(-ENODEV);
+ return -ENODEV;
req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_STATFS,
LUSTRE_MDS_VERSION, MDS_STATFS);
@@ -1118,7 +1092,6 @@ static int mdc_statfs(const struct lu_env *env,
GOTO(out, rc = -EPROTO);
*osfs = *msfs;
- EXIT;
out:
ptlrpc_req_finished(req);
output:
@@ -1133,15 +1106,15 @@ static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf)
int rc;
if (gf->gf_pathlen > PATH_MAX)
- RETURN(-ENAMETOOLONG);
+ return -ENAMETOOLONG;
if (gf->gf_pathlen < 2)
- RETURN(-EOVERFLOW);
+ return -EOVERFLOW;
/* Key is KEY_FID2PATH + getinfo_fid2path description */
keylen = cfs_size_round(sizeof(KEY_FID2PATH)) + sizeof(*gf);
OBD_ALLOC(key, keylen);
if (key == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
memcpy(key, KEY_FID2PATH, sizeof(KEY_FID2PATH));
memcpy(key + cfs_size_round(sizeof(KEY_FID2PATH)), gf, sizeof(*gf));
@@ -1178,7 +1151,6 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp,
struct hsm_progress_kernel *req_hpk;
struct ptlrpc_request *req;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_PROGRESS,
LUSTRE_MDS_VERSION, MDS_HSM_PROGRESS);
@@ -1193,6 +1165,7 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp,
GOTO(out, rc = -EPROTO);
*req_hpk = *hpk;
+ req_hpk->hpk_errval = lustre_errno_hton(hpk->hpk_errval);
ptlrpc_request_set_replen(req);
@@ -1208,7 +1181,6 @@ static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archives)
__u32 *archive_mask;
struct ptlrpc_request *req;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_REGISTER,
LUSTRE_MDS_VERSION,
@@ -1242,19 +1214,18 @@ static int mdc_ioc_hsm_current_action(struct obd_export *exp,
struct hsm_current_action *req_hca;
struct ptlrpc_request *req;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_MDS_HSM_ACTION);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_ACTION);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1,
@@ -1273,7 +1244,6 @@ static int mdc_ioc_hsm_current_action(struct obd_export *exp,
*hca = *req_hca;
- EXIT;
out:
ptlrpc_req_finished(req);
return rc;
@@ -1283,7 +1253,6 @@ static int mdc_ioc_hsm_ct_unregister(struct obd_import *imp)
{
struct ptlrpc_request *req;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_UNREGISTER,
LUSTRE_MDS_VERSION,
@@ -1309,19 +1278,18 @@ static int mdc_ioc_hsm_state_get(struct obd_export *exp,
struct hsm_user_state *req_hus;
struct ptlrpc_request *req;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_MDS_HSM_STATE_GET);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_GET);
if (rc != 0) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1,
@@ -1339,7 +1307,6 @@ static int mdc_ioc_hsm_state_get(struct obd_export *exp,
*hus = *req_hus;
- EXIT;
out:
ptlrpc_req_finished(req);
return rc;
@@ -1352,19 +1319,18 @@ static int mdc_ioc_hsm_state_set(struct obd_export *exp,
struct hsm_state_set *req_hss;
struct ptlrpc_request *req;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_MDS_HSM_STATE_SET);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_SET);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1,
@@ -1381,7 +1347,6 @@ static int mdc_ioc_hsm_state_set(struct obd_export *exp,
rc = mdc_queue_wait(req);
GOTO(out, rc);
- EXIT;
out:
ptlrpc_req_finished(req);
return rc;
@@ -1396,7 +1361,6 @@ static int mdc_ioc_hsm_request(struct obd_export *exp,
struct hsm_user_item *req_hui;
char *req_opaque;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(imp, &RQF_MDS_HSM_REQUEST);
if (req == NULL)
@@ -1411,7 +1375,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp,
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_REQUEST);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
mdc_pack_body(req, NULL, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);
@@ -1476,21 +1440,20 @@ static int changelog_kkuc_cb(const struct lu_env *env, struct llog_handle *llh,
struct llog_changelog_rec *rec = (struct llog_changelog_rec *)hdr;
struct kuc_hdr *lh;
int len, rc;
- ENTRY;
if (rec->cr_hdr.lrh_type != CHANGELOG_REC) {
rc = -EINVAL;
CERROR("%s: not a changelog rec %x/%d: rc = %d\n",
cs->cs_obd->obd_name, rec->cr_hdr.lrh_type,
rec->cr.cr_type, rc);
- RETURN(rc);
+ return rc;
}
if (rec->cr.cr_index < cs->cs_startrec) {
/* Skip entries earlier than what we are interested in */
CDEBUG(D_CHANGELOG, "rec="LPU64" start="LPU64"\n",
rec->cr.cr_index, cs->cs_startrec);
- RETURN(0);
+ return 0;
}
CDEBUG(D_CHANGELOG, LPU64" %02d%-5s "LPU64" 0x%x t="DFID" p="DFID
@@ -1509,7 +1472,7 @@ static int changelog_kkuc_cb(const struct lu_env *env, struct llog_handle *llh,
rc = libcfs_kkuc_msg_put(cs->cs_fp, lh);
CDEBUG(D_CHANGELOG, "kucmsg fp %p len %d rc %d\n", cs->cs_fp, len,rc);
- RETURN(rc);
+ return rc;
}
static int mdc_changelog_send_thread(void *csdata)
@@ -1608,13 +1571,12 @@ static int mdc_quotacheck(struct obd_device *unused, struct obd_export *exp,
struct ptlrpc_request *req;
struct obd_quotactl *body;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_MDS_QUOTACHECK, LUSTRE_MDS_VERSION,
MDS_QUOTACHECK);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
*body = *oqctl;
@@ -1628,7 +1590,7 @@ static int mdc_quotacheck(struct obd_device *unused, struct obd_export *exp,
if (rc)
cli->cl_qchk_stat = rc;
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
}
static int mdc_quota_poll_check(struct obd_export *exp,
@@ -1636,7 +1598,6 @@ static int mdc_quota_poll_check(struct obd_export *exp,
{
struct client_obd *cli = &exp->exp_obd->u.cli;
int rc;
- ENTRY;
qchk->obd_uuid = cli->cl_target_uuid;
memcpy(qchk->obd_type, LUSTRE_MDS_NAME, strlen(LUSTRE_MDS_NAME));
@@ -1645,7 +1606,7 @@ static int mdc_quota_poll_check(struct obd_export *exp,
/* the client is not the previous one */
if (rc == CL_NOT_QUOTACHECKED)
rc = -EINTR;
- RETURN(rc);
+ return rc;
}
static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp,
@@ -1654,13 +1615,12 @@ static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp,
struct ptlrpc_request *req;
struct obd_quotactl *oqc;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_MDS_QUOTACTL, LUSTRE_MDS_VERSION,
MDS_QUOTACTL);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
*oqc = *oqctl;
@@ -1682,7 +1642,7 @@ static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp,
}
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
}
static int mdc_ioc_swap_layouts(struct obd_export *exp,
@@ -1692,7 +1652,6 @@ static int mdc_ioc_swap_layouts(struct obd_export *exp,
struct ptlrpc_request *req;
int rc, count;
struct mdc_swap_layouts *msl, *payload;
- ENTRY;
msl = op_data->op_data;
@@ -1711,7 +1670,7 @@ static int mdc_ioc_swap_layouts(struct obd_export *exp,
&RQF_MDS_SWAP_LAYOUTS);
if (req == NULL) {
ldlm_lock_list_put(&cancels, l_bl_ast, count);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
@@ -1720,7 +1679,7 @@ static int mdc_ioc_swap_layouts(struct obd_export *exp,
rc = mdc_prep_elc_req(exp, req, MDS_SWAP_LAYOUTS, &cancels, count);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
mdc_swap_layouts_pack(req, op_data);
@@ -1735,7 +1694,6 @@ static int mdc_ioc_swap_layouts(struct obd_export *exp,
rc = ptlrpc_queue_wait(req);
if (rc)
GOTO(out, rc);
- EXIT;
out:
ptlrpc_req_finished(req);
@@ -1750,7 +1708,6 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
struct obd_import *imp = obd->u.cli.cl_import;
struct llog_ctxt *ctxt;
int rc;
- ENTRY;
if (!try_module_get(THIS_MODULE)) {
CERROR("Can't get module. Is it alive?");
@@ -1774,6 +1731,9 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
GOTO(out, rc);
case LL_IOC_HSM_CT_START:
rc = mdc_ioc_hsm_ct_start(exp, karg);
+ /* ignore if it was already registered on this MDS. */
+ if (rc == -EEXIST)
+ rc = 0;
GOTO(out, rc);
case LL_IOC_HSM_PROGRESS:
rc = mdc_ioc_hsm_progress(exp, karg);
@@ -1855,7 +1815,7 @@ static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
OBD_ALLOC_PTR(oqctl);
if (!oqctl)
- RETURN(-ENOMEM);
+ return -ENOMEM;
QCTL_COPY(oqctl, qctl);
rc = obd_quotactl(exp, oqctl);
@@ -1897,11 +1857,10 @@ int mdc_get_info_rpc(struct obd_export *exp,
struct ptlrpc_request *req;
char *tmp;
int rc = -EINVAL;
- ENTRY;
req = ptlrpc_request_alloc(imp, &RQF_MDS_GET_INFO);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY,
RCL_CLIENT, keylen);
@@ -1911,7 +1870,7 @@ int mdc_get_info_rpc(struct obd_export *exp,
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GET_INFO);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_KEY);
@@ -1936,7 +1895,7 @@ int mdc_get_info_rpc(struct obd_export *exp,
}
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
}
static void lustre_swab_hai(struct hsm_action_item *h)
@@ -1991,19 +1950,10 @@ static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
lk->lk_uid, lk->lk_group, lk->lk_flags);
if (lk->lk_flags & LK_FLG_STOP) {
- rc = libcfs_kkuc_group_rem(lk->lk_uid, lk->lk_group);
/* Unregister with the coordinator */
- if (rc == 0)
- rc = mdc_ioc_hsm_ct_unregister(imp);
+ rc = mdc_ioc_hsm_ct_unregister(imp);
} else {
- struct file *fp = fget(lk->lk_wfd);
-
- rc = libcfs_kkuc_group_add(fp, lk->lk_uid, lk->lk_group,
- lk->lk_data);
- if (rc && fp)
- fput(fp);
- if (rc == 0)
- rc = mdc_ioc_hsm_ct_register(imp, archive);
+ rc = mdc_ioc_hsm_ct_register(imp, archive);
}
return rc;
@@ -2019,19 +1969,18 @@ static int mdc_hsm_copytool_send(int len, void *val)
struct kuc_hdr *lh = (struct kuc_hdr *)val;
struct hsm_action_list *hal = (struct hsm_action_list *)(lh + 1);
int rc;
- ENTRY;
if (len < sizeof(*lh) + sizeof(*hal)) {
CERROR("Short HSM message %d < %d\n", len,
(int) (sizeof(*lh) + sizeof(*hal)));
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (lh->kuc_magic == __swab16(KUC_MAGIC)) {
lustre_swab_kuch(lh);
lustre_swab_hal(hal);
} else if (lh->kuc_magic != KUC_MAGIC) {
CERROR("Bad magic %x!=%x\n", lh->kuc_magic, KUC_MAGIC);
- RETURN(-EPROTO);
+ return -EPROTO;
}
CDEBUG(D_HSM, " Received message mg=%x t=%d m=%d l=%d actions=%d "
@@ -2042,7 +1991,7 @@ static int mdc_hsm_copytool_send(int len, void *val)
/* Broadcast to HSM listeners */
rc = libcfs_kkuc_group_put(KUC_GRP_HSM, lh);
- RETURN(rc);
+ return rc;
}
/**
@@ -2084,11 +2033,10 @@ int mdc_set_info_async(const struct lu_env *env,
{
struct obd_import *imp = class_exp2cliimp(exp);
int rc;
- ENTRY;
if (KEY_IS(KEY_READ_ONLY)) {
if (vallen != sizeof(int))
- RETURN(-EINVAL);
+ return -EINVAL;
spin_lock(&imp->imp_lock);
if (*((int *)val)) {
@@ -2104,15 +2052,15 @@ int mdc_set_info_async(const struct lu_env *env,
rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
keylen, key, vallen, val, set);
- RETURN(rc);
+ return rc;
}
if (KEY_IS(KEY_SPTLRPC_CONF)) {
sptlrpc_conf_client_adapt(exp->exp_obd);
- RETURN(0);
+ return 0;
}
if (KEY_IS(KEY_FLUSH_CTX)) {
sptlrpc_import_flush_my_ctx(imp);
- RETURN(0);
+ return 0;
}
if (KEY_IS(KEY_MDS_CONN)) {
/* mds-mds import */
@@ -2121,20 +2069,20 @@ int mdc_set_info_async(const struct lu_env *env,
spin_unlock(&imp->imp_lock);
imp->imp_client->cli_request_portal = MDS_MDS_PORTAL;
CDEBUG(D_OTHER, "%s: timeout / 2\n", exp->exp_obd->obd_name);
- RETURN(0);
+ return 0;
}
if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
keylen, key, vallen, val, set);
- RETURN(rc);
+ return rc;
}
if (KEY_IS(KEY_HSM_COPYTOOL_SEND)) {
rc = mdc_hsm_copytool_send(vallen, val);
- RETURN(rc);
+ return rc;
}
CERROR("Unknown key %s\n", (char *)key);
- RETURN(-EINVAL);
+ return -EINVAL;
}
int mdc_get_info(const struct lu_env *env, struct obd_export *exp,
@@ -2147,30 +2095,30 @@ int mdc_get_info(const struct lu_env *env, struct obd_export *exp,
int mdsize, *max_easize;
if (*vallen != sizeof(int))
- RETURN(-EINVAL);
+ return -EINVAL;
mdsize = *(int*)val;
if (mdsize > exp->exp_obd->u.cli.cl_max_mds_easize)
exp->exp_obd->u.cli.cl_max_mds_easize = mdsize;
max_easize = val;
*max_easize = exp->exp_obd->u.cli.cl_max_mds_easize;
- RETURN(0);
+ return 0;
} else if (KEY_IS(KEY_CONN_DATA)) {
struct obd_import *imp = class_exp2cliimp(exp);
struct obd_connect_data *data = val;
if (*vallen != sizeof(*data))
- RETURN(-EINVAL);
+ return -EINVAL;
*data = imp->imp_connect_data;
- RETURN(0);
+ return 0;
} else if (KEY_IS(KEY_TGT_COUNT)) {
*((int *)val) = 1;
- RETURN(0);
+ return 0;
}
rc = mdc_get_info_rpc(exp, keylen, key, *vallen, val);
- RETURN(rc);
+ return rc;
}
static int mdc_pin(struct obd_export *exp, const struct lu_fid *fid,
@@ -2180,18 +2128,17 @@ static int mdc_pin(struct obd_export *exp, const struct lu_fid *fid,
struct ptlrpc_request *req;
struct mdt_body *body;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_PIN);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
mdc_set_capa_size(req, &RMF_CAPA1, oc);
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_PIN);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
mdc_pack_body(req, fid, oc, 0, 0, -1, flags);
@@ -2220,11 +2167,11 @@ static int mdc_pin(struct obd_export *exp, const struct lu_fid *fid,
}
handle->och_mod->mod_open_req = req; /* will be dropped by unpin */
- RETURN(0);
+ return 0;
err_out:
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
}
static int mdc_unpin(struct obd_export *exp, struct obd_client_handle *handle,
@@ -2233,12 +2180,11 @@ static int mdc_unpin(struct obd_export *exp, struct obd_client_handle *handle,
struct ptlrpc_request *req;
struct mdt_body *body;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_UNPIN,
LUSTRE_MDS_VERSION, MDS_UNPIN);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
body = req_capsule_client_get(&req->rq_pill, &RMF_MDT_BODY);
body->handle = handle->och_fh;
@@ -2257,7 +2203,7 @@ static int mdc_unpin(struct obd_export *exp, struct obd_client_handle *handle,
ptlrpc_req_finished(handle->och_mod->mod_open_req);
obd_mod_put(handle->och_mod);
- RETURN(rc);
+ return rc;
}
int mdc_sync(struct obd_export *exp, const struct lu_fid *fid,
@@ -2265,19 +2211,18 @@ int mdc_sync(struct obd_export *exp, const struct lu_fid *fid,
{
struct ptlrpc_request *req;
int rc;
- ENTRY;
*request = NULL;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_SYNC);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
mdc_set_capa_size(req, &RMF_CAPA1, oc);
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_SYNC);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
mdc_pack_body(req, fid, oc, 0, 0, -1, 0);
@@ -2289,7 +2234,7 @@ int mdc_sync(struct obd_export *exp, const struct lu_fid *fid,
ptlrpc_req_finished(req);
else
*request = req;
- RETURN(rc);
+ return rc;
}
static int mdc_import_event(struct obd_device *obd, struct obd_import *imp,
@@ -2328,7 +2273,7 @@ static int mdc_import_event(struct obd_device *obd, struct obd_import *imp,
}
case IMP_EVENT_ACTIVE:
rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
- /* restore re-establish kuc registration after reconnecting */
+ /* redo the kuc registration after reconnecting */
if (rc == 0)
rc = mdc_kuc_reregister(imp);
break;
@@ -2342,7 +2287,7 @@ static int mdc_import_event(struct obd_device *obd, struct obd_import *imp,
CERROR("Unknown import event %x\n", event);
LBUG();
}
- RETURN(rc);
+ return rc;
}
int mdc_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
@@ -2350,8 +2295,8 @@ int mdc_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
{
struct client_obd *cli = &exp->exp_obd->u.cli;
struct lu_client_seq *seq = cli->cl_seq;
- ENTRY;
- RETURN(seq_client_alloc_fid(NULL, seq, fid));
+
+ return seq_client_alloc_fid(NULL, seq, fid);
}
struct obd_uuid *mdc_get_uuid(struct obd_export *exp) {
@@ -2367,15 +2312,15 @@ struct obd_uuid *mdc_get_uuid(struct obd_export *exp) {
static int mdc_cancel_for_recovery(struct ldlm_lock *lock)
{
if (lock->l_resource->lr_type != LDLM_IBITS)
- RETURN(0);
+ return 0;
/* FIXME: if we ever get into a situation where there are too many
* opened files with open locks on a single node, then we really
* should replay these open locks to re-acquire them */
if (lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_OPEN)
- RETURN(0);
+ return 0;
- RETURN(1);
+ return 1;
}
static int mdc_resource_inode_free(struct ldlm_resource *res)
@@ -2387,7 +2332,7 @@ static int mdc_resource_inode_free(struct ldlm_resource *res)
}
struct ldlm_valblock_ops inode_lvbo = {
- lvbo_free: mdc_resource_inode_free
+ .lvbo_free = mdc_resource_inode_free,
};
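The hunk above swaps the old GCC-only `field: value` initializer syntax for a C99 designated initializer. A minimal, self-contained sketch of the two spellings, using a hypothetical ops struct rather than anything from this patch:

#include <stdio.h>

struct ops {
	int (*free_fn)(int);
};

static int my_free(int x)
{
	return x + 1;
}

/* Old GNU extension (gcc-only): struct ops o = { free_fn: my_free }; */
/* C99 designated initializer, portable and order-independent: */
static const struct ops o = { .free_fn = my_free };

int main(void)
{
	printf("%d\n", o.free_fn(41));	/* prints 42 */
	return 0;
}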
static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
@@ -2395,11 +2340,10 @@ static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
struct client_obd *cli = &obd->u.cli;
struct lprocfs_static_vars lvars = { 0 };
int rc;
- ENTRY;
OBD_ALLOC(cli->cl_rpc_lock, sizeof (*cli->cl_rpc_lock));
if (!cli->cl_rpc_lock)
- RETURN(-ENOMEM);
+ return -ENOMEM;
mdc_init_rpc_lock(cli->cl_rpc_lock);
ptlrpcd_addref();
@@ -2427,14 +2371,14 @@ static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
CERROR("failed to setup llogging subsystems\n");
}
- RETURN(rc);
+ return rc;
err_close_lock:
OBD_FREE(cli->cl_close_lock, sizeof (*cli->cl_close_lock));
err_rpc_lock:
OBD_FREE(cli->cl_rpc_lock, sizeof (*cli->cl_rpc_lock));
ptlrpcd_decref();
- RETURN(rc);
+ return rc;
}
/* Initialize the default and maximum LOV EA and cookie sizes. This allows
@@ -2446,7 +2390,6 @@ static int mdc_init_ea_size(struct obd_export *exp, int easize,
{
struct obd_device *obd = exp->exp_obd;
struct client_obd *cli = &obd->u.cli;
- ENTRY;
if (cli->cl_max_mds_easize < easize)
cli->cl_max_mds_easize = easize;
@@ -2457,13 +2400,12 @@ static int mdc_init_ea_size(struct obd_export *exp, int easize,
if (cli->cl_max_mds_cookiesize < cookiesize)
cli->cl_max_mds_cookiesize = cookiesize;
- RETURN(0);
+ return 0;
}
static int mdc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
{
int rc = 0;
- ENTRY;
switch (stage) {
case OBD_CLEANUP_EARLY:
@@ -2482,7 +2424,7 @@ static int mdc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
CERROR("failed to cleanup llogging subsystems\n");
break;
}
- RETURN(rc);
+ return rc;
}
static int mdc_cleanup(struct obd_device *obd)
@@ -2504,33 +2446,29 @@ static int mdc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
struct llog_ctxt *ctxt;
int rc;
- ENTRY;
-
LASSERT(olg == &obd->obd_olg);
rc = llog_setup(NULL, obd, olg, LLOG_CHANGELOG_REPL_CTXT, tgt,
&llog_client_ops);
if (rc)
- RETURN(rc);
+ return rc;
ctxt = llog_group_get_ctxt(olg, LLOG_CHANGELOG_REPL_CTXT);
llog_initiator_connect(ctxt);
llog_ctxt_put(ctxt);
- RETURN(0);
+ return 0;
}
static int mdc_llog_finish(struct obd_device *obd, int count)
{
struct llog_ctxt *ctxt;
- ENTRY;
-
ctxt = llog_get_context(obd, LLOG_CHANGELOG_REPL_CTXT);
if (ctxt)
llog_cleanup(NULL, ctxt);
- RETURN(0);
+ return 0;
}
static int mdc_process_config(struct obd_device *obd, obd_count len, void *buf)
@@ -2559,21 +2497,20 @@ int mdc_get_remote_perm(struct obd_export *exp, const struct lu_fid *fid,
{
struct ptlrpc_request *req;
int rc;
- ENTRY;
LASSERT(client_is_remote(exp));
*request = NULL;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
mdc_set_capa_size(req, &RMF_CAPA1, oc);
rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
mdc_pack_body(req, fid, oc, OBD_MD_FLRMTPERM, 0, suppgid, 0);
@@ -2588,7 +2525,7 @@ int mdc_get_remote_perm(struct obd_export *exp, const struct lu_fid *fid,
ptlrpc_req_finished(req);
else
*request = req;
- RETURN(rc);
+ return rc;
}
static int mdc_interpret_renew_capa(const struct lu_env *env,
@@ -2598,7 +2535,6 @@ static int mdc_interpret_renew_capa(const struct lu_env *env,
struct mdc_renew_capa_args *ra = args;
struct mdt_body *body = NULL;
struct lustre_capa *capa;
- ENTRY;
if (status)
GOTO(out, capa = ERR_PTR(status));
@@ -2613,7 +2549,6 @@ static int mdc_interpret_renew_capa(const struct lu_env *env,
capa = req_capsule_server_get(&req->rq_pill, &RMF_CAPA2);
if (!capa)
GOTO(out, capa = ERR_PTR(-EFAULT));
- EXIT;
out:
ra->ra_cb(ra->ra_oc, capa);
return 0;
@@ -2624,12 +2559,11 @@ static int mdc_renew_capa(struct obd_export *exp, struct obd_capa *oc,
{
struct ptlrpc_request *req;
struct mdc_renew_capa_args *ra;
- ENTRY;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_GETATTR,
LUSTRE_MDS_VERSION, MDS_GETATTR);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
/* NB, OBD_MD_FLOSSCAPA is set here, but it doesn't necessarily mean the
* capa to renew is oss capa.
@@ -2643,7 +2577,7 @@ static int mdc_renew_capa(struct obd_export *exp, struct obd_capa *oc,
ra->ra_cb = cb;
req->rq_interpret_reply = mdc_interpret_renew_capa;
ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
- RETURN(0);
+ return 0;
}
static int mdc_connect(const struct lu_env *env,
@@ -2737,7 +2671,7 @@ int __init mdc_init(void)
rc = class_register_type(&mdc_obd_ops, &mdc_md_ops, lvars.module_vars,
LUSTRE_MDC_NAME, NULL);
- RETURN(rc);
+ return rc;
}
static void /*__exit*/ mdc_exit(void)
diff --git a/drivers/staging/lustre/lustre/mgc/libmgc.c b/drivers/staging/lustre/lustre/mgc/libmgc.c
index 442146cc7e6..7b4947cec3a 100644
--- a/drivers/staging/lustre/lustre/mgc/libmgc.c
+++ b/drivers/staging/lustre/lustre/mgc/libmgc.c
@@ -56,7 +56,6 @@
static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
{
int rc;
- ENTRY;
ptlrpcd_addref();
@@ -73,19 +72,18 @@ static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
GOTO(err_cleanup, rc);
}
- RETURN(rc);
+ return rc;
err_cleanup:
client_obd_cleanup(obd);
err_decref:
ptlrpcd_decref();
- RETURN(rc);
+ return rc;
}
static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
{
int rc = 0;
- ENTRY;
switch (stage) {
case OBD_CLEANUP_EARLY:
@@ -96,21 +94,20 @@ static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
CERROR("failed to cleanup llogging subsystems\n");
break;
}
- RETURN(rc);
+ return rc;
}
static int mgc_cleanup(struct obd_device *obd)
{
struct client_obd *cli = &obd->u.cli;
int rc;
- ENTRY;
LASSERT(cli->cl_mgc_vfsmnt == NULL);
ptlrpcd_decref();
rc = client_obd_cleanup(obd);
- RETURN(rc);
+ return rc;
}
static int mgc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
@@ -118,32 +115,30 @@ static int mgc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
{
struct llog_ctxt *ctxt;
int rc;
- ENTRY;
LASSERT(olg == &obd->obd_olg);
rc = llog_setup(NULL, obd, olg, LLOG_CONFIG_REPL_CTXT, tgt,
&llog_client_ops);
if (rc < 0)
- RETURN(rc);
+ return rc;
ctxt = llog_group_get_ctxt(olg, LLOG_CONFIG_REPL_CTXT);
llog_initiator_connect(ctxt);
llog_ctxt_put(ctxt);
- RETURN(rc);
+ return rc;
}
static int mgc_llog_finish(struct obd_device *obd, int count)
{
struct llog_ctxt *ctxt;
- ENTRY;
ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
if (ctxt)
llog_cleanup(NULL, ctxt);
- RETURN(0);
+ return 0;
}
struct obd_ops mgc_obd_ops = {
diff --git a/drivers/staging/lustre/lustre/mgc/lproc_mgc.c b/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
index 1105eaa2431..ebecec2b007 100644
--- a/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
+++ b/drivers/staging/lustre/lustre/mgc/lproc_mgc.c
@@ -35,7 +35,6 @@
*/
#define DEBUG_SUBSYSTEM S_CLASS
-#include <linux/version.h>
#include <linux/vfs.h>
#include <obd_class.h>
#include <lprocfs_status.h>
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index c6c84d97ce4..12a9ede21a8 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -118,19 +118,16 @@ static DEFINE_SPINLOCK(config_list_lock);
/* Take a reference to a config log */
static int config_log_get(struct config_llog_data *cld)
{
- ENTRY;
atomic_inc(&cld->cld_refcount);
CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
atomic_read(&cld->cld_refcount));
- RETURN(0);
+ return 0;
}
/* Drop a reference to a config log. When no longer referenced,
we can free the config log data */
static void config_log_put(struct config_llog_data *cld)
{
- ENTRY;
-
CDEBUG(D_INFO, "log %s refs %d\n", cld->cld_logname,
atomic_read(&cld->cld_refcount));
LASSERT(atomic_read(&cld->cld_refcount) > 0);
@@ -152,8 +149,6 @@ static void config_log_put(struct config_llog_data *cld)
class_export_put(cld->cld_mgcexp);
OBD_FREE(cld, sizeof(*cld) + strlen(cld->cld_logname) + 1);
}
-
- EXIT;
}
/* Find a config log by name */
@@ -164,7 +159,6 @@ struct config_llog_data *config_log_find(char *logname,
struct config_llog_data *cld;
struct config_llog_data *found = NULL;
void * instance;
- ENTRY;
LASSERT(logname != NULL);
@@ -186,7 +180,7 @@ struct config_llog_data *config_log_find(char *logname,
LASSERT(found->cld_stopping == 0 || cld_is_sptlrpc(found) == 0);
}
spin_unlock(&config_list_lock);
- RETURN(found);
+ return found;
}
static
@@ -198,14 +192,13 @@ struct config_llog_data *do_config_log_add(struct obd_device *obd,
{
struct config_llog_data *cld;
int rc;
- ENTRY;
CDEBUG(D_MGC, "do adding config log %s:%p\n", logname,
cfg ? cfg->cfg_instance : 0);
OBD_ALLOC(cld, sizeof(*cld) + strlen(logname) + 1);
if (!cld)
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
strcpy(cld->cld_logname, logname);
if (cfg)
@@ -235,7 +228,7 @@ struct config_llog_data *do_config_log_add(struct obd_device *obd,
if (rc) {
config_log_put(cld);
- RETURN(ERR_PTR(rc));
+ return ERR_PTR(rc);
}
if (cld_is_sptlrpc(cld)) {
@@ -244,7 +237,7 @@ struct config_llog_data *do_config_log_add(struct obd_device *obd,
CERROR("failed processing sptlrpc log: %d\n", rc);
}
- RETURN(cld);
+ return cld;
}
static struct config_llog_data *config_recover_log_add(struct obd_device *obd,
@@ -296,7 +289,6 @@ static int config_log_add(struct obd_device *obd, char *logname,
struct config_llog_data *sptlrpc_cld;
char seclogname[32];
char *ptr;
- ENTRY;
CDEBUG(D_MGC, "adding config log %s:%p\n", logname, cfg->cfg_instance);
@@ -307,7 +299,7 @@ static int config_log_add(struct obd_device *obd, char *logname,
ptr = strrchr(logname, '-');
if (ptr == NULL || ptr - logname > 8) {
CERROR("logname %s is too long\n", logname);
- RETURN(-EINVAL);
+ return -EINVAL;
}
memcpy(seclogname, logname, ptr - logname);
@@ -319,7 +311,7 @@ static int config_log_add(struct obd_device *obd, char *logname,
CONFIG_T_SPTLRPC, NULL, NULL);
if (IS_ERR(sptlrpc_cld)) {
CERROR("can't create sptlrpc log: %s\n", seclogname);
- RETURN(PTR_ERR(sptlrpc_cld));
+ return PTR_ERR(sptlrpc_cld);
}
}
@@ -327,7 +319,7 @@ static int config_log_add(struct obd_device *obd, char *logname,
if (IS_ERR(cld)) {
CERROR("can't create log: %s\n", logname);
config_log_put(sptlrpc_cld);
- RETURN(PTR_ERR(cld));
+ return PTR_ERR(cld);
}
cld->cld_sptlrpc = sptlrpc_cld;
@@ -339,12 +331,12 @@ static int config_log_add(struct obd_device *obd, char *logname,
recover_cld = config_recover_log_add(obd, seclogname, cfg, sb);
if (IS_ERR(recover_cld)) {
config_log_put(cld);
- RETURN(PTR_ERR(recover_cld));
+ return PTR_ERR(recover_cld);
}
cld->cld_recover = recover_cld;
}
- RETURN(0);
+ return 0;
}
DEFINE_MUTEX(llog_process_lock);
@@ -357,11 +349,10 @@ static int config_log_end(char *logname, struct config_llog_instance *cfg)
struct config_llog_data *cld_sptlrpc = NULL;
struct config_llog_data *cld_recover = NULL;
int rc = 0;
- ENTRY;
cld = config_log_find(logname, cfg);
if (cld == NULL)
- RETURN(-ENOENT);
+ return -ENOENT;
mutex_lock(&cld->cld_lock);
/*
@@ -375,7 +366,7 @@ static int config_log_end(char *logname, struct config_llog_instance *cfg)
mutex_unlock(&cld->cld_lock);
/* drop the ref from the find */
config_log_put(cld);
- RETURN(rc);
+ return rc;
}
cld->cld_stopping = 1;
@@ -406,7 +397,7 @@ static int config_log_end(char *logname, struct config_llog_instance *cfg)
CDEBUG(D_MGC, "end config log %s (%d)\n", logname ? logname : "client",
rc);
- RETURN(rc);
+ return rc;
}
int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data)
@@ -415,7 +406,6 @@ int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data)
struct obd_import *imp = obd->u.cli.cl_import;
struct obd_connect_data *ocd = &imp->imp_connect_data;
struct config_llog_data *cld;
- ENTRY;
seq_printf(m, "imperative_recovery: %s\n",
OCD_HAS_FLAG(ocd, IMP_RECOV) ? "ENABLED" : "DISABLED");
@@ -431,7 +421,7 @@ int lprocfs_mgc_rd_ir_state(struct seq_file *m, void *data)
}
spin_unlock(&config_list_lock);
- RETURN(0);
+ return 0;
}
/* reenqueue any lost locks */
@@ -445,7 +435,6 @@ static DECLARE_COMPLETION(rq_exit);
static void do_requeue(struct config_llog_data *cld)
{
- ENTRY;
LASSERT(atomic_read(&cld->cld_refcount) > 0);
/* Do not run mgc_process_log on a disconnected export or an
@@ -460,8 +449,6 @@ static void do_requeue(struct config_llog_data *cld)
cld->cld_logname);
}
up_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
-
- EXIT;
}
/* this timeout represents how many seconds MGC should wait before
@@ -474,7 +461,6 @@ static void do_requeue(struct config_llog_data *cld)
static int mgc_requeue_thread(void *data)
{
int rc = 0;
- ENTRY;
CDEBUG(D_MGC, "Starting requeue thread\n");
@@ -556,15 +542,13 @@ static int mgc_requeue_thread(void *data)
complete(&rq_exit);
CDEBUG(D_MGC, "Ending requeue thread\n");
- RETURN(rc);
+ return rc;
}
/* Add a cld to the list to requeue. Start the requeue thread if needed.
We are responsible for dropping the config log reference from here on out. */
static void mgc_requeue_add(struct config_llog_data *cld)
{
- ENTRY;
-
CDEBUG(D_INFO, "log %s: requeue (r=%d sp=%d st=%x)\n",
cld->cld_logname, atomic_read(&cld->cld_refcount),
cld->cld_stopping, rq_state);
@@ -573,7 +557,7 @@ static void mgc_requeue_add(struct config_llog_data *cld)
mutex_lock(&cld->cld_lock);
if (cld->cld_stopping || cld->cld_lostlock) {
mutex_unlock(&cld->cld_lock);
- RETURN_EXIT;
+ return;
}
/* this refcount will be released in mgc_requeue_thread. */
config_log_get(cld);
@@ -591,7 +575,6 @@ static void mgc_requeue_add(struct config_llog_data *cld)
spin_unlock(&config_list_lock);
wake_up(&rq_waitq);
}
- EXIT;
}
/********************** class fns **********************/
@@ -605,7 +588,6 @@ static int mgc_fs_setup(struct obd_device *obd, struct super_block *sb,
struct dentry *dentry;
char *label;
int err = 0;
- ENTRY;
LASSERT(lsi);
LASSERT(lsi->lsi_srv_mnt == mnt);
@@ -620,7 +602,7 @@ static int mgc_fs_setup(struct obd_device *obd, struct super_block *sb,
up(&cli->cl_mgc_sem);
CERROR("%s: No fstype %s: rc = %ld\n", lsi->lsi_fstype,
obd->obd_name, PTR_ERR(obd->obd_fsops));
- RETURN(PTR_ERR(obd->obd_fsops));
+ return PTR_ERR(obd->obd_fsops);
}
cli->cl_mgc_vfsmnt = mnt;
@@ -654,21 +636,20 @@ static int mgc_fs_setup(struct obd_device *obd, struct super_block *sb,
CDEBUG(D_MGC, "MGC using disk labelled=%s\n", label);
/* We keep the cl_mgc_sem until mgc_fs_cleanup */
- RETURN(0);
+ return 0;
err_ops:
fsfilt_put_ops(obd->obd_fsops);
obd->obd_fsops = NULL;
cli->cl_mgc_vfsmnt = NULL;
up(&cli->cl_mgc_sem);
- RETURN(err);
+ return err;
}
static int mgc_fs_cleanup(struct obd_device *obd)
{
struct client_obd *cli = &obd->u.cli;
int rc = 0;
- ENTRY;
LASSERT(cli->cl_mgc_vfsmnt != NULL);
@@ -687,14 +668,13 @@ static int mgc_fs_cleanup(struct obd_device *obd)
up(&cli->cl_mgc_sem);
- RETURN(rc);
+ return rc;
}
static atomic_t mgc_count = ATOMIC_INIT(0);
static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
{
int rc = 0;
- ENTRY;
switch (stage) {
case OBD_CLEANUP_EARLY:
@@ -719,14 +699,13 @@ static int mgc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
CERROR("failed to cleanup llogging subsystems\n");
break;
}
- RETURN(rc);
+ return rc;
}
static int mgc_cleanup(struct obd_device *obd)
{
struct client_obd *cli = &obd->u.cli;
int rc;
- ENTRY;
LASSERT(cli->cl_mgc_vfsmnt == NULL);
@@ -740,14 +719,13 @@ static int mgc_cleanup(struct obd_device *obd)
ptlrpcd_decref();
rc = client_obd_cleanup(obd);
- RETURN(rc);
+ return rc;
}
static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
{
struct lprocfs_static_vars lvars;
int rc;
- ENTRY;
ptlrpcd_addref();
@@ -782,13 +760,13 @@ static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
rc = 0;
}
- RETURN(rc);
+ return rc;
err_cleanup:
client_obd_cleanup(obd);
err_decref:
ptlrpcd_decref();
- RETURN(rc);
+ return rc;
}
/* based on ll_mdc_blocking_ast */
@@ -798,7 +776,6 @@ static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
struct lustre_handle lockh;
struct config_llog_data *cld = (struct config_llog_data *)data;
int rc = 0;
- ENTRY;
switch (flag) {
case LDLM_CB_BLOCKING:
@@ -847,7 +824,7 @@ static int mgc_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
LBUG();
}
- RETURN(rc);
+ return rc;
}
/* Not sure where this should go... */
@@ -862,18 +839,17 @@ static int mgc_set_mgs_param(struct obd_export *exp,
struct ptlrpc_request *req;
struct mgs_send_param *req_msp, *rep_msp;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_MGS_SET_INFO, LUSTRE_MGS_VERSION,
MGS_SET_INFO);
if (!req)
- RETURN(-ENOMEM);
+ return -ENOMEM;
req_msp = req_capsule_client_get(&req->rq_pill, &RMF_MGS_SEND_PARAM);
if (!req_msp) {
ptlrpc_req_finished(req);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
memcpy(req_msp, msp, sizeof(*req_msp));
@@ -889,7 +865,7 @@ static int mgc_set_mgs_param(struct obd_export *exp,
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
}
/* Take a config lock so we can get cancel notifications */
@@ -900,12 +876,15 @@ static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
struct lustre_handle *lockh)
{
struct config_llog_data *cld = (struct config_llog_data *)data;
- struct ldlm_enqueue_info einfo = { type, mode, mgc_blocking_ast,
- ldlm_completion_ast, NULL, NULL, NULL };
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = type,
+ .ei_mode = mode,
+ .ei_cb_bl = mgc_blocking_ast,
+ .ei_cb_cp = ldlm_completion_ast,
+ };
struct ptlrpc_request *req;
int short_limit = cld_is_sptlrpc(cld);
int rc;
- ENTRY;
CDEBUG(D_MGC, "Enqueue for %s (res "LPX64")\n", cld->cld_logname,
cld->cld_resid.name[0]);
@@ -916,7 +895,7 @@ static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
&RQF_LDLM_ENQUEUE, LUSTRE_DLM_VERSION,
LDLM_ENQUEUE);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, 0);
ptlrpc_request_set_replen(req);
@@ -934,17 +913,15 @@ static int mgc_enqueue(struct obd_export *exp, struct lov_stripe_md *lsm,
/* A failed enqueue should still call the mgc_blocking_ast,
where it will be requeued if needed ("grant failed"). */
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
}
static int mgc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
__u32 mode, struct lustre_handle *lockh)
{
- ENTRY;
-
ldlm_lock_decref(lockh, mode);
- RETURN(0);
+ return 0;
}
static void mgc_notify_active(struct obd_device *unused)
@@ -965,18 +942,17 @@ static int mgc_target_register(struct obd_export *exp,
struct ptlrpc_request *req;
struct mgs_target_info *req_mti, *rep_mti;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_MGS_TARGET_REG, LUSTRE_MGS_VERSION,
MGS_TARGET_REG);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
req_mti = req_capsule_client_get(&req->rq_pill, &RMF_MGS_TARGET_INFO);
if (!req_mti) {
ptlrpc_req_finished(req);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
memcpy(req_mti, mti, sizeof(*req_mti));
@@ -995,7 +971,7 @@ static int mgc_target_register(struct obd_export *exp,
}
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
}
int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
@@ -1003,14 +979,13 @@ int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
void *val, struct ptlrpc_request_set *set)
{
int rc = -EINVAL;
- ENTRY;
/* Turn off initial_recov after we try all backup servers once */
if (KEY_IS(KEY_INIT_RECOV_BACKUP)) {
struct obd_import *imp = class_exp2cliimp(exp);
int value;
if (vallen != sizeof(int))
- RETURN(-EINVAL);
+ return -EINVAL;
value = *(int *)val;
CDEBUG(D_MGC, "InitRecov %s %d/d%d:i%d:r%d:or%d:%s\n",
imp->imp_obd->obd_name, value,
@@ -1021,46 +996,46 @@ int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
if ((imp->imp_state != LUSTRE_IMP_FULL &&
imp->imp_state != LUSTRE_IMP_NEW) || value > 1)
ptlrpc_reconnect_import(imp);
- RETURN(0);
+ return 0;
}
/* FIXME move this to mgc_process_config */
if (KEY_IS(KEY_REGISTER_TARGET)) {
struct mgs_target_info *mti;
if (vallen != sizeof(struct mgs_target_info))
- RETURN(-EINVAL);
+ return -EINVAL;
mti = (struct mgs_target_info *)val;
CDEBUG(D_MGC, "register_target %s %#x\n",
mti->mti_svname, mti->mti_flags);
rc = mgc_target_register(exp, mti);
- RETURN(rc);
+ return rc;
}
if (KEY_IS(KEY_SET_FS)) {
struct super_block *sb = (struct super_block *)val;
struct lustre_sb_info *lsi;
if (vallen != sizeof(struct super_block))
- RETURN(-EINVAL);
+ return -EINVAL;
lsi = s2lsi(sb);
rc = mgc_fs_setup(exp->exp_obd, sb, lsi->lsi_srv_mnt);
if (rc) {
CERROR("set_fs got %d\n", rc);
}
- RETURN(rc);
+ return rc;
}
if (KEY_IS(KEY_CLEAR_FS)) {
if (vallen != 0)
- RETURN(-EINVAL);
+ return -EINVAL;
rc = mgc_fs_cleanup(exp->exp_obd);
if (rc) {
CERROR("clear_fs got %d\n", rc);
}
- RETURN(rc);
+ return rc;
}
if (KEY_IS(KEY_SET_INFO)) {
struct mgs_send_param *msp;
msp = (struct mgs_send_param *)val;
rc = mgc_set_mgs_param(exp, msp);
- RETURN(rc);
+ return rc;
}
if (KEY_IS(KEY_MGSSEC)) {
struct client_obd *cli = &exp->exp_obd->u.cli;
@@ -1075,7 +1050,7 @@ int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
*/
if (vallen == 0) {
if (cli->cl_flvr_mgc.sf_rpc != SPTLRPC_FLVR_INVALID)
- RETURN(0);
+ return 0;
val = "null";
vallen = 4;
}
@@ -1084,7 +1059,7 @@ int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
if (rc) {
CERROR("invalid sptlrpc flavor %s to MGS\n",
(char *) val);
- RETURN(rc);
+ return rc;
}
/*
@@ -1103,10 +1078,10 @@ int mgc_set_info_async(const struct lu_env *env, struct obd_export *exp,
(char *) val, str);
rc = -EPERM;
}
- RETURN(rc);
+ return rc;
}
- RETURN(rc);
+ return rc;
}
static int mgc_get_info(const struct lu_env *env, struct obd_export *exp,
@@ -1167,7 +1142,7 @@ static int mgc_import_event(struct obd_device *obd,
CERROR("Unknown import event %#x\n", event);
LBUG();
}
- RETURN(rc);
+ return rc;
}
static int mgc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
@@ -1175,7 +1150,6 @@ static int mgc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
{
struct llog_ctxt *ctxt;
int rc;
- ENTRY;
LASSERT(olg == &obd->obd_olg);
@@ -1192,20 +1166,18 @@ static int mgc_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
llog_initiator_connect(ctxt);
llog_ctxt_put(ctxt);
- RETURN(0);
+ return 0;
out:
ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
if (ctxt)
llog_cleanup(NULL, ctxt);
- RETURN(rc);
+ return rc;
}
static int mgc_llog_finish(struct obd_device *obd, int count)
{
struct llog_ctxt *ctxt;
- ENTRY;
-
ctxt = llog_get_context(obd, LLOG_CONFIG_REPL_CTXT);
if (ctxt)
llog_cleanup(NULL, ctxt);
@@ -1213,7 +1185,7 @@ static int mgc_llog_finish(struct obd_device *obd, int count)
ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
if (ctxt)
llog_cleanup(NULL, ctxt);
- RETURN(0);
+ return 0;
}
enum {
@@ -1238,14 +1210,13 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
int pos;
int rc = 0;
int off = 0;
- ENTRY;
LASSERT(cfg->cfg_instance != NULL);
LASSERT(cfg->cfg_sb == cfg->cfg_instance);
OBD_ALLOC(inst, PAGE_CACHE_SIZE);
if (inst == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
if (!IS_SERVER(lsi)) {
pos = snprintf(inst, PAGE_CACHE_SIZE, "%p", cfg->cfg_instance);
@@ -1259,7 +1230,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
PAGE_CACHE_SIZE);
if (rc) {
OBD_FREE(inst, PAGE_CACHE_SIZE);
- RETURN(-EINVAL);
+ return -EINVAL;
}
pos = strlen(inst);
}
@@ -1417,7 +1388,7 @@ static int mgc_apply_recover_logs(struct obd_device *mgc,
}
OBD_FREE(inst, PAGE_CACHE_SIZE);
- RETURN(rc);
+ return rc;
}
/**
@@ -1439,7 +1410,6 @@ static int mgc_process_recover_log(struct obd_device *obd,
int i;
int ealen;
int rc;
- ENTRY;
/* allocate buffer for bulk transfer.
* if this is the first time for this mgs to read logs,
@@ -1582,8 +1552,6 @@ static int mgc_process_cfg_log(struct obd_device *mgc,
int rc = 0, must_pop = 0;
bool sptlrpc_started = false;
- ENTRY;
-
LASSERT(cld);
LASSERT(mutex_is_locked(&cld->cld_lock));
@@ -1592,7 +1560,7 @@ static int mgc_process_cfg_log(struct obd_device *mgc,
* read it up here.
*/
if (cld_is_sptlrpc(cld) && local_only)
- RETURN(0);
+ return 0;
if (cld->cld_cfg.cfg_sb)
lsi = s2lsi(cld->cld_cfg.cfg_sb);
@@ -1600,12 +1568,12 @@ static int mgc_process_cfg_log(struct obd_device *mgc,
ctxt = llog_get_context(mgc, LLOG_CONFIG_REPL_CTXT);
if (!ctxt) {
CERROR("missing llog context\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
OBD_ALLOC_PTR(saved_ctxt);
if (saved_ctxt == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
lctxt = llog_get_context(mgc, LLOG_CONFIG_ORIG_CTXT);
@@ -1623,7 +1591,6 @@ static int mgc_process_cfg_log(struct obd_device *mgc,
be updated here. */
rc = class_config_parse_llog(NULL, ctxt, cld->cld_logname,
&cld->cld_cfg);
- EXIT;
out_pop:
llog_ctxt_put(ctxt);
@@ -1647,7 +1614,7 @@ out_pop:
strlen("-sptlrpc"));
}
- RETURN(rc);
+ return rc;
}
/** Get a config log from the MGS and process it.
@@ -1659,7 +1626,6 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
struct lustre_handle lockh = { 0 };
__u64 flags = LDLM_FL_NO_LRU;
int rc = 0, rcl;
- ENTRY;
LASSERT(cld);
@@ -1670,7 +1636,7 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
mutex_lock(&cld->cld_lock);
if (cld->cld_stopping) {
mutex_unlock(&cld->cld_lock);
- RETURN(0);
+ return 0;
}
OBD_FAIL_TIMEOUT(OBD_FAIL_MGC_PAUSE_PROCESS_LOG, 20);
@@ -1719,7 +1685,7 @@ int mgc_process_log(struct obd_device *mgc, struct config_llog_data *cld)
CERROR("Can't drop cfg lock: %d\n", rcl);
}
- RETURN(rc);
+ return rc;
}
@@ -1733,7 +1699,6 @@ static int mgc_process_config(struct obd_device *obd, obd_count len, void *buf)
struct config_llog_instance *cfg = NULL;
char *logname;
int rc = 0;
- ENTRY;
switch(lcfg->lcfg_command) {
case LCFG_LOV_ADD_OBD: {
@@ -1818,7 +1783,7 @@ static int mgc_process_config(struct obd_device *obd, obd_count len, void *buf)
}
}
out:
- RETURN(rc);
+ return rc;
}
struct obd_ops mgc_obd_ops = {
diff --git a/drivers/staging/lustre/lustre/obdclass/Makefile b/drivers/staging/lustre/lustre/obdclass/Makefile
index b80c13c6f5d..8a0e08ced45 100644
--- a/drivers/staging/lustre/lustre/obdclass/Makefile
+++ b/drivers/staging/lustre/lustre/obdclass/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_LUSTRE_FS) += obdclass.o llog_test.o
obdclass-y := linux/linux-module.o linux/linux-obdo.o linux/linux-sysctl.o \
llog.o llog_cat.o llog_obd.o llog_swab.o class_obd.o debug.o \
genops.o uuid.o llog_ioctl.o lprocfs_status.o \
- lprocfs_jobstats.o lustre_handles.o lustre_peer.o llog_osd.o \
+ lustre_handles.o lustre_peer.o llog_osd.o \
local_storage.o statfs_pack.o obdo.o obd_config.o obd_mount.o\
mea.o lu_object.o dt_object.o capa.o cl_object.o \
cl_page.o cl_lock.o cl_io.o lu_ref.o acl.o idmap.o \
diff --git a/drivers/staging/lustre/lustre/obdclass/acl.c b/drivers/staging/lustre/lustre/obdclass/acl.c
index c2a6702c9f2..f0bb632a70a 100644
--- a/drivers/staging/lustre/lustre/obdclass/acl.c
+++ b/drivers/staging/lustre/lustre/obdclass/acl.c
@@ -144,10 +144,9 @@ lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size)
{
int count, i, esize;
ext_acl_xattr_header *new;
- ENTRY;
if (unlikely(size < 0))
- RETURN(ERR_PTR(-EINVAL));
+ return ERR_PTR(-EINVAL);
else if (!size)
count = 0;
else
@@ -155,7 +154,7 @@ lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size)
esize = CFS_ACL_XATTR_SIZE(count, ext_acl_xattr);
OBD_ALLOC(new, esize);
if (unlikely(new == NULL))
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
new->a_count = cpu_to_le32(count);
for (i = 0; i < count; i++) {
@@ -165,7 +164,7 @@ lustre_posix_acl_xattr_2ext(posix_acl_xattr_header *header, int size)
new->a_entries[i].e_stat = cpu_to_le32(ES_UNK);
}
- RETURN(new);
+ return new;
}
EXPORT_SYMBOL(lustre_posix_acl_xattr_2ext);
@@ -178,16 +177,15 @@ int lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, int size,
int count, i, j, rc = 0;
__u32 id;
posix_acl_xattr_header *new;
- ENTRY;
if (unlikely(size < 0))
- RETURN(-EINVAL);
+ return -EINVAL;
else if (!size)
- RETURN(0);
+ return 0;
OBD_ALLOC(new, size);
if (unlikely(new == NULL))
- RETURN(-ENOMEM);
+ return -ENOMEM;
new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION);
count = CFS_ACL_XATTR_COUNT(size, posix_acl_xattr);
@@ -228,7 +226,6 @@ int lustre_posix_acl_xattr_filter(posix_acl_xattr_header *header, int size,
*out = new;
rc = 0;
}
- EXIT;
_out:
if (rc) {
@@ -302,7 +299,6 @@ int lustre_acl_xattr_merge2posix(posix_acl_xattr_header *posix_header, int size,
posix_acl_xattr_entry pe = {ACL_MASK, 0, ACL_UNDEFINED_ID};
posix_acl_xattr_header *new;
ext_acl_xattr_entry *ee, ae;
- ENTRY;
lustre_posix_acl_cpu_to_le(&pe, &pe);
ee = lustre_ext_acl_xattr_search(ext_header, &pe, &pos);
@@ -312,7 +308,7 @@ int lustre_acl_xattr_merge2posix(posix_acl_xattr_header *posix_header, int size,
posix_size = CFS_ACL_XATTR_SIZE(posix_count, posix_acl_xattr);
OBD_ALLOC(new, posix_size);
if (unlikely(new == NULL))
- RETURN(-ENOMEM);
+ return -ENOMEM;
new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION);
for (i = 0, j = 0; i < ext_count; i++) {
@@ -349,7 +345,7 @@ int lustre_acl_xattr_merge2posix(posix_acl_xattr_header *posix_header, int size,
int ori_posix_count;
if (unlikely(size < 0))
- RETURN(-EINVAL);
+ return -EINVAL;
else if (!size)
ori_posix_count = 0;
else
@@ -360,7 +356,7 @@ int lustre_acl_xattr_merge2posix(posix_acl_xattr_header *posix_header, int size,
CFS_ACL_XATTR_SIZE(posix_count, posix_acl_xattr);
OBD_ALLOC(new, posix_size);
if (unlikely(new == NULL))
- RETURN(-ENOMEM);
+ return -ENOMEM;
new->a_version = cpu_to_le32(CFS_ACL_XATTR_VERSION);
/* 1. process the unchanged ACL entries
@@ -397,7 +393,6 @@ int lustre_acl_xattr_merge2posix(posix_acl_xattr_header *posix_header, int size,
*out = new;
rc = 0;
}
- EXIT;
_out:
if (rc) {
@@ -420,10 +415,9 @@ lustre_acl_xattr_merge2ext(posix_acl_xattr_header *posix_header, int size,
posix_acl_xattr_entry pae;
ext_acl_xattr_header *new;
ext_acl_xattr_entry *ee, eae;
- ENTRY;
if (unlikely(size < 0))
- RETURN(ERR_PTR(-EINVAL));
+ return ERR_PTR(-EINVAL);
else if (!size)
posix_count = 0;
else
@@ -434,7 +428,7 @@ lustre_acl_xattr_merge2ext(posix_acl_xattr_header *posix_header, int size,
OBD_ALLOC(new, ext_size);
if (unlikely(new == NULL))
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
for (i = 0, j = 0; i < posix_count; i++) {
lustre_posix_acl_le_to_cpu(&pae, &posix_header->a_entries[i]);
@@ -532,7 +526,6 @@ lustre_acl_xattr_merge2ext(posix_acl_xattr_header *posix_header, int size,
new->a_count = cpu_to_le32(j);
/* free unused space. */
rc = lustre_ext_acl_xattr_reduce_space(&new, ext_count);
- EXIT;
out:
if (rc) {
diff --git a/drivers/staging/lustre/lustre/obdclass/capa.c b/drivers/staging/lustre/lustre/obdclass/capa.c
index 3e532f5106e..68d797ba8ae 100644
--- a/drivers/staging/lustre/lustre/obdclass/capa.c
+++ b/drivers/staging/lustre/lustre/obdclass/capa.c
@@ -42,12 +42,12 @@
#define DEBUG_SUBSYSTEM S_SEC
-#include <linux/version.h>
#include <linux/fs.h>
#include <asm/unistd.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/crypto.h>
#include <obd_class.h>
#include <lustre_debug.h>
@@ -77,6 +77,12 @@ EXPORT_SYMBOL(capa_list);
EXPORT_SYMBOL(capa_lock);
EXPORT_SYMBOL(capa_count);
+static inline
+unsigned int ll_crypto_tfm_alg_min_keysize(struct crypto_blkcipher *tfm)
+{
+ return crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher.min_keysize;
+}
+
struct hlist_head *init_capa_hash(void)
{
struct hlist_head *hash;
@@ -235,9 +241,26 @@ struct obd_capa *capa_lookup(struct hlist_head *hash, struct lustre_capa *capa,
}
EXPORT_SYMBOL(capa_lookup);
+static inline int ll_crypto_hmac(struct crypto_hash *tfm,
+ u8 *key, unsigned int *keylen,
+ struct scatterlist *sg,
+ unsigned int size, u8 *result)
+{
+ struct hash_desc desc;
+ int rv;
+ desc.tfm = tfm;
+ desc.flags = 0;
+ rv = crypto_hash_setkey(desc.tfm, key, *keylen);
+ if (rv) {
+ CERROR("failed to hash setkey: %d\n", rv);
+ return rv;
+ }
+ return crypto_hash_digest(&desc, sg, size, result);
+}
+
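The ll_crypto_hmac() helper added above bundles the setkey and digest steps of the legacy crypto_hash API, mirroring how capa_hmac() below uses it. A minimal usage sketch, assuming the helper is in scope; the function name example_hmac and the "hmac(sha1)" algorithm name are illustrative only and not part of this patch:

/* Needs <linux/crypto.h>, <linux/scatterlist.h> and <linux/err.h>. */
static int example_hmac(const void *data, unsigned int len,
			u8 *key, unsigned int keylen, u8 *out)
{
	struct crypto_hash *tfm;
	struct scatterlist sg;
	int rc;

	tfm = crypto_alloc_hash("hmac(sha1)", 0, 0);	/* illustrative alg name */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	sg_init_one(&sg, data, len);	/* single contiguous buffer */
	rc = ll_crypto_hmac(tfm, key, &keylen, &sg, len, out);

	crypto_free_hash(tfm);
	return rc;
}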
int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
{
- struct ll_crypto_hash *tfm;
+ struct crypto_hash *tfm;
struct capa_hmac_alg *alg;
int keylen;
struct scatterlist sl;
@@ -249,7 +272,7 @@ int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
alg = &capa_hmac_algs[capa_alg(capa)];
- tfm = ll_crypto_alloc_hash(alg->ha_name, 0, 0);
+ tfm = crypto_alloc_hash(alg->ha_name, 0, 0);
if (!tfm) {
CERROR("crypto_alloc_tfm failed, check whether your kernel"
"has crypto support!\n");
@@ -262,7 +285,7 @@ int capa_hmac(__u8 *hmac, struct lustre_capa *capa, __u8 *key)
(unsigned long)(capa) % PAGE_CACHE_SIZE);
ll_crypto_hmac(tfm, key, &keylen, &sl, sl.length, hmac);
- ll_crypto_free_hash(tfm);
+ crypto_free_hash(tfm);
return 0;
}
@@ -270,21 +293,20 @@ EXPORT_SYMBOL(capa_hmac);
int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
{
- struct ll_crypto_cipher *tfm;
+ struct crypto_blkcipher *tfm;
struct scatterlist sd;
struct scatterlist ss;
struct blkcipher_desc desc;
unsigned int min;
int rc;
char alg[CRYPTO_MAX_ALG_NAME+1] = "aes";
- ENTRY;
/* passing "aes" in a variable instead of a constant string keeps gcc
* 4.3.2 happy */
- tfm = ll_crypto_alloc_blkcipher(alg, 0, 0 );
+ tfm = crypto_alloc_blkcipher(alg, 0, 0);
if (IS_ERR(tfm)) {
CERROR("failed to load transform for aes\n");
- RETURN(PTR_ERR(tfm));
+ return PTR_ERR(tfm);
}
min = ll_crypto_tfm_alg_min_keysize(tfm);
@@ -293,7 +315,7 @@ int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
GOTO(out, rc = -EINVAL);
}
- rc = ll_crypto_blkcipher_setkey(tfm, key, min);
+ rc = crypto_blkcipher_setkey(tfm, key, min);
if (rc) {
CERROR("failed to setting key for aes\n");
GOTO(out, rc);
@@ -307,37 +329,34 @@ int capa_encrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
desc.tfm = tfm;
desc.info = NULL;
desc.flags = 0;
- rc = ll_crypto_blkcipher_encrypt(&desc, &sd, &ss, 16);
+ rc = crypto_blkcipher_encrypt(&desc, &sd, &ss, 16);
if (rc) {
CERROR("failed to encrypt for aes\n");
GOTO(out, rc);
}
- EXIT;
-
out:
- ll_crypto_free_blkcipher(tfm);
+ crypto_free_blkcipher(tfm);
return rc;
}
EXPORT_SYMBOL(capa_encrypt_id);
int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
{
- struct ll_crypto_cipher *tfm;
+ struct crypto_blkcipher *tfm;
struct scatterlist sd;
struct scatterlist ss;
struct blkcipher_desc desc;
unsigned int min;
int rc;
char alg[CRYPTO_MAX_ALG_NAME+1] = "aes";
- ENTRY;
/* passing "aes" in a variable instead of a constant string keeps gcc
* 4.3.2 happy */
- tfm = ll_crypto_alloc_blkcipher(alg, 0, 0 );
+ tfm = crypto_alloc_blkcipher(alg, 0, 0);
if (IS_ERR(tfm)) {
CERROR("failed to load transform for aes\n");
- RETURN(PTR_ERR(tfm));
+ return PTR_ERR(tfm);
}
min = ll_crypto_tfm_alg_min_keysize(tfm);
@@ -346,7 +365,7 @@ int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
GOTO(out, rc = -EINVAL);
}
- rc = ll_crypto_blkcipher_setkey(tfm, key, min);
+ rc = crypto_blkcipher_setkey(tfm, key, min);
if (rc) {
CERROR("failed to setting key for aes\n");
GOTO(out, rc);
@@ -361,16 +380,14 @@ int capa_decrypt_id(__u32 *d, __u32 *s, __u8 *key, int keylen)
desc.tfm = tfm;
desc.info = NULL;
desc.flags = 0;
- rc = ll_crypto_blkcipher_decrypt(&desc, &sd, &ss, 16);
+ rc = crypto_blkcipher_decrypt(&desc, &sd, &ss, 16);
if (rc) {
CERROR("failed to decrypt for aes\n");
GOTO(out, rc);
}
- EXIT;
-
out:
- ll_crypto_free_blkcipher(tfm);
+ crypto_free_blkcipher(tfm);
return rc;
}
EXPORT_SYMBOL(capa_decrypt_id);
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index 75c9be8875e..42697934155 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -106,7 +106,6 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io)
LINVRNT(cl_io_type_is_valid(io->ci_type));
LINVRNT(cl_io_invariant(io));
- ENTRY;
while (!list_empty(&io->ci_layers)) {
slice = container_of(io->ci_layers.prev, struct cl_io_slice,
@@ -144,7 +143,6 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io)
default:
LBUG();
}
- EXIT;
}
EXPORT_SYMBOL(cl_io_fini);
@@ -157,7 +155,6 @@ static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
LINVRNT(io->ci_state == CIS_ZERO || io->ci_state == CIS_FINI);
LINVRNT(cl_io_type_is_valid(iot));
LINVRNT(cl_io_invariant(io));
- ENTRY;
io->ci_type = iot;
INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
@@ -175,7 +172,7 @@ static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
}
if (result == 0)
io->ci_state = CIS_INIT;
- RETURN(result);
+ return result;
}
/**
@@ -228,7 +225,6 @@ int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
{
LINVRNT(iot == CIT_READ || iot == CIT_WRITE);
LINVRNT(io->ci_obj != NULL);
- ENTRY;
LU_OBJECT_HEADER(D_VFSTRACE, env, &io->ci_obj->co_lu,
"io range: %u ["LPU64", "LPU64") %u %u\n",
@@ -236,7 +232,7 @@ int cl_io_rw_init(const struct lu_env *env, struct cl_io *io,
io->u.ci_rw.crw_nonblock, io->u.ci_wr.wr_append);
io->u.ci_rw.crw_pos = pos;
io->u.ci_rw.crw_count = count;
- RETURN(cl_io_init(env, io, iot, io->ci_obj));
+ return cl_io_init(env, io, iot, io->ci_obj);
}
EXPORT_SYMBOL(cl_io_rw_init);
@@ -288,7 +284,6 @@ static void cl_io_locks_sort(struct cl_io *io)
{
int done = 0;
- ENTRY;
/* hidden treasure: bubble sort for now. */
do {
struct cl_io_lock_link *curr;
@@ -325,7 +320,6 @@ static void cl_io_locks_sort(struct cl_io *io)
prev = curr;
}
} while (!done);
- EXIT;
}
/**
@@ -339,12 +333,11 @@ int cl_queue_match(const struct list_head *queue,
{
struct cl_io_lock_link *scan;
- ENTRY;
list_for_each_entry(scan, queue, cill_linkage) {
if (cl_lock_descr_match(&scan->cill_descr, need))
- RETURN(+1);
+ return +1;
}
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(cl_queue_match);
@@ -353,7 +346,6 @@ static int cl_queue_merge(const struct list_head *queue,
{
struct cl_io_lock_link *scan;
- ENTRY;
list_for_each_entry(scan, queue, cill_linkage) {
if (cl_lock_descr_cmp(&scan->cill_descr, need))
continue;
@@ -361,9 +353,9 @@ static int cl_queue_merge(const struct list_head *queue,
CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
scan->cill_descr.cld_end);
- RETURN(+1);
+ return +1;
}
- RETURN(0);
+ return 0;
}
@@ -388,8 +380,6 @@ static int cl_lockset_lock_one(const struct lu_env *env,
struct cl_lock *lock;
int result;
- ENTRY;
-
lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
if (!IS_ERR(lock)) {
@@ -404,7 +394,7 @@ static int cl_lockset_lock_one(const struct lu_env *env,
result = 0;
} else
result = PTR_ERR(lock);
- RETURN(result);
+ return result;
}
static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
@@ -412,7 +402,6 @@ static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
{
struct cl_lock *lock = link->cill_lock;
- ENTRY;
list_del_init(&link->cill_linkage);
if (lock != NULL) {
cl_lock_release(env, lock, "io", io);
@@ -420,7 +409,6 @@ static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
}
if (link->cill_fini != NULL)
link->cill_fini(env, link);
- EXIT;
}
static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
@@ -431,7 +419,6 @@ static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
struct cl_lock *lock;
int result;
- ENTRY;
result = 0;
list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
if (!cl_lockset_match(set, &link->cill_descr)) {
@@ -455,7 +442,7 @@ static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
break;
}
}
- RETURN(result);
+ return result;
}
/**
@@ -474,7 +461,6 @@ int cl_io_lock(const struct lu_env *env, struct cl_io *io)
LINVRNT(io->ci_state == CIS_IT_STARTED);
LINVRNT(cl_io_invariant(io));
- ENTRY;
cl_io_for_each(scan, io) {
if (scan->cis_iop->op[io->ci_type].cio_lock == NULL)
continue;
@@ -490,7 +476,7 @@ int cl_io_lock(const struct lu_env *env, struct cl_io *io)
cl_io_unlock(env, io);
else
io->ci_state = CIS_LOCKED;
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_io_lock);
@@ -508,7 +494,6 @@ void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
LASSERT(CIS_IT_STARTED <= io->ci_state && io->ci_state < CIS_UNLOCKED);
LINVRNT(cl_io_invariant(io));
- ENTRY;
set = &io->ci_lockset;
list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
@@ -527,7 +512,6 @@ void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
}
io->ci_state = CIS_UNLOCKED;
LASSERT(!cl_env_info(env)->clt_counters[CNL_TOP].ctc_nr_locks_acquired);
- EXIT;
}
EXPORT_SYMBOL(cl_io_unlock);
@@ -547,7 +531,6 @@ int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
LINVRNT(io->ci_state == CIS_INIT || io->ci_state == CIS_IT_ENDED);
LINVRNT(cl_io_invariant(io));
- ENTRY;
result = 0;
cl_io_for_each(scan, io) {
if (scan->cis_iop->op[io->ci_type].cio_iter_init == NULL)
@@ -559,7 +542,7 @@ int cl_io_iter_init(const struct lu_env *env, struct cl_io *io)
}
if (result == 0)
io->ci_state = CIS_IT_STARTED;
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_io_iter_init);
@@ -576,13 +559,11 @@ void cl_io_iter_fini(const struct lu_env *env, struct cl_io *io)
LINVRNT(io->ci_state == CIS_UNLOCKED);
LINVRNT(cl_io_invariant(io));
- ENTRY;
cl_io_for_each_reverse(scan, io) {
if (scan->cis_iop->op[io->ci_type].cio_iter_fini != NULL)
scan->cis_iop->op[io->ci_type].cio_iter_fini(env, scan);
}
io->ci_state = CIS_IT_ENDED;
- EXIT;
}
EXPORT_SYMBOL(cl_io_iter_fini);
@@ -598,8 +579,6 @@ void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob)
LINVRNT(cl_io_is_loopable(io));
LINVRNT(cl_io_invariant(io));
- ENTRY;
-
io->u.ci_rw.crw_pos += nob;
io->u.ci_rw.crw_count -= nob;
@@ -609,7 +588,6 @@ void cl_io_rw_advance(const struct lu_env *env, struct cl_io *io, size_t nob)
scan->cis_iop->op[io->ci_type].cio_advance(env, scan,
nob);
}
- EXIT;
}
EXPORT_SYMBOL(cl_io_rw_advance);
@@ -621,14 +599,13 @@ int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
{
int result;
- ENTRY;
if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
result = +1;
else {
list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
result = 0;
}
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_io_lock_add);
@@ -647,7 +624,6 @@ int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
struct cl_io_lock_link *link;
int result;
- ENTRY;
OBD_ALLOC_PTR(link);
if (link != NULL) {
link->cill_descr = *descr;
@@ -658,7 +634,7 @@ int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
} else
result = -ENOMEM;
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_io_lock_alloc_add);
@@ -673,7 +649,6 @@ int cl_io_start(const struct lu_env *env, struct cl_io *io)
LINVRNT(cl_io_is_loopable(io));
LINVRNT(io->ci_state == CIS_LOCKED);
LINVRNT(cl_io_invariant(io));
- ENTRY;
io->ci_state = CIS_IO_GOING;
cl_io_for_each(scan, io) {
@@ -685,7 +660,7 @@ int cl_io_start(const struct lu_env *env, struct cl_io *io)
}
if (result >= 0)
result = 0;
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_io_start);
@@ -700,7 +675,6 @@ void cl_io_end(const struct lu_env *env, struct cl_io *io)
LINVRNT(cl_io_is_loopable(io));
LINVRNT(io->ci_state == CIS_IO_GOING);
LINVRNT(cl_io_invariant(io));
- ENTRY;
cl_io_for_each_reverse(scan, io) {
if (scan->cis_iop->op[io->ci_type].cio_end != NULL)
@@ -708,7 +682,6 @@ void cl_io_end(const struct lu_env *env, struct cl_io *io)
/* TODO: error handling. */
}
io->ci_state = CIS_IO_FINISHED;
- EXIT;
}
EXPORT_SYMBOL(cl_io_end);
@@ -774,7 +747,6 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
LINVRNT(cl_page_in_io(page, io));
LINVRNT(cl_io_invariant(io));
- ENTRY;
queue = &io->ci_queue;
@@ -807,7 +779,7 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
*/
cl_page_list_disown(env, io, &queue->c2_qin);
cl_2queue_fini(env, queue);
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_io_read_page);
@@ -827,7 +799,6 @@ int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
LINVRNT(cl_io_invariant(io));
LASSERT(cl_page_in_io(page, io));
- ENTRY;
cl_io_for_each_reverse(scan, io) {
if (scan->cis_iop->cio_prepare_write != NULL) {
@@ -841,7 +812,7 @@ int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
break;
}
}
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_io_prepare_write);
@@ -867,7 +838,6 @@ int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
*/
LASSERT(cl_page_is_owned(page, io) || page->cp_parent != NULL);
LASSERT(cl_page_in_io(page, io));
- ENTRY;
cl_io_for_each(scan, io) {
if (scan->cis_iop->cio_commit_write != NULL) {
@@ -882,7 +852,7 @@ int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
}
}
LINVRNT(result <= 0);
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_io_commit_write);
@@ -903,7 +873,6 @@ int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
int result = 0;
LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op));
- ENTRY;
cl_io_for_each(scan, io) {
if (scan->cis_iop->req_op[crt].cio_submit == NULL)
@@ -917,7 +886,7 @@ int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
* If ->cio_submit() failed, no pages were sent.
*/
LASSERT(ergo(result != 0, list_empty(&queue->c2_qout.pl_pages)));
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_io_submit_rw);
@@ -1009,7 +978,6 @@ int cl_io_loop(const struct lu_env *env, struct cl_io *io)
int result = 0;
LINVRNT(cl_io_is_loopable(io));
- ENTRY;
do {
size_t nob;
@@ -1043,7 +1011,7 @@ int cl_io_loop(const struct lu_env *env, struct cl_io *io)
} while (result == 0 && io->ci_continue);
if (result == 0)
result = io->ci_result;
- RETURN(result < 0 ? result : 0);
+ return result < 0 ? result : 0;
}
EXPORT_SYMBOL(cl_io_loop);
@@ -1064,13 +1032,11 @@ void cl_io_slice_add(struct cl_io *io, struct cl_io_slice *slice,
LASSERT((linkage->prev == NULL && linkage->next == NULL) ||
list_empty(linkage));
- ENTRY;
list_add_tail(linkage, &io->ci_layers);
slice->cis_io = io;
slice->cis_obj = obj;
slice->cis_iop = ops;
- EXIT;
}
EXPORT_SYMBOL(cl_io_slice_add);
@@ -1080,11 +1046,9 @@ EXPORT_SYMBOL(cl_io_slice_add);
*/
void cl_page_list_init(struct cl_page_list *plist)
{
- ENTRY;
plist->pl_nr = 0;
INIT_LIST_HEAD(&plist->pl_pages);
plist->pl_owner = current;
- EXIT;
}
EXPORT_SYMBOL(cl_page_list_init);
@@ -1093,7 +1057,6 @@ EXPORT_SYMBOL(cl_page_list_init);
*/
void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
{
- ENTRY;
/* it would be better to check that page is owned by "current" io, but
* it is not passed here. */
LASSERT(page->cp_owner != NULL);
@@ -1105,9 +1068,8 @@ void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page)
LASSERT(list_empty(&page->cp_batch));
list_add_tail(&page->cp_batch, &plist->pl_pages);
++plist->pl_nr;
- page->cp_queue_ref = lu_ref_add(&page->cp_reference, "queue", plist);
+ lu_ref_add_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
cl_page_get(page);
- EXIT;
}
EXPORT_SYMBOL(cl_page_list_add);
@@ -1120,15 +1082,13 @@ void cl_page_list_del(const struct lu_env *env,
LASSERT(plist->pl_nr > 0);
LINVRNT(plist->pl_owner == current);
- ENTRY;
list_del_init(&page->cp_batch);
lockdep_off();
mutex_unlock(&page->cp_mutex);
lockdep_on();
--plist->pl_nr;
- lu_ref_del_at(&page->cp_reference, page->cp_queue_ref, "queue", plist);
+ lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
cl_page_put(env, page);
- EXIT;
}
EXPORT_SYMBOL(cl_page_list_del);
@@ -1142,13 +1102,11 @@ void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
LINVRNT(dst->pl_owner == current);
LINVRNT(src->pl_owner == current);
- ENTRY;
list_move_tail(&page->cp_batch, &dst->pl_pages);
--src->pl_nr;
++dst->pl_nr;
- lu_ref_set_at(&page->cp_reference,
- page->cp_queue_ref, "queue", src, dst);
- EXIT;
+ lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
+ src, dst);
}
EXPORT_SYMBOL(cl_page_list_move);
@@ -1163,10 +1121,8 @@ void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
LINVRNT(list->pl_owner == current);
LINVRNT(head->pl_owner == current);
- ENTRY;
cl_page_list_for_each_safe(page, tmp, list)
cl_page_list_move(head, list, page);
- EXIT;
}
EXPORT_SYMBOL(cl_page_list_splice);
@@ -1184,7 +1140,6 @@ void cl_page_list_disown(const struct lu_env *env,
LINVRNT(plist->pl_owner == current);
- ENTRY;
cl_page_list_for_each_safe(page, temp, plist) {
LASSERT(plist->pl_nr > 0);
@@ -1202,10 +1157,10 @@ void cl_page_list_disown(const struct lu_env *env,
* XXX cl_page_disown0() will fail if page is not locked.
*/
cl_page_disown0(env, io, page);
- lu_ref_del(&page->cp_reference, "queue", plist);
+ lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue",
+ plist);
cl_page_put(env, page);
}
- EXIT;
}
EXPORT_SYMBOL(cl_page_list_disown);
@@ -1219,11 +1174,9 @@ void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
LINVRNT(plist->pl_owner == current);
- ENTRY;
cl_page_list_for_each_safe(page, temp, plist)
cl_page_list_del(env, plist, page);
LASSERT(plist->pl_nr == 0);
- EXIT;
}
EXPORT_SYMBOL(cl_page_list_fini);
@@ -1240,7 +1193,6 @@ int cl_page_list_own(const struct lu_env *env,
LINVRNT(plist->pl_owner == current);
- ENTRY;
result = 0;
cl_page_list_for_each_safe(page, temp, plist) {
LASSERT(index <= page->cp_index);
@@ -1250,7 +1202,7 @@ int cl_page_list_own(const struct lu_env *env,
else
cl_page_list_del(env, plist, page);
}
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_page_list_own);
@@ -1278,10 +1230,8 @@ void cl_page_list_discard(const struct lu_env *env, struct cl_io *io,
struct cl_page *page;
LINVRNT(plist->pl_owner == current);
- ENTRY;
cl_page_list_for_each(page, plist)
cl_page_discard(env, io, page);
- EXIT;
}
EXPORT_SYMBOL(cl_page_list_discard);
@@ -1295,14 +1245,13 @@ int cl_page_list_unmap(const struct lu_env *env, struct cl_io *io,
int result;
LINVRNT(plist->pl_owner == current);
- ENTRY;
result = 0;
cl_page_list_for_each(page, plist) {
result = cl_page_unmap(env, io, page);
if (result != 0)
break;
}
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_page_list_unmap);
@@ -1311,10 +1260,8 @@ EXPORT_SYMBOL(cl_page_list_unmap);
*/
void cl_2queue_init(struct cl_2queue *queue)
{
- ENTRY;
cl_page_list_init(&queue->c2_qin);
cl_page_list_init(&queue->c2_qout);
- EXIT;
}
EXPORT_SYMBOL(cl_2queue_init);
@@ -1323,9 +1270,7 @@ EXPORT_SYMBOL(cl_2queue_init);
*/
void cl_2queue_add(struct cl_2queue *queue, struct cl_page *page)
{
- ENTRY;
cl_page_list_add(&queue->c2_qin, page);
- EXIT;
}
EXPORT_SYMBOL(cl_2queue_add);
@@ -1335,10 +1280,8 @@ EXPORT_SYMBOL(cl_2queue_add);
void cl_2queue_disown(const struct lu_env *env,
struct cl_io *io, struct cl_2queue *queue)
{
- ENTRY;
cl_page_list_disown(env, io, &queue->c2_qin);
cl_page_list_disown(env, io, &queue->c2_qout);
- EXIT;
}
EXPORT_SYMBOL(cl_2queue_disown);
@@ -1348,10 +1291,8 @@ EXPORT_SYMBOL(cl_2queue_disown);
void cl_2queue_discard(const struct lu_env *env,
struct cl_io *io, struct cl_2queue *queue)
{
- ENTRY;
cl_page_list_discard(env, io, &queue->c2_qin);
cl_page_list_discard(env, io, &queue->c2_qout);
- EXIT;
}
EXPORT_SYMBOL(cl_2queue_discard);
@@ -1371,10 +1312,8 @@ EXPORT_SYMBOL(cl_2queue_assume);
*/
void cl_2queue_fini(const struct lu_env *env, struct cl_2queue *queue)
{
- ENTRY;
cl_page_list_fini(env, &queue->c2_qout);
cl_page_list_fini(env, &queue->c2_qin);
- EXIT;
}
EXPORT_SYMBOL(cl_2queue_fini);
@@ -1383,10 +1322,8 @@ EXPORT_SYMBOL(cl_2queue_fini);
*/
void cl_2queue_init_page(struct cl_2queue *queue, struct cl_page *page)
{
- ENTRY;
cl_2queue_init(queue);
cl_2queue_add(queue, page);
- EXIT;
}
EXPORT_SYMBOL(cl_2queue_init_page);
@@ -1397,10 +1334,9 @@ EXPORT_SYMBOL(cl_2queue_init_page);
*/
struct cl_io *cl_io_top(struct cl_io *io)
{
- ENTRY;
while (io->ci_parent != NULL)
io = io->ci_parent;
- RETURN(io);
+ return io;
}
EXPORT_SYMBOL(cl_io_top);
@@ -1425,12 +1361,10 @@ void cl_req_slice_add(struct cl_req *req, struct cl_req_slice *slice,
struct cl_device *dev,
const struct cl_req_operations *ops)
{
- ENTRY;
list_add_tail(&slice->crs_linkage, &req->crq_layers);
slice->crs_dev = dev;
slice->crs_ops = ops;
slice->crs_req = req;
- EXIT;
}
EXPORT_SYMBOL(cl_req_slice_add);
@@ -1442,14 +1376,13 @@ static void cl_req_free(const struct lu_env *env, struct cl_req *req)
LASSERT(req->crq_nrpages == 0);
LINVRNT(list_empty(&req->crq_layers));
LINVRNT(equi(req->crq_nrobjs > 0, req->crq_o != NULL));
- ENTRY;
if (req->crq_o != NULL) {
for (i = 0; i < req->crq_nrobjs; ++i) {
struct cl_object *obj = req->crq_o[i].ro_obj;
if (obj != NULL) {
lu_object_ref_del_at(&obj->co_lu,
- req->crq_o[i].ro_obj_ref,
+ &req->crq_o[i].ro_obj_ref,
"cl_req", req);
cl_object_put(env, obj);
}
@@ -1457,7 +1390,6 @@ static void cl_req_free(const struct lu_env *env, struct cl_req *req)
OBD_FREE(req->crq_o, req->crq_nrobjs * sizeof req->crq_o[0]);
}
OBD_FREE_PTR(req);
- EXIT;
}
static int cl_req_init(const struct lu_env *env, struct cl_req *req,
@@ -1467,7 +1399,6 @@ static int cl_req_init(const struct lu_env *env, struct cl_req *req,
struct cl_page_slice *slice;
int result;
- ENTRY;
result = 0;
page = cl_page_top(page);
do {
@@ -1482,7 +1413,7 @@ static int cl_req_init(const struct lu_env *env, struct cl_req *req,
}
page = page->cp_child;
} while (page != NULL && result == 0);
- RETURN(result);
+ return result;
}
/**
@@ -1493,7 +1424,6 @@ void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
{
struct cl_req_slice *slice;
- ENTRY;
/*
* for the lack of list_for_each_entry_reverse_safe()...
*/
@@ -1505,7 +1435,6 @@ void cl_req_completion(const struct lu_env *env, struct cl_req *req, int rc)
slice->crs_ops->cro_completion(env, slice, rc);
}
cl_req_free(env, req);
- EXIT;
}
EXPORT_SYMBOL(cl_req_completion);
@@ -1518,7 +1447,6 @@ struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
struct cl_req *req;
LINVRNT(nr_objects > 0);
- ENTRY;
OBD_ALLOC_PTR(req);
if (req != NULL) {
@@ -1539,7 +1467,7 @@ struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
}
} else
req = ERR_PTR(-ENOMEM);
- RETURN(req);
+ return req;
}
EXPORT_SYMBOL(cl_req_alloc);
@@ -1553,7 +1481,6 @@ void cl_req_page_add(const struct lu_env *env,
struct cl_req_obj *rqo;
int i;
- ENTRY;
page = cl_page_top(page);
LASSERT(list_empty(&page->cp_flight));
@@ -1570,13 +1497,12 @@ void cl_req_page_add(const struct lu_env *env,
if (rqo->ro_obj == NULL) {
rqo->ro_obj = obj;
cl_object_get(obj);
- rqo->ro_obj_ref = lu_object_ref_add(&obj->co_lu,
- "cl_req", req);
+ lu_object_ref_add_at(&obj->co_lu, &rqo->ro_obj_ref,
+ "cl_req", req);
break;
}
}
LASSERT(i < req->crq_nrobjs);
- EXIT;
}
EXPORT_SYMBOL(cl_req_page_add);
@@ -1587,7 +1513,6 @@ void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
{
struct cl_req *req = page->cp_req;
- ENTRY;
page = cl_page_top(page);
LASSERT(!list_empty(&page->cp_flight));
@@ -1596,7 +1521,6 @@ void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
list_del_init(&page->cp_flight);
--req->crq_nrpages;
page->cp_req = NULL;
- EXIT;
}
EXPORT_SYMBOL(cl_req_page_done);
@@ -1610,7 +1534,6 @@ int cl_req_prep(const struct lu_env *env, struct cl_req *req)
int result;
const struct cl_req_slice *slice;
- ENTRY;
/*
* Check that the caller of cl_req_alloc() didn't lie about the number
* of objects.
@@ -1626,7 +1549,7 @@ int cl_req_prep(const struct lu_env *env, struct cl_req *req)
break;
}
}
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_req_prep);
@@ -1643,7 +1566,6 @@ void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
int i;
LASSERT(!list_empty(&req->crq_pages));
- ENTRY;
/* Take any page to use as a model. */
page = list_entry(req->crq_pages.next, struct cl_page, cp_flight);
@@ -1662,7 +1584,6 @@ void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
attr + i, flags);
}
}
- EXIT;
}
EXPORT_SYMBOL(cl_req_attr_set);
@@ -1675,12 +1596,10 @@ EXPORT_SYMBOL(cl_req_attr_set);
*/
void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
{
- ENTRY;
init_waitqueue_head(&anchor->csi_waitq);
atomic_set(&anchor->csi_sync_nr, nrpages);
atomic_set(&anchor->csi_barrier, nrpages > 0);
anchor->csi_sync_rc = 0;
- EXIT;
}
EXPORT_SYMBOL(cl_sync_io_init);
@@ -1695,7 +1614,6 @@ int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
NULL, NULL, NULL);
int rc;
- ENTRY;
LASSERT(timeout >= 0);
@@ -1725,7 +1643,7 @@ int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
}
POISON(anchor, 0x5a, sizeof *anchor);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(cl_sync_io_wait);
@@ -1734,7 +1652,6 @@ EXPORT_SYMBOL(cl_sync_io_wait);
*/
void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
{
- ENTRY;
if (anchor->csi_sync_rc == 0 && ioret < 0)
anchor->csi_sync_rc = ioret;
/*
@@ -1748,6 +1665,5 @@ void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
/* it's safe to nuke or reuse anchor now */
atomic_set(&anchor->csi_barrier, 0);
}
- EXIT;
}
EXPORT_SYMBOL(cl_sync_io_note);
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
index d34e044fc85..749eb082f97 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
@@ -191,12 +191,10 @@ void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
struct cl_object *obj,
const struct cl_lock_operations *ops)
{
- ENTRY;
slice->cls_lock = lock;
list_add_tail(&slice->cls_linkage, &lock->cll_layers);
slice->cls_obj = obj;
slice->cls_ops = ops;
- EXIT;
}
EXPORT_SYMBOL(cl_lock_slice_add);
@@ -254,7 +252,6 @@ static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
LINVRNT(!cl_lock_is_mutexed(lock));
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
might_sleep();
while (!list_empty(&lock->cll_layers)) {
@@ -267,13 +264,12 @@ static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
}
CS_LOCK_DEC(obj, total);
CS_LOCKSTATE_DEC(obj, lock->cll_state);
- lu_object_ref_del_at(&obj->co_lu, lock->cll_obj_ref, "cl_lock", lock);
+ lu_object_ref_del_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock", lock);
cl_object_put(env, obj);
lu_ref_fini(&lock->cll_reference);
lu_ref_fini(&lock->cll_holders);
mutex_destroy(&lock->cll_guard);
OBD_SLAB_FREE_PTR(lock, cl_lock_kmem);
- EXIT;
}
/**
@@ -290,7 +286,6 @@ void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
struct cl_object *obj;
LINVRNT(cl_lock_invariant(env, lock));
- ENTRY;
obj = lock->cll_descr.cld_obj;
LINVRNT(obj != NULL);
@@ -304,7 +299,6 @@ void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
}
CS_LOCK_DEC(obj, busy);
}
- EXIT;
}
EXPORT_SYMBOL(cl_lock_put);
@@ -366,15 +360,14 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
struct cl_lock *lock;
struct lu_object_header *head;
- ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(lock, cl_lock_kmem, __GFP_IO);
if (lock != NULL) {
atomic_set(&lock->cll_ref, 1);
lock->cll_descr = *descr;
lock->cll_state = CLS_NEW;
cl_object_get(obj);
- lock->cll_obj_ref = lu_object_ref_add(&obj->co_lu,
- "cl_lock", lock);
+ lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock",
+ lock);
INIT_LIST_HEAD(&lock->cll_layers);
INIT_LIST_HEAD(&lock->cll_linkage);
INIT_LIST_HEAD(&lock->cll_inclosure);
@@ -401,7 +394,7 @@ static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
}
} else
lock = ERR_PTR(-ENOMEM);
- RETURN(lock);
+ return lock;
}
/**
@@ -468,13 +461,12 @@ static int cl_lock_fits_into(const struct lu_env *env,
const struct cl_lock_slice *slice;
LINVRNT(cl_lock_invariant_trusted(env, lock));
- ENTRY;
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_fits_into != NULL &&
!slice->cls_ops->clo_fits_into(env, slice, need, io))
- RETURN(0);
+ return 0;
}
- RETURN(1);
+ return 1;
}
static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
@@ -485,8 +477,6 @@ static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
struct cl_lock *lock;
struct cl_object_header *head;
- ENTRY;
-
head = cl_object_header(obj);
LINVRNT(spin_is_locked(&head->coh_lock_guard));
CS_LOCK_INC(obj, lookup);
@@ -504,10 +494,10 @@ static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
if (matched) {
cl_lock_get_trust(lock);
CS_LOCK_INC(obj, hit);
- RETURN(lock);
+ return lock;
}
}
- RETURN(NULL);
+ return NULL;
}
/**
@@ -528,8 +518,6 @@ static struct cl_lock *cl_lock_find(const struct lu_env *env,
struct cl_object *obj;
struct cl_lock *lock;
- ENTRY;
-
obj = need->cld_obj;
head = cl_object_header(obj);
@@ -561,7 +549,7 @@ static struct cl_lock *cl_lock_find(const struct lu_env *env,
}
}
}
- RETURN(lock);
+ return lock;
}
/**
@@ -630,13 +618,12 @@ const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
const struct cl_lock_slice *slice;
LINVRNT(cl_lock_invariant_trusted(NULL, lock));
- ENTRY;
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
- RETURN(slice);
+ return slice;
}
- RETURN(NULL);
+ return NULL;
}
EXPORT_SYMBOL(cl_lock_at);
@@ -705,7 +692,6 @@ int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
int result;
LINVRNT(cl_lock_invariant_trusted(env, lock));
- ENTRY;
result = 0;
if (lock->cll_guarder == current) {
@@ -717,7 +703,7 @@ int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
cl_lock_mutex_tail(env, lock);
} else
result = -EBUSY;
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_lock_mutex_try);
@@ -784,7 +770,6 @@ static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
{
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
- ENTRY;
if (!(lock->cll_flags & CLF_CANCELLED)) {
const struct cl_lock_slice *slice;
@@ -795,7 +780,6 @@ static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
slice->cls_ops->clo_cancel(env, slice);
}
}
- EXIT;
}
static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
@@ -806,7 +790,6 @@ static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
- ENTRY;
if (lock->cll_state < CLS_FREEING) {
LASSERT(lock->cll_state != CLS_INTRANSIT);
cl_lock_state_set(env, lock, CLS_FREEING);
@@ -836,7 +819,6 @@ static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
* existing references goes away.
*/
}
- EXIT;
}
/**
@@ -886,7 +868,6 @@ void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
LINVRNT(cl_lock_invariant(env, lock));
LASSERT(lock->cll_holds > 0);
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
lu_ref_del(&lock->cll_holders, scope, source);
cl_lock_hold_mod(env, lock, -1);
@@ -910,7 +891,6 @@ void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
cl_lock_delete0(env, lock);
}
}
- EXIT;
}
EXPORT_SYMBOL(cl_lock_hold_release);
@@ -939,7 +919,6 @@ int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
sigset_t blocked;
int result;
- ENTRY;
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
LASSERT(lock->cll_depth == 1);
@@ -976,7 +955,7 @@ int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
/* Restore old blocked signals */
cfs_restore_sigs(blocked);
}
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_lock_state_wait);
@@ -985,7 +964,6 @@ static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
{
const struct cl_lock_slice *slice;
- ENTRY;
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
@@ -993,7 +971,6 @@ static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
if (slice->cls_ops->clo_state != NULL)
slice->cls_ops->clo_state(env, slice, state);
wake_up_all(&lock->cll_wq);
- EXIT;
}
/**
@@ -1005,10 +982,8 @@ static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
*/
void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
{
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
cl_lock_state_signal(env, lock, lock->cll_state);
- EXIT;
}
EXPORT_SYMBOL(cl_lock_signal);
@@ -1025,7 +1000,6 @@ EXPORT_SYMBOL(cl_lock_signal);
void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
enum cl_lock_state state)
{
- ENTRY;
LASSERT(lock->cll_state <= state ||
(lock->cll_state == CLS_CACHED &&
(state == CLS_HELD || /* lock found in cache */
@@ -1041,7 +1015,6 @@ void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
cl_lock_state_signal(env, lock, state);
lock->cll_state = state;
}
- EXIT;
}
EXPORT_SYMBOL(cl_lock_state_set);
@@ -1084,12 +1057,11 @@ int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
int result;
enum cl_lock_state state;
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "use lock", lock);
LASSERT(lock->cll_state == CLS_CACHED);
if (lock->cll_error)
- RETURN(lock->cll_error);
+ return lock->cll_error;
result = -ENOSYS;
state = cl_lock_intransit(env, lock);
@@ -1129,7 +1101,7 @@ int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
}
cl_lock_extransit(env, lock, state);
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_use_try);
@@ -1144,7 +1116,6 @@ static int cl_enqueue_kick(const struct lu_env *env,
int result;
const struct cl_lock_slice *slice;
- ENTRY;
result = -ENOSYS;
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_ops->clo_enqueue != NULL) {
@@ -1155,7 +1126,7 @@ static int cl_enqueue_kick(const struct lu_env *env,
}
}
LASSERT(result != -ENOSYS);
- RETURN(result);
+ return result;
}
/**
@@ -1176,7 +1147,6 @@ int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
{
int result;
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
do {
LINVRNT(cl_lock_is_mutexed(lock));
@@ -1219,7 +1189,7 @@ int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
LBUG();
}
} while (result == CLO_REPEAT);
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_enqueue_try);
@@ -1235,7 +1205,6 @@ int cl_lock_enqueue_wait(const struct lu_env *env,
{
struct cl_lock *conflict;
int rc = 0;
- ENTRY;
LASSERT(cl_lock_is_mutexed(lock));
LASSERT(lock->cll_state == CLS_QUEUING);
@@ -1265,7 +1234,7 @@ int cl_lock_enqueue_wait(const struct lu_env *env,
cl_lock_mutex_get(env, lock);
LASSERT(rc <= 0);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(cl_lock_enqueue_wait);
@@ -1274,8 +1243,6 @@ static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
{
int result;
- ENTRY;
-
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
LASSERT(lock->cll_holds > 0);
@@ -1298,7 +1265,7 @@ static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
lock->cll_state == CLS_ENQUEUED ||
lock->cll_state == CLS_HELD));
- RETURN(result);
+ return result;
}
/**
@@ -1315,8 +1282,6 @@ int cl_enqueue(const struct lu_env *env, struct cl_lock *lock,
{
int result;
- ENTRY;
-
cl_lock_lockdep_acquire(env, lock, enqflags);
cl_lock_mutex_get(env, lock);
result = cl_enqueue_locked(env, lock, io, enqflags);
@@ -1325,7 +1290,7 @@ int cl_enqueue(const struct lu_env *env, struct cl_lock *lock,
cl_lock_lockdep_release(env, lock);
LASSERT(ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
lock->cll_state == CLS_HELD));
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_enqueue);
@@ -1346,19 +1311,18 @@ int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
int result;
enum cl_lock_state state = CLS_NEW;
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
if (lock->cll_users > 1) {
cl_lock_user_del(env, lock);
- RETURN(0);
+ return 0;
}
/* Only if the lock is in CLS_HELD or CLS_ENQUEUED state, it can hold
* underlying resources. */
if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
cl_lock_user_del(env, lock);
- RETURN(0);
+ return 0;
}
/*
@@ -1404,20 +1368,17 @@ int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
state = CLS_NEW;
cl_lock_extransit(env, lock, state);
}
- RETURN(result ?: lock->cll_error);
+ return result ?: lock->cll_error;
}
EXPORT_SYMBOL(cl_unuse_try);
static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
{
int result;
- ENTRY;
result = cl_unuse_try(env, lock);
if (result)
CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
-
- EXIT;
}
/**
@@ -1425,12 +1386,10 @@ static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
*/
void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
{
- ENTRY;
cl_lock_mutex_get(env, lock);
cl_unuse_locked(env, lock);
cl_lock_mutex_put(env, lock);
cl_lock_lockdep_release(env, lock);
- EXIT;
}
EXPORT_SYMBOL(cl_unuse);
@@ -1449,7 +1408,6 @@ int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
const struct cl_lock_slice *slice;
int result;
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
do {
LINVRNT(cl_lock_is_mutexed(lock));
@@ -1489,7 +1447,7 @@ int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
cl_lock_state_set(env, lock, CLS_HELD);
}
} while (result == CLO_REPEAT);
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_wait_try);
@@ -1506,7 +1464,6 @@ int cl_wait(const struct lu_env *env, struct cl_lock *lock)
{
int result;
- ENTRY;
cl_lock_mutex_get(env, lock);
LINVRNT(cl_lock_invariant(env, lock));
@@ -1530,7 +1487,7 @@ int cl_wait(const struct lu_env *env, struct cl_lock *lock)
cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
cl_lock_mutex_put(env, lock);
LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_wait);
@@ -1544,7 +1501,6 @@ unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
unsigned long pound;
unsigned long ounce;
- ENTRY;
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
@@ -1557,7 +1513,7 @@ unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
pound = ~0UL;
}
}
- RETURN(pound);
+ return pound;
}
EXPORT_SYMBOL(cl_lock_weigh);
@@ -1579,7 +1535,6 @@ int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
struct cl_object_header *hdr = cl_object_header(obj);
int result;
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
/* don't allow object to change */
LASSERT(obj == desc->cld_obj);
@@ -1590,7 +1545,7 @@ int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
if (slice->cls_ops->clo_modify != NULL) {
result = slice->cls_ops->clo_modify(env, slice, desc);
if (result != 0)
- RETURN(result);
+ return result;
}
}
CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
@@ -1603,7 +1558,7 @@ int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
spin_lock(&hdr->coh_lock_guard);
lock->cll_descr = *desc;
spin_unlock(&hdr->coh_lock_guard);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(cl_lock_modify);
@@ -1642,7 +1597,6 @@ int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
const struct cl_lock_slice *slice;
int result;
- ENTRY;
LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
LINVRNT(cl_lock_invariant(env, closure->clc_origin));
@@ -1659,7 +1613,7 @@ int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
}
if (result != 0)
cl_lock_disclosure(env, closure);
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_lock_closure_build);
@@ -1674,7 +1628,7 @@ int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
struct cl_lock_closure *closure)
{
int result = 0;
- ENTRY;
+
cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
if (!cl_lock_mutex_try(env, lock)) {
/*
@@ -1706,7 +1660,7 @@ int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
}
result = CLO_REPEAT;
}
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_lock_enclosure);
@@ -1766,13 +1720,11 @@ void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
cl_lock_nr_mutexed(env) == 1));
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
if (lock->cll_holds == 0)
cl_lock_delete0(env, lock);
else
lock->cll_flags |= CLF_DOOMED;
- EXIT;
}
EXPORT_SYMBOL(cl_lock_delete);
@@ -1791,7 +1743,6 @@ void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
- ENTRY;
if (lock->cll_error == 0 && error != 0) {
cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
lock->cll_error = error;
@@ -1799,7 +1750,6 @@ void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
cl_lock_cancel(env, lock);
cl_lock_delete(env, lock);
}
- EXIT;
}
EXPORT_SYMBOL(cl_lock_error);
@@ -1819,13 +1769,11 @@ void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
if (lock->cll_holds == 0)
cl_lock_cancel0(env, lock);
else
lock->cll_flags |= CLF_CANCELPEND;
- EXIT;
}
EXPORT_SYMBOL(cl_lock_cancel);
@@ -1843,8 +1791,6 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
struct cl_lock *lock;
struct cl_lock_descr *need;
- ENTRY;
-
head = cl_object_header(obj);
need = &cl_env_info(env)->clt_descr;
lock = NULL;
@@ -1878,7 +1824,7 @@ struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
}
}
spin_unlock(&head->coh_lock_guard);
- RETURN(lock);
+ return lock;
}
EXPORT_SYMBOL(cl_lock_at_pgoff);
@@ -1979,7 +1925,6 @@ int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock)
int result;
LINVRNT(cl_lock_invariant(env, lock));
- ENTRY;
io->ci_obj = cl_object_top(descr->cld_obj);
io->ci_ignore_layout = 1;
@@ -2001,7 +1946,7 @@ int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock)
} while (res != CLP_GANG_OKAY);
out:
cl_io_fini(env, io);
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_lock_discard_pages);
@@ -2018,7 +1963,6 @@ void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
struct cl_object_header *head;
struct cl_lock *lock;
- ENTRY;
head = cl_object_header(obj);
/*
* If locks are destroyed without cancellation, all pages must be
@@ -2059,7 +2003,6 @@ again:
spin_lock(&head->coh_lock_guard);
}
spin_unlock(&head->coh_lock_guard);
- EXIT;
}
EXPORT_SYMBOL(cl_locks_prune);
@@ -2070,8 +2013,6 @@ static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
{
struct cl_lock *lock;
- ENTRY;
-
while (1) {
lock = cl_lock_find(env, io, need);
if (IS_ERR(lock))
@@ -2087,7 +2028,7 @@ static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
cl_lock_mutex_put(env, lock);
cl_lock_put(env, lock);
}
- RETURN(lock);
+ return lock;
}
/**
@@ -2103,12 +2044,10 @@ struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
{
struct cl_lock *lock;
- ENTRY;
-
lock = cl_lock_hold_mutex(env, io, need, scope, source);
if (!IS_ERR(lock))
cl_lock_mutex_put(env, lock);
- RETURN(lock);
+ return lock;
}
EXPORT_SYMBOL(cl_lock_hold);
@@ -2124,7 +2063,6 @@ struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
int rc;
__u32 enqflags = need->cld_enq_flags;
- ENTRY;
do {
lock = cl_lock_hold_mutex(env, io, need, scope, source);
if (IS_ERR(lock))
@@ -2156,7 +2094,7 @@ struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
lock = ERR_PTR(rc);
}
} while (rc == 0);
- RETURN(lock);
+ return lock;
}
EXPORT_SYMBOL(cl_lock_request);
@@ -2170,12 +2108,10 @@ void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
LINVRNT(cl_lock_invariant(env, lock));
LASSERT(lock->cll_state != CLS_FREEING);
- ENTRY;
cl_lock_hold_mod(env, lock, +1);
cl_lock_get(lock);
lu_ref_add(&lock->cll_holders, scope, source);
lu_ref_add(&lock->cll_reference, scope, source);
- EXIT;
}
EXPORT_SYMBOL(cl_lock_hold_add);
@@ -2187,11 +2123,9 @@ void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
const char *scope, const void *source)
{
LINVRNT(cl_lock_invariant(env, lock));
- ENTRY;
cl_lock_hold_release(env, lock, scope, source);
lu_ref_del(&lock->cll_reference, scope, source);
cl_lock_put(env, lock);
- EXIT;
}
EXPORT_SYMBOL(cl_lock_unhold);
@@ -2202,14 +2136,12 @@ void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
const char *scope, const void *source)
{
LINVRNT(cl_lock_invariant(env, lock));
- ENTRY;
cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
cl_lock_mutex_get(env, lock);
cl_lock_hold_release(env, lock, scope, source);
cl_lock_mutex_put(env, lock);
lu_ref_del(&lock->cll_reference, scope, source);
cl_lock_put(env, lock);
- EXIT;
}
EXPORT_SYMBOL(cl_lock_release);
@@ -2218,9 +2150,7 @@ void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
LINVRNT(cl_lock_is_mutexed(lock));
LINVRNT(cl_lock_invariant(env, lock));
- ENTRY;
cl_lock_used_mod(env, lock, +1);
- EXIT;
}
EXPORT_SYMBOL(cl_lock_user_add);
@@ -2230,11 +2160,9 @@ void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
LINVRNT(cl_lock_invariant(env, lock));
LASSERT(lock->cll_users > 0);
- ENTRY;
cl_lock_used_mod(env, lock, -1);
if (lock->cll_users == 0)
wake_up_all(&lock->cll_wq);
- EXIT;
}
EXPORT_SYMBOL(cl_lock_user_del);
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index cdb5fba0459..7b0e9d26b6c 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -79,7 +79,6 @@ int cl_object_header_init(struct cl_object_header *h)
{
int result;
- ENTRY;
result = lu_object_header_init(&h->coh_lu);
if (result == 0) {
spin_lock_init(&h->coh_page_guard);
@@ -94,7 +93,7 @@ int cl_object_header_init(struct cl_object_header *h)
INIT_LIST_HEAD(&h->coh_locks);
h->coh_page_bufsize = ALIGN(sizeof(struct cl_page), 8);
}
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_object_header_init);
@@ -222,7 +221,6 @@ int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
int result;
LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
- ENTRY;
top = obj->co_lu.lo_header;
result = 0;
@@ -236,7 +234,7 @@ int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
}
}
}
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_object_attr_get);
@@ -254,7 +252,6 @@ int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
int result;
LASSERT(spin_is_locked(cl_object_attr_guard(obj)));
- ENTRY;
top = obj->co_lu.lo_header;
result = 0;
@@ -269,7 +266,7 @@ int cl_object_attr_set(const struct lu_env *env, struct cl_object *obj,
}
}
}
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_object_attr_set);
@@ -287,7 +284,6 @@ int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
struct lu_object_header *top;
int result;
- ENTRY;
top = obj->co_lu.lo_header;
result = 0;
list_for_each_entry_reverse(obj, &top->loh_layers,
@@ -303,7 +299,7 @@ int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
"ctime: "LPU64" blocks: "LPU64"\n",
lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime,
lvb->lvb_ctime, lvb->lvb_blocks);
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_object_glimpse);
@@ -316,7 +312,6 @@ int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
struct lu_object_header *top;
int result;
- ENTRY;
top = obj->co_lu.lo_header;
result = 0;
list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
@@ -326,7 +321,7 @@ int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
break;
}
}
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_conf_set);
@@ -362,10 +357,8 @@ EXPORT_SYMBOL(cl_object_kill);
*/
void cl_object_prune(const struct lu_env *env, struct cl_object *obj)
{
- ENTRY;
cl_pages_prune(env, obj);
cl_locks_prune(env, obj, 1);
- EXIT;
}
EXPORT_SYMBOL(cl_object_prune);
@@ -941,13 +934,11 @@ EXPORT_SYMBOL(cl_env_nested_put);
*/
void cl_attr2lvb(struct ost_lvb *lvb, const struct cl_attr *attr)
{
- ENTRY;
lvb->lvb_size = attr->cat_size;
lvb->lvb_mtime = attr->cat_mtime;
lvb->lvb_atime = attr->cat_atime;
lvb->lvb_ctime = attr->cat_ctime;
lvb->lvb_blocks = attr->cat_blocks;
- EXIT;
}
EXPORT_SYMBOL(cl_attr2lvb);
@@ -958,13 +949,11 @@ EXPORT_SYMBOL(cl_attr2lvb);
*/
void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
{
- ENTRY;
attr->cat_size = lvb->lvb_size;
attr->cat_mtime = lvb->lvb_mtime;
attr->cat_atime = lvb->lvb_atime;
attr->cat_ctime = lvb->lvb_ctime;
attr->cat_blocks = lvb->lvb_blocks;
- EXIT;
}
EXPORT_SYMBOL(cl_lvb2attr);
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index bb9335911c3..2a5ce376e57 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -108,17 +108,16 @@ cl_page_at_trusted(const struct cl_page *page,
const struct lu_device_type *dtype)
{
const struct cl_page_slice *slice;
- ENTRY;
page = cl_page_top_trusted((struct cl_page *)page);
do {
list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
- RETURN(slice);
+ return slice;
}
page = page->cp_child;
} while (page != NULL);
- RETURN(NULL);
+ return NULL;
}
/**
@@ -167,7 +166,6 @@ int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
unsigned int j;
int res = CLP_GANG_OKAY;
int tree_lock = 1;
- ENTRY;
idx = start;
hdr = cl_object_header(obj);
@@ -243,7 +241,7 @@ int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
}
if (tree_lock)
spin_unlock(&hdr->coh_page_guard);
- RETURN(res);
+ return res;
}
EXPORT_SYMBOL(cl_page_gang_lookup);
@@ -258,7 +256,6 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page)
PASSERT(env, page, page->cp_parent == NULL);
PASSERT(env, page, page->cp_state == CPS_FREEING);
- ENTRY;
might_sleep();
while (!list_empty(&page->cp_layers)) {
struct cl_page_slice *slice;
@@ -270,11 +267,10 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page)
}
CS_PAGE_DEC(obj, total);
CS_PAGESTATE_DEC(obj, page->cp_state);
- lu_object_ref_del_at(&obj->co_lu, page->cp_obj_ref, "cl_page", page);
+ lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
cl_object_put(env, obj);
lu_ref_fini(&page->cp_reference);
OBD_FREE(page, pagesize);
- EXIT;
}
/**
@@ -295,7 +291,6 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env,
struct cl_page *page;
struct lu_object_header *head;
- ENTRY;
OBD_ALLOC_GFP(page, cl_object_header(o)->coh_page_bufsize,
__GFP_IO);
if (page != NULL) {
@@ -305,7 +300,8 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env,
atomic_inc(&page->cp_ref);
page->cp_obj = o;
cl_object_get(o);
- page->cp_obj_ref = lu_object_ref_add(&o->co_lu, "cl_page",page);
+ lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
+ page);
page->cp_index = ind;
cl_page_state_set_trust(page, CPS_CACHED);
page->cp_type = type;
@@ -336,7 +332,7 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env,
} else {
page = ERR_PTR(-ENOMEM);
}
- RETURN(page);
+ return page;
}
/**
@@ -364,8 +360,6 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
might_sleep();
- ENTRY;
-
hdr = cl_object_header(o);
CS_PAGE_INC(o, lookup);
@@ -395,13 +389,13 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
if (page != NULL) {
CS_PAGE_INC(o, hit);
- RETURN(page);
+ return page;
}
/* allocate and initialize cl_page */
page = cl_page_alloc(env, o, idx, vmpage, type);
if (IS_ERR(page))
- RETURN(page);
+ return page;
if (type == CPT_TRANSIENT) {
if (parent) {
@@ -409,7 +403,7 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
page->cp_parent = parent;
parent->cp_child = page;
}
- RETURN(page);
+ return page;
}
/*
@@ -450,7 +444,7 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
cl_page_delete0(env, ghost, 0);
cl_page_free(env, ghost);
}
- RETURN(page);
+ return page;
}
struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
@@ -553,7 +547,6 @@ static void cl_page_state_set0(const struct lu_env *env,
}
};
- ENTRY;
old = page->cp_state;
PASSERT(env, page, allowed_transitions[old][state]);
CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
@@ -566,7 +559,6 @@ static void cl_page_state_set0(const struct lu_env *env,
CS_PAGESTATE_INC(page->cp_obj, state);
cl_page_state_set_trust(page, state);
}
- EXIT;
}
static void cl_page_state_set(const struct lu_env *env,
@@ -585,9 +577,7 @@ static void cl_page_state_set(const struct lu_env *env,
*/
void cl_page_get(struct cl_page *page)
{
- ENTRY;
cl_page_get_trust(page);
- EXIT;
}
EXPORT_SYMBOL(cl_page_get);
@@ -604,7 +594,6 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
{
PASSERT(env, page, atomic_read(&page->cp_ref) > !!page->cp_parent);
- ENTRY;
CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
atomic_read(&page->cp_ref));
@@ -620,8 +609,6 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
*/
cl_page_free(env, page);
}
-
- EXIT;
}
EXPORT_SYMBOL(cl_page_put);
@@ -640,7 +627,7 @@ struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
do {
list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
if (slice->cpl_ops->cpo_vmpage != NULL)
- RETURN(slice->cpl_ops->cpo_vmpage(env, slice));
+ return slice->cpl_ops->cpo_vmpage(env, slice);
}
page = page->cp_child;
} while (page != NULL);
@@ -656,7 +643,6 @@ struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
struct cl_page *top;
struct cl_page *page;
- ENTRY;
KLASSERT(PageLocked(vmpage));
/*
@@ -671,7 +657,7 @@ struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
*/
top = (struct cl_page *)vmpage->private;
if (top == NULL)
- RETURN(NULL);
+ return NULL;
for (page = top; page != NULL; page = page->cp_child) {
if (cl_object_same(page->cp_obj, obj)) {
@@ -680,7 +666,7 @@ struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
}
}
LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
- RETURN(page);
+ return page;
}
EXPORT_SYMBOL(cl_vmpage_page);
@@ -785,11 +771,10 @@ static int cl_page_invoke(const struct lu_env *env,
{
PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
- ENTRY;
- RETURN(CL_PAGE_INVOKE(env, page, op,
+ return CL_PAGE_INVOKE(env, page, op,
(const struct lu_env *,
const struct cl_page_slice *, struct cl_io *),
- io));
+ io);
}
static void cl_page_invoid(const struct lu_env *env,
@@ -797,16 +782,13 @@ static void cl_page_invoid(const struct lu_env *env,
{
PINVRNT(env, page, cl_object_same(page->cp_obj, io->ci_obj));
- ENTRY;
CL_PAGE_INVOID(env, page, op,
(const struct lu_env *,
const struct cl_page_slice *, struct cl_io *), io);
- EXIT;
}
static void cl_page_owner_clear(struct cl_page *page)
{
- ENTRY;
for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
if (page->cp_owner != NULL) {
LASSERT(page->cp_owner->ci_owned_nr > 0);
@@ -815,17 +797,14 @@ static void cl_page_owner_clear(struct cl_page *page)
page->cp_task = NULL;
}
}
- EXIT;
}
static void cl_page_owner_set(struct cl_page *page)
{
- ENTRY;
for (page = cl_page_top(page); page != NULL; page = page->cp_child) {
LASSERT(page->cp_owner != NULL);
page->cp_owner->ci_owned_nr++;
}
- EXIT;
}
void cl_page_disown0(const struct lu_env *env,
@@ -833,7 +812,6 @@ void cl_page_disown0(const struct lu_env *env,
{
enum cl_page_state state;
- ENTRY;
state = pg->cp_state;
PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
PINVRNT(env, pg, cl_page_invariant(pg));
@@ -850,7 +828,6 @@ void cl_page_disown0(const struct lu_env *env,
(const struct lu_env *,
const struct cl_page_slice *, struct cl_io *),
io);
- EXIT;
}
/**
@@ -859,8 +836,7 @@ void cl_page_disown0(const struct lu_env *env,
int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
{
LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
- ENTRY;
- RETURN(pg->cp_state == CPS_OWNED && pg->cp_owner == io);
+ return pg->cp_state == CPS_OWNED && pg->cp_owner == io;
}
EXPORT_SYMBOL(cl_page_is_owned);
@@ -891,7 +867,6 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
PINVRNT(env, pg, !cl_page_is_owned(pg, io));
- ENTRY;
pg = cl_page_top(pg);
io = cl_io_top(io);
@@ -918,7 +893,7 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
}
}
PINVRNT(env, pg, ergo(result == 0, cl_page_invariant(pg)));
- RETURN(result);
+ return result;
}
/**
@@ -960,7 +935,6 @@ void cl_page_assume(const struct lu_env *env,
{
PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
- ENTRY;
pg = cl_page_top(pg);
io = cl_io_top(io);
@@ -970,7 +944,6 @@ void cl_page_assume(const struct lu_env *env,
pg->cp_task = current;
cl_page_owner_set(pg);
cl_page_state_set(env, pg, CPS_OWNED);
- EXIT;
}
EXPORT_SYMBOL(cl_page_assume);
@@ -991,7 +964,6 @@ void cl_page_unassume(const struct lu_env *env,
PINVRNT(env, pg, cl_page_is_owned(pg, io));
PINVRNT(env, pg, cl_page_invariant(pg));
- ENTRY;
pg = cl_page_top(pg);
io = cl_io_top(io);
cl_page_owner_clear(pg);
@@ -1000,7 +972,6 @@ void cl_page_unassume(const struct lu_env *env,
(const struct lu_env *,
const struct cl_page_slice *, struct cl_io *),
io);
- EXIT;
}
EXPORT_SYMBOL(cl_page_unassume);
@@ -1020,11 +991,9 @@ void cl_page_disown(const struct lu_env *env,
{
PINVRNT(env, pg, cl_page_is_owned(pg, io));
- ENTRY;
pg = cl_page_top(pg);
io = cl_io_top(io);
cl_page_disown0(env, io, pg);
- EXIT;
}
EXPORT_SYMBOL(cl_page_disown);
@@ -1057,7 +1026,6 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
int radix)
{
struct cl_page *tmp = pg;
- ENTRY;
PASSERT(env, pg, pg == cl_page_top(pg));
PASSERT(env, pg, pg->cp_state != CPS_FREEING);
@@ -1102,8 +1070,6 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
cl_page_put(env, tmp);
}
}
-
- EXIT;
}
/**
@@ -1134,9 +1100,7 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
{
PINVRNT(env, pg, cl_page_invariant(pg));
- ENTRY;
cl_page_delete0(env, pg, 1);
- EXIT;
}
EXPORT_SYMBOL(cl_page_delete);
@@ -1186,7 +1150,6 @@ int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
int result;
const struct cl_page_slice *slice;
- ENTRY;
pg = cl_page_top_trusted((struct cl_page *)pg);
slice = container_of(pg->cp_layers.next,
const struct cl_page_slice, cpl_linkage);
@@ -1198,14 +1161,13 @@ int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
*/
result = slice->cpl_ops->cpo_is_vmlocked(env, slice);
PASSERT(env, pg, result == -EBUSY || result == -ENODATA);
- RETURN(result == -EBUSY);
+ return result == -EBUSY;
}
EXPORT_SYMBOL(cl_page_is_vmlocked);
static enum cl_page_state cl_req_type_state(enum cl_req_type crt)
{
- ENTRY;
- RETURN(crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN);
+ return crt == CRT_WRITE ? CPS_PAGEOUT : CPS_PAGEIN;
}
static void cl_page_io_start(const struct lu_env *env,
@@ -1214,10 +1176,8 @@ static void cl_page_io_start(const struct lu_env *env,
/*
* Page is queued for IO, change its state.
*/
- ENTRY;
cl_page_owner_clear(pg);
cl_page_state_set(env, pg, cl_req_type_state(crt));
- EXIT;
}
/**
@@ -1280,7 +1240,6 @@ void cl_page_completion(const struct lu_env *env,
PASSERT(env, pg, pg->cp_req == NULL);
PASSERT(env, pg, pg->cp_state == cl_req_type_state(crt));
- ENTRY;
CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, ioret);
if (crt == CRT_READ && ioret == 0) {
PASSERT(env, pg, !(pg->cp_flags & CPF_READ_COMPLETED));
@@ -1307,8 +1266,6 @@ void cl_page_completion(const struct lu_env *env,
if (anchor)
cl_sync_io_note(anchor, ioret);
-
- EXIT;
}
EXPORT_SYMBOL(cl_page_completion);
@@ -1328,9 +1285,8 @@ int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
PINVRNT(env, pg, crt < CRT_NR);
- ENTRY;
if (crt >= CRT_NR)
- RETURN(-EINVAL);
+ return -EINVAL;
result = CL_PAGE_INVOKE(env, pg, CL_PAGE_OP(io[crt].cpo_make_ready),
(const struct lu_env *,
const struct cl_page_slice *));
@@ -1339,7 +1295,7 @@ int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
cl_page_io_start(env, pg, crt);
}
CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_page_make_ready);
@@ -1365,10 +1321,8 @@ int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
PINVRNT(env, pg, cl_page_is_owned(pg, io));
PINVRNT(env, pg, cl_page_invariant(pg));
- ENTRY;
-
if (crt >= CRT_NR)
- RETURN(-EINVAL);
+ return -EINVAL;
list_for_each_entry(scan, &pg->cp_layers, cpl_linkage) {
if (scan->cpl_ops->io[crt].cpo_cache_add == NULL)
@@ -1379,7 +1333,7 @@ int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
break;
}
CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_page_cache_add);
@@ -1399,12 +1353,10 @@ int cl_page_flush(const struct lu_env *env, struct cl_io *io,
PINVRNT(env, pg, cl_page_is_owned(pg, io));
PINVRNT(env, pg, cl_page_invariant(pg));
- ENTRY;
-
result = cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_flush));
CL_PAGE_HEADER(D_TRACE, env, pg, "%d\n", result);
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_page_flush);
@@ -1422,13 +1374,12 @@ int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
PINVRNT(env, page, cl_page_invariant(page));
- ENTRY;
rc = CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_is_under_lock),
(const struct lu_env *,
const struct cl_page_slice *, struct cl_io *),
io);
PASSERT(env, page, rc != 0);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(cl_page_is_under_lock);
@@ -1452,7 +1403,6 @@ int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj)
struct cl_io *io;
int result;
- ENTRY;
info = cl_env_info(env);
io = &info->clt_io;
@@ -1465,7 +1415,7 @@ int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj)
result = cl_io_init(env, io, CIT_MISC, obj);
if (result != 0) {
cl_io_fini(env, io);
- RETURN(io->ci_result);
+ return io->ci_result;
}
do {
@@ -1476,7 +1426,7 @@ int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj)
} while (result != CLP_GANG_OKAY);
cl_io_fini(env, io);
- RETURN(result);
+ return result;
}
EXPORT_SYMBOL(cl_pages_prune);
@@ -1586,12 +1536,10 @@ void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
struct cl_object *obj,
const struct cl_page_operations *ops)
{
- ENTRY;
list_add_tail(&slice->cpl_linkage, &page->cp_layers);
slice->cpl_obj = obj;
slice->cpl_ops = ops;
slice->cpl_page = page;
- EXIT;
}
EXPORT_SYMBOL(cl_page_slice_add);
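Every hunk in this part of the series applies the same mechanical change: the Lustre/libcfs ENTRY, EXIT and RETURN() tracing macros, which expand to debug trace messages around function entry and exit, are dropped in favour of plain C control flow. A minimal before/after sketch of that idiom follows; the function names and the do_work() helper are illustrative and not taken from the patch, and the "before" form would still need Lustre's debug headers to compile.

#include <linux/errno.h>

static int do_work(void *obj)		/* hypothetical helper, stands in for real work */
{
	return obj ? 0 : -EINVAL;
}

/* Before: entry/exit tracing through Lustre debug macros (ENTRY/RETURN/EXIT). */
static int demo_old(void *obj)
{
	int rc;

	ENTRY;				/* trace function entry */
	rc = do_work(obj);
	if (rc < 0)
		RETURN(rc);		/* trace exit, then return rc */
	EXIT;				/* trace exit on the success path */
	return 0;
}

/* After: the staging cleanup keeps the logic and drops the tracing. */
static int demo_new(void *obj)
{
	int rc;

	rc = do_work(obj);
	if (rc < 0)
		return rc;
	return 0;
}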
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index af1c2d09c47..b1024a6d37d 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -112,18 +112,18 @@ int lustre_get_jobid(char *jobid)
{
int jobid_len = JOBSTATS_JOBID_SIZE;
int rc = 0;
- ENTRY;
memset(jobid, 0, JOBSTATS_JOBID_SIZE);
/* Jobstats isn't enabled */
if (strcmp(obd_jobid_var, JOBSTATS_DISABLE) == 0)
- RETURN(0);
+ return 0;
/* Use process name + fsuid as jobid */
if (strcmp(obd_jobid_var, JOBSTATS_PROCNAME_UID) == 0) {
snprintf(jobid, JOBSTATS_JOBID_SIZE, "%s.%u",
- current_comm(), current_fsuid());
- RETURN(0);
+ current_comm(),
+ from_kuid(&init_user_ns, current_fsuid()));
+ return 0;
}
rc = cfs_get_environ(obd_jobid_var, jobid, &jobid_len);
@@ -150,7 +150,7 @@ int lustre_get_jobid(char *jobid)
obd_jobid_var, rc);
}
}
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(lustre_get_jobid);
@@ -193,7 +193,6 @@ int class_resolve_dev_name(__u32 len, const char *name)
int rc;
int dev;
- ENTRY;
if (!len || !name) {
CERROR("No name passed,!\n");
GOTO(out, rc = -EINVAL);
@@ -214,7 +213,7 @@ int class_resolve_dev_name(__u32 len, const char *name)
rc = dev;
out:
- RETURN(rc);
+ return rc;
}
int class_handle_ioctl(unsigned int cmd, unsigned long arg)
@@ -224,7 +223,6 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
struct libcfs_debug_ioctl_data *debug_data;
struct obd_device *obd = NULL;
int err = 0, len = 0;
- ENTRY;
/* only for debugging */
if (cmd == LIBCFS_IOC_DEBUG_MASK) {
@@ -237,7 +235,7 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
CDEBUG(D_IOCTL, "cmd = %x\n", cmd);
if (obd_ioctl_getdata(&buf, &len, (void *)arg)) {
CERROR("OBD ioctl: data error\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
data = (struct obd_ioctl_data *)buf;
@@ -428,10 +426,10 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
out:
if (buf)
obd_ioctl_freedata(buf, len);
- RETURN(err);
+ return err;
} /* class_handle_ioctl */
-extern psdev_t obd_psdev;
+extern struct miscdevice obd_psdev;
#define OBD_INIT_CHECK
int obd_init_checks(void)
@@ -524,7 +522,7 @@ static int __init init_obdclass(void)
LPROCFS_STATS_FLAG_IRQ_SAFE);
if (obd_memory == NULL) {
CERROR("kmalloc of 'obd_memory' failed\n");
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
lprocfs_counter_init(obd_memory, OBD_MEMORY_STAT,
@@ -558,10 +556,10 @@ static int __init init_obdclass(void)
/* Default the dirty page cache cap to 1/2 of system memory.
* For clients with less memory, a larger fraction is needed
* for other purposes (mostly for BGL). */
- if (num_physpages <= 512 << (20 - PAGE_CACHE_SHIFT))
- obd_max_dirty_pages = num_physpages / 4;
+ if (totalram_pages <= 512 << (20 - PAGE_CACHE_SHIFT))
+ obd_max_dirty_pages = totalram_pages / 4;
else
- obd_max_dirty_pages = num_physpages / 2;
+ obd_max_dirty_pages = totalram_pages / 2;
err = obd_init_caches();
if (err)
@@ -638,7 +636,6 @@ static void cleanup_obdclass(void)
int lustre_unregister_fs(void);
__u64 memory_leaked, pages_leaked;
__u64 memory_max, pages_max;
- ENTRY;
lustre_unregister_fs();
@@ -678,12 +675,12 @@ static void cleanup_obdclass(void)
CDEBUG((pages_leaked) ? D_ERROR : D_INFO,
"obd_memory_pages max: "LPU64", leaked: "LPU64"\n",
pages_max, pages_leaked);
-
- EXIT;
}
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Class Driver Build Version: " BUILD_VERSION);
MODULE_LICENSE("GPL");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
-cfs_module(obdclass, LUSTRE_VERSION_STRING, init_obdclass, cleanup_obdclass);
+module_init(init_obdclass);
+module_exit(cleanup_obdclass);
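The final hunk of class_obd.c replaces the Lustre-private cfs_module() wrapper with the standard module_init()/module_exit() pair and moves the version string into MODULE_VERSION(). A minimal, self-contained sketch of the resulting boilerplate; the module name, strings and message text below are illustrative, not taken from the patch.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>

static int __init demo_init(void)
{
	pr_info("demo: loaded\n");
	return 0;			/* a non-zero return aborts module load */
}

static void __exit demo_exit(void)
{
	pr_info("demo: unloaded\n");
}

module_init(demo_init);
module_exit(demo_exit);

MODULE_AUTHOR("Example Author");
MODULE_DESCRIPTION("Illustrative module boilerplate");
MODULE_VERSION("0.1");
MODULE_LICENSE("GPL");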
diff --git a/drivers/staging/lustre/lustre/obdclass/dt_object.c b/drivers/staging/lustre/lustre/obdclass/dt_object.c
index 1c962dd3bd2..1b164c7027b 100644
--- a/drivers/staging/lustre/lustre/obdclass/dt_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/dt_object.c
@@ -219,7 +219,6 @@ struct dt_object *dt_locate_at(const struct lu_env *env,
struct lu_device *top_dev)
{
struct lu_object *lo, *n;
- ENTRY;
lo = lu_object_find_at(env, top_dev, fid, NULL);
if (IS_ERR(lo))
@@ -376,15 +375,13 @@ struct dt_object *dt_find_or_create(const struct lu_env *env,
struct thandle *th;
int rc;
- ENTRY;
-
dto = dt_locate(env, dt, fid);
if (IS_ERR(dto))
- RETURN(dto);
+ return dto;
LASSERT(dto != NULL);
if (dt_object_exists(dto))
- RETURN(dto);
+ return dto;
th = dt_trans_create(env, dt);
if (IS_ERR(th))
@@ -415,9 +412,9 @@ trans_stop:
out:
if (rc) {
lu_object_put(env, &dto->do_lu);
- RETURN(ERR_PTR(rc));
+ return ERR_PTR(rc);
}
- RETURN(dto);
+ return dto;
}
EXPORT_SYMBOL(dt_find_or_create);
@@ -659,7 +656,6 @@ static int dt_index_page_build(const struct lu_env *env, union lu_page *lp,
struct lu_idxpage *lip = &lp->lp_idx;
char *entry;
int rc, size;
- ENTRY;
/* no support for variable key & record size for now */
LASSERT((ii->ii_flags & II_FL_VARKEY) == 0);
@@ -763,21 +759,20 @@ int dt_index_walk(const struct lu_env *env, struct dt_object *obj,
const struct dt_it_ops *iops;
unsigned int pageidx, nob, nlupgs = 0;
int rc;
- ENTRY;
LASSERT(rdpg->rp_pages != NULL);
LASSERT(obj->do_index_ops != NULL);
nob = rdpg->rp_count;
if (nob <= 0)
- RETURN(-EFAULT);
+ return -EFAULT;
/* Iterate through index and fill containers from @rdpg */
iops = &obj->do_index_ops->dio_it;
LASSERT(iops != NULL);
it = iops->init(env, obj, rdpg->rp_attrs, BYPASS_CAPA);
if (IS_ERR(it))
- RETURN(PTR_ERR(it));
+ return PTR_ERR(it);
rc = iops->load(env, it, rdpg->rp_hash);
if (rc == 0) {
@@ -831,7 +826,7 @@ int dt_index_walk(const struct lu_env *env, struct dt_object *obj,
if (rc >= 0)
rc = min_t(unsigned int, nlupgs * LU_PAGE_SIZE, rdpg->rp_count);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(dt_index_walk);
@@ -855,26 +850,25 @@ int dt_index_read(const struct lu_env *env, struct dt_device *dev,
const struct dt_index_features *feat;
struct dt_object *obj;
int rc;
- ENTRY;
/* rp_count shouldn't be null and should be a multiple of the container
* size */
if (rdpg->rp_count <= 0 && (rdpg->rp_count & (LU_PAGE_SIZE - 1)) != 0)
- RETURN(-EFAULT);
+ return -EFAULT;
if (fid_seq(&ii->ii_fid) >= FID_SEQ_NORMAL)
/* we don't support directory transfer via OBD_IDX_READ for the
* time being */
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
if (!fid_is_quota(&ii->ii_fid))
/* block access to all local files except quota files */
- RETURN(-EPERM);
+ return -EPERM;
/* lookup index object subject to the transfer */
obj = dt_locate(env, dev, &ii->ii_fid);
if (IS_ERR(obj))
- RETURN(PTR_ERR(obj));
+ return PTR_ERR(obj);
if (dt_object_exists(obj) == 0)
GOTO(out, rc = -ENOENT);
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index d96876e0bc6..68fe71c8a2a 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -163,20 +163,19 @@ int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops,
{
struct obd_type *type;
int rc = 0;
- ENTRY;
/* sanity check */
LASSERT(strnlen(name, CLASS_MAX_NAME) < CLASS_MAX_NAME);
if (class_search_type(name)) {
CDEBUG(D_IOCTL, "Type %s already registered\n", name);
- RETURN(-EEXIST);
+ return -EEXIST;
}
rc = -ENOMEM;
OBD_ALLOC(type, sizeof(*type));
if (type == NULL)
- RETURN(rc);
+ return rc;
OBD_ALLOC_PTR(type->typ_dt_ops);
OBD_ALLOC_PTR(type->typ_md_ops);
@@ -214,7 +213,7 @@ int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops,
list_add(&type->typ_chain, &obd_types);
spin_unlock(&obd_types_lock);
- RETURN (0);
+ return 0;
failed:
if (type->typ_name != NULL)
@@ -224,18 +223,17 @@ int class_register_type(struct obd_ops *dt_ops, struct md_ops *md_ops,
if (type->typ_dt_ops != NULL)
OBD_FREE_PTR(type->typ_dt_ops);
OBD_FREE(type, sizeof(*type));
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(class_register_type);
int class_unregister_type(const char *name)
{
struct obd_type *type = class_search_type(name);
- ENTRY;
if (!type) {
CERROR("unknown obd type\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (type->typ_refcnt) {
@@ -244,7 +242,7 @@ int class_unregister_type(const char *name)
/* Remove ops, but leave the name for debugging */
OBD_FREE_PTR(type->typ_dt_ops);
OBD_FREE_PTR(type->typ_md_ops);
- RETURN(-EBUSY);
+ return -EBUSY;
}
if (type->typ_procroot) {
@@ -263,7 +261,7 @@ int class_unregister_type(const char *name)
if (type->typ_md_ops != NULL)
OBD_FREE_PTR(type->typ_md_ops);
OBD_FREE(type, sizeof(*type));
- RETURN(0);
+ return 0;
} /* class_unregister_type */
EXPORT_SYMBOL(class_unregister_type);
@@ -285,17 +283,16 @@ struct obd_device *class_newdev(const char *type_name, const char *name)
struct obd_type *type = NULL;
int i;
int new_obd_minor = 0;
- ENTRY;
if (strlen(name) >= MAX_OBD_NAME) {
CERROR("name/uuid must be < %u bytes long\n", MAX_OBD_NAME);
- RETURN(ERR_PTR(-EINVAL));
+ return ERR_PTR(-EINVAL);
}
type = class_get_type(type_name);
if (type == NULL){
CERROR("OBD: unknown type: %s\n", type_name);
- RETURN(ERR_PTR(-ENODEV));
+ return ERR_PTR(-ENODEV);
}
newdev = obd_device_alloc();
@@ -349,7 +346,7 @@ struct obd_device *class_newdev(const char *type_name, const char *name)
CDEBUG(D_IOCTL, "Adding new device %s (%p)\n",
result->obd_name, result);
- RETURN(result);
+ return result;
out:
obd_device_free(newdev);
out_type:
@@ -635,7 +632,6 @@ EXPORT_SYMBOL(class_notify_sptlrpc_conf);
void obd_cleanup_caches(void)
{
- ENTRY;
if (obd_device_cachep) {
kmem_cache_destroy(obd_device_cachep);
obd_device_cachep = NULL;
@@ -652,13 +648,10 @@ void obd_cleanup_caches(void)
kmem_cache_destroy(capa_cachep);
capa_cachep = NULL;
}
- EXIT;
}
int obd_init_caches(void)
{
- ENTRY;
-
LASSERT(obd_device_cachep == NULL);
obd_device_cachep = kmem_cache_create("ll_obd_dev_cache",
sizeof(struct obd_device),
@@ -685,10 +678,10 @@ int obd_init_caches(void)
if (!capa_cachep)
GOTO(out, -ENOMEM);
- RETURN(0);
+ return 0;
out:
obd_cleanup_caches();
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
@@ -696,21 +689,20 @@ int obd_init_caches(void)
struct obd_export *class_conn2export(struct lustre_handle *conn)
{
struct obd_export *export;
- ENTRY;
if (!conn) {
CDEBUG(D_CACHE, "looking for null handle\n");
- RETURN(NULL);
+ return NULL;
}
if (conn->cookie == -1) { /* this means assign a new connection */
CDEBUG(D_CACHE, "want a new connection\n");
- RETURN(NULL);
+ return NULL;
}
CDEBUG(D_INFO, "looking for export cookie "LPX64"\n", conn->cookie);
export = class_handle2object(conn->cookie);
- RETURN(export);
+ return export;
}
EXPORT_SYMBOL(class_conn2export);
@@ -757,7 +749,6 @@ EXPORT_SYMBOL(class_conn2cliimp);
static void class_export_destroy(struct obd_export *exp)
{
struct obd_device *obd = exp->exp_obd;
- ENTRY;
LASSERT_ATOMIC_ZERO(&exp->exp_refcount);
LASSERT(obd != NULL);
@@ -777,7 +768,6 @@ static void class_export_destroy(struct obd_export *exp)
class_decref(obd, "export", exp);
OBD_FREE_RCU(exp, sizeof(*exp), &exp->exp_handle);
- EXIT;
}
static void export_handle_addref(void *export)
@@ -828,7 +818,6 @@ struct obd_export *class_new_export(struct obd_device *obd,
struct obd_export *export;
cfs_hash_t *hash = NULL;
int rc = 0;
- ENTRY;
OBD_ALLOC_PTR(export);
if (!export)
@@ -899,7 +888,7 @@ struct obd_export *class_new_export(struct obd_device *obd,
export->exp_obd->obd_num_exports++;
spin_unlock(&obd->obd_dev_lock);
cfs_hash_putref(hash);
- RETURN(export);
+ return export;
exit_unlock:
spin_unlock(&obd->obd_dev_lock);
@@ -936,8 +925,6 @@ EXPORT_SYMBOL(class_unlink_export);
/* Import management functions */
void class_import_destroy(struct obd_import *imp)
{
- ENTRY;
-
CDEBUG(D_IOCTL, "destroying import %p for %s\n", imp,
imp->imp_obd->obd_name);
@@ -958,7 +945,6 @@ void class_import_destroy(struct obd_import *imp)
LASSERT(imp->imp_sec == NULL);
class_decref(imp->imp_obd, "import", imp);
OBD_FREE_RCU(imp, sizeof(*imp), &imp->imp_handle);
- EXIT;
}
static void import_handle_addref(void *import)
@@ -983,8 +969,6 @@ EXPORT_SYMBOL(class_import_get);
void class_import_put(struct obd_import *imp)
{
- ENTRY;
-
LASSERT(list_empty(&imp->imp_zombie_chain));
LASSERT_ATOMIC_GT_LT(&imp->imp_refcount, 0, LI_POISON);
@@ -999,7 +983,6 @@ void class_import_put(struct obd_import *imp)
/* catch possible import put race */
LASSERT_ATOMIC_GE_LT(&imp->imp_refcount, 0, LI_POISON);
- EXIT;
}
EXPORT_SYMBOL(class_import_put);
@@ -1121,18 +1104,17 @@ int class_connect(struct lustre_handle *conn, struct obd_device *obd,
LASSERT(conn != NULL);
LASSERT(obd != NULL);
LASSERT(cluuid != NULL);
- ENTRY;
export = class_new_export(obd, cluuid);
if (IS_ERR(export))
- RETURN(PTR_ERR(export));
+ return PTR_ERR(export);
conn->cookie = export->exp_handle.h_cookie;
class_export_put(export);
CDEBUG(D_IOCTL, "connect: client %s, cookie "LPX64"\n",
cluuid->uuid, conn->cookie);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(class_connect);
@@ -1188,11 +1170,10 @@ void class_export_recovery_cleanup(struct obd_export *exp)
int class_disconnect(struct obd_export *export)
{
int already_disconnected;
- ENTRY;
if (export == NULL) {
CWARN("attempting to free NULL export %p\n", export);
- RETURN(-EINVAL);
+ return -EINVAL;
}
spin_lock(&export->exp_lock);
@@ -1220,7 +1201,7 @@ int class_disconnect(struct obd_export *export)
class_unlink_export(export);
no_disconn:
class_export_put(export);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(class_disconnect);
@@ -1243,7 +1224,6 @@ static void class_disconnect_export_list(struct list_head *list,
{
int rc;
struct obd_export *exp;
- ENTRY;
/* It's possible that an export may disconnect itself, but
* nothing else will be added to this list. */
@@ -1281,13 +1261,11 @@ static void class_disconnect_export_list(struct list_head *list,
obd_export_nid2str(exp), exp, rc);
class_export_put(exp);
}
- EXIT;
}
void class_disconnect_exports(struct obd_device *obd)
{
struct list_head work_list;
- ENTRY;
/* Move all of the exports from obd_exports to a work list, en masse. */
INIT_LIST_HEAD(&work_list);
@@ -1304,7 +1282,6 @@ void class_disconnect_exports(struct obd_device *obd)
} else
CDEBUG(D_HA, "OBD device %d (%p) has no exports\n",
obd->obd_minor, obd);
- EXIT;
}
EXPORT_SYMBOL(class_disconnect_exports);
@@ -1316,7 +1293,6 @@ void class_disconnect_stale_exports(struct obd_device *obd,
struct list_head work_list;
struct obd_export *exp, *n;
int evicted = 0;
- ENTRY;
INIT_LIST_HEAD(&work_list);
spin_lock(&obd->obd_dev_lock);
@@ -1356,7 +1332,6 @@ void class_disconnect_stale_exports(struct obd_device *obd,
class_disconnect_export_list(&work_list, exp_flags_from_obd(obd) |
OBD_OPT_ABORT_RECOV);
- EXIT;
}
EXPORT_SYMBOL(class_disconnect_stale_exports);
@@ -1484,7 +1459,7 @@ int obd_export_evict_by_uuid(struct obd_device *obd, const char *uuid)
CERROR("%s: can't disconnect %s: no exports found\n",
obd->obd_name, uuid);
} else {
- CWARN("%s: evicting %s at adminstrative request\n",
+ CWARN("%s: evicting %s at administrative request\n",
obd->obd_name, doomed_exp->exp_client_uuid.uuid);
class_fail_export(doomed_exp);
class_export_put(doomed_exp);
@@ -1585,7 +1560,6 @@ void obd_zombie_impexp_cull(void)
{
struct obd_import *import;
struct obd_export *export;
- ENTRY;
do {
spin_lock(&obd_zombie_impexp_lock);
@@ -1624,7 +1598,6 @@ void obd_zombie_impexp_cull(void)
cond_resched();
} while (import != NULL || export != NULL);
- EXIT;
}
static struct completion obd_zombie_start;
@@ -1649,7 +1622,7 @@ static int obd_zombie_impexp_check(void *arg)
!test_bit(OBD_ZOMBIE_STOP, &obd_zombie_flags);
spin_unlock(&obd_zombie_impexp_lock);
- RETURN(rc);
+ return rc;
}
/**
@@ -1751,7 +1724,7 @@ static int obd_zombie_impexp_thread(void *unused)
complete(&obd_zombie_stop);
- RETURN(0);
+ return 0;
}
@@ -1760,7 +1733,7 @@ static int obd_zombie_impexp_thread(void *unused)
*/
int obd_zombie_impexp_init(void)
{
- task_t *task;
+ struct task_struct *task;
INIT_LIST_HEAD(&obd_zombie_imports);
INIT_LIST_HEAD(&obd_zombie_exports);
@@ -1772,10 +1745,10 @@ int obd_zombie_impexp_init(void)
task = kthread_run(obd_zombie_impexp_thread, NULL, "obd_zombid");
if (IS_ERR(task))
- RETURN(PTR_ERR(task));
+ return PTR_ERR(task);
wait_for_completion(&obd_zombie_start);
- RETURN(0);
+ return 0;
}
/**
* stop destroy zombie import/export thread
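Alongside the RETURN() removals, obd_zombie_impexp_init() above swaps the obsolete task_t typedef for struct task_struct. The kthread_run() pattern it relies on is the standard one: the helper returns either a valid task pointer or an ERR_PTR-encoded error, never NULL, so IS_ERR()/PTR_ERR() is the whole error path. A minimal sketch, with the thread function and name chosen for illustration only:

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>

static int demo_thread_fn(void *data)
{
	/* Loop until someone calls kthread_stop() on this task. */
	while (!kthread_should_stop())
		msleep(100);
	return 0;
}

static int demo_start_thread(void)
{
	struct task_struct *task;

	task = kthread_run(demo_thread_fn, NULL, "demo_thread");
	if (IS_ERR(task))
		return PTR_ERR(task);	/* nothing to free on failure */
	return 0;
}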
diff --git a/drivers/staging/lustre/lustre/obdclass/idmap.c b/drivers/staging/lustre/lustre/obdclass/idmap.c
index 622f8d16527..ec2590f5cfe 100644
--- a/drivers/staging/lustre/lustre/obdclass/idmap.c
+++ b/drivers/staging/lustre/lustre/obdclass/idmap.c
@@ -59,8 +59,7 @@
* groups_search() is copied from linux kernel!
* A simple bsearch.
*/
-static int lustre_groups_search(group_info_t *group_info,
- gid_t grp)
+static int lustre_groups_search(const struct group_info *group_info, gid_t grp)
{
int left, right;
@@ -71,7 +70,8 @@ static int lustre_groups_search(group_info_t *group_info,
right = group_info->ngroups;
while (left < right) {
int mid = (left + right) / 2;
- int cmp = grp - CFS_GROUP_AT(group_info, mid);
+ int cmp = grp -
+ from_kgid(&init_user_ns, CFS_GROUP_AT(group_info, mid));
if (cmp > 0)
left = mid + 1;
@@ -83,7 +83,7 @@ static int lustre_groups_search(group_info_t *group_info,
return 0;
}
-void lustre_groups_from_list(group_info_t *ginfo, gid_t *glist)
+void lustre_groups_from_list(struct group_info *ginfo, gid_t *glist)
{
int i;
int count = ginfo->ngroups;
@@ -102,7 +102,7 @@ EXPORT_SYMBOL(lustre_groups_from_list);
/* groups_sort() is copied from linux kernel! */
/* a simple shell-metzner sort */
-void lustre_groups_sort(group_info_t *group_info)
+void lustre_groups_sort(struct group_info *group_info)
{
int base, max, stride;
int gidsetsize = group_info->ngroups;
@@ -116,16 +116,19 @@ void lustre_groups_sort(group_info_t *group_info)
for (base = 0; base < max; base++) {
int left = base;
int right = left + stride;
- gid_t tmp = CFS_GROUP_AT(group_info, right);
+ gid_t tmp = from_kgid(&init_user_ns,
+ CFS_GROUP_AT(group_info, right));
while (left >= 0 &&
- CFS_GROUP_AT(group_info, left) > tmp) {
+ tmp < from_kgid(&init_user_ns,
+ CFS_GROUP_AT(group_info, left))) {
CFS_GROUP_AT(group_info, right) =
CFS_GROUP_AT(group_info, left);
right = left;
left -= stride;
}
- CFS_GROUP_AT(group_info, right) = tmp;
+ CFS_GROUP_AT(group_info, right) =
+ make_kgid(&init_user_ns, tmp);
}
stride /= 3;
}
@@ -137,7 +140,7 @@ int lustre_in_group_p(struct lu_ucred *mu, gid_t grp)
int rc = 1;
if (grp != mu->uc_fsgid) {
- group_info_t *group_info = NULL;
+ struct group_info *group_info = NULL;
if (mu->uc_ginfo || !mu->uc_identity ||
mu->uc_valid == UCRED_OLD)
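The idmap.c hunks above are part of the kuid_t/kgid_t conversion: group entries are now kernel-internal kgid_t values, so comparisons and the shell sort go through from_kgid()/make_kgid() against init_user_ns. A minimal sketch of those two conversion helpers as used here; the wrapper names are illustrative. For pure equality tests, kgid_t values can also be compared directly with gid_eq() without leaving the kgid domain.

#include <linux/uidgid.h>
#include <linux/user_namespace.h>

/* Convert a kernel-internal kgid_t to a plain gid_t for arithmetic/comparison. */
static inline gid_t demo_gid_from_kgid(kgid_t kgid)
{
	return from_kgid(&init_user_ns, kgid);
}

/* Convert a userspace-visible gid_t back into a kgid_t for storage. */
static inline kgid_t demo_kgid_from_gid(gid_t gid)
{
	return make_kgid(&init_user_ns, gid);
}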
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index d2c3072541d..d1a57ebfda9 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -83,27 +83,26 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg)
struct obd_ioctl_data *data;
int err;
int offset = 0;
- ENTRY;
err = copy_from_user(&hdr, (void *)arg, sizeof(hdr));
if ( err )
- RETURN(err);
+ return err;
if (hdr.ioc_version != OBD_IOCTL_VERSION) {
CERROR("Version mismatch kernel (%x) vs application (%x)\n",
OBD_IOCTL_VERSION, hdr.ioc_version);
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (hdr.ioc_len > OBD_MAX_IOCTL_BUFFER) {
CERROR("User buffer len %d exceeds %d max buffer\n",
hdr.ioc_len, OBD_MAX_IOCTL_BUFFER);
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (hdr.ioc_len < sizeof(struct obd_ioctl_data)) {
CERROR("User buffer too small for ioctl (%d)\n", hdr.ioc_len);
- RETURN(-EINVAL);
+ return -EINVAL;
}
/* When there are lots of processes calling vmalloc on multi-core
@@ -114,7 +113,7 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg)
if (*buf == NULL) {
CERROR("Cannot allocate control buffer of len %d\n",
hdr.ioc_len);
- RETURN(-EINVAL);
+ return -EINVAL;
}
*len = hdr.ioc_len;
data = (struct obd_ioctl_data *)*buf;
@@ -122,13 +121,13 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg)
err = copy_from_user(*buf, (void *)arg, hdr.ioc_len);
if ( err ) {
OBD_FREE_LARGE(*buf, hdr.ioc_len);
- RETURN(err);
+ return err;
}
if (obd_ioctl_is_invalid(data)) {
CERROR("ioctl not correctly formatted\n");
OBD_FREE_LARGE(*buf, hdr.ioc_len);
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (data->ioc_inllen1) {
@@ -150,7 +149,6 @@ int obd_ioctl_getdata(char **buf, int *len, void *arg)
data->ioc_inlbuf4 = &data->ioc_bulk[0] + offset;
}
- EXIT;
return 0;
}
EXPORT_SYMBOL(obd_ioctl_getdata);
@@ -169,19 +167,15 @@ EXPORT_SYMBOL(obd_ioctl_popdata);
/* opening /dev/obd */
static int obd_class_open(struct inode * inode, struct file * file)
{
- ENTRY;
-
try_module_get(THIS_MODULE);
- RETURN(0);
+ return 0;
}
/* closing /dev/obd */
static int obd_class_release(struct inode * inode, struct file * file)
{
- ENTRY;
-
module_put(THIS_MODULE);
- RETURN(0);
+ return 0;
}
/* to control /dev/obd */
@@ -189,17 +183,16 @@ static long obd_class_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
int err = 0;
- ENTRY;
/* Allow non-root access for OBD_IOC_PING_TARGET - used by lfs check */
if (!cfs_capable(CFS_CAP_SYS_ADMIN) && (cmd != OBD_IOC_PING_TARGET))
- RETURN(err = -EACCES);
+ return err = -EACCES;
if ((cmd & 0xffffff00) == ((int)'T') << 8) /* ignore all tty ioctls */
- RETURN(err = -ENOTTY);
+ return err = -ENOTTY;
err = class_handle_ioctl(cmd, (unsigned long)arg);
- RETURN(err);
+ return err;
}
/* declare character device */
@@ -211,7 +204,7 @@ static struct file_operations obd_psdev_fops = {
};
/* modules setup */
-psdev_t obd_psdev = {
+struct miscdevice obd_psdev = {
.minor = OBD_DEV_MINOR,
.name = OBD_DEV_NAME,
.fops = &obd_psdev_fops,
@@ -385,24 +378,29 @@ struct file_operations obd_device_list_fops = {
int class_procfs_init(void)
{
- int rc;
- ENTRY;
+ int rc = 0;
obd_sysctl_init();
proc_lustre_root = lprocfs_register("fs/lustre", NULL,
lprocfs_base, NULL);
+ if (IS_ERR(proc_lustre_root)) {
+ rc = PTR_ERR(proc_lustre_root);
+ proc_lustre_root = NULL;
+ goto out;
+ }
+
rc = lprocfs_seq_create(proc_lustre_root, "devices", 0444,
&obd_device_list_fops, NULL);
+out:
if (rc)
CERROR("error adding /proc/fs/lustre/devices file\n");
- RETURN(0);
+ return 0;
}
int class_procfs_clean(void)
{
- ENTRY;
if (proc_lustre_root) {
lprocfs_remove(&proc_lustre_root);
}
- RETURN(0);
+ return 0;
}
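With the psdev_t typedef gone, /dev/obd above is declared as a plain struct miscdevice. Registering such a device follows the usual misc API; a minimal sketch, with the device name and file operations chosen for illustration rather than copied from the driver:

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice demo_dev = {
	.minor = MISC_DYNAMIC_MINOR,	/* obdclass uses a fixed minor instead */
	.name  = "demo_ctl",		/* appears as /dev/demo_ctl */
	.fops  = &demo_fops,
};

static int __init demo_misc_init(void)
{
	return misc_register(&demo_dev);	/* 0 on success, negative errno on failure */
}

static void __exit demo_misc_exit(void)
{
	misc_deregister(&demo_dev);
}

module_init(demo_misc_init);
module_exit(demo_misc_exit);
MODULE_LICENSE("GPL");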
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
index 6ee347153a1..d3bb5ffc564 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-obdo.c
@@ -213,9 +213,9 @@ void obdo_to_inode(struct inode *dst, struct obdo *src, obd_flag valid)
if (valid & OBD_MD_FLMODE)
dst->i_mode = (dst->i_mode & S_IFMT) | (src->o_mode & ~S_IFMT);
if (valid & OBD_MD_FLUID)
- dst->i_uid = src->o_uid;
+ dst->i_uid = make_kuid(&init_user_ns, src->o_uid);
if (valid & OBD_MD_FLGID)
- dst->i_gid = src->o_gid;
+ dst->i_gid = make_kgid(&init_user_ns, src->o_gid);
if (valid & OBD_MD_FLFLAGS)
dst->i_flags = src->o_flags;
}
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
index 46aad6813ca..acd2619227d 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-sysctl.c
@@ -38,8 +38,6 @@
#include <linux/sysctl.h>
#include <linux/sched.h>
#include <linux/mm.h>
-#include <linux/sysctl.h>
-#include <linux/version.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/stat.h>
@@ -202,12 +200,12 @@ int LL_PROC_PROTO(proc_max_dirty_pages_in_mb)
1 << (20 - PAGE_CACHE_SHIFT));
/* Don't allow them to let dirty pages exceed 90% of system
* memory and set a hard minimum of 4MB. */
- if (obd_max_dirty_pages > ((num_physpages / 10) * 9)) {
+ if (obd_max_dirty_pages > ((totalram_pages / 10) * 9)) {
CERROR("Refusing to set max dirty pages to %u, which "
"is more than 90%% of available RAM; setting "
"to %lu\n", obd_max_dirty_pages,
- ((num_physpages / 10) * 9));
- obd_max_dirty_pages = ((num_physpages / 10) * 9);
+ ((totalram_pages / 10) * 9));
+ obd_max_dirty_pages = ((totalram_pages / 10) * 9);
} else if (obd_max_dirty_pages < 4 << (20 - PAGE_CACHE_SHIFT)) {
obd_max_dirty_pages = 4 << (20 - PAGE_CACHE_SHIFT);
}
@@ -431,7 +429,7 @@ void obd_sysctl_init (void)
{
#ifdef CONFIG_SYSCTL
if ( !obd_table_header )
- obd_table_header = cfs_register_sysctl_table(parent_table, 0);
+ obd_table_header = register_sysctl_table(parent_table);
#endif
}
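Two generic replacements appear in linux-sysctl.c: the long-deprecated num_physpages global gives way to totalram_pages, and the libcfs wrapper is dropped in favour of calling register_sysctl_table() directly. A minimal sketch of sizing a cache limit from total RAM, in the spirit of the dirty-page cap above; the names and the 4 MB floor are illustrative, and note that in much later kernels totalram_pages became an accessor function rather than a variable.

#include <linux/mm.h>

static unsigned long demo_max_dirty_pages;

static void demo_set_dirty_cap(unsigned long requested)
{
	/* Clamp to at most 90% of RAM and at least 4 MB worth of pages. */
	unsigned long hard_max = (totalram_pages / 10) * 9;
	unsigned long hard_min = 4UL << (20 - PAGE_SHIFT);

	if (requested > hard_max)
		requested = hard_max;
	else if (requested < hard_min)
		requested = hard_min;

	demo_max_dirty_pages = requested;
}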
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index b1d215e5699..0cb44287502 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -111,21 +111,20 @@ int llog_cancel_rec(const struct lu_env *env, struct llog_handle *loghandle,
{
struct llog_log_hdr *llh = loghandle->lgh_hdr;
int rc = 0;
- ENTRY;
CDEBUG(D_RPCTRACE, "Canceling %d in log "DOSTID"\n",
index, POSTID(&loghandle->lgh_id.lgl_oi));
if (index == 0) {
CERROR("Can't cancel index 0 which is header\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
spin_lock(&loghandle->lgh_hdr_lock);
if (!ext2_clear_bit(index, llh->llh_bitmap)) {
spin_unlock(&loghandle->lgh_hdr_lock);
CDEBUG(D_RPCTRACE, "Catalog index %u already clear?\n", index);
- RETURN(-ENOENT);
+ return -ENOENT;
}
llh->llh_count--;
@@ -143,7 +142,7 @@ int llog_cancel_rec(const struct lu_env *env, struct llog_handle *loghandle,
loghandle->lgh_id.lgl_ogen, rc);
GOTO(out_err, rc);
}
- RETURN(1);
+ return 1;
}
spin_unlock(&loghandle->lgh_hdr_lock);
@@ -156,7 +155,7 @@ int llog_cancel_rec(const struct lu_env *env, struct llog_handle *loghandle,
loghandle->lgh_id.lgl_ogen, rc);
GOTO(out_err, rc);
}
- RETURN(0);
+ return 0;
out_err:
spin_lock(&loghandle->lgh_hdr_lock);
ext2_set_bit(index, llh->llh_bitmap);
@@ -175,10 +174,10 @@ static int llog_read_header(const struct lu_env *env,
rc = llog_handle2ops(handle, &lop);
if (rc)
- RETURN(rc);
+ return rc;
if (lop->lop_read_header == NULL)
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
rc = lop->lop_read_header(env, handle);
if (rc == LLOG_EEMPTY) {
@@ -206,12 +205,11 @@ int llog_init_handle(const struct lu_env *env, struct llog_handle *handle,
struct llog_log_hdr *llh;
int rc;
- ENTRY;
LASSERT(handle->lgh_hdr == NULL);
OBD_ALLOC_PTR(llh);
if (llh == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
handle->lgh_hdr = llh;
/* first assign flags to use llog_client_ops */
llh->llh_flags = flags;
@@ -263,7 +261,7 @@ out:
OBD_FREE_PTR(llh);
handle->lgh_hdr = NULL;
}
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_init_handle);
@@ -277,7 +275,6 @@ int llog_copy_handler(const struct lu_env *env,
char *cfg_buf = (char*) (rec + 1);
struct lustre_cfg *lcfg;
int rc = 0;
- ENTRY;
/* Append all records */
local_rec.lrh_len -= sizeof(*rec) + sizeof(struct llog_rec_tail);
@@ -289,7 +286,7 @@ int llog_copy_handler(const struct lu_env *env,
rec->lrh_index, rc, rec->lrh_len, lcfg->lcfg_command,
lustre_cfg_string(lcfg, 0), lustre_cfg_string(lcfg, 1));
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_copy_handler);
@@ -306,14 +303,12 @@ static int llog_process_thread(void *arg)
int saved_index = 0;
int last_called_index = 0;
- ENTRY;
-
LASSERT(llh);
OBD_ALLOC(buf, LLOG_CHUNK_SIZE);
if (!buf) {
lpi->lpi_rc = -ENOMEM;
- RETURN(0);
+ return 0;
}
if (cd != NULL) {
@@ -457,12 +452,10 @@ int llog_process_or_fork(const struct lu_env *env,
struct llog_process_info *lpi;
int rc;
- ENTRY;
-
OBD_ALLOC_PTR(lpi);
if (lpi == NULL) {
CERROR("cannot alloc pointer\n");
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
lpi->lpi_loghandle = loghandle;
lpi->lpi_cb = cb;
@@ -480,7 +473,7 @@ int llog_process_or_fork(const struct lu_env *env,
CERROR("%s: cannot start thread: rc = %d\n",
loghandle->lgh_ctxt->loc_obd->obd_name, rc);
OBD_FREE_PTR(lpi);
- RETURN(rc);
+ return rc;
}
wait_for_completion(&lpi->lpi_completion);
} else {
@@ -489,7 +482,7 @@ int llog_process_or_fork(const struct lu_env *env,
}
rc = lpi->lpi_rc;
OBD_FREE_PTR(lpi);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_process_or_fork);
@@ -516,11 +509,10 @@ int llog_reverse_process(const struct lu_env *env,
struct llog_process_cat_data *cd = catdata;
void *buf;
int rc = 0, first_index = 1, index, idx;
- ENTRY;
OBD_ALLOC(buf, LLOG_CHUNK_SIZE);
if (!buf)
- RETURN(-ENOMEM);
+ return -ENOMEM;
if (cd != NULL)
first_index = cd->lpcd_first_idx + 1;
@@ -594,7 +586,7 @@ int llog_reverse_process(const struct lu_env *env,
out:
if (buf)
OBD_FREE(buf, LLOG_CHUNK_SIZE);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_reverse_process);
@@ -617,16 +609,14 @@ int llog_exist(struct llog_handle *loghandle)
struct llog_operations *lop;
int rc;
- ENTRY;
-
rc = llog_handle2ops(loghandle, &lop);
if (rc)
- RETURN(rc);
+ return rc;
if (lop->lop_exist == NULL)
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
rc = lop->lop_exist(loghandle);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_exist);
@@ -636,13 +626,11 @@ int llog_declare_create(const struct lu_env *env,
struct llog_operations *lop;
int raised, rc;
- ENTRY;
-
rc = llog_handle2ops(loghandle, &lop);
if (rc)
- RETURN(rc);
+ return rc;
if (lop->lop_declare_create == NULL)
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
raised = cfs_cap_raised(CFS_CAP_SYS_RESOURCE);
if (!raised)
@@ -650,7 +638,7 @@ int llog_declare_create(const struct lu_env *env,
rc = lop->lop_declare_create(env, loghandle, th);
if (!raised)
cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_declare_create);
@@ -660,13 +648,11 @@ int llog_create(const struct lu_env *env, struct llog_handle *handle,
struct llog_operations *lop;
int raised, rc;
- ENTRY;
-
rc = llog_handle2ops(handle, &lop);
if (rc)
- RETURN(rc);
+ return rc;
if (lop->lop_create == NULL)
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
raised = cfs_cap_raised(CFS_CAP_SYS_RESOURCE);
if (!raised)
@@ -674,7 +660,7 @@ int llog_create(const struct lu_env *env, struct llog_handle *handle,
rc = lop->lop_create(env, handle, th);
if (!raised)
cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_create);
@@ -686,14 +672,12 @@ int llog_declare_write_rec(const struct lu_env *env,
struct llog_operations *lop;
int raised, rc;
- ENTRY;
-
rc = llog_handle2ops(handle, &lop);
if (rc)
- RETURN(rc);
+ return rc;
LASSERT(lop);
if (lop->lop_declare_write_rec == NULL)
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
raised = cfs_cap_raised(CFS_CAP_SYS_RESOURCE);
if (!raised)
@@ -701,7 +685,7 @@ int llog_declare_write_rec(const struct lu_env *env,
rc = lop->lop_declare_write_rec(env, handle, rec, idx, th);
if (!raised)
cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_declare_write_rec);
@@ -712,15 +696,13 @@ int llog_write_rec(const struct lu_env *env, struct llog_handle *handle,
struct llog_operations *lop;
int raised, rc, buflen;
- ENTRY;
-
rc = llog_handle2ops(handle, &lop);
if (rc)
- RETURN(rc);
+ return rc;
LASSERT(lop);
if (lop->lop_write_rec == NULL)
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
if (buf)
buflen = rec->lrh_len + sizeof(struct llog_rec_hdr) +
@@ -736,7 +718,7 @@ int llog_write_rec(const struct lu_env *env, struct llog_handle *handle,
buf, idx, th);
if (!raised)
cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_write_rec);
@@ -746,10 +728,8 @@ int llog_add(const struct lu_env *env, struct llog_handle *lgh,
{
int raised, rc;
- ENTRY;
-
if (lgh->lgh_logops->lop_add == NULL)
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
raised = cfs_cap_raised(CFS_CAP_SYS_RESOURCE);
if (!raised)
@@ -757,7 +737,7 @@ int llog_add(const struct lu_env *env, struct llog_handle *lgh,
rc = lgh->lgh_logops->lop_add(env, lgh, rec, logcookies, buf, th);
if (!raised)
cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_add);
@@ -766,10 +746,8 @@ int llog_declare_add(const struct lu_env *env, struct llog_handle *lgh,
{
int raised, rc;
- ENTRY;
-
if (lgh->lgh_logops->lop_declare_add == NULL)
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
raised = cfs_cap_raised(CFS_CAP_SYS_RESOURCE);
if (!raised)
@@ -777,7 +755,7 @@ int llog_declare_add(const struct lu_env *env, struct llog_handle *lgh,
rc = lgh->lgh_logops->lop_declare_add(env, lgh, rec, th);
if (!raised)
cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_declare_add);
@@ -792,14 +770,12 @@ int llog_open_create(const struct lu_env *env, struct llog_ctxt *ctxt,
struct thandle *th;
int rc;
- ENTRY;
-
rc = llog_open(env, ctxt, res, logid, name, LLOG_OPEN_NEW);
if (rc)
- RETURN(rc);
+ return rc;
if (llog_exist(*res))
- RETURN(0);
+ return 0;
if ((*res)->lgh_obj != NULL) {
struct dt_device *d;
@@ -825,7 +801,7 @@ int llog_open_create(const struct lu_env *env, struct llog_ctxt *ctxt,
out:
if (rc)
llog_close(env, *res);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_open_create);
@@ -838,15 +814,13 @@ int llog_erase(const struct lu_env *env, struct llog_ctxt *ctxt,
struct llog_handle *handle;
int rc = 0, rc2;
- ENTRY;
-
/* nothing to erase */
if (name == NULL && logid == NULL)
- RETURN(0);
+ return 0;
rc = llog_open(env, ctxt, &handle, logid, name, LLOG_OPEN_EXISTS);
if (rc < 0)
- RETURN(rc);
+ return rc;
rc = llog_init_handle(env, handle, LLOG_F_IS_PLAIN, NULL);
if (rc == 0)
@@ -855,7 +829,7 @@ int llog_erase(const struct lu_env *env, struct llog_ctxt *ctxt,
rc2 = llog_close(env, handle);
if (rc == 0)
rc = rc2;
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_erase);
@@ -870,8 +844,6 @@ int llog_write(const struct lu_env *env, struct llog_handle *loghandle,
{
int rc;
- ENTRY;
-
LASSERT(loghandle);
LASSERT(loghandle->lgh_ctxt);
@@ -883,7 +855,7 @@ int llog_write(const struct lu_env *env, struct llog_handle *loghandle,
th = dt_trans_create(env, dt);
if (IS_ERR(th))
- RETURN(PTR_ERR(th));
+ return PTR_ERR(th);
rc = llog_declare_write_rec(env, loghandle, rec, idx, th);
if (rc)
@@ -905,7 +877,7 @@ out_trans:
cookiecount, buf, idx, NULL);
up_write(&loghandle->lgh_lock);
}
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_write);
@@ -916,19 +888,17 @@ int llog_open(const struct lu_env *env, struct llog_ctxt *ctxt,
int raised;
int rc;
- ENTRY;
-
LASSERT(ctxt);
LASSERT(ctxt->loc_logops);
if (ctxt->loc_logops->lop_open == NULL) {
*lgh = NULL;
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
}
*lgh = llog_alloc_handle();
if (*lgh == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
(*lgh)->lgh_ctxt = ctxt;
(*lgh)->lgh_logops = ctxt->loc_logops;
@@ -942,7 +912,7 @@ int llog_open(const struct lu_env *env, struct llog_ctxt *ctxt,
llog_free_handle(*lgh);
*lgh = NULL;
}
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_open);
@@ -951,8 +921,6 @@ int llog_close(const struct lu_env *env, struct llog_handle *loghandle)
struct llog_operations *lop;
int rc;
- ENTRY;
-
rc = llog_handle2ops(loghandle, &lop);
if (rc)
GOTO(out, rc);
@@ -961,6 +929,6 @@ int llog_close(const struct lu_env *env, struct llog_handle *loghandle)
rc = lop->lop_close(env, loghandle);
out:
llog_handle_put(loghandle);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_close);
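While RETURN()/ENTRY() disappear throughout llog.c, the GOTO() error-path macro is still in use (for example GOTO(out, rc) in llog_close() above). As far as the macro's behaviour goes, it roughly evaluates its second argument, usually an rc assignment, emits a trace message, and jumps to the label; its plain-C equivalent is just an assignment followed by a goto. A minimal sketch of that shape, using a hypothetical allocation as the failing step:

#include <linux/errno.h>
#include <linux/slab.h>

static int demo_error_path(size_t len)
{
	void *buf;
	int rc = 0;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf) {
		rc = -ENOMEM;	/* GOTO(out, rc = -ENOMEM) minus the trace message */
		goto out;
	}

	/* ... use buf ... */

	kfree(buf);
out:
	return rc;
}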
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_cat.c b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
index cf00b2f550a..c0f3af72574 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_cat.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_cat.c
@@ -67,7 +67,6 @@ static int llog_cat_new_log(const struct lu_env *env,
struct llog_log_hdr *llh;
struct llog_logid_rec rec = { { 0 }, };
int rc, index, bitmap_size;
- ENTRY;
llh = cathandle->lgh_hdr;
bitmap_size = LLOG_BITMAP_SIZE(llh);
@@ -77,20 +76,20 @@ static int llog_cat_new_log(const struct lu_env *env,
/* maximum number of available slots in catlog is bitmap_size - 2 */
if (llh->llh_cat_idx == index) {
CERROR("no free catalog slots for log...\n");
- RETURN(-ENOSPC);
+ return -ENOSPC;
}
if (OBD_FAIL_CHECK(OBD_FAIL_MDS_LLOG_CREATE_FAILED))
- RETURN(-ENOSPC);
+ return -ENOSPC;
rc = llog_create(env, loghandle, th);
/* if llog is already created, no need to initialize it */
if (rc == -EEXIST) {
- RETURN(0);
+ return 0;
} else if (rc != 0) {
CERROR("%s: can't create new plain llog in catalog: rc = %d\n",
loghandle->lgh_ctxt->loc_obd->obd_name, rc);
- RETURN(rc);
+ return rc;
}
rc = llog_init_handle(env, loghandle,
@@ -134,10 +133,10 @@ static int llog_cat_new_log(const struct lu_env *env,
GOTO(out_destroy, rc);
loghandle->lgh_hdr->llh_cat_idx = index;
- RETURN(0);
+ return 0;
out_destroy:
llog_destroy(env, loghandle);
- RETURN(rc);
+ return rc;
}
/* Open an existent log handle and add it to the open list.
@@ -155,10 +154,8 @@ int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *cathandle,
struct llog_handle *loghandle;
int rc = 0;
- ENTRY;
-
if (cathandle == NULL)
- RETURN(-EBADF);
+ return -EBADF;
down_write(&cathandle->lgh_lock);
list_for_each_entry(loghandle, &cathandle->u.chd.chd_head,
@@ -187,14 +184,14 @@ int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *cathandle,
CERROR("%s: error opening log id "DOSTID":%x: rc = %d\n",
cathandle->lgh_ctxt->loc_obd->obd_name,
POSTID(&logid->lgl_oi), logid->lgl_ogen, rc);
- RETURN(rc);
+ return rc;
}
rc = llog_init_handle(env, loghandle, LLOG_F_IS_PLAIN, NULL);
if (rc < 0) {
llog_close(env, loghandle);
loghandle = NULL;
- RETURN(rc);
+ return rc;
}
down_write(&cathandle->lgh_lock);
@@ -205,7 +202,6 @@ int llog_cat_id2handle(const struct lu_env *env, struct llog_handle *cathandle,
loghandle->u.phd.phd_cookie.lgc_lgl = cathandle->lgh_id;
loghandle->u.phd.phd_cookie.lgc_index =
loghandle->lgh_hdr->llh_cat_idx;
- EXIT;
out:
llog_handle_get(loghandle);
*res = loghandle;
@@ -217,8 +213,6 @@ int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle)
struct llog_handle *loghandle, *n;
int rc;
- ENTRY;
-
list_for_each_entry_safe(loghandle, n, &cathandle->u.chd.chd_head,
u.phd.phd_entry) {
struct llog_log_hdr *llh = loghandle->lgh_hdr;
@@ -246,7 +240,7 @@ int llog_cat_close(const struct lu_env *env, struct llog_handle *cathandle)
if (cathandle->lgh_ctxt->loc_handle == cathandle)
cathandle->lgh_ctxt->loc_handle = NULL;
rc = llog_close(env, cathandle);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_cat_close);
@@ -272,7 +266,6 @@ static struct llog_handle *llog_cat_current_log(struct llog_handle *cathandle,
struct thandle *th)
{
struct llog_handle *loghandle = NULL;
- ENTRY;
down_read_nested(&cathandle->lgh_lock, LLOGH_CAT);
loghandle = cathandle->u.chd.chd_current_log;
@@ -284,7 +277,7 @@ static struct llog_handle *llog_cat_current_log(struct llog_handle *cathandle,
if (llh == NULL ||
loghandle->lgh_last_idx < LLOG_BITMAP_SIZE(llh) - 1) {
up_read(&cathandle->lgh_lock);
- RETURN(loghandle);
+ return loghandle;
} else {
up_write(&loghandle->lgh_lock);
}
@@ -304,7 +297,7 @@ static struct llog_handle *llog_cat_current_log(struct llog_handle *cathandle,
LASSERT(llh);
if (loghandle->lgh_last_idx < LLOG_BITMAP_SIZE(llh) - 1) {
up_write(&cathandle->lgh_lock);
- RETURN(loghandle);
+ return loghandle;
} else {
up_write(&loghandle->lgh_lock);
}
@@ -318,7 +311,7 @@ static struct llog_handle *llog_cat_current_log(struct llog_handle *cathandle,
down_write_nested(&loghandle->lgh_lock, LLOGH_LOG);
up_write(&cathandle->lgh_lock);
LASSERT(loghandle);
- RETURN(loghandle);
+ return loghandle;
}
/* Add a single record to the recovery log(s) using a catalog
@@ -332,7 +325,6 @@ int llog_cat_add_rec(const struct lu_env *env, struct llog_handle *cathandle,
{
struct llog_handle *loghandle;
int rc;
- ENTRY;
LASSERT(rec->lrh_len <= LLOG_CHUNK_SIZE);
loghandle = llog_cat_current_log(cathandle, th);
@@ -343,7 +335,7 @@ int llog_cat_add_rec(const struct lu_env *env, struct llog_handle *cathandle,
rc = llog_cat_new_log(env, cathandle, loghandle, th);
if (rc < 0) {
up_write(&loghandle->lgh_lock);
- RETURN(rc);
+ return rc;
}
}
/* now let's try to add the record */
@@ -361,7 +353,7 @@ int llog_cat_add_rec(const struct lu_env *env, struct llog_handle *cathandle,
rc = llog_cat_new_log(env, cathandle, loghandle, th);
if (rc < 0) {
up_write(&loghandle->lgh_lock);
- RETURN(rc);
+ return rc;
}
}
/* now let's try to add the record */
@@ -372,7 +364,7 @@ int llog_cat_add_rec(const struct lu_env *env, struct llog_handle *cathandle,
up_write(&loghandle->lgh_lock);
}
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_cat_add_rec);
@@ -383,8 +375,6 @@ int llog_cat_declare_add_rec(const struct lu_env *env,
struct llog_handle *loghandle, *next;
int rc = 0;
- ENTRY;
-
if (cathandle->u.chd.chd_current_log == NULL) {
/* declare new plain llog */
down_write(&cathandle->lgh_lock);
@@ -437,7 +427,7 @@ int llog_cat_declare_add_rec(const struct lu_env *env,
llog_declare_write_rec(env, next, rec, -1, th);
}
out:
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_cat_declare_add_rec);
@@ -460,7 +450,7 @@ int llog_cat_add(const struct lu_env *env, struct llog_handle *cathandle,
th = dt_trans_create(env, dt);
if (IS_ERR(th))
- RETURN(PTR_ERR(th));
+ return PTR_ERR(th);
rc = llog_cat_declare_add_rec(env, cathandle, rec, th);
if (rc)
@@ -479,7 +469,7 @@ out_trans:
rc = llog_cat_add_rec(env, cathandle, rec, reccookie,
buf, th);
}
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_cat_add);
@@ -498,8 +488,6 @@ int llog_cat_cancel_records(const struct lu_env *env,
{
int i, index, rc = 0, failed = 0;
- ENTRY;
-
for (i = 0; i < count; i++, cookies++) {
struct llog_handle *loghandle;
struct llog_logid *lgl = &cookies->lgc_lgl;
@@ -533,7 +521,7 @@ int llog_cat_cancel_records(const struct lu_env *env,
cathandle->lgh_ctxt->loc_obd->obd_name, failed, count,
rc);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_cat_cancel_records);
@@ -545,10 +533,9 @@ int llog_cat_process_cb(const struct lu_env *env, struct llog_handle *cat_llh,
struct llog_handle *llh;
int rc;
- ENTRY;
if (rec->lrh_type != LLOG_LOGID_MAGIC) {
CERROR("invalid record in catalog\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
CDEBUG(D_HA, "processing log "DOSTID":%x at index %u of catalog "
DOSTID"\n", POSTID(&lir->lid_id.lgl_oi), lir->lid_id.lgl_ogen,
@@ -559,12 +546,12 @@ int llog_cat_process_cb(const struct lu_env *env, struct llog_handle *cat_llh,
CERROR("%s: cannot find handle for llog "DOSTID": %d\n",
cat_llh->lgh_ctxt->loc_obd->obd_name,
POSTID(&lir->lid_id.lgl_oi), rc);
- RETURN(rc);
+ return rc;
}
if (rec->lrh_index < d->lpd_startcat)
/* Skip processing of the logs until startcat */
- RETURN(0);
+ return 0;
if (d->lpd_startidx > 0) {
struct llog_process_cat_data cd;
@@ -581,7 +568,7 @@ int llog_cat_process_cb(const struct lu_env *env, struct llog_handle *cat_llh,
}
llog_handle_put(llh);
- RETURN(rc);
+ return rc;
}
int llog_cat_process_or_fork(const struct lu_env *env,
@@ -592,7 +579,6 @@ int llog_cat_process_or_fork(const struct lu_env *env,
struct llog_process_data d;
struct llog_log_hdr *llh = cat_llh->lgh_hdr;
int rc;
- ENTRY;
LASSERT(llh->llh_flags & LLOG_F_IS_CAT);
d.lpd_data = data;
@@ -611,7 +597,7 @@ int llog_cat_process_or_fork(const struct lu_env *env,
rc = llog_process_or_fork(env, cat_llh, llog_cat_process_cb,
&d, &cd, fork);
if (rc != 0)
- RETURN(rc);
+ return rc;
cd.lpcd_first_idx = 0;
cd.lpcd_last_idx = cat_llh->lgh_last_idx;
@@ -622,7 +608,7 @@ int llog_cat_process_or_fork(const struct lu_env *env,
&d, NULL, fork);
}
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_cat_process_or_fork);
@@ -645,7 +631,7 @@ static int llog_cat_reverse_process_cb(const struct lu_env *env,
if (le32_to_cpu(rec->lrh_type) != LLOG_LOGID_MAGIC) {
CERROR("invalid record in catalog\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
CDEBUG(D_HA, "processing log "DOSTID":%x at index %u of catalog "
DOSTID"\n", POSTID(&lir->lid_id.lgl_oi), lir->lid_id.lgl_ogen,
@@ -656,12 +642,12 @@ static int llog_cat_reverse_process_cb(const struct lu_env *env,
CERROR("%s: cannot find handle for llog "DOSTID": %d\n",
cat_llh->lgh_ctxt->loc_obd->obd_name,
POSTID(&lir->lid_id.lgl_oi), rc);
- RETURN(rc);
+ return rc;
}
rc = llog_reverse_process(env, llh, d->lpd_cb, d->lpd_data, NULL);
llog_handle_put(llh);
- RETURN(rc);
+ return rc;
}
int llog_cat_reverse_process(const struct lu_env *env,
@@ -672,7 +658,6 @@ int llog_cat_reverse_process(const struct lu_env *env,
struct llog_process_cat_data cd;
struct llog_log_hdr *llh = cat_llh->lgh_hdr;
int rc;
- ENTRY;
LASSERT(llh->llh_flags & LLOG_F_IS_CAT);
d.lpd_data = data;
@@ -688,7 +673,7 @@ int llog_cat_reverse_process(const struct lu_env *env,
llog_cat_reverse_process_cb,
&d, &cd);
if (rc != 0)
- RETURN(rc);
+ return rc;
cd.lpcd_first_idx = le32_to_cpu(llh->llh_cat_idx);
cd.lpcd_last_idx = 0;
@@ -701,7 +686,7 @@ int llog_cat_reverse_process(const struct lu_env *env,
&d, NULL);
}
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_cat_reverse_process);
@@ -709,7 +694,6 @@ int llog_cat_set_first_idx(struct llog_handle *cathandle, int index)
{
struct llog_log_hdr *llh = cathandle->lgh_hdr;
int i, bitmap_size, idx;
- ENTRY;
bitmap_size = LLOG_BITMAP_SIZE(llh);
if (llh->llh_cat_idx == (index - 1)) {
@@ -734,7 +718,7 @@ out:
POSTID(&cathandle->lgh_id.lgl_oi), llh->llh_cat_idx);
}
- RETURN(0);
+ return 0;
}
/* Cleanup deleted plain llog traces from catalog */
@@ -774,11 +758,9 @@ int cat_cancel_cb(const struct lu_env *env, struct llog_handle *cathandle,
struct llog_log_hdr *llh;
int rc;
- ENTRY;
-
if (rec->lrh_type != LLOG_LOGID_MAGIC) {
CERROR("invalid record in catalog\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
CDEBUG(D_HA, "processing log "DOSTID":%x at index %u of catalog "
@@ -794,7 +776,7 @@ int cat_cancel_cb(const struct lu_env *env, struct llog_handle *cathandle,
/* remove index from catalog */
llog_cat_cleanup(env, cathandle, NULL, rec->lrh_index);
}
- RETURN(rc);
+ return rc;
}
llh = loghandle->lgh_hdr;
@@ -810,7 +792,7 @@ int cat_cancel_cb(const struct lu_env *env, struct llog_handle *cathandle,
}
llog_handle_put(loghandle);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(cat_cancel_cb);
@@ -822,12 +804,12 @@ int llog_cat_init_and_process(const struct lu_env *env,
rc = llog_init_handle(env, llh, LLOG_F_IS_CAT, NULL);
if (rc)
- RETURN(rc);
+ return rc;
rc = llog_process_or_fork(env, llh, cat_cancel_cb, NULL, NULL, false);
if (rc)
CERROR("%s: llog_process() with cat_cancel_cb failed: rc = "
"%d\n", llh->lgh_ctxt->loc_obd->obd_name, rc);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(llog_cat_init_and_process);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_ioctl.c b/drivers/staging/lustre/lustre/obdclass/llog_ioctl.c
index 0732874e26c..da558a5dc92 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_ioctl.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_ioctl.c
@@ -45,46 +45,45 @@ static int str2logid(struct llog_logid *logid, char *str, int len)
char *start, *end, *endp;
__u64 id, seq;
- ENTRY;
start = str;
if (*start != '#')
- RETURN(-EINVAL);
+ return -EINVAL;
start++;
if (start - str >= len - 1)
- RETURN(-EINVAL);
+ return -EINVAL;
end = strchr(start, '#');
if (end == NULL || end == start)
- RETURN(-EINVAL);
+ return -EINVAL;
*end = '\0';
id = simple_strtoull(start, &endp, 0);
if (endp != end)
- RETURN(-EINVAL);
+ return -EINVAL;
start = ++end;
if (start - str >= len - 1)
- RETURN(-EINVAL);
+ return -EINVAL;
end = strchr(start, '#');
if (end == NULL || end == start)
- RETURN(-EINVAL);
+ return -EINVAL;
*end = '\0';
seq = simple_strtoull(start, &endp, 0);
if (endp != end)
- RETURN(-EINVAL);
+ return -EINVAL;
ostid_set_seq(&logid->lgl_oi, seq);
ostid_set_id(&logid->lgl_oi, id);
start = ++end;
if (start - str >= len - 1)
- RETURN(-EINVAL);
+ return -EINVAL;
logid->lgl_ogen = simple_strtoul(start, &endp, 16);
if (*endp != '\0')
- RETURN(-EINVAL);
+ return -EINVAL;
- RETURN(0);
+ return 0;
}
static int llog_check_cb(const struct lu_env *env, struct llog_handle *handle,
@@ -96,8 +95,6 @@ static int llog_check_cb(const struct lu_env *env, struct llog_handle *handle,
char *endp;
int cur_index, rc = 0;
- ENTRY;
-
if (ioc_data && ioc_data->ioc_inllen1 > 0) {
l = 0;
remains = ioc_data->ioc_inllen4 +
@@ -106,19 +103,19 @@ static int llog_check_cb(const struct lu_env *env, struct llog_handle *handle,
cfs_size_round(ioc_data->ioc_inllen3);
from = simple_strtol(ioc_data->ioc_inlbuf2, &endp, 0);
if (*endp != '\0')
- RETURN(-EINVAL);
+ return -EINVAL;
to = simple_strtol(ioc_data->ioc_inlbuf3, &endp, 0);
if (*endp != '\0')
- RETURN(-EINVAL);
+ return -EINVAL;
ioc_data->ioc_inllen1 = 0;
out = ioc_data->ioc_bulk;
}
cur_index = rec->lrh_index;
if (cur_index < from)
- RETURN(0);
+ return 0;
if (to > 0 && cur_index > to)
- RETURN(-LLOG_EEMPTY);
+ return -LLOG_EEMPTY;
if (handle->lgh_hdr->llh_flags & LLOG_F_IS_CAT) {
struct llog_logid_rec *lir = (struct llog_logid_rec *)rec;
@@ -131,13 +128,13 @@ static int llog_check_cb(const struct lu_env *env, struct llog_handle *handle,
rec->lrh_len);
}
if (handle->lgh_ctxt == NULL)
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
rc = llog_cat_id2handle(env, handle, &loghandle, &lir->lid_id);
if (rc) {
CDEBUG(D_IOCTL, "cannot find log #"DOSTID"#%08x\n",
POSTID(&lir->lid_id.lgl_oi),
lir->lid_id.lgl_ogen);
- RETURN(rc);
+ return rc;
}
rc = llog_process(env, loghandle, llog_check_cb, NULL, NULL);
llog_handle_put(loghandle);
@@ -167,10 +164,10 @@ static int llog_check_cb(const struct lu_env *env, struct llog_handle *handle,
if (remains <= 0) {
CERROR("%s: no space to print log records\n",
handle->lgh_ctxt->loc_obd->obd_name);
- RETURN(-LLOG_EEMPTY);
+ return -LLOG_EEMPTY;
}
}
- RETURN(rc);
+ return rc;
}
static int llog_print_cb(const struct lu_env *env, struct llog_handle *handle,
@@ -182,7 +179,6 @@ static int llog_print_cb(const struct lu_env *env, struct llog_handle *handle,
char *endp;
int cur_index;
- ENTRY;
if (ioc_data != NULL && ioc_data->ioc_inllen1 > 0) {
l = 0;
remains = ioc_data->ioc_inllen4 +
@@ -191,26 +187,26 @@ static int llog_print_cb(const struct lu_env *env, struct llog_handle *handle,
cfs_size_round(ioc_data->ioc_inllen3);
from = simple_strtol(ioc_data->ioc_inlbuf2, &endp, 0);
if (*endp != '\0')
- RETURN(-EINVAL);
+ return -EINVAL;
to = simple_strtol(ioc_data->ioc_inlbuf3, &endp, 0);
if (*endp != '\0')
- RETURN(-EINVAL);
+ return -EINVAL;
out = ioc_data->ioc_bulk;
ioc_data->ioc_inllen1 = 0;
}
cur_index = rec->lrh_index;
if (cur_index < from)
- RETURN(0);
+ return 0;
if (to > 0 && cur_index > to)
- RETURN(-LLOG_EEMPTY);
+ return -LLOG_EEMPTY;
if (handle->lgh_hdr->llh_flags & LLOG_F_IS_CAT) {
struct llog_logid_rec *lir = (struct llog_logid_rec *)rec;
if (rec->lrh_type != LLOG_LOGID_MAGIC) {
CERROR("invalid record in catalog\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
l = snprintf(out, remains,
@@ -222,7 +218,7 @@ static int llog_print_cb(const struct lu_env *env, struct llog_handle *handle,
rc = class_config_parse_rec(rec, out, remains);
if (rc < 0)
- RETURN(rc);
+ return rc;
l = rc;
} else {
l = snprintf(out, remains,
@@ -233,10 +229,10 @@ static int llog_print_cb(const struct lu_env *env, struct llog_handle *handle,
remains -= l;
if (remains <= 0) {
CERROR("not enough space for print log records\n");
- RETURN(-LLOG_EEMPTY);
+ return -LLOG_EEMPTY;
}
- RETURN(0);
+ return 0;
}
static int llog_remove_log(const struct lu_env *env, struct llog_handle *cat,
struct llog_logid *logid)
@@ -244,13 +240,11 @@ static int llog_remove_log(const struct lu_env *env, struct llog_handle *cat,
struct llog_handle *log;
int rc;
- ENTRY;
-
rc = llog_cat_id2handle(env, cat, &log, logid);
if (rc) {
CDEBUG(D_IOCTL, "cannot find log #"DOSTID"#%08x\n",
POSTID(&logid->lgl_oi), logid->lgl_ogen);
- RETURN(-ENOENT);
+ return -ENOENT;
}
rc = llog_destroy(env, log);
@@ -261,7 +255,7 @@ static int llog_remove_log(const struct lu_env *env, struct llog_handle *cat,
llog_cat_cleanup(env, cat, log, log->u.phd.phd_cookie.lgc_index);
out:
llog_handle_put(log);
- RETURN(rc);
+ return rc;
}
@@ -271,12 +265,11 @@ static int llog_delete_cb(const struct lu_env *env, struct llog_handle *handle,
struct llog_logid_rec *lir = (struct llog_logid_rec *)rec;
int rc;
- ENTRY;
if (rec->lrh_type != LLOG_LOGID_MAGIC)
- RETURN(-EINVAL);
+ return -EINVAL;
rc = llog_remove_log(env, handle, &lir->lid_id);
- RETURN(rc);
+ return rc;
}
@@ -287,25 +280,23 @@ int llog_ioctl(const struct lu_env *env, struct llog_ctxt *ctxt, int cmd,
int rc = 0;
struct llog_handle *handle = NULL;
- ENTRY;
-
if (*data->ioc_inlbuf1 == '#') {
rc = str2logid(&logid, data->ioc_inlbuf1, data->ioc_inllen1);
if (rc)
- RETURN(rc);
+ return rc;
rc = llog_open(env, ctxt, &handle, &logid, NULL,
LLOG_OPEN_EXISTS);
if (rc)
- RETURN(rc);
+ return rc;
} else if (*data->ioc_inlbuf1 == '$') {
char *name = data->ioc_inlbuf1 + 1;
rc = llog_open(env, ctxt, &handle, NULL, name,
LLOG_OPEN_EXISTS);
if (rc)
- RETURN(rc);
+ return rc;
} else {
- RETURN(-EINVAL);
+ return -EINVAL;
}
rc = llog_init_handle(env, handle, 0, NULL);
@@ -422,6 +413,6 @@ out_close:
llog_cat_close(env, handle);
else
llog_close(env, handle);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_ioctl);
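str2logid() above parses a "#id#seq#gen" string by repeatedly splitting on '#' and checking the endp pointer that simple_strtoull() hands back. A minimal standalone sketch of that validation idiom; the helper name is illustrative, and newer kernel code would more likely use kstrtoull(), which rejects trailing garbage on its own.

#include <linux/errno.h>
#include <linux/kernel.h>

/* Parse a number that must span exactly [start, end); returns 0 or -EINVAL. */
static int demo_parse_exact(char *start, char *end, unsigned long long *val)
{
	char *endp;

	*end = '\0';				/* terminate the token in place */
	*val = simple_strtoull(start, &endp, 0);
	if (endp != end)			/* trailing junk inside the token */
		return -EINVAL;
	return 0;
}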
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_lvfs.c b/drivers/staging/lustre/lustre/obdclass/llog_lvfs.c
index 7e12dc62141..5385d8e658c 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_lvfs.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_lvfs.c
@@ -64,7 +64,6 @@ static int llog_lvfs_pad(struct obd_device *obd, struct l_file *file,
struct llog_rec_hdr rec = { 0 };
struct llog_rec_tail tail;
int rc;
- ENTRY;
LASSERT(len >= LLOG_MIN_REC_SIZE && (len & 0x7) == 0);
@@ -86,7 +85,7 @@ static int llog_lvfs_pad(struct obd_device *obd, struct l_file *file,
}
out:
- RETURN(rc);
+ return rc;
}
static int llog_lvfs_write_blob(struct obd_device *obd, struct l_file *file,
@@ -97,8 +96,6 @@ static int llog_lvfs_write_blob(struct obd_device *obd, struct l_file *file,
loff_t saved_off = file->f_pos;
int buflen = rec->lrh_len;
- ENTRY;
-
file->f_pos = off;
if (buflen == 0)
@@ -140,7 +137,7 @@ static int llog_lvfs_write_blob(struct obd_device *obd, struct l_file *file,
if (saved_off > file->f_pos)
file->f_pos = saved_off;
LASSERT(rc <= 0);
- RETURN(rc);
+ return rc;
}
static int llog_lvfs_read_blob(struct obd_device *obd, struct l_file *file,
@@ -148,14 +145,13 @@ static int llog_lvfs_read_blob(struct obd_device *obd, struct l_file *file,
{
loff_t offset = off;
int rc;
- ENTRY;
rc = fsfilt_read_record(obd, file, buf, size, &offset);
if (rc) {
CERROR("error reading log record: rc %d\n", rc);
- RETURN(rc);
+ return rc;
}
- RETURN(0);
+ return 0;
}
static int llog_lvfs_read_header(const struct lu_env *env,
@@ -163,7 +159,6 @@ static int llog_lvfs_read_header(const struct lu_env *env,
{
struct obd_device *obd;
int rc;
- ENTRY;
LASSERT(sizeof(*handle->lgh_hdr) == LLOG_CHUNK_SIZE);
@@ -171,7 +166,7 @@ static int llog_lvfs_read_header(const struct lu_env *env,
if (i_size_read(handle->lgh_file->f_dentry->d_inode) == 0) {
CDEBUG(D_HA, "not reading header from 0-byte log\n");
- RETURN(LLOG_EEMPTY);
+ return LLOG_EEMPTY;
}
rc = llog_lvfs_read_blob(obd, handle->lgh_file, handle->lgh_hdr,
@@ -206,7 +201,7 @@ static int llog_lvfs_read_header(const struct lu_env *env,
handle->lgh_last_idx = handle->lgh_hdr->llh_tail.lrt_index;
handle->lgh_file->f_pos = i_size_read(handle->lgh_file->f_dentry->d_inode);
- RETURN(rc);
+ return rc;
}
/* returns negative in on error; 0 if success && reccookie == 0; 1 otherwise */
@@ -223,7 +218,6 @@ static int llog_lvfs_write_rec(const struct lu_env *env,
struct obd_device *obd;
struct file *file;
size_t left;
- ENTRY;
llh = loghandle->lgh_hdr;
file = loghandle->lgh_file;
@@ -236,7 +230,7 @@ static int llog_lvfs_write_rec(const struct lu_env *env,
else
rc = (reclen > LLOG_CHUNK_SIZE) ? -E2BIG : 0;
if (rc)
- RETURN(rc);
+ return rc;
if (buf)
/* write_blob adds header and tail to lrh_len. */
@@ -253,7 +247,7 @@ static int llog_lvfs_write_rec(const struct lu_env *env,
}
if (idx && llh->llh_size && llh->llh_size != rec->lrh_len)
- RETURN(-EINVAL);
+ return -EINVAL;
if (!ext2_test_bit(idx, llh->llh_bitmap))
CERROR("Modify unset record %u\n", idx);
@@ -263,7 +257,7 @@ static int llog_lvfs_write_rec(const struct lu_env *env,
rc = llog_lvfs_write_blob(obd, file, &llh->llh_hdr, NULL, 0);
/* we are done if we only write the header or on error */
if (rc || idx == 0)
- RETURN(rc);
+ return rc;
if (buf) {
/* We assume that caller has set lgh_cur_* */
@@ -277,7 +271,7 @@ static int llog_lvfs_write_rec(const struct lu_env *env,
if (rec->lrh_index != loghandle->lgh_cur_idx) {
CERROR("modify idx mismatch %u/%d\n",
idx, loghandle->lgh_cur_idx);
- RETURN(-EFAULT);
+ return -EFAULT;
}
} else {
/* Assumes constant lrh_len */
@@ -290,7 +284,7 @@ static int llog_lvfs_write_rec(const struct lu_env *env,
reccookie->lgc_index = idx;
rc = 1;
}
- RETURN(rc);
+ return rc;
}
/* Make sure that records don't cross a chunk boundary, so we can
@@ -308,12 +302,12 @@ static int llog_lvfs_write_rec(const struct lu_env *env,
index = loghandle->lgh_last_idx + 1;
rc = llog_lvfs_pad(obd, file, left, index);
if (rc)
- RETURN(rc);
+ return rc;
loghandle->lgh_last_idx++; /*for pad rec*/
}
/* if it's the last idx in log file, then return -ENOSPC */
if (loghandle->lgh_last_idx >= LLOG_BITMAP_SIZE(llh) - 1)
- RETURN(-ENOSPC);
+ return -ENOSPC;
loghandle->lgh_last_idx++;
index = loghandle->lgh_last_idx;
LASSERT(index < LLOG_BITMAP_SIZE(llh));
@@ -339,11 +333,11 @@ static int llog_lvfs_write_rec(const struct lu_env *env,
rc = llog_lvfs_write_blob(obd, file, &llh->llh_hdr, NULL, 0);
if (rc)
- RETURN(rc);
+ return rc;
rc = llog_lvfs_write_blob(obd, file, rec, buf, file->f_pos);
if (rc)
- RETURN(rc);
+ return rc;
CDEBUG(D_RPCTRACE, "added record "DOSTID": idx: %u, %u \n",
POSTID(&loghandle->lgh_id.lgl_oi), index, rec->lrh_len);
@@ -362,7 +356,7 @@ static int llog_lvfs_write_rec(const struct lu_env *env,
if (rc == 0 && rec->lrh_type == LLOG_GEN_REC)
rc = 1;
- RETURN(rc);
+ return rc;
}
/* We can skip reading at least as many log blocks as the number of
@@ -391,10 +385,9 @@ static int llog_lvfs_next_block(const struct lu_env *env,
int len)
{
int rc;
- ENTRY;
if (len == 0 || len & (LLOG_CHUNK_SIZE - 1))
- RETURN(-EINVAL);
+ return -EINVAL;
CDEBUG(D_OTHER, "looking for log index %u (cur idx %u off "LPU64")\n",
next_idx, *cur_idx, *cur_offset);
@@ -419,7 +412,7 @@ static int llog_lvfs_next_block(const struct lu_env *env,
POSTID(&loghandle->lgh_id.lgl_oi),
loghandle->lgh_id.lgl_ogen,
*cur_offset);
- RETURN(rc);
+ return rc;
}
/* put number of bytes read into rc to make code simpler */
@@ -430,13 +423,13 @@ static int llog_lvfs_next_block(const struct lu_env *env,
}
if (rc == 0) /* end of file, nothing to do */
- RETURN(0);
+ return 0;
if (rc < sizeof(*tail)) {
CERROR("Invalid llog block at log id "DOSTID"/%u offset"
LPU64"\n", POSTID(&loghandle->lgh_id.lgl_oi),
loghandle->lgh_id.lgl_ogen, *cur_offset);
- RETURN(-EINVAL);
+ return -EINVAL;
}
rec = buf;
@@ -461,7 +454,7 @@ static int llog_lvfs_next_block(const struct lu_env *env,
CERROR("Invalid llog tail at log id "DOSTID"/%u offset "
LPU64"\n", POSTID(&loghandle->lgh_id.lgl_oi),
loghandle->lgh_id.lgl_ogen, *cur_offset);
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (tail->lrt_index < next_idx)
continue;
@@ -471,11 +464,11 @@ static int llog_lvfs_next_block(const struct lu_env *env,
if (rec->lrh_index > next_idx) {
CERROR("missed desired record? %u > %u\n",
rec->lrh_index, next_idx);
- RETURN(-ENOENT);
+ return -ENOENT;
}
- RETURN(0);
+ return 0;
}
- RETURN(-EIO);
+ return -EIO;
}
static int llog_lvfs_prev_block(const struct lu_env *env,
@@ -484,10 +477,9 @@ static int llog_lvfs_prev_block(const struct lu_env *env,
{
__u64 cur_offset;
int rc;
- ENTRY;
if (len == 0 || len & (LLOG_CHUNK_SIZE - 1))
- RETURN(-EINVAL);
+ return -EINVAL;
CDEBUG(D_OTHER, "looking for log index %u\n", prev_idx);
@@ -508,20 +500,20 @@ static int llog_lvfs_prev_block(const struct lu_env *env,
POSTID(&loghandle->lgh_id.lgl_oi),
loghandle->lgh_id.lgl_ogen,
cur_offset);
- RETURN(rc);
+ return rc;
}
/* put number of bytes read into rc to make code simpler */
rc = cur_offset - ppos;
if (rc == 0) /* end of file, nothing to do */
- RETURN(0);
+ return 0;
if (rc < sizeof(*tail)) {
CERROR("Invalid llog block at log id "DOSTID"/%u offset"
LPU64"\n", POSTID(&loghandle->lgh_id.lgl_oi),
loghandle->lgh_id.lgl_ogen, cur_offset);
- RETURN(-EINVAL);
+ return -EINVAL;
}
rec = buf;
@@ -544,7 +536,7 @@ static int llog_lvfs_prev_block(const struct lu_env *env,
CERROR("Invalid llog tail at log id "DOSTID"/%u offset"
LPU64"\n", POSTID(&loghandle->lgh_id.lgl_oi),
loghandle->lgh_id.lgl_ogen, cur_offset);
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (tail->lrt_index < prev_idx)
continue;
@@ -554,11 +546,11 @@ static int llog_lvfs_prev_block(const struct lu_env *env,
if (rec->lrh_index > prev_idx) {
CERROR("missed desired record? %u > %u\n",
rec->lrh_index, prev_idx);
- RETURN(-ENOENT);
+ return -ENOENT;
}
- RETURN(0);
+ return 0;
}
- RETURN(-EIO);
+ return -EIO;
}
static struct file *llog_filp_open(char *dir, char *name, int flags, int mode)
@@ -593,8 +585,6 @@ static int llog_lvfs_open(const struct lu_env *env, struct llog_handle *handle,
struct obd_device *obd;
int rc = 0;
- ENTRY;
-
LASSERT(ctxt);
LASSERT(ctxt->loc_exp);
LASSERT(ctxt->loc_exp->exp_obd);
@@ -661,12 +651,12 @@ static int llog_lvfs_open(const struct lu_env *env, struct llog_handle *handle,
if (open_param != LLOG_OPEN_NEW && handle->lgh_file == NULL)
GOTO(out_name, rc = -ENOENT);
- RETURN(0);
+ return 0;
out_name:
if (handle->lgh_name != NULL)
OBD_FREE(handle->lgh_name, strlen(name) + 1);
out:
- RETURN(rc);
+ return rc;
}
static int llog_lvfs_exist(struct llog_handle *handle)
@@ -688,8 +678,6 @@ static int llog_lvfs_create(const struct lu_env *env,
int rc = 0;
int open_flags = O_RDWR | O_CREAT | O_LARGEFILE;
- ENTRY;
-
LASSERT(ctxt);
LASSERT(ctxt->loc_exp);
obd = ctxt->loc_exp->exp_obd;
@@ -699,7 +687,7 @@ static int llog_lvfs_create(const struct lu_env *env,
file = llog_filp_open(MOUNT_CONFIGS_DIR, handle->lgh_name,
open_flags, 0644);
if (IS_ERR(file))
- RETURN(PTR_ERR(file));
+ return PTR_ERR(file);
lustre_build_llog_lvfs_oid(&handle->lgh_id,
file->f_dentry->d_inode->i_ino,
@@ -708,7 +696,7 @@ static int llog_lvfs_create(const struct lu_env *env,
} else {
OBDO_ALLOC(oa);
if (oa == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
ostid_set_seq_llog(&oa->o_oi);
oa->o_valid = OBD_MD_FLGENER | OBD_MD_FLGROUP;
@@ -736,7 +724,7 @@ static int llog_lvfs_create(const struct lu_env *env,
out:
OBDO_FREE(oa);
}
- RETURN(rc);
+ return rc;
}
static int llog_lvfs_close(const struct lu_env *env,
@@ -744,10 +732,8 @@ static int llog_lvfs_close(const struct lu_env *env,
{
int rc;
- ENTRY;
-
if (handle->lgh_file == NULL)
- RETURN(0);
+ return 0;
rc = filp_close(handle->lgh_file, 0);
if (rc)
CERROR("%s: error closing llog #"DOSTID"#%08x: "
@@ -759,7 +745,7 @@ static int llog_lvfs_close(const struct lu_env *env,
OBD_FREE(handle->lgh_name, strlen(handle->lgh_name) + 1);
handle->lgh_name = NULL;
}
- RETURN(rc);
+ return rc;
}
static int llog_lvfs_destroy(const struct lu_env *env,
@@ -772,7 +758,6 @@ static int llog_lvfs_destroy(const struct lu_env *env,
void *th;
struct inode *inode;
int rc, rc1;
- ENTRY;
dir = MOUNT_CONFIGS_DIR;
@@ -795,12 +780,12 @@ static int llog_lvfs_destroy(const struct lu_env *env,
dput(fdentry);
pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
- RETURN(rc);
+ return rc;
}
OBDO_ALLOC(oa);
if (oa == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
oa->o_oi = handle->lgh_id.lgl_oi;
oa->o_generation = handle->lgh_id.lgl_ogen;
@@ -825,7 +810,7 @@ static int llog_lvfs_destroy(const struct lu_env *env,
rc = rc1;
out:
OBDO_FREE(oa);
- RETURN(rc);
+ return rc;
}
static int llog_lvfs_declare_create(const struct lu_env *env,
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_obd.c b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
index 7e229079631..71817af7539 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_obd.c
@@ -110,7 +110,6 @@ int llog_cleanup(const struct lu_env *env, struct llog_ctxt *ctxt)
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
struct obd_llog_group *olg;
int rc, idx;
- ENTRY;
LASSERT(ctxt != NULL);
LASSERT(ctxt != LP_POISON);
@@ -139,7 +138,7 @@ int llog_cleanup(const struct lu_env *env, struct llog_ctxt *ctxt)
l_wait_event(olg->olg_waitq,
llog_group_ctxt_null(olg, idx), &lwi);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_cleanup);
@@ -149,16 +148,15 @@ int llog_setup(const struct lu_env *env, struct obd_device *obd,
{
struct llog_ctxt *ctxt;
int rc = 0;
- ENTRY;
if (index < 0 || index >= LLOG_MAX_CTXTS)
- RETURN(-EINVAL);
+ return -EINVAL;
LASSERT(olg != NULL);
ctxt = llog_new_ctxt(obd);
if (!ctxt)
- RETURN(-ENOMEM);
+ return -ENOMEM;
ctxt->loc_obd = obd;
ctxt->loc_olg = olg;
@@ -189,7 +187,7 @@ int llog_setup(const struct lu_env *env, struct obd_device *obd,
}
rc = 0;
}
- RETURN(rc);
+ return rc;
}
if (op->lop_setup) {
@@ -210,22 +208,21 @@ int llog_setup(const struct lu_env *env, struct obd_device *obd,
ctxt->loc_flags &= ~LLOG_CTXT_FLAG_UNINITIALIZED;
}
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_setup);
int llog_sync(struct llog_ctxt *ctxt, struct obd_export *exp, int flags)
{
int rc = 0;
- ENTRY;
if (!ctxt)
- RETURN(0);
+ return 0;
if (CTXTP(ctxt, sync))
rc = CTXTP(ctxt, sync)(ctxt, exp, flags);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_sync);
@@ -234,15 +231,14 @@ int llog_obd_add(const struct lu_env *env, struct llog_ctxt *ctxt,
struct llog_cookie *logcookies, int numcookies)
{
int raised, rc;
- ENTRY;
if (!ctxt) {
CERROR("No ctxt\n");
- RETURN(-ENODEV);
+ return -ENODEV;
}
if (ctxt->loc_flags & LLOG_CTXT_FLAG_UNINITIALIZED)
- RETURN(-ENXIO);
+ return -ENXIO;
CTXT_CHECK_OP(ctxt, obd_add, -EOPNOTSUPP);
raised = cfs_cap_raised(CFS_CAP_SYS_RESOURCE);
@@ -252,7 +248,7 @@ int llog_obd_add(const struct lu_env *env, struct llog_ctxt *ctxt,
numcookies);
if (!raised)
cfs_cap_lower(CFS_CAP_SYS_RESOURCE);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_obd_add);
@@ -261,16 +257,15 @@ int llog_cancel(const struct lu_env *env, struct llog_ctxt *ctxt,
struct llog_cookie *cookies, int flags)
{
int rc;
- ENTRY;
if (!ctxt) {
CERROR("No ctxt\n");
- RETURN(-ENODEV);
+ return -ENODEV;
}
CTXT_CHECK_OP(ctxt, cancel, -EOPNOTSUPP);
rc = CTXTP(ctxt, cancel)(env, ctxt, lsm, count, cookies, flags);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_cancel);
@@ -278,24 +273,24 @@ int obd_llog_init(struct obd_device *obd, struct obd_llog_group *olg,
struct obd_device *disk_obd, int *index)
{
int rc;
- ENTRY;
+
OBD_CHECK_DT_OP(obd, llog_init, 0);
OBD_COUNTER_INCREMENT(obd, llog_init);
rc = OBP(obd, llog_init)(obd, olg, disk_obd, index);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(obd_llog_init);
int obd_llog_finish(struct obd_device *obd, int count)
{
int rc;
- ENTRY;
+
OBD_CHECK_DT_OP(obd, llog_finish, 0);
OBD_COUNTER_INCREMENT(obd, llog_finish);
rc = OBP(obd, llog_finish)(obd, count);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(obd_llog_finish);
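The llog hunks above and below all apply the same mechanical conversion: Lustre's ENTRY/EXIT/RETURN debug macros, which historically expanded to entry/exit CDEBUG() traces in libcfs, are dropped in favour of plain C control flow. A minimal self-contained sketch of the pattern follows; demo_convert_old()/demo_convert_new() and the simplified stand-in macro definitions are illustrative only, not taken from the tree.

#include <linux/errno.h>
#include <linux/printk.h>

/* Simplified stand-ins for the real libcfs macros so the sketch compiles on
 * its own; the real definitions also carry debug-subsystem masks. */
#define ENTRY		pr_debug("entered %s\n", __func__)
#define RETURN(rc)	do { pr_debug("leaving %s: rc = %ld\n", __func__, (long)(rc)); return (rc); } while (0)

/* Before the patch: every function traces its entry and its return value. */
static int demo_convert_old(int arg)
{
	int rc;
	ENTRY;

	rc = (arg < 0) ? -EINVAL : 0;
	RETURN(rc);
}

/* After the patch: the tracing is gone, only the ordinary return remains. */
static int demo_convert_new(int arg)
{
	return (arg < 0) ? -EINVAL : 0;
}

Apart from losing the trace lines, behaviour is unchanged, which is why the conversion can be applied so uniformly across these files.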
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_osd.c b/drivers/staging/lustre/lustre/obdclass/llog_osd.c
index 6dbd21a863c..654c8e18965 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_osd.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_osd.c
@@ -41,10 +41,6 @@
#define DEBUG_SUBSYSTEM S_LOG
-#ifndef EXPORT_SYMTAB
-#define EXPORT_SYMTAB
-#endif
-
#include <obd.h>
#include <obd_class.h>
#include <lustre_fid.h>
@@ -97,8 +93,6 @@ static int llog_osd_pad(const struct lu_env *env, struct dt_object *o,
struct llog_thread_info *lgi = llog_info(env);
int rc;
- ENTRY;
-
LASSERT(th);
LASSERT(off);
LASSERT(len >= LLOG_MIN_REC_SIZE && (len & 0x7) == 0);
@@ -126,7 +120,7 @@ static int llog_osd_pad(const struct lu_env *env, struct dt_object *o,
o->do_lu.lo_dev->ld_obd->obd_name, rc);
out:
dt_write_unlock(env, o);
- RETURN(rc);
+ return rc;
}
static int llog_osd_write_blob(const struct lu_env *env, struct dt_object *o,
@@ -137,8 +131,6 @@ static int llog_osd_write_blob(const struct lu_env *env, struct dt_object *o,
int buflen = rec->lrh_len;
int rc;
- ENTRY;
-
LASSERT(env);
LASSERT(o);
@@ -203,7 +195,7 @@ out:
dt_attr_set(env, o, &lgi->lgi_attr, th, BYPASS_CAPA);
}
- RETURN(rc);
+ return rc;
}
static int llog_osd_read_header(const struct lu_env *env,
@@ -214,8 +206,6 @@ static int llog_osd_read_header(const struct lu_env *env,
struct llog_thread_info *lgi;
int rc;
- ENTRY;
-
LASSERT(sizeof(*handle->lgh_hdr) == LLOG_CHUNK_SIZE);
o = handle->lgh_obj;
@@ -225,13 +215,13 @@ static int llog_osd_read_header(const struct lu_env *env,
rc = dt_attr_get(env, o, &lgi->lgi_attr, NULL);
if (rc)
- RETURN(rc);
+ return rc;
LASSERT(lgi->lgi_attr.la_valid & LA_SIZE);
if (lgi->lgi_attr.la_size == 0) {
CDEBUG(D_HA, "not reading header from 0-byte log\n");
- RETURN(LLOG_EEMPTY);
+ return LLOG_EEMPTY;
}
lgi->lgi_off = 0;
@@ -243,7 +233,7 @@ static int llog_osd_read_header(const struct lu_env *env,
CERROR("%s: error reading log header from "DFID": rc = %d\n",
o->do_lu.lo_dev->ld_obd->obd_name,
PFID(lu_object_fid(&o->do_lu)), rc);
- RETURN(rc);
+ return rc;
}
llh_hdr = &handle->lgh_hdr->llh_hdr;
@@ -256,7 +246,7 @@ static int llog_osd_read_header(const struct lu_env *env,
handle->lgh_name ? handle->lgh_name : "",
PFID(lu_object_fid(&o->do_lu)),
llh_hdr->lrh_type, LLOG_HDR_MAGIC);
- RETURN(-EIO);
+ return -EIO;
} else if (llh_hdr->lrh_len != LLOG_CHUNK_SIZE) {
CERROR("%s: incorrectly sized log %s "DFID" header: "
"%#x (expected %#x)\n"
@@ -265,12 +255,12 @@ static int llog_osd_read_header(const struct lu_env *env,
handle->lgh_name ? handle->lgh_name : "",
PFID(lu_object_fid(&o->do_lu)),
llh_hdr->lrh_len, LLOG_CHUNK_SIZE);
- RETURN(-EIO);
+ return -EIO;
}
handle->lgh_last_idx = handle->lgh_hdr->llh_tail.lrt_index;
- RETURN(0);
+ return 0;
}
static int llog_osd_declare_write_rec(const struct lu_env *env,
@@ -282,8 +272,6 @@ static int llog_osd_declare_write_rec(const struct lu_env *env,
struct dt_object *o;
int rc;
- ENTRY;
-
LASSERT(env);
LASSERT(th);
LASSERT(loghandle);
@@ -295,18 +283,18 @@ static int llog_osd_declare_write_rec(const struct lu_env *env,
rc = dt_declare_record_write(env, o, sizeof(struct llog_log_hdr), 0,
th);
if (rc || idx == 0) /* if error or just header */
- RETURN(rc);
+ return rc;
if (dt_object_exists(o)) {
rc = dt_attr_get(env, o, &lgi->lgi_attr, BYPASS_CAPA);
lgi->lgi_off = lgi->lgi_attr.la_size;
LASSERT(ergo(rc == 0, lgi->lgi_attr.la_valid & LA_SIZE));
if (rc)
- RETURN(rc);
+ return rc;
rc = dt_declare_punch(env, o, lgi->lgi_off, OBD_OBJECT_EOF, th);
if (rc)
- RETURN(rc);
+ return rc;
} else {
lgi->lgi_off = 0;
}
@@ -314,7 +302,7 @@ static int llog_osd_declare_write_rec(const struct lu_env *env,
/* XXX: implement declared window or multi-chunks approach */
rc = dt_declare_record_write(env, o, 32 * 1024, lgi->lgi_off, th);
- RETURN(rc);
+ return rc;
}
/* returns negative on error; 0 if success && reccookie == 0; 1 otherwise */
@@ -333,8 +321,6 @@ static int llog_osd_write_rec(const struct lu_env *env,
struct dt_object *o;
size_t left;
- ENTRY;
-
LASSERT(env);
llh = loghandle->lgh_hdr;
LASSERT(llh);
@@ -352,11 +338,11 @@ static int llog_osd_write_rec(const struct lu_env *env,
else
rc = (reclen > LLOG_CHUNK_SIZE) ? -E2BIG : 0;
if (rc)
- RETURN(rc);
+ return rc;
rc = dt_attr_get(env, o, &lgi->lgi_attr, NULL);
if (rc)
- RETURN(rc);
+ return rc;
if (buf)
/* write_blob adds header and tail to lrh_len. */
@@ -369,7 +355,7 @@ static int llog_osd_write_rec(const struct lu_env *env,
LBUG();
if (idx && llh->llh_size && llh->llh_size != rec->lrh_len)
- RETURN(-EINVAL);
+ return -EINVAL;
if (!ext2_test_bit(idx, llh->llh_bitmap))
CERROR("%s: modify unset record %u\n",
@@ -384,7 +370,7 @@ static int llog_osd_write_rec(const struct lu_env *env,
&lgi->lgi_off, th);
/* we are done if we only write the header or on error */
if (rc || idx == 0)
- RETURN(rc);
+ return rc;
if (buf) {
/* We assume that caller has set lgh_cur_* */
@@ -400,7 +386,7 @@ static int llog_osd_write_rec(const struct lu_env *env,
CERROR("%s: modify idx mismatch %u/%d\n",
o->do_lu.lo_dev->ld_obd->obd_name, idx,
loghandle->lgh_cur_idx);
- RETURN(-EFAULT);
+ return -EFAULT;
}
} else {
/* Assumes constant lrh_len */
@@ -413,7 +399,7 @@ static int llog_osd_write_rec(const struct lu_env *env,
reccookie->lgc_index = idx;
rc = 1;
}
- RETURN(rc);
+ return rc;
}
/* Make sure that records don't cross a chunk boundary, so we can
@@ -432,12 +418,12 @@ static int llog_osd_write_rec(const struct lu_env *env,
index = loghandle->lgh_last_idx + 1;
rc = llog_osd_pad(env, o, &lgi->lgi_off, left, index, th);
if (rc)
- RETURN(rc);
+ return rc;
loghandle->lgh_last_idx++; /*for pad rec*/
}
/* if it's the last idx in log file, then return -ENOSPC */
if (loghandle->lgh_last_idx >= LLOG_BITMAP_SIZE(llh) - 1)
- RETURN(-ENOSPC);
+ return -ENOSPC;
loghandle->lgh_last_idx++;
index = loghandle->lgh_last_idx;
@@ -509,7 +495,7 @@ out:
reccookie->lgc_subsys = -1;
rc = 1;
}
- RETURN(rc);
+ return rc;
}
/* We can skip reading at least as many log blocks as the number of
@@ -541,13 +527,11 @@ static int llog_osd_next_block(const struct lu_env *env,
struct dt_device *dt;
int rc;
- ENTRY;
-
LASSERT(env);
LASSERT(lgi);
if (len == 0 || len & (LLOG_CHUNK_SIZE - 1))
- RETURN(-EINVAL);
+ return -EINVAL;
CDEBUG(D_OTHER, "looking for log index %u (cur idx %u off "LPU64")\n",
next_idx, *cur_idx, *cur_offset);
@@ -668,10 +652,8 @@ static int llog_osd_prev_block(const struct lu_env *env,
loff_t cur_offset;
int rc;
- ENTRY;
-
if (len == 0 || len & (LLOG_CHUNK_SIZE - 1))
- RETURN(-EINVAL);
+ return -EINVAL;
CDEBUG(D_OTHER, "looking for log index %u\n", prev_idx);
@@ -798,8 +780,6 @@ static int llog_osd_open(const struct lu_env *env, struct llog_handle *handle,
struct local_oid_storage *los;
int rc = 0;
- ENTRY;
-
LASSERT(env);
LASSERT(ctxt);
LASSERT(ctxt->loc_exp);
@@ -809,7 +789,7 @@ static int llog_osd_open(const struct lu_env *env, struct llog_handle *handle,
ls = ls_device_get(dt);
if (IS_ERR(ls))
- RETURN(PTR_ERR(ls));
+ return PTR_ERR(ls);
mutex_lock(&ls->ls_los_mutex);
los = dt_los_find(ls, name != NULL ? FID_SEQ_LLOG_NAME : FID_SEQ_LLOG);
@@ -864,7 +844,7 @@ static int llog_osd_open(const struct lu_env *env, struct llog_handle *handle,
handle->private_data = los;
LASSERT(handle->lgh_ctxt);
- RETURN(rc);
+ return rc;
out_put:
lu_object_put(env, &o->do_lu);
@@ -873,7 +853,7 @@ out_name:
OBD_FREE(handle->lgh_name, strlen(name) + 1);
out:
dt_los_put(los);
- RETURN(rc);
+ return rc;
}
static int llog_osd_exist(struct llog_handle *handle)
@@ -891,33 +871,31 @@ static int llog_osd_declare_create(const struct lu_env *env,
struct dt_object *o;
int rc;
- ENTRY;
-
LASSERT(res->lgh_obj);
LASSERT(th);
/* object can be created by another thread */
o = res->lgh_obj;
if (dt_object_exists(o))
- RETURN(0);
+ return 0;
los = res->private_data;
LASSERT(los);
rc = llog_osd_declare_new_object(env, los, o, th);
if (rc)
- RETURN(rc);
+ return rc;
rc = dt_declare_record_write(env, o, LLOG_CHUNK_SIZE, 0, th);
if (rc)
- RETURN(rc);
+ return rc;
if (res->lgh_name) {
struct dt_object *llog_dir;
llog_dir = llog_osd_dir_get(env, res->lgh_ctxt);
if (IS_ERR(llog_dir))
- RETURN(PTR_ERR(llog_dir));
+ return PTR_ERR(llog_dir);
logid_to_fid(&res->lgh_id, &lgi->lgi_fid);
rc = dt_declare_insert(env, llog_dir,
(struct dt_rec *)&lgi->lgi_fid,
@@ -928,7 +906,7 @@ static int llog_osd_declare_create(const struct lu_env *env,
o->do_lu.lo_dev->ld_obd->obd_name,
res->lgh_name, rc);
}
- RETURN(rc);
+ return rc;
}
/* This is a callback from the llog_* functions.
@@ -941,15 +919,13 @@ static int llog_osd_create(const struct lu_env *env, struct llog_handle *res,
struct dt_object *o;
int rc = 0;
- ENTRY;
-
LASSERT(env);
o = res->lgh_obj;
LASSERT(o);
/* llog can be already created */
if (dt_object_exists(o))
- RETURN(-EEXIST);
+ return -EEXIST;
los = res->private_data;
LASSERT(los);
@@ -962,14 +938,14 @@ static int llog_osd_create(const struct lu_env *env, struct llog_handle *res,
dt_write_unlock(env, o);
if (rc)
- RETURN(rc);
+ return rc;
if (res->lgh_name) {
struct dt_object *llog_dir;
llog_dir = llog_osd_dir_get(env, res->lgh_ctxt);
if (IS_ERR(llog_dir))
- RETURN(PTR_ERR(llog_dir));
+ return PTR_ERR(llog_dir);
logid_to_fid(&res->lgh_id, &lgi->lgi_fid);
dt_read_lock(env, llog_dir, 0);
@@ -984,7 +960,7 @@ static int llog_osd_create(const struct lu_env *env, struct llog_handle *res,
o->do_lu.lo_dev->ld_obd->obd_name,
res->lgh_name, rc);
}
- RETURN(rc);
+ return rc;
}
static int llog_osd_close(const struct lu_env *env, struct llog_handle *handle)
@@ -992,8 +968,6 @@ static int llog_osd_close(const struct lu_env *env, struct llog_handle *handle)
struct local_oid_storage *los;
int rc = 0;
- ENTRY;
-
LASSERT(handle->lgh_obj);
lu_object_put(env, &handle->lgh_obj->do_lu);
@@ -1005,7 +979,7 @@ static int llog_osd_close(const struct lu_env *env, struct llog_handle *handle)
if (handle->lgh_name)
OBD_FREE(handle->lgh_name, strlen(handle->lgh_name) + 1);
- RETURN(rc);
+ return rc;
}
static int llog_osd_destroy(const struct lu_env *env,
@@ -1018,8 +992,6 @@ static int llog_osd_destroy(const struct lu_env *env,
char *name = NULL;
int rc;
- ENTRY;
-
ctxt = loghandle->lgh_ctxt;
LASSERT(ctxt);
@@ -1032,7 +1004,7 @@ static int llog_osd_destroy(const struct lu_env *env,
th = dt_trans_create(env, d);
if (IS_ERR(th))
- RETURN(PTR_ERR(th));
+ return PTR_ERR(th);
if (loghandle->lgh_name) {
llog_dir = llog_osd_dir_get(env, ctxt);
@@ -1082,7 +1054,7 @@ out_trans:
dt_trans_stop(env, d, th);
if (llog_dir != NULL)
lu_object_put(env, &llog_dir->do_lu);
- RETURN(rc);
+ return rc;
}
static int llog_osd_setup(const struct lu_env *env, struct obd_device *obd,
@@ -1094,8 +1066,6 @@ static int llog_osd_setup(const struct lu_env *env, struct obd_device *obd,
struct llog_ctxt *ctxt;
int rc = 0;
- ENTRY;
-
LASSERT(obd);
LASSERT(olg->olg_ctxts[ctxt_idx]);
@@ -1131,7 +1101,7 @@ static int llog_osd_cleanup(const struct lu_env *env, struct llog_ctxt *ctxt)
dt = ctxt->loc_exp->exp_obd->obd_lvfs_ctxt.dt;
ls = ls_device_get(dt);
if (IS_ERR(ls))
- RETURN(PTR_ERR(ls));
+ return PTR_ERR(ls);
mutex_lock(&ls->ls_los_mutex);
los = dt_los_find(ls, FID_SEQ_LLOG);
@@ -1175,8 +1145,6 @@ int llog_osd_get_cat_list(const struct lu_env *env, struct dt_device *d,
struct thandle *th;
int rc, size;
- ENTRY;
-
LASSERT(d);
size = sizeof(*idarray) * count;
@@ -1186,7 +1154,7 @@ int llog_osd_get_cat_list(const struct lu_env *env, struct dt_device *d,
o = dt_locate(env, d, &lgi->lgi_fid);
if (IS_ERR(o))
- RETURN(PTR_ERR(o));
+ return PTR_ERR(o);
if (!dt_object_exists(o)) {
th = dt_trans_create(env, d);
@@ -1253,10 +1221,9 @@ out_trans:
GOTO(out, rc);
}
- EXIT;
out:
lu_object_put(env, &o->do_lu);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_osd_get_cat_list);
@@ -1270,7 +1237,7 @@ int llog_osd_put_cat_list(const struct lu_env *env, struct dt_device *d,
int rc, size;
if (!count)
- RETURN(0);
+ return 0;
LASSERT(d);
@@ -1281,7 +1248,7 @@ int llog_osd_put_cat_list(const struct lu_env *env, struct dt_device *d,
o = dt_locate(env, d, &lgi->lgi_fid);
if (IS_ERR(o))
- RETURN(PTR_ERR(o));
+ return PTR_ERR(o);
if (!dt_object_exists(o))
GOTO(out, rc = -ENOENT);
@@ -1318,6 +1285,6 @@ out_trans:
dt_trans_stop(env, d, th);
out:
lu_object_put(env, &o->do_lu);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_osd_put_cat_list);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_swab.c b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
index dedfecff95b..24ca099b01d 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_swab.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_swab.c
@@ -88,7 +88,6 @@ EXPORT_SYMBOL(lustre_swab_llog_id);
void lustre_swab_llogd_body (struct llogd_body *d)
{
- ENTRY;
print_llogd_body(d);
lustre_swab_llog_id(&d->lgd_logid);
__swab32s (&d->lgd_ctxt_idx);
@@ -98,7 +97,6 @@ void lustre_swab_llogd_body (struct llogd_body *d)
__swab32s (&d->lgd_len);
__swab64s (&d->lgd_cur_offset);
print_llogd_body(d);
- EXIT;
}
EXPORT_SYMBOL(lustre_swab_llogd_body);
@@ -203,6 +201,23 @@ void lustre_swab_llog_rec(struct llog_rec_hdr *rec)
break;
}
+ case HSM_AGENT_REC: {
+ struct llog_agent_req_rec *arr =
+ (struct llog_agent_req_rec *)rec;
+
+ __swab32s(&arr->arr_hai.hai_len);
+ __swab32s(&arr->arr_hai.hai_action);
+ lustre_swab_lu_fid(&arr->arr_hai.hai_fid);
+ lustre_swab_lu_fid(&arr->arr_hai.hai_dfid);
+ __swab64s(&arr->arr_hai.hai_cookie);
+ __swab64s(&arr->arr_hai.hai_extent.offset);
+ __swab64s(&arr->arr_hai.hai_extent.length);
+ __swab64s(&arr->arr_hai.hai_gid);
+ /* no swabbing for opaque data */
+ /* hai_data[0]; */
+ break;
+ }
+
case MDS_SETATTR64_REC:
{
struct llog_setattr64_rec *lsr =
@@ -281,20 +296,17 @@ static void print_llog_hdr(struct llog_log_hdr *h)
void lustre_swab_llog_hdr (struct llog_log_hdr *h)
{
- ENTRY;
print_llog_hdr(h);
lustre_swab_llog_rec(&h->llh_hdr);
print_llog_hdr(h);
- EXIT;
}
EXPORT_SYMBOL(lustre_swab_llog_hdr);
static void print_lustre_cfg(struct lustre_cfg *lcfg)
{
int i;
- ENTRY;
if (!(libcfs_debug & D_OTHER)) /* don't loop on nothing */
return;
@@ -311,20 +323,17 @@ static void print_lustre_cfg(struct lustre_cfg *lcfg)
for (i = 0; i < lcfg->lcfg_bufcount; i++)
CDEBUG(D_OTHER, "\tlcfg->lcfg_buflens[%d]: %d\n",
i, lcfg->lcfg_buflens[i]);
- EXIT;
}
void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg)
{
int i;
- ENTRY;
__swab32s(&lcfg->lcfg_version);
if (lcfg->lcfg_version != LUSTRE_CFG_VERSION) {
CERROR("not swabbing lustre_cfg version %#x (expecting %#x)\n",
lcfg->lcfg_version, LUSTRE_CFG_VERSION);
- EXIT;
return;
}
@@ -337,7 +346,6 @@ void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg)
__swab32s(&lcfg->lcfg_buflens[i]);
print_lustre_cfg(lcfg);
- EXIT;
return;
}
EXPORT_SYMBOL(lustre_swab_lustre_cfg);
@@ -360,7 +368,6 @@ struct cfg_marker32 {
void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size)
{
struct cfg_marker32 *cm32 = (struct cfg_marker32*)marker;
- ENTRY;
if (swab) {
__swab32s(&marker->cm_step);
@@ -401,7 +408,6 @@ void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size)
__swab64s(&marker->cm_canceltime);
}
- EXIT;
return;
}
EXPORT_SYMBOL(lustre_swab_cfg_marker);
diff --git a/drivers/staging/lustre/lustre/obdclass/llog_test.c b/drivers/staging/lustre/lustre/obdclass/llog_test.c
index d397f781ec4..d9e6d12215f 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog_test.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog_test.c
@@ -78,22 +78,22 @@ static int verify_handle(char *test, struct llog_handle *llh, int num_recs)
if (active_recs != num_recs) {
CERROR("%s: expected %d active recs after write, found %d\n",
test, num_recs, active_recs);
- RETURN(-ERANGE);
+ return -ERANGE;
}
if (llh->lgh_hdr->llh_count != num_recs) {
CERROR("%s: handle->count is %d, expected %d after write\n",
test, llh->lgh_hdr->llh_count, num_recs);
- RETURN(-ERANGE);
+ return -ERANGE;
}
if (llh->lgh_last_idx < last_idx) {
CERROR("%s: handle->last_idx is %d, expected %d after write\n",
test, llh->lgh_last_idx, last_idx);
- RETURN(-ERANGE);
+ return -ERANGE;
}
- RETURN(0);
+ return 0;
}
/* Test named-log create/open, close */
@@ -105,8 +105,6 @@ static int llog_test_1(const struct lu_env *env,
int rc;
int rc2;
- ENTRY;
-
CWARN("1a: create a log with name: %s\n", name);
ctxt = llog_get_context(obd, LLOG_TEST_ORIG_CTXT);
LASSERT(ctxt);
@@ -134,7 +132,7 @@ out_close:
}
out:
llog_ctxt_put(ctxt);
- RETURN(rc);
+ return rc;
}
/* Test named-log reopen; returns opened log on success */
@@ -146,8 +144,6 @@ static int llog_test_2(const struct lu_env *env, struct obd_device *obd,
struct llog_logid logid;
int rc;
- ENTRY;
-
CWARN("2a: re-open a log with name: %s\n", name);
ctxt = llog_get_context(obd, LLOG_TEST_ORIG_CTXT);
LASSERT(ctxt);
@@ -213,7 +209,7 @@ out_close_llh:
out_put:
llog_ctxt_put(ctxt);
- RETURN(rc);
+ return rc;
}
/* Test record writing, single and in bulk */
@@ -224,8 +220,6 @@ static int llog_test_3(const struct lu_env *env, struct obd_device *obd,
int rc, i;
int num_recs = 1; /* 1 for the header */
- ENTRY;
-
lgr.lgr_hdr.lrh_len = lgr.lgr_tail.lrt_len = sizeof(lgr);
lgr.lgr_hdr.lrh_type = LLOG_GEN_REC;
@@ -234,12 +228,12 @@ static int llog_test_3(const struct lu_env *env, struct obd_device *obd,
num_recs++;
if (rc < 0) {
CERROR("3a: write one log record failed: %d\n", rc);
- RETURN(rc);
+ return rc;
}
rc = verify_handle("3a", llh, num_recs);
if (rc)
- RETURN(rc);
+ return rc;
CWARN("3b: write 10 cfg log records with 8 bytes bufs\n");
for (i = 0; i < 10; i++) {
@@ -253,14 +247,14 @@ static int llog_test_3(const struct lu_env *env, struct obd_device *obd,
if (rc < 0) {
CERROR("3b: write 10 records failed at #%d: %d\n",
i + 1, rc);
- RETURN(rc);
+ return rc;
}
num_recs++;
}
rc = verify_handle("3b", llh, num_recs);
if (rc)
- RETURN(rc);
+ return rc;
CWARN("3c: write 1000 more log records\n");
for (i = 0; i < 1000; i++) {
@@ -268,14 +262,14 @@ static int llog_test_3(const struct lu_env *env, struct obd_device *obd,
if (rc < 0) {
CERROR("3c: write 1000 records failed at #%d: %d\n",
i + 1, rc);
- RETURN(rc);
+ return rc;
}
num_recs++;
}
rc = verify_handle("3c", llh, num_recs);
if (rc)
- RETURN(rc);
+ return rc;
CWARN("3d: write log more than BITMAP_SIZE, return -ENOSPC\n");
for (i = 0; i < LLOG_BITMAP_SIZE(llh->lgh_hdr) + 1; i++) {
@@ -299,20 +293,20 @@ static int llog_test_3(const struct lu_env *env, struct obd_device *obd,
} else if (rc < 0) {
CERROR("3d: write recs failed at #%d: %d\n",
i + 1, rc);
- RETURN(rc);
+ return rc;
}
num_recs++;
}
if (rc != -ENOSPC) {
CWARN("3d: write record more than BITMAP size!\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
CWARN("3d: wrote %d more records before end of llog is reached\n",
num_recs);
rc = verify_handle("3d", llh, num_recs);
- RETURN(rc);
+ return rc;
}
/* Test catalogue additions */
@@ -328,8 +322,6 @@ static int llog_test_4(const struct lu_env *env, struct obd_device *obd)
char *buf;
struct llog_rec_hdr rec;
- ENTRY;
-
ctxt = llog_get_context(obd, LLOG_TEST_ORIG_CTXT);
LASSERT(ctxt);
@@ -424,7 +416,7 @@ out:
}
ctxt_release:
llog_ctxt_put(ctxt);
- RETURN(rc);
+ return rc;
}
static int cat_counter;
@@ -437,7 +429,7 @@ static int cat_print_cb(const struct lu_env *env, struct llog_handle *llh,
if (rec->lrh_type != LLOG_LOGID_MAGIC) {
CERROR("invalid record in catalog\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
logid_to_fid(&lir->lid_id, &fid);
@@ -448,7 +440,7 @@ static int cat_print_cb(const struct lu_env *env, struct llog_handle *llh,
cat_counter++;
- RETURN(0);
+ return 0;
}
static int plain_counter;
@@ -460,7 +452,7 @@ static int plain_print_cb(const struct lu_env *env, struct llog_handle *llh,
if (!(llh->lgh_hdr->llh_flags & LLOG_F_IS_PLAIN)) {
CERROR("log is not plain\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
logid_to_fid(&llh->lgh_id, &fid);
@@ -470,7 +462,7 @@ static int plain_print_cb(const struct lu_env *env, struct llog_handle *llh,
plain_counter++;
- RETURN(0);
+ return 0;
}
static int cancel_count;
@@ -483,7 +475,7 @@ static int llog_cancel_rec_cb(const struct lu_env *env,
if (!(llh->lgh_hdr->llh_flags & LLOG_F_IS_PLAIN)) {
CERROR("log is not plain\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
cookie.lgc_lgl = llh->lgh_id;
@@ -492,8 +484,8 @@ static int llog_cancel_rec_cb(const struct lu_env *env,
llog_cat_cancel_records(env, llh->u.phd.phd_cat_handle, 1, &cookie);
cancel_count++;
if (cancel_count == LLOG_TEST_RECNUM)
- RETURN(-LLOG_EEMPTY);
- RETURN(0);
+ return -LLOG_EEMPTY;
+ return 0;
}
/* Test log and catalogue processing */
@@ -505,8 +497,6 @@ static int llog_test_5(const struct lu_env *env, struct obd_device *obd)
struct llog_mini_rec lmr;
struct llog_ctxt *ctxt;
- ENTRY;
-
ctxt = llog_get_context(obd, LLOG_TEST_ORIG_CTXT);
LASSERT(ctxt);
@@ -602,7 +592,7 @@ out:
out_put:
llog_ctxt_put(ctxt);
- RETURN(rc);
+ return rc;
}
/* Test client api; open log by name and process */
@@ -686,7 +676,7 @@ nctxt_put:
llog_ctxt_put(nctxt);
ctxt_release:
llog_ctxt_put(ctxt);
- RETURN(rc);
+ return rc;
}
static union {
@@ -728,12 +718,10 @@ static int llog_test_7_sub(const struct lu_env *env, struct llog_ctxt *ctxt)
int rc = 0, i, process_count;
int num_recs = 0;
- ENTRY;
-
rc = llog_open_create(env, ctxt, &llh, NULL, NULL);
if (rc) {
CERROR("7_sub: create log failed\n");
- RETURN(rc);
+ return rc;
}
rc = llog_init_handle(env, llh,
@@ -804,7 +792,7 @@ out_close:
if (rc)
llog_destroy(env, llh);
llog_close(env, llh);
- RETURN(rc);
+ return rc;
}
/* Test all llog records writing and processing */
@@ -813,8 +801,6 @@ static int llog_test_7(const struct lu_env *env, struct obd_device *obd)
struct llog_ctxt *ctxt;
int rc;
- ENTRY;
-
ctxt = llog_get_context(obd, LLOG_TEST_ORIG_CTXT);
CWARN("7a: test llog_logid_rec\n");
@@ -895,7 +881,7 @@ static int llog_test_7(const struct lu_env *env, struct obd_device *obd)
}
out:
llog_ctxt_put(ctxt);
- RETURN(rc);
+ return rc;
}
/* -------------------------------------------------------------------------
@@ -908,7 +894,6 @@ static int llog_run_tests(const struct lu_env *env, struct obd_device *obd)
int rc, err;
char name[10];
- ENTRY;
ctxt = llog_get_context(obd, LLOG_TEST_ORIG_CTXT);
LASSERT(ctxt);
@@ -970,18 +955,16 @@ static int llog_test_cleanup(struct obd_device *obd)
struct lu_env env;
int rc;
- ENTRY;
-
rc = lu_env_init(&env, LCT_LOCAL | LCT_MG_THREAD);
if (rc)
- RETURN(rc);
+ return rc;
tgt = obd->obd_lvfs_ctxt.dt->dd_lu_dev.ld_obd;
rc = llog_cleanup(&env, llog_get_context(tgt, LLOG_TEST_ORIG_CTXT));
if (rc)
CERROR("failed to llog_test_llog_finish: %d\n", rc);
lu_env_fini(&env);
- RETURN(rc);
+ return rc;
}
static int llog_test_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
@@ -993,16 +976,14 @@ static int llog_test_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
struct lu_context test_session;
int rc;
- ENTRY;
-
if (lcfg->lcfg_bufcount < 2) {
CERROR("requires a TARGET OBD name\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (lcfg->lcfg_buflens[1] < 1) {
CERROR("requires a TARGET OBD name\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
/* disk obd */
@@ -1010,12 +991,12 @@ static int llog_test_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
if (!tgt || !tgt->obd_attached || !tgt->obd_set_up) {
CERROR("target device not attached or not set up (%s)\n",
lustre_cfg_string(lcfg, 1));
- RETURN(-EINVAL);
+ return -EINVAL;
}
rc = lu_env_init(&env, LCT_LOCAL | LCT_MG_THREAD);
if (rc)
- RETURN(rc);
+ return rc;
rc = lu_context_init(&test_session, LCT_SESSION);
if (rc)
@@ -1056,7 +1037,7 @@ cleanup_session:
lu_context_fini(&test_session);
cleanup_env:
lu_env_fini(&env);
- RETURN(rc);
+ return rc;
}
static struct obd_ops llog_obd_ops = {
diff --git a/drivers/staging/lustre/lustre/obdclass/local_storage.c b/drivers/staging/lustre/lustre/obdclass/local_storage.c
index 3be35a83a49..cc19fbab020 100644
--- a/drivers/staging/lustre/lustre/obdclass/local_storage.c
+++ b/drivers/staging/lustre/lustre/obdclass/local_storage.c
@@ -45,17 +45,15 @@ static int ls_object_init(const struct lu_env *env, struct lu_object *o,
struct lu_object *below;
struct lu_device *under;
- ENTRY;
-
ls = container_of0(o->lo_dev, struct ls_device, ls_top_dev.dd_lu_dev);
under = &ls->ls_osd->dd_lu_dev;
below = under->ld_ops->ldo_object_alloc(env, o->lo_header, under);
if (below == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
lu_object_add(o, below);
- RETURN(0);
+ return 0;
}
static void ls_object_free(const struct lu_env *env, struct lu_object *o)
@@ -143,8 +141,6 @@ struct ls_device *ls_device_get(struct dt_device *dev)
{
struct ls_device *ls;
- ENTRY;
-
mutex_lock(&ls_list_mutex);
ls = __ls_find_dev(dev);
if (ls)
@@ -170,7 +166,7 @@ struct ls_device *ls_device_get(struct dt_device *dev)
list_add(&ls->ls_linkage, &ls_list_head);
out_ls:
mutex_unlock(&ls_list_mutex);
- RETURN(ls);
+ return ls;
}
void ls_device_put(const struct lu_env *env, struct ls_device *ls)
@@ -224,26 +220,24 @@ int local_object_declare_create(const struct lu_env *env,
struct dt_thread_info *dti = dt_info(env);
int rc;
- ENTRY;
-
/* update fid generation file */
if (los != NULL) {
LASSERT(dt_object_exists(los->los_obj));
rc = dt_declare_record_write(env, los->los_obj,
sizeof(struct los_ondisk), 0, th);
if (rc)
- RETURN(rc);
+ return rc;
}
rc = dt_declare_create(env, o, attr, NULL, dof, th);
if (rc)
- RETURN(rc);
+ return rc;
dti->dti_lb.lb_buf = NULL;
dti->dti_lb.lb_len = sizeof(dti->dti_lma);
rc = dt_declare_xattr_set(env, o, &dti->dti_lb, XATTR_NAME_LMA, 0, th);
- RETURN(rc);
+ return rc;
}
int local_object_create(const struct lu_env *env,
@@ -255,14 +249,12 @@ int local_object_create(const struct lu_env *env,
obd_id lastid;
int rc;
- ENTRY;
-
rc = dt_create(env, o, attr, NULL, dof, th);
if (rc)
- RETURN(rc);
+ return rc;
if (los == NULL)
- RETURN(rc);
+ return rc;
LASSERT(los->los_obj);
LASSERT(dt_object_exists(los->los_obj));
@@ -283,7 +275,7 @@ int local_object_create(const struct lu_env *env,
th);
mutex_unlock(&los->los_id_lock);
- RETURN(rc);
+ return rc;
}
/*
@@ -304,7 +296,7 @@ struct dt_object *__local_file_create(const struct lu_env *env,
dto = ls_locate(env, ls, fid);
if (unlikely(IS_ERR(dto)))
- RETURN(dto);
+ return dto;
LASSERT(dto != NULL);
if (dt_object_exists(dto))
@@ -377,7 +369,7 @@ out:
lu_object_put_nocache(env, &dto->do_lu);
dto = ERR_PTR(rc);
}
- RETURN(dto);
+ return dto;
}
/*
@@ -443,7 +435,7 @@ struct dt_object *local_file_find_or_create_with_fid(const struct lu_env *env,
ls = ls_device_get(dt);
if (IS_ERR(ls)) {
- dto = ERR_PTR(PTR_ERR(ls));
+ dto = ERR_CAST(ls);
} else {
/* create the object */
dti->dti_attr.la_valid = LA_MODE;
@@ -537,7 +529,7 @@ local_index_find_or_create_with_fid(const struct lu_env *env,
ls = ls_device_get(dt);
if (IS_ERR(ls)) {
- dto = ERR_PTR(PTR_ERR(ls));
+ dto = ERR_CAST(ls);
} else {
/* create the object */
dti->dti_attr.la_valid = LA_MODE;
@@ -588,17 +580,15 @@ int local_object_unlink(const struct lu_env *env, struct dt_device *dt,
struct thandle *th;
int rc;
- ENTRY;
-
rc = dt_lookup_dir(env, parent, name, &dti->dti_fid);
if (rc == -ENOENT)
- RETURN(0);
+ return 0;
else if (rc < 0)
- RETURN(rc);
+ return rc;
dto = dt_locate(env, dt, &dti->dti_fid);
if (unlikely(IS_ERR(dto)))
- RETURN(PTR_ERR(dto));
+ return PTR_ERR(dto);
th = dt_trans_create(env, dt);
if (IS_ERR(th))
@@ -761,11 +751,9 @@ int local_oid_storage_init(const struct lu_env *env, struct dt_device *dev,
__u32 first_oid = fid_oid(first_fid);
int rc = 0;
- ENTRY;
-
ls = ls_device_get(dev);
if (IS_ERR(ls))
- RETURN(PTR_ERR(ls));
+ return PTR_ERR(ls);
mutex_lock(&ls->ls_los_mutex);
*los = dt_los_find(ls, fid_seq(first_fid));
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_jobstats.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_jobstats.c
deleted file mode 100644
index e2d57fef0da..00000000000
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_jobstats.c
+++ /dev/null
@@ -1,562 +0,0 @@
-/* GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2011, 2012, Intel Corporation.
- * Use is subject to license terms.
- *
- * Author: Niu Yawei <niu@whamcloud.com>
- */
-/*
- * lustre/obdclass/lprocfs_jobstats.c
- */
-
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
-#define DEBUG_SUBSYSTEM S_CLASS
-
-
-#include <obd_class.h>
-#include <lprocfs_status.h>
-#include <lustre/lustre_idl.h>
-
-#if defined(LPROCFS)
-
-/*
- * JobID formats & JobID environment variable names for supported
- * job schedulers:
- *
- * SLURM:
- * JobID format: 32 bit integer.
- * JobID env var: SLURM_JOB_ID.
- * SGE:
- * JobID format: Decimal integer range to 99999.
- * JobID env var: JOB_ID.
- * LSF:
- * JobID format: 6 digit integer by default (up to 999999), can be
- * increased to 10 digit (up to 2147483646).
- * JobID env var: LSB_JOBID.
- * Loadleveler:
- * JobID format: String of machine_name.cluster_id.process_id, for
- * example: fr2n02.32.0
- * JobID env var: LOADL_STEP_ID.
- * PBS:
- * JobID format: String of sequence_number[.server_name][@server].
- * JobID env var: PBS_JOBID.
- * Maui/MOAB:
- * JobID format: Same as PBS.
- * JobID env var: Same as PBS.
- */
-
-struct job_stat {
- struct hlist_node js_hash;
- struct list_head js_list;
- atomic_t js_refcount;
- char js_jobid[JOBSTATS_JOBID_SIZE];
- time_t js_timestamp; /* seconds */
- struct lprocfs_stats *js_stats;
- struct obd_job_stats *js_jobstats;
-};
-
-static unsigned job_stat_hash(cfs_hash_t *hs, const void *key, unsigned mask)
-{
- return cfs_hash_djb2_hash(key, strlen(key), mask);
-}
-
-static void *job_stat_key(struct hlist_node *hnode)
-{
- struct job_stat *job;
- job = hlist_entry(hnode, struct job_stat, js_hash);
- return job->js_jobid;
-}
-
-static int job_stat_keycmp(const void *key, struct hlist_node *hnode)
-{
- struct job_stat *job;
- job = hlist_entry(hnode, struct job_stat, js_hash);
- return (strlen(job->js_jobid) == strlen(key)) &&
- !strncmp(job->js_jobid, key, strlen(key));
-}
-
-static void *job_stat_object(struct hlist_node *hnode)
-{
- return hlist_entry(hnode, struct job_stat, js_hash);
-}
-
-static void job_stat_get(cfs_hash_t *hs, struct hlist_node *hnode)
-{
- struct job_stat *job;
- job = hlist_entry(hnode, struct job_stat, js_hash);
- atomic_inc(&job->js_refcount);
-}
-
-static void job_free(struct job_stat *job)
-{
- LASSERT(atomic_read(&job->js_refcount) == 0);
- LASSERT(job->js_jobstats);
-
- write_lock(&job->js_jobstats->ojs_lock);
- list_del_init(&job->js_list);
- write_unlock(&job->js_jobstats->ojs_lock);
-
- lprocfs_free_stats(&job->js_stats);
- OBD_FREE_PTR(job);
-}
-
-static void job_putref(struct job_stat *job)
-{
- LASSERT(atomic_read(&job->js_refcount) > 0);
- if (atomic_dec_and_test(&job->js_refcount))
- job_free(job);
-}
-
-static void job_stat_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
-{
- struct job_stat *job;
- job = hlist_entry(hnode, struct job_stat, js_hash);
- job_putref(job);
-}
-
-static void job_stat_exit(cfs_hash_t *hs, struct hlist_node *hnode)
-{
- CERROR("Should not have any items!");
-}
-
-static cfs_hash_ops_t job_stats_hash_ops = {
- .hs_hash = job_stat_hash,
- .hs_key = job_stat_key,
- .hs_keycmp = job_stat_keycmp,
- .hs_object = job_stat_object,
- .hs_get = job_stat_get,
- .hs_put_locked = job_stat_put_locked,
- .hs_exit = job_stat_exit,
-};
-
-static int job_iter_callback(cfs_hash_t *hs, cfs_hash_bd_t *bd,
- struct hlist_node *hnode, void *data)
-{
- time_t oldest = *((time_t *)data);
- struct job_stat *job;
-
- job = hlist_entry(hnode, struct job_stat, js_hash);
- if (!oldest || job->js_timestamp < oldest)
- cfs_hash_bd_del_locked(hs, bd, hnode);
-
- return 0;
-}
-
-static void lprocfs_job_cleanup(struct obd_job_stats *stats, bool force)
-{
- time_t oldest, now;
-
- if (stats->ojs_cleanup_interval == 0)
- return;
-
- now = cfs_time_current_sec();
- if (!force && now < stats->ojs_last_cleanup +
- stats->ojs_cleanup_interval)
- return;
-
- oldest = now - stats->ojs_cleanup_interval;
- cfs_hash_for_each_safe(stats->ojs_hash, job_iter_callback,
- &oldest);
- stats->ojs_last_cleanup = cfs_time_current_sec();
-}
-
-static struct job_stat *job_alloc(char *jobid, struct obd_job_stats *jobs)
-{
- struct job_stat *job;
-
- LASSERT(jobs->ojs_cntr_num && jobs->ojs_cntr_init_fn);
-
- OBD_ALLOC_PTR(job);
- if (job == NULL)
- return NULL;
-
- job->js_stats = lprocfs_alloc_stats(jobs->ojs_cntr_num, 0);
- if (job->js_stats == NULL) {
- OBD_FREE_PTR(job);
- return NULL;
- }
-
- jobs->ojs_cntr_init_fn(job->js_stats);
-
- memcpy(job->js_jobid, jobid, JOBSTATS_JOBID_SIZE);
- job->js_timestamp = cfs_time_current_sec();
- job->js_jobstats = jobs;
- INIT_HLIST_NODE(&job->js_hash);
- INIT_LIST_HEAD(&job->js_list);
- atomic_set(&job->js_refcount, 1);
-
- return job;
-}
-
-int lprocfs_job_stats_log(struct obd_device *obd, char *jobid,
- int event, long amount)
-{
- struct obd_job_stats *stats = &obd->u.obt.obt_jobstats;
- struct job_stat *job, *job2;
- ENTRY;
-
- LASSERT(stats && stats->ojs_hash);
-
- lprocfs_job_cleanup(stats, false);
-
- if (!jobid || !strlen(jobid))
- RETURN(-EINVAL);
-
- if (strlen(jobid) >= JOBSTATS_JOBID_SIZE) {
- CERROR("Invalid jobid size (%lu), expect(%d)\n",
- (unsigned long)strlen(jobid) + 1, JOBSTATS_JOBID_SIZE);
- RETURN(-EINVAL);
- }
-
- job = cfs_hash_lookup(stats->ojs_hash, jobid);
- if (job)
- goto found;
-
- job = job_alloc(jobid, stats);
- if (job == NULL)
- RETURN(-ENOMEM);
-
- job2 = cfs_hash_findadd_unique(stats->ojs_hash, job->js_jobid,
- &job->js_hash);
- if (job2 != job) {
- job_putref(job);
- job = job2;
- /* We cannot LASSERT(!list_empty(&job->js_list)) here,
- * since we just lost the race for inserting "job" into the
- * ojs_list, and some other thread is doing it _right_now_.
- * Instead, be content the other thread is doing this, since
- * "job2" was initialized in job_alloc() already. LU-2163 */
- } else {
- LASSERT(list_empty(&job->js_list));
- write_lock(&stats->ojs_lock);
- list_add_tail(&job->js_list, &stats->ojs_list);
- write_unlock(&stats->ojs_lock);
- }
-
-found:
- LASSERT(stats == job->js_jobstats);
- LASSERT(stats->ojs_cntr_num > event);
- job->js_timestamp = cfs_time_current_sec();
- lprocfs_counter_add(job->js_stats, event, amount);
-
- job_putref(job);
- RETURN(0);
-}
-EXPORT_SYMBOL(lprocfs_job_stats_log);
-
-void lprocfs_job_stats_fini(struct obd_device *obd)
-{
- struct obd_job_stats *stats = &obd->u.obt.obt_jobstats;
- time_t oldest = 0;
-
- if (stats->ojs_hash == NULL)
- return;
- cfs_hash_for_each_safe(stats->ojs_hash, job_iter_callback, &oldest);
- cfs_hash_putref(stats->ojs_hash);
- stats->ojs_hash = NULL;
- LASSERT(list_empty(&stats->ojs_list));
-}
-EXPORT_SYMBOL(lprocfs_job_stats_fini);
-
-static void *lprocfs_jobstats_seq_start(struct seq_file *p, loff_t *pos)
-{
- struct obd_job_stats *stats = p->private;
- loff_t off = *pos;
- struct job_stat *job;
-
- read_lock(&stats->ojs_lock);
- if (off == 0)
- return SEQ_START_TOKEN;
- off--;
- list_for_each_entry(job, &stats->ojs_list, js_list) {
- if (!off--)
- return job;
- }
- return NULL;
-}
-
-static void lprocfs_jobstats_seq_stop(struct seq_file *p, void *v)
-{
- struct obd_job_stats *stats = p->private;
-
- read_unlock(&stats->ojs_lock);
-}
-
-static void *lprocfs_jobstats_seq_next(struct seq_file *p, void *v, loff_t *pos)
-{
- struct obd_job_stats *stats = p->private;
- struct job_stat *job;
- struct list_head *next;
-
- ++*pos;
- if (v == SEQ_START_TOKEN) {
- next = stats->ojs_list.next;
- } else {
- job = (struct job_stat *)v;
- next = job->js_list.next;
- }
-
- return next == &stats->ojs_list ? NULL :
- list_entry(next, struct job_stat, js_list);
-}
-
-/*
- * Example of output on MDT:
- *
- * job_stats:
- * - job_id: test_id.222.25844
- * snapshot_time: 1322494486
- * open: { samples: 3, unit: reqs }
- * close: { samples: 3, unit: reqs }
- * mknod: { samples: 0, unit: reqs }
- * link: { samples: 0, unit: reqs }
- * unlink: { samples: 0, unit: reqs }
- * mkdir: { samples: 0, unit: reqs }
- * rmdir: { samples: 0, unit: reqs }
- * rename: { samples: 1, unit: reqs }
- * getattr: { samples: 7, unit: reqs }
- * setattr: { samples: 0, unit: reqs }
- * getxattr: { samples: 0, unit: reqs }
- * setxattr: { samples: 0, unit: reqs }
- * statfs: { samples: 0, unit: reqs }
- * sync: { samples: 0, unit: reqs }
- *
- * Example of output on OST:
- *
- * job_stats:
- * - job_id 4854
- * snapshot_time: 1322494602
- * read: { samples: 0, unit: bytes, min: 0, max: 0, sum: 0 }
- * write: { samples: 1, unit: bytes, min: 10, max: 10, sum: 10 }
- * setattr: { samples: 0, unit: reqs }
- * punch: { samples: 0, unit: reqs }
- * sync: { samples: 0, unit: reqs }
- */
-
-static const char spaces[] = " ";
-
-static int inline width(const char *str, int len)
-{
- return len - min((int)strlen(str), 15);
-}
-
-static int lprocfs_jobstats_seq_show(struct seq_file *p, void *v)
-{
- struct job_stat *job = v;
- struct lprocfs_stats *s;
- struct lprocfs_counter ret;
- struct lprocfs_counter *cntr;
- struct lprocfs_counter_header *cntr_header;
- int i;
-
- if (v == SEQ_START_TOKEN) {
- seq_printf(p, "job_stats:\n");
- return 0;
- }
-
- seq_printf(p, "- %-16s %s\n", "job_id:", job->js_jobid);
- seq_printf(p, " %-16s %ld\n", "snapshot_time:", job->js_timestamp);
-
- s = job->js_stats;
- for (i = 0; i < s->ls_num; i++) {
- cntr = lprocfs_stats_counter_get(s, 0, i);
- cntr_header = &s->ls_cnt_header[i];
- lprocfs_stats_collect(s, i, &ret);
-
- seq_printf(p, " %s:%.*s { samples: %11"LPF64"u",
- cntr_header->lc_name,
- width(cntr_header->lc_name, 15), spaces,
- ret.lc_count);
- if (cntr_header->lc_units[0] != '\0')
- seq_printf(p, ", unit: %5s", cntr_header->lc_units);
-
- if (cntr_header->lc_config & LPROCFS_CNTR_AVGMINMAX) {
- seq_printf(p, ", min:%8"LPF64"u, max:%8"LPF64"u,"
- " sum:%16"LPF64"u",
- ret.lc_count ? ret.lc_min : 0,
- ret.lc_count ? ret.lc_max : 0,
- ret.lc_count ? ret.lc_sum : 0);
- }
- if (cntr_header->lc_config & LPROCFS_CNTR_STDDEV) {
- seq_printf(p, ", sumsq: %18"LPF64"u",
- ret.lc_count ? ret.lc_sumsquare : 0);
- }
-
- seq_printf(p, " }\n");
-
- }
- return 0;
-}
-
-struct seq_operations lprocfs_jobstats_seq_sops = {
- start: lprocfs_jobstats_seq_start,
- stop: lprocfs_jobstats_seq_stop,
- next: lprocfs_jobstats_seq_next,
- show: lprocfs_jobstats_seq_show,
-};
-
-static int lprocfs_jobstats_seq_open(struct inode *inode, struct file *file)
-{
- struct seq_file *seq;
- int rc;
-
- rc = seq_open(file, &lprocfs_jobstats_seq_sops);
- if (rc)
- return rc;
- seq = file->private_data;
- seq->private = PDE_DATA(inode);
- return 0;
-}
-
-static ssize_t lprocfs_jobstats_seq_write(struct file *file, const char *buf,
- size_t len, loff_t *off)
-{
- struct seq_file *seq = file->private_data;
- struct obd_job_stats *stats = seq->private;
- char jobid[JOBSTATS_JOBID_SIZE];
- int all = 0;
- struct job_stat *job;
-
- if (!memcmp(buf, "clear", strlen("clear"))) {
- all = 1;
- } else if (len < JOBSTATS_JOBID_SIZE) {
- memset(jobid, 0, JOBSTATS_JOBID_SIZE);
- /* Trim '\n' if any */
- if (buf[len - 1] == '\n')
- memcpy(jobid, buf, len - 1);
- else
- memcpy(jobid, buf, len);
- } else {
- return -EINVAL;
- }
-
- LASSERT(stats->ojs_hash);
- if (all) {
- time_t oldest = 0;
- cfs_hash_for_each_safe(stats->ojs_hash, job_iter_callback,
- &oldest);
- return len;
- }
-
- if (!strlen(jobid))
- return -EINVAL;
-
- job = cfs_hash_lookup(stats->ojs_hash, jobid);
- if (!job)
- return -EINVAL;
-
- cfs_hash_del_key(stats->ojs_hash, jobid);
-
- job_putref(job);
- return len;
-}
-
-struct file_operations lprocfs_jobstats_seq_fops = {
- .owner = THIS_MODULE,
- .open = lprocfs_jobstats_seq_open,
- .read = seq_read,
- .write = lprocfs_jobstats_seq_write,
- .llseek = seq_lseek,
- .release = lprocfs_seq_release,
-};
-
-int lprocfs_job_stats_init(struct obd_device *obd, int cntr_num,
- cntr_init_callback init_fn)
-{
- struct proc_dir_entry *entry;
- struct obd_job_stats *stats;
- ENTRY;
-
- LASSERT(obd->obd_proc_entry != NULL);
- LASSERT(obd->obd_type->typ_name);
-
- if (strcmp(obd->obd_type->typ_name, LUSTRE_MDT_NAME) &&
- strcmp(obd->obd_type->typ_name, LUSTRE_OST_NAME)) {
- CERROR("Invalid obd device type.\n");
- RETURN(-EINVAL);
- }
- stats = &obd->u.obt.obt_jobstats;
-
- LASSERT(stats->ojs_hash == NULL);
- stats->ojs_hash = cfs_hash_create("JOB_STATS",
- HASH_JOB_STATS_CUR_BITS,
- HASH_JOB_STATS_MAX_BITS,
- HASH_JOB_STATS_BKT_BITS, 0,
- CFS_HASH_MIN_THETA,
- CFS_HASH_MAX_THETA,
- &job_stats_hash_ops,
- CFS_HASH_DEFAULT);
- if (stats->ojs_hash == NULL)
- RETURN(-ENOMEM);
-
- INIT_LIST_HEAD(&stats->ojs_list);
- rwlock_init(&stats->ojs_lock);
- stats->ojs_cntr_num = cntr_num;
- stats->ojs_cntr_init_fn = init_fn;
- stats->ojs_cleanup_interval = 600; /* 10 mins by default */
- stats->ojs_last_cleanup = cfs_time_current_sec();
-
- entry = proc_create_data("job_stats", 0644, obd->obd_proc_entry,
- &lprocfs_jobstats_seq_fops, stats);
- if (entry)
- RETURN(0);
- else
- RETURN(-ENOMEM);
-}
-EXPORT_SYMBOL(lprocfs_job_stats_init);
-
-int lprocfs_rd_job_interval(struct seq_file *m, void *data)
-{
- struct obd_device *obd = (struct obd_device *)data;
- struct obd_job_stats *stats;
-
- LASSERT(obd != NULL);
- stats = &obd->u.obt.obt_jobstats;
- return seq_printf(m, "%d\n", stats->ojs_cleanup_interval);
-}
-EXPORT_SYMBOL(lprocfs_rd_job_interval);
-
-int lprocfs_wr_job_interval(struct file *file, const char *buffer,
- unsigned long count, void *data)
-{
- struct obd_device *obd = (struct obd_device *)data;
- struct obd_job_stats *stats;
- int val, rc;
-
- LASSERT(obd != NULL);
- stats = &obd->u.obt.obt_jobstats;
-
- rc = lprocfs_write_helper(buffer, count, &val);
- if (rc)
- return rc;
-
- stats->ojs_cleanup_interval = val;
- lprocfs_job_cleanup(stats, true);
-
- return count;
-
-}
-EXPORT_SYMBOL(lprocfs_wr_job_interval);
-
-#endif /* LPROCFS*/
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index f7af3d6a4ef..a95f60a4f90 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -68,11 +68,11 @@ EXPORT_SYMBOL(lprocfs_seq_release);
/* lprocfs API calls */
-proc_dir_entry_t *lprocfs_add_simple(struct proc_dir_entry *root,
+struct proc_dir_entry *lprocfs_add_simple(struct proc_dir_entry *root,
char *name, void *data,
struct file_operations *fops)
{
- proc_dir_entry_t *proc;
+ struct proc_dir_entry *proc;
umode_t mode = 0;
if (root == NULL || name == NULL || fops == NULL)
@@ -179,17 +179,21 @@ struct proc_dir_entry *lprocfs_register(const char *name,
struct proc_dir_entry *parent,
struct lprocfs_vars *list, void *data)
{
- struct proc_dir_entry *newchild;
+ struct proc_dir_entry *entry;
+
+ entry = proc_mkdir(name, parent);
+ if (entry == NULL)
+ GOTO(out, entry = ERR_PTR(-ENOMEM));
- newchild = proc_mkdir(name, parent);
- if (newchild != NULL && list != NULL) {
- int rc = lprocfs_add_vars(newchild, list, data);
- if (rc) {
- lprocfs_remove(&newchild);
- return ERR_PTR(rc);
+ if (list != NULL) {
+ int rc = lprocfs_add_vars(entry, list, data);
+ if (rc != 0) {
+ lprocfs_remove(&entry);
+ entry = ERR_PTR(rc);
}
}
- return newchild;
+out:
+ return entry;
}
EXPORT_SYMBOL(lprocfs_register);
@@ -896,7 +900,6 @@ void lprocfs_free_per_client_stats(struct obd_device *obd)
{
cfs_hash_t *hash = obd->obd_nid_stats_hash;
struct nid_stat *stat;
- ENTRY;
/* we need an extra list - because hash_exit is called too early */
/* no locking needed because all the clients have died */
@@ -907,7 +910,6 @@ void lprocfs_free_per_client_stats(struct obd_device *obd)
cfs_hash_del(hash, &stat->nid, &stat->nid_hash);
lprocfs_free_client_stats(stat);
}
- EXIT;
}
EXPORT_SYMBOL(lprocfs_free_per_client_stats);
@@ -1494,7 +1496,6 @@ EXPORT_SYMBOL(lprocfs_nid_stats_clear_read);
static int lprocfs_nid_stats_clear_write_cb(void *obj, void *data)
{
struct nid_stat *stat = obj;
- ENTRY;
CDEBUG(D_INFO,"refcnt %d\n", atomic_read(&stat->nid_exp_ref_count));
if (atomic_read(&stat->nid_exp_ref_count) == 1) {
@@ -1502,13 +1503,13 @@ static int lprocfs_nid_stats_clear_write_cb(void *obj, void *data)
spin_lock(&stat->nid_obd->obd_nid_lock);
list_move(&stat->nid_list, data);
spin_unlock(&stat->nid_obd->obd_nid_lock);
- RETURN(1);
+ return 1;
}
/* we have a reference to the object - only clear the data */
if (stat->nid_stats)
lprocfs_clear_stats(stat->nid_stats);
- RETURN(0);
+ return 0;
}
int lprocfs_nid_stats_clear_write(struct file *file, const char *buffer,
@@ -1536,22 +1537,21 @@ int lprocfs_exp_setup(struct obd_export *exp, lnet_nid_t *nid, int *newnid)
{
struct nid_stat *new_stat, *old_stat;
struct obd_device *obd = NULL;
- proc_dir_entry_t *entry;
+ struct proc_dir_entry *entry;
char *buffer = NULL;
int rc = 0;
- ENTRY;
*newnid = 0;
if (!exp || !exp->exp_obd || !exp->exp_obd->obd_proc_exports_entry ||
!exp->exp_obd->obd_nid_stats_hash)
- RETURN(-EINVAL);
+ return -EINVAL;
/* do not test against zero because Eric says:
* You may only test nid against another nid, or LNET_NID_ANY.
* Anything else is nonsense.*/
if (!nid || *nid == LNET_NID_ANY)
- RETURN(0);
+ return 0;
obd = exp->exp_obd;
@@ -1559,7 +1559,7 @@ int lprocfs_exp_setup(struct obd_export *exp, lnet_nid_t *nid, int *newnid)
OBD_ALLOC_PTR(new_stat);
if (new_stat == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
new_stat->nid = *nid;
new_stat->nid_obd = exp->exp_obd;
@@ -1596,10 +1596,12 @@ int lprocfs_exp_setup(struct obd_export *exp, lnet_nid_t *nid, int *newnid)
NULL, NULL);
OBD_FREE(buffer, LNET_NIDSTR_SIZE);
- if (new_stat->nid_proc == NULL) {
+ if (IS_ERR(new_stat->nid_proc)) {
CERROR("Error making export directory for nid %s\n",
libcfs_nid2str(*nid));
- GOTO(destroy_new_ns, rc = -ENOMEM);
+ rc = PTR_ERR(new_stat->nid_proc);
+ new_stat->nid_proc = NULL;
+ GOTO(destroy_new_ns, rc);
}
entry = lprocfs_add_simple(new_stat->nid_proc, "uuid",
@@ -1625,7 +1627,7 @@ int lprocfs_exp_setup(struct obd_export *exp, lnet_nid_t *nid, int *newnid)
list_add(&new_stat->nid_list, &obd->obd_nid_stats);
spin_unlock(&obd->obd_nid_lock);
- RETURN(rc);
+ return rc;
destroy_new_ns:
if (new_stat->nid_proc != NULL)
@@ -1635,7 +1637,7 @@ destroy_new_ns:
destroy_new:
nidstat_putref(new_stat);
OBD_FREE_PTR(new_stat);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(lprocfs_exp_setup);
@@ -1644,7 +1646,7 @@ int lprocfs_exp_cleanup(struct obd_export *exp)
struct nid_stat *stat = exp->exp_nid_stats;
if(!stat || !exp->exp_obd)
- RETURN(0);
+ return 0;
nidstat_putref(exp->exp_nid_stats);
exp->exp_nid_stats = NULL;
@@ -1873,7 +1875,7 @@ static char *lprocfs_strnstr(const char *s1, const char *s2, size_t len)
* If \a name is not found the original \a buffer is returned.
*/
char *lprocfs_find_named_value(const char *buffer, const char *name,
- unsigned long *count)
+ size_t *count)
{
char *val;
size_t buflen = *count;
@@ -1897,23 +1899,22 @@ char *lprocfs_find_named_value(const char *buffer, const char *name,
}
EXPORT_SYMBOL(lprocfs_find_named_value);
-int lprocfs_seq_create(proc_dir_entry_t *parent,
+int lprocfs_seq_create(struct proc_dir_entry *parent,
const char *name,
umode_t mode,
const struct file_operations *seq_fops,
void *data)
{
struct proc_dir_entry *entry;
- ENTRY;
/* Disallow secretly (un)writable entries. */
LASSERT((seq_fops->write == NULL) == ((mode & 0222) == 0));
entry = proc_create_data(name, mode, parent, seq_fops, data);
if (entry == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(lprocfs_seq_create);
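For context on the mechanical change running through these hunks: ENTRY, EXIT and RETURN are libcfs debug-trace wrappers, and the conversion simply drops the tracing in favour of plain C control flow. The sketch below shows the approximate shape of the macros and the replacement idiom; it is illustrative only (the real definitions live in the libcfs debug headers, and demo_check() is a hypothetical function, not part of the patch).

/* Approximate shape of the trace macros being removed (illustrative only;
 * the cast in the debug print is simplified). */
#define ENTRY      CDEBUG(D_TRACE, "Process entered\n")
#define EXIT       CDEBUG(D_TRACE, "Process leaving\n")
#define RETURN(rc)                                            \
do {                                                          \
	typeof(rc) __rc = (rc);                               \
	CDEBUG(D_TRACE, "Process leaving (rc=%ld)\n",         \
	       (long)__rc);                                   \
	return __rc;                                          \
} while (0)

/* After the conversion the same body is ordinary C. */
static int demo_check(void *obj)	/* hypothetical helper */
{
	if (obj == NULL)
		return -EINVAL;		/* was: RETURN(-EINVAL) */
	return 0;			/* was: RETURN(0) */
}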
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index fdf0ed36769..c29ac1c2def 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -202,7 +202,6 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env,
struct list_head *layers;
int clean;
int result;
- ENTRY;
/*
* Create top-level object slice. This will also create
@@ -210,9 +209,9 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env,
*/
top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
if (top == NULL)
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
if (IS_ERR(top))
- RETURN(top);
+ return top;
/*
* This is the only place where object fid is assigned. It's constant
* after this point.
@@ -233,7 +232,7 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env,
result = scan->lo_ops->loo_object_init(env, scan, conf);
if (result != 0) {
lu_object_free(env, top);
- RETURN(ERR_PTR(result));
+ return ERR_PTR(result);
}
scan->lo_flags |= LU_OBJECT_ALLOCATED;
}
@@ -244,13 +243,13 @@ static struct lu_object *lu_object_alloc(const struct lu_env *env,
result = scan->lo_ops->loo_object_start(env, scan);
if (result != 0) {
lu_object_free(env, top);
- RETURN(ERR_PTR(result));
+ return ERR_PTR(result);
}
}
}
lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
- RETURN(top);
+ return top;
}
/**
@@ -317,7 +316,7 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
int i;
if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
- RETURN(0);
+ return 0;
INIT_LIST_HEAD(&dispose);
/*
@@ -538,7 +537,7 @@ static struct lu_object *htable_lookup(struct lu_site *s,
__u64 ver = cfs_hash_bd_version_get(bd);
if (*version == ver)
- return NULL;
+ return ERR_PTR(-ENOENT);
*version = ver;
bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
@@ -547,7 +546,7 @@ static struct lu_object *htable_lookup(struct lu_site *s,
hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
if (hnode == NULL) {
lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
- return NULL;
+ return ERR_PTR(-ENOENT);
}
h = container_of0(hnode, struct lu_object_header, loh_hash);
@@ -651,7 +650,7 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
o = htable_lookup(s, &bd, f, waiter, &version);
cfs_hash_bd_unlock(hs, &bd, 1);
- if (o != NULL)
+ if (!IS_ERR(o) || PTR_ERR(o) != -ENOENT)
return o;
/*
@@ -667,7 +666,7 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
cfs_hash_bd_lock(hs, &bd, 1);
shadow = htable_lookup(s, &bd, f, waiter, &version);
- if (likely(shadow == NULL)) {
+ if (likely(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT)) {
struct lu_site_bkt_data *bkt;
bkt = cfs_hash_bd_extra_get(hs, &bd);
@@ -849,7 +848,7 @@ static int lu_htable_order(void)
*
* Size of lu_object is (arbitrary) taken as 1K (together with inode).
*/
- cache_size = num_physpages;
+ cache_size = totalram_pages;
#if BITS_PER_LONG == 32
/* limit hashtable size for lowmem systems to low RAM */
@@ -980,7 +979,6 @@ int lu_site_init(struct lu_site *s, struct lu_device *top)
char name[16];
int bits;
int i;
- ENTRY;
memset(s, 0, sizeof *s);
bits = lu_htable_order();
@@ -1041,7 +1039,7 @@ int lu_site_init(struct lu_site *s, struct lu_device *top)
lu_dev_add_linkage(s, top);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(lu_site_init);
@@ -1147,15 +1145,16 @@ EXPORT_SYMBOL(lu_device_fini);
* Initialize object \a o that is part of compound object \a h and was created
* by device \a d.
*/
-int lu_object_init(struct lu_object *o,
- struct lu_object_header *h, struct lu_device *d)
+int lu_object_init(struct lu_object *o, struct lu_object_header *h,
+ struct lu_device *d)
{
- memset(o, 0, sizeof *o);
+ memset(o, 0, sizeof(*o));
o->lo_header = h;
- o->lo_dev = d;
+ o->lo_dev = d;
lu_device_get(d);
- o->lo_dev_ref = lu_ref_add(&d->ld_reference, "lu_object", o);
+ lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o);
INIT_LIST_HEAD(&o->lo_linkage);
+
return 0;
}
EXPORT_SYMBOL(lu_object_init);
@@ -1170,8 +1169,8 @@ void lu_object_fini(struct lu_object *o)
LASSERT(list_empty(&o->lo_linkage));
if (dev != NULL) {
- lu_ref_del_at(&dev->ld_reference,
- o->lo_dev_ref , "lu_object", o);
+ lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref,
+ "lu_object", o);
lu_device_put(dev);
o->lo_dev = NULL;
}
@@ -1315,7 +1314,6 @@ int lu_context_key_register(struct lu_context_key *key)
LASSERT(key->lct_init != NULL);
LASSERT(key->lct_fini != NULL);
LASSERT(key->lct_tags != 0);
- LASSERT(key->lct_owner != NULL);
result = -ENFILE;
spin_lock(&lu_keys_guard);
@@ -1349,7 +1347,6 @@ static void key_fini(struct lu_context *ctx, int index)
lu_ref_del(&key->lct_reference, "ctx", ctx);
atomic_dec(&key->lct_used);
- LASSERT(key->lct_owner != NULL);
if ((ctx->lc_tags & LCT_NOREF) == 0) {
#ifdef CONFIG_MODULE_UNLOAD
LINVRNT(module_refcount(key->lct_owner) > 0);
@@ -1557,7 +1554,6 @@ static int keys_fill(struct lu_context *ctx)
if (unlikely(IS_ERR(value)))
return PTR_ERR(value);
- LASSERT(key->lct_owner != NULL);
if (!(ctx->lc_tags & LCT_NOREF))
try_module_get(key->lct_owner);
lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
@@ -2079,7 +2075,7 @@ void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
cfs_hash_bd_get_and_lock(hs, (void *)fid, &bd, 1);
shadow = htable_lookup(s, &bd, fid, &waiter, &version);
/* supposed to be unique */
- LASSERT(shadow == NULL);
+ LASSERT(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT);
*old = *fid;
bkt = cfs_hash_bd_extra_get(hs, &bd);
cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
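One non-mechanical change in this file is that htable_lookup() now signals a cache miss with ERR_PTR(-ENOENT) instead of NULL, so its callers (lu_object_find_try(), lu_object_assign_fid()) switch to IS_ERR()/PTR_ERR() checks. A minimal sketch of that <linux/err.h> idiom, with hypothetical demo_* names standing in for the real types and helpers:

#include <linux/err.h>
#include <linux/types.h>

struct demo_obj;
struct demo_cache;

/* Assumed helpers, for the sketch only. */
struct demo_obj *demo_cache_peek(struct demo_cache *c, u64 key);
struct demo_obj *demo_create(struct demo_cache *c, u64 key);

/* "Not found" is reported as ERR_PTR(-ENOENT) instead of NULL, so a valid
 * pointer can never be mistaken for an error value. */
static struct demo_obj *demo_lookup(struct demo_cache *c, u64 key)
{
	struct demo_obj *o = demo_cache_peek(c, key);

	return o ? o : ERR_PTR(-ENOENT);
}

static struct demo_obj *demo_find_or_create(struct demo_cache *c, u64 key)
{
	struct demo_obj *o = demo_lookup(c, key);

	if (!IS_ERR(o))
		return o;		/* cache hit */
	if (PTR_ERR(o) != -ENOENT)
		return o;		/* hard failure, propagate */
	return demo_create(c, key);	/* miss: allocate a fresh object */
}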
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_ucred.c b/drivers/staging/lustre/lustre/obdclass/lu_ucred.c
index 229db6c39b7..e23e545b0d6 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_ucred.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_ucred.c
@@ -33,13 +33,13 @@
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*
- * lustre/obdclass/lu_object.c
+ * lustre/obdclass/lu_ucred.c
*
- * Lustre Object.
- * These are the only exported functions, they provide some generic
- * infrastructure for managing object devices
+ * Lustre user credentials context infrastructure.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Fan Yong <fan.yong@intel.com>
+ * Author: Vitaly Fertman <vitaly_fertman@xyratex.com>
*/
#define DEBUG_SUBSYSTEM S_CLASS
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
index 69d6499ef73..be31d32b82c 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_handles.c
@@ -65,7 +65,6 @@ void class_handle_hash(struct portals_handle *h,
struct portals_handle_ops *ops)
{
struct handle_bucket *bucket;
- ENTRY;
LASSERT(h != NULL);
LASSERT(list_empty(&h->h_link));
@@ -100,7 +99,6 @@ void class_handle_hash(struct portals_handle *h,
CDEBUG(D_INFO, "added object %p with handle "LPX64" to hash\n",
h, h->h_cookie);
- EXIT;
}
EXPORT_SYMBOL(class_handle_hash);
@@ -139,7 +137,6 @@ EXPORT_SYMBOL(class_handle_unhash);
void class_handle_hash_back(struct portals_handle *h)
{
struct handle_bucket *bucket;
- ENTRY;
bucket = handle_hash + (h->h_cookie & HANDLE_HASH_MASK);
@@ -147,8 +144,6 @@ void class_handle_hash_back(struct portals_handle *h)
list_add_rcu(&h->h_link, &bucket->head);
h->h_in = 1;
spin_unlock(&bucket->lock);
-
- EXIT;
}
EXPORT_SYMBOL(class_handle_hash_back);
@@ -157,7 +152,6 @@ void *class_handle2object(__u64 cookie)
struct handle_bucket *bucket;
struct portals_handle *h;
void *retval = NULL;
- ENTRY;
LASSERT(handle_hash != NULL);
@@ -180,7 +174,7 @@ void *class_handle2object(__u64 cookie)
}
rcu_read_unlock();
- RETURN(retval);
+ return retval;
}
EXPORT_SYMBOL(class_handle2object);
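class_handle2object() above is a lock-free reader: writers manipulate each bucket's list with the _rcu list helpers under the bucket spinlock, while lookups walk the list under rcu_read_lock() only. A hedged, self-contained sketch of that pattern follows; the types and the omitted take-a-reference step are assumptions, not the Lustre code.

#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_handle {
	struct list_head	h_link;
	u64			h_cookie;
};

struct demo_bucket {
	spinlock_t		lock;	/* taken by writers only */
	struct list_head	head;
};

static struct demo_handle *demo_handle2object(struct demo_bucket *bucket,
					      u64 cookie)
{
	struct demo_handle *h, *found = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(h, &bucket->head, h_link) {
		if (h->h_cookie == cookie) {
			/* a real implementation takes a reference here,
			 * before leaving the RCU read-side section */
			found = h;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}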
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
index 2fa2589dc8e..df4936ad237 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
@@ -191,7 +191,6 @@ int class_check_uuid(struct obd_uuid *uuid, __u64 nid)
{
struct uuid_nid_data *entry;
int found = 0;
- ENTRY;
CDEBUG(D_INFO, "check if uuid %s has %s.\n",
obd_uuid2str(uuid), libcfs_nid2str(nid));
@@ -213,6 +212,6 @@ int class_check_uuid(struct obd_uuid *uuid, __u64 nid)
break;
}
spin_unlock(&g_uuid_lock);
- RETURN(found);
+ return found;
}
EXPORT_SYMBOL(class_check_uuid);
diff --git a/drivers/staging/lustre/lustre/obdclass/md_attrs.c b/drivers/staging/lustre/lustre/obdclass/md_attrs.c
index b71344a04c7..f7187829e27 100644
--- a/drivers/staging/lustre/lustre/obdclass/md_attrs.c
+++ b/drivers/staging/lustre/lustre/obdclass/md_attrs.c
@@ -99,19 +99,18 @@ EXPORT_SYMBOL(lustre_som_swab);
int lustre_buf2som(void *buf, int rc, struct md_som_data *msd)
{
struct som_attrs *attrs = (struct som_attrs *)buf;
- ENTRY;
if (rc == 0 || rc == -ENODATA)
/* no SOM attributes */
- RETURN(-ENODATA);
+ return -ENODATA;
if (rc < 0)
/* error hit while fetching xattr */
- RETURN(rc);
+ return rc;
/* check SOM compatibility */
if (attrs->som_incompat & ~cpu_to_le32(SOM_INCOMPAT_SUPP))
- RETURN(-ENODATA);
+ return -ENODATA;
/* unpack SOM attributes */
lustre_som_swab(attrs);
@@ -124,7 +123,7 @@ int lustre_buf2som(void *buf, int rc, struct md_som_data *msd)
msd->msd_blocks = attrs->som_blocks;
msd->msd_mountid = attrs->som_mountid;
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(lustre_buf2som);
@@ -156,15 +155,14 @@ EXPORT_SYMBOL(lustre_hsm_swab);
int lustre_buf2hsm(void *buf, int rc, struct md_hsm *mh)
{
struct hsm_attrs *attrs = (struct hsm_attrs *)buf;
- ENTRY;
if (rc == 0 || rc == -ENODATA)
/* no HSM attributes */
- RETURN(-ENODATA);
+ return -ENODATA;
if (rc < 0)
/* error hit while fetching xattr */
- RETURN(rc);
+ return rc;
/* unpack HSM attributes */
lustre_hsm_swab(attrs);
@@ -175,7 +173,7 @@ int lustre_buf2hsm(void *buf, int rc, struct md_hsm *mh)
mh->mh_arch_id = attrs->hsm_arch_id;
mh->mh_arch_ver = attrs->hsm_arch_ver;
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(lustre_buf2hsm);
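Both unpack helpers above share one convention for the rc argument, which is the raw return value of the xattr fetch: 0 or -ENODATA means the attribute is absent, any other negative value is a fetch error, and a positive value is the number of bytes available to unpack. A tiny illustrative helper (not part of the patch) that isolates just that convention:

#include <linux/errno.h>

/* Illustrative only: mirrors the rc handling of lustre_buf2som() and
 * lustre_buf2hsm(), where rc is passed straight from the xattr read. */
static int demo_check_xattr_rc(int rc)
{
	if (rc == 0 || rc == -ENODATA)
		return -ENODATA;	/* attribute not present */
	if (rc < 0)
		return rc;		/* error while fetching the xattr */
	return 0;			/* rc bytes of data ready to unpack */
}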
@@ -188,7 +186,6 @@ EXPORT_SYMBOL(lustre_buf2hsm);
void lustre_hsm2buf(void *buf, struct md_hsm *mh)
{
struct hsm_attrs *attrs = (struct hsm_attrs *)buf;
- ENTRY;
/* copy HSM attributes */
attrs->hsm_compat = mh->mh_compat;
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index bbf06d009fd..d0a64ff5358 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -93,7 +93,7 @@ struct cfg_interop_param *class_find_old_param(const char *param,
int name_len = 0;
if (param == NULL || ptr == NULL)
- RETURN(NULL);
+ return NULL;
value = strchr(param, '=');
if (value == NULL)
@@ -104,11 +104,11 @@ struct cfg_interop_param *class_find_old_param(const char *param,
while (ptr->old_param != NULL) {
if (strncmp(param, ptr->old_param, name_len) == 0 &&
name_len == strlen(ptr->old_param))
- RETURN(ptr);
+ return ptr;
ptr++;
}
- RETURN(NULL);
+ return NULL;
}
EXPORT_SYMBOL(class_find_old_param);
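class_find_old_param() above matches a "name=value" string against a table of renamed parameters by comparing only the name part, stopping at the first '=' if one is present. A small, hedged userspace-style sketch of the same lookup logic; the struct name is an assumption, not the Lustre cfg_interop_param definition.

#include <stddef.h>
#include <string.h>

struct demo_interop_param {
	const char *old_param;
	const char *new_param;
};

static const struct demo_interop_param *
demo_find_old_param(const char *param, const struct demo_interop_param *ptr)
{
	const char *value;
	size_t name_len;

	if (param == NULL || ptr == NULL)
		return NULL;

	/* compare only the part before '=', or the whole string if no
	 * value is attached */
	value = strchr(param, '=');
	name_len = value ? (size_t)(value - param) : strlen(param);

	for (; ptr->old_param != NULL; ptr++)
		if (strncmp(param, ptr->old_param, name_len) == 0 &&
		    name_len == strlen(ptr->old_param))
			return ptr;

	return NULL;
}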
@@ -335,23 +335,22 @@ int class_attach(struct lustre_cfg *lcfg)
struct obd_device *obd = NULL;
char *typename, *name, *uuid;
int rc, len;
- ENTRY;
if (!LUSTRE_CFG_BUFLEN(lcfg, 1)) {
CERROR("No type passed!\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
typename = lustre_cfg_string(lcfg, 1);
if (!LUSTRE_CFG_BUFLEN(lcfg, 0)) {
CERROR("No name passed!\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
name = lustre_cfg_string(lcfg, 0);
if (!LUSTRE_CFG_BUFLEN(lcfg, 2)) {
CERROR("No UUID passed!\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
uuid = lustre_cfg_string(lcfg, 2);
@@ -433,7 +432,7 @@ int class_attach(struct lustre_cfg *lcfg)
obd->obd_attached = 1;
CDEBUG(D_IOCTL, "OBD: dev %d attached type %s with refcount %d\n",
obd->obd_minor, typename, atomic_read(&obd->obd_refcount));
- RETURN(0);
+ return 0;
out:
if (obd != NULL) {
class_release_dev(obd);
@@ -449,7 +448,6 @@ int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
{
int err = 0;
struct obd_export *exp;
- ENTRY;
LASSERT(obd != NULL);
LASSERTF(obd == class_num2obd(obd->obd_minor),
@@ -462,13 +460,13 @@ int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
/* have we attached a type to this device? */
if (!obd->obd_attached) {
CERROR("Device %d not attached\n", obd->obd_minor);
- RETURN(-ENODEV);
+ return -ENODEV;
}
if (obd->obd_set_up) {
CERROR("Device %d already setup (type %s)\n",
obd->obd_minor, obd->obd_type->typ_name);
- RETURN(-EEXIST);
+ return -EEXIST;
}
/* is someone else setting us up right now? (attach inits spinlock) */
@@ -477,7 +475,7 @@ int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
spin_unlock(&obd->obd_dev_lock);
CERROR("Device %d setup in progress (type %s)\n",
obd->obd_minor, obd->obd_type->typ_name);
- RETURN(-EEXIST);
+ return -EEXIST;
}
/* just leave this on forever. I can't use obd_set_up here because
other fns check that status, and we're not actually set up yet. */
@@ -542,7 +540,7 @@ int class_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
CDEBUG(D_IOCTL, "finished setup of obd %s (uuid %s)\n",
obd->obd_name, obd->obd_uuid.uuid);
- RETURN(0);
+ return 0;
err_exp:
if (obd->obd_self_export) {
class_unlink_export(obd->obd_self_export);
@@ -572,18 +570,16 @@ EXPORT_SYMBOL(class_setup);
*/
int class_detach(struct obd_device *obd, struct lustre_cfg *lcfg)
{
- ENTRY;
-
if (obd->obd_set_up) {
CERROR("OBD device %d still set up\n", obd->obd_minor);
- RETURN(-EBUSY);
+ return -EBUSY;
}
spin_lock(&obd->obd_dev_lock);
if (!obd->obd_attached) {
spin_unlock(&obd->obd_dev_lock);
CERROR("OBD device %d not attached\n", obd->obd_minor);
- RETURN(-ENODEV);
+ return -ENODEV;
}
obd->obd_attached = 0;
spin_unlock(&obd->obd_dev_lock);
@@ -592,7 +588,7 @@ int class_detach(struct obd_device *obd, struct lustre_cfg *lcfg)
obd->obd_name, obd->obd_uuid.uuid);
class_decref(obd, "attach", obd);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(class_detach);
@@ -604,20 +600,19 @@ int class_cleanup(struct obd_device *obd, struct lustre_cfg *lcfg)
{
int err = 0;
char *flag;
- ENTRY;
OBD_RACE(OBD_FAIL_LDLM_RECOV_CLIENTS);
if (!obd->obd_set_up) {
CERROR("Device %d not setup\n", obd->obd_minor);
- RETURN(-ENODEV);
+ return -ENODEV;
}
spin_lock(&obd->obd_dev_lock);
if (obd->obd_stopping) {
spin_unlock(&obd->obd_dev_lock);
CERROR("OBD %d already stopping\n", obd->obd_minor);
- RETURN(-ENODEV);
+ return -ENODEV;
}
/* Leave this on forever */
obd->obd_stopping = 1;
@@ -696,7 +691,7 @@ int class_cleanup(struct obd_device *obd, struct lustre_cfg *lcfg)
class_decref(obd, "setup", obd);
obd->obd_set_up = 0;
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(class_cleanup);
@@ -767,12 +762,11 @@ int class_add_conn(struct obd_device *obd, struct lustre_cfg *lcfg)
struct obd_import *imp;
struct obd_uuid uuid;
int rc;
- ENTRY;
if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1 ||
LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(struct obd_uuid)) {
CERROR("invalid conn_uuid\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME) &&
@@ -780,19 +774,19 @@ int class_add_conn(struct obd_device *obd, struct lustre_cfg *lcfg)
strcmp(obd->obd_type->typ_name, LUSTRE_LWP_NAME) &&
strcmp(obd->obd_type->typ_name, LUSTRE_MGC_NAME)) {
CERROR("can't add connection on non-client dev\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
imp = obd->u.cli.cl_import;
if (!imp) {
CERROR("try to add conn on immature client dev\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
obd_str2uuid(&uuid, lustre_cfg_string(lcfg, 1));
rc = obd_add_conn(imp, &uuid, lcfg->lcfg_num);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(class_add_conn);
@@ -803,29 +797,28 @@ int class_del_conn(struct obd_device *obd, struct lustre_cfg *lcfg)
struct obd_import *imp;
struct obd_uuid uuid;
int rc;
- ENTRY;
if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1 ||
LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(struct obd_uuid)) {
CERROR("invalid conn_uuid\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME)) {
CERROR("can't del connection on non-client dev\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
imp = obd->u.cli.cl_import;
if (!imp) {
CERROR("try to del conn on immature client dev\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
obd_str2uuid(&uuid, lustre_cfg_string(lcfg, 1));
rc = obd_del_conn(imp, &uuid);
- RETURN(rc);
+ return rc;
}
LIST_HEAD(lustre_profile_list);
@@ -834,13 +827,12 @@ struct lustre_profile *class_get_profile(const char * prof)
{
struct lustre_profile *lprof;
- ENTRY;
list_for_each_entry(lprof, &lustre_profile_list, lp_list) {
if (!strcmp(lprof->lp_profile, prof)) {
- RETURN(lprof);
+ return lprof;
}
}
- RETURN(NULL);
+ return NULL;
}
EXPORT_SYMBOL(class_get_profile);
@@ -853,13 +845,12 @@ int class_add_profile(int proflen, char *prof, int osclen, char *osc,
{
struct lustre_profile *lprof;
int err = 0;
- ENTRY;
CDEBUG(D_CONFIG, "Add profile %s\n", prof);
OBD_ALLOC(lprof, sizeof(*lprof));
if (lprof == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
INIT_LIST_HEAD(&lprof->lp_list);
LASSERT(proflen == (strlen(prof) + 1));
@@ -883,7 +874,7 @@ int class_add_profile(int proflen, char *prof, int osclen, char *osc,
}
list_add(&lprof->lp_list, &lustre_profile_list);
- RETURN(err);
+ return err;
out:
if (lprof->lp_md)
@@ -893,13 +884,12 @@ out:
if (lprof->lp_profile)
OBD_FREE(lprof->lp_profile, proflen);
OBD_FREE(lprof, sizeof(*lprof));
- RETURN(err);
+ return err;
}
void class_del_profile(const char *prof)
{
struct lustre_profile *lprof;
- ENTRY;
CDEBUG(D_CONFIG, "Del profile %s\n", prof);
@@ -912,7 +902,6 @@ void class_del_profile(const char *prof)
OBD_FREE(lprof->lp_md, strlen(lprof->lp_md) + 1);
OBD_FREE(lprof, sizeof *lprof);
}
- EXIT;
}
EXPORT_SYMBOL(class_del_profile);
@@ -920,7 +909,6 @@ EXPORT_SYMBOL(class_del_profile);
void class_del_profiles(void)
{
struct lustre_profile *lprof, *n;
- ENTRY;
list_for_each_entry_safe(lprof, n, &lustre_profile_list, lp_list) {
list_del(&lprof->lp_list);
@@ -930,13 +918,11 @@ void class_del_profiles(void)
OBD_FREE(lprof->lp_md, strlen(lprof->lp_md) + 1);
OBD_FREE(lprof, sizeof *lprof);
}
- EXIT;
}
EXPORT_SYMBOL(class_del_profiles);
static int class_set_global(char *ptr, int val, struct lustre_cfg *lcfg)
{
- ENTRY;
if (class_match_param(ptr, PARAM_AT_MIN, NULL) == 0)
at_min = val;
else if (class_match_param(ptr, PARAM_AT_MAX, NULL) == 0)
@@ -951,10 +937,10 @@ static int class_set_global(char *ptr, int val, struct lustre_cfg *lcfg)
strlcpy(obd_jobid_var, lustre_cfg_string(lcfg, 2),
JOBSTATS_JOBID_VAR_MAX_LEN + 1);
else
- RETURN(-EINVAL);
+ return -EINVAL;
CDEBUG(D_IOCTL, "global %s = %d\n", ptr, val);
- RETURN(0);
+ return 0;
}
@@ -991,14 +977,13 @@ struct lustre_cfg *lustre_cfg_rename(struct lustre_cfg *cfg,
char *value = NULL;
int name_len = 0;
int new_len = 0;
- ENTRY;
if (cfg == NULL || new_name == NULL)
- RETURN(ERR_PTR(-EINVAL));
+ return ERR_PTR(-EINVAL);
param = lustre_cfg_string(cfg, 1);
if (param == NULL)
- RETURN(ERR_PTR(-EINVAL));
+ return ERR_PTR(-EINVAL);
value = strchr(param, '=');
if (value == NULL)
@@ -1010,7 +995,7 @@ struct lustre_cfg *lustre_cfg_rename(struct lustre_cfg *cfg,
OBD_ALLOC(new_param, new_len);
if (new_param == NULL)
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
strcpy(new_param, new_name);
if (value != NULL)
@@ -1019,7 +1004,7 @@ struct lustre_cfg *lustre_cfg_rename(struct lustre_cfg *cfg,
OBD_ALLOC_PTR(bufs);
if (bufs == NULL) {
OBD_FREE(new_param, new_len);
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
}
lustre_cfg_bufs_reset(bufs, NULL);
@@ -1031,14 +1016,14 @@ struct lustre_cfg *lustre_cfg_rename(struct lustre_cfg *cfg,
OBD_FREE(new_param, new_len);
OBD_FREE_PTR(bufs);
if (new_cfg == NULL)
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
new_cfg->lcfg_num = cfg->lcfg_num;
new_cfg->lcfg_flags = cfg->lcfg_flags;
new_cfg->lcfg_nid = cfg->lcfg_nid;
new_cfg->lcfg_nal = cfg->lcfg_nal;
- RETURN(new_cfg);
+ return new_cfg;
}
EXPORT_SYMBOL(lustre_cfg_rename);
@@ -1244,11 +1229,10 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
int matched = 0, j = 0;
int rc = 0;
int skip = 0;
- ENTRY;
if (lcfg->lcfg_command != LCFG_PARAM) {
CERROR("Unknown command: %d\n", lcfg->lcfg_command);
- RETURN(-EINVAL);
+ return -EINVAL;
}
/* fake a seq file so that var->fops->write can work... */
@@ -1295,7 +1279,7 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
/* If the prefix doesn't match, return error so we
can pass it down the stack */
if (strnchr(key, keylen, '.'))
- RETURN(-ENOSYS);
+ return -ENOSYS;
CERROR("%s: unknown param %s\n",
(char *)lustre_cfg_string(lcfg, 0), key);
/* rc = -EINVAL; continue parsing other params */
@@ -1316,7 +1300,7 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
rc = 0;
if (!rc && skip)
rc = skip;
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(class_process_proc_param);
@@ -1335,7 +1319,6 @@ int class_config_llog_handler(const struct lu_env *env,
int cfg_len = rec->lrh_len;
char *cfg_buf = (char*) (rec + 1);
int rc = 0;
- ENTRY;
//class_config_dump_handler(handle, rec, data);
@@ -1426,10 +1409,13 @@ int class_config_llog_handler(const struct lu_env *env,
}
- if ((clli->cfg_flags & CFG_F_EXCLUDE) &&
- (lcfg->lcfg_command == LCFG_LOV_ADD_OBD))
- /* Add inactive instead */
- lcfg->lcfg_command = LCFG_LOV_ADD_INA;
+ if (clli->cfg_flags & CFG_F_EXCLUDE) {
+ CDEBUG(D_CONFIG, "cmd: %x marked EXCLUDED\n",
+ lcfg->lcfg_command);
+ if (lcfg->lcfg_command == LCFG_LOV_ADD_OBD)
+ /* Add inactive instead */
+ lcfg->lcfg_command = LCFG_LOV_ADD_INA;
+ }
lustre_cfg_bufs_init(&bufs, lcfg);
@@ -1513,7 +1499,7 @@ out:
handle->lgh_ctxt->loc_obd->obd_name, rc);
class_config_dump_handler(NULL, handle, rec, data);
}
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(class_config_llog_handler);
@@ -1524,12 +1510,11 @@ int class_config_parse_llog(const struct lu_env *env, struct llog_ctxt *ctxt,
struct llog_handle *llh;
llog_cb_t callback;
int rc;
- ENTRY;
CDEBUG(D_INFO, "looking up llog %s\n", name);
rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS);
if (rc)
- RETURN(rc);
+ return rc;
rc = llog_init_handle(env, llh, LLOG_F_IS_PLAIN, NULL);
if (rc)
@@ -1555,7 +1540,7 @@ int class_config_parse_llog(const struct lu_env *env, struct llog_ctxt *ctxt,
parse_out:
llog_close(env, llh);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(class_config_parse_llog);
@@ -1571,12 +1556,10 @@ int class_config_parse_rec(struct llog_rec_hdr *rec, char *buf, int size)
char *end = buf + size;
int rc = 0;
- ENTRY;
-
LASSERT(rec->lrh_type == OBD_CFG_REC);
rc = lustre_cfg_sanity_check(lcfg, rec->lrh_len);
if (rc < 0)
- RETURN(rc);
+ return rc;
ptr += snprintf(ptr, end-ptr, "cmd=%05x ", lcfg->lcfg_command);
if (lcfg->lcfg_flags)
@@ -1607,7 +1590,7 @@ int class_config_parse_rec(struct llog_rec_hdr *rec, char *buf, int size)
}
/* return consumed bytes */
rc = ptr - buf;
- RETURN(rc);
+ return rc;
}
int class_config_dump_handler(const struct lu_env *env,
@@ -1617,11 +1600,9 @@ int class_config_dump_handler(const struct lu_env *env,
char *outstr;
int rc = 0;
- ENTRY;
-
OBD_ALLOC(outstr, 256);
if (outstr == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
if (rec->lrh_type == OBD_CFG_REC) {
class_config_parse_rec(rec, outstr, 256);
@@ -1632,7 +1613,7 @@ int class_config_dump_handler(const struct lu_env *env,
}
OBD_FREE(outstr, 256);
- RETURN(rc);
+ return rc;
}
int class_config_dump_llog(const struct lu_env *env, struct llog_ctxt *ctxt,
@@ -1641,13 +1622,11 @@ int class_config_dump_llog(const struct lu_env *env, struct llog_ctxt *ctxt,
struct llog_handle *llh;
int rc;
- ENTRY;
-
LCONSOLE_INFO("Dumping config log %s\n", name);
rc = llog_open(env, ctxt, &llh, NULL, name, LLOG_OPEN_EXISTS);
if (rc)
- RETURN(rc);
+ return rc;
rc = llog_init_handle(env, llh, LLOG_F_IS_PLAIN, NULL);
if (rc)
@@ -1658,7 +1637,7 @@ parse_out:
llog_close(env, llh);
LCONSOLE_INFO("End config log %s\n", name);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(class_config_dump_llog);
@@ -1671,11 +1650,10 @@ int class_manual_cleanup(struct obd_device *obd)
struct lustre_cfg *lcfg;
struct lustre_cfg_bufs bufs;
int rc;
- ENTRY;
if (!obd) {
CERROR("empty cleanup\n");
- RETURN(-EALREADY);
+ return -EALREADY;
}
if (obd->obd_force)
@@ -1690,7 +1668,7 @@ int class_manual_cleanup(struct obd_device *obd)
lustre_cfg_bufs_set_string(&bufs, 1, flags);
lcfg = lustre_cfg_new(LCFG_CLEANUP, &bufs);
if (!lcfg)
- RETURN(-ENOMEM);
+ return -ENOMEM;
rc = class_process_config(lcfg);
if (rc) {
@@ -1705,7 +1683,7 @@ int class_manual_cleanup(struct obd_device *obd)
CERROR("detach failed %d: %s\n", rc, obd->obd_name);
out:
lustre_cfg_free(lcfg);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(class_manual_cleanup);
@@ -1797,7 +1775,7 @@ nid_key(struct hlist_node *hnode)
exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
- RETURN(&exp->exp_connection->c_peer.nid);
+ return &exp->exp_connection->c_peer.nid;
}
/*
@@ -1812,8 +1790,8 @@ nid_kepcmp(const void *key, struct hlist_node *hnode)
LASSERT(key);
exp = hlist_entry(hnode, struct obd_export, exp_nid_hash);
- RETURN(exp->exp_connection->c_peer.nid == *(lnet_nid_t *)key &&
- !exp->exp_failed);
+ return exp->exp_connection->c_peer.nid == *(lnet_nid_t *)key &&
+ !exp->exp_failed;
}
static void *
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index 99adad9793c..68a4d6a0eb0 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -47,10 +47,8 @@
#include <obd.h>
#include <lvfs.h>
-#include <lustre_fsfilt.h>
#include <obd_class.h>
#include <lustre/lustre_user.h>
-#include <linux/version.h>
#include <lustre_log.h>
#include <lustre_disk.h>
#include <lustre_param.h>
@@ -82,14 +80,13 @@ int lustre_process_log(struct super_block *sb, char *logname,
struct lustre_sb_info *lsi = s2lsi(sb);
struct obd_device *mgc = lsi->lsi_mgc;
int rc;
- ENTRY;
LASSERT(mgc);
LASSERT(cfg);
OBD_ALLOC_PTR(bufs);
if (bufs == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
/* mgc_process_config */
lustre_cfg_bufs_reset(bufs, mgc->obd_name);
@@ -119,7 +116,7 @@ int lustre_process_log(struct super_block *sb, char *logname,
rc);
/* class_obd_list(); */
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(lustre_process_log);
@@ -132,10 +129,9 @@ int lustre_end_log(struct super_block *sb, char *logname,
struct lustre_sb_info *lsi = s2lsi(sb);
struct obd_device *mgc = lsi->lsi_mgc;
int rc;
- ENTRY;
if (!mgc)
- RETURN(-ENOENT);
+ return -ENOENT;
/* mgc_process_config */
lustre_cfg_bufs_reset(&bufs, mgc->obd_name);
@@ -145,7 +141,7 @@ int lustre_end_log(struct super_block *sb, char *logname,
lcfg = lustre_cfg_new(LCFG_LOG_END, &bufs);
rc = obd_process_config(mgc, sizeof(*lcfg), lcfg);
lustre_cfg_free(lcfg);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(lustre_end_log);
@@ -225,7 +221,6 @@ int lustre_start_mgc(struct super_block *sb)
char *ptr;
int recov_bk;
int rc = 0, i = 0, j, len;
- ENTRY;
LASSERT(lsi->lsi_lmd);
@@ -254,7 +249,7 @@ int lustre_start_mgc(struct super_block *sb)
}
if (i == 0) {
CERROR("No valid MGS nids found.\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
mutex_lock(&mgc_start_lock);
@@ -478,7 +473,7 @@ out_free:
OBD_FREE(mgcname, len);
if (niduuid)
OBD_FREE(niduuid, len + 2);
- RETURN(rc);
+ return rc;
}
static int lustre_stop_mgc(struct super_block *sb)
@@ -487,13 +482,12 @@ static int lustre_stop_mgc(struct super_block *sb)
struct obd_device *obd;
char *niduuid = 0, *ptr = 0;
int i, rc = 0, len = 0;
- ENTRY;
if (!lsi)
- RETURN(-ENOENT);
+ return -ENOENT;
obd = lsi->lsi_mgc;
if (!obd)
- RETURN(-ENOENT);
+ return -ENOENT;
lsi->lsi_mgc = NULL;
mutex_lock(&mgc_start_lock);
@@ -549,7 +543,7 @@ out:
/* class_import_put will get rid of the additional connections */
mutex_unlock(&mgc_start_lock);
- RETURN(rc);
+ return rc;
}
/***************** lustre superblock **************/
@@ -557,15 +551,14 @@ out:
struct lustre_sb_info *lustre_init_lsi(struct super_block *sb)
{
struct lustre_sb_info *lsi;
- ENTRY;
OBD_ALLOC_PTR(lsi);
if (!lsi)
- RETURN(NULL);
+ return NULL;
OBD_ALLOC_PTR(lsi->lsi_lmd);
if (!lsi->lsi_lmd) {
OBD_FREE_PTR(lsi);
- RETURN(NULL);
+ return NULL;
}
lsi->lsi_lmd->lmd_exclude_count = 0;
@@ -578,13 +571,12 @@ struct lustre_sb_info *lustre_init_lsi(struct super_block *sb)
/* Default umount style */
lsi->lsi_flags = LSI_UMOUNT_FAILOVER;
- RETURN(lsi);
+ return lsi;
}
static int lustre_free_lsi(struct super_block *sb)
{
struct lustre_sb_info *lsi = s2lsi(sb);
- ENTRY;
LASSERT(lsi != NULL);
CDEBUG(D_MOUNT, "Freeing lsi %p\n", lsi);
@@ -625,7 +617,7 @@ static int lustre_free_lsi(struct super_block *sb)
OBD_FREE(lsi, sizeof(*lsi));
s2lsi_nocast(sb) = NULL;
- RETURN(0);
+ return 0;
}
/* The lsi has one reference for every server that is using the disk -
@@ -633,7 +625,6 @@ static int lustre_free_lsi(struct super_block *sb)
int lustre_put_lsi(struct super_block *sb)
{
struct lustre_sb_info *lsi = s2lsi(sb);
- ENTRY;
LASSERT(lsi != NULL);
@@ -645,11 +636,20 @@ int lustre_put_lsi(struct super_block *sb)
obd_zombie_barrier();
}
lustre_free_lsi(sb);
- RETURN(1);
+ return 1;
}
- RETURN(0);
+ return 0;
}
+/*** SERVER NAME ***
+ * <FSNAME><SEPARATOR><TYPE><INDEX>
+ * FSNAME is between 1 and 8 characters (inclusive).
+ * Excluded characters are '/' and ':'
+ * SEPARATOR is either ':' or '-'
+ * TYPE: "OST", "MDT", etc.
+ * INDEX: Hex representation of the index
+ */
+
/** Get the fsname ("lustre") from the server name ("lustre-OST003F").
* @param [in] svname server name including type and index
* @param [out] fsname Buffer to copy filesystem name prefix into.
@@ -659,22 +659,13 @@ int lustre_put_lsi(struct super_block *sb)
*/
int server_name2fsname(const char *svname, char *fsname, const char **endptr)
{
- const char *dash = strrchr(svname, '-');
- if (!dash) {
- dash = strrchr(svname, ':');
- if (!dash)
- return -EINVAL;
- }
+ const char *dash;
- /* interpret <fsname>-MDTXXXXX-mdc as mdt, the better way is to pass
- * in the fsname, then determine the server index */
- if (!strcmp(LUSTRE_MDC_NAME, dash + 1)) {
- dash--;
- for (; dash > svname && *dash != '-' && *dash != ':'; dash--)
- ;
- if (dash == svname)
- return -EINVAL;
- }
+ dash = svname + strnlen(svname, 8); /* max fsname length is 8 */
+ for (; dash > svname && *dash != '-' && *dash != ':'; dash--)
+ ;
+ if (dash == svname)
+ return -EINVAL;
if (fsname != NULL) {
strncpy(fsname, svname, dash - svname);
@@ -697,15 +688,15 @@ int server_name2svname(const char *label, char *svname, const char **endptr,
size_t svsize)
{
int rc;
- const const char *dash;
+ const char *dash;
/* We use server_name2fsname() just for parsing */
rc = server_name2fsname(label, NULL, &dash);
if (rc != 0)
return rc;
- if (*dash != '-')
- return -1;
+ if (endptr != NULL)
+ *endptr = dash;
if (strlcpy(svname, dash + 1, svsize) >= svsize)
return -E2BIG;
@@ -730,9 +721,6 @@ int server_name2index(const char *svname, __u32 *idx, const char **endptr)
if (rc != 0)
return rc;
- if (*dash != '-')
- return -EINVAL;
-
dash++;
if (strncmp(dash, "MDT", 3) == 0)
@@ -744,11 +732,20 @@ int server_name2index(const char *svname, __u32 *idx, const char **endptr)
dash += 3;
- if (strcmp(dash, "all") == 0)
+ if (strncmp(dash, "all", 3) == 0) {
+ if (endptr != NULL)
+ *endptr = dash + 3;
return rc | LDD_F_SV_ALL;
+ }
index = simple_strtoul(dash, (char **)endptr, 16);
- *idx = index;
+ if (idx != NULL)
+ *idx = index;
+
+	/* Account for a trailing -mdc that may follow the index when an mdt is specified */
+ if (endptr != NULL && strncmp(LUSTRE_MDC_NAME, *endptr + 1,
+ sizeof(LUSTRE_MDC_NAME)-1) == 0)
+ *endptr += sizeof(LUSTRE_MDC_NAME);
return rc;
}
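The server-name format documented earlier in this file (<FSNAME><SEPARATOR><TYPE><INDEX>, e.g. "lustre-OST003F") is what the reworked server_name2fsname()/server_name2index() parse. The standalone sketch below illustrates only the documented layout; it is not the kernel implementation and the demo_* names are assumptions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Split "lustre-OST003F" into fsname ("lustre"), type ("OST") and a hex
 * index (0x3f).  Returns 0 on success, -1 on a malformed name. */
static int demo_parse_svname(const char *svname, char *fsname, size_t fslen,
			     char type[4], unsigned long *idx)
{
	const char *sep = svname + strnlen(svname, 8);	/* fsname <= 8 chars */

	while (sep > svname && *sep != '-' && *sep != ':')
		sep--;
	if (sep == svname || strlen(sep + 1) < 4)
		return -1;

	snprintf(fsname, fslen, "%.*s", (int)(sep - svname), svname);
	memcpy(type, sep + 1, 3);	/* "OST", "MDT", ... */
	type[3] = '\0';
	*idx = strtoul(sep + 4, NULL, 16);
	return 0;
}

/* Example: demo_parse_svname("lustre-OST003F", ...) yields
 * fsname="lustre", type="OST", idx=0x3f. */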
@@ -760,7 +757,6 @@ EXPORT_SYMBOL(server_name2index);
int lustre_common_put_super(struct super_block *sb)
{
int rc;
- ENTRY;
CDEBUG(D_MOUNT, "dropping sb %p\n", sb);
@@ -769,7 +765,7 @@ int lustre_common_put_super(struct super_block *sb)
if (rc && (rc != -ENOENT)) {
if (rc != -EBUSY) {
CERROR("Can't stop MGC: %d\n", rc);
- RETURN(rc);
+ return rc;
}
/* BUSY just means that there's some other obd that
needs the mgc. Let him clean it up. */
@@ -778,7 +774,7 @@ int lustre_common_put_super(struct super_block *sb)
/* Drop a ref to the mounted disk */
lustre_put_lsi(sb);
lu_types_stop();
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(lustre_common_put_super);
@@ -816,12 +812,11 @@ int lustre_check_exclusion(struct super_block *sb, char *svname)
struct lustre_mount_data *lmd = lsi->lsi_lmd;
__u32 index;
int i, rc;
- ENTRY;
rc = server_name2index(svname, &index, NULL);
if (rc != LDD_F_SV_TYPE_OST)
/* Only exclude OSTs */
- RETURN(0);
+ return 0;
CDEBUG(D_MOUNT, "Check exclusion %s (%d) in %d of %s\n", svname,
index, lmd->lmd_exclude_count, lmd->lmd_dev);
@@ -829,10 +824,10 @@ int lustre_check_exclusion(struct super_block *sb, char *svname)
for(i = 0; i < lmd->lmd_exclude_count; i++) {
if (index == lmd->lmd_exclude[i]) {
CWARN("Excluding %s (on exclusion list)\n", svname);
- RETURN(1);
+ return 1;
}
}
- RETURN(0);
+ return 0;
}
/* mount -v -o exclude=lustre-OST0001:lustre-OST0002 -t lustre ... */
@@ -841,7 +836,6 @@ static int lmd_make_exclusion(struct lustre_mount_data *lmd, const char *ptr)
const char *s1 = ptr, *s2;
__u32 index, *exclude_list;
int rc = 0, devmax;
- ENTRY;
/* The shortest an ost name can be is 8 chars: -OST0000.
We don't actually know the fsname at this time, so in fact
@@ -851,20 +845,22 @@ static int lmd_make_exclusion(struct lustre_mount_data *lmd, const char *ptr)
/* temp storage until we figure out how many we have */
OBD_ALLOC(exclude_list, sizeof(index) * devmax);
if (!exclude_list)
- RETURN(-ENOMEM);
+ return -ENOMEM;
/* we enter this fn pointing at the '=' */
while (*s1 && *s1 != ' ' && *s1 != ',') {
s1++;
rc = server_name2index(s1, &index, &s2);
if (rc < 0) {
- CERROR("Can't parse server name '%s'\n", s1);
+ CERROR("Can't parse server name '%s': rc = %d\n",
+ s1, rc);
break;
}
if (rc == LDD_F_SV_TYPE_OST)
exclude_list[lmd->lmd_exclude_count++] = index;
else
- CDEBUG(D_MOUNT, "ignoring exclude %.7s\n", s1);
+ CDEBUG(D_MOUNT, "ignoring exclude %.*s: type = %#x\n",
+ (uint)(s2-s1), s1, rc);
s1 = s2;
/* now we are pointing at ':' (next exclude)
or ',' (end of excludes) */
@@ -887,7 +883,7 @@ static int lmd_make_exclusion(struct lustre_mount_data *lmd, const char *ptr)
}
}
OBD_FREE(exclude_list, sizeof(index) * devmax);
- RETURN(rc);
+ return rc;
}
static int lmd_parse_mgssec(struct lustre_mount_data *lmd, char *ptr)
@@ -991,13 +987,12 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
char *s1, *s2, *devname = NULL;
struct lustre_mount_data *raw = (struct lustre_mount_data *)options;
int rc = 0;
- ENTRY;
LASSERT(lmd);
if (!options) {
LCONSOLE_ERROR_MSG(0x162, "Missing mount data: check that "
"/sbin/mount.lustre is installed.\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
/* Options should be a string - try to detect old lmd data */
@@ -1005,13 +1000,13 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
LCONSOLE_ERROR_MSG(0x163, "You're using an old version of "
"/sbin/mount.lustre. Please install "
"version %s\n", LUSTRE_VERSION_STRING);
- RETURN(-EINVAL);
+ return -EINVAL;
}
lmd->lmd_magic = LMD_MAGIC;
OBD_ALLOC(lmd->lmd_params, 4096);
if (lmd->lmd_params == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
lmd->lmd_params[0] = '\0';
/* Set default flags here */
@@ -1150,14 +1145,14 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
/* Freed in lustre_free_lsi */
OBD_ALLOC(lmd->lmd_profile, strlen(s1) + 8);
if (!lmd->lmd_profile)
- RETURN(-ENOMEM);
+ return -ENOMEM;
sprintf(lmd->lmd_profile, "%s-client", s1);
}
/* Freed in lustre_free_lsi */
OBD_ALLOC(lmd->lmd_dev, strlen(devname) + 1);
if (!lmd->lmd_dev)
- RETURN(-ENOMEM);
+ return -ENOMEM;
strcpy(lmd->lmd_dev, devname);
/* Save mount options */
@@ -1168,18 +1163,18 @@ static int lmd_parse(char *options, struct lustre_mount_data *lmd)
/* Freed in lustre_free_lsi */
OBD_ALLOC(lmd->lmd_opts, strlen(options) + 1);
if (!lmd->lmd_opts)
- RETURN(-ENOMEM);
+ return -ENOMEM;
strcpy(lmd->lmd_opts, options);
}
lmd_print(lmd);
lmd->lmd_magic = LMD_MAGIC;
- RETURN(rc);
+ return rc;
invalid:
CERROR("Bad mount options %s\n", options);
- RETURN(-EINVAL);
+ return -EINVAL;
}
struct lustre_mount_data2 {
@@ -1198,13 +1193,12 @@ int lustre_fill_super(struct super_block *sb, void *data, int silent)
struct lustre_mount_data2 *lmd2 = data;
struct lustre_sb_info *lsi;
int rc;
- ENTRY;
CDEBUG(D_MOUNT|D_VFSTRACE, "VFS Op: sb %p\n", sb);
lsi = lustre_init_lsi(sb);
if (!lsi)
- RETURN(-ENOMEM);
+ return -ENOMEM;
lmd = lsi->lsi_lmd;
/*
diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c
index 01a0e1f83a6..70997648a4f 100644
--- a/drivers/staging/lustre/lustre/obdclass/obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/obdo.c
@@ -100,11 +100,11 @@ void obdo_from_inode(struct obdo *dst, struct inode *src, obd_flag valid)
newvalid |= OBD_MD_FLMODE;
}
if (valid & OBD_MD_FLUID) {
- dst->o_uid = src->i_uid;
+ dst->o_uid = from_kuid(&init_user_ns, src->i_uid);
newvalid |= OBD_MD_FLUID;
}
if (valid & OBD_MD_FLGID) {
- dst->o_gid = src->i_gid;
+ dst->o_gid = from_kgid(&init_user_ns, src->i_gid);
newvalid |= OBD_MD_FLGID;
}
if (valid & OBD_MD_FLFLAGS) {
@@ -232,16 +232,16 @@ void obdo_from_iattr(struct obdo *oa, struct iattr *attr, unsigned int ia_valid)
if (ia_valid & ATTR_MODE) {
oa->o_mode = attr->ia_mode;
oa->o_valid |= OBD_MD_FLTYPE | OBD_MD_FLMODE;
- if (!current_is_in_group(oa->o_gid) &&
+ if (!in_group_p(make_kgid(&init_user_ns, oa->o_gid)) &&
!cfs_capable(CFS_CAP_FSETID))
oa->o_mode &= ~S_ISGID;
}
if (ia_valid & ATTR_UID) {
- oa->o_uid = attr->ia_uid;
+ oa->o_uid = from_kuid(&init_user_ns, attr->ia_uid);
oa->o_valid |= OBD_MD_FLUID;
}
if (ia_valid & ATTR_GID) {
- oa->o_gid = attr->ia_gid;
+ oa->o_gid = from_kgid(&init_user_ns, attr->ia_gid);
oa->o_valid |= OBD_MD_FLGID;
}
}
@@ -281,16 +281,16 @@ void iattr_from_obdo(struct iattr *attr, struct obdo *oa, obd_flag valid)
if (valid & OBD_MD_FLMODE) {
attr->ia_mode = (attr->ia_mode & S_IFMT)|(oa->o_mode & ~S_IFMT);
attr->ia_valid |= ATTR_MODE;
- if (!current_is_in_group(oa->o_gid) &&
+ if (!in_group_p(make_kgid(&init_user_ns, oa->o_gid)) &&
!cfs_capable(CFS_CAP_FSETID))
attr->ia_mode &= ~S_ISGID;
}
if (valid & OBD_MD_FLUID) {
- attr->ia_uid = oa->o_uid;
+ attr->ia_uid = make_kuid(&init_user_ns, oa->o_uid);
attr->ia_valid |= ATTR_UID;
}
if (valid & OBD_MD_FLGID) {
- attr->ia_gid = oa->o_gid;
+ attr->ia_gid = make_kgid(&init_user_ns, oa->o_gid);
attr->ia_valid |= ATTR_GID;
}
}
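The obdo.c hunks stop copying raw inode uid/gid integers and instead go through the kuid_t/kgid_t conversion helpers, pinning everything to init_user_ns as the patch does. A minimal sketch of the two directions; the demo_wire_attrs struct is a stand-in for struct obdo's o_uid/o_gid fields, not a real Lustre type.

#include <linux/uidgid.h>
#include <linux/fs.h>

struct demo_wire_attrs {
	__u32 w_uid;
	__u32 w_gid;
};

/* Kernel-internal kuid_t/kgid_t -> plain numbers for the wire format. */
static void demo_pack_ids(struct demo_wire_attrs *dst, const struct inode *src)
{
	dst->w_uid = from_kuid(&init_user_ns, src->i_uid);
	dst->w_gid = from_kgid(&init_user_ns, src->i_gid);
}

/* ...and back again when filling a struct iattr for the VFS. */
static void demo_unpack_ids(struct iattr *attr,
			    const struct demo_wire_attrs *src)
{
	attr->ia_uid = make_kuid(&init_user_ns, src->w_uid);
	attr->ia_gid = make_kgid(&init_user_ns, src->w_gid);
	attr->ia_valid |= ATTR_UID | ATTR_GID;
}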
diff --git a/drivers/staging/lustre/lustre/obdecho/echo.c b/drivers/staging/lustre/lustre/obdecho/echo.c
index 9e64939af9d..debb9cec490 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo.c
@@ -96,12 +96,10 @@ static int echo_init_export(struct obd_export *exp)
static int echo_destroy_export(struct obd_export *exp)
{
- ENTRY;
-
target_destroy_export(exp);
ldlm_destroy_export(exp);
- RETURN(0);
+ return 0;
}
static __u64 echo_next_id(struct obd_device *obddev)
@@ -151,25 +149,24 @@ static int echo_destroy(const struct lu_env *env, struct obd_export *exp,
{
struct obd_device *obd = class_exp2obd(exp);
- ENTRY;
if (!obd) {
CERROR("invalid client cookie "LPX64"\n",
exp->exp_handle.h_cookie);
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (!(oa->o_valid & OBD_MD_FLID)) {
CERROR("obdo missing FLID valid flag: "LPX64"\n", oa->o_valid);
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (ostid_id(&oa->o_oi) > obd->u.echo.eo_lastino ||
ostid_id(&oa->o_oi) < ECHO_INIT_OID) {
CERROR("bad destroy objid: "DOSTID"\n", POSTID(&oa->o_oi));
- RETURN(-EINVAL);
+ return -EINVAL;
}
- RETURN(0);
+ return 0;
}
static int echo_getattr(const struct lu_env *env, struct obd_export *exp,
@@ -178,24 +175,23 @@ static int echo_getattr(const struct lu_env *env, struct obd_export *exp,
struct obd_device *obd = class_exp2obd(exp);
obd_id id = ostid_id(&oinfo->oi_oa->o_oi);
- ENTRY;
if (!obd) {
CERROR("invalid client cookie "LPX64"\n",
exp->exp_handle.h_cookie);
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (!(oinfo->oi_oa->o_valid & OBD_MD_FLID)) {
CERROR("obdo missing FLID valid flag: "LPX64"\n",
oinfo->oi_oa->o_valid);
- RETURN(-EINVAL);
+ return -EINVAL;
}
obdo_cpy_md(oinfo->oi_oa, &obd->u.echo.eo_oa, oinfo->oi_oa->o_valid);
ostid_set_seq_echo(&oinfo->oi_oa->o_oi);
ostid_set_id(&oinfo->oi_oa->o_oi, id);
- RETURN(0);
+ return 0;
}
static int echo_setattr(const struct lu_env *env, struct obd_export *exp,
@@ -203,17 +199,16 @@ static int echo_setattr(const struct lu_env *env, struct obd_export *exp,
{
struct obd_device *obd = class_exp2obd(exp);
- ENTRY;
if (!obd) {
CERROR("invalid client cookie "LPX64"\n",
exp->exp_handle.h_cookie);
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (!(oinfo->oi_oa->o_valid & OBD_MD_FLID)) {
CERROR("obdo missing FLID valid flag: "LPX64"\n",
oinfo->oi_oa->o_valid);
- RETURN(-EINVAL);
+ return -EINVAL;
}
memcpy(&obd->u.echo.eo_oa, oinfo->oi_oa, sizeof(*oinfo->oi_oa));
@@ -225,7 +220,7 @@ static int echo_setattr(const struct lu_env *env, struct obd_export *exp,
oti->oti_ack_locks[0].lock = obd->u.echo.eo_nl_lock;
}
- RETURN(0);
+ return 0;
}
static void
@@ -410,11 +405,10 @@ static int echo_preprw(const struct lu_env *env, int cmd,
int tot_bytes = 0;
int rc = 0;
int i, left;
- ENTRY;
obd = export->exp_obd;
if (obd == NULL)
- RETURN(-EINVAL);
+ return -EINVAL;
/* Temp fix to stop falling foul of osc_announce_cached() */
oa->o_valid &= ~(OBD_MD_FLBLOCKS | OBD_MD_FLGRANT);
@@ -456,7 +450,7 @@ static int echo_preprw(const struct lu_env *env, int cmd,
CDEBUG(D_PAGE, "%d pages allocated after prep\n",
atomic_read(&obd->u.echo.eo_prep));
- RETURN(0);
+ return 0;
preprw_cleanup:
/* It is possible that we would rather handle errors by allow
@@ -487,11 +481,10 @@ static int echo_commitrw(const struct lu_env *env, int cmd,
struct obd_device *obd;
int pgs = 0;
int i;
- ENTRY;
obd = export->exp_obd;
if (obd == NULL)
- RETURN(-EINVAL);
+ return -EINVAL;
if (rc)
GOTO(commitrw_cleanup, rc);
@@ -506,7 +499,7 @@ static int echo_commitrw(const struct lu_env *env, int cmd,
if (niocount && res == NULL) {
CERROR("NULL res niobuf with niocount %d\n", niocount);
- RETURN(-EINVAL);
+ return -EINVAL;
}
LASSERT(oti == NULL || oti->oti_handle == (void *)DESC_PRIV);
@@ -537,7 +530,7 @@ static int echo_commitrw(const struct lu_env *env, int cmd,
CDEBUG(D_PAGE, "%d pages remain after commit\n",
atomic_read(&obd->u.echo.eo_prep));
- RETURN(rc);
+ return rc;
commitrw_cleanup:
atomic_sub(pgs, &obd->u.echo.eo_prep);
@@ -565,7 +558,6 @@ static int echo_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
__u64 lock_flags = 0;
struct ldlm_res_id res_id = {.name = {1}};
char ns_name[48];
- ENTRY;
obd->u.echo.eo_obt.obt_magic = OBT_MAGIC;
spin_lock_init(&obd->u.echo.eo_lock);
@@ -578,7 +570,7 @@ static int echo_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
LDLM_NS_TYPE_OST);
if (obd->obd_namespace == NULL) {
LBUG();
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
rc = ldlm_cli_enqueue_local(obd->obd_namespace, &res_id, LDLM_PLAIN,
@@ -600,13 +592,12 @@ static int echo_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
ptlrpc_init_client (LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
"echo_ldlm_cb_client", &obd->obd_ldlm_client);
- RETURN(0);
+ return 0;
}
static int echo_cleanup(struct obd_device *obd)
{
int leaked;
- ENTRY;
lprocfs_obd_cleanup(obd);
lprocfs_free_obd_stats(obd);
@@ -624,7 +615,7 @@ static int echo_cleanup(struct obd_device *obd)
if (leaked != 0)
CERROR("%d prep/commitrw pages leaked\n", leaked);
- RETURN(0);
+ return 0;
}
struct obd_ops echo_obd_ops = {
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 184195fde62..2644edf438c 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -43,6 +43,7 @@
#include <lustre_debug.h>
#include <lprocfs_status.h>
#include <cl_object.h>
+#include <md_object.h>
#include <lustre_fid.h>
#include <lustre_acl.h>
#include <lustre_net.h>
@@ -312,11 +313,9 @@ static void echo_page_fini(const struct lu_env *env,
struct echo_page *ep = cl2echo_page(slice);
struct echo_object *eco = cl2echo_obj(slice->cpl_obj);
struct page *vmpage = ep->ep_vmpage;
- ENTRY;
atomic_dec(&eco->eo_npages);
page_cache_release(vmpage);
- EXIT;
}
static int echo_page_prep(const struct lu_env *env,
@@ -408,14 +407,13 @@ static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
{
struct echo_page *ep = cl_object_page_slice(obj, page);
struct echo_object *eco = cl2echo_obj(obj);
- ENTRY;
ep->ep_vmpage = vmpage;
page_cache_get(vmpage);
mutex_init(&ep->ep_lock);
cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
atomic_inc(&eco->eo_npages);
- RETURN(0);
+ return 0;
}
static int echo_io_init(const struct lu_env *env, struct cl_object *obj,
@@ -429,7 +427,6 @@ static int echo_lock_init(const struct lu_env *env,
const struct cl_io *unused)
{
struct echo_lock *el;
- ENTRY;
OBD_SLAB_ALLOC_PTR_GFP(el, echo_lock_kmem, __GFP_IO);
if (el != NULL) {
@@ -438,7 +435,7 @@ static int echo_lock_init(const struct lu_env *env,
INIT_LIST_HEAD(&el->el_chain);
atomic_set(&el->el_refcount, 0);
}
- RETURN(el == NULL ? -ENOMEM : 0);
+ return el == NULL ? -ENOMEM : 0;
}
static int echo_conf_set(const struct lu_env *env, struct cl_object *obj,
@@ -467,7 +464,6 @@ static int echo_object_init(const struct lu_env *env, struct lu_object *obj,
struct echo_device *ed = cl2echo_dev(lu2cl_dev(obj->lo_dev));
struct echo_client_obd *ec = ed->ed_ec;
struct echo_object *eco = cl2echo_obj(lu2cl(obj));
- ENTRY;
if (ed->ed_next) {
struct lu_object *below;
@@ -477,7 +473,7 @@ static int echo_object_init(const struct lu_env *env, struct lu_object *obj,
below = under->ld_ops->ldo_object_alloc(env, obj->lo_header,
under);
if (below == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
lu_object_add(obj, below);
}
@@ -501,7 +497,7 @@ static int echo_object_init(const struct lu_env *env, struct lu_object *obj,
list_add_tail(&eco->eo_obj_chain, &ec->ec_objects);
spin_unlock(&ec->ec_lock);
- RETURN(0);
+ return 0;
}
/* taken from osc_unpackmd() */
@@ -510,8 +506,6 @@ static int echo_alloc_memmd(struct echo_device *ed,
{
int lsm_size;
- ENTRY;
-
/* If export is lov/osc then use their obd method */
if (ed->ed_next != NULL)
return obd_alloc_memmd(ed->ed_ec->ec_exp, lsmp);
@@ -521,27 +515,25 @@ static int echo_alloc_memmd(struct echo_device *ed,
LASSERT(*lsmp == NULL);
OBD_ALLOC(*lsmp, lsm_size);
if (*lsmp == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
OBD_ALLOC((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
if ((*lsmp)->lsm_oinfo[0] == NULL) {
OBD_FREE(*lsmp, lsm_size);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
loi_init((*lsmp)->lsm_oinfo[0]);
(*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
ostid_set_seq_echo(&(*lsmp)->lsm_oi);
- RETURN(lsm_size);
+ return lsm_size;
}
static int echo_free_memmd(struct echo_device *ed, struct lov_stripe_md **lsmp)
{
int lsm_size;
- ENTRY;
-
/* If export is lov/osc then use their obd method */
if (ed->ed_next != NULL)
return obd_free_memmd(ed->ed_ec->ec_exp, lsmp);
@@ -552,14 +544,13 @@ static int echo_free_memmd(struct echo_device *ed, struct lov_stripe_md **lsmp)
OBD_FREE((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
OBD_FREE(*lsmp, lsm_size);
*lsmp = NULL;
- RETURN(0);
+ return 0;
}
static void echo_object_free(const struct lu_env *env, struct lu_object *obj)
{
struct echo_object *eco = cl2echo_obj(lu2cl(obj));
struct echo_client_obd *ec = eco->eo_dev->ed_ec;
- ENTRY;
LASSERT(atomic_read(&eco->eo_npages) == 0);
@@ -573,7 +564,6 @@ static void echo_object_free(const struct lu_env *env, struct lu_object *obj)
if (eco->eo_lsm)
echo_free_memmd(eco->eo_dev, &eco->eo_lsm);
OBD_SLAB_FREE_PTR(eco, echo_object_kmem);
- EXIT;
}
static int echo_object_print(const struct lu_env *env, void *cookie,
@@ -606,7 +596,6 @@ static struct lu_object *echo_object_alloc(const struct lu_env *env,
{
struct echo_object *eco;
struct lu_object *obj = NULL;
- ENTRY;
/* we're the top dev. */
LASSERT(hdr == NULL);
@@ -622,7 +611,7 @@ static struct lu_object *echo_object_alloc(const struct lu_env *env,
eco->eo_cl.co_ops = &echo_cl_obj_ops;
obj->lo_ops = &echo_lu_obj_ops;
}
- RETURN(obj);
+ return obj;
}
static struct lu_device_operations echo_device_lu_ops = {
@@ -648,7 +637,7 @@ static int echo_site_init(const struct lu_env *env, struct echo_device *ed)
/* initialize site */
rc = cl_site_init(site, &ed->ed_cl);
if (rc) {
- CERROR("Cannot initilize site for echo client(%d)\n", rc);
+ CERROR("Cannot initialize site for echo client(%d)\n", rc);
return rc;
}
@@ -737,11 +726,10 @@ static int echo_fid_init(struct echo_device *ed, char *obd_name,
{
char *prefix;
int rc;
- ENTRY;
OBD_ALLOC_PTR(ed->ed_cl_seq);
if (ed->ed_cl_seq == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
OBD_ALLOC(prefix, MAX_OBD_NAME + 5);
if (prefix == NULL)
@@ -758,18 +746,17 @@ static int echo_fid_init(struct echo_device *ed, char *obd_name,
if (rc)
GOTO(out_free_seq, rc);
- RETURN(0);
+ return 0;
out_free_seq:
OBD_FREE_PTR(ed->ed_cl_seq);
ed->ed_cl_seq = NULL;
- RETURN(rc);
+ return rc;
}
static int echo_fid_fini(struct obd_device *obddev)
{
struct echo_device *ed = obd2echo_dev(obddev);
- ENTRY;
if (ed->ed_cl_seq != NULL) {
seq_client_fini(ed->ed_cl_seq);
@@ -777,7 +764,7 @@ static int echo_fid_fini(struct obd_device *obddev)
ed->ed_cl_seq = NULL;
}
- RETURN(0);
+ return 0;
}
static struct lu_device *echo_device_alloc(const struct lu_env *env,
@@ -792,7 +779,6 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
const char *tgt_type_name;
int rc;
int cleanup = 0;
- ENTRY;
OBD_ALLOC_PTR(ed);
if (ed == NULL)
@@ -916,7 +902,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
}
ed->ed_next = next;
- RETURN(&cd->cd_lu_dev);
+ return &cd->cd_lu_dev;
out:
switch(cleanup) {
case 4: {
@@ -1076,7 +1062,6 @@ static struct echo_object *cl_echo_object_find(struct echo_device *d,
struct lu_fid *fid;
int refcheck;
int rc;
- ENTRY;
LASSERT(lsmp);
lsm = *lsmp;
@@ -1087,11 +1072,11 @@ static struct echo_object *cl_echo_object_find(struct echo_device *d,
/* Never return an object if the obd is to be freed. */
if (echo_dev2cl(d)->cd_lu_dev.ld_obd->obd_stopping)
- RETURN(ERR_PTR(-ENODEV));
+ return ERR_PTR(-ENODEV);
env = cl_env_get(&refcheck);
if (IS_ERR(env))
- RETURN((void *)env);
+ return (void *)env;
info = echo_env_info(env);
conf = &info->eti_conf;
@@ -1131,7 +1116,7 @@ static struct echo_object *cl_echo_object_find(struct echo_device *d,
out:
cl_env_put(env, &refcheck);
- RETURN(eco);
+ return eco;
}
static int cl_echo_object_put(struct echo_object *eco)
@@ -1139,11 +1124,10 @@ static int cl_echo_object_put(struct echo_object *eco)
struct lu_env *env;
struct cl_object *obj = echo_obj2cl(eco);
int refcheck;
- ENTRY;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
- RETURN(PTR_ERR(env));
+ return PTR_ERR(env);
/* an external function to kill an object? */
if (eco->eo_deleted) {
@@ -1154,7 +1138,7 @@ static int cl_echo_object_put(struct echo_object *eco)
cl_object_put(env, obj);
cl_env_put(env, &refcheck);
- RETURN(0);
+ return 0;
}
static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
@@ -1167,7 +1151,6 @@ static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
struct cl_lock_descr *descr;
struct echo_thread_info *info;
int rc = -ENOMEM;
- ENTRY;
info = echo_env_info(env);
io = &info->eti_io;
@@ -1201,7 +1184,7 @@ static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
cl_lock_release(env, lck, "ec enqueue", current);
}
}
- RETURN(rc);
+ return rc;
}
static int cl_echo_enqueue(struct echo_object *eco, obd_off start, obd_off end,
@@ -1212,11 +1195,10 @@ static int cl_echo_enqueue(struct echo_object *eco, obd_off start, obd_off end,
struct cl_io *io;
int refcheck;
int result;
- ENTRY;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
- RETURN(PTR_ERR(env));
+ return PTR_ERR(env);
info = echo_env_info(env);
io = &info->eti_io;
@@ -1230,7 +1212,6 @@ static int cl_echo_enqueue(struct echo_object *eco, obd_off start, obd_off end,
result = cl_echo_enqueue0(env, eco, start, end, mode, cookie, 0);
cl_io_fini(env, io);
- EXIT;
out:
cl_env_put(env, &refcheck);
return result;
@@ -1243,7 +1224,6 @@ static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed,
struct echo_lock *ecl = NULL;
struct list_head *el;
int found = 0, still_used = 0;
- ENTRY;
LASSERT(ec != NULL);
spin_lock(&ec->ec_lock);
@@ -1262,10 +1242,10 @@ static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed,
spin_unlock(&ec->ec_lock);
if (!found)
- RETURN(-ENOENT);
+ return -ENOENT;
echo_lock_release(env, ecl, still_used);
- RETURN(0);
+ return 0;
}
static int cl_echo_cancel(struct echo_device *ed, __u64 cookie)
@@ -1273,16 +1253,15 @@ static int cl_echo_cancel(struct echo_device *ed, __u64 cookie)
struct lu_env *env;
int refcheck;
int rc;
- ENTRY;
env = cl_env_get(&refcheck);
if (IS_ERR(env))
- RETURN(PTR_ERR(env));
+ return PTR_ERR(env);
rc = cl_echo_cancel0(env, ed, cookie);
cl_env_put(env, &refcheck);
- RETURN(rc);
+ return rc;
}
static int cl_echo_async_brw(const struct lu_env *env, struct cl_io *io,
@@ -1291,7 +1270,6 @@ static int cl_echo_async_brw(const struct lu_env *env, struct cl_io *io,
struct cl_page *clp;
struct cl_page *temp;
int result = 0;
- ENTRY;
cl_page_list_for_each_safe(clp, temp, &queue->c2_qin) {
int rc;
@@ -1300,7 +1278,7 @@ static int cl_echo_async_brw(const struct lu_env *env, struct cl_io *io,
continue;
result = result ?: rc;
}
- RETURN(result);
+ return result;
}
static int cl_echo_object_brw(struct echo_object *eco, int rw, obd_off offset,
@@ -1318,13 +1296,12 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, obd_off offset,
int refcheck;
int rc;
int i;
- ENTRY;
LASSERT((offset & ~CFS_PAGE_MASK) == 0);
LASSERT(ed->ed_next != NULL);
env = cl_env_get(&refcheck);
if (IS_ERR(env))
- RETURN(PTR_ERR(env));
+ return PTR_ERR(env);
info = echo_env_info(env);
io = &info->eti_io;
@@ -1386,7 +1363,6 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, obd_off offset,
}
cl_echo_cancel0(env, ed, lh.cookie);
- EXIT;
error_lock:
cl_2queue_discard(env, io, queue);
cl_2queue_disown(env, io, queue);
@@ -1467,13 +1443,11 @@ static int echo_big_lmm_get(const struct lu_env *env, struct md_object *o,
struct echo_thread_info *info = echo_env_info(env);
int rc;
- ENTRY;
-
LASSERT(ma->ma_lmm_size > 0);
rc = mo_xattr_get(env, o, &LU_BUF_NULL, XATTR_NAME_LOV);
if (rc < 0)
- RETURN(rc);
+ return rc;
/* big_lmm may need to be grown */
if (info->eti_big_lmmsize < rc) {
@@ -1490,7 +1464,7 @@ static int echo_big_lmm_get(const struct lu_env *env, struct md_object *o,
OBD_ALLOC_LARGE(info->eti_big_lmm, size);
if (info->eti_big_lmm == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
info->eti_big_lmmsize = size;
}
LASSERT(info->eti_big_lmmsize >= rc);
@@ -1499,13 +1473,13 @@ static int echo_big_lmm_get(const struct lu_env *env, struct md_object *o,
info->eti_buf.lb_len = info->eti_big_lmmsize;
rc = mo_xattr_get(env, o, &info->eti_buf, XATTR_NAME_LOV);
if (rc < 0)
- RETURN(rc);
+ return rc;
ma->ma_valid |= MA_LOV;
ma->ma_lmm = info->eti_big_lmm;
ma->ma_lmm_size = rc;
- RETURN(0);
+ return 0;
}
int echo_attr_get_complex(const struct lu_env *env, struct md_object *next,
@@ -1517,8 +1491,6 @@ int echo_attr_get_complex(const struct lu_env *env, struct md_object *next,
int need = ma->ma_need;
int rc = 0, rc2;
- ENTRY;
-
ma->ma_valid = 0;
if (need & MA_INODE) {
@@ -1571,7 +1543,7 @@ out:
ma->ma_need = need;
CDEBUG(D_INODE, "after getattr rc = %d, ma_valid = "LPX64" ma_lmm=%p\n",
rc, ma->ma_valid, ma->ma_lmm);
- RETURN(rc);
+ return rc;
}
static int
@@ -1587,8 +1559,6 @@ echo_md_create_internal(const struct lu_env *env, struct echo_device *ed,
struct lu_object_conf conf = { .loc_flags = LOC_F_NEW };
int rc;
- ENTRY;
-
rc = mdo_lookup(env, parent, lname, fid2, spec);
if (rc == 0)
return -EEXIST;
@@ -1600,7 +1570,7 @@ echo_md_create_internal(const struct lu_env *env, struct echo_device *ed,
if (IS_ERR(ec_child)) {
CERROR("Can not find the child "DFID": rc = %ld\n", PFID(fid),
PTR_ERR(ec_child));
- RETURN(PTR_ERR(ec_child));
+ return PTR_ERR(ec_child);
}
child = lu_object_locate(ec_child->lo_header, ld->ld_type);
@@ -1623,7 +1593,6 @@ echo_md_create_internal(const struct lu_env *env, struct echo_device *ed,
}
CDEBUG(D_RPCTRACE, "End creating object "DFID" %s %p rc = %d\n",
PFID(lu_object_fid(&parent->mo_lu)), lname->ln_name, parent, rc);
- EXIT;
out_put:
lu_object_put(env, ec_child);
return rc;
@@ -1663,13 +1632,11 @@ static int echo_create_md_object(const struct lu_env *env,
int rc = 0;
int i;
- ENTRY;
-
if (ec_parent == NULL)
return -1;
parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
if (parent == NULL)
- RETURN(-ENXIO);
+ return -ENXIO;
memset(ma, 0, sizeof(*ma));
memset(spec, 0, sizeof(*spec));
@@ -1699,7 +1666,7 @@ static int echo_create_md_object(const struct lu_env *env,
/* If name is specified, only create one object by name */
rc = echo_md_create_internal(env, ed, lu2md(parent), fid, lname,
spec, ma);
- RETURN(rc);
+ return rc;
}
/* Create multiple object sequenced by id */
@@ -1719,7 +1686,7 @@ static int echo_create_md_object(const struct lu_env *env,
fid->f_oid++;
}
- RETURN(rc);
+ return rc;
}
static struct lu_object *echo_md_lookup(const struct lu_env *env,
@@ -1731,14 +1698,13 @@ static struct lu_object *echo_md_lookup(const struct lu_env *env,
struct lu_fid *fid = &info->eti_fid;
struct lu_object *child;
int rc;
- ENTRY;
CDEBUG(D_INFO, "lookup %s in parent "DFID" %p\n", lname->ln_name,
PFID(fid), parent);
rc = mdo_lookup(env, parent, lname, fid, NULL);
if (rc) {
CERROR("lookup %s: rc = %d\n", lname->ln_name, rc);
- RETURN(ERR_PTR(rc));
+ return ERR_PTR(rc);
}
/* In the function below, .hs_keycmp resolves to
@@ -1746,7 +1712,7 @@ static struct lu_object *echo_md_lookup(const struct lu_env *env,
/* coverity[overrun-buffer-val] */
child = lu_object_find_at(env, &ed->ed_cl.cd_lu_dev, fid, NULL);
- RETURN(child);
+ return child;
}
static int echo_setattr_object(const struct lu_env *env,
@@ -1763,13 +1729,11 @@ static int echo_setattr_object(const struct lu_env *env,
int rc = 0;
int i;
- ENTRY;
-
if (ec_parent == NULL)
return -1;
parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
if (parent == NULL)
- RETURN(-ENXIO);
+ return -ENXIO;
for (i = 0; i < count; i++) {
struct lu_object *ec_child, *child;
@@ -1780,7 +1744,7 @@ static int echo_setattr_object(const struct lu_env *env,
if (IS_ERR(ec_child)) {
CERROR("Can't find child %s: rc = %ld\n",
lname->ln_name, PTR_ERR(ec_child));
- RETURN(PTR_ERR(ec_child));
+ return PTR_ERR(ec_child);
}
child = lu_object_locate(ec_child->lo_header, ld->ld_type);
@@ -1811,7 +1775,7 @@ static int echo_setattr_object(const struct lu_env *env,
id++;
lu_object_put(env, ec_child);
}
- RETURN(rc);
+ return rc;
}
static int echo_getattr_object(const struct lu_env *env,
@@ -1828,13 +1792,11 @@ static int echo_getattr_object(const struct lu_env *env,
int rc = 0;
int i;
- ENTRY;
-
if (ec_parent == NULL)
return -1;
parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
if (parent == NULL)
- RETURN(-ENXIO);
+ return -ENXIO;
memset(ma, 0, sizeof(*ma));
ma->ma_need |= MA_INODE | MA_LOV | MA_PFID | MA_HSM | MA_ACL_DEF;
@@ -1852,14 +1814,14 @@ static int echo_getattr_object(const struct lu_env *env,
if (IS_ERR(ec_child)) {
CERROR("Can't find child %s: rc = %ld\n",
lname->ln_name, PTR_ERR(ec_child));
- RETURN(PTR_ERR(ec_child));
+ return PTR_ERR(ec_child);
}
child = lu_object_locate(ec_child->lo_header, ld->ld_type);
if (child == NULL) {
CERROR("Can not locate the child %s\n", lname->ln_name);
lu_object_put(env, ec_child);
- RETURN(-EINVAL);
+ return -EINVAL;
}
CDEBUG(D_RPCTRACE, "Start getattr object "DFID"\n",
@@ -1877,7 +1839,7 @@ static int echo_getattr_object(const struct lu_env *env,
lu_object_put(env, ec_child);
}
- RETURN(rc);
+ return rc;
}
static int echo_lookup_object(const struct lu_env *env,
@@ -1931,13 +1893,11 @@ static int echo_md_destroy_internal(const struct lu_env *env,
struct lu_object *child;
int rc;
- ENTRY;
-
ec_child = echo_md_lookup(env, ed, parent, lname);
if (IS_ERR(ec_child)) {
CERROR("Can't find child %s: rc = %ld\n", lname->ln_name,
PTR_ERR(ec_child));
- RETURN(PTR_ERR(ec_child));
+ return PTR_ERR(ec_child);
}
child = lu_object_locate(ec_child->lo_header, ld->ld_type);
@@ -1976,11 +1936,10 @@ static int echo_destroy_object(const struct lu_env *env,
struct lu_object *parent;
int rc = 0;
int i;
- ENTRY;
parent = lu_object_locate(ec_parent->lo_header, ld->ld_type);
if (parent == NULL)
- RETURN(-EINVAL);
+ return -EINVAL;
memset(ma, 0, sizeof(*ma));
ma->ma_attr.la_mode = mode;
@@ -1994,7 +1953,7 @@ static int echo_destroy_object(const struct lu_env *env,
lname->ln_namelen = namelen;
rc = echo_md_destroy_internal(env, ed, lu2md(parent), lname,
ma);
- RETURN(rc);
+ return rc;
}
/*prepare the requests*/
@@ -2013,7 +1972,7 @@ static int echo_destroy_object(const struct lu_env *env,
id++;
}
- RETURN(rc);
+ return rc;
}
static struct lu_object *echo_resolve_path(const struct lu_env *env,
@@ -2028,13 +1987,12 @@ static struct lu_object *echo_resolve_path(const struct lu_env *env,
struct lu_object *parent = NULL;
struct lu_object *child = NULL;
int rc = 0;
- ENTRY;
/*Only support MDD layer right now*/
rc = md->md_ops->mdo_root_get(env, md, fid);
if (rc) {
CERROR("get root error: rc = %d\n", rc);
- RETURN(ERR_PTR(rc));
+ return ERR_PTR(rc);
}
/* In the function below, .hs_keycmp resolves to
@@ -2044,7 +2002,7 @@ static struct lu_object *echo_resolve_path(const struct lu_env *env,
if (IS_ERR(parent)) {
CERROR("Can not find the parent "DFID": rc = %ld\n",
PFID(fid), PTR_ERR(parent));
- RETURN(parent);
+ return parent;
}
while (1) {
@@ -2083,9 +2041,9 @@ static struct lu_object *echo_resolve_path(const struct lu_env *env,
parent = child;
}
if (rc)
- RETURN(ERR_PTR(rc));
+ return ERR_PTR(rc);
- RETURN(parent);
+ return parent;
}
static void echo_ucred_init(struct lu_env *env)
@@ -2097,10 +2055,14 @@ static void echo_ucred_init(struct lu_env *env)
ucred->uc_suppgids[0] = -1;
ucred->uc_suppgids[1] = -1;
- ucred->uc_uid = ucred->uc_o_uid = current_uid();
- ucred->uc_gid = ucred->uc_o_gid = current_gid();
- ucred->uc_fsuid = ucred->uc_o_fsuid = current_fsuid();
- ucred->uc_fsgid = ucred->uc_o_fsgid = current_fsgid();
+ ucred->uc_uid = ucred->uc_o_uid =
+ from_kuid(&init_user_ns, current_uid());
+ ucred->uc_gid = ucred->uc_o_gid =
+ from_kgid(&init_user_ns, current_gid());
+ ucred->uc_fsuid = ucred->uc_o_fsuid =
+ from_kuid(&init_user_ns, current_fsuid());
+ ucred->uc_fsgid = ucred->uc_o_fsgid =
+ from_kgid(&init_user_ns, current_fsgid());
ucred->uc_cap = cfs_curproc_cap_pack();
/* remove fs privilege for non-root user. */
@@ -2129,21 +2091,20 @@ static int echo_md_handler(struct echo_device *ed, int command,
char *name = NULL;
int namelen = data->ioc_plen2;
int rc = 0;
- ENTRY;
if (ld == NULL) {
CERROR("MD echo client is not being initialized properly\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (strcmp(ld->ld_type->ldt_name, LUSTRE_MDD_NAME)) {
CERROR("Only support MDD layer right now!\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
env = cl_env_get(&refcheck);
if (IS_ERR(env))
- RETURN(PTR_ERR(env));
+ return PTR_ERR(env);
rc = lu_env_refill_by_tags(env, ECHO_MD_CTX_TAG, ECHO_MD_SES_TAG);
if (rc != 0)
@@ -2243,13 +2204,12 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
struct lov_stripe_md *lsm = NULL;
int rc;
int created = 0;
- ENTRY;
if ((oa->o_valid & OBD_MD_FLID) == 0 && /* no obj id */
(on_target || /* set_stripe */
ec->ec_nstripes != 0)) { /* LOV */
CERROR ("No valid oid\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
rc = echo_alloc_memmd(ed, &lsm);
@@ -2315,7 +2275,6 @@ static int echo_create_object(const struct lu_env *env, struct echo_device *ed,
cl_echo_object_put(eco);
CDEBUG(D_INFO, "oa oid "DOSTID"\n", POSTID(&oa->o_oi));
- EXIT;
failed:
if (created && rc)
@@ -2333,17 +2292,16 @@ static int echo_get_object(struct echo_object **ecop, struct echo_device *ed,
struct lov_stripe_md *lsm = NULL;
struct echo_object *eco;
int rc;
- ENTRY;
if ((oa->o_valid & OBD_MD_FLID) == 0 || ostid_id(&oa->o_oi) == 0) {
/* disallow use of object id 0 */
CERROR ("No valid oid\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
rc = echo_alloc_memmd(ed, &lsm);
if (rc < 0)
- RETURN(rc);
+ return rc;
lsm->lsm_oi = oa->o_oi;
if (!(oa->o_valid & OBD_MD_FLGROUP))
@@ -2357,7 +2315,7 @@ static int echo_get_object(struct echo_object **ecop, struct echo_device *ed,
rc = PTR_ERR(eco);
if (lsm)
echo_free_memmd(ed, &lsm);
- RETURN(rc);
+ return rc;
}
static void echo_put_object(struct echo_object *eco)
@@ -2476,7 +2434,6 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
int verify;
int gfp_mask;
int brw_flags = 0;
- ENTRY;
verify = (ostid_id(&oa->o_oi) != ECHO_PERSISTENT_OBJID &&
(oa->o_valid & OBD_MD_FLFLAGS) != 0 &&
@@ -2490,7 +2447,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
if (count <= 0 ||
(count & (~CFS_PAGE_MASK)) != 0)
- RETURN(-EINVAL);
+ return -EINVAL;
/* XXX think again with misaligned I/O */
npages = count >> PAGE_CACHE_SHIFT;
@@ -2500,12 +2457,12 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
OBD_ALLOC(pga, npages * sizeof(*pga));
if (pga == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
OBD_ALLOC(pages, npages * sizeof(*pages));
if (pages == NULL) {
OBD_FREE(pga, npages * sizeof(*pga));
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
for (i = 0, pgp = pga, off = offset;
@@ -2554,7 +2511,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
}
OBD_FREE(pga, npages * sizeof(*pga));
OBD_FREE(pages, npages * sizeof(*pages));
- RETURN(rc);
+ return rc;
}
static int echo_client_prep_commit(const struct lu_env *env,
@@ -2572,11 +2529,9 @@ static int echo_client_prep_commit(const struct lu_env *env,
obd_size npages, tot_pages;
int i, ret = 0, brw_flags = 0;
- ENTRY;
-
if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0 ||
(lsm != NULL && ostid_id(&lsm->lsm_oi) != ostid_id(&oa->o_oi)))
- RETURN(-EINVAL);
+ return -EINVAL;
npages = batch >> PAGE_CACHE_SHIFT;
tot_pages = count >> PAGE_CACHE_SHIFT;
@@ -2661,7 +2616,7 @@ out:
OBD_FREE(lnb, npages * sizeof(struct niobuf_local));
if (rnb)
OBD_FREE(rnb, npages * sizeof(struct niobuf_remote));
- RETURN(ret);
+ return ret;
}
static int echo_client_brw_ioctl(const struct lu_env *env, int rw,
@@ -2677,13 +2632,12 @@ static int echo_client_brw_ioctl(const struct lu_env *env, int rw,
int rc;
int async = 1;
long test_mode;
- ENTRY;
LASSERT(oa->o_valid & OBD_MD_FLGROUP);
rc = echo_get_object(&eco, ed, oa);
if (rc)
- RETURN(rc);
+ return rc;
oa->o_valid &= ~OBD_MD_FLHANDLE;
@@ -2719,7 +2673,7 @@ static int echo_client_brw_ioctl(const struct lu_env *env, int rw,
rc = -EINVAL;
}
echo_put_object(eco);
- RETURN(rc);
+ return rc;
}
static int
@@ -2731,21 +2685,20 @@ echo_client_enqueue(struct obd_export *exp, struct obdo *oa,
struct echo_object *eco;
obd_off end;
int rc;
- ENTRY;
if (ed->ed_next == NULL)
- RETURN(-EOPNOTSUPP);
+ return -EOPNOTSUPP;
if (!(mode == LCK_PR || mode == LCK_PW))
- RETURN(-EINVAL);
+ return -EINVAL;
if ((offset & (~CFS_PAGE_MASK)) != 0 ||
(nob & (~CFS_PAGE_MASK)) != 0)
- RETURN(-EINVAL);
+ return -EINVAL;
rc = echo_get_object (&eco, ed, oa);
if (rc != 0)
- RETURN(rc);
+ return rc;
end = (nob == 0) ? ((obd_off) -1) : (offset + nob - 1);
rc = cl_echo_enqueue(eco, offset, end, mode, &ulh->cookie);
@@ -2754,7 +2707,7 @@ echo_client_enqueue(struct obd_export *exp, struct obdo *oa,
CDEBUG(D_INFO, "Cookie is "LPX64"\n", ulh->cookie);
}
echo_put_object(eco);
- RETURN(rc);
+ return rc;
}
static int
@@ -2787,7 +2740,6 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
int rw = OBD_BRW_READ;
int rc = 0;
int i;
- ENTRY;
memset(&dummy_oti, 0, sizeof(dummy_oti));
@@ -2800,11 +2752,11 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
/* This FID is unpacked just for validation at this point */
rc = ostid_to_fid(&fid, &oa->o_oi, 0);
if (rc < 0)
- RETURN(rc);
+ return rc;
OBD_ALLOC_PTR(env);
if (env == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
rc = lu_env_init(env, LCT_DT_THREAD);
if (rc)
@@ -2980,7 +2932,6 @@ echo_client_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
GOTO (out, rc = -ENOTTY);
}
- EXIT;
out:
lu_env_fini(env);
OBD_FREE_PTR(env);
@@ -3004,18 +2955,17 @@ static int echo_client_setup(const struct lu_env *env,
struct obd_uuid echo_uuid = { "ECHO_UUID" };
struct obd_connect_data *ocd = NULL;
int rc;
- ENTRY;
if (lcfg->lcfg_bufcount < 2 || LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
CERROR("requires a TARGET OBD name\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
tgt = class_name2obd(lustre_cfg_string(lcfg, 1));
if (!tgt || !tgt->obd_attached || !tgt->obd_set_up) {
CERROR("device not attached or not set up (%s)\n",
lustre_cfg_string(lcfg, 1));
- RETURN(-EINVAL);
+ return -EINVAL;
}
spin_lock_init(&ec->ec_lock);
@@ -3027,7 +2977,7 @@ static int echo_client_setup(const struct lu_env *env,
if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) {
lu_context_tags_update(ECHO_MD_CTX_TAG);
lu_session_tags_update(ECHO_MD_SES_TAG);
- RETURN(0);
+ return 0;
}
OBD_ALLOC(ocd, sizeof(*ocd));
@@ -3062,7 +3012,7 @@ static int echo_client_setup(const struct lu_env *env,
return (rc);
}
- RETURN(rc);
+ return rc;
}
static int echo_client_cleanup(struct obd_device *obddev)
@@ -3070,21 +3020,20 @@ static int echo_client_cleanup(struct obd_device *obddev)
struct echo_device *ed = obd2echo_dev(obddev);
struct echo_client_obd *ec = &obddev->u.echo_client;
int rc;
- ENTRY;
/*Do nothing for Metadata echo client*/
if (ed == NULL )
- RETURN(0);
+ return 0;
if (ed->ed_next_ismd) {
lu_context_tags_clear(ECHO_MD_CTX_TAG);
lu_session_tags_clear(ECHO_MD_SES_TAG);
- RETURN(0);
+ return 0;
}
if (!list_empty(&obddev->obd_exports)) {
CERROR("still has clients!\n");
- RETURN(-EBUSY);
+ return -EBUSY;
}
LASSERT(atomic_read(&ec->ec_exp->exp_refcount) > 0);
@@ -3092,7 +3041,7 @@ static int echo_client_cleanup(struct obd_device *obddev)
if (rc != 0)
CERROR("fail to disconnect device: %d\n", rc);
- RETURN(rc);
+ return rc;
}
static int echo_client_connect(const struct lu_env *env,
@@ -3103,13 +3052,12 @@ static int echo_client_connect(const struct lu_env *env,
int rc;
struct lustre_handle conn = { 0 };
- ENTRY;
rc = class_connect(&conn, src, cluuid);
if (rc == 0) {
*exp = class_conn2export(&conn);
}
- RETURN (rc);
+ return rc;
}
static int echo_client_disconnect(struct obd_export *exp)
@@ -3120,7 +3068,6 @@ static int echo_client_disconnect(struct obd_export *exp)
struct ec_lock *ecl;
#endif
int rc;
- ENTRY;
if (exp == NULL)
GOTO(out, rc = -EINVAL);
@@ -3195,7 +3142,6 @@ static int __init obdecho_init(void)
struct lprocfs_static_vars lvars;
int rc;
- ENTRY;
LCONSOLE_INFO("Echo OBD driver; http://www.lustre.org/\n");
LASSERT(PAGE_CACHE_SIZE % OBD_ECHO_BLOCK_SIZE == 0);
@@ -3205,7 +3151,7 @@ static int __init obdecho_init(void)
rc = echo_client_init();
- RETURN(rc);
+ return rc;
}
static void /*__exit*/ obdecho_exit(void)
@@ -3217,7 +3163,9 @@ static void /*__exit*/ obdecho_exit(void)
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Testing Echo OBD driver");
MODULE_LICENSE("GPL");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
-cfs_module(obdecho, LUSTRE_VERSION_STRING, obdecho_init, obdecho_exit);
+module_init(obdecho_init);
+module_exit(obdecho_exit);
/** @} echo_client */
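The hunk just above replaces the libcfs cfs_module() wrapper with the standard Linux registration macros plus MODULE_VERSION. For reference, a minimal module using that pattern looks roughly like the sketch below; the names are illustrative only and are not taken from obdecho.

#include <linux/init.h>
#include <linux/module.h>

static int __init example_init(void)
{
	pr_info("example: loaded\n");
	return 0;	/* a non-zero return here would abort the load */
}

static void __exit example_exit(void)
{
	pr_info("example: unloaded\n");
}

module_init(example_init);
module_exit(example_exit);

MODULE_DESCRIPTION("Illustrative module registration sketch");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");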
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index 198cf3ba137..90d24d8dea2 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -35,7 +35,6 @@
*/
#define DEBUG_SUBSYSTEM S_CLASS
-#include <linux/version.h>
#include <asm/statfs.h>
#include <obd_cksum.h>
#include <obd_class.h>
@@ -146,7 +145,7 @@ static ssize_t osc_max_dirty_mb_seq_write(struct file *file, const char *buffer,
if (pages_number <= 0 ||
pages_number > OSC_MAX_DIRTY_MB_MAX << (20 - PAGE_CACHE_SHIFT) ||
- pages_number > num_physpages / 4) /* 1/4 of RAM */
+ pages_number > totalram_pages / 4) /* 1/4 of RAM */
return -ERANGE;
client_obd_list_lock(&cli->cl_loi_list_lock);
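The lproc_osc.c change above swaps the removed num_physpages symbol for totalram_pages when capping the max-dirty setting at a quarter of RAM. A hedged sketch of that bound check, using an invented helper name, is:

#include <linux/mm.h>
#include <linux/errno.h>

/* Reject a dirty-page budget that is non-positive or larger than 1/4
 * of system RAM; totalram_pages is the replacement for the removed
 * num_physpages symbol. */
static int example_check_dirty_budget(long pages_number)
{
	if (pages_number <= 0 || pages_number > totalram_pages / 4)
		return -ERANGE;
	return 0;
}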
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 0a0ec6f7d2d..00295da4ab3 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -540,7 +540,6 @@ int osc_extent_release(const struct lu_env *env, struct osc_extent *ext)
{
struct osc_object *obj = ext->oe_obj;
int rc = 0;
- ENTRY;
LASSERT(atomic_read(&ext->oe_users) > 0);
LASSERT(sanity_check(ext) == 0);
@@ -572,7 +571,7 @@ int osc_extent_release(const struct lu_env *env, struct osc_extent *ext)
osc_io_unplug_async(env, osc_cli(obj), obj);
}
osc_extent_put(env, ext);
- RETURN(rc);
+ return rc;
}
static inline int overlapped(struct osc_extent *ex1, struct osc_extent *ex2)
@@ -602,11 +601,10 @@ struct osc_extent *osc_extent_find(const struct lu_env *env,
int ppc_bits; /* pages per chunk bits */
int chunk_mask;
int rc;
- ENTRY;
cur = osc_extent_alloc(obj);
if (cur == NULL)
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0);
LASSERT(lock != NULL);
@@ -783,7 +781,6 @@ restart:
goto restart;
}
- EXIT;
out:
osc_extent_put(env, cur);
@@ -805,7 +802,6 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
__u64 last_off = 0;
int last_count = -1;
- ENTRY;
OSC_EXTENT_DUMP(D_CACHE, ext, "extent finished.\n");
@@ -846,7 +842,7 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
osc_extent_remove(ext);
/* put the refcount for RPC */
osc_extent_put(env, ext);
- RETURN(0);
+ return 0;
}
static int extent_wait_cb(struct osc_extent *ext, int state)
@@ -870,7 +866,6 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(600), NULL,
LWI_ON_SIGNAL_NOOP, NULL);
int rc = 0;
- ENTRY;
osc_object_lock(obj);
LASSERT(sanity_check_nolock(ext) == 0);
@@ -902,7 +897,7 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
}
if (rc == 0 && ext->oe_rc < 0)
rc = ext->oe_rc;
- RETURN(rc);
+ return rc;
}
/**
@@ -925,7 +920,6 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
int grants = 0;
int nr_pages = 0;
int rc = 0;
- ENTRY;
LASSERT(sanity_check(ext) == 0);
LASSERT(ext->oe_state == OES_TRUNC);
@@ -1021,7 +1015,7 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
out:
cl_io_fini(env, io);
cl_env_nested_put(&nest, env);
- RETURN(rc);
+ return rc;
}
/**
@@ -1036,7 +1030,6 @@ static int osc_extent_make_ready(const struct lu_env *env,
struct osc_object *obj = ext->oe_obj;
int page_count = 0;
int rc;
- ENTRY;
/* we're going to grab page lock, so object lock must not be taken. */
LASSERT(sanity_check(ext) == 0);
@@ -1096,7 +1089,7 @@ static int osc_extent_make_ready(const struct lu_env *env,
/* get a refcount for RPC. */
osc_extent_get(ext);
- RETURN(0);
+ return 0;
}
/**
@@ -1115,7 +1108,6 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
pgoff_t end_index;
int chunksize = 1 << cli->cl_chunkbits;
int rc = 0;
- ENTRY;
LASSERT(ext->oe_max_end >= index && ext->oe_start <= index);
osc_object_lock(obj);
@@ -1143,11 +1135,10 @@ static int osc_extent_expand(struct osc_extent *ext, pgoff_t index, int *grants)
LASSERT(*grants >= 0);
EASSERTF(osc_extent_is_overlapped(obj, ext) == 0, ext,
"overlapped after expanding for %lu.\n", index);
- EXIT;
out:
osc_object_unlock(obj);
- RETURN(rc);
+ return rc;
}
static void osc_extent_tree_dump0(int level, struct osc_object *obj,
@@ -1207,11 +1198,10 @@ static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
- ENTRY;
result = cl_page_make_ready(env, page, CRT_WRITE);
if (result == 0)
opg->ops_submit_time = cfs_time_current();
- RETURN(result);
+ return result;
}
static int osc_refresh_count(const struct lu_env *env,
@@ -1255,8 +1245,6 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
enum cl_req_type crt;
int srvlock;
- ENTRY;
-
cmd &= ~OBD_BRW_NOQUOTA;
LASSERT(equi(page->cp_state == CPS_PAGEIN, cmd == OBD_BRW_READ));
LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
@@ -1305,7 +1293,7 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
cl_page_completion(env, page, crt, rc);
- RETURN(0);
+ return 0;
}
#define OSC_DUMP_GRANT(cli, fmt, args...) do { \
@@ -1338,11 +1326,8 @@ static void osc_consume_write_grant(struct client_obd *cli,
static void osc_release_write_grant(struct client_obd *cli,
struct brw_page *pga)
{
- ENTRY;
-
LASSERT(spin_is_locked(&cli->cl_loi_list_lock.lock));
if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
- EXIT;
return;
}
@@ -1354,7 +1339,6 @@ static void osc_release_write_grant(struct client_obd *cli,
atomic_dec(&obd_dirty_transit_pages);
cli->cl_dirty_transit -= PAGE_CACHE_SIZE;
}
- EXIT;
}
/**
@@ -1503,7 +1487,6 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
struct osc_cache_waiter ocw;
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
int rc = -EDQUOT;
- ENTRY;
OSC_DUMP_GRANT(cli, "need:%d.\n", bytes);
@@ -1557,11 +1540,10 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
if (osc_enter_cache_try(cli, oap, bytes, 0))
GOTO(out, rc = 0);
}
- EXIT;
out:
client_obd_list_unlock(&cli->cl_loi_list_lock);
OSC_DUMP_GRANT(cli, "returned %d.\n", rc);
- RETURN(rc);
+ return rc;
}
/* caller must hold loi_list_lock */
@@ -1570,7 +1552,6 @@ void osc_wake_cache_waiters(struct client_obd *cli)
struct list_head *l, *tmp;
struct osc_cache_waiter *ocw;
- ENTRY;
list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
list_del_init(&ocw->ocw_entry);
@@ -1596,8 +1577,6 @@ wakeup:
wake_up(&ocw->ocw_waitq);
}
-
- EXIT;
}
static int osc_max_rpc_in_flight(struct client_obd *cli, struct osc_object *osc)
@@ -1613,7 +1592,6 @@ static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc,
int cmd)
{
int invalid_import = 0;
- ENTRY;
/* if we have an invalid import we want to drain the queued pages
* by forcing them through rpcs that immediately fail and complete
@@ -1624,42 +1602,42 @@ static int osc_makes_rpc(struct client_obd *cli, struct osc_object *osc,
if (cmd & OBD_BRW_WRITE) {
if (atomic_read(&osc->oo_nr_writes) == 0)
- RETURN(0);
+ return 0;
if (invalid_import) {
CDEBUG(D_CACHE, "invalid import forcing RPC\n");
- RETURN(1);
+ return 1;
}
if (!list_empty(&osc->oo_hp_exts)) {
CDEBUG(D_CACHE, "high prio request forcing RPC\n");
- RETURN(1);
+ return 1;
}
if (!list_empty(&osc->oo_urgent_exts)) {
CDEBUG(D_CACHE, "urgent request forcing RPC\n");
- RETURN(1);
+ return 1;
}
/* trigger a write rpc stream as long as there are dirtiers
* waiting for space. as they're waiting, they're not going to
* create more pages to coalesce with what's waiting.. */
if (!list_empty(&cli->cl_cache_waiters)) {
CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
- RETURN(1);
+ return 1;
}
if (atomic_read(&osc->oo_nr_writes) >=
cli->cl_max_pages_per_rpc)
- RETURN(1);
+ return 1;
} else {
if (atomic_read(&osc->oo_nr_reads) == 0)
- RETURN(0);
+ return 0;
if (invalid_import) {
CDEBUG(D_CACHE, "invalid import forcing RPC\n");
- RETURN(1);
+ return 1;
}
/* all read are urgent. */
if (!list_empty(&osc->oo_reading_exts))
- RETURN(1);
+ return 1;
}
- RETURN(0);
+ return 0;
}
static void osc_update_pending(struct osc_object *obj, int cmd, int delta)
@@ -1757,7 +1735,6 @@ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
struct lov_oinfo *loi = osc->oo_oinfo;
__u64 xid = 0;
- ENTRY;
if (oap->oap_request != NULL) {
xid = ptlrpc_req_xid(oap->oap_request);
ptlrpc_req_finished(oap->oap_request);
@@ -1781,8 +1758,6 @@ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
if (rc)
CERROR("completion on oap %p obj %p returns %d.\n",
oap, osc, rc);
-
- EXIT;
}
/**
@@ -1795,14 +1770,13 @@ static int try_to_add_extent_for_io(struct client_obd *cli,
int *pc, unsigned int *max_pages)
{
struct osc_extent *tmp;
- ENTRY;
EASSERT((ext->oe_state == OES_CACHE || ext->oe_state == OES_LOCK_DONE),
ext);
*max_pages = max(ext->oe_mppr, *max_pages);
if (*pc + ext->oe_nr_pages > *max_pages)
- RETURN(0);
+ return 0;
list_for_each_entry(tmp, rpclist, oe_link) {
EASSERT(tmp->oe_owner == current, tmp);
@@ -1815,7 +1789,7 @@ static int try_to_add_extent_for_io(struct client_obd *cli,
if (tmp->oe_srvlock != ext->oe_srvlock ||
!tmp->oe_grants != !ext->oe_grants)
- RETURN(0);
+ return 0;
/* remove break for strict check */
break;
@@ -1824,7 +1798,7 @@ static int try_to_add_extent_for_io(struct client_obd *cli,
*pc += ext->oe_nr_pages;
list_move_tail(&ext->oe_link, rpclist);
ext->oe_owner = current;
- RETURN(1);
+ return 1;
}
/**
@@ -1913,7 +1887,6 @@ osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli,
obd_count page_count = 0;
int srvlock = 0;
int rc = 0;
- ENTRY;
LASSERT(osc_object_is_locked(osc));
@@ -1921,7 +1894,7 @@ osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli,
LASSERT(equi(page_count == 0, list_empty(&rpclist)));
if (list_empty(&rpclist))
- RETURN(0);
+ return 0;
osc_update_pending(osc, OBD_BRW_WRITE, -page_count);
@@ -1962,7 +1935,7 @@ osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli,
}
osc_object_lock(osc);
- RETURN(rc);
+ return rc;
}
/**
@@ -1985,7 +1958,6 @@ osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli,
int page_count = 0;
unsigned int max_pages = cli->cl_max_pages_per_rpc;
int rc = 0;
- ENTRY;
LASSERT(osc_object_is_locked(osc));
list_for_each_entry_safe(ext, next,
@@ -2010,7 +1982,7 @@ osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli,
osc_object_lock(osc);
}
- RETURN(rc);
+ return rc;
}
#define list_to_obj(list, item) ({ \
@@ -2023,15 +1995,13 @@ osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli,
* we could be sending. These lists are maintained by osc_makes_rpc(). */
static struct osc_object *osc_next_obj(struct client_obd *cli)
{
- ENTRY;
-
/* First return objects that have blocked locks so that they
* will be flushed quickly and other clients can get the lock,
* then objects which have pages ready to be stuffed into RPCs */
if (!list_empty(&cli->cl_loi_hp_ready_list))
- RETURN(list_to_obj(&cli->cl_loi_hp_ready_list, hp_ready_item));
+ return list_to_obj(&cli->cl_loi_hp_ready_list, hp_ready_item);
if (!list_empty(&cli->cl_loi_ready_list))
- RETURN(list_to_obj(&cli->cl_loi_ready_list, ready_item));
+ return list_to_obj(&cli->cl_loi_ready_list, ready_item);
/* then if we have cache waiters, return all objects with queued
* writes. This is especially important when many small files
@@ -2039,19 +2009,17 @@ static struct osc_object *osc_next_obj(struct client_obd *cli)
* they don't pass the nr_pending/object threshhold */
if (!list_empty(&cli->cl_cache_waiters) &&
!list_empty(&cli->cl_loi_write_list))
- RETURN(list_to_obj(&cli->cl_loi_write_list, write_item));
+ return list_to_obj(&cli->cl_loi_write_list, write_item);
/* then return all queued objects when we have an invalid import
* so that they get flushed */
if (cli->cl_import == NULL || cli->cl_import->imp_invalid) {
if (!list_empty(&cli->cl_loi_write_list))
- RETURN(list_to_obj(&cli->cl_loi_write_list,
- write_item));
+ return list_to_obj(&cli->cl_loi_write_list, write_item);
if (!list_empty(&cli->cl_loi_read_list))
- RETURN(list_to_obj(&cli->cl_loi_read_list,
- read_item));
+ return list_to_obj(&cli->cl_loi_read_list, read_item);
}
- RETURN(NULL);
+ return NULL;
}
/* called with the loi list lock held */
@@ -2060,11 +2028,10 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli,
{
struct osc_object *osc;
int rc = 0;
- ENTRY;
while ((osc = osc_next_obj(cli)) != NULL) {
struct cl_object *obj = osc2cl(osc);
- struct lu_ref_link *link;
+ struct lu_ref_link link;
OSC_IO_DEBUG(osc, "%lu in flight\n", rpcs_in_flight(cli));
@@ -2075,7 +2042,8 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli,
cl_object_get(obj);
client_obd_list_unlock(&cli->cl_loi_list_lock);
- link = lu_object_ref_add(&obj->co_lu, "check", current);
+ lu_object_ref_add_at(&obj->co_lu, &link, "check",
+ current);
/* attempt some read/write balancing by alternating between
* reads and writes in an object. The makes_rpc checks here
@@ -2116,7 +2084,8 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli,
osc_object_unlock(osc);
osc_list_maint(cli, osc);
- lu_object_ref_del_at(&obj->co_lu, link, "check", current);
+ lu_object_ref_del_at(&obj->co_lu, &link, "check",
+ current);
cl_object_put(env, obj);
client_obd_list_lock(&cli->cl_loi_list_lock);
@@ -2165,7 +2134,6 @@ int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
{
struct obd_export *exp = osc_export(osc);
struct osc_async_page *oap = &ops->ops_oap;
- ENTRY;
if (!page)
return cfs_size_round(sizeof(*oap));
@@ -2187,7 +2155,7 @@ int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
spin_lock_init(&oap->oap_lock);
CDEBUG(D_INFO, "oap %p page %p obj off "LPU64"\n",
oap, page, oap->oap_obj_off);
- RETURN(0);
+ return 0;
}
int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
@@ -2204,17 +2172,16 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
int cmd = OBD_BRW_WRITE;
int need_release = 0;
int rc = 0;
- ENTRY;
if (oap->oap_magic != OAP_MAGIC)
- RETURN(-EINVAL);
+ return -EINVAL;
if (cli->cl_import == NULL || cli->cl_import->imp_invalid)
- RETURN(-EIO);
+ return -EIO;
if (!list_empty(&oap->oap_pending_item) ||
!list_empty(&oap->oap_rpc_item))
- RETURN(-EBUSY);
+ return -EBUSY;
/* Set the OBD_BRW_SRVLOCK before the page is queued. */
brw_flags |= ops->ops_srvlock ? OBD_BRW_SRVLOCK : 0;
@@ -2242,7 +2209,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
if (rc == 0 && osc_quota_chkdq(cli, qid) == NO_QUOTA)
rc = -EDQUOT;
if (rc)
- RETURN(rc);
+ return rc;
}
oap->oap_cmd = cmd;
@@ -2350,7 +2317,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
list_add_tail(&oap->oap_pending_item, &ext->oe_pages);
osc_object_unlock(osc);
}
- RETURN(rc);
+ return rc;
}
int osc_teardown_async_page(const struct lu_env *env,
@@ -2359,7 +2326,6 @@ int osc_teardown_async_page(const struct lu_env *env,
struct osc_async_page *oap = &ops->ops_oap;
struct osc_extent *ext = NULL;
int rc = 0;
- ENTRY;
LASSERT(oap->oap_magic == OAP_MAGIC);
@@ -2384,7 +2350,7 @@ int osc_teardown_async_page(const struct lu_env *env,
osc_object_unlock(obj);
if (ext != NULL)
osc_extent_put(env, ext);
- RETURN(rc);
+ return rc;
}
/**
@@ -2404,7 +2370,6 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
struct osc_async_page *oap = &ops->ops_oap;
bool unplug = false;
int rc = 0;
- ENTRY;
osc_object_lock(obj);
ext = osc_extent_lookup(obj, index);
@@ -2454,7 +2419,6 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
unplug = true;
}
rc = 0;
- EXIT;
out:
osc_object_unlock(obj);
@@ -2482,7 +2446,6 @@ int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops)
pgoff_t index = oap2cl_page(oap)->cp_index;
int rc = -EBUSY;
int cmd;
- ENTRY;
LASSERT(!oap->oap_interrupted);
oap->oap_interrupted = 1;
@@ -2526,7 +2489,7 @@ int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops)
}
osc_list_maint(cli, obj);
- RETURN(rc);
+ return rc;
}
int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
@@ -2539,7 +2502,6 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
int mppr = cli->cl_max_pages_per_rpc;
pgoff_t start = CL_PAGE_EOF;
pgoff_t end = 0;
- ENTRY;
list_for_each_entry(oap, list, oap_pending_item) {
struct cl_page *cp = oap2cl_page(oap);
@@ -2557,7 +2519,7 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
list_del_init(&oap->oap_pending_item);
osc_ap_completion(env, cli, oap, 0, -ENOMEM);
}
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
ext->oe_rw = !!(cmd & OBD_BRW_READ);
@@ -2583,7 +2545,7 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
osc_object_unlock(obj);
osc_io_unplug(env, cli, obj, PDL_POLICY_ROUND);
- RETURN(0);
+ return 0;
}
/**
@@ -2599,7 +2561,6 @@ int osc_cache_truncate_start(const struct lu_env *env, struct osc_io *oio,
LIST_HEAD(list);
int result = 0;
bool partial;
- ENTRY;
/* pages with index greater or equal to index will be truncated. */
index = cl_index(osc2cl(obj), size);
@@ -2705,7 +2666,7 @@ again:
waiting = NULL;
goto again;
}
- RETURN(result);
+ return result;
}
/**
@@ -2756,7 +2717,6 @@ int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj,
struct osc_extent *ext;
pgoff_t index = start;
int result = 0;
- ENTRY;
again:
osc_object_lock(obj);
@@ -2794,7 +2754,7 @@ again:
osc_object_unlock(obj);
OSC_IO_DEBUG(obj, "sync file range.\n");
- RETURN(result);
+ return result;
}
/**
@@ -2813,7 +2773,6 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
LIST_HEAD(discard_list);
bool unplug = false;
int result = 0;
- ENTRY;
osc_object_lock(obj);
ext = osc_extent_search(obj, start);
@@ -2910,7 +2869,7 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
}
OSC_IO_DEBUG(obj, "cache page out.\n");
- RETURN(result);
+ return result;
}
/** @} osc */
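Most of the churn in osc_cache.c (and in the other files of this series) is mechanical: the libcfs ENTRY/EXIT/RETURN function-trace macros are dropped in favour of plain return statements. Assuming RETURN(x) expanded to an exit tracepoint plus "return x", as the one-for-one substitutions above suggest, the conversion is behaviour-preserving apart from the lost trace output. A schematic before/after, with an invented function name, is:

/* Before: libcfs trace macros around entry and every exit point. */
static int example_old(int arg)
{
	ENTRY;				/* entry tracepoint */
	if (arg < 0)
		RETURN(-EINVAL);	/* exit tracepoint + return */
	RETURN(0);
}

/* After: plain C control flow, as produced by the hunks above. */
static int example_new(int arg)
{
	if (arg < 0)
		return -EINVAL;
	return 0;
}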
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index 158e8fff838..a3aa9b6596e 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -374,7 +374,7 @@ struct osc_page {
/**
* Thread that submitted this page for transfer. For debugging.
*/
- task_t *ops_submitter;
+ struct task_struct *ops_submitter;
/**
* Submit time - the time when the page is starting RPC. For debugging.
*/
@@ -660,7 +660,7 @@ struct osc_extent {
/** lock covering this extent */
struct cl_lock *oe_osclock;
/** terminator of this extent. Must be true if this extent is in IO. */
- task_t *oe_owner;
+ struct task_struct *oe_owner;
/** return value of writeback. If somebody is waiting for this extent,
* this value can be known by outside world. */
int oe_rc;
diff --git a/drivers/staging/lustre/lustre/osc/osc_dev.c b/drivers/staging/lustre/lustre/osc/osc_dev.c
index 4208ddfd73b..35f25786763 100644
--- a/drivers/staging/lustre/lustre/osc/osc_dev.c
+++ b/drivers/staging/lustre/lustre/osc/osc_dev.c
@@ -171,8 +171,7 @@ LU_TYPE_INIT_FINI(osc, &osc_key, &osc_session_key);
static int osc_cl_process_config(const struct lu_env *env,
struct lu_device *d, struct lustre_cfg *cfg)
{
- ENTRY;
- RETURN(osc_process_config_base(d->ld_obd, cfg));
+ return osc_process_config_base(d->ld_obd, cfg);
}
static const struct lu_device_operations osc_lu_ops = {
@@ -188,7 +187,7 @@ static const struct cl_device_operations osc_cl_ops = {
static int osc_device_init(const struct lu_env *env, struct lu_device *d,
const char *name, struct lu_device *next)
{
- RETURN(0);
+ return 0;
}
static struct lu_device *osc_device_fini(const struct lu_env *env,
@@ -218,7 +217,7 @@ static struct lu_device *osc_device_alloc(const struct lu_env *env,
OBD_ALLOC_PTR(od);
if (od == NULL)
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
cl_device_init(&od->od_cl, t);
d = osc2lu_dev(od);
@@ -231,10 +230,10 @@ static struct lu_device *osc_device_alloc(const struct lu_env *env,
rc = osc_setup(obd, cfg);
if (rc) {
osc_device_free(env, d);
- RETURN(ERR_PTR(rc));
+ return ERR_PTR(rc);
}
od->od_exp = obd->obd_self_export;
- RETURN(d);
+ return d;
}
static const struct lu_device_type_operations osc_device_type_ops = {
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index 1b277045b3e..3aeaf845cf2 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -261,7 +261,6 @@ static int osc_io_prepare_write(const struct lu_env *env,
struct obd_import *imp = class_exp2cliimp(dev->od_exp);
struct osc_io *oio = cl2osc_io(env, ios);
int result = 0;
- ENTRY;
/*
* This implements OBD_BRW_CHECK logic from old client.
@@ -276,7 +275,7 @@ static int osc_io_prepare_write(const struct lu_env *env,
* [from, to) bytes of this page to OST. -jay */
cl_page_export(env, slice->cpl_page, 1);
- RETURN(result);
+ return result;
}
static int osc_io_commit_write(const struct lu_env *env,
@@ -288,7 +287,6 @@ static int osc_io_commit_write(const struct lu_env *env,
struct osc_page *opg = cl2osc_page(slice);
struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
struct osc_async_page *oap = &opg->ops_oap;
- ENTRY;
LASSERT(to > 0);
/*
@@ -306,7 +304,7 @@ static int osc_io_commit_write(const struct lu_env *env,
/* see osc_io_prepare_write() for lockless io handling. */
cl_page_clip(env, slice->cpl_page, from, to);
- RETURN(0);
+ return 0;
}
static int osc_io_fault_start(const struct lu_env *env,
@@ -315,8 +313,6 @@ static int osc_io_fault_start(const struct lu_env *env,
struct cl_io *io;
struct cl_fault_io *fio;
- ENTRY;
-
io = ios->cis_io;
fio = &io->u.ci_fault;
CDEBUG(D_INFO, "%lu %d %d\n",
@@ -329,7 +325,7 @@ static int osc_io_fault_start(const struct lu_env *env,
if (fio->ft_writable)
osc_page_touch_at(env, ios->cis_obj,
fio->ft_index, fio->ft_nob);
- RETURN(0);
+ return 0;
}
static int osc_async_upcall(void *a, int rc)
@@ -517,19 +513,18 @@ static int osc_io_read_start(const struct lu_env *env,
struct cl_object *obj = slice->cis_obj;
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
int result = 0;
- ENTRY;
if (oio->oi_lockless == 0) {
cl_object_attr_lock(obj);
result = cl_object_attr_get(env, obj, attr);
if (result == 0) {
- attr->cat_atime = LTIME_S(CFS_CURRENT_TIME);
+ attr->cat_atime = LTIME_S(CURRENT_TIME);
result = cl_object_attr_set(env, obj, attr,
CAT_ATIME);
}
cl_object_attr_unlock(obj);
}
- RETURN(result);
+ return result;
}
static int osc_io_write_start(const struct lu_env *env,
@@ -539,7 +534,6 @@ static int osc_io_write_start(const struct lu_env *env,
struct cl_object *obj = slice->cis_obj;
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
int result = 0;
- ENTRY;
if (oio->oi_lockless == 0) {
OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_DELAY_SETTIME, 1);
@@ -547,13 +541,13 @@ static int osc_io_write_start(const struct lu_env *env,
result = cl_object_attr_get(env, obj, attr);
if (result == 0) {
attr->cat_mtime = attr->cat_ctime =
- LTIME_S(CFS_CURRENT_TIME);
+ LTIME_S(CURRENT_TIME);
result = cl_object_attr_set(env, obj, attr,
CAT_MTIME | CAT_CTIME);
}
cl_object_attr_unlock(obj);
}
- RETURN(result);
+ return result;
}
static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
@@ -565,7 +559,6 @@ static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
struct lov_oinfo *loi = obj->oo_oinfo;
struct osc_async_cbargs *cbargs = &oio->oi_cbarg;
int rc = 0;
- ENTRY;
memset(oa, 0, sizeof(*oa));
oa->o_oi = loi->loi_oi;
@@ -585,7 +578,7 @@ static int osc_fsync_ost(const struct lu_env *env, struct osc_object *obj,
rc = osc_sync_base(osc_export(obj), oinfo, osc_async_upcall, cbargs,
PTLRPCD_SET);
- RETURN(rc);
+ return rc;
}
static int osc_io_fsync_start(const struct lu_env *env,
@@ -598,7 +591,6 @@ static int osc_io_fsync_start(const struct lu_env *env,
pgoff_t start = cl_index(obj, fio->fi_start);
pgoff_t end = cl_index(obj, fio->fi_end);
int result = 0;
- ENTRY;
if (fio->fi_end == OBD_OBJECT_EOF)
end = CL_PAGE_EOF;
@@ -625,7 +617,7 @@ static int osc_io_fsync_start(const struct lu_env *env,
result = rc;
}
- RETURN(result);
+ return result;
}
static void osc_io_fsync_end(const struct lu_env *env,
@@ -785,7 +777,7 @@ static void osc_req_attr_set(const struct lu_env *env,
"no cover page!\n");
CL_PAGE_DEBUG(D_ERROR, env, apage,
"dump uncover page!\n");
- libcfs_debug_dumpstack(NULL);
+ dump_stack();
LBUG();
}
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index 640bc3d3470..5d7bdbfc871 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -89,35 +89,49 @@ static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
*/
static int osc_lock_invariant(struct osc_lock *ols)
{
- struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle);
- struct ldlm_lock *olock = ols->ols_lock;
- int handle_used = lustre_handle_is_used(&ols->ols_handle);
-
- return
- ergo(osc_lock_is_lockless(ols),
- ols->ols_locklessable && ols->ols_lock == NULL) ||
- (ergo(olock != NULL, handle_used) &&
- ergo(olock != NULL,
- olock->l_handle.h_cookie == ols->ols_handle.cookie) &&
- /*
- * Check that ->ols_handle and ->ols_lock are consistent, but
- * take into account that they are set at the different time.
- */
- ergo(handle_used,
- ergo(lock != NULL && olock != NULL, lock == olock) &&
- ergo(lock == NULL, olock == NULL)) &&
- ergo(ols->ols_state == OLS_CANCELLED,
- olock == NULL && !handle_used) &&
- /*
- * DLM lock is destroyed only after we have seen cancellation
- * ast.
- */
- ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
- !olock->l_destroyed) &&
- ergo(ols->ols_state == OLS_GRANTED,
- olock != NULL &&
- olock->l_req_mode == olock->l_granted_mode &&
- ols->ols_hold));
+ struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle);
+ struct ldlm_lock *olock = ols->ols_lock;
+ int handle_used = lustre_handle_is_used(&ols->ols_handle);
+
+ if (ergo(osc_lock_is_lockless(ols),
+ ols->ols_locklessable && ols->ols_lock == NULL))
+ return 1;
+
+ /*
+ * If all the following "ergo"s are true, return 1, otherwise 0
+ */
+ if (! ergo(olock != NULL, handle_used))
+ return 0;
+
+ if (! ergo(olock != NULL,
+ olock->l_handle.h_cookie == ols->ols_handle.cookie))
+ return 0;
+
+ if (! ergo(handle_used,
+ ergo(lock != NULL && olock != NULL, lock == olock) &&
+ ergo(lock == NULL, olock == NULL)))
+ return 0;
+ /*
+ * Check that ->ols_handle and ->ols_lock are consistent, but
+ * take into account that they are set at the different time.
+ */
+ if (! ergo(ols->ols_state == OLS_CANCELLED,
+ olock == NULL && !handle_used))
+ return 0;
+ /*
+ * DLM lock is destroyed only after we have seen cancellation
+ * ast.
+ */
+ if (! ergo(olock != NULL && ols->ols_state < OLS_CANCELLED,
+ ((olock->l_flags & LDLM_FL_DESTROYED) == 0)))
+ return 0;
+
+ if (! ergo(ols->ols_state == OLS_GRANTED,
+ olock != NULL &&
+ olock->l_req_mode == olock->l_granted_mode &&
+ ols->ols_hold))
+ return 0;
+ return 1;
}
/*****************************************************************************
@@ -261,7 +275,7 @@ static __u64 osc_enq2ldlm_flags(__u32 enqflags)
if (enqflags & CEF_ASYNC)
result |= LDLM_FL_HAS_INTENT;
if (enqflags & CEF_DISCARD_DATA)
- result |= LDLM_AST_DISCARD_DATA;
+ result |= LDLM_FL_AST_DISCARD_DATA;
return result;
}
@@ -329,10 +343,8 @@ static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
struct cl_attr *attr;
unsigned valid;
- ENTRY;
-
if (!(olck->ols_flags & LDLM_FL_LVB_READY))
- RETURN_EXIT;
+ return;
lvb = &olck->ols_lvb;
obj = olck->ols_cl.cls_obj;
@@ -378,8 +390,6 @@ static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
cl_object_attr_set(env, obj, attr, valid);
cl_object_attr_unlock(obj);
-
- EXIT;
}
/**
@@ -398,7 +408,6 @@ static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
- ENTRY;
if (olck->ols_state < OLS_GRANTED) {
lock = olck->ols_cl.cls_lock;
ext = &dlmlock->l_policy_data.l_extent;
@@ -428,7 +437,6 @@ static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
LINVRNT(osc_lock_invariant(olck));
lock_res_and_lock(dlmlock);
}
- EXIT;
}
static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
@@ -436,8 +444,6 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
{
struct ldlm_lock *dlmlock;
- ENTRY;
-
dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
LASSERT(dlmlock != NULL);
@@ -483,7 +489,6 @@ static int osc_lock_upcall(void *cookie, int errcode)
struct lu_env *env;
struct cl_env_nest nest;
- ENTRY;
env = cl_env_nested_get(&nest);
if (!IS_ERR(env)) {
int rc;
@@ -575,7 +580,7 @@ static int osc_lock_upcall(void *cookie, int errcode)
/* should never happen, similar to osc_ldlm_blocking_ast(). */
LBUG();
}
- RETURN(errcode);
+ return errcode;
}
/**
@@ -896,55 +901,6 @@ static unsigned long osc_lock_weigh(const struct lu_env *env,
return cl_object_header(slice->cls_obj)->coh_pages;
}
-/**
- * Get the weight of dlm lock for early cancellation.
- *
- * XXX: it should return the pages covered by this \a dlmlock.
- */
-static unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
-{
- struct cl_env_nest nest;
- struct lu_env *env;
- struct osc_lock *lock;
- struct cl_lock *cll;
- unsigned long weight;
- ENTRY;
-
- might_sleep();
- /*
- * osc_ldlm_weigh_ast has a complex context since it might be called
- * because of lock canceling, or from user's input. We have to make
- * a new environment for it. Probably it is implementation safe to use
- * the upper context because cl_lock_put don't modify environment
- * variables. But in case of ..
- */
- env = cl_env_nested_get(&nest);
- if (IS_ERR(env))
- /* Mostly because lack of memory, tend to eliminate this lock*/
- RETURN(0);
-
- LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
- lock = osc_ast_data_get(dlmlock);
- if (lock == NULL) {
- /* cl_lock was destroyed because of memory pressure.
- * It is much reasonable to assign this type of lock
- * a lower cost.
- */
- GOTO(out, weight = 0);
- }
-
- cll = lock->ols_cl.cls_lock;
- cl_lock_mutex_get(env, cll);
- weight = cl_lock_weigh(env, cll);
- cl_lock_mutex_put(env, cll);
- osc_ast_data_put(env, lock);
- EXIT;
-
-out:
- cl_env_nested_put(&nest, env);
- return weight;
-}
-
static void osc_lock_build_einfo(const struct lu_env *env,
const struct cl_lock *clock,
struct osc_lock *lock,
@@ -966,7 +922,6 @@ static void osc_lock_build_einfo(const struct lu_env *env,
einfo->ei_cb_bl = osc_ldlm_blocking_ast;
einfo->ei_cb_cp = osc_ldlm_completion_ast;
einfo->ei_cb_gl = osc_ldlm_glimpse_ast;
- einfo->ei_cb_wg = osc_ldlm_weigh_ast;
einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
}
@@ -1059,7 +1014,6 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
struct cl_lock *conflict= NULL;
int lockless = osc_lock_is_lockless(olck);
int rc = 0;
- ENTRY;
LASSERT(cl_lock_is_mutexed(lock));
@@ -1130,7 +1084,7 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
rc = CLO_WAIT;
}
}
- RETURN(rc);
+ return rc;
}
/**
@@ -1154,7 +1108,6 @@ static int osc_lock_enqueue(const struct lu_env *env,
struct osc_lock *ols = cl2osc_lock(slice);
struct cl_lock *lock = ols->ols_cl.cls_lock;
int result;
- ENTRY;
LASSERT(cl_lock_is_mutexed(lock));
LASSERTF(ols->ols_state == OLS_NEW,
@@ -1207,7 +1160,7 @@ static int osc_lock_enqueue(const struct lu_env *env,
}
}
LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
- RETURN(result);
+ return result;
}
static int osc_lock_wait(const struct lu_env *env,
@@ -1298,7 +1251,6 @@ static int osc_lock_flush(struct osc_lock *ols, int discard)
struct cl_env_nest nest;
struct lu_env *env;
int result = 0;
- ENTRY;
env = cl_env_nested_get(&nest);
if (!IS_ERR(env)) {
@@ -1328,7 +1280,7 @@ static int osc_lock_flush(struct osc_lock *ols, int discard)
ols->ols_flush = 1;
LINVRNT(!osc_lock_has_pages(ols));
}
- RETURN(result);
+ return result;
}
/**
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
index ca94e633138..9d34de873fa 100644
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ b/drivers/staging/lustre/lustre/osc/osc_object.c
@@ -191,10 +191,9 @@ static int osc_object_glimpse(const struct lu_env *env,
{
struct lov_oinfo *oinfo = cl2osc(obj)->oo_oinfo;
- ENTRY;
lvb->lvb_size = oinfo->loi_kms;
lvb->lvb_blocks = oinfo->loi_lvb.lvb_blocks;
- RETURN(0);
+ return 0;
}
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index baba959a745..d272322b29b 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -219,7 +219,6 @@ static int osc_page_cache_add(const struct lu_env *env,
struct osc_io *oio = osc_env_io(env);
struct osc_page *opg = cl2osc_page(slice);
int result;
- ENTRY;
LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));
@@ -240,7 +239,7 @@ static int osc_page_cache_add(const struct lu_env *env,
}
}
- RETURN(result);
+ return result;
}
void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
@@ -294,7 +293,6 @@ static int osc_page_is_under_lock(const struct lu_env *env,
struct cl_lock *lock;
int result = -ENODATA;
- ENTRY;
lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
NULL, 1, 0);
if (lock != NULL) {
@@ -302,7 +300,7 @@ static int osc_page_is_under_lock(const struct lu_env *env,
result = -EBUSY;
cl_lock_put(env, lock);
}
- RETURN(result);
+ return result;
}
static void osc_page_disown(const struct lu_env *env,
@@ -421,7 +419,6 @@ static void osc_page_delete(const struct lu_env *env,
LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));
- ENTRY;
CDEBUG(D_TRACE, "%p\n", opg);
osc_page_transfer_put(env, opg);
rc = osc_teardown_async_page(env, obj, opg);
@@ -440,7 +437,6 @@ static void osc_page_delete(const struct lu_env *env,
spin_unlock(&obj->oo_seatbelt);
osc_lru_del(osc_cli(obj), opg, true);
- EXIT;
}
void osc_page_clip(const struct lu_env *env, const struct cl_page_slice *slice,
@@ -481,9 +477,9 @@ static int osc_page_flush(const struct lu_env *env,
{
struct osc_page *opg = cl2osc_page(slice);
int rc = 0;
- ENTRY;
+
rc = osc_flush_async_page(env, io, opg);
- RETURN(rc);
+ return rc;
}
static const struct cl_page_operations osc_page_ops = {
@@ -586,7 +582,7 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
* at any time.
*/
-static CFS_DECL_WAITQ(osc_lru_waitq);
+static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
/* LRU pages are freed in batch mode. OSC should at least free this
* number of pages to avoid running out of LRU budget, and.. */
@@ -666,15 +662,14 @@ int osc_lru_shrink(struct client_obd *cli, int target)
int count = 0;
int index = 0;
int rc = 0;
- ENTRY;
LASSERT(atomic_read(&cli->cl_lru_in_list) >= 0);
if (atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
- RETURN(0);
+ return 0;
env = cl_env_nested_get(&nest);
if (IS_ERR(env))
- RETURN(PTR_ERR(env));
+ return PTR_ERR(env);
pvec = osc_env_info(env)->oti_pvec;
io = &osc_env_info(env)->oti_io;
@@ -757,7 +752,7 @@ int osc_lru_shrink(struct client_obd *cli, int target)
cl_env_nested_put(&nest, env);
atomic_dec(&cli->cl_lru_shrinkers);
- RETURN(count > 0 ? count : rc);
+ return count > 0 ? count : rc;
}
static void osc_lru_add(struct client_obd *cli, struct osc_page *opg)
@@ -881,13 +876,12 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
struct client_obd *cli = osc_cli(obj);
int rc = 0;
- ENTRY;
if (cli->cl_cache == NULL) /* shall not be in LRU */
- RETURN(0);
+ return 0;
LASSERT(atomic_read(cli->cl_lru_left) >= 0);
- while (!cfs_atomic_add_unless(cli->cl_lru_left, -1, 0)) {
+ while (!atomic_add_unless(cli->cl_lru_left, -1, 0)) {
int gen;
/* run out of LRU spaces, try to drop some by itself */
@@ -921,7 +915,7 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
rc = 0;
}
- RETURN(rc);
+ return rc;
}
/** @} osc */
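Besides the trace-macro removal, the osc_page.c hunks drop two more libcfs wrappers: CFS_DECL_WAITQ becomes DECLARE_WAIT_QUEUE_HEAD and cfs_atomic_add_unless becomes atomic_add_unless. A small sketch of those plain-kernel primitives, with illustrative names, is:

#include <linux/wait.h>
#include <linux/atomic.h>

static DECLARE_WAIT_QUEUE_HEAD(example_waitq);
static atomic_t example_budget = ATOMIC_INIT(8);

/* Take one unit of budget; atomic_add_unless(v, -1, 0) decrements v
 * unless it is already zero and returns non-zero on success. */
static int example_try_reserve(void)
{
	return atomic_add_unless(&example_budget, -1, 0);
}

/* Give the unit back and wake anyone sleeping on the queue. */
static void example_release(void)
{
	atomic_inc(&example_budget);
	wake_up(&example_waitq);
}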
diff --git a/drivers/staging/lustre/lustre/osc/osc_quota.c b/drivers/staging/lustre/lustre/osc/osc_quota.c
index 69caab76ced..9720c0e865c 100644
--- a/drivers/staging/lustre/lustre/osc/osc_quota.c
+++ b/drivers/staging/lustre/lustre/osc/osc_quota.c
@@ -45,7 +45,6 @@ static inline struct osc_quota_info *osc_oqi_alloc(obd_uid id)
int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[])
{
int type;
- ENTRY;
for (type = 0; type < MAXQUOTAS; type++) {
struct osc_quota_info *oqi;
@@ -62,11 +61,11 @@ int osc_quota_chkdq(struct client_obd *cli, const unsigned int qid[])
* quota space on this OST */
CDEBUG(D_QUOTA, "chkdq found noquota for %s %d\n",
type == USRQUOTA ? "user" : "grout", qid[type]);
- RETURN(NO_QUOTA);
+ return NO_QUOTA;
}
}
- RETURN(QUOTA_OK);
+ return QUOTA_OK;
}
#define MD_QUOTA_FLAG(type) ((type == USRQUOTA) ? OBD_MD_FLUSRQUOTA \
@@ -79,10 +78,9 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
{
int type;
int rc = 0;
- ENTRY;
if ((valid & (OBD_MD_FLUSRQUOTA | OBD_MD_FLGRPQUOTA)) == 0)
- RETURN(0);
+ return 0;
for (type = 0; type < MAXQUOTAS; type++) {
struct osc_quota_info *oqi;
@@ -134,7 +132,7 @@ int osc_quota_setdq(struct client_obd *cli, const unsigned int qid[],
}
}
- RETURN(rc);
+ return rc;
}
/*
@@ -211,7 +209,6 @@ int osc_quota_setup(struct obd_device *obd)
{
struct client_obd *cli = &obd->u.cli;
int i, type;
- ENTRY;
for (type = 0; type < MAXQUOTAS; type++) {
cli->cl_quota_hash[type] = cfs_hash_create("QUOTA_HASH",
@@ -228,24 +225,23 @@ int osc_quota_setup(struct obd_device *obd)
}
if (type == MAXQUOTAS)
- RETURN(0);
+ return 0;
for (i = 0; i < type; i++)
cfs_hash_putref(cli->cl_quota_hash[i]);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
int osc_quota_cleanup(struct obd_device *obd)
{
struct client_obd *cli = &obd->u.cli;
int type;
- ENTRY;
for (type = 0; type < MAXQUOTAS; type++)
cfs_hash_putref(cli->cl_quota_hash[type]);
- RETURN(0);
+ return 0;
}
int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
@@ -254,13 +250,12 @@ int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
struct ptlrpc_request *req;
struct obd_quotactl *oqc;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_OST_QUOTACTL, LUSTRE_OST_VERSION,
OST_QUOTACTL);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
*oqc = *oqctl;
@@ -282,7 +277,7 @@ int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
}
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
}
int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
@@ -292,13 +287,12 @@ int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
struct ptlrpc_request *req;
struct obd_quotactl *body;
int rc;
- ENTRY;
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
&RQF_OST_QUOTACHECK, LUSTRE_OST_VERSION,
OST_QUOTACHECK);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
*body = *oqctl;
@@ -312,14 +306,13 @@ int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
if (rc)
cli->cl_qchk_stat = rc;
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
}
int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk)
{
struct client_obd *cli = &exp->exp_obd->u.cli;
int rc;
- ENTRY;
qchk->obd_uuid = cli->cl_target_uuid;
memcpy(qchk->obd_type, LUSTRE_OST_NAME, strlen(LUSTRE_OST_NAME));
@@ -328,5 +321,5 @@ int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk)
/* the client is not the previous one */
if (rc == CL_NOT_QUOTACHECKED)
rc = -EINTR;
- RETURN(rc);
+ return rc;
}
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 53d6a35c80b..ee6707a5ea9 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -69,30 +69,29 @@ static int osc_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
struct lov_stripe_md *lsm)
{
int lmm_size;
- ENTRY;
lmm_size = sizeof(**lmmp);
if (lmmp == NULL)
- RETURN(lmm_size);
+ return lmm_size;
if (*lmmp != NULL && lsm == NULL) {
OBD_FREE(*lmmp, lmm_size);
*lmmp = NULL;
- RETURN(0);
+ return 0;
} else if (unlikely(lsm != NULL && ostid_id(&lsm->lsm_oi) == 0)) {
- RETURN(-EBADF);
+ return -EBADF;
}
if (*lmmp == NULL) {
OBD_ALLOC(*lmmp, lmm_size);
if (*lmmp == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
if (lsm)
ostid_cpu_to_le(&lsm->lsm_oi, &(*lmmp)->lmm_oi);
- RETURN(lmm_size);
+ return lmm_size;
}
/* Unpack OSC object metadata from disk storage (LE byte order). */
@@ -101,47 +100,46 @@ static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
{
int lsm_size;
struct obd_import *imp = class_exp2cliimp(exp);
- ENTRY;
if (lmm != NULL) {
if (lmm_bytes < sizeof(*lmm)) {
CERROR("%s: lov_mds_md too small: %d, need %d\n",
exp->exp_obd->obd_name, lmm_bytes,
(int)sizeof(*lmm));
- RETURN(-EINVAL);
+ return -EINVAL;
}
/* XXX LOV_MAGIC etc check? */
if (unlikely(ostid_id(&lmm->lmm_oi) == 0)) {
CERROR("%s: zero lmm_object_id: rc = %d\n",
exp->exp_obd->obd_name, -EINVAL);
- RETURN(-EINVAL);
+ return -EINVAL;
}
}
lsm_size = lov_stripe_md_size(1);
if (lsmp == NULL)
- RETURN(lsm_size);
+ return lsm_size;
if (*lsmp != NULL && lmm == NULL) {
OBD_FREE((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
OBD_FREE(*lsmp, lsm_size);
*lsmp = NULL;
- RETURN(0);
+ return 0;
}
if (*lsmp == NULL) {
OBD_ALLOC(*lsmp, lsm_size);
if (unlikely(*lsmp == NULL))
- RETURN(-ENOMEM);
+ return -ENOMEM;
OBD_ALLOC((*lsmp)->lsm_oinfo[0], sizeof(struct lov_oinfo));
if (unlikely((*lsmp)->lsm_oinfo[0] == NULL)) {
OBD_FREE(*lsmp, lsm_size);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
loi_init((*lsmp)->lsm_oinfo[0]);
} else if (unlikely(ostid_id(&(*lsmp)->lsm_oi) == 0)) {
- RETURN(-EBADF);
+ return -EBADF;
}
if (lmm != NULL)
@@ -154,7 +152,7 @@ static int osc_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
else
(*lsmp)->lsm_maxbytes = LUSTRE_STRIPE_MAXBYTES;
- RETURN(lsm_size);
+ return lsm_size;
}
static inline void osc_pack_capa(struct ptlrpc_request *req,
@@ -202,7 +200,6 @@ static int osc_getattr_interpret(const struct lu_env *env,
struct osc_async_args *aa, int rc)
{
struct ost_body *body;
- ENTRY;
if (rc != 0)
GOTO(out, rc);
@@ -223,7 +220,7 @@ static int osc_getattr_interpret(const struct lu_env *env,
}
out:
rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
- RETURN(rc);
+ return rc;
}
static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
@@ -232,17 +229,16 @@ static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
struct ptlrpc_request *req;
struct osc_async_args *aa;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
osc_pack_req_body(req, oinfo);
@@ -255,7 +251,7 @@ static int osc_getattr_async(struct obd_export *exp, struct obd_info *oinfo,
aa->aa_oi = oinfo;
ptlrpc_set_add_req(set, req);
- RETURN(0);
+ return 0;
}
static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
@@ -264,17 +260,16 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
struct ptlrpc_request *req;
struct ost_body *body;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
osc_pack_req_body(req, oinfo);
@@ -296,7 +291,6 @@ static int osc_getattr(const struct lu_env *env, struct obd_export *exp,
oinfo->oi_oa->o_blksize = cli_brw_size(exp->exp_obd);
oinfo->oi_oa->o_valid |= OBD_MD_FLBLKSZ;
- EXIT;
out:
ptlrpc_req_finished(req);
return rc;
@@ -308,19 +302,18 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
struct ptlrpc_request *req;
struct ost_body *body;
int rc;
- ENTRY;
LASSERT(oinfo->oi_oa->o_valid & OBD_MD_FLGROUP);
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
osc_pack_req_body(req, oinfo);
@@ -338,10 +331,9 @@ static int osc_setattr(const struct lu_env *env, struct obd_export *exp,
lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa,
&body->oa);
- EXIT;
out:
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
}
static int osc_setattr_interpret(const struct lu_env *env,
@@ -349,7 +341,6 @@ static int osc_setattr_interpret(const struct lu_env *env,
struct osc_setattr_args *sa, int rc)
{
struct ost_body *body;
- ENTRY;
if (rc != 0)
GOTO(out, rc);
@@ -362,7 +353,7 @@ static int osc_setattr_interpret(const struct lu_env *env,
&body->oa);
out:
rc = sa->sa_upcall(sa->sa_cookie, rc);
- RETURN(rc);
+ return rc;
}
int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
@@ -373,17 +364,16 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
struct ptlrpc_request *req;
struct osc_setattr_args *sa;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
@@ -413,7 +403,7 @@ int osc_setattr_async_base(struct obd_export *exp, struct obd_info *oinfo,
ptlrpc_set_add_req(rqset, req);
}
- RETURN(0);
+ return 0;
}
static int osc_setattr_async(struct obd_export *exp, struct obd_info *oinfo,
@@ -431,7 +421,6 @@ int osc_real_create(struct obd_export *exp, struct obdo *oa,
struct ost_body *body;
struct lov_stripe_md *lsm;
int rc;
- ENTRY;
LASSERT(oa);
LASSERT(ea);
@@ -440,7 +429,7 @@ int osc_real_create(struct obd_export *exp, struct obdo *oa,
if (!lsm) {
rc = obd_alloc_memmd(exp, &lsm);
if (rc < 0)
- RETURN(rc);
+ return rc;
}
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE);
@@ -506,7 +495,7 @@ out_req:
out:
if (rc && !*ea)
obd_free_memmd(exp, &lsm);
- RETURN(rc);
+ return rc;
}
int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
@@ -517,17 +506,16 @@ int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
struct osc_setattr_args *sa;
struct ost_body *body;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
ptlrpc_at_set_req_timeout(req);
@@ -551,7 +539,7 @@ int osc_punch_base(struct obd_export *exp, struct obd_info *oinfo,
else
ptlrpc_set_add_req(rqset, req);
- RETURN(0);
+ return 0;
}
static int osc_punch(const struct lu_env *env, struct obd_export *exp,
@@ -571,7 +559,6 @@ static int osc_sync_interpret(const struct lu_env *env,
{
struct osc_fsync_args *fa = arg;
struct ost_body *body;
- ENTRY;
if (rc)
GOTO(out, rc);
@@ -585,7 +572,7 @@ static int osc_sync_interpret(const struct lu_env *env,
*fa->fa_oi->oi_oa = body->oa;
out:
rc = fa->fa_upcall(fa->fa_cookie, rc);
- RETURN(rc);
+ return rc;
}
int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
@@ -596,17 +583,16 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
struct ost_body *body;
struct osc_fsync_args *fa;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa);
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
/* overload the size and blocks fields in the oa with start/end */
@@ -630,25 +616,23 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
else
ptlrpc_set_add_req(rqset, req);
- RETURN (0);
+ return 0;
}
static int osc_sync(const struct lu_env *env, struct obd_export *exp,
struct obd_info *oinfo, obd_size start, obd_size end,
struct ptlrpc_request_set *set)
{
- ENTRY;
-
if (!oinfo->oi_oa) {
CDEBUG(D_INFO, "oa NULL\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
oinfo->oi_oa->o_size = start;
oinfo->oi_oa->o_blocks = end;
oinfo->oi_oa->o_valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);
- RETURN(osc_sync_base(exp, oinfo, oinfo->oi_cb_up, oinfo, set));
+ return osc_sync_base(exp, oinfo, oinfo->oi_cb_up, oinfo, set);
}
/* Find and cancel locally locks matched by @mode in the resource found by
@@ -662,7 +646,6 @@ static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
struct ldlm_res_id res_id;
struct ldlm_resource *res;
int count;
- ENTRY;
/* Return, i.e. cancel nothing, only if ELC is supported (flag in
* export) but disabled through procfs (flag in NS).
@@ -671,19 +654,19 @@ static int osc_resource_get_unused(struct obd_export *exp, struct obdo *oa,
* when we still want to cancel locks in advance and just cancel them
* locally, without sending any RPC. */
if (exp_connect_cancelset(exp) && !ns_connect_cancelset(ns))
- RETURN(0);
+ return 0;
ostid_build_res_name(&oa->o_oi, &res_id);
res = ldlm_resource_get(ns, NULL, &res_id, 0, 0);
if (res == NULL)
- RETURN(0);
+ return 0;
LDLM_RESOURCE_ADDREF(res);
count = ldlm_cancel_resource_local(res, cancels, NULL, mode,
lock_flags, 0, NULL);
LDLM_RESOURCE_DELREF(res);
ldlm_resource_putref(res);
- RETURN(count);
+ return count;
}
static int osc_destroy_interpret(const struct lu_env *env,
@@ -720,7 +703,6 @@ int osc_create(const struct lu_env *env, struct obd_export *exp,
struct obd_trans_info *oti)
{
int rc = 0;
- ENTRY;
LASSERT(oa);
LASSERT(ea);
@@ -728,16 +710,16 @@ int osc_create(const struct lu_env *env, struct obd_export *exp,
if ((oa->o_valid & OBD_MD_FLFLAGS) &&
oa->o_flags == OBD_FL_RECREATE_OBJS) {
- RETURN(osc_real_create(exp, oa, ea, oti));
+ return osc_real_create(exp, oa, ea, oti);
}
if (!fid_seq_is_mdt(ostid_seq(&oa->o_oi)))
- RETURN(osc_real_create(exp, oa, ea, oti));
+ return osc_real_create(exp, oa, ea, oti);
/* we should not get here anymore */
LBUG();
- RETURN(rc);
+ return rc;
}
/* Destroy requests can be async always on the client, and we don't even really
@@ -760,11 +742,10 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
struct ost_body *body;
LIST_HEAD(cancels);
int rc, count;
- ENTRY;
if (!oa) {
CDEBUG(D_INFO, "oa NULL\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
count = osc_resource_get_unused(exp, oa, &cancels, LCK_PW,
@@ -773,7 +754,7 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY);
if (req == NULL) {
ldlm_lock_list_put(&cancels, l_bl_ast, count);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
osc_set_capa_size(req, &RMF_CAPA1, (struct obd_capa *)capa);
@@ -781,7 +762,7 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
0, &cancels, count);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
@@ -817,7 +798,7 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
/* Do not wait for response */
ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
- RETURN(0);
+ return 0;
}
static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
@@ -948,7 +929,6 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
{
int rc = 0;
struct ost_body *body;
- ENTRY;
client_obd_list_lock(&cli->cl_loi_list_lock);
/* Don't shrink if we are already above or below the desired limit
@@ -959,13 +939,13 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
if (target_bytes >= cli->cl_avail_grant) {
client_obd_list_unlock(&cli->cl_loi_list_lock);
- RETURN(0);
+ return 0;
}
client_obd_list_unlock(&cli->cl_loi_list_lock);
OBD_ALLOC_PTR(body);
if (!body)
- RETURN(-ENOMEM);
+ return -ENOMEM;
osc_announce_cached(cli, &body->oa, 0);
@@ -986,7 +966,7 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
if (rc != 0)
__osc_update_grant(cli, body->oa.o_grant);
OBD_FREE_PTR(body);
- RETURN(rc);
+ return rc;
}
static int osc_should_shrink_grant(struct client_obd *client)
@@ -1256,11 +1236,10 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
struct req_capsule *pill;
struct brw_page *pg_prev;
- ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
- RETURN(-ENOMEM); /* Recoverable */
+ return -ENOMEM; /* Recoverable */
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
- RETURN(-EINVAL); /* Fatal */
+ return -EINVAL; /* Fatal */
if ((cmd & OBD_BRW_WRITE) != 0) {
opc = OST_WRITE;
@@ -1272,7 +1251,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ);
}
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
for (niocount = i = 1; i < page_count; i++) {
if (!can_merge_pages(pga[i - 1], pga[i]))
@@ -1289,7 +1268,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */
ptlrpc_at_set_req_timeout(req);
@@ -1435,11 +1414,11 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,struct obdo *oa,
aa->aa_ocapa = capa_get(ocapa);
*reqp = req;
- RETURN(0);
+ return 0;
out:
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
}
static int check_write_checksum(struct obdo *oa, const lnet_process_id_t *peer,
@@ -1496,18 +1475,17 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
struct client_obd *cli = aa->aa_cli;
struct ost_body *body;
__u32 client_cksum = 0;
- ENTRY;
if (rc < 0 && rc != -EDQUOT) {
DEBUG_REQ(D_INFO, req, "Failed request with rc = %d\n", rc);
- RETURN(rc);
+ return rc;
}
LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc);
body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
if (body == NULL) {
DEBUG_REQ(D_INFO, req, "Can't unpack body\n");
- RETURN(-EPROTO);
+ return -EPROTO;
}
/* set/clear over quota flag for a uid/gid */
@@ -1524,7 +1502,7 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
osc_update_grant(cli, body);
if (rc < 0)
- RETURN(rc);
+ return rc;
if (aa->aa_oa->o_valid & OBD_MD_FLCKSUM)
client_cksum = aa->aa_oa->o_cksum; /* save for later */
@@ -1532,19 +1510,19 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
if (rc > 0) {
CERROR("Unexpected +ve rc %d\n", rc);
- RETURN(-EPROTO);
+ return -EPROTO;
}
LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob);
if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk))
- RETURN(-EAGAIN);
+ return -EAGAIN;
if ((aa->aa_oa->o_valid & OBD_MD_FLCKSUM) && client_cksum &&
check_write_checksum(&body->oa, peer, client_cksum,
body->oa.o_cksum, aa->aa_requested_nob,
aa->aa_page_count, aa->aa_ppga,
cksum_type_unpack(aa->aa_oa->o_flags)))
- RETURN(-EAGAIN);
+ return -EAGAIN;
rc = check_write_rcs(req, aa->aa_requested_nob,aa->aa_nio_count,
aa->aa_page_count, aa->aa_ppga);
@@ -1561,7 +1539,7 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
if (rc > aa->aa_requested_nob) {
CERROR("Unexpected rc %d (%d requested)\n", rc,
aa->aa_requested_nob);
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (rc != req->rq_bulk->bd_nob_transferred) {
@@ -1641,7 +1619,7 @@ out:
lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
aa->aa_oa, &body->oa);
- RETURN(rc);
+ return rc;
}
static int osc_brw_internal(int cmd, struct obd_export *exp, struct obdo *oa,
@@ -1655,8 +1633,6 @@ static int osc_brw_internal(int cmd, struct obd_export *exp, struct obdo *oa,
int generation, resends = 0;
struct l_wait_info lwi;
- ENTRY;
-
init_waitqueue_head(&waitq);
generation = exp->exp_obd->u.cli.cl_import->imp_generation;
@@ -1711,7 +1687,7 @@ restart_bulk:
out:
if (rc == -EAGAIN || rc == -EINPROGRESS)
rc = -EIO;
- RETURN (rc);
+ return rc;
}
static int osc_brw_redo_request(struct ptlrpc_request *request,
@@ -1720,7 +1696,6 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
struct ptlrpc_request *new_req;
struct osc_brw_async_args *new_aa;
struct osc_async_page *oap;
- ENTRY;
DEBUG_REQ(rc == -EINPROGRESS ? D_RPCTRACE : D_ERROR, request,
"redo for recoverable error %d", rc);
@@ -1732,7 +1707,7 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
aa->aa_page_count, aa->aa_ppga,
&new_req, aa->aa_ocapa, 0, 1);
if (rc)
- RETURN(rc);
+ return rc;
list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
if (oap->oap_request != NULL) {
@@ -1741,7 +1716,7 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
request, oap->oap_request);
if (oap->oap_interrupted) {
ptlrpc_req_finished(new_req);
- RETURN(-EINTR);
+ return -EINTR;
}
}
}
@@ -1784,7 +1759,7 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
ptlrpcd_add_req(new_req, PDL_POLICY_SAME, -1);
DEBUG_REQ(D_INFO, new_req, "new request");
- RETURN(0);
+ return 0;
}
/*
@@ -1873,7 +1848,6 @@ static int osc_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
struct obd_import *imp = class_exp2cliimp(exp);
struct client_obd *cli;
int rc, page_count_orig;
- ENTRY;
LASSERT((imp != NULL) && (imp->imp_obd != NULL));
cli = &imp->imp_obd->u.cli;
@@ -1883,8 +1857,8 @@ static int osc_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
* I/O can succeed */
if (imp->imp_invalid)
- RETURN(-EIO);
- RETURN(0);
+ return -EIO;
+ return 0;
}
/* test_brw with a failed create can trip this, maybe others. */
@@ -1894,7 +1868,7 @@ static int osc_brw(int cmd, struct obd_export *exp, struct obd_info *oinfo,
orig = ppga = osc_build_ppga(pga, page_count);
if (ppga == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
page_count_orig = page_count;
sort_brw_pages(ppga, page_count);
@@ -1935,7 +1909,7 @@ out:
if (saved_oa != NULL)
OBDO_FREE(saved_oa);
- RETURN(rc);
+ return rc;
}
static int brw_interpret(const struct lu_env *env,
@@ -1946,7 +1920,6 @@ static int brw_interpret(const struct lu_env *env,
struct osc_extent *tmp;
struct cl_object *obj = NULL;
struct client_obd *cli = aa->aa_cli;
- ENTRY;
rc = osc_brw_fini_request(req, rc);
CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
@@ -1970,7 +1943,7 @@ static int brw_interpret(const struct lu_env *env,
}
if (rc == 0)
- RETURN(0);
+ return 0;
else if (rc == -EAGAIN || rc == -EINPROGRESS)
rc = -EIO;
}
@@ -2040,7 +2013,7 @@ static int brw_interpret(const struct lu_env *env,
client_obd_list_unlock(&cli->cl_loi_list_lock);
osc_io_unplug(env, cli, NULL, PDL_POLICY_SAME);
- RETURN(rc);
+ return rc;
}
/**
@@ -2072,7 +2045,6 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
int rc;
LIST_HEAD(rpc_list);
- ENTRY;
LASSERT(!list_empty(ext_list));
/* add pages into rpc_list to build BRW rpc */
@@ -2228,7 +2200,6 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
*/
ptlrpcd_add_req(req, pol, -1);
rc = 0;
- EXIT;
out:
if (mem_tight != 0)
@@ -2257,7 +2228,7 @@ out:
if (clerq && !IS_ERR(clerq))
cl_req_completion(env, clerq, rc);
}
- RETURN(rc);
+ return rc;
}
static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
@@ -2337,7 +2308,6 @@ static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb,
__u64 *flags, int agl, int rc)
{
int intent = *flags & LDLM_FL_HAS_INTENT;
- ENTRY;
if (intent) {
/* The request was created before ldlm_cli_enqueue call. */
@@ -2347,6 +2317,8 @@ static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb,
&RMF_DLM_REP);
LASSERT(rep != NULL);
+ rep->lock_policy_res1 =
+ ptlrpc_status_ntoh(rep->lock_policy_res1);
if (rep->lock_policy_res1)
rc = rep->lock_policy_res1;
}
@@ -2361,7 +2333,7 @@ static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb,
/* Call the update callback. */
rc = (*upcall)(cookie, rc);
- RETURN(rc);
+ return rc;
}
static int osc_enqueue_interpret(const struct lu_env *env,
@@ -2494,7 +2466,6 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
int match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY);
ldlm_mode_t mode;
int rc;
- ENTRY;
/* Filesystem lock extents are extended to page boundaries so that
* dealing with the page cache is a little smoother. */
@@ -2536,7 +2507,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
* Return -ECANCELED to tell the caller. */
ldlm_lock_decref(lockh, mode);
LDLM_LOCK_PUT(matched);
- RETURN(-ECANCELED);
+ return -ECANCELED;
} else if (osc_set_lock_data_with_check(matched, einfo)) {
*flags |= LDLM_FL_LVB_READY;
/* addref the lock only if not async requests and PW
@@ -2561,7 +2532,7 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
/* For async requests, decref the lock. */
ldlm_lock_decref(lockh, einfo->ei_mode);
LDLM_LOCK_PUT(matched);
- RETURN(ELDLM_OK);
+ return ELDLM_OK;
} else {
ldlm_lock_decref(lockh, mode);
LDLM_LOCK_PUT(matched);
@@ -2574,12 +2545,12 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_LDLM_ENQUEUE_LVB);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
@@ -2615,14 +2586,14 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
} else if (intent) {
ptlrpc_req_finished(req);
}
- RETURN(rc);
+ return rc;
}
rc = osc_enqueue_fini(req, lvb, upcall, cookie, flags, agl, rc);
if (intent)
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
}
static int osc_enqueue(struct obd_export *exp, struct obd_info *oinfo,
@@ -2631,7 +2602,6 @@ static int osc_enqueue(struct obd_export *exp, struct obd_info *oinfo,
{
struct ldlm_res_id res_id;
int rc;
- ENTRY;
ostid_build_res_name(&oinfo->oi_md->lsm_oi, &res_id);
rc = osc_enqueue_base(exp, &res_id, &oinfo->oi_flags, &oinfo->oi_policy,
@@ -2639,7 +2609,7 @@ static int osc_enqueue(struct obd_export *exp, struct obd_info *oinfo,
oinfo->oi_md->lsm_oinfo[0]->loi_kms_valid,
oinfo->oi_cb_up, oinfo, einfo, oinfo->oi_lockh,
rqset, rqset != NULL, 0);
- RETURN(rc);
+ return rc;
}
int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
@@ -2650,10 +2620,9 @@ int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
struct obd_device *obd = exp->exp_obd;
int lflags = *flags;
ldlm_mode_t rc;
- ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_MATCH))
- RETURN(-EIO);
+ return -EIO;
/* Filesystem lock extents are extended to page boundaries so that
* dealing with the page cache is a little smoother */
@@ -2674,35 +2643,32 @@ int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
if (!osc_set_data_with_check(lockh, data)) {
if (!(lflags & LDLM_FL_TEST_LOCK))
ldlm_lock_decref(lockh, rc);
- RETURN(0);
+ return 0;
}
}
if (!(lflags & LDLM_FL_TEST_LOCK) && mode != rc) {
ldlm_lock_addref(lockh, LCK_PR);
ldlm_lock_decref(lockh, LCK_PW);
}
- RETURN(rc);
+ return rc;
}
- RETURN(rc);
+ return rc;
}
int osc_cancel_base(struct lustre_handle *lockh, __u32 mode)
{
- ENTRY;
-
if (unlikely(mode == LCK_GROUP))
ldlm_lock_decref_and_cancel(lockh, mode);
else
ldlm_lock_decref(lockh, mode);
- RETURN(0);
+ return 0;
}
static int osc_cancel(struct obd_export *exp, struct lov_stripe_md *md,
__u32 mode, struct lustre_handle *lockh)
{
- ENTRY;
- RETURN(osc_cancel_base(lockh, mode));
+ return osc_cancel_base(lockh, mode);
}
static int osc_cancel_unused(struct obd_export *exp,
@@ -2726,7 +2692,6 @@ static int osc_statfs_interpret(const struct lu_env *env,
struct osc_async_args *aa, int rc)
{
struct obd_statfs *msfs;
- ENTRY;
if (rc == -EBADR)
/* The request has in fact never been sent
@@ -2734,7 +2699,7 @@ static int osc_statfs_interpret(const struct lu_env *env,
* Exit immediately since the caller is
* aware of the problem and takes care
* of the clean up */
- RETURN(rc);
+ return rc;
if ((rc == -ENOTCONN || rc == -EAGAIN) &&
(aa->aa_oi->oi_flags & OBD_STATFS_NODELAY))
@@ -2751,7 +2716,7 @@ static int osc_statfs_interpret(const struct lu_env *env,
*aa->aa_oi->oi_osfs = *msfs;
out:
rc = aa->aa_oi->oi_cb_up(aa->aa_oi, rc);
- RETURN(rc);
+ return rc;
}
static int osc_statfs_async(struct obd_export *exp,
@@ -2762,7 +2727,6 @@ static int osc_statfs_async(struct obd_export *exp,
struct ptlrpc_request *req;
struct osc_async_args *aa;
int rc;
- ENTRY;
/* We could possibly pass max_age in the request (as an absolute
* timestamp or a "seconds.usec ago") so the target can avoid doing
@@ -2772,12 +2736,12 @@ static int osc_statfs_async(struct obd_export *exp,
* timestamps are not ideal because they need time synchronization. */
req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
ptlrpc_request_set_replen(req);
req->rq_request_portal = OST_CREATE_PORTAL;
@@ -2795,7 +2759,7 @@ static int osc_statfs_async(struct obd_export *exp,
aa->aa_oi = oinfo;
ptlrpc_set_add_req(rqset, req);
- RETURN(0);
+ return 0;
}
static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
@@ -2806,7 +2770,6 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
struct ptlrpc_request *req;
struct obd_import *imp = NULL;
int rc;
- ENTRY;
/*Since the request might also come from lprocfs, so we need
*sync this with client_disconnect_export Bug15684*/
@@ -2815,7 +2778,7 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
imp = class_import_get(obd->u.cli.cl_import);
up_read(&obd->u.cli.cl_sem);
if (!imp)
- RETURN(-ENODEV);
+ return -ENODEV;
/* We could possibly pass max_age in the request (as an absolute
* timestamp or a "seconds.usec ago") so the target can avoid doing
@@ -2828,12 +2791,12 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
class_import_put(imp);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
ptlrpc_request_set_replen(req);
req->rq_request_portal = OST_CREATE_PORTAL;
@@ -2856,7 +2819,6 @@ static int osc_statfs(const struct lu_env *env, struct obd_export *exp,
*osfs = *msfs;
- EXIT;
out:
ptlrpc_req_finished(req);
return rc;
@@ -2874,20 +2836,19 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
struct lov_user_md_v3 lum, *lumk;
struct lov_user_ost_data_v1 *lmm_objects;
int rc = 0, lum_size;
- ENTRY;
if (!lsm)
- RETURN(-ENODATA);
+ return -ENODATA;
/* we only need the header part from user space to get lmm_magic and
* lmm_stripe_count, (the header part is common to v1 and v3) */
lum_size = sizeof(struct lov_user_md_v1);
if (copy_from_user(&lum, lump, lum_size))
- RETURN(-EFAULT);
+ return -EFAULT;
if ((lum.lmm_magic != LOV_USER_MAGIC_V1) &&
(lum.lmm_magic != LOV_USER_MAGIC_V3))
- RETURN(-EINVAL);
+ return -EINVAL;
/* lov_user_md_vX and lov_mds_md_vX must have the same size */
LASSERT(sizeof(struct lov_user_md_v1) == sizeof(struct lov_mds_md_v1));
@@ -2900,7 +2861,7 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
lum_size = lov_mds_md_size(lum.lmm_stripe_count, lum.lmm_magic);
OBD_ALLOC(lumk, lum_size);
if (!lumk)
- RETURN(-ENOMEM);
+ return -ENOMEM;
if (lum.lmm_magic == LOV_USER_MAGIC_V1)
lmm_objects =
@@ -2922,7 +2883,7 @@ static int osc_getstripe(struct lov_stripe_md *lsm, struct lov_user_md *lump)
if (lumk != &lum)
OBD_FREE(lumk, lum_size);
- RETURN(rc);
+ return rc;
}
@@ -2932,7 +2893,6 @@ static int osc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
struct obd_device *obd = exp->exp_obd;
struct obd_ioctl_data *data = karg;
int err = 0;
- ENTRY;
if (!try_module_get(THIS_MODULE)) {
CERROR("Can't get module. Is it alive?");
@@ -3016,15 +2976,14 @@ static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
obd_count keylen, void *key, __u32 *vallen, void *val,
struct lov_stripe_md *lsm)
{
- ENTRY;
if (!vallen || !val)
- RETURN(-EFAULT);
+ return -EFAULT;
if (KEY_IS(KEY_LOCK_TO_STRIPE)) {
__u32 *stripe = val;
*vallen = sizeof(*stripe);
*stripe = 0;
- RETURN(0);
+ return 0;
} else if (KEY_IS(KEY_LAST_ID)) {
struct ptlrpc_request *req;
obd_id *reply;
@@ -3034,14 +2993,14 @@ static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_OST_GET_INFO_LAST_ID);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
RCL_CLIENT, keylen);
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
@@ -3060,7 +3019,7 @@ static int osc_get_info(const struct lu_env *env, struct obd_export *exp,
*((obd_id *)val) = *reply;
out:
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
} else if (KEY_IS(KEY_FIEMAP)) {
struct ll_fiemap_info_key *fm_key =
(struct ll_fiemap_info_key *)key;
@@ -3142,10 +3101,10 @@ fini_req:
drop_lock:
if (mode)
ldlm_lock_decref(&lockh, LCK_PR);
- RETURN(rc);
+ return rc;
}
- RETURN(-EINVAL);
+ return -EINVAL;
}
static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
@@ -3157,25 +3116,24 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
struct obd_import *imp = class_exp2cliimp(exp);
char *tmp;
int rc;
- ENTRY;
OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_SHUTDOWN, 10);
if (KEY_IS(KEY_CHECKSUM)) {
if (vallen != sizeof(int))
- RETURN(-EINVAL);
+ return -EINVAL;
exp->exp_obd->u.cli.cl_checksum = (*(int *)val) ? 1 : 0;
- RETURN(0);
+ return 0;
}
if (KEY_IS(KEY_SPTLRPC_CONF)) {
sptlrpc_conf_client_adapt(obd);
- RETURN(0);
+ return 0;
}
if (KEY_IS(KEY_FLUSH_CTX)) {
sptlrpc_import_flush_my_ctx(imp);
- RETURN(0);
+ return 0;
}
if (KEY_IS(KEY_CACHE_SET)) {
@@ -3192,7 +3150,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
list_add(&cli->cl_lru_osc, &cli->cl_cache->ccc_lru);
spin_unlock(&cli->cl_cache->ccc_lru_lock);
- RETURN(0);
+ return 0;
}
if (KEY_IS(KEY_CACHE_LRU_SHRINK)) {
@@ -3202,11 +3160,11 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
nr = osc_lru_shrink(cli, min(nr, target));
*(int *)val -= nr;
- RETURN(0);
+ return 0;
}
if (!set && !KEY_IS(KEY_GRANT_SHRINK))
- RETURN(-EINVAL);
+ return -EINVAL;
/* We pass all other commands directly to OST. Since nobody calls osc
methods directly and everybody is supposed to go through LOV, we
@@ -3219,7 +3177,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
&RQF_OST_SET_GRANT_INFO :
&RQF_OBD_SET_INFO);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
RCL_CLIENT, keylen);
@@ -3229,7 +3187,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
@@ -3248,7 +3206,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
OBDO_ALLOC(oa);
if (!oa) {
ptlrpc_req_finished(req);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
*oa = ((struct ost_body *)val)->oa;
aa->aa_oa = oa;
@@ -3263,7 +3221,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
} else
ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
- RETURN(0);
+ return 0;
}
@@ -3280,8 +3238,6 @@ static int osc_llog_finish(struct obd_device *obd, int count)
{
struct llog_ctxt *ctxt;
- ENTRY;
-
ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
if (ctxt) {
llog_cat_close(NULL, ctxt->loc_handle);
@@ -3291,7 +3247,7 @@ static int osc_llog_finish(struct obd_device *obd, int count)
ctxt = llog_get_context(obd, LLOG_SIZE_REPL_CTXT);
if (ctxt)
llog_cleanup(NULL, ctxt);
- RETURN(0);
+ return 0;
}
static int osc_reconnect(const struct lu_env *env,
@@ -3317,7 +3273,7 @@ static int osc_reconnect(const struct lu_env *env,
data->ocd_version, data->ocd_grant, lost_grant);
}
- RETURN(0);
+ return 0;
}
static int osc_disconnect(struct obd_export *exp)
@@ -3369,7 +3325,6 @@ static int osc_import_event(struct obd_device *obd,
struct client_obd *cli;
int rc = 0;
- ENTRY;
LASSERT(imp->imp_obd == obd);
switch (event) {
@@ -3433,7 +3388,7 @@ static int osc_import_event(struct obd_device *obd,
CERROR("Unknown import event %d\n", event);
LBUG();
}
- RETURN(rc);
+ return rc;
}
/**
@@ -3457,9 +3412,9 @@ static int osc_cancel_for_recovery(struct ldlm_lock *lock)
(lock->l_granted_mode == LCK_PR ||
lock->l_granted_mode == LCK_CR) &&
(osc_dlm_lock_pageref(lock) == 0))
- RETURN(1);
+ return 1;
- RETURN(0);
+ return 0;
}
static int brw_queue_work(const struct lu_env *env, void *data)
@@ -3469,7 +3424,7 @@ static int brw_queue_work(const struct lu_env *env, void *data)
CDEBUG(D_CACHE, "Run writeback work for client obd %p.\n", cli);
osc_io_unplug(env, cli, NULL, PDL_POLICY_SAME);
- RETURN(0);
+ return 0;
}
int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
@@ -3478,11 +3433,10 @@ int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
struct client_obd *cli = &obd->u.cli;
void *handler;
int rc;
- ENTRY;
rc = ptlrpcd_addref();
if (rc)
- RETURN(rc);
+ return rc;
rc = client_obd_setup(obd, lcfg);
if (rc)
@@ -3517,7 +3471,7 @@ int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
ns_register_cancel(obd->obd_namespace, osc_cancel_for_recovery);
- RETURN(rc);
+ return rc;
out_ptlrpcd_work:
ptlrpcd_destroy_work(handler);
@@ -3525,13 +3479,12 @@ out_client_setup:
client_obd_cleanup(obd);
out_ptlrpcd:
ptlrpcd_decref();
- RETURN(rc);
+ return rc;
}
static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
{
int rc = 0;
- ENTRY;
switch (stage) {
case OBD_CLEANUP_EARLY: {
@@ -3570,7 +3523,7 @@ static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
break;
}
}
- RETURN(rc);
+ return rc;
}
int osc_cleanup(struct obd_device *obd)
@@ -3578,8 +3531,6 @@ int osc_cleanup(struct obd_device *obd)
struct client_obd *cli = &obd->u.cli;
int rc;
- ENTRY;
-
/* lru cleanup */
if (cli->cl_cache != NULL) {
LASSERT(atomic_read(&cli->cl_cache->ccc_users) > 0);
@@ -3597,7 +3548,7 @@ int osc_cleanup(struct obd_device *obd)
rc = client_obd_cleanup(obd);
ptlrpcd_decref();
- RETURN(rc);
+ return rc;
}
int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg)
@@ -3671,7 +3622,6 @@ int __init osc_init(void)
{
struct lprocfs_static_vars lvars = { 0 };
int rc;
- ENTRY;
/* print an address of _any_ initialized kernel symbol from this
* module, to allow debugging with gdb that doesn't support data
@@ -3679,6 +3629,8 @@ int __init osc_init(void)
CDEBUG(D_INFO, "Lustre OSC module (%p).\n", &osc_caches);
rc = lu_kmem_init(osc_caches);
+ if (rc)
+ return rc;
lprocfs_osc_init_vars(&lvars);
@@ -3686,13 +3638,13 @@ int __init osc_init(void)
LUSTRE_OSC_NAME, &osc_device_type);
if (rc) {
lu_kmem_fini(osc_caches);
- RETURN(rc);
+ return rc;
}
spin_lock_init(&osc_ast_guard);
lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
- RETURN(rc);
+ return rc;
}
static void /*__exit*/ osc_exit(void)
@@ -3704,5 +3656,7 @@ static void /*__exit*/ osc_exit(void)
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Object Storage Client (OSC)");
MODULE_LICENSE("GPL");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
-cfs_module(osc, LUSTRE_VERSION_STRING, osc_init, osc_exit);
+module_init(osc_init);
+module_exit(osc_exit);
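
Besides the return-macro cleanup, the tail of this file switches from the libcfs cfs_module() wrapper to the stock kernel registration calls and adds MODULE_VERSION; the osc_init() hunk also adds the previously missing error check after lu_kmem_init(). For reference, a self-contained skeleton of the same registration pattern (names and version string are illustrative, not taken from the patch):

        #include <linux/init.h>
        #include <linux/module.h>

        static int __init example_init(void)
        {
                /* set up caches, register device types, etc.; fail early on error */
                return 0;
        }

        static void __exit example_exit(void)
        {
                /* undo everything example_init() did, in reverse order */
        }

        MODULE_AUTHOR("Example Author");
        MODULE_DESCRIPTION("Illustrative module skeleton");
        MODULE_LICENSE("GPL");
        MODULE_VERSION("0.1.0");

        module_init(example_init);
        module_exit(example_exit);
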
diff --git a/drivers/staging/lustre/lustre/ptlrpc/Makefile b/drivers/staging/lustre/lustre/ptlrpc/Makefile
index 983eb66a554..6d78b80487f 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/Makefile
+++ b/drivers/staging/lustre/lustre/ptlrpc/Makefile
@@ -16,6 +16,7 @@ ptlrpc_objs += sec.o sec_bulk.o sec_gc.o sec_config.o sec_lproc.o
ptlrpc_objs += sec_null.o sec_plain.o nrs.o nrs_fifo.o
ptlrpc-y := $(ldlm_objs) $(ptlrpc_objs)
+ptlrpc-$(CONFIG_LUSTRE_TRANSLATE_ERRNOS) += errno.o
obj-$(CONFIG_PTLRPC_GSS) += gss/
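
The extra Makefile line builds the new errno translation unit only when CONFIG_LUSTRE_TRANSLATE_ERRNOS is set. A common companion to such a kbuild switch is a config-gated interface with identity fallbacks, sketched below; this matches the ptlrpc_status_ntoh() call added in the osc_enqueue_fini() hunk above, but the declarations themselves are an assumption, not lines from this patch.

        /* Hedged sketch of a config-gated translation interface; the real
         * declarations live in the Lustre headers and may differ in detail. */
        #ifdef CONFIG_LUSTRE_TRANSLATE_ERRNOS
        /* Implemented in errno.c, backed by the mapping tables. */
        unsigned int lustre_errno_hton(unsigned int h);
        unsigned int lustre_errno_ntoh(unsigned int n);
        #define ptlrpc_status_hton(h)   lustre_errno_hton(h)
        #define ptlrpc_status_ntoh(n)   lustre_errno_ntoh(n)
        #else
        /* Without the option, host and wire errnos are treated as identical. */
        #define ptlrpc_status_hton(h)   (h)
        #define ptlrpc_status_ntoh(n)   (n)
        #endif
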
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index 22f7e654c9d..810a458caed 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -137,11 +137,10 @@ struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
struct obd_import *imp = req->rq_import;
struct ptlrpc_bulk_desc *desc;
- ENTRY;
LASSERT(type == BULK_PUT_SINK || type == BULK_GET_SOURCE);
desc = ptlrpc_new_bulk(npages, max_brw, type, portal);
if (desc == NULL)
- RETURN(NULL);
+ return NULL;
desc->bd_import_generation = req->rq_import_generation;
desc->bd_import = class_import_get(imp);
@@ -187,7 +186,6 @@ EXPORT_SYMBOL(__ptlrpc_prep_bulk_page);
void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
{
int i;
- ENTRY;
LASSERT(desc != NULL);
LASSERT(desc->bd_iov_count != LI_POISON); /* not freed already */
@@ -208,7 +206,6 @@ void __ptlrpc_free_bulk(struct ptlrpc_bulk_desc *desc, int unpin)
OBD_FREE(desc, offsetof(struct ptlrpc_bulk_desc,
bd_iov[desc->bd_max_iov]));
- EXIT;
}
EXPORT_SYMBOL(__ptlrpc_free_bulk);
@@ -336,7 +333,6 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
struct ptlrpc_request *early_req;
time_t olddl;
int rc;
- ENTRY;
req->rq_early = 0;
spin_unlock(&req->rq_lock);
@@ -344,7 +340,7 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
rc = sptlrpc_cli_unwrap_early_reply(req, &early_req);
if (rc) {
spin_lock(&req->rq_lock);
- RETURN(rc);
+ return rc;
}
rc = unpack_reply(early_req);
@@ -360,7 +356,7 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
if (rc != 0) {
spin_lock(&req->rq_lock);
- RETURN(rc);
+ return rc;
}
/* Adjust the local timeout for this req */
@@ -379,7 +375,7 @@ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
cfs_time_sub(req->rq_deadline, cfs_time_current_sec()),
cfs_time_sub(req->rq_deadline, olddl));
- RETURN(rc);
+ return rc;
}
/**
@@ -547,7 +543,6 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
{
struct obd_import *imp = request->rq_import;
int rc;
- ENTRY;
if (unlikely(ctx))
request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
@@ -601,7 +596,7 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
lustre_msg_set_opc(request->rq_reqmsg, opcode);
- RETURN(0);
+ return 0;
out_ctx:
sptlrpc_cli_ctx_put(request->rq_cli_ctx, 1);
out_free:
@@ -822,10 +817,9 @@ struct ptlrpc_request_set *ptlrpc_prep_set(void)
{
struct ptlrpc_request_set *set;
- ENTRY;
OBD_ALLOC(set, sizeof *set);
if (!set)
- RETURN(NULL);
+ return NULL;
atomic_set(&set->set_refcount, 1);
INIT_LIST_HEAD(&set->set_requests);
init_waitqueue_head(&set->set_waitq);
@@ -839,7 +833,7 @@ struct ptlrpc_request_set *ptlrpc_prep_set(void)
set->set_producer_arg = NULL;
set->set_rc = 0;
- RETURN(set);
+ return set;
}
EXPORT_SYMBOL(ptlrpc_prep_set);
@@ -859,13 +853,13 @@ struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
set = ptlrpc_prep_set();
if (!set)
- RETURN(NULL);
+ return NULL;
set->set_max_inflight = max;
set->set_producer = func;
set->set_producer_arg = arg;
- RETURN(set);
+ return set;
}
EXPORT_SYMBOL(ptlrpc_prep_fcset);
@@ -883,7 +877,6 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
struct list_head *next;
int expected_phase;
int n = 0;
- ENTRY;
/* Requests on the set should either all be completed, or all be new */
expected_phase = (atomic_read(&set->set_remaining) == 0) ?
@@ -925,7 +918,6 @@ void ptlrpc_set_destroy(struct ptlrpc_request_set *set)
LASSERT(atomic_read(&set->set_remaining) == 0);
ptlrpc_reqset_put(set);
- EXIT;
}
EXPORT_SYMBOL(ptlrpc_set_destroy);
@@ -941,13 +933,13 @@ int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
OBD_ALLOC_PTR(cbdata);
if (cbdata == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
cbdata->psc_interpret = fn;
cbdata->psc_data = data;
list_add_tail(&cbdata->psc_item, &set->set_cblist);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ptlrpc_set_add_cb);
@@ -1027,7 +1019,6 @@ static int ptlrpc_import_delay_req(struct obd_import *imp,
struct ptlrpc_request *req, int *status)
{
int delay = 0;
- ENTRY;
LASSERT (status != NULL);
*status = 0;
@@ -1078,7 +1069,7 @@ static int ptlrpc_import_delay_req(struct obd_import *imp,
}
}
- RETURN(delay);
+ return delay;
}
/**
@@ -1120,7 +1111,6 @@ static int ptlrpc_console_allow(struct ptlrpc_request *req)
static int ptlrpc_check_status(struct ptlrpc_request *req)
{
int err;
- ENTRY;
err = lustre_msg_get_status(req->rq_repmsg);
if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
@@ -1133,7 +1123,7 @@ static int ptlrpc_check_status(struct ptlrpc_request *req)
libcfs_nid2str(
imp->imp_connection->c_peer.nid),
ll_opcode2str(opc), err);
- RETURN(err < 0 ? err : -EINVAL);
+ return err < 0 ? err : -EINVAL;
}
if (err < 0) {
@@ -1143,7 +1133,7 @@ static int ptlrpc_check_status(struct ptlrpc_request *req)
DEBUG_REQ(D_INFO, req, "status is %d", err);
}
- RETURN(err);
+ return err;
}
/**
@@ -1156,7 +1146,6 @@ static void ptlrpc_save_versions(struct ptlrpc_request *req)
struct lustre_msg *repmsg = req->rq_repmsg;
struct lustre_msg *reqmsg = req->rq_reqmsg;
__u64 *versions = lustre_msg_get_versions(repmsg);
- ENTRY;
if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
return;
@@ -1165,8 +1154,6 @@ static void ptlrpc_save_versions(struct ptlrpc_request *req)
lustre_msg_set_versions(reqmsg, versions);
CDEBUG(D_INFO, "Client save versions ["LPX64"/"LPX64"]\n",
versions[0], versions[1]);
-
- EXIT;
}
/**
@@ -1183,7 +1170,6 @@ static int after_reply(struct ptlrpc_request *req)
int rc;
struct timeval work_start;
long timediff;
- ENTRY;
LASSERT(obd != NULL);
/* repbuf must be unlinked */
@@ -1194,7 +1180,7 @@ static int after_reply(struct ptlrpc_request *req)
DEBUG_REQ(D_ERROR, req, "reply buffer overflow,"
" expected: %d, actual size: %d",
req->rq_nob_received, req->rq_repbuf_len);
- RETURN(-EOVERFLOW);
+ return -EOVERFLOW;
}
sptlrpc_cli_free_repbuf(req);
@@ -1205,7 +1191,7 @@ static int after_reply(struct ptlrpc_request *req)
req->rq_replen = req->rq_nob_received;
req->rq_nob_received = 0;
req->rq_resend = 1;
- RETURN(0);
+ return 0;
}
/*
@@ -1215,18 +1201,18 @@ static int after_reply(struct ptlrpc_request *req)
rc = sptlrpc_cli_unwrap_reply(req);
if (rc) {
DEBUG_REQ(D_ERROR, req, "unwrap reply failed (%d):", rc);
- RETURN(rc);
+ return rc;
}
/*
* Security layer unwrap might ask resend this request.
*/
if (req->rq_resend)
- RETURN(0);
+ return 0;
rc = unpack_reply(req);
if (rc)
- RETURN(rc);
+ return rc;
/* retry indefinitely on EINPROGRESS */
if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
@@ -1257,7 +1243,7 @@ static int after_reply(struct ptlrpc_request *req)
else
req->rq_sent = now + req->rq_nr_resend;
- RETURN(0);
+ return 0;
}
do_gettimeofday(&work_start);
@@ -1272,7 +1258,7 @@ static int after_reply(struct ptlrpc_request *req)
lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {
DEBUG_REQ(D_ERROR, req, "invalid packet received (type=%u)",
lustre_msg_get_type(req->rq_repmsg));
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING)
@@ -1293,10 +1279,10 @@ static int after_reply(struct ptlrpc_request *req)
if (ll_rpc_recoverable_error(rc)) {
if (req->rq_send_state != LUSTRE_IMP_FULL ||
imp->imp_obd->obd_no_recov || imp->imp_dlm_fake) {
- RETURN(rc);
+ return rc;
}
ptlrpc_request_handle_notconn(req);
- RETURN(rc);
+ return rc;
}
} else {
/*
@@ -1360,7 +1346,7 @@ static int after_reply(struct ptlrpc_request *req)
spin_unlock(&imp->imp_lock);
}
- RETURN(rc);
+ return rc;
}
/**
@@ -1372,13 +1358,12 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
{
struct obd_import *imp = req->rq_import;
int rc;
- ENTRY;
LASSERT(req->rq_phase == RQ_PHASE_NEW);
if (req->rq_sent && (req->rq_sent > cfs_time_current_sec()) &&
(!req->rq_generation_set ||
req->rq_import_generation == imp->imp_generation))
- RETURN (0);
+ return 0;
ptlrpc_rqphase_move(req, RQ_PHASE_RPC);
@@ -1400,14 +1385,14 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
list_add_tail(&req->rq_list, &imp->imp_delayed_list);
atomic_inc(&req->rq_import->imp_inflight);
spin_unlock(&imp->imp_lock);
- RETURN(0);
+ return 0;
}
if (rc != 0) {
spin_unlock(&imp->imp_lock);
req->rq_status = rc;
ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET);
- RETURN(rc);
+ return rc;
}
LASSERT(list_empty(&req->rq_list));
@@ -1421,10 +1406,10 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
if (rc) {
if (req->rq_err) {
req->rq_status = rc;
- RETURN(1);
+ return 1;
} else {
req->rq_wait_ctx = 1;
- RETURN(0);
+ return 0;
}
}
@@ -1439,15 +1424,14 @@ static int ptlrpc_send_new_req(struct ptlrpc_request *req)
if (rc) {
DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc);
req->rq_net_err = 1;
- RETURN(rc);
+ return rc;
}
- RETURN(0);
+ return 0;
}
static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
{
int remaining, rc;
- ENTRY;
LASSERT(set->set_producer != NULL);
@@ -1461,11 +1445,11 @@ static inline int ptlrpc_set_producer(struct ptlrpc_request_set *set)
/* no more RPC to produce */
set->set_producer = NULL;
set->set_producer_arg = NULL;
- RETURN(0);
+ return 0;
}
}
- RETURN((atomic_read(&set->set_remaining) - remaining));
+ return (atomic_read(&set->set_remaining) - remaining);
}
/**
@@ -1478,10 +1462,9 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
{
struct list_head *tmp, *next;
int force_timer_recalc = 0;
- ENTRY;
if (atomic_read(&set->set_remaining) == 0)
- RETURN(1);
+ return 1;
list_for_each_safe(tmp, next, &set->set_requests) {
struct ptlrpc_request *req =
@@ -1834,7 +1817,7 @@ int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set)
}
/* If we hit an error, we want to recover promptly. */
- RETURN(atomic_read(&set->set_remaining) == 0 || force_timer_recalc);
+ return atomic_read(&set->set_remaining) == 0 || force_timer_recalc;
}
EXPORT_SYMBOL(ptlrpc_check_set);
@@ -1847,7 +1830,6 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
{
struct obd_import *imp = req->rq_import;
int rc = 0;
- ENTRY;
spin_lock(&req->rq_lock);
req->rq_timedout = 1;
@@ -1873,14 +1855,14 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
if (imp == NULL) {
DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?");
- RETURN(1);
+ return 1;
}
atomic_inc(&imp->imp_timeouts);
/* The DLM server doesn't want recovery run on its imports. */
if (imp->imp_dlm_fake)
- RETURN(1);
+ return 1;
/* If this request is for recovery or other primordial tasks,
* then error it out here. */
@@ -1894,7 +1876,7 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
req->rq_status = -ETIMEDOUT;
req->rq_err = 1;
spin_unlock(&req->rq_lock);
- RETURN(1);
+ return 1;
}
/* if a request can't be resent we can't wait for an answer after
@@ -1906,7 +1888,7 @@ int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink)
ptlrpc_fail_import(imp, lustre_msg_get_conn_cnt(req->rq_reqmsg));
- RETURN(rc);
+ return rc;
}
/**
@@ -1919,7 +1901,6 @@ int ptlrpc_expired_set(void *data)
struct ptlrpc_request_set *set = data;
struct list_head *tmp;
time_t now = cfs_time_current_sec();
- ENTRY;
LASSERT(set != NULL);
@@ -1955,7 +1936,7 @@ int ptlrpc_expired_set(void *data)
* sleep so we can recalculate the timeout, or enable interrupts
* if everyone's timed out.
*/
- RETURN(1);
+ return 1;
}
EXPORT_SYMBOL(ptlrpc_expired_set);
@@ -2006,7 +1987,6 @@ int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
int timeout = 0;
struct ptlrpc_request *req;
int deadline;
- ENTRY;
SIGNAL_MASK_ASSERT(); /* XXX BUG 1511 */
@@ -2045,7 +2025,7 @@ int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
else if (timeout == 0 || timeout > deadline - now)
timeout = deadline - now;
}
- RETURN(timeout);
+ return timeout;
}
EXPORT_SYMBOL(ptlrpc_set_next_timeout);
@@ -2061,7 +2041,6 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
struct ptlrpc_request *req;
struct l_wait_info lwi;
int rc, timeout;
- ENTRY;
if (set->set_producer)
(void)ptlrpc_set_producer(set);
@@ -2074,7 +2053,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
}
if (list_empty(&set->set_requests))
- RETURN(0);
+ return 0;
do {
timeout = ptlrpc_set_next_timeout(set);
@@ -2171,7 +2150,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
}
}
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(ptlrpc_set_wait);
@@ -2185,9 +2164,7 @@ EXPORT_SYMBOL(ptlrpc_set_wait);
*/
static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
{
- ENTRY;
if (request == NULL) {
- EXIT;
return;
}
@@ -2240,7 +2217,6 @@ static void __ptlrpc_free_req(struct ptlrpc_request *request, int locked)
__ptlrpc_free_req_to_pool(request);
else
OBD_FREE(request, sizeof(*request));
- EXIT;
}
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
@@ -2263,15 +2239,14 @@ EXPORT_SYMBOL(ptlrpc_req_finished_with_imp_lock);
*/
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
{
- ENTRY;
if (request == NULL)
- RETURN(1);
+ return 1;
if (request == LP_POISON ||
request->rq_reqmsg == LP_POISON) {
CERROR("dereferencing freed request (bug 575)\n");
LBUG();
- RETURN(1);
+ return 1;
}
DEBUG_REQ(D_INFO, request, "refcount now %u",
@@ -2279,10 +2254,10 @@ static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked)
if (atomic_dec_and_test(&request->rq_refcount)) {
__ptlrpc_free_req(request, locked);
- RETURN(1);
+ return 1;
}
- RETURN(0);
+ return 0;
}
/**
@@ -2332,7 +2307,7 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
* Nothing left to do.
*/
if (!ptlrpc_client_recv_or_unlink(request))
- RETURN(1);
+ return 1;
LNetMDUnlink(request->rq_reply_md_h);
@@ -2340,7 +2315,7 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
* Let's check it once again.
*/
if (!ptlrpc_client_recv_or_unlink(request))
- RETURN(1);
+ return 1;
/*
* Move to "Unregistering" phase as reply was not unlinked yet.
@@ -2351,7 +2326,7 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
* Do not wait for unlink to finish.
*/
if (async)
- RETURN(0);
+ return 0;
/*
* We have to l_wait_event() whatever the result, to give liblustre
@@ -2372,7 +2347,7 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
&lwi);
if (rc == 0) {
ptlrpc_rqphase_move(request, request->rq_next_phase);
- RETURN(1);
+ return 1;
}
LASSERT(rc == -ETIMEDOUT);
@@ -2380,7 +2355,7 @@ int ptlrpc_unregister_reply(struct ptlrpc_request *request, int async)
"rvcng=%d unlnk=%d", request->rq_receiving_reply,
request->rq_must_unlink);
}
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ptlrpc_unregister_reply);
@@ -2397,7 +2372,6 @@ void ptlrpc_free_committed(struct obd_import *imp)
struct list_head *tmp, *saved;
struct ptlrpc_request *req;
struct ptlrpc_request *last_req = NULL; /* temporary fire escape */
- ENTRY;
LASSERT(imp != NULL);
@@ -2408,7 +2382,6 @@ void ptlrpc_free_committed(struct obd_import *imp)
imp->imp_generation == imp->imp_last_generation_checked) {
CDEBUG(D_INFO, "%s: skip recheck: last_committed "LPU64"\n",
imp->imp_obd->obd_name, imp->imp_peer_committed_transno);
- EXIT;
return;
}
CDEBUG(D_RPCTRACE, "%s: committing for last_committed "LPU64" gen %d\n",
@@ -2456,16 +2429,10 @@ free_req:
list_del_init(&req->rq_replay_list);
__ptlrpc_req_finished(req, 1);
}
-
- EXIT;
- return;
}
void ptlrpc_cleanup_client(struct obd_import *imp)
{
- ENTRY;
- EXIT;
- return;
}
EXPORT_SYMBOL(ptlrpc_cleanup_client);
@@ -2517,9 +2484,8 @@ EXPORT_SYMBOL(ptlrpc_restart_req);
*/
struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req)
{
- ENTRY;
atomic_inc(&req->rq_refcount);
- RETURN(req);
+ return req;
}
EXPORT_SYMBOL(ptlrpc_request_addref);
@@ -2588,7 +2554,6 @@ int ptlrpc_queue_wait(struct ptlrpc_request *req)
{
struct ptlrpc_request_set *set;
int rc;
- ENTRY;
LASSERT(req->rq_set == NULL);
LASSERT(!req->rq_receiving_reply);
@@ -2596,7 +2561,7 @@ int ptlrpc_queue_wait(struct ptlrpc_request *req)
set = ptlrpc_prep_set();
if (set == NULL) {
CERROR("Unable to allocate ptlrpc set.");
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
/* for distributed debugging */
@@ -2608,7 +2573,7 @@ int ptlrpc_queue_wait(struct ptlrpc_request *req)
rc = ptlrpc_set_wait(set);
ptlrpc_set_destroy(set);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(ptlrpc_queue_wait);
@@ -2629,7 +2594,6 @@ static int ptlrpc_replay_interpret(const struct lu_env *env,
struct ptlrpc_replay_async_args *aa = data;
struct obd_import *imp = req->rq_import;
- ENTRY;
atomic_dec(&imp->imp_replay_inflight);
if (!ptlrpc_client_replied(req)) {
@@ -2710,7 +2674,7 @@ static int ptlrpc_replay_interpret(const struct lu_env *env,
/* this replay failed, so restart recovery */
ptlrpc_connect_import(imp);
- RETURN(rc);
+ return rc;
}
/**
@@ -2721,7 +2685,6 @@ static int ptlrpc_replay_interpret(const struct lu_env *env,
int ptlrpc_replay_req(struct ptlrpc_request *req)
{
struct ptlrpc_replay_async_args *aa;
- ENTRY;
LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY);
@@ -2751,7 +2714,7 @@ int ptlrpc_replay_req(struct ptlrpc_request *req)
ptlrpc_request_addref(req); /* ptlrpcd needs a ref */
ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ptlrpc_replay_req);
@@ -2761,7 +2724,6 @@ EXPORT_SYMBOL(ptlrpc_replay_req);
void ptlrpc_abort_inflight(struct obd_import *imp)
{
struct list_head *tmp, *n;
- ENTRY;
/* Make sure that no new requests get processed for this import.
* ptlrpc_{queue,set}_wait must (and does) hold imp_lock while testing
@@ -2809,8 +2771,6 @@ void ptlrpc_abort_inflight(struct obd_import *imp)
ptlrpc_free_committed(imp);
spin_unlock(&imp->imp_lock);
-
- EXIT;
}
EXPORT_SYMBOL(ptlrpc_abort_inflight);
@@ -2969,18 +2929,17 @@ void *ptlrpcd_alloc_work(struct obd_import *imp,
{
struct ptlrpc_request *req = NULL;
struct ptlrpc_work_async_args *args;
- ENTRY;
might_sleep();
if (cb == NULL)
- RETURN(ERR_PTR(-EINVAL));
+ return ERR_PTR(-EINVAL);
/* copy some code from deprecated fakereq. */
OBD_ALLOC_PTR(req);
if (req == NULL) {
CERROR("ptlrpc: run out of memory!\n");
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
}
req->rq_send_state = LUSTRE_IMP_FULL;
@@ -3009,7 +2968,7 @@ void *ptlrpcd_alloc_work(struct obd_import *imp,
args->cb = cb;
args->cbdata = cbdata;
- RETURN(req);
+ return req;
}
EXPORT_SYMBOL(ptlrpcd_alloc_work);
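
The refcounting helpers in the hunks above (ptlrpc_request_addref() and __ptlrpc_req_finished()) keep their behaviour; only the tracing macros go away. The idiom itself, reduced to a self-contained sketch with illustrative names:

        #include <linux/atomic.h>
        #include <linux/slab.h>

        struct example_req {
                atomic_t er_refcount;
                /* ... payload ... */
        };

        /* Take an extra reference; mirrors ptlrpc_request_addref(). */
        static struct example_req *example_req_addref(struct example_req *req)
        {
                atomic_inc(&req->er_refcount);
                return req;
        }

        /* Drop a reference; whoever drops the last one frees the object.
         * Returns 1 when the object was freed, 0 otherwise, in the style
         * of __ptlrpc_req_finished(). */
        static int example_req_put(struct example_req *req)
        {
                if (req == NULL)
                        return 1;
                if (atomic_dec_and_test(&req->er_refcount)) {
                        kfree(req);
                        return 1;
                }
                return 0;
        }
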
diff --git a/drivers/staging/lustre/lustre/ptlrpc/connection.c b/drivers/staging/lustre/lustre/ptlrpc/connection.c
index a0757f372be..17ca8420887 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/connection.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/connection.c
@@ -49,7 +49,6 @@ ptlrpc_connection_get(lnet_process_id_t peer, lnet_nid_t self,
struct obd_uuid *uuid)
{
struct ptlrpc_connection *conn, *conn2;
- ENTRY;
conn = cfs_hash_lookup(conn_hash, &peer);
if (conn)
@@ -57,7 +56,7 @@ ptlrpc_connection_get(lnet_process_id_t peer, lnet_nid_t self,
OBD_ALLOC_PTR(conn);
if (!conn)
- RETURN(NULL);
+ return NULL;
conn->c_peer = peer;
conn->c_self = self;
@@ -80,7 +79,6 @@ ptlrpc_connection_get(lnet_process_id_t peer, lnet_nid_t self,
OBD_FREE_PTR(conn);
conn = conn2;
}
- EXIT;
out:
CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
conn, atomic_read(&conn->c_refcount),
@@ -92,10 +90,9 @@ EXPORT_SYMBOL(ptlrpc_connection_get);
int ptlrpc_connection_put(struct ptlrpc_connection *conn)
{
int rc = 0;
- ENTRY;
if (!conn)
- RETURN(rc);
+ return rc;
LASSERT(atomic_read(&conn->c_refcount) > 1);
@@ -122,28 +119,24 @@ int ptlrpc_connection_put(struct ptlrpc_connection *conn)
conn, atomic_read(&conn->c_refcount),
libcfs_nid2str(conn->c_peer.nid));
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(ptlrpc_connection_put);
struct ptlrpc_connection *
ptlrpc_connection_addref(struct ptlrpc_connection *conn)
{
- ENTRY;
-
atomic_inc(&conn->c_refcount);
CDEBUG(D_INFO, "conn=%p refcount %d to %s\n",
conn, atomic_read(&conn->c_refcount),
libcfs_nid2str(conn->c_peer.nid));
- RETURN(conn);
+ return conn;
}
EXPORT_SYMBOL(ptlrpc_connection_addref);
int ptlrpc_connection_init(void)
{
- ENTRY;
-
conn_hash = cfs_hash_create("CONN_HASH",
HASH_CONN_CUR_BITS,
HASH_CONN_MAX_BITS,
@@ -152,16 +145,15 @@ int ptlrpc_connection_init(void)
CFS_HASH_MAX_THETA,
&conn_hash_ops, CFS_HASH_DEFAULT);
if (!conn_hash)
- RETURN(-ENOMEM);
+ return -ENOMEM;
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ptlrpc_connection_init);
-void ptlrpc_connection_fini(void) {
- ENTRY;
+void ptlrpc_connection_fini(void)
+{
cfs_hash_putref(conn_hash);
- EXIT;
}
EXPORT_SYMBOL(ptlrpc_connection_fini);
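
The new errno.c below adds per-errno lookup tables so that error numbers crossing the wire are architecture-independent. The helpers that consume the tables fall outside the lines shown here, so the following is only a plausible sketch, written as if it lived alongside the table in errno.c, with LUSTRE_EIO standing in for whatever generic fallback the real code uses for unmapped values.

        #include <linux/kernel.h>       /* ARRAY_SIZE() */

        /* Plausible consumer of lustre_errno_hton_mapping[]; an assumption,
         * not the body of the real lustre_errno_hton(). */
        static unsigned int example_errno_hton(unsigned int h)
        {
                unsigned int n;

                if (h == 0)
                        return 0;

                if (h < ARRAY_SIZE(lustre_errno_hton_mapping)) {
                        n = lustre_errno_hton_mapping[h];
                        if (n != 0)
                                return n;
                }

                /* No one-to-one wire equivalent: fall back to a generic error
                 * rather than leaking a host-specific value onto the network. */
                return LUSTRE_EIO;
        }
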
diff --git a/drivers/staging/lustre/lustre/ptlrpc/errno.c b/drivers/staging/lustre/lustre/ptlrpc/errno.c
new file mode 100644
index 00000000000..1c100633396
--- /dev/null
+++ b/drivers/staging/lustre/lustre/ptlrpc/errno.c
@@ -0,0 +1,380 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.txt
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (C) 2011 FUJITSU LIMITED. All rights reserved.
+ *
+ * Copyright (c) 2013, Intel Corporation.
+ */
+
+#include <linux/libcfs/libcfs.h>
+#include <lustre/lustre_errno.h>
+
+/*
+ * The two translation tables below must define a one-to-one mapping between
+ * host and network errnos.
+ *
+ * EWOULDBLOCK is equal to EAGAIN on all architectures except for parisc, which
+ * appears irrelevant. Thus, existing references to EWOULDBLOCK are fine.
+ *
+ * EDEADLOCK is equal to EDEADLK on x86 but not on sparc, at least. A sparc
+ * host has no context-free way to determine if a LUSTRE_EDEADLK represents an
+ * EDEADLK or an EDEADLOCK. Therefore, all existing references to EDEADLOCK
+ * that need to be transferred on wire have been replaced with EDEADLK.
+ */
+static int lustre_errno_hton_mapping[] = {
+ [EPERM] = LUSTRE_EPERM,
+ [ENOENT] = LUSTRE_ENOENT,
+ [ESRCH] = LUSTRE_ESRCH,
+ [EINTR] = LUSTRE_EINTR,
+ [EIO] = LUSTRE_EIO,
+ [ENXIO] = LUSTRE_ENXIO,
+ [E2BIG] = LUSTRE_E2BIG,
+ [ENOEXEC] = LUSTRE_ENOEXEC,
+ [EBADF] = LUSTRE_EBADF,
+ [ECHILD] = LUSTRE_ECHILD,
+ [EAGAIN] = LUSTRE_EAGAIN,
+ [ENOMEM] = LUSTRE_ENOMEM,
+ [EACCES] = LUSTRE_EACCES,
+ [EFAULT] = LUSTRE_EFAULT,
+ [ENOTBLK] = LUSTRE_ENOTBLK,
+ [EBUSY] = LUSTRE_EBUSY,
+ [EEXIST] = LUSTRE_EEXIST,
+ [EXDEV] = LUSTRE_EXDEV,
+ [ENODEV] = LUSTRE_ENODEV,
+ [ENOTDIR] = LUSTRE_ENOTDIR,
+ [EISDIR] = LUSTRE_EISDIR,
+ [EINVAL] = LUSTRE_EINVAL,
+ [ENFILE] = LUSTRE_ENFILE,
+ [EMFILE] = LUSTRE_EMFILE,
+ [ENOTTY] = LUSTRE_ENOTTY,
+ [ETXTBSY] = LUSTRE_ETXTBSY,
+ [EFBIG] = LUSTRE_EFBIG,
+ [ENOSPC] = LUSTRE_ENOSPC,
+ [ESPIPE] = LUSTRE_ESPIPE,
+ [EROFS] = LUSTRE_EROFS,
+ [EMLINK] = LUSTRE_EMLINK,
+ [EPIPE] = LUSTRE_EPIPE,
+ [EDOM] = LUSTRE_EDOM,
+ [ERANGE] = LUSTRE_ERANGE,
+ [EDEADLK] = LUSTRE_EDEADLK,
+ [ENAMETOOLONG] = LUSTRE_ENAMETOOLONG,
+ [ENOLCK] = LUSTRE_ENOLCK,
+ [ENOSYS] = LUSTRE_ENOSYS,
+ [ENOTEMPTY] = LUSTRE_ENOTEMPTY,
+ [ELOOP] = LUSTRE_ELOOP,
+ [ENOMSG] = LUSTRE_ENOMSG,
+ [EIDRM] = LUSTRE_EIDRM,
+ [ECHRNG] = LUSTRE_ECHRNG,
+ [EL2NSYNC] = LUSTRE_EL2NSYNC,
+ [EL3HLT] = LUSTRE_EL3HLT,
+ [EL3RST] = LUSTRE_EL3RST,
+ [ELNRNG] = LUSTRE_ELNRNG,
+ [EUNATCH] = LUSTRE_EUNATCH,
+ [ENOCSI] = LUSTRE_ENOCSI,
+ [EL2HLT] = LUSTRE_EL2HLT,
+ [EBADE] = LUSTRE_EBADE,
+ [EBADR] = LUSTRE_EBADR,
+ [EXFULL] = LUSTRE_EXFULL,
+ [ENOANO] = LUSTRE_ENOANO,
+ [EBADRQC] = LUSTRE_EBADRQC,
+ [EBADSLT] = LUSTRE_EBADSLT,
+ [EBFONT] = LUSTRE_EBFONT,
+ [ENOSTR] = LUSTRE_ENOSTR,
+ [ENODATA] = LUSTRE_ENODATA,
+ [ETIME] = LUSTRE_ETIME,
+ [ENOSR] = LUSTRE_ENOSR,
+ [ENONET] = LUSTRE_ENONET,
+ [ENOPKG] = LUSTRE_ENOPKG,
+ [EREMOTE] = LUSTRE_EREMOTE,
+ [ENOLINK] = LUSTRE_ENOLINK,
+ [EADV] = LUSTRE_EADV,
+ [ESRMNT] = LUSTRE_ESRMNT,
+ [ECOMM] = LUSTRE_ECOMM,
+ [EPROTO] = LUSTRE_EPROTO,
+ [EMULTIHOP] = LUSTRE_EMULTIHOP,
+ [EDOTDOT] = LUSTRE_EDOTDOT,
+ [EBADMSG] = LUSTRE_EBADMSG,
+ [EOVERFLOW] = LUSTRE_EOVERFLOW,
+ [ENOTUNIQ] = LUSTRE_ENOTUNIQ,
+ [EBADFD] = LUSTRE_EBADFD,
+ [EREMCHG] = LUSTRE_EREMCHG,
+ [ELIBACC] = LUSTRE_ELIBACC,
+ [ELIBBAD] = LUSTRE_ELIBBAD,
+ [ELIBSCN] = LUSTRE_ELIBSCN,
+ [ELIBMAX] = LUSTRE_ELIBMAX,
+ [ELIBEXEC] = LUSTRE_ELIBEXEC,
+ [EILSEQ] = LUSTRE_EILSEQ,
+ [ERESTART] = LUSTRE_ERESTART,
+ [ESTRPIPE] = LUSTRE_ESTRPIPE,
+ [EUSERS] = LUSTRE_EUSERS,
+ [ENOTSOCK] = LUSTRE_ENOTSOCK,
+ [EDESTADDRREQ] = LUSTRE_EDESTADDRREQ,
+ [EMSGSIZE] = LUSTRE_EMSGSIZE,
+ [EPROTOTYPE] = LUSTRE_EPROTOTYPE,
+ [ENOPROTOOPT] = LUSTRE_ENOPROTOOPT,
+ [EPROTONOSUPPORT] = LUSTRE_EPROTONOSUPPORT,
+ [ESOCKTNOSUPPORT] = LUSTRE_ESOCKTNOSUPPORT,
+ [EOPNOTSUPP] = LUSTRE_EOPNOTSUPP,
+ [EPFNOSUPPORT] = LUSTRE_EPFNOSUPPORT,
+ [EAFNOSUPPORT] = LUSTRE_EAFNOSUPPORT,
+ [EADDRINUSE] = LUSTRE_EADDRINUSE,
+ [EADDRNOTAVAIL] = LUSTRE_EADDRNOTAVAIL,
+ [ENETDOWN] = LUSTRE_ENETDOWN,
+ [ENETUNREACH] = LUSTRE_ENETUNREACH,
+ [ENETRESET] = LUSTRE_ENETRESET,
+ [ECONNABORTED] = LUSTRE_ECONNABORTED,
+ [ECONNRESET] = LUSTRE_ECONNRESET,
+ [ENOBUFS] = LUSTRE_ENOBUFS,
+ [EISCONN] = LUSTRE_EISCONN,
+ [ENOTCONN] = LUSTRE_ENOTCONN,
+ [ESHUTDOWN] = LUSTRE_ESHUTDOWN,
+ [ETOOMANYREFS] = LUSTRE_ETOOMANYREFS,
+ [ETIMEDOUT] = LUSTRE_ETIMEDOUT,
+ [ECONNREFUSED] = LUSTRE_ECONNREFUSED,
+ [EHOSTDOWN] = LUSTRE_EHOSTDOWN,
+ [EHOSTUNREACH] = LUSTRE_EHOSTUNREACH,
+ [EALREADY] = LUSTRE_EALREADY,
+ [EINPROGRESS] = LUSTRE_EINPROGRESS,
+ [ESTALE] = LUSTRE_ESTALE,
+ [EUCLEAN] = LUSTRE_EUCLEAN,
+ [ENOTNAM] = LUSTRE_ENOTNAM,
+ [ENAVAIL] = LUSTRE_ENAVAIL,
+ [EISNAM] = LUSTRE_EISNAM,
+ [EREMOTEIO] = LUSTRE_EREMOTEIO,
+ [EDQUOT] = LUSTRE_EDQUOT,
+ [ENOMEDIUM] = LUSTRE_ENOMEDIUM,
+ [EMEDIUMTYPE] = LUSTRE_EMEDIUMTYPE,
+ [ECANCELED] = LUSTRE_ECANCELED,
+ [ENOKEY] = LUSTRE_ENOKEY,
+ [EKEYEXPIRED] = LUSTRE_EKEYEXPIRED,
+ [EKEYREVOKED] = LUSTRE_EKEYREVOKED,
+ [EKEYREJECTED] = LUSTRE_EKEYREJECTED,
+ [EOWNERDEAD] = LUSTRE_EOWNERDEAD,
+ [ENOTRECOVERABLE] = LUSTRE_ENOTRECOVERABLE,
+ [ERESTARTSYS] = LUSTRE_ERESTARTSYS,
+ [ERESTARTNOINTR] = LUSTRE_ERESTARTNOINTR,
+ [ERESTARTNOHAND] = LUSTRE_ERESTARTNOHAND,
+ [ENOIOCTLCMD] = LUSTRE_ENOIOCTLCMD,
+ [ERESTART_RESTARTBLOCK] = LUSTRE_ERESTART_RESTARTBLOCK,
+ [EBADHANDLE] = LUSTRE_EBADHANDLE,
+ [ENOTSYNC] = LUSTRE_ENOTSYNC,
+ [EBADCOOKIE] = LUSTRE_EBADCOOKIE,
+ [ENOTSUPP] = LUSTRE_ENOTSUPP,
+ [ETOOSMALL] = LUSTRE_ETOOSMALL,
+ [ESERVERFAULT] = LUSTRE_ESERVERFAULT,
+ [EBADTYPE] = LUSTRE_EBADTYPE,
+ [EJUKEBOX] = LUSTRE_EJUKEBOX,
+ [EIOCBQUEUED] = LUSTRE_EIOCBQUEUED,
+};
+
+static int lustre_errno_ntoh_mapping[] = {
+ [LUSTRE_EPERM] = EPERM,
+ [LUSTRE_ENOENT] = ENOENT,
+ [LUSTRE_ESRCH] = ESRCH,
+ [LUSTRE_EINTR] = EINTR,
+ [LUSTRE_EIO] = EIO,
+ [LUSTRE_ENXIO] = ENXIO,
+ [LUSTRE_E2BIG] = E2BIG,
+ [LUSTRE_ENOEXEC] = ENOEXEC,
+ [LUSTRE_EBADF] = EBADF,
+ [LUSTRE_ECHILD] = ECHILD,
+ [LUSTRE_EAGAIN] = EAGAIN,
+ [LUSTRE_ENOMEM] = ENOMEM,
+ [LUSTRE_EACCES] = EACCES,
+ [LUSTRE_EFAULT] = EFAULT,
+ [LUSTRE_ENOTBLK] = ENOTBLK,
+ [LUSTRE_EBUSY] = EBUSY,
+ [LUSTRE_EEXIST] = EEXIST,
+ [LUSTRE_EXDEV] = EXDEV,
+ [LUSTRE_ENODEV] = ENODEV,
+ [LUSTRE_ENOTDIR] = ENOTDIR,
+ [LUSTRE_EISDIR] = EISDIR,
+ [LUSTRE_EINVAL] = EINVAL,
+ [LUSTRE_ENFILE] = ENFILE,
+ [LUSTRE_EMFILE] = EMFILE,
+ [LUSTRE_ENOTTY] = ENOTTY,
+ [LUSTRE_ETXTBSY] = ETXTBSY,
+ [LUSTRE_EFBIG] = EFBIG,
+ [LUSTRE_ENOSPC] = ENOSPC,
+ [LUSTRE_ESPIPE] = ESPIPE,
+ [LUSTRE_EROFS] = EROFS,
+ [LUSTRE_EMLINK] = EMLINK,
+ [LUSTRE_EPIPE] = EPIPE,
+ [LUSTRE_EDOM] = EDOM,
+ [LUSTRE_ERANGE] = ERANGE,
+ [LUSTRE_EDEADLK] = EDEADLK,
+ [LUSTRE_ENAMETOOLONG] = ENAMETOOLONG,
+ [LUSTRE_ENOLCK] = ENOLCK,
+ [LUSTRE_ENOSYS] = ENOSYS,
+ [LUSTRE_ENOTEMPTY] = ENOTEMPTY,
+ [LUSTRE_ELOOP] = ELOOP,
+ [LUSTRE_ENOMSG] = ENOMSG,
+ [LUSTRE_EIDRM] = EIDRM,
+ [LUSTRE_ECHRNG] = ECHRNG,
+ [LUSTRE_EL2NSYNC] = EL2NSYNC,
+ [LUSTRE_EL3HLT] = EL3HLT,
+ [LUSTRE_EL3RST] = EL3RST,
+ [LUSTRE_ELNRNG] = ELNRNG,
+ [LUSTRE_EUNATCH] = EUNATCH,
+ [LUSTRE_ENOCSI] = ENOCSI,
+ [LUSTRE_EL2HLT] = EL2HLT,
+ [LUSTRE_EBADE] = EBADE,
+ [LUSTRE_EBADR] = EBADR,
+ [LUSTRE_EXFULL] = EXFULL,
+ [LUSTRE_ENOANO] = ENOANO,
+ [LUSTRE_EBADRQC] = EBADRQC,
+ [LUSTRE_EBADSLT] = EBADSLT,
+ [LUSTRE_EBFONT] = EBFONT,
+ [LUSTRE_ENOSTR] = ENOSTR,
+ [LUSTRE_ENODATA] = ENODATA,
+ [LUSTRE_ETIME] = ETIME,
+ [LUSTRE_ENOSR] = ENOSR,
+ [LUSTRE_ENONET] = ENONET,
+ [LUSTRE_ENOPKG] = ENOPKG,
+ [LUSTRE_EREMOTE] = EREMOTE,
+ [LUSTRE_ENOLINK] = ENOLINK,
+ [LUSTRE_EADV] = EADV,
+ [LUSTRE_ESRMNT] = ESRMNT,
+ [LUSTRE_ECOMM] = ECOMM,
+ [LUSTRE_EPROTO] = EPROTO,
+ [LUSTRE_EMULTIHOP] = EMULTIHOP,
+ [LUSTRE_EDOTDOT] = EDOTDOT,
+ [LUSTRE_EBADMSG] = EBADMSG,
+ [LUSTRE_EOVERFLOW] = EOVERFLOW,
+ [LUSTRE_ENOTUNIQ] = ENOTUNIQ,
+ [LUSTRE_EBADFD] = EBADFD,
+ [LUSTRE_EREMCHG] = EREMCHG,
+ [LUSTRE_ELIBACC] = ELIBACC,
+ [LUSTRE_ELIBBAD] = ELIBBAD,
+ [LUSTRE_ELIBSCN] = ELIBSCN,
+ [LUSTRE_ELIBMAX] = ELIBMAX,
+ [LUSTRE_ELIBEXEC] = ELIBEXEC,
+ [LUSTRE_EILSEQ] = EILSEQ,
+ [LUSTRE_ERESTART] = ERESTART,
+ [LUSTRE_ESTRPIPE] = ESTRPIPE,
+ [LUSTRE_EUSERS] = EUSERS,
+ [LUSTRE_ENOTSOCK] = ENOTSOCK,
+ [LUSTRE_EDESTADDRREQ] = EDESTADDRREQ,
+ [LUSTRE_EMSGSIZE] = EMSGSIZE,
+ [LUSTRE_EPROTOTYPE] = EPROTOTYPE,
+ [LUSTRE_ENOPROTOOPT] = ENOPROTOOPT,
+ [LUSTRE_EPROTONOSUPPORT] = EPROTONOSUPPORT,
+ [LUSTRE_ESOCKTNOSUPPORT] = ESOCKTNOSUPPORT,
+ [LUSTRE_EOPNOTSUPP] = EOPNOTSUPP,
+ [LUSTRE_EPFNOSUPPORT] = EPFNOSUPPORT,
+ [LUSTRE_EAFNOSUPPORT] = EAFNOSUPPORT,
+ [LUSTRE_EADDRINUSE] = EADDRINUSE,
+ [LUSTRE_EADDRNOTAVAIL] = EADDRNOTAVAIL,
+ [LUSTRE_ENETDOWN] = ENETDOWN,
+ [LUSTRE_ENETUNREACH] = ENETUNREACH,
+ [LUSTRE_ENETRESET] = ENETRESET,
+ [LUSTRE_ECONNABORTED] = ECONNABORTED,
+ [LUSTRE_ECONNRESET] = ECONNRESET,
+ [LUSTRE_ENOBUFS] = ENOBUFS,
+ [LUSTRE_EISCONN] = EISCONN,
+ [LUSTRE_ENOTCONN] = ENOTCONN,
+ [LUSTRE_ESHUTDOWN] = ESHUTDOWN,
+ [LUSTRE_ETOOMANYREFS] = ETOOMANYREFS,
+ [LUSTRE_ETIMEDOUT] = ETIMEDOUT,
+ [LUSTRE_ECONNREFUSED] = ECONNREFUSED,
+ [LUSTRE_EHOSTDOWN] = EHOSTDOWN,
+ [LUSTRE_EHOSTUNREACH] = EHOSTUNREACH,
+ [LUSTRE_EALREADY] = EALREADY,
+ [LUSTRE_EINPROGRESS] = EINPROGRESS,
+ [LUSTRE_ESTALE] = ESTALE,
+ [LUSTRE_EUCLEAN] = EUCLEAN,
+ [LUSTRE_ENOTNAM] = ENOTNAM,
+ [LUSTRE_ENAVAIL] = ENAVAIL,
+ [LUSTRE_EISNAM] = EISNAM,
+ [LUSTRE_EREMOTEIO] = EREMOTEIO,
+ [LUSTRE_EDQUOT] = EDQUOT,
+ [LUSTRE_ENOMEDIUM] = ENOMEDIUM,
+ [LUSTRE_EMEDIUMTYPE] = EMEDIUMTYPE,
+ [LUSTRE_ECANCELED] = ECANCELED,
+ [LUSTRE_ENOKEY] = ENOKEY,
+ [LUSTRE_EKEYEXPIRED] = EKEYEXPIRED,
+ [LUSTRE_EKEYREVOKED] = EKEYREVOKED,
+ [LUSTRE_EKEYREJECTED] = EKEYREJECTED,
+ [LUSTRE_EOWNERDEAD] = EOWNERDEAD,
+ [LUSTRE_ENOTRECOVERABLE] = ENOTRECOVERABLE,
+ [LUSTRE_ERESTARTSYS] = ERESTARTSYS,
+ [LUSTRE_ERESTARTNOINTR] = ERESTARTNOINTR,
+ [LUSTRE_ERESTARTNOHAND] = ERESTARTNOHAND,
+ [LUSTRE_ENOIOCTLCMD] = ENOIOCTLCMD,
+ [LUSTRE_ERESTART_RESTARTBLOCK] = ERESTART_RESTARTBLOCK,
+ [LUSTRE_EBADHANDLE] = EBADHANDLE,
+ [LUSTRE_ENOTSYNC] = ENOTSYNC,
+ [LUSTRE_EBADCOOKIE] = EBADCOOKIE,
+ [LUSTRE_ENOTSUPP] = ENOTSUPP,
+ [LUSTRE_ETOOSMALL] = ETOOSMALL,
+ [LUSTRE_ESERVERFAULT] = ESERVERFAULT,
+ [LUSTRE_EBADTYPE] = EBADTYPE,
+ [LUSTRE_EJUKEBOX] = EJUKEBOX,
+ [LUSTRE_EIOCBQUEUED] = EIOCBQUEUED,
+};
+
+unsigned int lustre_errno_hton(unsigned int h)
+{
+ unsigned int n;
+
+ if (h == 0) {
+ n = 0;
+ } else if (h < ARRAY_SIZE(lustre_errno_hton_mapping)) {
+ n = lustre_errno_hton_mapping[h];
+ if (n == 0)
+ goto generic;
+ } else {
+generic:
+ /*
+ * A generic errno is better than the unknown one that could
+ * mean anything to a different host.
+ */
+ n = LUSTRE_EIO;
+ }
+
+ return n;
+}
+EXPORT_SYMBOL(lustre_errno_hton);
+
+unsigned int lustre_errno_ntoh(unsigned int n)
+{
+ unsigned int h;
+
+ if (n == 0) {
+ h = 0;
+ } else if (n < ARRAY_SIZE(lustre_errno_ntoh_mapping)) {
+ h = lustre_errno_ntoh_mapping[n];
+ if (h == 0)
+ goto generic;
+ } else {
+generic:
+ /*
+ * Similar to the situation in lustre_errno_hton(), an unknown
+ * network errno could coincide with anything. Hence, it is
+ * better to return a generic errno.
+ */
+ h = EIO;
+ }
+
+ return h;
+}
+EXPORT_SYMBOL(lustre_errno_ntoh);
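The new errno.c above adds two lookup helpers around the translation tables. A brief usage sketch, assuming <lustre/lustre_errno.h> provides the LUSTRE_* wire constants; the wrapper function is hypothetical:

#include <lustre/lustre_errno.h>

/* Hypothetical illustration: convert a host errno for the wire and back. */
static void errno_roundtrip_example(void)
{
	unsigned int wire, host;

	wire = lustre_errno_hton(EDEADLK);	/* LUSTRE_EDEADLK */
	host = lustre_errno_ntoh(wire);		/* EDEADLK again */

	/* Values missing from (or beyond) the tables degrade to a generic
	 * I/O error instead of leaking an ambiguous number to the peer. */
	wire = lustre_errno_hton(60000);	/* LUSTRE_EIO */
	host = lustre_errno_ntoh(60000);	/* EIO */
	(void)wire;
	(void)host;
}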
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index 0264c102cb3..58d089c3fef 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -55,7 +55,6 @@ void request_out_callback(lnet_event_t *ev)
{
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_request *req = cbid->cbid_arg;
- ENTRY;
LASSERT (ev->type == LNET_EVENT_SEND ||
ev->type == LNET_EVENT_UNLINK);
@@ -79,8 +78,6 @@ void request_out_callback(lnet_event_t *ev)
}
ptlrpc_req_finished(req);
-
- EXIT;
}
/*
@@ -90,7 +87,6 @@ void reply_in_callback(lnet_event_t *ev)
{
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_request *req = cbid->cbid_arg;
- ENTRY;
DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status);
@@ -166,7 +162,6 @@ out_wake:
* since we don't have our own ref */
ptlrpc_client_wake_req(req);
spin_unlock(&req->rq_lock);
- EXIT;
}
/*
@@ -177,7 +172,6 @@ void client_bulk_callback (lnet_event_t *ev)
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_bulk_desc *desc = cbid->cbid_arg;
struct ptlrpc_request *req;
- ENTRY;
LASSERT ((desc->bd_type == BULK_PUT_SINK &&
ev->type == LNET_EVENT_PUT) ||
@@ -220,7 +214,6 @@ void client_bulk_callback (lnet_event_t *ev)
ptlrpc_client_wake_req(desc->bd_req);
spin_unlock(&desc->bd_lock);
- EXIT;
}
/*
@@ -289,7 +282,6 @@ void request_in_callback(lnet_event_t *ev)
struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
struct ptlrpc_service *service = svcpt->scp_service;
struct ptlrpc_request *req;
- ENTRY;
LASSERT (ev->type == LNET_EVENT_PUT ||
ev->type == LNET_EVENT_UNLINK);
@@ -378,7 +370,6 @@ void request_in_callback(lnet_event_t *ev)
wake_up(&svcpt->scp_waitq);
spin_unlock(&svcpt->scp_lock);
- EXIT;
}
/*
@@ -389,7 +380,6 @@ void reply_out_callback(lnet_event_t *ev)
struct ptlrpc_cb_id *cbid = ev->md.user_ptr;
struct ptlrpc_reply_state *rs = cbid->cbid_arg;
struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
- ENTRY;
LASSERT (ev->type == LNET_EVENT_SEND ||
ev->type == LNET_EVENT_ACK ||
@@ -400,7 +390,6 @@ void reply_out_callback(lnet_event_t *ev)
* net's ref on 'rs' */
LASSERT (ev->unlinked);
ptlrpc_rs_decref(rs);
- EXIT;
return;
}
@@ -421,7 +410,6 @@ void reply_out_callback(lnet_event_t *ev)
spin_unlock(&rs->rs_lock);
spin_unlock(&svcpt->scp_rep_lock);
}
- EXIT;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_api.h b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_api.h
index feac60482c9..0e9f6c472a3 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_api.h
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_api.h
@@ -100,7 +100,7 @@ struct subflavor_desc {
/* Each mechanism is described by the following struct: */
struct gss_api_mech {
struct list_head gm_list;
- module_t *gm_owner;
+ struct module *gm_owner;
char *gm_name;
rawobj_t gm_oid;
atomic_t gm_count;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_bulk.c
index ed95bbba95c..b518d8a0aab 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_bulk.c
@@ -68,7 +68,6 @@ int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
__u32 maj;
int offset;
int rc;
- ENTRY;
LASSERT(req->rq_pack_bulk);
LASSERT(req->rq_bulk_read || req->rq_bulk_write);
@@ -104,7 +103,7 @@ int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);
if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
- RETURN(0);
+ return 0;
LASSERT(bsd->bsd_svc == SPTLRPC_BULK_SVC_INTG ||
bsd->bsd_svc == SPTLRPC_BULK_SVC_PRIV);
@@ -132,18 +131,18 @@ int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
&token);
if (maj != GSS_S_COMPLETE) {
CWARN("failed to sign bulk data: %x\n", maj);
- RETURN(-EACCES);
+ return -EACCES;
}
} else {
/* privacy mode */
if (desc->bd_iov_count == 0)
- RETURN(0);
+ return 0;
rc = sptlrpc_enc_pool_get_pages(desc);
if (rc) {
CERROR("bulk write: failed to allocate "
"encryption pages: %d\n", rc);
- RETURN(rc);
+ return rc;
}
token.data = bsd->bsd_data;
@@ -153,12 +152,12 @@ int gss_cli_ctx_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
maj = lgss_wrap_bulk(gctx->gc_mechctx, desc, &token, 0);
if (maj != GSS_S_COMPLETE) {
CWARN("fail to encrypt bulk data: %x\n", maj);
- RETURN(-EACCES);
+ return -EACCES;
}
}
}
- RETURN(0);
+ return 0;
}
int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
@@ -171,7 +170,6 @@ int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
rawobj_t token;
__u32 maj;
int roff, voff;
- ENTRY;
LASSERT(req->rq_pack_bulk);
LASSERT(req->rq_bulk_read || req->rq_bulk_write);
@@ -220,7 +218,7 @@ int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
"(%u,%u,%u) != (%u,%u,%u)\n",
bsdr->bsd_version, bsdr->bsd_type, bsdr->bsd_svc,
bsdv->bsd_version, bsdv->bsd_type, bsdv->bsd_svc);
- RETURN(-EPROTO);
+ return -EPROTO;
}
LASSERT(bsdv->bsd_svc == SPTLRPC_BULK_SVC_NULL ||
@@ -235,7 +233,7 @@ int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
if (req->rq_bulk_write) {
if (bsdv->bsd_flags & BSD_FL_ERR) {
CERROR("server reported bulk i/o failure\n");
- RETURN(-EIO);
+ return -EIO;
}
if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV)
@@ -270,12 +268,12 @@ int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
&token);
if (maj != GSS_S_COMPLETE) {
CERROR("failed to verify bulk read: %x\n", maj);
- RETURN(-EACCES);
+ return -EACCES;
}
} else if (bsdv->bsd_svc == SPTLRPC_BULK_SVC_PRIV) {
desc->bd_nob = bsdv->bsd_nob;
if (desc->bd_nob == 0)
- RETURN(0);
+ return 0;
token.data = bsdv->bsd_data;
token.len = lustre_msg_buflen(vmsg, voff) -
@@ -286,14 +284,14 @@ int gss_cli_ctx_unwrap_bulk(struct ptlrpc_cli_ctx *ctx,
if (maj != GSS_S_COMPLETE) {
CERROR("failed to decrypt bulk read: %x\n",
maj);
- RETURN(-EACCES);
+ return -EACCES;
}
desc->bd_nob_transferred = desc->bd_nob;
}
}
- RETURN(0);
+ return 0;
}
static int gss_prep_bulk(struct ptlrpc_bulk_desc *desc,
@@ -318,21 +316,20 @@ int gss_cli_prep_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_desc *desc)
{
int rc;
- ENTRY;
LASSERT(req->rq_cli_ctx);
LASSERT(req->rq_pack_bulk);
LASSERT(req->rq_bulk_read);
if (SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_BULK_SVC_PRIV)
- RETURN(0);
+ return 0;
rc = gss_prep_bulk(desc, ctx2gctx(req->rq_cli_ctx)->gc_mechctx);
if (rc)
CERROR("bulk read: failed to prepare encryption "
"pages: %d\n", rc);
- RETURN(rc);
+ return rc;
}
int gss_svc_prep_bulk(struct ptlrpc_request *req,
@@ -341,7 +338,6 @@ int gss_svc_prep_bulk(struct ptlrpc_request *req,
struct gss_svc_reqctx *grctx;
struct ptlrpc_bulk_sec_desc *bsd;
int rc;
- ENTRY;
LASSERT(req->rq_svc_ctx);
LASSERT(req->rq_pack_bulk);
@@ -355,14 +351,14 @@ int gss_svc_prep_bulk(struct ptlrpc_request *req,
bsd = grctx->src_reqbsd;
if (bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)
- RETURN(0);
+ return 0;
rc = gss_prep_bulk(desc, grctx->src_ctx->gsc_mechctx);
if (rc)
CERROR("bulk write: failed to prepare encryption "
"pages: %d\n", rc);
- RETURN(rc);
+ return rc;
}
int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
@@ -372,7 +368,6 @@ int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
struct ptlrpc_bulk_sec_desc *bsdr, *bsdv;
rawobj_t token;
__u32 maj;
- ENTRY;
LASSERT(req->rq_svc_ctx);
LASSERT(req->rq_pack_bulk);
@@ -404,7 +399,7 @@ int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
if (maj != GSS_S_COMPLETE) {
bsdv->bsd_flags |= BSD_FL_ERR;
CERROR("failed to verify bulk signature: %x\n", maj);
- RETURN(-EACCES);
+ return -EACCES;
}
break;
case SPTLRPC_BULK_SVC_PRIV:
@@ -412,7 +407,7 @@ int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
bsdv->bsd_flags |= BSD_FL_ERR;
CERROR("prepared nob %d doesn't match the actual "
"nob %d\n", desc->bd_nob, bsdr->bsd_nob);
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (desc->bd_iov_count == 0) {
@@ -428,12 +423,12 @@ int gss_svc_unwrap_bulk(struct ptlrpc_request *req,
if (maj != GSS_S_COMPLETE) {
bsdv->bsd_flags |= BSD_FL_ERR;
CERROR("failed decrypt bulk data: %x\n", maj);
- RETURN(-EACCES);
+ return -EACCES;
}
break;
}
- RETURN(0);
+ return 0;
}
int gss_svc_wrap_bulk(struct ptlrpc_request *req,
@@ -444,7 +439,6 @@ int gss_svc_wrap_bulk(struct ptlrpc_request *req,
rawobj_t token;
__u32 maj;
int rc;
- ENTRY;
LASSERT(req->rq_svc_ctx);
LASSERT(req->rq_pack_bulk);
@@ -476,7 +470,7 @@ int gss_svc_wrap_bulk(struct ptlrpc_request *req,
if (maj != GSS_S_COMPLETE) {
bsdv->bsd_flags |= BSD_FL_ERR;
CERROR("failed to sign bulk data: %x\n", maj);
- RETURN(-EACCES);
+ return -EACCES;
}
break;
case SPTLRPC_BULK_SVC_PRIV:
@@ -492,7 +486,7 @@ int gss_svc_wrap_bulk(struct ptlrpc_request *req,
bsdv->bsd_flags |= BSD_FL_ERR;
CERROR("bulk read: failed to allocate encryption "
"pages: %d\n", rc);
- RETURN(rc);
+ return rc;
}
token.data = bsdv->bsd_data;
@@ -503,10 +497,10 @@ int gss_svc_wrap_bulk(struct ptlrpc_request *req,
if (maj != GSS_S_COMPLETE) {
bsdv->bsd_flags |= BSD_FL_ERR;
CERROR("failed to encrypt bulk data: %x\n", maj);
- RETURN(-EACCES);
+ return -EACCES;
}
break;
}
- RETURN(0);
+ return 0;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_cli_upcall.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_cli_upcall.c
index 142c789b1bc..55247af3910 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_cli_upcall.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_cli_upcall.c
@@ -243,41 +243,41 @@ int gss_do_ctx_init_rpc(__user char *buffer, unsigned long count)
if (count != sizeof(param)) {
CERROR("ioctl size %lu, expect %lu, please check lgss_keyring "
"version\n", count, (unsigned long) sizeof(param));
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (copy_from_user(&param, buffer, sizeof(param))) {
CERROR("failed copy data from lgssd\n");
- RETURN(-EFAULT);
+ return -EFAULT;
}
if (param.version != GSSD_INTERFACE_VERSION) {
CERROR("gssd interface version %d (expect %d)\n",
param.version, GSSD_INTERFACE_VERSION);
- RETURN(-EINVAL);
+ return -EINVAL;
}
/* take name */
if (strncpy_from_user(obdname, param.uuid, sizeof(obdname)) <= 0) {
CERROR("Invalid obdname pointer\n");
- RETURN(-EFAULT);
+ return -EFAULT;
}
obd = class_name2obd(obdname);
if (!obd) {
CERROR("no such obd %s\n", obdname);
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (unlikely(!obd->obd_set_up)) {
CERROR("obd %s not setup\n", obdname);
- RETURN(-EINVAL);
+ return -EINVAL;
}
spin_lock(&obd->obd_dev_lock);
if (obd->obd_stopping) {
CERROR("obd %s has stopped\n", obdname);
spin_unlock(&obd->obd_dev_lock);
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) &&
@@ -285,7 +285,7 @@ int gss_do_ctx_init_rpc(__user char *buffer, unsigned long count)
strcmp(obd->obd_type->typ_name, LUSTRE_MGC_NAME)) {
CERROR("obd %s is not a client device\n", obdname);
spin_unlock(&obd->obd_dev_lock);
- RETURN(-EINVAL);
+ return -EINVAL;
}
spin_unlock(&obd->obd_dev_lock);
@@ -293,7 +293,7 @@ int gss_do_ctx_init_rpc(__user char *buffer, unsigned long count)
if (obd->u.cli.cl_import == NULL) {
CERROR("obd %s: import has gone\n", obd->obd_name);
up_read(&obd->u.cli.cl_sem);
- RETURN(-EINVAL);
+ return -EINVAL;
}
imp = class_import_get(obd->u.cli.cl_import);
up_read(&obd->u.cli.cl_sem);
@@ -301,7 +301,7 @@ int gss_do_ctx_init_rpc(__user char *buffer, unsigned long count)
if (imp->imp_deactive) {
CERROR("import has been deactivated\n");
class_import_put(imp);
- RETURN(-EINVAL);
+ return -EINVAL;
}
req = ptlrpc_request_alloc_pack(imp, &RQF_SEC_CTX, LUSTRE_OBD_VERSION,
@@ -368,7 +368,7 @@ out_copy:
class_import_put(imp);
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
}
int gss_do_ctx_fini_rpc(struct gss_cli_ctx *gctx)
@@ -378,7 +378,6 @@ int gss_do_ctx_fini_rpc(struct gss_cli_ctx *gctx)
struct ptlrpc_request *req;
struct ptlrpc_user_desc *pud;
int rc;
- ENTRY;
LASSERT(atomic_read(&ctx->cc_refcount) > 0);
@@ -386,7 +385,7 @@ int gss_do_ctx_fini_rpc(struct gss_cli_ctx *gctx)
CDEBUG(D_SEC, "ctx %p(%u->%s) not uptodate, "
"don't send destroy rpc\n", ctx,
ctx->cc_vcred.vc_uid, sec2target_str(ctx->cc_sec));
- RETURN(0);
+ return 0;
}
might_sleep();
@@ -434,7 +433,7 @@ int gss_do_ctx_fini_rpc(struct gss_cli_ctx *gctx)
out_ref:
ptlrpc_req_finished(req);
out:
- RETURN(rc);
+ return rc;
}
int __init gss_init_cli_upcall(void)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c
index bb571ae5105..188dbbfbd2f 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_keyring.c
@@ -524,7 +524,6 @@ void rvs_sec_install_root_ctx_kr(struct ptlrpc_sec *sec,
struct gss_sec_keyring *gsec_kr = sec2gsec_keyring(sec);
struct ptlrpc_cli_ctx *ctx;
cfs_time_t now;
- ENTRY;
LASSERT(sec_is_reverse(sec));
@@ -569,11 +568,10 @@ struct ptlrpc_sec * gss_sec_create_kr(struct obd_import *imp,
struct sptlrpc_flavor *sf)
{
struct gss_sec_keyring *gsec_kr;
- ENTRY;
OBD_ALLOC(gsec_kr, sizeof(*gsec_kr));
if (gsec_kr == NULL)
- RETURN(NULL);
+ return NULL;
INIT_HLIST_HEAD(&gsec_kr->gsk_clist);
gsec_kr->gsk_root_ctx = NULL;
@@ -592,11 +590,11 @@ struct ptlrpc_sec * gss_sec_create_kr(struct obd_import *imp,
goto err_free;
}
- RETURN(&gsec_kr->gsk_base.gs_base);
+ return &gsec_kr->gsk_base.gs_base;
err_free:
OBD_FREE(gsec_kr, sizeof(*gsec_kr));
- RETURN(NULL);
+ return NULL;
}
static
@@ -683,7 +681,6 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
char *coinfo;
int coinfo_size;
char *co_flags = "";
- ENTRY;
LASSERT(imp != NULL);
@@ -697,7 +694,7 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
* always succeed.
*/
if (ctx || sec_is_reverse(sec))
- RETURN(ctx);
+ return ctx;
}
LASSERT(create != 0);
@@ -821,7 +818,7 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_kr(struct ptlrpc_sec *sec,
out:
if (is_root)
mutex_unlock(&gsec_kr->gsk_root_uc_lock);
- RETURN(ctx);
+ return ctx;
}
static
@@ -891,7 +888,6 @@ void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec,
struct hlist_head freelist = HLIST_HEAD_INIT;
struct hlist_node *next;
struct ptlrpc_cli_ctx *ctx;
- ENTRY;
gsec_kr = sec2gsec_keyring(sec);
@@ -930,15 +926,12 @@ void flush_spec_ctx_cache_kr(struct ptlrpc_sec *sec,
spin_unlock(&sec->ps_lock);
dispose_ctx_list_kr(&freelist);
- EXIT;
}
static
int gss_sec_flush_ctx_cache_kr(struct ptlrpc_sec *sec,
uid_t uid, int grace, int force)
{
- ENTRY;
-
CDEBUG(D_SEC, "sec %p(%d, nctx %d), uid %d, grace %d, force %d\n",
sec, atomic_read(&sec->ps_refcount),
atomic_read(&sec->ps_nctx),
@@ -949,7 +942,7 @@ int gss_sec_flush_ctx_cache_kr(struct ptlrpc_sec *sec,
else
flush_spec_ctx_cache_kr(sec, uid, grace, force);
- RETURN(0);
+ return 0;
}
static
@@ -959,7 +952,6 @@ void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec)
struct hlist_head freelist = HLIST_HEAD_INIT;
struct hlist_node *next;
struct ptlrpc_cli_ctx *ctx;
- ENTRY;
CWARN("running gc\n");
@@ -981,8 +973,6 @@ void gss_sec_gc_ctx_kr(struct ptlrpc_sec *sec)
spin_unlock(&sec->ps_lock);
dispose_ctx_list_kr(&freelist);
- EXIT;
- return;
}
static
@@ -993,7 +983,6 @@ int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq)
struct ptlrpc_cli_ctx *ctx;
struct gss_cli_ctx *gctx;
time_t now = cfs_time_current_sec();
- ENTRY;
spin_lock(&sec->ps_lock);
hlist_for_each_entry_safe(ctx, next,
@@ -1032,7 +1021,7 @@ int gss_sec_display_kr(struct ptlrpc_sec *sec, struct seq_file *seq)
}
spin_unlock(&sec->ps_lock);
- RETURN(0);
+ return 0;
}
/****************************************
@@ -1148,16 +1137,15 @@ static
int gss_kt_instantiate(struct key *key, const void *data, size_t datalen)
{
int rc;
- ENTRY;
if (data != NULL || datalen != 0) {
CERROR("invalid: data %p, len %lu\n", data, (long)datalen);
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (key->payload.data != 0) {
CERROR("key already have payload\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
/* link the key to session keyring, so following context negotiation
@@ -1179,11 +1167,11 @@ int gss_kt_instantiate(struct key *key, const void *data, size_t datalen)
CERROR("failed to link key %08x to keyring %08x: %d\n",
key->serial,
key_tgcred(current)->session_keyring->serial, rc);
- RETURN(rc);
+ return rc;
}
CDEBUG(D_SEC, "key %p instantiated, ctx %p\n", key, key->payload.data);
- RETURN(0);
+ return 0;
}
/*
@@ -1198,11 +1186,10 @@ int gss_kt_update(struct key *key, const void *data, size_t datalen)
rawobj_t tmpobj = RAWOBJ_EMPTY;
__u32 datalen32 = (__u32) datalen;
int rc;
- ENTRY;
if (data == NULL || datalen == 0) {
CWARN("invalid: data %p, len %lu\n", data, (long)datalen);
- RETURN(-EINVAL);
+ return -EINVAL;
}
/* if upcall finished negotiation too fast (mostly likely because
@@ -1216,9 +1203,9 @@ int gss_kt_update(struct key *key, const void *data, size_t datalen)
rc = key_validate(key);
if (rc == 0)
- RETURN(-EAGAIN);
+ return -EAGAIN;
else
- RETURN(rc);
+ return rc;
}
LASSERT(atomic_read(&ctx->cc_refcount) > 0);
@@ -1229,7 +1216,7 @@ int gss_kt_update(struct key *key, const void *data, size_t datalen)
/* don't proceed if already refreshed */
if (cli_ctx_is_refreshed(ctx)) {
CWARN("ctx already done refresh\n");
- RETURN(0);
+ return 0;
}
sptlrpc_cli_ctx_get(ctx);
@@ -1304,7 +1291,7 @@ out:
/* let user space think it's a success */
sptlrpc_cli_ctx_put(ctx, 1);
- RETURN(0);
+ return 0;
}
static
@@ -1316,10 +1303,8 @@ int gss_kt_match(const struct key *key, const void *desc)
static
void gss_kt_destroy(struct key *key)
{
- ENTRY;
LASSERT(key->payload.data == NULL);
CDEBUG(D_SEC, "destroy key %p\n", key);
- EXIT;
}
static
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
index 4b28931bbc9..c106a9e049a 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_krb5_mech.c
@@ -54,6 +54,7 @@
#include <linux/slab.h>
#include <linux/crypto.h>
#include <linux/mutex.h>
+#include <linux/crypto.h>
#include <obd.h>
#include <obd_class.h>
@@ -147,14 +148,14 @@ static const char * enctype2str(__u32 enctype)
static
int keyblock_init(struct krb5_keyblock *kb, char *alg_name, int alg_mode)
{
- kb->kb_tfm = ll_crypto_alloc_blkcipher(alg_name, alg_mode, 0);
+ kb->kb_tfm = crypto_alloc_blkcipher(alg_name, alg_mode, 0);
if (IS_ERR(kb->kb_tfm)) {
CERROR("failed to alloc tfm: %s, mode %d\n",
alg_name, alg_mode);
return -1;
}
- if (ll_crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
+ if (crypto_blkcipher_setkey(kb->kb_tfm, kb->kb_key.data, kb->kb_key.len)) {
CERROR("failed to set %s key, len %d\n",
alg_name, kb->kb_key.len);
return -1;
@@ -197,7 +198,7 @@ void keyblock_free(struct krb5_keyblock *kb)
{
rawobj_free(&kb->kb_key);
if (kb->kb_tfm)
- ll_crypto_free_blkcipher(kb->kb_tfm);
+ crypto_free_blkcipher(kb->kb_tfm);
}
static
@@ -341,7 +342,7 @@ __u32 import_context_rfc1964(struct krb5_ctx *kctx, char *p, char *end)
if (p != end)
goto out_err;
- CDEBUG(D_SEC, "succesfully imported rfc1964 context\n");
+ CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
return 0;
out_err:
return GSS_S_FAILURE;
@@ -403,7 +404,7 @@ __u32 import_context_rfc4121(struct krb5_ctx *kctx, char *p, char *end)
if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
goto out_err;
- CDEBUG(D_SEC, "succesfully imported v2 context\n");
+ CDEBUG(D_SEC, "successfully imported v2 context\n");
return 0;
out_err:
return GSS_S_FAILURE;
@@ -494,7 +495,7 @@ __u32 gss_copy_reverse_context_kerberos(struct gss_ctx *gctx,
goto out_err;
gctx_new->internal_ctx_id = knew;
- CDEBUG(D_SEC, "succesfully copied reverse context\n");
+ CDEBUG(D_SEC, "successfully copied reverse context\n");
return GSS_S_COMPLETE;
out_err:
@@ -529,7 +530,7 @@ void buf_to_sg(struct scatterlist *sg, void *ptr, int len)
}
static
-__u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
+__u32 krb5_encrypt(struct crypto_blkcipher *tfm,
int decrypt,
void * iv,
void * in,
@@ -546,27 +547,27 @@ __u32 krb5_encrypt(struct ll_crypto_cipher *tfm,
desc.info = local_iv;
desc.flags= 0;
- if (length % ll_crypto_blkcipher_blocksize(tfm) != 0) {
+ if (length % crypto_blkcipher_blocksize(tfm) != 0) {
CERROR("output length %d mismatch blocksize %d\n",
- length, ll_crypto_blkcipher_blocksize(tfm));
+ length, crypto_blkcipher_blocksize(tfm));
goto out;
}
- if (ll_crypto_blkcipher_ivsize(tfm) > 16) {
- CERROR("iv size too large %d\n", ll_crypto_blkcipher_ivsize(tfm));
+ if (crypto_blkcipher_ivsize(tfm) > 16) {
+ CERROR("iv size too large %d\n", crypto_blkcipher_ivsize(tfm));
goto out;
}
if (iv)
- memcpy(local_iv, iv, ll_crypto_blkcipher_ivsize(tfm));
+ memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));
memcpy(out, in, length);
buf_to_sg(&sg, out, length);
if (decrypt)
- ret = ll_crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
+ ret = crypto_blkcipher_decrypt_iv(&desc, &sg, &sg, length);
else
- ret = ll_crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
+ ret = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, length);
out:
return(ret);
@@ -574,7 +575,7 @@ out:
static inline
-int krb5_digest_hmac(struct ll_crypto_hash *tfm,
+int krb5_digest_hmac(struct crypto_hash *tfm,
rawobj_t *key,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
@@ -585,17 +586,17 @@ int krb5_digest_hmac(struct ll_crypto_hash *tfm,
struct scatterlist sg[1];
int i;
- ll_crypto_hash_setkey(tfm, key->data, key->len);
+ crypto_hash_setkey(tfm, key->data, key->len);
desc.tfm = tfm;
desc.flags= 0;
- ll_crypto_hash_init(&desc);
+ crypto_hash_init(&desc);
for (i = 0; i < msgcnt; i++) {
if (msgs[i].len == 0)
continue;
buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
- ll_crypto_hash_update(&desc, sg, msgs[i].len);
+ crypto_hash_update(&desc, sg, msgs[i].len);
}
for (i = 0; i < iovcnt; i++) {
@@ -604,20 +605,20 @@ int krb5_digest_hmac(struct ll_crypto_hash *tfm,
sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
iovs[i].kiov_offset);
- ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
+ crypto_hash_update(&desc, sg, iovs[i].kiov_len);
}
if (khdr) {
buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
- ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
+ crypto_hash_update(&desc, sg, sizeof(*khdr));
}
- return ll_crypto_hash_final(&desc, cksum->data);
+ return crypto_hash_final(&desc, cksum->data);
}
static inline
-int krb5_digest_norm(struct ll_crypto_hash *tfm,
+int krb5_digest_norm(struct crypto_hash *tfm,
struct krb5_keyblock *kb,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
@@ -632,13 +633,13 @@ int krb5_digest_norm(struct ll_crypto_hash *tfm,
desc.tfm = tfm;
desc.flags= 0;
- ll_crypto_hash_init(&desc);
+ crypto_hash_init(&desc);
for (i = 0; i < msgcnt; i++) {
if (msgs[i].len == 0)
continue;
buf_to_sg(sg, (char *) msgs[i].data, msgs[i].len);
- ll_crypto_hash_update(&desc, sg, msgs[i].len);
+ crypto_hash_update(&desc, sg, msgs[i].len);
}
for (i = 0; i < iovcnt; i++) {
@@ -647,15 +648,15 @@ int krb5_digest_norm(struct ll_crypto_hash *tfm,
sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
iovs[i].kiov_offset);
- ll_crypto_hash_update(&desc, sg, iovs[i].kiov_len);
+ crypto_hash_update(&desc, sg, iovs[i].kiov_len);
}
if (khdr) {
buf_to_sg(sg, (char *) khdr, sizeof(*khdr));
- ll_crypto_hash_update(&desc, sg, sizeof(*khdr));
+ crypto_hash_update(&desc, sg, sizeof(*khdr));
}
- ll_crypto_hash_final(&desc, cksum->data);
+ crypto_hash_final(&desc, cksum->data);
return krb5_encrypt(kb->kb_tfm, 0, NULL, cksum->data,
cksum->data, cksum->len);
@@ -674,7 +675,7 @@ __s32 krb5_make_checksum(__u32 enctype,
rawobj_t *cksum)
{
struct krb5_enctype *ke = &enctypes[enctype];
- struct ll_crypto_hash *tfm;
+ struct crypto_hash *tfm;
__u32 code = GSS_S_FAILURE;
int rc;
@@ -683,7 +684,7 @@ __s32 krb5_make_checksum(__u32 enctype,
return GSS_S_FAILURE;
}
- cksum->len = ll_crypto_hash_digestsize(tfm);
+ cksum->len = crypto_hash_digestsize(tfm);
OBD_ALLOC_LARGE(cksum->data, cksum->len);
if (!cksum->data) {
cksum->len = 0;
@@ -700,7 +701,7 @@ __s32 krb5_make_checksum(__u32 enctype,
if (rc == 0)
code = GSS_S_COMPLETE;
out_tfm:
- ll_crypto_free_hash(tfm);
+ crypto_free_hash(tfm);
return code;
}
@@ -878,7 +879,7 @@ int add_padding(rawobj_t *msg, int msg_buflen, int blocksize)
}
static
-int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
+int krb5_encrypt_rawobjs(struct crypto_blkcipher *tfm,
int mode_ecb,
int inobj_cnt,
rawobj_t *inobjs,
@@ -890,7 +891,6 @@ int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
__u8 local_iv[16] = {0}, *buf;
__u32 datalen = 0;
int i, rc;
- ENTRY;
buf = outobj->data;
desc.tfm = tfm;
@@ -905,23 +905,23 @@ int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
if (mode_ecb) {
if (enc)
- rc = ll_crypto_blkcipher_encrypt(
+ rc = crypto_blkcipher_encrypt(
&desc, &dst, &src, src.length);
else
- rc = ll_crypto_blkcipher_decrypt(
+ rc = crypto_blkcipher_decrypt(
&desc, &dst, &src, src.length);
} else {
if (enc)
- rc = ll_crypto_blkcipher_encrypt_iv(
+ rc = crypto_blkcipher_encrypt_iv(
&desc, &dst, &src, src.length);
else
- rc = ll_crypto_blkcipher_decrypt_iv(
+ rc = crypto_blkcipher_decrypt_iv(
&desc, &dst, &src, src.length);
}
if (rc) {
CERROR("encrypt error %d\n", rc);
- RETURN(rc);
+ return rc;
}
datalen += inobjs[i].len;
@@ -929,14 +929,14 @@ int krb5_encrypt_rawobjs(struct ll_crypto_cipher *tfm,
}
outobj->len = datalen;
- RETURN(0);
+ return 0;
}
/*
* if adj_nob != 0, we adjust desc->bd_nob to the actual cipher text size.
*/
static
-int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
+int krb5_encrypt_bulk(struct crypto_blkcipher *tfm,
struct krb5_header *khdr,
char *confounder,
struct ptlrpc_bulk_desc *desc,
@@ -951,7 +951,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
LASSERT(desc->bd_iov_count);
LASSERT(desc->bd_enc_iov);
- blocksize = ll_crypto_blkcipher_blocksize(tfm);
+ blocksize = crypto_blkcipher_blocksize(tfm);
LASSERT(blocksize > 1);
LASSERT(cipher->len == blocksize + sizeof(*khdr));
@@ -963,7 +963,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
buf_to_sg(&src, confounder, blocksize);
buf_to_sg(&dst, cipher->data, blocksize);
- rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src, blocksize);
if (rc) {
CERROR("error to encrypt confounder: %d\n", rc);
return rc;
@@ -983,7 +983,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
desc->bd_enc_iov[i].kiov_offset = dst.offset;
desc->bd_enc_iov[i].kiov_len = dst.length;
- rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
src.length);
if (rc) {
CERROR("error to encrypt page: %d\n", rc);
@@ -995,7 +995,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
buf_to_sg(&src, khdr, sizeof(*khdr));
buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
- rc = ll_crypto_blkcipher_encrypt_iv(&ciph_desc,
+ rc = crypto_blkcipher_encrypt_iv(&ciph_desc,
&dst, &src, sizeof(*khdr));
if (rc) {
CERROR("error to encrypt krb5 header: %d\n", rc);
@@ -1025,7 +1025,7 @@ int krb5_encrypt_bulk(struct ll_crypto_cipher *tfm,
* should have been done by prep_bulk().
*/
static
-int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
+int krb5_decrypt_bulk(struct crypto_blkcipher *tfm,
struct krb5_header *khdr,
struct ptlrpc_bulk_desc *desc,
rawobj_t *cipher,
@@ -1042,7 +1042,7 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
LASSERT(desc->bd_enc_iov);
LASSERT(desc->bd_nob_transferred);
- blocksize = ll_crypto_blkcipher_blocksize(tfm);
+ blocksize = crypto_blkcipher_blocksize(tfm);
LASSERT(blocksize > 1);
LASSERT(cipher->len == blocksize + sizeof(*khdr));
@@ -1059,7 +1059,7 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
buf_to_sg(&src, cipher->data, blocksize);
buf_to_sg(&dst, plain->data, blocksize);
- rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src, blocksize);
if (rc) {
CERROR("error to decrypt confounder: %d\n", rc);
return rc;
@@ -1102,7 +1102,7 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
if (desc->bd_iov[i].kiov_len % blocksize == 0)
sg_assign_page(&dst, desc->bd_iov[i].kiov_page);
- rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
src.length);
if (rc) {
CERROR("error to decrypt page: %d\n", rc);
@@ -1142,7 +1142,7 @@ int krb5_decrypt_bulk(struct ll_crypto_cipher *tfm,
buf_to_sg(&src, cipher->data + blocksize, sizeof(*khdr));
buf_to_sg(&dst, cipher->data + blocksize, sizeof(*khdr));
- rc = ll_crypto_blkcipher_decrypt_iv(&ciph_desc,
+ rc = crypto_blkcipher_decrypt_iv(&ciph_desc,
&dst, &src, sizeof(*khdr));
if (rc) {
CERROR("error to decrypt tail: %d\n", rc);
@@ -1177,7 +1177,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
LASSERT(ke->ke_conf_size <= GSS_MAX_CIPHER_BLOCK);
LASSERT(kctx->kc_keye.kb_tfm == NULL ||
ke->ke_conf_size >=
- ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
+ crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm));
/*
* final token format:
@@ -1201,7 +1201,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
blocksize = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
}
LASSERT(blocksize <= ke->ke_conf_size);
@@ -1248,7 +1248,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
rawobj_t arc4_keye;
- struct ll_crypto_cipher *arc4_tfm;
+ struct crypto_blkcipher *arc4_tfm;
if (krb5_make_checksum(ENCTYPE_ARCFOUR_HMAC, &kctx->kc_keyi,
NULL, 1, &cksum, 0, NULL, &arc4_keye)) {
@@ -1256,13 +1256,13 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
GOTO(arc4_out, rc = -EACCES);
}
- arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+ arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
if (IS_ERR(arc4_tfm)) {
CERROR("failed to alloc tfm arc4 in ECB mode\n");
GOTO(arc4_out_key, rc = -EACCES);
}
- if (ll_crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
+ if (crypto_blkcipher_setkey(arc4_tfm, arc4_keye.data,
arc4_keye.len)) {
CERROR("failed to set arc4 key, len %d\n",
arc4_keye.len);
@@ -1272,7 +1272,7 @@ __u32 gss_wrap_kerberos(struct gss_ctx *gctx,
rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
3, data_desc, &cipher, 1);
arc4_out_tfm:
- ll_crypto_free_blkcipher(arc4_tfm);
+ crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
rawobj_free(&arc4_keye);
arc4_out:
@@ -1310,7 +1310,7 @@ __u32 gss_prep_bulk_kerberos(struct gss_ctx *gctx,
LASSERT(desc->bd_enc_iov);
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
for (i = 0; i < desc->bd_iov_count; i++) {
LASSERT(desc->bd_enc_iov[i].kiov_page);
@@ -1371,7 +1371,7 @@ __u32 gss_wrap_bulk_kerberos(struct gss_ctx *gctx,
blocksize = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
}
/*
@@ -1481,7 +1481,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
blocksize = 1;
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
}
/* expected token layout:
@@ -1521,7 +1521,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
if (kctx->kc_enctype == ENCTYPE_ARCFOUR_HMAC) {
rawobj_t arc4_keye;
- struct ll_crypto_cipher *arc4_tfm;
+ struct crypto_blkcipher *arc4_tfm;
cksum.data = token->data + token->len - ke->ke_hash_size;
cksum.len = ke->ke_hash_size;
@@ -1532,13 +1532,13 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
GOTO(arc4_out, rc = -EACCES);
}
- arc4_tfm = ll_crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
+ arc4_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, 0);
if (IS_ERR(arc4_tfm)) {
CERROR("failed to alloc tfm arc4 in ECB mode\n");
GOTO(arc4_out_key, rc = -EACCES);
}
- if (ll_crypto_blkcipher_setkey(arc4_tfm,
+ if (crypto_blkcipher_setkey(arc4_tfm,
arc4_keye.data, arc4_keye.len)) {
CERROR("failed to set arc4 key, len %d\n",
arc4_keye.len);
@@ -1548,7 +1548,7 @@ __u32 gss_unwrap_kerberos(struct gss_ctx *gctx,
rc = krb5_encrypt_rawobjs(arc4_tfm, 1,
1, &cipher_in, &plain_out, 0);
arc4_out_tfm:
- ll_crypto_free_blkcipher(arc4_tfm);
+ crypto_free_blkcipher(arc4_tfm);
arc4_out_key:
rawobj_free(&arc4_keye);
arc4_out:
@@ -1647,7 +1647,7 @@ __u32 gss_unwrap_bulk_kerberos(struct gss_ctx *gctx,
LBUG();
} else {
LASSERT(kctx->kc_keye.kb_tfm);
- blocksize = ll_crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
+ blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
}
LASSERT(sizeof(*khdr) >= blocksize && sizeof(*khdr) % blocksize == 0);
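The renames in this file drop the ll_crypto_* compatibility wrappers and call the kernel's blkcipher/hash primitives directly. For orientation, a minimal sketch of that (legacy, pre-skcipher) blkcipher API as it is used here; the cipher name, key and buffer are placeholders:

#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Hypothetical illustration of the legacy blkcipher calls used above. */
static int blkcipher_example(u8 *buf, unsigned int len,
			     const u8 *key, unsigned int keylen)
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	struct scatterlist sg;
	int rc;

	tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_blkcipher_setkey(tfm, key, keylen);
	if (rc)
		goto out;

	desc.tfm = tfm;
	desc.flags = 0;
	sg_init_one(&sg, buf, len);

	/* in-place encryption; len must be a multiple of the block size */
	rc = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
out:
	crypto_free_blkcipher(tfm);
	return rc;
}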
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_pipefs.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_pipefs.c
index 3df7257b7fa..c624518c181 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_pipefs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_pipefs.c
@@ -262,7 +262,6 @@ void gss_sec_ctx_replace_pf(struct gss_sec *gsec,
struct hlist_node *next;
HLIST_HEAD(freelist);
unsigned int hash;
- ENTRY;
gsec_pf = container_of(gsec, struct gss_sec_pipefs, gsp_base);
@@ -287,7 +286,6 @@ void gss_sec_ctx_replace_pf(struct gss_sec *gsec,
spin_unlock(&gsec->gs_base.ps_lock);
ctx_list_destroy_pf(&freelist);
- EXIT;
}
static
@@ -297,23 +295,22 @@ int gss_install_rvs_cli_ctx_pf(struct gss_sec *gsec,
struct vfs_cred vcred;
struct ptlrpc_cli_ctx *cli_ctx;
int rc;
- ENTRY;
vcred.vc_uid = 0;
vcred.vc_gid = 0;
cli_ctx = ctx_create_pf(&gsec->gs_base, &vcred);
if (!cli_ctx)
- RETURN(-ENOMEM);
+ return -ENOMEM;
rc = gss_copy_rvc_cli_ctx(cli_ctx, svc_ctx);
if (rc) {
ctx_destroy_pf(cli_ctx->cc_sec, cli_ctx);
- RETURN(rc);
+ return rc;
}
gss_sec_ctx_replace_pf(gsec, cli_ctx);
- RETURN(0);
+ return 0;
}
static
@@ -324,7 +321,6 @@ void gss_ctx_cache_gc_pf(struct gss_sec_pipefs *gsec_pf,
struct ptlrpc_cli_ctx *ctx;
struct hlist_node *next;
int i;
- ENTRY;
sec = &gsec_pf->gsp_base.gs_base;
@@ -337,7 +333,6 @@ void gss_ctx_cache_gc_pf(struct gss_sec_pipefs *gsec_pf,
}
sec->ps_gc_next = cfs_time_current_sec() + sec->ps_gc_interval;
- EXIT;
}
static
@@ -347,7 +342,6 @@ struct ptlrpc_sec* gss_sec_create_pf(struct obd_import *imp,
{
struct gss_sec_pipefs *gsec_pf;
int alloc_size, hash_size, i;
- ENTRY;
#define GSS_SEC_PIPEFS_CTX_HASH_SIZE (32)
@@ -362,7 +356,7 @@ struct ptlrpc_sec* gss_sec_create_pf(struct obd_import *imp,
OBD_ALLOC(gsec_pf, alloc_size);
if (!gsec_pf)
- RETURN(NULL);
+ return NULL;
gsec_pf->gsp_chash_size = hash_size;
for (i = 0; i < hash_size; i++)
@@ -380,13 +374,13 @@ struct ptlrpc_sec* gss_sec_create_pf(struct obd_import *imp,
goto err_destroy;
}
- RETURN(&gsec_pf->gsp_base.gs_base);
+ return &gsec_pf->gsp_base.gs_base;
err_destroy:
gss_sec_destroy_common(&gsec_pf->gsp_base);
err_free:
OBD_FREE(gsec_pf, alloc_size);
- RETURN(NULL);
+ return NULL;
}
static
@@ -423,7 +417,6 @@ struct ptlrpc_cli_ctx * gss_sec_lookup_ctx_pf(struct ptlrpc_sec *sec,
struct hlist_node *next;
HLIST_HEAD(freelist);
unsigned int hash, gc = 0, found = 0;
- ENTRY;
might_sleep();
@@ -473,7 +466,7 @@ retry:
/* don't allocate for reverse sec */
if (sec_is_reverse(sec)) {
spin_unlock(&sec->ps_lock);
- RETURN(NULL);
+ return NULL;
}
if (new) {
@@ -504,7 +497,7 @@ retry:
}
ctx_list_destroy_pf(&freelist);
- RETURN(ctx);
+ return ctx;
}
static
@@ -545,7 +538,6 @@ int gss_sec_flush_ctx_cache_pf(struct ptlrpc_sec *sec,
struct hlist_node *next;
HLIST_HEAD(freelist);
int i, busy = 0;
- ENTRY;
might_sleep_if(grace);
@@ -584,7 +576,7 @@ int gss_sec_flush_ctx_cache_pf(struct ptlrpc_sec *sec,
spin_unlock(&sec->ps_lock);
ctx_list_destroy_pf(&freelist);
- RETURN(busy);
+ return busy;
}
/****************************************
@@ -704,11 +696,9 @@ void upcall_msg_delist(struct gss_upcall_msg *msg)
static
void gss_release_msg(struct gss_upcall_msg *gmsg)
{
- ENTRY;
LASSERT(atomic_read(&gmsg->gum_refcount) > 0);
if (!atomic_dec_and_test(&gmsg->gum_refcount)) {
- EXIT;
return;
}
@@ -721,7 +711,6 @@ void gss_release_msg(struct gss_upcall_msg *gmsg)
LASSERT(list_empty(&gmsg->gum_list));
LASSERT(list_empty(&gmsg->gum_base.list));
OBD_FREE_PTR(gmsg);
- EXIT;
}
static
@@ -809,19 +798,18 @@ ssize_t gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
char *data = (char *)msg->data + msg->copied;
ssize_t mlen = msg->len;
ssize_t left;
- ENTRY;
if (mlen > buflen)
mlen = buflen;
left = copy_to_user(dst, data, mlen);
if (left < 0) {
msg->errno = left;
- RETURN(left);
+ return left;
}
mlen -= left;
msg->copied += mlen;
msg->errno = 0;
- RETURN(mlen);
+ return mlen;
}
static
@@ -835,14 +823,13 @@ ssize_t gss_pipe_downcall(struct file *filp, const char *src, size_t mlen)
int datalen;
int timeout, rc;
__u32 mechidx, seq, gss_err;
- ENTRY;
mechidx = (__u32) (long) rpci->private;
LASSERT(mechidx < MECH_MAX);
OBD_ALLOC(buf, mlen);
if (!buf)
- RETURN(-ENOMEM);
+ return -ENOMEM;
if (copy_from_user(buf, src, mlen)) {
CERROR("failed copy user space data\n");
@@ -940,7 +927,7 @@ out_free:
* hack pipefs: always return asked length unless all following
* downcalls might be messed up. */
rc = mlen;
- RETURN(rc);
+ return rc;
}
static
@@ -949,13 +936,11 @@ void gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
struct gss_upcall_msg *gmsg;
struct gss_upcall_msg_data *gumd;
static cfs_time_t ratelimit = 0;
- ENTRY;
LASSERT(list_empty(&msg->list));
/* normally errno is >= 0 */
if (msg->errno >= 0) {
- EXIT;
return;
}
@@ -980,7 +965,6 @@ void gss_pipe_destroy_msg(struct rpc_pipe_msg *msg)
}
gss_msg_fail_ctx(gmsg);
gss_release_msg(gmsg);
- EXIT;
}
static
@@ -988,7 +972,6 @@ void gss_pipe_release(struct inode *inode)
{
struct rpc_inode *rpci = RPC_I(inode);
__u32 idx;
- ENTRY;
idx = (__u32) (long) rpci->private;
LASSERT(idx < MECH_MAX);
@@ -1020,7 +1003,6 @@ void gss_pipe_release(struct inode *inode)
upcall_list_lock(idx);
}
upcall_list_unlock(idx);
- EXIT;
}
static struct rpc_pipe_ops gss_upcall_ops = {
@@ -1041,7 +1023,6 @@ int gss_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
struct gss_sec *gsec;
struct gss_upcall_msg *gmsg;
int rc = 0;
- ENTRY;
might_sleep();
@@ -1052,14 +1033,14 @@ int gss_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
imp = ctx->cc_sec->ps_import;
if (!imp->imp_connection) {
CERROR("import has no connection set\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
gsec = container_of(ctx->cc_sec, struct gss_sec, gs_base);
OBD_ALLOC_PTR(gmsg);
if (!gmsg)
- RETURN(-ENOMEM);
+ return -ENOMEM;
/* initialize pipefs base msg */
INIT_LIST_HEAD(&gmsg->gum_base.list);
@@ -1107,10 +1088,10 @@ int gss_ctx_refresh_pf(struct ptlrpc_cli_ctx *ctx)
goto err_free;
}
- RETURN(0);
+ return 0;
err_free:
OBD_FREE_PTR(gmsg);
- RETURN(rc);
+ return rc;
}
static
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_rawobj.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_rawobj.c
index 474ecf80530..fb298aef66e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_rawobj.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_rawobj.c
@@ -65,7 +65,7 @@ int rawobj_alloc(rawobj_t *obj, char *buf, int len)
OBD_ALLOC_LARGE(obj->data, len);
if (!obj->data) {
obj->len = 0;
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
memcpy(obj->data, buf, len);
} else
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_svc_upcall.c b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_svc_upcall.c
index 31b50ea19c2..5b5365b4629 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/gss_svc_upcall.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/gss_svc_upcall.c
@@ -259,8 +259,6 @@ static int rsi_parse(struct cache_detail *cd, char *mesg, int mlen)
struct rsi rsii, *rsip = NULL;
time_t expiry;
int status = -EINVAL;
- ENTRY;
-
memset(&rsii, 0, sizeof(rsii));
@@ -341,7 +339,7 @@ out:
if (status)
CERROR("rsi parse error %d\n", status);
- RETURN(status);
+ return status;
}
static struct cache_detail rsi_cache = {
@@ -662,7 +660,6 @@ static void rsc_flush(rsc_entry_match *match, long data)
struct cache_head **ch;
struct rsc *rscp;
int n;
- ENTRY;
write_lock(&rsc_cache.hash_lock);
for (n = 0; n < RSC_HASHMAX; n++) {
@@ -684,7 +681,6 @@ static void rsc_flush(rsc_entry_match *match, long data)
}
}
write_unlock(&rsc_cache.hash_lock);
- EXIT;
}
static int match_uid(struct rsc *rscp, long uid)
@@ -744,7 +740,6 @@ int gss_svc_upcall_install_rvs_ctx(struct obd_import *imp,
unsigned long ctx_expiry;
__u32 major;
int rc;
- ENTRY;
memset(&rsci, 0, sizeof(rsci));
@@ -792,7 +787,7 @@ out:
if (rc)
CERROR("create reverse svc ctx: idx "LPX64", rc %d\n",
gsec->gs_rvs_hdl, rc);
- RETURN(rc);
+ return rc;
}
int gss_svc_upcall_expire_rvs_ctx(rawobj_t *handle)
@@ -855,7 +850,6 @@ int gss_svc_upcall_handle_init(struct ptlrpc_request *req,
struct gss_rep_header *rephdr;
int first_check = 1;
int rc = SECSVC_DROP;
- ENTRY;
memset(&rsikey, 0, sizeof(rsikey));
rsikey.lustre_svc = lustre_svc;
@@ -1016,7 +1010,7 @@ out:
COMPAT_RSC_PUT(&rsci->h, &rsc_cache);
}
- RETURN(rc);
+ return rc;
}
struct gss_svc_ctx *gss_svc_upcall_get_ctx(struct ptlrpc_request *req,
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/lproc_gss.c b/drivers/staging/lustre/lustre/ptlrpc/gss/lproc_gss.c
index 340400089a5..de100a14ab5 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/lproc_gss.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/lproc_gss.c
@@ -199,15 +199,17 @@ int gss_init_lproc(void)
gss_proc_root = lprocfs_register("gss", sptlrpc_proc_root,
gss_lprocfs_vars, NULL);
if (IS_ERR(gss_proc_root)) {
+ rc = PTR_ERR(gss_proc_root);
gss_proc_root = NULL;
- GOTO(err_out, rc = PTR_ERR(gss_proc_root));
+ GOTO(err_out, rc);
}
gss_proc_lk = lprocfs_register("lgss_keyring", gss_proc_root,
gss_lk_lprocfs_vars, NULL);
if (IS_ERR(gss_proc_lk)) {
+ rc = PTR_ERR(gss_proc_lk);
gss_proc_lk = NULL;
- GOTO(err_out, rc = PTR_ERR(gss_proc_root));
+ GOTO(err_out, rc);
}
return 0;
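The gss_init_lproc() change above fixes an error-path bug: rc was computed with PTR_ERR() only after the pointer had been reset to NULL (so it was always 0), and the second check reused gss_proc_root instead of gss_proc_lk. A reduced sketch of the pitfall and the corrected ordering, with a hypothetical handle name:

/* buggy: PTR_ERR() runs on a pointer that was just cleared, so rc == 0 */
if (IS_ERR(handle)) {
	handle = NULL;
	GOTO(err_out, rc = PTR_ERR(handle));
}

/* fixed: capture the error code first, then clear the pointer */
if (IS_ERR(handle)) {
	rc = PTR_ERR(handle);
	handle = NULL;
	GOTO(err_out, rc);
}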
diff --git a/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c b/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c
index ebca858ca18..b42ddda9ee2 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/gss/sec_gss.c
@@ -280,11 +280,10 @@ __u32 gss_unseal_msg(struct gss_ctx *mechctx,
__u8 *clear_buf;
int clear_buflen;
__u32 major;
- ENTRY;
if (msgbuf->lm_bufcount != 2) {
CERROR("invalid bufcount %d\n", msgbuf->lm_bufcount);
- RETURN(GSS_S_FAILURE);
+ return GSS_S_FAILURE;
}
/* allocate a temporary clear text buffer, same sized as token,
@@ -292,7 +291,7 @@ __u32 gss_unseal_msg(struct gss_ctx *mechctx,
clear_buflen = lustre_msg_buflen(msgbuf, 1);
OBD_ALLOC_LARGE(clear_buf, clear_buflen);
if (!clear_buf)
- RETURN(GSS_S_FAILURE);
+ return GSS_S_FAILURE;
/* buffer objects */
hdrobj.len = lustre_msg_buflen(msgbuf, 0);
@@ -317,7 +316,7 @@ __u32 gss_unseal_msg(struct gss_ctx *mechctx,
major = GSS_S_COMPLETE;
out_free:
OBD_FREE_LARGE(clear_buf, clear_buflen);
- RETURN(major);
+ return major;
}
/********************************************
@@ -646,7 +645,6 @@ int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx,
struct gss_cli_ctx *gctx = ctx2gctx(ctx);
__u32 flags = 0, seq, svc;
int rc;
- ENTRY;
LASSERT(req->rq_reqbuf);
LASSERT(req->rq_reqbuf->lm_bufcount >= 2);
@@ -654,7 +652,7 @@ int gss_cli_ctx_sign(struct ptlrpc_cli_ctx *ctx,
/* nothing to do for context negotiation RPCs */
if (req->rq_ctx_init)
- RETURN(0);
+ return 0;
svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
if (req->rq_pack_bulk)
@@ -670,7 +668,7 @@ redo:
flags, gctx->gc_proc, seq, svc,
&gctx->gc_handle);
if (rc < 0)
- RETURN(rc);
+ return rc;
/* gss_sign_msg() msg might take long time to finish, in which period
* more rpcs could be wrapped up and sent out. if we found too many
@@ -689,7 +687,7 @@ redo:
}
req->rq_reqdata_len = rc;
- RETURN(0);
+ return 0;
}
static
@@ -765,7 +763,6 @@ int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx,
struct lustre_msg *msg = req->rq_repdata;
__u32 major;
int pack_bulk, swabbed, rc = 0;
- ENTRY;
LASSERT(req->rq_cli_ctx == ctx);
LASSERT(msg);
@@ -777,12 +774,12 @@ int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx,
if (req->rq_ctx_init && !req->rq_early) {
req->rq_repmsg = lustre_msg_buf(msg, 1, 0);
req->rq_replen = msg->lm_buflens[1];
- RETURN(0);
+ return 0;
}
if (msg->lm_bufcount < 2 || msg->lm_bufcount > 4) {
CERROR("unexpected bufcount %u\n", msg->lm_bufcount);
- RETURN(-EPROTO);
+ return -EPROTO;
}
swabbed = ptlrpc_rep_need_swab(req);
@@ -790,7 +787,7 @@ int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx,
ghdr = gss_swab_header(msg, 0, swabbed);
if (ghdr == NULL) {
CERROR("can't decode gss header\n");
- RETURN(-EPROTO);
+ return -EPROTO;
}
/* sanity checks */
@@ -800,7 +797,7 @@ int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx,
if (ghdr->gh_version != reqhdr->gh_version) {
CERROR("gss version %u mismatch, expect %u\n",
ghdr->gh_version, reqhdr->gh_version);
- RETURN(-EPROTO);
+ return -EPROTO;
}
switch (ghdr->gh_proc) {
@@ -810,19 +807,19 @@ int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx,
if (!req->rq_early && !equi(req->rq_pack_bulk == 1, pack_bulk)){
CERROR("%s bulk flag in reply\n",
req->rq_pack_bulk ? "missing" : "unexpected");
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (ghdr->gh_seq != reqhdr->gh_seq) {
CERROR("seqnum %u mismatch, expect %u\n",
ghdr->gh_seq, reqhdr->gh_seq);
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (ghdr->gh_svc != reqhdr->gh_svc) {
CERROR("svc %u mismatch, expect %u\n",
ghdr->gh_svc, reqhdr->gh_svc);
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (swabbed)
@@ -831,7 +828,7 @@ int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx,
major = gss_verify_msg(msg, gctx->gc_mechctx, reqhdr->gh_svc);
if (major != GSS_S_COMPLETE) {
CERROR("failed to verify reply: %x\n", major);
- RETURN(-EPERM);
+ return -EPERM;
}
if (req->rq_early && reqhdr->gh_svc == SPTLRPC_SVC_NULL) {
@@ -843,7 +840,7 @@ int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx,
if (cksum != msg->lm_cksum) {
CWARN("early reply checksum mismatch: "
"%08x != %08x\n", cksum, msg->lm_cksum);
- RETURN(-EPROTO);
+ return -EPROTO;
}
}
@@ -852,13 +849,13 @@ int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx,
if (msg->lm_bufcount < 3) {
CERROR("Invalid reply bufcount %u\n",
msg->lm_bufcount);
- RETURN(-EPROTO);
+ return -EPROTO;
}
rc = bulk_sec_desc_unpack(msg, 2, swabbed);
if (rc) {
CERROR("unpack bulk desc: %d\n", rc);
- RETURN(rc);
+ return rc;
}
}
@@ -878,7 +875,7 @@ int gss_cli_ctx_verify(struct ptlrpc_cli_ctx *ctx,
rc = -EPROTO;
}
- RETURN(rc);
+ return rc;
}
int gss_cli_ctx_seal(struct ptlrpc_cli_ctx *ctx,
@@ -889,7 +886,6 @@ int gss_cli_ctx_seal(struct ptlrpc_cli_ctx *ctx,
struct gss_header *ghdr;
__u32 buflens[2], major;
int wiresize, rc;
- ENTRY;
LASSERT(req->rq_clrbuf);
LASSERT(req->rq_cli_ctx == ctx);
@@ -915,7 +911,7 @@ int gss_cli_ctx_seal(struct ptlrpc_cli_ctx *ctx,
} else {
OBD_ALLOC_LARGE(req->rq_reqbuf, wiresize);
if (!req->rq_reqbuf)
- RETURN(-ENOMEM);
+ return -ENOMEM;
req->rq_reqbuf_len = wiresize;
}
@@ -969,7 +965,7 @@ redo:
/* now set the final wire data length */
req->rq_reqdata_len = lustre_shrink_msg(req->rq_reqbuf, 1, token.len,0);
- RETURN(0);
+ return 0;
err_free:
if (!req->rq_pool) {
@@ -977,7 +973,7 @@ err_free:
req->rq_reqbuf = NULL;
req->rq_reqbuf_len = 0;
}
- RETURN(rc);
+ return rc;
}
int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
@@ -988,7 +984,6 @@ int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
struct lustre_msg *msg = req->rq_repdata;
int msglen, pack_bulk, swabbed, rc;
__u32 major;
- ENTRY;
LASSERT(req->rq_cli_ctx == ctx);
LASSERT(req->rq_ctx_init == 0);
@@ -1000,14 +995,14 @@ int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
ghdr = gss_swab_header(msg, 0, swabbed);
if (ghdr == NULL) {
CERROR("can't decode gss header\n");
- RETURN(-EPROTO);
+ return -EPROTO;
}
/* sanity checks */
if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
CERROR("gss version %u mismatch, expect %u\n",
ghdr->gh_version, PTLRPC_GSS_VERSION);
- RETURN(-EPROTO);
+ return -EPROTO;
}
switch (ghdr->gh_proc) {
@@ -1017,7 +1012,7 @@ int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
if (!req->rq_early && !equi(req->rq_pack_bulk == 1, pack_bulk)){
CERROR("%s bulk flag in reply\n",
req->rq_pack_bulk ? "missing" : "unexpected");
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (swabbed)
@@ -1038,25 +1033,25 @@ int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
swabbed = __lustre_unpack_msg(msg, msglen);
if (swabbed < 0) {
CERROR("Failed to unpack after decryption\n");
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (msg->lm_bufcount < 1) {
CERROR("Invalid reply buffer: empty\n");
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (pack_bulk) {
if (msg->lm_bufcount < 2) {
CERROR("bufcount %u: missing bulk sec desc\n",
msg->lm_bufcount);
- RETURN(-EPROTO);
+ return -EPROTO;
}
/* bulk checksum is the last segment */
if (bulk_sec_desc_unpack(msg, msg->lm_bufcount - 1,
swabbed))
- RETURN(-EPROTO);
+ return -EPROTO;
}
req->rq_repmsg = lustre_msg_buf(msg, 0, 0);
@@ -1077,7 +1072,7 @@ int gss_cli_ctx_unseal(struct ptlrpc_cli_ctx *ctx,
rc = -EPERM;
}
- RETURN(rc);
+ return rc;
}
/*********************************************
@@ -1148,7 +1143,6 @@ int gss_sec_create_common(struct gss_sec *gsec,
void gss_sec_destroy_common(struct gss_sec *gsec)
{
struct ptlrpc_sec *sec = &gsec->gs_base;
- ENTRY;
LASSERT(sec->ps_import);
LASSERT(atomic_read(&sec->ps_refcount) == 0);
@@ -1163,8 +1157,6 @@ void gss_sec_destroy_common(struct gss_sec *gsec)
if (SPTLRPC_FLVR_BULK_SVC(sec->ps_flvr.sf_rpc) == SPTLRPC_BULK_SVC_PRIV)
sptlrpc_enc_pool_del_user();
-
- EXIT;
}
void gss_sec_kill(struct ptlrpc_sec *sec)
@@ -1260,7 +1252,6 @@ int gss_alloc_reqbuf_intg(struct ptlrpc_sec *sec,
int bufsize, txtsize;
int bufcnt = 2;
__u32 buflens[5];
- ENTRY;
/*
* on-wire data layout:
@@ -1312,7 +1303,7 @@ int gss_alloc_reqbuf_intg(struct ptlrpc_sec *sec,
OBD_ALLOC_LARGE(req->rq_reqbuf, bufsize);
if (!req->rq_reqbuf)
- RETURN(-ENOMEM);
+ return -ENOMEM;
req->rq_reqbuf_len = bufsize;
} else {
@@ -1331,7 +1322,7 @@ int gss_alloc_reqbuf_intg(struct ptlrpc_sec *sec,
if (req->rq_pack_udesc)
sptlrpc_pack_user_desc(req->rq_reqbuf, 2);
- RETURN(0);
+ return 0;
}
static
@@ -1342,7 +1333,6 @@ int gss_alloc_reqbuf_priv(struct ptlrpc_sec *sec,
__u32 ibuflens[3], wbuflens[2];
int ibufcnt;
int clearsize, wiresize;
- ENTRY;
LASSERT(req->rq_clrbuf == NULL);
LASSERT(req->rq_clrbuf_len == 0);
@@ -1399,7 +1389,7 @@ int gss_alloc_reqbuf_priv(struct ptlrpc_sec *sec,
OBD_ALLOC_LARGE(req->rq_clrbuf, clearsize);
if (!req->rq_clrbuf)
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
req->rq_clrbuf_len = clearsize;
@@ -1409,7 +1399,7 @@ int gss_alloc_reqbuf_priv(struct ptlrpc_sec *sec,
if (req->rq_pack_udesc)
sptlrpc_pack_user_desc(req->rq_clrbuf, 1);
- RETURN(0);
+ return 0;
}
/*
@@ -1442,7 +1432,6 @@ void gss_free_reqbuf(struct ptlrpc_sec *sec,
struct ptlrpc_request *req)
{
int privacy;
- ENTRY;
LASSERT(!req->rq_pool || req->rq_reqbuf);
privacy = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) == SPTLRPC_SVC_PRIV;
@@ -1471,8 +1460,6 @@ release_reqbuf:
req->rq_reqbuf = NULL;
req->rq_reqbuf_len = 0;
}
-
- EXIT;
}
static int do_alloc_repbuf(struct ptlrpc_request *req, int bufsize)
@@ -1578,7 +1565,6 @@ int gss_alloc_repbuf(struct ptlrpc_sec *sec,
int msgsize)
{
int svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
- ENTRY;
LASSERT(!req->rq_pack_bulk ||
(req->rq_bulk_read || req->rq_bulk_write));
@@ -1697,7 +1683,7 @@ int gss_enlarge_reqbuf_intg(struct ptlrpc_sec *sec,
OBD_ALLOC_LARGE(newbuf, newbuf_size);
if (newbuf == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
@@ -1717,7 +1703,7 @@ int gss_enlarge_reqbuf_intg(struct ptlrpc_sec *sec,
_sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
req->rq_reqlen = newmsg_size;
- RETURN(0);
+ return 0;
}
static
@@ -1786,7 +1772,7 @@ int gss_enlarge_reqbuf_priv(struct ptlrpc_sec *sec,
OBD_ALLOC_LARGE(newclrbuf, newclrbuf_size);
if (newclrbuf == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
memcpy(newclrbuf, req->rq_clrbuf, req->rq_clrbuf_len);
@@ -1806,7 +1792,7 @@ int gss_enlarge_reqbuf_priv(struct ptlrpc_sec *sec,
_sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
req->rq_reqlen = newmsg_size;
- RETURN(0);
+ return 0;
}
int gss_enlarge_reqbuf(struct ptlrpc_sec *sec,
@@ -1891,7 +1877,6 @@ int gss_svc_sign(struct ptlrpc_request *req,
{
__u32 flags = 0;
int rc;
- ENTRY;
LASSERT(rs->rs_msg == lustre_msg_buf(rs->rs_repbuf, 1, 0));
@@ -1906,7 +1891,7 @@ int gss_svc_sign(struct ptlrpc_request *req,
LUSTRE_SP_ANY, flags, PTLRPC_GSS_PROC_DATA,
grctx->src_wirectx.gw_seq, svc, NULL);
if (rc < 0)
- RETURN(rc);
+ return rc;
rs->rs_repdata_len = rc;
@@ -1923,7 +1908,7 @@ int gss_svc_sign(struct ptlrpc_request *req,
req->rq_reply_off = 0;
}
- RETURN(0);
+ return 0;
}
int gss_pack_err_notify(struct ptlrpc_request *req, __u32 major, __u32 minor)
@@ -1933,10 +1918,9 @@ int gss_pack_err_notify(struct ptlrpc_request *req, __u32 major, __u32 minor)
struct gss_err_header *ghdr;
int replen = sizeof(struct ptlrpc_body);
int rc;
- ENTRY;
//if (OBD_FAIL_CHECK_ORSET(OBD_FAIL_SVCGSS_ERR_NOTIFY, OBD_FAIL_ONCE))
- // RETURN(-EINVAL);
+ // return -EINVAL;
grctx->src_err_notify = 1;
grctx->src_reserve_len = 0;
@@ -1944,7 +1928,7 @@ int gss_pack_err_notify(struct ptlrpc_request *req, __u32 major, __u32 minor)
rc = lustre_pack_reply_v2(req, 1, &replen, NULL, 0);
if (rc) {
CERROR("could not pack reply, err %d\n", rc);
- RETURN(rc);
+ return rc;
}
/* gss hdr */
@@ -1963,7 +1947,7 @@ int gss_pack_err_notify(struct ptlrpc_request *req, __u32 major, __u32 minor)
CDEBUG(D_SEC, "prepare gss error notify(0x%x/0x%x) to %s\n",
major, minor, libcfs_nid2str(req->rq_peer.nid));
- RETURN(0);
+ return 0;
}
static
@@ -1978,7 +1962,6 @@ int gss_svc_handle_init(struct ptlrpc_request *req,
__u32 lustre_svc;
__u32 *secdata, seclen;
int swabbed, rc;
- ENTRY;
CDEBUG(D_SEC, "processing gss init(%d) request from %s\n", gw->gw_proc,
libcfs_nid2str(req->rq_peer.nid));
@@ -1987,18 +1970,18 @@ int gss_svc_handle_init(struct ptlrpc_request *req,
if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
CERROR("unexpected bulk flag\n");
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
}
if (gw->gw_proc == PTLRPC_GSS_PROC_INIT && gw->gw_handle.len != 0) {
CERROR("proc %u: invalid handle length %u\n",
gw->gw_proc, gw->gw_handle.len);
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
}
if (reqbuf->lm_bufcount < 3 || reqbuf->lm_bufcount > 4){
CERROR("Invalid bufcount %d\n", reqbuf->lm_bufcount);
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
}
swabbed = ptlrpc_req_need_swab(req);
@@ -2009,7 +1992,7 @@ int gss_svc_handle_init(struct ptlrpc_request *req,
if (seclen < 4 + 4) {
CERROR("sec size %d too small\n", seclen);
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
}
/* lustre svc type */
@@ -2020,7 +2003,7 @@ int gss_svc_handle_init(struct ptlrpc_request *req,
* because touched internal structure of obd_uuid */
if (rawobj_extract(&uuid_obj, &secdata, &seclen)) {
CERROR("failed to extract target uuid\n");
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
}
uuid_obj.data[uuid_obj.len - 1] = '\0';
@@ -2030,25 +2013,25 @@ int gss_svc_handle_init(struct ptlrpc_request *req,
CERROR("target '%s' is not available for context init (%s)\n",
uuid->uuid, target == NULL ? "no target" :
(target->obd_stopping ? "stopping" : "not set up"));
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
}
/* extract reverse handle */
if (rawobj_extract(&rvs_hdl, &secdata, &seclen)) {
CERROR("failed extract reverse handle\n");
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
}
/* extract token */
if (rawobj_extract(&in_token, &secdata, &seclen)) {
CERROR("can't extract token\n");
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
}
rc = gss_svc_upcall_handle_init(req, grctx, gw, target, lustre_svc,
&rvs_hdl, &in_token);
if (rc != SECSVC_OK)
- RETURN(rc);
+ return rc;
if (grctx->src_ctx->gsc_usr_mds || grctx->src_ctx->gsc_usr_oss ||
grctx->src_ctx->gsc_usr_root)
@@ -2064,11 +2047,11 @@ int gss_svc_handle_init(struct ptlrpc_request *req,
if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
if (reqbuf->lm_bufcount < 4) {
CERROR("missing user descriptor\n");
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
}
if (sptlrpc_unpack_user_desc(reqbuf, 2, swabbed)) {
CERROR("Mal-formed user descriptor\n");
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
}
req->rq_pack_udesc = 1;
@@ -2078,7 +2061,7 @@ int gss_svc_handle_init(struct ptlrpc_request *req,
req->rq_reqmsg = lustre_msg_buf(reqbuf, 1, 0);
req->rq_reqlen = lustre_msg_buflen(reqbuf, 1);
- RETURN(rc);
+ return rc;
}
/*
@@ -2094,13 +2077,12 @@ int gss_svc_verify_request(struct ptlrpc_request *req,
struct lustre_msg *msg = req->rq_reqbuf;
int offset = 2;
int swabbed;
- ENTRY;
*major = GSS_S_COMPLETE;
if (msg->lm_bufcount < 2) {
CERROR("Too few segments (%u) in request\n", msg->lm_bufcount);
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (gw->gw_svc == SPTLRPC_SVC_NULL)
@@ -2109,20 +2091,20 @@ int gss_svc_verify_request(struct ptlrpc_request *req,
if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
*major = GSS_S_DUPLICATE_TOKEN;
- RETURN(-EACCES);
+ return -EACCES;
}
*major = gss_verify_msg(msg, gctx->gsc_mechctx, gw->gw_svc);
if (*major != GSS_S_COMPLETE) {
CERROR("failed to verify request: %x\n", *major);
- RETURN(-EACCES);
+ return -EACCES;
}
if (gctx->gsc_reverse == 0 &&
gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
*major = GSS_S_DUPLICATE_TOKEN;
- RETURN(-EACCES);
+ return -EACCES;
}
verified:
@@ -2132,12 +2114,12 @@ verified:
if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
if (msg->lm_bufcount < (offset + 1)) {
CERROR("no user desc included\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (sptlrpc_unpack_user_desc(msg, offset, swabbed)) {
CERROR("Mal-formed user descriptor\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
req->rq_pack_udesc = 1;
@@ -2149,11 +2131,11 @@ verified:
if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
if (msg->lm_bufcount < (offset + 1)) {
CERROR("missing bulk sec descriptor\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (bulk_sec_desc_unpack(msg, offset, swabbed))
- RETURN(-EINVAL);
+ return -EINVAL;
req->rq_pack_bulk = 1;
grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
@@ -2162,7 +2144,7 @@ verified:
req->rq_reqmsg = lustre_msg_buf(msg, 1, 0);
req->rq_reqlen = msg->lm_buflens[1];
- RETURN(0);
+ return 0;
}
static
@@ -2174,48 +2156,47 @@ int gss_svc_unseal_request(struct ptlrpc_request *req,
struct gss_svc_ctx *gctx = grctx->src_ctx;
struct lustre_msg *msg = req->rq_reqbuf;
int swabbed, msglen, offset = 1;
- ENTRY;
if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 0)) {
CERROR("phase 0: discard replayed req: seq %u\n", gw->gw_seq);
*major = GSS_S_DUPLICATE_TOKEN;
- RETURN(-EACCES);
+ return -EACCES;
}
*major = gss_unseal_msg(gctx->gsc_mechctx, msg,
&msglen, req->rq_reqdata_len);
if (*major != GSS_S_COMPLETE) {
CERROR("failed to unwrap request: %x\n", *major);
- RETURN(-EACCES);
+ return -EACCES;
}
if (gss_check_seq_num(&gctx->gsc_seqdata, gw->gw_seq, 1)) {
CERROR("phase 1+: discard replayed req: seq %u\n", gw->gw_seq);
*major = GSS_S_DUPLICATE_TOKEN;
- RETURN(-EACCES);
+ return -EACCES;
}
swabbed = __lustre_unpack_msg(msg, msglen);
if (swabbed < 0) {
CERROR("Failed to unpack after decryption\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
req->rq_reqdata_len = msglen;
if (msg->lm_bufcount < 1) {
CERROR("Invalid buffer: is empty\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
if (msg->lm_bufcount < offset + 1) {
CERROR("no user descriptor included\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (sptlrpc_unpack_user_desc(msg, offset, swabbed)) {
CERROR("Mal-formed user descriptor\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
req->rq_pack_udesc = 1;
@@ -2226,11 +2207,11 @@ int gss_svc_unseal_request(struct ptlrpc_request *req,
if (gw->gw_flags & LUSTRE_GSS_PACK_BULK) {
if (msg->lm_bufcount < offset + 1) {
CERROR("no bulk checksum included\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (bulk_sec_desc_unpack(msg, offset, swabbed))
- RETURN(-EINVAL);
+ return -EINVAL;
req->rq_pack_bulk = 1;
grctx->src_reqbsd = lustre_msg_buf(msg, offset, 0);
@@ -2239,7 +2220,7 @@ int gss_svc_unseal_request(struct ptlrpc_request *req,
req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, 0, 0);
req->rq_reqlen = req->rq_reqbuf->lm_buflens[0];
- RETURN(0);
+ return 0;
}
static
@@ -2249,7 +2230,6 @@ int gss_svc_handle_data(struct ptlrpc_request *req,
struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
__u32 major = 0;
int rc = 0;
- ENTRY;
grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
if (!grctx->src_ctx) {
@@ -2272,7 +2252,7 @@ int gss_svc_handle_data(struct ptlrpc_request *req,
}
if (rc == 0)
- RETURN(SECSVC_OK);
+ return SECSVC_OK;
CERROR("svc %u failed: major 0x%08x: req xid "LPU64" ctx %p idx "
LPX64"(%u->%s)\n", gw->gw_svc, major, req->rq_xid,
@@ -2283,9 +2263,9 @@ error:
* might happen after server reboot, to allow recovery. */
if ((major == GSS_S_NO_CONTEXT || major == GSS_S_BAD_SIG) &&
gss_pack_err_notify(req, major, 0) == 0)
- RETURN(SECSVC_COMPLETE);
+ return SECSVC_COMPLETE;
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
}
static
@@ -2294,7 +2274,6 @@ int gss_svc_handle_destroy(struct ptlrpc_request *req,
{
struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
__u32 major;
- ENTRY;
req->rq_ctx_fini = 1;
req->rq_no_reply = 1;
@@ -2302,16 +2281,16 @@ int gss_svc_handle_destroy(struct ptlrpc_request *req,
grctx->src_ctx = gss_svc_upcall_get_ctx(req, gw);
if (!grctx->src_ctx) {
CDEBUG(D_SEC, "invalid gss context handle for destroy.\n");
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
}
if (gw->gw_svc != SPTLRPC_SVC_INTG) {
CERROR("svc %u is not supported in destroy.\n", gw->gw_svc);
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
}
if (gss_svc_verify_request(req, grctx, gw, &major))
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
CWARN("destroy svc ctx %p idx "LPX64" (%u->%s)\n",
grctx->src_ctx, gss_handle_to_u64(&gw->gw_handle),
@@ -2322,19 +2301,19 @@ int gss_svc_handle_destroy(struct ptlrpc_request *req,
if (gw->gw_flags & LUSTRE_GSS_PACK_USER) {
if (req->rq_reqbuf->lm_bufcount < 4) {
CERROR("missing user descriptor, ignore it\n");
- RETURN(SECSVC_OK);
+ return SECSVC_OK;
}
if (sptlrpc_unpack_user_desc(req->rq_reqbuf, 2,
ptlrpc_req_need_swab(req))) {
CERROR("Mal-formed user descriptor, ignore it\n");
- RETURN(SECSVC_OK);
+ return SECSVC_OK;
}
req->rq_pack_udesc = 1;
req->rq_user_desc = lustre_msg_buf(req->rq_reqbuf, 2, 0);
}
- RETURN(SECSVC_OK);
+ return SECSVC_OK;
}
int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
@@ -2343,14 +2322,13 @@ int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
struct gss_svc_reqctx *grctx;
struct gss_wire_ctx *gw;
int swabbed, rc;
- ENTRY;
LASSERT(req->rq_reqbuf);
LASSERT(req->rq_svc_ctx == NULL);
if (req->rq_reqbuf->lm_bufcount < 2) {
CERROR("buf count only %d\n", req->rq_reqbuf->lm_bufcount);
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
}
swabbed = ptlrpc_req_need_swab(req);
@@ -2358,14 +2336,14 @@ int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
ghdr = gss_swab_header(req->rq_reqbuf, 0, swabbed);
if (ghdr == NULL) {
CERROR("can't decode gss header\n");
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
}
/* sanity checks */
if (ghdr->gh_version != PTLRPC_GSS_VERSION) {
CERROR("gss version %u, expect %u\n", ghdr->gh_version,
PTLRPC_GSS_VERSION);
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
}
req->rq_sp_from = ghdr->gh_sp;
@@ -2373,7 +2351,7 @@ int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
/* alloc grctx data */
OBD_ALLOC_PTR(grctx);
if (!grctx)
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
grctx->src_base.sc_policy = sptlrpc_policy_get(policy);
atomic_set(&grctx->src_base.sc_refcount, 1);
@@ -2428,16 +2406,14 @@ int gss_svc_accept(struct ptlrpc_sec_policy *policy, struct ptlrpc_request *req)
break;
}
- RETURN(rc);
+ return rc;
}
void gss_svc_invalidate_ctx(struct ptlrpc_svc_ctx *svc_ctx)
{
struct gss_svc_reqctx *grctx;
- ENTRY;
if (svc_ctx == NULL) {
- EXIT;
return;
}
@@ -2446,8 +2422,6 @@ void gss_svc_invalidate_ctx(struct ptlrpc_svc_ctx *svc_ctx)
CWARN("gss svc invalidate ctx %p(%u)\n",
grctx->src_ctx, grctx->src_ctx->gsc_uid);
gss_svc_upcall_destroy_ctx(grctx->src_ctx);
-
- EXIT;
}
static inline
@@ -2496,13 +2470,12 @@ int gss_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
__u32 ibuflens[2], buflens[4];
int ibufcnt = 0, bufcnt;
int txtsize, wmsg_size, rs_size;
- ENTRY;
LASSERT(msglen % 8 == 0);
if (req->rq_pack_bulk && !req->rq_bulk_read && !req->rq_bulk_write) {
CERROR("client request bulk sec on non-bulk rpc\n");
- RETURN(-EPROTO);
+ return -EPROTO;
}
svc = SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc);
@@ -2575,7 +2548,7 @@ int gss_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
} else {
OBD_ALLOC_LARGE(rs, rs_size);
if (rs == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
rs->rs_size = rs_size;
}
@@ -2605,7 +2578,7 @@ int gss_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
LASSERT(rs->rs_msg);
req->rq_reply_state = rs;
- RETURN(0);
+ return 0;
}
static int gss_svc_seal(struct ptlrpc_request *req,
@@ -2619,7 +2592,6 @@ static int gss_svc_seal(struct ptlrpc_request *req,
int token_buflen;
__u32 buflens[2], major;
int msglen, rc;
- ENTRY;
/* get clear data length. note embedded lustre_msg might
* have been shrinked */
@@ -2647,7 +2619,7 @@ static int gss_svc_seal(struct ptlrpc_request *req,
token_buflen = gss_mech_payload(gctx->gsc_mechctx, msglen, 1);
OBD_ALLOC_LARGE(token_buf, token_buflen);
if (token_buf == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
hdrobj.len = PTLRPC_GSS_HEADER_SIZE;
hdrobj.data = (__u8 *) ghdr;
@@ -2703,7 +2675,7 @@ static int gss_svc_seal(struct ptlrpc_request *req,
rc = 0;
out_free:
OBD_FREE_LARGE(token_buf, token_buflen);
- RETURN(rc);
+ return rc;
}
int gss_svc_authorize(struct ptlrpc_request *req)
@@ -2712,7 +2684,6 @@ int gss_svc_authorize(struct ptlrpc_request *req)
struct gss_svc_reqctx *grctx = gss_svc_ctx2reqctx(req->rq_svc_ctx);
struct gss_wire_ctx *gw = &grctx->src_wirectx;
int early, rc;
- ENTRY;
early = (req->rq_packed_final == 0);
@@ -2720,7 +2691,7 @@ int gss_svc_authorize(struct ptlrpc_request *req)
LASSERT(rs->rs_repdata_len != 0);
req->rq_reply_off = gss_at_reply_off_integ;
- RETURN(0);
+ return 0;
}
/* early reply could happen in many cases */
@@ -2728,7 +2699,7 @@ int gss_svc_authorize(struct ptlrpc_request *req)
gw->gw_proc != PTLRPC_GSS_PROC_DATA &&
gw->gw_proc != PTLRPC_GSS_PROC_DESTROY) {
CERROR("proc %d not support\n", gw->gw_proc);
- RETURN(-EINVAL);
+ return -EINVAL;
}
LASSERT(grctx->src_ctx);
@@ -2749,7 +2720,7 @@ int gss_svc_authorize(struct ptlrpc_request *req)
rc = 0;
out:
- RETURN(rc);
+ return rc;
}
void gss_svc_free_rs(struct ptlrpc_reply_state *rs)
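Editorial note: the sec.c hunks above, and most of the hunks that follow, make the same mechanical change: the libcfs ENTRY/EXIT/RETURN tracing wrappers are dropped and every exit path becomes a plain return. The sketch below is only an illustration of that pattern, with stand-in printf() tracing instead of the real CDEBUG(D_TRACE, ...) expansion; the point is that the conversion leaves control flow untouched and only removes the trace side effect.

    /*
     * Illustration only: the real ENTRY/EXIT/RETURN macros live in the
     * libcfs debug headers and expand to CDEBUG(D_TRACE, ...) calls.
     * Here they are mocked with printf() so the sketch runs anywhere.
     */
    #include <stdio.h>

    #define ENTRY      printf("enter %s\n", __func__)
    #define EXIT       printf("leave %s\n", __func__)
    #define RETURN(rc) do { EXIT; return (rc); } while (0)

    /* old style: every exit path funnels through the tracing macro */
    static int old_style(int x)
    {
            ENTRY;
            if (x < 0)
                    RETURN(-1);
            RETURN(x * 2);
    }

    /* new style after the conversion: same control flow, no tracing */
    static int new_style(int x)
    {
            if (x < 0)
                    return -1;
            return x * 2;
    }

    int main(void)
    {
            printf("%d %d\n", old_style(3), new_style(3));
            return 0;
    }
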
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index 47a3c051273..5ca69aec72e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -195,7 +195,6 @@ int ptlrpc_set_import_discon(struct obd_import *imp, __u32 conn_cnt)
/* Must be called with imp_lock held! */
static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp)
{
- ENTRY;
LASSERT(spin_is_locked(&imp->imp_lock));
CDEBUG(D_HA, "setting import %s INVALID\n", obd2cli_tgt(imp->imp_obd));
@@ -205,8 +204,6 @@ static void ptlrpc_deactivate_and_unlock_import(struct obd_import *imp)
ptlrpc_abort_inflight(imp);
obd_import_event(imp->imp_obd, imp, IMP_EVENT_INACTIVE);
-
- EXIT;
}
/*
@@ -394,8 +391,6 @@ EXPORT_SYMBOL(ptlrpc_activate_import);
void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt)
{
- ENTRY;
-
LASSERT(!imp->imp_dlm_fake);
if (ptlrpc_set_import_discon(imp, conn_cnt)) {
@@ -417,7 +412,6 @@ void ptlrpc_fail_import(struct obd_import *imp, __u32 conn_cnt)
ptlrpc_pinger_wake_up();
}
- EXIT;
}
EXPORT_SYMBOL(ptlrpc_fail_import);
@@ -461,7 +455,6 @@ static int import_select_connection(struct obd_import *imp)
struct obd_export *dlmexp;
char *target_start;
int target_len, tried_all = 1;
- ENTRY;
spin_lock(&imp->imp_lock);
@@ -469,7 +462,7 @@ static int import_select_connection(struct obd_import *imp)
CERROR("%s: no connections available\n",
imp->imp_obd->obd_name);
spin_unlock(&imp->imp_lock);
- RETURN(-EINVAL);
+ return -EINVAL;
}
list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
@@ -558,7 +551,7 @@ static int import_select_connection(struct obd_import *imp)
spin_unlock(&imp->imp_lock);
- RETURN(0);
+ return 0;
}
/*
@@ -602,21 +595,20 @@ int ptlrpc_connect_import(struct obd_import *imp)
(char *)&imp->imp_connect_data };
struct ptlrpc_connect_async_args *aa;
int rc;
- ENTRY;
spin_lock(&imp->imp_lock);
if (imp->imp_state == LUSTRE_IMP_CLOSED) {
spin_unlock(&imp->imp_lock);
CERROR("can't connect to a closed import\n");
- RETURN(-EINVAL);
+ return -EINVAL;
} else if (imp->imp_state == LUSTRE_IMP_FULL) {
spin_unlock(&imp->imp_lock);
CERROR("already connected\n");
- RETURN(0);
+ return 0;
} else if (imp->imp_state == LUSTRE_IMP_CONNECTING) {
spin_unlock(&imp->imp_lock);
CERROR("already connecting\n");
- RETURN(-EALREADY);
+ return -EALREADY;
}
IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CONNECTING);
@@ -716,7 +708,7 @@ out:
IMPORT_SET_STATE(imp, LUSTRE_IMP_DISCON);
}
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(ptlrpc_connect_import);
@@ -756,13 +748,12 @@ static int ptlrpc_connect_interpret(const struct lu_env *env,
struct obd_connect_data *ocd;
struct obd_export *exp;
int ret;
- ENTRY;
spin_lock(&imp->imp_lock);
if (imp->imp_state == LUSTRE_IMP_CLOSED) {
imp->imp_connect_tried = 1;
spin_unlock(&imp->imp_lock);
- RETURN(0);
+ return 0;
}
if (rc) {
@@ -984,7 +975,7 @@ finish:
imp->imp_connection->c_remote_uuid.uuid);
ptlrpc_connect_import(imp);
imp->imp_connect_tried = 1;
- RETURN(0);
+ return 0;
}
} else {
@@ -1137,7 +1128,7 @@ out:
/* reply message might not be ready */
if (request->rq_repmsg == NULL)
- RETURN(-EPROTO);
+ return -EPROTO;
ocd = req_capsule_server_get(&request->rq_pill,
&RMF_CONNECT_DATA);
@@ -1161,7 +1152,7 @@ out:
ptlrpc_deactivate_import(imp);
IMPORT_SET_STATE(imp, LUSTRE_IMP_CLOSED);
}
- RETURN(-EPROTO);
+ return -EPROTO;
}
ptlrpc_maybe_ping_import_soon(imp);
@@ -1172,7 +1163,7 @@ out:
}
wake_up_all(&imp->imp_recovery_waitq);
- RETURN(rc);
+ return rc;
}
/**
@@ -1183,7 +1174,6 @@ static int completed_replay_interpret(const struct lu_env *env,
struct ptlrpc_request *req,
void * data, int rc)
{
- ENTRY;
atomic_dec(&req->rq_import->imp_replay_inflight);
if (req->rq_status == 0 &&
!req->rq_import->imp_vbr_failed) {
@@ -1202,7 +1192,7 @@ static int completed_replay_interpret(const struct lu_env *env,
ptlrpc_connect_import(req->rq_import);
}
- RETURN(0);
+ return 0;
}
/**
@@ -1212,10 +1202,9 @@ static int completed_replay_interpret(const struct lu_env *env,
static int signal_completed_replay(struct obd_import *imp)
{
struct ptlrpc_request *req;
- ENTRY;
if (unlikely(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_FINISH_REPLAY)))
- RETURN(0);
+ return 0;
LASSERT(atomic_read(&imp->imp_replay_inflight) == 0);
atomic_inc(&imp->imp_replay_inflight);
@@ -1224,7 +1213,7 @@ static int signal_completed_replay(struct obd_import *imp)
OBD_PING);
if (req == NULL) {
atomic_dec(&imp->imp_replay_inflight);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
ptlrpc_request_set_replen(req);
@@ -1236,7 +1225,7 @@ static int signal_completed_replay(struct obd_import *imp)
req->rq_interpret_reply = completed_replay_interpret;
ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
- RETURN(0);
+ return 0;
}
/**
@@ -1248,8 +1237,6 @@ static int ptlrpc_invalidate_import_thread(void *data)
{
struct obd_import *imp = data;
- ENTRY;
-
unshare_fs_struct();
CDEBUG(D_HA, "thread invalidate import %s to %s@%s\n",
@@ -1267,7 +1254,7 @@ static int ptlrpc_invalidate_import_thread(void *data)
ptlrpc_import_recovery_state_machine(imp);
class_import_put(imp);
- RETURN(0);
+ return 0;
}
/**
@@ -1297,7 +1284,6 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
char *target_start;
int target_len;
- ENTRY;
if (imp->imp_state == LUSTRE_IMP_EVICTED) {
deuuidify(obd2cli_tgt(imp->imp_obd), NULL,
&target_start, &target_len);
@@ -1319,7 +1305,7 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
spin_unlock(&imp->imp_lock);
{
- task_t *task;
+ struct task_struct *task;
/* bug 17802: XXX client_disconnect_export vs connect request
* race. if client will evicted at this time, we start
* invalidate thread without reference to import and import can
@@ -1334,7 +1320,7 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
} else {
rc = 0;
}
- RETURN(rc);
+ return rc;
}
}
@@ -1393,7 +1379,7 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
}
out:
- RETURN(rc);
+ return rc;
}
int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
@@ -1401,7 +1387,6 @@ int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
struct ptlrpc_request *req;
int rq_opc, rc = 0;
int nowait = imp->imp_obd->obd_force;
- ENTRY;
if (nowait)
GOTO(set_state, rc);
@@ -1413,7 +1398,7 @@ int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
default:
CERROR("don't know how to disconnect from %s (connect_op %d)\n",
obd2cli_tgt(imp->imp_obd), imp->imp_connect_op);
- RETURN(-EINVAL);
+ return -EINVAL;
}
if (ptlrpc_import_in_recovery(imp)) {
@@ -1476,21 +1461,17 @@ out:
memset(&imp->imp_remote_handle, 0, sizeof(imp->imp_remote_handle));
spin_unlock(&imp->imp_lock);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(ptlrpc_disconnect_import);
void ptlrpc_cleanup_imp(struct obd_import *imp)
{
- ENTRY;
-
spin_lock(&imp->imp_lock);
IMPORT_SET_STATE_NOLOCK(imp, LUSTRE_IMP_CLOSED);
imp->imp_generation++;
spin_unlock(&imp->imp_lock);
ptlrpc_abort_inflight(imp);
-
- EXIT;
}
EXPORT_SYMBOL(ptlrpc_cleanup_imp);
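Editorial note: besides the macro removal, the import.c hunks above also replace the obsolete task_t typedef with struct task_struct where the invalidate thread is spawned. The fragment below is a generic kthread sketch, not the exact import.c call site, showing the type the patch settles on together with the usual IS_ERR()/PTR_ERR() handling.

    /* Minimal kernel-module sketch of the struct task_struct usage. */
    #include <linux/module.h>
    #include <linux/kthread.h>
    #include <linux/delay.h>
    #include <linux/err.h>

    static struct task_struct *demo_task;   /* was: task_t *task; */

    static int demo_thread_fn(void *data)
    {
            /* placeholder for real work */
            while (!kthread_should_stop())
                    msleep(1000);
            return 0;
    }

    static int __init demo_init(void)
    {
            demo_task = kthread_run(demo_thread_fn, NULL, "demo_kthread");
            if (IS_ERR(demo_task))
                    return PTR_ERR(demo_task);
            return 0;
    }

    static void __exit demo_exit(void)
    {
            if (!IS_ERR_OR_NULL(demo_task))
                    kthread_stop(demo_task);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
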
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
index 367ca8ef7d6..379e59477ea 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_client.c
@@ -85,7 +85,6 @@ static int llog_client_open(const struct lu_env *env,
struct llog_ctxt *ctxt = lgh->lgh_ctxt;
struct ptlrpc_request *req = NULL;
int rc;
- ENTRY;
LLOG_CLIENT_ENTRY(ctxt, imp);
@@ -133,7 +132,6 @@ static int llog_client_open(const struct lu_env *env,
lgh->lgh_id = body->lgd_logid;
lgh->lgh_ctxt = ctxt;
- EXIT;
out:
LLOG_CLIENT_EXIT(ctxt, imp);
ptlrpc_req_finished(req);
@@ -147,7 +145,6 @@ static int llog_client_destroy(const struct lu_env *env,
struct ptlrpc_request *req = NULL;
struct llogd_body *body;
int rc;
- ENTRY;
LLOG_CLIENT_ENTRY(loghandle->lgh_ctxt, imp);
req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_DESTROY,
@@ -170,7 +167,7 @@ static int llog_client_destroy(const struct lu_env *env,
ptlrpc_req_finished(req);
err_exit:
LLOG_CLIENT_EXIT(loghandle->lgh_ctxt, imp);
- RETURN(rc);
+ return rc;
}
@@ -184,7 +181,6 @@ static int llog_client_next_block(const struct lu_env *env,
struct llogd_body *body;
void *ptr;
int rc;
- ENTRY;
LLOG_CLIENT_ENTRY(loghandle->lgh_ctxt, imp);
req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK,
@@ -221,7 +217,6 @@ static int llog_client_next_block(const struct lu_env *env,
*cur_offset = body->lgd_cur_offset;
memcpy(buf, ptr, len);
- EXIT;
out:
ptlrpc_req_finished(req);
err_exit:
@@ -238,7 +233,6 @@ static int llog_client_prev_block(const struct lu_env *env,
struct llogd_body *body;
void *ptr;
int rc;
- ENTRY;
LLOG_CLIENT_ENTRY(loghandle->lgh_ctxt, imp);
req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK,
@@ -270,7 +264,6 @@ static int llog_client_prev_block(const struct lu_env *env,
GOTO(out, rc =-EFAULT);
memcpy(buf, ptr, len);
- EXIT;
out:
ptlrpc_req_finished(req);
err_exit:
@@ -287,7 +280,6 @@ static int llog_client_read_header(const struct lu_env *env,
struct llog_log_hdr *hdr;
struct llog_rec_hdr *llh_hdr;
int rc;
- ENTRY;
LLOG_CLIENT_ENTRY(handle->lgh_ctxt, imp);
req = ptlrpc_request_alloc_pack(imp,&RQF_LLOG_ORIGIN_HANDLE_READ_HEADER,
@@ -326,7 +318,6 @@ static int llog_client_read_header(const struct lu_env *env,
CERROR("you may need to re-run lconf --write_conf.\n");
rc = -EIO;
}
- EXIT;
out:
ptlrpc_req_finished(req);
err_exit:
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
index a81f557d779..17c06a32df6 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_net.c
@@ -57,7 +57,6 @@
int llog_initiator_connect(struct llog_ctxt *ctxt)
{
struct obd_import *new_imp;
- ENTRY;
LASSERT(ctxt);
new_imp = ctxt->loc_obd->u.cli.cl_import;
@@ -70,6 +69,6 @@ int llog_initiator_connect(struct llog_ctxt *ctxt)
ctxt->loc_imp = class_import_get(new_imp);
}
mutex_unlock(&ctxt->loc_mutex);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(llog_initiator_connect);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/llog_server.c b/drivers/staging/lustre/lustre/ptlrpc/llog_server.c
index bc1fcd8c7e7..af9d2ac391e 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/llog_server.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/llog_server.c
@@ -71,11 +71,9 @@ int llog_origin_handle_open(struct ptlrpc_request *req)
char *name = NULL;
int rc;
- ENTRY;
-
body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
if (body == NULL)
- RETURN(-EFAULT);
+ return -EFAULT;
if (ostid_id(&body->lgd_logid.lgl_oi) > 0)
logid = &body->lgd_logid;
@@ -83,7 +81,7 @@ int llog_origin_handle_open(struct ptlrpc_request *req)
if (req_capsule_field_present(&req->rq_pill, &RMF_NAME, RCL_CLIENT)) {
name = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
if (name == NULL)
- RETURN(-EFAULT);
+ return -EFAULT;
CDEBUG(D_INFO, "%s: opening log %s\n", obd->obd_name, name);
}
@@ -91,7 +89,7 @@ int llog_origin_handle_open(struct ptlrpc_request *req)
if (ctxt == NULL) {
CDEBUG(D_WARNING, "%s: no ctxt. group=%p idx=%d name=%s\n",
obd->obd_name, &obd->obd_olg, body->lgd_ctxt_idx, name);
- RETURN(-ENODEV);
+ return -ENODEV;
}
disk_obd = ctxt->loc_exp->exp_obd;
push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
@@ -108,7 +106,6 @@ int llog_origin_handle_open(struct ptlrpc_request *req)
body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY);
body->lgd_logid = loghandle->lgh_id;
- EXIT;
out_close:
llog_origin_close(req->rq_svc_thread->t_env, loghandle);
out_pop:
@@ -127,11 +124,9 @@ int llog_origin_handle_destroy(struct ptlrpc_request *req)
struct llog_ctxt *ctxt;
int rc;
- ENTRY;
-
body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
if (body == NULL)
- RETURN(-EFAULT);
+ return -EFAULT;
if (ostid_id(&body->lgd_logid.lgl_oi) > 0)
logid = &body->lgd_logid;
@@ -142,7 +137,7 @@ int llog_origin_handle_destroy(struct ptlrpc_request *req)
ctxt = llog_get_context(req->rq_export->exp_obd, body->lgd_ctxt_idx);
if (ctxt == NULL)
- RETURN(-ENODEV);
+ return -ENODEV;
disk_obd = ctxt->loc_exp->exp_obd;
push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
@@ -153,7 +148,7 @@ int llog_origin_handle_destroy(struct ptlrpc_request *req)
rc = llog_erase(req->rq_svc_thread->t_env, ctxt, logid, NULL);
pop_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
llog_ctxt_put(ctxt);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(llog_origin_handle_destroy);
@@ -169,15 +164,13 @@ int llog_origin_handle_next_block(struct ptlrpc_request *req)
void *ptr;
int rc;
- ENTRY;
-
body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
if (body == NULL)
- RETURN(-EFAULT);
+ return -EFAULT;
ctxt = llog_get_context(req->rq_export->exp_obd, body->lgd_ctxt_idx);
if (ctxt == NULL)
- RETURN(-ENODEV);
+ return -ENODEV;
disk_obd = ctxt->loc_exp->exp_obd;
push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
@@ -208,7 +201,6 @@ int llog_origin_handle_next_block(struct ptlrpc_request *req)
&repbody->lgd_cur_offset, ptr, LLOG_CHUNK_SIZE);
if (rc)
GOTO(out_close, rc);
- EXIT;
out_close:
llog_origin_close(req->rq_svc_thread->t_env, loghandle);
out_pop:
@@ -230,15 +222,13 @@ int llog_origin_handle_prev_block(struct ptlrpc_request *req)
void *ptr;
int rc;
- ENTRY;
-
body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
if (body == NULL)
- RETURN(-EFAULT);
+ return -EFAULT;
ctxt = llog_get_context(req->rq_export->exp_obd, body->lgd_ctxt_idx);
if (ctxt == NULL)
- RETURN(-ENODEV);
+ return -ENODEV;
disk_obd = ctxt->loc_exp->exp_obd;
push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
@@ -269,7 +259,6 @@ int llog_origin_handle_prev_block(struct ptlrpc_request *req)
if (rc)
GOTO(out_close, rc);
- EXIT;
out_close:
llog_origin_close(req->rq_svc_thread->t_env, loghandle);
out_pop:
@@ -290,15 +279,13 @@ int llog_origin_handle_read_header(struct ptlrpc_request *req)
__u32 flags;
int rc;
- ENTRY;
-
body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
if (body == NULL)
- RETURN(-EFAULT);
+ return -EFAULT;
ctxt = llog_get_context(req->rq_export->exp_obd, body->lgd_ctxt_idx);
if (ctxt == NULL)
- RETURN(-ENODEV);
+ return -ENODEV;
disk_obd = ctxt->loc_exp->exp_obd;
push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
@@ -324,7 +311,6 @@ int llog_origin_handle_read_header(struct ptlrpc_request *req)
hdr = req_capsule_server_get(&req->rq_pill, &RMF_LLOG_LOG_HDR);
*hdr = *loghandle->lgh_hdr;
- EXIT;
out_close:
llog_origin_close(req->rq_svc_thread->t_env, loghandle);
out_pop:
@@ -336,9 +322,8 @@ EXPORT_SYMBOL(llog_origin_handle_read_header);
int llog_origin_handle_close(struct ptlrpc_request *req)
{
- ENTRY;
/* Nothing to do */
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(llog_origin_handle_close);
@@ -352,20 +337,19 @@ int llog_origin_handle_cancel(struct ptlrpc_request *req)
struct llog_handle *cathandle;
struct inode *inode;
void *handle;
- ENTRY;
logcookies = req_capsule_client_get(&req->rq_pill, &RMF_LOGCOOKIES);
num_cookies = req_capsule_get_size(&req->rq_pill, &RMF_LOGCOOKIES,
RCL_CLIENT) / sizeof(*logcookies);
if (logcookies == NULL || num_cookies == 0) {
DEBUG_REQ(D_HA, req, "No llog cookies sent");
- RETURN(-EFAULT);
+ return -EFAULT;
}
ctxt = llog_get_context(req->rq_export->exp_obd,
logcookies->lgc_subsys);
if (ctxt == NULL)
- RETURN(-ENODEV);
+ return -ENODEV;
disk_obd = ctxt->loc_exp->exp_obd;
push_ctxt(&saved, &disk_obd->obd_lvfs_ctxt, NULL);
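Editorial note: the llog handler hunks above keep their goto-based unwinding (out_close/out_pop, bracketed by push_ctxt()/pop_ctxt()); only the EXIT markers in front of the labels disappear. The userspace sketch below is merely an analogue of that idiom and assumes nothing about the real llog or lvfs helpers: every path, success included, funnels through the same cleanup labels, so dropping the tracing line changes nothing about resource release.

    #include <stdio.h>
    #include <stdlib.h>

    static int handler(size_t n)
    {
            int rc = 0;
            char *ctxt = NULL, *handle = NULL;

            ctxt = malloc(64);              /* push_ctxt() analogue */
            if (!ctxt)
                    return -1;

            handle = malloc(n);             /* handle-open analogue */
            if (!handle) {
                    rc = -1;
                    goto out_pop;
            }

            if (n < 8) {                    /* some validation failure */
                    rc = -2;
                    goto out_close;
            }

            /* real work here; success also falls through the labels */

    out_close:
            free(handle);
    out_pop:
            free(ctxt);                     /* pop_ctxt() analogue */
            return rc;
    }

    int main(void)
    {
            printf("rc=%d\n", handler(4));
            return 0;
    }
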
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index 3e7325499d0..bea44a3d4a2 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -302,7 +302,7 @@ ptlrpc_lprocfs_req_history_max_seq_write(struct file *file, const char *buffer,
* hose a kernel by allowing the request history to grow too
* far. */
bufpages = (svc->srv_buf_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
- if (val > num_physpages/(2 * bufpages))
+ if (val > totalram_pages / (2 * bufpages))
return -ERANGE;
spin_lock(&svc->srv_lock);
@@ -480,7 +480,6 @@ static int ptlrpc_lprocfs_nrs_seq_show(struct seq_file *m, void *n)
bool hp = false;
int i;
int rc = 0;
- ENTRY;
/**
* Serialize NRS core lprocfs operations with policy registration/
@@ -613,7 +612,7 @@ out:
mutex_unlock(&nrs_core.nrs_mutex);
- RETURN(rc);
+ return rc;
}
/**
@@ -638,7 +637,6 @@ static ssize_t ptlrpc_lprocfs_nrs_seq_write(struct file *file, const char *buffe
char *cmd_copy = NULL;
char *token;
int rc = 0;
- ENTRY;
if (count >= LPROCFS_NRS_WR_MAX_CMD)
GOTO(out, rc = -EINVAL);
@@ -698,7 +696,7 @@ out:
if (cmd_copy)
OBD_FREE(cmd_copy, LPROCFS_NRS_WR_MAX_CMD);
- RETURN(rc < 0 ? rc : count);
+ return rc < 0 ? rc : count;
}
LPROC_SEQ_FOPS(ptlrpc_lprocfs_nrs);
@@ -1217,13 +1215,12 @@ int lprocfs_wr_ping(struct file *file, const char *buffer,
struct obd_device *obd = ((struct seq_file *)file->private_data)->private;
struct ptlrpc_request *req;
int rc;
- ENTRY;
LPROCFS_CLIMP_CHECK(obd);
req = ptlrpc_prep_ping(obd->u.cli.cl_import);
LPROCFS_CLIMP_EXIT(obd);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
req->rq_send_state = LUSTRE_IMP_FULL;
@@ -1231,8 +1228,8 @@ int lprocfs_wr_ping(struct file *file, const char *buffer,
ptlrpc_req_finished(req);
if (rc >= 0)
- RETURN(count);
- RETURN(rc);
+ return count;
+ return rc;
}
EXPORT_SYMBOL(lprocfs_wr_ping);
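Editorial note: one functional change hides among the lproc hunks above: the request-history cap is now derived from totalram_pages instead of the removed num_physpages global, still limiting the history to at most half of physical memory. The sketch below reproduces only that arithmetic in userspace, using sysconf() as a stand-in for the kernel's page accounting.

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            long page_size   = sysconf(_SC_PAGESIZE);
            long total_pages = sysconf(_SC_PHYS_PAGES); /* ~ totalram_pages */
            long buf_size    = 17000;                   /* e.g. srv_buf_size */
            long bufpages    = (buf_size + page_size - 1) / page_size;
            long max_entries = total_pages / (2 * bufpages);

            /* a larger value written via lprocfs would be rejected (-ERANGE) */
            printf("request history capped at %ld entries\n", max_entries);
            return 0;
    }
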
diff --git a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
index de3f0db0ba4..a0e009717a5 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/niobuf.c
@@ -54,7 +54,6 @@ static int ptl_send_buf (lnet_handle_md_t *mdh, void *base, int len,
{
int rc;
lnet_md_t md;
- ENTRY;
LASSERT (portal != 0);
LASSERT (conn != NULL);
@@ -76,7 +75,7 @@ static int ptl_send_buf (lnet_handle_md_t *mdh, void *base, int len,
if (unlikely(rc != 0)) {
CERROR ("LNetMDBind failed: %d\n", rc);
LASSERT (rc == -ENOMEM);
- RETURN (-ENOMEM);
+ return -ENOMEM;
}
CDEBUG(D_NET, "Sending %d bytes to portal %d, xid "LPD64", offset %u\n",
@@ -95,7 +94,7 @@ static int ptl_send_buf (lnet_handle_md_t *mdh, void *base, int len,
LASSERTF(rc2 == 0, "rc2 = %d\n", rc2);
}
- RETURN (0);
+ return 0;
}
static void mdunlink_iterate_helper(lnet_handle_md_t *bd_mds, int count)
@@ -122,10 +121,9 @@ int ptlrpc_register_bulk(struct ptlrpc_request *req)
__u64 xid;
lnet_handle_me_t me_h;
lnet_md_t md;
- ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_BULK_GET_NET))
- RETURN(0);
+ return 0;
/* NB no locking required until desc is on the network */
LASSERT(desc->bd_nob > 0);
@@ -207,7 +205,7 @@ int ptlrpc_register_bulk(struct ptlrpc_request *req)
LASSERT(desc->bd_md_count >= 0);
mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
req->rq_status = -ENOMEM;
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
/* Set rq_xid to matchbits of the final bulk so that server can
@@ -231,7 +229,7 @@ int ptlrpc_register_bulk(struct ptlrpc_request *req)
desc->bd_iov_count, desc->bd_nob,
desc->bd_last_xid, req->rq_xid, desc->bd_portal);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ptlrpc_register_bulk);
@@ -247,7 +245,6 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
wait_queue_head_t *wq;
struct l_wait_info lwi;
int rc;
- ENTRY;
LASSERT(!in_interrupt()); /* might sleep */
@@ -257,7 +254,7 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
req->rq_bulk_deadline = cfs_time_current_sec() + LONG_UNLINK;
if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
- RETURN(1); /* never registered */
+ return 1; /* never registered */
LASSERT(desc->bd_req == req); /* bd_req NULL until registered */
@@ -268,14 +265,14 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw);
if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
- RETURN(1); /* never registered */
+ return 1; /* never registered */
/* Move to "Unregistering" phase as bulk was not unlinked yet. */
ptlrpc_rqphase_move(req, RQ_PHASE_UNREGISTERING);
/* Do not wait for unlink to finish. */
if (async)
- RETURN(0);
+ return 0;
if (req->rq_set != NULL)
wq = &req->rq_set->set_waitq;
@@ -290,14 +287,14 @@ int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async)
rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi);
if (rc == 0) {
ptlrpc_rqphase_move(req, req->rq_next_phase);
- RETURN(1);
+ return 1;
}
LASSERT(rc == -ETIMEDOUT);
DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p",
desc);
}
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ptlrpc_unregister_bulk);
@@ -400,7 +397,8 @@ int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
req->rq_type = PTL_RPC_MSG_REPLY;
lustre_msg_set_type(req->rq_repmsg, req->rq_type);
- lustre_msg_set_status(req->rq_repmsg, req->rq_status);
+ lustre_msg_set_status(req->rq_repmsg,
+ ptlrpc_status_hton(req->rq_status));
lustre_msg_set_opc(req->rq_repmsg,
req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : 0);
@@ -455,15 +453,14 @@ EXPORT_SYMBOL(ptlrpc_reply);
int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult)
{
int rc;
- ENTRY;
if (req->rq_no_reply)
- RETURN(0);
+ return 0;
if (!req->rq_repmsg) {
rc = lustre_pack_reply(req, 1, NULL, NULL);
if (rc)
- RETURN(rc);
+ return rc;
}
if (req->rq_status != -ENOSPC && req->rq_status != -EACCES &&
@@ -472,7 +469,7 @@ int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult)
req->rq_type = PTL_RPC_MSG_ERR;
rc = ptlrpc_send_reply(req, may_be_difficult);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(ptlrpc_send_error);
@@ -497,10 +494,9 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
lnet_handle_me_t reply_me_h;
lnet_md_t reply_md;
struct obd_device *obd = request->rq_import->imp_obd;
- ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_RPC))
- RETURN(0);
+ return 0;
LASSERT(request->rq_type == PTL_RPC_MSG_REQUEST);
LASSERT(request->rq_wait_ctx == 0);
@@ -516,7 +512,7 @@ int ptl_send_rpc(struct ptlrpc_request *request, int noreply)
/* this prevents us from waiting in ptlrpc_queue_wait */
request->rq_err = 1;
request->rq_status = -ENODEV;
- RETURN(-ENODEV);
+ return -ENODEV;
}
connection = request->rq_import->imp_connection;
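Editorial note: the niobuf.c hunk above is another behavioural fix: the reply status is now passed through ptlrpc_status_hton() before it is packed, and the pack_generic.c change further down applies the inverse on unpack. The translation pair below is hypothetical (the real tables and wire values differ); it only illustrates why a host-to-wire errno mapping must be applied symmetrically on both ends so peers with different errno numbering still agree on the error.

    #include <stdio.h>
    #include <errno.h>

    enum wire_status { WIRE_OK = 0, WIRE_EPERM = 1, WIRE_ENOMEM = 2, WIRE_EPROTO = 3 };

    /* host errno -> canonical wire code (hypothetical mapping) */
    static int status_hton(int host_err)
    {
            switch (host_err) {
            case 0:        return WIRE_OK;
            case -EPERM:   return WIRE_EPERM;
            case -ENOMEM:  return WIRE_ENOMEM;
            default:       return WIRE_EPROTO;      /* lossy fallback */
            }
    }

    /* canonical wire code -> host errno */
    static int status_ntoh(int wire_err)
    {
            switch (wire_err) {
            case WIRE_OK:     return 0;
            case WIRE_EPERM:  return -EPERM;
            case WIRE_ENOMEM: return -ENOMEM;
            default:          return -EPROTO;
            }
    }

    int main(void)
    {
            int sent = status_hton(-ENOMEM);
            printf("wire=%d decoded=%d\n", sent, status_ntoh(sent));
            return 0;
    }
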
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
index 1996431e35f..0abcd6d8227 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -81,17 +81,16 @@ static int nrs_policy_ctl_locked(struct ptlrpc_nrs_policy *policy,
* policy->pol_private will be NULL in such a case.
*/
if (policy->pol_state == NRS_POL_STATE_STOPPED)
- RETURN(-ENODEV);
+ return -ENODEV;
- RETURN(policy->pol_desc->pd_ops->op_policy_ctl != NULL ?
+ return policy->pol_desc->pd_ops->op_policy_ctl != NULL ?
policy->pol_desc->pd_ops->op_policy_ctl(policy, opc, arg) :
- -ENOSYS);
+ -ENOSYS;
}
static void nrs_policy_stop0(struct ptlrpc_nrs_policy *policy)
{
struct ptlrpc_nrs *nrs = policy->pol_nrs;
- ENTRY;
if (policy->pol_desc->pd_ops->op_policy_stop != NULL) {
spin_unlock(&nrs->nrs_lock);
@@ -111,24 +110,21 @@ static void nrs_policy_stop0(struct ptlrpc_nrs_policy *policy)
if (atomic_dec_and_test(&policy->pol_desc->pd_refs))
module_put(policy->pol_desc->pd_owner);
-
- EXIT;
}
static int nrs_policy_stop_locked(struct ptlrpc_nrs_policy *policy)
{
struct ptlrpc_nrs *nrs = policy->pol_nrs;
- ENTRY;
if (nrs->nrs_policy_fallback == policy && !nrs->nrs_stopping)
- RETURN(-EPERM);
+ return -EPERM;
if (policy->pol_state == NRS_POL_STATE_STARTING)
- RETURN(-EAGAIN);
+ return -EAGAIN;
/* In progress or already stopped */
if (policy->pol_state != NRS_POL_STATE_STARTED)
- RETURN(0);
+ return 0;
policy->pol_state = NRS_POL_STATE_STOPPING;
@@ -145,7 +141,7 @@ static int nrs_policy_stop_locked(struct ptlrpc_nrs_policy *policy)
if (policy->pol_ref == 1)
nrs_policy_stop0(policy);
- RETURN(0);
+ return 0;
}
/**
@@ -158,15 +154,8 @@ static int nrs_policy_stop_locked(struct ptlrpc_nrs_policy *policy)
static void nrs_policy_stop_primary(struct ptlrpc_nrs *nrs)
{
struct ptlrpc_nrs_policy *tmp = nrs->nrs_policy_primary;
- ENTRY;
if (tmp == NULL) {
- /**
- * XXX: This should really be RETURN_EXIT, but the latter does
- * not currently print anything out, and possibly should be
- * fixed to do so.
- */
- EXIT;
return;
}
@@ -177,7 +166,6 @@ static void nrs_policy_stop_primary(struct ptlrpc_nrs *nrs)
if (tmp->pol_ref == 0)
nrs_policy_stop0(tmp);
- EXIT;
}
/**
@@ -203,19 +191,18 @@ static int nrs_policy_start_locked(struct ptlrpc_nrs_policy *policy)
{
struct ptlrpc_nrs *nrs = policy->pol_nrs;
int rc = 0;
- ENTRY;
/**
* Don't allow multiple starting which is too complex, and has no real
* benefit.
*/
if (nrs->nrs_policy_starting)
- RETURN(-EAGAIN);
+ return -EAGAIN;
LASSERT(policy->pol_state != NRS_POL_STATE_STARTING);
if (policy->pol_state == NRS_POL_STATE_STOPPING)
- RETURN(-EAGAIN);
+ return -EAGAIN;
if (policy->pol_flags & PTLRPC_NRS_FL_FALLBACK) {
/**
@@ -226,7 +213,7 @@ static int nrs_policy_start_locked(struct ptlrpc_nrs_policy *policy)
*/
if (policy == nrs->nrs_policy_fallback) {
nrs_policy_stop_primary(nrs);
- RETURN(0);
+ return 0;
}
/**
@@ -241,10 +228,10 @@ static int nrs_policy_start_locked(struct ptlrpc_nrs_policy *policy)
* Shouldn't start primary policy if w/o fallback policy.
*/
if (nrs->nrs_policy_fallback == NULL)
- RETURN(-EPERM);
+ return -EPERM;
if (policy->pol_state == NRS_POL_STATE_STARTED)
- RETURN(0);
+ return 0;
}
/**
@@ -256,7 +243,7 @@ static int nrs_policy_start_locked(struct ptlrpc_nrs_policy *policy)
atomic_dec(&policy->pol_desc->pd_refs);
CERROR("NRS: cannot get module for policy %s; is it alive?\n",
policy->pol_desc->pd_name);
- RETURN(-ENODEV);
+ return -ENODEV;
}
/**
@@ -303,7 +290,7 @@ static int nrs_policy_start_locked(struct ptlrpc_nrs_policy *policy)
out:
nrs->nrs_policy_starting = 0;
- RETURN(rc);
+ return rc;
}
/**
@@ -644,7 +631,6 @@ static int nrs_policy_ctl(struct ptlrpc_nrs *nrs, char *name,
{
struct ptlrpc_nrs_policy *policy;
int rc = 0;
- ENTRY;
spin_lock(&nrs->nrs_lock);
@@ -674,7 +660,7 @@ out:
spin_unlock(&nrs->nrs_lock);
- RETURN(rc);
+ return rc;
}
/**
@@ -690,7 +676,6 @@ out:
static int nrs_policy_unregister(struct ptlrpc_nrs *nrs, char *name)
{
struct ptlrpc_nrs_policy *policy = NULL;
- ENTRY;
spin_lock(&nrs->nrs_lock);
@@ -699,7 +684,7 @@ static int nrs_policy_unregister(struct ptlrpc_nrs *nrs, char *name)
spin_unlock(&nrs->nrs_lock);
CERROR("Can't find NRS policy %s\n", name);
- RETURN(-ENOENT);
+ return -ENOENT;
}
if (policy->pol_ref > 1) {
@@ -708,7 +693,7 @@ static int nrs_policy_unregister(struct ptlrpc_nrs *nrs, char *name)
nrs_policy_put_locked(policy);
spin_unlock(&nrs->nrs_lock);
- RETURN(-EBUSY);
+ return -EBUSY;
}
LASSERT(policy->pol_req_queued == 0);
@@ -731,7 +716,7 @@ static int nrs_policy_unregister(struct ptlrpc_nrs *nrs, char *name)
LASSERT(policy->pol_private == NULL);
OBD_FREE_PTR(policy);
- RETURN(0);
+ return 0;
}
/**
@@ -751,7 +736,6 @@ static int nrs_policy_register(struct ptlrpc_nrs *nrs,
struct ptlrpc_nrs_policy *tmp;
struct ptlrpc_service_part *svcpt = nrs->nrs_svcpt;
int rc;
- ENTRY;
LASSERT(svcpt != NULL);
LASSERT(desc->pd_ops != NULL);
@@ -764,7 +748,7 @@ static int nrs_policy_register(struct ptlrpc_nrs *nrs,
OBD_CPT_ALLOC_GFP(policy, svcpt->scp_service->srv_cptable,
svcpt->scp_cpt, sizeof(*policy), __GFP_IO);
if (policy == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
policy->pol_nrs = nrs;
policy->pol_desc = desc;
@@ -777,7 +761,7 @@ static int nrs_policy_register(struct ptlrpc_nrs *nrs,
rc = nrs_policy_init(policy);
if (rc != 0) {
OBD_FREE_PTR(policy);
- RETURN(rc);
+ return rc;
}
spin_lock(&nrs->nrs_lock);
@@ -793,7 +777,7 @@ static int nrs_policy_register(struct ptlrpc_nrs *nrs,
nrs_policy_fini(policy);
OBD_FREE_PTR(policy);
- RETURN(-EEXIST);
+ return -EEXIST;
}
list_add_tail(&policy->pol_list, &nrs->nrs_policy_list);
@@ -807,7 +791,7 @@ static int nrs_policy_register(struct ptlrpc_nrs *nrs,
if (rc != 0)
(void) nrs_policy_unregister(nrs, policy->pol_desc->pd_name);
- RETURN(rc);
+ return rc;
}
/**
@@ -844,7 +828,6 @@ static void ptlrpc_nrs_req_add_nolock(struct ptlrpc_request *req)
static void ptlrpc_nrs_hpreq_add_nolock(struct ptlrpc_request *req)
{
int opc = lustre_msg_get_opc(req->rq_reqmsg);
- ENTRY;
spin_lock(&req->rq_lock);
req->rq_hp = 1;
@@ -852,7 +835,6 @@ static void ptlrpc_nrs_hpreq_add_nolock(struct ptlrpc_request *req)
if (opc != OBD_PING)
DEBUG_REQ(D_NET, req, "high priority req");
spin_unlock(&req->rq_lock);
- EXIT;
}
/**
@@ -891,7 +873,6 @@ static int nrs_register_policies_locked(struct ptlrpc_nrs *nrs)
struct ptlrpc_service_part *svcpt = nrs->nrs_svcpt;
struct ptlrpc_service *svc = svcpt->scp_service;
int rc = -EINVAL;
- ENTRY;
LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
@@ -912,7 +893,7 @@ static int nrs_register_policies_locked(struct ptlrpc_nrs *nrs)
}
}
- RETURN(rc);
+ return rc;
}
/**
@@ -950,7 +931,7 @@ static int nrs_svcpt_setup_locked0(struct ptlrpc_nrs *nrs,
rc = nrs_register_policies_locked(nrs);
- RETURN(rc);
+ return rc;
}
/**
@@ -966,7 +947,6 @@ static int nrs_svcpt_setup_locked(struct ptlrpc_service_part *svcpt)
{
struct ptlrpc_nrs *nrs;
int rc;
- ENTRY;
LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
@@ -994,7 +974,7 @@ static int nrs_svcpt_setup_locked(struct ptlrpc_service_part *svcpt)
rc = nrs_svcpt_setup_locked0(nrs, svcpt);
out:
- RETURN(rc);
+ return rc;
}
/**
@@ -1012,7 +992,6 @@ static void nrs_svcpt_cleanup_locked(struct ptlrpc_service_part *svcpt)
struct ptlrpc_nrs_policy *tmp;
int rc;
bool hp = false;
- ENTRY;
LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
@@ -1036,8 +1015,6 @@ again:
if (hp)
OBD_FREE_PTR(nrs);
-
- EXIT;
}
/**
@@ -1051,13 +1028,12 @@ again:
static struct ptlrpc_nrs_pol_desc *nrs_policy_find_desc_locked(const char *name)
{
struct ptlrpc_nrs_pol_desc *tmp;
- ENTRY;
list_for_each_entry(tmp, &nrs_core.nrs_policies, pd_list) {
if (strncmp(tmp->pd_name, name, NRS_POL_NAME_MAX) == 0)
- RETURN(tmp);
+ return tmp;
}
- RETURN(NULL);
+ return NULL;
}
/**
@@ -1079,7 +1055,6 @@ static int nrs_policy_unregister_locked(struct ptlrpc_nrs_pol_desc *desc)
struct ptlrpc_service_part *svcpt;
int i;
int rc = 0;
- ENTRY;
LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
LASSERT(mutex_is_locked(&ptlrpc_all_services_mutex));
@@ -1107,7 +1082,7 @@ again:
"partition %d of service %s: %d\n",
desc->pd_name, svcpt->scp_cpt,
svcpt->scp_service->srv_name, rc);
- RETURN(rc);
+ return rc;
}
if (!hp && nrs_svc_has_hp(svc)) {
@@ -1120,7 +1095,7 @@ again:
desc->pd_ops->op_lprocfs_fini(svc);
}
- RETURN(rc);
+ return rc;
}
/**
@@ -1143,7 +1118,6 @@ int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf)
struct ptlrpc_service *svc;
struct ptlrpc_nrs_pol_desc *desc;
int rc = 0;
- ENTRY;
LASSERT(conf != NULL);
LASSERT(conf->nc_ops != NULL);
@@ -1171,7 +1145,7 @@ int ptlrpc_nrs_policy_register(struct ptlrpc_nrs_pol_conf *conf)
"policy flags; external policies cannot act as fallback "
"policies, or be started immediately upon registration "
"without interaction with lprocfs\n", conf->nc_name);
- RETURN(-EINVAL);
+ return -EINVAL;
}
mutex_lock(&nrs_core.nrs_mutex);
@@ -1274,7 +1248,7 @@ internal:
fail:
mutex_unlock(&nrs_core.nrs_mutex);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(ptlrpc_nrs_policy_register);
@@ -1296,14 +1270,13 @@ int ptlrpc_nrs_policy_unregister(struct ptlrpc_nrs_pol_conf *conf)
{
struct ptlrpc_nrs_pol_desc *desc;
int rc;
- ENTRY;
LASSERT(conf != NULL);
if (conf->nc_flags & PTLRPC_NRS_FL_FALLBACK) {
CERROR("Unable to unregister a fallback policy, unless the "
"PTLRPC service is stopping.\n");
- RETURN(-EPERM);
+ return -EPERM;
}
conf->nc_name[NRS_POL_NAME_MAX - 1] = '\0';
@@ -1341,7 +1314,7 @@ fail:
not_exist:
mutex_unlock(&nrs_core.nrs_mutex);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(ptlrpc_nrs_policy_unregister);
@@ -1396,7 +1369,7 @@ failed:
mutex_unlock(&nrs_core.nrs_mutex);
- RETURN(rc);
+ return rc;
}
/**
@@ -1630,7 +1603,6 @@ void ptlrpc_nrs_req_hp_move(struct ptlrpc_request *req)
struct ptlrpc_nrs_request *nrq = &req->rq_nrq;
struct ptlrpc_nrs_resource *res1[NRS_RES_MAX];
struct ptlrpc_nrs_resource *res2[NRS_RES_MAX];
- ENTRY;
/**
* Obtain the high-priority NRS head resources.
@@ -1660,7 +1632,6 @@ out:
* returned false.
*/
nrs_resource_put_safe(res1);
- EXIT;
}
/**
@@ -1696,7 +1667,6 @@ int ptlrpc_nrs_policy_control(const struct ptlrpc_service *svc,
struct ptlrpc_service_part *svcpt;
int i;
int rc = 0;
- ENTRY;
LASSERT(opc != PTLRPC_NRS_CTL_INVALID);
@@ -1728,7 +1698,7 @@ int ptlrpc_nrs_policy_control(const struct ptlrpc_service *svc,
}
}
out:
- RETURN(rc);
+ return rc;
}
@@ -1745,7 +1715,6 @@ extern struct ptlrpc_nrs_pol_conf nrs_conf_fifo;
int ptlrpc_nrs_init(void)
{
int rc;
- ENTRY;
mutex_init(&nrs_core.nrs_mutex);
INIT_LIST_HEAD(&nrs_core.nrs_policies);
@@ -1755,7 +1724,7 @@ int ptlrpc_nrs_init(void)
GOTO(fail, rc);
- RETURN(rc);
+ return rc;
fail:
/**
* Since no PTLRPC services have been started at this point, all we need
@@ -1763,7 +1732,7 @@ fail:
*/
ptlrpc_nrs_fini();
- RETURN(rc);
+ return rc;
}
/**
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index 1437636dfe2..cd2611a3b53 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -115,7 +115,7 @@ int lustre_msg_check_version(struct lustre_msg *msg, __u32 version)
EXPORT_SYMBOL(lustre_msg_check_version);
/* early reply size */
-int lustre_msg_early_size()
+int lustre_msg_early_size(void)
{
static int size = 0;
if (!size) {
@@ -329,7 +329,6 @@ int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
{
struct ptlrpc_reply_state *rs;
int msg_len, rc;
- ENTRY;
LASSERT(req->rq_reply_state == NULL);
@@ -342,7 +341,7 @@ int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
msg_len = lustre_msg_size_v2(count, lens);
rc = sptlrpc_svc_alloc_rs(req, msg_len);
if (rc)
- RETURN(rc);
+ return rc;
rs = req->rq_reply_state;
atomic_set(&rs->rs_refcount, 1); /* 1 ref for rq_reply_state */
@@ -363,7 +362,7 @@ int lustre_pack_reply_v2(struct ptlrpc_request *req, int count,
PTLRPC_RS_DEBUG_LRU_ADD(rs);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(lustre_pack_reply_v2);
@@ -574,7 +573,6 @@ static int lustre_unpack_msg_v2(struct lustre_msg_v2 *m, int len)
int __lustre_unpack_msg(struct lustre_msg *m, int len)
{
int required_len, rc;
- ENTRY;
/* We can provide a slightly better error log, if we check the
* message magic and version first. In the future, struct
@@ -588,12 +586,12 @@ int __lustre_unpack_msg(struct lustre_msg *m, int len)
/* can't even look inside the message */
CERROR("message length %d too small for magic/version check\n",
len);
- RETURN(-EINVAL);
+ return -EINVAL;
}
rc = lustre_unpack_msg_v2(m, len);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(__lustre_unpack_msg);
@@ -642,6 +640,9 @@ static inline int lustre_unpack_ptlrpc_body_v2(struct ptlrpc_request *req,
return -EINVAL;
}
+ if (!inout)
+ pb->pb_status = ptlrpc_status_ntoh(pb->pb_status);
+
return 0;
}
@@ -1613,11 +1614,10 @@ int do_set_info_async(struct obd_import *imp,
struct ptlrpc_request *req;
char *tmp;
int rc;
- ENTRY;
req = ptlrpc_request_alloc(imp, &RQF_OBD_SET_INFO);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY,
RCL_CLIENT, keylen);
@@ -1626,7 +1626,7 @@ int do_set_info_async(struct obd_import *imp,
rc = ptlrpc_request_pack(req, version, opcode);
if (rc) {
ptlrpc_request_free(req);
- RETURN(rc);
+ return rc;
}
tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
@@ -1644,7 +1644,7 @@ int do_set_info_async(struct obd_import *imp,
ptlrpc_req_finished(req);
}
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(do_set_info_async);
@@ -2163,7 +2163,6 @@ static void lustre_swab_lmm_oi(struct ost_id *oi)
static void lustre_swab_lov_user_md_common(struct lov_user_md_v1 *lum)
{
- ENTRY;
__swab32s(&lum->lmm_magic);
__swab32s(&lum->lmm_pattern);
lustre_swab_lmm_oi(&lum->lmm_oi);
@@ -2171,31 +2170,25 @@ static void lustre_swab_lov_user_md_common(struct lov_user_md_v1 *lum)
__swab16s(&lum->lmm_stripe_count);
__swab16s(&lum->lmm_stripe_offset);
print_lum(lum);
- EXIT;
}
void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum)
{
- ENTRY;
CDEBUG(D_IOCTL, "swabbing lov_user_md v1\n");
lustre_swab_lov_user_md_common(lum);
- EXIT;
}
EXPORT_SYMBOL(lustre_swab_lov_user_md_v1);
void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum)
{
- ENTRY;
CDEBUG(D_IOCTL, "swabbing lov_user_md v3\n");
lustre_swab_lov_user_md_common((struct lov_user_md_v1 *)lum);
/* lmm_pool_name nothing to do with char */
- EXIT;
}
EXPORT_SYMBOL(lustre_swab_lov_user_md_v3);
void lustre_swab_lov_mds_md(struct lov_mds_md *lmm)
{
- ENTRY;
CDEBUG(D_IOCTL, "swabbing lov_mds_md\n");
__swab32s(&lmm->lmm_magic);
__swab32s(&lmm->lmm_pattern);
@@ -2203,7 +2196,6 @@ void lustre_swab_lov_mds_md(struct lov_mds_md *lmm)
__swab32s(&lmm->lmm_stripe_size);
__swab16s(&lmm->lmm_stripe_count);
__swab16s(&lmm->lmm_layout_gen);
- EXIT;
}
EXPORT_SYMBOL(lustre_swab_lov_mds_md);
@@ -2211,13 +2203,12 @@ void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
int stripe_count)
{
int i;
- ENTRY;
+
for (i = 0; i < stripe_count; i++) {
lustre_swab_ost_id(&(lod[i].l_ost_oi));
__swab32s(&(lod[i].l_ost_gen));
__swab32s(&(lod[i].l_ost_idx));
}
- EXIT;
}
EXPORT_SYMBOL(lustre_swab_lov_user_md_objects);
@@ -2459,6 +2450,7 @@ void _debug_req(struct ptlrpc_request *req,
rep_ok ? lustre_msg_get_flags(req->rq_repmsg) : -1,
req->rq_status,
rep_ok ? lustre_msg_get_status(req->rq_repmsg) : -1);
+ va_end(args);
}
EXPORT_SYMBOL(_debug_req);
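Beyond the macro removal, two pack_generic.c hunks fix genuine C issues: "int lustre_msg_early_size()" is an old-style declaration whose parameter list is left unspecified, and "(void)" turns it into a proper prototype, while _debug_req() gains the va_end(args) that must pair with the function's earlier va_start(). A self-contained sketch of both rules, using illustrative names rather than Lustre symbols:

	#include <stdarg.h>
	#include <stdio.h>

	int early_size(void);		/* "(void)" = takes no arguments; "()" would mean "unspecified" */

	static void debug_vprintf(const char *fmt, ...)
	{
		va_list args;

		va_start(args, fmt);
		vprintf(fmt, args);
		va_end(args);		/* every va_start() needs a matching va_end() */
	}

The added pb_status conversion in lustre_unpack_ptlrpc_body_v2() is a separate functional change; judging by the name ptlrpc_status_ntoh(), it translates the wire representation of the status field to the host convention when unpacking incoming messages.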
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pinger.c b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
index ef5269aee0d..227a0ae9593 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pinger.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pinger.c
@@ -51,7 +51,7 @@ struct mutex pinger_mutex;
static LIST_HEAD(pinger_imports);
static struct list_head timeout_list = LIST_HEAD_INIT(timeout_list);
-int ptlrpc_pinger_suppress_pings()
+int ptlrpc_pinger_suppress_pings(void)
{
return suppress_pings;
}
@@ -75,11 +75,10 @@ int ptlrpc_obd_ping(struct obd_device *obd)
{
int rc;
struct ptlrpc_request *req;
- ENTRY;
req = ptlrpc_prep_ping(obd->u.cli.cl_import);
if (req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
req->rq_send_state = LUSTRE_IMP_FULL;
@@ -87,28 +86,27 @@ int ptlrpc_obd_ping(struct obd_device *obd)
ptlrpc_req_finished(req);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(ptlrpc_obd_ping);
int ptlrpc_ping(struct obd_import *imp)
{
struct ptlrpc_request *req;
- ENTRY;
req = ptlrpc_prep_ping(imp);
if (req == NULL) {
CERROR("OOM trying to ping %s->%s\n",
imp->imp_obd->obd_uuid.uuid,
obd2cli_tgt(imp->imp_obd));
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
DEBUG_REQ(D_INFO, req, "pinging %s->%s",
imp->imp_obd->obd_uuid.uuid, obd2cli_tgt(imp->imp_obd));
ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
- RETURN(0);
+ return 0;
}
void ptlrpc_update_next_ping(struct obd_import *imp, int soon)
@@ -297,7 +295,6 @@ static void ptlrpc_pinger_process_import(struct obd_import *imp,
static int ptlrpc_pinger_main(void *arg)
{
struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
- ENTRY;
/* Record that the thread is running */
thread_set_flags(thread, SVC_RUNNING);
@@ -353,7 +350,6 @@ static int ptlrpc_pinger_main(void *arg)
thread_is_event(thread),
&lwi);
if (thread_test_and_clear_flags(thread, SVC_STOPPING)) {
- EXIT;
break;
} else {
/* woken after adding import to reset timer */
@@ -369,37 +365,32 @@ static int ptlrpc_pinger_main(void *arg)
return 0;
}
-static struct ptlrpc_thread *pinger_thread = NULL;
+static struct ptlrpc_thread pinger_thread;
int ptlrpc_start_pinger(void)
{
struct l_wait_info lwi = { 0 };
int rc;
- ENTRY;
- if (pinger_thread != NULL)
- RETURN(-EALREADY);
+ if (!thread_is_init(&pinger_thread) &&
+ !thread_is_stopped(&pinger_thread))
+ return -EALREADY;
- OBD_ALLOC_PTR(pinger_thread);
- if (pinger_thread == NULL)
- RETURN(-ENOMEM);
- init_waitqueue_head(&pinger_thread->t_ctl_waitq);
+ init_waitqueue_head(&pinger_thread.t_ctl_waitq);
init_waitqueue_head(&suspend_timeouts_waitq);
- strcpy(pinger_thread->t_name, "ll_ping");
+ strcpy(pinger_thread.t_name, "ll_ping");
/* CLONE_VM and CLONE_FILES just avoid a needless copy, because we
* just drop the VM and FILES in cfs_daemonize_ctxt() right away. */
rc = PTR_ERR(kthread_run(ptlrpc_pinger_main,
- pinger_thread, pinger_thread->t_name));
+ &pinger_thread, pinger_thread.t_name));
if (IS_ERR_VALUE(rc)) {
CERROR("cannot start thread: %d\n", rc);
- OBD_FREE(pinger_thread, sizeof(*pinger_thread));
- pinger_thread = NULL;
- RETURN(rc);
+ return rc;
}
- l_wait_event(pinger_thread->t_ctl_waitq,
- thread_is_running(pinger_thread), &lwi);
+ l_wait_event(pinger_thread.t_ctl_waitq,
+ thread_is_running(&pinger_thread), &lwi);
if (suppress_pings)
CWARN("Pings will be suppressed at the request of the "
@@ -408,7 +399,7 @@ int ptlrpc_start_pinger(void)
"(Search for the \"suppress_pings\" kernel module "
"parameter.)\n");
- RETURN(0);
+ return 0;
}
int ptlrpc_pinger_remove_timeouts(void);
@@ -417,23 +408,19 @@ int ptlrpc_stop_pinger(void)
{
struct l_wait_info lwi = { 0 };
int rc = 0;
- ENTRY;
- if (pinger_thread == NULL)
- RETURN(-EALREADY);
+ if (!thread_is_init(&pinger_thread) &&
+ !thread_is_stopped(&pinger_thread))
+ return -EALREADY;
ptlrpc_pinger_remove_timeouts();
- mutex_lock(&pinger_mutex);
- thread_set_flags(pinger_thread, SVC_STOPPING);
- wake_up(&pinger_thread->t_ctl_waitq);
- mutex_unlock(&pinger_mutex);
+ thread_set_flags(&pinger_thread, SVC_STOPPING);
+ wake_up(&pinger_thread.t_ctl_waitq);
- l_wait_event(pinger_thread->t_ctl_waitq,
- thread_is_stopped(pinger_thread), &lwi);
+ l_wait_event(pinger_thread.t_ctl_waitq,
+ thread_is_stopped(&pinger_thread), &lwi);
- OBD_FREE_PTR(pinger_thread);
- pinger_thread = NULL;
- RETURN(rc);
+ return rc;
}
void ptlrpc_pinger_sending_on_import(struct obd_import *imp)
@@ -459,9 +446,8 @@ void ptlrpc_pinger_commit_expected(struct obd_import *imp)
int ptlrpc_pinger_add_import(struct obd_import *imp)
{
- ENTRY;
if (!list_empty(&imp->imp_pinger_chain))
- RETURN(-EALREADY);
+ return -EALREADY;
mutex_lock(&pinger_mutex);
CDEBUG(D_HA, "adding pingable import %s->%s\n",
@@ -476,15 +462,14 @@ int ptlrpc_pinger_add_import(struct obd_import *imp)
ptlrpc_pinger_wake_up();
mutex_unlock(&pinger_mutex);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ptlrpc_pinger_add_import);
int ptlrpc_pinger_del_import(struct obd_import *imp)
{
- ENTRY;
if (list_empty(&imp->imp_pinger_chain))
- RETURN(-ENOENT);
+ return -ENOENT;
mutex_lock(&pinger_mutex);
list_del_init(&imp->imp_pinger_chain);
@@ -494,7 +479,7 @@ int ptlrpc_pinger_del_import(struct obd_import *imp)
imp->imp_obd->obd_no_recov = 1;
class_import_put(imp);
mutex_unlock(&pinger_mutex);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ptlrpc_pinger_del_import);
@@ -615,10 +600,10 @@ int ptlrpc_pinger_remove_timeouts(void)
return 0;
}
-void ptlrpc_pinger_wake_up()
+void ptlrpc_pinger_wake_up(void)
{
- thread_add_flags(pinger_thread, SVC_EVENT);
- wake_up(&pinger_thread->t_ctl_waitq);
+ thread_add_flags(&pinger_thread, SVC_EVENT);
+ wake_up(&pinger_thread.t_ctl_waitq);
}
/* Ping evictor thread */
@@ -659,7 +644,6 @@ static int ping_evictor_main(void *arg)
struct obd_export *exp;
struct l_wait_info lwi = { 0 };
time_t expire_time;
- ENTRY;
unshare_fs_struct();
@@ -731,12 +715,12 @@ static int ping_evictor_main(void *arg)
}
CDEBUG(D_HA, "Exiting Ping Evictor\n");
- RETURN(0);
+ return 0;
}
void ping_evictor_start(void)
{
- task_t *task;
+ struct task_struct *task;
if (++pet_refcount > 1)
return;
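The pinger.c changes do two things besides macro removal. First, the dynamically allocated pinger_thread pointer becomes a static struct ptlrpc_thread, so the OBD_ALLOC_PTR()/OBD_FREE_PTR() calls and NULL checks go away and ptlrpc_start_pinger()/ptlrpc_stop_pinger() decide "already running?" from the thread state flags instead. Second, the old libcfs typedef task_t is replaced by the standard struct task_struct that kthread_run() returns. A minimal sketch of the kthread side only, with a hypothetical worker function (the patch itself keeps the existing PTR_ERR()/IS_ERR_VALUE() style of error check):

	#include <linux/kthread.h>
	#include <linux/sched.h>
	#include <linux/err.h>

	static int example_worker(void *data)	/* hypothetical thread body */
	{
		while (!kthread_should_stop())
			schedule();
		return 0;
	}

	static int start_example_worker(void)
	{
		struct task_struct *task;

		task = kthread_run(example_worker, NULL, "ll_example");
		if (IS_ERR(task))
			return PTR_ERR(task);	/* nothing was allocated, nothing to free */
		return 0;
	}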
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
index f6ea80f0b10..419e634854d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpc_module.c
@@ -54,7 +54,6 @@ extern struct mutex ptlrpcd_mutex;
__init int ptlrpc_init(void)
{
int rc, cleanup_phase = 0;
- ENTRY;
lustre_assert_wire_constants();
#if RS_DEBUG
@@ -67,11 +66,11 @@ __init int ptlrpc_init(void)
rc = req_layout_init();
if (rc)
- RETURN(rc);
+ return rc;
rc = ptlrpc_hr_init();
if (rc)
- RETURN(rc);
+ return rc;
cleanup_phase = 1;
@@ -110,7 +109,7 @@ __init int ptlrpc_init(void)
rc = tgt_mod_init();
if (rc)
GOTO(cleanup, rc);
- RETURN(0);
+ return 0;
cleanup:
switch(cleanup_phase) {
@@ -150,5 +149,7 @@ static void __exit ptlrpc_exit(void)
MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Request Processor and Lock Management");
MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0.0");
-cfs_module(ptlrpc, "1.0.0", ptlrpc_init, ptlrpc_exit);
+module_init(ptlrpc_init);
+module_exit(ptlrpc_exit);
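ptlrpc_module.c switches from the libcfs cfs_module() wrapper to the standard kernel module macros: the version string that used to be an argument of cfs_module() moves into MODULE_VERSION(), and the init/exit entry points are registered with module_init()/module_exit(). The resulting skeleton is the usual one (placeholder names, trimmed to the registration boilerplate):

	#include <linux/init.h>
	#include <linux/module.h>

	static int __init example_init(void)
	{
		return 0;		/* set up services here */
	}

	static void __exit example_exit(void)
	{
		/* tear down services here */
	}

	MODULE_LICENSE("GPL");
	MODULE_VERSION("1.0.0");
	module_init(example_init);
	module_exit(example_exit);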
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index 5a66a1be422..fbdeff65d05 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -268,7 +268,6 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
struct ptlrpc_request_set *set = pc->pc_set;
int rc = 0;
int rc2;
- ENTRY;
if (atomic_read(&set->set_new_count)) {
spin_lock(&set->set_new_req_lock);
@@ -302,7 +301,7 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
* new modules are loaded, i.e., early during boot up.
*/
CERROR("Failure to refill session: %d\n", rc2);
- RETURN(rc);
+ return rc;
}
if (atomic_read(&set->set_remaining))
@@ -368,7 +367,7 @@ static int ptlrpcd_check(struct lu_env *env, struct ptlrpcd_ctl *pc)
}
}
- RETURN(rc);
+ return rc;
}
/**
@@ -383,7 +382,6 @@ static int ptlrpcd(void *arg)
struct ptlrpc_request_set *set = pc->pc_set;
struct lu_env env = { .le_ses = NULL };
int rc, exit = 0;
- ENTRY;
unshare_fs_struct();
#if defined(CONFIG_SMP)
@@ -410,7 +408,7 @@ static int ptlrpcd(void *arg)
complete(&pc->pc_starting);
if (rc != 0)
- RETURN(rc);
+ return rc;
/*
* This mainloop strongly resembles ptlrpc_set_wait() except that our
@@ -501,7 +499,6 @@ static int ptlrpcd_bind(int index, int max)
#if defined(CONFIG_NUMA)
cpumask_t mask;
#endif
- ENTRY;
LASSERT(index <= max - 1);
pc = &ptlrpcds->pd_threads[index];
@@ -596,7 +593,7 @@ static int ptlrpcd_bind(int index, int max)
}
}
- RETURN(rc);
+ return rc;
}
@@ -604,7 +601,6 @@ int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
{
int rc;
int env = 0;
- ENTRY;
/*
* Do not allow start second thread for one pc.
@@ -612,7 +608,7 @@ int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
if (test_and_set_bit(LIOD_START, &pc->pc_flags)) {
CWARN("Starting second thread (%s) for same pc %p\n",
name, pc);
- RETURN(0);
+ return 0;
}
pc->pc_index = index;
@@ -634,7 +630,8 @@ int ptlrpcd_start(int index, int max, const char *name, struct ptlrpcd_ctl *pc)
env = 1;
{
- task_t *task;
+ struct task_struct *task;
+
if (index >= 0) {
rc = ptlrpcd_bind(index, max);
if (rc < 0)
@@ -663,31 +660,25 @@ out:
clear_bit(LIOD_BIND, &pc->pc_flags);
clear_bit(LIOD_START, &pc->pc_flags);
}
- RETURN(rc);
+ return rc;
}
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force)
{
- ENTRY;
-
if (!test_bit(LIOD_START, &pc->pc_flags)) {
CWARN("Thread for pc %p was not started\n", pc);
- goto out;
+ return;
}
set_bit(LIOD_STOP, &pc->pc_flags);
if (force)
set_bit(LIOD_FORCE, &pc->pc_flags);
wake_up(&pc->pc_set->set_waitq);
-
-out:
- EXIT;
}
void ptlrpcd_free(struct ptlrpcd_ctl *pc)
{
struct ptlrpc_request_set *set = pc->pc_set;
- ENTRY;
if (!test_bit(LIOD_START, &pc->pc_flags)) {
CWARN("Thread for pc %p was not started\n", pc);
@@ -716,13 +707,11 @@ out:
pc->pc_partners = NULL;
}
pc->pc_npartners = 0;
- EXIT;
}
static void ptlrpcd_fini(void)
{
int i;
- ENTRY;
if (ptlrpcds != NULL) {
for (i = 0; i < ptlrpcds->pd_nthreads; i++)
@@ -734,8 +723,6 @@ static void ptlrpcd_fini(void)
OBD_FREE(ptlrpcds, ptlrpcds->pd_size);
ptlrpcds = NULL;
}
-
- EXIT;
}
static int ptlrpcd_init(void)
@@ -743,7 +730,6 @@ static int ptlrpcd_init(void)
int nthreads = num_online_cpus();
char name[16];
int size, i = -1, j, rc = 0;
- ENTRY;
if (max_ptlrpcds > 0 && max_ptlrpcds < nthreads)
nthreads = max_ptlrpcds;
@@ -800,19 +786,18 @@ out:
ptlrpcds = NULL;
}
- RETURN(0);
+ return 0;
}
int ptlrpcd_addref(void)
{
int rc = 0;
- ENTRY;
mutex_lock(&ptlrpcd_mutex);
if (++ptlrpcd_users == 1)
rc = ptlrpcd_init();
mutex_unlock(&ptlrpcd_mutex);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(ptlrpcd_addref);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/recover.c b/drivers/staging/lustre/lustre/ptlrpc/recover.c
index 2960889834a..84c39e083ea 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/recover.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/recover.c
@@ -60,12 +60,8 @@
*/
void ptlrpc_initiate_recovery(struct obd_import *imp)
{
- ENTRY;
-
CDEBUG(D_HA, "%s: starting recovery\n", obd2cli_tgt(imp->imp_obd));
ptlrpc_connect_import(imp);
-
- EXIT;
}
/**
@@ -78,7 +74,6 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
struct list_head *tmp, *pos;
struct ptlrpc_request *req = NULL;
__u64 last_transno;
- ENTRY;
*inflight = 0;
@@ -137,11 +132,11 @@ int ptlrpc_replay_next(struct obd_import *imp, int *inflight)
if (rc) {
CERROR("recovery replay error %d for req "
LPU64"\n", rc, req->rq_xid);
- RETURN(rc);
+ return rc;
}
*inflight = 1;
}
- RETURN(rc);
+ return rc;
}
/**
@@ -152,8 +147,6 @@ int ptlrpc_resend(struct obd_import *imp)
{
struct ptlrpc_request *req, *next;
- ENTRY;
-
/* As long as we're in recovery, nothing should be added to the sending
* list, so we don't need to hold the lock during this iteration and
* resend process.
@@ -163,7 +156,7 @@ int ptlrpc_resend(struct obd_import *imp)
spin_lock(&imp->imp_lock);
if (imp->imp_state != LUSTRE_IMP_RECOVER) {
spin_unlock(&imp->imp_lock);
- RETURN(-1);
+ return -1;
}
list_for_each_entry_safe(req, next, &imp->imp_sending_list,
@@ -176,7 +169,7 @@ int ptlrpc_resend(struct obd_import *imp)
}
spin_unlock(&imp->imp_lock);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ptlrpc_resend);
@@ -203,7 +196,6 @@ EXPORT_SYMBOL(ptlrpc_wake_delayed);
void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
{
struct obd_import *imp = failed_req->rq_import;
- ENTRY;
CDEBUG(D_HA, "import %s of %s@%s abruptly disconnected: reconnecting\n",
imp->imp_obd->obd_name, obd2cli_tgt(imp->imp_obd),
@@ -230,8 +222,6 @@ void ptlrpc_request_handle_notconn(struct ptlrpc_request *failed_req)
if (!failed_req->rq_no_resend)
failed_req->rq_resend = 1;
spin_unlock(&failed_req->rq_lock);
-
- EXIT;
}
/**
@@ -246,7 +236,6 @@ int ptlrpc_set_import_active(struct obd_import *imp, int active)
struct obd_device *obd = imp->imp_obd;
int rc = 0;
- ENTRY;
LASSERT(obd);
/* When deactivating, mark import invalid, and abort in-flight
@@ -279,7 +268,7 @@ int ptlrpc_set_import_active(struct obd_import *imp, int active)
rc = ptlrpc_recover_import(imp, NULL, 0);
}
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(ptlrpc_set_import_active);
@@ -287,7 +276,6 @@ EXPORT_SYMBOL(ptlrpc_set_import_active);
int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async)
{
int rc = 0;
- ENTRY;
spin_lock(&imp->imp_lock);
if (imp->imp_state == LUSTRE_IMP_NEW || imp->imp_deactive ||
@@ -337,7 +325,6 @@ int ptlrpc_recover_import(struct obd_import *imp, char *new_uuid, int async)
CDEBUG(D_HA, "%s: recovery finished\n",
obd2cli_tgt(imp->imp_obd));
}
- EXIT;
out:
return rc;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec.c b/drivers/staging/lustre/lustre/ptlrpc/sec.c
index 36e8bed5458..962b31d163d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec.c
@@ -269,8 +269,8 @@ struct ptlrpc_cli_ctx *get_my_ctx(struct ptlrpc_sec *sec)
remove_dead = 0;
}
} else {
- vcred.vc_uid = current_uid();
- vcred.vc_gid = current_gid();
+ vcred.vc_uid = from_kuid(&init_user_ns, current_uid());
+ vcred.vc_gid = from_kgid(&init_user_ns, current_gid());
}
return sec->ps_policy->sp_cops->lookup_ctx(sec, &vcred,
@@ -396,14 +396,13 @@ int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
struct obd_import *imp = req->rq_import;
struct ptlrpc_sec *sec;
int rc;
- ENTRY;
LASSERT(!req->rq_cli_ctx);
LASSERT(imp);
rc = import_sec_validate_get(imp, &sec);
if (rc)
- RETURN(rc);
+ return rc;
req->rq_cli_ctx = get_my_ctx(sec);
@@ -411,10 +410,10 @@ int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
if (!req->rq_cli_ctx) {
CERROR("req %p: fail to get context\n", req);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
- RETURN(0);
+ return 0;
}
/**
@@ -428,8 +427,6 @@ int sptlrpc_req_get_ctx(struct ptlrpc_request *req)
*/
void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
{
- ENTRY;
-
LASSERT(req);
LASSERT(req->rq_cli_ctx);
@@ -444,7 +441,6 @@ void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync)
sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync);
req->rq_cli_ctx = NULL;
- EXIT;
}
static
@@ -520,7 +516,6 @@ int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx;
struct ptlrpc_cli_ctx *newctx;
int rc;
- ENTRY;
LASSERT(oldctx);
@@ -533,7 +528,7 @@ int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
/* restore old ctx */
req->rq_cli_ctx = oldctx;
- RETURN(rc);
+ return rc;
}
newctx = req->rq_cli_ctx;
@@ -560,14 +555,14 @@ int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req)
/* restore old ctx */
sptlrpc_req_put_ctx(req, 0);
req->rq_cli_ctx = oldctx;
- RETURN(rc);
+ return rc;
}
LASSERT(req->rq_cli_ctx == newctx);
}
sptlrpc_cli_ctx_put(oldctx, 1);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(sptlrpc_req_replace_dead_ctx);
@@ -639,12 +634,11 @@ int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
struct ptlrpc_sec *sec;
struct l_wait_info lwi;
int rc;
- ENTRY;
LASSERT(ctx);
if (req->rq_ctx_init || req->rq_ctx_fini)
- RETURN(0);
+ return 0;
/*
* during the process a request's context might change type even
@@ -654,7 +648,7 @@ int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout)
again:
rc = import_sec_validate_get(req->rq_import, &sec);
if (rc)
- RETURN(rc);
+ return rc;
if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) {
CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n",
@@ -666,7 +660,7 @@ again:
sptlrpc_sec_put(sec);
if (cli_ctx_is_eternal(ctx))
- RETURN(0);
+ return 0;
if (unlikely(test_bit(PTLRPC_CTX_NEW_BIT, &ctx->cc_flags))) {
LASSERT(ctx->cc_ops->refresh);
@@ -677,7 +671,7 @@ again:
LASSERT(ctx->cc_ops->validate);
if (ctx->cc_ops->validate(ctx) == 0) {
req_off_ctx_list(req, ctx);
- RETURN(0);
+ return 0;
}
if (unlikely(test_bit(PTLRPC_CTX_ERROR_BIT, &ctx->cc_flags))) {
@@ -685,7 +679,7 @@ again:
req->rq_err = 1;
spin_unlock(&req->rq_lock);
req_off_ctx_list(req, ctx);
- RETURN(-EPERM);
+ return -EPERM;
}
/*
@@ -719,7 +713,7 @@ again:
unlikely(req->rq_reqmsg) &&
lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
req_off_ctx_list(req, ctx);
- RETURN(0);
+ return 0;
}
if (unlikely(test_bit(PTLRPC_CTX_DEAD_BIT, &ctx->cc_flags))) {
@@ -731,7 +725,7 @@ again:
spin_lock(&req->rq_lock);
req->rq_err = 1;
spin_unlock(&req->rq_lock);
- RETURN(-EINTR);
+ return -EINTR;
}
rc = sptlrpc_req_replace_dead_ctx(req);
@@ -742,7 +736,7 @@ again:
spin_lock(&req->rq_lock);
req->rq_err = 1;
spin_unlock(&req->rq_lock);
- RETURN(rc);
+ return rc;
}
ctx = req->rq_cli_ctx;
@@ -759,7 +753,7 @@ again:
spin_unlock(&ctx->cc_lock);
if (timeout < 0)
- RETURN(-EWOULDBLOCK);
+ return -EWOULDBLOCK;
/* Clear any flags that may be present from previous sends */
LASSERT(req->rq_receiving_reply == 0);
@@ -789,7 +783,7 @@ again:
req_off_ctx_list(req, ctx);
LASSERT(rc != 0);
- RETURN(rc);
+ return rc;
}
goto again;
@@ -889,7 +883,6 @@ int sptlrpc_import_check_ctx(struct obd_import *imp)
struct ptlrpc_cli_ctx *ctx;
struct ptlrpc_request *req = NULL;
int rc;
- ENTRY;
might_sleep();
@@ -898,22 +891,22 @@ int sptlrpc_import_check_ctx(struct obd_import *imp)
sptlrpc_sec_put(sec);
if (!ctx)
- RETURN(-ENOMEM);
+ return -ENOMEM;
if (cli_ctx_is_eternal(ctx) ||
ctx->cc_ops->validate(ctx) == 0) {
sptlrpc_cli_ctx_put(ctx, 1);
- RETURN(0);
+ return 0;
}
if (cli_ctx_is_error(ctx)) {
sptlrpc_cli_ctx_put(ctx, 1);
- RETURN(-EACCES);
+ return -EACCES;
}
OBD_ALLOC_PTR(req);
if (!req)
- RETURN(-ENOMEM);
+ return -ENOMEM;
spin_lock_init(&req->rq_lock);
atomic_set(&req->rq_refcount, 10000);
@@ -929,7 +922,7 @@ int sptlrpc_import_check_ctx(struct obd_import *imp)
sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1);
OBD_FREE_PTR(req);
- RETURN(rc);
+ return rc;
}
/**
@@ -941,7 +934,6 @@ int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
{
struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
int rc = 0;
- ENTRY;
LASSERT(ctx);
LASSERT(ctx->cc_sec);
@@ -953,7 +945,7 @@ int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
if (req->rq_bulk) {
rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk);
if (rc)
- RETURN(rc);
+ return rc;
}
switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
@@ -977,14 +969,13 @@ int sptlrpc_cli_wrap_request(struct ptlrpc_request *req)
LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len);
}
- RETURN(rc);
+ return rc;
}
static int do_cli_unwrap_reply(struct ptlrpc_request *req)
{
struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
int rc;
- ENTRY;
LASSERT(ctx);
LASSERT(ctx->cc_sec);
@@ -1002,13 +993,13 @@ static int do_cli_unwrap_reply(struct ptlrpc_request *req)
break;
default:
CERROR("failed unpack reply: x"LPU64"\n", req->rq_xid);
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (req->rq_repdata_len < sizeof(struct lustre_msg)) {
CERROR("replied data length %d too small\n",
req->rq_repdata_len);
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) !=
@@ -1016,7 +1007,7 @@ static int do_cli_unwrap_reply(struct ptlrpc_request *req)
CERROR("reply policy %u doesn't match request policy %u\n",
SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr),
SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc));
- RETURN(-EPROTO);
+ return -EPROTO;
}
switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) {
@@ -1038,7 +1029,7 @@ static int do_cli_unwrap_reply(struct ptlrpc_request *req)
if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL &&
!req->rq_ctx_init)
req->rq_rep_swab_mask = 0;
- RETURN(rc);
+ return rc;
}
/**
@@ -1096,11 +1087,10 @@ int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
char *early_buf;
int early_bufsz, early_size;
int rc;
- ENTRY;
OBD_ALLOC_PTR(early_req);
if (early_req == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
early_size = req->rq_nob_received;
early_bufsz = size_roundup_power2(early_size);
@@ -1163,7 +1153,7 @@ int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
LASSERT(early_req->rq_repmsg);
*req_ret = early_req;
- RETURN(0);
+ return 0;
err_ctx:
sptlrpc_cli_ctx_put(early_req->rq_cli_ctx, 1);
@@ -1171,7 +1161,7 @@ err_buf:
OBD_FREE_LARGE(early_buf, early_bufsz);
err_req:
OBD_FREE_PTR(early_req);
- RETURN(rc);
+ return rc;
}
/**
@@ -1285,7 +1275,6 @@ struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
struct ptlrpc_sec_policy *policy;
struct ptlrpc_sec *sec;
char str[32];
- ENTRY;
if (svc_ctx) {
LASSERT(imp->imp_dlm_fake == 1);
@@ -1308,7 +1297,7 @@ struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
policy = sptlrpc_wireflavor2policy(sf->sf_rpc);
if (!policy) {
CERROR("invalid flavor 0x%x\n", sf->sf_rpc);
- RETURN(NULL);
+ return NULL;
}
}
@@ -1324,7 +1313,7 @@ struct ptlrpc_sec * sptlrpc_sec_create(struct obd_import *imp,
sptlrpc_policy_put(policy);
}
- RETURN(sec);
+ return sec;
}
struct ptlrpc_sec *sptlrpc_import_sec_ref(struct obd_import *imp)
@@ -1406,12 +1395,11 @@ int sptlrpc_import_sec_adapt(struct obd_import *imp,
enum lustre_sec_part sp;
char str[24];
int rc = 0;
- ENTRY;
might_sleep();
if (imp == NULL)
- RETURN(0);
+ return 0;
conn = imp->imp_connection;
@@ -1485,7 +1473,7 @@ int sptlrpc_import_sec_adapt(struct obd_import *imp,
mutex_unlock(&imp->imp_sec_mutex);
out:
sptlrpc_sec_put(sec);
- RETURN(rc);
+ return rc;
}
void sptlrpc_import_sec_put(struct obd_import *imp)
@@ -1523,7 +1511,8 @@ void sptlrpc_import_flush_root_ctx(struct obd_import *imp)
void sptlrpc_import_flush_my_ctx(struct obd_import *imp)
{
- import_flush_ctx_common(imp, current_uid(), 1, 1);
+ import_flush_ctx_common(imp, from_kuid(&init_user_ns, current_uid()),
+ 1, 1);
}
EXPORT_SYMBOL(sptlrpc_import_flush_my_ctx);
@@ -1668,17 +1657,16 @@ int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize)
{
struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
struct ptlrpc_sec_policy *policy;
- ENTRY;
LASSERT(ctx);
LASSERT(ctx->cc_sec);
LASSERT(ctx->cc_sec->ps_policy);
if (req->rq_repbuf)
- RETURN(0);
+ return 0;
policy = ctx->cc_sec->ps_policy;
- RETURN(policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize));
+ return policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize);
}
/**
@@ -1689,7 +1677,6 @@ void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
{
struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx;
struct ptlrpc_sec_policy *policy;
- ENTRY;
LASSERT(ctx);
LASSERT(ctx->cc_sec);
@@ -1703,7 +1690,6 @@ void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req)
policy = ctx->cc_sec->ps_policy;
policy->sp_cops->free_repbuf(ctx->cc_sec, req);
req->rq_repmsg = NULL;
- EXIT;
}
int sptlrpc_cli_install_rvs_ctx(struct obd_import *imp,
@@ -2032,7 +2018,6 @@ int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
struct ptlrpc_sec_policy *policy;
struct lustre_msg *msg = req->rq_reqbuf;
int rc;
- ENTRY;
LASSERT(msg);
LASSERT(req->rq_reqmsg == NULL);
@@ -2050,18 +2035,18 @@ int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
default:
CERROR("error unpacking request from %s x"LPU64"\n",
libcfs_id2str(req->rq_peer), req->rq_xid);
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
}
req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr);
req->rq_sp_from = LUSTRE_SP_ANY;
- req->rq_auth_uid = INVALID_UID;
- req->rq_auth_mapped_uid = INVALID_UID;
+ req->rq_auth_uid = -1;
+ req->rq_auth_mapped_uid = -1;
policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc);
if (!policy) {
CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc);
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
}
LASSERT(policy->sp_sops->accept);
@@ -2079,7 +2064,7 @@ int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req)
/* sanity check for the request source */
rc = sptlrpc_svc_check_from(req, rc);
- RETURN(rc);
+ return rc;
}
/**
@@ -2092,7 +2077,6 @@ int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
struct ptlrpc_sec_policy *policy;
struct ptlrpc_reply_state *rs;
int rc;
- ENTRY;
LASSERT(req->rq_svc_ctx);
LASSERT(req->rq_svc_ctx->sc_policy);
@@ -2105,7 +2089,7 @@ int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
/* failed alloc, try emergency pool */
rs = lustre_get_emerg_rs(req->rq_rqbd->rqbd_svcpt);
if (rs == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
req->rq_reply_state = rs;
rc = policy->sp_sops->alloc_rs(req, msglen);
@@ -2118,7 +2102,7 @@ int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen)
LASSERT(rc != 0 ||
(req->rq_reply_state && req->rq_reply_state->rs_msg));
- RETURN(rc);
+ return rc;
}
/**
@@ -2131,7 +2115,6 @@ int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
{
struct ptlrpc_sec_policy *policy;
int rc;
- ENTRY;
LASSERT(req->rq_svc_ctx);
LASSERT(req->rq_svc_ctx->sc_policy);
@@ -2142,7 +2125,7 @@ int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req)
rc = policy->sp_sops->authorize(req);
LASSERT(rc || req->rq_reply_state->rs_repdata_len);
- RETURN(rc);
+ return rc;
}
/**
@@ -2152,7 +2135,6 @@ void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
{
struct ptlrpc_sec_policy *policy;
unsigned int prealloc;
- ENTRY;
LASSERT(rs->rs_svc_ctx);
LASSERT(rs->rs_svc_ctx->sc_policy);
@@ -2165,7 +2147,6 @@ void sptlrpc_svc_free_rs(struct ptlrpc_reply_state *rs)
if (prealloc)
lustre_put_emerg_rs(rs);
- EXIT;
}
void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req)
@@ -2314,10 +2295,10 @@ int sptlrpc_pack_user_desc(struct lustre_msg *msg, int offset)
pud = lustre_msg_buf(msg, offset, 0);
- pud->pud_uid = current_uid();
- pud->pud_gid = current_gid();
- pud->pud_fsuid = current_fsuid();
- pud->pud_fsgid = current_fsgid();
+ pud->pud_uid = from_kuid(&init_user_ns, current_uid());
+ pud->pud_gid = from_kgid(&init_user_ns, current_gid());
+ pud->pud_fsuid = from_kuid(&init_user_ns, current_fsuid());
+ pud->pud_fsgid = from_kgid(&init_user_ns, current_fsgid());
pud->pud_cap = cfs_curproc_cap_pack();
pud->pud_ngroups = (msg->lm_buflens[offset] - sizeof(*pud)) / 4;
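The credential-related hunks in sec.c adapt to the kernel's namespace-aware ID types: current_uid(), current_gid(), current_fsuid() and current_fsgid() return kuid_t/kgid_t, which cannot be stored directly in the numeric fields that ptlrpc puts on the wire, so the values are converted with from_kuid()/from_kgid() against &init_user_ns. A minimal sketch with a hypothetical helper:

	#include <linux/cred.h>
	#include <linux/uidgid.h>
	#include <linux/user_namespace.h>

	static void pack_current_ids(uid_t *uid, gid_t *gid)	/* hypothetical helper */
	{
		/* kuid_t/kgid_t are opaque; convert to plain numbers for the wire */
		*uid = from_kuid(&init_user_ns, current_uid());
		*gid = from_kgid(&init_user_ns, current_gid());
	}

The related hunk that replaces INVALID_UID with -1 for rq_auth_uid/rq_auth_mapped_uid appears to follow the same logic: those fields are plain integers rather than kuid_t, so the kuid-typed INVALID_UID no longer fits.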
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index bf53f1bc174..9013745ab10 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -156,7 +156,7 @@ int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
"max waitqueue depth: %u\n"
"max wait time: "CFS_TIME_T"/%u\n"
,
- num_physpages,
+ totalram_pages,
PAGES_PER_POOL,
page_pools.epp_max_pages,
page_pools.epp_max_pools,
@@ -705,7 +705,7 @@ int sptlrpc_enc_pool_init(void)
* maximum capacity is 1/8 of total physical memory.
* is the 1/8 a good number?
*/
- page_pools.epp_max_pages = num_physpages / 8;
+ page_pools.epp_max_pages = totalram_pages / 8;
page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);
init_waitqueue_head(&page_pools.epp_waitq);
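sec_bulk.c stops reading the long-deprecated num_physpages and sizes the encryption page pool from totalram_pages instead, keeping the existing policy (spelled out in the comment above the hunk) of capping the pool at 1/8 of physical memory. As a worked example, assuming 4 KiB pages, a 16 GiB machine has totalram_pages = 4,194,304, so epp_max_pages = 4,194,304 / 8 = 524,288 pages, i.e. the pool may grow to at most 2 GiB.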
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
index a45a3929b59..6cc3f23c27c 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_config.c
@@ -195,7 +195,7 @@ int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule)
flavor = strchr(param, '=');
if (flavor == NULL) {
CERROR("invalid param, no '='\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
*flavor++ = '\0';
@@ -208,7 +208,7 @@ int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule)
rule->sr_netid = libcfs_str2net(param);
if (rule->sr_netid == LNET_NIDNET(LNET_NID_ANY)) {
CERROR("invalid network name: %s\n", param);
- RETURN(-EINVAL);
+ return -EINVAL;
}
}
@@ -228,16 +228,16 @@ int sptlrpc_parse_rule(char *param, struct sptlrpc_rule *rule)
rule->sr_to = LUSTRE_SP_MDT;
} else {
CERROR("invalid rule dir segment: %s\n", dir);
- RETURN(-EINVAL);
+ return -EINVAL;
}
}
/* 2.1 flavor */
rc = sptlrpc_parse_flavor(flavor, &rule->sr_flvr);
if (rc)
- RETURN(-EINVAL);
+ return -EINVAL;
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(sptlrpc_parse_rule);
@@ -661,18 +661,17 @@ static int __sptlrpc_process_config(struct lustre_cfg *lcfg,
char fsname[MTI_NAME_MAXLEN];
struct sptlrpc_rule rule;
int rc;
- ENTRY;
target = lustre_cfg_string(lcfg, 1);
if (target == NULL) {
CERROR("missing target name\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
param = lustre_cfg_string(lcfg, 2);
if (param == NULL) {
CERROR("missing parameter\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
CDEBUG(D_SEC, "processing rule: %s.%s\n", target, param);
@@ -680,13 +679,13 @@ static int __sptlrpc_process_config(struct lustre_cfg *lcfg,
/* parse rule to make sure the format is correct */
if (strncmp(param, PARAM_SRPC_FLVR, sizeof(PARAM_SRPC_FLVR) - 1) != 0) {
CERROR("Invalid sptlrpc parameter: %s\n", param);
- RETURN(-EINVAL);
+ return -EINVAL;
}
param += sizeof(PARAM_SRPC_FLVR) - 1;
rc = sptlrpc_parse_rule(param, &rule);
if (rc)
- RETURN(-EINVAL);
+ return -EINVAL;
if (conf == NULL) {
target2fsname(target, fsname, sizeof(fsname));
@@ -708,7 +707,7 @@ static int __sptlrpc_process_config(struct lustre_cfg *lcfg,
if (rc == 0)
conf->sc_modified++;
- RETURN(rc);
+ return rc;
}
int sptlrpc_process_config(struct lustre_cfg *lcfg)
@@ -905,7 +904,6 @@ EXPORT_SYMBOL(sptlrpc_target_choose_flavor);
void sptlrpc_conf_client_adapt(struct obd_device *obd)
{
struct obd_import *imp;
- ENTRY;
LASSERT(strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0 ||
strcmp(obd->obd_type->typ_name, LUSTRE_OSC_NAME) ==0);
@@ -924,7 +922,6 @@ void sptlrpc_conf_client_adapt(struct obd_device *obd)
}
up_read(&obd->u.cli.cl_sem);
- EXIT;
}
EXPORT_SYMBOL(sptlrpc_conf_client_adapt);
@@ -1011,11 +1008,10 @@ int sptlrpc_target_local_copy_conf(struct obd_device *obd,
struct lvfs_run_ctxt saved;
struct dentry *dentry;
int rc;
- ENTRY;
ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
if (ctxt == NULL)
- RETURN(-EINVAL);
+ return -EINVAL;
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
@@ -1058,7 +1054,7 @@ out_ctx:
llog_ctxt_put(ctxt);
CDEBUG(D_SEC, "target %s: write local sptlrpc conf: rc = %d\n",
obd->obd_name, rc);
- RETURN(rc);
+ return rc;
}
static int local_read_handler(const struct lu_env *env,
@@ -1068,11 +1064,10 @@ static int local_read_handler(const struct lu_env *env,
struct sptlrpc_conf *conf = (struct sptlrpc_conf *) data;
struct lustre_cfg *lcfg = (struct lustre_cfg *)(rec + 1);
int cfg_len, rc;
- ENTRY;
if (rec->lrh_type != OBD_CFG_REC) {
CERROR("unhandled lrh_type: %#x\n", rec->lrh_type);
- RETURN(-EINVAL);
+ return -EINVAL;
}
cfg_len = rec->lrh_len - sizeof(struct llog_rec_hdr) -
@@ -1081,15 +1076,15 @@ static int local_read_handler(const struct lu_env *env,
rc = lustre_cfg_sanity_check(lcfg, cfg_len);
if (rc) {
CERROR("Insane cfg\n");
- RETURN(rc);
+ return rc;
}
if (lcfg->lcfg_command != LCFG_SPTLRPC_CONF) {
CERROR("invalid command (%x)\n", lcfg->lcfg_command);
- RETURN(-EINVAL);
+ return -EINVAL;
}
- RETURN(__sptlrpc_process_config(lcfg, conf));
+ return __sptlrpc_process_config(lcfg, conf);
}
static
@@ -1100,14 +1095,13 @@ int sptlrpc_target_local_read_conf(struct obd_device *obd,
struct llog_ctxt *ctxt;
struct lvfs_run_ctxt saved;
int rc;
- ENTRY;
LASSERT(conf->sc_updated == 0 && conf->sc_local == 0);
ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
if (ctxt == NULL) {
CERROR("missing llog context\n");
- RETURN(-EINVAL);
+ return -EINVAL;
}
push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
@@ -1143,7 +1137,7 @@ out_pop:
llog_ctxt_put(ctxt);
CDEBUG(D_SEC, "target %s: read local sptlrpc conf: rc = %d\n",
obd->obd_name, rc);
- RETURN(rc);
+ return rc;
}
@@ -1160,7 +1154,6 @@ int sptlrpc_conf_target_get_rules(struct obd_device *obd,
enum lustre_sec_part sp_dst;
char fsname[MTI_NAME_MAXLEN];
int rc = 0;
- ENTRY;
if (strcmp(obd->obd_type->typ_name, LUSTRE_MDT_NAME) == 0) {
sp_dst = LUSTRE_SP_MDT;
@@ -1168,7 +1161,7 @@ int sptlrpc_conf_target_get_rules(struct obd_device *obd,
sp_dst = LUSTRE_SP_OST;
} else {
CERROR("unexpected obd type %s\n", obd->obd_type->typ_name);
- RETURN(-EINVAL);
+ return -EINVAL;
}
CDEBUG(D_SEC, "get rules for target %s\n", obd->obd_uuid.uuid);
@@ -1210,7 +1203,7 @@ int sptlrpc_conf_target_get_rules(struct obd_device *obd,
LUSTRE_SP_ANY, sp_dst, rset);
out:
mutex_unlock(&sptlrpc_conf_lock);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(sptlrpc_conf_target_get_rules);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
index 4c96a14a1bb..d2eb20eb56d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_gc.c
@@ -217,7 +217,7 @@ again:
int sptlrpc_gc_init(void)
{
struct l_wait_info lwi = { 0 };
- task_t *task;
+ struct task_struct *task;
mutex_init(&sec_gc_mutex);
spin_lock_init(&sec_gc_list_lock);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
index f552d2f182b..416401be6d4 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
@@ -192,7 +192,6 @@ int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
{
struct lustre_msg *msg = req->rq_reqbuf;
struct plain_header *phdr;
- ENTRY;
msg->lm_secflvr = req->rq_flvr.sf_rpc;
@@ -209,7 +208,7 @@ int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount,
msg->lm_buflens);
- RETURN(0);
+ return 0;
}
static
@@ -219,11 +218,10 @@ int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
struct plain_header *phdr;
__u32 cksum;
int swabbed;
- ENTRY;
if (msg->lm_bufcount != PLAIN_PACK_SEGMENTS) {
CERROR("unexpected reply buf count %u\n", msg->lm_bufcount);
- RETURN(-EPROTO);
+ return -EPROTO;
}
swabbed = ptlrpc_rep_need_swab(req);
@@ -231,24 +229,24 @@ int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
if (phdr == NULL) {
CERROR("missing plain header\n");
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (phdr->ph_ver != 0) {
CERROR("Invalid header version\n");
- RETURN(-EPROTO);
+ return -EPROTO;
}
/* expect no user desc in reply */
if (phdr->ph_flags & PLAIN_FL_USER) {
CERROR("Unexpected udesc flag in reply\n");
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (phdr->ph_bulk_hash_alg != req->rq_flvr.u_bulk.hash.hash_alg) {
CERROR("reply bulk flavor %u != %u\n", phdr->ph_bulk_hash_alg,
req->rq_flvr.u_bulk.hash.hash_alg);
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (unlikely(req->rq_early)) {
@@ -262,7 +260,7 @@ int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
CDEBUG(D_SEC,
"early reply checksum mismatch: %08x != %08x\n",
cpu_to_le32(cksum), msg->lm_cksum);
- RETURN(-EINVAL);
+ return -EINVAL;
}
} else {
/* whether we sent with bulk or not, we expect the same
@@ -272,18 +270,18 @@ int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req)
phdr->ph_flags & PLAIN_FL_BULK)) {
CERROR("%s bulk checksum in reply\n",
req->rq_pack_bulk ? "Missing" : "Unexpected");
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (phdr->ph_flags & PLAIN_FL_BULK) {
if (plain_unpack_bsd(msg, swabbed))
- RETURN(-EPROTO);
+ return -EPROTO;
}
}
req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0);
req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF);
- RETURN(0);
+ return 0;
}
static
@@ -307,10 +305,10 @@ int plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx,
bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc);
if (bsd->bsd_svc == SPTLRPC_BULK_SVC_NULL)
- RETURN(0);
+ return 0;
if (req->rq_bulk_read)
- RETURN(0);
+ return 0;
rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,
token);
@@ -417,7 +415,6 @@ static
void plain_destroy_sec(struct ptlrpc_sec *sec)
{
struct plain_sec *plsec = sec2plsec(sec);
- ENTRY;
LASSERT(sec->ps_policy == &plain_policy);
LASSERT(sec->ps_import);
@@ -428,7 +425,6 @@ void plain_destroy_sec(struct ptlrpc_sec *sec)
class_import_put(sec->ps_import);
OBD_FREE_PTR(plsec);
- EXIT;
}
static
@@ -445,13 +441,12 @@ struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
struct plain_sec *plsec;
struct ptlrpc_sec *sec;
struct ptlrpc_cli_ctx *ctx;
- ENTRY;
LASSERT(SPTLRPC_FLVR_POLICY(sf->sf_rpc) == SPTLRPC_POLICY_PLAIN);
OBD_ALLOC_PTR(plsec);
if (plsec == NULL)
- RETURN(NULL);
+ return NULL;
/*
* initialize plain_sec
@@ -476,12 +471,12 @@ struct ptlrpc_sec *plain_create_sec(struct obd_import *imp,
ctx = plain_sec_install_ctx(plsec);
if (ctx == NULL) {
plain_destroy_sec(sec);
- RETURN(NULL);
+ return NULL;
}
sptlrpc_cli_ctx_put(ctx, 1);
}
- RETURN(sec);
+ return sec;
}
static
@@ -491,7 +486,6 @@ struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
{
struct plain_sec *plsec = sec2plsec(sec);
struct ptlrpc_cli_ctx *ctx;
- ENTRY;
read_lock(&plsec->pls_lock);
ctx = plsec->pls_ctx;
@@ -502,7 +496,7 @@ struct ptlrpc_cli_ctx *plain_lookup_ctx(struct ptlrpc_sec *sec,
if (unlikely(ctx == NULL))
ctx = plain_sec_install_ctx(plsec);
- RETURN(ctx);
+ return ctx;
}
static
@@ -526,11 +520,10 @@ int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
{
struct plain_sec *plsec = sec2plsec(sec);
struct ptlrpc_cli_ctx *ctx;
- ENTRY;
/* do nothing unless caller want to flush for 'all' */
if (uid != -1)
- RETURN(0);
+ return 0;
write_lock(&plsec->pls_lock);
ctx = plsec->pls_ctx;
@@ -539,7 +532,7 @@ int plain_flush_ctx_cache(struct ptlrpc_sec *sec,
if (ctx)
sptlrpc_cli_ctx_put(ctx, 1);
- RETURN(0);
+ return 0;
}
static
@@ -549,7 +542,6 @@ int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
{
__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
int alloc_len;
- ENTRY;
buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
buflens[PLAIN_PACK_MSG_OFF] = msgsize;
@@ -570,7 +562,7 @@ int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
alloc_len = size_roundup_power2(alloc_len);
OBD_ALLOC_LARGE(req->rq_reqbuf, alloc_len);
if (!req->rq_reqbuf)
- RETURN(-ENOMEM);
+ return -ENOMEM;
req->rq_reqbuf_len = alloc_len;
} else {
@@ -585,20 +577,18 @@ int plain_alloc_reqbuf(struct ptlrpc_sec *sec,
if (req->rq_pack_udesc)
sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF);
- RETURN(0);
+ return 0;
}
static
void plain_free_reqbuf(struct ptlrpc_sec *sec,
struct ptlrpc_request *req)
{
- ENTRY;
if (!req->rq_pool) {
OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len);
req->rq_reqbuf = NULL;
req->rq_reqbuf_len = 0;
}
- EXIT;
}
static
@@ -608,7 +598,6 @@ int plain_alloc_repbuf(struct ptlrpc_sec *sec,
{
__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
int alloc_len;
- ENTRY;
buflens[PLAIN_PACK_HDR_OFF] = sizeof(struct plain_header);
buflens[PLAIN_PACK_MSG_OFF] = msgsize;
@@ -627,21 +616,19 @@ int plain_alloc_repbuf(struct ptlrpc_sec *sec,
OBD_ALLOC_LARGE(req->rq_repbuf, alloc_len);
if (!req->rq_repbuf)
- RETURN(-ENOMEM);
+ return -ENOMEM;
req->rq_repbuf_len = alloc_len;
- RETURN(0);
+ return 0;
}
static
void plain_free_repbuf(struct ptlrpc_sec *sec,
struct ptlrpc_request *req)
{
- ENTRY;
OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len);
req->rq_repbuf = NULL;
req->rq_repbuf_len = 0;
- EXIT;
}
static
@@ -652,7 +639,6 @@ int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
struct lustre_msg *newbuf;
int oldsize;
int newmsg_size, newbuf_size;
- ENTRY;
LASSERT(req->rq_reqbuf);
LASSERT(req->rq_reqbuf_len >= req->rq_reqlen);
@@ -681,7 +667,7 @@ int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
OBD_ALLOC_LARGE(newbuf, newbuf_size);
if (newbuf == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len);
@@ -697,7 +683,7 @@ int plain_enlarge_reqbuf(struct ptlrpc_sec *sec,
_sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize);
req->rq_reqlen = newmsg_size;
- RETURN(0);
+ return 0;
}
/****************************************
@@ -715,7 +701,6 @@ int plain_accept(struct ptlrpc_request *req)
struct lustre_msg *msg = req->rq_reqbuf;
struct plain_header *phdr;
int swabbed;
- ENTRY;
LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) ==
SPTLRPC_POLICY_PLAIN);
@@ -725,12 +710,12 @@ int plain_accept(struct ptlrpc_request *req)
SPTLRPC_FLVR_BULK_TYPE(req->rq_flvr.sf_rpc) !=
SPTLRPC_FLVR_BULK_TYPE(SPTLRPC_FLVR_PLAIN)) {
CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc);
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
}
if (msg->lm_bufcount < PLAIN_PACK_SEGMENTS) {
CERROR("unexpected request buf count %u\n", msg->lm_bufcount);
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
}
swabbed = ptlrpc_req_need_swab(req);
@@ -738,17 +723,17 @@ int plain_accept(struct ptlrpc_request *req)
phdr = lustre_msg_buf(msg, PLAIN_PACK_HDR_OFF, sizeof(*phdr));
if (phdr == NULL) {
CERROR("missing plain header\n");
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (phdr->ph_ver != 0) {
CERROR("Invalid header version\n");
- RETURN(-EPROTO);
+ return -EPROTO;
}
if (phdr->ph_bulk_hash_alg >= BULK_HASH_ALG_MAX) {
CERROR("invalid hash algorithm: %u\n", phdr->ph_bulk_hash_alg);
- RETURN(-EPROTO);
+ return -EPROTO;
}
req->rq_sp_from = phdr->ph_sp;
@@ -758,7 +743,7 @@ int plain_accept(struct ptlrpc_request *req)
if (sptlrpc_unpack_user_desc(msg, PLAIN_PACK_USER_OFF,
swabbed)) {
CERROR("Mal-formed user descriptor\n");
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
}
req->rq_pack_udesc = 1;
@@ -767,7 +752,7 @@ int plain_accept(struct ptlrpc_request *req)
if (phdr->ph_flags & PLAIN_FL_BULK) {
if (plain_unpack_bsd(msg, swabbed))
- RETURN(SECSVC_DROP);
+ return SECSVC_DROP;
req->rq_pack_bulk = 1;
}
@@ -778,7 +763,7 @@ int plain_accept(struct ptlrpc_request *req)
req->rq_svc_ctx = &plain_svc_ctx;
atomic_inc(&req->rq_svc_ctx->sc_refcount);
- RETURN(SECSVC_OK);
+ return SECSVC_OK;
}
static
@@ -787,7 +772,6 @@ int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
struct ptlrpc_reply_state *rs;
__u32 buflens[PLAIN_PACK_SEGMENTS] = { 0, };
int rs_size = sizeof(*rs);
- ENTRY;
LASSERT(msgsize % 8 == 0);
@@ -807,7 +791,7 @@ int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
} else {
OBD_ALLOC_LARGE(rs, rs_size);
if (rs == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
rs->rs_size = rs_size;
}
@@ -821,20 +805,17 @@ int plain_alloc_rs(struct ptlrpc_request *req, int msgsize)
rs->rs_msg = lustre_msg_buf_v2(rs->rs_repbuf, PLAIN_PACK_MSG_OFF, 0);
req->rq_reply_state = rs;
- RETURN(0);
+ return 0;
}
static
void plain_free_rs(struct ptlrpc_reply_state *rs)
{
- ENTRY;
-
LASSERT(atomic_read(&rs->rs_svc_ctx->sc_refcount) > 1);
atomic_dec(&rs->rs_svc_ctx->sc_refcount);
if (!rs->rs_prealloc)
OBD_FREE_LARGE(rs, rs->rs_size);
- EXIT;
}
static
@@ -844,7 +825,6 @@ int plain_authorize(struct ptlrpc_request *req)
struct lustre_msg_v2 *msg = rs->rs_repbuf;
struct plain_header *phdr;
int len;
- ENTRY;
LASSERT(rs);
LASSERT(msg);
@@ -882,7 +862,7 @@ int plain_authorize(struct ptlrpc_request *req)
req->rq_reply_off = 0;
}
- RETURN(0);
+ return 0;
}
static
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 1667b8e8601..ac8b5fd2300 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -369,7 +369,6 @@ static void rs_batch_fini(struct rs_batch *b)
void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
{
struct ptlrpc_hr_thread *hrt;
- ENTRY;
LASSERT(list_empty(&rs->rs_list));
@@ -380,28 +379,23 @@ void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
spin_unlock(&hrt->hrt_lock);
wake_up(&hrt->hrt_waitq);
- EXIT;
}
void
ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs)
{
- ENTRY;
-
LASSERT(spin_is_locked(&rs->rs_svcpt->scp_rep_lock));
LASSERT(spin_is_locked(&rs->rs_lock));
LASSERT (rs->rs_difficult);
rs->rs_scheduled_ever = 1; /* flag any notification attempt */
if (rs->rs_scheduled) { /* being set up or already notified */
- EXIT;
return;
}
rs->rs_scheduled = 1;
list_del_init(&rs->rs_list);
ptlrpc_dispatch_difficult_reply(rs);
- EXIT;
}
EXPORT_SYMBOL(ptlrpc_schedule_difficult_reply);
@@ -409,7 +403,6 @@ void ptlrpc_commit_replies(struct obd_export *exp)
{
struct ptlrpc_reply_state *rs, *nxt;
DECLARE_RS_BATCH(batch);
- ENTRY;
rs_batch_init(&batch);
/* Find any replies that have been committed and get their service
@@ -429,7 +422,6 @@ void ptlrpc_commit_replies(struct obd_export *exp)
}
spin_unlock(&exp->exp_uncommitted_replies_lock);
rs_batch_fini(&batch);
- EXIT;
}
EXPORT_SYMBOL(ptlrpc_commit_replies);
@@ -551,6 +543,7 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
if (tc->tc_thr_factor != 0) {
int factor = tc->tc_thr_factor;
const int fade = 4;
+ cpumask_t mask;
/*
* User wants to increase number of threads with for
@@ -564,7 +557,8 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
* have too many threads no matter how many cores/HTs
* there are.
*/
- if (cfs_cpu_ht_nsiblings(0) > 1) { /* weight is # of HTs */
+ cpumask_copy(&mask, topology_thread_cpumask(0));
+ if (cpus_weight(mask) > 1) { /* weight is # of HTs */
/* depress thread factor for hyper-thread */
factor = factor - (factor >> 1) + (factor >> 3);
}
@@ -689,7 +683,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
*/
struct ptlrpc_service *
ptlrpc_register_service(struct ptlrpc_service_conf *conf,
- proc_dir_entry_t *proc_entry)
+ struct proc_dir_entry *proc_entry)
{
struct ptlrpc_service_cpt_conf *cconf = &conf->psc_cpt;
struct ptlrpc_service *service;
@@ -700,7 +694,6 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf,
int cpt;
int rc;
int i;
- ENTRY;
LASSERT(conf->psc_buf.bc_nbufs > 0);
LASSERT(conf->psc_buf.bc_buf_size >=
@@ -724,7 +717,7 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf,
if (rc != 0) {
CERROR("%s: invalid CPT pattern string: %s",
conf->psc_name, cconf->cc_pattern);
- RETURN(ERR_PTR(-EINVAL));
+ return ERR_PTR(-EINVAL);
}
rc = cfs_expr_list_values(el, ncpts, &cpts);
@@ -734,7 +727,7 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf,
conf->psc_name, cconf->cc_pattern, rc);
if (cpts != NULL)
OBD_FREE(cpts, sizeof(*cpts) * ncpts);
- RETURN(ERR_PTR(rc < 0 ? rc : -EINVAL));
+ return ERR_PTR(rc < 0 ? rc : -EINVAL);
}
ncpts = rc;
}
@@ -744,7 +737,7 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf,
if (service == NULL) {
if (cpts != NULL)
OBD_FREE(cpts, sizeof(*cpts) * ncpts);
- RETURN(ERR_PTR(-ENOMEM));
+ return ERR_PTR(-ENOMEM);
}
service->srv_cptable = cptable;
@@ -823,10 +816,10 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf,
GOTO(failed, rc);
}
- RETURN(service);
+ return service;
failed:
ptlrpc_unregister_service(service);
- RETURN(ERR_PTR(rc));
+ return ERR_PTR(rc);
}
EXPORT_SYMBOL(ptlrpc_register_service);
@@ -1035,8 +1028,6 @@ static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay)
struct obd_export *oldest_exp;
time_t oldest_time, new_time;
- ENTRY;
-
LASSERT(exp);
/* Compensate for slow machines, etc, by faking our request time
@@ -1048,7 +1039,7 @@ static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay)
/* Do not pay attention on 1sec or smaller renewals. */
new_time = cfs_time_current_sec() + extra_delay;
if (exp->exp_last_request_time + 1 /*second */ >= new_time)
- RETURN_EXIT;
+ return;
exp->exp_last_request_time = new_time;
CDEBUG(D_HA, "updating export %s at "CFS_TIME_T" exp %p\n",
@@ -1063,7 +1054,7 @@ static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay)
if (list_empty(&exp->exp_obd_chain_timed)) {
/* this one is not timed */
spin_unlock(&exp->exp_obd->obd_dev_lock);
- RETURN_EXIT;
+ return;
}
list_move_tail(&exp->exp_obd_chain_timed,
@@ -1076,7 +1067,6 @@ static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay)
if (exp->exp_obd->obd_recovering) {
/* be nice to everyone during recovery */
- EXIT;
return;
}
@@ -1105,8 +1095,6 @@ static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay)
exp->exp_obd->obd_eviction_timer = 0;
}
}
-
- EXIT;
}
/**
@@ -1259,7 +1247,6 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
cfs_duration_t olddl = req->rq_deadline - cfs_time_current_sec();
time_t newdl;
int rc;
- ENTRY;
/* deadline is when the client expects us to reply, margin is the
difference between clients' and servers' expectations */
@@ -1270,7 +1257,7 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
at_get(&svcpt->scp_at_estimate), at_extra);
if (AT_OFF)
- RETURN(0);
+ return 0;
if (olddl < 0) {
DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), "
@@ -1278,13 +1265,13 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
"at_early_margin (%d)?", olddl, at_early_margin);
/* Return an error so we're not re-added to the timed list. */
- RETURN(-ETIMEDOUT);
+ return -ETIMEDOUT;
}
if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0){
DEBUG_REQ(D_INFO, req, "Wanted to ask client for more time, "
"but no AT support");
- RETURN(-ENOSYS);
+ return -ENOSYS;
}
if (req->rq_export &&
@@ -1314,18 +1301,18 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
olddl, req->rq_arrival_time.tv_sec +
at_get(&svcpt->scp_at_estimate) -
cfs_time_current_sec());
- RETURN(-ETIMEDOUT);
+ return -ETIMEDOUT;
}
}
newdl = cfs_time_current_sec() + at_get(&svcpt->scp_at_estimate);
OBD_ALLOC(reqcopy, sizeof *reqcopy);
if (reqcopy == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
OBD_ALLOC_LARGE(reqmsg, req->rq_reqlen);
if (!reqmsg) {
OBD_FREE(reqcopy, sizeof *reqcopy);
- RETURN(-ENOMEM);
+ return -ENOMEM;
}
*reqcopy = *req;
@@ -1384,7 +1371,7 @@ out:
sptlrpc_svc_ctx_decref(reqcopy);
OBD_FREE_LARGE(reqmsg, req->rq_reqlen);
OBD_FREE(reqcopy, sizeof *reqcopy);
- RETURN(rc);
+ return rc;
}
/* Send early replies to everybody expiring within at_early_margin
@@ -1399,19 +1386,18 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
time_t now = cfs_time_current_sec();
cfs_duration_t delay;
int first, counter = 0;
- ENTRY;
spin_lock(&svcpt->scp_at_lock);
if (svcpt->scp_at_check == 0) {
spin_unlock(&svcpt->scp_at_lock);
- RETURN(0);
+ return 0;
}
delay = cfs_time_sub(cfs_time_current(), svcpt->scp_at_checktime);
svcpt->scp_at_check = 0;
if (array->paa_count == 0) {
spin_unlock(&svcpt->scp_at_lock);
- RETURN(0);
+ return 0;
}
/* The timer went off, but maybe the nearest rpc already completed. */
@@ -1420,7 +1406,7 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
/* We've still got plenty of time. Reset the timer. */
ptlrpc_at_set_timer(svcpt);
spin_unlock(&svcpt->scp_at_lock);
- RETURN(0);
+ return 0;
}
/* We're close to a timeout, and we don't know how much longer the
@@ -1490,7 +1476,7 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
ptlrpc_server_drop_request(rq);
}
- RETURN(1); /* return "did_something" for liblustre */
+ return 1; /* return "did_something" for liblustre */
}
/**
@@ -1501,12 +1487,11 @@ static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
struct ptlrpc_request *req)
{
int rc = 0;
- ENTRY;
if (svcpt->scp_service->srv_ops.so_hpreq_handler) {
rc = svcpt->scp_service->srv_ops.so_hpreq_handler(req);
if (rc < 0)
- RETURN(rc);
+ return rc;
LASSERT(rc == 0);
}
if (req->rq_export && req->rq_ops) {
@@ -1527,7 +1512,7 @@ static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
* ost_brw_write().
*/
if (rc < 0)
- RETURN(rc);
+ return rc;
LASSERT(rc == 0 || rc == 1);
}
@@ -1539,13 +1524,12 @@ static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
ptlrpc_nrs_req_initialize(svcpt, req, rc);
- RETURN(rc);
+ return rc;
}
/** Remove the request from the export list. */
static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req)
{
- ENTRY;
if (req->rq_export && req->rq_ops) {
/* refresh lock timeout again so that client has more
* room to send lock cancel RPC. */
@@ -1556,7 +1540,6 @@ static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req)
list_del_init(&req->rq_exp_list);
spin_unlock_bh(&req->rq_export->exp_rpc_lock);
}
- EXIT;
}
static int ptlrpc_hpreq_check(struct ptlrpc_request *req)
@@ -1587,15 +1570,14 @@ static int ptlrpc_server_request_add(struct ptlrpc_service_part *svcpt,
struct ptlrpc_request *req)
{
int rc;
- ENTRY;
rc = ptlrpc_server_hpreq_init(svcpt, req);
if (rc < 0)
- RETURN(rc);
+ return rc;
ptlrpc_nrs_req_add(svcpt, req, !!rc);
- RETURN(0);
+ return 0;
}
/**
@@ -1701,7 +1683,6 @@ static struct ptlrpc_request *
ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force)
{
struct ptlrpc_request *req = NULL;
- ENTRY;
spin_lock(&svcpt->scp_req_lock);
@@ -1722,7 +1703,7 @@ ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force)
}
spin_unlock(&svcpt->scp_req_lock);
- RETURN(NULL);
+ return NULL;
got_request:
svcpt->scp_nreqs_active++;
@@ -1734,7 +1715,7 @@ got_request:
if (likely(req->rq_export))
class_export_rpc_inc(req->rq_export);
- RETURN(req);
+ return req;
}
/**
@@ -1751,12 +1732,11 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
struct ptlrpc_request *req;
__u32 deadline;
int rc;
- ENTRY;
spin_lock(&svcpt->scp_lock);
if (list_empty(&svcpt->scp_req_incoming)) {
spin_unlock(&svcpt->scp_lock);
- RETURN(0);
+ return 0;
}
req = list_entry(svcpt->scp_req_incoming.next,
@@ -1875,12 +1855,12 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
GOTO(err_req, rc);
wake_up(&svcpt->scp_waitq);
- RETURN(1);
+ return 1;
err_req:
ptlrpc_server_finish_request(svcpt, req);
- RETURN(1);
+ return 1;
}
/**
@@ -1898,11 +1878,10 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
long timediff;
int rc;
int fail_opc = 0;
- ENTRY;
request = ptlrpc_server_request_get(svcpt, false);
if (request == NULL)
- RETURN(0);
+ return 0;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT))
fail_opc = OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT;
@@ -2041,7 +2020,7 @@ put_conn:
out_req:
ptlrpc_server_finish_active_request(svcpt, request);
- RETURN(1);
+ return 1;
}
/**
@@ -2055,7 +2034,6 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
struct obd_export *exp;
int nlocks;
int been_handled;
- ENTRY;
exp = rs->rs_export;
@@ -2141,12 +2119,12 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
if (atomic_dec_and_test(&svcpt->scp_nreps_difficult) &&
svc->srv_is_stopping)
wake_up_all(&svcpt->scp_waitq);
- RETURN(1);
+ return 1;
}
/* still on the net; callback will schedule */
spin_unlock(&rs->rs_lock);
- RETURN(1);
+ return 1;
}
@@ -2252,7 +2230,9 @@ ptlrpc_wait_event(struct ptlrpc_service_part *svcpt,
struct l_wait_info lwi = LWI_TIMEOUT(svcpt->scp_rqbd_timeout,
ptlrpc_retry_rqbds, svcpt);
+ /* XXX: Add this back when libcfs watchdog is merged upstream
lc_watchdog_disable(thread->t_watchdog);
+ */
cond_resched();
@@ -2266,8 +2246,10 @@ ptlrpc_wait_event(struct ptlrpc_service_part *svcpt,
if (ptlrpc_thread_stopping(thread))
return -EINTR;
+ /*
lc_watchdog_touch(thread->t_watchdog,
ptlrpc_server_get_timeout(svcpt));
+ */
return 0;
}
@@ -2284,11 +2266,10 @@ static int ptlrpc_main(void *arg)
struct ptlrpc_service *svc = svcpt->scp_service;
struct ptlrpc_reply_state *rs;
#ifdef WITH_GROUP_INFO
- group_info_t *ginfo = NULL;
+ struct group_info *ginfo = NULL;
#endif
struct lu_env *env;
int counter = 0, rc = 0;
- ENTRY;
thread->t_pid = current_pid();
unshare_fs_struct();
@@ -2370,8 +2351,10 @@ static int ptlrpc_main(void *arg)
/* wake up our creator in case he's still waiting. */
wake_up(&thread->t_ctl_waitq);
+ /*
thread->t_watchdog = lc_watchdog_add(ptlrpc_server_get_timeout(svcpt),
NULL, NULL);
+ */
spin_lock(&svcpt->scp_rep_lock);
list_add(&rs->rs_list, &svcpt->scp_rep_idle);
@@ -2426,8 +2409,10 @@ static int ptlrpc_main(void *arg)
}
}
+ /*
lc_watchdog_delete(thread->t_watchdog);
thread->t_watchdog = NULL;
+ */
out_srv_fini:
/*
@@ -2550,7 +2535,6 @@ static int ptlrpc_start_hr_threads(void)
struct ptlrpc_hr_partition *hrp;
int i;
int j;
- ENTRY;
cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
int rc = 0;
@@ -2573,9 +2557,9 @@ static int ptlrpc_start_hr_threads(void)
CERROR("Reply handling thread %d:%d Failed on starting: "
"rc = %d\n", i, j, rc);
ptlrpc_stop_hr_threads();
- RETURN(rc);
+ return rc;
}
- RETURN(0);
+ return 0;
}
static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
@@ -2584,8 +2568,6 @@ static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
struct ptlrpc_thread *thread;
LIST_HEAD (zombie);
- ENTRY;
-
CDEBUG(D_INFO, "Stopping threads for service %s\n",
svcpt->scp_service->srv_name);
@@ -2625,7 +2607,6 @@ static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt)
list_del(&thread->t_link);
OBD_FREE_PTR(thread);
}
- EXIT;
}
/**
@@ -2635,14 +2616,11 @@ void ptlrpc_stop_all_threads(struct ptlrpc_service *svc)
{
struct ptlrpc_service_part *svcpt;
int i;
- ENTRY;
ptlrpc_service_for_each_part(svcpt, i, svc) {
if (svcpt->scp_service != NULL)
ptlrpc_svcpt_stop_threads(svcpt);
}
-
- EXIT;
}
EXPORT_SYMBOL(ptlrpc_stop_all_threads);
@@ -2651,7 +2629,6 @@ int ptlrpc_start_threads(struct ptlrpc_service *svc)
int rc = 0;
int i;
int j;
- ENTRY;
/* We require 2 threads min, see note in ptlrpc_server_handle_request */
LASSERT(svc->srv_nthrs_cpt_init >= PTLRPC_NTHRS_INIT);
@@ -2669,12 +2646,12 @@ int ptlrpc_start_threads(struct ptlrpc_service *svc)
}
}
- RETURN(0);
+ return 0;
failed:
CERROR("cannot start %s thread #%d_%d: rc %d\n",
svc->srv_thread_name, i, j, rc);
ptlrpc_stop_all_threads(svc);
- RETURN(rc);
+ return rc;
}
EXPORT_SYMBOL(ptlrpc_start_threads);
@@ -2684,7 +2661,6 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
struct ptlrpc_thread *thread;
struct ptlrpc_service *svc;
int rc;
- ENTRY;
LASSERT(svcpt != NULL);
@@ -2696,23 +2672,23 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
again:
if (unlikely(svc->srv_is_stopping))
- RETURN(-ESRCH);
+ return -ESRCH;
if (!ptlrpc_threads_increasable(svcpt) ||
(OBD_FAIL_CHECK(OBD_FAIL_TGT_TOOMANY_THREADS) &&
svcpt->scp_nthrs_running == svc->srv_nthrs_cpt_init - 1))
- RETURN(-EMFILE);
+ return -EMFILE;
OBD_CPT_ALLOC_PTR(thread, svc->srv_cptable, svcpt->scp_cpt);
if (thread == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
init_waitqueue_head(&thread->t_ctl_waitq);
spin_lock(&svcpt->scp_lock);
if (!ptlrpc_threads_increasable(svcpt)) {
spin_unlock(&svcpt->scp_lock);
OBD_FREE_PTR(thread);
- RETURN(-EMFILE);
+ return -EMFILE;
}
if (svcpt->scp_nthrs_starting != 0) {
@@ -2730,7 +2706,7 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
CDEBUG(D_INFO, "Creating thread %s #%d race, retry later\n",
svc->srv_thread_name, svcpt->scp_thr_nextid);
- RETURN(-EAGAIN);
+ return -EAGAIN;
}
svcpt->scp_nthrs_starting++;
@@ -2755,33 +2731,42 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
CERROR("cannot start thread '%s': rc %d\n",
thread->t_name, rc);
spin_lock(&svcpt->scp_lock);
- list_del(&thread->t_link);
--svcpt->scp_nthrs_starting;
- spin_unlock(&svcpt->scp_lock);
-
- OBD_FREE(thread, sizeof(*thread));
- RETURN(rc);
+ if (thread_is_stopping(thread)) {
+ /* this ptlrpc_thread is being handled
+ * by ptlrpc_svcpt_stop_threads now
+ */
+ thread_add_flags(thread, SVC_STOPPED);
+ wake_up(&thread->t_ctl_waitq);
+ spin_unlock(&svcpt->scp_lock);
+ } else {
+ list_del(&thread->t_link);
+ spin_unlock(&svcpt->scp_lock);
+ OBD_FREE_PTR(thread);
+ }
+ return rc;
}
if (!wait)
- RETURN(0);
+ return 0;
l_wait_event(thread->t_ctl_waitq,
thread_is_running(thread) || thread_is_stopped(thread),
&lwi);
rc = thread_is_stopped(thread) ? thread->t_id : 0;
- RETURN(rc);
+ return rc;
}
int ptlrpc_hr_init(void)
{
+ cpumask_t mask;
struct ptlrpc_hr_partition *hrp;
struct ptlrpc_hr_thread *hrt;
int rc;
int i;
int j;
- ENTRY;
+ int weight;
memset(&ptlrpc_hr, 0, sizeof(ptlrpc_hr));
ptlrpc_hr.hr_cpt_table = cfs_cpt_table;
@@ -2789,10 +2774,13 @@ int ptlrpc_hr_init(void)
ptlrpc_hr.hr_partitions = cfs_percpt_alloc(ptlrpc_hr.hr_cpt_table,
sizeof(*hrp));
if (ptlrpc_hr.hr_partitions == NULL)
- RETURN(-ENOMEM);
+ return -ENOMEM;
init_waitqueue_head(&ptlrpc_hr.hr_waitq);
+ cpumask_copy(&mask, topology_thread_cpumask(0));
+ weight = cpus_weight(mask);
+
cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
hrp->hrp_cpt = i;
@@ -2800,7 +2788,7 @@ int ptlrpc_hr_init(void)
atomic_set(&hrp->hrp_nstopped, 0);
hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i);
- hrp->hrp_nthrs /= cfs_cpu_ht_nsiblings(0);
+ hrp->hrp_nthrs /= weight;
LASSERT(hrp->hrp_nthrs > 0);
OBD_CPT_ALLOC(hrp->hrp_thrs, ptlrpc_hr.hr_cpt_table, i,
@@ -2823,7 +2811,7 @@ int ptlrpc_hr_init(void)
out:
if (rc != 0)
ptlrpc_hr_fini();
- RETURN(rc);
+ return rc;
}
void ptlrpc_hr_fini(void)
@@ -3045,8 +3033,6 @@ ptlrpc_service_free(struct ptlrpc_service *svc)
int ptlrpc_unregister_service(struct ptlrpc_service *service)
{
- ENTRY;
-
CDEBUG(D_NET, "%s: tearing down\n", service->srv_name);
service->srv_is_stopping = 1;
@@ -3066,7 +3052,7 @@ int ptlrpc_unregister_service(struct ptlrpc_service *service)
ptlrpc_service_free(service);
- RETURN(0);
+ return 0;
}
EXPORT_SYMBOL(ptlrpc_unregister_service);
diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig
index ae0abc350e3..46f1e619cbd 100644
--- a/drivers/staging/media/Kconfig
+++ b/drivers/staging/media/Kconfig
@@ -29,6 +29,8 @@ source "drivers/staging/media/dt3155v4l/Kconfig"
source "drivers/staging/media/go7007/Kconfig"
+source "drivers/staging/media/msi3101/Kconfig"
+
source "drivers/staging/media/solo6x10/Kconfig"
# Keep LIRC at the end, as it has sub-menus
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 2b97cae9949..eb7f30b1ccd 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -4,4 +4,5 @@ obj-$(CONFIG_LIRC_STAGING) += lirc/
obj-$(CONFIG_SOLO6X10) += solo6x10/
obj-$(CONFIG_VIDEO_DT3155) += dt3155v4l/
obj-$(CONFIG_VIDEO_GO7007) += go7007/
+obj-$(CONFIG_USB_MSI3101) += msi3101/
obj-$(CONFIG_VIDEO_DM365_VPFE) += davinci_vpfe/
diff --git a/drivers/staging/media/lirc/lirc_igorplugusb.c b/drivers/staging/media/lirc/lirc_igorplugusb.c
index 2faa391006d..28c8b0bcf5b 100644
--- a/drivers/staging/media/lirc/lirc_igorplugusb.c
+++ b/drivers/staging/media/lirc/lirc_igorplugusb.c
@@ -240,10 +240,6 @@ static int unregister_from_lirc(struct igorplug *ir)
dprintk(DRIVER_NAME "[%d]: calling lirc_unregister_driver\n", devnum);
lirc_unregister_driver(d->minor);
- kfree(d);
- ir->d = NULL;
- kfree(ir);
-
return devnum;
}
@@ -377,20 +373,16 @@ static int igorplugusb_remote_poll(void *data, struct lirc_buffer *buf)
return -ENODATA;
}
-
-
static int igorplugusb_remote_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct usb_device *dev = NULL;
+ struct usb_device *dev;
struct usb_host_interface *idesc = NULL;
struct usb_endpoint_descriptor *ep;
struct igorplug *ir = NULL;
struct lirc_driver *driver = NULL;
int devnum, pipe, maxp;
- int minor = 0;
char buf[63], name[128] = "";
- int mem_failure = 0;
int ret;
dprintk(DRIVER_NAME ": usb probe called.\n");
@@ -416,24 +408,18 @@ static int igorplugusb_remote_probe(struct usb_interface *intf,
dprintk(DRIVER_NAME "[%d]: bytes_in_key=%zu maxp=%d\n",
devnum, CODE_LENGTH, maxp);
- mem_failure = 0;
- ir = kzalloc(sizeof(struct igorplug), GFP_KERNEL);
- if (!ir) {
- mem_failure = 1;
- goto mem_failure_switch;
- }
- driver = kzalloc(sizeof(struct lirc_driver), GFP_KERNEL);
- if (!driver) {
- mem_failure = 2;
- goto mem_failure_switch;
- }
+ ir = devm_kzalloc(&intf->dev, sizeof(*ir), GFP_KERNEL);
+ if (!ir)
+ return -ENOMEM;
+
+ driver = devm_kzalloc(&intf->dev, sizeof(*driver), GFP_KERNEL);
+ if (!driver)
+ return -ENOMEM;
ir->buf_in = usb_alloc_coherent(dev, DEVICE_BUFLEN + DEVICE_HEADERLEN,
GFP_ATOMIC, &ir->dma_in);
- if (!ir->buf_in) {
- mem_failure = 3;
- goto mem_failure_switch;
- }
+ if (!ir->buf_in)
+ return -ENOMEM;
strcpy(driver->name, DRIVER_NAME " ");
driver->minor = -1;
@@ -449,27 +435,14 @@ static int igorplugusb_remote_probe(struct usb_interface *intf,
driver->dev = &intf->dev;
driver->owner = THIS_MODULE;
- minor = lirc_register_driver(driver);
- if (minor < 0)
- mem_failure = 9;
-
-mem_failure_switch:
-
- switch (mem_failure) {
- case 9:
+ ret = lirc_register_driver(driver);
+ if (ret < 0) {
usb_free_coherent(dev, DEVICE_BUFLEN + DEVICE_HEADERLEN,
ir->buf_in, ir->dma_in);
- case 3:
- kfree(driver);
- case 2:
- kfree(ir);
- case 1:
- printk(DRIVER_NAME "[%d]: out of memory (code=%d)\n",
- devnum, mem_failure);
- return -ENOMEM;
+ return ret;
}
- driver->minor = minor;
+ driver->minor = ret;
ir->d = driver;
ir->devnum = devnum;
ir->usbdev = dev;
@@ -502,7 +475,6 @@ mem_failure_switch:
return 0;
}
-
static void igorplugusb_remote_disconnect(struct usb_interface *intf)
{
struct usb_device *usbdev = interface_to_usbdev(intf);
diff --git a/drivers/staging/media/msi3101/Kconfig b/drivers/staging/media/msi3101/Kconfig
new file mode 100644
index 00000000000..b94a95a597d
--- /dev/null
+++ b/drivers/staging/media/msi3101/Kconfig
@@ -0,0 +1,3 @@
+config USB_MSI3101
+ tristate "Mirics MSi3101 SDR Dongle"
+ depends on USB && VIDEO_DEV && VIDEO_V4L2
diff --git a/drivers/staging/media/msi3101/Makefile b/drivers/staging/media/msi3101/Makefile
new file mode 100644
index 00000000000..3730654b0eb
--- /dev/null
+++ b/drivers/staging/media/msi3101/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_USB_MSI3101) += sdr-msi3101.o
diff --git a/drivers/staging/media/msi3101/sdr-msi3101.c b/drivers/staging/media/msi3101/sdr-msi3101.c
new file mode 100644
index 00000000000..24c7b70a6cb
--- /dev/null
+++ b/drivers/staging/media/msi3101/sdr-msi3101.c
@@ -0,0 +1,1931 @@
+/*
+ * Mirics MSi3101 SDR Dongle driver
+ *
+ * Copyright (C) 2013 Antti Palosaari <crope@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * This driver is loosely based on the pwc driver:
+ * (C) 1999-2004 Nemosoft Unv.
+ * (C) 2004-2006 Luc Saillard (luc@saillard.org)
+ * (C) 2011 Hans de Goede <hdegoede@redhat.com>
+ *
+ * The development tree of this driver can be found at:
+ * http://git.linuxtv.org/anttip/media_tree.git/shortlog/refs/heads/mirics
+ *
+ * GNU Radio plugin "gr-kernel" for device usage will be on:
+ * http://git.linuxtv.org/anttip/gr-kernel.git
+ *
+ * TODO:
+ * Help is very welcome with these and anything else you can imagine:
+ * - split USB ADC interface and RF tuner to own drivers (msi2500 and msi001)
+ * - move controls to V4L2 API
+ * - use libv4l2 for stream format conversions
+ * - gr-kernel: switch to v4l2_mmap (current read eats a lot of cpu)
+ * - SDRSharp support
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/gcd.h>
+#include <asm/div64.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-event.h>
+#include <linux/usb.h>
+#include <media/videobuf2-vmalloc.h>
+
+struct msi3101_gain {
+ u8 tot:7;
+ u8 baseband:6;
+ bool lna:1;
+ bool mixer:1;
+};
+
+/* 60 – 120 MHz band, lna 24dB, mixer 19dB */
+static const struct msi3101_gain msi3101_gain_lut_120[] = {
+ { 0, 0, 0, 0},
+ { 1, 1, 0, 0},
+ { 2, 2, 0, 0},
+ { 3, 3, 0, 0},
+ { 4, 4, 0, 0},
+ { 5, 5, 0, 0},
+ { 6, 6, 0, 0},
+ { 7, 7, 0, 0},
+ { 8, 8, 0, 0},
+ { 9, 9, 0, 0},
+ { 10, 10, 0, 0},
+ { 11, 11, 0, 0},
+ { 12, 12, 0, 0},
+ { 13, 13, 0, 0},
+ { 14, 14, 0, 0},
+ { 15, 15, 0, 0},
+ { 16, 16, 0, 0},
+ { 17, 17, 0, 0},
+ { 18, 18, 0, 0},
+ { 19, 19, 0, 0},
+ { 20, 20, 0, 0},
+ { 21, 21, 0, 0},
+ { 22, 22, 0, 0},
+ { 23, 23, 0, 0},
+ { 24, 24, 0, 0},
+ { 25, 25, 0, 0},
+ { 26, 26, 0, 0},
+ { 27, 27, 0, 0},
+ { 28, 28, 0, 0},
+ { 29, 5, 1, 0},
+ { 30, 6, 1, 0},
+ { 31, 7, 1, 0},
+ { 32, 8, 1, 0},
+ { 33, 9, 1, 0},
+ { 34, 10, 1, 0},
+ { 35, 11, 1, 0},
+ { 36, 12, 1, 0},
+ { 37, 13, 1, 0},
+ { 38, 14, 1, 0},
+ { 39, 15, 1, 0},
+ { 40, 16, 1, 0},
+ { 41, 17, 1, 0},
+ { 42, 18, 1, 0},
+ { 43, 19, 1, 0},
+ { 44, 20, 1, 0},
+ { 45, 21, 1, 0},
+ { 46, 22, 1, 0},
+ { 47, 23, 1, 0},
+ { 48, 24, 1, 0},
+ { 49, 25, 1, 0},
+ { 50, 26, 1, 0},
+ { 51, 27, 1, 0},
+ { 52, 28, 1, 0},
+ { 53, 29, 1, 0},
+ { 54, 30, 1, 0},
+ { 55, 31, 1, 0},
+ { 56, 32, 1, 0},
+ { 57, 33, 1, 0},
+ { 58, 34, 1, 0},
+ { 59, 35, 1, 0},
+ { 60, 36, 1, 0},
+ { 61, 37, 1, 0},
+ { 62, 38, 1, 0},
+ { 63, 39, 1, 0},
+ { 64, 40, 1, 0},
+ { 65, 41, 1, 0},
+ { 66, 42, 1, 0},
+ { 67, 43, 1, 0},
+ { 68, 44, 1, 0},
+ { 69, 45, 1, 0},
+ { 70, 46, 1, 0},
+ { 71, 47, 1, 0},
+ { 72, 48, 1, 0},
+ { 73, 49, 1, 0},
+ { 74, 50, 1, 0},
+ { 75, 51, 1, 0},
+ { 76, 52, 1, 0},
+ { 77, 53, 1, 0},
+ { 78, 54, 1, 0},
+ { 79, 55, 1, 0},
+ { 80, 56, 1, 0},
+ { 81, 57, 1, 0},
+ { 82, 58, 1, 0},
+ { 83, 40, 1, 1},
+ { 84, 41, 1, 1},
+ { 85, 42, 1, 1},
+ { 86, 43, 1, 1},
+ { 87, 44, 1, 1},
+ { 88, 45, 1, 1},
+ { 89, 46, 1, 1},
+ { 90, 47, 1, 1},
+ { 91, 48, 1, 1},
+ { 92, 49, 1, 1},
+ { 93, 50, 1, 1},
+ { 94, 51, 1, 1},
+ { 95, 52, 1, 1},
+ { 96, 53, 1, 1},
+ { 97, 54, 1, 1},
+ { 98, 55, 1, 1},
+ { 99, 56, 1, 1},
+ {100, 57, 1, 1},
+ {101, 58, 1, 1},
+ {102, 59, 1, 1},
+};
+
+/* 120 – 245 MHz band, lna 24dB, mixer 19dB */
+static const struct msi3101_gain msi3101_gain_lut_245[] = {
+ { 0, 0, 0, 0},
+ { 1, 1, 0, 0},
+ { 2, 2, 0, 0},
+ { 3, 3, 0, 0},
+ { 4, 4, 0, 0},
+ { 5, 5, 0, 0},
+ { 6, 6, 0, 0},
+ { 7, 7, 0, 0},
+ { 8, 8, 0, 0},
+ { 9, 9, 0, 0},
+ { 10, 10, 0, 0},
+ { 11, 11, 0, 0},
+ { 12, 12, 0, 0},
+ { 13, 13, 0, 0},
+ { 14, 14, 0, 0},
+ { 15, 15, 0, 0},
+ { 16, 16, 0, 0},
+ { 17, 17, 0, 0},
+ { 18, 18, 0, 0},
+ { 19, 19, 0, 0},
+ { 20, 20, 0, 0},
+ { 21, 21, 0, 0},
+ { 22, 22, 0, 0},
+ { 23, 23, 0, 0},
+ { 24, 24, 0, 0},
+ { 25, 25, 0, 0},
+ { 26, 26, 0, 0},
+ { 27, 27, 0, 0},
+ { 28, 28, 0, 0},
+ { 29, 5, 1, 0},
+ { 30, 6, 1, 0},
+ { 31, 7, 1, 0},
+ { 32, 8, 1, 0},
+ { 33, 9, 1, 0},
+ { 34, 10, 1, 0},
+ { 35, 11, 1, 0},
+ { 36, 12, 1, 0},
+ { 37, 13, 1, 0},
+ { 38, 14, 1, 0},
+ { 39, 15, 1, 0},
+ { 40, 16, 1, 0},
+ { 41, 17, 1, 0},
+ { 42, 18, 1, 0},
+ { 43, 19, 1, 0},
+ { 44, 20, 1, 0},
+ { 45, 21, 1, 0},
+ { 46, 22, 1, 0},
+ { 47, 23, 1, 0},
+ { 48, 24, 1, 0},
+ { 49, 25, 1, 0},
+ { 50, 26, 1, 0},
+ { 51, 27, 1, 0},
+ { 52, 28, 1, 0},
+ { 53, 29, 1, 0},
+ { 54, 30, 1, 0},
+ { 55, 31, 1, 0},
+ { 56, 32, 1, 0},
+ { 57, 33, 1, 0},
+ { 58, 34, 1, 0},
+ { 59, 35, 1, 0},
+ { 60, 36, 1, 0},
+ { 61, 37, 1, 0},
+ { 62, 38, 1, 0},
+ { 63, 39, 1, 0},
+ { 64, 40, 1, 0},
+ { 65, 41, 1, 0},
+ { 66, 42, 1, 0},
+ { 67, 43, 1, 0},
+ { 68, 44, 1, 0},
+ { 69, 45, 1, 0},
+ { 70, 46, 1, 0},
+ { 71, 47, 1, 0},
+ { 72, 48, 1, 0},
+ { 73, 49, 1, 0},
+ { 74, 50, 1, 0},
+ { 75, 51, 1, 0},
+ { 76, 52, 1, 0},
+ { 77, 53, 1, 0},
+ { 78, 54, 1, 0},
+ { 79, 55, 1, 0},
+ { 80, 56, 1, 0},
+ { 81, 57, 1, 0},
+ { 82, 58, 1, 0},
+ { 83, 40, 1, 1},
+ { 84, 41, 1, 1},
+ { 85, 42, 1, 1},
+ { 86, 43, 1, 1},
+ { 87, 44, 1, 1},
+ { 88, 45, 1, 1},
+ { 89, 46, 1, 1},
+ { 90, 47, 1, 1},
+ { 91, 48, 1, 1},
+ { 92, 49, 1, 1},
+ { 93, 50, 1, 1},
+ { 94, 51, 1, 1},
+ { 95, 52, 1, 1},
+ { 96, 53, 1, 1},
+ { 97, 54, 1, 1},
+ { 98, 55, 1, 1},
+ { 99, 56, 1, 1},
+ {100, 57, 1, 1},
+ {101, 58, 1, 1},
+ {102, 59, 1, 1},
+};
+
+/* 420 – 1000 MHz band, lna 7dB, mixer 19dB */
+static const struct msi3101_gain msi3101_gain_lut_1000[] = {
+ { 0, 0, 0, 0},
+ { 1, 1, 0, 0},
+ { 2, 2, 0, 0},
+ { 3, 3, 0, 0},
+ { 4, 4, 0, 0},
+ { 5, 5, 0, 0},
+ { 6, 6, 0, 0},
+ { 7, 7, 0, 0},
+ { 8, 8, 0, 0},
+ { 9, 9, 0, 0},
+ { 10, 10, 0, 0},
+ { 11, 11, 0, 0},
+ { 12, 5, 1, 0},
+ { 13, 6, 1, 0},
+ { 14, 7, 1, 0},
+ { 15, 8, 1, 0},
+ { 16, 9, 1, 0},
+ { 17, 10, 1, 0},
+ { 18, 11, 1, 0},
+ { 19, 12, 1, 0},
+ { 20, 13, 1, 0},
+ { 21, 14, 1, 0},
+ { 22, 15, 1, 0},
+ { 23, 16, 1, 0},
+ { 24, 17, 1, 0},
+ { 25, 18, 1, 0},
+ { 26, 19, 1, 0},
+ { 27, 20, 1, 0},
+ { 28, 21, 1, 0},
+ { 29, 22, 1, 0},
+ { 30, 23, 1, 0},
+ { 31, 24, 1, 0},
+ { 32, 25, 1, 0},
+ { 33, 26, 1, 0},
+ { 34, 27, 1, 0},
+ { 35, 28, 1, 0},
+ { 36, 29, 1, 0},
+ { 37, 30, 1, 0},
+ { 38, 31, 1, 0},
+ { 39, 32, 1, 0},
+ { 40, 33, 1, 0},
+ { 41, 34, 1, 0},
+ { 42, 35, 1, 0},
+ { 43, 36, 1, 0},
+ { 44, 37, 1, 0},
+ { 45, 38, 1, 0},
+ { 46, 39, 1, 0},
+ { 47, 40, 1, 0},
+ { 48, 41, 1, 0},
+ { 49, 42, 1, 0},
+ { 50, 43, 1, 0},
+ { 51, 44, 1, 0},
+ { 52, 45, 1, 0},
+ { 53, 46, 1, 0},
+ { 54, 47, 1, 0},
+ { 55, 48, 1, 0},
+ { 56, 49, 1, 0},
+ { 57, 50, 1, 0},
+ { 58, 51, 1, 0},
+ { 59, 52, 1, 0},
+ { 60, 53, 1, 0},
+ { 61, 54, 1, 0},
+ { 62, 55, 1, 0},
+ { 63, 56, 1, 0},
+ { 64, 57, 1, 0},
+ { 65, 58, 1, 0},
+ { 66, 40, 1, 1},
+ { 67, 41, 1, 1},
+ { 68, 42, 1, 1},
+ { 69, 43, 1, 1},
+ { 70, 44, 1, 1},
+ { 71, 45, 1, 1},
+ { 72, 46, 1, 1},
+ { 73, 47, 1, 1},
+ { 74, 48, 1, 1},
+ { 75, 49, 1, 1},
+ { 76, 50, 1, 1},
+ { 77, 51, 1, 1},
+ { 78, 52, 1, 1},
+ { 79, 53, 1, 1},
+ { 80, 54, 1, 1},
+ { 81, 55, 1, 1},
+ { 82, 56, 1, 1},
+ { 83, 57, 1, 1},
+ { 84, 58, 1, 1},
+ { 85, 59, 1, 1},
+};
+
+/*
+ * iConfiguration 0
+ * bInterfaceNumber 0
+ * bAlternateSetting 1
+ * bNumEndpoints 1
+ * bEndpointAddress 0x81 EP 1 IN
+ * bmAttributes 1
+ * Transfer Type Isochronous
+ * wMaxPacketSize 0x1400 3x 1024 bytes
+ * bInterval 1
+ */
+#define MAX_ISO_BUFS (8)
+#define ISO_FRAMES_PER_DESC (8)
+#define ISO_MAX_FRAME_SIZE (3 * 1024)
+#define ISO_BUFFER_SIZE (ISO_FRAMES_PER_DESC * ISO_MAX_FRAME_SIZE)
+#define MAX_ISOC_ERRORS 20
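+
+/*
+ * With the values above each URB carries 8 * 3072 = 24576 bytes of ISO
+ * payload, so all 8 URBs together pin roughly 192 KiB of coherent DMA
+ * memory while streaming (a back-of-the-envelope figure, not a measured
+ * one).
+ */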
+
+/* TODO: These should be moved to V4L2 API */
+#define MSI3101_CID_SAMPLING_MODE ((V4L2_CID_USER_BASE | 0xf000) + 0)
+#define MSI3101_CID_SAMPLING_RATE ((V4L2_CID_USER_BASE | 0xf000) + 1)
+#define MSI3101_CID_SAMPLING_RESOLUTION ((V4L2_CID_USER_BASE | 0xf000) + 2)
+#define MSI3101_CID_TUNER_RF ((V4L2_CID_USER_BASE | 0xf000) + 10)
+#define MSI3101_CID_TUNER_BW ((V4L2_CID_USER_BASE | 0xf000) + 11)
+#define MSI3101_CID_TUNER_IF ((V4L2_CID_USER_BASE | 0xf000) + 12)
+#define MSI3101_CID_TUNER_GAIN ((V4L2_CID_USER_BASE | 0xf000) + 13)
+
+/* intermediate buffers with raw data from the USB device */
+struct msi3101_frame_buf {
+ struct vb2_buffer vb; /* common v4l buffer stuff -- must be first */
+ struct list_head list;
+};
+
+struct msi3101_state {
+ struct video_device vdev;
+ struct v4l2_device v4l2_dev;
+
+ /* videobuf2 queue and queued buffers list */
+ struct vb2_queue vb_queue;
+ struct list_head queued_bufs;
+ spinlock_t queued_bufs_lock; /* Protects queued_bufs */
+
+ /* Note: if taking both locks, v4l2_lock must always be taken first! */
+ struct mutex v4l2_lock; /* Protects everything else */
+ struct mutex vb_queue_lock; /* Protects vb_queue and capt_file */
+
+ /* Pointer to our usb_device, will be NULL after unplug */
+ struct usb_device *udev; /* Both mutexes must be held when setting! */
+
+ unsigned int isoc_errors; /* number of contiguous ISOC errors */
+ unsigned int vb_full; /* vb is full and packets dropped */
+
+ struct urb *urbs[MAX_ISO_BUFS];
+ int (*convert_stream) (struct msi3101_state *s, u32 *dst, u8 *src,
+ unsigned int src_len);
+
+ /* Controls */
+ struct v4l2_ctrl_handler ctrl_handler;
+ struct v4l2_ctrl *ctrl_sampling_rate;
+ struct v4l2_ctrl *ctrl_tuner_rf;
+ struct v4l2_ctrl *ctrl_tuner_bw;
+ struct v4l2_ctrl *ctrl_tuner_if;
+ struct v4l2_ctrl *ctrl_tuner_gain;
+
+ u32 next_sample; /* for tracking lost packets */
+ u32 sample; /* for sample rate calc */
+ unsigned long jiffies;
+ unsigned int sample_ctrl_bit[4];
+};
+
+/* Private functions */
+static struct msi3101_frame_buf *msi3101_get_next_fill_buf(
+ struct msi3101_state *s)
+{
+ unsigned long flags = 0;
+ struct msi3101_frame_buf *buf = NULL;
+
+ spin_lock_irqsave(&s->queued_bufs_lock, flags);
+ if (list_empty(&s->queued_bufs))
+ goto leave;
+
+ buf = list_entry(s->queued_bufs.next, struct msi3101_frame_buf, list);
+ list_del(&buf->list);
+leave:
+ spin_unlock_irqrestore(&s->queued_bufs_lock, flags);
+ return buf;
+}
+
+/*
+ * +===========================================================================
+ * | 00-1023 | USB packet type '384'
+ * +===========================================================================
+ * | 00- 03 | sequence number of first sample in that USB packet
+ * +---------------------------------------------------------------------------
+ * | 04- 15 | garbage
+ * +---------------------------------------------------------------------------
+ * | 16- 175 | samples
+ * +---------------------------------------------------------------------------
+ * | 176- 179 | control bits for previous samples
+ * +---------------------------------------------------------------------------
+ * | 180- 339 | samples
+ * +---------------------------------------------------------------------------
+ * | 340- 343 | control bits for previous samples
+ * +---------------------------------------------------------------------------
+ * | 344- 503 | samples
+ * +---------------------------------------------------------------------------
+ * | 504- 507 | control bits for previous samples
+ * +---------------------------------------------------------------------------
+ * | 508- 667 | samples
+ * +---------------------------------------------------------------------------
+ * | 668- 671 | control bits for previous samples
+ * +---------------------------------------------------------------------------
+ * | 672- 831 | samples
+ * +---------------------------------------------------------------------------
+ * | 832- 835 | control bits for previous samples
+ * +---------------------------------------------------------------------------
+ * | 836- 995 | samples
+ * +---------------------------------------------------------------------------
+ * | 996- 999 | control bits for previous samples
+ * +---------------------------------------------------------------------------
+ * | 1000-1023 | garbage
+ * +---------------------------------------------------------------------------
+ *
+ * Bytes 4 - 7 could have some meaning?
+ *
+ * The 'control bits for previous samples' field is a 32-bit word containing
+ * 16 x 2-bit numbers, i.e. one 2-bit number per 8 samples. It is likely used
+ * for shifting those samples left by the given number of bits, increasing the
+ * effective sampling resolution. The value 2 (0b10) has never been seen.
+ *
+ * 6 * 16 * 2 * 4 = 768 samples. 768 * 4 = 3072 bytes
+ */
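+
+/*
+ * A rough byte-level accounting of the layout above (derived from the table,
+ * not from vendor documentation): 16 bytes of header, six blocks of 160
+ * sample bytes plus a 4-byte control word each, and 24 bytes of trailing
+ * garbage, i.e. 16 + 6 * (160 + 4) + 24 = 1024 bytes. Each 160-byte block
+ * packs 16 groups of 8 x 10-bit samples, so one frame carries 6 * 128 = 768
+ * samples, that is 384 I+Q pairs - hence the '384' packet type.
+ */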
+
+/*
+ * Integer to 32-bit IEEE floating point representation routine is taken
+ * from Radeon R600 driver (drivers/gpu/drm/radeon/r600_blit_kms.c).
+ *
+ * TODO: Currently the conversion is done here in the kernel, but in the
+ * future it will be moved to the libv4l2 library, just as the video format
+ * conversions are.
+ */
+#define I2F_FRAC_BITS 23
+#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
+
+/*
+ * Converts signed 8-bit integer into 32-bit IEEE floating point
+ * representation.
+ */
+static u32 msi3101_convert_sample_504(struct msi3101_state *s, u16 x)
+{
+ u32 msb, exponent, fraction, sign;
+
+ /* Zero is special */
+ if (!x)
+ return 0;
+
+ /* Negative / positive value */
+ if (x & (1 << 7)) {
+ x = -x;
+ x &= 0x7f; /* result is 7 bit ... + sign */
+ sign = 1 << 31;
+ } else {
+ sign = 0 << 31;
+ }
+
+ /* Get location of the most significant bit */
+ msb = __fls(x);
+
+ fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
+ exponent = (127 + msb) << I2F_FRAC_BITS;
+
+ return (fraction + exponent) | sign;
+}
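+
+/*
+ * A quick sanity check of the routine above (hand-worked numbers, not
+ * reference vectors from the hardware): for x = 80, __fls() returns 6, the
+ * fraction becomes 0x200000 and the exponent (127 + 6) << 23, giving
+ * 0x42a00000, the IEEE 754 encoding of 80.0f. For x = 0xb0 (-80 as an 8-bit
+ * two's complement value) the same steps yield 0xc2a00000, i.e. -80.0f.
+ */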
+
+static int msi3101_convert_stream_504(struct msi3101_state *s, u32 *dst,
+ u8 *src, unsigned int src_len)
+{
+ int i, j, i_max, dst_len = 0;
+ u16 sample[2];
+ u32 sample_num[3];
+
+ /* There can be 1-3 1024-byte URB frames */
+ i_max = src_len / 1024;
+
+ for (i = 0; i < i_max; i++) {
+ sample_num[i] = src[3] << 24 | src[2] << 16 | src[1] << 8 | src[0] << 0;
+ if (i == 0 && s->next_sample != sample_num[0]) {
+ dev_dbg_ratelimited(&s->udev->dev,
+ "%d samples lost, %d %08x:%08x\n",
+ sample_num[0] - s->next_sample,
+ src_len, s->next_sample, sample_num[0]);
+ }
+
+ /*
+ * Dump all unknown 'garbage' data - maybe someday we will discover
+ * whether there is something meaningful in it...
+ */
+ dev_dbg_ratelimited(&s->udev->dev, "%*ph\n", 12, &src[4]);
+
+ src += 16;
+ for (j = 0; j < 1008; j += 2) {
+ sample[0] = src[j + 0];
+ sample[1] = src[j + 1];
+
+ *dst++ = msi3101_convert_sample_504(s, sample[0]);
+ *dst++ = msi3101_convert_sample_504(s, sample[1]);
+ }
+ /* 504 x I+Q 32bit float samples */
+ dst_len += 504 * 2 * 4;
+ src += 1008;
+ }
+
+ /* calculate the sampling rate and report it at 10 second intervals */
+ if ((s->jiffies + msecs_to_jiffies(10000)) <= jiffies) {
+ unsigned long jiffies_now = jiffies;
+ unsigned long msecs = jiffies_to_msecs(jiffies_now) - jiffies_to_msecs(s->jiffies);
+ unsigned int samples = sample_num[i_max - 1] - s->sample;
+ s->jiffies = jiffies_now;
+ s->sample = sample_num[i_max - 1];
+ dev_dbg(&s->udev->dev,
+ "slen=%d samples=%u msecs=%lu sampling rate=%lu\n",
+ src_len, samples, msecs,
+ samples * 1000UL / msecs);
+ }
+
+ /* next sample (sample = sample + i * 504) */
+ s->next_sample = sample_num[i_max - 1] + 504;
+
+ return dst_len;
+}
+
+/*
+ * Converts signed ~10+2-bit integer into 32-bit IEEE floating point
+ * representation.
+ */
+static u32 msi3101_convert_sample_384(struct msi3101_state *s, u16 x, int shift)
+{
+ u32 msb, exponent, fraction, sign;
+ s->sample_ctrl_bit[shift]++;
+
+ /* Zero is special */
+ if (!x)
+ return 0;
+
+ if (shift == 3)
+ shift = 2;
+
+ /* Convert 10-bit two's complement to 12-bit */
+ if (x & (1 << 9)) {
+ x |= ~0U << 10; /* set all the remaining bits to one */
+ x <<= shift;
+ x = -x;
+ x &= 0x7ff; /* result is 11 bit ... + sign */
+ sign = 1 << 31;
+ } else {
+ x <<= shift;
+ sign = 0 << 31;
+ }
+
+ /* Get location of the most significant bit */
+ msb = __fls(x);
+
+ fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
+ exponent = (127 + msb) << I2F_FRAC_BITS;
+
+ return (fraction + exponent) | sign;
+}
+
+static int msi3101_convert_stream_384(struct msi3101_state *s, u32 *dst,
+ u8 *src, unsigned int src_len)
+{
+ int i, j, k, l, i_max, dst_len = 0;
+ u16 sample[4];
+ u32 bits;
+ u32 sample_num[3];
+
+ /* There can be 1-3 1024-byte URB frames */
+ i_max = src_len / 1024;
+ for (i = 0; i < i_max; i++) {
+ sample_num[i] = src[3] << 24 | src[2] << 16 | src[1] << 8 | src[0] << 0;
+ if (i == 0 && s->next_sample != sample_num[0]) {
+ dev_dbg_ratelimited(&s->udev->dev,
+ "%d samples lost, %d %08x:%08x\n",
+ sample_num[0] - s->next_sample,
+ src_len, s->next_sample, sample_num[0]);
+ }
+
+ /*
+ * Dump all unknown 'garbage' data - maybe someday we will discover
+ * whether there is something meaningful in it...
+ */
+ dev_dbg_ratelimited(&s->udev->dev,
+ "%*ph %*ph\n", 12, &src[4], 24, &src[1000]);
+
+ src += 16;
+ for (j = 0; j < 6; j++) {
+ bits = src[160 + 3] << 24 | src[160 + 2] << 16 | src[160 + 1] << 8 | src[160 + 0] << 0;
+ for (k = 0; k < 16; k++) {
+ for (l = 0; l < 10; l += 5) {
+ sample[0] = (src[l + 0] & 0xff) >> 0 | (src[l + 1] & 0x03) << 8;
+ sample[1] = (src[l + 1] & 0xfc) >> 2 | (src[l + 2] & 0x0f) << 6;
+ sample[2] = (src[l + 2] & 0xf0) >> 4 | (src[l + 3] & 0x3f) << 4;
+ sample[3] = (src[l + 3] & 0xc0) >> 6 | (src[l + 4] & 0xff) << 2;
+
+ *dst++ = msi3101_convert_sample_384(s, sample[0], (bits >> (2 * k)) & 0x3);
+ *dst++ = msi3101_convert_sample_384(s, sample[1], (bits >> (2 * k)) & 0x3);
+ *dst++ = msi3101_convert_sample_384(s, sample[2], (bits >> (2 * k)) & 0x3);
+ *dst++ = msi3101_convert_sample_384(s, sample[3], (bits >> (2 * k)) & 0x3);
+ }
+ src += 10;
+ }
+ dev_dbg_ratelimited(&s->udev->dev,
+ "sample control bits %08x\n", bits);
+ src += 4;
+ }
+ /* 384 x I+Q 32bit float samples */
+ dst_len += 384 * 2 * 4;
+ src += 24;
+ }
+
+ /* calculate the sampling rate and report it at 10 second intervals */
+ if ((s->jiffies + msecs_to_jiffies(10000)) <= jiffies) {
+ unsigned long jiffies_now = jiffies;
+ unsigned long msecs = jiffies_to_msecs(jiffies_now) - jiffies_to_msecs(s->jiffies);
+ unsigned int samples = sample_num[i_max - 1] - s->sample;
+ s->jiffies = jiffies_now;
+ s->sample = sample_num[i_max - 1];
+ dev_dbg(&s->udev->dev,
+ "slen=%d samples=%u msecs=%lu sampling rate=%lu bits=%d.%d.%d.%d\n",
+ src_len, samples, msecs,
+ samples * 1000UL / msecs,
+ s->sample_ctrl_bit[0], s->sample_ctrl_bit[1],
+ s->sample_ctrl_bit[2], s->sample_ctrl_bit[3]);
+ }
+
+ /* next sample (sample = sample + i * 384) */
+ s->next_sample = sample_num[i_max - 1] + 384;
+
+ return dst_len;
+}
+
+/*
+ * Converts signed 12-bit integer into 32-bit IEEE floating point
+ * representation.
+ */
+static u32 msi3101_convert_sample_336(struct msi3101_state *s, u16 x)
+{
+ u32 msb, exponent, fraction, sign;
+
+ /* Zero is special */
+ if (!x)
+ return 0;
+
+ /* Negative / positive value */
+ if (x & (1 << 11)) {
+ x = -x;
+ x &= 0x7ff; /* result is 11 bit ... + sign */
+ sign = 1 << 31;
+ } else {
+ sign = 0 << 31;
+ }
+
+ /* Get location of the most significant bit */
+ msb = __fls(x);
+
+ fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
+ exponent = (127 + msb) << I2F_FRAC_BITS;
+
+ return (fraction + exponent) | sign;
+}
+
+static int msi3101_convert_stream_336(struct msi3101_state *s, u32 *dst,
+ u8 *src, unsigned int src_len)
+{
+ int i, j, i_max, dst_len = 0;
+ u16 sample[2];
+ u32 sample_num[3];
+
+ /* There can be 1-3 1024-byte URB frames */
+ i_max = src_len / 1024;
+
+ for (i = 0; i < i_max; i++) {
+ sample_num[i] = src[3] << 24 | src[2] << 16 | src[1] << 8 | src[0] << 0;
+ if (i == 0 && s->next_sample != sample_num[0]) {
+ dev_dbg_ratelimited(&s->udev->dev,
+ "%d samples lost, %d %08x:%08x\n",
+ sample_num[0] - s->next_sample,
+ src_len, s->next_sample, sample_num[0]);
+ }
+
+ /*
+ * Dump all unknown 'garbage' data - maybe someday we will discover
+ * whether there is something meaningful in it...
+ */
+ dev_dbg_ratelimited(&s->udev->dev, "%*ph\n", 12, &src[4]);
+
+ src += 16;
+ for (j = 0; j < 1008; j += 3) {
+ sample[0] = (src[j + 0] & 0xff) >> 0 | (src[j + 1] & 0x0f) << 8;
+ sample[1] = (src[j + 1] & 0xf0) >> 4 | (src[j + 2] & 0xff) << 4;
+
+ *dst++ = msi3101_convert_sample_336(s, sample[0]);
+ *dst++ = msi3101_convert_sample_336(s, sample[1]);
+ }
+ /* 336 x I+Q 32bit float samples */
+ dst_len += 336 * 2 * 4;
+ src += 1008;
+ }
+
+ /* calculate the sampling rate and report it at 10 second intervals */
+ if ((s->jiffies + msecs_to_jiffies(10000)) <= jiffies) {
+ unsigned long jiffies_now = jiffies;
+ unsigned long msecs = jiffies_to_msecs(jiffies_now) - jiffies_to_msecs(s->jiffies);
+ unsigned int samples = sample_num[i_max - 1] - s->sample;
+ s->jiffies = jiffies_now;
+ s->sample = sample_num[i_max - 1];
+ dev_dbg(&s->udev->dev,
+ "slen=%d samples=%u msecs=%lu sampling rate=%lu\n",
+ src_len, samples, msecs,
+ samples * 1000UL / msecs);
+ }
+
+ /* next sample (sample = sample + i * 336) */
+ s->next_sample = sample_num[i_max - 1] + 336;
+
+ return dst_len;
+}
+
+/*
+ * Converts signed 14-bit integer into 32-bit IEEE floating point
+ * representation.
+ */
+static u32 msi3101_convert_sample_252(struct msi3101_state *s, u16 x)
+{
+ u32 msb, exponent, fraction, sign;
+
+ /* Zero is special */
+ if (!x)
+ return 0;
+
+ /* Negative / positive value */
+ if (x & (1 << 13)) {
+ x = -x;
+ x &= 0x1fff; /* result is 13 bit ... + sign */
+ sign = 1 << 31;
+ } else {
+ sign = 0 << 31;
+ }
+
+ /* Get location of the most significant bit */
+ msb = __fls(x);
+
+ fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
+ exponent = (127 + msb) << I2F_FRAC_BITS;
+
+ return (fraction + exponent) | sign;
+}
+
+static int msi3101_convert_stream_252(struct msi3101_state *s, u32 *dst,
+ u8 *src, unsigned int src_len)
+{
+ int i, j, i_max, dst_len = 0;
+ u16 sample[2];
+ u32 sample_num[3];
+
+ /* There can be 1-3 1024-byte URB frames */
+ i_max = src_len / 1024;
+
+ for (i = 0; i < i_max; i++) {
+ sample_num[i] = src[3] << 24 | src[2] << 16 | src[1] << 8 | src[0] << 0;
+ if (i == 0 && s->next_sample != sample_num[0]) {
+ dev_dbg_ratelimited(&s->udev->dev,
+ "%d samples lost, %d %08x:%08x\n",
+ sample_num[0] - s->next_sample,
+ src_len, s->next_sample, sample_num[0]);
+ }
+
+ /*
+ * Dump all unknown 'garbage' data - maybe someday we will discover
+ * whether there is something meaningful in it...
+ */
+ dev_dbg_ratelimited(&s->udev->dev, "%*ph\n", 12, &src[4]);
+
+ src += 16;
+ for (j = 0; j < 1008; j += 4) {
+ sample[0] = src[j + 0] >> 0 | src[j + 1] << 8;
+ sample[1] = src[j + 2] >> 0 | src[j + 3] << 8;
+
+ *dst++ = msi3101_convert_sample_252(s, sample[0]);
+ *dst++ = msi3101_convert_sample_252(s, sample[1]);
+ }
+ /* 252 x I+Q 32bit float samples */
+ dst_len += 252 * 2 * 4;
+ src += 1008;
+ }
+
+ /* calculate the sampling rate and report it at 10 second intervals */
+ if ((s->jiffies + msecs_to_jiffies(10000)) <= jiffies) {
+ unsigned long jiffies_now = jiffies;
+ unsigned long msecs = jiffies_to_msecs(jiffies_now) - jiffies_to_msecs(s->jiffies);
+ unsigned int samples = sample_num[i_max - 1] - s->sample;
+ s->jiffies = jiffies_now;
+ s->sample = sample_num[i_max - 1];
+ dev_dbg(&s->udev->dev,
+ "slen=%d samples=%u msecs=%lu sampling rate=%lu\n",
+ src_len, samples, msecs,
+ samples * 1000UL / msecs);
+ }
+
+ /* next sample (sample = sample + i * 252) */
+ s->next_sample = sample_num[i_max - 1] + 252;
+
+ return dst_len;
+}
+
+/*
+ * This gets called for the Isochronous pipe (stream). It runs in interrupt
+ * context, so it has to be fast, must not crash, and must not stall. Neat.
+ */
+static void msi3101_isoc_handler(struct urb *urb)
+{
+ struct msi3101_state *s = (struct msi3101_state *)urb->context;
+ int i, flen, fstatus;
+ unsigned char *iso_buf = NULL;
+ struct msi3101_frame_buf *fbuf;
+
+ if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
+ urb->status == -ESHUTDOWN) {
+ dev_dbg(&s->udev->dev, "URB (%p) unlinked %ssynchronuously\n",
+ urb, urb->status == -ENOENT ? "" : "a");
+ return;
+ }
+
+ if (urb->status != 0) {
+ dev_dbg(&s->udev->dev,
+ "msi3101_isoc_handler() called with status %d\n",
+ urb->status);
+ /* Give up after a number of contiguous errors */
+ if (++s->isoc_errors > MAX_ISOC_ERRORS)
+ dev_dbg(&s->udev->dev,
+ "Too many ISOC errors, bailing out\n");
+ goto handler_end;
+ } else {
+ /* Reset ISOC error counter. We did get here, after all. */
+ s->isoc_errors = 0;
+ }
+
+ /* Compact data */
+ for (i = 0; i < urb->number_of_packets; i++) {
+ void *ptr;
+
+ /* Check frame error */
+ fstatus = urb->iso_frame_desc[i].status;
+ if (fstatus) {
+ dev_dbg_ratelimited(&s->udev->dev,
+ "frame=%d/%d has error %d skipping\n",
+ i, urb->number_of_packets, fstatus);
+ goto skip;
+ }
+
+ /* Check if that frame contains data */
+ flen = urb->iso_frame_desc[i].actual_length;
+ if (flen == 0)
+ goto skip;
+
+ iso_buf = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
+
+ /* Get free framebuffer */
+ fbuf = msi3101_get_next_fill_buf(s);
+ if (fbuf == NULL) {
+ s->vb_full++;
+ dev_dbg_ratelimited(&s->udev->dev,
+ "videobuf is full, %d packets dropped\n",
+ s->vb_full);
+ goto skip;
+ }
+
+ /* fill framebuffer */
+ ptr = vb2_plane_vaddr(&fbuf->vb, 0);
+ flen = s->convert_stream(s, ptr, iso_buf, flen);
+ vb2_set_plane_payload(&fbuf->vb, 0, flen);
+ vb2_buffer_done(&fbuf->vb, VB2_BUF_STATE_DONE);
+skip:
+ ;
+ }
+
+handler_end:
+ i = usb_submit_urb(urb, GFP_ATOMIC);
+ if (i != 0)
+ dev_dbg(&s->udev->dev,
+ "Error (%d) re-submitting urb in msi3101_isoc_handler\n",
+ i);
+}
+
+static void msi3101_iso_stop(struct msi3101_state *s)
+{
+ int i;
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ /* Unlinking ISOC buffers one by one */
+ for (i = 0; i < MAX_ISO_BUFS; i++) {
+ if (s->urbs[i]) {
+ dev_dbg(&s->udev->dev, "Unlinking URB %p\n",
+ s->urbs[i]);
+ usb_kill_urb(s->urbs[i]);
+ }
+ }
+}
+
+static void msi3101_iso_free(struct msi3101_state *s)
+{
+ int i;
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ /* Freeing ISOC buffers one by one */
+ for (i = 0; i < MAX_ISO_BUFS; i++) {
+ if (s->urbs[i]) {
+ dev_dbg(&s->udev->dev, "Freeing URB\n");
+ if (s->urbs[i]->transfer_buffer) {
+ usb_free_coherent(s->udev,
+ s->urbs[i]->transfer_buffer_length,
+ s->urbs[i]->transfer_buffer,
+ s->urbs[i]->transfer_dma);
+ }
+ usb_free_urb(s->urbs[i]);
+ s->urbs[i] = NULL;
+ }
+ }
+}
+
+/* Both v4l2_lock and vb_queue_lock should be locked when calling this */
+static void msi3101_isoc_cleanup(struct msi3101_state *s)
+{
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ msi3101_iso_stop(s);
+ msi3101_iso_free(s);
+}
+
+/* Both v4l2_lock and vb_queue_lock should be locked when calling this */
+static int msi3101_isoc_init(struct msi3101_state *s)
+{
+ struct usb_device *udev;
+ struct urb *urb;
+ int i, j, ret;
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ s->isoc_errors = 0;
+ udev = s->udev;
+
+ ret = usb_set_interface(s->udev, 0, 1);
+ if (ret < 0)
+ return ret;
+
+ /* Allocate and init isochronous URBs */
+ for (i = 0; i < MAX_ISO_BUFS; i++) {
+ urb = usb_alloc_urb(ISO_FRAMES_PER_DESC, GFP_KERNEL);
+ if (urb == NULL) {
+ dev_err(&s->udev->dev,
+ "Failed to allocate urb %d\n", i);
+ msi3101_isoc_cleanup(s);
+ return -ENOMEM;
+ }
+ s->urbs[i] = urb;
+ dev_dbg(&s->udev->dev, "Allocated URB at 0x%p\n", urb);
+
+ urb->interval = 1;
+ urb->dev = udev;
+ urb->pipe = usb_rcvisocpipe(udev, 0x81);
+ urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_buffer = usb_alloc_coherent(udev, ISO_BUFFER_SIZE,
+ GFP_KERNEL, &urb->transfer_dma);
+ if (urb->transfer_buffer == NULL) {
+ dev_err(&s->udev->dev,
+ "Failed to allocate urb buffer %d\n",
+ i);
+ msi3101_isoc_cleanup(s);
+ return -ENOMEM;
+ }
+ urb->transfer_buffer_length = ISO_BUFFER_SIZE;
+ urb->complete = msi3101_isoc_handler;
+ urb->context = s;
+ urb->start_frame = 0;
+ urb->number_of_packets = ISO_FRAMES_PER_DESC;
+ for (j = 0; j < ISO_FRAMES_PER_DESC; j++) {
+ urb->iso_frame_desc[j].offset = j * ISO_MAX_FRAME_SIZE;
+ urb->iso_frame_desc[j].length = ISO_MAX_FRAME_SIZE;
+ }
+ }
+
+ /* link */
+ for (i = 0; i < MAX_ISO_BUFS; i++) {
+ ret = usb_submit_urb(s->urbs[i], GFP_KERNEL);
+ if (ret) {
+ dev_err(&s->udev->dev,
+ "isoc_init() submit_urb %d failed with error %d\n",
+ i, ret);
+ msi3101_isoc_cleanup(s);
+ return ret;
+ }
+ dev_dbg(&s->udev->dev, "URB 0x%p submitted.\n", s->urbs[i]);
+ }
+
+ /* All is done... */
+ return 0;
+}
+
+/* Must be called with vb_queue_lock held */
+static void msi3101_cleanup_queued_bufs(struct msi3101_state *s)
+{
+ unsigned long flags = 0;
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ spin_lock_irqsave(&s->queued_bufs_lock, flags);
+ while (!list_empty(&s->queued_bufs)) {
+ struct msi3101_frame_buf *buf;
+
+ buf = list_entry(s->queued_bufs.next, struct msi3101_frame_buf,
+ list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ }
+ spin_unlock_irqrestore(&s->queued_bufs_lock, flags);
+}
+
+/* The user yanked out the cable... */
+static void msi3101_disconnect(struct usb_interface *intf)
+{
+ struct v4l2_device *v = usb_get_intfdata(intf);
+ struct msi3101_state *s =
+ container_of(v, struct msi3101_state, v4l2_dev);
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ mutex_lock(&s->vb_queue_lock);
+ mutex_lock(&s->v4l2_lock);
+ /* No need to keep the urbs around after disconnection */
+ s->udev = NULL;
+
+ v4l2_device_disconnect(&s->v4l2_dev);
+ video_unregister_device(&s->vdev);
+ mutex_unlock(&s->v4l2_lock);
+ mutex_unlock(&s->vb_queue_lock);
+
+ v4l2_device_put(&s->v4l2_dev);
+}
+
+static int msi3101_querycap(struct file *file, void *fh,
+ struct v4l2_capability *cap)
+{
+ struct msi3101_state *s = video_drvdata(file);
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ strlcpy(cap->driver, KBUILD_MODNAME, sizeof(cap->driver));
+ strlcpy(cap->card, s->vdev.name, sizeof(cap->card));
+ usb_make_path(s->udev, cap->bus_info, sizeof(cap->bus_info));
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING |
+ V4L2_CAP_READWRITE;
+ cap->device_caps |= V4L2_CAP_TUNER;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+ return 0;
+}
+
+
+/* Videobuf2 operations */
+static int msi3101_queue_setup(struct vb2_queue *vq,
+ const struct v4l2_format *fmt, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[])
+{
+ struct msi3101_state *s = vb2_get_drv_priv(vq);
+ dev_dbg(&s->udev->dev, "%s: *nbuffers=%d\n", __func__, *nbuffers);
+
+ /* Absolute min and max number of buffers available for mmap() */
+ *nbuffers = 32;
+ *nplanes = 1;
+ sizes[0] = PAGE_ALIGN(3 * 3072); /* 3 * 768 * 4 */
+ dev_dbg(&s->udev->dev, "%s: nbuffers=%d sizes[0]=%d\n",
+ __func__, *nbuffers, sizes[0]);
+ return 0;
+}
+
+static int msi3101_buf_prepare(struct vb2_buffer *vb)
+{
+ struct msi3101_state *s = vb2_get_drv_priv(vb->vb2_queue);
+
+ /* Don't allow queueing new buffers after device disconnection */
+ if (!s->udev)
+ return -ENODEV;
+
+ return 0;
+}
+
+static void msi3101_buf_queue(struct vb2_buffer *vb)
+{
+ struct msi3101_state *s = vb2_get_drv_priv(vb->vb2_queue);
+ struct msi3101_frame_buf *buf =
+ container_of(vb, struct msi3101_frame_buf, vb);
+ unsigned long flags = 0;
+
+ /* Check the device has not disconnected between prep and queuing */
+ if (!s->udev) {
+ vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
+ return;
+ }
+
+ spin_lock_irqsave(&s->queued_bufs_lock, flags);
+ list_add_tail(&buf->list, &s->queued_bufs);
+ spin_unlock_irqrestore(&s->queued_bufs_lock, flags);
+}
+
+#define CMD_WREG 0x41
+#define CMD_START_STREAMING 0x43
+#define CMD_STOP_STREAMING 0x45
+#define CMD_READ_UNKNOW 0x48
+
+#define msi3101_dbg_usb_control_msg(udev, r, t, v, _i, b, l) { \
+ char *direction; \
+ if (t == (USB_TYPE_VENDOR | USB_DIR_OUT)) \
+ direction = ">>>"; \
+ else \
+ direction = "<<<"; \
+ dev_dbg(&udev->dev, "%s: %02x %02x %02x %02x %02x %02x %02x %02x " \
+ "%s %*ph\n", __func__, t, r, v & 0xff, v >> 8, \
+ _i & 0xff, _i >> 8, l & 0xff, l >> 8, direction, l, b); \
+}
+
+static int msi3101_ctrl_msg(struct msi3101_state *s, u8 cmd, u32 data)
+{
+ int ret;
+ u8 request = cmd;
+ u8 requesttype = USB_DIR_OUT | USB_TYPE_VENDOR;
+ u16 value = (data >> 0) & 0xffff;
+ u16 index = (data >> 16) & 0xffff;
+
+ msi3101_dbg_usb_control_msg(s->udev,
+ request, requesttype, value, index, NULL, 0);
+
+ ret = usb_control_msg(s->udev, usb_sndctrlpipe(s->udev, 0),
+ request, requesttype, value, index, NULL, 0, 2000);
+
+ if (ret)
+ dev_err(&s->udev->dev, "%s: failed %d, cmd %02x, data %04x\n",
+ __func__, ret, cmd, data);
+
+ return ret;
+};
+
+static int msi3101_tuner_write(struct msi3101_state *s, u32 data)
+{
+ return msi3101_ctrl_msg(s, CMD_WREG, data << 8 | 0x09);
+};
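+
+/*
+ * On the wire (as follows from msi3101_ctrl_msg() above, not from a
+ * datasheet) a tuner register write is a vendor OUT control transfer with
+ * bRequest 0x41 (CMD_WREG), wValue carrying the low 16 bits and wIndex the
+ * high 16 bits of (data << 8 | 0x09). For example msi3101_tuner_write(s,
+ * 0x000003) issues wValue = 0x0309, wIndex = 0x0000.
+ */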
+
+#define F_REF 24000000
+#define DIV_R_IN 2
+static int msi3101_set_usb_adc(struct msi3101_state *s)
+{
+ int ret, div_n, div_m, div_r_out, f_sr, f_vco, fract;
+ u32 reg3, reg4, reg7;
+
+ f_sr = s->ctrl_sampling_rate->val64;
+
+ /* select stream format */
+ if (f_sr < 6000000) {
+ s->convert_stream = msi3101_convert_stream_252;
+ reg7 = 0x00009407;
+ } else if (f_sr < 8000000) {
+ s->convert_stream = msi3101_convert_stream_336;
+ reg7 = 0x00008507;
+ } else if (f_sr < 9000000) {
+ s->convert_stream = msi3101_convert_stream_384;
+ reg7 = 0x0000a507;
+ } else {
+ s->convert_stream = msi3101_convert_stream_504;
+ reg7 = 0x000c9407;
+ }
+
+ /*
+ * Synthesizer config is just an educated guess...
+ *
+ * [7:0] 0x03, register address
+ * [8] 1, always
+ * [9] ?
+ * [12:10] output divider
+ * [13] 0 ?
+ * [14] 0 ?
+ * [15] fractional MSB, bit 20
+ * [16:19] N
+ * [23:20] ?
+ * [24:31] 0x01
+ *
+ * output divider
+ * val div
+ * 0 - (invalid)
+ * 1 4
+ * 2 6
+ * 3 8
+ * 4 10
+ * 5 12
+ * 6 14
+ * 7 16
+ *
+ * VCO 202000000 - 720000000++
+ */
+ reg3 = 0x01000303;
+ reg4 = 0x00000004;
+
+ /* XXX: Filters? AGC? */
+ if (f_sr < 6000000)
+ reg3 |= 0x1 << 20;
+ else if (f_sr < 7000000)
+ reg3 |= 0x5 << 20;
+ else if (f_sr < 8500000)
+ reg3 |= 0x9 << 20;
+ else
+ reg3 |= 0xd << 20;
+
+ for (div_r_out = 4; div_r_out < 16; div_r_out += 2) {
+ f_vco = f_sr * div_r_out * 12;
+ dev_dbg(&s->udev->dev, "%s: div_r_out=%d f_vco=%d\n",
+ __func__, div_r_out, f_vco);
+ if (f_vco >= 202000000)
+ break;
+ }
+
+ div_n = f_vco / (F_REF * DIV_R_IN);
+ div_m = f_vco % (F_REF * DIV_R_IN);
+ fract = 0x200000ul * div_m / (F_REF * DIV_R_IN);
+
+ reg3 |= div_n << 16;
+ reg3 |= (div_r_out / 2 - 1) << 10;
+ reg3 |= ((fract >> 20) & 0x000001) << 15; /* [20] */
+ reg4 |= ((fract >> 0) & 0x0fffff) << 8; /* [19:0] */
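+
+ /*
+ * A worked example of the math above (assuming a 64-bit unsigned long so
+ * the 0x200000ul * div_m product does not overflow): for the default
+ * f_sr = 2048000 the loop picks div_r_out = 10 (f_vco = 245760000), so
+ * div_n = 5, div_m = 5760000 and fract = 251658 (0x3d70a).
+ */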
+
+ dev_dbg(&s->udev->dev,
+ "%s: f_sr=%d f_vco=%d div_n=%d div_m=%d div_r_out=%d reg3=%08x reg4=%08x\n",
+ __func__, f_sr, f_vco, div_n, div_m, div_r_out, reg3, reg4);
+
+ ret = msi3101_ctrl_msg(s, CMD_WREG, 0x00608008);
+ if (ret)
+ goto err;
+
+ ret = msi3101_ctrl_msg(s, CMD_WREG, 0x00000c05);
+ if (ret)
+ goto err;
+
+ ret = msi3101_ctrl_msg(s, CMD_WREG, 0x00020000);
+ if (ret)
+ goto err;
+
+ ret = msi3101_ctrl_msg(s, CMD_WREG, 0x00480102);
+ if (ret)
+ goto err;
+
+ ret = msi3101_ctrl_msg(s, CMD_WREG, 0x00f38008);
+ if (ret)
+ goto err;
+
+ ret = msi3101_ctrl_msg(s, CMD_WREG, reg7);
+ if (ret)
+ goto err;
+
+ ret = msi3101_ctrl_msg(s, CMD_WREG, reg4);
+ if (ret)
+ goto err;
+
+ ret = msi3101_ctrl_msg(s, CMD_WREG, reg3);
+ if (ret)
+ goto err;
+err:
+ return ret;
+};
+
+static int msi3101_set_tuner(struct msi3101_state *s)
+{
+ int ret, i, len;
+ unsigned int n, m, thresh, frac, vco_step, tmp, f_if1;
+ u32 reg;
+ u64 f_vco, tmp64;
+ u8 mode, filter_mode, lo_div;
+ const struct msi3101_gain *gain_lut;
+ static const struct {
+ u32 rf;
+ u8 mode;
+ u8 lo_div;
+ } band_lut[] = {
+ { 50000000, 0xe1, 16}, /* AM_MODE2, antenna 2 */
+ {108000000, 0x42, 32}, /* VHF_MODE */
+ {330000000, 0x44, 16}, /* B3_MODE */
+ {960000000, 0x48, 4}, /* B45_MODE */
+ { ~0U, 0x50, 2}, /* BL_MODE */
+ };
+ static const struct {
+ u32 freq;
+ u8 filter_mode;
+ } if_freq_lut[] = {
+ { 0, 0x03}, /* Zero IF */
+ { 450000, 0x02}, /* 450 kHz IF */
+ {1620000, 0x01}, /* 1.62 MHz IF */
+ {2048000, 0x00}, /* 2.048 MHz IF */
+ };
+ static const struct {
+ u32 freq;
+ u8 val;
+ } bandwidth_lut[] = {
+ { 200000, 0x00}, /* 200 kHz */
+ { 300000, 0x01}, /* 300 kHz */
+ { 600000, 0x02}, /* 600 kHz */
+ {1536000, 0x03}, /* 1.536 MHz */
+ {5000000, 0x04}, /* 5 MHz */
+ {6000000, 0x05}, /* 6 MHz */
+ {7000000, 0x06}, /* 7 MHz */
+ {8000000, 0x07}, /* 8 MHz */
+ };
+
+ unsigned int f_rf = s->ctrl_tuner_rf->val64;
+
+ /*
+ * bandwidth (Hz)
+ * 200000, 300000, 600000, 1536000, 5000000, 6000000, 7000000, 8000000
+ */
+ unsigned int bandwidth = s->ctrl_tuner_bw->val;
+
+ /*
+ * intermediate frequency (Hz)
+ * 0, 450000, 1620000, 2048000
+ */
+ unsigned int f_if = s->ctrl_tuner_if->val;
+
+ /*
+ * gain reduction (dB)
+ * 0 - 102 below 420 MHz
+ * 0 - 85 above 420 MHz
+ */
+ int gain = s->ctrl_tuner_gain->val;
+
+ dev_dbg(&s->udev->dev,
+ "%s: f_rf=%d bandwidth=%d f_if=%d gain=%d\n",
+ __func__, f_rf, bandwidth, f_if, gain);
+
+ ret = -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(band_lut); i++) {
+ if (f_rf <= band_lut[i].rf) {
+ mode = band_lut[i].mode;
+ lo_div = band_lut[i].lo_div;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(band_lut))
+ goto err;
+
+ /* AM_MODE is upconverted */
+ if ((mode >> 0) & 0x1)
+ f_if1 = 5 * F_REF;
+ else
+ f_if1 = 0;
+
+ for (i = 0; i < ARRAY_SIZE(if_freq_lut); i++) {
+ if (f_if == if_freq_lut[i].freq) {
+ filter_mode = if_freq_lut[i].filter_mode;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(if_freq_lut))
+ goto err;
+
+ for (i = 0; i < ARRAY_SIZE(bandwidth_lut); i++) {
+ if (bandwidth == bandwidth_lut[i].freq) {
+ bandwidth = bandwidth_lut[i].val;
+ break;
+ }
+ }
+
+ if (i == ARRAY_SIZE(bandwidth_lut))
+ goto err;
+
+#define F_OUT_STEP 1
+#define R_REF 4
+ f_vco = (f_rf + f_if + f_if1) * lo_div;
+
+ tmp64 = f_vco;
+ m = do_div(tmp64, F_REF * R_REF);
+ n = (unsigned int) tmp64;
+
+ vco_step = F_OUT_STEP * lo_div;
+ thresh = (F_REF * R_REF) / vco_step;
+ frac = 1ul * thresh * m / (F_REF * R_REF);
+
+ /* Find the greatest common divisor and reduce thresh/frac by it. */
+ tmp = gcd(thresh, frac);
+ thresh /= tmp;
+ frac /= tmp;
+
+ /* Scale down to fit the register maximum (4095). Resolution is reduced. */
+ tmp = DIV_ROUND_UP(thresh, 4095);
+ thresh = DIV_ROUND_CLOSEST(thresh, tmp);
+ frac = DIV_ROUND_CLOSEST(frac, tmp);
+
+ /* calculate the actual RF frequency that will be set */
+ tmp = 1ul * F_REF * R_REF * n;
+ tmp += 1ul * F_REF * R_REF * frac / thresh;
+ tmp /= lo_div;
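+
+ /*
+ * A rough example of the PLL math above for the 100 MHz default (VHF
+ * band, lo_div = 32, zero IF, no upconversion): f_vco = 3200000000,
+ * n = 33, m = 32000000, thresh = 3000000 and frac = 1000000, which the
+ * gcd step reduces to thresh = 3, frac = 1; the recalculated RF is then
+ * (96 MHz * 33 + 96 MHz * 1 / 3) / 32 = 100 MHz exactly.
+ */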
+
+ dev_dbg(&s->udev->dev,
+ "%s: rf=%u:%u n=%d thresh=%d frac=%d\n",
+ __func__, f_rf, tmp, n, thresh, frac);
+
+ ret = msi3101_tuner_write(s, 0x00000e);
+ if (ret)
+ goto err;
+
+ ret = msi3101_tuner_write(s, 0x000003);
+ if (ret)
+ goto err;
+
+ reg = 0 << 0;
+ reg |= mode << 4;
+ reg |= filter_mode << 12;
+ reg |= bandwidth << 14;
+ reg |= 0x02 << 17;
+ reg |= 0x00 << 20;
+ ret = msi3101_tuner_write(s, reg);
+ if (ret)
+ goto err;
+
+ reg = 5 << 0;
+ reg |= thresh << 4;
+ reg |= 1 << 19;
+ reg |= 1 << 21;
+ ret = msi3101_tuner_write(s, reg);
+ if (ret)
+ goto err;
+
+ reg = 2 << 0;
+ reg |= frac << 4;
+ reg |= n << 16;
+ ret = msi3101_tuner_write(s, reg);
+ if (ret)
+ goto err;
+
+ if (f_rf < 120000000) {
+ gain_lut = msi3101_gain_lut_120;
+ len = ARRAY_SIZE(msi3101_gain_lut_120);
+ } else if (f_rf < 245000000) {
+ gain_lut = msi3101_gain_lut_245;
+ len = ARRAY_SIZE(msi3101_gain_lut_245);
+ } else {
+ gain_lut = msi3101_gain_lut_1000;
+ len = ARRAY_SIZE(msi3101_gain_lut_1000);
+ }
+
+ for (i = 0; i < len; i++) {
+ if (gain_lut[i].tot >= gain)
+ break;
+ }
+
+ if (i == len)
+ goto err;
+
+ dev_dbg(&s->udev->dev,
+ "%s: gain tot=%d baseband=%d lna=%d mixer=%d\n",
+ __func__, gain_lut[i].tot, gain_lut[i].baseband,
+ gain_lut[i].lna, gain_lut[i].mixer);
+
+ reg = 1 << 0;
+ reg |= gain_lut[i].baseband << 4;
+ reg |= 0 << 10;
+ reg |= gain_lut[i].mixer << 12;
+ reg |= gain_lut[i].lna << 13;
+ reg |= 4 << 14;
+ reg |= 0 << 17;
+ ret = msi3101_tuner_write(s, reg);
+ if (ret)
+ goto err;
+
+ reg = 6 << 0;
+ reg |= 63 << 4;
+ reg |= 4095 << 10;
+ ret = msi3101_tuner_write(s, reg);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ dev_dbg(&s->udev->dev, "%s: failed %d\n", __func__, ret);
+ return ret;
+};
+
+static int msi3101_start_streaming(struct vb2_queue *vq, unsigned int count)
+{
+ struct msi3101_state *s = vb2_get_drv_priv(vq);
+ int ret;
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ if (!s->udev)
+ return -ENODEV;
+
+ if (mutex_lock_interruptible(&s->v4l2_lock))
+ return -ERESTARTSYS;
+
+ ret = msi3101_set_usb_adc(s);
+
+ ret = msi3101_isoc_init(s);
+ if (ret)
+ msi3101_cleanup_queued_bufs(s);
+
+ ret = msi3101_ctrl_msg(s, CMD_START_STREAMING, 0);
+
+ mutex_unlock(&s->v4l2_lock);
+
+ return ret;
+}
+
+static int msi3101_stop_streaming(struct vb2_queue *vq)
+{
+ struct msi3101_state *s = vb2_get_drv_priv(vq);
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ if (mutex_lock_interruptible(&s->v4l2_lock))
+ return -ERESTARTSYS;
+
+ if (s->udev)
+ msi3101_isoc_cleanup(s);
+
+ msi3101_cleanup_queued_bufs(s);
+
+ /* according to tests, at least 700us delay is required */
+ msleep(20);
+ msi3101_ctrl_msg(s, CMD_STOP_STREAMING, 0);
+
+ mutex_unlock(&s->v4l2_lock);
+
+ return 0;
+}
+
+static struct vb2_ops msi3101_vb2_ops = {
+ .queue_setup = msi3101_queue_setup,
+ .buf_prepare = msi3101_buf_prepare,
+ .buf_queue = msi3101_buf_queue,
+ .start_streaming = msi3101_start_streaming,
+ .stop_streaming = msi3101_stop_streaming,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+};
+
+static int msi3101_enum_input(struct file *file, void *fh, struct v4l2_input *i)
+{
+ if (i->index != 0)
+ return -EINVAL;
+
+ strlcpy(i->name, "SDR data", sizeof(i->name));
+ i->type = V4L2_INPUT_TYPE_CAMERA;
+
+ return 0;
+}
+
+static int msi3101_g_input(struct file *file, void *fh, unsigned int *i)
+{
+ *i = 0;
+
+ return 0;
+}
+
+static int msi3101_s_input(struct file *file, void *fh, unsigned int i)
+{
+ return i ? -EINVAL : 0;
+}
+
+static int vidioc_s_tuner(struct file *file, void *priv,
+ const struct v4l2_tuner *v)
+{
+ struct msi3101_state *s = video_drvdata(file);
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ return 0;
+}
+
+static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *v)
+{
+ struct msi3101_state *s = video_drvdata(file);
+ dev_dbg(&s->udev->dev, "%s:\n", __func__);
+
+ strcpy(v->name, "SDR RX");
+ v->capability = V4L2_TUNER_CAP_LOW;
+
+ return 0;
+}
+
+static int vidioc_s_frequency(struct file *file, void *priv,
+ const struct v4l2_frequency *f)
+{
+ struct msi3101_state *s = video_drvdata(file);
+ dev_dbg(&s->udev->dev, "%s: frequency=%lu Hz (%u)\n",
+ __func__, f->frequency * 625UL / 10UL, f->frequency);
+
+ return v4l2_ctrl_s_ctrl_int64(s->ctrl_tuner_rf,
+ f->frequency * 625UL / 10UL);
+}
+
+const struct v4l2_ioctl_ops msi3101_ioctl_ops = {
+ .vidioc_querycap = msi3101_querycap,
+
+ .vidioc_enum_input = msi3101_enum_input,
+ .vidioc_g_input = msi3101_g_input,
+ .vidioc_s_input = msi3101_s_input,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+
+ .vidioc_streamon = vb2_ioctl_streamon,
+ .vidioc_streamoff = vb2_ioctl_streamoff,
+
+ .vidioc_g_tuner = vidioc_g_tuner,
+ .vidioc_s_tuner = vidioc_s_tuner,
+ .vidioc_s_frequency = vidioc_s_frequency,
+
+ .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
+ .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
+ .vidioc_log_status = v4l2_ctrl_log_status,
+};
+
+static const struct v4l2_file_operations msi3101_fops = {
+ .owner = THIS_MODULE,
+ .open = v4l2_fh_open,
+ .release = vb2_fop_release,
+ .read = vb2_fop_read,
+ .poll = vb2_fop_poll,
+ .mmap = vb2_fop_mmap,
+ .unlocked_ioctl = video_ioctl2,
+};
+
+static struct video_device msi3101_template = {
+ .name = "Mirics MSi3101 SDR Dongle",
+ .release = video_device_release_empty,
+ .fops = &msi3101_fops,
+ .ioctl_ops = &msi3101_ioctl_ops,
+};
+
+static int msi3101_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct msi3101_state *s =
+ container_of(ctrl->handler, struct msi3101_state,
+ ctrl_handler);
+ int ret;
+ dev_dbg(&s->udev->dev,
+ "%s: id=%d name=%s val=%d min=%d max=%d step=%d\n",
+ __func__, ctrl->id, ctrl->name, ctrl->val,
+ ctrl->minimum, ctrl->maximum, ctrl->step);
+
+ switch (ctrl->id) {
+ case MSI3101_CID_SAMPLING_MODE:
+ case MSI3101_CID_SAMPLING_RATE:
+ case MSI3101_CID_SAMPLING_RESOLUTION:
+ ret = 0;
+ break;
+ case MSI3101_CID_TUNER_RF:
+ case MSI3101_CID_TUNER_BW:
+ case MSI3101_CID_TUNER_IF:
+ case MSI3101_CID_TUNER_GAIN:
+ ret = msi3101_set_tuner(s);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops msi3101_ctrl_ops = {
+ .s_ctrl = msi3101_s_ctrl,
+};
+
+static void msi3101_video_release(struct v4l2_device *v)
+{
+ struct msi3101_state *s =
+ container_of(v, struct msi3101_state, v4l2_dev);
+
+ v4l2_ctrl_handler_free(&s->ctrl_handler);
+ v4l2_device_unregister(&s->v4l2_dev);
+ kfree(s);
+}
+
+static int msi3101_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+ struct msi3101_state *s = NULL;
+ int ret;
+ static const char * const ctrl_sampling_mode_qmenu_strings[] = {
+ "Quadrature Sampling",
+ NULL,
+ };
+ static const struct v4l2_ctrl_config ctrl_sampling_mode = {
+ .ops = &msi3101_ctrl_ops,
+ .id = MSI3101_CID_SAMPLING_MODE,
+ .type = V4L2_CTRL_TYPE_MENU,
+ .flags = V4L2_CTRL_FLAG_INACTIVE,
+ .name = "Sampling Mode",
+ .qmenu = ctrl_sampling_mode_qmenu_strings,
+ };
+ static const struct v4l2_ctrl_config ctrl_sampling_rate = {
+ .ops = &msi3101_ctrl_ops,
+ .id = MSI3101_CID_SAMPLING_RATE,
+ .type = V4L2_CTRL_TYPE_INTEGER64,
+ .name = "Sampling Rate",
+ .min = 500000,
+ .max = 12000000,
+ .def = 2048000,
+ .step = 1,
+ };
+ static const struct v4l2_ctrl_config ctrl_sampling_resolution = {
+ .ops = &msi3101_ctrl_ops,
+ .id = MSI3101_CID_SAMPLING_RESOLUTION,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .flags = V4L2_CTRL_FLAG_INACTIVE,
+ .name = "Sampling Resolution",
+ .min = 10,
+ .max = 10,
+ .def = 10,
+ .step = 1,
+ };
+ static const struct v4l2_ctrl_config ctrl_tuner_rf = {
+ .ops = &msi3101_ctrl_ops,
+ .id = MSI3101_CID_TUNER_RF,
+ .type = V4L2_CTRL_TYPE_INTEGER64,
+ .name = "Tuner RF",
+ .min = 40000000,
+ .max = 2000000000,
+ .def = 100000000,
+ .step = 1,
+ };
+ static const struct v4l2_ctrl_config ctrl_tuner_bw = {
+ .ops = &msi3101_ctrl_ops,
+ .id = MSI3101_CID_TUNER_BW,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Tuner BW",
+ .min = 200000,
+ .max = 8000000,
+ .def = 600000,
+ .step = 1,
+ };
+ static const struct v4l2_ctrl_config ctrl_tuner_if = {
+ .ops = &msi3101_ctrl_ops,
+ .id = MSI3101_CID_TUNER_IF,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .flags = V4L2_CTRL_FLAG_INACTIVE,
+ .name = "Tuner IF",
+ .min = 0,
+ .max = 2048000,
+ .def = 0,
+ .step = 1,
+ };
+ static const struct v4l2_ctrl_config ctrl_tuner_gain = {
+ .ops = &msi3101_ctrl_ops,
+ .id = MSI3101_CID_TUNER_GAIN,
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .name = "Tuner Gain",
+ .min = 0,
+ .max = 102,
+ .def = 0,
+ .step = 1,
+ };
+
+ s = kzalloc(sizeof(struct msi3101_state), GFP_KERNEL);
+ if (s == NULL) {
+ pr_err("Could not allocate memory for msi3101_state\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&s->v4l2_lock);
+ mutex_init(&s->vb_queue_lock);
+ spin_lock_init(&s->queued_bufs_lock);
+ INIT_LIST_HEAD(&s->queued_bufs);
+
+ s->udev = udev;
+
+ /* Init videobuf2 queue structure */
+ s->vb_queue.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+ s->vb_queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
+ s->vb_queue.drv_priv = s;
+ s->vb_queue.buf_struct_size = sizeof(struct msi3101_frame_buf);
+ s->vb_queue.ops = &msi3101_vb2_ops;
+ s->vb_queue.mem_ops = &vb2_vmalloc_memops;
+ s->vb_queue.timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ ret = vb2_queue_init(&s->vb_queue);
+ if (ret < 0) {
+ dev_err(&s->udev->dev, "Could not initialize vb2 queue\n");
+ goto err_free_mem;
+ }
+
+ /* Init video_device structure */
+ s->vdev = msi3101_template;
+ s->vdev.queue = &s->vb_queue;
+ s->vdev.queue->lock = &s->vb_queue_lock;
+ set_bit(V4L2_FL_USE_FH_PRIO, &s->vdev.flags);
+ video_set_drvdata(&s->vdev, s);
+
+ /* Register controls */
+ v4l2_ctrl_handler_init(&s->ctrl_handler, 7);
+ v4l2_ctrl_new_custom(&s->ctrl_handler, &ctrl_sampling_mode, NULL);
+ s->ctrl_sampling_rate = v4l2_ctrl_new_custom(&s->ctrl_handler, &ctrl_sampling_rate, NULL);
+ v4l2_ctrl_new_custom(&s->ctrl_handler, &ctrl_sampling_resolution, NULL);
+ s->ctrl_tuner_rf = v4l2_ctrl_new_custom(&s->ctrl_handler, &ctrl_tuner_rf, NULL);
+ s->ctrl_tuner_bw = v4l2_ctrl_new_custom(&s->ctrl_handler, &ctrl_tuner_bw, NULL);
+ s->ctrl_tuner_if = v4l2_ctrl_new_custom(&s->ctrl_handler, &ctrl_tuner_if, NULL);
+ s->ctrl_tuner_gain = v4l2_ctrl_new_custom(&s->ctrl_handler, &ctrl_tuner_gain, NULL);
+ if (s->ctrl_handler.error) {
+ ret = s->ctrl_handler.error;
+ dev_err(&s->udev->dev, "Could not initialize controls\n");
+ goto err_free_controls;
+ }
+
+ /* Register the v4l2_device structure */
+ s->v4l2_dev.release = msi3101_video_release;
+ ret = v4l2_device_register(&intf->dev, &s->v4l2_dev);
+ if (ret) {
+ dev_err(&s->udev->dev,
+ "Failed to register v4l2-device (%d)\n", ret);
+ goto err_free_controls;
+ }
+
+ s->v4l2_dev.ctrl_handler = &s->ctrl_handler;
+ s->vdev.v4l2_dev = &s->v4l2_dev;
+ s->vdev.lock = &s->v4l2_lock;
+
+ ret = video_register_device(&s->vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+ dev_err(&s->udev->dev,
+ "Failed to register as video device (%d)\n",
+ ret);
+ goto err_unregister_v4l2_dev;
+ }
+ dev_info(&s->udev->dev, "Registered as %s\n",
+ video_device_node_name(&s->vdev));
+
+ return 0;
+
+err_unregister_v4l2_dev:
+ v4l2_device_unregister(&s->v4l2_dev);
+err_free_controls:
+ v4l2_ctrl_handler_free(&s->ctrl_handler);
+err_free_mem:
+ kfree(s);
+ return ret;
+}
+
+/* USB device ID list */
+static struct usb_device_id msi3101_id_table[] = {
+ { USB_DEVICE(0x1df7, 0x2500) }, /* Mirics MSi3101 SDR Dongle */
+ { USB_DEVICE(0x2040, 0xd300) }, /* Hauppauge WinTV 133559 LF */
+ { }
+};
+MODULE_DEVICE_TABLE(usb, msi3101_id_table);
+
+/* USB subsystem interface */
+static struct usb_driver msi3101_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = msi3101_probe,
+ .disconnect = msi3101_disconnect,
+ .id_table = msi3101_id_table,
+};
+
+module_usb_driver(msi3101_driver);
+
+MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
+MODULE_DESCRIPTION("Mirics MSi3101 SDR Dongle");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 10393da315d..5a5c6397e74 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -750,8 +750,6 @@ static void tegra_init_i2c_slave(struct nvec_chip *nvec)
writel(0, nvec->base + I2C_SL_ADDR2);
enable_irq(nvec->irq);
-
- clk_disable_unprepare(nvec->i2c_clk);
}
#ifdef CONFIG_PM_SLEEP
@@ -872,9 +870,6 @@ static int tegra_nvec_probe(struct platform_device *pdev)
tegra_init_i2c_slave(nvec);
- clk_prepare_enable(i2c_clk);
-
-
/* enable event reporting */
nvec_toggle_global_events(nvec, true);
diff --git a/drivers/staging/octeon-usb/Kconfig b/drivers/staging/octeon-usb/Kconfig
index 018af6db08c..16ea17ff3fd 100644
--- a/drivers/staging/octeon-usb/Kconfig
+++ b/drivers/staging/octeon-usb/Kconfig
@@ -1,6 +1,6 @@
config OCTEON_USB
tristate "Cavium Networks Octeon USB support"
- depends on CPU_CAVIUM_OCTEON && USB
+ depends on CAVIUM_OCTEON_SOC && USB
help
This driver supports USB host controller on some Cavium
Networks' products in the Octeon family.
diff --git a/drivers/staging/octeon-usb/cvmx-usb.c b/drivers/staging/octeon-usb/cvmx-usb.c
index bf366495fdd..d7b3c82b5ea 100644
--- a/drivers/staging/octeon-usb/cvmx-usb.c
+++ b/drivers/staging/octeon-usb/cvmx-usb.c
@@ -46,8 +46,6 @@
* systems. These functions provide a generic API to the Octeon
* USB blocks, hiding the internal hardware specific
* operations.
- *
- * <hr>$Revision: 32636 $<hr>
*/
#include <linux/delay.h>
#include <asm/octeon/cvmx.h>
@@ -68,30 +66,27 @@
#define CVMX_PREFETCH_PREF0(address, offset) CVMX_PREFETCH_PREFX(0, address, offset)
#define CVMX_CLZ(result, input) asm ("clz %[rd],%[rs]" : [rd] "=d" (result) : [rs] "d" (input))
-#define cvmx_likely likely
-#define cvmx_wait_usec udelay
-#define cvmx_unlikely unlikely
-#define cvmx_le16_to_cpu le16_to_cpu
-
-#define MAX_RETRIES 3 /* Maximum number of times to retry failed transactions */
-#define MAX_PIPES 32 /* Maximum number of pipes that can be open at once */
-#define MAX_TRANSACTIONS 256 /* Maximum number of outstanding transactions across all pipes */
-#define MAX_CHANNELS 8 /* Maximum number of hardware channels supported by the USB block */
-#define MAX_USB_ADDRESS 127 /* The highest valid USB device address */
-#define MAX_USB_ENDPOINT 15 /* The highest valid USB endpoint number */
-#define MAX_USB_HUB_PORT 15 /* The highest valid port number on a hub */
-#define MAX_TRANSFER_BYTES ((1<<19)-1) /* The low level hardware can transfer a maximum of this number of bytes in each transfer. The field is 19 bits wide */
-#define MAX_TRANSFER_PACKETS ((1<<10)-1) /* The low level hardware can transfer a maximum of this number of packets in each transfer. The field is 10 bits wide */
-
-/* These defines disable the normal read and write csr. This is so I can add
- extra debug stuff to the usb specific version and I won't use the normal
- version by mistake */
+#define MAX_RETRIES 3 /* Maximum number of times to retry failed transactions */
+#define MAX_PIPES 32 /* Maximum number of pipes that can be open at once */
+#define MAX_TRANSACTIONS 256 /* Maximum number of outstanding transactions across all pipes */
+#define MAX_CHANNELS 8 /* Maximum number of hardware channels supported by the USB block */
+#define MAX_USB_ADDRESS 127 /* The highest valid USB device address */
+#define MAX_USB_ENDPOINT 15 /* The highest valid USB endpoint number */
+#define MAX_USB_HUB_PORT 15 /* The highest valid port number on a hub */
+#define MAX_TRANSFER_BYTES ((1<<19)-1) /* The low level hardware can transfer a maximum of this number of bytes in each transfer. The field is 19 bits wide */
+#define MAX_TRANSFER_PACKETS ((1<<10)-1) /* The low level hardware can transfer a maximum of this number of packets in each transfer. The field is 10 bits wide */
+
+/*
+ * These defines disable the normal read and write csr macros. This is so
+ * extra debug code can be added to the usb specific versions without the
+ * normal versions being used by mistake.
+ */
#define cvmx_read_csr use_cvmx_usb_read_csr64_instead_of_cvmx_read_csr
#define cvmx_write_csr use_cvmx_usb_write_csr64_instead_of_cvmx_write_csr
-typedef enum {
- __CVMX_USB_TRANSACTION_FLAGS_IN_USE = 1<<16,
-} cvmx_usb_transaction_flags_t;
+enum cvmx_usb_transaction_flags {
+ __CVMX_USB_TRANSACTION_FLAGS_IN_USE = 1<<16,
+};
enum {
USB_CLOCK_TYPE_REF_12,
@@ -108,167 +103,208 @@ enum {
* the NAK handler can backup to the previous low level
* transaction with a simple clearing of bit 0.
*/
-typedef enum {
- CVMX_USB_STAGE_NON_CONTROL,
- CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE,
- CVMX_USB_STAGE_SETUP,
- CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE,
- CVMX_USB_STAGE_DATA,
- CVMX_USB_STAGE_DATA_SPLIT_COMPLETE,
- CVMX_USB_STAGE_STATUS,
- CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE,
-} cvmx_usb_stage_t;
+enum cvmx_usb_stage {
+ CVMX_USB_STAGE_NON_CONTROL,
+ CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE,
+ CVMX_USB_STAGE_SETUP,
+ CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE,
+ CVMX_USB_STAGE_DATA,
+ CVMX_USB_STAGE_DATA_SPLIT_COMPLETE,
+ CVMX_USB_STAGE_STATUS,
+ CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE,
+};
+
+/**
+ * struct cvmx_usb_transaction - describes each pending USB transaction
+ * regardless of type. These are linked together
+ * to form a list of pending requests for a pipe.
+ *
+ * @prev: Transaction before this one in the pipe.
+ * @next: Transaction after this one in the pipe.
+ * @type: Type of transaction, duplicated from the pipe.
+ * @flags: State flags for this transaction.
+ * @buffer: User's physical buffer address to read/write.
+ * @buffer_length: Size of the user's buffer in bytes.
+ * @control_header: For control transactions, physical address of the 8
+ * byte standard header.
+ * @iso_start_frame: For ISO transactions, the starting frame number.
+ * @iso_number_packets: For ISO transactions, the number of packets in the
+ * request.
+ * @iso_packets: For ISO transactions, the sub packets in the request.
+ * @actual_bytes: Actual bytes transferred for this transaction.
+ * @stage: For control transactions, the current stage.
+ * @callback: User's callback function when complete.
+ * @callback_data: User's data.
+ */
+struct cvmx_usb_transaction {
+ struct cvmx_usb_transaction *prev;
+ struct cvmx_usb_transaction *next;
+ enum cvmx_usb_transfer type;
+ enum cvmx_usb_transaction_flags flags;
+ uint64_t buffer;
+ int buffer_length;
+ uint64_t control_header;
+ int iso_start_frame;
+ int iso_number_packets;
+ struct cvmx_usb_iso_packet *iso_packets;
+ int xfersize;
+ int pktcnt;
+ int retries;
+ int actual_bytes;
+ enum cvmx_usb_stage stage;
+ cvmx_usb_callback_func_t callback;
+ void *callback_data;
+};
/**
- * This structure describes each pending USB transaction
- * regardless of type. These are linked together to form a list
- * of pending requests for a pipe.
+ * struct cvmx_usb_pipe - a pipe represents a virtual connection between Octeon
+ * and some USB device. It contains a list of pending
+ * requests to the device.
+ *
+ * @prev: Pipe before this one in the list
+ * @next: Pipe after this one in the list
+ * @head: The first pending transaction
+ * @tail: The last pending transaction
+ * @interval: For periodic pipes, the interval between packets in
+ * frames
+ * @next_tx_frame: The next frame this pipe is allowed to transmit on
+ * @flags: State flags for this pipe
+ * @device_speed: Speed of device connected to this pipe
+ * @transfer_type: Type of transaction supported by this pipe
+ * @transfer_dir: IN or OUT. Ignored for Control
+ * @multi_count: Max packet in a row for the device
+ * @max_packet: The device's maximum packet size in bytes
+ * @device_addr: USB device address at other end of pipe
+ * @endpoint_num: USB endpoint number at other end of pipe
+ * @hub_device_addr: Hub address this device is connected to
+ * @hub_port: Hub port this device is connected to
+ * @pid_toggle: This toggles between 0/1 on every packet sent to track
+ * the data PID needed
+ * @channel: Hardware DMA channel for this pipe
+ * @split_sc_frame: The low order bits of the frame number the split
+ * complete should be sent on
*/
-typedef struct cvmx_usb_transaction {
- struct cvmx_usb_transaction *prev; /**< Transaction before this one in the pipe */
- struct cvmx_usb_transaction *next; /**< Transaction after this one in the pipe */
- cvmx_usb_transfer_t type; /**< Type of transaction, duplicated of the pipe */
- cvmx_usb_transaction_flags_t flags; /**< State flags for this transaction */
- uint64_t buffer; /**< User's physical buffer address to read/write */
- int buffer_length; /**< Size of the user's buffer in bytes */
- uint64_t control_header; /**< For control transactions, physical address of the 8 byte standard header */
- int iso_start_frame; /**< For ISO transactions, the starting frame number */
- int iso_number_packets; /**< For ISO transactions, the number of packets in the request */
- cvmx_usb_iso_packet_t *iso_packets; /**< For ISO transactions, the sub packets in the request */
- int xfersize;
- int pktcnt;
- int retries;
- int actual_bytes; /**< Actual bytes transfer for this transaction */
- cvmx_usb_stage_t stage; /**< For control transactions, the current stage */
- cvmx_usb_callback_func_t callback; /**< User's callback function when complete */
- void *callback_data; /**< User's data */
-} cvmx_usb_transaction_t;
+struct cvmx_usb_pipe {
+ struct cvmx_usb_pipe *prev;
+ struct cvmx_usb_pipe *next;
+ struct cvmx_usb_transaction *head;
+ struct cvmx_usb_transaction *tail;
+ uint64_t interval;
+ uint64_t next_tx_frame;
+ enum cvmx_usb_pipe_flags flags;
+ enum cvmx_usb_speed device_speed;
+ enum cvmx_usb_transfer transfer_type;
+ enum cvmx_usb_direction transfer_dir;
+ int multi_count;
+ uint16_t max_packet;
+ uint8_t device_addr;
+ uint8_t endpoint_num;
+ uint8_t hub_device_addr;
+ uint8_t hub_port;
+ uint8_t pid_toggle;
+ uint8_t channel;
+ int8_t split_sc_frame;
+};
/**
- * A pipe represents a virtual connection between Octeon and some
- * USB device. It contains a list of pending request to the device.
+ * struct cvmx_usb_pipe_list
+ *
+ * @head: Head of the list, or NULL if empty.
+ * @tail: Tail of the list, or NULL if empty.
*/
-typedef struct cvmx_usb_pipe {
- struct cvmx_usb_pipe *prev; /**< Pipe before this one in the list */
- struct cvmx_usb_pipe *next; /**< Pipe after this one in the list */
- cvmx_usb_transaction_t *head; /**< The first pending transaction */
- cvmx_usb_transaction_t *tail; /**< The last pending transaction */
- uint64_t interval; /**< For periodic pipes, the interval between packets in frames */
- uint64_t next_tx_frame; /**< The next frame this pipe is allowed to transmit on */
- cvmx_usb_pipe_flags_t flags; /**< State flags for this pipe */
- cvmx_usb_speed_t device_speed; /**< Speed of device connected to this pipe */
- cvmx_usb_transfer_t transfer_type; /**< Type of transaction supported by this pipe */
- cvmx_usb_direction_t transfer_dir; /**< IN or OUT. Ignored for Control */
- int multi_count; /**< Max packet in a row for the device */
- uint16_t max_packet; /**< The device's maximum packet size in bytes */
- uint8_t device_addr; /**< USB device address at other end of pipe */
- uint8_t endpoint_num; /**< USB endpoint number at other end of pipe */
- uint8_t hub_device_addr; /**< Hub address this device is connected to */
- uint8_t hub_port; /**< Hub port this device is connected to */
- uint8_t pid_toggle; /**< This toggles between 0/1 on every packet send to track the data pid needed */
- uint8_t channel; /**< Hardware DMA channel for this pipe */
- int8_t split_sc_frame; /**< The low order bits of the frame number the split complete should be sent on */
-} cvmx_usb_pipe_t;
-
-typedef struct {
- cvmx_usb_pipe_t *head; /**< Head of the list, or NULL if empty */
- cvmx_usb_pipe_t *tail; /**< Tail if the list, or NULL if empty */
-} cvmx_usb_pipe_list_t;
-
-typedef struct {
- struct {
- int channel;
- int size;
- uint64_t address;
- } entry[MAX_CHANNELS+1];
- int head;
- int tail;
-} cvmx_usb_tx_fifo_t;
+struct cvmx_usb_pipe_list {
+ struct cvmx_usb_pipe *head;
+ struct cvmx_usb_pipe *tail;
+};
+
+struct cvmx_usb_tx_fifo {
+ struct {
+ int channel;
+ int size;
+ uint64_t address;
+ } entry[MAX_CHANNELS+1];
+ int head;
+ int tail;
+};
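The head/tail pair in struct cvmx_usb_tx_fifo is used as a small ring over entry[]. A hypothetical, self-contained illustration of that pattern follows; the demo_* names are invented for this sketch and are not part of the patch:

#include <stdint.h>

#define DEMO_MAX_CHANNELS 8

struct demo_tx_fifo {
	struct {
		int channel;
		int size;
		uint64_t address;
	} entry[DEMO_MAX_CHANNELS + 1];
	int head;
	int tail;
};

/* Queue one pending FIFO write; returns 0 on success, -1 if full. */
int demo_fifo_push(struct demo_tx_fifo *fifo, int channel,
		   int size, uint64_t address)
{
	int next = (fifo->head + 1) % (DEMO_MAX_CHANNELS + 1);

	if (next == fifo->tail)
		return -1;
	fifo->entry[fifo->head].channel = channel;
	fifo->entry[fifo->head].size = size;
	fifo->entry[fifo->head].address = address;
	fifo->head = next;
	return 0;
}

/* Drop the oldest queued write; returns 0 on success, -1 if empty. */
int demo_fifo_pop(struct demo_tx_fifo *fifo)
{
	if (fifo->head == fifo->tail)
		return -1;
	fifo->tail = (fifo->tail + 1) % (DEMO_MAX_CHANNELS + 1);
	return 0;
}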
/**
- * The state of the USB block is stored in this structure
+ * struct cvmx_usb_internal_state - the state of the USB block
+ *
+ * @init_flags: Flags passed to initialize.
+ * @index: Which USB block this is for.
+ * @idle_hardware_channels: Bit set for every idle hardware channel.
+ * @usbcx_hprt: Stored port status so we don't need to read a CSR to
+ * determine splits.
+ * @pipe_for_channel: Map channels to pipes.
+ * @free_transaction_head: Head of the list of free transactions.
+ * @free_transaction_tail: Tail of the list of free transactions.
+ * @pipe: Storage for pipes.
+ * @transaction: Storage for transactions.
+ * @callback: User global callbacks.
+ * @callback_data: User data for each callback.
+ * @indent: Used by debug output to indent functions.
+ * @port_status: Last port status used for change notification.
+ * @free_pipes: List of all pipes that are currently closed.
+ * @idle_pipes: List of open pipes that have no transactions.
+ * @active_pipes: Active pipes indexed by transfer type.
+ * @frame_number: Increments every SOF interrupt for timekeeping.
+ * @active_split: Points to the current active split, or NULL.
*/
-typedef struct {
- int init_flags; /**< Flags passed to initialize */
- int index; /**< Which USB block this is for */
- int idle_hardware_channels; /**< Bit set for every idle hardware channel */
- cvmx_usbcx_hprt_t usbcx_hprt; /**< Stored port status so we don't need to read a CSR to determine splits */
- cvmx_usb_pipe_t *pipe_for_channel[MAX_CHANNELS]; /**< Map channels to pipes */
- cvmx_usb_transaction_t *free_transaction_head; /**< List of free transactions head */
- cvmx_usb_transaction_t *free_transaction_tail; /**< List of free transactions tail */
- cvmx_usb_pipe_t pipe[MAX_PIPES]; /**< Storage for pipes */
- cvmx_usb_transaction_t transaction[MAX_TRANSACTIONS]; /**< Storage for transactions */
- cvmx_usb_callback_func_t callback[__CVMX_USB_CALLBACK_END]; /**< User global callbacks */
- void *callback_data[__CVMX_USB_CALLBACK_END]; /**< User data for each callback */
- int indent; /**< Used by debug output to indent functions */
- cvmx_usb_port_status_t port_status; /**< Last port status used for change notification */
- cvmx_usb_pipe_list_t free_pipes; /**< List of all pipes that are currently closed */
- cvmx_usb_pipe_list_t idle_pipes; /**< List of open pipes that have no transactions */
- cvmx_usb_pipe_list_t active_pipes[4]; /**< Active pipes indexed by transfer type */
- uint64_t frame_number; /**< Increments every SOF interrupt for time keeping */
- cvmx_usb_transaction_t *active_split; /**< Points to the current active split, or NULL */
- cvmx_usb_tx_fifo_t periodic;
- cvmx_usb_tx_fifo_t nonperiodic;
-} cvmx_usb_internal_state_t;
-
-/* This macro logs out whenever a function is called if debugging is on */
-#define CVMX_USB_LOG_CALLED() \
- if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_CALLS)) \
- cvmx_dprintf("%*s%s: called\n", 2*usb->indent++, "", __FUNCTION__);
-
-/* This macro logs out each function parameter if debugging is on */
-#define CVMX_USB_LOG_PARAM(format, param) \
- if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_CALLS)) \
- cvmx_dprintf("%*s%s: param %s = " format "\n", 2*usb->indent, "", __FUNCTION__, #param, param);
-
-/* This macro logs out when a function returns a value */
-#define CVMX_USB_RETURN(v) \
- do { \
- typeof(v) r = v; \
- if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_CALLS)) \
- cvmx_dprintf("%*s%s: returned %s(%d)\n", 2*--usb->indent, "", __FUNCTION__, #v, r); \
- return r; \
- } while (0);
-
-/* This macro logs out when a function doesn't return a value */
-#define CVMX_USB_RETURN_NOTHING() \
- do { \
- if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_CALLS)) \
- cvmx_dprintf("%*s%s: returned\n", 2*--usb->indent, "", __FUNCTION__); \
- return; \
- } while (0);
+struct cvmx_usb_internal_state {
+ int init_flags;
+ int index;
+ int idle_hardware_channels;
+ union cvmx_usbcx_hprt usbcx_hprt;
+ struct cvmx_usb_pipe *pipe_for_channel[MAX_CHANNELS];
+ struct cvmx_usb_transaction *free_transaction_head;
+ struct cvmx_usb_transaction *free_transaction_tail;
+ struct cvmx_usb_pipe pipe[MAX_PIPES];
+ struct cvmx_usb_transaction transaction[MAX_TRANSACTIONS];
+ cvmx_usb_callback_func_t callback[__CVMX_USB_CALLBACK_END];
+ void *callback_data[__CVMX_USB_CALLBACK_END];
+ int indent;
+ struct cvmx_usb_port_status port_status;
+ struct cvmx_usb_pipe_list free_pipes;
+ struct cvmx_usb_pipe_list idle_pipes;
+ struct cvmx_usb_pipe_list active_pipes[4];
+ uint64_t frame_number;
+ struct cvmx_usb_transaction *active_split;
+ struct cvmx_usb_tx_fifo periodic;
+ struct cvmx_usb_tx_fifo nonperiodic;
+};
/* This macro spins on a field waiting for it to reach a value */
#define CVMX_WAIT_FOR_FIELD32(address, type, field, op, value, timeout_usec)\
- ({int result; \
- do { \
- uint64_t done = cvmx_get_cycle() + (uint64_t)timeout_usec * \
- octeon_get_clock_rate() / 1000000; \
- type c; \
- while (1) \
- { \
- c.u32 = __cvmx_usb_read_csr32(usb, address); \
- if (c.s.field op (value)) { \
- result = 0; \
- break; \
- } else if (cvmx_get_cycle() > done) { \
- result = -1; \
- break; \
- } else \
- cvmx_wait(100); \
- } \
- } while (0); \
- result;})
-
-/* This macro logically sets a single field in a CSR. It does the sequence
- read, modify, and write */
-#define USB_SET_FIELD32(address, type, field, value)\
- do { \
- type c; \
- c.u32 = __cvmx_usb_read_csr32(usb, address);\
- c.s.field = value; \
- __cvmx_usb_write_csr32(usb, address, c.u32);\
- } while (0)
+ ({int result; \
+ do { \
+ uint64_t done = cvmx_get_cycle() + (uint64_t)timeout_usec * \
+ octeon_get_clock_rate() / 1000000; \
+ type c; \
+ while (1) { \
+ c.u32 = __cvmx_usb_read_csr32(usb, address); \
+ if (c.s.field op (value)) { \
+ result = 0; \
+ break; \
+ } else if (cvmx_get_cycle() > done) { \
+ result = -1; \
+ break; \
+ } else \
+ cvmx_wait(100); \
+ } \
+ } while (0); \
+ result; })
+
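CVMX_WAIT_FOR_FIELD32() evaluates to 0 once the field satisfies the comparison and to -1 on timeout, so callers can poll a status bit directly. A hypothetical call, assembled only from register and field names that appear elsewhere in this file (not a quotation of the patch):

	if (CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_HPRT(usb->index),
				  union cvmx_usbcx_hprt,
				  prtena, ==, 1, 100000))
		return -ETIMEDOUT;	/* port not enabled within 100 ms */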
+/*
+ * This macro logically sets a single field in a CSR. It does a
+ * read-modify-write sequence.
+ */
+#define USB_SET_FIELD32(address, type, field, value) \
+ do { \
+ type c; \
+ c.u32 = __cvmx_usb_read_csr32(usb, address); \
+ c.s.field = value; \
+ __cvmx_usb_write_csr32(usb, address, c.u32); \
+ } while (0)
/* Returns the IO address to push/pop stuff data from the FIFOs */
#define USB_FIFO_ADDRESS(channel, usb_index) (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000)
@@ -280,145 +316,106 @@ static int octeon_usb_get_clock_type(void)
case CVMX_BOARD_TYPE_LANAI2_A:
case CVMX_BOARD_TYPE_LANAI2_U:
case CVMX_BOARD_TYPE_LANAI2_G:
+ case CVMX_BOARD_TYPE_UBNT_E100:
return USB_CLOCK_TYPE_CRYSTAL_12;
}
-
- /* FIXME: This should use CVMX_BOARD_TYPE_UBNT_E100 */
- if (OCTEON_IS_MODEL(OCTEON_CN50XX) &&
- cvmx_sysinfo_get()->board_type == 20002)
- return USB_CLOCK_TYPE_CRYSTAL_12;
-
return USB_CLOCK_TYPE_REF_48;
}
/**
- * @INTERNAL
* Read a USB 32bit CSR. It performs the necessary address swizzle
* for 32bit CSRs and logs the value in a readable format if
* debugging is on.
*
- * @param usb USB block this access is for
- * @param address 64bit address to read
+ * @usb: USB block this access is for
+ * @address: 64bit address to read
*
- * @return Result of the read
+ * Returns: Result of the read
*/
-static inline uint32_t __cvmx_usb_read_csr32(cvmx_usb_internal_state_t *usb,
- uint64_t address)
+static inline uint32_t __cvmx_usb_read_csr32(struct cvmx_usb_internal_state *usb,
+ uint64_t address)
{
- uint32_t result = cvmx_read64_uint32(address ^ 4);
- return result;
+ uint32_t result = cvmx_read64_uint32(address ^ 4);
+ return result;
}
/**
- * @INTERNAL
* Write a USB 32bit CSR. It performs the necessary address
* swizzle for 32bit CSRs and logs the value in a readable format
* if debugging is on.
*
- * @param usb USB block this access is for
- * @param address 64bit address to write
- * @param value Value to write
+ * @usb: USB block this access is for
+ * @address: 64bit address to write
+ * @value: Value to write
*/
-static inline void __cvmx_usb_write_csr32(cvmx_usb_internal_state_t *usb,
- uint64_t address, uint32_t value)
+static inline void __cvmx_usb_write_csr32(struct cvmx_usb_internal_state *usb,
+ uint64_t address, uint32_t value)
{
- cvmx_write64_uint32(address ^ 4, value);
- cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
+ cvmx_write64_uint32(address ^ 4, value);
+ cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
}
/**
- * @INTERNAL
* Read a USB 64bit CSR. It logs the value in a readable format if
* debugging is on.
*
- * @param usb USB block this access is for
- * @param address 64bit address to read
+ * @usb: USB block this access is for
+ * @address: 64bit address to read
*
- * @return Result of the read
+ * Returns: Result of the read
*/
-static inline uint64_t __cvmx_usb_read_csr64(cvmx_usb_internal_state_t *usb,
- uint64_t address)
+static inline uint64_t __cvmx_usb_read_csr64(struct cvmx_usb_internal_state *usb,
+ uint64_t address)
{
- uint64_t result = cvmx_read64_uint64(address);
- return result;
+ uint64_t result = cvmx_read64_uint64(address);
+ return result;
}
/**
- * @INTERNAL
* Write a USB 64bit CSR. It logs the value in a readable format
* if debugging is on.
*
- * @param usb USB block this access is for
- * @param address 64bit address to write
- * @param value Value to write
- */
-static inline void __cvmx_usb_write_csr64(cvmx_usb_internal_state_t *usb,
- uint64_t address, uint64_t value)
-{
- cvmx_write64_uint64(address, value);
-}
-
-
-/**
- * @INTERNAL
- * Utility function to convert complete codes into strings
- *
- * @param complete_code
- * Code to convert
- *
- * @return Human readable string
+ * @usb: USB block this access is for
+ * @address: 64bit address to write
+ * @value: Value to write
*/
-static const char *__cvmx_usb_complete_to_string(cvmx_usb_complete_t complete_code)
+static inline void __cvmx_usb_write_csr64(struct cvmx_usb_internal_state *usb,
+ uint64_t address, uint64_t value)
{
- switch (complete_code)
- {
- case CVMX_USB_COMPLETE_SUCCESS: return "SUCCESS";
- case CVMX_USB_COMPLETE_SHORT: return "SHORT";
- case CVMX_USB_COMPLETE_CANCEL: return "CANCEL";
- case CVMX_USB_COMPLETE_ERROR: return "ERROR";
- case CVMX_USB_COMPLETE_STALL: return "STALL";
- case CVMX_USB_COMPLETE_XACTERR: return "XACTERR";
- case CVMX_USB_COMPLETE_DATATGLERR: return "DATATGLERR";
- case CVMX_USB_COMPLETE_BABBLEERR: return "BABBLEERR";
- case CVMX_USB_COMPLETE_FRAMEERR: return "FRAMEERR";
- }
- return "Update __cvmx_usb_complete_to_string";
+ cvmx_write64_uint64(address, value);
}
-
/**
- * @INTERNAL
* Return non zero if this pipe connects to a non HIGH speed
* device through a high speed hub.
*
- * @param usb USB block this access is for
- * @param pipe Pipe to check
+ * @usb: USB block this access is for
+ * @pipe: Pipe to check
*
- * @return Non zero if we need to do split transactions
+ * Returns: Non zero if we need to do split transactions
*/
-static inline int __cvmx_usb_pipe_needs_split(cvmx_usb_internal_state_t *usb, cvmx_usb_pipe_t *pipe)
+static inline int __cvmx_usb_pipe_needs_split(struct cvmx_usb_internal_state *usb, struct cvmx_usb_pipe *pipe)
{
- return ((pipe->device_speed != CVMX_USB_SPEED_HIGH) && (usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_HIGH));
+ return ((pipe->device_speed != CVMX_USB_SPEED_HIGH) && (usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_HIGH));
}
/**
- * @INTERNAL
* Trivial utility function to return the correct PID for a pipe
*
- * @param pipe pipe to check
+ * @pipe: pipe to check
*
- * @return PID for pipe
+ * Returns: PID for pipe
*/
-static inline int __cvmx_usb_get_data_pid(cvmx_usb_pipe_t *pipe)
+static inline int __cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe)
{
- if (pipe->pid_toggle)
- return 2; /* Data1 */
- else
- return 0; /* Data0 */
+ if (pipe->pid_toggle)
+ return 2; /* Data1 */
+ else
+ return 0; /* Data0 */
}
@@ -428,127 +425,119 @@ static inline int __cvmx_usb_get_data_pid(cvmx_usb_pipe_t *pipe)
* by this API, a zero will be returned. Most Octeon chips
* support one usb port, but some support two ports.
* cvmx_usb_initialize() must be called on independent
- * cvmx_usb_state_t structures.
+ * struct cvmx_usb_state.
*
- * @return Number of port, zero if usb isn't supported
+ * Returns: Number of ports, zero if usb isn't supported
*/
int cvmx_usb_get_num_ports(void)
{
- int arch_ports = 0;
-
- if (OCTEON_IS_MODEL(OCTEON_CN56XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN52XX))
- arch_ports = 2;
- else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN31XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
- arch_ports = 1;
- else
- arch_ports = 0;
-
- return arch_ports;
+ int arch_ports = 0;
+
+ if (OCTEON_IS_MODEL(OCTEON_CN56XX))
+ arch_ports = 1;
+ else if (OCTEON_IS_MODEL(OCTEON_CN52XX))
+ arch_ports = 2;
+ else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
+ arch_ports = 1;
+ else if (OCTEON_IS_MODEL(OCTEON_CN31XX))
+ arch_ports = 1;
+ else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
+ arch_ports = 1;
+ else
+ arch_ports = 0;
+
+ return arch_ports;
}
/**
- * @INTERNAL
* Allocate a usb transaction for use
*
- * @param usb USB device state populated by
- * cvmx_usb_initialize().
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
*
- * @return Transaction or NULL
+ * Returns: Transaction or NULL
*/
-static inline cvmx_usb_transaction_t *__cvmx_usb_alloc_transaction(cvmx_usb_internal_state_t *usb)
+static inline struct cvmx_usb_transaction *__cvmx_usb_alloc_transaction(struct cvmx_usb_internal_state *usb)
{
- cvmx_usb_transaction_t *t;
- t = usb->free_transaction_head;
- if (t) {
- usb->free_transaction_head = t->next;
- if (!usb->free_transaction_head)
- usb->free_transaction_tail = NULL;
- }
- else if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_INFO))
- cvmx_dprintf("%s: Failed to allocate a transaction\n", __FUNCTION__);
- if (t) {
- memset(t, 0, sizeof(*t));
- t->flags = __CVMX_USB_TRANSACTION_FLAGS_IN_USE;
- }
- return t;
+ struct cvmx_usb_transaction *t;
+ t = usb->free_transaction_head;
+ if (t) {
+ usb->free_transaction_head = t->next;
+ if (!usb->free_transaction_head)
+ usb->free_transaction_tail = NULL;
+ }
+ if (t) {
+ memset(t, 0, sizeof(*t));
+ t->flags = __CVMX_USB_TRANSACTION_FLAGS_IN_USE;
+ }
+ return t;
}
/**
- * @INTERNAL
* Free a usb transaction
*
- * @param usb USB device state populated by
- * cvmx_usb_initialize().
- * @param transaction
- * Transaction to free
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @transaction:
+ * Transaction to free
*/
-static inline void __cvmx_usb_free_transaction(cvmx_usb_internal_state_t *usb,
- cvmx_usb_transaction_t *transaction)
+static inline void __cvmx_usb_free_transaction(struct cvmx_usb_internal_state *usb,
+ struct cvmx_usb_transaction *transaction)
{
- transaction->flags = 0;
- transaction->prev = NULL;
- transaction->next = NULL;
- if (usb->free_transaction_tail)
- usb->free_transaction_tail->next = transaction;
- else
- usb->free_transaction_head = transaction;
- usb->free_transaction_tail = transaction;
+ transaction->flags = 0;
+ transaction->prev = NULL;
+ transaction->next = NULL;
+ if (usb->free_transaction_tail)
+ usb->free_transaction_tail->next = transaction;
+ else
+ usb->free_transaction_head = transaction;
+ usb->free_transaction_tail = transaction;
}
/**
- * @INTERNAL
* Add a pipe to the tail of a list
- * @param list List to add pipe to
- * @param pipe Pipe to add
+ * @list: List to add pipe to
+ * @pipe: Pipe to add
*/
-static inline void __cvmx_usb_append_pipe(cvmx_usb_pipe_list_t *list, cvmx_usb_pipe_t *pipe)
+static inline void __cvmx_usb_append_pipe(struct cvmx_usb_pipe_list *list, struct cvmx_usb_pipe *pipe)
{
- pipe->next = NULL;
- pipe->prev = list->tail;
- if (list->tail)
- list->tail->next = pipe;
- else
- list->head = pipe;
- list->tail = pipe;
+ pipe->next = NULL;
+ pipe->prev = list->tail;
+ if (list->tail)
+ list->tail->next = pipe;
+ else
+ list->head = pipe;
+ list->tail = pipe;
}
/**
- * @INTERNAL
* Remove a pipe from a list
- * @param list List to remove pipe from
- * @param pipe Pipe to remove
+ * @list: List to remove pipe from
+ * @pipe: Pipe to remove
*/
-static inline void __cvmx_usb_remove_pipe(cvmx_usb_pipe_list_t *list, cvmx_usb_pipe_t *pipe)
+static inline void __cvmx_usb_remove_pipe(struct cvmx_usb_pipe_list *list, struct cvmx_usb_pipe *pipe)
{
- if (list->head == pipe) {
- list->head = pipe->next;
- pipe->next = NULL;
- if (list->head)
- list->head->prev = NULL;
- else
- list->tail = NULL;
- }
- else if (list->tail == pipe) {
- list->tail = pipe->prev;
- list->tail->next = NULL;
- pipe->prev = NULL;
- }
- else {
- pipe->prev->next = pipe->next;
- pipe->next->prev = pipe->prev;
- pipe->prev = NULL;
- pipe->next = NULL;
- }
+ if (list->head == pipe) {
+ list->head = pipe->next;
+ pipe->next = NULL;
+ if (list->head)
+ list->head->prev = NULL;
+ else
+ list->tail = NULL;
+ } else if (list->tail == pipe) {
+ list->tail = pipe->prev;
+ list->tail->next = NULL;
+ pipe->prev = NULL;
+ } else {
+ pipe->prev->next = pipe->next;
+ pipe->next->prev = pipe->prev;
+ pipe->prev = NULL;
+ pipe->next = NULL;
+ }
}
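Together these two helpers give the usual move-between-lists idiom. A hypothetical fragment (not a quotation of this patch) that would move a pipe from the idle list to the active list for its transfer type:

	__cvmx_usb_remove_pipe(&usb->idle_pipes, pipe);
	__cvmx_usb_append_pipe(&usb->active_pipes[pipe->transfer_type], pipe);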
@@ -557,302 +546,332 @@ static inline void __cvmx_usb_remove_pipe(cvmx_usb_pipe_list_t *list, cvmx_usb_p
* other access to the Octeon USB port is made. The port starts
* off in the disabled state.
*
- * @param state Pointer to an empty cvmx_usb_state_t structure
- * that will be populated by the initialize call.
- * This structure is then passed to all other USB
- * functions.
- * @param usb_port_number
- * Which Octeon USB port to initialize.
- * @param flags Flags to control hardware initialization. See
- * cvmx_usb_initialize_flags_t for the flag
- * definitions. Some flags are mandatory.
- *
- * @return CVMX_USB_SUCCESS or a negative error code defined in
- * cvmx_usb_status_t.
+ * @state: Pointer to an empty struct cvmx_usb_state
+ * that will be populated by the initialize call.
+ * This structure is then passed to all other USB
+ * functions.
+ * @usb_port_number:
+ * Which Octeon USB port to initialize.
+ * @flags: Flags to control hardware initialization. See
+ * enum cvmx_usb_initialize_flags for the flag
+ * definitions. Some flags are mandatory.
+ *
+ * Returns: 0 or a negative error code.
*/
-cvmx_usb_status_t cvmx_usb_initialize(cvmx_usb_state_t *state,
- int usb_port_number,
- cvmx_usb_initialize_flags_t flags)
+int cvmx_usb_initialize(struct cvmx_usb_state *state, int usb_port_number,
+ enum cvmx_usb_initialize_flags flags)
{
- cvmx_usbnx_clk_ctl_t usbn_clk_ctl;
- cvmx_usbnx_usbp_ctl_status_t usbn_usbp_ctl_status;
- cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
-
- usb->init_flags = flags;
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", state);
- CVMX_USB_LOG_PARAM("%d", usb_port_number);
- CVMX_USB_LOG_PARAM("0x%x", flags);
-
- /* Make sure that state is large enough to store the internal state */
- if (sizeof(*state) < sizeof(*usb))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- /* At first allow 0-1 for the usb port number */
- if ((usb_port_number < 0) || (usb_port_number > 1))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- /* For all chips except 52XX there is only one port */
- if (!OCTEON_IS_MODEL(OCTEON_CN52XX) && (usb_port_number > 0))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- /* Try to determine clock type automatically */
- if ((flags & (CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI |
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND)) == 0) {
- if (octeon_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12)
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI; /* Only 12 MHZ crystals are supported */
- else
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
- }
-
- if (flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND) {
- /* Check for auto ref clock frequency */
- if (!(flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK))
- switch (octeon_usb_get_clock_type()) {
- case USB_CLOCK_TYPE_REF_12:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
- break;
- case USB_CLOCK_TYPE_REF_24:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
- break;
- case USB_CLOCK_TYPE_REF_48:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
- break;
- default:
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- break;
- }
- }
-
- memset(usb, 0, sizeof(usb));
- usb->init_flags = flags;
-
- /* Initialize the USB state structure */
- {
- int i;
- usb->index = usb_port_number;
-
- /* Initialize the transaction double linked list */
- usb->free_transaction_head = NULL;
- usb->free_transaction_tail = NULL;
- for (i=0; i<MAX_TRANSACTIONS; i++)
- __cvmx_usb_free_transaction(usb, usb->transaction + i);
- for (i=0; i<MAX_PIPES; i++)
- __cvmx_usb_append_pipe(&usb->free_pipes, usb->pipe + i);
- }
-
- /* Power On Reset and PHY Initialization */
-
- /* 1. Wait for DCOK to assert (nothing to do) */
- /* 2a. Write USBN0/1_CLK_CTL[POR] = 1 and
- USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0 */
- usbn_clk_ctl.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index));
- usbn_clk_ctl.s.por = 1;
- usbn_clk_ctl.s.hrst = 0;
- usbn_clk_ctl.s.prst = 0;
- usbn_clk_ctl.s.hclk_rst = 0;
- usbn_clk_ctl.s.enable = 0;
- /* 2b. Select the USB reference clock/crystal parameters by writing
- appropriate values to USBN0/1_CLK_CTL[P_C_SEL, P_RTYPE, P_COM_ON] */
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND) {
- /* The USB port uses 12/24/48MHz 2.5V board clock
- source at USB_XO. USB_XI should be tied to GND.
- Most Octeon evaluation boards require this setting */
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
- usbn_clk_ctl.cn31xx.p_rclk = 1; /* From CN31XX,CN30XX manual */
- usbn_clk_ctl.cn31xx.p_xenbn = 0;
- }
- else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
- usbn_clk_ctl.cn56xx.p_rtype = 2; /* From CN56XX,CN50XX manual */
- else
- usbn_clk_ctl.cn52xx.p_rtype = 1; /* From CN52XX manual */
-
- switch (flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK) {
- case CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ:
- usbn_clk_ctl.s.p_c_sel = 0;
- break;
- case CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ:
- usbn_clk_ctl.s.p_c_sel = 1;
- break;
- case CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ:
- usbn_clk_ctl.s.p_c_sel = 2;
- break;
- }
- }
- else {
- /* The USB port uses a 12MHz crystal as clock source
- at USB_XO and USB_XI */
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
- usbn_clk_ctl.cn31xx.p_rclk = 1; /* From CN31XX,CN30XX manual */
- usbn_clk_ctl.cn31xx.p_xenbn = 1;
- }
- else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
- usbn_clk_ctl.cn56xx.p_rtype = 0; /* From CN56XX,CN50XX manual */
- else
- usbn_clk_ctl.cn52xx.p_rtype = 0; /* From CN52XX manual */
-
- usbn_clk_ctl.s.p_c_sel = 0;
- }
- /* 2c. Select the HCLK via writing USBN0/1_CLK_CTL[DIVIDE, DIVIDE2] and
- setting USBN0/1_CLK_CTL[ENABLE] = 1. Divide the core clock down such
- that USB is as close as possible to 125Mhz */
- {
- int divisor = (octeon_get_clock_rate()+125000000-1)/125000000;
- if (divisor < 4) /* Lower than 4 doesn't seem to work properly */
- divisor = 4;
- usbn_clk_ctl.s.divide = divisor;
- usbn_clk_ctl.s.divide2 = 0;
- }
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
- usbn_clk_ctl.u64);
- /* 2d. Write USBN0/1_CLK_CTL[HCLK_RST] = 1 */
- usbn_clk_ctl.s.hclk_rst = 1;
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
- usbn_clk_ctl.u64);
- /* 2e. Wait 64 core-clock cycles for HCLK to stabilize */
- cvmx_wait(64);
- /* 3. Program the power-on reset field in the USBN clock-control register:
- USBN_CLK_CTL[POR] = 0 */
- usbn_clk_ctl.s.por = 0;
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
- usbn_clk_ctl.u64);
- /* 4. Wait 1 ms for PHY clock to start */
- cvmx_wait_usec(1000);
- /* 5. Program the Reset input from automatic test equipment field in the
- USBP control and status register: USBN_USBP_CTL_STATUS[ATE_RESET] = 1 */
- usbn_usbp_ctl_status.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index));
- usbn_usbp_ctl_status.s.ate_reset = 1;
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index),
- usbn_usbp_ctl_status.u64);
- /* 6. Wait 10 cycles */
- cvmx_wait(10);
- /* 7. Clear ATE_RESET field in the USBN clock-control register:
- USBN_USBP_CTL_STATUS[ATE_RESET] = 0 */
- usbn_usbp_ctl_status.s.ate_reset = 0;
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index),
- usbn_usbp_ctl_status.u64);
- /* 8. Program the PHY reset field in the USBN clock-control register:
- USBN_CLK_CTL[PRST] = 1 */
- usbn_clk_ctl.s.prst = 1;
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
- usbn_clk_ctl.u64);
- /* 9. Program the USBP control and status register to select host or
- device mode. USBN_USBP_CTL_STATUS[HST_MODE] = 0 for host, = 1 for
- device */
- usbn_usbp_ctl_status.s.hst_mode = 0;
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index),
- usbn_usbp_ctl_status.u64);
- /* 10. Wait 1 us */
- cvmx_wait_usec(1);
- /* 11. Program the hreset_n field in the USBN clock-control register:
- USBN_CLK_CTL[HRST] = 1 */
- usbn_clk_ctl.s.hrst = 1;
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
- usbn_clk_ctl.u64);
- /* 12. Proceed to USB core initialization */
- usbn_clk_ctl.s.enable = 1;
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
- usbn_clk_ctl.u64);
- cvmx_wait_usec(1);
-
- /* USB Core Initialization */
-
- /* 1. Read USBC_GHWCFG1, USBC_GHWCFG2, USBC_GHWCFG3, USBC_GHWCFG4 to
- determine USB core configuration parameters. */
- /* Nothing needed */
- /* 2. Program the following fields in the global AHB configuration
- register (USBC_GAHBCFG)
- DMA mode, USBC_GAHBCFG[DMAEn]: 1 = DMA mode, 0 = slave mode
- Burst length, USBC_GAHBCFG[HBSTLEN] = 0
- Nonperiodic TxFIFO empty level (slave mode only),
- USBC_GAHBCFG[NPTXFEMPLVL]
- Periodic TxFIFO empty level (slave mode only),
- USBC_GAHBCFG[PTXFEMPLVL]
- Global interrupt mask, USBC_GAHBCFG[GLBLINTRMSK] = 1 */
- {
- cvmx_usbcx_gahbcfg_t usbcx_gahbcfg;
- /* Due to an errata, CN31XX doesn't support DMA */
- if (OCTEON_IS_MODEL(OCTEON_CN31XX))
- usb->init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA;
- usbcx_gahbcfg.u32 = 0;
- usbcx_gahbcfg.s.dmaen = !(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA);
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- usb->idle_hardware_channels = 0x1; /* Only use one channel with non DMA */
- else if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
- usb->idle_hardware_channels = 0xf7; /* CN5XXX have an errata with channel 3 */
- else
- usb->idle_hardware_channels = 0xff;
- usbcx_gahbcfg.s.hbstlen = 0;
- usbcx_gahbcfg.s.nptxfemplvl = 1;
- usbcx_gahbcfg.s.ptxfemplvl = 1;
- usbcx_gahbcfg.s.glblintrmsk = 1;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index),
- usbcx_gahbcfg.u32);
- }
- /* 3. Program the following fields in USBC_GUSBCFG register.
- HS/FS timeout calibration, USBC_GUSBCFG[TOUTCAL] = 0
- ULPI DDR select, USBC_GUSBCFG[DDRSEL] = 0
- USB turnaround time, USBC_GUSBCFG[USBTRDTIM] = 0x5
- PHY low-power clock select, USBC_GUSBCFG[PHYLPWRCLKSEL] = 0 */
- {
- cvmx_usbcx_gusbcfg_t usbcx_gusbcfg;
- usbcx_gusbcfg.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index));
- usbcx_gusbcfg.s.toutcal = 0;
- usbcx_gusbcfg.s.ddrsel = 0;
- usbcx_gusbcfg.s.usbtrdtim = 0x5;
- usbcx_gusbcfg.s.phylpwrclksel = 0;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index),
- usbcx_gusbcfg.u32);
- }
- /* 4. The software must unmask the following bits in the USBC_GINTMSK
- register.
- OTG interrupt mask, USBC_GINTMSK[OTGINTMSK] = 1
- Mode mismatch interrupt mask, USBC_GINTMSK[MODEMISMSK] = 1 */
- {
- cvmx_usbcx_gintmsk_t usbcx_gintmsk;
- int channel;
-
- usbcx_gintmsk.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GINTMSK(usb->index));
- usbcx_gintmsk.s.otgintmsk = 1;
- usbcx_gintmsk.s.modemismsk = 1;
- usbcx_gintmsk.s.hchintmsk = 1;
- usbcx_gintmsk.s.sofmsk = 0;
- /* We need RX FIFO interrupts if we don't have DMA */
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- usbcx_gintmsk.s.rxflvlmsk = 1;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTMSK(usb->index),
- usbcx_gintmsk.u32);
-
- /* Disable all channel interrupts. We'll enable them per channel later */
- for (channel=0; channel<8; channel++)
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
- }
-
- {
- /* Host Port Initialization */
- if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_INFO))
- cvmx_dprintf("%s: USB%d is in host mode\n", __FUNCTION__, usb->index);
-
- /* 1. Program the host-port interrupt-mask field to unmask,
- USBC_GINTMSK[PRTINT] = 1 */
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), cvmx_usbcx_gintmsk_t,
- prtintmsk, 1);
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), cvmx_usbcx_gintmsk_t,
- disconnintmsk, 1);
- /* 2. Program the USBC_HCFG register to select full-speed host or
- high-speed host. */
- {
- cvmx_usbcx_hcfg_t usbcx_hcfg;
- usbcx_hcfg.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCFG(usb->index));
- usbcx_hcfg.s.fslssupp = 0;
- usbcx_hcfg.s.fslspclksel = 0;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCFG(usb->index), usbcx_hcfg.u32);
- }
- /* 3. Program the port power bit to drive VBUS on the USB,
- USBC_HPRT[PRTPWR] = 1 */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt_t, prtpwr, 1);
-
- /* Steps 4-15 from the manual are done later in the port enable */
- }
-
- CVMX_USB_RETURN(CVMX_USB_SUCCESS);
+ union cvmx_usbnx_clk_ctl usbn_clk_ctl;
+ union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+
+ usb->init_flags = flags;
+
+ /* Make sure that state is large enough to store the internal state */
+ if (sizeof(*state) < sizeof(*usb))
+ return -EINVAL;
+ /* At first allow 0-1 for the usb port number */
+ if ((usb_port_number < 0) || (usb_port_number > 1))
+ return -EINVAL;
+ /* For all chips except 52XX there is only one port */
+ if (!OCTEON_IS_MODEL(OCTEON_CN52XX) && (usb_port_number > 0))
+ return -EINVAL;
+ /* Try to determine clock type automatically */
+ if ((flags & (CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI |
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND)) == 0) {
+ if (octeon_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12)
+ flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI; /* Only 12 MHZ crystals are supported */
+ else
+ flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
+ }
+
+ if (flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND) {
+ /* Check for auto ref clock frequency */
+ if (!(flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK))
+ switch (octeon_usb_get_clock_type()) {
+ case USB_CLOCK_TYPE_REF_12:
+ flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
+ break;
+ case USB_CLOCK_TYPE_REF_24:
+ flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
+ break;
+ case USB_CLOCK_TYPE_REF_48:
+ flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
+ break;
+ default:
+ return -EINVAL;
+ break;
+ }
+ }
+
+ memset(usb, 0, sizeof(*usb));
+ usb->init_flags = flags;
+
+ /* Initialize the USB state structure */
+ {
+ int i;
+ usb->index = usb_port_number;
+
+ /* Initialize the transaction double linked list */
+ usb->free_transaction_head = NULL;
+ usb->free_transaction_tail = NULL;
+ for (i = 0; i < MAX_TRANSACTIONS; i++)
+ __cvmx_usb_free_transaction(usb, usb->transaction + i);
+ for (i = 0; i < MAX_PIPES; i++)
+ __cvmx_usb_append_pipe(&usb->free_pipes, usb->pipe + i);
+ }
+
+ /*
+ * Power On Reset and PHY Initialization
+ *
+ * 1. Wait for DCOK to assert (nothing to do)
+ *
+ * 2a. Write USBN0/1_CLK_CTL[POR] = 1 and
+ * USBN0/1_CLK_CTL[HRST,PRST,HCLK_RST] = 0
+ */
+ usbn_clk_ctl.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index));
+ usbn_clk_ctl.s.por = 1;
+ usbn_clk_ctl.s.hrst = 0;
+ usbn_clk_ctl.s.prst = 0;
+ usbn_clk_ctl.s.hclk_rst = 0;
+ usbn_clk_ctl.s.enable = 0;
+ /*
+ * 2b. Select the USB reference clock/crystal parameters by writing
+ * appropriate values to USBN0/1_CLK_CTL[P_C_SEL, P_RTYPE, P_COM_ON]
+ */
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND) {
+ /*
+ * The USB port uses 12/24/48MHz 2.5V board clock
+ * source at USB_XO. USB_XI should be tied to GND.
+ * Most Octeon evaluation boards require this setting
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+ usbn_clk_ctl.cn31xx.p_rclk = 1; /* From CN31XX,CN30XX manual */
+ usbn_clk_ctl.cn31xx.p_xenbn = 0;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
+ usbn_clk_ctl.cn56xx.p_rtype = 2; /* From CN56XX,CN50XX manual */
+ else
+ usbn_clk_ctl.cn52xx.p_rtype = 1; /* From CN52XX manual */
+
+ switch (flags & CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK) {
+ case CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ:
+ usbn_clk_ctl.s.p_c_sel = 0;
+ break;
+ case CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ:
+ usbn_clk_ctl.s.p_c_sel = 1;
+ break;
+ case CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ:
+ usbn_clk_ctl.s.p_c_sel = 2;
+ break;
+ }
+ } else {
+ /*
+ * The USB port uses a 12MHz crystal as clock source
+ * at USB_XO and USB_XI
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
+ usbn_clk_ctl.cn31xx.p_rclk = 1; /* From CN31XX,CN30XX manual */
+ usbn_clk_ctl.cn31xx.p_xenbn = 1;
+ } else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN50XX))
+ usbn_clk_ctl.cn56xx.p_rtype = 0; /* From CN56XX,CN50XX manual */
+ else
+ usbn_clk_ctl.cn52xx.p_rtype = 0; /* From CN52XX manual */
+
+ usbn_clk_ctl.s.p_c_sel = 0;
+ }
+ /*
+ * 2c. Select the HCLK via writing USBN0/1_CLK_CTL[DIVIDE, DIVIDE2] and
+ * setting USBN0/1_CLK_CTL[ENABLE] = 1. Divide the core clock down
+ * such that USB is as close as possible to 125Mhz
+ */
+ {
+ int divisor = (octeon_get_clock_rate()+125000000-1)/125000000;
+ if (divisor < 4) /* Lower than 4 doesn't seem to work properly */
+ divisor = 4;
+ usbn_clk_ctl.s.divide = divisor;
+ usbn_clk_ctl.s.divide2 = 0;
+ }
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ /* 2d. Write USBN0/1_CLK_CTL[HCLK_RST] = 1 */
+ usbn_clk_ctl.s.hclk_rst = 1;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ /* 2e. Wait 64 core-clock cycles for HCLK to stabilize */
+ cvmx_wait(64);
+ /*
+ * 3. Program the power-on reset field in the USBN clock-control
+ * register:
+ * USBN_CLK_CTL[POR] = 0
+ */
+ usbn_clk_ctl.s.por = 0;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ /* 4. Wait 1 ms for PHY clock to start */
+ mdelay(1);
+ /*
+ * 5. Program the Reset input from automatic test equipment field in the
+ * USBP control and status register:
+ * USBN_USBP_CTL_STATUS[ATE_RESET] = 1
+ */
+ usbn_usbp_ctl_status.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index));
+ usbn_usbp_ctl_status.s.ate_reset = 1;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index),
+ usbn_usbp_ctl_status.u64);
+ /* 6. Wait 10 cycles */
+ cvmx_wait(10);
+ /*
+ * 7. Clear ATE_RESET field in the USBN clock-control register:
+ * USBN_USBP_CTL_STATUS[ATE_RESET] = 0
+ */
+ usbn_usbp_ctl_status.s.ate_reset = 0;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index),
+ usbn_usbp_ctl_status.u64);
+ /*
+ * 8. Program the PHY reset field in the USBN clock-control register:
+ * USBN_CLK_CTL[PRST] = 1
+ */
+ usbn_clk_ctl.s.prst = 1;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ /*
+ * 9. Program the USBP control and status register to select host or
+ * device mode. USBN_USBP_CTL_STATUS[HST_MODE] = 0 for host, = 1 for
+ * device
+ */
+ usbn_usbp_ctl_status.s.hst_mode = 0;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_USBP_CTL_STATUS(usb->index),
+ usbn_usbp_ctl_status.u64);
+ /* 10. Wait 1 us */
+ udelay(1);
+ /*
+ * 11. Program the hreset_n field in the USBN clock-control register:
+ * USBN_CLK_CTL[HRST] = 1
+ */
+ usbn_clk_ctl.s.hrst = 1;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ /* 12. Proceed to USB core initialization */
+ usbn_clk_ctl.s.enable = 1;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ udelay(1);
+
+ /*
+ * USB Core Initialization
+ *
+ * 1. Read USBC_GHWCFG1, USBC_GHWCFG2, USBC_GHWCFG3, USBC_GHWCFG4 to
+ * determine USB core configuration parameters.
+ *
+ * Nothing needed
+ *
+ * 2. Program the following fields in the global AHB configuration
+ * register (USBC_GAHBCFG)
+ * DMA mode, USBC_GAHBCFG[DMAEn]: 1 = DMA mode, 0 = slave mode
+ * Burst length, USBC_GAHBCFG[HBSTLEN] = 0
+ * Nonperiodic TxFIFO empty level (slave mode only),
+ * USBC_GAHBCFG[NPTXFEMPLVL]
+ * Periodic TxFIFO empty level (slave mode only),
+ * USBC_GAHBCFG[PTXFEMPLVL]
+ * Global interrupt mask, USBC_GAHBCFG[GLBLINTRMSK] = 1
+ */
+ {
+ union cvmx_usbcx_gahbcfg usbcx_gahbcfg;
+ /* Due to an errata, CN31XX doesn't support DMA */
+ if (OCTEON_IS_MODEL(OCTEON_CN31XX))
+ usb->init_flags |= CVMX_USB_INITIALIZE_FLAGS_NO_DMA;
+ usbcx_gahbcfg.u32 = 0;
+ usbcx_gahbcfg.s.dmaen = !(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA);
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ usb->idle_hardware_channels = 0x1; /* Only use one channel with non DMA */
+ else if (OCTEON_IS_MODEL(OCTEON_CN5XXX))
+ usb->idle_hardware_channels = 0xf7; /* CN5XXX have an errata with channel 3 */
+ else
+ usb->idle_hardware_channels = 0xff;
+ usbcx_gahbcfg.s.hbstlen = 0;
+ usbcx_gahbcfg.s.nptxfemplvl = 1;
+ usbcx_gahbcfg.s.ptxfemplvl = 1;
+ usbcx_gahbcfg.s.glblintrmsk = 1;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GAHBCFG(usb->index),
+ usbcx_gahbcfg.u32);
+ }
+ /*
+ * 3. Program the following fields in USBC_GUSBCFG register.
+ * HS/FS timeout calibration, USBC_GUSBCFG[TOUTCAL] = 0
+ * ULPI DDR select, USBC_GUSBCFG[DDRSEL] = 0
+ * USB turnaround time, USBC_GUSBCFG[USBTRDTIM] = 0x5
+ * PHY low-power clock select, USBC_GUSBCFG[PHYLPWRCLKSEL] = 0
+ */
+ {
+ union cvmx_usbcx_gusbcfg usbcx_gusbcfg;
+ usbcx_gusbcfg.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index));
+ usbcx_gusbcfg.s.toutcal = 0;
+ usbcx_gusbcfg.s.ddrsel = 0;
+ usbcx_gusbcfg.s.usbtrdtim = 0x5;
+ usbcx_gusbcfg.s.phylpwrclksel = 0;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GUSBCFG(usb->index),
+ usbcx_gusbcfg.u32);
+ }
+ /*
+ * 4. The software must unmask the following bits in the USBC_GINTMSK
+ * register.
+ * OTG interrupt mask, USBC_GINTMSK[OTGINTMSK] = 1
+ * Mode mismatch interrupt mask, USBC_GINTMSK[MODEMISMSK] = 1
+ */
+ {
+ union cvmx_usbcx_gintmsk usbcx_gintmsk;
+ int channel;
+
+ usbcx_gintmsk.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GINTMSK(usb->index));
+ usbcx_gintmsk.s.otgintmsk = 1;
+ usbcx_gintmsk.s.modemismsk = 1;
+ usbcx_gintmsk.s.hchintmsk = 1;
+ usbcx_gintmsk.s.sofmsk = 0;
+ /* We need RX FIFO interrupts if we don't have DMA */
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ usbcx_gintmsk.s.rxflvlmsk = 1;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTMSK(usb->index),
+ usbcx_gintmsk.u32);
+
+ /* Disable all channel interrupts. We'll enable them per channel later */
+ for (channel = 0; channel < 8; channel++)
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
+ }
+
+ {
+ /*
+ * Host Port Initialization
+ *
+ * 1. Program the host-port interrupt-mask field to unmask,
+ * USBC_GINTMSK[PRTINT] = 1
+ */
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk,
+ prtintmsk, 1);
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk,
+ disconnintmsk, 1);
+ /*
+ * 2. Program the USBC_HCFG register to select full-speed host
+ * or high-speed host.
+ */
+ {
+ union cvmx_usbcx_hcfg usbcx_hcfg;
+ usbcx_hcfg.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCFG(usb->index));
+ usbcx_hcfg.s.fslssupp = 0;
+ usbcx_hcfg.s.fslspclksel = 0;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCFG(usb->index), usbcx_hcfg.u32);
+ }
+ /*
+ * 3. Program the port power bit to drive VBUS on the USB,
+ * USBC_HPRT[PRTPWR] = 1
+ */
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtpwr, 1);
+
+ /*
+ * Steps 4-15 from the manual are done later in the port enable
+ */
+ }
+
+ return 0;
}
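Step 2c divides the core clock so that the USB HCLK lands as close to 125 MHz as possible, rounding up and clamping the divisor to a minimum of 4. A standalone check of that arithmetic (the sample clock rates below are illustrative only):

#include <stdio.h>

int hclk_divisor(unsigned long core_hz)
{
	int divisor = (core_hz + 125000000 - 1) / 125000000;

	if (divisor < 4)	/* values below 4 don't work reliably */
		divisor = 4;
	return divisor;
}

int main(void)
{
	/* 500 MHz -> 4, 800 MHz -> 7 */
	printf("%d %d\n", hclk_divisor(500000000UL),
	       hclk_divisor(800000000UL));
	return 0;
}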
@@ -861,38 +880,34 @@ cvmx_usb_status_t cvmx_usb_initialize(cvmx_usb_state_t *state,
* The port should be disabled with all pipes closed when this
* function is called.
*
- * @param state USB device state populated by
- * cvmx_usb_initialize().
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
*
- * @return CVMX_USB_SUCCESS or a negative error code defined in
- * cvmx_usb_status_t.
+ * Returns: 0 or a negative error code.
*/
-cvmx_usb_status_t cvmx_usb_shutdown(cvmx_usb_state_t *state)
+int cvmx_usb_shutdown(struct cvmx_usb_state *state)
{
- cvmx_usbnx_clk_ctl_t usbn_clk_ctl;
- cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
-
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", state);
-
- /* Make sure all pipes are closed */
- if (usb->idle_pipes.head ||
- usb->active_pipes[CVMX_USB_TRANSFER_ISOCHRONOUS].head ||
- usb->active_pipes[CVMX_USB_TRANSFER_INTERRUPT].head ||
- usb->active_pipes[CVMX_USB_TRANSFER_CONTROL].head ||
- usb->active_pipes[CVMX_USB_TRANSFER_BULK].head)
- CVMX_USB_RETURN(CVMX_USB_BUSY);
-
- /* Disable the clocks and put them in power on reset */
- usbn_clk_ctl.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index));
- usbn_clk_ctl.s.enable = 1;
- usbn_clk_ctl.s.por = 1;
- usbn_clk_ctl.s.hclk_rst = 1;
- usbn_clk_ctl.s.prst = 0;
- usbn_clk_ctl.s.hrst = 0;
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
- usbn_clk_ctl.u64);
- CVMX_USB_RETURN(CVMX_USB_SUCCESS);
+ union cvmx_usbnx_clk_ctl usbn_clk_ctl;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+
+ /* Make sure all pipes are closed */
+ if (usb->idle_pipes.head ||
+ usb->active_pipes[CVMX_USB_TRANSFER_ISOCHRONOUS].head ||
+ usb->active_pipes[CVMX_USB_TRANSFER_INTERRUPT].head ||
+ usb->active_pipes[CVMX_USB_TRANSFER_CONTROL].head ||
+ usb->active_pipes[CVMX_USB_TRANSFER_BULK].head)
+ return -EBUSY;
+
+ /* Disable the clocks and put them in power on reset */
+ usbn_clk_ctl.u64 = __cvmx_usb_read_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index));
+ usbn_clk_ctl.s.enable = 1;
+ usbn_clk_ctl.s.por = 1;
+ usbn_clk_ctl.s.hclk_rst = 1;
+ usbn_clk_ctl.s.prst = 0;
+ usbn_clk_ctl.s.hrst = 0;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_CLK_CTL(usb->index),
+ usbn_clk_ctl.u64);
+ return 0;
}
@@ -900,96 +915,91 @@ cvmx_usb_status_t cvmx_usb_shutdown(cvmx_usb_state_t *state)
* Enable a USB port. After this call succeeds, the USB port is
* online and servicing requests.
*
- * @param state USB device state populated by
- * cvmx_usb_initialize().
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
*
- * @return CVMX_USB_SUCCESS or a negative error code defined in
- * cvmx_usb_status_t.
+ * Returns: 0 or a negative error code.
*/
-cvmx_usb_status_t cvmx_usb_enable(cvmx_usb_state_t *state)
+int cvmx_usb_enable(struct cvmx_usb_state *state)
{
- cvmx_usbcx_ghwcfg3_t usbcx_ghwcfg3;
- cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
-
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", state);
-
- usb->usbcx_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
-
- /* If the port is already enabled the just return. We don't need to do
- anything */
- if (usb->usbcx_hprt.s.prtena)
- CVMX_USB_RETURN(CVMX_USB_SUCCESS);
-
- /* If there is nothing plugged into the port then fail immediately */
- if (!usb->usbcx_hprt.s.prtconnsts) {
- if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_INFO))
- cvmx_dprintf("%s: USB%d Nothing plugged into the port\n", __FUNCTION__, usb->index);
- CVMX_USB_RETURN(CVMX_USB_TIMEOUT);
- }
-
- /* Program the port reset bit to start the reset process */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt_t, prtrst, 1);
-
- /* Wait at least 50ms (high speed), or 10ms (full speed) for the reset
- process to complete. */
- cvmx_wait_usec(50000);
-
- /* Program the port reset bit to 0, USBC_HPRT[PRTRST] = 0 */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt_t, prtrst, 0);
-
- /* Wait for the USBC_HPRT[PRTENA]. */
- if (CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt_t,
- prtena, ==, 1, 100000)) {
- if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_INFO))
- cvmx_dprintf("%s: Timeout waiting for the port to finish reset\n",
- __FUNCTION__);
- CVMX_USB_RETURN(CVMX_USB_TIMEOUT);
- }
-
- /* Read the port speed field to get the enumerated speed, USBC_HPRT[PRTSPD]. */
- usb->usbcx_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
- if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_INFO))
- cvmx_dprintf("%s: USB%d is in %s speed mode\n", __FUNCTION__, usb->index,
- (usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_HIGH) ? "high" :
- (usb->usbcx_hprt.s.prtspd == CVMX_USB_SPEED_FULL) ? "full" :
- "low");
-
- usbcx_ghwcfg3.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GHWCFG3(usb->index));
-
- /* 13. Program the USBC_GRXFSIZ register to select the size of the receive
- FIFO (25%). */
- USB_SET_FIELD32(CVMX_USBCX_GRXFSIZ(usb->index), cvmx_usbcx_grxfsiz_t,
- rxfdep, usbcx_ghwcfg3.s.dfifodepth / 4);
- /* 14. Program the USBC_GNPTXFSIZ register to select the size and the
- start address of the non- periodic transmit FIFO for nonperiodic
- transactions (50%). */
- {
- cvmx_usbcx_gnptxfsiz_t siz;
- siz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index));
- siz.s.nptxfdep = usbcx_ghwcfg3.s.dfifodepth / 2;
- siz.s.nptxfstaddr = usbcx_ghwcfg3.s.dfifodepth / 4;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index), siz.u32);
- }
- /* 15. Program the USBC_HPTXFSIZ register to select the size and start
- address of the periodic transmit FIFO for periodic transactions (25%). */
- {
- cvmx_usbcx_hptxfsiz_t siz;
- siz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index));
- siz.s.ptxfsize = usbcx_ghwcfg3.s.dfifodepth / 4;
- siz.s.ptxfstaddr = 3 * usbcx_ghwcfg3.s.dfifodepth / 4;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index), siz.u32);
- }
- /* Flush all FIFOs */
- USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), cvmx_usbcx_grstctl_t, txfnum, 0x10);
- USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), cvmx_usbcx_grstctl_t, txfflsh, 1);
- CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), cvmx_usbcx_grstctl_t,
- txfflsh, ==, 0, 100);
- USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), cvmx_usbcx_grstctl_t, rxfflsh, 1);
- CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), cvmx_usbcx_grstctl_t,
- rxfflsh, ==, 0, 100);
-
- CVMX_USB_RETURN(CVMX_USB_SUCCESS);
+ union cvmx_usbcx_ghwcfg3 usbcx_ghwcfg3;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+
+ usb->usbcx_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+
+ /*
+ * If the port is already enabled then just return. We don't need to
+ * do anything
+ */
+ if (usb->usbcx_hprt.s.prtena)
+ return 0;
+
+ /* If there is nothing plugged into the port then fail immediately */
+ if (!usb->usbcx_hprt.s.prtconnsts) {
+ return -ETIMEDOUT;
+ }
+
+ /* Program the port reset bit to start the reset process */
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtrst, 1);
+
+ /*
+ * Wait at least 50ms (high speed), or 10ms (full speed) for the reset
+ * process to complete.
+ */
+ mdelay(50);
+
+ /* Program the port reset bit to 0, USBC_HPRT[PRTRST] = 0 */
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtrst, 0);
+
+ /* Wait for the USBC_HPRT[PRTENA]. */
+ if (CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt,
+ prtena, ==, 1, 100000))
+ return -ETIMEDOUT;
+
+ /* Read the port speed field to get the enumerated speed, USBC_HPRT[PRTSPD]. */
+ usb->usbcx_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+ usbcx_ghwcfg3.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GHWCFG3(usb->index));
+
+ /*
+ * 13. Program the USBC_GRXFSIZ register to select the size of the
+ * receive FIFO (25%).
+ */
+ USB_SET_FIELD32(CVMX_USBCX_GRXFSIZ(usb->index), union cvmx_usbcx_grxfsiz,
+ rxfdep, usbcx_ghwcfg3.s.dfifodepth / 4);
+ /*
+ * 14. Program the USBC_GNPTXFSIZ register to select the size and the
+ * start address of the non-periodic transmit FIFO for non-periodic
+ * transactions (50%).
+ */
+ {
+ union cvmx_usbcx_gnptxfsiz siz;
+ siz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index));
+ siz.s.nptxfdep = usbcx_ghwcfg3.s.dfifodepth / 2;
+ siz.s.nptxfstaddr = usbcx_ghwcfg3.s.dfifodepth / 4;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GNPTXFSIZ(usb->index), siz.u32);
+ }
+ /*
+ * 15. Program the USBC_HPTXFSIZ register to select the size and start
+ * address of the periodic transmit FIFO for periodic transactions
+ * (25%).
+ */
+ {
+ union cvmx_usbcx_hptxfsiz siz;
+ siz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index));
+ siz.s.ptxfsize = usbcx_ghwcfg3.s.dfifodepth / 4;
+ siz.s.ptxfstaddr = 3 * usbcx_ghwcfg3.s.dfifodepth / 4;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HPTXFSIZ(usb->index), siz.u32);
+ }
+ /* Flush all FIFOs */
+ USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl, txfnum, 0x10);
+ USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl, txfflsh, 1);
+ CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl,
+ txfflsh, ==, 0, 100);
+ USB_SET_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl, rxfflsh, 1);
+ CVMX_WAIT_FOR_FIELD32(CVMX_USBCX_GRSTCTL(usb->index), union cvmx_usbcx_grstctl,
+ rxfflsh, ==, 0, 100);
+
+ return 0;
}
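To make the 25%/50%/25% FIFO partitioning above concrete, here is the arithmetic for a hypothetical USBC_GHWCFG3[DFIFODEPTH] of 1024 32-bit words (the real depth is read from hardware and may differ):

/*
 * Worked example (illustrative only), dfifodepth = 1024 words:
 *   GRXFSIZ.rxfdep        = 1024 / 4     = 256  -> receive FIFO
 *   GNPTXFSIZ.nptxfstaddr = 1024 / 4     = 256
 *   GNPTXFSIZ.nptxfdep    = 1024 / 2     = 512  -> non-periodic TX FIFO
 *   HPTXFSIZ.ptxfstaddr   = 3 * 1024 / 4 = 768
 *   HPTXFSIZ.ptxfsize     = 1024 / 4     = 256  -> periodic TX FIFO
 */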
@@ -999,22 +1009,18 @@ cvmx_usb_status_t cvmx_usb_enable(cvmx_usb_state_t *state)
* Transactions in process will fail and call their
* associated callbacks.
*
- * @param state USB device state populated by
- * cvmx_usb_initialize().
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
*
- * @return CVMX_USB_SUCCESS or a negative error code defined in
- * cvmx_usb_status_t.
+ * Returns: 0 or a negative error code.
*/
-cvmx_usb_status_t cvmx_usb_disable(cvmx_usb_state_t *state)
+int cvmx_usb_disable(struct cvmx_usb_state *state)
{
- cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", state);
-
- /* Disable the port */
- USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), cvmx_usbcx_hprt_t, prtena, 1);
- CVMX_USB_RETURN(CVMX_USB_SUCCESS);
+ /* Disable the port */
+ USB_SET_FIELD32(CVMX_USBCX_HPRT(usb->index), union cvmx_usbcx_hprt, prtena, 1);
+ return 0;
}
@@ -1027,40 +1033,28 @@ cvmx_usb_status_t cvmx_usb_disable(cvmx_usb_state_t *state)
* on the last call to cvmx_usb_set_status(). In order to clear
* them, you must update the status through cvmx_usb_set_status().
*
- * @param state USB device state populated by
- * cvmx_usb_initialize().
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
*
- * @return Port status information
+ * Returns: Port status information
*/
-cvmx_usb_port_status_t cvmx_usb_get_status(cvmx_usb_state_t *state)
+struct cvmx_usb_port_status cvmx_usb_get_status(struct cvmx_usb_state *state)
{
- cvmx_usbcx_hprt_t usbc_hprt;
- cvmx_usb_port_status_t result;
- cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
-
- memset(&result, 0, sizeof(result));
-
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", state);
-
- usbc_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
- result.port_enabled = usbc_hprt.s.prtena;
- result.port_over_current = usbc_hprt.s.prtovrcurract;
- result.port_powered = usbc_hprt.s.prtpwr;
- result.port_speed = usbc_hprt.s.prtspd;
- result.connected = usbc_hprt.s.prtconnsts;
- result.connect_change = (result.connected != usb->port_status.connected);
-
- if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_CALLS))
- cvmx_dprintf("%*s%s: returned port enabled=%d, over_current=%d, powered=%d, speed=%d, connected=%d, connect_change=%d\n",
- 2*(--usb->indent), "", __FUNCTION__,
- result.port_enabled,
- result.port_over_current,
- result.port_powered,
- result.port_speed,
- result.connected,
- result.connect_change);
- return result;
+ union cvmx_usbcx_hprt usbc_hprt;
+ struct cvmx_usb_port_status result;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+
+ memset(&result, 0, sizeof(result));
+
+ usbc_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+ result.port_enabled = usbc_hprt.s.prtena;
+ result.port_over_current = usbc_hprt.s.prtovrcurract;
+ result.port_powered = usbc_hprt.s.prtpwr;
+ result.port_speed = usbc_hprt.s.prtspd;
+ result.connected = usbc_hprt.s.prtconnsts;
+ result.connect_change = (result.connected != usb->port_status.connected);
+
+ return result;
}
@@ -1071,54 +1065,50 @@ cvmx_usb_port_status_t cvmx_usb_get_status(cvmx_usb_state_t *state)
* status passed to this function is not used. No fields can be
* changed through this call.
*
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- * @param port_status
- * Port status to set, most like returned by cvmx_usb_get_status()
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ * @port_status:
+ * Port status to set, most likely returned by cvmx_usb_get_status()
*/
-void cvmx_usb_set_status(cvmx_usb_state_t *state, cvmx_usb_port_status_t port_status)
+void cvmx_usb_set_status(struct cvmx_usb_state *state, struct cvmx_usb_port_status port_status)
{
- cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", state);
- usb->port_status = port_status;
- CVMX_USB_RETURN_NOTHING();
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+ usb->port_status = port_status;
+ return;
}
/**
- * @INTERNAL
* Convert a USB transaction into a handle
*
- * @param usb USB device state populated by
- * cvmx_usb_initialize().
- * @param transaction
- * Transaction to get handle for
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @transaction:
+ * Transaction to get handle for
*
- * @return Handle
+ * Returns: Handle
*/
-static inline int __cvmx_usb_get_submit_handle(cvmx_usb_internal_state_t *usb,
- cvmx_usb_transaction_t *transaction)
+static inline int __cvmx_usb_get_submit_handle(struct cvmx_usb_internal_state *usb,
+ struct cvmx_usb_transaction *transaction)
{
- return ((unsigned long)transaction - (unsigned long)usb->transaction) /
- sizeof(*transaction);
+ return ((unsigned long)transaction - (unsigned long)usb->transaction) /
+ sizeof(*transaction);
}
/**
- * @INTERNAL
* Convert a USB pipe into a handle
*
- * @param usb USB device state populated by
- * cvmx_usb_initialize().
- * @param pipe Pipe to get handle for
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @pipe: Pipe to get handle for
*
- * @return Handle
+ * Returns: Handle
*/
-static inline int __cvmx_usb_get_pipe_handle(cvmx_usb_internal_state_t *usb,
- cvmx_usb_pipe_t *pipe)
+static inline int __cvmx_usb_get_pipe_handle(struct cvmx_usb_internal_state *usb,
+ struct cvmx_usb_pipe *pipe)
{
- return ((unsigned long)pipe - (unsigned long)usb->pipe) / sizeof(*pipe);
+ return ((unsigned long)pipe - (unsigned long)usb->pipe) / sizeof(*pipe);
}
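Both helpers above reduce a pointer to an index into the corresponding array inside the internal state, so a handle is simply an array subscript. A sketch of the round trip, assuming the handle came from the same USB state structure:

/*
 * Illustration only:
 *   int handle = __cvmx_usb_get_pipe_handle(usb, &usb->pipe[5]);   handle == 5
 *   struct cvmx_usb_pipe *pipe = &usb->pipe[handle];               same pipe back
 */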
@@ -1127,197 +1117,182 @@ static inline int __cvmx_usb_get_pipe_handle(cvmx_usb_internal_state_t *usb,
* must be opened before data can be transferred between a device
* and Octeon.
*
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- * @param flags Optional pipe flags defined in
- * cvmx_usb_pipe_flags_t.
- * @param device_addr
- * USB device address to open the pipe to
- * (0-127).
- * @param endpoint_num
- * USB endpoint number to open the pipe to
- * (0-15).
- * @param device_speed
- * The speed of the device the pipe is going
- * to. This must match the device's speed,
- * which may be different than the port speed.
- * @param max_packet The maximum packet length the device can
- * transmit/receive (low speed=0-8, full
- * speed=0-1023, high speed=0-1024). This value
- * comes from the standard endpoint descriptor
- * field wMaxPacketSize bits <10:0>.
- * @param transfer_type
- * The type of transfer this pipe is for.
- * @param transfer_dir
- * The direction the pipe is in. This is not
- * used for control pipes.
- * @param interval For ISOCHRONOUS and INTERRUPT transfers,
- * this is how often the transfer is scheduled
- * for. All other transfers should specify
- * zero. The units are in frames (8000/sec at
- * high speed, 1000/sec for full speed).
- * @param multi_count
- * For high speed devices, this is the maximum
- * allowed number of packet per microframe.
- * Specify zero for non high speed devices. This
- * value comes from the standard endpoint descriptor
- * field wMaxPacketSize bits <12:11>.
- * @param hub_device_addr
- * Hub device address this device is connected
- * to. Devices connected directly to Octeon
- * use zero. This is only used when the device
- * is full/low speed behind a high speed hub.
- * The address will be of the high speed hub,
- * not and full speed hubs after it.
- * @param hub_port Which port on the hub the device is
- * connected. Use zero for devices connected
- * directly to Octeon. Like hub_device_addr,
- * this is only used for full/low speed
- * devices behind a high speed hub.
- *
- * @return A non negative value is a pipe handle. Negative
- * values are failure codes from cvmx_usb_status_t.
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ * @flags: Optional pipe flags defined in
+ * enum cvmx_usb_pipe_flags.
+ * @device_addr:
+ * USB device address to open the pipe to
+ * (0-127).
+ * @endpoint_num:
+ * USB endpoint number to open the pipe to
+ * (0-15).
+ * @device_speed:
+ * The speed of the device the pipe is going
+ * to. This must match the device's speed,
+ * which may be different than the port speed.
+ * @max_packet: The maximum packet length the device can
+ * transmit/receive (low speed=0-8, full
+ * speed=0-1023, high speed=0-1024). This value
+ * comes from the standard endpoint descriptor
+ * field wMaxPacketSize bits <10:0>.
+ * @transfer_type:
+ * The type of transfer this pipe is for.
+ * @transfer_dir:
+ * The direction the pipe is in. This is not
+ * used for control pipes.
+ * @interval: For ISOCHRONOUS and INTERRUPT transfers,
+ * this is how often the transfer is scheduled
+ * for. All other transfers should specify
+ * zero. The units are in frames (8000/sec at
+ * high speed, 1000/sec for full speed).
+ * @multi_count:
+ * For high speed devices, this is the maximum
+ * allowed number of packets per microframe.
+ * Specify zero for non high speed devices. This
+ * value comes from the standard endpoint descriptor
+ * field wMaxPacketSize bits <12:11>.
+ * @hub_device_addr:
+ * Hub device address this device is connected
+ * to. Devices connected directly to Octeon
+ * use zero. This is only used when the device
+ * is full/low speed behind a high speed hub.
+ * The address will be of the high speed hub,
+ * not any full speed hubs after it.
+ * @hub_port: Which port on the hub the device is
+ * connected. Use zero for devices connected
+ * directly to Octeon. Like hub_device_addr,
+ * this is only used for full/low speed
+ * devices behind a high speed hub.
+ *
+ * Returns: A non-negative value is a pipe handle. Negative
+ * values are error codes.
*/
-int cvmx_usb_open_pipe(cvmx_usb_state_t *state, cvmx_usb_pipe_flags_t flags,
- int device_addr, int endpoint_num,
- cvmx_usb_speed_t device_speed, int max_packet,
- cvmx_usb_transfer_t transfer_type,
- cvmx_usb_direction_t transfer_dir, int interval,
- int multi_count, int hub_device_addr, int hub_port)
+int cvmx_usb_open_pipe(struct cvmx_usb_state *state, enum cvmx_usb_pipe_flags flags,
+ int device_addr, int endpoint_num,
+ enum cvmx_usb_speed device_speed, int max_packet,
+ enum cvmx_usb_transfer transfer_type,
+ enum cvmx_usb_direction transfer_dir, int interval,
+ int multi_count, int hub_device_addr, int hub_port)
{
- cvmx_usb_pipe_t *pipe;
- cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
-
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", state);
- CVMX_USB_LOG_PARAM("0x%x", flags);
- CVMX_USB_LOG_PARAM("%d", device_addr);
- CVMX_USB_LOG_PARAM("%d", endpoint_num);
- CVMX_USB_LOG_PARAM("%d", device_speed);
- CVMX_USB_LOG_PARAM("%d", max_packet);
- CVMX_USB_LOG_PARAM("%d", transfer_type);
- CVMX_USB_LOG_PARAM("%d", transfer_dir);
- CVMX_USB_LOG_PARAM("%d", interval);
- CVMX_USB_LOG_PARAM("%d", multi_count);
- CVMX_USB_LOG_PARAM("%d", hub_device_addr);
- CVMX_USB_LOG_PARAM("%d", hub_port);
-
- if (cvmx_unlikely((device_addr < 0) || (device_addr > MAX_USB_ADDRESS)))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely((endpoint_num < 0) || (endpoint_num > MAX_USB_ENDPOINT)))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely(device_speed > CVMX_USB_SPEED_LOW))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely((max_packet <= 0) || (max_packet > 1024)))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely(transfer_type > CVMX_USB_TRANSFER_INTERRUPT))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely((transfer_dir != CVMX_USB_DIRECTION_OUT) &&
- (transfer_dir != CVMX_USB_DIRECTION_IN)))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely(interval < 0))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely((transfer_type == CVMX_USB_TRANSFER_CONTROL) && interval))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely(multi_count < 0))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely((device_speed != CVMX_USB_SPEED_HIGH) &&
- (multi_count != 0)))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely((hub_device_addr < 0) || (hub_device_addr > MAX_USB_ADDRESS)))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely((hub_port < 0) || (hub_port > MAX_USB_HUB_PORT)))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
-
- /* Find a free pipe */
- pipe = usb->free_pipes.head;
- if (!pipe)
- CVMX_USB_RETURN(CVMX_USB_NO_MEMORY);
- __cvmx_usb_remove_pipe(&usb->free_pipes, pipe);
- pipe->flags = flags | __CVMX_USB_PIPE_FLAGS_OPEN;
- if ((device_speed == CVMX_USB_SPEED_HIGH) &&
- (transfer_dir == CVMX_USB_DIRECTION_OUT) &&
- (transfer_type == CVMX_USB_TRANSFER_BULK))
- pipe->flags |= __CVMX_USB_PIPE_FLAGS_NEED_PING;
- pipe->device_addr = device_addr;
- pipe->endpoint_num = endpoint_num;
- pipe->device_speed = device_speed;
- pipe->max_packet = max_packet;
- pipe->transfer_type = transfer_type;
- pipe->transfer_dir = transfer_dir;
- /* All pipes use interval to rate limit NAK processing. Force an interval
- if one wasn't supplied */
- if (!interval)
- interval = 1;
- if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
- pipe->interval = interval*8;
- /* Force start splits to be schedule on uFrame 0 */
- pipe->next_tx_frame = ((usb->frame_number+7)&~7) + pipe->interval;
- }
- else {
- pipe->interval = interval;
- pipe->next_tx_frame = usb->frame_number + pipe->interval;
- }
- pipe->multi_count = multi_count;
- pipe->hub_device_addr = hub_device_addr;
- pipe->hub_port = hub_port;
- pipe->pid_toggle = 0;
- pipe->split_sc_frame = -1;
- __cvmx_usb_append_pipe(&usb->idle_pipes, pipe);
-
- /* We don't need to tell the hardware about this pipe yet since
- it doesn't have any submitted requests */
-
- CVMX_USB_RETURN(__cvmx_usb_get_pipe_handle(usb, pipe));
+ struct cvmx_usb_pipe *pipe;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+
+ if (unlikely((device_addr < 0) || (device_addr > MAX_USB_ADDRESS)))
+ return -EINVAL;
+ if (unlikely((endpoint_num < 0) || (endpoint_num > MAX_USB_ENDPOINT)))
+ return -EINVAL;
+ if (unlikely(device_speed > CVMX_USB_SPEED_LOW))
+ return -EINVAL;
+ if (unlikely((max_packet <= 0) || (max_packet > 1024)))
+ return -EINVAL;
+ if (unlikely(transfer_type > CVMX_USB_TRANSFER_INTERRUPT))
+ return -EINVAL;
+ if (unlikely((transfer_dir != CVMX_USB_DIRECTION_OUT) &&
+ (transfer_dir != CVMX_USB_DIRECTION_IN)))
+ return -EINVAL;
+ if (unlikely(interval < 0))
+ return -EINVAL;
+ if (unlikely((transfer_type == CVMX_USB_TRANSFER_CONTROL) && interval))
+ return -EINVAL;
+ if (unlikely(multi_count < 0))
+ return -EINVAL;
+ if (unlikely((device_speed != CVMX_USB_SPEED_HIGH) &&
+ (multi_count != 0)))
+ return -EINVAL;
+ if (unlikely((hub_device_addr < 0) || (hub_device_addr > MAX_USB_ADDRESS)))
+ return -EINVAL;
+ if (unlikely((hub_port < 0) || (hub_port > MAX_USB_HUB_PORT)))
+ return -EINVAL;
+
+ /* Find a free pipe */
+ pipe = usb->free_pipes.head;
+ if (!pipe)
+ return -ENOMEM;
+ __cvmx_usb_remove_pipe(&usb->free_pipes, pipe);
+ pipe->flags = flags | __CVMX_USB_PIPE_FLAGS_OPEN;
+ if ((device_speed == CVMX_USB_SPEED_HIGH) &&
+ (transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+ (transfer_type == CVMX_USB_TRANSFER_BULK))
+ pipe->flags |= __CVMX_USB_PIPE_FLAGS_NEED_PING;
+ pipe->device_addr = device_addr;
+ pipe->endpoint_num = endpoint_num;
+ pipe->device_speed = device_speed;
+ pipe->max_packet = max_packet;
+ pipe->transfer_type = transfer_type;
+ pipe->transfer_dir = transfer_dir;
+ /*
+ * All pipes use interval to rate limit NAK processing. Force an
+ * interval if one wasn't supplied
+ */
+ if (!interval)
+ interval = 1;
+ if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ pipe->interval = interval*8;
+ /* Force start splits to be scheduled on uFrame 0 */
+ pipe->next_tx_frame = ((usb->frame_number+7)&~7) + pipe->interval;
+ } else {
+ pipe->interval = interval;
+ pipe->next_tx_frame = usb->frame_number + pipe->interval;
+ }
+ pipe->multi_count = multi_count;
+ pipe->hub_device_addr = hub_device_addr;
+ pipe->hub_port = hub_port;
+ pipe->pid_toggle = 0;
+ pipe->split_sc_frame = -1;
+ __cvmx_usb_append_pipe(&usb->idle_pipes, pipe);
+
+ /*
+ * We don't need to tell the hardware about this pipe yet since
+ * it doesn't have any submitted requests
+ */
+
+ return __cvmx_usb_get_pipe_handle(usb, pipe);
}
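A usage sketch of the reworked open-pipe API follows; the device address, endpoint number and packet size are made-up values, and only the negative-errno convention is taken from the code above:

/* Illustration only: open a hypothetical high-speed bulk OUT pipe. */
static int example_open_bulk_out(struct cvmx_usb_state *state)
{
	int pipe_handle = cvmx_usb_open_pipe(state, 0 /* flags */,
					     2 /* device_addr */,
					     1 /* endpoint_num */,
					     CVMX_USB_SPEED_HIGH,
					     512 /* max_packet */,
					     CVMX_USB_TRANSFER_BULK,
					     CVMX_USB_DIRECTION_OUT,
					     0 /* interval */,
					     0 /* multi_count */,
					     0 /* hub_device_addr */,
					     0 /* hub_port */);

	if (pipe_handle < 0)
		return pipe_handle;	/* e.g. -EINVAL or -ENOMEM */

	return pipe_handle;		/* non-negative pipe handle */
}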
/**
- * @INTERNAL
* Poll the RX FIFOs and remove data as needed. This function is only used
* in non DMA mode. It is very important that this function be called quickly
* enough to prevent FIFO overflow.
*
- * @param usb USB device state populated by
- * cvmx_usb_initialize().
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
*/
-static void __cvmx_usb_poll_rx_fifo(cvmx_usb_internal_state_t *usb)
+static void __cvmx_usb_poll_rx_fifo(struct cvmx_usb_internal_state *usb)
{
- cvmx_usbcx_grxstsph_t rx_status;
- int channel;
- int bytes;
- uint64_t address;
- uint32_t *ptr;
-
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", usb);
-
- rx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GRXSTSPH(usb->index));
- /* Only read data if IN data is there */
- if (rx_status.s.pktsts != 2)
- CVMX_USB_RETURN_NOTHING();
- /* Check if no data is available */
- if (!rx_status.s.bcnt)
- CVMX_USB_RETURN_NOTHING();
-
- channel = rx_status.s.chnum;
- bytes = rx_status.s.bcnt;
- if (!bytes)
- CVMX_USB_RETURN_NOTHING();
-
- /* Get where the DMA engine would have written this data */
- address = __cvmx_usb_read_csr64(usb, CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel*8);
- ptr = cvmx_phys_to_ptr(address);
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel*8, address + bytes);
-
- /* Loop writing the FIFO data for this packet into memory */
- while (bytes > 0) {
- *ptr++ = __cvmx_usb_read_csr32(usb, USB_FIFO_ADDRESS(channel, usb->index));
- bytes -= 4;
- }
- CVMX_SYNCW;
-
- CVMX_USB_RETURN_NOTHING();
+ union cvmx_usbcx_grxstsph rx_status;
+ int channel;
+ int bytes;
+ uint64_t address;
+ uint32_t *ptr;
+
+ rx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GRXSTSPH(usb->index));
+ /* Only read data if IN data is there */
+ if (rx_status.s.pktsts != 2)
+ return;
+ /* Check if no data is available */
+ if (!rx_status.s.bcnt)
+ return;
+
+ channel = rx_status.s.chnum;
+ bytes = rx_status.s.bcnt;
+ if (!bytes)
+ return;
+
+ /* Get where the DMA engine would have written this data */
+ address = __cvmx_usb_read_csr64(usb, CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel*8);
+ ptr = cvmx_phys_to_ptr(address);
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel*8, address + bytes);
+
+ /* Loop writing the FIFO data for this packet into memory */
+ while (bytes > 0) {
+ *ptr++ = __cvmx_usb_read_csr32(usb, USB_FIFO_ADDRESS(channel, usb->index));
+ bytes -= 4;
+ }
+ CVMX_SYNCW;
+
+ return;
}
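One detail worth noting in the routine above: the copy loop always moves whole 32-bit words, while the per-channel shadow address (CVMX_USBNX_DMA0_INB_CHN0 + channel*8) is advanced by the exact byte count reported in GRXSTSPH[BCNT]. For example, with illustrative numbers:

/*
 * A 7-byte IN packet:
 *   bytes = 7  -> two 32-bit FIFO reads, 8 bytes written to memory,
 *   while the shadow DMA address is advanced by exactly 7 bytes.
 */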
@@ -1325,1196 +1300,1136 @@ static void __cvmx_usb_poll_rx_fifo(cvmx_usb_internal_state_t *usb)
* Fill the TX hardware fifo with data out of the software
* fifos
*
- * @param usb USB device state populated by
- * cvmx_usb_initialize().
- * @param fifo Software fifo to use
- * @param available Amount of space in the hardware fifo
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @fifo: Software fifo to use
+ * @available: Amount of space in the hardware fifo
*
- * @return Non zero if the hardware fifo was too small and needs
- * to be serviced again.
+ * Returns: Non zero if the hardware fifo was too small and needs
+ * to be serviced again.
*/
-static int __cvmx_usb_fill_tx_hw(cvmx_usb_internal_state_t *usb, cvmx_usb_tx_fifo_t *fifo, int available)
+static int __cvmx_usb_fill_tx_hw(struct cvmx_usb_internal_state *usb, struct cvmx_usb_tx_fifo *fifo, int available)
{
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", usb);
- CVMX_USB_LOG_PARAM("%p", fifo);
- CVMX_USB_LOG_PARAM("%d", available);
-
- /* We're done either when there isn't anymore space or the software FIFO
- is empty */
- while (available && (fifo->head != fifo->tail)) {
- int i = fifo->tail;
- const uint32_t *ptr = cvmx_phys_to_ptr(fifo->entry[i].address);
- uint64_t csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel, usb->index) ^ 4;
- int words = available;
-
- /* Limit the amount of data to waht the SW fifo has */
- if (fifo->entry[i].size <= available) {
- words = fifo->entry[i].size;
- fifo->tail++;
- if (fifo->tail > MAX_CHANNELS)
- fifo->tail = 0;
- }
-
- /* Update the next locations and counts */
- available -= words;
- fifo->entry[i].address += words * 4;
- fifo->entry[i].size -= words;
-
- /* Write the HW fifo data. The read every three writes is due
- to an errata on CN3XXX chips */
- while (words > 3) {
- cvmx_write64_uint32(csr_address, *ptr++);
- cvmx_write64_uint32(csr_address, *ptr++);
- cvmx_write64_uint32(csr_address, *ptr++);
- cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
- words -= 3;
- }
- cvmx_write64_uint32(csr_address, *ptr++);
- if (--words) {
- cvmx_write64_uint32(csr_address, *ptr++);
- if (--words)
- cvmx_write64_uint32(csr_address, *ptr++);
- }
- cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
- }
- CVMX_USB_RETURN(fifo->head != fifo->tail);
+ /*
+ * We're done either when there isn't any more space or the software FIFO
+ * is empty
+ */
+ while (available && (fifo->head != fifo->tail)) {
+ int i = fifo->tail;
+ const uint32_t *ptr = cvmx_phys_to_ptr(fifo->entry[i].address);
+ uint64_t csr_address = USB_FIFO_ADDRESS(fifo->entry[i].channel, usb->index) ^ 4;
+ int words = available;
+
+ /* Limit the amount of data to what the SW fifo has */
+ if (fifo->entry[i].size <= available) {
+ words = fifo->entry[i].size;
+ fifo->tail++;
+ if (fifo->tail > MAX_CHANNELS)
+ fifo->tail = 0;
+ }
+
+ /* Update the next locations and counts */
+ available -= words;
+ fifo->entry[i].address += words * 4;
+ fifo->entry[i].size -= words;
+
+ /*
+ * Write the HW fifo data. The read after every three writes works
+ * around an erratum on CN3XXX chips
+ */
+ while (words > 3) {
+ cvmx_write64_uint32(csr_address, *ptr++);
+ cvmx_write64_uint32(csr_address, *ptr++);
+ cvmx_write64_uint32(csr_address, *ptr++);
+ cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
+ words -= 3;
+ }
+ cvmx_write64_uint32(csr_address, *ptr++);
+ if (--words) {
+ cvmx_write64_uint32(csr_address, *ptr++);
+ if (--words)
+ cvmx_write64_uint32(csr_address, *ptr++);
+ }
+ cvmx_read64_uint64(CVMX_USBNX_DMA0_INB_CHN0(usb->index));
+ }
+ return fifo->head != fifo->tail;
}
/**
* Check the hardware FIFOs and fill them as needed
*
- * @param usb USB device state populated by
- * cvmx_usb_initialize().
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
*/
-static void __cvmx_usb_poll_tx_fifo(cvmx_usb_internal_state_t *usb)
+static void __cvmx_usb_poll_tx_fifo(struct cvmx_usb_internal_state *usb)
{
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", usb);
-
- if (usb->periodic.head != usb->periodic.tail) {
- cvmx_usbcx_hptxsts_t tx_status;
- tx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXSTS(usb->index));
- if (__cvmx_usb_fill_tx_hw(usb, &usb->periodic, tx_status.s.ptxfspcavail))
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), cvmx_usbcx_gintmsk_t, ptxfempmsk, 1);
- else
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), cvmx_usbcx_gintmsk_t, ptxfempmsk, 0);
- }
-
- if (usb->nonperiodic.head != usb->nonperiodic.tail) {
- cvmx_usbcx_gnptxsts_t tx_status;
- tx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXSTS(usb->index));
- if (__cvmx_usb_fill_tx_hw(usb, &usb->nonperiodic, tx_status.s.nptxfspcavail))
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), cvmx_usbcx_gintmsk_t, nptxfempmsk, 1);
- else
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), cvmx_usbcx_gintmsk_t, nptxfempmsk, 0);
- }
-
- CVMX_USB_RETURN_NOTHING();
+ if (usb->periodic.head != usb->periodic.tail) {
+ union cvmx_usbcx_hptxsts tx_status;
+ tx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPTXSTS(usb->index));
+ if (__cvmx_usb_fill_tx_hw(usb, &usb->periodic, tx_status.s.ptxfspcavail))
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, ptxfempmsk, 1);
+ else
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, ptxfempmsk, 0);
+ }
+
+ if (usb->nonperiodic.head != usb->nonperiodic.tail) {
+ union cvmx_usbcx_gnptxsts tx_status;
+ tx_status.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GNPTXSTS(usb->index));
+ if (__cvmx_usb_fill_tx_hw(usb, &usb->nonperiodic, tx_status.s.nptxfspcavail))
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, nptxfempmsk, 1);
+ else
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, nptxfempmsk, 0);
+ }
+
+ return;
}
/**
- * @INTERNAL
* Fill the TX FIFO with an outgoing packet
*
- * @param usb USB device state populated by
- * cvmx_usb_initialize().
- * @param channel Channel number to get packet from
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @channel: Channel number to get packet from
*/
-static void __cvmx_usb_fill_tx_fifo(cvmx_usb_internal_state_t *usb, int channel)
+static void __cvmx_usb_fill_tx_fifo(struct cvmx_usb_internal_state *usb, int channel)
{
- cvmx_usbcx_hccharx_t hcchar;
- cvmx_usbcx_hcspltx_t usbc_hcsplt;
- cvmx_usbcx_hctsizx_t usbc_hctsiz;
- cvmx_usb_tx_fifo_t *fifo;
-
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", usb);
- CVMX_USB_LOG_PARAM("%d", channel);
-
- /* We only need to fill data on outbound channels */
- hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
- if (hcchar.s.epdir != CVMX_USB_DIRECTION_OUT)
- CVMX_USB_RETURN_NOTHING();
-
- /* OUT Splits only have data on the start and not the complete */
- usbc_hcsplt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCSPLTX(channel, usb->index));
- if (usbc_hcsplt.s.spltena && usbc_hcsplt.s.compsplt)
- CVMX_USB_RETURN_NOTHING();
-
- /* Find out how many bytes we need to fill and convert it into 32bit words */
- usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
- if (!usbc_hctsiz.s.xfersize)
- CVMX_USB_RETURN_NOTHING();
-
- if ((hcchar.s.eptype == CVMX_USB_TRANSFER_INTERRUPT) ||
- (hcchar.s.eptype == CVMX_USB_TRANSFER_ISOCHRONOUS))
- fifo = &usb->periodic;
- else
- fifo = &usb->nonperiodic;
-
- fifo->entry[fifo->head].channel = channel;
- fifo->entry[fifo->head].address = __cvmx_usb_read_csr64(usb, CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8);
- fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize+3)>>2;
- fifo->head++;
- if (fifo->head > MAX_CHANNELS)
- fifo->head = 0;
-
- __cvmx_usb_poll_tx_fifo(usb);
-
- CVMX_USB_RETURN_NOTHING();
+ union cvmx_usbcx_hccharx hcchar;
+ union cvmx_usbcx_hcspltx usbc_hcsplt;
+ union cvmx_usbcx_hctsizx usbc_hctsiz;
+ struct cvmx_usb_tx_fifo *fifo;
+
+ /* We only need to fill data on outbound channels */
+ hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
+ if (hcchar.s.epdir != CVMX_USB_DIRECTION_OUT)
+ return;
+
+ /* OUT Splits only have data on the start and not the complete */
+ usbc_hcsplt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCSPLTX(channel, usb->index));
+ if (usbc_hcsplt.s.spltena && usbc_hcsplt.s.compsplt)
+ return;
+
+ /* Find out how many bytes we need to fill and convert it into 32-bit words */
+ usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
+ if (!usbc_hctsiz.s.xfersize)
+ return;
+
+ if ((hcchar.s.eptype == CVMX_USB_TRANSFER_INTERRUPT) ||
+ (hcchar.s.eptype == CVMX_USB_TRANSFER_ISOCHRONOUS))
+ fifo = &usb->periodic;
+ else
+ fifo = &usb->nonperiodic;
+
+ fifo->entry[fifo->head].channel = channel;
+ fifo->entry[fifo->head].address = __cvmx_usb_read_csr64(usb, CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8);
+ fifo->entry[fifo->head].size = (usbc_hctsiz.s.xfersize+3)>>2;
+ fifo->head++;
+ if (fifo->head > MAX_CHANNELS)
+ fifo->head = 0;
+
+ __cvmx_usb_poll_tx_fifo(usb);
+
+ return;
}
/**
- * @INTERNAL
* Perform channel specific setup for Control transactions. All
* the generic stuff will already have been done in
* __cvmx_usb_start_channel()
*
- * @param usb USB device state populated by
- * cvmx_usb_initialize().
- * @param channel Channel to setup
- * @param pipe Pipe for control transaction
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @channel: Channel to setup
+ * @pipe: Pipe for control transaction
*/
-static void __cvmx_usb_start_channel_control(cvmx_usb_internal_state_t *usb,
- int channel,
- cvmx_usb_pipe_t *pipe)
+static void __cvmx_usb_start_channel_control(struct cvmx_usb_internal_state *usb,
+ int channel,
+ struct cvmx_usb_pipe *pipe)
{
- cvmx_usb_transaction_t *transaction = pipe->head;
- cvmx_usb_control_header_t *header = cvmx_phys_to_ptr(transaction->control_header);
- int bytes_to_transfer = transaction->buffer_length - transaction->actual_bytes;
- int packets_to_transfer;
- cvmx_usbcx_hctsizx_t usbc_hctsiz;
-
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", usb);
- CVMX_USB_LOG_PARAM("%d", channel);
- CVMX_USB_LOG_PARAM("%p", pipe);
-
- usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
-
- switch (transaction->stage) {
- case CVMX_USB_STAGE_NON_CONTROL:
- case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
- cvmx_dprintf("%s: ERROR - Non control stage\n", __FUNCTION__);
- break;
- case CVMX_USB_STAGE_SETUP:
- usbc_hctsiz.s.pid = 3; /* Setup */
- bytes_to_transfer = sizeof(*header);
- /* All Control operations start with a setup going OUT */
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), cvmx_usbcx_hccharx_t, epdir, CVMX_USB_DIRECTION_OUT);
- /* Setup send the control header instead of the buffer data. The
- buffer data will be used in the next stage */
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8, transaction->control_header);
- break;
- case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
- usbc_hctsiz.s.pid = 3; /* Setup */
- bytes_to_transfer = 0;
- /* All Control operations start with a setup going OUT */
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), cvmx_usbcx_hccharx_t, epdir, CVMX_USB_DIRECTION_OUT);
- USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), cvmx_usbcx_hcspltx_t, compsplt, 1);
- break;
- case CVMX_USB_STAGE_DATA:
- usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
- if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
- if (header->s.request_type & 0x80)
- bytes_to_transfer = 0;
- else if (bytes_to_transfer > pipe->max_packet)
- bytes_to_transfer = pipe->max_packet;
- }
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx_t, epdir,
- ((header->s.request_type & 0x80) ?
- CVMX_USB_DIRECTION_IN :
- CVMX_USB_DIRECTION_OUT));
- break;
- case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
- usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
- if (!(header->s.request_type & 0x80))
- bytes_to_transfer = 0;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
- cvmx_usbcx_hccharx_t, epdir,
- ((header->s.request_type & 0x80) ?
- CVMX_USB_DIRECTION_IN :
- CVMX_USB_DIRECTION_OUT));
- USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), cvmx_usbcx_hcspltx_t, compsplt, 1);
- break;
- case CVMX_USB_STAGE_STATUS:
- usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
- bytes_to_transfer = 0;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), cvmx_usbcx_hccharx_t, epdir,
- ((header->s.request_type & 0x80) ?
- CVMX_USB_DIRECTION_OUT :
- CVMX_USB_DIRECTION_IN));
- break;
- case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
- usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
- bytes_to_transfer = 0;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), cvmx_usbcx_hccharx_t, epdir,
- ((header->s.request_type & 0x80) ?
- CVMX_USB_DIRECTION_OUT :
- CVMX_USB_DIRECTION_IN));
- USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), cvmx_usbcx_hcspltx_t, compsplt, 1);
- break;
- }
-
- /* Make sure the transfer never exceeds the byte limit of the hardware.
- Further bytes will be sent as continued transactions */
- if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
- /* Round MAX_TRANSFER_BYTES to a multiple of out packet size */
- bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet;
- bytes_to_transfer *= pipe->max_packet;
- }
-
- /* Calculate the number of packets to transfer. If the length is zero
- we still need to transfer one packet */
- packets_to_transfer = (bytes_to_transfer + pipe->max_packet - 1) / pipe->max_packet;
- if (packets_to_transfer == 0)
- packets_to_transfer = 1;
- else if ((packets_to_transfer>1) && (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
- /* Limit to one packet when not using DMA. Channels must be restarted
- between every packet for IN transactions, so there is no reason to
- do multiple packets in a row */
- packets_to_transfer = 1;
- bytes_to_transfer = packets_to_transfer * pipe->max_packet;
- }
- else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
- /* Limit the number of packet and data transferred to what the
- hardware can handle */
- packets_to_transfer = MAX_TRANSFER_PACKETS;
- bytes_to_transfer = packets_to_transfer * pipe->max_packet;
- }
-
- usbc_hctsiz.s.xfersize = bytes_to_transfer;
- usbc_hctsiz.s.pktcnt = packets_to_transfer;
-
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index), usbc_hctsiz.u32);
- CVMX_USB_RETURN_NOTHING();
+ struct cvmx_usb_transaction *transaction = pipe->head;
+ union cvmx_usb_control_header *header =
+ cvmx_phys_to_ptr(transaction->control_header);
+ int bytes_to_transfer = transaction->buffer_length - transaction->actual_bytes;
+ int packets_to_transfer;
+ union cvmx_usbcx_hctsizx usbc_hctsiz;
+
+ usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
+
+ switch (transaction->stage) {
+ case CVMX_USB_STAGE_NON_CONTROL:
+ case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
+ cvmx_dprintf("%s: ERROR - Non control stage\n", __FUNCTION__);
+ break;
+ case CVMX_USB_STAGE_SETUP:
+ usbc_hctsiz.s.pid = 3; /* Setup */
+ bytes_to_transfer = sizeof(*header);
+ /* All Control operations start with a setup going OUT */
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, epdir, CVMX_USB_DIRECTION_OUT);
+ /*
+ * The setup stage sends the control header instead of the buffer
+ * data. The buffer data will be used in the next stage
+ */
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8, transaction->control_header);
+ break;
+ case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
+ usbc_hctsiz.s.pid = 3; /* Setup */
+ bytes_to_transfer = 0;
+ /* All Control operations start with a setup going OUT */
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, epdir, CVMX_USB_DIRECTION_OUT);
+ USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), union cvmx_usbcx_hcspltx, compsplt, 1);
+ break;
+ case CVMX_USB_STAGE_DATA:
+ usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
+ if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ if (header->s.request_type & 0x80)
+ bytes_to_transfer = 0;
+ else if (bytes_to_transfer > pipe->max_packet)
+ bytes_to_transfer = pipe->max_packet;
+ }
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+ union cvmx_usbcx_hccharx, epdir,
+ ((header->s.request_type & 0x80) ?
+ CVMX_USB_DIRECTION_IN :
+ CVMX_USB_DIRECTION_OUT));
+ break;
+ case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
+ usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
+ if (!(header->s.request_type & 0x80))
+ bytes_to_transfer = 0;
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index),
+ union cvmx_usbcx_hccharx, epdir,
+ ((header->s.request_type & 0x80) ?
+ CVMX_USB_DIRECTION_IN :
+ CVMX_USB_DIRECTION_OUT));
+ USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), union cvmx_usbcx_hcspltx, compsplt, 1);
+ break;
+ case CVMX_USB_STAGE_STATUS:
+ usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
+ bytes_to_transfer = 0;
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, epdir,
+ ((header->s.request_type & 0x80) ?
+ CVMX_USB_DIRECTION_OUT :
+ CVMX_USB_DIRECTION_IN));
+ break;
+ case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
+ usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
+ bytes_to_transfer = 0;
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, epdir,
+ ((header->s.request_type & 0x80) ?
+ CVMX_USB_DIRECTION_OUT :
+ CVMX_USB_DIRECTION_IN));
+ USB_SET_FIELD32(CVMX_USBCX_HCSPLTX(channel, usb->index), union cvmx_usbcx_hcspltx, compsplt, 1);
+ break;
+ }
+
+ /*
+ * Make sure the transfer never exceeds the byte limit of the hardware.
+ * Further bytes will be sent as continued transactions
+ */
+ if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
+ /* Round MAX_TRANSFER_BYTES to a multiple of out packet size */
+ bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet;
+ bytes_to_transfer *= pipe->max_packet;
+ }
+
+ /*
+ * Calculate the number of packets to transfer. If the length is zero
+ * we still need to transfer one packet
+ */
+ packets_to_transfer = (bytes_to_transfer + pipe->max_packet - 1) / pipe->max_packet;
+ if (packets_to_transfer == 0)
+ packets_to_transfer = 1;
+ else if ((packets_to_transfer > 1) && (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
+ /*
+ * Limit to one packet when not using DMA. Channels must be
+ * restarted between every packet for IN transactions, so there
+ * is no reason to do multiple packets in a row
+ */
+ packets_to_transfer = 1;
+ bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ } else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
+ /*
+ * Limit the number of packets and data transferred to what the
+ * hardware can handle
+ */
+ packets_to_transfer = MAX_TRANSFER_PACKETS;
+ bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ }
+
+ usbc_hctsiz.s.xfersize = bytes_to_transfer;
+ usbc_hctsiz.s.pktcnt = packets_to_transfer;
+
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index), usbc_hctsiz.u32);
+ return;
}
/**
- * @INTERNAL
* Start a channel to perform the pipe's head transaction
*
- * @param usb USB device state populated by
- * cvmx_usb_initialize().
- * @param channel Channel to setup
- * @param pipe Pipe to start
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @channel: Channel to setup
+ * @pipe: Pipe to start
*/
-static void __cvmx_usb_start_channel(cvmx_usb_internal_state_t *usb,
- int channel,
- cvmx_usb_pipe_t *pipe)
+static void __cvmx_usb_start_channel(struct cvmx_usb_internal_state *usb,
+ int channel,
+ struct cvmx_usb_pipe *pipe)
{
- cvmx_usb_transaction_t *transaction = pipe->head;
-
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", usb);
- CVMX_USB_LOG_PARAM("%d", channel);
- CVMX_USB_LOG_PARAM("%p", pipe);
-
- if (cvmx_unlikely((usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_TRANSFERS) ||
- (pipe->flags & CVMX_USB_PIPE_FLAGS_DEBUG_TRANSFERS)))
- cvmx_dprintf("%s: Channel %d started. Pipe %d transaction %d stage %d\n",
- __FUNCTION__, channel, __cvmx_usb_get_pipe_handle(usb, pipe),
- __cvmx_usb_get_submit_handle(usb, transaction),
- transaction->stage);
-
- /* Make sure all writes to the DMA region get flushed */
- CVMX_SYNCW;
-
- /* Attach the channel to the pipe */
- usb->pipe_for_channel[channel] = pipe;
- pipe->channel = channel;
- pipe->flags |= __CVMX_USB_PIPE_FLAGS_SCHEDULED;
-
- /* Mark this channel as in use */
- usb->idle_hardware_channels &= ~(1<<channel);
-
- /* Enable the channel interrupt bits */
- {
- cvmx_usbcx_hcintx_t usbc_hcint;
- cvmx_usbcx_hcintmskx_t usbc_hcintmsk;
- cvmx_usbcx_haintmsk_t usbc_haintmsk;
-
- /* Clear all channel status bits */
- usbc_hcint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index));
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index), usbc_hcint.u32);
-
- usbc_hcintmsk.u32 = 0;
- usbc_hcintmsk.s.chhltdmsk = 1;
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
- /* Channels need these extra interrupts when we aren't in DMA mode */
- usbc_hcintmsk.s.datatglerrmsk = 1;
- usbc_hcintmsk.s.frmovrunmsk = 1;
- usbc_hcintmsk.s.bblerrmsk = 1;
- usbc_hcintmsk.s.xacterrmsk = 1;
- if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
- /* Splits don't generate xfercompl, so we need ACK and NYET */
- usbc_hcintmsk.s.nyetmsk = 1;
- usbc_hcintmsk.s.ackmsk = 1;
- }
- usbc_hcintmsk.s.nakmsk = 1;
- usbc_hcintmsk.s.stallmsk = 1;
- usbc_hcintmsk.s.xfercomplmsk = 1;
- }
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), usbc_hcintmsk.u32);
-
- /* Enable the channel interrupt to propagate */
- usbc_haintmsk.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index));
- usbc_haintmsk.s.haintmsk |= 1<<channel;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index), usbc_haintmsk.u32);
- }
-
- /* Setup the locations the DMA engines use */
- {
- uint64_t dma_address = transaction->buffer + transaction->actual_bytes;
- if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
- dma_address = transaction->buffer + transaction->iso_packets[0].offset + transaction->actual_bytes;
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8, dma_address);
- __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel*8, dma_address);
- }
-
- /* Setup both the size of the transfer and the SPLIT characteristics */
- {
- cvmx_usbcx_hcspltx_t usbc_hcsplt = {.u32 = 0};
- cvmx_usbcx_hctsizx_t usbc_hctsiz = {.u32 = 0};
- int packets_to_transfer;
- int bytes_to_transfer = transaction->buffer_length - transaction->actual_bytes;
-
- /* ISOCHRONOUS transactions store each individual transfer size in the
- packet structure, not the global buffer_length */
- if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
- bytes_to_transfer = transaction->iso_packets[0].length - transaction->actual_bytes;
-
- /* We need to do split transactions when we are talking to non high
- speed devices that are behind a high speed hub */
- if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
- /* On the start split phase (stage is even) record the frame number we
- will need to send the split complete. We only store the lower two bits
- since the time ahead can only be two frames */
- if ((transaction->stage&1) == 0) {
- if (transaction->type == CVMX_USB_TRANSFER_BULK)
- pipe->split_sc_frame = (usb->frame_number + 1) & 0x7f;
- else
- pipe->split_sc_frame = (usb->frame_number + 2) & 0x7f;
- }
- else
- pipe->split_sc_frame = -1;
-
- usbc_hcsplt.s.spltena = 1;
- usbc_hcsplt.s.hubaddr = pipe->hub_device_addr;
- usbc_hcsplt.s.prtaddr = pipe->hub_port;
- usbc_hcsplt.s.compsplt = (transaction->stage == CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE);
-
- /* SPLIT transactions can only ever transmit one data packet so
- limit the transfer size to the max packet size */
- if (bytes_to_transfer > pipe->max_packet)
- bytes_to_transfer = pipe->max_packet;
-
- /* ISOCHRONOUS OUT splits are unique in that they limit
- data transfers to 188 byte chunks representing the
- begin/middle/end of the data or all */
- if (!usbc_hcsplt.s.compsplt &&
- (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
- (pipe->transfer_type == CVMX_USB_TRANSFER_ISOCHRONOUS)) {
- /* Clear the split complete frame number as there isn't going
- to be a split complete */
- pipe->split_sc_frame = -1;
- /* See if we've started this transfer and sent data */
- if (transaction->actual_bytes == 0) {
- /* Nothing sent yet, this is either a begin or the
- entire payload */
- if (bytes_to_transfer <= 188)
- usbc_hcsplt.s.xactpos = 3; /* Entire payload in one go */
- else
- usbc_hcsplt.s.xactpos = 2; /* First part of payload */
- }
- else {
- /* Continuing the previous data, we must either be
- in the middle or at the end */
- if (bytes_to_transfer <= 188)
- usbc_hcsplt.s.xactpos = 1; /* End of payload */
- else
- usbc_hcsplt.s.xactpos = 0; /* Middle of payload */
- }
- /* Again, the transfer size is limited to 188 bytes */
- if (bytes_to_transfer > 188)
- bytes_to_transfer = 188;
- }
- }
-
- /* Make sure the transfer never exceeds the byte limit of the hardware.
- Further bytes will be sent as continued transactions */
- if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
- /* Round MAX_TRANSFER_BYTES to a multiple of out packet size */
- bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet;
- bytes_to_transfer *= pipe->max_packet;
- }
-
- /* Calculate the number of packets to transfer. If the length is zero
- we still need to transfer one packet */
- packets_to_transfer = (bytes_to_transfer + pipe->max_packet - 1) / pipe->max_packet;
- if (packets_to_transfer == 0)
- packets_to_transfer = 1;
- else if ((packets_to_transfer>1) && (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
- /* Limit to one packet when not using DMA. Channels must be restarted
- between every packet for IN transactions, so there is no reason to
- do multiple packets in a row */
- packets_to_transfer = 1;
- bytes_to_transfer = packets_to_transfer * pipe->max_packet;
- }
- else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
- /* Limit the number of packet and data transferred to what the
- hardware can handle */
- packets_to_transfer = MAX_TRANSFER_PACKETS;
- bytes_to_transfer = packets_to_transfer * pipe->max_packet;
- }
-
- usbc_hctsiz.s.xfersize = bytes_to_transfer;
- usbc_hctsiz.s.pktcnt = packets_to_transfer;
-
- /* Update the DATA0/DATA1 toggle */
- usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
- /* High speed pipes may need a hardware ping before they start */
- if (pipe->flags & __CVMX_USB_PIPE_FLAGS_NEED_PING)
- usbc_hctsiz.s.dopng = 1;
-
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCSPLTX(channel, usb->index), usbc_hcsplt.u32);
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index), usbc_hctsiz.u32);
- }
-
- /* Setup the Host Channel Characteristics Register */
- {
- cvmx_usbcx_hccharx_t usbc_hcchar = {.u32 = 0};
-
- /* Set the startframe odd/even properly. This is only used for periodic */
- usbc_hcchar.s.oddfrm = usb->frame_number&1;
-
- /* Set the number of back to back packets allowed by this endpoint.
- Split transactions interpret "ec" as the number of immediate
- retries of failure. These retries happen too quickly, so we
- disable these entirely for splits */
- if (__cvmx_usb_pipe_needs_split(usb, pipe))
- usbc_hcchar.s.ec = 1;
- else if (pipe->multi_count < 1)
- usbc_hcchar.s.ec = 1;
- else if (pipe->multi_count > 3)
- usbc_hcchar.s.ec = 3;
- else
- usbc_hcchar.s.ec = pipe->multi_count;
-
- /* Set the rest of the endpoint specific settings */
- usbc_hcchar.s.devaddr = pipe->device_addr;
- usbc_hcchar.s.eptype = transaction->type;
- usbc_hcchar.s.lspddev = (pipe->device_speed == CVMX_USB_SPEED_LOW);
- usbc_hcchar.s.epdir = pipe->transfer_dir;
- usbc_hcchar.s.epnum = pipe->endpoint_num;
- usbc_hcchar.s.mps = pipe->max_packet;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
- }
-
- /* Do transaction type specific fixups as needed */
- switch (transaction->type) {
- case CVMX_USB_TRANSFER_CONTROL:
- __cvmx_usb_start_channel_control(usb, channel, pipe);
- break;
- case CVMX_USB_TRANSFER_BULK:
- case CVMX_USB_TRANSFER_INTERRUPT:
- break;
- case CVMX_USB_TRANSFER_ISOCHRONOUS:
- if (!__cvmx_usb_pipe_needs_split(usb, pipe)) {
- /* ISO transactions require different PIDs depending on direction
- and how many packets are needed */
- if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
- if (pipe->multi_count < 2) /* Need DATA0 */
- USB_SET_FIELD32(CVMX_USBCX_HCTSIZX(channel, usb->index), cvmx_usbcx_hctsizx_t, pid, 0);
- else /* Need MDATA */
- USB_SET_FIELD32(CVMX_USBCX_HCTSIZX(channel, usb->index), cvmx_usbcx_hctsizx_t, pid, 3);
- }
- }
- break;
- }
- {
- cvmx_usbcx_hctsizx_t usbc_hctsiz = {.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index))};
- transaction->xfersize = usbc_hctsiz.s.xfersize;
- transaction->pktcnt = usbc_hctsiz.s.pktcnt;
- }
- /* Remeber when we start a split transaction */
- if (__cvmx_usb_pipe_needs_split(usb, pipe))
- usb->active_split = transaction;
- USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), cvmx_usbcx_hccharx_t, chena, 1);
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- __cvmx_usb_fill_tx_fifo(usb, channel);
- CVMX_USB_RETURN_NOTHING();
+ struct cvmx_usb_transaction *transaction = pipe->head;
+
+ /* Make sure all writes to the DMA region get flushed */
+ CVMX_SYNCW;
+
+ /* Attach the channel to the pipe */
+ usb->pipe_for_channel[channel] = pipe;
+ pipe->channel = channel;
+ pipe->flags |= __CVMX_USB_PIPE_FLAGS_SCHEDULED;
+
+ /* Mark this channel as in use */
+ usb->idle_hardware_channels &= ~(1<<channel);
+
+ /* Enable the channel interrupt bits */
+ {
+ union cvmx_usbcx_hcintx usbc_hcint;
+ union cvmx_usbcx_hcintmskx usbc_hcintmsk;
+ union cvmx_usbcx_haintmsk usbc_haintmsk;
+
+ /* Clear all channel status bits */
+ usbc_hcint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index));
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index), usbc_hcint.u32);
+
+ usbc_hcintmsk.u32 = 0;
+ usbc_hcintmsk.s.chhltdmsk = 1;
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
+ /* Channels need these extra interrupts when we aren't in DMA mode */
+ usbc_hcintmsk.s.datatglerrmsk = 1;
+ usbc_hcintmsk.s.frmovrunmsk = 1;
+ usbc_hcintmsk.s.bblerrmsk = 1;
+ usbc_hcintmsk.s.xacterrmsk = 1;
+ if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ /* Splits don't generate xfercompl, so we need ACK and NYET */
+ usbc_hcintmsk.s.nyetmsk = 1;
+ usbc_hcintmsk.s.ackmsk = 1;
+ }
+ usbc_hcintmsk.s.nakmsk = 1;
+ usbc_hcintmsk.s.stallmsk = 1;
+ usbc_hcintmsk.s.xfercomplmsk = 1;
+ }
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), usbc_hcintmsk.u32);
+
+ /* Enable the channel interrupt to propagate */
+ usbc_haintmsk.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index));
+ usbc_haintmsk.s.haintmsk |= 1<<channel;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HAINTMSK(usb->index), usbc_haintmsk.u32);
+ }
+
+ /* Setup the locations the DMA engines use */
+ {
+ uint64_t dma_address = transaction->buffer + transaction->actual_bytes;
+ if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
+ dma_address = transaction->buffer + transaction->iso_packets[0].offset + transaction->actual_bytes;
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_OUTB_CHN0(usb->index) + channel*8, dma_address);
+ __cvmx_usb_write_csr64(usb, CVMX_USBNX_DMA0_INB_CHN0(usb->index) + channel*8, dma_address);
+ }
+
+ /* Setup both the size of the transfer and the SPLIT characteristics */
+ {
+ union cvmx_usbcx_hcspltx usbc_hcsplt = {.u32 = 0};
+ union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = 0};
+ int packets_to_transfer;
+ int bytes_to_transfer = transaction->buffer_length - transaction->actual_bytes;
+
+ /*
+ * ISOCHRONOUS transactions store each individual transfer size
+ * in the packet structure, not the global buffer_length
+ */
+ if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
+ bytes_to_transfer = transaction->iso_packets[0].length - transaction->actual_bytes;
+
+		/*
+		 * We need to do split transactions when we are talking to
+		 * non-high-speed devices that are behind a high-speed hub
+		 */
+ if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
+			/*
+			 * On the start split phase (stage is even) record the
+			 * frame number we will need to send the split complete.
+			 * We only store the lower seven bits since the split
+			 * logic compares frame numbers modulo 128
+			 */
+ if ((transaction->stage&1) == 0) {
+ if (transaction->type == CVMX_USB_TRANSFER_BULK)
+ pipe->split_sc_frame = (usb->frame_number + 1) & 0x7f;
+ else
+ pipe->split_sc_frame = (usb->frame_number + 2) & 0x7f;
+ } else
+ pipe->split_sc_frame = -1;
+
+ usbc_hcsplt.s.spltena = 1;
+ usbc_hcsplt.s.hubaddr = pipe->hub_device_addr;
+ usbc_hcsplt.s.prtaddr = pipe->hub_port;
+ usbc_hcsplt.s.compsplt = (transaction->stage == CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE);
+
+ /*
+ * SPLIT transactions can only ever transmit one data
+ * packet so limit the transfer size to the max packet
+ * size
+ */
+ if (bytes_to_transfer > pipe->max_packet)
+ bytes_to_transfer = pipe->max_packet;
+
+			/*
+			 * ISOCHRONOUS OUT splits are unique in that they limit
+			 * data transfers to 188 byte chunks representing the
+			 * begin/middle/end of the data, or all of it at once
+			 */
+ if (!usbc_hcsplt.s.compsplt &&
+ (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+ (pipe->transfer_type == CVMX_USB_TRANSFER_ISOCHRONOUS)) {
+ /*
+ * Clear the split complete frame number as
+ * there isn't going to be a split complete
+ */
+ pipe->split_sc_frame = -1;
+ /*
+ * See if we've started this transfer and sent
+ * data
+ */
+ if (transaction->actual_bytes == 0) {
+ /*
+ * Nothing sent yet, this is either a
+ * begin or the entire payload
+ */
+ if (bytes_to_transfer <= 188)
+ usbc_hcsplt.s.xactpos = 3; /* Entire payload in one go */
+ else
+ usbc_hcsplt.s.xactpos = 2; /* First part of payload */
+ } else {
+ /*
+ * Continuing the previous data, we must
+ * either be in the middle or at the end
+ */
+ if (bytes_to_transfer <= 188)
+ usbc_hcsplt.s.xactpos = 1; /* End of payload */
+ else
+ usbc_hcsplt.s.xactpos = 0; /* Middle of payload */
+ }
+ /*
+ * Again, the transfer size is limited to 188
+ * bytes
+ */
+ if (bytes_to_transfer > 188)
+ bytes_to_transfer = 188;
+ }
+ }
+
+ /*
+ * Make sure the transfer never exceeds the byte limit of the
+ * hardware. Further bytes will be sent as continued
+ * transactions
+ */
+ if (bytes_to_transfer > MAX_TRANSFER_BYTES) {
+			/*
+			 * Round MAX_TRANSFER_BYTES down to a multiple of the
+			 * out packet size
+			 */
+ bytes_to_transfer = MAX_TRANSFER_BYTES / pipe->max_packet;
+ bytes_to_transfer *= pipe->max_packet;
+ }
+
+ /*
+ * Calculate the number of packets to transfer. If the length is
+ * zero we still need to transfer one packet
+ */
+ packets_to_transfer = (bytes_to_transfer + pipe->max_packet - 1) / pipe->max_packet;
+ if (packets_to_transfer == 0)
+ packets_to_transfer = 1;
+ else if ((packets_to_transfer > 1) && (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)) {
+ /*
+ * Limit to one packet when not using DMA. Channels must
+ * be restarted between every packet for IN
+ * transactions, so there is no reason to do multiple
+ * packets in a row
+ */
+ packets_to_transfer = 1;
+ bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ } else if (packets_to_transfer > MAX_TRANSFER_PACKETS) {
+			/*
+			 * Limit the number of packets and the amount of data
+			 * transferred to what the hardware can handle
+			 */
+ packets_to_transfer = MAX_TRANSFER_PACKETS;
+ bytes_to_transfer = packets_to_transfer * pipe->max_packet;
+ }
+
+ usbc_hctsiz.s.xfersize = bytes_to_transfer;
+ usbc_hctsiz.s.pktcnt = packets_to_transfer;
+
+ /* Update the DATA0/DATA1 toggle */
+ usbc_hctsiz.s.pid = __cvmx_usb_get_data_pid(pipe);
+ /*
+ * High speed pipes may need a hardware ping before they start
+ */
+ if (pipe->flags & __CVMX_USB_PIPE_FLAGS_NEED_PING)
+ usbc_hctsiz.s.dopng = 1;
+
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCSPLTX(channel, usb->index), usbc_hcsplt.u32);
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index), usbc_hctsiz.u32);
+ }
+
+ /* Setup the Host Channel Characteristics Register */
+ {
+ union cvmx_usbcx_hccharx usbc_hcchar = {.u32 = 0};
+
+		/*
+		 * Set the startframe odd/even properly. This is only used for
+		 * periodic transfers
+		 */
+ usbc_hcchar.s.oddfrm = usb->frame_number&1;
+
+		/*
+		 * Set the number of back to back packets allowed by this
+		 * endpoint. Split transactions interpret "ec" as the number of
+		 * immediate retries on failure. These retries happen too
+		 * quickly, so we disable them entirely for splits
+		 */
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ usbc_hcchar.s.ec = 1;
+ else if (pipe->multi_count < 1)
+ usbc_hcchar.s.ec = 1;
+ else if (pipe->multi_count > 3)
+ usbc_hcchar.s.ec = 3;
+ else
+ usbc_hcchar.s.ec = pipe->multi_count;
+
+ /* Set the rest of the endpoint specific settings */
+ usbc_hcchar.s.devaddr = pipe->device_addr;
+ usbc_hcchar.s.eptype = transaction->type;
+ usbc_hcchar.s.lspddev = (pipe->device_speed == CVMX_USB_SPEED_LOW);
+ usbc_hcchar.s.epdir = pipe->transfer_dir;
+ usbc_hcchar.s.epnum = pipe->endpoint_num;
+ usbc_hcchar.s.mps = pipe->max_packet;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
+ }
+
+ /* Do transaction type specific fixups as needed */
+ switch (transaction->type) {
+ case CVMX_USB_TRANSFER_CONTROL:
+ __cvmx_usb_start_channel_control(usb, channel, pipe);
+ break;
+ case CVMX_USB_TRANSFER_BULK:
+ case CVMX_USB_TRANSFER_INTERRUPT:
+ break;
+ case CVMX_USB_TRANSFER_ISOCHRONOUS:
+ if (!__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ /*
+ * ISO transactions require different PIDs depending on
+ * direction and how many packets are needed
+ */
+ if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
+ if (pipe->multi_count < 2) /* Need DATA0 */
+ USB_SET_FIELD32(CVMX_USBCX_HCTSIZX(channel, usb->index), union cvmx_usbcx_hctsizx, pid, 0);
+ else /* Need MDATA */
+ USB_SET_FIELD32(CVMX_USBCX_HCTSIZX(channel, usb->index), union cvmx_usbcx_hctsizx, pid, 3);
+ }
+ }
+ break;
+ }
+ {
+ union cvmx_usbcx_hctsizx usbc_hctsiz = {.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index))};
+ transaction->xfersize = usbc_hctsiz.s.xfersize;
+ transaction->pktcnt = usbc_hctsiz.s.pktcnt;
+ }
+	/* Remember when we start a split transaction */
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ usb->active_split = transaction;
+ USB_SET_FIELD32(CVMX_USBCX_HCCHARX(channel, usb->index), union cvmx_usbcx_hccharx, chena, 1);
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ __cvmx_usb_fill_tx_fifo(usb, channel);
+ return;
}
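Stripped of the register plumbing, the xfersize/pktcnt computation above boils down to the following standalone sketch (illustrative only, not driver code; MAX_TRANSFER_BYTES and MAX_TRANSFER_PACKETS are the same hardware limits used above):

/*
 * Sketch of the transfer sizing above: clamp the byte count to the
 * hardware limit, derive the packet count, then apply the no-DMA and
 * packet-count limits.  Mirrors the driver logic; values illustrative.
 */
static void size_transfer(int bytes, int max_packet, int no_dma,
			  int *xfersize, int *pktcnt)
{
	int packets;

	if (bytes > MAX_TRANSFER_BYTES)
		bytes = (MAX_TRANSFER_BYTES / max_packet) * max_packet;

	packets = (bytes + max_packet - 1) / max_packet;
	if (packets == 0) {
		packets = 1;	/* zero-length transfers still send one packet */
	} else if (packets > 1 && no_dma) {
		packets = 1;	/* one packet per channel restart without DMA */
		bytes = packets * max_packet;
	} else if (packets > MAX_TRANSFER_PACKETS) {
		packets = MAX_TRANSFER_PACKETS;
		bytes = packets * max_packet;
	}

	*xfersize = bytes;
	*pktcnt = packets;
}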
/**
- * @INTERNAL
* Find a pipe that is ready to be scheduled to hardware.
- * @param usb USB device state populated by
- * cvmx_usb_initialize().
- * @param list Pipe list to search
- * @param current_frame
- * Frame counter to use as a time reference.
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @list: Pipe list to search
+ * @current_frame:
+ * Frame counter to use as a time reference.
*
- * @return Pipe or NULL if none are ready
+ * Returns: Pipe or NULL if none are ready
*/
-static cvmx_usb_pipe_t *__cvmx_usb_find_ready_pipe(cvmx_usb_internal_state_t *usb, cvmx_usb_pipe_list_t *list, uint64_t current_frame)
+static struct cvmx_usb_pipe *__cvmx_usb_find_ready_pipe(struct cvmx_usb_internal_state *usb, struct cvmx_usb_pipe_list *list, uint64_t current_frame)
{
- cvmx_usb_pipe_t *pipe = list->head;
- while (pipe) {
- if (!(pipe->flags & __CVMX_USB_PIPE_FLAGS_SCHEDULED) && pipe->head &&
- (pipe->next_tx_frame <= current_frame) &&
- ((pipe->split_sc_frame == -1) || ((((int)current_frame - (int)pipe->split_sc_frame) & 0x7f) < 0x40)) &&
- (!usb->active_split || (usb->active_split == pipe->head))) {
- CVMX_PREFETCH(pipe, 128);
- CVMX_PREFETCH(pipe->head, 0);
- return pipe;
- }
- pipe = pipe->next;
- }
- return NULL;
+ struct cvmx_usb_pipe *pipe = list->head;
+ while (pipe) {
+ if (!(pipe->flags & __CVMX_USB_PIPE_FLAGS_SCHEDULED) && pipe->head &&
+ (pipe->next_tx_frame <= current_frame) &&
+ ((pipe->split_sc_frame == -1) || ((((int)current_frame - (int)pipe->split_sc_frame) & 0x7f) < 0x40)) &&
+ (!usb->active_split || (usb->active_split == pipe->head))) {
+ CVMX_PREFETCH(pipe, 128);
+ CVMX_PREFETCH(pipe->head, 0);
+ return pipe;
+ }
+ pipe = pipe->next;
+ }
+ return NULL;
}
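One detail of the readiness test above worth spelling out: split bookkeeping uses a 7-bit frame number, so the split_sc_frame comparison is a masked subtraction and the pipe stays eligible only while fewer than 64 frames have passed since the recorded start split. A sketch of just that window check (illustrative only):

/* Frame numbers wrap modulo 128, hence the masked subtraction */
static int split_window_open(int current_frame, int split_sc_frame)
{
	if (split_sc_frame == -1)
		return 1;	/* no complete split pending for this pipe */
	return ((current_frame - split_sc_frame) & 0x7f) < 0x40;
}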
/**
- * @INTERNAL
* Called whenever a pipe might need to be scheduled to the
* hardware.
*
- * @param usb USB device state populated by
- * cvmx_usb_initialize().
- * @param is_sof True if this schedule was called on a SOF interrupt.
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @is_sof: True if this schedule was called on a SOF interrupt.
*/
-static void __cvmx_usb_schedule(cvmx_usb_internal_state_t *usb, int is_sof)
+static void __cvmx_usb_schedule(struct cvmx_usb_internal_state *usb, int is_sof)
{
- int channel;
- cvmx_usb_pipe_t *pipe;
- int need_sof;
- cvmx_usb_transfer_t ttype;
-
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", usb);
-
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
- /* Without DMA we need to be careful to not schedule something at the end of a frame and cause an overrun */
- cvmx_usbcx_hfnum_t hfnum = {.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index))};
- cvmx_usbcx_hfir_t hfir = {.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFIR(usb->index))};
- if (hfnum.s.frrem < hfir.s.frint/4)
- goto done;
- }
-
- while (usb->idle_hardware_channels) {
- /* Find an idle channel */
- CVMX_CLZ(channel, usb->idle_hardware_channels);
- channel = 31 - channel;
- if (cvmx_unlikely(channel > 7)) {
- if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_INFO))
- cvmx_dprintf("%s: Idle hardware channels has a channel higher than 7. This is wrong\n", __FUNCTION__);
- break;
- }
-
- /* Find a pipe needing service */
- pipe = NULL;
- if (is_sof) {
- /* Only process periodic pipes on SOF interrupts. This way we are
- sure that the periodic data is sent in the beginning of the
- frame */
- pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_ISOCHRONOUS, usb->frame_number);
- if (cvmx_likely(!pipe))
- pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_INTERRUPT, usb->frame_number);
- }
- if (cvmx_likely(!pipe)) {
- pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_CONTROL, usb->frame_number);
- if (cvmx_likely(!pipe))
- pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_BULK, usb->frame_number);
- }
- if (!pipe)
- break;
-
- CVMX_USB_LOG_PARAM("%d", channel);
- CVMX_USB_LOG_PARAM("%p", pipe);
-
- if (cvmx_unlikely((usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_TRANSFERS) ||
- (pipe->flags & CVMX_USB_PIPE_FLAGS_DEBUG_TRANSFERS))) {
- cvmx_usb_transaction_t *transaction = pipe->head;
- const cvmx_usb_control_header_t *header = (transaction->control_header) ? cvmx_phys_to_ptr(transaction->control_header) : NULL;
- const char *dir = (pipe->transfer_dir == CVMX_USB_DIRECTION_IN) ? "IN" : "OUT";
- const char *type;
- switch (pipe->transfer_type) {
- case CVMX_USB_TRANSFER_CONTROL:
- type = "SETUP";
- dir = (header->s.request_type & 0x80) ? "IN" : "OUT";
- break;
- case CVMX_USB_TRANSFER_ISOCHRONOUS:
- type = "ISOCHRONOUS";
- break;
- case CVMX_USB_TRANSFER_BULK:
- type = "BULK";
- break;
- default: /* CVMX_USB_TRANSFER_INTERRUPT */
- type = "INTERRUPT";
- break;
- }
- cvmx_dprintf("%s: Starting pipe %d, transaction %d on channel %d. %s %s len=%d header=0x%llx\n",
- __FUNCTION__, __cvmx_usb_get_pipe_handle(usb, pipe),
- __cvmx_usb_get_submit_handle(usb, transaction),
- channel, type, dir,
- transaction->buffer_length,
- (header) ? (unsigned long long)header->u64 : 0ull);
- }
- __cvmx_usb_start_channel(usb, channel, pipe);
- }
+ int channel;
+ struct cvmx_usb_pipe *pipe;
+ int need_sof;
+ enum cvmx_usb_transfer ttype;
+
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
+		/*
+		 * Without DMA we need to be careful not to schedule something
+		 * at the end of a frame and cause an overrun
+		 */
+ union cvmx_usbcx_hfnum hfnum = {.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index))};
+ union cvmx_usbcx_hfir hfir = {.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFIR(usb->index))};
+ if (hfnum.s.frrem < hfir.s.frint/4)
+ goto done;
+ }
+
+ while (usb->idle_hardware_channels) {
+ /* Find an idle channel */
+ CVMX_CLZ(channel, usb->idle_hardware_channels);
+ channel = 31 - channel;
+ if (unlikely(channel > 7))
+ break;
+
+ /* Find a pipe needing service */
+ pipe = NULL;
+ if (is_sof) {
+ /*
+ * Only process periodic pipes on SOF interrupts. This
+			 * way we are sure that the periodic data is sent at the
+			 * beginning of the frame
+ */
+ pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_ISOCHRONOUS, usb->frame_number);
+ if (likely(!pipe))
+ pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_INTERRUPT, usb->frame_number);
+ }
+ if (likely(!pipe)) {
+ pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_CONTROL, usb->frame_number);
+ if (likely(!pipe))
+ pipe = __cvmx_usb_find_ready_pipe(usb, usb->active_pipes + CVMX_USB_TRANSFER_BULK, usb->frame_number);
+ }
+ if (!pipe)
+ break;
+
+ __cvmx_usb_start_channel(usb, channel, pipe);
+ }
done:
- /* Only enable SOF interrupts when we have transactions pending in the
- future that might need to be scheduled */
- need_sof = 0;
- for (ttype=CVMX_USB_TRANSFER_CONTROL; ttype<=CVMX_USB_TRANSFER_INTERRUPT; ttype++) {
- pipe = usb->active_pipes[ttype].head;
- while (pipe) {
- if (pipe->next_tx_frame > usb->frame_number) {
- need_sof = 1;
- break;
- }
- pipe=pipe->next;
- }
- }
- USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), cvmx_usbcx_gintmsk_t, sofmsk, need_sof);
- CVMX_USB_RETURN_NOTHING();
+ /*
+ * Only enable SOF interrupts when we have transactions pending in the
+ * future that might need to be scheduled
+ */
+ need_sof = 0;
+ for (ttype = CVMX_USB_TRANSFER_CONTROL; ttype <= CVMX_USB_TRANSFER_INTERRUPT; ttype++) {
+ pipe = usb->active_pipes[ttype].head;
+ while (pipe) {
+ if (pipe->next_tx_frame > usb->frame_number) {
+ need_sof = 1;
+ break;
+ }
+ pipe = pipe->next;
+ }
+ }
+ USB_SET_FIELD32(CVMX_USBCX_GINTMSK(usb->index), union cvmx_usbcx_gintmsk, sofmsk, need_sof);
+ return;
}
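CVMX_CLZ above counts leading zeros, so 31 minus its result is the index of the highest set bit in idle_hardware_channels, i.e. the highest-numbered idle channel. An equivalent sketch in plain C, using the GCC __builtin_clz purely for illustration:

/* Pick the highest-numbered idle channel from the idle-channel bit mask */
static int pick_idle_channel(uint32_t idle_hardware_channels)
{
	if (!idle_hardware_channels)
		return -1;	/* no channel available */
	return 31 - __builtin_clz(idle_hardware_channels);
}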
/**
- * @INTERNAL
* Call a user's callback for a specific reason.
*
- * @param usb USB device state populated by
- * cvmx_usb_initialize().
- * @param pipe Pipe the callback is for or NULL
- * @param transaction
- * Transaction the callback is for or NULL
- * @param reason Reason this callback is being called
- * @param complete_code
- * Completion code for the transaction, if any
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @pipe: Pipe the callback is for or NULL
+ * @transaction:
+ * Transaction the callback is for or NULL
+ * @reason: Reason this callback is being called
+ * @complete_code:
+ * Completion code for the transaction, if any
*/
-static void __cvmx_usb_perform_callback(cvmx_usb_internal_state_t *usb,
- cvmx_usb_pipe_t *pipe,
- cvmx_usb_transaction_t *transaction,
- cvmx_usb_callback_t reason,
- cvmx_usb_complete_t complete_code)
+static void __cvmx_usb_perform_callback(struct cvmx_usb_internal_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct cvmx_usb_transaction *transaction,
+ enum cvmx_usb_callback reason,
+ enum cvmx_usb_complete complete_code)
{
- cvmx_usb_callback_func_t callback = usb->callback[reason];
- void *user_data = usb->callback_data[reason];
- int submit_handle = -1;
- int pipe_handle = -1;
- int bytes_transferred = 0;
-
- if (pipe)
- pipe_handle = __cvmx_usb_get_pipe_handle(usb, pipe);
-
- if (transaction) {
- submit_handle = __cvmx_usb_get_submit_handle(usb, transaction);
- bytes_transferred = transaction->actual_bytes;
- /* Transactions are allowed to override the default callback */
- if ((reason == CVMX_USB_CALLBACK_TRANSFER_COMPLETE) && transaction->callback) {
- callback = transaction->callback;
- user_data = transaction->callback_data;
- }
- }
-
- if (!callback)
- return;
-
- if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_CALLBACKS))
- cvmx_dprintf("%*s%s: calling callback %p(usb=%p, complete_code=%s, "
- "pipe_handle=%d, submit_handle=%d, bytes_transferred=%d, user_data=%p);\n",
- 2*usb->indent, "", __FUNCTION__, callback, usb,
- __cvmx_usb_complete_to_string(complete_code),
- pipe_handle, submit_handle, bytes_transferred, user_data);
-
- callback((cvmx_usb_state_t *)usb, reason, complete_code, pipe_handle, submit_handle,
- bytes_transferred, user_data);
-
- if (cvmx_unlikely(usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_CALLBACKS))
- cvmx_dprintf("%*s%s: callback %p complete\n", 2*usb->indent, "",
- __FUNCTION__, callback);
+ cvmx_usb_callback_func_t callback = usb->callback[reason];
+ void *user_data = usb->callback_data[reason];
+ int submit_handle = -1;
+ int pipe_handle = -1;
+ int bytes_transferred = 0;
+
+ if (pipe)
+ pipe_handle = __cvmx_usb_get_pipe_handle(usb, pipe);
+
+ if (transaction) {
+ submit_handle = __cvmx_usb_get_submit_handle(usb, transaction);
+ bytes_transferred = transaction->actual_bytes;
+ /* Transactions are allowed to override the default callback */
+ if ((reason == CVMX_USB_CALLBACK_TRANSFER_COMPLETE) && transaction->callback) {
+ callback = transaction->callback;
+ user_data = transaction->callback_data;
+ }
+ }
+
+ if (!callback)
+ return;
+
+ callback((struct cvmx_usb_state *)usb, reason, complete_code, pipe_handle, submit_handle,
+ bytes_transferred, user_data);
}
/**
- * @INTERNAL
* Signal the completion of a transaction and free it. The
* transaction will be removed from the pipe transaction list.
*
- * @param usb USB device state populated by
- * cvmx_usb_initialize().
- * @param pipe Pipe the transaction is on
- * @param transaction
- * Transaction that completed
- * @param complete_code
- * Completion code
+ * @usb: USB device state populated by
+ * cvmx_usb_initialize().
+ * @pipe: Pipe the transaction is on
+ * @transaction:
+ * Transaction that completed
+ * @complete_code:
+ * Completion code
*/
-static void __cvmx_usb_perform_complete(cvmx_usb_internal_state_t * usb,
- cvmx_usb_pipe_t *pipe,
- cvmx_usb_transaction_t *transaction,
- cvmx_usb_complete_t complete_code)
+static void __cvmx_usb_perform_complete(struct cvmx_usb_internal_state *usb,
+ struct cvmx_usb_pipe *pipe,
+ struct cvmx_usb_transaction *transaction,
+ enum cvmx_usb_complete complete_code)
{
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", usb);
- CVMX_USB_LOG_PARAM("%p", pipe);
- CVMX_USB_LOG_PARAM("%p", transaction);
- CVMX_USB_LOG_PARAM("%d", complete_code);
-
- /* If this was a split then clear our split in progress marker */
- if (usb->active_split == transaction)
- usb->active_split = NULL;
-
- /* Isochronous transactions need extra processing as they might not be done
- after a single data transfer */
- if (cvmx_unlikely(transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)) {
- /* Update the number of bytes transferred in this ISO packet */
- transaction->iso_packets[0].length = transaction->actual_bytes;
- transaction->iso_packets[0].status = complete_code;
-
- /* If there are more ISOs pending and we succeeded, schedule the next
- one */
- if ((transaction->iso_number_packets > 1) && (complete_code == CVMX_USB_COMPLETE_SUCCESS)) {
- transaction->actual_bytes = 0; /* No bytes transferred for this packet as of yet */
- transaction->iso_number_packets--; /* One less ISO waiting to transfer */
- transaction->iso_packets++; /* Increment to the next location in our packet array */
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
- goto done;
- }
- }
-
- /* Remove the transaction from the pipe list */
- if (transaction->next)
- transaction->next->prev = transaction->prev;
- else
- pipe->tail = transaction->prev;
- if (transaction->prev)
- transaction->prev->next = transaction->next;
- else
- pipe->head = transaction->next;
- if (!pipe->head) {
- __cvmx_usb_remove_pipe(usb->active_pipes + pipe->transfer_type, pipe);
- __cvmx_usb_append_pipe(&usb->idle_pipes, pipe);
-
- }
- __cvmx_usb_perform_callback(usb, pipe, transaction,
- CVMX_USB_CALLBACK_TRANSFER_COMPLETE,
- complete_code);
- __cvmx_usb_free_transaction(usb, transaction);
+ /* If this was a split then clear our split in progress marker */
+ if (usb->active_split == transaction)
+ usb->active_split = NULL;
+
+ /*
+ * Isochronous transactions need extra processing as they might not be
+ * done after a single data transfer
+ */
+ if (unlikely(transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)) {
+ /* Update the number of bytes transferred in this ISO packet */
+ transaction->iso_packets[0].length = transaction->actual_bytes;
+ transaction->iso_packets[0].status = complete_code;
+
+ /*
+ * If there are more ISOs pending and we succeeded, schedule the
+ * next one
+ */
+ if ((transaction->iso_number_packets > 1) && (complete_code == CVMX_USB_COMPLETE_SUCCESS)) {
+ transaction->actual_bytes = 0; /* No bytes transferred for this packet as of yet */
+ transaction->iso_number_packets--; /* One less ISO waiting to transfer */
+ transaction->iso_packets++; /* Increment to the next location in our packet array */
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+ goto done;
+ }
+ }
+
+ /* Remove the transaction from the pipe list */
+ if (transaction->next)
+ transaction->next->prev = transaction->prev;
+ else
+ pipe->tail = transaction->prev;
+ if (transaction->prev)
+ transaction->prev->next = transaction->next;
+ else
+ pipe->head = transaction->next;
+ if (!pipe->head) {
+ __cvmx_usb_remove_pipe(usb->active_pipes + pipe->transfer_type, pipe);
+ __cvmx_usb_append_pipe(&usb->idle_pipes, pipe);
+	}
+ __cvmx_usb_perform_callback(usb, pipe, transaction,
+ CVMX_USB_CALLBACK_TRANSFER_COMPLETE,
+ complete_code);
+ __cvmx_usb_free_transaction(usb, transaction);
done:
- CVMX_USB_RETURN_NOTHING();
+ return;
}
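The isochronous special case above can be read in isolation: each completion consumes iso_packets[0], and a successful completion with packets still pending re-arms the same transaction instead of finishing it. A sketch of just that bookkeeping, mirroring the code above (illustrative only):

/* Returns nonzero when the transaction should stay queued for more packets */
static int iso_advance(struct cvmx_usb_transaction *transaction,
		       enum cvmx_usb_complete complete_code)
{
	transaction->iso_packets[0].length = transaction->actual_bytes;
	transaction->iso_packets[0].status = complete_code;

	if ((transaction->iso_number_packets > 1) &&
	    (complete_code == CVMX_USB_COMPLETE_SUCCESS)) {
		transaction->actual_bytes = 0;	/* restart the byte count */
		transaction->iso_number_packets--;
		transaction->iso_packets++;	/* step to the next packet */
		transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
		return 1;
	}
	return 0;	/* report completion to the caller */
}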
/**
- * @INTERNAL
* Submit a usb transaction to a pipe. Called for all types
* of transactions.
*
- * @param usb
- * @param pipe_handle
- * Which pipe to submit to. Will be validated in this function.
- * @param type Transaction type
- * @param flags Flags for the transaction
- * @param buffer User buffer for the transaction
- * @param buffer_length
- * User buffer's length in bytes
- * @param control_header
- * For control transactions, the 8 byte standard header
- * @param iso_start_frame
- * For ISO transactions, the start frame
- * @param iso_number_packets
- * For ISO, the number of packet in the transaction.
- * @param iso_packets
- * A description of each ISO packet
- * @param callback User callback to call when the transaction completes
- * @param user_data User's data for the callback
- *
- * @return Submit handle or negative on failure. Matches the result
- * in the external API.
+ * @usb:	     USB device state populated by
+ *		     cvmx_usb_initialize().
+ * @pipe_handle:
+ * Which pipe to submit to. Will be validated in this function.
+ * @type: Transaction type
+ * @flags: Flags for the transaction
+ * @buffer: User buffer for the transaction
+ * @buffer_length:
+ * User buffer's length in bytes
+ * @control_header:
+ * For control transactions, the 8 byte standard header
+ * @iso_start_frame:
+ * For ISO transactions, the start frame
+ * @iso_number_packets:
+ *			For ISO, the number of packets in the transaction.
+ * @iso_packets:
+ * A description of each ISO packet
+ * @callback: User callback to call when the transaction completes
+ * @user_data: User's data for the callback
+ *
+ * Returns: Submit handle or negative on failure. Matches the result
+ * in the external API.
*/
-static int __cvmx_usb_submit_transaction(cvmx_usb_internal_state_t *usb,
- int pipe_handle,
- cvmx_usb_transfer_t type,
- int flags,
- uint64_t buffer,
- int buffer_length,
- uint64_t control_header,
- int iso_start_frame,
- int iso_number_packets,
- cvmx_usb_iso_packet_t *iso_packets,
- cvmx_usb_callback_func_t callback,
- void *user_data)
+static int __cvmx_usb_submit_transaction(struct cvmx_usb_internal_state *usb,
+ int pipe_handle,
+ enum cvmx_usb_transfer type,
+ int flags,
+ uint64_t buffer,
+ int buffer_length,
+ uint64_t control_header,
+ int iso_start_frame,
+ int iso_number_packets,
+ struct cvmx_usb_iso_packet *iso_packets,
+ cvmx_usb_callback_func_t callback,
+ void *user_data)
{
- int submit_handle;
- cvmx_usb_transaction_t *transaction;
- cvmx_usb_pipe_t *pipe = usb->pipe + pipe_handle;
-
- CVMX_USB_LOG_CALLED();
- if (cvmx_unlikely((pipe_handle < 0) || (pipe_handle >= MAX_PIPES)))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- /* Fail if the pipe isn't open */
- if (cvmx_unlikely((pipe->flags & __CVMX_USB_PIPE_FLAGS_OPEN) == 0))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely(pipe->transfer_type != type))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
-
- transaction = __cvmx_usb_alloc_transaction(usb);
- if (cvmx_unlikely(!transaction))
- CVMX_USB_RETURN(CVMX_USB_NO_MEMORY);
-
- transaction->type = type;
- transaction->flags |= flags;
- transaction->buffer = buffer;
- transaction->buffer_length = buffer_length;
- transaction->control_header = control_header;
- transaction->iso_start_frame = iso_start_frame; // FIXME: This is not used, implement it
- transaction->iso_number_packets = iso_number_packets;
- transaction->iso_packets = iso_packets;
- transaction->callback = callback;
- transaction->callback_data = user_data;
- if (transaction->type == CVMX_USB_TRANSFER_CONTROL)
- transaction->stage = CVMX_USB_STAGE_SETUP;
- else
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
-
- transaction->next = NULL;
- if (pipe->tail) {
- transaction->prev = pipe->tail;
- transaction->prev->next = transaction;
- }
- else {
- if (pipe->next_tx_frame < usb->frame_number)
- pipe->next_tx_frame = usb->frame_number + pipe->interval -
- (usb->frame_number - pipe->next_tx_frame) % pipe->interval;
- transaction->prev = NULL;
- pipe->head = transaction;
- __cvmx_usb_remove_pipe(&usb->idle_pipes, pipe);
- __cvmx_usb_append_pipe(usb->active_pipes + pipe->transfer_type, pipe);
- }
- pipe->tail = transaction;
-
- submit_handle = __cvmx_usb_get_submit_handle(usb, transaction);
-
- /* We may need to schedule the pipe if this was the head of the pipe */
- if (!transaction->prev)
- __cvmx_usb_schedule(usb, 0);
-
- CVMX_USB_RETURN(submit_handle);
+ int submit_handle;
+ struct cvmx_usb_transaction *transaction;
+ struct cvmx_usb_pipe *pipe = usb->pipe + pipe_handle;
+
+ if (unlikely((pipe_handle < 0) || (pipe_handle >= MAX_PIPES)))
+ return -EINVAL;
+ /* Fail if the pipe isn't open */
+ if (unlikely((pipe->flags & __CVMX_USB_PIPE_FLAGS_OPEN) == 0))
+ return -EINVAL;
+ if (unlikely(pipe->transfer_type != type))
+ return -EINVAL;
+
+ transaction = __cvmx_usb_alloc_transaction(usb);
+ if (unlikely(!transaction))
+ return -ENOMEM;
+
+ transaction->type = type;
+ transaction->flags |= flags;
+ transaction->buffer = buffer;
+ transaction->buffer_length = buffer_length;
+ transaction->control_header = control_header;
+	transaction->iso_start_frame = iso_start_frame; /* FIXME: This is not used, implement it */
+ transaction->iso_number_packets = iso_number_packets;
+ transaction->iso_packets = iso_packets;
+ transaction->callback = callback;
+ transaction->callback_data = user_data;
+ if (transaction->type == CVMX_USB_TRANSFER_CONTROL)
+ transaction->stage = CVMX_USB_STAGE_SETUP;
+ else
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+
+ transaction->next = NULL;
+ if (pipe->tail) {
+ transaction->prev = pipe->tail;
+ transaction->prev->next = transaction;
+ } else {
+ if (pipe->next_tx_frame < usb->frame_number)
+ pipe->next_tx_frame = usb->frame_number + pipe->interval -
+ (usb->frame_number - pipe->next_tx_frame) % pipe->interval;
+ transaction->prev = NULL;
+ pipe->head = transaction;
+ __cvmx_usb_remove_pipe(&usb->idle_pipes, pipe);
+ __cvmx_usb_append_pipe(usb->active_pipes + pipe->transfer_type, pipe);
+ }
+ pipe->tail = transaction;
+
+ submit_handle = __cvmx_usb_get_submit_handle(usb, transaction);
+
+ /* We may need to schedule the pipe if this was the head of the pipe */
+ if (!transaction->prev)
+ __cvmx_usb_schedule(usb, 0);
+
+ return submit_handle;
}
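The next_tx_frame adjustment above deserves a worked example: when a periodic pipe has fallen behind, it is moved to the first frame strictly after the current one that still lies on the pipe's interval grid. A small sketch with illustrative numbers:

/*
 * Example: frame_number = 105, next_tx_frame = 90, interval = 8
 *   elapsed = 105 - 90 = 15, 15 % 8 = 7
 *   result  = 105 + 8 - 7 = 106, i.e. 90 + 2 * 8
 */
static uint64_t next_periodic_frame(uint64_t frame_number,
				    uint64_t next_tx_frame,
				    uint64_t interval)
{
	if (next_tx_frame >= frame_number)
		return next_tx_frame;	/* not late, keep the schedule */
	return frame_number + interval -
	       (frame_number - next_tx_frame) % interval;
}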
/**
* Call to submit a USB Bulk transfer to a pipe.
*
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- * @param pipe_handle
- * Handle to the pipe for the transfer.
- * @param buffer Physical address of the data buffer in
- * memory. Note that this is NOT A POINTER, but
- * the full 64bit physical address of the
- * buffer. This may be zero if buffer_length is
- * zero.
- * @param buffer_length
- * Length of buffer in bytes.
- * @param callback Function to call when this transaction
- * completes. If the return value of this
- * function isn't an error, then this function
- * is guaranteed to be called when the
- * transaction completes. If this parameter is
- * NULL, then the generic callback registered
- * through cvmx_usb_register_callback is
- * called. If both are NULL, then there is no
- * way to know when a transaction completes.
- * @param user_data User supplied data returned when the
- * callback is called. This is only used if
- * callback in not NULL.
- *
- * @return A submitted transaction handle or negative on
- * failure. Negative values are failure codes from
- * cvmx_usb_status_t.
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ * @pipe_handle:
+ * Handle to the pipe for the transfer.
+ * @buffer: Physical address of the data buffer in
+ * memory. Note that this is NOT A POINTER, but
+ * the full 64bit physical address of the
+ * buffer. This may be zero if buffer_length is
+ * zero.
+ * @buffer_length:
+ * Length of buffer in bytes.
+ * @callback: Function to call when this transaction
+ * completes. If the return value of this
+ * function isn't an error, then this function
+ * is guaranteed to be called when the
+ * transaction completes. If this parameter is
+ * NULL, then the generic callback registered
+ * through cvmx_usb_register_callback is
+ * called. If both are NULL, then there is no
+ * way to know when a transaction completes.
+ * @user_data: User supplied data returned when the
+ * callback is called. This is only used if
+ *			callback is not NULL.
+ *
+ * Returns: A submitted transaction handle or negative on
+ * failure. Negative values are error codes.
*/
-int cvmx_usb_submit_bulk(cvmx_usb_state_t *state, int pipe_handle,
- uint64_t buffer, int buffer_length,
- cvmx_usb_callback_func_t callback,
- void *user_data)
+int cvmx_usb_submit_bulk(struct cvmx_usb_state *state, int pipe_handle,
+ uint64_t buffer, int buffer_length,
+ cvmx_usb_callback_func_t callback,
+ void *user_data)
{
- int submit_handle;
- cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
-
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", state);
- CVMX_USB_LOG_PARAM("%d", pipe_handle);
- CVMX_USB_LOG_PARAM("0x%llx", (unsigned long long)buffer);
- CVMX_USB_LOG_PARAM("%d", buffer_length);
-
- /* Pipe handle checking is done later in a common place */
- if (cvmx_unlikely(!buffer))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely(buffer_length < 0))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
-
- submit_handle = __cvmx_usb_submit_transaction(usb, pipe_handle,
- CVMX_USB_TRANSFER_BULK,
- 0, /* flags */
- buffer,
- buffer_length,
- 0, /* control_header */
- 0, /* iso_start_frame */
- 0, /* iso_number_packets */
- NULL, /* iso_packets */
- callback,
- user_data);
- CVMX_USB_RETURN(submit_handle);
+ int submit_handle;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+
+ /* Pipe handle checking is done later in a common place */
+ if (unlikely(!buffer))
+ return -EINVAL;
+ if (unlikely(buffer_length < 0))
+ return -EINVAL;
+
+ submit_handle = __cvmx_usb_submit_transaction(usb, pipe_handle,
+ CVMX_USB_TRANSFER_BULK,
+ 0, /* flags */
+ buffer,
+ buffer_length,
+ 0, /* control_header */
+ 0, /* iso_start_frame */
+ 0, /* iso_number_packets */
+ NULL, /* iso_packets */
+ callback,
+ user_data);
+ return submit_handle;
}
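As a usage illustration only: a caller hands cvmx_usb_submit_bulk() the 64-bit physical address of its buffer, as the kernel-doc above stresses, plus an optional per-transaction callback. The pipe handle, the cvmx_ptr_to_phys() helper and the callback below are assumptions of this sketch; the callback signature follows the way __cvmx_usb_perform_callback() invokes callbacks.

/* Hypothetical completion handler, signature per __cvmx_usb_perform_callback() */
static void my_bulk_done(struct cvmx_usb_state *state,
			 enum cvmx_usb_callback reason,
			 enum cvmx_usb_complete complete_code,
			 int pipe_handle, int submit_handle,
			 int bytes_transferred, void *user_data)
{
	/* complete_code and bytes_transferred describe the finished transfer */
}

/* Hypothetical caller: one bulk transfer on an already-open pipe */
static int send_bulk(struct cvmx_usb_state *state, int pipe_handle,
		     void *data, int len)
{
	int submit_handle;

	/* cvmx_ptr_to_phys() assumed: counterpart of cvmx_phys_to_ptr() */
	submit_handle = cvmx_usb_submit_bulk(state, pipe_handle,
					     cvmx_ptr_to_phys(data), len,
					     my_bulk_done, NULL);
	if (submit_handle < 0)
		return submit_handle;	/* -EINVAL, -ENOMEM, ... */
	return 0;	/* completion reported through my_bulk_done() */
}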
/**
* Call to submit a USB Interrupt transfer to a pipe.
*
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- * @param pipe_handle
- * Handle to the pipe for the transfer.
- * @param buffer Physical address of the data buffer in
- * memory. Note that this is NOT A POINTER, but
- * the full 64bit physical address of the
- * buffer. This may be zero if buffer_length is
- * zero.
- * @param buffer_length
- * Length of buffer in bytes.
- * @param callback Function to call when this transaction
- * completes. If the return value of this
- * function isn't an error, then this function
- * is guaranteed to be called when the
- * transaction completes. If this parameter is
- * NULL, then the generic callback registered
- * through cvmx_usb_register_callback is
- * called. If both are NULL, then there is no
- * way to know when a transaction completes.
- * @param user_data User supplied data returned when the
- * callback is called. This is only used if
- * callback in not NULL.
- *
- * @return A submitted transaction handle or negative on
- * failure. Negative values are failure codes from
- * cvmx_usb_status_t.
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ * @pipe_handle:
+ * Handle to the pipe for the transfer.
+ * @buffer: Physical address of the data buffer in
+ * memory. Note that this is NOT A POINTER, but
+ * the full 64bit physical address of the
+ * buffer. This may be zero if buffer_length is
+ * zero.
+ * @buffer_length:
+ * Length of buffer in bytes.
+ * @callback: Function to call when this transaction
+ * completes. If the return value of this
+ * function isn't an error, then this function
+ * is guaranteed to be called when the
+ * transaction completes. If this parameter is
+ * NULL, then the generic callback registered
+ * through cvmx_usb_register_callback is
+ * called. If both are NULL, then there is no
+ * way to know when a transaction completes.
+ * @user_data: User supplied data returned when the
+ * callback is called. This is only used if
+ *			callback is not NULL.
+ *
+ * Returns: A submitted transaction handle or negative on
+ * failure. Negative values are error codes.
*/
-int cvmx_usb_submit_interrupt(cvmx_usb_state_t *state, int pipe_handle,
- uint64_t buffer, int buffer_length,
- cvmx_usb_callback_func_t callback,
- void *user_data)
+int cvmx_usb_submit_interrupt(struct cvmx_usb_state *state, int pipe_handle,
+ uint64_t buffer, int buffer_length,
+ cvmx_usb_callback_func_t callback,
+ void *user_data)
{
- int submit_handle;
- cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
-
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", state);
- CVMX_USB_LOG_PARAM("%d", pipe_handle);
- CVMX_USB_LOG_PARAM("0x%llx", (unsigned long long)buffer);
- CVMX_USB_LOG_PARAM("%d", buffer_length);
-
- /* Pipe handle checking is done later in a common place */
- if (cvmx_unlikely(!buffer))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely(buffer_length < 0))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
-
- submit_handle = __cvmx_usb_submit_transaction(usb, pipe_handle,
- CVMX_USB_TRANSFER_INTERRUPT,
- 0, /* flags */
- buffer,
- buffer_length,
- 0, /* control_header */
- 0, /* iso_start_frame */
- 0, /* iso_number_packets */
- NULL, /* iso_packets */
- callback,
- user_data);
- CVMX_USB_RETURN(submit_handle);
+ int submit_handle;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+
+ /* Pipe handle checking is done later in a common place */
+ if (unlikely(!buffer))
+ return -EINVAL;
+ if (unlikely(buffer_length < 0))
+ return -EINVAL;
+
+ submit_handle = __cvmx_usb_submit_transaction(usb, pipe_handle,
+ CVMX_USB_TRANSFER_INTERRUPT,
+ 0, /* flags */
+ buffer,
+ buffer_length,
+ 0, /* control_header */
+ 0, /* iso_start_frame */
+ 0, /* iso_number_packets */
+ NULL, /* iso_packets */
+ callback,
+ user_data);
+ return submit_handle;
}
/**
* Call to submit a USB Control transfer to a pipe.
*
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- * @param pipe_handle
- * Handle to the pipe for the transfer.
- * @param control_header
- * USB 8 byte control header physical address.
- * Note that this is NOT A POINTER, but the
- * full 64bit physical address of the buffer.
- * @param buffer Physical address of the data buffer in
- * memory. Note that this is NOT A POINTER, but
- * the full 64bit physical address of the
- * buffer. This may be zero if buffer_length is
- * zero.
- * @param buffer_length
- * Length of buffer in bytes.
- * @param callback Function to call when this transaction
- * completes. If the return value of this
- * function isn't an error, then this function
- * is guaranteed to be called when the
- * transaction completes. If this parameter is
- * NULL, then the generic callback registered
- * through cvmx_usb_register_callback is
- * called. If both are NULL, then there is no
- * way to know when a transaction completes.
- * @param user_data User supplied data returned when the
- * callback is called. This is only used if
- * callback in not NULL.
- *
- * @return A submitted transaction handle or negative on
- * failure. Negative values are failure codes from
- * cvmx_usb_status_t.
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ * @pipe_handle:
+ * Handle to the pipe for the transfer.
+ * @control_header:
+ * USB 8 byte control header physical address.
+ * Note that this is NOT A POINTER, but the
+ * full 64bit physical address of the buffer.
+ * @buffer: Physical address of the data buffer in
+ * memory. Note that this is NOT A POINTER, but
+ * the full 64bit physical address of the
+ * buffer. This may be zero if buffer_length is
+ * zero.
+ * @buffer_length:
+ * Length of buffer in bytes.
+ * @callback: Function to call when this transaction
+ * completes. If the return value of this
+ * function isn't an error, then this function
+ * is guaranteed to be called when the
+ * transaction completes. If this parameter is
+ * NULL, then the generic callback registered
+ * through cvmx_usb_register_callback is
+ * called. If both are NULL, then there is no
+ * way to know when a transaction completes.
+ * @user_data: User supplied data returned when the
+ * callback is called. This is only used if
+ *			callback is not NULL.
+ *
+ * Returns: A submitted transaction handle or negative on
+ * failure. Negative values are error codes.
*/
-int cvmx_usb_submit_control(cvmx_usb_state_t *state, int pipe_handle,
- uint64_t control_header,
- uint64_t buffer, int buffer_length,
- cvmx_usb_callback_func_t callback,
- void *user_data)
+int cvmx_usb_submit_control(struct cvmx_usb_state *state, int pipe_handle,
+ uint64_t control_header,
+ uint64_t buffer, int buffer_length,
+ cvmx_usb_callback_func_t callback,
+ void *user_data)
{
- int submit_handle;
- cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
- cvmx_usb_control_header_t *header = cvmx_phys_to_ptr(control_header);
-
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", state);
- CVMX_USB_LOG_PARAM("%d", pipe_handle);
- CVMX_USB_LOG_PARAM("0x%llx", (unsigned long long)control_header);
- CVMX_USB_LOG_PARAM("0x%llx", (unsigned long long)buffer);
- CVMX_USB_LOG_PARAM("%d", buffer_length);
-
- /* Pipe handle checking is done later in a common place */
- if (cvmx_unlikely(!control_header))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- /* Some drivers send a buffer with a zero length. God only knows why */
- if (cvmx_unlikely(buffer && (buffer_length < 0)))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely(!buffer && (buffer_length != 0)))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if ((header->s.request_type & 0x80) == 0)
- buffer_length = cvmx_le16_to_cpu(header->s.length);
-
- submit_handle = __cvmx_usb_submit_transaction(usb, pipe_handle,
- CVMX_USB_TRANSFER_CONTROL,
- 0, /* flags */
- buffer,
- buffer_length,
- control_header,
- 0, /* iso_start_frame */
- 0, /* iso_number_packets */
- NULL, /* iso_packets */
- callback,
- user_data);
- CVMX_USB_RETURN(submit_handle);
+ int submit_handle;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+ union cvmx_usb_control_header *header =
+ cvmx_phys_to_ptr(control_header);
+
+ /* Pipe handle checking is done later in a common place */
+ if (unlikely(!control_header))
+ return -EINVAL;
+ /* Some drivers send a buffer with a zero length. God only knows why */
+ if (unlikely(buffer && (buffer_length < 0)))
+ return -EINVAL;
+ if (unlikely(!buffer && (buffer_length != 0)))
+ return -EINVAL;
+ if ((header->s.request_type & 0x80) == 0)
+ buffer_length = le16_to_cpu(header->s.length);
+
+ submit_handle = __cvmx_usb_submit_transaction(usb, pipe_handle,
+ CVMX_USB_TRANSFER_CONTROL,
+ 0, /* flags */
+ buffer,
+ buffer_length,
+ control_header,
+ 0, /* iso_start_frame */
+ 0, /* iso_number_packets */
+ NULL, /* iso_packets */
+ callback,
+ user_data);
+ return submit_handle;
}
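For orientation only: control_header is the physical address of a standard 8-byte USB SETUP packet, and the function peeks at bmRequestType and wLength as shown at its top. A hypothetical GET_DESCRIPTOR(DEVICE) request can therefore be built from raw little-endian bytes; the buffer handling and cvmx_ptr_to_phys() are assumptions of this sketch.

/* Hypothetical GET_DESCRIPTOR(DEVICE) request on a control pipe */
static int get_device_descriptor(struct cvmx_usb_state *state,
				 int pipe_handle, void *desc_buf)
{
	/* Standard USB 2.0 SETUP packet, 16-bit fields little-endian */
	static uint8_t setup[8] = {
		0x80,		/* bmRequestType: IN, standard, device */
		0x06,		/* bRequest: GET_DESCRIPTOR */
		0x00, 0x01,	/* wValue: type 1 (device), index 0 */
		0x00, 0x00,	/* wIndex */
		0x12, 0x00,	/* wLength: 18 bytes */
	};

	return cvmx_usb_submit_control(state, pipe_handle,
				       cvmx_ptr_to_phys(setup),
				       cvmx_ptr_to_phys(desc_buf), 18,
				       NULL, NULL);	/* generic callback used */
}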
/**
* Call to submit a USB Isochronous transfer to a pipe.
*
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- * @param pipe_handle
- * Handle to the pipe for the transfer.
- * @param start_frame
- * Number of frames into the future to schedule
- * this transaction.
- * @param flags Flags to control the transfer. See
- * cvmx_usb_isochronous_flags_t for the flag
- * definitions.
- * @param number_packets
- * Number of sequential packets to transfer.
- * "packets" is a pointer to an array of this
- * many packet structures.
- * @param packets Description of each transfer packet as
- * defined by cvmx_usb_iso_packet_t. The array
- * pointed to here must stay valid until the
- * complete callback is called.
- * @param buffer Physical address of the data buffer in
- * memory. Note that this is NOT A POINTER, but
- * the full 64bit physical address of the
- * buffer. This may be zero if buffer_length is
- * zero.
- * @param buffer_length
- * Length of buffer in bytes.
- * @param callback Function to call when this transaction
- * completes. If the return value of this
- * function isn't an error, then this function
- * is guaranteed to be called when the
- * transaction completes. If this parameter is
- * NULL, then the generic callback registered
- * through cvmx_usb_register_callback is
- * called. If both are NULL, then there is no
- * way to know when a transaction completes.
- * @param user_data User supplied data returned when the
- * callback is called. This is only used if
- * callback in not NULL.
- *
- * @return A submitted transaction handle or negative on
- * failure. Negative values are failure codes from
- * cvmx_usb_status_t.
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ * @pipe_handle:
+ * Handle to the pipe for the transfer.
+ * @start_frame:
+ * Number of frames into the future to schedule
+ * this transaction.
+ * @flags: Flags to control the transfer. See
+ * enum cvmx_usb_isochronous_flags for the flag
+ * definitions.
+ * @number_packets:
+ * Number of sequential packets to transfer.
+ * "packets" is a pointer to an array of this
+ * many packet structures.
+ * @packets: Description of each transfer packet as
+ * defined by struct cvmx_usb_iso_packet. The array
+ * pointed to here must stay valid until the
+ * complete callback is called.
+ * @buffer: Physical address of the data buffer in
+ * memory. Note that this is NOT A POINTER, but
+ * the full 64bit physical address of the
+ * buffer. This may be zero if buffer_length is
+ * zero.
+ * @buffer_length:
+ * Length of buffer in bytes.
+ * @callback: Function to call when this transaction
+ * completes. If the return value of this
+ * function isn't an error, then this function
+ * is guaranteed to be called when the
+ * transaction completes. If this parameter is
+ * NULL, then the generic callback registered
+ * through cvmx_usb_register_callback is
+ * called. If both are NULL, then there is no
+ * way to know when a transaction completes.
+ * @user_data: User supplied data returned when the
+ * callback is called. This is only used if
+ *			callback is not NULL.
+ *
+ * Returns: A submitted transaction handle or negative on
+ * failure. Negative values are error codes.
*/
-int cvmx_usb_submit_isochronous(cvmx_usb_state_t *state, int pipe_handle,
- int start_frame, int flags,
- int number_packets,
- cvmx_usb_iso_packet_t packets[],
- uint64_t buffer, int buffer_length,
- cvmx_usb_callback_func_t callback,
- void *user_data)
+int cvmx_usb_submit_isochronous(struct cvmx_usb_state *state, int pipe_handle,
+ int start_frame, int flags,
+ int number_packets,
+ struct cvmx_usb_iso_packet packets[],
+ uint64_t buffer, int buffer_length,
+ cvmx_usb_callback_func_t callback,
+ void *user_data)
{
- int submit_handle;
- cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
-
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", state);
- CVMX_USB_LOG_PARAM("%d", pipe_handle);
- CVMX_USB_LOG_PARAM("%d", start_frame);
- CVMX_USB_LOG_PARAM("0x%x", flags);
- CVMX_USB_LOG_PARAM("%d", number_packets);
- CVMX_USB_LOG_PARAM("%p", packets);
- CVMX_USB_LOG_PARAM("0x%llx", (unsigned long long)buffer);
- CVMX_USB_LOG_PARAM("%d", buffer_length);
-
- /* Pipe handle checking is done later in a common place */
- if (cvmx_unlikely(start_frame < 0))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely(flags & ~(CVMX_USB_ISOCHRONOUS_FLAGS_ALLOW_SHORT | CVMX_USB_ISOCHRONOUS_FLAGS_ASAP)))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely(number_packets < 1))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely(!packets))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely(!buffer))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely(buffer_length < 0))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
-
- submit_handle = __cvmx_usb_submit_transaction(usb, pipe_handle,
- CVMX_USB_TRANSFER_ISOCHRONOUS,
- flags,
- buffer,
- buffer_length,
- 0, /* control_header */
- start_frame,
- number_packets,
- packets,
- callback,
- user_data);
- CVMX_USB_RETURN(submit_handle);
+ int submit_handle;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+
+ /* Pipe handle checking is done later in a common place */
+ if (unlikely(start_frame < 0))
+ return -EINVAL;
+ if (unlikely(flags & ~(CVMX_USB_ISOCHRONOUS_FLAGS_ALLOW_SHORT | CVMX_USB_ISOCHRONOUS_FLAGS_ASAP)))
+ return -EINVAL;
+ if (unlikely(number_packets < 1))
+ return -EINVAL;
+ if (unlikely(!packets))
+ return -EINVAL;
+ if (unlikely(!buffer))
+ return -EINVAL;
+ if (unlikely(buffer_length < 0))
+ return -EINVAL;
+
+ submit_handle = __cvmx_usb_submit_transaction(usb, pipe_handle,
+ CVMX_USB_TRANSFER_ISOCHRONOUS,
+ flags,
+ buffer,
+ buffer_length,
+ 0, /* control_header */
+ start_frame,
+ number_packets,
+ packets,
+ callback,
+ user_data);
+ return submit_handle;
}
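Again purely illustrative: an isochronous submission describes each packet through the packets[] array (offset into the buffer and length, with status filled in on completion, exactly as the completion path above uses them). The packet layout and helpers below are assumptions of this sketch.

/* Hypothetical ISO OUT: three 188-byte packets laid out back to back */
static int send_iso_frames(struct cvmx_usb_state *state, int pipe_handle,
			   void *buf, struct cvmx_usb_iso_packet packets[3])
{
	int i;

	/* packets[] must stay valid until the completion callback runs */
	for (i = 0; i < 3; i++) {
		packets[i].offset = i * 188;
		packets[i].length = 188;
	}

	return cvmx_usb_submit_isochronous(state, pipe_handle,
					   0,	/* start_frame */
					   CVMX_USB_ISOCHRONOUS_FLAGS_ASAP,
					   3, packets,
					   cvmx_ptr_to_phys(buf), 3 * 188,
					   NULL, NULL);
}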
@@ -2525,63 +2440,58 @@ int cvmx_usb_submit_isochronous(cvmx_usb_state_t *state, int pipe_handle,
* a frame or two for the cvmx_usb_poll() function to call the
* associated callback.
*
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- * @param pipe_handle
- * Pipe handle to cancel requests in.
- * @param submit_handle
- * Handle to transaction to cancel, returned by the submit function.
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ * @pipe_handle:
+ * Pipe handle to cancel requests in.
+ * @submit_handle:
+ * Handle to transaction to cancel, returned by the submit function.
*
- * @return CVMX_USB_SUCCESS or a negative error code defined in
- * cvmx_usb_status_t.
+ * Returns: 0 or a negative error code.
*/
-cvmx_usb_status_t cvmx_usb_cancel(cvmx_usb_state_t *state, int pipe_handle,
- int submit_handle)
+int cvmx_usb_cancel(struct cvmx_usb_state *state, int pipe_handle, int submit_handle)
{
- cvmx_usb_transaction_t *transaction;
- cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
- cvmx_usb_pipe_t *pipe = usb->pipe + pipe_handle;
-
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", state);
- CVMX_USB_LOG_PARAM("%d", pipe_handle);
- CVMX_USB_LOG_PARAM("%d", submit_handle);
-
- if (cvmx_unlikely((pipe_handle < 0) || (pipe_handle >= MAX_PIPES)))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely((submit_handle < 0) || (submit_handle >= MAX_TRANSACTIONS)))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
-
- /* Fail if the pipe isn't open */
- if (cvmx_unlikely((pipe->flags & __CVMX_USB_PIPE_FLAGS_OPEN) == 0))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
-
- transaction = usb->transaction + submit_handle;
-
- /* Fail if this transaction already completed */
- if (cvmx_unlikely((transaction->flags & __CVMX_USB_TRANSACTION_FLAGS_IN_USE) == 0))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
-
- /* If the transaction is the HEAD of the queue and scheduled. We need to
- treat it special */
- if ((pipe->head == transaction) &&
- (pipe->flags & __CVMX_USB_PIPE_FLAGS_SCHEDULED)) {
- cvmx_usbcx_hccharx_t usbc_hcchar;
-
- usb->pipe_for_channel[pipe->channel] = NULL;
- pipe->flags &= ~__CVMX_USB_PIPE_FLAGS_SCHEDULED;
-
- CVMX_SYNCW;
-
- usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(pipe->channel, usb->index));
- /* If the channel isn't enabled then the transaction already completed */
- if (usbc_hcchar.s.chena) {
- usbc_hcchar.s.chdis = 1;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(pipe->channel, usb->index), usbc_hcchar.u32);
- }
- }
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_CANCEL);
- CVMX_USB_RETURN(CVMX_USB_SUCCESS);
+ struct cvmx_usb_transaction *transaction;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+ struct cvmx_usb_pipe *pipe = usb->pipe + pipe_handle;
+
+ if (unlikely((pipe_handle < 0) || (pipe_handle >= MAX_PIPES)))
+ return -EINVAL;
+ if (unlikely((submit_handle < 0) || (submit_handle >= MAX_TRANSACTIONS)))
+ return -EINVAL;
+
+ /* Fail if the pipe isn't open */
+ if (unlikely((pipe->flags & __CVMX_USB_PIPE_FLAGS_OPEN) == 0))
+ return -EINVAL;
+
+ transaction = usb->transaction + submit_handle;
+
+ /* Fail if this transaction already completed */
+ if (unlikely((transaction->flags & __CVMX_USB_TRANSACTION_FLAGS_IN_USE) == 0))
+ return -EINVAL;
+
+	/*
+	 * If the transaction is the HEAD of the queue and is scheduled, we
+	 * need to treat it specially
+	 */
+ if ((pipe->head == transaction) &&
+ (pipe->flags & __CVMX_USB_PIPE_FLAGS_SCHEDULED)) {
+ union cvmx_usbcx_hccharx usbc_hcchar;
+
+ usb->pipe_for_channel[pipe->channel] = NULL;
+ pipe->flags &= ~__CVMX_USB_PIPE_FLAGS_SCHEDULED;
+
+ CVMX_SYNCW;
+
+ usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(pipe->channel, usb->index));
+ /* If the channel isn't enabled then the transaction already completed */
+ if (usbc_hcchar.s.chena) {
+ usbc_hcchar.s.chdis = 1;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(pipe->channel, usb->index), usbc_hcchar.u32);
+ }
+ }
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_CANCEL);
+ return 0;
}
@@ -2589,112 +2499,98 @@ cvmx_usb_status_t cvmx_usb_cancel(cvmx_usb_state_t *state, int pipe_handle,
* Cancel all outstanding requests in a pipe. Logically all this
* does is call cvmx_usb_cancel() in a loop.
*
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- * @param pipe_handle
- * Pipe handle to cancel requests in.
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ * @pipe_handle:
+ * Pipe handle to cancel requests in.
*
- * @return CVMX_USB_SUCCESS or a negative error code defined in
- * cvmx_usb_status_t.
+ * Returns: 0 or a negative error code.
*/
-cvmx_usb_status_t cvmx_usb_cancel_all(cvmx_usb_state_t *state, int pipe_handle)
+int cvmx_usb_cancel_all(struct cvmx_usb_state *state, int pipe_handle)
{
- cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
- cvmx_usb_pipe_t *pipe = usb->pipe + pipe_handle;
-
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", state);
- CVMX_USB_LOG_PARAM("%d", pipe_handle);
- if (cvmx_unlikely((pipe_handle < 0) || (pipe_handle >= MAX_PIPES)))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
-
- /* Fail if the pipe isn't open */
- if (cvmx_unlikely((pipe->flags & __CVMX_USB_PIPE_FLAGS_OPEN) == 0))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
-
- /* Simply loop through and attempt to cancel each transaction */
- while (pipe->head) {
- cvmx_usb_status_t result = cvmx_usb_cancel(state, pipe_handle,
- __cvmx_usb_get_submit_handle(usb, pipe->head));
- if (cvmx_unlikely(result != CVMX_USB_SUCCESS))
- CVMX_USB_RETURN(result);
- }
- CVMX_USB_RETURN(CVMX_USB_SUCCESS);
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+ struct cvmx_usb_pipe *pipe = usb->pipe + pipe_handle;
+
+ if (unlikely((pipe_handle < 0) || (pipe_handle >= MAX_PIPES)))
+ return -EINVAL;
+
+ /* Fail if the pipe isn't open */
+ if (unlikely((pipe->flags & __CVMX_USB_PIPE_FLAGS_OPEN) == 0))
+ return -EINVAL;
+
+ /* Simply loop through and attempt to cancel each transaction */
+ while (pipe->head) {
+ int result = cvmx_usb_cancel(state, pipe_handle,
+ __cvmx_usb_get_submit_handle(usb, pipe->head));
+ if (unlikely(result != 0))
+ return result;
+ }
+ return 0;
}
/**
* Close a pipe created with cvmx_usb_open_pipe().
*
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- * @param pipe_handle
- * Pipe handle to close.
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ * @pipe_handle:
+ * Pipe handle to close.
*
- * @return CVMX_USB_SUCCESS or a negative error code defined in
- * cvmx_usb_status_t. CVMX_USB_BUSY is returned if the
- * pipe has outstanding transfers.
+ * Returns: 0 or a negative error code. -EBUSY is returned if the pipe has
+ * outstanding transfers.
*/
-cvmx_usb_status_t cvmx_usb_close_pipe(cvmx_usb_state_t *state, int pipe_handle)
+int cvmx_usb_close_pipe(struct cvmx_usb_state *state, int pipe_handle)
{
- cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
- cvmx_usb_pipe_t *pipe = usb->pipe + pipe_handle;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+ struct cvmx_usb_pipe *pipe = usb->pipe + pipe_handle;
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", state);
- CVMX_USB_LOG_PARAM("%d", pipe_handle);
- if (cvmx_unlikely((pipe_handle < 0) || (pipe_handle >= MAX_PIPES)))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ if (unlikely((pipe_handle < 0) || (pipe_handle >= MAX_PIPES)))
+ return -EINVAL;
- /* Fail if the pipe isn't open */
- if (cvmx_unlikely((pipe->flags & __CVMX_USB_PIPE_FLAGS_OPEN) == 0))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
+ /* Fail if the pipe isn't open */
+ if (unlikely((pipe->flags & __CVMX_USB_PIPE_FLAGS_OPEN) == 0))
+ return -EINVAL;
- /* Fail if the pipe has pending transactions */
- if (cvmx_unlikely(pipe->head))
- CVMX_USB_RETURN(CVMX_USB_BUSY);
+ /* Fail if the pipe has pending transactions */
+ if (unlikely(pipe->head))
+ return -EBUSY;
- pipe->flags = 0;
- __cvmx_usb_remove_pipe(&usb->idle_pipes, pipe);
- __cvmx_usb_append_pipe(&usb->free_pipes, pipe);
+ pipe->flags = 0;
+ __cvmx_usb_remove_pipe(&usb->idle_pipes, pipe);
+ __cvmx_usb_append_pipe(&usb->free_pipes, pipe);
- CVMX_USB_RETURN(CVMX_USB_SUCCESS);
+ return 0;
}
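
/*
 * A minimal sketch of how a caller might drain and then close a pipe using
 * the two functions above. The example_* name is hypothetical and the
 * pipe_handle is assumed to come from an earlier cvmx_usb_open_pipe() call.
 */
static int example_drain_and_close(struct cvmx_usb_state *state,
				   int pipe_handle)
{
	/*
	 * Every cancelled transaction still completes through the registered
	 * callback, with a status of CVMX_USB_COMPLETE_CANCEL.
	 */
	int result = cvmx_usb_cancel_all(state, pipe_handle);

	if (result)
		return result;

	/* With the transaction queue empty this no longer returns -EBUSY */
	return cvmx_usb_close_pipe(state, pipe_handle);
}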
/**
* Register a function to be called when various USB events occur.
*
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- * @param reason Which event to register for.
- * @param callback Function to call when the event occurs.
- * @param user_data User data parameter to the function.
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
+ * @reason: Which event to register for.
+ * @callback: Function to call when the event occurs.
+ * @user_data: User data parameter to the function.
*
- * @return CVMX_USB_SUCCESS or a negative error code defined in
- * cvmx_usb_status_t.
+ * Returns: 0 or a negative error code.
*/
-cvmx_usb_status_t cvmx_usb_register_callback(cvmx_usb_state_t *state,
- cvmx_usb_callback_t reason,
- cvmx_usb_callback_func_t callback,
- void *user_data)
+int cvmx_usb_register_callback(struct cvmx_usb_state *state,
+ enum cvmx_usb_callback reason,
+ cvmx_usb_callback_func_t callback,
+ void *user_data)
{
- cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
-
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", state);
- CVMX_USB_LOG_PARAM("%d", reason);
- CVMX_USB_LOG_PARAM("%p", callback);
- CVMX_USB_LOG_PARAM("%p", user_data);
- if (cvmx_unlikely(reason >= __CVMX_USB_CALLBACK_END))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
- if (cvmx_unlikely(!callback))
- CVMX_USB_RETURN(CVMX_USB_INVALID_PARAM);
-
- usb->callback[reason] = callback;
- usb->callback_data[reason] = user_data;
-
- CVMX_USB_RETURN(CVMX_USB_SUCCESS);
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+
+ if (unlikely(reason >= __CVMX_USB_CALLBACK_END))
+ return -EINVAL;
+ if (unlikely(!callback))
+ return -EINVAL;
+
+ usb->callback[reason] = callback;
+ usb->callback_data[reason] = user_data;
+
+ return 0;
}
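
/*
 * A minimal sketch of registering the port-changed callback whose prototype
 * is documented in cvmx-usb.h. The example_* names are hypothetical; the hcd
 * pointer follows the Linux usage described there and assumes
 * <linux/usb/hcd.h> is included.
 */
static void example_port_callback(struct cvmx_usb_state *usb,
				  enum cvmx_usb_callback reason,
				  enum cvmx_usb_complete status,
				  int pipe_handle, int submit_handle,
				  int bytes_transferred, void *user_data)
{
	/*
	 * For CVMX_USB_CALLBACK_PORT_CHANGED the handles are -1 and
	 * bytes_transferred is 0; just ask the OS to re-read root hub status.
	 */
	usb_hcd_poll_rh_status((struct usb_hcd *)user_data);
}

static int example_register_port_callback(struct cvmx_usb_state *state,
					   struct usb_hcd *hcd)
{
	return cvmx_usb_register_callback(state,
					  CVMX_USB_CALLBACK_PORT_CHANGED,
					  example_port_callback, hcd);
}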
@@ -2702,428 +2598,457 @@ cvmx_usb_status_t cvmx_usb_register_callback(cvmx_usb_state_t *state,
* Get the current USB protocol level frame number. The frame
* number is always in the range of 0-0x7ff.
*
- * @param state USB device state populated by
- * cvmx_usb_initialize().
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
*
- * @return USB frame number
+ * Returns: USB frame number
*/
-int cvmx_usb_get_frame_number(cvmx_usb_state_t *state)
+int cvmx_usb_get_frame_number(struct cvmx_usb_state *state)
{
- int frame_number;
- cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
- cvmx_usbcx_hfnum_t usbc_hfnum;
-
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", state);
+ int frame_number;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+ union cvmx_usbcx_hfnum usbc_hfnum;
- usbc_hfnum.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
- frame_number = usbc_hfnum.s.frnum;
+ usbc_hfnum.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
+ frame_number = usbc_hfnum.s.frnum;
- CVMX_USB_RETURN(frame_number);
+ return frame_number;
}
/**
- * @INTERNAL
* Poll a channel for status
*
- * @param usb USB device
- * @param channel Channel to poll
+ * @usb: USB device
+ * @channel: Channel to poll
*
- * @return Zero on success
+ * Returns: Zero on success
*/
-static int __cvmx_usb_poll_channel(cvmx_usb_internal_state_t *usb, int channel)
+static int __cvmx_usb_poll_channel(struct cvmx_usb_internal_state *usb, int channel)
{
- cvmx_usbcx_hcintx_t usbc_hcint;
- cvmx_usbcx_hctsizx_t usbc_hctsiz;
- cvmx_usbcx_hccharx_t usbc_hcchar;
- cvmx_usb_pipe_t *pipe;
- cvmx_usb_transaction_t *transaction;
- int bytes_this_transfer;
- int bytes_in_last_packet;
- int packets_processed;
- int buffer_space_left;
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", usb);
- CVMX_USB_LOG_PARAM("%d", channel);
-
- /* Read the interrupt status bits for the channel */
- usbc_hcint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index));
-
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
- usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
-
- if (usbc_hcchar.s.chena && usbc_hcchar.s.chdis) {
- /* There seems to be a bug in CN31XX which can cause interrupt
- IN transfers to get stuck until we do a write of HCCHARX
- without changing things */
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
- CVMX_USB_RETURN(0);
- }
-
- /* In non DMA mode the channels don't halt themselves. We need to
- manually disable channels that are left running */
- if (!usbc_hcint.s.chhltd) {
- if (usbc_hcchar.s.chena) {
- cvmx_usbcx_hcintmskx_t hcintmsk;
- /* Disable all interrupts except CHHLTD */
- hcintmsk.u32 = 0;
- hcintmsk.s.chhltdmsk = 1;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), hcintmsk.u32);
- usbc_hcchar.s.chdis = 1;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
- CVMX_USB_RETURN(0);
- }
- else if (usbc_hcint.s.xfercompl) {
- /* Successful IN/OUT with transfer complete. Channel halt isn't needed */
- }
- else {
- cvmx_dprintf("USB%d: Channel %d interrupt without halt\n", usb->index, channel);
- CVMX_USB_RETURN(0);
- }
- }
- }
- else {
- /* There is are no interrupts that we need to process when the channel is
- still running */
- if (!usbc_hcint.s.chhltd)
- CVMX_USB_RETURN(0);
- }
-
- /* Disable the channel interrupts now that it is done */
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
- usb->idle_hardware_channels |= (1<<channel);
-
- /* Make sure this channel is tied to a valid pipe */
- pipe = usb->pipe_for_channel[channel];
- CVMX_PREFETCH(pipe, 0);
- CVMX_PREFETCH(pipe, 128);
- if (!pipe)
- CVMX_USB_RETURN(0);
- transaction = pipe->head;
- CVMX_PREFETCH0(transaction);
-
- /* Disconnect this pipe from the HW channel. Later the schedule function will
- figure out which pipe needs to go */
- usb->pipe_for_channel[channel] = NULL;
- pipe->flags &= ~__CVMX_USB_PIPE_FLAGS_SCHEDULED;
-
- /* Read the channel config info so we can figure out how much data
- transfered */
- usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
- usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
-
- /* Calculating the number of bytes successfully transferred is dependent on
- the transfer direction */
- packets_processed = transaction->pktcnt - usbc_hctsiz.s.pktcnt;
- if (usbc_hcchar.s.epdir) {
- /* IN transactions are easy. For every byte received the hardware
- decrements xfersize. All we need to do is subtract the current
- value of xfersize from its starting value and we know how many
- bytes were written to the buffer */
- bytes_this_transfer = transaction->xfersize - usbc_hctsiz.s.xfersize;
- }
- else {
- /* OUT transaction don't decrement xfersize. Instead pktcnt is
- decremented on every successful packet send. The hardware does
- this when it receives an ACK, or NYET. If it doesn't
- receive one of these responses pktcnt doesn't change */
- bytes_this_transfer = packets_processed * usbc_hcchar.s.mps;
- /* The last packet may not be a full transfer if we didn't have
- enough data */
- if (bytes_this_transfer > transaction->xfersize)
- bytes_this_transfer = transaction->xfersize;
- }
- /* Figure out how many bytes were in the last packet of the transfer */
- if (packets_processed)
- bytes_in_last_packet = bytes_this_transfer - (packets_processed-1) * usbc_hcchar.s.mps;
- else
- bytes_in_last_packet = bytes_this_transfer;
-
- /* As a special case, setup transactions output the setup header, not
- the user's data. For this reason we don't count setup data as bytes
- transferred */
- if ((transaction->stage == CVMX_USB_STAGE_SETUP) ||
- (transaction->stage == CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE))
- bytes_this_transfer = 0;
-
- /* Optional debug output */
- if (cvmx_unlikely((usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_DEBUG_TRANSFERS) ||
- (pipe->flags & CVMX_USB_PIPE_FLAGS_DEBUG_TRANSFERS)))
- cvmx_dprintf("%s: Channel %d halted. Pipe %d transaction %d stage %d bytes=%d\n",
- __FUNCTION__, channel,
- __cvmx_usb_get_pipe_handle(usb, pipe),
- __cvmx_usb_get_submit_handle(usb, transaction),
- transaction->stage, bytes_this_transfer);
-
- /* Add the bytes transferred to the running total. It is important that
- bytes_this_transfer doesn't count any data that needs to be
- retransmitted */
- transaction->actual_bytes += bytes_this_transfer;
- if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
- buffer_space_left = transaction->iso_packets[0].length - transaction->actual_bytes;
- else
- buffer_space_left = transaction->buffer_length - transaction->actual_bytes;
-
- /* We need to remember the PID toggle state for the next transaction. The
- hardware already updated it for the next transaction */
- pipe->pid_toggle = !(usbc_hctsiz.s.pid == 0);
-
- /* For high speed bulk out, assume the next transaction will need to do a
- ping before proceeding. If this isn't true the ACK processing below
- will clear this flag */
- if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
- (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
- (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT))
- pipe->flags |= __CVMX_USB_PIPE_FLAGS_NEED_PING;
-
- if (usbc_hcint.s.stall) {
- /* STALL as a response means this transaction cannot be completed
- because the device can't process transactions. Tell the user. Any
- data that was transferred will be counted on the actual bytes
- transferred */
- pipe->pid_toggle = 0;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_STALL);
- }
- else if (usbc_hcint.s.xacterr) {
- /* We know at least one packet worked if we get a ACK or NAK. Reset the retry counter */
- if (usbc_hcint.s.nak || usbc_hcint.s.ack)
- transaction->retries = 0;
- transaction->retries++;
- if (transaction->retries > MAX_RETRIES) {
- /* XactErr as a response means the device signaled something wrong with
- the transfer. For example, PID toggle errors cause these */
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_XACTERR);
- }
- else {
- /* If this was a split then clear our split in progress marker */
- if (usb->active_split == transaction)
- usb->active_split = NULL;
- /* Rewind to the beginning of the transaction by anding off the
- split complete bit */
- transaction->stage &= ~1;
- pipe->split_sc_frame = -1;
- pipe->next_tx_frame += pipe->interval;
- if (pipe->next_tx_frame < usb->frame_number)
- pipe->next_tx_frame = usb->frame_number + pipe->interval -
- (usb->frame_number - pipe->next_tx_frame) % pipe->interval;
- }
- }
- else if (usbc_hcint.s.bblerr)
- {
- /* Babble Error (BblErr) */
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_BABBLEERR);
- }
- else if (usbc_hcint.s.datatglerr)
- {
- /* We'll retry the exact same transaction again */
- transaction->retries++;
- }
- else if (usbc_hcint.s.nyet) {
- /* NYET as a response is only allowed in three cases: as a response to
- a ping, as a response to a split transaction, and as a response to
- a bulk out. The ping case is handled by hardware, so we only have
- splits and bulk out */
- if (!__cvmx_usb_pipe_needs_split(usb, pipe)) {
- transaction->retries = 0;
- /* If there is more data to go then we need to try again. Otherwise
- this transaction is complete */
- if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet))
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
- }
- else {
- /* Split transactions retry the split complete 4 times then rewind
- to the start split and do the entire transactions again */
- transaction->retries++;
- if ((transaction->retries & 0x3) == 0) {
- /* Rewind to the beginning of the transaction by anding off the
- split complete bit */
- transaction->stage &= ~1;
- pipe->split_sc_frame = -1;
- }
- }
- }
- else if (usbc_hcint.s.ack) {
- transaction->retries = 0;
- /* The ACK bit can only be checked after the other error bits. This is
- because a multi packet transfer may succeed in a number of packets
- and then get a different response on the last packet. In this case
- both ACK and the last response bit will be set. If none of the
- other response bits is set, then the last packet must have been an
- ACK */
-
- /* Since we got an ACK, we know we don't need to do a ping on this
- pipe */
- pipe->flags &= ~__CVMX_USB_PIPE_FLAGS_NEED_PING;
-
- switch (transaction->type)
- {
- case CVMX_USB_TRANSFER_CONTROL:
- switch (transaction->stage)
- {
- case CVMX_USB_STAGE_NON_CONTROL:
- case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
- /* This should be impossible */
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_ERROR);
- break;
- case CVMX_USB_STAGE_SETUP:
- pipe->pid_toggle = 1;
- if (__cvmx_usb_pipe_needs_split(usb, pipe))
- transaction->stage = CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE;
- else {
- cvmx_usb_control_header_t *header = cvmx_phys_to_ptr(transaction->control_header);
- if (header->s.length)
- transaction->stage = CVMX_USB_STAGE_DATA;
- else
- transaction->stage = CVMX_USB_STAGE_STATUS;
- }
- break;
- case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
- {
- cvmx_usb_control_header_t *header = cvmx_phys_to_ptr(transaction->control_header);
- if (header->s.length)
- transaction->stage = CVMX_USB_STAGE_DATA;
- else
- transaction->stage = CVMX_USB_STAGE_STATUS;
- }
- break;
- case CVMX_USB_STAGE_DATA:
- if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
- transaction->stage = CVMX_USB_STAGE_DATA_SPLIT_COMPLETE;
- /* For setup OUT data that are splits, the hardware
- doesn't appear to count transferred data. Here
- we manually update the data transferred */
- if (!usbc_hcchar.s.epdir) {
- if (buffer_space_left < pipe->max_packet)
- transaction->actual_bytes += buffer_space_left;
- else
- transaction->actual_bytes += pipe->max_packet;
- }
- }
- else if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet)) {
- pipe->pid_toggle = 1;
- transaction->stage = CVMX_USB_STAGE_STATUS;
- }
- break;
- case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
- if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet)) {
- pipe->pid_toggle = 1;
- transaction->stage = CVMX_USB_STAGE_STATUS;
- }
- else {
- transaction->stage = CVMX_USB_STAGE_DATA;
- }
- break;
- case CVMX_USB_STAGE_STATUS:
- if (__cvmx_usb_pipe_needs_split(usb, pipe))
- transaction->stage = CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE;
- else
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
- break;
- case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
- break;
- }
- break;
- case CVMX_USB_TRANSFER_BULK:
- case CVMX_USB_TRANSFER_INTERRUPT:
- /* The only time a bulk transfer isn't complete when
- it finishes with an ACK is during a split transaction. For
- splits we need to continue the transfer if more data is
- needed */
- if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
- if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL)
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
- else {
- if (buffer_space_left && (bytes_in_last_packet == pipe->max_packet))
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
- else {
- if (transaction->type == CVMX_USB_TRANSFER_INTERRUPT)
- pipe->next_tx_frame += pipe->interval;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
- }
- }
- }
- else {
- if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
- (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
- (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
- (usbc_hcint.s.nak))
- pipe->flags |= __CVMX_USB_PIPE_FLAGS_NEED_PING;
- if (!buffer_space_left || (bytes_in_last_packet < pipe->max_packet)) {
- if (transaction->type == CVMX_USB_TRANSFER_INTERRUPT)
- pipe->next_tx_frame += pipe->interval;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
- }
- }
- break;
- case CVMX_USB_TRANSFER_ISOCHRONOUS:
- if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
- /* ISOCHRONOUS OUT splits don't require a complete split stage.
- Instead they use a sequence of begin OUT splits to transfer
- the data 188 bytes at a time. Once the transfer is complete,
- the pipe sleeps until the next schedule interval */
- if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
- /* If no space left or this wasn't a max size packet then
- this transfer is complete. Otherwise start it again
- to send the next 188 bytes */
- if (!buffer_space_left || (bytes_this_transfer < 188)) {
- pipe->next_tx_frame += pipe->interval;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
- }
- }
- else {
- if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) {
- /* We are in the incoming data phase. Keep getting
- data until we run out of space or get a small
- packet */
- if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet)) {
- pipe->next_tx_frame += pipe->interval;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
- }
- }
- else
- transaction->stage = CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
- }
- }
- else {
- pipe->next_tx_frame += pipe->interval;
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
- }
- break;
- }
- }
- else if (usbc_hcint.s.nak) {
- /* If this was a split then clear our split in progress marker */
- if (usb->active_split == transaction)
- usb->active_split = NULL;
- /* NAK as a response means the device couldn't accept the transaction,
- but it should be retried in the future. Rewind to the beginning of
- the transaction by anding off the split complete bit. Retry in the
- next interval */
- transaction->retries = 0;
- transaction->stage &= ~1;
- pipe->next_tx_frame += pipe->interval;
- if (pipe->next_tx_frame < usb->frame_number)
- pipe->next_tx_frame = usb->frame_number + pipe->interval -
- (usb->frame_number - pipe->next_tx_frame) % pipe->interval;
- }
- else {
- cvmx_usb_port_status_t port;
- port = cvmx_usb_get_status((cvmx_usb_state_t *)usb);
- if (port.port_enabled)
- {
- /* We'll retry the exact same transaction again */
- transaction->retries++;
- }
- else
- {
- /* We get channel halted interrupts with no result bits sets when the
- cable is unplugged */
- __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_ERROR);
- }
- }
- CVMX_USB_RETURN(0);
+ union cvmx_usbcx_hcintx usbc_hcint;
+ union cvmx_usbcx_hctsizx usbc_hctsiz;
+ union cvmx_usbcx_hccharx usbc_hcchar;
+ struct cvmx_usb_pipe *pipe;
+ struct cvmx_usb_transaction *transaction;
+ int bytes_this_transfer;
+ int bytes_in_last_packet;
+ int packets_processed;
+ int buffer_space_left;
+
+ /* Read the interrupt status bits for the channel */
+ usbc_hcint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCINTX(channel, usb->index));
+
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA) {
+ usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
+
+ if (usbc_hcchar.s.chena && usbc_hcchar.s.chdis) {
+ /*
+ * There seems to be a bug in CN31XX which can cause
+ * interrupt IN transfers to get stuck until we do a
+ * write of HCCHARX without changing things
+ */
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
+ return 0;
+ }
+
+ /*
+ * In non DMA mode the channels don't halt themselves. We need
+ * to manually disable channels that are left running
+ */
+ if (!usbc_hcint.s.chhltd) {
+ if (usbc_hcchar.s.chena) {
+ union cvmx_usbcx_hcintmskx hcintmsk;
+ /* Disable all interrupts except CHHLTD */
+ hcintmsk.u32 = 0;
+ hcintmsk.s.chhltdmsk = 1;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), hcintmsk.u32);
+ usbc_hcchar.s.chdis = 1;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index), usbc_hcchar.u32);
+ return 0;
+ } else if (usbc_hcint.s.xfercompl) {
+ /* Successful IN/OUT with transfer complete. Channel halt isn't needed */
+ } else {
+ cvmx_dprintf("USB%d: Channel %d interrupt without halt\n", usb->index, channel);
+ return 0;
+ }
+ }
+ } else {
+ /*
+		 * There are no interrupts that we need to process when the
+ * channel is still running
+ */
+ if (!usbc_hcint.s.chhltd)
+ return 0;
+ }
+
+ /* Disable the channel interrupts now that it is done */
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HCINTMSKX(channel, usb->index), 0);
+ usb->idle_hardware_channels |= (1<<channel);
+
+ /* Make sure this channel is tied to a valid pipe */
+ pipe = usb->pipe_for_channel[channel];
+ CVMX_PREFETCH(pipe, 0);
+ CVMX_PREFETCH(pipe, 128);
+ if (!pipe)
+ return 0;
+ transaction = pipe->head;
+ CVMX_PREFETCH0(transaction);
+
+ /*
+ * Disconnect this pipe from the HW channel. Later the schedule
+ * function will figure out which pipe needs to go
+ */
+ usb->pipe_for_channel[channel] = NULL;
+ pipe->flags &= ~__CVMX_USB_PIPE_FLAGS_SCHEDULED;
+
+ /*
+	 * Read the channel config info so we can figure out how much data
+	 * was transferred
+ */
+ usbc_hcchar.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCCHARX(channel, usb->index));
+ usbc_hctsiz.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HCTSIZX(channel, usb->index));
+
+ /*
+ * Calculating the number of bytes successfully transferred is dependent
+ * on the transfer direction
+ */
+ packets_processed = transaction->pktcnt - usbc_hctsiz.s.pktcnt;
+ if (usbc_hcchar.s.epdir) {
+ /*
+ * IN transactions are easy. For every byte received the
+ * hardware decrements xfersize. All we need to do is subtract
+ * the current value of xfersize from its starting value and we
+ * know how many bytes were written to the buffer
+ */
+ bytes_this_transfer = transaction->xfersize - usbc_hctsiz.s.xfersize;
+ } else {
+ /*
+		 * OUT transactions don't decrement xfersize. Instead pktcnt is
+ * decremented on every successful packet send. The hardware
+ * does this when it receives an ACK, or NYET. If it doesn't
+ * receive one of these responses pktcnt doesn't change
+ */
+ bytes_this_transfer = packets_processed * usbc_hcchar.s.mps;
+ /*
+ * The last packet may not be a full transfer if we didn't have
+ * enough data
+ */
+ if (bytes_this_transfer > transaction->xfersize)
+ bytes_this_transfer = transaction->xfersize;
+ }
+ /* Figure out how many bytes were in the last packet of the transfer */
+ if (packets_processed)
+ bytes_in_last_packet = bytes_this_transfer - (packets_processed-1) * usbc_hcchar.s.mps;
+ else
+ bytes_in_last_packet = bytes_this_transfer;
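+	/*
+	 * Worked example with illustrative numbers: an OUT transfer with
+	 * mps = 512, xfersize = 1200 and an initial pktcnt of 3 that fully
+	 * completes leaves usbc_hctsiz.s.pktcnt at 0, so packets_processed
+	 * is 3, bytes_this_transfer clamps from 1536 down to 1200, and
+	 * bytes_in_last_packet is 1200 - 2 * 512 = 176.
+	 */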
+
+ /*
+ * As a special case, setup transactions output the setup header, not
+ * the user's data. For this reason we don't count setup data as bytes
+ * transferred
+ */
+ if ((transaction->stage == CVMX_USB_STAGE_SETUP) ||
+ (transaction->stage == CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE))
+ bytes_this_transfer = 0;
+
+ /*
+ * Add the bytes transferred to the running total. It is important that
+ * bytes_this_transfer doesn't count any data that needs to be
+ * retransmitted
+ */
+ transaction->actual_bytes += bytes_this_transfer;
+ if (transaction->type == CVMX_USB_TRANSFER_ISOCHRONOUS)
+ buffer_space_left = transaction->iso_packets[0].length - transaction->actual_bytes;
+ else
+ buffer_space_left = transaction->buffer_length - transaction->actual_bytes;
+
+ /*
+ * We need to remember the PID toggle state for the next transaction.
+ * The hardware already updated it for the next transaction
+ */
+ pipe->pid_toggle = !(usbc_hctsiz.s.pid == 0);
+
+ /*
+ * For high speed bulk out, assume the next transaction will need to do
+ * a ping before proceeding. If this isn't true the ACK processing below
+ * will clear this flag
+ */
+ if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
+ (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
+ (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT))
+ pipe->flags |= __CVMX_USB_PIPE_FLAGS_NEED_PING;
+
+ if (usbc_hcint.s.stall) {
+ /*
+ * STALL as a response means this transaction cannot be
+ * completed because the device can't process transactions. Tell
+ * the user. Any data that was transferred will be counted on
+ * the actual bytes transferred
+ */
+ pipe->pid_toggle = 0;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_STALL);
+ } else if (usbc_hcint.s.xacterr) {
+ /*
+		 * We know at least one packet worked if we get an ACK or NAK.
+ * Reset the retry counter
+ */
+ if (usbc_hcint.s.nak || usbc_hcint.s.ack)
+ transaction->retries = 0;
+ transaction->retries++;
+ if (transaction->retries > MAX_RETRIES) {
+ /*
+ * XactErr as a response means the device signaled
+ * something wrong with the transfer. For example, PID
+ * toggle errors cause these
+ */
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_XACTERR);
+ } else {
+ /*
+ * If this was a split then clear our split in progress
+ * marker
+ */
+ if (usb->active_split == transaction)
+ usb->active_split = NULL;
+ /*
+ * Rewind to the beginning of the transaction by anding
+ * off the split complete bit
+ */
+ transaction->stage &= ~1;
+ pipe->split_sc_frame = -1;
+ pipe->next_tx_frame += pipe->interval;
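+			/*
+			 * With illustrative numbers, interval = 8,
+			 * next_tx_frame = 108 and frame_number = 117, the
+			 * check below yields 117 + 8 - (9 % 8) = 124: the
+			 * next slot on the pipe's original 8-frame grid
+			 * after the current frame.
+			 */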
+ if (pipe->next_tx_frame < usb->frame_number)
+ pipe->next_tx_frame = usb->frame_number + pipe->interval -
+ (usb->frame_number - pipe->next_tx_frame) % pipe->interval;
+ }
+ } else if (usbc_hcint.s.bblerr) {
+ /* Babble Error (BblErr) */
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_BABBLEERR);
+ } else if (usbc_hcint.s.datatglerr) {
+ /* We'll retry the exact same transaction again */
+ transaction->retries++;
+ } else if (usbc_hcint.s.nyet) {
+ /*
+ * NYET as a response is only allowed in three cases: as a
+ * response to a ping, as a response to a split transaction, and
+ * as a response to a bulk out. The ping case is handled by
+ * hardware, so we only have splits and bulk out
+ */
+ if (!__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ transaction->retries = 0;
+ /*
+ * If there is more data to go then we need to try
+ * again. Otherwise this transaction is complete
+ */
+ if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet))
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ } else {
+ /*
+ * Split transactions retry the split complete 4 times
+ * then rewind to the start split and do the entire
+			 * transaction again
+ */
+ transaction->retries++;
+ if ((transaction->retries & 0x3) == 0) {
+ /*
+ * Rewind to the beginning of the transaction by
+ * anding off the split complete bit
+ */
+ transaction->stage &= ~1;
+ pipe->split_sc_frame = -1;
+ }
+ }
+ } else if (usbc_hcint.s.ack) {
+ transaction->retries = 0;
+ /*
+ * The ACK bit can only be checked after the other error bits.
+ * This is because a multi packet transfer may succeed in a
+ * number of packets and then get a different response on the
+ * last packet. In this case both ACK and the last response bit
+ * will be set. If none of the other response bits is set, then
+ * the last packet must have been an ACK
+ *
+ * Since we got an ACK, we know we don't need to do a ping on
+ * this pipe
+ */
+ pipe->flags &= ~__CVMX_USB_PIPE_FLAGS_NEED_PING;
+
+ switch (transaction->type) {
+ case CVMX_USB_TRANSFER_CONTROL:
+ switch (transaction->stage) {
+ case CVMX_USB_STAGE_NON_CONTROL:
+ case CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE:
+ /* This should be impossible */
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_ERROR);
+ break;
+ case CVMX_USB_STAGE_SETUP:
+ pipe->pid_toggle = 1;
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ transaction->stage = CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE;
+ else {
+ union cvmx_usb_control_header *header =
+ cvmx_phys_to_ptr(transaction->control_header);
+ if (header->s.length)
+ transaction->stage = CVMX_USB_STAGE_DATA;
+ else
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ }
+ break;
+ case CVMX_USB_STAGE_SETUP_SPLIT_COMPLETE:
+ {
+ union cvmx_usb_control_header *header =
+ cvmx_phys_to_ptr(transaction->control_header);
+ if (header->s.length)
+ transaction->stage = CVMX_USB_STAGE_DATA;
+ else
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ }
+ break;
+ case CVMX_USB_STAGE_DATA:
+ if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ transaction->stage = CVMX_USB_STAGE_DATA_SPLIT_COMPLETE;
+ /*
+ * For setup OUT data that are splits,
+ * the hardware doesn't appear to count
+ * transferred data. Here we manually
+ * update the data transferred
+ */
+ if (!usbc_hcchar.s.epdir) {
+ if (buffer_space_left < pipe->max_packet)
+ transaction->actual_bytes += buffer_space_left;
+ else
+ transaction->actual_bytes += pipe->max_packet;
+ }
+ } else if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet)) {
+ pipe->pid_toggle = 1;
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ }
+ break;
+ case CVMX_USB_STAGE_DATA_SPLIT_COMPLETE:
+ if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet)) {
+ pipe->pid_toggle = 1;
+ transaction->stage = CVMX_USB_STAGE_STATUS;
+ } else {
+ transaction->stage = CVMX_USB_STAGE_DATA;
+ }
+ break;
+ case CVMX_USB_STAGE_STATUS:
+ if (__cvmx_usb_pipe_needs_split(usb, pipe))
+ transaction->stage = CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE;
+ else
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ break;
+ case CVMX_USB_STAGE_STATUS_SPLIT_COMPLETE:
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ break;
+ }
+ break;
+ case CVMX_USB_TRANSFER_BULK:
+ case CVMX_USB_TRANSFER_INTERRUPT:
+ /*
+ * The only time a bulk transfer isn't complete when it
+ * finishes with an ACK is during a split transaction.
+ * For splits we need to continue the transfer if more
+ * data is needed
+ */
+ if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL)
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+ else {
+ if (buffer_space_left && (bytes_in_last_packet == pipe->max_packet))
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL;
+ else {
+ if (transaction->type == CVMX_USB_TRANSFER_INTERRUPT)
+ pipe->next_tx_frame += pipe->interval;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ }
+ }
+ } else {
+ if ((pipe->device_speed == CVMX_USB_SPEED_HIGH) &&
+ (pipe->transfer_type == CVMX_USB_TRANSFER_BULK) &&
+ (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) &&
+ (usbc_hcint.s.nak))
+ pipe->flags |= __CVMX_USB_PIPE_FLAGS_NEED_PING;
+ if (!buffer_space_left || (bytes_in_last_packet < pipe->max_packet)) {
+ if (transaction->type == CVMX_USB_TRANSFER_INTERRUPT)
+ pipe->next_tx_frame += pipe->interval;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ }
+ }
+ break;
+ case CVMX_USB_TRANSFER_ISOCHRONOUS:
+ if (__cvmx_usb_pipe_needs_split(usb, pipe)) {
+ /*
+ * ISOCHRONOUS OUT splits don't require a
+ * complete split stage. Instead they use a
+ * sequence of begin OUT splits to transfer the
+ * data 188 bytes at a time. Once the transfer
+ * is complete, the pipe sleeps until the next
+ * schedule interval
+ */
+ if (pipe->transfer_dir == CVMX_USB_DIRECTION_OUT) {
+ /*
+ * If no space left or this wasn't a max
+ * size packet then this transfer is
+ * complete. Otherwise start it again to
+ * send the next 188 bytes
+ */
+ if (!buffer_space_left || (bytes_this_transfer < 188)) {
+ pipe->next_tx_frame += pipe->interval;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ }
+ } else {
+ if (transaction->stage == CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE) {
+ /*
+ * We are in the incoming data
+ * phase. Keep getting data
+ * until we run out of space or
+ * get a small packet
+ */
+ if ((buffer_space_left == 0) || (bytes_in_last_packet < pipe->max_packet)) {
+ pipe->next_tx_frame += pipe->interval;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ }
+ } else
+ transaction->stage = CVMX_USB_STAGE_NON_CONTROL_SPLIT_COMPLETE;
+ }
+ } else {
+ pipe->next_tx_frame += pipe->interval;
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_SUCCESS);
+ }
+ break;
+ }
+ } else if (usbc_hcint.s.nak) {
+ /* If this was a split then clear our split in progress marker */
+ if (usb->active_split == transaction)
+ usb->active_split = NULL;
+ /*
+ * NAK as a response means the device couldn't accept the
+ * transaction, but it should be retried in the future. Rewind
+ * to the beginning of the transaction by anding off the split
+ * complete bit. Retry in the next interval
+ */
+ transaction->retries = 0;
+ transaction->stage &= ~1;
+ pipe->next_tx_frame += pipe->interval;
+ if (pipe->next_tx_frame < usb->frame_number)
+ pipe->next_tx_frame = usb->frame_number + pipe->interval -
+ (usb->frame_number - pipe->next_tx_frame) % pipe->interval;
+ } else {
+ struct cvmx_usb_port_status port;
+ port = cvmx_usb_get_status((struct cvmx_usb_state *)usb);
+ if (port.port_enabled) {
+ /* We'll retry the exact same transaction again */
+ transaction->retries++;
+ } else {
+ /*
+ * We get channel halted interrupts with no result bits
+			 * set when the cable is unplugged
+ */
+ __cvmx_usb_perform_complete(usb, pipe, transaction, CVMX_USB_COMPLETE_ERROR);
+ }
+ }
+ return 0;
}
@@ -3133,97 +3058,101 @@ static int __cvmx_usb_poll_channel(cvmx_usb_internal_state_t *usb, int channel)
* handler for the USB controller. It can also be called
* periodically in a loop for non-interrupt based operation.
*
- * @param state USB device state populated by
- * cvmx_usb_initialize().
+ * @state: USB device state populated by
+ * cvmx_usb_initialize().
*
- * @return CVMX_USB_SUCCESS or a negative error code defined in
- * cvmx_usb_status_t.
+ * Returns: 0 or a negative error code.
*/
-cvmx_usb_status_t cvmx_usb_poll(cvmx_usb_state_t *state)
+int cvmx_usb_poll(struct cvmx_usb_state *state)
{
- cvmx_usbcx_hfnum_t usbc_hfnum;
- cvmx_usbcx_gintsts_t usbc_gintsts;
- cvmx_usb_internal_state_t *usb = (cvmx_usb_internal_state_t*)state;
-
- CVMX_PREFETCH(usb, 0);
- CVMX_PREFETCH(usb, 1*128);
- CVMX_PREFETCH(usb, 2*128);
- CVMX_PREFETCH(usb, 3*128);
- CVMX_PREFETCH(usb, 4*128);
-
- CVMX_USB_LOG_CALLED();
- CVMX_USB_LOG_PARAM("%p", state);
-
- /* Update the frame counter */
- usbc_hfnum.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
- if ((usb->frame_number&0x3fff) > usbc_hfnum.s.frnum)
- usb->frame_number += 0x4000;
- usb->frame_number &= ~0x3fffull;
- usb->frame_number |= usbc_hfnum.s.frnum;
-
- /* Read the pending interrupts */
- usbc_gintsts.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GINTSTS(usb->index));
-
- /* Clear the interrupts now that we know about them */
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index), usbc_gintsts.u32);
-
- if (usbc_gintsts.s.rxflvl) {
- /* RxFIFO Non-Empty (RxFLvl)
- Indicates that there is at least one packet pending to be read
- from the RxFIFO. */
- /* In DMA mode this is handled by hardware */
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- __cvmx_usb_poll_rx_fifo(usb);
- }
- if (usbc_gintsts.s.ptxfemp || usbc_gintsts.s.nptxfemp) {
- /* Fill the Tx FIFOs when not in DMA mode */
- if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
- __cvmx_usb_poll_tx_fifo(usb);
- }
- if (usbc_gintsts.s.disconnint || usbc_gintsts.s.prtint) {
- cvmx_usbcx_hprt_t usbc_hprt;
- /* Disconnect Detected Interrupt (DisconnInt)
- Asserted when a device disconnect is detected. */
-
- /* Host Port Interrupt (PrtInt)
- The core sets this bit to indicate a change in port status of one
- of the O2P USB core ports in Host mode. The application must
- read the Host Port Control and Status (HPRT) register to
- determine the exact event that caused this interrupt. The
- application must clear the appropriate status bit in the Host Port
- Control and Status register to clear this bit. */
-
- /* Call the user's port callback */
- __cvmx_usb_perform_callback(usb, NULL, NULL,
- CVMX_USB_CALLBACK_PORT_CHANGED,
- CVMX_USB_COMPLETE_SUCCESS);
- /* Clear the port change bits */
- usbc_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
- usbc_hprt.s.prtena = 0;
- __cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index), usbc_hprt.u32);
- }
- if (usbc_gintsts.s.hchint) {
- /* Host Channels Interrupt (HChInt)
- The core sets this bit to indicate that an interrupt is pending on
- one of the channels of the core (in Host mode). The application
- must read the Host All Channels Interrupt (HAINT) register to
- determine the exact number of the channel on which the
- interrupt occurred, and then read the corresponding Host
- Channel-n Interrupt (HCINTn) register to determine the exact
- cause of the interrupt. The application must clear the
- appropriate status bit in the HCINTn register to clear this bit. */
- cvmx_usbcx_haint_t usbc_haint;
- usbc_haint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HAINT(usb->index));
- while (usbc_haint.u32) {
- int channel;
- CVMX_CLZ(channel, usbc_haint.u32);
- channel = 31 - channel;
- __cvmx_usb_poll_channel(usb, channel);
- usbc_haint.u32 ^= 1<<channel;
- }
- }
-
- __cvmx_usb_schedule(usb, usbc_gintsts.s.sof);
-
- CVMX_USB_RETURN(CVMX_USB_SUCCESS);
+ union cvmx_usbcx_hfnum usbc_hfnum;
+ union cvmx_usbcx_gintsts usbc_gintsts;
+ struct cvmx_usb_internal_state *usb = (struct cvmx_usb_internal_state *)state;
+
+ CVMX_PREFETCH(usb, 0);
+ CVMX_PREFETCH(usb, 1*128);
+ CVMX_PREFETCH(usb, 2*128);
+ CVMX_PREFETCH(usb, 3*128);
+ CVMX_PREFETCH(usb, 4*128);
+
+ /* Update the frame counter */
+ usbc_hfnum.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HFNUM(usb->index));
+ if ((usb->frame_number&0x3fff) > usbc_hfnum.s.frnum)
+ usb->frame_number += 0x4000;
+ usb->frame_number &= ~0x3fffull;
+ usb->frame_number |= usbc_hfnum.s.frnum;
+
+ /* Read the pending interrupts */
+ usbc_gintsts.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_GINTSTS(usb->index));
+
+ /* Clear the interrupts now that we know about them */
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_GINTSTS(usb->index), usbc_gintsts.u32);
+
+ if (usbc_gintsts.s.rxflvl) {
+ /*
+ * RxFIFO Non-Empty (RxFLvl)
+ * Indicates that there is at least one packet pending to be
+ * read from the RxFIFO.
+ *
+ * In DMA mode this is handled by hardware
+ */
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ __cvmx_usb_poll_rx_fifo(usb);
+ }
+ if (usbc_gintsts.s.ptxfemp || usbc_gintsts.s.nptxfemp) {
+ /* Fill the Tx FIFOs when not in DMA mode */
+ if (usb->init_flags & CVMX_USB_INITIALIZE_FLAGS_NO_DMA)
+ __cvmx_usb_poll_tx_fifo(usb);
+ }
+ if (usbc_gintsts.s.disconnint || usbc_gintsts.s.prtint) {
+ union cvmx_usbcx_hprt usbc_hprt;
+ /*
+ * Disconnect Detected Interrupt (DisconnInt)
+ * Asserted when a device disconnect is detected.
+ *
+ * Host Port Interrupt (PrtInt)
+ * The core sets this bit to indicate a change in port status of
+ * one of the O2P USB core ports in Host mode. The application
+ * must read the Host Port Control and Status (HPRT) register to
+ * determine the exact event that caused this interrupt. The
+ * application must clear the appropriate status bit in the Host
+ * Port Control and Status register to clear this bit.
+ *
+ * Call the user's port callback
+ */
+ __cvmx_usb_perform_callback(usb, NULL, NULL,
+ CVMX_USB_CALLBACK_PORT_CHANGED,
+ CVMX_USB_COMPLETE_SUCCESS);
+ /* Clear the port change bits */
+ usbc_hprt.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HPRT(usb->index));
+ usbc_hprt.s.prtena = 0;
+ __cvmx_usb_write_csr32(usb, CVMX_USBCX_HPRT(usb->index), usbc_hprt.u32);
+ }
+ if (usbc_gintsts.s.hchint) {
+ /*
+ * Host Channels Interrupt (HChInt)
+ * The core sets this bit to indicate that an interrupt is
+ * pending on one of the channels of the core (in Host mode).
+ * The application must read the Host All Channels Interrupt
+ * (HAINT) register to determine the exact number of the channel
+ * on which the interrupt occurred, and then read the
+ * corresponding Host Channel-n Interrupt (HCINTn) register to
+ * determine the exact cause of the interrupt. The application
+ * must clear the appropriate status bit in the HCINTn register
+ * to clear this bit.
+ */
+ union cvmx_usbcx_haint usbc_haint;
+ usbc_haint.u32 = __cvmx_usb_read_csr32(usb, CVMX_USBCX_HAINT(usb->index));
+ while (usbc_haint.u32) {
+ int channel;
+ CVMX_CLZ(channel, usbc_haint.u32);
+ channel = 31 - channel;
+ __cvmx_usb_poll_channel(usb, channel);
+ usbc_haint.u32 ^= 1<<channel;
+ }
+ }
+
+ __cvmx_usb_schedule(usb, usbc_gintsts.s.sof);
+
+ return 0;
}
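
/*
 * A sketch of the interrupt-driven usage described in cvmx-usb.h below: the
 * caller serializes access with its own lock and calls cvmx_usb_poll() from
 * the USB interrupt handler. struct example_usb_port and the irq wiring are
 * hypothetical and assume <linux/interrupt.h> and <linux/spinlock.h>.
 */
struct example_usb_port {
	struct cvmx_usb_state usb;
	spinlock_t lock;
};

static irqreturn_t example_usb_irq(int irq, void *data)
{
	struct example_usb_port *port = data;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	/* Services the Rx/Tx FIFOs, halted channels and port change events */
	cvmx_usb_poll(&port->usb);
	spin_unlock_irqrestore(&port->lock, flags);

	return IRQ_HANDLED;
}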
diff --git a/drivers/staging/octeon-usb/cvmx-usb.h b/drivers/staging/octeon-usb/cvmx-usb.h
index db9cc05e5d3..8bf36966ef1 100644
--- a/drivers/staging/octeon-usb/cvmx-usb.h
+++ b/drivers/staging/octeon-usb/cvmx-usb.h
@@ -39,8 +39,6 @@
/**
- * @file
- *
* "cvmx-usb.h" defines a set of low level USB functions to help
* developers create Octeon USB drivers for various operating
* systems. These functions provide a generic API to the Octeon
@@ -49,24 +47,24 @@
*
* At a high level the device driver needs to:
*
- * -# Call cvmx_usb_get_num_ports() to get the number of
- * supported ports.
- * -# Call cvmx_usb_initialize() for each Octeon USB port.
- * -# Enable the port using cvmx_usb_enable().
- * -# Either periodically, or in an interrupt handler, call
- * cvmx_usb_poll() to service USB events.
- * -# Manage pipes using cvmx_usb_open_pipe() and
- * cvmx_usb_close_pipe().
- * -# Manage transfers using cvmx_usb_submit_*() and
- * cvmx_usb_cancel*().
- * -# Shutdown USB on unload using cvmx_usb_shutdown().
+ * - Call cvmx_usb_get_num_ports() to get the number of
+ * supported ports.
+ * - Call cvmx_usb_initialize() for each Octeon USB port.
+ * - Enable the port using cvmx_usb_enable().
+ * - Either periodically, or in an interrupt handler, call
+ * cvmx_usb_poll() to service USB events.
+ * - Manage pipes using cvmx_usb_open_pipe() and
+ * cvmx_usb_close_pipe().
+ * - Manage transfers using cvmx_usb_submit_*() and
+ * cvmx_usb_cancel*().
+ * - Shut down USB on unload using cvmx_usb_shutdown().
*
* To monitor USB status changes, the device driver must use
* cvmx_usb_register_callback() to register for events that it
* is interested in. Below are a few hints on successfully
* implementing a driver on top of this API.
*
- * <h2>Initialization</h2>
+ * == Initialization ==
*
* When a driver is first loaded, it is normally not necessary
* to bring up the USB port completely. Most operating systems
@@ -75,24 +73,24 @@
* initialize anything found, and then enable the hardware.
*
* In the probe phase you should:
- * -# Use cvmx_usb_get_num_ports() to determine the number of
- * USB port to be supported.
- * -# Allocate space for a cvmx_usb_state_t structure for each
- * port.
- * -# Tell the operating system about each port
+ * - Use cvmx_usb_get_num_ports() to determine the number of
+ *   USB ports to be supported.
+ * - Allocate space for a struct cvmx_usb_state for each
+ * port.
+ * - Tell the operating system about each port.
*
* In the initialization phase you should:
- * -# Use cvmx_usb_initialize() on each port.
- * -# Do not call cvmx_usb_enable(). This leaves the USB port in
- * the disabled state until the operating system is ready.
+ * - Use cvmx_usb_initialize() on each port.
+ * - Do not call cvmx_usb_enable(). This leaves the USB port in
+ * the disabled state until the operating system is ready.
*
* Finally, in the enable phase you should:
- * -# Call cvmx_usb_enable() on the appropriate port.
- * -# Note that some operating system use a RESET instead of an
- * enable call. To implement RESET, you should call
- * cvmx_usb_disable() followed by cvmx_usb_enable().
+ * - Call cvmx_usb_enable() on the appropriate port.
+ * - Note that some operating systems use a RESET instead of an
+ * enable call. To implement RESET, you should call
+ * cvmx_usb_disable() followed by cvmx_usb_enable().
*
- * <h2>Locking</h2>
+ * == Locking ==
*
* All of the functions in the cvmx-usb API assume exclusive
* access to the USB hardware and internal data structures. This
@@ -112,25 +110,24 @@
* take a lock to make sure that another core cannot call
* cvmx-usb.
*
- * <h2>Port callback</h2>
+ * == Port callback ==
*
* The port callback prototype needs to look as follows:
*
- * void port_callback(cvmx_usb_state_t *usb,
- * cvmx_usb_callback_t reason,
- * cvmx_usb_complete_t status,
+ * void port_callback(struct cvmx_usb_state *usb,
+ * enum cvmx_usb_callback reason,
+ * enum cvmx_usb_complete status,
* int pipe_handle,
* int submit_handle,
* int bytes_transferred,
* void *user_data);
- * - @b usb is the cvmx_usb_state_t for the port.
- * - @b reason will always be
- * CVMX_USB_CALLBACK_PORT_CHANGED.
- * - @b status will always be CVMX_USB_COMPLETE_SUCCESS.
- * - @b pipe_handle will always be -1.
- * - @b submit_handle will always be -1.
- * - @b bytes_transferred will always be 0.
- * - @b user_data is the void pointer originally passed along
+ * - "usb" is the struct cvmx_usb_state for the port.
+ * - "reason" will always be CVMX_USB_CALLBACK_PORT_CHANGED.
+ * - "status" will always be CVMX_USB_COMPLETE_SUCCESS.
+ * - "pipe_handle" will always be -1.
+ * - "submit_handle" will always be -1.
+ * - "bytes_transferred" will always be 0.
+ * - "user_data" is the void pointer originally passed along
* with the callback. Use this for any state information you
* need.
*
@@ -140,45 +137,43 @@
* root port. Normally all the callback needs to do is tell the
* operating system to poll the root hub for status. Under
* Linux, this is performed by calling usb_hcd_poll_rh_status().
- * In the Linux driver we use @b user_data. to pass around the
+ * In the Linux driver we use "user_data" to pass around the
* Linux "hcd" structure. Once the port callback completes,
* Linux automatically calls octeon_usb_hub_status_data() which
* uses cvmx_usb_get_status() to determine the root port status.
*
- * <h2>Complete callback</h2>
+ * == Complete callback ==
*
* The completion callback prototype needs to look as follows:
*
- * void complete_callback(cvmx_usb_state_t *usb,
- * cvmx_usb_callback_t reason,
- * cvmx_usb_complete_t status,
+ * void complete_callback(struct cvmx_usb_state *usb,
+ * enum cvmx_usb_callback reason,
+ * enum cvmx_usb_complete status,
* int pipe_handle,
* int submit_handle,
* int bytes_transferred,
* void *user_data);
- * - @b usb is the cvmx_usb_state_t for the port.
- * - @b reason will always be
- * CVMX_USB_CALLBACK_TRANSFER_COMPLETE.
- * - @b status will be one of the cvmx_usb_complete_t
- * enumerations.
- * - @b pipe_handle is the handle to the pipe the transaction
+ * - "usb" is the struct cvmx_usb_state for the port.
+ * - "reason" will always be CVMX_USB_CALLBACK_TRANSFER_COMPLETE.
+ * - "status" will be one of the cvmx_usb_complete enumerations.
+ * - "pipe_handle" is the handle to the pipe the transaction
* was originally submitted on.
- * - @b submit_handle is the handle returned by the original
+ * - "submit_handle" is the handle returned by the original
* cvmx_usb_submit_* call.
- * - @b bytes_transferred is the number of bytes successfully
+ * - "bytes_transferred" is the number of bytes successfully
* transferred in the transaction. This will be zero on most
* error conditions.
- * - @b user_data is the void pointer originally passed along
+ * - "user_data" is the void pointer originally passed along
* with the callback. Use this for any state information you
* need. For example, the Linux "urb" is stored in here in the
* Linux driver.
*
- * In general your callback handler should use @b status and @b
- * bytes_transferred to tell the operating system the how the
+ * In general your callback handler should use "status" and
+ * "bytes_transferred" to tell the operating system how the
* transaction completed. Normally the pipe is not changed in
* this callback.
*
- * <h2>Canceling transactions</h2>
+ * == Canceling transactions ==
*
* When a transaction is cancelled using cvmx_usb_cancel*(), the
* actual length of time until the complete callback is called
@@ -188,7 +183,7 @@
* these cases, the complete handler will receive
* CVMX_USB_COMPLETE_CANCEL.
*
- * <h2>Handling pipes</h2>
+ * == Handling pipes ==
*
 * A USB "pipe" is a software construct created by this API to
* enable the ordering of usb transactions to a device endpoint.
@@ -210,223 +205,16 @@
* destroy a pipe for every transaction. A sequence of
 * transactions to the same endpoint must use the same pipe.
*
- * <h2>Root Hub</h2>
+ * == Root Hub ==
*
* Some operating systems view the usb root port as a normal usb
* hub. These systems attempt to control the root hub with
* messages similar to the usb 2.0 spec for hub control and
* status. For these systems it may be necessary to write
* function to decode standard usb control messages into
- * equivalent cvmx-usb API calls. As an example, the following
- * code is used under Linux for some of the basic hub control
- * messages.
- *
- * @code
- * static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength)
- * {
- * cvmx_usb_state_t *usb = (cvmx_usb_state_t *)hcd->hcd_priv;
- * cvmx_usb_port_status_t usb_port_status;
- * int port_status;
- * struct usb_hub_descriptor *desc;
- * unsigned long flags;
- *
- * switch (typeReq)
- * {
- * case ClearHubFeature:
- * DEBUG_ROOT_HUB("OcteonUSB: ClearHubFeature\n");
- * switch (wValue)
- * {
- * case C_HUB_LOCAL_POWER:
- * case C_HUB_OVER_CURRENT:
- * // Nothing required here
- * break;
- * default:
- * return -EINVAL;
- * }
- * break;
- * case ClearPortFeature:
- * DEBUG_ROOT_HUB("OcteonUSB: ClearPortFeature");
- * if (wIndex != 1)
- * {
- * DEBUG_ROOT_HUB(" INVALID\n");
- * return -EINVAL;
- * }
- *
- * switch (wValue)
- * {
- * case USB_PORT_FEAT_ENABLE:
- * DEBUG_ROOT_HUB(" ENABLE");
- * local_irq_save(flags);
- * cvmx_usb_disable(usb);
- * local_irq_restore(flags);
- * break;
- * case USB_PORT_FEAT_SUSPEND:
- * DEBUG_ROOT_HUB(" SUSPEND");
- * // Not supported on Octeon
- * break;
- * case USB_PORT_FEAT_POWER:
- * DEBUG_ROOT_HUB(" POWER");
- * // Not supported on Octeon
- * break;
- * case USB_PORT_FEAT_INDICATOR:
- * DEBUG_ROOT_HUB(" INDICATOR");
- * // Port inidicator not supported
- * break;
- * case USB_PORT_FEAT_C_CONNECTION:
- * DEBUG_ROOT_HUB(" C_CONNECTION");
- * // Clears drivers internal connect status change flag
- * cvmx_usb_set_status(usb, cvmx_usb_get_status(usb));
- * break;
- * case USB_PORT_FEAT_C_RESET:
- * DEBUG_ROOT_HUB(" C_RESET");
- * // Clears the driver's internal Port Reset Change flag
- * cvmx_usb_set_status(usb, cvmx_usb_get_status(usb));
- * break;
- * case USB_PORT_FEAT_C_ENABLE:
- * DEBUG_ROOT_HUB(" C_ENABLE");
- * // Clears the driver's internal Port Enable/Disable Change flag
- * cvmx_usb_set_status(usb, cvmx_usb_get_status(usb));
- * break;
- * case USB_PORT_FEAT_C_SUSPEND:
- * DEBUG_ROOT_HUB(" C_SUSPEND");
- * // Clears the driver's internal Port Suspend Change flag,
- * which is set when resume signaling on the host port is
- * complete
- * break;
- * case USB_PORT_FEAT_C_OVER_CURRENT:
- * DEBUG_ROOT_HUB(" C_OVER_CURRENT");
- * // Clears the driver's overcurrent Change flag
- * cvmx_usb_set_status(usb, cvmx_usb_get_status(usb));
- * break;
- * default:
- * DEBUG_ROOT_HUB(" UNKNOWN\n");
- * return -EINVAL;
- * }
- * DEBUG_ROOT_HUB("\n");
- * break;
- * case GetHubDescriptor:
- * DEBUG_ROOT_HUB("OcteonUSB: GetHubDescriptor\n");
- * desc = (struct usb_hub_descriptor *)buf;
- * desc->bDescLength = 9;
- * desc->bDescriptorType = 0x29;
- * desc->bNbrPorts = 1;
- * desc->wHubCharacteristics = 0x08;
- * desc->bPwrOn2PwrGood = 1;
- * desc->bHubContrCurrent = 0;
- * desc->bitmap[0] = 0;
- * desc->bitmap[1] = 0xff;
- * break;
- * case GetHubStatus:
- * DEBUG_ROOT_HUB("OcteonUSB: GetHubStatus\n");
- * *(__le32 *)buf = 0;
- * break;
- * case GetPortStatus:
- * DEBUG_ROOT_HUB("OcteonUSB: GetPortStatus");
- * if (wIndex != 1)
- * {
- * DEBUG_ROOT_HUB(" INVALID\n");
- * return -EINVAL;
- * }
- *
- * usb_port_status = cvmx_usb_get_status(usb);
- * port_status = 0;
- *
- * if (usb_port_status.connect_change)
- * {
- * port_status |= (1 << USB_PORT_FEAT_C_CONNECTION);
- * DEBUG_ROOT_HUB(" C_CONNECTION");
- * }
- *
- * if (usb_port_status.port_enabled)
- * {
- * port_status |= (1 << USB_PORT_FEAT_C_ENABLE);
- * DEBUG_ROOT_HUB(" C_ENABLE");
- * }
- *
- * if (usb_port_status.connected)
- * {
- * port_status |= (1 << USB_PORT_FEAT_CONNECTION);
- * DEBUG_ROOT_HUB(" CONNECTION");
- * }
- *
- * if (usb_port_status.port_enabled)
- * {
- * port_status |= (1 << USB_PORT_FEAT_ENABLE);
- * DEBUG_ROOT_HUB(" ENABLE");
- * }
+ * equivalent cvmx-usb API calls.
*
- * if (usb_port_status.port_over_current)
- * {
- * port_status |= (1 << USB_PORT_FEAT_OVER_CURRENT);
- * DEBUG_ROOT_HUB(" OVER_CURRENT");
- * }
- *
- * if (usb_port_status.port_powered)
- * {
- * port_status |= (1 << USB_PORT_FEAT_POWER);
- * DEBUG_ROOT_HUB(" POWER");
- * }
- *
- * if (usb_port_status.port_speed == CVMX_USB_SPEED_HIGH)
- * {
- * port_status |= (1 << USB_PORT_FEAT_HIGHSPEED);
- * DEBUG_ROOT_HUB(" HIGHSPEED");
- * }
- * else if (usb_port_status.port_speed == CVMX_USB_SPEED_LOW)
- * {
- * port_status |= (1 << USB_PORT_FEAT_LOWSPEED);
- * DEBUG_ROOT_HUB(" LOWSPEED");
- * }
- *
- * *((__le32 *)buf) = cpu_to_le32(port_status);
- * DEBUG_ROOT_HUB("\n");
- * break;
- * case SetHubFeature:
- * DEBUG_ROOT_HUB("OcteonUSB: SetHubFeature\n");
- * // No HUB features supported
- * break;
- * case SetPortFeature:
- * DEBUG_ROOT_HUB("OcteonUSB: SetPortFeature");
- * if (wIndex != 1)
- * {
- * DEBUG_ROOT_HUB(" INVALID\n");
- * return -EINVAL;
- * }
- *
- * switch (wValue)
- * {
- * case USB_PORT_FEAT_SUSPEND:
- * DEBUG_ROOT_HUB(" SUSPEND\n");
- * return -EINVAL;
- * case USB_PORT_FEAT_POWER:
- * DEBUG_ROOT_HUB(" POWER\n");
- * return -EINVAL;
- * case USB_PORT_FEAT_RESET:
- * DEBUG_ROOT_HUB(" RESET\n");
- * local_irq_save(flags);
- * cvmx_usb_disable(usb);
- * if (cvmx_usb_enable(usb))
- * DEBUG_ERROR("Failed to enable the port\n");
- * local_irq_restore(flags);
- * return 0;
- * case USB_PORT_FEAT_INDICATOR:
- * DEBUG_ROOT_HUB(" INDICATOR\n");
- * // Not supported
- * break;
- * default:
- * DEBUG_ROOT_HUB(" UNKNOWN\n");
- * return -EINVAL;
- * }
- * break;
- * default:
- * DEBUG_ROOT_HUB("OcteonUSB: Unknown root hub request\n");
- * return -EINVAL;
- * }
- * return 0;
- * }
- * @endcode
- *
- * <h2>Interrupts</h2>
+ * == Interrupts ==
*
* If you plan on using usb interrupts, cvmx_usb_poll() must be
* called on every usb interrupt. It will read the usb state,
@@ -441,154 +229,187 @@
*
 * If you aren't using interrupts, simply call cvmx_usb_poll()
* in your main processing loop.
- *
- * <hr>$Revision: 32636 $<hr>
*/
#ifndef __CVMX_USB_H__
#define __CVMX_USB_H__
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/**
- * Enumerations representing the status of function calls.
- */
-typedef enum
-{
- CVMX_USB_SUCCESS = 0, /**< There were no errors */
- CVMX_USB_INVALID_PARAM = -1, /**< A parameter to the function was invalid */
- CVMX_USB_NO_MEMORY = -2, /**< Insufficient resources were available for the request */
- CVMX_USB_BUSY = -3, /**< The resource is busy and cannot service the request */
- CVMX_USB_TIMEOUT = -4, /**< Waiting for an action timed out */
- CVMX_USB_INCORRECT_MODE = -5, /**< The function call doesn't work in the current USB
- mode. This happens when host only functions are
- called in device mode or vice versa */
-} cvmx_usb_status_t;
-
-/**
- * Enumerations representing the possible USB device speeds
- */
-typedef enum
-{
- CVMX_USB_SPEED_HIGH = 0, /**< Device is operation at 480Mbps */
- CVMX_USB_SPEED_FULL = 1, /**< Device is operation at 12Mbps */
- CVMX_USB_SPEED_LOW = 2, /**< Device is operation at 1.5Mbps */
-} cvmx_usb_speed_t;
-
/**
- * Enumeration representing the possible USB transfer types.
- */
-typedef enum
-{
- CVMX_USB_TRANSFER_CONTROL = 0, /**< USB transfer type control for hub and status transfers */
- CVMX_USB_TRANSFER_ISOCHRONOUS = 1, /**< USB transfer type isochronous for low priority periodic transfers */
- CVMX_USB_TRANSFER_BULK = 2, /**< USB transfer type bulk for large low priority transfers */
- CVMX_USB_TRANSFER_INTERRUPT = 3, /**< USB transfer type interrupt for high priority periodic transfers */
-} cvmx_usb_transfer_t;
-
-/**
- * Enumeration of the transfer directions
- */
-typedef enum
-{
- CVMX_USB_DIRECTION_OUT, /**< Data is transferring from Octeon to the device/host */
- CVMX_USB_DIRECTION_IN, /**< Data is transferring from the device/host to Octeon */
-} cvmx_usb_direction_t;
-
-/**
- * Enumeration of all possible status codes passed to callback
- * functions.
- */
-typedef enum
-{
- CVMX_USB_COMPLETE_SUCCESS, /**< The transaction / operation finished without any errors */
- CVMX_USB_COMPLETE_SHORT, /**< FIXME: This is currently not implemented */
- CVMX_USB_COMPLETE_CANCEL, /**< The transaction was canceled while in flight by a user call to cvmx_usb_cancel* */
- CVMX_USB_COMPLETE_ERROR, /**< The transaction aborted with an unexpected error status */
- CVMX_USB_COMPLETE_STALL, /**< The transaction received a USB STALL response from the device */
- CVMX_USB_COMPLETE_XACTERR, /**< The transaction failed with an error from the device even after a number of retries */
- CVMX_USB_COMPLETE_DATATGLERR, /**< The transaction failed with a data toggle error even after a number of retries */
- CVMX_USB_COMPLETE_BABBLEERR, /**< The transaction failed with a babble error */
- CVMX_USB_COMPLETE_FRAMEERR, /**< The transaction failed with a frame error even after a number of retries */
-} cvmx_usb_complete_t;
-
-/**
- * Structure returned containing the USB port status information.
- */
-typedef struct
-{
- uint32_t reserved : 25;
- uint32_t port_enabled : 1; /**< 1 = Usb port is enabled, 0 = disabled */
- uint32_t port_over_current : 1; /**< 1 = Over current detected, 0 = Over current not detected. Octeon doesn't support over current detection */
- uint32_t port_powered : 1; /**< 1 = Port power is being supplied to the device, 0 = power is off. Octeon doesn't support turning port power off */
- cvmx_usb_speed_t port_speed : 2; /**< Current port speed */
- uint32_t connected : 1; /**< 1 = A device is connected to the port, 0 = No device is connected */
- uint32_t connect_change : 1; /**< 1 = Device connected state changed since the last set status call */
-} cvmx_usb_port_status_t;
-
-/**
- * This is the structure of a Control packet header
- */
-typedef union
-{
- uint64_t u64;
- struct
- {
- uint64_t request_type : 8; /**< Bit 7 tells the direction: 1=IN, 0=OUT */
- uint64_t request : 8; /**< The standard usb request to make */
- uint64_t value : 16; /**< Value parameter for the request in little endian format */
- uint64_t index : 16; /**< Index for the request in little endian format */
- uint64_t length : 16; /**< Length of the data associated with this request in little endian format */
- } s;
-} cvmx_usb_control_header_t;
-
-/**
- * Descriptor for Isochronous packets
- */
-typedef struct
-{
- int offset; /**< This is the offset in bytes into the main buffer where this data is stored */
- int length; /**< This is the length in bytes of the data */
- cvmx_usb_complete_t status; /**< This is the status of this individual packet transfer */
-} cvmx_usb_iso_packet_t;
-
-/**
- * Possible callback reasons for the USB API.
- */
-typedef enum
-{
- CVMX_USB_CALLBACK_TRANSFER_COMPLETE,
- /**< A callback of this type is called when a submitted transfer
- completes. The completion callback will be called even if the
- transfer fails or is canceled. The status parameter will
- contain details of why he callback was called. */
- CVMX_USB_CALLBACK_PORT_CHANGED, /**< The status of the port changed. For example, someone may have
- plugged a device in. The status parameter contains
- CVMX_USB_COMPLETE_SUCCESS. Use cvmx_usb_get_status() to get
- the new port status. */
- __CVMX_USB_CALLBACK_END /**< Do not use. Used internally for array bounds */
-} cvmx_usb_callback_t;
+ * enum cvmx_usb_speed - the possible USB device speeds
+ *
+ * @CVMX_USB_SPEED_HIGH: Device operates at 480Mbps
+ * @CVMX_USB_SPEED_FULL: Device operates at 12Mbps
+ * @CVMX_USB_SPEED_LOW:  Device operates at 1.5Mbps
+ */
+enum cvmx_usb_speed {
+ CVMX_USB_SPEED_HIGH = 0,
+ CVMX_USB_SPEED_FULL = 1,
+ CVMX_USB_SPEED_LOW = 2,
+};
+
+/**
+ * enum cvmx_usb_transfer - the possible USB transfer types
+ *
+ * @CVMX_USB_TRANSFER_CONTROL: USB transfer type control for hub and status
+ * transfers
+ * @CVMX_USB_TRANSFER_ISOCHRONOUS: USB transfer type isochronous for low
+ * priority periodic transfers
+ * @CVMX_USB_TRANSFER_BULK: USB transfer type bulk for large low priority
+ * transfers
+ * @CVMX_USB_TRANSFER_INTERRUPT: USB transfer type interrupt for high priority
+ * periodic transfers
+ */
+enum cvmx_usb_transfer {
+ CVMX_USB_TRANSFER_CONTROL = 0,
+ CVMX_USB_TRANSFER_ISOCHRONOUS = 1,
+ CVMX_USB_TRANSFER_BULK = 2,
+ CVMX_USB_TRANSFER_INTERRUPT = 3,
+};
+
+/**
+ * enum cvmx_usb_direction - the transfer directions
+ *
+ * @CVMX_USB_DIRECTION_OUT: Data is transferring from Octeon to the device/host
+ * @CVMX_USB_DIRECTION_IN: Data is transferring from the device/host to Octeon
+ */
+enum cvmx_usb_direction {
+ CVMX_USB_DIRECTION_OUT,
+ CVMX_USB_DIRECTION_IN,
+};
+
+/**
+ * enum cvmx_usb_complete - possible callback function status codes
+ *
+ * @CVMX_USB_COMPLETE_SUCCESS: The transaction / operation finished without
+ * any errors
+ * @CVMX_USB_COMPLETE_SHORT: FIXME: This is currently not implemented
+ * @CVMX_USB_COMPLETE_CANCEL: The transaction was canceled while in flight by
+ * a user call to cvmx_usb_cancel
+ * @CVMX_USB_COMPLETE_ERROR: The transaction aborted with an unexpected
+ * error status
+ * @CVMX_USB_COMPLETE_STALL: The transaction received a USB STALL response
+ * from the device
+ * @CVMX_USB_COMPLETE_XACTERR: The transaction failed with an error from the
+ * device even after a number of retries
+ * @CVMX_USB_COMPLETE_DATATGLERR: The transaction failed with a data toggle
+ * error even after a number of retries
+ * @CVMX_USB_COMPLETE_BABBLEERR: The transaction failed with a babble error
+ * @CVMX_USB_COMPLETE_FRAMEERR: The transaction failed with a frame error
+ * even after a number of retries
+ */
+enum cvmx_usb_complete {
+ CVMX_USB_COMPLETE_SUCCESS,
+ CVMX_USB_COMPLETE_SHORT,
+ CVMX_USB_COMPLETE_CANCEL,
+ CVMX_USB_COMPLETE_ERROR,
+ CVMX_USB_COMPLETE_STALL,
+ CVMX_USB_COMPLETE_XACTERR,
+ CVMX_USB_COMPLETE_DATATGLERR,
+ CVMX_USB_COMPLETE_BABBLEERR,
+ CVMX_USB_COMPLETE_FRAMEERR,
+};
+
+/**
+ * struct cvmx_usb_port_status - the USB port status information
+ *
+ * @port_enabled: 1 = Usb port is enabled, 0 = disabled
+ * @port_over_current: 1 = Over current detected, 0 = Over current not
+ * detected. Octeon doesn't support over current detection.
+ * @port_powered: 1 = Port power is being supplied to the device, 0 =
+ * power is off. Octeon doesn't support turning port power
+ * off.
+ * @port_speed: Current port speed.
+ * @connected: 1 = A device is connected to the port, 0 = No device is
+ * connected.
+ * @connect_change: 1 = Device connected state changed since the last set
+ * status call.
+ */
+struct cvmx_usb_port_status {
+ uint32_t reserved : 25;
+ uint32_t port_enabled : 1;
+ uint32_t port_over_current : 1;
+ uint32_t port_powered : 1;
+ enum cvmx_usb_speed port_speed : 2;
+ uint32_t connected : 1;
+ uint32_t connect_change : 1;
+};
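As a sketch under the definitions above (not part of the patch), the status bits and the get/set pairing might be consumed as follows; the reporting function is hypothetical:

  #include <linux/printk.h>
  #include "cvmx-usb.h"

  static void report_port(struct cvmx_usb_state *state)
  {
          struct cvmx_usb_port_status st = cvmx_usb_get_status(state);

          if (st.connect_change && st.connected)
                  pr_info("USB device connected at %s speed\n",
                          st.port_speed == CVMX_USB_SPEED_HIGH ? "high" :
                          st.port_speed == CVMX_USB_SPEED_FULL ? "full" : "low");

          /* Writing the status back is what clears the "changed" bits. */
          cvmx_usb_set_status(state, st);
  }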
+
+/**
+ * union cvmx_usb_control_header - the structure of a Control packet header
+ *
+ * @s.request_type: Bit 7 tells the direction: 1=IN, 0=OUT
+ * @s.request:	The standard usb request to make
+ * @s.value:	Value parameter for the request in little endian format
+ * @s.index:	Index for the request in little endian format
+ * @s.length:	Length of the data associated with this request in
+ *		little endian format
+ */
+union cvmx_usb_control_header {
+ uint64_t u64;
+ struct {
+ uint64_t request_type : 8;
+ uint64_t request : 8;
+ uint64_t value : 16;
+ uint64_t index : 16;
+ uint64_t length : 16;
+ } s;
+};
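A hedged sketch of filling this header for a standard GET_DESCRIPTOR(DEVICE) request; the numeric constants are the standard USB values, and the cpu_to_le16() calls are shown only as an assumption since the fields are documented as little endian:

  union cvmx_usb_control_header hdr;

  hdr.u64 = 0;
  hdr.s.request_type = 0x80;                /* device-to-host, standard, device */
  hdr.s.request = 0x06;                     /* GET_DESCRIPTOR */
  hdr.s.value = cpu_to_le16(0x0100);        /* descriptor type DEVICE, index 0 */
  hdr.s.index = cpu_to_le16(0);
  hdr.s.length = cpu_to_le16(18);           /* device descriptor length */
  /* cvmx_usb_submit_control() takes the 64-bit physical address of this
   * 8-byte header, not a pointer to it. */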
+
+/**
+ * struct cvmx_usb_iso_packet - descriptor for Isochronous packets
+ *
+ * @offset: This is the offset in bytes into the main buffer where this data
+ * is stored.
+ * @length: This is the length in bytes of the data.
+ * @status: This is the status of this individual packet transfer.
+ */
+struct cvmx_usb_iso_packet {
+ int offset;
+ int length;
+ enum cvmx_usb_complete status;
+};
+
+/**
+ * enum cvmx_usb_callback - possible callback reasons for the USB API
+ *
+ * @CVMX_USB_CALLBACK_TRANSFER_COMPLETE: A callback of this type is called when
+ * a submitted transfer completes. The
+ * completion callback will be called even
+ * if the transfer fails or is canceled.
+ * The status parameter will contain
+ *                                       details of why the callback was called.
+ * @CVMX_USB_CALLBACK_PORT_CHANGED: The status of the port changed. For
+ * example, someone may have plugged a
+ * device in. The status parameter
+ * contains CVMX_USB_COMPLETE_SUCCESS. Use
+ * cvmx_usb_get_status() to get the new
+ * port status.
+ * @__CVMX_USB_CALLBACK_END: Do not use. Used internally for array
+ * bounds.
+ */
+enum cvmx_usb_callback {
+ CVMX_USB_CALLBACK_TRANSFER_COMPLETE,
+ CVMX_USB_CALLBACK_PORT_CHANGED,
+ __CVMX_USB_CALLBACK_END
+};
/**
* USB state internal data. The contents of this structure
* may change in future SDKs. No data in it should be referenced
* by user's of this API.
*/
-typedef struct
-{
- char data[65536];
-} cvmx_usb_state_t;
+struct cvmx_usb_state {
+ char data[65536];
+};
/**
* USB callback functions are always of the following type.
* The parameters are as follows:
* - state = USB device state populated by
* cvmx_usb_initialize().
- * - reason = The cvmx_usb_callback_t used to register
+ * - reason = The enum cvmx_usb_callback used to register
* the callback.
- * - status = The cvmx_usb_complete_t representing the
+ * - status = The enum cvmx_usb_complete representing the
* status code of a transaction.
* - pipe_handle = The Pipe that caused this callback, or
* -1 if this callback wasn't associated with a pipe.
@@ -599,487 +420,123 @@ typedef struct
* - user_data = The user pointer supplied to the
* function cvmx_usb_submit() or
* cvmx_usb_register_callback() */
-typedef void (*cvmx_usb_callback_func_t)(cvmx_usb_state_t *state,
- cvmx_usb_callback_t reason,
- cvmx_usb_complete_t status,
+typedef void (*cvmx_usb_callback_func_t)(struct cvmx_usb_state *state,
+ enum cvmx_usb_callback reason,
+ enum cvmx_usb_complete status,
int pipe_handle, int submit_handle,
int bytes_transferred, void *user_data);
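Purely for illustration (not part of the patch), a callback matching this typedef and its registration as the generic transfer-completion handler could look like the following; the function name and usb_state are hypothetical:

  static void my_complete(struct cvmx_usb_state *state,
                          enum cvmx_usb_callback reason,
                          enum cvmx_usb_complete status,
                          int pipe_handle, int submit_handle,
                          int bytes_transferred, void *user_data)
  {
          if (reason == CVMX_USB_CALLBACK_TRANSFER_COMPLETE &&
              status == CVMX_USB_COMPLETE_SUCCESS)
                  pr_debug("pipe %d: transferred %d bytes\n",
                           pipe_handle, bytes_transferred);
  }

  /* In the driver's init path, after cvmx_usb_initialize(): */
  cvmx_usb_register_callback(&usb_state, CVMX_USB_CALLBACK_TRANSFER_COMPLETE,
                             my_complete, NULL);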
/**
- * Flags to pass the initialization function.
- */
-typedef enum
-{
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI = 1<<0, /**< The USB port uses a 12MHz crystal as clock source
- at USB_XO and USB_XI. */
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND = 1<<1, /**< The USB port uses 12/24/48MHz 2.5V board clock
- source at USB_XO. USB_XI should be tied to GND.*/
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_AUTO = 0, /**< Automatically determine clock type based on function
- in cvmx-helper-board.c. */
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK = 3<<3, /**< Mask for clock speed field */
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ = 1<<3, /**< Speed of reference clock or crystal */
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ = 2<<3, /**< Speed of reference clock */
- CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ = 3<<3, /**< Speed of reference clock */
- /* Bits 3-4 used to encode the clock frequency */
- CVMX_USB_INITIALIZE_FLAGS_NO_DMA = 1<<5, /**< Disable DMA and used polled IO for data transfer use for the USB */
- CVMX_USB_INITIALIZE_FLAGS_DEBUG_TRANSFERS = 1<<16, /**< Enable extra console output for debugging USB transfers */
- CVMX_USB_INITIALIZE_FLAGS_DEBUG_CALLBACKS = 1<<17, /**< Enable extra console output for debugging USB callbacks */
- CVMX_USB_INITIALIZE_FLAGS_DEBUG_INFO = 1<<18, /**< Enable extra console output for USB informational data */
- CVMX_USB_INITIALIZE_FLAGS_DEBUG_CALLS = 1<<19, /**< Enable extra console output for every function call */
- CVMX_USB_INITIALIZE_FLAGS_DEBUG_CSRS = 1<<20, /**< Enable extra console output for every CSR access */
- CVMX_USB_INITIALIZE_FLAGS_DEBUG_ALL = ((CVMX_USB_INITIALIZE_FLAGS_DEBUG_CSRS<<1)-1) - (CVMX_USB_INITIALIZE_FLAGS_DEBUG_TRANSFERS-1),
-} cvmx_usb_initialize_flags_t;
+ * enum cvmx_usb_initialize_flags - flags to pass to the initialization function
+ *
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI: The USB port uses a 12MHz crystal
+ * as clock source at USB_XO and
+ * USB_XI.
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND: The USB port uses 12/24/48MHz 2.5V
+ * board clock source at USB_XO.
+ * USB_XI should be tied to GND.
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_AUTO: Automatically determine clock type
+ * based on function in
+ * cvmx-helper-board.c.
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK: Mask for clock speed field
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ: Speed of reference clock or
+ * crystal
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ: Speed of reference clock
+ * @CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ: Speed of reference clock
+ * @CVMX_USB_INITIALIZE_FLAGS_NO_DMA:         Disable DMA and use polled I/O
+ *                                            for USB data transfers
+ */
+enum cvmx_usb_initialize_flags {
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI = 1 << 0,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND = 1 << 1,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_AUTO = 0,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_MHZ_MASK = 3 << 3,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ = 1 << 3,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ = 2 << 3,
+ CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ = 3 << 3,
+ /* Bits 3-4 used to encode the clock frequency */
+ CVMX_USB_INITIALIZE_FLAGS_NO_DMA = 1 << 5,
+};
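As a sketch only, the clock flags above are OR-ed together and handed to cvmx_usb_initialize(); port 0 and the 12MHz crystal are example choices, not requirements:

  struct cvmx_usb_state usb_state;
  int ret;

  ret = cvmx_usb_initialize(&usb_state, 0,
                            CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI |
                            CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ);
  if (ret == 0)
          ret = cvmx_usb_enable(&usb_state);
  if (ret < 0)
          pr_err("Octeon USB port init failed: %d\n", ret);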
+
+/**
+ * enum cvmx_usb_pipe_flags - flags for passing when a pipe is created.
+ * Currently no flags need to be passed.
+ *
+ * @__CVMX_USB_PIPE_FLAGS_OPEN: Used internally to determine if a pipe is
+ * open. Do not use.
+ * @__CVMX_USB_PIPE_FLAGS_SCHEDULED: Used internally to determine if a pipe is
+ * actively using hardware. Do not use.
+ * @__CVMX_USB_PIPE_FLAGS_NEED_PING: Used internally to determine if a high
+ * speed pipe is in the ping state. Do not
+ * use.
+ */
+enum cvmx_usb_pipe_flags {
+ __CVMX_USB_PIPE_FLAGS_OPEN = 1 << 16,
+ __CVMX_USB_PIPE_FLAGS_SCHEDULED = 1 << 17,
+ __CVMX_USB_PIPE_FLAGS_NEED_PING = 1 << 18,
+};
-/**
- * Flags for passing when a pipe is created. Currently no flags
- * need to be passed.
- */
-typedef enum
-{
- CVMX_USB_PIPE_FLAGS_DEBUG_TRANSFERS = 1<<15,/**< Used to display CVMX_USB_INITIALIZE_FLAGS_DEBUG_TRANSFERS for a specific pipe only */
- __CVMX_USB_PIPE_FLAGS_OPEN = 1<<16, /**< Used internally to determine if a pipe is open. Do not use */
- __CVMX_USB_PIPE_FLAGS_SCHEDULED = 1<<17, /**< Used internally to determine if a pipe is actively using hardware. Do not use */
- __CVMX_USB_PIPE_FLAGS_NEED_PING = 1<<18, /**< Used internally to determine if a high speed pipe is in the ping state. Do not use */
-} cvmx_usb_pipe_flags_t;
-
-/**
- * Return the number of USB ports supported by this Octeon
- * chip. If the chip doesn't support USB, or is not supported
- * by this API, a zero will be returned. Most Octeon chips
- * support one usb port, but some support two ports.
- * cvmx_usb_initialize() must be called on independent
- * cvmx_usb_state_t structures.
- *
- * @return Number of port, zero if usb isn't supported
- */
extern int cvmx_usb_get_num_ports(void);
-
-/**
- * Initialize a USB port for use. This must be called before any
- * other access to the Octeon USB port is made. The port starts
- * off in the disabled state.
- *
- * @param state Pointer to an empty cvmx_usb_state_t structure
- * that will be populated by the initialize call.
- * This structure is then passed to all other USB
- * functions.
- * @param usb_port_number
- * Which Octeon USB port to initialize.
- * @param flags Flags to control hardware initialization. See
- * cvmx_usb_initialize_flags_t for the flag
- * definitions. Some flags are mandatory.
- *
- * @return CVMX_USB_SUCCESS or a negative error code defined in
- * cvmx_usb_status_t.
- */
-extern cvmx_usb_status_t cvmx_usb_initialize(cvmx_usb_state_t *state,
- int usb_port_number,
- cvmx_usb_initialize_flags_t flags);
-
-/**
- * Shutdown a USB port after a call to cvmx_usb_initialize().
- * The port should be disabled with all pipes closed when this
- * function is called.
- *
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- *
- * @return CVMX_USB_SUCCESS or a negative error code defined in
- * cvmx_usb_status_t.
- */
-extern cvmx_usb_status_t cvmx_usb_shutdown(cvmx_usb_state_t *state);
-
-/**
- * Enable a USB port. After this call succeeds, the USB port is
- * online and servicing requests.
- *
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- *
- * @return CVMX_USB_SUCCESS or a negative error code defined in
- * cvmx_usb_status_t.
- */
-extern cvmx_usb_status_t cvmx_usb_enable(cvmx_usb_state_t *state);
-
-/**
- * Disable a USB port. After this call the USB port will not
- * generate data transfers and will not generate events.
- * Transactions in process will fail and call their
- * associated callbacks.
- *
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- *
- * @return CVMX_USB_SUCCESS or a negative error code defined in
- * cvmx_usb_status_t.
- */
-extern cvmx_usb_status_t cvmx_usb_disable(cvmx_usb_state_t *state);
-
-/**
- * Get the current state of the USB port. Use this call to
- * determine if the usb port has anything connected, is enabled,
- * or has some sort of error condition. The return value of this
- * call has "changed" bits to signal of the value of some fields
- * have changed between calls. These "changed" fields are based
- * on the last call to cvmx_usb_set_status(). In order to clear
- * them, you must update the status through cvmx_usb_set_status().
- *
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- *
- * @return Port status information
- */
-extern cvmx_usb_port_status_t cvmx_usb_get_status(cvmx_usb_state_t *state);
-
-/**
- * Set the current state of the USB port. The status is used as
- * a reference for the "changed" bits returned by
- * cvmx_usb_get_status(). Other than serving as a reference, the
- * status passed to this function is not used. No fields can be
- * changed through this call.
- *
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- * @param port_status
- * Port status to set, most like returned by cvmx_usb_get_status()
- */
-extern void cvmx_usb_set_status(cvmx_usb_state_t *state, cvmx_usb_port_status_t port_status);
-
-/**
- * Open a virtual pipe between the host and a USB device. A pipe
- * must be opened before data can be transferred between a device
- * and Octeon.
- *
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- * @param flags Optional pipe flags defined in
- * cvmx_usb_pipe_flags_t.
- * @param device_addr
- * USB device address to open the pipe to
- * (0-127).
- * @param endpoint_num
- * USB endpoint number to open the pipe to
- * (0-15).
- * @param device_speed
- * The speed of the device the pipe is going
- * to. This must match the device's speed,
- * which may be different than the port speed.
- * @param max_packet The maximum packet length the device can
- * transmit/receive (low speed=0-8, full
- * speed=0-1023, high speed=0-1024). This value
- * comes from the standard endpoint descriptor
- * field wMaxPacketSize bits <10:0>.
- * @param transfer_type
- * The type of transfer this pipe is for.
- * @param transfer_dir
- * The direction the pipe is in. This is not
- * used for control pipes.
- * @param interval For ISOCHRONOUS and INTERRUPT transfers,
- * this is how often the transfer is scheduled
- * for. All other transfers should specify
- * zero. The units are in frames (8000/sec at
- * high speed, 1000/sec for full speed).
- * @param multi_count
- * For high speed devices, this is the maximum
- * allowed number of packet per microframe.
- * Specify zero for non high speed devices. This
- * value comes from the standard endpoint descriptor
- * field wMaxPacketSize bits <12:11>.
- * @param hub_device_addr
- * Hub device address this device is connected
- * to. Devices connected directly to Octeon
- * use zero. This is only used when the device
- * is full/low speed behind a high speed hub.
- * The address will be of the high speed hub,
- * not and full speed hubs after it.
- * @param hub_port Which port on the hub the device is
- * connected. Use zero for devices connected
- * directly to Octeon. Like hub_device_addr,
- * this is only used for full/low speed
- * devices behind a high speed hub.
- *
- * @return A non negative value is a pipe handle. Negative
- * values are failure codes from cvmx_usb_status_t.
- */
-extern int cvmx_usb_open_pipe(cvmx_usb_state_t *state,
- cvmx_usb_pipe_flags_t flags,
+extern int cvmx_usb_initialize(struct cvmx_usb_state *state, int usb_port_number,
+ enum cvmx_usb_initialize_flags flags);
+extern int cvmx_usb_shutdown(struct cvmx_usb_state *state);
+extern int cvmx_usb_enable(struct cvmx_usb_state *state);
+extern int cvmx_usb_disable(struct cvmx_usb_state *state);
+extern struct cvmx_usb_port_status cvmx_usb_get_status(struct cvmx_usb_state *state);
+extern void cvmx_usb_set_status(struct cvmx_usb_state *state, struct cvmx_usb_port_status port_status);
+extern int cvmx_usb_open_pipe(struct cvmx_usb_state *state,
+ enum cvmx_usb_pipe_flags flags,
int device_addr, int endpoint_num,
- cvmx_usb_speed_t device_speed, int max_packet,
- cvmx_usb_transfer_t transfer_type,
- cvmx_usb_direction_t transfer_dir, int interval,
+ enum cvmx_usb_speed device_speed, int max_packet,
+ enum cvmx_usb_transfer transfer_type,
+ enum cvmx_usb_direction transfer_dir, int interval,
int multi_count, int hub_device_addr,
int hub_port);
-
-/**
- * Call to submit a USB Bulk transfer to a pipe.
- *
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- * @param pipe_handle
- * Handle to the pipe for the transfer.
- * @param buffer Physical address of the data buffer in
- * memory. Note that this is NOT A POINTER, but
- * the full 64bit physical address of the
- * buffer. This may be zero if buffer_length is
- * zero.
- * @param buffer_length
- * Length of buffer in bytes.
- * @param callback Function to call when this transaction
- * completes. If the return value of this
- * function isn't an error, then this function
- * is guaranteed to be called when the
- * transaction completes. If this parameter is
- * NULL, then the generic callback registered
- * through cvmx_usb_register_callback is
- * called. If both are NULL, then there is no
- * way to know when a transaction completes.
- * @param user_data User supplied data returned when the
- * callback is called. This is only used if
- * callback in not NULL.
- *
- * @return A submitted transaction handle or negative on
- * failure. Negative values are failure codes from
- * cvmx_usb_status_t.
- */
-extern int cvmx_usb_submit_bulk(cvmx_usb_state_t *state, int pipe_handle,
+extern int cvmx_usb_submit_bulk(struct cvmx_usb_state *state, int pipe_handle,
uint64_t buffer, int buffer_length,
cvmx_usb_callback_func_t callback,
void *user_data);
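A non-authoritative example of the two calls above, opening a high-speed bulk IN pipe and queuing one transfer; the device address, endpoint number and buf_phys (a 64-bit physical buffer address) are placeholders, and my_complete is the callback sketched earlier:

  uint64_t buf_phys = 0;          /* physical address of a DMA buffer (placeholder) */
  int pipe, submit;

  pipe = cvmx_usb_open_pipe(&usb_state, 0 /* no pipe flags */,
                            1 /* device address */, 2 /* endpoint */,
                            CVMX_USB_SPEED_HIGH, 512 /* wMaxPacketSize */,
                            CVMX_USB_TRANSFER_BULK, CVMX_USB_DIRECTION_IN,
                            0 /* interval */, 0 /* multi_count */,
                            0 /* hub address */, 0 /* hub port */);
  if (pipe >= 0)
          submit = cvmx_usb_submit_bulk(&usb_state, pipe, buf_phys, 512,
                                        my_complete, NULL);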
-
-/**
- * Call to submit a USB Interrupt transfer to a pipe.
- *
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- * @param pipe_handle
- * Handle to the pipe for the transfer.
- * @param buffer Physical address of the data buffer in
- * memory. Note that this is NOT A POINTER, but
- * the full 64bit physical address of the
- * buffer. This may be zero if buffer_length is
- * zero.
- * @param buffer_length
- * Length of buffer in bytes.
- * @param callback Function to call when this transaction
- * completes. If the return value of this
- * function isn't an error, then this function
- * is guaranteed to be called when the
- * transaction completes. If this parameter is
- * NULL, then the generic callback registered
- * through cvmx_usb_register_callback is
- * called. If both are NULL, then there is no
- * way to know when a transaction completes.
- * @param user_data User supplied data returned when the
- * callback is called. This is only used if
- * callback in not NULL.
- *
- * @return A submitted transaction handle or negative on
- * failure. Negative values are failure codes from
- * cvmx_usb_status_t.
- */
-extern int cvmx_usb_submit_interrupt(cvmx_usb_state_t *state, int pipe_handle,
+extern int cvmx_usb_submit_interrupt(struct cvmx_usb_state *state, int pipe_handle,
uint64_t buffer, int buffer_length,
cvmx_usb_callback_func_t callback,
void *user_data);
-
-/**
- * Call to submit a USB Control transfer to a pipe.
- *
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- * @param pipe_handle
- * Handle to the pipe for the transfer.
- * @param control_header
- * USB 8 byte control header physical address.
- * Note that this is NOT A POINTER, but the
- * full 64bit physical address of the buffer.
- * @param buffer Physical address of the data buffer in
- * memory. Note that this is NOT A POINTER, but
- * the full 64bit physical address of the
- * buffer. This may be zero if buffer_length is
- * zero.
- * @param buffer_length
- * Length of buffer in bytes.
- * @param callback Function to call when this transaction
- * completes. If the return value of this
- * function isn't an error, then this function
- * is guaranteed to be called when the
- * transaction completes. If this parameter is
- * NULL, then the generic callback registered
- * through cvmx_usb_register_callback is
- * called. If both are NULL, then there is no
- * way to know when a transaction completes.
- * @param user_data User supplied data returned when the
- * callback is called. This is only used if
- * callback in not NULL.
- *
- * @return A submitted transaction handle or negative on
- * failure. Negative values are failure codes from
- * cvmx_usb_status_t.
- */
-extern int cvmx_usb_submit_control(cvmx_usb_state_t *state, int pipe_handle,
+extern int cvmx_usb_submit_control(struct cvmx_usb_state *state, int pipe_handle,
uint64_t control_header,
uint64_t buffer, int buffer_length,
cvmx_usb_callback_func_t callback,
void *user_data);
/**
- * Flags to pass the cvmx_usb_submit_isochronous() function.
- */
-typedef enum
-{
- CVMX_USB_ISOCHRONOUS_FLAGS_ALLOW_SHORT = 1<<0, /**< Do not return an error if a transfer is less than the maximum packet size of the device */
- CVMX_USB_ISOCHRONOUS_FLAGS_ASAP = 1<<1, /**< Schedule the transaction as soon as possible */
-} cvmx_usb_isochronous_flags_t;
-
-/**
- * Call to submit a USB Isochronous transfer to a pipe.
- *
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- * @param pipe_handle
- * Handle to the pipe for the transfer.
- * @param start_frame
- * Number of frames into the future to schedule
- * this transaction.
- * @param flags Flags to control the transfer. See
- * cvmx_usb_isochronous_flags_t for the flag
- * definitions.
- * @param number_packets
- * Number of sequential packets to transfer.
- * "packets" is a pointer to an array of this
- * many packet structures.
- * @param packets Description of each transfer packet as
- * defined by cvmx_usb_iso_packet_t. The array
- * pointed to here must stay valid until the
- * complete callback is called.
- * @param buffer Physical address of the data buffer in
- * memory. Note that this is NOT A POINTER, but
- * the full 64bit physical address of the
- * buffer. This may be zero if buffer_length is
- * zero.
- * @param buffer_length
- * Length of buffer in bytes.
- * @param callback Function to call when this transaction
- * completes. If the return value of this
- * function isn't an error, then this function
- * is guaranteed to be called when the
- * transaction completes. If this parameter is
- * NULL, then the generic callback registered
- * through cvmx_usb_register_callback is
- * called. If both are NULL, then there is no
- * way to know when a transaction completes.
- * @param user_data User supplied data returned when the
- * callback is called. This is only used if
- * callback in not NULL.
+ * enum cvmx_usb_isochronous_flags - flags to pass to the
+ *                                   cvmx_usb_submit_isochronous() function.
*
- * @return A submitted transaction handle or negative on
- * failure. Negative values are failure codes from
- * cvmx_usb_status_t.
+ * @CVMX_USB_ISOCHRONOUS_FLAGS_ALLOW_SHORT: Do not return an error if a transfer
+ * is less than the maximum packet size
+ * of the device.
+ * @CVMX_USB_ISOCHRONOUS_FLAGS_ASAP: Schedule the transaction as soon as
+ * possible.
*/
-extern int cvmx_usb_submit_isochronous(cvmx_usb_state_t *state, int pipe_handle,
+enum cvmx_usb_isochronous_flags {
+ CVMX_USB_ISOCHRONOUS_FLAGS_ALLOW_SHORT = 1 << 0,
+ CVMX_USB_ISOCHRONOUS_FLAGS_ASAP = 1 << 1,
+};
+
+extern int cvmx_usb_submit_isochronous(struct cvmx_usb_state *state, int pipe_handle,
int start_frame, int flags,
int number_packets,
- cvmx_usb_iso_packet_t packets[],
+ struct cvmx_usb_iso_packet packets[],
uint64_t buffer, int buffer_length,
cvmx_usb_callback_func_t callback,
void *user_data);
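For illustration only, an isochronous submission with the ASAP flag; iso_pipe, buf_phys and the 192-byte packet size are assumptions, and the packet array must stay valid until the completion callback runs:

  struct cvmx_usb_iso_packet pkts[8];
  int i, submit;

  for (i = 0; i < 8; i++) {
          pkts[i].offset = i * 192;     /* byte offset into the buffer */
          pkts[i].length = 192;
  }
  submit = cvmx_usb_submit_isochronous(&usb_state, iso_pipe,
                                       0 /* start_frame */,
                                       CVMX_USB_ISOCHRONOUS_FLAGS_ASAP,
                                       8, pkts, buf_phys, 8 * 192,
                                       my_complete, NULL);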
-
-/**
- * Cancel one outstanding request in a pipe. Canceling a request
- * can fail if the transaction has already completed before cancel
- * is called. Even after a successful cancel call, it may take
- * a frame or two for the cvmx_usb_poll() function to call the
- * associated callback.
- *
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- * @param pipe_handle
- * Pipe handle to cancel requests in.
- * @param submit_handle
- * Handle to transaction to cancel, returned by the submit function.
- *
- * @return CVMX_USB_SUCCESS or a negative error code defined in
- * cvmx_usb_status_t.
- */
-extern cvmx_usb_status_t cvmx_usb_cancel(cvmx_usb_state_t *state,
- int pipe_handle, int submit_handle);
-
-
-/**
- * Cancel all outstanding requests in a pipe. Logically all this
- * does is call cvmx_usb_cancel() in a loop.
- *
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- * @param pipe_handle
- * Pipe handle to cancel requests in.
- *
- * @return CVMX_USB_SUCCESS or a negative error code defined in
- * cvmx_usb_status_t.
- */
-extern cvmx_usb_status_t cvmx_usb_cancel_all(cvmx_usb_state_t *state,
- int pipe_handle);
-
-/**
- * Close a pipe created with cvmx_usb_open_pipe().
- *
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- * @param pipe_handle
- * Pipe handle to close.
- *
- * @return CVMX_USB_SUCCESS or a negative error code defined in
- * cvmx_usb_status_t. CVMX_USB_BUSY is returned if the
- * pipe has outstanding transfers.
- */
-extern cvmx_usb_status_t cvmx_usb_close_pipe(cvmx_usb_state_t *state,
- int pipe_handle);
-
-/**
- * Register a function to be called when various USB events occur.
- *
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- * @param reason Which event to register for.
- * @param callback Function to call when the event occurs.
- * @param user_data User data parameter to the function.
- *
- * @return CVMX_USB_SUCCESS or a negative error code defined in
- * cvmx_usb_status_t.
- */
-extern cvmx_usb_status_t cvmx_usb_register_callback(cvmx_usb_state_t *state,
- cvmx_usb_callback_t reason,
- cvmx_usb_callback_func_t callback,
- void *user_data);
-
-/**
- * Get the current USB protocol level frame number. The frame
- * number is always in the range of 0-0x7ff.
- *
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- *
- * @return USB frame number
- */
-extern int cvmx_usb_get_frame_number(cvmx_usb_state_t *state);
-
-/**
- * Poll the USB block for status and call all needed callback
- * handlers. This function is meant to be called in the interrupt
- * handler for the USB controller. It can also be called
- * periodically in a loop for non-interrupt based operation.
- *
- * @param state USB device state populated by
- * cvmx_usb_initialize().
- *
- * @return CVMX_USB_SUCCESS or a negative error code defined in
- * cvmx_usb_status_t.
- */
-extern cvmx_usb_status_t cvmx_usb_poll(cvmx_usb_state_t *state);
-
-#ifdef __cplusplus
-}
-#endif
+extern int cvmx_usb_cancel(struct cvmx_usb_state *state, int pipe_handle,
+ int submit_handle);
+extern int cvmx_usb_cancel_all(struct cvmx_usb_state *state, int pipe_handle);
+extern int cvmx_usb_close_pipe(struct cvmx_usb_state *state, int pipe_handle);
+extern int cvmx_usb_register_callback(struct cvmx_usb_state *state,
+ enum cvmx_usb_callback reason,
+ cvmx_usb_callback_func_t callback,
+ void *user_data);
+extern int cvmx_usb_get_frame_number(struct cvmx_usb_state *state);
+extern int cvmx_usb_poll(struct cvmx_usb_state *state);
#endif /* __CVMX_USB_H__ */
diff --git a/drivers/staging/octeon-usb/cvmx-usbcx-defs.h b/drivers/staging/octeon-usb/cvmx-usbcx-defs.h
index 394e84662ce..d349d77bc35 100644
--- a/drivers/staging/octeon-usb/cvmx-usbcx-defs.h
+++ b/drivers/staging/octeon-usb/cvmx-usbcx-defs.h
@@ -140,7 +140,6 @@ union cvmx_usbcx_gahbcfg {
uint32_t glblintrmsk : 1;
} s;
};
-typedef union cvmx_usbcx_gahbcfg cvmx_usbcx_gahbcfg_t;
/**
* cvmx_usbc#_ghwcfg3
@@ -210,7 +209,6 @@ union cvmx_usbcx_ghwcfg3 {
uint32_t xfersizewidth : 4;
} s;
};
-typedef union cvmx_usbcx_ghwcfg3 cvmx_usbcx_ghwcfg3_t;
/**
* cvmx_usbc#_gintmsk
@@ -299,7 +297,6 @@ union cvmx_usbcx_gintmsk {
uint32_t reserved_0_0 : 1;
} s;
};
-typedef union cvmx_usbcx_gintmsk cvmx_usbcx_gintmsk_t;
/**
* cvmx_usbc#_gintsts
@@ -529,7 +526,6 @@ union cvmx_usbcx_gintsts {
uint32_t curmod : 1;
} s;
};
-typedef union cvmx_usbcx_gintsts cvmx_usbcx_gintsts_t;
/**
* cvmx_usbc#_gnptxfsiz
@@ -556,7 +552,6 @@ union cvmx_usbcx_gnptxfsiz {
uint32_t nptxfstaddr : 16;
} s;
};
-typedef union cvmx_usbcx_gnptxfsiz cvmx_usbcx_gnptxfsiz_t;
/**
* cvmx_usbc#_gnptxsts
@@ -609,7 +604,6 @@ union cvmx_usbcx_gnptxsts {
uint32_t nptxfspcavail : 16;
} s;
};
-typedef union cvmx_usbcx_gnptxsts cvmx_usbcx_gnptxsts_t;
/**
* cvmx_usbc#_grstctl
@@ -737,7 +731,6 @@ union cvmx_usbcx_grstctl {
uint32_t csftrst : 1;
} s;
};
-typedef union cvmx_usbcx_grstctl cvmx_usbcx_grstctl_t;
/**
* cvmx_usbc#_grxfsiz
@@ -761,7 +754,6 @@ union cvmx_usbcx_grxfsiz {
uint32_t rxfdep : 16;
} s;
};
-typedef union cvmx_usbcx_grxfsiz cvmx_usbcx_grxfsiz_t;
/**
* cvmx_usbc#_grxstsph
@@ -807,7 +799,6 @@ union cvmx_usbcx_grxstsph {
uint32_t chnum : 4;
} s;
};
-typedef union cvmx_usbcx_grxstsph cvmx_usbcx_grxstsph_t;
/**
* cvmx_usbc#_gusbcfg
@@ -896,7 +887,6 @@ union cvmx_usbcx_gusbcfg {
uint32_t toutcal : 3;
} s;
};
-typedef union cvmx_usbcx_gusbcfg cvmx_usbcx_gusbcfg_t;
/**
* cvmx_usbc#_haint
@@ -922,7 +912,6 @@ union cvmx_usbcx_haint {
uint32_t haint : 16;
} s;
};
-typedef union cvmx_usbcx_haint cvmx_usbcx_haint_t;
/**
* cvmx_usbc#_haintmsk
@@ -947,7 +936,6 @@ union cvmx_usbcx_haintmsk {
uint32_t haintmsk : 16;
} s;
};
-typedef union cvmx_usbcx_haintmsk cvmx_usbcx_haintmsk_t;
/**
* cvmx_usbc#_hcchar#
@@ -1027,7 +1015,6 @@ union cvmx_usbcx_hccharx {
uint32_t mps : 11;
} s;
};
-typedef union cvmx_usbcx_hccharx cvmx_usbcx_hccharx_t;
/**
* cvmx_usbc#_hcfg
@@ -1075,7 +1062,6 @@ union cvmx_usbcx_hcfg {
uint32_t fslspclksel : 2;
} s;
};
-typedef union cvmx_usbcx_hcfg cvmx_usbcx_hcfg_t;
/**
* cvmx_usbc#_hcint#
@@ -1126,7 +1112,6 @@ union cvmx_usbcx_hcintx {
uint32_t xfercompl : 1;
} s;
};
-typedef union cvmx_usbcx_hcintx cvmx_usbcx_hcintx_t;
/**
* cvmx_usbc#_hcintmsk#
@@ -1168,7 +1153,6 @@ union cvmx_usbcx_hcintmskx {
uint32_t xfercomplmsk : 1;
} s;
};
-typedef union cvmx_usbcx_hcintmskx cvmx_usbcx_hcintmskx_t;
/**
* cvmx_usbc#_hcsplt#
@@ -1213,7 +1197,6 @@ union cvmx_usbcx_hcspltx {
uint32_t prtaddr : 7;
} s;
};
-typedef union cvmx_usbcx_hcspltx cvmx_usbcx_hcspltx_t;
/**
* cvmx_usbc#_hctsiz#
@@ -1257,7 +1240,6 @@ union cvmx_usbcx_hctsizx {
uint32_t xfersize : 19;
} s;
};
-typedef union cvmx_usbcx_hctsizx cvmx_usbcx_hctsizx_t;
/**
* cvmx_usbc#_hfir
@@ -1293,7 +1275,6 @@ union cvmx_usbcx_hfir {
uint32_t frint : 16;
} s;
};
-typedef union cvmx_usbcx_hfir cvmx_usbcx_hfir_t;
/**
* cvmx_usbc#_hfnum
@@ -1323,7 +1304,6 @@ union cvmx_usbcx_hfnum {
uint32_t frnum : 16;
} s;
};
-typedef union cvmx_usbcx_hfnum cvmx_usbcx_hfnum_t;
/**
* cvmx_usbc#_hprt
@@ -1464,7 +1444,6 @@ union cvmx_usbcx_hprt {
uint32_t prtconnsts : 1;
} s;
};
-typedef union cvmx_usbcx_hprt cvmx_usbcx_hprt_t;
/**
* cvmx_usbc#_hptxfsiz
@@ -1489,7 +1468,6 @@ union cvmx_usbcx_hptxfsiz {
uint32_t ptxfstaddr : 16;
} s;
};
-typedef union cvmx_usbcx_hptxfsiz cvmx_usbcx_hptxfsiz_t;
/**
* cvmx_usbc#_hptxsts
@@ -1546,6 +1524,5 @@ union cvmx_usbcx_hptxsts {
uint32_t ptxfspcavail : 16;
} s;
};
-typedef union cvmx_usbcx_hptxsts cvmx_usbcx_hptxsts_t;
#endif
diff --git a/drivers/staging/octeon-usb/cvmx-usbnx-defs.h b/drivers/staging/octeon-usb/cvmx-usbnx-defs.h
index 96d706770fc..e06aafa5726 100644
--- a/drivers/staging/octeon-usb/cvmx-usbnx-defs.h
+++ b/drivers/staging/octeon-usb/cvmx-usbnx-defs.h
@@ -337,7 +337,6 @@ union cvmx_usbnx_clk_ctl {
struct cvmx_usbnx_clk_ctl_cn50xx cn52xx;
struct cvmx_usbnx_clk_ctl_cn50xx cn56xx;
};
-typedef union cvmx_usbnx_clk_ctl cvmx_usbnx_clk_ctl_t;
/**
* cvmx_usbn#_usbp_ctl_status
@@ -882,6 +881,5 @@ union cvmx_usbnx_usbp_ctl_status {
uint64_t ate_reset : 1;
} cn52xx;
};
-typedef union cvmx_usbnx_usbp_ctl_status cvmx_usbnx_usbp_ctl_status_t;
#endif
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index d156b603ae6..5dbbd14ec61 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -26,7 +26,7 @@
struct octeon_hcd {
spinlock_t lock;
- cvmx_usb_state_t usb;
+ struct cvmx_usb_state usb;
struct tasklet_struct dequeue_tasklet;
struct list_head dequeue_list;
};
@@ -42,7 +42,7 @@ static inline struct usb_hcd *octeon_to_hcd(struct octeon_hcd *p)
return container_of((void *)p, struct usb_hcd, hcd_priv);
}
-static inline struct octeon_hcd *cvmx_usb_to_octeon(cvmx_usb_state_t *p)
+static inline struct octeon_hcd *cvmx_usb_to_octeon(struct cvmx_usb_state *p)
{
return container_of(p, struct octeon_hcd, usb);
}
@@ -58,9 +58,9 @@ static irqreturn_t octeon_usb_irq(struct usb_hcd *hcd)
return IRQ_HANDLED;
}
-static void octeon_usb_port_callback(cvmx_usb_state_t *usb,
- cvmx_usb_callback_t reason,
- cvmx_usb_complete_t status,
+static void octeon_usb_port_callback(struct cvmx_usb_state *usb,
+ enum cvmx_usb_callback reason,
+ enum cvmx_usb_complete status,
int pipe_handle,
int submit_handle,
int bytes_transferred,
@@ -105,9 +105,9 @@ static int octeon_usb_get_frame_number(struct usb_hcd *hcd)
return cvmx_usb_get_frame_number(&priv->usb);
}
-static void octeon_usb_urb_complete_callback(cvmx_usb_state_t *usb,
- cvmx_usb_callback_t reason,
- cvmx_usb_complete_t status,
+static void octeon_usb_urb_complete_callback(struct cvmx_usb_state *usb,
+ enum cvmx_usb_callback reason,
+ enum cvmx_usb_complete status,
int pipe_handle,
int submit_handle,
int bytes_transferred,
@@ -141,7 +141,8 @@ static void octeon_usb_urb_complete_callback(cvmx_usb_state_t *usb,
* The pointer to the private list is stored in the setup_packet
* field.
*/
- cvmx_usb_iso_packet_t *iso_packet = (cvmx_usb_iso_packet_t *) urb->setup_packet;
+ struct cvmx_usb_iso_packet *iso_packet =
+ (struct cvmx_usb_iso_packet *) urb->setup_packet;
/* Recalculate the transfer size by adding up each packet */
urb->actual_length = 0;
for (i = 0; i < urb->number_of_packets; i++) {
@@ -208,7 +209,7 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
int submit_handle = -1;
int pipe_handle;
unsigned long flags;
- cvmx_usb_iso_packet_t *iso_packet;
+ struct cvmx_usb_iso_packet *iso_packet;
struct usb_host_endpoint *ep = urb->ep;
urb->status = 0;
@@ -216,8 +217,8 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
spin_lock_irqsave(&priv->lock, flags);
if (!ep->hcpriv) {
- cvmx_usb_transfer_t transfer_type;
- cvmx_usb_speed_t speed;
+ enum cvmx_usb_transfer transfer_type;
+ enum cvmx_usb_speed speed;
int split_device = 0;
int split_port = 0;
switch (usb_pipetype(urb->pipe)) {
@@ -305,7 +306,9 @@ static int octeon_usb_urb_enqueue(struct usb_hcd *hcd,
* Allocate a structure to use for our private list of
* isochronous packets.
*/
- iso_packet = kmalloc(urb->number_of_packets * sizeof(cvmx_usb_iso_packet_t), GFP_ATOMIC);
+ iso_packet = kmalloc(urb->number_of_packets *
+ sizeof(struct cvmx_usb_iso_packet),
+ GFP_ATOMIC);
if (iso_packet) {
int i;
/* Fill the list with the data from the URB */
@@ -440,7 +443,7 @@ static void octeon_usb_endpoint_disable(struct usb_hcd *hcd, struct usb_host_end
static int octeon_usb_hub_status_data(struct usb_hcd *hcd, char *buf)
{
struct octeon_hcd *priv = hcd_to_octeon(hcd);
- cvmx_usb_port_status_t port_status;
+ struct cvmx_usb_port_status port_status;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
@@ -456,7 +459,7 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
{
struct octeon_hcd *priv = hcd_to_octeon(hcd);
struct device *dev = hcd->self.controller;
- cvmx_usb_port_status_t usb_port_status;
+ struct cvmx_usb_port_status usb_port_status;
int port_status;
struct usb_hub_descriptor *desc;
unsigned long flags;
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
index fe40e0b6f67..2ff015d8450 100644
--- a/drivers/staging/olpc_dcon/Kconfig
+++ b/drivers/staging/olpc_dcon/Kconfig
@@ -4,9 +4,14 @@ config FB_OLPC_DCON
select I2C
select BACKLIGHT_CLASS_DEVICE
---help---
- Add support for the OLPC XO DCON controller. This controller is
- only available on OLPC platforms. Unless you have one of these
- platforms, you will want to say 'N'.
+ In order to support very low power operation, the XO laptop uses a
+ secondary Display CONtroller, or DCON. This secondary controller
+ is present in the video pipeline between the primary display
+	  controller (integrated into the processor or chipset) and the LCD
+ panel. It allows the main processor/display controller to be
+ completely powered off while still retaining an image on the display.
+ This controller is only available on OLPC platforms. Unless you have
+ one of these platforms, you will want to say 'N'.
config FB_OLPC_DCON_1
bool "OLPC XO-1 DCON support"
diff --git a/drivers/staging/olpc_dcon/TODO b/drivers/staging/olpc_dcon/TODO
index 35f9cda7be1..61c2e65ac35 100644
--- a/drivers/staging/olpc_dcon/TODO
+++ b/drivers/staging/olpc_dcon/TODO
@@ -1,16 +1,9 @@
TODO:
- - checkpatch.pl cleanups
- see if vx855 gpio API can be made similar enough to cs5535 so we can
share more code
- allow simultaneous XO-1 and XO-1.5 support
- - console event notifier support
- - drop global variables, use a proper olpc_dcon_priv struct
- - audit code for unnecessary code; old unsupported prototype
- workarounds, ancient variables (noaa?), etc
- - verify sane i2c API usage, update to new stuff if necessary
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and
copy:
- Andres Salomon <dilinger@queued.net>
- Chris Ball <cjb@laptop.org>
- Jon Nettleton <jon.nettleton@gmail.com>
+ Daniel Drake <dsd@laptop.org>
+ Jens Frederich <jfrederich@gmail.com>
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index 193e1c68bb4..198595e8d74 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -90,9 +90,10 @@ static int dcon_hw_init(struct dcon_priv *dcon, int is_init)
/* SDRAM setup/hold time */
dcon_write(dcon, 0x3a, 0xc040);
- dcon_write(dcon, 0x41, 0x0000);
- dcon_write(dcon, 0x41, 0x0101);
- dcon_write(dcon, 0x42, 0x0101);
+ dcon_write(dcon, DCON_REG_MEM_OPT_A, 0x0000); /* clear option bits */
+ dcon_write(dcon, DCON_REG_MEM_OPT_A,
+ MEM_DLL_CLOCK_DELAY | MEM_POWER_DOWN);
+ dcon_write(dcon, DCON_REG_MEM_OPT_B, MEM_SOFT_RESET);
/* Colour swizzle, AA, no passthrough, backlight */
if (is_init) {
@@ -121,30 +122,31 @@ err:
static int dcon_bus_stabilize(struct dcon_priv *dcon, int is_powered_down)
{
unsigned long timeout;
+ u8 pm;
int x;
power_up:
if (is_powered_down) {
- x = 1;
- x = olpc_ec_cmd(0x26, (unsigned char *)&x, 1, NULL, 0);
+ pm = 1;
+ x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
if (x) {
pr_warn("unable to force dcon to power up: %d!\n", x);
return x;
}
- msleep(10); /* we'll be conservative */
+ usleep_range(10000, 11000); /* we'll be conservative */
}
pdata->bus_stabilize_wiggle();
for (x = -1, timeout = 50; timeout && x < 0; timeout--) {
- msleep(1);
+ usleep_range(1000, 1100);
x = dcon_read(dcon, DCON_REG_ID);
}
if (x < 0) {
pr_err("unable to stabilize dcon's smbus, reasserting power and praying.\n");
BUG_ON(olpc_board_at_least(olpc_board(0xc2)));
- x = 0;
- olpc_ec_cmd(0x26, (unsigned char *)&x, 1, NULL, 0);
+ pm = 0;
+ olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
msleep(100);
is_powered_down = 1;
goto power_up; /* argh, stupid hardware.. */
@@ -207,8 +209,8 @@ static void dcon_sleep(struct dcon_priv *dcon, bool sleep)
return;
if (sleep) {
- x = 0;
- x = olpc_ec_cmd(0x26, (unsigned char *)&x, 1, NULL, 0);
+ u8 pm = 0;
+ x = olpc_ec_cmd(EC_DCON_POWER_MODE, &pm, 1, NULL, 0);
if (x)
pr_warn("unable to force dcon to power down: %d!\n", x);
else
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
index 997bded2949..e2663b189c6 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -22,15 +22,24 @@
#define MODE_DEBUG (1<<14)
#define MODE_SELFTEST (1<<15)
-#define DCON_REG_HRES 2
-#define DCON_REG_HTOTAL 3
-#define DCON_REG_HSYNC_WIDTH 4
-#define DCON_REG_VRES 5
-#define DCON_REG_VTOTAL 6
-#define DCON_REG_VSYNC_WIDTH 7
-#define DCON_REG_TIMEOUT 8
-#define DCON_REG_SCAN_INT 9
-#define DCON_REG_BRIGHT 10
+#define DCON_REG_HRES 0x2
+#define DCON_REG_HTOTAL 0x3
+#define DCON_REG_HSYNC_WIDTH 0x4
+#define DCON_REG_VRES 0x5
+#define DCON_REG_VTOTAL 0x6
+#define DCON_REG_VSYNC_WIDTH 0x7
+#define DCON_REG_TIMEOUT 0x8
+#define DCON_REG_SCAN_INT 0x9
+#define DCON_REG_BRIGHT 0xa
+#define DCON_REG_MEM_OPT_A 0x41
+#define DCON_REG_MEM_OPT_B 0x42
+
+/* Load Delay Locked Loop (DLL) settings for clock delay */
+#define MEM_DLL_CLOCK_DELAY (1<<0)
+/* Memory controller power down function */
+#define MEM_POWER_DOWN (1<<8)
+/* Memory controller software reset */
+#define MEM_SOFT_RESET (1<<0)
/* Status values */
diff --git a/drivers/staging/ozwpan/Kbuild b/drivers/staging/ozwpan/Makefile
index 1766a268d5f..29529c1a8e3 100644
--- a/drivers/staging/ozwpan/Kbuild
+++ b/drivers/staging/ozwpan/Makefile
@@ -2,6 +2,7 @@
# Copyright (c) 2011 Ozmo Inc
# Released under the GNU General Public License Version 2 (GPLv2).
# -----------------------------------------------------------------------------
+
obj-$(CONFIG_USB_WPAN_HCD) += ozwpan.o
ozwpan-y := \
ozmain.o \
@@ -12,7 +13,4 @@ ozwpan-y := \
ozeltbuf.o \
ozproto.o \
ozcdev.o \
- ozurbparanoia.o \
- oztrace.o
-
-
+ ozurbparanoia.o
diff --git a/drivers/staging/ozwpan/ozcdev.c b/drivers/staging/ozwpan/ozcdev.c
index 374fdc39864..6ccb64fb078 100644
--- a/drivers/staging/ozwpan/ozcdev.c
+++ b/drivers/staging/ozwpan/ozcdev.c
@@ -11,16 +11,14 @@
#include <linux/etherdevice.h>
#include <linux/poll.h>
#include <linux/sched.h>
-#include "ozconfig.h"
+#include "ozdbg.h"
#include "ozprotocol.h"
-#include "oztrace.h"
#include "ozappif.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozcdev.h"
-/*------------------------------------------------------------------------------
- */
+
#define OZ_RD_BUF_SZ 256
struct oz_cdev {
dev_t devnum;
@@ -40,16 +38,17 @@ struct oz_serial_ctx {
int rd_in;
int rd_out;
};
-/*------------------------------------------------------------------------------
- */
+
static struct oz_cdev g_cdev;
static struct class *g_oz_class;
-/*------------------------------------------------------------------------------
+
+/*
* Context: process and softirq
*/
static struct oz_serial_ctx *oz_cdev_claim_ctx(struct oz_pd *pd)
{
struct oz_serial_ctx *ctx;
+
spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
if (ctx)
@@ -57,37 +56,40 @@ static struct oz_serial_ctx *oz_cdev_claim_ctx(struct oz_pd *pd)
spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
return ctx;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
static void oz_cdev_release_ctx(struct oz_serial_ctx *ctx)
{
if (atomic_dec_and_test(&ctx->ref_count)) {
- oz_trace("Dealloc serial context.\n");
+ oz_dbg(ON, "Dealloc serial context\n");
kfree(ctx);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_cdev_open(struct inode *inode, struct file *filp)
{
- struct oz_cdev *dev;
- oz_trace("oz_cdev_open()\n");
- oz_trace("major = %d minor = %d\n", imajor(inode), iminor(inode));
- dev = container_of(inode->i_cdev, struct oz_cdev, cdev);
+ struct oz_cdev *dev = container_of(inode->i_cdev, struct oz_cdev, cdev);
+
+ oz_dbg(ON, "major = %d minor = %d\n", imajor(inode), iminor(inode));
+
filp->private_data = dev;
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_cdev_release(struct inode *inode, struct file *filp)
{
- oz_trace("oz_cdev_release()\n");
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static ssize_t oz_cdev_read(struct file *filp, char __user *buf, size_t count,
@@ -139,7 +141,8 @@ out2:
oz_pd_put(pd);
return count;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
@@ -158,7 +161,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
oz_pd_get(pd);
spin_unlock_bh(&g_cdev.lock);
if (pd == NULL)
- return -1;
+ return -ENXIO;
+ if (!(pd->state & OZ_PD_S_CONNECTED))
+ return -EAGAIN;
eb = &pd->elt_buff;
ei = oz_elt_info_alloc(eb);
if (ei == NULL) {
@@ -196,7 +201,8 @@ out:
oz_pd_put(pd);
return count;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_set_active_pd(const u8 *addr)
@@ -204,6 +210,7 @@ static int oz_set_active_pd(const u8 *addr)
int rc = 0;
struct oz_pd *pd;
struct oz_pd *old_pd;
+
pd = oz_pd_find(addr);
if (pd) {
spin_lock_bh(&g_cdev.lock);
@@ -229,13 +236,15 @@ static int oz_set_active_pd(const u8 *addr)
}
return rc;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
int rc = 0;
+
if (_IOC_TYPE(cmd) != OZ_IOCTL_MAGIC)
return -ENOTTY;
if (_IOC_NR(cmd) > OZ_IOCTL_MAX)
@@ -251,7 +260,7 @@ static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
switch (cmd) {
case OZ_IOCTL_GET_PD_LIST: {
struct oz_pd_list list;
- oz_trace("OZ_IOCTL_GET_PD_LIST\n");
+ oz_dbg(ON, "OZ_IOCTL_GET_PD_LIST\n");
memset(&list, 0, sizeof(list));
list.count = oz_get_pd_list(list.addr, OZ_MAX_PDS);
if (copy_to_user((void __user *)arg, &list,
@@ -261,7 +270,7 @@ static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
break;
case OZ_IOCTL_SET_ACTIVE_PD: {
u8 addr[ETH_ALEN];
- oz_trace("OZ_IOCTL_SET_ACTIVE_PD\n");
+ oz_dbg(ON, "OZ_IOCTL_SET_ACTIVE_PD\n");
if (copy_from_user(addr, (void __user *)arg, ETH_ALEN))
return -EFAULT;
rc = oz_set_active_pd(addr);
@@ -269,7 +278,7 @@ static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
break;
case OZ_IOCTL_GET_ACTIVE_PD: {
u8 addr[ETH_ALEN];
- oz_trace("OZ_IOCTL_GET_ACTIVE_PD\n");
+ oz_dbg(ON, "OZ_IOCTL_GET_ACTIVE_PD\n");
spin_lock_bh(&g_cdev.lock);
memcpy(addr, g_cdev.active_addr, ETH_ALEN);
spin_unlock_bh(&g_cdev.lock);
@@ -295,14 +304,16 @@ static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
}
return rc;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static unsigned int oz_cdev_poll(struct file *filp, poll_table *wait)
{
unsigned int ret = 0;
struct oz_cdev *dev = filp->private_data;
- oz_trace("Poll called wait = %p\n", wait);
+
+ oz_dbg(ON, "Poll called wait = %p\n", wait);
spin_lock_bh(&dev->lock);
if (dev->active_pd) {
struct oz_serial_ctx *ctx = oz_cdev_claim_ctx(dev->active_pd);
@@ -317,7 +328,8 @@ static unsigned int oz_cdev_poll(struct file *filp, poll_table *wait)
poll_wait(filp, &dev->rdq, wait);
return ret;
}
-/*------------------------------------------------------------------------------
+
+/*
*/
static const struct file_operations oz_fops = {
.owner = THIS_MODULE,
@@ -328,19 +340,21 @@ static const struct file_operations oz_fops = {
.unlocked_ioctl = oz_cdev_ioctl,
.poll = oz_cdev_poll
};
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
int oz_cdev_register(void)
{
int err;
struct device *dev;
+
memset(&g_cdev, 0, sizeof(g_cdev));
err = alloc_chrdev_region(&g_cdev.devnum, 0, 1, "ozwpan");
if (err < 0)
- goto out3;
- oz_trace("Alloc dev number %d:%d\n", MAJOR(g_cdev.devnum),
- MINOR(g_cdev.devnum));
+ return err;
+ oz_dbg(ON, "Alloc dev number %d:%d\n",
+ MAJOR(g_cdev.devnum), MINOR(g_cdev.devnum));
cdev_init(&g_cdev.cdev, &oz_fops);
g_cdev.cdev.owner = THIS_MODULE;
g_cdev.cdev.ops = &oz_fops;
@@ -348,30 +362,31 @@ int oz_cdev_register(void)
init_waitqueue_head(&g_cdev.rdq);
err = cdev_add(&g_cdev.cdev, g_cdev.devnum, 1);
if (err < 0) {
- oz_trace("Failed to add cdev\n");
- goto out2;
+ oz_dbg(ON, "Failed to add cdev\n");
+ goto unregister;
}
g_oz_class = class_create(THIS_MODULE, "ozmo_wpan");
if (IS_ERR(g_oz_class)) {
- oz_trace("Failed to register ozmo_wpan class\n");
+ oz_dbg(ON, "Failed to register ozmo_wpan class\n");
err = PTR_ERR(g_oz_class);
- goto out1;
+ goto delete;
}
dev = device_create(g_oz_class, NULL, g_cdev.devnum, NULL, "ozwpan");
if (IS_ERR(dev)) {
- oz_trace("Failed to create sysfs entry for cdev\n");
+ oz_dbg(ON, "Failed to create sysfs entry for cdev\n");
err = PTR_ERR(dev);
- goto out1;
+ goto delete;
}
return 0;
-out1:
+
+delete:
cdev_del(&g_cdev.cdev);
-out2:
+unregister:
unregister_chrdev_region(g_cdev.devnum, 1);
-out3:
return err;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
int oz_cdev_deregister(void)
@@ -384,7 +399,8 @@ int oz_cdev_deregister(void)
}
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
int oz_cdev_init(void)
@@ -392,22 +408,25 @@ int oz_cdev_init(void)
oz_app_enable(OZ_APPID_SERIAL, 1);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
void oz_cdev_term(void)
{
oz_app_enable(OZ_APPID_SERIAL, 0);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
int oz_cdev_start(struct oz_pd *pd, int resume)
{
struct oz_serial_ctx *ctx;
struct oz_serial_ctx *old_ctx;
+
if (resume) {
- oz_trace("Serial service resumed.\n");
+ oz_dbg(ON, "Serial service resumed\n");
return 0;
}
ctx = kzalloc(sizeof(struct oz_serial_ctx), GFP_ATOMIC);
@@ -429,20 +448,22 @@ int oz_cdev_start(struct oz_pd *pd, int resume)
(memcmp(pd->mac_addr, g_cdev.active_addr, ETH_ALEN) == 0)) {
oz_pd_get(pd);
g_cdev.active_pd = pd;
- oz_trace("Active PD arrived.\n");
+ oz_dbg(ON, "Active PD arrived\n");
}
spin_unlock(&g_cdev.lock);
- oz_trace("Serial service started.\n");
+ oz_dbg(ON, "Serial service started\n");
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
void oz_cdev_stop(struct oz_pd *pd, int pause)
{
struct oz_serial_ctx *ctx;
+
if (pause) {
- oz_trace("Serial service paused.\n");
+ oz_dbg(ON, "Serial service paused\n");
return;
}
spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
@@ -459,11 +480,12 @@ void oz_cdev_stop(struct oz_pd *pd, int pause)
spin_unlock(&g_cdev.lock);
if (pd) {
oz_pd_put(pd);
- oz_trace("Active PD departed.\n");
+ oz_dbg(ON, "Active PD departed\n");
}
- oz_trace("Serial service stopped.\n");
+ oz_dbg(ON, "Serial service stopped\n");
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt)
@@ -478,7 +500,7 @@ void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt)
ctx = oz_cdev_claim_ctx(pd);
if (ctx == NULL) {
- oz_trace("Cannot claim serial context.\n");
+ oz_dbg(ON, "Cannot claim serial context\n");
return;
}
@@ -488,8 +510,8 @@ void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt)
if (app_hdr->elt_seq_num != 0) {
if (((ctx->rx_seq_num - app_hdr->elt_seq_num) & 0x80) == 0) {
/* Reject duplicate element. */
- oz_trace("Duplicate element:%02x %02x\n",
- app_hdr->elt_seq_num, ctx->rx_seq_num);
+ oz_dbg(ON, "Duplicate element:%02x %02x\n",
+ app_hdr->elt_seq_num, ctx->rx_seq_num);
goto out;
}
}
@@ -502,7 +524,7 @@ void oz_cdev_rx(struct oz_pd *pd, struct oz_elt *elt)
if (space < 0)
space += OZ_RD_BUF_SZ;
if (len > space) {
- oz_trace("Not enough space:%d %d\n", len, space);
+ oz_dbg(ON, "Not enough space:%d %d\n", len, space);
len = space;
}
ix = ctx->rd_in;
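
For illustration (not from the driver): the hunks above write received serial data into a circular read buffer, with rd_in as the write index and OZ_RD_BUF_SZ as the wrap size; the "Not enough space" check relies on the usual out - in - 1 free-space calculation, which also appears verbatim in oz_hcd_buffer_data() later in this diff. A self-contained sketch of that arithmetic, with an arbitrary buffer size and a helper name that is not from the driver:

#include <stdio.h>

#define OZ_RD_BUF_SZ 2048	/* size assumed for the example */

/* Free space in a circular buffer where 'in' is the write index and
 * 'out' is the read index; one slot stays empty to tell full from empty.
 */
static int rd_space(int in, int out)
{
	int space = out - in - 1;

	if (space < 0)
		space += OZ_RD_BUF_SZ;
	return space;
}

int main(void)
{
	printf("empty buffer: %d\n", rd_space(0, 0));	/* 2047 */
	printf("wrapped case: %d\n", rd_space(2040, 10));	/* 17 */
	return 0;
}
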
diff --git a/drivers/staging/ozwpan/ozconfig.h b/drivers/staging/ozwpan/ozconfig.h
deleted file mode 100644
index 087c322d2de..00000000000
--- a/drivers/staging/ozwpan/ozconfig.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * ---------------------------------------------------------------------------*/
-#ifndef _OZCONFIG_H
-#define _OZCONFIG_H
-
-/* #define WANT_TRACE */
-#ifdef WANT_TRACE
-#define WANT_VERBOSE_TRACE
-#endif /* #ifdef WANT_TRACE */
-/* #define WANT_URB_PARANOIA */
-
-/* #define WANT_PRE_2_6_39 */
-
-/* These defines determine what verbose trace is displayed. */
-#ifdef WANT_VERBOSE_TRACE
-/* #define WANT_TRACE_STREAM */
-/* #define WANT_TRACE_URB */
-/* #define WANT_TRACE_CTRL_DETAIL */
-#define WANT_TRACE_HUB
-/* #define WANT_TRACE_RX_FRAMES */
-/* #define WANT_TRACE_TX_FRAMES */
-#endif /* WANT_VERBOSE_TRACE */
-
-#endif /* _OZCONFIG_H */
diff --git a/drivers/staging/ozwpan/ozdbg.h b/drivers/staging/ozwpan/ozdbg.h
new file mode 100644
index 00000000000..b86a2b7e017
--- /dev/null
+++ b/drivers/staging/ozwpan/ozdbg.h
@@ -0,0 +1,54 @@
+/* -----------------------------------------------------------------------------
+ * Copyright (c) 2011 Ozmo Inc
+ * Released under the GNU General Public License Version 2 (GPLv2).
+ * ---------------------------------------------------------------------------*/
+
+#ifndef _OZDBG_H
+#define _OZDBG_H
+
+#define OZ_WANT_DBG 0
+#define OZ_WANT_VERBOSE_DBG 1
+
+#define OZ_DBG_ON 0x0
+#define OZ_DBG_STREAM 0x1
+#define OZ_DBG_URB 0x2
+#define OZ_DBG_CTRL_DETAIL 0x4
+#define OZ_DBG_HUB 0x8
+#define OZ_DBG_RX_FRAMES 0x10
+#define OZ_DBG_TX_FRAMES 0x20
+
+#define OZ_DEFAULT_DBG_MASK \
+ ( \
+ /* OZ_DBG_STREAM | */ \
+ /* OZ_DBG_URB | */ \
+ /* OZ_DBG_CTRL_DETAIL | */ \
+ OZ_DBG_HUB | \
+ /* OZ_DBG_RX_FRAMES | */ \
+ /* OZ_DBG_TX_FRAMES | */ \
+ 0)
+
+extern unsigned int oz_dbg_mask;
+
+#define oz_want_dbg(mask) \
+ ((OZ_WANT_DBG && (OZ_DBG_##mask == OZ_DBG_ON)) || \
+ (OZ_WANT_VERBOSE_DBG && (OZ_DBG_##mask & oz_dbg_mask)))
+
+#define oz_dbg(mask, fmt, ...) \
+do { \
+ if (oz_want_dbg(mask)) \
+ pr_debug(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define oz_cdev_dbg(cdev, mask, fmt, ...) \
+do { \
+ if (oz_want_dbg(mask)) \
+ netdev_dbg((cdev)->dev, fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define oz_pd_dbg(pd, mask, fmt, ...) \
+do { \
+ if (oz_want_dbg(mask)) \
+ pr_debug(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#endif /* _OZDBG_H */
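
For illustration (not part of the patch): the header above replaces the compile-time WANT_TRACE switches from the deleted ozconfig.h with a runtime mask. Per-category calls such as oz_dbg(STREAM, ...) or oz_dbg(HUB, ...) print only when the matching OZ_DBG_* bit is set in oz_dbg_mask (with OZ_WANT_VERBOSE_DBG enabled), while the catch-all oz_dbg(ON, ...) class is gated by OZ_WANT_DBG alone, since OZ_DBG_ON is 0 and can never match the mask. A minimal userspace mock of that gating, with pr_debug() stubbed out so it compiles outside the kernel:

/* Userspace mock of the ozdbg.h gating above; pr_debug() becomes printf()
 * and the default mask enables only the HUB category, mirroring
 * OZ_DEFAULT_DBG_MASK.  Illustrative only.
 */
#include <stdio.h>

#define pr_debug(fmt, ...)	printf(fmt, ##__VA_ARGS__)

#define OZ_WANT_DBG		0
#define OZ_WANT_VERBOSE_DBG	1

#define OZ_DBG_ON		0x0
#define OZ_DBG_STREAM		0x1
#define OZ_DBG_HUB		0x8

static unsigned int oz_dbg_mask = OZ_DBG_HUB;

#define oz_want_dbg(mask) \
	((OZ_WANT_DBG && (OZ_DBG_##mask == OZ_DBG_ON)) || \
	 (OZ_WANT_VERBOSE_DBG && (OZ_DBG_##mask & oz_dbg_mask)))

#define oz_dbg(mask, fmt, ...) \
do { \
	if (oz_want_dbg(mask)) \
		pr_debug(fmt, ##__VA_ARGS__); \
} while (0)

int main(void)
{
	oz_dbg(HUB, "Port %d changed\n", 3);	/* printed: HUB bit is set */
	oz_dbg(STREAM, "Stream up: %d %d\n", 1, 2);	/* silent: STREAM bit clear */
	oz_dbg(ON, "cdev registered\n");	/* silent while OZ_WANT_DBG is 0 */
	return 0;
}
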
diff --git a/drivers/staging/ozwpan/ozeltbuf.c b/drivers/staging/ozwpan/ozeltbuf.c
index ac90fc7f544..9b86486c6b1 100644
--- a/drivers/staging/ozwpan/ozeltbuf.c
+++ b/drivers/staging/ozwpan/ozeltbuf.c
@@ -6,16 +6,15 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
-#include "ozconfig.h"
+#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
-#include "oztrace.h"
-/*------------------------------------------------------------------------------
- */
+
#define OZ_ELT_INFO_MAGIC_USED 0x35791057
#define OZ_ELT_INFO_MAGIC_FREE 0x78940102
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
int oz_elt_buf_init(struct oz_elt_buf *buf)
@@ -28,13 +27,15 @@ int oz_elt_buf_init(struct oz_elt_buf *buf)
spin_lock_init(&buf->lock);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
void oz_elt_buf_term(struct oz_elt_buf *buf)
{
struct list_head *e;
int i;
+
/* Free any elements in the order or isoc lists. */
for (i = 0; i < 2; i++) {
struct list_head *list;
@@ -59,12 +60,14 @@ void oz_elt_buf_term(struct oz_elt_buf *buf)
}
buf->free_elts = 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf)
{
- struct oz_elt_info *ei = NULL;
+ struct oz_elt_info *ei;
+
spin_lock_bh(&buf->lock);
if (buf->free_elts && buf->elt_pool) {
ei = container_of(buf->elt_pool, struct oz_elt_info, link);
@@ -72,8 +75,8 @@ struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf)
buf->free_elts--;
spin_unlock_bh(&buf->lock);
if (ei->magic != OZ_ELT_INFO_MAGIC_FREE) {
- oz_trace("oz_elt_info_alloc: ei with bad magic: 0x%x\n",
- ei->magic);
+ oz_dbg(ON, "%s: ei with bad magic: 0x%x\n",
+ __func__, ei->magic);
}
} else {
spin_unlock_bh(&buf->lock);
@@ -91,7 +94,8 @@ struct oz_elt_info *oz_elt_info_alloc(struct oz_elt_buf *buf)
}
return ei;
}
-/*------------------------------------------------------------------------------
+
+/*
* Precondition: oz_elt_buf.lock must be held.
* Context: softirq or process
*/
@@ -104,18 +108,19 @@ void oz_elt_info_free(struct oz_elt_buf *buf, struct oz_elt_info *ei)
buf->elt_pool = &ei->link;
ei->magic = OZ_ELT_INFO_MAGIC_FREE;
} else {
- oz_trace("oz_elt_info_free: bad magic ei: %p"
- " magic: 0x%x\n",
- ei, ei->magic);
+ oz_dbg(ON, "%s: bad magic ei: %p magic: 0x%x\n",
+ __func__, ei, ei->magic);
}
}
}
+
/*------------------------------------------------------------------------------
* Context: softirq
*/
void oz_elt_info_free_chain(struct oz_elt_buf *buf, struct list_head *list)
{
struct list_head *e;
+
e = list->next;
spin_lock_bh(&buf->lock);
while (e != list) {
@@ -126,13 +131,12 @@ void oz_elt_info_free_chain(struct oz_elt_buf *buf, struct list_head *list)
}
spin_unlock_bh(&buf->lock);
}
-/*------------------------------------------------------------------------------
- */
+
int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count)
{
struct oz_elt_stream *st;
- oz_trace("oz_elt_stream_create(0x%x)\n", id);
+ oz_dbg(ON, "%s: (0x%x)\n", __func__, id);
st = kzalloc(sizeof(struct oz_elt_stream), GFP_ATOMIC | __GFP_ZERO);
if (st == NULL)
@@ -146,13 +150,13 @@ int oz_elt_stream_create(struct oz_elt_buf *buf, u8 id, int max_buf_count)
spin_unlock_bh(&buf->lock);
return 0;
}
-/*------------------------------------------------------------------------------
- */
+
int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id)
{
struct list_head *e;
struct oz_elt_stream *st = NULL;
- oz_trace("oz_elt_stream_delete(0x%x)\n", id);
+
+ oz_dbg(ON, "%s: (0x%x)\n", __func__, id);
spin_lock_bh(&buf->lock);
e = buf->stream_list.next;
while (e != &buf->stream_list) {
@@ -175,9 +179,8 @@ int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id)
list_del_init(&ei->link);
list_del_init(&ei->link_order);
st->buf_count -= ei->length;
- oz_trace2(OZ_TRACE_STREAM, "Stream down: %d %d %d\n",
- st->buf_count,
- ei->length, atomic_read(&st->ref_count));
+ oz_dbg(STREAM, "Stream down: %d %d %d\n",
+ st->buf_count, ei->length, atomic_read(&st->ref_count));
oz_elt_stream_put(st);
oz_elt_info_free(buf, ei);
}
@@ -185,22 +188,21 @@ int oz_elt_stream_delete(struct oz_elt_buf *buf, u8 id)
oz_elt_stream_put(st);
return 0;
}
-/*------------------------------------------------------------------------------
- */
+
void oz_elt_stream_get(struct oz_elt_stream *st)
{
atomic_inc(&st->ref_count);
}
-/*------------------------------------------------------------------------------
- */
+
void oz_elt_stream_put(struct oz_elt_stream *st)
{
if (atomic_dec_and_test(&st->ref_count)) {
- oz_trace("Stream destroyed\n");
+ oz_dbg(ON, "Stream destroyed\n");
kfree(st);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Precondition: Element buffer lock must be held.
* If this function fails the caller is responsible for deallocating the elt
* info structure.
@@ -210,6 +212,7 @@ int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id,
{
struct oz_elt_stream *st = NULL;
struct list_head *e;
+
if (id) {
list_for_each(e, &buf->stream_list) {
st = container_of(e, struct oz_elt_stream, link);
@@ -242,8 +245,7 @@ int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id,
st->buf_count += ei->length;
/* Add to list in stream. */
list_add_tail(&ei->link, &st->elt_list);
- oz_trace2(OZ_TRACE_STREAM, "Stream up: %d %d\n",
- st->buf_count, ei->length);
+ oz_dbg(STREAM, "Stream up: %d %d\n", st->buf_count, ei->length);
/* Check if we have too much buffered for this stream. If so
* start dropping elements until we are back in bounds.
*/
@@ -263,8 +265,7 @@ int oz_queue_elt_info(struct oz_elt_buf *buf, u8 isoc, u8 id,
&buf->isoc_list : &buf->order_list);
return 0;
}
-/*------------------------------------------------------------------------------
- */
+
int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
unsigned max_len, struct list_head *list)
{
@@ -272,6 +273,7 @@ int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
struct list_head *e;
struct list_head *el;
struct oz_elt_info *ei;
+
spin_lock_bh(&buf->lock);
if (isoc)
el = &buf->isoc_list;
@@ -293,9 +295,8 @@ int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
list_del(&ei->link_order);
if (ei->stream) {
ei->stream->buf_count -= ei->length;
- oz_trace2(OZ_TRACE_STREAM,
- "Stream down: %d %d\n",
- ei->stream->buf_count, ei->length);
+ oz_dbg(STREAM, "Stream down: %d %d\n",
+ ei->stream->buf_count, ei->length);
oz_elt_stream_put(ei->stream);
ei->stream = NULL;
}
@@ -309,18 +310,17 @@ int oz_select_elts_for_tx(struct oz_elt_buf *buf, u8 isoc, unsigned *len,
spin_unlock_bh(&buf->lock);
return count;
}
-/*------------------------------------------------------------------------------
- */
+
int oz_are_elts_available(struct oz_elt_buf *buf)
{
return buf->order_list.next != &buf->order_list;
}
-/*------------------------------------------------------------------------------
- */
+
void oz_trim_elt_pool(struct oz_elt_buf *buf)
{
struct list_head *free = NULL;
struct list_head *e;
+
spin_lock_bh(&buf->lock);
while (buf->free_elts > buf->max_free_elts) {
e = buf->elt_pool;
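
For illustration (not from the driver): the "Stream up" / "Stream down" debug lines above report per-stream byte accounting. Queueing an element adds ei->length to st->buf_count and, as the comment in oz_queue_elt_info() says, elements are then dropped until the stream is back within its max_buf_count bound; selecting elements for transmit subtracts their lengths again. A self-contained sketch of that bookkeeping; the ceiling, the element sizes and the drop-the-oldest policy are assumptions made for the example, and a flat array stands in for the kernel list_head chains:

#include <stdio.h>

#define MAX_BUF_COUNT	256	/* assumed per-stream ceiling */

struct stream {
	int buf_count;		/* bytes currently queued for the stream */
	int lengths[16];	/* queued element lengths, oldest first */
	int nr;
};

static void queue_elt(struct stream *st, int len)
{
	st->lengths[st->nr++] = len;
	st->buf_count += len;			/* "Stream up" */
	/* Too much buffered: drop elements until back in bounds. */
	while (st->buf_count > MAX_BUF_COUNT) {
		st->buf_count -= st->lengths[0];	/* "Stream down" */
		for (int i = 1; i < st->nr; i++)
			st->lengths[i - 1] = st->lengths[i];
		st->nr--;
	}
}

int main(void)
{
	struct stream st = { 0 };

	for (int i = 0; i < 5; i++)
		queue_elt(&st, 100);
	printf("%d elements, %d bytes buffered\n", st.nr, st.buf_count);
	return 0;
}
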
diff --git a/drivers/staging/ozwpan/ozhcd.c b/drivers/staging/ozwpan/ozhcd.c
index d68d63a2e68..d9c43c3282e 100644
--- a/drivers/staging/ozwpan/ozhcd.c
+++ b/drivers/staging/ozwpan/ozhcd.c
@@ -26,31 +26,42 @@
*/
#include <linux/platform_device.h>
#include <linux/usb.h>
-#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "linux/usb/hcd.h"
#include <asm/unaligned.h>
-#include "ozconfig.h"
+#include "ozdbg.h"
#include "ozusbif.h"
-#include "oztrace.h"
#include "ozurbparanoia.h"
#include "ozhcd.h"
-/*------------------------------------------------------------------------------
+
+/*
* Number of units of buffering to capture for an isochronous IN endpoint before
* allowing data to be indicated up.
*/
-#define OZ_IN_BUFFERING_UNITS 50
+#define OZ_IN_BUFFERING_UNITS 100
+
/* Name of our platform device.
*/
#define OZ_PLAT_DEV_NAME "ozwpan"
+
/* Maximum number of free urb links that can be kept in the pool.
*/
#define OZ_MAX_LINK_POOL_SIZE 16
+
/* Get endpoint object from the containing link.
*/
#define ep_from_link(__e) container_of((__e), struct oz_endpoint, link)
-/*------------------------------------------------------------------------------
+
+/* EP0 timeout before the ep0 request is added back to the TX queue (13*8 = 104 ms).
+ */
+#define EP0_TIMEOUT_COUNTER 13
+
+/* Debounce time the HCD driver should wait before unregistering.
+ */
+#define OZ_HUB_DEBOUNCE_TIMEOUT 1500
+
+/*
* Used to link urbs together and also store some status information for each
* urb.
* A cache of these are kept in a pool to reduce number of calls to kmalloc.
@@ -61,16 +72,18 @@ struct oz_urb_link {
struct oz_port *port;
u8 req_id;
u8 ep_num;
- unsigned long submit_jiffies;
+ unsigned submit_counter;
};
/* Holds state information about a USB endpoint.
*/
+#define OZ_EP_BUFFER_SIZE_ISOC (1024 * 24)
+#define OZ_EP_BUFFER_SIZE_INT 512
struct oz_endpoint {
struct list_head urb_list; /* List of oz_urb_link items. */
struct list_head link; /* For isoc ep, links in to isoc
lists of oz_port. */
- unsigned long last_jiffies;
+ struct timespec timestamp;
int credit;
int credit_ceiling;
u8 ep_num;
@@ -83,6 +96,7 @@ struct oz_endpoint {
unsigned flags;
int start_frame;
};
+
/* Bits in the flags field. */
#define OZ_F_EP_BUFFERING 0x1
#define OZ_F_EP_HAVE_STREAM 0x2
@@ -113,6 +127,7 @@ struct oz_port {
struct list_head isoc_out_ep;
struct list_head isoc_in_ep;
};
+
#define OZ_PORT_F_PRESENT 0x1
#define OZ_PORT_F_CHANGED 0x2
#define OZ_PORT_F_DYING 0x4
@@ -130,11 +145,12 @@ struct oz_hcd {
uint flags;
struct usb_hcd *hcd;
};
+
/* Bits in flags field.
*/
#define OZ_HDC_F_SUSPENDED 0x1
-/*------------------------------------------------------------------------------
+/*
* Static function prototypes.
*/
static int oz_hcd_start(struct usb_hcd *hcd);
@@ -174,7 +190,8 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
struct urb *urb);
static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status);
-/*------------------------------------------------------------------------------
+
+/*
* Static external variables.
*/
static struct platform_device *g_plat_dev;
@@ -188,6 +205,7 @@ static DEFINE_SPINLOCK(g_tasklet_lock);
static struct tasklet_struct g_urb_process_tasklet;
static struct tasklet_struct g_urb_cancel_tasklet;
static atomic_t g_pending_urbs = ATOMIC_INIT(0);
+static atomic_t g_usb_frame_number = ATOMIC_INIT(0);
static const struct hc_driver g_oz_hc_drv = {
.description = g_hcd_name,
.product_desc = "Ozmo Devices WPAN",
@@ -218,7 +236,8 @@ static struct platform_driver g_oz_plat_drv = {
.owner = THIS_MODULE,
},
};
-/*------------------------------------------------------------------------------
+
+/*
* Gets our private context area (which is of type struct oz_hcd) from the
* usb_hcd structure.
* Context: any
@@ -227,7 +246,8 @@ static inline struct oz_hcd *oz_hcd_private(struct usb_hcd *hcd)
{
return (struct oz_hcd *)hcd->hcd_priv;
}
-/*------------------------------------------------------------------------------
+
+/*
* Searches list of ports to find the index of the one with a specified USB
* bus address. If none of the ports has the bus address then the connection
* port is returned, if there is one or -1 otherwise.
@@ -236,13 +256,15 @@ static inline struct oz_hcd *oz_hcd_private(struct usb_hcd *hcd)
static int oz_get_port_from_addr(struct oz_hcd *ozhcd, u8 bus_addr)
{
int i;
+
for (i = 0; i < OZ_NB_PORTS; i++) {
if (ozhcd->ports[i].bus_addr == bus_addr)
return i;
}
return ozhcd->conn_port;
}
-/*------------------------------------------------------------------------------
+
+/*
* Allocates an urb link, first trying the pool but going to heap if empty.
* Context: any
*/
@@ -250,6 +272,7 @@ static struct oz_urb_link *oz_alloc_urb_link(void)
{
struct oz_urb_link *urbl = NULL;
unsigned long irq_state;
+
spin_lock_irqsave(&g_link_lock, irq_state);
if (g_link_pool) {
urbl = container_of(g_link_pool, struct oz_urb_link, link);
@@ -261,7 +284,8 @@ static struct oz_urb_link *oz_alloc_urb_link(void)
urbl = kmalloc(sizeof(struct oz_urb_link), GFP_ATOMIC);
return urbl;
}
-/*------------------------------------------------------------------------------
+
+/*
* Frees an urb link by putting it in the pool if there is enough space or
* deallocating it to heap otherwise.
* Context: any
@@ -281,7 +305,8 @@ static void oz_free_urb_link(struct oz_urb_link *urbl)
kfree(urbl);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Deallocates all the urb links in the pool.
* Context: unknown
*/
@@ -289,6 +314,7 @@ static void oz_empty_link_pool(void)
{
struct list_head *e;
unsigned long irq_state;
+
spin_lock_irqsave(&g_link_lock, irq_state);
e = g_link_pool;
g_link_pool = NULL;
@@ -301,12 +327,13 @@ static void oz_empty_link_pool(void)
kfree(urbl);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Allocates endpoint structure and optionally a buffer. If a buffer is
* allocated it immediately follows the endpoint structure.
* Context: softirq
*/
-static struct oz_endpoint *oz_ep_alloc(gfp_t mem_flags, int buffer_size)
+static struct oz_endpoint *oz_ep_alloc(int buffer_size, gfp_t mem_flags)
{
struct oz_endpoint *ep =
kzalloc(sizeof(struct oz_endpoint)+buffer_size, mem_flags);
@@ -321,7 +348,8 @@ static struct oz_endpoint *oz_ep_alloc(gfp_t mem_flags, int buffer_size)
}
return ep;
}
-/*------------------------------------------------------------------------------
+
+/*
* Pre-condition: Must be called with g_tasklet_lock held and interrupts
* disabled.
* Context: softirq or process
@@ -330,6 +358,7 @@ static struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd, struct urb *urb
{
struct oz_urb_link *urbl;
struct list_head *e;
+
list_for_each(e, &ozhcd->urb_cancel_list) {
urbl = container_of(e, struct oz_urb_link, link);
if (urb == urbl->urb) {
@@ -339,17 +368,19 @@ static struct oz_urb_link *oz_uncancel_urb(struct oz_hcd *ozhcd, struct urb *urb
}
return NULL;
}
-/*------------------------------------------------------------------------------
+
+/*
* This is called when we have finished processing an urb. It unlinks it from
* the ep and returns it to the core.
* Context: softirq or process
*/
static void oz_complete_urb(struct usb_hcd *hcd, struct urb *urb,
- int status, unsigned long submit_jiffies)
+ int status)
{
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
unsigned long irq_state;
- struct oz_urb_link *cancel_urbl = NULL;
+ struct oz_urb_link *cancel_urbl;
+
spin_lock_irqsave(&g_tasklet_lock, irq_state);
usb_hcd_unlink_urb_from_ep(hcd, urb);
/* Clear hcpriv which will prevent it being put in the cancel list
@@ -371,15 +402,9 @@ static void oz_complete_urb(struct usb_hcd *hcd, struct urb *urb,
*/
spin_unlock(&g_tasklet_lock);
if (oz_forget_urb(urb)) {
- oz_trace("OZWPAN: ERROR Unknown URB %p\n", urb);
+ oz_dbg(ON, "ERROR Unknown URB %p\n", urb);
} else {
- static unsigned long last_time;
atomic_dec(&g_pending_urbs);
- oz_trace2(OZ_TRACE_URB,
- "%lu: giveback_urb(%p,%x) %lu %lu pending:%d\n",
- jiffies, urb, status, jiffies-submit_jiffies,
- jiffies-last_time, atomic_read(&g_pending_urbs));
- last_time = jiffies;
usb_hcd_giveback_urb(hcd, urb, status);
}
spin_lock(&g_tasklet_lock);
@@ -387,14 +412,14 @@ static void oz_complete_urb(struct usb_hcd *hcd, struct urb *urb,
if (cancel_urbl)
oz_free_urb_link(cancel_urbl);
}
-/*------------------------------------------------------------------------------
+
+/*
* Deallocates an endpoint including deallocating any associated stream and
* returning any queued urbs to the core.
* Context: softirq
*/
static void oz_ep_free(struct oz_port *port, struct oz_endpoint *ep)
{
- oz_trace("oz_ep_free()\n");
if (port) {
struct list_head list;
struct oz_hcd *ozhcd = port->ozhcd;
@@ -409,19 +434,20 @@ static void oz_ep_free(struct oz_port *port, struct oz_endpoint *ep)
list_splice_tail(&list, &ozhcd->orphanage);
spin_unlock_bh(&ozhcd->hcd_lock);
}
- oz_trace("Freeing endpoint memory\n");
+ oz_dbg(ON, "Freeing endpoint memory\n");
kfree(ep);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static void oz_complete_buffered_urb(struct oz_port *port,
struct oz_endpoint *ep,
struct urb *urb)
{
-	u8 data_len, available_space, copy_len;
+	int data_len, available_space, copy_len;

-	memcpy(&data_len, &ep->buffer[ep->out_ix], sizeof(u8));
+	data_len = ep->buffer[ep->out_ix];
if (data_len <= urb->transfer_buffer_length)
available_space = data_len;
else
@@ -446,28 +472,29 @@ static void oz_complete_buffered_urb(struct oz_port *port,
ep->out_ix = 0;
ep->buffered_units--;
- oz_trace("Trying to give back buffered frame of size=%d\n",
- available_space);
- oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+ oz_dbg(ON, "Trying to give back buffered frame of size=%d\n",
+ available_space);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
-/*------------------------------------------------------------------------------
+/*
* Context: softirq
*/
static int oz_enqueue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
struct urb *urb, u8 req_id)
{
struct oz_urb_link *urbl;
- struct oz_endpoint *ep;
+ struct oz_endpoint *ep = NULL;
int err = 0;
+
if (ep_addr >= OZ_NB_ENDPOINTS) {
- oz_trace("Invalid endpoint number in oz_enqueue_ep_urb().\n");
+ oz_dbg(ON, "%s: Invalid endpoint number\n", __func__);
return -EINVAL;
}
urbl = oz_alloc_urb_link();
if (!urbl)
return -ENOMEM;
- urbl->submit_jiffies = jiffies;
+ urbl->submit_counter = 0;
urbl->urb = urb;
urbl->req_id = req_id;
urbl->ep_num = ep_addr;
@@ -480,15 +507,20 @@ static int oz_enqueue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
*/
if (urb->unlinked) {
spin_unlock_bh(&port->ozhcd->hcd_lock);
- oz_trace("urb %p unlinked so complete immediately\n", urb);
- oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+ oz_dbg(ON, "urb %p unlinked so complete immediately\n", urb);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0);
oz_free_urb_link(urbl);
return 0;
}
+
if (in_dir)
ep = port->in_ep[ep_addr];
else
ep = port->out_ep[ep_addr];
+ if (!ep) {
+ err = -ENOMEM;
+ goto out;
+ }
/*For interrupt endpoint check for buffered data
* & complete urb
@@ -501,21 +533,23 @@ static int oz_enqueue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
return 0;
}
- if (ep && port->hpd) {
+ if (port->hpd) {
list_add_tail(&urbl->link, &ep->urb_list);
if (!in_dir && ep_addr && (ep->credit < 0)) {
- ep->last_jiffies = jiffies;
+ getrawmonotonic(&ep->timestamp);
ep->credit = 0;
}
} else {
err = -EPIPE;
}
+out:
spin_unlock_bh(&port->ozhcd->hcd_lock);
if (err)
oz_free_urb_link(urbl);
return err;
}
-/*------------------------------------------------------------------------------
+
+/*
* Removes an urb from the queue in the endpoint.
* Returns 0 if it is found and -EIDRM otherwise.
* Context: softirq
@@ -525,6 +559,7 @@ static int oz_dequeue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
{
struct oz_urb_link *urbl = NULL;
struct oz_endpoint *ep;
+
spin_lock_bh(&port->ozhcd->hcd_lock);
if (in_dir)
ep = port->in_ep[ep_addr];
@@ -546,7 +581,8 @@ static int oz_dequeue_ep_urb(struct oz_port *port, u8 ep_addr, int in_dir,
oz_free_urb_link(urbl);
return urbl ? 0 : -EIDRM;
}
-/*------------------------------------------------------------------------------
+
+/*
* Finds an urb given its request id.
* Context: softirq
*/
@@ -555,7 +591,7 @@ static struct urb *oz_find_urb_by_id(struct oz_port *port, int ep_ix,
{
struct oz_hcd *ozhcd = port->ozhcd;
struct urb *urb = NULL;
- struct oz_urb_link *urbl = NULL;
+ struct oz_urb_link *urbl;
struct oz_endpoint *ep;
spin_lock_bh(&ozhcd->hcd_lock);
@@ -578,7 +614,8 @@ static struct urb *oz_find_urb_by_id(struct oz_port *port, int ep_ix,
oz_free_urb_link(urbl);
return urb;
}
-/*------------------------------------------------------------------------------
+
+/*
* Pre-condition: Port lock must be held.
* Context: softirq
*/
@@ -592,12 +629,14 @@ static void oz_acquire_port(struct oz_port *port, void *hpd)
oz_usb_get(hpd);
port->hpd = hpd;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static struct oz_hcd *oz_hcd_claim(void)
{
struct oz_hcd *ozhcd;
+
spin_lock_bh(&g_hcdlock);
ozhcd = g_ozhcd;
if (ozhcd)
@@ -605,7 +644,8 @@ static struct oz_hcd *oz_hcd_claim(void)
spin_unlock_bh(&g_hcdlock);
return ozhcd;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static inline void oz_hcd_put(struct oz_hcd *ozhcd)
@@ -613,7 +653,8 @@ static inline void oz_hcd_put(struct oz_hcd *ozhcd)
if (ozhcd)
usb_put_hcd(ozhcd->hcd);
}
-/*------------------------------------------------------------------------------
+
+/*
* This is called by the protocol handler to notify that a PD has arrived.
* We allocate a port to associate with the PD and create a structure for
* endpoint 0. This port is made the connection port.
@@ -625,75 +666,74 @@ static inline void oz_hcd_put(struct oz_hcd *ozhcd)
* probably very rare indeed.
* Context: softirq
*/
-void *oz_hcd_pd_arrived(void *hpd)
+struct oz_port *oz_hcd_pd_arrived(void *hpd)
{
int i;
- void *hport = NULL;
- struct oz_hcd *ozhcd = NULL;
+ struct oz_port *hport;
+ struct oz_hcd *ozhcd;
struct oz_endpoint *ep;
- oz_trace("oz_hcd_pd_arrived()\n");
+
ozhcd = oz_hcd_claim();
- if (ozhcd == NULL)
+ if (!ozhcd)
return NULL;
/* Allocate an endpoint object in advance (before holding hcd lock) to
* use for out endpoint 0.
*/
- ep = oz_ep_alloc(GFP_ATOMIC, 0);
+ ep = oz_ep_alloc(0, GFP_ATOMIC);
+ if (!ep)
+ goto err_put;
+
spin_lock_bh(&ozhcd->hcd_lock);
- if (ozhcd->conn_port >= 0) {
- spin_unlock_bh(&ozhcd->hcd_lock);
- oz_trace("conn_port >= 0\n");
- goto out;
- }
+ if (ozhcd->conn_port >= 0)
+ goto err_unlock;
+
for (i = 0; i < OZ_NB_PORTS; i++) {
struct oz_port *port = &ozhcd->ports[i];
+
spin_lock(&port->port_lock);
- if ((port->flags & OZ_PORT_F_PRESENT) == 0) {
+ if (!(port->flags & (OZ_PORT_F_PRESENT | OZ_PORT_F_CHANGED))) {
oz_acquire_port(port, hpd);
spin_unlock(&port->port_lock);
break;
}
spin_unlock(&port->port_lock);
}
- if (i < OZ_NB_PORTS) {
- oz_trace("Setting conn_port = %d\n", i);
- ozhcd->conn_port = i;
- /* Attach out endpoint 0.
- */
- ozhcd->ports[i].out_ep[0] = ep;
- ep = NULL;
- hport = &ozhcd->ports[i];
- spin_unlock_bh(&ozhcd->hcd_lock);
- if (ozhcd->flags & OZ_HDC_F_SUSPENDED) {
- oz_trace("Resuming root hub\n");
- usb_hcd_resume_root_hub(ozhcd->hcd);
- }
- usb_hcd_poll_rh_status(ozhcd->hcd);
- } else {
- spin_unlock_bh(&ozhcd->hcd_lock);
- }
-out:
- if (ep) /* ep is non-null if not used. */
- oz_ep_free(NULL, ep);
+ if (i == OZ_NB_PORTS)
+ goto err_unlock;
+
+ ozhcd->conn_port = i;
+ hport = &ozhcd->ports[i];
+ hport->out_ep[0] = ep;
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ if (ozhcd->flags & OZ_HDC_F_SUSPENDED)
+ usb_hcd_resume_root_hub(ozhcd->hcd);
+ usb_hcd_poll_rh_status(ozhcd->hcd);
oz_hcd_put(ozhcd);
+
return hport;
+
+err_unlock:
+ spin_unlock_bh(&ozhcd->hcd_lock);
+ oz_ep_free(NULL, ep);
+err_put:
+ oz_hcd_put(ozhcd);
+ return NULL;
}
-/*------------------------------------------------------------------------------
+
+/*
* This is called by the protocol handler to notify that the PD has gone away.
* We need to deallocate all resources and then request that the root hub is
* polled. We release the reference we hold on the PD.
* Context: softirq
*/
-void oz_hcd_pd_departed(void *hport)
+void oz_hcd_pd_departed(struct oz_port *port)
{
- struct oz_port *port = (struct oz_port *)hport;
struct oz_hcd *ozhcd;
void *hpd;
struct oz_endpoint *ep = NULL;
- oz_trace("oz_hcd_pd_departed()\n");
if (port == NULL) {
- oz_trace("oz_hcd_pd_departed() port = 0\n");
+ oz_dbg(ON, "%s: port = 0\n", __func__);
return;
}
ozhcd = port->ozhcd;
@@ -704,7 +744,7 @@ void oz_hcd_pd_departed(void *hport)
spin_lock_bh(&ozhcd->hcd_lock);
if ((ozhcd->conn_port >= 0) &&
(port == &ozhcd->ports[ozhcd->conn_port])) {
- oz_trace("Clearing conn_port\n");
+ oz_dbg(ON, "Clearing conn_port\n");
ozhcd->conn_port = -1;
}
spin_lock(&port->port_lock);
@@ -717,9 +757,10 @@ void oz_hcd_pd_departed(void *hport)
hpd = port->hpd;
port->hpd = NULL;
port->bus_addr = 0xff;
+ port->config_num = 0;
port->flags &= ~(OZ_PORT_F_PRESENT | OZ_PORT_F_DYING);
port->flags |= OZ_PORT_F_CHANGED;
- port->status &= ~USB_PORT_STAT_CONNECTION;
+ port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE);
port->status |= (USB_PORT_STAT_C_CONNECTION << 16);
/* If there is an endpont 0 then clear the pointer while we hold
* the spinlock be we deallocate it after releasing the lock.
@@ -734,7 +775,8 @@ void oz_hcd_pd_departed(void *hport)
usb_hcd_poll_rh_status(ozhcd->hcd);
oz_usb_put(hpd);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
void oz_hcd_pd_reset(void *hpd, void *hport)
@@ -743,7 +785,8 @@ void oz_hcd_pd_reset(void *hpd, void *hport)
*/
struct oz_port *port = (struct oz_port *)hport;
struct oz_hcd *ozhcd = port->ozhcd;
- oz_trace("PD Reset\n");
+
+ oz_dbg(ON, "PD Reset\n");
spin_lock_bh(&port->port_lock);
port->flags |= OZ_PORT_F_CHANGED;
port->status |= USB_PORT_STAT_RESET;
@@ -752,7 +795,8 @@ void oz_hcd_pd_reset(void *hpd, void *hport)
oz_clean_endpoints_for_config(ozhcd->hcd, port);
usb_hcd_poll_rh_status(ozhcd->hcd);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
@@ -762,8 +806,8 @@ void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
struct urb *urb;
int err = 0;
- oz_trace("oz_hcd_get_desc_cnf length = %d offs = %d tot_size = %d\n",
- length, offset, total_size);
+ oz_dbg(ON, "oz_hcd_get_desc_cnf length = %d offs = %d tot_size = %d\n",
+ length, offset, total_size);
urb = oz_find_urb_by_id(port, 0, req_id);
if (!urb)
return;
@@ -795,54 +839,52 @@ void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
}
}
urb->actual_length = total_size;
- oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
-#ifdef WANT_TRACE
static void oz_display_conf_type(u8 t)
{
switch (t) {
case USB_REQ_GET_STATUS:
- oz_trace("USB_REQ_GET_STATUS - cnf\n");
+ oz_dbg(ON, "USB_REQ_GET_STATUS - cnf\n");
break;
case USB_REQ_CLEAR_FEATURE:
- oz_trace("USB_REQ_CLEAR_FEATURE - cnf\n");
+ oz_dbg(ON, "USB_REQ_CLEAR_FEATURE - cnf\n");
break;
case USB_REQ_SET_FEATURE:
- oz_trace("USB_REQ_SET_FEATURE - cnf\n");
+ oz_dbg(ON, "USB_REQ_SET_FEATURE - cnf\n");
break;
case USB_REQ_SET_ADDRESS:
- oz_trace("USB_REQ_SET_ADDRESS - cnf\n");
+ oz_dbg(ON, "USB_REQ_SET_ADDRESS - cnf\n");
break;
case USB_REQ_GET_DESCRIPTOR:
- oz_trace("USB_REQ_GET_DESCRIPTOR - cnf\n");
+ oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
break;
case USB_REQ_SET_DESCRIPTOR:
- oz_trace("USB_REQ_SET_DESCRIPTOR - cnf\n");
+ oz_dbg(ON, "USB_REQ_SET_DESCRIPTOR - cnf\n");
break;
case USB_REQ_GET_CONFIGURATION:
- oz_trace("USB_REQ_GET_CONFIGURATION - cnf\n");
+ oz_dbg(ON, "USB_REQ_GET_CONFIGURATION - cnf\n");
break;
case USB_REQ_SET_CONFIGURATION:
- oz_trace("USB_REQ_SET_CONFIGURATION - cnf\n");
+ oz_dbg(ON, "USB_REQ_SET_CONFIGURATION - cnf\n");
break;
case USB_REQ_GET_INTERFACE:
- oz_trace("USB_REQ_GET_INTERFACE - cnf\n");
+ oz_dbg(ON, "USB_REQ_GET_INTERFACE - cnf\n");
break;
case USB_REQ_SET_INTERFACE:
- oz_trace("USB_REQ_SET_INTERFACE - cnf\n");
+ oz_dbg(ON, "USB_REQ_SET_INTERFACE - cnf\n");
break;
case USB_REQ_SYNCH_FRAME:
- oz_trace("USB_REQ_SYNCH_FRAME - cnf\n");
+ oz_dbg(ON, "USB_REQ_SYNCH_FRAME - cnf\n");
break;
}
}
-#else
-#define oz_display_conf_type(__x)
-#endif /* WANT_TRACE */
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static void oz_hcd_complete_set_config(struct oz_port *port, struct urb *urb,
@@ -850,6 +892,7 @@ static void oz_hcd_complete_set_config(struct oz_port *port, struct urb *urb,
{
int rc = 0;
struct usb_hcd *hcd = port->ozhcd->hcd;
+
if (rcode == 0) {
port->config_num = config_num;
oz_clean_endpoints_for_config(hcd, port);
@@ -860,9 +903,10 @@ static void oz_hcd_complete_set_config(struct oz_port *port, struct urb *urb,
} else {
rc = -ENOMEM;
}
- oz_complete_urb(hcd, urb, rc, 0);
+ oz_complete_urb(hcd, urb, rc);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static void oz_hcd_complete_set_interface(struct oz_port *port, struct urb *urb,
@@ -870,10 +914,11 @@ static void oz_hcd_complete_set_interface(struct oz_port *port, struct urb *urb,
{
struct usb_hcd *hcd = port->ozhcd->hcd;
int rc = 0;
- if (rcode == 0) {
+
+ if ((rcode == 0) && (port->config_num > 0)) {
struct usb_host_config *config;
struct usb_host_interface *intf;
- oz_trace("Set interface %d alt %d\n", if_num, alt);
+ oz_dbg(ON, "Set interface %d alt %d\n", if_num, alt);
oz_clean_endpoints_for_interface(hcd, port, if_num);
config = &urb->dev->config[port->config_num-1];
intf = &config->intf_cache[if_num]->altsetting[alt];
@@ -885,9 +930,10 @@ static void oz_hcd_complete_set_interface(struct oz_port *port, struct urb *urb,
} else {
rc = -ENOMEM;
}
- oz_complete_urb(hcd, urb, rc, 0);
+ oz_complete_urb(hcd, urb, rc);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data,
@@ -900,10 +946,10 @@ void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data,
unsigned windex;
unsigned wvalue;
- oz_trace("oz_hcd_control_cnf rcode=%u len=%d\n", rcode, data_len);
+ oz_dbg(ON, "oz_hcd_control_cnf rcode=%u len=%d\n", rcode, data_len);
urb = oz_find_urb_by_id(port, 0, req_id);
if (!urb) {
- oz_trace("URB not found\n");
+ oz_dbg(ON, "URB not found\n");
return;
}
setup = (struct usb_ctrlrequest *)urb->setup_packet;
@@ -922,12 +968,12 @@ void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data,
(u8)windex, (u8)wvalue);
break;
default:
- oz_complete_urb(hcd, urb, 0, 0);
+ oz_complete_urb(hcd, urb, 0);
}
} else {
int copy_len;
- oz_trace("VENDOR-CLASS - cnf\n");
+ oz_dbg(ON, "VENDOR-CLASS - cnf\n");
if (data_len) {
if (data_len <= urb->transfer_buffer_length)
copy_len = data_len;
@@ -936,10 +982,11 @@ void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode, const u8 *data,
memcpy(urb->transfer_buffer, data, copy_len);
urb->actual_length = copy_len;
}
- oz_complete_urb(hcd, urb, 0, 0);
+ oz_complete_urb(hcd, urb, 0);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static int oz_hcd_buffer_data(struct oz_endpoint *ep, const u8 *data,
@@ -947,13 +994,14 @@ static int oz_hcd_buffer_data(struct oz_endpoint *ep, const u8 *data,
{
int space;
int copy_len;
+
if (!ep->buffer)
return -1;
space = ep->out_ix-ep->in_ix-1;
if (space < 0)
space += ep->buffer_size;
if (space < (data_len+1)) {
- oz_trace("Buffer full\n");
+ oz_dbg(ON, "Buffer full\n");
return -1;
}
ep->buffer[ep->in_ix] = (u8)data_len;
@@ -975,7 +1023,8 @@ static int oz_hcd_buffer_data(struct oz_endpoint *ep, const u8 *data,
ep->buffered_units++;
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
void oz_hcd_data_ind(void *hport, u8 endpoint, const u8 *data, int data_len)
@@ -983,6 +1032,7 @@ void oz_hcd_data_ind(void *hport, u8 endpoint, const u8 *data, int data_len)
struct oz_port *port = (struct oz_port *)hport;
struct oz_endpoint *ep;
struct oz_hcd *ozhcd = port->ozhcd;
+
spin_lock_bh(&ozhcd->hcd_lock);
ep = port->in_ep[endpoint & USB_ENDPOINT_NUMBER_MASK];
if (ep == NULL)
@@ -1006,10 +1056,10 @@ void oz_hcd_data_ind(void *hport, u8 endpoint, const u8 *data, int data_len)
copy_len = urb->transfer_buffer_length;
memcpy(urb->transfer_buffer, data, copy_len);
urb->actual_length = copy_len;
- oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0);
return;
} else {
- oz_trace("buffering frame as URB is not available\n");
+ oz_dbg(ON, "buffering frame as URB is not available\n");
oz_hcd_buffer_data(ep, data, data_len);
}
break;
@@ -1020,14 +1070,16 @@ void oz_hcd_data_ind(void *hport, u8 endpoint, const u8 *data, int data_len)
done:
spin_unlock_bh(&ozhcd->hcd_lock);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: unknown
*/
static inline int oz_usb_get_frame_number(void)
{
- return jiffies_to_msecs(get_jiffies_64());
+ return atomic_inc_return(&g_usb_frame_number);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_hcd_heartbeat(void *hport)
@@ -1041,7 +1093,9 @@ int oz_hcd_heartbeat(void *hport)
struct list_head *n;
struct urb *urb;
struct oz_endpoint *ep;
- unsigned long now = jiffies;
+ struct timespec ts, delta;
+
+ getrawmonotonic(&ts);
INIT_LIST_HEAD(&xfr_list);
/* Check the OUT isoc endpoints to see if any URB data can be sent.
*/
@@ -1050,10 +1104,11 @@ int oz_hcd_heartbeat(void *hport)
ep = ep_from_link(e);
if (ep->credit < 0)
continue;
- ep->credit += jiffies_to_msecs(now - ep->last_jiffies);
+ delta = timespec_sub(ts, ep->timestamp);
+ ep->credit += div_u64(timespec_to_ns(&delta), NSEC_PER_MSEC);
if (ep->credit > ep->credit_ceiling)
ep->credit = ep->credit_ceiling;
- ep->last_jiffies = now;
+ ep->timestamp = ts;
while (ep->credit && !list_empty(&ep->urb_list)) {
urbl = list_first_entry(&ep->urb_list,
struct oz_urb_link, link);
@@ -1061,6 +1116,8 @@ int oz_hcd_heartbeat(void *hport)
if ((ep->credit + 1) < urb->number_of_packets)
break;
ep->credit -= urb->number_of_packets;
+ if (ep->credit < 0)
+ ep->credit = 0;
list_move_tail(&urbl->link, &xfr_list);
}
}
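
For illustration (not from the driver): the two hunks above move the OUT-isochronous credit accounting from jiffies onto a raw monotonic timespec. Credit accrues at one unit per elapsed millisecond, capped at credit_ceiling, and sending an isoc URB costs number_of_packets units. A self-contained sketch of the elapsed-millisecond step, using plain timespec values in place of getrawmonotonic() and timespec_sub()/div_u64():

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define NSEC_PER_MSEC 1000000LL

/* Whole milliseconds elapsed between 'last' and 'now', i.e. the amount
 * added to ep->credit on each heartbeat in the hunk above.
 */
static int64_t credit_delta(const struct timespec *now,
			    const struct timespec *last)
{
	int64_t ns = (int64_t)(now->tv_sec - last->tv_sec) * 1000000000LL +
		     (now->tv_nsec - last->tv_nsec);

	return ns / NSEC_PER_MSEC;
}

int main(void)
{
	struct timespec last = { .tv_sec = 10, .tv_nsec = 500000000 };
	struct timespec now  = { .tv_sec = 10, .tv_nsec = 508000000 };

	/* 8 ms elapsed -> 8 units of isoc credit accrued. */
	printf("credit += %lld\n", (long long)credit_delta(&now, &last));
	return 0;
}
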
@@ -1068,16 +1125,14 @@ int oz_hcd_heartbeat(void *hport)
/* Send to PD and complete URBs.
*/
list_for_each_safe(e, n, &xfr_list) {
- unsigned long t;
urbl = container_of(e, struct oz_urb_link, link);
urb = urbl->urb;
- t = urbl->submit_jiffies;
list_del_init(e);
urb->error_count = 0;
urb->start_frame = oz_usb_get_frame_number();
oz_usb_send_isoc(port->hpd, urbl->ep_num, urb);
oz_free_urb_link(urbl);
- oz_complete_urb(port->ozhcd->hcd, urb, 0, t);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
/* Check the IN isoc endpoints to see if any URBs can be completed.
*/
@@ -1088,13 +1143,14 @@ int oz_hcd_heartbeat(void *hport)
if (ep->buffered_units >= OZ_IN_BUFFERING_UNITS) {
ep->flags &= ~OZ_F_EP_BUFFERING;
ep->credit = 0;
- ep->last_jiffies = now;
+ ep->timestamp = ts;
ep->start_frame = 0;
}
continue;
}
- ep->credit += jiffies_to_msecs(now - ep->last_jiffies);
- ep->last_jiffies = now;
+ delta = timespec_sub(ts, ep->timestamp);
+ ep->credit += div_u64(timespec_to_ns(&delta), NSEC_PER_MSEC);
+ ep->timestamp = ts;
while (!list_empty(&ep->urb_list)) {
struct oz_urb_link *urbl =
list_first_entry(&ep->urb_list,
@@ -1103,7 +1159,7 @@ int oz_hcd_heartbeat(void *hport)
int len = 0;
int copy_len;
int i;
- if ((ep->credit + 1) < urb->number_of_packets)
+ if (ep->credit < urb->number_of_packets)
break;
if (ep->buffered_units < urb->number_of_packets)
break;
@@ -1149,7 +1205,7 @@ int oz_hcd_heartbeat(void *hport)
urb = urbl->urb;
list_del_init(e);
oz_free_urb_link(urbl);
- oz_complete_urb(port->ozhcd->hcd, urb, 0, 0);
+ oz_complete_urb(port->ozhcd->hcd, urb, 0);
}
/* Check if there are any ep0 requests that have timed out.
* If so resent to PD.
@@ -1161,11 +1217,12 @@ int oz_hcd_heartbeat(void *hport)
spin_lock_bh(&ozhcd->hcd_lock);
list_for_each_safe(e, n, &ep->urb_list) {
urbl = container_of(e, struct oz_urb_link, link);
- if (time_after(now, urbl->submit_jiffies+HZ/2)) {
- oz_trace("%ld: Request 0x%p timeout\n",
- now, urbl->urb);
- urbl->submit_jiffies = now;
+ if (urbl->submit_counter > EP0_TIMEOUT_COUNTER) {
+ oz_dbg(ON, "Request 0x%p timeout\n", urbl->urb);
list_move_tail(e, &xfr_list);
+ urbl->submit_counter = 0;
+ } else {
+ urbl->submit_counter++;
}
}
if (!list_empty(&ep->urb_list))
@@ -1175,14 +1232,15 @@ int oz_hcd_heartbeat(void *hport)
while (e != &xfr_list) {
urbl = container_of(e, struct oz_urb_link, link);
e = e->next;
- oz_trace("Resending request to PD.\n");
+ oz_dbg(ON, "Resending request to PD\n");
oz_process_ep0_urb(ozhcd, urbl->urb, GFP_ATOMIC);
oz_free_urb_link(urbl);
}
}
return rc;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
@@ -1193,7 +1251,10 @@ static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
int i;
int if_ix = intf->desc.bInterfaceNumber;
int request_heartbeat = 0;
- oz_trace("interface[%d] = %p\n", if_ix, intf);
+
+ oz_dbg(ON, "interface[%d] = %p\n", if_ix, intf);
+ if (if_ix >= port->num_iface || port->iface == NULL)
+ return -ENOMEM;
for (i = 0; i < intf->desc.bNumEndpoints; i++) {
struct usb_host_endpoint *hep = &intf->endpoint[i];
u8 ep_addr = hep->desc.bEndpointAddress;
@@ -1201,20 +1262,20 @@ static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
struct oz_endpoint *ep;
int buffer_size = 0;
- oz_trace("%d bEndpointAddress = %x\n", i, ep_addr);
+ oz_dbg(ON, "%d bEndpointAddress = %x\n", i, ep_addr);
if (ep_addr & USB_ENDPOINT_DIR_MASK) {
switch (hep->desc.bmAttributes &
USB_ENDPOINT_XFERTYPE_MASK) {
case USB_ENDPOINT_XFER_ISOC:
- buffer_size = 24*1024;
+ buffer_size = OZ_EP_BUFFER_SIZE_ISOC;
break;
case USB_ENDPOINT_XFER_INT:
- buffer_size = 128;
+ buffer_size = OZ_EP_BUFFER_SIZE_INT;
break;
}
}
- ep = oz_ep_alloc(mem_flags, buffer_size);
+ ep = oz_ep_alloc(buffer_size, mem_flags);
if (!ep) {
oz_clean_endpoints_for_interface(hcd, port, if_ix);
return -ENOMEM;
@@ -1223,8 +1284,8 @@ static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
ep->ep_num = ep_num;
if ((ep->attrib & USB_ENDPOINT_XFERTYPE_MASK)
== USB_ENDPOINT_XFER_ISOC) {
- oz_trace("wMaxPacketSize = %d\n",
- usb_endpoint_maxp(&hep->desc));
+ oz_dbg(ON, "wMaxPacketSize = %d\n",
+ usb_endpoint_maxp(&hep->desc));
ep->credit_ceiling = 200;
if (ep_addr & USB_ENDPOINT_DIR_MASK) {
ep->flags |= OZ_F_EP_BUFFERING;
@@ -1259,7 +1320,8 @@ static int oz_build_endpoints_for_interface(struct usb_hcd *hcd,
}
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
@@ -1270,7 +1332,7 @@ static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
int i;
struct list_head ep_list;
- oz_trace("Deleting endpoints for interface %d\n", if_ix);
+ oz_dbg(ON, "Deleting endpoints for interface %d\n", if_ix);
if (if_ix >= port->num_iface)
return;
INIT_LIST_HEAD(&ep_list);
@@ -1304,7 +1366,8 @@ static void oz_clean_endpoints_for_interface(struct usb_hcd *hcd,
oz_ep_free(port, ep);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static int oz_build_endpoints_for_config(struct usb_hcd *hcd,
@@ -1314,6 +1377,7 @@ static int oz_build_endpoints_for_config(struct usb_hcd *hcd,
struct oz_hcd *ozhcd = port->ozhcd;
int i;
int num_iface = config->desc.bNumInterfaces;
+
if (num_iface) {
struct oz_interface *iface;
@@ -1338,7 +1402,8 @@ fail:
oz_clean_endpoints_for_config(hcd, port);
return -1;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
@@ -1346,25 +1411,28 @@ static void oz_clean_endpoints_for_config(struct usb_hcd *hcd,
{
struct oz_hcd *ozhcd = port->ozhcd;
int i;
- oz_trace("Deleting endpoints for configuration.\n");
+
+ oz_dbg(ON, "Deleting endpoints for configuration\n");
for (i = 0; i < port->num_iface; i++)
oz_clean_endpoints_for_interface(hcd, port, i);
spin_lock_bh(&ozhcd->hcd_lock);
if (port->iface) {
- oz_trace("Freeing interfaces object.\n");
+ oz_dbg(ON, "Freeing interfaces object\n");
kfree(port->iface);
port->iface = NULL;
}
port->num_iface = 0;
spin_unlock_bh(&ozhcd->hcd_lock);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
static void *oz_claim_hpd(struct oz_port *port)
{
- void *hpd = NULL;
+ void *hpd;
struct oz_hcd *ozhcd = port->ozhcd;
+
spin_lock_bh(&ozhcd->hcd_lock);
hpd = port->hpd;
if (hpd)
@@ -1372,7 +1440,8 @@ static void *oz_claim_hpd(struct oz_port *port)
spin_unlock_bh(&ozhcd->hcd_lock);
return hpd;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
@@ -1382,7 +1451,7 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
unsigned windex;
unsigned wvalue;
unsigned wlength;
- void *hpd = NULL;
+ void *hpd;
u8 req_id;
int rc = 0;
unsigned complete = 0;
@@ -1390,7 +1459,7 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
int port_ix = -1;
struct oz_port *port = NULL;
- oz_trace2(OZ_TRACE_URB, "%lu: oz_process_ep0_urb(%p)\n", jiffies, urb);
+ oz_dbg(URB, "[%s]:(%p)\n", __func__, urb);
port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
if (port_ix < 0) {
rc = -EPIPE;
@@ -1399,8 +1468,8 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
port = &ozhcd->ports[port_ix];
if (((port->flags & OZ_PORT_F_PRESENT) == 0)
|| (port->flags & OZ_PORT_F_DYING)) {
- oz_trace("Refusing URB port_ix = %d devnum = %d\n",
- port_ix, urb->dev->devnum);
+ oz_dbg(ON, "Refusing URB port_ix = %d devnum = %d\n",
+ port_ix, urb->dev->devnum);
rc = -EPIPE;
goto out;
}
@@ -1411,17 +1480,16 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
windex = le16_to_cpu(setup->wIndex);
wvalue = le16_to_cpu(setup->wValue);
wlength = le16_to_cpu(setup->wLength);
- oz_trace2(OZ_TRACE_CTRL_DETAIL, "bRequestType = %x\n",
- setup->bRequestType);
- oz_trace2(OZ_TRACE_CTRL_DETAIL, "bRequest = %x\n", setup->bRequest);
- oz_trace2(OZ_TRACE_CTRL_DETAIL, "wValue = %x\n", wvalue);
- oz_trace2(OZ_TRACE_CTRL_DETAIL, "wIndex = %x\n", windex);
- oz_trace2(OZ_TRACE_CTRL_DETAIL, "wLength = %x\n", wlength);
+ oz_dbg(CTRL_DETAIL, "bRequestType = %x\n", setup->bRequestType);
+ oz_dbg(CTRL_DETAIL, "bRequest = %x\n", setup->bRequest);
+ oz_dbg(CTRL_DETAIL, "wValue = %x\n", wvalue);
+ oz_dbg(CTRL_DETAIL, "wIndex = %x\n", windex);
+ oz_dbg(CTRL_DETAIL, "wLength = %x\n", wlength);
req_id = port->next_req_id++;
hpd = oz_claim_hpd(port);
if (hpd == NULL) {
- oz_trace("Cannot claim port\n");
+ oz_dbg(ON, "Cannot claim port\n");
rc = -EPIPE;
goto out;
}
@@ -1431,30 +1499,31 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
*/
switch (setup->bRequest) {
case USB_REQ_GET_DESCRIPTOR:
- oz_trace("USB_REQ_GET_DESCRIPTOR - req\n");
+ oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - req\n");
break;
case USB_REQ_SET_ADDRESS:
- oz_trace("USB_REQ_SET_ADDRESS - req\n");
- oz_trace("Port %d address is 0x%x\n", ozhcd->conn_port,
- (u8)le16_to_cpu(setup->wValue));
+ oz_dbg(ON, "USB_REQ_SET_ADDRESS - req\n");
+ oz_dbg(ON, "Port %d address is 0x%x\n",
+ ozhcd->conn_port,
+ (u8)le16_to_cpu(setup->wValue));
spin_lock_bh(&ozhcd->hcd_lock);
if (ozhcd->conn_port >= 0) {
ozhcd->ports[ozhcd->conn_port].bus_addr =
(u8)le16_to_cpu(setup->wValue);
- oz_trace("Clearing conn_port\n");
+ oz_dbg(ON, "Clearing conn_port\n");
ozhcd->conn_port = -1;
}
spin_unlock_bh(&ozhcd->hcd_lock);
complete = 1;
break;
case USB_REQ_SET_CONFIGURATION:
- oz_trace("USB_REQ_SET_CONFIGURATION - req\n");
+ oz_dbg(ON, "USB_REQ_SET_CONFIGURATION - req\n");
break;
case USB_REQ_GET_CONFIGURATION:
/* We short circuit this case and reply directly since
* we have the selected configuration number cached.
*/
- oz_trace("USB_REQ_GET_CONFIGURATION - reply now\n");
+ oz_dbg(ON, "USB_REQ_GET_CONFIGURATION - reply now\n");
if (urb->transfer_buffer_length >= 1) {
urb->actual_length = 1;
*((u8 *)urb->transfer_buffer) =
@@ -1468,20 +1537,20 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
/* We short circuit this case and reply directly since
* we have the selected interface alternative cached.
*/
- oz_trace("USB_REQ_GET_INTERFACE - reply now\n");
+ oz_dbg(ON, "USB_REQ_GET_INTERFACE - reply now\n");
if (urb->transfer_buffer_length >= 1) {
urb->actual_length = 1;
*((u8 *)urb->transfer_buffer) =
port->iface[(u8)windex].alt;
- oz_trace("interface = %d alt = %d\n",
- windex, port->iface[(u8)windex].alt);
+ oz_dbg(ON, "interface = %d alt = %d\n",
+ windex, port->iface[(u8)windex].alt);
complete = 1;
} else {
rc = -EPIPE;
}
break;
case USB_REQ_SET_INTERFACE:
- oz_trace("USB_REQ_SET_INTERFACE - req\n");
+ oz_dbg(ON, "USB_REQ_SET_INTERFACE - req\n");
break;
}
}
@@ -1512,13 +1581,14 @@ static void oz_process_ep0_urb(struct oz_hcd *ozhcd, struct urb *urb,
oz_usb_put(hpd);
out:
if (rc || complete) {
- oz_trace("Completing request locally\n");
- oz_complete_urb(ozhcd->hcd, urb, rc, 0);
+ oz_dbg(ON, "Completing request locally\n");
+ oz_complete_urb(ozhcd->hcd, urb, rc);
} else {
oz_usb_request_heartbeat(port->hpd);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
static int oz_urb_process(struct oz_hcd *ozhcd, struct urb *urb)
@@ -1526,6 +1596,7 @@ static int oz_urb_process(struct oz_hcd *ozhcd, struct urb *urb)
int rc = 0;
struct oz_port *port = urb->hcpriv;
u8 ep_addr;
+
/* When we are paranoid we keep a list of urbs which we check against
* before handing one back. This is just for debugging during
* development and should be turned off in the released driver.
@@ -1551,7 +1622,8 @@ static int oz_urb_process(struct oz_hcd *ozhcd, struct urb *urb)
}
return rc;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
static void oz_urb_process_tasklet(unsigned long unused)
@@ -1560,6 +1632,7 @@ static void oz_urb_process_tasklet(unsigned long unused)
struct urb *urb;
struct oz_hcd *ozhcd = oz_hcd_claim();
int rc = 0;
+
if (ozhcd == NULL)
return;
/* This is called from a tasklet so is in softirq context but the urb
@@ -1577,13 +1650,14 @@ static void oz_urb_process_tasklet(unsigned long unused)
oz_free_urb_link(urbl);
rc = oz_urb_process(ozhcd, urb);
if (rc)
- oz_complete_urb(ozhcd->hcd, urb, rc, 0);
+ oz_complete_urb(ozhcd->hcd, urb, rc);
spin_lock_irqsave(&g_tasklet_lock, irq_state);
}
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
oz_hcd_put(ozhcd);
}
-/*------------------------------------------------------------------------------
+
+/*
* This function searches for the urb in any of the lists it could be in.
* If it is found it is removed from the list and completed. If the urb is
* being processed then it won't be in a list so won't be found. However, the
@@ -1599,13 +1673,14 @@ static void oz_urb_cancel(struct oz_port *port, u8 ep_num, struct urb *urb)
struct oz_hcd *ozhcd;
unsigned long irq_state;
u8 ix;
+
if (port == NULL) {
- oz_trace("ERRORERROR: oz_urb_cancel(%p) port is null\n", urb);
+ oz_dbg(ON, "%s: ERROR: (%p) port is null\n", __func__, urb);
return;
}
ozhcd = port->ozhcd;
if (ozhcd == NULL) {
- oz_trace("ERRORERROR: oz_urb_cancel(%p) ozhcd is null\n", urb);
+		oz_dbg(ON, "%s: ERROR: (%p) ozhcd is null\n", __func__, urb);
return;
}
@@ -1630,7 +1705,7 @@ static void oz_urb_cancel(struct oz_port *port, u8 ep_num, struct urb *urb)
urbl = container_of(e, struct oz_urb_link, link);
if (urbl->urb == urb) {
list_del(e);
- oz_trace("Found urb in orphanage\n");
+ oz_dbg(ON, "Found urb in orphanage\n");
goto out;
}
}
@@ -1646,10 +1721,11 @@ out2:
if (urbl) {
urb->actual_length = 0;
oz_free_urb_link(urbl);
- oz_complete_urb(ozhcd->hcd, urb, -EPIPE, 0);
+ oz_complete_urb(ozhcd->hcd, urb, -EPIPE);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
static void oz_urb_cancel_tasklet(unsigned long unused)
@@ -1657,6 +1733,7 @@ static void oz_urb_cancel_tasklet(unsigned long unused)
unsigned long irq_state;
struct urb *urb;
struct oz_hcd *ozhcd = oz_hcd_claim();
+
if (ozhcd == NULL)
return;
spin_lock_irqsave(&g_tasklet_lock, irq_state);
@@ -1675,7 +1752,8 @@ static void oz_urb_cancel_tasklet(unsigned long unused)
spin_unlock_irqrestore(&g_tasklet_lock, irq_state);
oz_hcd_put(ozhcd);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: unknown
*/
static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status)
@@ -1686,37 +1764,38 @@ static void oz_hcd_clear_orphanage(struct oz_hcd *ozhcd, int status)
urbl = list_first_entry(&ozhcd->orphanage,
struct oz_urb_link, link);
list_del(&urbl->link);
- oz_complete_urb(ozhcd->hcd, urbl->urb, status, 0);
+ oz_complete_urb(ozhcd->hcd, urbl->urb, status);
oz_free_urb_link(urbl);
}
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: unknown
*/
static int oz_hcd_start(struct usb_hcd *hcd)
{
- oz_trace("oz_hcd_start()\n");
hcd->power_budget = 200;
hcd->state = HC_STATE_RUNNING;
hcd->uses_new_polling = 1;
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: unknown
*/
static void oz_hcd_stop(struct usb_hcd *hcd)
{
- oz_trace("oz_hcd_stop()\n");
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: unknown
*/
static void oz_hcd_shutdown(struct usb_hcd *hcd)
{
- oz_trace("oz_hcd_shutdown()\n");
}
-/*------------------------------------------------------------------------------
+
+/*
* Called to queue an urb for the device.
* This function should return a non-zero error code if it fails the urb but
* should not call usb_hcd_giveback_urb().
@@ -1726,21 +1805,19 @@ static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
- int rc = 0;
+ int rc;
int port_ix;
struct oz_port *port;
unsigned long irq_state;
struct oz_urb_link *urbl;
- oz_trace2(OZ_TRACE_URB, "%lu: oz_hcd_urb_enqueue(%p)\n",
- jiffies, urb);
+
+ oz_dbg(URB, "%s: (%p)\n", __func__, urb);
if (unlikely(ozhcd == NULL)) {
- oz_trace2(OZ_TRACE_URB, "%lu: Refused urb(%p) not ozhcd.\n",
- jiffies, urb);
+ oz_dbg(URB, "Refused urb(%p) not ozhcd\n", urb);
return -EPIPE;
}
if (unlikely(hcd->state != HC_STATE_RUNNING)) {
- oz_trace2(OZ_TRACE_URB, "%lu: Refused urb(%p) not running.\n",
- jiffies, urb);
+ oz_dbg(URB, "Refused urb(%p) not running\n", urb);
return -EPIPE;
}
port_ix = oz_get_port_from_addr(ozhcd, urb->dev->devnum);
@@ -1749,9 +1826,10 @@ static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
port = &ozhcd->ports[port_ix];
if (port == NULL)
return -EPIPE;
- if ((port->flags & OZ_PORT_F_PRESENT) == 0) {
- oz_trace("Refusing URB port_ix = %d devnum = %d\n",
- port_ix, urb->dev->devnum);
+ if (!(port->flags & OZ_PORT_F_PRESENT) ||
+ (port->flags & OZ_PORT_F_CHANGED)) {
+ oz_dbg(ON, "Refusing URB port_ix = %d devnum = %d\n",
+ port_ix, urb->dev->devnum);
return -EPIPE;
}
urb->hcpriv = port;
@@ -1774,14 +1852,16 @@ static int oz_hcd_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
atomic_inc(&g_pending_urbs);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
struct urb *urb)
{
- struct oz_urb_link *urbl = NULL;
+ struct oz_urb_link *urbl;
struct list_head *e;
+
if (unlikely(ep == NULL))
return NULL;
list_for_each(e, &ep->urb_list) {
@@ -1798,17 +1878,19 @@ static struct oz_urb_link *oz_remove_urb(struct oz_endpoint *ep,
}
return NULL;
}
-/*------------------------------------------------------------------------------
+
+/*
* Called to dequeue a previously submitted urb for the device.
* Context: any
*/
static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
- struct oz_urb_link *urbl = NULL;
+ struct oz_urb_link *urbl;
int rc;
unsigned long irq_state;
- oz_trace2(OZ_TRACE_URB, "%lu: oz_hcd_urb_dequeue(%p)\n", jiffies, urb);
+
+ oz_dbg(URB, "%s: (%p)\n", __func__, urb);
urbl = oz_alloc_urb_link();
if (unlikely(urbl == NULL))
return -ENOMEM;
@@ -1838,31 +1920,33 @@ static int oz_hcd_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
}
return rc;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: unknown
*/
static void oz_hcd_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
- oz_trace("oz_hcd_endpoint_disable\n");
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: unknown
*/
static void oz_hcd_endpoint_reset(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
- oz_trace("oz_hcd_endpoint_reset\n");
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: unknown
*/
static int oz_hcd_get_frame_number(struct usb_hcd *hcd)
{
- oz_trace("oz_hcd_get_frame_number\n");
+ oz_dbg(ON, "oz_hcd_get_frame_number\n");
return oz_usb_get_frame_number();
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
* This is called as a consquence of us calling usb_hcd_poll_rh_status() and we
* always do that in softirq context.
@@ -1872,27 +1956,33 @@ static int oz_hcd_hub_status_data(struct usb_hcd *hcd, char *buf)
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
int i;
- oz_trace2(OZ_TRACE_HUB, "oz_hcd_hub_status_data()\n");
buf[0] = 0;
+ buf[1] = 0;
spin_lock_bh(&ozhcd->hcd_lock);
for (i = 0; i < OZ_NB_PORTS; i++) {
if (ozhcd->ports[i].flags & OZ_PORT_F_CHANGED) {
- oz_trace2(OZ_TRACE_HUB, "Port %d changed\n", i);
+ oz_dbg(HUB, "Port %d changed\n", i);
ozhcd->ports[i].flags &= ~OZ_PORT_F_CHANGED;
- buf[0] |= 1<<(i+1);
+ if (i < 7)
+ buf[0] |= 1 << (i + 1);
+ else
+ buf[1] |= 1 << (i - 7);
}
}
spin_unlock_bh(&ozhcd->hcd_lock);
- return buf[0] ? 1 : 0;
+ if (buf[0] != 0 || buf[1] != 0)
+ return 2;
+ else
+ return 0;
}
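
The reworked oz_hcd_hub_status_data() above returns a two-byte port-change bitmap: bit 0 of buf[0] is reserved for the hub itself, driver ports 0-6 land in bits 1-7 of buf[0], and ports 7 and above spill over into buf[1], with the function now reporting 2 bytes of status whenever any bit is set. A minimal user-space sketch of the same packing (the fixed port count and the changed[] array are illustrative, not part of the driver):

#include <stdio.h>

#define NB_PORTS 8                              /* assumed port count */

static int pack_change_bitmap(const int changed[NB_PORTS],
                              unsigned char buf[2])
{
        int i;

        buf[0] = 0;
        buf[1] = 0;
        for (i = 0; i < NB_PORTS; i++) {
                if (!changed[i])
                        continue;
                if (i < 7)
                        buf[0] |= 1 << (i + 1); /* hub ports 1..7 */
                else
                        buf[1] |= 1 << (i - 7); /* hub ports 8.. */
        }
        return (buf[0] || buf[1]) ? 2 : 0;      /* bytes of status data */
}

int main(void)
{
        int changed[NB_PORTS] = { 0, 1, 0, 0, 0, 0, 0, 1 };
        unsigned char buf[2];

        /* ports 1 and 7 changed -> prints "2 bytes: 0x04 0x01" */
        printf("%d bytes: 0x%02x 0x%02x\n",
               pack_change_bitmap(changed, buf), buf[0], buf[1]);
        return 0;
}
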
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static void oz_get_hub_descriptor(struct usb_hcd *hcd,
struct usb_hub_descriptor *desc)
{
- oz_trace2(OZ_TRACE_HUB, "GetHubDescriptor\n");
memset(desc, 0, sizeof(*desc));
desc->bDescriptorType = 0x29;
desc->bDescLength = 9;
@@ -1900,7 +1990,8 @@ static void oz_get_hub_descriptor(struct usb_hcd *hcd,
__constant_cpu_to_le16(0x0001);
desc->bNbrPorts = OZ_NB_PORTS;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_set_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
@@ -1911,59 +2002,59 @@ static int oz_set_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
unsigned set_bits = 0;
unsigned clear_bits = 0;
- oz_trace2(OZ_TRACE_HUB, "SetPortFeature\n");
+
if ((port_id < 1) || (port_id > OZ_NB_PORTS))
return -EPIPE;
port = &ozhcd->ports[port_id-1];
switch (wvalue) {
case USB_PORT_FEAT_CONNECTION:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_CONNECTION\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_CONNECTION\n");
break;
case USB_PORT_FEAT_ENABLE:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_ENABLE\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_ENABLE\n");
break;
case USB_PORT_FEAT_SUSPEND:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_SUSPEND\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_SUSPEND\n");
break;
case USB_PORT_FEAT_OVER_CURRENT:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
break;
case USB_PORT_FEAT_RESET:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_RESET\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_RESET\n");
set_bits = USB_PORT_STAT_ENABLE | (USB_PORT_STAT_C_RESET<<16);
clear_bits = USB_PORT_STAT_RESET;
ozhcd->ports[port_id-1].bus_addr = 0;
break;
case USB_PORT_FEAT_POWER:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_POWER\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_POWER\n");
set_bits |= USB_PORT_STAT_POWER;
break;
case USB_PORT_FEAT_LOWSPEED:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_LOWSPEED\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_LOWSPEED\n");
break;
case USB_PORT_FEAT_C_CONNECTION:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_CONNECTION\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_C_CONNECTION\n");
break;
case USB_PORT_FEAT_C_ENABLE:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_ENABLE\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_C_ENABLE\n");
break;
case USB_PORT_FEAT_C_SUSPEND:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_SUSPEND\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_C_SUSPEND\n");
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
break;
case USB_PORT_FEAT_C_RESET:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_RESET\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_C_RESET\n");
break;
case USB_PORT_FEAT_TEST:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_TEST\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_TEST\n");
break;
case USB_PORT_FEAT_INDICATOR:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_INDICATOR\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_INDICATOR\n");
break;
default:
- oz_trace2(OZ_TRACE_HUB, "Other %d\n", wvalue);
+ oz_dbg(HUB, "Other %d\n", wvalue);
break;
}
if (set_bits || clear_bits) {
@@ -1972,11 +2063,11 @@ static int oz_set_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
port->status |= set_bits;
spin_unlock_bh(&port->port_lock);
}
- oz_trace2(OZ_TRACE_HUB, "Port[%d] status = 0x%x\n", port_id,
- port->status);
+ oz_dbg(HUB, "Port[%d] status = 0x%x\n", port_id, port->status);
return err;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_clear_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
@@ -1986,60 +2077,60 @@ static int oz_clear_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
u8 port_id = (u8)windex;
struct oz_hcd *ozhcd = oz_hcd_private(hcd);
unsigned clear_bits = 0;
- oz_trace2(OZ_TRACE_HUB, "ClearPortFeature\n");
+
if ((port_id < 1) || (port_id > OZ_NB_PORTS))
return -EPIPE;
port = &ozhcd->ports[port_id-1];
switch (wvalue) {
case USB_PORT_FEAT_CONNECTION:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_CONNECTION\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_CONNECTION\n");
break;
case USB_PORT_FEAT_ENABLE:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_ENABLE\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_ENABLE\n");
clear_bits = USB_PORT_STAT_ENABLE;
break;
case USB_PORT_FEAT_SUSPEND:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_SUSPEND\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_SUSPEND\n");
break;
case USB_PORT_FEAT_OVER_CURRENT:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_OVER_CURRENT\n");
break;
case USB_PORT_FEAT_RESET:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_RESET\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_RESET\n");
break;
case USB_PORT_FEAT_POWER:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_POWER\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_POWER\n");
clear_bits |= USB_PORT_STAT_POWER;
break;
case USB_PORT_FEAT_LOWSPEED:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_LOWSPEED\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_LOWSPEED\n");
break;
case USB_PORT_FEAT_C_CONNECTION:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_CONNECTION\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_C_CONNECTION\n");
clear_bits = (USB_PORT_STAT_C_CONNECTION << 16);
break;
case USB_PORT_FEAT_C_ENABLE:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_ENABLE\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_C_ENABLE\n");
clear_bits = (USB_PORT_STAT_C_ENABLE << 16);
break;
case USB_PORT_FEAT_C_SUSPEND:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_SUSPEND\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_C_SUSPEND\n");
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_C_OVER_CURRENT\n");
break;
case USB_PORT_FEAT_C_RESET:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_C_RESET\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_C_RESET\n");
clear_bits = (USB_PORT_FEAT_C_RESET << 16);
break;
case USB_PORT_FEAT_TEST:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_TEST\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_TEST\n");
break;
case USB_PORT_FEAT_INDICATOR:
- oz_trace2(OZ_TRACE_HUB, "USB_PORT_FEAT_INDICATOR\n");
+ oz_dbg(HUB, "USB_PORT_FEAT_INDICATOR\n");
break;
default:
- oz_trace2(OZ_TRACE_HUB, "Other %d\n", wvalue);
+ oz_dbg(HUB, "Other %d\n", wvalue);
break;
}
if (clear_bits) {
@@ -2047,37 +2138,40 @@ static int oz_clear_port_feature(struct usb_hcd *hcd, u16 wvalue, u16 windex)
port->status &= ~clear_bits;
spin_unlock_bh(&port->port_lock);
}
- oz_trace2(OZ_TRACE_HUB, "Port[%d] status = 0x%x\n", port_id,
- ozhcd->ports[port_id-1].status);
+ oz_dbg(HUB, "Port[%d] status = 0x%x\n",
+ port_id, ozhcd->ports[port_id-1].status);
return err;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_get_port_status(struct usb_hcd *hcd, u16 windex, char *buf)
{
struct oz_hcd *ozhcd;
- u32 status = 0;
+ u32 status;
+
if ((windex < 1) || (windex > OZ_NB_PORTS))
return -EPIPE;
ozhcd = oz_hcd_private(hcd);
- oz_trace2(OZ_TRACE_HUB, "GetPortStatus windex = %d\n", windex);
+ oz_dbg(HUB, "GetPortStatus windex = %d\n", windex);
status = ozhcd->ports[windex-1].status;
put_unaligned(cpu_to_le32(status), (__le32 *)buf);
- oz_trace2(OZ_TRACE_HUB, "Port[%d] status = %x\n", windex, status);
+ oz_dbg(HUB, "Port[%d] status = %x\n", windex, status);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
u16 windex, char *buf, u16 wlength)
{
int err = 0;
- oz_trace2(OZ_TRACE_HUB, "oz_hcd_hub_control()\n");
+
switch (req_type) {
case ClearHubFeature:
- oz_trace2(OZ_TRACE_HUB, "ClearHubFeature: %d\n", req_type);
+ oz_dbg(HUB, "ClearHubFeature: %d\n", req_type);
break;
case ClearPortFeature:
err = oz_clear_port_feature(hcd, wvalue, windex);
@@ -2086,32 +2180,32 @@ static int oz_hcd_hub_control(struct usb_hcd *hcd, u16 req_type, u16 wvalue,
oz_get_hub_descriptor(hcd, (struct usb_hub_descriptor *)buf);
break;
case GetHubStatus:
- oz_trace2(OZ_TRACE_HUB, "GetHubStatus: req_type = 0x%x\n",
- req_type);
+ oz_dbg(HUB, "GetHubStatus: req_type = 0x%x\n", req_type);
put_unaligned(__constant_cpu_to_le32(0), (__le32 *)buf);
break;
case GetPortStatus:
err = oz_get_port_status(hcd, windex, buf);
break;
case SetHubFeature:
- oz_trace2(OZ_TRACE_HUB, "SetHubFeature: %d\n", req_type);
+ oz_dbg(HUB, "SetHubFeature: %d\n", req_type);
break;
case SetPortFeature:
err = oz_set_port_feature(hcd, wvalue, windex);
break;
default:
- oz_trace2(OZ_TRACE_HUB, "Other: %d\n", req_type);
+ oz_dbg(HUB, "Other: %d\n", req_type);
break;
}
return err;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_hcd_bus_suspend(struct usb_hcd *hcd)
{
struct oz_hcd *ozhcd;
- oz_trace2(OZ_TRACE_HUB, "oz_hcd_hub_suspend()\n");
+
ozhcd = oz_hcd_private(hcd);
spin_lock_bh(&ozhcd->hcd_lock);
hcd->state = HC_STATE_SUSPENDED;
@@ -2119,13 +2213,14 @@ static int oz_hcd_bus_suspend(struct usb_hcd *hcd)
spin_unlock_bh(&ozhcd->hcd_lock);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_hcd_bus_resume(struct usb_hcd *hcd)
{
struct oz_hcd *ozhcd;
- oz_trace2(OZ_TRACE_HUB, "oz_hcd_hub_resume()\n");
+
ozhcd = oz_hcd_private(hcd);
spin_lock_bh(&ozhcd->hcd_lock);
ozhcd->flags &= ~OZ_HDC_F_SUSPENDED;
@@ -2133,13 +2228,12 @@ static int oz_hcd_bus_resume(struct usb_hcd *hcd)
spin_unlock_bh(&ozhcd->hcd_lock);
return 0;
}
-/*------------------------------------------------------------------------------
- */
+
static void oz_plat_shutdown(struct platform_device *dev)
{
- oz_trace("oz_plat_shutdown()\n");
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_plat_probe(struct platform_device *dev)
@@ -2148,10 +2242,10 @@ static int oz_plat_probe(struct platform_device *dev)
int err;
struct usb_hcd *hcd;
struct oz_hcd *ozhcd;
- oz_trace("oz_plat_probe()\n");
+
hcd = usb_create_hcd(&g_oz_hc_drv, &dev->dev, dev_name(&dev->dev));
if (hcd == NULL) {
- oz_trace("Failed to created hcd object OK\n");
+ oz_dbg(ON, "Failed to created hcd object OK\n");
return -ENOMEM;
}
ozhcd = oz_hcd_private(hcd);
@@ -2172,7 +2266,7 @@ static int oz_plat_probe(struct platform_device *dev)
}
err = usb_add_hcd(hcd, 0, 0);
if (err) {
- oz_trace("Failed to add hcd object OK\n");
+ oz_dbg(ON, "Failed to add hcd object OK\n");
usb_put_hcd(hcd);
return -1;
}
@@ -2181,14 +2275,15 @@ static int oz_plat_probe(struct platform_device *dev)
spin_unlock_bh(&g_hcdlock);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: unknown
*/
static int oz_plat_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
struct oz_hcd *ozhcd;
- oz_trace("oz_plat_remove()\n");
+
if (hcd == NULL)
return -1;
ozhcd = oz_hcd_private(hcd);
@@ -2196,42 +2291,45 @@ static int oz_plat_remove(struct platform_device *dev)
if (ozhcd == g_ozhcd)
g_ozhcd = NULL;
spin_unlock_bh(&g_hcdlock);
- oz_trace("Clearing orphanage\n");
+ oz_dbg(ON, "Clearing orphanage\n");
oz_hcd_clear_orphanage(ozhcd, -EPIPE);
- oz_trace("Removing hcd\n");
+ oz_dbg(ON, "Removing hcd\n");
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
oz_empty_link_pool();
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: unknown
*/
static int oz_plat_suspend(struct platform_device *dev, pm_message_t msg)
{
- oz_trace("oz_plat_suspend()\n");
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: unknown
*/
static int oz_plat_resume(struct platform_device *dev)
{
- oz_trace("oz_plat_resume()\n");
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
int oz_hcd_init(void)
{
int err;
+
if (usb_disabled())
return -ENODEV;
tasklet_init(&g_urb_process_tasklet, oz_urb_process_tasklet, 0);
tasklet_init(&g_urb_cancel_tasklet, oz_urb_cancel_tasklet, 0);
err = platform_driver_register(&g_oz_plat_drv);
- oz_trace("platform_driver_register() returned %d\n", err);
+ oz_dbg(ON, "platform_driver_register() returned %d\n", err);
if (err)
goto error;
g_plat_dev = platform_device_alloc(OZ_PLAT_DEV_NAME, -1);
@@ -2239,11 +2337,11 @@ int oz_hcd_init(void)
err = -ENOMEM;
goto error1;
}
- oz_trace("platform_device_alloc() succeeded\n");
+ oz_dbg(ON, "platform_device_alloc() succeeded\n");
err = platform_device_add(g_plat_dev);
if (err)
goto error2;
- oz_trace("platform_device_add() succeeded\n");
+ oz_dbg(ON, "platform_device_add() succeeded\n");
return 0;
error2:
platform_device_put(g_plat_dev);
@@ -2252,17 +2350,19 @@ error1:
error:
tasklet_disable(&g_urb_process_tasklet);
tasklet_disable(&g_urb_cancel_tasklet);
- oz_trace("oz_hcd_init() failed %d\n", err);
+ oz_dbg(ON, "oz_hcd_init() failed %d\n", err);
return err;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
void oz_hcd_term(void)
{
+ msleep(OZ_HUB_DEBOUNCE_TIMEOUT);
tasklet_kill(&g_urb_process_tasklet);
tasklet_kill(&g_urb_cancel_tasklet);
platform_device_unregister(g_plat_dev);
platform_driver_unregister(&g_oz_plat_drv);
- oz_trace("Pending urbs:%d\n", atomic_read(&g_pending_urbs));
+ oz_dbg(ON, "Pending urbs:%d\n", atomic_read(&g_pending_urbs));
}
diff --git a/drivers/staging/ozwpan/ozhcd.h b/drivers/staging/ozwpan/ozhcd.h
index 9b30dfd0997..55e97b1c707 100644
--- a/drivers/staging/ozwpan/ozhcd.h
+++ b/drivers/staging/ozwpan/ozhcd.h
@@ -7,8 +7,8 @@
int oz_hcd_init(void);
void oz_hcd_term(void);
-void *oz_hcd_pd_arrived(void *ctx);
-void oz_hcd_pd_departed(void *ctx);
+struct oz_port *oz_hcd_pd_arrived(void *ctx);
+void oz_hcd_pd_departed(struct oz_port *hport);
void oz_hcd_pd_reset(void *hpd, void *hport);
#endif /* _OZHCD_H */
diff --git a/drivers/staging/ozwpan/ozmain.c b/drivers/staging/ozwpan/ozmain.c
index 51fe9e98c35..d1a5b7a2c16 100644
--- a/drivers/staging/ozwpan/ozmain.c
+++ b/drivers/staging/ozwpan/ozmain.c
@@ -3,6 +3,7 @@
* Released under the GNU General Public License Version 2 (GPLv2).
* -----------------------------------------------------------------------------
*/
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
@@ -10,19 +11,22 @@
#include <linux/netdevice.h>
#include <linux/errno.h>
#include <linux/ieee80211.h>
-#include "ozconfig.h"
+#include "ozdbg.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozcdev.h"
-#include "oztrace.h"
-/*------------------------------------------------------------------------------
+
+unsigned int oz_dbg_mask = OZ_DEFAULT_DBG_MASK;
+
+/*
* The name of the 802.11 mac device. Empty string is the default value but a
* value can be supplied as a parameter to the module. An empty string means
* bind to nothing. '*' means bind to all netcards - this includes non-802.11
* netcards. Bindings can be added later using an IOCTL.
*/
static char *g_net_dev = "";
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int __init ozwpan_init(void)
@@ -33,7 +37,8 @@ static int __init ozwpan_init(void)
oz_apps_init();
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static void __exit ozwpan_exit(void)
@@ -42,8 +47,7 @@ static void __exit ozwpan_exit(void)
oz_apps_term();
oz_cdev_deregister();
}
-/*------------------------------------------------------------------------------
- */
+
module_param(g_net_dev, charp, S_IRUGO);
module_init(ozwpan_init);
module_exit(ozwpan_exit);
diff --git a/drivers/staging/ozwpan/ozpd.c b/drivers/staging/ozwpan/ozpd.c
index d67dff2430a..ab85a724a0e 100644
--- a/drivers/staging/ozwpan/ozpd.c
+++ b/drivers/staging/ozwpan/ozpd.c
@@ -3,28 +3,26 @@
* Released under the GNU General Public License Version 2 (GPLv2).
* -----------------------------------------------------------------------------
*/
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/errno.h>
-#include "ozconfig.h"
+#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
-#include "oztrace.h"
#include "ozcdev.h"
#include "ozusbsvc.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>
-/*------------------------------------------------------------------------------
- */
+
#define OZ_MAX_TX_POOL_SIZE 6
-/*------------------------------------------------------------------------------
- */
+
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
@@ -39,10 +37,12 @@ static void oz_def_app_term(void);
static int oz_def_app_start(struct oz_pd *pd, int resume);
static void oz_def_app_stop(struct oz_pd *pd, int pause);
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);
-/*------------------------------------------------------------------------------
+
+/*
* Counts the uncompleted isoc frames submitted to netcard.
*/
static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
+
/* Application handler functions.
*/
static const struct oz_app_if g_app_if[OZ_APPID_MAX] = {
@@ -82,69 +82,75 @@ static const struct oz_app_if g_app_if[OZ_APPID_MAX] = {
NULL,
OZ_APPID_SERIAL},
};
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static int oz_def_app_init(void)
{
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static void oz_def_app_term(void)
{
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static int oz_def_app_start(struct oz_pd *pd, int resume)
{
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static void oz_def_app_stop(struct oz_pd *pd, int pause)
{
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
{
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
void oz_pd_set_state(struct oz_pd *pd, unsigned state)
{
pd->state = state;
-#ifdef WANT_TRACE
switch (state) {
case OZ_PD_S_IDLE:
- oz_trace("PD State: OZ_PD_S_IDLE\n");
+ oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_IDLE\n");
break;
case OZ_PD_S_CONNECTED:
- oz_trace("PD State: OZ_PD_S_CONNECTED\n");
+ oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_CONNECTED\n");
break;
case OZ_PD_S_STOPPED:
- oz_trace("PD State: OZ_PD_S_STOPPED\n");
+ oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_STOPPED\n");
break;
case OZ_PD_S_SLEEP:
- oz_trace("PD State: OZ_PD_S_SLEEP\n");
+ oz_pd_dbg(pd, ON, "PD State: OZ_PD_S_SLEEP\n");
break;
}
-#endif /* WANT_TRACE */
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
void oz_pd_get(struct oz_pd *pd)
{
atomic_inc(&pd->ref_count);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
void oz_pd_put(struct oz_pd *pd)
@@ -152,12 +158,14 @@ void oz_pd_put(struct oz_pd *pd)
if (atomic_dec_and_test(&pd->ref_count))
oz_pd_destroy(pd);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
{
struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
+
if (pd) {
int i;
atomic_set(&pd->ref_count, 2);
@@ -177,19 +185,34 @@ struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
pd->last_sent_frame = &pd->tx_queue;
spin_lock_init(&pd->stream_lock);
INIT_LIST_HEAD(&pd->stream_list);
+ tasklet_init(&pd->heartbeat_tasklet, oz_pd_heartbeat_handler,
+ (unsigned long)pd);
+ tasklet_init(&pd->timeout_tasklet, oz_pd_timeout_handler,
+ (unsigned long)pd);
+ hrtimer_init(&pd->heartbeat, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_init(&pd->timeout, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ pd->heartbeat.function = oz_pd_heartbeat_event;
+ pd->timeout.function = oz_pd_timeout_event;
}
return pd;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
-void oz_pd_destroy(struct oz_pd *pd)
+static void oz_pd_free(struct work_struct *work)
{
struct list_head *e;
struct oz_tx_frame *f;
struct oz_isoc_stream *st;
struct oz_farewell *fwell;
- oz_trace("Destroying PD\n");
+ struct oz_pd *pd;
+
+ pd = container_of(work, struct oz_pd, workitem);
+ oz_pd_dbg(pd, ON, "Destroying PD\n");
+ /* Disable timer tasklets */
+ tasklet_kill(&pd->heartbeat_tasklet);
+ tasklet_kill(&pd->timeout_tasklet);
/* Delete any streams.
*/
e = pd->stream_list.next;
@@ -228,20 +251,38 @@ void oz_pd_destroy(struct oz_pd *pd)
dev_put(pd->net_dev);
kfree(pd);
}
-/*------------------------------------------------------------------------------
+
+/*
+ * Context: softirq or process
+ */
+void oz_pd_destroy(struct oz_pd *pd)
+{
+ if (hrtimer_active(&pd->timeout))
+ hrtimer_cancel(&pd->timeout);
+ if (hrtimer_active(&pd->heartbeat))
+ hrtimer_cancel(&pd->heartbeat);
+
+ INIT_WORK(&pd->workitem, oz_pd_free);
+ if (!schedule_work(&pd->workitem))
+ oz_pd_dbg(pd, ON, "failed to schedule workitem\n");
+}
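
oz_pd_destroy() above now only cancels the two hrtimers and then queues oz_pd_free() on the system workqueue, so the heavier teardown (tasklet_kill() and the frees) runs in process context rather than in whatever context requested the destroy. A stripped-down sketch of that cancel-then-defer pattern is shown below; the demo_* names are ours and the struct is trimmed to the fields the pattern needs.

#include <linux/kernel.h>
#include <linux/hrtimer.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_obj {
        struct hrtimer timer;
        struct work_struct free_work;
};

static void demo_free_work(struct work_struct *work)
{
        struct demo_obj *obj = container_of(work, struct demo_obj, free_work);

        /* blocking or lengthy cleanup is safe here (process context) */
        kfree(obj);
}

static void demo_destroy(struct demo_obj *obj)
{
        /* stop the timer first so it cannot rearm or touch obj again */
        if (hrtimer_active(&obj->timer))
                hrtimer_cancel(&obj->timer);
        INIT_WORK(&obj->free_work, demo_free_work);
        schedule_work(&obj->free_work);
}
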
+
+/*
* Context: softirq-serialized
*/
int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
{
const struct oz_app_if *ai;
int rc = 0;
- oz_trace("oz_services_start(0x%x) resume(%d)\n", apps, resume);
+
+ oz_pd_dbg(pd, ON, "%s: (0x%x) resume(%d)\n", __func__, apps, resume);
for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
if (apps & (1<<ai->app_id)) {
if (ai->start(pd, resume)) {
rc = -1;
- oz_trace("Unabled to start service %d\n",
- ai->app_id);
+ oz_pd_dbg(pd, ON,
+ "Unable to start service %d\n",
+ ai->app_id);
break;
}
oz_polling_lock_bh();
@@ -253,13 +294,15 @@ int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
}
return rc;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
{
const struct oz_app_if *ai;
- oz_trace("oz_stop_services(0x%x) pause(%d)\n", apps, pause);
+
+ oz_pd_dbg(pd, ON, "%s: (0x%x) pause(%d)\n", __func__, apps, pause);
for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
if (apps & (1<<ai->app_id)) {
oz_polling_lock_bh();
@@ -274,34 +317,38 @@ void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
}
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
{
const struct oz_app_if *ai;
int more = 0;
+
for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
if (ai->heartbeat && (apps & (1<<ai->app_id))) {
if (ai->heartbeat(pd))
more = 1;
}
}
- if (more)
- oz_pd_request_heartbeat(pd);
+ if ((!more) && (hrtimer_active(&pd->heartbeat)))
+ hrtimer_cancel(&pd->heartbeat);
if (pd->mode & OZ_F_ISOC_ANYTIME) {
int count = 8;
while (count-- && (oz_send_isoc_frame(pd) >= 0))
;
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
void oz_pd_stop(struct oz_pd *pd)
{
- u16 stop_apps = 0;
- oz_trace("oz_pd_stop() State = 0x%x\n", pd->state);
+ u16 stop_apps;
+
+ oz_dbg(ON, "oz_pd_stop() State = 0x%x\n", pd->state);
oz_pd_indicate_farewells(pd);
oz_polling_lock_bh();
stop_apps = pd->total_apps;
@@ -314,46 +361,46 @@ void oz_pd_stop(struct oz_pd *pd)
/* Remove from PD list.*/
list_del(&pd->link);
oz_polling_unlock_bh();
- oz_trace("pd ref count = %d\n", atomic_read(&pd->ref_count));
- oz_timer_delete(pd, 0);
+ oz_dbg(ON, "pd ref count = %d\n", atomic_read(&pd->ref_count));
oz_pd_put(pd);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_pd_sleep(struct oz_pd *pd)
{
int do_stop = 0;
- u16 stop_apps = 0;
+ u16 stop_apps;
+
oz_polling_lock_bh();
if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
oz_polling_unlock_bh();
return 0;
}
- if (pd->keep_alive_j && pd->session_id) {
+ if (pd->keep_alive && pd->session_id)
oz_pd_set_state(pd, OZ_PD_S_SLEEP);
- pd->pulse_time_j = jiffies + pd->keep_alive_j;
- oz_trace("Sleep Now %lu until %lu\n",
- jiffies, pd->pulse_time_j);
- } else {
+ else
do_stop = 1;
- }
+
stop_apps = pd->total_apps;
oz_polling_unlock_bh();
if (do_stop) {
oz_pd_stop(pd);
} else {
oz_services_stop(pd, stop_apps, 1);
- oz_timer_add(pd, OZ_TIMER_STOP, jiffies + pd->keep_alive_j, 1);
+ oz_timer_add(pd, OZ_TIMER_STOP, pd->keep_alive);
}
return do_stop;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
{
struct oz_tx_frame *f = NULL;
+
spin_lock_bh(&pd->tx_frame_lock);
if (pd->tx_pool) {
f = container_of(pd->tx_pool, struct oz_tx_frame, link);
@@ -370,7 +417,8 @@ static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
}
return f;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
@@ -384,10 +432,11 @@ static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
} else {
kfree(f);
}
- oz_trace2(OZ_TRACE_TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
- pd->nb_queued_isoc_frames);
+ oz_dbg(TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
+ pd->nb_queued_isoc_frames);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
@@ -402,28 +451,34 @@ static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
spin_unlock_bh(&pd->tx_frame_lock);
kfree(f);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static void oz_set_more_bit(struct sk_buff *skb)
{
struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
+
oz_hdr->control |= OZ_F_MORE_DATA;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
{
struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
+
oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_prepare_frame(struct oz_pd *pd, int empty)
{
struct oz_tx_frame *f;
+
if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
return -1;
if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
@@ -448,7 +503,8 @@ int oz_prepare_frame(struct oz_pd *pd, int empty)
spin_unlock(&pd->tx_frame_lock);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
@@ -458,6 +514,7 @@ static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
struct oz_hdr *oz_hdr;
struct oz_elt *elt;
struct list_head *e;
+
/* Allocate skb with enough space for the lower layers as well
* as the space we need.
*/
@@ -492,13 +549,15 @@ fail:
kfree_skb(skb);
return NULL;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
struct list_head *e;
struct oz_elt_info *ei;
+
e = f->elt_list.next;
while (e != &f->elt_list) {
ei = container_of(e, struct oz_elt_info, link);
@@ -514,7 +573,8 @@ static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
if (pd->elt_buff.free_elts > pd->elt_buff.max_free_elts)
oz_trim_elt_pool(&pd->elt_buff);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
@@ -522,6 +582,7 @@ static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
struct sk_buff *skb;
struct oz_tx_frame *f;
struct list_head *e;
+
spin_lock(&pd->tx_frame_lock);
e = pd->last_sent_frame->next;
if (e == &pd->tx_queue) {
@@ -540,18 +601,16 @@ static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
if ((int)atomic_read(&g_submitted_isoc) <
OZ_MAX_SUBMITTED_ISOC) {
if (dev_queue_xmit(skb) < 0) {
- oz_trace2(OZ_TRACE_TX_FRAMES,
- "Dropping ISOC Frame\n");
+ oz_dbg(TX_FRAMES, "Dropping ISOC Frame\n");
return -1;
}
atomic_inc(&g_submitted_isoc);
- oz_trace2(OZ_TRACE_TX_FRAMES,
- "Sending ISOC Frame, nb_isoc= %d\n",
- pd->nb_queued_isoc_frames);
+ oz_dbg(TX_FRAMES, "Sending ISOC Frame, nb_isoc= %d\n",
+ pd->nb_queued_isoc_frames);
return 0;
} else {
kfree_skb(skb);
- oz_trace2(OZ_TRACE_TX_FRAMES, "Dropping ISOC Frame>\n");
+ oz_dbg(TX_FRAMES, "Dropping ISOC Frame>\n");
return -1;
}
}
@@ -559,17 +618,18 @@ static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
pd->last_sent_frame = e;
skb = oz_build_frame(pd, f);
spin_unlock(&pd->tx_frame_lock);
+ if (!skb)
+ return -1;
if (more_data)
oz_set_more_bit(skb);
- oz_trace2(OZ_TRACE_TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
- if (skb) {
- if (dev_queue_xmit(skb) < 0)
- return -1;
+ oz_dbg(TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
+ if (dev_queue_xmit(skb) < 0)
+ return -1;
- }
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
void oz_send_queued_frames(struct oz_pd *pd, int backlog)
@@ -607,7 +667,8 @@ void oz_send_queued_frames(struct oz_pd *pd, int backlog)
out: oz_prepare_frame(pd, 1);
oz_send_next_queued_frame(pd, 0);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static int oz_send_isoc_frame(struct oz_pd *pd)
@@ -619,6 +680,7 @@ static int oz_send_isoc_frame(struct oz_pd *pd)
struct list_head *e;
struct list_head list;
int total_size = sizeof(struct oz_hdr);
+
INIT_LIST_HEAD(&list);
oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
@@ -627,7 +689,7 @@ static int oz_send_isoc_frame(struct oz_pd *pd)
return 0;
skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
if (skb == NULL) {
- oz_trace("Cannot alloc skb\n");
+ oz_dbg(ON, "Cannot alloc skb\n");
oz_elt_info_free_chain(&pd->elt_buff, &list);
return -1;
}
@@ -655,7 +717,8 @@ static int oz_send_isoc_frame(struct oz_pd *pd)
oz_elt_info_free_chain(&pd->elt_buff, &list);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
@@ -675,8 +738,8 @@ void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
break;
- oz_trace2(OZ_TRACE_TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
- pkt_num, pd->nb_queued_frames);
+ oz_dbg(TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
+ pkt_num, pd->nb_queued_frames);
if (first == NULL)
first = e;
last = e;
@@ -696,7 +759,8 @@ void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
oz_retire_frame(pd, f);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Precondition: stream_lock must be held.
* Context: softirq
*/
@@ -704,6 +768,7 @@ static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
{
struct list_head *e;
struct oz_isoc_stream *st;
+
list_for_each(e, &pd->stream_list) {
st = container_of(e, struct oz_isoc_stream, link);
if (st->ep_num == ep_num)
@@ -711,7 +776,8 @@ static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
}
return NULL;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
@@ -730,7 +796,8 @@ int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
kfree(st);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
static void oz_isoc_stream_free(struct oz_isoc_stream *st)
@@ -738,12 +805,14 @@ static void oz_isoc_stream_free(struct oz_isoc_stream *st)
kfree_skb(st->skb);
kfree(st);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
{
struct oz_isoc_stream *st;
+
spin_lock_bh(&pd->stream_lock);
st = pd_stream_find(pd, ep_num);
if (st)
@@ -753,14 +822,16 @@ int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
oz_isoc_stream_free(st);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: any
*/
static void oz_isoc_destructor(struct sk_buff *skb)
{
atomic_dec(&g_submitted_isoc);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
@@ -771,6 +842,7 @@ int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
struct sk_buff *skb = NULL;
struct oz_hdr *oz_hdr = NULL;
int size = 0;
+
spin_lock_bh(&pd->stream_lock);
st = pd_stream_find(pd, ep_num);
if (st) {
@@ -835,10 +907,20 @@ int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
struct oz_tx_frame *isoc_unit = NULL;
int nb = pd->nb_queued_isoc_frames;
if (nb >= pd->isoc_latency) {
- oz_trace2(OZ_TRACE_TX_FRAMES,
- "Dropping ISOC Unit nb= %d\n",
- nb);
- goto out;
+ struct list_head *e;
+ struct oz_tx_frame *f;
+ oz_dbg(TX_FRAMES, "Dropping ISOC Unit nb= %d\n",
+ nb);
+ spin_lock(&pd->tx_frame_lock);
+ list_for_each(e, &pd->tx_queue) {
+ f = container_of(e, struct oz_tx_frame,
+ link);
+ if (f->skb != NULL) {
+ oz_tx_isoc_free(pd, f);
+ break;
+ }
+ }
+ spin_unlock(&pd->tx_frame_lock);
}
isoc_unit = oz_tx_frame_alloc(pd);
if (isoc_unit == NULL)
@@ -849,9 +931,9 @@ int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
list_add_tail(&isoc_unit->link, &pd->tx_queue);
pd->nb_queued_isoc_frames++;
spin_unlock_bh(&pd->tx_frame_lock);
- oz_trace2(OZ_TRACE_TX_FRAMES,
- "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
- pd->nb_queued_isoc_frames, pd->nb_queued_frames);
+ oz_dbg(TX_FRAMES,
+ "Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
+ pd->nb_queued_isoc_frames, pd->nb_queued_frames);
return 0;
}
@@ -870,45 +952,53 @@ out: kfree_skb(skb);
}
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
void oz_apps_init(void)
{
int i;
+
for (i = 0; i < OZ_APPID_MAX; i++)
if (g_app_if[i].init)
g_app_if[i].init();
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
void oz_apps_term(void)
{
int i;
+
/* Terminate all the apps. */
for (i = 0; i < OZ_APPID_MAX; i++)
if (g_app_if[i].term)
g_app_if[i].term();
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
{
const struct oz_app_if *ai;
+
if (app_id == 0 || app_id > OZ_APPID_MAX)
return;
ai = &g_app_if[app_id-1];
ai->rx(pd, elt);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
void oz_pd_indicate_farewells(struct oz_pd *pd)
{
struct oz_farewell *f;
const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];
+
while (1) {
oz_polling_lock_bh();
if (list_empty(&pd->farewell_list)) {
diff --git a/drivers/staging/ozwpan/ozpd.h b/drivers/staging/ozwpan/ozpd.h
index fbf47cbab8a..12c71295688 100644
--- a/drivers/staging/ozwpan/ozpd.h
+++ b/drivers/staging/ozwpan/ozpd.h
@@ -6,6 +6,7 @@
#ifndef _OZPD_H_
#define _OZPD_H_
+#include <linux/interrupt.h>
#include "ozeltbuf.h"
/* PD state
@@ -47,8 +48,8 @@ struct oz_farewell {
struct list_head link;
u8 ep_num;
u8 index;
- u8 report[1];
u8 len;
+ u8 report[0];
};
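
With the report[] member moved to the end of struct oz_farewell and turned into a trailing flexible array (plus an explicit len field), a single allocation of sizeof(struct oz_farewell) + len covers both the header and the payload, which is why the matching kmalloc() in oz_add_farewell() below drops its old "- 1" correction. A user-space sketch of that layout follows; the struct is trimmed (no list_head) and the names are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct farewell {
        unsigned char ep_num;
        unsigned char index;
        unsigned char len;
        unsigned char report[];         /* C99 spelling of report[0] */
};

static struct farewell *farewell_alloc(const unsigned char *report,
                                       unsigned char len)
{
        /* one allocation holds the fixed header plus the variable payload */
        struct farewell *f = malloc(sizeof(*f) + len);

        if (!f)
                return NULL;
        f->len = len;
        memcpy(f->report, report, len);
        return f;
}

int main(void)
{
        unsigned char data[] = { 0x01, 0x02, 0x03 };
        struct farewell *f = farewell_alloc(data, sizeof(data));

        if (f) {
                printf("header %zu + payload %u bytes\n",
                       sizeof(*f), (unsigned)f->len);
                free(f);
        }
        return 0;
}
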
/* Data structure that holds information on a specific peripheral device (PD).
@@ -68,18 +69,16 @@ struct oz_pd {
u8 isoc_sent;
u32 last_rx_pkt_num;
u32 last_tx_pkt_num;
+ struct timespec last_rx_timestamp;
u32 trigger_pkt_num;
- unsigned long pulse_time_j;
- unsigned long timeout_time_j;
- unsigned long pulse_period_j;
- unsigned long presleep_j;
- unsigned long keep_alive_j;
- unsigned long last_rx_time_j;
+ unsigned long pulse_time;
+ unsigned long pulse_period;
+ unsigned long presleep;
+ unsigned long keep_alive;
struct oz_elt_buf elt_buff;
void *app_ctx[OZ_APPID_MAX];
spinlock_t app_lock[OZ_APPID_MAX];
int max_tx_size;
- u8 heartbeat_requested;
u8 mode;
u8 ms_per_isoc;
unsigned isoc_latency;
@@ -95,6 +94,12 @@ struct oz_pd {
spinlock_t stream_lock;
struct list_head stream_list;
struct net_device *net_dev;
+ struct hrtimer heartbeat;
+ struct hrtimer timeout;
+ u8 timeout_type;
+ struct tasklet_struct heartbeat_tasklet;
+ struct tasklet_struct timeout_tasklet;
+ struct work_struct workitem;
};
#define OZ_MAX_QUEUED_FRAMES 4
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c
index 79ac7b51d5b..88714ec8570 100644
--- a/drivers/staging/ozwpan/ozproto.c
+++ b/drivers/staging/ozwpan/ozproto.c
@@ -3,6 +3,7 @@
* Released under the GNU General Public License Version 2 (GPLv2).
* -----------------------------------------------------------------------------
*/
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
@@ -10,68 +11,45 @@
#include <linux/netdevice.h>
#include <linux/errno.h>
#include <linux/ieee80211.h>
-#include "ozconfig.h"
+#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozusbsvc.h"
-#include "oztrace.h"
+
#include "ozappif.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>
-/*------------------------------------------------------------------------------
- */
+
#define OZ_CF_CONN_SUCCESS 1
#define OZ_CF_CONN_FAILURE 2
#define OZ_DO_STOP 1
#define OZ_DO_SLEEP 2
-/* States of the timer.
- */
-#define OZ_TIMER_IDLE 0
-#define OZ_TIMER_SET 1
-#define OZ_TIMER_IN_HANDLER 2
-
#define OZ_MAX_TIMER_POOL_SIZE 16
-/*------------------------------------------------------------------------------
- */
struct oz_binding {
struct packet_type ptype;
char name[OZ_MAX_BINDING_LEN];
- struct oz_binding *next;
-};
-
-struct oz_timer {
struct list_head link;
- struct oz_pd *pd;
- unsigned long due_time;
- int type;
};
-/*------------------------------------------------------------------------------
+
+/*
* Static external variables.
*/
static DEFINE_SPINLOCK(g_polling_lock);
static LIST_HEAD(g_pd_list);
-static struct oz_binding *g_binding ;
+static LIST_HEAD(g_binding);
static DEFINE_SPINLOCK(g_binding_lock);
static struct sk_buff_head g_rx_queue;
static u8 g_session_id;
static u16 g_apps = 0x1;
static int g_processing_rx;
-static struct timer_list g_timer;
-static struct oz_timer *g_cur_timer;
-static struct list_head *g_timer_pool;
-static int g_timer_pool_count;
-static int g_timer_state = OZ_TIMER_IDLE;
-static LIST_HEAD(g_timer_list);
-/*------------------------------------------------------------------------------
- */
-static void oz_protocol_timer_start(void);
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static u8 oz_get_new_session_id(u8 exclude)
@@ -84,7 +62,8 @@ static u8 oz_get_new_session_id(u8 exclude)
}
return g_session_id;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
@@ -94,6 +73,7 @@ static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
struct oz_hdr *oz_hdr;
struct oz_elt *elt;
struct oz_elt_connect_rsp *body;
+
int sz = sizeof(struct oz_hdr) + sizeof(struct oz_elt) +
sizeof(struct oz_elt_connect_rsp);
skb = alloc_skb(sz + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
@@ -124,11 +104,12 @@ static void oz_send_conn_rsp(struct oz_pd *pd, u8 status)
body->session_id = pd->session_id;
put_unaligned(cpu_to_le16(pd->total_apps), &body->apps);
}
- oz_trace("TX: OZ_ELT_CONNECT_RSP %d", status);
+ oz_dbg(ON, "TX: OZ_ELT_CONNECT_RSP %d", status);
dev_queue_xmit(skb);
return;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static void pd_set_keepalive(struct oz_pd *pd, u8 kalive)
@@ -137,35 +118,41 @@ static void pd_set_keepalive(struct oz_pd *pd, u8 kalive)
switch (kalive & OZ_KALIVE_TYPE_MASK) {
case OZ_KALIVE_SPECIAL:
- pd->keep_alive_j =
- oz_ms_to_jiffies(keep_alive * 1000*60*60*24*20);
+ pd->keep_alive = keep_alive * 1000*60*60*24*20;
break;
case OZ_KALIVE_SECS:
- pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000);
+ pd->keep_alive = keep_alive*1000;
break;
case OZ_KALIVE_MINS:
- pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000*60);
+ pd->keep_alive = keep_alive*1000*60;
break;
case OZ_KALIVE_HOURS:
- pd->keep_alive_j = oz_ms_to_jiffies(keep_alive*1000*60*60);
+ pd->keep_alive = keep_alive*1000*60*60;
break;
default:
- pd->keep_alive_j = 0;
+ pd->keep_alive = 0;
}
- oz_trace("Keepalive = %lu jiffies\n", pd->keep_alive_j);
+ oz_dbg(ON, "Keepalive = %lu mSec\n", pd->keep_alive);
}
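
pd_set_keepalive() above now stores the keep-alive interval in milliseconds instead of converting to jiffies, scaling the count carried by the element by the unit selected in its OZ_KALIVE_TYPE_MASK bits. A small user-space sketch of that decode follows; the mask and type values here are assumed for the sketch, the real constants live in ozprotocol.h.

#include <stdio.h>

/* illustrative encodings, not the driver's real OZ_KALIVE_* values */
#define KALIVE_TYPE_MASK  0xc0
#define KALIVE_VALUE_MASK 0x3f
#define KALIVE_SECS       0x40
#define KALIVE_MINS       0x80
#define KALIVE_HOURS      0xc0

static unsigned long decode_keepalive_ms(unsigned char kalive)
{
        unsigned long count = kalive & KALIVE_VALUE_MASK;

        switch (kalive & KALIVE_TYPE_MASK) {
        case KALIVE_SECS:
                return count * 1000;
        case KALIVE_MINS:
                return count * 1000 * 60;
        case KALIVE_HOURS:
                return count * 1000 * 60 * 60;
        default:
                return 0;               /* keep-alive disabled */
        }
}

int main(void)
{
        /* 5 minutes -> prints "300000 ms" */
        printf("%lu ms\n", decode_keepalive_ms(KALIVE_MINS | 5));
        return 0;
}
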
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
-static void pd_set_presleep(struct oz_pd *pd, u8 presleep)
+static void pd_set_presleep(struct oz_pd *pd, u8 presleep, u8 start_timer)
{
if (presleep)
- pd->presleep_j = oz_ms_to_jiffies(presleep*100);
+ pd->presleep = presleep*100;
else
- pd->presleep_j = OZ_PRESLEEP_TOUT_J;
- oz_trace("Presleep time = %lu jiffies\n", pd->presleep_j);
+ pd->presleep = OZ_PRESLEEP_TOUT;
+ if (start_timer) {
+ spin_unlock(&g_polling_lock);
+ oz_timer_add(pd, OZ_TIMER_TOUT, pd->presleep);
+ spin_lock(&g_polling_lock);
+ }
+ oz_dbg(ON, "Presleep time = %lu mSec\n", pd->presleep);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
@@ -179,6 +166,7 @@ static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
u16 new_apps = g_apps;
struct net_device *old_net_dev = NULL;
struct oz_pd *free_pd = NULL;
+
if (cur_pd) {
pd = cur_pd;
spin_lock_bh(&g_polling_lock);
@@ -188,7 +176,7 @@ static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
pd = oz_pd_alloc(pd_addr);
if (pd == NULL)
return NULL;
- pd->last_rx_time_j = jiffies;
+ getnstimeofday(&pd->last_rx_timestamp);
spin_lock_bh(&g_polling_lock);
list_for_each(e, &g_pd_list) {
pd2 = container_of(e, struct oz_pd, link);
@@ -210,7 +198,7 @@ static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
dev_hold(net_dev);
pd->net_dev = net_dev;
}
- oz_trace("Host vendor: %d\n", body->host_vendor);
+ oz_dbg(ON, "Host vendor: %d\n", body->host_vendor);
pd->max_tx_size = OZ_MAX_TX_SIZE;
pd->mode = body->mode;
pd->pd_info = body->pd_info;
@@ -234,12 +222,11 @@ static struct oz_pd *oz_connect_req(struct oz_pd *cur_pd, struct oz_elt *elt,
}
if (body->max_len_div16)
pd->max_tx_size = ((u16)body->max_len_div16)<<4;
- oz_trace("Max frame:%u Ms per isoc:%u\n",
- pd->max_tx_size, pd->ms_per_isoc);
+ oz_dbg(ON, "Max frame:%u Ms per isoc:%u\n",
+ pd->max_tx_size, pd->ms_per_isoc);
pd->max_stream_buffering = 3*1024;
- pd->timeout_time_j = jiffies + OZ_CONNECTION_TOUT_J;
- pd->pulse_period_j = OZ_QUANTUM_J;
- pd_set_presleep(pd, body->presleep);
+ pd->pulse_period = OZ_QUANTUM;
+ pd_set_presleep(pd, body->presleep, 0);
pd_set_keepalive(pd, body->keep_alive);
new_apps &= le16_to_cpu(get_unaligned(&body->apps));
@@ -271,9 +258,8 @@ done:
u16 resume_apps = new_apps & pd->paused_apps & ~0x1;
spin_unlock_bh(&g_polling_lock);
oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
- oz_timer_delete(pd, OZ_TIMER_STOP);
- oz_trace("new_apps=0x%x total_apps=0x%x paused_apps=0x%x\n",
- new_apps, pd->total_apps, pd->paused_apps);
+ oz_dbg(ON, "new_apps=0x%x total_apps=0x%x paused_apps=0x%x\n",
+ new_apps, pd->total_apps, pd->paused_apps);
if (start_apps) {
if (oz_services_start(pd, start_apps, 0))
rsp_status = OZ_STATUS_TOO_MANY_PDS;
@@ -300,7 +286,8 @@ done:
oz_pd_destroy(free_pd);
return pd;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index,
@@ -309,13 +296,15 @@ static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index,
struct oz_farewell *f;
struct oz_farewell *f2;
int found = 0;
- f = kmalloc(sizeof(struct oz_farewell) + len - 1, GFP_ATOMIC);
+
+ f = kmalloc(sizeof(struct oz_farewell) + len, GFP_ATOMIC);
if (!f)
return;
f->ep_num = ep_num;
f->index = index;
+ f->len = len;
memcpy(f->report, report, len);
- oz_trace("RX: Adding farewell report\n");
+ oz_dbg(ON, "RX: Adding farewell report\n");
spin_lock(&g_polling_lock);
list_for_each_entry(f2, &pd->farewell_list, link) {
if ((f2->ep_num == ep_num) && (f2->index == index)) {
@@ -329,7 +318,8 @@ static void oz_add_farewell(struct oz_pd *pd, u8 ep_num, u8 index,
if (found)
kfree(f2);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static void oz_rx_frame(struct sk_buff *skb)
@@ -340,20 +330,20 @@ static void oz_rx_frame(struct sk_buff *skb)
int length;
struct oz_pd *pd = NULL;
struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
+ struct timespec current_time;
int dup = 0;
u32 pkt_num;
- oz_trace2(OZ_TRACE_RX_FRAMES,
- "RX frame PN=0x%x LPN=0x%x control=0x%x\n",
- oz_hdr->pkt_num, oz_hdr->last_pkt_num, oz_hdr->control);
+ oz_dbg(RX_FRAMES, "RX frame PN=0x%x LPN=0x%x control=0x%x\n",
+ oz_hdr->pkt_num, oz_hdr->last_pkt_num, oz_hdr->control);
mac_hdr = skb_mac_header(skb);
src_addr = &mac_hdr[ETH_ALEN] ;
length = skb->len;
/* Check the version field */
if (oz_get_prot_ver(oz_hdr->control) != OZ_PROTOCOL_VERSION) {
- oz_trace("Incorrect protocol version: %d\n",
- oz_get_prot_ver(oz_hdr->control));
+ oz_dbg(ON, "Incorrect protocol version: %d\n",
+ oz_get_prot_ver(oz_hdr->control));
goto done;
}
@@ -361,19 +351,24 @@ static void oz_rx_frame(struct sk_buff *skb)
pd = oz_pd_find(src_addr);
if (pd) {
- pd->last_rx_time_j = jiffies;
- oz_timer_add(pd, OZ_TIMER_TOUT,
- pd->last_rx_time_j + pd->presleep_j, 1);
+ if (!(pd->state & OZ_PD_S_CONNECTED))
+ oz_pd_set_state(pd, OZ_PD_S_CONNECTED);
+ getnstimeofday(&current_time);
+ if ((current_time.tv_sec != pd->last_rx_timestamp.tv_sec) ||
+ (pd->presleep < MSEC_PER_SEC)) {
+ oz_timer_add(pd, OZ_TIMER_TOUT, pd->presleep);
+ pd->last_rx_timestamp = current_time;
+ }
if (pkt_num != pd->last_rx_pkt_num) {
pd->last_rx_pkt_num = pkt_num;
} else {
dup = 1;
- oz_trace("Duplicate frame\n");
+ oz_dbg(ON, "Duplicate frame\n");
}
}
if (pd && !dup && ((pd->mode & OZ_MODE_MASK) == OZ_MODE_TRIGGERED)) {
- oz_trace2(OZ_TRACE_RX_FRAMES, "Received TRIGGER Frame\n");
+ oz_dbg(RX_FRAMES, "Received TRIGGER Frame\n");
pd->last_sent_frame = &pd->tx_queue;
if (oz_hdr->control & OZ_F_ACK) {
/* Retire completed frames */
@@ -397,22 +392,22 @@ static void oz_rx_frame(struct sk_buff *skb)
break;
switch (elt->type) {
case OZ_ELT_CONNECT_REQ:
- oz_trace("RX: OZ_ELT_CONNECT_REQ\n");
+ oz_dbg(ON, "RX: OZ_ELT_CONNECT_REQ\n");
pd = oz_connect_req(pd, elt, src_addr, skb->dev);
break;
case OZ_ELT_DISCONNECT:
- oz_trace("RX: OZ_ELT_DISCONNECT\n");
+ oz_dbg(ON, "RX: OZ_ELT_DISCONNECT\n");
if (pd)
oz_pd_sleep(pd);
break;
case OZ_ELT_UPDATE_PARAM_REQ: {
struct oz_elt_update_param *body =
(struct oz_elt_update_param *)(elt + 1);
- oz_trace("RX: OZ_ELT_UPDATE_PARAM_REQ\n");
+ oz_dbg(ON, "RX: OZ_ELT_UPDATE_PARAM_REQ\n");
if (pd && (pd->state & OZ_PD_S_CONNECTED)) {
spin_lock(&g_polling_lock);
pd_set_keepalive(pd, body->keepalive);
- pd_set_presleep(pd, body->presleep);
+ pd_set_presleep(pd, body->presleep, 1);
spin_unlock(&g_polling_lock);
}
}
@@ -420,7 +415,7 @@ static void oz_rx_frame(struct sk_buff *skb)
case OZ_ELT_FAREWELL_REQ: {
struct oz_elt_farewell *body =
(struct oz_elt_farewell *)(elt + 1);
- oz_trace("RX: OZ_ELT_FAREWELL_REQ\n");
+ oz_dbg(ON, "RX: OZ_ELT_FAREWELL_REQ\n");
oz_add_farewell(pd, body->ep_num,
body->index, body->report,
elt->length + 1 - sizeof(*body));
@@ -436,7 +431,7 @@ static void oz_rx_frame(struct sk_buff *skb)
}
break;
default:
- oz_trace("RX: Unknown elt %02x\n", elt->type);
+ oz_dbg(ON, "RX: Unknown elt %02x\n", elt->type);
}
elt = oz_next_elt(elt);
}
@@ -445,19 +440,19 @@ done:
oz_pd_put(pd);
consume_skb(skb);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
void oz_protocol_term(void)
{
- struct list_head *chain;
- del_timer_sync(&g_timer);
+ struct oz_binding *b, *t;
+
/* Walk the list of bindings and remove each one.
*/
spin_lock_bh(&g_binding_lock);
- while (g_binding) {
- struct oz_binding *b = g_binding;
- g_binding = b->next;
+ list_for_each_entry_safe(b, t, &g_binding, link) {
+ list_del(&b->link);
spin_unlock_bh(&g_binding_lock);
dev_remove_pack(&b->ptype);
if (b->ptype.dev)
@@ -480,21 +475,38 @@ void oz_protocol_term(void)
oz_pd_put(pd);
spin_lock_bh(&g_polling_lock);
}
- chain = g_timer_pool;
- g_timer_pool = NULL;
spin_unlock_bh(&g_polling_lock);
- while (chain) {
- struct oz_timer *t = container_of(chain, struct oz_timer, link);
- chain = chain->next;
- kfree(t);
- }
- oz_trace("Protocol stopped\n");
+ oz_dbg(ON, "Protocol stopped\n");
+}
+
+/*
+ * Context: softirq
+ */
+void oz_pd_heartbeat_handler(unsigned long data)
+{
+ struct oz_pd *pd = (struct oz_pd *)data;
+ u16 apps = 0;
+
+ spin_lock_bh(&g_polling_lock);
+ if (pd->state & OZ_PD_S_CONNECTED)
+ apps = pd->total_apps;
+ spin_unlock_bh(&g_polling_lock);
+ if (apps)
+ oz_pd_heartbeat(pd, apps);
+ oz_pd_put(pd);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
-static void oz_pd_handle_timer(struct oz_pd *pd, int type)
+void oz_pd_timeout_handler(unsigned long data)
{
+ int type;
+ struct oz_pd *pd = (struct oz_pd *)data;
+
+ spin_lock_bh(&g_polling_lock);
+ type = pd->timeout_type;
+ spin_unlock_bh(&g_polling_lock);
switch (type) {
case OZ_TIMER_TOUT:
oz_pd_sleep(pd);
@@ -502,226 +514,86 @@ static void oz_pd_handle_timer(struct oz_pd *pd, int type)
case OZ_TIMER_STOP:
oz_pd_stop(pd);
break;
- case OZ_TIMER_HEARTBEAT: {
- u16 apps = 0;
- spin_lock_bh(&g_polling_lock);
- pd->heartbeat_requested = 0;
- if (pd->state & OZ_PD_S_CONNECTED)
- apps = pd->total_apps;
- spin_unlock_bh(&g_polling_lock);
- if (apps)
- oz_pd_heartbeat(pd, apps);
- }
- break;
}
+ oz_pd_put(pd);
}
-/*------------------------------------------------------------------------------
- * Context: softirq
+
+/*
+ * Context: interrupt
*/
-static void oz_protocol_timer(unsigned long arg)
+enum hrtimer_restart oz_pd_heartbeat_event(struct hrtimer *timer)
{
- struct oz_timer *t;
- struct oz_timer *t2;
struct oz_pd *pd;
- spin_lock_bh(&g_polling_lock);
- if (!g_cur_timer) {
- /* This happens if we remove the current timer but can't stop
- * the timer from firing. In this case just get out.
- */
- spin_unlock_bh(&g_polling_lock);
- return;
- }
- g_timer_state = OZ_TIMER_IN_HANDLER;
- t = g_cur_timer;
- g_cur_timer = NULL;
- list_del(&t->link);
- spin_unlock_bh(&g_polling_lock);
- do {
- pd = t->pd;
- oz_pd_handle_timer(pd, t->type);
- spin_lock_bh(&g_polling_lock);
- if (g_timer_pool_count < OZ_MAX_TIMER_POOL_SIZE) {
- t->link.next = g_timer_pool;
- g_timer_pool = &t->link;
- g_timer_pool_count++;
- t = NULL;
- }
- if (!list_empty(&g_timer_list)) {
- t2 = container_of(g_timer_list.next,
- struct oz_timer, link);
- if (time_before_eq(t2->due_time, jiffies))
- list_del(&t2->link);
- else
- t2 = NULL;
- } else {
- t2 = NULL;
- }
- spin_unlock_bh(&g_polling_lock);
- oz_pd_put(pd);
- kfree(t);
- t = t2;
- } while (t);
- g_timer_state = OZ_TIMER_IDLE;
- oz_protocol_timer_start();
+
+ pd = container_of(timer, struct oz_pd, heartbeat);
+ hrtimer_forward_now(timer, ktime_set(pd->pulse_period /
+ MSEC_PER_SEC, (pd->pulse_period % MSEC_PER_SEC) * NSEC_PER_MSEC));
+ oz_pd_get(pd);
+ tasklet_schedule(&pd->heartbeat_tasklet);
+ return HRTIMER_RESTART;
}
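
oz_pd_heartbeat_event() above shows the periodic side of the new timer scheme: the hrtimer callback runs in hard interrupt context, so it only pushes its own expiry forward by pulse_period and schedules a tasklet, and the tasklet then does the per-beat work in softirq context. A bare-bones sketch of that pattern, with module context assumed and the demo names and 50 ms period being ours:

#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/ktime.h>

static struct tasklet_struct beat_tasklet;
static struct hrtimer beat_timer;
static unsigned long beat_period_ms = 50;       /* illustrative period */

static void beat_work(unsigned long data)
{
        /* the real per-heartbeat work runs here, in softirq context */
}

static enum hrtimer_restart beat_fn(struct hrtimer *timer)
{
        hrtimer_forward_now(timer, ktime_set(beat_period_ms / MSEC_PER_SEC,
                        (beat_period_ms % MSEC_PER_SEC) * NSEC_PER_MSEC));
        tasklet_schedule(&beat_tasklet);
        return HRTIMER_RESTART;                 /* keep firing until cancelled */
}

static void beat_start(void)
{
        tasklet_init(&beat_tasklet, beat_work, 0);
        hrtimer_init(&beat_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        beat_timer.function = beat_fn;
        hrtimer_start(&beat_timer, ktime_set(beat_period_ms / MSEC_PER_SEC,
                        (beat_period_ms % MSEC_PER_SEC) * NSEC_PER_MSEC),
                        HRTIMER_MODE_REL);
}
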
-/*------------------------------------------------------------------------------
- * Context: softirq
+
+/*
+ * Context: interrupt
*/
-static void oz_protocol_timer_start(void)
+enum hrtimer_restart oz_pd_timeout_event(struct hrtimer *timer)
{
- spin_lock_bh(&g_polling_lock);
- if (!list_empty(&g_timer_list)) {
- g_cur_timer =
- container_of(g_timer_list.next, struct oz_timer, link);
- if (g_timer_state == OZ_TIMER_SET) {
- mod_timer(&g_timer, g_cur_timer->due_time);
- } else {
- g_timer.expires = g_cur_timer->due_time;
- g_timer.function = oz_protocol_timer;
- g_timer.data = 0;
- add_timer(&g_timer);
- }
- g_timer_state = OZ_TIMER_SET;
- } else {
- oz_trace("No queued timers\n");
- }
- spin_unlock_bh(&g_polling_lock);
+ struct oz_pd *pd;
+
+ pd = container_of(timer, struct oz_pd, timeout);
+ oz_pd_get(pd);
+ tasklet_schedule(&pd->timeout_tasklet);
+ return HRTIMER_NORESTART;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
-void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time,
- int remove)
+void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time)
{
- struct list_head *e;
- struct oz_timer *t = NULL;
- int restart_needed = 0;
- spin_lock(&g_polling_lock);
- if (remove) {
- list_for_each(e, &g_timer_list) {
- t = container_of(e, struct oz_timer, link);
- if ((t->pd == pd) && (t->type == type)) {
- if (g_cur_timer == t) {
- restart_needed = 1;
- g_cur_timer = NULL;
- }
- list_del(e);
- break;
- }
- t = NULL;
- }
- }
- if (!t) {
- if (g_timer_pool) {
- t = container_of(g_timer_pool, struct oz_timer, link);
- g_timer_pool = g_timer_pool->next;
- g_timer_pool_count--;
+ spin_lock_bh(&g_polling_lock);
+ switch (type) {
+ case OZ_TIMER_TOUT:
+ case OZ_TIMER_STOP:
+ if (hrtimer_active(&pd->timeout)) {
+ hrtimer_set_expires(&pd->timeout, ktime_set(due_time /
+ MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
+ NSEC_PER_MSEC));
+ hrtimer_start_expires(&pd->timeout, HRTIMER_MODE_REL);
} else {
- t = kmalloc(sizeof(struct oz_timer), GFP_ATOMIC);
- }
- if (t) {
- t->pd = pd;
- t->type = type;
- oz_pd_get(pd);
- }
- }
- if (t) {
- struct oz_timer *t2;
- t->due_time = due_time;
- list_for_each(e, &g_timer_list) {
- t2 = container_of(e, struct oz_timer, link);
- if (time_before(due_time, t2->due_time)) {
- if (t2 == g_cur_timer) {
- g_cur_timer = NULL;
- restart_needed = 1;
- }
- break;
- }
+ hrtimer_start(&pd->timeout, ktime_set(due_time /
+ MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
+ NSEC_PER_MSEC), HRTIMER_MODE_REL);
}
- list_add_tail(&t->link, e);
- }
- if (g_timer_state == OZ_TIMER_IDLE)
- restart_needed = 1;
- else if (g_timer_state == OZ_TIMER_IN_HANDLER)
- restart_needed = 0;
- spin_unlock(&g_polling_lock);
- if (restart_needed)
- oz_protocol_timer_start();
-}
-/*------------------------------------------------------------------------------
- * Context: softirq or process
- */
-void oz_timer_delete(struct oz_pd *pd, int type)
-{
- struct list_head *chain = NULL;
- struct oz_timer *t;
- struct oz_timer *n;
- int restart_needed = 0;
- int release = 0;
- spin_lock(&g_polling_lock);
- list_for_each_entry_safe(t, n, &g_timer_list, link) {
- if ((t->pd == pd) && ((type == 0) || (t->type == type))) {
- if (g_cur_timer == t) {
- restart_needed = 1;
- g_cur_timer = NULL;
- del_timer(&g_timer);
- }
- list_del(&t->link);
- release++;
- if (g_timer_pool_count < OZ_MAX_TIMER_POOL_SIZE) {
- t->link.next = g_timer_pool;
- g_timer_pool = &t->link;
- g_timer_pool_count++;
- } else {
- t->link.next = chain;
- chain = &t->link;
- }
- if (type)
- break;
- }
- }
- if (g_timer_state == OZ_TIMER_IN_HANDLER)
- restart_needed = 0;
- else if (restart_needed)
- g_timer_state = OZ_TIMER_IDLE;
- spin_unlock(&g_polling_lock);
- if (restart_needed)
- oz_protocol_timer_start();
- while (release--)
- oz_pd_put(pd);
- while (chain) {
- t = container_of(chain, struct oz_timer, link);
- chain = chain->next;
- kfree(t);
+ pd->timeout_type = type;
+ break;
+ case OZ_TIMER_HEARTBEAT:
+ if (!hrtimer_active(&pd->heartbeat))
+ hrtimer_start(&pd->heartbeat, ktime_set(due_time /
+ MSEC_PER_SEC, (due_time % MSEC_PER_SEC) *
+ NSEC_PER_MSEC), HRTIMER_MODE_REL);
+ break;
}
+ spin_unlock_bh(&g_polling_lock);
}
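
The millisecond-to-ktime conversion above is open-coded in both branches. Purely as an illustration (a hypothetical helper, not part of the patch), the same arithmetic could be factored out so both hrtimer calls stay identical:

static inline ktime_t oz_ms_to_ktime(unsigned long ms)
{
	/* seconds part plus the millisecond remainder in nanoseconds */
	return ktime_set(ms / MSEC_PER_SEC,
			 (ms % MSEC_PER_SEC) * NSEC_PER_MSEC);
}

With that, the two calls reduce to hrtimer_set_expires(&pd->timeout, oz_ms_to_ktime(due_time)) and hrtimer_start(&pd->timeout, oz_ms_to_ktime(due_time), HRTIMER_MODE_REL).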
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
void oz_pd_request_heartbeat(struct oz_pd *pd)
{
- unsigned long now = jiffies;
- unsigned long t;
- spin_lock(&g_polling_lock);
- if (pd->heartbeat_requested) {
- spin_unlock(&g_polling_lock);
- return;
- }
- if (pd->pulse_period_j)
- t = ((now / pd->pulse_period_j) + 1) * pd->pulse_period_j;
- else
- t = now + 1;
- pd->heartbeat_requested = 1;
- spin_unlock(&g_polling_lock);
- oz_timer_add(pd, OZ_TIMER_HEARTBEAT, t, 0);
+ oz_timer_add(pd, OZ_TIMER_HEARTBEAT, pd->pulse_period > 0 ?
+ pd->pulse_period : OZ_QUANTUM);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
struct oz_pd *oz_pd_find(const u8 *mac_addr)
{
struct oz_pd *pd;
struct list_head *e;
+
spin_lock_bh(&g_polling_lock);
list_for_each(e, &g_pd_list) {
pd = container_of(e, struct oz_pd, link);
@@ -734,7 +606,8 @@ struct oz_pd *oz_pd_find(const u8 *mac_addr)
spin_unlock_bh(&g_polling_lock);
return NULL;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
void oz_app_enable(int app_id, int enable)
@@ -748,7 +621,8 @@ void oz_app_enable(int app_id, int enable)
spin_unlock_bh(&g_polling_lock);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static int oz_pkt_recv(struct sk_buff *skb, struct net_device *dev,
@@ -782,10 +656,11 @@ static int oz_pkt_recv(struct sk_buff *skb, struct net_device *dev,
}
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
-void oz_binding_add(char *net_dev)
+void oz_binding_add(const char *net_dev)
{
struct oz_binding *binding;
@@ -795,43 +670,28 @@ void oz_binding_add(char *net_dev)
binding->ptype.func = oz_pkt_recv;
memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
if (net_dev && *net_dev) {
- oz_trace("Adding binding: %s\n", net_dev);
+ oz_dbg(ON, "Adding binding: %s\n", net_dev);
binding->ptype.dev =
dev_get_by_name(&init_net, net_dev);
if (binding->ptype.dev == NULL) {
- oz_trace("Netdev %s not found\n", net_dev);
+ oz_dbg(ON, "Netdev %s not found\n", net_dev);
kfree(binding);
binding = NULL;
}
} else {
- oz_trace("Binding to all netcards\n");
+ oz_dbg(ON, "Binding to all netcards\n");
binding->ptype.dev = NULL;
}
if (binding) {
dev_add_pack(&binding->ptype);
spin_lock_bh(&g_binding_lock);
- binding->next = g_binding;
- g_binding = binding;
+ list_add_tail(&binding->link, &g_binding);
spin_unlock_bh(&g_binding_lock);
}
}
}
-/*------------------------------------------------------------------------------
- * Context: process
- */
-static int compare_binding_name(char *s1, char *s2)
-{
- int i;
- for (i = 0; i < OZ_MAX_BINDING_LEN; i++) {
- if (*s1 != *s2)
- return 0;
- if (!*s1++)
- return 1;
- s2++;
- }
- return 1;
-}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static void pd_stop_all_for_device(struct net_device *net_dev)
@@ -839,6 +699,7 @@ static void pd_stop_all_for_device(struct net_device *net_dev)
struct list_head h;
struct oz_pd *pd;
struct oz_pd *n;
+
INIT_LIST_HEAD(&h);
spin_lock_bh(&g_polling_lock);
list_for_each_entry_safe(pd, n, &g_pd_list, link) {
@@ -854,38 +715,37 @@ static void pd_stop_all_for_device(struct net_device *net_dev)
oz_pd_put(pd);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
-void oz_binding_remove(char *net_dev)
+void oz_binding_remove(const char *net_dev)
{
struct oz_binding *binding;
- struct oz_binding **link;
- oz_trace("Removing binding: %s\n", net_dev);
+ int found = 0;
+
+ oz_dbg(ON, "Removing binding: %s\n", net_dev);
spin_lock_bh(&g_binding_lock);
- binding = g_binding;
- link = &g_binding;
- while (binding) {
- if (compare_binding_name(binding->name, net_dev)) {
- oz_trace("Binding '%s' found\n", net_dev);
- *link = binding->next;
+ list_for_each_entry(binding, &g_binding, link) {
+ if (strncmp(binding->name, net_dev, OZ_MAX_BINDING_LEN) == 0) {
+ oz_dbg(ON, "Binding '%s' found\n", net_dev);
+ found = 1;
break;
- } else {
- link = &binding;
- binding = binding->next;
}
}
spin_unlock_bh(&g_binding_lock);
- if (binding) {
+ if (found) {
dev_remove_pack(&binding->ptype);
if (binding->ptype.dev) {
dev_put(binding->ptype.dev);
pd_stop_all_for_device(binding->ptype.dev);
}
+ list_del(&binding->link);
kfree(binding);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
static char *oz_get_next_device_name(char *s, char *dname, int max_size)
@@ -899,7 +759,8 @@ static char *oz_get_next_device_name(char *s, char *dname, int max_size)
*dname = 0;
return s;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
int oz_protocol_init(char *devs)
@@ -915,10 +776,10 @@ int oz_protocol_init(char *devs)
oz_binding_add(d);
}
}
- init_timer(&g_timer);
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: process
*/
int oz_get_pd_list(struct oz_mac_addr *addr, int max_count)
@@ -926,6 +787,7 @@ int oz_get_pd_list(struct oz_mac_addr *addr, int max_count)
struct oz_pd *pd;
struct list_head *e;
int count = 0;
+
spin_lock_bh(&g_polling_lock);
list_for_each(e, &g_pd_list) {
if (count >= max_count)
@@ -936,14 +798,12 @@ int oz_get_pd_list(struct oz_mac_addr *addr, int max_count)
spin_unlock_bh(&g_polling_lock);
return count;
}
-/*------------------------------------------------------------------------------
-*/
+
void oz_polling_lock_bh(void)
{
spin_lock_bh(&g_polling_lock);
}
-/*------------------------------------------------------------------------------
-*/
+
void oz_polling_unlock_bh(void)
{
spin_unlock_bh(&g_polling_lock);
diff --git a/drivers/staging/ozwpan/ozproto.h b/drivers/staging/ozwpan/ozproto.h
index 93bb4c0172e..0c49c8a0e81 100644
--- a/drivers/staging/ozwpan/ozproto.h
+++ b/drivers/staging/ozwpan/ozproto.h
@@ -7,28 +7,19 @@
#define _OZPROTO_H
#include <asm/byteorder.h>
-#include "ozconfig.h"
+#include "ozdbg.h"
#include "ozappif.h"
#define OZ_ALLOCATED_SPACE(__x) (LL_RESERVED_SPACE(__x)+(__x)->needed_tailroom)
-/* Converts millisecs to jiffies.
- */
-#define oz_ms_to_jiffies(__x) msecs_to_jiffies(__x)
-
-/* Quantum milliseconds.
- */
-#define OZ_QUANTUM_MS 8
-/* Quantum jiffies
- */
-#define OZ_QUANTUM_J (oz_ms_to_jiffies(OZ_QUANTUM_MS))
+/* Quantum in milliseconds */
+#define OZ_QUANTUM 8
/* Default timeouts.
*/
-#define OZ_CONNECTION_TOUT_J (2*HZ)
-#define OZ_PRESLEEP_TOUT_J (11*HZ)
+#define OZ_PRESLEEP_TOUT 11
/* Maximum sizes of tx frames. */
-#define OZ_MAX_TX_SIZE 1514
+#define OZ_MAX_TX_SIZE 760
/* Maximum number of uncompleted isoc frames that can be pending in network. */
#define OZ_MAX_SUBMITTED_ISOC 16
@@ -63,13 +54,18 @@ void oz_protocol_term(void);
int oz_get_pd_list(struct oz_mac_addr *addr, int max_count);
void oz_app_enable(int app_id, int enable);
struct oz_pd *oz_pd_find(const u8 *mac_addr);
-void oz_binding_add(char *net_dev);
-void oz_binding_remove(char *net_dev);
-void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time,
- int remove);
+void oz_binding_add(const char *net_dev);
+void oz_binding_remove(const char *net_dev);
+void oz_timer_add(struct oz_pd *pd, int type, unsigned long due_time);
void oz_timer_delete(struct oz_pd *pd, int type);
void oz_pd_request_heartbeat(struct oz_pd *pd);
void oz_polling_lock_bh(void);
void oz_polling_unlock_bh(void);
+void oz_pd_heartbeat_handler(unsigned long data);
+void oz_pd_timeout_handler(unsigned long data);
+enum hrtimer_restart oz_pd_heartbeat_event(struct hrtimer *timer);
+enum hrtimer_restart oz_pd_timeout_event(struct hrtimer *timer);
+int oz_get_pd_status_list(char *pd_list, int max_count);
+int oz_get_binding_list(char *buf, int max_if);
#endif /* _OZPROTO_H */
diff --git a/drivers/staging/ozwpan/oztrace.c b/drivers/staging/ozwpan/oztrace.c
deleted file mode 100644
index 353ead24fd7..00000000000
--- a/drivers/staging/ozwpan/oztrace.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#include "ozconfig.h"
-#include "oztrace.h"
-
-#ifdef WANT_VERBOSE_TRACE
-unsigned long trace_flags =
- 0
-#ifdef WANT_TRACE_STREAM
- | OZ_TRACE_STREAM
-#endif /* WANT_TRACE_STREAM */
-#ifdef WANT_TRACE_URB
- | OZ_TRACE_URB
-#endif /* WANT_TRACE_URB */
-
-#ifdef WANT_TRACE_CTRL_DETAIL
- | OZ_TRACE_CTRL_DETAIL
-#endif /* WANT_TRACE_CTRL_DETAIL */
-
-#ifdef WANT_TRACE_HUB
- | OZ_TRACE_HUB
-#endif /* WANT_TRACE_HUB */
-
-#ifdef WANT_TRACE_RX_FRAMES
- | OZ_TRACE_RX_FRAMES
-#endif /* WANT_TRACE_RX_FRAMES */
-
-#ifdef WANT_TRACE_TX_FRAMES
- | OZ_TRACE_TX_FRAMES
-#endif /* WANT_TRACE_TX_FRAMES */
- ;
-#endif /* WANT_VERBOSE_TRACE */
-
diff --git a/drivers/staging/ozwpan/oztrace.h b/drivers/staging/ozwpan/oztrace.h
deleted file mode 100644
index 8293b24c5a7..00000000000
--- a/drivers/staging/ozwpan/oztrace.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* -----------------------------------------------------------------------------
- * Copyright (c) 2011 Ozmo Inc
- * Released under the GNU General Public License Version 2 (GPLv2).
- * -----------------------------------------------------------------------------
- */
-#ifndef _OZTRACE_H_
-#define _OZTRACE_H_
-#include "ozconfig.h"
-
-#define TRACE_PREFIX KERN_ALERT "OZWPAN: "
-
-#ifdef WANT_TRACE
-#define oz_trace(...) printk(TRACE_PREFIX __VA_ARGS__)
-#ifdef WANT_VERBOSE_TRACE
-extern unsigned long trace_flags;
-#define oz_trace2(_flag, ...) \
- do { if (trace_flags & _flag) printk(TRACE_PREFIX __VA_ARGS__); \
- } while (0)
-#else
-#define oz_trace2(...)
-#endif /* #ifdef WANT_VERBOSE_TRACE */
-#else
-#define oz_trace(...)
-#define oz_trace2(...)
-#endif /* #ifdef WANT_TRACE */
-
-#define OZ_TRACE_STREAM 0x1
-#define OZ_TRACE_URB 0x2
-#define OZ_TRACE_CTRL_DETAIL 0x4
-#define OZ_TRACE_HUB 0x8
-#define OZ_TRACE_RX_FRAMES 0x10
-#define OZ_TRACE_TX_FRAMES 0x20
-
-#endif /* Sentry */
-
diff --git a/drivers/staging/ozwpan/ozurbparanoia.c b/drivers/staging/ozwpan/ozurbparanoia.c
index 55b9afbbe47..cf6278a198a 100644
--- a/drivers/staging/ozwpan/ozurbparanoia.c
+++ b/drivers/staging/ozwpan/ozurbparanoia.c
@@ -4,37 +4,39 @@
* -----------------------------------------------------------------------------
*/
#include <linux/usb.h>
-#include "ozconfig.h"
+#include "ozdbg.h"
+
#ifdef WANT_URB_PARANOIA
+
#include "ozurbparanoia.h"
-#include "oztrace.h"
-/*-----------------------------------------------------------------------------
- */
+
#define OZ_MAX_URBS 1000
struct urb *g_urb_memory[OZ_MAX_URBS];
int g_nb_urbs;
DEFINE_SPINLOCK(g_urb_mem_lock);
-/*-----------------------------------------------------------------------------
- */
+
void oz_remember_urb(struct urb *urb)
{
unsigned long irq_state;
+
spin_lock_irqsave(&g_urb_mem_lock, irq_state);
if (g_nb_urbs < OZ_MAX_URBS) {
g_urb_memory[g_nb_urbs++] = urb;
- oz_trace("%lu: urb up = %d %p\n", jiffies, g_nb_urbs, urb);
+ oz_dbg(ON, "urb up = %d %p\n", g_nb_urbs, urb);
} else {
- oz_trace("ERROR urb buffer full\n");
+ oz_dbg(ON, "ERROR urb buffer full\n");
}
spin_unlock_irqrestore(&g_urb_mem_lock, irq_state);
}
-/*------------------------------------------------------------------------------
+
+/*
*/
int oz_forget_urb(struct urb *urb)
{
unsigned long irq_state;
int i;
int rc = -1;
+
spin_lock_irqsave(&g_urb_mem_lock, irq_state);
for (i = 0; i < g_nb_urbs; i++) {
if (g_urb_memory[i] == urb) {
@@ -42,8 +44,7 @@ int oz_forget_urb(struct urb *urb)
if (--g_nb_urbs > i)
memcpy(&g_urb_memory[i], &g_urb_memory[i+1],
(g_nb_urbs - i) * sizeof(struct urb *));
- oz_trace("%lu: urb down = %d %p\n",
- jiffies, g_nb_urbs, urb);
+ oz_dbg(ON, "urb down = %d %p\n", g_nb_urbs, urb);
}
}
spin_unlock_irqrestore(&g_urb_mem_lock, irq_state);
diff --git a/drivers/staging/ozwpan/ozurbparanoia.h b/drivers/staging/ozwpan/ozurbparanoia.h
index 00f5a3a81bc..5080ea76f50 100644
--- a/drivers/staging/ozwpan/ozurbparanoia.h
+++ b/drivers/staging/ozwpan/ozurbparanoia.h
@@ -10,8 +10,8 @@
void oz_remember_urb(struct urb *urb);
int oz_forget_urb(struct urb *urb);
#else
-#define oz_remember_urb(__x)
-#define oz_forget_urb(__x) 0
+static inline void oz_remember_urb(struct urb *urb) {}
+static inline int oz_forget_urb(struct urb *urb) { return 0; }
#endif /* WANT_URB_PARANOIA */
diff --git a/drivers/staging/ozwpan/ozusbsvc.c b/drivers/staging/ozwpan/ozusbsvc.c
index 16763287824..cf263791cb3 100644
--- a/drivers/staging/ozwpan/ozusbsvc.c
+++ b/drivers/staging/ozwpan/ozusbsvc.c
@@ -10,6 +10,7 @@
* The implementation of this service uses ozhcd.c to implement a USB HCD.
* -----------------------------------------------------------------------------
*/
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
@@ -18,16 +19,16 @@
#include <linux/errno.h>
#include <linux/input.h>
#include <asm/unaligned.h>
-#include "ozconfig.h"
+#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozusbif.h"
#include "ozhcd.h"
-#include "oztrace.h"
#include "ozusbsvc.h"
-/*------------------------------------------------------------------------------
+
+/*
* This is called once when the driver is loaded to initialise the USB service.
* Context: process
*/
@@ -35,7 +36,8 @@ int oz_usb_init(void)
{
return oz_hcd_init();
}
-/*------------------------------------------------------------------------------
+
+/*
* This is called once when the driver is unloaded to terminate the USB service.
* Context: process
*/
@@ -43,7 +45,8 @@ void oz_usb_term(void)
{
oz_hcd_term();
}
-/*------------------------------------------------------------------------------
+
+/*
* This is called when the USB service is started or resumed for a PD.
* Context: softirq
*/
@@ -52,11 +55,12 @@ int oz_usb_start(struct oz_pd *pd, int resume)
int rc = 0;
struct oz_usb_ctx *usb_ctx;
struct oz_usb_ctx *old_ctx;
+
if (resume) {
- oz_trace("USB service resumed.\n");
+ oz_dbg(ON, "USB service resumed\n");
return 0;
}
- oz_trace("USB service started.\n");
+ oz_dbg(ON, "USB service started\n");
/* Create a USB context in case we need one. If we find the PD already
* has a USB context then we will destroy it.
*/
@@ -77,7 +81,7 @@ int oz_usb_start(struct oz_pd *pd, int resume)
oz_usb_get(pd->app_ctx[OZ_APPID_USB-1]);
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
if (old_ctx) {
- oz_trace("Already have USB context.\n");
+ oz_dbg(ON, "Already have USB context\n");
kfree(usb_ctx);
usb_ctx = old_ctx;
} else if (usb_ctx) {
@@ -95,7 +99,7 @@ int oz_usb_start(struct oz_pd *pd, int resume)
} else {
usb_ctx->hport = oz_hcd_pd_arrived(usb_ctx);
if (usb_ctx->hport == NULL) {
- oz_trace("USB hub returned null port.\n");
+ oz_dbg(ON, "USB hub returned null port\n");
spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
pd->app_ctx[OZ_APPID_USB-1] = NULL;
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
@@ -106,15 +110,17 @@ int oz_usb_start(struct oz_pd *pd, int resume)
oz_usb_put(usb_ctx);
return rc;
}
-/*------------------------------------------------------------------------------
+
+/*
* This is called when the USB service is stopped or paused for a PD.
* Context: softirq or process
*/
void oz_usb_stop(struct oz_pd *pd, int pause)
{
struct oz_usb_ctx *usb_ctx;
+
if (pause) {
- oz_trace("USB service paused.\n");
+ oz_dbg(ON, "USB service paused\n");
return;
}
spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
@@ -122,8 +128,9 @@ void oz_usb_stop(struct oz_pd *pd, int pause)
pd->app_ctx[OZ_APPID_USB-1] = NULL;
spin_unlock_bh(&pd->app_lock[OZ_APPID_USB-1]);
if (usb_ctx) {
- unsigned long tout = jiffies + HZ;
- oz_trace("USB service stopping...\n");
+ struct timespec ts, now;
+ getnstimeofday(&ts);
+ oz_dbg(ON, "USB service stopping...\n");
usb_ctx->stopped = 1;
/* At this point the reference count on the usb context should
* be 2 - one from when we created it and one from the hcd
@@ -131,17 +138,21 @@ void oz_usb_stop(struct oz_pd *pd, int pause)
* should get in but someone may already be in. So wait
* until they leave but timeout after 1 second.
*/
- while ((atomic_read(&usb_ctx->ref_count) > 2) &&
- time_before(jiffies, tout))
- ;
- oz_trace("USB service stopped.\n");
+ while ((atomic_read(&usb_ctx->ref_count) > 2)) {
+ getnstimeofday(&now);
+ /* Approx. 1 sec; this is not a precise calculation */
+ if (now.tv_sec != ts.tv_sec)
+ break;
+ }
+ oz_dbg(ON, "USB service stopped\n");
oz_hcd_pd_departed(usb_ctx->hport);
/* Release the reference taken in oz_usb_start.
*/
oz_usb_put(usb_ctx);
}
}
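
The bounded wait above compares tv_sec fields, so (as its own comment notes) it can give up well before a second has actually elapsed if the wait starts near a second boundary. A sketch of a tighter bound using timespec_sub() from <linux/time.h>; the helper name is hypothetical and usb_ctx is the same context as above:

static void oz_wait_ctx_idle(struct oz_usb_ctx *usb_ctx)
{
	struct timespec start, now;

	getnstimeofday(&start);
	while (atomic_read(&usb_ctx->ref_count) > 2) {
		getnstimeofday(&now);
		if (timespec_sub(now, start).tv_sec >= 1)
			break;	/* give up after roughly one full second */
	}
}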
-/*------------------------------------------------------------------------------
+
+/*
* This increments the reference count of the context area for a specific PD.
* This ensures this context area does not disappear while still in use.
* Context: softirq
@@ -149,29 +160,34 @@ void oz_usb_stop(struct oz_pd *pd, int pause)
void oz_usb_get(void *hpd)
{
struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+
atomic_inc(&usb_ctx->ref_count);
}
-/*------------------------------------------------------------------------------
+
+/*
* This decrements the reference count of the context area for a specific PD
* and destroys the context area if the reference count becomes zero.
- * Context: softirq or process
+ * Context: irq or process
*/
void oz_usb_put(void *hpd)
{
struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+
if (atomic_dec_and_test(&usb_ctx->ref_count)) {
- oz_trace("Dealloc USB context.\n");
+ oz_dbg(ON, "Dealloc USB context\n");
oz_pd_put(usb_ctx->pd);
kfree(usb_ctx);
}
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_usb_heartbeat(struct oz_pd *pd)
{
struct oz_usb_ctx *usb_ctx;
int rc = 0;
+
spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1];
if (usb_ctx)
@@ -188,14 +204,16 @@ done:
oz_usb_put(usb_ctx);
return rc;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_usb_stream_create(void *hpd, u8 ep_num)
{
struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
struct oz_pd *pd = usb_ctx->pd;
- oz_trace("oz_usb_stream_create(0x%x)\n", ep_num);
+
+ oz_dbg(ON, "%s: (0x%x)\n", __func__, ep_num);
if (pd->mode & OZ_F_ISOC_NO_ELTS) {
oz_isoc_stream_create(pd, ep_num);
} else {
@@ -208,16 +226,18 @@ int oz_usb_stream_create(void *hpd, u8 ep_num)
}
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_usb_stream_delete(void *hpd, u8 ep_num)
{
struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+
if (usb_ctx) {
struct oz_pd *pd = usb_ctx->pd;
if (pd) {
- oz_trace("oz_usb_stream_delete(0x%x)\n", ep_num);
+ oz_dbg(ON, "%s: (0x%x)\n", __func__, ep_num);
if (pd->mode & OZ_F_ISOC_NO_ELTS) {
oz_isoc_stream_delete(pd, ep_num);
} else {
@@ -229,12 +249,14 @@ int oz_usb_stream_delete(void *hpd, u8 ep_num)
}
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq or process
*/
void oz_usb_request_heartbeat(void *hpd)
{
struct oz_usb_ctx *usb_ctx = (struct oz_usb_ctx *)hpd;
+
if (usb_ctx && usb_ctx->pd)
oz_pd_request_heartbeat(usb_ctx->pd);
}
diff --git a/drivers/staging/ozwpan/ozusbsvc1.c b/drivers/staging/ozwpan/ozusbsvc1.c
index 16e607875c3..228bffaa69c 100644
--- a/drivers/staging/ozwpan/ozusbsvc1.c
+++ b/drivers/staging/ozwpan/ozusbsvc1.c
@@ -13,19 +13,18 @@
#include <linux/errno.h>
#include <linux/input.h>
#include <asm/unaligned.h>
-#include "ozconfig.h"
+#include "ozdbg.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "ozusbif.h"
#include "ozhcd.h"
-#include "oztrace.h"
#include "ozusbsvc.h"
-/*------------------------------------------------------------------------------
- */
+
#define MAX_ISOC_FIXED_DATA (253-sizeof(struct oz_isoc_fixed))
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
static int oz_usb_submit_elt(struct oz_elt_buf *eb, struct oz_elt_info *ei,
@@ -34,6 +33,7 @@ static int oz_usb_submit_elt(struct oz_elt_buf *eb, struct oz_elt_info *ei,
int ret;
struct oz_elt *elt = (struct oz_elt *)ei->data;
struct oz_app_hdr *app_hdr = (struct oz_app_hdr *)(elt+1);
+
elt->type = OZ_ELT_APP_DATA;
ei->app_id = OZ_APPID_USB;
ei->length = elt->length + sizeof(struct oz_elt);
@@ -50,7 +50,8 @@ static int oz_usb_submit_elt(struct oz_elt_buf *eb, struct oz_elt_info *ei,
spin_unlock_bh(&eb->lock);
return ret;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
@@ -62,12 +63,13 @@ int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
struct oz_get_desc_req *body;
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
- oz_trace(" req_type = 0x%x\n", req_type);
- oz_trace(" desc_type = 0x%x\n", desc_type);
- oz_trace(" index = 0x%x\n", index);
- oz_trace(" windex = 0x%x\n", windex);
- oz_trace(" offset = 0x%x\n", offset);
- oz_trace(" len = 0x%x\n", len);
+
+ oz_dbg(ON, " req_type = 0x%x\n", req_type);
+ oz_dbg(ON, " desc_type = 0x%x\n", desc_type);
+ oz_dbg(ON, " index = 0x%x\n", index);
+ oz_dbg(ON, " windex = 0x%x\n", windex);
+ oz_dbg(ON, " offset = 0x%x\n", offset);
+ oz_dbg(ON, " len = 0x%x\n", len);
if (len > 200)
len = 200;
if (ei == NULL)
@@ -85,7 +87,8 @@ int oz_usb_get_desc_req(void *hpd, u8 req_id, u8 req_type, u8 desc_type,
body->index = index;
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
static int oz_usb_set_config_req(void *hpd, u8 req_id, u8 index)
@@ -96,6 +99,7 @@ static int oz_usb_set_config_req(void *hpd, u8 req_id, u8 index)
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_set_config_req *body;
+
if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
@@ -106,7 +110,8 @@ static int oz_usb_set_config_req(void *hpd, u8 req_id, u8 index)
body->index = index;
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
static int oz_usb_set_interface_req(void *hpd, u8 req_id, u8 index, u8 alt)
@@ -117,6 +122,7 @@ static int oz_usb_set_interface_req(void *hpd, u8 req_id, u8 index, u8 alt)
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_set_interface_req *body;
+
if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
@@ -128,7 +134,8 @@ static int oz_usb_set_interface_req(void *hpd, u8 req_id, u8 index, u8 alt)
body->alternative = alt;
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
@@ -140,6 +147,7 @@ static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_feature_req *body;
+
if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
@@ -152,7 +160,8 @@ static int oz_usb_set_clear_feature_req(void *hpd, u8 req_id, u8 type,
put_unaligned(feature, &body->feature);
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
static int oz_usb_vendor_class_req(void *hpd, u8 req_id, u8 req_type,
@@ -164,6 +173,7 @@ static int oz_usb_vendor_class_req(void *hpd, u8 req_id, u8 req_type,
struct oz_elt_buf *eb = &pd->elt_buff;
struct oz_elt_info *ei = oz_elt_info_alloc(&pd->elt_buff);
struct oz_vendor_class_req *body;
+
if (ei == NULL)
return -1;
elt = (struct oz_elt *)ei->data;
@@ -179,7 +189,8 @@ static int oz_usb_vendor_class_req(void *hpd, u8 req_id, u8 req_type,
memcpy(body->data, data, data_len);
return oz_usb_submit_elt(eb, ei, usb_ctx, 0, 0);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: tasklet
*/
int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
@@ -189,6 +200,7 @@ int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
unsigned windex = le16_to_cpu(setup->wIndex);
unsigned wlength = le16_to_cpu(setup->wLength);
int rc = 0;
+
if ((setup->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) {
switch (setup->bRequest) {
case USB_REQ_GET_DESCRIPTOR:
@@ -226,7 +238,8 @@ int oz_usb_control_req(void *hpd, u8 req_id, struct usb_ctrlrequest *setup,
}
return rc;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq
*/
int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb)
@@ -297,13 +310,15 @@ int oz_usb_send_isoc(void *hpd, u8 ep_num, struct urb *urb)
}
return 0;
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq-serialized
*/
static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
struct oz_usb_hdr *usb_hdr, int len)
{
struct oz_data *data_hdr = (struct oz_data *)usb_hdr;
+
switch (data_hdr->format) {
case OZ_DATA_F_MULTIPLE_FIXED: {
struct oz_multiple_fixed *body =
@@ -339,7 +354,8 @@ static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
}
}
-/*------------------------------------------------------------------------------
+
+/*
* This is called when the PD has received a USB element. The type of element
* is determined and is then passed to an appropriate handler function.
* Context: softirq-serialized
@@ -376,7 +392,7 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
u16 offs = le16_to_cpu(get_unaligned(&body->offset));
u16 total_size =
le16_to_cpu(get_unaligned(&body->total_size));
- oz_trace("USB_REQ_GET_DESCRIPTOR - cnf\n");
+ oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
body->rcode, body->data,
data_len, offs, total_size);
@@ -411,12 +427,14 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
done:
oz_usb_put(usb_ctx);
}
-/*------------------------------------------------------------------------------
+
+/*
* Context: softirq, process
*/
void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len)
{
struct oz_usb_ctx *usb_ctx;
+
spin_lock_bh(&pd->app_lock[OZ_APPID_USB-1]);
usb_ctx = (struct oz_usb_ctx *)pd->app_ctx[OZ_APPID_USB-1];
if (usb_ctx)
@@ -425,7 +443,7 @@ void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len)
if (usb_ctx == NULL)
return; /* Context has gone so nothing to do. */
if (!usb_ctx->stopped) {
- oz_trace("Farewell indicated ep = 0x%x\n", ep_num);
+ oz_dbg(ON, "Farewell indicated ep = 0x%x\n", ep_num);
oz_hcd_data_ind(usb_ctx->hport, ep_num, data, len);
}
oz_usb_put(usb_ctx);
diff --git a/drivers/staging/quickstart/quickstart.c b/drivers/staging/quickstart/quickstart.c
index adb8da564cf..4247d60c918 100644
--- a/drivers/staging/quickstart/quickstart.c
+++ b/drivers/staging/quickstart/quickstart.c
@@ -71,9 +71,8 @@ static struct quickstart_button *pressed;
static struct input_dev *quickstart_input;
/* Platform driver functions */
-static ssize_t quickstart_buttons_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t buttons_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
int count = 0;
struct quickstart_button *b;
@@ -94,18 +93,17 @@ static ssize_t quickstart_buttons_show(struct device *dev,
return count;
}
-static ssize_t quickstart_pressed_button_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t pressed_button_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%s\n",
(pressed ? pressed->name : "none"));
}
-static ssize_t quickstart_pressed_button_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t pressed_button_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
if (count < 2)
return -EINVAL;
@@ -319,9 +317,8 @@ static int quickstart_acpi_remove(struct acpi_device *device)
}
/* Platform driver structs */
-static DEVICE_ATTR(pressed_button, 0666, quickstart_pressed_button_show,
- quickstart_pressed_button_store);
-static DEVICE_ATTR(buttons, 0444, quickstart_buttons_show, NULL);
+static DEVICE_ATTR_RW(pressed_button);
+static DEVICE_ATTR_RO(buttons);
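
The handler renames above are what make DEVICE_ATTR_RW()/DEVICE_ATTR_RO() work: the macros derive the show/store function names from the attribute name. The new lines are roughly equivalent to the following, which also shows that pressed_button drops from the old 0666 to 0644 as a side effect:

static DEVICE_ATTR(pressed_button, 0644, pressed_button_show,
		   pressed_button_store);
static DEVICE_ATTR(buttons, 0444, buttons_show, NULL);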
static struct platform_device *pf_device;
static struct platform_driver pf_driver = {
.driver = {
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
index 8fc9f588b05..7f015499cfa 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
@@ -14,7 +14,7 @@
* Copyright (c) 2004, Intel Corporation
*
* Modified for Realtek's wi-fi cards by Andrea Merello
- * <andreamrl@tiscali.it>
+ * <andrea.merello@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
index d5df0d691fc..10b22100dd3 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_rx.c
@@ -14,7 +14,7 @@
******************************************************************************
Few modifications for Realtek's Wi-Fi drivers by
- Andrea Merello <andreamrl@tiscali.it>
+ Andrea Merello <andrea.merello@gmail.com>
A special thanks goes to Realtek for their support !
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
index 00f9af06aca..b65db542e1a 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
@@ -1,5 +1,5 @@
/* IEEE 802.11 SoftMAC layer
- * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
*
* Mostly extracted from the rtl8180-sa2400 driver for the
* in-kernel generic ieee802.11 stack.
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
index d9add5305e2..e5282068e3d 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac_wx.c
@@ -1,5 +1,5 @@
/* IEEE 802.11 SoftMAC layer
- * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
*
* Mostly extracted from the rtl8180-sa2400 driver for the
* in-kernel generic ieee802.11 stack.
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
index 89ed86ef0d1..b3466530cf9 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c
@@ -25,7 +25,7 @@
******************************************************************************
Few modifications for Realtek's Wi-Fi drivers by
- Andrea Merello <andreamrl@tiscali.it>
+ Andrea Merello <andrea.merello@gmail.com>
A special thanks goes to Realtek for their support !
diff --git a/drivers/staging/rtl8187se/r8180.h b/drivers/staging/rtl8187se/r8180.h
index edacc800164..d052f4a9a83 100644
--- a/drivers/staging/rtl8187se/r8180.h
+++ b/drivers/staging/rtl8187se/r8180.h
@@ -1,6 +1,6 @@
/*
This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
Parts of this driver are based on the GPL part of the
diff --git a/drivers/staging/rtl8187se/r8180_93cx6.h b/drivers/staging/rtl8187se/r8180_93cx6.h
index 79e7391ac88..b52b5b0610a 100644
--- a/drivers/staging/rtl8187se/r8180_93cx6.h
+++ b/drivers/staging/rtl8187se/r8180_93cx6.h
@@ -1,6 +1,6 @@
/*
This is part of rtl8180 OpenSource driver
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
Parts of this driver are based on the GPL part of the official realtek driver
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index ca691550436..5947a6f8e16 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -1,6 +1,6 @@
/*
This is part of rtl818x pci OpenSource driver - v 0.1
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public License)
Parts of this driver are based on the GPL part of the official
@@ -70,7 +70,7 @@ static int hwwep;
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, rtl8180_pci_id_tbl);
-MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
+MODULE_AUTHOR("Andrea Merello <andrea.merello@gmail.com>");
MODULE_DESCRIPTION("Linux driver for Realtek RTL8187SE WiFi cards");
module_param_string(ifname, ifname, sizeof(ifname), S_IRUGO|S_IWUSR);
@@ -197,7 +197,7 @@ inline void force_pci_posting(struct net_device *dev)
mb();
}
-irqreturn_t rtl8180_interrupt(int irq, void *netdev, struct pt_regs *regs);
+static irqreturn_t rtl8180_interrupt(int irq, void *netdev);
void set_nic_rxring(struct net_device *dev);
void set_nic_txring(struct net_device *dev);
static struct net_device_stats *rtl8180_stats(struct net_device *dev);
@@ -2666,7 +2666,7 @@ short rtl8180_init(struct net_device *dev)
TX_BEACON_RING_ADDR))
return -ENOMEM;
- if (request_irq(dev->irq, (void *)rtl8180_interrupt, IRQF_SHARED, dev->name, dev)) {
+ if (request_irq(dev->irq, rtl8180_interrupt, IRQF_SHARED, dev->name, dev)) {
DMESGE("Error allocating IRQ %d", dev->irq);
return -1;
} else {
@@ -3537,7 +3537,7 @@ void rtl8180_tx_isr(struct net_device *dev, int pri, short error)
spin_unlock_irqrestore(&priv->tx_lock, flag);
}
-irqreturn_t rtl8180_interrupt(int irq, void *netdev, struct pt_regs *regs)
+irqreturn_t rtl8180_interrupt(int irq, void *netdev)
{
struct net_device *dev = (struct net_device *) netdev;
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
diff --git a/drivers/staging/rtl8187se/r8180_hw.h b/drivers/staging/rtl8187se/r8180_hw.h
index 533938123a9..92c05af557c 100644
--- a/drivers/staging/rtl8187se/r8180_hw.h
+++ b/drivers/staging/rtl8187se/r8180_hw.h
@@ -1,6 +1,6 @@
/*
This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
Parts of this driver are based on the GPL part of the
diff --git a/drivers/staging/rtl8187se/r8180_rtl8225.h b/drivers/staging/rtl8187se/r8180_rtl8225.h
index c6f2128e755..c94ca0794a5 100644
--- a/drivers/staging/rtl8187se/r8180_rtl8225.h
+++ b/drivers/staging/rtl8187se/r8180_rtl8225.h
@@ -1,7 +1,7 @@
/*
This is part of the rtl8180-sa2400 driver
released under the GPL (See file COPYING for details).
- Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
+ Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
This files contains programming code for the rtl8225
radio frontend.
diff --git a/drivers/staging/rtl8187se/r8180_rtl8225z2.c b/drivers/staging/rtl8187se/r8180_rtl8225z2.c
index c592f7936dd..9ae96b7852f 100644
--- a/drivers/staging/rtl8187se/r8180_rtl8225z2.c
+++ b/drivers/staging/rtl8187se/r8180_rtl8225z2.c
@@ -1,7 +1,7 @@
/*
* This is part of the rtl8180-sa2400 driver
* released under the GPL (See file COPYING for details).
- * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
*
* This files contains programming code for the rtl8225
* radio frontend.
diff --git a/drivers/staging/rtl8187se/r8180_wx.c b/drivers/staging/rtl8187se/r8180_wx.c
index 156b7588229..dab787542c4 100644
--- a/drivers/staging/rtl8187se/r8180_wx.c
+++ b/drivers/staging/rtl8187se/r8180_wx.c
@@ -2,7 +2,7 @@
This file contains wireless extension handlers.
This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
Parts of this driver are based on the GPL part
diff --git a/drivers/staging/rtl8187se/r8180_wx.h b/drivers/staging/rtl8187se/r8180_wx.h
index 40819140311..d471520ac77 100644
--- a/drivers/staging/rtl8187se/r8180_wx.h
+++ b/drivers/staging/rtl8187se/r8180_wx.h
@@ -1,6 +1,6 @@
/*
This is part of rtl8180 OpenSource driver - v 0.3
- Copyright (C) Andrea Merello 2004 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
Parts of this driver are based on the GPL part of the official realtek driver
diff --git a/drivers/staging/rtl8188eu/Kconfig b/drivers/staging/rtl8188eu/Kconfig
new file mode 100644
index 00000000000..c9c548f1749
--- /dev/null
+++ b/drivers/staging/rtl8188eu/Kconfig
@@ -0,0 +1,29 @@
+config R8188EU
+ tristate "Realtek RTL8188EU Wireless LAN NIC driver"
+ depends on WLAN && USB
+ select WIRELESS_EXT
+ select WEXT_PRIV
+ default n
+ ---help---
+ This option adds support for Realtek RTL8188EU USB devices such as the TP-Link TL-WN725N.
+ If built as a module, it will be called r8188eu.
+
+if R8188EU
+
+config 88EU_AP_MODE
+ bool "Realtek RTL8188EU AP mode"
+ default y
+ ---help---
+ This option enables Access Point mode. Unless you know that your system
+ will never be used as an AP, or the target system has limited memory,
+ "Y" should be selected.
+
+config 88EU_P2P
+ bool "Realtek RTL8188EU Peer-to-peer mode"
+ default y
+ ---help---
+ This option enables peer-to-peer mode for the r8188eu driver. Unless you
+ know that peer-to-peer (P2P) mode will never be used, or the target system has
+ limited memory, "Y" should be selected.
+
+endif
diff --git a/drivers/staging/rtl8188eu/Makefile b/drivers/staging/rtl8188eu/Makefile
new file mode 100644
index 00000000000..1639a45da94
--- /dev/null
+++ b/drivers/staging/rtl8188eu/Makefile
@@ -0,0 +1,70 @@
+EXTRA_CFLAGS += -I$(src)/include
+
+r8188eu-y := \
+ core/rtw_ap.o \
+ core/rtw_br_ext.o \
+ core/rtw_cmd.o \
+ core/rtw_debug.o \
+ core/rtw_efuse.o \
+ core/rtw_ieee80211.o \
+ core/rtw_io.o \
+ core/rtw_ioctl_set.o \
+ core/rtw_iol.o \
+ core/rtw_led.o \
+ core/rtw_mlme.o \
+ core/rtw_mlme_ext.o \
+ core/rtw_mp.o \
+ core/rtw_mp_ioctl.o \
+ core/rtw_pwrctrl.o \
+ core/rtw_p2p.o \
+ core/rtw_recv.o \
+ core/rtw_rf.o \
+ core/rtw_security.o \
+ core/rtw_sreset.o \
+ core/rtw_sta_mgt.o \
+ core/rtw_wlan_util.o \
+ core/rtw_xmit.o \
+ hal/HalHWImg8188E_MAC.o \
+ hal/HalHWImg8188E_BB.o \
+ hal/HalHWImg8188E_RF.o \
+ hal/HalPhyRf.o \
+ hal/HalPhyRf_8188e.o \
+ hal/HalPwrSeqCmd.o \
+ hal/Hal8188EFWImg_CE.o \
+ hal/Hal8188EPwrSeq.o \
+ hal/Hal8188ERateAdaptive.o\
+ hal/hal_intf.o \
+ hal/hal_com.o \
+ hal/odm.o \
+ hal/odm_debug.o \
+ hal/odm_interface.o \
+ hal/odm_HWConfig.o \
+ hal/odm_RegConfig8188E.o\
+ hal/odm_RTL8188E.o \
+ hal/rtl8188e_cmd.o \
+ hal/rtl8188e_dm.o \
+ hal/rtl8188e_hal_init.o \
+ hal/rtl8188e_mp.o \
+ hal/rtl8188e_phycfg.o \
+ hal/rtl8188e_rf6052.o \
+ hal/rtl8188e_rxdesc.o \
+ hal/rtl8188e_sreset.o \
+ hal/rtl8188e_xmit.o \
+ hal/rtl8188eu_led.o \
+ hal/rtl8188eu_recv.o \
+ hal/rtl8188eu_xmit.o \
+ hal/usb_halinit.o \
+ hal/usb_ops_linux.o \
+ os_dep/ioctl_linux.o \
+ os_dep/mlme_linux.o \
+ os_dep/os_intfs.o \
+ os_dep/osdep_service.o \
+ os_dep/recv_linux.o \
+ os_dep/rtw_android.o \
+ os_dep/usb_intf.o \
+ os_dep/usb_ops_linux.o \
+ os_dep/xmit_linux.o
+
+obj-$(CONFIG_R8188EU) := r8188eu.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/staging/rtl8188eu/TODO b/drivers/staging/rtl8188eu/TODO
new file mode 100644
index 00000000000..e50aa50bdb4
--- /dev/null
+++ b/drivers/staging/rtl8188eu/TODO
@@ -0,0 +1,15 @@
+TODO:
+- find and remove remaining code valid only for 5 GHz. Most of the obvious
+ ones have been removed, but things like channel > 14 still exist.
+- find and remove any code for other chips that is left over
+- convert to external firmware
+- convert any remaining unusual variable types
+- find code that can use %pM and %Nph formatting
+- checkpatch.pl fixes - most of the remaining ones are lines too long. Many
+ of them will require refactoring
+- merge Realtek's bugfixes and new features into the driver
+- switch to use LIB80211
+- switch to use MAC80211
+
+Please send any patches to Greg Kroah-Hartman <gregkh@linux.com>,
+and Larry Finger <Larry.Finger@lwfinger.net>.
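
On the "%pM and %Nph formatting" item: these are printk extensions for dumping a MAC address and a short hex buffer. An illustrative conversion, using DBG_88E from this driver and a hypothetical u8 addr[ETH_ALEN]:

/* before */
DBG_88E("addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
	addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);

/* after: %pM prints a 6-byte MAC address, %6ph prints 6 bytes as hex */
DBG_88E("addr = %pM\n", addr);
DBG_88E("raw  = %6ph\n", addr);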
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
new file mode 100644
index 00000000000..2c73823d224
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -0,0 +1,1988 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_AP_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wifi.h>
+#include <ieee80211.h>
+
+#ifdef CONFIG_88EU_AP_MODE
+
+void init_mlme_ap_info(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+
+
+ _rtw_spinlock_init(&pmlmepriv->bcn_update_lock);
+
+ /* for ACL */
+ _rtw_init_queue(&pacl_list->acl_node_q);
+
+ start_ap_mode(padapter);
+}
+
+void free_mlme_ap_info(struct adapter *padapter)
+{
+ unsigned long irqL;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ pmlmepriv->update_bcn = false;
+ pmlmeext->bstart_bss = false;
+
+ rtw_sta_flush(padapter);
+
+ pmlmeinfo->state = _HW_STATE_NOLINK_;
+
+ /* free_assoc_sta_resources */
+ rtw_free_all_stainfo(padapter);
+
+ /* free bc/mc sta_info */
+ psta = rtw_get_bcmc_stainfo(padapter);
+ _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ rtw_free_stainfo(padapter, psta);
+ _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+
+ _rtw_spinlock_free(&pmlmepriv->bcn_update_lock);
+}
+
+static void update_BCNTIM(struct adapter *padapter)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork_mlmeext = &(pmlmeinfo->network);
+ unsigned char *pie = pnetwork_mlmeext->IEs;
+
+ /* update TIM IE */
+ if (true) {
+ u8 *p, *dst_ie, *premainder_ie = NULL;
+ u8 *pbackup_remainder_ie = NULL;
+ __le16 tim_bitmap_le;
+ uint offset, tmp_len, tim_ielen, tim_ie_offset, remainder_ielen;
+
+ tim_bitmap_le = cpu_to_le16(pstapriv->tim_bitmap);
+
+ p = rtw_get_ie(pie + _FIXED_IE_LENGTH_, _TIM_IE_, &tim_ielen, pnetwork_mlmeext->IELength - _FIXED_IE_LENGTH_);
+ if (p != NULL && tim_ielen > 0) {
+ tim_ielen += 2;
+ premainder_ie = p+tim_ielen;
+ tim_ie_offset = (int)(p - pie);
+ remainder_ielen = pnetwork_mlmeext->IELength - tim_ie_offset - tim_ielen;
+ /* append TIM IE from dst_ie offset */
+ dst_ie = p;
+ } else {
+ tim_ielen = 0;
+
+ /* calculate head_len */
+ offset = _FIXED_IE_LENGTH_;
+ offset += pnetwork_mlmeext->Ssid.SsidLength + 2;
+
+ /* get supported rates len */
+ p = rtw_get_ie(pie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_, &tmp_len, (pnetwork_mlmeext->IELength - _BEACON_IE_OFFSET_));
+ if (p != NULL)
+ offset += tmp_len+2;
+
+ /* DS Parameter Set IE, len = 3 */
+ offset += 3;
+
+ premainder_ie = pie + offset;
+
+ remainder_ielen = pnetwork_mlmeext->IELength - offset - tim_ielen;
+
+ /* append TIM IE from offset */
+ dst_ie = pie + offset;
+ }
+
+ if (remainder_ielen > 0) {
+ pbackup_remainder_ie = rtw_malloc(remainder_ielen);
+ if (pbackup_remainder_ie && premainder_ie)
+ memcpy(pbackup_remainder_ie, premainder_ie, remainder_ielen);
+ }
+ *dst_ie++ = _TIM_IE_;
+
+ if ((pstapriv->tim_bitmap&0xff00) && (pstapriv->tim_bitmap&0x00fc))
+ tim_ielen = 5;
+ else
+ tim_ielen = 4;
+
+ *dst_ie++ = tim_ielen;
+
+ *dst_ie++ = 0;/* DTIM count */
+ *dst_ie++ = 1;/* DTIM period */
+
+ if (pstapriv->tim_bitmap&BIT(0))/* for bc/mc frames */
+ *dst_ie++ = BIT(0);/* bitmap ctrl */
+ else
+ *dst_ie++ = 0;
+
+ if (tim_ielen == 4) {
+ *dst_ie++ = *(u8 *)&tim_bitmap_le;
+ } else if (tim_ielen == 5) {
+ memcpy(dst_ie, &tim_bitmap_le, 2);
+ dst_ie += 2;
+ }
+
+ /* copy remainder IE */
+ if (pbackup_remainder_ie) {
+ memcpy(dst_ie, pbackup_remainder_ie, remainder_ielen);
+
+ kfree(pbackup_remainder_ie);
+ }
+ offset = (uint)(dst_ie - pie);
+ pnetwork_mlmeext->IELength = offset + remainder_ielen;
+ }
+
+ set_tx_beacon_cmd(padapter);
+}
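
For reference, the bytes emitted above follow the standard 802.11 TIM element layout. An illustrative view (this struct is not defined by the driver; update_BCNTIM() writes the fields by hand):

struct tim_ie_layout {
	u8 eid;			/* _TIM_IE_ */
	u8 len;			/* 4 or 5, as computed above */
	u8 dtim_count;		/* written as 0 */
	u8 dtim_period;		/* written as 1 */
	u8 bitmap_ctrl;		/* BIT(0) when bc/mc traffic is queued */
	u8 partial_vbitmap[2];	/* one or two bytes of pstapriv->tim_bitmap */
};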
+
+void rtw_add_bcn_ie(struct adapter *padapter, struct wlan_bssid_ex *pnetwork, u8 index, u8 *data, u8 len)
+{
+ struct ndis_802_11_var_ie *pIE;
+ u8 bmatch = false;
+ u8 *pie = pnetwork->IEs;
+ u8 *p = NULL, *dst_ie = NULL, *premainder_ie = NULL;
+ u8 *pbackup_remainder_ie = NULL;
+ u32 i, offset, ielen = 0, ie_offset, remainder_ielen = 0;
+
+ for (i = sizeof(struct ndis_802_11_fixed_ie); i < pnetwork->IELength;) {
+ pIE = (struct ndis_802_11_var_ie *)(pnetwork->IEs + i);
+
+ if (pIE->ElementID > index) {
+ break;
+ } else if (pIE->ElementID == index) { /* the same IE already exists */
+ p = (u8 *)pIE;
+ ielen = pIE->Length;
+ bmatch = true;
+ break;
+ }
+ p = (u8 *)pIE;
+ ielen = pIE->Length;
+ i += (pIE->Length + 2);
+ }
+
+ if (p != NULL && ielen > 0) {
+ ielen += 2;
+
+ premainder_ie = p+ielen;
+
+ ie_offset = (int)(p - pie);
+
+ remainder_ielen = pnetwork->IELength - ie_offset - ielen;
+
+ if (bmatch)
+ dst_ie = p;
+ else
+ dst_ie = (p+ielen);
+ }
+
+ if (remainder_ielen > 0) {
+ pbackup_remainder_ie = rtw_malloc(remainder_ielen);
+ if (pbackup_remainder_ie && premainder_ie)
+ memcpy(pbackup_remainder_ie, premainder_ie, remainder_ielen);
+ }
+
+ *dst_ie++ = index;
+ *dst_ie++ = len;
+
+ memcpy(dst_ie, data, len);
+ dst_ie += len;
+
+ /* copy remainder IE */
+ if (pbackup_remainder_ie) {
+ memcpy(dst_ie, pbackup_remainder_ie, remainder_ielen);
+
+ kfree(pbackup_remainder_ie);
+ }
+
+ offset = (uint)(dst_ie - pie);
+ pnetwork->IELength = offset + remainder_ielen;
+}
+
+void rtw_remove_bcn_ie(struct adapter *padapter, struct wlan_bssid_ex *pnetwork, u8 index)
+{
+ u8 *p, *dst_ie = NULL, *premainder_ie = NULL;
+ u8 *pbackup_remainder_ie = NULL;
+ uint offset, ielen, ie_offset, remainder_ielen = 0;
+ u8 *pie = pnetwork->IEs;
+
+ p = rtw_get_ie(pie + _FIXED_IE_LENGTH_, index, &ielen,
+ pnetwork->IELength - _FIXED_IE_LENGTH_);
+ if (p != NULL && ielen > 0) {
+ ielen += 2;
+
+ premainder_ie = p+ielen;
+
+ ie_offset = (int)(p - pie);
+
+ remainder_ielen = pnetwork->IELength - ie_offset - ielen;
+
+ dst_ie = p;
+ }
+
+ if (remainder_ielen > 0) {
+ pbackup_remainder_ie = rtw_malloc(remainder_ielen);
+ if (pbackup_remainder_ie && premainder_ie)
+ memcpy(pbackup_remainder_ie, premainder_ie, remainder_ielen);
+ }
+
+ /* copy remainder IE */
+ if (pbackup_remainder_ie) {
+ memcpy(dst_ie, pbackup_remainder_ie, remainder_ielen);
+
+ kfree(pbackup_remainder_ie);
+ }
+
+ offset = (uint)(dst_ie - pie);
+ pnetwork->IELength = offset + remainder_ielen;
+}
+
+static u8 chk_sta_is_alive(struct sta_info *psta)
+{
+ u8 ret = false;
+
+ if ((psta->sta_stats.last_rx_data_pkts + psta->sta_stats.last_rx_ctrl_pkts) !=
+ (psta->sta_stats.rx_data_pkts + psta->sta_stats.rx_ctrl_pkts))
+ ret = true;
+
+ sta_update_last_rx_pkts(psta);
+
+ return ret;
+}
+
+void expire_timeout_chk(struct adapter *padapter)
+{
+ unsigned long irqL;
+ struct list_head *phead, *plist;
+ u8 updated = 0;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 chk_alive_num = 0;
+ char chk_alive_list[NUM_STA];
+ int i;
+
+ _enter_critical_bh(&pstapriv->auth_list_lock, &irqL);
+
+ phead = &pstapriv->auth_list;
+ plist = get_next(phead);
+
+ /* check auth_queue */
+ while ((rtw_end_of_queue_search(phead, plist)) == false) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, auth_list);
+ plist = get_next(plist);
+
+ if (psta->expire_to > 0) {
+ psta->expire_to--;
+ if (psta->expire_to == 0) {
+ rtw_list_delete(&psta->auth_list);
+ pstapriv->auth_list_cnt--;
+
+ DBG_88E("auth expire %6ph\n",
+ psta->hwaddr);
+
+ _exit_critical_bh(&pstapriv->auth_list_lock, &irqL);
+
+ _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ rtw_free_stainfo(padapter, psta);
+ _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+
+ _enter_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ }
+ }
+
+ }
+ _exit_critical_bh(&pstapriv->auth_list_lock, &irqL);
+
+ psta = NULL;
+
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+
+ /* check asoc_queue */
+ while ((rtw_end_of_queue_search(phead, plist)) == false) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+ plist = get_next(plist);
+
+ if (chk_sta_is_alive(psta) || !psta->expire_to) {
+ psta->expire_to = pstapriv->expire_to;
+ psta->keep_alive_trycnt = 0;
+ psta->under_exist_checking = 0;
+ } else {
+ psta->expire_to--;
+ }
+
+ if (psta->expire_to <= 0) {
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ if (padapter->registrypriv.wifi_spec == 1) {
+ psta->expire_to = pstapriv->expire_to;
+ continue;
+ }
+
+ if (psta->state & WIFI_SLEEP_STATE) {
+ if (!(psta->state & WIFI_STA_ALIVE_CHK_STATE)) {
+ /* check liveness by other means if the station is in PS mode */
+ psta->expire_to = pstapriv->expire_to;
+ psta->state |= WIFI_STA_ALIVE_CHK_STATE;
+
+ /* to update bcn with tim_bitmap for this station */
+ pstapriv->tim_bitmap |= BIT(psta->aid);
+ update_beacon(padapter, _TIM_IE_, NULL, false);
+
+ if (!pmlmeext->active_keep_alive_check)
+ continue;
+ }
+ }
+ if (pmlmeext->active_keep_alive_check) {
+ int stainfo_offset;
+
+ stainfo_offset = rtw_stainfo_offset(pstapriv, psta);
+ if (stainfo_offset_valid(stainfo_offset))
+ chk_alive_list[chk_alive_num++] = stainfo_offset;
+ continue;
+ }
+
+ rtw_list_delete(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+
+ DBG_88E("asoc expire %pM, state = 0x%x\n", (psta->hwaddr), psta->state);
+ updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
+ } else {
+ /* TODO: Aging mechanism to digest frames in sleep_q to avoid running out of xmitframe */
+ if (psta->sleepq_len > (NR_XMITFRAME/pstapriv->asoc_list_cnt) &&
+ padapter->xmitpriv.free_xmitframe_cnt < (NR_XMITFRAME/pstapriv->asoc_list_cnt/2)) {
+ DBG_88E("%s sta:%pM, sleepq_len:%u, free_xmitframe_cnt:%u, asoc_list_cnt:%u, clear sleep_q\n", __func__,
+ (psta->hwaddr), psta->sleepq_len,
+ padapter->xmitpriv.free_xmitframe_cnt,
+ pstapriv->asoc_list_cnt);
+ wakeup_sta_to_xmit(padapter, psta);
+ }
+ }
+ }
+
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+
+ if (chk_alive_num) {
+ u8 backup_oper_channel = 0;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ /* switch to the current network's channel before issuing keep-alive frames */
+ if (rtw_get_oper_ch(padapter) != pmlmeext->cur_channel) {
+ backup_oper_channel = rtw_get_oper_ch(padapter);
+ SelectChannel(padapter, pmlmeext->cur_channel);
+ }
+
+ /* issue null data to check whether the sta is alive */
+ for (i = 0; i < chk_alive_num; i++) {
+ int ret = _FAIL;
+
+ psta = rtw_get_stainfo_by_offset(pstapriv, chk_alive_list[i]);
+
+ if (psta->state & WIFI_SLEEP_STATE)
+ ret = issue_nulldata(padapter, psta->hwaddr, 0, 1, 50);
+ else
+ ret = issue_nulldata(padapter, psta->hwaddr, 0, 3, 50);
+
+ psta->keep_alive_trycnt++;
+ if (ret == _SUCCESS) {
+ DBG_88E("asoc check, sta(%pM) is alive\n", (psta->hwaddr));
+ psta->expire_to = pstapriv->expire_to;
+ psta->keep_alive_trycnt = 0;
+ continue;
+ } else if (psta->keep_alive_trycnt <= 3) {
+ DBG_88E("ack check for asoc expire, keep_alive_trycnt =%d\n", psta->keep_alive_trycnt);
+ psta->expire_to = 1;
+ continue;
+ }
+
+ psta->keep_alive_trycnt = 0;
+
+ DBG_88E("asoc expire %pM, state = 0x%x\n", (psta->hwaddr), psta->state);
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ rtw_list_delete(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+ updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ }
+
+ if (backup_oper_channel > 0) /* back to the original operation channel */
+ SelectChannel(padapter, backup_oper_channel);
+ }
+
+ associated_clients_update(padapter, updated);
+}
+
+void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
+{
+ int i;
+ u8 rf_type;
+ u32 init_rate = 0;
+ unsigned char sta_band = 0, raid, shortGIrate = false;
+ unsigned char limit;
+ unsigned int tx_ra_bitmap = 0;
+ struct ht_priv *psta_ht = NULL;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_bssid_ex *pcur_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
+
+ if (psta)
+ psta_ht = &psta->htpriv;
+ else
+ return;
+
+ if (!(psta->state & _FW_LINKED))
+ return;
+
+ /* b/g mode ra_bitmap */
+ for (i = 0; i < sizeof(psta->bssrateset); i++) {
+ if (psta->bssrateset[i])
+ tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value(psta->bssrateset[i]&0x7f);
+ }
+ /* n mode ra_bitmap */
+ if (psta_ht->ht_option) {
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+ if (rf_type == RF_2T2R)
+ limit = 16;/* 2R */
+ else
+ limit = 8;/* 1R */
+
+ for (i = 0; i < limit; i++) {
+ if (psta_ht->ht_cap.supp_mcs_set[i/8] & BIT(i%8))
+ tx_ra_bitmap |= BIT(i+12);
+ }
+
+ /* max short GI rate */
+ shortGIrate = psta_ht->sgi;
+ }
+
+ if (pcur_network->Configuration.DSConfig > 14) {
+ /* 5G band */
+ if (tx_ra_bitmap & 0xffff000)
+ sta_band |= WIRELESS_11_5N | WIRELESS_11A;
+ else
+ sta_band |= WIRELESS_11A;
+ } else {
+ if (tx_ra_bitmap & 0xffff000)
+ sta_band |= WIRELESS_11_24N | WIRELESS_11G | WIRELESS_11B;
+ else if (tx_ra_bitmap & 0xff0)
+ sta_band |= WIRELESS_11G | WIRELESS_11B;
+ else
+ sta_band |= WIRELESS_11B;
+ }
+
+ psta->wireless_mode = sta_band;
+
+ raid = networktype_to_raid(sta_band);
+ init_rate = get_highest_rate_idx(tx_ra_bitmap&0x0fffffff)&0x3f;
+
+ if (psta->aid < NUM_STA) {
+ u8 arg = 0;
+
+ arg = psta->mac_id&0x1f;
+
+ arg |= BIT(7);/* support entry 2~31 */
+
+ if (shortGIrate)
+ arg |= BIT(5);
+
+ tx_ra_bitmap |= ((raid<<28)&0xf0000000);
+
+ DBG_88E("%s => mac_id:%d , raid:%d , bitmap = 0x%x, arg = 0x%x\n",
+ __func__ , psta->mac_id, raid , tx_ra_bitmap, arg);
+
+ /* bitmap[0:27] = tx_rate_bitmap */
+ /* bitmap[28:31]= Rate Adaptive id */
+ /* arg[0:4] = macid */
+ /* arg[5] = Short GI */
+ rtw_hal_add_ra_tid(padapter, tx_ra_bitmap, arg, rssi_level);
+
+ if (shortGIrate)
+ init_rate |= BIT(6);
+
+ /* set ra_id, init_rate */
+ psta->raid = raid;
+ psta->init_rate = init_rate;
+
+ } else {
+ DBG_88E("station aid %d exceed the max number\n", psta->aid);
+ }
+}
+
+static void update_bmc_sta(struct adapter *padapter)
+{
+ unsigned long irqL;
+ u32 init_rate = 0;
+ unsigned char network_type, raid;
+ int i, supportRateNum = 0;
+ unsigned int tx_ra_bitmap = 0;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_bssid_ex *pcur_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
+ struct sta_info *psta = rtw_get_bcmc_stainfo(padapter);
+
+ if (psta) {
+ psta->aid = 0;/* default set to 0 */
+ psta->mac_id = psta->aid + 1;
+
+ psta->qos_option = 0;
+ psta->htpriv.ht_option = false;
+
+ psta->ieee8021x_blocked = 0;
+
+ _rtw_memset((void *)&psta->sta_stats, 0, sizeof(struct stainfo_stats));
+
+ /* prepare for add_RATid */
+ supportRateNum = rtw_get_rateset_len((u8 *)&pcur_network->SupportedRates);
+ network_type = rtw_check_network_type((u8 *)&pcur_network->SupportedRates, supportRateNum, 1);
+
+ memcpy(psta->bssrateset, &pcur_network->SupportedRates, supportRateNum);
+ psta->bssratelen = supportRateNum;
+
+ /* b/g mode ra_bitmap */
+ for (i = 0; i < supportRateNum; i++) {
+ if (psta->bssrateset[i])
+ tx_ra_bitmap |= rtw_get_bit_value_from_ieee_value(psta->bssrateset[i]&0x7f);
+ }
+
+ if (pcur_network->Configuration.DSConfig > 14) {
+ /* force to A mode. 5G doesn't support CCK rates */
+ network_type = WIRELESS_11A;
+ tx_ra_bitmap = 0x150; /* 6, 12, 24 Mbps */
+ } else {
+ /* force to b mode */
+ network_type = WIRELESS_11B;
+ tx_ra_bitmap = 0xf;
+ }
+
+ raid = networktype_to_raid(network_type);
+ init_rate = get_highest_rate_idx(tx_ra_bitmap&0x0fffffff)&0x3f;
+
+ /* ap mode */
+ rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true);
+
+ {
+ u8 arg = 0;
+
+ arg = psta->mac_id&0x1f;
+ arg |= BIT(7);
+ tx_ra_bitmap |= ((raid<<28)&0xf0000000);
+ DBG_88E("update_bmc_sta, mask = 0x%x, arg = 0x%x\n", tx_ra_bitmap, arg);
+
+ /* bitmap[0:27] = tx_rate_bitmap */
+ /* bitmap[28:31]= Rate Adaptive id */
+ /* arg[0:4] = macid */
+ /* arg[5] = Short GI */
+ rtw_hal_add_ra_tid(padapter, tx_ra_bitmap, arg, 0);
+ }
+ /* set ra_id, init_rate */
+ psta->raid = raid;
+ psta->init_rate = init_rate;
+
+ rtw_stassoc_hw_rpt(padapter, psta);
+
+ _enter_critical_bh(&psta->lock, &irqL);
+ psta->state = _FW_LINKED;
+ _exit_critical_bh(&psta->lock, &irqL);
+
+ } else {
+ DBG_88E("add_RATid_bmc_sta error!\n");
+ }
+}
+
+/* notes: */
+/* AID: 1~MAX for sta and 0 for bc/mc in ap/adhoc mode */
+/* MAC_ID = AID+1 for sta in ap/adhoc mode */
+/* MAC_ID = 1 for bc/mc for sta/ap/adhoc */
+/* MAC_ID = 0 for bssid for sta/ap/adhoc */
+/* CAM_ID = 0~3 for default key, cmd_id = macid + 3, macid = aid+1; */
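+/* e.g. a station that associates with AID 5 is assigned MAC_ID 6 (AID+1), */
+/* while the bc/mc entry keeps AID 0 and MAC_ID 1 (see update_bmc_sta above). */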
+
+void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
+{
+ unsigned long irqL;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;
+ struct ht_priv *phtpriv_sta = &psta->htpriv;
+
+ psta->mac_id = psta->aid+1;
+ DBG_88E("%s\n", __func__);
+
+ /* ap mode */
+ rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true);
+
+ if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
+ psta->ieee8021x_blocked = true;
+ else
+ psta->ieee8021x_blocked = false;
+
+
+ /* update sta's cap */
+
+ /* ERP */
+ VCS_update(padapter, psta);
+ /* HT related cap */
+ if (phtpriv_sta->ht_option) {
+ /* check if sta supports rx ampdu */
+ phtpriv_sta->ampdu_enable = phtpriv_ap->ampdu_enable;
+
+ /* check if sta supports Short GI */
+ if ((phtpriv_sta->ht_cap.cap_info & phtpriv_ap->ht_cap.cap_info) & (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40))
+ phtpriv_sta->sgi = true;
+
+ /* bwmode */
+ if ((phtpriv_sta->ht_cap.cap_info & phtpriv_ap->ht_cap.cap_info) & IEEE80211_HT_CAP_SUP_WIDTH) {
+ phtpriv_sta->bwmode = pmlmeext->cur_bwmode;
+ phtpriv_sta->ch_offset = pmlmeext->cur_ch_offset;
+ }
+ psta->qos_option = true;
+ } else {
+ phtpriv_sta->ampdu_enable = false;
+ phtpriv_sta->sgi = false;
+ phtpriv_sta->bwmode = HT_CHANNEL_WIDTH_20;
+ phtpriv_sta->ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ }
+
+ /* Rx AMPDU */
+ send_delba(padapter, 0, psta->hwaddr);/* recipient */
+
+ /* TX AMPDU */
+ send_delba(padapter, 1, psta->hwaddr);/* originator */
+ phtpriv_sta->agg_enable_bitmap = 0x0;/* reset */
+ phtpriv_sta->candidate_tid_bitmap = 0x0;/* reset */
+
+ /* todo: init other variables */
+
+ _rtw_memset((void *)&psta->sta_stats, 0, sizeof(struct stainfo_stats));
+
+ _enter_critical_bh(&psta->lock, &irqL);
+ psta->state |= _FW_LINKED;
+ _exit_critical_bh(&psta->lock, &irqL);
+}
+
+static void update_hw_ht_param(struct adapter *padapter)
+{
+ unsigned char max_AMPDU_len;
+ unsigned char min_MPDU_spacing;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ DBG_88E("%s\n", __func__);
+
+ /* handle A-MPDU parameter field */
+ /*
+ AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
+ AMPDU_para [4:2]:Min MPDU Start Spacing
+ */
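+ /* e.g. AMPDU_para = 0x13 decodes to a max A-MPDU length of 64k (bits [1:0] = 3) */
+ /* and a min MPDU start spacing of 4 (bits [4:2] = 4). */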
+ max_AMPDU_len = pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x03;
+
+ min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) >> 2;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
+
+ /* */
+ /* Config SM Power Save setting */
+ /* */
+ pmlmeinfo->SM_PS = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & 0x0C) >> 2;
+ if (pmlmeinfo->SM_PS == WLAN_HT_CAP_SM_PS_STATIC)
+ DBG_88E("%s(): WLAN_HT_CAP_SM_PS_STATIC\n", __func__);
+}
+
+static void start_bss_network(struct adapter *padapter, u8 *pbuf)
+{
+ u8 *p;
+ u8 val8, cur_channel, cur_bwmode, cur_ch_offset;
+ u16 bcn_interval;
+ u32 acparm;
+ int ie_len;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork_mlmeext = &(pmlmeinfo->network);
+ struct HT_info_element *pht_info = NULL;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+#endif /* CONFIG_88EU_P2P */
+
+ bcn_interval = (u16)pnetwork->Configuration.BeaconPeriod;
+ cur_channel = pnetwork->Configuration.DSConfig;
+ cur_bwmode = HT_CHANNEL_WIDTH_20;
+ cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+
+
+ /* check if there is a WPS IE; */
+ /* if there is a WPS IE in the beacon, hostapd will update the beacon twice when starting, */
+ /* and the first time the security IE (RSN/WPA IE) will not be included in the beacon. */
+ if (!rtw_get_wps_ie(pnetwork->IEs+_FIXED_IE_LENGTH_, pnetwork->IELength-_FIXED_IE_LENGTH_, NULL, NULL))
+ pmlmeext->bstart_bss = true;
+
+ /* todo: update wmm, ht cap */
+ if (pmlmepriv->qospriv.qos_option)
+ pmlmeinfo->WMM_enable = true;
+ if (pmlmepriv->htpriv.ht_option) {
+ pmlmeinfo->WMM_enable = true;
+ pmlmeinfo->HT_enable = true;
+
+ update_hw_ht_param(padapter);
+ }
+
+ if (pmlmepriv->cur_network.join_res != true) { /* setting only at first time */
+ /* WEP Key will be set before this function, do not clear CAM. */
+ if ((psecuritypriv->dot11PrivacyAlgrthm != _WEP40_) &&
+ (psecuritypriv->dot11PrivacyAlgrthm != _WEP104_))
+ flush_all_cam_entry(padapter); /* clear CAM */
+ }
+
+ /* set MSR to AP_Mode */
+ Set_MSR(padapter, _HW_STATE_AP_);
+
+ /* Set BSSID REG */
+ rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, pnetwork->MacAddress);
+
+ /* Set EDCA param reg */
+ acparm = 0x002F3217; /* VO */
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VO, (u8 *)(&acparm));
+ acparm = 0x005E4317; /* VI */
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VI, (u8 *)(&acparm));
+ acparm = 0x005ea42b; /* BE */
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BE, (u8 *)(&acparm));
+ acparm = 0x0000A444; /* BK */
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BK, (u8 *)(&acparm));
+
+ /* Set Security */
+ val8 = (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) ? 0xcc : 0xcf;
+ rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
+
+ /* Beacon Control related register */
+ rtw_hal_set_hwreg(padapter, HW_VAR_BEACON_INTERVAL, (u8 *)(&bcn_interval));
+
+ UpdateBrateTbl(padapter, pnetwork->SupportedRates);
+ rtw_hal_set_hwreg(padapter, HW_VAR_BASIC_RATE, pnetwork->SupportedRates);
+
+ if (!pmlmepriv->cur_network.join_res) { /* setting only at first time */
+ /* turn on all dynamic functions */
+ Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
+ }
+ /* set channel, bwmode */
+ p = rtw_get_ie((pnetwork->IEs + sizeof(struct ndis_802_11_fixed_ie)), _HT_ADD_INFO_IE_, &ie_len, (pnetwork->IELength - sizeof(struct ndis_802_11_fixed_ie)));
+ if (p && ie_len) {
+ pht_info = (struct HT_info_element *)(p+2);
+
+ if ((pregpriv->cbw40_enable) && (pht_info->infos[0] & BIT(2))) {
+ /* switch to the 40M Hz mode */
+ cur_bwmode = HT_CHANNEL_WIDTH_40;
+ switch (pht_info->infos[0] & 0x3) {
+ case 1:
+ cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
+ break;
+ case 3:
+ cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
+ break;
+ default:
+ cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ break;
+ }
+ }
+ }
+ /* TODO: need to judge the phy parameters on concurrent mode for single phy */
+ set_channel_bwmode(padapter, cur_channel, cur_ch_offset, cur_bwmode);
+
+ DBG_88E("CH =%d, BW =%d, offset =%d\n", cur_channel, cur_bwmode, cur_ch_offset);
+
+ /* */
+ pmlmeext->cur_channel = cur_channel;
+ pmlmeext->cur_bwmode = cur_bwmode;
+ pmlmeext->cur_ch_offset = cur_ch_offset;
+ pmlmeext->cur_wireless_mode = pmlmepriv->cur_network.network_type;
+
+ /* update cur_wireless_mode */
+ update_wireless_mode(padapter);
+
+ /* update capability after cur_wireless_mode updated */
+ update_capinfo(padapter, rtw_get_capability((struct wlan_bssid_ex *)pnetwork));
+
+ /* let pnetwork_mlmeext == pnetwork_mlme. */
+ memcpy(pnetwork_mlmeext, pnetwork, pnetwork->Length);
+
+#ifdef CONFIG_88EU_P2P
+ memcpy(pwdinfo->p2p_group_ssid, pnetwork->Ssid.Ssid, pnetwork->Ssid.SsidLength);
+ pwdinfo->p2p_group_ssid_len = pnetwork->Ssid.SsidLength;
+#endif /* CONFIG_88EU_P2P */
+
+ if (pmlmeext->bstart_bss) {
+ update_beacon(padapter, _TIM_IE_, NULL, false);
+
+ /* issue beacon frame */
+ if (send_beacon(padapter) == _FAIL)
+ DBG_88E("issue_beacon, fail!\n");
+ }
+
+ /* update bc/mc sta_info */
+ update_bmc_sta(padapter);
+}
+
+int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
+{
+ int ret = _SUCCESS;
+ u8 *p;
+ u8 *pHT_caps_ie = NULL;
+ u8 *pHT_info_ie = NULL;
+ struct sta_info *psta = NULL;
+ u16 cap, ht_cap = false;
+ uint ie_len = 0;
+ int group_cipher, pairwise_cipher;
+ u8 channel, network_type, supportRate[NDIS_802_11_LENGTH_RATES_EX];
+ int supportRateNum = 0;
+ u8 OUI1[] = {0x00, 0x50, 0xf2, 0x01};
+ u8 WMM_PARA_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_bssid_ex *pbss_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
+ u8 *ie = pbss_network->IEs;
+
+ /* SSID */
+ /* Supported rates */
+ /* DS Params */
+ /* WLAN_EID_COUNTRY */
+ /* ERP Information element */
+ /* Extended supported rates */
+ /* WPA/WPA2 */
+ /* Wi-Fi Wireless Multimedia Extensions */
+ /* ht_capab, ht_oper */
+ /* WPS IE */
+
+ DBG_88E("%s, len =%d\n", __func__, len);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return _FAIL;
+
+
+ if (len > MAX_IE_SZ)
+ return _FAIL;
+
+ pbss_network->IELength = len;
+
+ _rtw_memset(ie, 0, MAX_IE_SZ);
+
+ memcpy(ie, pbuf, pbss_network->IELength);
+
+
+ if (pbss_network->InfrastructureMode != Ndis802_11APMode)
+ return _FAIL;
+
+ pbss_network->Rssi = 0;
+
+ memcpy(pbss_network->MacAddress, myid(&(padapter->eeprompriv)), ETH_ALEN);
+
+ /* beacon interval */
+ p = rtw_get_beacon_interval_from_ie(ie);/* 8: TimeStamp, 2: Beacon Interval 2:Capability */
+ pbss_network->Configuration.BeaconPeriod = RTW_GET_LE16(p);
+
+ /* capability */
+ cap = RTW_GET_LE16(ie);
+
+ /* SSID */
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SSID_IE_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ if (p && ie_len > 0) {
+ _rtw_memset(&pbss_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
+ memcpy(pbss_network->Ssid.Ssid, (p + 2), ie_len);
+ pbss_network->Ssid.SsidLength = ie_len;
+ }
+
+ /* channel */
+ channel = 0;
+ pbss_network->Configuration.Length = 0;
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _DSSET_IE_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ if (p && ie_len > 0)
+ channel = *(p + 2);
+
+ pbss_network->Configuration.DSConfig = channel;
+
+ _rtw_memset(supportRate, 0, NDIS_802_11_LENGTH_RATES_EX);
+ /* get supported rates */
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ if (p != NULL) {
+ memcpy(supportRate, p+2, ie_len);
+ supportRateNum = ie_len;
+ }
+
+ /* get ext_supported rates */
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _EXT_SUPPORTEDRATES_IE_, &ie_len, pbss_network->IELength - _BEACON_IE_OFFSET_);
+ if (p != NULL) {
+ memcpy(supportRate+supportRateNum, p+2, ie_len);
+ supportRateNum += ie_len;
+ }
+
+ network_type = rtw_check_network_type(supportRate, supportRateNum, channel);
+
+ rtw_set_supported_rate(pbss_network->SupportedRates, network_type);
+
+ /* parsing ERP_IE */
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _ERPINFO_IE_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ if (p && ie_len > 0)
+ ERP_IE_handler(padapter, (struct ndis_802_11_var_ie *)p);
+
+ /* update privacy/security */
+ if (cap & BIT(4))
+ pbss_network->Privacy = 1;
+ else
+ pbss_network->Privacy = 0;
+
+ psecuritypriv->wpa_psk = 0;
+
+ /* wpa2 */
+ group_cipher = 0;
+ pairwise_cipher = 0;
+ psecuritypriv->wpa2_group_cipher = _NO_PRIVACY_;
+ psecuritypriv->wpa2_pairwise_cipher = _NO_PRIVACY_;
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _RSN_IE_2_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ if (p && ie_len > 0) {
+ if (rtw_parse_wpa2_ie(p, ie_len+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+
+ psecuritypriv->dot8021xalg = 1;/* psk, todo:802.1x */
+ psecuritypriv->wpa_psk |= BIT(1);
+
+ psecuritypriv->wpa2_group_cipher = group_cipher;
+ psecuritypriv->wpa2_pairwise_cipher = pairwise_cipher;
+ }
+ }
+ /* wpa */
+ ie_len = 0;
+ group_cipher = 0;
+ pairwise_cipher = 0;
+ psecuritypriv->wpa_group_cipher = _NO_PRIVACY_;
+ psecuritypriv->wpa_pairwise_cipher = _NO_PRIVACY_;
+ for (p = ie + _BEACON_IE_OFFSET_;; p += (ie_len + 2)) {
+ p = rtw_get_ie(p, _SSN_IE_1_, &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2)));
+ if ((p) && (_rtw_memcmp(p+2, OUI1, 4))) {
+ if (rtw_parse_wpa_ie(p, ie_len+2, &group_cipher,
+ &pairwise_cipher, NULL) == _SUCCESS) {
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+
+ psecuritypriv->dot8021xalg = 1;/* psk, todo:802.1x */
+
+ psecuritypriv->wpa_psk |= BIT(0);
+
+ psecuritypriv->wpa_group_cipher = group_cipher;
+ psecuritypriv->wpa_pairwise_cipher = pairwise_cipher;
+ }
+ break;
+ }
+ if ((p == NULL) || (ie_len == 0))
+ break;
+ }
+
+ /* wmm */
+ ie_len = 0;
+ pmlmepriv->qospriv.qos_option = 0;
+ if (pregistrypriv->wmm_enable) {
+ for (p = ie + _BEACON_IE_OFFSET_;; p += (ie_len + 2)) {
+ p = rtw_get_ie(p, _VENDOR_SPECIFIC_IE_, &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2)));
+ if ((p) && _rtw_memcmp(p+2, WMM_PARA_IE, 6)) {
+ pmlmepriv->qospriv.qos_option = 1;
+
+ *(p+8) |= BIT(7);/* QoS Info, support U-APSD */
+
+ /* disable all ACM bits since the WMM admission control is not supported */
+ *(p + 10) &= ~BIT(4); /* BE */
+ *(p + 14) &= ~BIT(4); /* BK */
+ *(p + 18) &= ~BIT(4); /* VI */
+ *(p + 22) &= ~BIT(4); /* VO */
+ break;
+ }
+
+ if ((p == NULL) || (ie_len == 0))
+ break;
+ }
+ }
+ /* parsing HT_CAP_IE */
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_CAPABILITY_IE_, &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ if (p && ie_len > 0) {
+ u8 rf_type;
+ struct rtw_ieee80211_ht_cap *pht_cap = (struct rtw_ieee80211_ht_cap *)(p+2);
+
+ pHT_caps_ie = p;
+ ht_cap = true;
+ network_type |= WIRELESS_11_24N;
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+
+ if ((psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_CCMP) ||
+ (psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_CCMP))
+ pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY&(0x07<<2));
+ else
+ pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY&0x00);
+
+ /* set Max Rx AMPDU size to 64K */
+ pht_cap->ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_FACTOR & 0x03);
+
+ if (rf_type == RF_1T1R) {
+ pht_cap->supp_mcs_set[0] = 0xff;
+ pht_cap->supp_mcs_set[1] = 0x0;
+ }
+ memcpy(&pmlmepriv->htpriv.ht_cap, p+2, ie_len);
+ }
+
+ /* parsing HT_INFO_IE */
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_ADD_INFO_IE_, &ie_len,
+ (pbss_network->IELength - _BEACON_IE_OFFSET_));
+ if (p && ie_len > 0)
+ pHT_info_ie = p;
+ switch (network_type) {
+ case WIRELESS_11B:
+ pbss_network->NetworkTypeInUse = Ndis802_11DS;
+ break;
+ case WIRELESS_11G:
+ case WIRELESS_11BG:
+ case WIRELESS_11G_24N:
+ case WIRELESS_11BG_24N:
+ pbss_network->NetworkTypeInUse = Ndis802_11OFDM24;
+ break;
+ case WIRELESS_11A:
+ pbss_network->NetworkTypeInUse = Ndis802_11OFDM5;
+ break;
+ default:
+ pbss_network->NetworkTypeInUse = Ndis802_11OFDM24;
+ break;
+ }
+
+ pmlmepriv->cur_network.network_type = network_type;
+
+ pmlmepriv->htpriv.ht_option = false;
+
+ if ((psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_TKIP) ||
+ (psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_TKIP)) {
+ /* todo: */
+ /* ht_cap = false; */
+ }
+
+ /* ht_cap */
+ if (pregistrypriv->ht_enable && ht_cap) {
+ pmlmepriv->htpriv.ht_option = true;
+ pmlmepriv->qospriv.qos_option = 1;
+
+ if (pregistrypriv->ampdu_enable == 1)
+ pmlmepriv->htpriv.ampdu_enable = true;
+ HT_caps_handler(padapter, (struct ndis_802_11_var_ie *)pHT_caps_ie);
+
+ HT_info_handler(padapter, (struct ndis_802_11_var_ie *)pHT_info_ie);
+ }
+
+ pbss_network->Length = get_wlan_bssid_ex_sz((struct wlan_bssid_ex *)pbss_network);
+
+ /* issue beacon to start bss network */
+ start_bss_network(padapter, (u8 *)pbss_network);
+
+ /* alloc sta_info for ap itself */
+ psta = rtw_get_stainfo(&padapter->stapriv, pbss_network->MacAddress);
+ if (!psta) {
+ psta = rtw_alloc_stainfo(&padapter->stapriv, pbss_network->MacAddress);
+ if (psta == NULL)
+ return _FAIL;
+ }
+
+ pmlmepriv->cur_network.join_res = true;/* for check if already set beacon */
+ return ret;
+}
+
+void rtw_set_macaddr_acl(struct adapter *padapter, int mode)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+
+ DBG_88E("%s, mode =%d\n", __func__, mode);
+
+ pacl_list->mode = mode;
+}
+
+int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
+{
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ u8 added = false;
+ int i, ret = 0;
+ struct rtw_wlan_acl_node *paclnode;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+ struct __queue *pacl_node_q = &pacl_list->acl_node_q;
+
+ DBG_88E("%s(acl_num =%d) =%pM\n", __func__, pacl_list->num, (addr));
+
+ if ((NUM_ACL-1) < pacl_list->num)
+ return -1;
+
+ _enter_critical_bh(&(pacl_node_q->lock), &irqL);
+
+ phead = get_list_head(pacl_node_q);
+ plist = get_next(phead);
+
+ while (!rtw_end_of_queue_search(phead, plist)) {
+ paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list);
+ plist = get_next(plist);
+
+ if (_rtw_memcmp(paclnode->addr, addr, ETH_ALEN)) {
+ if (paclnode->valid) {
+ added = true;
+ DBG_88E("%s, sta has been added\n", __func__);
+ break;
+ }
+ }
+ }
+
+ _exit_critical_bh(&(pacl_node_q->lock), &irqL);
+
+ if (added)
+ return ret;
+
+ _enter_critical_bh(&(pacl_node_q->lock), &irqL);
+
+ for (i = 0; i < NUM_ACL; i++) {
+ paclnode = &pacl_list->aclnode[i];
+
+ if (!paclnode->valid) {
+ _rtw_init_listhead(&paclnode->list);
+
+ memcpy(paclnode->addr, addr, ETH_ALEN);
+
+ paclnode->valid = true;
+
+ rtw_list_insert_tail(&paclnode->list, get_list_head(pacl_node_q));
+
+ pacl_list->num++;
+
+ break;
+ }
+ }
+
+ DBG_88E("%s, acl_num =%d\n", __func__, pacl_list->num);
+
+ _exit_critical_bh(&(pacl_node_q->lock), &irqL);
+
+ return ret;
+}
+
+int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
+{
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ int ret = 0;
+ struct rtw_wlan_acl_node *paclnode;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+ struct __queue *pacl_node_q = &pacl_list->acl_node_q;
+
+ DBG_88E("%s(acl_num =%d) =%pM\n", __func__, pacl_list->num, (addr));
+
+ _enter_critical_bh(&(pacl_node_q->lock), &irqL);
+
+ phead = get_list_head(pacl_node_q);
+ plist = get_next(phead);
+
+ while (!rtw_end_of_queue_search(phead, plist)) {
+ paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list);
+ plist = get_next(plist);
+
+ if (_rtw_memcmp(paclnode->addr, addr, ETH_ALEN)) {
+ if (paclnode->valid) {
+ paclnode->valid = false;
+
+ rtw_list_delete(&paclnode->list);
+
+ pacl_list->num--;
+ }
+ }
+ }
+
+ _exit_critical_bh(&(pacl_node_q->lock), &irqL);
+
+ DBG_88E("%s, acl_num =%d\n", __func__, pacl_list->num);
+ return ret;
+}
+
+static void update_bcn_fixed_ie(struct adapter *padapter)
+{
+ DBG_88E("%s\n", __func__);
+}
+
+static void update_bcn_erpinfo_ie(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
+ unsigned char *p, *ie = pnetwork->IEs;
+ u32 len = 0;
+
+ DBG_88E("%s, ERP_enable =%d\n", __func__, pmlmeinfo->ERP_enable);
+
+ if (!pmlmeinfo->ERP_enable)
+ return;
+
+ /* parsing ERP_IE */
+ p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _ERPINFO_IE_, &len,
+ (pnetwork->IELength - _BEACON_IE_OFFSET_));
+ if (p && len > 0) {
+ struct ndis_802_11_var_ie *pIE = (struct ndis_802_11_var_ie *)p;
+
+ if (pmlmepriv->num_sta_non_erp == 1)
+ pIE->data[0] |= RTW_ERP_INFO_NON_ERP_PRESENT|RTW_ERP_INFO_USE_PROTECTION;
+ else
+ pIE->data[0] &= ~(RTW_ERP_INFO_NON_ERP_PRESENT|RTW_ERP_INFO_USE_PROTECTION);
+
+ if (pmlmepriv->num_sta_no_short_preamble > 0)
+ pIE->data[0] |= RTW_ERP_INFO_BARKER_PREAMBLE_MODE;
+ else
+ pIE->data[0] &= ~(RTW_ERP_INFO_BARKER_PREAMBLE_MODE);
+
+ ERP_IE_handler(padapter, pIE);
+ }
+}
+
+static void update_bcn_htcap_ie(struct adapter *padapter)
+{
+ DBG_88E("%s\n", __func__);
+}
+
+static void update_bcn_htinfo_ie(struct adapter *padapter)
+{
+ DBG_88E("%s\n", __func__);
+}
+
+static void update_bcn_rsn_ie(struct adapter *padapter)
+{
+ DBG_88E("%s\n", __func__);
+}
+
+static void update_bcn_wpa_ie(struct adapter *padapter)
+{
+ DBG_88E("%s\n", __func__);
+}
+
+static void update_bcn_wmm_ie(struct adapter *padapter)
+{
+ DBG_88E("%s\n", __func__);
+}
+
+static void update_bcn_wps_ie(struct adapter *padapter)
+{
+ u8 *pwps_ie = NULL, *pwps_ie_src;
+ u8 *premainder_ie, *pbackup_remainder_ie = NULL;
+ uint wps_ielen = 0, wps_offset, remainder_ielen;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
+ unsigned char *ie = pnetwork->IEs;
+ u32 ielen = pnetwork->IELength;
+
+ DBG_88E("%s\n", __func__);
+
+ pwps_ie = rtw_get_wps_ie(ie+_FIXED_IE_LENGTH_, ielen-_FIXED_IE_LENGTH_, NULL, &wps_ielen);
+
+ if (pwps_ie == NULL || wps_ielen == 0)
+ return;
+
+ wps_offset = (uint)(pwps_ie-ie);
+
+ premainder_ie = pwps_ie + wps_ielen;
+
+ remainder_ielen = ielen - wps_offset - wps_ielen;
+
+ if (remainder_ielen > 0) {
+ pbackup_remainder_ie = rtw_malloc(remainder_ielen);
+ if (pbackup_remainder_ie)
+ memcpy(pbackup_remainder_ie, premainder_ie, remainder_ielen);
+ }
+
+ pwps_ie_src = pmlmepriv->wps_beacon_ie;
+ if (pwps_ie_src == NULL) {
+ kfree(pbackup_remainder_ie); /* avoid leaking the backup of the remainder IEs */
+ return;
+ }
+
+ wps_ielen = (uint)pwps_ie_src[1];/* to get ie data len */
+ if ((wps_offset+wps_ielen+2+remainder_ielen) <= MAX_IE_SZ) {
+ memcpy(pwps_ie, pwps_ie_src, wps_ielen+2);
+ pwps_ie += (wps_ielen+2);
+
+ if (pbackup_remainder_ie)
+ memcpy(pwps_ie, pbackup_remainder_ie, remainder_ielen);
+
+ /* update IELength */
+ pnetwork->IELength = wps_offset + (wps_ielen+2) + remainder_ielen;
+ }
+
+ if (pbackup_remainder_ie)
+ kfree(pbackup_remainder_ie);
+}
+
+static void update_bcn_p2p_ie(struct adapter *padapter)
+{
+}
+
+static void update_bcn_vendor_spec_ie(struct adapter *padapter, u8 *oui)
+{
+ DBG_88E("%s\n", __func__);
+
+ if (_rtw_memcmp(RTW_WPA_OUI, oui, 4))
+ update_bcn_wpa_ie(padapter);
+ else if (_rtw_memcmp(WMM_OUI, oui, 4))
+ update_bcn_wmm_ie(padapter);
+ else if (_rtw_memcmp(WPS_OUI, oui, 4))
+ update_bcn_wps_ie(padapter);
+ else if (_rtw_memcmp(P2P_OUI, oui, 4))
+ update_bcn_p2p_ie(padapter);
+ else
+ DBG_88E("unknown OUI type!\n");
+}
+
+void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
+{
+ unsigned long irqL;
+ struct mlme_priv *pmlmepriv;
+ struct mlme_ext_priv *pmlmeext;
+
+ if (!padapter)
+ return;
+
+ pmlmepriv = &(padapter->mlmepriv);
+ pmlmeext = &(padapter->mlmeextpriv);
+
+ if (!pmlmeext->bstart_bss)
+ return;
+
+ _enter_critical_bh(&pmlmepriv->bcn_update_lock, &irqL);
+
+ switch (ie_id) {
+ case 0xFF:
+ update_bcn_fixed_ie(padapter);/* 8: TimeStamp, 2: Beacon Interval 2:Capability */
+ break;
+ case _TIM_IE_:
+ update_BCNTIM(padapter);
+ break;
+ case _ERPINFO_IE_:
+ update_bcn_erpinfo_ie(padapter);
+ break;
+ case _HT_CAPABILITY_IE_:
+ update_bcn_htcap_ie(padapter);
+ break;
+ case _RSN_IE_2_:
+ update_bcn_rsn_ie(padapter);
+ break;
+ case _HT_ADD_INFO_IE_:
+ update_bcn_htinfo_ie(padapter);
+ break;
+ case _VENDOR_SPECIFIC_IE_:
+ update_bcn_vendor_spec_ie(padapter, oui);
+ break;
+ default:
+ break;
+ }
+
+ pmlmepriv->update_bcn = true;
+
+ _exit_critical_bh(&pmlmepriv->bcn_update_lock, &irqL);
+
+ if (tx)
+ set_tx_beacon_cmd(padapter);
+}
+
+/*
+op_mode
+Set to 0 (HT pure) under the following conditions:
+ - all STAs in the BSS are 20/40 MHz HT in a 20/40 MHz BSS, or
+ - all STAs in the BSS are 20 MHz HT in a 20 MHz BSS
+Set to 1 (HT non-member protection) if there may be non-HT STAs
+ in both the primary and the secondary channel
+Set to 2 (20 MHz HT protection) if only HT STAs are associated in the BSS
+ and at least one of them is a 20 MHz HT STA
+Set to 3 (HT mixed mode) when one or more non-HT STAs are associated
+ (currently a non-GF HT station is also considered a non-HT STA)
+*/
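+/* e.g. when a legacy (non-HT) STA associates, num_sta_no_ht becomes non-zero and */
+/* rtw_ht_operation_update() below switches the operation mode to OP_MODE_MIXED (3). */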
+static int rtw_ht_operation_update(struct adapter *padapter)
+{
+ u16 cur_op_mode, new_op_mode;
+ int op_mode_changes = 0;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct ht_priv *phtpriv_ap = &pmlmepriv->htpriv;
+
+ if (pmlmepriv->htpriv.ht_option)
+ return 0;
+
+ DBG_88E("%s current operation mode = 0x%X\n",
+ __func__, pmlmepriv->ht_op_mode);
+
+ if (!(pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT) &&
+ pmlmepriv->num_sta_ht_no_gf) {
+ pmlmepriv->ht_op_mode |=
+ HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT;
+ op_mode_changes++;
+ } else if ((pmlmepriv->ht_op_mode &
+ HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT) &&
+ pmlmepriv->num_sta_ht_no_gf == 0) {
+ pmlmepriv->ht_op_mode &=
+ ~HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT;
+ op_mode_changes++;
+ }
+
+ if (!(pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT) &&
+ (pmlmepriv->num_sta_no_ht || pmlmepriv->olbc_ht)) {
+ pmlmepriv->ht_op_mode |= HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT;
+ op_mode_changes++;
+ } else if ((pmlmepriv->ht_op_mode &
+ HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT) &&
+ (pmlmepriv->num_sta_no_ht == 0 && !pmlmepriv->olbc_ht)) {
+ pmlmepriv->ht_op_mode &=
+ ~HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT;
+ op_mode_changes++;
+ }
+
+ /* Note: currently we switch to the MIXED op mode if HT non-greenfield
+ * station is associated. Probably it's a theoretical case, since
+ * it looks like all known HT STAs support greenfield.
+ */
+ new_op_mode = 0;
+ if (pmlmepriv->num_sta_no_ht ||
+ (pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT))
+ new_op_mode = OP_MODE_MIXED;
+ else if ((phtpriv_ap->ht_cap.cap_info & IEEE80211_HT_CAP_SUP_WIDTH) &&
+ pmlmepriv->num_sta_ht_20mhz)
+ new_op_mode = OP_MODE_20MHZ_HT_STA_ASSOCED;
+ else if (pmlmepriv->olbc_ht)
+ new_op_mode = OP_MODE_MAY_BE_LEGACY_STAS;
+ else
+ new_op_mode = OP_MODE_PURE;
+
+ cur_op_mode = pmlmepriv->ht_op_mode & HT_INFO_OPERATION_MODE_OP_MODE_MASK;
+ if (cur_op_mode != new_op_mode) {
+ pmlmepriv->ht_op_mode &= ~HT_INFO_OPERATION_MODE_OP_MODE_MASK;
+ pmlmepriv->ht_op_mode |= new_op_mode;
+ op_mode_changes++;
+ }
+
+ DBG_88E("%s new operation mode = 0x%X changes =%d\n",
+ __func__, pmlmepriv->ht_op_mode, op_mode_changes);
+
+ return op_mode_changes;
+}
+
+void associated_clients_update(struct adapter *padapter, u8 updated)
+{
+ /* update associated stations' cap. */
+ if (updated) {
+ unsigned long irqL;
+ struct list_head *phead, *plist;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+
+ /* check asoc_queue */
+ while ((rtw_end_of_queue_search(phead, plist)) == false) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+
+ plist = get_next(plist);
+
+ VCS_update(padapter, psta);
+ }
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ }
+}
+
+/* called > TSR LEVEL for USB or SDIO Interface */
+void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta)
+{
+ u8 beacon_updated = false;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+
+ if (!(psta->flags & WLAN_STA_SHORT_PREAMBLE)) {
+ if (!psta->no_short_preamble_set) {
+ psta->no_short_preamble_set = 1;
+
+ pmlmepriv->num_sta_no_short_preamble++;
+
+ if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
+ (pmlmepriv->num_sta_no_short_preamble == 1)) {
+ beacon_updated = true;
+ update_beacon(padapter, 0xFF, NULL, true);
+ }
+ }
+ } else {
+ if (psta->no_short_preamble_set) {
+ psta->no_short_preamble_set = 0;
+
+ pmlmepriv->num_sta_no_short_preamble--;
+
+ if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
+ (pmlmepriv->num_sta_no_short_preamble == 0)) {
+ beacon_updated = true;
+ update_beacon(padapter, 0xFF, NULL, true);
+ }
+ }
+ }
+
+ if (psta->flags & WLAN_STA_NONERP) {
+ if (!psta->nonerp_set) {
+ psta->nonerp_set = 1;
+
+ pmlmepriv->num_sta_non_erp++;
+
+ if (pmlmepriv->num_sta_non_erp == 1) {
+ beacon_updated = true;
+ update_beacon(padapter, _ERPINFO_IE_, NULL, true);
+ }
+ }
+ } else {
+ if (psta->nonerp_set) {
+ psta->nonerp_set = 0;
+
+ pmlmepriv->num_sta_non_erp--;
+
+ if (pmlmepriv->num_sta_non_erp == 0) {
+ beacon_updated = true;
+ update_beacon(padapter, _ERPINFO_IE_, NULL, true);
+ }
+ }
+ }
+
+ if (!(psta->capability & WLAN_CAPABILITY_SHORT_SLOT)) {
+ if (!psta->no_short_slot_time_set) {
+ psta->no_short_slot_time_set = 1;
+
+ pmlmepriv->num_sta_no_short_slot_time++;
+
+ if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
+ (pmlmepriv->num_sta_no_short_slot_time == 1)) {
+ beacon_updated = true;
+ update_beacon(padapter, 0xFF, NULL, true);
+ }
+ }
+ } else {
+ if (psta->no_short_slot_time_set) {
+ psta->no_short_slot_time_set = 0;
+
+ pmlmepriv->num_sta_no_short_slot_time--;
+
+ if ((pmlmeext->cur_wireless_mode > WIRELESS_11B) &&
+ (pmlmepriv->num_sta_no_short_slot_time == 0)) {
+ beacon_updated = true;
+ update_beacon(padapter, 0xFF, NULL, true);
+ }
+ }
+ }
+
+ if (psta->flags & WLAN_STA_HT) {
+ u16 ht_capab = psta->htpriv.ht_cap.cap_info;
+
+ DBG_88E("HT: STA %pM HT Capabilities Info: 0x%04x\n",
+ (psta->hwaddr), ht_capab);
+
+ if (psta->no_ht_set) {
+ psta->no_ht_set = 0;
+ pmlmepriv->num_sta_no_ht--;
+ }
+
+ if ((ht_capab & IEEE80211_HT_CAP_GRN_FLD) == 0) {
+ if (!psta->no_ht_gf_set) {
+ psta->no_ht_gf_set = 1;
+ pmlmepriv->num_sta_ht_no_gf++;
+ }
+ DBG_88E("%s STA %pM - no greenfield, num of non-gf stations %d\n",
+ __func__, (psta->hwaddr),
+ pmlmepriv->num_sta_ht_no_gf);
+ }
+
+ if ((ht_capab & IEEE80211_HT_CAP_SUP_WIDTH) == 0) {
+ if (!psta->ht_20mhz_set) {
+ psta->ht_20mhz_set = 1;
+ pmlmepriv->num_sta_ht_20mhz++;
+ }
+ DBG_88E("%s STA %pM - 20 MHz HT, num of 20MHz HT STAs %d\n",
+ __func__, (psta->hwaddr),
+ pmlmepriv->num_sta_ht_20mhz);
+ }
+ } else {
+ if (!psta->no_ht_set) {
+ psta->no_ht_set = 1;
+ pmlmepriv->num_sta_no_ht++;
+ }
+ if (pmlmepriv->htpriv.ht_option) {
+ DBG_88E("%s STA %pM - no HT, num of non-HT stations %d\n",
+ __func__, (psta->hwaddr),
+ pmlmepriv->num_sta_no_ht);
+ }
+ }
+
+ if (rtw_ht_operation_update(padapter) > 0) {
+ update_beacon(padapter, _HT_CAPABILITY_IE_, NULL, false);
+ update_beacon(padapter, _HT_ADD_INFO_IE_, NULL, true);
+ }
+
+ /* update associated stations' cap. */
+ associated_clients_update(padapter, beacon_updated);
+
+ DBG_88E("%s, updated =%d\n", __func__, beacon_updated);
+}
+
+u8 bss_cap_update_on_sta_leave(struct adapter *padapter, struct sta_info *psta)
+{
+ u8 beacon_updated = false;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+
+ if (!psta)
+ return beacon_updated;
+
+ if (psta->no_short_preamble_set) {
+ psta->no_short_preamble_set = 0;
+ pmlmepriv->num_sta_no_short_preamble--;
+ if (pmlmeext->cur_wireless_mode > WIRELESS_11B &&
+ pmlmepriv->num_sta_no_short_preamble == 0) {
+ beacon_updated = true;
+ update_beacon(padapter, 0xFF, NULL, true);
+ }
+ }
+
+ if (psta->nonerp_set) {
+ psta->nonerp_set = 0;
+ pmlmepriv->num_sta_non_erp--;
+ if (pmlmepriv->num_sta_non_erp == 0) {
+ beacon_updated = true;
+ update_beacon(padapter, _ERPINFO_IE_, NULL, true);
+ }
+ }
+
+ if (psta->no_short_slot_time_set) {
+ psta->no_short_slot_time_set = 0;
+ pmlmepriv->num_sta_no_short_slot_time--;
+ if (pmlmeext->cur_wireless_mode > WIRELESS_11B &&
+ pmlmepriv->num_sta_no_short_slot_time == 0) {
+ beacon_updated = true;
+ update_beacon(padapter, 0xFF, NULL, true);
+ }
+ }
+
+ if (psta->no_ht_gf_set) {
+ psta->no_ht_gf_set = 0;
+ pmlmepriv->num_sta_ht_no_gf--;
+ }
+
+ if (psta->no_ht_set) {
+ psta->no_ht_set = 0;
+ pmlmepriv->num_sta_no_ht--;
+ }
+
+ if (psta->ht_20mhz_set) {
+ psta->ht_20mhz_set = 0;
+ pmlmepriv->num_sta_ht_20mhz--;
+ }
+
+ if (rtw_ht_operation_update(padapter) > 0) {
+ update_beacon(padapter, _HT_CAPABILITY_IE_, NULL, false);
+ update_beacon(padapter, _HT_ADD_INFO_IE_, NULL, true);
+ }
+
+ /* update associated stations' cap. */
+
+ DBG_88E("%s, updated =%d\n", __func__, beacon_updated);
+
+ return beacon_updated;
+}
+
+u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
+ bool active, u16 reason)
+{
+ unsigned long irqL;
+ u8 beacon_updated = false;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ if (!psta)
+ return beacon_updated;
+
+ /* tear down Rx AMPDU */
+ send_delba(padapter, 0, psta->hwaddr);/* recipient */
+
+ /* tear down TX AMPDU */
+ send_delba(padapter, 1, psta->hwaddr);/* originator */
+ psta->htpriv.agg_enable_bitmap = 0x0;/* reset */
+ psta->htpriv.candidate_tid_bitmap = 0x0;/* reset */
+
+ if (active)
+ issue_deauth(padapter, psta->hwaddr, reason);
+
+ /* clear cam entry / key */
+ rtw_clearstakey_cmd(padapter, (u8 *)psta, (u8)(psta->mac_id + 3), true);
+
+
+ _enter_critical_bh(&psta->lock, &irqL);
+ psta->state &= ~_FW_LINKED;
+ _exit_critical_bh(&psta->lock, &irqL);
+
+ rtw_indicate_sta_disassoc_event(padapter, psta);
+
+ report_del_sta_event(padapter, psta->hwaddr, reason);
+
+ beacon_updated = bss_cap_update_on_sta_leave(padapter, psta);
+
+ _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ rtw_free_stainfo(padapter, psta);
+ _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+
+ return beacon_updated;
+}
+
+int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset)
+{
+ unsigned long irqL;
+ struct list_head *phead, *plist;
+ int ret = 0;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ return ret;
+
+ DBG_88E(FUNC_NDEV_FMT" with ch:%u, offset:%u\n",
+ FUNC_NDEV_ARG(padapter->pnetdev), new_ch, ch_offset);
+
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+
+ /* for each sta in asoc_queue */
+ while (!rtw_end_of_queue_search(phead, plist)) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+ plist = get_next(plist);
+
+ issue_action_spct_ch_switch(padapter, psta->hwaddr, new_ch, ch_offset);
+ psta->expire_to = ((pstapriv->expire_to * 2) > 5) ? 5 : (pstapriv->expire_to * 2);
+ }
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+
+ issue_action_spct_ch_switch(padapter, bc_addr, new_ch, ch_offset);
+
+ return ret;
+}
+
+int rtw_sta_flush(struct adapter *padapter)
+{
+ unsigned long irqL;
+ struct list_head *phead, *plist;
+ int ret = 0;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ DBG_88E(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(padapter->pnetdev));
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ return ret;
+
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+
+ /* free sta asoc_queue */
+ while ((rtw_end_of_queue_search(phead, plist)) == false) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+
+ plist = get_next(plist);
+
+ rtw_list_delete(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+
+ ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
+ }
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+
+
+ issue_deauth(padapter, bc_addr, WLAN_REASON_DEAUTH_LEAVING);
+
+ associated_clients_update(padapter, true);
+
+ return ret;
+}
+
+/* called > TSR LEVEL for USB or SDIO Interface */
+void sta_info_update(struct adapter *padapter, struct sta_info *psta)
+{
+ int flags = psta->flags;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ /* update wmm cap. */
+ if (WLAN_STA_WME&flags)
+ psta->qos_option = 1;
+ else
+ psta->qos_option = 0;
+
+ if (pmlmepriv->qospriv.qos_option == 0)
+ psta->qos_option = 0;
+
+ /* update 802.11n ht cap. */
+ if (WLAN_STA_HT&flags) {
+ psta->htpriv.ht_option = true;
+ psta->qos_option = 1;
+ } else {
+ psta->htpriv.ht_option = false;
+ }
+
+ if (!pmlmepriv->htpriv.ht_option)
+ psta->htpriv.ht_option = false;
+
+ update_sta_info_apmode(padapter, psta);
+}
+
+/* called >= TSR LEVEL for USB or SDIO Interface */
+void ap_sta_info_defer_update(struct adapter *padapter, struct sta_info *psta)
+{
+ if (psta->state & _FW_LINKED) {
+ /* add ratid */
+ add_RATid(padapter, psta, 0);/* DM_RATR_STA_INIT */
+ }
+}
+
+void start_ap_mode(struct adapter *padapter)
+{
+ int i;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+
+ pmlmepriv->update_bcn = false;
+
+ pmlmeext->bstart_bss = false;
+
+ pmlmepriv->num_sta_non_erp = 0;
+
+ pmlmepriv->num_sta_no_short_slot_time = 0;
+
+ pmlmepriv->num_sta_no_short_preamble = 0;
+
+ pmlmepriv->num_sta_ht_no_gf = 0;
+ pmlmepriv->num_sta_no_ht = 0;
+ pmlmepriv->num_sta_ht_20mhz = 0;
+
+ pmlmepriv->olbc = false;
+
+ pmlmepriv->olbc_ht = false;
+
+ pmlmepriv->ht_op_mode = 0;
+
+ for (i = 0; i < NUM_STA; i++)
+ pstapriv->sta_aid[i] = NULL;
+
+ pmlmepriv->wps_beacon_ie = NULL;
+ pmlmepriv->wps_probe_resp_ie = NULL;
+ pmlmepriv->wps_assoc_resp_ie = NULL;
+
+ pmlmepriv->p2p_beacon_ie = NULL;
+ pmlmepriv->p2p_probe_resp_ie = NULL;
+
+ /* for ACL */
+ _rtw_init_listhead(&(pacl_list->acl_node_q.queue));
+ pacl_list->num = 0;
+ pacl_list->mode = 0;
+ for (i = 0; i < NUM_ACL; i++) {
+ _rtw_init_listhead(&pacl_list->aclnode[i].list);
+ pacl_list->aclnode[i].valid = false;
+ }
+}
+
+void stop_ap_mode(struct adapter *padapter)
+{
+ unsigned long irqL;
+ struct list_head *phead, *plist;
+ struct rtw_wlan_acl_node *paclnode;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+ struct __queue *pacl_node_q = &pacl_list->acl_node_q;
+
+ pmlmepriv->update_bcn = false;
+ pmlmeext->bstart_bss = false;
+
+ /* reset and init security priv; this could be refined with rtw_reset_securitypriv */
+ _rtw_memset((unsigned char *)&padapter->securitypriv, 0, sizeof(struct security_priv));
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
+
+ /* for ACL */
+ _enter_critical_bh(&(pacl_node_q->lock), &irqL);
+ phead = get_list_head(pacl_node_q);
+ plist = get_next(phead);
+ while ((rtw_end_of_queue_search(phead, plist)) == false) {
+ paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list);
+ plist = get_next(plist);
+
+ if (paclnode->valid) {
+ paclnode->valid = false;
+
+ rtw_list_delete(&paclnode->list);
+
+ pacl_list->num--;
+ }
+ }
+ _exit_critical_bh(&(pacl_node_q->lock), &irqL);
+
+ DBG_88E("%s, free acl_node_queue, num =%d\n", __func__, pacl_list->num);
+
+ rtw_sta_flush(padapter);
+
+ /* free_assoc_sta_resources */
+ rtw_free_all_stainfo(padapter);
+
+ psta = rtw_get_bcmc_stainfo(padapter);
+ _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+ rtw_free_stainfo(padapter, psta);
+ _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL);
+
+ rtw_init_bcmc_stainfo(padapter);
+
+ rtw_free_mlme_priv_ie_data(pmlmepriv);
+}
+
+#endif /* CONFIG_88EU_AP_MODE */
diff --git a/drivers/staging/rtl8188eu/core/rtw_br_ext.c b/drivers/staging/rtl8188eu/core/rtw_br_ext.c
new file mode 100644
index 00000000000..fbca394cf4f
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_br_ext.c
@@ -0,0 +1,1199 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_BR_EXT_C_
+
+#include <linux/if_arp.h>
+#include <net/ip.h>
+#include <net/ipx.h>
+#include <linux/atalk.h>
+#include <linux/udp.h>
+#include <linux/if_pppox.h>
+
+#include <drv_types.h>
+#include "rtw_br_ext.h"
+#include <usb_osintf.h>
+#include <recv_osdep.h>
+
+#ifndef csum_ipv6_magic
+#include <net/ip6_checksum.h>
+#endif
+
+#include <linux/ipv6.h>
+#include <linux/icmpv6.h>
+#include <net/ndisc.h>
+#include <net/checksum.h>
+
+#define NAT25_IPV4 01
+#define NAT25_IPV6 02
+#define NAT25_IPX 03
+#define NAT25_APPLE 04
+#define NAT25_PPPOE 05
+
+#define RTL_RELAY_TAG_LEN (ETH_ALEN)
+#define TAG_HDR_LEN 4
+
+#define MAGIC_CODE 0x8186
+#define MAGIC_CODE_LEN 2
+#define WAIT_TIME_PPPOE 5 /* waiting time for pppoe server in sec */
+
+/*-----------------------------------------------------------------
+ How database records network address:
+ 0 1 2 3 4 5 6 7 8 9 10
+ |----|----|----|----|----|----|----|----|----|----|----|
+ IPv4 |type| | IP addr |
+ IPX |type| Net addr | Node addr |
+ IPX |type| Net addr |Sckt addr|
+ Apple |type| Network |node|
+ PPPoE |type| SID | AC MAC |
+-----------------------------------------------------------------*/
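+/* e.g. for an IPv4 client at 192.168.1.10 the first 11 bytes of the record are */
+/* {NAT25_IPV4, 0, 0, 0, 0, 0, 0, 0xc0, 0xa8, 0x01, 0x0a} */
+/* (see __nat25_generate_ipv4_network_addr below). */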
+
+
+/* Find a tag in pppoe frame and return the pointer */
+static inline unsigned char *__nat25_find_pppoe_tag(struct pppoe_hdr *ph, unsigned short type)
+{
+ unsigned char *cur_ptr, *start_ptr;
+ unsigned short tagLen, tagType;
+
+ start_ptr = cur_ptr = (unsigned char *)ph->tag;
+ while ((cur_ptr - start_ptr) < ntohs(ph->length)) {
+ /* prevent unaligned access */
+ tagType = (unsigned short)((cur_ptr[0] << 8) + cur_ptr[1]);
+ tagLen = (unsigned short)((cur_ptr[2] << 8) + cur_ptr[3]);
+ if (tagType == type)
+ return cur_ptr;
+ cur_ptr = cur_ptr + TAG_HDR_LEN + tagLen;
+ }
+ return NULL;
+}
+
+
+static inline int __nat25_add_pppoe_tag(struct sk_buff *skb, struct pppoe_tag *tag)
+{
+ struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN);
+ int data_len;
+
+ data_len = tag->tag_len + TAG_HDR_LEN;
+ if (skb_tailroom(skb) < data_len) {
+ _DEBUG_ERR("skb_tailroom() failed in add SID tag!\n");
+ return -1;
+ }
+
+ skb_put(skb, data_len);
+ /* make room for the new tag */
+ memmove(((unsigned char *)ph->tag + data_len), (unsigned char *)ph->tag, ntohs(ph->length));
+ ph->length = htons(ntohs(ph->length) + data_len);
+ memcpy((unsigned char *)ph->tag, tag, data_len);
+ return data_len;
+}
+
+static int skb_pull_and_merge(struct sk_buff *skb, unsigned char *src, int len)
+{
+ int tail_len;
+ unsigned long end, tail;
+
+ if ((src+len) > skb_tail_pointer(skb) || skb->len < len)
+ return -1;
+
+ tail = (unsigned long)skb_tail_pointer(skb);
+ end = (unsigned long)src+len;
+ if (tail < end)
+ return -1;
+
+ tail_len = (int)(tail-end);
+ if (tail_len > 0)
+ memmove(src, src+len, tail_len);
+
+ skb_trim(skb, skb->len-len);
+ return 0;
+}
+
+static inline unsigned long __nat25_timeout(struct adapter *priv)
+{
+ unsigned long timeout;
+
+ timeout = jiffies - NAT25_AGEING_TIME*HZ;
+
+ return timeout;
+}
+
+
+static inline int __nat25_has_expired(struct adapter *priv,
+ struct nat25_network_db_entry *fdb)
+{
+ if (time_before_eq(fdb->ageing_timer, __nat25_timeout(priv)))
+ return 1;
+
+ return 0;
+}
+
+
+static inline void __nat25_generate_ipv4_network_addr(unsigned char *networkAddr,
+ unsigned int *ipAddr)
+{
+ memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
+
+ networkAddr[0] = NAT25_IPV4;
+ memcpy(networkAddr+7, (unsigned char *)ipAddr, 4);
+}
+
+
+static inline void __nat25_generate_ipx_network_addr_with_node(unsigned char *networkAddr,
+ unsigned int *ipxNetAddr, unsigned char *ipxNodeAddr)
+{
+ memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
+
+ networkAddr[0] = NAT25_IPX;
+ memcpy(networkAddr+1, (unsigned char *)ipxNetAddr, 4);
+ memcpy(networkAddr+5, ipxNodeAddr, 6);
+}
+
+
+static inline void __nat25_generate_ipx_network_addr_with_socket(unsigned char *networkAddr,
+ unsigned int *ipxNetAddr, unsigned short *ipxSocketAddr)
+{
+ memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
+
+ networkAddr[0] = NAT25_IPX;
+ memcpy(networkAddr+1, (unsigned char *)ipxNetAddr, 4);
+ memcpy(networkAddr+5, (unsigned char *)ipxSocketAddr, 2);
+}
+
+
+static inline void __nat25_generate_apple_network_addr(unsigned char *networkAddr,
+ unsigned short *network, unsigned char *node)
+{
+ memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
+
+ networkAddr[0] = NAT25_APPLE;
+ memcpy(networkAddr+1, (unsigned char *)network, 2);
+ networkAddr[3] = *node;
+}
+
+static inline void __nat25_generate_pppoe_network_addr(unsigned char *networkAddr,
+ unsigned char *ac_mac, unsigned short *sid)
+{
+ memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
+
+ networkAddr[0] = NAT25_PPPOE;
+ memcpy(networkAddr+1, (unsigned char *)sid, 2);
+ memcpy(networkAddr+3, (unsigned char *)ac_mac, 6);
+}
+
+static void __nat25_generate_ipv6_network_addr(unsigned char *networkAddr,
+ unsigned int *ipAddr)
+{
+ memset(networkAddr, 0, MAX_NETWORK_ADDR_LEN);
+
+ networkAddr[0] = NAT25_IPV6;
+ memcpy(networkAddr+1, (unsigned char *)ipAddr, 16);
+}
+
+static unsigned char *scan_tlv(unsigned char *data, int len, unsigned char tag, unsigned char len8b)
+{
+ while (len > 0) {
+ if (*data == tag && *(data+1) == len8b && len >= len8b*8)
+ return data+2;
+
+ len -= (*(data+1))*8;
+ data += (*(data+1))*8;
+ }
+ return NULL;
+}
+
+static int update_nd_link_layer_addr(unsigned char *data, int len, unsigned char *replace_mac)
+{
+ struct icmp6hdr *icmphdr = (struct icmp6hdr *)data;
+ unsigned char *mac;
+
+ if (icmphdr->icmp6_type == NDISC_ROUTER_SOLICITATION) {
+ if (len >= 8) {
+ mac = scan_tlv(&data[8], len-8, 1, 1);
+ if (mac) {
+ _DEBUG_INFO("Router Solicitation, replace MAC From: %02x:%02x:%02x:%02x:%02x:%02x, To: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
+ replace_mac[0], replace_mac[1], replace_mac[2], replace_mac[3], replace_mac[4], replace_mac[5]);
+ memcpy(mac, replace_mac, 6);
+ return 1;
+ }
+ }
+ } else if (icmphdr->icmp6_type == NDISC_ROUTER_ADVERTISEMENT) {
+ if (len >= 16) {
+ mac = scan_tlv(&data[16], len-16, 1, 1);
+ if (mac) {
+ _DEBUG_INFO("Router Advertisement, replace MAC From: %02x:%02x:%02x:%02x:%02x:%02x, To: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
+ replace_mac[0], replace_mac[1], replace_mac[2], replace_mac[3], replace_mac[4], replace_mac[5]);
+ memcpy(mac, replace_mac, 6);
+ return 1;
+ }
+ }
+ } else if (icmphdr->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
+ if (len >= 24) {
+ mac = scan_tlv(&data[24], len-24, 1, 1);
+ if (mac) {
+ _DEBUG_INFO("Neighbor Solicitation, replace MAC From: %02x:%02x:%02x:%02x:%02x:%02x, To: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
+ replace_mac[0], replace_mac[1], replace_mac[2], replace_mac[3], replace_mac[4], replace_mac[5]);
+ memcpy(mac, replace_mac, 6);
+ return 1;
+ }
+ }
+ } else if (icmphdr->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
+ if (len >= 24) {
+ mac = scan_tlv(&data[24], len-24, 2, 1);
+ if (mac) {
+ _DEBUG_INFO("Neighbor Advertisement, replace MAC From: %02x:%02x:%02x:%02x:%02x:%02x, To: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
+ replace_mac[0], replace_mac[1], replace_mac[2], replace_mac[3], replace_mac[4], replace_mac[5]);
+ memcpy(mac, replace_mac, 6);
+ return 1;
+ }
+ }
+ } else if (icmphdr->icmp6_type == NDISC_REDIRECT) {
+ if (len >= 40) {
+ mac = scan_tlv(&data[40], len-40, 2, 1);
+ if (mac) {
+ _DEBUG_INFO("Redirect, replace MAC From: %02x:%02x:%02x:%02x:%02x:%02x, To: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5],
+ replace_mac[0], replace_mac[1], replace_mac[2], replace_mac[3], replace_mac[4], replace_mac[5]);
+ memcpy(mac, replace_mac, 6);
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+static inline int __nat25_network_hash(unsigned char *networkAddr)
+{
+ if (networkAddr[0] == NAT25_IPV4) {
+ unsigned long x;
+
+ x = networkAddr[7] ^ networkAddr[8] ^ networkAddr[9] ^ networkAddr[10];
+
+ return x & (NAT25_HASH_SIZE - 1);
+ } else if (networkAddr[0] == NAT25_IPX) {
+ unsigned long x;
+
+ x = networkAddr[1] ^ networkAddr[2] ^ networkAddr[3] ^ networkAddr[4] ^ networkAddr[5] ^
+ networkAddr[6] ^ networkAddr[7] ^ networkAddr[8] ^ networkAddr[9] ^ networkAddr[10];
+
+ return x & (NAT25_HASH_SIZE - 1);
+ } else if (networkAddr[0] == NAT25_APPLE) {
+ unsigned long x;
+
+ x = networkAddr[1] ^ networkAddr[2] ^ networkAddr[3];
+
+ return x & (NAT25_HASH_SIZE - 1);
+ } else if (networkAddr[0] == NAT25_PPPOE) {
+ unsigned long x;
+
+ x = networkAddr[0] ^ networkAddr[1] ^ networkAddr[2] ^ networkAddr[3] ^ networkAddr[4] ^ networkAddr[5] ^ networkAddr[6] ^ networkAddr[7] ^ networkAddr[8];
+
+ return x & (NAT25_HASH_SIZE - 1);
+ } else if (networkAddr[0] == NAT25_IPV6) {
+ unsigned long x;
+
+ x = networkAddr[1] ^ networkAddr[2] ^ networkAddr[3] ^ networkAddr[4] ^ networkAddr[5] ^
+ networkAddr[6] ^ networkAddr[7] ^ networkAddr[8] ^ networkAddr[9] ^ networkAddr[10] ^
+ networkAddr[11] ^ networkAddr[12] ^ networkAddr[13] ^ networkAddr[14] ^ networkAddr[15] ^
+ networkAddr[16];
+
+ return x & (NAT25_HASH_SIZE - 1);
+ } else {
+ unsigned long x = 0;
+ int i;
+
+ for (i = 0; i < MAX_NETWORK_ADDR_LEN; i++)
+ x ^= networkAddr[i];
+
+ return x & (NAT25_HASH_SIZE - 1);
+ }
+}
+
+static inline void __network_hash_link(struct adapter *priv,
+ struct nat25_network_db_entry *ent, int hash)
+{
+ /* Caller must have called _enter_critical_bh already! */
+ ent->next_hash = priv->nethash[hash];
+ if (ent->next_hash != NULL)
+ ent->next_hash->pprev_hash = &ent->next_hash;
+ priv->nethash[hash] = ent;
+ ent->pprev_hash = &priv->nethash[hash];
+}
+
+static inline void __network_hash_unlink(struct nat25_network_db_entry *ent)
+{
+ /* Caller must have called _enter_critical_bh already! */
+ *(ent->pprev_hash) = ent->next_hash;
+ if (ent->next_hash != NULL)
+ ent->next_hash->pprev_hash = ent->pprev_hash;
+ ent->next_hash = NULL;
+ ent->pprev_hash = NULL;
+}
+
+static int __nat25_db_network_lookup_and_replace(struct adapter *priv,
+ struct sk_buff *skb, unsigned char *networkAddr)
+{
+ struct nat25_network_db_entry *db;
+ unsigned long irqL;
+ _enter_critical_bh(&priv->br_ext_lock, &irqL);
+
+ db = priv->nethash[__nat25_network_hash(networkAddr)];
+ while (db != NULL) {
+ if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN)) {
+ if (!__nat25_has_expired(priv, db)) {
+ /* replace the destination mac address */
+ memcpy(skb->data, db->macAddr, ETH_ALEN);
+ atomic_inc(&db->use_count);
+
+ DEBUG_INFO("NAT25: Lookup M:%02x%02x%02x%02x%02x%02x N:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x"
+ "%02x%02x%02x%02x%02x%02x\n",
+ db->macAddr[0],
+ db->macAddr[1],
+ db->macAddr[2],
+ db->macAddr[3],
+ db->macAddr[4],
+ db->macAddr[5],
+ db->networkAddr[0],
+ db->networkAddr[1],
+ db->networkAddr[2],
+ db->networkAddr[3],
+ db->networkAddr[4],
+ db->networkAddr[5],
+ db->networkAddr[6],
+ db->networkAddr[7],
+ db->networkAddr[8],
+ db->networkAddr[9],
+ db->networkAddr[10],
+ db->networkAddr[11],
+ db->networkAddr[12],
+ db->networkAddr[13],
+ db->networkAddr[14],
+ db->networkAddr[15],
+ db->networkAddr[16]);
+ }
+ _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ return 1;
+ }
+ db = db->next_hash;
+ }
+ _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ return 0;
+}
+
+static void __nat25_db_network_insert(struct adapter *priv,
+ unsigned char *macAddr, unsigned char *networkAddr)
+{
+ struct nat25_network_db_entry *db;
+ int hash;
+ unsigned long irqL;
+
+ _enter_critical_bh(&priv->br_ext_lock, &irqL);
+ hash = __nat25_network_hash(networkAddr);
+ db = priv->nethash[hash];
+ while (db != NULL) {
+ if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN)) {
+ memcpy(db->macAddr, macAddr, ETH_ALEN);
+ db->ageing_timer = jiffies;
+ _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ return;
+ }
+ db = db->next_hash;
+ }
+ db = (struct nat25_network_db_entry *) rtw_malloc(sizeof(*db));
+ if (db == NULL) {
+ _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ return;
+ }
+ memcpy(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN);
+ memcpy(db->macAddr, macAddr, ETH_ALEN);
+ atomic_set(&db->use_count, 1);
+ db->ageing_timer = jiffies;
+
+ __network_hash_link(priv, db, hash);
+
+ _exit_critical_bh(&priv->br_ext_lock, &irqL);
+}
+
+static void __nat25_db_print(struct adapter *priv)
+{
+}
+
+/*
+ * NAT2.5 interface
+ */
+
+void nat25_db_cleanup(struct adapter *priv)
+{
+ int i;
+ unsigned long irqL;
+ _enter_critical_bh(&priv->br_ext_lock, &irqL);
+
+ for (i = 0; i < NAT25_HASH_SIZE; i++) {
+ struct nat25_network_db_entry *f;
+ f = priv->nethash[i];
+ while (f != NULL) {
+ struct nat25_network_db_entry *g;
+
+ g = f->next_hash;
+ if (priv->scdb_entry == f) {
+ memset(priv->scdb_mac, 0, ETH_ALEN);
+ memset(priv->scdb_ip, 0, 4);
+ priv->scdb_entry = NULL;
+ }
+ __network_hash_unlink(f);
+ kfree(f);
+ f = g;
+ }
+ }
+ _exit_critical_bh(&priv->br_ext_lock, &irqL);
+}
+
+void nat25_db_expire(struct adapter *priv)
+{
+ int i;
+ unsigned long irqL;
+ _enter_critical_bh(&priv->br_ext_lock, &irqL);
+
+ for (i = 0; i < NAT25_HASH_SIZE; i++) {
+ struct nat25_network_db_entry *f;
+ f = priv->nethash[i];
+
+ while (f != NULL) {
+ struct nat25_network_db_entry *g;
+ g = f->next_hash;
+
+ if (__nat25_has_expired(priv, f)) {
+ if (atomic_dec_and_test(&f->use_count)) {
+ if (priv->scdb_entry == f) {
+ memset(priv->scdb_mac, 0, ETH_ALEN);
+ memset(priv->scdb_ip, 0, 4);
+ priv->scdb_entry = NULL;
+ }
+ __network_hash_unlink(f);
+ kfree(f);
+ }
+ }
+ f = g;
+ }
+ }
+ _exit_critical_bh(&priv->br_ext_lock, &irqL);
+}
+
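+/*
+ * Dispatch on the Ethernet protocol and on 'method': NAT25_INSERT learns the
+ * sender's network-address -> MAC mapping from an outgoing frame,
+ * NAT25_LOOKUP rewrites the destination MAC of an incoming frame from the
+ * database, and NAT25_CHECK is a protocol-specific probe that does not
+ * modify the frame. Returns -1 when the protocol/method is not handled.
+ */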
+int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method)
+{
+ unsigned short protocol;
+ unsigned char networkAddr[MAX_NETWORK_ADDR_LEN];
+ unsigned int tmp;
+
+ if (skb == NULL)
+ return -1;
+
+ if ((method <= NAT25_MIN) || (method >= NAT25_MAX))
+ return -1;
+
+ protocol = be16_to_cpu(*((__be16 *)(skb->data + 2 * ETH_ALEN)));
+
+ /*---------------------------------------------------*/
+ /* Handle IP frame */
+ /*---------------------------------------------------*/
+ if (protocol == ETH_P_IP) {
+ struct iphdr *iph = (struct iphdr *)(skb->data + ETH_HLEN);
+
+ if (((unsigned char *)(iph) + (iph->ihl<<2)) >= (skb->data + ETH_HLEN + skb->len)) {
+ DEBUG_WARN("NAT25: malformed IP packet !\n");
+ return -1;
+ }
+
+ switch (method) {
+ case NAT25_CHECK:
+ return -1;
+ case NAT25_INSERT:
+ /* some multicast packets have an all-zero source IP; other cases may be illegal */
+ /* in class A, B and C networks, a host part of all zeros or all ones is illegal */
+ if (iph->saddr == 0)
+ return 0;
+ tmp = be32_to_cpu(iph->saddr);
+ DEBUG_INFO("NAT25: Insert IP, SA =%08x, DA =%08x\n", tmp, iph->daddr);
+ __nat25_generate_ipv4_network_addr(networkAddr, &tmp);
+ /* record the source IP address and source MAC address in the db */
+ __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+
+ __nat25_db_print(priv);
+ return 0;
+ case NAT25_LOOKUP:
+ DEBUG_INFO("NAT25: Lookup IP, SA =%08x, DA =%08x\n", iph->saddr, iph->daddr);
+ tmp = be32_to_cpu(iph->daddr);
+ __nat25_generate_ipv4_network_addr(networkAddr, &tmp);
+
+ if (!__nat25_db_network_lookup_and_replace(priv, skb, networkAddr)) {
+ if (*((unsigned char *)&iph->daddr + 3) == 0xff) {
+ /* L2 is unicast but L3 is broadcast, make L2 become broadcast too */
+ DEBUG_INFO("NAT25: Set DA as broadcast\n");
+ memset(skb->data, 0xff, ETH_ALEN);
+ } else {
+ /* forward unknown IP packets to the upper TCP/IP stack */
+ DEBUG_INFO("NAT25: Replace DA with BR's MAC\n");
+ if ((*(u32 *)priv->br_mac) == 0 && (*(u16 *)(priv->br_mac+4)) == 0) {
+ printk("Re-init netdev_br_init() due to br_mac == 0!\n");
+ netdev_br_init(priv->pnetdev);
+ }
+ memcpy(skb->data, priv->br_mac, ETH_ALEN);
+ }
+ }
+ return 0;
+ default:
+ return -1;
+ }
+ } else if (protocol == ETH_P_ARP) {
+ /*---------------------------------------------------*/
+ /* Handle ARP frame */
+ /*---------------------------------------------------*/
+ struct arphdr *arp = (struct arphdr *)(skb->data + ETH_HLEN);
+ unsigned char *arp_ptr = (unsigned char *)(arp + 1);
+ unsigned int *sender, *target;
+
+ if (arp->ar_pro != __constant_htons(ETH_P_IP)) {
+ DEBUG_WARN("NAT25: arp protocol unknown (%4x)!\n", be16_to_cpu(arp->ar_pro));
+ return -1;
+ }
+
+ switch (method) {
+ case NAT25_CHECK:
+ return 0; /* skb_copy for all ARP frame */
+ case NAT25_INSERT:
+ DEBUG_INFO("NAT25: Insert ARP, MAC =%02x%02x%02x%02x%02x%02x\n", arp_ptr[0],
+ arp_ptr[1], arp_ptr[2], arp_ptr[3], arp_ptr[4], arp_ptr[5]);
+
+ /* change the ARP sender MAC address to the wlan STA address */
+ memcpy(arp_ptr, GET_MY_HWADDR(priv), ETH_ALEN);
+ arp_ptr += arp->ar_hln;
+ sender = (unsigned int *)arp_ptr;
+ __nat25_generate_ipv4_network_addr(networkAddr, sender);
+ __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+ __nat25_db_print(priv);
+ return 0;
+ case NAT25_LOOKUP:
+ DEBUG_INFO("NAT25: Lookup ARP\n");
+
+ arp_ptr += arp->ar_hln;
+ sender = (unsigned int *)arp_ptr;
+ arp_ptr += (arp->ar_hln + arp->ar_pln);
+ target = (unsigned int *)arp_ptr;
+ __nat25_generate_ipv4_network_addr(networkAddr, target);
+ __nat25_db_network_lookup_and_replace(priv, skb, networkAddr);
+ /* change the ARP target MAC address to the lookup result */
+ arp_ptr = (unsigned char *)(arp + 1);
+ arp_ptr += (arp->ar_hln + arp->ar_pln);
+ memcpy(arp_ptr, skb->data, ETH_ALEN);
+ return 0;
+ default:
+ return -1;
+ }
+ } else if ((protocol == ETH_P_IPX) ||
+ (protocol <= ETH_FRAME_LEN)) {
+ /*---------------------------------------------------*/
+ /* Handle IPX and Apple Talk frame */
+ /*---------------------------------------------------*/
+ unsigned char ipx_header[2] = {0xFF, 0xFF};
+ struct ipxhdr *ipx = NULL;
+ struct elapaarp *ea = NULL;
+ struct ddpehdr *ddp = NULL;
+ unsigned char *framePtr = skb->data + ETH_HLEN;
+
+ if (protocol == ETH_P_IPX) {
+ DEBUG_INFO("NAT25: Protocol = IPX (Ethernet II)\n");
+ ipx = (struct ipxhdr *)framePtr;
+ } else if (protocol <= ETH_FRAME_LEN) {
+ if (!memcmp(ipx_header, framePtr, 2)) {
+ DEBUG_INFO("NAT25: Protocol = IPX (Ethernet 802.3)\n");
+ ipx = (struct ipxhdr *)framePtr;
+ } else {
+ unsigned char ipx_8022_type = 0xE0;
+ unsigned char snap_8022_type = 0xAA;
+
+ if (*framePtr == snap_8022_type) {
+ unsigned char ipx_snap_id[5] = {0x0, 0x0, 0x0, 0x81, 0x37}; /* IPX SNAP ID */
+ unsigned char aarp_snap_id[5] = {0x00, 0x00, 0x00, 0x80, 0xF3}; /* Apple Talk AARP SNAP ID */
+ unsigned char ddp_snap_id[5] = {0x08, 0x00, 0x07, 0x80, 0x9B}; /* Apple Talk DDP SNAP ID */
+
+ framePtr += 3; /* eliminate the 802.2 header */
+
+ if (!memcmp(ipx_snap_id, framePtr, 5)) {
+ framePtr += 5; /* eliminate the SNAP header */
+
+ DEBUG_INFO("NAT25: Protocol = IPX (Ethernet SNAP)\n");
+ ipx = (struct ipxhdr *)framePtr;
+ } else if (!memcmp(aarp_snap_id, framePtr, 5)) {
+ framePtr += 5; /* eliminate the SNAP header */
+
+ ea = (struct elapaarp *)framePtr;
+ } else if (!memcmp(ddp_snap_id, framePtr, 5)) {
+ framePtr += 5; /* eliminate the SNAP header */
+
+ ddp = (struct ddpehdr *)framePtr;
+ } else {
+ DEBUG_WARN("NAT25: Protocol = Ethernet SNAP %02x%02x%02x%02x%02x\n", framePtr[0],
+ framePtr[1], framePtr[2], framePtr[3], framePtr[4]);
+ return -1;
+ }
+ } else if (*framePtr == ipx_8022_type) {
+ framePtr += 3; /* eliminate the 802.2 header */
+
+ if (!memcmp(ipx_header, framePtr, 2)) {
+ DEBUG_INFO("NAT25: Protocol = IPX (Ethernet 802.2)\n");
+ ipx = (struct ipxhdr *)framePtr;
+ } else {
+ return -1;
+ }
+ } else {
+ return -1;
+ }
+ }
+ } else {
+ return -1;
+ }
+
+ /* IPX */
+ if (ipx != NULL) {
+ switch (method) {
+ case NAT25_CHECK:
+ if (!memcmp(skb->data+ETH_ALEN, ipx->ipx_source.node, ETH_ALEN)) {
+ DEBUG_INFO("NAT25: Check IPX skb_copy\n");
+ return 0;
+ }
+ return -1;
+ case NAT25_INSERT:
+ DEBUG_INFO("NAT25: Insert IPX, Dest =%08x,%02x%02x%02x%02x%02x%02x,%04x Source =%08x,%02x%02x%02x%02x%02x%02x,%04x\n",
+ ipx->ipx_dest.net,
+ ipx->ipx_dest.node[0],
+ ipx->ipx_dest.node[1],
+ ipx->ipx_dest.node[2],
+ ipx->ipx_dest.node[3],
+ ipx->ipx_dest.node[4],
+ ipx->ipx_dest.node[5],
+ ipx->ipx_dest.sock,
+ ipx->ipx_source.net,
+ ipx->ipx_source.node[0],
+ ipx->ipx_source.node[1],
+ ipx->ipx_source.node[2],
+ ipx->ipx_source.node[3],
+ ipx->ipx_source.node[4],
+ ipx->ipx_source.node[5],
+ ipx->ipx_source.sock);
+
+ if (!memcmp(skb->data+ETH_ALEN, ipx->ipx_source.node, ETH_ALEN)) {
+ DEBUG_INFO("NAT25: Use IPX Net, and Socket as network addr\n");
+
+ __nat25_generate_ipx_network_addr_with_socket(networkAddr, &ipx->ipx_source.net, &ipx->ipx_source.sock);
+
+ /* change IPX source node addr to wlan STA address */
+ memcpy(ipx->ipx_source.node, GET_MY_HWADDR(priv), ETH_ALEN);
+ } else {
+ __nat25_generate_ipx_network_addr_with_node(networkAddr, &ipx->ipx_source.net, ipx->ipx_source.node);
+ }
+ __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+ __nat25_db_print(priv);
+ return 0;
+ case NAT25_LOOKUP:
+ if (!memcmp(GET_MY_HWADDR(priv), ipx->ipx_dest.node, ETH_ALEN)) {
+ DEBUG_INFO("NAT25: Lookup IPX, Modify Destination IPX Node addr\n");
+
+ __nat25_generate_ipx_network_addr_with_socket(networkAddr, &ipx->ipx_dest.net, &ipx->ipx_dest.sock);
+
+ __nat25_db_network_lookup_and_replace(priv, skb, networkAddr);
+
+ /* replace IPX destination node addr with Lookup destination MAC addr */
+ memcpy(ipx->ipx_dest.node, skb->data, ETH_ALEN);
+ } else {
+ __nat25_generate_ipx_network_addr_with_node(networkAddr, &ipx->ipx_dest.net, ipx->ipx_dest.node);
+
+ __nat25_db_network_lookup_and_replace(priv, skb, networkAddr);
+ }
+ return 0;
+ default:
+ return -1;
+ }
+ } else if (ea != NULL) {
+ /* Sanity check fields. */
+ if (ea->hw_len != ETH_ALEN || ea->pa_len != AARP_PA_ALEN) {
+ DEBUG_WARN("NAT25: Appletalk AARP Sanity check fail!\n");
+ return -1;
+ }
+
+ switch (method) {
+ case NAT25_CHECK:
+ return 0;
+ case NAT25_INSERT:
+ /* change the AARP source MAC address to the wlan STA address */
+ memcpy(ea->hw_src, GET_MY_HWADDR(priv), ETH_ALEN);
+
+ DEBUG_INFO("NAT25: Insert AARP, Source =%d,%d Destination =%d,%d\n",
+ ea->pa_src_net,
+ ea->pa_src_node,
+ ea->pa_dst_net,
+ ea->pa_dst_node);
+
+ __nat25_generate_apple_network_addr(networkAddr, &ea->pa_src_net, &ea->pa_src_node);
+
+ __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+
+ __nat25_db_print(priv);
+ return 0;
+ case NAT25_LOOKUP:
+ DEBUG_INFO("NAT25: Lookup AARP, Source =%d,%d Destination =%d,%d\n",
+ ea->pa_src_net,
+ ea->pa_src_node,
+ ea->pa_dst_net,
+ ea->pa_dst_node);
+
+ __nat25_generate_apple_network_addr(networkAddr, &ea->pa_dst_net, &ea->pa_dst_node);
+
+ __nat25_db_network_lookup_and_replace(priv, skb, networkAddr);
+
+ /* change the AARP destination MAC address to the lookup result */
+ memcpy(ea->hw_dst, skb->data, ETH_ALEN);
+ return 0;
+ default:
+ return -1;
+ }
+ } else if (ddp != NULL) {
+ switch (method) {
+ case NAT25_CHECK:
+ return -1;
+ case NAT25_INSERT:
+ DEBUG_INFO("NAT25: Insert DDP, Source =%d,%d Destination =%d,%d\n",
+ ddp->deh_snet,
+ ddp->deh_snode,
+ ddp->deh_dnet,
+ ddp->deh_dnode);
+
+ __nat25_generate_apple_network_addr(networkAddr, &ddp->deh_snet, &ddp->deh_snode);
+
+ __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+
+ __nat25_db_print(priv);
+ return 0;
+ case NAT25_LOOKUP:
+ DEBUG_INFO("NAT25: Lookup DDP, Source =%d,%d Destination =%d,%d\n",
+ ddp->deh_snet,
+ ddp->deh_snode,
+ ddp->deh_dnet,
+ ddp->deh_dnode);
+ __nat25_generate_apple_network_addr(networkAddr, &ddp->deh_dnet, &ddp->deh_dnode);
+ __nat25_db_network_lookup_and_replace(priv, skb, networkAddr);
+ return 0;
+ default:
+ return -1;
+ }
+ }
+
+ return -1;
+ } else if ((protocol == ETH_P_PPP_DISC) ||
+ (protocol == ETH_P_PPP_SES)) {
+ /*---------------------------------------------------*/
+ /* Handle PPPoE frame */
+ /*---------------------------------------------------*/
+ struct pppoe_hdr *ph = (struct pppoe_hdr *)(skb->data + ETH_HLEN);
+ unsigned short *pMagic;
+
+ switch (method) {
+ case NAT25_CHECK:
+ if (ph->sid == 0)
+ return 0;
+ return 1;
+ case NAT25_INSERT:
+ if (ph->sid == 0) { /* Discovery phase: no session id assigned yet */
+ if (ph->code == PADI_CODE || ph->code == PADR_CODE) {
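+ /*
+ * For PADI/PADR we may insert (or extend) a PTT_RELAY_SID tag
+ * carrying a magic code plus the client's MAC address, so that
+ * the PADO/PADS reply can later be steered back to this client.
+ */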
+ if (priv->ethBrExtInfo.addPPPoETag) {
+ struct pppoe_tag *tag, *pOldTag;
+ unsigned char tag_buf[40];
+ int old_tag_len = 0;
+
+ tag = (struct pppoe_tag *)tag_buf;
+ pOldTag = (struct pppoe_tag *)__nat25_find_pppoe_tag(ph, ntohs(PTT_RELAY_SID));
+ if (pOldTag) { /* if a relay SID tag already exists, copy its old value and delete it */
+ old_tag_len = ntohs(pOldTag->tag_len);
+ if (old_tag_len+TAG_HDR_LEN+MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN > sizeof(tag_buf)) {
+ DEBUG_ERR("SID tag length too long!\n");
+ return -1;
+ }
+
+ memcpy(tag->tag_data+MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN,
+ pOldTag->tag_data, old_tag_len);
+
+ if (skb_pull_and_merge(skb, (unsigned char *)pOldTag, TAG_HDR_LEN+old_tag_len) < 0) {
+ DEBUG_ERR("call skb_pull_and_merge() failed in PADI/R packet!\n");
+ return -1;
+ }
+ ph->length = htons(ntohs(ph->length)-TAG_HDR_LEN-old_tag_len);
+ }
+
+ tag->tag_type = PTT_RELAY_SID;
+ tag->tag_len = htons(MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN+old_tag_len);
+
+ /* insert the magic_code+client mac in relay tag */
+ pMagic = (unsigned short *)tag->tag_data;
+ *pMagic = htons(MAGIC_CODE);
+ memcpy(tag->tag_data+MAGIC_CODE_LEN, skb->data+ETH_ALEN, ETH_ALEN);
+
+ /* Add relay tag */
+ if (__nat25_add_pppoe_tag(skb, tag) < 0)
+ return -1;
+
+ DEBUG_INFO("NAT25: Insert PPPoE, forward %s packet\n",
+ (ph->code == PADI_CODE ? "PADI" : "PADR"));
+ } else { /* not add relay tag */
+ if (priv->pppoe_connection_in_progress &&
+ memcmp(skb->data+ETH_ALEN, priv->pppoe_addr, ETH_ALEN)) {
+ DEBUG_ERR("Discard PPPoE packet due to another PPPoE connection is in progress!\n");
+ return -2;
+ }
+
+ if (priv->pppoe_connection_in_progress == 0)
+ memcpy(priv->pppoe_addr, skb->data+ETH_ALEN, ETH_ALEN);
+
+ priv->pppoe_connection_in_progress = WAIT_TIME_PPPOE;
+ }
+ } else {
+ return -1;
+ }
+ } else { /* session phase */
+ DEBUG_INFO("NAT25: Insert PPPoE, insert session packet to %s\n", skb->dev->name);
+
+ __nat25_generate_pppoe_network_addr(networkAddr, skb->data, &(ph->sid));
+
+ __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+
+ __nat25_db_print(priv);
+
+ if (!priv->ethBrExtInfo.addPPPoETag &&
+ priv->pppoe_connection_in_progress &&
+ !memcmp(skb->data+ETH_ALEN, priv->pppoe_addr, ETH_ALEN))
+ priv->pppoe_connection_in_progress = 0;
+ }
+ return 0;
+ case NAT25_LOOKUP:
+ if (ph->code == PADO_CODE || ph->code == PADS_CODE) {
+ if (priv->ethBrExtInfo.addPPPoETag) {
+ struct pppoe_tag *tag;
+ unsigned char *ptr;
+ unsigned short tagType, tagLen;
+ int offset = 0;
+
+ ptr = __nat25_find_pppoe_tag(ph, ntohs(PTT_RELAY_SID));
+ if (ptr == NULL) {
+ DEBUG_ERR("Fail to find PTT_RELAY_SID in FADO!\n");
+ return -1;
+ }
+
+ tag = (struct pppoe_tag *)ptr;
+ tagType = (unsigned short)((ptr[0] << 8) + ptr[1]);
+ tagLen = (unsigned short)((ptr[2] << 8) + ptr[3]);
+
+ if ((tagType != ntohs(PTT_RELAY_SID)) || (tagLen < (MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN))) {
+ DEBUG_ERR("Invalid PTT_RELAY_SID tag length [%d]!\n", tagLen);
+ return -1;
+ }
+
+ pMagic = (unsigned short *)tag->tag_data;
+ if (ntohs(*pMagic) != MAGIC_CODE) {
+ DEBUG_ERR("Can't find MAGIC_CODE in %s packet!\n",
+ (ph->code == PADO_CODE ? "PADO" : "PADS"));
+ return -1;
+ }
+
+ memcpy(skb->data, tag->tag_data+MAGIC_CODE_LEN, ETH_ALEN);
+
+ if (tagLen > MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN)
+ offset = TAG_HDR_LEN;
+
+ if (skb_pull_and_merge(skb, ptr+offset, TAG_HDR_LEN+MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN-offset) < 0) {
+ DEBUG_ERR("call skb_pull_and_merge() failed in PADO packet!\n");
+ return -1;
+ }
+ ph->length = htons(ntohs(ph->length)-(TAG_HDR_LEN+MAGIC_CODE_LEN+RTL_RELAY_TAG_LEN-offset));
+ if (offset > 0)
+ tag->tag_len = htons(tagLen-MAGIC_CODE_LEN-RTL_RELAY_TAG_LEN);
+
+ DEBUG_INFO("NAT25: Lookup PPPoE, forward %s Packet from %s\n",
+ (ph->code == PADO_CODE ? "PADO" : "PADS"), skb->dev->name);
+ } else { /* not add relay tag */
+ if (!priv->pppoe_connection_in_progress) {
+ DEBUG_ERR("Discard PPPoE packet due to no connection in progresss!\n");
+ return -1;
+ }
+ memcpy(skb->data, priv->pppoe_addr, ETH_ALEN);
+ priv->pppoe_connection_in_progress = WAIT_TIME_PPPOE;
+ }
+ } else {
+ if (ph->sid != 0) {
+ DEBUG_INFO("NAT25: Lookup PPPoE, lookup session packet from %s\n", skb->dev->name);
+ __nat25_generate_pppoe_network_addr(networkAddr, skb->data+ETH_ALEN, &(ph->sid));
+ __nat25_db_network_lookup_and_replace(priv, skb, networkAddr);
+ __nat25_db_print(priv);
+ } else {
+ return -1;
+ }
+ }
+ return 0;
+ default:
+ return -1;
+ }
+ } else if (protocol == 0x888e) {
+ /*---------------------------------------------------*/
+ /* Handle EAP frame */
+ /*---------------------------------------------------*/
+ switch (method) {
+ case NAT25_CHECK:
+ return -1;
+ case NAT25_INSERT:
+ return 0;
+ case NAT25_LOOKUP:
+ return 0;
+ default:
+ return -1;
+ }
+ } else if ((protocol == 0xe2ae) || (protocol == 0xe2af)) {
+ /*---------------------------------------------------*/
+ /* Handle C-Media proprietary frame */
+ /*---------------------------------------------------*/
+ switch (method) {
+ case NAT25_CHECK:
+ return -1;
+ case NAT25_INSERT:
+ return 0;
+ case NAT25_LOOKUP:
+ return 0;
+ default:
+ return -1;
+ }
+ } else if (protocol == ETH_P_IPV6) {
+ /*------------------------------------------------*/
+ /* Handle IPV6 frame */
+ /*------------------------------------------------*/
+ struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+
+ if (sizeof(*iph) >= (skb->len - ETH_HLEN)) {
+ DEBUG_WARN("NAT25: malformed IPv6 packet !\n");
+ return -1;
+ }
+
+ switch (method) {
+ case NAT25_CHECK:
+ if (skb->data[0] & 1)
+ return 0;
+ return -1;
+ case NAT25_INSERT:
+ DEBUG_INFO("NAT25: Insert IP, SA =%4x:%4x:%4x:%4x:%4x:%4x:%4x:%4x,"
+ " DA =%4x:%4x:%4x:%4x:%4x:%4x:%4x:%4x\n",
+ iph->saddr.s6_addr16[0], iph->saddr.s6_addr16[1], iph->saddr.s6_addr16[2], iph->saddr.s6_addr16[3],
+ iph->saddr.s6_addr16[4], iph->saddr.s6_addr16[5], iph->saddr.s6_addr16[6], iph->saddr.s6_addr16[7],
+ iph->daddr.s6_addr16[0], iph->daddr.s6_addr16[1], iph->daddr.s6_addr16[2], iph->daddr.s6_addr16[3],
+ iph->daddr.s6_addr16[4], iph->daddr.s6_addr16[5], iph->daddr.s6_addr16[6], iph->daddr.s6_addr16[7]);
+
+ if (memcmp(&iph->saddr, "\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0", 16)) {
+ __nat25_generate_ipv6_network_addr(networkAddr, (unsigned int *)&iph->saddr);
+ __nat25_db_network_insert(priv, skb->data+ETH_ALEN, networkAddr);
+ __nat25_db_print(priv);
+
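+ /*
+ * For ICMPv6 neighbour discovery the link-layer address option is
+ * rewritten to the wlan STA address, so the ICMPv6 checksum has to
+ * be recomputed over the pseudo-header afterwards.
+ */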
+ if (iph->nexthdr == IPPROTO_ICMPV6 &&
+ skb->len > (ETH_HLEN + sizeof(*iph) + 4)) {
+ if (update_nd_link_layer_addr(skb->data + ETH_HLEN + sizeof(*iph),
+ skb->len - ETH_HLEN - sizeof(*iph), GET_MY_HWADDR(priv))) {
+ struct icmp6hdr *hdr = (struct icmp6hdr *)(skb->data + ETH_HLEN + sizeof(*iph));
+ hdr->icmp6_cksum = 0;
+ hdr->icmp6_cksum = csum_ipv6_magic(&iph->saddr, &iph->daddr,
+ iph->payload_len,
+ IPPROTO_ICMPV6,
+ csum_partial((__u8 *)hdr, iph->payload_len, 0));
+ }
+ }
+ }
+ return 0;
+ case NAT25_LOOKUP:
+ DEBUG_INFO("NAT25: Lookup IP, SA =%4x:%4x:%4x:%4x:%4x:%4x:%4x:%4x, DA =%4x:%4x:%4x:%4x:%4x:%4x:%4x:%4x\n",
+ iph->saddr.s6_addr16[0], iph->saddr.s6_addr16[1], iph->saddr.s6_addr16[2], iph->saddr.s6_addr16[3],
+ iph->saddr.s6_addr16[4], iph->saddr.s6_addr16[5], iph->saddr.s6_addr16[6], iph->saddr.s6_addr16[7],
+ iph->daddr.s6_addr16[0], iph->daddr.s6_addr16[1], iph->daddr.s6_addr16[2], iph->daddr.s6_addr16[3],
+ iph->daddr.s6_addr16[4], iph->daddr.s6_addr16[5], iph->daddr.s6_addr16[6], iph->daddr.s6_addr16[7]);
+ __nat25_generate_ipv6_network_addr(networkAddr, (unsigned int *)&iph->daddr);
+ __nat25_db_network_lookup_and_replace(priv, skb, networkAddr);
+ return 0;
+ default:
+ return -1;
+ }
+ }
+ return -1;
+}
+
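+/*
+ * For a unicast frame: temporarily strip any 802.1Q tag, rewrite the
+ * destination MAC either via the shortcut cache or via a NAT25_LOOKUP,
+ * restore the VLAN tag, and return -1 if the lookup failed, 0 otherwise.
+ */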
+int nat25_handle_frame(struct adapter *priv, struct sk_buff *skb)
+{
+ if (!(skb->data[0] & 1)) {
+ int is_vlan_tag = 0, i, retval = 0;
+ unsigned short vlan_hdr = 0;
+ unsigned short protocol;
+
+ protocol = be16_to_cpu(*((__be16 *)(skb->data + 2 * ETH_ALEN)));
+ if (protocol == ETH_P_8021Q) {
+ is_vlan_tag = 1;
+ vlan_hdr = *((unsigned short *)(skb->data+ETH_ALEN*2+2));
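+ /*
+ * Shift the dst/src MAC pair 4 bytes towards the payload so that
+ * skb_pull() below drops the 802.1Q tag; the tag is restored after
+ * the NAT lookup further down.
+ */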
+ for (i = 0; i < 6; i++)
+ *((unsigned short *)(skb->data+ETH_ALEN*2+2-i*2)) = *((unsigned short *)(skb->data+ETH_ALEN*2-2-i*2));
+ skb_pull(skb, 4);
+ }
+
+ if (!priv->ethBrExtInfo.nat25_disable) {
+ unsigned long irqL;
+ _enter_critical_bh(&priv->br_ext_lock, &irqL);
+ /*
+ * Look up the destination network address in the
+ * NAT2.5 database. A return value of -1 means the
+ * corresponding network protocol is not supported.
+ */
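+ /*
+ * Shortcut path: if the cached scdb IP matches this packet's
+ * destination IP, reuse the cached MAC and skip the full
+ * NAT25 lookup.
+ */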
+ if (!priv->ethBrExtInfo.nat25sc_disable &&
+ (be16_to_cpu(*((__be16 *)(skb->data+ETH_ALEN*2))) == ETH_P_IP) &&
+ !memcmp(priv->scdb_ip, skb->data+ETH_HLEN+16, 4)) {
+ memcpy(skb->data, priv->scdb_mac, ETH_ALEN);
+
+ _exit_critical_bh(&priv->br_ext_lock, &irqL);
+ } else {
+ _exit_critical_bh(&priv->br_ext_lock, &irqL);
+
+ retval = nat25_db_handle(priv, skb, NAT25_LOOKUP);
+ }
+ } else {
+ if (((be16_to_cpu(*((__be16 *)(skb->data+ETH_ALEN*2))) == ETH_P_IP) &&
+ !memcmp(priv->br_ip, skb->data+ETH_HLEN+16, 4)) ||
+ ((be16_to_cpu(*((__be16 *)(skb->data+ETH_ALEN*2))) == ETH_P_ARP) &&
+ !memcmp(priv->br_ip, skb->data+ETH_HLEN+24, 4))) {
+ /* for traffic to upper TCP/IP */
+ retval = nat25_db_handle(priv, skb, NAT25_LOOKUP);
+ }
+ }
+
+ if (is_vlan_tag) {
+ skb_push(skb, 4);
+ for (i = 0; i < 6; i++)
+ *((unsigned short *)(skb->data+i*2)) = *((unsigned short *)(skb->data+4+i*2));
+ *((__be16 *)(skb->data+ETH_ALEN*2)) = __constant_htons(ETH_P_8021Q);
+ *((unsigned short *)(skb->data+ETH_ALEN*2+2)) = vlan_hdr;
+ }
+
+ if (retval == -1) {
+ /* DEBUG_ERR("NAT25: Lookup fail!\n"); */
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+#define SERVER_PORT 67
+#define CLIENT_PORT 68
+#define DHCP_MAGIC 0x63825363
+#define BROADCAST_FLAG 0x8000
+
+struct dhcpMessage {
+ u_int8_t op;
+ u_int8_t htype;
+ u_int8_t hlen;
+ u_int8_t hops;
+ u_int32_t xid;
+ u_int16_t secs;
+ u_int16_t flags;
+ u_int32_t ciaddr;
+ u_int32_t yiaddr;
+ u_int32_t siaddr;
+ u_int32_t giaddr;
+ u_int8_t chaddr[16];
+ u_int8_t sname[64];
+ u_int8_t file[128];
+ u_int32_t cookie;
+ u_int8_t options[308]; /* 312 - cookie */
+};
+
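+/*
+ * Force the BROADCAST flag in client DHCP requests so that the server's
+ * reply is broadcast; with NAT2.5 the client's MAC has been rewritten, so a
+ * unicast reply to the original chaddr might not reach it.
+ */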
+void dhcp_flag_bcast(struct adapter *priv, struct sk_buff *skb)
+{
+ if (skb == NULL)
+ return;
+
+ if (!priv->ethBrExtInfo.dhcp_bcst_disable) {
+ __be16 protocol = *((__be16 *)(skb->data + 2 * ETH_ALEN));
+
+ if (protocol == __constant_htons(ETH_P_IP)) { /* IP */
+ struct iphdr *iph = (struct iphdr *)(skb->data + ETH_HLEN);
+
+ if (iph->protocol == IPPROTO_UDP) { /* UDP */
+ struct udphdr *udph = (struct udphdr *)((size_t)iph + (iph->ihl << 2));
+
+ if ((udph->source == __constant_htons(CLIENT_PORT)) &&
+ (udph->dest == __constant_htons(SERVER_PORT))) { /* DHCP request */
+ struct dhcpMessage *dhcph =
+ (struct dhcpMessage *)((size_t)udph + sizeof(struct udphdr));
+ u32 cookie = be32_to_cpu((__be32)dhcph->cookie);
+
+ if (cookie == DHCP_MAGIC) { /* match magic word */
+ if (!(dhcph->flags & htons(BROADCAST_FLAG))) {
+ /* if not broadcast */
+ register int sum = 0;
+
+ DEBUG_INFO("DHCP: change flag of DHCP request to broadcast.\n");
+ /* or BROADCAST flag */
+ dhcph->flags |= htons(BROADCAST_FLAG);
+ /* recalculate checksum */
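+ /*
+ * Incremental update: the broadcast bit was clear and the
+ * remaining flag bits are required to be zero, so adding the
+ * new flags value to the folded complement of the old checksum
+ * and refolding yields the updated UDP checksum.
+ */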
+ sum = ~(udph->check) & 0xffff;
+ sum += be16_to_cpu(dhcph->flags);
+ while (sum >> 16)
+ sum = (sum & 0xffff) + (sum >> 16);
+ udph->check = ~sum;
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+
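+/*
+ * Look up the NAT25 entry for the given IPv4 address and return it (or NULL).
+ * Unlike the helpers above, this walks the hash without taking br_ext_lock;
+ * the locking calls are left commented out.
+ */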
+void *scdb_findEntry(struct adapter *priv, unsigned char *macAddr,
+ unsigned char *ipAddr)
+{
+ unsigned char networkAddr[MAX_NETWORK_ADDR_LEN];
+ struct nat25_network_db_entry *db;
+ int hash;
+ /* unsigned long irqL; */
+ /* _enter_critical_bh(&priv->br_ext_lock, &irqL); */
+
+ __nat25_generate_ipv4_network_addr(networkAddr, (unsigned int *)ipAddr);
+ hash = __nat25_network_hash(networkAddr);
+ db = priv->nethash[hash];
+ while (db != NULL) {
+ if (!memcmp(db->networkAddr, networkAddr, MAX_NETWORK_ADDR_LEN)) {
+ /* _exit_critical_bh(&priv->br_ext_lock, &irqL); */
+ return (void *)db;
+ }
+
+ db = db->next_hash;
+ }
+
+ /* _exit_critical_bh(&priv->br_ext_lock, &irqL); */
+ return NULL;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
new file mode 100644
index 00000000000..9632ef48fbc
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -0,0 +1,2364 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_CMD_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <cmd_osdep.h>
+#include <mlme_osdep.h>
+#include <rtw_br_ext.h>
+#include <rtw_mlme_ext.h>
+
+/*
+The caller and rtw_cmd_thread protect cmd_q with a spinlock;
+no irqsave is necessary.
+*/
+
+int _rtw_init_cmd_priv (struct cmd_priv *pcmdpriv)
+{
+ int res = _SUCCESS;
+
+_func_enter_;
+
+ _rtw_init_sema(&(pcmdpriv->cmd_queue_sema), 0);
+ /* _rtw_init_sema(&(pcmdpriv->cmd_done_sema), 0); */
+ _rtw_init_sema(&(pcmdpriv->terminate_cmdthread_sema), 0);
+
+
+ _rtw_init_queue(&(pcmdpriv->cmd_queue));
+
+ /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
+
+ pcmdpriv->cmd_seq = 1;
+
+ pcmdpriv->cmd_allocated_buf = rtw_zmalloc(MAX_CMDSZ + CMDBUFF_ALIGN_SZ);
+
+ if (pcmdpriv->cmd_allocated_buf == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
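+ /* align cmd_buf up to the next CMDBUFF_ALIGN_SZ boundary inside the allocation */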
+ pcmdpriv->cmd_buf = pcmdpriv->cmd_allocated_buf + CMDBUFF_ALIGN_SZ - ((size_t)(pcmdpriv->cmd_allocated_buf) & (CMDBUFF_ALIGN_SZ-1));
+
+ pcmdpriv->rsp_allocated_buf = rtw_zmalloc(MAX_RSPSZ + 4);
+
+ if (pcmdpriv->rsp_allocated_buf == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((size_t)(pcmdpriv->rsp_allocated_buf) & 3);
+
+ pcmdpriv->cmd_issued_cnt = 0;
+ pcmdpriv->cmd_done_cnt = 0;
+ pcmdpriv->rsp_cnt = 0;
+exit:
+_func_exit_;
+ return res;
+}
+
+static void c2h_wk_callback(struct work_struct *work);
+
+int _rtw_init_evt_priv(struct evt_priv *pevtpriv)
+{
+ int res = _SUCCESS;
+
+_func_enter_;
+
+ /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
+ ATOMIC_SET(&pevtpriv->event_seq, 0);
+ pevtpriv->evt_done_cnt = 0;
+
+ _init_workitem(&pevtpriv->c2h_wk, c2h_wk_callback, NULL);
+ pevtpriv->c2h_wk_alive = false;
+ pevtpriv->c2h_queue = rtw_cbuf_alloc(C2H_QUEUE_MAX_LEN+1);
+
+_func_exit_;
+
+ return res;
+}
+
+void rtw_free_evt_priv(struct evt_priv *pevtpriv)
+{
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("+rtw_free_evt_priv\n"));
+
+ _cancel_workitem_sync(&pevtpriv->c2h_wk);
+ while (pevtpriv->c2h_wk_alive)
+ rtw_msleep_os(10);
+
+ while (!rtw_cbuf_empty(pevtpriv->c2h_queue)) {
+ void *c2h = rtw_cbuf_pop(pevtpriv->c2h_queue);
+ if (c2h != NULL && c2h != (void *)pevtpriv)
+ kfree(c2h);
+ }
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("-rtw_free_evt_priv\n"));
+
+_func_exit_;
+}
+
+void _rtw_free_cmd_priv (struct cmd_priv *pcmdpriv)
+{
+_func_enter_;
+
+ if (pcmdpriv) {
+ _rtw_spinlock_free(&(pcmdpriv->cmd_queue.lock));
+ _rtw_free_sema(&(pcmdpriv->cmd_queue_sema));
+ _rtw_free_sema(&(pcmdpriv->terminate_cmdthread_sema));
+
+ if (pcmdpriv->cmd_allocated_buf)
+ kfree(pcmdpriv->cmd_allocated_buf);
+
+ if (pcmdpriv->rsp_allocated_buf)
+ kfree(pcmdpriv->rsp_allocated_buf);
+ }
+_func_exit_;
+}
+
+/*
+Calling Context:
+
+rtw_enqueue_cmd can only be called from kernel thread context,
+since only spin_lock is used.
+
+ISR/callback functions must not call this function.
+*/
+
+int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
+{
+ unsigned long irqL;
+
+_func_enter_;
+
+ if (obj == NULL)
+ goto exit;
+
+ /* _enter_critical_bh(&queue->lock, &irqL); */
+ _enter_critical(&queue->lock, &irqL);
+
+ rtw_list_insert_tail(&obj->list, &queue->queue);
+
+ /* _exit_critical_bh(&queue->lock, &irqL); */
+ _exit_critical(&queue->lock, &irqL);
+
+exit:
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+struct cmd_obj *_rtw_dequeue_cmd(struct __queue *queue)
+{
+ unsigned long irqL;
+ struct cmd_obj *obj;
+
+_func_enter_;
+
+ /* _enter_critical_bh(&(queue->lock), &irqL); */
+ _enter_critical(&queue->lock, &irqL);
+ if (rtw_is_list_empty(&(queue->queue))) {
+ obj = NULL;
+ } else {
+ obj = LIST_CONTAINOR(get_next(&(queue->queue)), struct cmd_obj, list);
+ rtw_list_delete(&obj->list);
+ }
+
+ /* _exit_critical_bh(&(queue->lock), &irqL); */
+ _exit_critical(&queue->lock, &irqL);
+
+_func_exit_;
+
+ return obj;
+}
+
+u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
+{
+ u32 res;
+_func_enter_;
+ res = _rtw_init_cmd_priv (pcmdpriv);
+_func_exit_;
+ return res;
+}
+
+u32 rtw_init_evt_priv (struct evt_priv *pevtpriv)
+{
+ int res;
+_func_enter_;
+ res = _rtw_init_evt_priv(pevtpriv);
+_func_exit_;
+ return res;
+}
+
+void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv)
+{
+_func_enter_;
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("rtw_free_cmd_priv\n"));
+ _rtw_free_cmd_priv(pcmdpriv);
+_func_exit_;
+}
+
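+/*
+ * Decide whether cmd_obj may be enqueued: commands are rejected when the cmd
+ * thread is not running, and also before hw init has completed unless the
+ * command is one of the few allowed early (power-saving work under HW
+ * power-pin detect, or channel plan setting).
+ */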
+int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
+{
+ u8 bAllow = false; /* set to true to allow enqueuing cmd when hw_init_completed is false */
+
+ /* To decide allow or not */
+ if ((pcmdpriv->padapter->pwrctrlpriv.bHWPwrPindetect) &&
+ (!pcmdpriv->padapter->registrypriv.usbss_enable)) {
+ if (cmd_obj->cmdcode == GEN_CMD_CODE(_Set_Drv_Extra)) {
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)cmd_obj->parmbuf;
+ if (pdrvextra_cmd_parm->ec_id == POWER_SAVING_CTRL_WK_CID)
+ bAllow = true;
+ }
+ }
+
+ if (cmd_obj->cmdcode == GEN_CMD_CODE(_SetChannelPlan))
+ bAllow = true;
+
+ if ((!pcmdpriv->padapter->hw_init_completed && !bAllow) ||
+ !pcmdpriv->cmdthd_running) /* com_thread not running */
+ return _FAIL;
+ return _SUCCESS;
+}
+
+u32 rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
+{
+ int res = _FAIL;
+ struct adapter *padapter = pcmdpriv->padapter;
+
+_func_enter_;
+
+ if (cmd_obj == NULL)
+ goto exit;
+
+ cmd_obj->padapter = padapter;
+
+ res = rtw_cmd_filter(pcmdpriv, cmd_obj);
+ if (_FAIL == res) {
+ rtw_free_cmd_obj(cmd_obj);
+ goto exit;
+ }
+
+ res = _rtw_enqueue_cmd(&pcmdpriv->cmd_queue, cmd_obj);
+
+ if (res == _SUCCESS)
+ _rtw_up_sema(&pcmdpriv->cmd_queue_sema);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv)
+{
+ struct cmd_obj *cmd_obj;
+
+_func_enter_;
+
+ cmd_obj = _rtw_dequeue_cmd(&pcmdpriv->cmd_queue);
+
+_func_exit_;
+ return cmd_obj;
+}
+
+void rtw_cmd_clr_isr(struct cmd_priv *pcmdpriv)
+{
+_func_enter_;
+ pcmdpriv->cmd_done_cnt++;
+ /* _rtw_up_sema(&(pcmdpriv->cmd_done_sema)); */
+_func_exit_;
+}
+
+void rtw_free_cmd_obj(struct cmd_obj *pcmd)
+{
+_func_enter_;
+
+ if ((pcmd->cmdcode != _JoinBss_CMD_) && (pcmd->cmdcode != _CreateBss_CMD_)) {
+ /* free parmbuf in cmd_obj */
+ kfree(pcmd->parmbuf);
+ }
+
+ if (pcmd->rsp != NULL) {
+ if (pcmd->rspsz != 0) {
+ /* free rsp in cmd_obj */
+ kfree(pcmd->rsp);
+ }
+ }
+
+ /* free cmd_obj */
+ kfree(pcmd);
+
+_func_exit_;
+}
+
+int rtw_cmd_thread(void *context)
+{
+ u8 ret;
+ struct cmd_obj *pcmd;
+ u8 *pcmdbuf;
+ u8 (*cmd_hdl)(struct adapter *padapter, u8 *pbuf);
+ void (*pcmd_callback)(struct adapter *dev, struct cmd_obj *pcmd);
+ struct adapter *padapter = (struct adapter *)context;
+ struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
+
+_func_enter_;
+
+ thread_enter("RTW_CMD_THREAD");
+
+ pcmdbuf = pcmdpriv->cmd_buf;
+
+ pcmdpriv->cmdthd_running = true;
+ _rtw_up_sema(&pcmdpriv->terminate_cmdthread_sema);
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("start r871x rtw_cmd_thread !!!!\n"));
+
+ while (1) {
+ if (_rtw_down_sema(&pcmdpriv->cmd_queue_sema) == _FAIL)
+ break;
+
+ if (padapter->bDriverStopped ||
+ padapter->bSurpriseRemoved) {
+ DBG_88E("%s: DriverStopped(%d) SurpriseRemoved(%d) break at line %d\n",
+ __func__, padapter->bDriverStopped, padapter->bSurpriseRemoved, __LINE__);
+ break;
+ }
+_next:
+ if (padapter->bDriverStopped ||
+ padapter->bSurpriseRemoved) {
+ DBG_88E("%s: DriverStopped(%d) SurpriseRemoved(%d) break at line %d\n",
+ __func__, padapter->bDriverStopped, padapter->bSurpriseRemoved, __LINE__);
+ break;
+ }
+
+ pcmd = rtw_dequeue_cmd(pcmdpriv);
+ if (!pcmd)
+ continue;
+
+ if (_FAIL == rtw_cmd_filter(pcmdpriv, pcmd)) {
+ pcmd->res = H2C_DROPPED;
+ goto post_process;
+ }
+
+ pcmdpriv->cmd_issued_cnt++;
+
+ pcmd->cmdsz = _RND4((pcmd->cmdsz)); /* round the command size up to a multiple of 4 */
+
+ memcpy(pcmdbuf, pcmd->parmbuf, pcmd->cmdsz);
+
+ if (pcmd->cmdcode < ARRAY_SIZE(wlancmds)) {
+ cmd_hdl = wlancmds[pcmd->cmdcode].h2cfuns;
+
+ if (cmd_hdl) {
+ ret = cmd_hdl(pcmd->padapter, pcmdbuf);
+ pcmd->res = ret;
+ }
+
+ pcmdpriv->cmd_seq++;
+ } else {
+ pcmd->res = H2C_PARAMETERS_ERROR;
+ }
+
+ cmd_hdl = NULL;
+
+post_process:
+
+ /* call callback function for post-processed */
+ if (pcmd->cmdcode < ARRAY_SIZE(rtw_cmd_callback)) {
+ pcmd_callback = rtw_cmd_callback[pcmd->cmdcode].callback;
+ if (pcmd_callback == NULL) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("mlme_cmd_hdl(): pcmd_callback = 0x%p, cmdcode = 0x%x\n", pcmd_callback, pcmd->cmdcode));
+ rtw_free_cmd_obj(pcmd);
+ } else {
+ /* todo: !!! fill rsp_buf to pcmd->rsp if (pcmd->rsp!= NULL) */
+ pcmd_callback(pcmd->padapter, pcmd); /* note: cmd_obj may be freed inside the rtw_cmd_callback handler */
+ }
+ } else {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("%s: cmdcode = 0x%x callback not defined!\n", __func__, pcmd->cmdcode));
+ rtw_free_cmd_obj(pcmd);
+ }
+
+ flush_signals_thread();
+
+ goto _next;
+ }
+ pcmdpriv->cmdthd_running = false;
+
+ /* free all cmd_obj resources */
+ do {
+ pcmd = rtw_dequeue_cmd(pcmdpriv);
+ if (pcmd == NULL)
+ break;
+
+ /* DBG_88E("%s: leaving... drop cmdcode:%u\n", __func__, pcmd->cmdcode); */
+
+ rtw_free_cmd_obj(pcmd);
+ } while (1);
+
+ _rtw_up_sema(&pcmdpriv->terminate_cmdthread_sema);
+
+_func_exit_;
+
+ thread_exit();
+}
+
+u8 rtw_setstandby_cmd(struct adapter *padapter, uint action)
+{
+ struct cmd_obj *ph2c;
+ struct usb_suspend_parm *psetusbsuspend;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ u8 ret = _SUCCESS;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ psetusbsuspend = (struct usb_suspend_parm *)rtw_zmalloc(sizeof(struct usb_suspend_parm));
+ if (psetusbsuspend == NULL) {
+ kfree(ph2c);
+ ret = _FAIL;
+ goto exit;
+ }
+
+ psetusbsuspend->action = action;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetusbsuspend, GEN_CMD_CODE(_SetUsbSuspend));
+
+ ret = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+_func_exit_;
+
+ return ret;
+}
+
+/*
+rtw_sitesurvey_cmd(~)
+
+NOTE: the caller must hold pmlmepriv->lock before calling this function.
+*/
+u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, int ssid_num,
+ struct rtw_ieee80211_channel *ch, int ch_num)
+{
+ u8 res = _FAIL;
+ struct cmd_obj *ph2c;
+ struct sitesurvey_parm *psurveyPara;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+_func_enter_;
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
+ }
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ p2p_ps_wk_cmd(padapter, P2P_PS_SCAN, 1);
+ }
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL)
+ return _FAIL;
+
+ psurveyPara = (struct sitesurvey_parm *)rtw_zmalloc(sizeof(struct sitesurvey_parm));
+ if (psurveyPara == NULL) {
+ kfree(ph2c);
+ return _FAIL;
+ }
+
+ rtw_free_network_queue(padapter, false);
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("%s: flush network queue\n", __func__));
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara, GEN_CMD_CODE(_SiteSurvey));
+
+ /* psurveyPara->bsslimit = 48; */
+ psurveyPara->scan_mode = pmlmepriv->scan_mode;
+
+ /* prepare ssid list */
+ if (ssid) {
+ int i;
+ for (i = 0; i < ssid_num && i < RTW_SSID_SCAN_AMOUNT; i++) {
+ if (ssid[i].SsidLength) {
+ memcpy(&psurveyPara->ssid[i], &ssid[i], sizeof(struct ndis_802_11_ssid));
+ psurveyPara->ssid_num++;
+ if (0)
+ DBG_88E(FUNC_ADPT_FMT" ssid:(%s, %d)\n", FUNC_ADPT_ARG(padapter),
+ psurveyPara->ssid[i].Ssid, psurveyPara->ssid[i].SsidLength);
+ }
+ }
+ }
+
+ /* prepare channel list */
+ if (ch) {
+ int i;
+ for (i = 0; i < ch_num && i < RTW_CHANNEL_SCAN_AMOUNT; i++) {
+ if (ch[i].hw_value && !(ch[i].flags & RTW_IEEE80211_CHAN_DISABLED)) {
+ memcpy(&psurveyPara->ch[i], &ch[i], sizeof(struct rtw_ieee80211_channel));
+ psurveyPara->ch_num++;
+ if (0)
+ DBG_88E(FUNC_ADPT_FMT" ch:%u\n", FUNC_ADPT_ARG(padapter),
+ psurveyPara->ch[i].hw_value);
+ }
+ }
+ }
+
+ set_fwstate(pmlmepriv, _FW_UNDER_SURVEY);
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+ if (res == _SUCCESS) {
+ pmlmepriv->scan_start_time = rtw_get_current_time();
+
+ _set_timer(&pmlmepriv->scan_to_timer, SCANNING_TIMEOUT);
+
+ rtw_led_control(padapter, LED_CTL_SITE_SURVEY);
+
+ pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
+ } else {
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
+ }
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset)
+{
+ struct cmd_obj *ph2c;
+ struct setdatarate_parm *pbsetdataratepara;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pbsetdataratepara = (struct setdatarate_parm *)rtw_zmalloc(sizeof(struct setdatarate_parm));
+ if (pbsetdataratepara == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pbsetdataratepara, GEN_CMD_CODE(_SetDataRate));
+ pbsetdataratepara->mac_id = 5;
+ memcpy(pbsetdataratepara->datarates, rateset, NumRates);
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_setbasicrate_cmd(struct adapter *padapter, u8 *rateset)
+{
+ struct cmd_obj *ph2c;
+ struct setbasicrate_parm *pssetbasicratepara;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ pssetbasicratepara = (struct setbasicrate_parm *)rtw_zmalloc(sizeof(struct setbasicrate_parm));
+
+ if (pssetbasicratepara == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pssetbasicratepara, _SetBasicRate_CMD_);
+
+ memcpy(pssetbasicratepara->basicrates, rateset, NumRates);
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+
+/*
+unsigned char rtw_setphy_cmd(unsigned char *adapter)
+
+1. to be called only after rtw_update_registrypriv_dev_network(~) or from the mp testing program
+2. for AdHoc/AP mode or mp mode?
+*/
+u8 rtw_setphy_cmd(struct adapter *padapter, u8 modem, u8 ch)
+{
+ struct cmd_obj *ph2c;
+ struct setphy_parm *psetphypara;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ psetphypara = (struct setphy_parm *)rtw_zmalloc(sizeof(struct setphy_parm));
+
+ if (psetphypara == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetphypara, _SetPhy_CMD_);
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("CH =%d, modem =%d", ch, modem));
+
+ psetphypara->modem = modem;
+ psetphypara->rfchannel = ch;
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+_func_exit_;
+ return res;
+}
+
+u8 rtw_setbbreg_cmd(struct adapter *padapter, u8 offset, u8 val)
+{
+ struct cmd_obj *ph2c;
+ struct writeBB_parm *pwritebbparm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ pwritebbparm = (struct writeBB_parm *)rtw_zmalloc(sizeof(struct writeBB_parm));
+
+ if (pwritebbparm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pwritebbparm, GEN_CMD_CODE(_SetBBReg));
+
+ pwritebbparm->offset = offset;
+ pwritebbparm->value = val;
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+_func_exit_;
+ return res;
+}
+
+u8 rtw_getbbreg_cmd(struct adapter *padapter, u8 offset, u8 *pval)
+{
+ struct cmd_obj *ph2c;
+ struct readBB_parm *prdbbparm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ prdbbparm = (struct readBB_parm *)rtw_zmalloc(sizeof(struct readBB_parm));
+
+ if (prdbbparm == NULL) {
+ kfree(ph2c);
+ return _FAIL;
+ }
+
+ _rtw_init_listhead(&ph2c->list);
+ ph2c->cmdcode = GEN_CMD_CODE(_GetBBReg);
+ ph2c->parmbuf = (unsigned char *)prdbbparm;
+ ph2c->cmdsz = sizeof(struct readBB_parm);
+ ph2c->rsp = pval;
+ ph2c->rspsz = sizeof(struct readBB_rsp);
+
+ prdbbparm->offset = offset;
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+_func_exit_;
+ return res;
+}
+
+u8 rtw_setrfreg_cmd(struct adapter *padapter, u8 offset, u32 val)
+{
+ struct cmd_obj *ph2c;
+ struct writeRF_parm *pwriterfparm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+_func_enter_;
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ pwriterfparm = (struct writeRF_parm *)rtw_zmalloc(sizeof(struct writeRF_parm));
+
+ if (pwriterfparm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pwriterfparm, GEN_CMD_CODE(_SetRFReg));
+
+ pwriterfparm->offset = offset;
+ pwriterfparm->value = val;
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+_func_exit_;
+ return res;
+}
+
+u8 rtw_getrfreg_cmd(struct adapter *padapter, u8 offset, u8 *pval)
+{
+ struct cmd_obj *ph2c;
+ struct readRF_parm *prdrfparm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ prdrfparm = (struct readRF_parm *)rtw_zmalloc(sizeof(struct readRF_parm));
+ if (prdrfparm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ _rtw_init_listhead(&ph2c->list);
+ ph2c->cmdcode = GEN_CMD_CODE(_GetRFReg);
+ ph2c->parmbuf = (unsigned char *)prdrfparm;
+ ph2c->cmdsz = sizeof(struct readRF_parm);
+ ph2c->rsp = pval;
+ ph2c->rspsz = sizeof(struct readRF_rsp);
+
+ prdrfparm->offset = offset;
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+void rtw_getbbrfreg_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ _func_enter_;
+
+ kfree(pcmd->parmbuf);
+ kfree(pcmd);
+
+ if (padapter->registrypriv.mp_mode == 1)
+ padapter->mppriv.workparam.bcompleted = true;
+_func_exit_;
+}
+
+void rtw_readtssi_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ _func_enter_;
+
+ kfree(pcmd->parmbuf);
+ kfree(pcmd);
+
+ if (padapter->registrypriv.mp_mode == 1)
+ padapter->mppriv.workparam.bcompleted = true;
+_func_exit_;
+}
+
+u8 rtw_createbss_cmd(struct adapter *padapter)
+{
+ struct cmd_obj *pcmd;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_bssid_ex *pdev_network = &padapter->registrypriv.dev_network;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ rtw_led_control(padapter, LED_CTL_START_TO_LINK);
+
+ if (pmlmepriv->assoc_ssid.SsidLength == 0)
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for Any SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
+ else
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
+
+ pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ _rtw_init_listhead(&pcmd->list);
+ pcmd->cmdcode = _CreateBss_CMD_;
+ pcmd->parmbuf = (unsigned char *)pdev_network;
+ pcmd->cmdsz = get_wlan_bssid_ex_sz((struct wlan_bssid_ex *)pdev_network);
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+ pdev_network->Length = pcmd->cmdsz;
+ res = rtw_enqueue_cmd(pcmdpriv, pcmd);
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_createbss_cmd_ex(struct adapter *padapter, unsigned char *pbss, unsigned int sz)
+{
+ struct cmd_obj *pcmd;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ _rtw_init_listhead(&pcmd->list);
+ pcmd->cmdcode = GEN_CMD_CODE(_CreateBss);
+ pcmd->parmbuf = pbss;
+ pcmd->cmdsz = sz;
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+
+ res = rtw_enqueue_cmd(pcmdpriv, pcmd);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
+{
+ u8 res = _SUCCESS;
+ uint t_len = 0;
+ struct wlan_bssid_ex *psecnetwork;
+ struct cmd_obj *pcmd;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct qos_priv *pqospriv = &pmlmepriv->qospriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+ enum ndis_802_11_network_infra ndis_network_mode = pnetwork->network.InfrastructureMode;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+_func_enter_;
+
+ rtw_led_control(padapter, LED_CTL_START_TO_LINK);
+
+ if (pmlmepriv->assoc_ssid.SsidLength == 0) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, ("+Join cmd: Any SSid\n"));
+ } else {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.Ssid));
+ }
+
+ pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd == NULL) {
+ res = _FAIL;
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd: memory allocate for cmd_obj fail!!!\n"));
+ goto exit;
+ }
+ /* the IEs field uses a fixed-size buffer */
+ t_len = sizeof(struct wlan_bssid_ex);
+
+
+ /* for hidden ap to set fw_state here */
+ if (!check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE)) {
+ switch (ndis_network_mode) {
+ case Ndis802_11IBSS:
+ set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
+ break;
+ case Ndis802_11Infrastructure:
+ set_fwstate(pmlmepriv, WIFI_STATION_STATE);
+ break;
+ case Ndis802_11APMode:
+ case Ndis802_11AutoUnknown:
+ case Ndis802_11InfrastructureMax:
+ break;
+ }
+ }
+
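+ /*
+ * sec_bss holds a private copy of the target BSS; its IE buffer is
+ * rebuilt below with the security (and, optionally, WMM and HT) IEs
+ * before the join command is queued.
+ */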
+ psecnetwork = (struct wlan_bssid_ex *)&psecuritypriv->sec_bss;
+ if (psecnetwork == NULL) {
+ if (pcmd != NULL)
+ kfree(pcmd);
+
+ res = _FAIL;
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd :psecnetwork == NULL!!!\n"));
+
+ goto exit;
+ }
+
+ _rtw_memset(psecnetwork, 0, t_len);
+
+ memcpy(psecnetwork, &pnetwork->network, get_wlan_bssid_ex_sz(&pnetwork->network));
+
+ psecuritypriv->authenticator_ie[0] = (unsigned char)psecnetwork->IELength;
+
+ if ((psecnetwork->IELength-12) < (256-1)) {
+ memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], psecnetwork->IELength-12);
+ } else {
+ memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->IEs[12], (256-1));
+ }
+
+ psecnetwork->IELength = 0;
+ /* Added by Albert 2009/02/18 */
+ /* If the driver is not associating by bssid, copy the connecting AP's */
+ /* MAC address into assoc_bssid so that the driver still has the bssid */
+ /* information for PMKID list searching. */
+
+ if (!pmlmepriv->assoc_by_bssid)
+ memcpy(&pmlmepriv->assoc_bssid[0], &pnetwork->network.MacAddress[0], ETH_ALEN);
+
+ psecnetwork->IELength = rtw_restruct_sec_ie(padapter, &pnetwork->network.IEs[0], &psecnetwork->IEs[0], pnetwork->network.IELength);
+
+
+ pqospriv->qos_option = 0;
+
+ if (pregistrypriv->wmm_enable) {
+ u32 tmp_len;
+
+ tmp_len = rtw_restruct_wmm_ie(padapter, &pnetwork->network.IEs[0], &psecnetwork->IEs[0], pnetwork->network.IELength, psecnetwork->IELength);
+
+ if (psecnetwork->IELength != tmp_len) {
+ psecnetwork->IELength = tmp_len;
+ pqospriv->qos_option = 1; /* There is WMM IE in this corresp. beacon */
+ } else {
+ pqospriv->qos_option = 0;/* There is no WMM IE in this corresp. beacon */
+ }
+ }
+
+ phtpriv->ht_option = false;
+ if (pregistrypriv->ht_enable) {
+ /* Added by Albert 2010/06/23 */
+ /* For WEP or TKIP, connect in b/g mode (no HT IE) to avoid some IOT issues. */
+ /* Especially for Realtek 8192u SoftAP. */
+ if ((padapter->securitypriv.dot11PrivacyAlgrthm != _WEP40_) &&
+ (padapter->securitypriv.dot11PrivacyAlgrthm != _WEP104_) &&
+ (padapter->securitypriv.dot11PrivacyAlgrthm != _TKIP_)) {
+ /* rtw_restructure_ht_ie */
+ rtw_restructure_ht_ie(padapter, &pnetwork->network.IEs[0], &psecnetwork->IEs[0],
+ pnetwork->network.IELength, &psecnetwork->IELength);
+ }
+ }
+
+ pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pnetwork->network.IEs, pnetwork->network.IELength);
+
+ if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_TENDA)
+ padapter->pwrctrlpriv.smart_ps = 0;
+ else
+ padapter->pwrctrlpriv.smart_ps = padapter->registrypriv.smart_ps;
+
+ DBG_88E("%s: smart_ps =%d\n", __func__, padapter->pwrctrlpriv.smart_ps);
+
+ pcmd->cmdsz = get_wlan_bssid_ex_sz(psecnetwork);/* get cmdsz before endian conversion */
+
+ _rtw_init_listhead(&pcmd->list);
+ pcmd->cmdcode = _JoinBss_CMD_;/* GEN_CMD_CODE(_JoinBss) */
+ pcmd->parmbuf = (unsigned char *)psecnetwork;
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+
+ res = rtw_enqueue_cmd(pcmdpriv, pcmd);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueue) /* for sta_mode */
+{
+ struct cmd_obj *cmdobj = NULL;
+ struct disconnect_parm *param = NULL;
+ struct cmd_priv *cmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_disassoc_cmd\n"));
+
+ /* prepare cmd parameter */
+ param = (struct disconnect_parm *)rtw_zmalloc(sizeof(*param));
+ if (param == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ param->deauth_timeout_ms = deauth_timeout_ms;
+
+ if (enqueue) {
+ /* need enqueue, prepare cmd_obj and enqueue */
+ cmdobj = (struct cmd_obj *)rtw_zmalloc(sizeof(*cmdobj));
+ if (cmdobj == NULL) {
+ res = _FAIL;
+ kfree(param);
+ goto exit;
+ }
+ init_h2fwcmd_w_parm_no_rsp(cmdobj, param, _DisConnect_CMD_);
+ res = rtw_enqueue_cmd(cmdpriv, cmdobj);
+ } else {
+ /* no need to enqueue, do the cmd hdl directly and free cmd parameter */
+ if (H2C_SUCCESS != disconnect_hdl(padapter, (u8 *)param))
+ res = _FAIL;
+ kfree(param);
+ }
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_setopmode_cmd(struct adapter *padapter, enum ndis_802_11_network_infra networktype)
+{
+ struct cmd_obj *ph2c;
+ struct setopmode_parm *psetop;
+
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = false;
+ goto exit;
+ }
+ psetop = (struct setopmode_parm *)rtw_zmalloc(sizeof(struct setopmode_parm));
+
+ if (psetop == NULL) {
+ kfree(ph2c);
+ res = false;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetop, _SetOpMode_CMD_);
+ psetop->mode = (u8)networktype;
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_setstakey_cmd(struct adapter *padapter, u8 *psta, u8 unicast_key)
+{
+ struct cmd_obj *ph2c;
+ struct set_stakey_parm *psetstakey_para;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct set_stakey_rsp *psetstakey_rsp = NULL;
+
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct sta_info *sta = (struct sta_info *)psta;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetstakey_para = (struct set_stakey_parm *)rtw_zmalloc(sizeof(struct set_stakey_parm));
+ if (psetstakey_para == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetstakey_rsp = (struct set_stakey_rsp *)rtw_zmalloc(sizeof(struct set_stakey_rsp));
+ if (psetstakey_rsp == NULL) {
+ kfree(ph2c);
+ kfree(psetstakey_para);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
+ ph2c->rsp = (u8 *)psetstakey_rsp;
+ ph2c->rspsz = sizeof(struct set_stakey_rsp);
+
+ memcpy(psetstakey_para->addr, sta->hwaddr, ETH_ALEN);
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
+ psetstakey_para->algorithm = (unsigned char) psecuritypriv->dot11PrivacyAlgrthm;
+ else
+ GET_ENCRY_ALGO(psecuritypriv, sta, psetstakey_para->algorithm, false);
+
+ if (unicast_key)
+ memcpy(&psetstakey_para->key, &sta->dot118021x_UncstKey, 16);
+ else
+ memcpy(&psetstakey_para->key, &psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey, 16);
+
+ /* jeff: set this because at least the sw key is ready */
+ padapter->securitypriv.busetkipkey = true;
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue)
+{
+ struct cmd_obj *ph2c;
+ struct set_stakey_parm *psetstakey_para;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct set_stakey_rsp *psetstakey_rsp = NULL;
+ struct sta_info *sta = (struct sta_info *)psta;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ if (!enqueue) {
+ clear_cam_entry(padapter, entry);
+ } else {
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetstakey_para = (struct set_stakey_parm *)rtw_zmalloc(sizeof(struct set_stakey_parm));
+ if (psetstakey_para == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetstakey_rsp = (struct set_stakey_rsp *)rtw_zmalloc(sizeof(struct set_stakey_rsp));
+ if (psetstakey_rsp == NULL) {
+ kfree(ph2c);
+ kfree(psetstakey_para);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
+ ph2c->rsp = (u8 *)psetstakey_rsp;
+ ph2c->rspsz = sizeof(struct set_stakey_rsp);
+
+ memcpy(psetstakey_para->addr, sta->hwaddr, ETH_ALEN);
+
+ psetstakey_para->algorithm = _NO_PRIVACY_;
+
+ psetstakey_para->id = entry;
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+ }
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_setrttbl_cmd(struct adapter *padapter, struct setratable_parm *prate_table)
+{
+ struct cmd_obj *ph2c;
+ struct setratable_parm *psetrttblparm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ psetrttblparm = (struct setratable_parm *)rtw_zmalloc(sizeof(struct setratable_parm));
+
+ if (psetrttblparm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetrttblparm, GEN_CMD_CODE(_SetRaTable));
+
+ memcpy(psetrttblparm, prate_table, sizeof(struct setratable_parm));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+_func_exit_;
+ return res;
+}
+
+u8 rtw_getrttbl_cmd(struct adapter *padapter, struct getratable_rsp *pval)
+{
+ struct cmd_obj *ph2c;
+ struct getratable_parm *pgetrttblparm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ pgetrttblparm = (struct getratable_parm *)rtw_zmalloc(sizeof(struct getratable_parm));
+
+ if (pgetrttblparm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+/* init_h2fwcmd_w_parm_no_rsp(ph2c, psetrttblparm, GEN_CMD_CODE(_SetRaTable)); */
+
+ _rtw_init_listhead(&ph2c->list);
+ ph2c->cmdcode = GEN_CMD_CODE(_GetRaTable);
+ ph2c->parmbuf = (unsigned char *)pgetrttblparm;
+ ph2c->cmdsz = sizeof(struct getratable_parm);
+ ph2c->rsp = (u8 *)pval;
+ ph2c->rspsz = sizeof(struct getratable_rsp);
+
+ pgetrttblparm->rsvd = 0x0;
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+_func_exit_;
+ return res;
+}
+
+u8 rtw_setassocsta_cmd(struct adapter *padapter, u8 *mac_addr)
+{
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct cmd_obj *ph2c;
+ struct set_assocsta_parm *psetassocsta_para;
+ struct set_stakey_rsp *psetassocsta_rsp = NULL;
+
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetassocsta_para = (struct set_assocsta_parm *)rtw_zmalloc(sizeof(struct set_assocsta_parm));
+ if (psetassocsta_para == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetassocsta_rsp = (struct set_stakey_rsp *)rtw_zmalloc(sizeof(struct set_assocsta_rsp));
+ if (psetassocsta_rsp == NULL) {
+ kfree(ph2c);
+ kfree(psetassocsta_para);
+ return _FAIL;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetassocsta_para, _SetAssocSta_CMD_);
+ ph2c->rsp = (u8 *)psetassocsta_rsp;
+ ph2c->rspsz = sizeof(struct set_assocsta_rsp);
+
+ memcpy(psetassocsta_para->addr, mac_addr, ETH_ALEN);
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
+{
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct cmd_obj *ph2c;
+ struct addBaReq_parm *paddbareq_parm;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ paddbareq_parm = (struct addBaReq_parm *)rtw_zmalloc(sizeof(struct addBaReq_parm));
+ if (paddbareq_parm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ paddbareq_parm->tid = tid;
+ memcpy(paddbareq_parm->addr, addr, ETH_ALEN);
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, paddbareq_parm, GEN_CMD_CODE(_AddBAReq));
+
+ /* DBG_88E("rtw_addbareq_cmd, tid =%d\n", tid); */
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = DYNAMIC_CHK_WK_CID;
+ pdrvextra_cmd_parm->type_size = 0;
+ pdrvextra_cmd_parm->pbuf = (u8 *)padapter;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+_func_exit_;
+ return res;
+}
+
+u8 rtw_set_ch_cmd(struct adapter *padapter, u8 ch, u8 bw, u8 ch_offset, u8 enqueue)
+{
+ struct cmd_obj *pcmdobj;
+ struct set_ch_parm *set_ch_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ DBG_88E(FUNC_NDEV_FMT" ch:%u, bw:%u, ch_offset:%u\n",
+ FUNC_NDEV_ARG(padapter->pnetdev), ch, bw, ch_offset);
+
+ /* check input parameter */
+
+ /* prepare cmd parameter */
+ set_ch_parm = (struct set_ch_parm *)rtw_zmalloc(sizeof(*set_ch_parm));
+ if (set_ch_parm == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ set_ch_parm->ch = ch;
+ set_ch_parm->bw = bw;
+ set_ch_parm->ch_offset = ch_offset;
+
+ if (enqueue) {
+ /* need enqueue, prepare cmd_obj and enqueue */
+ pcmdobj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmdobj == NULL) {
+ kfree(set_ch_parm);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(pcmdobj, set_ch_parm, GEN_CMD_CODE(_SetChannel));
+ res = rtw_enqueue_cmd(pcmdpriv, pcmdobj);
+ } else {
+ /* no need to enqueue, do the cmd hdl directly and free cmd parameter */
+ if (H2C_SUCCESS != set_ch_hdl(padapter, (u8 *)set_ch_parm))
+ res = _FAIL;
+
+ kfree(set_ch_parm);
+ }
+
+ /* do something based on res... */
+
+exit:
+
+ DBG_88E(FUNC_NDEV_FMT" res:%u\n", FUNC_NDEV_ARG(padapter->pnetdev), res);
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue)
+{
+ struct cmd_obj *pcmdobj;
+ struct SetChannelPlan_param *setChannelPlan_param;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_set_chplan_cmd\n"));
+
+ /* check input parameter */
+ if (!rtw_is_channel_plan_valid(chplan)) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ /* prepare cmd parameter */
+ setChannelPlan_param = (struct SetChannelPlan_param *)rtw_zmalloc(sizeof(struct SetChannelPlan_param));
+ if (setChannelPlan_param == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ setChannelPlan_param->channel_plan = chplan;
+
+ if (enqueue) {
+ /* need enqueue, prepare cmd_obj and enqueue */
+ pcmdobj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmdobj == NULL) {
+ kfree(setChannelPlan_param);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(pcmdobj, setChannelPlan_param, GEN_CMD_CODE(_SetChannelPlan));
+ res = rtw_enqueue_cmd(pcmdpriv, pcmdobj);
+ } else {
+ /* no need to enqueue, do the cmd hdl directly and free cmd parameter */
+ if (H2C_SUCCESS != set_chplan_hdl(padapter, (unsigned char *)setChannelPlan_param))
+ res = _FAIL;
+
+ kfree(setChannelPlan_param);
+ }
+
+ /* do something based on res... */
+ if (res == _SUCCESS)
+ padapter->mlmepriv.ChannelPlan = chplan;
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_led_blink_cmd(struct adapter *padapter, struct LED_871x *pLed)
+{
+ struct cmd_obj *pcmdobj;
+ struct LedBlink_param *ledBlink_param;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_led_blink_cmd\n"));
+
+ pcmdobj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmdobj == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ ledBlink_param = (struct LedBlink_param *)rtw_zmalloc(sizeof(struct LedBlink_param));
+ if (ledBlink_param == NULL) {
+ kfree(pcmdobj);
+ res = _FAIL;
+ goto exit;
+ }
+
+ ledBlink_param->pLed = pLed;
+
+ init_h2fwcmd_w_parm_no_rsp(pcmdobj, ledBlink_param, GEN_CMD_CODE(_LedBlink));
+ res = rtw_enqueue_cmd(pcmdpriv, pcmdobj);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_set_csa_cmd(struct adapter *padapter, u8 new_ch_no)
+{
+ struct cmd_obj *pcmdobj;
+ struct SetChannelSwitch_param *setChannelSwitch_param;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+rtw_set_csa_cmd\n"));
+
+ pcmdobj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmdobj == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ setChannelSwitch_param = (struct SetChannelSwitch_param *)rtw_zmalloc(sizeof(struct SetChannelSwitch_param));
+ if (setChannelSwitch_param == NULL) {
+ kfree(pcmdobj);
+ res = _FAIL;
+ goto exit;
+ }
+
+ setChannelSwitch_param->new_ch_no = new_ch_no;
+
+ init_h2fwcmd_w_parm_no_rsp(pcmdobj, setChannelSwitch_param, GEN_CMD_CODE(_SetChannelSwitch));
+ res = rtw_enqueue_cmd(pcmdpriv, pcmdobj);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_tdls_cmd(struct adapter *padapter, u8 *addr, u8 option)
+{
+ return _SUCCESS;
+}
+
+static void traffic_status_watchdog(struct adapter *padapter)
+{
+ u8 bEnterPS;
+ u8 bBusyTraffic = false, bTxBusyTraffic = false, bRxBusyTraffic = false;
+ u8 bHigherBusyTraffic = false, bHigherBusyRxTraffic = false, bHigherBusyTxTraffic = false;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ /* */
+ /* Determine if our traffic is busy now */
+ /* */
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > 100 ||
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > 100) {
+ bBusyTraffic = true;
+
+ if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > pmlmepriv->LinkDetectInfo.NumTxOkInPeriod)
+ bRxBusyTraffic = true;
+ else
+ bTxBusyTraffic = true;
+ }
+
+ /* Higher Tx/Rx data. */
+ if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > 4000 ||
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > 4000) {
+ bHigherBusyTraffic = true;
+
+ if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > pmlmepriv->LinkDetectInfo.NumTxOkInPeriod)
+ bHigherBusyRxTraffic = true;
+ else
+ bHigherBusyTxTraffic = true;
+ }
+
+ /* check traffic for powersaving. */
+ if (((pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod + pmlmepriv->LinkDetectInfo.NumTxOkInPeriod) > 8) ||
+ (pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod > 2))
+ bEnterPS = false;
+ else
+ bEnterPS = true;
+
+ /* LeisurePS only work in infra mode. */
+ if (bEnterPS)
+ LPS_Enter(padapter);
+ else
+ LPS_Leave(padapter);
+ } else {
+ LPS_Leave(padapter);
+ }
+
+ pmlmepriv->LinkDetectInfo.NumRxOkInPeriod = 0;
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod = 0;
+ pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod = 0;
+ pmlmepriv->LinkDetectInfo.bBusyTraffic = bBusyTraffic;
+ pmlmepriv->LinkDetectInfo.bTxBusyTraffic = bTxBusyTraffic;
+ pmlmepriv->LinkDetectInfo.bRxBusyTraffic = bRxBusyTraffic;
+ pmlmepriv->LinkDetectInfo.bHigherBusyTraffic = bHigherBusyTraffic;
+ pmlmepriv->LinkDetectInfo.bHigherBusyRxTraffic = bHigherBusyRxTraffic;
+ pmlmepriv->LinkDetectInfo.bHigherBusyTxTraffic = bHigherBusyTxTraffic;
+}
+
+void dynamic_chk_wk_hdl(struct adapter *padapter, u8 *pbuf, int sz)
+{
+ struct mlme_priv *pmlmepriv;
+
+ padapter = (struct adapter *)pbuf;
+ pmlmepriv = &(padapter->mlmepriv);
+
+#ifdef CONFIG_88EU_AP_MODE
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
+ expire_timeout_chk(padapter);
+#endif
+
+ rtw_hal_sreset_xmit_status_check(padapter);
+
+ linked_status_chk(padapter);
+ traffic_status_watchdog(padapter);
+
+ rtw_hal_dm_watchdog(padapter);
+}
+
+static void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ u8 mstatus;
+
+_func_enter_;
+
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true))
+ return;
+
+ switch (lps_ctrl_type) {
+ case LPS_CTRL_SCAN:
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ /* connect */
+ LPS_Leave(padapter);
+ }
+ break;
+ case LPS_CTRL_JOINBSS:
+ LPS_Leave(padapter);
+ break;
+ case LPS_CTRL_CONNECT:
+ mstatus = 1;/* connect */
+ /* Reset LPS Setting */
+ padapter->pwrctrlpriv.LpsIdleCount = 0;
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus));
+ break;
+ case LPS_CTRL_DISCONNECT:
+ mstatus = 0;/* disconnect */
+ LPS_Leave(padapter);
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus));
+ break;
+ case LPS_CTRL_SPECIAL_PACKET:
+ /* DBG_88E("LPS_CTRL_SPECIAL_PACKET\n"); */
+ pwrpriv->DelayLPSLastTimeStamp = rtw_get_current_time();
+ LPS_Leave(padapter);
+ break;
+ case LPS_CTRL_LEAVE:
+ LPS_Leave(padapter);
+ break;
+ default:
+ break;
+ }
+
+_func_exit_;
+}
+
+u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ /* struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv; */
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ /* if (!pwrctrlpriv->bLeisurePs) */
+ /* return res; */
+
+ if (enqueue) {
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = LPS_CTRL_WK_CID;
+ pdrvextra_cmd_parm->type_size = lps_ctrl_type;
+ pdrvextra_cmd_parm->pbuf = NULL;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+ } else {
+ lps_ctrl_wk_hdl(padapter, lps_ctrl_type);
+ }
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+static void rpt_timer_setting_wk_hdl(struct adapter *padapter, u16 min_time)
+{
+ rtw_hal_set_hwreg(padapter, HW_VAR_RPT_TIMER_SETTING, (u8 *)(&min_time));
+}
+
+u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ u8 res = _SUCCESS;
+
+_func_enter_;
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = RTP_TIMER_CFG_WK_CID;
+ pdrvextra_cmd_parm->type_size = min_time;
+ pdrvextra_cmd_parm->pbuf = NULL;
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+static void antenna_select_wk_hdl(struct adapter *padapter, u8 antenna)
+{
+ rtw_hal_set_hwreg(padapter, HW_VAR_ANTENNA_DIVERSITY_SELECT, (u8 *)(&antenna));
+}
+
+u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 support_ant_div;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+ rtw_hal_get_def_var(padapter, HAL_DEF_IS_SUPPORT_ANT_DIV, &support_ant_div);
+ if (!support_ant_div)
+ return res;
+
+ if (enqueue) {
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = ANT_SELECT_WK_CID;
+ pdrvextra_cmd_parm->type_size = antenna;
+ pdrvextra_cmd_parm->pbuf = NULL;
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+ } else {
+ antenna_select_wk_hdl(padapter, antenna);
+ }
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+static void power_saving_wk_hdl(struct adapter *padapter, u8 *pbuf, int sz)
+{
+ rtw_ps_processor(padapter);
+}
+
+#ifdef CONFIG_88EU_P2P
+u8 p2p_protocol_wk_cmd(struct adapter *padapter, int intCmdType)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return res;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = P2P_PROTO_WK_CID;
+ pdrvextra_cmd_parm->type_size = intCmdType; /* As the command type. */
+ pdrvextra_cmd_parm->pbuf = NULL; /* Must be NULL here */
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+#endif /* CONFIG_88EU_P2P */
+
+u8 rtw_ps_cmd(struct adapter *padapter)
+{
+ struct cmd_obj *ppscmd;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ u8 res = _SUCCESS;
+_func_enter_;
+
+ ppscmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ppscmd == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree(ppscmd);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = POWER_SAVING_CTRL_WK_CID;
+ pdrvextra_cmd_parm->pbuf = NULL;
+ init_h2fwcmd_w_parm_no_rsp(ppscmd, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ppscmd);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+#ifdef CONFIG_88EU_AP_MODE
+
+static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
+{
+ int cnt = 0;
+ struct sta_info *psta_bmc;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ psta_bmc = rtw_get_bcmc_stainfo(padapter);
+ if (!psta_bmc)
+ return;
+
+ if (psta_bmc->sleepq_len == 0) {
+ u8 val = 0;
+
+ /* while ((rtw_read32(padapter, 0x414)&0x00ffff00)!= 0) */
+ /* while ((rtw_read32(padapter, 0x414)&0x0000ff00)!= 0) */
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &val);
+
+ while (!val) {
+ rtw_msleep_os(100);
+
+ cnt++;
+
+ if (cnt > 10)
+ break;
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &val);
+ }
+
+ if (cnt <= 10) {
+ pstapriv->tim_bitmap &= ~BIT(0);
+ pstapriv->sta_dz_bitmap &= ~BIT(0);
+
+ update_beacon(padapter, _TIM_IE_, NULL, false);
+ } else { /* re check again */
+ rtw_chk_hi_queue_cmd(padapter);
+ }
+ }
+}
+
+u8 rtw_chk_hi_queue_cmd(struct adapter *padapter)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = CHECK_HIQ_WK_CID;
+ pdrvextra_cmd_parm->type_size = 0;
+ pdrvextra_cmd_parm->pbuf = NULL;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+exit:
+ return res;
+}
+#endif
+
+u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = C2H_WK_CID;
+ pdrvextra_cmd_parm->type_size = c2h_evt ? 16 : 0;
+ pdrvextra_cmd_parm->pbuf = c2h_evt;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+ return res;
+}
+
+static s32 c2h_evt_hdl(struct adapter *adapter, struct c2h_evt_hdr *c2h_evt, c2h_id_filter filter)
+{
+ s32 ret = _FAIL;
+ u8 buf[16];
+
+ if (!c2h_evt) {
+ /* No c2h event in cmd_obj, read c2h event before handling*/
+ if (c2h_evt_read(adapter, buf) == _SUCCESS) {
+ c2h_evt = (struct c2h_evt_hdr *)buf;
+
+ if (filter && filter(c2h_evt->id) == false)
+ goto exit;
+
+ ret = rtw_hal_c2h_handler(adapter, c2h_evt);
+ }
+ } else {
+ if (filter && filter(c2h_evt->id) == false)
+ goto exit;
+
+ ret = rtw_hal_c2h_handler(adapter, c2h_evt);
+ }
+exit:
+ return ret;
+}
+
+static void c2h_wk_callback(struct work_struct *work)
+{
+ struct evt_priv *evtpriv = container_of(work, struct evt_priv, c2h_wk);
+ struct adapter *adapter = container_of(evtpriv, struct adapter, evtpriv);
+ struct c2h_evt_hdr *c2h_evt;
+ c2h_id_filter ccx_id_filter = rtw_hal_c2h_id_filter_ccx(adapter);
+
+ evtpriv->c2h_wk_alive = true;
+
+ while (!rtw_cbuf_empty(evtpriv->c2h_queue)) {
+ if ((c2h_evt = (struct c2h_evt_hdr *)rtw_cbuf_pop(evtpriv->c2h_queue)) != NULL) {
+ /* This C2H event is read, clear it */
+ c2h_evt_clear(adapter);
+ } else if ((c2h_evt = (struct c2h_evt_hdr *)rtw_malloc(16)) != NULL) {
+ /* This C2H event is not read, read & clear now */
+ if (c2h_evt_read(adapter, (u8 *)c2h_evt) != _SUCCESS)
+ continue;
+ }
+
+ /* Special pointer to trigger c2h_evt_clear only */
+ if ((void *)c2h_evt == (void *)evtpriv)
+ continue;
+
+ if (!c2h_evt_exist(c2h_evt)) {
+ kfree(c2h_evt);
+ continue;
+ }
+
+ if (ccx_id_filter(c2h_evt->id) == true) {
+ /* Handle CCX report here */
+ rtw_hal_c2h_handler(adapter, c2h_evt);
+ kfree(c2h_evt);
+ } else {
+#ifdef CONFIG_88EU_P2P
+ /* Enqueue into cmd_thread for others */
+ rtw_c2h_wk_cmd(adapter, (u8 *)c2h_evt);
+#endif
+ }
+ }
+
+ evtpriv->c2h_wk_alive = false;
+}
+
+u8 rtw_drvextra_cmd_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ struct drvextra_cmd_parm *pdrvextra_cmd;
+
+ if (!pbuf)
+ return H2C_PARAMETERS_ERROR;
+
+ pdrvextra_cmd = (struct drvextra_cmd_parm *)pbuf;
+
+ switch (pdrvextra_cmd->ec_id) {
+ case DYNAMIC_CHK_WK_CID:
+ dynamic_chk_wk_hdl(padapter, pdrvextra_cmd->pbuf, pdrvextra_cmd->type_size);
+ break;
+ case POWER_SAVING_CTRL_WK_CID:
+ power_saving_wk_hdl(padapter, pdrvextra_cmd->pbuf, pdrvextra_cmd->type_size);
+ break;
+ case LPS_CTRL_WK_CID:
+ lps_ctrl_wk_hdl(padapter, (u8)pdrvextra_cmd->type_size);
+ break;
+ case RTP_TIMER_CFG_WK_CID:
+ rpt_timer_setting_wk_hdl(padapter, pdrvextra_cmd->type_size);
+ break;
+ case ANT_SELECT_WK_CID:
+ antenna_select_wk_hdl(padapter, pdrvextra_cmd->type_size);
+ break;
+#ifdef CONFIG_88EU_P2P
+ case P2P_PS_WK_CID:
+ p2p_ps_wk_hdl(padapter, pdrvextra_cmd->type_size);
+ break;
+ case P2P_PROTO_WK_CID:
+ /* Commented by Albert 2011/07/01 */
+ /* I used the type_size as the type command */
+ p2p_protocol_wk_hdl(padapter, pdrvextra_cmd->type_size);
+ break;
+#endif
+#ifdef CONFIG_88EU_AP_MODE
+ case CHECK_HIQ_WK_CID:
+ rtw_chk_hi_queue_hdl(padapter);
+ break;
+#endif /* CONFIG_88EU_AP_MODE */
+ case C2H_WK_CID:
+ c2h_evt_hdl(padapter, (struct c2h_evt_hdr *)pdrvextra_cmd->pbuf, NULL);
+ break;
+ default:
+ break;
+ }
+
+ if (pdrvextra_cmd->pbuf && pdrvextra_cmd->type_size > 0)
+ kfree(pdrvextra_cmd->pbuf);
+
+ return H2C_SUCCESS;
+}
+
+void rtw_survey_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+_func_enter_;
+
+ if (pcmd->res == H2C_DROPPED) {
+ /* TODO: cancel timer and do timeout handler directly... */
+ /* need to make the timeout handler OS independent */
+ _set_timer(&pmlmepriv->scan_to_timer, 1);
+ } else if (pcmd->res != H2C_SUCCESS) {
+ _set_timer(&pmlmepriv->scan_to_timer, 1);
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n ********Error: MgntActrtw_set_802_11_bssid_LIST_SCAN Fail ************\n\n."));
+ }
+
+ /* free cmd */
+ rtw_free_cmd_obj(pcmd);
+
+_func_exit_;
+}
+void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ unsigned long irqL;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+_func_enter_;
+
+ if (pcmd->res != H2C_SUCCESS) {
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ set_fwstate(pmlmepriv, _FW_LINKED);
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n ***Error: disconnect_cmd_callback Fail ***\n."));
+
+ goto exit;
+ } else /* clear bridge database */
+ nat25_db_cleanup(padapter);
+
+ /* free cmd */
+ rtw_free_cmd_obj(pcmd);
+
+exit:
+
+_func_exit_;
+}
+
+void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+_func_enter_;
+
+ if (pcmd->res == H2C_DROPPED) {
+ /* TODO: cancel timer and do timeout handler directly... */
+ /* need to make the timeout handler OS independent */
+ _set_timer(&pmlmepriv->assoc_timer, 1);
+ } else if (pcmd->res != H2C_SUCCESS) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("********Error:rtw_select_and_join_from_scanned_queue Wait Sema Fail ************\n"));
+ _set_timer(&pmlmepriv->assoc_timer, 1);
+ }
+
+ rtw_free_cmd_obj(pcmd);
+
+_func_exit_;
+}
+
+void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ unsigned long irqL;
+ u8 timer_cancelled;
+ struct sta_info *psta = NULL;
+ struct wlan_network *pwlan = NULL;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)pcmd->parmbuf;
+ struct wlan_network *tgt_network = &(pmlmepriv->cur_network);
+
+_func_enter_;
+
+ if (pcmd->res != H2C_SUCCESS) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n ********Error: rtw_createbss_cmd_callback Fail ************\n\n."));
+ _set_timer(&pmlmepriv->assoc_timer, 1);
+ }
+
+ _cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled);
+
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ psta = rtw_get_stainfo(&padapter->stapriv, pnetwork->MacAddress);
+ if (!psta) {
+ psta = rtw_alloc_stainfo(&padapter->stapriv, pnetwork->MacAddress);
+ if (psta == NULL) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nCan't alloc sta_info when createbss_cmd_callback\n"));
+ goto createbss_cmd_fail;
+ }
+ }
+
+ rtw_indicate_connect(padapter);
+ } else {
+ unsigned long irqL;
+
+ pwlan = _rtw_alloc_network(pmlmepriv);
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ if (pwlan == NULL) {
+ pwlan = rtw_get_oldest_wlan_network(&pmlmepriv->scanned_queue);
+ if (pwlan == NULL) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\n Error: can't get pwlan in rtw_createbss_cmd_callback\n"));
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ goto createbss_cmd_fail;
+ }
+ pwlan->last_scanned = rtw_get_current_time();
+ } else {
+ rtw_list_insert_tail(&(pwlan->list), &pmlmepriv->scanned_queue.queue);
+ }
+
+ pnetwork->Length = get_wlan_bssid_ex_sz(pnetwork);
+ memcpy(&(pwlan->network), pnetwork, pnetwork->Length);
+
+ memcpy(&tgt_network->network, pnetwork, (get_wlan_bssid_ex_sz(pnetwork)));
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ /* we will set _FW_LINKED when there is one more sta to join us (rtw_stassoc_event_callback) */
+ }
+
+createbss_cmd_fail:
+
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+
+ rtw_free_cmd_obj(pcmd);
+
+_func_exit_;
+}
+
+void rtw_setstaKey_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct set_stakey_rsp *psetstakey_rsp = (struct set_stakey_rsp *)(pcmd->rsp);
+ struct sta_info *psta = rtw_get_stainfo(pstapriv, psetstakey_rsp->addr);
+
+_func_enter_;
+
+ if (psta == NULL) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nERROR: rtw_setstaKey_cmdrsp_callback => can't get sta_info\n\n"));
+ goto exit;
+ }
+exit:
+ rtw_free_cmd_obj(pcmd);
+_func_exit_;
+}
+
+void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
+{
+ unsigned long irqL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct set_assocsta_parm *passocsta_parm = (struct set_assocsta_parm *)(pcmd->parmbuf);
+ struct set_assocsta_rsp *passocsta_rsp = (struct set_assocsta_rsp *)(pcmd->rsp);
+ struct sta_info *psta = rtw_get_stainfo(pstapriv, passocsta_parm->addr);
+
+_func_enter_;
+
+ if (psta == NULL) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nERROR: setassocsta_cmdrsp_callback => can't get sta_info\n\n"));
+ goto exit;
+ }
+
+ psta->aid = passocsta_rsp->cam_id;
+ psta->mac_id = passocsta_rsp->cam_id;
+
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+
+ if ((check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) && (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true))
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+
+ set_fwstate(pmlmepriv, _FW_LINKED);
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+
+exit:
+ rtw_free_cmd_obj(pcmd);
+
+_func_exit_;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c
new file mode 100644
index 00000000000..0fe5f5de54a
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_debug.c
@@ -0,0 +1,948 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_DEBUG_C_
+
+#include <rtw_debug.h>
+#include <rtw_version.h>
+
+int proc_get_drv_version(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ int len = 0;
+
+ len += snprintf(page + len, count - len, "%s\n", DRIVERVERSION);
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_write_reg(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ *eof = 1;
+ return 0;
+}
+
+int proc_set_write_reg(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ char tmp[32];
+ u32 addr, val, len;
+
+ if (count < 3) {
+ DBG_88E("argument size is less than 3\n");
+ return -EFAULT;
+ }
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ int num = sscanf(tmp, "%x %x %x", &addr, &val, &len);
+
+ if (num != 3) {
+ DBG_88E("invalid write_reg parameter!\n");
+ return count;
+ }
+ switch (len) {
+ case 1:
+ rtw_write8(padapter, addr, (u8)val);
+ break;
+ case 2:
+ rtw_write16(padapter, addr, (u16)val);
+ break;
+ case 4:
+ rtw_write32(padapter, addr, val);
+ break;
+ default:
+ DBG_88E("error write length =%d", len);
+ break;
+ }
+ }
+ return count;
+}
+
+static u32 proc_get_read_addr = 0xeeeeeeee;
+static u32 proc_get_read_len = 0x4;
+
+int proc_get_read_reg(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ int len = 0;
+
+ if (proc_get_read_addr == 0xeeeeeeee) {
+ *eof = 1;
+ return len;
+ }
+
+ switch (proc_get_read_len) {
+ case 1:
+ len += snprintf(page + len, count - len, "rtw_read8(0x%x)=0x%x\n", proc_get_read_addr, rtw_read8(padapter, proc_get_read_addr));
+ break;
+ case 2:
+ len += snprintf(page + len, count - len, "rtw_read16(0x%x)=0x%x\n", proc_get_read_addr, rtw_read16(padapter, proc_get_read_addr));
+ break;
+ case 4:
+ len += snprintf(page + len, count - len, "rtw_read32(0x%x)=0x%x\n", proc_get_read_addr, rtw_read32(padapter, proc_get_read_addr));
+ break;
+ default:
+ len += snprintf(page + len, count - len, "error read length=%d\n", proc_get_read_len);
+ break;
+ }
+
+ *eof = 1;
+ return len;
+}
+
+int proc_set_read_reg(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ char tmp[16];
+ u32 addr, len;
+
+ if (count < 2) {
+ DBG_88E("argument size is less than 2\n");
+ return -EFAULT;
+ }
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ int num = sscanf(tmp, "%x %x", &addr, &len);
+
+ if (num != 2) {
+ DBG_88E("invalid read_reg parameter!\n");
+ return count;
+ }
+
+ proc_get_read_addr = addr;
+
+ proc_get_read_len = len;
+ }
+
+ return count;
+}
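+
+/* Usage sketch for the two entries above (illustrative only; the actual proc
+ * path depends on how these entries are registered elsewhere in the driver,
+ * and the register offset 0x4c8 is just a placeholder):
+ *
+ *	echo "4c8 12345678 4" > .../write_reg	(address, value, length)
+ *	echo "4c8 4" > .../read_reg		(address, length)
+ *	cat .../read_reg			(prints rtw_read32(0x4c8)=...)
+ *
+ * Both parsers expect hexadecimal fields, matching the "%x" conversions used
+ * in proc_set_write_reg() and proc_set_read_reg().
+ */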
+
+int proc_get_fwstate(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ int len = 0;
+
+ len += snprintf(page + len, count - len, "fwstate=0x%x\n", get_fwstate(pmlmepriv));
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_sec_info(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+
+ int len = 0;
+
+ len += snprintf(page + len, count - len, "auth_alg=0x%x, enc_alg=0x%x, auth_type=0x%x, enc_type=0x%x\n",
+ psecuritypriv->dot11AuthAlgrthm, psecuritypriv->dot11PrivacyAlgrthm,
+ psecuritypriv->ndisauthtype, psecuritypriv->ndisencryptstatus);
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_mlmext_state(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ int len = 0;
+
+ len += snprintf(page + len, count - len, "pmlmeinfo->state=0x%x\n", pmlmeinfo->state);
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_qos_option(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ int len = 0;
+
+ len += snprintf(page + len, count - len, "qos_option=%d\n", pmlmepriv->qospriv.qos_option);
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_ht_option(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ int len = 0;
+ len += snprintf(page + len, count - len, "ht_option=%d\n", pmlmepriv->htpriv.ht_option);
+ *eof = 1;
+ return len;
+}
+
+int proc_get_rf_info(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ int len = 0;
+
+ len += snprintf(page + len, count - len, "cur_ch=%d, cur_bw=%d, cur_ch_offet=%d\n",
+ pmlmeext->cur_channel, pmlmeext->cur_bwmode, pmlmeext->cur_ch_offset);
+ *eof = 1;
+ return len;
+}
+
+int proc_get_ap_info(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct sta_info *psta;
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct wlan_network *cur_network = &(pmlmepriv->cur_network);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ int len = 0;
+
+ psta = rtw_get_stainfo(pstapriv, cur_network->network.MacAddress);
+ if (psta) {
+ int i;
+ struct recv_reorder_ctrl *preorder_ctrl;
+
+ len += snprintf(page + len, count - len, "SSID=%s\n", cur_network->network.Ssid.Ssid);
+ len += snprintf(page + len, count - len, "sta's macaddr:%pM\n", psta->hwaddr);
+ len += snprintf(page + len, count - len, "cur_channel=%d, cur_bwmode=%d, cur_ch_offset=%d\n", pmlmeext->cur_channel, pmlmeext->cur_bwmode, pmlmeext->cur_ch_offset);
+ len += snprintf(page + len, count - len, "rtsen=%d, cts2slef=%d\n", psta->rtsen, psta->cts2self);
+ len += snprintf(page + len, count - len, "state=0x%x, aid=%d, macid=%d, raid=%d\n", psta->state, psta->aid, psta->mac_id, psta->raid);
+ len += snprintf(page + len, count - len, "qos_en=%d, ht_en=%d, init_rate=%d\n", psta->qos_option, psta->htpriv.ht_option, psta->init_rate);
+ len += snprintf(page + len, count - len, "bwmode=%d, ch_offset=%d, sgi=%d\n", psta->htpriv.bwmode, psta->htpriv.ch_offset, psta->htpriv.sgi);
+ len += snprintf(page + len, count - len, "ampdu_enable = %d\n", psta->htpriv.ampdu_enable);
+ len += snprintf(page + len, count - len, "agg_enable_bitmap=%x, candidate_tid_bitmap=%x\n", psta->htpriv.agg_enable_bitmap, psta->htpriv.candidate_tid_bitmap);
+
+ for (i = 0; i < 16; i++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+ if (preorder_ctrl->enable)
+ len += snprintf(page + len, count - len, "tid=%d, indicate_seq=%d\n", i, preorder_ctrl->indicate_seq);
+ }
+ } else {
+ len += snprintf(page + len, count - len, "can't get sta's macaddr, cur_network's macaddr: %pM\n", cur_network->network.MacAddress);
+ }
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_adapter_state(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+
+ len += snprintf(page + len, count - len, "bSurpriseRemoved=%d, bDriverStopped=%d\n",
+ padapter->bSurpriseRemoved, padapter->bDriverStopped);
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_trx_info(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+ int len = 0;
+
+ len += snprintf(page + len, count - len, "free_xmitbuf_cnt=%d, free_xmitframe_cnt=%d, free_ext_xmitbuf_cnt=%d, free_recvframe_cnt=%d\n",
+ pxmitpriv->free_xmitbuf_cnt, pxmitpriv->free_xmitframe_cnt, pxmitpriv->free_xmit_extbuf_cnt, precvpriv->free_recvframe_cnt);
+ len += snprintf(page + len, count - len, "rx_urb_pending_cn=%d\n", precvpriv->rx_pending_cnt);
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_mac_reg_dump1(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+ int i, j = 1;
+
+ len += snprintf(page + len, count - len, "\n======= MAC REG =======\n");
+
+ for (i = 0x0; i < 0x300; i += 4) {
+ if (j%4 == 1)
+ len += snprintf(page + len, count - len, "0x%02x", i);
+ len += snprintf(page + len, count - len, " 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ len += snprintf(page + len, count - len, "\n");
+ }
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_mac_reg_dump2(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+ int i, j = 1;
+
+ len += snprintf(page + len, count - len, "\n======= MAC REG =======\n");
+ for (i = 0x300; i < 0x600; i += 4) {
+ if (j%4 == 1)
+ len += snprintf(page + len, count - len, "0x%02x", i);
+ len += snprintf(page + len, count - len, " 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ len += snprintf(page + len, count - len, "\n");
+ }
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_mac_reg_dump3(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+ int i, j = 1;
+
+ len += snprintf(page + len, count - len, "\n======= MAC REG =======\n");
+
+ for (i = 0x600; i < 0x800; i += 4) {
+ if (j%4 == 1)
+ len += snprintf(page + len, count - len, "0x%02x", i);
+ len += snprintf(page + len, count - len, " 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ len += snprintf(page + len, count - len, "\n");
+ }
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_bb_reg_dump1(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+ int i, j = 1;
+
+ len += snprintf(page + len, count - len, "\n======= BB REG =======\n");
+ for (i = 0x800; i < 0xB00; i += 4) {
+ if (j%4 == 1)
+ len += snprintf(page + len, count - len, "0x%02x", i);
+ len += snprintf(page + len, count - len, " 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ len += snprintf(page + len, count - len, "\n");
+ }
+ *eof = 1;
+ return len;
+}
+
+int proc_get_bb_reg_dump2(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+ int i, j = 1;
+
+ len += snprintf(page + len, count - len, "\n======= BB REG =======\n");
+ for (i = 0xB00; i < 0xE00; i += 4) {
+ if (j%4 == 1)
+ len += snprintf(page + len, count - len, "0x%02x", i);
+ len += snprintf(page + len, count - len, " 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ len += snprintf(page + len, count - len, "\n");
+ }
+ *eof = 1;
+ return len;
+}
+
+int proc_get_bb_reg_dump3(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+ int i, j = 1;
+
+ len += snprintf(page + len, count - len, "\n======= BB REG =======\n");
+ for (i = 0xE00; i < 0x1000; i += 4) {
+ if (j%4 == 1)
+ len += snprintf(page + len, count - len, "0x%02x", i);
+ len += snprintf(page + len, count - len, " 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ len += snprintf(page + len, count - len, "\n");
+ }
+ *eof = 1;
+ return len;
+}
+
+int proc_get_rf_reg_dump1(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+ int i, j = 1, path;
+ u32 value;
+
+ len += snprintf(page + len, count - len, "\n======= RF REG =======\n");
+ path = 1;
+ len += snprintf(page + len, count - len, "\nRF_Path(%x)\n", path);
+ for (i = 0; i < 0xC0; i++) {
+ value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
+ if (j%4 == 1)
+ len += snprintf(page + len, count - len, "0x%02x ", i);
+ len += snprintf(page + len, count - len, " 0x%08x ", value);
+ if ((j++)%4 == 0)
+ len += snprintf(page + len, count - len, "\n");
+ }
+ *eof = 1;
+ return len;
+}
+
+int proc_get_rf_reg_dump2(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+ int i, j = 1, path;
+ u32 value;
+
+ len += snprintf(page + len, count - len, "\n======= RF REG =======\n");
+ path = 1;
+ len += snprintf(page + len, count - len, "\nRF_Path(%x)\n", path);
+ for (i = 0xC0; i < 0x100; i++) {
+ value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
+ if (j%4 == 1)
+ len += snprintf(page + len, count - len, "0x%02x ", i);
+ len += snprintf(page + len, count - len, " 0x%08x ", value);
+ if ((j++)%4 == 0)
+ len += snprintf(page + len, count - len, "\n");
+ }
+ *eof = 1;
+ return len;
+}
+
+int proc_get_rf_reg_dump3(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+ int i, j = 1, path;
+ u32 value;
+
+ len += snprintf(page + len, count - len, "\n======= RF REG =======\n");
+ path = 2;
+ len += snprintf(page + len, count - len, "\nRF_Path(%x)\n", path);
+ for (i = 0; i < 0xC0; i++) {
+ value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
+ if (j%4 == 1)
+ len += snprintf(page + len, count - len, "0x%02x ", i);
+ len += snprintf(page + len, count - len, " 0x%08x ", value);
+ if ((j++)%4 == 0)
+ len += snprintf(page + len, count - len, "\n");
+ }
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_rf_reg_dump4(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+ int i, j = 1, path;
+ u32 value;
+
+ len += snprintf(page + len, count - len, "\n======= RF REG =======\n");
+ path = 2;
+ len += snprintf(page + len, count - len, "\nRF_Path(%x)\n", path);
+ for (i = 0xC0; i < 0x100; i++) {
+ value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
+ if (j%4 == 1)
+ len += snprintf(page + len, count - len, "0x%02x ", i);
+ len += snprintf(page + len, count - len, " 0x%08x ", value);
+ if ((j++)%4 == 0)
+ len += snprintf(page + len, count - len, "\n");
+ }
+ *eof = 1;
+ return len;
+}
+
+int proc_get_rx_signal(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int len = 0;
+
+ len = snprintf(page + len, count,
+ "rssi:%d\n"
+ "rxpwdb:%d\n"
+ "signal_strength:%u\n"
+ "signal_qual:%u\n"
+ "noise:%u\n",
+ padapter->recvpriv.rssi,
+ padapter->recvpriv.rxpwdb,
+ padapter->recvpriv.signal_strength,
+ padapter->recvpriv.signal_qual,
+ padapter->recvpriv.noise
+ );
+
+ *eof = 1;
+ return len;
+}
+
+int proc_set_rx_signal(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ char tmp[32];
+ u32 is_signal_dbg;
+ s32 signal_strength;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ int num = sscanf(tmp, "%u %u", &is_signal_dbg, &signal_strength);
+ is_signal_dbg = is_signal_dbg == 0 ? 0 : 1;
+ if (is_signal_dbg && num != 2)
+ return count;
+
+ signal_strength = signal_strength > 100 ? 100 : signal_strength;
+ signal_strength = signal_strength < 0 ? 0 : signal_strength;
+
+ padapter->recvpriv.is_signal_dbg = is_signal_dbg;
+ padapter->recvpriv.signal_strength_dbg = signal_strength;
+
+ if (is_signal_dbg)
+ DBG_88E("set %s %u\n", "DBG_SIGNAL_STRENGTH", signal_strength);
+ else
+ DBG_88E("set %s\n", "HW_SIGNAL_STRENGTH");
+ }
+ return count;
+}
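+
+/* Usage sketch (illustrative; the proc path depends on how the entry is
+ * registered elsewhere): the first field selects debug mode, the second the
+ * forced signal strength, which proc_set_rx_signal() clamps to 0..100.
+ *
+ *	echo "1 80" > .../rx_signal	(report a fixed signal strength of 80)
+ *	echo "0" > .../rx_signal	(return to the hardware-reported value)
+ */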
+
+int proc_get_ht_enable(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ int len = 0;
+
+ if (pregpriv)
+ len += snprintf(page + len, count - len,
+ "%d\n",
+ pregpriv->ht_enable
+ );
+ *eof = 1;
+ return len;
+}
+
+int proc_set_ht_enable(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ char tmp[32];
+ s32 mode = 0;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ sscanf(tmp, "%d", &mode); /* parse the requested value; mode stays 0 if parsing fails */
+ if (pregpriv) {
+ pregpriv->ht_enable = mode;
+ pr_info("ht_enable=%d\n", pregpriv->ht_enable);
+ }
+ }
+
+ return count;
+}
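+
+/* Usage sketch (illustrative; proc path depends on registration elsewhere):
+ *
+ *	echo 0 > .../ht_enable	(disable HT)
+ *	echo 1 > .../ht_enable	(enable HT)
+ *	cat .../ht_enable	(show the current setting)
+ */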
+
+int proc_get_cbw40_enable(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+
+ int len = 0;
+
+ if (pregpriv)
+ len += snprintf(page + len, count - len,
+ "%d\n",
+ pregpriv->cbw40_enable
+ );
+
+ *eof = 1;
+ return len;
+}
+
+int proc_set_cbw40_enable(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ char tmp[32];
+ s32 mode = 0;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ sscanf(tmp, "%d", &mode); /* parse the requested value; mode stays 0 if parsing fails */
+ if (pregpriv) {
+ pregpriv->cbw40_enable = mode;
+ pr_info("cbw40_enable=%d\n", mode);
+ }
+ }
+ return count;
+}
+
+int proc_get_ampdu_enable(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+
+ int len = 0;
+
+ if (pregpriv)
+ len += snprintf(page + len, count - len,
+ "%d\n",
+ pregpriv->ampdu_enable
+ );
+
+ *eof = 1;
+ return len;
+}
+
+int proc_set_ampdu_enable(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ char tmp[32];
+ s32 mode = 0;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ sscanf(tmp, "%d", &mode); /* parse the requested value; mode stays 0 if parsing fails */
+ if (pregpriv) {
+ pregpriv->ampdu_enable = mode;
+ pr_info("ampdu_enable=%d\n", mode);
+ }
+ }
+ return count;
+}
+
+int proc_get_two_path_rssi(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ int len = 0;
+
+ if (padapter)
+ len += snprintf(page + len, count - len,
+ "%d %d\n",
+ padapter->recvpriv.RxRssi[0],
+ padapter->recvpriv.RxRssi[1]
+ );
+
+ *eof = 1;
+ return len;
+}
+
+int proc_get_rx_stbc(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+
+ int len = 0;
+
+ if (pregpriv)
+ len += snprintf(page + len, count - len,
+ "%d\n",
+ pregpriv->rx_stbc
+ );
+
+ *eof = 1;
+ return len;
+}
+
+int proc_set_rx_stbc(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ char tmp[32];
+ u32 mode = 0;
+
+ if (count < 1)
+ return -EFAULT;
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ sscanf(tmp, "%u", &mode); /* parse the requested value; mode stays 0 if parsing fails */
+ if (pregpriv) {
+ pregpriv->rx_stbc = mode;
+ pr_info("rx_stbc=%u\n", mode);
+ }
+ }
+ return count;
+}
+
+int proc_get_rssi_disp(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ *eof = 1;
+ return 0;
+}
+
+int proc_set_rssi_disp(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ struct net_device *dev = (struct net_device *)data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ char tmp[32];
+ u32 enable = 0;
+
+ if (count < 1) {
+ DBG_88E("argument size is less than 1\n");
+ return -EFAULT;
+ }
+
+ if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
+ int num = sscanf(tmp, "%x", &enable);
+
+ if (num != 1) {
+ DBG_88E("invalid set_rssi_disp parameter!\n");
+ return count;
+ }
+
+ if (enable) {
+ DBG_88E("Turn On Rx RSSI Display Function\n");
+ padapter->bRxRSSIDisplay = enable;
+ } else {
+ DBG_88E("Turn Off Rx RSSI Display Function\n");
+ padapter->bRxRSSIDisplay = 0;
+ }
+ }
+ return count;
+}
+
+#ifdef CONFIG_88EU_AP_MODE
+
+int proc_get_all_sta_info(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ unsigned long irqL;
+ struct sta_info *psta;
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ int i, j;
+ struct list_head *plist, *phead;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ int len = 0;
+
+ len += snprintf(page + len, count - len, "sta_dz_bitmap=0x%x, tim_bitmap=0x%x\n", pstapriv->sta_dz_bitmap, pstapriv->tim_bitmap);
+
+ _enter_critical_bh(&pstapriv->sta_hash_lock, &irqL);
+
+ for (i = 0; i < NUM_STA; i++) {
+ phead = &(pstapriv->sta_hash[i]);
+ plist = get_next(phead);
+
+ while ((rtw_end_of_queue_search(phead, plist)) == false) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+
+ plist = get_next(plist);
+
+ len += snprintf(page + len, count - len, "sta's macaddr: %pM\n", psta->hwaddr);
+ len += snprintf(page + len, count - len, "rtsen=%d, cts2slef=%d\n", psta->rtsen, psta->cts2self);
+ len += snprintf(page + len, count - len, "state=0x%x, aid=%d, macid=%d, raid=%d\n", psta->state, psta->aid, psta->mac_id, psta->raid);
+ len += snprintf(page + len, count - len, "qos_en=%d, ht_en=%d, init_rate=%d\n", psta->qos_option, psta->htpriv.ht_option, psta->init_rate);
+ len += snprintf(page + len, count - len, "bwmode=%d, ch_offset=%d, sgi=%d\n", psta->htpriv.bwmode, psta->htpriv.ch_offset, psta->htpriv.sgi);
+ len += snprintf(page + len, count - len, "ampdu_enable = %d\n", psta->htpriv.ampdu_enable);
+ len += snprintf(page + len, count - len, "agg_enable_bitmap=%x, candidate_tid_bitmap=%x\n", psta->htpriv.agg_enable_bitmap, psta->htpriv.candidate_tid_bitmap);
+ len += snprintf(page + len, count - len, "sleepq_len=%d\n", psta->sleepq_len);
+ len += snprintf(page + len, count - len, "capability=0x%x\n", psta->capability);
+ len += snprintf(page + len, count - len, "flags=0x%x\n", psta->flags);
+ len += snprintf(page + len, count - len, "wpa_psk=0x%x\n", psta->wpa_psk);
+ len += snprintf(page + len, count - len, "wpa2_group_cipher=0x%x\n", psta->wpa2_group_cipher);
+ len += snprintf(page + len, count - len, "wpa2_pairwise_cipher=0x%x\n", psta->wpa2_pairwise_cipher);
+ len += snprintf(page + len, count - len, "qos_info=0x%x\n", psta->qos_info);
+ len += snprintf(page + len, count - len, "dot118021XPrivacy=0x%x\n", psta->dot118021XPrivacy);
+
+ for (j = 0; j < 16; j++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[j];
+ if (preorder_ctrl->enable)
+ len += snprintf(page + len, count - len, "tid=%d, indicate_seq=%d\n", j, preorder_ctrl->indicate_seq);
+ }
+ }
+ }
+ _exit_critical_bh(&pstapriv->sta_hash_lock, &irqL);
+
+ *eof = 1;
+ return len;
+}
+#endif
+
+int proc_get_best_channel(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data)
+{
+ struct net_device *dev = data;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ int len = 0;
+ u32 i, best_channel_24G = 1, best_channel_5G = 36, index_24G = 0, index_5G = 0;
+
+ for (i = 0; pmlmeext->channel_set[i].ChannelNum != 0; i++) {
+ if (pmlmeext->channel_set[i].ChannelNum == 1)
+ index_24G = i;
+ if (pmlmeext->channel_set[i].ChannelNum == 36)
+ index_5G = i;
+ }
+
+ for (i = 0; pmlmeext->channel_set[i].ChannelNum != 0; i++) {
+ /* 2.4G */
+ if (pmlmeext->channel_set[i].ChannelNum == 6) {
+ if (pmlmeext->channel_set[i].rx_count < pmlmeext->channel_set[index_24G].rx_count) {
+ index_24G = i;
+ best_channel_24G = pmlmeext->channel_set[i].ChannelNum;
+ }
+ }
+
+ /* 5G */
+ if (pmlmeext->channel_set[i].ChannelNum >= 36 &&
+ pmlmeext->channel_set[i].ChannelNum < 140) {
+ /* Find primary channel */
+ if (((pmlmeext->channel_set[i].ChannelNum - 36) % 8 == 0) &&
+ (pmlmeext->channel_set[i].rx_count < pmlmeext->channel_set[index_5G].rx_count)) {
+ index_5G = i;
+ best_channel_5G = pmlmeext->channel_set[i].ChannelNum;
+ }
+ }
+
+ if (pmlmeext->channel_set[i].ChannelNum >= 149 &&
+ pmlmeext->channel_set[i].ChannelNum < 165) {
+ /* find primary channel */
+ if (((pmlmeext->channel_set[i].ChannelNum - 149) % 8 == 0) &&
+ (pmlmeext->channel_set[i].rx_count < pmlmeext->channel_set[index_5G].rx_count)) {
+ index_5G = i;
+ best_channel_5G = pmlmeext->channel_set[i].ChannelNum;
+ }
+ }
+ /* debug */
+ len += snprintf(page + len, count - len, "The rx cnt of channel %3d = %d\n",
+ pmlmeext->channel_set[i].ChannelNum, pmlmeext->channel_set[i].rx_count);
+ }
+
+ len += snprintf(page + len, count - len, "best_channel_5G = %d\n", best_channel_5G);
+ len += snprintf(page + len, count - len, "best_channel_24G = %d\n", best_channel_24G);
+
+ *eof = 1;
+ return len;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
new file mode 100644
index 00000000000..869434c4cf6
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -0,0 +1,875 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_EFUSE_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtw_efuse.h>
+
+/*------------------------Define local variable------------------------------*/
+u8 fakeEfuseBank;
+u32 fakeEfuseUsedBytes;
+u8 fakeEfuseContent[EFUSE_MAX_HW_SIZE] = {0};
+u8 fakeEfuseInitMap[EFUSE_MAX_MAP_LEN] = {0};
+u8 fakeEfuseModifiedMap[EFUSE_MAX_MAP_LEN] = {0};
+
+u32 BTEfuseUsedBytes;
+u8 BTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
+u8 BTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN] = {0};
+u8 BTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN] = {0};
+
+u32 fakeBTEfuseUsedBytes;
+u8 fakeBTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
+u8 fakeBTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN] = {0};
+u8 fakeBTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN] = {0};
+/*------------------------Define local variable------------------------------*/
+
+/* */
+#define REG_EFUSE_CTRL 0x0030
+#define EFUSE_CTRL REG_EFUSE_CTRL /* E-Fuse Control. */
+/* */
+
+bool
+Efuse_Read1ByteFromFakeContent(
+ struct adapter *pAdapter,
+ u16 Offset,
+ u8 *Value);
+bool
+Efuse_Read1ByteFromFakeContent(
+ struct adapter *pAdapter,
+ u16 Offset,
+ u8 *Value)
+{
+ if (Offset >= EFUSE_MAX_HW_SIZE)
+ return false;
+ if (fakeEfuseBank == 0)
+ *Value = fakeEfuseContent[Offset];
+ else
+ *Value = fakeBTEfuseContent[fakeEfuseBank-1][Offset];
+ return true;
+}
+
+static bool
+Efuse_Write1ByteToFakeContent(
+ struct adapter *pAdapter,
+ u16 Offset,
+ u8 Value)
+{
+ if (Offset >= EFUSE_MAX_HW_SIZE)
+ return false;
+ if (fakeEfuseBank == 0) {
+ fakeEfuseContent[Offset] = Value;
+ } else {
+ fakeBTEfuseContent[fakeEfuseBank-1][Offset] = Value;
+ }
+ return true;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: Efuse_PowerSwitch
+ *
+ * Overview: When we want to enable the write operation, we should switch to
+ * the power-on state. When we stop writing, we should switch to 500k mode
+ * and disable the 2.5V LDO.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/17/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void
+Efuse_PowerSwitch(
+ struct adapter *pAdapter,
+ u8 write,
+ u8 PwrState)
+{
+ pAdapter->HalFunc.EfusePowerSwitch(pAdapter, write, PwrState);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: efuse_GetCurrentSize
+ *
+ * Overview: Get current efuse size!!!
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/16/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+u16
+Efuse_GetCurrentSize(
+ struct adapter *pAdapter,
+ u8 efuseType,
+ bool pseudo)
+{
+ u16 ret = 0;
+
+ ret = pAdapter->HalFunc.EfuseGetCurrentSize(pAdapter, efuseType, pseudo);
+
+ return ret;
+}
+
+/* 11/16/2008 MH Add description. Get the number of enabled words in the current efuse area. */
+u8
+Efuse_CalculateWordCnts(u8 word_en)
+{
+ u8 word_cnts = 0;
+ if (!(word_en & BIT(0)))
+ word_cnts++; /* 0 : write enable */
+ if (!(word_en & BIT(1)))
+ word_cnts++;
+ if (!(word_en & BIT(2)))
+ word_cnts++;
+ if (!(word_en & BIT(3)))
+ word_cnts++;
+ return word_cnts;
+}
+
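
As a quick illustration of the active-low word_en convention used by Efuse_CalculateWordCnts() above, here is a small standalone sketch (plain userspace C, not driver code; the sample values are arbitrary): each cleared bit in word_en[3:0] enables one 2-byte word of an 8-byte PG packet.

#include <stdint.h>
#include <stdio.h>

/* Count the enabled words the same way Efuse_CalculateWordCnts() does:
 * a cleared bit in word_en[3:0] enables the corresponding 2-byte word. */
static unsigned word_cnts(uint8_t word_en)
{
    unsigned cnt = 0;
    int bit;

    for (bit = 0; bit < 4; bit++)
        if (!(word_en & (1u << bit)))
            cnt++;
    return cnt;
}

int main(void)
{
    printf("word_en=0xF -> %u words\n", word_cnts(0xF)); /* 0: nothing enabled   */
    printf("word_en=0xD -> %u words\n", word_cnts(0xD)); /* 1: only word 1       */
    printf("word_en=0x0 -> %u words\n", word_cnts(0x0)); /* 4: all words enabled */
    return 0;
}
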
+/* */
+/* Description: */
+/* Execute E-Fuse read byte operation. */
+/* Referred from SD1 Richard. */
+/* */
+/* Assumption: */
+/* 1. Boot from E-Fuse and successfully auto-load. */
+/* 2. PASSIVE_LEVEL (USB interface) */
+/* */
+/* Created by Roger, 2008.10.21. */
+/* */
+void
+ReadEFuseByte(
+ struct adapter *Adapter,
+ u16 _offset,
+ u8 *pbuf,
+ bool pseudo)
+{
+ u32 value32;
+ u8 readbyte;
+ u16 retry;
+
+ if (pseudo) {
+ Efuse_Read1ByteFromFakeContent(Adapter, _offset, pbuf);
+ return;
+ }
+
+ /* Write Address */
+ rtw_write8(Adapter, EFUSE_CTRL+1, (_offset & 0xff));
+ readbyte = rtw_read8(Adapter, EFUSE_CTRL+2);
+ rtw_write8(Adapter, EFUSE_CTRL+2, ((_offset >> 8) & 0x03) | (readbyte & 0xfc));
+
+ /* Write bit 31 to 0 */
+ readbyte = rtw_read8(Adapter, EFUSE_CTRL+3);
+ rtw_write8(Adapter, EFUSE_CTRL+3, (readbyte & 0x7f));
+
+ /* Check bit 31 read-ready */
+ retry = 0;
+ value32 = rtw_read32(Adapter, EFUSE_CTRL);
+ while (!(((value32 >> 24) & 0xff) & 0x80) && (retry < 10000)) {
+ value32 = rtw_read32(Adapter, EFUSE_CTRL);
+ retry++;
+ }
+
+ /* 20100205 Joseph: Add delay suggested by SD1 Victor. */
+ /* This fixes the problem of Efuse read errors in high-temperature conditions. */
+ /* The designer says that there shall be some delay after the ready bit is set, or the */
+ /* result will always stay at the last data we read. */
+ rtw_udelay_os(50);
+ value32 = rtw_read32(Adapter, EFUSE_CTRL);
+
+ *pbuf = (u8)(value32 & 0xff);
+}
+
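
The register writes in ReadEFuseByte() split the 10-bit efuse address across two bytes of the EFUSE_CTRL register: bits 7:0 go to EFUSE_CTRL+1 and bits 9:8 into the low two bits of EFUSE_CTRL+2, preserving the rest of that byte. A minimal standalone sketch of that split, with made-up register contents and no hardware access:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t offset = 0x1A7;     /* example 10-bit efuse address (assumed) */
    uint8_t ctrl2_old = 0xB4;    /* assumed previous EFUSE_CTRL+2 contents */

    uint8_t ctrl1 = offset & 0xFF;                               /* bits 7:0 */
    uint8_t ctrl2 = ((offset >> 8) & 0x03) | (ctrl2_old & 0xFC); /* bits 9:8 */

    printf("EFUSE_CTRL+1 = 0x%02X, EFUSE_CTRL+2 = 0x%02X\n",
           (unsigned)ctrl1, (unsigned)ctrl2);
    return 0;
}
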
+/* */
+/* Description: */
+/* 1. Execute E-Fuse read byte operation according to the map offset and */
+/* save to E-Fuse table. */
+/* 2. Referred from SD1 Richard. */
+/* */
+/* Assumption: */
+/* 1. Boot from E-Fuse and successfully auto-load. */
+/* 2. PASSIVE_LEVEL (USB interface) */
+/* */
+/* Created by Roger, 2008.10.21. */
+/* */
+/* 2008/12/12 MH 1. Reorganize code flow, reserve bytes, and add description. */
+/* 2. Add efuse utilization collect. */
+/* 2008/12/22 MH Read Efuse must check if we write section 1 data again!!! Sec1 */
+/* write addr must be after sec5. */
+/* */
+
+static void efuse_ReadEFuse(struct adapter *Adapter, u8 efuseType, u16 _offset, u16 _size_byte, u8 *pbuf, bool pseudo)
+{
+ Adapter->HalFunc.ReadEFuse(Adapter, efuseType, _offset, _size_byte, pbuf, pseudo);
+}
+
+void EFUSE_GetEfuseDefinition(struct adapter *pAdapter, u8 efuseType, u8 type, void *pOut, bool pseudo
+ )
+{
+ pAdapter->HalFunc.EFUSEGetEfuseDefinition(pAdapter, efuseType, type, pOut, pseudo);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: EFUSE_Read1Byte
+ *
+ * Overview: Copy from WMAC for EFUSE read 1 byte.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 09/23/2008 MHC Copy from WMAC.
+ *
+ *---------------------------------------------------------------------------*/
+u8 EFUSE_Read1Byte(struct adapter *Adapter, u16 Address)
+{
+ u8 data;
+ u8 Bytetemp = {0x00};
+ u8 temp = {0x00};
+ u32 k = 0;
+ u16 contentLen = 0;
+
+ EFUSE_GetEfuseDefinition(Adapter, EFUSE_WIFI, TYPE_EFUSE_REAL_CONTENT_LEN, (void *)&contentLen, false);
+
+ if (Address < contentLen) { /* E-fuse 512Byte */
+ /* Write E-fuse Register address bit0~7 */
+ temp = Address & 0xFF;
+ rtw_write8(Adapter, EFUSE_CTRL+1, temp);
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+2);
+ /* Write E-fuse Register address bit8~9 */
+ temp = ((Address >> 8) & 0x03) | (Bytetemp & 0xFC);
+ rtw_write8(Adapter, EFUSE_CTRL+2, temp);
+
+ /* Write 0x30[31]= 0 */
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
+ temp = Bytetemp & 0x7F;
+ rtw_write8(Adapter, EFUSE_CTRL+3, temp);
+
+ /* Wait Write-ready (0x30[31]= 1) */
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
+ while (!(Bytetemp & 0x80)) {
+ Bytetemp = rtw_read8(Adapter, EFUSE_CTRL+3);
+ k++;
+ if (k == 1000) {
+ k = 0;
+ break;
+ }
+ }
+ data = rtw_read8(Adapter, EFUSE_CTRL);
+ return data;
+ } else {
+ return 0xFF;
+ }
+
+} /* EFUSE_Read1Byte */
+
+/* 11/16/2008 MH Read one byte from real Efuse. */
+u8 efuse_OneByteRead(struct adapter *pAdapter, u16 addr, u8 *data, bool pseudo)
+{
+ u8 tmpidx = 0;
+ u8 result;
+
+ if (pseudo) {
+ result = Efuse_Read1ByteFromFakeContent(pAdapter, addr, data);
+ return result;
+ }
+ /* -----------------e-fuse reg ctrl --------------------------------- */
+ /* address */
+ rtw_write8(pAdapter, EFUSE_CTRL+1, (u8)(addr & 0xff));
+ rtw_write8(pAdapter, EFUSE_CTRL+2, ((u8)((addr>>8) & 0x03)) |
+ (rtw_read8(pAdapter, EFUSE_CTRL+2) & 0xFC));
+
+ rtw_write8(pAdapter, EFUSE_CTRL+3, 0x72);/* read cmd */
+
+ while (!(0x80 & rtw_read8(pAdapter, EFUSE_CTRL+3)) && (tmpidx < 100))
+ tmpidx++;
+ if (tmpidx < 100) {
+ *data = rtw_read8(pAdapter, EFUSE_CTRL);
+ result = true;
+ } else {
+ *data = 0xff;
+ result = false;
+ }
+ return result;
+}
+
+/* 11/16/2008 MH Write one byte to real Efuse. */
+u8 efuse_OneByteWrite(struct adapter *pAdapter, u16 addr, u8 data, bool pseudo)
+{
+ u8 tmpidx = 0;
+ u8 result;
+
+ if (pseudo) {
+ result = Efuse_Write1ByteToFakeContent(pAdapter, addr, data);
+ return result;
+ }
+
+ /* -----------------e-fuse reg ctrl --------------------------------- */
+ /* address */
+ rtw_write8(pAdapter, EFUSE_CTRL+1, (u8)(addr&0xff));
+ rtw_write8(pAdapter, EFUSE_CTRL+2,
+ (rtw_read8(pAdapter, EFUSE_CTRL+2) & 0xFC) |
+ (u8)((addr>>8) & 0x03));
+ rtw_write8(pAdapter, EFUSE_CTRL, data);/* data */
+
+ rtw_write8(pAdapter, EFUSE_CTRL+3, 0xF2);/* write cmd */
+
+ while ((0x80 & rtw_read8(pAdapter, EFUSE_CTRL+3)) && (tmpidx < 100))
+ tmpidx++;
+
+ if (tmpidx < 100)
+ result = true;
+ else
+ result = false;
+
+ return result;
+}
+
+int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data, bool pseudo)
+{
+ int ret = 0;
+
+ ret = pAdapter->HalFunc.Efuse_PgPacketRead(pAdapter, offset, data, pseudo);
+
+ return ret;
+}
+
+int Efuse_PgPacketWrite(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool pseudo)
+{
+ int ret;
+
+ ret = pAdapter->HalFunc.Efuse_PgPacketWrite(pAdapter, offset, word_en, data, pseudo);
+
+ return ret;
+}
+
+
+static int Efuse_PgPacketWrite_BT(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool pseudo)
+{
+ int ret;
+
+ ret = pAdapter->HalFunc.Efuse_PgPacketWrite_BT(pAdapter, offset, word_en, data, pseudo);
+
+ return ret;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: efuse_WordEnableDataRead
+ *
+ * Overview: Read allowed word in current efuse section data.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/16/2008 MHC Create Version 0.
+ * 11/21/2008 MHC Fix Write bug when we only enable late word.
+ *
+ *---------------------------------------------------------------------------*/
+void efuse_WordEnableDataRead(u8 word_en, u8 *sourdata, u8 *targetdata)
+{
+ if (!(word_en&BIT(0))) {
+ targetdata[0] = sourdata[0];
+ targetdata[1] = sourdata[1];
+ }
+ if (!(word_en&BIT(1))) {
+ targetdata[2] = sourdata[2];
+ targetdata[3] = sourdata[3];
+ }
+ if (!(word_en&BIT(2))) {
+ targetdata[4] = sourdata[4];
+ targetdata[5] = sourdata[5];
+ }
+ if (!(word_en&BIT(3))) {
+ targetdata[6] = sourdata[6];
+ targetdata[7] = sourdata[7];
+ }
+}
+
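
efuse_WordEnableDataRead() above copies only the enabled words: word w covers bytes 2*w and 2*w+1 and is copied when bit w of word_en is cleared. A standalone sketch of the same selection, using arbitrary example bytes:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirror of efuse_WordEnableDataRead(): copy only the words whose
 * (active-low) enable bit is cleared; word w covers bytes 2*w and 2*w+1. */
static void word_enable_copy(uint8_t word_en, const uint8_t *src, uint8_t *dst)
{
    int w;

    for (w = 0; w < 4; w++)
        if (!(word_en & (1u << w)))
            memcpy(&dst[2 * w], &src[2 * w], 2);
}

int main(void)
{
    const uint8_t src[8] = {0x10, 0x11, 0x22, 0x23, 0x34, 0x35, 0x46, 0x47};
    uint8_t dst[8];
    int i;

    memset(dst, 0xFF, sizeof(dst));
    word_enable_copy(0xD, src, dst);  /* only bit 1 cleared -> bytes 2-3 copied */
    for (i = 0; i < 8; i++)
        printf("%02X ", (unsigned)dst[i]); /* FF FF 22 23 FF FF FF FF */
    printf("\n");
    return 0;
}
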
+u8 Efuse_WordEnableDataWrite(struct adapter *pAdapter, u16 efuse_addr, u8 word_en, u8 *data, bool pseudo)
+{
+ u8 ret = 0;
+
+ ret = pAdapter->HalFunc.Efuse_WordEnableDataWrite(pAdapter, efuse_addr, word_en, data, pseudo);
+
+ return ret;
+}
+
+static u8 efuse_read8(struct adapter *padapter, u16 address, u8 *value)
+{
+ return efuse_OneByteRead(padapter, address, value, false);
+}
+
+static u8 efuse_write8(struct adapter *padapter, u16 address, u8 *value)
+{
+ return efuse_OneByteWrite(padapter, address, *value, false);
+}
+
+/*
+ * read/write raw efuse data
+ */
+u8 rtw_efuse_access(struct adapter *padapter, u8 write, u16 start_addr, u16 cnts, u8 *data)
+{
+ int i = 0;
+ u16 real_content_len = 0, max_available_size = 0;
+ u8 res = _FAIL;
+ u8 (*rw8)(struct adapter *, u16, u8*);
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_EFUSE_REAL_CONTENT_LEN, (void *)&real_content_len, false);
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+
+ if (start_addr > real_content_len)
+ return _FAIL;
+
+ if (write) {
+ if ((start_addr + cnts) > max_available_size)
+ return _FAIL;
+ rw8 = &efuse_write8;
+ } else {
+ rw8 = &efuse_read8;
+ }
+
+ Efuse_PowerSwitch(padapter, write, true);
+
+ /* e-fuse one byte read / write */
+ for (i = 0; i < cnts; i++) {
+ if (start_addr >= real_content_len) {
+ res = _FAIL;
+ break;
+ }
+
+ res = rw8(padapter, start_addr++, data++);
+ if (_FAIL == res)
+ break;
+ }
+
+ Efuse_PowerSwitch(padapter, write, false);
+
+ return res;
+}
+/* */
+u16 efuse_GetMaxSize(struct adapter *padapter)
+{
+ u16 max_size;
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_size, false);
+ return max_size;
+}
+/* */
+u8 efuse_GetCurrentSize(struct adapter *padapter, u16 *size)
+{
+ Efuse_PowerSwitch(padapter, false, true);
+ *size = Efuse_GetCurrentSize(padapter, EFUSE_WIFI, false);
+ Efuse_PowerSwitch(padapter, false, false);
+
+ return _SUCCESS;
+}
+/* */
+u8 rtw_efuse_map_read(struct adapter *padapter, u16 addr, u16 cnts, u8 *data)
+{
+ u16 mapLen = 0;
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, false);
+
+ if ((addr + cnts) > mapLen)
+ return _FAIL;
+
+ Efuse_PowerSwitch(padapter, false, true);
+
+ efuse_ReadEFuse(padapter, EFUSE_WIFI, addr, cnts, data, false);
+
+ Efuse_PowerSwitch(padapter, false, false);
+
+ return _SUCCESS;
+}
+
+u8 rtw_BT_efuse_map_read(struct adapter *padapter, u16 addr, u16 cnts, u8 *data)
+{
+ u16 mapLen = 0;
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_BT, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, false);
+
+ if ((addr + cnts) > mapLen)
+ return _FAIL;
+
+ Efuse_PowerSwitch(padapter, false, true);
+
+ efuse_ReadEFuse(padapter, EFUSE_BT, addr, cnts, data, false);
+
+ Efuse_PowerSwitch(padapter, false, false);
+
+ return _SUCCESS;
+}
+/* */
+u8 rtw_efuse_map_write(struct adapter *padapter, u16 addr, u16 cnts, u8 *data)
+{
+ u8 offset, word_en;
+ u8 *map;
+ u8 newdata[PGPKT_DATA_SIZE];
+ s32 i, idx;
+ u8 ret = _SUCCESS;
+ u16 mapLen = 0;
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, false);
+
+ if ((addr + cnts) > mapLen)
+ return _FAIL;
+
+ map = rtw_zmalloc(mapLen);
+ if (map == NULL)
+ return _FAIL;
+
+ ret = rtw_efuse_map_read(padapter, 0, mapLen, map);
+ if (ret == _FAIL)
+ goto exit;
+
+ Efuse_PowerSwitch(padapter, true, true);
+
+ offset = (addr >> 3);
+ word_en = 0xF;
+ _rtw_memset(newdata, 0xFF, PGPKT_DATA_SIZE);
+ i = addr & 0x7; /* index of one package */
+ idx = 0; /* data index */
+
+ if (i & 0x1) {
+ /* odd start */
+ if (data[idx] != map[addr+idx]) {
+ word_en &= ~BIT(i >> 1);
+ newdata[i-1] = map[addr+idx-1];
+ newdata[i] = data[idx];
+ }
+ i++;
+ idx++;
+ }
+ do {
+ for (; i < PGPKT_DATA_SIZE; i += 2) {
+ if (cnts == idx)
+ break;
+ if ((cnts - idx) == 1) {
+ if (data[idx] != map[addr+idx]) {
+ word_en &= ~BIT(i >> 1);
+ newdata[i] = data[idx];
+ newdata[i+1] = map[addr+idx+1];
+ }
+ idx++;
+ break;
+ } else {
+ if ((data[idx] != map[addr+idx]) ||
+ (data[idx+1] != map[addr+idx+1])) {
+ word_en &= ~BIT(i >> 1);
+ newdata[i] = data[idx];
+ newdata[i+1] = data[idx + 1];
+ }
+ idx += 2;
+ }
+ if (idx == cnts)
+ break;
+ }
+
+ if (word_en != 0xF) {
+ ret = Efuse_PgPacketWrite(padapter, offset, word_en, newdata, false);
+ DBG_88E("offset=%x\n", offset);
+ DBG_88E("word_en=%x\n", word_en);
+
+ for (i = 0; i < PGPKT_DATA_SIZE; i++)
+ DBG_88E("data=%x \t", newdata[i]);
+ if (ret == _FAIL)
+ break;
+ }
+
+ if (idx == cnts)
+ break;
+
+ offset++;
+ i = 0;
+ word_en = 0xF;
+ _rtw_memset(newdata, 0xFF, PGPKT_DATA_SIZE);
+ } while (1);
+
+ Efuse_PowerSwitch(padapter, true, false);
+exit:
+ kfree(map);
+ return ret;
+}
+
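
rtw_efuse_map_write() above boils down to: compare the caller's bytes with the cached map one 8-byte packet at a time, clear the word_en bit for every 2-byte word that changed, and program only the packets whose word_en ended up different from 0xF. A standalone sketch of the per-packet word_en computation for one aligned 8-byte block (hypothetical map contents, not real efuse data; the unaligned-start handling in the driver is not shown):

#include <stdint.h>
#include <stdio.h>

/* Compute the active-low word_en for one aligned 8-byte PG packet:
 * a word's bit is cleared when either of its two bytes changed. */
static uint8_t diff_word_en(const uint8_t *map, const uint8_t *new_data)
{
    uint8_t word_en = 0xF;
    int w;

    for (w = 0; w < 4; w++)
        if (map[2 * w] != new_data[2 * w] ||
            map[2 * w + 1] != new_data[2 * w + 1])
            word_en &= (uint8_t)~(1u << w);
    return word_en;
}

int main(void)
{
    const uint8_t map[8]  = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77};
    const uint8_t data[8] = {0x00, 0x11, 0xAA, 0x33, 0x44, 0x55, 0x66, 0xBB};

    /* words 1 and 3 changed -> bits 1 and 3 cleared -> 0x5 */
    printf("word_en = 0x%X\n", (unsigned)diff_word_en(map, data));
    return 0;
}
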
+/* */
+u8 rtw_BT_efuse_map_write(struct adapter *padapter, u16 addr, u16 cnts, u8 *data)
+{
+ u8 offset, word_en;
+ u8 *map;
+ u8 newdata[PGPKT_DATA_SIZE];
+ s32 i, idx;
+ u8 ret = _SUCCESS;
+ u16 mapLen = 0;
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_BT, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, false);
+
+ if ((addr + cnts) > mapLen)
+ return _FAIL;
+
+ map = rtw_zmalloc(mapLen);
+ if (map == NULL)
+ return _FAIL;
+
+ ret = rtw_BT_efuse_map_read(padapter, 0, mapLen, map);
+ if (ret == _FAIL)
+ goto exit;
+
+ Efuse_PowerSwitch(padapter, true, true);
+
+ offset = (addr >> 3);
+ word_en = 0xF;
+ _rtw_memset(newdata, 0xFF, PGPKT_DATA_SIZE);
+ i = addr & 0x7; /* index of one package */
+ idx = 0; /* data index */
+
+ if (i & 0x1) {
+ /* odd start */
+ if (data[idx] != map[addr+idx]) {
+ word_en &= ~BIT(i >> 1);
+ newdata[i-1] = map[addr+idx-1];
+ newdata[i] = data[idx];
+ }
+ i++;
+ idx++;
+ }
+ do {
+ for (; i < PGPKT_DATA_SIZE; i += 2) {
+ if (cnts == idx)
+ break;
+ if ((cnts - idx) == 1) {
+ if (data[idx] != map[addr+idx]) {
+ word_en &= ~BIT(i >> 1);
+ newdata[i] = data[idx];
+ newdata[i+1] = map[addr+idx+1];
+ }
+ idx++;
+ break;
+ } else {
+ if ((data[idx] != map[addr+idx]) ||
+ (data[idx+1] != map[addr+idx+1])) {
+ word_en &= ~BIT(i >> 1);
+ newdata[i] = data[idx];
+ newdata[i+1] = data[idx + 1];
+ }
+ idx += 2;
+ }
+ if (idx == cnts)
+ break;
+ }
+
+ if (word_en != 0xF) {
+ DBG_88E("%s: offset=%#X\n", __func__, offset);
+ DBG_88E("%s: word_en=%#X\n", __func__, word_en);
+ DBG_88E("%s: data=", __func__);
+ for (i = 0; i < PGPKT_DATA_SIZE; i++)
+ DBG_88E("0x%02X ", newdata[i]);
+ DBG_88E("\n");
+
+ ret = Efuse_PgPacketWrite_BT(padapter, offset, word_en, newdata, false);
+ if (ret == _FAIL)
+ break;
+ }
+
+ if (idx == cnts)
+ break;
+
+ offset++;
+ i = 0;
+ word_en = 0xF;
+ _rtw_memset(newdata, 0xFF, PGPKT_DATA_SIZE);
+ } while (1);
+
+ Efuse_PowerSwitch(padapter, true, false);
+
+exit:
+
+ kfree(map);
+
+ return ret;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: efuse_ShadowRead1Byte
+ * efuse_ShadowRead2Byte
+ * efuse_ShadowRead4Byte
+ *
+ * Overview: Read from efuse init map by one/two/four bytes !!!!!
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/12/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+static void
+efuse_ShadowRead1Byte(
+ struct adapter *pAdapter,
+ u16 Offset,
+ u8 *Value)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
+
+ *Value = pEEPROM->efuse_eeprom_data[Offset];
+
+} /* EFUSE_ShadowRead1Byte */
+
+/* Read Two Bytes */
+static void
+efuse_ShadowRead2Byte(
+ struct adapter *pAdapter,
+ u16 Offset,
+ u16 *Value)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
+
+ *Value = pEEPROM->efuse_eeprom_data[Offset];
+ *Value |= pEEPROM->efuse_eeprom_data[Offset+1]<<8;
+
+} /* EFUSE_ShadowRead2Byte */
+
+/* Read Four Bytes */
+static void
+efuse_ShadowRead4Byte(
+ struct adapter *pAdapter,
+ u16 Offset,
+ u32 *Value)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
+
+ *Value = pEEPROM->efuse_eeprom_data[Offset];
+ *Value |= pEEPROM->efuse_eeprom_data[Offset+1]<<8;
+ *Value |= pEEPROM->efuse_eeprom_data[Offset+2]<<16;
+ *Value |= pEEPROM->efuse_eeprom_data[Offset+3]<<24;
+
+} /* efuse_ShadowRead4Byte */
+
+/*-----------------------------------------------------------------------------
+ * Function: Efuse_ReadAllMap
+ *
+ * Overview: Read All Efuse content
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/11/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+static void Efuse_ReadAllMap(struct adapter *pAdapter, u8 efuseType, u8 *Efuse, bool pseudo)
+{
+ u16 mapLen = 0;
+
+ Efuse_PowerSwitch(pAdapter, false, true);
+
+ EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, pseudo);
+
+ efuse_ReadEFuse(pAdapter, efuseType, 0, mapLen, Efuse, pseudo);
+
+ Efuse_PowerSwitch(pAdapter, false, false);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: EFUSE_ShadowMapUpdate
+ *
+ * Overview: Transfer current EFUSE content to shadow init and modify map.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/13/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void EFUSE_ShadowMapUpdate(
+ struct adapter *pAdapter,
+ u8 efuseType,
+ bool pseudo)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
+ u16 mapLen = 0;
+
+ EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, pseudo);
+
+ if (pEEPROM->bautoload_fail_flag)
+ _rtw_memset(pEEPROM->efuse_eeprom_data, 0xFF, mapLen);
+ else
+ Efuse_ReadAllMap(pAdapter, efuseType, pEEPROM->efuse_eeprom_data, pseudo);
+} /* EFUSE_ShadowMapUpdate */
+
+/*-----------------------------------------------------------------------------
+ * Function: EFUSE_ShadowRead
+ *
+ * Overview: Read from efuse init map !!!!!
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/12/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void EFUSE_ShadowRead(struct adapter *pAdapter, u8 Type, u16 Offset, u32 *Value)
+{
+ if (Type == 1)
+ efuse_ShadowRead1Byte(pAdapter, Offset, (u8 *)Value);
+ else if (Type == 2)
+ efuse_ShadowRead2Byte(pAdapter, Offset, (u16 *)Value);
+ else if (Type == 4)
+ efuse_ShadowRead4Byte(pAdapter, Offset, (u32 *)Value);
+
+} /* EFUSE_ShadowRead */
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
new file mode 100644
index 00000000000..3605c5da822
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -0,0 +1,1640 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _IEEE80211_C
+
+#include <drv_types.h>
+#include <ieee80211.h>
+#include <wifi.h>
+#include <osdep_service.h>
+#include <wlan_bssdef.h>
+#include <usb_osintf.h>
+
+u8 RTW_WPA_OUI_TYPE[] = { 0x00, 0x50, 0xf2, 1 };
+u16 RTW_WPA_VERSION = 1;
+u8 WPA_AUTH_KEY_MGMT_NONE[] = { 0x00, 0x50, 0xf2, 0 };
+u8 WPA_AUTH_KEY_MGMT_UNSPEC_802_1X[] = { 0x00, 0x50, 0xf2, 1 };
+u8 WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X[] = { 0x00, 0x50, 0xf2, 2 };
+u8 WPA_CIPHER_SUITE_NONE[] = { 0x00, 0x50, 0xf2, 0 };
+u8 WPA_CIPHER_SUITE_WEP40[] = { 0x00, 0x50, 0xf2, 1 };
+u8 WPA_CIPHER_SUITE_TKIP[] = { 0x00, 0x50, 0xf2, 2 };
+u8 WPA_CIPHER_SUITE_WRAP[] = { 0x00, 0x50, 0xf2, 3 };
+u8 WPA_CIPHER_SUITE_CCMP[] = { 0x00, 0x50, 0xf2, 4 };
+u8 WPA_CIPHER_SUITE_WEP104[] = { 0x00, 0x50, 0xf2, 5 };
+
+u16 RSN_VERSION_BSD = 1;
+u8 RSN_AUTH_KEY_MGMT_UNSPEC_802_1X[] = { 0x00, 0x0f, 0xac, 1 };
+u8 RSN_AUTH_KEY_MGMT_PSK_OVER_802_1X[] = { 0x00, 0x0f, 0xac, 2 };
+u8 RSN_CIPHER_SUITE_NONE[] = { 0x00, 0x0f, 0xac, 0 };
+u8 RSN_CIPHER_SUITE_WEP40[] = { 0x00, 0x0f, 0xac, 1 };
+u8 RSN_CIPHER_SUITE_TKIP[] = { 0x00, 0x0f, 0xac, 2 };
+u8 RSN_CIPHER_SUITE_WRAP[] = { 0x00, 0x0f, 0xac, 3 };
+u8 RSN_CIPHER_SUITE_CCMP[] = { 0x00, 0x0f, 0xac, 4 };
+u8 RSN_CIPHER_SUITE_WEP104[] = { 0x00, 0x0f, 0xac, 5 };
+/* */
+/* for adhoc-master to generate ie and provide supported-rate to fw */
+/* */
+
+static u8 WIFI_CCKRATES[] = {
+ (IEEE80211_CCK_RATE_1MB | IEEE80211_BASIC_RATE_MASK),
+ (IEEE80211_CCK_RATE_2MB | IEEE80211_BASIC_RATE_MASK),
+ (IEEE80211_CCK_RATE_5MB | IEEE80211_BASIC_RATE_MASK),
+ (IEEE80211_CCK_RATE_11MB | IEEE80211_BASIC_RATE_MASK)
+ };
+
+static u8 WIFI_OFDMRATES[] = {
+ (IEEE80211_OFDM_RATE_6MB),
+ (IEEE80211_OFDM_RATE_9MB),
+ (IEEE80211_OFDM_RATE_12MB),
+ (IEEE80211_OFDM_RATE_18MB),
+ (IEEE80211_OFDM_RATE_24MB),
+ IEEE80211_OFDM_RATE_36MB,
+ IEEE80211_OFDM_RATE_48MB,
+ IEEE80211_OFDM_RATE_54MB
+ };
+
+
+int rtw_get_bit_value_from_ieee_value(u8 val)
+{
+ unsigned char dot11_rate_table[] = {
+ 2, 4, 11, 22, 12, 18, 24, 36, 48,
+ 72, 96, 108, 0}; /* last element must be zero!! */
+
+ int i = 0;
+ while (dot11_rate_table[i] != 0) {
+ if (dot11_rate_table[i] == val)
+ return BIT(i);
+ i++;
+ }
+ return 0;
+}
+
+uint rtw_is_cckrates_included(u8 *rate)
+{
+ u32 i = 0;
+
+ while (rate[i] != 0) {
+ if ((((rate[i]) & 0x7f) == 2) || (((rate[i]) & 0x7f) == 4) ||
+ (((rate[i]) & 0x7f) == 11) || (((rate[i]) & 0x7f) == 22))
+ return true;
+ i++;
+ }
+ return false;
+}
+
+uint rtw_is_cckratesonly_included(u8 *rate)
+{
+ u32 i = 0;
+
+ while (rate[i] != 0) {
+ if ((((rate[i]) & 0x7f) != 2) && (((rate[i]) & 0x7f) != 4) &&
+ (((rate[i]) & 0x7f) != 11) && (((rate[i]) & 0x7f) != 22))
+ return false;
+ i++;
+ }
+
+ return true;
+}
+
+int rtw_check_network_type(unsigned char *rate, int ratelen, int channel)
+{
+ if (channel > 14) {
+ if ((rtw_is_cckrates_included(rate)) == true)
+ return WIRELESS_INVALID;
+ else
+ return WIRELESS_11A;
+ } else { /* could be pure B, pure G, or B/G */
+ if ((rtw_is_cckratesonly_included(rate)) == true)
+ return WIRELESS_11B;
+ else if ((rtw_is_cckrates_included(rate)) == true)
+ return WIRELESS_11BG;
+ else
+ return WIRELESS_11G;
+ }
+}
+
+u8 *rtw_set_fixed_ie(unsigned char *pbuf, unsigned int len, unsigned char *source,
+ unsigned int *frlen)
+{
+ memcpy((void *)pbuf, (void *)source, len);
+ *frlen = *frlen + len;
+ return pbuf + len;
+}
+
+/* rtw_set_ie will update frame length */
+u8 *rtw_set_ie
+(
+ u8 *pbuf,
+ int index,
+ uint len,
+ u8 *source,
+ uint *frlen /* frame length */
+)
+{
+_func_enter_;
+ *pbuf = (u8)index;
+
+ *(pbuf + 1) = (u8)len;
+
+ if (len > 0)
+ memcpy((void *)(pbuf + 2), (void *)source, len);
+
+ *frlen = *frlen + (len + 2);
+
+ return pbuf + len + 2;
+_func_exit_;
+}
+
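
rtw_set_ie() above emits the standard 802.11 element encoding: one byte of element ID, one byte of length, then the payload, while also advancing the running frame length. A self-contained sketch of the same append step (the SSID value and buffer size are arbitrary examples):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Append one element (ID, length, payload) and bump the running
 * frame length, mirroring what rtw_set_ie() does. */
static uint8_t *set_ie(uint8_t *buf, uint8_t id, uint8_t len,
                       const uint8_t *src, unsigned int *frlen)
{
    buf[0] = id;
    buf[1] = len;
    if (len)
        memcpy(buf + 2, src, len);
    *frlen += len + 2u;
    return buf + len + 2;
}

int main(void)
{
    uint8_t frame[64];
    unsigned int frlen = 0;
    const uint8_t ssid[] = "demo-ap";   /* arbitrary example SSID */
    uint8_t *p = frame;

    p = set_ie(p, 0 /* SSID element ID */, (uint8_t)(sizeof(ssid) - 1), ssid, &frlen);
    printf("frame length after SSID element: %u\n", frlen); /* 2 + 7 = 9 */
    (void)p;
    return 0;
}
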
+inline u8 *rtw_set_ie_ch_switch(u8 *buf, u32 *buf_len, u8 ch_switch_mode,
+ u8 new_ch, u8 ch_switch_cnt)
+{
+ u8 ie_data[3];
+
+ ie_data[0] = ch_switch_mode;
+ ie_data[1] = new_ch;
+ ie_data[2] = ch_switch_cnt;
+ return rtw_set_ie(buf, WLAN_EID_CHANNEL_SWITCH, 3, ie_data, buf_len);
+}
+
+inline u8 secondary_ch_offset_to_hal_ch_offset(u8 ch_offset)
+{
+ if (ch_offset == SCN)
+ return HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ else if (ch_offset == SCA)
+ return HAL_PRIME_CHNL_OFFSET_UPPER;
+ else if (ch_offset == SCB)
+ return HAL_PRIME_CHNL_OFFSET_LOWER;
+
+ return HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+}
+
+inline u8 hal_ch_offset_to_secondary_ch_offset(u8 ch_offset)
+{
+ if (ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
+ return SCN;
+ else if (ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
+ return SCB;
+ else if (ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
+ return SCA;
+
+ return SCN;
+}
+
+inline u8 *rtw_set_ie_secondary_ch_offset(u8 *buf, u32 *buf_len, u8 secondary_ch_offset)
+{
+ return rtw_set_ie(buf, WLAN_EID_SECONDARY_CHANNEL_OFFSET, 1, &secondary_ch_offset, buf_len);
+}
+
+inline u8 *rtw_set_ie_mesh_ch_switch_parm(u8 *buf, u32 *buf_len, u8 ttl,
+ u8 flags, u16 reason, u16 precedence)
+{
+ u8 ie_data[6];
+
+ ie_data[0] = ttl;
+ ie_data[1] = flags;
+ RTW_PUT_LE16((u8 *)&ie_data[2], reason);
+ RTW_PUT_LE16((u8 *)&ie_data[4], precedence);
+
+ return rtw_set_ie(buf, 0x118, 6, ie_data, buf_len);
+}
+
+/*----------------------------------------------------------------------------
+index: the information element ID to match; limit is the maximum number of bytes to search
+-----------------------------------------------------------------------------*/
+u8 *rtw_get_ie(u8 *pbuf, int index, int *len, int limit)
+{
+ int tmp, i;
+ u8 *p;
+_func_enter_;
+ if (limit < 1) {
+ _func_exit_;
+ return NULL;
+ }
+
+ p = pbuf;
+ i = 0;
+ *len = 0;
+ while (1) {
+ if (*p == index) {
+ *len = *(p + 1);
+ return p;
+ } else {
+ tmp = *(p + 1);
+ p += (tmp + 2);
+ i += (tmp + 2);
+ }
+ if (i >= limit)
+ break;
+ }
+_func_exit_;
+ return NULL;
+}
+
+/**
+ * rtw_get_ie_ex - Search specific IE from a series of IEs
+ * @in_ie: Address of IEs to search
+ * @in_len: Length limit from in_ie
+ * @eid: Element ID to match
+ * @oui: OUI to match
+ * @oui_len: OUI length
+ * @ie: If not NULL and the specific IE is found, the IE will be copied to the buf starting from the specific IE
+ * @ielen: If not NULL and the specific IE is found, will set to the length of the entire IE
+ *
+ * Returns: The address of the specific IE found, or NULL
+ */
+u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, uint *ielen)
+{
+ uint cnt;
+ u8 *target_ie = NULL;
+
+
+ if (ielen)
+ *ielen = 0;
+
+ if (!in_ie || in_len <= 0)
+ return target_ie;
+
+ cnt = 0;
+
+ while (cnt < in_len) {
+ if (eid == in_ie[cnt] && (!oui || _rtw_memcmp(&in_ie[cnt+2], oui, oui_len))) {
+ target_ie = &in_ie[cnt];
+
+ if (ie)
+ memcpy(ie, &in_ie[cnt], in_ie[cnt+1]+2);
+
+ if (ielen)
+ *ielen = in_ie[cnt+1]+2;
+
+ break;
+ } else {
+ cnt += in_ie[cnt+1]+2; /* goto next */
+ }
+ }
+ return target_ie;
+}
+
+/**
+ * rtw_ies_remove_ie - Find matching IEs and remove
+ * @ies: Address of IEs to search
+ * @ies_len: Pointer of length of ies, will update to new length
+ * @offset: The offset to start the search
+ * @eid: Element ID to match
+ * @oui: OUI to match
+ * @oui_len: OUI length
+ *
+ * Returns: _SUCCESS: ies is updated, _FAIL: not updated
+ */
+int rtw_ies_remove_ie(u8 *ies, uint *ies_len, uint offset, u8 eid, u8 *oui, u8 oui_len)
+{
+ int ret = _FAIL;
+ u8 *target_ie;
+ u32 target_ielen;
+ u8 *start;
+ uint search_len;
+
+ if (!ies || !ies_len || *ies_len <= offset)
+ goto exit;
+
+ start = ies + offset;
+ search_len = *ies_len - offset;
+
+ while (1) {
+ target_ie = rtw_get_ie_ex(start, search_len, eid, oui, oui_len, NULL, &target_ielen);
+ if (target_ie && target_ielen) {
+ u8 buf[MAX_IE_SZ] = {0};
+ u8 *remain_ies = target_ie + target_ielen;
+ uint remain_len = search_len - (remain_ies - start);
+
+ memcpy(buf, remain_ies, remain_len);
+ memcpy(target_ie, buf, remain_len);
+ *ies_len = *ies_len - target_ielen;
+ ret = _SUCCESS;
+
+ start = target_ie;
+ search_len = remain_len;
+ } else {
+ break;
+ }
+ }
+exit:
+ return ret;
+}
+
+void rtw_set_supported_rate(u8 *SupportedRates, uint mode)
+{
+_func_enter_;
+
+ _rtw_memset(SupportedRates, 0, NDIS_802_11_LENGTH_RATES_EX);
+
+ switch (mode) {
+ case WIRELESS_11B:
+ memcpy(SupportedRates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
+ break;
+ case WIRELESS_11G:
+ case WIRELESS_11A:
+ case WIRELESS_11_5N:
+ case WIRELESS_11A_5N:/* Todo: no basic rate for ofdm ? */
+ memcpy(SupportedRates, WIFI_OFDMRATES, IEEE80211_NUM_OFDM_RATESLEN);
+ break;
+ case WIRELESS_11BG:
+ case WIRELESS_11G_24N:
+ case WIRELESS_11_24N:
+ case WIRELESS_11BG_24N:
+ memcpy(SupportedRates, WIFI_CCKRATES, IEEE80211_CCK_RATE_LEN);
+ memcpy(SupportedRates + IEEE80211_CCK_RATE_LEN, WIFI_OFDMRATES, IEEE80211_NUM_OFDM_RATESLEN);
+ break;
+ }
+_func_exit_;
+}
+
+uint rtw_get_rateset_len(u8 *rateset)
+{
+ uint i = 0;
+_func_enter_;
+ while (1) {
+ if ((rateset[i]) == 0)
+ break;
+ if (i > 12)
+ break;
+ i++;
+ }
+_func_exit_;
+ return i;
+}
+
+int rtw_generate_ie(struct registry_priv *pregistrypriv)
+{
+ u8 wireless_mode;
+ int sz = 0, rateLen;
+ struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
+ u8 *ie = pdev_network->IEs;
+
+_func_enter_;
+
+ /* timestamp will be inserted by hardware */
+ sz += 8;
+ ie += sz;
+
+ /* beacon interval : 2bytes */
+ *(__le16 *)ie = cpu_to_le16((u16)pdev_network->Configuration.BeaconPeriod);/* BCN_INTERVAL; */
+ sz += 2;
+ ie += 2;
+
+ /* capability info */
+ *(u16 *)ie = 0;
+
+ *(__le16 *)ie |= cpu_to_le16(cap_IBSS);
+
+ if (pregistrypriv->preamble == PREAMBLE_SHORT)
+ *(__le16 *)ie |= cpu_to_le16(cap_ShortPremble);
+
+ if (pdev_network->Privacy)
+ *(__le16 *)ie |= cpu_to_le16(cap_Privacy);
+
+ sz += 2;
+ ie += 2;
+
+ /* SSID */
+ ie = rtw_set_ie(ie, _SSID_IE_, pdev_network->Ssid.SsidLength, pdev_network->Ssid.Ssid, &sz);
+
+ /* supported rates */
+ if (pregistrypriv->wireless_mode == WIRELESS_11ABGN) {
+ if (pdev_network->Configuration.DSConfig > 14)
+ wireless_mode = WIRELESS_11A_5N;
+ else
+ wireless_mode = WIRELESS_11BG_24N;
+ } else {
+ wireless_mode = pregistrypriv->wireless_mode;
+ }
+
+ rtw_set_supported_rate(pdev_network->SupportedRates, wireless_mode);
+
+ rateLen = rtw_get_rateset_len(pdev_network->SupportedRates);
+
+ if (rateLen > 8) {
+ ie = rtw_set_ie(ie, _SUPPORTEDRATES_IE_, 8, pdev_network->SupportedRates, &sz);
+ /* ie = rtw_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8), (pdev_network->SupportedRates + 8), &sz); */
+ } else {
+ ie = rtw_set_ie(ie, _SUPPORTEDRATES_IE_, rateLen, pdev_network->SupportedRates, &sz);
+ }
+
+ /* DS parameter set */
+ ie = rtw_set_ie(ie, _DSSET_IE_, 1, (u8 *)&(pdev_network->Configuration.DSConfig), &sz);
+
+ /* IBSS Parameter Set */
+
+ ie = rtw_set_ie(ie, _IBSS_PARA_IE_, 2, (u8 *)&(pdev_network->Configuration.ATIMWindow), &sz);
+
+ if (rateLen > 8)
+ ie = rtw_set_ie(ie, _EXT_SUPPORTEDRATES_IE_, (rateLen - 8), (pdev_network->SupportedRates + 8), &sz);
+_func_exit_;
+
+ return sz;
+}
+
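
The first 12 bytes assembled by rtw_generate_ie() are the management-frame fixed fields (8-byte timestamp, 2-byte beacon interval, 2-byte capability), which is presumably why later code such as rtw_get_cipher_info() starts scanning tagged elements at offset 12. A trivial arithmetic sketch of that offset:

#include <stdio.h>

int main(void)
{
    const int timestamp_len = 8;       /* filled in by hardware */
    const int beacon_interval_len = 2;
    const int capability_len = 2;
    const int fixed_len = timestamp_len + beacon_interval_len + capability_len;

    /* Tagged elements (SSID, rates, DS parameter set, ...) follow here. */
    printf("tagged IEs start at offset %d\n", fixed_len); /* 12 */
    return 0;
}
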
+unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit)
+{
+ int len;
+ u16 val16;
+ __le16 le_tmp;
+ unsigned char wpa_oui_type[] = {0x00, 0x50, 0xf2, 0x01};
+ u8 *pbuf = pie;
+ int limit_new = limit;
+
+ while (1) {
+ pbuf = rtw_get_ie(pbuf, _WPA_IE_ID_, &len, limit_new);
+
+ if (pbuf) {
+ /* check if oui matches... */
+ if (_rtw_memcmp((pbuf + 2), wpa_oui_type, sizeof (wpa_oui_type)) == false)
+ goto check_next_ie;
+
+ /* check version... */
+ memcpy((u8 *)&le_tmp, (pbuf + 6), sizeof(val16));
+
+ val16 = le16_to_cpu(le_tmp);
+ if (val16 != 0x0001)
+ goto check_next_ie;
+ *wpa_ie_len = *(pbuf + 1);
+ return pbuf;
+ } else {
+ *wpa_ie_len = 0;
+ return NULL;
+ }
+
+check_next_ie:
+ limit_new = limit - (pbuf - pie) - 2 - len;
+ if (limit_new <= 0)
+ break;
+ pbuf += (2 + len);
+ }
+ *wpa_ie_len = 0;
+ return NULL;
+}
+
+unsigned char *rtw_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len, int limit)
+{
+
+ return rtw_get_ie(pie, _WPA2_IE_ID_, rsn_ie_len, limit);
+}
+
+int rtw_get_wpa_cipher_suite(u8 *s)
+{
+ if (_rtw_memcmp(s, WPA_CIPHER_SUITE_NONE, WPA_SELECTOR_LEN) == true)
+ return WPA_CIPHER_NONE;
+ if (_rtw_memcmp(s, WPA_CIPHER_SUITE_WEP40, WPA_SELECTOR_LEN) == true)
+ return WPA_CIPHER_WEP40;
+ if (_rtw_memcmp(s, WPA_CIPHER_SUITE_TKIP, WPA_SELECTOR_LEN) == true)
+ return WPA_CIPHER_TKIP;
+ if (_rtw_memcmp(s, WPA_CIPHER_SUITE_CCMP, WPA_SELECTOR_LEN) == true)
+ return WPA_CIPHER_CCMP;
+ if (_rtw_memcmp(s, WPA_CIPHER_SUITE_WEP104, WPA_SELECTOR_LEN) == true)
+ return WPA_CIPHER_WEP104;
+
+ return 0;
+}
+
+int rtw_get_wpa2_cipher_suite(u8 *s)
+{
+ if (_rtw_memcmp(s, RSN_CIPHER_SUITE_NONE, RSN_SELECTOR_LEN) == true)
+ return WPA_CIPHER_NONE;
+ if (_rtw_memcmp(s, RSN_CIPHER_SUITE_WEP40, RSN_SELECTOR_LEN) == true)
+ return WPA_CIPHER_WEP40;
+ if (_rtw_memcmp(s, RSN_CIPHER_SUITE_TKIP, RSN_SELECTOR_LEN) == true)
+ return WPA_CIPHER_TKIP;
+ if (_rtw_memcmp(s, RSN_CIPHER_SUITE_CCMP, RSN_SELECTOR_LEN) == true)
+ return WPA_CIPHER_CCMP;
+ if (_rtw_memcmp(s, RSN_CIPHER_SUITE_WEP104, RSN_SELECTOR_LEN) == true)
+ return WPA_CIPHER_WEP104;
+
+ return 0;
+}
+
+
+int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x)
+{
+ int i, ret = _SUCCESS;
+ int left, count;
+ u8 *pos;
+ u8 SUITE_1X[4] = {0x00, 0x50, 0xf2, 1};
+
+ if (wpa_ie_len <= 0) {
+ /* No WPA IE - fail silently */
+ return _FAIL;
+ }
+
+
+ if ((*wpa_ie != _WPA_IE_ID_) || (*(wpa_ie+1) != (u8)(wpa_ie_len - 2)) ||
+ (_rtw_memcmp(wpa_ie+2, RTW_WPA_OUI_TYPE, WPA_SELECTOR_LEN) != true))
+ return _FAIL;
+
+ pos = wpa_ie;
+
+ pos += 8;
+ left = wpa_ie_len - 8;
+
+
+ /* group_cipher */
+ if (left >= WPA_SELECTOR_LEN) {
+ *group_cipher = rtw_get_wpa_cipher_suite(pos);
+ pos += WPA_SELECTOR_LEN;
+ left -= WPA_SELECTOR_LEN;
+ } else if (left > 0) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie length mismatch, %u too much", __func__, left));
+ return _FAIL;
+ }
+
+ /* pairwise_cipher */
+ if (left >= 2) {
+ count = RTW_GET_LE16(pos);
+ pos += 2;
+ left -= 2;
+
+ if (count == 0 || left < count * WPA_SELECTOR_LEN) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie count botch (pairwise), "
+ "count %u left %u", __func__, count, left));
+ return _FAIL;
+ }
+
+ for (i = 0; i < count; i++) {
+ *pairwise_cipher |= rtw_get_wpa_cipher_suite(pos);
+
+ pos += WPA_SELECTOR_LEN;
+ left -= WPA_SELECTOR_LEN;
+ }
+ } else if (left == 1) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie too short (for key mgmt)", __func__));
+ return _FAIL;
+ }
+
+ if (is_8021x) {
+ if (left >= 6) {
+ pos += 2;
+ if (_rtw_memcmp(pos, SUITE_1X, 4) == 1) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s : there has 802.1x auth\n", __func__));
+ *is_8021x = 1;
+ }
+ }
+ }
+
+ return ret;
+}
+
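
The vendor-specific WPA IE handled by rtw_parse_wpa_ie() above has a fixed prefix (element ID 0xDD, length, the 00:50:F2 OUI with type 1, then a 2-byte little-endian version) followed by the group cipher suite, a little-endian pairwise-suite count and the pairwise suites. A compact standalone walk over a hand-built example IE (the suite choices below are illustrative, not taken from a real beacon):

#include <stdint.h>
#include <stdio.h>

static unsigned get_le16(const uint8_t *p) { return (unsigned)p[0] | ((unsigned)p[1] << 8); }

int main(void)
{
    /* Hand-built example WPA IE: version 1, group TKIP (..F2 02),
     * one pairwise suite, CCMP (..F2 04). */
    const uint8_t wpa_ie[] = {
        0xDD, 0x10, 0x00, 0x50, 0xF2, 0x01, /* EID, len, WPA OUI + type 1 */
        0x01, 0x00,                         /* version, little endian     */
        0x00, 0x50, 0xF2, 0x02,             /* group cipher suite         */
        0x01, 0x00,                         /* pairwise suite count (LE)  */
        0x00, 0x50, 0xF2, 0x04,             /* pairwise cipher suite      */
    };
    const uint8_t *pos = wpa_ie + 8;        /* skip header and version    */

    printf("group suite type:    %u\n", (unsigned)pos[3]); /* 2 = TKIP */
    printf("pairwise count:      %u\n", get_le16(pos + 4));
    printf("pairwise suite type: %u\n", (unsigned)pos[9]); /* 4 = CCMP */
    return 0;
}
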
+int rtw_parse_wpa2_ie(u8 *rsn_ie, int rsn_ie_len, int *group_cipher, int *pairwise_cipher, int *is_8021x)
+{
+ int i, ret = _SUCCESS;
+ int left, count;
+ u8 *pos;
+ u8 SUITE_1X[4] = {0x00, 0x0f, 0xac, 0x01};
+
+ if (rsn_ie_len <= 0) {
+ /* No RSN IE - fail silently */
+ return _FAIL;
+ }
+
+
+ if ((*rsn_ie != _WPA2_IE_ID_) || (*(rsn_ie+1) != (u8)(rsn_ie_len - 2)))
+ return _FAIL;
+
+ pos = rsn_ie;
+ pos += 4;
+ left = rsn_ie_len - 4;
+
+ /* group_cipher */
+ if (left >= RSN_SELECTOR_LEN) {
+ *group_cipher = rtw_get_wpa2_cipher_suite(pos);
+
+ pos += RSN_SELECTOR_LEN;
+ left -= RSN_SELECTOR_LEN;
+
+ } else if (left > 0) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie length mismatch, %u too much", __func__, left));
+ return _FAIL;
+ }
+
+ /* pairwise_cipher */
+ if (left >= 2) {
+ count = RTW_GET_LE16(pos);
+ pos += 2;
+ left -= 2;
+
+ if (count == 0 || left < count * RSN_SELECTOR_LEN) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie count botch (pairwise), "
+ "count %u left %u", __func__, count, left));
+ return _FAIL;
+ }
+
+ for (i = 0; i < count; i++) {
+ *pairwise_cipher |= rtw_get_wpa2_cipher_suite(pos);
+
+ pos += RSN_SELECTOR_LEN;
+ left -= RSN_SELECTOR_LEN;
+ }
+
+ } else if (left == 1) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s: ie too short (for key mgmt)", __func__));
+
+ return _FAIL;
+ }
+
+ if (is_8021x) {
+ if (left >= 6) {
+ pos += 2;
+ if (_rtw_memcmp(pos, SUITE_1X, 4) == 1) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s (): there has 802.1x auth\n", __func__));
+ *is_8021x = 1;
+ }
+ }
+ }
+ return ret;
+}
+
+int rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len, u8 *wpa_ie, u16 *wpa_len)
+{
+ u8 authmode, sec_idx, i;
+ u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01};
+ uint cnt;
+
+_func_enter_;
+
+ /* Search required WPA or WPA2 IE and copy to sec_ie[] */
+
+ cnt = (_TIMESTAMP_ + _BEACON_ITERVAL_ + _CAPABILITY_);
+
+ sec_idx = 0;
+
+ while (cnt < in_len) {
+ authmode = in_ie[cnt];
+
+ if ((authmode == _WPA_IE_ID_) && (_rtw_memcmp(&in_ie[cnt+2], &wpa_oui[0], 4))) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("\n rtw_get_wpa_ie: sec_idx =%d in_ie[cnt+1]+2 =%d\n",
+ sec_idx, in_ie[cnt+1]+2));
+
+ if (wpa_ie) {
+ memcpy(wpa_ie, &in_ie[cnt], in_ie[cnt+1]+2);
+
+ for (i = 0; i < (in_ie[cnt+1]+2); i += 8) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("\n %2x,%2x,%2x,%2x,%2x,%2x,%2x,%2x\n",
+ wpa_ie[i], wpa_ie[i+1], wpa_ie[i+2], wpa_ie[i+3], wpa_ie[i+4],
+ wpa_ie[i+5], wpa_ie[i+6], wpa_ie[i+7]));
+ }
+ }
+
+ *wpa_len = in_ie[cnt+1]+2;
+ cnt += in_ie[cnt+1]+2; /* get next */
+ } else {
+ if (authmode == _WPA2_IE_ID_) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("\n get_rsn_ie: sec_idx =%d in_ie[cnt+1]+2 =%d\n",
+ sec_idx, in_ie[cnt+1]+2));
+
+ if (rsn_ie) {
+ memcpy(rsn_ie, &in_ie[cnt], in_ie[cnt+1]+2);
+
+ for (i = 0; i < (in_ie[cnt+1]+2); i += 8) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("\n %2x,%2x,%2x,%2x,%2x,%2x,%2x,%2x\n",
+ rsn_ie[i], rsn_ie[i+1], rsn_ie[i+2], rsn_ie[i+3], rsn_ie[i+4],
+ rsn_ie[i+5], rsn_ie[i+6], rsn_ie[i+7]));
+ }
+ }
+
+ *rsn_len = in_ie[cnt+1]+2;
+ cnt += in_ie[cnt+1]+2; /* get next */
+ } else {
+ cnt += in_ie[cnt+1]+2; /* get next */
+ }
+ }
+ }
+
+_func_exit_;
+
+ return *rsn_len + *wpa_len;
+}
+
+u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen)
+{
+ u8 match = false;
+ u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
+
+ if (ie_ptr == NULL)
+ return match;
+
+ eid = ie_ptr[0];
+
+ if ((eid == _WPA_IE_ID_) && (_rtw_memcmp(&ie_ptr[2], wps_oui, 4))) {
+ *wps_ielen = ie_ptr[1]+2;
+ match = true;
+ }
+ return match;
+}
+
+/**
+ * rtw_get_wps_ie - Search WPS IE from a series of IEs
+ * @in_ie: Address of IEs to search
+ * @in_len: Length limit from in_ie
+ * @wps_ie: If not NULL and WPS IE is found, WPS IE will be copied to the buf starting from wps_ie
+ * @wps_ielen: If not NULL and WPS IE is found, will set to the length of the entire WPS IE
+ *
+ * Returns: The address of the WPS IE found, or NULL
+ */
+u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen)
+{
+ uint cnt;
+ u8 *wpsie_ptr = NULL;
+ u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
+
+ if (wps_ielen)
+ *wps_ielen = 0;
+
+ if (!in_ie || in_len <= 0)
+ return wpsie_ptr;
+
+ cnt = 0;
+
+ while (cnt < in_len) {
+ eid = in_ie[cnt];
+
+ if ((eid == _WPA_IE_ID_) && (_rtw_memcmp(&in_ie[cnt+2], wps_oui, 4))) {
+ wpsie_ptr = &in_ie[cnt];
+
+ if (wps_ie)
+ memcpy(wps_ie, &in_ie[cnt], in_ie[cnt+1]+2);
+
+ if (wps_ielen)
+ *wps_ielen = in_ie[cnt+1]+2;
+
+ cnt += in_ie[cnt+1]+2;
+
+ break;
+ } else {
+ cnt += in_ie[cnt+1]+2; /* goto next */
+ }
+ }
+ return wpsie_ptr;
+}
+
+/**
+ * rtw_get_wps_attr - Search a specific WPS attribute from a given WPS IE
+ * @wps_ie: Address of WPS IE to search
+ * @wps_ielen: Length limit from wps_ie
+ * @target_attr_id: The attribute ID of WPS attribute to search
+ * @buf_attr: If not NULL and the WPS attribute is found, WPS attribute will be copied to the buf starting from buf_attr
+ * @len_attr: If not NULL and the WPS attribute is found, will set to the length of the entire WPS attribute
+ *
+ * Returns: the address of the specific WPS attribute found, or NULL
+ */
+u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_attr, u32 *len_attr)
+{
+ u8 *attr_ptr = NULL;
+ u8 *target_attr_ptr = NULL;
+ u8 wps_oui[4] = {0x00, 0x50, 0xF2, 0x04};
+
+ if (len_attr)
+ *len_attr = 0;
+
+ if ((wps_ie[0] != _VENDOR_SPECIFIC_IE_) ||
+ (_rtw_memcmp(wps_ie + 2, wps_oui, 4) != true))
+ return attr_ptr;
+
+ /* 6 = 1(Element ID) + 1(Length) + 4(WPS OUI) */
+ attr_ptr = wps_ie + 6; /* goto first attr */
+
+ while (attr_ptr - wps_ie < wps_ielen) {
+ /* 4 = 2(Attribute ID) + 2(Length) */
+ u16 attr_id = RTW_GET_BE16(attr_ptr);
+ u16 attr_data_len = RTW_GET_BE16(attr_ptr + 2);
+ u16 attr_len = attr_data_len + 4;
+
+ if (attr_id == target_attr_id) {
+ target_attr_ptr = attr_ptr;
+ if (buf_attr)
+ memcpy(buf_attr, attr_ptr, attr_len);
+ if (len_attr)
+ *len_attr = attr_len;
+ break;
+ } else {
+ attr_ptr += attr_len; /* goto next */
+ }
+ }
+ return target_attr_ptr;
+}
+
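
WPS attributes inside the vendor IE, as walked by rtw_get_wps_attr() above, are big-endian TLVs: a 2-byte attribute ID, a 2-byte data length, then the data, so each step advances by data length + 4. A standalone walker over a tiny hand-built attribute list (the attribute IDs and payloads are chosen only for illustration):

#include <stdint.h>
#include <stdio.h>

static unsigned get_be16(const uint8_t *p) { return ((unsigned)p[0] << 8) | (unsigned)p[1]; }

int main(void)
{
    /* Two example WPS-style attributes, 1 data byte each. */
    const uint8_t attrs[] = {
        0x10, 0x4A, 0x00, 0x01, 0x10,
        0x10, 0x44, 0x00, 0x01, 0x02,
    };
    const uint8_t *p = attrs;
    const uint8_t *end = attrs + sizeof(attrs);

    while (p + 4 <= end) {
        unsigned id  = get_be16(p);
        unsigned len = get_be16(p + 2);

        printf("attr 0x%04X, data length %u\n", id, len);
        p += 4 + len;   /* same stride as rtw_get_wps_attr() */
    }
    return 0;
}
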
+/**
+ * rtw_get_wps_attr_content - Search a specific WPS attribute content from a given WPS IE
+ * @wps_ie: Address of WPS IE to search
+ * @wps_ielen: Length limit from wps_ie
+ * @target_attr_id: The attribute ID of WPS attribute to search
+ * @buf_content: If not NULL and the WPS attribute is found, WPS attribute content will be copied to the buf starting from buf_content
+ * @len_content: If not NULL and the WPS attribute is found, will set to the length of the WPS attribute content
+ *
+ * Returns: the address of the specific WPS attribute content found, or NULL
+ */
+u8 *rtw_get_wps_attr_content(u8 *wps_ie, uint wps_ielen, u16 target_attr_id, u8 *buf_content, uint *len_content)
+{
+ u8 *attr_ptr;
+ u32 attr_len;
+
+ if (len_content)
+ *len_content = 0;
+
+ attr_ptr = rtw_get_wps_attr(wps_ie, wps_ielen, target_attr_id, NULL, &attr_len);
+
+ if (attr_ptr && attr_len) {
+ if (buf_content)
+ memcpy(buf_content, attr_ptr+4, attr_len-4);
+
+ if (len_content)
+ *len_content = attr_len-4;
+
+ return attr_ptr+4;
+ }
+
+ return NULL;
+}
+
+static int rtw_ieee802_11_parse_vendor_specific(u8 *pos, uint elen,
+ struct rtw_ieee802_11_elems *elems,
+ int show_errors)
+{
+ unsigned int oui;
+
+ /* first 3 bytes in vendor specific information element are the IEEE
+ * OUI of the vendor. The following byte is used as a vendor-specific
+ * sub-type. */
+ if (elen < 4) {
+ if (show_errors) {
+ DBG_88E("short vendor specific information element ignored (len=%lu)\n",
+ (unsigned long) elen);
+ }
+ return -1;
+ }
+
+ oui = RTW_GET_BE24(pos);
+ switch (oui) {
+ case OUI_MICROSOFT:
+ /* Microsoft/Wi-Fi information elements are further typed and
+ * subtyped */
+ switch (pos[3]) {
+ case 1:
+ /* Microsoft OUI (00:50:F2) with OUI Type 1:
+ * real WPA information element */
+ elems->wpa_ie = pos;
+ elems->wpa_ie_len = elen;
+ break;
+ case WME_OUI_TYPE: /* this is a Wi-Fi WME info. element */
+ if (elen < 5) {
+ DBG_88E("short WME information element ignored (len=%lu)\n",
+ (unsigned long) elen);
+ return -1;
+ }
+ switch (pos[4]) {
+ case WME_OUI_SUBTYPE_INFORMATION_ELEMENT:
+ case WME_OUI_SUBTYPE_PARAMETER_ELEMENT:
+ elems->wme = pos;
+ elems->wme_len = elen;
+ break;
+ case WME_OUI_SUBTYPE_TSPEC_ELEMENT:
+ elems->wme_tspec = pos;
+ elems->wme_tspec_len = elen;
+ break;
+ default:
+ DBG_88E("unknown WME information element ignored (subtype=%d len=%lu)\n",
+ pos[4], (unsigned long) elen);
+ return -1;
+ }
+ break;
+ case 4:
+ /* Wi-Fi Protected Setup (WPS) IE */
+ elems->wps_ie = pos;
+ elems->wps_ie_len = elen;
+ break;
+ default:
+ DBG_88E("Unknown Microsoft information element ignored (type=%d len=%lu)\n",
+ pos[3], (unsigned long) elen);
+ return -1;
+ }
+ break;
+
+ case OUI_BROADCOM:
+ switch (pos[3]) {
+ case VENDOR_HT_CAPAB_OUI_TYPE:
+ elems->vendor_ht_cap = pos;
+ elems->vendor_ht_cap_len = elen;
+ break;
+ default:
+ DBG_88E("Unknown Broadcom information element ignored (type=%d len=%lu)\n",
+ pos[3], (unsigned long) elen);
+ return -1;
+ }
+ break;
+ default:
+ DBG_88E("unknown vendor specific information element ignored (vendor OUI %02x:%02x:%02x len=%lu)\n",
+ pos[0], pos[1], pos[2], (unsigned long) elen);
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * ieee802_11_parse_elems - Parse information elements in management frames
+ * @start: Pointer to the start of IEs
+ * @len: Length of IE buffer in octets
+ * @elems: Data structure for parsed elements
+ * @show_errors: Whether to show parsing errors in debug log
+ * Returns: Parsing result
+ */
+enum parse_res rtw_ieee802_11_parse_elems(u8 *start, uint len,
+ struct rtw_ieee802_11_elems *elems,
+ int show_errors)
+{
+ uint left = len;
+ u8 *pos = start;
+ int unknown = 0;
+
+ _rtw_memset(elems, 0, sizeof(*elems));
+
+ while (left >= 2) {
+ u8 id, elen;
+
+ id = *pos++;
+ elen = *pos++;
+ left -= 2;
+
+ if (elen > left) {
+ if (show_errors) {
+ DBG_88E("IEEE 802.11 element parse failed (id=%d elen=%d left=%lu)\n",
+ id, elen, (unsigned long) left);
+ }
+ return ParseFailed;
+ }
+
+ switch (id) {
+ case WLAN_EID_SSID:
+ elems->ssid = pos;
+ elems->ssid_len = elen;
+ break;
+ case WLAN_EID_SUPP_RATES:
+ elems->supp_rates = pos;
+ elems->supp_rates_len = elen;
+ break;
+ case WLAN_EID_FH_PARAMS:
+ elems->fh_params = pos;
+ elems->fh_params_len = elen;
+ break;
+ case WLAN_EID_DS_PARAMS:
+ elems->ds_params = pos;
+ elems->ds_params_len = elen;
+ break;
+ case WLAN_EID_CF_PARAMS:
+ elems->cf_params = pos;
+ elems->cf_params_len = elen;
+ break;
+ case WLAN_EID_TIM:
+ elems->tim = pos;
+ elems->tim_len = elen;
+ break;
+ case WLAN_EID_IBSS_PARAMS:
+ elems->ibss_params = pos;
+ elems->ibss_params_len = elen;
+ break;
+ case WLAN_EID_CHALLENGE:
+ elems->challenge = pos;
+ elems->challenge_len = elen;
+ break;
+ case WLAN_EID_ERP_INFO:
+ elems->erp_info = pos;
+ elems->erp_info_len = elen;
+ break;
+ case WLAN_EID_EXT_SUPP_RATES:
+ elems->ext_supp_rates = pos;
+ elems->ext_supp_rates_len = elen;
+ break;
+ case WLAN_EID_VENDOR_SPECIFIC:
+ if (rtw_ieee802_11_parse_vendor_specific(pos, elen, elems, show_errors))
+ unknown++;
+ break;
+ case WLAN_EID_RSN:
+ elems->rsn_ie = pos;
+ elems->rsn_ie_len = elen;
+ break;
+ case WLAN_EID_PWR_CAPABILITY:
+ elems->power_cap = pos;
+ elems->power_cap_len = elen;
+ break;
+ case WLAN_EID_SUPPORTED_CHANNELS:
+ elems->supp_channels = pos;
+ elems->supp_channels_len = elen;
+ break;
+ case WLAN_EID_MOBILITY_DOMAIN:
+ elems->mdie = pos;
+ elems->mdie_len = elen;
+ break;
+ case WLAN_EID_FAST_BSS_TRANSITION:
+ elems->ftie = pos;
+ elems->ftie_len = elen;
+ break;
+ case WLAN_EID_TIMEOUT_INTERVAL:
+ elems->timeout_int = pos;
+ elems->timeout_int_len = elen;
+ break;
+ case WLAN_EID_HT_CAP:
+ elems->ht_capabilities = pos;
+ elems->ht_capabilities_len = elen;
+ break;
+ case WLAN_EID_HT_OPERATION:
+ elems->ht_operation = pos;
+ elems->ht_operation_len = elen;
+ break;
+ default:
+ unknown++;
+ if (!show_errors)
+ break;
+ DBG_88E("IEEE 802.11 element parse ignored unknown element (id=%d elen=%d)\n",
+ id, elen);
+ break;
+ }
+ left -= elen;
+ pos += elen;
+ }
+ if (left)
+ return ParseFailed;
+ return unknown ? ParseUnknown : ParseOK;
+}
+
+u8 key_char2num(u8 ch)
+{
+ if ((ch >= '0') && (ch <= '9'))
+ return ch - '0';
+ else if ((ch >= 'a') && (ch <= 'f'))
+ return ch - 'a' + 10;
+ else if ((ch >= 'A') && (ch <= 'F'))
+ return ch - 'A' + 10;
+ else
+ return 0xff;
+}
+
+u8 str_2char2num(u8 hch, u8 lch)
+{
+ return (key_char2num(hch) * 10) + key_char2num(lch);
+}
+
+u8 key_2char2num(u8 hch, u8 lch)
+{
+ return (key_char2num(hch) << 4) | key_char2num(lch);
+}
+
+void rtw_macaddr_cfg(u8 *mac_addr)
+{
+ u8 mac[ETH_ALEN];
+ if (mac_addr == NULL)
+ return;
+
+ if (rtw_initmac) { /* Users specify the mac address */
+ int jj, kk;
+
+ for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
+ mac[jj] = key_2char2num(rtw_initmac[kk], rtw_initmac[kk + 1]);
+ memcpy(mac_addr, mac, ETH_ALEN);
+ } else { /* Use the mac address stored in the Efuse */
+ memcpy(mac, mac_addr, ETH_ALEN);
+ }
+
+ if (((mac[0] == 0xff) && (mac[1] == 0xff) && (mac[2] == 0xff) &&
+ (mac[3] == 0xff) && (mac[4] == 0xff) && (mac[5] == 0xff)) ||
+ ((mac[0] == 0x0) && (mac[1] == 0x0) && (mac[2] == 0x0) &&
+ (mac[3] == 0x0) && (mac[4] == 0x0) && (mac[5] == 0x0))) {
+ mac[0] = 0x00;
+ mac[1] = 0xe0;
+ mac[2] = 0x4c;
+ mac[3] = 0x87;
+ mac[4] = 0x00;
+ mac[5] = 0x00;
+ /* use default mac address */
+ memcpy(mac_addr, mac, ETH_ALEN);
+ DBG_88E("MAC Address from efuse error, assign default one !!!\n");
+ }
+
+ DBG_88E("rtw_macaddr_cfg MAC Address = %pM\n", (mac_addr));
+}
+
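
When a MAC address is supplied as a module parameter, rtw_macaddr_cfg() above converts each "xx" pair of an "xx:xx:xx:xx:xx:xx" string with key_2char2num(), stepping three input characters per output byte. A standalone version of the same conversion (the address string below is an arbitrary example):

#include <stdint.h>
#include <stdio.h>

static uint8_t char2num(char ch)
{
    if (ch >= '0' && ch <= '9') return (uint8_t)(ch - '0');
    if (ch >= 'a' && ch <= 'f') return (uint8_t)(ch - 'a' + 10);
    if (ch >= 'A' && ch <= 'F') return (uint8_t)(ch - 'A' + 10);
    return 0xFF;
}

int main(void)
{
    const char *s = "00:e0:4c:12:34:56"; /* arbitrary example address string */
    uint8_t mac[6];
    int j, k;

    /* Same stepping as rtw_macaddr_cfg(): three input chars per output byte. */
    for (j = 0, k = 0; j < 6; j++, k += 3)
        mac[j] = (uint8_t)((char2num(s[k]) << 4) | char2num(s[k + 1]));

    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
           mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    return 0;
}
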
+void dump_ies(u8 *buf, u32 buf_len)
+{
+ u8 *pos = (u8 *)buf;
+ u8 id, len;
+
+ while (pos-buf <= buf_len) {
+ id = *pos;
+ len = *(pos+1);
+
+ DBG_88E("%s ID:%u, LEN:%u\n", __func__, id, len);
+ #ifdef CONFIG_88EU_P2P
+ dump_p2p_ie(pos, len);
+ #endif
+ dump_wps_ie(pos, len);
+
+ pos += (2 + len);
+ }
+}
+
+void dump_wps_ie(u8 *ie, u32 ie_len)
+{
+ u8 *pos = (u8 *)ie;
+ u16 id;
+ u16 len;
+ u8 *wps_ie;
+ uint wps_ielen;
+
+ wps_ie = rtw_get_wps_ie(ie, ie_len, NULL, &wps_ielen);
+ if (wps_ie != ie || wps_ielen == 0)
+ return;
+
+ pos += 6;
+ while (pos-ie < ie_len) {
+ id = RTW_GET_BE16(pos);
+ len = RTW_GET_BE16(pos + 2);
+ DBG_88E("%s ID:0x%04x, LEN:%u\n", __func__, id, len);
+ pos += (4+len);
+ }
+}
+
+#ifdef CONFIG_88EU_P2P
+void dump_p2p_ie(u8 *ie, u32 ie_len)
+{
+ u8 *pos = (u8 *)ie;
+ u8 id;
+ u16 len;
+ u8 *p2p_ie;
+ uint p2p_ielen;
+
+ p2p_ie = rtw_get_p2p_ie(ie, ie_len, NULL, &p2p_ielen);
+ if (p2p_ie != ie || p2p_ielen == 0)
+ return;
+
+ pos += 6;
+ while (pos-ie < ie_len) {
+ id = *pos;
+ len = RTW_GET_LE16(pos+1);
+ DBG_88E("%s ID:%u, LEN:%u\n", __func__, id, len);
+ pos += (3+len);
+ }
+}
+
+/**
+ * rtw_get_p2p_ie - Search P2P IE from a series of IEs
+ * @in_ie: Address of IEs to search
+ * @in_len: Length limit from in_ie
+ * @p2p_ie: If not NULL and P2P IE is found, P2P IE will be copied to the buf starting from p2p_ie
+ * @p2p_ielen: If not NULL and P2P IE is found, will set to the length of the entire P2P IE
+ *
+ * Returns: The address of the P2P IE found, or NULL
+ */
+u8 *rtw_get_p2p_ie(u8 *in_ie, int in_len, u8 *p2p_ie, uint *p2p_ielen)
+{
+ uint cnt = 0;
+ u8 *p2p_ie_ptr;
+ u8 eid, p2p_oui[4] = {0x50, 0x6F, 0x9A, 0x09};
+
+ if (p2p_ielen != NULL)
+ *p2p_ielen = 0;
+
+ while (cnt < in_len) {
+ eid = in_ie[cnt];
+ if ((in_len < 0) || (cnt > MAX_IE_SZ)) {
+ dump_stack();
+ return NULL;
+ }
+ if ((eid == _VENDOR_SPECIFIC_IE_) && (_rtw_memcmp(&in_ie[cnt+2], p2p_oui, 4) == true)) {
+ p2p_ie_ptr = in_ie + cnt;
+
+ if (p2p_ie != NULL)
+ memcpy(p2p_ie, &in_ie[cnt], in_ie[cnt + 1] + 2);
+ if (p2p_ielen != NULL)
+ *p2p_ielen = in_ie[cnt + 1] + 2;
+ return p2p_ie_ptr;
+ } else {
+ cnt += in_ie[cnt + 1] + 2; /* goto next */
+ }
+ }
+ return NULL;
+}
+
+/**
+ * rtw_get_p2p_attr - Search a specific P2P attribute from a given P2P IE
+ * @p2p_ie: Address of P2P IE to search
+ * @p2p_ielen: Length limit from p2p_ie
+ * @target_attr_id: The attribute ID of P2P attribute to search
+ * @buf_attr: If not NULL and the P2P attribute is found, P2P attribute will be copied to the buf starting from buf_attr
+ * @len_attr: If not NULL and the P2P attribute is found, will set to the length of the entire P2P attribute
+ *
+ * Returns: the address of the specific WPS attribute found, or NULL
+ */
+u8 *rtw_get_p2p_attr(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id, u8 *buf_attr, u32 *len_attr)
+{
+ u8 *attr_ptr = NULL;
+ u8 *target_attr_ptr = NULL;
+ u8 p2p_oui[4] = {0x50, 0x6F, 0x9A, 0x09};
+
+ if (len_attr)
+ *len_attr = 0;
+
+ if (!p2p_ie || (p2p_ie[0] != _VENDOR_SPECIFIC_IE_) ||
+ (_rtw_memcmp(p2p_ie + 2, p2p_oui, 4) != true))
+ return attr_ptr;
+
+ /* 6 = 1(Element ID) + 1(Length) + 3 (OUI) + 1(OUI Type) */
+ attr_ptr = p2p_ie + 6; /* goto first attr */
+
+ while (attr_ptr - p2p_ie < p2p_ielen) {
+ /* 3 = 1(Attribute ID) + 2(Length) */
+ u8 attr_id = *attr_ptr;
+ u16 attr_data_len = RTW_GET_LE16(attr_ptr + 1);
+ u16 attr_len = attr_data_len + 3;
+
+ if (attr_id == target_attr_id) {
+ target_attr_ptr = attr_ptr;
+
+ if (buf_attr)
+ memcpy(buf_attr, attr_ptr, attr_len);
+ if (len_attr)
+ *len_attr = attr_len;
+ break;
+ } else {
+ attr_ptr += attr_len; /* goto next */
+ }
+ }
+ return target_attr_ptr;
+}
+
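
P2P attributes, as walked by rtw_get_p2p_attr() above, use a different TLV shape than WPS: a 1-byte attribute ID followed by a 2-byte little-endian length, so the stride is data length + 3. A minimal standalone contrast sketch (the IDs and payloads are illustrative only):

#include <stdint.h>
#include <stdio.h>

static unsigned get_le16(const uint8_t *p) { return (unsigned)p[0] | ((unsigned)p[1] << 8); }

int main(void)
{
    /* Two example P2P-style attributes: ID 0x02 with 2 data bytes,
     * then ID 0x0D with 1 data byte. */
    const uint8_t attrs[] = {
        0x02, 0x02, 0x00, 0x25, 0x00,
        0x0D, 0x01, 0x00, 0x01,
    };
    const uint8_t *p = attrs;
    const uint8_t *end = attrs + sizeof(attrs);

    while (p + 3 <= end) {
        unsigned id  = p[0];
        unsigned len = get_le16(p + 1);

        printf("attr 0x%02X, data length %u\n", id, len);
        p += 3 + len;   /* same stride as rtw_get_p2p_attr() */
    }
    return 0;
}
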
+/**
+ * rtw_get_p2p_attr_content - Search a specific P2P attribute content from a given P2P IE
+ * @p2p_ie: Address of P2P IE to search
+ * @p2p_ielen: Length limit from p2p_ie
+ * @target_attr_id: The attribute ID of P2P attribute to search
+ * @buf_content: If not NULL and the P2P attribute is found, P2P attribute content will be copied to the buf starting from buf_content
+ * @len_content: If not NULL and the P2P attribute is found, will set to the length of the P2P attribute content
+ *
+ * Returns: the address of the specific P2P attribute content found, or NULL
+ */
+u8 *rtw_get_p2p_attr_content(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id, u8 *buf_content, uint *len_content)
+{
+ u8 *attr_ptr;
+ u32 attr_len;
+
+ if (len_content)
+ *len_content = 0;
+
+ attr_ptr = rtw_get_p2p_attr(p2p_ie, p2p_ielen, target_attr_id, NULL, &attr_len);
+
+ if (attr_ptr && attr_len) {
+ if (buf_content)
+ memcpy(buf_content, attr_ptr+3, attr_len-3);
+
+ if (len_content)
+ *len_content = attr_len-3;
+
+ return attr_ptr+3;
+ }
+
+ return NULL;
+}
+
+u32 rtw_set_p2p_attr_content(u8 *pbuf, u8 attr_id, u16 attr_len, u8 *pdata_attr)
+{
+ u32 a_len;
+
+ *pbuf = attr_id;
+
+ /* u16*)(pbuf + 1) = cpu_to_le16(attr_len); */
+ RTW_PUT_LE16(pbuf + 1, attr_len);
+
+ if (pdata_attr)
+ memcpy(pbuf + 3, pdata_attr, attr_len);
+
+ a_len = attr_len + 3;
+
+ return a_len;
+}
+
+static uint rtw_p2p_attr_remove(u8 *ie, uint ielen_ori, u8 attr_id)
+{
+ u8 *target_attr;
+ u32 target_attr_len;
+ uint ielen = ielen_ori;
+
+ while (1) {
+ target_attr = rtw_get_p2p_attr(ie, ielen, attr_id, NULL, &target_attr_len);
+ if (target_attr && target_attr_len) {
+ u8 *next_attr = target_attr+target_attr_len;
+ uint remain_len = ielen-(next_attr-ie);
+
+ _rtw_memset(target_attr, 0, target_attr_len);
+ memcpy(target_attr, next_attr, remain_len);
+ _rtw_memset(target_attr+remain_len, 0, target_attr_len);
+ *(ie+1) -= target_attr_len;
+ ielen -= target_attr_len;
+ } else {
+ break;
+ }
+ }
+ return ielen;
+}
+
+void rtw_wlan_bssid_ex_remove_p2p_attr(struct wlan_bssid_ex *bss_ex, u8 attr_id)
+{
+ u8 *p2p_ie;
+ uint p2p_ielen, p2p_ielen_ori;
+
+ p2p_ie = rtw_get_p2p_ie(bss_ex->IEs+_FIXED_IE_LENGTH_, bss_ex->IELength-_FIXED_IE_LENGTH_, NULL, &p2p_ielen_ori);
+ if (p2p_ie) {
+ p2p_ielen = rtw_p2p_attr_remove(p2p_ie, p2p_ielen_ori, attr_id);
+ if (p2p_ielen != p2p_ielen_ori) {
+ u8 *next_ie_ori = p2p_ie+p2p_ielen_ori;
+ u8 *next_ie = p2p_ie+p2p_ielen;
+ uint remain_len = bss_ex->IELength-(next_ie_ori-bss_ex->IEs);
+
+ memcpy(next_ie, next_ie_ori, remain_len);
+ _rtw_memset(next_ie+remain_len, 0, p2p_ielen_ori-p2p_ielen);
+ bss_ex->IELength -= p2p_ielen_ori-p2p_ielen;
+ }
+ }
+}
+
+#endif /* CONFIG_88EU_P2P */
+
+/* Baron adds to avoid FreeBSD warning */
+int ieee80211_is_empty_essid(const char *essid, int essid_len)
+{
+ /* Single white space is for Linksys APs */
+ if (essid_len == 1 && essid[0] == ' ')
+ return 1;
+
+ /* Otherwise, if the entire essid is 0, we assume it is hidden */
+ while (essid_len) {
+ essid_len--;
+ if (essid[essid_len] != '\0')
+ return 0;
+ }
+
+ return 1;
+}
+
+int ieee80211_get_hdrlen(u16 fc)
+{
+ int hdrlen = 24;
+
+ switch (WLAN_FC_GET_TYPE(fc)) {
+ case RTW_IEEE80211_FTYPE_DATA:
+ if (fc & RTW_IEEE80211_STYPE_QOS_DATA)
+ hdrlen += 2;
+ if ((fc & RTW_IEEE80211_FCTL_FROMDS) && (fc & RTW_IEEE80211_FCTL_TODS))
+ hdrlen += 6; /* Addr4 */
+ break;
+ case RTW_IEEE80211_FTYPE_CTL:
+ switch (WLAN_FC_GET_STYPE(fc)) {
+ case RTW_IEEE80211_STYPE_CTS:
+ case RTW_IEEE80211_STYPE_ACK:
+ hdrlen = 10;
+ break;
+ default:
+ hdrlen = 16;
+ break;
+ }
+ break;
+ }
+
+ return hdrlen;
+}
+
+static int rtw_get_cipher_info(struct wlan_network *pnetwork)
+{
+ u32 wpa_ielen;
+ unsigned char *pbuf;
+ int group_cipher = 0, pairwise_cipher = 0, is8021x = 0;
+ int ret = _FAIL;
+ pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12);
+
+ if (pbuf && (wpa_ielen > 0)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_cipher_info: wpa_ielen: %d", wpa_ielen));
+ if (_SUCCESS == rtw_parse_wpa_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is8021x)) {
+ pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
+ pnetwork->BcnInfo.group_cipher = group_cipher;
+ pnetwork->BcnInfo.is_8021x = is8021x;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: pnetwork->pairwise_cipher: %d, is_8021x is %d",
+ __func__, pnetwork->BcnInfo.pairwise_cipher, pnetwork->BcnInfo.is_8021x));
+ ret = _SUCCESS;
+ }
+ } else {
+ pbuf = rtw_get_wpa2_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12);
+
+ if (pbuf && (wpa_ielen > 0)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("get RSN IE\n"));
+ if (_SUCCESS == rtw_parse_wpa2_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is8021x)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("get RSN IE OK!!!\n"));
+ pnetwork->BcnInfo.pairwise_cipher = pairwise_cipher;
+ pnetwork->BcnInfo.group_cipher = group_cipher;
+ pnetwork->BcnInfo.is_8021x = is8021x;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s: pnetwork->pairwise_cipher: %d,"
+ "pnetwork->group_cipher is %d, is_8021x is %d", __func__, pnetwork->BcnInfo.pairwise_cipher,
+ pnetwork->BcnInfo.group_cipher, pnetwork->BcnInfo.is_8021x));
+ ret = _SUCCESS;
+ }
+ }
+ }
+
+ return ret;
+}
+
+void rtw_get_bcn_info(struct wlan_network *pnetwork)
+{
+ unsigned short cap = 0;
+ u8 bencrypt = 0;
+ __le16 le_tmp;
+ u16 wpa_len = 0, rsn_len = 0;
+ struct HT_info_element *pht_info = NULL;
+ struct rtw_ieee80211_ht_cap *pht_cap = NULL;
+ unsigned int len;
+ unsigned char *p;
+
+ memcpy(&le_tmp, rtw_get_capability_from_ie(pnetwork->network.IEs), 2);
+ cap = le16_to_cpu(le_tmp);
+ if (cap & WLAN_CAPABILITY_PRIVACY) {
+ bencrypt = 1;
+ pnetwork->network.Privacy = 1;
+ } else {
+ pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_OPENSYS;
+ }
+ rtw_get_sec_ie(pnetwork->network.IEs, pnetwork->network.IELength, NULL, &rsn_len, NULL, &wpa_len);
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_bcn_info: ssid =%s\n", pnetwork->network.Ssid.Ssid));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_bcn_info: wpa_len =%d rsn_len =%d\n", wpa_len, rsn_len));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_bcn_info: ssid =%s\n", pnetwork->network.Ssid.Ssid));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_bcn_info: wpa_len =%d rsn_len =%d\n", wpa_len, rsn_len));
+
+ if (rsn_len > 0) {
+ pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WPA2;
+ } else if (wpa_len > 0) {
+ pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WPA;
+ } else {
+ if (bencrypt)
+ pnetwork->BcnInfo.encryp_protocol = ENCRYP_PROTOCOL_WEP;
+ }
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_bcn_info: pnetwork->encryp_protocol is %x\n",
+ pnetwork->BcnInfo.encryp_protocol));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_get_bcn_info: pnetwork->encryp_protocol is %x\n",
+ pnetwork->BcnInfo.encryp_protocol));
+ rtw_get_cipher_info(pnetwork);
+
+ /* get bwmode and ch_offset */
+ /* parsing HT_CAP_IE */
+ p = rtw_get_ie(pnetwork->network.IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pnetwork->network.IELength - _FIXED_IE_LENGTH_);
+ if (p && len > 0) {
+ pht_cap = (struct rtw_ieee80211_ht_cap *)(p + 2);
+ pnetwork->BcnInfo.ht_cap_info = pht_cap->cap_info;
+ } else {
+ pnetwork->BcnInfo.ht_cap_info = 0;
+ }
+ /* parsing HT_INFO_IE */
+ p = rtw_get_ie(pnetwork->network.IEs + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, pnetwork->network.IELength - _FIXED_IE_LENGTH_);
+ if (p && len > 0) {
+ pht_info = (struct HT_info_element *)(p + 2);
+ pnetwork->BcnInfo.ht_info_infos_0 = pht_info->infos[0];
+ } else {
+ pnetwork->BcnInfo.ht_info_infos_0 = 0;
+ }
+}
+
+/* show MCS rate, unit: 100Kbps */
+u16 rtw_mcs_rate(u8 rf_type, u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40, unsigned char *MCS_rate)
+{
+ u16 max_rate = 0;
+
+ if (rf_type == RF_1T1R) {
+ if (MCS_rate[0] & BIT(7))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1500 : 1350) : ((short_GI_20) ? 722 : 650);
+ else if (MCS_rate[0] & BIT(6))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1350 : 1215) : ((short_GI_20) ? 650 : 585);
+ else if (MCS_rate[0] & BIT(5))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1200 : 1080) : ((short_GI_20) ? 578 : 520);
+ else if (MCS_rate[0] & BIT(4))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 900 : 810) : ((short_GI_20) ? 433 : 390);
+ else if (MCS_rate[0] & BIT(3))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 600 : 540) : ((short_GI_20) ? 289 : 260);
+ else if (MCS_rate[0] & BIT(2))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 450 : 405) : ((short_GI_20) ? 217 : 195);
+ else if (MCS_rate[0] & BIT(1))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 300 : 270) : ((short_GI_20) ? 144 : 130);
+ else if (MCS_rate[0] & BIT(0))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 150 : 135) : ((short_GI_20) ? 72 : 65);
+ } else {
+ if (MCS_rate[1]) {
+ if (MCS_rate[1] & BIT(7))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 3000 : 2700) : ((short_GI_20) ? 1444 : 1300);
+ else if (MCS_rate[1] & BIT(6))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 2700 : 2430) : ((short_GI_20) ? 1300 : 1170);
+ else if (MCS_rate[1] & BIT(5))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 2400 : 2160) : ((short_GI_20) ? 1156 : 1040);
+ else if (MCS_rate[1] & BIT(4))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1800 : 1620) : ((short_GI_20) ? 867 : 780);
+ else if (MCS_rate[1] & BIT(3))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1200 : 1080) : ((short_GI_20) ? 578 : 520);
+ else if (MCS_rate[1] & BIT(2))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 900 : 810) : ((short_GI_20) ? 433 : 390);
+ else if (MCS_rate[1] & BIT(1))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 600 : 540) : ((short_GI_20) ? 289 : 260);
+ else if (MCS_rate[1] & BIT(0))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 300 : 270) : ((short_GI_20) ? 144 : 130);
+ } else {
+ if (MCS_rate[0] & BIT(7))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1500 : 1350) : ((short_GI_20) ? 722 : 650);
+ else if (MCS_rate[0] & BIT(6))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1350 : 1215) : ((short_GI_20) ? 650 : 585);
+ else if (MCS_rate[0] & BIT(5))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 1200 : 1080) : ((short_GI_20) ? 578 : 520);
+ else if (MCS_rate[0] & BIT(4))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 900 : 810) : ((short_GI_20) ? 433 : 390);
+ else if (MCS_rate[0] & BIT(3))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 600 : 540) : ((short_GI_20) ? 289 : 260);
+ else if (MCS_rate[0] & BIT(2))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 450 : 405) : ((short_GI_20) ? 217 : 195);
+ else if (MCS_rate[0] & BIT(1))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 300 : 270) : ((short_GI_20) ? 144 : 130);
+ else if (MCS_rate[0] & BIT(0))
+ max_rate = (bw_40MHz) ? ((short_GI_40) ? 150 : 135) : ((short_GI_20) ? 72 : 65);
+ }
+ }
+ return max_rate;
+}
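+
+/*
+ * Illustrative sketch (editor's example, not part of the original patch):
+ * for a 1T1R interface supporting MCS0-7 (MCS_rate[0] == 0xff), 40 MHz
+ * bandwidth and short GI, rtw_mcs_rate() above returns 1500, i.e. 150 Mbps
+ * expressed in units of 100 kbps.
+ */
+#if 0 /* example only */
+static u16 example_max_mcs_rate(void)
+{
+ unsigned char mcs_set[16] = { 0xff, 0x00 }; /* MCS0-7 only */
+
+ return rtw_mcs_rate(RF_1T1R, 1, 0, 1, mcs_set); /* -> 1500 */
+}
+#endif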
+
+int rtw_action_frame_parse(const u8 *frame, u32 frame_len, u8 *category, u8 *action)
+{
+ const u8 *frame_body = frame + sizeof(struct rtw_ieee80211_hdr_3addr);
+ u16 fc;
+ u8 c, a = 0;
+
+ fc = le16_to_cpu(((struct rtw_ieee80211_hdr_3addr *)frame)->frame_ctl);
+
+ if ((fc & (RTW_IEEE80211_FCTL_FTYPE|RTW_IEEE80211_FCTL_STYPE)) !=
+ (RTW_IEEE80211_FTYPE_MGMT|RTW_IEEE80211_STYPE_ACTION))
+ return false;
+
+ c = frame_body[0];
+
+ switch (c) {
+ case RTW_WLAN_CATEGORY_P2P: /* vendor-specific */
+ break;
+ default:
+ a = frame_body[1];
+ }
+
+ if (category)
+ *category = c;
+ if (action)
+ *action = a;
+
+ return true;
+}
+
+static const char *_action_public_str[] = {
+ "ACT_PUB_BSSCOEXIST",
+ "ACT_PUB_DSE_ENABLE",
+ "ACT_PUB_DSE_DEENABLE",
+ "ACT_PUB_DSE_REG_LOCATION",
+ "ACT_PUB_EXT_CHL_SWITCH",
+ "ACT_PUB_DSE_MSR_REQ",
+ "ACT_PUB_DSE_MSR_RPRT",
+ "ACT_PUB_MP",
+ "ACT_PUB_DSE_PWR_CONSTRAINT",
+ "ACT_PUB_VENDOR",
+ "ACT_PUB_GAS_INITIAL_REQ",
+ "ACT_PUB_GAS_INITIAL_RSP",
+ "ACT_PUB_GAS_COMEBACK_REQ",
+ "ACT_PUB_GAS_COMEBACK_RSP",
+ "ACT_PUB_TDLS_DISCOVERY_RSP",
+ "ACT_PUB_LOCATION_TRACK",
+ "ACT_PUB_RSVD",
+};
+
+const char *action_public_str(u8 action)
+{
+ action = (action >= ACT_PUBLIC_MAX) ? ACT_PUBLIC_MAX : action;
+ return _action_public_str[action];
+}
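+
+/*
+ * Illustrative sketch (editor's example, not part of the original patch):
+ * rtw_action_frame_parse() extracts the category and action codes of a
+ * management action frame; for public action frames the action code can be
+ * mapped to a name via action_public_str(). RTW_WLAN_CATEGORY_PUBLIC and
+ * DBG_88E are assumed to be defined elsewhere in the driver headers.
+ */
+#if 0 /* example only */
+static void example_dump_public_action(const u8 *frame, u32 frame_len)
+{
+ u8 category, action;
+
+ if (!rtw_action_frame_parse(frame, frame_len, &category, &action))
+ return;
+
+ if (category == RTW_WLAN_CATEGORY_PUBLIC)
+ DBG_88E("public action: %s\n", action_public_str(action));
+}
+#endif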
diff --git a/drivers/staging/rtl8188eu/core/rtw_io.c b/drivers/staging/rtl8188eu/core/rtw_io.c
new file mode 100644
index 00000000000..10c9c6560b2
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_io.c
@@ -0,0 +1,329 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+/*
+ * The purpose of rtw_io.c:
+ *
+ * a. provides the API
+ * b. provides the protocol engine
+ * c. provides the software interface between the caller and the hardware interface
+ *
+ * Compiler flag options:
+ *
+ * USB:
+ *	a. USE_ASYNC_IRP: both sync and async operations are provided.
+ *
+ * Only sync read/rtw_write_mem operations are provided.
+ *
+ * jackson@realtek.com.tw
+ *
+ * (An illustrative usage sketch of the register accessors follows
+ * _rtw_writeN() below.)
+ */
+
+#define _RTW_IO_C_
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtw_io.h>
+#include <osdep_intf.h>
+#include <usb_ops.h>
+
+#define rtw_le16_to_cpu(val) le16_to_cpu(val)
+#define rtw_le32_to_cpu(val) le32_to_cpu(val)
+#define rtw_cpu_to_le16(val) cpu_to_le16(val)
+#define rtw_cpu_to_le32(val) cpu_to_le32(val)
+
+
+u8 _rtw_read8(struct adapter *adapter, u32 addr)
+{
+ u8 r_val;
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ u8 (*_read8)(struct intf_hdl *pintfhdl, u32 addr);
+
+ _func_enter_;
+ _read8 = pintfhdl->io_ops._read8;
+ r_val = _read8(pintfhdl, addr);
+ _func_exit_;
+ return r_val;
+}
+
+u16 _rtw_read16(struct adapter *adapter, u32 addr)
+{
+ u16 r_val;
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ u16 (*_read16)(struct intf_hdl *pintfhdl, u32 addr);
+_func_enter_;
+ _read16 = pintfhdl->io_ops._read16;
+
+ r_val = _read16(pintfhdl, addr);
+_func_exit_;
+ return r_val;
+}
+
+u32 _rtw_read32(struct adapter *adapter, u32 addr)
+{
+ u32 r_val;
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ u32 (*_read32)(struct intf_hdl *pintfhdl, u32 addr);
+_func_enter_;
+ _read32 = pintfhdl->io_ops._read32;
+
+ r_val = _read32(pintfhdl, addr);
+_func_exit_;
+ return r_val;
+}
+
+int _rtw_write8(struct adapter *adapter, u32 addr, u8 val)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ int (*_write8)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
+ int ret;
+ _func_enter_;
+ _write8 = pintfhdl->io_ops._write8;
+
+ ret = _write8(pintfhdl, addr, val);
+ _func_exit_;
+
+ return RTW_STATUS_CODE(ret);
+}
+
+int _rtw_write16(struct adapter *adapter, u32 addr, u16 val)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ int (*_write16)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
+ int ret;
+ _func_enter_;
+ _write16 = pintfhdl->io_ops._write16;
+
+ ret = _write16(pintfhdl, addr, val);
+ _func_exit_;
+
+ return RTW_STATUS_CODE(ret);
+}
+int _rtw_write32(struct adapter *adapter, u32 addr, u32 val)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ int (*_write32)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
+ int ret;
+ _func_enter_;
+ _write32 = pintfhdl->io_ops._write32;
+
+ ret = _write32(pintfhdl, addr, val);
+ _func_exit_;
+
+ return RTW_STATUS_CODE(ret);
+}
+
+int _rtw_writeN(struct adapter *adapter, u32 addr, u32 length, u8 *pdata)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = (struct intf_hdl *)(&(pio_priv->intf));
+ int (*_writeN)(struct intf_hdl *pintfhdl, u32 addr, u32 length, u8 *pdata);
+ int ret;
+ _func_enter_;
+ _writeN = pintfhdl->io_ops._writeN;
+
+ ret = _writeN(pintfhdl, addr, length, pdata);
+ _func_exit_;
+
+ return RTW_STATUS_CODE(ret);
+}
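+
+/*
+ * Illustrative sketch (editor's example, not part of the original patch):
+ * the accessors above dispatch through the io_ops installed by
+ * rtw_init_io_priv(), so a caller only needs the adapter and a register
+ * address. The register offset and bit used below are hypothetical.
+ */
+#if 0 /* example only */
+static void example_set_reg_bit(struct adapter *adapter)
+{
+ u8 val = _rtw_read8(adapter, 0x0100); /* read the current value */
+
+ _rtw_write8(adapter, 0x0100, val | BIT(0)); /* set bit 0 and write back */
+}
+#endif
+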
+int _rtw_write8_async(struct adapter *adapter, u32 addr, u8 val)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ int (*_write8_async)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
+ int ret;
+ _func_enter_;
+ _write8_async = pintfhdl->io_ops._write8_async;
+
+ ret = _write8_async(pintfhdl, addr, val);
+ _func_exit_;
+
+ return RTW_STATUS_CODE(ret);
+}
+
+int _rtw_write16_async(struct adapter *adapter, u32 addr, u16 val)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ int (*_write16_async)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
+ int ret;
+
+_func_enter_;
+ _write16_async = pintfhdl->io_ops._write16_async;
+ ret = _write16_async(pintfhdl, addr, val);
+_func_exit_;
+
+ return RTW_STATUS_CODE(ret);
+}
+
+int _rtw_write32_async(struct adapter *adapter, u32 addr, u32 val)
+{
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ int (*_write32_async)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
+ int ret;
+
+_func_enter_;
+ _write32_async = pintfhdl->io_ops._write32_async;
+ ret = _write32_async(pintfhdl, addr, val);
+_func_exit_;
+
+ return RTW_STATUS_CODE(ret);
+}
+
+void _rtw_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
+{
+ void (*_read_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+
+ _func_enter_;
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved) {
+ RT_TRACE(_module_rtl871x_io_c_, _drv_info_,
+ ("rtw_read_mem:bDriverStopped(%d) OR bSurpriseRemoved(%d)",
+ adapter->bDriverStopped, adapter->bSurpriseRemoved));
+ return;
+ }
+ _read_mem = pintfhdl->io_ops._read_mem;
+ _read_mem(pintfhdl, addr, cnt, pmem);
+ _func_exit_;
+}
+
+void _rtw_write_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
+{
+ void (*_write_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+
+ _func_enter_;
+
+ _write_mem = pintfhdl->io_ops._write_mem;
+
+ _write_mem(pintfhdl, addr, cnt, pmem);
+
+ _func_exit_;
+}
+
+void _rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
+{
+ u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+
+ _func_enter_;
+
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved) {
+ RT_TRACE(_module_rtl871x_io_c_, _drv_info_,
+ ("rtw_read_port:bDriverStopped(%d) OR bSurpriseRemoved(%d)",
+ adapter->bDriverStopped, adapter->bSurpriseRemoved));
+ return;
+ }
+
+ _read_port = pintfhdl->io_ops._read_port;
+
+ _read_port(pintfhdl, addr, cnt, pmem);
+
+ _func_exit_;
+}
+
+void _rtw_read_port_cancel(struct adapter *adapter)
+{
+ void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+
+ _read_port_cancel = pintfhdl->io_ops._read_port_cancel;
+
+ if (_read_port_cancel)
+ _read_port_cancel(pintfhdl);
+}
+
+u32 _rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
+{
+ u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+ u32 ret = _SUCCESS;
+
+ _func_enter_;
+
+ _write_port = pintfhdl->io_ops._write_port;
+
+ ret = _write_port(pintfhdl, addr, cnt, pmem);
+
+ _func_exit_;
+
+ return ret;
+}
+
+u32 _rtw_write_port_and_wait(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem, int timeout_ms)
+{
+ int ret = _SUCCESS;
+ struct xmit_buf *pxmitbuf = (struct xmit_buf *)pmem;
+ struct submit_ctx sctx;
+
+ rtw_sctx_init(&sctx, timeout_ms);
+ pxmitbuf->sctx = &sctx;
+
+ ret = _rtw_write_port(adapter, addr, cnt, pmem);
+
+ if (ret == _SUCCESS)
+ ret = rtw_sctx_wait(&sctx);
+
+ return ret;
+}
+
+void _rtw_write_port_cancel(struct adapter *adapter)
+{
+ void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
+ struct io_priv *pio_priv = &adapter->iopriv;
+ struct intf_hdl *pintfhdl = &(pio_priv->intf);
+
+ _write_port_cancel = pintfhdl->io_ops._write_port_cancel;
+
+ if (_write_port_cancel)
+ _write_port_cancel(pintfhdl);
+}
+
+int rtw_init_io_priv(struct adapter *padapter, void (*set_intf_ops)(struct _io_ops *pops))
+{
+ struct io_priv *piopriv = &padapter->iopriv;
+ struct intf_hdl *pintf = &piopriv->intf;
+
+ if (set_intf_ops == NULL)
+ return _FAIL;
+
+ piopriv->padapter = padapter;
+ pintf->padapter = padapter;
+ pintf->pintf_dev = adapter_to_dvobj(padapter);
+
+ set_intf_ops(&pintf->io_ops);
+
+ return _SUCCESS;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
new file mode 100644
index 00000000000..193f641bd0d
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
@@ -0,0 +1,1169 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_IOCTL_SET_C_
+
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtw_ioctl_set.h>
+#include <hal_intf.h>
+
+#include <usb_osintf.h>
+#include <usb_ops.h>
+
+extern void indicate_wx_scan_complete_event(struct adapter *padapter);
+
+#define IS_MAC_ADDRESS_BROADCAST(addr) \
+(\
+ ((addr[0] == 0xff) && (addr[1] == 0xff) && \
+ (addr[2] == 0xff) && (addr[3] == 0xff) && \
+ (addr[4] == 0xff) && (addr[5] == 0xff)) ? true : false \
+)
+
+u8 rtw_validate_ssid(struct ndis_802_11_ssid *ssid)
+{
+ u8 i;
+ u8 ret = true;
+
+_func_enter_;
+
+ if (ssid->SsidLength > 32) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("ssid length >32\n"));
+ ret = false;
+ goto exit;
+ }
+
+ for (i = 0; i < ssid->SsidLength; i++) {
+ /* for Wi-Fi, the ssid must consist of printable ASCII characters */
+ if (!((ssid->Ssid[i] >= 0x20) && (ssid->Ssid[i] <= 0x7e))) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("ssid has nonprintabl ascii\n"));
+ ret = false;
+ break;
+ }
+ }
+
+exit:
+
+_func_exit_;
+
+ return ret;
+}
+
+u8 rtw_do_join(struct adapter *padapter)
+{
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ u8 *pibss = NULL;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ u8 ret = _SUCCESS;
+
+_func_enter_;
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("\n rtw_do_join: phead = %p; plist = %p\n\n\n", phead, plist));
+
+ pmlmepriv->cur_network.join_res = -2;
+
+ set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
+
+ pmlmepriv->pscanned = plist;
+
+ pmlmepriv->to_join = true;
+
+ if (_rtw_queue_empty(queue)) {
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+
+ /* when set_ssid/set_bssid triggers rtw_do_join() but the scanned queue is empty, */
+ /* try to issue a sitesurvey first */
+
+ if (!pmlmepriv->LinkDetectInfo.bBusyTraffic ||
+ pmlmepriv->to_roaming > 0) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("rtw_do_join(): site survey if scanned_queue is empty\n."));
+ /* submit site_survey_cmd */
+ ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0);
+ if (_SUCCESS != ret) {
+ pmlmepriv->to_join = false;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("rtw_do_join(): site survey return error\n."));
+ }
+ } else {
+ pmlmepriv->to_join = false;
+ ret = _FAIL;
+ }
+
+ goto exit;
+ } else {
+ int select_ret;
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ select_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv);
+ if (select_ret == _SUCCESS) {
+ pmlmepriv->to_join = false;
+ _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
+ } else {
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) {
+ /* submit createbss_cmd to change to an ADHOC_MASTER */
+
+ /* pmlmepriv->lock has been acquired by caller... */
+ struct wlan_bssid_ex *pdev_network = &(padapter->registrypriv.dev_network);
+
+ pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE;
+
+ pibss = padapter->registrypriv.dev_network.MacAddress;
+
+ _rtw_memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
+ memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
+
+ rtw_update_registrypriv_dev_network(padapter);
+
+ rtw_generate_random_ibss(pibss);
+
+ if (rtw_createbss_cmd(padapter) != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("***Error =>do_goin: rtw_createbss_cmd status FAIL***\n "));
+ ret = false;
+ goto exit;
+ }
+ pmlmepriv->to_join = false;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("***Error => rtw_select_and_join_from_scanned_queue FAIL under STA_Mode***\n "));
+ } else {
+ /* can't associate; reset under-linking */
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+
+ /* when set_ssid/set_bssid triggers rtw_do_join() but there is no desired bss in the scanned queue, */
+ /* try to issue a sitesurvey first */
+ if (!pmlmepriv->LinkDetectInfo.bBusyTraffic ||
+ pmlmepriv->to_roaming > 0) {
+ ret = rtw_sitesurvey_cmd(padapter, &pmlmepriv->assoc_ssid, 1, NULL, 0);
+ if (_SUCCESS != ret) {
+ pmlmepriv->to_join = false;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("do_join(): site survey return error\n."));
+ }
+ } else {
+ ret = _FAIL;
+ pmlmepriv->to_join = false;
+ }
+ }
+ }
+ }
+
+exit:
+
+_func_exit_;
+
+ return ret;
+}
+
+u8 rtw_set_802_11_bssid(struct adapter *padapter, u8 *bssid)
+{
+ unsigned long irqL;
+ u8 status = _SUCCESS;
+ u32 cur_time = 0;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+_func_enter_;
+
+ DBG_88E_LEVEL(_drv_info_, "set bssid:%pM\n", bssid);
+
+ if ((bssid[0] == 0x00 && bssid[1] == 0x00 && bssid[2] == 0x00 &&
+ bssid[3] == 0x00 && bssid[4] == 0x00 && bssid[5] == 0x00) ||
+ (bssid[0] == 0xFF && bssid[1] == 0xFF && bssid[2] == 0xFF &&
+ bssid[3] == 0xFF && bssid[4] == 0xFF && bssid[5] == 0xFF)) {
+ status = _FAIL;
+ goto exit;
+ }
+
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+
+
+ DBG_88E("Set BSSID under fw_state = 0x%08x\n", get_fwstate(pmlmepriv));
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true)
+ goto handle_tkip_countermeasure;
+ else if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
+ goto release_mlme_lock;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE)) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_bssid: _FW_LINKED||WIFI_ADHOC_MASTER_STATE\n"));
+
+ if (_rtw_memcmp(&pmlmepriv->cur_network.network.MacAddress, bssid, ETH_ALEN)) {
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == false)
+ goto release_mlme_lock;/* the driver is in WIFI_ADHOC_MASTER_STATE; no need to create the bss again. */
+ } else {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("Set BSSID not the same bssid\n"));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_bssid =%pM\n", (bssid)));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("cur_bssid =%pM\n", (pmlmepriv->cur_network.network.MacAddress)));
+
+ rtw_disassoc_cmd(padapter, 0, true);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ rtw_indicate_disconnect(padapter);
+
+ rtw_free_assoc_resources(padapter, 1);
+
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
+ _clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
+ set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
+ }
+ }
+ }
+
+handle_tkip_countermeasure:
+ /* should we add something here...? */
+
+ if (padapter->securitypriv.btkip_countermeasure) {
+ cur_time = rtw_get_current_time();
+
+ if ((cur_time - padapter->securitypriv.btkip_countermeasure_time) > 60 * HZ) {
+ padapter->securitypriv.btkip_countermeasure = false;
+ padapter->securitypriv.btkip_countermeasure_time = 0;
+ } else {
+ status = _FAIL;
+ goto release_mlme_lock;
+ }
+ }
+
+ memcpy(&pmlmepriv->assoc_bssid, bssid, ETH_ALEN);
+ pmlmepriv->assoc_by_bssid = true;
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
+ pmlmepriv->to_join = true;
+ else
+ status = rtw_do_join(padapter);
+
+release_mlme_lock:
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+
+exit:
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("rtw_set_802_11_bssid: status=%d\n", status));
+
+_func_exit_;
+
+ return status;
+}
+
+u8 rtw_set_802_11_ssid(struct adapter *padapter, struct ndis_802_11_ssid *ssid)
+{
+ unsigned long irqL;
+ u8 status = _SUCCESS;
+ u32 cur_time = 0;
+
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_network *pnetwork = &pmlmepriv->cur_network;
+
+_func_enter_;
+
+ DBG_88E_LEVEL(_drv_info_, "set ssid [%s] fw_state=0x%08x\n",
+ ssid->Ssid, get_fwstate(pmlmepriv));
+
+ if (!padapter->hw_init_completed) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("set_ssid: hw_init_completed == false =>exit!!!\n"));
+ status = _FAIL;
+ goto exit;
+ }
+
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+
+ DBG_88E("Set SSID under fw_state = 0x%08x\n", get_fwstate(pmlmepriv));
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
+ goto handle_tkip_countermeasure;
+ } else if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true) {
+ goto release_mlme_lock;
+ }
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE)) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("set_ssid: _FW_LINKED||WIFI_ADHOC_MASTER_STATE\n"));
+
+ if ((pmlmepriv->assoc_ssid.SsidLength == ssid->SsidLength) &&
+ (_rtw_memcmp(&pmlmepriv->assoc_ssid.Ssid, ssid->Ssid, ssid->SsidLength))) {
+ if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == false)) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("Set SSID is the same ssid, fw_state = 0x%08x\n",
+ get_fwstate(pmlmepriv)));
+
+ if (!rtw_is_same_ibss(padapter, pnetwork)) {
+ /* if in WIFI_ADHOC_MASTER_STATE | WIFI_ADHOC_STATE, create bss or rejoin again */
+ rtw_disassoc_cmd(padapter, 0, true);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ rtw_indicate_disconnect(padapter);
+
+ rtw_free_assoc_resources(padapter, 1);
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) {
+ _clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
+ set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
+ }
+ } else {
+ goto release_mlme_lock;/* the driver is in WIFI_ADHOC_MASTER_STATE; no need to create the bss again. */
+ }
+ } else {
+ rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_JOINBSS, 1);
+ }
+ } else {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("Set SSID not the same ssid\n"));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_ssid =[%s] len = 0x%x\n", ssid->Ssid, (unsigned int)ssid->SsidLength));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("assoc_ssid =[%s] len = 0x%x\n", pmlmepriv->assoc_ssid.Ssid, (unsigned int)pmlmepriv->assoc_ssid.SsidLength));
+
+ rtw_disassoc_cmd(padapter, 0, true);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ rtw_indicate_disconnect(padapter);
+
+ rtw_free_assoc_resources(padapter, 1);
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) {
+ _clr_fwstate_(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
+ set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
+ }
+ }
+ }
+
+handle_tkip_countermeasure:
+
+ if (padapter->securitypriv.btkip_countermeasure) {
+ cur_time = rtw_get_current_time();
+
+ if ((cur_time - padapter->securitypriv.btkip_countermeasure_time) > 60 * HZ) {
+ padapter->securitypriv.btkip_countermeasure = false;
+ padapter->securitypriv.btkip_countermeasure_time = 0;
+ } else {
+ status = _FAIL;
+ goto release_mlme_lock;
+ }
+ }
+
+ memcpy(&pmlmepriv->assoc_ssid, ssid, sizeof(struct ndis_802_11_ssid));
+ pmlmepriv->assoc_by_bssid = false;
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
+ pmlmepriv->to_join = true;
+ } else {
+ status = rtw_do_join(padapter);
+ }
+
+release_mlme_lock:
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+
+exit:
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("-rtw_set_802_11_ssid: status =%d\n", status));
+_func_exit_;
+ return status;
+}
+
+u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter,
+ enum ndis_802_11_network_infra networktype)
+{
+ unsigned long irqL;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_network *cur_network = &pmlmepriv->cur_network;
+ enum ndis_802_11_network_infra *pold_state = &(cur_network->network.InfrastructureMode);
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_notice_,
+ ("+rtw_set_802_11_infrastructure_mode: old =%d new =%d fw_state = 0x%08x\n",
+ *pold_state, networktype, get_fwstate(pmlmepriv)));
+
+ if (*pold_state != networktype) {
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, (" change mode!"));
+ /* DBG_88E("change mode, old_mode =%d, new_mode =%d, fw_state = 0x%x\n", *pold_state, networktype, get_fwstate(pmlmepriv)); */
+
+ if (*pold_state == Ndis802_11APMode) {
+ /* change to other mode from Ndis802_11APMode */
+ cur_network->join_res = -1;
+
+#ifdef CONFIG_88EU_AP_MODE
+ stop_ap_mode(padapter);
+#endif
+ }
+
+ if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
+ (*pold_state == Ndis802_11IBSS))
+ rtw_disassoc_cmd(padapter, 0, true);
+
+ if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)))
+ rtw_free_assoc_resources(padapter, 1);
+
+ if ((*pold_state == Ndis802_11Infrastructure) || (*pold_state == Ndis802_11IBSS)) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ rtw_indicate_disconnect(padapter); /* will clear Linked_state; before this call, we must have checked whether to issue dis-assoc_cmd or not */
+ }
+
+ *pold_state = networktype;
+
+ _clr_fwstate_(pmlmepriv, ~WIFI_NULL_STATE);
+
+ switch (networktype) {
+ case Ndis802_11IBSS:
+ set_fwstate(pmlmepriv, WIFI_ADHOC_STATE);
+ break;
+ case Ndis802_11Infrastructure:
+ set_fwstate(pmlmepriv, WIFI_STATION_STATE);
+ break;
+ case Ndis802_11APMode:
+ set_fwstate(pmlmepriv, WIFI_AP_STATE);
+#ifdef CONFIG_88EU_AP_MODE
+ start_ap_mode(padapter);
+#endif
+ break;
+ case Ndis802_11AutoUnknown:
+ case Ndis802_11InfrastructureMax:
+ break;
+ }
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ }
+
+_func_exit_;
+
+ return true;
+}
+
+
+u8 rtw_set_802_11_disassociate(struct adapter *padapter)
+{
+ unsigned long irqL;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+_func_enter_;
+
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("MgntActrtw_set_802_11_disassociate: rtw_indicate_disconnect\n"));
+
+ rtw_disassoc_cmd(padapter, 0, true);
+ rtw_indicate_disconnect(padapter);
+ rtw_free_assoc_resources(padapter, 1);
+ rtw_pwr_wakeup(padapter);
+ }
+
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+
+_func_exit_;
+
+ return true;
+}
+
+u8 rtw_set_802_11_bssid_list_scan(struct adapter *padapter, struct ndis_802_11_ssid *pssid, int ssid_max_num)
+{
+ unsigned long irqL;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ u8 res = true;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("+rtw_set_802_11_bssid_list_scan(), fw_state =%x\n", get_fwstate(pmlmepriv)));
+
+ if (padapter == NULL) {
+ res = false;
+ goto exit;
+ }
+ if (!padapter->hw_init_completed) {
+ res = false;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n === rtw_set_802_11_bssid_list_scan:hw_init_completed == false ===\n"));
+ goto exit;
+ }
+
+ if ((check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING)) ||
+ (pmlmepriv->LinkDetectInfo.bBusyTraffic)) {
+ /* Scan or linking is in progress, do nothing. */
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("rtw_set_802_11_bssid_list_scan fail since fw_state = %x\n", get_fwstate(pmlmepriv)));
+ res = true;
+
+ if (check_fwstate(pmlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING)) == true) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n###_FW_UNDER_SURVEY|_FW_UNDER_LINKING\n\n"));
+ } else {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n###pmlmepriv->sitesurveyctrl.traffic_busy == true\n\n"));
+ }
+ } else {
+ if (rtw_is_scan_deny(padapter)) {
+ DBG_88E(FUNC_ADPT_FMT": scan deny\n", FUNC_ADPT_ARG(padapter));
+ indicate_wx_scan_complete_event(padapter);
+ return _SUCCESS;
+ }
+
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+
+ res = rtw_sitesurvey_cmd(padapter, pssid, ssid_max_num, NULL, 0);
+
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ }
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtw_set_802_11_authentication_mode(struct adapter *padapter, enum ndis_802_11_auth_mode authmode)
+{
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ int res;
+ u8 ret;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("set_802_11_auth.mode(): mode =%x\n", authmode));
+
+ psecuritypriv->ndisauthtype = authmode;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("rtw_set_802_11_authentication_mode:psecuritypriv->ndisauthtype=%d",
+ psecuritypriv->ndisauthtype));
+
+ if (psecuritypriv->ndisauthtype > 3)
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+
+ res = rtw_set_auth(padapter, psecuritypriv);
+
+ if (res == _SUCCESS)
+ ret = true;
+ else
+ ret = false;
+
+_func_exit_;
+
+ return ret;
+}
+
+u8 rtw_set_802_11_add_wep(struct adapter *padapter, struct ndis_802_11_wep *wep)
+{
+ int keyid, res;
+ struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ u8 ret = _SUCCESS;
+
+_func_enter_;
+
+ keyid = wep->KeyIndex & 0x3fffffff;
+
+ if (keyid >= 4) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("MgntActrtw_set_802_11_add_wep:keyid>4 =>fail\n"));
+ ret = false;
+ goto exit;
+ }
+
+ switch (wep->KeyLength) {
+ case 5:
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("MgntActrtw_set_802_11_add_wep:wep->KeyLength = 5\n"));
+ break;
+ case 13:
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP104_;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("MgntActrtw_set_802_11_add_wep:wep->KeyLength = 13\n"));
+ break;
+ default:
+ psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_, ("MgntActrtw_set_802_11_add_wep:wep->KeyLength!= 5 or 13\n"));
+ break;
+ }
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("rtw_set_802_11_add_wep:befor memcpy, wep->KeyLength = 0x%x wep->KeyIndex = 0x%x keyid =%x\n",
+ wep->KeyLength, wep->KeyIndex, keyid));
+
+ memcpy(&(psecuritypriv->dot11DefKey[keyid].skey[0]), &(wep->KeyMaterial), wep->KeyLength);
+
+ psecuritypriv->dot11DefKeylen[keyid] = wep->KeyLength;
+
+ psecuritypriv->dot11PrivacyKeyIndex = keyid;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("rtw_set_802_11_add_wep:security key material : %x %x %x %x %x %x %x %x %x %x %x %x %x\n",
+ psecuritypriv->dot11DefKey[keyid].skey[0],
+ psecuritypriv->dot11DefKey[keyid].skey[1],
+ psecuritypriv->dot11DefKey[keyid].skey[2],
+ psecuritypriv->dot11DefKey[keyid].skey[3],
+ psecuritypriv->dot11DefKey[keyid].skey[4],
+ psecuritypriv->dot11DefKey[keyid].skey[5],
+ psecuritypriv->dot11DefKey[keyid].skey[6],
+ psecuritypriv->dot11DefKey[keyid].skey[7],
+ psecuritypriv->dot11DefKey[keyid].skey[8],
+ psecuritypriv->dot11DefKey[keyid].skey[9],
+ psecuritypriv->dot11DefKey[keyid].skey[10],
+ psecuritypriv->dot11DefKey[keyid].skey[11],
+ psecuritypriv->dot11DefKey[keyid].skey[12]));
+
+ res = rtw_set_key(padapter, psecuritypriv, keyid, 1);
+
+ if (res == _FAIL)
+ ret = false;
+exit:
+_func_exit_;
+ return ret;
+}
+
+u8 rtw_set_802_11_remove_wep(struct adapter *padapter, u32 keyindex)
+{
+ u8 ret = _SUCCESS;
+
+_func_enter_;
+ if (keyindex >= 0x80000000 || padapter == NULL) {
+ ret = false;
+ goto exit;
+ } else {
+ int res;
+ struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ if (keyindex < 4) {
+ _rtw_memset(&psecuritypriv->dot11DefKey[keyindex], 0, 16);
+ res = rtw_set_key(padapter, psecuritypriv, keyindex, 0);
+ psecuritypriv->dot11DefKeylen[keyindex] = 0;
+ if (res == _FAIL)
+ ret = _FAIL;
+ } else {
+ ret = _FAIL;
+ }
+ }
+exit:
+
+_func_exit_;
+ return ret;
+}
+
+u8 rtw_set_802_11_add_key(struct adapter *padapter, struct ndis_802_11_key *key)
+{
+ uint encryptionalgo;
+ u8 *pbssid;
+ struct sta_info *stainfo;
+ u8 bgroup = false;
+ u8 bgrouptkey = false;/* can be removed later */
+ u8 ret = _SUCCESS;
+
+_func_enter_;
+
+ if (((key->KeyIndex & 0x80000000) == 0) && ((key->KeyIndex & 0x40000000) > 0)) {
+ /* It is invalid to clear bit 31 and set bit 30. If the miniport driver encounters this combination, */
+ /* it must fail the request and return NDIS_STATUS_INVALID_DATA. */
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("rtw_set_802_11_add_key: ((key->KeyIndex & 0x80000000)==0)[=%d]",
+ (int)(key->KeyIndex & 0x80000000) == 0));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("rtw_set_802_11_add_key:((key->KeyIndex & 0x40000000)>0)[=%d]",
+ (int)(key->KeyIndex & 0x40000000) > 0));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_info_,
+ ("rtw_set_802_11_add_key: key->KeyIndex=%d\n",
+ (int)key->KeyIndex));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if (key->KeyIndex & 0x40000000) {
+ /* Pairwise key */
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("OID_802_11_ADD_KEY: +++++ Pairwise key +++++\n"));
+
+ pbssid = get_bssid(&padapter->mlmepriv);
+ stainfo = rtw_get_stainfo(&padapter->stapriv, pbssid);
+
+ if ((stainfo != NULL) && (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("OID_802_11_ADD_KEY:(stainfo!=NULL)&&(Adapter->securitypriv.dot11AuthAlgrthm==dot11AuthAlgrthm_8021X)\n"));
+ encryptionalgo = stainfo->dot118021XPrivacy;
+ } else {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("OID_802_11_ADD_KEY: stainfo == NULL)||(Adapter->securitypriv.dot11AuthAlgrthm!= dot11AuthAlgrthm_8021X)\n"));
+ encryptionalgo = padapter->securitypriv.dot11PrivacyAlgrthm;
+ }
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("rtw_set_802_11_add_key: (encryptionalgo==%d)!\n",
+ encryptionalgo));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("rtw_set_802_11_add_key: (Adapter->securitypriv.dot11PrivacyAlgrthm==%d)!\n",
+ padapter->securitypriv.dot11PrivacyAlgrthm));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("rtw_set_802_11_add_key: (Adapter->securitypriv.dot11AuthAlgrthm==%d)!\n",
+ padapter->securitypriv.dot11AuthAlgrthm));
+
+ if ((stainfo != NULL))
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("rtw_set_802_11_add_key: (stainfo->dot118021XPrivacy==%d)!\n",
+ stainfo->dot118021XPrivacy));
+
+ if (key->KeyIndex & 0x000000FF) {
+ /* The key index is specified in the lower 8 bits by values of zero to 255. */
+ /* The key index should be set to zero for a Pairwise key, and the driver should fail with */
+ /* NDIS_STATUS_INVALID_DATA if the lower 8 bits are not zero */
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, (" key->KeyIndex & 0x000000FF.\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* check BSSID */
+ if (IS_MAC_ADDRESS_BROADCAST(key->BSSID) == true) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("MacAddr_isBcst(key->BSSID)\n"));
+ ret = false;
+ goto exit;
+ }
+
+ /* Check key length for TKIP. */
+ if ((encryptionalgo == _TKIP_) && (key->KeyLength != 32)) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("TKIP KeyLength:0x%x != 32\n", key->KeyLength));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* Check key length for AES. */
+ if ((encryptionalgo == _AES_) && (key->KeyLength != 16)) {
+ /* Our supplicant, EAPPkt9x.vxd, cannot differentiate between the TKIP and AES cases. */
+ if (key->KeyLength == 32) {
+ key->KeyLength = 16;
+ } else {
+ ret = _FAIL;
+ goto exit;
+ }
+ }
+
+ /* Check key length for WEP. For NDTEST, 2005.01.27, by rcnjko. */
+ if ((encryptionalgo == _WEP40_ || encryptionalgo == _WEP104_) &&
+ (key->KeyLength != 5 && key->KeyLength != 13)) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("WEP KeyLength:0x%x != 5 or 13\n", key->KeyLength));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ bgroup = false;
+
+ /* Check the pairwise key. Added by Annie, 2005-07-06. */
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("------------------------------------------\n"));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("[Pairwise Key set]\n"));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("------------------------------------------\n"));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("key index: 0x%8x(0x%8x)\n", key->KeyIndex, (key->KeyIndex&0x3)));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("key Length: %d\n", key->KeyLength));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("------------------------------------------\n"));
+
+ } else {
+ /* Group key - KeyIndex(BIT30 == 0) */
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("OID_802_11_ADD_KEY: +++++ Group key +++++\n"));
+
+
+ /* when adding a wep key through add key without having assigned an encryption type before */
+ if ((padapter->securitypriv.ndisauthtype <= 3) &&
+ (padapter->securitypriv.dot118021XGrpPrivacy == 0)) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("keylen =%d(Adapter->securitypriv.dot11PrivacyAlgrthm=%x )padapter->securitypriv.dot118021XGrpPrivacy(%x)\n",
+ key->KeyLength, padapter->securitypriv.dot11PrivacyAlgrthm,
+ padapter->securitypriv.dot118021XGrpPrivacy));
+ switch (key->KeyLength) {
+ case 5:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("Adapter->securitypriv.dot11PrivacyAlgrthm=%x key->KeyLength=%u\n",
+ padapter->securitypriv.dot11PrivacyAlgrthm, key->KeyLength));
+ break;
+ case 13:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("Adapter->securitypriv.dot11PrivacyAlgrthm=%x key->KeyLength=%u\n",
+ padapter->securitypriv.dot11PrivacyAlgrthm, key->KeyLength));
+ break;
+ default:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("Adapter->securitypriv.dot11PrivacyAlgrthm=%x key->KeyLength=%u\n",
+ padapter->securitypriv.dot11PrivacyAlgrthm, key->KeyLength));
+ break;
+ }
+
+ encryptionalgo = padapter->securitypriv.dot11PrivacyAlgrthm;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ (" Adapter->securitypriv.dot11PrivacyAlgrthm=%x\n",
+ padapter->securitypriv.dot11PrivacyAlgrthm));
+
+ } else {
+ encryptionalgo = padapter->securitypriv.dot118021XGrpPrivacy;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("(Adapter->securitypriv.dot11PrivacyAlgrthm=%x)encryptionalgo(%x)=padapter->securitypriv.dot118021XGrpPrivacy(%x)keylen=%d\n",
+ padapter->securitypriv.dot11PrivacyAlgrthm, encryptionalgo,
+ padapter->securitypriv.dot118021XGrpPrivacy, key->KeyLength));
+ }
+
+ if ((check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE) == true) && (IS_MAC_ADDRESS_BROADCAST(key->BSSID) == false)) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ (" IBSS but BSSID is not Broadcast Address.\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* Check key length for TKIP */
+ if ((encryptionalgo == _TKIP_) && (key->KeyLength != 32)) {
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ (" TKIP GTK KeyLength:%u != 32\n", key->KeyLength));
+ ret = _FAIL;
+ goto exit;
+ } else if (encryptionalgo == _AES_ && (key->KeyLength != 16 && key->KeyLength != 32)) {
+ /* Check key length for AES */
+ /* For NDTEST, we allow keylen = 32 in this case. 2005.01.27, by rcnjko. */
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("<=== SetInfo, OID_802_11_ADD_KEY: AES GTK KeyLength:%u != 16 or 32\n",
+ key->KeyLength));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* Change the key length for EAPPkt9x.vxd. Added by Annie, 2005-11-03. */
+ if ((encryptionalgo == _AES_) && (key->KeyLength == 32)) {
+ key->KeyLength = 16;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("AES key length changed: %u\n", key->KeyLength));
+ }
+
+ if (key->KeyIndex & 0x8000000) {/* error? 0x80000000 (bit 31) was probably intended */
+ bgrouptkey = true;
+ }
+
+ if ((check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE)) &&
+ (check_fwstate(&padapter->mlmepriv, _FW_LINKED)))
+ bgrouptkey = true;
+ bgroup = true;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("------------------------------------------\n"));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("[Group Key set]\n"));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("------------------------------------------\n")) ;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("key index: 0x%8x(0x%8x)\n", key->KeyIndex, (key->KeyIndex&0x3)));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("key Length: %d\n", key->KeyLength)) ;
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("------------------------------------------\n"));
+ }
+
+ /* If WEP encryption algorithm, just call rtw_set_802_11_add_wep(). */
+ if ((padapter->securitypriv.dot11AuthAlgrthm != dot11AuthAlgrthm_8021X) &&
+ (encryptionalgo == _WEP40_ || encryptionalgo == _WEP104_)) {
+ u32 keyindex;
+ u32 len = FIELD_OFFSET(struct ndis_802_11_key, KeyMaterial) + key->KeyLength;
+ struct ndis_802_11_wep *wep = &padapter->securitypriv.ndiswep;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("OID_802_11_ADD_KEY: +++++ WEP key +++++\n"));
+
+ wep->Length = len;
+ keyindex = key->KeyIndex&0x7fffffff;
+ wep->KeyIndex = keyindex;
+ wep->KeyLength = key->KeyLength;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("OID_802_11_ADD_KEY:Before memcpy\n"));
+
+ memcpy(wep->KeyMaterial, key->KeyMaterial, key->KeyLength);
+ memcpy(&(padapter->securitypriv.dot11DefKey[keyindex].skey[0]), key->KeyMaterial, key->KeyLength);
+
+ padapter->securitypriv.dot11DefKeylen[keyindex] = key->KeyLength;
+ padapter->securitypriv.dot11PrivacyKeyIndex = keyindex;
+
+ ret = rtw_set_802_11_add_wep(padapter, wep);
+ goto exit;
+ }
+ if (key->KeyIndex & 0x20000000) {
+ /* SetRSC */
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("OID_802_11_ADD_KEY: +++++ SetRSC+++++\n"));
+ if (bgroup) {
+ unsigned long long keysrc = key->KeyRSC & 0x00FFFFFFFFFFFFULL;
+ memcpy(&padapter->securitypriv.dot11Grprxpn, &keysrc, 8);
+ } else {
+ unsigned long long keysrc = key->KeyRSC & 0x00FFFFFFFFFFFFULL;
+ memcpy(&padapter->securitypriv.dot11Grptxpn, &keysrc, 8);
+ }
+ }
+
+ /* Indicate this key idx is used for TX */
+ /* Save the key in KeyMaterial */
+ if (bgroup) { /* Group transmit key */
+ int res;
+
+ if (bgrouptkey)
+ padapter->securitypriv.dot118021XGrpKeyid = (u8)key->KeyIndex;
+ if ((key->KeyIndex&0x3) == 0) {
+ ret = _FAIL;
+ goto exit;
+ }
+ _rtw_memset(&padapter->securitypriv.dot118021XGrpKey[(u8)((key->KeyIndex) & 0x03)], 0, 16);
+ _rtw_memset(&padapter->securitypriv.dot118021XGrptxmickey[(u8)((key->KeyIndex) & 0x03)], 0, 16);
+ _rtw_memset(&padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)], 0, 16);
+
+ if ((key->KeyIndex & 0x10000000)) {
+ memcpy(&padapter->securitypriv.dot118021XGrptxmickey[(u8)((key->KeyIndex) & 0x03)], key->KeyMaterial + 16, 8);
+ memcpy(&padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)], key->KeyMaterial + 24, 8);
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("\n rtw_set_802_11_add_key:rx mic :0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+ padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[0],
+ padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex-1) & 0x03)].skey[1],
+ padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[2],
+ padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex-1) & 0x03)].skey[3],
+ padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[4],
+ padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex-1) & 0x03)].skey[5],
+ padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[6],
+ padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex-1) & 0x03)].skey[7]));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n rtw_set_802_11_add_key:set Group mic key!!!!!!!!\n"));
+ } else {
+ memcpy(&padapter->securitypriv.dot118021XGrptxmickey[(u8)((key->KeyIndex) & 0x03)], key->KeyMaterial + 24, 8);
+ memcpy(&padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)], key->KeyMaterial + 16, 8);
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("\n rtw_set_802_11_add_key:rx mic :0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+ padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[0],
+ padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex-1) & 0x03)].skey[1],
+ padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[2],
+ padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex-1) & 0x03)].skey[3],
+ padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[4],
+ padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex-1) & 0x03)].skey[5],
+ padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)].skey[6],
+ padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex-1) & 0x03)].skey[7]));
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_,
+ ("\n rtw_set_802_11_add_key:set Group mic key!!!!!!!!\n"));
+ }
+
+ /* set group key by index */
+ memcpy(&padapter->securitypriv.dot118021XGrpKey[(u8)((key->KeyIndex) & 0x03)], key->KeyMaterial, key->KeyLength);
+
+ key->KeyIndex = key->KeyIndex & 0x03;
+
+ padapter->securitypriv.binstallGrpkey = true;
+
+ padapter->securitypriv.bcheck_grpkey = false;
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("reset group key"));
+
+ res = rtw_set_key(padapter, &padapter->securitypriv, key->KeyIndex, 1);
+
+ if (res == _FAIL)
+ ret = _FAIL;
+
+ goto exit;
+
+ } else { /* Pairwise Key */
+ u8 res;
+
+ pbssid = get_bssid(&padapter->mlmepriv);
+ stainfo = rtw_get_stainfo(&padapter->stapriv, pbssid);
+
+ if (stainfo != NULL) {
+ _rtw_memset(&stainfo->dot118021x_UncstKey, 0, 16);/* clear keybuffer */
+
+ memcpy(&stainfo->dot118021x_UncstKey, key->KeyMaterial, 16);
+
+ if (encryptionalgo == _TKIP_) {
+ padapter->securitypriv.busetkipkey = false;
+
+ /* _set_timer(&padapter->securitypriv.tkip_timer, 50); */
+
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n========== _set_timer\n"));
+
+ /* if TKIP, save the Receive/Transmit MIC key in KeyMaterial[128-255] */
+ if ((key->KeyIndex & 0x10000000)) {
+ memcpy(&stainfo->dot11tkiptxmickey, key->KeyMaterial + 16, 8);
+ memcpy(&stainfo->dot11tkiprxmickey, key->KeyMaterial + 24, 8);
+
+ } else {
+ memcpy(&stainfo->dot11tkiptxmickey, key->KeyMaterial + 24, 8);
+ memcpy(&stainfo->dot11tkiprxmickey, key->KeyMaterial + 16, 8);
+ }
+ }
+
+
+ /* Set key to CAM through H2C command */
+ if (bgrouptkey) { /* should never get here */
+ res = rtw_setstakey_cmd(padapter, (unsigned char *)stainfo, false);
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n rtw_set_802_11_add_key:rtw_setstakey_cmd(group)\n"));
+ } else {
+ res = rtw_setstakey_cmd(padapter, (unsigned char *)stainfo, true);
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("\n rtw_set_802_11_add_key:rtw_setstakey_cmd(unicast)\n"));
+ }
+ if (!res)
+ ret = _FAIL;
+ }
+ }
+exit:
+
+_func_exit_;
+ return ret;
+}
+
+u8 rtw_set_802_11_remove_key(struct adapter *padapter, struct ndis_802_11_remove_key *key)
+{
+ u8 *pbssid;
+ struct sta_info *stainfo;
+ u8 bgroup = (key->KeyIndex & 0x4000000) > 0 ? false : true; /* note: mask 0x4000000 differs from the 0x40000000 pairwise-key bit used above */
+ u8 keyIndex = (u8)key->KeyIndex & 0x03;
+ u8 ret = _SUCCESS;
+
+_func_enter_;
+
+ if ((key->KeyIndex & 0xbffffffc) > 0) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if (bgroup) {
+ /* clear group key by index */
+
+ _rtw_memset(&padapter->securitypriv.dot118021XGrpKey[keyIndex], 0, 16);
+
+ /* \todo Send a H2C Command to Firmware for removing this Key in CAM Entry. */
+ } else {
+ pbssid = get_bssid(&padapter->mlmepriv);
+ stainfo = rtw_get_stainfo(&padapter->stapriv, pbssid);
+ if (stainfo) {
+ /* clear key by BSSID */
+ _rtw_memset(&stainfo->dot118021x_UncstKey, 0, 16);
+
+ /* \todo Send a H2C Command to Firmware for disable this Key in CAM Entry. */
+ } else {
+ ret = _FAIL;
+ goto exit;
+ }
+ }
+exit:
+
+_func_exit_;
+ return ret;
+}
+
+/*
+* rtw_get_cur_max_rate - get the current maximum rate
+* @adapter: pointer to struct adapter structure
+*
+* Return: the current maximum rate in units of 100 kbps, or 0 when it cannot be determined
+*/
+u16 rtw_get_cur_max_rate(struct adapter *adapter)
+{
+ int i = 0;
+ u8 *p;
+ u16 rate = 0, max_rate = 0;
+ struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct registry_priv *pregistrypriv = &adapter->registrypriv;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+ struct rtw_ieee80211_ht_cap *pht_capie;
+ u8 rf_type = 0;
+ u8 bw_40MHz = 0, short_GI_20 = 0, short_GI_40 = 0;
+ u16 mcs_rate = 0;
+ u32 ht_ielen = 0;
+
+ if (adapter->registrypriv.mp_mode == 1) {
+ if (check_fwstate(pmlmepriv, WIFI_MP_STATE))
+ return 0;
+ }
+
+ if ((!check_fwstate(pmlmepriv, _FW_LINKED)) &&
+ (!check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)))
+ return 0;
+
+ if (pmlmeext->cur_wireless_mode & (WIRELESS_11_24N|WIRELESS_11_5N)) {
+ p = rtw_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->IELength-12);
+ if (p && ht_ielen > 0) {
+ pht_capie = (struct rtw_ieee80211_ht_cap *)(p+2);
+
+ memcpy(&mcs_rate, pht_capie->supp_mcs_set, 2);
+
+ /* cur_bwmod is updated by beacon, pmlmeinfo is updated by association response */
+ bw_40MHz = (pmlmeext->cur_bwmode && (HT_INFO_HT_PARAM_REC_TRANS_CHNL_WIDTH & pmlmeinfo->HT_info.infos[0])) ? 1 : 0;
+
+ short_GI_20 = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & IEEE80211_HT_CAP_SGI_20) ? 1 : 0;
+ short_GI_40 = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & IEEE80211_HT_CAP_SGI_40) ? 1 : 0;
+
+ rtw_hal_get_hwreg(adapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+ max_rate = rtw_mcs_rate(
+ rf_type,
+ bw_40MHz & (pregistrypriv->cbw40_enable),
+ short_GI_20,
+ short_GI_40,
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate
+ );
+ }
+ } else {
+ while ((pcur_bss->SupportedRates[i] != 0) && (pcur_bss->SupportedRates[i] != 0xFF)) {
+ rate = pcur_bss->SupportedRates[i]&0x7F;
+ if (rate > max_rate)
+ max_rate = rate;
+ i++;
+ }
+
+ max_rate = max_rate*10/2;
+ }
+
+ return max_rate;
+}
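+
+/*
+ * Illustrative sketch (editor's example, not part of the original patch):
+ * the value returned by rtw_get_cur_max_rate() is in units of 100 kbps, so
+ * dividing by 10 yields whole Mbps.
+ */
+#if 0 /* example only */
+static u16 example_max_rate_mbps(struct adapter *adapter)
+{
+ return rtw_get_cur_max_rate(adapter) / 10; /* e.g. 1500 -> 150 Mbps */
+}
+#endif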
+
+/*
+* rtw_set_scan_mode - set the scan mode
+* @adapter: pointer to struct adapter structure
+* @scan_mode: SCAN_ACTIVE or SCAN_PASSIVE
+*
+* Return _SUCCESS or _FAIL
+*/
+int rtw_set_scan_mode(struct adapter *adapter, enum rt_scan_type scan_mode)
+{
+ if (scan_mode != SCAN_ACTIVE && scan_mode != SCAN_PASSIVE)
+ return _FAIL;
+
+ adapter->mlmepriv.scan_mode = scan_mode;
+
+ return _SUCCESS;
+}
+
+/*
+* rtw_set_channel_plan - set the channel plan
+* @adapter: pointer to struct adapter structure
+* @channel_plan: channel plan to use (an RT_CHANNEL_DOMAIN_* value)
+*
+* Return _SUCCESS or _FAIL
+*/
+int rtw_set_channel_plan(struct adapter *adapter, u8 channel_plan)
+{
+ /* handle by cmd_thread to sync with scan operation */
+ return rtw_set_chplan_cmd(adapter, channel_plan, 1);
+}
+
+/*
+* rtw_set_country - set the channel plan according to a country code
+* @adapter: pointer to struct adapter structure
+* @country_code: string of country code
+*
+* Return _SUCCESS or _FAIL
+*/
+int rtw_set_country(struct adapter *adapter, const char *country_code)
+{
+ int channel_plan = RT_CHANNEL_DOMAIN_WORLD_WIDE_5G;
+
+ DBG_88E("%s country_code:%s\n", __func__, country_code);
+
+ /* TODO: should have a table to match country code and RT_CHANNEL_DOMAIN */
+ /* TODO: should consider 2-character and 3-character country code */
+ if (0 == strcmp(country_code, "US"))
+ channel_plan = RT_CHANNEL_DOMAIN_FCC;
+ else if (0 == strcmp(country_code, "EU"))
+ channel_plan = RT_CHANNEL_DOMAIN_ETSI;
+ else if (0 == strcmp(country_code, "JP"))
+ channel_plan = RT_CHANNEL_DOMAIN_MKK;
+ else if (0 == strcmp(country_code, "CN"))
+ channel_plan = RT_CHANNEL_DOMAIN_CHINA;
+ else
+ DBG_88E("%s unknown country_code:%s\n", __func__, country_code);
+
+ return rtw_set_channel_plan(adapter, channel_plan);
+}
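+
+/*
+ * Illustrative sketch (editor's example, not part of the original patch):
+ * one way to address the TODO in rtw_set_country() is a small lookup table
+ * instead of the strcmp() chain; the entries below simply mirror the
+ * mapping already used above.
+ */
+#if 0 /* example only */
+static const struct {
+ const char *code;
+ int chplan;
+} example_country_chplan_map[] = {
+ { "US", RT_CHANNEL_DOMAIN_FCC },
+ { "EU", RT_CHANNEL_DOMAIN_ETSI },
+ { "JP", RT_CHANNEL_DOMAIN_MKK },
+ { "CN", RT_CHANNEL_DOMAIN_CHINA },
+};
+
+static int example_lookup_chplan(const char *country_code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(example_country_chplan_map); i++)
+ if (0 == strcmp(country_code, example_country_chplan_map[i].code))
+ return example_country_chplan_map[i].chplan;
+
+ return RT_CHANNEL_DOMAIN_WORLD_WIDE_5G; /* default, as above */
+}
+#endif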
diff --git a/drivers/staging/rtl8188eu/core/rtw_iol.c b/drivers/staging/rtl8188eu/core/rtw_iol.c
new file mode 100644
index 00000000000..e6fdd32f9a3
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_iol.c
@@ -0,0 +1,209 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include <rtw_iol.h>
+
+struct xmit_frame *rtw_IOL_accquire_xmit_frame(struct adapter *adapter)
+{
+ struct xmit_frame *xmit_frame;
+ struct xmit_buf *xmitbuf;
+ struct pkt_attrib *pattrib;
+ struct xmit_priv *pxmitpriv = &(adapter->xmitpriv);
+
+ xmit_frame = rtw_alloc_xmitframe(pxmitpriv);
+ if (xmit_frame == NULL) {
+ DBG_88E("%s rtw_alloc_xmitframe return null\n", __func__);
+ goto exit;
+ }
+
+ xmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
+ if (xmitbuf == NULL) {
+ DBG_88E("%s rtw_alloc_xmitbuf return null\n", __func__);
+ rtw_free_xmitframe(pxmitpriv, xmit_frame);
+ xmit_frame = NULL;
+ goto exit;
+ }
+
+ xmit_frame->frame_tag = MGNT_FRAMETAG;
+ xmit_frame->pxmitbuf = xmitbuf;
+ xmit_frame->buf_addr = xmitbuf->pbuf;
+ xmitbuf->priv_data = xmit_frame;
+
+ pattrib = &xmit_frame->attrib;
+ update_mgntframe_attrib(adapter, pattrib);
+ pattrib->qsel = 0x10;/* Beacon */
+ pattrib->subtype = WIFI_BEACON;
+ pattrib->pktlen = 0;
+ pattrib->last_txcmdsz = 0;
+exit:
+ return xmit_frame;
+}
+
+int rtw_IOL_append_cmds(struct xmit_frame *xmit_frame, u8 *IOL_cmds, u32 cmd_len)
+{
+ struct pkt_attrib *pattrib = &xmit_frame->attrib;
+ u16 buf_offset;
+ u32 ori_len;
+
+ buf_offset = TXDESC_OFFSET;
+ ori_len = buf_offset+pattrib->pktlen;
+
+ /* check if the io_buf can accommodate new cmds */
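+	/* (the extra 8 bytes presumably reserve room for a trailing END command) */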
+ if (ori_len + cmd_len + 8 > MAX_XMITBUF_SZ) {
+ DBG_88E("%s %u is large than MAX_XMITBUF_SZ:%u, can't accommodate new cmds\n",
+ __func__ , ori_len + cmd_len + 8, MAX_XMITBUF_SZ);
+ return _FAIL;
+ }
+
+ memcpy(xmit_frame->buf_addr + buf_offset + pattrib->pktlen, IOL_cmds, cmd_len);
+ pattrib->pktlen += cmd_len;
+ pattrib->last_txcmdsz += cmd_len;
+
+ return _SUCCESS;
+}
+
+bool rtw_IOL_applied(struct adapter *adapter)
+{
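+	/* fw_iol registry value: 1 = always use IO offload, 2 = only when the USB link is not high-speed */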
+ if (1 == adapter->registrypriv.fw_iol)
+ return true;
+
+ if ((2 == adapter->registrypriv.fw_iol) && (!adapter_to_dvobj(adapter)->ishighspeed))
+ return true;
+ return false;
+}
+
+int rtw_IOL_exec_cmds_sync(struct adapter *adapter, struct xmit_frame *xmit_frame, u32 max_wating_ms, u32 bndy_cnt)
+{
+ return rtw_hal_iol_cmd(adapter, xmit_frame, max_wating_ms, bndy_cnt);
+}
+
+int rtw_IOL_append_LLT_cmd(struct xmit_frame *xmit_frame, u8 page_boundary)
+{
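+	/* LLT initialization is not issued through IOL here; nothing to append */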
+ return _SUCCESS;
+}
+
+int _rtw_IOL_append_WB_cmd(struct xmit_frame *xmit_frame, u16 addr, u8 value, u8 mask)
+{
+ struct ioreg_cfg cmd = {8, IOREG_CMD_WB_REG, 0x0, 0x0, 0x0};
+
+ cmd.address = cpu_to_le16(addr);
+ cmd.data = cpu_to_le32(value);
+
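+	/* a full mask keeps the short 8-byte form; a partial mask needs the 12-byte form that also carries the mask */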
+ if (mask != 0xFF) {
+ cmd.length = 12;
+ cmd.mask = cpu_to_le32(mask);
+ }
+ return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, cmd.length);
+}
+
+int _rtw_IOL_append_WW_cmd(struct xmit_frame *xmit_frame, u16 addr, u16 value, u16 mask)
+{
+ struct ioreg_cfg cmd = {8, IOREG_CMD_WW_REG, 0x0, 0x0, 0x0};
+
+ cmd.address = cpu_to_le16(addr);
+ cmd.data = cpu_to_le32(value);
+
+ if (mask != 0xFFFF) {
+ cmd.length = 12;
+ cmd.mask = cpu_to_le32(mask);
+ }
+ return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, cmd.length);
+}
+
+int _rtw_IOL_append_WD_cmd(struct xmit_frame *xmit_frame, u16 addr, u32 value, u32 mask)
+{
+ struct ioreg_cfg cmd = {8, IOREG_CMD_WD_REG, 0x0, 0x0, 0x0};
+
+ cmd.address = cpu_to_le16(addr);
+ cmd.data = cpu_to_le32(value);
+
+ if (mask != 0xFFFFFFFF) {
+ cmd.length = 12;
+ cmd.mask = cpu_to_le32(mask);
+ }
+ return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, cmd.length);
+}
+
+int _rtw_IOL_append_WRF_cmd(struct xmit_frame *xmit_frame, u8 rf_path, u16 addr, u32 value, u32 mask)
+{
+ struct ioreg_cfg cmd = {8, IOREG_CMD_W_RF, 0x0, 0x0, 0x0};
+
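+	/* RF path is encoded in the high byte of the address, register offset in the low byte */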
+ cmd.address = cpu_to_le16((rf_path<<8) | ((addr) & 0xFF));
+ cmd.data = cpu_to_le32(value);
+
+ if (mask != 0x000FFFFF) {
+ cmd.length = 12;
+ cmd.mask = cpu_to_le32(mask);
+ }
+ return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, cmd.length);
+}
+
+int rtw_IOL_append_DELAY_US_cmd(struct xmit_frame *xmit_frame, u16 us)
+{
+	struct ioreg_cfg cmd = {4, IOREG_CMD_DELAY_US, 0x0, 0x0, 0x0};
+
+	cmd.address = cpu_to_le16(us);
+
+ return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, 4);
+}
+
+int rtw_IOL_append_DELAY_MS_cmd(struct xmit_frame *xmit_frame, u16 ms)
+{
+ struct ioreg_cfg cmd = {4, IOREG_CMD_DELAY_US, 0x0, 0x0, 0x0};
+
+ cmd.address = cpu_to_le16(ms);
+ return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, 4);
+}
+
+int rtw_IOL_append_END_cmd(struct xmit_frame *xmit_frame)
+{
+ struct ioreg_cfg cmd = {4, IOREG_CMD_END, cpu_to_le16(0xFFFF), cpu_to_le32(0xFF), 0x0};
+
+ return rtw_IOL_append_cmds(xmit_frame, (u8 *)&cmd, 4);
+}
+
+u8 rtw_IOL_cmd_boundary_handle(struct xmit_frame *pxmit_frame)
+{
+ u8 is_cmd_bndy = false;
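+
+	/* If one more 8-byte command would cross a 256-byte page boundary (the +32
+	 * presumably accounts for the tx descriptor prefix), close the page with an
+	 * END command and round pktlen up to the boundary.
+	 */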
+ if (((pxmit_frame->attrib.pktlen+32)%256) + 8 >= 256) {
+ rtw_IOL_append_END_cmd(pxmit_frame);
+ pxmit_frame->attrib.pktlen = ((((pxmit_frame->attrib.pktlen+32)/256)+1)*256);
+
+ pxmit_frame->attrib.last_txcmdsz = pxmit_frame->attrib.pktlen;
+ is_cmd_bndy = true;
+ }
+ return is_cmd_bndy;
+}
+
+void rtw_IOL_cmd_buf_dump(struct adapter *Adapter, int buf_len, u8 *pbuf)
+{
+ int i;
+ int j = 1;
+
+ pr_info("###### %s ######\n", __func__);
+ for (i = 0; i < buf_len; i++) {
+ printk("%02x-", *(pbuf+i));
+
+ if (j%32 == 0)
+ printk("\n");
+ j++;
+ }
+ printk("\n");
+ pr_info("=============ioreg_cmd len=%d===============\n", buf_len);
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_led.c b/drivers/staging/rtl8188eu/core/rtw_led.c
new file mode 100644
index 00000000000..afac5370984
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_led.c
@@ -0,0 +1,1692 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include <drv_types.h>
+#include "rtw_led.h"
+
+/* */
+/* Description: */
+/* Callback function of LED BlinkTimer, */
+/*		it just schedules the corresponding BlinkWorkItem/led_blink_hdl */
+/* */
+void BlinkTimerCallback(void *data)
+{
+ struct LED_871x *pLed = (struct LED_871x *)data;
+ struct adapter *padapter = pLed->padapter;
+
+ if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
+ return;
+
+ _set_workitem(&(pLed->BlinkWorkItem));
+}
+
+/* */
+/* Description: */
+/* Callback function of LED BlinkWorkItem. */
+/*		We dispatch the actual LED blink action according to LedStrategy. */
+/* */
+void BlinkWorkItemCallback(struct work_struct *work)
+{
+ struct LED_871x *pLed = container_of(work, struct LED_871x, BlinkWorkItem);
+ BlinkHandler(pLed);
+}
+
+/* */
+/* Description: */
+/* Reset status of LED_871x object. */
+/* */
+void ResetLedStatus(struct LED_871x *pLed)
+{
+ pLed->CurrLedState = RTW_LED_OFF; /* Current LED state. */
+ pLed->bLedOn = false; /* true if LED is ON, false if LED is OFF. */
+
+	pLed->bLedBlinkInProgress = false; /* true if it is blinking, false otherwise. */
+ pLed->bLedWPSBlinkInProgress = false;
+
+ pLed->BlinkTimes = 0; /* Number of times to toggle led state for blinking. */
+	pLed->BlinkingLedState = LED_UNKNOWN; /* Next state for blinking, either RTW_LED_ON or RTW_LED_OFF. */
+
+ pLed->bLedNoLinkBlinkInProgress = false;
+ pLed->bLedLinkBlinkInProgress = false;
+ pLed->bLedStartToLinkBlinkInProgress = false;
+ pLed->bLedScanBlinkInProgress = false;
+}
+
+/* Description: */
+/* Initialize an LED_871x object. */
+void InitLed871x(struct adapter *padapter, struct LED_871x *pLed, enum LED_PIN_871x LedPin)
+{
+ pLed->padapter = padapter;
+ pLed->LedPin = LedPin;
+
+ ResetLedStatus(pLed);
+
+ _init_timer(&(pLed->BlinkTimer), padapter->pnetdev, BlinkTimerCallback, pLed);
+
+ _init_workitem(&(pLed->BlinkWorkItem), BlinkWorkItemCallback, pLed);
+}
+
+
+/* */
+/* Description: */
+/* DeInitialize an LED_871x object. */
+/* */
+void DeInitLed871x(struct LED_871x *pLed)
+{
+ _cancel_workitem_sync(&(pLed->BlinkWorkItem));
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ ResetLedStatus(pLed);
+}
+
+/* */
+/* Description: */
+/* Implementation of LED blinking behavior. */
+/*		It toggles the LED and schedules the corresponding timer if necessary. */
+/* */
+
+static void SwLedBlink(struct LED_871x *pLed)
+{
+ struct adapter *padapter = pLed->padapter;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ u8 bStopBlinking = false;
+
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON) {
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
+ } else {
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
+ }
+
+ /* Determine if we shall change LED state again. */
+ pLed->BlinkTimes--;
+ switch (pLed->CurrLedState) {
+ case LED_BLINK_NORMAL:
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ break;
+ case LED_BLINK_StartToBlink:
+ if (check_fwstate(pmlmepriv, _FW_LINKED) && check_fwstate(pmlmepriv, WIFI_STATION_STATE))
+ bStopBlinking = true;
+ if (check_fwstate(pmlmepriv, _FW_LINKED) &&
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)))
+ bStopBlinking = true;
+ else if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ break;
+ case LED_BLINK_WPS:
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ break;
+ default:
+ bStopBlinking = true;
+ break;
+ }
+
+ if (bStopBlinking) {
+ /* if (padapter->pwrctrlpriv.cpwm >= PS_STATE_S2) */
+ if (0) {
+ SwLedOff(padapter, pLed);
+ } else if ((check_fwstate(pmlmepriv, _FW_LINKED)) && (!pLed->bLedOn)) {
+ SwLedOn(padapter, pLed);
+ } else if ((check_fwstate(pmlmepriv, _FW_LINKED)) && pLed->bLedOn) {
+ SwLedOff(padapter, pLed);
+ }
+ pLed->BlinkTimes = 0;
+ pLed->bLedBlinkInProgress = false;
+ } else {
+ /* Assign LED state to toggle. */
+ if (pLed->BlinkingLedState == RTW_LED_ON)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+
+ /* Schedule a timer to toggle LED state. */
+ switch (pLed->CurrLedState) {
+ case LED_BLINK_NORMAL:
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NORMAL_INTERVAL);
+ break;
+ case LED_BLINK_SLOWLY:
+ case LED_BLINK_StartToBlink:
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SLOWLY_INTERVAL);
+ break;
+ case LED_BLINK_WPS:
+			_set_timer(&(pLed->BlinkTimer), LED_BLINK_LONG_INTERVAL);
+ break;
+ default:
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SLOWLY_INTERVAL);
+ break;
+ }
+ }
+}
+
+static void SwLedBlink1(struct LED_871x *pLed)
+{
+ struct adapter *padapter = pLed->padapter;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ u8 bStopBlinking = false;
+
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON) {
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
+ } else {
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
+ }
+
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ ResetLedStatus(pLed);
+ return;
+ }
+
+ switch (pLed->CurrLedState) {
+ case LED_BLINK_SLOWLY:
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ break;
+ case LED_BLINK_NORMAL:
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_LINK_INTERVAL_ALPHA);
+ break;
+ case LED_BLINK_SCAN:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ if (bStopBlinking) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->bLedLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_NORMAL;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_LINK_INTERVAL_ALPHA);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ pLed->bLedScanBlinkInProgress = false;
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_BLINK_TXRX:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ if (bStopBlinking) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->bLedLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_NORMAL;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_LINK_INTERVAL_ALPHA);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ pLed->BlinkTimes = 0;
+ pLed->bLedBlinkInProgress = false;
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_BLINK_WPS:
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ break;
+ case LED_BLINK_WPS_STOP: /* WPS success */
+ if (pLed->BlinkingLedState == RTW_LED_ON)
+ bStopBlinking = false;
+ else
+ bStopBlinking = true;
+
+ if (bStopBlinking) {
+ pLed->bLedLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_NORMAL;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_LINK_INTERVAL_ALPHA);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+
+ pLed->bLedWPSBlinkInProgress = false;
+ } else {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void SwLedBlink2(struct LED_871x *pLed)
+{
+ struct adapter *padapter = pLed->padapter;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ u8 bStopBlinking = false;
+
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON) {
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
+ } else {
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
+ }
+
+ switch (pLed->CurrLedState) {
+ case LED_BLINK_SCAN:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("stop scan blink CurrLedState %d\n", pLed->CurrLedState));
+
+ } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("stop scan blink CurrLedState %d\n", pLed->CurrLedState));
+ }
+ pLed->bLedScanBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ }
+ break;
+ case LED_BLINK_TXRX:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("stop CurrLedState %d\n", pLed->CurrLedState));
+ } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("stop CurrLedState %d\n", pLed->CurrLedState));
+ }
+ pLed->bLedBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void SwLedBlink3(struct LED_871x *pLed)
+{
+ struct adapter *padapter = pLed->padapter;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ u8 bStopBlinking = false;
+
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON) {
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
+ } else {
+ if (pLed->CurrLedState != LED_BLINK_WPS_STOP)
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
+ }
+
+ switch (pLed->CurrLedState) {
+ case LED_BLINK_SCAN:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ if (!pLed->bLedOn)
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ if (pLed->bLedOn)
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ pLed->bLedScanBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ }
+ break;
+ case LED_BLINK_TXRX:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ if (!pLed->bLedOn)
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+
+ if (pLed->bLedOn)
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ pLed->bLedBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+ }
+ break;
+ case LED_BLINK_WPS:
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ break;
+ case LED_BLINK_WPS_STOP: /* WPS success */
+ if (pLed->BlinkingLedState == RTW_LED_ON) {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA);
+ bStopBlinking = false;
+ } else {
+ bStopBlinking = true;
+ }
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void SwLedBlink4(struct LED_871x *pLed)
+{
+ struct adapter *padapter = pLed->padapter;
+ struct led_priv *ledpriv = &(padapter->ledpriv);
+ struct LED_871x *pLed1 = &(ledpriv->SwLed1);
+ u8 bStopBlinking = false;
+
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON) {
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
+ } else {
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
+ }
+
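+	/* If LED1 has never been driven (state still LED_UNKNOWN) and is not in a
+	 * WPS blink, make sure it is turned off.
+	 */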
+ if (!pLed1->bLedWPSBlinkInProgress && pLed1->BlinkingLedState == LED_UNKNOWN) {
+ pLed1->BlinkingLedState = RTW_LED_OFF;
+ pLed1->CurrLedState = RTW_LED_OFF;
+ SwLedOff(padapter, pLed1);
+ }
+
+ switch (pLed->CurrLedState) {
+ case LED_BLINK_SLOWLY:
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ break;
+ case LED_BLINK_StartToBlink:
+ if (pLed->bLedOn) {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SLOWLY_INTERVAL);
+ } else {
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NORMAL_INTERVAL);
+ }
+ break;
+ case LED_BLINK_SCAN:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+			bStopBlinking = true;
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ SwLedOff(padapter, pLed);
+ } else {
+ pLed->bLedNoLinkBlinkInProgress = false;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ }
+ pLed->bLedScanBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ SwLedOff(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ }
+ break;
+ case LED_BLINK_TXRX:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ SwLedOff(padapter, pLed);
+ } else {
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ }
+ pLed->bLedBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ SwLedOff(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+ }
+ break;
+ case LED_BLINK_WPS:
+ if (pLed->bLedOn) {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SLOWLY_INTERVAL);
+ } else {
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NORMAL_INTERVAL);
+ }
+ break;
+ case LED_BLINK_WPS_STOP: /* WPS authentication fail */
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NORMAL_INTERVAL);
+ break;
+ case LED_BLINK_WPS_STOP_OVERLAP: /* WPS session overlap */
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0) {
+ if (pLed->bLedOn)
+ pLed->BlinkTimes = 1;
+ else
+ bStopBlinking = true;
+ }
+
+ if (bStopBlinking) {
+ pLed->BlinkTimes = 10;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_LINK_INTERVAL_ALPHA);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NORMAL_INTERVAL);
+ }
+ break;
+ default:
+ break;
+ }
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("SwLedBlink4 CurrLedState %d\n", pLed->CurrLedState));
+}
+
+static void SwLedBlink5(struct LED_871x *pLed)
+{
+ struct adapter *padapter = pLed->padapter;
+ u8 bStopBlinking = false;
+
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON) {
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
+ } else {
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
+ }
+
+ switch (pLed->CurrLedState) {
+ case LED_BLINK_SCAN:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ if (pLed->bLedOn)
+ SwLedOff(padapter, pLed);
+ } else {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ if (!pLed->bLedOn)
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+
+ pLed->bLedScanBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ SwLedOff(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ }
+ break;
+ case LED_BLINK_TXRX:
+ pLed->BlinkTimes--;
+ if (pLed->BlinkTimes == 0)
+ bStopBlinking = true;
+
+ if (bStopBlinking) {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ if (pLed->bLedOn)
+ SwLedOff(padapter, pLed);
+ } else {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ if (!pLed->bLedOn)
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+
+ pLed->bLedBlinkInProgress = false;
+ } else {
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
+ SwLedOff(padapter, pLed);
+ } else {
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("SwLedBlink5 CurrLedState %d\n", pLed->CurrLedState));
+}
+
+static void SwLedBlink6(struct LED_871x *pLed)
+{
+ struct adapter *padapter = pLed->padapter;
+
+ /* Change LED according to BlinkingLedState specified. */
+ if (pLed->BlinkingLedState == RTW_LED_ON) {
+ SwLedOn(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn on\n", pLed->BlinkTimes));
+ } else {
+ SwLedOff(padapter, pLed);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Blinktimes (%d): turn off\n", pLed->BlinkTimes));
+ }
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("<==== blink6\n"));
+}
+
+ /* ALPHA, added by chiyoko, 20090106 */
+static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAction)
+{
+ struct led_priv *ledpriv = &(padapter->ledpriv);
+ struct LED_871x *pLed = &(ledpriv->SwLed0);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ switch (LedAction) {
+ case LED_CTL_POWER_ON:
+ case LED_CTL_START_TO_LINK:
+ case LED_CTL_NO_LINK:
+ if (!pLed->bLedNoLinkBlinkInProgress) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
+ if (pLed->bLedLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_LINK:
+ if (!pLed->bLedLinkBlinkInProgress) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ pLed->bLedLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_NORMAL;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_LINK_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_SITE_SURVEY:
+ if ((pmlmepriv->LinkDetectInfo.bBusyTraffic) && (check_fwstate(pmlmepriv, _FW_LINKED))) {
+ ;
+ } else if (!pLed->bLedScanBlinkInProgress) {
+ if (IS_LED_WPS_BLINKING(pLed))
+ return;
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ pLed->bLedScanBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SCAN;
+ pLed->BlinkTimes = 24;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_TX:
+ case LED_CTL_RX:
+ if (!pLed->bLedBlinkInProgress) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedLinkBlinkInProgress = false;
+ }
+ pLed->bLedBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_TXRX;
+ pLed->BlinkTimes = 2;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_START_WPS: /* wait until xinpin finish */
+ case LED_CTL_START_WPS_BOTTON:
+ if (!pLed->bLedWPSBlinkInProgress) {
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ pLed->bLedWPSBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_WPS;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_STOP_WPS:
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ if (pLed->bLedWPSBlinkInProgress)
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ else
+ pLed->bLedWPSBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_WPS_STOP;
+ if (pLed->bLedOn) {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA);
+ } else {
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), 0);
+ }
+ break;
+ case LED_CTL_STOP_WPS_FAIL:
+ if (pLed->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ break;
+ case LED_CTL_POWER_OFF:
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ SwLedOff(padapter, pLed);
+ break;
+ default:
+ break;
+ }
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Led %d\n", pLed->CurrLedState));
+}
+
+ /* Arcadyan/Sitecom , added by chiyoko, 20090216 */
+static void SwLedControlMode2(struct adapter *padapter, enum LED_CTL_MODE LedAction)
+{
+ struct led_priv *ledpriv = &(padapter->ledpriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct LED_871x *pLed = &(ledpriv->SwLed0);
+
+ switch (LedAction) {
+ case LED_CTL_SITE_SURVEY:
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic) {
+ } else if (!pLed->bLedScanBlinkInProgress) {
+ if (IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ pLed->bLedScanBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SCAN;
+ pLed->BlinkTimes = 24;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_TX:
+ case LED_CTL_RX:
+ if ((!pLed->bLedBlinkInProgress) && (check_fwstate(pmlmepriv, _FW_LINKED))) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
+ pLed->bLedBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_TXRX;
+ pLed->BlinkTimes = 2;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_LINK:
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ _set_timer(&(pLed->BlinkTimer), 0);
+ break;
+ case LED_CTL_START_WPS: /* wait until xinpin finish */
+ case LED_CTL_START_WPS_BOTTON:
+ if (!pLed->bLedWPSBlinkInProgress) {
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ pLed->bLedWPSBlinkInProgress = true;
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), 0);
+ }
+ break;
+ case LED_CTL_STOP_WPS:
+ pLed->bLedWPSBlinkInProgress = false;
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else {
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), 0);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ break;
+ case LED_CTL_STOP_WPS_FAIL:
+ pLed->bLedWPSBlinkInProgress = false;
+ if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
+ SwLedOff(padapter, pLed);
+ } else {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), 0);
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+ }
+ break;
+ case LED_CTL_START_TO_LINK:
+ case LED_CTL_NO_LINK:
+ if (!IS_LED_BLINKING(pLed)) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), 0);
+ }
+ break;
+ case LED_CTL_POWER_OFF:
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ if (pLed->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+
+ _set_timer(&(pLed->BlinkTimer), 0);
+ break;
+ default:
+ break;
+ }
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("CurrLedState %d\n", pLed->CurrLedState));
+}
+
+ /* COREGA, added by chiyoko, 20090316 */
+static void SwLedControlMode3(struct adapter *padapter, enum LED_CTL_MODE LedAction)
+{
+ struct led_priv *ledpriv = &(padapter->ledpriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct LED_871x *pLed = &(ledpriv->SwLed0);
+
+ switch (LedAction) {
+ case LED_CTL_SITE_SURVEY:
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic) {
+ } else if (!pLed->bLedScanBlinkInProgress) {
+ if (IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ pLed->bLedScanBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SCAN;
+ pLed->BlinkTimes = 24;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_TX:
+ case LED_CTL_RX:
+ if ((!pLed->bLedBlinkInProgress) && (check_fwstate(pmlmepriv, _FW_LINKED))) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
+ pLed->bLedBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_TXRX;
+ pLed->BlinkTimes = 2;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_LINK:
+ if (IS_LED_WPS_BLINKING(pLed))
+ return;
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+
+ _set_timer(&(pLed->BlinkTimer), 0);
+ break;
+ case LED_CTL_START_WPS: /* wait until xinpin finish */
+ case LED_CTL_START_WPS_BOTTON:
+ if (!pLed->bLedWPSBlinkInProgress) {
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ pLed->bLedWPSBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_WPS;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_STOP_WPS:
+ if (pLed->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = false;
+ } else {
+ pLed->bLedWPSBlinkInProgress = true;
+ }
+
+ pLed->CurrLedState = LED_BLINK_WPS_STOP;
+ if (pLed->bLedOn) {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA);
+ } else {
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), 0);
+ }
+ break;
+ case LED_CTL_STOP_WPS_FAIL:
+ if (pLed->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), 0);
+ break;
+ case LED_CTL_START_TO_LINK:
+ case LED_CTL_NO_LINK:
+ if (!IS_LED_BLINKING(pLed)) {
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), 0);
+ }
+ break;
+ case LED_CTL_POWER_OFF:
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ if (pLed->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+
+ _set_timer(&(pLed->BlinkTimer), 0);
+ break;
+ default:
+ break;
+ }
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
+ ("CurrLedState %d\n", pLed->CurrLedState));
+}
+
+ /* Edimax-Belkin, added by chiyoko, 20090413 */
+static void SwLedControlMode4(struct adapter *padapter, enum LED_CTL_MODE LedAction)
+{
+ struct led_priv *ledpriv = &(padapter->ledpriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct LED_871x *pLed = &(ledpriv->SwLed0);
+ struct LED_871x *pLed1 = &(ledpriv->SwLed1);
+
+ switch (LedAction) {
+ case LED_CTL_START_TO_LINK:
+ if (pLed1->bLedWPSBlinkInProgress) {
+ pLed1->bLedWPSBlinkInProgress = false;
+ _cancel_timer_ex(&(pLed1->BlinkTimer));
+
+ pLed1->BlinkingLedState = RTW_LED_OFF;
+ pLed1->CurrLedState = RTW_LED_OFF;
+
+ if (pLed1->bLedOn)
+ _set_timer(&(pLed->BlinkTimer), 0);
+ }
+
+ if (!pLed->bLedStartToLinkBlinkInProgress) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+
+ pLed->bLedStartToLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_StartToBlink;
+ if (pLed->bLedOn) {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SLOWLY_INTERVAL);
+ } else {
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NORMAL_INTERVAL);
+ }
+ }
+ break;
+ case LED_CTL_LINK:
+ case LED_CTL_NO_LINK:
+ /* LED1 settings */
+ if (LedAction == LED_CTL_LINK) {
+ if (pLed1->bLedWPSBlinkInProgress) {
+ pLed1->bLedWPSBlinkInProgress = false;
+ _cancel_timer_ex(&(pLed1->BlinkTimer));
+
+ pLed1->BlinkingLedState = RTW_LED_OFF;
+ pLed1->CurrLedState = RTW_LED_OFF;
+
+ if (pLed1->bLedOn)
+ _set_timer(&(pLed->BlinkTimer), 0);
+ }
+ }
+
+ if (!pLed->bLedNoLinkBlinkInProgress) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_SITE_SURVEY:
+ if ((pmlmepriv->LinkDetectInfo.bBusyTraffic) && (check_fwstate(pmlmepriv, _FW_LINKED))) {
+ } else if (!pLed->bLedScanBlinkInProgress) {
+ if (IS_LED_WPS_BLINKING(pLed))
+ return;
+
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ pLed->bLedScanBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SCAN;
+ pLed->BlinkTimes = 24;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_TX:
+ case LED_CTL_RX:
+ if (!pLed->bLedBlinkInProgress) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
+ return;
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ pLed->bLedBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_TXRX;
+ pLed->BlinkTimes = 2;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_START_WPS: /* wait until xinpin finish */
+ case LED_CTL_START_WPS_BOTTON:
+ if (pLed1->bLedWPSBlinkInProgress) {
+ pLed1->bLedWPSBlinkInProgress = false;
+ _cancel_timer_ex(&(pLed1->BlinkTimer));
+
+ pLed1->BlinkingLedState = RTW_LED_OFF;
+ pLed1->CurrLedState = RTW_LED_OFF;
+
+ if (pLed1->bLedOn)
+ _set_timer(&(pLed->BlinkTimer), 0);
+ }
+
+ if (!pLed->bLedWPSBlinkInProgress) {
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ pLed->bLedWPSBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_WPS;
+ if (pLed->bLedOn) {
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SLOWLY_INTERVAL);
+ } else {
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NORMAL_INTERVAL);
+ }
+ }
+ break;
+ case LED_CTL_STOP_WPS: /* WPS connect success */
+ if (pLed->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+
+ break;
+ case LED_CTL_STOP_WPS_FAIL: /* WPS authentication fail */
+ if (pLed->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+
+ /* LED1 settings */
+ if (pLed1->bLedWPSBlinkInProgress)
+ _cancel_timer_ex(&(pLed1->BlinkTimer));
+ else
+ pLed1->bLedWPSBlinkInProgress = true;
+ pLed1->CurrLedState = LED_BLINK_WPS_STOP;
+ if (pLed1->bLedOn)
+ pLed1->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed1->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NORMAL_INTERVAL);
+ break;
+ case LED_CTL_STOP_WPS_FAIL_OVERLAP: /* WPS session overlap */
+ if (pLed->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+ pLed->bLedNoLinkBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SLOWLY;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NO_LINK_INTERVAL_ALPHA);
+
+ /* LED1 settings */
+ if (pLed1->bLedWPSBlinkInProgress)
+ _cancel_timer_ex(&(pLed1->BlinkTimer));
+ else
+ pLed1->bLedWPSBlinkInProgress = true;
+ pLed1->CurrLedState = LED_BLINK_WPS_STOP_OVERLAP;
+ pLed1->BlinkTimes = 10;
+ if (pLed1->bLedOn)
+ pLed1->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed1->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_NORMAL_INTERVAL);
+ break;
+ case LED_CTL_POWER_OFF:
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+
+ if (pLed->bLedNoLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedNoLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedLinkBlinkInProgress = false;
+ }
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ if (pLed->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedWPSBlinkInProgress = false;
+ }
+ if (pLed->bLedScanBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedScanBlinkInProgress = false;
+ }
+ if (pLed->bLedStartToLinkBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedStartToLinkBlinkInProgress = false;
+ }
+ if (pLed1->bLedWPSBlinkInProgress) {
+ _cancel_timer_ex(&(pLed1->BlinkTimer));
+ pLed1->bLedWPSBlinkInProgress = false;
+ }
+ pLed1->BlinkingLedState = LED_UNKNOWN;
+ SwLedOff(padapter, pLed);
+ SwLedOff(padapter, pLed1);
+ break;
+ default:
+ break;
+ }
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Led %d\n", pLed->CurrLedState));
+}
+
+
+
+ /* Sercomm-Belkin, added by chiyoko, 20090415 */
+static void
+SwLedControlMode5(
+ struct adapter *padapter,
+ enum LED_CTL_MODE LedAction
+)
+{
+ struct led_priv *ledpriv = &(padapter->ledpriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct LED_871x *pLed = &(ledpriv->SwLed0);
+
+ switch (LedAction) {
+ case LED_CTL_POWER_ON:
+ case LED_CTL_NO_LINK:
+ case LED_CTL_LINK: /* solid blue */
+ pLed->CurrLedState = RTW_LED_ON;
+ pLed->BlinkingLedState = RTW_LED_ON;
+
+ _set_timer(&(pLed->BlinkTimer), 0);
+ break;
+ case LED_CTL_SITE_SURVEY:
+ if ((pmlmepriv->LinkDetectInfo.bBusyTraffic) && (check_fwstate(pmlmepriv, _FW_LINKED))) {
+ } else if (!pLed->bLedScanBlinkInProgress) {
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ pLed->bLedScanBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_SCAN;
+ pLed->BlinkTimes = 24;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_SCAN_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_TX:
+ case LED_CTL_RX:
+ if (!pLed->bLedBlinkInProgress) {
+ if (pLed->CurrLedState == LED_BLINK_SCAN)
+ return;
+ pLed->bLedBlinkInProgress = true;
+ pLed->CurrLedState = LED_BLINK_TXRX;
+ pLed->BlinkTimes = 2;
+ if (pLed->bLedOn)
+ pLed->BlinkingLedState = RTW_LED_OFF;
+ else
+ pLed->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed->BlinkTimer), LED_BLINK_FASTER_INTERVAL_ALPHA);
+ }
+ break;
+ case LED_CTL_POWER_OFF:
+ pLed->CurrLedState = RTW_LED_OFF;
+ pLed->BlinkingLedState = RTW_LED_OFF;
+
+ if (pLed->bLedBlinkInProgress) {
+ _cancel_timer_ex(&(pLed->BlinkTimer));
+ pLed->bLedBlinkInProgress = false;
+ }
+ SwLedOff(padapter, pLed);
+ break;
+ default:
+ break;
+ }
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("Led %d\n", pLed->CurrLedState));
+}
+
+ /* WNC-Corega, added by chiyoko, 20090902 */
+static void
+SwLedControlMode6(
+ struct adapter *padapter,
+ enum LED_CTL_MODE LedAction
+)
+{
+ struct led_priv *ledpriv = &(padapter->ledpriv);
+ struct LED_871x *pLed0 = &(ledpriv->SwLed0);
+
+ switch (LedAction) {
+ case LED_CTL_POWER_ON:
+ case LED_CTL_LINK:
+ case LED_CTL_NO_LINK:
+ _cancel_timer_ex(&(pLed0->BlinkTimer));
+ pLed0->CurrLedState = RTW_LED_ON;
+ pLed0->BlinkingLedState = RTW_LED_ON;
+ _set_timer(&(pLed0->BlinkTimer), 0);
+ break;
+ case LED_CTL_POWER_OFF:
+ SwLedOff(padapter, pLed0);
+ break;
+ default:
+ break;
+ }
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_, ("ledcontrol 6 Led %d\n", pLed0->CurrLedState));
+}
+
+/* */
+/* Description: */
+/* Handler function of LED Blinking. */
+/*		We dispatch the actual LED blink action according to LedStrategy. */
+/* */
+void BlinkHandler(struct LED_871x *pLed)
+{
+ struct adapter *padapter = pLed->padapter;
+ struct led_priv *ledpriv = &(padapter->ledpriv);
+
+ if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
+ return;
+
+ switch (ledpriv->LedStrategy) {
+ case SW_LED_MODE0:
+ SwLedBlink(pLed);
+ break;
+ case SW_LED_MODE1:
+ SwLedBlink1(pLed);
+ break;
+ case SW_LED_MODE2:
+ SwLedBlink2(pLed);
+ break;
+ case SW_LED_MODE3:
+ SwLedBlink3(pLed);
+ break;
+ case SW_LED_MODE4:
+ SwLedBlink4(pLed);
+ break;
+ case SW_LED_MODE5:
+ SwLedBlink5(pLed);
+ break;
+ case SW_LED_MODE6:
+ SwLedBlink6(pLed);
+ break;
+ default:
+ break;
+ }
+}
+
+void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction)
+{
+ struct led_priv *ledpriv = &(padapter->ledpriv);
+
+ if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped) ||
+ (!padapter->hw_init_completed))
+ return;
+
+ if (!ledpriv->bRegUseLed)
+ return;
+
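+	/* Ignore traffic/scan/link LED events while the RF is off for a reason
+	 * stronger than power save.
+	 */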
+ if ((padapter->pwrctrlpriv.rf_pwrstate != rf_on &&
+ padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) &&
+ (LedAction == LED_CTL_TX || LedAction == LED_CTL_RX ||
+ LedAction == LED_CTL_SITE_SURVEY ||
+ LedAction == LED_CTL_LINK ||
+ LedAction == LED_CTL_NO_LINK ||
+ LedAction == LED_CTL_POWER_ON))
+ return;
+
+ switch (ledpriv->LedStrategy) {
+ case SW_LED_MODE0:
+ break;
+ case SW_LED_MODE1:
+ SwLedControlMode1(padapter, LedAction);
+ break;
+ case SW_LED_MODE2:
+ SwLedControlMode2(padapter, LedAction);
+ break;
+ case SW_LED_MODE3:
+ SwLedControlMode3(padapter, LedAction);
+ break;
+ case SW_LED_MODE4:
+ SwLedControlMode4(padapter, LedAction);
+ break;
+ case SW_LED_MODE5:
+ SwLedControlMode5(padapter, LedAction);
+ break;
+ case SW_LED_MODE6:
+ SwLedControlMode6(padapter, LedAction);
+ break;
+ default:
+ break;
+ }
+
+ RT_TRACE(_module_rtl8712_led_c_, _drv_info_,
+ ("LedStrategy:%d, LedAction %d\n",
+ ledpriv->LedStrategy, LedAction));
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
new file mode 100644
index 00000000000..ea6607196d8
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -0,0 +1,2442 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_MLME_C_
+
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <xmit_osdep.h>
+#include <hal_intf.h>
+#include <mlme_osdep.h>
+#include <sta_info.h>
+#include <wifi.h>
+#include <wlan_bssdef.h>
+#include <rtw_ioctl_set.h>
+#include <usb_osintf.h>
+
+extern unsigned char MCS_rate_2R[16];
+extern unsigned char MCS_rate_1R[16];
+
+int _rtw_init_mlme_priv (struct adapter *padapter)
+{
+ int i;
+ u8 *pbuf;
+ struct wlan_network *pnetwork;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ int res = _SUCCESS;
+
+_func_enter_;
+
+ /* We don't need to memset padapter->XXX to zero, because adapter is allocated by rtw_zvmalloc(). */
+
+ pmlmepriv->nic_hdl = (u8 *)padapter;
+
+ pmlmepriv->pscanned = NULL;
+ pmlmepriv->fw_state = 0;
+ pmlmepriv->cur_network.network.InfrastructureMode = Ndis802_11AutoUnknown;
+ pmlmepriv->scan_mode = SCAN_ACTIVE;/* 1: active, 0: passive. Maybe someday we should rename this variable to "active_mode" (Jeff) */
+
+ _rtw_spinlock_init(&(pmlmepriv->lock));
+ _rtw_init_queue(&(pmlmepriv->free_bss_pool));
+ _rtw_init_queue(&(pmlmepriv->scanned_queue));
+
+ set_scanned_network_val(pmlmepriv, 0);
+
+ _rtw_memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct ndis_802_11_ssid));
+
+ pbuf = rtw_zvmalloc(MAX_BSS_CNT * (sizeof(struct wlan_network)));
+
+ if (pbuf == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ pmlmepriv->free_bss_buf = pbuf;
+
+ pnetwork = (struct wlan_network *)pbuf;
+
+ for (i = 0; i < MAX_BSS_CNT; i++) {
+ _rtw_init_listhead(&(pnetwork->list));
+
+ rtw_list_insert_tail(&(pnetwork->list), &(pmlmepriv->free_bss_pool.queue));
+
+ pnetwork++;
+ }
+
+ /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
+
+ rtw_clear_scan_deny(padapter);
+
+ rtw_init_mlme_timer(padapter);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+static void rtw_mfree_mlme_priv_lock (struct mlme_priv *pmlmepriv)
+{
+ _rtw_spinlock_free(&pmlmepriv->lock);
+ _rtw_spinlock_free(&(pmlmepriv->free_bss_pool.lock));
+ _rtw_spinlock_free(&(pmlmepriv->scanned_queue.lock));
+}
+
+#if defined (CONFIG_88EU_AP_MODE)
+static void rtw_free_mlme_ie_data(u8 **ppie, u32 *plen)
+{
+ kfree(*ppie);
+ *plen = 0;
+ *ppie = NULL;
+}
+
+void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
+{
+ rtw_buf_free(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len);
+ rtw_buf_free(&pmlmepriv->assoc_rsp, &pmlmepriv->assoc_rsp_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->wps_beacon_ie, &pmlmepriv->wps_beacon_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_req_ie, &pmlmepriv->wps_probe_req_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->wps_probe_resp_ie, &pmlmepriv->wps_probe_resp_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->wps_assoc_resp_ie, &pmlmepriv->wps_assoc_resp_ie_len);
+
+ rtw_free_mlme_ie_data(&pmlmepriv->p2p_beacon_ie, &pmlmepriv->p2p_beacon_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->p2p_probe_req_ie, &pmlmepriv->p2p_probe_req_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->p2p_probe_resp_ie, &pmlmepriv->p2p_probe_resp_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->p2p_go_probe_resp_ie, &pmlmepriv->p2p_go_probe_resp_ie_len);
+ rtw_free_mlme_ie_data(&pmlmepriv->p2p_assoc_req_ie, &pmlmepriv->p2p_assoc_req_ie_len);
+}
+#else
+void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
+{
+}
+#endif
+
+void _rtw_free_mlme_priv (struct mlme_priv *pmlmepriv)
+{
+_func_enter_;
+
+ rtw_free_mlme_priv_ie_data(pmlmepriv);
+
+ if (pmlmepriv) {
+ rtw_mfree_mlme_priv_lock (pmlmepriv);
+
+ if (pmlmepriv->free_bss_buf) {
+ rtw_vmfree(pmlmepriv->free_bss_buf, MAX_BSS_CNT * sizeof(struct wlan_network));
+ }
+ }
+_func_exit_;
+}
+
+int _rtw_enqueue_network(struct __queue *queue, struct wlan_network *pnetwork)
+{
+ unsigned long irql;
+
+_func_enter_;
+
+ if (pnetwork == NULL)
+ goto exit;
+
+ _enter_critical_bh(&queue->lock, &irql);
+
+ rtw_list_insert_tail(&pnetwork->list, &queue->queue);
+
+ _exit_critical_bh(&queue->lock, &irql);
+
+exit:
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+struct wlan_network *_rtw_dequeue_network(struct __queue *queue)
+{
+ unsigned long irql;
+
+ struct wlan_network *pnetwork;
+
+_func_enter_;
+
+ _enter_critical_bh(&queue->lock, &irql);
+
+ if (_rtw_queue_empty(queue)) {
+ pnetwork = NULL;
+ } else {
+ pnetwork = LIST_CONTAINOR(get_next(&queue->queue), struct wlan_network, list);
+
+ rtw_list_delete(&(pnetwork->list));
+ }
+
+ _exit_critical_bh(&queue->lock, &irql);
+
+_func_exit_;
+
+ return pnetwork;
+}
+
+struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)/* _queue *free_queue) */
+{
+ unsigned long irql;
+ struct wlan_network *pnetwork;
+ struct __queue *free_queue = &pmlmepriv->free_bss_pool;
+ struct list_head *plist = NULL;
+
+_func_enter_;
+
+ _enter_critical_bh(&free_queue->lock, &irql);
+
+ if (_rtw_queue_empty(free_queue) == true) {
+ pnetwork = NULL;
+ goto exit;
+ }
+ plist = get_next(&(free_queue->queue));
+
+ pnetwork = LIST_CONTAINOR(plist , struct wlan_network, list);
+
+ rtw_list_delete(&pnetwork->list);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("_rtw_alloc_network: ptr=%p\n", plist));
+ pnetwork->network_type = 0;
+ pnetwork->fixed = false;
+ pnetwork->last_scanned = rtw_get_current_time();
+ pnetwork->aid = 0;
+ pnetwork->join_res = 0;
+
+ pmlmepriv->num_of_scanned++;
+
+exit:
+ _exit_critical_bh(&free_queue->lock, &irql);
+
+_func_exit_;
+
+ return pnetwork;
+}
+
+void _rtw_free_network(struct mlme_priv *pmlmepriv , struct wlan_network *pnetwork, u8 isfreeall)
+{
+ u32 curr_time, delta_time;
+ u32 lifetime = SCANQUEUE_LIFETIME;
+ unsigned long irql;
+ struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
+
+_func_enter_;
+
+ if (pnetwork == NULL)
+ goto exit;
+
+ if (pnetwork->fixed)
+ goto exit;
+ curr_time = rtw_get_current_time();
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)))
+ lifetime = 1;
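+ /* unless freeing the whole queue, keep entries that are still within their lifetime */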
+ if (!isfreeall) {
+ delta_time = (curr_time - pnetwork->last_scanned)/HZ;
+ if (delta_time < lifetime)/* unit:sec */
+ goto exit;
+ }
+ _enter_critical_bh(&free_queue->lock, &irql);
+ rtw_list_delete(&(pnetwork->list));
+ rtw_list_insert_tail(&(pnetwork->list), &(free_queue->queue));
+ pmlmepriv->num_of_scanned--;
+ _exit_critical_bh(&free_queue->lock, &irql);
+
+exit:
+_func_exit_;
+}
+
+void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork)
+{
+ struct __queue *free_queue = &(pmlmepriv->free_bss_pool);
+
+_func_enter_;
+ if (pnetwork == NULL)
+ goto exit;
+ if (pnetwork->fixed)
+ goto exit;
+ rtw_list_delete(&(pnetwork->list));
+ rtw_list_insert_tail(&(pnetwork->list), get_list_head(free_queue));
+ pmlmepriv->num_of_scanned--;
+exit:
+
+_func_exit_;
+}
+
+/*
+ return the wlan_network with the matching addr
+
+ Shall be called under atomic context... to avoid a possible race condition...
+*/
+struct wlan_network *_rtw_find_network(struct __queue *scanned_queue, u8 *addr)
+{
+ struct list_head *phead, *plist;
+ struct wlan_network *pnetwork = NULL;
+ u8 zero_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
+
+_func_enter_;
+ if (_rtw_memcmp(zero_addr, addr, ETH_ALEN)) {
+ pnetwork = NULL;
+ goto exit;
+ }
+ phead = get_list_head(scanned_queue);
+ plist = get_next(phead);
+
+ while (plist != phead) {
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network , list);
+ if (_rtw_memcmp(addr, pnetwork->network.MacAddress, ETH_ALEN) == true)
+ break;
+ plist = get_next(plist);
+ }
+ if (plist == phead)
+ pnetwork = NULL;
+exit:
+_func_exit_;
+ return pnetwork;
+}
+
+
+void _rtw_free_network_queue(struct adapter *padapter, u8 isfreeall)
+{
+ unsigned long irql;
+ struct list_head *phead, *plist;
+ struct wlan_network *pnetwork;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct __queue *scanned_queue = &pmlmepriv->scanned_queue;
+
+_func_enter_;
+
+
+ _enter_critical_bh(&scanned_queue->lock, &irql);
+
+ phead = get_list_head(scanned_queue);
+ plist = get_next(phead);
+
+ while (rtw_end_of_queue_search(phead, plist) == false) {
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ plist = get_next(plist);
+
+ _rtw_free_network(pmlmepriv, pnetwork, isfreeall);
+ }
+ _exit_critical_bh(&scanned_queue->lock, &irql);
+_func_exit_;
+}
+
+int rtw_if_up(struct adapter *padapter)
+{
+ int res;
+_func_enter_;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved ||
+ (check_fwstate(&padapter->mlmepriv, _FW_LINKED) == false)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("rtw_if_up:bDriverStopped(%d) OR bSurpriseRemoved(%d)",
+ padapter->bDriverStopped, padapter->bSurpriseRemoved));
+ res = false;
+ } else {
+ res = true;
+ }
+
+_func_exit_;
+ return res;
+}
+
+void rtw_generate_random_ibss(u8 *pibss)
+{
+ u32 curtime = rtw_get_current_time();
+
+_func_enter_;
+ pibss[0] = 0x02; /* in ad-hoc mode bit1 must be set to 1 */
+ pibss[1] = 0x11;
+ pibss[2] = 0x87;
+ pibss[3] = (u8)(curtime & 0xff);/* p[0]; */
+ pibss[4] = (u8)((curtime>>8) & 0xff);/* p[1]; */
+ pibss[5] = (u8)((curtime>>16) & 0xff);/* p[2]; */
+_func_exit_;
+ return;
+}
+
+u8 *rtw_get_capability_from_ie(u8 *ie)
+{
+ return ie + 8 + 2;
+}
+
+
+u16 rtw_get_capability(struct wlan_bssid_ex *bss)
+{
+ __le16 val;
+_func_enter_;
+
+ memcpy((u8 *)&val, rtw_get_capability_from_ie(bss->IEs), 2);
+
+_func_exit_;
+ return le16_to_cpu(val);
+}
+
+u8 *rtw_get_timestampe_from_ie(u8 *ie)
+{
+ return ie + 0;
+}
+
+u8 *rtw_get_beacon_interval_from_ie(u8 *ie)
+{
+ return ie + 8;
+}
+
+int rtw_init_mlme_priv (struct adapter *padapter)/* struct mlme_priv *pmlmepriv) */
+{
+ int res;
+_func_enter_;
+ res = _rtw_init_mlme_priv(padapter);/* (pmlmepriv); */
+_func_exit_;
+ return res;
+}
+
+void rtw_free_mlme_priv (struct mlme_priv *pmlmepriv)
+{
+_func_enter_;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_free_mlme_priv\n"));
+ _rtw_free_mlme_priv (pmlmepriv);
+_func_exit_;
+}
+
+static struct wlan_network *rtw_alloc_network(struct mlme_priv *pmlmepriv)
+{
+ struct wlan_network *pnetwork;
+_func_enter_;
+ pnetwork = _rtw_alloc_network(pmlmepriv);
+_func_exit_;
+ return pnetwork;
+}
+
+static void rtw_free_network_nolock(struct mlme_priv *pmlmepriv,
+ struct wlan_network *pnetwork)
+{
+_func_enter_;
+ _rtw_free_network_nolock(pmlmepriv, pnetwork);
+_func_exit_;
+}
+
+
+void rtw_free_network_queue(struct adapter *dev, u8 isfreeall)
+{
+_func_enter_;
+ _rtw_free_network_queue(dev, isfreeall);
+_func_exit_;
+}
+
+/*
+ return the wlan_network with the matching addr
+
+ Shall be called under atomic context... to avoid a possible race condition...
+*/
+struct wlan_network *rtw_find_network(struct __queue *scanned_queue, u8 *addr)
+{
+ struct wlan_network *pnetwork = _rtw_find_network(scanned_queue, addr);
+
+ return pnetwork;
+}
+
+int rtw_is_same_ibss(struct adapter *adapter, struct wlan_network *pnetwork)
+{
+ int ret = true;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+
+ if ((psecuritypriv->dot11PrivacyAlgrthm != _NO_PRIVACY_) &&
+ (pnetwork->network.Privacy == 0))
+ ret = false;
+ else if ((psecuritypriv->dot11PrivacyAlgrthm == _NO_PRIVACY_) &&
+ (pnetwork->network.Privacy == 1))
+ ret = false;
+ else
+ ret = true;
+ return ret;
+}
+
+static int is_same_ess(struct wlan_bssid_ex *a, struct wlan_bssid_ex *b)
+{
+ return (a->Ssid.SsidLength == b->Ssid.SsidLength) &&
+ _rtw_memcmp(a->Ssid.Ssid, b->Ssid.Ssid, a->Ssid.SsidLength);
+}
+
+int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst)
+{
+ u16 s_cap, d_cap;
+ __le16 le_scap, le_dcap;
+
+_func_enter_;
+ memcpy((u8 *)&le_scap, rtw_get_capability_from_ie(src->IEs), 2);
+ memcpy((u8 *)&le_dcap, rtw_get_capability_from_ie(dst->IEs), 2);
+
+
+ s_cap = le16_to_cpu(le_scap);
+ d_cap = le16_to_cpu(le_dcap);
+
+_func_exit_;
+
+ return ((src->Ssid.SsidLength == dst->Ssid.SsidLength) &&
+ ((_rtw_memcmp(src->MacAddress, dst->MacAddress, ETH_ALEN)) == true) &&
+ ((_rtw_memcmp(src->Ssid.Ssid, dst->Ssid.Ssid, src->Ssid.SsidLength)) == true) &&
+ ((s_cap & WLAN_CAPABILITY_IBSS) ==
+ (d_cap & WLAN_CAPABILITY_IBSS)) &&
+ ((s_cap & WLAN_CAPABILITY_BSS) ==
+ (d_cap & WLAN_CAPABILITY_BSS)));
+}
+
+struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue)
+{
+ struct list_head *plist, *phead;
+ struct wlan_network *pwlan = NULL;
+ struct wlan_network *oldest = NULL;
+
+_func_enter_;
+ phead = get_list_head(scanned_queue);
+
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ pwlan = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ if (!pwlan->fixed) {
+ if (oldest == NULL || time_after(oldest->last_scanned, pwlan->last_scanned))
+ oldest = pwlan;
+ }
+
+ plist = get_next(plist);
+ }
+_func_exit_;
+ return oldest;
+}
+
+void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
+ struct adapter *padapter, bool update_ie)
+{
+ long rssi_ori = dst->Rssi;
+ u8 sq_smp = src->PhyInfo.SignalQuality;
+ u8 ss_final;
+ u8 sq_final;
+ long rssi_final;
+
+_func_enter_;
+ rtw_hal_antdiv_rssi_compared(padapter, dst, src); /* this will update src.Rssi, need to consider again */
+
+ /* The rule below is 1/5 for sample value, 4/5 for history value */
+ if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) && is_same_network(&(padapter->mlmepriv.cur_network.network), src)) {
+ /* Take the recvpriv's value for the connected AP*/
+ ss_final = padapter->recvpriv.signal_strength;
+ sq_final = padapter->recvpriv.signal_qual;
+ /* the rssi value here is undecorated, and will be used for antenna diversity */
+ if (sq_smp != 101) /* from the right channel */
+ rssi_final = (src->Rssi+dst->Rssi*4)/5;
+ else
+ rssi_final = rssi_ori;
+ } else {
+ if (sq_smp != 101) { /* from the right channel */
+ ss_final = ((u32)(src->PhyInfo.SignalStrength)+(u32)(dst->PhyInfo.SignalStrength)*4)/5;
+ sq_final = ((u32)(src->PhyInfo.SignalQuality)+(u32)(dst->PhyInfo.SignalQuality)*4)/5;
+ rssi_final = (src->Rssi+dst->Rssi*4)/5;
+ } else {
+ /* bss info not received from the right channel, use the original RX signal info */
+ ss_final = dst->PhyInfo.SignalStrength;
+ sq_final = dst->PhyInfo.SignalQuality;
+ rssi_final = dst->Rssi;
+ }
+ }
+ if (update_ie)
+ memcpy((u8 *)dst, (u8 *)src, get_wlan_bssid_ex_sz(src));
+ dst->PhyInfo.SignalStrength = ss_final;
+ dst->PhyInfo.SignalQuality = sq_final;
+ dst->Rssi = rssi_final;
+
+_func_exit_;
+}
+
+static void update_current_network(struct adapter *adapter, struct wlan_bssid_ex *pnetwork)
+{
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+
+_func_enter_;
+
+ if ((check_fwstate(pmlmepriv, _FW_LINKED) == true) &&
+ (is_same_network(&(pmlmepriv->cur_network.network), pnetwork))) {
+ update_network(&(pmlmepriv->cur_network.network), pnetwork, adapter, true);
+ rtw_update_protection(adapter, (pmlmepriv->cur_network.network.IEs) + sizeof(struct ndis_802_11_fixed_ie),
+ pmlmepriv->cur_network.network.IELength);
+ }
+_func_exit_;
+}
+
+/*
+Caller must hold pmlmepriv->lock first.
+*/
+void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *target)
+{
+ unsigned long irql;
+ struct list_head *plist, *phead;
+ u32 bssid_ex_sz;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ struct wlan_network *oldest = NULL;
+
+_func_enter_;
+
+ _enter_critical_bh(&queue->lock, &irql);
+ phead = get_list_head(queue);
+ plist = get_next(phead);
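+ /* look for an existing entry for this BSS; also remember the oldest entry so it can be recycled if the free pool is empty */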
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ if (is_same_network(&(pnetwork->network), target))
+ break;
+ if ((oldest == ((struct wlan_network *)0)) ||
+ time_after(oldest->last_scanned, pnetwork->last_scanned))
+ oldest = pnetwork;
+ plist = get_next(plist);
+ }
+ /* If we didn't find a match, then get a new network slot to initialize
+ * with this beacon's information */
+ if (rtw_end_of_queue_search(phead, plist) == true) {
+ if (_rtw_queue_empty(&(pmlmepriv->free_bss_pool)) == true) {
+ /* If there are no more slots, expire the oldest */
+ pnetwork = oldest;
+
+ rtw_hal_get_def_var(adapter, HAL_DEF_CURRENT_ANTENNA, &(target->PhyInfo.Optimum_antenna));
+ memcpy(&(pnetwork->network), target, get_wlan_bssid_ex_sz(target));
+ /* variable initialize */
+ pnetwork->fixed = false;
+ pnetwork->last_scanned = rtw_get_current_time();
+
+ pnetwork->network_type = 0;
+ pnetwork->aid = 0;
+ pnetwork->join_res = 0;
+
+ /* bss info not received from the right channel */
+ if (pnetwork->network.PhyInfo.SignalQuality == 101)
+ pnetwork->network.PhyInfo.SignalQuality = 0;
+ } else {
+ /* Otherwise just pull from the free list */
+
+ pnetwork = rtw_alloc_network(pmlmepriv); /* will update scan_time */
+
+ if (pnetwork == NULL) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n\n\nsomething wrong here\n\n\n"));
+ goto exit;
+ }
+
+ bssid_ex_sz = get_wlan_bssid_ex_sz(target);
+ target->Length = bssid_ex_sz;
+ rtw_hal_get_def_var(adapter, HAL_DEF_CURRENT_ANTENNA, &(target->PhyInfo.Optimum_antenna));
+ memcpy(&(pnetwork->network), target, bssid_ex_sz);
+
+ pnetwork->last_scanned = rtw_get_current_time();
+
+ /* bss info not received from the right channel */
+ if (pnetwork->network.PhyInfo.SignalQuality == 101)
+ pnetwork->network.PhyInfo.SignalQuality = 0;
+ rtw_list_insert_tail(&(pnetwork->list), &(queue->queue));
+ }
+ } else {
+ /* we have an entry and we are going to update it. But this entry may
+ * already be expired. In this case we do the same as if we had found a
+ * new net and call the new_net handler
+ */
+ bool update_ie = true;
+
+ pnetwork->last_scanned = rtw_get_current_time();
+
+ /* target.Reserved[0] == 1 means that the scanned network is a beacon frame. */
+ if ((pnetwork->network.IELength > target->IELength) && (target->Reserved[0] == 1))
+ update_ie = false;
+
+ update_network(&(pnetwork->network), target, adapter, update_ie);
+ }
+
+exit:
+ _exit_critical_bh(&queue->lock, &irql);
+
+_func_exit_;
+}
+
+static void rtw_add_network(struct adapter *adapter,
+ struct wlan_bssid_ex *pnetwork)
+{
+_func_enter_;
+#if defined(CONFIG_88EU_P2P)
+ rtw_wlan_bssid_ex_remove_p2p_attr(pnetwork, P2P_ATTR_GROUP_INFO);
+#endif
+ update_current_network(adapter, pnetwork);
+ rtw_update_scanned_network(adapter, pnetwork);
+_func_exit_;
+}
+
+/* select the desired network based on the capability of the (i)bss. */
+/* check items: (1) security */
+/* (2) network_type */
+/* (3) WMM */
+/* (4) HT */
+/* (5) others */
+static int rtw_is_desired_network(struct adapter *adapter, struct wlan_network *pnetwork)
+{
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ u32 desired_encmode;
+ u32 privacy;
+
+ /* u8 wps_ie[512]; */
+ uint wps_ielen;
+
+ int bselected = true;
+
+ desired_encmode = psecuritypriv->ndisencryptstatus;
+ privacy = pnetwork->network.Privacy;
+
+ if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) {
+ if (rtw_get_wps_ie(pnetwork->network.IEs+_FIXED_IE_LENGTH_, pnetwork->network.IELength-_FIXED_IE_LENGTH_, NULL, &wps_ielen) != NULL)
+ return true;
+ else
+ return false;
+ }
+ if (adapter->registrypriv.wifi_spec == 1) { /* for correct flow of 8021X to do.... */
+ if ((desired_encmode == Ndis802_11EncryptionDisabled) && (privacy != 0))
+ bselected = false;
+ }
+
+
+ if ((desired_encmode != Ndis802_11EncryptionDisabled) && (privacy == 0)) {
+ DBG_88E("desired_encmode: %d, privacy: %d\n", desired_encmode, privacy);
+ bselected = false;
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) {
+ if (pnetwork->network.InfrastructureMode != pmlmepriv->cur_network.network.InfrastructureMode)
+ bselected = false;
+ }
+
+
+ return bselected;
+}
+
+/* TODO: Perry: For Power Management */
+void rtw_atimdone_event_callback(struct adapter *adapter , u8 *pbuf)
+{
+_func_enter_;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("receive atimdone_evet\n"));
+_func_exit_;
+ return;
+}
+
+
+void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf)
+{
+ unsigned long irql;
+ u32 len;
+ struct wlan_bssid_ex *pnetwork;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+
+_func_enter_;
+
+ pnetwork = (struct wlan_bssid_ex *)pbuf;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_survey_event_callback, ssid=%s\n", pnetwork->Ssid.Ssid));
+
+ len = get_wlan_bssid_ex_sz(pnetwork);
+ if (len > (sizeof(struct wlan_bssid_ex))) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n****rtw_survey_event_callback: return a wrong bss ***\n"));
+ return;
+ }
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+
+ /* update IBSS_network's timestamp */
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) == true) {
+ if (_rtw_memcmp(&(pmlmepriv->cur_network.network.MacAddress), pnetwork->MacAddress, ETH_ALEN)) {
+ struct wlan_network *ibss_wlan = NULL;
+ unsigned long irql;
+
+ memcpy(pmlmepriv->cur_network.network.IEs, pnetwork->IEs, 8);
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ ibss_wlan = rtw_find_network(&pmlmepriv->scanned_queue, pnetwork->MacAddress);
+ if (ibss_wlan) {
+ memcpy(ibss_wlan->network.IEs , pnetwork->IEs, 8);
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ goto exit;
+ }
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ }
+ }
+
+ /* lock pmlmepriv->lock when accessing network_q */
+ if ((check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) == false) {
+ if (pnetwork->Ssid.Ssid[0] == 0)
+ pnetwork->Ssid.SsidLength = 0;
+ rtw_add_network(adapter, pnetwork);
+ }
+
+exit:
+
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+
+_func_exit_;
+
+ return;
+}
+
+
+
+void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf)
+{
+ unsigned long irql;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext;
+
+_func_enter_;
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+
+ if (pmlmepriv->wps_probe_req_ie) {
+ pmlmepriv->wps_probe_req_ie_len = 0;
+ kfree(pmlmepriv->wps_probe_req_ie);
+ pmlmepriv->wps_probe_req_ie = NULL;
+ }
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_surveydone_event_callback: fw_state:%x\n\n", get_fwstate(pmlmepriv)));
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
+ u8 timer_cancelled;
+
+ _cancel_timer(&pmlmepriv->scan_to_timer, &timer_cancelled);
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
+ } else {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("nic status=%x, survey done event comes too late!\n", get_fwstate(pmlmepriv)));
+ }
+
+ rtw_set_signal_stat_timer(&adapter->recvpriv);
+
+ if (pmlmepriv->to_join) {
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true)) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == false) {
+ set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
+
+ if (rtw_select_and_join_from_scanned_queue(pmlmepriv) == _SUCCESS) {
+ _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
+ } else {
+ struct wlan_bssid_ex *pdev_network = &(adapter->registrypriv.dev_network);
+ u8 *pibss = adapter->registrypriv.dev_network.MacAddress;
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("switching to adhoc master\n"));
+
+ _rtw_memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
+ memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
+
+ rtw_update_registrypriv_dev_network(adapter);
+ rtw_generate_random_ibss(pibss);
+
+ pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE;
+
+ if (rtw_createbss_cmd(adapter) != _SUCCESS)
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error=>rtw_createbss_cmd status FAIL\n"));
+ pmlmepriv->to_join = false;
+ }
+ }
+ } else {
+ int s_ret;
+ set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
+ pmlmepriv->to_join = false;
+ s_ret = rtw_select_and_join_from_scanned_queue(pmlmepriv);
+ if (_SUCCESS == s_ret) {
+ _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT);
+ } else if (s_ret == 2) { /* there is no need to wait for join */
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+ rtw_indicate_connect(adapter);
+ } else {
+ DBG_88E("try_to_join, but select scanning queue fail, to_roaming:%d\n", pmlmepriv->to_roaming);
+ if (pmlmepriv->to_roaming != 0) {
+ if (--pmlmepriv->to_roaming == 0 ||
+ _SUCCESS != rtw_sitesurvey_cmd(adapter, &pmlmepriv->assoc_ssid, 1, NULL, 0)) {
+ pmlmepriv->to_roaming = 0;
+ rtw_free_assoc_resources(adapter, 1);
+ rtw_indicate_disconnect(adapter);
+ } else {
+ pmlmepriv->to_join = true;
+ }
+ }
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+ }
+ }
+ }
+
+ indicate_wx_scan_complete_event(adapter);
+
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ p2p_ps_wk_cmd(adapter, P2P_PS_SCAN_DONE, 0);
+
+ rtw_os_xmit_schedule(adapter);
+
+ pmlmeext = &adapter->mlmeextpriv;
+ if (pmlmeext->sitesurvey_res.bss_cnt == 0)
+ rtw_hal_sreset_reset(adapter);
+_func_exit_;
+}
+
+void rtw_dummy_event_callback(struct adapter *adapter , u8 *pbuf)
+{
+}
+
+void rtw_fwdbg_event_callback(struct adapter *adapter , u8 *pbuf)
+{
+}
+
+static void free_scanqueue(struct mlme_priv *pmlmepriv)
+{
+ unsigned long irql, irql0;
+ struct __queue *free_queue = &pmlmepriv->free_bss_pool;
+ struct __queue *scan_queue = &pmlmepriv->scanned_queue;
+ struct list_head *plist, *phead, *ptemp;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+free_scanqueue\n"));
+ _enter_critical_bh(&scan_queue->lock, &irql0);
+ _enter_critical_bh(&free_queue->lock, &irql);
+
+ phead = get_list_head(scan_queue);
+ plist = get_next(phead);
+
+ while (plist != phead) {
+ ptemp = get_next(plist);
+ rtw_list_delete(plist);
+ rtw_list_insert_tail(plist, &free_queue->queue);
+ plist = ptemp;
+ pmlmepriv->num_of_scanned--;
+ }
+
+ _exit_critical_bh(&free_queue->lock, &irql);
+ _exit_critical_bh(&scan_queue->lock, &irql0);
+
+_func_exit_;
+}
+
+/*
+*rtw_free_assoc_resources: the caller has to lock pmlmepriv->lock
+*/
+void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue)
+{
+ unsigned long irql;
+ struct wlan_network *pwlan = NULL;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct wlan_network *tgt_network = &pmlmepriv->cur_network;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+rtw_free_assoc_resources\n"));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("tgt_network->network.MacAddress=%pM ssid=%s\n",
+ tgt_network->network.MacAddress, tgt_network->network.Ssid.Ssid));
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_AP_STATE)) {
+ struct sta_info *psta;
+
+ psta = rtw_get_stainfo(&adapter->stapriv, tgt_network->network.MacAddress);
+
+ _enter_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+ rtw_free_stainfo(adapter, psta);
+ _exit_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE | WIFI_AP_STATE)) {
+ struct sta_info *psta;
+
+ rtw_free_all_stainfo(adapter);
+
+ psta = rtw_get_bcmc_stainfo(adapter);
+ _enter_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+ rtw_free_stainfo(adapter, psta);
+ _exit_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+
+ rtw_init_bcmc_stainfo(adapter);
+ }
+
+ if (lock_scanned_queue)
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+
+ pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
+ if (pwlan)
+ pwlan->fixed = false;
+ else
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_free_assoc_resources:pwlan==NULL\n\n"));
+
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) && (adapter->stapriv.asoc_sta_count == 1)))
+ rtw_free_network_nolock(pmlmepriv, pwlan);
+
+ if (lock_scanned_queue)
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ pmlmepriv->key_mask = 0;
+_func_exit_;
+}
+
+/*
+*rtw_indicate_connect: the caller has to lock pmlmepriv->lock
+*/
+void rtw_indicate_connect(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+rtw_indicate_connect\n"));
+
+ pmlmepriv->to_join = false;
+
+ if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
+ set_fwstate(pmlmepriv, _FW_LINKED);
+
+ rtw_led_control(padapter, LED_CTL_LINK);
+
+ rtw_os_indicate_connect(padapter);
+ }
+
+ pmlmepriv->to_roaming = 0;
+
+ rtw_set_scan_deny(padapter, 3000);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("-rtw_indicate_connect: fw_state=0x%08x\n", get_fwstate(pmlmepriv)));
+_func_exit_;
+}
+
+/*
+*rtw_indicate_disconnect: the caller has to lock pmlmepriv->lock
+*/
+void rtw_indicate_disconnect(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+_func_enter_;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+rtw_indicate_disconnect\n"));
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING | WIFI_UNDER_WPS);
+
+
+ if (pmlmepriv->to_roaming > 0)
+ _clr_fwstate_(pmlmepriv, _FW_LINKED);
+
+ if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) ||
+ (pmlmepriv->to_roaming <= 0)) {
+ rtw_os_indicate_disconnect(padapter);
+
+ _clr_fwstate_(pmlmepriv, _FW_LINKED);
+ rtw_led_control(padapter, LED_CTL_NO_LINK);
+ rtw_clear_scan_deny(padapter);
+ }
+ p2p_ps_wk_cmd(padapter, P2P_PS_DISABLE, 1);
+
+ rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_DISCONNECT, 1);
+
+_func_exit_;
+}
+
+inline void rtw_indicate_scan_done(struct adapter *padapter, bool aborted)
+{
+ rtw_os_indicate_scan_done(padapter, aborted);
+}
+
+void rtw_scan_abort(struct adapter *adapter)
+{
+ u32 start;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(adapter->mlmeextpriv);
+
+ start = rtw_get_current_time();
+ pmlmeext->scan_abort = true;
+ while (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) &&
+ rtw_get_passing_time_ms(start) <= 200) {
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
+ break;
+ DBG_88E(FUNC_NDEV_FMT"fw_state=_FW_UNDER_SURVEY!\n", FUNC_NDEV_ARG(adapter->pnetdev));
+ rtw_msleep_os(20);
+ }
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
+ if (!adapter->bDriverStopped && !adapter->bSurpriseRemoved)
+ DBG_88E(FUNC_NDEV_FMT"waiting for scan_abort time out!\n", FUNC_NDEV_ARG(adapter->pnetdev));
+ rtw_indicate_scan_done(adapter, true);
+ }
+ pmlmeext->scan_abort = false;
+}
+
+static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, struct wlan_network *pnetwork)
+{
+ int i;
+ struct sta_info *bmc_sta, *psta = NULL;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ psta = rtw_get_stainfo(pstapriv, pnetwork->network.MacAddress);
+ if (psta == NULL)
+ psta = rtw_alloc_stainfo(pstapriv, pnetwork->network.MacAddress);
+
+ if (psta) { /* update ptarget_sta */
+ DBG_88E("%s\n", __func__);
+ psta->aid = pnetwork->join_res;
+ psta->mac_id = 0;
+ /* sta mode */
+ rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true);
+ /* security related */
+ if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
+ padapter->securitypriv.binstallGrpkey = false;
+ padapter->securitypriv.busetkipkey = false;
+ padapter->securitypriv.bgrpkey_handshake = false;
+ psta->ieee8021x_blocked = true;
+ psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
+ _rtw_memset((u8 *)&psta->dot118021x_UncstKey, 0, sizeof(union Keytype));
+ _rtw_memset((u8 *)&psta->dot11tkiprxmickey, 0, sizeof(union Keytype));
+ _rtw_memset((u8 *)&psta->dot11tkiptxmickey, 0, sizeof(union Keytype));
+ _rtw_memset((u8 *)&psta->dot11txpn, 0, sizeof(union pn48));
+ _rtw_memset((u8 *)&psta->dot11rxpn, 0, sizeof(union pn48));
+ }
+ /* Commented by Albert 2012/07/21 */
+ /* When doing WPS, the wps_ie_len won't be 0 */
+ /* and the Wi-Fi driver shouldn't allow the data packet to be transmitted. */
+ if (padapter->securitypriv.wps_ie_len != 0) {
+ psta->ieee8021x_blocked = true;
+ padapter->securitypriv.wps_ie_len = 0;
+ }
+ /* for A-MPDU Rx reordering buffer control for bmc_sta & sta_info */
+ /* if A-MPDU Rx is enabled, resetting rx_ordering_ctrl wstart_b(indicate_seq) to default value = 0xffff */
+ /* todo: check if AP can send A-MPDU packets */
+ for (i = 0; i < 16; i++) {
+ /* preorder_ctrl = &precvpriv->recvreorder_ctrl[i]; */
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+ preorder_ctrl->enable = false;
+ preorder_ctrl->indicate_seq = 0xffff;
+ preorder_ctrl->wend_b = 0xffff;
+ preorder_ctrl->wsize_b = 64;/* max_ampdu_sz; ex. 32(kbytes) -> wsize_b = 32 */
+ }
+ bmc_sta = rtw_get_bcmc_stainfo(padapter);
+ if (bmc_sta) {
+ for (i = 0; i < 16; i++) {
+ /* preorder_ctrl = &precvpriv->recvreorder_ctrl[i]; */
+ preorder_ctrl = &bmc_sta->recvreorder_ctrl[i];
+ preorder_ctrl->enable = false;
+ preorder_ctrl->indicate_seq = 0xffff;
+ preorder_ctrl->wend_b = 0xffff;
+ preorder_ctrl->wsize_b = 64;/* max_ampdu_sz; ex. 32(kbytes) -> wsize_b = 32 */
+ }
+ }
+ /* misc. */
+ update_sta_info(padapter, psta);
+ }
+ return psta;
+}
+
+/* pnetwork: returned from rtw_joinbss_event_callback */
+/* ptarget_wlan: found from scanned_queue */
+static void rtw_joinbss_update_network(struct adapter *padapter, struct wlan_network *ptarget_wlan, struct wlan_network *pnetwork)
+{
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_network *cur_network = &(pmlmepriv->cur_network);
+
+ DBG_88E("%s\n", __func__);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("\nfw_state:%x, BSSID:%pM\n",
+ get_fwstate(pmlmepriv), pnetwork->network.MacAddress));
+
+
+ /* why not use ptarget_wlan?? */
+ memcpy(&cur_network->network, &pnetwork->network, pnetwork->network.Length);
+ /* some IEs in pnetwork is wrong, so we should use ptarget_wlan IEs */
+ cur_network->network.IELength = ptarget_wlan->network.IELength;
+ memcpy(&cur_network->network.IEs[0], &ptarget_wlan->network.IEs[0], MAX_IE_SZ);
+
+ cur_network->aid = pnetwork->join_res;
+
+
+ rtw_set_signal_stat_timer(&padapter->recvpriv);
+ padapter->recvpriv.signal_strength = ptarget_wlan->network.PhyInfo.SignalStrength;
+ padapter->recvpriv.signal_qual = ptarget_wlan->network.PhyInfo.SignalQuality;
+ /* the ptarget_wlan->network.Rssi is raw data, we use ptarget_wlan->network.PhyInfo.SignalStrength instead (which has been scaled) */
+ padapter->recvpriv.rssi = translate_percentage_to_dbm(ptarget_wlan->network.PhyInfo.SignalStrength);
+ rtw_set_signal_stat_timer(&padapter->recvpriv);
+
+ /* update fw_state will clr _FW_UNDER_LINKING here indirectly */
+ switch (pnetwork->network.InfrastructureMode) {
+ case Ndis802_11Infrastructure:
+ if (pmlmepriv->fw_state&WIFI_UNDER_WPS)
+ pmlmepriv->fw_state = WIFI_STATION_STATE|WIFI_UNDER_WPS;
+ else
+ pmlmepriv->fw_state = WIFI_STATION_STATE;
+ break;
+ case Ndis802_11IBSS:
+ pmlmepriv->fw_state = WIFI_ADHOC_STATE;
+ break;
+ default:
+ pmlmepriv->fw_state = WIFI_NULL_STATE;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Invalid network_mode\n"));
+ break;
+ }
+
+ rtw_update_protection(padapter, (cur_network->network.IEs) +
+ sizeof(struct ndis_802_11_fixed_ie),
+ (cur_network->network.IELength));
+ rtw_update_ht_cap(padapter, cur_network->network.IEs, cur_network->network.IELength);
+}
+
+/* Notes: the function could be > passive_level (the same context as Rx tasklet) */
+/* pnetwork: returned from rtw_joinbss_event_callback */
+/* ptarget_wlan: found from scanned_queue */
+/* if join_res > 0, for (fw_state == WIFI_STATION_STATE), we check if "ptarget_sta" & "ptarget_wlan" exist. */
+/* if join_res > 0, for (fw_state == WIFI_ADHOC_STATE), we only check if "ptarget_wlan" exist. */
+/* if join_res > 0, update "cur_network->network" from "pnetwork->network" if (ptarget_wlan != NULL). */
+
+void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
+{
+ unsigned long irql, irql2;
+ u8 timer_cancelled;
+ struct sta_info *ptarget_sta = NULL, *pcur_sta = NULL;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+ struct wlan_network *pnetwork = (struct wlan_network *)pbuf;
+ struct wlan_network *cur_network = &(pmlmepriv->cur_network);
+ struct wlan_network *pcur_wlan = NULL, *ptarget_wlan = NULL;
+ unsigned int the_same_macaddr = false;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("joinbss event call back received with res=%d\n", pnetwork->join_res));
+
+ rtw_get_encrypt_decrypt_from_registrypriv(adapter);
+
+
+ if (pmlmepriv->assoc_ssid.SsidLength == 0)
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ joinbss event call back for Any SSid\n"));
+ else
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("@@@@@ rtw_joinbss_event_callback for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
+
+ the_same_macaddr = _rtw_memcmp(pnetwork->network.MacAddress, cur_network->network.MacAddress, ETH_ALEN);
+
+ pnetwork->network.Length = get_wlan_bssid_ex_sz(&pnetwork->network);
+ if (pnetwork->network.Length > sizeof(struct wlan_bssid_ex)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("\n\n ***joinbss_evt_callback return a wrong bss ***\n\n"));
+ goto ignore_nolock;
+ }
+
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("\nrtw_joinbss_event_callback!! _enter_critical\n"));
+
+ if (pnetwork->join_res > 0) {
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) {
+ /* s1. find ptarget_wlan */
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ if (the_same_macaddr) {
+ ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
+ } else {
+ pcur_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
+ if (pcur_wlan)
+ pcur_wlan->fixed = false;
+
+ pcur_sta = rtw_get_stainfo(pstapriv, cur_network->network.MacAddress);
+ if (pcur_sta) {
+ _enter_critical_bh(&(pstapriv->sta_hash_lock), &irql2);
+ rtw_free_stainfo(adapter, pcur_sta);
+ _exit_critical_bh(&(pstapriv->sta_hash_lock), &irql2);
+ }
+
+ ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, pnetwork->network.MacAddress);
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
+ if (ptarget_wlan)
+ ptarget_wlan->fixed = true;
+ }
+ }
+ } else {
+ ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, pnetwork->network.MacAddress);
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
+ if (ptarget_wlan)
+ ptarget_wlan->fixed = true;
+ }
+ }
+
+ /* s2. update cur_network */
+ if (ptarget_wlan) {
+ rtw_joinbss_update_network(adapter, ptarget_wlan, pnetwork);
+ } else {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't find ptarget_wlan when joinbss_event callback\n"));
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ goto ignore_joinbss_callback;
+ }
+
+
+ /* s3. find ptarget_sta & update ptarget_sta after update cur_network only for station mode */
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
+ ptarget_sta = rtw_joinbss_update_stainfo(adapter, pnetwork);
+ if (ptarget_sta == NULL) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't update stainfo when joinbss_event callback\n"));
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ goto ignore_joinbss_callback;
+ }
+ }
+
+ /* s4. indicate connect */
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) {
+ rtw_indicate_connect(adapter);
+ } else {
+ /* adhoc mode will call rtw_indicate_connect in rtw_stassoc_event_callback */
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("adhoc mode, fw_state:%x", get_fwstate(pmlmepriv)));
+ }
+
+ /* s5. Cancel assoc_timer */
+ _cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("Cancle assoc_timer\n"));
+
+ } else {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("rtw_joinbss_event_callback err: fw_state:%x", get_fwstate(pmlmepriv)));
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ goto ignore_joinbss_callback;
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+
+ } else if (pnetwork->join_res == -4) {
+ rtw_reset_securitypriv(adapter);
+ _set_timer(&pmlmepriv->assoc_timer, 1);
+
+ if ((check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) == true) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("fail! clear _FW_UNDER_LINKING ^^^fw_state=%x\n", get_fwstate(pmlmepriv)));
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+ }
+ } else { /* if join_res < 0 (join fails), then try again */
+ _set_timer(&pmlmepriv->assoc_timer, 1);
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+ }
+
+ignore_joinbss_callback:
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+ignore_nolock:
+_func_exit_;
+}
+
+void rtw_joinbss_event_callback(struct adapter *adapter, u8 *pbuf)
+{
+ struct wlan_network *pnetwork = (struct wlan_network *)pbuf;
+
+_func_enter_;
+
+ mlmeext_joinbss_event_callback(adapter, pnetwork->join_res);
+
+ rtw_os_xmit_schedule(adapter);
+
+_func_exit_;
+}
+
+static u8 search_max_mac_id(struct adapter *padapter)
+{
+ u8 mac_id;
+#if defined (CONFIG_88EU_AP_MODE)
+ u8 aid;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+#endif
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+#if defined (CONFIG_88EU_AP_MODE)
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ for (aid = (pstapriv->max_num_sta); aid > 0; aid--) {
+ if (pstapriv->sta_aid[aid-1] != NULL)
+ break;
+ }
+ mac_id = aid + 1;
+ } else
+#endif
+ {/* adhoc id = 31~2 */
+ for (mac_id = (NUM_STA-1); mac_id >= IBSS_START_MAC_ID; mac_id--) {
+ if (pmlmeinfo->FW_sta_info[mac_id].status == 1)
+ break;
+ }
+ }
+ return mac_id;
+}
+
+/* FOR AP , AD-HOC mode */
+void rtw_stassoc_hw_rpt(struct adapter *adapter, struct sta_info *psta)
+{
+ u16 media_status;
+ u8 macid;
+
+ if (psta == NULL)
+ return;
+
+ macid = search_max_mac_id(adapter);
+ rtw_hal_set_hwreg(adapter, HW_VAR_TX_RPT_MAX_MACID, (u8 *)&macid);
+ media_status = (psta->mac_id<<8)|1; /* MACID|OPMODE:1 connect */
+ rtw_hal_set_hwreg(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
+}
+
+void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
+{
+ unsigned long irql;
+ struct sta_info *psta;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+ struct stassoc_event *pstassoc = (struct stassoc_event *)pbuf;
+ struct wlan_network *cur_network = &(pmlmepriv->cur_network);
+ struct wlan_network *ptarget_wlan = NULL;
+
+_func_enter_;
+
+ if (rtw_access_ctrl(adapter, pstassoc->macaddr) == false)
+ return;
+
+#if defined (CONFIG_88EU_AP_MODE)
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ psta = rtw_get_stainfo(&adapter->stapriv, pstassoc->macaddr);
+ if (psta) {
+ ap_sta_info_defer_update(adapter, psta);
+ rtw_stassoc_hw_rpt(adapter, psta);
+ }
+ goto exit;
+ }
+#endif
+ /* for AD-HOC mode */
+ psta = rtw_get_stainfo(&adapter->stapriv, pstassoc->macaddr);
+ if (psta != NULL) {
+ /* the sta is already in sta_info_queue => do nothing */
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Error: rtw_stassoc_event_callback: sta has been in sta_hash_queue\n"));
+ goto exit; /* (the driver has received this event before, but the fw has not yet set the key to CAM_ENTRY) */
+ }
+ psta = rtw_alloc_stainfo(&adapter->stapriv, pstassoc->macaddr);
+ if (psta == NULL) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Can't alloc sta_info when rtw_stassoc_event_callback\n"));
+ goto exit;
+ }
+ /* to do: init sta_info variable */
+ psta->qos_option = 0;
+ psta->mac_id = (uint)pstassoc->cam_id;
+ DBG_88E("%s\n", __func__);
+ /* for ad-hoc mode */
+ rtw_hal_set_odm_var(adapter, HAL_ODM_STA_INFO, psta, true);
+ rtw_stassoc_hw_rpt(adapter, psta);
+ if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
+ psta->dot118021XPrivacy = adapter->securitypriv.dot11PrivacyAlgrthm;
+ psta->ieee8021x_blocked = false;
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE))) {
+ if (adapter->stapriv.asoc_sta_count == 2) {
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ ptarget_wlan = rtw_find_network(&pmlmepriv->scanned_queue, cur_network->network.MacAddress);
+ if (ptarget_wlan)
+ ptarget_wlan->fixed = true;
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ /* a sta + bc/mc_stainfo (not Ibss_stainfo) */
+ rtw_indicate_connect(adapter);
+ }
+ }
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+ mlmeext_sta_add_event_callback(adapter, psta);
+exit:
+_func_exit_;
+}
+
+void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
+{
+ unsigned long irql, irql2;
+ int mac_id = -1;
+ struct sta_info *psta;
+ struct wlan_network *pwlan = NULL;
+ struct wlan_bssid_ex *pdev_network = NULL;
+ u8 *pibss = NULL;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+ struct stadel_event *pstadel = (struct stadel_event *)pbuf;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct wlan_network *tgt_network = &(pmlmepriv->cur_network);
+
+_func_enter_;
+
+ psta = rtw_get_stainfo(&adapter->stapriv, pstadel->macaddr);
+ if (psta)
+ mac_id = psta->mac_id;
+ else
+ mac_id = pstadel->mac_id;
+
+ DBG_88E("%s(mac_id=%d)=%pM\n", __func__, mac_id, pstadel->macaddr);
+
+ if (mac_id >= 0) {
+ u16 media_status;
+ media_status = (mac_id<<8)|0; /* MACID|OPMODE:0 means disconnect */
+ /* for STA, AP, ADHOC mode, report disconnect status to FW */
+ rtw_hal_set_hwreg(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
+ return;
+
+ mlmeext_sta_del_event_callback(adapter);
+
+ _enter_critical_bh(&pmlmepriv->lock, &irql2);
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ if (pmlmepriv->to_roaming > 0)
+ pmlmepriv->to_roaming--; /* this stadel_event is caused by roaming, decrease to_roaming */
+ else if (pmlmepriv->to_roaming == 0)
+ pmlmepriv->to_roaming = adapter->registrypriv.max_roaming_times;
+
+ if (*((unsigned short *)(pstadel->rsvd)) != WLAN_REASON_EXPIRATION_CHK)
+ pmlmepriv->to_roaming = 0; /* don't roam */
+
+ rtw_free_uc_swdec_pending_queue(adapter);
+
+ rtw_free_assoc_resources(adapter, 1);
+ rtw_indicate_disconnect(adapter);
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ /* remove the network entry in scanned_queue */
+ pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
+ if (pwlan) {
+ pwlan->fixed = false;
+ rtw_free_network_nolock(pmlmepriv, pwlan);
+ }
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ _rtw_roaming(adapter, tgt_network);
+ }
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
+ _enter_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+ rtw_free_stainfo(adapter, psta);
+ _exit_critical_bh(&(pstapriv->sta_hash_lock), &irql);
+
+ if (adapter->stapriv.asoc_sta_count == 1) { /* a sta + bc/mc_stainfo (not Ibss_stainfo) */
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ /* free old ibss network */
+ pwlan = rtw_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress);
+ if (pwlan) {
+ pwlan->fixed = false;
+ rtw_free_network_nolock(pmlmepriv, pwlan);
+ }
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ /* re-create ibss */
+ pdev_network = &(adapter->registrypriv.dev_network);
+ pibss = adapter->registrypriv.dev_network.MacAddress;
+
+ memcpy(pdev_network, &tgt_network->network, get_wlan_bssid_ex_sz(&tgt_network->network));
+
+ _rtw_memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
+ memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid));
+
+ rtw_update_registrypriv_dev_network(adapter);
+
+ rtw_generate_random_ibss(pibss);
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
+ set_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE);
+ _clr_fwstate_(pmlmepriv, WIFI_ADHOC_STATE);
+ }
+
+ if (rtw_createbss_cmd(adapter) != _SUCCESS)
+ RT_TRACE(_module_rtl871x_ioctl_set_c_, _drv_err_, ("***Error=>stadel_event_callback: rtw_createbss_cmd status FAIL***\n "));
+ }
+ }
+ _exit_critical_bh(&pmlmepriv->lock, &irql2);
+_func_exit_;
+}
+
+void rtw_cpwm_event_callback(struct adapter *padapter, u8 *pbuf)
+{
+_func_enter_;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("+rtw_cpwm_event_callback !!!\n"));
+_func_exit_;
+}
+
+/*
+* _rtw_join_timeout_handler - Timeout/failure handler for CMD JoinBss
+* @adapter: pointer to struct adapter structure
+*/
+void _rtw_join_timeout_handler (struct adapter *adapter)
+{
+ unsigned long irql;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ int do_join_r;
+
+_func_enter_;
+
+ DBG_88E("%s, fw_state=%x\n", __func__, get_fwstate(pmlmepriv));
+
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
+ return;
+
+
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+
+ if (pmlmepriv->to_roaming > 0) { /* join timeout caused by roaming */
+ while (1) {
+ pmlmepriv->to_roaming--;
+ if (pmlmepriv->to_roaming != 0) { /* try another roaming attempt */
+ DBG_88E("%s try another roaming\n", __func__);
+ do_join_r = rtw_do_join(adapter);
+ if (_SUCCESS != do_join_r) {
+ DBG_88E("%s roaming do_join return %d\n", __func__ , do_join_r);
+ continue;
+ }
+ break;
+ } else {
+ DBG_88E("%s We've try roaming but fail\n", __func__);
+ rtw_indicate_disconnect(adapter);
+ break;
+ }
+ }
+ } else {
+ rtw_indicate_disconnect(adapter);
+ free_scanqueue(pmlmepriv);
+ }
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+_func_exit_;
+}
+
+/*
+* rtw_scan_timeout_handler - Timeout/Failure handler for CMD SiteSurvey
+* @adapter: pointer to struct adapter structure
+*/
+void rtw_scan_timeout_handler (struct adapter *adapter)
+{
+ unsigned long irql;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+
+ DBG_88E(FUNC_ADPT_FMT" fw_state=%x\n", FUNC_ADPT_ARG(adapter), get_fwstate(pmlmepriv));
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+ rtw_indicate_scan_done(adapter, true);
+}
+
+static void rtw_auto_scan_handler(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ /* auto site survey every 60 sec */
+ if (pmlmepriv->scan_interval > 0) {
+ pmlmepriv->scan_interval--;
+ if (pmlmepriv->scan_interval == 0) {
+ DBG_88E("%s\n", __func__);
+ rtw_set_802_11_bssid_list_scan(padapter, NULL, 0);
+ pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
+ }
+ }
+}
+
+void rtw_dynamic_check_timer_handlder(struct adapter *adapter)
+{
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct registry_priv *pregistrypriv = &adapter->registrypriv;
+
+ if (!adapter)
+ return;
+ if (!adapter->hw_init_completed)
+ return;
+ if ((adapter->bDriverStopped) || (adapter->bSurpriseRemoved))
+ return;
+ if (adapter->net_closed)
+ return;
+ rtw_dynamic_chk_wk_cmd(adapter);
+
+ if (pregistrypriv->wifi_spec == 1) {
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &adapter->wdinfo;
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+#endif
+ {
+ /* auto site survey */
+ rtw_auto_scan_handler(adapter);
+ }
+ }
+
+ rcu_read_lock();
+
+ if (rcu_dereference(adapter->pnetdev->rx_handler_data) &&
+ (check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) == true)) {
+ /* expire NAT2.5 entry */
+ nat25_db_expire(adapter);
+
+ if (adapter->pppoe_connection_in_progress > 0) {
+ adapter->pppoe_connection_in_progress--;
+ }
+
+ /* because rtw_dynamic_check_timer_handlder() is called every 2 seconds */
+ if (adapter->pppoe_connection_in_progress > 0) {
+ adapter->pppoe_connection_in_progress--;
+ }
+ }
+
+ rcu_read_unlock();
+}
+
+#define RTW_SCAN_RESULT_EXPIRE 2000
+
+/*
+* Select a new join candidate from the original @param candidate and @param competitor
+* @return true: candidate is updated
+* @return false: candidate is not updated
+*/
+static int rtw_check_join_candidate(struct mlme_priv *pmlmepriv
+ , struct wlan_network **candidate, struct wlan_network *competitor)
+{
+ int updated = false;
+ struct adapter *adapter = container_of(pmlmepriv, struct adapter, mlmepriv);
+
+
+ /* check bssid, if needed */
+ if (pmlmepriv->assoc_by_bssid) {
+ if (!_rtw_memcmp(competitor->network.MacAddress, pmlmepriv->assoc_bssid, ETH_ALEN))
+ goto exit;
+ }
+
+ /* check ssid, if needed */
+ if (pmlmepriv->assoc_ssid.Ssid && pmlmepriv->assoc_ssid.SsidLength) {
+ if (competitor->network.Ssid.SsidLength != pmlmepriv->assoc_ssid.SsidLength ||
+ _rtw_memcmp(competitor->network.Ssid.Ssid, pmlmepriv->assoc_ssid.Ssid, pmlmepriv->assoc_ssid.SsidLength) == false)
+ goto exit;
+ }
+
+ if (rtw_is_desired_network(adapter, competitor) == false)
+ goto exit;
+
+ if (pmlmepriv->to_roaming) {
+ if (rtw_get_passing_time_ms((u32)competitor->last_scanned) >= RTW_SCAN_RESULT_EXPIRE ||
+ is_same_ess(&competitor->network, &pmlmepriv->cur_network.network) == false)
+ goto exit;
+ }
+
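+ /* adopt the competitor when there is no candidate yet or it has a stronger RSSI */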
+ if (*candidate == NULL || (*candidate)->network.Rssi < competitor->network.Rssi) {
+ *candidate = competitor;
+ updated = true;
+ }
+ if (updated) {
+ DBG_88E("[by_bssid:%u][assoc_ssid:%s]new candidate: %s(%pM rssi:%d\n",
+ pmlmepriv->assoc_by_bssid,
+ pmlmepriv->assoc_ssid.Ssid,
+ (*candidate)->network.Ssid.Ssid,
+ (*candidate)->network.MacAddress,
+ (int)(*candidate)->network.Rssi);
+ DBG_88E("[to_roaming:%u]\n", pmlmepriv->to_roaming);
+ }
+
+exit:
+ return updated;
+}
+
+/*
+Calling context:
+The caller of this sub-routine will be in a critical section...
+The caller must hold the following spinlock:
+pmlmepriv->lock
+*/
+
+int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
+{
+ unsigned long irql;
+ int ret;
+ struct list_head *phead;
+ struct adapter *adapter;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ struct wlan_network *candidate = NULL;
+ u8 supp_ant_div = false;
+
+_func_enter_;
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+ phead = get_list_head(queue);
+ adapter = (struct adapter *)pmlmepriv->nic_hdl;
+ pmlmepriv->pscanned = get_next(phead);
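+ /* walk the scanned queue and let rtw_check_join_candidate() pick the best entry */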
+ while (!rtw_end_of_queue_search(phead, pmlmepriv->pscanned)) {
+ pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list);
+ if (pnetwork == NULL) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("%s return _FAIL:(pnetwork==NULL)\n", __func__));
+ ret = _FAIL;
+ goto exit;
+ }
+ pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
+ rtw_check_join_candidate(pmlmepriv, &candidate, pnetwork);
+ }
+ if (candidate == NULL) {
+ DBG_88E("%s: return _FAIL(candidate==NULL)\n", __func__);
+ ret = _FAIL;
+ goto exit;
+ } else {
+ DBG_88E("%s: candidate: %s(%pM ch:%u)\n", __func__,
+ candidate->network.Ssid.Ssid, candidate->network.MacAddress,
+ candidate->network.Configuration.DSConfig);
+ }
+
+
+ /* check for situation of _FW_LINKED */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ DBG_88E("%s: _FW_LINKED while ask_for_joinbss!!!\n", __func__);
+
+ rtw_disassoc_cmd(adapter, 0, true);
+ rtw_indicate_disconnect(adapter);
+ rtw_free_assoc_resources(adapter, 0);
+ }
+
+ rtw_hal_get_def_var(adapter, HAL_DEF_IS_SUPPORT_ANT_DIV, &(supp_ant_div));
+ if (supp_ant_div) {
+ u8 cur_ant;
+ rtw_hal_get_def_var(adapter, HAL_DEF_CURRENT_ANTENNA, &(cur_ant));
+ DBG_88E("#### Opt_Ant_(%s), cur_Ant(%s)\n",
+ (2 == candidate->network.PhyInfo.Optimum_antenna) ? "A" : "B",
+ (2 == cur_ant) ? "A" : "B"
+ );
+ }
+
+ ret = rtw_joinbss_cmd(adapter, candidate);
+
+exit:
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irql);
+
+_func_exit_;
+
+ return ret;
+}
+
+int rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
+{
+ struct cmd_obj *pcmd;
+ struct setauth_parm *psetauthparm;
+ struct cmd_priv *pcmdpriv = &(adapter->cmdpriv);
+ int res = _SUCCESS;
+
+_func_enter_;
+
+ pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd == NULL) {
+ res = _FAIL; /* try again */
+ goto exit;
+ }
+
+ psetauthparm = (struct setauth_parm *)rtw_zmalloc(sizeof(struct setauth_parm));
+ if (psetauthparm == NULL) {
+ kfree(pcmd);
+ res = _FAIL;
+ goto exit;
+ }
+ _rtw_memset(psetauthparm, 0, sizeof(struct setauth_parm));
+ psetauthparm->mode = (unsigned char)psecuritypriv->dot11AuthAlgrthm;
+ pcmd->cmdcode = _SetAuth_CMD_;
+ pcmd->parmbuf = (unsigned char *)psetauthparm;
+ pcmd->cmdsz = (sizeof(struct setauth_parm));
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+ _rtw_init_listhead(&pcmd->list);
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("after enqueue set_auth_cmd, auth_mode=%x\n",
+ psecuritypriv->dot11AuthAlgrthm));
+ res = rtw_enqueue_cmd(pcmdpriv, pcmd);
+exit:
+_func_exit_;
+ return res;
+}
+
+int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, int keyid, u8 set_tx)
+{
+ u8 keylen;
+ struct cmd_obj *pcmd;
+ struct setkey_parm *psetkeyparm;
+ struct cmd_priv *pcmdpriv = &(adapter->cmdpriv);
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+ int res = _SUCCESS;
+
+_func_enter_;
+ pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd == NULL) {
+ res = _FAIL; /* try again */
+ goto exit;
+ }
+ psetkeyparm = (struct setkey_parm *)rtw_zmalloc(sizeof(struct setkey_parm));
+ if (psetkeyparm == NULL) {
+ kfree(pcmd);
+ res = _FAIL;
+ goto exit;
+ }
+
+ _rtw_memset(psetkeyparm, 0, sizeof(struct setkey_parm));
+
+ if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
+ psetkeyparm->algorithm = (unsigned char)psecuritypriv->dot118021XGrpPrivacy;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("\n rtw_set_key: psetkeyparm->algorithm=(unsigned char)psecuritypriv->dot118021XGrpPrivacy=%d\n",
+ psetkeyparm->algorithm));
+ } else {
+ psetkeyparm->algorithm = (u8)psecuritypriv->dot11PrivacyAlgrthm;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("\n rtw_set_key: psetkeyparm->algorithm=(u8)psecuritypriv->dot11PrivacyAlgrthm=%d\n",
+ psetkeyparm->algorithm));
+ }
+ psetkeyparm->keyid = (u8)keyid;/* 0~3 */
+ psetkeyparm->set_tx = set_tx;
+ pmlmepriv->key_mask |= BIT(psetkeyparm->keyid);
+ DBG_88E("==> rtw_set_key algorithm(%x), keyid(%x), key_mask(%x)\n",
+ psetkeyparm->algorithm, psetkeyparm->keyid, pmlmepriv->key_mask);
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("\n rtw_set_key: psetkeyparm->algorithm=%d psetkeyparm->keyid=(u8)keyid=%d\n",
+ psetkeyparm->algorithm, keyid));
+
+ switch (psetkeyparm->algorithm) {
+ case _WEP40_:
+ keylen = 5;
+ memcpy(&(psetkeyparm->key[0]), &(psecuritypriv->dot11DefKey[keyid].skey[0]), keylen);
+ break;
+ case _WEP104_:
+ keylen = 13;
+ memcpy(&(psetkeyparm->key[0]), &(psecuritypriv->dot11DefKey[keyid].skey[0]), keylen);
+ break;
+ case _TKIP_:
+ keylen = 16;
+ memcpy(&psetkeyparm->key, &psecuritypriv->dot118021XGrpKey[keyid], keylen);
+ psetkeyparm->grpkey = 1;
+ break;
+ case _AES_:
+ keylen = 16;
+ memcpy(&psetkeyparm->key, &psecuritypriv->dot118021XGrpKey[keyid], keylen);
+ psetkeyparm->grpkey = 1;
+ break;
+ default:
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("\n rtw_set_key:psecuritypriv->dot11PrivacyAlgrthm=%x (must be 1 or 2 or 4 or 5)\n",
+ psecuritypriv->dot11PrivacyAlgrthm));
+ res = _FAIL;
+ goto exit;
+ }
+ pcmd->cmdcode = _SetKey_CMD_;
+ pcmd->parmbuf = (u8 *)psetkeyparm;
+ pcmd->cmdsz = (sizeof(struct setkey_parm));
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+ _rtw_init_listhead(&pcmd->list);
+ res = rtw_enqueue_cmd(pcmdpriv, pcmd);
+exit:
+_func_exit_;
+ return res;
+}
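+
+/*
+ * Illustrative usage sketch (not part of the driver): queue default WEP key 0
+ * as the TX key, assuming psecuritypriv->dot11DefKey[0] has already been
+ * filled in by the caller. Kept under #if 0 so it is never compiled; the
+ * "some_adapter" pointer is hypothetical.
+ */
+#if 0
+{
+	struct adapter *adapter = some_adapter;	/* hypothetical adapter pointer */
+
+	if (rtw_set_key(adapter, &adapter->securitypriv, 0, 1) != _SUCCESS)
+		DBG_88E("rtw_set_key enqueue failed\n");
+}
+#endif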
+
+/* adjust IEs for rtw_joinbss_cmd in WMM */
+int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len, uint initial_out_len)
+{
+ unsigned int ielength = 0;
+ unsigned int i, j;
+
+ i = 12; /* after the fixed IE */
+ while (i < in_len) {
+ ielength = initial_out_len;
+
+ if (i + 5 < in_len && in_ie[i] == 0xDD && in_ie[i+2] == 0x00 && in_ie[i+3] == 0x50 && in_ie[i+4] == 0xF2 && in_ie[i+5] == 0x02) {
+ /* WMM element ID and OUI */
+ /* Append WMM IE to the last index of out_ie */
+
+ for (j = i; j < i + 9; j++) {
+ out_ie[ielength] = in_ie[j];
+ ielength++;
+ }
+ out_ie[initial_out_len + 1] = 0x07;
+ out_ie[initial_out_len + 6] = 0x00;
+ out_ie[initial_out_len + 8] = 0x00;
+ break;
+ }
+ i += (in_ie[i+1]+2); /* to the next IE element */
+ }
+ return ielength;
+}
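+
+#if 0
+/*
+ * For reference only (never compiled): the 9 bytes copied and patched by the
+ * loop above form a standard vendor-specific WMM information element,
+ * assuming the usual layout. The assignments above force the length (0x07),
+ * the OUI subtype (0x00, information element) and a cleared QoS info field.
+ */
+static const u8 wmm_info_ie_example[9] = {
+	0xDD, 0x07,		/* element ID (vendor specific), length */
+	0x00, 0x50, 0xF2,	/* Microsoft OUI */
+	0x02,			/* OUI type: WMM */
+	0x00,			/* OUI subtype: information element */
+	0x01,			/* version */
+	0x00			/* QoS info */
+};
+#endif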
+
+/* */
+/* Ported from 8185: IsInPreAuthKeyList(). (Renamed from SecIsInPreAuthKeyList(), 2006-10-13.) */
+/* Added by Annie, 2006-05-07. */
+/* */
+/* Search by BSSID. */
+/* Return Value: */
+/* -1: if there is no pre-auth key in the table */
+/* >= 0: if there is a pre-auth key, return the entry id */
+/* */
+
+static int SecIsInPMKIDList(struct adapter *Adapter, u8 *bssid)
+{
+ struct security_priv *psecuritypriv = &Adapter->securitypriv;
+ int i = 0;
+
+ do {
+ if ((psecuritypriv->PMKIDList[i].bUsed) &&
+ (_rtw_memcmp(psecuritypriv->PMKIDList[i].Bssid, bssid, ETH_ALEN) == true)) {
+ break;
+ } else {
+ i++;
+ /* continue; */
+ }
+
+ } while (i < NUM_PMKID_CACHE);
+
+ if (i == NUM_PMKID_CACHE)
+ i = -1; /* Could not find. */
+ /* else: there is one Pre-Authentication Key for the specific BSSID at entry i. */
+ return i;
+}
+
+/* */
+/* Check the RSN IE length. */
+/* If the RSN IE length is <= 20, the RSN IE does not include the PMKID information. */
+/* Elements 0-11 in the array are the fixed IEs. */
+/* Element 12 in the array is the IE ID. */
+/* Element 13 in the array is the IE length. */
+/* */
+
+static int rtw_append_pmkid(struct adapter *Adapter, int iEntry, u8 *ie, uint ie_len)
+{
+ struct security_priv *psecuritypriv = &Adapter->securitypriv;
+
+ if (ie[13] <= 20) {
+ /* The RSN IE didn't include the PMK ID, append the PMK information */
+ ie[ie_len] = 1;
+ ie_len++;
+ ie[ie_len] = 0; /* PMKID count = 0x0100 */
+ ie_len++;
+ memcpy(&ie[ie_len], &psecuritypriv->PMKIDList[iEntry].PMKID, 16);
+
+ ie_len += 16;
+ ie[13] += 18;/* PMKID length = 2+16 */
+ }
+ return ie_len;
+}
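+
+/*
+ * Sketch of the bytes appended above, for reference only: a 2-byte PMKID
+ * count of one (little endian: 0x01, 0x00) followed by a single 16-byte PMKID
+ * from the cache, which is why the RSN IE length byte at ie[13] grows by
+ * 2 + 16 = 18.
+ */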
+
+int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len)
+{
+ u8 authmode;
+ uint ielength;
+ int iEntry;
+
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+ uint ndisauthmode = psecuritypriv->ndisauthtype;
+ uint ndissecuritytype = psecuritypriv->ndisencryptstatus;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
+ ("+rtw_restruct_sec_ie: ndisauthmode=%d ndissecuritytype=%d\n",
+ ndisauthmode, ndissecuritytype));
+
+ /* copy fixed ie only */
+ memcpy(out_ie, in_ie, 12);
+ ielength = 12;
+ if ((ndisauthmode == Ndis802_11AuthModeWPA) ||
+ (ndisauthmode == Ndis802_11AuthModeWPAPSK))
+ authmode = _WPA_IE_ID_;
+ if ((ndisauthmode == Ndis802_11AuthModeWPA2) ||
+ (ndisauthmode == Ndis802_11AuthModeWPA2PSK))
+ authmode = _WPA2_IE_ID_;
+
+ if (check_fwstate(pmlmepriv, WIFI_UNDER_WPS)) {
+ memcpy(out_ie+ielength, psecuritypriv->wps_ie, psecuritypriv->wps_ie_len);
+
+ ielength += psecuritypriv->wps_ie_len;
+ } else if ((authmode == _WPA_IE_ID_) || (authmode == _WPA2_IE_ID_)) {
+ /* copy RSN or SSN */
+ memcpy(&out_ie[ielength], &psecuritypriv->supplicant_ie[0], psecuritypriv->supplicant_ie[1]+2);
+ ielength += psecuritypriv->supplicant_ie[1]+2;
+ rtw_report_sec_ie(adapter, authmode, psecuritypriv->supplicant_ie);
+ }
+
+ iEntry = SecIsInPMKIDList(adapter, pmlmepriv->assoc_bssid);
+ if (iEntry < 0) {
+ return ielength;
+ } else {
+ if (authmode == _WPA2_IE_ID_)
+ ielength = rtw_append_pmkid(adapter, iEntry, out_ie, ielength);
+ }
+
+_func_exit_;
+
+ return ielength;
+}
+
+void rtw_init_registrypriv_dev_network(struct adapter *adapter)
+{
+ struct registry_priv *pregistrypriv = &adapter->registrypriv;
+ struct eeprom_priv *peepriv = &adapter->eeprompriv;
+ struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
+ u8 *myhwaddr = myid(peepriv);
+
+_func_enter_;
+
+ memcpy(pdev_network->MacAddress, myhwaddr, ETH_ALEN);
+
+ memcpy(&pdev_network->Ssid, &pregistrypriv->ssid, sizeof(struct ndis_802_11_ssid));
+
+ pdev_network->Configuration.Length = sizeof(struct ndis_802_11_config);
+ pdev_network->Configuration.BeaconPeriod = 100;
+ pdev_network->Configuration.FHConfig.Length = 0;
+ pdev_network->Configuration.FHConfig.HopPattern = 0;
+ pdev_network->Configuration.FHConfig.HopSet = 0;
+ pdev_network->Configuration.FHConfig.DwellTime = 0;
+
+_func_exit_;
+}
+
+void rtw_update_registrypriv_dev_network(struct adapter *adapter)
+{
+ int sz = 0;
+ struct registry_priv *pregistrypriv = &adapter->registrypriv;
+ struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+ struct wlan_network *cur_network = &adapter->mlmepriv.cur_network;
+
+_func_enter_;
+
+ pdev_network->Privacy = (psecuritypriv->dot11PrivacyAlgrthm > 0 ? 1 : 0); /* adhoc no 802.1x */
+
+ pdev_network->Rssi = 0;
+
+ switch (pregistrypriv->wireless_mode) {
+ case WIRELESS_11B:
+ pdev_network->NetworkTypeInUse = (Ndis802_11DS);
+ break;
+ case WIRELESS_11G:
+ case WIRELESS_11BG:
+ case WIRELESS_11_24N:
+ case WIRELESS_11G_24N:
+ case WIRELESS_11BG_24N:
+ pdev_network->NetworkTypeInUse = (Ndis802_11OFDM24);
+ break;
+ case WIRELESS_11A:
+ case WIRELESS_11A_5N:
+ pdev_network->NetworkTypeInUse = (Ndis802_11OFDM5);
+ break;
+ case WIRELESS_11ABGN:
+ if (pregistrypriv->channel > 14)
+ pdev_network->NetworkTypeInUse = (Ndis802_11OFDM5);
+ else
+ pdev_network->NetworkTypeInUse = (Ndis802_11OFDM24);
+ break;
+ default:
+ /* TODO */
+ break;
+ }
+
+ pdev_network->Configuration.DSConfig = (pregistrypriv->channel);
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("pregistrypriv->channel=%d, pdev_network->Configuration.DSConfig=0x%x\n",
+ pregistrypriv->channel, pdev_network->Configuration.DSConfig));
+
+ if (cur_network->network.InfrastructureMode == Ndis802_11IBSS)
+ pdev_network->Configuration.ATIMWindow = (0);
+
+ pdev_network->InfrastructureMode = (cur_network->network.InfrastructureMode);
+
+ /* 1. Supported rates */
+ /* 2. IE */
+
+ sz = rtw_generate_ie(pregistrypriv);
+ pdev_network->IELength = sz;
+ pdev_network->Length = get_wlan_bssid_ex_sz((struct wlan_bssid_ex *)pdev_network);
+
+ /* notes: translate IELength & Length after assigning the Length to cmdsz in createbss_cmd(); */
+ /* pdev_network->IELength = cpu_to_le32(sz); */
+_func_exit_;
+}
+
+void rtw_get_encrypt_decrypt_from_registrypriv(struct adapter *adapter)
+{
+_func_enter_;
+_func_exit_;
+}
+
+/* the function is at passive_level */
+void rtw_joinbss_reset(struct adapter *padapter)
+{
+ u8 threshold;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+
+ /* todo: if you want to do any io/reg/hw setting before join_bss, add the code here */
+ pmlmepriv->num_FortyMHzIntolerant = 0;
+
+ pmlmepriv->num_sta_no_ht = 0;
+
+ phtpriv->ampdu_enable = false;/* reset to disabled */
+
+ /* TH = 1 => disable (invalidate) usb rx aggregation */
+ /* TH = 0 => enable (validate) usb rx aggregation, use the init value. */
+ if (phtpriv->ht_option) {
+ if (padapter->registrypriv.wifi_spec == 1)
+ threshold = 1;
+ else
+ threshold = 0;
+ rtw_hal_set_hwreg(padapter, HW_VAR_RXDMA_AGG_PG_TH, (u8 *)(&threshold));
+ } else {
+ threshold = 1;
+ rtw_hal_set_hwreg(padapter, HW_VAR_RXDMA_AGG_PG_TH, (u8 *)(&threshold));
+ }
+}
+
+/* the function is >= passive_level */
+unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_ie, uint in_len, uint *pout_len)
+{
+ u32 ielen, out_len;
+ enum ht_cap_ampdu_factor max_rx_ampdu_factor;
+ unsigned char *p;
+ struct rtw_ieee80211_ht_cap ht_capie;
+ unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01, 0x00};
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct qos_priv *pqospriv = &pmlmepriv->qospriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+ u32 rx_packet_offset, max_recvbuf_sz;
+
+
+ phtpriv->ht_option = false;
+
+ p = rtw_get_ie(in_ie+12, _HT_CAPABILITY_IE_, &ielen, in_len-12);
+
+ if (p && ielen > 0) {
+ if (pqospriv->qos_option == 0) {
+ out_len = *pout_len;
+ rtw_set_ie(out_ie+out_len, _VENDOR_SPECIFIC_IE_,
+ _WMM_IE_Length_, WMM_IE, pout_len);
+
+ pqospriv->qos_option = 1;
+ }
+
+ out_len = *pout_len;
+
+ _rtw_memset(&ht_capie, 0, sizeof(struct rtw_ieee80211_ht_cap));
+
+ ht_capie.cap_info = IEEE80211_HT_CAP_SUP_WIDTH |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_TX_STBC |
+ IEEE80211_HT_CAP_DSSSCCK40;
+
+ rtw_hal_get_def_var(padapter, HAL_DEF_RX_PACKET_OFFSET, &rx_packet_offset);
+ rtw_hal_get_def_var(padapter, HAL_DEF_MAX_RECVBUF_SZ, &max_recvbuf_sz);
+
+ /*
+ AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
+ AMPDU_para [4:2]:Min MPDU Start Spacing
+ */
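+	/*
+	 * Example encoding (illustrative): a max A-MPDU length of 64k (factor 3)
+	 * with a minimum MPDU start spacing of 16 us (density 7) would give
+	 * ampdu_params_info = (3 & 0x03) | (7 << 2) = 0x1f.
+	 */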
+
+ rtw_hal_get_def_var(padapter, HW_VAR_MAX_RX_AMPDU_FACTOR, &max_rx_ampdu_factor);
+ ht_capie.ampdu_params_info = (max_rx_ampdu_factor&0x03);
+
+ if (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)
+ ht_capie.ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY&(0x07<<2));
+ else
+ ht_capie.ampdu_params_info |= (IEEE80211_HT_CAP_AMPDU_DENSITY&0x00);
+
+
+ rtw_set_ie(out_ie+out_len, _HT_CAPABILITY_IE_,
+ sizeof(struct rtw_ieee80211_ht_cap), (unsigned char *)&ht_capie, pout_len);
+
+ phtpriv->ht_option = true;
+
+ p = rtw_get_ie(in_ie+12, _HT_ADD_INFO_IE_, &ielen, in_len-12);
+ if (p && (ielen == sizeof(struct ieee80211_ht_addt_info))) {
+ out_len = *pout_len;
+ rtw_set_ie(out_ie+out_len, _HT_ADD_INFO_IE_, ielen, p+2 , pout_len);
+ }
+ }
+ return phtpriv->ht_option;
+}
+
+/* the function is > passive_level (in critical_section) */
+void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len)
+{
+ u8 *p, max_ampdu_sz;
+ int len;
+ struct rtw_ieee80211_ht_cap *pht_capie;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (!phtpriv->ht_option)
+ return;
+
+ if ((!pmlmeinfo->HT_info_enable) || (!pmlmeinfo->HT_caps_enable))
+ return;
+
+ DBG_88E("+rtw_update_ht_cap()\n");
+
+ /* may need to check whether the AP supports rx ampdu. */
+ if ((!phtpriv->ampdu_enable) && (pregistrypriv->ampdu_enable == 1)) {
+ if (pregistrypriv->wifi_spec == 1)
+ phtpriv->ampdu_enable = false;
+ else
+ phtpriv->ampdu_enable = true;
+ } else if (pregistrypriv->ampdu_enable == 2) {
+ phtpriv->ampdu_enable = true;
+ }
+
+
+ /* check Max Rx A-MPDU Size */
+ len = 0;
+ p = rtw_get_ie(pie+sizeof(struct ndis_802_11_fixed_ie), _HT_CAPABILITY_IE_, &len, ie_len-sizeof(struct ndis_802_11_fixed_ie));
+ if (p && len > 0) {
+ pht_capie = (struct rtw_ieee80211_ht_cap *)(p+2);
+ max_ampdu_sz = (pht_capie->ampdu_params_info & IEEE80211_HT_CAP_AMPDU_FACTOR);
+ max_ampdu_sz = 1 << (max_ampdu_sz+3); /* max_ampdu_sz (kbytes); */
+ phtpriv->rx_ampdu_maxlen = max_ampdu_sz;
+ }
+ len = 0;
+ p = rtw_get_ie(pie+sizeof(struct ndis_802_11_fixed_ie), _HT_ADD_INFO_IE_, &len, ie_len-sizeof(struct ndis_802_11_fixed_ie));
+
+ /* update cur_bwmode & cur_ch_offset */
+ if ((pregistrypriv->cbw40_enable) &&
+ (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & BIT(1)) &&
+ (pmlmeinfo->HT_info.infos[0] & BIT(2))) {
+ int i;
+ u8 rf_type;
+
+ padapter->HalFunc.GetHwRegHandler(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+
+ /* update the MCS rates */
+ for (i = 0; i < 16; i++) {
+ if ((rf_type == RF_1T1R) || (rf_type == RF_1T2R))
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_1R[i];
+ else
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_2R[i];
+ }
+ /* switch to the 40MHz mode according to the AP */
+ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_40;
+ switch ((pmlmeinfo->HT_info.infos[0] & 0x3)) {
+ case HT_EXTCHNL_OFFSET_UPPER:
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
+ break;
+ case HT_EXTCHNL_OFFSET_LOWER:
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
+ break;
+ default:
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ break;
+ }
+ }
+
+ /* Config SM Power Save setting */
+ pmlmeinfo->SM_PS = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & 0x0C) >> 2;
+ if (pmlmeinfo->SM_PS == WLAN_HT_CAP_SM_PS_STATIC)
+ DBG_88E("%s(): WLAN_HT_CAP_SM_PS_STATIC\n", __func__);
+
+ /* Config current HT Protection mode. */
+ pmlmeinfo->HT_protection = pmlmeinfo->HT_info.infos[1] & 0x3;
+}
+
+void rtw_issue_addbareq_cmd(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ u8 issued;
+ int priority;
+ struct sta_info *psta = NULL;
+ struct ht_priv *phtpriv;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ s32 bmcst = IS_MCAST(pattrib->ra);
+
+ if (bmcst || (padapter->mlmepriv.LinkDetectInfo.NumTxOkInPeriod < 100))
+ return;
+
+ priority = pattrib->priority;
+
+ if (pattrib->psta)
+ psta = pattrib->psta;
+ else
+ psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
+
+ if (psta == NULL)
+ return;
+
+ phtpriv = &psta->htpriv;
+
+ if ((phtpriv->ht_option) && (phtpriv->ampdu_enable)) {
+ issued = (phtpriv->agg_enable_bitmap>>priority)&0x1;
+ issued |= (phtpriv->candidate_tid_bitmap>>priority)&0x1;
+
+ if (0 == issued) {
+ DBG_88E("rtw_issue_addbareq_cmd, p=%d\n", priority);
+ psta->htpriv.candidate_tid_bitmap |= BIT((u8)priority);
+ rtw_addbareq_cmd(padapter, (u8) priority, pattrib->ra);
+ }
+ }
+}
+
+void rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
+{
+ unsigned long irql;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+ _rtw_roaming(padapter, tgt_network);
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+}
+void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ int do_join_r;
+
+ struct wlan_network *pnetwork;
+
+ if (tgt_network != NULL)
+ pnetwork = tgt_network;
+ else
+ pnetwork = &pmlmepriv->cur_network;
+
+ if (0 < pmlmepriv->to_roaming) {
+ DBG_88E("roaming from %s(%pM length:%d\n",
+ pnetwork->network.Ssid.Ssid, pnetwork->network.MacAddress,
+ pnetwork->network.Ssid.SsidLength);
+ memcpy(&pmlmepriv->assoc_ssid, &pnetwork->network.Ssid, sizeof(struct ndis_802_11_ssid));
+
+ pmlmepriv->assoc_by_bssid = false;
+
+ while (1) {
+ do_join_r = rtw_do_join(padapter);
+ if (_SUCCESS == do_join_r) {
+ break;
+ } else {
+ DBG_88E("roaming do_join return %d\n", do_join_r);
+ pmlmepriv->to_roaming--;
+
+ if (0 < pmlmepriv->to_roaming) {
+ continue;
+ } else {
+ DBG_88E("%s(%d) -to roaming fail, indicate_disconnect\n", __func__, __LINE__);
+ rtw_indicate_disconnect(padapter);
+ break;
+ }
+ }
+ }
+ }
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
new file mode 100644
index 00000000000..8b2ba26ba38
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -0,0 +1,8481 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_MLME_EXT_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wifi.h>
+#include <rtw_mlme_ext.h>
+#include <wlan_bssdef.h>
+#include <mlme_osdep.h>
+#include <recv_osdep.h>
+
+static struct mlme_handler mlme_sta_tbl[] = {
+ {WIFI_ASSOCREQ, "OnAssocReq", &OnAssocReq},
+ {WIFI_ASSOCRSP, "OnAssocRsp", &OnAssocRsp},
+ {WIFI_REASSOCREQ, "OnReAssocReq", &OnAssocReq},
+ {WIFI_REASSOCRSP, "OnReAssocRsp", &OnAssocRsp},
+ {WIFI_PROBEREQ, "OnProbeReq", &OnProbeReq},
+ {WIFI_PROBERSP, "OnProbeRsp", &OnProbeRsp},
+
+ /*----------------------------------------------------------
+ below 2 are reserved
+ -----------------------------------------------------------*/
+ {0, "DoReserved", &DoReserved},
+ {0, "DoReserved", &DoReserved},
+ {WIFI_BEACON, "OnBeacon", &OnBeacon},
+ {WIFI_ATIM, "OnATIM", &OnAtim},
+ {WIFI_DISASSOC, "OnDisassoc", &OnDisassoc},
+ {WIFI_AUTH, "OnAuth", &OnAuthClient},
+ {WIFI_DEAUTH, "OnDeAuth", &OnDeAuth},
+ {WIFI_ACTION, "OnAction", &OnAction},
+};
+
+static struct action_handler OnAction_tbl[] = {
+ {RTW_WLAN_CATEGORY_SPECTRUM_MGMT, "ACTION_SPECTRUM_MGMT", on_action_spct},
+ {RTW_WLAN_CATEGORY_QOS, "ACTION_QOS", &OnAction_qos},
+ {RTW_WLAN_CATEGORY_DLS, "ACTION_DLS", &OnAction_dls},
+ {RTW_WLAN_CATEGORY_BACK, "ACTION_BACK", &OnAction_back},
+ {RTW_WLAN_CATEGORY_PUBLIC, "ACTION_PUBLIC", on_action_public},
+ {RTW_WLAN_CATEGORY_RADIO_MEASUREMENT, "ACTION_RADIO_MEASUREMENT", &DoReserved},
+ {RTW_WLAN_CATEGORY_FT, "ACTION_FT", &DoReserved},
+ {RTW_WLAN_CATEGORY_HT, "ACTION_HT", &OnAction_ht},
+ {RTW_WLAN_CATEGORY_SA_QUERY, "ACTION_SA_QUERY", &DoReserved},
+ {RTW_WLAN_CATEGORY_WMM, "ACTION_WMM", &OnAction_wmm},
+ {RTW_WLAN_CATEGORY_P2P, "ACTION_P2P", &OnAction_p2p},
+};
+
+
+static u8 null_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
+
+/**************************************************
+OUI definitions for the vendor specific IE
+***************************************************/
+unsigned char RTW_WPA_OUI[] = {0x00, 0x50, 0xf2, 0x01};
+unsigned char WMM_OUI[] = {0x00, 0x50, 0xf2, 0x02};
+unsigned char WPS_OUI[] = {0x00, 0x50, 0xf2, 0x04};
+unsigned char P2P_OUI[] = {0x50, 0x6F, 0x9A, 0x09};
+unsigned char WFD_OUI[] = {0x50, 0x6F, 0x9A, 0x0A};
+
+unsigned char WMM_INFO_OUI[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01};
+unsigned char WMM_PARA_OUI[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
+
+unsigned char WPA_TKIP_CIPHER[4] = {0x00, 0x50, 0xf2, 0x02};
+unsigned char RSN_TKIP_CIPHER[4] = {0x00, 0x0f, 0xac, 0x02};
+
+extern unsigned char REALTEK_96B_IE[];
+
+/********************************************************
+MCS rate definitions
+*********************************************************/
+unsigned char MCS_rate_2R[16] = {0xff, 0xff, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
+unsigned char MCS_rate_1R[16] = {0xff, 0x00, 0x0, 0x0, 0x01, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
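+
+/*
+ * Worked example (illustrative): these bitmaps are ANDed against the AP's Rx
+ * MCS set in rtw_update_ht_cap(), so a 1T1R/1T2R device keeps only MCS 0-7
+ * (MCS_rate_1R byte 0 = 0xff) while a 2R device also keeps MCS 8-15
+ * (MCS_rate_2R bytes 0-1 = 0xff); e.g. an AP MCS set of {0xff, 0xff, ...}
+ * masked with MCS_rate_1R becomes {0xff, 0x00, ...}.
+ */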
+
+/********************************************************
+ChannelPlan definitions
+*********************************************************/
+static struct rt_channel_plan_2g RTW_ChannelPlan2G[RT_CHANNEL_DOMAIN_2G_MAX] = {
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13}, /* 0x00, RT_CHANNEL_DOMAIN_2G_WORLD , Passive scan CH 12, 13 */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13}, 13}, /* 0x01, RT_CHANNEL_DOMAIN_2G_ETSI1 */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, 11}, /* 0x02, RT_CHANNEL_DOMAIN_2G_FCC1 */
+ {{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, 14}, /* 0x03, RT_CHANNEL_DOMAIN_2G_MIKK1 */
+ {{10, 11, 12, 13}, 4}, /* 0x04, RT_CHANNEL_DOMAIN_2G_ETSI2 */
+ {{}, 0}, /* 0x05, RT_CHANNEL_DOMAIN_2G_NULL */
+};
+
+static struct rt_channel_plan_map RTW_ChannelPlanMap[RT_CHANNEL_DOMAIN_MAX] = {
+ /* 0x00 ~ 0x1F , Old Define ===== */
+ {0x02}, /* 0x00, RT_CHANNEL_DOMAIN_FCC */
+ {0x02}, /* 0x01, RT_CHANNEL_DOMAIN_IC */
+ {0x01}, /* 0x02, RT_CHANNEL_DOMAIN_ETSI */
+ {0x01}, /* 0x03, RT_CHANNEL_DOMAIN_SPAIN */
+ {0x01}, /* 0x04, RT_CHANNEL_DOMAIN_FRANCE */
+ {0x03}, /* 0x05, RT_CHANNEL_DOMAIN_MKK */
+ {0x03}, /* 0x06, RT_CHANNEL_DOMAIN_MKK1 */
+ {0x01}, /* 0x07, RT_CHANNEL_DOMAIN_ISRAEL */
+ {0x03}, /* 0x08, RT_CHANNEL_DOMAIN_TELEC */
+ {0x03}, /* 0x09, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN */
+ {0x00}, /* 0x0A, RT_CHANNEL_DOMAIN_WORLD_WIDE_13 */
+ {0x02}, /* 0x0B, RT_CHANNEL_DOMAIN_TAIWAN */
+ {0x01}, /* 0x0C, RT_CHANNEL_DOMAIN_CHINA */
+ {0x02}, /* 0x0D, RT_CHANNEL_DOMAIN_SINGAPORE_INDIA_MEXICO */
+ {0x02}, /* 0x0E, RT_CHANNEL_DOMAIN_KOREA */
+ {0x02}, /* 0x0F, RT_CHANNEL_DOMAIN_TURKEY */
+ {0x01}, /* 0x10, RT_CHANNEL_DOMAIN_JAPAN */
+ {0x02}, /* 0x11, RT_CHANNEL_DOMAIN_FCC_NO_DFS */
+ {0x01}, /* 0x12, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
+ {0x00}, /* 0x13, RT_CHANNEL_DOMAIN_WORLD_WIDE_5G */
+ {0x02}, /* 0x14, RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS */
+ {0x00}, /* 0x15, RT_CHANNEL_DOMAIN_ETSI_NO_DFS */
+ {0x00}, /* 0x16, RT_CHANNEL_DOMAIN_KOREA_NO_DFS */
+ {0x03}, /* 0x17, RT_CHANNEL_DOMAIN_JAPAN_NO_DFS */
+ {0x05}, /* 0x18, RT_CHANNEL_DOMAIN_PAKISTAN_NO_DFS */
+ {0x02}, /* 0x19, RT_CHANNEL_DOMAIN_TAIWAN2_NO_DFS */
+ {0x00}, /* 0x1A, */
+ {0x00}, /* 0x1B, */
+ {0x00}, /* 0x1C, */
+ {0x00}, /* 0x1D, */
+ {0x00}, /* 0x1E, */
+ {0x05}, /* 0x1F, RT_CHANNEL_DOMAIN_WORLD_WIDE_ONLY_5G */
+ /* 0x20 ~ 0x7F , New Define ===== */
+ {0x00}, /* 0x20, RT_CHANNEL_DOMAIN_WORLD_NULL */
+ {0x01}, /* 0x21, RT_CHANNEL_DOMAIN_ETSI1_NULL */
+ {0x02}, /* 0x22, RT_CHANNEL_DOMAIN_FCC1_NULL */
+ {0x03}, /* 0x23, RT_CHANNEL_DOMAIN_MKK1_NULL */
+ {0x04}, /* 0x24, RT_CHANNEL_DOMAIN_ETSI2_NULL */
+ {0x02}, /* 0x25, RT_CHANNEL_DOMAIN_FCC1_FCC1 */
+ {0x00}, /* 0x26, RT_CHANNEL_DOMAIN_WORLD_ETSI1 */
+ {0x03}, /* 0x27, RT_CHANNEL_DOMAIN_MKK1_MKK1 */
+ {0x00}, /* 0x28, RT_CHANNEL_DOMAIN_WORLD_KCC1 */
+ {0x00}, /* 0x29, RT_CHANNEL_DOMAIN_WORLD_FCC2 */
+ {0x00}, /* 0x2A, */
+ {0x00}, /* 0x2B, */
+ {0x00}, /* 0x2C, */
+ {0x00}, /* 0x2D, */
+ {0x00}, /* 0x2E, */
+ {0x00}, /* 0x2F, */
+ {0x00}, /* 0x30, RT_CHANNEL_DOMAIN_WORLD_FCC3 */
+ {0x00}, /* 0x31, RT_CHANNEL_DOMAIN_WORLD_FCC4 */
+ {0x00}, /* 0x32, RT_CHANNEL_DOMAIN_WORLD_FCC5 */
+ {0x00}, /* 0x33, RT_CHANNEL_DOMAIN_WORLD_FCC6 */
+ {0x02}, /* 0x34, RT_CHANNEL_DOMAIN_FCC1_FCC7 */
+ {0x00}, /* 0x35, RT_CHANNEL_DOMAIN_WORLD_ETSI2 */
+ {0x00}, /* 0x36, RT_CHANNEL_DOMAIN_WORLD_ETSI3 */
+ {0x03}, /* 0x37, RT_CHANNEL_DOMAIN_MKK1_MKK2 */
+ {0x03}, /* 0x38, RT_CHANNEL_DOMAIN_MKK1_MKK3 */
+ {0x02}, /* 0x39, RT_CHANNEL_DOMAIN_FCC1_NCC1 */
+ {0x00}, /* 0x3A, */
+ {0x00}, /* 0x3B, */
+ {0x00}, /* 0x3C, */
+ {0x00}, /* 0x3D, */
+ {0x00}, /* 0x3E, */
+ {0x00}, /* 0x3F, */
+ {0x02}, /* 0x40, RT_CHANNEL_DOMAIN_FCC1_NCC2 */
+ {0x03}, /* 0x41, RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G */
+};
+
+static struct rt_channel_plan_map RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE = {0x03}; /* use the combination for max channel numbers */
+
+/*
+ * Search for the given channel number @ch in the given channel set @ch_set
+ * @ch_set: the given channel set
+ * @ch: the given channel number
+ *
+ * return the index of @ch in @ch_set, or -1 if not found
+ */
+int rtw_ch_set_search_ch(struct rt_channel_info *ch_set, const u32 ch)
+{
+ int i;
+ for (i = 0; ch_set[i].ChannelNum != 0; i++) {
+ if (ch == ch_set[i].ChannelNum)
+ break;
+ }
+
+ if (i >= ch_set[i].ChannelNum)
+ return -1;
+ return i;
+}
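+
+/*
+ * Illustrative usage sketch (not part of the driver), assuming an initialized
+ * pmlmeext->channel_set: map a channel number back to its table index. Kept
+ * under #if 0 so it is never compiled.
+ */
+#if 0
+{
+	int idx = rtw_ch_set_search_ch(pmlmeext->channel_set, 6);
+
+	if (idx < 0)
+		DBG_88E("channel 6 is not in the current channel plan\n");
+	else
+		DBG_88E("channel 6 is entry %d (scan type %d)\n",
+			idx, pmlmeext->channel_set[idx].ScanType);
+}
+#endif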
+
+/****************************************************************************
+
+Following are the initialization functions for WiFi MLME
+
+*****************************************************************************/
+
+int init_hw_mlme_ext(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+ return _SUCCESS;
+}
+
+static void init_mlme_ext_priv_value(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ unsigned char mixed_datarate[NumRates] = {
+ _1M_RATE_, _2M_RATE_, _5M_RATE_, _11M_RATE_, _6M_RATE_,
+ _9M_RATE_, _12M_RATE_, _18M_RATE_, _24M_RATE_, _36M_RATE_,
+ _48M_RATE_, _54M_RATE_, 0xff
+ };
+ unsigned char mixed_basicrate[NumRates] = {
+ _1M_RATE_, _2M_RATE_, _5M_RATE_, _11M_RATE_, _6M_RATE_,
+ _12M_RATE_, _24M_RATE_, 0xff,
+ };
+
+ ATOMIC_SET(&pmlmeext->event_seq, 0);
+ pmlmeext->mgnt_seq = 0;/* reset to zero when disconnect at client mode */
+
+ pmlmeext->cur_channel = padapter->registrypriv.channel;
+ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ pmlmeext->oper_channel = pmlmeext->cur_channel;
+ pmlmeext->oper_bwmode = pmlmeext->cur_bwmode;
+ pmlmeext->oper_ch_offset = pmlmeext->cur_ch_offset;
+ pmlmeext->retry = 0;
+
+ pmlmeext->cur_wireless_mode = padapter->registrypriv.wireless_mode;
+
+ memcpy(pmlmeext->datarate, mixed_datarate, NumRates);
+ memcpy(pmlmeext->basicrate, mixed_basicrate, NumRates);
+
+ pmlmeext->tx_rate = IEEE80211_CCK_RATE_1MB;
+
+ pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
+ pmlmeext->sitesurvey_res.channel_idx = 0;
+ pmlmeext->sitesurvey_res.bss_cnt = 0;
+ pmlmeext->scan_abort = false;
+
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ pmlmeinfo->reauth_count = 0;
+ pmlmeinfo->reassoc_count = 0;
+ pmlmeinfo->link_count = 0;
+ pmlmeinfo->auth_seq = 0;
+ pmlmeinfo->auth_algo = dot11AuthAlgrthm_Open;
+ pmlmeinfo->key_index = 0;
+ pmlmeinfo->iv = 0;
+
+ pmlmeinfo->enc_algo = _NO_PRIVACY_;
+ pmlmeinfo->authModeToggle = 0;
+
+ _rtw_memset(pmlmeinfo->chg_txt, 0, 128);
+
+ pmlmeinfo->slotTime = SHORT_SLOT_TIME;
+ pmlmeinfo->preamble_mode = PREAMBLE_AUTO;
+
+ pmlmeinfo->dialogToken = 0;
+
+ pmlmeext->action_public_rxseq = 0xffff;
+ pmlmeext->action_public_dialog_token = 0xff;
+}
+
+static int has_channel(struct rt_channel_info *channel_set,
+ u8 chanset_size,
+ u8 chan) {
+ int i;
+
+ for (i = 0; i < chanset_size; i++) {
+ if (channel_set[i].ChannelNum == chan)
+ return 1;
+ }
+ return 0;
+}
+
+static void init_channel_list(struct adapter *padapter, struct rt_channel_info *channel_set,
+ u8 chanset_size,
+ struct p2p_channels *channel_list) {
+ struct p2p_oper_class_map op_class[] = {
+ { IEEE80211G, 81, 1, 13, 1, BW20 },
+ { IEEE80211G, 82, 14, 14, 1, BW20 },
+ { -1, 0, 0, 0, 0, BW20 }
+ };
+
+ int cla, op;
+
+ cla = 0;
+
+ for (op = 0; op_class[op].op_class; op++) {
+ u8 ch;
+ struct p2p_oper_class_map *o = &op_class[op];
+ struct p2p_reg_class *reg = NULL;
+
+ for (ch = o->min_chan; ch <= o->max_chan; ch += o->inc) {
+ if (!has_channel(channel_set, chanset_size, ch)) {
+ continue;
+ }
+
+ if ((0 == padapter->registrypriv.ht_enable) && (8 == o->inc))
+ continue;
+
+ if ((0 == (padapter->registrypriv.cbw40_enable & BIT(1))) &&
+ ((BW40MINUS == o->bw) || (BW40PLUS == o->bw)))
+ continue;
+
+ if (reg == NULL) {
+ reg = &channel_list->reg_class[cla];
+ cla++;
+ reg->reg_class = o->op_class;
+ reg->channels = 0;
+ }
+ reg->channel[reg->channels] = ch;
+ reg->channels++;
+ }
+ }
+ channel_list->reg_classes = cla;
+}
+
+static u8 init_channel_set(struct adapter *padapter, u8 ChannelPlan, struct rt_channel_info *channel_set)
+{
+ u8 index, chanset_size = 0;
+ u8 b2_4GBand = false;
+ u8 Index2G = 0;
+
+ _rtw_memset(channel_set, 0, sizeof(struct rt_channel_info) * MAX_CHANNEL_NUM);
+
+ if (ChannelPlan >= RT_CHANNEL_DOMAIN_MAX && ChannelPlan != RT_CHANNEL_DOMAIN_REALTEK_DEFINE) {
+ DBG_88E("ChannelPlan ID %x error !!!!!\n", ChannelPlan);
+ return chanset_size;
+ }
+
+ if (padapter->registrypriv.wireless_mode & WIRELESS_11G) {
+ b2_4GBand = true;
+ if (RT_CHANNEL_DOMAIN_REALTEK_DEFINE == ChannelPlan)
+ Index2G = RTW_CHANNEL_PLAN_MAP_REALTEK_DEFINE.Index2G;
+ else
+ Index2G = RTW_ChannelPlanMap[ChannelPlan].Index2G;
+ }
+
+ if (b2_4GBand) {
+ for (index = 0; index < RTW_ChannelPlan2G[Index2G].Len; index++) {
+ channel_set[chanset_size].ChannelNum = RTW_ChannelPlan2G[Index2G].Channel[index];
+
+ if ((RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN == ChannelPlan) || /* Channels 1~11 are active, and 12~14 are passive */
+ (RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G == ChannelPlan)) {
+ if (channel_set[chanset_size].ChannelNum >= 1 && channel_set[chanset_size].ChannelNum <= 11)
+ channel_set[chanset_size].ScanType = SCAN_ACTIVE;
+ else if ((channel_set[chanset_size].ChannelNum >= 12 && channel_set[chanset_size].ChannelNum <= 14))
+ channel_set[chanset_size].ScanType = SCAN_PASSIVE;
+ } else if (RT_CHANNEL_DOMAIN_WORLD_WIDE_13 == ChannelPlan ||
+ RT_CHANNEL_DOMAIN_2G_WORLD == Index2G) {/* channel 12~13, passive scan */
+ if (channel_set[chanset_size].ChannelNum <= 11)
+ channel_set[chanset_size].ScanType = SCAN_ACTIVE;
+ else
+ channel_set[chanset_size].ScanType = SCAN_PASSIVE;
+ } else {
+ channel_set[chanset_size].ScanType = SCAN_ACTIVE;
+ }
+
+ chanset_size++;
+ }
+ }
+ return chanset_size;
+}
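+
+/*
+ * Worked example (illustrative): ChannelPlan RT_CHANNEL_DOMAIN_WORLD_WIDE_13
+ * maps to Index2G 0x00 in RTW_ChannelPlanMap, so the resulting 2G set holds
+ * channels 1-13 with channels 1-11 marked SCAN_ACTIVE and channels 12-13
+ * marked SCAN_PASSIVE.
+ */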
+
+int init_mlme_ext_priv(struct adapter *padapter)
+{
+ int res = _SUCCESS;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ pmlmeext->padapter = padapter;
+
+ init_mlme_ext_priv_value(padapter);
+ pmlmeinfo->bAcceptAddbaReq = pregistrypriv->bAcceptAddbaReq;
+
+ init_mlme_ext_timer(padapter);
+
+#ifdef CONFIG_88EU_AP_MODE
+ init_mlme_ap_info(padapter);
+#endif
+
+ pmlmeext->max_chan_nums = init_channel_set(padapter, pmlmepriv->ChannelPlan, pmlmeext->channel_set);
+ init_channel_list(padapter, pmlmeext->channel_set, pmlmeext->max_chan_nums, &pmlmeext->channel_list);
+
+ pmlmeext->chan_scan_time = SURVEY_TO;
+ pmlmeext->mlmeext_init = true;
+
+
+ pmlmeext->active_keep_alive_check = true;
+
+ return res;
+}
+
+void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
+{
+ struct adapter *padapter = pmlmeext->padapter;
+
+ if (!padapter)
+ return;
+
+ if (padapter->bDriverStopped) {
+ _cancel_timer_ex(&pmlmeext->survey_timer);
+ _cancel_timer_ex(&pmlmeext->link_timer);
+ /* _cancel_timer_ex(&pmlmeext->ADDBA_timer); */
+ }
+}
+
+static void _mgt_dispatcher(struct adapter *padapter, struct mlme_handler *ptable, union recv_frame *precv_frame)
+{
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+
+ if (ptable->func) {
+ /* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
+ if (!_rtw_memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
+ !_rtw_memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN))
+ return;
+ ptable->func(padapter, precv_frame);
+ }
+}
+
+void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ int index;
+ struct mlme_handler *ptable;
+#ifdef CONFIG_88EU_AP_MODE
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+#endif /* CONFIG_88EU_AP_MODE */
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(pframe));
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("+mgt_dispatcher: type(0x%x) subtype(0x%x)\n",
+ GetFrameType(pframe), GetFrameSubType(pframe)));
+
+ if (GetFrameType(pframe) != WIFI_MGT_TYPE) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("mgt_dispatcher: type(0x%x) error!\n", GetFrameType(pframe)));
+ return;
+ }
+
+ /* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
+ if (!_rtw_memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
+ !_rtw_memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN))
+ return;
+
+ ptable = mlme_sta_tbl;
+
+ index = GetFrameSubType(pframe) >> 4;
+
+ if (index > 13) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("Currently we do not support reserved sub-fr-type=%d\n", index));
+ return;
+ }
+ ptable += index;
+
+ if (psta != NULL) {
+ if (GetRetry(pframe)) {
+ if (precv_frame->u.hdr.attrib.seq_num == psta->RxMgmtFrameSeqNum) {
+ /* drop the duplicate management frame */
+ DBG_88E("Drop duplicate management frame with seq_num=%d.\n", precv_frame->u.hdr.attrib.seq_num);
+ return;
+ }
+ }
+ psta->RxMgmtFrameSeqNum = precv_frame->u.hdr.attrib.seq_num;
+ }
+
+#ifdef CONFIG_88EU_AP_MODE
+ switch (GetFrameSubType(pframe)) {
+ case WIFI_AUTH:
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
+ ptable->func = &OnAuth;
+ else
+ ptable->func = &OnAuthClient;
+ /* fall through */
+ case WIFI_ASSOCREQ:
+ case WIFI_REASSOCREQ:
+ _mgt_dispatcher(padapter, ptable, precv_frame);
+ break;
+ case WIFI_PROBEREQ:
+ /* same dispatch whether or not we are in WIFI_AP_STATE */
+ _mgt_dispatcher(padapter, ptable, precv_frame);
+ break;
+ case WIFI_BEACON:
+ _mgt_dispatcher(padapter, ptable, precv_frame);
+ break;
+ case WIFI_ACTION:
+ _mgt_dispatcher(padapter, ptable, precv_frame);
+ break;
+ default:
+ _mgt_dispatcher(padapter, ptable, precv_frame);
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
+ rtw_hostapd_mlme_rx(padapter, precv_frame);
+ break;
+ }
+#else
+ _mgt_dispatcher(padapter, ptable, precv_frame);
+#endif
+}
+
+#ifdef CONFIG_88EU_P2P
+static u32 p2p_listen_state_process(struct adapter *padapter, unsigned char *da)
+{
+ bool response = true;
+
+ /* do nothing if the device name is empty */
+ if (!padapter->wdinfo.device_name_len)
+ response = false;
+
+ if (response)
+ issue_probersp_p2p(padapter, da);
+
+ return _SUCCESS;
+}
+#endif /* CONFIG_88EU_P2P */
+
+
+/****************************************************************************
+
+Following are the callback functions for each subtype of the management frames
+
+*****************************************************************************/
+
+unsigned int OnProbeReq(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ unsigned int ielen;
+ unsigned char *p;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur = &(pmlmeinfo->network);
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ uint len = precv_frame->u.hdr.len;
+ u8 is_valid_p2p_probereq = false;
+
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 wifi_test_chk_rate = 1;
+
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) &&
+ !rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE) &&
+ !rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT) &&
+ !rtw_p2p_chk_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH) &&
+ !rtw_p2p_chk_state(pwdinfo, P2P_STATE_SCAN)) {
+ /* mcs_rate = 0 -> CCK 1M rate */
+ /* mcs_rate = 1 -> CCK 2M rate */
+ /* mcs_rate = 2 -> CCK 5.5M rate */
+ /* mcs_rate = 3 -> CCK 11M rate */
+ /* In the P2P mode, the driver should not support the CCK rate */
+
+ /* Commented by Kurt 2012/10/16 */
+ /* IOT issue: Google Nexus7 uses the 1M rate to send p2p_probe_req after GO nego is completed and Nexus7 is the client */
+ if (wifi_test_chk_rate == 1) {
+ is_valid_p2p_probereq = process_probe_req_p2p_ie(pwdinfo, pframe, len);
+ if (is_valid_p2p_probereq) {
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE)) {
+ /* FIXME */
+ report_survey_event(padapter, precv_frame);
+ p2p_listen_state_process(padapter, get_sa(pframe));
+
+ return _SUCCESS;
+ }
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
+ goto _continue;
+ }
+ }
+ }
+
+_continue:
+#endif /* CONFIG_88EU_P2P */
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
+ return _SUCCESS;
+
+ if (!check_fwstate(pmlmepriv, _FW_LINKED) &&
+ !check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE|WIFI_AP_STATE))
+ return _SUCCESS;
+
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, _SSID_IE_, (int *)&ielen,
+ len - WLAN_HDR_A3_LEN - _PROBEREQ_IE_OFFSET_);
+
+ /* check (wildcard) SSID */
+ if (p != NULL) {
+ if (is_valid_p2p_probereq)
+ goto _issue_probersp;
+
+ if ((ielen != 0 && !_rtw_memcmp((void *)(p+2), (void *)cur->Ssid.Ssid, cur->Ssid.SsidLength)) ||
+ (ielen == 0 && pmlmeinfo->hidden_ssid_mode))
+ return _SUCCESS;
+
+_issue_probersp:
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) &&
+ pmlmepriv->cur_network.join_res)
+ issue_probersp(padapter, get_sa(pframe), is_valid_p2p_probereq);
+ }
+ return _SUCCESS;
+}
+
+unsigned int OnProbeRsp(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+#endif
+
+#ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ)) {
+ if (pwdinfo->tx_prov_disc_info.benable) {
+ if (_rtw_memcmp(pwdinfo->tx_prov_disc_info.peerIFAddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT)) {
+ pwdinfo->tx_prov_disc_info.benable = false;
+ issue_p2p_provision_request(padapter,
+ pwdinfo->tx_prov_disc_info.ssid.Ssid,
+ pwdinfo->tx_prov_disc_info.ssid.SsidLength,
+ pwdinfo->tx_prov_disc_info.peerDevAddr);
+ } else if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE) || rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ pwdinfo->tx_prov_disc_info.benable = false;
+ issue_p2p_provision_request(padapter, NULL, 0,
+ pwdinfo->tx_prov_disc_info.peerDevAddr);
+ }
+ }
+ }
+ return _SUCCESS;
+ } else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING)) {
+ if (pwdinfo->nego_req_info.benable) {
+ DBG_88E("[%s] P2P State is GONEGO ING!\n", __func__);
+ if (_rtw_memcmp(pwdinfo->nego_req_info.peerDevAddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
+ pwdinfo->nego_req_info.benable = false;
+ issue_p2p_GO_request(padapter, pwdinfo->nego_req_info.peerDevAddr);
+ }
+ }
+ } else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_INVITE_REQ)) {
+ if (pwdinfo->invitereq_info.benable) {
+ DBG_88E("[%s] P2P_STATE_TX_INVITE_REQ!\n", __func__);
+ if (_rtw_memcmp(pwdinfo->invitereq_info.peer_macaddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
+ pwdinfo->invitereq_info.benable = false;
+ issue_p2p_invitation_request(padapter, pwdinfo->invitereq_info.peer_macaddr);
+ }
+ }
+ }
+#endif
+
+
+ if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
+ report_survey_event(padapter, precv_frame);
+ return _SUCCESS;
+ }
+
+ return _SUCCESS;
+}
+
+unsigned int OnBeacon(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ int cam_idx;
+ struct sta_info *psta;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ uint len = precv_frame->u.hdr.len;
+ struct wlan_bssid_ex *pbss;
+ int ret = _SUCCESS;
+
+ if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
+ report_survey_event(padapter, precv_frame);
+ return _SUCCESS;
+ }
+
+ if (_rtw_memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)) {
+ if (pmlmeinfo->state & WIFI_FW_AUTH_NULL) {
+ /* we should update the current network before auth, or some IEs will be wrong */
+ pbss = (struct wlan_bssid_ex *)rtw_malloc(sizeof(struct wlan_bssid_ex));
+ if (pbss) {
+ if (collect_bss_info(padapter, precv_frame, pbss) == _SUCCESS) {
+ update_network(&(pmlmepriv->cur_network.network), pbss, padapter, true);
+ rtw_get_bcn_info(&(pmlmepriv->cur_network));
+ }
+ kfree(pbss);
+ }
+
+ /* check the vendor of the assoc AP */
+ pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pframe+sizeof(struct rtw_ieee80211_hdr_3addr), len-sizeof(struct rtw_ieee80211_hdr_3addr));
+
+ /* update TSF Value */
+ update_TSF(pmlmeext, pframe, len);
+
+ /* start auth */
+ start_clnt_auth(padapter);
+
+ return _SUCCESS;
+ }
+
+ if (((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE) && (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)) {
+ psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+ if (psta != NULL) {
+ ret = rtw_check_bcn_info(padapter, pframe, len);
+ if (!ret) {
+ DBG_88E_LEVEL(_drv_info_, "ap has changed, disconnect now\n ");
+ receive_disconnect(padapter, pmlmeinfo->network.MacAddress , 65535);
+ return _SUCCESS;
+ }
+ /* update WMM, ERP in the beacon */
+ /* todo: the timer is used instead of the number of the beacon received */
+ if ((sta_rx_pkts(psta) & 0xf) == 0)
+ update_beacon_info(padapter, pframe, len, psta);
+ process_p2p_ps_ie(padapter, (pframe + WLAN_HDR_A3_LEN), (len - WLAN_HDR_A3_LEN));
+ }
+ } else if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) {
+ psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+ if (psta != NULL) {
+ /* update WMM, ERP in the beacon */
+ /* todo: the timer is used instead of the number of the beacon received */
+ if ((sta_rx_pkts(psta) & 0xf) == 0)
+ update_beacon_info(padapter, pframe, len, psta);
+ } else {
+ /* allocate a new CAM entry for IBSS station */
+ cam_idx = allocate_fw_sta_entry(padapter);
+ if (cam_idx == NUM_STA)
+ goto _END_ONBEACON_;
+
+ /* get supported rate */
+ if (update_sta_support_rate(padapter, (pframe + WLAN_HDR_A3_LEN + _BEACON_IE_OFFSET_), (len - WLAN_HDR_A3_LEN - _BEACON_IE_OFFSET_), cam_idx) == _FAIL) {
+ pmlmeinfo->FW_sta_info[cam_idx].status = 0;
+ goto _END_ONBEACON_;
+ }
+
+ /* update TSF Value */
+ update_TSF(pmlmeext, pframe, len);
+
+ /* report sta add event */
+ report_add_sta_event(padapter, GetAddr2Ptr(pframe), cam_idx);
+ }
+ }
+ }
+
+_END_ONBEACON_:
+
+ return _SUCCESS;
+}
+
+unsigned int OnAuth(struct adapter *padapter, union recv_frame *precv_frame)
+{
+#ifdef CONFIG_88EU_AP_MODE
+ unsigned long irqL;
+ unsigned int auth_mode, ie_len;
+ u16 seq;
+ unsigned char *sa, *p;
+ u16 algorithm;
+ int status;
+ static struct sta_info stat;
+ struct sta_info *pstat = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ uint len = precv_frame->u.hdr.len;
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ return _FAIL;
+
+ DBG_88E("+OnAuth\n");
+
+ sa = GetAddr2Ptr(pframe);
+
+ auth_mode = psecuritypriv->dot11AuthAlgrthm;
+ seq = le16_to_cpu(*(__le16 *)((size_t)pframe + WLAN_HDR_A3_LEN + 2));
+ algorithm = le16_to_cpu(*(__le16 *)((size_t)pframe + WLAN_HDR_A3_LEN));
+
+ DBG_88E("auth alg=%x, seq=%X\n", algorithm, seq);
+
+ if (auth_mode == 2 && psecuritypriv->dot11PrivacyAlgrthm != _WEP40_ &&
+ psecuritypriv->dot11PrivacyAlgrthm != _WEP104_)
+ auth_mode = 0;
+
+ if ((algorithm > 0 && auth_mode == 0) || /* rx a shared-key auth but shared not enabled */
+ (algorithm == 0 && auth_mode == 1)) { /* rx a open-system auth but shared-key is enabled */
+ DBG_88E("auth rejected due to bad alg [alg=%d, auth_mib=%d] %02X%02X%02X%02X%02X%02X\n",
+ algorithm, auth_mode, sa[0], sa[1], sa[2], sa[3], sa[4], sa[5]);
+
+ status = _STATS_NO_SUPP_ALG_;
+
+ goto auth_fail;
+ }
+
+ if (!rtw_access_ctrl(padapter, sa)) {
+ status = _STATS_UNABLE_HANDLE_STA_;
+ goto auth_fail;
+ }
+
+ pstat = rtw_get_stainfo(pstapriv, sa);
+ if (pstat == NULL) {
+ /* allocate a new one */
+ DBG_88E("going to alloc stainfo for sa=%pM\n", sa);
+ pstat = rtw_alloc_stainfo(pstapriv, sa);
+ if (pstat == NULL) {
+ DBG_88E(" Exceed the upper limit of supported clients...\n");
+ status = _STATS_UNABLE_HANDLE_STA_;
+ goto auth_fail;
+ }
+
+ pstat->state = WIFI_FW_AUTH_NULL;
+ pstat->auth_seq = 0;
+ } else {
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ if (!rtw_is_list_empty(&pstat->asoc_list)) {
+ rtw_list_delete(&pstat->asoc_list);
+ pstapriv->asoc_list_cnt--;
+ }
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+
+ if (seq == 1) {
+ /* TODO: STA re_auth and auth timeout */
+ }
+ }
+
+ _enter_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ if (rtw_is_list_empty(&pstat->auth_list)) {
+ rtw_list_insert_tail(&pstat->auth_list, &pstapriv->auth_list);
+ pstapriv->auth_list_cnt++;
+ }
+ _exit_critical_bh(&pstapriv->auth_list_lock, &irqL);
+
+ if (pstat->auth_seq == 0)
+ pstat->expire_to = pstapriv->auth_to;
+
+ if ((pstat->auth_seq + 1) != seq) {
+ DBG_88E("(1)auth rejected because out of seq [rx_seq=%d, exp_seq=%d]!\n",
+ seq, pstat->auth_seq+1);
+ status = _STATS_OUT_OF_AUTH_SEQ_;
+ goto auth_fail;
+ }
+
+ if (algorithm == 0 && (auth_mode == 0 || auth_mode == 2)) {
+ if (seq == 1) {
+ pstat->state &= ~WIFI_FW_AUTH_NULL;
+ pstat->state |= WIFI_FW_AUTH_SUCCESS;
+ pstat->expire_to = pstapriv->assoc_to;
+ pstat->authalg = algorithm;
+ } else {
+ DBG_88E("(2)auth rejected because out of seq [rx_seq=%d, exp_seq=%d]!\n",
+ seq, pstat->auth_seq+1);
+ status = _STATS_OUT_OF_AUTH_SEQ_;
+ goto auth_fail;
+ }
+ } else { /* shared system or auto authentication */
+ if (seq == 1) {
+ /* prepare for the challenge text... */
+
+ pstat->state &= ~WIFI_FW_AUTH_NULL;
+ pstat->state |= WIFI_FW_AUTH_STATE;
+ pstat->authalg = algorithm;
+ pstat->auth_seq = 2;
+ } else if (seq == 3) {
+ /* checking the challenge text... */
+ DBG_88E("checking the challenge text...\n");
+
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_ , _CHLGETXT_IE_, (int *)&ie_len,
+ len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_ - 4);
+
+ if ((p == NULL) || (ie_len <= 0)) {
+ DBG_88E("auth rejected because challenge failure!(1)\n");
+ status = _STATS_CHALLENGE_FAIL_;
+ goto auth_fail;
+ }
+
+ if (_rtw_memcmp((void *)(p + 2), pstat->chg_txt, 128)) {
+ pstat->state &= (~WIFI_FW_AUTH_STATE);
+ pstat->state |= WIFI_FW_AUTH_SUCCESS;
+ /* the challenge text is correct... */
+ pstat->expire_to = pstapriv->assoc_to;
+ } else {
+ DBG_88E("auth rejected because challenge failure!\n");
+ status = _STATS_CHALLENGE_FAIL_;
+ goto auth_fail;
+ }
+ } else {
+ DBG_88E("(3)auth rejected because out of seq [rx_seq=%d, exp_seq=%d]!\n",
+ seq, pstat->auth_seq+1);
+ status = _STATS_OUT_OF_AUTH_SEQ_;
+ goto auth_fail;
+ }
+ }
+
+ /* Now, we are going to issue_auth... */
+ pstat->auth_seq = seq + 1;
+
+#ifdef CONFIG_88EU_AP_MODE
+ issue_auth(padapter, pstat, (unsigned short)(_STATS_SUCCESSFUL_));
+#endif
+
+ if (pstat->state & WIFI_FW_AUTH_SUCCESS)
+ pstat->auth_seq = 0;
+
+ return _SUCCESS;
+
+auth_fail:
+
+ if (pstat)
+ rtw_free_stainfo(padapter , pstat);
+
+ pstat = &stat;
+ _rtw_memset((char *)pstat, '\0', sizeof(stat));
+ pstat->auth_seq = 2;
+ memcpy(pstat->hwaddr, sa, 6);
+
+#ifdef CONFIG_88EU_AP_MODE
+ issue_auth(padapter, pstat, (unsigned short)status);
+#endif
+
+#endif
+ return _FAIL;
+}
+
+unsigned int OnAuthClient(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ unsigned int seq, len, status, offset;
+ unsigned char *p;
+ unsigned int go2asoc = 0;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ uint pkt_len = precv_frame->u.hdr.len;
+
+ DBG_88E("%s\n", __func__);
+
+ /* check A1 matches or not */
+ if (!_rtw_memcmp(myid(&(padapter->eeprompriv)), get_da(pframe), ETH_ALEN))
+ return _SUCCESS;
+
+ if (!(pmlmeinfo->state & WIFI_FW_AUTH_STATE))
+ return _SUCCESS;
+
+ offset = (GetPrivacy(pframe)) ? 4 : 0;
+
+ seq = le16_to_cpu(*(__le16 *)((size_t)pframe + WLAN_HDR_A3_LEN + offset + 2));
+ status = le16_to_cpu(*(__le16 *)((size_t)pframe + WLAN_HDR_A3_LEN + offset + 4));
+
+ if (status != 0) {
+ DBG_88E("clnt auth fail, status: %d\n", status);
+ if (status == 13) { /* pmlmeinfo->auth_algo == dot11AuthAlgrthm_Auto) */
+ if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared)
+ pmlmeinfo->auth_algo = dot11AuthAlgrthm_Open;
+ else
+ pmlmeinfo->auth_algo = dot11AuthAlgrthm_Shared;
+ }
+
+ set_link_timer(pmlmeext, 1);
+ goto authclnt_fail;
+ }
+
+ if (seq == 2) {
+ if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) {
+ /* legacy shared-key system */
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _AUTH_IE_OFFSET_, _CHLGETXT_IE_, (int *)&len,
+ pkt_len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_);
+
+ if (p == NULL)
+ goto authclnt_fail;
+
+ memcpy((void *)(pmlmeinfo->chg_txt), (void *)(p + 2), len);
+ pmlmeinfo->auth_seq = 3;
+ issue_auth(padapter, NULL, 0);
+ set_link_timer(pmlmeext, REAUTH_TO);
+
+ return _SUCCESS;
+ } else {
+ /* open system */
+ go2asoc = 1;
+ }
+ } else if (seq == 4) {
+ if (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared)
+ go2asoc = 1;
+ else
+ goto authclnt_fail;
+ } else {
+ /* this is also illegal */
+ goto authclnt_fail;
+ }
+
+ if (go2asoc) {
+ DBG_88E_LEVEL(_drv_info_, "auth success, start assoc\n");
+ start_clnt_assoc(padapter);
+ return _SUCCESS;
+ }
+authclnt_fail:
+ return _FAIL;
+}
+
+unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
+{
+#ifdef CONFIG_88EU_AP_MODE
+ unsigned long irqL;
+ u16 capab_info;
+ struct rtw_ieee802_11_elems elems;
+ struct sta_info *pstat;
+ unsigned char reassoc, *p, *pos, *wpa_ie;
+ unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01};
+ int i, ie_len, wpa_ie_len, left;
+ unsigned char supportRate[16];
+ int supportRateNum;
+ unsigned short status = _STATS_SUCCESSFUL_;
+ unsigned short frame_type, ie_offset = 0;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur = &(pmlmeinfo->network);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ uint pkt_len = precv_frame->u.hdr.len;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 p2p_status_code = P2P_STATUS_SUCCESS;
+ u8 *p2pie;
+ u32 p2pielen = 0;
+#endif /* CONFIG_88EU_P2P */
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ return _FAIL;
+
+ frame_type = GetFrameSubType(pframe);
+ if (frame_type == WIFI_ASSOCREQ) {
+ reassoc = 0;
+ ie_offset = _ASOCREQ_IE_OFFSET_;
+ } else { /* WIFI_REASSOCREQ */
+ reassoc = 1;
+ ie_offset = _REASOCREQ_IE_OFFSET_;
+ }
+
+
+ if (pkt_len < IEEE80211_3ADDR_LEN + ie_offset) {
+ DBG_88E("handle_assoc(reassoc=%d) - too short payload (len=%lu)"
+ "\n", reassoc, (unsigned long)pkt_len);
+ return _FAIL;
+ }
+
+ pstat = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+ if (pstat == (struct sta_info *)NULL) {
+ status = _RSON_CLS2_;
+ goto asoc_class2_error;
+ }
+
+ capab_info = RTW_GET_LE16(pframe + WLAN_HDR_A3_LEN);
+
+ left = pkt_len - (IEEE80211_3ADDR_LEN + ie_offset);
+ pos = pframe + (IEEE80211_3ADDR_LEN + ie_offset);
+
+
+ DBG_88E("%s\n", __func__);
+
+ /* check if this station has been successfully authenticated/associated */
+ if (!((pstat->state) & WIFI_FW_AUTH_SUCCESS)) {
+ if (!((pstat->state) & WIFI_FW_ASSOC_SUCCESS)) {
+ status = _RSON_CLS2_;
+ goto asoc_class2_error;
+ } else {
+ pstat->state &= (~WIFI_FW_ASSOC_SUCCESS);
+ pstat->state |= WIFI_FW_ASSOC_STATE;
+ }
+ } else {
+ pstat->state &= (~WIFI_FW_AUTH_SUCCESS);
+ pstat->state |= WIFI_FW_ASSOC_STATE;
+ }
+ pstat->capability = capab_info;
+ /* now parse all ieee802_11 ie to point to elems */
+ if (rtw_ieee802_11_parse_elems(pos, left, &elems, 1) == ParseFailed ||
+ !elems.ssid) {
+ DBG_88E("STA %pM sent invalid association request\n",
+ pstat->hwaddr);
+ status = _STATS_FAILURE_;
+ goto OnAssocReqFail;
+ }
+
+
+ /* now we should check all the fields... */
+ /* checking SSID */
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _SSID_IE_, &ie_len,
+ pkt_len - WLAN_HDR_A3_LEN - ie_offset);
+ if (p == NULL)
+ status = _STATS_FAILURE_;
+
+ if (ie_len == 0) { /* broadcast ssid, however it is not allowed in assocreq */
+ status = _STATS_FAILURE_;
+ } else {
+ /* check if ssid match */
+ if (!_rtw_memcmp((void *)(p+2), cur->Ssid.Ssid, cur->Ssid.SsidLength))
+ status = _STATS_FAILURE_;
+
+ if (ie_len != cur->Ssid.SsidLength)
+ status = _STATS_FAILURE_;
+ }
+
+ if (_STATS_SUCCESSFUL_ != status)
+ goto OnAssocReqFail;
+
+ /* check if the supported rate is ok */
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _SUPPORTEDRATES_IE_, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
+ if (p == NULL) {
+ DBG_88E("Rx a sta assoc-req which supported rate is empty!\n");
+ /* use our own rate set as the one the station used */
+ /* memcpy(supportRate, AP_BSSRATE, AP_BSSRATE_LEN); */
+ /* supportRateNum = AP_BSSRATE_LEN; */
+
+ status = _STATS_FAILURE_;
+ goto OnAssocReqFail;
+ } else {
+ memcpy(supportRate, p+2, ie_len);
+ supportRateNum = ie_len;
+
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, _EXT_SUPPORTEDRATES_IE_ , &ie_len,
+ pkt_len - WLAN_HDR_A3_LEN - ie_offset);
+ if (p != NULL) {
+ if (supportRateNum <= sizeof(supportRate)) {
+ memcpy(supportRate+supportRateNum, p+2, ie_len);
+ supportRateNum += ie_len;
+ }
+ }
+ }
+
+ /* todo: mask supportRate between AP & STA -> move to update raid */
+ /* get_matched_rate(pmlmeext, supportRate, &supportRateNum, 0); */
+
+ /* update station supportRate */
+ pstat->bssratelen = supportRateNum;
+ memcpy(pstat->bssrateset, supportRate, supportRateNum);
+ UpdateBrateTblForSoftAP(pstat->bssrateset, pstat->bssratelen);
+
+ /* check RSN/WPA/WPS */
+ pstat->dot8021xalg = 0;
+ pstat->wpa_psk = 0;
+ pstat->wpa_group_cipher = 0;
+ pstat->wpa2_group_cipher = 0;
+ pstat->wpa_pairwise_cipher = 0;
+ pstat->wpa2_pairwise_cipher = 0;
+ _rtw_memset(pstat->wpa_ie, 0, sizeof(pstat->wpa_ie));
+ if ((psecuritypriv->wpa_psk & BIT(1)) && elems.rsn_ie) {
+ int group_cipher = 0, pairwise_cipher = 0;
+
+ wpa_ie = elems.rsn_ie;
+ wpa_ie_len = elems.rsn_ie_len;
+
+ if (rtw_parse_wpa2_ie(wpa_ie-2, wpa_ie_len+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+ pstat->dot8021xalg = 1;/* psk, todo:802.1x */
+ pstat->wpa_psk |= BIT(1);
+
+ pstat->wpa2_group_cipher = group_cipher&psecuritypriv->wpa2_group_cipher;
+ pstat->wpa2_pairwise_cipher = pairwise_cipher&psecuritypriv->wpa2_pairwise_cipher;
+
+ if (!pstat->wpa2_group_cipher)
+ status = WLAN_STATUS_GROUP_CIPHER_NOT_VALID;
+
+ if (!pstat->wpa2_pairwise_cipher)
+ status = WLAN_STATUS_PAIRWISE_CIPHER_NOT_VALID;
+ } else {
+ status = WLAN_STATUS_INVALID_IE;
+ }
+ } else if ((psecuritypriv->wpa_psk & BIT(0)) && elems.wpa_ie) {
+ int group_cipher = 0, pairwise_cipher = 0;
+
+ wpa_ie = elems.wpa_ie;
+ wpa_ie_len = elems.wpa_ie_len;
+
+ if (rtw_parse_wpa_ie(wpa_ie-2, wpa_ie_len+2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+ pstat->dot8021xalg = 1;/* psk, todo:802.1x */
+ pstat->wpa_psk |= BIT(0);
+
+ pstat->wpa_group_cipher = group_cipher&psecuritypriv->wpa_group_cipher;
+ pstat->wpa_pairwise_cipher = pairwise_cipher&psecuritypriv->wpa_pairwise_cipher;
+
+ if (!pstat->wpa_group_cipher)
+ status = WLAN_STATUS_GROUP_CIPHER_NOT_VALID;
+
+ if (!pstat->wpa_pairwise_cipher)
+ status = WLAN_STATUS_PAIRWISE_CIPHER_NOT_VALID;
+ } else {
+ status = WLAN_STATUS_INVALID_IE;
+ }
+ } else {
+ wpa_ie = NULL;
+ wpa_ie_len = 0;
+ }
+
+ if (status != _STATS_SUCCESSFUL_)
+ goto OnAssocReqFail;
+
+ pstat->flags &= ~(WLAN_STA_WPS | WLAN_STA_MAYBE_WPS);
+ if (wpa_ie == NULL) {
+ if (elems.wps_ie) {
+ DBG_88E("STA included WPS IE in "
+ "(Re)Association Request - assume WPS is "
+ "used\n");
+ pstat->flags |= WLAN_STA_WPS;
+ /* wpabuf_free(sta->wps_ie); */
+ /* sta->wps_ie = wpabuf_alloc_copy(elems.wps_ie + 4, */
+ /* elems.wps_ie_len - 4); */
+ } else {
+ DBG_88E("STA did not include WPA/RSN IE "
+ "in (Re)Association Request - possible WPS "
+ "use\n");
+ pstat->flags |= WLAN_STA_MAYBE_WPS;
+ }
+
+
+ /* The AP supports WPA/RSN and the STA is going to do WPS, but the AP is not ready, */
+ /* i.e. the selected registrar of the AP is FALSE */
+ if ((psecuritypriv->wpa_psk > 0) && (pstat->flags & (WLAN_STA_WPS|WLAN_STA_MAYBE_WPS))) {
+ if (pmlmepriv->wps_beacon_ie) {
+ u8 selected_registrar = 0;
+
+ rtw_get_wps_attr_content(pmlmepriv->wps_beacon_ie, pmlmepriv->wps_beacon_ie_len, WPS_ATTR_SELECTED_REGISTRAR, &selected_registrar, NULL);
+
+ if (!selected_registrar) {
+ DBG_88E("selected_registrar is false , or AP is not ready to do WPS\n");
+
+ status = _STATS_UNABLE_HANDLE_STA_;
+
+ goto OnAssocReqFail;
+ }
+ }
+ }
+ } else {
+ int copy_len;
+
+ if (psecuritypriv->wpa_psk == 0) {
+ DBG_88E("STA %pM: WPA/RSN IE in association "
+ "request, but AP don't support WPA/RSN\n", pstat->hwaddr);
+
+ status = WLAN_STATUS_INVALID_IE;
+
+ goto OnAssocReqFail;
+ }
+
+ if (elems.wps_ie) {
+ DBG_88E("STA included WPS IE in "
+ "(Re)Association Request - WPS is "
+ "used\n");
+ pstat->flags |= WLAN_STA_WPS;
+ copy_len = 0;
+ } else {
+ copy_len = ((wpa_ie_len+2) > sizeof(pstat->wpa_ie)) ? (sizeof(pstat->wpa_ie)) : (wpa_ie_len+2);
+ }
+ if (copy_len > 0)
+ memcpy(pstat->wpa_ie, wpa_ie-2, copy_len);
+ }
+ /* check if there is a WMM IE & WMM-PS support */
+ pstat->flags &= ~WLAN_STA_WME;
+ pstat->qos_option = 0;
+ pstat->qos_info = 0;
+ pstat->has_legacy_ac = true;
+ pstat->uapsd_vo = 0;
+ pstat->uapsd_vi = 0;
+ pstat->uapsd_be = 0;
+ pstat->uapsd_bk = 0;
+ if (pmlmepriv->qospriv.qos_option) {
+ p = pframe + WLAN_HDR_A3_LEN + ie_offset;
+ ie_len = 0;
+ for (;;) {
+ p = rtw_get_ie(p, _VENDOR_SPECIFIC_IE_, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
+ if (p != NULL) {
+ if (_rtw_memcmp(p+2, WMM_IE, 6)) {
+ pstat->flags |= WLAN_STA_WME;
+
+ pstat->qos_option = 1;
+ pstat->qos_info = *(p+8);
+
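+ /* WMM QoS Info field from the STA: bits 0-3 are the per-AC U-APSD flags */
+ /* (bit 0 VO, bit 1 VI, bit 2 BK, bit 3 BE), bits 5-6 are the Max SP Length */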
+ pstat->max_sp_len = (pstat->qos_info>>5)&0x3;
+
+ if ((pstat->qos_info&0xf) != 0xf)
+ pstat->has_legacy_ac = true;
+ else
+ pstat->has_legacy_ac = false;
+
+ if (pstat->qos_info&0xf) {
+ if (pstat->qos_info&BIT(0))
+ pstat->uapsd_vo = BIT(0)|BIT(1);
+ else
+ pstat->uapsd_vo = 0;
+
+ if (pstat->qos_info&BIT(1))
+ pstat->uapsd_vi = BIT(0)|BIT(1);
+ else
+ pstat->uapsd_vi = 0;
+
+ if (pstat->qos_info&BIT(2))
+ pstat->uapsd_bk = BIT(0)|BIT(1);
+ else
+ pstat->uapsd_bk = 0;
+
+ if (pstat->qos_info&BIT(3))
+ pstat->uapsd_be = BIT(0)|BIT(1);
+ else
+ pstat->uapsd_be = 0;
+ }
+ break;
+ }
+ } else {
+ break;
+ }
+ p = p + ie_len + 2;
+ }
+ }
+
+ /* save HT capabilities in the sta object */
+ _rtw_memset(&pstat->htpriv.ht_cap, 0, sizeof(struct rtw_ieee80211_ht_cap));
+ if (elems.ht_capabilities && elems.ht_capabilities_len >= sizeof(struct rtw_ieee80211_ht_cap)) {
+ pstat->flags |= WLAN_STA_HT;
+
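+ /* an HT STA is QoS-capable by definition, so also treat it as WMM-capable */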
+ pstat->flags |= WLAN_STA_WME;
+
+ memcpy(&pstat->htpriv.ht_cap, elems.ht_capabilities, sizeof(struct rtw_ieee80211_ht_cap));
+ } else {
+ pstat->flags &= ~WLAN_STA_HT;
+ }
+ if ((!pmlmepriv->htpriv.ht_option) && (pstat->flags&WLAN_STA_HT)) {
+ status = _STATS_FAILURE_;
+ goto OnAssocReqFail;
+ }
+
+ if ((pstat->flags & WLAN_STA_HT) &&
+ ((pstat->wpa2_pairwise_cipher&WPA_CIPHER_TKIP) ||
+ (pstat->wpa_pairwise_cipher&WPA_CIPHER_TKIP))) {
+ DBG_88E("HT: %pM tried to "
+ "use TKIP with HT association\n", pstat->hwaddr);
+
+ /* status = WLAN_STATUS_CIPHER_REJECTED_PER_POLICY; */
+ /* goto OnAssocReqFail; */
+ }
+
+ pstat->flags |= WLAN_STA_NONERP;
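+ /* rates are encoded in 0.5 Mbps units, so any rate above 22 (11 Mbps) is an ERP/OFDM rate */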
+ for (i = 0; i < pstat->bssratelen; i++) {
+ if ((pstat->bssrateset[i] & 0x7f) > 22) {
+ pstat->flags &= ~WLAN_STA_NONERP;
+ break;
+ }
+ }
+
+ if (pstat->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
+ pstat->flags |= WLAN_STA_SHORT_PREAMBLE;
+ else
+ pstat->flags &= ~WLAN_STA_SHORT_PREAMBLE;
+
+
+
+ if (status != _STATS_SUCCESSFUL_)
+ goto OnAssocReqFail;
+
+#ifdef CONFIG_88EU_P2P
+ pstat->is_p2p_device = false;
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ p2pie = rtw_get_p2p_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, pkt_len - WLAN_HDR_A3_LEN - ie_offset, NULL, &p2pielen);
+ if (p2pie) {
+ pstat->is_p2p_device = true;
+ p2p_status_code = (u8)process_assoc_req_p2p_ie(pwdinfo, pframe, pkt_len, pstat);
+ if (p2p_status_code > 0) {
+ pstat->p2p_status_code = p2p_status_code;
+ status = _STATS_CAP_FAIL_;
+ goto OnAssocReqFail;
+ }
+ }
+ }
+ pstat->p2p_status_code = p2p_status_code;
+#endif /* CONFIG_88EU_P2P */
+
+ /* TODO: identify_proprietary_vendor_ie(); */
+ /* Realtek proprietary IE */
+ /* identify if this is Broadcom sta */
+ /* identify if this is ralink sta */
+ /* Customer proprietary IE */
+
+ /* get a unique AID */
+ if (pstat->aid > 0) {
+ DBG_88E(" old AID %d\n", pstat->aid);
+ } else {
+ for (pstat->aid = 1; pstat->aid <= NUM_STA; pstat->aid++)
+ if (pstapriv->sta_aid[pstat->aid - 1] == NULL)
+ break;
+
+ /* if (pstat->aid > NUM_STA) { */
+ if (pstat->aid > pstapriv->max_num_sta) {
+ pstat->aid = 0;
+
+ DBG_88E(" no room for more AIDs\n");
+
+ status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
+
+ goto OnAssocReqFail;
+ } else {
+ pstapriv->sta_aid[pstat->aid - 1] = pstat;
+ DBG_88E("allocate new AID=(%d)\n", pstat->aid);
+ }
+ }
+
+ pstat->state &= (~WIFI_FW_ASSOC_STATE);
+ pstat->state |= WIFI_FW_ASSOC_SUCCESS;
+
+ _enter_critical_bh(&pstapriv->auth_list_lock, &irqL);
+ if (!rtw_is_list_empty(&pstat->auth_list)) {
+ rtw_list_delete(&pstat->auth_list);
+ pstapriv->auth_list_cnt--;
+ }
+ _exit_critical_bh(&pstapriv->auth_list_lock, &irqL);
+
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ if (rtw_is_list_empty(&pstat->asoc_list)) {
+ pstat->expire_to = pstapriv->expire_to;
+ rtw_list_insert_tail(&pstat->asoc_list, &pstapriv->asoc_list);
+ pstapriv->asoc_list_cnt++;
+ }
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+
+ /* now the station is qualified to join our BSS... */
+ if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && (status == _STATS_SUCCESSFUL_)) {
+#ifdef CONFIG_88EU_AP_MODE
+ /* 1 bss_cap_update & sta_info_update */
+ bss_cap_update_on_sta_join(padapter, pstat);
+ sta_info_update(padapter, pstat);
+
+ /* issue assoc rsp before notify station join event. */
+ if (frame_type == WIFI_ASSOCREQ)
+ issue_asocrsp(padapter, status, pstat, WIFI_ASSOCRSP);
+ else
+ issue_asocrsp(padapter, status, pstat, WIFI_REASSOCRSP);
+
+ /* 2 - report to upper layer */
+ DBG_88E("indicate_sta_join_event to upper layer - hostapd\n");
+ rtw_indicate_sta_assoc_event(padapter, pstat);
+
+ /* 3-(1) report sta add event */
+ report_add_sta_event(padapter, pstat->hwaddr, pstat->aid);
+#endif
+ }
+
+ return _SUCCESS;
+
+asoc_class2_error:
+
+#ifdef CONFIG_88EU_AP_MODE
+ issue_deauth(padapter, (void *)GetAddr2Ptr(pframe), status);
+#endif
+
+ return _FAIL;
+
+OnAssocReqFail:
+
+
+#ifdef CONFIG_88EU_AP_MODE
+ pstat->aid = 0;
+ if (frame_type == WIFI_ASSOCREQ)
+ issue_asocrsp(padapter, status, pstat, WIFI_ASSOCRSP);
+ else
+ issue_asocrsp(padapter, status, pstat, WIFI_REASSOCRSP);
+#endif
+
+
+#endif /* CONFIG_88EU_AP_MODE */
+
+ return _FAIL;
+}
+
+unsigned int OnAssocRsp(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ uint i;
+ int res;
+ unsigned short status;
+ struct ndis_802_11_var_ie *pIE;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ /* struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network); */
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ uint pkt_len = precv_frame->u.hdr.len;
+
+ DBG_88E("%s\n", __func__);
+
+ /* check A1 matches or not */
+ if (!_rtw_memcmp(myid(&(padapter->eeprompriv)), get_da(pframe), ETH_ALEN))
+ return _SUCCESS;
+
+ if (!(pmlmeinfo->state & (WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE)))
+ return _SUCCESS;
+
+ if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)
+ return _SUCCESS;
+
+ _cancel_timer_ex(&pmlmeext->link_timer);
+
+ /* status */
+ status = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN + 2));
+ if (status > 0) {
+ DBG_88E("assoc reject, status code: %d\n", status);
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ res = -4;
+ goto report_assoc_result;
+ }
+
+ /* get capabilities */
+ pmlmeinfo->capability = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN));
+
+ /* set slot time */
+ pmlmeinfo->slotTime = (pmlmeinfo->capability & BIT(10)) ? 9 : 20;
+
+ /* AID */
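+ /* the AP sets the two MSBs of the AID field to 1, so mask them off here */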
+ pmlmeinfo->aid = (int)(le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN + 4))&0x3fff);
+ res = pmlmeinfo->aid;
+
+ /* The following is handled in the join event callback function */
+ /* (HT, WMM, rate adaptive and MAC register updates) */
+ /* so that no synchronous I/O is done in the tasklet */
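+ /* walk the tagged IEs that follow the 6 fixed bytes (capability 2, status 2, AID 2) of the response body */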
+ for (i = (6 + WLAN_HDR_A3_LEN); i < pkt_len;) {
+ pIE = (struct ndis_802_11_var_ie *)(pframe + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+ if (_rtw_memcmp(pIE->data, WMM_PARA_OUI, 6)) /* WMM */
+ WMM_param_handler(padapter, pIE);
+ break;
+ case _HT_CAPABILITY_IE_: /* HT caps */
+ HT_caps_handler(padapter, pIE);
+ break;
+ case _HT_EXTRA_INFO_IE_: /* HT info */
+ HT_info_handler(padapter, pIE);
+ break;
+ case _ERPINFO_IE_:
+ ERP_IE_handler(padapter, pIE);
+ break;
+ default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+
+ pmlmeinfo->state &= (~WIFI_FW_ASSOC_STATE);
+ pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
+
+ /* Update Basic Rate Table for spec, 2010-12-28, by thomas */
+ UpdateBrateTbl(padapter, pmlmeinfo->network.SupportedRates);
+
+report_assoc_result:
+ if (res > 0) {
+ rtw_buf_update(&pmlmepriv->assoc_rsp, &pmlmepriv->assoc_rsp_len, pframe, pkt_len);
+ } else {
+ rtw_buf_free(&pmlmepriv->assoc_rsp, &pmlmepriv->assoc_rsp_len);
+ }
+
+ report_join_res(padapter, res);
+
+ return _SUCCESS;
+}
+
+unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ unsigned short reason;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+#endif /* CONFIG_88EU_P2P */
+
+ /* check A3 */
+ if (!(_rtw_memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)))
+ return _SUCCESS;
+
+#ifdef CONFIG_88EU_P2P
+ if (pwdinfo->rx_invitereq_info.scan_op_ch_only) {
+ _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey);
+ _set_timer(&pwdinfo->reset_ch_sitesurvey, 10);
+ }
+#endif /* CONFIG_88EU_P2P */
+
+ reason = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN));
+
+ DBG_88E("%s Reason code(%d)\n", __func__, reason);
+
+#ifdef CONFIG_88EU_AP_MODE
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ unsigned long irqL;
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_88E_LEVEL(_drv_always_, "ap recv deauth reason code(%d) sta:%pM\n",
+ reason, GetAddr2Ptr(pframe));
+
+ psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+ if (psta) {
+ u8 updated = 0;
+
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ if (!rtw_is_list_empty(&psta->asoc_list)) {
+ rtw_list_delete(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+ updated = ap_free_sta(padapter, psta, false, reason);
+ }
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+
+ associated_clients_update(padapter, updated);
+ }
+
+
+ return _SUCCESS;
+ } else
+#endif
+ {
+ DBG_88E_LEVEL(_drv_always_, "sta recv deauth reason code(%d) sta:%pM\n",
+ reason, GetAddr3Ptr(pframe));
+
+ receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
+ }
+ pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
+ return _SUCCESS;
+}
+
+unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ u16 reason;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+#endif /* CONFIG_88EU_P2P */
+
+ /* check A3 */
+ if (!(_rtw_memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)))
+ return _SUCCESS;
+
+#ifdef CONFIG_88EU_P2P
+ if (pwdinfo->rx_invitereq_info.scan_op_ch_only) {
+ _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey);
+ _set_timer(&pwdinfo->reset_ch_sitesurvey, 10);
+ }
+#endif /* CONFIG_88EU_P2P */
+
+ reason = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN));
+
+ DBG_88E("%s Reason code(%d)\n", __func__, reason);
+
+#ifdef CONFIG_88EU_AP_MODE
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ unsigned long irqL;
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ /* _enter_critical_bh(&(pstapriv->sta_hash_lock), &irqL); */
+ /* rtw_free_stainfo(padapter, psta); */
+ /* _exit_critical_bh(&(pstapriv->sta_hash_lock), &irqL); */
+
+ DBG_88E_LEVEL(_drv_always_, "ap recv disassoc reason code(%d) sta:%pM\n",
+ reason, GetAddr2Ptr(pframe));
+
+ psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+ if (psta) {
+ u8 updated = 0;
+
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ if (!rtw_is_list_empty(&psta->asoc_list)) {
+ rtw_list_delete(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+ updated = ap_free_sta(padapter, psta, false, reason);
+ }
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+
+ associated_clients_update(padapter, updated);
+ }
+
+ return _SUCCESS;
+ } else
+#endif
+ {
+ DBG_88E_LEVEL(_drv_always_, "ap recv disassoc reason code(%d) sta:%pM\n",
+ reason, GetAddr3Ptr(pframe));
+
+ receive_disconnect(padapter, GetAddr3Ptr(pframe), reason);
+ }
+ pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
+ return _SUCCESS;
+}
+
+unsigned int OnAtim(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ DBG_88E("%s\n", __func__);
+ return _SUCCESS;
+}
+
+unsigned int on_action_spct(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ unsigned int ret = _FAIL;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *frame_body = (u8 *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+ u8 category;
+ u8 action;
+
+ DBG_88E(FUNC_NDEV_FMT"\n", FUNC_NDEV_ARG(padapter->pnetdev));
+
+ psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+
+ if (!psta)
+ goto exit;
+
+ category = frame_body[0];
+ if (category != RTW_WLAN_CATEGORY_SPECTRUM_MGMT)
+ goto exit;
+
+ action = frame_body[1];
+ switch (action) {
+ case RTW_WLAN_ACTION_SPCT_MSR_REQ:
+ case RTW_WLAN_ACTION_SPCT_MSR_RPRT:
+ case RTW_WLAN_ACTION_SPCT_TPC_REQ:
+ case RTW_WLAN_ACTION_SPCT_TPC_RPRT:
+ break;
+ case RTW_WLAN_ACTION_SPCT_CHL_SWITCH:
+ break;
+ default:
+ break;
+ }
+
+exit:
+ return ret;
+}
+
+unsigned int OnAction_qos(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ return _SUCCESS;
+}
+
+unsigned int OnAction_dls(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ return _SUCCESS;
+}
+
+unsigned int OnAction_back(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ u8 *addr;
+ struct sta_info *psta = NULL;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ unsigned char *frame_body;
+ unsigned char category, action;
+ unsigned short tid, status, reason_code = 0;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ /* check RA matches or not */
+ if (!_rtw_memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe), ETH_ALEN))/* for if1, sta/ap mode */
+ return _SUCCESS;
+
+ DBG_88E("%s\n", __func__);
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ if (!(pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS))
+ return _SUCCESS;
+
+ addr = GetAddr2Ptr(pframe);
+ psta = rtw_get_stainfo(pstapriv, addr);
+
+ if (psta == NULL)
+ return _SUCCESS;
+
+ frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+
+ category = frame_body[0];
+ if (category == RTW_WLAN_CATEGORY_BACK) { /* representing Block Ack */
+ if (!pmlmeinfo->HT_enable)
+ return _SUCCESS;
+ action = frame_body[1];
+ DBG_88E("%s, action=%d\n", __func__, action);
+ switch (action) {
+ case RTW_WLAN_ACTION_ADDBA_REQ: /* ADDBA request */
+ memcpy(&(pmlmeinfo->ADDBA_req), &(frame_body[2]), sizeof(struct ADDBA_request));
+ process_addba_req(padapter, (u8 *)&(pmlmeinfo->ADDBA_req), addr);
+
+ if (pmlmeinfo->bAcceptAddbaReq)
+ issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 0);
+ else
+ issue_action_BA(padapter, addr, RTW_WLAN_ACTION_ADDBA_RESP, 37);/* reject ADDBA Req */
+ break;
+ case RTW_WLAN_ACTION_ADDBA_RESP: /* ADDBA response */
+ status = RTW_GET_LE16(&frame_body[3]);
+ tid = ((frame_body[5] >> 2) & 0x7);
+ if (status == 0) { /* successful */
+ DBG_88E("agg_enable for TID=%d\n", tid);
+ psta->htpriv.agg_enable_bitmap |= 1 << tid;
+ psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
+ } else {
+ psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
+ }
+ break;
+ case RTW_WLAN_ACTION_DELBA: /* DELBA */
+ if ((frame_body[3] & BIT(3)) == 0) {
+ psta->htpriv.agg_enable_bitmap &= ~(1 << ((frame_body[3] >> 4) & 0xf));
+ psta->htpriv.candidate_tid_bitmap &= ~(1 << ((frame_body[3] >> 4) & 0xf));
+ reason_code = RTW_GET_LE16(&frame_body[4]);
+ } else if ((frame_body[3] & BIT(3)) == BIT(3)) {
+ tid = (frame_body[3] >> 4) & 0x0F;
+ preorder_ctrl = &psta->recvreorder_ctrl[tid];
+ preorder_ctrl->enable = false;
+ preorder_ctrl->indicate_seq = 0xffff;
+ }
+ DBG_88E("%s(): DELBA: %x(%x)\n", __func__, pmlmeinfo->agg_enable_bitmap, reason_code);
+ /* todo: how to notify the host while receiving DELETE BA */
+ break;
+ default:
+ break;
+ }
+ }
+ return _SUCCESS;
+}
+
+#ifdef CONFIG_88EU_P2P
+
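+/* Sum of the channel counts over every operating class in the given channel list. */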
+static int get_reg_classes_full_count(struct p2p_channels channel_list)
+{
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < channel_list.reg_classes; i++)
+ cnt += channel_list.reg_class[i].channels;
+
+ return cnt;
+}
+
+void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr)
+{
+ unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ __be32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_GO_NEGO_REQ;
+ u8 wpsie[255] = { 0x00 }, p2pie[255] = { 0x00 };
+ u8 wpsielen = 0, p2pielen = 0;
+ u16 len_channellist_attr = 0;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ DBG_88E("[%s] In\n", __func__);
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, myid(&(padapter->eeprompriv)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&(p2poui), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(oui_subtype), &(pattrib->pktlen));
+ pwdinfo->negotiation_dialog_token = 1; /* Initialize the dialog value */
+ pframe = rtw_set_fixed_ie(pframe, 1, &pwdinfo->negotiation_dialog_token, &(pattrib->pktlen));
+
+
+
+ /* WPS Section */
+ wpsielen = 0;
+ /* WPS OUI */
+ *(__be32 *)(wpsie) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* WPS version */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_VER1);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_VERSION_1; /* Version 1.0 */
+
+ /* Device Password ID */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_DEVICE_PWID);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0002);
+ wpsielen += 2;
+
+ /* Value: */
+
+ if (pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_PEER_DISPLAY_PIN)
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_DPID_USER_SPEC);
+ else if (pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_SELF_DISPLAY_PIN)
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_DPID_REGISTRAR_SPEC);
+ else if (pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_PBC)
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_DPID_PBC);
+
+ wpsielen += 2;
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, wpsielen, (unsigned char *)wpsie, &pattrib->pktlen);
+
+
+ /* P2P IE Section. */
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20110306 */
+ /* According to the P2P Specification, the group negotiation request frame should contain 9 P2P attributes */
+ /* 1. P2P Capability */
+ /* 2. Group Owner Intent */
+ /* 3. Configuration Timeout */
+ /* 4. Listen Channel */
+ /* 5. Extended Listen Timing */
+ /* 6. Intended P2P Interface Address */
+ /* 7. Channel List */
+ /* 8. P2P Device Info */
+ /* 9. Operating Channel */
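+ /* Each P2P attribute below is a TLV: a 1-byte attribute ID, a 2-byte little-endian length, */
+ /* then the value. Note the contrast with the WPS attributes above, whose 2-byte type and */
+ /* length fields are big endian. */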
+
+
+ /* P2P Capability */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
+
+ /* Group Capability Bitmap, 1 byte */
+ if (pwdinfo->persistent_supported)
+ p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN | P2P_GRPCAP_PERSISTENT_GROUP;
+ else
+ p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN;
+
+ /* Group Owner Intent */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GO_INTENT;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0001);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Todo the tie breaker bit. */
+ p2pie[p2pielen++] = ((pwdinfo->intent << 1) | BIT(0));
+
+ /* Configuration Timeout */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CONF_TIMEOUT;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P GO */
+ p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P Client */
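+ /* (the Configuration Timeout values are in units of 10 ms, so 200 = 2 seconds) */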
+
+
+ /* Listen Channel */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_LISTEN_CH;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51; /* Copy from SD7 */
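+ /* (0x51 = global operating class 81: 2.4 GHz band, channels 1-13) */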
+
+ /* Channel Number */
+ p2pie[p2pielen++] = pwdinfo->listen_channel; /* listening channel number */
+
+
+ /* Extended Listen Timing ATTR */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_EX_LISTEN_TIMING;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0004);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Availability Period */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0xFFFF);
+ p2pielen += 2;
+
+ /* Availability Interval */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0xFFFF);
+ p2pielen += 2;
+
+
+ /* Intended P2P Interface Address */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_INTENTED_IF_ADDR;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(ETH_ALEN);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+
+ /* Channel List */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CH_LIST;
+
+ /* Length: */
+ /* Country String(3) */
+ /* + (Operating Class (1) + Number of Channels(1)) * Operation Classes (?) */
+ /* + number of channels in all classes */
+ len_channellist_attr = 3
+ + (1 + 1) * (u16)(pmlmeext->channel_list.reg_classes)
+ + get_reg_classes_full_count(pmlmeext->channel_list);
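+ /* e.g. (hypothetical numbers) two operating classes with 11 and 2 channels would give */
+ /* 3 + (1 + 1) * 2 + 13 = 20 bytes for the Channel List attribute value */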
+
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Channel Entry List */
+
+ {
+ int i, j;
+ for (j = 0; j < pmlmeext->channel_list.reg_classes; j++) {
+ /* Operating Class */
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].reg_class;
+
+ /* Number of Channels */
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channels;
+
+ /* Channel List */
+ for (i = 0; i < pmlmeext->channel_list.reg_class[j].channels; i++) {
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channel[i];
+ }
+ }
+ }
+
+ /* Device Info */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes) */
+ /* + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes) */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address */
+ memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Config Method */
+ /* This field should be big endian. Noted by P2P specification. */
+
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(pwdinfo->supported_wps_cm);
+
+ p2pielen += 2;
+
+ /* Primary Device Type */
+ /* Category ID */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
+ p2pielen += 2;
+
+ /* OUI */
+ *(__be32 *)(p2pie + p2pielen) = cpu_to_be32(WPSOUI);
+ p2pielen += 4;
+
+ /* Sub Category ID */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
+ p2pielen += 2;
+
+ /* Number of Secondary Device Types */
+ p2pie[p2pielen++] = 0x00; /* No Secondary Device Type List */
+
+ /* Device Name */
+ /* Type: */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
+ p2pielen += 2;
+
+ /* Length: */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->device_name, pwdinfo->device_name_len);
+ p2pielen += pwdinfo->device_name_len;
+
+
+ /* Operating Channel */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51;
+
+ /* Channel Number */
+ p2pie[p2pielen++] = pwdinfo->operating_channel; /* operating channel number */
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+
+ return;
+}
+
+static void issue_p2p_GO_response(struct adapter *padapter, u8 *raddr, u8 *frame_body, uint len, u8 result)
+{
+ unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ __be32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_GO_NEGO_RESP;
+ u8 wpsie[255] = { 0x00 }, p2pie[255] = { 0x00 };
+ u8 p2pielen = 0;
+ uint wpsielen = 0;
+ u16 wps_devicepassword_id = 0x0000;
+ __be16 be_tmp;
+ uint wps_devicepassword_id_len = 0;
+ u16 len_channellist_attr = 0;
+
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ DBG_88E("[%s] In, result=%d\n", __func__, result);
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, myid(&(padapter->eeprompriv)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&(p2poui), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(oui_subtype), &(pattrib->pktlen));
+ pwdinfo->negotiation_dialog_token = frame_body[7]; /* The Dialog Token of the group negotiation request frame. */
+ pframe = rtw_set_fixed_ie(pframe, 1, &(pwdinfo->negotiation_dialog_token), &(pattrib->pktlen));
+
+ /* Commented by Albert 20110328 */
+ /* Try to get the device password ID from the WPS IE of group negotiation request frame */
+ /* WiFi Direct test plan 5.1.15 */
+ rtw_get_wps_ie(frame_body + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, wpsie, &wpsielen);
+ rtw_get_wps_attr_content(wpsie, wpsielen, WPS_ATTR_DEVICE_PWID, (u8 *)&be_tmp, &wps_devicepassword_id_len);
+ wps_devicepassword_id = be16_to_cpu(be_tmp);
+
+ _rtw_memset(wpsie, 0x00, 255);
+ wpsielen = 0;
+
+ /* WPS Section */
+ wpsielen = 0;
+ /* WPS OUI */
+ *(__be32 *)(wpsie) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* WPS version */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_VER1);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_VERSION_1; /* Version 1.0 */
+
+ /* Device Password ID */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_DEVICE_PWID);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0002);
+ wpsielen += 2;
+
+ /* Value: */
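+ /* mirror the peer's Device Password ID: USER_SPEC <-> REGISTRAR_SPEC, anything else falls back to PBC */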
+ if (wps_devicepassword_id == WPS_DPID_USER_SPEC)
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_DPID_REGISTRAR_SPEC);
+ else if (wps_devicepassword_id == WPS_DPID_REGISTRAR_SPEC)
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_DPID_USER_SPEC);
+ else
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_DPID_PBC);
+ wpsielen += 2;
+
+ /* Commented by Kurt 20120113 */
+ /* If some device wants to do p2p handshake without sending prov_disc_req */
+ /* We have to get peer_req_cm from here. */
+ if (_rtw_memcmp(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "000", 3)) {
+ if (wps_devicepassword_id == WPS_DPID_USER_SPEC)
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "dis", 3);
+ else if (wps_devicepassword_id == WPS_DPID_REGISTRAR_SPEC)
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pad", 3);
+ else
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pbc", 3);
+ }
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, wpsielen, (unsigned char *)wpsie, &pattrib->pktlen);
+
+
+ /* P2P IE Section. */
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20100908 */
+ /* According to the P2P Specification, the group negotiation response frame should contain 9 P2P attributes */
+ /* 1. Status */
+ /* 2. P2P Capability */
+ /* 3. Group Owner Intent */
+ /* 4. Configuration Timeout */
+ /* 5. Operating Channel */
+ /* 6. Intended P2P Interface Address */
+ /* 7. Channel List */
+ /* 8. Device Info */
+ /* 9. Group ID (Only GO) */
+
+
+ /* ToDo: */
+
+ /* P2P Status */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_STATUS;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0001);
+ p2pielen += 2;
+
+ /* Value: */
+ p2pie[p2pielen++] = result;
+
+ /* P2P Capability */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT)) {
+ /* Commented by Albert 2011/03/08 */
+ /* According to the P2P specification */
+ /* if the sending device will be a client, the device capability bitmap of the group negotiation response frame should be reserved (0) */
+ p2pie[p2pielen++] = 0;
+ } else {
+ /* Be group owner or meet the error case */
+ p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
+ }
+
+ /* Group Capability Bitmap, 1 byte */
+ if (pwdinfo->persistent_supported) {
+ p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN | P2P_GRPCAP_PERSISTENT_GROUP;
+ } else {
+ p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN;
+ }
+
+ /* Group Owner Intent */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GO_INTENT;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0001);
+ p2pielen += 2;
+
+ /* Value: */
+ if (pwdinfo->peer_intent & 0x01) {
+ /* Peer's tie breaker bit is 1, our tie breaker bit should be 0 */
+ p2pie[p2pielen++] = (pwdinfo->intent << 1);
+ } else {
+ /* Peer's tie breaker bit is 0, our tie breaker bit should be 1 */
+ p2pie[p2pielen++] = ((pwdinfo->intent << 1) | BIT(0));
+ }
+
+
+ /* Configuration Timeout */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CONF_TIMEOUT;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P GO */
+ p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P Client */
+
+ /* Operating Channel */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51;
+
+ /* Channel Number */
+ p2pie[p2pielen++] = pwdinfo->operating_channel; /* operating channel number */
+
+ /* Intended P2P Interface Address */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_INTENTED_IF_ADDR;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(ETH_ALEN);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Channel List */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CH_LIST;
+
+ /* Country String(3) */
+ /* + (Operating Class (1) + Number of Channels(1)) * Operation Classes (?) */
+ /* + number of channels in all classes */
+ len_channellist_attr = 3
+ + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
+ + get_reg_classes_full_count(pmlmeext->channel_list);
+
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
+
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Channel Entry List */
+
+ {
+ int i, j;
+ for (j = 0; j < pmlmeext->channel_list.reg_classes; j++) {
+ /* Operating Class */
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].reg_class;
+
+ /* Number of Channels */
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channels;
+
+ /* Channel List */
+ for (i = 0; i < pmlmeext->channel_list.reg_class[j].channels; i++) {
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channel[i];
+ }
+ }
+ }
+
+ /* Device Info */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes) */
+ /* + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes) */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address */
+ memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Config Method */
+ /* This field should be big endian. Noted by P2P specification. */
+
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(pwdinfo->supported_wps_cm);
+
+ p2pielen += 2;
+
+ /* Primary Device Type */
+ /* Category ID */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
+ p2pielen += 2;
+
+ /* OUI */
+ *(__be32 *)(p2pie + p2pielen) = cpu_to_be32(WPSOUI);
+ p2pielen += 4;
+
+ /* Sub Category ID */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
+ p2pielen += 2;
+
+ /* Number of Secondary Device Types */
+ p2pie[p2pielen++] = 0x00; /* No Secondary Device Type List */
+
+ /* Device Name */
+ /* Type: */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
+ p2pielen += 2;
+
+ /* Length: */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->device_name, pwdinfo->device_name_len);
+ p2pielen += pwdinfo->device_name_len;
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ /* Group ID Attribute */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GROUP_ID;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(ETH_ALEN + pwdinfo->nego_ssidlen);
+ p2pielen += 2;
+
+ /* Value: */
+ /* p2P Device Address */
+ memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* SSID */
+ memcpy(p2pie + p2pielen, pwdinfo->nego_ssid, pwdinfo->nego_ssidlen);
+ p2pielen += pwdinfo->nego_ssidlen;
+ }
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+ return;
+}
+
+static void issue_p2p_GO_confirm(struct adapter *padapter, u8 *raddr, u8 result)
+{
+ unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ __be32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_GO_NEGO_CONF;
+ u8 p2pie[255] = { 0x00 };
+ u8 p2pielen = 0;
+
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ DBG_88E("[%s] In\n", __func__);
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, myid(&(padapter->eeprompriv)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&(p2poui), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(oui_subtype), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(pwdinfo->negotiation_dialog_token), &(pattrib->pktlen));
+
+
+
+ /* P2P IE Section. */
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20110306 */
+ /* According to the P2P Specification, the group negotiation confirmation frame should contain 5 P2P attributes */
+ /* 1. Status */
+ /* 2. P2P Capability */
+ /* 3. Operating Channel */
+ /* 4. Channel List */
+ /* 5. Group ID (if this WiFi is GO) */
+
+ /* P2P Status */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_STATUS;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0001);
+ p2pielen += 2;
+
+ /* Value: */
+ p2pie[p2pielen++] = result;
+
+ /* P2P Capability */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
+
+ /* Group Capability Bitmap, 1 byte */
+ if (pwdinfo->persistent_supported)
+ p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN | P2P_GRPCAP_PERSISTENT_GROUP;
+ else
+ p2pie[p2pielen++] = P2P_GRPCAP_CROSS_CONN;
+
+ /* Operating Channel */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT)) {
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51;
+ p2pie[p2pielen++] = pwdinfo->peer_operating_ch;
+ } else {
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51;
+
+ /* Channel Number */
+ p2pie[p2pielen++] = pwdinfo->operating_channel; /* Use the listen channel as the operating channel */
+ }
+
+
+ /* Channel List */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CH_LIST;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(pwdinfo->channel_list_attr_len);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->channel_list_attr, pwdinfo->channel_list_attr_len);
+ p2pielen += pwdinfo->channel_list_attr_len;
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ /* Group ID Attribute */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GROUP_ID;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(ETH_ALEN + pwdinfo->nego_ssidlen);
+ p2pielen += 2;
+
+ /* Value: */
+ /* p2P Device Address */
+ memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* SSID */
+ memcpy(p2pie + p2pielen, pwdinfo->nego_ssid, pwdinfo->nego_ssidlen);
+ p2pielen += pwdinfo->nego_ssidlen;
+ }
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
+ pattrib->last_txcmdsz = pattrib->pktlen;
+ dump_mgntframe(padapter, pmgntframe);
+ return;
+}
+
+void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr)
+{
+ unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ __be32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_INVIT_REQ;
+ u8 p2pie[255] = { 0x00 };
+ u8 p2pielen = 0;
+ u8 dialogToken = 3;
+ u16 len_channellist_attr = 0;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, raddr, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&(p2poui), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(oui_subtype), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(dialogToken), &(pattrib->pktlen));
+
+ /* P2P IE Section. */
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20101011 */
+ /* According to the P2P Specification, the P2P Invitation request frame should contain 7 P2P attributes */
+ /* 1. Configuration Timeout */
+ /* 2. Invitation Flags */
+ /* 3. Operating Channel (Only GO) */
+ /* 4. P2P Group BSSID (Should be included if I am the GO) */
+ /* 5. Channel List */
+ /* 6. P2P Group ID */
+ /* 7. P2P Device Info */
+
+ /* Configuration Timeout */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CONF_TIMEOUT;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P GO */
+ p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P Client */
+
+ /* Invitation Flags */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_INVITATION_FLAGS;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0001);
+ p2pielen += 2;
+
+ /* Value: */
+ p2pie[p2pielen++] = P2P_INVITATION_FLAGS_PERSISTENT;
+
+
+ /* Operating Channel */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51;
+
+ /* Channel Number */
+ p2pie[p2pielen++] = pwdinfo->invitereq_info.operating_ch; /* operating channel number */
+
+ if (_rtw_memcmp(myid(&padapter->eeprompriv), pwdinfo->invitereq_info.go_bssid, ETH_ALEN)) {
+ /* P2P Group BSSID */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GROUP_BSSID;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(ETH_ALEN);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address for GO */
+ memcpy(p2pie + p2pielen, pwdinfo->invitereq_info.go_bssid, ETH_ALEN);
+ p2pielen += ETH_ALEN;
+ }
+
+ /* Channel List */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CH_LIST;
+
+
+ /* Length: */
+ /* Country String(3) */
+ /* + (Operating Class (1) + Number of Channels(1)) * Operation Classes (?) */
+ /* + number of channels in all classes */
+ len_channellist_attr = 3
+ + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
+ + get_reg_classes_full_count(pmlmeext->channel_list);
+
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
+
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Channel Entry List */
+ {
+ int i, j;
+ for (j = 0; j < pmlmeext->channel_list.reg_classes; j++) {
+ /* Operating Class */
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].reg_class;
+
+ /* Number of Channels */
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channels;
+
+ /* Channel List */
+ for (i = 0; i < pmlmeext->channel_list.reg_class[j].channels; i++) {
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channel[i];
+ }
+ }
+ }
+
+
+ /* P2P Group ID */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GROUP_ID;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(6 + pwdinfo->invitereq_info.ssidlen);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address for GO */
+ memcpy(p2pie + p2pielen, pwdinfo->invitereq_info.go_bssid, ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* SSID */
+ memcpy(p2pie + p2pielen, pwdinfo->invitereq_info.go_ssid, pwdinfo->invitereq_info.ssidlen);
+ p2pielen += pwdinfo->invitereq_info.ssidlen;
+
+
+ /* Device Info */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes) */
+ /* + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes) */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address */
+ memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Config Method */
+ /* This field should be big endian. Noted by P2P specification. */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_CONFIG_METHOD_DISPLAY);
+ p2pielen += 2;
+
+ /* Primary Device Type */
+ /* Category ID */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
+ p2pielen += 2;
+
+ /* OUI */
+ *(__be32 *)(p2pie + p2pielen) = cpu_to_be32(WPSOUI);
+ p2pielen += 4;
+
+ /* Sub Category ID */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
+ p2pielen += 2;
+
+ /* Number of Secondary Device Types */
+ p2pie[p2pielen++] = 0x00; /* No Secondary Device Type List */
+
+ /* Device Name */
+ /* Type: */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
+ p2pielen += 2;
+
+ /* Length: */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->device_name, pwdinfo->device_name_len);
+ p2pielen += pwdinfo->device_name_len;
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+
+ return;
+}
+
+void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr, u8 dialogToken, u8 status_code)
+{
+ unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ __be32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_INVIT_RESP;
+ u8 p2pie[255] = { 0x00 };
+ u8 p2pielen = 0;
+ u16 len_channellist_attr = 0;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, raddr, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&(p2poui), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(oui_subtype), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(dialogToken), &(pattrib->pktlen));
+
+ /* P2P IE Section. */
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20101005 */
+ /* According to the P2P Specification, the P2P Invitation response frame should contain 5 P2P attributes */
+ /* 1. Status */
+ /* 2. Configuration Timeout */
+ /* 3. Operating Channel (Only GO) */
+ /* 4. P2P Group BSSID (Only GO) */
+ /* 5. Channel List */
+
+ /* P2P Status */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_STATUS;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0001);
+ p2pielen += 2;
+
+ /* Value: */
+ /* When the status code is P2P_STATUS_FAIL_INFO_UNAVAILABLE: */
+ /* the event for the received P2P Invitation Req frame is sent to the DMP UI. */
+ /* The DMP has to compare the MAC address to find the profile, */
+ /* so the WiFi driver reports P2P_STATUS_FAIL_INFO_UNAVAILABLE to the NB. */
+ /* If the UI finds the corresponding profile, the WiFi driver sends the P2P Invitation Req */
+ /* to the NB to rebuild the persistent group. */
+ p2pie[p2pielen++] = status_code;
+
+ /* Configuration Timeout */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CONF_TIMEOUT;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P GO */
+ p2pie[p2pielen++] = 200; /* 2 seconds needed to be the P2P Client */
+
+ if (status_code == P2P_STATUS_SUCCESS) {
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ /* The P2P Invitation request frame asks this Wi-Fi device to be the P2P GO */
+ /* In this case, the P2P Invitation response frame should carry the two more P2P attributes. */
+ /* First one is operating channel attribute. */
+ /* Second one is P2P Group BSSID attribute. */
+
+ /* Operating Channel */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51; /* Copy from SD7 */
+
+ /* Channel Number */
+ p2pie[p2pielen++] = pwdinfo->operating_channel; /* operating channel number */
+
+
+ /* P2P Group BSSID */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GROUP_BSSID;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(ETH_ALEN);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address for GO */
+ memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ p2pielen += ETH_ALEN;
+ }
+
+ /* Channel List */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CH_LIST;
+
+ /* Length: */
+ /* Country String(3) */
+ /* + (Operating Class (1) + Number of Channels(1)) * Operation Classes (?) */
+ /* + number of channels in all classes */
+ len_channellist_attr = 3
+ + (1 + 1) * (u16)pmlmeext->channel_list.reg_classes
+ + get_reg_classes_full_count(pmlmeext->channel_list);
+
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(len_channellist_attr);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Channel Entry List */
+ {
+ int i, j;
+ for (j = 0; j < pmlmeext->channel_list.reg_classes; j++) {
+ /* Operating Class */
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].reg_class;
+
+ /* Number of Channels */
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channels;
+
+ /* Channel List */
+ for (i = 0; i < pmlmeext->channel_list.reg_class[j].channels; i++) {
+ p2pie[p2pielen++] = pmlmeext->channel_list.reg_class[j].channel[i];
+ }
+ }
+ }
+ }
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+
+ return;
+}
+
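+/* Build and transmit a P2P provision discovery request to pdev_raddr, carrying the P2P IE */
+/* produced by build_prov_disc_request_p2p_ie() plus a minimal WPS IE (version and the */
+/* requested config method). */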
+void issue_p2p_provision_request(struct adapter *padapter, u8 *pssid, u8 ussidlen, u8 *pdev_raddr)
+{
+ unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ u8 dialogToken = 1;
+ u8 oui_subtype = P2P_PROVISION_DISC_REQ;
+ u8 wpsie[100] = { 0x00 };
+ u8 wpsielen = 0;
+ __be32 p2poui = cpu_to_be32(P2POUI);
+ u32 p2pielen = 0;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ DBG_88E("[%s] In\n", __func__);
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, pdev_raddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pdev_raddr, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&(p2poui), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(oui_subtype), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(dialogToken), &(pattrib->pktlen));
+
+ p2pielen = build_prov_disc_request_p2p_ie(pwdinfo, pframe, pssid, ussidlen, pdev_raddr);
+
+ pframe += p2pielen;
+ pattrib->pktlen += p2pielen;
+
+ wpsielen = 0;
+ /* WPS OUI */
+ *(__be32 *)(wpsie) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* WPS version */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_VER1);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_VERSION_1; /* Version 1.0 */
+
+ /* Config Method */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_CONF_METHOD);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0002);
+ wpsielen += 2;
+
+ /* Value: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(pwdinfo->tx_prov_disc_info.wps_config_method_request);
+ wpsielen += 2;
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, wpsielen, (unsigned char *)wpsie, &pattrib->pktlen);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+
+ return;
+}
+
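+/* Return 1 if peermacaddr matches any entry in the persistent group profile list. */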
+static u8 is_matched_in_profilelist(u8 *peermacaddr, struct profile_info *profileinfo)
+{
+ u8 i, match_result = 0;
+
+ DBG_88E("[%s] peermac=%.2X %.2X %.2X %.2X %.2X %.2X\n", __func__,
+ peermacaddr[0], peermacaddr[1], peermacaddr[2], peermacaddr[3], peermacaddr[4], peermacaddr[5]);
+
+ for (i = 0; i < P2P_MAX_PERSISTENT_GROUP_NUM; i++, profileinfo++) {
+ DBG_88E("[%s] profileinfo_mac=%.2X %.2X %.2X %.2X %.2X %.2X\n", __func__,
+ profileinfo->peermac[0], profileinfo->peermac[1], profileinfo->peermac[2], profileinfo->peermac[3], profileinfo->peermac[4], profileinfo->peermac[5]);
+ if (_rtw_memcmp(peermacaddr, profileinfo->peermac, ETH_ALEN)) {
+ match_result = 1;
+ DBG_88E("[%s] Match!\n", __func__);
+ break;
+ }
+ }
+ return match_result;
+}
+
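+/* Build and transmit a P2P probe response: wildcard SSID, OFDM rates, a WPS IE describing */
+/* this device, and the P2P IE produced by build_probe_resp_p2p_ie(). */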
+void issue_probersp_p2p(struct adapter *padapter, unsigned char *da)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ unsigned char *mac;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ u16 beacon_interval = 100;
+ u16 capInfo = 0;
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 wpsie[255] = { 0x00 };
+ u32 wpsielen = 0, p2pielen = 0;
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ mac = myid(&(padapter->eeprompriv));
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
+
+ /* Use the device address for BSSID field. */
+ memcpy(pwlanhdr->addr3, mac, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(fctrl, WIFI_PROBERSP);
+
+ pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = pattrib->hdrlen;
+ pframe += pattrib->hdrlen;
+
+ /* timestamp will be inserted by hardware */
+ pframe += 8;
+ pattrib->pktlen += 8;
+
+ /* beacon interval: 2 bytes */
+ memcpy(pframe, (unsigned char *)&beacon_interval, 2);
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* capability info: 2 bytes */
+ /* ESS and IBSS bits must be 0 (defined in section 3.1.2.1.1 of the Wi-Fi Direct spec) */
+ capInfo |= cap_ShortPremble;
+ capInfo |= cap_ShortSlot;
+
+ memcpy(pframe, (unsigned char *)&capInfo, 2);
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+
+ /* SSID */
+ pframe = rtw_set_ie(pframe, _SSID_IE_, 7, pwdinfo->p2p_wildcard_ssid, &pattrib->pktlen);
+
+ /* supported rates... */
+ /* Use the OFDM rate in the P2P probe response frame. (6(B), 9(B), 12, 18, 24, 36, 48, 54) */
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, pwdinfo->support_rate, &pattrib->pktlen);
+
+ /* DS parameter set */
+ pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, (unsigned char *)&pwdinfo->listen_channel, &pattrib->pktlen);
+
+ /* Todo: WPS IE */
+ /* Noted by Albert 20100907 */
+ /* According to the WPS specification, all WPS attributes are encoded in big-endian byte order. */
+
+ wpsielen = 0;
+ /* WPS OUI */
+ *(__be32 *)(wpsie) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* WPS version */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_VER1);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_VERSION_1; /* Version 1.0 */
+
+ /* WiFi Simple Config State */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_SIMPLE_CONF_STATE);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_WSC_STATE_NOT_CONFIG; /* Not Configured. */
+
+ /* Response Type */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_RESP_TYPE);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_RESPONSE_TYPE_8021X;
+
+ /* UUID-E */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_UUID_E);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0010);
+ wpsielen += 2;
+
+ /* Value: */
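+ /* Only the 6-byte MAC address is copied into the 16-byte UUID-E; the remaining bytes */
+ /* stay zero from the buffer initialization above. */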
+ memcpy(wpsie + wpsielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ wpsielen += 0x10;
+
+ /* Manufacturer */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_MANUFACTURER);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0007);
+ wpsielen += 2;
+
+ /* Value: */
+ memcpy(wpsie + wpsielen, "Realtek", 7);
+ wpsielen += 7;
+
+ /* Model Name */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_MODEL_NAME);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0006);
+ wpsielen += 2;
+
+ /* Value: */
+ memcpy(wpsie + wpsielen, "8188EU", 6);
+ wpsielen += 6;
+
+ /* Model Number */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_MODEL_NUMBER);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = 0x31; /* character 1 */
+
+ /* Serial Number */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_SERIAL_NUMBER);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(ETH_ALEN);
+ wpsielen += 2;
+
+ /* Value: */
+ memcpy(wpsie + wpsielen, "123456" , ETH_ALEN);
+ wpsielen += ETH_ALEN;
+
+ /* Primary Device Type */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_PRIMARY_DEV_TYPE);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0008);
+ wpsielen += 2;
+
+ /* Value: */
+ /* Category ID */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
+ wpsielen += 2;
+
+ /* OUI */
+ *(__be32 *)(wpsie + wpsielen) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* Sub Category ID */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
+ wpsielen += 2;
+
+ /* Device Name */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(pwdinfo->device_name_len);
+ wpsielen += 2;
+
+ /* Value: */
+ if (pwdinfo->device_name_len) {
+ memcpy(wpsie + wpsielen, pwdinfo->device_name, pwdinfo->device_name_len);
+ wpsielen += pwdinfo->device_name_len;
+ }
+
+ /* Config Method */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_CONF_METHOD);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0002);
+ wpsielen += 2;
+
+ /* Value: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(pwdinfo->supported_wps_cm);
+ wpsielen += 2;
+
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, wpsielen, (unsigned char *)wpsie, &pattrib->pktlen);
+
+
+ p2pielen = build_probe_resp_p2p_ie(pwdinfo, pframe);
+ pframe += p2pielen;
+ pattrib->pktlen += p2pielen;
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+
+ return;
+}
+
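+/* Build a P2P probe request (directed to da when given, otherwise to the peer interface */
+/* address or broadcast), carrying WPS and P2P IEs; optionally wait for the TX ack. */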
+static int _issue_probereq_p2p(struct adapter *padapter, u8 *da, int wait_ack)
+{
+ int ret = _FAIL;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ unsigned char *mac;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 wpsie[255] = { 0x00 }, p2pie[255] = { 0x00 };
+ u16 wpsielen = 0, p2pielen = 0;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ mac = myid(&(padapter->eeprompriv));
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ if (da) {
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, da, ETH_ALEN);
+ } else {
+ if ((pwdinfo->p2p_info.scan_op_ch_only) || (pwdinfo->rx_invitereq_info.scan_op_ch_only)) {
+ /* These two flags are set only when the device is operating purely as a P2P client. */
+ memcpy(pwlanhdr->addr1, pwdinfo->p2p_peer_interface_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pwdinfo->p2p_peer_interface_addr, ETH_ALEN);
+ } else {
+ /* broadcast probe request frame */
+ memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, bc_addr, ETH_ALEN);
+ }
+ }
+ memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_PROBEREQ);
+
+ pframe += sizeof (struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof (struct rtw_ieee80211_hdr_3addr);
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ))
+ pframe = rtw_set_ie(pframe, _SSID_IE_, pwdinfo->tx_prov_disc_info.ssid.SsidLength, pwdinfo->tx_prov_disc_info.ssid.Ssid, &(pattrib->pktlen));
+ else
+ pframe = rtw_set_ie(pframe, _SSID_IE_, P2P_WILDCARD_SSID_LEN, pwdinfo->p2p_wildcard_ssid, &(pattrib->pktlen));
+
+ /* Use the OFDM rate in the P2P probe request frame. (6(B), 9(B), 12(B), 24(B), 36, 48, 54) */
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, pwdinfo->support_rate, &pattrib->pktlen);
+
+
+ /* WPS IE */
+ /* Noted by Albert 20110221 */
+ /* According to the WPS specification, all WPS attributes are encoded in big-endian byte order. */
+
+ wpsielen = 0;
+ /* WPS OUI */
+ *(__be32 *)(wpsie) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* WPS version */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_VER1);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0001);
+ wpsielen += 2;
+
+ /* Value: */
+ wpsie[wpsielen++] = WPS_VERSION_1; /* Version 1.0 */
+
+ if (pmlmepriv->wps_probe_req_ie == NULL) {
+ /* UUID-E */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_UUID_E);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0010);
+ wpsielen += 2;
+
+ /* Value: */
+ memcpy(wpsie + wpsielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ wpsielen += 0x10;
+
+ /* Config Method */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_CONF_METHOD);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0002);
+ wpsielen += 2;
+
+ /* Value: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(pwdinfo->supported_wps_cm);
+ wpsielen += 2;
+ }
+
+ /* Device Name */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(pwdinfo->device_name_len);
+ wpsielen += 2;
+
+ /* Value: */
+ memcpy(wpsie + wpsielen, pwdinfo->device_name, pwdinfo->device_name_len);
+ wpsielen += pwdinfo->device_name_len;
+
+ /* Primary Device Type */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_PRIMARY_DEV_TYPE);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0008);
+ wpsielen += 2;
+
+ /* Value: */
+ /* Category ID */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_PDT_CID_RTK_WIDI);
+ wpsielen += 2;
+
+ /* OUI */
+ *(__be32 *)(wpsie + wpsielen) = cpu_to_be32(WPSOUI);
+ wpsielen += 4;
+
+ /* Sub Category ID */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_PDT_SCID_RTK_DMP);
+ wpsielen += 2;
+
+ /* Device Password ID */
+ /* Type: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_ATTR_DEVICE_PWID);
+ wpsielen += 2;
+
+ /* Length: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(0x0002);
+ wpsielen += 2;
+
+ /* Value: */
+ *(__be16 *)(wpsie + wpsielen) = cpu_to_be16(WPS_DPID_REGISTRAR_SPEC); /* Registrar-specified */
+ wpsielen += 2;
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, wpsielen, (unsigned char *)wpsie, &pattrib->pktlen);
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20110221 */
+ /* According to the P2P Specification, the probe request frame should contain 5 P2P attributes */
+ /* 1. P2P Capability */
+ /* 2. P2P Device ID if this probe request wants to find the specific P2P device */
+ /* 3. Listen Channel */
+ /* 4. Extended Listen Timing */
+ /* 5. Operating Channel if this WiFi is working as the group owner now */
+
+ /* P2P Capability */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
+
+ /* Group Capability Bitmap, 1 byte */
+ if (pwdinfo->persistent_supported)
+ p2pie[p2pielen++] = P2P_GRPCAP_PERSISTENT_GROUP | DMP_P2P_GRPCAP_SUPPORT;
+ else
+ p2pie[p2pielen++] = DMP_P2P_GRPCAP_SUPPORT;
+
+ /* Listen Channel */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_LISTEN_CH;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51; /* Copy from SD7 */
+
+ /* Channel Number */
+ p2pie[p2pielen++] = pwdinfo->listen_channel; /* listen channel */
+
+
+ /* Extended Listen Timing */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_EX_LISTEN_TIMING;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0004);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Availability Period */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0xFFFF);
+ p2pielen += 2;
+
+ /* Availability Interval */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0xFFFF);
+ p2pielen += 2;
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ /* Operating Channel (if this WiFi is working as the group owner now) */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_OPERATING_CH;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0005);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Country String */
+ p2pie[p2pielen++] = 'X';
+ p2pie[p2pielen++] = 'X';
+
+ /* The third byte should be set to 0x04. */
+ /* Described in the "Operating Channel Attribute" section. */
+ p2pie[p2pielen++] = 0x04;
+
+ /* Operating Class */
+ p2pie[p2pielen++] = 0x51; /* Copy from SD7 */
+
+ /* Channel Number */
+ p2pie[p2pielen++] = pwdinfo->operating_channel; /* operating channel number */
+ }
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
+
+ if (pmlmepriv->wps_probe_req_ie != NULL) {
+ /* WPS IE */
+ memcpy(pframe, pmlmepriv->wps_probe_req_ie, pmlmepriv->wps_probe_req_ie_len);
+ pattrib->pktlen += pmlmepriv->wps_probe_req_ie_len;
+ pframe += pmlmepriv->wps_probe_req_ie_len;
+ }
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("issuing probe_req, tx_len=%d\n", pattrib->last_txcmdsz));
+
+ if (wait_ack) {
+ ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
+ } else {
+ dump_mgntframe(padapter, pmgntframe);
+ ret = _SUCCESS;
+ }
+
+exit:
+ return ret;
+}
+
+inline void issue_probereq_p2p(struct adapter *adapter, u8 *da)
+{
+ _issue_probereq_p2p(adapter, da, false);
+}
+
+int issue_probereq_p2p_ex(struct adapter *adapter, u8 *da, int try_cnt, int wait_ms)
+{
+ int ret;
+ int i = 0;
+ u32 start = rtw_get_current_time();
+
+ do {
+ ret = _issue_probereq_p2p(adapter, da, wait_ms > 0 ? true : false);
+
+ i++;
+
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
+ break;
+
+ if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
+ rtw_msleep_os(wait_ms);
+ } while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
+
+ if (ret != _FAIL) {
+ ret = _SUCCESS;
+ goto exit;
+ }
+
+ if (try_cnt && wait_ms) {
+ if (da)
+ DBG_88E(FUNC_ADPT_FMT" to %pM, ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(adapter), da, rtw_get_oper_ch(adapter),
+ ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
+ else
+ DBG_88E(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(adapter), rtw_get_oper_ch(adapter),
+ ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
+ }
+exit:
+ return ret;
+}
+
+#endif /* CONFIG_88EU_P2P */
+
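+/* Drop a retried public action frame whose sequence control (and dialog token, when */
+/* token >= 0) matches the previously cached values; otherwise update the cache. */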
+static s32 rtw_action_public_decache(union recv_frame *recv_frame, s32 token)
+{
+ struct adapter *adapter = recv_frame->u.hdr.adapter;
+ struct mlme_ext_priv *mlmeext = &(adapter->mlmeextpriv);
+ u8 *frame = recv_frame->u.hdr.rx_data;
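+ /* Rebuild the Sequence Control field: sequence number in the upper 12 bits, */
+ /* fragment number in the lower 4 bits. */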
+ u16 seq_ctrl = ((recv_frame->u.hdr.attrib.seq_num&0xffff) << 4) |
+ (recv_frame->u.hdr.attrib.frag_num & 0xf);
+
+ if (GetRetry(frame)) {
+ if (token >= 0) {
+ if ((seq_ctrl == mlmeext->action_public_rxseq) && (token == mlmeext->action_public_dialog_token)) {
+ DBG_88E(FUNC_ADPT_FMT" seq_ctrl = 0x%x, rxseq = 0x%x, token:%d\n",
+ FUNC_ADPT_ARG(adapter), seq_ctrl, mlmeext->action_public_rxseq, token);
+ return _FAIL;
+ }
+ } else {
+ if (seq_ctrl == mlmeext->action_public_rxseq) {
+ DBG_88E(FUNC_ADPT_FMT" seq_ctrl = 0x%x, rxseq = 0x%x\n",
+ FUNC_ADPT_ARG(adapter), seq_ctrl, mlmeext->action_public_rxseq);
+ return _FAIL;
+ }
+ }
+ }
+
+ mlmeext->action_public_rxseq = seq_ctrl;
+
+ if (token >= 0)
+ mlmeext->action_public_dialog_token = token;
+
+ return _SUCCESS;
+}
+
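+/* Handle a received P2P public action frame: decache duplicates, then dispatch on the */
+/* OUI subtype (GO negotiation, invitation, device discovery, provision discovery). */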
+static unsigned int on_action_public_p2p(union recv_frame *precv_frame)
+{
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *frame_body;
+ u8 dialogToken = 0;
+#ifdef CONFIG_88EU_P2P
+ struct adapter *padapter = precv_frame->u.hdr.adapter;
+ uint len = precv_frame->u.hdr.len;
+ u8 *p2p_ie;
+ u32 p2p_ielen;
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 result = P2P_STATUS_SUCCESS;
+ u8 empty_addr[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+#endif /* CONFIG_88EU_P2P */
+
+ frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+
+ dialogToken = frame_body[7];
+
+ if (rtw_action_public_decache(precv_frame, dialogToken) == _FAIL)
+ return _FAIL;
+
+#ifdef CONFIG_88EU_P2P
+ _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey);
+ /* Do nothing if the driver doesn't enable the P2P function. */
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE))
+ return _SUCCESS;
+
+ len -= sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ switch (frame_body[6]) { /* OUI Subtype */
+ case P2P_GO_NEGO_REQ:
+ DBG_88E("[%s] Got GO Nego Req Frame\n", __func__);
+ _rtw_memset(&pwdinfo->groupid_info, 0x00, sizeof(struct group_id_info));
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_RX_PROVISION_DIS_REQ))
+ rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_FAIL)) {
+ /* Commented by Albert 20110526 */
+ /* In this case, the previous negotiation failure state has not been reset yet. */
+ _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
+ /* Restore the previous p2p state */
+ rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
+ DBG_88E("[%s] Restore the previous p2p state to %d\n", __func__, rtw_p2p_state(pwdinfo));
+ }
+
+ /* Commented by Kurt 20110902 */
+ /* Guard against duplicate prov disc requests so that pre_p2p_state is not overwritten. */
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING))
+ rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
+
+ /* Commented by Kurt 20120113 */
+ /* Get peer_dev_addr here if peer doesn't issue prov_disc frame. */
+ if (_rtw_memcmp(pwdinfo->rx_prov_disc_info.peerDevAddr, empty_addr, ETH_ALEN))
+ memcpy(pwdinfo->rx_prov_disc_info.peerDevAddr, GetAddr2Ptr(pframe), ETH_ALEN);
+
+ result = process_p2p_group_negotation_req(pwdinfo, frame_body, len);
+ issue_p2p_GO_response(padapter, GetAddr2Ptr(pframe), frame_body, len, result);
+
+ /* Commented by Albert 20110718 */
+ /* Whether negotiation is still in progress or has failed, the driver should arm the restore-P2P-state timer. */
+ _set_timer(&pwdinfo->restore_p2p_state_timer, 5000);
+ break;
+ case P2P_GO_NEGO_RESP:
+ DBG_88E("[%s] Got GO Nego Resp Frame\n", __func__);
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING)) {
+ /* Commented by Albert 20110425 */
+ /* The restore timer was armed when the negotiation request frame was issued in rtw_p2p_connect(). */
+ _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
+ pwdinfo->nego_req_info.benable = false;
+ result = process_p2p_group_negotation_resp(pwdinfo, frame_body, len);
+ issue_p2p_GO_confirm(pwdinfo->padapter, GetAddr2Ptr(pframe), result);
+ if (P2P_STATUS_SUCCESS == result) {
+ if (rtw_p2p_role(pwdinfo) == P2P_ROLE_CLIENT) {
+ pwdinfo->p2p_info.operation_ch[0] = pwdinfo->peer_operating_ch;
+ pwdinfo->p2p_info.scan_op_ch_only = 1;
+ _set_timer(&pwdinfo->reset_ch_sitesurvey2, P2P_RESET_SCAN_CH);
+ }
+ }
+ /* Reset the dialog token for group negotiation frames. */
+ pwdinfo->negotiation_dialog_token = 1;
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_FAIL))
+ _set_timer(&pwdinfo->restore_p2p_state_timer, 5000);
+ } else {
+ DBG_88E("[%s] Skipped GO Nego Resp Frame (p2p_state != P2P_STATE_GONEGO_ING)\n", __func__);
+ }
+ break;
+ case P2P_GO_NEGO_CONF:
+ DBG_88E("[%s] Got GO Nego Confirm Frame\n", __func__);
+ result = process_p2p_group_negotation_confirm(pwdinfo, frame_body, len);
+ if (P2P_STATUS_SUCCESS == result) {
+ if (rtw_p2p_role(pwdinfo) == P2P_ROLE_CLIENT) {
+ pwdinfo->p2p_info.operation_ch[0] = pwdinfo->peer_operating_ch;
+ pwdinfo->p2p_info.scan_op_ch_only = 1;
+ _set_timer(&pwdinfo->reset_ch_sitesurvey2, P2P_RESET_SCAN_CH);
+ }
+ }
+ break;
+ case P2P_INVIT_REQ:
+ /* Added by Albert 2010/10/05 */
+ /* Received the P2P Invite Request frame. */
+
+ DBG_88E("[%s] Got invite request frame!\n", __func__);
+ p2p_ie = rtw_get_p2p_ie(frame_body + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, NULL, &p2p_ielen);
+ if (p2p_ie) {
+ /* Parse the necessary information from the P2P Invitation Request frame. */
+ /* For example: the MAC address of the device sending this P2P Invitation Request frame. */
+ u32 attr_contentlen = 0;
+ u8 status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
+ struct group_id_info group_id;
+ u8 invitation_flag = 0;
+
+ rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_INVITATION_FLAGS, &invitation_flag, &attr_contentlen);
+ if (attr_contentlen) {
+ rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_BSSID, pwdinfo->p2p_peer_interface_addr, &attr_contentlen);
+ /* Commented by Albert 20120510 */
+ /* Copy the address into pwdinfo->p2p_peer_interface_addr so that the WFD UI (or Sigma) */
+ /* can query the peer interface address with the following command: */
+ /* #> iwpriv wlan0 p2p_get peer_ifa */
+ /* With the peer interface address, Sigma can then locate the correct wpa_supplicant conf file. */
+
+ if (attr_contentlen) {
+ DBG_88E("[%s] GO's BSSID = %.2X %.2X %.2X %.2X %.2X %.2X\n", __func__,
+ pwdinfo->p2p_peer_interface_addr[0], pwdinfo->p2p_peer_interface_addr[1],
+ pwdinfo->p2p_peer_interface_addr[2], pwdinfo->p2p_peer_interface_addr[3],
+ pwdinfo->p2p_peer_interface_addr[4], pwdinfo->p2p_peer_interface_addr[5]);
+ }
+
+ if (invitation_flag & P2P_INVITATION_FLAGS_PERSISTENT) {
+ /* Re-invoke the persistent group. */
+
+ _rtw_memset(&group_id, 0x00, sizeof(struct group_id_info));
+ rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, (u8 *)&group_id, &attr_contentlen);
+ if (attr_contentlen) {
+ if (_rtw_memcmp(group_id.go_device_addr, myid(&padapter->eeprompriv), ETH_ALEN)) {
+ /* The p2p device sending this p2p invitation request wants this Wi-Fi device to be the persistent GO. */
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_GO);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ status_code = P2P_STATUS_SUCCESS;
+ } else {
+ /* The p2p device sending this p2p invitation request wants to be the persistent GO. */
+ if (is_matched_in_profilelist(pwdinfo->p2p_peer_interface_addr, &pwdinfo->profileinfo[0])) {
+ u8 operatingch_info[5] = { 0x00 };
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_OPERATING_CH, operatingch_info, &attr_contentlen)) {
+ if (rtw_ch_set_search_ch(padapter->mlmeextpriv.channel_set, (u32)operatingch_info[4])) {
+ /* The operating channel is acceptable for this device. */
+ pwdinfo->rx_invitereq_info.operation_ch[0] = operatingch_info[4];
+ pwdinfo->rx_invitereq_info.scan_op_ch_only = 1;
+ _set_timer(&pwdinfo->reset_ch_sitesurvey, P2P_RESET_SCAN_CH);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_MATCH);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ status_code = P2P_STATUS_SUCCESS;
+ } else {
+ /* The operating channel isn't supported by this device. */
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_DISMATCH);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ status_code = P2P_STATUS_FAIL_NO_COMMON_CH;
+ _set_timer(&pwdinfo->restore_p2p_state_timer, 3000);
+ }
+ } else {
+ /* Commented by Albert 20121130 */
+ /* Intel uses a different P2P IE to store the operating channel information. */
+ /* Workaround for Intel WiDi 3.5 */
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_MATCH);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ status_code = P2P_STATUS_SUCCESS;
+ }
+ } else {
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_DISMATCH);
+ status_code = P2P_STATUS_FAIL_UNKNOWN_P2PGROUP;
+ }
+ }
+ } else {
+ DBG_88E("[%s] P2P Group ID Attribute NOT FOUND!\n", __func__);
+ status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
+ }
+ } else {
+ /* Received the invitation to join a P2P group. */
+
+ _rtw_memset(&group_id, 0x00, sizeof(struct group_id_info));
+ rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, (u8 *)&group_id, &attr_contentlen);
+ if (attr_contentlen) {
+ if (_rtw_memcmp(group_id.go_device_addr, myid(&padapter->eeprompriv), ETH_ALEN)) {
+ /* In this case, the GO can't be myself. */
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_DISMATCH);
+ status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
+ } else {
+ /* The p2p device sending this p2p invitation request wants to join an existing P2P group */
+ /* Commented by Albert 2012/06/28 */
+ /* In this case, this Wi-Fi device should use the iwpriv command to get the peer device address. */
+ /* The peer device address should be the destination address for the provisioning discovery request. */
+ /* Then, this Wi-Fi device should use the iwpriv command to get the peer interface address. */
+ /* The peer interface address should be used as the WPS MAC address. */
+ memcpy(pwdinfo->p2p_peer_device_addr, group_id.go_device_addr, ETH_ALEN);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RECV_INVITE_REQ_JOIN);
+ status_code = P2P_STATUS_SUCCESS;
+ }
+ } else {
+ DBG_88E("[%s] P2P Group ID Attribute NOT FOUND!\n", __func__);
+ status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
+ }
+ }
+ } else {
+ DBG_88E("[%s] P2P Invitation Flags Attribute NOT FOUND!\n", __func__);
+ status_code = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
+ }
+
+ DBG_88E("[%s] status_code = %d\n", __func__, status_code);
+
+ pwdinfo->inviteresp_info.token = frame_body[7];
+ issue_p2p_invitation_response(padapter, GetAddr2Ptr(pframe), pwdinfo->inviteresp_info.token, status_code);
+ }
+ break;
+ case P2P_INVIT_RESP: {
+ u8 attr_content = 0x00;
+ u32 attr_contentlen = 0;
+
+ DBG_88E("[%s] Got invite response frame!\n", __func__);
+ _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
+ p2p_ie = rtw_get_p2p_ie(frame_body + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, NULL, &p2p_ielen);
+ if (p2p_ie) {
+ rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_STATUS, &attr_content, &attr_contentlen);
+
+ if (attr_contentlen == 1) {
+ DBG_88E("[%s] Status = %d\n", __func__, attr_content);
+ pwdinfo->invitereq_info.benable = false;
+
+ if (attr_content == P2P_STATUS_SUCCESS) {
+ if (_rtw_memcmp(pwdinfo->invitereq_info.go_bssid, myid(&padapter->eeprompriv), ETH_ALEN)) {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ } else {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ }
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INVITE_RESP_OK);
+ } else {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INVITE_RESP_FAIL);
+ }
+ } else {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INVITE_RESP_FAIL);
+ }
+ } else {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INVITE_RESP_FAIL);
+ }
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_RX_INVITE_RESP_FAIL))
+ _set_timer(&pwdinfo->restore_p2p_state_timer, 5000);
+ break;
+ }
+ case P2P_DEVDISC_REQ:
+ process_p2p_devdisc_req(pwdinfo, pframe, len);
+ break;
+ case P2P_DEVDISC_RESP:
+ process_p2p_devdisc_resp(pwdinfo, pframe, len);
+ break;
+ case P2P_PROVISION_DISC_REQ:
+ DBG_88E("[%s] Got Provisioning Discovery Request Frame\n", __func__);
+ process_p2p_provdisc_req(pwdinfo, pframe, len);
+ memcpy(pwdinfo->rx_prov_disc_info.peerDevAddr, GetAddr2Ptr(pframe), ETH_ALEN);
+
+ /* 20110902 Kurt */
+ /* Guard against duplicate prov disc requests so that pre_p2p_state is not overwritten. */
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_RX_PROVISION_DIS_REQ))
+ rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
+
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_PROVISION_DIS_REQ);
+ _set_timer(&pwdinfo->restore_p2p_state_timer, P2P_PROVISION_TIMEOUT);
+ break;
+ case P2P_PROVISION_DISC_RESP:
+ /* Commented by Albert 20110707 */
+ /* Should we check the pwdinfo->tx_prov_disc_info.bsent flag here?? */
+ DBG_88E("[%s] Got Provisioning Discovery Response Frame\n", __func__);
+ /* Commented by Albert 20110426 */
+ /* The restore timer was armed when the provisioning request frame was issued in rtw_p2p_prov_disc(). */
+ _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_PROVISION_DIS_RSP);
+ process_p2p_provdisc_resp(pwdinfo, pframe);
+ _set_timer(&pwdinfo->restore_p2p_state_timer, P2P_PROVISION_TIMEOUT);
+ break;
+ }
+#endif /* CONFIG_88EU_P2P */
+
+ return _SUCCESS;
+}
+
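+/* Vendor-specific public action frames: only the P2P OUI is handled here. */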
+static unsigned int on_action_public_vendor(union recv_frame *precv_frame)
+{
+ unsigned int ret = _FAIL;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ if (_rtw_memcmp(frame_body + 2, P2P_OUI, 4) == true) {
+ ret = on_action_public_p2p(precv_frame);
+ }
+
+ return ret;
+}
+
+static unsigned int on_action_public_default(union recv_frame *precv_frame, u8 action)
+{
+ unsigned int ret = _FAIL;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+ u8 token;
+
+ token = frame_body[2];
+
+ if (rtw_action_public_decache(precv_frame, token) == _FAIL)
+ goto exit;
+
+ ret = _SUCCESS;
+
+exit:
+ return ret;
+}
+
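+/* Entry point for received public action frames: check that the RA matches our own address, */
+/* then dispatch vendor-specific actions separately from the remaining public actions. */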
+unsigned int on_action_public(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ unsigned int ret = _FAIL;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ u8 *frame_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+ u8 category, action;
+
+ /* check RA matches or not */
+ if (!_rtw_memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe), ETH_ALEN))
+ goto exit;
+
+ category = frame_body[0];
+ if (category != RTW_WLAN_CATEGORY_PUBLIC)
+ goto exit;
+
+ action = frame_body[1];
+ switch (action) {
+ case ACT_PUBLIC_VENDOR:
+ ret = on_action_public_vendor(precv_frame);
+ break;
+ default:
+ ret = on_action_public_default(precv_frame, action);
+ break;
+ }
+
+exit:
+ return ret;
+}
+
+unsigned int OnAction_ht(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ return _SUCCESS;
+}
+
+unsigned int OnAction_wmm(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ return _SUCCESS;
+}
+
+unsigned int OnAction_p2p(struct adapter *padapter, union recv_frame *precv_frame)
+{
+#ifdef CONFIG_88EU_P2P
+ u8 *frame_body;
+ u8 category, OUI_Subtype;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ uint len = precv_frame->u.hdr.len;
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+
+ DBG_88E("%s\n", __func__);
+
+ /* check RA matches or not */
+ if (!_rtw_memcmp(myid(&(padapter->eeprompriv)), GetAddr1Ptr(pframe), ETH_ALEN))/* for if1, sta/ap mode */
+ return _SUCCESS;
+
+ frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+
+ category = frame_body[0];
+ if (category != RTW_WLAN_CATEGORY_P2P)
+ return _SUCCESS;
+
+ if (be32_to_cpu(*((__be32 *)(frame_body + 1))) != P2POUI)
+ return _SUCCESS;
+
+ len -= sizeof(struct rtw_ieee80211_hdr_3addr);
+ OUI_Subtype = frame_body[5];
+
+ switch (OUI_Subtype) {
+ case P2P_NOTICE_OF_ABSENCE:
+ break;
+ case P2P_PRESENCE_REQUEST:
+ process_p2p_presence_req(pwdinfo, pframe, len);
+ break;
+ case P2P_PRESENCE_RESPONSE:
+ break;
+ case P2P_GO_DISC_REQUEST:
+ break;
+ default:
+ break;
+ }
+#endif /* CONFIG_88EU_P2P */
+ return _SUCCESS;
+}
+
+unsigned int OnAction(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ int i;
+ unsigned char category;
+ struct action_handler *ptable;
+ unsigned char *frame_body;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+
+ frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+
+ category = frame_body[0];
+
+ for (i = 0; i < sizeof(OnAction_tbl)/sizeof(struct action_handler); i++) {
+ ptable = &OnAction_tbl[i];
+ if (category == ptable->num)
+ ptable->func(padapter, precv_frame);
+ }
+ return _SUCCESS;
+}
+
+unsigned int DoReserved(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ return _SUCCESS;
+}
+
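+/* Allocate a management xmit frame together with an extension xmit buffer; if the buffer */
+/* allocation fails, the frame is released again and NULL is returned. */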
+struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv)
+{
+ struct xmit_frame *pmgntframe;
+ struct xmit_buf *pxmitbuf;
+
+ pmgntframe = rtw_alloc_xmitframe(pxmitpriv);
+ if (pmgntframe == NULL) {
+ DBG_88E("%s, alloc xmitframe fail\n", __func__);
+ return NULL;
+ }
+
+ pxmitbuf = rtw_alloc_xmitbuf_ext(pxmitpriv);
+ if (pxmitbuf == NULL) {
+ DBG_88E("%s, alloc xmitbuf fail\n", __func__);
+ rtw_free_xmitframe(pxmitpriv, pmgntframe);
+ return NULL;
+ }
+ pmgntframe->frame_tag = MGNT_FRAMETAG;
+ pmgntframe->pxmitbuf = pxmitbuf;
+ pmgntframe->buf_addr = pxmitbuf->pbuf;
+ pxmitbuf->priv_data = pmgntframe;
+ return pmgntframe;
+}
+
+/****************************************************************************
+
+Following are some TX functions for WiFi MLME
+
+*****************************************************************************/
+
+void update_mgnt_tx_rate(struct adapter *padapter, u8 rate)
+{
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+
+ pmlmeext->tx_rate = rate;
+ DBG_88E("%s(): rate = %x\n", __func__, rate);
+}
+
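+/* Reset the packet attributes to the defaults used for management frames: 24-byte header, */
+/* qsel 0x12, basic rate set (raid 6 for 11b, raid 5 for a/g), no privacy, no QoS/HT, */
+/* 20 MHz bandwidth, retry control enabled. */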
+void update_mgntframe_attrib(struct adapter *padapter, struct pkt_attrib *pattrib)
+{
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+
+ _rtw_memset((u8 *)(pattrib), 0, sizeof(struct pkt_attrib));
+
+ pattrib->hdrlen = 24;
+ pattrib->nr_frags = 1;
+ pattrib->priority = 7;
+ pattrib->mac_id = 0;
+ pattrib->qsel = 0x12;
+
+ pattrib->pktlen = 0;
+
+ if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
+ pattrib->raid = 6;/* b mode */
+ else
+ pattrib->raid = 5;/* a/g mode */
+
+ pattrib->encrypt = _NO_PRIVACY_;
+ pattrib->bswenc = false;
+
+ pattrib->qos_en = false;
+ pattrib->ht_en = false;
+ pattrib->bwmode = HT_CHANNEL_WIDTH_20;
+ pattrib->ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ pattrib->sgi = false;
+
+ pattrib->seqnum = pmlmeext->mgnt_seq;
+
+ pattrib->retry_ctrl = true;
+}
+
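+/* Hand a management frame to the HAL for transmission unless the device has been removed */
+/* or the driver is stopping. */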
+void dump_mgntframe(struct adapter *padapter, struct xmit_frame *pmgntframe)
+{
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ return;
+
+ rtw_hal_mgnt_xmit(padapter, pmgntframe);
+}
+
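+/* Transmit a management frame and wait on a submit context until it completes or the */
+/* timeout expires. */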
+s32 dump_mgntframe_and_wait(struct adapter *padapter, struct xmit_frame *pmgntframe, int timeout_ms)
+{
+ s32 ret = _FAIL;
+ struct xmit_buf *pxmitbuf = pmgntframe->pxmitbuf;
+ struct submit_ctx sctx;
+
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ return ret;
+
+ rtw_sctx_init(&sctx, timeout_ms);
+ pxmitbuf->sctx = &sctx;
+
+ ret = rtw_hal_mgnt_xmit(padapter, pmgntframe);
+
+ if (ret == _SUCCESS)
+ ret = rtw_sctx_wait(&sctx);
+
+ return ret;
+}
+
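+/* Transmit a management frame with ack reporting enabled and wait up to 500 ms for the */
+/* TX ack, serialized by the ack_tx mutex. */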
+s32 dump_mgntframe_and_wait_ack(struct adapter *padapter, struct xmit_frame *pmgntframe)
+{
+ s32 ret = _FAIL;
+ u32 timeout_ms = 500;/* 500ms */
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ return -1;
+
+ _enter_critical_mutex(&pxmitpriv->ack_tx_mutex, NULL);
+ pxmitpriv->ack_tx = true;
+
+ pmgntframe->ack_report = 1;
+ if (rtw_hal_mgnt_xmit(padapter, pmgntframe) == _SUCCESS) {
+ ret = rtw_ack_tx_wait(pxmitpriv, timeout_ms);
+ }
+
+ pxmitpriv->ack_tx = false;
+ _exit_critical_mutex(&pxmitpriv->ack_tx_mutex, NULL);
+
+ return ret;
+}
+
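+/* Rewrite the SSID IE according to hidden_ssid_mode: mode 1 strips the SSID bytes and */
+/* shifts the following IEs forward, mode 2 zeroes the SSID in place; the return value is */
+/* the resulting change in total IE length. */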
+static int update_hidden_ssid(u8 *ies, u32 ies_len, u8 hidden_ssid_mode)
+{
+ u8 *ssid_ie;
+ int ssid_len_ori;
+ int len_diff = 0;
+
+ ssid_ie = rtw_get_ie(ies, WLAN_EID_SSID, &ssid_len_ori, ies_len);
+
+ if (ssid_ie && ssid_len_ori > 0) {
+ switch (hidden_ssid_mode) {
+ case 1: {
+ u8 *next_ie = ssid_ie + 2 + ssid_len_ori;
+ u32 remain_len = 0;
+
+ remain_len = ies_len - (next_ie - ies);
+
+ ssid_ie[1] = 0;
+ memcpy(ssid_ie+2, next_ie, remain_len);
+ len_diff -= ssid_len_ori;
+
+ break;
+ }
+ case 2:
+ _rtw_memset(&ssid_ie[2], 0, ssid_len_ori);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return len_diff;
+}
+
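+/* Build and transmit a beacon: in AP mode the stored network IEs are reused (with WPS */
+/* device info and a P2P IE appended for a GO), otherwise the ad-hoc fields are built here. */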
+void issue_beacon(struct adapter *padapter, int timeout_ms)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ unsigned int rate_len;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+#if defined(CONFIG_88EU_AP_MODE)
+ unsigned long irqL;
+#endif /* if defined (CONFIG_88EU_AP_MODE) */
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+#endif /* CONFIG_88EU_P2P */
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL) {
+ DBG_88E("%s, alloc mgnt frame fail\n", __func__);
+ return;
+ }
+#if defined (CONFIG_88EU_AP_MODE)
+ _enter_critical_bh(&pmlmepriv->bcn_update_lock, &irqL);
+#endif /* if defined (CONFIG_88EU_AP_MODE) */
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+ pattrib->qsel = 0x10;
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(cur_network), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
+ /* pmlmeext->mgnt_seq++; */
+ SetFrameSubType(pframe, WIFI_BEACON);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof (struct rtw_ieee80211_hdr_3addr);
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
+#ifdef CONFIG_88EU_P2P
+ /* for P2P : Primary Device Type & Device Name */
+ u32 wpsielen = 0, insert_len = 0;
+ u8 *wpsie = NULL;
+ wpsie = rtw_get_wps_ie(cur_network->IEs+_FIXED_IE_LENGTH_, cur_network->IELength-_FIXED_IE_LENGTH_, NULL, &wpsielen);
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO) && wpsie && wpsielen > 0) {
+ uint wps_offset, remainder_ielen;
+ u8 *premainder_ie, *pframe_wscie;
+
+ wps_offset = (uint)(wpsie - cur_network->IEs);
+ premainder_ie = wpsie + wpsielen;
+ remainder_ielen = cur_network->IELength - wps_offset - wpsielen;
+ pframe_wscie = pframe + wps_offset;
+ memcpy(pframe, cur_network->IEs, wps_offset+wpsielen);
+ pframe += (wps_offset + wpsielen);
+ pattrib->pktlen += (wps_offset + wpsielen);
+
+ /* now pframe is end of wsc ie, insert Primary Device Type & Device Name */
+ /* Primary Device Type */
+ /* Type: */
+ *(__be16 *)(pframe + insert_len) = cpu_to_be16(WPS_ATTR_PRIMARY_DEV_TYPE);
+ insert_len += 2;
+
+ /* Length: */
+ *(__be16 *)(pframe + insert_len) = cpu_to_be16(0x0008);
+ insert_len += 2;
+
+ /* Value: */
+ /* Category ID */
+ *(__be16 *)(pframe + insert_len) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
+ insert_len += 2;
+
+ /* OUI */
+ *(__be32 *)(pframe + insert_len) = cpu_to_be32(WPSOUI);
+ insert_len += 4;
+
+ /* Sub Category ID */
+ *(__be16 *)(pframe + insert_len) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
+ insert_len += 2;
+
+ /* Device Name */
+ /* Type: */
+ *(__be16 *)(pframe + insert_len) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
+ insert_len += 2;
+
+ /* Length: */
+ *(__be16 *)(pframe + insert_len) = cpu_to_be16(pwdinfo->device_name_len);
+ insert_len += 2;
+
+ /* Value: */
+ memcpy(pframe + insert_len, pwdinfo->device_name, pwdinfo->device_name_len);
+ insert_len += pwdinfo->device_name_len;
+
+ /* update wsc ie length */
+ *(pframe_wscie+1) = (wpsielen-2) + insert_len;
+
+ /* pframe move to end */
+ pframe += insert_len;
+ pattrib->pktlen += insert_len;
+
+ /* copy remainder_ie to pframe */
+ memcpy(pframe, premainder_ie, remainder_ielen);
+ pframe += remainder_ielen;
+ pattrib->pktlen += remainder_ielen;
+ } else
+#endif /* CONFIG_88EU_P2P */
+ {
+ int len_diff;
+ memcpy(pframe, cur_network->IEs, cur_network->IELength);
+ len_diff = update_hidden_ssid(
+ pframe+_BEACON_IE_OFFSET_
+ , cur_network->IELength-_BEACON_IE_OFFSET_
+ , pmlmeinfo->hidden_ssid_mode
+ );
+ pframe += (cur_network->IELength+len_diff);
+ pattrib->pktlen += (cur_network->IELength+len_diff);
+ }
+
+ {
+ u8 *wps_ie;
+ uint wps_ielen;
+ u8 sr = 0;
+ wps_ie = rtw_get_wps_ie(pmgntframe->buf_addr+TXDESC_OFFSET+sizeof (struct rtw_ieee80211_hdr_3addr)+_BEACON_IE_OFFSET_,
+ pattrib->pktlen-sizeof (struct rtw_ieee80211_hdr_3addr)-_BEACON_IE_OFFSET_, NULL, &wps_ielen);
+ if (wps_ie && wps_ielen > 0)
+ rtw_get_wps_attr_content(wps_ie, wps_ielen, WPS_ATTR_SELECTED_REGISTRAR, (u8 *)(&sr), NULL);
+ if (sr != 0)
+ set_fwstate(pmlmepriv, WIFI_UNDER_WPS);
+ else
+ _clr_fwstate_(pmlmepriv, WIFI_UNDER_WPS);
+ }
+
+#ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ u32 len;
+ len = build_beacon_p2p_ie(pwdinfo, pframe);
+
+ pframe += len;
+ pattrib->pktlen += len;
+ }
+#endif /* CONFIG_88EU_P2P */
+
+ goto _issue_bcn;
+ }
+
+ /* below for ad-hoc mode */
+
+ /* timestamp will be inserted by hardware */
+ pframe += 8;
+ pattrib->pktlen += 8;
+
+ /* beacon interval: 2 bytes */
+
+ memcpy(pframe, (unsigned char *)(rtw_get_beacon_interval_from_ie(cur_network->IEs)), 2);
+
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* capability info: 2 bytes */
+
+ memcpy(pframe, (unsigned char *)(rtw_get_capability_from_ie(cur_network->IEs)), 2);
+
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* SSID */
+ pframe = rtw_set_ie(pframe, _SSID_IE_, cur_network->Ssid.SsidLength, cur_network->Ssid.Ssid, &pattrib->pktlen);
+
+ /* supported rates... */
+ rate_len = rtw_get_rateset_len(cur_network->SupportedRates);
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, ((rate_len > 8) ? 8 : rate_len), cur_network->SupportedRates, &pattrib->pktlen);
+
+ /* DS parameter set */
+ pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, (unsigned char *)&(cur_network->Configuration.DSConfig), &pattrib->pktlen);
+
+ {
+ u8 erpinfo = 0;
+ u32 ATIMWindow;
+ /* IBSS Parameter Set... */
+ ATIMWindow = 0;
+ pframe = rtw_set_ie(pframe, _IBSS_PARA_IE_, 2, (unsigned char *)(&ATIMWindow), &pattrib->pktlen);
+
+ /* ERP IE */
+ pframe = rtw_set_ie(pframe, _ERPINFO_IE_, 1, &erpinfo, &pattrib->pktlen);
+ }
+
+ /* EXTENDED SUPPORTED RATE */
+ if (rate_len > 8)
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (rate_len - 8), (cur_network->SupportedRates + 8), &pattrib->pktlen);
+ /* todo:HT for adhoc */
+_issue_bcn:
+
+#if defined (CONFIG_88EU_AP_MODE)
+ pmlmepriv->update_bcn = false;
+
+ _exit_critical_bh(&pmlmepriv->bcn_update_lock, &irqL);
+#endif /* if defined (CONFIG_88EU_AP_MODE) */
+
+ if ((pattrib->pktlen + TXDESC_SIZE) > 512) {
+ DBG_88E("beacon frame too large\n");
+ return;
+ }
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ /* DBG_88E("issue bcn_sz=%d\n", pattrib->last_txcmdsz); */
+ if (timeout_ms > 0)
+ dump_mgntframe_and_wait(padapter, pmgntframe, timeout_ms);
+ else
+ dump_mgntframe(padapter, pmgntframe);
+}
+
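+/* Build and transmit a probe response; in AP mode a configured wps_probe_resp_ie replaces */
+/* the stored WPS IE, and a P2P GO appends its P2P IE for valid P2P probe requests. */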
+void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p_probereq)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ unsigned char *mac, *bssid;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+#if defined (CONFIG_88EU_AP_MODE)
+ u8 *pwps_ie;
+ uint wps_ielen;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+#endif /* if defined (CONFIG_88EU_AP_MODE) */
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ unsigned int rate_len;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+#endif /* CONFIG_88EU_P2P */
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL) {
+ DBG_88E("%s, alloc mgnt frame fail\n", __func__);
+ return;
+ }
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ mac = myid(&(padapter->eeprompriv));
+ bssid = cur_network->MacAddress;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, bssid, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(fctrl, WIFI_PROBERSP);
+
+ pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = pattrib->hdrlen;
+ pframe += pattrib->hdrlen;
+
+ if (cur_network->IELength > MAX_IE_SZ)
+ return;
+
+#if defined(CONFIG_88EU_AP_MODE)
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
+ pwps_ie = rtw_get_wps_ie(cur_network->IEs+_FIXED_IE_LENGTH_, cur_network->IELength-_FIXED_IE_LENGTH_, NULL, &wps_ielen);
+
+ /* insert & update wps_probe_resp_ie */
+ if ((pmlmepriv->wps_probe_resp_ie != NULL) && pwps_ie && (wps_ielen > 0)) {
+ uint wps_offset, remainder_ielen;
+ u8 *premainder_ie;
+
+ wps_offset = (uint)(pwps_ie - cur_network->IEs);
+
+ premainder_ie = pwps_ie + wps_ielen;
+
+ remainder_ielen = cur_network->IELength - wps_offset - wps_ielen;
+
+ memcpy(pframe, cur_network->IEs, wps_offset);
+ pframe += wps_offset;
+ pattrib->pktlen += wps_offset;
+
+ wps_ielen = (uint)pmlmepriv->wps_probe_resp_ie[1];/* to get ie data len */
+ if ((wps_offset+wps_ielen+2) <= MAX_IE_SZ) {
+ memcpy(pframe, pmlmepriv->wps_probe_resp_ie, wps_ielen+2);
+ pframe += wps_ielen+2;
+ pattrib->pktlen += wps_ielen+2;
+ }
+
+ if ((wps_offset+wps_ielen+2+remainder_ielen) <= MAX_IE_SZ) {
+ memcpy(pframe, premainder_ie, remainder_ielen);
+ pframe += remainder_ielen;
+ pattrib->pktlen += remainder_ielen;
+ }
+ } else {
+ memcpy(pframe, cur_network->IEs, cur_network->IELength);
+ pframe += cur_network->IELength;
+ pattrib->pktlen += cur_network->IELength;
+ }
+ } else
+#endif
+ {
+ /* timestamp will be inserted by hardware */
+ pframe += 8;
+ pattrib->pktlen += 8;
+
+ /* beacon interval: 2 bytes */
+
+ memcpy(pframe, (unsigned char *)(rtw_get_beacon_interval_from_ie(cur_network->IEs)), 2);
+
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* capability info: 2 bytes */
+
+ memcpy(pframe, (unsigned char *)(rtw_get_capability_from_ie(cur_network->IEs)), 2);
+
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* below for ad-hoc mode */
+
+ /* SSID */
+ pframe = rtw_set_ie(pframe, _SSID_IE_, cur_network->Ssid.SsidLength, cur_network->Ssid.Ssid, &pattrib->pktlen);
+
+ /* supported rates... */
+ rate_len = rtw_get_rateset_len(cur_network->SupportedRates);
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, ((rate_len > 8) ? 8 : rate_len), cur_network->SupportedRates, &pattrib->pktlen);
+
+ /* DS parameter set */
+ pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, (unsigned char *)&(cur_network->Configuration.DSConfig), &pattrib->pktlen);
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) {
+ u8 erpinfo = 0;
+ u32 ATIMWindow;
+ /* IBSS Parameter Set... */
+ /* ATIMWindow = cur->Configuration.ATIMWindow; */
+ ATIMWindow = 0;
+ pframe = rtw_set_ie(pframe, _IBSS_PARA_IE_, 2, (unsigned char *)(&ATIMWindow), &pattrib->pktlen);
+
+ /* ERP IE */
+ pframe = rtw_set_ie(pframe, _ERPINFO_IE_, 1, &erpinfo, &pattrib->pktlen);
+ }
+
+
+ /* EXTENDED SUPPORTED RATE */
+ if (rate_len > 8)
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (rate_len - 8), (cur_network->SupportedRates + 8), &pattrib->pktlen);
+ /* todo:HT for adhoc */
+ }
+
+#ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO) && is_valid_p2p_probereq) {
+ u32 len;
+ len = build_probe_resp_p2p_ie(pwdinfo, pframe);
+
+ pframe += len;
+ pattrib->pktlen += len;
+ }
+#endif /* CONFIG_88EU_P2P */
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+
+ return;
+}
+
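+/* Build a probe request (unicast when da is given, broadcast otherwise) with the requested */
+/* SSID, supported rates and any configured WPS probe request IE; optionally wait for the TX ack. */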
+static int _issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da, int wait_ack)
+{
+ int ret = _FAIL;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ unsigned char *mac;
+ unsigned char bssrate[NumRates];
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ int bssrate_len = 0;
+ u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+issue_probereq\n"));
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ mac = myid(&(padapter->eeprompriv));
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ if (da) {
+ /* unicast probe request frame */
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, da, ETH_ALEN);
+ } else {
+ /* broadcast probe request frame */
+ memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, bc_addr, ETH_ALEN);
+ }
+
+ memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_PROBEREQ);
+
+ pframe += sizeof (struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof (struct rtw_ieee80211_hdr_3addr);
+
+ if (pssid)
+ pframe = rtw_set_ie(pframe, _SSID_IE_, pssid->SsidLength, pssid->Ssid, &(pattrib->pktlen));
+ else
+ pframe = rtw_set_ie(pframe, _SSID_IE_, 0, NULL, &(pattrib->pktlen));
+
+ get_rate_set(padapter, bssrate, &bssrate_len);
+
+ if (bssrate_len > 8) {
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, bssrate, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
+ } else {
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, bssrate_len, bssrate, &(pattrib->pktlen));
+ }
+
+ /* add wps_ie for wps2.0 */
+ if (pmlmepriv->wps_probe_req_ie_len > 0 && pmlmepriv->wps_probe_req_ie) {
+ memcpy(pframe, pmlmepriv->wps_probe_req_ie, pmlmepriv->wps_probe_req_ie_len);
+ pframe += pmlmepriv->wps_probe_req_ie_len;
+ pattrib->pktlen += pmlmepriv->wps_probe_req_ie_len;
+ }
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
+ ("issuing probe_req, tx_len=%d\n", pattrib->last_txcmdsz));
+
+ if (wait_ack) {
+ ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
+ } else {
+ dump_mgntframe(padapter, pmgntframe);
+ ret = _SUCCESS;
+ }
+
+exit:
+ return ret;
+}
+
+inline void issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da)
+{
+ _issue_probereq(padapter, pssid, da, false);
+}
+
+int issue_probereq_ex(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da,
+ int try_cnt, int wait_ms)
+{
+ int ret;
+ int i = 0;
+ u32 start = rtw_get_current_time();
+
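+ /* retry until the probe request is acked (when wait_ms > 0) or try_cnt attempts have been made */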
+ do {
+ ret = _issue_probereq(padapter, pssid, da, wait_ms > 0 ? true : false);
+
+ i++;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ break;
+
+ if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
+ rtw_msleep_os(wait_ms);
+
+ } while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
+
+ if (ret != _FAIL) {
+ ret = _SUCCESS;
+ goto exit;
+ }
+
+ if (try_cnt && wait_ms) {
+ if (da)
+ DBG_88E(FUNC_ADPT_FMT" to %pM, ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), da, rtw_get_oper_ch(padapter),
+ ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
+ else
+ DBG_88E(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), rtw_get_oper_ch(padapter),
+ ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
+ }
+exit:
+ return ret;
+}
+
+/* if psta == NULL, it indicates we are a station (client) now... */
+void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short status)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ unsigned int val32;
+ u16 val16;
+#ifdef CONFIG_88EU_AP_MODE
+ __le16 le_val16;
+#endif
+ int use_shared_key = 0;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_AUTH);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+
+ if (psta) {/* for AP mode */
+#ifdef CONFIG_88EU_AP_MODE
+
+ memcpy(pwlanhdr->addr1, psta->hwaddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, myid(&(padapter->eeprompriv)), ETH_ALEN);
+
+
+ /* setting auth algo number */
+ val16 = (u16)psta->authalg;
+
+ if (status != _STATS_SUCCESSFUL_)
+ val16 = 0;
+
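+ /* a non-zero algorithm number means shared-key auth, which needs the challenge text IE appended below */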
+ if (val16) {
+ le_val16 = cpu_to_le16(val16);
+ use_shared_key = 1;
+ } else {
+ le_val16 = 0;
+ }
+
+ pframe = rtw_set_fixed_ie(pframe, _AUTH_ALGM_NUM_, (unsigned char *)&le_val16, &(pattrib->pktlen));
+
+ /* setting auth seq number */
+ val16 = (u16)psta->auth_seq;
+ le_val16 = cpu_to_le16(val16);
+ pframe = rtw_set_fixed_ie(pframe, _AUTH_SEQ_NUM_, (unsigned char *)&le_val16, &(pattrib->pktlen));
+
+ /* setting status code... */
+ val16 = status;
+ le_val16 = cpu_to_le16(val16);
+ pframe = rtw_set_fixed_ie(pframe, _STATUS_CODE_, (unsigned char *)&le_val16, &(pattrib->pktlen));
+
+ /* add the challenge text... */
+ if ((psta->auth_seq == 2) && (psta->state & WIFI_FW_AUTH_STATE) && (use_shared_key == 1))
+ pframe = rtw_set_ie(pframe, _CHLGETXT_IE_, 128, psta->chg_txt, &(pattrib->pktlen));
+#endif
+ } else {
+ __le32 le_tmp32;
+ __le16 le_tmp16;
+ memcpy(pwlanhdr->addr1, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&pmlmeinfo->network), ETH_ALEN);
+
+ /* setting auth algo number */
+ val16 = (pmlmeinfo->auth_algo == dot11AuthAlgrthm_Shared) ? 1 : 0;/* 0:OPEN System, 1:Shared key */
+ if (val16)
+ use_shared_key = 1;
+
+ /* setting IV for auth seq #3 */
+ if ((pmlmeinfo->auth_seq == 3) && (pmlmeinfo->state & WIFI_FW_AUTH_STATE) && (use_shared_key == 1)) {
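+ /* WEP IV field: 24-bit IV counter in the low bits, key index in bits 30..31 (the key ID bits of the fourth octet) */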
+ val32 = ((pmlmeinfo->iv++) | (pmlmeinfo->key_index << 30));
+ le_tmp32 = cpu_to_le32(val32);
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&le_tmp32, &(pattrib->pktlen));
+
+ pattrib->iv_len = 4;
+ }
+
+ le_tmp16 = cpu_to_le16(val16);
+ pframe = rtw_set_fixed_ie(pframe, _AUTH_ALGM_NUM_, (unsigned char *)&le_tmp16, &(pattrib->pktlen));
+
+ /* setting auth seq number */
+ val16 = pmlmeinfo->auth_seq;
+ le_tmp16 = cpu_to_le16(val16);
+ pframe = rtw_set_fixed_ie(pframe, _AUTH_SEQ_NUM_, (unsigned char *)&le_tmp16, &(pattrib->pktlen));
+
+
+ /* setting status code... */
+ le_tmp16 = cpu_to_le16(status);
+ pframe = rtw_set_fixed_ie(pframe, _STATUS_CODE_, (unsigned char *)&le_tmp16, &(pattrib->pktlen));
+
+ /* then check whether the challenge text needs to be sent... */
+ if ((pmlmeinfo->auth_seq == 3) && (pmlmeinfo->state & WIFI_FW_AUTH_STATE) && (use_shared_key == 1)) {
+ pframe = rtw_set_ie(pframe, _CHLGETXT_IE_, 128, pmlmeinfo->chg_txt, &(pattrib->pktlen));
+
+ SetPrivacy(fctrl);
+
+ pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pattrib->encrypt = _WEP40_;
+
+ pattrib->icv_len = 4;
+
+ pattrib->pktlen += pattrib->icv_len;
+ }
+ }
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ rtw_wep_encrypt(padapter, (u8 *)pmgntframe);
+ DBG_88E("%s\n", __func__);
+ dump_mgntframe(padapter, pmgntframe);
+
+ return;
+}
+
+
+void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_info *pstat, int pkt_type)
+{
+#ifdef CONFIG_88EU_AP_MODE
+ struct xmit_frame *pmgntframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ struct pkt_attrib *pattrib;
+ unsigned char *pbuf, *pframe;
+ unsigned short val;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = &(pmlmeinfo->network);
+ u8 *ie = pnetwork->IEs;
+ __le16 lestatus, leval;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+#endif /* CONFIG_88EU_P2P */
+
+ DBG_88E("%s\n", __func__);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy((void *)GetAddr1Ptr(pwlanhdr), pstat->hwaddr, ETH_ALEN);
+ memcpy((void *)GetAddr2Ptr(pwlanhdr), myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy((void *)GetAddr3Ptr(pwlanhdr), get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ if ((pkt_type == WIFI_ASSOCRSP) || (pkt_type == WIFI_REASSOCRSP))
+ SetFrameSubType(pwlanhdr, pkt_type);
+ else
+ return;
+
+ pattrib->hdrlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen += pattrib->hdrlen;
+ pframe += pattrib->hdrlen;
+
+ /* capability */
+ val = *(unsigned short *)rtw_get_capability_from_ie(ie);
+
+ pframe = rtw_set_fixed_ie(pframe, _CAPABILITY_ , (unsigned char *)&val, &(pattrib->pktlen));
+
+ lestatus = cpu_to_le16(status);
+ pframe = rtw_set_fixed_ie(pframe , _STATUS_CODE_ , (unsigned char *)&lestatus, &(pattrib->pktlen));
+
+ leval = cpu_to_le16(pstat->aid | BIT(14) | BIT(15));
+ pframe = rtw_set_fixed_ie(pframe, _ASOC_ID_ , (unsigned char *)&leval, &(pattrib->pktlen));
+
+ if (pstat->bssratelen <= 8) {
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, pstat->bssratelen, pstat->bssrateset, &(pattrib->pktlen));
+ } else {
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, 8, pstat->bssrateset, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (pstat->bssratelen-8), pstat->bssrateset+8, &(pattrib->pktlen));
+ }
+
+ if ((pstat->flags & WLAN_STA_HT) && (pmlmepriv->htpriv.ht_option)) {
+ uint ie_len = 0;
+
+ /* FILL HT CAP INFO IE */
+ pbuf = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_CAPABILITY_IE_, &ie_len, (pnetwork->IELength - _BEACON_IE_OFFSET_));
+ if (pbuf && ie_len > 0) {
+ memcpy(pframe, pbuf, ie_len+2);
+ pframe += (ie_len+2);
+ pattrib->pktlen += (ie_len+2);
+ }
+
+ /* FILL HT ADD INFO IE */
+ pbuf = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_ADD_INFO_IE_, &ie_len, (pnetwork->IELength - _BEACON_IE_OFFSET_));
+ if (pbuf && ie_len > 0) {
+ memcpy(pframe, pbuf, ie_len+2);
+ pframe += (ie_len+2);
+ pattrib->pktlen += (ie_len+2);
+ }
+ }
+
+ /* FILL WMM IE */
+ if ((pstat->flags & WLAN_STA_WME) && (pmlmepriv->qospriv.qos_option)) {
+ uint ie_len = 0;
+ unsigned char WMM_PARA_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
+
+ for (pbuf = ie + _BEACON_IE_OFFSET_;; pbuf += (ie_len + 2)) {
+ pbuf = rtw_get_ie(pbuf, _VENDOR_SPECIFIC_IE_, &ie_len, (pnetwork->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2)));
+ if (pbuf && _rtw_memcmp(pbuf+2, WMM_PARA_IE, 6)) {
+ memcpy(pframe, pbuf, ie_len+2);
+ pframe += (ie_len+2);
+ pattrib->pktlen += (ie_len+2);
+ break;
+ }
+
+ if ((pbuf == NULL) || (ie_len == 0))
+ break;
+ }
+ }
+
+ if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6 , REALTEK_96B_IE, &(pattrib->pktlen));
+
+ /* add the WPS IE for WPS 2.0 */
+ if (pmlmepriv->wps_assoc_resp_ie && pmlmepriv->wps_assoc_resp_ie_len > 0) {
+ memcpy(pframe, pmlmepriv->wps_assoc_resp_ie, pmlmepriv->wps_assoc_resp_ie_len);
+
+ pframe += pmlmepriv->wps_assoc_resp_ie_len;
+ pattrib->pktlen += pmlmepriv->wps_assoc_resp_ie_len;
+ }
+
+#ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO) && (pstat->is_p2p_device)) {
+ u32 len;
+
+ len = build_assoc_resp_p2p_ie(pwdinfo, pframe, pstat->p2p_status_code);
+
+ pframe += len;
+ pattrib->pktlen += len;
+ }
+#endif /* CONFIG_88EU_P2P */
+ pattrib->last_txcmdsz = pattrib->pktlen;
+ dump_mgntframe(padapter, pmgntframe);
+#endif
+}
+
+void issue_assocreq(struct adapter *padapter)
+{
+ int ret = _FAIL;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe, *p;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ __le16 le_tmp;
+ unsigned int i, j, ie_len, index = 0;
+ unsigned char rf_type, bssrate[NumRates], sta_bssrate[NumRates];
+ struct ndis_802_11_var_ie *pIE;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ int bssrate_len = 0, sta_bssrate_len = 0;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 p2pie[255] = { 0x00 };
+ u16 p2pielen = 0;
+#endif /* CONFIG_88EU_P2P */
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+ memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ASSOCREQ);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ /* caps */
+
+ memcpy(pframe, rtw_get_capability_from_ie(pmlmeinfo->network.IEs), 2);
+
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* listen interval */
+ /* todo: listen interval for power saving */
+ le_tmp = cpu_to_le16(3);
+ memcpy(pframe , (unsigned char *)&le_tmp, 2);
+ pframe += 2;
+ pattrib->pktlen += 2;
+
+ /* SSID */
+ pframe = rtw_set_ie(pframe, _SSID_IE_, pmlmeinfo->network.Ssid.SsidLength, pmlmeinfo->network.Ssid.Ssid, &(pattrib->pktlen));
+
+ /* supported rate & extended supported rate */
+
+ /* Check if the AP's supported rates are also supported by STA. */
+ get_rate_set(padapter, sta_bssrate, &sta_bssrate_len);
+
+ if (pmlmeext->cur_channel == 14)/* for Japan, channel 14 can only use B mode (CCK) */
+ sta_bssrate_len = 4;
+
+ for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
+ if (pmlmeinfo->network.SupportedRates[i] == 0)
+ break;
+ DBG_88E("network.SupportedRates[%d]=%02X\n", i, pmlmeinfo->network.SupportedRates[i]);
+ }
+
+ for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
+ if (pmlmeinfo->network.SupportedRates[i] == 0)
+ break;
+
+ /* Check if the AP's supported rates are also supported by STA. */
+ for (j = 0; j < sta_bssrate_len; j++) {
+ /* Avoid the proprietary data rate (22Mbps) of Handlink WSG-4000 AP */
+ if ((pmlmeinfo->network.SupportedRates[i]|IEEE80211_BASIC_RATE_MASK)
+ == (sta_bssrate[j]|IEEE80211_BASIC_RATE_MASK))
+ break;
+ }
+
+ if (j == sta_bssrate_len) {
+ /* the rate is not supported by STA */
+ DBG_88E("%s(): the rate[%d]=%02X is not supported by STA!\n", __func__, i, pmlmeinfo->network.SupportedRates[i]);
+ } else {
+ /* the rate is supported by STA */
+ bssrate[index++] = pmlmeinfo->network.SupportedRates[i];
+ }
+ }
+
+ bssrate_len = index;
+ DBG_88E("bssrate_len=%d\n", bssrate_len);
+
+ if (bssrate_len == 0) {
+ rtw_free_xmitbuf(pxmitpriv, pmgntframe->pxmitbuf);
+ rtw_free_xmitframe(pxmitpriv, pmgntframe);
+ goto exit; /* don't connect to the AP if there is no common supported rate */
+ }
+
+
+ if (bssrate_len > 8) {
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , 8, bssrate, &(pattrib->pktlen));
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_ , (bssrate_len - 8), (bssrate + 8), &(pattrib->pktlen));
+ } else {
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_ , bssrate_len , bssrate, &(pattrib->pktlen));
+ }
+
+ /* RSN */
+ p = rtw_get_ie((pmlmeinfo->network.IEs + sizeof(struct ndis_802_11_fixed_ie)), _RSN_IE_2_, &ie_len, (pmlmeinfo->network.IELength - sizeof(struct ndis_802_11_fixed_ie)));
+ if (p != NULL)
+ pframe = rtw_set_ie(pframe, _RSN_IE_2_, ie_len, (p + 2), &(pattrib->pktlen));
+
+ /* HT caps */
+ if (padapter->mlmepriv.htpriv.ht_option) {
+ p = rtw_get_ie((pmlmeinfo->network.IEs + sizeof(struct ndis_802_11_fixed_ie)), _HT_CAPABILITY_IE_, &ie_len, (pmlmeinfo->network.IELength - sizeof(struct ndis_802_11_fixed_ie)));
+ if ((p != NULL) && (!(is_ap_in_tkip(padapter)))) {
+ memcpy(&(pmlmeinfo->HT_caps), (p + 2), sizeof(struct HT_caps_element));
+
+ /* disable 40 MHz support while gd_bw_40MHz_en = 0 */
+ if (pregpriv->cbw40_enable == 0)
+ pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info &= cpu_to_le16(~(BIT(6) | BIT(1)));
+ else
+ pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(BIT(1));
+
+ /* todo: disable SM power save mode */
+ pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x000c);
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+ switch (rf_type) {
+ case RF_1T1R:
+ if (pregpriv->rx_stbc)
+ pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x0100);/* RX STBC One spatial stream */
+ memcpy(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_rate_1R, 16);
+ break;
+ case RF_2T2R:
+ case RF_1T2R:
+ default:
+ if ((pregpriv->rx_stbc == 0x3) ||/* enable for 2.4/5 GHz */
+ ((pmlmeext->cur_wireless_mode & WIRELESS_11_24N) && (pregpriv->rx_stbc == 0x1)) || /* enable for 2.4GHz */
+ (pregpriv->wifi_spec == 1)) {
+ DBG_88E("declare supporting RX STBC\n");
+ pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x0200);/* RX STBC two spatial stream */
+ }
+ memcpy(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_rate_2R, 16);
+ break;
+ }
+ pframe = rtw_set_ie(pframe, _HT_CAPABILITY_IE_, ie_len , (u8 *)(&(pmlmeinfo->HT_caps)), &(pattrib->pktlen));
+ }
+ }
+
+ /* vendor specific IE, such as WPA, WMM, WPS */
+ for (i = sizeof(struct ndis_802_11_fixed_ie); i < pmlmeinfo->network.IELength;) {
+ pIE = (struct ndis_802_11_var_ie *)(pmlmeinfo->network.IEs + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+ if ((_rtw_memcmp(pIE->data, RTW_WPA_OUI, 4)) ||
+ (_rtw_memcmp(pIE->data, WMM_OUI, 4)) ||
+ (_rtw_memcmp(pIE->data, WPS_OUI, 4))) {
+ if (!padapter->registrypriv.wifi_spec) {
+ /* Commented by Kurt 20110629 */
+ /* With some older APs, the WPS handshake */
+ /* would fail if we appended vendor extension information to the AP */
+ if (_rtw_memcmp(pIE->data, WPS_OUI, 4))
+ pIE->Length = 14;
+ }
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, pIE->Length, pIE->data, &(pattrib->pktlen));
+ }
+ break;
+ default:
+ break;
+ }
+ i += (pIE->Length + 2);
+ }
+
+ if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6 , REALTEK_96B_IE, &(pattrib->pktlen));
+
+#ifdef CONFIG_88EU_P2P
+
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) && !rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE)) {
+ /* Should add the P2P IE in the association request frame. */
+ /* P2P OUI */
+
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20101109 */
+ /* According to the P2P Specification, the association request frame should contain 3 P2P attributes */
+ /* 1. P2P Capability */
+ /* 2. Extended Listen Timing */
+ /* 3. Device Info */
+ /* Commented by Albert 20110516 */
+ /* 4. P2P Interface */
+
+ /* P2P Capability */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
+
+ /* Group Capability Bitmap, 1 byte */
+ if (pwdinfo->persistent_supported)
+ p2pie[p2pielen++] = P2P_GRPCAP_PERSISTENT_GROUP | DMP_P2P_GRPCAP_SUPPORT;
+ else
+ p2pie[p2pielen++] = DMP_P2P_GRPCAP_SUPPORT;
+
+ /* Extended Listen Timing */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_EX_LISTEN_TIMING;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x0004);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Availability Period */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0xFFFF);
+ p2pielen += 2;
+
+ /* Availability Interval */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0xFFFF);
+ p2pielen += 2;
+
+ /* Device Info */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes) */
+ /* + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes) */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address */
+ memcpy(p2pie + p2pielen, myid(&padapter->eeprompriv), ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Config Method */
+ /* This field should be big endian, as noted in the P2P specification. */
+ if ((pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_PEER_DISPLAY_PIN) ||
+ (pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_SELF_DISPLAY_PIN))
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_CONFIG_METHOD_DISPLAY);
+ else
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_CONFIG_METHOD_PBC);
+
+ p2pielen += 2;
+
+ /* Primary Device Type */
+ /* Category ID */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA);
+ p2pielen += 2;
+
+ /* OUI */
+ *(__be32 *)(p2pie + p2pielen) = cpu_to_be32(WPSOUI);
+ p2pielen += 4;
+
+ /* Sub Category ID */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER);
+ p2pielen += 2;
+
+ /* Number of Secondary Device Types */
+ p2pie[p2pielen++] = 0x00; /* No Secondary Device Type List */
+
+ /* Device Name */
+ /* Type: */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME);
+ p2pielen += 2;
+
+ /* Length: */
+ *(__be16 *)(p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->device_name, pwdinfo->device_name_len);
+ p2pielen += pwdinfo->device_name_len;
+
+ /* P2P Interface */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_INTERFACE;
+
+ /* Length: */
+ *(__le16 *)(p2pie + p2pielen) = cpu_to_le16(0x000D);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN); /* P2P Device Address */
+ p2pielen += ETH_ALEN;
+
+ p2pie[p2pielen++] = 1; /* P2P Interface Address Count */
+
+ memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN); /* P2P Interface Address List */
+ p2pielen += ETH_ALEN;
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
+ }
+
+#endif /* CONFIG_88EU_P2P */
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+ dump_mgntframe(padapter, pmgntframe);
+
+ ret = _SUCCESS;
+
+exit:
+ if (ret == _SUCCESS)
+ rtw_buf_update(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len, (u8 *)pwlanhdr, pattrib->pktlen);
+ else
+ rtw_buf_free(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len);
+
+ return;
+}
+
+/* when wait_ack is true, this function should be called in process context */
+static int _issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int wait_ack)
+{
+ int ret = _FAIL;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv;
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+
+ if (!padapter)
+ goto exit;
+
+ pxmitpriv = &(padapter->xmitpriv);
+ pmlmeext = &(padapter->mlmeextpriv);
+ pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+ pattrib->retry_ctrl = false;
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)
+ SetFrDs(fctrl);
+ else if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE)
+ SetToDs(fctrl);
+
+ if (power_mode)
+ SetPwrMgt(fctrl);
+
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_DATA_NULL);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ if (wait_ack) {
+ ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
+ } else {
+ dump_mgntframe(padapter, pmgntframe);
+ ret = _SUCCESS;
+ }
+
+exit:
+ return ret;
+}
+
+
+/* when wait_ms > 0, this function should be called in process context */
+/* da == NULL for station mode */
+int issue_nulldata(struct adapter *padapter, unsigned char *da, unsigned int power_mode, int try_cnt, int wait_ms)
+{
+ int ret;
+ int i = 0;
+ u32 start = rtw_get_current_time();
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ /* da == NULL: assume it is a null data frame from the STA to the AP */
+ if (da == NULL)
+ da = get_my_bssid(&(pmlmeinfo->network));
+
+ do {
+ ret = _issue_nulldata(padapter, da, power_mode, wait_ms > 0 ? true : false);
+
+ i++;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ break;
+
+ if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
+ rtw_msleep_os(wait_ms);
+ } while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
+
+ if (ret != _FAIL) {
+ ret = _SUCCESS;
+ goto exit;
+ }
+
+ if (try_cnt && wait_ms) {
+ if (da)
+ DBG_88E(FUNC_ADPT_FMT" to %pM, ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), da, rtw_get_oper_ch(padapter),
+ ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
+ else
+ DBG_88E(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), rtw_get_oper_ch(padapter),
+ ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
+ }
+exit:
+ return ret;
+}
+
+/* when wait_ack is true, this function should be called in process context */
+static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int wait_ack)
+{
+ int ret = _FAIL;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl, *qc;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ DBG_88E("%s\n", __func__);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
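+ /* QoS null frame: add the 2-byte QoS control field to the header; EOSP set, normal ACK policy, no more-data bit */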
+ pattrib->hdrlen += 2;
+ pattrib->qos_en = true;
+ pattrib->eosp = 1;
+ pattrib->ack_policy = 0;
+ pattrib->mdata = 0;
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)
+ SetFrDs(fctrl);
+ else if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE)
+ SetToDs(fctrl);
+
+ if (pattrib->mdata)
+ SetMData(fctrl);
+
+ qc = (unsigned short *)(pframe + pattrib->hdrlen - 2);
+
+ SetPriority(qc, tid);
+
+ SetEOSP(qc, pattrib->eosp);
+
+ SetAckpolicy(qc, pattrib->ack_policy);
+
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr_qos);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr_qos);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ if (wait_ack) {
+ ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
+ } else {
+ dump_mgntframe(padapter, pmgntframe);
+ ret = _SUCCESS;
+ }
+
+exit:
+ return ret;
+}
+
+/* when wait_ms > 0, this function should be called in process context */
+/* da == NULL for station mode */
+int issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16 tid, int try_cnt, int wait_ms)
+{
+ int ret;
+ int i = 0;
+ u32 start = rtw_get_current_time();
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ /* da == NULL: assume it is a null data frame from the STA to the AP */
+ if (da == NULL)
+ da = get_my_bssid(&(pmlmeinfo->network));
+
+ do {
+ ret = _issue_qos_nulldata(padapter, da, tid, wait_ms > 0 ? true : false);
+
+ i++;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ break;
+
+ if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
+ rtw_msleep_os(wait_ms);
+ } while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
+
+ if (ret != _FAIL) {
+ ret = _SUCCESS;
+ goto exit;
+ }
+
+ if (try_cnt && wait_ms) {
+ if (da)
+ DBG_88E(FUNC_ADPT_FMT" to %pM, ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), da, rtw_get_oper_ch(padapter),
+ ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
+ else
+ DBG_88E(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), rtw_get_oper_ch(padapter),
+ ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
+ }
+exit:
+ return ret;
+}
+
+static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned short reason, u8 wait_ack)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ int ret = _FAIL;
+ __le16 le_tmp;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+#endif /* CONFIG_88EU_P2P */
+
+#ifdef CONFIG_88EU_P2P
+ if (!(rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) && (pwdinfo->rx_invitereq_info.scan_op_ch_only)) {
+ _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey);
+ _set_timer(&pwdinfo->reset_ch_sitesurvey, 10);
+ }
+#endif /* CONFIG_88EU_P2P */
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+ pattrib->retry_ctrl = false;
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_DEAUTH);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ le_tmp = cpu_to_le16(reason);
+ pframe = rtw_set_fixed_ie(pframe, _RSON_CODE_ , (unsigned char *)&le_tmp, &(pattrib->pktlen));
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+
+ if (wait_ack) {
+ ret = dump_mgntframe_and_wait_ack(padapter, pmgntframe);
+ } else {
+ dump_mgntframe(padapter, pmgntframe);
+ ret = _SUCCESS;
+ }
+
+exit:
+ return ret;
+}
+
+int issue_deauth(struct adapter *padapter, unsigned char *da, unsigned short reason)
+{
+ DBG_88E("%s to %pM\n", __func__, da);
+ return _issue_deauth(padapter, da, reason, false);
+}
+
+int issue_deauth_ex(struct adapter *padapter, u8 *da, unsigned short reason, int try_cnt,
+ int wait_ms)
+{
+ int ret;
+ int i = 0;
+ u32 start = rtw_get_current_time();
+
+ do {
+ ret = _issue_deauth(padapter, da, reason, wait_ms > 0 ? true : false);
+
+ i++;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ break;
+
+ if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
+ rtw_msleep_os(wait_ms);
+ } while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
+
+ if (ret != _FAIL) {
+ ret = _SUCCESS;
+ goto exit;
+ }
+
+ if (try_cnt && wait_ms) {
+ if (da)
+ DBG_88E(FUNC_ADPT_FMT" to %pM, ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), da, rtw_get_oper_ch(padapter),
+ ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
+ else
+ DBG_88E(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
+ FUNC_ADPT_ARG(padapter), rtw_get_oper_ch(padapter),
+ ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
+ }
+exit:
+ return ret;
+}
+
+void issue_action_spct_ch_switch (struct adapter *padapter, u8 *ra, u8 new_ch, u8 ch_offset)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+
+
+ DBG_88E(FUNC_NDEV_FMT" ra =%pM, ch:%u, offset:%u\n",
+ FUNC_NDEV_ARG(padapter->pnetdev), ra, new_ch, ch_offset);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, ra, ETH_ALEN); /* RA */
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN); /* TA */
+ memcpy(pwlanhdr->addr3, ra, ETH_ALEN); /* DA = RA */
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ /* category, action */
+ {
+ u8 category, action;
+ category = RTW_WLAN_CATEGORY_SPECTRUM_MGMT;
+ action = RTW_WLAN_ACTION_SPCT_CHL_SWITCH;
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+ }
+
+ pframe = rtw_set_ie_ch_switch (pframe, &(pattrib->pktlen), 0, new_ch, 0);
+ pframe = rtw_set_ie_secondary_ch_offset(pframe, &(pattrib->pktlen),
+ hal_ch_offset_to_secondary_ch_offset(ch_offset));
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+}
+
+void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned char action, unsigned short status)
+{
+ u8 category = RTW_WLAN_CATEGORY_BACK;
+ u16 start_seq;
+ u16 BA_para_set;
+ u16 reason_code;
+ u16 BA_timeout_value;
+ __le16 le_tmp;
+ u16 BA_starting_seqctrl = 0;
+ enum ht_cap_ampdu_factor max_rx_ampdu_factor;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ u8 *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ u16 *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+
+ DBG_88E("%s, category=%d, action=%d, status=%d\n", __func__, category, action, status);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ /* memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN); */
+ memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+
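+ /* category 3: Block Ack (RTW_WLAN_CATEGORY_BACK); action 0 = ADDBA req, 1 = ADDBA rsp, 2 = DELBA */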
+ if (category == 3) {
+ switch (action) {
+ case 0: /* ADDBA req */
+ do {
+ pmlmeinfo->dialogToken++;
+ } while (pmlmeinfo->dialogToken == 0);
+ pframe = rtw_set_fixed_ie(pframe, 1, &(pmlmeinfo->dialogToken), &(pattrib->pktlen));
+
+ BA_para_set = (0x1002 | ((status & 0xf) << 2)); /* immediate ack & 64 buffer size */
+ le_tmp = cpu_to_le16(BA_para_set);
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));
+
+ BA_timeout_value = 5000;/* in TUs */
+ le_tmp = cpu_to_le16(BA_timeout_value);
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));
+
+ psta = rtw_get_stainfo(pstapriv, raddr);
+ if (psta != NULL) {
+ start_seq = (psta->sta_xmitpriv.txseq_tid[status & 0x07]&0xfff) + 1;
+
+ DBG_88E("BA_starting_seqctrl=%d for TID=%d\n", start_seq, status & 0x07);
+
+ psta->BA_starting_seqctrl[status & 0x07] = start_seq;
+
+ BA_starting_seqctrl = start_seq << 4;
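+ /* the starting sequence number occupies bits 4..15 of the Block Ack Starting Sequence Control field */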
+ }
+ le_tmp = cpu_to_le16(BA_starting_seqctrl);
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));
+ break;
+ case 1: /* ADDBA rsp */
+ pframe = rtw_set_fixed_ie(pframe, 1, &(pmlmeinfo->ADDBA_req.dialog_token), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&status), &(pattrib->pktlen));
+ rtw_hal_get_def_var(padapter, HW_VAR_MAX_RX_AMPDU_FACTOR, &max_rx_ampdu_factor);
+ if (MAX_AMPDU_FACTOR_64K == max_rx_ampdu_factor)
+ BA_para_set = (((pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x1000); /* 64 buffer size */
+ else if (MAX_AMPDU_FACTOR_32K == max_rx_ampdu_factor)
+ BA_para_set = (((pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0800); /* 32 buffer size */
+ else if (MAX_AMPDU_FACTOR_16K == max_rx_ampdu_factor)
+ BA_para_set = (((pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0400); /* 16 buffer size */
+ else if (MAX_AMPDU_FACTOR_8K == max_rx_ampdu_factor)
+ BA_para_set = (((pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x0200); /* 8 buffer size */
+ else
+ BA_para_set = (((pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f) | 0x1000); /* 64 buffer size */
+
+ if (pregpriv->ampdu_amsdu == 0)/* disabled */
+ BA_para_set = BA_para_set & ~BIT(0);
+ else if (pregpriv->ampdu_amsdu == 1)/* enabled */
+ BA_para_set = BA_para_set | BIT(0);
+ le_tmp = cpu_to_le16(BA_para_set);
+
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(pmlmeinfo->ADDBA_req.BA_timeout_value)), &(pattrib->pktlen));
+ break;
+ case 2:/* DELBA */
+ BA_para_set = (status & 0x1F) << 3;
+ le_tmp = cpu_to_le16(BA_para_set);
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));
+
+ reason_code = 37;/* Requested from peer STA as it does not want to use the mechanism */
+ le_tmp = cpu_to_le16(reason_code);
+ pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)(&(le_tmp)), &(pattrib->pktlen));
+ break;
+ default:
+ break;
+ }
+ }
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+}
+
+static void issue_action_BSSCoexistPacket(struct adapter *padapter)
+{
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ unsigned char category, action;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct wlan_network *pnetwork = NULL;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ u8 InfoContent[16] = {0};
+ u8 ICS[8][15];
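+ /* ICS[regulatory class][channel]: channels on which a legacy (non-HT) BSS was observed; only row 0 is filled in below */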
+ if ((pmlmepriv->num_FortyMHzIntolerant == 0) || (pmlmepriv->num_sta_no_ht == 0))
+ return;
+
+ if (pmlmeinfo->bwmode_updated)
+ return;
+
+
+ DBG_88E("%s\n", __func__);
+
+
+ category = RTW_WLAN_CATEGORY_PUBLIC;
+ action = ACT_PUBLIC_BSSCOEXIST;
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+
+
+ /* */
+ if (pmlmepriv->num_FortyMHzIntolerant > 0) {
+ u8 iedata = 0;
+
+ iedata |= BIT(2);/* 20 MHz BSS Width Request */
+
+ pframe = rtw_set_ie(pframe, EID_BSSCoexistence, 1, &iedata, &(pattrib->pktlen));
+ }
+
+
+ /* */
+ _rtw_memset(ICS, 0, sizeof(ICS));
+ if (pmlmepriv->num_sta_no_ht > 0) {
+ int i;
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ int len;
+ u8 *p;
+ struct wlan_bssid_ex *pbss_network;
+
+ if (rtw_end_of_queue_search(phead, plist))
+ break;
+
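+ /* walk the vendor-specific IEs in our beacon and copy the WMM Parameter IE (OUI 00:50:f2, type 2) into the response */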
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ plist = get_next(plist);
+
+ pbss_network = (struct wlan_bssid_ex *)&pnetwork->network;
+
+ p = rtw_get_ie(pbss_network->IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, pbss_network->IELength - _FIXED_IE_LENGTH_);
+ if ((p == NULL) || (len == 0)) { /* non-HT */
+ if ((pbss_network->Configuration.DSConfig <= 0) || (pbss_network->Configuration.DSConfig > 14))
+ continue;
+
+ ICS[0][pbss_network->Configuration.DSConfig] = 1;
+
+ if (ICS[0][0] == 0)
+ ICS[0][0] = 1;
+ }
+ }
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ for (i = 0; i < 8; i++) {
+ if (ICS[i][0] == 1) {
+ int j, k = 0;
+
+ InfoContent[k] = i;
+ /* SET_BSS_INTOLERANT_ELE_REG_CLASS(InfoContent, i); */
+ k++;
+
+ for (j = 1; j <= 14; j++) {
+ if (ICS[i][j] == 1) {
+ if (k < 16) {
+ InfoContent[k] = j; /* channel number */
+ /* SET_BSS_INTOLERANT_ELE_CHANNEL(InfoContent+k, j); */
+ k++;
+ }
+ }
+ }
+
+ pframe = rtw_set_ie(pframe, EID_BSSIntolerantChlReport, k, InfoContent, &(pattrib->pktlen));
+ }
+ }
+ }
+
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+}
+
+unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *psta = NULL;
+ /* struct recv_reorder_ctrl *preorder_ctrl; */
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u16 tid;
+
+ if ((pmlmeinfo->state&0x03) != WIFI_FW_AP_STATE)
+ if (!(pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS))
+ return _SUCCESS;
+
+ psta = rtw_get_stainfo(pstapriv, addr);
+ if (psta == NULL)
+ return _SUCCESS;
+
+ if (initiator == 0) { /* recipient */
+ for (tid = 0; tid < MAXTID; tid++) {
+ if (psta->recvreorder_ctrl[tid].enable) {
+ DBG_88E("rx agg disable tid(%d)\n", tid);
+ issue_action_BA(padapter, addr, RTW_WLAN_ACTION_DELBA, (((tid << 1) | initiator)&0x1F));
+ psta->recvreorder_ctrl[tid].enable = false;
+ psta->recvreorder_ctrl[tid].indicate_seq = 0xffff;
+ }
+ }
+ } else if (initiator == 1) { /* originator */
+ for (tid = 0; tid < MAXTID; tid++) {
+ if (psta->htpriv.agg_enable_bitmap & BIT(tid)) {
+ DBG_88E("tx agg disable tid(%d)\n", tid);
+ issue_action_BA(padapter, addr, RTW_WLAN_ACTION_DELBA, (((tid << 1) | initiator)&0x1F));
+ psta->htpriv.agg_enable_bitmap &= ~BIT(tid);
+ psta->htpriv.candidate_tid_bitmap &= ~BIT(tid);
+ }
+ }
+ }
+
+ return _SUCCESS;
+}
+
+unsigned int send_beacon(struct adapter *padapter)
+{
+ u8 bxmitok = false;
+ int issue = 0;
+ int poll = 0;
+
+ u32 start = rtw_get_current_time();
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_BCN_VALID, NULL);
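+ /* clear the beacon-valid status, then issue beacons and poll it until the hardware reports the beacon was accepted (up to 100 issue attempts) */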
+ do {
+ issue_beacon(padapter, 100);
+ issue++;
+ do {
+ rtw_yield_os();
+ rtw_hal_get_hwreg(padapter, HW_VAR_BCN_VALID, (u8 *)(&bxmitok));
+ poll++;
+ } while ((poll%10) != 0 && !bxmitok && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
+ } while (!bxmitok && issue < 100 && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
+
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ return _FAIL;
+ if (!bxmitok) {
+ DBG_88E("%s fail! %u ms\n", __func__, rtw_get_passing_time_ms(start));
+ return _FAIL;
+ } else {
+ u32 passing_time = rtw_get_passing_time_ms(start);
+
+ if (passing_time > 100 || issue > 3)
+ DBG_88E("%s success, issue:%d, poll:%d, %u ms\n", __func__, issue, poll, rtw_get_passing_time_ms(start));
+ return _SUCCESS;
+ }
+}
+
+/****************************************************************************
+
+Following are some utility functions for WiFi MLME
+
+*****************************************************************************/
+
+void site_survey(struct adapter *padapter)
+{
+ unsigned char survey_channel = 0, val8;
+ enum rt_scan_type ScanType = SCAN_PASSIVE;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u32 initialgain = 0;
+
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ if ((pwdinfo->rx_invitereq_info.scan_op_ch_only) || (pwdinfo->p2p_info.scan_op_ch_only)) {
+ if (pwdinfo->rx_invitereq_info.scan_op_ch_only) {
+ survey_channel = pwdinfo->rx_invitereq_info.operation_ch[pmlmeext->sitesurvey_res.channel_idx];
+ } else {
+ survey_channel = pwdinfo->p2p_info.operation_ch[pmlmeext->sitesurvey_res.channel_idx];
+ }
+ ScanType = SCAN_ACTIVE;
+ } else if (rtw_p2p_findphase_ex_is_social(pwdinfo)) {
+ /* Commented by Albert 2011/06/03 */
+ /* The driver is in the find phase, it should go through the social channel. */
+ int ch_set_idx;
+ survey_channel = pwdinfo->social_chan[pmlmeext->sitesurvey_res.channel_idx];
+ ch_set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, survey_channel);
+ if (ch_set_idx >= 0)
+ ScanType = pmlmeext->channel_set[ch_set_idx].ScanType;
+ else
+ ScanType = SCAN_ACTIVE;
+ } else
+#endif /* CONFIG_88EU_P2P */
+ {
+ struct rtw_ieee80211_channel *ch;
+ if (pmlmeext->sitesurvey_res.channel_idx < pmlmeext->sitesurvey_res.ch_num) {
+ ch = &pmlmeext->sitesurvey_res.ch[pmlmeext->sitesurvey_res.channel_idx];
+ survey_channel = ch->hw_value;
+ ScanType = (ch->flags & RTW_IEEE80211_CHAN_PASSIVE_SCAN) ? SCAN_PASSIVE : SCAN_ACTIVE;
+ }
+ }
+
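+ /* a channel remains in the scan list: switch to it and, for active scans, send probe requests */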
+ if (survey_channel != 0) {
+ /* PAUSE 4-AC Queue when site_survey */
+ /* rtw_hal_get_hwreg(padapter, HW_VAR_TXPAUSE, (u8 *)(&val8)); */
+ /* val8 |= 0x0f; */
+ /* rtw_hal_set_hwreg(padapter, HW_VAR_TXPAUSE, (u8 *)(&val8)); */
+ if (pmlmeext->sitesurvey_res.channel_idx == 0)
+ set_channel_bwmode(padapter, survey_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ else
+ SelectChannel(padapter, survey_channel);
+
+ if (ScanType == SCAN_ACTIVE) { /* obey the channel plan setting... */
+ #ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_SCAN) ||
+ rtw_p2p_chk_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH)) {
+ issue_probereq_p2p(padapter, NULL);
+ issue_probereq_p2p(padapter, NULL);
+ issue_probereq_p2p(padapter, NULL);
+ } else
+ #endif /* CONFIG_88EU_P2P */
+ {
+ int i;
+ for (i = 0; i < RTW_SSID_SCAN_AMOUNT; i++) {
+ if (pmlmeext->sitesurvey_res.ssid[i].SsidLength) {
+ /* todo: to issue two probe req??? */
+ issue_probereq(padapter, &(pmlmeext->sitesurvey_res.ssid[i]), NULL);
+ /* rtw_msleep_os(SURVEY_TO>>1); */
+ issue_probereq(padapter, &(pmlmeext->sitesurvey_res.ssid[i]), NULL);
+ }
+ }
+
+ if (pmlmeext->sitesurvey_res.scan_mode == SCAN_ACTIVE) {
+ /* todo: to issue two probe req??? */
+ issue_probereq(padapter, NULL, NULL);
+ /* rtw_msleep_os(SURVEY_TO>>1); */
+ issue_probereq(padapter, NULL, NULL);
+ }
+ }
+ }
+
+ set_survey_timer(pmlmeext, pmlmeext->chan_scan_time);
+ } else {
+ /* channel number is 0 or this channel is not valid. */
+
+#ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_SCAN) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH)) {
+ if ((pwdinfo->rx_invitereq_info.scan_op_ch_only) || (pwdinfo->p2p_info.scan_op_ch_only)) {
+ /* Set the find_phase_state_exchange_cnt to P2P_FINDPHASE_EX_MAX. */
+ /* This lets the following flow proceed to the end of the scan. */
+ rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_MAX);
+ }
+ }
+
+ if (rtw_p2p_findphase_ex_is_needed(pwdinfo)) {
+ /* Set the P2P State to the listen state of find phase and set the current channel to the listen channel */
+ set_channel_bwmode(padapter, pwdinfo->listen_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_FIND_PHASE_LISTEN);
+ pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
+
+ initialgain = 0xff; /* restore RX GAIN */
+ rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
+ /* turn on dynamic functions */
+ Restore_DM_Func_Flag(padapter);
+ /* Switch_DM_Func(padapter, DYNAMIC_FUNC_DIG|DYNAMIC_FUNC_HP|DYNAMIC_FUNC_SS, true); */
+
+ _set_timer(&pwdinfo->find_phase_timer, (u32)((u32)(pwdinfo->listen_dwell) * 100));
+ } else
+#endif /* CONFIG_88EU_P2P */
+ {
+ /* 20100721: Interrupt the scan operation here. */
+ /* For SW antenna diversity before link, the driver needs to switch to the other antenna and scan again. */
+ /* It then compares the scan results and selects the better antenna for the connection. */
+ if (rtw_hal_antdiv_before_linked(padapter)) {
+ pmlmeext->sitesurvey_res.bss_cnt = 0;
+ pmlmeext->sitesurvey_res.channel_idx = -1;
+ pmlmeext->chan_scan_time = SURVEY_TO / 2;
+ set_survey_timer(pmlmeext, pmlmeext->chan_scan_time);
+ return;
+ }
+#ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_SCAN) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH))
+ rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
+ rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_NONE);
+#endif /* CONFIG_88EU_P2P */
+
+ pmlmeext->sitesurvey_res.state = SCAN_COMPLETE;
+
+ /* switch back to the original channel */
+
+#ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_LISTEN))
+ set_channel_bwmode(padapter, pwdinfo->listen_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ else
+ set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+#endif /* CONFIG_88EU_P2P */
+
+ /* flush 4-AC Queue after site_survey */
+ /* val8 = 0; */
+ /* rtw_hal_set_hwreg(padapter, HW_VAR_TXPAUSE, (u8 *)(&val8)); */
+
+ /* config MSR */
+ Set_MSR(padapter, (pmlmeinfo->state & 0x3));
+
+ initialgain = 0xff; /* restore RX GAIN */
+ rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
+ /* turn on dynamic functions */
+ Restore_DM_Func_Flag(padapter);
+ /* Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true); */
+
+ if (is_client_associated_to_ap(padapter))
+ issue_nulldata(padapter, NULL, 0, 3, 500);
+
+ val8 = 0; /* survey done */
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+
+ report_surveydone_event(padapter);
+
+ pmlmeext->chan_scan_time = SURVEY_TO;
+ pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
+
+ issue_action_BSSCoexistPacket(padapter);
+ issue_action_BSSCoexistPacket(padapter);
+ issue_action_BSSCoexistPacket(padapter);
+ }
+ }
+ return;
+}
+
+/* collect bss info from Beacon and Probe request/response frames. */
+u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame, struct wlan_bssid_ex *bssid)
+{
+ int i;
+ u32 len;
+ u8 *p;
+ u16 val16, subtype;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ u32 packet_len = precv_frame->u.hdr.len;
+ u8 ie_offset;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ __le32 le32_tmp;
+
+ len = packet_len - sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ if (len > MAX_IE_SZ)
+ return _FAIL;
+
+ _rtw_memset(bssid, 0, sizeof(struct wlan_bssid_ex));
+
+ subtype = GetFrameSubType(pframe);
+
+ if (subtype == WIFI_BEACON) {
+ bssid->Reserved[0] = 1;
+ ie_offset = _BEACON_IE_OFFSET_;
+ } else {
+ /* FIXME : more type */
+ if (subtype == WIFI_PROBEREQ) {
+ ie_offset = _PROBEREQ_IE_OFFSET_;
+ bssid->Reserved[0] = 2;
+ } else if (subtype == WIFI_PROBERSP) {
+ ie_offset = _PROBERSP_IE_OFFSET_;
+ bssid->Reserved[0] = 3;
+ } else {
+ bssid->Reserved[0] = 0;
+ ie_offset = _FIXED_IE_LENGTH_;
+ }
+ }
+
+ bssid->Length = sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + len;
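+ /* wlan_bssid_ex ends with a MAX_IE_SZ buffer; report only the part actually used by the copied IEs */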
+
+ /* copy the information elements */
+ bssid->IELength = len;
+ memcpy(bssid->IEs, (pframe + sizeof(struct rtw_ieee80211_hdr_3addr)), bssid->IELength);
+
+ /* get the signal strength */
+ bssid->Rssi = precv_frame->u.hdr.attrib.phy_info.recvpower; /* in dBm, raw data */
+ bssid->PhyInfo.SignalQuality = precv_frame->u.hdr.attrib.phy_info.SignalQuality;/* in percentage */
+ bssid->PhyInfo.SignalStrength = precv_frame->u.hdr.attrib.phy_info.SignalStrength;/* in percentage */
+ rtw_hal_get_def_var(padapter, HAL_DEF_CURRENT_ANTENNA, &bssid->PhyInfo.Optimum_antenna);
+
+ /* checking SSID */
+ p = rtw_get_ie(bssid->IEs + ie_offset, _SSID_IE_, &len, bssid->IELength - ie_offset);
+ if (p == NULL) {
+ DBG_88E("marc: cannot find SSID for survey event\n");
+ return _FAIL;
+ }
+
+ if (*(p + 1)) {
+ if (len > NDIS_802_11_LENGTH_SSID) {
+ DBG_88E("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
+ return _FAIL;
+ }
+ memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
+ bssid->Ssid.SsidLength = *(p + 1);
+ } else {
+ bssid->Ssid.SsidLength = 0;
+ }
+
+ _rtw_memset(bssid->SupportedRates, 0, NDIS_802_11_LENGTH_RATES_EX);
+
+ /* checking rate info... */
+ i = 0;
+ p = rtw_get_ie(bssid->IEs + ie_offset, _SUPPORTEDRATES_IE_, &len, bssid->IELength - ie_offset);
+ if (p != NULL) {
+ if (len > NDIS_802_11_LENGTH_RATES_EX) {
+ DBG_88E("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
+ return _FAIL;
+ }
+ memcpy(bssid->SupportedRates, (p + 2), len);
+ i = len;
+ }
+
+ p = rtw_get_ie(bssid->IEs + ie_offset, _EXT_SUPPORTEDRATES_IE_, &len, bssid->IELength - ie_offset);
+ if (p != NULL) {
+ if (len > (NDIS_802_11_LENGTH_RATES_EX-i)) {
+ DBG_88E("%s()-%d: IE too long (%d) for survey event\n", __func__, __LINE__, len);
+ return _FAIL;
+ }
+ memcpy(bssid->SupportedRates + i, (p + 2), len);
+ }
+
+ /* todo: */
+ bssid->NetworkTypeInUse = Ndis802_11OFDM24;
+
+ if (bssid->IELength < 12)
+ return _FAIL;
+
+ /* Checking for DSConfig */
+ p = rtw_get_ie(bssid->IEs + ie_offset, _DSSET_IE_, &len, bssid->IELength - ie_offset);
+
+ bssid->Configuration.DSConfig = 0;
+ bssid->Configuration.Length = 0;
+
+ if (p) {
+ bssid->Configuration.DSConfig = *(p + 2);
+ } else {/* In 5 GHz, some APs do not carry the DSSET IE */
+ /* checking HT info for channel */
+ p = rtw_get_ie(bssid->IEs + ie_offset, _HT_ADD_INFO_IE_, &len, bssid->IELength - ie_offset);
+ if (p) {
+ struct HT_info_element *HT_info = (struct HT_info_element *)(p + 2);
+ bssid->Configuration.DSConfig = HT_info->primary_channel;
+ } else { /* use current channel */
+ bssid->Configuration.DSConfig = rtw_get_oper_ch(padapter);
+ }
+ }
+
+ if (subtype == WIFI_PROBEREQ) {
+ /* FIXME */
+ bssid->InfrastructureMode = Ndis802_11Infrastructure;
+ memcpy(bssid->MacAddress, GetAddr2Ptr(pframe), ETH_ALEN);
+ bssid->Privacy = 1;
+ return _SUCCESS;
+ }
+
+ memcpy(&le32_tmp, rtw_get_beacon_interval_from_ie(bssid->IEs), 2);
+ bssid->Configuration.BeaconPeriod = le32_to_cpu(le32_tmp);
+
+ val16 = rtw_get_capability((struct wlan_bssid_ex *)bssid);
+
+ if (val16 & BIT(0)) {
+ bssid->InfrastructureMode = Ndis802_11Infrastructure;
+ memcpy(bssid->MacAddress, GetAddr2Ptr(pframe), ETH_ALEN);
+ } else {
+ bssid->InfrastructureMode = Ndis802_11IBSS;
+ memcpy(bssid->MacAddress, GetAddr3Ptr(pframe), ETH_ALEN);
+ }
+
+ if (val16 & BIT(4))
+ bssid->Privacy = 1;
+ else
+ bssid->Privacy = 0;
+
+ bssid->Configuration.ATIMWindow = 0;
+
+ /* 20/40 BSS Coexistence check */
+ if ((pregistrypriv->wifi_spec == 1) && (!pmlmeinfo->bwmode_updated)) {
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ p = rtw_get_ie(bssid->IEs + ie_offset, _HT_CAPABILITY_IE_, &len, bssid->IELength - ie_offset);
+ if (p && len > 0) {
+ struct HT_caps_element *pHT_caps;
+ pHT_caps = (struct HT_caps_element *)(p + 2);
+
+ if (le16_to_cpu(pHT_caps->u.HT_cap_element.HT_caps_info)&BIT(14))
+ pmlmepriv->num_FortyMHzIntolerant++;
+ } else {
+ pmlmepriv->num_sta_no_ht++;
+ }
+ }
+
+ /* mark BSS info received from a nearby channel with SignalQuality 101 */
+ if (bssid->Configuration.DSConfig != rtw_get_oper_ch(padapter))
+ bssid->PhyInfo.SignalQuality = 101;
+ return _SUCCESS;
+}
+
+void start_create_ibss(struct adapter *padapter)
+{
+ unsigned short caps;
+ u8 val8;
+ u8 join_type;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
+ pmlmeext->cur_channel = (u8)pnetwork->Configuration.DSConfig;
+ pmlmeinfo->bcn_interval = get_beacon_interval(pnetwork);
+
+ /* update wireless mode */
+ update_wireless_mode(padapter);
+
+ /* update capability */
+ caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
+ update_capinfo(padapter, caps);
+ if (caps&cap_IBSS) {/* adhoc master */
+ val8 = 0xcf;
+ rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
+
+ /* switch channel */
+ /* SelectChannel(padapter, pmlmeext->cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE); */
+ set_channel_bwmode(padapter, pmlmeext->cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+
+ beacon_timing_control(padapter);
+
+ /* set msr to WIFI_FW_ADHOC_STATE */
+ pmlmeinfo->state = WIFI_FW_ADHOC_STATE;
+ Set_MSR(padapter, (pmlmeinfo->state & 0x3));
+
+ /* issue beacon */
+ if (send_beacon(padapter) == _FAIL) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_, ("issuing beacon frame fail....\n"));
+
+ report_join_res(padapter, -1);
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ } else {
+ rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, padapter->registrypriv.dev_network.MacAddress);
+ join_type = 0;
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+
+ report_join_res(padapter, 1);
+ pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
+ }
+ } else {
+ DBG_88E("start_create_ibss, invalid cap:%x\n", caps);
+ return;
+ }
+}
+
+void start_clnt_join(struct adapter *padapter)
+{
+ unsigned short caps;
+ u8 val8;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
+ int beacon_timeout;
+
+ pmlmeext->cur_channel = (u8)pnetwork->Configuration.DSConfig;
+ pmlmeinfo->bcn_interval = get_beacon_interval(pnetwork);
+
+ /* update wireless mode */
+ update_wireless_mode(padapter);
+
+ /* update capability */
+ caps = rtw_get_capability((struct wlan_bssid_ex *)pnetwork);
+ update_capinfo(padapter, caps);
+ if (caps&cap_ESS) {
+ Set_MSR(padapter, WIFI_FW_STATION_STATE);
+
+ val8 = (pmlmeinfo->auth_algo == dot11AuthAlgrthm_8021X) ? 0xcc : 0xcf;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
+
+ /* switch channel */
+ set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+
+ /* here wait for receiving the beacon to start auth */
+ /* and enable a timer */
+ beacon_timeout = decide_wait_for_beacon_timeout(pmlmeinfo->bcn_interval);
+ set_link_timer(pmlmeext, beacon_timeout);
+ _set_timer(&padapter->mlmepriv.assoc_timer,
+ (REAUTH_TO * REAUTH_LIMIT) + (REASSOC_TO*REASSOC_LIMIT) + beacon_timeout);
+
+ pmlmeinfo->state = WIFI_FW_AUTH_NULL | WIFI_FW_STATION_STATE;
+ } else if (caps&cap_IBSS) { /* adhoc client */
+ Set_MSR(padapter, WIFI_FW_ADHOC_STATE);
+
+ val8 = 0xcf;
+ rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
+
+ /* switch channel */
+ set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+
+ beacon_timing_control(padapter);
+
+ pmlmeinfo->state = WIFI_FW_ADHOC_STATE;
+
+ report_join_res(padapter, 1);
+ } else {
+ return;
+ }
+}
+
+void start_clnt_auth(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ _cancel_timer_ex(&pmlmeext->link_timer);
+
+ pmlmeinfo->state &= (~WIFI_FW_AUTH_NULL);
+ pmlmeinfo->state |= WIFI_FW_AUTH_STATE;
+
+ pmlmeinfo->auth_seq = 1;
+ pmlmeinfo->reauth_count = 0;
+ pmlmeinfo->reassoc_count = 0;
+ pmlmeinfo->link_count = 0;
+ pmlmeext->retry = 0;
+
+
+ /* If the AP did not receive our previous deauth, */
+ /* it may 1) not respond to auth or 2) deauth us after the link is complete. */
+ /* Issue a deauth before issuing auth to handle this situation. */
+ /* Commented by Albert 2012/07/21 */
+ /* For the Win8 P2P connection, it is hard to connect successfully if this Wi-Fi device does not connect to it. */
+ issue_deauth(padapter, (&(pmlmeinfo->network))->MacAddress, WLAN_REASON_DEAUTH_LEAVING);
+
+ DBG_88E_LEVEL(_drv_info_, "start auth\n");
+ issue_auth(padapter, NULL, 0);
+
+ set_link_timer(pmlmeext, REAUTH_TO);
+}
+
+
+void start_clnt_assoc(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ _cancel_timer_ex(&pmlmeext->link_timer);
+
+ pmlmeinfo->state &= (~(WIFI_FW_AUTH_NULL | WIFI_FW_AUTH_STATE));
+ pmlmeinfo->state |= (WIFI_FW_AUTH_SUCCESS | WIFI_FW_ASSOC_STATE);
+
+ issue_assocreq(padapter);
+
+ set_link_timer(pmlmeext, REASSOC_TO);
+}
+
+unsigned int receive_disconnect(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ /* check A3 */
+ if (!(_rtw_memcmp(MacAddr, get_my_bssid(&pmlmeinfo->network), ETH_ALEN)))
+ return _SUCCESS;
+
+ DBG_88E("%s\n", __func__);
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE) {
+ if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ report_del_sta_event(padapter, MacAddr, reason);
+ } else if (pmlmeinfo->state & WIFI_FW_LINKING_STATE) {
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ report_join_res(padapter, -2);
+ }
+ }
+ return _SUCCESS;
+}
+
+static void process_80211d(struct adapter *padapter, struct wlan_bssid_ex *bssid)
+{
+ struct registry_priv *pregistrypriv;
+ struct mlme_ext_priv *pmlmeext;
+ struct rt_channel_info *chplan_new;
+ u8 channel;
+ u8 i;
+
+ pregistrypriv = &padapter->registrypriv;
+ pmlmeext = &padapter->mlmeextpriv;
+
+ /* Adjust channel plan by AP Country IE */
+ if (pregistrypriv->enable80211d &&
+ (!pmlmeext->update_channel_plan_by_ap_done)) {
+ u8 *ie, *p;
+ u32 len;
+ struct rt_channel_plan chplan_ap;
+ struct rt_channel_info chplan_sta[MAX_CHANNEL_NUM];
+ u8 country[4];
+ u8 fcn; /* first channel number */
+ u8 noc; /* number of channels */
+ u8 j, k;
+
+ ie = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _COUNTRY_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ if (!ie)
+ return;
+ if (len < 6)
+ return;
+ ie += 2;
+ p = ie;
+ ie += len;
+
+ _rtw_memset(country, 0, 4);
+ memcpy(country, p, 3);
+ p += 3;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
+ ("%s: 802.11d country =%s\n", __func__, country));
+
+ i = 0;
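+ /* each country IE triplet is 3 bytes: first channel, number of channels, max tx power (the last is skipped here) */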
+ while ((ie - p) >= 3) {
+ fcn = *(p++);
+ noc = *(p++);
+ p++;
+
+ for (j = 0; j < noc; j++) {
+ if (fcn <= 14)
+ channel = fcn + j; /* 2.4 GHz */
+ else
+ channel = fcn + j*4; /* 5 GHz */
+
+ chplan_ap.Channel[i++] = channel;
+ }
+ }
+ chplan_ap.Len = i;
+
+ memcpy(chplan_sta, pmlmeext->channel_set, sizeof(chplan_sta));
+
+ _rtw_memset(pmlmeext->channel_set, 0, sizeof(pmlmeext->channel_set));
+ chplan_new = pmlmeext->channel_set;
+
+ i = 0;
+ j = 0;
+ k = 0;
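+ /* merge the STA and AP 2.4G channel lists: channels present in both stay active scan, STA-only channels become passive scan, and channels only the AP advertises are added as active scan */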
+ if (pregistrypriv->wireless_mode & WIRELESS_11G) {
+ do {
+ if ((i == MAX_CHANNEL_NUM) ||
+ (chplan_sta[i].ChannelNum == 0) ||
+ (chplan_sta[i].ChannelNum > 14))
+ break;
+
+ if ((j == chplan_ap.Len) || (chplan_ap.Channel[j] > 14))
+ break;
+
+ if (chplan_sta[i].ChannelNum == chplan_ap.Channel[j]) {
+ chplan_new[k].ChannelNum = chplan_ap.Channel[j];
+ chplan_new[k].ScanType = SCAN_ACTIVE;
+ i++;
+ j++;
+ k++;
+ } else if (chplan_sta[i].ChannelNum < chplan_ap.Channel[j]) {
+ chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
+ chplan_new[k].ScanType = SCAN_PASSIVE;
+ i++;
+ k++;
+ } else if (chplan_sta[i].ChannelNum > chplan_ap.Channel[j]) {
+ chplan_new[k].ChannelNum = chplan_ap.Channel[j];
+ chplan_new[k].ScanType = SCAN_ACTIVE;
+ j++;
+ k++;
+ }
+ } while (1);
+
+ /* mark channels the AP does not support for passive scan */
+ while ((i < MAX_CHANNEL_NUM) &&
+ (chplan_sta[i].ChannelNum != 0) &&
+ (chplan_sta[i].ChannelNum <= 14)) {
+ chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
+ chplan_new[k].ScanType = SCAN_PASSIVE;
+ i++;
+ k++;
+ }
+
+ /* add channels supported by the AP */
+ while ((j < chplan_ap.Len) && (chplan_ap.Channel[j] <= 14)) {
+ chplan_new[k].ChannelNum = chplan_ap.Channel[j];
+ chplan_new[k].ScanType = SCAN_ACTIVE;
+ j++;
+ k++;
+ }
+ } else {
+ /* keep original STA 2.4G channel plan */
+ while ((i < MAX_CHANNEL_NUM) &&
+ (chplan_sta[i].ChannelNum != 0) &&
+ (chplan_sta[i].ChannelNum <= 14)) {
+ chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
+ chplan_new[k].ScanType = chplan_sta[i].ScanType;
+ i++;
+ k++;
+ }
+
+ /* skip AP 2.4G channel plan */
+ while ((j < chplan_ap.Len) && (chplan_ap.Channel[j] <= 14))
+ j++;
+ }
+
+ /* keep original STA 5G channel plan */
+ while ((i < MAX_CHANNEL_NUM) && (chplan_sta[i].ChannelNum != 0)) {
+ chplan_new[k].ChannelNum = chplan_sta[i].ChannelNum;
+ chplan_new[k].ScanType = chplan_sta[i].ScanType;
+ i++;
+ k++;
+ }
+
+ pmlmeext->update_channel_plan_by_ap_done = 1;
+ }
+
+ /* If channel is used by AP, set channel scan type to active */
+ channel = bssid->Configuration.DSConfig;
+ chplan_new = pmlmeext->channel_set;
+ i = 0;
+ while ((i < MAX_CHANNEL_NUM) && (chplan_new[i].ChannelNum != 0)) {
+ if (chplan_new[i].ChannelNum == channel) {
+ if (chplan_new[i].ScanType == SCAN_PASSIVE) {
+ chplan_new[i].ScanType = SCAN_ACTIVE;
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_,
+ ("%s: change channel %d scan type from passive to active\n",
+ __func__, channel));
+ }
+ break;
+ }
+ i++;
+ }
+}
+
+/****************************************************************************
+
+Following are the functions to report events
+
+*****************************************************************************/
+
+void report_survey_event(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ struct cmd_obj *pcmd_obj;
+ u8 *pevtcmd;
+ u32 cmdsz;
+ struct survey_event *psurvey_evt;
+ struct C2HEvent_Header *pc2h_evt_hdr;
+ struct mlme_ext_priv *pmlmeext;
+ struct cmd_priv *pcmdpriv;
+ /* u8 *pframe = precv_frame->u.hdr.rx_data; */
+ /* uint len = precv_frame->u.hdr.len; */
+
+ if (!padapter)
+ return;
+
+ pmlmeext = &padapter->mlmeextpriv;
+ pcmdpriv = &padapter->cmdpriv;
+
+
+ pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd_obj == NULL)
+ return;
+
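+ /* the event command buffer holds a C2HEvent_Header followed by the survey_event payload */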
+ cmdsz = (sizeof(struct survey_event) + sizeof(struct C2HEvent_Header));
+ pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ if (pevtcmd == NULL) {
+ kfree(pcmd_obj);
+ return;
+ }
+
+ _rtw_init_listhead(&pcmd_obj->list);
+
+ pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdsz = cmdsz;
+ pcmd_obj->parmbuf = pevtcmd;
+
+ pcmd_obj->rsp = NULL;
+ pcmd_obj->rspsz = 0;
+
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct survey_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_Survey);
+ pc2h_evt_hdr->seq = ATOMIC_INC_RETURN(&pmlmeext->event_seq);
+
+ psurvey_evt = (struct survey_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+
+ if (collect_bss_info(padapter, precv_frame, (struct wlan_bssid_ex *)&psurvey_evt->bss) == _FAIL) {
+ kfree(pcmd_obj);
+ kfree(pevtcmd);
+ return;
+ }
+
+ process_80211d(padapter, &psurvey_evt->bss);
+
+ rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
+
+ pmlmeext->sitesurvey_res.bss_cnt++;
+
+ return;
+}
+
+void report_surveydone_event(struct adapter *padapter)
+{
+ struct cmd_obj *pcmd_obj;
+ u8 *pevtcmd;
+ u32 cmdsz;
+ struct surveydone_event *psurveydone_evt;
+ struct C2HEvent_Header *pc2h_evt_hdr;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd_obj == NULL)
+ return;
+
+ cmdsz = (sizeof(struct surveydone_event) + sizeof(struct C2HEvent_Header));
+ pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ if (pevtcmd == NULL) {
+ kfree(pcmd_obj);
+ return;
+ }
+
+ _rtw_init_listhead(&pcmd_obj->list);
+
+ pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdsz = cmdsz;
+ pcmd_obj->parmbuf = pevtcmd;
+
+ pcmd_obj->rsp = NULL;
+ pcmd_obj->rspsz = 0;
+
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct surveydone_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_SurveyDone);
+ pc2h_evt_hdr->seq = ATOMIC_INC_RETURN(&pmlmeext->event_seq);
+
+ psurveydone_evt = (struct surveydone_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+ psurveydone_evt->bss_cnt = pmlmeext->sitesurvey_res.bss_cnt;
+
+ DBG_88E("survey done event(%x)\n", psurveydone_evt->bss_cnt);
+
+ rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
+
+ return;
+}
+
+void report_join_res(struct adapter *padapter, int res)
+{
+ struct cmd_obj *pcmd_obj;
+ u8 *pevtcmd;
+ u32 cmdsz;
+ struct joinbss_event *pjoinbss_evt;
+ struct C2HEvent_Header *pc2h_evt_hdr;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd_obj == NULL)
+ return;
+
+ cmdsz = (sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header));
+ pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ if (pevtcmd == NULL) {
+ kfree(pcmd_obj);
+ return;
+ }
+
+ _rtw_init_listhead(&pcmd_obj->list);
+
+ pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdsz = cmdsz;
+ pcmd_obj->parmbuf = pevtcmd;
+
+ pcmd_obj->rsp = NULL;
+ pcmd_obj->rspsz = 0;
+
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct joinbss_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_JoinBss);
+ pc2h_evt_hdr->seq = ATOMIC_INC_RETURN(&pmlmeext->event_seq);
+
+ pjoinbss_evt = (struct joinbss_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+ memcpy((unsigned char *)(&(pjoinbss_evt->network.network)), &(pmlmeinfo->network), sizeof(struct wlan_bssid_ex));
+ pjoinbss_evt->network.join_res = res;
+ pjoinbss_evt->network.aid = res;
+
+ DBG_88E("report_join_res(%d)\n", res);
+
+
+ rtw_joinbss_event_prehandle(padapter, (u8 *)&pjoinbss_evt->network);
+
+
+ rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
+
+ return;
+}
+
+void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason)
+{
+ struct cmd_obj *pcmd_obj;
+ u8 *pevtcmd;
+ u32 cmdsz;
+ struct sta_info *psta;
+ int mac_id;
+ struct stadel_event *pdel_sta_evt;
+ struct C2HEvent_Header *pc2h_evt_hdr;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd_obj == NULL)
+ return;
+
+ cmdsz = (sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header));
+ pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ if (pevtcmd == NULL) {
+ kfree(pcmd_obj);
+ return;
+ }
+
+ _rtw_init_listhead(&pcmd_obj->list);
+
+ pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdsz = cmdsz;
+ pcmd_obj->parmbuf = pevtcmd;
+
+ pcmd_obj->rsp = NULL;
+ pcmd_obj->rspsz = 0;
+
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct stadel_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_DelSTA);
+ pc2h_evt_hdr->seq = ATOMIC_INC_RETURN(&pmlmeext->event_seq);
+
+ pdel_sta_evt = (struct stadel_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+ memcpy((unsigned char *)(&(pdel_sta_evt->macaddr)), MacAddr, ETH_ALEN);
+ memcpy((unsigned char *)(pdel_sta_evt->rsvd), (unsigned char *)(&reason), 2);
+
+
+ psta = rtw_get_stainfo(&padapter->stapriv, MacAddr);
+ if (psta)
+ mac_id = (int)psta->mac_id;
+ else
+ mac_id = (-1);
+
+ pdel_sta_evt->mac_id = mac_id;
+
+ DBG_88E("report_del_sta_event: delete STA, mac_id =%d\n", mac_id);
+
+ rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
+
+ return;
+}
+
+void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int cam_idx)
+{
+ struct cmd_obj *pcmd_obj;
+ u8 *pevtcmd;
+ u32 cmdsz;
+ struct stassoc_event *padd_sta_evt;
+ struct C2HEvent_Header *pc2h_evt_hdr;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+
+ pcmd_obj = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd_obj == NULL)
+ return;
+
+ cmdsz = (sizeof(struct stassoc_event) + sizeof(struct C2HEvent_Header));
+ pevtcmd = (u8 *)rtw_zmalloc(cmdsz);
+ if (pevtcmd == NULL) {
+ kfree(pcmd_obj);
+ return;
+ }
+
+ _rtw_init_listhead(&pcmd_obj->list);
+
+ pcmd_obj->cmdcode = GEN_CMD_CODE(_Set_MLME_EVT);
+ pcmd_obj->cmdsz = cmdsz;
+ pcmd_obj->parmbuf = pevtcmd;
+
+ pcmd_obj->rsp = NULL;
+ pcmd_obj->rspsz = 0;
+
+ pc2h_evt_hdr = (struct C2HEvent_Header *)(pevtcmd);
+ pc2h_evt_hdr->len = sizeof(struct stassoc_event);
+ pc2h_evt_hdr->ID = GEN_EVT_CODE(_AddSTA);
+ pc2h_evt_hdr->seq = ATOMIC_INC_RETURN(&pmlmeext->event_seq);
+
+ padd_sta_evt = (struct stassoc_event *)(pevtcmd + sizeof(struct C2HEvent_Header));
+ memcpy((unsigned char *)(&(padd_sta_evt->macaddr)), MacAddr, ETH_ALEN);
+ padd_sta_evt->cam_id = cam_idx;
+
+ DBG_88E("report_add_sta_event: add STA\n");
+
+ rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
+
+ return;
+}
+
+
+/****************************************************************************
+
+Following are the event callback functions
+
+*****************************************************************************/
+
+/* for sta/adhoc mode */
+void update_sta_info(struct adapter *padapter, struct sta_info *psta)
+{
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ /* ERP */
+ VCS_update(padapter, psta);
+
+ /* HT */
+ if (pmlmepriv->htpriv.ht_option) {
+ psta->htpriv.ht_option = true;
+
+ psta->htpriv.ampdu_enable = pmlmepriv->htpriv.ampdu_enable;
+
+ if (support_short_GI(padapter, &(pmlmeinfo->HT_caps)))
+ psta->htpriv.sgi = true;
+
+ psta->qos_option = true;
+ } else {
+ psta->htpriv.ht_option = false;
+
+ psta->htpriv.ampdu_enable = false;
+
+ psta->htpriv.sgi = false;
+ psta->qos_option = false;
+ }
+ psta->htpriv.bwmode = pmlmeext->cur_bwmode;
+ psta->htpriv.ch_offset = pmlmeext->cur_ch_offset;
+
+ psta->htpriv.agg_enable_bitmap = 0x0;/* reset */
+ psta->htpriv.candidate_tid_bitmap = 0x0;/* reset */
+
+ /* QoS */
+ if (pmlmepriv->qospriv.qos_option)
+ psta->qos_option = true;
+
+
+ psta->state = _FW_LINKED;
+}
+
+void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
+{
+ struct sta_info *psta, *psta_bmc;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 join_type;
+ u16 media_status;
+
+ if (join_res < 0) {
+ join_type = 1;
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, null_addr);
+
+ /* restore to initial setting. */
+ update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
+
+ goto exit_mlmeext_joinbss_event_callback;
+ }
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) {
+ /* for bc/mc */
+ psta_bmc = rtw_get_bcmc_stainfo(padapter);
+ if (psta_bmc) {
+ pmlmeinfo->FW_sta_info[psta_bmc->mac_id].psta = psta_bmc;
+ update_bmc_sta_support_rate(padapter, psta_bmc->mac_id);
+ Update_RA_Entry(padapter, psta_bmc->mac_id);
+ }
+ }
+
+
+ /* turn on dynamic functions */
+ Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
+
+ /* update IOT-related issues */
+ update_IOT_info(padapter);
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_BASIC_RATE, cur_network->SupportedRates);
+
+ /* BCN interval */
+ rtw_hal_set_hwreg(padapter, HW_VAR_BEACON_INTERVAL, (u8 *)(&pmlmeinfo->bcn_interval));
+
+ /* update capability */
+ update_capinfo(padapter, pmlmeinfo->capability);
+
+ /* WMM, Update EDCA param */
+ WMMOnAssocRsp(padapter);
+
+ /* HT */
+ HTOnAssocRsp(padapter);
+
+ set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+
+ psta = rtw_get_stainfo(pstapriv, cur_network->MacAddress);
+ if (psta) { /* only for infra. mode */
+ pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
+
+ psta->wireless_mode = pmlmeext->cur_wireless_mode;
+
+ /* set per sta rate after updating HT cap. */
+ set_sta_rate(padapter, psta);
+ rtw_hal_set_hwreg(padapter, HW_VAR_TX_RPT_MAX_MACID, (u8 *)&psta->mac_id);
+ media_status = (psta->mac_id<<8)|1; /* MACID|OPMODE: 1 means connect */
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
+ }
+
+ join_type = 2;
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE) {
+ /* correcting TSF */
+ correct_TSF(padapter, pmlmeext);
+ }
+ rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_CONNECT, 0);
+
+exit_mlmeext_joinbss_event_callback:
+
+ DBG_88E("=>%s\n", __func__);
+}
+
+void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *psta)
+{
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 join_type;
+
+ DBG_88E("%s\n", __func__);
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) {
+ if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {/* adhoc master or sta_count>1 */
+ /* nothing to do */
+ } else { /* adhoc client */
+ /* correcting TSF */
+ correct_TSF(padapter, pmlmeext);
+
+ /* start beacon */
+ if (send_beacon(padapter) == _FAIL) {
+ pmlmeinfo->FW_sta_info[psta->mac_id].status = 0;
+ pmlmeinfo->state ^= WIFI_FW_ADHOC_STATE;
+ return;
+ }
+ pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
+ }
+
+ join_type = 2;
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ }
+
+ pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
+
+ /* rate adaptive */
+ Update_RA_Entry(padapter, psta->mac_id);
+
+ /* update adhoc sta_info */
+ update_sta_info(padapter, psta);
+}
+
+void mlmeext_sta_del_event_callback(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (is_client_associated_to_ap(padapter) || is_IBSS_empty(padapter)) {
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_DISCONNECT, NULL);
+ rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, null_addr);
+
+ /* restore to initial setting. */
+ update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
+
+ /* switch to the 20MHz mode after disconnect */
+ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+
+ /* SelectChannel(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset); */
+ set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+
+
+ flush_all_cam_entry(padapter);
+
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+
+ /* set MSR to no link state -> infra. mode */
+ Set_MSR(padapter, _HW_STATE_STATION_);
+
+ _cancel_timer_ex(&pmlmeext->link_timer);
+ }
+}
+
+/****************************************************************************
+
+Following are the functions for the timer handlers
+
+*****************************************************************************/
+void _linked_rx_signal_strehgth_display(struct adapter *padapter);
+void _linked_rx_signal_strehgth_display(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 mac_id;
+ int UndecoratedSmoothedPWDB;
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE)
+ mac_id = 0;
+ else if ((pmlmeinfo->state&0x03) == _HW_STATE_AP_)
+ mac_id = 2;
+
+ rtw_hal_get_def_var(padapter, HW_DEF_RA_INFO_DUMP, &mac_id);
+
+ rtw_hal_get_def_var(padapter, HAL_DEF_UNDERCORATEDSMOOTHEDPWDB, &UndecoratedSmoothedPWDB);
+ DBG_88E("UndecoratedSmoothedPWDB:%d\n", UndecoratedSmoothedPWDB);
+}
+
+static u8 chk_ap_is_alive(struct adapter *padapter, struct sta_info *psta)
+{
+ u8 ret = false;
+
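+ /* the AP is treated as alive if any of the data, beacon, or probe-rsp rx counters advanced since the last check */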
+ if ((sta_rx_data_pkts(psta) == sta_last_rx_data_pkts(psta)) &&
+ sta_rx_beacon_pkts(psta) == sta_last_rx_beacon_pkts(psta) &&
+ sta_rx_probersp_pkts(psta) == sta_last_rx_probersp_pkts(psta))
+ ret = false;
+ else
+ ret = true;
+
+ sta_update_last_rx_pkts(psta);
+
+ return ret;
+}
+
+void linked_status_chk(struct adapter *padapter)
+{
+ u32 i;
+ struct sta_info *psta;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ if (padapter->bRxRSSIDisplay)
+ _linked_rx_signal_strehgth_display(padapter);
+
+ rtw_hal_sreset_linked_status_check(padapter);
+
+ if (is_client_associated_to_ap(padapter)) {
+ /* linked infrastructure client mode */
+
+ int tx_chk = _SUCCESS, rx_chk = _SUCCESS;
+ int rx_chk_limit;
+
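+ /* tolerate up to rx_chk_limit consecutive failed rx checks before reporting a disconnect */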
+ rx_chk_limit = 4;
+ psta = rtw_get_stainfo(pstapriv, pmlmeinfo->network.MacAddress);
+ if (psta != NULL) {
+ bool is_p2p_enable = false;
+ #ifdef CONFIG_88EU_P2P
+ is_p2p_enable = !rtw_p2p_chk_state(&padapter->wdinfo, P2P_STATE_NONE);
+ #endif
+
+ if (!chk_ap_is_alive(padapter, psta))
+ rx_chk = _FAIL;
+
+ if (pxmitpriv->last_tx_pkts == pxmitpriv->tx_pkts)
+ tx_chk = _FAIL;
+
+ if (pmlmeext->active_keep_alive_check && (rx_chk == _FAIL || tx_chk == _FAIL)) {
+ u8 backup_oper_channel = 0;
+
+ /* switch to the correct channel of the current network before issuing keep-alive frames */
+ if (rtw_get_oper_ch(padapter) != pmlmeext->cur_channel) {
+ backup_oper_channel = rtw_get_oper_ch(padapter);
+ SelectChannel(padapter, pmlmeext->cur_channel);
+ }
+
+ if (rx_chk != _SUCCESS)
+ issue_probereq_ex(padapter, &pmlmeinfo->network.Ssid, psta->hwaddr, 3, 1);
+
+ if ((tx_chk != _SUCCESS && pmlmeinfo->link_count++ == 0xf) || rx_chk != _SUCCESS) {
+ tx_chk = issue_nulldata(padapter, psta->hwaddr, 0, 3, 1);
+ /* if tx acked and p2p disabled, set rx_chk _SUCCESS to reset retry count */
+ if (tx_chk == _SUCCESS && !is_p2p_enable)
+ rx_chk = _SUCCESS;
+ }
+
+ /* back to the original operation channel */
+ if (backup_oper_channel > 0)
+ SelectChannel(padapter, backup_oper_channel);
+ } else {
+ if (rx_chk != _SUCCESS) {
+ if (pmlmeext->retry == 0) {
+ issue_probereq(padapter, &pmlmeinfo->network.Ssid, pmlmeinfo->network.MacAddress);
+ issue_probereq(padapter, &pmlmeinfo->network.Ssid, pmlmeinfo->network.MacAddress);
+ issue_probereq(padapter, &pmlmeinfo->network.Ssid, pmlmeinfo->network.MacAddress);
+ }
+ }
+
+ if (tx_chk != _SUCCESS && pmlmeinfo->link_count++ == 0xf) {
+ tx_chk = issue_nulldata(padapter, NULL, 0, 1, 0);
+ }
+ }
+
+ if (rx_chk == _FAIL) {
+ pmlmeext->retry++;
+ if (pmlmeext->retry > rx_chk_limit) {
+ DBG_88E_LEVEL(_drv_always_, FUNC_ADPT_FMT" disconnect or roaming\n",
+ FUNC_ADPT_ARG(padapter));
+ receive_disconnect(padapter, pmlmeinfo->network.MacAddress,
+ WLAN_REASON_EXPIRATION_CHK);
+ return;
+ }
+ } else {
+ pmlmeext->retry = 0;
+ }
+
+ if (tx_chk == _FAIL) {
+ pmlmeinfo->link_count &= 0xf;
+ } else {
+ pxmitpriv->last_tx_pkts = pxmitpriv->tx_pkts;
+ pmlmeinfo->link_count = 0;
+ }
+ } /* end of if ((psta = rtw_get_stainfo(pstapriv, passoc_res->network.MacAddress)) != NULL) */
+ } else if (is_client_associated_to_ibss(padapter)) {
+ /* linked IBSS mode */
+ /* check the rx pkt counter for each assoc list entry */
+ for (i = IBSS_START_MAC_ID; i < NUM_STA; i++) {
+ if (pmlmeinfo->FW_sta_info[i].status == 1) {
+ psta = pmlmeinfo->FW_sta_info[i].psta;
+
+ if (NULL == psta)
+ continue;
+ if (pmlmeinfo->FW_sta_info[i].rx_pkt == sta_rx_pkts(psta)) {
+ if (pmlmeinfo->FW_sta_info[i].retry < 3) {
+ pmlmeinfo->FW_sta_info[i].retry++;
+ } else {
+ pmlmeinfo->FW_sta_info[i].retry = 0;
+ pmlmeinfo->FW_sta_info[i].status = 0;
+ report_del_sta_event(padapter, psta->hwaddr
+ , 65535/* indicate disconnect caused by no rx */
+ );
+ }
+ } else {
+ pmlmeinfo->FW_sta_info[i].retry = 0;
+ pmlmeinfo->FW_sta_info[i].rx_pkt = (u32)sta_rx_pkts(psta);
+ }
+ }
+ }
+ }
+}
+
+void survey_timer_hdl(struct adapter *padapter)
+{
+ struct cmd_obj *ph2c;
+ struct sitesurvey_parm *psurveyPara;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+#endif
+
+ /* issue rtw_sitesurvey_cmd */
+ if (pmlmeext->sitesurvey_res.state > SCAN_START) {
+ if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS)
+ pmlmeext->sitesurvey_res.channel_idx++;
+
+ if (pmlmeext->scan_abort) {
+ #ifdef CONFIG_88EU_P2P
+ if (!rtw_p2p_chk_state(&padapter->wdinfo, P2P_STATE_NONE)) {
+ rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_MAX);
+ pmlmeext->sitesurvey_res.channel_idx = 3;
+ DBG_88E("%s idx:%d, cnt:%u\n", __func__
+ , pmlmeext->sitesurvey_res.channel_idx
+ , pwdinfo->find_phase_state_exchange_cnt
+ );
+ } else
+ #endif
+ {
+ pmlmeext->sitesurvey_res.channel_idx = pmlmeext->sitesurvey_res.ch_num;
+ DBG_88E("%s idx:%d\n", __func__
+ , pmlmeext->sitesurvey_res.channel_idx
+ );
+ }
+
+ pmlmeext->scan_abort = false;/* reset */
+ }
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL)
+ goto exit_survey_timer_hdl;
+
+ psurveyPara = (struct sitesurvey_parm *)rtw_zmalloc(sizeof(struct sitesurvey_parm));
+ if (psurveyPara == NULL) {
+ kfree(ph2c);
+ goto exit_survey_timer_hdl;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara, GEN_CMD_CODE(_SiteSurvey));
+ rtw_enqueue_cmd(pcmdpriv, ph2c);
+ }
+
+
+exit_survey_timer_hdl:
+ return;
+}
+
+void link_timer_hdl(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (pmlmeinfo->state & WIFI_FW_AUTH_NULL) {
+ DBG_88E("link_timer_hdl:no beacon while connecting\n");
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ report_join_res(padapter, -3);
+ } else if (pmlmeinfo->state & WIFI_FW_AUTH_STATE) {
+ /* re-auth timer */
+ if (++pmlmeinfo->reauth_count > REAUTH_LIMIT) {
+ pmlmeinfo->state = 0;
+ report_join_res(padapter, -1);
+ return;
+ }
+
+ DBG_88E("link_timer_hdl: auth timeout and try again\n");
+ pmlmeinfo->auth_seq = 1;
+ issue_auth(padapter, NULL, 0);
+ set_link_timer(pmlmeext, REAUTH_TO);
+ } else if (pmlmeinfo->state & WIFI_FW_ASSOC_STATE) {
+ /* re-assoc timer */
+ if (++pmlmeinfo->reassoc_count > REASSOC_LIMIT) {
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+ report_join_res(padapter, -2);
+ return;
+ }
+
+ DBG_88E("link_timer_hdl: assoc timeout and try again\n");
+ issue_assocreq(padapter);
+ set_link_timer(pmlmeext, REASSOC_TO);
+ }
+ return;
+}
+
+void addba_timer_hdl(struct sta_info *psta)
+{
+ struct ht_priv *phtpriv;
+
+ if (!psta)
+ return;
+
+ phtpriv = &psta->htpriv;
+
+ if ((phtpriv->ht_option) && (phtpriv->ampdu_enable)) {
+ if (phtpriv->candidate_tid_bitmap)
+ phtpriv->candidate_tid_bitmap = 0x0;
+ }
+}
+
+u8 NULL_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ return H2C_SUCCESS;
+}
+
+u8 setopmode_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ u8 type;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct setopmode_parm *psetop = (struct setopmode_parm *)pbuf;
+
+ if (psetop->mode == Ndis802_11APMode) {
+ pmlmeinfo->state = WIFI_FW_AP_STATE;
+ type = _HW_STATE_AP_;
+ } else if (psetop->mode == Ndis802_11Infrastructure) {
+ pmlmeinfo->state &= ~(BIT(0)|BIT(1));/* clear state */
+ pmlmeinfo->state |= WIFI_FW_STATION_STATE;/* set to STATION_STATE */
+ type = _HW_STATE_STATION_;
+ } else if (psetop->mode == Ndis802_11IBSS) {
+ type = _HW_STATE_ADHOC_;
+ } else {
+ type = _HW_STATE_NOLINK_;
+ }
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_SET_OPMODE, (u8 *)(&type));
+ /* Set_NETYPE0_MSR(padapter, type); */
+
+ return H2C_SUCCESS;
+}
+
+u8 createbss_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
+ struct joinbss_parm *pparm = (struct joinbss_parm *)pbuf;
+ /* u32 initialgain; */
+
+
+ if (pparm->network.InfrastructureMode == Ndis802_11APMode) {
+#ifdef CONFIG_88EU_AP_MODE
+
+ if (pmlmeinfo->state == WIFI_FW_AP_STATE) {
+ /* todo: */
+ return H2C_SUCCESS;
+ }
+#endif
+ }
+
+ /* below is for ad-hoc master */
+ if (pparm->network.InfrastructureMode == Ndis802_11IBSS) {
+ rtw_joinbss_reset(padapter);
+
+ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ pmlmeinfo->ERP_enable = 0;
+ pmlmeinfo->WMM_enable = 0;
+ pmlmeinfo->HT_enable = 0;
+ pmlmeinfo->HT_caps_enable = 0;
+ pmlmeinfo->HT_info_enable = 0;
+ pmlmeinfo->agg_enable_bitmap = 0;
+ pmlmeinfo->candidate_tid_bitmap = 0;
+
+ /* disable dynamic functions, such as high power, DIG */
+ Save_DM_Func_Flag(padapter);
+ Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);
+
+ /* config the initial gain under linking, need to write the BB registers */
+ /* initialgain = 0x1E; */
+ /* rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain)); */
+
+ /* cancel link timer */
+ _cancel_timer_ex(&pmlmeext->link_timer);
+
+ /* clear CAM */
+ flush_all_cam_entry(padapter);
+
+ memcpy(pnetwork, pbuf, FIELD_OFFSET(struct wlan_bssid_ex, IELength));
+ pnetwork->IELength = ((struct wlan_bssid_ex *)pbuf)->IELength;
+
+ if (pnetwork->IELength > MAX_IE_SZ)/* Check pbuf->IELength */
+ return H2C_PARAMETERS_ERROR;
+
+ memcpy(pnetwork->IEs, ((struct wlan_bssid_ex *)pbuf)->IEs, pnetwork->IELength);
+
+ start_create_ibss(padapter);
+ }
+
+ return H2C_SUCCESS;
+}
+
+u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ u8 join_type;
+ struct ndis_802_11_var_ie *pIE;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
+ struct joinbss_parm *pparm = (struct joinbss_parm *)pbuf;
+ u32 i;
+
+ /* check already connecting to AP or not */
+ if (pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) {
+ if (pmlmeinfo->state & WIFI_FW_STATION_STATE)
+ issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, 5, 100);
+
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+
+ /* clear CAM */
+ flush_all_cam_entry(padapter);
+
+ _cancel_timer_ex(&pmlmeext->link_timer);
+
+ /* set MSR to nolink -> infra. mode */
+ Set_MSR(padapter, _HW_STATE_STATION_);
+
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_DISCONNECT, NULL);
+ }
+
+ rtw_antenna_select_cmd(padapter, pparm->network.PhyInfo.Optimum_antenna, false);
+
+ rtw_joinbss_reset(padapter);
+
+ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ pmlmeinfo->ERP_enable = 0;
+ pmlmeinfo->WMM_enable = 0;
+ pmlmeinfo->HT_enable = 0;
+ pmlmeinfo->HT_caps_enable = 0;
+ pmlmeinfo->HT_info_enable = 0;
+ pmlmeinfo->agg_enable_bitmap = 0;
+ pmlmeinfo->candidate_tid_bitmap = 0;
+ pmlmeinfo->bwmode_updated = false;
+
+ memcpy(pnetwork, pbuf, FIELD_OFFSET(struct wlan_bssid_ex, IELength));
+ pnetwork->IELength = ((struct wlan_bssid_ex *)pbuf)->IELength;
+
+ if (pnetwork->IELength > MAX_IE_SZ)/* Check pbuf->IELength */
+ return H2C_PARAMETERS_ERROR;
+
+ memcpy(pnetwork->IEs, ((struct wlan_bssid_ex *)pbuf)->IEs, pnetwork->IELength);
+
+ /* Check AP vendor to move rtw_joinbss_cmd() */
+
+ for (i = sizeof(struct ndis_802_11_fixed_ie); i < pnetwork->IELength;) {
+ pIE = (struct ndis_802_11_var_ie *)(pnetwork->IEs + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:/* Get WMM IE. */
+ if (_rtw_memcmp(pIE->data, WMM_OUI, 4))
+ pmlmeinfo->WMM_enable = 1;
+ break;
+ case _HT_CAPABILITY_IE_: /* Get HT Cap IE. */
+ pmlmeinfo->HT_caps_enable = 1;
+ break;
+ case _HT_EXTRA_INFO_IE_: /* Get HT Info IE. */
+ pmlmeinfo->HT_info_enable = 1;
+
+ /* special case for Cisco APs, which issue the assoc rsp using an MCS rate at 40MHz or 20MHz */
+ {
+ struct HT_info_element *pht_info = (struct HT_info_element *)(pIE->data);
+
+ if ((pregpriv->cbw40_enable) && (pht_info->infos[0] & BIT(2))) {
+ /* switch to the 40MHz mode according to the AP */
+ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_40;
+ switch (pht_info->infos[0] & 0x3) {
+ case 1:
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
+ break;
+ case 3:
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
+ break;
+ default:
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ break;
+ }
+
+ DBG_88E("set ch/bw before connected\n");
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+ /* disable dynamic functions, such as high power, DIG */
+
+ /* config the initial gain under linking, need to write the BB registers */
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, pmlmeinfo->network.MacAddress);
+ join_type = 0;
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+
+ /* cancel link timer */
+ _cancel_timer_ex(&pmlmeext->link_timer);
+
+ start_clnt_join(padapter);
+
+ return H2C_SUCCESS;
+}
+
+u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ struct disconnect_parm *param = (struct disconnect_parm *)pbuf;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)(&(pmlmeinfo->network));
+ u8 val8;
+
+ if (is_client_associated_to_ap(padapter))
+ issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms/100, 100);
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_DISCONNECT, NULL);
+ rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, null_addr);
+
+ /* restore to initial setting. */
+ update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
+
+ if (((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)) {
+ /* Stop BCN */
+ val8 = 0;
+ rtw_hal_set_hwreg(padapter, HW_VAR_BCN_FUNC, (u8 *)(&val8));
+ }
+
+
+ /* set MSR to no link state -> infra. mode */
+ Set_MSR(padapter, _HW_STATE_STATION_);
+
+ pmlmeinfo->state = WIFI_FW_NULL_STATE;
+
+ /* switch to the 20MHz mode after disconnect */
+ pmlmeext->cur_bwmode = HT_CHANNEL_WIDTH_20;
+ pmlmeext->cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+
+ set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
+
+ flush_all_cam_entry(padapter);
+
+ _cancel_timer_ex(&pmlmeext->link_timer);
+
+ rtw_free_uc_swdec_pending_queue(padapter);
+
+ return H2C_SUCCESS;
+}
+
+static int rtw_scan_ch_decision(struct adapter *padapter, struct rtw_ieee80211_channel *out,
+ u32 out_num, struct rtw_ieee80211_channel *in, u32 in_num)
+{
+ int i, j;
+ int set_idx;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ /* clear out first */
+ _rtw_memset(out, 0, sizeof(struct rtw_ieee80211_channel)*out_num);
+
+ /* acquire channels from in */
+ j = 0;
+ for (i = 0; i < in_num; i++) {
+ set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, in[i].hw_value);
+ if (in[i].hw_value && !(in[i].flags & RTW_IEEE80211_CHAN_DISABLED) &&
+ set_idx >= 0) {
+ memcpy(&out[j], &in[i], sizeof(struct rtw_ieee80211_channel));
+
+ if (pmlmeext->channel_set[set_idx].ScanType == SCAN_PASSIVE)
+ out[j].flags &= RTW_IEEE80211_CHAN_PASSIVE_SCAN;
+
+ j++;
+ }
+ if (j >= out_num)
+ break;
+ }
+
+ /* if out is empty, use channel_set as default */
+ if (j == 0) {
+ for (i = 0; i < pmlmeext->max_chan_nums; i++) {
+ out[i].hw_value = pmlmeext->channel_set[i].ChannelNum;
+
+ if (pmlmeext->channel_set[i].ScanType == SCAN_PASSIVE)
+ out[i].flags &= RTW_IEEE80211_CHAN_PASSIVE_SCAN;
+
+ j++;
+ }
+ }
+
+ return j;
+}
+
+u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct sitesurvey_parm *pparm = (struct sitesurvey_parm *)pbuf;
+ u8 bdelayscan = false;
+ u8 val8;
+ u32 initialgain;
+ u32 i;
+
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#endif
+
+ if (pmlmeext->sitesurvey_res.state == SCAN_DISABLE) {
+ /* for first time sitesurvey_cmd */
+ rtw_hal_set_hwreg(padapter, HW_VAR_CHECK_TXBUF, NULL);
+
+ pmlmeext->sitesurvey_res.state = SCAN_START;
+ pmlmeext->sitesurvey_res.bss_cnt = 0;
+ pmlmeext->sitesurvey_res.channel_idx = 0;
+
+ for (i = 0; i < RTW_SSID_SCAN_AMOUNT; i++) {
+ if (pparm->ssid[i].SsidLength) {
+ memcpy(pmlmeext->sitesurvey_res.ssid[i].Ssid, pparm->ssid[i].Ssid, IW_ESSID_MAX_SIZE);
+ pmlmeext->sitesurvey_res.ssid[i].SsidLength = pparm->ssid[i].SsidLength;
+ } else {
+ pmlmeext->sitesurvey_res.ssid[i].SsidLength = 0;
+ }
+ }
+
+ pmlmeext->sitesurvey_res.ch_num = rtw_scan_ch_decision(padapter
+ , pmlmeext->sitesurvey_res.ch, RTW_CHANNEL_SCAN_AMOUNT
+ , pparm->ch, pparm->ch_num
+ );
+
+ pmlmeext->sitesurvey_res.scan_mode = pparm->scan_mode;
+
+ /* issue null data if associated to the AP */
+ if (is_client_associated_to_ap(padapter)) {
+ pmlmeext->sitesurvey_res.state = SCAN_TXNULL;
+
+ issue_nulldata(padapter, NULL, 1, 3, 500);
+
+ bdelayscan = true;
+ }
+ if (bdelayscan) {
+ /* delay 50ms to protect nulldata(1). */
+ set_survey_timer(pmlmeext, 50);
+ return H2C_SUCCESS;
+ }
+ }
+
+ if ((pmlmeext->sitesurvey_res.state == SCAN_START) || (pmlmeext->sitesurvey_res.state == SCAN_TXNULL)) {
+ /* disable dynamic functions, such as high power, DIG */
+ Save_DM_Func_Flag(padapter);
+ Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);
+
+ /* config the initial gain during scanning, need to write the BB registers */
+#ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ initialgain = 0x1E;
+ else
+ initialgain = 0x28;
+#else /* CONFIG_88EU_P2P */
+ initialgain = 0x1E;
+#endif /* CONFIG_88EU_P2P */
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
+
+ /* set MSR to no link state */
+ Set_MSR(padapter, _HW_STATE_NOLINK_);
+
+ val8 = 1; /* under site survey */
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+
+ pmlmeext->sitesurvey_res.state = SCAN_PROCESS;
+ }
+
+ site_survey(padapter);
+
+ return H2C_SUCCESS;
+}
+
+u8 setauth_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ struct setauth_parm *pparm = (struct setauth_parm *)pbuf;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (pparm->mode < 4)
+ pmlmeinfo->auth_algo = pparm->mode;
+ return H2C_SUCCESS;
+}
+
+u8 setkey_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ unsigned short ctrl;
+ struct setkey_parm *pparm = (struct setkey_parm *)pbuf;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ unsigned char null_sta[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+ /* main tx key for wep. */
+ if (pparm->set_tx)
+ pmlmeinfo->key_index = pparm->keyid;
+
+ /* write cam */
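+ /* CAM control word: BIT(15) appears to be the entry-valid flag; the algorithm goes in bits 4:2 and the key id in bits 1:0 */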
+ ctrl = BIT(15) | ((pparm->algorithm) << 2) | pparm->keyid;
+
+ DBG_88E_LEVEL(_drv_info_, "set group key to hw: alg:%d(WEP40-1 WEP104-5 TKIP-2 AES-4) "
+ "keyid:%d\n", pparm->algorithm, pparm->keyid);
+ write_cam(padapter, pparm->keyid, ctrl, null_sta, pparm->key);
+
+ return H2C_SUCCESS;
+}
+
+u8 set_stakey_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ u16 ctrl = 0;
+ u8 cam_id;/* cam_entry */
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct set_stakey_parm *pparm = (struct set_stakey_parm *)pbuf;
+
+ /* cam_entry: */
+ /* 0~3 for default key */
+
+ /* for concurrent mode (ap+sta): */
+ /* the default key is disabled; sw encrypt/decrypt is used */
+ /* cam_entry = 4 for sta mode (macid = 0) */
+ /* cam_entry(macid+3) = 5 ~ N for ap mode (aid = 1~N, macid = 2 ~N) */
+
+ /* for concurrent mode (sta+sta): */
+ /* the default key is disabled; sw encrypt/decrypt is used */
+ /* cam_entry = 4 mapping to macid = 0 */
+ /* cam_entry = 5 mapping to macid = 2 */
+
+ cam_id = 4;
+
+ DBG_88E_LEVEL(_drv_info_, "set pairwise key to hw: alg:%d(WEP40-1 WEP104-5 TKIP-2 AES-4) camid:%d\n",
+ pparm->algorithm, cam_id);
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ if (pparm->algorithm == _NO_PRIVACY_) /* clear cam entry */ {
+ clear_cam_entry(padapter, pparm->id);
+ return H2C_SUCCESS_RSP;
+ }
+
+ psta = rtw_get_stainfo(pstapriv, pparm->addr);
+ if (psta) {
+ ctrl = (BIT(15) | ((pparm->algorithm) << 2));
+
+ DBG_88E("r871x_set_stakey_hdl(): enc_algorithm=%d\n", pparm->algorithm);
+
+ if ((psta->mac_id < 1) || (psta->mac_id > (NUM_STA-4))) {
+ DBG_88E("r871x_set_stakey_hdl():set_stakey failed, mac_id(aid)=%d\n", psta->mac_id);
+ return H2C_REJECTED;
+ }
+
+ cam_id = (psta->mac_id + 3);/* 0~3 for default key, cmd_id = macid + 3, macid = aid+1; */
+
+ DBG_88E("Write CAM, mac_addr =%x:%x:%x:%x:%x:%x, cam_entry=%d\n", pparm->addr[0],
+ pparm->addr[1], pparm->addr[2], pparm->addr[3], pparm->addr[4],
+ pparm->addr[5], cam_id);
+
+ write_cam(padapter, cam_id, ctrl, pparm->addr, pparm->key);
+
+ return H2C_SUCCESS_RSP;
+ } else {
+ DBG_88E("r871x_set_stakey_hdl(): sta has been free\n");
+ return H2C_REJECTED;
+ }
+ }
+
+ /* below for sta mode */
+
+ if (pparm->algorithm == _NO_PRIVACY_) { /* clear cam entry */
+ clear_cam_entry(padapter, pparm->id);
+ return H2C_SUCCESS;
+ }
+ ctrl = BIT(15) | ((pparm->algorithm) << 2);
+ write_cam(padapter, cam_id, ctrl, pparm->addr, pparm->key);
+ pmlmeinfo->enc_algo = pparm->algorithm;
+ return H2C_SUCCESS;
+}
+
+u8 add_ba_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ struct addBaReq_parm *pparm = (struct addBaReq_parm *)pbuf;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, pparm->addr);
+
+ if (!psta)
+ return H2C_SUCCESS;
+
+ if (((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) && (pmlmeinfo->HT_enable)) ||
+ ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE)) {
+ issue_action_BA(padapter, pparm->addr, RTW_WLAN_ACTION_ADDBA_REQ, (u16)pparm->tid);
+ _set_timer(&psta->addba_retry_timer, ADDBA_TO);
+ } else {
+ psta->htpriv.candidate_tid_bitmap &= ~BIT(pparm->tid);
+ }
+ return H2C_SUCCESS;
+}
+
+u8 set_tx_beacon_cmd(struct adapter *padapter)
+{
+ struct cmd_obj *ph2c;
+ struct Tx_Beacon_param *ptxBeacon_parm;
+ struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u8 res = _SUCCESS;
+ int len_diff = 0;
+
+_func_enter_;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ ptxBeacon_parm = (struct Tx_Beacon_param *)rtw_zmalloc(sizeof(struct Tx_Beacon_param));
+ if (ptxBeacon_parm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ memcpy(&(ptxBeacon_parm->network), &(pmlmeinfo->network), sizeof(struct wlan_bssid_ex));
+
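+ /* depending on hidden_ssid_mode, update_hidden_ssid() may blank or remove the SSID IE; adjust IELength by the returned difference */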
+ len_diff = update_hidden_ssid(ptxBeacon_parm->network.IEs+_BEACON_IE_OFFSET_,
+ ptxBeacon_parm->network.IELength-_BEACON_IE_OFFSET_,
+ pmlmeinfo->hidden_ssid_mode);
+ ptxBeacon_parm->network.IELength += len_diff;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, ptxBeacon_parm, GEN_CMD_CODE(_TX_Beacon));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+u8 mlme_evt_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ u8 evt_code;
+ u16 evt_sz;
+ uint *peventbuf;
+ void (*event_callback)(struct adapter *dev, u8 *pbuf);
+ struct evt_priv *pevt_priv = &(padapter->evtpriv);
+
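+ /* the first 32-bit word packs the event size (bits 15:0) and the event code (bits 23:16); the payload starts after two 32-bit words */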
+ peventbuf = (uint *)pbuf;
+ evt_sz = (u16)(*peventbuf&0xffff);
+ evt_code = (u8)((*peventbuf>>16)&0xff);
+
+ /* checking if event code is valid */
+ if (evt_code >= MAX_C2HEVT) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("\nEvent Code(%d) mismatch!\n", evt_code));
+ goto _abort_event_;
+ }
+
+ /* checking if event size match the event parm size */
+ if ((wlanevents[evt_code].parmsize != 0) &&
+ (wlanevents[evt_code].parmsize != evt_sz)) {
+ RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_,
+ ("\nEvent(%d) Parm Size mismatch (%d vs %d)!\n",
+ evt_code, wlanevents[evt_code].parmsize, evt_sz));
+ goto _abort_event_;
+ }
+
+ ATOMIC_INC(&pevt_priv->event_seq);
+
+ peventbuf += 2;
+
+ if (peventbuf) {
+ event_callback = wlanevents[evt_code].event_callback;
+ event_callback(padapter, (u8 *)peventbuf);
+
+ pevt_priv->evt_done_cnt++;
+ }
+
+_abort_event_:
+ return H2C_SUCCESS;
+}
+
+u8 h2c_msg_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ if (!pbuf)
+ return H2C_PARAMETERS_ERROR;
+
+ return H2C_SUCCESS;
+}
+
+u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ if (send_beacon(padapter) == _FAIL) {
+ DBG_88E("issue_beacon, fail!\n");
+ return H2C_PARAMETERS_ERROR;
+ }
+#ifdef CONFIG_88EU_AP_MODE
+ else { /* tx bc/mc frames after update TIM */
+ unsigned long irqL;
+ struct sta_info *psta_bmc;
+ struct list_head *xmitframe_plist, *xmitframe_phead;
+ struct xmit_frame *pxmitframe = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ /* for BC/MC Frames */
+ psta_bmc = rtw_get_bcmc_stainfo(padapter);
+ if (!psta_bmc)
+ return H2C_SUCCESS;
+
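+ /* bit 0 of tim_bitmap indicates buffered bc/mc frames; flush them through the high queue (HIQ) after the beacon */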
+ if ((pstapriv->tim_bitmap&BIT(0)) && (psta_bmc->sleepq_len > 0)) {
+ rtw_msleep_os(10);/* 10ms, ATIM(HIQ) Window */
+ _enter_critical_bh(&psta_bmc->sleep_q.lock, &irqL);
+
+ xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
+ xmitframe_plist = get_next(xmitframe_phead);
+
+ while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
+ pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+
+ xmitframe_plist = get_next(xmitframe_plist);
+
+ rtw_list_delete(&pxmitframe->list);
+
+ psta_bmc->sleepq_len--;
+ if (psta_bmc->sleepq_len > 0)
+ pxmitframe->attrib.mdata = 1;
+ else
+ pxmitframe->attrib.mdata = 0;
+
+ pxmitframe->attrib.triggered = 1;
+
+ pxmitframe->attrib.qsel = 0x11;/* HIQ */
+
+ _exit_critical_bh(&psta_bmc->sleep_q.lock, &irqL);
+ if (rtw_hal_xmit(padapter, pxmitframe))
+ rtw_os_xmit_complete(padapter, pxmitframe);
+ _enter_critical_bh(&psta_bmc->sleep_q.lock, &irqL);
+ }
+ _exit_critical_bh(&psta_bmc->sleep_q.lock, &irqL);
+ }
+ }
+#endif
+ return H2C_SUCCESS;
+}
+
+u8 set_ch_hdl(struct adapter *padapter, u8 *pbuf)
+{
+ struct set_ch_parm *set_ch_parm;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ if (!pbuf)
+ return H2C_PARAMETERS_ERROR;
+
+ set_ch_parm = (struct set_ch_parm *)pbuf;
+
+ DBG_88E(FUNC_NDEV_FMT" ch:%u, bw:%u, ch_offset:%u\n",
+ FUNC_NDEV_ARG(padapter->pnetdev),
+ set_ch_parm->ch, set_ch_parm->bw, set_ch_parm->ch_offset);
+
+ pmlmeext->cur_channel = set_ch_parm->ch;
+ pmlmeext->cur_ch_offset = set_ch_parm->ch_offset;
+ pmlmeext->cur_bwmode = set_ch_parm->bw;
+
+ set_channel_bwmode(padapter, set_ch_parm->ch, set_ch_parm->ch_offset, set_ch_parm->bw);
+
+ return H2C_SUCCESS;
+}
+
+u8 set_chplan_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ struct SetChannelPlan_param *setChannelPlan_param;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ if (!pbuf)
+ return H2C_PARAMETERS_ERROR;
+
+ setChannelPlan_param = (struct SetChannelPlan_param *)pbuf;
+
+ pmlmeext->max_chan_nums = init_channel_set(padapter, setChannelPlan_param->channel_plan, pmlmeext->channel_set);
+ init_channel_list(padapter, pmlmeext->channel_set, pmlmeext->max_chan_nums, &pmlmeext->channel_list);
+
+ return H2C_SUCCESS;
+}
+
+u8 led_blink_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ if (!pbuf)
+ return H2C_PARAMETERS_ERROR;
+ return H2C_SUCCESS;
+}
+
+u8 set_csa_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ return H2C_REJECTED;
+}
+
+/* TDLS_WRCR : write RCR DATA BIT */
+/* TDLS_SD_PTI : issue peer traffic indication */
+/* TDLS_CS_OFF : go back to the channel linked with AP, terminating channel switch procedure */
+ /* TDLS_INIT_CH_SEN : init channel sensing, receive all data and mgnt frames */
+/* TDLS_DONE_CH_SEN: channel sensing and report candidate channel */
+/* TDLS_OFF_CH : first time set channel to off channel */
+ /* TDLS_BASE_CH : go back to the channel linked with the AP when the base channel is set as the target channel */
+/* TDLS_P_OFF_CH : periodically go to off channel */
+/* TDLS_P_BASE_CH : periodically go back to base channel */
+/* TDLS_RS_RCR : restore RCR */
+/* TDLS_CKALV_PH1 : check alive timer phase1 */
+/* TDLS_CKALV_PH2 : check alive timer phase2 */
+/* TDLS_FREE_STA : free tdls sta */
+u8 tdls_hdl(struct adapter *padapter, unsigned char *pbuf)
+{
+ return H2C_REJECTED;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mp.c b/drivers/staging/rtl8188eu/core/rtw_mp.c
new file mode 100644
index 00000000000..c7ff2e4d1f2
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_mp.c
@@ -0,0 +1,997 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ *published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_MP_C_
+
+#include <drv_types.h>
+
+#include "odm_precomp.h"
+#include "rtl8188e_hal.h"
+
+u32 read_macreg(struct adapter *padapter, u32 addr, u32 sz)
+{
+ u32 val = 0;
+
+ switch (sz) {
+ case 1:
+ val = rtw_read8(padapter, addr);
+ break;
+ case 2:
+ val = rtw_read16(padapter, addr);
+ break;
+ case 4:
+ val = rtw_read32(padapter, addr);
+ break;
+ default:
+ val = 0xffffffff;
+ break;
+ }
+
+ return val;
+}
+
+void write_macreg(struct adapter *padapter, u32 addr, u32 val, u32 sz)
+{
+ switch (sz) {
+ case 1:
+ rtw_write8(padapter, addr, (u8)val);
+ break;
+ case 2:
+ rtw_write16(padapter, addr, (u16)val);
+ break;
+ case 4:
+ rtw_write32(padapter, addr, val);
+ break;
+ default:
+ break;
+ }
+}
+
+u32 read_bbreg(struct adapter *padapter, u32 addr, u32 bitmask)
+{
+ return rtw_hal_read_bbreg(padapter, addr, bitmask);
+}
+
+void write_bbreg(struct adapter *padapter, u32 addr, u32 bitmask, u32 val)
+{
+ rtw_hal_write_bbreg(padapter, addr, bitmask, val);
+}
+
+u32 _read_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 bitmask)
+{
+ return rtw_hal_read_rfreg(padapter, (enum rf_radio_path)rfpath, addr, bitmask);
+}
+
+void _write_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 bitmask, u32 val)
+{
+ rtw_hal_write_rfreg(padapter, (enum rf_radio_path)rfpath, addr, bitmask, val);
+}
+
+u32 read_rfreg(struct adapter *padapter, u8 rfpath, u32 addr)
+{
+ return _read_rfreg(padapter, (enum rf_radio_path)rfpath, addr, bRFRegOffsetMask);
+}
+
+void write_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 val)
+{
+ _write_rfreg(padapter, (enum rf_radio_path)rfpath, addr, bRFRegOffsetMask, val);
+}
+
+static void _init_mp_priv_(struct mp_priv *pmp_priv)
+{
+ struct wlan_bssid_ex *pnetwork;
+
+ _rtw_memset(pmp_priv, 0, sizeof(struct mp_priv));
+
+ pmp_priv->mode = MP_OFF;
+
+ pmp_priv->channel = 1;
+ pmp_priv->bandwidth = HT_CHANNEL_WIDTH_20;
+ pmp_priv->prime_channel_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ pmp_priv->rateidx = MPT_RATE_1M;
+ pmp_priv->txpoweridx = 0x2A;
+
+ pmp_priv->antenna_tx = ANTENNA_A;
+ pmp_priv->antenna_rx = ANTENNA_AB;
+
+ pmp_priv->check_mp_pkt = 0;
+
+ pmp_priv->tx_pktcount = 0;
+
+ pmp_priv->rx_pktcount = 0;
+ pmp_priv->rx_crcerrpktcount = 0;
+
+ pmp_priv->network_macaddr[0] = 0x00;
+ pmp_priv->network_macaddr[1] = 0xE0;
+ pmp_priv->network_macaddr[2] = 0x4C;
+ pmp_priv->network_macaddr[3] = 0x87;
+ pmp_priv->network_macaddr[4] = 0x66;
+ pmp_priv->network_macaddr[5] = 0x55;
+
+ pnetwork = &pmp_priv->mp_network.network;
+ memcpy(pnetwork->MacAddress, pmp_priv->network_macaddr, ETH_ALEN);
+
+ pnetwork->Ssid.SsidLength = 8;
+ memcpy(pnetwork->Ssid.Ssid, "mp_871x", pnetwork->Ssid.SsidLength);
+}
+
+static void mp_init_xmit_attrib(struct mp_tx *pmptx, struct adapter *padapter)
+{
+ struct pkt_attrib *pattrib;
+ struct tx_desc *desc;
+
+ /* init xmitframe attribute */
+ pattrib = &pmptx->attrib;
+ _rtw_memset(pattrib, 0, sizeof(struct pkt_attrib));
+ desc = &pmptx->desc;
+ _rtw_memset(desc, 0, TXDESC_SIZE);
+
+ pattrib->ether_type = 0x8712;
+ _rtw_memset(pattrib->dst, 0xFF, ETH_ALEN);
+ pattrib->ack_policy = 0;
+ pattrib->hdrlen = WLAN_HDR_A3_LEN;
+ pattrib->subtype = WIFI_DATA;
+ pattrib->priority = 0;
+ pattrib->qsel = pattrib->priority;
+ pattrib->nr_frags = 1;
+ pattrib->encrypt = 0;
+ pattrib->bswenc = false;
+ pattrib->qos_en = false;
+}
+
+s32 init_mp_priv(struct adapter *padapter)
+{
+ struct mp_priv *pmppriv = &padapter->mppriv;
+
+ _init_mp_priv_(pmppriv);
+ pmppriv->papdater = padapter;
+
+ pmppriv->tx.stop = 1;
+ mp_init_xmit_attrib(&pmppriv->tx, padapter);
+
+ switch (padapter->registrypriv.rf_config) {
+ case RF_1T1R:
+ pmppriv->antenna_tx = ANTENNA_A;
+ pmppriv->antenna_rx = ANTENNA_A;
+ break;
+ case RF_1T2R:
+ default:
+ pmppriv->antenna_tx = ANTENNA_A;
+ pmppriv->antenna_rx = ANTENNA_AB;
+ break;
+ case RF_2T2R:
+ case RF_2T2R_GREEN:
+ pmppriv->antenna_tx = ANTENNA_AB;
+ pmppriv->antenna_rx = ANTENNA_AB;
+ break;
+ case RF_2T4R:
+ pmppriv->antenna_tx = ANTENNA_AB;
+ pmppriv->antenna_rx = ANTENNA_ABCD;
+ break;
+ }
+
+ return _SUCCESS;
+}
+
+void free_mp_priv(struct mp_priv *pmp_priv)
+{
+ kfree(pmp_priv->pallocated_mp_xmitframe_buf);
+ pmp_priv->pallocated_mp_xmitframe_buf = NULL;
+ pmp_priv->pmp_xmtframe_buf = NULL;
+}
+
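+/* Route the generic calibration and RF-path-switch helpers to their 8188E-specific HAL implementations. */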
+#define PHY_IQCalibrate(a, b) PHY_IQCalibrate_8188E(a, b)
+#define PHY_LCCalibrate(a) PHY_LCCalibrate_8188E(a)
+#define PHY_SetRFPathSwitch(a, b) PHY_SetRFPathSwitch_8188E(a, b)
+
+s32 MPT_InitializeAdapter(struct adapter *pAdapter, u8 Channel)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ s32 rtStatus = _SUCCESS;
+ struct mpt_context *pMptCtx = &pAdapter->mppriv.MptCtx;
+ struct mlme_priv *pmlmepriv = &pAdapter->mlmepriv;
+
+ /* HW Initialization for 8190 MPT. */
+ /* SW Initialization for 8190 MP. */
+ pMptCtx->bMptDrvUnload = false;
+ pMptCtx->bMassProdTest = false;
+ pMptCtx->bMptIndexEven = true; /* default gain index is -6.0 dB */
+ pMptCtx->h2cReqNum = 0x0;
+ /* Init mpt event. */
+ /* init for BT MP */
+
+ pMptCtx->bMptWorkItemInProgress = false;
+ pMptCtx->CurrMptAct = NULL;
+ /* */
+
+ /* Don't accept any packets */
+ rtw_write32(pAdapter, REG_RCR, 0);
+
+ PHY_IQCalibrate(pAdapter, false);
+ dm_CheckTXPowerTracking(&pHalData->odmpriv); /* trigger thermal meter */
+ PHY_LCCalibrate(pAdapter);
+
+ pMptCtx->backup0xc50 = (u8)PHY_QueryBBReg(pAdapter, rOFDM0_XAAGCCore1, bMaskByte0);
+ pMptCtx->backup0xc58 = (u8)PHY_QueryBBReg(pAdapter, rOFDM0_XBAGCCore1, bMaskByte0);
+ pMptCtx->backup0xc30 = (u8)PHY_QueryBBReg(pAdapter, rOFDM0_RxDetector1, bMaskByte0);
+ pMptCtx->backup0x52_RF_A = (u8)PHY_QueryRFReg(pAdapter, RF_PATH_A, RF_0x52, 0x000F0);
+ pMptCtx->backup0x52_RF_B = (u8)PHY_QueryRFReg(pAdapter, RF_PATH_B, RF_0x52, 0x000F0);
+
+ /* set ant to wifi side in mp mode */
+ rtw_write16(pAdapter, 0x870, 0x300);
+ rtw_write16(pAdapter, 0x860, 0x110);
+
+ if (pAdapter->registrypriv.mp_mode == 1)
+ pmlmepriv->fw_state = WIFI_MP_STATE;
+
+ return rtStatus;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: MPT_DeInitAdapter()
+ *
+ * Overview: Extra DeInitialization for Mass Production Test.
+ *
+ * Input: struct adapter * pAdapter
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 05/08/2007 MHC Create Version 0.
+ * 05/18/2007 MHC Add normal driver MPHalt code.
+ *
+ *---------------------------------------------------------------------------*/
+void MPT_DeInitAdapter(struct adapter *pAdapter)
+{
+ struct mpt_context *pMptCtx = &pAdapter->mppriv.MptCtx;
+
+ pMptCtx->bMptDrvUnload = true;
+}
+
+static u8 mpt_ProStartTest(struct adapter *padapter)
+{
+ struct mpt_context *pMptCtx = &padapter->mppriv.MptCtx;
+
+ pMptCtx->bMassProdTest = true;
+ pMptCtx->bStartContTx = false;
+ pMptCtx->bCckContTx = false;
+ pMptCtx->bOfdmContTx = false;
+ pMptCtx->bSingleCarrier = false;
+ pMptCtx->bCarrierSuppression = false;
+ pMptCtx->bSingleTone = false;
+
+ return _SUCCESS;
+}
+
+/*
+ * General use
+ */
+s32 SetPowerTracking(struct adapter *padapter, u8 enable)
+{
+ Hal_SetPowerTracking(padapter, enable);
+ return 0;
+}
+
+void GetPowerTracking(struct adapter *padapter, u8 *enable)
+{
+ Hal_GetPowerTracking(padapter, enable);
+}
+
+static void disable_dm(struct adapter *padapter)
+{
+ u8 v8;
+
+ /* 3 1. disable firmware dynamic mechanism */
+ /* disable Power Training, Rate Adaptive */
+ v8 = rtw_read8(padapter, REG_BCN_CTRL);
+ v8 &= ~EN_BCN_FUNCTION;
+ rtw_write8(padapter, REG_BCN_CTRL, v8);
+
+ /* 3 2. disable driver dynamic mechanism */
+ /* disable Dynamic Initial Gain */
+ /* disable High Power */
+ /* disable Power Tracking */
+ Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);
+
+ /* enable APK, LCK and IQK but disable power tracking */
+ Switch_DM_Func(padapter, DYNAMIC_RF_CALIBRATION, true);
+}
+
+/* This function initializes the DUT to the MP test mode */
+s32 mp_start_test(struct adapter *padapter)
+{
+ struct wlan_bssid_ex bssid;
+ struct sta_info *psta;
+ u32 length;
+ u8 val8;
+
+ unsigned long irqL;
+ s32 res = _SUCCESS;
+
+ struct mp_priv *pmppriv = &padapter->mppriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_network *tgt_network = &pmlmepriv->cur_network;
+
+ padapter->registrypriv.mp_mode = 1;
+ pmppriv->bSetTxPower = 0; /* TX power index has not been set manually yet */
+
+ /* 3 disable dynamic mechanism */
+ disable_dm(padapter);
+
+ /* 3 0. update mp_priv */
+
+ if (padapter->registrypriv.rf_config == RF_819X_MAX_TYPE) {
+ switch (GET_RF_TYPE(padapter)) {
+ case RF_1T1R:
+ pmppriv->antenna_tx = ANTENNA_A;
+ pmppriv->antenna_rx = ANTENNA_A;
+ break;
+ case RF_1T2R:
+ default:
+ pmppriv->antenna_tx = ANTENNA_A;
+ pmppriv->antenna_rx = ANTENNA_AB;
+ break;
+ case RF_2T2R:
+ case RF_2T2R_GREEN:
+ pmppriv->antenna_tx = ANTENNA_AB;
+ pmppriv->antenna_rx = ANTENNA_AB;
+ break;
+ case RF_2T4R:
+ pmppriv->antenna_tx = ANTENNA_AB;
+ pmppriv->antenna_rx = ANTENNA_ABCD;
+ break;
+ }
+ }
+
+ mpt_ProStartTest(padapter);
+
+ /* 3 1. initialize a new struct wlan_bssid_ex */
+/* _rtw_memset(&bssid, 0, sizeof(struct wlan_bssid_ex)); */
+ memcpy(bssid.MacAddress, pmppriv->network_macaddr, ETH_ALEN);
+ bssid.Ssid.SsidLength = strlen("mp_pseudo_adhoc");
+ memcpy(bssid.Ssid.Ssid, (u8 *)"mp_pseudo_adhoc", bssid.Ssid.SsidLength);
+ bssid.InfrastructureMode = Ndis802_11IBSS;
+ bssid.NetworkTypeInUse = Ndis802_11DS;
+ bssid.IELength = 0;
+
+ length = get_wlan_bssid_ex_sz(&bssid);
+ if (length % 4)
+ bssid.Length = ((length >> 2) + 1) << 2; /* round up to multiple of 4 bytes. */
+ else
+ bssid.Length = length;
+
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+
+ if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true)
+ goto end_of_mp_start_test;
+
+ /* init mp_start_test status */
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ rtw_disassoc_cmd(padapter, 500, true);
+ rtw_indicate_disconnect(padapter);
+ rtw_free_assoc_resources(padapter, 1);
+ }
+ pmppriv->prev_fw_state = get_fwstate(pmlmepriv);
+ if (padapter->registrypriv.mp_mode == 1)
+ pmlmepriv->fw_state = WIFI_MP_STATE;
+ set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
+
+ /* 3 2. create a new psta for mp driver */
+ /* clear psta in the cur_network, if any */
+ psta = rtw_get_stainfo(&padapter->stapriv, tgt_network->network.MacAddress);
+ if (psta)
+ rtw_free_stainfo(padapter, psta);
+
+ psta = rtw_alloc_stainfo(&padapter->stapriv, bssid.MacAddress);
+ if (psta == NULL) {
+ RT_TRACE(_module_mp_, _drv_err_, ("mp_start_test: Can't alloc sta_info!\n"));
+ pmlmepriv->fw_state = pmppriv->prev_fw_state;
+ res = _FAIL;
+ goto end_of_mp_start_test;
+ }
+
+ /* 3 3. join pseudo AdHoc */
+ tgt_network->join_res = 1;
+ tgt_network->aid = 1;
+ psta->aid = 1;
+ memcpy(&tgt_network->network, &bssid, length);
+
+ rtw_indicate_connect(padapter);
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
+
+end_of_mp_start_test:
+
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+
+ if (res == _SUCCESS) {
+ /* set MSR to WIFI_FW_ADHOC_STATE */
+ val8 = rtw_read8(padapter, MSR) & 0xFC; /* 0x0102 */
+ val8 |= WIFI_FW_ADHOC_STATE;
+ rtw_write8(padapter, MSR, val8); /* Link in ad hoc network */
+ }
+ return res;
+}
+/* */
+/* This function change the DUT from the MP test mode into normal mode */
+void mp_stop_test(struct adapter *padapter)
+{
+ struct mp_priv *pmppriv = &padapter->mppriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_network *tgt_network = &pmlmepriv->cur_network;
+ struct sta_info *psta;
+
+ unsigned long irqL;
+
+ if (pmppriv->mode == MP_ON) {
+ pmppriv->bSetTxPower = 0;
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == false)
+ goto end_of_mp_stop_test;
+
+ /* 3 1. disconnect pseudo AdHoc */
+ rtw_indicate_disconnect(padapter);
+
+ /* 3 2. clear psta used in mp test mode. */
+ psta = rtw_get_stainfo(&padapter->stapriv, tgt_network->network.MacAddress);
+ if (psta)
+ rtw_free_stainfo(padapter, psta);
+
+ /* 3 3. return to normal state (default:station mode) */
+ pmlmepriv->fw_state = pmppriv->prev_fw_state; /* WIFI_STATION_STATE; */
+
+ /* flush the cur_network */
+ _rtw_memset(tgt_network, 0, sizeof(struct wlan_network));
+
+ _clr_fwstate_(pmlmepriv, WIFI_MP_STATE);
+
+end_of_mp_stop_test:
+
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ }
+}
+
+/*---------------------------hal\rtl8192c\MPT_HelperFunc.c---------------------------*/
+/*
+ * SetChannel
+ * Description
+ * Use the H2C command to change the channel; besides updating the RF
+ * registers, the other required settings are applied as well.
+ */
+void SetChannel(struct adapter *pAdapter)
+{
+ Hal_SetChannel(pAdapter);
+}
+
+/*
+ * Notice
+ * Switching bandwidth may change the center frequency (channel)
+ */
+void SetBandwidth(struct adapter *pAdapter)
+{
+ Hal_SetBandwidth(pAdapter);
+}
+
+void SetAntenna(struct adapter *pAdapter)
+{
+ Hal_SetAntenna(pAdapter);
+}
+
+void SetAntennaPathPower(struct adapter *pAdapter)
+{
+ Hal_SetAntennaPathPower(pAdapter);
+}
+
+void SetTxPower(struct adapter *pAdapter)
+{
+ Hal_SetTxPower(pAdapter);
+}
+
+void SetDataRate(struct adapter *pAdapter)
+{
+ Hal_SetDataRate(pAdapter);
+}
+
+void MP_PHY_SetRFPathSwitch(struct adapter *pAdapter, bool bMain)
+{
+ PHY_SetRFPathSwitch(pAdapter, bMain);
+}
+
+s32 SetThermalMeter(struct adapter *pAdapter, u8 target_ther)
+{
+ return Hal_SetThermalMeter(pAdapter, target_ther);
+}
+
+void GetThermalMeter(struct adapter *pAdapter, u8 *value)
+{
+ Hal_GetThermalMeter(pAdapter, value);
+}
+
+void SetSingleCarrierTx(struct adapter *pAdapter, u8 bStart)
+{
+ PhySetTxPowerLevel(pAdapter);
+ Hal_SetSingleCarrierTx(pAdapter, bStart);
+}
+
+void SetSingleToneTx(struct adapter *pAdapter, u8 bStart)
+{
+ PhySetTxPowerLevel(pAdapter);
+ Hal_SetSingleToneTx(pAdapter, bStart);
+}
+
+void SetCarrierSuppressionTx(struct adapter *pAdapter, u8 bStart)
+{
+ PhySetTxPowerLevel(pAdapter);
+ Hal_SetCarrierSuppressionTx(pAdapter, bStart);
+}
+
+void SetContinuousTx(struct adapter *pAdapter, u8 bStart)
+{
+ PhySetTxPowerLevel(pAdapter);
+ Hal_SetContinuousTx(pAdapter, bStart);
+}
+
+
+void PhySetTxPowerLevel(struct adapter *pAdapter)
+{
+ struct mp_priv *pmp_priv = &pAdapter->mppriv;
+
+ if (pmp_priv->bSetTxPower == 0) /* only when the power index has not been set manually */
+ PHY_SetTxPowerLevel8188E(pAdapter, pmp_priv->channel);
+}
+
+/* */
+static void dump_mpframe(struct adapter *padapter, struct xmit_frame *pmpframe)
+{
+ rtw_hal_mgnt_xmit(padapter, pmpframe);
+}
+
+static struct xmit_frame *alloc_mp_xmitframe(struct xmit_priv *pxmitpriv)
+{
+ struct xmit_frame *pmpframe;
+ struct xmit_buf *pxmitbuf;
+
+ pmpframe = rtw_alloc_xmitframe(pxmitpriv);
+ if (pmpframe == NULL)
+ return NULL;
+
+ pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
+ if (pxmitbuf == NULL) {
+ rtw_free_xmitframe(pxmitpriv, pmpframe);
+ return NULL;
+ }
+
+ pmpframe->frame_tag = MP_FRAMETAG;
+
+ pmpframe->pxmitbuf = pxmitbuf;
+
+ pmpframe->buf_addr = pxmitbuf->pbuf;
+
+ pxmitbuf->priv_data = pmpframe;
+
+ return pmpframe;
+}
+
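+/*
+ * MP packet-TX worker thread: keeps copying the prepared MP frame into a
+ * fresh xmit frame and hands it to the HAL, until it is stopped, the adapter
+ * goes away, or the requested packet count has been sent.
+ */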
+static int mp_xmit_packet_thread(void *context)
+{
+ struct xmit_frame *pxmitframe;
+ struct mp_tx *pmptx;
+ struct mp_priv *pmp_priv;
+ struct xmit_priv *pxmitpriv;
+ struct adapter *padapter;
+
+ pmp_priv = (struct mp_priv *)context;
+ pmptx = &pmp_priv->tx;
+ padapter = pmp_priv->papdater;
+ pxmitpriv = &(padapter->xmitpriv);
+
+ thread_enter("RTW_MP_THREAD");
+
+ /* DBG_88E("%s:pkTx Start\n", __func__); */
+ while (1) {
+ pxmitframe = alloc_mp_xmitframe(pxmitpriv);
+ if (pxmitframe == NULL) {
+ if (pmptx->stop ||
+ padapter->bSurpriseRemoved ||
+ padapter->bDriverStopped) {
+ goto exit;
+ } else {
+ rtw_msleep_os(1);
+ continue;
+ }
+ }
+
+ memcpy((u8 *)(pxmitframe->buf_addr+TXDESC_OFFSET), pmptx->buf, pmptx->write_size);
+ memcpy(&(pxmitframe->attrib), &(pmptx->attrib), sizeof(struct pkt_attrib));
+
+ dump_mpframe(padapter, pxmitframe);
+
+ pmptx->sended++;
+ pmp_priv->tx_pktcount++;
+
+ if (pmptx->stop ||
+ padapter->bSurpriseRemoved ||
+ padapter->bDriverStopped)
+ goto exit;
+ if ((pmptx->count != 0) &&
+ (pmptx->count == pmptx->sended))
+ goto exit;
+
+ flush_signals_thread();
+ }
+
+exit:
+ kfree(pmptx->pallocated_buf);
+ pmptx->pallocated_buf = NULL;
+ pmptx->stop = 1;
+
+ thread_exit();
+}
+
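+/* Copy the pre-built MP TX descriptor template into the descriptor of the frame being sent. */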
+void fill_txdesc_for_mp(struct adapter *padapter, struct tx_desc *ptxdesc)
+{
+ struct mp_priv *pmp_priv = &padapter->mppriv;
+ memcpy(ptxdesc, &(pmp_priv->tx.desc), TXDESC_SIZE);
+}
+
+void SetPacketTx(struct adapter *padapter)
+{
+ u8 *ptr, *pkt_start, *pkt_end;
+ u32 pkt_size;
+ struct tx_desc *desc;
+ struct rtw_ieee80211_hdr *hdr;
+ u8 payload;
+ s32 bmcast;
+ struct pkt_attrib *pattrib;
+ struct mp_priv *pmp_priv;
+
+
+ pmp_priv = &padapter->mppriv;
+ if (pmp_priv->tx.stop)
+ return;
+ pmp_priv->tx.sended = 0;
+ pmp_priv->tx.stop = 0;
+ pmp_priv->tx_pktcount = 0;
+
+ /* 3 1. update_attrib() */
+ pattrib = &pmp_priv->tx.attrib;
+ memcpy(pattrib->src, padapter->eeprompriv.mac_addr, ETH_ALEN);
+ memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
+ memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
+ bmcast = IS_MCAST(pattrib->ra);
+ if (bmcast) {
+ pattrib->mac_id = 1;
+ pattrib->psta = rtw_get_bcmc_stainfo(padapter);
+ } else {
+ pattrib->mac_id = 0;
+ pattrib->psta = rtw_get_stainfo(&padapter->stapriv, get_bssid(&padapter->mlmepriv));
+ }
+
+ pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->pktlen;
+
+ /* 3 2. allocate xmit buffer */
+ pkt_size = pattrib->last_txcmdsz;
+
+ kfree(pmp_priv->tx.pallocated_buf);
+ pmp_priv->tx.write_size = pkt_size;
+ pmp_priv->tx.buf_size = pkt_size + XMITBUF_ALIGN_SZ;
+ pmp_priv->tx.pallocated_buf = rtw_zmalloc(pmp_priv->tx.buf_size);
+ if (pmp_priv->tx.pallocated_buf == NULL) {
+ DBG_88E("%s: malloc(%d) fail!!\n", __func__, pmp_priv->tx.buf_size);
+ return;
+ }
+ pmp_priv->tx.buf = (u8 *)N_BYTE_ALIGMENT((size_t)(pmp_priv->tx.pallocated_buf), XMITBUF_ALIGN_SZ);
+ ptr = pmp_priv->tx.buf;
+
+ desc = &(pmp_priv->tx.desc);
+ _rtw_memset(desc, 0, TXDESC_SIZE);
+ pkt_start = ptr;
+ pkt_end = pkt_start + pkt_size;
+
+ /* 3 3. init TX descriptor */
+ /* offset 0 */
+ desc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);
+ desc->txdw0 |= cpu_to_le32(pkt_size & 0x0000FFFF); /* packet size */
+ desc->txdw0 |= cpu_to_le32(((TXDESC_SIZE + OFFSET_SZ) << OFFSET_SHT) & 0x00FF0000); /* 32 bytes for TX Desc */
+ if (bmcast)
+ desc->txdw0 |= cpu_to_le32(BMC); /* broadcast packet */
+
+ desc->txdw1 |= cpu_to_le32((0x01 << 26) & 0xff000000);
+ /* offset 4 */
+ desc->txdw1 |= cpu_to_le32((pattrib->mac_id) & 0x3F); /* CAM_ID(MAC_ID) */
+ desc->txdw1 |= cpu_to_le32((pattrib->qsel << QSEL_SHT) & 0x00001F00); /* Queue Select, TID */
+
+ desc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000F0000); /* Rate Adaptive ID */
+ /* offset 8 */
+ /* offset 12 */
+
+ desc->txdw3 |= cpu_to_le32((pattrib->seqnum << 16) & 0x0fff0000);
+
+ /* offset 16 */
+ desc->txdw4 |= cpu_to_le32(HW_SSN);
+ desc->txdw4 |= cpu_to_le32(USERATE);
+ desc->txdw4 |= cpu_to_le32(DISDATAFB);
+
+ if (pmp_priv->preamble) {
+ if (pmp_priv->rateidx <= MPT_RATE_54M)
+ desc->txdw4 |= cpu_to_le32(DATA_SHORT); /* CCK Short Preamble */
+ }
+ if (pmp_priv->bandwidth == HT_CHANNEL_WIDTH_40)
+ desc->txdw4 |= cpu_to_le32(DATA_BW);
+
+ /* offset 20 */
+ desc->txdw5 |= cpu_to_le32(pmp_priv->rateidx & 0x0000001F);
+
+ if (pmp_priv->preamble) {
+ if (pmp_priv->rateidx > MPT_RATE_54M)
+ desc->txdw5 |= cpu_to_le32(SGI); /* MCS Short Guard Interval */
+ }
+ desc->txdw5 |= cpu_to_le32(RTY_LMT_EN); /* retry limit enable */
+ desc->txdw5 |= cpu_to_le32(0x00180000); /* DATA/RTS Rate Fallback Limit */
+
+ /* 3 4. make wlan header, make_wlanhdr() */
+ hdr = (struct rtw_ieee80211_hdr *)pkt_start;
+ SetFrameSubType(&hdr->frame_ctl, pattrib->subtype);
+ memcpy(hdr->addr1, pattrib->dst, ETH_ALEN); /* DA */
+ memcpy(hdr->addr2, pattrib->src, ETH_ALEN); /* SA */
+ memcpy(hdr->addr3, get_bssid(&padapter->mlmepriv), ETH_ALEN); /* RA, BSSID */
+
+ /* 3 5. make payload */
+ ptr = pkt_start + pattrib->hdrlen;
+
+ switch (pmp_priv->tx.payload) {
+ case 0:
+ payload = 0x00;
+ break;
+ case 1:
+ payload = 0x5a;
+ break;
+ case 2:
+ payload = 0xa5;
+ break;
+ case 3:
+ payload = 0xff;
+ break;
+ default:
+ payload = 0x00;
+ break;
+ }
+
+ _rtw_memset(ptr, payload, pkt_end - ptr);
+
+ /* 3 6. start thread */
+ pmp_priv->tx.PktTxThread = kthread_run(mp_xmit_packet_thread, pmp_priv, "RTW_MP_THREAD");
+ if (IS_ERR(pmp_priv->tx.PktTxThread))
+ DBG_88E("Create PktTx Thread Fail !!!!!\n");
+}
+
+void SetPacketRx(struct adapter *pAdapter, u8 bStartRx)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+
+ if (bStartRx) {
+ /* Accept CRC error and destination address */
+ pHalData->ReceiveConfig = AAP | APM | AM | AB | APP_ICV | ADF | AMF | HTC_LOC_CTRL | APP_MIC | APP_PHYSTS;
+
+ pHalData->ReceiveConfig |= ACRC32;
+
+ rtw_write32(pAdapter, REG_RCR, pHalData->ReceiveConfig);
+
+ /* Accept all data frames */
+ rtw_write16(pAdapter, REG_RXFLTMAP2, 0xFFFF);
+ } else {
+ rtw_write32(pAdapter, REG_RCR, 0);
+ }
+}
+
+void ResetPhyRxPktCount(struct adapter *pAdapter)
+{
+ u32 i, phyrx_set = 0;
+
+ for (i = 0; i <= 0xF; i++) {
+ phyrx_set = 0;
+ phyrx_set |= _RXERR_RPT_SEL(i); /* select */
+ phyrx_set |= RXERR_RPT_RST; /* set counter to zero */
+ rtw_write32(pAdapter, REG_RXERR_RPT, phyrx_set);
+ }
+}
+
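+/* Select one PHY RX report category in REG_RXERR_RPT and return its packet counter. */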
+static u32 GetPhyRxPktCounts(struct adapter *pAdapter, u32 selbit)
+{
+ /* selection */
+ u32 phyrx_set = 0, count = 0;
+
+ phyrx_set = _RXERR_RPT_SEL(selbit & 0xF);
+ rtw_write32(pAdapter, REG_RXERR_RPT, phyrx_set);
+
+ /* Read packet count */
+ count = rtw_read32(pAdapter, REG_RXERR_RPT) & RXERR_COUNTER_MASK;
+
+ return count;
+}
+
+u32 GetPhyRxPktReceived(struct adapter *pAdapter)
+{
+ u32 OFDM_cnt = 0, CCK_cnt = 0, HT_cnt = 0;
+
+ OFDM_cnt = GetPhyRxPktCounts(pAdapter, RXERR_TYPE_OFDM_MPDU_OK);
+ CCK_cnt = GetPhyRxPktCounts(pAdapter, RXERR_TYPE_CCK_MPDU_OK);
+ HT_cnt = GetPhyRxPktCounts(pAdapter, RXERR_TYPE_HT_MPDU_OK);
+
+ return OFDM_cnt + CCK_cnt + HT_cnt;
+}
+
+u32 GetPhyRxPktCRC32Error(struct adapter *pAdapter)
+{
+ u32 OFDM_cnt = 0, CCK_cnt = 0, HT_cnt = 0;
+
+ OFDM_cnt = GetPhyRxPktCounts(pAdapter, RXERR_TYPE_OFDM_MPDU_FAIL);
+ CCK_cnt = GetPhyRxPktCounts(pAdapter, RXERR_TYPE_CCK_MPDU_FAIL);
+ HT_cnt = GetPhyRxPktCounts(pAdapter, RXERR_TYPE_HT_MPDU_FAIL);
+
+ return OFDM_cnt + CCK_cnt + HT_cnt;
+}
+
+/* reg 0x808[9:0]: FFT data x */
+/* reg 0x808[22]: 0 --> 1 to get 1 FFT data y */
+/* reg 0x8B4[15:0]: FFT data y report */
+static u32 rtw_GetPSDData(struct adapter *pAdapter, u32 point)
+{
+ int psd_val;
+
+
+ psd_val = rtw_read32(pAdapter, 0x808);
+ psd_val &= 0xFFBFFC00;
+ psd_val |= point;
+
+ rtw_write32(pAdapter, 0x808, psd_val);
+ rtw_mdelay_os(1);
+ psd_val |= 0x00400000;
+
+ rtw_write32(pAdapter, 0x808, psd_val);
+ rtw_mdelay_os(1);
+ psd_val = rtw_read32(pAdapter, 0x8B4);
+
+ psd_val &= 0x0000FFFF;
+
+ return psd_val;
+}
+
+/*
+ *pts start_point_min stop_point_max
+ * 128 64 64 + 128 = 192
+ * 256 128 128 + 256 = 384
+ * 512 256 256 + 512 = 768
+ * 1024 512 512 + 1024 = 1536
+ *
+ */
+u32 mp_query_psd(struct adapter *pAdapter, u8 *data)
+{
+ u32 i, psd_pts = 0, psd_start = 0, psd_stop = 0;
+ u32 psd_data = 0;
+
+
+ if (!netif_running(pAdapter->pnetdev)) {
+ RT_TRACE(_module_mp_, _drv_warning_, ("mp_query_psd: Fail! interface not opened!\n"));
+ return 0;
+ }
+
+ if (check_fwstate(&pAdapter->mlmepriv, WIFI_MP_STATE) == false) {
+ RT_TRACE(_module_mp_, _drv_warning_, ("mp_query_psd: Fail! not in MP mode!\n"));
+ return 0;
+ }
+
+ if (strlen(data) == 0) { /* default value */
+ psd_pts = 128;
+ psd_start = 64;
+ psd_stop = 128;
+ } else {
+ sscanf(data, "pts =%d, start =%d, stop =%d", &psd_pts, &psd_start, &psd_stop);
+ }
+
+ data[0] = '\0'; /* reset the output string; sizeof(data) is only the pointer size, not the buffer */
+
+ i = psd_start;
+ while (i < psd_stop) {
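+ /* FFT bins at or above psd_pts wrap around to the start of the table. */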
+ if (i >= psd_pts) {
+ psd_data = rtw_GetPSDData(pAdapter, i-psd_pts);
+ } else {
+ psd_data = rtw_GetPSDData(pAdapter, i);
+ }
+ sprintf(data + strlen(data), "%x ", psd_data); /* append this sample to the output string */
+ i++;
+ }
+
+ rtw_msleep_os(100);
+ return strlen(data)+1;
+}
+
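+/*
+ * Rebuild the xmit extension buffer pool: free the current buffers and
+ * reallocate them with the sizing that matches the current mp_mode setting.
+ */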
+void _rtw_mp_xmit_priv(struct xmit_priv *pxmitpriv)
+{
+ int i, res;
+ struct adapter *padapter = pxmitpriv->adapter;
+ struct xmit_buf *pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
+
+ u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
+ u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
+ if (padapter->registrypriv.mp_mode == 0) {
+ max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
+ num_xmit_extbuf = NR_XMIT_EXTBUFF;
+ } else {
+ max_xmit_extbuf_size = 20000;
+ num_xmit_extbuf = 1;
+ }
+
+ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
+ for (i = 0; i < num_xmit_extbuf; i++) {
+ rtw_os_xmit_resource_free(padapter, pxmitbuf, (max_xmit_extbuf_size + XMITBUF_ALIGN_SZ));
+
+ pxmitbuf++;
+ }
+
+ if (pxmitpriv->pallocated_xmit_extbuf)
+ rtw_vmfree(pxmitpriv->pallocated_xmit_extbuf, num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
+
+ if (padapter->registrypriv.mp_mode == 0) {
+ max_xmit_extbuf_size = 20000;
+ num_xmit_extbuf = 1;
+ } else {
+ max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
+ num_xmit_extbuf = NR_XMIT_EXTBUFF;
+ }
+
+ /* Init xmit extension buff */
+ _rtw_init_queue(&pxmitpriv->free_xmit_extbuf_queue);
+
+ pxmitpriv->pallocated_xmit_extbuf = rtw_zvmalloc(num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
+
+ if (pxmitpriv->pallocated_xmit_extbuf == NULL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_extbuf fail!\n"));
+ res = _FAIL;
+ goto exit;
+ }
+
+ pxmitpriv->pxmit_extbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitpriv->pallocated_xmit_extbuf), 4);
+
+ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
+
+ for (i = 0; i < num_xmit_extbuf; i++) {
+ _rtw_init_listhead(&pxmitbuf->list);
+
+ pxmitbuf->priv_data = NULL;
+ pxmitbuf->padapter = padapter;
+ pxmitbuf->ext_tag = true;
+
+ res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, max_xmit_extbuf_size + XMITBUF_ALIGN_SZ);
+ if (res == _FAIL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ rtw_list_insert_tail(&pxmitbuf->list, &(pxmitpriv->free_xmit_extbuf_queue.queue));
+ pxmitbuf++;
+ }
+
+ pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
+
+exit:
+ ;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c b/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c
new file mode 100644
index 00000000000..f06312c4158
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_mp_ioctl.c
@@ -0,0 +1,1508 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_MP_IOCTL_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <mlme_osdep.h>
+
+/* include <rtw_mp.h> */
+#include <rtw_mp_ioctl.h>
+
+
+/* rtl8188eu_oid_rtl_seg_81_85 section start **************** */
+int rtl8188eu_oid_rt_wireless_mode_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ if (poid_par_priv->information_buf_len < sizeof(u8))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ if (poid_par_priv->type_of_oid == SET_OID) {
+ Adapter->registrypriv.wireless_mode = *(u8 *)poid_par_priv->information_buf;
+ } else if (poid_par_priv->type_of_oid == QUERY_OID) {
+ *(u8 *)poid_par_priv->information_buf = Adapter->registrypriv.wireless_mode;
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+ RT_TRACE(_module_mp_, _drv_info_, ("-query Wireless Mode=%d\n", Adapter->registrypriv.wireless_mode));
+ } else {
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ }
+
+_func_exit_;
+
+ return status;
+}
+/* rtl8188eu_oid_rtl_seg_81_87_80 section start **************** */
+int rtl8188eu_oid_rt_pro_write_bb_reg_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct bb_reg_param *pbbreg;
+ u16 offset;
+ u32 value;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_write_bb_reg_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len < sizeof(struct bb_reg_param))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ pbbreg = (struct bb_reg_param *)(poid_par_priv->information_buf);
+
+ offset = (u16)(pbbreg->offset) & 0xFFF; /* offset: 0x800~0xfff */
+ if (offset < BB_REG_BASE_ADDR)
+ offset |= BB_REG_BASE_ADDR;
+
+ value = pbbreg->value;
+
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("rtl8188eu_oid_rt_pro_write_bb_reg_hdl: offset=0x%03X value=0x%08X\n",
+ offset, value));
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ write_bbreg(Adapter, offset, 0xFFFFFFFF, value);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_read_bb_reg_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct bb_reg_param *pbbreg;
+ u16 offset;
+ u32 value;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_read_bb_reg_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len < sizeof(struct bb_reg_param))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ pbbreg = (struct bb_reg_param *)(poid_par_priv->information_buf);
+
+ offset = (u16)(pbbreg->offset) & 0xFFF; /* offset: 0x800~0xfff */
+ if (offset < BB_REG_BASE_ADDR)
+ offset |= BB_REG_BASE_ADDR;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ value = read_bbreg(Adapter, offset, 0xFFFFFFFF);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ pbbreg->value = value;
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("-rtl8188eu_oid_rt_pro_read_bb_reg_hdl: offset=0x%03X value:0x%08X\n",
+ offset, value));
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_write_rf_reg_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct rf_reg_param *pbbreg;
+ u8 path;
+ u8 offset;
+ u32 value;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_write_rf_reg_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len < sizeof(struct rf_reg_param))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ pbbreg = (struct rf_reg_param *)(poid_par_priv->information_buf);
+
+ if (pbbreg->path >= MAX_RF_PATH_NUMS)
+ return NDIS_STATUS_NOT_ACCEPTED;
+ if (pbbreg->offset > 0xFF)
+ return NDIS_STATUS_NOT_ACCEPTED;
+ if (pbbreg->value > 0xFFFFF)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ path = (u8)pbbreg->path;
+ offset = (u8)pbbreg->offset;
+ value = pbbreg->value;
+
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("rtl8188eu_oid_rt_pro_write_rf_reg_hdl: path=%d offset=0x%02X value=0x%05X\n",
+ path, offset, value));
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ write_rfreg(Adapter, path, offset, value);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_read_rf_reg_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct rf_reg_param *pbbreg;
+ u8 path;
+ u8 offset;
+ u32 value;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+ int status = NDIS_STATUS_SUCCESS;
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_read_rf_reg_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len < sizeof(struct rf_reg_param))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ pbbreg = (struct rf_reg_param *)(poid_par_priv->information_buf);
+
+ if (pbbreg->path >= MAX_RF_PATH_NUMS)
+ return NDIS_STATUS_NOT_ACCEPTED;
+ if (pbbreg->offset > 0xFF)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ path = (u8)pbbreg->path;
+ offset = (u8)pbbreg->offset;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ value = read_rfreg(Adapter, path, offset);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ pbbreg->value = value;
+
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("-rtl8188eu_oid_rt_pro_read_rf_reg_hdl: path=%d offset=0x%02X value=0x%05X\n",
+ path, offset, value));
+
+_func_exit_;
+
+ return status;
+}
+/* rtl8188eu_oid_rtl_seg_81_87_00 section end**************** */
+/* */
+
+/* rtl8188eu_oid_rtl_seg_81_80_00 section start **************** */
+/* */
+int rtl8188eu_oid_rt_pro_set_data_rate_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u32 ratevalue;/* 4 */
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("+rtl8188eu_oid_rt_pro_set_data_rate_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len != sizeof(u32))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ ratevalue = *((u32 *)poid_par_priv->information_buf);/* 4 */
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("rtl8188eu_oid_rt_pro_set_data_rate_hdl: data rate idx=%d\n", ratevalue));
+ if (ratevalue >= MPT_RATE_LAST)
+ return NDIS_STATUS_INVALID_DATA;
+
+ Adapter->mppriv.rateidx = ratevalue;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ SetDataRate(Adapter);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_start_test_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u32 mode;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_start_test_hdl\n"));
+
+ if (Adapter->registrypriv.mp_mode == 0)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+
+ /* IQCalibrateBcut(Adapter); */
+
+ mode = *((u32 *)poid_par_priv->information_buf);
+ Adapter->mppriv.mode = mode;/* 1 for loopback */
+
+ if (mp_start_test(Adapter) == _FAIL)
+ status = NDIS_STATUS_NOT_ACCEPTED;
+
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("-rtl8188eu_oid_rt_pro_start_test_hdl: mp_mode=%d\n", Adapter->mppriv.mode));
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_stop_test_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+Set OID_RT_PRO_STOP_TEST\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ mp_stop_test(Adapter);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("-Set OID_RT_PRO_STOP_TEST\n"));
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_set_channel_direct_call_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u32 Channel;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_set_channel_direct_call_hdl\n"));
+
+ if (poid_par_priv->information_buf_len != sizeof(u32))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ if (poid_par_priv->type_of_oid == QUERY_OID) {
+ *((u32 *)poid_par_priv->information_buf) = Adapter->mppriv.channel;
+ return NDIS_STATUS_SUCCESS;
+ }
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ Channel = *((u32 *)poid_par_priv->information_buf);
+ RT_TRACE(_module_mp_, _drv_notice_, ("rtl8188eu_oid_rt_pro_set_channel_direct_call_hdl: Channel=%d\n", Channel));
+ if (Channel > 14)
+ return NDIS_STATUS_NOT_ACCEPTED;
+ Adapter->mppriv.channel = Channel;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ SetChannel(Adapter);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_set_bandwidth_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u16 bandwidth;
+ u16 channel_offset;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *padapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("+rtl8188eu_oid_rt_set_bandwidth_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len < sizeof(u32))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ bandwidth = *((u32 *)poid_par_priv->information_buf);/* 4 */
+ channel_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+
+ if (bandwidth != HT_CHANNEL_WIDTH_40)
+ bandwidth = HT_CHANNEL_WIDTH_20;
+ padapter->mppriv.bandwidth = (u8)bandwidth;
+ padapter->mppriv.prime_channel_offset = (u8)channel_offset;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ SetBandwidth(padapter);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("-rtl8188eu_oid_rt_set_bandwidth_hdl: bandwidth=%d channel_offset=%d\n",
+ bandwidth, channel_offset));
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_set_antenna_bb_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u32 antenna;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_set_antenna_bb_hdl\n"));
+
+ if (poid_par_priv->information_buf_len != sizeof(u32))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ if (poid_par_priv->type_of_oid == SET_OID) {
+ antenna = *(u32 *)poid_par_priv->information_buf;
+
+ Adapter->mppriv.antenna_tx = (u16)((antenna & 0xFFFF0000) >> 16);
+ Adapter->mppriv.antenna_rx = (u16)(antenna & 0x0000FFFF);
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("rtl8188eu_oid_rt_pro_set_antenna_bb_hdl: tx_ant=0x%04x rx_ant=0x%04x\n",
+ Adapter->mppriv.antenna_tx, Adapter->mppriv.antenna_rx));
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ SetAntenna(Adapter);
+ _irqlevel_changed_(&oldirql, RAISE);
+ } else {
+ antenna = (Adapter->mppriv.antenna_tx << 16)|Adapter->mppriv.antenna_rx;
+ *(u32 *)poid_par_priv->information_buf = antenna;
+ }
+
+_func_exit_;
+
+ return status;
+}
+
+int rtl8188eu_oid_rt_pro_set_tx_power_control_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u32 tx_pwr_idx;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_info_, ("+rtl8188eu_oid_rt_pro_set_tx_power_control_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len != sizeof(u32))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ tx_pwr_idx = *((u32 *)poid_par_priv->information_buf);
+ if (tx_pwr_idx > MAX_TX_PWR_INDEX_N_MODE)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ Adapter->mppriv.txpoweridx = (u8)tx_pwr_idx;
+
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("rtl8188eu_oid_rt_pro_set_tx_power_control_hdl: idx=0x%2x\n",
+ Adapter->mppriv.txpoweridx));
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ SetTxPower(Adapter);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+
+/* */
+/* rtl8188eu_oid_rtl_seg_81_80_20 section start **************** */
+/* */
+int rtl8188eu_oid_rt_pro_query_tx_packet_sent_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != QUERY_OID) {
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ return status;
+ }
+
+ if (poid_par_priv->information_buf_len == sizeof(u32)) {
+ *(u32 *)poid_par_priv->information_buf = Adapter->mppriv.tx_pktcount;
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+ } else {
+ status = NDIS_STATUS_INVALID_LENGTH;
+ }
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_query_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != QUERY_OID) {
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ return status;
+ }
+ RT_TRACE(_module_mp_, _drv_alert_, ("===> rtl8188eu_oid_rt_pro_query_rx_packet_received_hdl.\n"));
+ if (poid_par_priv->information_buf_len == sizeof(u32)) {
+ *(u32 *)poid_par_priv->information_buf = Adapter->mppriv.rx_pktcount;
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+ RT_TRACE(_module_mp_, _drv_alert_, ("recv_ok:%d\n", Adapter->mppriv.rx_pktcount));
+ } else {
+ status = NDIS_STATUS_INVALID_LENGTH;
+ }
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_query_rx_packet_crc32_error_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != QUERY_OID) {
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ return status;
+ }
+ RT_TRACE(_module_mp_, _drv_alert_, ("===> rtl8188eu_oid_rt_pro_query_rx_packet_crc32_error_hdl.\n"));
+ if (poid_par_priv->information_buf_len == sizeof(u32)) {
+ *(u32 *)poid_par_priv->information_buf = Adapter->mppriv.rx_crcerrpktcount;
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+ RT_TRACE(_module_mp_, _drv_alert_, ("recv_err:%d\n", Adapter->mppriv.rx_crcerrpktcount));
+ } else {
+ status = NDIS_STATUS_INVALID_LENGTH;
+ }
+
+_func_exit_;
+
+ return status;
+}
+/* */
+
+int rtl8188eu_oid_rt_pro_reset_tx_packet_sent_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != SET_OID) {
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ return status;
+ }
+
+ RT_TRACE(_module_mp_, _drv_alert_, ("===> rtl8188eu_oid_rt_pro_reset_tx_packet_sent_hdl.\n"));
+ Adapter->mppriv.tx_pktcount = 0;
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_reset_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != SET_OID) {
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ return status;
+ }
+
+ if (poid_par_priv->information_buf_len == sizeof(u32)) {
+ Adapter->mppriv.rx_pktcount = 0;
+ Adapter->mppriv.rx_crcerrpktcount = 0;
+ } else {
+ status = NDIS_STATUS_INVALID_LENGTH;
+ }
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_reset_phy_rx_packet_count_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != SET_OID) {
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ return status;
+ }
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ ResetPhyRxPktCount(Adapter);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_get_phy_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_info_, ("+rtl8188eu_oid_rt_get_phy_rx_packet_received_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len != sizeof(u32))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ *(u32 *)poid_par_priv->information_buf = GetPhyRxPktReceived(Adapter);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("-rtl8188eu_oid_rt_get_phy_rx_packet_received_hdl: recv_ok=%d\n", *(u32 *)poid_par_priv->information_buf));
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_get_phy_rx_packet_crc32_error_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_info_, ("+rtl8188eu_oid_rt_get_phy_rx_packet_crc32_error_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+
+ if (poid_par_priv->information_buf_len != sizeof(u32))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ *(u32 *)poid_par_priv->information_buf = GetPhyRxPktCRC32Error(Adapter);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("-rtl8188eu_oid_rt_get_phy_rx_packet_crc32_error_hdl: recv_err =%d\n",
+ *(u32 *)poid_par_priv->information_buf));
+
+_func_exit_;
+
+ return status;
+}
+/* rtl8188eu_oid_rtl_seg_81_80_20 section end **************** */
+int rtl8188eu_oid_rt_pro_set_continuous_tx_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u32 bStartTest;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_set_continuous_tx_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ bStartTest = *((u32 *)poid_par_priv->information_buf);
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ SetContinuousTx(Adapter, (u8)bStartTest);
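+ /* When starting the test, make sure any running packet TX is stopped first, then queue a single MP frame. */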
+ if (bStartTest) {
+ struct mp_priv *pmp_priv = &Adapter->mppriv;
+ if (pmp_priv->tx.stop == 0) {
+ pmp_priv->tx.stop = 1;
+ DBG_88E("%s: pkt tx is running...\n", __func__);
+ rtw_msleep_os(5);
+ }
+ pmp_priv->tx.stop = 0;
+ pmp_priv->tx.count = 1;
+ SetPacketTx(Adapter);
+ }
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+
+int rtl8188eu_oid_rt_pro_set_single_carrier_tx_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u32 bStartTest;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_alert_, ("+rtl8188eu_oid_rt_pro_set_single_carrier_tx_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ bStartTest = *((u32 *)poid_par_priv->information_buf);
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ SetSingleCarrierTx(Adapter, (u8)bStartTest);
+ if (bStartTest) {
+ struct mp_priv *pmp_priv = &Adapter->mppriv;
+ if (pmp_priv->tx.stop == 0) {
+ pmp_priv->tx.stop = 1;
+ DBG_88E("%s: pkt tx is running...\n", __func__);
+ rtw_msleep_os(5);
+ }
+ pmp_priv->tx.stop = 0;
+ pmp_priv->tx.count = 1;
+ SetPacketTx(Adapter);
+ }
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+
+int rtl8188eu_oid_rt_pro_set_carrier_suppression_tx_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u32 bStartTest;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_set_carrier_suppression_tx_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ bStartTest = *((u32 *)poid_par_priv->information_buf);
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ SetCarrierSuppressionTx(Adapter, (u8)bStartTest);
+ if (bStartTest) {
+ struct mp_priv *pmp_priv = &Adapter->mppriv;
+ if (pmp_priv->tx.stop == 0) {
+ pmp_priv->tx.stop = 1;
+ DBG_88E("%s: pkt tx is running...\n", __func__);
+ rtw_msleep_os(5);
+ }
+ pmp_priv->tx.stop = 0;
+ pmp_priv->tx.count = 1;
+ SetPacketTx(Adapter);
+ }
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+
+int rtl8188eu_oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u32 bStartTest;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_alert_, ("+rtl8188eu_oid_rt_pro_set_single_tone_tx_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ bStartTest = *((u32 *)poid_par_priv->information_buf);
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ SetSingleToneTx(Adapter, (u8)bStartTest);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+
+int rtl8188eu_oid_rt_pro_set_modulation_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+
+int rtl8188eu_oid_rt_pro_trigger_gpio_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+ int status = NDIS_STATUS_SUCCESS;
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ rtw_hal_set_hwreg(Adapter, HW_VAR_TRIGGER_GPIO_0, NULL);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* rtl8188eu_oid_rtl_seg_81_80_00 section end **************** */
+/* */
+int rtl8188eu_oid_rt_pro8711_join_bss_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro_read_register_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct mp_rw_reg *RegRWStruct;
+ u32 offset, width;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("+rtl8188eu_oid_rt_pro_read_register_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ RegRWStruct = (struct mp_rw_reg *)poid_par_priv->information_buf;
+ offset = RegRWStruct->offset;
+ width = RegRWStruct->width;
+
+ if (offset > 0xFFF)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+
+ switch (width) {
+ case 1:
+ RegRWStruct->value = rtw_read8(Adapter, offset);
+ break;
+ case 2:
+ RegRWStruct->value = rtw_read16(Adapter, offset);
+ break;
+ default:
+ width = 4;
+ RegRWStruct->value = rtw_read32(Adapter, offset);
+ break;
+ }
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("rtl8188eu_oid_rt_pro_read_register_hdl: offset:0x%04X value:0x%X\n",
+ offset, RegRWStruct->value));
+
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ *poid_par_priv->bytes_rw = width;
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_write_register_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct mp_rw_reg *RegRWStruct;
+ u32 offset, width, value;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *padapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("+rtl8188eu_oid_rt_pro_write_register_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ RegRWStruct = (struct mp_rw_reg *)poid_par_priv->information_buf;
+ offset = RegRWStruct->offset;
+ width = RegRWStruct->width;
+ value = RegRWStruct->value;
+
+ if (offset > 0xFFF)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+
+ switch (RegRWStruct->width) {
+ case 1:
+ if (value > 0xFF) {
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ break;
+ }
+ rtw_write8(padapter, offset, (u8)value);
+ break;
+ case 2:
+ if (value > 0xFFFF) {
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ break;
+ }
+ rtw_write16(padapter, offset, (u16)value);
+ break;
+ case 4:
+ rtw_write32(padapter, offset, value);
+ break;
+ default:
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ break;
+ }
+
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("-rtl8188eu_oid_rt_pro_write_register_hdl: offset=0x%08X width=%d value=0x%X\n",
+ offset, width, value));
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_burst_read_register_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro_burst_write_register_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro_write_txcmd_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+
+/* */
+int rtl8188eu_oid_rt_pro_read16_eeprom_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+
+/* */
+int rtl8188eu_oid_rt_pro_write16_eeprom_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro8711_wi_poll_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro8711_pkt_loss_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_rd_attrib_mem_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_wr_attrib_mem_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro_set_rf_intfs_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_poll_rx_status_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro_cfg_debug_message_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro_set_data_rate_ex_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+ int status = NDIS_STATUS_SUCCESS;
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+OID_RT_PRO_SET_DATA_RATE_EX\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+
+ if (rtw_setdatarate_cmd(Adapter, poid_par_priv->information_buf) != _SUCCESS)
+ status = NDIS_STATUS_NOT_ACCEPTED;
+
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_get_thermal_meter_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ u8 thermal = 0;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_get_thermal_meter_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len < sizeof(u32))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ GetThermalMeter(Adapter, &thermal);
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ *(u32 *)poid_par_priv->information_buf = (u32)thermal;
+ *poid_par_priv->bytes_rw = sizeof(u32);
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_read_tssi_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro_set_power_tracking_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+
+_func_enter_;
+
+ if (poid_par_priv->information_buf_len < sizeof(u8))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ if (poid_par_priv->type_of_oid == SET_OID) {
+ u8 enable;
+
+ enable = *(u8 *)poid_par_priv->information_buf;
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("+rtl8188eu_oid_rt_pro_set_power_tracking_hdl: enable =%d\n", enable));
+
+ SetPowerTracking(Adapter, enable);
+ } else {
+ GetPowerTracking(Adapter, (u8 *)poid_par_priv->information_buf);
+ }
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_set_basic_rate_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro_qry_pwrstate_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro_set_pwrstate_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro_h2c_set_rate_table_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+/* */
+int rtl8188eu_oid_rt_pro_h2c_get_rate_table_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+
+/* rtl8188eu_oid_rtl_seg_87_12_00 section start **************** */
+int rtl8188eu_oid_rt_pro_encryption_ctrl_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+
+int rtl8188eu_oid_rt_pro_add_sta_info_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+
+int rtl8188eu_oid_rt_pro_dele_sta_info_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+
+int rtl8188eu_oid_rt_pro_query_dr_variable_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+
+int rtl8188eu_oid_rt_pro_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return NDIS_STATUS_SUCCESS;
+}
+/* */
+int rtl8188eu_oid_rt_pro_read_efuse_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct efuse_access_struct *pefuse;
+ u8 *data;
+ u16 addr = 0, cnts = 0, max_available_size = 0;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len < sizeof(struct efuse_access_struct))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ pefuse = (struct efuse_access_struct *)poid_par_priv->information_buf;
+ addr = pefuse->start_addr;
+ cnts = pefuse->cnts;
+ data = pefuse->data;
+
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("+rtl8188eu_oid_rt_pro_read_efuse_hd: buf_len=%d addr=%d cnts=%d\n",
+ poid_par_priv->information_buf_len, addr, cnts));
+
+ EFUSE_GetEfuseDefinition(Adapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+
+ if ((addr + cnts) > max_available_size) {
+ RT_TRACE(_module_mp_, _drv_err_, ("!rtl8188eu_oid_rt_pro_read_efuse_hdl: parameter error!\n"));
+ return NDIS_STATUS_NOT_ACCEPTED;
+ }
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ if (rtw_efuse_access(Adapter, false, addr, cnts, data) == _FAIL) {
+ RT_TRACE(_module_mp_, _drv_err_, ("!rtl8188eu_oid_rt_pro_read_efuse_hdl: rtw_efuse_access FAIL!\n"));
+ status = NDIS_STATUS_FAILURE;
+ } else {
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+ }
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_write_efuse_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct efuse_access_struct *pefuse;
+ u8 *data;
+ u16 addr = 0, cnts = 0, max_available_size = 0;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ pefuse = (struct efuse_access_struct *)poid_par_priv->information_buf;
+ addr = pefuse->start_addr;
+ cnts = pefuse->cnts;
+ data = pefuse->data;
+
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("+rtl8188eu_oid_rt_pro_write_efuse_hdl: buf_len=%d addr=0x%04x cnts=%d\n",
+ poid_par_priv->information_buf_len, addr, cnts));
+
+ EFUSE_GetEfuseDefinition(Adapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+
+ if ((addr + cnts) > max_available_size) {
+ RT_TRACE(_module_mp_, _drv_err_, ("!rtl8188eu_oid_rt_pro_write_efuse_hdl: parameter error"));
+ return NDIS_STATUS_NOT_ACCEPTED;
+ }
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ if (rtw_efuse_access(Adapter, true, addr, cnts, data) == _FAIL)
+ status = NDIS_STATUS_FAILURE;
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_rw_efuse_pgpkt_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct pgpkt *ppgpkt;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ *poid_par_priv->bytes_rw = 0;
+
+ if (poid_par_priv->information_buf_len < sizeof(struct pgpkt))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ ppgpkt = (struct pgpkt *)poid_par_priv->information_buf;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+
+ if (poid_par_priv->type_of_oid == QUERY_OID) {
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("rtl8188eu_oid_rt_pro_rw_efuse_pgpkt_hdl: Read offset=0x%x\n",\
+ ppgpkt->offset));
+
+ Efuse_PowerSwitch(Adapter, false, true);
+ if (Efuse_PgPacketRead(Adapter, ppgpkt->offset, ppgpkt->data, false) == true)
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+ else
+ status = NDIS_STATUS_FAILURE;
+ Efuse_PowerSwitch(Adapter, false, false);
+ } else {
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("rtl8188eu_oid_rt_pro_rw_efuse_pgpkt_hdl: Write offset=0x%x word_en=0x%x\n",\
+ ppgpkt->offset, ppgpkt->word_en));
+
+ Efuse_PowerSwitch(Adapter, true, true);
+ if (Efuse_PgPacketWrite(Adapter, ppgpkt->offset, ppgpkt->word_en, ppgpkt->data, false) == true)
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+ else
+ status = NDIS_STATUS_FAILURE;
+ Efuse_PowerSwitch(Adapter, true, false);
+ }
+
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("-rtl8188eu_oid_rt_pro_rw_efuse_pgpkt_hdl: status=0x%08X\n", status));
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_get_efuse_current_size_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u16 size;
+ u8 ret;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len < sizeof(u32))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+ ret = efuse_GetCurrentSize(Adapter, &size);
+ _irqlevel_changed_(&oldirql, RAISE);
+ if (ret == _SUCCESS) {
+ *(u32 *)poid_par_priv->information_buf = size;
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+ } else {
+ status = NDIS_STATUS_FAILURE;
+ }
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_get_efuse_max_size_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != QUERY_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len < sizeof(u32))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ *(u32 *)poid_par_priv->information_buf = efuse_GetMaxSize(Adapter);
+ *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
+
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("-rtl8188eu_oid_rt_get_efuse_max_size_hdl: size=%d status=0x%08X\n",
+ *(int *)poid_par_priv->information_buf, status));
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_efuse_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status;
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_info_, ("+rtl8188eu_oid_rt_pro_efuse_hdl\n"));
+
+ if (poid_par_priv->type_of_oid == QUERY_OID)
+ status = rtl8188eu_oid_rt_pro_read_efuse_hdl(poid_par_priv);
+ else
+ status = rtl8188eu_oid_rt_pro_write_efuse_hdl(poid_par_priv);
+
+ RT_TRACE(_module_mp_, _drv_info_, ("-rtl8188eu_oid_rt_pro_efuse_hdl: status=0x%08X\n", status));
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_pro_efuse_map_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u8 *data;
+ int status = NDIS_STATUS_SUCCESS;
+ struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
+ u16 maplen = 0;
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_pro_efuse_map_hdl\n"));
+
+ EFUSE_GetEfuseDefinition(Adapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&maplen, false);
+
+ *poid_par_priv->bytes_rw = 0;
+
+ if (poid_par_priv->information_buf_len < maplen)
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ data = (u8 *)poid_par_priv->information_buf;
+
+ _irqlevel_changed_(&oldirql, LOWER);
+
+ if (poid_par_priv->type_of_oid == QUERY_OID) {
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("rtl8188eu_oid_rt_pro_efuse_map_hdl: READ\n"));
+
+ if (rtw_efuse_map_read(Adapter, 0, maplen, data) == _SUCCESS) {
+ *poid_par_priv->bytes_rw = maplen;
+ } else {
+ RT_TRACE(_module_mp_, _drv_err_,
+ ("rtl8188eu_oid_rt_pro_efuse_map_hdl: READ fail\n"));
+ status = NDIS_STATUS_FAILURE;
+ }
+ } else {
+ /* SET_OID */
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("rtl8188eu_oid_rt_pro_efuse_map_hdl: WRITE\n"));
+
+ if (rtw_efuse_map_write(Adapter, 0, maplen, data) == _SUCCESS) {
+ *poid_par_priv->bytes_rw = maplen;
+ } else {
+ RT_TRACE(_module_mp_, _drv_err_,
+ ("rtl8188eu_oid_rt_pro_efuse_map_hdl: WRITE fail\n"));
+ status = NDIS_STATUS_FAILURE;
+ }
+ }
+
+ _irqlevel_changed_(&oldirql, RAISE);
+
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("-rtl8188eu_oid_rt_pro_efuse_map_hdl: status=0x%08X\n", status));
+
+_func_exit_;
+
+ return status;
+}
+
+int rtl8188eu_oid_rt_set_crystal_cap_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return NDIS_STATUS_SUCCESS;
+}
+
+int rtl8188eu_oid_rt_set_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv)
+{
+ u8 rx_pkt_type;
+ int status = NDIS_STATUS_SUCCESS;
+
+_func_enter_;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+rtl8188eu_oid_rt_set_rx_packet_type_hdl\n"));
+
+ if (poid_par_priv->type_of_oid != SET_OID)
+ return NDIS_STATUS_NOT_ACCEPTED;
+
+ if (poid_par_priv->information_buf_len < sizeof(u8))
+ return NDIS_STATUS_INVALID_LENGTH;
+
+ rx_pkt_type = *((u8 *)poid_par_priv->information_buf);/* 4 */
+
+ RT_TRACE(_module_mp_, _drv_info_, ("rx_pkt_type: %x\n", rx_pkt_type));
+_func_exit_;
+
+ return status;
+}
+
+int rtl8188eu_oid_rt_pro_set_tx_agc_offset_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+
+int rtl8188eu_oid_rt_pro_set_pkt_test_mode_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
+
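+/*
+ * Control the MP transmit loop. A query reports whether transmission is
+ * running and how many frames have been sent; a set with enable == 0 stops
+ * transmission, while a set on a stopped loop restarts it with the requested
+ * count, payload type, length and destination address.
+ */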
+int rtl8188eu_mp_ioctl_xmit_packet_hdl(struct oid_par_priv *poid_par_priv)
+{
+ struct mp_xmit_parm *pparm;
+ struct adapter *padapter;
+ struct mp_priv *pmp_priv;
+ struct pkt_attrib *pattrib;
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("+%s\n", __func__));
+
+ pparm = (struct mp_xmit_parm *)poid_par_priv->information_buf;
+ padapter = (struct adapter *)poid_par_priv->adapter_context;
+ pmp_priv = &padapter->mppriv;
+
+ if (poid_par_priv->type_of_oid == QUERY_OID) {
+ pparm->enable = !pmp_priv->tx.stop;
+ pparm->count = pmp_priv->tx.sended;
+ } else {
+ if (pparm->enable == 0) {
+ pmp_priv->tx.stop = 1;
+ } else if (pmp_priv->tx.stop == 1) {
+ pmp_priv->tx.stop = 0;
+ pmp_priv->tx.count = pparm->count;
+ pmp_priv->tx.payload = pparm->payload_type;
+ pattrib = &pmp_priv->tx.attrib;
+ pattrib->pktlen = pparm->length;
+ memcpy(pattrib->dst, pparm->da, ETH_ALEN);
+ SetPacketTx(padapter);
+ } else {
+ return NDIS_STATUS_FAILURE;
+ }
+ }
+
+ return NDIS_STATUS_SUCCESS;
+}
+
+/* */
+int rtl8188eu_oid_rt_set_power_down_hdl(struct oid_par_priv *poid_par_priv)
+{
+ int status = NDIS_STATUS_SUCCESS;
+
+_func_enter_;
+
+ if (poid_par_priv->type_of_oid != SET_OID) {
+ status = NDIS_STATUS_NOT_ACCEPTED;
+ return status;
+ }
+
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("\n ===> Setrtl8188eu_oid_rt_set_power_down_hdl.\n"));
+
+ _irqlevel_changed_(&oldirql, LOWER);
+
+ /* CALL the power_down function */
+ _irqlevel_changed_(&oldirql, RAISE);
+
+_func_exit_;
+
+ return status;
+}
+/* */
+int rtl8188eu_oid_rt_get_power_mode_hdl(struct oid_par_priv *poid_par_priv)
+{
+ return 0;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_p2p.c b/drivers/staging/rtl8188eu/core/rtw_p2p.c
new file mode 100644
index 00000000000..8cf915f4cf9
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_p2p.c
@@ -0,0 +1,2064 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_P2P_C_
+
+#include <drv_types.h>
+#include <rtw_p2p.h>
+#include <wifi.h>
+
+#ifdef CONFIG_88EU_P2P
+
+static int rtw_p2p_is_channel_list_ok(u8 desired_ch, u8 *ch_list, u8 ch_cnt)
+{
+ int found = 0, i = 0;
+
+ for (i = 0; i < ch_cnt; i++) {
+ if (ch_list[i] == desired_ch) {
+ found = 1;
+ break;
+ }
+ }
+ return found;
+}
+
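+/*
+ * Build the P2P Group Info attribute for the GO role by walking the
+ * associated-station list and appending one client info descriptor (device
+ * address, interface address, capability, config methods, device types and
+ * optional device name) per associated P2P client. Returns the number of
+ * bytes written to pbuf.
+ */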
+static u32 go_add_group_info_attr(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ unsigned long irqL;
+ struct list_head *phead, *plist;
+ u32 len = 0;
+ u16 attr_len = 0;
+ u8 tmplen, *pdata_attr, *pstart, *pcur;
+ struct sta_info *psta = NULL;
+ struct adapter *padapter = pwdinfo->padapter;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_88E("%s\n", __func__);
+
+ pdata_attr = rtw_zmalloc(MAX_P2P_IE_LEN);
+ if (pdata_attr == NULL)
+ return 0;
+
+ pstart = pdata_attr;
+ pcur = pdata_attr;
+
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+
+ /* look up sta asoc_queue */
+ while ((rtw_end_of_queue_search(phead, plist)) == false) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+
+ plist = get_next(plist);
+
+
+ if (psta->is_p2p_device) {
+ tmplen = 0;
+
+ pcur++;
+
+ /* P2P device address */
+ memcpy(pcur, psta->dev_addr, ETH_ALEN);
+ pcur += ETH_ALEN;
+
+ /* P2P interface address */
+ memcpy(pcur, psta->hwaddr, ETH_ALEN);
+ pcur += ETH_ALEN;
+
+ *pcur = psta->dev_cap;
+ pcur++;
+
+ /* u16*)(pcur) = cpu_to_be16(psta->config_methods); */
+ RTW_PUT_BE16(pcur, psta->config_methods);
+ pcur += 2;
+
+ memcpy(pcur, psta->primary_dev_type, 8);
+ pcur += 8;
+
+ *pcur = psta->num_of_secdev_type;
+ pcur++;
+
+ memcpy(pcur, psta->secdev_types_list, psta->num_of_secdev_type*8);
+ pcur += psta->num_of_secdev_type*8;
+
+ if (psta->dev_name_len > 0) {
+ /* u16*)(pcur) = cpu_to_be16(WPS_ATTR_DEVICE_NAME); */
+ RTW_PUT_BE16(pcur, WPS_ATTR_DEVICE_NAME);
+ pcur += 2;
+
+ /* u16*)(pcur) = cpu_to_be16(psta->dev_name_len); */
+ RTW_PUT_BE16(pcur, psta->dev_name_len);
+ pcur += 2;
+
+ memcpy(pcur, psta->dev_name, psta->dev_name_len);
+ pcur += psta->dev_name_len;
+ }
+
+
+ tmplen = (u8)(pcur-pstart);
+
+ *pstart = (tmplen-1);
+
+ attr_len += tmplen;
+
+ /* pstart += tmplen; */
+ pstart = pcur;
+ }
+ }
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+
+ if (attr_len > 0)
+ len = rtw_set_p2p_attr_content(pbuf, P2P_ATTR_GROUP_INFO, attr_len, pdata_attr);
+
+ kfree(pdata_attr);
+ return len;
+}
+
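+/*
+ * Send a GO Discoverability Request action frame (no IEs) to the P2P client
+ * addressed by @da, asking it to become discoverable.
+ */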
+static void issue_group_disc_req(struct wifidirect_info *pwdinfo, u8 *da)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct adapter *padapter = pwdinfo->padapter;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ unsigned char category = RTW_WLAN_CATEGORY_P2P;/* P2P action frame */
+ __be32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_GO_DISC_REQUEST;
+ u8 dialogToken = 0;
+
+ DBG_88E("[%s]\n", __func__);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, pwdinfo->interface_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pwdinfo->interface_addr, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ /* Build P2P action frame header */
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&(p2poui), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(oui_subtype), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(dialogToken), &(pattrib->pktlen));
+
+ /* there is no IE in this P2P action frame */
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+}
+
+static void issue_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *da, u8 status, u8 dialogToken)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct adapter *padapter = pwdinfo->padapter;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ __be32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_DEVDISC_RESP;
+ u8 p2pie[8] = { 0x00 };
+ u32 p2pielen = 0;
+
+ DBG_88E("[%s]\n", __func__);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, pwdinfo->device_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pwdinfo->device_addr, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ /* Build P2P public action frame header */
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&(p2poui), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(oui_subtype), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(dialogToken), &(pattrib->pktlen));
+
+
+ /* Build P2P IE */
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* P2P_ATTR_STATUS */
+ p2pielen += rtw_set_p2p_attr_content(&p2pie[p2pielen], P2P_ATTR_STATUS, 1, &status);
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, p2pie, &pattrib->pktlen);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+}
+
+static void issue_p2p_provision_resp(struct wifidirect_info *pwdinfo, u8 *raddr, u8 *frame_body, u16 config_method)
+{
+ struct adapter *padapter = pwdinfo->padapter;
+ unsigned char category = RTW_WLAN_CATEGORY_PUBLIC;
+ u8 action = P2P_PUB_ACTION_ACTION;
+ u8 dialogToken = frame_body[7]; /* The Dialog Token of provisioning discovery request frame. */
+ __be32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_PROVISION_DISC_RESP;
+ u8 wpsie[100] = { 0x00 };
+ u8 wpsielen = 0;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, raddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(padapter->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, myid(&(padapter->eeprompriv)), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(action), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&(p2poui), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(oui_subtype), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(dialogToken), &(pattrib->pktlen));
+
+ wpsielen = 0;
+ /* WPS OUI */
+ RTW_PUT_BE32(wpsie, WPSOUI);
+ wpsielen += 4;
+
+ /* Config Method */
+ /* Type: */
+ RTW_PUT_BE16(wpsie + wpsielen, WPS_ATTR_CONF_METHOD);
+ wpsielen += 2;
+
+ /* Length: */
+ RTW_PUT_BE16(wpsie + wpsielen, 0x0002);
+ wpsielen += 2;
+
+ /* Value: */
+ RTW_PUT_BE16(wpsie + wpsielen, config_method);
+ wpsielen += 2;
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, wpsielen, (unsigned char *)wpsie, &pattrib->pktlen);
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+}
+
+static void issue_p2p_presence_resp(struct wifidirect_info *pwdinfo, u8 *da, u8 status, u8 dialogToken)
+{
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ unsigned char *pframe;
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ unsigned short *fctrl;
+ struct adapter *padapter = pwdinfo->padapter;
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ unsigned char category = RTW_WLAN_CATEGORY_P2P;/* P2P action frame */
+ __be32 p2poui = cpu_to_be32(P2POUI);
+ u8 oui_subtype = P2P_PRESENCE_RESPONSE;
+ u8 p2pie[MAX_P2P_IE_LEN] = { 0x00 };
+ u8 noa_attr_content[32] = { 0x00 };
+ u32 p2pielen = 0;
+
+ DBG_88E("[%s]\n", __func__);
+
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ return;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(padapter, pattrib);
+
+ _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
+
+ pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, da, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, pwdinfo->interface_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pwdinfo->interface_addr, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
+ pmlmeext->mgnt_seq++;
+ SetFrameSubType(pframe, WIFI_ACTION);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ /* Build P2P action frame header */
+ pframe = rtw_set_fixed_ie(pframe, 1, &(category), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 4, (unsigned char *)&(p2poui), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(oui_subtype), &(pattrib->pktlen));
+ pframe = rtw_set_fixed_ie(pframe, 1, &(dialogToken), &(pattrib->pktlen));
+
+
+ /* Add P2P IE header */
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Add Status attribute in P2P IE */
+ p2pielen += rtw_set_p2p_attr_content(&p2pie[p2pielen], P2P_ATTR_STATUS, 1, &status);
+
+ /* Add NoA attribute in P2P IE */
+ noa_attr_content[0] = 0x1;/* index */
+ noa_attr_content[1] = 0x0;/* CTWindow and OppPS Parameters */
+
+ /* todo: Notice of Absence Descriptor(s) */
+
+ p2pielen += rtw_set_p2p_attr_content(&p2pie[p2pielen], P2P_ATTR_NOA, 2, noa_attr_content);
+
+
+
+ pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, p2pie, &(pattrib->pktlen));
+
+
+ pattrib->last_txcmdsz = pattrib->pktlen;
+
+ dump_mgntframe(padapter, pmgntframe);
+}
+
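+/*
+ * Build the P2P IE carried in GO beacons: the P2P OUI header followed by the
+ * Capability and Device ID attributes (the Notice of Absence attribute is
+ * left empty here). Returns the number of bytes appended to pbuf.
+ */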
+u32 build_beacon_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ u8 p2pie[MAX_P2P_IE_LEN] = { 0x00 };
+ u16 capability = 0;
+ u32 len = 0, p2pielen = 0;
+ __le16 le_tmp;
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+
+ /* According to the P2P Specification, the beacon frame should contain 3 P2P attributes */
+ /* 1. P2P Capability */
+ /* 2. P2P Device ID */
+ /* 3. Notice of Absence (NOA) */
+
+ /* P2P Capability ATTR */
+ /* Type: */
+ /* Length: */
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ /* Be able to participate in additional P2P Groups and */
+ /* support the P2P Invitation Procedure */
+ /* Group Capability Bitmap, 1 byte */
+ capability = P2P_DEVCAP_INVITATION_PROC|P2P_DEVCAP_CLIENT_DISCOVERABILITY;
+ capability |= ((P2P_GRPCAP_GO | P2P_GRPCAP_INTRABSS) << 8);
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_PROVISIONING_ING))
+ capability |= (P2P_GRPCAP_GROUP_FORMATION<<8);
+
+ le_tmp = cpu_to_le16(capability);
+ p2pielen += rtw_set_p2p_attr_content(&p2pie[p2pielen], P2P_ATTR_CAPABILITY, 2, (u8 *)&le_tmp);
+
+ /* P2P Device ID ATTR */
+ p2pielen += rtw_set_p2p_attr_content(&p2pie[p2pielen], P2P_ATTR_DEVICE_ID, ETH_ALEN, pwdinfo->device_addr);
+
+ /* Notice of Absence ATTR */
+ /* Type: */
+ /* Length: */
+ /* Value: */
+
+ pbuf = rtw_set_ie(pbuf, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &len);
+ return len;
+}
+
+u32 build_probe_resp_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ u8 p2pie[MAX_P2P_IE_LEN] = { 0x00 };
+ u32 len = 0, p2pielen = 0;
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20100907 */
+ /* According to the P2P Specification, the probe response frame should contain 5 P2P attributes */
+ /* 1. P2P Capability */
+ /* 2. Extended Listen Timing */
+ /* 3. Notice of Absence (NOA) (Only GO needs this) */
+ /* 4. Device Info */
+ /* 5. Group Info (Only GO need this) */
+
+ /* P2P Capability ATTR */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(0x0002); */
+ RTW_PUT_LE16(p2pie + p2pielen, 0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
+
+ /* Group Capability Bitmap, 1 byte */
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ p2pie[p2pielen] = (P2P_GRPCAP_GO | P2P_GRPCAP_INTRABSS);
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_PROVISIONING_ING))
+ p2pie[p2pielen] |= P2P_GRPCAP_GROUP_FORMATION;
+
+ p2pielen++;
+ } else if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE)) {
+ /* Group Capability Bitmap, 1 byte */
+ if (pwdinfo->persistent_supported)
+ p2pie[p2pielen++] = P2P_GRPCAP_PERSISTENT_GROUP | DMP_P2P_GRPCAP_SUPPORT;
+ else
+ p2pie[p2pielen++] = DMP_P2P_GRPCAP_SUPPORT;
+ }
+
+ /* Extended Listen Timing ATTR */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_EX_LISTEN_TIMING;
+
+ /* Length: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(0x0004); */
+ RTW_PUT_LE16(p2pie + p2pielen, 0x0004);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Availability Period */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(0xFFFF); */
+ RTW_PUT_LE16(p2pie + p2pielen, 0xFFFF);
+ p2pielen += 2;
+
+ /* Availability Interval */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(0xFFFF); */
+ RTW_PUT_LE16(p2pie + p2pielen, 0xFFFF);
+ p2pielen += 2;
+
+
+ /* Notice of Absence ATTR */
+ /* Type: */
+ /* Length: */
+ /* Value: */
+
+ /* Device Info ATTR */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes) */
+ /* + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes) */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len); */
+ RTW_PUT_LE16(p2pie + p2pielen, 21 + pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address */
+ memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Config Method */
+ /* This field should be big endian. Noted by P2P specification. */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(pwdinfo->supported_wps_cm); */
+ RTW_PUT_BE16(p2pie + p2pielen, pwdinfo->supported_wps_cm);
+ p2pielen += 2;
+
+ /* Primary Device Type */
+ /* Category ID */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA); */
+ RTW_PUT_BE16(p2pie + p2pielen, WPS_PDT_CID_MULIT_MEDIA);
+ p2pielen += 2;
+
+ /* OUI */
+ /* u32*) (p2pie + p2pielen) = cpu_to_be32(WPSOUI); */
+ RTW_PUT_BE32(p2pie + p2pielen, WPSOUI);
+ p2pielen += 4;
+
+ /* Sub Category ID */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER); */
+ RTW_PUT_BE16(p2pie + p2pielen, WPS_PDT_SCID_MEDIA_SERVER);
+ p2pielen += 2;
+
+ /* Number of Secondary Device Types */
+ p2pie[p2pielen++] = 0x00; /* No Secondary Device Type List */
+
+ /* Device Name */
+ /* Type: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME); */
+ RTW_PUT_BE16(p2pie + p2pielen, WPS_ATTR_DEVICE_NAME);
+ p2pielen += 2;
+
+ /* Length: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len); */
+ RTW_PUT_BE16(p2pie + p2pielen, pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->device_name, pwdinfo->device_name_len);
+ p2pielen += pwdinfo->device_name_len;
+
+ /* Group Info ATTR */
+ /* Type: */
+ /* Length: */
+ /* Value: */
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
+ p2pielen += go_add_group_info_attr(pwdinfo, p2pie + p2pielen);
+
+
+ pbuf = rtw_set_ie(pbuf, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &len);
+
+
+ return len;
+}
+
+u32 build_prov_disc_request_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf, u8 *pssid, u8 ussidlen, u8 *pdev_raddr)
+{
+ u8 p2pie[MAX_P2P_IE_LEN] = { 0x00 };
+ u32 len = 0, p2pielen = 0;
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* Commented by Albert 20110301 */
+ /* According to the P2P Specification, the provision discovery request frame should contain 3 P2P attributes */
+ /* 1. P2P Capability */
+ /* 2. Device Info */
+ /* 3. Group ID (When joining an operating P2P Group) */
+
+ /* P2P Capability ATTR */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_CAPABILITY;
+
+ /* Length: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(0x0002); */
+ RTW_PUT_LE16(p2pie + p2pielen, 0x0002);
+ p2pielen += 2;
+
+ /* Value: */
+ /* Device Capability Bitmap, 1 byte */
+ p2pie[p2pielen++] = DMP_P2P_DEVCAP_SUPPORT;
+
+ /* Group Capability Bitmap, 1 byte */
+ if (pwdinfo->persistent_supported)
+ p2pie[p2pielen++] = P2P_GRPCAP_PERSISTENT_GROUP | DMP_P2P_GRPCAP_SUPPORT;
+ else
+ p2pie[p2pielen++] = DMP_P2P_GRPCAP_SUPPORT;
+
+
+ /* Device Info ATTR */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_DEVICE_INFO;
+
+ /* Length: */
+ /* 21 -> P2P Device Address (6bytes) + Config Methods (2bytes) + Primary Device Type (8bytes) */
+ /* + NumofSecondDevType (1byte) + WPS Device Name ID field (2bytes) + WPS Device Name Len field (2bytes) */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(21 + pwdinfo->device_name_len); */
+ RTW_PUT_LE16(p2pie + p2pielen, 21 + pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ /* P2P Device Address */
+ memcpy(p2pie + p2pielen, pwdinfo->device_addr, ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ /* Config Method */
+ /* This field should be big endian. Noted by P2P specification. */
+ if (pwdinfo->ui_got_wps_info == P2P_GOT_WPSINFO_PBC) {
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_CONFIG_METHOD_PBC); */
+ RTW_PUT_BE16(p2pie + p2pielen, WPS_CONFIG_METHOD_PBC);
+ } else {
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_CONFIG_METHOD_DISPLAY); */
+ RTW_PUT_BE16(p2pie + p2pielen, WPS_CONFIG_METHOD_DISPLAY);
+ }
+
+ p2pielen += 2;
+
+ /* Primary Device Type */
+ /* Category ID */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_CID_MULIT_MEDIA); */
+ RTW_PUT_BE16(p2pie + p2pielen, WPS_PDT_CID_MULIT_MEDIA);
+ p2pielen += 2;
+
+ /* OUI */
+ /* u32*) (p2pie + p2pielen) = cpu_to_be32(WPSOUI); */
+ RTW_PUT_BE32(p2pie + p2pielen, WPSOUI);
+ p2pielen += 4;
+
+ /* Sub Category ID */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_PDT_SCID_MEDIA_SERVER); */
+ RTW_PUT_BE16(p2pie + p2pielen, WPS_PDT_SCID_MEDIA_SERVER);
+ p2pielen += 2;
+
+ /* Number of Secondary Device Types */
+ p2pie[p2pielen++] = 0x00; /* No Secondary Device Type List */
+
+ /* Device Name */
+ /* Type: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(WPS_ATTR_DEVICE_NAME); */
+ RTW_PUT_BE16(p2pie + p2pielen, WPS_ATTR_DEVICE_NAME);
+ p2pielen += 2;
+
+ /* Length: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_be16(pwdinfo->device_name_len); */
+ RTW_PUT_BE16(p2pie + p2pielen, pwdinfo->device_name_len);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pwdinfo->device_name, pwdinfo->device_name_len);
+ p2pielen += pwdinfo->device_name_len;
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT)) {
+ /* Added by Albert 2011/05/19 */
+ /* In this case, the pdev_raddr is the device address of the group owner. */
+
+ /* P2P Group ID ATTR */
+ /* Type: */
+ p2pie[p2pielen++] = P2P_ATTR_GROUP_ID;
+
+ /* Length: */
+ /* u16*) (p2pie + p2pielen) = cpu_to_le16(ETH_ALEN + ussidlen); */
+ RTW_PUT_LE16(p2pie + p2pielen, ETH_ALEN + ussidlen);
+ p2pielen += 2;
+
+ /* Value: */
+ memcpy(p2pie + p2pielen, pdev_raddr, ETH_ALEN);
+ p2pielen += ETH_ALEN;
+
+ memcpy(p2pie + p2pielen, pssid, ussidlen);
+ p2pielen += ussidlen;
+ }
+
+ pbuf = rtw_set_ie(pbuf, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &len);
+
+
+ return len;
+}
+
+
+u32 build_assoc_resp_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf, u8 status_code)
+{
+ u8 p2pie[MAX_P2P_IE_LEN] = { 0x00 };
+ u32 len = 0, p2pielen = 0;
+
+ /* P2P OUI */
+ p2pielen = 0;
+ p2pie[p2pielen++] = 0x50;
+ p2pie[p2pielen++] = 0x6F;
+ p2pie[p2pielen++] = 0x9A;
+ p2pie[p2pielen++] = 0x09; /* WFA P2P v1.0 */
+
+ /* According to the P2P Specification, the Association response frame should contain 2 P2P attributes */
+ /* 1. Status */
+ /* 2. Extended Listen Timing (optional) */
+
+
+ /* Status ATTR */
+ p2pielen += rtw_set_p2p_attr_content(&p2pie[p2pielen], P2P_ATTR_STATUS, 1, &status_code);
+
+
+ /* Extended Listen Timing ATTR */
+ /* Type: */
+ /* Length: */
+ /* Value: */
+
+ pbuf = rtw_set_ie(pbuf, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &len);
+
+ return len;
+}
+
+u32 build_deauth_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf)
+{
+ u32 len = 0;
+
+ return len;
+}
+
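+/*
+ * Decide whether a received probe request should be handled as a P2P probe
+ * request. Requests advertising only CCK (802.11b) rates are ignored;
+ * otherwise the frame must carry a P2P IE together with the P2P wildcard
+ * SSID or a zero-length SSID. Returns true if the driver should respond.
+ */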
+u32 process_probe_req_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ u8 *p;
+ u32 ret = false;
+ u8 *p2pie;
+ u32 p2pielen = 0;
+ int ssid_len = 0, rate_cnt = 0;
+
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, _SUPPORTEDRATES_IE_, (int *)&rate_cnt,
+ len - WLAN_HDR_A3_LEN - _PROBEREQ_IE_OFFSET_);
+
+ if (rate_cnt <= 4) {
+ int i, g_rate = 0;
+
+ for (i = 0; i < rate_cnt; i++) {
+ if (((*(p + 2 + i) & 0xff) != 0x02) &&
+ ((*(p + 2 + i) & 0xff) != 0x04) &&
+ ((*(p + 2 + i) & 0xff) != 0x0B) &&
+ ((*(p + 2 + i) & 0xff) != 0x16))
+ g_rate = 1;
+ }
+
+ if (g_rate == 0) {
+ /* No OFDM rate is included in the SupportedRates IE of this probe */
+ /* request frame, so do not respond to it as a P2P device. */
+ return ret;
+ }
+ } else {
+ /* rate_cnt > 4 means the SupportedRates IE contains OFDM rates, since there are only 4 CCK rates. */
+ /* Proceed with the following checks for this probe request. */
+ }
+
+ /* Added comments by Albert 20100906 */
+ /* There are several items we should check here. */
+ /* 1. This probe request frame must contain the P2P IE. (Done) */
+ /* 2. This probe request frame must contain the wildcard SSID. (Done) */
+ /* 3. Wildcard BSSID. (Todo) */
+ /* 4. Destination Address. (Done in mgt_dispatcher function) */
+ /* 5. Requested Device Type in WSC IE. (Todo) */
+ /* 6. Device ID attribute in P2P IE. (Todo) */
+
+ p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, _SSID_IE_, (int *)&ssid_len,
+ len - WLAN_HDR_A3_LEN - _PROBEREQ_IE_OFFSET_);
+
+ ssid_len &= 0xff; /* Just last 1 byte is valid for ssid len of the probe request */
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE) || rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ p2pie = rtw_get_p2p_ie(pframe + WLAN_HDR_A3_LEN + _PROBEREQ_IE_OFFSET_, len - WLAN_HDR_A3_LEN - _PROBEREQ_IE_OFFSET_, NULL, &p2pielen);
+ if (p2pie) {
+ if ((p != NULL) && _rtw_memcmp((void *)(p+2), (void *)pwdinfo->p2p_wildcard_ssid, 7)) {
+ /* todo: */
+ /* Check Requested Device Type attributes in WSC IE. */
+ /* Check Device ID attribute in P2P IE */
+
+ ret = true;
+ } else if ((p != NULL) && (ssid_len == 0)) {
+ ret = true;
+ }
+ } else {
+ /* non-P2P device */
+ }
+ }
+
+
+ return ret;
+}
+
+u32 process_assoc_req_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pframe, uint len, struct sta_info *psta)
+{
+ u8 status_code = P2P_STATUS_SUCCESS;
+ u8 *pbuf, *pattr_content = NULL;
+ u32 attr_contentlen = 0;
+ u16 cap_attr = 0;
+ unsigned short frame_type, ie_offset = 0;
+ u8 *ies;
+ u32 ies_len;
+ u8 *p2p_ie;
+ u32 p2p_ielen = 0;
+ __be16 be_tmp;
+ __le16 le_tmp;
+
+ if (!rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO))
+ return P2P_STATUS_FAIL_REQUEST_UNABLE;
+
+ frame_type = GetFrameSubType(pframe);
+ if (frame_type == WIFI_ASSOCREQ)
+ ie_offset = _ASOCREQ_IE_OFFSET_;
+ else /* WIFI_REASSOCREQ */
+ ie_offset = _REASOCREQ_IE_OFFSET_;
+
+ ies = pframe + WLAN_HDR_A3_LEN + ie_offset;
+ ies_len = len - WLAN_HDR_A3_LEN - ie_offset;
+
+ p2p_ie = rtw_get_p2p_ie(ies, ies_len, NULL, &p2p_ielen);
+
+ if (!p2p_ie) {
+ DBG_88E("[%s] P2P IE not Found!!\n", __func__);
+ status_code = P2P_STATUS_FAIL_INVALID_PARAM;
+ } else {
+ DBG_88E("[%s] P2P IE Found!!\n", __func__);
+ }
+
+ while (p2p_ie) {
+ /* Check P2P Capability ATTR */
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_CAPABILITY, (u8 *)&le_tmp, (uint *)&attr_contentlen)) {
+ DBG_88E("[%s] Got P2P Capability Attr!!\n", __func__);
+ cap_attr = le16_to_cpu(le_tmp);
+ psta->dev_cap = cap_attr&0xff;
+ }
+
+ /* Check Extended Listen Timing ATTR */
+
+
+ /* Check P2P Device Info ATTR */
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_DEVICE_INFO, NULL, (uint *)&attr_contentlen)) {
+ DBG_88E("[%s] Got P2P DEVICE INFO Attr!!\n", __func__);
+ pattr_content = rtw_zmalloc(attr_contentlen);
+ pbuf = pattr_content;
+ if (pattr_content) {
+ u8 num_of_secdev_type;
+ u16 dev_name_len;
+
+ rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_DEVICE_INFO, pattr_content, (uint *)&attr_contentlen);
+
+ memcpy(psta->dev_addr, pattr_content, ETH_ALEN);/* P2P Device Address */
+
+ pattr_content += ETH_ALEN;
+
+ memcpy(&be_tmp, pattr_content, 2);/* Config Methods */
+ psta->config_methods = be16_to_cpu(be_tmp);
+
+ pattr_content += 2;
+
+ memcpy(psta->primary_dev_type, pattr_content, 8);
+
+ pattr_content += 8;
+
+ num_of_secdev_type = *pattr_content;
+ pattr_content += 1;
+
+ if (num_of_secdev_type == 0) {
+ psta->num_of_secdev_type = 0;
+ } else {
+ u32 len;
+
+ psta->num_of_secdev_type = num_of_secdev_type;
+
+ len = (sizeof(psta->secdev_types_list) < (num_of_secdev_type*8)) ?
+ (sizeof(psta->secdev_types_list)) : (num_of_secdev_type*8);
+
+ memcpy(psta->secdev_types_list, pattr_content, len);
+
+ pattr_content += (num_of_secdev_type*8);
+ }
+
+
+ psta->dev_name_len = 0;
+ if (WPS_ATTR_DEVICE_NAME == be16_to_cpu(*(__be16 *)pattr_content)) {
+ dev_name_len = be16_to_cpu(*(__be16 *)(pattr_content+2));
+
+ psta->dev_name_len = (sizeof(psta->dev_name) < dev_name_len) ? sizeof(psta->dev_name) : dev_name_len;
+
+ memcpy(psta->dev_name, pattr_content+4, psta->dev_name_len);
+ }
+ kfree(pbuf);
+ }
+ }
+
+ /* Get the next P2P IE */
+ p2p_ie = rtw_get_p2p_ie(p2p_ie+p2p_ielen, ies_len - (p2p_ie - ies + p2p_ielen), NULL, &p2p_ielen);
+ }
+
+ return status_code;
+}
+
+u32 process_p2p_devdisc_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ u8 *frame_body;
+ u8 status, dialogToken;
+ struct sta_info *psta = NULL;
+ struct adapter *padapter = pwdinfo->padapter;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 *p2p_ie;
+ u32 p2p_ielen = 0;
+
+ frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+
+ dialogToken = frame_body[7];
+ status = P2P_STATUS_FAIL_UNKNOWN_P2PGROUP;
+
+ p2p_ie = rtw_get_p2p_ie(frame_body + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, NULL, &p2p_ielen);
+ if (p2p_ie) {
+ u8 groupid[38] = { 0x00 };
+ u8 dev_addr[ETH_ALEN] = { 0x00 };
+ u32 attr_contentlen = 0;
+
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, groupid, &attr_contentlen)) {
+ if (_rtw_memcmp(pwdinfo->device_addr, groupid, ETH_ALEN) &&
+ _rtw_memcmp(pwdinfo->p2p_group_ssid, groupid+ETH_ALEN, pwdinfo->p2p_group_ssid_len)) {
+ attr_contentlen = 0;
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_DEVICE_ID, dev_addr, &attr_contentlen)) {
+ unsigned long irqL;
+ struct list_head *phead, *plist;
+
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+
+ /* look up sta asoc_queue */
+ while ((rtw_end_of_queue_search(phead, plist)) == false) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+
+ plist = get_next(plist);
+
+ if (psta->is_p2p_device && (psta->dev_cap&P2P_DEVCAP_CLIENT_DISCOVERABILITY) &&
+ _rtw_memcmp(psta->dev_addr, dev_addr, ETH_ALEN)) {
+ /* issue GO Discoverability Request */
+ issue_group_disc_req(pwdinfo, psta->hwaddr);
+ status = P2P_STATUS_SUCCESS;
+ break;
+ } else {
+ status = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
+ }
+ }
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ } else {
+ status = P2P_STATUS_FAIL_INVALID_PARAM;
+ }
+ } else {
+ status = P2P_STATUS_FAIL_INVALID_PARAM;
+ }
+ }
+ }
+
+
+ /* issue Device Discoverability Response */
+ issue_p2p_devdisc_resp(pwdinfo, GetAddr2Ptr(pframe), status, dialogToken);
+
+ return (status == P2P_STATUS_SUCCESS) ? true : false;
+}
+
+u32 process_p2p_devdisc_resp(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ return true;
+}
+
+u8 process_p2p_provdisc_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ u8 *frame_body;
+ u8 *wpsie;
+ uint wps_ielen = 0, attr_contentlen = 0;
+ u16 uconfig_method = 0;
+ __be16 be_tmp;
+
+ frame_body = (pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+
+ wpsie = rtw_get_wps_ie(frame_body + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, NULL, &wps_ielen);
+ if (wpsie) {
+ if (rtw_get_wps_attr_content(wpsie, wps_ielen, WPS_ATTR_CONF_METHOD, (u8 *)&be_tmp, &attr_contentlen)) {
+ uconfig_method = be16_to_cpu(be_tmp);
+ switch (uconfig_method) {
+ case WPS_CM_DISPLYA:
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "dis", 3);
+ break;
+ case WPS_CM_LABEL:
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "lab", 3);
+ break;
+ case WPS_CM_PUSH_BUTTON:
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pbc", 3);
+ break;
+ case WPS_CM_KEYPAD:
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pad", 3);
+ break;
+ }
+ issue_p2p_provision_resp(pwdinfo, GetAddr2Ptr(pframe), frame_body, uconfig_method);
+ }
+ }
+ DBG_88E("[%s] config method = %s\n", __func__, pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req);
+ return true;
+}
+
+u8 process_p2p_provdisc_resp(struct wifidirect_info *pwdinfo, u8 *pframe)
+{
+ return true;
+}
+
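+/*
+ * Flatten the peer's Channel List attribute into an array of channel numbers:
+ * skip the 3-byte country string, then walk each operating-class entry
+ * (class byte, channel count, channel numbers). Returns the total number of
+ * channels copied to peer_ch_list.
+ */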
+static u8 rtw_p2p_get_peer_ch_list(struct wifidirect_info *pwdinfo, u8 *ch_content, u8 ch_cnt, u8 *peer_ch_list)
+{
+ u8 i = 0, j = 0;
+ u8 temp = 0;
+ u8 ch_no = 0;
+ ch_content += 3;
+ ch_cnt -= 3;
+
+ while (ch_cnt > 0) {
+ ch_content += 1;
+ ch_cnt -= 1;
+ temp = *ch_content;
+ for (i = 0; i < temp; i++, j++)
+ peer_ch_list[j] = *(ch_content + 1 + i);
+ ch_content += (temp + 1);
+ ch_cnt -= (temp + 1);
+ ch_no += temp;
+ }
+
+ return ch_no;
+}
+
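+/*
+ * Intersect the peer's channel list with the locally supported channel set
+ * and store the common channels in ch_list_inclusioned. Returns the number
+ * of common channels found.
+ */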
+static u8 rtw_p2p_ch_inclusion(struct mlme_ext_priv *pmlmeext, u8 *peer_ch_list, u8 peer_ch_num, u8 *ch_list_inclusioned)
+{
+ int i = 0, j = 0, temp = 0;
+ u8 ch_no = 0;
+
+ for (i = 0; i < peer_ch_num; i++) {
+ for (j = temp; j < pmlmeext->max_chan_nums; j++) {
+ if (*(peer_ch_list + i) == pmlmeext->channel_set[j].ChannelNum) {
+ ch_list_inclusioned[ch_no++] = *(peer_ch_list + i);
+ temp = j;
+ break;
+ }
+ }
+ }
+
+ return ch_no;
+}
+
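+/*
+ * Handle a received GO Negotiation Request: learn the peer's config method
+ * from the WPS IE when no provision discovery was exchanged, compare GO
+ * intent values to settle the GO/client role, and intersect the channel
+ * lists to choose a common operating channel. Returns a P2P status code.
+ */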
+u8 process_p2p_group_negotation_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ struct adapter *padapter = pwdinfo->padapter;
+ u8 result = P2P_STATUS_SUCCESS;
+ u32 p2p_ielen = 0, wps_ielen = 0;
+ u8 *ies;
+ u32 ies_len;
+ u8 *p2p_ie;
+ u8 *wpsie;
+ u16 wps_devicepassword_id = 0x0000;
+ uint wps_devicepassword_id_len = 0;
+ __be16 be_tmp;
+
+ wpsie = rtw_get_wps_ie(pframe + _PUBLIC_ACTION_IE_OFFSET_, len - _PUBLIC_ACTION_IE_OFFSET_, NULL, &wps_ielen);
+ if (wpsie) {
+ /* Commented by Kurt 20120113 */
+ /* If some device wants to do p2p handshake without sending prov_disc_req */
+ /* We have to get peer_req_cm from here. */
+ if (_rtw_memcmp(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "000", 3)) {
+ rtw_get_wps_attr_content(wpsie, wps_ielen, WPS_ATTR_DEVICE_PWID, (u8 *)&be_tmp, &wps_devicepassword_id_len);
+ wps_devicepassword_id = be16_to_cpu(be_tmp);
+
+ if (wps_devicepassword_id == WPS_DPID_USER_SPEC)
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "dis", 3);
+ else if (wps_devicepassword_id == WPS_DPID_REGISTRAR_SPEC)
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pad", 3);
+ else
+ memcpy(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, "pbc", 3);
+ }
+ } else {
+ DBG_88E("[%s] WPS IE not Found!!\n", __func__);
+ result = P2P_STATUS_FAIL_INCOMPATIBLE_PARAM;
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ return result;
+ }
+
+ if (pwdinfo->ui_got_wps_info == P2P_NO_WPSINFO) {
+ result = P2P_STATUS_FAIL_INFO_UNAVAILABLE;
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_TX_INFOR_NOREADY);
+ return result;
+ }
+
+ ies = pframe + _PUBLIC_ACTION_IE_OFFSET_;
+ ies_len = len - _PUBLIC_ACTION_IE_OFFSET_;
+
+ p2p_ie = rtw_get_p2p_ie(ies, ies_len, NULL, &p2p_ielen);
+
+ if (!p2p_ie) {
+ DBG_88E("[%s] P2P IE not Found!!\n", __func__);
+ result = P2P_STATUS_FAIL_INCOMPATIBLE_PARAM;
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ }
+
+ while (p2p_ie) {
+ u8 attr_content = 0x00;
+ u32 attr_contentlen = 0;
+ u8 ch_content[50] = { 0x00 };
+ uint ch_cnt = 0;
+ u8 peer_ch_list[50] = { 0x00 };
+ u8 peer_ch_num = 0;
+ u8 ch_list_inclusioned[50] = { 0x00 };
+ u8 ch_num_inclusioned = 0;
+
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_ING);
+
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GO_INTENT, &attr_content, &attr_contentlen)) {
+ DBG_88E("[%s] GO Intent = %d, tie = %d\n", __func__, attr_content >> 1, attr_content & 0x01);
+ pwdinfo->peer_intent = attr_content; /* include both intent and tie breaker values. */
+
+ if (pwdinfo->intent == (pwdinfo->peer_intent >> 1)) {
+ /* Try to match the tie breaker value */
+ if (pwdinfo->intent == P2P_MAX_INTENT) {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ result = P2P_STATUS_FAIL_BOTH_GOINTENT_15;
+ } else {
+ if (attr_content & 0x01)
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ else
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ }
+ } else if (pwdinfo->intent > (pwdinfo->peer_intent >> 1)) {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ } else {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ }
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ /* Store the group id information. */
+ memcpy(pwdinfo->groupid_info.go_device_addr, pwdinfo->device_addr, ETH_ALEN);
+ memcpy(pwdinfo->groupid_info.ssid, pwdinfo->nego_ssid, pwdinfo->nego_ssidlen);
+ }
+ }
+
+
+ attr_contentlen = 0;
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_INTENTED_IF_ADDR, pwdinfo->p2p_peer_interface_addr, &attr_contentlen)) {
+ if (attr_contentlen != ETH_ALEN)
+ _rtw_memset(pwdinfo->p2p_peer_interface_addr, 0x00, ETH_ALEN);
+ }
+
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_CH_LIST, ch_content, &ch_cnt)) {
+ peer_ch_num = rtw_p2p_get_peer_ch_list(pwdinfo, ch_content, ch_cnt, peer_ch_list);
+ ch_num_inclusioned = rtw_p2p_ch_inclusion(&padapter->mlmeextpriv, peer_ch_list, peer_ch_num, ch_list_inclusioned);
+
+ if (ch_num_inclusioned == 0) {
+ DBG_88E("[%s] No common channel in channel list!\n", __func__);
+ result = P2P_STATUS_FAIL_NO_COMMON_CH;
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ break;
+ }
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ if (!rtw_p2p_is_channel_list_ok(pwdinfo->operating_channel,
+ ch_list_inclusioned, ch_num_inclusioned)) {
+ u8 operatingch_info[5] = { 0x00 }, peer_operating_ch = 0;
+ attr_contentlen = 0;
+
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_OPERATING_CH, operatingch_info, &attr_contentlen))
+ peer_operating_ch = operatingch_info[4];
+
+ if (rtw_p2p_is_channel_list_ok(peer_operating_ch,
+ ch_list_inclusioned, ch_num_inclusioned)) {
+ /* Change our operating channel to the peer's for compatibility. */
+ pwdinfo->operating_channel = peer_operating_ch;
+ DBG_88E("[%s] Change op ch to %02x as peer's\n", __func__, pwdinfo->operating_channel);
+ } else {
+ /* Take first channel of ch_list_inclusioned as operating channel */
+ pwdinfo->operating_channel = ch_list_inclusioned[0];
+ DBG_88E("[%s] Change op ch to %02x\n", __func__, pwdinfo->operating_channel);
+ }
+ }
+ }
+ }
+
+ /* Get the next P2P IE */
+ p2p_ie = rtw_get_p2p_ie(p2p_ie+p2p_ielen, ies_len - (p2p_ie - ies + p2p_ielen), NULL, &p2p_ielen);
+ }
+ return result;
+}
+
+u8 process_p2p_group_negotation_resp(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ struct adapter *padapter = pwdinfo->padapter;
+ u8 result = P2P_STATUS_SUCCESS;
+ u32 p2p_ielen, wps_ielen;
+ u8 *ies;
+ u32 ies_len;
+ u8 *p2p_ie;
+
+ ies = pframe + _PUBLIC_ACTION_IE_OFFSET_;
+ ies_len = len - _PUBLIC_ACTION_IE_OFFSET_;
+
+ /* Determine which device becomes the P2P GO and which becomes the P2P client. */
+
+ if (rtw_get_wps_ie(ies, ies_len, NULL, &wps_ielen)) {
+ } else {
+ DBG_88E("[%s] WPS IE not Found!!\n", __func__);
+ result = P2P_STATUS_FAIL_INCOMPATIBLE_PARAM;
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ }
+
+ p2p_ie = rtw_get_p2p_ie(ies, ies_len, NULL, &p2p_ielen);
+ if (!p2p_ie) {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ result = P2P_STATUS_FAIL_INCOMPATIBLE_PARAM;
+ } else {
+ u8 attr_content = 0x00;
+ u32 attr_contentlen = 0;
+ u8 operatingch_info[5] = { 0x00 };
+ u8 groupid[38];
+ u8 peer_ch_list[50] = { 0x00 };
+ u8 peer_ch_num = 0;
+ u8 ch_list_inclusioned[50] = { 0x00 };
+ u8 ch_num_inclusioned = 0;
+
+ while (p2p_ie) { /* Found the P2P IE. */
+ rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_STATUS, &attr_content, &attr_contentlen);
+ if (attr_contentlen == 1) {
+ DBG_88E("[%s] Status = %d\n", __func__, attr_content);
+ if (attr_content == P2P_STATUS_SUCCESS) {
+ /* Do nothing. */
+ } else {
+ if (P2P_STATUS_FAIL_INFO_UNAVAILABLE == attr_content) {
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_RX_INFOR_NOREADY);
+ } else {
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ }
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ result = attr_content;
+ break;
+ }
+ }
+
+ /* Try to get the peer's interface address */
+ attr_contentlen = 0;
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_INTENTED_IF_ADDR, pwdinfo->p2p_peer_interface_addr, &attr_contentlen)) {
+ if (attr_contentlen != ETH_ALEN)
+ _rtw_memset(pwdinfo->p2p_peer_interface_addr, 0x00, ETH_ALEN);
+ }
+
+ /* Try to get the peer's intent and tie breaker value. */
+ attr_content = 0x00;
+ attr_contentlen = 0;
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GO_INTENT, &attr_content, &attr_contentlen)) {
+ DBG_88E("[%s] GO Intent = %d, tie = %d\n", __func__, attr_content >> 1, attr_content & 0x01);
+ pwdinfo->peer_intent = attr_content; /* include both intent and tie breaker values. */
+
+ if (pwdinfo->intent == (pwdinfo->peer_intent >> 1)) {
+ /* Try to match the tie breaker value */
+ if (pwdinfo->intent == P2P_MAX_INTENT) {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ result = P2P_STATUS_FAIL_BOTH_GOINTENT_15;
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ } else {
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ if (attr_content & 0x01)
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ else
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ }
+ } else if (pwdinfo->intent > (pwdinfo->peer_intent >> 1)) {
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ } else {
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ }
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ /* Store the group id information. */
+ memcpy(pwdinfo->groupid_info.go_device_addr, pwdinfo->device_addr, ETH_ALEN);
+ memcpy(pwdinfo->groupid_info.ssid, pwdinfo->nego_ssid, pwdinfo->nego_ssidlen);
+ }
+ }
+
+ /* Try to get the operation channel information */
+
+ attr_contentlen = 0;
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_OPERATING_CH, operatingch_info, &attr_contentlen)) {
+ DBG_88E("[%s] Peer's operating channel = %d\n", __func__, operatingch_info[4]);
+ pwdinfo->peer_operating_ch = operatingch_info[4];
+ }
+
+ /* Try to get the channel list information */
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_CH_LIST, pwdinfo->channel_list_attr, &pwdinfo->channel_list_attr_len)) {
+ DBG_88E("[%s] channel list attribute found, len = %d\n", __func__, pwdinfo->channel_list_attr_len);
+
+ peer_ch_num = rtw_p2p_get_peer_ch_list(pwdinfo, pwdinfo->channel_list_attr, pwdinfo->channel_list_attr_len, peer_ch_list);
+ ch_num_inclusioned = rtw_p2p_ch_inclusion(&padapter->mlmeextpriv, peer_ch_list, peer_ch_num, ch_list_inclusioned);
+
+ if (ch_num_inclusioned == 0) {
+ DBG_88E("[%s] No common channel in channel list!\n", __func__);
+ result = P2P_STATUS_FAIL_NO_COMMON_CH;
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ break;
+ }
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ if (!rtw_p2p_is_channel_list_ok(pwdinfo->operating_channel,
+ ch_list_inclusioned, ch_num_inclusioned)) {
+ u8 operatingch_info[5] = { 0x00 }, peer_operating_ch = 0;
+ attr_contentlen = 0;
+
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_OPERATING_CH, operatingch_info, &attr_contentlen))
+ peer_operating_ch = operatingch_info[4];
+
+ if (rtw_p2p_is_channel_list_ok(peer_operating_ch,
+ ch_list_inclusioned, ch_num_inclusioned)) {
+ /* Change our operating channel to the peer's for compatibility. */
+ pwdinfo->operating_channel = peer_operating_ch;
+ DBG_88E("[%s] Change op ch to %02x as peer's\n", __func__, pwdinfo->operating_channel);
+ } else {
+ /* Take first channel of ch_list_inclusioned as operating channel */
+ pwdinfo->operating_channel = ch_list_inclusioned[0];
+ DBG_88E("[%s] Change op ch to %02x\n", __func__, pwdinfo->operating_channel);
+ }
+ }
+ }
+ } else {
+ DBG_88E("[%s] channel list attribute not found!\n", __func__);
+ }
+
+ /* Try to get the group id information if peer is GO */
+ attr_contentlen = 0;
+ _rtw_memset(groupid, 0x00, 38);
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, groupid, &attr_contentlen)) {
+ memcpy(pwdinfo->groupid_info.go_device_addr, &groupid[0], ETH_ALEN);
+ memcpy(pwdinfo->groupid_info.ssid, &groupid[6], attr_contentlen - ETH_ALEN);
+ }
+
+ /* Get the next P2P IE */
+ p2p_ie = rtw_get_p2p_ie(p2p_ie+p2p_ielen, ies_len - (p2p_ie - ies + p2p_ielen), NULL, &p2p_ielen);
+ }
+ }
+ return result;
+}
+
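+/*
+ * Handle a received GO Negotiation Confirm: on success cancel the restore
+ * timer, settle the GO/client role from the stored intent and tie-breaker
+ * values, and record the group ID and the peer's operating channel.
+ * Returns the P2P status code.
+ */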
+u8 process_p2p_group_negotation_confirm(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ u8 *ies;
+ u32 ies_len;
+ u8 *p2p_ie;
+ u32 p2p_ielen = 0;
+ u8 result = P2P_STATUS_SUCCESS;
+ ies = pframe + _PUBLIC_ACTION_IE_OFFSET_;
+ ies_len = len - _PUBLIC_ACTION_IE_OFFSET_;
+
+ p2p_ie = rtw_get_p2p_ie(ies, ies_len, NULL, &p2p_ielen);
+ while (p2p_ie) { /* Found the P2P IE. */
+ u8 attr_content = 0x00, operatingch_info[5] = { 0x00 };
+ u8 groupid[38] = { 0x00 };
+ u32 attr_contentlen = 0;
+
+ pwdinfo->negotiation_dialog_token = 1;
+ rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_STATUS, &attr_content, &attr_contentlen);
+ if (attr_contentlen == 1) {
+ DBG_88E("[%s] Status = %d\n", __func__, attr_content);
+ result = attr_content;
+
+ if (attr_content == P2P_STATUS_SUCCESS) {
+ u8 bcancelled = 0;
+
+ _cancel_timer(&pwdinfo->restore_p2p_state_timer, &bcancelled);
+
+ /* Commented by Albert 20100911 */
+ /* Todo: Need to handle the case which both Intents are the same. */
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ if ((pwdinfo->intent) > (pwdinfo->peer_intent >> 1)) {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ } else if ((pwdinfo->intent) < (pwdinfo->peer_intent >> 1)) {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ } else {
+ /* Have to compare the Tie Breaker */
+ if (pwdinfo->peer_intent & 0x01)
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ else
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ }
+ } else {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_FAIL);
+ break;
+ }
+ }
+
+ /* Try to get the group id information */
+ attr_contentlen = 0;
+ _rtw_memset(groupid, 0x00, 38);
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_GROUP_ID, groupid, &attr_contentlen)) {
+ DBG_88E("[%s] Ssid = %s, ssidlen = %zu\n", __func__, &groupid[ETH_ALEN], strlen(&groupid[ETH_ALEN]));
+ memcpy(pwdinfo->groupid_info.go_device_addr, &groupid[0], ETH_ALEN);
+ memcpy(pwdinfo->groupid_info.ssid, &groupid[6], attr_contentlen - ETH_ALEN);
+ }
+
+ attr_contentlen = 0;
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_OPERATING_CH, operatingch_info, &attr_contentlen)) {
+ DBG_88E("[%s] Peer's operating channel = %d\n", __func__, operatingch_info[4]);
+ pwdinfo->peer_operating_ch = operatingch_info[4];
+ }
+
+ /* Get the next P2P IE */
+ p2p_ie = rtw_get_p2p_ie(p2p_ie+p2p_ielen, ies_len - (p2p_ie - ies + p2p_ielen), NULL, &p2p_ielen);
+ }
+ return result;
+}
+
+u8 process_p2p_presence_req(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
+{
+ u8 *frame_body;
+ u8 dialogToken = 0;
+ u8 status = P2P_STATUS_SUCCESS;
+
+ frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
+
+ dialogToken = frame_body[6];
+
+ /* todo: check NoA attribute */
+
+ issue_p2p_presence_resp(pwdinfo, GetAddr2Ptr(pframe), status, dialogToken);
+
+ return true;
+}
+
+static void find_phase_handler(struct adapter *padapter)
+{
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ndis_802_11_ssid ssid;
+ unsigned long irqL;
+
+_func_enter_;
+
+ _rtw_memset((unsigned char *)&ssid, 0, sizeof(struct ndis_802_11_ssid));
+ memcpy(ssid.Ssid, pwdinfo->p2p_wildcard_ssid, P2P_WILDCARD_SSID_LEN);
+ ssid.SsidLength = P2P_WILDCARD_SSID_LEN;
+
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH);
+
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+
+
+_func_exit_;
+}
+
+void p2p_concurrent_handler(struct adapter *padapter);
+
+static void restore_p2p_state_handler(struct adapter *padapter)
+{
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+
+_func_enter_;
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_FAIL))
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE)) {
+ /* In the P2P client mode, the driver should not switch back to its listen channel */
+ /* because this P2P client should stay at the operating channel of P2P GO. */
+ set_channel_bwmode(padapter, pwdinfo->listen_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ }
+_func_exit_;
+}
+
+static void pre_tx_invitereq_handler(struct adapter *padapter)
+{
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ u8 val8 = 1;
+_func_enter_;
+
+ set_channel_bwmode(padapter, pwdinfo->invitereq_info.peer_ch, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+ issue_probereq_p2p(padapter, NULL);
+ _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
+
+_func_exit_;
+}
+
+static void pre_tx_provdisc_handler(struct adapter *padapter)
+{
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ u8 val8 = 1;
+_func_enter_;
+
+ set_channel_bwmode(padapter, pwdinfo->tx_prov_disc_info.peer_channel_num[0], HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+ issue_probereq_p2p(padapter, NULL);
+ _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
+
+_func_exit_;
+}
+
+static void pre_tx_negoreq_handler(struct adapter *padapter)
+{
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ u8 val8 = 1;
+_func_enter_;
+
+ set_channel_bwmode(padapter, pwdinfo->nego_req_info.peer_channel_num[0], HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ rtw_hal_set_hwreg(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+ issue_probereq_p2p(padapter, NULL);
+ _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
+
+_func_exit_;
+}
+
+void p2p_protocol_wk_hdl(struct adapter *padapter, int intCmdType)
+{
+_func_enter_;
+ switch (intCmdType) {
+ case P2P_FIND_PHASE_WK:
+ find_phase_handler(padapter);
+ break;
+ case P2P_RESTORE_STATE_WK:
+ restore_p2p_state_handler(padapter);
+ break;
+ case P2P_PRE_TX_PROVDISC_PROCESS_WK:
+ pre_tx_provdisc_handler(padapter);
+ break;
+ case P2P_PRE_TX_INVITEREQ_PROCESS_WK:
+ pre_tx_invitereq_handler(padapter);
+ break;
+ case P2P_PRE_TX_NEGOREQ_PROCESS_WK:
+ pre_tx_negoreq_handler(padapter);
+ break;
+ }
+
+_func_exit_;
+}
+
+void process_p2p_ps_ie(struct adapter *padapter, u8 *IEs, u32 IELength)
+{
+ u8 *ies;
+ u32 ies_len;
+ u8 *p2p_ie;
+ u32 p2p_ielen = 0;
+ u8 noa_attr[MAX_P2P_IE_LEN] = { 0x00 };/* NoA length should be n*(13) + 2 */
+ u32 attr_contentlen = 0;
+
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 find_p2p = false, find_p2p_ps = false;
+ u8 noa_offset, noa_num, noa_index;
+
+_func_enter_;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return;
+ if (IELength <= _BEACON_IE_OFFSET_)
+ return;
+
+ ies = IEs + _BEACON_IE_OFFSET_;
+ ies_len = IELength - _BEACON_IE_OFFSET_;
+
+ p2p_ie = rtw_get_p2p_ie(ies, ies_len, NULL, &p2p_ielen);
+
+ while (p2p_ie) {
+ find_p2p = true;
+ /* Get Notice of Absence IE. */
+ if (rtw_get_p2p_attr_content(p2p_ie, p2p_ielen, P2P_ATTR_NOA, noa_attr, &attr_contentlen)) {
+ find_p2p_ps = true;
+ noa_index = noa_attr[0];
+
+ if ((pwdinfo->p2p_ps_mode == P2P_PS_NONE) ||
+ (noa_index != pwdinfo->noa_index)) { /* if the index changes, the driver should reconfigure the related settings. */
+ pwdinfo->noa_index = noa_index;
+ pwdinfo->opp_ps = noa_attr[1] >> 7;
+ pwdinfo->ctwindow = noa_attr[1] & 0x7F;
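+ /* After the 2-octet header (NoA index; OppPS flag in bit 7 plus CTWindow in */
+ /* bits 0-6) the attribute carries zero or more 13-octet NoA descriptors: */
+ /* Count/Type (1), Duration (4), Interval (4) and Start Time (4). */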
+
+ noa_offset = 2;
+ noa_num = 0;
+ /* NoA length should be n*(13) + 2 */
+ if (attr_contentlen > 2) {
+ while (noa_offset < attr_contentlen) {
+ /* memcpy(&wifidirect_info->noa_count[noa_num], &noa_attr[noa_offset], 1); */
+ pwdinfo->noa_count[noa_num] = noa_attr[noa_offset];
+ noa_offset += 1;
+
+ memcpy(&pwdinfo->noa_duration[noa_num], &noa_attr[noa_offset], 4);
+ noa_offset += 4;
+
+ memcpy(&pwdinfo->noa_interval[noa_num], &noa_attr[noa_offset], 4);
+ noa_offset += 4;
+
+ memcpy(&pwdinfo->noa_start_time[noa_num], &noa_attr[noa_offset], 4);
+ noa_offset += 4;
+
+ noa_num++;
+ }
+ }
+ pwdinfo->noa_num = noa_num;
+
+ if (pwdinfo->opp_ps == 1) {
+ pwdinfo->p2p_ps_mode = P2P_PS_CTWINDOW;
+ /* the driver should wait for LPS before entering the CTWindow */
+ if (padapter->pwrctrlpriv.bFwCurrentInPSMode)
+ p2p_ps_wk_cmd(padapter, P2P_PS_ENABLE, 1);
+ } else if (pwdinfo->noa_num > 0) {
+ pwdinfo->p2p_ps_mode = P2P_PS_NOA;
+ p2p_ps_wk_cmd(padapter, P2P_PS_ENABLE, 1);
+ } else if (pwdinfo->p2p_ps_mode > P2P_PS_NONE) {
+ p2p_ps_wk_cmd(padapter, P2P_PS_DISABLE, 1);
+ }
+ }
+
+ break; /* find target, just break. */
+ }
+
+ /* Get the next P2P IE */
+ p2p_ie = rtw_get_p2p_ie(p2p_ie+p2p_ielen, ies_len - (p2p_ie - ies + p2p_ielen), NULL, &p2p_ielen);
+ }
+
+ if (find_p2p) {
+ if ((pwdinfo->p2p_ps_mode > P2P_PS_NONE) && !find_p2p_ps)
+ p2p_ps_wk_cmd(padapter, P2P_PS_DISABLE, 1);
+ }
+
+_func_exit_;
+}
+
+void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+_func_enter_;
+
+ /* Pre action for p2p state */
+ switch (p2p_ps_state) {
+ case P2P_PS_DISABLE:
+ pwdinfo->p2p_ps_state = p2p_ps_state;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
+
+ pwdinfo->noa_index = 0;
+ pwdinfo->ctwindow = 0;
+ pwdinfo->opp_ps = 0;
+ pwdinfo->noa_num = 0;
+ pwdinfo->p2p_ps_mode = P2P_PS_NONE;
+ if (padapter->pwrctrlpriv.bFwCurrentInPSMode) {
+ if (pwrpriv->smart_ps == 0) {
+ pwrpriv->smart_ps = 2;
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&(padapter->pwrctrlpriv.pwr_mode)));
+ }
+ }
+ break;
+ case P2P_PS_ENABLE:
+ if (pwdinfo->p2p_ps_mode > P2P_PS_NONE) {
+ pwdinfo->p2p_ps_state = p2p_ps_state;
+
+ if (pwdinfo->ctwindow > 0) {
+ if (pwrpriv->smart_ps != 0) {
+ pwrpriv->smart_ps = 0;
+ DBG_88E("%s(): Enter CTW, change SmartPS\n", __func__);
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&(padapter->pwrctrlpriv.pwr_mode)));
+ }
+ }
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
+ }
+ break;
+ case P2P_PS_SCAN:
+ case P2P_PS_SCAN_DONE:
+ case P2P_PS_ALLSTASLEEP:
+ if (pwdinfo->p2p_ps_mode > P2P_PS_NONE) {
+ pwdinfo->p2p_ps_state = p2p_ps_state;
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
+ }
+ break;
+ default:
+ break;
+ }
+
+_func_exit_;
+}
+
+u8 p2p_ps_wk_cmd(struct adapter *padapter, u8 p2p_ps_state, u8 enqueue)
+{
+ struct cmd_obj *ph2c;
+ struct drvextra_cmd_parm *pdrvextra_cmd_parm;
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+_func_enter_;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return res;
+
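+ /* When enqueue is set, wrap the request in a driver-extra command (P2P_PS_WK_CID) */
+ /* so p2p_ps_wk_hdl() runs in the command thread instead of the caller's context. */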
+ if (enqueue) {
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
+ if (pdrvextra_cmd_parm == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ pdrvextra_cmd_parm->ec_id = P2P_PS_WK_CID;
+ pdrvextra_cmd_parm->type_size = p2p_ps_state;
+ pdrvextra_cmd_parm->pbuf = NULL;
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+ } else {
+ p2p_ps_wk_hdl(padapter, p2p_ps_state);
+ }
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+static void reset_ch_sitesurvey_timer_process(void *FunctionContext)
+{
+ struct adapter *adapter = (struct adapter *)FunctionContext;
+ struct wifidirect_info *pwdinfo = &adapter->wdinfo;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return;
+
+ DBG_88E("[%s] In\n", __func__);
+ /* Reset the operation channel information */
+ pwdinfo->rx_invitereq_info.operation_ch[0] = 0;
+ pwdinfo->rx_invitereq_info.scan_op_ch_only = 0;
+}
+
+static void reset_ch_sitesurvey_timer_process2(void *FunctionContext)
+{
+ struct adapter *adapter = (struct adapter *)FunctionContext;
+ struct wifidirect_info *pwdinfo = &adapter->wdinfo;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return;
+
+ DBG_88E("[%s] In\n", __func__);
+ /* Reset the operation channel information */
+ pwdinfo->p2p_info.operation_ch[0] = 0;
+ pwdinfo->p2p_info.scan_op_ch_only = 0;
+}
+
+static void restore_p2p_state_timer_process(void *FunctionContext)
+{
+ struct adapter *adapter = (struct adapter *)FunctionContext;
+ struct wifidirect_info *pwdinfo = &adapter->wdinfo;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return;
+
+ p2p_protocol_wk_cmd(adapter, P2P_RESTORE_STATE_WK);
+}
+
+static void pre_tx_scan_timer_process(void *FunctionContext)
+{
+ struct adapter *adapter = (struct adapter *)FunctionContext;
+ struct wifidirect_info *pwdinfo = &adapter->wdinfo;
+ unsigned long irqL;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return;
+
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ)) {
+ if (pwdinfo->tx_prov_disc_info.benable) { /* whether the provision discovery request frame is triggered to be sent */
+ p2p_protocol_wk_cmd(adapter, P2P_PRE_TX_PROVDISC_PROCESS_WK);
+ /* issue_probereq_p2p(adapter, NULL); */
+ /* _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT); */
+ }
+ } else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_ING)) {
+ if (pwdinfo->nego_req_info.benable)
+ p2p_protocol_wk_cmd(adapter, P2P_PRE_TX_NEGOREQ_PROCESS_WK);
+ } else if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_INVITE_REQ)) {
+ if (pwdinfo->invitereq_info.benable)
+ p2p_protocol_wk_cmd(adapter, P2P_PRE_TX_INVITEREQ_PROCESS_WK);
+ } else {
+ DBG_88E("[%s] p2p_state is %d, ignore!!\n", __func__, rtw_p2p_state(pwdinfo));
+ }
+
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+}
+
+static void find_phase_timer_process(void *FunctionContext)
+{
+ struct adapter *adapter = (struct adapter *)FunctionContext;
+ struct wifidirect_info *pwdinfo = &adapter->wdinfo;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return;
+
+ adapter->wdinfo.find_phase_state_exchange_cnt++;
+
+ p2p_protocol_wk_cmd(adapter, P2P_FIND_PHASE_WK);
+}
+
+void reset_global_wifidirect_info(struct adapter *padapter)
+{
+ struct wifidirect_info *pwdinfo;
+
+ pwdinfo = &padapter->wdinfo;
+ pwdinfo->persistent_supported = 0;
+ pwdinfo->session_available = true;
+ pwdinfo->wfd_tdls_enable = 0;
+ pwdinfo->wfd_tdls_weaksec = 0;
+}
+
+void rtw_init_wifidirect_timers(struct adapter *padapter)
+{
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+
+ _init_timer(&pwdinfo->find_phase_timer, padapter->pnetdev, find_phase_timer_process, padapter);
+ _init_timer(&pwdinfo->restore_p2p_state_timer, padapter->pnetdev, restore_p2p_state_timer_process, padapter);
+ _init_timer(&pwdinfo->pre_tx_scan_timer, padapter->pnetdev, pre_tx_scan_timer_process, padapter);
+ _init_timer(&pwdinfo->reset_ch_sitesurvey, padapter->pnetdev, reset_ch_sitesurvey_timer_process, padapter);
+ _init_timer(&pwdinfo->reset_ch_sitesurvey2, padapter->pnetdev, reset_ch_sitesurvey_timer_process2, padapter);
+}
+
+void rtw_init_wifidirect_addrs(struct adapter *padapter, u8 *dev_addr, u8 *iface_addr)
+{
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+
+ /* init device & interface address */
+ if (dev_addr)
+ memcpy(pwdinfo->device_addr, dev_addr, ETH_ALEN);
+ if (iface_addr)
+ memcpy(pwdinfo->interface_addr, iface_addr, ETH_ALEN);
+#endif
+}
+
+void init_wifidirect_info(struct adapter *padapter, enum P2P_ROLE role)
+{
+ struct wifidirect_info *pwdinfo;
+
+ pwdinfo = &padapter->wdinfo;
+ pwdinfo->padapter = padapter;
+
+ /* 1, 6, 11 are the social channels defined in the WiFi Direct specification. */
+ pwdinfo->social_chan[0] = 1;
+ pwdinfo->social_chan[1] = 6;
+ pwdinfo->social_chan[2] = 11;
+ pwdinfo->social_chan[3] = 0; /* channel 0 marks the end of the scan list in the site survey function. */
+
+ /* Use channel 11 as the listen channel */
+ pwdinfo->listen_channel = 11;
+
+ if (role == P2P_ROLE_DEVICE) {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DEVICE);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_LISTEN);
+ pwdinfo->intent = 1;
+ rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_LISTEN);
+ } else if (role == P2P_ROLE_CLIENT) {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_CLIENT);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ pwdinfo->intent = 1;
+ rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ } else if (role == P2P_ROLE_GO) {
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_GO);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ pwdinfo->intent = 15;
+ rtw_p2p_set_pre_state(pwdinfo, P2P_STATE_GONEGO_OK);
+ }
+
+/* Use the OFDM rates in the P2P probe response frame. (6(B), 9(B), 12, 18, 24, 36, 48, 54) */
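+/* Each entry is the rate in 500 kbps units; bit 7 (0x80) marks it as a basic rate, */
+/* e.g. 0x8c = 0x80 | 12 -> 6 Mbps (basic). */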
+ pwdinfo->support_rate[0] = 0x8c; /* 6(B) */
+ pwdinfo->support_rate[1] = 0x92; /* 9(B) */
+ pwdinfo->support_rate[2] = 0x18; /* 12 */
+ pwdinfo->support_rate[3] = 0x24; /* 18 */
+ pwdinfo->support_rate[4] = 0x30; /* 24 */
+ pwdinfo->support_rate[5] = 0x48; /* 36 */
+ pwdinfo->support_rate[6] = 0x60; /* 48 */
+ pwdinfo->support_rate[7] = 0x6c; /* 54 */
+
+ memcpy(pwdinfo->p2p_wildcard_ssid, "DIRECT-", 7);
+
+ _rtw_memset(pwdinfo->device_name, 0x00, WPS_MAX_DEVICE_NAME_LEN);
+ pwdinfo->device_name_len = 0;
+
+ _rtw_memset(&pwdinfo->invitereq_info, 0x00, sizeof(struct tx_invite_req_info));
+ pwdinfo->invitereq_info.token = 3; /* Token used for P2P invitation request frame. */
+
+ _rtw_memset(&pwdinfo->inviteresp_info, 0x00, sizeof(struct tx_invite_resp_info));
+ pwdinfo->inviteresp_info.token = 0;
+
+ pwdinfo->profileindex = 0;
+ _rtw_memset(&pwdinfo->profileinfo[0], 0x00, sizeof(struct profile_info) * P2P_MAX_PERSISTENT_GROUP_NUM);
+
+ rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_NONE);
+
+ pwdinfo->listen_dwell = (u8) ((rtw_get_current_time() % 3) + 1);
+
+ _rtw_memset(&pwdinfo->tx_prov_disc_info, 0x00, sizeof(struct tx_provdisc_req_info));
+ pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_NONE;
+
+ _rtw_memset(&pwdinfo->nego_req_info, 0x00, sizeof(struct tx_nego_req_info));
+
+ pwdinfo->device_password_id_for_nego = WPS_DPID_PBC;
+ pwdinfo->negotiation_dialog_token = 1;
+
+ _rtw_memset(pwdinfo->nego_ssid, 0x00, WLAN_SSID_MAXLEN);
+ pwdinfo->nego_ssidlen = 0;
+
+ pwdinfo->ui_got_wps_info = P2P_NO_WPSINFO;
+ pwdinfo->supported_wps_cm = WPS_CONFIG_METHOD_DISPLAY | WPS_CONFIG_METHOD_PBC | WPS_CONFIG_METHOD_KEYPAD;
+ pwdinfo->channel_list_attr_len = 0;
+ _rtw_memset(pwdinfo->channel_list_attr, 0x00, 100);
+
+ _rtw_memset(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, 0x00, 4);
+ _rtw_memset(pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req, '0', 3);
+ _rtw_memset(&pwdinfo->groupid_info, 0x00, sizeof(struct group_id_info));
+ pwdinfo->wfd_tdls_enable = 0;
+ _rtw_memset(pwdinfo->p2p_peer_interface_addr, 0x00, ETH_ALEN);
+ _rtw_memset(pwdinfo->p2p_peer_device_addr, 0x00, ETH_ALEN);
+
+ pwdinfo->rx_invitereq_info.operation_ch[0] = 0;
+ pwdinfo->rx_invitereq_info.operation_ch[1] = 0; /* Used to indicate the scan end in site survey function */
+ pwdinfo->rx_invitereq_info.scan_op_ch_only = 0;
+ pwdinfo->p2p_info.operation_ch[0] = 0;
+ pwdinfo->p2p_info.operation_ch[1] = 0; /* Used to indicate the scan end in site survey function */
+ pwdinfo->p2p_info.scan_op_ch_only = 0;
+}
+
+int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
+{
+ int ret = _SUCCESS;
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ if (role == P2P_ROLE_DEVICE || role == P2P_ROLE_CLIENT || role == P2P_ROLE_GO) {
+ /* leave IPS/Autosuspend */
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* Added by Albert 2011/03/22 */
+ /* In P2P mode, the driver should not support 802.11b, */
+ /* so the Tx packets shouldn't use the CCK rates. */
+ update_tx_basic_rate(padapter, WIRELESS_11AGN);
+
+ /* Enable P2P function */
+ init_wifidirect_info(padapter, role);
+
+ rtw_hal_set_odm_var(padapter, HAL_ODM_P2P_STATE, NULL, true);
+ } else if (role == P2P_ROLE_DISABLE) {
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* Disable P2P function */
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
+ _cancel_timer_ex(&pwdinfo->find_phase_timer);
+ _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
+ _cancel_timer_ex(&pwdinfo->pre_tx_scan_timer);
+ _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey);
+ _cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey2);
+ reset_ch_sitesurvey_timer_process(padapter);
+ reset_ch_sitesurvey_timer_process2(padapter);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_NONE);
+ rtw_p2p_set_role(pwdinfo, P2P_ROLE_DISABLE);
+ _rtw_memset(&pwdinfo->rx_prov_disc_info, 0x00, sizeof(struct rx_provdisc_req_info));
+ }
+
+ rtw_hal_set_odm_var(padapter, HAL_ODM_P2P_STATE, NULL, false);
+
+ /* Restore to initial setting. */
+ update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
+ }
+
+exit:
+ return ret;
+}
+
+#else
+u8 p2p_ps_wk_cmd(struct adapter *padapter, u8 p2p_ps_state, u8 enqueue)
+{
+ return _FAIL;
+}
+
+void process_p2p_ps_ie(struct adapter *padapter, u8 *IEs, u32 IELength)
+{
+}
+
+#endif /* CONFIG_88EU_P2P */
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
new file mode 100644
index 00000000000..58a1661f5a8
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -0,0 +1,662 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_PWRCTRL_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <osdep_intf.h>
+#include <linux/usb.h>
+
+void ips_enter(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct xmit_priv *pxmit_priv = &padapter->xmitpriv;
+
+ if (padapter->registrypriv.mp_mode == 1)
+ return;
+
+ if (pxmit_priv->free_xmitbuf_cnt != NR_XMITBUFF ||
+ pxmit_priv->free_xmit_extbuf_cnt != NR_XMIT_EXTBUFF) {
+ DBG_88E_LEVEL(_drv_info_, "There are some pkts to transmit\n");
+ DBG_88E_LEVEL(_drv_info_, "free_xmitbuf_cnt: %d, free_xmit_extbuf_cnt: %d\n",
+ pxmit_priv->free_xmitbuf_cnt, pxmit_priv->free_xmit_extbuf_cnt);
+ return;
+ }
+
+ _enter_pwrlock(&pwrpriv->lock);
+
+ pwrpriv->bips_processing = true;
+
+ /* sync ips_mode with the requested mode */
+ pwrpriv->ips_mode = pwrpriv->ips_mode_req;
+
+ pwrpriv->ips_enter_cnts++;
+ DBG_88E("==>ips_enter cnts:%d\n", pwrpriv->ips_enter_cnts);
+ if (rf_off == pwrpriv->change_rfpwrstate) {
+ pwrpriv->bpower_saving = true;
+ DBG_88E_LEVEL(_drv_info_, "nolinked power save enter\n");
+
+ if (pwrpriv->ips_mode == IPS_LEVEL_2)
+ pwrpriv->bkeepfwalive = true;
+
+ rtw_ips_pwr_down(padapter);
+ pwrpriv->rf_pwrstate = rf_off;
+ }
+ pwrpriv->bips_processing = false;
+
+ _exit_pwrlock(&pwrpriv->lock);
+}
+
+int ips_leave(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ int result = _SUCCESS;
+ int keyid;
+
+
+ _enter_pwrlock(&pwrpriv->lock);
+
+ if ((pwrpriv->rf_pwrstate == rf_off) && (!pwrpriv->bips_processing)) {
+ pwrpriv->bips_processing = true;
+ pwrpriv->change_rfpwrstate = rf_on;
+ pwrpriv->ips_leave_cnts++;
+ DBG_88E("==>ips_leave cnts:%d\n", pwrpriv->ips_leave_cnts);
+
+ result = rtw_ips_pwr_up(padapter);
+ if (result == _SUCCESS)
+ pwrpriv->rf_pwrstate = rf_on;
+ DBG_88E_LEVEL(_drv_info_, "nolinked power save leave\n");
+
+ if ((_WEP40_ == psecuritypriv->dot11PrivacyAlgrthm) || (_WEP104_ == psecuritypriv->dot11PrivacyAlgrthm)) {
+ DBG_88E("==>%s, channel(%d), processing(%x)\n", __func__, padapter->mlmeextpriv.cur_channel, pwrpriv->bips_processing);
+ set_channel_bwmode(padapter, padapter->mlmeextpriv.cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ for (keyid = 0; keyid < 4; keyid++) {
+ if (pmlmepriv->key_mask & BIT(keyid)) {
+ if (keyid == psecuritypriv->dot11PrivacyKeyIndex)
+ result = rtw_set_key(padapter, psecuritypriv, keyid, 1);
+ else
+ result = rtw_set_key(padapter, psecuritypriv, keyid, 0);
+ }
+ }
+ }
+
+ DBG_88E("==> ips_leave.....LED(0x%08x)...\n", rtw_read32(padapter, 0x4c));
+ pwrpriv->bips_processing = false;
+
+ pwrpriv->bkeepfwalive = false;
+ pwrpriv->bpower_saving = false;
+ }
+
+ _exit_pwrlock(&pwrpriv->lock);
+
+ return result;
+}
+
+static bool rtw_pwr_unassociated_idle(struct adapter *adapter)
+{
+ struct adapter *buddy = adapter->pbuddy_adapter;
+ struct mlme_priv *pmlmepriv = &(adapter->mlmepriv);
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(adapter->wdinfo);
+#endif
+
+ bool ret = false;
+
+ if (adapter->pwrctrlpriv.ips_deny_time >= rtw_get_current_time())
+ goto exit;
+
+ if (check_fwstate(pmlmepriv, WIFI_ASOC_STATE|WIFI_SITE_MONITOR) ||
+ check_fwstate(pmlmepriv, WIFI_UNDER_LINKING|WIFI_UNDER_WPS) ||
+ check_fwstate(pmlmepriv, WIFI_AP_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE|WIFI_ADHOC_STATE) ||
+#if defined(CONFIG_88EU_P2P)
+ !rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+#else
+ 0)
+#endif
+ goto exit;
+
+ /* consider buddy, if exist */
+ if (buddy) {
+ struct mlme_priv *b_pmlmepriv = &(buddy->mlmepriv);
+ #ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *b_pwdinfo = &(buddy->wdinfo);
+ #endif
+
+ if (check_fwstate(b_pmlmepriv, WIFI_ASOC_STATE|WIFI_SITE_MONITOR) ||
+ check_fwstate(b_pmlmepriv, WIFI_UNDER_LINKING|WIFI_UNDER_WPS) ||
+ check_fwstate(b_pmlmepriv, WIFI_AP_STATE) ||
+ check_fwstate(b_pmlmepriv, WIFI_ADHOC_MASTER_STATE|WIFI_ADHOC_STATE) ||
+#if defined(CONFIG_88EU_P2P)
+ !rtw_p2p_chk_state(b_pwdinfo, P2P_STATE_NONE))
+#else
+ 0)
+#endif
+ goto exit;
+ }
+ ret = true;
+
+exit:
+ return ret;
+}
+
+void rtw_ps_processor(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ enum rt_rf_power_state rfpwrstate;
+
+ pwrpriv->ps_processing = true;
+
+ if (pwrpriv->bips_processing)
+ goto exit;
+
+ if (padapter->pwrctrlpriv.bHWPwrPindetect) {
+ rfpwrstate = RfOnOffDetect(padapter);
+ DBG_88E("@@@@- #2 %s==> rfstate:%s\n", __func__, (rfpwrstate == rf_on) ? "rf_on" : "rf_off");
+
+ if (rfpwrstate != pwrpriv->rf_pwrstate) {
+ if (rfpwrstate == rf_off) {
+ pwrpriv->change_rfpwrstate = rf_off;
+ pwrpriv->brfoffbyhw = true;
+ padapter->bCardDisableWOHSM = true;
+ rtw_hw_suspend(padapter);
+ } else {
+ pwrpriv->change_rfpwrstate = rf_on;
+ rtw_hw_resume(padapter);
+ }
+ DBG_88E("current rf_pwrstate(%s)\n", (pwrpriv->rf_pwrstate == rf_off) ? "rf_off" : "rf_on");
+ }
+ pwrpriv->pwr_state_check_cnts++;
+ }
+
+ if (pwrpriv->ips_mode_req == IPS_NONE)
+ goto exit;
+
+ if (rtw_pwr_unassociated_idle(padapter) == false)
+ goto exit;
+
+ if ((pwrpriv->rf_pwrstate == rf_on) && ((pwrpriv->pwr_state_check_cnts%4) == 0)) {
+ DBG_88E("==>%s .fw_state(%x)\n", __func__, get_fwstate(pmlmepriv));
+ pwrpriv->change_rfpwrstate = rf_off;
+
+ ips_enter(padapter);
+ }
+exit:
+ rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
+ pwrpriv->ps_processing = false;
+ return;
+}
+
+static void pwr_state_check_handler(void *FunctionContext)
+{
+ struct adapter *padapter = (struct adapter *)FunctionContext;
+ rtw_ps_cmd(padapter);
+}
+
+/*
+ * Parameters
+ * padapter
+ * pslv power state level; only PS_STATE_S0 ~ PS_STATE_S4 are valid
+ */
+void rtw_set_rpwm(struct adapter *padapter, u8 pslv)
+{
+ u8 rpwm;
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+
+_func_enter_;
+
+ pslv = PS_STATE(pslv);
+
+
+ if (pwrpriv->btcoex_rfon) {
+ if (pslv < PS_STATE_S4)
+ pslv = PS_STATE_S3;
+ }
+
+ if (pwrpriv->rpwm == pslv) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
+ ("%s: Already set rpwm[0x%02X], new=0x%02X!\n", __func__, pwrpriv->rpwm, pslv));
+ return;
+ }
+
+ if ((padapter->bSurpriseRemoved) ||
+ (!padapter->hw_init_completed)) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
+ ("%s: SurpriseRemoved(%d) hw_init_completed(%d)\n",
+ __func__, padapter->bSurpriseRemoved, padapter->hw_init_completed));
+
+ pwrpriv->cpwm = PS_STATE_S4;
+
+ return;
+ }
+
+ if (padapter->bDriverStopped) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
+ ("%s: change power state(0x%02X) when DriverStopped\n", __func__, pslv));
+
+ if (pslv < PS_STATE_S2) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_,
+ ("%s: Reject to enter PS_STATE(0x%02X) lower than S2 when DriverStopped!!\n", __func__, pslv));
+ return;
+ }
+ }
+
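+ /* Bit 7 of RPWM is a toggle that flips on every request (see tog += 0x80 below) */
+ /* so the firmware can tell a new power-state request apart from the previous one. */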
+ rpwm = pslv | pwrpriv->tog;
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
+ ("rtw_set_rpwm: rpwm=0x%02x cpwm=0x%02x\n", rpwm, pwrpriv->cpwm));
+
+ pwrpriv->rpwm = pslv;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_SET_RPWM, (u8 *)(&rpwm));
+
+ pwrpriv->tog += 0x80;
+ pwrpriv->cpwm = pslv;
+
+_func_exit_;
+}
+
+static u8 PS_RDY_CHECK(struct adapter *padapter)
+{
+ u32 curr_time, delta_time;
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+
+ curr_time = rtw_get_current_time();
+ delta_time = curr_time - pwrpriv->DelayLPSLastTimeStamp;
+
+ if (delta_time < LPS_DELAY_TIME)
+ return false;
+
+ if ((check_fwstate(pmlmepriv, _FW_LINKED) == false) ||
+ (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) ||
+ (check_fwstate(pmlmepriv, WIFI_AP_STATE)) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)))
+ return false;
+ if (pwrpriv->bInSuspend)
+ return false;
+ if ((padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) && (padapter->securitypriv.binstallGrpkey == false)) {
+ DBG_88E("Group handshake still in progress !!!\n");
+ return false;
+ }
+ return true;
+}
+
+void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_ant_mode)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+#endif /* CONFIG_88EU_P2P */
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_notice_,
+ ("%s: PowerMode=%d Smart_PS=%d\n",
+ __func__, ps_mode, smart_ps));
+
+ if (ps_mode > PM_Card_Disable) {
+ RT_TRACE(_module_rtl871x_pwrctrl_c_, _drv_err_, ("ps_mode:%d error\n", ps_mode));
+ return;
+ }
+
+ if (pwrpriv->pwr_mode == ps_mode) {
+ if (PS_MODE_ACTIVE == ps_mode)
+ return;
+
+ if ((pwrpriv->smart_ps == smart_ps) &&
+ (pwrpriv->bcn_ant_mode == bcn_ant_mode))
+ return;
+ }
+
+ /* if (pwrpriv->pwr_mode == PS_MODE_ACTIVE) */
+ if (ps_mode == PS_MODE_ACTIVE) {
+#ifdef CONFIG_88EU_P2P
+ if (pwdinfo->opp_ps == 0) {
+ DBG_88E("rtw_set_ps_mode: Leave 802.11 power save\n");
+ pwrpriv->pwr_mode = ps_mode;
+ rtw_set_rpwm(padapter, PS_STATE_S4);
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ps_mode));
+ pwrpriv->bFwCurrentInPSMode = false;
+ }
+ } else {
+#endif /* CONFIG_88EU_P2P */
+ if (PS_RDY_CHECK(padapter)) {
+ DBG_88E("%s: Enter 802.11 power save\n", __func__);
+ pwrpriv->bFwCurrentInPSMode = true;
+ pwrpriv->pwr_mode = ps_mode;
+ pwrpriv->smart_ps = smart_ps;
+ pwrpriv->bcn_ant_mode = bcn_ant_mode;
+ rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ps_mode));
+
+#ifdef CONFIG_88EU_P2P
+ /* Set CTWindow after LPS */
+ if (pwdinfo->opp_ps == 1)
+ p2p_ps_wk_cmd(padapter, P2P_PS_ENABLE, 0);
+#endif /* CONFIG_88EU_P2P */
+
+ rtw_set_rpwm(padapter, PS_STATE_S2);
+ }
+ }
+
+_func_exit_;
+}
+
+/*
+ * Return:
+ * 0: Leave OK
+ * -1: Timeout
+ * -2: Other error
+ */
+s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
+{
+ u32 start_time;
+ u8 bAwake = false;
+ s32 err = 0;
+
+
+ start_time = rtw_get_current_time();
+ while (1) {
+ rtw_hal_get_hwreg(padapter, HW_VAR_FWLPS_RF_ON, &bAwake);
+ if (bAwake)
+ break;
+
+ if (padapter->bSurpriseRemoved) {
+ err = -2;
+ DBG_88E("%s: device surprise removed!!\n", __func__);
+ break;
+ }
+
+ if (rtw_get_passing_time_ms(start_time) > delay_ms) {
+ err = -1;
+ DBG_88E("%s: Wait for FW LPS leave more than %u ms!!!\n", __func__, delay_ms);
+ break;
+ }
+ rtw_usleep_os(100);
+ }
+
+ return err;
+}
+
+/* */
+/* Description: */
+/* Enter the leisure power save mode. */
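+/* LPS applies while associated; IPS (ips_enter/ips_leave above) powers the RF */
+/* down while the interface is idle and unassociated. */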
+/* */
+void LPS_Enter(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+
+_func_enter_;
+
+ if (PS_RDY_CHECK(padapter) == false)
+ return;
+
+ if (pwrpriv->bLeisurePs) {
+ /* Enter LPS only after being idle for a while following association. */
+ if (pwrpriv->LpsIdleCount >= 2) { /* 4 Sec */
+ if (pwrpriv->pwr_mode == PS_MODE_ACTIVE) {
+ pwrpriv->bpower_saving = true;
+ DBG_88E("%s smart_ps:%d\n", __func__, pwrpriv->smart_ps);
+ /* For Tenda W311R IOT issue */
+ rtw_set_ps_mode(padapter, pwrpriv->power_mgnt, pwrpriv->smart_ps, 0);
+ }
+ } else {
+ pwrpriv->LpsIdleCount++;
+ }
+ }
+
+_func_exit_;
+}
+
+#define LPS_LEAVE_TIMEOUT_MS 100
+
+/* Description: */
+/* Leave the leisure power save mode. */
+void LPS_Leave(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+
+_func_enter_;
+
+ if (pwrpriv->bLeisurePs) {
+ if (pwrpriv->pwr_mode != PS_MODE_ACTIVE) {
+ rtw_set_ps_mode(padapter, PS_MODE_ACTIVE, 0, 0);
+
+ if (pwrpriv->pwr_mode == PS_MODE_ACTIVE)
+ LPS_RF_ON_check(padapter, LPS_LEAVE_TIMEOUT_MS);
+ }
+ }
+
+ pwrpriv->bpower_saving = false;
+
+_func_exit_;
+}
+
+/* */
+/* Description: Leave all power save modes: LPS, FwLPS, and IPS if needed. */
+/* Move code to function by tynli. 2010.03.26. */
+/* */
+void LeaveAllPowerSaveMode(struct adapter *Adapter)
+{
+ struct mlme_priv *pmlmepriv = &(Adapter->mlmepriv);
+ u8 enqueue = 0;
+
+_func_enter_;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) { /* connect */
+ p2p_ps_wk_cmd(Adapter, P2P_PS_DISABLE, enqueue);
+
+ rtw_lps_ctrl_wk_cmd(Adapter, LPS_CTRL_LEAVE, enqueue);
+ }
+
+_func_exit_;
+}
+
+void rtw_init_pwrctrl_priv(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
+
+_func_enter_;
+
+ _init_pwrlock(&pwrctrlpriv->lock);
+ pwrctrlpriv->rf_pwrstate = rf_on;
+ pwrctrlpriv->ips_enter_cnts = 0;
+ pwrctrlpriv->ips_leave_cnts = 0;
+ pwrctrlpriv->bips_processing = false;
+
+ pwrctrlpriv->ips_mode = padapter->registrypriv.ips_mode;
+ pwrctrlpriv->ips_mode_req = padapter->registrypriv.ips_mode;
+
+ pwrctrlpriv->pwr_state_check_interval = RTW_PWR_STATE_CHK_INTERVAL;
+ pwrctrlpriv->pwr_state_check_cnts = 0;
+ pwrctrlpriv->bInternalAutoSuspend = false;
+ pwrctrlpriv->bInSuspend = false;
+ pwrctrlpriv->bkeepfwalive = false;
+
+ pwrctrlpriv->LpsIdleCount = 0;
+ if (padapter->registrypriv.mp_mode == 1)
+ pwrctrlpriv->power_mgnt = PS_MODE_ACTIVE;
+ else
+ pwrctrlpriv->power_mgnt = padapter->registrypriv.power_mgnt;/* PS_MODE_MIN; */
+ pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt) ? true : false;
+
+ pwrctrlpriv->bFwCurrentInPSMode = false;
+
+ pwrctrlpriv->rpwm = 0;
+ pwrctrlpriv->cpwm = PS_STATE_S4;
+
+ pwrctrlpriv->pwr_mode = PS_MODE_ACTIVE;
+ pwrctrlpriv->smart_ps = padapter->registrypriv.smart_ps;
+ pwrctrlpriv->bcn_ant_mode = 0;
+
+ pwrctrlpriv->tog = 0x80;
+
+ pwrctrlpriv->btcoex_rfon = false;
+
+ _init_timer(&(pwrctrlpriv->pwr_state_check_timer), padapter->pnetdev, pwr_state_check_handler, (u8 *)padapter);
+
+_func_exit_;
+}
+
+void rtw_free_pwrctrl_priv(struct adapter *adapter)
+{
+ struct pwrctrl_priv *pwrctrlpriv = &adapter->pwrctrlpriv;
+
+_func_enter_;
+
+ _free_pwrlock(&pwrctrlpriv->lock);
+
+_func_exit_;
+}
+
+u8 rtw_interface_ps_func(struct adapter *padapter, enum hal_intf_ps_func efunc_id, u8 *val)
+{
+ u8 bResult = true;
+ rtw_hal_intf_ps_func(padapter, efunc_id, val);
+
+ return bResult;
+}
+
+
+inline void rtw_set_ips_deny(struct adapter *padapter, u32 ms)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ pwrpriv->ips_deny_time = rtw_get_current_time() + rtw_ms_to_systime(ms);
+}
+
+/*
+ * rtw_pwr_wakeup - Wake the NIC up from: 1) IPS, 2) USB autosuspend
+ * @adapter: pointer to the struct adapter
+ * @ips_deffer_ms: number of ms during which falling back into IPS is prevented after wakeup
+ * Return _SUCCESS or _FAIL
+ */
+
+int _rtw_pwr_wakeup(struct adapter *padapter, u32 ips_deffer_ms, const char *caller)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ int ret = _SUCCESS;
+
+ if (pwrpriv->ips_deny_time < rtw_get_current_time() + rtw_ms_to_systime(ips_deffer_ms))
+ pwrpriv->ips_deny_time = rtw_get_current_time() + rtw_ms_to_systime(ips_deffer_ms);
+
+{
+ u32 start = rtw_get_current_time();
+ if (pwrpriv->ps_processing) {
+ DBG_88E("%s wait ps_processing...\n", __func__);
+ while (pwrpriv->ps_processing && rtw_get_passing_time_ms(start) <= 3000)
+ rtw_msleep_os(10);
+ if (pwrpriv->ps_processing)
+ DBG_88E("%s wait ps_processing timeout\n", __func__);
+ else
+ DBG_88E("%s wait ps_processing done\n", __func__);
+ }
+}
+
+ /* System suspend is not allowed to wakeup */
+ if ((!pwrpriv->bInternalAutoSuspend) && (pwrpriv->bInSuspend)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* block??? */
+ if ((pwrpriv->bInternalAutoSuspend) && (padapter->net_closed)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* I think this should be checked in the IPS, LPS, and autosuspend functions... */
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ ret = _SUCCESS;
+ goto exit;
+ }
+ if (rf_off == pwrpriv->rf_pwrstate) {
+ DBG_88E("%s call ips_leave....\n", __func__);
+ if (_FAIL == ips_leave(padapter)) {
+ DBG_88E("======> ips_leave fail.............\n");
+ ret = _FAIL;
+ goto exit;
+ }
+ }
+
+ /* TODO: the following checks need to be merged... */
+ if (padapter->bDriverStopped || !padapter->bup ||
+ !padapter->hw_init_completed) {
+ DBG_88E("%s: bDriverStopped=%d, bup=%d, hw_init_completed =%u\n"
+ , caller
+ , padapter->bDriverStopped
+ , padapter->bup
+ , padapter->hw_init_completed);
+ ret = false;
+ goto exit;
+ }
+
+exit:
+ if (pwrpriv->ips_deny_time < rtw_get_current_time() + rtw_ms_to_systime(ips_deffer_ms))
+ pwrpriv->ips_deny_time = rtw_get_current_time() + rtw_ms_to_systime(ips_deffer_ms);
+ return ret;
+}
+
+int rtw_pm_set_lps(struct adapter *padapter, u8 mode)
+{
+ int ret = 0;
+ struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
+
+ if (mode < PS_MODE_NUM) {
+ if (pwrctrlpriv->power_mgnt != mode) {
+ if (PS_MODE_ACTIVE == mode)
+ LeaveAllPowerSaveMode(padapter);
+ else
+ pwrctrlpriv->LpsIdleCount = 2;
+ pwrctrlpriv->power_mgnt = mode;
+ pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt) ? true : false;
+ }
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int rtw_pm_set_ips(struct adapter *padapter, u8 mode)
+{
+ struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
+
+ if (mode == IPS_NORMAL || mode == IPS_LEVEL_2) {
+ rtw_ips_mode_req(pwrctrlpriv, mode);
+ DBG_88E("%s %s\n", __func__, mode == IPS_NORMAL ? "IPS_NORMAL" : "IPS_LEVEL_2");
+ return 0;
+ } else if (mode == IPS_NONE) {
+ rtw_ips_mode_req(pwrctrlpriv, mode);
+ DBG_88E("%s %s\n", __func__, "IPS_NONE");
+ if ((padapter->bSurpriseRemoved == 0) && (_FAIL == rtw_pwr_wakeup(padapter)))
+ return -EFAULT;
+ } else {
+ return -EINVAL;
+ }
+ return 0;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
new file mode 100644
index 00000000000..20116578736
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -0,0 +1,2299 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_RECV_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <mlme_osdep.h>
+#include <ip.h>
+#include <if_ether.h>
+#include <ethernet.h>
+#include <usb_ops.h>
+#include <wifi.h>
+
+static u8 SNAP_ETH_TYPE_IPX[2] = {0x81, 0x37};
+static u8 SNAP_ETH_TYPE_APPLETALK_AARP[2] = {0x80, 0xf3};
+
+/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
+static u8 rtw_bridge_tunnel_header[] = {
+ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8
+};
+
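+/* RFC 1042 SNAP header (used for all other EtherTypes) */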
+static u8 rtw_rfc1042_header[] = {
+ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
+};
+
+void rtw_signal_stat_timer_hdl(RTW_TIMER_HDL_ARGS);
+
+void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
+{
+_func_enter_;
+
+ _rtw_memset((u8 *)psta_recvpriv, 0, sizeof(struct sta_recv_priv));
+
+ _rtw_spinlock_init(&psta_recvpriv->lock);
+
+ _rtw_init_queue(&psta_recvpriv->defrag_q);
+
+_func_exit_;
+}
+
+int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
+{
+ int i;
+
+ union recv_frame *precvframe;
+
+ int res = _SUCCESS;
+
+_func_enter_;
+ _rtw_spinlock_init(&precvpriv->lock);
+
+ _rtw_init_queue(&precvpriv->free_recv_queue);
+ _rtw_init_queue(&precvpriv->recv_pending_queue);
+ _rtw_init_queue(&precvpriv->uc_swdec_pending_queue);
+
+ precvpriv->adapter = padapter;
+
+ precvpriv->free_recvframe_cnt = NR_RECVFRAME;
+
+ rtw_os_recv_resource_init(precvpriv, padapter);
+
+ precvpriv->pallocated_frame_buf = rtw_zvmalloc(NR_RECVFRAME * sizeof(union recv_frame) + RXFRAME_ALIGN_SZ);
+
+ if (precvpriv->pallocated_frame_buf == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ precvpriv->precv_frame_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(precvpriv->pallocated_frame_buf), RXFRAME_ALIGN_SZ);
+
+ precvframe = (union recv_frame *)precvpriv->precv_frame_buf;
+
+ for (i = 0; i < NR_RECVFRAME; i++) {
+ _rtw_init_listhead(&(precvframe->u.list));
+
+ rtw_list_insert_tail(&(precvframe->u.list), &(precvpriv->free_recv_queue.queue));
+
+ res = rtw_os_recv_resource_alloc(padapter, precvframe);
+
+ precvframe->u.hdr.len = 0;
+
+ precvframe->u.hdr.adapter = padapter;
+ precvframe++;
+ }
+ precvpriv->rx_pending_cnt = 1;
+
+ _rtw_init_sema(&precvpriv->allrxreturnevt, 0);
+
+ res = rtw_hal_init_recv_priv(padapter);
+
+ _init_timer(&precvpriv->signal_stat_timer, padapter->pnetdev, RTW_TIMER_HDL_NAME(signal_stat), padapter);
+
+ precvpriv->signal_stat_sampling_interval = 1000; /* ms */
+
+ rtw_set_signal_stat_timer(precvpriv);
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+static void rtw_mfree_recv_priv_lock(struct recv_priv *precvpriv)
+{
+ _rtw_spinlock_free(&precvpriv->lock);
+ _rtw_spinlock_free(&precvpriv->free_recv_queue.lock);
+ _rtw_spinlock_free(&precvpriv->recv_pending_queue.lock);
+
+ _rtw_spinlock_free(&precvpriv->free_recv_buf_queue.lock);
+}
+
+void _rtw_free_recv_priv(struct recv_priv *precvpriv)
+{
+ struct adapter *padapter = precvpriv->adapter;
+
+_func_enter_;
+
+ rtw_free_uc_swdec_pending_queue(padapter);
+
+ rtw_mfree_recv_priv_lock(precvpriv);
+
+ rtw_os_recv_resource_free(precvpriv);
+
+ if (precvpriv->pallocated_frame_buf) {
+ rtw_vmfree(precvpriv->pallocated_frame_buf, NR_RECVFRAME * sizeof(union recv_frame) + RXFRAME_ALIGN_SZ);
+ }
+
+ rtw_hal_free_recv_priv(padapter);
+
+_func_exit_;
+}
+
+union recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
+{
+ union recv_frame *precvframe;
+ struct list_head *plist, *phead;
+ struct adapter *padapter;
+ struct recv_priv *precvpriv;
+_func_enter_;
+
+ if (_rtw_queue_empty(pfree_recv_queue)) {
+ precvframe = NULL;
+ } else {
+ phead = get_list_head(pfree_recv_queue);
+
+ plist = get_next(phead);
+
+ precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
+
+ rtw_list_delete(&precvframe->u.hdr.list);
+ padapter = precvframe->u.hdr.adapter;
+ if (padapter != NULL) {
+ precvpriv = &padapter->recvpriv;
+ if (pfree_recv_queue == &precvpriv->free_recv_queue)
+ precvpriv->free_recvframe_cnt--;
+ }
+ }
+
+_func_exit_;
+
+ return precvframe;
+}
+
+union recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
+{
+ unsigned long irqL;
+ union recv_frame *precvframe;
+
+ _enter_critical_bh(&pfree_recv_queue->lock, &irqL);
+
+ precvframe = _rtw_alloc_recvframe(pfree_recv_queue);
+
+ _exit_critical_bh(&pfree_recv_queue->lock, &irqL);
+
+ return precvframe;
+}
+
+void rtw_init_recvframe(union recv_frame *precvframe, struct recv_priv *precvpriv)
+{
+ /* Perry: This can be removed */
+ _rtw_init_listhead(&precvframe->u.hdr.list);
+
+ precvframe->u.hdr.len = 0;
+}
+
+int rtw_free_recvframe(union recv_frame *precvframe, struct __queue *pfree_recv_queue)
+{
+ unsigned long irqL;
+ struct adapter *padapter = precvframe->u.hdr.adapter;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+_func_enter_;
+
+ if (precvframe->u.hdr.pkt) {
+ dev_kfree_skb_any(precvframe->u.hdr.pkt);/* free skb by driver */
+ precvframe->u.hdr.pkt = NULL;
+ }
+
+ _enter_critical_bh(&pfree_recv_queue->lock, &irqL);
+
+ rtw_list_delete(&(precvframe->u.hdr.list));
+
+ precvframe->u.hdr.len = 0;
+
+ rtw_list_insert_tail(&(precvframe->u.hdr.list), get_list_head(pfree_recv_queue));
+
+ if (padapter != NULL) {
+ if (pfree_recv_queue == &precvpriv->free_recv_queue)
+ precvpriv->free_recvframe_cnt++;
+ }
+
+ _exit_critical_bh(&pfree_recv_queue->lock, &irqL);
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+int _rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue)
+{
+ struct adapter *padapter = precvframe->u.hdr.adapter;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+_func_enter_;
+
+ rtw_list_delete(&(precvframe->u.hdr.list));
+ rtw_list_insert_tail(&(precvframe->u.hdr.list), get_list_head(queue));
+
+ if (padapter != NULL) {
+ if (queue == &precvpriv->free_recv_queue)
+ precvpriv->free_recvframe_cnt++;
+ }
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+int rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue)
+{
+ int ret;
+ unsigned long irqL;
+
+ _enter_critical_bh(&queue->lock, &irqL);
+ ret = _rtw_enqueue_recvframe(precvframe, queue);
+ _exit_critical_bh(&queue->lock, &irqL);
+
+ return ret;
+}
+
+/*
+ * caller: defrag; recvframe_chk_defrag in recv_thread (passive)
+ * pframequeue: defrag_queue; will be accessed in recv_thread (passive)
+ *
+ * using a spinlock to protect the queue
+ */
+
+void rtw_free_recvframe_queue(struct __queue *pframequeue, struct __queue *pfree_recv_queue)
+{
+ union recv_frame *precvframe;
+ struct list_head *plist, *phead;
+
+_func_enter_;
+ spin_lock(&pframequeue->lock);
+
+ phead = get_list_head(pframequeue);
+ plist = get_next(phead);
+
+ while (rtw_end_of_queue_search(phead, plist) == false) {
+ precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
+
+ plist = get_next(plist);
+
+ rtw_free_recvframe(precvframe, pfree_recv_queue);
+ }
+
+ spin_unlock(&pframequeue->lock);
+
+_func_exit_;
+}
+
+u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
+{
+ u32 cnt = 0;
+ union recv_frame *pending_frame;
+ while ((pending_frame = rtw_alloc_recvframe(&adapter->recvpriv.uc_swdec_pending_queue))) {
+ rtw_free_recvframe(pending_frame, &adapter->recvpriv.free_recv_queue);
+ DBG_88E("%s: dequeue uc_swdec_pending_queue\n", __func__);
+ cnt++;
+ }
+
+ return cnt;
+}
+
+int rtw_enqueue_recvbuf_to_head(struct recv_buf *precvbuf, struct __queue *queue)
+{
+ unsigned long irqL;
+
+ _enter_critical_bh(&queue->lock, &irqL);
+
+ rtw_list_delete(&precvbuf->list);
+ rtw_list_insert_head(&precvbuf->list, get_list_head(queue));
+
+ _exit_critical_bh(&queue->lock, &irqL);
+
+ return _SUCCESS;
+}
+
+int rtw_enqueue_recvbuf(struct recv_buf *precvbuf, struct __queue *queue)
+{
+ unsigned long irqL;
+ _enter_critical_ex(&queue->lock, &irqL);
+
+ rtw_list_delete(&precvbuf->list);
+
+ rtw_list_insert_tail(&precvbuf->list, get_list_head(queue));
+ _exit_critical_ex(&queue->lock, &irqL);
+ return _SUCCESS;
+}
+
+struct recv_buf *rtw_dequeue_recvbuf(struct __queue *queue)
+{
+ unsigned long irqL;
+ struct recv_buf *precvbuf;
+ struct list_head *plist, *phead;
+
+ _enter_critical_ex(&queue->lock, &irqL);
+
+ if (_rtw_queue_empty(queue)) {
+ precvbuf = NULL;
+ } else {
+ phead = get_list_head(queue);
+
+ plist = get_next(phead);
+
+ precvbuf = LIST_CONTAINOR(plist, struct recv_buf, list);
+
+ rtw_list_delete(&precvbuf->list);
+ }
+
+ _exit_critical_ex(&queue->lock, &irqL);
+
+ return precvbuf;
+}
+
+static int recvframe_chkmic(struct adapter *adapter, union recv_frame *precvframe)
+{
+ int i, res = _SUCCESS;
+ u32 datalen;
+ u8 miccode[8];
+ u8 bmic_err = false, brpt_micerror = true;
+ u8 *pframe, *payload, *pframemic;
+ u8 *mickey;
+ struct sta_info *stainfo;
+ struct rx_pkt_attrib *prxattrib = &precvframe->u.hdr.attrib;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+
+ struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+_func_enter_;
+
+ stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
+
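+ /* For TKIP frames the last 8 bytes of the payload carry the Michael MIC; */
+ /* recompute it locally and compare. A mismatch is reported as a TKIP MIC */
+ /* error so countermeasures can be triggered. */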
+ if (prxattrib->encrypt == _TKIP_) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n recvframe_chkmic:prxattrib->encrypt==_TKIP_\n"));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n recvframe_chkmic:da=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+ prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2], prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5]));
+
+ /* calculate mic code */
+ if (stainfo != NULL) {
+ if (IS_MCAST(prxattrib->ra)) {
+ mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n recvframe_chkmic: bcmc key\n"));
+
+ if (!psecuritypriv) {
+ res = _FAIL;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n recvframe_chkmic:didn't install group key!!!!!!!!!!\n"));
+ DBG_88E("\n recvframe_chkmic:didn't install group key!!!!!!!!!!\n");
+ goto exit;
+ }
+ } else {
+ mickey = &stainfo->dot11tkiprxmickey.skey[0];
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n recvframe_chkmic: unicast key\n"));
+ }
+
+ datalen = precvframe->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len-prxattrib->icv_len-8;/* icv_len included the mic code */
+ pframe = precvframe->u.hdr.rx_data;
+ payload = pframe+prxattrib->hdrlen+prxattrib->iv_len;
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len));
+ rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0],
+ (unsigned char)prxattrib->priority); /* take care with the length of the data */
+
+ pframemic = payload+datalen;
+
+ bmic_err = false;
+
+ for (i = 0; i < 8; i++) {
+ if (miccode[i] != *(pframemic+i)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("recvframe_chkmic:miccode[%d](%02x)!=*(pframemic+%d)(%02x) ",
+ i, miccode[i], i, *(pframemic+i)));
+ bmic_err = true;
+ }
+ }
+
+ if (bmic_err) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n *(pframemic-8)-*(pframemic-1)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+ *(pframemic-8), *(pframemic-7), *(pframemic-6),
+ *(pframemic-5), *(pframemic-4), *(pframemic-3),
+ *(pframemic-2), *(pframemic-1)));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n *(pframemic-16)-*(pframemic-9)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+ *(pframemic-16), *(pframemic-15), *(pframemic-14),
+ *(pframemic-13), *(pframemic-12), *(pframemic-11),
+ *(pframemic-10), *(pframemic-9)));
+ {
+ uint i;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n ======demp packet (len=%d)======\n", precvframe->u.hdr.len));
+ for (i = 0; i < precvframe->u.hdr.len; i = i+8) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x",
+ *(precvframe->u.hdr.rx_data+i), *(precvframe->u.hdr.rx_data+i+1),
+ *(precvframe->u.hdr.rx_data+i+2), *(precvframe->u.hdr.rx_data+i+3),
+ *(precvframe->u.hdr.rx_data+i+4), *(precvframe->u.hdr.rx_data+i+5),
+ *(precvframe->u.hdr.rx_data+i+6), *(precvframe->u.hdr.rx_data+i+7)));
+ }
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n ====== demp packet end [len=%d]======\n", precvframe->u.hdr.len));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("\n hrdlen=%d,\n", prxattrib->hdrlen));
+ }
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("ra=0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x psecuritypriv->binstallGrpkey=%d ",
+ prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
+ prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5], psecuritypriv->binstallGrpkey));
+
+ /* double check key_index because of a timing issue; */
+ /* comparing with psecuritypriv->dot118021XGrpKeyid would also hit the timing issue */
+ if ((IS_MCAST(prxattrib->ra) == true) && (prxattrib->key_index != pmlmeinfo->key_index))
+ brpt_micerror = false;
+
+ if ((prxattrib->bdecrypted) && (brpt_micerror)) {
+ rtw_handle_tkip_mic_err(adapter, (u8)IS_MCAST(prxattrib->ra));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
+ DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
+ } else {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
+ DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
+ }
+ res = _FAIL;
+ } else {
+ /* mic checked ok */
+ if ((!psecuritypriv->bcheck_grpkey) && (IS_MCAST(prxattrib->ra))) {
+ psecuritypriv->bcheck_grpkey = true;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("psecuritypriv->bcheck_grpkey = true"));
+ }
+ }
+ } else {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvframe_chkmic: rtw_get_stainfo==NULL!!!\n"));
+ }
+
+ recvframe_pull_tail(precvframe, 8);
+ }
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+/* decrypt and set the ivlen, icvlen of the recv_frame */
+static union recv_frame *decryptor(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ struct rx_pkt_attrib *prxattrib = &precv_frame->u.hdr.attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ union recv_frame *return_packet = precv_frame;
+ u32 res = _SUCCESS;
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("prxstat->decrypted=%x prxattrib->encrypt=0x%03x\n", prxattrib->bdecrypted, prxattrib->encrypt));
+
+ if (prxattrib->encrypt > 0) {
+ u8 *iv = precv_frame->u.hdr.rx_data+prxattrib->hdrlen;
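+ /* The key ID occupies bits 6-7 of the 4th IV octet. */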
+ prxattrib->key_index = (((iv[3])>>6)&0x3);
+
+ if (prxattrib->key_index > WEP_KEYS) {
+ DBG_88E("prxattrib->key_index(%d)>WEP_KEYS\n", prxattrib->key_index);
+
+ switch (prxattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ prxattrib->key_index = psecuritypriv->dot11PrivacyKeyIndex;
+ break;
+ case _TKIP_:
+ case _AES_:
+ default:
+ prxattrib->key_index = psecuritypriv->dot118021XGrpKeyid;
+ break;
+ }
+ }
+ }
+
+ if ((prxattrib->encrypt > 0) && ((prxattrib->bdecrypted == 0) || (psecuritypriv->sw_decrypt))) {
+ psecuritypriv->hw_decrypted = false;
+
+ switch (prxattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ rtw_wep_decrypt(padapter, (u8 *)precv_frame);
+ break;
+ case _TKIP_:
+ res = rtw_tkip_decrypt(padapter, (u8 *)precv_frame);
+ break;
+ case _AES_:
+ res = rtw_aes_decrypt(padapter, (u8 *)precv_frame);
+ break;
+ default:
+ break;
+ }
+ } else if (prxattrib->bdecrypted == 1 && prxattrib->encrypt > 0 &&
+ (psecuritypriv->busetkipkey == 1 || prxattrib->encrypt != _TKIP_))
+ psecuritypriv->hw_decrypted = true;
+
+ if (res == _FAIL) {
+ rtw_free_recvframe(return_packet, &padapter->recvpriv.free_recv_queue);
+ return_packet = NULL;
+ }
+
+_func_exit_;
+
+ return return_packet;
+}
+
+/* set the security information in the recv_frame */
+static union recv_frame *portctrl(struct adapter *adapter, union recv_frame *precv_frame)
+{
+ u8 *psta_addr = NULL, *ptr;
+ uint auth_alg;
+ struct recv_frame_hdr *pfhdr;
+ struct sta_info *psta;
+ struct sta_priv *pstapriv;
+ union recv_frame *prtnframe;
+ u16 ether_type = 0;
+ u16 eapol_type = 0x888e;/* for Funia BD's WPA issue */
+ struct rx_pkt_attrib *pattrib;
+ __be16 be_tmp;
+
+_func_enter_;
+
+ pstapriv = &adapter->stapriv;
+ psta = rtw_get_stainfo(pstapriv, psta_addr);
+
+ auth_alg = adapter->securitypriv.dot11AuthAlgrthm;
+
+ ptr = get_recvframe_data(precv_frame);
+ pfhdr = &precv_frame->u.hdr;
+ pattrib = &pfhdr->attrib;
+ psta_addr = pattrib->ta;
+
+ prtnframe = NULL;
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########portctrl:adapter->securitypriv.dot11AuthAlgrthm=%d\n", adapter->securitypriv.dot11AuthAlgrthm));
+
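+ /* 802.1X port control: while the station's port is still blocked, only EAPOL */
+ /* (EtherType 0x888e) frames are passed up; everything else is dropped until */
+ /* the handshake completes. */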
+ if (auth_alg == 2) {
+ if ((psta != NULL) && (psta->ieee8021x_blocked)) {
+ /* blocked */
+ /* only accept EAPOL frame */
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########portctrl:psta->ieee8021x_blocked==1\n"));
+
+ prtnframe = precv_frame;
+
+ /* get ether_type */
+ ptr = ptr+pfhdr->attrib.hdrlen+pfhdr->attrib.iv_len+LLC_HEADER_SIZE;
+ memcpy(&be_tmp, ptr, 2);
+ ether_type = ntohs(be_tmp);
+
+ if (ether_type == eapol_type) {
+ prtnframe = precv_frame;
+ } else {
+ /* free this frame */
+ rtw_free_recvframe(precv_frame, &adapter->recvpriv.free_recv_queue);
+ prtnframe = NULL;
+ }
+ } else {
+ /* allowed */
+ /* check decryption status, and decrypt the frame if needed */
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########portctrl:psta->ieee8021x_blocked==0\n"));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("portctrl:precv_frame->hdr.attrib.privacy=%x\n", precv_frame->u.hdr.attrib.privacy));
+
+ if (pattrib->bdecrypted == 0)
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("portctrl:prxstat->decrypted=%x\n", pattrib->bdecrypted));
+
+ prtnframe = precv_frame;
+ /* check is the EAPOL frame or not (Rekey) */
+ if (ether_type == eapol_type) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("########portctrl:ether_type==0x888e\n"));
+ /* check Rekey */
+
+ prtnframe = precv_frame;
+ } else {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("########portctrl:ether_type=0x%04x\n", ether_type));
+ }
+ }
+ } else {
+ prtnframe = precv_frame;
+ }
+
+_func_exit_;
+
+ return prtnframe;
+}
+
+static int recv_decache(union recv_frame *precv_frame, u8 bretry, struct stainfo_rxcache *prxcache)
+{
+ int tid = precv_frame->u.hdr.attrib.priority;
+
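+ /* Rebuild the 802.11 Sequence Control value (sequence number in the upper 12 */
+ /* bits, fragment number in the low 4) and drop the frame if it matches the */
+ /* last value cached for this TID, i.e. a retransmitted duplicate. */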
+ u16 seq_ctrl = ((precv_frame->u.hdr.attrib.seq_num&0xffff) << 4) |
+ (precv_frame->u.hdr.attrib.frag_num & 0xf);
+
+_func_enter_;
+
+ if (tid > 15) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("recv_decache, (tid>15)! seq_ctrl=0x%x, tid=0x%x\n", seq_ctrl, tid));
+
+ return _FAIL;
+ }
+
+ if (1) {/* if (bretry) */
+ if (seq_ctrl == prxcache->tid_rxseq[tid]) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("recv_decache, seq_ctrl=0x%x, tid=0x%x, tid_rxseq=0x%x\n", seq_ctrl, tid, prxcache->tid_rxseq[tid]));
+
+ return _FAIL;
+ }
+ }
+
+ prxcache->tid_rxseq[tid] = seq_ctrl;
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+void process_pwrbit_data(struct adapter *padapter, union recv_frame *precv_frame);
+void process_pwrbit_data(struct adapter *padapter, union recv_frame *precv_frame)
+{
+#ifdef CONFIG_88EU_AP_MODE
+ unsigned char pwrbit;
+ u8 *ptr = precv_frame->u.hdr.rx_data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *psta = NULL;
+
+ psta = rtw_get_stainfo(pstapriv, pattrib->src);
+
+ pwrbit = GetPwrMgt(ptr);
+
+ if (psta) {
+ if (pwrbit) {
+ if (!(psta->state & WIFI_SLEEP_STATE))
+ stop_sta_xmit(padapter, psta);
+ } else {
+ if (psta->state & WIFI_SLEEP_STATE)
+ wakeup_sta_to_xmit(padapter, psta);
+ }
+ }
+
+#endif
+}
+
+static void process_wmmps_data(struct adapter *padapter, union recv_frame *precv_frame)
+{
+#ifdef CONFIG_88EU_AP_MODE
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *psta = NULL;
+
+ psta = rtw_get_stainfo(pstapriv, pattrib->src);
+
+ if (!psta)
+ return;
+
+ if (!psta->qos_option)
+ return;
+
+ if (!(psta->qos_info&0xf))
+ return;
+
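+ /* U-APSD: for a sleeping QoS station, check whether the frame's AC is */
+ /* delivery-enabled; if so, deliver the buffered frames, or send a QoS Null */
+ /* with EOSP set when the per-AC sleep queue is empty. */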
+ if (psta->state&WIFI_SLEEP_STATE) {
+ u8 wmmps_ac = 0;
+
+ switch (pattrib->priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk&BIT(1);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi&BIT(1);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo&BIT(1);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be&BIT(1);
+ break;
+ }
+
+ if (wmmps_ac) {
+ if (psta->sleepq_ac_len > 0) {
+ /* process received triggered frame */
+ xmit_delivery_enabled_frames(padapter, psta);
+ } else {
+ /* issue one qos null frame with More data bit = 0 and the EOSP bit set (= 1) */
+ issue_qos_nulldata(padapter, psta->hwaddr, (u16)pattrib->priority, 0, 0);
+ }
+ }
+ }
+
+#endif
+}
+
+static void count_rx_stats(struct adapter *padapter, union recv_frame *prframe, struct sta_info *sta)
+{
+ int sz;
+ struct sta_info *psta = NULL;
+ struct stainfo_stats *pstats = NULL;
+ struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+ sz = get_recvframe_len(prframe);
+ precvpriv->rx_bytes += sz;
+
+ padapter->mlmepriv.LinkDetectInfo.NumRxOkInPeriod++;
+
+ if ((!MacAddr_isBcst(pattrib->dst)) && (!IS_MCAST(pattrib->dst)))
+ padapter->mlmepriv.LinkDetectInfo.NumRxUnicastOkInPeriod++;
+
+ if (sta)
+ psta = sta;
+ else
+ psta = prframe->u.hdr.psta;
+
+ if (psta) {
+ pstats = &psta->sta_stats;
+
+ pstats->rx_data_pkts++;
+ pstats->rx_bytes += sz;
+ }
+}
+
+int sta2sta_data_frame(
+ struct adapter *adapter,
+ union recv_frame *precv_frame,
+ struct sta_info **psta
+);
+
+int sta2sta_data_frame(struct adapter *adapter, union recv_frame *precv_frame, struct sta_info **psta)
+{
+ u8 *ptr = precv_frame->u.hdr.rx_data;
+ int ret = _SUCCESS;
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ u8 *mybssid = get_bssid(pmlmepriv);
+ u8 *myhwaddr = myid(&adapter->eeprompriv);
+ u8 *sta_addr = NULL;
+ int bmcast = IS_MCAST(pattrib->dst);
+
+_func_enter_;
+
+ if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
+ /* filter out packets whose SA is myself, multicast, or broadcast */
+ if (_rtw_memcmp(myhwaddr, pattrib->src, ETH_ALEN)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" SA==myself\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if ((!_rtw_memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if (_rtw_memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
+ _rtw_memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
+ !_rtw_memcmp(pattrib->bssid, mybssid, ETH_ALEN)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ sta_addr = pattrib->src;
+ } else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ /* For Station mode, SA and BSSID should always be the BSSID, and DA should be my MAC address */
+ if (!_rtw_memcmp(pattrib->bssid, pattrib->src, ETH_ALEN)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("bssid!=TA under STATION_MODE; drop pkt\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+ sta_addr = pattrib->bssid;
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ if (bmcast) {
+ /* For AP mode, if DA == MCAST, then the BSSID should also be MCAST */
+ if (!IS_MCAST(pattrib->bssid)) {
+ ret = _FAIL;
+ goto exit;
+ }
+ } else { /* not mc-frame */
+ /* For AP mode, if DA is non-MCAST, then it must match the BSSID, and bssid == BSSID */
+ if (!_rtw_memcmp(pattrib->bssid, pattrib->dst, ETH_ALEN)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ sta_addr = pattrib->src;
+ }
+ } else if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
+ memcpy(pattrib->dst, GetAddr1Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->src, GetAddr2Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->bssid, GetAddr3Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
+ memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
+
+ sta_addr = mybssid;
+ } else {
+ ret = _FAIL;
+ }
+
+ if (bmcast)
+ *psta = rtw_get_bcmc_stainfo(adapter);
+ else
+ *psta = rtw_get_stainfo(pstapriv, sta_addr); /* get ap_info */
+
+ if (*psta == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("can't get psta under sta2sta_data_frame ; drop pkt\n"));
+ if (adapter->registrypriv.mp_mode == 1) {
+ if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true)
+ adapter->mppriv.rx_pktloss++;
+ }
+ ret = _FAIL;
+ goto exit;
+ }
+
+exit:
+_func_exit_;
+ return ret;
+}
+
+static int ap2sta_data_frame (
+ struct adapter *adapter,
+ union recv_frame *precv_frame,
+ struct sta_info **psta)
+{
+ u8 *ptr = precv_frame->u.hdr.rx_data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ int ret = _SUCCESS;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ u8 *mybssid = get_bssid(pmlmepriv);
+ u8 *myhwaddr = myid(&adapter->eeprompriv);
+ int bmcast = IS_MCAST(pattrib->dst);
+
+_func_enter_;
+
+ if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) &&
+ (check_fwstate(pmlmepriv, _FW_LINKED) == true ||
+ check_fwstate(pmlmepriv, _FW_UNDER_LINKING))) {
+ /* filter out packets whose SA is myself, multicast, or broadcast */
+ if (_rtw_memcmp(myhwaddr, pattrib->src, ETH_ALEN)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" SA==myself\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* da should be for me */
+ if ((!_rtw_memcmp(myhwaddr, pattrib->dst, ETH_ALEN)) && (!bmcast)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ (" ap2sta_data_frame: compare DA fail; DA=%pM\n", (pattrib->dst)));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* check BSSID */
+ if (_rtw_memcmp(pattrib->bssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
+ _rtw_memcmp(mybssid, "\x0\x0\x0\x0\x0\x0", ETH_ALEN) ||
+ (!_rtw_memcmp(pattrib->bssid, mybssid, ETH_ALEN))) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ (" ap2sta_data_frame: compare BSSID fail ; BSSID=%pM\n", (pattrib->bssid)));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("mybssid=%pM\n", (mybssid)));
+
+ if (!bmcast) {
+ DBG_88E("issue_deauth to the nonassociated ap=%pM for the reason(7)\n", (pattrib->bssid));
+ issue_deauth(adapter, pattrib->bssid, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
+ }
+
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if (bmcast)
+ *psta = rtw_get_bcmc_stainfo(adapter);
+ else
+ *psta = rtw_get_stainfo(pstapriv, pattrib->bssid); /* get ap_info */
+
+ if (*psta == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("ap2sta: can't get psta under STATION_MODE ; drop pkt\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE) { */
+ /* */
+
+ if (GetFrameSubType(ptr) & BIT(6)) {
+ /* No data; will not be indicated to the upper layer, so count it here temporarily */
+ count_rx_stats(adapter, precv_frame, *psta);
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ }
+ } else if ((check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) &&
+ (check_fwstate(pmlmepriv, _FW_LINKED) == true)) {
+ memcpy(pattrib->dst, GetAddr1Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->src, GetAddr2Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->bssid, GetAddr3Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
+ memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
+
+ /* */
+ memcpy(pattrib->bssid, mybssid, ETH_ALEN);
+
+ *psta = rtw_get_stainfo(pstapriv, pattrib->bssid); /* get sta_info */
+ if (*psta == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("can't get psta under MP_MODE ; drop pkt\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ /* Special case */
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ } else {
+ if (_rtw_memcmp(myhwaddr, pattrib->dst, ETH_ALEN) && (!bmcast)) {
+ *psta = rtw_get_stainfo(pstapriv, pattrib->bssid); /* get sta_info */
+ if (*psta == NULL) {
+ DBG_88E("issue_deauth to the ap =%pM for the reason(7)\n", (pattrib->bssid));
+
+ issue_deauth(adapter, pattrib->bssid, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
+ }
+ }
+
+ ret = _FAIL;
+ }
+
+exit:
+
+_func_exit_;
+
+ return ret;
+}
+
+static int sta2ap_data_frame(struct adapter *adapter,
+ union recv_frame *precv_frame,
+ struct sta_info **psta)
+{
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct sta_priv *pstapriv = &adapter->stapriv;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+ u8 *ptr = precv_frame->u.hdr.rx_data;
+ unsigned char *mybssid = get_bssid(pmlmepriv);
+ int ret = _SUCCESS;
+
+_func_enter_;
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+ /* For AP mode, RA = BSSID, TA = STA(SRC_ADDR), A3 = DST_ADDR */
+ if (!_rtw_memcmp(pattrib->bssid, mybssid, ETH_ALEN)) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ *psta = rtw_get_stainfo(pstapriv, pattrib->src);
+ if (*psta == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("can't get psta under AP_MODE; drop pkt\n"));
+ DBG_88E("issue_deauth to sta=%pM for the reason(7)\n", (pattrib->src));
+
+ issue_deauth(adapter, pattrib->src, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
+
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ }
+
+ process_pwrbit_data(adapter, precv_frame);
+
+ if ((GetFrameSubType(ptr) & WIFI_QOS_DATA_TYPE) == WIFI_QOS_DATA_TYPE) {
+ process_wmmps_data(adapter, precv_frame);
+ }
+
+ if (GetFrameSubType(ptr) & BIT(6)) {
+ /* No data; will not be indicated to the upper layer, so count it here temporarily */
+ count_rx_stats(adapter, precv_frame, *psta);
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ }
+ } else {
+ u8 *myhwaddr = myid(&adapter->eeprompriv);
+ if (!_rtw_memcmp(pattrib->ra, myhwaddr, ETH_ALEN)) {
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ }
+ DBG_88E("issue_deauth to sta=%pM for the reason(7)\n", (pattrib->src));
+ issue_deauth(adapter, pattrib->src, WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA);
+ ret = RTW_RX_HANDLED;
+ goto exit;
+ }
+
+exit:
+
+_func_exit_;
+
+ return ret;
+}
+
+static int validate_recv_ctrl_frame(struct adapter *padapter,
+ union recv_frame *precv_frame)
+{
+#ifdef CONFIG_88EU_AP_MODE
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 *pframe = precv_frame->u.hdr.rx_data;
+ /* uint len = precv_frame->u.hdr.len; */
+
+ if (GetFrameType(pframe) != WIFI_CTRL_TYPE)
+ return _FAIL;
+
+ /* only accept frames whose RA (A1) is my address */
+ if (!_rtw_memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN))
+ return _FAIL;
+
+ /* only handle ps-poll */
+ if (GetFrameSubType(pframe) == WIFI_PSPOLL) {
+ u16 aid;
+ u8 wmmps_ac = 0;
+ struct sta_info *psta = NULL;
+
+ aid = GetAid(pframe);
+ psta = rtw_get_stainfo(pstapriv, GetAddr2Ptr(pframe));
+
+ if ((psta == NULL) || (psta->aid != aid))
+ return _FAIL;
+
+ /* for rx pkt statistics */
+ psta->sta_stats.rx_ctrl_pkts++;
+
+ switch (pattrib->priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk&BIT(0);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi&BIT(0);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo&BIT(0);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be&BIT(0);
+ break;
+ }
+
+ if (wmmps_ac)
+ return _FAIL;
+
+ if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
+ DBG_88E("%s alive check-rx ps-poll\n", __func__);
+ psta->expire_to = pstapriv->expire_to;
+ psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
+ }
+
+ if ((psta->state&WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap&BIT(psta->aid))) {
+ unsigned long irqL;
+ struct list_head *xmitframe_plist, *xmitframe_phead;
+ struct xmit_frame *pxmitframe = NULL;
+
+ _enter_critical_bh(&psta->sleep_q.lock, &irqL);
+
+ xmitframe_phead = get_list_head(&psta->sleep_q);
+ xmitframe_plist = get_next(xmitframe_phead);
+
+ if ((rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) == false) {
+ pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+
+ xmitframe_plist = get_next(xmitframe_plist);
+
+ rtw_list_delete(&pxmitframe->list);
+
+ psta->sleepq_len--;
+
+ if (psta->sleepq_len > 0)
+ pxmitframe->attrib.mdata = 1;
+ else
+ pxmitframe->attrib.mdata = 0;
+
+ pxmitframe->attrib.triggered = 1;
+
+ _exit_critical_bh(&psta->sleep_q.lock, &irqL);
+ if (rtw_hal_xmit(padapter, pxmitframe) == true)
+ rtw_os_xmit_complete(padapter, pxmitframe);
+ _enter_critical_bh(&psta->sleep_q.lock, &irqL);
+
+ if (psta->sleepq_len == 0) {
+ pstapriv->tim_bitmap &= ~BIT(psta->aid);
+
+ /* update BCN for TIM IE */
+ /* update_BCNTIM(padapter); */
+ update_beacon(padapter, _TIM_IE_, NULL, false);
+ }
+ } else {
+ if (pstapriv->tim_bitmap&BIT(psta->aid)) {
+ if (psta->sleepq_len == 0) {
+ DBG_88E("no buffered packets to xmit\n");
+
+ /* issue nulldata with More data bit = 0 to indicate we have no buffered packets */
+ issue_nulldata(padapter, psta->hwaddr, 0, 0, 0);
+ } else {
+ DBG_88E("error!psta->sleepq_len=%d\n", psta->sleepq_len);
+ psta->sleepq_len = 0;
+ }
+
+ pstapriv->tim_bitmap &= ~BIT(psta->aid);
+
+ /* update BCN for TIM IE */
+ /* update_BCNTIM(padapter); */
+ update_beacon(padapter, _TIM_IE_, NULL, false);
+ }
+ }
+
+ _exit_critical_bh(&psta->sleep_q.lock, &irqL);
+ }
+ }
+
+#endif
+
+ return _FAIL;
+}
+
+union recv_frame *recvframe_chk_defrag(struct adapter *padapter, union recv_frame *precv_frame);
+
+static int validate_recv_mgnt_frame(struct adapter *padapter,
+ union recv_frame *precv_frame)
+{
+ struct sta_info *psta;
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("+validate_recv_mgnt_frame\n"));
+
+ precv_frame = recvframe_chk_defrag(padapter, precv_frame);
+ if (precv_frame == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("%s: fragment packet\n", __func__));
+ return _SUCCESS;
+ }
+
+ /* for rx pkt statistics */
+ psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(precv_frame->u.hdr.rx_data));
+ if (psta) {
+ psta->sta_stats.rx_mgnt_pkts++;
+ if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_BEACON) {
+ psta->sta_stats.rx_beacon_pkts++;
+ } else if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_PROBEREQ) {
+ psta->sta_stats.rx_probereq_pkts++;
+ } else if (GetFrameSubType(precv_frame->u.hdr.rx_data) == WIFI_PROBERSP) {
+ if (_rtw_memcmp(padapter->eeprompriv.mac_addr, GetAddr1Ptr(precv_frame->u.hdr.rx_data), ETH_ALEN) == true)
+ psta->sta_stats.rx_probersp_pkts++;
+ else if (is_broadcast_mac_addr(GetAddr1Ptr(precv_frame->u.hdr.rx_data)) ||
+ is_multicast_mac_addr(GetAddr1Ptr(precv_frame->u.hdr.rx_data)))
+ psta->sta_stats.rx_probersp_bm_pkts++;
+ else
+ psta->sta_stats.rx_probersp_uo_pkts++;
+ }
+ }
+
+ mgt_dispatcher(padapter, precv_frame);
+
+ return _SUCCESS;
+}
+
+static int validate_recv_data_frame(struct adapter *adapter,
+ union recv_frame *precv_frame)
+{
+ u8 bretry;
+ u8 *psa, *pda, *pbssid;
+ struct sta_info *psta = NULL;
+ u8 *ptr = precv_frame->u.hdr.rx_data;
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+ int ret = _SUCCESS;
+
+_func_enter_;
+
+ bretry = GetRetry(ptr);
+ pda = get_da(ptr);
+ psa = get_sa(ptr);
+ pbssid = get_hdr_bssid(ptr);
+
+ if (pbssid == NULL) {
+ ret = _FAIL;
+ goto exit;
+ }
+
+ memcpy(pattrib->dst, pda, ETH_ALEN);
+ memcpy(pattrib->src, psa, ETH_ALEN);
+
+ memcpy(pattrib->bssid, pbssid, ETH_ALEN);
+
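+ /*
+ * Dispatch on the ToDS/FromDS combination: 0 = no DS (IBSS/direct,
+ * handled by sta2sta_data_frame), 1 = From-DS (AP to STA, ap2sta),
+ * 2 = To-DS (STA to AP, sta2ap), 3 = both set (4-address/WDS frame,
+ * not supported here).
+ */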
+ switch (pattrib->to_fr_ds) {
+ case 0:
+ memcpy(pattrib->ra, pda, ETH_ALEN);
+ memcpy(pattrib->ta, psa, ETH_ALEN);
+ ret = sta2sta_data_frame(adapter, precv_frame, &psta);
+ break;
+ case 1:
+ memcpy(pattrib->ra, pda, ETH_ALEN);
+ memcpy(pattrib->ta, pbssid, ETH_ALEN);
+ ret = ap2sta_data_frame(adapter, precv_frame, &psta);
+ break;
+ case 2:
+ memcpy(pattrib->ra, pbssid, ETH_ALEN);
+ memcpy(pattrib->ta, psa, ETH_ALEN);
+ ret = sta2ap_data_frame(adapter, precv_frame, &psta);
+ break;
+ case 3:
+ memcpy(pattrib->ra, GetAddr1Ptr(ptr), ETH_ALEN);
+ memcpy(pattrib->ta, GetAddr2Ptr(ptr), ETH_ALEN);
+ ret = _FAIL;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" case 3\n"));
+ break;
+ default:
+ ret = _FAIL;
+ break;
+ }
+
+ if (ret == _FAIL) {
+ goto exit;
+ } else if (ret == RTW_RX_HANDLED) {
+ goto exit;
+ }
+
+ if (psta == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" after to_fr_ds_chk; psta==NULL\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ /* psta->rssi = prxcmd->rssi; */
+ /* psta->signal_quality = prxcmd->sq; */
+ precv_frame->u.hdr.psta = psta;
+
+ pattrib->amsdu = 0;
+ pattrib->ack_policy = 0;
+ /* parsing QC field */
+ if (pattrib->qos == 1) {
+ pattrib->priority = GetPriority((ptr + 24));
+ pattrib->ack_policy = GetAckpolicy((ptr + 24));
+ pattrib->amsdu = GetAMsdu((ptr + 24));
+ pattrib->hdrlen = pattrib->to_fr_ds == 3 ? 32 : 26;
+
+ if (pattrib->priority != 0 && pattrib->priority != 3)
+ adapter->recvpriv.bIsAnyNonBEPkts = true;
+ } else {
+ pattrib->priority = 0;
+ pattrib->hdrlen = pattrib->to_fr_ds == 3 ? 30 : 24;
+ }
+
+ if (pattrib->order)/* HT-CTRL 11n */
+ pattrib->hdrlen += 4;
+
+ precv_frame->u.hdr.preorder_ctrl = &psta->recvreorder_ctrl[pattrib->priority];
+
+ /* decache, drop duplicate recv packets */
+ if (recv_decache(precv_frame, bretry, &psta->sta_recvpriv.rxcache) == _FAIL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("decache : drop pkt\n"));
+ ret = _FAIL;
+ goto exit;
+ }
+
+ if (pattrib->privacy) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("validate_recv_data_frame:pattrib->privacy=%x\n", pattrib->privacy));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n ^^^^^^^^^^^IS_MCAST(pattrib->ra(0x%02x))=%d^^^^^^^^^^^^^^^6\n", pattrib->ra[0], IS_MCAST(pattrib->ra)));
+
+ GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, IS_MCAST(pattrib->ra));
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n pattrib->encrypt=%d\n", pattrib->encrypt));
+
+ SET_ICE_IV_LEN(pattrib->iv_len, pattrib->icv_len, pattrib->encrypt);
+ } else {
+ pattrib->encrypt = 0;
+ pattrib->iv_len = 0;
+ pattrib->icv_len = 0;
+ }
+
+exit:
+
+_func_exit_;
+
+ return ret;
+}
+
+static int validate_recv_frame(struct adapter *adapter, union recv_frame *precv_frame)
+{
+ /* Check the frame subtype, to/from DS, DA and BSSID, */
+
+ /* then check whether the rx seq/frag is duplicated. */
+
+ u8 type;
+ u8 subtype;
+ int retval = _SUCCESS;
+ u8 bDumpRxPkt;
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ u8 *ptr = precv_frame->u.hdr.rx_data;
+ u8 ver = (unsigned char) (*ptr)&0x3;
+ struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
+
+_func_enter_;
+
+ if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
+ int ch_set_idx = rtw_ch_set_search_ch(pmlmeext->channel_set, rtw_get_oper_ch(adapter));
+ if (ch_set_idx >= 0)
+ pmlmeext->channel_set[ch_set_idx].rx_count++;
+ }
+
+ /* protocol version check */
+ if (ver != 0) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("validate_recv_data_frame fail! (ver!=0)\n"));
+ retval = _FAIL;
+ goto exit;
+ }
+
+ type = GetFrameType(ptr);
+ subtype = GetFrameSubType(ptr); /* bit(7)~bit(2) */
+
+ pattrib->to_fr_ds = get_tofr_ds(ptr);
+
+ pattrib->frag_num = GetFragNum(ptr);
+ pattrib->seq_num = GetSequence(ptr);
+
+ pattrib->pw_save = GetPwrMgt(ptr);
+ pattrib->mfrag = GetMFrag(ptr);
+ pattrib->mdata = GetMData(ptr);
+ pattrib->privacy = GetPrivacy(ptr);
+ pattrib->order = GetOrder(ptr);
+
+ /* Dump rx packets */
+ rtw_hal_get_def_var(adapter, HAL_DEF_DBG_DUMP_RXPKT, &(bDumpRxPkt));
+ if (bDumpRxPkt == 1) {/* dump all rx packets */
+ int i;
+ DBG_88E("#############################\n");
+
+ for (i = 0; i < 64; i = i+8)
+ DBG_88E("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n", *(ptr+i),
+ *(ptr+i+1), *(ptr+i+2), *(ptr+i+3), *(ptr+i+4), *(ptr+i+5), *(ptr+i+6), *(ptr+i+7));
+ DBG_88E("#############################\n");
+ } else if (bDumpRxPkt == 2) {
+ if (type == WIFI_MGT_TYPE) {
+ int i;
+ DBG_88E("#############################\n");
+
+ for (i = 0; i < 64; i = i+8)
+ DBG_88E("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n", *(ptr+i),
+ *(ptr+i+1), *(ptr+i+2), *(ptr+i+3), *(ptr+i+4), *(ptr+i+5), *(ptr+i+6), *(ptr+i+7));
+ DBG_88E("#############################\n");
+ }
+ } else if (bDumpRxPkt == 3) {
+ if (type == WIFI_DATA_TYPE) {
+ int i;
+ DBG_88E("#############################\n");
+
+ for (i = 0; i < 64; i = i+8)
+ DBG_88E("%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X:\n", *(ptr+i),
+ *(ptr+i+1), *(ptr+i+2), *(ptr+i+3), *(ptr+i+4), *(ptr+i+5), *(ptr+i+6), *(ptr+i+7));
+ DBG_88E("#############################\n");
+ }
+ }
+ switch (type) {
+ case WIFI_MGT_TYPE: /* mgnt */
+ retval = validate_recv_mgnt_frame(adapter, precv_frame);
+ if (retval == _FAIL)
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("validate_recv_mgnt_frame fail\n"));
+ retval = _FAIL; /* only data frames return _SUCCESS */
+ break;
+ case WIFI_CTRL_TYPE: /* ctrl */
+ retval = validate_recv_ctrl_frame(adapter, precv_frame);
+ if (retval == _FAIL)
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("validate_recv_ctrl_frame fail\n"));
+ retval = _FAIL; /* only data frames return _SUCCESS */
+ break;
+ case WIFI_DATA_TYPE: /* data */
+ rtw_led_control(adapter, LED_CTL_RX);
+ pattrib->qos = (subtype & BIT(7)) ? 1 : 0;
+ retval = validate_recv_data_frame(adapter, precv_frame);
+ if (retval == _FAIL) {
+ struct recv_priv *precvpriv = &adapter->recvpriv;
+ precvpriv->rx_drop++;
+ }
+ break;
+ default:
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("validate_recv_data_frame fail! type= 0x%x\n", type));
+ retval = _FAIL;
+ break;
+ }
+
+exit:
+
+_func_exit_;
+
+ return retval;
+}
+
+/* remove the wlanhdr and add the eth_hdr */
+
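+/*
+ * Strip the 802.11 header (plus IV, and the trailing ICV when the frame is
+ * encrypted) and rebuild an Ethernet header in place. If the payload starts
+ * with an RFC 1042 or Bridge-Tunnel SNAP header, that header is removed so
+ * the original EtherType directly follows DA/SA; otherwise a length field is
+ * written instead (802.3-style frame).
+ */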
+static int wlanhdr_to_ethhdr (union recv_frame *precvframe)
+{
+ int rmv_len;
+ u16 eth_type, len;
+ __be16 be_tmp;
+ u8 bsnaphdr;
+ u8 *psnap_type;
+ struct ieee80211_snap_hdr *psnap;
+
+ int ret = _SUCCESS;
+ struct adapter *adapter = precvframe->u.hdr.adapter;
+ struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
+
+ u8 *ptr = get_recvframe_data(precvframe); /* point to frame_ctrl field */
+ struct rx_pkt_attrib *pattrib = &precvframe->u.hdr.attrib;
+
+_func_enter_;
+
+ if (pattrib->encrypt)
+ recvframe_pull_tail(precvframe, pattrib->icv_len);
+
+ psnap = (struct ieee80211_snap_hdr *)(ptr+pattrib->hdrlen + pattrib->iv_len);
+ psnap_type = ptr+pattrib->hdrlen + pattrib->iv_len+SNAP_SIZE;
+ /* convert hdr + possible LLC headers into Ethernet header */
+ if ((_rtw_memcmp(psnap, rtw_rfc1042_header, SNAP_SIZE) &&
+ (_rtw_memcmp(psnap_type, SNAP_ETH_TYPE_IPX, 2) == false) &&
+ (_rtw_memcmp(psnap_type, SNAP_ETH_TYPE_APPLETALK_AARP, 2) == false)) ||
+ _rtw_memcmp(psnap, rtw_bridge_tunnel_header, SNAP_SIZE)) {
+ /* remove RFC1042 or Bridge-Tunnel encapsulation and replace EtherType */
+ bsnaphdr = true;
+ } else {
+ /* Leave Ethernet header part of hdr and full payload */
+ bsnaphdr = false;
+ }
+
+ rmv_len = pattrib->hdrlen + pattrib->iv_len + (bsnaphdr ? SNAP_SIZE : 0);
+ len = precvframe->u.hdr.len - rmv_len;
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n===pattrib->hdrlen: %x, pattrib->iv_len:%x===\n\n", pattrib->hdrlen, pattrib->iv_len));
+
+ memcpy(&be_tmp, ptr+rmv_len, 2);
+ eth_type = ntohs(be_tmp); /* pattrib->ether_type */
+ pattrib->eth_type = eth_type;
+
+ if ((check_fwstate(pmlmepriv, WIFI_MP_STATE))) {
+ ptr += rmv_len;
+ *ptr = 0x87;
+ *(ptr+1) = 0x12;
+
+ eth_type = 0x8712;
+ /* append rx status for mp test packets */
+ ptr = recvframe_pull(precvframe, (rmv_len-sizeof(struct ethhdr)+2)-24);
+ memcpy(ptr, get_rxmem(precvframe), 24);
+ ptr += 24;
+ } else {
+ ptr = recvframe_pull(precvframe, (rmv_len-sizeof(struct ethhdr) + (bsnaphdr ? 2 : 0)));
+ }
+
+ memcpy(ptr, pattrib->dst, ETH_ALEN);
+ memcpy(ptr+ETH_ALEN, pattrib->src, ETH_ALEN);
+
+ if (!bsnaphdr) {
+ be_tmp = htons(len);
+ memcpy(ptr+12, &be_tmp, 2);
+ }
+
+_func_exit_;
+ return ret;
+}
+
+/* perform defrag */
+static union recv_frame *recvframe_defrag(struct adapter *adapter, struct __queue *defrag_q)
+{
+ struct list_head *plist, *phead;
+ u8 wlanhdr_offset;
+ u8 curfragnum;
+ struct recv_frame_hdr *pfhdr, *pnfhdr;
+ union recv_frame *prframe, *pnextrframe;
+ struct __queue *pfree_recv_queue;
+
+_func_enter_;
+
+ curfragnum = 0;
+ pfree_recv_queue = &adapter->recvpriv.free_recv_queue;
+
+ phead = get_list_head(defrag_q);
+ plist = get_next(phead);
+ prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pfhdr = &prframe->u.hdr;
+ rtw_list_delete(&(prframe->u.list));
+
+ if (curfragnum != pfhdr->attrib.frag_num) {
+ /* the first fragment number must be 0 */
+ /* free the whole queue */
+ rtw_free_recvframe(prframe, pfree_recv_queue);
+ rtw_free_recvframe_queue(defrag_q, pfree_recv_queue);
+
+ return NULL;
+ }
+
+ curfragnum++;
+
+ plist = get_list_head(defrag_q);
+
+ plist = get_next(plist);
+
+ while (rtw_end_of_queue_search(phead, plist) == false) {
+ pnextrframe = LIST_CONTAINOR(plist, union recv_frame , u);
+ pnfhdr = &pnextrframe->u.hdr;
+
+ /* check the fragment sequence (2nd ~n fragment frame) */
+
+ if (curfragnum != pnfhdr->attrib.frag_num) {
+ /* the fragment number must be increasing (after decache) */
+ /* release the defrag_q & prframe */
+ rtw_free_recvframe(prframe, pfree_recv_queue);
+ rtw_free_recvframe_queue(defrag_q, pfree_recv_queue);
+ return NULL;
+ }
+
+ curfragnum++;
+
+ /* copy the 2nd~n fragment frame's payload to the first fragment */
+ /* get the 2nd~last fragment frame's payload */
+
+ wlanhdr_offset = pnfhdr->attrib.hdrlen + pnfhdr->attrib.iv_len;
+
+ recvframe_pull(pnextrframe, wlanhdr_offset);
+
+ /* append to first fragment frame's tail (if privacy frame, pull the ICV) */
+ recvframe_pull_tail(prframe, pfhdr->attrib.icv_len);
+
+ /* memcpy */
+ memcpy(pfhdr->rx_tail, pnfhdr->rx_data, pnfhdr->len);
+
+ recvframe_put(prframe, pnfhdr->len);
+
+ pfhdr->attrib.icv_len = pnfhdr->attrib.icv_len;
+ plist = get_next(plist);
+ }
+
+ /* free the defrag_q queue and return the prframe */
+ rtw_free_recvframe_queue(defrag_q, pfree_recv_queue);
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("Performance defrag!!!!!\n"));
+
+_func_exit_;
+
+ return prframe;
+}
+
+/* check whether defragmentation is needed; if so, queue the frame to defrag_q */
+union recv_frame *recvframe_chk_defrag(struct adapter *padapter, union recv_frame *precv_frame)
+{
+ u8 ismfrag;
+ u8 fragnum;
+ u8 *psta_addr;
+ struct recv_frame_hdr *pfhdr;
+ struct sta_info *psta;
+ struct sta_priv *pstapriv;
+ struct list_head *phead;
+ union recv_frame *prtnframe = NULL;
+ struct __queue *pfree_recv_queue, *pdefrag_q;
+
+_func_enter_;
+
+ pstapriv = &padapter->stapriv;
+
+ pfhdr = &precv_frame->u.hdr;
+
+ pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
+
+ /* need to define struct of wlan header frame ctrl */
+ ismfrag = pfhdr->attrib.mfrag;
+ fragnum = pfhdr->attrib.frag_num;
+
+ psta_addr = pfhdr->attrib.ta;
+ psta = rtw_get_stainfo(pstapriv, psta_addr);
+ if (psta == NULL) {
+ u8 type = GetFrameType(pfhdr->rx_data);
+ if (type != WIFI_DATA_TYPE) {
+ psta = rtw_get_bcmc_stainfo(padapter);
+ pdefrag_q = &psta->sta_recvpriv.defrag_q;
+ } else {
+ pdefrag_q = NULL;
+ }
+ } else {
+ pdefrag_q = &psta->sta_recvpriv.defrag_q;
+ }
+
+ if ((ismfrag == 0) && (fragnum == 0))
+ prtnframe = precv_frame;/* isn't a fragment frame */
+
+ if (ismfrag == 1) {
+ /* 0~(n-1) fragment frame */
+ /* enqueue to defrag_q */
+ if (pdefrag_q != NULL) {
+ if (fragnum == 0) {
+ /* the first fragment */
+ if (_rtw_queue_empty(pdefrag_q) == false) {
+ /* free current defrag_q */
+ rtw_free_recvframe_queue(pdefrag_q, pfree_recv_queue);
+ }
+ }
+
+ /* Then enqueue the 0~(n-1) fragments into the defrag_q */
+
+ phead = get_list_head(pdefrag_q);
+ rtw_list_insert_tail(&pfhdr->list, phead);
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("Enqueuq: ismfrag=%d, fragnum=%d\n", ismfrag, fragnum));
+
+ prtnframe = NULL;
+ } else {
+ /* can't find this ta's defrag_queue, so free this recv_frame */
+ rtw_free_recvframe(precv_frame, pfree_recv_queue);
+ prtnframe = NULL;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("Free because pdefrag_q==NULL: ismfrag=%d, fragnum=%d\n", ismfrag, fragnum));
+ }
+ }
+
+ if ((ismfrag == 0) && (fragnum != 0)) {
+ /* the last fragment frame */
+ /* enqueue the last fragment */
+ if (pdefrag_q != NULL) {
+ phead = get_list_head(pdefrag_q);
+ rtw_list_insert_tail(&pfhdr->list, phead);
+
+ /* call recvframe_defrag to defrag */
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("defrag: ismfrag=%d, fragnum=%d\n", ismfrag, fragnum));
+ precv_frame = recvframe_defrag(padapter, pdefrag_q);
+ prtnframe = precv_frame;
+ } else {
+ /* can't find this ta's defrag_queue, so free this recv_frame */
+ rtw_free_recvframe(precv_frame, pfree_recv_queue);
+ prtnframe = NULL;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("Free because pdefrag_q==NULL: ismfrag=%d, fragnum=%d\n", ismfrag, fragnum));
+ }
+ }
+
+ if ((prtnframe != NULL) && (prtnframe->u.hdr.attrib.privacy)) {
+ /* after defrag we must check tkip mic code */
+ if (recvframe_chkmic(padapter, prtnframe) == _FAIL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvframe_chkmic(padapter, prtnframe)==_FAIL\n"));
+ rtw_free_recvframe(prtnframe, pfree_recv_queue);
+ prtnframe = NULL;
+ }
+ }
+
+_func_exit_;
+
+ return prtnframe;
+}
+
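+/*
+ * De-aggregate an A-MSDU. Each subframe carries a DA(6)/SA(6)/Length(2)
+ * header followed by the MSDU, and all subframes except the last are padded
+ * to a 4-byte boundary. Every MSDU is copied (or cloned) into its own skb,
+ * given a proper Ethernet header and passed to netif_rx().
+ */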
+static int amsdu_to_msdu(struct adapter *padapter, union recv_frame *prframe)
+{
+ int a_len, padding_len;
+ u16 eth_type, nSubframe_Length;
+ u8 nr_subframes, i;
+ unsigned char *pdata;
+ struct rx_pkt_attrib *pattrib;
+ unsigned char *data_ptr;
+ struct sk_buff *sub_skb, *subframes[MAX_SUBFRAME_COUNT];
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+ struct __queue *pfree_recv_queue = &(precvpriv->free_recv_queue);
+ int ret = _SUCCESS;
+ nr_subframes = 0;
+
+ pattrib = &prframe->u.hdr.attrib;
+
+ recvframe_pull(prframe, prframe->u.hdr.attrib.hdrlen);
+
+ if (prframe->u.hdr.attrib.iv_len > 0)
+ recvframe_pull(prframe, prframe->u.hdr.attrib.iv_len);
+
+ a_len = prframe->u.hdr.len;
+
+ pdata = prframe->u.hdr.rx_data;
+
+ while (a_len > ETH_HLEN) {
+ /* Offset 12 skips the two MAC addresses (DA, SA); the subframe length follows */
+ nSubframe_Length = RTW_GET_BE16(pdata + 12);
+
+ if (a_len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) {
+ DBG_88E("nRemain_Length is %d and nSubframe_Length is : %d\n", a_len, nSubframe_Length);
+ goto exit;
+ }
+
+ /* move the data pointer to the data content */
+ pdata += ETH_HLEN;
+ a_len -= ETH_HLEN;
+
+ /* Allocate new skb for releasing to upper layer */
+ sub_skb = dev_alloc_skb(nSubframe_Length + 12);
+ if (sub_skb) {
+ skb_reserve(sub_skb, 12);
+ data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length);
+ memcpy(data_ptr, pdata, nSubframe_Length);
+ } else {
+ sub_skb = skb_clone(prframe->u.hdr.pkt, GFP_ATOMIC);
+ if (sub_skb) {
+ sub_skb->data = pdata;
+ sub_skb->len = nSubframe_Length;
+ skb_set_tail_pointer(sub_skb, nSubframe_Length);
+ } else {
+ DBG_88E("skb_clone() Fail!!! , nr_subframes=%d\n", nr_subframes);
+ break;
+ }
+ }
+
+ subframes[nr_subframes++] = sub_skb;
+
+ if (nr_subframes >= MAX_SUBFRAME_COUNT) {
+ DBG_88E("ParseSubframe(): Too many Subframes! Packets dropped!\n");
+ break;
+ }
+
+ pdata += nSubframe_Length;
+ a_len -= nSubframe_Length;
+ if (a_len != 0) {
+ padding_len = 4 - ((nSubframe_Length + ETH_HLEN) & (4-1));
+ if (padding_len == 4) {
+ padding_len = 0;
+ }
+
+ if (a_len < padding_len) {
+ goto exit;
+ }
+ pdata += padding_len;
+ a_len -= padding_len;
+ }
+ }
+
+ for (i = 0; i < nr_subframes; i++) {
+ sub_skb = subframes[i];
+ /* convert hdr + possible LLC headers into Ethernet header */
+ eth_type = RTW_GET_BE16(&sub_skb->data[6]);
+ if (sub_skb->len >= 8 &&
+ ((_rtw_memcmp(sub_skb->data, rtw_rfc1042_header, SNAP_SIZE) &&
+ eth_type != ETH_P_AARP && eth_type != ETH_P_IPX) ||
+ _rtw_memcmp(sub_skb->data, rtw_bridge_tunnel_header, SNAP_SIZE))) {
+ /* remove RFC1042 or Bridge-Tunnel encapsulation and replace EtherType */
+ skb_pull(sub_skb, SNAP_SIZE);
+ memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src, ETH_ALEN);
+ memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst, ETH_ALEN);
+ } else {
+ __be16 len;
+ /* Leave Ethernet header part of hdr and full payload */
+ len = htons(sub_skb->len);
+ memcpy(skb_push(sub_skb, 2), &len, 2);
+ memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->src, ETH_ALEN);
+ memcpy(skb_push(sub_skb, ETH_ALEN), pattrib->dst, ETH_ALEN);
+ }
+
+ /* Indicate the packets to the upper layer */
+ if (sub_skb) {
+ /* Insert NAT2.5 RX here! */
+ sub_skb->protocol = eth_type_trans(sub_skb, padapter->pnetdev);
+ sub_skb->dev = padapter->pnetdev;
+
+ sub_skb->ip_summed = CHECKSUM_NONE;
+
+ netif_rx(sub_skb);
+ }
+ }
+
+exit:
+
+ prframe->u.hdr.len = 0;
+ rtw_free_recvframe(prframe, pfree_recv_queue);/* free this recv_frame */
+
+ return ret;
+}
+
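+/*
+ * Block-ack reorder window check. Sequence numbers are 12 bits (mod 4096)
+ * and the receive window is [indicate_seq, indicate_seq + wsize - 1].
+ * Example: with wsize = 64 and indicate_seq = 100 the window is [100, 163];
+ * an incoming seq_num of 200 lies beyond WinEnd, so indicate_seq is moved
+ * to 200 + 1 - 64 = 137 and the window becomes [137, 200].
+ */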
+static int check_indicate_seq(struct recv_reorder_ctrl *preorder_ctrl, u16 seq_num)
+{
+ u8 wsize = preorder_ctrl->wsize_b;
+ u16 wend = (preorder_ctrl->indicate_seq + wsize - 1) & 0xFFF;/* 4096; */
+
+ /* Rx Reorder initialize condition. */
+ if (preorder_ctrl->indicate_seq == 0xFFFF)
+ preorder_ctrl->indicate_seq = seq_num;
+
+ /* Drop packets whose SeqNum is smaller than WinStart */
+ if (SN_LESS(seq_num, preorder_ctrl->indicate_seq))
+ return false;
+
+ /* */
+ /* Sliding window manipulation. Conditions include: */
+ /* 1. Incoming SeqNum equal to WinStart => window shifts by 1 */
+ /* 2. Incoming SeqNum larger than WinEnd => window shifts by N */
+ /* */
+ if (SN_EQUAL(seq_num, preorder_ctrl->indicate_seq)) {
+ preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) & 0xFFF;
+ } else if (SN_LESS(wend, seq_num)) {
+ if (seq_num >= (wsize - 1))
+ preorder_ctrl->indicate_seq = seq_num + 1 - wsize;
+ else
+ preorder_ctrl->indicate_seq = 0xFFF - (wsize - (seq_num + 1)) + 1;
+ }
+
+ return true;
+}
+
+int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, union recv_frame *prframe);
+int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl, union recv_frame *prframe)
+{
+ struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
+ struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
+ struct list_head *phead, *plist;
+ union recv_frame *pnextrframe;
+ struct rx_pkt_attrib *pnextattrib;
+
+ phead = get_list_head(ppending_recvframe_queue);
+ plist = get_next(phead);
+
+ while (rtw_end_of_queue_search(phead, plist) == false) {
+ pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pnextattrib = &pnextrframe->u.hdr.attrib;
+
+ if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num))
+ plist = get_next(plist);
+ else if (SN_EQUAL(pnextattrib->seq_num, pattrib->seq_num))
+ return false;
+ else
+ break;
+ }
+
+ rtw_list_delete(&(prframe->u.hdr.list));
+
+ rtw_list_insert_tail(&(prframe->u.hdr.list), plist);
+ return true;
+}
+
+static int recv_indicatepkts_in_order(struct adapter *padapter, struct recv_reorder_ctrl *preorder_ctrl, int bforced)
+{
+ struct list_head *phead, *plist;
+ union recv_frame *prframe;
+ struct rx_pkt_attrib *pattrib;
+ int bPktInBuf = false;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+ struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
+
+ phead = get_list_head(ppending_recvframe_queue);
+ plist = get_next(phead);
+
+ /* Handle the forced indicate case. */
+ if (bforced) {
+ if (rtw_is_list_empty(phead))
+ return true;
+
+ prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pattrib = &prframe->u.hdr.attrib;
+ preorder_ctrl->indicate_seq = pattrib->seq_num;
+ }
+
+ /* Prepare the indication list and indicate. */
+ /* Check whether any packet needs to be indicated. */
+ while (!rtw_is_list_empty(phead)) {
+ prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pattrib = &prframe->u.hdr.attrib;
+
+ if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
+ ("recv_indicatepkts_in_order: indicate=%d seq=%d amsdu=%d\n",
+ preorder_ctrl->indicate_seq, pattrib->seq_num, pattrib->amsdu));
+ plist = get_next(plist);
+ rtw_list_delete(&(prframe->u.hdr.list));
+
+ if (SN_EQUAL(preorder_ctrl->indicate_seq, pattrib->seq_num))
+ preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1) & 0xFFF;
+
+ /* Set this as a lock to make sure that only one thread is indicating packet. */
+
+ /* indicate this recv_frame */
+ if (!pattrib->amsdu) {
+ if ((!padapter->bDriverStopped) &&
+ (!padapter->bSurpriseRemoved))
+ rtw_recv_indicatepkt(padapter, prframe);/* indicate this recv_frame */
+ } else if (pattrib->amsdu == 1) {
+ if (amsdu_to_msdu(padapter, prframe) != _SUCCESS)
+ rtw_free_recvframe(prframe, &precvpriv->free_recv_queue);
+ } else {
+ /* error condition; */
+ }
+
+ /* Update local variables. */
+ bPktInBuf = false;
+ } else {
+ bPktInBuf = true;
+ break;
+ }
+ }
+ return bPktInBuf;
+}
+
+static int recv_indicatepkt_reorder(struct adapter *padapter, union recv_frame *prframe)
+{
+ unsigned long irql;
+ int retval = _SUCCESS;
+ struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
+ struct recv_reorder_ctrl *preorder_ctrl = prframe->u.hdr.preorder_ctrl;
+ struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
+
+ if (!pattrib->amsdu) {
+ /* s1. */
+ wlanhdr_to_ethhdr(prframe);
+
+ if ((pattrib->qos != 1) || (pattrib->eth_type == 0x0806) ||
+ (pattrib->ack_policy != 0)) {
+ if ((!padapter->bDriverStopped) &&
+ (!padapter->bSurpriseRemoved)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("@@@@ recv_indicatepkt_reorder -recv_func recv_indicatepkt\n"));
+
+ rtw_recv_indicatepkt(padapter, prframe);
+ return _SUCCESS;
+ }
+
+ return _FAIL;
+ }
+
+ if (!preorder_ctrl->enable) {
+ /* indicate this recv_frame */
+ preorder_ctrl->indicate_seq = pattrib->seq_num;
+ rtw_recv_indicatepkt(padapter, prframe);
+
+ preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1)%4096;
+ return _SUCCESS;
+ }
+ } else if (pattrib->amsdu == 1) { /* temporary filter: A-MSDU inside an A-MPDU is not supported */
+ if (!preorder_ctrl->enable) {
+ preorder_ctrl->indicate_seq = pattrib->seq_num;
+ retval = amsdu_to_msdu(padapter, prframe);
+
+ preorder_ctrl->indicate_seq = (preorder_ctrl->indicate_seq + 1)%4096;
+ return retval;
+ }
+ }
+
+ _enter_critical_bh(&ppending_recvframe_queue->lock, &irql);
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_,
+ ("recv_indicatepkt_reorder: indicate=%d seq=%d\n",
+ preorder_ctrl->indicate_seq, pattrib->seq_num));
+
+ /* s2. check if winstart_b (indicate_seq) needs to be updated */
+ if (!check_indicate_seq(preorder_ctrl, pattrib->seq_num)) {
+ rtw_recv_indicatepkt(padapter, prframe);
+
+ _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+
+ goto _success_exit;
+ }
+
+ /* s3. Insert all packet into Reorder Queue to maintain its ordering. */
+ if (!enqueue_reorder_recvframe(preorder_ctrl, prframe))
+ goto _err_exit;
+
+ /* s4. */
+ /* Indication process. */
+ /* After Packet dropping and Sliding Window shifting as above, we can now just indicate the packets */
+ /* with the SeqNum smaller than latest WinStart and buffer other packets. */
+ /* */
+ /* For Rx Reorder condition: */
+ /* 1. All packets with SeqNum smaller than WinStart => Indicate */
+ /* 2. All packets with SeqNum larger than or equal to WinStart => Buffer it. */
+ /* */
+
+ /* recv_indicatepkts_in_order(padapter, preorder_ctrl, true); */
+ if (recv_indicatepkts_in_order(padapter, preorder_ctrl, false)) {
+ _set_timer(&preorder_ctrl->reordering_ctrl_timer, REORDER_WAIT_TIME);
+ _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ } else {
+ _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ _cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer);
+ }
+
+_success_exit:
+
+ return _SUCCESS;
+
+_err_exit:
+
+ _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+
+ return _FAIL;
+}
+
+void rtw_reordering_ctrl_timeout_handler(void *pcontext)
+{
+ unsigned long irql;
+ struct recv_reorder_ctrl *preorder_ctrl = (struct recv_reorder_ctrl *)pcontext;
+ struct adapter *padapter = preorder_ctrl->padapter;
+ struct __queue *ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
+
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved)
+ return;
+
+ _enter_critical_bh(&ppending_recvframe_queue->lock, &irql);
+
+ if (recv_indicatepkts_in_order(padapter, preorder_ctrl, true) == true)
+ _set_timer(&preorder_ctrl->reordering_ctrl_timer, REORDER_WAIT_TIME);
+
+ _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+}
+
+static int process_recv_indicatepkts(struct adapter *padapter, union recv_frame *prframe)
+{
+ int retval = _SUCCESS;
+ /* struct recv_priv *precvpriv = &padapter->recvpriv; */
+ /* struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib; */
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+
+ if (phtpriv->ht_option) { /* B/G/N Mode */
+ /* prframe->u.hdr.preorder_ctrl = &precvpriv->recvreorder_ctrl[pattrib->priority]; */
+
+ if (recv_indicatepkt_reorder(padapter, prframe) != _SUCCESS) {
+ /* this includes the A-MPDU Rx ordering buffer control */
+ if ((!padapter->bDriverStopped) &&
+ (!padapter->bSurpriseRemoved)) {
+ retval = _FAIL;
+ return retval;
+ }
+ }
+ } else { /* B/G mode */
+ retval = wlanhdr_to_ethhdr (prframe);
+ if (retval != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("wlanhdr_to_ethhdr: drop pkt\n"));
+ return retval;
+ }
+
+ if ((!padapter->bDriverStopped) &&
+ (!padapter->bSurpriseRemoved)) {
+ /* indicate this recv_frame */
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("@@@@ process_recv_indicatepkts- recv_func recv_indicatepkt\n"));
+ rtw_recv_indicatepkt(padapter, prframe);
+ } else {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("@@@@ process_recv_indicatepkts- recv_func free_indicatepkt\n"));
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_notice_, ("recv_func:bDriverStopped(%d) OR bSurpriseRemoved(%d)", padapter->bDriverStopped, padapter->bSurpriseRemoved));
+ retval = _FAIL;
+ return retval;
+ }
+ }
+
+ return retval;
+}
+
+static int recv_func_prehandle(struct adapter *padapter, union recv_frame *rframe)
+{
+ int ret = _SUCCESS;
+ struct rx_pkt_attrib *pattrib = &rframe->u.hdr.attrib;
+ struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ if (padapter->registrypriv.mp_mode == 1) {
+ if ((check_fwstate(pmlmepriv, WIFI_MP_STATE) == true)) { /* padapter->mppriv.check_mp_pkt == 0)) */
+ if (pattrib->crc_err == 1)
+ padapter->mppriv.rx_crcerrpktcount++;
+ else
+ padapter->mppriv.rx_pktcount++;
+
+ if (check_fwstate(pmlmepriv, WIFI_MP_LPBK_STATE) == false) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_alert_, ("MP - Not in loopback mode , drop pkt\n"));
+ ret = _FAIL;
+ rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */
+ goto exit;
+ }
+ }
+ }
+
+ /* check the frame ctrl field and decache */
+ ret = validate_recv_frame(padapter, rframe);
+ if (ret != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recv_func: validate_recv_frame fail! drop pkt\n"));
+ rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static int recv_func_posthandle(struct adapter *padapter, union recv_frame *prframe)
+{
+ int ret = _SUCCESS;
+ union recv_frame *orig_prframe = prframe;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+ struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
+
+ /* DATA FRAME */
+ rtw_led_control(padapter, LED_CTL_RX);
+
+ prframe = decryptor(padapter, prframe);
+ if (prframe == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("decryptor: drop pkt\n"));
+ ret = _FAIL;
+ goto _recv_data_drop;
+ }
+
+ prframe = recvframe_chk_defrag(padapter, prframe);
+ if (prframe == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvframe_chk_defrag: drop pkt\n"));
+ goto _recv_data_drop;
+ }
+
+ prframe = portctrl(padapter, prframe);
+ if (prframe == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("portctrl: drop pkt\n"));
+ ret = _FAIL;
+ goto _recv_data_drop;
+ }
+
+ count_rx_stats(padapter, prframe, NULL);
+
+ ret = process_recv_indicatepkts(padapter, prframe);
+ if (ret != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recv_func: process_recv_indicatepkts fail!\n"));
+ rtw_free_recvframe(orig_prframe, pfree_recv_queue);/* free this recv_frame */
+ goto _recv_data_drop;
+ }
+ return ret;
+
+_recv_data_drop:
+ precvpriv->rx_drop++;
+ return ret;
+}
+
+static int recv_func(struct adapter *padapter, union recv_frame *rframe)
+{
+ int ret;
+ struct rx_pkt_attrib *prxattrib = &rframe->u.hdr.attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_priv *mlmepriv = &padapter->mlmepriv;
+
+ /* check whether the uc_swdec_pending_queue needs to be handled */
+ if (check_fwstate(mlmepriv, WIFI_STATION_STATE) && psecuritypriv->busetkipkey) {
+ union recv_frame *pending_frame;
+
+ while ((pending_frame = rtw_alloc_recvframe(&padapter->recvpriv.uc_swdec_pending_queue))) {
+ if (recv_func_posthandle(padapter, pending_frame) == _SUCCESS)
+ DBG_88E("%s: dequeue uc_swdec_pending_queue\n", __func__);
+ }
+ }
+
+ ret = recv_func_prehandle(padapter, rframe);
+
+ if (ret == _SUCCESS) {
+ /* check whether the frame needs to be enqueued into uc_swdec_pending_queue */
+ if (check_fwstate(mlmepriv, WIFI_STATION_STATE) &&
+ !IS_MCAST(prxattrib->ra) && prxattrib->encrypt > 0 &&
+ (prxattrib->bdecrypted == 0 || psecuritypriv->sw_decrypt) &&
+ !is_wep_enc(psecuritypriv->dot11PrivacyAlgrthm) &&
+ !psecuritypriv->busetkipkey) {
+ rtw_enqueue_recvframe(rframe, &padapter->recvpriv.uc_swdec_pending_queue);
+ DBG_88E("%s: no key, enqueue uc_swdec_pending_queue\n", __func__);
+ goto exit;
+ }
+
+ ret = recv_func_posthandle(padapter, rframe);
+ }
+
+exit:
+ return ret;
+}
+
+s32 rtw_recv_entry(union recv_frame *precvframe)
+{
+ struct adapter *padapter;
+ struct recv_priv *precvpriv;
+ s32 ret = _SUCCESS;
+
+_func_enter_;
+
+ padapter = precvframe->u.hdr.adapter;
+
+ precvpriv = &padapter->recvpriv;
+
+ ret = recv_func(padapter, precvframe);
+ if (ret == _FAIL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("rtw_recv_entry: recv_func return fail!!!\n"));
+ goto _recv_entry_drop;
+ }
+
+ precvpriv->rx_pkts++;
+
+_func_exit_;
+
+ return ret;
+
+_recv_entry_drop:
+
+ if (padapter->registrypriv.mp_mode == 1)
+ padapter->mppriv.rx_pktloss = precvpriv->rx_drop;
+
+_func_exit_;
+
+ return ret;
+}
+
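+/*
+ * Periodic smoothing of the signal statistics: the new percentage is
+ * new = roundup((sample + (_alpha - 1) * old) / _alpha), i.e. an IIR
+ * low-pass that weights the latest sample by 1/_alpha, clamped to 100
+ * and only applied when not scanning. The smoothed strength is also
+ * translated to dBm for recvpriv->rssi.
+ */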
+void rtw_signal_stat_timer_hdl(RTW_TIMER_HDL_ARGS)
+{
+ struct adapter *adapter = (struct adapter *)FunctionContext;
+ struct recv_priv *recvpriv = &adapter->recvpriv;
+
+ u32 tmp_s, tmp_q;
+ u8 avg_signal_strength = 0;
+ u8 avg_signal_qual = 0;
+ u8 _alpha = 3; /* this value is based on converging_constant = 5000 and sampling_interval = 1000 */
+
+ if (adapter->recvpriv.is_signal_dbg) {
+ /* copy the user-specified debug value, signal_strength_dbg, into signal_strength and rssi */
+ adapter->recvpriv.signal_strength = adapter->recvpriv.signal_strength_dbg;
+ adapter->recvpriv.rssi = (s8)translate_percentage_to_dbm((u8)adapter->recvpriv.signal_strength_dbg);
+ } else {
+ if (recvpriv->signal_strength_data.update_req == 0) {/* update_req is clear, means we got rx */
+ avg_signal_strength = recvpriv->signal_strength_data.avg_val;
+ /* after the avg_vals are acquired, we can restart the signal statistics */
+ recvpriv->signal_strength_data.update_req = 1;
+ }
+
+ if (recvpriv->signal_qual_data.update_req == 0) {/* update_req is clear, means we got rx */
+ avg_signal_qual = recvpriv->signal_qual_data.avg_val;
+ /* after the avg_vals are acquired, we can restart the signal statistics */
+ recvpriv->signal_qual_data.update_req = 1;
+ }
+
+ /* update value of signal_strength, rssi, signal_qual */
+ if (check_fwstate(&adapter->mlmepriv, _FW_UNDER_SURVEY) == false) {
+ tmp_s = (avg_signal_strength+(_alpha-1)*recvpriv->signal_strength);
+ if (tmp_s % _alpha)
+ tmp_s = tmp_s/_alpha + 1;
+ else
+ tmp_s = tmp_s/_alpha;
+ if (tmp_s > 100)
+ tmp_s = 100;
+
+ tmp_q = (avg_signal_qual+(_alpha-1)*recvpriv->signal_qual);
+ if (tmp_q % _alpha)
+ tmp_q = tmp_q/_alpha + 1;
+ else
+ tmp_q = tmp_q/_alpha;
+ if (tmp_q > 100)
+ tmp_q = 100;
+
+ recvpriv->signal_strength = tmp_s;
+ recvpriv->rssi = (s8)translate_percentage_to_dbm(tmp_s);
+ recvpriv->signal_qual = tmp_q;
+ }
+ }
+ rtw_set_signal_stat_timer(recvpriv);
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_rf.c b/drivers/staging/rtl8188eu/core/rtw_rf.c
new file mode 100644
index 00000000000..1170dd001c1
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_rf.c
@@ -0,0 +1,89 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_RF_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <xmit_osdep.h>
+
+
+struct ch_freq {
+ u32 channel;
+ u32 frequency;
+};
+
+static struct ch_freq ch_freq_map[] = {
+ {1, 2412}, {2, 2417}, {3, 2422}, {4, 2427}, {5, 2432},
+ {6, 2437}, {7, 2442}, {8, 2447}, {9, 2452}, {10, 2457},
+ {11, 2462}, {12, 2467}, {13, 2472}, {14, 2484},
+ /* UNII */
+ {36, 5180}, {40, 5200}, {44, 5220}, {48, 5240}, {52, 5260},
+ {56, 5280}, {60, 5300}, {64, 5320}, {149, 5745}, {153, 5765},
+ {157, 5785}, {161, 5805}, {165, 5825}, {167, 5835}, {169, 5845},
+ {171, 5855}, {173, 5865},
+ /* HiperLAN2 */
+ {100, 5500}, {104, 5520}, {108, 5540}, {112, 5560}, {116, 5580},
+ {120, 5600}, {124, 5620}, {128, 5640}, {132, 5660}, {136, 5680},
+ {140, 5700},
+ /* Japan MMAC */
+ {34, 5170}, {38, 5190}, {42, 5210}, {46, 5230},
+ /* Japan */
+ {184, 4920}, {188, 4940}, {192, 4960}, {196, 4980},
+ {208, 5040},/* Japan, means J08 */
+ {212, 5060},/* Japan, means J12 */
+ {216, 5080},/* Japan, means J16 */
+};
+
+static int ch_freq_map_num = (sizeof(ch_freq_map) / sizeof(struct ch_freq));
+
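+/* Linear lookup in ch_freq_map; falls back to 2412 MHz (channel 1) when the
+ * channel is unknown, and rtw_freq2ch() below falls back to channel 1. */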
+u32 rtw_ch2freq(u32 channel)
+{
+ u8 i;
+ u32 freq = 0;
+
+ for (i = 0; i < ch_freq_map_num; i++) {
+ if (channel == ch_freq_map[i].channel) {
+ freq = ch_freq_map[i].frequency;
+ break;
+ }
+ }
+ if (i == ch_freq_map_num)
+ freq = 2412;
+
+ return freq;
+}
+
+u32 rtw_freq2ch(u32 freq)
+{
+ u8 i;
+ u32 ch = 0;
+
+ for (i = 0; i < ch_freq_map_num; i++) {
+ if (freq == ch_freq_map[i].frequency) {
+ ch = ch_freq_map[i].channel;
+ break;
+ }
+ }
+ if (i == ch_freq_map_num)
+ ch = 1;
+
+ return ch;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
new file mode 100644
index 00000000000..0f076d0cb5f
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -0,0 +1,1779 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_SECURITY_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wifi.h>
+#include <osdep_intf.h>
+
+/* WEP related ===== */
+
+#define CRC32_POLY 0x04c11db7
+
+struct arc4context {
+ u32 x;
+ u32 y;
+ u8 state[256];
+};
+
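+/*
+ * ARC4 stream cipher used for WEP/TKIP: arcfour_init() runs the key
+ * scheduling algorithm over the per-frame key, arcfour_byte() produces one
+ * keystream byte, and arcfour_encrypt() XORs the keystream over the buffer
+ * (the same routine therefore also decrypts).
+ */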
+static void arcfour_init(struct arc4context *parc4ctx, u8 *key, u32 key_len)
+{
+ u32 t, u;
+ u32 keyindex;
+ u32 stateindex;
+ u8 *state;
+ u32 counter;
+_func_enter_;
+ state = parc4ctx->state;
+ parc4ctx->x = 0;
+ parc4ctx->y = 0;
+ for (counter = 0; counter < 256; counter++)
+ state[counter] = (u8)counter;
+ keyindex = 0;
+ stateindex = 0;
+ for (counter = 0; counter < 256; counter++) {
+ t = state[counter];
+ stateindex = (stateindex + key[keyindex] + t) & 0xff;
+ u = state[stateindex];
+ state[stateindex] = (u8)t;
+ state[counter] = (u8)u;
+ if (++keyindex >= key_len)
+ keyindex = 0;
+ }
+_func_exit_;
+}
+
+static u32 arcfour_byte(struct arc4context *parc4ctx)
+{
+ u32 x;
+ u32 y;
+ u32 sx, sy;
+ u8 *state;
+_func_enter_;
+ state = parc4ctx->state;
+ x = (parc4ctx->x + 1) & 0xff;
+ sx = state[x];
+ y = (sx + parc4ctx->y) & 0xff;
+ sy = state[y];
+ parc4ctx->x = x;
+ parc4ctx->y = y;
+ state[y] = (u8)sx;
+ state[x] = (u8)sy;
+_func_exit_;
+ return state[(sx + sy) & 0xff];
+}
+
+static void arcfour_encrypt(struct arc4context *parc4ctx, u8 *dest, u8 *src, u32 len)
+{
+ u32 i;
+_func_enter_;
+ for (i = 0; i < len; i++)
+ dest[i] = src[i] ^ (unsigned char)arcfour_byte(parc4ctx);
+_func_exit_;
+}
+
+static int bcrc32initialized;
+static u32 crc32_table[256];
+
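+/*
+ * Table-driven CRC-32 for the WEP/TKIP ICV. The table is generated MSB-first
+ * from the polynomial 0x04c11db7 and then bit-reversed per byte, yielding the
+ * reflected (LSB-first) table used by getcrc32(); the register is preloaded
+ * with 0xffffffff and the complement is transmitted, per the CRC-32 spec.
+ */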
+static u8 crc32_reverseBit(u8 data)
+{
+ return (u8)((data<<7)&0x80) | ((data<<5)&0x40) | ((data<<3)&0x20) |
+ ((data<<1)&0x10) | ((data>>1)&0x08) | ((data>>3)&0x04) |
+ ((data>>5)&0x02) | ((data>>7)&0x01);
+}
+
+static void crc32_init(void)
+{
+_func_enter_;
+ if (bcrc32initialized == 1) {
+ goto exit;
+ } else {
+ int i, j;
+ u32 c;
+ u8 *p = (u8 *)&c, *p1;
+ u8 k;
+
+ c = 0x12340000;
+
+ for (i = 0; i < 256; ++i) {
+ k = crc32_reverseBit((u8)i);
+ for (c = ((u32)k) << 24, j = 8; j > 0; --j)
+ c = c & 0x80000000 ? (c << 1) ^ CRC32_POLY : (c << 1);
+ p1 = (u8 *)&crc32_table[i];
+
+ p1[0] = crc32_reverseBit(p[3]);
+ p1[1] = crc32_reverseBit(p[2]);
+ p1[2] = crc32_reverseBit(p[1]);
+ p1[3] = crc32_reverseBit(p[0]);
+ }
+ bcrc32initialized = 1;
+ }
+exit:
+_func_exit_;
+}
+
+static __le32 getcrc32(u8 *buf, int len)
+{
+ u8 *p;
+ u32 crc;
+_func_enter_;
+ if (bcrc32initialized == 0)
+ crc32_init();
+
+ crc = 0xffffffff; /* preload shift register, per CRC-32 spec */
+
+ for (p = buf; len > 0; ++p, --len)
+ crc = crc32_table[(crc ^ *p) & 0xff] ^ (crc >> 8);
+_func_exit_;
+ return cpu_to_le32(~crc); /* transmit complement, per CRC-32 spec */
+}
+
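+/*
+ * WEP encryption, applied per fragment: the RC4 seed is the 3-byte IV taken
+ * from the frame followed by the configured default key (40- or 104-bit),
+ * the ICV is a CRC-32 over the plaintext payload, and payload plus ICV are
+ * then RC4-encrypted in place.
+ */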
+/*
+ Need to consider the fragment situation
+*/
+void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
+{ /* exclude ICV */
+
+ unsigned char crc[4];
+ struct arc4context mycontext;
+
+ int curfragnum, length;
+ u32 keylength;
+
+ u8 *pframe, *payload, *iv; /* wepkey */
+ u8 wepkey[16];
+ u8 hw_hdr_offset = 0;
+ struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+_func_enter_;
+
+ if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
+ return;
+
+ hw_hdr_offset = TXDESC_SIZE +
+ (((struct xmit_frame *)pxmitframe)->pkt_offset * PACKET_OFFSET_SZ);
+
+ pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
+
+ /* start to encrypt each fragment */
+ if ((pattrib->encrypt == _WEP40_) || (pattrib->encrypt == _WEP104_)) {
+ keylength = psecuritypriv->dot11DefKeylen[psecuritypriv->dot11PrivacyKeyIndex];
+
+ for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
+ iv = pframe+pattrib->hdrlen;
+ memcpy(&wepkey[0], iv, 3);
+ memcpy(&wepkey[3], &psecuritypriv->dot11DefKey[psecuritypriv->dot11PrivacyKeyIndex].skey[0], keylength);
+ payload = pframe+pattrib->iv_len+pattrib->hdrlen;
+
+ if ((curfragnum+1) == pattrib->nr_frags) { /* the last fragment */
+ length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
+
+ *((__le32 *)crc) = getcrc32(payload, length);
+
+ arcfour_init(&mycontext, wepkey, 3+keylength);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+ arcfour_encrypt(&mycontext, payload+length, crc, 4);
+ } else {
+ length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
+ *((__le32 *)crc) = getcrc32(payload, length);
+ arcfour_init(&mycontext, wepkey, 3+keylength);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+ arcfour_encrypt(&mycontext, payload+length, crc, 4);
+
+ pframe += pxmitpriv->frag_len;
+ pframe = (u8 *)RND4((size_t)(pframe));
+ }
+ }
+ }
+
+_func_exit_;
+}
+
+void rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
+{
+ /* exclude ICV */
+ u8 crc[4];
+ struct arc4context mycontext;
+ int length;
+ u32 keylength;
+ u8 *pframe, *payload, *iv, wepkey[16];
+ u8 keyindex;
+ struct rx_pkt_attrib *prxattrib = &(((union recv_frame *)precvframe)->u.hdr.attrib);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+
+_func_enter_;
+
+ pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
+
+ /* start to decrypt recvframe */
+ if ((prxattrib->encrypt == _WEP40_) || (prxattrib->encrypt == _WEP104_)) {
+ iv = pframe+prxattrib->hdrlen;
+ keyindex = prxattrib->key_index;
+ keylength = psecuritypriv->dot11DefKeylen[keyindex];
+ memcpy(&wepkey[0], iv, 3);
+ memcpy(&wepkey[3], &psecuritypriv->dot11DefKey[keyindex].skey[0], keylength);
+ length = ((union recv_frame *)precvframe)->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len;
+
+ payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
+
+ /* decrypt the payload, including the ICV */
+ arcfour_init(&mycontext, wepkey, 3+keylength);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+
+ /* calculate the ICV and compare it with the received one */
+ *((__le32 *)crc) = getcrc32(payload, length - 4);
+
+ if (crc[3] != payload[length-1] ||
+ crc[2] != payload[length-2] ||
+ crc[1] != payload[length-3] ||
+ crc[0] != payload[length-4]) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+ ("rtw_wep_decrypt:icv error crc (%4ph)!=payload (%4ph)\n",
+ &crc, &payload[length-4]));
+ }
+ }
+_func_exit_;
+ return;
+}
+
+/* 3 ===== TKIP related ===== */
+
+static u32 secmicgetuint32(u8 *p)
+/* Convert from Byte[] to u32 in a portable way */
+{
+ s32 i;
+ u32 res = 0;
+_func_enter_;
+ for (i = 0; i < 4; i++)
+ res |= ((u32)(*p++)) << (8*i);
+_func_exit_;
+ return res;
+}
+
+static void secmicputuint32(u8 *p, u32 val)
+/* Convert from u32 to Byte[] in a portable way */
+{
+ long i;
+_func_enter_;
+ for (i = 0; i < 4; i++) {
+ *p++ = (u8) (val & 0xff);
+ val >>= 8;
+ }
+_func_exit_;
+}
+
+static void secmicclear(struct mic_data *pmicdata)
+{
+/* Reset the state to the empty message. */
+_func_enter_;
+ pmicdata->L = pmicdata->K0;
+ pmicdata->R = pmicdata->K1;
+ pmicdata->nBytesInM = 0;
+ pmicdata->M = 0;
+_func_exit_;
+}
+
+void rtw_secmicsetkey(struct mic_data *pmicdata, u8 *key)
+{
+ /* Set the key */
+_func_enter_;
+ pmicdata->K0 = secmicgetuint32(key);
+ pmicdata->K1 = secmicgetuint32(key + 4);
+ /* and reset the message */
+ secmicclear(pmicdata);
+_func_exit_;
+}
+
+void rtw_secmicappendbyte(struct mic_data *pmicdata, u8 b)
+{
+_func_enter_;
+ /* Append the byte to our word-sized buffer */
+ pmicdata->M |= ((unsigned long)b) << (8*pmicdata->nBytesInM);
+ pmicdata->nBytesInM++;
+ /* Process the word if it is full. */
+ if (pmicdata->nBytesInM >= 4) {
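+		/* Michael block function: mix the completed 32-bit word into (L, R) with XOR, rotate and add steps */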
+ pmicdata->L ^= pmicdata->M;
+ pmicdata->R ^= ROL32(pmicdata->L, 17);
+ pmicdata->L += pmicdata->R;
+ pmicdata->R ^= ((pmicdata->L & 0xff00ff00) >> 8) | ((pmicdata->L & 0x00ff00ff) << 8);
+ pmicdata->L += pmicdata->R;
+ pmicdata->R ^= ROL32(pmicdata->L, 3);
+ pmicdata->L += pmicdata->R;
+ pmicdata->R ^= ROR32(pmicdata->L, 2);
+ pmicdata->L += pmicdata->R;
+ /* Clear the buffer */
+ pmicdata->M = 0;
+ pmicdata->nBytesInM = 0;
+ }
+_func_exit_;
+}
+
+void rtw_secmicappend(struct mic_data *pmicdata, u8 *src, u32 nbytes)
+{
+_func_enter_;
+ /* This is simple */
+ while (nbytes > 0) {
+ rtw_secmicappendbyte(pmicdata, *src++);
+ nbytes--;
+ }
+_func_exit_;
+}
+
+void rtw_secgetmic(struct mic_data *pmicdata, u8 *dst)
+{
+_func_enter_;
+ /* Append the minimum padding */
+ rtw_secmicappendbyte(pmicdata, 0x5a);
+ rtw_secmicappendbyte(pmicdata, 0);
+ rtw_secmicappendbyte(pmicdata, 0);
+ rtw_secmicappendbyte(pmicdata, 0);
+ rtw_secmicappendbyte(pmicdata, 0);
+ /* and then zeroes until the length is a multiple of 4 */
+ while (pmicdata->nBytesInM != 0)
+ rtw_secmicappendbyte(pmicdata, 0);
+ /* The appendByte function has already computed the result. */
+ secmicputuint32(dst, pmicdata->L);
+ secmicputuint32(dst+4, pmicdata->R);
+ /* Reset to the empty message. */
+ secmicclear(pmicdata);
+_func_exit_;
+}
+
+void rtw_seccalctkipmic(u8 *key, u8 *header, u8 *data, u32 data_len, u8 *mic_code, u8 pri)
+{
+ struct mic_data micdata;
+ u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
+_func_enter_;
+ rtw_secmicsetkey(&micdata, key);
+ priority[0] = pri;
+
+ /* Michael MIC pseudo header: DA, SA, 3 x 0, Priority */
+ if (header[1]&1) { /* ToDS == 1 */
+ rtw_secmicappend(&micdata, &header[16], 6); /* DA */
+		if (header[1]&2) /* FromDS == 1 */
+ rtw_secmicappend(&micdata, &header[24], 6);
+ else
+ rtw_secmicappend(&micdata, &header[10], 6);
+ } else { /* ToDS == 0 */
+ rtw_secmicappend(&micdata, &header[4], 6); /* DA */
+		if (header[1]&2) /* FromDS == 1 */
+ rtw_secmicappend(&micdata, &header[16], 6);
+ else
+ rtw_secmicappend(&micdata, &header[10], 6);
+ }
+ rtw_secmicappend(&micdata, &priority[0], 4);
+
+ rtw_secmicappend(&micdata, data, data_len);
+
+ rtw_secgetmic(&micdata, mic_code);
+_func_exit_;
+}
+
+
+
+/* macros for extraction/creation of byte/16-bit values */
+#define RotR1(v16) ((((v16) >> 1) & 0x7FFF) ^ (((v16) & 1) << 15))
+#define Lo8(v16) ((u8)((v16) & 0x00FF))
+#define Hi8(v16) ((u8)(((v16) >> 8) & 0x00FF))
+#define Lo16(v32) ((u16)((v32) & 0xFFFF))
+#define Hi16(v32) ((u16)(((v32) >> 16) & 0xFFFF))
+#define Mk16(hi, lo) ((lo) ^ (((u16)(hi)) << 8))
+
+/* select the Nth 16-bit word of the temporal key byte array TK[] */
+#define TK16(N) Mk16(tk[2*(N)+1], tk[2*(N)])
+
+/* S-box lookup: 16 bits --> 16 bits */
+#define _S_(v16) (Sbox1[0][Lo8(v16)] ^ Sbox1[1][Hi8(v16)])
+
+/* fixed algorithm "parameters" */
+#define PHASE1_LOOP_CNT 8 /* this needs to be "big enough" */
+#define TA_SIZE 6 /* 48-bit transmitter address */
+#define TK_SIZE 16 /* 128-bit temporal key */
+#define P1K_SIZE 10 /* 80-bit Phase1 key */
+#define RC4_KEY_SIZE 16 /* 128-bit RC4KEY (104 bits unknown) */
+
+/* 2-byte by 2-byte subset of the full AES S-box table */
+static const unsigned short Sbox1[2][256] = { /* Sbox for hash (can be in ROM) */
+{
+ 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154,
+ 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A,
+ 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B,
+ 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 0xE496, 0x9B5B,
+ 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F,
+ 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F,
+ 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5,
+ 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F,
+ 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB,
+ 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397,
+ 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED,
+ 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A,
+ 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194,
+ 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3,
+ 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104,
+ 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D,
+ 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39,
+ 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695,
+ 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83,
+ 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76,
+ 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4,
+ 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B,
+ 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0,
+ 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018,
+ 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751,
+ 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85,
+ 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12,
+ 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9,
+ 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7,
+ 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A,
+ 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8,
+ 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A,
+ },
+
+	{ /* second half of table is byte-reversed version of first! */
+ 0xA5C6, 0x84F8, 0x99EE, 0x8DF6, 0x0DFF, 0xBDD6, 0xB1DE, 0x5491,
+ 0x5060, 0x0302, 0xA9CE, 0x7D56, 0x19E7, 0x62B5, 0xE64D, 0x9AEC,
+ 0x458F, 0x9D1F, 0x4089, 0x87FA, 0x15EF, 0xEBB2, 0xC98E, 0x0BFB,
+ 0xEC41, 0x67B3, 0xFD5F, 0xEA45, 0xBF23, 0xF753, 0x96E4, 0x5B9B,
+ 0xC275, 0x1CE1, 0xAE3D, 0x6A4C, 0x5A6C, 0x417E, 0x02F5, 0x4F83,
+ 0x5C68, 0xF451, 0x34D1, 0x08F9, 0x93E2, 0x73AB, 0x5362, 0x3F2A,
+ 0x0C08, 0x5295, 0x6546, 0x5E9D, 0x2830, 0xA137, 0x0F0A, 0xB52F,
+ 0x090E, 0x3624, 0x9B1B, 0x3DDF, 0x26CD, 0x694E, 0xCD7F, 0x9FEA,
+ 0x1B12, 0x9E1D, 0x7458, 0x2E34, 0x2D36, 0xB2DC, 0xEEB4, 0xFB5B,
+ 0xF6A4, 0x4D76, 0x61B7, 0xCE7D, 0x7B52, 0x3EDD, 0x715E, 0x9713,
+ 0xF5A6, 0x68B9, 0x0000, 0x2CC1, 0x6040, 0x1FE3, 0xC879, 0xEDB6,
+ 0xBED4, 0x468D, 0xD967, 0x4B72, 0xDE94, 0xD498, 0xE8B0, 0x4A85,
+ 0x6BBB, 0x2AC5, 0xE54F, 0x16ED, 0xC586, 0xD79A, 0x5566, 0x9411,
+ 0xCF8A, 0x10E9, 0x0604, 0x81FE, 0xF0A0, 0x4478, 0xBA25, 0xE34B,
+ 0xF3A2, 0xFE5D, 0xC080, 0x8A05, 0xAD3F, 0xBC21, 0x4870, 0x04F1,
+ 0xDF63, 0xC177, 0x75AF, 0x6342, 0x3020, 0x1AE5, 0x0EFD, 0x6DBF,
+ 0x4C81, 0x1418, 0x3526, 0x2FC3, 0xE1BE, 0xA235, 0xCC88, 0x392E,
+ 0x5793, 0xF255, 0x82FC, 0x477A, 0xACC8, 0xE7BA, 0x2B32, 0x95E6,
+ 0xA0C0, 0x9819, 0xD19E, 0x7FA3, 0x6644, 0x7E54, 0xAB3B, 0x830B,
+ 0xCA8C, 0x29C7, 0xD36B, 0x3C28, 0x79A7, 0xE2BC, 0x1D16, 0x76AD,
+ 0x3BDB, 0x5664, 0x4E74, 0x1E14, 0xDB92, 0x0A0C, 0x6C48, 0xE4B8,
+ 0x5D9F, 0x6EBD, 0xEF43, 0xA6C4, 0xA839, 0xA431, 0x37D3, 0x8BF2,
+ 0x32D5, 0x438B, 0x596E, 0xB7DA, 0x8C01, 0x64B1, 0xD29C, 0xE049,
+ 0xB4D8, 0xFAAC, 0x07F3, 0x25CF, 0xAFCA, 0x8EF4, 0xE947, 0x1810,
+ 0xD56F, 0x88F0, 0x6F4A, 0x725C, 0x2438, 0xF157, 0xC773, 0x5197,
+ 0x23CB, 0x7CA1, 0x9CE8, 0x213E, 0xDD96, 0xDC61, 0x860D, 0x850F,
+ 0x90E0, 0x427C, 0xC471, 0xAACC, 0xD890, 0x0506, 0x01F7, 0x121C,
+ 0xA3C2, 0x5F6A, 0xF9AE, 0xD069, 0x9117, 0x5899, 0x273A, 0xB927,
+ 0x38D9, 0x13EB, 0xB32B, 0x3322, 0xBBD2, 0x70A9, 0x8907, 0xA733,
+ 0xB62D, 0x223C, 0x9215, 0x20C9, 0x4987, 0xFFAA, 0x7850, 0x7AA5,
+ 0x8F03, 0xF859, 0x8009, 0x171A, 0xDA65, 0x31D7, 0xC684, 0xB8D0,
+ 0xC382, 0xB029, 0x775A, 0x111E, 0xCB7B, 0xFCA8, 0xD66D, 0x3A2C,
+ }
+};
+
+/*
+**********************************************************************
+* Routine: Phase 1 -- generate P1K, given TA, TK, IV32
+*
+* Inputs:
+* tk[] = temporal key [128 bits]
+* ta[] = transmitter's MAC address [ 48 bits]
+* iv32 = upper 32 bits of IV [ 32 bits]
+* Output:
+* p1k[] = Phase 1 key [ 80 bits]
+*
+* Note:
+* This function only needs to be called every 2**16 packets,
+* although in theory it could be called every packet.
+*
+**********************************************************************
+*/
+static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
+{
+ int i;
+_func_enter_;
+ /* Initialize the 80 bits of P1K[] from IV32 and TA[0..5] */
+ p1k[0] = Lo16(iv32);
+ p1k[1] = Hi16(iv32);
+ p1k[2] = Mk16(ta[1], ta[0]); /* use TA[] as little-endian */
+ p1k[3] = Mk16(ta[3], ta[2]);
+ p1k[4] = Mk16(ta[5], ta[4]);
+
+ /* Now compute an unbalanced Feistel cipher with 80-bit block */
+ /* size on the 80-bit block P1K[], using the 128-bit key TK[] */
+ for (i = 0; i < PHASE1_LOOP_CNT; i++) { /* Each add operation here is mod 2**16 */
+ p1k[0] += _S_(p1k[4] ^ TK16((i&1)+0));
+ p1k[1] += _S_(p1k[0] ^ TK16((i&1)+2));
+ p1k[2] += _S_(p1k[1] ^ TK16((i&1)+4));
+ p1k[3] += _S_(p1k[2] ^ TK16((i&1)+6));
+ p1k[4] += _S_(p1k[3] ^ TK16((i&1)+0));
+ p1k[4] += (unsigned short)i; /* avoid "slide attacks" */
+ }
+_func_exit_;
+}
+
+/*
+**********************************************************************
+* Routine: Phase 2 -- generate RC4KEY, given TK, P1K, IV16
+*
+* Inputs:
+* tk[] = Temporal key [128 bits]
+* p1k[] = Phase 1 output key [ 80 bits]
+* iv16 = low 16 bits of IV counter [ 16 bits]
+* Output:
+* rc4key[] = the key used to encrypt the packet [128 bits]
+*
+* Note:
+* The value {TA, IV32, IV16} for Phase1/Phase2 must be unique
+* across all packets using the same key TK value. Then, for a
+* given value of TK[], this TKIP48 construction guarantees that
+* the final RC4KEY value is unique across all packets.
+*
+* Suggested implementation optimization: if PPK[] is "overlaid"
+* appropriately on RC4KEY[], there is no need for the final
+* for loop below that copies the PPK[] result into RC4KEY[].
+*
+**********************************************************************
+*/
+static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16)
+{
+ int i;
+ u16 PPK[6]; /* temporary key for mixing */
+_func_enter_;
+ /* Note: all adds in the PPK[] equations below are mod 2**16 */
+ for (i = 0; i < 5; i++)
+ PPK[i] = p1k[i]; /* first, copy P1K to PPK */
+ PPK[5] = p1k[4] + iv16; /* next, add in IV16 */
+
+ /* Bijective non-linear mixing of the 96 bits of PPK[0..5] */
+ PPK[0] += _S_(PPK[5] ^ TK16(0)); /* Mix key in each "round" */
+ PPK[1] += _S_(PPK[0] ^ TK16(1));
+ PPK[2] += _S_(PPK[1] ^ TK16(2));
+ PPK[3] += _S_(PPK[2] ^ TK16(3));
+ PPK[4] += _S_(PPK[3] ^ TK16(4));
+ PPK[5] += _S_(PPK[4] ^ TK16(5)); /* Total # S-box lookups == 6 */
+
+ /* Final sweep: bijective, "linear". Rotates kill LSB correlations */
+ PPK[0] += RotR1(PPK[5] ^ TK16(6));
+ PPK[1] += RotR1(PPK[0] ^ TK16(7)); /* Use all of TK[] in Phase2 */
+ PPK[2] += RotR1(PPK[1]);
+ PPK[3] += RotR1(PPK[2]);
+ PPK[4] += RotR1(PPK[3]);
+ PPK[5] += RotR1(PPK[4]);
+ /* Note: At this point, for a given key TK[0..15], the 96-bit output */
+ /* value PPK[0..5] is guaranteed to be unique, as a function */
+ /* of the 96-bit "input" value {TA, IV32, IV16}. That is, P1K */
+ /* is now a keyed permutation of {TA, IV32, IV16}. */
+
+ /* Set RC4KEY[0..3], which includes "cleartext" portion of RC4 key */
+ rc4key[0] = Hi8(iv16); /* RC4KEY[0..2] is the WEP IV */
+ rc4key[1] = (Hi8(iv16) | 0x20) & 0x7F; /* Help avoid weak (FMS) keys */
+ rc4key[2] = Lo8(iv16);
+ rc4key[3] = Lo8((PPK[5] ^ TK16(0)) >> 1);
+
+ /* Copy 96 bits of PPK[0..5] to RC4KEY[4..15] (little-endian) */
+ for (i = 0; i < 6; i++) {
+ rc4key[4+2*i] = Lo8(PPK[i]);
+ rc4key[5+2*i] = Hi8(PPK[i]);
+ }
+_func_exit_;
+}
+
+/* The hlen doesn't include the IV */
+u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
+{ /* exclude ICV */
+ u16 pnl;
+ u32 pnh;
+ u8 rc4key[16];
+ u8 ttkey[16];
+ u8 crc[4];
+ u8 hw_hdr_offset = 0;
+ struct arc4context mycontext;
+ int curfragnum, length;
+
+ u8 *pframe, *payload, *iv, *prwskey;
+ union pn48 dot11txpn;
+ struct sta_info *stainfo;
+ struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ u32 res = _SUCCESS;
+_func_enter_;
+
+ if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
+ return _FAIL;
+
+ hw_hdr_offset = TXDESC_SIZE +
+ (((struct xmit_frame *)pxmitframe)->pkt_offset * PACKET_OFFSET_SZ);
+ pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
+ /* 4 start to encrypt each fragment */
+ if (pattrib->encrypt == _TKIP_) {
+ if (pattrib->psta)
+ stainfo = pattrib->psta;
+ else
+ stainfo = rtw_get_stainfo(&padapter->stapriv, &pattrib->ra[0]);
+
+ if (stainfo != NULL) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_encrypt: stainfo!= NULL!!!\n"));
+
+ if (IS_MCAST(pattrib->ra))
+ prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
+ else
+ prwskey = &stainfo->dot118021x_UncstKey.skey[0];
+
+ for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
+ iv = pframe+pattrib->hdrlen;
+ payload = pframe+pattrib->iv_len+pattrib->hdrlen;
+
+ GET_TKIP_PN(iv, dot11txpn);
+
+ pnl = (u16)(dot11txpn.val);
+ pnh = (u32)(dot11txpn.val>>16);
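+				/* TKIP key mixing: phase 1 combines TK, TA and the upper 32 bits of the PN; phase 2 adds the lower 16 bits to produce the per-packet RC4 key */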
+ phase1((u16 *)&ttkey[0], prwskey, &pattrib->ta[0], pnh);
+ phase2(&rc4key[0], prwskey, (u16 *)&ttkey[0], pnl);
+
+ if ((curfragnum+1) == pattrib->nr_frags) { /* 4 the last fragment */
+ length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
+ RT_TRACE(_module_rtl871x_security_c_, _drv_info_,
+ ("pattrib->iv_len=%x, pattrib->icv_len=%x\n",
+ pattrib->iv_len, pattrib->icv_len));
+ *((__le32 *)crc) = getcrc32(payload, length);/* modified by Amy*/
+
+ arcfour_init(&mycontext, rc4key, 16);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+ arcfour_encrypt(&mycontext, payload+length, crc, 4);
+ } else {
+					length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
+ *((__le32 *)crc) = getcrc32(payload, length);/* modified by Amy*/
+ arcfour_init(&mycontext, rc4key, 16);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+ arcfour_encrypt(&mycontext, payload+length, crc, 4);
+
+ pframe += pxmitpriv->frag_len;
+ pframe = (u8 *)RND4((size_t)(pframe));
+ }
+ }
+ } else {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_encrypt: stainfo==NULL!!!\n"));
+ res = _FAIL;
+ }
+ }
+_func_exit_;
+ return res;
+}
+
+/* The hlen doesn't include the IV */
+u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
+{ /* exclude ICV */
+ u16 pnl;
+ u32 pnh;
+ u8 rc4key[16];
+ u8 ttkey[16];
+ u8 crc[4];
+ struct arc4context mycontext;
+ int length;
+
+ u8 *pframe, *payload, *iv, *prwskey;
+ union pn48 dot11txpn;
+ struct sta_info *stainfo;
+ struct rx_pkt_attrib *prxattrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ u32 res = _SUCCESS;
+
+_func_enter_;
+
+ pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
+
+ /* 4 start to decrypt recvframe */
+ if (prxattrib->encrypt == _TKIP_) {
+ stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
+ if (stainfo != NULL) {
+ if (IS_MCAST(prxattrib->ra)) {
+ if (!psecuritypriv->binstallGrpkey) {
+ res = _FAIL;
+ DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
+ goto exit;
+ }
+ prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
+ } else {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo!= NULL!!!\n"));
+ prwskey = &stainfo->dot118021x_UncstKey.skey[0];
+ }
+
+ iv = pframe+prxattrib->hdrlen;
+ payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
+ length = ((union recv_frame *)precvframe)->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len;
+
+ GET_TKIP_PN(iv, dot11txpn);
+
+ pnl = (u16)(dot11txpn.val);
+ pnh = (u32)(dot11txpn.val>>16);
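+			/* rebuild the per-packet RC4 key from TK, TA and the received PN, using the same phase 1/phase 2 mixing as on transmit */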
+
+ phase1((u16 *)&ttkey[0], prwskey, &prxattrib->ta[0], pnh);
+ phase2(&rc4key[0], prwskey, (unsigned short *)&ttkey[0], pnl);
+
+ /* 4 decrypt payload include icv */
+
+ arcfour_init(&mycontext, rc4key, 16);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+
+ *((__le32 *)crc) = getcrc32(payload, length-4);
+
+ if (crc[3] != payload[length-1] ||
+ crc[2] != payload[length-2] ||
+ crc[1] != payload[length-3] ||
+ crc[0] != payload[length-4]) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+					 ("rtw_tkip_decrypt:icv error crc (%4ph)!=payload (%4ph)\n",
+ &crc, &payload[length-4]));
+ res = _FAIL;
+ }
+ } else {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo==NULL!!!\n"));
+ res = _FAIL;
+ }
+ }
+_func_exit_;
+exit:
+ return res;
+}
+
+/* 3 ===== AES related ===== */
+
+
+#define MAX_MSG_SIZE 2048
+/*****************************/
+/******** SBOX Table *********/
+/*****************************/
+
+static u8 sbox_table[256] = {
+ 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
+ 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
+ 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
+ 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
+ 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
+ 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
+ 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
+ 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
+ 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
+ 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
+ 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
+ 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
+ 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
+ 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
+ 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
+ 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
+ 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
+ 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
+ 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
+ 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
+ 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
+ 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
+ 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
+ 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
+ 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
+ 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
+ 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
+ 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
+ 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
+ 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
+ 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
+ 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
+};
+
+/*****************************/
+/**** Function Prototypes ****/
+/*****************************/
+
+static void bitwise_xor(u8 *ina, u8 *inb, u8 *out);
+static void construct_mic_iv(u8 *mic_header1, int qc_exists, int a4_exists, u8 *mpdu, uint payload_length, u8 *pn_vector);
+static void construct_mic_header1(u8 *mic_header1, int header_length, u8 *mpdu);
+static void construct_mic_header2(u8 *mic_header2, u8 *mpdu, int a4_exists, int qc_exists);
+static void construct_ctr_preload(u8 *ctr_preload, int a4_exists, int qc_exists, u8 *mpdu, u8 *pn_vector, int c);
+static void xor_128(u8 *a, u8 *b, u8 *out);
+static void xor_32(u8 *a, u8 *b, u8 *out);
+static u8 sbox(u8 a);
+static void next_key(u8 *key, int round);
+static void byte_sub(u8 *in, u8 *out);
+static void shift_row(u8 *in, u8 *out);
+static void mix_column(u8 *in, u8 *out);
+static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext);
+
+/*****************************************/
+/* Helper routines for aes128k128d(),    */
+/* which performs a 128-bit AES encrypt  */
+/* of a 128-bit data block.              */
+/*****************************************/
+static void xor_128(u8 *a, u8 *b, u8 *out)
+{
+ int i;
+_func_enter_;
+ for (i = 0; i < 16; i++)
+ out[i] = a[i] ^ b[i];
+_func_exit_;
+}
+
+static void xor_32(u8 *a, u8 *b, u8 *out)
+{
+ int i;
+_func_enter_;
+ for (i = 0; i < 4; i++)
+ out[i] = a[i] ^ b[i];
+_func_exit_;
+}
+
+static u8 sbox(u8 a)
+{
+ return sbox_table[(int)a];
+}
+
+static void next_key(u8 *key, int round)
+{
+ u8 rcon;
+ u8 sbox_key[4];
+ u8 rcon_table[12] = {
+ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
+ 0x1b, 0x36, 0x36, 0x36
+ };
+_func_enter_;
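+	/* AES-128 key schedule step: rotate and S-box-substitute the last key word, XOR with the round constant into word 0, then chain the XOR through the remaining words */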
+ sbox_key[0] = sbox(key[13]);
+ sbox_key[1] = sbox(key[14]);
+ sbox_key[2] = sbox(key[15]);
+ sbox_key[3] = sbox(key[12]);
+
+ rcon = rcon_table[round];
+
+ xor_32(&key[0], sbox_key, &key[0]);
+ key[0] = key[0] ^ rcon;
+
+ xor_32(&key[4], &key[0], &key[4]);
+ xor_32(&key[8], &key[4], &key[8]);
+ xor_32(&key[12], &key[8], &key[12]);
+_func_exit_;
+}
+
+static void byte_sub(u8 *in, u8 *out)
+{
+ int i;
+_func_enter_;
+ for (i = 0; i < 16; i++)
+ out[i] = sbox(in[i]);
+_func_exit_;
+}
+
+static void shift_row(u8 *in, u8 *out)
+{
+_func_enter_;
+ out[0] = in[0];
+ out[1] = in[5];
+ out[2] = in[10];
+ out[3] = in[15];
+ out[4] = in[4];
+ out[5] = in[9];
+ out[6] = in[14];
+ out[7] = in[3];
+ out[8] = in[8];
+ out[9] = in[13];
+ out[10] = in[2];
+ out[11] = in[7];
+ out[12] = in[12];
+ out[13] = in[1];
+ out[14] = in[6];
+ out[15] = in[11];
+_func_exit_;
+}
+
+static void mix_column(u8 *in, u8 *out)
+{
+ int i;
+ u8 add1b[4];
+ u8 add1bf7[4];
+ u8 rotl[4];
+ u8 swap_halfs[4];
+ u8 andf7[4];
+ u8 rotr[4];
+ u8 temp[4];
+ u8 tempb[4];
+_func_enter_;
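+	/* MixColumns on one 4-byte column; the shift-and-conditional-0x1b steps implement multiplication by 2 (xtime) in GF(2^8) */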
+ for (i = 0 ; i < 4; i++) {
+ if ((in[i] & 0x80) == 0x80)
+ add1b[i] = 0x1b;
+ else
+ add1b[i] = 0x00;
+ }
+
+	swap_halfs[0] = in[2]; /* Swap halves */
+ swap_halfs[1] = in[3];
+ swap_halfs[2] = in[0];
+ swap_halfs[3] = in[1];
+
+ rotl[0] = in[3]; /* Rotate left 8 bits */
+ rotl[1] = in[0];
+ rotl[2] = in[1];
+ rotl[3] = in[2];
+
+ andf7[0] = in[0] & 0x7f;
+ andf7[1] = in[1] & 0x7f;
+ andf7[2] = in[2] & 0x7f;
+ andf7[3] = in[3] & 0x7f;
+
+ for (i = 3; i > 0; i--) { /* logical shift left 1 bit */
+ andf7[i] = andf7[i] << 1;
+ if ((andf7[i-1] & 0x80) == 0x80)
+ andf7[i] = (andf7[i] | 0x01);
+ }
+ andf7[0] = andf7[0] << 1;
+ andf7[0] = andf7[0] & 0xfe;
+
+ xor_32(add1b, andf7, add1bf7);
+
+ xor_32(in, add1bf7, rotr);
+
+ temp[0] = rotr[0]; /* Rotate right 8 bits */
+ rotr[0] = rotr[1];
+ rotr[1] = rotr[2];
+ rotr[2] = rotr[3];
+ rotr[3] = temp[0];
+
+ xor_32(add1bf7, rotr, temp);
+ xor_32(swap_halfs, rotl, tempb);
+ xor_32(temp, tempb, out);
+_func_exit_;
+}
+
+static void aes128k128d(u8 *key, u8 *data, u8 *ciphertext)
+{
+ int round;
+ int i;
+ u8 intermediatea[16];
+ u8 intermediateb[16];
+ u8 round_key[16];
+_func_enter_;
+ for (i = 0; i < 16; i++)
+ round_key[i] = key[i];
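+	/* 10 AES-128 rounds with the key schedule computed on the fly; round 0 is the initial AddRoundKey and the final round omits MixColumns */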
+ for (round = 0; round < 11; round++) {
+ if (round == 0) {
+ xor_128(round_key, data, ciphertext);
+ next_key(round_key, round);
+ } else if (round == 10) {
+ byte_sub(ciphertext, intermediatea);
+ shift_row(intermediatea, intermediateb);
+ xor_128(intermediateb, round_key, ciphertext);
+ } else { /* 1 - 9 */
+ byte_sub(ciphertext, intermediatea);
+ shift_row(intermediatea, intermediateb);
+ mix_column(&intermediateb[0], &intermediatea[0]);
+ mix_column(&intermediateb[4], &intermediatea[4]);
+ mix_column(&intermediateb[8], &intermediatea[8]);
+ mix_column(&intermediateb[12], &intermediatea[12]);
+ xor_128(intermediatea, round_key, ciphertext);
+ next_key(round_key, round);
+ }
+ }
+_func_exit_;
+}
+
+/************************************************/
+/* construct_mic_iv() */
+/* Builds the MIC IV from header fields and PN */
+/************************************************/
+static void construct_mic_iv(u8 *mic_iv, int qc_exists, int a4_exists, u8 *mpdu,
+ uint payload_length, u8 *pn_vector)
+{
+ int i;
+_func_enter_;
+ mic_iv[0] = 0x59;
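+	/* 0x59 is the CCM B_0 flags byte: AAD present, 8-byte MIC, 2-byte length field */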
+ if (qc_exists && a4_exists)
+ mic_iv[1] = mpdu[30] & 0x0f; /* QoS_TC */
+ if (qc_exists && !a4_exists)
+ mic_iv[1] = mpdu[24] & 0x0f; /* mute bits 7-4 */
+ if (!qc_exists)
+ mic_iv[1] = 0x00;
+ for (i = 2; i < 8; i++)
+ mic_iv[i] = mpdu[i + 8]; /* mic_iv[2:7] = A2[0:5] = mpdu[10:15] */
+ for (i = 8; i < 14; i++)
+ mic_iv[i] = pn_vector[13 - i]; /* mic_iv[8:13] = PN[5:0] */
+ mic_iv[14] = (unsigned char) (payload_length / 256);
+ mic_iv[15] = (unsigned char) (payload_length % 256);
+_func_exit_;
+}
+
+/************************************************/
+/* construct_mic_header1() */
+/* Builds the first MIC header block from */
+/* header fields. */
+/************************************************/
+static void construct_mic_header1(u8 *mic_header1, int header_length, u8 *mpdu)
+{
+_func_enter_;
+ mic_header1[0] = (u8)((header_length - 2) / 256);
+ mic_header1[1] = (u8)((header_length - 2) % 256);
+ mic_header1[2] = mpdu[0] & 0xcf; /* Mute CF poll & CF ack bits */
+ mic_header1[3] = mpdu[1] & 0xc7; /* Mute retry, more data and pwr mgt bits */
+ mic_header1[4] = mpdu[4]; /* A1 */
+ mic_header1[5] = mpdu[5];
+ mic_header1[6] = mpdu[6];
+ mic_header1[7] = mpdu[7];
+ mic_header1[8] = mpdu[8];
+ mic_header1[9] = mpdu[9];
+ mic_header1[10] = mpdu[10]; /* A2 */
+ mic_header1[11] = mpdu[11];
+ mic_header1[12] = mpdu[12];
+ mic_header1[13] = mpdu[13];
+ mic_header1[14] = mpdu[14];
+ mic_header1[15] = mpdu[15];
+_func_exit_;
+}
+
+/************************************************/
+/* construct_mic_header2() */
+/* Builds the last MIC header block from */
+/* header fields. */
+/************************************************/
+static void construct_mic_header2(u8 *mic_header2, u8 *mpdu, int a4_exists, int qc_exists)
+{
+ int i;
+_func_enter_;
+ for (i = 0; i < 16; i++)
+ mic_header2[i] = 0x00;
+
+ mic_header2[0] = mpdu[16]; /* A3 */
+ mic_header2[1] = mpdu[17];
+ mic_header2[2] = mpdu[18];
+ mic_header2[3] = mpdu[19];
+ mic_header2[4] = mpdu[20];
+ mic_header2[5] = mpdu[21];
+
+ mic_header2[6] = 0x00;
+ mic_header2[7] = 0x00; /* mpdu[23]; */
+
+ if (!qc_exists && a4_exists) {
+ for (i = 0; i < 6; i++)
+ mic_header2[8+i] = mpdu[24+i]; /* A4 */
+ }
+
+ if (qc_exists && !a4_exists) {
+ mic_header2[8] = mpdu[24] & 0x0f; /* mute bits 15 - 4 */
+ mic_header2[9] = mpdu[25] & 0x00;
+ }
+
+ if (qc_exists && a4_exists) {
+ for (i = 0; i < 6; i++)
+ mic_header2[8+i] = mpdu[24+i]; /* A4 */
+
+ mic_header2[14] = mpdu[30] & 0x0f;
+ mic_header2[15] = mpdu[31] & 0x00;
+ }
+
+_func_exit_;
+}
+
+/************************************************/
+/* construct_ctr_preload()                      */
+/* Builds the CTR-mode counter preload from     */
+/* header fields and the packet number.         */
+/************************************************/
+static void construct_ctr_preload(u8 *ctr_preload, int a4_exists, int qc_exists, u8 *mpdu, u8 *pn_vector, int c)
+{
+ int i;
+_func_enter_;
+ for (i = 0; i < 16; i++)
+ ctr_preload[i] = 0x00;
+ i = 0;
+
+ ctr_preload[0] = 0x01; /* flag */
+ if (qc_exists && a4_exists)
+		ctr_preload[1] = mpdu[30] & 0x0f; /* QoS_Control */
+ if (qc_exists && !a4_exists)
+ ctr_preload[1] = mpdu[24] & 0x0f;
+
+ for (i = 2; i < 8; i++)
+ ctr_preload[i] = mpdu[i + 8]; /* ctr_preload[2:7] = A2[0:5] = mpdu[10:15] */
+ for (i = 8; i < 14; i++)
+ ctr_preload[i] = pn_vector[13 - i]; /* ctr_preload[8:13] = PN[5:0] */
+ ctr_preload[14] = (unsigned char) (c / 256); /* Ctr */
+ ctr_preload[15] = (unsigned char) (c % 256);
+_func_exit_;
+}
+
+/************************************/
+/* bitwise_xor() */
+/* A 128 bit, bitwise exclusive or */
+/************************************/
+static void bitwise_xor(u8 *ina, u8 *inb, u8 *out)
+{
+ int i;
+_func_enter_;
+ for (i = 0; i < 16; i++)
+ out[i] = ina[i] ^ inb[i];
+_func_exit_;
+}
+
+static int aes_cipher(u8 *key, uint hdrlen, u8 *pframe, uint plen)
+{
+ uint qc_exists, a4_exists, i, j, payload_remainder,
+ num_blocks, payload_index;
+
+ u8 pn_vector[6];
+ u8 mic_iv[16];
+ u8 mic_header1[16];
+ u8 mic_header2[16];
+ u8 ctr_preload[16];
+
+ /* Intermediate Buffers */
+ u8 chain_buffer[16];
+ u8 aes_out[16];
+ u8 padded_buffer[16];
+ u8 mic[8];
+ uint frtype = GetFrameType(pframe);
+ uint frsubtype = GetFrameSubType(pframe);
+
+_func_enter_;
+ frsubtype = frsubtype>>4;
+
+ _rtw_memset((void *)mic_iv, 0, 16);
+ _rtw_memset((void *)mic_header1, 0, 16);
+ _rtw_memset((void *)mic_header2, 0, 16);
+ _rtw_memset((void *)ctr_preload, 0, 16);
+ _rtw_memset((void *)chain_buffer, 0, 16);
+ _rtw_memset((void *)aes_out, 0, 16);
+ _rtw_memset((void *)padded_buffer, 0, 16);
+
+ if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen == WLAN_HDR_A3_QOS_LEN))
+ a4_exists = 0;
+ else
+ a4_exists = 1;
+
+ if ((frtype == WIFI_DATA_CFACK) || (frtype == WIFI_DATA_CFPOLL) || (frtype == WIFI_DATA_CFACKPOLL)) {
+ qc_exists = 1;
+ if (hdrlen != WLAN_HDR_A3_QOS_LEN)
+ hdrlen += 2;
+ } else if ((frsubtype == 0x08) || (frsubtype == 0x09) || (frsubtype == 0x0a) || (frsubtype == 0x0b)) {
+ if (hdrlen != WLAN_HDR_A3_QOS_LEN)
+ hdrlen += 2;
+ qc_exists = 1;
+ } else {
+ qc_exists = 0;
+ }
+
+ pn_vector[0] = pframe[hdrlen];
+ pn_vector[1] = pframe[hdrlen+1];
+ pn_vector[2] = pframe[hdrlen+4];
+ pn_vector[3] = pframe[hdrlen+5];
+ pn_vector[4] = pframe[hdrlen+6];
+ pn_vector[5] = pframe[hdrlen+7];
+
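+	/* CCMP: first compute the 8-byte CBC-MAC (MIC) over the nonce, the masked header blocks and the payload, then encrypt payload and MIC in CTR mode */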
+ construct_mic_iv(mic_iv, qc_exists, a4_exists, pframe, plen, pn_vector);
+
+ construct_mic_header1(mic_header1, hdrlen, pframe);
+ construct_mic_header2(mic_header2, pframe, a4_exists, qc_exists);
+
+ payload_remainder = plen % 16;
+ num_blocks = plen / 16;
+
+ /* Find start of payload */
+ payload_index = (hdrlen + 8);
+
+ /* Calculate MIC */
+ aes128k128d(key, mic_iv, aes_out);
+ bitwise_xor(aes_out, mic_header1, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+ bitwise_xor(aes_out, mic_header2, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+
+ for (i = 0; i < num_blocks; i++) {
+ bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);/* bitwise_xor(aes_out, &message[payload_index], chain_buffer); */
+
+ payload_index += 16;
+ aes128k128d(key, chain_buffer, aes_out);
+ }
+
+ /* Add on the final payload block if it needs padding */
+ if (payload_remainder > 0) {
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < payload_remainder; j++)
+ padded_buffer[j] = pframe[payload_index++];/* padded_buffer[j] = message[payload_index++]; */
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+ }
+
+ for (j = 0; j < 8; j++)
+ mic[j] = aes_out[j];
+
+ /* Insert MIC into payload */
+ for (j = 0; j < 8; j++)
+ pframe[payload_index+j] = mic[j]; /* message[payload_index+j] = mic[j]; */
+
+ payload_index = hdrlen + 8;
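+	/* CTR-mode encryption of the payload: one counter value per 16-byte block, counters starting at 1 */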
+ for (i = 0; i < num_blocks; i++) {
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, pn_vector, i+1);
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
+ for (j = 0; j < 16; j++)
+ pframe[payload_index++] = chain_buffer[j];
+ }
+
+ if (payload_remainder > 0) { /* If there is a short final block, then pad it,*/
+ /* encrypt it and copy the unpadded part back */
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, pn_vector, num_blocks+1);
+
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < payload_remainder; j++)
+ padded_buffer[j] = pframe[payload_index+j];
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ for (j = 0; j < payload_remainder; j++)
+ pframe[payload_index++] = chain_buffer[j];
+ }
+ /* Encrypt the MIC */
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, pn_vector, 0);
+
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < 8; j++)
+ padded_buffer[j] = pframe[j+hdrlen+8+plen];
+
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ for (j = 0; j < 8; j++)
+ pframe[payload_index++] = chain_buffer[j];
+_func_exit_;
+ return _SUCCESS;
+}
+
+u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
+{ /* exclude ICV */
+
+ /*static*/
+/* unsigned char message[MAX_MSG_SIZE]; */
+
+ /* Intermediate Buffers */
+ int curfragnum, length;
+ u8 *pframe, *prwskey; /* *payload,*iv */
+ u8 hw_hdr_offset = 0;
+ struct sta_info *stainfo;
+ struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+/* uint offset = 0; */
+ u32 res = _SUCCESS;
+_func_enter_;
+
+ if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
+ return _FAIL;
+
+ hw_hdr_offset = TXDESC_SIZE +
+ (((struct xmit_frame *)pxmitframe)->pkt_offset * PACKET_OFFSET_SZ);
+
+ pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
+
+ /* 4 start to encrypt each fragment */
+ if ((pattrib->encrypt == _AES_)) {
+ if (pattrib->psta)
+ stainfo = pattrib->psta;
+ else
+ stainfo = rtw_get_stainfo(&padapter->stapriv, &pattrib->ra[0]);
+
+ if (stainfo != NULL) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_encrypt: stainfo!= NULL!!!\n"));
+
+ if (IS_MCAST(pattrib->ra))
+ prwskey = psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey;
+ else
+ prwskey = &stainfo->dot118021x_UncstKey.skey[0];
+ for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
+ if ((curfragnum+1) == pattrib->nr_frags) { /* 4 the last fragment */
+ length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
+
+ aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
+				} else {
+					length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-pattrib->icv_len;
+
+ aes_cipher(prwskey, pattrib->hdrlen, pframe, length);
+ pframe += pxmitpriv->frag_len;
+ pframe = (u8 *)RND4((size_t)(pframe));
+ }
+ }
+		} else {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_encrypt: stainfo==NULL!!!\n"));
+ res = _FAIL;
+ }
+ }
+
+
+_func_exit_;
+ return res;
+}
+
+static int aes_decipher(u8 *key, uint hdrlen,
+ u8 *pframe, uint plen)
+{
+ static u8 message[MAX_MSG_SIZE];
+ uint qc_exists, a4_exists, i, j, payload_remainder,
+ num_blocks, payload_index;
+ int res = _SUCCESS;
+ u8 pn_vector[6];
+ u8 mic_iv[16];
+ u8 mic_header1[16];
+ u8 mic_header2[16];
+ u8 ctr_preload[16];
+
+ /* Intermediate Buffers */
+ u8 chain_buffer[16];
+ u8 aes_out[16];
+ u8 padded_buffer[16];
+ u8 mic[8];
+
+/* uint offset = 0; */
+ uint frtype = GetFrameType(pframe);
+ uint frsubtype = GetFrameSubType(pframe);
+_func_enter_;
+ frsubtype = frsubtype>>4;
+
+ _rtw_memset((void *)mic_iv, 0, 16);
+ _rtw_memset((void *)mic_header1, 0, 16);
+ _rtw_memset((void *)mic_header2, 0, 16);
+ _rtw_memset((void *)ctr_preload, 0, 16);
+ _rtw_memset((void *)chain_buffer, 0, 16);
+ _rtw_memset((void *)aes_out, 0, 16);
+ _rtw_memset((void *)padded_buffer, 0, 16);
+
+ /* start to decrypt the payload */
+
+	num_blocks = (plen-8) / 16; /* plen includes the LLC header, payload and MIC */
+
+ payload_remainder = (plen-8) % 16;
+
+ pn_vector[0] = pframe[hdrlen];
+ pn_vector[1] = pframe[hdrlen+1];
+ pn_vector[2] = pframe[hdrlen+4];
+ pn_vector[3] = pframe[hdrlen+5];
+ pn_vector[4] = pframe[hdrlen+6];
+ pn_vector[5] = pframe[hdrlen+7];
+
+ if ((hdrlen == WLAN_HDR_A3_LEN) || (hdrlen == WLAN_HDR_A3_QOS_LEN))
+ a4_exists = 0;
+ else
+ a4_exists = 1;
+
+ if ((frtype == WIFI_DATA_CFACK) || (frtype == WIFI_DATA_CFPOLL) ||
+ (frtype == WIFI_DATA_CFACKPOLL)) {
+ qc_exists = 1;
+ if (hdrlen != WLAN_HDR_A3_QOS_LEN)
+ hdrlen += 2;
+ } else if ((frsubtype == 0x08) || (frsubtype == 0x09) ||
+ (frsubtype == 0x0a) || (frsubtype == 0x0b)) {
+ if (hdrlen != WLAN_HDR_A3_QOS_LEN)
+ hdrlen += 2;
+ qc_exists = 1;
+ } else {
+ qc_exists = 0;
+ }
+
+ /* now, decrypt pframe with hdrlen offset and plen long */
+
+ payload_index = hdrlen + 8; /* 8 is for extiv */
+
+ for (i = 0; i < num_blocks; i++) {
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, pn_vector, i+1);
+
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, &pframe[payload_index], chain_buffer);
+
+ for (j = 0; j < 16; j++)
+ pframe[payload_index++] = chain_buffer[j];
+ }
+
+ if (payload_remainder > 0) { /* If there is a short final block, then pad it,*/
+ /* encrypt it and copy the unpadded part back */
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists, pframe, pn_vector, num_blocks+1);
+
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < payload_remainder; j++)
+ padded_buffer[j] = pframe[payload_index+j];
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ for (j = 0; j < payload_remainder; j++)
+ pframe[payload_index++] = chain_buffer[j];
+ }
+
+ /* start to calculate the mic */
+ if ((hdrlen+plen+8) <= MAX_MSG_SIZE)
+ memcpy(message, pframe, (hdrlen + plen+8)); /* 8 is for ext iv len */
+
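+	/* recompute the MIC over the decrypted copy in message[]; after it is re-encrypted below, it must match the MIC received in pframe[] */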
+ pn_vector[0] = pframe[hdrlen];
+ pn_vector[1] = pframe[hdrlen+1];
+ pn_vector[2] = pframe[hdrlen+4];
+ pn_vector[3] = pframe[hdrlen+5];
+ pn_vector[4] = pframe[hdrlen+6];
+ pn_vector[5] = pframe[hdrlen+7];
+ construct_mic_iv(mic_iv, qc_exists, a4_exists, message, plen-8, pn_vector);
+
+ construct_mic_header1(mic_header1, hdrlen, message);
+ construct_mic_header2(mic_header2, message, a4_exists, qc_exists);
+
+ payload_remainder = (plen-8) % 16;
+ num_blocks = (plen-8) / 16;
+
+ /* Find start of payload */
+ payload_index = (hdrlen + 8);
+
+ /* Calculate MIC */
+ aes128k128d(key, mic_iv, aes_out);
+ bitwise_xor(aes_out, mic_header1, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+ bitwise_xor(aes_out, mic_header2, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+
+ for (i = 0; i < num_blocks; i++) {
+ bitwise_xor(aes_out, &message[payload_index], chain_buffer);
+
+ payload_index += 16;
+ aes128k128d(key, chain_buffer, aes_out);
+ }
+
+ /* Add on the final payload block if it needs padding */
+ if (payload_remainder > 0) {
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < payload_remainder; j++)
+ padded_buffer[j] = message[payload_index++];
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ aes128k128d(key, chain_buffer, aes_out);
+ }
+
+ for (j = 0 ; j < 8; j++)
+ mic[j] = aes_out[j];
+
+ /* Insert MIC into payload */
+ for (j = 0; j < 8; j++)
+ message[payload_index+j] = mic[j];
+
+ payload_index = hdrlen + 8;
+ for (i = 0; i < num_blocks; i++) {
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists, message, pn_vector, i+1);
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, &message[payload_index], chain_buffer);
+ for (j = 0; j < 16; j++)
+ message[payload_index++] = chain_buffer[j];
+ }
+
+ if (payload_remainder > 0) { /* If there is a short final block, then pad it,*/
+ /* encrypt it and copy the unpadded part back */
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists, message, pn_vector, num_blocks+1);
+
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < payload_remainder; j++)
+ padded_buffer[j] = message[payload_index+j];
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ for (j = 0; j < payload_remainder; j++)
+ message[payload_index++] = chain_buffer[j];
+ }
+
+ /* Encrypt the MIC */
+ construct_ctr_preload(ctr_preload, a4_exists, qc_exists, message, pn_vector, 0);
+
+ for (j = 0; j < 16; j++)
+ padded_buffer[j] = 0x00;
+ for (j = 0; j < 8; j++)
+ padded_buffer[j] = message[j+hdrlen+8+plen-8];
+
+ aes128k128d(key, ctr_preload, aes_out);
+ bitwise_xor(aes_out, padded_buffer, chain_buffer);
+ for (j = 0; j < 8; j++)
+ message[payload_index++] = chain_buffer[j];
+
+ /* compare the mic */
+ for (i = 0; i < 8; i++) {
+ if (pframe[hdrlen+8+plen-8+i] != message[hdrlen+8+plen-8+i]) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+ ("aes_decipher:mic check error mic[%d]: pframe(%x)!=message(%x)\n",
+ i, pframe[hdrlen+8+plen-8+i], message[hdrlen+8+plen-8+i]));
+ DBG_88E("aes_decipher:mic check error mic[%d]: pframe(%x)!=message(%x)\n",
+ i, pframe[hdrlen+8+plen-8+i], message[hdrlen+8+plen-8+i]);
+ res = _FAIL;
+ }
+ }
+_func_exit_;
+ return res;
+}
+
+u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
+{ /* exclude ICV */
+ /* Intermediate Buffers */
+ int length;
+ u8 *pframe, *prwskey; /* *payload,*iv */
+ struct sta_info *stainfo;
+ struct rx_pkt_attrib *prxattrib = &((union recv_frame *)precvframe)->u.hdr.attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ u32 res = _SUCCESS;
+_func_enter_;
+ pframe = (unsigned char *)((union recv_frame *)precvframe)->u.hdr.rx_data;
+	/* 4 start to decrypt recvframe */
+ if ((prxattrib->encrypt == _AES_)) {
+ stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
+ if (stainfo != NULL) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_decrypt: stainfo!= NULL!!!\n"));
+
+ if (IS_MCAST(prxattrib->ra)) {
+				/* in concurrent mode software decryption is used for the group key, so this message was removed */
+ if (!psecuritypriv->binstallGrpkey) {
+ res = _FAIL;
+ DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
+ goto exit;
+ }
+ prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
+ if (psecuritypriv->dot118021XGrpKeyid != prxattrib->key_index) {
+ DBG_88E("not match packet_index=%d, install_index=%d\n",
+ prxattrib->key_index, psecuritypriv->dot118021XGrpKeyid);
+ res = _FAIL;
+ goto exit;
+ }
+ } else {
+ prwskey = &stainfo->dot118021x_UncstKey.skey[0];
+ }
+ length = ((union recv_frame *)precvframe)->u.hdr.len-prxattrib->hdrlen-prxattrib->iv_len;
+ res = aes_decipher(prwskey, prxattrib->hdrlen, pframe, length);
+ } else {
+			RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_aes_decrypt: stainfo==NULL!!!\n"));
+ res = _FAIL;
+ }
+ }
+_func_exit_;
+exit:
+ return res;
+}
+
+/* AES tables*/
+const u32 Te0[256] = {
+ 0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU,
+ 0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U,
+ 0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU,
+ 0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU,
+ 0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U,
+ 0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU,
+ 0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU,
+ 0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU,
+ 0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU,
+ 0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU,
+ 0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U,
+ 0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU,
+ 0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU,
+ 0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U,
+ 0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU,
+ 0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU,
+ 0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU,
+ 0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU,
+ 0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU,
+ 0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U,
+ 0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU,
+ 0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU,
+ 0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU,
+ 0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU,
+ 0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U,
+ 0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U,
+ 0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U,
+ 0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U,
+ 0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU,
+ 0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U,
+ 0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U,
+ 0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU,
+ 0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU,
+ 0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U,
+ 0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U,
+ 0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U,
+ 0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU,
+ 0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U,
+ 0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU,
+ 0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U,
+ 0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU,
+ 0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U,
+ 0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U,
+ 0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU,
+ 0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U,
+ 0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U,
+ 0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U,
+ 0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U,
+ 0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U,
+ 0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U,
+ 0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U,
+ 0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U,
+ 0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU,
+ 0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U,
+ 0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U,
+ 0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U,
+ 0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U,
+ 0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U,
+ 0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U,
+ 0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU,
+ 0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U,
+ 0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U,
+ 0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U,
+ 0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU,
+};
+
+const u32 Td0[256] = {
+ 0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U,
+ 0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U,
+ 0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U,
+ 0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU,
+ 0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U,
+ 0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U,
+ 0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU,
+ 0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U,
+ 0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU,
+ 0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U,
+ 0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U,
+ 0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U,
+ 0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U,
+ 0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU,
+ 0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U,
+ 0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU,
+ 0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U,
+ 0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU,
+ 0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U,
+ 0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U,
+ 0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U,
+ 0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU,
+ 0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U,
+ 0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU,
+ 0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U,
+ 0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU,
+ 0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U,
+ 0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU,
+ 0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU,
+ 0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U,
+ 0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU,
+ 0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U,
+ 0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU,
+ 0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U,
+ 0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U,
+ 0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U,
+ 0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU,
+ 0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U,
+ 0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U,
+ 0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU,
+ 0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U,
+ 0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U,
+ 0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U,
+ 0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U,
+ 0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U,
+ 0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU,
+ 0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U,
+ 0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U,
+ 0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U,
+ 0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U,
+ 0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U,
+ 0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU,
+ 0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU,
+ 0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU,
+ 0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU,
+ 0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U,
+ 0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U,
+ 0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU,
+ 0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU,
+ 0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U,
+ 0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU,
+ 0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U,
+ 0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U,
+ 0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U,
+};
+
+const u8 Td4s[256] = {
+ 0x52U, 0x09U, 0x6aU, 0xd5U, 0x30U, 0x36U, 0xa5U, 0x38U,
+ 0xbfU, 0x40U, 0xa3U, 0x9eU, 0x81U, 0xf3U, 0xd7U, 0xfbU,
+ 0x7cU, 0xe3U, 0x39U, 0x82U, 0x9bU, 0x2fU, 0xffU, 0x87U,
+ 0x34U, 0x8eU, 0x43U, 0x44U, 0xc4U, 0xdeU, 0xe9U, 0xcbU,
+ 0x54U, 0x7bU, 0x94U, 0x32U, 0xa6U, 0xc2U, 0x23U, 0x3dU,
+ 0xeeU, 0x4cU, 0x95U, 0x0bU, 0x42U, 0xfaU, 0xc3U, 0x4eU,
+ 0x08U, 0x2eU, 0xa1U, 0x66U, 0x28U, 0xd9U, 0x24U, 0xb2U,
+ 0x76U, 0x5bU, 0xa2U, 0x49U, 0x6dU, 0x8bU, 0xd1U, 0x25U,
+ 0x72U, 0xf8U, 0xf6U, 0x64U, 0x86U, 0x68U, 0x98U, 0x16U,
+ 0xd4U, 0xa4U, 0x5cU, 0xccU, 0x5dU, 0x65U, 0xb6U, 0x92U,
+ 0x6cU, 0x70U, 0x48U, 0x50U, 0xfdU, 0xedU, 0xb9U, 0xdaU,
+ 0x5eU, 0x15U, 0x46U, 0x57U, 0xa7U, 0x8dU, 0x9dU, 0x84U,
+ 0x90U, 0xd8U, 0xabU, 0x00U, 0x8cU, 0xbcU, 0xd3U, 0x0aU,
+ 0xf7U, 0xe4U, 0x58U, 0x05U, 0xb8U, 0xb3U, 0x45U, 0x06U,
+ 0xd0U, 0x2cU, 0x1eU, 0x8fU, 0xcaU, 0x3fU, 0x0fU, 0x02U,
+ 0xc1U, 0xafU, 0xbdU, 0x03U, 0x01U, 0x13U, 0x8aU, 0x6bU,
+ 0x3aU, 0x91U, 0x11U, 0x41U, 0x4fU, 0x67U, 0xdcU, 0xeaU,
+ 0x97U, 0xf2U, 0xcfU, 0xceU, 0xf0U, 0xb4U, 0xe6U, 0x73U,
+ 0x96U, 0xacU, 0x74U, 0x22U, 0xe7U, 0xadU, 0x35U, 0x85U,
+ 0xe2U, 0xf9U, 0x37U, 0xe8U, 0x1cU, 0x75U, 0xdfU, 0x6eU,
+ 0x47U, 0xf1U, 0x1aU, 0x71U, 0x1dU, 0x29U, 0xc5U, 0x89U,
+ 0x6fU, 0xb7U, 0x62U, 0x0eU, 0xaaU, 0x18U, 0xbeU, 0x1bU,
+ 0xfcU, 0x56U, 0x3eU, 0x4bU, 0xc6U, 0xd2U, 0x79U, 0x20U,
+ 0x9aU, 0xdbU, 0xc0U, 0xfeU, 0x78U, 0xcdU, 0x5aU, 0xf4U,
+ 0x1fU, 0xddU, 0xa8U, 0x33U, 0x88U, 0x07U, 0xc7U, 0x31U,
+ 0xb1U, 0x12U, 0x10U, 0x59U, 0x27U, 0x80U, 0xecU, 0x5fU,
+ 0x60U, 0x51U, 0x7fU, 0xa9U, 0x19U, 0xb5U, 0x4aU, 0x0dU,
+ 0x2dU, 0xe5U, 0x7aU, 0x9fU, 0x93U, 0xc9U, 0x9cU, 0xefU,
+ 0xa0U, 0xe0U, 0x3bU, 0x4dU, 0xaeU, 0x2aU, 0xf5U, 0xb0U,
+ 0xc8U, 0xebU, 0xbbU, 0x3cU, 0x83U, 0x53U, 0x99U, 0x61U,
+ 0x17U, 0x2bU, 0x04U, 0x7eU, 0xbaU, 0x77U, 0xd6U, 0x26U,
+ 0xe1U, 0x69U, 0x14U, 0x63U, 0x55U, 0x21U, 0x0cU, 0x7dU,
+};
+const u8 rcons[] = {
+ 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36
+ /* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
+};
+
+/*
+ * ROUND - one AES encryption round using the T-tables: each output word
+ * d##n is the XOR of four table lookups on the state words s##0..s##3
+ * and the corresponding round-key word.
+ */
+#define ROUND(i, d, s) \
+do { \
+ d##0 = TE0(s##0) ^ TE1(s##1) ^ TE2(s##2) ^ TE3(s##3) ^ rk[4 * i]; \
+ d##1 = TE0(s##1) ^ TE1(s##2) ^ TE2(s##3) ^ TE3(s##0) ^ rk[4 * i + 1]; \
+ d##2 = TE0(s##2) ^ TE1(s##3) ^ TE2(s##0) ^ TE3(s##1) ^ rk[4 * i + 2]; \
+ d##3 = TE0(s##3) ^ TE1(s##0) ^ TE2(s##1) ^ TE3(s##2) ^ rk[4 * i + 3]; \
+} while (0);
+
+/*
+ * rtw_use_tkipkey_handler - callback that marks the installed TKIP key as
+ * ready for use on transmit (sets securitypriv.busetkipkey to true).
+ */
+void rtw_use_tkipkey_handler(void *FunctionContext)
+{
+ struct adapter *padapter = (struct adapter *)FunctionContext;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("^^^rtw_use_tkipkey_handler ^^^\n"));
+
+ padapter->securitypriv.busetkipkey = true;
+
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("^^^rtw_use_tkipkey_handler padapter->securitypriv.busetkipkey=%d^^^\n", padapter->securitypriv.busetkipkey));
+
+_func_exit_;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_sreset.c b/drivers/staging/rtl8188eu/core/rtw_sreset.c
new file mode 100644
index 00000000000..298f75400c8
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_sreset.c
@@ -0,0 +1,79 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include <rtw_sreset.h>
+
+void sreset_init_value(struct adapter *padapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct sreset_priv *psrtpriv = &pHalData->srestpriv;
+
+ _rtw_mutex_init(&psrtpriv->silentreset_mutex);
+ psrtpriv->silent_reset_inprogress = false;
+ psrtpriv->Wifi_Error_Status = WIFI_STATUS_SUCCESS;
+ psrtpriv->last_tx_time = 0;
+ psrtpriv->last_tx_complete_time = 0;
+}
+void sreset_reset_value(struct adapter *padapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct sreset_priv *psrtpriv = &pHalData->srestpriv;
+
+ psrtpriv->silent_reset_inprogress = false;
+ psrtpriv->Wifi_Error_Status = WIFI_STATUS_SUCCESS;
+ psrtpriv->last_tx_time = 0;
+ psrtpriv->last_tx_complete_time = 0;
+}
+
+u8 sreset_get_wifi_status(struct adapter *padapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct sreset_priv *psrtpriv = &pHalData->srestpriv;
+
+ u8 status = WIFI_STATUS_SUCCESS;
+ u32 val32 = 0;
+
+ if (psrtpriv->silent_reset_inprogress)
+ return status;
+ val32 = rtw_read32(padapter, REG_TXDMA_STATUS);
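+	/* a readback of 0xeaeaeaea is treated as the interface no longer being present */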
+ if (val32 == 0xeaeaeaea) {
+ psrtpriv->Wifi_Error_Status = WIFI_IF_NOT_EXIST;
+ } else if (val32 != 0) {
+		DBG_88E("txdmastatus(%x)\n", val32);
+ psrtpriv->Wifi_Error_Status = WIFI_MAC_TXDMA_ERROR;
+ }
+
+ if (WIFI_STATUS_SUCCESS != psrtpriv->Wifi_Error_Status) {
+ DBG_88E("==>%s error_status(0x%x)\n", __func__, psrtpriv->Wifi_Error_Status);
+ status = (psrtpriv->Wifi_Error_Status & (~(USB_READ_PORT_FAIL|USB_WRITE_PORT_FAIL)));
+ }
+ DBG_88E("==> %s wifi_status(0x%x)\n", __func__, status);
+
+ /* status restore */
+ psrtpriv->Wifi_Error_Status = WIFI_STATUS_SUCCESS;
+
+ return status;
+}
+
+void sreset_set_wifi_error_status(struct adapter *padapter, u32 status)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ pHalData->srestpriv.Wifi_Error_Status = status;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
new file mode 100644
index 00000000000..c2977be92fb
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -0,0 +1,655 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_STA_MGT_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <xmit_osdep.h>
+#include <mlme_osdep.h>
+#include <sta_info.h>
+
+static void _rtw_init_stainfo(struct sta_info *psta)
+{
+_func_enter_;
+	_rtw_memset((u8 *)psta, 0, sizeof(struct sta_info));
+
+ _rtw_spinlock_init(&psta->lock);
+ _rtw_init_listhead(&psta->list);
+ _rtw_init_listhead(&psta->hash_list);
+ _rtw_init_queue(&psta->sleep_q);
+ psta->sleepq_len = 0;
+
+ _rtw_init_sta_xmit_priv(&psta->sta_xmitpriv);
+ _rtw_init_sta_recv_priv(&psta->sta_recvpriv);
+
+#ifdef CONFIG_88EU_AP_MODE
+
+	_rtw_init_listhead(&psta->asoc_list);
+	_rtw_init_listhead(&psta->auth_list);
+
+	psta->expire_to = 0;
+	psta->flags = 0;
+	psta->capability = 0;
+	psta->bpairwise_key_installed = false;
+
+	psta->nonerp_set = 0;
+	psta->no_short_slot_time_set = 0;
+	psta->no_short_preamble_set = 0;
+	psta->no_ht_gf_set = 0;
+	psta->no_ht_set = 0;
+	psta->ht_20mhz_set = 0;
+
+	psta->under_exist_checking = 0;
+	psta->keep_alive_trycnt = 0;
+
+#endif /* CONFIG_88EU_AP_MODE */
+
+_func_exit_;
+}
+
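+/*
+ * Allocate one 4-byte-aligned buffer holding NUM_STA sta_info entries, put
+ * every entry on free_sta_queue and initialise the hash buckets plus the
+ * AP-mode association/authentication bookkeeping.
+ */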
+u32 _rtw_init_sta_priv(struct sta_priv *pstapriv)
+{
+ struct sta_info *psta;
+ s32 i;
+
+_func_enter_;
+
+ pstapriv->pallocated_stainfo_buf = rtw_zvmalloc(sizeof(struct sta_info) * NUM_STA + 4);
+
+ if (!pstapriv->pallocated_stainfo_buf)
+ return _FAIL;
+
+ pstapriv->pstainfo_buf = pstapriv->pallocated_stainfo_buf + 4 -
+ ((size_t)(pstapriv->pallocated_stainfo_buf) & 3);
+
+ _rtw_init_queue(&pstapriv->free_sta_queue);
+
+ _rtw_spinlock_init(&pstapriv->sta_hash_lock);
+
+ pstapriv->asoc_sta_count = 0;
+ _rtw_init_queue(&pstapriv->sleep_q);
+ _rtw_init_queue(&pstapriv->wakeup_q);
+
+ psta = (struct sta_info *)(pstapriv->pstainfo_buf);
+
+ for (i = 0; i < NUM_STA; i++) {
+ _rtw_init_stainfo(psta);
+
+ _rtw_init_listhead(&(pstapriv->sta_hash[i]));
+
+ rtw_list_insert_tail(&psta->list, get_list_head(&pstapriv->free_sta_queue));
+
+ psta++;
+ }
+
+#ifdef CONFIG_88EU_AP_MODE
+
+ pstapriv->sta_dz_bitmap = 0;
+ pstapriv->tim_bitmap = 0;
+
+ _rtw_init_listhead(&pstapriv->asoc_list);
+ _rtw_init_listhead(&pstapriv->auth_list);
+ _rtw_spinlock_init(&pstapriv->asoc_list_lock);
+ _rtw_spinlock_init(&pstapriv->auth_list_lock);
+ pstapriv->asoc_list_cnt = 0;
+ pstapriv->auth_list_cnt = 0;
+
+ pstapriv->auth_to = 3; /* 3*2 = 6 sec */
+ pstapriv->assoc_to = 3;
+ pstapriv->expire_to = 3; /* 3*2 = 6 sec */
+ pstapriv->max_num_sta = NUM_STA;
+#endif
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
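+/* Translate a sta_info pointer into its index within pstainfo_buf (and back below). */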
+inline int rtw_stainfo_offset(struct sta_priv *stapriv, struct sta_info *sta)
+{
+ int offset = (((u8 *)sta) - stapriv->pstainfo_buf)/sizeof(struct sta_info);
+
+ if (!stainfo_offset_valid(offset))
+		DBG_88E("%s invalid offset(%d), out of range!!!\n", __func__, offset);
+
+ return offset;
+}
+
+inline struct sta_info *rtw_get_stainfo_by_offset(struct sta_priv *stapriv, int offset)
+{
+ if (!stainfo_offset_valid(offset))
+		DBG_88E("%s invalid offset(%d), out of range!!!\n", __func__, offset);
+
+ return (struct sta_info *)(stapriv->pstainfo_buf + offset * sizeof(struct sta_info));
+}
+
+void _rtw_free_sta_xmit_priv_lock(struct sta_xmit_priv *psta_xmitpriv);
+void _rtw_free_sta_xmit_priv_lock(struct sta_xmit_priv *psta_xmitpriv)
+{
+_func_enter_;
+
+ _rtw_spinlock_free(&psta_xmitpriv->lock);
+
+ _rtw_spinlock_free(&(psta_xmitpriv->be_q.sta_pending.lock));
+ _rtw_spinlock_free(&(psta_xmitpriv->bk_q.sta_pending.lock));
+ _rtw_spinlock_free(&(psta_xmitpriv->vi_q.sta_pending.lock));
+ _rtw_spinlock_free(&(psta_xmitpriv->vo_q.sta_pending.lock));
+_func_exit_;
+}
+
+static void _rtw_free_sta_recv_priv_lock(struct sta_recv_priv *psta_recvpriv)
+{
+_func_enter_;
+
+ _rtw_spinlock_free(&psta_recvpriv->lock);
+
+ _rtw_spinlock_free(&(psta_recvpriv->defrag_q.lock));
+
+_func_exit_;
+}
+
+void rtw_mfree_stainfo(struct sta_info *psta);
+void rtw_mfree_stainfo(struct sta_info *psta)
+{
+_func_enter_;
+
+	_rtw_spinlock_free(&psta->lock);
+
+ _rtw_free_sta_xmit_priv_lock(&psta->sta_xmitpriv);
+ _rtw_free_sta_recv_priv_lock(&psta->sta_recvpriv);
+
+_func_exit_;
+}
+
+/* this function is used to free the memory of lock || sema for all stainfos */
+void rtw_mfree_all_stainfo(struct sta_priv *pstapriv);
+void rtw_mfree_all_stainfo(struct sta_priv *pstapriv)
+{
+ unsigned long irql;
+ struct list_head *plist, *phead;
+ struct sta_info *psta = NULL;
+
+_func_enter_;
+
+ _enter_critical_bh(&pstapriv->sta_hash_lock, &irql);
+
+ phead = get_list_head(&pstapriv->free_sta_queue);
+ plist = get_next(phead);
+
+	while (!rtw_end_of_queue_search(phead, plist)) {
+		psta = LIST_CONTAINOR(plist, struct sta_info, list);
+ plist = get_next(plist);
+
+ rtw_mfree_stainfo(psta);
+ }
+
+ _exit_critical_bh(&pstapriv->sta_hash_lock, &irql);
+
+_func_exit_;
+}
+
+static void rtw_mfree_sta_priv_lock(struct sta_priv *pstapriv)
+{
+#ifdef CONFIG_88EU_AP_MODE
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+#endif
+
+ rtw_mfree_all_stainfo(pstapriv); /* be done before free sta_hash_lock */
+
+ _rtw_spinlock_free(&pstapriv->free_sta_queue.lock);
+
+ _rtw_spinlock_free(&pstapriv->sta_hash_lock);
+ _rtw_spinlock_free(&pstapriv->wakeup_q.lock);
+ _rtw_spinlock_free(&pstapriv->sleep_q.lock);
+
+#ifdef CONFIG_88EU_AP_MODE
+ _rtw_spinlock_free(&pstapriv->asoc_list_lock);
+ _rtw_spinlock_free(&pstapriv->auth_list_lock);
+ _rtw_spinlock_free(&pacl_list->acl_node_q.lock);
+#endif
+}
+
+u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
+{
+ unsigned long irql;
+ struct list_head *phead, *plist;
+ struct sta_info *psta = NULL;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ int index;
+
+_func_enter_;
+ if (pstapriv) {
+ /* delete all reordering_ctrl_timer */
+ _enter_critical_bh(&pstapriv->sta_hash_lock, &irql);
+ for (index = 0; index < NUM_STA; index++) {
+ phead = &(pstapriv->sta_hash[index]);
+ plist = get_next(phead);
+
+			while (!rtw_end_of_queue_search(phead, plist)) {
+				int i;
+
+				psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+ plist = get_next(plist);
+
+ for (i = 0; i < 16; i++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+ _cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer);
+ }
+ }
+ }
+ _exit_critical_bh(&pstapriv->sta_hash_lock, &irql);
+ /*===============================*/
+
+ rtw_mfree_sta_priv_lock(pstapriv);
+
+ if (pstapriv->pallocated_stainfo_buf) {
+ rtw_vmfree(pstapriv->pallocated_stainfo_buf, sizeof(struct sta_info)*NUM_STA+4);
+ }
+ }
+
+_func_exit_;
+ return _SUCCESS;
+}
+
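+/*
+ * Take a sta_info off the free queue, initialise it for the given MAC
+ * address, link it into sta_hash[] and set up the per-TID rx sequence cache
+ * and the 16 A-MPDU reordering queues.
+ */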
+struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
+{
+ unsigned long irql, irql2;
+ s32 index;
+ struct list_head *phash_list;
+ struct sta_info *psta;
+ struct __queue *pfree_sta_queue;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ int i = 0;
+ u16 wRxSeqInitialValue = 0xffff;
+
+_func_enter_;
+
+ pfree_sta_queue = &pstapriv->free_sta_queue;
+
+ _enter_critical_bh(&(pfree_sta_queue->lock), &irql);
+
+	if (_rtw_queue_empty(pfree_sta_queue)) {
+ _exit_critical_bh(&(pfree_sta_queue->lock), &irql);
+ psta = NULL;
+ } else {
+ psta = LIST_CONTAINOR(get_next(&pfree_sta_queue->queue), struct sta_info, list);
+ rtw_list_delete(&(psta->list));
+ _exit_critical_bh(&(pfree_sta_queue->lock), &irql);
+ _rtw_init_stainfo(psta);
+ memcpy(psta->hwaddr, hwaddr, ETH_ALEN);
+ index = wifi_mac_hash(hwaddr);
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_, ("rtw_alloc_stainfo: index=%x", index));
+ if (index >= NUM_STA) {
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_, ("ERROR => rtw_alloc_stainfo: index >= NUM_STA"));
+ psta = NULL;
+ goto exit;
+ }
+ phash_list = &(pstapriv->sta_hash[index]);
+
+ _enter_critical_bh(&(pstapriv->sta_hash_lock), &irql2);
+
+ rtw_list_insert_tail(&psta->hash_list, phash_list);
+
+		pstapriv->asoc_sta_count++;
+
+ _exit_critical_bh(&(pstapriv->sta_hash_lock), &irql2);
+
+/* Commented by Albert 2009/08/13 */
+/* For the SMC router, the sequence number of first packet of WPS handshake will be 0. */
+/* In this case, this packet will be dropped by recv_decache function if we use the 0x00 as the default value for tid_rxseq variable. */
+/* So, we initialize the tid_rxseq variable as the 0xffff. */
+
+ for (i = 0; i < 16; i++)
+ memcpy(&psta->sta_recvpriv.rxcache.tid_rxseq[i], &wRxSeqInitialValue, 2);
+
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_info_,
+ ("alloc number_%d stainfo with hwaddr = %pM\n",
+			 pstapriv->asoc_sta_count, hwaddr));
+
+ init_addba_retry_timer(pstapriv->padapter, psta);
+
+ /* for A-MPDU Rx reordering buffer control */
+ for (i = 0; i < 16; i++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+
+ preorder_ctrl->padapter = pstapriv->padapter;
+
+ preorder_ctrl->enable = false;
+
+ preorder_ctrl->indicate_seq = 0xffff;
+ preorder_ctrl->wend_b = 0xffff;
+ preorder_ctrl->wsize_b = 64;/* 64; */
+
+ _rtw_init_queue(&preorder_ctrl->pending_recvframe_queue);
+
+ rtw_init_recv_timer(preorder_ctrl);
+ }
+
+ /* init for DM */
+ psta->rssi_stat.UndecoratedSmoothedPWDB = (-1);
+ psta->rssi_stat.UndecoratedSmoothedCCK = (-1);
+
+ /* init for the sequence number of received management frame */
+ psta->RxMgmtFrameSeqNum = 0xffff;
+ }
+
+exit:
+
+_func_exit_;
+
+ return psta;
+}
+
+/* using pstapriv->sta_hash_lock to protect */
+u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
+{
+ int i;
+ unsigned long irql0;
+ struct __queue *pfree_sta_queue;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ struct sta_xmit_priv *pstaxmitpriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+_func_enter_;
+
+ if (psta == NULL)
+ goto exit;
+
+ pfree_sta_queue = &pstapriv->free_sta_queue;
+
+ pstaxmitpriv = &psta->sta_xmitpriv;
+
+ _enter_critical_bh(&pxmitpriv->lock, &irql0);
+
+ rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q);
+ psta->sleepq_len = 0;
+
+ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending);
+
+ rtw_list_delete(&(pstaxmitpriv->vo_q.tx_pending));
+
+ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending);
+
+ rtw_list_delete(&(pstaxmitpriv->vi_q.tx_pending));
+
+ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending);
+
+ rtw_list_delete(&(pstaxmitpriv->bk_q.tx_pending));
+
+ rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->be_q.sta_pending);
+
+ rtw_list_delete(&(pstaxmitpriv->be_q.tx_pending));
+
+ _exit_critical_bh(&pxmitpriv->lock, &irql0);
+
+ rtw_list_delete(&psta->hash_list);
+	RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_, ("\n free number_%d stainfo with hwaddr=0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x\n", pstapriv->asoc_sta_count, psta->hwaddr[0], psta->hwaddr[1], psta->hwaddr[2], psta->hwaddr[3], psta->hwaddr[4], psta->hwaddr[5]));
+ pstapriv->asoc_sta_count--;
+
+ /* re-init sta_info; 20061114 */
+ _rtw_init_sta_xmit_priv(&psta->sta_xmitpriv);
+ _rtw_init_sta_recv_priv(&psta->sta_recvpriv);
+
+ _cancel_timer_ex(&psta->addba_retry_timer);
+
+ /* for A-MPDU Rx reordering buffer control, cancel reordering_ctrl_timer */
+	for (i = 0; i < 16; i++) {
+ unsigned long irql;
+ struct list_head *phead, *plist;
+ union recv_frame *prframe;
+ struct __queue *ppending_recvframe_queue;
+ struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
+
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+
+ _cancel_timer_ex(&preorder_ctrl->reordering_ctrl_timer);
+
+ ppending_recvframe_queue = &preorder_ctrl->pending_recvframe_queue;
+
+ _enter_critical_bh(&ppending_recvframe_queue->lock, &irql);
+
+ phead = get_list_head(ppending_recvframe_queue);
+ plist = get_next(phead);
+
+ while (!rtw_is_list_empty(phead)) {
+ prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+
+ plist = get_next(plist);
+
+ rtw_list_delete(&(prframe->u.hdr.list));
+
+ rtw_free_recvframe(prframe, pfree_recv_queue);
+ }
+
+ _exit_critical_bh(&ppending_recvframe_queue->lock, &irql);
+ }
+
+ if (!(psta->state & WIFI_AP_STATE))
+ rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, false);
+
+#ifdef CONFIG_88EU_AP_MODE
+
+ _enter_critical_bh(&pstapriv->auth_list_lock, &irql0);
+ if (!rtw_is_list_empty(&psta->auth_list)) {
+ rtw_list_delete(&psta->auth_list);
+ pstapriv->auth_list_cnt--;
+ }
+ _exit_critical_bh(&pstapriv->auth_list_lock, &irql0);
+
+ psta->expire_to = 0;
+
+ psta->sleepq_ac_len = 0;
+ psta->qos_info = 0;
+
+ psta->max_sp_len = 0;
+ psta->uapsd_bk = 0;
+ psta->uapsd_be = 0;
+ psta->uapsd_vi = 0;
+ psta->uapsd_vo = 0;
+ psta->has_legacy_ac = 0;
+
+ pstapriv->sta_dz_bitmap &= ~BIT(psta->aid);
+ pstapriv->tim_bitmap &= ~BIT(psta->aid);
+
+ if ((psta->aid > 0) && (pstapriv->sta_aid[psta->aid - 1] == psta)) {
+ pstapriv->sta_aid[psta->aid - 1] = NULL;
+ psta->aid = 0;
+ }
+
+ psta->under_exist_checking = 0;
+
+#endif /* CONFIG_88EU_AP_MODE */
+
+ _enter_critical_bh(&(pfree_sta_queue->lock), &irql0);
+ rtw_list_insert_tail(&psta->list, get_list_head(pfree_sta_queue));
+ _exit_critical_bh(&(pfree_sta_queue->lock), &irql0);
+
+exit:
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+/* free all stainfo which in sta_hash[all] */
+void rtw_free_all_stainfo(struct adapter *padapter)
+{
+ unsigned long irql;
+ struct list_head *plist, *phead;
+ s32 index;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct sta_info *pbcmc_stainfo = rtw_get_bcmc_stainfo(padapter);
+
+_func_enter_;
+
+ if (pstapriv->asoc_sta_count == 1)
+ goto exit;
+
+ _enter_critical_bh(&pstapriv->sta_hash_lock, &irql);
+
+ for (index = 0; index < NUM_STA; index++) {
+ phead = &(pstapriv->sta_hash[index]);
+ plist = get_next(phead);
+
+		while (!rtw_end_of_queue_search(phead, plist)) {
+			psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+
+ plist = get_next(plist);
+
+ if (pbcmc_stainfo != psta)
+				rtw_free_stainfo(padapter, psta);
+ }
+ }
+
+ _exit_critical_bh(&pstapriv->sta_hash_lock, &irql);
+
+exit:
+
+_func_exit_;
+}
+
+/* any station allocated can be searched by hash list */
+struct sta_info *rtw_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
+{
+ unsigned long irql;
+ struct list_head *plist, *phead;
+ struct sta_info *psta = NULL;
+ u32 index;
+ u8 *addr;
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+_func_enter_;
+
+ if (hwaddr == NULL)
+ return NULL;
+
+ if (IS_MCAST(hwaddr))
+ addr = bc_addr;
+ else
+ addr = hwaddr;
+
+ index = wifi_mac_hash(addr);
+
+ _enter_critical_bh(&pstapriv->sta_hash_lock, &irql);
+
+ phead = &(pstapriv->sta_hash[index]);
+ plist = get_next(phead);
+
+	while (!rtw_end_of_queue_search(phead, plist)) {
+		psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+
+		if (_rtw_memcmp(psta->hwaddr, addr, ETH_ALEN)) {
+ /* if found the matched address */
+ break;
+ }
+ psta = NULL;
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&pstapriv->sta_hash_lock, &irql);
+_func_exit_;
+ return psta;
+}
+
+u32 rtw_init_bcmc_stainfo(struct adapter *padapter)
+{
+ struct sta_info *psta;
+ u32 res = _SUCCESS;
+ unsigned char bcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+_func_enter_;
+
+ psta = rtw_alloc_stainfo(pstapriv, bcast_addr);
+
+ if (psta == NULL) {
+ res = _FAIL;
+ RT_TRACE(_module_rtl871x_sta_mgt_c_, _drv_err_, ("rtw_alloc_stainfo fail"));
+ goto exit;
+ }
+
+ /* default broadcast & multicast use macid 1 */
+ psta->mac_id = 1;
+
+exit:
+_func_exit_;
+ return res;
+}
+
+struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter)
+{
+ struct sta_info *psta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+_func_enter_;
+ psta = rtw_get_stainfo(pstapriv, bc_addr);
+_func_exit_;
+ return psta;
+}
+
+u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
+{
+ u8 res = true;
+#ifdef CONFIG_88EU_AP_MODE
+ unsigned long irql;
+ struct list_head *plist, *phead;
+ struct rtw_wlan_acl_node *paclnode;
+ u8 match = false;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
+ struct __queue *pacl_node_q = &pacl_list->acl_node_q;
+
+ _enter_critical_bh(&(pacl_node_q->lock), &irql);
+ phead = get_list_head(pacl_node_q);
+ plist = get_next(phead);
+	while (!rtw_end_of_queue_search(phead, plist)) {
+ paclnode = LIST_CONTAINOR(plist, struct rtw_wlan_acl_node, list);
+ plist = get_next(plist);
+
+ if (_rtw_memcmp(paclnode->addr, mac_addr, ETH_ALEN)) {
+ if (paclnode->valid) {
+ match = true;
+ break;
+ }
+ }
+ }
+ _exit_critical_bh(&(pacl_node_q->lock), &irql);
+
+	if (pacl_list->mode == 1) /* accept unless in deny list */
+		res = !match;
+	else if (pacl_list->mode == 2) /* deny unless in accept list */
+		res = match;
+	else
+		res = true;
+
+#endif
+
+ return res;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
new file mode 100644
index 00000000000..013ea487e7a
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -0,0 +1,1689 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_WLAN_UTIL_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wifi.h>
+
+static unsigned char ARTHEROS_OUI1[] = {0x00, 0x03, 0x7f};
+static unsigned char ARTHEROS_OUI2[] = {0x00, 0x13, 0x74};
+
+static unsigned char BROADCOM_OUI1[] = {0x00, 0x10, 0x18};
+static unsigned char BROADCOM_OUI2[] = {0x00, 0x0a, 0xf7};
+
+static unsigned char CISCO_OUI[] = {0x00, 0x40, 0x96};
+static unsigned char MARVELL_OUI[] = {0x00, 0x50, 0x43};
+static unsigned char RALINK_OUI[] = {0x00, 0x0c, 0x43};
+static unsigned char REALTEK_OUI[] = {0x00, 0xe0, 0x4c};
+static unsigned char AIRGOCAP_OUI[] = {0x00, 0x0a, 0xf5};
+static unsigned char EPIGRAM_OUI[] = {0x00, 0x90, 0x4c};
+
+unsigned char REALTEK_96B_IE[] = {0x00, 0xe0, 0x4c, 0x02, 0x01, 0x20};
+
+#define R2T_PHY_DELAY (0)
+
+/* define WAIT_FOR_BCN_TO_M (3000) */
+#define WAIT_FOR_BCN_TO_MIN (6000)
+#define WAIT_FOR_BCN_TO_MAX (20000)
+
+static u8 rtw_basic_rate_cck[4] = {
+ IEEE80211_CCK_RATE_1MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_2MB|IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_CCK_RATE_5MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_11MB|IEEE80211_BASIC_RATE_MASK
+};
+
+static u8 rtw_basic_rate_ofdm[3] = {
+ IEEE80211_OFDM_RATE_6MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_OFDM_RATE_12MB|IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_OFDM_RATE_24MB|IEEE80211_BASIC_RATE_MASK
+};
+
+static u8 rtw_basic_rate_mix[7] = {
+ IEEE80211_CCK_RATE_1MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_2MB|IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_CCK_RATE_5MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_CCK_RATE_11MB|IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_OFDM_RATE_6MB|IEEE80211_BASIC_RATE_MASK, IEEE80211_OFDM_RATE_12MB|IEEE80211_BASIC_RATE_MASK,
+ IEEE80211_OFDM_RATE_24MB|IEEE80211_BASIC_RATE_MASK
+};
+
+int cckrates_included(unsigned char *rate, int ratelen)
+{
+ int i;
+
+ for (i = 0; i < ratelen; i++) {
+ if ((((rate[i]) & 0x7f) == 2) || (((rate[i]) & 0x7f) == 4) ||
+ (((rate[i]) & 0x7f) == 11) || (((rate[i]) & 0x7f) == 22))
+ return true;
+ }
+ return false;
+}
+
+int cckratesonly_included(unsigned char *rate, int ratelen)
+{
+ int i;
+
+ for (i = 0; i < ratelen; i++) {
+ if ((((rate[i]) & 0x7f) != 2) && (((rate[i]) & 0x7f) != 4) &&
+ (((rate[i]) & 0x7f) != 11) && (((rate[i]) & 0x7f) != 22))
+ return false;
+ }
+
+ return true;
+}
+
+unsigned char networktype_to_raid(unsigned char network_type)
+{
+ unsigned char raid;
+
+ switch (network_type) {
+ case WIRELESS_11B:
+ raid = RATR_INX_WIRELESS_B;
+ break;
+ case WIRELESS_11A:
+ case WIRELESS_11G:
+ raid = RATR_INX_WIRELESS_G;
+ break;
+ case WIRELESS_11BG:
+ raid = RATR_INX_WIRELESS_GB;
+ break;
+ case WIRELESS_11_24N:
+ case WIRELESS_11_5N:
+ raid = RATR_INX_WIRELESS_N;
+ break;
+ case WIRELESS_11A_5N:
+ case WIRELESS_11G_24N:
+ raid = RATR_INX_WIRELESS_NG;
+ break;
+ case WIRELESS_11BG_24N:
+ raid = RATR_INX_WIRELESS_NGB;
+ break;
+ default:
+ raid = RATR_INX_WIRELESS_GB;
+ break;
+ }
+ return raid;
+}
+
+u8 judge_network_type(struct adapter *padapter, unsigned char *rate, int ratelen)
+{
+ u8 network_type = 0;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (pmlmeext->cur_channel > 14) {
+ if (pmlmeinfo->HT_enable)
+ network_type = WIRELESS_11_5N;
+
+ network_type |= WIRELESS_11A;
+ } else {
+ if (pmlmeinfo->HT_enable)
+ network_type = WIRELESS_11_24N;
+
+		if (cckratesonly_included(rate, ratelen))
+			network_type |= WIRELESS_11B;
+		else if (cckrates_included(rate, ratelen))
+ network_type |= WIRELESS_11BG;
+ else
+ network_type |= WIRELESS_11G;
+ }
+ return network_type;
+}
+
+static unsigned char ratetbl_val_2wifirate(unsigned char rate)
+{
+ unsigned char val = 0;
+
+ switch (rate & 0x7f) {
+ case 0:
+ val = IEEE80211_CCK_RATE_1MB;
+ break;
+ case 1:
+ val = IEEE80211_CCK_RATE_2MB;
+ break;
+ case 2:
+ val = IEEE80211_CCK_RATE_5MB;
+ break;
+ case 3:
+ val = IEEE80211_CCK_RATE_11MB;
+ break;
+ case 4:
+ val = IEEE80211_OFDM_RATE_6MB;
+ break;
+ case 5:
+ val = IEEE80211_OFDM_RATE_9MB;
+ break;
+ case 6:
+ val = IEEE80211_OFDM_RATE_12MB;
+ break;
+ case 7:
+ val = IEEE80211_OFDM_RATE_18MB;
+ break;
+ case 8:
+ val = IEEE80211_OFDM_RATE_24MB;
+ break;
+ case 9:
+ val = IEEE80211_OFDM_RATE_36MB;
+ break;
+ case 10:
+ val = IEEE80211_OFDM_RATE_48MB;
+ break;
+ case 11:
+ val = IEEE80211_OFDM_RATE_54MB;
+ break;
+ }
+ return val;
+}
+
+static int is_basicrate(struct adapter *padapter, unsigned char rate)
+{
+ int i;
+ unsigned char val;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ for (i = 0; i < NumRates; i++) {
+ val = pmlmeext->basicrate[i];
+
+ if ((val != 0xff) && (val != 0xfe)) {
+ if (rate == ratetbl_val_2wifirate(val))
+ return true;
+ }
+ }
+ return false;
+}
+
+static unsigned int ratetbl2rateset(struct adapter *padapter, unsigned char *rateset)
+{
+ int i;
+ unsigned char rate;
+ unsigned int len = 0;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ for (i = 0; i < NumRates; i++) {
+ rate = pmlmeext->datarate[i];
+
+ switch (rate) {
+ case 0xff:
+ return len;
+ case 0xfe:
+ continue;
+ default:
+ rate = ratetbl_val_2wifirate(rate);
+
+			if (is_basicrate(padapter, rate))
+ rate |= IEEE80211_BASIC_RATE_MASK;
+
+ rateset[len] = rate;
+ len++;
+ break;
+ }
+ }
+ return len;
+}
+
+void get_rate_set(struct adapter *padapter, unsigned char *pbssrate, int *bssrate_len)
+{
+ unsigned char supportedrates[NumRates];
+
+ _rtw_memset(supportedrates, 0, NumRates);
+ *bssrate_len = ratetbl2rateset(padapter, supportedrates);
+ memcpy(pbssrate, supportedrates, *bssrate_len);
+}
+
+void UpdateBrateTbl(struct adapter *Adapter, u8 *mbrate)
+{
+ u8 i;
+ u8 rate;
+
+ /* 1M, 2M, 5.5M, 11M, 6M, 12M, 24M are mandatory. */
+ for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
+ rate = mbrate[i] & 0x7f;
+ switch (rate) {
+ case IEEE80211_CCK_RATE_1MB:
+ case IEEE80211_CCK_RATE_2MB:
+ case IEEE80211_CCK_RATE_5MB:
+ case IEEE80211_CCK_RATE_11MB:
+ case IEEE80211_OFDM_RATE_6MB:
+ case IEEE80211_OFDM_RATE_12MB:
+ case IEEE80211_OFDM_RATE_24MB:
+ mbrate[i] |= IEEE80211_BASIC_RATE_MASK;
+ break;
+ }
+ }
+}
+
+void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen)
+{
+ u8 i;
+ u8 rate;
+
+ for (i = 0; i < bssratelen; i++) {
+ rate = bssrateset[i] & 0x7f;
+ switch (rate) {
+ case IEEE80211_CCK_RATE_1MB:
+ case IEEE80211_CCK_RATE_2MB:
+ case IEEE80211_CCK_RATE_5MB:
+ case IEEE80211_CCK_RATE_11MB:
+ bssrateset[i] |= IEEE80211_BASIC_RATE_MASK;
+ break;
+ }
+ }
+}
+
+void Save_DM_Func_Flag(struct adapter *padapter)
+{
+ u8 saveflag = true;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_DM_FUNC_OP, (u8 *)(&saveflag));
+}
+
+void Restore_DM_Func_Flag(struct adapter *padapter)
+{
+ u8 saveflag = false;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_DM_FUNC_OP, (u8 *)(&saveflag));
+}
+
+void Switch_DM_Func(struct adapter *padapter, u32 mode, u8 enable)
+{
+ if (enable)
+ rtw_hal_set_hwreg(padapter, HW_VAR_DM_FUNC_SET, (u8 *)(&mode));
+ else
+ rtw_hal_set_hwreg(padapter, HW_VAR_DM_FUNC_CLR, (u8 *)(&mode));
+}
+
+static void Set_NETYPE0_MSR(struct adapter *padapter, u8 type)
+{
+ rtw_hal_set_hwreg(padapter, HW_VAR_MEDIA_STATUS, (u8 *)(&type));
+}
+
+void Set_MSR(struct adapter *padapter, u8 type)
+{
+ Set_NETYPE0_MSR(padapter, type);
+}
+
+inline u8 rtw_get_oper_ch(struct adapter *adapter)
+{
+ return adapter->mlmeextpriv.oper_channel;
+}
+	if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_RALINK)
+inline void rtw_set_oper_ch(struct adapter *adapter, u8 ch)
+{
+ adapter->mlmeextpriv.oper_channel = ch;
+}
+
+inline u8 rtw_get_oper_bw(struct adapter *adapter)
+{
+ return adapter->mlmeextpriv.oper_bwmode;
+}
+
+inline void rtw_set_oper_bw(struct adapter *adapter, u8 bw)
+{
+ adapter->mlmeextpriv.oper_bwmode = bw;
+}
+
+inline u8 rtw_get_oper_choffset(struct adapter *adapter)
+{
+ return adapter->mlmeextpriv.oper_ch_offset;
+}
+
+inline void rtw_set_oper_choffset(struct adapter *adapter, u8 offset)
+{
+ adapter->mlmeextpriv.oper_ch_offset = offset;
+}
+
+void SelectChannel(struct adapter *padapter, unsigned char channel)
+{
+ /* saved channel info */
+ rtw_set_oper_ch(padapter, channel);
+ rtw_hal_set_chan(padapter, channel);
+}
+
+void SetBWMode(struct adapter *padapter, unsigned short bwmode,
+ unsigned char channel_offset)
+{
+ /* saved bw info */
+ rtw_set_oper_bw(padapter, bwmode);
+ rtw_set_oper_choffset(padapter, channel_offset);
+
+ rtw_hal_set_bwmode(padapter, (enum ht_channel_width)bwmode, channel_offset);
+}
+
+void set_channel_bwmode(struct adapter *padapter, unsigned char channel, unsigned char channel_offset, unsigned short bwmode)
+{
+ u8 center_ch;
+
+ if (padapter->bNotifyChannelChange)
+ DBG_88E("[%s] ch = %d, offset = %d, bwmode = %d\n", __func__, channel, channel_offset, bwmode);
+
+ if ((bwmode == HT_CHANNEL_WIDTH_20) ||
+ (channel_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)) {
+ /* SelectChannel(padapter, channel); */
+ center_ch = channel;
+ } else {
+ /* switch to the proper channel */
+ if (channel_offset == HAL_PRIME_CHNL_OFFSET_LOWER) {
+ /* SelectChannel(padapter, channel + 2); */
+ center_ch = channel + 2;
+ } else {
+ /* SelectChannel(padapter, channel - 2); */
+ center_ch = channel - 2;
+ }
+ }
+
+ /* set Channel */
+ /* saved channel/bw info */
+ rtw_set_oper_ch(padapter, channel);
+ rtw_set_oper_bw(padapter, bwmode);
+ rtw_set_oper_choffset(padapter, channel_offset);
+
+ rtw_hal_set_chan(padapter, center_ch); /* set center channel */
+ SetBWMode(padapter, bwmode, channel_offset);
+}
+
+int get_bsstype(unsigned short capability)
+{
+ if (capability & BIT(0))
+ return WIFI_FW_AP_STATE;
+ else if (capability & BIT(1))
+ return WIFI_FW_ADHOC_STATE;
+ else
+ return 0;
+}
+
+inline u8 *get_my_bssid(struct wlan_bssid_ex *pnetwork)
+{
+ return pnetwork->MacAddress;
+}
+
+u16 get_beacon_interval(struct wlan_bssid_ex *bss)
+{
+ __le16 val;
+ memcpy((unsigned char *)&val, rtw_get_beacon_interval_from_ie(bss->IEs), 2);
+
+ return le16_to_cpu(val);
+}
+
+int is_client_associated_to_ap(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+
+ if (!padapter)
+ return _FAIL;
+
+ pmlmeext = &padapter->mlmeextpriv;
+ pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if ((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) && ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE))
+ return true;
+ else
+ return _FAIL;
+}
+
+int is_client_associated_to_ibss(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if ((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) && ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE))
+ return true;
+ else
+ return _FAIL;
+}
+
+int is_IBSS_empty(struct adapter *padapter)
+{
+ unsigned int i;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ for (i = IBSS_START_MAC_ID; i < NUM_STA; i++) {
+ if (pmlmeinfo->FW_sta_info[i].status == 1)
+ return _FAIL;
+ }
+ return true;
+}
+
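+/* Wait roughly four beacon intervals, clamped to the MIN/MAX bounds above. */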
+unsigned int decide_wait_for_beacon_timeout(unsigned int bcn_interval)
+{
+ if ((bcn_interval << 2) < WAIT_FOR_BCN_TO_MIN)
+ return WAIT_FOR_BCN_TO_MIN;
+ else if ((bcn_interval << 2) > WAIT_FOR_BCN_TO_MAX)
+ return WAIT_FOR_BCN_TO_MAX;
+ else
+ return bcn_interval << 2;
+}
+
+void CAM_empty_entry(struct adapter *Adapter, u8 ucIndex)
+{
+ rtw_hal_set_hwreg(Adapter, HW_VAR_CAM_EMPTY_ENTRY, (u8 *)(&ucIndex));
+}
+
+void invalidate_cam_all(struct adapter *padapter)
+{
+ rtw_hal_set_hwreg(padapter, HW_VAR_CAM_INVALID_ALL, NULL);
+}
+
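+/*
+ * CAM entries are addressed in units of eight 32-bit words (entry << 3):
+ * word 0 carries the control field plus the first two MAC bytes, word 1 the
+ * remaining four MAC bytes, and words 2-5 the 16-byte key.  The words are
+ * written from word 5 down to word 0 via HW_VAR_CAM_WRITE.
+ */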
+void write_cam(struct adapter *padapter, u8 entry, u16 ctrl, u8 *mac, u8 *key)
+{
+ unsigned int i, val, addr;
+ int j;
+ u32 cam_val[2];
+
+ addr = entry << 3;
+
+ for (j = 5; j >= 0; j--) {
+ switch (j) {
+ case 0:
+ val = (ctrl | (mac[0] << 16) | (mac[1] << 24));
+ break;
+ case 1:
+ val = (mac[2] | (mac[3] << 8) | (mac[4] << 16) | (mac[5] << 24));
+ break;
+ default:
+ i = (j - 2) << 2;
+ val = (key[i] | (key[i+1] << 8) | (key[i+2] << 16) | (key[i+3] << 24));
+ break;
+ }
+
+ cam_val[0] = val;
+ cam_val[1] = addr + (unsigned int)j;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_CAM_WRITE, (u8 *)cam_val);
+ }
+}
+
+void clear_cam_entry(struct adapter *padapter, u8 entry)
+{
+ unsigned char null_sta[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ unsigned char null_key[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+ write_cam(padapter, entry, 0, null_sta, null_key);
+}
+
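+/*
+ * Claim the first free FW_sta_info slot starting at IBSS_START_MAC_ID and
+ * return its mac_id; NUM_STA is returned when every slot is in use.
+ */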
+int allocate_fw_sta_entry(struct adapter *padapter)
+{
+ unsigned int mac_id;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ for (mac_id = IBSS_START_MAC_ID; mac_id < NUM_STA; mac_id++) {
+ if (pmlmeinfo->FW_sta_info[mac_id].status == 0) {
+ pmlmeinfo->FW_sta_info[mac_id].status = 1;
+ pmlmeinfo->FW_sta_info[mac_id].retry = 0;
+ break;
+ }
+ }
+
+ return mac_id;
+}
+
+void flush_all_cam_entry(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_CAM_INVALID_ALL, NULL);
+
+ _rtw_memset((u8 *)(pmlmeinfo->FW_sta_info), 0, sizeof(pmlmeinfo->FW_sta_info));
+}
+
+int WMM_param_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
+{
+ /* struct registry_priv *pregpriv = &padapter->registrypriv; */
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (pmlmepriv->qospriv.qos_option == 0) {
+ pmlmeinfo->WMM_enable = 0;
+ return _FAIL;
+ }
+
+ pmlmeinfo->WMM_enable = 1;
+ memcpy(&(pmlmeinfo->WMM_param), (pIE->data + 6), sizeof(struct WMM_para_element));
+ return true;
+}
+
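+/*
+ * Program the per-AC EDCA parameters from the AP's WMM parameter element.
+ * Each acParm packs AIFS in bits 0-7, ECWmin in 8-11, ECWmax in 12-15 and the
+ * TXOP limit in 16-31.  In wifi_spec mode the four ACs are additionally
+ * sorted by their EDCA parameters to build wmm_para_seq.
+ */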
+void WMMOnAssocRsp(struct adapter *padapter)
+{
+ u8 ACI, ACM, AIFS, ECWMin, ECWMax, aSifsTime;
+ u8 acm_mask;
+ u16 TXOP;
+ u32 acParm, i;
+ u32 edca[4], inx[4];
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+
+ if (pmlmeinfo->WMM_enable == 0) {
+ padapter->mlmepriv.acm_mask = 0;
+ return;
+ }
+
+ acm_mask = 0;
+
+ if (pmlmeext->cur_wireless_mode == WIRELESS_11B)
+ aSifsTime = 10;
+ else
+ aSifsTime = 16;
+
+ for (i = 0; i < 4; i++) {
+ ACI = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN >> 5) & 0x03;
+ ACM = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN >> 4) & 0x01;
+
+ /* AIFS = AIFSN * slot time + SIFS - r2t phy delay */
+ AIFS = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN & 0x0f) * pmlmeinfo->slotTime + aSifsTime;
+
+ ECWMin = (pmlmeinfo->WMM_param.ac_param[i].CW & 0x0f);
+ ECWMax = (pmlmeinfo->WMM_param.ac_param[i].CW & 0xf0) >> 4;
+ TXOP = le16_to_cpu(pmlmeinfo->WMM_param.ac_param[i].TXOP_limit);
+
+ acParm = AIFS | (ECWMin << 8) | (ECWMax << 12) | (TXOP << 16);
+
+ switch (ACI) {
+ case 0x0:
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BE, (u8 *)(&acParm));
+ acm_mask |= (ACM ? BIT(1) : 0);
+ edca[XMIT_BE_QUEUE] = acParm;
+ break;
+ case 0x1:
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BK, (u8 *)(&acParm));
+ edca[XMIT_BK_QUEUE] = acParm;
+ break;
+ case 0x2:
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VI, (u8 *)(&acParm));
+ acm_mask |= (ACM ? BIT(2) : 0);
+ edca[XMIT_VI_QUEUE] = acParm;
+ break;
+ case 0x3:
+ rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VO, (u8 *)(&acParm));
+ acm_mask |= (ACM ? BIT(3) : 0);
+ edca[XMIT_VO_QUEUE] = acParm;
+ break;
+ }
+
+ DBG_88E("WMM(%x): %x, %x\n", ACI, ACM, acParm);
+ }
+
+ if (padapter->registrypriv.acm_method == 1)
+ rtw_hal_set_hwreg(padapter, HW_VAR_ACM_CTRL, (u8 *)(&acm_mask));
+ else
+ padapter->mlmepriv.acm_mask = acm_mask;
+
+ inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;
+
+ if (pregpriv->wifi_spec == 1) {
+		u32 j, tmp, change_inx = false;
+
+ /* entry indx: 0->vo, 1->vi, 2->be, 3->bk. */
+ for (i = 0; i < 4; i++) {
+ for (j = i+1; j < 4; j++) {
+ /* compare CW and AIFS */
+ if ((edca[j] & 0xFFFF) < (edca[i] & 0xFFFF)) {
+ change_inx = true;
+ } else if ((edca[j] & 0xFFFF) == (edca[i] & 0xFFFF)) {
+ /* compare TXOP */
+ if ((edca[j] >> 16) > (edca[i] >> 16))
+ change_inx = true;
+ }
+
+ if (change_inx) {
+ tmp = edca[i];
+ edca[i] = edca[j];
+ edca[j] = tmp;
+
+ tmp = inx[i];
+ inx[i] = inx[j];
+ inx[j] = tmp;
+
+ change_inx = false;
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ pxmitpriv->wmm_para_seq[i] = inx[i];
+ DBG_88E("wmm_para_seq(%d): %d\n", i, pxmitpriv->wmm_para_seq[i]);
+ }
+}
+
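+/*
+ * Re-derive the 20/40MHz bandwidth and secondary channel offset from a
+ * received HT information element; if either changed, update the cached
+ * values and the associated AP's sta_info.
+ */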
+static void bwmode_update_check(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
+{
+ unsigned char new_bwmode;
+ unsigned char new_ch_offset;
+ struct HT_info_element *pHT_info;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+
+ if (!pIE)
+ return;
+
+ if (!phtpriv)
+ return;
+
+ if (pIE->Length > sizeof(struct HT_info_element))
+ return;
+
+ pHT_info = (struct HT_info_element *)pIE->data;
+
+ if ((pHT_info->infos[0] & BIT(2)) && pregistrypriv->cbw40_enable) {
+ new_bwmode = HT_CHANNEL_WIDTH_40;
+
+ switch (pHT_info->infos[0] & 0x3) {
+ case 1:
+ new_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
+ break;
+ case 3:
+ new_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
+ break;
+ default:
+ new_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ break;
+ }
+ } else {
+ new_bwmode = HT_CHANNEL_WIDTH_20;
+ new_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ }
+
+ if ((new_bwmode != pmlmeext->cur_bwmode) ||
+ (new_ch_offset != pmlmeext->cur_ch_offset)) {
+ pmlmeinfo->bwmode_updated = true;
+
+ pmlmeext->cur_bwmode = new_bwmode;
+ pmlmeext->cur_ch_offset = new_ch_offset;
+
+ /* update HT info also */
+ HT_info_handler(padapter, pIE);
+ } else {
+ pmlmeinfo->bwmode_updated = false;
+ }
+
+ if (pmlmeinfo->bwmode_updated) {
+ struct sta_info *psta;
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ /* set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode); */
+
+ /* update ap's stainfo */
+ psta = rtw_get_stainfo(pstapriv, cur_network->MacAddress);
+ if (psta) {
+ struct ht_priv *phtpriv_sta = &psta->htpriv;
+
+ if (phtpriv_sta->ht_option) {
+ /* bwmode */
+ phtpriv_sta->bwmode = pmlmeext->cur_bwmode;
+ phtpriv_sta->ch_offset = pmlmeext->cur_ch_offset;
+ } else {
+ phtpriv_sta->bwmode = HT_CHANNEL_WIDTH_20;
+ phtpriv_sta->ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ }
+ }
+ }
+}
+
+void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
+{
+ unsigned int i;
+ u8 rf_type;
+ u8 max_AMPDU_len, min_MPDU_spacing;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+
+ if (pIE == NULL)
+ return;
+
+ if (!phtpriv->ht_option)
+ return;
+
+ pmlmeinfo->HT_caps_enable = 1;
+
+ for (i = 0; i < (pIE->Length); i++) {
+ if (i != 2) {
+ /* Got the endian issue here. */
+ pmlmeinfo->HT_caps.u.HT_cap[i] &= (pIE->data[i]);
+ } else {
+ /* modify from fw by Thomas 2010/11/17 */
+ if ((pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x3) > (pIE->data[i] & 0x3))
+ max_AMPDU_len = (pIE->data[i] & 0x3);
+ else
+ max_AMPDU_len = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x3);
+
+ if ((pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) > (pIE->data[i] & 0x1c))
+ min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c);
+ else
+ min_MPDU_spacing = (pIE->data[i] & 0x1c);
+
+ pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para = max_AMPDU_len | min_MPDU_spacing;
+ }
+ }
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+
+ /* update the MCS rates */
+ for (i = 0; i < 16; i++) {
+ if ((rf_type == RF_1T1R) || (rf_type == RF_1T2R))
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_1R[i];
+ else
+ pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= MCS_rate_2R[i];
+ }
+}
+
+void HT_info_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ht_priv *phtpriv = &pmlmepriv->htpriv;
+
+ if (pIE == NULL)
+ return;
+
+ if (!phtpriv->ht_option)
+ return;
+
+ if (pIE->Length > sizeof(struct HT_info_element))
+ return;
+
+ pmlmeinfo->HT_info_enable = 1;
+ memcpy(&(pmlmeinfo->HT_info), pIE->data, pIE->Length);
+}
+
+void HTOnAssocRsp(struct adapter *padapter)
+{
+ unsigned char max_AMPDU_len;
+ unsigned char min_MPDU_spacing;
+ /* struct registry_priv *pregpriv = &padapter->registrypriv; */
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ DBG_88E("%s\n", __func__);
+
+ if ((pmlmeinfo->HT_info_enable) && (pmlmeinfo->HT_caps_enable)) {
+ pmlmeinfo->HT_enable = 1;
+ } else {
+ pmlmeinfo->HT_enable = 0;
+ return;
+ }
+
+ /* handle A-MPDU parameter field */
+ /*
+ AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
+ AMPDU_para [4:2]:Min MPDU Start Spacing
+ */
+ max_AMPDU_len = pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x03;
+
+ min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) >> 2;
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
+}
+
+void ERP_IE_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (pIE->Length > 1)
+ return;
+
+ pmlmeinfo->ERP_enable = 1;
+ memcpy(&(pmlmeinfo->ERP_IE), pIE->data, pIE->Length);
+}
+
+void VCS_update(struct adapter *padapter, struct sta_info *psta)
+{
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ switch (pregpriv->vrtl_carrier_sense) { /* 0:off 1:on 2:auto */
+ case 0: /* off */
+ psta->rtsen = 0;
+ psta->cts2self = 0;
+ break;
+ case 1: /* on */
+ if (pregpriv->vcs_type == 1) { /* 1:RTS/CTS 2:CTS to self */
+ psta->rtsen = 1;
+ psta->cts2self = 0;
+ } else {
+ psta->rtsen = 0;
+ psta->cts2self = 1;
+ }
+ break;
+ case 2: /* auto */
+ default:
+ if ((pmlmeinfo->ERP_enable) && (pmlmeinfo->ERP_IE & BIT(1))) {
+ if (pregpriv->vcs_type == 1) {
+ psta->rtsen = 1;
+ psta->cts2self = 0;
+ } else {
+ psta->rtsen = 0;
+ psta->cts2self = 1;
+ }
+ } else {
+ psta->rtsen = 0;
+ psta->cts2self = 0;
+ }
+ break;
+ }
+}
+
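+/*
+ * Compare a beacon from the currently associated AP with the cached network
+ * information.  Channel, SSID, privacy, cipher-suite or 802.1X mismatches
+ * return _FAIL; HT capability changes only refresh the cached values.
+ */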
+int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
+{
+ unsigned int len;
+ unsigned char *p;
+ unsigned short val16, subtype;
+ struct wlan_network *cur_network = &(Adapter->mlmepriv.cur_network);
+ /* u8 wpa_ie[255], rsn_ie[255]; */
+ u16 wpa_len = 0, rsn_len = 0;
+ u8 encryp_protocol = 0;
+ struct wlan_bssid_ex *bssid;
+ int group_cipher = 0, pairwise_cipher = 0, is_8021x = 0;
+ unsigned char *pbuf;
+ u32 wpa_ielen = 0;
+ u8 *pbssid = GetAddr3Ptr(pframe);
+ u32 hidden_ssid = 0;
+ struct HT_info_element *pht_info = NULL;
+ struct rtw_ieee80211_ht_cap *pht_cap = NULL;
+ u32 bcn_channel;
+ unsigned short ht_cap_info;
+ unsigned char ht_info_infos_0;
+
+	if (!is_client_associated_to_ap(Adapter))
+ return true;
+
+ len = packet_len - sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ if (len > MAX_IE_SZ) {
+ DBG_88E("%s IE too long for survey event\n", __func__);
+ return _FAIL;
+ }
+
+	if (!_rtw_memcmp(cur_network->network.MacAddress, pbssid, 6)) {
+		DBG_88E("%s: linked but received a beacon from another bssid\n%pM %pM\n",
+			__func__, pbssid, cur_network->network.MacAddress);
+ return true;
+ }
+
+	bssid = (struct wlan_bssid_ex *)rtw_zmalloc(sizeof(struct wlan_bssid_ex));
+	if (!bssid)
+		return _FAIL;
+
+ subtype = GetFrameSubType(pframe) >> 4;
+
+ if (subtype == WIFI_BEACON)
+ bssid->Reserved[0] = 1;
+
+ bssid->Length = sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + len;
+
+ /* below is to copy the information element */
+ bssid->IELength = len;
+ memcpy(bssid->IEs, (pframe + sizeof(struct rtw_ieee80211_hdr_3addr)), bssid->IELength);
+
+ /* check bw and channel offset */
+ /* parsing HT_CAP_IE */
+ p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _HT_CAPABILITY_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ if (p && len > 0) {
+ pht_cap = (struct rtw_ieee80211_ht_cap *)(p + 2);
+ ht_cap_info = pht_cap->cap_info;
+ } else {
+ ht_cap_info = 0;
+ }
+ /* parsing HT_INFO_IE */
+ p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ if (p && len > 0) {
+ pht_info = (struct HT_info_element *)(p + 2);
+ ht_info_infos_0 = pht_info->infos[0];
+ } else {
+ ht_info_infos_0 = 0;
+ }
+ if (ht_cap_info != cur_network->BcnInfo.ht_cap_info ||
+ ((ht_info_infos_0&0x03) != (cur_network->BcnInfo.ht_info_infos_0&0x03))) {
+ DBG_88E("%s bcn now: ht_cap_info:%x ht_info_infos_0:%x\n", __func__,
+ ht_cap_info, ht_info_infos_0);
+ DBG_88E("%s bcn link: ht_cap_info:%x ht_info_infos_0:%x\n", __func__,
+ cur_network->BcnInfo.ht_cap_info, cur_network->BcnInfo.ht_info_infos_0);
+ DBG_88E("%s bw mode change, disconnect\n", __func__);
+ /* bcn_info_update */
+ cur_network->BcnInfo.ht_cap_info = ht_cap_info;
+ cur_network->BcnInfo.ht_info_infos_0 = ht_info_infos_0;
+ /* to do : need to check that whether modify related register of BB or not */
+ /* goto _mismatch; */
+ }
+
+ /* Checking for channel */
+ p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _DSSET_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ if (p) {
+ bcn_channel = *(p + 2);
+ } else {/* In 5G, some ap do not have DSSET IE checking HT info for channel */
+ p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _HT_ADD_INFO_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ if (pht_info) {
+ bcn_channel = pht_info->primary_channel;
+ } else { /* we don't find channel IE, so don't check it */
+ DBG_88E("Oops: %s we don't find channel IE, so don't check it\n", __func__);
+ bcn_channel = Adapter->mlmeextpriv.cur_channel;
+ }
+ }
+ if (bcn_channel != Adapter->mlmeextpriv.cur_channel) {
+ DBG_88E("%s beacon channel:%d cur channel:%d disconnect\n", __func__,
+ bcn_channel, Adapter->mlmeextpriv.cur_channel);
+ goto _mismatch;
+ }
+
+ /* checking SSID */
+ p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
+ if (p == NULL) {
+ DBG_88E("%s marc: cannot find SSID for survey event\n", __func__);
+ hidden_ssid = true;
+ } else {
+ hidden_ssid = false;
+ }
+
+	if (p && !hidden_ssid && (*(p + 1))) {
+ memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
+ bssid->Ssid.SsidLength = *(p + 1);
+ } else {
+ bssid->Ssid.SsidLength = 0;
+ bssid->Ssid.Ssid[0] = '\0';
+ }
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d "
+ "cur_network->network.Ssid.Ssid:%s len:%d\n", __func__, bssid->Ssid.Ssid,
+ bssid->Ssid.SsidLength, cur_network->network.Ssid.Ssid,
+ cur_network->network.Ssid.SsidLength));
+
+ if (!_rtw_memcmp(bssid->Ssid.Ssid, cur_network->network.Ssid.Ssid, 32) ||
+ bssid->Ssid.SsidLength != cur_network->network.Ssid.SsidLength) {
+ if (bssid->Ssid.Ssid[0] != '\0' && bssid->Ssid.SsidLength != 0) { /* not hidden ssid */
+			DBG_88E("%s(), SSID does not match, return FAIL\n", __func__);
+ goto _mismatch;
+ }
+ }
+
+ /* check encryption info */
+ val16 = rtw_get_capability((struct wlan_bssid_ex *)bssid);
+
+ if (val16 & BIT(4))
+ bssid->Privacy = 1;
+ else
+ bssid->Privacy = 0;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("%s(): cur_network->network.Privacy is %d, bssid.Privacy is %d\n",
+ __func__, cur_network->network.Privacy, bssid->Privacy));
+ if (cur_network->network.Privacy != bssid->Privacy) {
+		DBG_88E("%s(), privacy does not match, return FAIL\n", __func__);
+ goto _mismatch;
+ }
+
+ rtw_get_sec_ie(bssid->IEs, bssid->IELength, NULL, &rsn_len, NULL, &wpa_len);
+
+ if (rsn_len > 0) {
+ encryp_protocol = ENCRYP_PROTOCOL_WPA2;
+ } else if (wpa_len > 0) {
+ encryp_protocol = ENCRYP_PROTOCOL_WPA;
+ } else {
+ if (bssid->Privacy)
+ encryp_protocol = ENCRYP_PROTOCOL_WEP;
+ }
+
+ if (cur_network->BcnInfo.encryp_protocol != encryp_protocol) {
+		DBG_88E("%s(): encryption protocol does not match, return FAIL\n", __func__);
+ goto _mismatch;
+ }
+
+ if (encryp_protocol == ENCRYP_PROTOCOL_WPA || encryp_protocol == ENCRYP_PROTOCOL_WPA2) {
+ pbuf = rtw_get_wpa_ie(&bssid->IEs[12], &wpa_ielen, bssid->IELength-12);
+ if (pbuf && (wpa_ielen > 0)) {
+ if (_SUCCESS == rtw_parse_wpa_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is_8021x)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("%s pnetwork->pairwise_cipher: %d, group_cipher is %d, is_8021x is %d\n", __func__,
+ pairwise_cipher, group_cipher, is_8021x));
+ }
+ } else {
+ pbuf = rtw_get_wpa2_ie(&bssid->IEs[12], &wpa_ielen, bssid->IELength-12);
+
+ if (pbuf && (wpa_ielen > 0)) {
+ if (_SUCCESS == rtw_parse_wpa2_ie(pbuf, wpa_ielen+2, &group_cipher, &pairwise_cipher, &is_8021x)) {
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_,
+ ("%s pnetwork->pairwise_cipher: %d, pnetwork->group_cipher is %d, is_802x is %d\n",
+ __func__, pairwise_cipher, group_cipher, is_8021x));
+ }
+ }
+ }
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_err_,
+ ("%s cur_network->group_cipher is %d: %d\n", __func__, cur_network->BcnInfo.group_cipher, group_cipher));
+ if (pairwise_cipher != cur_network->BcnInfo.pairwise_cipher || group_cipher != cur_network->BcnInfo.group_cipher) {
+			DBG_88E("%s pairwise_cipher(%x:%x) or group_cipher(%x:%x) does not match, return FAIL\n", __func__,
+ pairwise_cipher, cur_network->BcnInfo.pairwise_cipher,
+ group_cipher, cur_network->BcnInfo.group_cipher);
+ goto _mismatch;
+ }
+
+ if (is_8021x != cur_network->BcnInfo.is_8021x) {
+			DBG_88E("%s authentication setting does not match, return FAIL\n", __func__);
+ goto _mismatch;
+ }
+ }
+
+ kfree(bssid);
+ return _SUCCESS;
+
+_mismatch:
+ kfree(bssid);
+ return _FAIL;
+}
+
+void update_beacon_info(struct adapter *padapter, u8 *pframe, uint pkt_len, struct sta_info *psta)
+{
+ unsigned int i;
+ unsigned int len;
+ struct ndis_802_11_var_ie *pIE;
+
+ len = pkt_len - (_BEACON_IE_OFFSET_ + WLAN_HDR_A3_LEN);
+
+ for (i = 0; i < len;) {
+ pIE = (struct ndis_802_11_var_ie *)(pframe + (_BEACON_IE_OFFSET_ + WLAN_HDR_A3_LEN) + i);
+
+ switch (pIE->ElementID) {
+ case _HT_EXTRA_INFO_IE_: /* HT info */
+ /* HT_info_handler(padapter, pIE); */
+ bwmode_update_check(padapter, pIE);
+ break;
+ case _ERPINFO_IE_:
+ ERP_IE_handler(padapter, pIE);
+ VCS_update(padapter, psta);
+ break;
+ default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+}
+
+unsigned int is_ap_in_tkip(struct adapter *padapter)
+{
+ u32 i;
+ struct ndis_802_11_var_ie *pIE;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+
+ if (rtw_get_capability((struct wlan_bssid_ex *)cur_network) & WLAN_CAPABILITY_PRIVACY) {
+ for (i = sizeof(struct ndis_802_11_fixed_ie); i < pmlmeinfo->network.IELength;) {
+ pIE = (struct ndis_802_11_var_ie *)(pmlmeinfo->network.IEs + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+ if ((_rtw_memcmp(pIE->data, RTW_WPA_OUI, 4)) && (_rtw_memcmp((pIE->data + 12), WPA_TKIP_CIPHER, 4)))
+ return true;
+ break;
+ case _RSN_IE_2_:
+ if (_rtw_memcmp((pIE->data + 8), RSN_TKIP_CIPHER, 4))
+ return true;
+ default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+ return false;
+ } else {
+ return false;
+ }
+}
+
+unsigned int should_forbid_n_rate(struct adapter *padapter)
+{
+ u32 i;
+ struct ndis_802_11_var_ie *pIE;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct wlan_bssid_ex *cur_network = &pmlmepriv->cur_network.network;
+
+ if (rtw_get_capability((struct wlan_bssid_ex *)cur_network) & WLAN_CAPABILITY_PRIVACY) {
+ for (i = sizeof(struct ndis_802_11_fixed_ie); i < cur_network->IELength;) {
+ pIE = (struct ndis_802_11_var_ie *)(cur_network->IEs + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+ if (_rtw_memcmp(pIE->data, RTW_WPA_OUI, 4) &&
+ ((_rtw_memcmp((pIE->data + 12), WPA_CIPHER_SUITE_CCMP, 4)) ||
+ (_rtw_memcmp((pIE->data + 16), WPA_CIPHER_SUITE_CCMP, 4))))
+ return false;
+ break;
+ case _RSN_IE_2_:
+ if ((_rtw_memcmp((pIE->data + 8), RSN_CIPHER_SUITE_CCMP, 4)) ||
+ (_rtw_memcmp((pIE->data + 12), RSN_CIPHER_SUITE_CCMP, 4)))
+ return false;
+ default:
+ break;
+ }
+
+ i += (pIE->Length + 2);
+ }
+
+ return true;
+ } else {
+ return false;
+ }
+}
+
+unsigned int is_ap_in_wep(struct adapter *padapter)
+{
+ u32 i;
+ struct ndis_802_11_var_ie *pIE;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+
+ if (rtw_get_capability((struct wlan_bssid_ex *)cur_network) & WLAN_CAPABILITY_PRIVACY) {
+ for (i = sizeof(struct ndis_802_11_fixed_ie); i < pmlmeinfo->network.IELength;) {
+ pIE = (struct ndis_802_11_var_ie *)(pmlmeinfo->network.IEs + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+ if (_rtw_memcmp(pIE->data, RTW_WPA_OUI, 4))
+ return false;
+ break;
+ case _RSN_IE_2_:
+ return false;
+ default:
+ break;
+ }
+ i += (pIE->Length + 2);
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
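+/*
+ * Map a rate in 500kb/s units (basic-rate bit ignored) to its rate table
+ * index: 0 = 1M ... 11 = 54M.
+ */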
+int wifirate2_ratetbl_inx(unsigned char rate)
+{
+ int inx = 0;
+ rate = rate & 0x7f;
+
+ switch (rate) {
+ case 54*2:
+ inx = 11;
+ break;
+ case 48*2:
+ inx = 10;
+ break;
+ case 36*2:
+ inx = 9;
+ break;
+ case 24*2:
+ inx = 8;
+ break;
+ case 18*2:
+ inx = 7;
+ break;
+ case 12*2:
+ inx = 6;
+ break;
+ case 9*2:
+ inx = 5;
+ break;
+ case 6*2:
+ inx = 4;
+ break;
+ case 11*2:
+ inx = 3;
+ break;
+ case 11:
+ inx = 2;
+ break;
+ case 2*2:
+ inx = 1;
+ break;
+ case 1*2:
+ inx = 0;
+ break;
+ }
+ return inx;
+}
+
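+/*
+ * Build a bitmask of rate-table indices for every rate in the pattern that
+ * has the basic-rate bit (0x80) set.
+ */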
+unsigned int update_basic_rate(unsigned char *ptn, unsigned int ptn_sz)
+{
+ unsigned int i, num_of_rate;
+ unsigned int mask = 0;
+
+ num_of_rate = (ptn_sz > NumRates) ? NumRates : ptn_sz;
+
+ for (i = 0; i < num_of_rate; i++) {
+ if ((*(ptn + i)) & 0x80)
+ mask |= 0x1 << wifirate2_ratetbl_inx(*(ptn + i));
+ }
+ return mask;
+}
+
+unsigned int update_supported_rate(unsigned char *ptn, unsigned int ptn_sz)
+{
+ unsigned int i, num_of_rate;
+ unsigned int mask = 0;
+
+ num_of_rate = (ptn_sz > NumRates) ? NumRates : ptn_sz;
+
+ for (i = 0; i < num_of_rate; i++)
+ mask |= 0x1 << wifirate2_ratetbl_inx(*(ptn + i));
+ return mask;
+}
+
+unsigned int update_MSC_rate(struct HT_caps_element *pHT_caps)
+{
+ unsigned int mask = 0;
+
+ mask = ((pHT_caps->u.HT_cap_element.MCS_rate[0] << 12) | (pHT_caps->u.HT_cap_element.MCS_rate[1] << 20));
+
+ return mask;
+}
+
+int support_short_GI(struct adapter *padapter, struct HT_caps_element *pHT_caps)
+{
+ unsigned char bit_offset;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (!(pmlmeinfo->HT_enable))
+ return _FAIL;
+
+ if ((pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_RALINK))
+ return _FAIL;
+
+ bit_offset = (pmlmeext->cur_bwmode & HT_CHANNEL_WIDTH_40) ? 6 : 5;
+
+ if (__le16_to_cpu(pHT_caps->u.HT_cap_element.HT_caps_info) & (0x1 << bit_offset))
+ return _SUCCESS;
+ else
+ return _FAIL;
+}
+
+unsigned char get_highest_rate_idx(u32 mask)
+{
+ int i;
+ unsigned char rate_idx = 0;
+
+ for (i = 27; i >= 0; i--) {
+ if (mask & BIT(i)) {
+ rate_idx = i;
+ break;
+ }
+ }
+ return rate_idx;
+}
+
+void Update_RA_Entry(struct adapter *padapter, u32 mac_id)
+{
+ rtw_hal_update_ra_mask(padapter, mac_id, 0);
+}
+
+static void enable_rate_adaptive(struct adapter *padapter, u32 mac_id)
+{
+ Update_RA_Entry(padapter, mac_id);
+}
+
+void set_sta_rate(struct adapter *padapter, struct sta_info *psta)
+{
+ /* rate adaptive */
+ enable_rate_adaptive(padapter, psta->mac_id);
+}
+
+/* Update RRSR and Rate for USERATE */
+void update_tx_basic_rate(struct adapter *padapter, u8 wirelessmode)
+{
+ unsigned char supported_rates[NDIS_802_11_LENGTH_RATES_EX];
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+
+ /* Added by Albert 2011/03/22 */
+ /* In the P2P mode, the driver should not support the b mode. */
+ /* So, the Tx packet shouldn't use the CCK rate */
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
+ return;
+#endif /* CONFIG_88EU_P2P */
+ _rtw_memset(supported_rates, 0, NDIS_802_11_LENGTH_RATES_EX);
+
+	if (wirelessmode == WIRELESS_11B) {
+ memcpy(supported_rates, rtw_basic_rate_cck, 4);
+ } else if (wirelessmode & WIRELESS_11B) {
+ memcpy(supported_rates, rtw_basic_rate_mix, 7);
+ } else {
+ memcpy(supported_rates, rtw_basic_rate_ofdm, 3);
+ }
+
+ if (wirelessmode & WIRELESS_11B)
+ update_mgnt_tx_rate(padapter, IEEE80211_CCK_RATE_1MB);
+ else
+ update_mgnt_tx_rate(padapter, IEEE80211_OFDM_RATE_6MB);
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_BASIC_RATE, supported_rates);
+}
+
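+/*
+ * Guess the peer AP's vendor from the OUIs found in its vendor-specific IEs.
+ * A Ralink OUI together with an Epigram OUI is reported as a Tenda W311R.
+ */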
+unsigned char check_assoc_AP(u8 *pframe, uint len)
+{
+ unsigned int i;
+ struct ndis_802_11_var_ie *pIE;
+ u8 epigram_vendor_flag;
+ u8 ralink_vendor_flag;
+ epigram_vendor_flag = 0;
+ ralink_vendor_flag = 0;
+
+ for (i = sizeof(struct ndis_802_11_fixed_ie); i < len;) {
+ pIE = (struct ndis_802_11_var_ie *)(pframe + i);
+
+ switch (pIE->ElementID) {
+ case _VENDOR_SPECIFIC_IE_:
+			if ((_rtw_memcmp(pIE->data, ARTHEROS_OUI1, 3)) ||
+			    (_rtw_memcmp(pIE->data, ARTHEROS_OUI2, 3))) {
+				DBG_88E("link to Atheros AP\n");
+				return HT_IOT_PEER_ATHEROS;
+			} else if ((_rtw_memcmp(pIE->data, BROADCOM_OUI1, 3)) ||
+				   (_rtw_memcmp(pIE->data, BROADCOM_OUI2, 3))) {
+ DBG_88E("link to Broadcom AP\n");
+ return HT_IOT_PEER_BROADCOM;
+ } else if (_rtw_memcmp(pIE->data, MARVELL_OUI, 3)) {
+ DBG_88E("link to Marvell AP\n");
+ return HT_IOT_PEER_MARVELL;
+ } else if (_rtw_memcmp(pIE->data, RALINK_OUI, 3)) {
+ if (!ralink_vendor_flag) {
+ ralink_vendor_flag = 1;
+ } else {
+ DBG_88E("link to Ralink AP\n");
+ return HT_IOT_PEER_RALINK;
+ }
+ } else if (_rtw_memcmp(pIE->data, CISCO_OUI, 3)) {
+ DBG_88E("link to Cisco AP\n");
+ return HT_IOT_PEER_CISCO;
+ } else if (_rtw_memcmp(pIE->data, REALTEK_OUI, 3)) {
+ DBG_88E("link to Realtek 96B\n");
+ return HT_IOT_PEER_REALTEK;
+ } else if (_rtw_memcmp(pIE->data, AIRGOCAP_OUI, 3)) {
+ DBG_88E("link to Airgo Cap\n");
+ return HT_IOT_PEER_AIRGO;
+ } else if (_rtw_memcmp(pIE->data, EPIGRAM_OUI, 3)) {
+ epigram_vendor_flag = 1;
+ if (ralink_vendor_flag) {
+ DBG_88E("link to Tenda W311R AP\n");
+ return HT_IOT_PEER_TENDA;
+ } else {
+ DBG_88E("Capture EPIGRAM_OUI\n");
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ i += (pIE->Length + 2);
+ }
+
+ if (ralink_vendor_flag && !epigram_vendor_flag) {
+ DBG_88E("link to Ralink AP\n");
+ return HT_IOT_PEER_RALINK;
+ } else if (ralink_vendor_flag && epigram_vendor_flag) {
+ DBG_88E("link to Tenda W311R AP\n");
+ return HT_IOT_PEER_TENDA;
+ } else {
+ DBG_88E("link to new AP\n");
+ return HT_IOT_PEER_UNKNOWN;
+ }
+}
+
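+/*
+ * Apply per-vendor interoperability settings after association: select
+ * CTS-to-self or RTS/CTS for turbo mode and disable dynamic TX power
+ * control for Ralink and Realtek peers.
+ */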
+void update_IOT_info(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ switch (pmlmeinfo->assoc_AP_vendor) {
+ case HT_IOT_PEER_MARVELL:
+ pmlmeinfo->turboMode_cts2self = 1;
+ pmlmeinfo->turboMode_rtsen = 0;
+ break;
+ case HT_IOT_PEER_RALINK:
+ pmlmeinfo->turboMode_cts2self = 0;
+ pmlmeinfo->turboMode_rtsen = 1;
+ /* disable high power */
+ Switch_DM_Func(padapter, (~DYNAMIC_BB_DYNAMIC_TXPWR), false);
+ break;
+ case HT_IOT_PEER_REALTEK:
+ /* rtw_write16(padapter, 0x4cc, 0xffff); */
+ /* rtw_write16(padapter, 0x546, 0x01c0); */
+ /* disable high power */
+ Switch_DM_Func(padapter, (~DYNAMIC_BB_DYNAMIC_TXPWR), false);
+ break;
+ default:
+ pmlmeinfo->turboMode_cts2self = 0;
+ pmlmeinfo->turboMode_rtsen = 1;
+ break;
+ }
+}
+
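+/*
+ * Track the capability field of the current BSS: switch between short and
+ * long preamble and pick the slot time (per 802.11-2007), then program the
+ * corresponding hardware registers.
+ */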
+void update_capinfo(struct adapter *Adapter, u16 updateCap)
+{
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ bool ShortPreamble;
+
+ /* Check preamble mode, 2005.01.06, by rcnjko. */
+ /* Mark to update preamble value forever, 2008.03.18 by lanhsin */
+
+ if (updateCap & cShortPreamble) { /* Short Preamble */
+ if (pmlmeinfo->preamble_mode != PREAMBLE_SHORT) { /* PREAMBLE_LONG or PREAMBLE_AUTO */
+ ShortPreamble = true;
+ pmlmeinfo->preamble_mode = PREAMBLE_SHORT;
+ rtw_hal_set_hwreg(Adapter, HW_VAR_ACK_PREAMBLE, (u8 *)&ShortPreamble);
+ }
+ } else { /* Long Preamble */
+ if (pmlmeinfo->preamble_mode != PREAMBLE_LONG) { /* PREAMBLE_SHORT or PREAMBLE_AUTO */
+ ShortPreamble = false;
+ pmlmeinfo->preamble_mode = PREAMBLE_LONG;
+ rtw_hal_set_hwreg(Adapter, HW_VAR_ACK_PREAMBLE, (u8 *)&ShortPreamble);
+ }
+ }
+
+ if (updateCap & cIBSS) {
+ /* Filen: See 802.11-2007 p.91 */
+ pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME;
+ } else { /* Filen: See 802.11-2007 p.90 */
+ if (pmlmeext->cur_wireless_mode & (WIRELESS_11G | WIRELESS_11_24N)) {
+ if (updateCap & cShortSlotTime) { /* Short Slot Time */
+ if (pmlmeinfo->slotTime != SHORT_SLOT_TIME)
+ pmlmeinfo->slotTime = SHORT_SLOT_TIME;
+ } else { /* Long Slot Time */
+ if (pmlmeinfo->slotTime != NON_SHORT_SLOT_TIME)
+ pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME;
+ }
+ } else if (pmlmeext->cur_wireless_mode & (WIRELESS_11A | WIRELESS_11_5N)) {
+ pmlmeinfo->slotTime = SHORT_SLOT_TIME;
+ } else {
+ /* B Mode */
+ pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME;
+ }
+ }
+
+ rtw_hal_set_hwreg(Adapter, HW_VAR_SLOT_TIME, &pmlmeinfo->slotTime);
+}
+
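+/*
+ * Derive cur_wireless_mode from the current channel, HT state and the
+ * supported rate set, then program the response SIFS and the management
+ * frame TX rate accordingly.
+ */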
+void update_wireless_mode(struct adapter *padapter)
+{
+ int ratelen, network_type = 0;
+ u32 SIFS_Timer;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ unsigned char *rate = cur_network->SupportedRates;
+
+ ratelen = rtw_get_rateset_len(cur_network->SupportedRates);
+
+ if ((pmlmeinfo->HT_info_enable) && (pmlmeinfo->HT_caps_enable))
+ pmlmeinfo->HT_enable = 1;
+
+ if (pmlmeext->cur_channel > 14) {
+ if (pmlmeinfo->HT_enable)
+ network_type = WIRELESS_11_5N;
+
+ network_type |= WIRELESS_11A;
+ } else {
+ if (pmlmeinfo->HT_enable)
+ network_type = WIRELESS_11_24N;
+
+ if (cckratesonly_included(rate, ratelen))
+ network_type |= WIRELESS_11B;
+ else if (cckrates_included(rate, ratelen))
+ network_type |= WIRELESS_11BG;
+ else
+ network_type |= WIRELESS_11G;
+ }
+
+ pmlmeext->cur_wireless_mode = network_type & padapter->registrypriv.wireless_mode;
+
+ SIFS_Timer = 0x0a0a0808;/* 0x0808 -> for CCK, 0x0a0a -> for OFDM */
+ /* change this value if having IOT issues. */
+
+ padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_RESP_SIFS, (u8 *)&SIFS_Timer);
+
+ if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
+ update_mgnt_tx_rate(padapter, IEEE80211_CCK_RATE_1MB);
+ else
+ update_mgnt_tx_rate(padapter, IEEE80211_OFDM_RATE_6MB);
+}
+
+void update_bmc_sta_support_rate(struct adapter *padapter, u32 mac_id)
+{
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (pmlmeext->cur_wireless_mode & WIRELESS_11B) {
+ /* Only B, B/G, and B/G/N AP could use CCK rate */
+ memcpy((pmlmeinfo->FW_sta_info[mac_id].SupportedRates), rtw_basic_rate_cck, 4);
+ } else {
+ memcpy((pmlmeinfo->FW_sta_info[mac_id].SupportedRates), rtw_basic_rate_ofdm, 3);
+ }
+}
+
+int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_len, int cam_idx)
+{
+ unsigned int ie_len;
+ struct ndis_802_11_var_ie *pIE;
+ int supportRateNum = 0;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ pIE = (struct ndis_802_11_var_ie *)rtw_get_ie(pvar_ie, _SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
+ if (pIE == NULL)
+ return _FAIL;
+
+ memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len);
+ supportRateNum = ie_len;
+
+ pIE = (struct ndis_802_11_var_ie *)rtw_get_ie(pvar_ie, _EXT_SUPPORTEDRATES_IE_, &ie_len, var_ie_len);
+ if (pIE)
+ memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len);
+
+ return _SUCCESS;
+}
+
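+/*
+ * Handle an incoming ADDBA request: extract the TID from bits 5:2 of the
+ * BA parameter set, reset the RX reorder window for that TID and enable or
+ * disable reordering according to bAcceptAddbaReq.
+ */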
+void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr)
+{
+ struct sta_info *psta;
+ u16 tid;
+ u16 param;
+ struct recv_reorder_ctrl *preorder_ctrl;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct ADDBA_request *preq = (struct ADDBA_request *)paddba_req;
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ psta = rtw_get_stainfo(pstapriv, addr);
+
+ if (psta) {
+ param = le16_to_cpu(preq->BA_para_set);
+ tid = (param>>2)&0x0f;
+ preorder_ctrl = &psta->recvreorder_ctrl[tid];
+ preorder_ctrl->indicate_seq = 0xffff;
+ preorder_ctrl->enable = (pmlmeinfo->bAcceptAddbaReq) ? true : false;
+ }
+}
+
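+/* Assemble the 64-bit TSF from the two little-endian 32-bit halves of the beacon timestamp field. */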
+void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len)
+{
+ u8 *pIE;
+ __le32 *pbuf;
+
+ pIE = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
+ pbuf = (__le32 *)pIE;
+
+ pmlmeext->TSFValue = le32_to_cpu(*(pbuf+1));
+
+ pmlmeext->TSFValue = pmlmeext->TSFValue << 32;
+
+ pmlmeext->TSFValue |= le32_to_cpu(*pbuf);
+}
+
+void correct_TSF(struct adapter *padapter, struct mlme_ext_priv *pmlmeext)
+{
+ rtw_hal_set_hwreg(padapter, HW_VAR_CORRECT_TSF, NULL);
+}
+
+void beacon_timing_control(struct adapter *padapter)
+{
+ rtw_hal_bcn_related_reg_setting(padapter);
+}
+
+static struct adapter *pbuddy_padapter;
+
+int rtw_handle_dualmac(struct adapter *adapter, bool init)
+{
+ int status = _SUCCESS;
+
+ if (init) {
+ if (pbuddy_padapter == NULL) {
+ pbuddy_padapter = adapter;
+ DBG_88E("%s(): pbuddy_padapter == NULL, Set pbuddy_padapter\n", __func__);
+ } else {
+ adapter->pbuddy_adapter = pbuddy_padapter;
+ pbuddy_padapter->pbuddy_adapter = adapter;
+ /* clear global value */
+ pbuddy_padapter = NULL;
+ DBG_88E("%s(): pbuddy_padapter exist, Exchange Information\n", __func__);
+ }
+ } else {
+ pbuddy_padapter = NULL;
+ }
+ return status;
+}
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
new file mode 100644
index 00000000000..bb5cd95c564
--- /dev/null
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -0,0 +1,2447 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTW_XMIT_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wifi.h>
+#include <osdep_intf.h>
+#include <ip.h>
+#include <usb_ops.h>
+#include <usb_osintf.h>
+
+static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
+static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
+
+static void _init_txservq(struct tx_servq *ptxservq)
+{
+_func_enter_;
+ _rtw_init_listhead(&ptxservq->tx_pending);
+ _rtw_init_queue(&ptxservq->sta_pending);
+ ptxservq->qcnt = 0;
+_func_exit_;
+}
+
+void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
+{
+_func_enter_;
+ _rtw_memset((unsigned char *)psta_xmitpriv, 0, sizeof(struct sta_xmit_priv));
+ _rtw_spinlock_init(&psta_xmitpriv->lock);
+ _init_txservq(&psta_xmitpriv->be_q);
+ _init_txservq(&psta_xmitpriv->bk_q);
+ _init_txservq(&psta_xmitpriv->vi_q);
+ _init_txservq(&psta_xmitpriv->vo_q);
+ _rtw_init_listhead(&psta_xmitpriv->legacy_dz);
+ _rtw_init_listhead(&psta_xmitpriv->apsd);
+
+_func_exit_;
+}
+
+s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
+{
+ int i;
+ struct xmit_buf *pxmitbuf;
+ struct xmit_frame *pxframe;
+ int res = _SUCCESS;
+ u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
+ u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
+
+_func_enter_;
+
+ /* We don't need to memset padapter->XXX to zero, because adapter is allocated by rtw_zvmalloc(). */
+
+ _rtw_spinlock_init(&pxmitpriv->lock);
+ _rtw_init_sema(&pxmitpriv->xmit_sema, 0);
+ _rtw_init_sema(&pxmitpriv->terminate_xmitthread_sema, 0);
+
+ /*
+ Please insert all the queue initialization using _rtw_init_queue below.
+ */
+
+ pxmitpriv->adapter = padapter;
+
+ _rtw_init_queue(&pxmitpriv->be_pending);
+ _rtw_init_queue(&pxmitpriv->bk_pending);
+ _rtw_init_queue(&pxmitpriv->vi_pending);
+ _rtw_init_queue(&pxmitpriv->vo_pending);
+ _rtw_init_queue(&pxmitpriv->bm_pending);
+
+ _rtw_init_queue(&pxmitpriv->free_xmit_queue);
+
+ /*
+ Allocate memory of size sizeof(struct xmit_frame) * NR_XMITFRAME,
+ initialize free_xmit_frame below, and link all the xmit_frames
+ into the free_xmit_queue.
+ */
+
+ pxmitpriv->pallocated_frame_buf = rtw_zvmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
+
+ if (pxmitpriv->pallocated_frame_buf == NULL) {
+ pxmitpriv->pxmit_frame_buf = NULL;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_frame fail!\n"));
+ res = _FAIL;
+ goto exit;
+ }
+ pxmitpriv->pxmit_frame_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitpriv->pallocated_frame_buf), 4);
+ /* pxmitpriv->pxmit_frame_buf = pxmitpriv->pallocated_frame_buf + 4 - */
+ /* ((size_t) (pxmitpriv->pallocated_frame_buf) &3); */
+
+ pxframe = (struct xmit_frame *)pxmitpriv->pxmit_frame_buf;
+
+ for (i = 0; i < NR_XMITFRAME; i++) {
+ _rtw_init_listhead(&(pxframe->list));
+
+ pxframe->padapter = padapter;
+ pxframe->frame_tag = NULL_FRAMETAG;
+
+ pxframe->pkt = NULL;
+
+ pxframe->buf_addr = NULL;
+ pxframe->pxmitbuf = NULL;
+
+ rtw_list_insert_tail(&(pxframe->list), &(pxmitpriv->free_xmit_queue.queue));
+
+ pxframe++;
+ }
+
+ pxmitpriv->free_xmitframe_cnt = NR_XMITFRAME;
+
+ pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;
+
+ /* init xmit_buf */
+ _rtw_init_queue(&pxmitpriv->free_xmitbuf_queue);
+ _rtw_init_queue(&pxmitpriv->pending_xmitbuf_queue);
+
+ pxmitpriv->pallocated_xmitbuf = rtw_zvmalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4);
+
+ if (pxmitpriv->pallocated_xmitbuf == NULL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_buf fail!\n"));
+ res = _FAIL;
+ goto exit;
+ }
+
+ pxmitpriv->pxmitbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitpriv->pallocated_xmitbuf), 4);
+ /* pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 - */
+ /* ((size_t) (pxmitpriv->pallocated_xmitbuf) &3); */
+
+ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
+
+ for (i = 0; i < NR_XMITBUFF; i++) {
+ _rtw_init_listhead(&pxmitbuf->list);
+
+ pxmitbuf->priv_data = NULL;
+ pxmitbuf->padapter = padapter;
+ pxmitbuf->ext_tag = false;
+
+ /* Tx buf allocation may fail sometimes, so sleep and retry. */
+ res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
+ if (res == _FAIL) {
+ rtw_msleep_os(10);
+ res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
+ if (res == _FAIL) {
+ goto exit;
+ }
+ }
+
+ pxmitbuf->flags = XMIT_VO_QUEUE;
+
+ rtw_list_insert_tail(&pxmitbuf->list, &(pxmitpriv->free_xmitbuf_queue.queue));
+ pxmitbuf++;
+ }
+
+ pxmitpriv->free_xmitbuf_cnt = NR_XMITBUFF;
+
+ /* Init xmit extension buff */
+ _rtw_init_queue(&pxmitpriv->free_xmit_extbuf_queue);
+
+ pxmitpriv->pallocated_xmit_extbuf = rtw_zvmalloc(num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
+
+ if (pxmitpriv->pallocated_xmit_extbuf == NULL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("alloc xmit_extbuf fail!\n"));
+ res = _FAIL;
+ goto exit;
+ }
+
+ pxmitpriv->pxmit_extbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitpriv->pallocated_xmit_extbuf), 4);
+
+ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
+
+ for (i = 0; i < num_xmit_extbuf; i++) {
+ _rtw_init_listhead(&pxmitbuf->list);
+
+ pxmitbuf->priv_data = NULL;
+ pxmitbuf->padapter = padapter;
+ pxmitbuf->ext_tag = true;
+
+ res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, max_xmit_extbuf_size + XMITBUF_ALIGN_SZ);
+ if (res == _FAIL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ rtw_list_insert_tail(&pxmitbuf->list, &(pxmitpriv->free_xmit_extbuf_queue.queue));
+ pxmitbuf++;
+ }
+
+ pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
+
+ rtw_alloc_hwxmits(padapter);
+ rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
+
+ for (i = 0; i < 4; i++)
+ pxmitpriv->wmm_para_seq[i] = i;
+
+ pxmitpriv->txirp_cnt = 1;
+
+ _rtw_init_sema(&(pxmitpriv->tx_retevt), 0);
+
+ /* per AC pending irp */
+ pxmitpriv->beq_cnt = 0;
+ pxmitpriv->bkq_cnt = 0;
+ pxmitpriv->viq_cnt = 0;
+ pxmitpriv->voq_cnt = 0;
+
+ pxmitpriv->ack_tx = false;
+ _rtw_mutex_init(&pxmitpriv->ack_tx_mutex);
+ rtw_sctx_init(&pxmitpriv->ack_tx_ops, 0);
+
+ rtw_hal_init_xmit_priv(padapter);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+static void rtw_mfree_xmit_priv_lock(struct xmit_priv *pxmitpriv)
+{
+ _rtw_spinlock_free(&pxmitpriv->lock);
+ _rtw_free_sema(&pxmitpriv->xmit_sema);
+ _rtw_free_sema(&pxmitpriv->terminate_xmitthread_sema);
+
+ _rtw_spinlock_free(&pxmitpriv->be_pending.lock);
+ _rtw_spinlock_free(&pxmitpriv->bk_pending.lock);
+ _rtw_spinlock_free(&pxmitpriv->vi_pending.lock);
+ _rtw_spinlock_free(&pxmitpriv->vo_pending.lock);
+ _rtw_spinlock_free(&pxmitpriv->bm_pending.lock);
+
+ _rtw_spinlock_free(&pxmitpriv->free_xmit_queue.lock);
+ _rtw_spinlock_free(&pxmitpriv->free_xmitbuf_queue.lock);
+ _rtw_spinlock_free(&pxmitpriv->pending_xmitbuf_queue.lock);
+}
+
+void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
+{
+ int i;
+ struct adapter *padapter = pxmitpriv->adapter;
+ struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitpriv->pxmit_frame_buf;
+ struct xmit_buf *pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
+ u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
+ u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
+
+ _func_enter_;
+
+ rtw_hal_free_xmit_priv(padapter);
+
+ rtw_mfree_xmit_priv_lock(pxmitpriv);
+
+ if (pxmitpriv->pxmit_frame_buf == NULL)
+ goto out;
+
+ for (i = 0; i < NR_XMITFRAME; i++) {
+ rtw_os_xmit_complete(padapter, pxmitframe);
+
+ pxmitframe++;
+ }
+
+ for (i = 0; i < NR_XMITBUFF; i++) {
+ rtw_os_xmit_resource_free(padapter, pxmitbuf, (MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ));
+ pxmitbuf++;
+ }
+
+ if (pxmitpriv->pallocated_frame_buf)
+ rtw_vmfree(pxmitpriv->pallocated_frame_buf, NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
+
+ if (pxmitpriv->pallocated_xmitbuf)
+ rtw_vmfree(pxmitpriv->pallocated_xmitbuf, NR_XMITBUFF * sizeof(struct xmit_buf) + 4);
+
+ /* free xmit extension buff */
+ _rtw_spinlock_free(&pxmitpriv->free_xmit_extbuf_queue.lock);
+
+ pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
+ for (i = 0; i < num_xmit_extbuf; i++) {
+ rtw_os_xmit_resource_free(padapter, pxmitbuf, (max_xmit_extbuf_size + XMITBUF_ALIGN_SZ));
+ pxmitbuf++;
+ }
+
+ if (pxmitpriv->pallocated_xmit_extbuf) {
+ rtw_vmfree(pxmitpriv->pallocated_xmit_extbuf, num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
+ }
+
+ rtw_free_hwxmits(padapter);
+
+ _rtw_mutex_free(&pxmitpriv->ack_tx_mutex);
+
+out:
+
+_func_exit_;
+}
+
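+/*
+ * Choose the virtual carrier sense mode (RTS/CTS, CTS-to-self or none) for a
+ * frame, based on the first fragment length, the RTS threshold, ERP and HT
+ * protection and whether the frame is sent in an A-MPDU.
+ */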
+static void update_attrib_vcs_info(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ u32 sz;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct sta_info *psta = pattrib->psta;
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (pattrib->nr_frags != 1)
+ sz = padapter->xmitpriv.frag_len;
+ else /* no frag */
+ sz = pattrib->last_txcmdsz;
+
+ /* (1) RTS_Threshold is compared to the MPDU, not the MSDU. */
+ /* (2) If there is more than one fragment in this MSDU, only the first fragment uses a protection frame. */
+ /* The other fragments are protected by the previous fragment, */
+ /* so we only need to check the length of the first fragment. */
+ if (pmlmeext->cur_wireless_mode < WIRELESS_11_24N || padapter->registrypriv.wifi_spec) {
+ if (sz > padapter->registrypriv.rts_thresh) {
+ pattrib->vcs_mode = RTS_CTS;
+ } else {
+ if (psta->rtsen)
+ pattrib->vcs_mode = RTS_CTS;
+ else if (psta->cts2self)
+ pattrib->vcs_mode = CTS_TO_SELF;
+ else
+ pattrib->vcs_mode = NONE_VCS;
+ }
+ } else {
+ while (true) {
+ /* IOT action */
+ if ((pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_ATHEROS) && pattrib->ampdu_en &&
+ (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)) {
+ pattrib->vcs_mode = CTS_TO_SELF;
+ break;
+ }
+
+ /* check ERP protection */
+ if (psta->rtsen || psta->cts2self) {
+ if (psta->rtsen)
+ pattrib->vcs_mode = RTS_CTS;
+ else if (psta->cts2self)
+ pattrib->vcs_mode = CTS_TO_SELF;
+
+ break;
+ }
+
+ /* check HT op mode */
+ if (pattrib->ht_en) {
+ u8 htopmode = pmlmeinfo->HT_protection;
+ if ((pmlmeext->cur_bwmode && (htopmode == 2 || htopmode == 3)) ||
+ (!pmlmeext->cur_bwmode && htopmode == 3)) {
+ pattrib->vcs_mode = RTS_CTS;
+ break;
+ }
+ }
+
+ /* check rts */
+ if (sz > padapter->registrypriv.rts_thresh) {
+ pattrib->vcs_mode = RTS_CTS;
+ break;
+ }
+
+ /* to do list: check MIMO power save condition. */
+
+ /* check AMPDU aggregation for TXOP */
+ if (pattrib->ampdu_en) {
+ pattrib->vcs_mode = RTS_CTS;
+ break;
+ }
+
+ pattrib->vcs_mode = NONE_VCS;
+ break;
+ }
+ }
+}
+
+static void update_attrib_phy_info(struct pkt_attrib *pattrib, struct sta_info *psta)
+{
+ /*if (psta->rtsen)
+ pattrib->vcs_mode = RTS_CTS;
+ else if (psta->cts2self)
+ pattrib->vcs_mode = CTS_TO_SELF;
+ else
+ pattrib->vcs_mode = NONE_VCS;*/
+
+ pattrib->mdata = 0;
+ pattrib->eosp = 0;
+ pattrib->triggered = 0;
+
+ /* qos_en, ht_en, init rate, bw, ch_offset, sgi */
+ pattrib->qos_en = psta->qos_option;
+
+ pattrib->raid = psta->raid;
+ pattrib->ht_en = psta->htpriv.ht_option;
+ pattrib->bwmode = psta->htpriv.bwmode;
+ pattrib->ch_offset = psta->htpriv.ch_offset;
+ pattrib->sgi = psta->htpriv.sgi;
+ pattrib->ampdu_en = false;
+ pattrib->retry_ctrl = false;
+}
+
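+/* Downgrade the user priority when admission control (ACM) is mandatory for its access category. */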
+u8 qos_acm(u8 acm_mask, u8 priority)
+{
+ u8 change_priority = priority;
+
+ switch (priority) {
+ case 0:
+ case 3:
+ if (acm_mask & BIT(1))
+ change_priority = 1;
+ break;
+ case 1:
+ case 2:
+ break;
+ case 4:
+ case 5:
+ if (acm_mask & BIT(2))
+ change_priority = 0;
+ break;
+ case 6:
+ case 7:
+ if (acm_mask & BIT(3))
+ change_priority = 5;
+ break;
+ default:
+ DBG_88E("qos_acm(): invalid pattrib->priority: %d!!!\n", priority);
+ break;
+ }
+
+ return change_priority;
+}
+
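+/*
+ * Derive the 802.11e user priority from the IP TOS field (EAPOL frames are
+ * forced to priority 7) and switch the frame to the QoS data subtype.
+ */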
+static void set_qos(struct pkt_file *ppktfile, struct pkt_attrib *pattrib)
+{
+ struct ethhdr etherhdr;
+ struct iphdr ip_hdr;
+ s32 user_prio = 0;
+
+ _rtw_open_pktfile(ppktfile->pkt, ppktfile);
+ _rtw_pktfile_read(ppktfile, (unsigned char *)&etherhdr, ETH_HLEN);
+
+ /* get user_prio from IP hdr */
+ if (pattrib->ether_type == 0x0800) {
+ _rtw_pktfile_read(ppktfile, (u8 *)&ip_hdr, sizeof(ip_hdr));
+/* user_prio = (ntohs(ip_hdr.tos) >> 5) & 0x3; */
+ user_prio = ip_hdr.tos >> 5;
+ } else if (pattrib->ether_type == 0x888e) {
+ /* "When priority processing of data frames is supported, */
+ /* a STA's SME should send EAPOL-Key frames at the highest priority." */
+ user_prio = 7;
+ }
+
+ pattrib->priority = user_prio;
+ pattrib->hdrlen = WLAN_HDR_A3_QOS_LEN;
+ pattrib->subtype = WIFI_QOS_DATA_TYPE;
+}
+
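+/*
+ * Parse the Ethernet header of an outgoing packet and fill struct pkt_attrib:
+ * addresses, QoS priority, security and PHY parameters. Returns _FAIL if the
+ * destination station is unknown or the frame must be dropped.
+ */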
+static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct pkt_attrib *pattrib)
+{
+ struct pkt_file pktfile;
+ struct sta_info *psta = NULL;
+ struct ethhdr etherhdr;
+
+ int bmcast;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct qos_priv *pqospriv = &pmlmepriv->qospriv;
+ int res = _SUCCESS;
+
+ _func_enter_;
+
+ _rtw_open_pktfile(pkt, &pktfile);
+ _rtw_pktfile_read(&pktfile, (u8 *)&etherhdr, ETH_HLEN);
+
+ pattrib->ether_type = ntohs(etherhdr.h_proto);
+
+ memcpy(pattrib->dst, &etherhdr.h_dest, ETH_ALEN);
+ memcpy(pattrib->src, &etherhdr.h_source, ETH_ALEN);
+
+ pattrib->pctrl = 0;
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
+ memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
+ memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
+ } else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ memcpy(pattrib->ra, get_bssid(pmlmepriv), ETH_ALEN);
+ memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
+ memcpy(pattrib->ta, get_bssid(pmlmepriv), ETH_ALEN);
+ }
+
+ pattrib->pktlen = pktfile.pkt_len;
+
+ if (ETH_P_IP == pattrib->ether_type) {
+ /* The following is for DHCP and ARP packet, we use cck1M to tx these packets and let LPS awake some time */
+ /* to prevent DHCP protocol fail */
+ u8 tmp[24];
+ _rtw_pktfile_read(&pktfile, &tmp[0], 24);
+ pattrib->dhcp_pkt = 0;
+ if (pktfile.pkt_len > 282) { /* MINIMUM_DHCP_PACKET_SIZE */
+ if (ETH_P_IP == pattrib->ether_type) {/* IP header */
+ if (((tmp[21] == 68) && (tmp[23] == 67)) ||
+ ((tmp[21] == 67) && (tmp[23] == 68))) {
+ /* 68 : UDP BOOTP client */
+ /* 67 : UDP BOOTP server */
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("====================== update_attrib: get DHCP Packet\n"));
+ /* Use low rate to send DHCP packet. */
+ pattrib->dhcp_pkt = 1;
+ }
+ }
+ }
+ } else if (0x888e == pattrib->ether_type) {
+ DBG_88E_LEVEL(_drv_info_, "send eapol packet\n");
+ }
+
+ if ((pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1))
+ rtw_set_scan_deny(padapter, 3000);
+
+ /* If EAPOL , ARP , OR DHCP packet, driver must be in active mode. */
+ if ((pattrib->ether_type == 0x0806) || (pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1))
+ rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SPECIAL_PACKET, 1);
+
+ bmcast = IS_MCAST(pattrib->ra);
+
+ /* get sta_info */
+ if (bmcast) {
+ psta = rtw_get_bcmc_stainfo(padapter);
+ } else {
+ psta = rtw_get_stainfo(pstapriv, pattrib->ra);
+ if (psta == NULL) { /* if we cannot get psta => drop the pkt */
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("\nupdate_attrib => get sta_info fail, ra: %pM\n", (pattrib->ra)));
+ res = _FAIL;
+ goto exit;
+ } else if ((check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) && (!(psta->state & _FW_LINKED))) {
+ res = _FAIL;
+ goto exit;
+ }
+ }
+
+ if (psta) {
+ pattrib->mac_id = psta->mac_id;
+ /* DBG_88E("%s ==> mac_id(%d)\n", __func__, pattrib->mac_id); */
+ pattrib->psta = psta;
+ } else {
+ /* if we cannot get psta => drop the pkt */
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("\nupdate_attrib => get sta_info fail, ra:%pM\n", (pattrib->ra)));
+ res = _FAIL;
+ goto exit;
+ }
+
+ pattrib->ack_policy = 0;
+ /* get ether_hdr_len */
+ pattrib->pkt_hdrlen = ETH_HLEN; /* (pattrib->ether_type == 0x8100) ? (14 + 4) : 14; for a VLAN tag */
+
+ pattrib->hdrlen = WLAN_HDR_A3_LEN;
+ pattrib->subtype = WIFI_DATA_TYPE;
+ pattrib->priority = 0;
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE|WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE)) {
+ if (psta->qos_option)
+ set_qos(&pktfile, pattrib);
+ } else {
+ if (pqospriv->qos_option) {
+ set_qos(&pktfile, pattrib);
+
+ if (pmlmepriv->acm_mask != 0)
+ pattrib->priority = qos_acm(pmlmepriv->acm_mask, pattrib->priority);
+ }
+ }
+
+ if (psta->ieee8021x_blocked) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("\n psta->ieee8021x_blocked == true\n"));
+
+ pattrib->encrypt = 0;
+
+ if ((pattrib->ether_type != 0x888e) && !check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("\npsta->ieee8021x_blocked == true, pattrib->ether_type(%.4x) != 0x888e\n", pattrib->ether_type));
+ res = _FAIL;
+ goto exit;
+ }
+ } else {
+ GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, bmcast);
+
+ switch (psecuritypriv->dot11AuthAlgrthm) {
+ case dot11AuthAlgrthm_Open:
+ case dot11AuthAlgrthm_Shared:
+ case dot11AuthAlgrthm_Auto:
+ pattrib->key_idx = (u8)psecuritypriv->dot11PrivacyKeyIndex;
+ break;
+ case dot11AuthAlgrthm_8021X:
+ if (bmcast)
+ pattrib->key_idx = (u8)psecuritypriv->dot118021XGrpKeyid;
+ else
+ pattrib->key_idx = 0;
+ break;
+ default:
+ pattrib->key_idx = 0;
+ break;
+ }
+ }
+
+ switch (pattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ pattrib->iv_len = 4;
+ pattrib->icv_len = 4;
+ break;
+ case _TKIP_:
+ pattrib->iv_len = 8;
+ pattrib->icv_len = 4;
+
+ if (padapter->securitypriv.busetkipkey == _FAIL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("\npadapter->securitypriv.busetkipkey(%d) == _FAIL drop packet\n",
+ padapter->securitypriv.busetkipkey));
+ res = _FAIL;
+ goto exit;
+ }
+ break;
+ case _AES_:
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("pattrib->encrypt=%d (_AES_)\n", pattrib->encrypt));
+ pattrib->iv_len = 8;
+ pattrib->icv_len = 8;
+ break;
+ default:
+ pattrib->iv_len = 0;
+ pattrib->icv_len = 0;
+ break;
+ }
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
+ ("update_attrib: encrypt=%d securitypriv.sw_encrypt=%d\n",
+ pattrib->encrypt, padapter->securitypriv.sw_encrypt));
+
+ if (pattrib->encrypt &&
+ (padapter->securitypriv.sw_encrypt || !psecuritypriv->hw_decrypted)) {
+ pattrib->bswenc = true;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("update_attrib: encrypt=%d securitypriv.hw_decrypted=%d bswenc = true\n",
+ pattrib->encrypt, padapter->securitypriv.sw_encrypt));
+ } else {
+ pattrib->bswenc = false;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("update_attrib: bswenc = false\n"));
+ }
+
+ rtw_set_tx_chksum_offload(pkt, pattrib);
+
+ update_attrib_phy_info(pattrib, psta);
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
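+/*
+ * For TKIP frames, compute the Michael MIC over DA, SA, priority and the
+ * payload of every fragment, then append the 8-byte MIC to the last fragment
+ * and extend last_txcmdsz accordingly.
+ */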
+static s32 xmitframe_addmic(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ int curfragnum, length;
+ u8 *pframe, *payload, mic[8];
+ struct mic_data micdata;
+ struct sta_info *stainfo;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
+ u8 hw_hdr_offset = 0;
+ int bmcst = IS_MCAST(pattrib->ra);
+
+ if (pattrib->psta)
+ stainfo = pattrib->psta;
+ else
+ stainfo = rtw_get_stainfo(&padapter->stapriv, &pattrib->ra[0]);
+
+_func_enter_;
+
+ hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
+
+ if (pattrib->encrypt == _TKIP_) {/* if (psecuritypriv->dot11PrivacyAlgrthm == _TKIP_PRIVACY_) */
+ /* encode mic code */
+ if (stainfo != NULL) {
+ u8 null_key[16] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0x0, 0x0};
+
+ pframe = pxmitframe->buf_addr + hw_hdr_offset;
+
+ if (bmcst) {
+ if (_rtw_memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16))
+ return _FAIL;
+ /* start to calculate the mic code */
+ rtw_secmicsetkey(&micdata, psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey);
+ } else {
+ if (_rtw_memcmp(&stainfo->dot11tkiptxmickey.skey[0], null_key, 16) == true) {
+ /* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey == 0\n"); */
+ /* rtw_msleep_os(10); */
+ return _FAIL;
+ }
+ /* start to calculate the mic code */
+ rtw_secmicsetkey(&micdata, &stainfo->dot11tkiptxmickey.skey[0]);
+ }
+
+ if (pframe[1]&1) { /* ToDS == 1 */
+ rtw_secmicappend(&micdata, &pframe[16], 6); /* DA */
+ if (pframe[1]&2) /* From Ds == 1 */
+ rtw_secmicappend(&micdata, &pframe[24], 6);
+ else
+ rtw_secmicappend(&micdata, &pframe[10], 6);
+ } else { /* ToDS == 0 */
+ rtw_secmicappend(&micdata, &pframe[4], 6); /* DA */
+ if (pframe[1]&2) /* From Ds == 1 */
+ rtw_secmicappend(&micdata, &pframe[16], 6);
+ else
+ rtw_secmicappend(&micdata, &pframe[10], 6);
+ }
+
+ if (pattrib->qos_en)
+ priority[0] = (u8)pxmitframe->attrib.priority;
+
+ rtw_secmicappend(&micdata, &priority[0], 4);
+
+ payload = pframe;
+
+ for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
+ payload = (u8 *)RND4((size_t)(payload));
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("=== curfragnum=%d, pframe = 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x, 0x%.2x,!!!\n",
+ curfragnum, *payload, *(payload+1),
+ *(payload+2), *(payload+3),
+ *(payload+4), *(payload+5),
+ *(payload+6), *(payload+7)));
+
+ payload = payload+pattrib->hdrlen+pattrib->iv_len;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("curfragnum=%d pattrib->hdrlen=%d pattrib->iv_len=%d",
+ curfragnum, pattrib->hdrlen, pattrib->iv_len));
+ if ((curfragnum+1) == pattrib->nr_frags) {
+ length = pattrib->last_txcmdsz-pattrib->hdrlen-pattrib->iv_len-((pattrib->bswenc) ? pattrib->icv_len : 0);
+ rtw_secmicappend(&micdata, payload, length);
+ payload = payload+length;
+ } else {
+ length = pxmitpriv->frag_len-pattrib->hdrlen-pattrib->iv_len-((pattrib->bswenc) ? pattrib->icv_len : 0);
+ rtw_secmicappend(&micdata, payload, length);
+ payload = payload+length+pattrib->icv_len;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("curfragnum=%d length=%d pattrib->icv_len=%d", curfragnum, length, pattrib->icv_len));
+ }
+ }
+ rtw_secgetmic(&micdata, &(mic[0]));
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: before add mic code!!!\n"));
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: pattrib->last_txcmdsz=%d!!!\n", pattrib->last_txcmdsz));
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: mic[0]=0x%.2x , mic[1]=0x%.2x , mic[2]= 0x%.2x, mic[3]=0x%.2x\n\
+ mic[4]= 0x%.2x , mic[5]= 0x%.2x , mic[6]= 0x%.2x , mic[7]= 0x%.2x !!!!\n",
+ mic[0], mic[1], mic[2], mic[3], mic[4], mic[5], mic[6], mic[7]));
+ /* add mic code and add the mic code length in last_txcmdsz */
+
+ memcpy(payload, &(mic[0]), 8);
+ pattrib->last_txcmdsz += 8;
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("\n ======== last pkt ========\n"));
+ payload = payload-pattrib->last_txcmdsz+8;
+ for (curfragnum = 0; curfragnum < pattrib->last_txcmdsz; curfragnum = curfragnum+8)
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_,
+ (" %.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x, %.2x ",
+ *(payload+curfragnum), *(payload+curfragnum+1),
+ *(payload+curfragnum+2), *(payload+curfragnum+3),
+ *(payload+curfragnum+4), *(payload+curfragnum+5),
+ *(payload+curfragnum+6), *(payload+curfragnum+7)));
+ } else {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic: rtw_get_stainfo==NULL!!!\n"));
+ }
+ }
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+static s32 xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+
+_func_enter_;
+
+ if (pattrib->bswenc) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_alert_, ("### xmitframe_swencrypt\n"));
+ switch (pattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ rtw_wep_encrypt(padapter, (u8 *)pxmitframe);
+ break;
+ case _TKIP_:
+ rtw_tkip_encrypt(padapter, (u8 *)pxmitframe);
+ break;
+ case _AES_:
+ rtw_aes_encrypt(padapter, (u8 *)pxmitframe);
+ break;
+ default:
+ break;
+ }
+ } else {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_notice_, ("### xmitframe_hwencrypt\n"));
+ }
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr, struct pkt_attrib *pattrib)
+{
+ u16 *qc;
+
+ struct rtw_ieee80211_hdr *pwlanhdr = (struct rtw_ieee80211_hdr *)hdr;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct qos_priv *pqospriv = &pmlmepriv->qospriv;
+ u8 qos_option = false;
+
+ int res = _SUCCESS;
+ u16 *fctrl = &pwlanhdr->frame_ctl;
+
+ struct sta_info *psta;
+
+ int bmcst = IS_MCAST(pattrib->ra);
+
+_func_enter_;
+
+ if (pattrib->psta) {
+ psta = pattrib->psta;
+ } else {
+ if (bmcst) {
+ psta = rtw_get_bcmc_stainfo(padapter);
+ } else {
+ psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
+ }
+ }
+
+ _rtw_memset(hdr, 0, WLANHDR_OFFSET);
+
+ SetFrameSubType(fctrl, pattrib->subtype);
+
+ if (pattrib->subtype & WIFI_DATA_TYPE) {
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
+ /* to_ds = 1, fr_ds = 0; */
+ /* Data transfer to AP */
+ SetToDs(fctrl);
+ memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN);
+
+ if (pqospriv->qos_option)
+ qos_option = true;
+ } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ /* to_ds = 0, fr_ds = 1; */
+ SetFrDs(fctrl);
+ memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN);
+
+ if (psta->qos_option)
+ qos_option = true;
+ } else if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
+ memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
+
+ if (psta->qos_option)
+ qos_option = true;
+ } else {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("fw_state:%x is not allowed to xmit frame\n", get_fwstate(pmlmepriv)));
+ res = _FAIL;
+ goto exit;
+ }
+
+ if (pattrib->mdata)
+ SetMData(fctrl);
+
+ if (pattrib->encrypt)
+ SetPrivacy(fctrl);
+
+ if (qos_option) {
+ qc = (unsigned short *)(hdr + pattrib->hdrlen - 2);
+
+ if (pattrib->priority)
+ SetPriority(qc, pattrib->priority);
+
+ SetEOSP(qc, pattrib->eosp);
+
+ SetAckpolicy(qc, pattrib->ack_policy);
+ }
+
+ /* TODO: fill HT Control Field */
+
+ /* Update Seq Num will be handled by f/w */
+ if (psta) {
+ psta->sta_xmitpriv.txseq_tid[pattrib->priority]++;
+ psta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF;
+
+ pattrib->seqnum = psta->sta_xmitpriv.txseq_tid[pattrib->priority];
+
+ SetSeqNum(hdr, pattrib->seqnum);
+
+ /* check if enable ampdu */
+ if (pattrib->ht_en && psta->htpriv.ampdu_enable) {
+ if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority))
+ pattrib->ampdu_en = true;
+ }
+
+ /* re-check if enable ampdu by BA_starting_seqctrl */
+ if (pattrib->ampdu_en) {
+ u16 tx_seq;
+
+ tx_seq = psta->BA_starting_seqctrl[pattrib->priority & 0x0f];
+
+ /* check BA_starting_seqctrl */
+ if (SN_LESS(pattrib->seqnum, tx_seq)) {
+ pattrib->ampdu_en = false;/* AGG BK */
+ } else if (SN_EQUAL(pattrib->seqnum, tx_seq)) {
+ psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (tx_seq+1)&0xfff;
+
+ pattrib->ampdu_en = true;/* AGG EN */
+ } else {
+ psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (pattrib->seqnum+1)&0xfff;
+ pattrib->ampdu_en = true;/* AGG EN */
+ }
+ }
+ }
+ }
+exit:
+
+_func_exit_;
+ return res;
+}
+
+s32 rtw_txframes_pending(struct adapter *padapter)
+{
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ return ((_rtw_queue_empty(&pxmitpriv->be_pending) == false) ||
+ (_rtw_queue_empty(&pxmitpriv->bk_pending) == false) ||
+ (_rtw_queue_empty(&pxmitpriv->vi_pending) == false) ||
+ (_rtw_queue_empty(&pxmitpriv->vo_pending) == false));
+}
+
+s32 rtw_txframes_sta_ac_pending(struct adapter *padapter, struct pkt_attrib *pattrib)
+{
+ struct sta_info *psta;
+ struct tx_servq *ptxservq;
+ int priority = pattrib->priority;
+
+ psta = pattrib->psta;
+
+ switch (priority) {
+ case 1:
+ case 2:
+ ptxservq = &(psta->sta_xmitpriv.bk_q);
+ break;
+ case 4:
+ case 5:
+ ptxservq = &(psta->sta_xmitpriv.vi_q);
+ break;
+ case 6:
+ case 7:
+ ptxservq = &(psta->sta_xmitpriv.vo_q);
+ break;
+ case 0:
+ case 3:
+ default:
+ ptxservq = &(psta->sta_xmitpriv.be_q);
+ break;
+ }
+
+ return ptxservq->qcnt;
+}
+
+/*
+ * Calculate the maximum 802.11 packet size from pkt_attrib.
+ * This function does not consider the fragmentation case.
+ */
+u32 rtw_calculate_wlan_pkt_size_by_attribue(struct pkt_attrib *pattrib)
+{
+ u32 len = 0;
+
+ len = pattrib->hdrlen + pattrib->iv_len; /* WLAN Header and IV */
+ len += SNAP_SIZE + sizeof(u16); /* LLC */
+ len += pattrib->pktlen;
+ if (pattrib->encrypt == _TKIP_)
+ len += 8; /* MIC */
+ len += ((pattrib->bswenc) ? pattrib->icv_len : 0); /* ICV */
+
+ return len;
+}
+
+/*
+
+This sub-routine will perform all the following:
+
+1. remove 802.3 header.
+2. create wlan_header, based on the info in pxmitframe
+3. append sta's iv/ext-iv
+4. append LLC
+5. move frag chunk from pframe to pxmitframe->mem
+6. apply sw-encrypt, if necessary.
+
+*/
+s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt, struct xmit_frame *pxmitframe)
+{
+ struct pkt_file pktfile;
+ s32 frg_inx, frg_len, mpdu_len, llc_sz, mem_sz;
+ size_t addr;
+ u8 *pframe, *mem_start;
+ u8 hw_hdr_offset;
+ struct sta_info *psta;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ u8 *pbuf_start;
+ s32 bmcst = IS_MCAST(pattrib->ra);
+ s32 res = _SUCCESS;
+
+_func_enter_;
+
+ psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
+
+ if (psta == NULL)
+ return _FAIL;
+
+ if (pxmitframe->buf_addr == NULL) {
+ DBG_88E("==> %s buf_addr == NULL\n", __func__);
+ return _FAIL;
+ }
+
+ pbuf_start = pxmitframe->buf_addr;
+
+ hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
+
+ mem_start = pbuf_start + hw_hdr_offset;
+
+ if (rtw_make_wlanhdr(padapter, mem_start, pattrib) == _FAIL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("rtw_xmitframe_coalesce: rtw_make_wlanhdr fail; drop pkt\n"));
+ DBG_88E("rtw_xmitframe_coalesce: rtw_make_wlanhdr fail; drop pkt\n");
+ res = _FAIL;
+ goto exit;
+ }
+
+ _rtw_open_pktfile(pkt, &pktfile);
+ _rtw_pktfile_read(&pktfile, NULL, pattrib->pkt_hdrlen);
+
+ frg_inx = 0;
+ frg_len = pxmitpriv->frag_len - 4;/* 2346-4 = 2342 */
+
+ while (1) {
+ llc_sz = 0;
+
+ mpdu_len = frg_len;
+
+ pframe = mem_start;
+
+ SetMFrag(mem_start);
+
+ pframe += pattrib->hdrlen;
+ mpdu_len -= pattrib->hdrlen;
+
+ /* adding icv, if necessary... */
+ if (pattrib->iv_len) {
+ if (psta != NULL) {
+ switch (pattrib->encrypt) {
+ case _WEP40_:
+ case _WEP104_:
+ WEP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+ break;
+ case _TKIP_:
+ if (bmcst)
+ TKIP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+ else
+ TKIP_IV(pattrib->iv, psta->dot11txpn, 0);
+ break;
+ case _AES_:
+ if (bmcst)
+ AES_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+ else
+ AES_IV(pattrib->iv, psta->dot11txpn, 0);
+ break;
+ }
+ }
+
+ memcpy(pframe, pattrib->iv, pattrib->iv_len);
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_notice_,
+ ("rtw_xmitframe_coalesce: keyid=%d pattrib->iv[3]=%.2x pframe=%.2x %.2x %.2x %.2x\n",
+ padapter->securitypriv.dot11PrivacyKeyIndex, pattrib->iv[3], *pframe, *(pframe+1), *(pframe+2), *(pframe+3)));
+
+ pframe += pattrib->iv_len;
+
+ mpdu_len -= pattrib->iv_len;
+ }
+
+ if (frg_inx == 0) {
+ llc_sz = rtw_put_snap(pframe, pattrib->ether_type);
+ pframe += llc_sz;
+ mpdu_len -= llc_sz;
+ }
+
+ if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
+ mpdu_len -= pattrib->icv_len;
+ }
+
+ if (bmcst) {
+ /* don't fragment broadcast/multicast packets */
+ mem_sz = _rtw_pktfile_read(&pktfile, pframe, pattrib->pktlen);
+ } else {
+ mem_sz = _rtw_pktfile_read(&pktfile, pframe, mpdu_len);
+ }
+
+ pframe += mem_sz;
+
+ if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
+ memcpy(pframe, pattrib->icv, pattrib->icv_len);
+ pframe += pattrib->icv_len;
+ }
+
+ frg_inx++;
+
+ if (bmcst || rtw_endofpktfile(&pktfile)) {
+ pattrib->nr_frags = frg_inx;
+
+ pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len + ((pattrib->nr_frags == 1) ? llc_sz : 0) +
+ ((pattrib->bswenc) ? pattrib->icv_len : 0) + mem_sz;
+
+ ClearMFrag(mem_start);
+
+ break;
+ } else {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("%s: There're still something in packet!\n", __func__));
+ }
+
+ addr = (size_t)(pframe);
+
+ mem_start = (unsigned char *)RND4(addr) + hw_hdr_offset;
+ memcpy(mem_start, pbuf_start + hw_hdr_offset, pattrib->hdrlen);
+ }
+
+ if (xmitframe_addmic(padapter, pxmitframe) == _FAIL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("xmitframe_addmic(padapter, pxmitframe) == _FAIL\n"));
+ DBG_88E("xmitframe_addmic(padapter, pxmitframe) == _FAIL\n");
+ res = _FAIL;
+ goto exit;
+ }
+
+ xmitframe_swencrypt(padapter, pxmitframe);
+
+ if (!bmcst)
+ update_attrib_vcs_info(padapter, pxmitframe);
+ else
+ pattrib->vcs_mode = NONE_VCS;
+
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+/* Logical Link Control(LLC) SubNetwork Attachment Point(SNAP) header
+ * IEEE LLC/SNAP header contains 8 octets
+ * First 3 octets comprise the LLC portion
+ * SNAP portion, 5 octets, is divided into two fields:
+ * Organizationally Unique Identifier(OUI), 3 octets,
+ * type, defined by that organization, 2 octets.
+ */
+s32 rtw_put_snap(u8 *data, u16 h_proto)
+{
+ struct ieee80211_snap_hdr *snap;
+ u8 *oui;
+
+_func_enter_;
+
+ snap = (struct ieee80211_snap_hdr *)data;
+ snap->dsap = 0xaa;
+ snap->ssap = 0xaa;
+ snap->ctrl = 0x03;
+
+ if (h_proto == 0x8137 || h_proto == 0x80f3)
+ oui = P802_1H_OUI;
+ else
+ oui = RFC1042_OUI;
+
+ snap->oui[0] = oui[0];
+ snap->oui[1] = oui[1];
+ snap->oui[2] = oui[2];
+
+ *(__be16 *)(data + SNAP_SIZE) = htons(h_proto);
+
+_func_exit_;
+
+ return SNAP_SIZE + sizeof(u16);
+}
+
+void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len)
+{
+ uint protection;
+ u8 *perp;
+ int erp_len;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+
+_func_enter_;
+
+ switch (pxmitpriv->vcs_setting) {
+ case DISABLE_VCS:
+ pxmitpriv->vcs = NONE_VCS;
+ break;
+ case ENABLE_VCS:
+ break;
+ case AUTO_VCS:
+ default:
+ perp = rtw_get_ie(ie, _ERPINFO_IE_, &erp_len, ie_len);
+ if (perp == NULL) {
+ pxmitpriv->vcs = NONE_VCS;
+ } else {
+ protection = (*(perp + 2)) & BIT(1);
+ if (protection) {
+ if (pregistrypriv->vcs_type == RTS_CTS)
+ pxmitpriv->vcs = RTS_CTS;
+ else
+ pxmitpriv->vcs = CTS_TO_SELF;
+ } else {
+ pxmitpriv->vcs = NONE_VCS;
+ }
+ }
+ break;
+ }
+
+_func_exit_;
+}
+
+void rtw_count_tx_stats(struct adapter *padapter, struct xmit_frame *pxmitframe, int sz)
+{
+ struct sta_info *psta = NULL;
+ struct stainfo_stats *pstats = NULL;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ if ((pxmitframe->frame_tag&0x0f) == DATA_FRAMETAG) {
+ pxmitpriv->tx_bytes += sz;
+ pmlmepriv->LinkDetectInfo.NumTxOkInPeriod += pxmitframe->agg_num;
+
+ psta = pxmitframe->attrib.psta;
+ if (psta) {
+ pstats = &psta->sta_stats;
+ pstats->tx_pkts += pxmitframe->agg_num;
+ pstats->tx_bytes += sz;
+ }
+ }
+}
+
+struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
+{
+ unsigned long irql;
+ struct xmit_buf *pxmitbuf = NULL;
+ struct list_head *plist, *phead;
+ struct __queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
+
+_func_enter_;
+
+ _enter_critical(&pfree_queue->lock, &irql);
+
+ if (_rtw_queue_empty(pfree_queue) == true) {
+ pxmitbuf = NULL;
+ } else {
+ phead = get_list_head(pfree_queue);
+
+ plist = get_next(phead);
+
+ pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
+
+ rtw_list_delete(&(pxmitbuf->list));
+ }
+
+ if (pxmitbuf != NULL) {
+ pxmitpriv->free_xmit_extbuf_cnt--;
+
+ pxmitbuf->priv_data = NULL;
+ /* pxmitbuf->ext_tag = true; */
+
+ if (pxmitbuf->sctx) {
+ DBG_88E("%s pxmitbuf->sctx is not NULL\n", __func__);
+ rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
+ }
+ }
+
+ _exit_critical(&pfree_queue->lock, &irql);
+
+_func_exit_;
+
+ return pxmitbuf;
+}
+
+s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
+{
+ unsigned long irql;
+ struct __queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
+
+_func_enter_;
+
+ if (pxmitbuf == NULL)
+ return _FAIL;
+
+ _enter_critical(&pfree_queue->lock, &irql);
+
+ rtw_list_delete(&pxmitbuf->list);
+
+ rtw_list_insert_tail(&(pxmitbuf->list), get_list_head(pfree_queue));
+ pxmitpriv->free_xmit_extbuf_cnt++;
+
+ _exit_critical(&pfree_queue->lock, &irql);
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
+{
+ unsigned long irql;
+ struct xmit_buf *pxmitbuf = NULL;
+ struct list_head *plist, *phead;
+ struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
+
+_func_enter_;
+
+ /* DBG_88E("+rtw_alloc_xmitbuf\n"); */
+
+ _enter_critical(&pfree_xmitbuf_queue->lock, &irql);
+
+ if (_rtw_queue_empty(pfree_xmitbuf_queue) == true) {
+ pxmitbuf = NULL;
+ } else {
+ phead = get_list_head(pfree_xmitbuf_queue);
+
+ plist = get_next(phead);
+
+ pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
+
+ rtw_list_delete(&(pxmitbuf->list));
+ }
+
+ if (pxmitbuf != NULL) {
+ pxmitpriv->free_xmitbuf_cnt--;
+ pxmitbuf->priv_data = NULL;
+ if (pxmitbuf->sctx) {
+ DBG_88E("%s pxmitbuf->sctx is not NULL\n", __func__);
+ rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
+ }
+ }
+ _exit_critical(&pfree_xmitbuf_queue->lock, &irql);
+
+_func_exit_;
+
+ return pxmitbuf;
+}
+
+s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
+{
+ unsigned long irql;
+ struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
+
+_func_enter_;
+ if (pxmitbuf == NULL)
+ return _FAIL;
+
+ if (pxmitbuf->sctx) {
+ DBG_88E("%s pxmitbuf->sctx is not NULL\n", __func__);
+ rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_FREE);
+ }
+
+ if (pxmitbuf->ext_tag) {
+ rtw_free_xmitbuf_ext(pxmitpriv, pxmitbuf);
+ } else {
+ _enter_critical(&pfree_xmitbuf_queue->lock, &irql);
+
+ rtw_list_delete(&pxmitbuf->list);
+
+ rtw_list_insert_tail(&(pxmitbuf->list), get_list_head(pfree_xmitbuf_queue));
+
+ pxmitpriv->free_xmitbuf_cnt++;
+ _exit_critical(&pfree_xmitbuf_queue->lock, &irql);
+ }
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+/*
+Calling context:
+1. OS_TXENTRY
+2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
+
+If USE_RXTHREAD is turned on, no critical section is needed.
+Otherwise, _enter/_exit critical must be used to protect free_xmit_queue.
+
+Be very cautious here.
+
+*/
+
+struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv)/* _queue *pfree_xmit_queue) */
+{
+ /*
+ Remember to use the osdep_service API and to lock/unlock
+ (_enter/_exit critical) when accessing pfree_xmit_queue.
+ */
+
+ unsigned long irql;
+ struct xmit_frame *pxframe = NULL;
+ struct list_head *plist, *phead;
+ struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
+
+_func_enter_;
+
+ _enter_critical_bh(&pfree_xmit_queue->lock, &irql);
+
+ if (_rtw_queue_empty(pfree_xmit_queue) == true) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe:%d\n", pxmitpriv->free_xmitframe_cnt));
+ pxframe = NULL;
+ } else {
+ phead = get_list_head(pfree_xmit_queue);
+
+ plist = get_next(phead);
+
+ pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
+
+ rtw_list_delete(&(pxframe->list));
+ }
+
+ if (pxframe != NULL) { /* default value setting */
+ pxmitpriv->free_xmitframe_cnt--;
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_alloc_xmitframe():free_xmitframe_cnt=%d\n", pxmitpriv->free_xmitframe_cnt));
+
+ pxframe->buf_addr = NULL;
+ pxframe->pxmitbuf = NULL;
+
+ _rtw_memset(&pxframe->attrib, 0, sizeof(struct pkt_attrib));
+ /* pxframe->attrib.psta = NULL; */
+
+ pxframe->frame_tag = DATA_FRAMETAG;
+
+ pxframe->pkt = NULL;
+ pxframe->pkt_offset = 1;/* default use pkt_offset to fill tx desc */
+
+ pxframe->agg_num = 1;
+ pxframe->ack_report = 0;
+ }
+
+ _exit_critical_bh(&pfree_xmit_queue->lock, &irql);
+
+_func_exit_;
+
+ return pxframe;
+}
+
+s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitframe)
+{
+ unsigned long irql;
+ struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
+ struct adapter *padapter = pxmitpriv->adapter;
+ struct sk_buff *pndis_pkt = NULL;
+
+_func_enter_;
+
+ if (pxmitframe == NULL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("====== rtw_free_xmitframe():pxmitframe == NULL!!!!!!!!!!\n"));
+ goto exit;
+ }
+
+ _enter_critical_bh(&pfree_xmit_queue->lock, &irql);
+
+ rtw_list_delete(&pxmitframe->list);
+
+ if (pxmitframe->pkt) {
+ pndis_pkt = pxmitframe->pkt;
+ pxmitframe->pkt = NULL;
+ }
+
+ rtw_list_insert_tail(&pxmitframe->list, get_list_head(pfree_xmit_queue));
+
+ pxmitpriv->free_xmitframe_cnt++;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_debug_, ("rtw_free_xmitframe():free_xmitframe_cnt=%d\n", pxmitpriv->free_xmitframe_cnt));
+
+ _exit_critical_bh(&pfree_xmit_queue->lock, &irql);
+
+ if (pndis_pkt)
+ rtw_os_pkt_complete(padapter, pndis_pkt);
+
+exit:
+
+_func_exit_;
+
+ return _SUCCESS;
+}
+
+void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pframequeue)
+{
+ unsigned long irql;
+ struct list_head *plist, *phead;
+ struct xmit_frame *pxmitframe;
+
+_func_enter_;
+
+ _enter_critical_bh(&(pframequeue->lock), &irql);
+
+ phead = get_list_head(pframequeue);
+ plist = get_next(phead);
+
+ while (!rtw_end_of_queue_search(phead, plist)) {
+ pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
+
+ plist = get_next(plist);
+
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
+ }
+ _exit_critical_bh(&(pframequeue->lock), &irql);
+
+_func_exit_;
+}
+
+s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ if (rtw_xmit_classifier(padapter, pxmitframe) == _FAIL) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_,
+ ("rtw_xmitframe_enqueue: drop xmit pkt for classifier fail\n"));
+/* pxmitframe->pkt = NULL; */
+ return _FAIL;
+ }
+
+ return _SUCCESS;
+}
+
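+/* Detach and return the first frame queued on a tx_servq, or NULL if the queue is empty. */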
+static struct xmit_frame *dequeue_one_xmitframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit, struct tx_servq *ptxservq, struct __queue *pframe_queue)
+{
+ struct list_head *xmitframe_plist, *xmitframe_phead;
+ struct xmit_frame *pxmitframe = NULL;
+
+ xmitframe_phead = get_list_head(pframe_queue);
+ xmitframe_plist = get_next(xmitframe_phead);
+
+ while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
+ pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+
+ xmitframe_plist = get_next(xmitframe_plist);
+
+ rtw_list_delete(&pxmitframe->list);
+
+ ptxservq->qcnt--;
+
+ break;
+ }
+
+ return pxmitframe;
+}
+
+struct xmit_frame *rtw_dequeue_xframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit_i, int entry)
+{
+ unsigned long irql0;
+ struct list_head *sta_plist, *sta_phead;
+ struct hw_xmit *phwxmit;
+ struct tx_servq *ptxservq = NULL;
+ struct __queue *pframe_queue = NULL;
+ struct xmit_frame *pxmitframe = NULL;
+ struct adapter *padapter = pxmitpriv->adapter;
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ int i, inx[4];
+
+_func_enter_;
+
+ inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;
+
+ if (pregpriv->wifi_spec == 1) {
+ int j;
+
+ for (j = 0; j < 4; j++)
+ inx[j] = pxmitpriv->wmm_para_seq[j];
+ }
+
+ _enter_critical_bh(&pxmitpriv->lock, &irql0);
+
+ for (i = 0; i < entry; i++) {
+ phwxmit = phwxmit_i + inx[i];
+
+ sta_phead = get_list_head(phwxmit->sta_queue);
+ sta_plist = get_next(sta_phead);
+
+ while (!rtw_end_of_queue_search(sta_phead, sta_plist)) {
+ ptxservq = LIST_CONTAINOR(sta_plist, struct tx_servq, tx_pending);
+
+ pframe_queue = &ptxservq->sta_pending;
+
+ pxmitframe = dequeue_one_xmitframe(pxmitpriv, phwxmit, ptxservq, pframe_queue);
+
+ if (pxmitframe) {
+ phwxmit->accnt--;
+
+ /* Remove sta node when there are no pending packets. */
+ if (_rtw_queue_empty(pframe_queue)) /* must be done after get_next and before break */
+ rtw_list_delete(&ptxservq->tx_pending);
+ goto exit;
+ }
+
+ sta_plist = get_next(sta_plist);
+ }
+ }
+exit:
+ _exit_critical_bh(&pxmitpriv->lock, &irql0);
+_func_exit_;
+ return pxmitframe;
+}
+
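+/*
+ * Map a user priority to the per-station service queue and report the
+ * matching hardware AC index (VO=0, VI=1, BE=2, BK=3).
+ */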
+struct tx_servq *rtw_get_sta_pending(struct adapter *padapter, struct sta_info *psta, int up, u8 *ac)
+{
+ struct tx_servq *ptxservq;
+
+_func_enter_;
+ switch (up) {
+ case 1:
+ case 2:
+ ptxservq = &(psta->sta_xmitpriv.bk_q);
+ *(ac) = 3;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : BK\n"));
+ break;
+ case 4:
+ case 5:
+ ptxservq = &(psta->sta_xmitpriv.vi_q);
+ *(ac) = 1;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : VI\n"));
+ break;
+ case 6:
+ case 7:
+ ptxservq = &(psta->sta_xmitpriv.vo_q);
+ *(ac) = 0;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : VO\n"));
+ break;
+ case 0:
+ case 3:
+ default:
+ ptxservq = &(psta->sta_xmitpriv.be_q);
+ *(ac) = 2;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_get_sta_pending : BE\n"));
+ break;
+ }
+
+_func_exit_;
+
+ return ptxservq;
+}
+
+/*
+ * Will enqueue pxmitframe to the proper queue,
+ * and indicate it to xx_pending list.....
+ */
+s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ /* unsigned long irql0; */
+ u8 ac_index;
+ struct sta_info *psta;
+ struct tx_servq *ptxservq;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
+ int res = _SUCCESS;
+
+_func_enter_;
+
+ if (pattrib->psta) {
+ psta = pattrib->psta;
+ } else {
+ psta = rtw_get_stainfo(pstapriv, pattrib->ra);
+ }
+
+ if (psta == NULL) {
+ res = _FAIL;
+ DBG_88E("rtw_xmit_classifier: psta == NULL\n");
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("rtw_xmit_classifier: psta == NULL\n"));
+ goto exit;
+ }
+
+ ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
+
+ if (rtw_is_list_empty(&ptxservq->tx_pending))
+ rtw_list_insert_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue));
+
+ rtw_list_insert_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending));
+ ptxservq->qcnt++;
+ phwxmits[ac_index].accnt++;
+exit:
+
+_func_exit_;
+
+ return res;
+}
+
+void rtw_alloc_hwxmits(struct adapter *padapter)
+{
+ struct hw_xmit *hwxmits;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ pxmitpriv->hwxmit_entry = HWXMIT_ENTRY;
+
+ pxmitpriv->hwxmits = (struct hw_xmit *)rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry);
+
+ hwxmits = pxmitpriv->hwxmits;
+
+ if (pxmitpriv->hwxmit_entry == 5) {
+ hwxmits[0].sta_queue = &pxmitpriv->bm_pending;
+ hwxmits[1].sta_queue = &pxmitpriv->vo_pending;
+ hwxmits[2].sta_queue = &pxmitpriv->vi_pending;
+ hwxmits[3].sta_queue = &pxmitpriv->bk_pending;
+ hwxmits[4].sta_queue = &pxmitpriv->be_pending;
+ } else if (pxmitpriv->hwxmit_entry == 4) {
+ hwxmits[0].sta_queue = &pxmitpriv->vo_pending;
+ hwxmits[1].sta_queue = &pxmitpriv->vi_pending;
+ hwxmits[2].sta_queue = &pxmitpriv->be_pending;
+ hwxmits[3].sta_queue = &pxmitpriv->bk_pending;
+ }
+}
+
+void rtw_free_hwxmits(struct adapter *padapter)
+{
+ struct hw_xmit *hwxmits;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ hwxmits = pxmitpriv->hwxmits;
+ kfree(hwxmits);
+}
+
+void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry)
+{
+ int i;
+_func_enter_;
+ for (i = 0; i < entry; i++, phwxmit++)
+ phwxmit->accnt = 0;
+_func_exit_;
+}
+
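+/*
+ * Bridge relay TX path: temporarily strip any 802.1Q tag, run the frame
+ * through the NAT25 database, rewrite the source MAC to our own address, set
+ * the DHCP broadcast flag where needed and restore the VLAN tag. Frames whose
+ * source address cannot be translated are dropped.
+ */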
+static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ unsigned long irql;
+ int res, is_vlan_tag = 0, i, do_nat25 = 1;
+ unsigned short vlan_hdr = 0;
+ void *br_port = NULL;
+
+ rcu_read_lock();
+ br_port = rcu_dereference(padapter->pnetdev->rx_handler_data);
+ rcu_read_unlock();
+ _enter_critical_bh(&padapter->br_ext_lock, &irql);
+ if (!(skb->data[0] & 1) && br_port &&
+ memcmp(skb->data+MACADDRLEN, padapter->br_mac, MACADDRLEN) &&
+ *((__be16 *)(skb->data+MACADDRLEN*2)) != __constant_htons(ETH_P_8021Q) &&
+ *((__be16 *)(skb->data+MACADDRLEN*2)) == __constant_htons(ETH_P_IP) &&
+ !memcmp(padapter->scdb_mac, skb->data+MACADDRLEN, MACADDRLEN) && padapter->scdb_entry) {
+ memcpy(skb->data+MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN);
+ padapter->scdb_entry->ageing_timer = jiffies;
+ _exit_critical_bh(&padapter->br_ext_lock, &irql);
+ } else {
+ if (*((__be16 *)(skb->data+MACADDRLEN*2)) == __constant_htons(ETH_P_8021Q)) {
+ is_vlan_tag = 1;
+ vlan_hdr = *((unsigned short *)(skb->data+MACADDRLEN*2+2));
+ for (i = 0; i < 6; i++)
+ *((unsigned short *)(skb->data+MACADDRLEN*2+2-i*2)) = *((unsigned short *)(skb->data+MACADDRLEN*2-2-i*2));
+ skb_pull(skb, 4);
+ }
+ if (!memcmp(skb->data+MACADDRLEN, padapter->br_mac, MACADDRLEN) &&
+ (*((__be16 *)(skb->data+MACADDRLEN*2)) == __constant_htons(ETH_P_IP)))
+ memcpy(padapter->br_ip, skb->data+WLAN_ETHHDR_LEN+12, 4);
+
+ if (*((__be16 *)(skb->data+MACADDRLEN*2)) == __constant_htons(ETH_P_IP)) {
+ if (memcmp(padapter->scdb_mac, skb->data+MACADDRLEN, MACADDRLEN)) {
+ padapter->scdb_entry = (struct nat25_network_db_entry *)scdb_findEntry(padapter,
+ skb->data+MACADDRLEN, skb->data+WLAN_ETHHDR_LEN+12);
+ if (padapter->scdb_entry) {
+ memcpy(padapter->scdb_mac, skb->data+MACADDRLEN, MACADDRLEN);
+ memcpy(padapter->scdb_ip, skb->data+WLAN_ETHHDR_LEN+12, 4);
+ padapter->scdb_entry->ageing_timer = jiffies;
+ do_nat25 = 0;
+ }
+ } else {
+ if (padapter->scdb_entry) {
+ padapter->scdb_entry->ageing_timer = jiffies;
+ do_nat25 = 0;
+ } else {
+ memset(padapter->scdb_mac, 0, MACADDRLEN);
+ memset(padapter->scdb_ip, 0, 4);
+ }
+ }
+ }
+ _exit_critical_bh(&padapter->br_ext_lock, &irql);
+ if (do_nat25) {
+ if (nat25_db_handle(padapter, skb, NAT25_CHECK) == 0) {
+ struct sk_buff *newskb;
+
+ if (is_vlan_tag) {
+ skb_push(skb, 4);
+ for (i = 0; i < 6; i++)
+ *((unsigned short *)(skb->data+i*2)) = *((unsigned short *)(skb->data+4+i*2));
+ *((__be16 *)(skb->data+MACADDRLEN*2)) = __constant_htons(ETH_P_8021Q);
+ *((unsigned short *)(skb->data+MACADDRLEN*2+2)) = vlan_hdr;
+ }
+
+ newskb = skb_copy(skb, GFP_ATOMIC);
+ if (newskb == NULL) {
+ DEBUG_ERR("TX DROP: skb_copy fail!\n");
+ return -1;
+ }
+ dev_kfree_skb_any(skb);
+
+ *pskb = skb = newskb;
+ if (is_vlan_tag) {
+ vlan_hdr = *((unsigned short *)(skb->data+MACADDRLEN*2+2));
+ for (i = 0; i < 6; i++)
+ *((unsigned short *)(skb->data+MACADDRLEN*2+2-i*2)) = *((unsigned short *)(skb->data+MACADDRLEN*2-2-i*2));
+ skb_pull(skb, 4);
+ }
+ }
+
+ if (skb_is_nonlinear(skb))
+ DEBUG_ERR("%s(): skb_is_nonlinear!!\n", __func__);
+
+ res = skb_linearize(skb);
+ if (res < 0) {
+ DEBUG_ERR("TX DROP: skb_linearize fail!\n");
+ return -1;
+ }
+
+ res = nat25_db_handle(padapter, skb, NAT25_INSERT);
+ if (res < 0) {
+ if (res == -2) {
+ DEBUG_ERR("TX DROP: nat25_db_handle fail!\n");
+ return -1;
+ }
+ return 0;
+ }
+ }
+
+ memcpy(skb->data+MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN);
+
+ dhcp_flag_bcast(padapter, skb);
+
+ if (is_vlan_tag) {
+ skb_push(skb, 4);
+ for (i = 0; i < 6; i++)
+ *((unsigned short *)(skb->data+i*2)) = *((unsigned short *)(skb->data+4+i*2));
+ *((__be16 *)(skb->data+MACADDRLEN*2)) = __constant_htons(ETH_P_8021Q);
+ *((unsigned short *)(skb->data+MACADDRLEN*2+2)) = vlan_hdr;
+ }
+ }
+
+ /* check if SA is equal to our MAC */
+ if (memcmp(skb->data+MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN)) {
+ DEBUG_ERR("TX DROP: untransformed frame SA:%02X%02X%02X%02X%02X%02X!\n",
+ skb->data[6], skb->data[7], skb->data[8], skb->data[9], skb->data[10], skb->data[11]);
+ return -1;
+ }
+ return 0;
+}
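+
+/*
+ * Note on the VLAN handling in rtw_br_client_tx(): when an 802.1Q tag is
+ * present, vlan_hdr is stashed, the two 6-byte MAC addresses are shifted
+ * 4 bytes towards the tail (the word-copy loops) and skb_pull(skb, 4)
+ * drops the tag so NAT25 sees an untagged frame; after translation the
+ * addresses are shifted back with skb_push(skb, 4) and the ETH_P_8021Q
+ * ethertype plus the saved vlan_hdr are restored.
+ */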
+
+u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe)
+{
+ u32 addr;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+
+ switch (pattrib->qsel) {
+ case 0:
+ case 3:
+ addr = BE_QUEUE_INX;
+ break;
+ case 1:
+ case 2:
+ addr = BK_QUEUE_INX;
+ break;
+ case 4:
+ case 5:
+ addr = VI_QUEUE_INX;
+ break;
+ case 6:
+ case 7:
+ addr = VO_QUEUE_INX;
+ break;
+ case 0x10:
+ addr = BCN_QUEUE_INX;
+ break;
+ case 0x11: /* BC/MC in PS (HIQ) */
+ addr = HIGH_QUEUE_INX;
+ break;
+ case 0x12:
+ default:
+ addr = MGT_QUEUE_INX;
+ break;
+ }
+
+ return addr;
+}
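+
+/*
+ * Summary of the qsel to hardware queue mapping implemented by
+ * rtw_get_ff_hwaddr():
+ *
+ *	qsel		queue
+ *	0, 3		BE_QUEUE_INX
+ *	1, 2		BK_QUEUE_INX
+ *	4, 5		VI_QUEUE_INX
+ *	6, 7		VO_QUEUE_INX
+ *	0x10		BCN_QUEUE_INX
+ *	0x11		HIGH_QUEUE_INX (BC/MC while stations are in PS)
+ *	0x12, others	MGT_QUEUE_INX
+ */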
+
+static void do_queue_select(struct adapter *padapter, struct pkt_attrib *pattrib)
+{
+ u8 qsel;
+
+ qsel = pattrib->priority;
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("### do_queue_select priority=%d , qsel = %d\n", pattrib->priority , qsel));
+
+ pattrib->qsel = qsel;
+}
+
+/*
+ * The main transmit (tx) entry point.
+ *
+ * Return:
+ *	1	frame was enqueued for later transmission
+ *	0	success, the hardware will handle this xmit frame (packet)
+ *	<0	failure
+ */
+s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt)
+{
+#ifdef CONFIG_88EU_AP_MODE
+ unsigned long irql0;
+#endif
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct xmit_frame *pxmitframe = NULL;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ void *br_port = NULL;
+ s32 res;
+
+ pxmitframe = rtw_alloc_xmitframe(pxmitpriv);
+ if (pxmitframe == NULL) {
+ RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit: no more pxmitframe\n"));
+ DBG_88E("DBG_TX_DROP_FRAME %s no more pxmitframe\n", __func__);
+ return -1;
+ }
+
+ rcu_read_lock();
+ br_port = rcu_dereference(padapter->pnetdev->rx_handler_data);
+ rcu_read_unlock();
+
+ if (br_port && check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE)) {
+ res = rtw_br_client_tx(padapter, ppkt);
+ if (res == -1) {
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
+ return -1;
+ }
+ }
+
+ res = update_attrib(padapter, *ppkt, &pxmitframe->attrib);
+
+ if (res == _FAIL) {
+ RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit: update attrib fail\n"));
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
+ return -1;
+ }
+ pxmitframe->pkt = *ppkt;
+
+ rtw_led_control(padapter, LED_CTL_TX);
+
+ do_queue_select(padapter, &pxmitframe->attrib);
+
+#ifdef CONFIG_88EU_AP_MODE
+ _enter_critical_bh(&pxmitpriv->lock, &irql0);
+ if (xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe)) {
+ _exit_critical_bh(&pxmitpriv->lock, &irql0);
+ return 1;
+ }
+ _exit_critical_bh(&pxmitpriv->lock, &irql0);
+#endif
+
+ if (rtw_hal_xmit(padapter, pxmitframe) == false)
+ return 1;
+
+ return 0;
+}
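+
+/*
+ * A hypothetical caller sketch for rtw_xmit(); the real entry point lives
+ * in the os_dep layer and is not reproduced here, so the skb handling below
+ * only illustrates how the documented return values are meant to be used:
+ *
+ *	s32 res = rtw_xmit(padapter, &skb);
+ *
+ *	if (res < 0)
+ *		... drop: free the skb (rtw_xmit released its xmit_frame) ...
+ *	else if (res == 1)
+ *		... frame was enqueued (e.g. for a sleeping station) ...
+ *	else
+ *		... res == 0: handed to the hardware via rtw_hal_xmit() ...
+ */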
+
+#if defined(CONFIG_88EU_AP_MODE)
+
+int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_frame *pxmitframe)
+{
+ unsigned long irql;
+ int ret = false;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ int bmcst = IS_MCAST(pattrib->ra);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == false)
+ return ret;
+
+ if (pattrib->psta)
+ psta = pattrib->psta;
+ else
+ psta = rtw_get_stainfo(pstapriv, pattrib->ra);
+
+ if (psta == NULL)
+ return ret;
+
+ if (pattrib->triggered == 1) {
+ if (bmcst)
+ pattrib->qsel = 0x11; /* HIQ */
+ return ret;
+ }
+
+ if (bmcst) {
+ _enter_critical_bh(&psta->sleep_q.lock, &irql);
+
+ if (pstapriv->sta_dz_bitmap) { /* if any sta is in ps mode */
+ rtw_list_delete(&pxmitframe->list);
+
+ rtw_list_insert_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
+
+ psta->sleepq_len++;
+
+ pstapriv->tim_bitmap |= BIT(0);
+ pstapriv->sta_dz_bitmap |= BIT(0);
+
+ update_beacon(padapter, _TIM_IE_, NULL, false); /* tx bc/mc packets after updating bcn */
+
+ ret = true;
+ }
+
+ _exit_critical_bh(&psta->sleep_q.lock, &irql);
+
+ return ret;
+ }
+
+ _enter_critical_bh(&psta->sleep_q.lock, &irql);
+
+ if (psta->state&WIFI_SLEEP_STATE) {
+ u8 wmmps_ac = 0;
+
+ if (pstapriv->sta_dz_bitmap&BIT(psta->aid)) {
+ rtw_list_delete(&pxmitframe->list);
+
+ rtw_list_insert_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
+
+ psta->sleepq_len++;
+
+ switch (pattrib->priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk&BIT(0);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi&BIT(0);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo&BIT(0);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be&BIT(0);
+ break;
+ }
+
+ if (wmmps_ac)
+ psta->sleepq_ac_len++;
+
+ if (((psta->has_legacy_ac) && (!wmmps_ac)) ||
+ ((!psta->has_legacy_ac) && (wmmps_ac))) {
+ pstapriv->tim_bitmap |= BIT(psta->aid);
+
+ if (psta->sleepq_len == 1) {
+ /* update BCN for TIM IE */
+ update_beacon(padapter, _TIM_IE_, NULL, false);
+ }
+ }
+ ret = true;
+ }
+ }
+
+ _exit_critical_bh(&psta->sleep_q.lock, &irql);
+
+ return ret;
+}
+
+static void dequeue_xmitframes_to_sleeping_queue(struct adapter *padapter, struct sta_info *psta, struct __queue *pframequeue)
+{
+ struct list_head *plist, *phead;
+ u8 ac_index;
+ struct tx_servq *ptxservq;
+ struct pkt_attrib *pattrib;
+ struct xmit_frame *pxmitframe;
+ struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
+
+ phead = get_list_head(pframequeue);
+ plist = get_next(phead);
+
+ while (!rtw_end_of_queue_search(phead, plist)) {
+ pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
+
+ plist = get_next(plist);
+
+ xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe);
+
+ pattrib = &pxmitframe->attrib;
+
+ ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
+
+ ptxservq->qcnt--;
+ phwxmits[ac_index].accnt--;
+ }
+}
+
+void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta)
+{
+ unsigned long irql0;
+ struct sta_info *psta_bmc;
+ struct sta_xmit_priv *pstaxmitpriv;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ pstaxmitpriv = &psta->sta_xmitpriv;
+
+ /* for BC/MC Frames */
+ psta_bmc = rtw_get_bcmc_stainfo(padapter);
+
+ _enter_critical_bh(&pxmitpriv->lock, &irql0);
+
+ psta->state |= WIFI_SLEEP_STATE;
+
+ pstapriv->sta_dz_bitmap |= BIT(psta->aid);
+
+ dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vo_q.sta_pending);
+ rtw_list_delete(&(pstaxmitpriv->vo_q.tx_pending));
+
+ dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vi_q.sta_pending);
+ rtw_list_delete(&(pstaxmitpriv->vi_q.tx_pending));
+
+ dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->be_q.sta_pending);
+ rtw_list_delete(&(pstaxmitpriv->be_q.tx_pending));
+
+ dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->bk_q.sta_pending);
+ rtw_list_delete(&(pstaxmitpriv->bk_q.tx_pending));
+
+ /* for BC/MC Frames */
+ pstaxmitpriv = &psta_bmc->sta_xmitpriv;
+ dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc, &pstaxmitpriv->be_q.sta_pending);
+ rtw_list_delete(&(pstaxmitpriv->be_q.tx_pending));
+
+ _exit_critical_bh(&pxmitpriv->lock, &irql0);
+}
+
+void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
+{
+ unsigned long irql;
+ u8 update_mask = 0, wmmps_ac = 0;
+ struct sta_info *psta_bmc;
+ struct list_head *xmitframe_plist, *xmitframe_phead;
+ struct xmit_frame *pxmitframe = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ _enter_critical_bh(&psta->sleep_q.lock, &irql);
+
+ xmitframe_phead = get_list_head(&psta->sleep_q);
+ xmitframe_plist = get_next(xmitframe_phead);
+
+ while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
+ pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+
+ xmitframe_plist = get_next(xmitframe_plist);
+
+ rtw_list_delete(&pxmitframe->list);
+
+ switch (pxmitframe->attrib.priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk&BIT(1);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi&BIT(1);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo&BIT(1);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be&BIT(1);
+ break;
+ }
+
+ psta->sleepq_len--;
+ if (psta->sleepq_len > 0)
+ pxmitframe->attrib.mdata = 1;
+ else
+ pxmitframe->attrib.mdata = 0;
+
+ if (wmmps_ac) {
+ psta->sleepq_ac_len--;
+ if (psta->sleepq_ac_len > 0) {
+ pxmitframe->attrib.mdata = 1;
+ pxmitframe->attrib.eosp = 0;
+ } else {
+ pxmitframe->attrib.mdata = 0;
+ pxmitframe->attrib.eosp = 1;
+ }
+ }
+
+ pxmitframe->attrib.triggered = 1;
+
+ _exit_critical_bh(&psta->sleep_q.lock, &irql);
+ if (rtw_hal_xmit(padapter, pxmitframe))
+ rtw_os_xmit_complete(padapter, pxmitframe);
+ _enter_critical_bh(&psta->sleep_q.lock, &irql);
+ }
+
+ if (psta->sleepq_len == 0) {
+ pstapriv->tim_bitmap &= ~BIT(psta->aid);
+
+ update_mask = BIT(0);
+
+ if (psta->state&WIFI_SLEEP_STATE)
+ psta->state ^= WIFI_SLEEP_STATE;
+
+ if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
+ psta->expire_to = pstapriv->expire_to;
+ psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
+ }
+
+ pstapriv->sta_dz_bitmap &= ~BIT(psta->aid);
+ }
+
+ _exit_critical_bh(&psta->sleep_q.lock, &irql);
+
+ /* for BC/MC Frames */
+ psta_bmc = rtw_get_bcmc_stainfo(padapter);
+ if (!psta_bmc)
+ return;
+
+ if ((pstapriv->sta_dz_bitmap&0xfffe) == 0x0) { /* no sta is in ps mode */
+ _enter_critical_bh(&psta_bmc->sleep_q.lock, &irql);
+
+ xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
+ xmitframe_plist = get_next(xmitframe_phead);
+
+ while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
+ pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+
+ xmitframe_plist = get_next(xmitframe_plist);
+
+ rtw_list_delete(&pxmitframe->list);
+
+ psta_bmc->sleepq_len--;
+ if (psta_bmc->sleepq_len > 0)
+ pxmitframe->attrib.mdata = 1;
+ else
+ pxmitframe->attrib.mdata = 0;
+
+ pxmitframe->attrib.triggered = 1;
+
+ _exit_critical_bh(&psta_bmc->sleep_q.lock, &irql);
+ if (rtw_hal_xmit(padapter, pxmitframe))
+ rtw_os_xmit_complete(padapter, pxmitframe);
+ _enter_critical_bh(&psta_bmc->sleep_q.lock, &irql);
+ }
+
+ if (psta_bmc->sleepq_len == 0) {
+ pstapriv->tim_bitmap &= ~BIT(0);
+ pstapriv->sta_dz_bitmap &= ~BIT(0);
+
+ update_mask |= BIT(1);
+ }
+
+ _exit_critical_bh(&psta_bmc->sleep_q.lock, &irql);
+ }
+
+ if (update_mask)
+ update_beacon(padapter, _TIM_IE_, NULL, false);
+}
+
+void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *psta)
+{
+ unsigned long irql;
+ u8 wmmps_ac = 0;
+ struct list_head *xmitframe_plist, *xmitframe_phead;
+ struct xmit_frame *pxmitframe = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ _enter_critical_bh(&psta->sleep_q.lock, &irql);
+
+ xmitframe_phead = get_list_head(&psta->sleep_q);
+ xmitframe_plist = get_next(xmitframe_phead);
+
+ while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
+ pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+
+ xmitframe_plist = get_next(xmitframe_plist);
+
+ switch (pxmitframe->attrib.priority) {
+ case 1:
+ case 2:
+ wmmps_ac = psta->uapsd_bk&BIT(1);
+ break;
+ case 4:
+ case 5:
+ wmmps_ac = psta->uapsd_vi&BIT(1);
+ break;
+ case 6:
+ case 7:
+ wmmps_ac = psta->uapsd_vo&BIT(1);
+ break;
+ case 0:
+ case 3:
+ default:
+ wmmps_ac = psta->uapsd_be&BIT(1);
+ break;
+ }
+
+ if (!wmmps_ac)
+ continue;
+
+ rtw_list_delete(&pxmitframe->list);
+
+ psta->sleepq_len--;
+ psta->sleepq_ac_len--;
+
+ if (psta->sleepq_ac_len > 0) {
+ pxmitframe->attrib.mdata = 1;
+ pxmitframe->attrib.eosp = 0;
+ } else {
+ pxmitframe->attrib.mdata = 0;
+ pxmitframe->attrib.eosp = 1;
+ }
+
+ pxmitframe->attrib.triggered = 1;
+
+ if (rtw_hal_xmit(padapter, pxmitframe) == true)
+ rtw_os_xmit_complete(padapter, pxmitframe);
+
+ if ((psta->sleepq_ac_len == 0) && (!psta->has_legacy_ac) && (wmmps_ac)) {
+ pstapriv->tim_bitmap &= ~BIT(psta->aid);
+
+ /* update BCN for TIM IE */
+ update_beacon(padapter, _TIM_IE_, NULL, false);
+ }
+ }
+
+ _exit_critical_bh(&psta->sleep_q.lock, &irql);
+}
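+
+/*
+ * Power-save flow implemented by the three helpers above: stop_sta_xmit()
+ * moves a station's pending frames onto its sleep_q when it enters power
+ * save and marks it in sta_dz_bitmap; wakeup_sta_to_xmit() flushes the
+ * whole sleep_q (and the BC/MC queue once no station sleeps) when the
+ * station wakes up; xmit_delivery_enabled_frames() releases only frames of
+ * U-APSD delivery-enabled ACs in response to a trigger frame.  All three
+ * paths keep the TIM bitmap in sync via update_beacon() as the queues
+ * fill or drain.
+ */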
+
+#endif
+
+void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms)
+{
+ sctx->timeout_ms = timeout_ms;
+ sctx->submit_time = rtw_get_current_time();
+ init_completion(&sctx->done);
+ sctx->status = RTW_SCTX_SUBMITTED;
+}
+
+int rtw_sctx_wait(struct submit_ctx *sctx)
+{
+ int ret = _FAIL;
+ unsigned long expire;
+ int status = 0;
+
+ expire = sctx->timeout_ms ? msecs_to_jiffies(sctx->timeout_ms) : MAX_SCHEDULE_TIMEOUT;
+ if (!wait_for_completion_timeout(&sctx->done, expire)) {
+ /* timed out waiting for the completion */
+ status = RTW_SCTX_DONE_TIMEOUT;
+ DBG_88E("%s timeout\n", __func__);
+ } else {
+ status = sctx->status;
+ }
+
+ if (status == RTW_SCTX_DONE_SUCCESS)
+ ret = _SUCCESS;
+
+ return ret;
+}
+
+static bool rtw_sctx_chk_warning_status(int status)
+{
+ switch (status) {
+ case RTW_SCTX_DONE_UNKNOWN:
+ case RTW_SCTX_DONE_BUF_ALLOC:
+ case RTW_SCTX_DONE_BUF_FREE:
+
+ case RTW_SCTX_DONE_DRV_STOP:
+ case RTW_SCTX_DONE_DEV_REMOVE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+void rtw_sctx_done_err(struct submit_ctx **sctx, int status)
+{
+ if (*sctx) {
+ if (rtw_sctx_chk_warning_status(status))
+ DBG_88E("%s status:%d\n", __func__, status);
+ (*sctx)->status = status;
+ complete(&((*sctx)->done));
+ *sctx = NULL;
+ }
+}
+
+void rtw_sctx_done(struct submit_ctx **sctx)
+{
+ rtw_sctx_done_err(sctx, RTW_SCTX_DONE_SUCCESS);
+}
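+
+/*
+ * A minimal usage sketch of the submit_ctx helpers above, assuming the
+ * submitter and the completer run in different contexts (for example a
+ * thread issuing a USB write and the corresponding completion handler):
+ *
+ *	struct submit_ctx sctx;
+ *
+ *	rtw_sctx_init(&sctx, 2000);	- arm a 2000 ms timeout
+ *	... hand &sctx to the I/O path ...
+ *	ret = rtw_sctx_wait(&sctx);	- _SUCCESS only if the completer
+ *					  reported RTW_SCTX_DONE_SUCCESS
+ *					  before the timeout expired
+ *
+ * The completion side calls rtw_sctx_done() on success or
+ * rtw_sctx_done_err() with one of the RTW_SCTX_DONE_* codes on failure.
+ */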
+
+int rtw_ack_tx_wait(struct xmit_priv *pxmitpriv, u32 timeout_ms)
+{
+ struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
+
+ pack_tx_ops->submit_time = rtw_get_current_time();
+ pack_tx_ops->timeout_ms = timeout_ms;
+ pack_tx_ops->status = RTW_SCTX_SUBMITTED;
+
+ return rtw_sctx_wait(pack_tx_ops);
+}
+
+void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status)
+{
+ struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
+
+ if (pxmitpriv->ack_tx)
+ rtw_sctx_done_err(&pack_tx_ops, status);
+ else
+ DBG_88E("%s ack_tx not set\n", __func__);
+}
diff --git a/drivers/staging/rtl8188eu/hal/Hal8188EFWImg_CE.c b/drivers/staging/rtl8188eu/hal/Hal8188EFWImg_CE.c
new file mode 100644
index 00000000000..95759bed541
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/Hal8188EFWImg_CE.c
@@ -0,0 +1,1761 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along with
+* this program; if not, write to the Free Software Foundation, Inc.,
+* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+*
+*
+******************************************************************************/
+#include "odm_precomp.h"
+
+const u8 Rtl8188EFwImgArray[Rtl8188EFWImgArrayLength] = {
+ 0xE1, 0x88, 0x10, 0x00, 0x0B, 0x00, 0x01, 0x00,
+ 0x01, 0x21, 0x11, 0x27, 0x30, 0x36, 0x00, 0x00,
+ 0x2D, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x02, 0x45, 0x4E, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xC1, 0x6F, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0xA1, 0xE6, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x02, 0x56, 0xF7, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0xC2, 0xAF, 0x80, 0xFE, 0x32, 0x12, 0x42, 0x04,
+ 0x85, 0xD0, 0x0B, 0x75, 0xD0, 0x08, 0xAA, 0xE0,
+ 0xC2, 0x8C, 0xE5, 0x8A, 0x24, 0x67, 0xF5, 0x8A,
+ 0xE5, 0x8C, 0x34, 0x79, 0xF5, 0x8C, 0xD2, 0x8C,
+ 0xEC, 0x24, 0x89, 0xF8, 0xE6, 0xBC, 0x03, 0x02,
+ 0x74, 0xFF, 0xC3, 0x95, 0x81, 0xB4, 0x40, 0x00,
+ 0x40, 0xCE, 0x79, 0x04, 0x78, 0x80, 0x16, 0xE6,
+ 0x08, 0x70, 0x0B, 0xC2, 0xAF, 0xE6, 0x30, 0xE1,
+ 0x03, 0x44, 0x18, 0xF6, 0xD2, 0xAF, 0x08, 0xD9,
+ 0xED, 0xEA, 0x8B, 0xD0, 0x22, 0xE5, 0x0C, 0xFF,
+ 0x23, 0x24, 0x81, 0xF8, 0x0F, 0x08, 0x08, 0xBF,
+ 0x04, 0x04, 0x7F, 0x00, 0x78, 0x81, 0xE6, 0x30,
+ 0xE4, 0xF2, 0x00, 0xE5, 0x0C, 0xC3, 0x9F, 0x50,
+ 0x20, 0x05, 0x0C, 0x74, 0x88, 0x25, 0x0C, 0xF8,
+ 0xE6, 0xFD, 0xA6, 0x81, 0x08, 0xE6, 0xAE, 0x0C,
+ 0xBE, 0x03, 0x02, 0x74, 0xFF, 0xCD, 0xF8, 0xE8,
+ 0x6D, 0x60, 0xE0, 0x08, 0xE6, 0xC0, 0xE0, 0x80,
+ 0xF6, 0xE5, 0x0C, 0xD3, 0x9F, 0x40, 0x27, 0xE5,
+ 0x0C, 0x24, 0x89, 0xF8, 0xE6, 0xAE, 0x0C, 0xBE,
+ 0x03, 0x02, 0x74, 0xFF, 0xFD, 0x18, 0xE6, 0xCD,
+ 0xF8, 0xE5, 0x81, 0x6D, 0x60, 0x06, 0xD0, 0xE0,
+ 0xF6, 0x18, 0x80, 0xF5, 0xE5, 0x0C, 0x24, 0x88,
+ 0xC8, 0xF6, 0x15, 0x0C, 0x80, 0xD3, 0xE5, 0x0C,
+ 0x23, 0x24, 0x81, 0xF8, 0x7F, 0x04, 0xC2, 0xAF,
+ 0xE6, 0x30, 0xE0, 0x03, 0x10, 0xE2, 0x0C, 0x7F,
+ 0x00, 0x30, 0xE1, 0x07, 0x30, 0xE3, 0x04, 0x7F,
+ 0x08, 0x54, 0xF4, 0x54, 0x7C, 0xC6, 0xD2, 0xAF,
+ 0x54, 0x80, 0x42, 0x07, 0x22, 0x78, 0x88, 0xA6,
+ 0x81, 0x74, 0x03, 0x60, 0x06, 0xFF, 0x08, 0x76,
+ 0xFF, 0xDF, 0xFB, 0x7F, 0x04, 0xE4, 0x78, 0x80,
+ 0xF6, 0x08, 0xF6, 0x08, 0xDF, 0xFA, 0x78, 0x81,
+ 0x76, 0x30, 0x90, 0x45, 0xDE, 0x74, 0x01, 0x93,
+ 0xC0, 0xE0, 0xE4, 0x93, 0xC0, 0xE0, 0x43, 0x89,
+ 0x01, 0x75, 0x8A, 0x60, 0x75, 0x8C, 0x79, 0xD2,
+ 0x8C, 0xD2, 0xAF, 0x22, 0x03, 0xEF, 0xD3, 0x94,
+ 0x03, 0x40, 0x03, 0x7F, 0xFF, 0x22, 0x74, 0x81,
+ 0x2F, 0x2F, 0xF8, 0xE6, 0x20, 0xE5, 0xF4, 0xC2,
+ 0xAF, 0xE6, 0x44, 0x30, 0xF6, 0xD2, 0xAF, 0xAE,
+ 0x0C, 0xEE, 0xC3, 0x9F, 0x50, 0x21, 0x0E, 0x74,
+ 0x88, 0x2E, 0xF8, 0xE6, 0xF9, 0x08, 0xE6, 0x18,
+ 0xBE, 0x03, 0x02, 0x74, 0xFF, 0xFD, 0xED, 0x69,
+ 0x60, 0x09, 0x09, 0xE7, 0x19, 0x19, 0xF7, 0x09,
+ 0x09, 0x80, 0xF3, 0x16, 0x16, 0x80, 0xDA, 0xEE,
+ 0xD3, 0x9F, 0x40, 0x04, 0x05, 0x81, 0x05, 0x81,
+ 0xEE, 0xD3, 0x9F, 0x40, 0x22, 0x74, 0x88, 0x2E,
+ 0xF8, 0x08, 0xE6, 0xF9, 0xEE, 0xB5, 0x0C, 0x02,
+ 0xA9, 0x81, 0x18, 0x06, 0x06, 0xE6, 0xFD, 0xED,
+ 0x69, 0x60, 0x09, 0x19, 0x19, 0xE7, 0x09, 0x09,
+ 0xF7, 0x19, 0x80, 0xF3, 0x1E, 0x80, 0xD9, 0xEF,
+ 0x24, 0x88, 0xF8, 0xE6, 0x04, 0xF8, 0xEF, 0x2F,
+ 0x04, 0x90, 0x45, 0xDE, 0x93, 0xF6, 0x08, 0xEF,
+ 0x2F, 0x93, 0xF6, 0x7F, 0x00, 0x22, 0xEF, 0xD3,
+ 0x94, 0x03, 0x40, 0x03, 0x7F, 0xFF, 0x22, 0xEF,
+ 0x23, 0x24, 0x81, 0xF8, 0xE6, 0x30, 0xE5, 0xF4,
+ 0xC2, 0xAF, 0xE6, 0x54, 0x8C, 0xF6, 0xD2, 0xAF,
+ 0xE5, 0x0C, 0xB5, 0x07, 0x0A, 0x74, 0x88, 0x2F,
+ 0xF8, 0xE6, 0xF5, 0x81, 0x02, 0x42, 0x4D, 0x50,
+ 0x2E, 0x74, 0x89, 0x2F, 0xF8, 0xE6, 0xBF, 0x03,
+ 0x02, 0x74, 0xFF, 0xFD, 0x18, 0xE6, 0xF9, 0x74,
+ 0x88, 0x2F, 0xF8, 0xFB, 0xE6, 0xFC, 0xE9, 0x6C,
+ 0x60, 0x08, 0xA8, 0x05, 0xE7, 0xF6, 0x1D, 0x19,
+ 0x80, 0xF4, 0xA8, 0x03, 0xA6, 0x05, 0x1F, 0xE5,
+ 0x0C, 0xB5, 0x07, 0xE3, 0x7F, 0x00, 0x22, 0x74,
+ 0x89, 0x2F, 0xF8, 0xE6, 0xFD, 0x18, 0x86, 0x01,
+ 0x0F, 0x74, 0x88, 0x2F, 0xF8, 0xA6, 0x01, 0x08,
+ 0x86, 0x04, 0xE5, 0x0C, 0xB5, 0x07, 0x02, 0xAC,
+ 0x81, 0xED, 0x6C, 0x60, 0x08, 0x0D, 0x09, 0xA8,
+ 0x05, 0xE6, 0xF7, 0x80, 0xF4, 0xE5, 0x0C, 0xB5,
+ 0x07, 0xDE, 0x89, 0x81, 0x7F, 0x00, 0x22, 0xEF,
+ 0xD3, 0x94, 0x03, 0x40, 0x03, 0x7F, 0xFF, 0x22,
+ 0xEF, 0x23, 0x24, 0x81, 0xF8, 0xC2, 0xAF, 0xE6,
+ 0x30, 0xE5, 0x05, 0x30, 0xE0, 0x02, 0xD2, 0xE4,
+ 0xD2, 0xE2, 0xC6, 0xD2, 0xAF, 0x7F, 0x00, 0x30,
+ 0xE2, 0x01, 0x0F, 0x02, 0x42, 0x4C, 0x8F, 0xF0,
+ 0xE4, 0xFF, 0xFE, 0xE5, 0x0C, 0x23, 0x24, 0x80,
+ 0xF8, 0xC2, 0xA9, 0x30, 0xF7, 0x0D, 0x7F, 0x08,
+ 0xE6, 0x60, 0x0B, 0x2D, 0xF6, 0x60, 0x30, 0x50,
+ 0x2E, 0x80, 0x07, 0x30, 0xF1, 0x06, 0xED, 0xF6,
+ 0x60, 0x25, 0x7E, 0x02, 0x08, 0x30, 0xF0, 0x10,
+ 0xC2, 0xAF, 0xE6, 0x10, 0xE7, 0x23, 0x0E, 0x30,
+ 0xE2, 0x0C, 0xD2, 0xAF, 0x7F, 0x04, 0x80, 0x12,
+ 0xC2, 0xAF, 0xE6, 0x10, 0xE7, 0x13, 0x54, 0xEC,
+ 0x4E, 0xF6, 0xD2, 0xAF, 0x02, 0x42, 0x4D, 0x7F,
+ 0x08, 0x08, 0xEF, 0x44, 0x83, 0xF4, 0xC2, 0xAF,
+ 0x56, 0xC6, 0xD2, 0xAF, 0x54, 0x80, 0x4F, 0xFF,
+ 0x22, 0xC5, 0xF0, 0xF8, 0xA3, 0xE0, 0x28, 0xF0,
+ 0xC5, 0xF0, 0xF8, 0xE5, 0x82, 0x15, 0x82, 0x70,
+ 0x02, 0x15, 0x83, 0xE0, 0x38, 0xF0, 0x22, 0xEF,
+ 0x5B, 0xFF, 0xEE, 0x5A, 0xFE, 0xED, 0x59, 0xFD,
+ 0xEC, 0x58, 0xFC, 0x22, 0xEF, 0x4B, 0xFF, 0xEE,
+ 0x4A, 0xFE, 0xED, 0x49, 0xFD, 0xEC, 0x48, 0xFC,
+ 0x22, 0xE0, 0xFC, 0xA3, 0xE0, 0xFD, 0xA3, 0xE0,
+ 0xFE, 0xA3, 0xE0, 0xFF, 0x22, 0xE2, 0xFC, 0x08,
+ 0xE2, 0xFD, 0x08, 0xE2, 0xFE, 0x08, 0xE2, 0xFF,
+ 0x22, 0xE2, 0xFB, 0x08, 0xE2, 0xF9, 0x08, 0xE2,
+ 0xFA, 0x08, 0xE2, 0xCB, 0xF8, 0x22, 0xEC, 0xF2,
+ 0x08, 0xED, 0xF2, 0x08, 0xEE, 0xF2, 0x08, 0xEF,
+ 0xF2, 0x22, 0xA4, 0x25, 0x82, 0xF5, 0x82, 0xE5,
+ 0xF0, 0x35, 0x83, 0xF5, 0x83, 0x22, 0xE0, 0xFB,
+ 0xA3, 0xE0, 0xFA, 0xA3, 0xE0, 0xF9, 0x22, 0xEB,
+ 0xF0, 0xA3, 0xEA, 0xF0, 0xA3, 0xE9, 0xF0, 0x22,
+ 0xD0, 0x83, 0xD0, 0x82, 0xF8, 0xE4, 0x93, 0x70,
+ 0x12, 0x74, 0x01, 0x93, 0x70, 0x0D, 0xA3, 0xA3,
+ 0x93, 0xF8, 0x74, 0x01, 0x93, 0xF5, 0x82, 0x88,
+ 0x83, 0xE4, 0x73, 0x74, 0x02, 0x93, 0x68, 0x60,
+ 0xEF, 0xA3, 0xA3, 0xA3, 0x80, 0xDF, 0x02, 0x45,
+ 0x8C, 0x02, 0x42, 0xDD, 0xE4, 0x93, 0xA3, 0xF8,
+ 0xE4, 0x93, 0xA3, 0x40, 0x03, 0xF6, 0x80, 0x01,
+ 0xF2, 0x08, 0xDF, 0xF4, 0x80, 0x29, 0xE4, 0x93,
+ 0xA3, 0xF8, 0x54, 0x07, 0x24, 0x0C, 0xC8, 0xC3,
+ 0x33, 0xC4, 0x54, 0x0F, 0x44, 0x20, 0xC8, 0x83,
+ 0x40, 0x04, 0xF4, 0x56, 0x80, 0x01, 0x46, 0xF6,
+ 0xDF, 0xE4, 0x80, 0x0B, 0x01, 0x02, 0x04, 0x08,
+ 0x10, 0x20, 0x40, 0x80, 0x90, 0x45, 0xD1, 0xE4,
+ 0x7E, 0x01, 0x93, 0x60, 0xBC, 0xA3, 0xFF, 0x54,
+ 0x3F, 0x30, 0xE5, 0x09, 0x54, 0x1F, 0xFE, 0xE4,
+ 0x93, 0xA3, 0x60, 0x01, 0x0E, 0xCF, 0x54, 0xC0,
+ 0x25, 0xE0, 0x60, 0xA8, 0x40, 0xB8, 0xE4, 0x93,
+ 0xA3, 0xFA, 0xE4, 0x93, 0xA3, 0xF8, 0xE4, 0x93,
+ 0xA3, 0xC8, 0xC5, 0x82, 0xC8, 0xCA, 0xC5, 0x83,
+ 0xCA, 0xF0, 0xA3, 0xC8, 0xC5, 0x82, 0xC8, 0xCA,
+ 0xC5, 0x83, 0xCA, 0xDF, 0xE9, 0xDE, 0xE7, 0x80,
+ 0xBE, 0x00, 0x41, 0x82, 0x09, 0x00, 0x41, 0x82,
+ 0x0A, 0x00, 0x41, 0x82, 0x17, 0x00, 0x59, 0xE2,
+ 0x5C, 0x24, 0x5E, 0x5D, 0x5F, 0xA1, 0xC0, 0xE0,
+ 0xC0, 0xF0, 0xC0, 0x83, 0xC0, 0x82, 0xC0, 0xD0,
+ 0x75, 0xD0, 0x00, 0xC0, 0x00, 0xC0, 0x01, 0xC0,
+ 0x02, 0xC0, 0x03, 0xC0, 0x04, 0xC0, 0x05, 0xC0,
+ 0x06, 0xC0, 0x07, 0x90, 0x01, 0xC4, 0x74, 0xE6,
+ 0xF0, 0x74, 0x45, 0xA3, 0xF0, 0xD1, 0x35, 0x74,
+ 0xE6, 0x04, 0x90, 0x01, 0xC4, 0xF0, 0x74, 0x45,
+ 0xA3, 0xF0, 0xD0, 0x07, 0xD0, 0x06, 0xD0, 0x05,
+ 0xD0, 0x04, 0xD0, 0x03, 0xD0, 0x02, 0xD0, 0x01,
+ 0xD0, 0x00, 0xD0, 0xD0, 0xD0, 0x82, 0xD0, 0x83,
+ 0xD0, 0xF0, 0xD0, 0xE0, 0x32, 0x90, 0x00, 0x54,
+ 0xE0, 0x55, 0x35, 0xF5, 0x39, 0xA3, 0xE0, 0x55,
+ 0x36, 0xF5, 0x3A, 0xA3, 0xE0, 0x55, 0x37, 0xF5,
+ 0x3B, 0xA3, 0xE0, 0x55, 0x38, 0xF5, 0x3C, 0xAD,
+ 0x39, 0x7F, 0x54, 0x12, 0x32, 0x1E, 0xAD, 0x3A,
+ 0x7F, 0x55, 0x12, 0x32, 0x1E, 0xAD, 0x3B, 0x7F,
+ 0x56, 0x12, 0x32, 0x1E, 0xAD, 0x3C, 0x7F, 0x57,
+ 0x12, 0x32, 0x1E, 0x53, 0x91, 0xEF, 0x22, 0xC0,
+ 0xE0, 0xC0, 0xF0, 0xC0, 0x83, 0xC0, 0x82, 0xC0,
+ 0xD0, 0x75, 0xD0, 0x00, 0xC0, 0x00, 0xC0, 0x01,
+ 0xC0, 0x02, 0xC0, 0x03, 0xC0, 0x04, 0xC0, 0x05,
+ 0xC0, 0x06, 0xC0, 0x07, 0x90, 0x01, 0xC4, 0x74,
+ 0x6F, 0xF0, 0x74, 0x46, 0xA3, 0xF0, 0x12, 0x6C,
+ 0x78, 0xE5, 0x41, 0x30, 0xE4, 0x04, 0x7F, 0x02,
+ 0x91, 0x27, 0xE5, 0x41, 0x30, 0xE6, 0x03, 0x12,
+ 0x6C, 0xD5, 0xE5, 0x43, 0x30, 0xE0, 0x03, 0x12,
+ 0x51, 0xC2, 0xE5, 0x43, 0x30, 0xE1, 0x03, 0x12,
+ 0x4D, 0x0C, 0xE5, 0x43, 0x30, 0xE2, 0x03, 0x12,
+ 0x4C, 0xC1, 0xE5, 0x43, 0x30, 0xE3, 0x03, 0x12,
+ 0x6C, 0xE2, 0xE5, 0x43, 0x30, 0xE4, 0x03, 0x12,
+ 0x6D, 0x04, 0xE5, 0x43, 0x30, 0xE5, 0x03, 0x12,
+ 0x6D, 0x33, 0xE5, 0x43, 0x30, 0xE6, 0x02, 0xF1,
+ 0x0F, 0xE5, 0x44, 0x30, 0xE1, 0x03, 0x12, 0x51,
+ 0x7F, 0x74, 0x6F, 0x04, 0x90, 0x01, 0xC4, 0xF0,
+ 0x74, 0x46, 0xA3, 0xF0, 0xD0, 0x07, 0xD0, 0x06,
+ 0xD0, 0x05, 0xD0, 0x04, 0xD0, 0x03, 0xD0, 0x02,
+ 0xD0, 0x01, 0xD0, 0x00, 0xD0, 0xD0, 0xD0, 0x82,
+ 0xD0, 0x83, 0xD0, 0xF0, 0xD0, 0xE0, 0x32, 0x90,
+ 0x80, 0xDE, 0xE0, 0xB4, 0x01, 0x13, 0x90, 0x81,
+ 0x27, 0xE0, 0x60, 0x0D, 0x90, 0x81, 0x2B, 0xE0,
+ 0x54, 0xFE, 0xF0, 0x54, 0x07, 0x70, 0x02, 0xF1,
+ 0x2A, 0x22, 0x90, 0x81, 0x1F, 0xE0, 0x90, 0x81,
+ 0x29, 0x30, 0xE0, 0x05, 0xE0, 0xFF, 0x02, 0x74,
+ 0x8F, 0xE0, 0xFF, 0x7D, 0x01, 0xD3, 0x10, 0xAF,
+ 0x01, 0xC3, 0xC0, 0xD0, 0x90, 0x82, 0x13, 0xED,
+ 0xF0, 0x90, 0x81, 0x2A, 0xE0, 0x90, 0x82, 0x14,
+ 0xF0, 0x90, 0x81, 0x24, 0xE0, 0xFE, 0xC4, 0x13,
+ 0x13, 0x54, 0x03, 0x30, 0xE0, 0x03, 0x02, 0x48,
+ 0xA0, 0xEE, 0xC4, 0x13, 0x13, 0x13, 0x54, 0x01,
+ 0x30, 0xE0, 0x03, 0x02, 0x48, 0xA0, 0x90, 0x82,
+ 0x14, 0xE0, 0xFE, 0x6F, 0x70, 0x03, 0x02, 0x48,
+ 0xA0, 0xEF, 0x70, 0x03, 0x02, 0x48, 0x17, 0x24,
+ 0xFE, 0x70, 0x03, 0x02, 0x48, 0x50, 0x24, 0xFE,
+ 0x60, 0x51, 0x24, 0xFC, 0x70, 0x03, 0x02, 0x48,
+ 0x8B, 0x24, 0xFC, 0x60, 0x03, 0x02, 0x48, 0xA0,
+ 0xEE, 0xB4, 0x0E, 0x03, 0x12, 0x49, 0x5E, 0x90,
+ 0x82, 0x14, 0xE0, 0x70, 0x05, 0x7F, 0x01, 0x12,
+ 0x49, 0x93, 0x90, 0x82, 0x14, 0xE0, 0xB4, 0x06,
+ 0x03, 0x12, 0x49, 0x34, 0x90, 0x82, 0x14, 0xE0,
+ 0xB4, 0x04, 0x0F, 0x90, 0x82, 0x13, 0xE0, 0xFF,
+ 0x60, 0x05, 0x12, 0x73, 0x75, 0x80, 0x03, 0x12,
+ 0x66, 0x26, 0x90, 0x82, 0x14, 0xE0, 0x64, 0x08,
+ 0x60, 0x03, 0x02, 0x48, 0xA0, 0x12, 0x73, 0xD3,
+ 0x02, 0x48, 0xA0, 0x90, 0x82, 0x14, 0xE0, 0x70,
+ 0x05, 0x7F, 0x01, 0x12, 0x49, 0x93, 0x90, 0x82,
+ 0x14, 0xE0, 0xB4, 0x06, 0x03, 0x12, 0x49, 0x34,
+ 0x90, 0x82, 0x14, 0xE0, 0xB4, 0x0E, 0x09, 0x12,
+ 0x48, 0xA5, 0xBF, 0x01, 0x03, 0x12, 0x49, 0x5E,
+ 0x90, 0x82, 0x14, 0xE0, 0x64, 0x0C, 0x60, 0x02,
+ 0x01, 0xA0, 0x11, 0xA5, 0xEF, 0x64, 0x01, 0x60,
+ 0x02, 0x01, 0xA0, 0x11, 0xFA, 0x01, 0xA0, 0x90,
+ 0x82, 0x14, 0xE0, 0xB4, 0x0E, 0x07, 0x11, 0xA5,
+ 0xBF, 0x01, 0x02, 0x31, 0x5E, 0x90, 0x82, 0x14,
+ 0xE0, 0xB4, 0x06, 0x02, 0x31, 0x34, 0x90, 0x82,
+ 0x14, 0xE0, 0xB4, 0x0C, 0x07, 0x11, 0xA5, 0xBF,
+ 0x01, 0x02, 0x11, 0xFA, 0x90, 0x82, 0x14, 0xE0,
+ 0x64, 0x04, 0x70, 0x5C, 0x12, 0x72, 0xF5, 0xEF,
+ 0x64, 0x01, 0x70, 0x54, 0x31, 0xBE, 0x80, 0x50,
+ 0x90, 0x82, 0x14, 0xE0, 0xB4, 0x0E, 0x07, 0x11,
+ 0xA5, 0xBF, 0x01, 0x02, 0x31, 0x5E, 0x90, 0x82,
+ 0x14, 0xE0, 0xB4, 0x06, 0x02, 0x31, 0x34, 0x90,
+ 0x82, 0x14, 0xE0, 0xB4, 0x0C, 0x07, 0x11, 0xA5,
+ 0xBF, 0x01, 0x02, 0x11, 0xFA, 0x90, 0x82, 0x14,
+ 0xE0, 0x70, 0x04, 0x7F, 0x01, 0x31, 0x93, 0x90,
+ 0x82, 0x14, 0xE0, 0xB4, 0x04, 0x1A, 0x12, 0x73,
+ 0xBB, 0x80, 0x15, 0x90, 0x82, 0x14, 0xE0, 0xB4,
+ 0x0C, 0x0E, 0x90, 0x81, 0x25, 0xE0, 0xFF, 0x13,
+ 0x13, 0x54, 0x3F, 0x30, 0xE0, 0x02, 0x31, 0xB1,
+ 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0xD1, 0xAB, 0xEF,
+ 0x64, 0x01, 0x60, 0x08, 0x90, 0x01, 0xB8, 0x74,
+ 0x01, 0xF0, 0x80, 0x3D, 0x90, 0x81, 0x24, 0xE0,
+ 0xFF, 0x13, 0x13, 0x13, 0x54, 0x1F, 0x30, 0xE0,
+ 0x08, 0x90, 0x01, 0xB8, 0x74, 0x02, 0xF0, 0x80,
+ 0x28, 0xEF, 0xC4, 0x54, 0x0F, 0x30, 0xE0, 0x08,
+ 0x90, 0x01, 0xB8, 0x74, 0x04, 0xF0, 0x80, 0x19,
+ 0x90, 0x81, 0x29, 0xE0, 0xD3, 0x94, 0x04, 0x40,
+ 0x08, 0x90, 0x01, 0xB8, 0x74, 0x08, 0xF0, 0x80,
+ 0x08, 0x90, 0x01, 0xB8, 0xE4, 0xF0, 0x7F, 0x01,
+ 0x22, 0x90, 0x01, 0xB9, 0x74, 0x02, 0xF0, 0x7F,
+ 0x00, 0x22, 0x90, 0x80, 0xDE, 0xE0, 0x64, 0x01,
+ 0x70, 0x31, 0x90, 0x81, 0x25, 0xE0, 0x54, 0xFD,
+ 0xF0, 0x90, 0x05, 0x22, 0x74, 0x6F, 0xF0, 0x7F,
+ 0x01, 0xF1, 0x0D, 0xBF, 0x01, 0x12, 0x90, 0x81,
+ 0x24, 0xE0, 0x44, 0x80, 0xF0, 0x90, 0x81, 0x2A,
+ 0x74, 0x0E, 0xF0, 0x90, 0x81, 0x23, 0xF0, 0x22,
+ 0x90, 0x01, 0xB9, 0x74, 0x01, 0xF0, 0x90, 0x01,
+ 0xB8, 0x04, 0xF0, 0x22, 0x90, 0x81, 0x25, 0xE0,
+ 0x90, 0x06, 0x04, 0x20, 0xE0, 0x0C, 0xE0, 0x44,
+ 0x40, 0xF0, 0x90, 0x81, 0x2A, 0x74, 0x04, 0xF0,
+ 0x80, 0x0E, 0xE0, 0x54, 0x7F, 0xF0, 0x90, 0x81,
+ 0x2A, 0x74, 0x0C, 0xF0, 0x90, 0x81, 0x23, 0xF0,
+ 0x90, 0x05, 0x22, 0xE4, 0xF0, 0x22, 0x90, 0x81,
+ 0x25, 0xE0, 0xC3, 0x13, 0x20, 0xE0, 0x08, 0x90,
+ 0x81, 0x2A, 0x74, 0x0C, 0xF0, 0x80, 0x1E, 0x90,
+ 0x06, 0x04, 0xE0, 0x44, 0x40, 0xF0, 0xE0, 0x44,
+ 0x80, 0xF0, 0x90, 0x81, 0x2A, 0x74, 0x04, 0xF0,
+ 0x90, 0x05, 0x27, 0xE0, 0x44, 0x80, 0xF0, 0x90,
+ 0x81, 0x23, 0x74, 0x04, 0xF0, 0x90, 0x05, 0x22,
+ 0xE4, 0xF0, 0x22, 0x90, 0x82, 0x15, 0xEF, 0xF0,
+ 0x12, 0x54, 0x65, 0x90, 0x82, 0x15, 0xE0, 0x60,
+ 0x05, 0x90, 0x05, 0x22, 0xE4, 0xF0, 0x90, 0x81,
+ 0x2A, 0x74, 0x04, 0xF0, 0x90, 0x81, 0x23, 0xF0,
+ 0x22, 0x31, 0xE3, 0x90, 0x81, 0x2A, 0x74, 0x08,
+ 0xF0, 0x90, 0x81, 0x23, 0xF0, 0x22, 0x90, 0x05,
+ 0x22, 0x74, 0xFF, 0xF0, 0xF1, 0x3A, 0x90, 0x01,
+ 0x37, 0x74, 0x02, 0xF0, 0xFD, 0x7F, 0x03, 0x51,
+ 0x57, 0x31, 0xE3, 0xE4, 0x90, 0x81, 0x2A, 0xF0,
+ 0x90, 0x81, 0x23, 0xF0, 0x22, 0x90, 0x05, 0x22,
+ 0x74, 0xFF, 0xF0, 0xF1, 0x3A, 0x90, 0x85, 0xBB,
+ 0x12, 0x20, 0xDA, 0xCC, 0xF0, 0x00, 0xC0, 0x7F,
+ 0x8C, 0x7E, 0x08, 0x12, 0x2E, 0xA2, 0x90, 0x85,
+ 0xBB, 0x12, 0x20, 0xDA, 0x00, 0x00, 0x00, 0x14,
+ 0x7F, 0x70, 0x7E, 0x0E, 0x12, 0x2E, 0xA2, 0x90,
+ 0x81, 0xF9, 0x12, 0x20, 0xDA, 0x00, 0x00, 0x00,
+ 0x00, 0xE4, 0xFD, 0xFF, 0x12, 0x55, 0x1C, 0x7F,
+ 0x7C, 0x7E, 0x08, 0x12, 0x2D, 0x5C, 0xEC, 0x44,
+ 0x80, 0xFC, 0x90, 0x82, 0x05, 0x12, 0x20, 0xCE,
+ 0x90, 0x82, 0x05, 0x12, 0x44, 0xD9, 0x90, 0x85,
+ 0xBB, 0x12, 0x20, 0xCE, 0x7F, 0x7C, 0x7E, 0x08,
+ 0x12, 0x2E, 0xA2, 0x90, 0x01, 0x00, 0x74, 0x3F,
+ 0xF0, 0xA3, 0xE0, 0x54, 0xFD, 0xF0, 0x90, 0x05,
+ 0x53, 0xE0, 0x44, 0x20, 0xF0, 0x22, 0x90, 0x01,
+ 0x34, 0x74, 0x40, 0xF0, 0xFD, 0xE4, 0xFF, 0x74,
+ 0x3D, 0x2F, 0xF8, 0xE6, 0x4D, 0xFE, 0xF6, 0x74,
+ 0x30, 0x2F, 0xF5, 0x82, 0xE4, 0x34, 0x01, 0xF5,
+ 0x83, 0xEE, 0xF0, 0x22, 0xD3, 0x10, 0xAF, 0x01,
+ 0xC3, 0xC0, 0xD0, 0xE4, 0x90, 0x81, 0xCB, 0xF0,
+ 0x12, 0x1F, 0xA4, 0xFF, 0x54, 0x01, 0xFE, 0x90,
+ 0x81, 0x1F, 0xE0, 0x54, 0xFE, 0x4E, 0xFE, 0xF0,
+ 0xEF, 0x54, 0x02, 0xFF, 0xEE, 0x54, 0xFD, 0x4F,
+ 0xFF, 0xF0, 0x12, 0x1F, 0xA4, 0xFE, 0x54, 0x04,
+ 0xFD, 0xEF, 0x54, 0xFB, 0x4D, 0xFF, 0x90, 0x81,
+ 0x1F, 0xF0, 0xEE, 0x54, 0x08, 0xFE, 0xEF, 0x54,
+ 0xF7, 0x4E, 0xFF, 0xF0, 0x12, 0x1F, 0xA4, 0xFE,
+ 0x54, 0x10, 0xFD, 0xEF, 0x54, 0xEF, 0x4D, 0xFF,
+ 0x90, 0x81, 0x1F, 0xF0, 0xEE, 0x54, 0x20, 0xFE,
+ 0xEF, 0x54, 0xDF, 0x4E, 0xF0, 0x12, 0x1F, 0xA4,
+ 0xC3, 0x13, 0x20, 0xE0, 0x02, 0x61, 0x5E, 0x90,
+ 0x81, 0x1F, 0xE0, 0xFF, 0x30, 0xE0, 0x6D, 0x90,
+ 0x81, 0xCB, 0x74, 0x21, 0xF0, 0xEF, 0x13, 0x13,
+ 0x54, 0x3F, 0x30, 0xE0, 0x0B, 0x51, 0x4E, 0x90,
+ 0x81, 0xCB, 0xE0, 0x44, 0x08, 0xF0, 0x80, 0x0C,
+ 0xE4, 0x90, 0x81, 0x20, 0xF0, 0xA3, 0xF0, 0x7D,
+ 0x40, 0xFF, 0x91, 0x26, 0x90, 0x81, 0x1F, 0xE0,
+ 0xFD, 0x13, 0x13, 0x13, 0x54, 0x1F, 0x30, 0xE0,
+ 0x07, 0x90, 0x81, 0xCB, 0xE0, 0x44, 0x12, 0xF0,
+ 0xED, 0xC4, 0x54, 0x0F, 0x30, 0xE0, 0x07, 0x90,
+ 0x81, 0xCB, 0xE0, 0x44, 0x14, 0xF0, 0x90, 0x81,
+ 0x1F, 0xE0, 0xC4, 0x13, 0x54, 0x07, 0x30, 0xE0,
+ 0x07, 0x90, 0x81, 0xCB, 0xE0, 0x44, 0x80, 0xF0,
+ 0x90, 0x81, 0xCB, 0xE0, 0x90, 0x05, 0x27, 0xF0,
+ 0x90, 0x81, 0x22, 0xE0, 0x60, 0x02, 0x81, 0x17,
+ 0x7F, 0x01, 0x80, 0x15, 0x90, 0x81, 0xCB, 0x74,
+ 0x01, 0xF0, 0x90, 0x05, 0x27, 0xF0, 0x90, 0x81,
+ 0x22, 0xE0, 0x64, 0x04, 0x60, 0x02, 0x81, 0x17,
+ 0xFF, 0x12, 0x53, 0x0E, 0x81, 0x17, 0x90, 0x81,
+ 0x1F, 0xE0, 0xFF, 0x20, 0xE0, 0x02, 0x61, 0xE7,
+ 0x90, 0x81, 0xCB, 0x74, 0x31, 0xF0, 0xEF, 0x13,
+ 0x13, 0x54, 0x3F, 0x30, 0xE0, 0x0B, 0x51, 0x4E,
+ 0x90, 0x81, 0xCB, 0xE0, 0x44, 0x08, 0xF0, 0x80,
+ 0x06, 0x7D, 0x40, 0xE4, 0xFF, 0x91, 0x26, 0x90,
+ 0x81, 0x1F, 0xE0, 0xFD, 0x13, 0x13, 0x13, 0x54,
+ 0x1F, 0x30, 0xE0, 0x07, 0x90, 0x81, 0xCB, 0xE0,
+ 0x44, 0x02, 0xF0, 0xED, 0xC4, 0x54, 0x0F, 0x30,
+ 0xE0, 0x07, 0x90, 0x81, 0xCB, 0xE0, 0x44, 0x04,
+ 0xF0, 0x90, 0x81, 0xCB, 0xE0, 0x90, 0x05, 0x27,
+ 0xF0, 0x90, 0x81, 0x23, 0xE0, 0x64, 0x02, 0x70,
+ 0x1D, 0xFD, 0x7F, 0x04, 0x12, 0x47, 0x3D, 0x12,
+ 0x51, 0x73, 0xBF, 0x01, 0x09, 0x90, 0x81, 0x29,
+ 0xE0, 0xFF, 0x7D, 0x01, 0x80, 0x03, 0xE4, 0xFD,
+ 0xFF, 0x12, 0x47, 0x3D, 0x80, 0x41, 0x90, 0x81,
+ 0x2A, 0xE0, 0x90, 0x81, 0x23, 0xF0, 0x90, 0x05,
+ 0x27, 0xE0, 0x44, 0x40, 0xF0, 0x80, 0x30, 0x90,
+ 0x81, 0xCB, 0x74, 0x01, 0xF0, 0x90, 0x05, 0x27,
+ 0xF0, 0x90, 0x81, 0x23, 0xE0, 0xB4, 0x02, 0x06,
+ 0x7D, 0x01, 0x7F, 0x04, 0x80, 0x0B, 0x90, 0x81,
+ 0x23, 0xE0, 0xB4, 0x08, 0x07, 0x7D, 0x01, 0x7F,
+ 0x0C, 0x12, 0x47, 0x3D, 0xD1, 0x34, 0x90, 0x81,
+ 0x29, 0x12, 0x47, 0x39, 0x12, 0x5A, 0xA7, 0xD0,
+ 0xD0, 0x92, 0xAF, 0x22, 0x7D, 0x02, 0x7F, 0x02,
+ 0x91, 0x26, 0x7D, 0x01, 0x7F, 0x02, 0x74, 0x3D,
+ 0x2F, 0xF8, 0xE6, 0xFE, 0xED, 0xF4, 0x5E, 0xFE,
+ 0xF6, 0x74, 0x30, 0x2F, 0xF5, 0x82, 0xE4, 0x34,
+ 0x01, 0xF5, 0x83, 0xEE, 0xF0, 0x22, 0xEF, 0x70,
+ 0x37, 0x7D, 0x78, 0x7F, 0x02, 0x91, 0x26, 0x7D,
+ 0x02, 0x7F, 0x03, 0x91, 0x26, 0x7D, 0xC8, 0x7F,
+ 0x02, 0x12, 0x71, 0x8F, 0x90, 0x01, 0x57, 0xE4,
+ 0xF0, 0x90, 0x01, 0x3C, 0x74, 0x02, 0xF0, 0x7D,
+ 0x01, 0x7F, 0x0C, 0x12, 0x47, 0x3D, 0x90, 0x81,
+ 0x24, 0xE0, 0x54, 0xF7, 0xF0, 0x54, 0xEF, 0xF0,
+ 0x90, 0x06, 0x0A, 0xE0, 0x54, 0xF8, 0xF0, 0x22,
+ 0x90, 0x01, 0x36, 0x74, 0x78, 0xF0, 0xA3, 0x74,
+ 0x02, 0xF0, 0x7D, 0x78, 0xFF, 0x51, 0x57, 0x7D,
+ 0x02, 0x7F, 0x03, 0x51, 0x57, 0x90, 0x06, 0x0A,
+ 0xE0, 0x44, 0x07, 0xF0, 0x90, 0x81, 0x32, 0xA3,
+ 0xE0, 0x90, 0x05, 0x58, 0xF0, 0x90, 0x80, 0xDE,
+ 0xE0, 0xB4, 0x01, 0x15, 0x90, 0x81, 0x25, 0xE0,
+ 0x54, 0xFB, 0xF0, 0x90, 0x81, 0x2A, 0xE0, 0x20,
+ 0xE2, 0x0E, 0x7D, 0x01, 0x7F, 0x04, 0x02, 0x47,
+ 0x3D, 0x90, 0x81, 0x25, 0xE0, 0x44, 0x04, 0xF0,
+ 0x22, 0x90, 0x81, 0x1F, 0xE0, 0xFF, 0x30, 0xE0,
+ 0x08, 0x90, 0x81, 0x23, 0xE0, 0x64, 0x02, 0x60,
+ 0x3A, 0x90, 0x81, 0x27, 0xE0, 0x70, 0x04, 0xEF,
+ 0x30, 0xE0, 0x0A, 0x90, 0x81, 0x2A, 0xE0, 0x64,
+ 0x02, 0x60, 0x28, 0xB1, 0x83, 0x90, 0x81, 0x25,
+ 0xE0, 0x13, 0x13, 0x13, 0x54, 0x1F, 0x30, 0xE0,
+ 0x14, 0x90, 0x81, 0x2D, 0xE0, 0xFF, 0xA3, 0xE0,
+ 0x6F, 0x70, 0x0A, 0xF1, 0xCD, 0x91, 0x1C, 0x90,
+ 0x81, 0x2E, 0xE0, 0x14, 0xF0, 0x90, 0x01, 0xE6,
+ 0xE0, 0x04, 0xF0, 0x22, 0x90, 0x81, 0x1F, 0xE0,
+ 0x30, 0xE0, 0x06, 0x90, 0x81, 0x21, 0x74, 0x01,
+ 0xF0, 0x90, 0x81, 0x27, 0xE0, 0x60, 0x45, 0x90,
+ 0x81, 0x25, 0xE0, 0xFF, 0x13, 0x13, 0x13, 0x54,
+ 0x1F, 0x30, 0xE0, 0x12, 0x90, 0x01, 0x3B, 0xE0,
+ 0x30, 0xE4, 0x0B, 0x91, 0x1C, 0x90, 0x81, 0x2D,
+ 0xE0, 0x14, 0x90, 0x05, 0x73, 0xF0, 0x90, 0x82,
+ 0x0B, 0xE4, 0x75, 0xF0, 0x01, 0x12, 0x44, 0xA9,
+ 0xC3, 0x90, 0x82, 0x0C, 0xE0, 0x94, 0x80, 0x90,
+ 0x82, 0x0B, 0xE0, 0x64, 0x80, 0x94, 0x80, 0x40,
+ 0x0B, 0x90, 0x01, 0x98, 0xE0, 0x54, 0xFE, 0xF0,
+ 0xE0, 0x44, 0x01, 0xF0, 0x12, 0x75, 0xF8, 0xD1,
+ 0xD6, 0x90, 0x81, 0x3F, 0xE0, 0x30, 0xE0, 0x0C,
+ 0xE4, 0xF5, 0x1D, 0xA3, 0xF1, 0xFB, 0x90, 0x01,
+ 0x57, 0x74, 0x05, 0xF0, 0x90, 0x01, 0xBE, 0xE0,
+ 0x04, 0xF0, 0x22, 0x90, 0x80, 0xDE, 0xE0, 0x64,
+ 0x01, 0x60, 0x02, 0xC1, 0x23, 0x90, 0x81, 0x27,
+ 0xE0, 0x70, 0x02, 0xC1, 0x23, 0x90, 0x81, 0x26,
+ 0xE0, 0xC4, 0x54, 0x0F, 0x64, 0x01, 0x70, 0x22,
+ 0x90, 0x06, 0xAB, 0xE0, 0x90, 0x81, 0x2E, 0xF0,
+ 0x90, 0x06, 0xAA, 0xE0, 0x90, 0x81, 0x2D, 0xF0,
+ 0xA3, 0xE0, 0xFF, 0x70, 0x08, 0x90, 0x81, 0x2D,
+ 0xE0, 0xFE, 0xFF, 0x80, 0x00, 0x90, 0x81, 0x2E,
+ 0xEF, 0xF0, 0x90, 0x81, 0x25, 0xE0, 0x44, 0x04,
+ 0xF0, 0xE4, 0x90, 0x81, 0x30, 0xF0, 0x90, 0x81,
+ 0x32, 0xA3, 0xE0, 0x90, 0x05, 0x58, 0xF0, 0x90,
+ 0x01, 0x57, 0xE4, 0xF0, 0x90, 0x01, 0x3C, 0x74,
+ 0x02, 0xF0, 0x90, 0x81, 0x2B, 0xE0, 0x54, 0xFD,
+ 0xF0, 0x54, 0xEF, 0xF0, 0x90, 0x81, 0x26, 0xE0,
+ 0xFF, 0xC4, 0x54, 0x0F, 0x24, 0xFD, 0x50, 0x02,
+ 0x80, 0x0F, 0x90, 0x81, 0x1F, 0xE0, 0x30, 0xE0,
+ 0x05, 0x12, 0x6D, 0xF2, 0x80, 0x03, 0x12, 0x6E,
+ 0xC9, 0x90, 0x81, 0x25, 0xE0, 0x13, 0x13, 0x13,
+ 0x54, 0x1F, 0x30, 0xE0, 0x0E, 0x90, 0x81, 0x2D,
+ 0xE0, 0xFF, 0xA3, 0xE0, 0xB5, 0x07, 0x04, 0xF1,
+ 0xCD, 0x91, 0x22, 0x90, 0x81, 0x1F, 0xE0, 0xC3,
+ 0x13, 0x20, 0xE0, 0x07, 0x90, 0x81, 0x25, 0xE0,
+ 0x44, 0x04, 0xF0, 0x22, 0xD1, 0xAB, 0xEF, 0x70,
+ 0x02, 0xD1, 0x3C, 0x22, 0x90, 0x81, 0x27, 0xE0,
+ 0x64, 0x01, 0x70, 0x66, 0x90, 0x81, 0x26, 0xE0,
+ 0x54, 0x0F, 0x60, 0x51, 0x90, 0x81, 0x2A, 0xE0,
+ 0x70, 0x03, 0xFF, 0x31, 0x93, 0x90, 0x81, 0x2A,
+ 0xE0, 0x64, 0x0C, 0x60, 0x03, 0x12, 0x66, 0x26,
+ 0x90, 0x01, 0x5B, 0xE4, 0xF0, 0x90, 0x01, 0x3C,
+ 0x74, 0x04, 0xF0, 0xD1, 0xAB, 0xEF, 0x64, 0x01,
+ 0x60, 0x38, 0xE4, 0xF5, 0x1D, 0x90, 0x81, 0x3A,
+ 0xE0, 0xC3, 0x13, 0x54, 0x7F, 0xF5, 0x1E, 0xE4,
+ 0xFB, 0xFD, 0x7F, 0x58, 0x7E, 0x01, 0x12, 0x50,
+ 0x05, 0x90, 0x01, 0x5B, 0x74, 0x05, 0xF0, 0x90,
+ 0x06, 0x92, 0x74, 0x01, 0xF0, 0x90, 0x81, 0x24,
+ 0xE0, 0x44, 0x08, 0xF0, 0x22, 0x90, 0x81, 0x2A,
+ 0xE0, 0x70, 0x07, 0x7D, 0x01, 0x7F, 0x04, 0x12,
+ 0x47, 0x3D, 0x22, 0x90, 0x04, 0x1A, 0xE0, 0xF4,
+ 0x60, 0x03, 0x7F, 0x00, 0x22, 0x90, 0x04, 0x1B,
+ 0xE0, 0x54, 0x07, 0x64, 0x07, 0x7F, 0x01, 0x60,
+ 0x02, 0x7F, 0x00, 0x22, 0x12, 0x50, 0x60, 0x90,
+ 0x81, 0x2D, 0xE0, 0x14, 0x90, 0x05, 0x73, 0xF0,
+ 0x7D, 0x02, 0x7F, 0x02, 0x51, 0x57, 0x90, 0x81,
+ 0x42, 0xE0, 0x30, 0xE0, 0x2D, 0x90, 0x80, 0xDE,
+ 0xE0, 0xB4, 0x01, 0x26, 0x90, 0x82, 0x17, 0xE0,
+ 0x04, 0xF0, 0xE0, 0xB4, 0x0A, 0x0B, 0x90, 0x81,
+ 0x44, 0xE0, 0x04, 0xF0, 0xE4, 0x90, 0x82, 0x17,
+ 0xF0, 0x90, 0x81, 0x44, 0xE0, 0xFF, 0x90, 0x81,
+ 0x43, 0xE0, 0xB5, 0x07, 0x05, 0xE4, 0xA3, 0xF0,
+ 0xF1, 0x0B, 0x22, 0xE4, 0xFF, 0x8F, 0x53, 0x90,
+ 0x04, 0x1D, 0xE0, 0x60, 0x19, 0x90, 0x05, 0x22,
+ 0xE0, 0xF5, 0x56, 0x74, 0xFF, 0xF0, 0xF1, 0x3A,
+ 0xBF, 0x01, 0x03, 0x12, 0x74, 0xFB, 0x90, 0x05,
+ 0x22, 0xE5, 0x56, 0xF0, 0x80, 0x03, 0x12, 0x74,
+ 0xFB, 0x90, 0x04, 0x1F, 0x74, 0x20, 0xF0, 0x7F,
+ 0x01, 0x22, 0xE4, 0x90, 0x82, 0x0F, 0xF0, 0xA3,
+ 0xF0, 0x90, 0x05, 0xF8, 0xE0, 0x70, 0x0F, 0xA3,
+ 0xE0, 0x70, 0x0B, 0xA3, 0xE0, 0x70, 0x07, 0xA3,
+ 0xE0, 0x70, 0x03, 0x7F, 0x01, 0x22, 0xD3, 0x90,
+ 0x82, 0x10, 0xE0, 0x94, 0xE8, 0x90, 0x82, 0x0F,
+ 0xE0, 0x94, 0x03, 0x40, 0x0A, 0x90, 0x01, 0xC0,
+ 0xE0, 0x44, 0x20, 0xF0, 0x7F, 0x00, 0x22, 0x7F,
+ 0x32, 0x7E, 0x00, 0x12, 0x32, 0xAA, 0x90, 0x82,
+ 0x0F, 0xE4, 0x75, 0xF0, 0x01, 0x12, 0x44, 0xA9,
+ 0x80, 0xBF, 0x74, 0x1F, 0x2D, 0xF5, 0x82, 0xE4,
+ 0x34, 0xFC, 0xF5, 0x83, 0xE0, 0x54, 0x3F, 0xF0,
+ 0xEF, 0x60, 0x1D, 0x74, 0x21, 0x2D, 0xF5, 0x82,
+ 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xE0, 0x44, 0x10,
+ 0xF0, 0x74, 0x1F, 0x2D, 0xF5, 0x82, 0xE4, 0x34,
+ 0xFC, 0xF5, 0x83, 0xE0, 0x44, 0x80, 0xF0, 0x22,
+ 0x74, 0x21, 0x2D, 0xF5, 0x82, 0xE4, 0x34, 0xFC,
+ 0xF5, 0x83, 0xE0, 0x54, 0xEF, 0xF0, 0x74, 0x1F,
+ 0x2D, 0xF5, 0x82, 0xE4, 0x34, 0xFC, 0xF5, 0x83,
+ 0xE0, 0x44, 0x40, 0xF0, 0x22, 0xEF, 0x14, 0x90,
+ 0x05, 0x73, 0xF0, 0x90, 0x01, 0x3F, 0x74, 0x10,
+ 0xF0, 0xFD, 0x7F, 0x03, 0x74, 0x45, 0x2F, 0xF8,
+ 0xE6, 0x4D, 0xFE, 0xF6, 0x74, 0x38, 0x2F, 0xF5,
+ 0x82, 0xE4, 0x34, 0x01, 0xF5, 0x83, 0xEE, 0xF0,
+ 0x22, 0xE0, 0x44, 0x02, 0xF0, 0xE4, 0xF5, 0x1D,
+ 0x90, 0x81, 0x39, 0xE0, 0xF5, 0x1E, 0xE4, 0xFB,
+ 0xFD, 0x7F, 0x54, 0x7E, 0x01, 0x8E, 0x19, 0x8F,
+ 0x1A, 0xE5, 0x1E, 0x54, 0x07, 0xC4, 0x33, 0x54,
+ 0xE0, 0x85, 0x19, 0x83, 0x85, 0x1A, 0x82, 0xF0,
+ 0xE5, 0x1D, 0x54, 0x07, 0xC4, 0x33, 0x54, 0xE0,
+ 0xFF, 0xE5, 0x1E, 0x13, 0x13, 0x13, 0x54, 0x1F,
+ 0x4F, 0xA3, 0xF0, 0xEB, 0x54, 0x07, 0xC4, 0x33,
+ 0x54, 0xE0, 0xFF, 0xE5, 0x1D, 0x13, 0x13, 0x13,
+ 0x54, 0x1F, 0x4F, 0x85, 0x1A, 0x82, 0x85, 0x19,
+ 0x83, 0xA3, 0xA3, 0xF0, 0xBD, 0x01, 0x0C, 0x85,
+ 0x1A, 0x82, 0x8E, 0x83, 0xA3, 0xA3, 0xA3, 0x74,
+ 0x03, 0xF0, 0x22, 0x85, 0x1A, 0x82, 0x85, 0x19,
+ 0x83, 0xA3, 0xA3, 0xA3, 0x74, 0x01, 0xF0, 0x22,
+ 0xE4, 0x90, 0x81, 0x4D, 0xF0, 0x90, 0x81, 0x27,
+ 0xE0, 0x60, 0x58, 0x90, 0x80, 0xDE, 0xE0, 0x64,
+ 0x01, 0x70, 0x50, 0x90, 0x81, 0x4D, 0x04, 0xF0,
+ 0xE4, 0x90, 0x81, 0x2E, 0xF0, 0x90, 0x81, 0x1F,
+ 0xE0, 0x30, 0xE0, 0x15, 0x90, 0x81, 0x23, 0xE0,
+ 0xB4, 0x02, 0x05, 0xE4, 0x90, 0x81, 0x4D, 0xF0,
+ 0x31, 0x73, 0xEF, 0x70, 0x04, 0x90, 0x81, 0x4D,
+ 0xF0, 0x90, 0x81, 0x4D, 0xE0, 0x60, 0x24, 0x90,
+ 0x81, 0x2B, 0xE0, 0x44, 0x10, 0xF0, 0xE4, 0xF5,
+ 0x1D, 0x90, 0x81, 0x2F, 0x12, 0x4F, 0xFB, 0x90,
+ 0x01, 0x57, 0x74, 0x05, 0xF0, 0x90, 0x81, 0x2A,
+ 0xE0, 0x20, 0xE2, 0x07, 0x7D, 0x01, 0x7F, 0x04,
+ 0x12, 0x47, 0x3D, 0x22, 0xE4, 0x90, 0x81, 0x4C,
+ 0xF0, 0x90, 0x81, 0x27, 0xE0, 0x70, 0x02, 0x21,
+ 0x72, 0x90, 0x80, 0xDE, 0xE0, 0x64, 0x01, 0x60,
+ 0x02, 0x21, 0x72, 0x90, 0x81, 0x26, 0xE0, 0xFF,
+ 0xC4, 0x54, 0x0F, 0x60, 0x22, 0x24, 0xFE, 0x60,
+ 0x03, 0x04, 0x70, 0x21, 0x90, 0x81, 0x2E, 0xE0,
+ 0x14, 0xF0, 0xE0, 0xFF, 0x60, 0x06, 0x90, 0x81,
+ 0x30, 0xE0, 0x60, 0x11, 0xEF, 0x70, 0x08, 0x90,
+ 0x81, 0x2D, 0xE0, 0xA3, 0xF0, 0x80, 0x00, 0x90,
+ 0x81, 0x4C, 0x74, 0x01, 0xF0, 0x90, 0x81, 0x1F,
+ 0xE0, 0x30, 0xE0, 0x15, 0x90, 0x81, 0x23, 0xE0,
+ 0xB4, 0x02, 0x05, 0xE4, 0x90, 0x81, 0x4C, 0xF0,
+ 0x31, 0x73, 0xEF, 0x70, 0x04, 0x90, 0x81, 0x4C,
+ 0xF0, 0x90, 0x81, 0x4C, 0xE0, 0x60, 0x43, 0x90,
+ 0x81, 0x2B, 0xE0, 0x44, 0x10, 0xF0, 0x90, 0x81,
+ 0x30, 0xE0, 0x60, 0x03, 0xB4, 0x01, 0x09, 0xE4,
+ 0xF5, 0x1D, 0x90, 0x81, 0x30, 0xE0, 0x80, 0x0D,
+ 0xE4, 0xF5, 0x1D, 0x90, 0x81, 0x30, 0xE0, 0x75,
+ 0xF0, 0x03, 0xA4, 0x24, 0xFE, 0xFF, 0x90, 0x81,
+ 0x2F, 0xE0, 0x2F, 0x12, 0x4F, 0xFC, 0x90, 0x01,
+ 0x57, 0x74, 0x05, 0xF0, 0x90, 0x81, 0x2A, 0xE0,
+ 0x20, 0xE2, 0x07, 0x7D, 0x01, 0x7F, 0x04, 0x12,
+ 0x47, 0x3D, 0x22, 0x90, 0x05, 0x43, 0xE0, 0x7F,
+ 0x00, 0x30, 0xE7, 0x02, 0x7F, 0x01, 0x22, 0x90,
+ 0x81, 0x27, 0xE0, 0x70, 0x07, 0x90, 0x81, 0x1F,
+ 0xE0, 0x30, 0xE0, 0x11, 0x90, 0x81, 0x1F, 0xE0,
+ 0x30, 0xE0, 0x07, 0x31, 0x73, 0xBF, 0x01, 0x05,
+ 0x41, 0x5B, 0x12, 0x4E, 0x3C, 0x22, 0xD3, 0x10,
+ 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x90, 0x81, 0x1E,
+ 0xE0, 0xB4, 0x01, 0x04, 0x7F, 0x04, 0x80, 0x0B,
+ 0x31, 0x73, 0xBF, 0x01, 0x04, 0x7F, 0x01, 0x80,
+ 0x02, 0x7F, 0x02, 0x71, 0x0E, 0xD0, 0xD0, 0x92,
+ 0xAF, 0x22, 0x90, 0x81, 0x4B, 0xE0, 0x60, 0x0F,
+ 0xE4, 0xF0, 0x90, 0x05, 0x53, 0xE0, 0x44, 0x02,
+ 0xF0, 0x90, 0x05, 0xFC, 0xE0, 0x04, 0xF0, 0x90,
+ 0x81, 0x1F, 0xE0, 0x30, 0xE0, 0x10, 0xA3, 0x74,
+ 0x01, 0xF0, 0x90, 0x81, 0x1F, 0xE0, 0xFF, 0xC3,
+ 0x13, 0x30, 0xE0, 0x02, 0x31, 0x9E, 0x11, 0xC4,
+ 0x90, 0x81, 0x3F, 0xE0, 0x30, 0xE0, 0x07, 0x91,
+ 0x65, 0x90, 0x05, 0x22, 0xE4, 0xF0, 0x22, 0x90,
+ 0x81, 0x1F, 0xE0, 0xFF, 0x30, 0xE0, 0x3D, 0x90,
+ 0x81, 0x23, 0xE0, 0x7E, 0x00, 0xB4, 0x02, 0x02,
+ 0x7E, 0x01, 0x90, 0x81, 0x22, 0xE0, 0x7D, 0x00,
+ 0xB4, 0x04, 0x02, 0x7D, 0x01, 0xED, 0x4E, 0x70,
+ 0x23, 0xEF, 0xC3, 0x13, 0x30, 0xE0, 0x02, 0x21,
+ 0x9E, 0x51, 0x45, 0x90, 0x81, 0x23, 0xE0, 0xB4,
+ 0x08, 0x06, 0xE4, 0xFD, 0x7F, 0x0C, 0x80, 0x09,
+ 0x90, 0x81, 0x23, 0xE0, 0x70, 0x06, 0xFD, 0x7F,
+ 0x04, 0x12, 0x47, 0x3D, 0x22, 0x90, 0x81, 0x1E,
+ 0xE0, 0xB4, 0x01, 0x0F, 0x90, 0x81, 0x23, 0xE0,
+ 0x64, 0x02, 0x60, 0x07, 0x7D, 0x01, 0x7F, 0x02,
+ 0x12, 0x47, 0x3D, 0x90, 0x81, 0x27, 0xE0, 0x64,
+ 0x02, 0x60, 0x14, 0x90, 0x81, 0x26, 0xE0, 0x54,
+ 0x0F, 0x60, 0x0C, 0x12, 0x4E, 0xAB, 0xEF, 0x70,
+ 0x06, 0xFD, 0x7F, 0x0C, 0x12, 0x47, 0x3D, 0x22,
+ 0x90, 0x81, 0x1F, 0xE0, 0xFF, 0x30, 0xE0, 0x3F,
+ 0x90, 0x81, 0x23, 0xE0, 0x7E, 0x00, 0xB4, 0x02,
+ 0x02, 0x7E, 0x01, 0x90, 0x81, 0x22, 0xE0, 0x7D,
+ 0x00, 0xB4, 0x04, 0x02, 0x7D, 0x01, 0xED, 0x4E,
+ 0x70, 0x25, 0xEF, 0xC3, 0x13, 0x30, 0xE0, 0x02,
+ 0x21, 0x9E, 0x12, 0x74, 0xAC, 0x90, 0x81, 0x23,
+ 0xE0, 0xB4, 0x0C, 0x06, 0xE4, 0xFD, 0x7F, 0x08,
+ 0x80, 0x0A, 0x90, 0x81, 0x23, 0xE0, 0xB4, 0x04,
+ 0x06, 0xE4, 0xFD, 0xFF, 0x12, 0x47, 0x3D, 0x22,
+ 0xD3, 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x90,
+ 0x81, 0xCB, 0x12, 0x45, 0x1F, 0x12, 0x1F, 0xA4,
+ 0xFF, 0x90, 0x81, 0x1E, 0xF0, 0xBF, 0x01, 0x12,
+ 0x90, 0x81, 0xCB, 0x12, 0x45, 0x16, 0x90, 0x00,
+ 0x01, 0x12, 0x1F, 0xBD, 0x64, 0x01, 0x60, 0x21,
+ 0x80, 0x1D, 0x90, 0x81, 0xCB, 0x12, 0x45, 0x16,
+ 0x90, 0x00, 0x01, 0x12, 0x1F, 0xBD, 0x64, 0x01,
+ 0x60, 0x0F, 0x90, 0x81, 0x1F, 0xE0, 0x20, 0xE0,
+ 0x06, 0xE4, 0xFF, 0x71, 0x0E, 0x80, 0x02, 0x31,
+ 0x9E, 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0xD3, 0x10,
+ 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x90, 0x81, 0x22,
+ 0xE0, 0x90, 0x82, 0x16, 0xF0, 0x6F, 0x70, 0x02,
+ 0x81, 0x04, 0xEF, 0x14, 0x60, 0x3E, 0x14, 0x60,
+ 0x62, 0x14, 0x70, 0x02, 0x61, 0xB8, 0x14, 0x70,
+ 0x02, 0x61, 0xDF, 0x24, 0x04, 0x60, 0x02, 0x81,
+ 0x04, 0x90, 0x82, 0x16, 0xE0, 0xFF, 0xB4, 0x04,
+ 0x04, 0x91, 0x41, 0x81, 0x04, 0xEF, 0xB4, 0x02,
+ 0x04, 0x91, 0x50, 0x81, 0x04, 0x90, 0x82, 0x16,
+ 0xE0, 0xFF, 0xB4, 0x03, 0x04, 0x91, 0x54, 0x81,
+ 0x04, 0xEF, 0x64, 0x01, 0x60, 0x02, 0x81, 0x04,
+ 0x91, 0x43, 0x81, 0x04, 0x90, 0x82, 0x16, 0xE0,
+ 0xFF, 0xB4, 0x04, 0x04, 0x91, 0xF3, 0x81, 0x04,
+ 0xEF, 0xB4, 0x02, 0x04, 0x91, 0x58, 0x81, 0x04,
+ 0x90, 0x82, 0x16, 0xE0, 0xFF, 0xB4, 0x03, 0x04,
+ 0x91, 0xE8, 0x81, 0x04, 0xEF, 0x70, 0x7D, 0x91,
+ 0x2B, 0x80, 0x79, 0x90, 0x82, 0x16, 0xE0, 0xB4,
+ 0x04, 0x05, 0x12, 0x74, 0x60, 0x80, 0x6D, 0x90,
+ 0x82, 0x16, 0xE0, 0xB4, 0x01, 0x04, 0x91, 0x21,
+ 0x80, 0x62, 0x90, 0x82, 0x16, 0xE0, 0xB4, 0x03,
+ 0x05, 0x12, 0x74, 0x71, 0x80, 0x56, 0x90, 0x82,
+ 0x16, 0xE0, 0x70, 0x50, 0x91, 0x1F, 0x80, 0x4C,
+ 0x90, 0x82, 0x16, 0xE0, 0xFF, 0xB4, 0x04, 0x05,
+ 0x12, 0x74, 0x4C, 0x80, 0x3F, 0xEF, 0xB4, 0x01,
+ 0x04, 0x91, 0x34, 0x80, 0x37, 0xEF, 0xB4, 0x02,
+ 0x04, 0x91, 0xDF, 0x80, 0x2F, 0x90, 0x82, 0x16,
+ 0xE0, 0x70, 0x29, 0x91, 0x32, 0x80, 0x25, 0x90,
+ 0x82, 0x16, 0xE0, 0xFF, 0xB4, 0x03, 0x05, 0x12,
+ 0x74, 0x7B, 0x80, 0x18, 0xEF, 0xB4, 0x01, 0x04,
+ 0x91, 0x0B, 0x80, 0x10, 0xEF, 0xB4, 0x02, 0x04,
+ 0xB1, 0x06, 0x80, 0x08, 0x90, 0x82, 0x16, 0xE0,
+ 0x70, 0x02, 0x91, 0x09, 0xD0, 0xD0, 0x92, 0xAF,
+ 0x22, 0x91, 0x2B, 0x90, 0x05, 0x22, 0x74, 0x6F,
+ 0xF0, 0x90, 0x05, 0x27, 0xE0, 0x54, 0xBF, 0xF0,
+ 0x90, 0x81, 0x22, 0x74, 0x04, 0xF0, 0x22, 0x91,
+ 0x2B, 0x12, 0x49, 0xDD, 0x90, 0x81, 0x22, 0x74,
+ 0x02, 0xF0, 0x22, 0x90, 0x81, 0x22, 0x74, 0x01,
+ 0xF0, 0x22, 0x91, 0x2B, 0x90, 0x05, 0x22, 0x74,
+ 0xFF, 0xF0, 0x90, 0x81, 0x22, 0x74, 0x03, 0xF0,
+ 0x22, 0x91, 0xF3, 0x90, 0x05, 0x27, 0xE0, 0x54,
+ 0xBF, 0xF0, 0xE4, 0x90, 0x81, 0x22, 0xF0, 0x22,
+ 0x91, 0x58, 0x80, 0xEF, 0x91, 0xE8, 0x80, 0xEB,
+ 0x91, 0x65, 0x90, 0x05, 0x22, 0xE4, 0xF0, 0x90,
+ 0x81, 0x22, 0x04, 0xF0, 0x22, 0xD3, 0x10, 0xAF,
+ 0x01, 0xC3, 0xC0, 0xD0, 0x90, 0x01, 0x01, 0xE0,
+ 0x44, 0x02, 0xF0, 0x90, 0x01, 0x00, 0x74, 0xFF,
+ 0xF0, 0x90, 0x06, 0xB7, 0x74, 0x09, 0xF0, 0x90,
+ 0x06, 0xB4, 0x74, 0x86, 0xF0, 0x7F, 0x7C, 0x7E,
+ 0x08, 0x12, 0x2D, 0x5C, 0xEC, 0x54, 0x7F, 0xFC,
+ 0x90, 0x82, 0x01, 0x12, 0x20, 0xCE, 0x90, 0x82,
+ 0x01, 0x12, 0x44, 0xD9, 0x90, 0x85, 0xBB, 0x12,
+ 0x20, 0xCE, 0x7F, 0x7C, 0x7E, 0x08, 0x12, 0x2E,
+ 0xA2, 0x90, 0x85, 0xBB, 0x12, 0x20, 0xDA, 0xCC,
+ 0xC0, 0x00, 0xC0, 0x7F, 0x8C, 0x7E, 0x08, 0x12,
+ 0x2E, 0xA2, 0x90, 0x85, 0xBB, 0x12, 0x20, 0xDA,
+ 0x00, 0xC0, 0x00, 0x14, 0x7F, 0x70, 0x7E, 0x0E,
+ 0x12, 0x2E, 0xA2, 0x90, 0x81, 0xF9, 0x12, 0x20,
+ 0xDA, 0x00, 0x03, 0x3E, 0x60, 0xE4, 0xFD, 0xFF,
+ 0xB1, 0x1C, 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0x91,
+ 0x65, 0x90, 0x81, 0x22, 0x74, 0x03, 0xF0, 0x22,
+ 0x90, 0x05, 0x22, 0xE4, 0xF0, 0x90, 0x81, 0x22,
+ 0x04, 0xF0, 0x22, 0x90, 0x05, 0x22, 0xE4, 0xF0,
+ 0x90, 0x05, 0x27, 0xE0, 0x44, 0x40, 0xF0, 0x90,
+ 0x81, 0x22, 0x74, 0x01, 0xF0, 0x22, 0x91, 0x65,
+ 0x90, 0x05, 0x22, 0x74, 0x6F, 0xF0, 0x90, 0x05,
+ 0x27, 0xE0, 0x54, 0xBF, 0xF0, 0x90, 0x81, 0x22,
+ 0x74, 0x04, 0xF0, 0x22, 0xD3, 0x10, 0xAF, 0x01,
+ 0xC3, 0xC0, 0xD0, 0xC0, 0x07, 0xC0, 0x05, 0x90,
+ 0x81, 0xF9, 0x12, 0x44, 0xD9, 0x90, 0x81, 0xE5,
+ 0x12, 0x20, 0xCE, 0xD0, 0x05, 0xD0, 0x07, 0x12,
+ 0x60, 0xF5, 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0x90,
+ 0x81, 0xC8, 0x12, 0x45, 0x1F, 0xEF, 0x12, 0x45,
+ 0x28, 0x55, 0x71, 0x00, 0x55, 0x7A, 0x01, 0x55,
+ 0x83, 0x02, 0x55, 0x8B, 0x03, 0x55, 0x94, 0x04,
+ 0x55, 0x9C, 0x20, 0x55, 0xA4, 0x21, 0x55, 0xAD,
+ 0x23, 0x55, 0xB5, 0x24, 0x55, 0xBE, 0x25, 0x55,
+ 0xC7, 0x26, 0x55, 0xCF, 0xC0, 0x00, 0x00, 0x55,
+ 0xD8, 0x90, 0x81, 0xC8, 0x12, 0x45, 0x16, 0x02,
+ 0x6A, 0xB0, 0x90, 0x81, 0xC8, 0x12, 0x45, 0x16,
+ 0x02, 0x65, 0x81, 0x90, 0x81, 0xC8, 0x12, 0x45,
+ 0x16, 0x41, 0xC0, 0x90, 0x81, 0xC8, 0x12, 0x45,
+ 0x16, 0x02, 0x75, 0xD8, 0x90, 0x81, 0xC8, 0x12,
+ 0x45, 0x16, 0x80, 0x44, 0x90, 0x81, 0xC8, 0x12,
+ 0x45, 0x16, 0xC1, 0x4B, 0x90, 0x81, 0xC8, 0x12,
+ 0x45, 0x16, 0x02, 0x6A, 0xF8, 0x90, 0x81, 0xC8,
+ 0x12, 0x45, 0x16, 0xE1, 0xE1, 0x90, 0x81, 0xC8,
+ 0x12, 0x45, 0x16, 0x02, 0x4A, 0x6C, 0x90, 0x81,
+ 0xC8, 0x12, 0x45, 0x16, 0x02, 0x6B, 0x3E, 0x90,
+ 0x81, 0xC8, 0x12, 0x45, 0x16, 0x80, 0x3E, 0x90,
+ 0x81, 0xC8, 0x12, 0x45, 0x16, 0x02, 0x6B, 0x4E,
+ 0x90, 0x01, 0xC0, 0xE0, 0x44, 0x01, 0xF0, 0x22,
+ 0x12, 0x5A, 0x4B, 0x12, 0x1F, 0xA4, 0xFF, 0x54,
+ 0x01, 0xFE, 0x90, 0x81, 0x45, 0xE0, 0x54, 0xFE,
+ 0x4E, 0xF0, 0xEF, 0xC3, 0x13, 0x30, 0xE0, 0x14,
+ 0x90, 0x00, 0x01, 0x12, 0x1F, 0xBD, 0x90, 0x81,
+ 0x46, 0xF0, 0x90, 0x00, 0x02, 0x12, 0x1F, 0xBD,
+ 0x90, 0x81, 0x47, 0xF0, 0x22, 0x12, 0x1F, 0xA4,
+ 0xFF, 0x54, 0x01, 0xFE, 0x90, 0x81, 0x3F, 0xE0,
+ 0x54, 0xFE, 0x4E, 0xF0, 0x90, 0x00, 0x01, 0x12,
+ 0x1F, 0xBD, 0xFE, 0x90, 0x05, 0x54, 0xE0, 0xC3,
+ 0x9E, 0x90, 0x81, 0x40, 0xF0, 0xEF, 0x20, 0xE0,
+ 0x07, 0x91, 0x65, 0x90, 0x05, 0x22, 0xE4, 0xF0,
+ 0x90, 0x81, 0x3F, 0xE0, 0x54, 0x01, 0x90, 0x01,
+ 0xBC, 0xF0, 0x90, 0x81, 0x40, 0xE0, 0x90, 0x01,
+ 0xBD, 0xF0, 0x22, 0x12, 0x1F, 0xA4, 0xFF, 0x54,
+ 0x7F, 0x90, 0x81, 0x27, 0xF0, 0xEF, 0xC4, 0x13,
+ 0x13, 0x13, 0x54, 0x01, 0xA3, 0xF0, 0x90, 0x00,
+ 0x01, 0x12, 0x1F, 0xBD, 0xFF, 0x54, 0xF0, 0xC4,
+ 0x54, 0x0F, 0xFE, 0x90, 0x81, 0x26, 0xE0, 0x54,
+ 0xF0, 0x4E, 0xF0, 0x90, 0x00, 0x03, 0x12, 0x1F,
+ 0xBD, 0x54, 0x01, 0x25, 0xE0, 0xFE, 0x90, 0x81,
+ 0x24, 0xE0, 0x54, 0xFD, 0x4E, 0xF0, 0xEF, 0x54,
+ 0x0F, 0xC4, 0x54, 0xF0, 0xFF, 0x90, 0x81, 0x26,
+ 0xE0, 0x54, 0x0F, 0x4F, 0xF0, 0x90, 0x00, 0x04,
+ 0x12, 0x1F, 0xBD, 0x90, 0x81, 0x29, 0xF0, 0xD1,
+ 0xC6, 0x90, 0x01, 0xB9, 0x74, 0x01, 0xF0, 0x90,
+ 0x01, 0xB8, 0xF0, 0x90, 0x81, 0x27, 0xE0, 0x90,
+ 0x01, 0xBA, 0xF0, 0x90, 0x81, 0x29, 0xE0, 0x90,
+ 0x01, 0xBB, 0xF0, 0x90, 0x81, 0x26, 0xE0, 0x54,
+ 0x0F, 0x90, 0x01, 0xBE, 0xF0, 0x22, 0x90, 0x81,
+ 0xCB, 0x12, 0x45, 0x1F, 0x12, 0x72, 0xB3, 0x90,
+ 0x81, 0x27, 0xE0, 0xFF, 0x12, 0x4C, 0x3E, 0x90,
+ 0x81, 0x27, 0xE0, 0x60, 0x19, 0x90, 0x81, 0xCB,
+ 0x12, 0x45, 0x16, 0x90, 0x00, 0x01, 0x12, 0x1F,
+ 0xBD, 0x54, 0x0F, 0xFF, 0x90, 0x00, 0x02, 0x12,
+ 0x1F, 0xBD, 0xFD, 0x12, 0x72, 0xC4, 0x22, 0xC0,
+ 0xE0, 0xC0, 0xF0, 0xC0, 0x83, 0xC0, 0x82, 0xC0,
+ 0xD0, 0x75, 0xD0, 0x00, 0xC0, 0x00, 0xC0, 0x01,
+ 0xC0, 0x02, 0xC0, 0x03, 0xC0, 0x04, 0xC0, 0x05,
+ 0xC0, 0x06, 0xC0, 0x07, 0x90, 0x01, 0xC4, 0x74,
+ 0xF7, 0xF0, 0x74, 0x56, 0xA3, 0xF0, 0x12, 0x6C,
+ 0xA5, 0xE5, 0x49, 0x30, 0xE1, 0x03, 0x12, 0x6F,
+ 0x79, 0xE5, 0x49, 0x30, 0xE2, 0x02, 0xF1, 0xA5,
+ 0xE5, 0x49, 0x30, 0xE3, 0x03, 0x12, 0x6F, 0x8D,
+ 0xE5, 0x4A, 0x30, 0xE0, 0x03, 0x12, 0x6F, 0xC9,
+ 0xE5, 0x4A, 0x30, 0xE4, 0x03, 0x12, 0x70, 0x22,
+ 0xE5, 0x4B, 0x30, 0xE1, 0x02, 0x51, 0x78, 0xE5,
+ 0x4B, 0x30, 0xE0, 0x02, 0x31, 0xFF, 0xE5, 0x4B,
+ 0x30, 0xE3, 0x02, 0xF1, 0xE0, 0xE5, 0x4C, 0x30,
+ 0xE1, 0x05, 0x7F, 0x03, 0x12, 0x44, 0x27, 0xE5,
+ 0x4C, 0x30, 0xE4, 0x03, 0x12, 0x4E, 0xC4, 0xE5,
+ 0x4C, 0x30, 0xE5, 0x03, 0x12, 0x70, 0x38, 0xE5,
+ 0x4C, 0x30, 0xE6, 0x03, 0x12, 0x70, 0xCE, 0x74,
+ 0xF7, 0x04, 0x90, 0x01, 0xC4, 0xF0, 0x74, 0x56,
+ 0xA3, 0xF0, 0xD0, 0x07, 0xD0, 0x06, 0xD0, 0x05,
+ 0xD0, 0x04, 0xD0, 0x03, 0xD0, 0x02, 0xD0, 0x01,
+ 0xD0, 0x00, 0xD0, 0xD0, 0xD0, 0x82, 0xD0, 0x83,
+ 0xD0, 0xF0, 0xD0, 0xE0, 0x32, 0x90, 0x81, 0x27,
+ 0xE0, 0x60, 0x34, 0x90, 0x06, 0x92, 0xE0, 0x30,
+ 0xE0, 0x23, 0xE4, 0xF5, 0x1D, 0x90, 0x81, 0x3A,
+ 0xE0, 0xC3, 0x13, 0x54, 0x7F, 0xF5, 0x1E, 0xE4,
+ 0xFB, 0xFD, 0x7F, 0x58, 0x7E, 0x01, 0x11, 0x05,
+ 0x90, 0x01, 0x5B, 0x74, 0x05, 0xF0, 0x90, 0x06,
+ 0x92, 0x74, 0x01, 0xF0, 0x22, 0x90, 0x81, 0x24,
+ 0xE0, 0x54, 0xF7, 0xF0, 0x12, 0x47, 0x2A, 0x22,
+ 0x22, 0x12, 0x1F, 0xA4, 0x90, 0x81, 0x31, 0xF0,
+ 0x22, 0x90, 0x01, 0xC8, 0xE4, 0xF0, 0xA3, 0xF0,
+ 0xA3, 0xF0, 0x7B, 0x01, 0x7A, 0x81, 0x79, 0x51,
+ 0x7F, 0xFF, 0xFE, 0x12, 0x2B, 0x27, 0xBF, 0x01,
+ 0x09, 0x90, 0x81, 0x51, 0xE0, 0x64, 0x03, 0x60,
+ 0x03, 0x22, 0x01, 0xAB, 0xE4, 0x90, 0x81, 0x56,
+ 0xF0, 0x90, 0x81, 0x56, 0xE0, 0xFF, 0xC3, 0x94,
+ 0x02, 0x40, 0x02, 0x01, 0xE6, 0xC3, 0x74, 0xFE,
+ 0x9F, 0xFF, 0xE4, 0x94, 0x00, 0xFE, 0x7B, 0x01,
+ 0x7A, 0x81, 0x79, 0x52, 0x12, 0x2B, 0x27, 0xEF,
+ 0x64, 0x01, 0x70, 0x77, 0x90, 0x81, 0x52, 0xE0,
+ 0xFF, 0x54, 0xC0, 0xFE, 0x60, 0x05, 0xEF, 0x54,
+ 0x0C, 0x70, 0x16, 0x90, 0x81, 0x52, 0xE0, 0xFF,
+ 0x54, 0x30, 0x60, 0x67, 0xEF, 0x54, 0x03, 0x60,
+ 0x62, 0x90, 0x81, 0x53, 0x74, 0x01, 0xF0, 0x80,
+ 0x05, 0xE4, 0x90, 0x81, 0x53, 0xF0, 0x90, 0x81,
+ 0x53, 0xE0, 0x90, 0x81, 0x52, 0x70, 0x16, 0xE0,
+ 0xFF, 0xEE, 0x13, 0x13, 0x54, 0x3F, 0x90, 0x81,
+ 0x54, 0xF0, 0xEF, 0x54, 0x0C, 0x13, 0x13, 0x54,
+ 0x3F, 0xA3, 0xF0, 0x80, 0x0D, 0xE0, 0xFE, 0x54,
+ 0x30, 0x90, 0x81, 0x54, 0xF0, 0xEE, 0x54, 0x03,
+ 0xA3, 0xF0, 0x90, 0x81, 0x54, 0xE0, 0x64, 0x30,
+ 0x70, 0x54, 0xA3, 0xE0, 0x64, 0x02, 0x70, 0x4E,
+ 0x90, 0x00, 0xF5, 0xE0, 0x54, 0x40, 0x90, 0x81,
+ 0x57, 0xF0, 0xE0, 0x70, 0x41, 0xA3, 0x74, 0x02,
+ 0xF0, 0x80, 0x10, 0x90, 0x81, 0x58, 0x74, 0x01,
+ 0xF0, 0x80, 0x08, 0x90, 0x81, 0x56, 0xE0, 0x04,
+ 0xF0, 0x01, 0x11, 0x90, 0x01, 0xC4, 0x74, 0xE9,
+ 0xF0, 0x74, 0x57, 0xA3, 0xF0, 0x90, 0x81, 0x58,
+ 0xE0, 0x90, 0x01, 0xC8, 0xF0, 0x90, 0x81, 0x52,
+ 0xE0, 0x90, 0x01, 0xC9, 0xF0, 0x90, 0x81, 0x53,
+ 0xE0, 0x90, 0x01, 0xCA, 0xF0, 0xE4, 0xFD, 0x7F,
+ 0x1F, 0x12, 0x32, 0x1E, 0x80, 0xD5, 0x22, 0x90,
+ 0x00, 0xF7, 0xE0, 0x20, 0xE7, 0x09, 0xE0, 0x7F,
+ 0x01, 0x20, 0xE6, 0x0C, 0x7F, 0x02, 0x22, 0x90,
+ 0x00, 0xF7, 0xE0, 0x30, 0xE6, 0x02, 0x7F, 0x03,
+ 0x22, 0x11, 0xE7, 0x90, 0x80, 0x3C, 0xEF, 0xF0,
+ 0x31, 0x13, 0x90, 0x01, 0x64, 0x74, 0x01, 0xF0,
+ 0x02, 0x2D, 0xA7, 0x31, 0x81, 0x31, 0xB1, 0x31,
+ 0x40, 0x31, 0x5F, 0xE4, 0xF5, 0x35, 0xF5, 0x36,
+ 0xF5, 0x37, 0xF5, 0x38, 0xAD, 0x35, 0x7F, 0x50,
+ 0x12, 0x32, 0x1E, 0xAD, 0x36, 0x7F, 0x51, 0x12,
+ 0x32, 0x1E, 0xAD, 0x37, 0x7F, 0x52, 0x12, 0x32,
+ 0x1E, 0xAD, 0x38, 0x7F, 0x53, 0x02, 0x32, 0x1E,
+ 0x75, 0x3D, 0x10, 0xE4, 0xF5, 0x3E, 0x75, 0x3F,
+ 0x07, 0x75, 0x40, 0x02, 0x90, 0x01, 0x30, 0xE5,
+ 0x3D, 0xF0, 0xA3, 0xE5, 0x3E, 0xF0, 0xA3, 0xE5,
+ 0x3F, 0xF0, 0xA3, 0xE5, 0x40, 0xF0, 0x22, 0x75,
+ 0x45, 0x0E, 0x75, 0x46, 0x01, 0x43, 0x46, 0x10,
+ 0x75, 0x47, 0x03, 0x75, 0x48, 0x62, 0x90, 0x01,
+ 0x38, 0xE5, 0x45, 0xF0, 0xA3, 0xE5, 0x46, 0xF0,
+ 0xA3, 0xE5, 0x47, 0xF0, 0xA3, 0xE5, 0x48, 0xF0,
+ 0x22, 0x90, 0x01, 0x30, 0xE4, 0xF0, 0xA3, 0xF0,
+ 0xA3, 0xF0, 0xA3, 0xF0, 0x90, 0x01, 0x38, 0xF0,
+ 0xA3, 0xF0, 0xA3, 0xF0, 0xA3, 0xF0, 0xFD, 0x7F,
+ 0x50, 0x12, 0x32, 0x1E, 0xE4, 0xFD, 0x7F, 0x51,
+ 0x12, 0x32, 0x1E, 0xE4, 0xFD, 0x7F, 0x52, 0x12,
+ 0x32, 0x1E, 0xE4, 0xFD, 0x7F, 0x53, 0x02, 0x32,
+ 0x1E, 0x90, 0x01, 0x34, 0x74, 0xFF, 0xF0, 0xA3,
+ 0xF0, 0xA3, 0xF0, 0xA3, 0xF0, 0x90, 0x01, 0x3C,
+ 0xF0, 0xA3, 0xF0, 0xA3, 0xF0, 0xA3, 0xF0, 0xFD,
+ 0x7F, 0x54, 0x12, 0x32, 0x1E, 0x7D, 0xFF, 0x7F,
+ 0x55, 0x12, 0x32, 0x1E, 0x7D, 0xFF, 0x7F, 0x56,
+ 0x12, 0x32, 0x1E, 0x7D, 0xFF, 0x7F, 0x57, 0x02,
+ 0x32, 0x1E, 0x90, 0x00, 0x80, 0xE0, 0x44, 0x80,
+ 0xFD, 0x7F, 0x80, 0x12, 0x32, 0x1E, 0x90, 0xFD,
+ 0x00, 0xE0, 0x54, 0xBF, 0xF0, 0x12, 0x57, 0xE9,
+ 0x51, 0x77, 0x12, 0x32, 0x77, 0x51, 0xC9, 0x51,
+ 0x5E, 0x7F, 0x01, 0x12, 0x43, 0x15, 0x90, 0x81,
+ 0x41, 0x74, 0x02, 0xF0, 0xFF, 0x12, 0x43, 0x15,
+ 0x90, 0x81, 0x41, 0xE0, 0x04, 0xF0, 0x7F, 0x03,
+ 0x12, 0x43, 0x15, 0x90, 0x81, 0x41, 0xE0, 0x04,
+ 0xF0, 0x31, 0x01, 0x51, 0x3F, 0x90, 0x00, 0x80,
+ 0xE0, 0x44, 0x40, 0xFD, 0x7F, 0x80, 0x12, 0x32,
+ 0x1E, 0x75, 0x20, 0xFF, 0x51, 0x68, 0x51, 0xF9,
+ 0x51, 0x7F, 0xE4, 0xFF, 0x02, 0x43, 0x9E, 0x51,
+ 0x62, 0x51, 0x6F, 0x51, 0xA7, 0x71, 0x4F, 0x51,
+ 0x8A, 0x51, 0x95, 0x90, 0x81, 0x45, 0xE0, 0x54,
+ 0xFE, 0xF0, 0xA3, 0x74, 0x03, 0xF0, 0xA3, 0xF0,
+ 0xE4, 0xA3, 0xF0, 0xA3, 0xF0, 0x22, 0xE4, 0xF5,
+ 0x4D, 0x22, 0xE4, 0x90, 0x80, 0xDE, 0xF0, 0x22,
+ 0x75, 0xE8, 0x03, 0x75, 0xA8, 0x84, 0x22, 0xE4,
+ 0x90, 0x80, 0xD8, 0xF0, 0xA3, 0xF0, 0x22, 0x90,
+ 0x01, 0x94, 0xE0, 0x44, 0x01, 0xF0, 0x22, 0x90,
+ 0x01, 0xE4, 0x74, 0x0B, 0xF0, 0xA3, 0x74, 0x01,
+ 0xF0, 0x22, 0x90, 0x81, 0x3F, 0xE0, 0x54, 0xFE,
+ 0xF0, 0xE4, 0xA3, 0xF0, 0x22, 0x90, 0x81, 0x42,
+ 0xE0, 0x54, 0xFE, 0xF0, 0x54, 0x7F, 0xF0, 0xA3,
+ 0x74, 0x0A, 0xF0, 0xE4, 0xA3, 0xF0, 0x22, 0x90,
+ 0x81, 0x1F, 0xE0, 0x54, 0xFE, 0xF0, 0x54, 0xFD,
+ 0xF0, 0x54, 0xFB, 0xF0, 0x54, 0xF7, 0xF0, 0x54,
+ 0xEF, 0xF0, 0x54, 0xDF, 0xF0, 0xE4, 0xA3, 0xF0,
+ 0xA3, 0xF0, 0xA3, 0xF0, 0xA3, 0x74, 0x0C, 0xF0,
+ 0x22, 0x90, 0x01, 0x01, 0xE0, 0x44, 0x04, 0xF0,
+ 0x90, 0x01, 0x9C, 0x74, 0x7E, 0xF0, 0xA3, 0x74,
+ 0x92, 0xF0, 0xA3, 0x74, 0xA0, 0xF0, 0xA3, 0x74,
+ 0x24, 0xF0, 0x90, 0x01, 0x9B, 0x74, 0x49, 0xF0,
+ 0x90, 0x01, 0x9A, 0x74, 0xE0, 0xF0, 0x90, 0x01,
+ 0x99, 0xE4, 0xF0, 0x90, 0x01, 0x98, 0x04, 0xF0,
+ 0x22, 0xE4, 0x90, 0x81, 0x51, 0xF0, 0xA3, 0xF0,
+ 0x90, 0x01, 0x98, 0xE0, 0x7F, 0x00, 0x30, 0xE4,
+ 0x02, 0x7F, 0x01, 0xEF, 0x64, 0x01, 0x60, 0x3E,
+ 0xC3, 0x90, 0x81, 0x52, 0xE0, 0x94, 0x88, 0x90,
+ 0x81, 0x51, 0xE0, 0x94, 0x13, 0x40, 0x08, 0x90,
+ 0x01, 0xC1, 0xE0, 0x44, 0x10, 0xF0, 0x22, 0x90,
+ 0x81, 0x51, 0xE4, 0x75, 0xF0, 0x01, 0x12, 0x44,
+ 0xA9, 0x7F, 0x14, 0x7E, 0x00, 0x12, 0x32, 0xAA,
+ 0xD3, 0x90, 0x81, 0x52, 0xE0, 0x94, 0x32, 0x90,
+ 0x81, 0x51, 0xE0, 0x94, 0x00, 0x40, 0xB9, 0x90,
+ 0x01, 0xC6, 0xE0, 0x30, 0xE3, 0xB2, 0x22, 0xE4,
+ 0x90, 0x81, 0x27, 0xF0, 0xA3, 0xF0, 0x90, 0x81,
+ 0x26, 0xE0, 0x54, 0x0F, 0xF0, 0x54, 0xF0, 0xF0,
+ 0x90, 0x81, 0x24, 0xE0, 0x54, 0xFD, 0xF0, 0x54,
+ 0xF7, 0xF0, 0x54, 0xEF, 0xF0, 0x90, 0x81, 0x2D,
+ 0x74, 0x01, 0xF0, 0xA3, 0xF0, 0x90, 0x81, 0x24,
+ 0xE0, 0x54, 0xFB, 0xF0, 0xA3, 0xE0, 0x54, 0xFB,
+ 0xF0, 0xE4, 0x90, 0x81, 0x30, 0xF0, 0x90, 0x81,
+ 0x2F, 0x74, 0x07, 0xF0, 0x90, 0x81, 0x32, 0xE4,
+ 0xF0, 0xA3, 0x74, 0x02, 0xF0, 0xE4, 0x90, 0x81,
+ 0x2B, 0xF0, 0x90, 0x81, 0x24, 0xE0, 0x54, 0xFE,
+ 0xF0, 0x90, 0x81, 0x29, 0x74, 0x0C, 0xF0, 0x90,
+ 0x81, 0x24, 0xE0, 0x54, 0xDF, 0xF0, 0x90, 0x81,
+ 0x2A, 0x74, 0x0C, 0xF0, 0x90, 0x81, 0x24, 0xE0,
+ 0x54, 0xBF, 0xF0, 0x54, 0x7F, 0xF0, 0xA3, 0xE0,
+ 0x54, 0xFE, 0xF0, 0x54, 0xFD, 0xF0, 0x54, 0xF7,
+ 0xF0, 0x90, 0x81, 0x34, 0x12, 0x20, 0xDA, 0x00,
+ 0x00, 0x00, 0x00, 0x90, 0x80, 0x3C, 0xE0, 0xB4,
+ 0x01, 0x08, 0x90, 0x81, 0x31, 0x74, 0x99, 0xF0,
+ 0x80, 0x12, 0x90, 0x80, 0x3C, 0xE0, 0x90, 0x81,
+ 0x31, 0xB4, 0x03, 0x05, 0x74, 0x90, 0xF0, 0x80,
+ 0x03, 0x74, 0x40, 0xF0, 0x90, 0x81, 0x38, 0x74,
+ 0x01, 0xF0, 0xA3, 0x74, 0x05, 0xF0, 0xA3, 0xE0,
+ 0x54, 0x01, 0x44, 0x28, 0xF0, 0xA3, 0x74, 0x05,
+ 0xF0, 0xE4, 0xA3, 0xF0, 0xA3, 0xE0, 0x54, 0xFD,
+ 0xF0, 0x54, 0xFB, 0xF0, 0x54, 0xF7, 0xF0, 0x54,
+ 0xEF, 0xF0, 0x54, 0xDF, 0xF0, 0x54, 0xBF, 0xF0,
+ 0xE4, 0xA3, 0xF0, 0x22, 0xE4, 0x90, 0x81, 0x59,
+ 0xF0, 0x90, 0x81, 0x59, 0xE0, 0x64, 0x01, 0xF0,
+ 0x24, 0x24, 0x90, 0x01, 0xC4, 0xF0, 0x74, 0x5C,
+ 0xA3, 0xF0, 0x90, 0x81, 0x2A, 0xE0, 0xFF, 0x90,
+ 0x81, 0x29, 0xE0, 0x6F, 0x60, 0x03, 0x12, 0x47,
+ 0x2A, 0xD1, 0x08, 0xBF, 0x01, 0x02, 0x91, 0x5F,
+ 0xB1, 0xF2, 0x12, 0x32, 0x9E, 0xBF, 0x01, 0x02,
+ 0xB1, 0x67, 0x12, 0x42, 0x4D, 0x80, 0xCA, 0xD3,
+ 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x90, 0x81,
+ 0x24, 0xE0, 0x30, 0xE0, 0x24, 0x90, 0x81, 0x1F,
+ 0xE0, 0xFF, 0x30, 0xE0, 0x1A, 0xC3, 0x13, 0x30,
+ 0xE0, 0x07, 0xB1, 0xFB, 0xBF, 0x01, 0x12, 0x80,
+ 0x0A, 0x90, 0x81, 0x23, 0xE0, 0xFF, 0x60, 0x03,
+ 0xB4, 0x08, 0x06, 0x91, 0x96, 0x80, 0x02, 0x91,
+ 0xA6, 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0xD3, 0x10,
+ 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0xB1, 0x22, 0x91,
+ 0xBA, 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0x90, 0x81,
+ 0x2A, 0xE0, 0x70, 0x0D, 0xD1, 0x2F, 0xBF, 0x01,
+ 0x08, 0x91, 0x96, 0x90, 0x01, 0xE5, 0xE0, 0x04,
+ 0xF0, 0x22, 0xB1, 0xF3, 0x90, 0x00, 0x08, 0xE0,
+ 0x54, 0xEF, 0xFD, 0x7F, 0x08, 0x12, 0x32, 0x1E,
+ 0xE4, 0xFF, 0x8F, 0x50, 0xE4, 0x90, 0x81, 0x5A,
+ 0xF0, 0xA3, 0xF0, 0x90, 0x01, 0x09, 0xE0, 0x7F,
+ 0x00, 0x30, 0xE7, 0x02, 0x7F, 0x01, 0xEF, 0x65,
+ 0x50, 0x60, 0x3E, 0xC3, 0x90, 0x81, 0x5B, 0xE0,
+ 0x94, 0x88, 0x90, 0x81, 0x5A, 0xE0, 0x94, 0x13,
+ 0x40, 0x08, 0x90, 0x01, 0xC0, 0xE0, 0x44, 0x10,
+ 0xF0, 0x22, 0x90, 0x81, 0x5A, 0xE4, 0x75, 0xF0,
+ 0x01, 0x12, 0x44, 0xA9, 0x7F, 0x14, 0x7E, 0x00,
+ 0x12, 0x32, 0xAA, 0xD3, 0x90, 0x81, 0x5B, 0xE0,
+ 0x94, 0x32, 0x90, 0x81, 0x5A, 0xE0, 0x94, 0x00,
+ 0x40, 0xB9, 0x90, 0x01, 0xC6, 0xE0, 0x30, 0xE0,
+ 0xB2, 0x22, 0x90, 0x81, 0x31, 0xE0, 0xFD, 0x7F,
+ 0x93, 0x12, 0x32, 0x1E, 0x90, 0x81, 0x28, 0xE0,
+ 0x60, 0x12, 0x90, 0x01, 0x2F, 0xE0, 0x30, 0xE7,
+ 0x05, 0x74, 0x10, 0xF0, 0x80, 0x06, 0x90, 0x01,
+ 0x2F, 0x74, 0x90, 0xF0, 0x90, 0x00, 0x08, 0xE0,
+ 0x44, 0x10, 0xFD, 0x7F, 0x08, 0x12, 0x32, 0x1E,
+ 0x7F, 0x01, 0x91, 0xCA, 0x90, 0x00, 0x90, 0xE0,
+ 0x44, 0x01, 0xFD, 0x7F, 0x90, 0x12, 0x32, 0x1E,
+ 0x7F, 0x14, 0x7E, 0x00, 0x02, 0x32, 0xAA, 0xD3,
+ 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x12, 0x2D,
+ 0xA7, 0xE4, 0xF5, 0x52, 0x12, 0x32, 0x9E, 0xEF,
+ 0x60, 0x73, 0x63, 0x52, 0x01, 0xE5, 0x52, 0x24,
+ 0x67, 0x90, 0x01, 0xC4, 0xF0, 0x74, 0x5D, 0xA3,
+ 0xF0, 0x90, 0x00, 0x88, 0xE0, 0xF5, 0x50, 0xF5,
+ 0x51, 0x54, 0x0F, 0x60, 0xDF, 0xE5, 0x50, 0x30,
+ 0xE0, 0x0B, 0x20, 0xE4, 0x03, 0x12, 0x29, 0xC5,
+ 0x53, 0x51, 0xEE, 0x80, 0x3F, 0xE5, 0x50, 0x30,
+ 0xE1, 0x16, 0x20, 0xE5, 0x0E, 0x12, 0x11, 0xBD,
+ 0xEF, 0x70, 0x03, 0x43, 0x51, 0x20, 0x90, 0x01,
+ 0x06, 0xE4, 0xF0, 0x53, 0x51, 0xFD, 0x80, 0x24,
+ 0xE5, 0x50, 0x30, 0xE2, 0x0B, 0x20, 0xE6, 0x03,
+ 0x12, 0x67, 0x06, 0x53, 0x51, 0xFB, 0x80, 0x14,
+ 0xE5, 0x50, 0x30, 0xE3, 0x0F, 0x20, 0xE7, 0x09,
+ 0x12, 0x61, 0x6E, 0xEF, 0x70, 0x03, 0x43, 0x51,
+ 0x80, 0x53, 0x51, 0xF7, 0xAD, 0x51, 0x7F, 0x88,
+ 0x12, 0x32, 0x1E, 0x80, 0x87, 0xD0, 0xD0, 0x92,
+ 0xAF, 0x22, 0x22, 0x90, 0x00, 0x90, 0xE0, 0x20,
+ 0xE0, 0xF9, 0x22, 0x90, 0x81, 0x22, 0xE0, 0x64,
+ 0x02, 0x7F, 0x01, 0x60, 0x02, 0x7F, 0x00, 0x22,
+ 0x7F, 0x02, 0x90, 0x81, 0x41, 0xE0, 0xFE, 0xEF,
+ 0xC3, 0x9E, 0x50, 0x18, 0xEF, 0x25, 0xE0, 0x24,
+ 0x81, 0xF8, 0xE6, 0x30, 0xE4, 0x0B, 0x90, 0x01,
+ 0xB8, 0x74, 0x08, 0xF0, 0xA3, 0xF0, 0x7F, 0x00,
+ 0x22, 0x0F, 0x80, 0xDE, 0x7F, 0x01, 0x22, 0x90,
+ 0x02, 0x87, 0xE0, 0x60, 0x08, 0x90, 0x01, 0xB8,
+ 0x74, 0x01, 0xF0, 0x80, 0x17, 0x90, 0x02, 0x86,
+ 0xE0, 0x20, 0xE1, 0x08, 0x90, 0x01, 0xB8, 0x74,
+ 0x04, 0xF0, 0x80, 0x08, 0x90, 0x01, 0xB8, 0xE4,
+ 0xF0, 0x7F, 0x01, 0x22, 0x90, 0x01, 0xB9, 0x74,
+ 0x08, 0xF0, 0x7F, 0x00, 0x22, 0xE4, 0xFB, 0xFA,
+ 0xFD, 0x7F, 0x01, 0x12, 0x44, 0x4E, 0x90, 0x81,
+ 0xBD, 0xEF, 0xF0, 0x60, 0xF0, 0xD1, 0x71, 0x80,
+ 0xEC, 0xD3, 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0,
+ 0x90, 0x01, 0xCC, 0xE0, 0x54, 0x0F, 0x90, 0x81,
+ 0xBE, 0xF0, 0x90, 0x81, 0xBE, 0xE0, 0xFD, 0x70,
+ 0x02, 0xE1, 0x9C, 0x90, 0x82, 0x09, 0xE0, 0xFF,
+ 0x74, 0x01, 0x7E, 0x00, 0xA8, 0x07, 0x08, 0x80,
+ 0x05, 0xC3, 0x33, 0xCE, 0x33, 0xCE, 0xD8, 0xF9,
+ 0xFF, 0xEF, 0x5D, 0x70, 0x02, 0xE1, 0x95, 0x90,
+ 0x82, 0x09, 0xE0, 0x75, 0xF0, 0x04, 0x90, 0x01,
+ 0xD0, 0x12, 0x45, 0x0A, 0xE0, 0x90, 0x81, 0xBF,
+ 0xF0, 0x75, 0x13, 0x01, 0x75, 0x14, 0x81, 0x75,
+ 0x15, 0xBF, 0x75, 0x16, 0x01, 0x7B, 0x01, 0x7A,
+ 0x81, 0x79, 0xC0, 0x12, 0x2B, 0xED, 0x90, 0x82,
+ 0x09, 0xE0, 0x75, 0xF0, 0x04, 0x90, 0x01, 0xD1,
+ 0x12, 0x45, 0x0A, 0xE0, 0x90, 0x81, 0xC1, 0xF0,
+ 0x90, 0x82, 0x09, 0xE0, 0x75, 0xF0, 0x04, 0x90,
+ 0x01, 0xD2, 0x12, 0x45, 0x0A, 0xE0, 0x90, 0x81,
+ 0xC2, 0xF0, 0x90, 0x82, 0x09, 0xE0, 0x75, 0xF0,
+ 0x04, 0x90, 0x01, 0xD3, 0x12, 0x45, 0x0A, 0xE0,
+ 0x90, 0x81, 0xC3, 0xF0, 0x90, 0x82, 0x09, 0xE0,
+ 0x75, 0xF0, 0x04, 0x90, 0x01, 0xF0, 0x12, 0x45,
+ 0x0A, 0xE0, 0x90, 0x81, 0xC4, 0xF0, 0x90, 0x82,
+ 0x09, 0xE0, 0x75, 0xF0, 0x04, 0x90, 0x01, 0xF1,
+ 0x12, 0x45, 0x0A, 0xE0, 0x90, 0x81, 0xC5, 0xF0,
+ 0x90, 0x82, 0x09, 0xE0, 0x75, 0xF0, 0x04, 0x90,
+ 0x01, 0xF2, 0x12, 0x45, 0x0A, 0xE0, 0x90, 0x81,
+ 0xC6, 0xF0, 0x90, 0x82, 0x09, 0xE0, 0x75, 0xF0,
+ 0x04, 0x90, 0x01, 0xF3, 0x12, 0x45, 0x0A, 0xE0,
+ 0x90, 0x81, 0xC7, 0xF0, 0x90, 0x81, 0xBE, 0xE0,
+ 0xFF, 0x90, 0x82, 0x09, 0xE0, 0xFE, 0x74, 0x01,
+ 0xA8, 0x06, 0x08, 0x80, 0x02, 0xC3, 0x33, 0xD8,
+ 0xFC, 0xF4, 0x5F, 0x90, 0x81, 0xBE, 0xF0, 0x90,
+ 0x82, 0x09, 0xE0, 0xFF, 0x74, 0x01, 0xA8, 0x07,
+ 0x08, 0x80, 0x02, 0xC3, 0x33, 0xD8, 0xFC, 0x90,
+ 0x01, 0xCC, 0xF0, 0x90, 0x81, 0xC0, 0xE0, 0xFF,
+ 0x7B, 0x01, 0x7A, 0x81, 0x79, 0xC1, 0x12, 0x55,
+ 0x3F, 0x90, 0x82, 0x09, 0xE0, 0x04, 0xF0, 0xE0,
+ 0x54, 0x03, 0xF0, 0xC1, 0x82, 0x90, 0x01, 0xC0,
+ 0xE0, 0x44, 0x02, 0xF0, 0xD0, 0xD0, 0x92, 0xAF,
+ 0x22, 0xE4, 0xFB, 0xFA, 0xFD, 0x7F, 0x01, 0x12,
+ 0x44, 0x4E, 0x90, 0x81, 0xD0, 0xEF, 0xF0, 0x60,
+ 0xF0, 0x12, 0x6C, 0x19, 0x80, 0xEB, 0x90, 0x81,
+ 0xD4, 0xEF, 0xF0, 0xA3, 0xED, 0xF0, 0xA3, 0x12,
+ 0x20, 0xDA, 0x00, 0x00, 0x00, 0x00, 0xE4, 0x90,
+ 0x81, 0xE2, 0xF0, 0x7F, 0x24, 0x7E, 0x08, 0x12,
+ 0x2D, 0x5C, 0x90, 0x81, 0xDA, 0x12, 0x20, 0xCE,
+ 0x90, 0x81, 0xD4, 0xE0, 0xFB, 0x70, 0x08, 0x90,
+ 0x81, 0xDA, 0x12, 0x44, 0xD9, 0x80, 0x16, 0xEB,
+ 0x75, 0xF0, 0x08, 0xA4, 0x24, 0x62, 0xF5, 0x82,
+ 0xE4, 0x34, 0x87, 0xF5, 0x83, 0xE0, 0xFE, 0xA3,
+ 0xE0, 0xFF, 0x12, 0x2D, 0x5C, 0x90, 0x81, 0xDE,
+ 0x12, 0x20, 0xCE, 0x90, 0x81, 0xD5, 0xE0, 0xFF,
+ 0xE4, 0xFC, 0xFD, 0xFE, 0x78, 0x17, 0x12, 0x20,
+ 0xBB, 0xA8, 0x04, 0xA9, 0x05, 0xAA, 0x06, 0xAB,
+ 0x07, 0x90, 0x81, 0xDE, 0x12, 0x44, 0xD9, 0xED,
+ 0x54, 0x7F, 0xFD, 0xEC, 0x54, 0x80, 0xFC, 0x12,
+ 0x44, 0xCC, 0xEC, 0x44, 0x80, 0xFC, 0x90, 0x81,
+ 0xDE, 0x12, 0x20, 0xCE, 0x90, 0x81, 0xDA, 0x12,
+ 0x44, 0xD9, 0xEC, 0x54, 0x7F, 0xFC, 0x90, 0x85,
+ 0xBB, 0x12, 0x20, 0xCE, 0x7F, 0x24, 0x7E, 0x08,
+ 0x12, 0x2E, 0xA2, 0x90, 0x81, 0xD4, 0xE0, 0x75,
+ 0xF0, 0x08, 0xA4, 0x24, 0x62, 0xF5, 0x82, 0xE4,
+ 0x34, 0x87, 0xF5, 0x83, 0xE0, 0xFE, 0xA3, 0xE0,
+ 0xFF, 0xC0, 0x06, 0xC0, 0x07, 0x90, 0x81, 0xDE,
+ 0x12, 0x44, 0xD9, 0x90, 0x85, 0xBB, 0x12, 0x20,
+ 0xCE, 0xD0, 0x07, 0xD0, 0x06, 0x12, 0x2E, 0xA2,
+ 0x90, 0x81, 0xDA, 0x12, 0x44, 0xD9, 0xEC, 0x44,
+ 0x80, 0xFC, 0x90, 0x85, 0xBB, 0x12, 0x20, 0xCE,
+ 0x7F, 0x24, 0x7E, 0x08, 0x12, 0x2E, 0xA2, 0x90,
+ 0x81, 0xD4, 0xE0, 0x70, 0x04, 0x7F, 0x20, 0x80,
+ 0x09, 0x90, 0x81, 0xD4, 0xE0, 0xB4, 0x01, 0x16,
+ 0x7F, 0x28, 0x7E, 0x08, 0x12, 0x2D, 0x5C, 0x78,
+ 0x08, 0x12, 0x20, 0xA8, 0xEF, 0x54, 0x01, 0xFF,
+ 0xE4, 0x90, 0x81, 0xE2, 0xEF, 0xF0, 0x90, 0x81,
+ 0xE2, 0xE0, 0x90, 0x81, 0xD4, 0x60, 0x0E, 0xE0,
+ 0x75, 0xF0, 0x08, 0xA4, 0x24, 0x66, 0xF5, 0x82,
+ 0xE4, 0x34, 0x87, 0x80, 0x0C, 0xE0, 0x75, 0xF0,
+ 0x08, 0xA4, 0x24, 0x64, 0xF5, 0x82, 0xE4, 0x34,
+ 0x87, 0xF5, 0x83, 0xE0, 0xFE, 0xA3, 0xE0, 0xFF,
+ 0x12, 0x2D, 0x5C, 0xED, 0x54, 0x0F, 0xFD, 0xE4,
+ 0xFC, 0x90, 0x81, 0xD6, 0x12, 0x20, 0xCE, 0x90,
+ 0x81, 0xD6, 0x02, 0x44, 0xD9, 0x90, 0x81, 0xE3,
+ 0xEF, 0xF0, 0xAB, 0x05, 0x90, 0x81, 0xE9, 0x12,
+ 0x20, 0xDA, 0x00, 0x00, 0x00, 0x00, 0xAF, 0x03,
+ 0xE4, 0xFC, 0xFD, 0xFE, 0x78, 0x14, 0x12, 0x20,
+ 0xBB, 0xA8, 0x04, 0xA9, 0x05, 0xAA, 0x06, 0xAB,
+ 0x07, 0x90, 0x81, 0xE5, 0x12, 0x44, 0xD9, 0xED,
+ 0x54, 0x0F, 0xFD, 0xE4, 0xFC, 0x12, 0x44, 0xCC,
+ 0xEC, 0x54, 0x0F, 0xFC, 0x90, 0x81, 0xE9, 0x12,
+ 0x20, 0xCE, 0x90, 0x81, 0xE3, 0xE0, 0x75, 0xF0,
+ 0x08, 0xA4, 0x24, 0x60, 0xF5, 0x82, 0xE4, 0x34,
+ 0x87, 0xF5, 0x83, 0xE0, 0xFE, 0xA3, 0xE0, 0xFF,
+ 0xC0, 0x06, 0xC0, 0x07, 0x90, 0x81, 0xE9, 0x12,
+ 0x44, 0xD9, 0x90, 0x85, 0xBB, 0x12, 0x20, 0xCE,
+ 0xD0, 0x07, 0xD0, 0x06, 0x02, 0x2E, 0xA2, 0xD3,
+ 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x12, 0x5F,
+ 0xB6, 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0x78, 0x10,
+ 0x74, 0x01, 0xF2, 0x90, 0x02, 0x09, 0xE0, 0x78,
+ 0x00, 0xF2, 0x08, 0x74, 0x20, 0xF2, 0x18, 0xE2,
+ 0xFF, 0x30, 0xE0, 0x05, 0x08, 0xE2, 0x24, 0x80,
+ 0xF2, 0xEF, 0xC3, 0x13, 0x90, 0xFD, 0x10, 0xF0,
+ 0x78, 0x01, 0xE2, 0x24, 0x00, 0xF5, 0x82, 0xE4,
+ 0x34, 0xFC, 0xF5, 0x83, 0xE0, 0x78, 0x03, 0xF2,
+ 0x64, 0x04, 0x60, 0x0D, 0xE2, 0xFF, 0x64, 0x08,
+ 0x60, 0x07, 0xEF, 0x64, 0x0C, 0x60, 0x02, 0x61,
+ 0xDE, 0xE4, 0x78, 0x02, 0xF2, 0x78, 0x03, 0xE2,
+ 0xFF, 0x18, 0xE2, 0xC3, 0x9F, 0x50, 0x2D, 0xE2,
+ 0xFD, 0x18, 0xE2, 0x2D, 0x90, 0x81, 0x5A, 0xF0,
+ 0xE0, 0xFF, 0x24, 0x00, 0xF5, 0x82, 0xE4, 0x34,
+ 0xFC, 0xF5, 0x83, 0xE0, 0xFE, 0x74, 0x04, 0x2D,
+ 0xF8, 0xEE, 0xF2, 0xEF, 0xB4, 0xFF, 0x06, 0x90,
+ 0xFD, 0x10, 0xE0, 0x04, 0xF0, 0x78, 0x02, 0xE2,
+ 0x04, 0xF2, 0x80, 0xC9, 0x78, 0x04, 0xE2, 0x78,
+ 0x12, 0xF2, 0xFF, 0x78, 0x05, 0xE2, 0x78, 0x11,
+ 0xF2, 0x78, 0x06, 0xE2, 0x78, 0x13, 0xF2, 0x78,
+ 0x07, 0xE2, 0x78, 0x14, 0xF2, 0x78, 0x08, 0xE2,
+ 0x78, 0x33, 0xF2, 0x78, 0x09, 0xE2, 0x78, 0x34,
+ 0xF2, 0x78, 0x0A, 0xE2, 0x78, 0x35, 0xF2, 0x78,
+ 0x0B, 0xE2, 0x78, 0x36, 0xF2, 0x78, 0x0C, 0xE2,
+ 0x78, 0x37, 0xF2, 0x78, 0x0D, 0xE2, 0x78, 0x38,
+ 0xF2, 0x78, 0x0E, 0xE2, 0x78, 0x39, 0xF2, 0x78,
+ 0x0F, 0xE2, 0x78, 0x3A, 0xF2, 0xE4, 0x78, 0x15,
+ 0xF2, 0xEF, 0x24, 0xF8, 0x60, 0x75, 0x24, 0xFC,
+ 0x60, 0x6C, 0x24, 0x08, 0x60, 0x02, 0x61, 0xC0,
+ 0x78, 0x11, 0xE2, 0xB4, 0x01, 0x05, 0x12, 0x29,
+ 0xC5, 0x61, 0xC5, 0x78, 0x11, 0xE2, 0xB4, 0x02,
+ 0x05, 0x12, 0x11, 0xBD, 0x61, 0xC5, 0x78, 0x11,
+ 0xE2, 0xB4, 0x03, 0x04, 0xF1, 0x06, 0x61, 0xC5,
+ 0x78, 0x11, 0xE2, 0xB4, 0x10, 0x17, 0x78, 0x14,
+ 0xE2, 0xFE, 0x18, 0xE2, 0xFD, 0xED, 0xFF, 0x78,
+ 0x16, 0xEE, 0xF2, 0xFE, 0x08, 0xEF, 0xF2, 0xFF,
+ 0x12, 0x32, 0xAA, 0x61, 0xC5, 0x78, 0x11, 0xE2,
+ 0xB4, 0x11, 0x17, 0x78, 0x14, 0xE2, 0xFE, 0x18,
+ 0xE2, 0xFD, 0xED, 0xFF, 0x78, 0x16, 0xEE, 0xF2,
+ 0xFE, 0x08, 0xEF, 0xF2, 0xFF, 0x12, 0x32, 0x06,
+ 0x61, 0xC5, 0x78, 0x11, 0xE2, 0xF4, 0x60, 0x02,
+ 0x61, 0xC5, 0x18, 0xF2, 0x61, 0xC5, 0x78, 0x15,
+ 0x74, 0x01, 0xF2, 0x78, 0x11, 0xE2, 0x64, 0x07,
+ 0x60, 0x02, 0x61, 0xAA, 0x78, 0x34, 0xE2, 0xFF,
+ 0xE4, 0xFC, 0xFD, 0xFE, 0x78, 0x08, 0x12, 0x20,
+ 0xBB, 0xC0, 0x04, 0xA9, 0x05, 0xAA, 0x06, 0xAB,
+ 0x07, 0x78, 0x33, 0xE2, 0xFF, 0xE4, 0xFC, 0xFD,
+ 0xFE, 0xD0, 0x00, 0x12, 0x44, 0xCC, 0xC0, 0x04,
+ 0xC0, 0x05, 0xC0, 0x06, 0xC0, 0x07, 0x78, 0x35,
+ 0xE2, 0xFF, 0xE4, 0xFC, 0xFD, 0xFE, 0x78, 0x10,
+ 0x12, 0x20, 0xBB, 0xD0, 0x03, 0xD0, 0x02, 0xD0,
+ 0x01, 0xD0, 0x00, 0x12, 0x44, 0xCC, 0x78, 0x18,
+ 0x12, 0x44, 0xFE, 0x78, 0x15, 0xE2, 0x70, 0x02,
+ 0x61, 0x93, 0x18, 0xE2, 0xFF, 0x18, 0xE2, 0xFD,
+ 0x31, 0x5F, 0x78, 0x1C, 0x12, 0x44, 0xFE, 0x78,
+ 0x38, 0xE2, 0xFF, 0xE4, 0xFC, 0xFD, 0xFE, 0x78,
+ 0x08, 0x12, 0x20, 0xBB, 0xC0, 0x04, 0xA9, 0x05,
+ 0xAA, 0x06, 0xAB, 0x07, 0x78, 0x37, 0xE2, 0xFF,
+ 0xE4, 0xFC, 0xFD, 0xFE, 0xD0, 0x00, 0x12, 0x44,
+ 0xCC, 0xC0, 0x04, 0xC0, 0x05, 0xC0, 0x06, 0xC0,
+ 0x07, 0x78, 0x39, 0xE2, 0xFF, 0xE4, 0xFC, 0xFD,
+ 0xFE, 0x78, 0x10, 0x12, 0x20, 0xBB, 0xD0, 0x03,
+ 0xD0, 0x02, 0xD0, 0x01, 0xD0, 0x00, 0x12, 0x44,
+ 0xCC, 0x78, 0x20, 0x12, 0x44, 0xFE, 0x78, 0x20,
+ 0x12, 0x44, 0xE5, 0x12, 0x20, 0x9B, 0x78, 0x1C,
+ 0x12, 0x44, 0xF1, 0x12, 0x44, 0xBF, 0xC0, 0x04,
+ 0xC0, 0x05, 0xC0, 0x06, 0xC0, 0x07, 0x78, 0x18,
+ 0x12, 0x44, 0xE5, 0x78, 0x20, 0x12, 0x44, 0xF1,
+ 0x12, 0x44, 0xBF, 0xD0, 0x03, 0xD0, 0x02, 0xD0,
+ 0x01, 0xD0, 0x00, 0x12, 0x44, 0xCC, 0x78, 0x18,
+ 0x12, 0x44, 0xFE, 0x78, 0x18, 0x12, 0x44, 0xE5,
+ 0x90, 0x81, 0xF9, 0x12, 0x20, 0xCE, 0x78, 0x13,
+ 0xE2, 0xFD, 0x08, 0xE2, 0xFF, 0x12, 0x55, 0x1C,
+ 0x80, 0x1B, 0x78, 0x13, 0xE2, 0xFF, 0x08, 0xE2,
+ 0xFD, 0x78, 0x11, 0xE2, 0xFB, 0x78, 0x15, 0xE2,
+ 0x90, 0x81, 0xBC, 0xF0, 0x71, 0xE1, 0x80, 0x05,
+ 0x78, 0x10, 0x74, 0x02, 0xF2, 0x78, 0x10, 0xE2,
+ 0xFF, 0xC3, 0x94, 0x02, 0x50, 0x10, 0xEF, 0x60,
+ 0x0A, 0x78, 0x02, 0xE2, 0xFF, 0x18, 0xE2, 0x2F,
+ 0xF2, 0x21, 0x90, 0x7F, 0x01, 0x22, 0x7F, 0x00,
+ 0x22, 0xAC, 0x07, 0xED, 0xAD, 0x04, 0x78, 0x24,
+ 0xF2, 0xED, 0x08, 0xF2, 0xEB, 0xB4, 0x04, 0x07,
+ 0x78, 0x27, 0x74, 0x01, 0xF2, 0x80, 0x0E, 0xEB,
+ 0x78, 0x27, 0xB4, 0x05, 0x05, 0x74, 0x02, 0xF2,
+ 0x80, 0x03, 0x74, 0x04, 0xF2, 0xD3, 0x78, 0x25,
+ 0xE2, 0x94, 0xFF, 0x18, 0xE2, 0x94, 0x00, 0x50,
+ 0x63, 0xE4, 0x78, 0x26, 0xF2, 0x78, 0x27, 0xE2,
+ 0xFF, 0x18, 0xE2, 0xFE, 0xC3, 0x9F, 0x40, 0x02,
+ 0xA1, 0x7F, 0x74, 0x33, 0x2E, 0xF8, 0xE2, 0x78,
+ 0x28, 0xF2, 0x90, 0x81, 0xBC, 0xE0, 0x60, 0x2D,
+ 0x74, 0x37, 0x2E, 0xF8, 0xE2, 0x78, 0x32, 0xF2,
+ 0xEE, 0xFF, 0x78, 0x25, 0xE2, 0x2F, 0xFF, 0x18,
+ 0xE2, 0x34, 0x00, 0x8F, 0x82, 0xF5, 0x83, 0xE0,
+ 0x78, 0x29, 0xF2, 0x78, 0x32, 0xE2, 0xFF, 0xF4,
+ 0xFE, 0x78, 0x29, 0xE2, 0x5E, 0xFE, 0x18, 0xE2,
+ 0xFD, 0xEF, 0x5D, 0x4E, 0xF2, 0x78, 0x24, 0x08,
+ 0xE2, 0xFF, 0x08, 0xE2, 0x2F, 0xFF, 0x78, 0x28,
+ 0xE2, 0xFD, 0x12, 0x32, 0x1E, 0x78, 0x26, 0xE2,
+ 0x04, 0xF2, 0x80, 0xA1, 0xD3, 0x78, 0x25, 0xE2,
+ 0x94, 0xFF, 0x18, 0xE2, 0x94, 0x07, 0x50, 0x69,
+ 0xE4, 0x78, 0x26, 0xF2, 0x78, 0x27, 0xE2, 0xFF,
+ 0x18, 0xE2, 0xFE, 0xC3, 0x9F, 0x40, 0x02, 0xA1,
+ 0x7F, 0x74, 0x33, 0x2E, 0xF8, 0xE2, 0x78, 0x28,
+ 0xF2, 0x90, 0x81, 0xBC, 0xE0, 0x60, 0x2D, 0x78,
+ 0x26, 0xE2, 0xFF, 0xFD, 0x18, 0xE2, 0x2D, 0xFD,
+ 0x18, 0xE2, 0x34, 0x00, 0x8D, 0x82, 0xF5, 0x83,
+ 0xE0, 0x78, 0x29, 0xF2, 0x74, 0x37, 0x2F, 0xF8,
+ 0xE2, 0x78, 0x32, 0xF2, 0xE2, 0xFF, 0xF4, 0xFE,
+ 0x78, 0x29, 0xE2, 0x5E, 0xFE, 0x18, 0xE2, 0xFD,
+ 0xEF, 0x5D, 0x4E, 0xF2, 0x78, 0x28, 0xE2, 0xFF,
+ 0x78, 0x26, 0xE2, 0xFD, 0x18, 0xE2, 0x2D, 0xFD,
+ 0x18, 0xE2, 0x34, 0x00, 0x8D, 0x82, 0xF5, 0x83,
+ 0xEF, 0xF0, 0x78, 0x26, 0xE2, 0x04, 0xF2, 0x80,
+ 0x9B, 0x90, 0x81, 0xBC, 0xE0, 0x60, 0x0F, 0x78,
+ 0x24, 0xE2, 0xFE, 0x08, 0xE2, 0xFF, 0x12, 0x2D,
+ 0x5C, 0x78, 0x2E, 0x12, 0x44, 0xFE, 0xE4, 0x78,
+ 0x26, 0xF2, 0x78, 0x27, 0xE2, 0xFF, 0x18, 0xE2,
+ 0xFE, 0xC3, 0x9F, 0x50, 0x5D, 0x74, 0x33, 0x2E,
+ 0xF8, 0xE2, 0x78, 0x28, 0xF2, 0x90, 0x81, 0xBC,
+ 0xE0, 0x60, 0x2B, 0x78, 0x2E, 0x12, 0x44, 0xE5,
+ 0x78, 0x26, 0xE2, 0xFB, 0x75, 0xF0, 0x08, 0xA4,
+ 0xF9, 0xF8, 0x12, 0x20, 0xA8, 0x78, 0x29, 0xEF,
+ 0xF2, 0x74, 0x37, 0x2B, 0xF8, 0xE2, 0x78, 0x32,
+ 0xF2, 0xE2, 0xFE, 0xF4, 0x5F, 0xFF, 0x78, 0x28,
+ 0xE2, 0xFD, 0xEE, 0x5D, 0x4F, 0xF2, 0x78, 0x28,
+ 0xE2, 0xFF, 0x78, 0x26, 0xE2, 0xFD, 0xC3, 0x74,
+ 0x03, 0x9D, 0xFD, 0xE4, 0x94, 0x00, 0xFC, 0x7B,
+ 0xFE, 0x74, 0x2A, 0x2D, 0xF9, 0x74, 0x80, 0x3C,
+ 0xFA, 0xEF, 0x12, 0x1F, 0xEA, 0xE2, 0x04, 0xF2,
+ 0x80, 0x98, 0x78, 0x2A, 0x12, 0x44, 0xE5, 0x90,
+ 0x85, 0xBB, 0x12, 0x20, 0xCE, 0x78, 0x24, 0xE2,
+ 0xFE, 0x08, 0xE2, 0xFF, 0x12, 0x2E, 0xA2, 0x22,
+ 0x22, 0x90, 0x81, 0xCB, 0x12, 0x45, 0x1F, 0x90,
+ 0x00, 0x01, 0x12, 0x1F, 0xBD, 0xFF, 0xFE, 0x12,
+ 0x1F, 0xA4, 0xFD, 0xC3, 0x13, 0x30, 0xE0, 0x12,
+ 0x90, 0x81, 0xCB, 0x12, 0x45, 0x16, 0x90, 0x00,
+ 0x02, 0x12, 0x1F, 0xBD, 0x90, 0x81, 0xCF, 0xF0,
+ 0x80, 0x05, 0x90, 0x81, 0xCF, 0xEF, 0xF0, 0x90,
+ 0x81, 0xCE, 0xEE, 0xF0, 0x90, 0x81, 0xCF, 0xE0,
+ 0xFE, 0x90, 0x81, 0xCE, 0xE0, 0xFF, 0xD3, 0x9E,
+ 0x50, 0x38, 0x90, 0x81, 0xCB, 0x12, 0x45, 0x16,
+ 0x12, 0x1F, 0xA4, 0x54, 0x01, 0xFE, 0x74, 0xDE,
+ 0x2F, 0xF5, 0x82, 0xE4, 0x34, 0x80, 0xF5, 0x83,
+ 0xEE, 0xF0, 0x74, 0xDE, 0x2F, 0xF5, 0x82, 0xE4,
+ 0x34, 0x80, 0xF5, 0x83, 0xE0, 0x70, 0x04, 0xD1,
+ 0x25, 0x80, 0x07, 0x90, 0x81, 0xCE, 0xE0, 0xFF,
+ 0xB1, 0x80, 0x90, 0x81, 0xCE, 0xE0, 0x04, 0xF0,
+ 0x80, 0xBA, 0x90, 0x80, 0xDE, 0xE0, 0x70, 0x24,
+ 0x90, 0x81, 0x2A, 0xE0, 0x70, 0x04, 0xFF, 0x12,
+ 0x49, 0x93, 0x90, 0x81, 0x2A, 0xE0, 0x64, 0x0C,
+ 0x60, 0x02, 0xD1, 0x26, 0x90, 0x81, 0x24, 0xE0,
+ 0x54, 0xF7, 0xF0, 0x54, 0xEF, 0xF0, 0x54, 0xBF,
+ 0xF0, 0x54, 0x7F, 0xF0, 0x22, 0x22, 0x90, 0x06,
+ 0x04, 0xE0, 0x54, 0x7F, 0xF0, 0x90, 0x05, 0x22,
+ 0xE4, 0xF0, 0x90, 0x81, 0x2A, 0x74, 0x0C, 0xF0,
+ 0x22, 0x90, 0x81, 0xED, 0xEF, 0xF0, 0xA3, 0xED,
+ 0xF0, 0xAD, 0x03, 0xAC, 0x02, 0xE4, 0x90, 0x81,
+ 0xF5, 0xF0, 0xA3, 0xF0, 0x90, 0x01, 0xC4, 0x74,
+ 0x39, 0xF0, 0x74, 0x66, 0xA3, 0xF0, 0xEC, 0x54,
+ 0x3F, 0xFC, 0x90, 0x01, 0x40, 0xED, 0xF0, 0xAE,
+ 0x04, 0xEE, 0xA3, 0xF0, 0x90, 0x81, 0xED, 0xE0,
+ 0x24, 0x81, 0x60, 0x34, 0x24, 0xDA, 0x60, 0x1C,
+ 0x24, 0x3C, 0x70, 0x41, 0x90, 0x81, 0xEE, 0xE0,
+ 0xC4, 0x33, 0x33, 0x33, 0x54, 0x80, 0x90, 0x81,
+ 0xF2, 0xF0, 0xA3, 0x74, 0x69, 0xF0, 0xA3, 0x74,
+ 0x80, 0xF0, 0x80, 0x2C, 0x90, 0x81, 0xEE, 0xE0,
+ 0x54, 0x01, 0x90, 0x81, 0xF2, 0xF0, 0xA3, 0x74,
+ 0xA5, 0xF0, 0xA3, 0x74, 0x01, 0xF0, 0x80, 0x18,
+ 0x90, 0x81, 0xEE, 0xE0, 0xC4, 0x54, 0x10, 0x90,
+ 0x81, 0xF2, 0xF0, 0xA3, 0x74, 0x7F, 0xF0, 0xA3,
+ 0x74, 0x10, 0xF0, 0x80, 0x03, 0x7F, 0x00, 0x22,
+ 0x90, 0x81, 0xF3, 0xE0, 0x90, 0x01, 0x06, 0xF0,
+ 0x90, 0x81, 0xF2, 0xE0, 0x60, 0x0E, 0x90, 0x01,
+ 0x42, 0xF0, 0x90, 0x81, 0xF1, 0xE0, 0x90, 0x01,
+ 0x43, 0xF0, 0x80, 0x0D, 0x90, 0x01, 0x43, 0xE4,
+ 0xF0, 0x90, 0x81, 0xF2, 0xE0, 0x90, 0x01, 0x42,
+ 0xF0, 0x90, 0x81, 0xF4, 0xE0, 0xFF, 0x90, 0x01,
+ 0x42, 0xE0, 0x5F, 0xFF, 0x90, 0x81, 0xF2, 0xE0,
+ 0x6F, 0x60, 0xEE, 0x74, 0x39, 0x04, 0x90, 0x01,
+ 0xC4, 0xF0, 0x74, 0x66, 0xA3, 0xF0, 0x90, 0x01,
+ 0x43, 0xE4, 0xF0, 0x7F, 0x01, 0x22, 0xE4, 0x90,
+ 0x81, 0x6A, 0xF0, 0x90, 0x87, 0x5F, 0xE0, 0x90,
+ 0x81, 0x69, 0xF0, 0xE4, 0x90, 0x81, 0x76, 0xF0,
+ 0x90, 0x81, 0x66, 0xF0, 0x90, 0x81, 0x66, 0xE0,
+ 0xFF, 0xC3, 0x94, 0x40, 0x50, 0x15, 0x74, 0x79,
+ 0x2F, 0xF5, 0x82, 0xE4, 0x34, 0x81, 0xF5, 0x83,
+ 0x74, 0xFF, 0xF0, 0x90, 0x81, 0x66, 0xE0, 0x04,
+ 0xF0, 0x80, 0xE1, 0xE4, 0x90, 0x81, 0x66, 0xF0,
+ 0x90, 0x81, 0x69, 0xE0, 0xFF, 0x90, 0x81, 0x66,
+ 0xE0, 0xFE, 0xC3, 0x9F, 0x40, 0x03, 0x02, 0x68,
+ 0x12, 0x74, 0xDF, 0x2E, 0xF9, 0xE4, 0x34, 0x86,
+ 0x75, 0x13, 0x01, 0xF5, 0x14, 0x89, 0x15, 0x75,
+ 0x16, 0x0A, 0x7B, 0x01, 0x7A, 0x81, 0x79, 0x5B,
+ 0x12, 0x2B, 0xED, 0x90, 0x81, 0x5C, 0xE0, 0xFF,
+ 0x12, 0x2F, 0x27, 0xEF, 0x04, 0x90, 0x81, 0x76,
+ 0xF0, 0x90, 0x81, 0x5B, 0xE0, 0xFF, 0xA3, 0xE0,
+ 0xFD, 0x12, 0x31, 0xEA, 0xEF, 0x24, 0xC8, 0x90,
+ 0x81, 0x78, 0xF0, 0x75, 0xF0, 0x08, 0xA4, 0xF0,
+ 0x90, 0x81, 0x5C, 0xE0, 0x54, 0x0F, 0x90, 0x81,
+ 0x77, 0xF0, 0xE4, 0x90, 0x81, 0x65, 0xF0, 0x90,
+ 0x81, 0x67, 0xF0, 0x90, 0x81, 0x67, 0xE0, 0xFF,
+ 0xC3, 0x94, 0x04, 0x50, 0x57, 0x90, 0x81, 0x77,
+ 0xE0, 0xFE, 0xA8, 0x07, 0x08, 0x80, 0x02, 0xC3,
+ 0x13, 0xD8, 0xFC, 0x20, 0xE0, 0x3E, 0x90, 0x81,
+ 0x67, 0xE0, 0x25, 0xE0, 0xFF, 0x90, 0x81, 0x78,
+ 0xE0, 0x2F, 0x24, 0x79, 0xF9, 0xE4, 0x34, 0x81,
+ 0xFA, 0x7B, 0x01, 0xC0, 0x03, 0xC0, 0x01, 0x90,
+ 0x81, 0x65, 0xE0, 0x75, 0xF0, 0x02, 0xA4, 0x24,
+ 0x5D, 0xF9, 0x74, 0x81, 0x35, 0xF0, 0x8B, 0x13,
+ 0xF5, 0x14, 0x89, 0x15, 0x75, 0x16, 0x02, 0xD0,
+ 0x01, 0xD0, 0x03, 0x12, 0x2B, 0xED, 0x90, 0x81,
+ 0x65, 0xE0, 0x04, 0xF0, 0x90, 0x81, 0x67, 0xE0,
+ 0x04, 0xF0, 0x80, 0x9F, 0x90, 0x81, 0x76, 0xE0,
+ 0xFF, 0x90, 0x81, 0x66, 0xE0, 0x2F, 0xF0, 0x02,
+ 0x67, 0x40, 0xE4, 0x90, 0x81, 0x6A, 0xF0, 0x90,
+ 0x81, 0x6A, 0xE0, 0xC3, 0x94, 0x40, 0x40, 0x02,
+ 0x41, 0xAF, 0xE0, 0xFF, 0x24, 0x79, 0xF5, 0x82,
+ 0xE4, 0x34, 0x81, 0xF5, 0x83, 0xE0, 0x90, 0x81,
+ 0x6C, 0xF0, 0xE0, 0xFE, 0x54, 0xF0, 0xC4, 0x54,
+ 0x0F, 0xFD, 0x90, 0x81, 0x6B, 0xF0, 0xEE, 0x54,
+ 0x0F, 0xFE, 0xA3, 0xF0, 0x74, 0x7A, 0x2F, 0xF5,
+ 0x82, 0xE4, 0x34, 0x81, 0xF5, 0x83, 0xE0, 0x90,
+ 0x81, 0x6D, 0xF0, 0xFC, 0xEE, 0xFE, 0xEC, 0xFB,
+ 0xEB, 0xFF, 0x90, 0x81, 0x72, 0xEE, 0xF0, 0xA3,
+ 0xEF, 0xF0, 0xED, 0x12, 0x45, 0x28, 0x68, 0x8B,
+ 0x00, 0x68, 0xC2, 0x01, 0x69, 0x73, 0x02, 0x6A,
+ 0xA0, 0x03, 0x69, 0x8E, 0x04, 0x69, 0xAF, 0x05,
+ 0x69, 0xAF, 0x06, 0x69, 0xAF, 0x07, 0x69, 0xAF,
+ 0x08, 0x6A, 0x33, 0x09, 0x6A, 0x69, 0x0A, 0x00,
+ 0x00, 0x6A, 0xAF, 0x90, 0x81, 0x6A, 0xE0, 0xFD,
+ 0x24, 0x7C, 0xF5, 0x82, 0xE4, 0x34, 0x81, 0xF5,
+ 0x83, 0xE0, 0xFE, 0x74, 0x7B, 0x2D, 0xF5, 0x82,
+ 0xE4, 0x34, 0x81, 0xF5, 0x83, 0xE0, 0xFD, 0xED,
+ 0xFF, 0x90, 0x81, 0x74, 0xEE, 0xF0, 0xFC, 0xA3,
+ 0xEF, 0xF0, 0x90, 0x81, 0x6D, 0xE0, 0xFF, 0x12,
+ 0x2F, 0x96, 0x90, 0x81, 0x68, 0x74, 0x02, 0xF0,
+ 0x41, 0xA0, 0x90, 0x81, 0x6A, 0xE0, 0x24, 0x7C,
+ 0xF5, 0x82, 0xE4, 0x34, 0x81, 0xF5, 0x83, 0xE0,
+ 0xFF, 0xE4, 0xFC, 0xFD, 0xFE, 0x78, 0x08, 0x12,
+ 0x20, 0xBB, 0xA8, 0x04, 0xA9, 0x05, 0xAA, 0x06,
+ 0xAB, 0x07, 0x90, 0x81, 0x6A, 0xE0, 0x24, 0x7B,
+ 0xF5, 0x82, 0xE4, 0x34, 0x81, 0xF5, 0x83, 0xE0,
+ 0xFF, 0xE4, 0xFC, 0xFD, 0xFE, 0x12, 0x44, 0xCC,
+ 0xC0, 0x04, 0xC0, 0x05, 0xC0, 0x06, 0xC0, 0x07,
+ 0x90, 0x81, 0x6A, 0xE0, 0x24, 0x7D, 0xF5, 0x82,
+ 0xE4, 0x34, 0x81, 0xF5, 0x83, 0xE0, 0xFF, 0xE4,
+ 0xFC, 0xFD, 0xFE, 0x78, 0x10, 0x12, 0x20, 0xBB,
+ 0xD0, 0x03, 0xD0, 0x02, 0xD0, 0x01, 0xD0, 0x00,
+ 0x12, 0x44, 0xCC, 0xC0, 0x04, 0xC0, 0x05, 0xC0,
+ 0x06, 0xC0, 0x07, 0x90, 0x81, 0x6A, 0xE0, 0x24,
+ 0x7E, 0xF5, 0x82, 0xE4, 0x34, 0x81, 0xF5, 0x83,
+ 0xE0, 0xFF, 0xE4, 0xFC, 0xFD, 0xFE, 0x78, 0x18,
+ 0x12, 0x20, 0xBB, 0xD0, 0x03, 0xD0, 0x02, 0xD0,
+ 0x01, 0xD0, 0x00, 0x12, 0x44, 0xCC, 0x90, 0x81,
+ 0x6E, 0x12, 0x20, 0xCE, 0x90, 0x81, 0x6E, 0x12,
+ 0x44, 0xD9, 0x90, 0x85, 0x96, 0x12, 0x20, 0xCE,
+ 0x90, 0x81, 0x72, 0xE0, 0xFE, 0xA3, 0xE0, 0xFF,
+ 0x12, 0x2E, 0xE4, 0x90, 0x81, 0x68, 0x74, 0x04,
+ 0xF0, 0x41, 0xA0, 0x90, 0x81, 0x6D, 0xE0, 0xFD,
+ 0x90, 0x81, 0x6A, 0xE0, 0x24, 0x7B, 0xF5, 0x82,
+ 0xE4, 0x34, 0x81, 0xF5, 0x83, 0xE0, 0xFB, 0xE4,
+ 0xFF, 0x12, 0x30, 0xC7, 0x80, 0x19, 0x90, 0x81,
+ 0x6D, 0xE0, 0xFD, 0x90, 0x81, 0x6A, 0xE0, 0x24,
+ 0x7B, 0xF5, 0x82, 0xE4, 0x34, 0x81, 0xF5, 0x83,
+ 0xE0, 0xFB, 0xE4, 0xFF, 0x12, 0x30, 0x6A, 0x90,
+ 0x81, 0x68, 0x74, 0x01, 0xF0, 0x41, 0xA0, 0x90,
+ 0x81, 0x68, 0x74, 0x02, 0xF0, 0x90, 0x81, 0x6A,
+ 0xE0, 0x24, 0x7C, 0xF5, 0x82, 0xE4, 0x34, 0x81,
+ 0xF5, 0x83, 0xE0, 0xFF, 0xE4, 0xFC, 0xFD, 0xFE,
+ 0x78, 0x08, 0x12, 0x20, 0xBB, 0xA8, 0x04, 0xA9,
+ 0x05, 0xAA, 0x06, 0xAB, 0x07, 0x90, 0x81, 0x6A,
+ 0xE0, 0x24, 0x7B, 0xF5, 0x82, 0xE4, 0x34, 0x81,
+ 0xF5, 0x83, 0xE0, 0xFF, 0xE4, 0xFC, 0xFD, 0xFE,
+ 0x12, 0x44, 0xCC, 0xC0, 0x04, 0xC0, 0x05, 0xC0,
+ 0x06, 0xC0, 0x07, 0x90, 0x81, 0x6C, 0xE0, 0xFF,
+ 0xE4, 0xFC, 0xFD, 0xFE, 0x78, 0x10, 0x12, 0x20,
+ 0xBB, 0xD0, 0x03, 0xD0, 0x02, 0xD0, 0x01, 0xD0,
+ 0x00, 0x12, 0x44, 0xCC, 0x90, 0x81, 0x6E, 0x12,
+ 0x20, 0xCE, 0x90, 0x81, 0x6B, 0xE0, 0x24, 0xFB,
+ 0xFF, 0xC0, 0x07, 0x90, 0x81, 0x6E, 0x12, 0x44,
+ 0xD9, 0x90, 0x81, 0xF9, 0x12, 0x20, 0xCE, 0x90,
+ 0x81, 0x6D, 0xE0, 0xFD, 0xD0, 0x07, 0x12, 0x55,
+ 0x1C, 0x80, 0x6D, 0x90, 0x81, 0x68, 0x74, 0x01,
+ 0xF0, 0x90, 0x81, 0x6A, 0xE0, 0x24, 0x7B, 0xF9,
+ 0xE4, 0x34, 0x81, 0x75, 0x13, 0x01, 0xF5, 0x14,
+ 0x89, 0x15, 0x75, 0x16, 0x01, 0x7B, 0xFE, 0x7A,
+ 0x80, 0x79, 0x33, 0x12, 0x2B, 0xED, 0x90, 0x81,
+ 0x6D, 0xE0, 0xFF, 0x90, 0x81, 0x6C, 0xE0, 0xFD,
+ 0xE4, 0x90, 0x81, 0xBC, 0xF0, 0x7B, 0x04, 0x80,
+ 0x34, 0x90, 0x81, 0x68, 0x74, 0x04, 0xF0, 0x90,
+ 0x81, 0x6A, 0xE0, 0x24, 0x7B, 0xF9, 0xE4, 0x34,
+ 0x81, 0x75, 0x13, 0x01, 0xF5, 0x14, 0x89, 0x15,
+ 0x75, 0x16, 0x04, 0x7B, 0xFE, 0x7A, 0x80, 0x79,
+ 0x33, 0x12, 0x2B, 0xED, 0x90, 0x81, 0x6D, 0xE0,
+ 0xFF, 0x90, 0x81, 0x6C, 0xE0, 0xFD, 0xE4, 0x90,
+ 0x81, 0xBC, 0xF0, 0x7B, 0x06, 0x12, 0x63, 0xE1,
+ 0x90, 0x81, 0x68, 0xE0, 0x24, 0x02, 0xFF, 0x90,
+ 0x81, 0x6A, 0xE0, 0x2F, 0xF0, 0x01, 0x17, 0x22,
+ 0x90, 0x02, 0x09, 0xE0, 0xFD, 0x12, 0x1F, 0xA4,
+ 0xFE, 0xAF, 0x05, 0xED, 0x2E, 0x90, 0x80, 0x3D,
+ 0xF0, 0x90, 0x00, 0x01, 0x12, 0x1F, 0xBD, 0xFF,
+ 0xED, 0x2F, 0x90, 0x80, 0x3E, 0xF0, 0x90, 0x00,
+ 0x02, 0x12, 0x1F, 0xBD, 0xFF, 0xED, 0x2F, 0x90,
+ 0x80, 0x3F, 0xF0, 0x90, 0x00, 0x03, 0x12, 0x1F,
+ 0xBD, 0xFF, 0xED, 0x2F, 0x90, 0x80, 0x40, 0xF0,
+ 0x90, 0x00, 0x04, 0x12, 0x1F, 0xBD, 0xFF, 0xAE,
+ 0x05, 0xED, 0x2F, 0x90, 0x80, 0x41, 0xF0, 0x22,
+ 0x90, 0x00, 0x02, 0x12, 0x1F, 0xBD, 0xFF, 0x30,
+ 0xE0, 0x26, 0x12, 0x1F, 0xA4, 0x90, 0x81, 0x38,
+ 0xF0, 0x90, 0x00, 0x01, 0x12, 0x1F, 0xBD, 0x90,
+ 0x81, 0x39, 0xF0, 0xEF, 0x54, 0xFE, 0xFF, 0xA3,
+ 0xE0, 0x54, 0x01, 0x4F, 0xF0, 0x90, 0x00, 0x03,
+ 0x12, 0x1F, 0xBD, 0x90, 0x81, 0x3B, 0xF0, 0x22,
+ 0x90, 0x81, 0x38, 0x74, 0x01, 0xF0, 0xA3, 0x74,
+ 0x05, 0xF0, 0xA3, 0xE0, 0x54, 0x01, 0x44, 0x28,
+ 0xF0, 0xA3, 0x74, 0x05, 0xF0, 0x22, 0x12, 0x1F,
+ 0xA4, 0x90, 0x81, 0x3E, 0xF0, 0x90, 0x81, 0x3E,
+ 0xE0, 0x90, 0x01, 0xE7, 0xF0, 0x22, 0x12, 0x1F,
+ 0xA4, 0x90, 0x81, 0x4A, 0xF0, 0x90, 0x00, 0x01,
+ 0x12, 0x1F, 0xBD, 0x90, 0x81, 0x4B, 0xF0, 0x22,
+ 0xD3, 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x90,
+ 0x81, 0xFD, 0xEE, 0xF0, 0xA3, 0xEF, 0xF0, 0xE4,
+ 0xA3, 0xF0, 0xA3, 0xF0, 0x90, 0x81, 0xFD, 0xE0,
+ 0xFE, 0xA3, 0xE0, 0xF5, 0x82, 0x8E, 0x83, 0xE0,
+ 0x60, 0x2D, 0xC3, 0x90, 0x82, 0x00, 0xE0, 0x94,
+ 0xE8, 0x90, 0x81, 0xFF, 0xE0, 0x94, 0x03, 0x40,
+ 0x0B, 0x90, 0x01, 0xC0, 0xE0, 0x44, 0x80, 0xF0,
+ 0x7F, 0x00, 0x80, 0x15, 0x90, 0x81, 0xFF, 0xE4,
+ 0x75, 0xF0, 0x01, 0x12, 0x44, 0xA9, 0x7F, 0x0A,
+ 0x7E, 0x00, 0x12, 0x32, 0xAA, 0x80, 0xC5, 0x7F,
+ 0x01, 0xD0, 0xD0, 0x92, 0xAF, 0x22, 0xD3, 0x10,
+ 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x90, 0x81, 0xD1,
+ 0x12, 0x45, 0x1F, 0x90, 0x82, 0x0A, 0xE0, 0xFF,
+ 0x04, 0xF0, 0x90, 0x00, 0x01, 0xEF, 0x12, 0x1F,
+ 0xFC, 0x7F, 0xAF, 0x7E, 0x01, 0x71, 0x60, 0xEF,
+ 0x60, 0x3A, 0x90, 0x81, 0xD1, 0x12, 0x45, 0x16,
+ 0x8B, 0x13, 0x8A, 0x14, 0x89, 0x15, 0x90, 0x00,
+ 0x0E, 0x12, 0x1F, 0xBD, 0x24, 0x02, 0xF5, 0x16,
+ 0x7B, 0x01, 0x7A, 0x01, 0x79, 0xA0, 0x12, 0x2B,
+ 0xED, 0x90, 0x81, 0xD1, 0x12, 0x45, 0x16, 0x90,
+ 0x00, 0x0E, 0x12, 0x1F, 0xBD, 0x90, 0x01, 0xAE,
+ 0xF0, 0xA3, 0x74, 0xFF, 0xF0, 0x90, 0x01, 0xCB,
+ 0xE0, 0x64, 0x80, 0xF0, 0xD0, 0xD0, 0x92, 0xAF,
+ 0x22, 0xD3, 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0,
+ 0xE4, 0xFF, 0x90, 0x80, 0xD9, 0xE0, 0xFE, 0x90,
+ 0x80, 0xD8, 0xE0, 0xFD, 0xB5, 0x06, 0x04, 0x7E,
+ 0x01, 0x80, 0x02, 0x7E, 0x00, 0xEE, 0x64, 0x01,
+ 0x60, 0x32, 0x90, 0x01, 0xAF, 0xE0, 0x70, 0x13,
+ 0xED, 0x75, 0xF0, 0x0F, 0xA4, 0x24, 0x42, 0xF9,
+ 0x74, 0x80, 0x35, 0xF0, 0xFA, 0x7B, 0x01, 0x71,
+ 0xB6, 0x7F, 0x01, 0xEF, 0x60, 0x16, 0x90, 0x80,
+ 0xD8, 0xE0, 0x04, 0xF0, 0xE0, 0x7F, 0x00, 0xB4,
+ 0x0A, 0x02, 0x7F, 0x01, 0xEF, 0x60, 0x05, 0xE4,
+ 0x90, 0x80, 0xD8, 0xF0, 0xD0, 0xD0, 0x92, 0xAF,
+ 0x22, 0x8F, 0x0D, 0x22, 0x8F, 0x0E, 0x22, 0x22,
+ 0x90, 0x01, 0x34, 0xE0, 0x55, 0x3D, 0xF5, 0x41,
+ 0xA3, 0xE0, 0x55, 0x3E, 0xF5, 0x42, 0xA3, 0xE0,
+ 0x55, 0x3F, 0xF5, 0x43, 0xA3, 0xE0, 0x55, 0x40,
+ 0xF5, 0x44, 0x90, 0x01, 0x34, 0xE5, 0x41, 0xF0,
+ 0xA3, 0xE5, 0x42, 0xF0, 0xA3, 0xE5, 0x43, 0xF0,
+ 0xA3, 0xE5, 0x44, 0xF0, 0x22, 0x90, 0x01, 0x3C,
+ 0xE0, 0x55, 0x45, 0xF5, 0x49, 0xA3, 0xE0, 0x55,
+ 0x46, 0xF5, 0x4A, 0xA3, 0xE0, 0x55, 0x47, 0xF5,
+ 0x4B, 0xA3, 0xE0, 0x55, 0x48, 0xF5, 0x4C, 0x90,
+ 0x01, 0x3C, 0xE5, 0x49, 0xF0, 0xA3, 0xE5, 0x4A,
+ 0xF0, 0xA3, 0xE5, 0x4B, 0xF0, 0xA3, 0xE5, 0x4C,
+ 0xF0, 0x53, 0x91, 0xDF, 0x22, 0x90, 0x81, 0x1F,
+ 0xE0, 0x30, 0xE0, 0x05, 0xE4, 0xA3, 0xF0, 0xA3,
+ 0xF0, 0x22, 0x90, 0x80, 0xDE, 0xE0, 0x64, 0x01,
+ 0x70, 0x19, 0x90, 0x81, 0x27, 0xE0, 0x60, 0x13,
+ 0x90, 0x01, 0x57, 0xE4, 0xF0, 0x90, 0x01, 0x3C,
+ 0x74, 0x02, 0x12, 0x4F, 0xF4, 0x90, 0x01, 0x57,
+ 0x74, 0x05, 0xF0, 0x22, 0x90, 0x80, 0xDE, 0xE0,
+ 0x64, 0x01, 0x70, 0x26, 0x90, 0x81, 0x27, 0xE0,
+ 0x60, 0x20, 0x90, 0x01, 0x57, 0xE4, 0xF0, 0x90,
+ 0x01, 0x3C, 0x74, 0x02, 0xF0, 0x90, 0x81, 0x24,
+ 0xE0, 0x54, 0xFB, 0xF0, 0x90, 0x81, 0x2B, 0xE0,
+ 0x54, 0xFD, 0xF0, 0x54, 0x07, 0x70, 0x03, 0x12,
+ 0x47, 0x2A, 0x22, 0x90, 0x80, 0xDE, 0xE0, 0xB4,
+ 0x01, 0x14, 0x90, 0x81, 0x27, 0xE0, 0x60, 0x0E,
+ 0x90, 0x81, 0x26, 0xE0, 0x54, 0x0F, 0x64, 0x02,
+ 0x60, 0x02, 0x80, 0x03, 0xD1, 0x7F, 0x22, 0x90,
+ 0x04, 0x1D, 0xE0, 0x70, 0x13, 0x90, 0x80, 0x3E,
+ 0xE0, 0xFF, 0xE4, 0xFD, 0xB1, 0x69, 0x8E, 0x4E,
+ 0x8F, 0x4F, 0x90, 0x04, 0x1F, 0x74, 0x20, 0xF0,
+ 0x22, 0xD3, 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0,
+ 0x90, 0x82, 0x0E, 0xED, 0xF0, 0x90, 0x82, 0x0D,
+ 0xEF, 0xF0, 0xE4, 0xFD, 0xFC, 0xF1, 0x37, 0x7C,
+ 0x00, 0xAD, 0x07, 0x90, 0x82, 0x0D, 0xE0, 0x90,
+ 0x04, 0x25, 0xF0, 0x90, 0x82, 0x0E, 0xE0, 0x60,
+ 0x0E, 0x74, 0x0F, 0x2F, 0xF5, 0x82, 0xE4, 0x34,
+ 0xFC, 0xF5, 0x83, 0xE0, 0x44, 0x80, 0xF0, 0xAF,
+ 0x05, 0x74, 0x08, 0x2F, 0xF5, 0x82, 0xE4, 0x34,
+ 0xFC, 0xF5, 0x83, 0xE4, 0xF0, 0x74, 0x09, 0x2F,
+ 0xF5, 0x82, 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xE0,
+ 0x54, 0xF0, 0xF0, 0x74, 0x21, 0x2D, 0xF5, 0x82,
+ 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xE0, 0x54, 0xF7,
+ 0xF0, 0xAE, 0x04, 0xAF, 0x05, 0xD0, 0xD0, 0x92,
+ 0xAF, 0x22, 0x8F, 0x4E, 0xF1, 0x4B, 0xBF, 0x01,
+ 0x18, 0x90, 0x80, 0x40, 0xE0, 0xFF, 0x7D, 0x01,
+ 0xB1, 0x69, 0xAD, 0x07, 0xAC, 0x06, 0xAF, 0x4E,
+ 0x12, 0x4F, 0x82, 0x90, 0x04, 0x1F, 0x74, 0x20,
+ 0xF0, 0x22, 0x90, 0x06, 0xA9, 0xE0, 0x90, 0x81,
+ 0x4C, 0xF0, 0xE0, 0xFD, 0x54, 0xC0, 0x70, 0x09,
+ 0x90, 0x81, 0x2B, 0xE0, 0x54, 0xFE, 0xF0, 0x80,
+ 0x72, 0xED, 0x30, 0xE6, 0x4B, 0x90, 0x81, 0x27,
+ 0xE0, 0x64, 0x02, 0x70, 0x2A, 0x90, 0x81, 0x24,
+ 0xE0, 0xFF, 0xC3, 0x13, 0x20, 0xE0, 0x09, 0x90,
+ 0x81, 0x2B, 0xE0, 0x44, 0x01, 0xF0, 0x80, 0x28,
+ 0x90, 0x81, 0x26, 0xE0, 0x54, 0x0F, 0x64, 0x01,
+ 0x70, 0x2D, 0x90, 0x81, 0x2B, 0xE0, 0x44, 0x04,
+ 0xF0, 0x7F, 0x01, 0xB1, 0xD2, 0x80, 0x20, 0x90,
+ 0x81, 0x2B, 0xE0, 0x44, 0x01, 0xF0, 0x90, 0x81,
+ 0x26, 0xE0, 0x54, 0x0F, 0x64, 0x02, 0x60, 0x04,
+ 0xB1, 0x4F, 0x80, 0x0B, 0xD1, 0x7F, 0x80, 0x07,
+ 0x90, 0x81, 0x2B, 0xE0, 0x54, 0xFE, 0xF0, 0x90,
+ 0x81, 0x4C, 0xE0, 0x90, 0x81, 0x2B, 0x30, 0xE7,
+ 0x11, 0x12, 0x4F, 0xF1, 0x90, 0x01, 0x57, 0x74,
+ 0x05, 0xF0, 0x90, 0x81, 0x24, 0xE0, 0x44, 0x04,
+ 0xF0, 0x22, 0xE0, 0x54, 0xFD, 0xF0, 0x22, 0x90,
+ 0x01, 0x5F, 0xE4, 0xF0, 0x90, 0x01, 0x3C, 0x74,
+ 0x08, 0xF0, 0xE4, 0xF5, 0x1D, 0x90, 0x81, 0x3A,
+ 0xE0, 0xC3, 0x13, 0x54, 0x7F, 0xF5, 0x1E, 0xE4,
+ 0xFB, 0xFD, 0x7F, 0x5C, 0x7E, 0x01, 0x12, 0x50,
+ 0x05, 0x90, 0x01, 0x5F, 0x74, 0x05, 0xF0, 0x90,
+ 0x06, 0x92, 0x74, 0x02, 0xF0, 0x90, 0x81, 0x24,
+ 0xE0, 0x44, 0x10, 0xF0, 0x90, 0x81, 0x2A, 0xE0,
+ 0x64, 0x0C, 0x60, 0x0C, 0xE4, 0xFD, 0x7F, 0x0C,
+ 0x12, 0x47, 0x3D, 0xE4, 0xFF, 0x12, 0x4F, 0x0D,
+ 0x22, 0xE4, 0x90, 0x81, 0x4C, 0xF0, 0x90, 0x06,
+ 0xA9, 0xE0, 0x90, 0x81, 0x4C, 0xF0, 0xE0, 0x54,
+ 0xC0, 0x70, 0x0D, 0x90, 0x81, 0x2B, 0xE0, 0x54,
+ 0xFE, 0xF0, 0x54, 0xFD, 0xF0, 0x02, 0x47, 0x2A,
+ 0x90, 0x81, 0x4C, 0xE0, 0x30, 0xE6, 0x21, 0x90,
+ 0x81, 0x27, 0xE0, 0x64, 0x01, 0x70, 0x20, 0x90,
+ 0x81, 0x2B, 0xE0, 0x44, 0x01, 0xF0, 0x90, 0x81,
+ 0x26, 0xE0, 0x54, 0x0F, 0x64, 0x02, 0x60, 0x04,
+ 0xB1, 0x4F, 0x80, 0x0B, 0xD1, 0x7F, 0x80, 0x07,
+ 0x90, 0x81, 0x2B, 0xE0, 0x54, 0xFE, 0xF0, 0x90,
+ 0x81, 0x4C, 0xE0, 0x90, 0x81, 0x2B, 0x30, 0xE7,
+ 0x11, 0x12, 0x4F, 0xF1, 0x90, 0x01, 0x57, 0x74,
+ 0x05, 0xF0, 0x90, 0x81, 0x24, 0xE0, 0x44, 0x04,
+ 0xF0, 0x22, 0xE0, 0x54, 0xFD, 0xF0, 0x22, 0xE4,
+ 0xFE, 0xEF, 0xC3, 0x13, 0xFD, 0xEF, 0x30, 0xE0,
+ 0x02, 0x7E, 0x80, 0x90, 0xFD, 0x10, 0xED, 0xF0,
+ 0xAF, 0x06, 0x22, 0xD3, 0x10, 0xAF, 0x01, 0xC3,
+ 0xC0, 0xD0, 0x90, 0x04, 0x1D, 0xE0, 0x60, 0x1A,
+ 0x90, 0x05, 0x22, 0xE0, 0x54, 0x90, 0x60, 0x07,
+ 0x90, 0x01, 0xC0, 0xE0, 0x44, 0x08, 0xF0, 0x90,
+ 0x01, 0xC6, 0xE0, 0x30, 0xE1, 0xE4, 0x7F, 0x00,
+ 0x80, 0x02, 0x7F, 0x01, 0xD0, 0xD0, 0x92, 0xAF,
+ 0x22, 0x90, 0x81, 0x27, 0xE0, 0x60, 0x03, 0x12,
+ 0x73, 0xE1, 0x90, 0x81, 0x3F, 0xE0, 0x30, 0xE0,
+ 0x03, 0x12, 0x49, 0xDD, 0x22, 0x90, 0x81, 0x27,
+ 0xE0, 0x60, 0x35, 0x90, 0x06, 0x92, 0xE0, 0x30,
+ 0xE1, 0x24, 0xE4, 0xF5, 0x1D, 0x90, 0x81, 0x3A,
+ 0xE0, 0xC3, 0x13, 0x54, 0x7F, 0xF5, 0x1E, 0xE4,
+ 0xFB, 0xFD, 0x7F, 0x5C, 0x7E, 0x01, 0x12, 0x50,
+ 0x05, 0x90, 0x01, 0x5F, 0x74, 0x05, 0xF0, 0x90,
+ 0x06, 0x92, 0x74, 0x02, 0xF0, 0x22, 0x90, 0x81,
+ 0x24, 0xE0, 0x54, 0xEF, 0xF0, 0x12, 0x47, 0x2A,
+ 0x22, 0x12, 0x71, 0x48, 0x90, 0x81, 0x4D, 0xEF,
+ 0xF0, 0x90, 0x81, 0x24, 0x30, 0xE0, 0x06, 0xE0,
+ 0x44, 0x01, 0xF0, 0x80, 0x04, 0xE0, 0x54, 0xFE,
+ 0xF0, 0x90, 0x81, 0x4D, 0xE0, 0x30, 0xE6, 0x11,
+ 0x90, 0x01, 0x2F, 0xE0, 0x30, 0xE7, 0x04, 0xE4,
+ 0xF0, 0x80, 0x06, 0x90, 0x01, 0x2F, 0x74, 0x80,
+ 0xF0, 0x90, 0x81, 0x24, 0xE0, 0x30, 0xE0, 0x1A,
+ 0x90, 0x81, 0x32, 0xE4, 0xF0, 0xA3, 0x74, 0x07,
+ 0xF0, 0x90, 0x81, 0x32, 0xA3, 0xE0, 0x90, 0x05,
+ 0x58, 0xF0, 0x90, 0x04, 0xEC, 0xE0, 0x54, 0xDD,
+ 0xF0, 0x22, 0x90, 0x04, 0xEC, 0xE0, 0x44, 0x22,
+ 0xF0, 0x22, 0x90, 0x81, 0x4A, 0xE0, 0x60, 0x0F,
+ 0xE4, 0xF0, 0x90, 0x05, 0x53, 0xE0, 0x44, 0x01,
+ 0xF0, 0x90, 0x05, 0xFD, 0xE0, 0x04, 0xF0, 0x22,
+ 0x90, 0x81, 0x24, 0xE0, 0xFF, 0xC4, 0x13, 0x13,
+ 0x54, 0x03, 0x30, 0xE0, 0x27, 0xEF, 0x54, 0xBF,
+ 0xF0, 0x90, 0x04, 0xE0, 0xE0, 0x90, 0x81, 0x25,
+ 0x30, 0xE0, 0x06, 0xE0, 0x44, 0x01, 0xF0, 0x80,
+ 0x10, 0xE0, 0x54, 0xFE, 0xF0, 0x90, 0x01, 0xB9,
+ 0x74, 0x01, 0xF0, 0x90, 0x01, 0xB8, 0x74, 0x04,
+ 0xF0, 0x12, 0x47, 0x2A, 0xE4, 0xFF, 0x90, 0x81,
+ 0x45, 0xE0, 0x30, 0xE0, 0x48, 0x90, 0x81, 0x49,
+ 0xE0, 0xFD, 0x60, 0x41, 0x74, 0x01, 0x7E, 0x00,
+ 0xA8, 0x07, 0x08, 0x80, 0x05, 0xC3, 0x33, 0xCE,
+ 0x33, 0xCE, 0xD8, 0xF9, 0xFF, 0x90, 0x04, 0xE0,
+ 0xE0, 0xFB, 0xEF, 0x5B, 0x60, 0x06, 0xE4, 0x90,
+ 0x81, 0x49, 0xF0, 0x22, 0x90, 0x81, 0x47, 0xE0,
+ 0xD3, 0x9D, 0x50, 0x10, 0x90, 0x01, 0xC7, 0x74,
+ 0x10, 0xF0, 0x11, 0xBE, 0x90, 0x81, 0x45, 0xE0,
+ 0x54, 0xFE, 0xF0, 0x22, 0x12, 0x4F, 0x0B, 0x90,
+ 0x81, 0x49, 0xE0, 0x04, 0xF0, 0x22, 0x90, 0x80,
+ 0x3C, 0xE0, 0x64, 0x02, 0x60, 0x07, 0x90, 0x06,
+ 0x90, 0xE0, 0x44, 0x01, 0xF0, 0x22, 0x90, 0x81,
+ 0x24, 0xE0, 0xFF, 0xC4, 0x13, 0x13, 0x13, 0x54,
+ 0x01, 0x30, 0xE0, 0x2C, 0xEF, 0x54, 0x7F, 0xF0,
+ 0x90, 0x04, 0xE0, 0xE0, 0x90, 0x81, 0x25, 0x30,
+ 0xE1, 0x06, 0xE0, 0x44, 0x02, 0xF0, 0x80, 0x0F,
+ 0xE0, 0x54, 0xFD, 0xF0, 0x90, 0x01, 0xB9, 0x74,
+ 0x01, 0xF0, 0x90, 0x01, 0xB8, 0x04, 0xF0, 0x90,
+ 0x81, 0x27, 0xE0, 0x60, 0x03, 0x12, 0x47, 0x2A,
+ 0x7F, 0x01, 0x01, 0x6E, 0xC3, 0xEE, 0x94, 0x01,
+ 0x40, 0x0A, 0x0D, 0xED, 0x13, 0x90, 0xFD, 0x10,
+ 0xF0, 0xE4, 0x2F, 0xFF, 0x22, 0xC3, 0xEE, 0x94,
+ 0x01, 0x40, 0x24, 0x90, 0xFD, 0x11, 0xE0, 0x6D,
+ 0x70, 0x1A, 0x90, 0x01, 0x17, 0xE0, 0xB5, 0x05,
+ 0x0D, 0x90, 0x01, 0xE4, 0x74, 0x77, 0xF0, 0x90,
+ 0xFD, 0x11, 0xE4, 0xF0, 0x80, 0x06, 0xED, 0x04,
+ 0x90, 0xFD, 0x11, 0xF0, 0xE4, 0x2F, 0xFF, 0x22,
+ 0xE4, 0x90, 0x81, 0x4E, 0xF0, 0xA3, 0xF0, 0xA3,
+ 0xF0, 0x90, 0x00, 0x83, 0xE0, 0x90, 0x81, 0x4E,
+ 0xF0, 0x90, 0x00, 0x83, 0xE0, 0xFE, 0x90, 0x81,
+ 0x4E, 0xE0, 0xFF, 0xB5, 0x06, 0x01, 0x22, 0xC3,
+ 0x90, 0x81, 0x50, 0xE0, 0x94, 0x64, 0x90, 0x81,
+ 0x4F, 0xE0, 0x94, 0x00, 0x40, 0x0D, 0x90, 0x01,
+ 0xC0, 0xE0, 0x44, 0x40, 0xF0, 0x90, 0x81, 0x4E,
+ 0xE0, 0xFF, 0x22, 0x90, 0x81, 0x4F, 0xE4, 0x75,
+ 0xF0, 0x01, 0x12, 0x44, 0xA9, 0x80, 0xC2, 0x74,
+ 0x45, 0x2F, 0xF8, 0xE6, 0xFE, 0xED, 0xF4, 0x5E,
+ 0xFE, 0xF6, 0x74, 0x38, 0x2F, 0xF5, 0x82, 0xE4,
+ 0x34, 0x01, 0xF5, 0x83, 0xEE, 0xF0, 0x22, 0xD3,
+ 0x10, 0xAF, 0x01, 0xC3, 0xC0, 0xD0, 0x90, 0x82,
+ 0x12, 0xED, 0xF0, 0x90, 0x82, 0x11, 0xEF, 0xF0,
+ 0xD3, 0x94, 0x07, 0x50, 0x70, 0xE0, 0xFF, 0x74,
+ 0x01, 0xA8, 0x07, 0x08, 0x80, 0x02, 0xC3, 0x33,
+ 0xD8, 0xFC, 0xF4, 0xFF, 0x90, 0x00, 0x47, 0xE0,
+ 0x5F, 0xFD, 0x7F, 0x47, 0x12, 0x32, 0x1E, 0x90,
+ 0x82, 0x11, 0xE0, 0xFF, 0x74, 0x01, 0xA8, 0x07,
+ 0x08, 0x80, 0x02, 0xC3, 0x33, 0xD8, 0xFC, 0xFF,
+ 0x90, 0x00, 0x46, 0xE0, 0x4F, 0xFD, 0x7F, 0x46,
+ 0x12, 0x32, 0x1E, 0x90, 0x82, 0x12, 0xE0, 0x60,
+ 0x18, 0x90, 0x82, 0x11, 0xE0, 0xFF, 0x74, 0x01,
+ 0xA8, 0x07, 0x08, 0x80, 0x02, 0xC3, 0x33, 0xD8,
+ 0xFC, 0xFF, 0x90, 0x00, 0x45, 0xE0, 0x4F, 0x80,
+ 0x17, 0x90, 0x82, 0x11, 0xE0, 0xFF, 0x74, 0x01,
+ 0xA8, 0x07, 0x08, 0x80, 0x02, 0xC3, 0x33, 0xD8,
+ 0xFC, 0xF4, 0xFF, 0x90, 0x00, 0x45, 0xE0, 0x5F,
+ 0xFD, 0x7F, 0x45, 0x80, 0x7E, 0x90, 0x82, 0x11,
+ 0xE0, 0x24, 0xF8, 0xF0, 0xE0, 0x24, 0x04, 0xFF,
+ 0x74, 0x01, 0xA8, 0x07, 0x08, 0x80, 0x02, 0xC3,
+ 0x33, 0xD8, 0xFC, 0xF4, 0xFF, 0x90, 0x00, 0x43,
+ 0xE0, 0x5F, 0xFD, 0x7F, 0x43, 0x12, 0x32, 0x1E,
+ 0x90, 0x82, 0x11, 0xE0, 0xFF, 0x74, 0x01, 0xA8,
+ 0x07, 0x08, 0x80, 0x02, 0xC3, 0x33, 0xD8, 0xFC,
+ 0xFF, 0x90, 0x00, 0x43, 0xE0, 0x4F, 0xFD, 0x7F,
+ 0x43, 0x12, 0x32, 0x1E, 0x90, 0x82, 0x12, 0xE0,
+ 0x60, 0x1D, 0x90, 0x82, 0x11, 0xE0, 0x24, 0x04,
+ 0xFF, 0x74, 0x01, 0xA8, 0x07, 0x08, 0x80, 0x02,
+ 0xC3, 0x33, 0xD8, 0xFC, 0xFF, 0x90, 0x00, 0x42,
+ 0xE0, 0x4F, 0xFD, 0x7F, 0x42, 0x80, 0x1C, 0x90,
+ 0x82, 0x11, 0xE0, 0x24, 0x04, 0xFF, 0x74, 0x01,
+ 0xA8, 0x07, 0x08, 0x80, 0x02, 0xC3, 0x33, 0xD8,
+ 0xFC, 0xF4, 0xFF, 0x90, 0x00, 0x42, 0xE0, 0x5F,
+ 0xFD, 0x7F, 0x42, 0x12, 0x32, 0x1E, 0xD0, 0xD0,
+ 0x92, 0xAF, 0x22, 0x90, 0x81, 0x24, 0xE0, 0x54,
+ 0xFB, 0xF0, 0xE4, 0x90, 0x81, 0x30, 0xF0, 0x90,
+ 0x81, 0x2B, 0xF0, 0x22, 0xEF, 0x24, 0xFE, 0x60,
+ 0x0C, 0x04, 0x70, 0x28, 0x90, 0x81, 0x2D, 0x74,
+ 0x01, 0xF0, 0xA3, 0xF0, 0x22, 0xED, 0x70, 0x0A,
+ 0x90, 0x81, 0x3B, 0xE0, 0x90, 0x81, 0x2D, 0xF0,
+ 0x80, 0x05, 0x90, 0x81, 0x2D, 0xED, 0xF0, 0x90,
+ 0x81, 0x2D, 0xE0, 0xA3, 0xF0, 0x90, 0x81, 0x25,
+ 0xE0, 0x44, 0x08, 0xF0, 0x22, 0x12, 0x4E, 0xAB,
+ 0xEF, 0x64, 0x01, 0x60, 0x08, 0x90, 0x01, 0xB8,
+ 0x74, 0x01, 0xF0, 0x80, 0x67, 0x90, 0x81, 0x2B,
+ 0xE0, 0xFF, 0x54, 0x03, 0x60, 0x08, 0x90, 0x01,
+ 0xB8, 0x74, 0x02, 0xF0, 0x80, 0x56, 0x90, 0x81,
+ 0x29, 0xE0, 0xFE, 0xE4, 0xC3, 0x9E, 0x50, 0x08,
+ 0x90, 0x01, 0xB8, 0x74, 0x04, 0xF0, 0x80, 0x44,
+ 0xEF, 0x30, 0xE2, 0x08, 0x90, 0x01, 0xB8, 0x74,
+ 0x08, 0xF0, 0x80, 0x38, 0x90, 0x81, 0x2B, 0xE0,
+ 0x30, 0xE4, 0x08, 0x90, 0x01, 0xB8, 0x74, 0x10,
+ 0xF0, 0x80, 0x29, 0x90, 0x81, 0x25, 0xE0, 0x13,
+ 0x13, 0x54, 0x3F, 0x20, 0xE0, 0x08, 0x90, 0x01,
+ 0xB8, 0x74, 0x20, 0xF0, 0x80, 0x16, 0x90, 0x81,
+ 0x3E, 0xE0, 0x60, 0x08, 0x90, 0x01, 0xB8, 0x74,
+ 0x80, 0xF0, 0x80, 0x08, 0x90, 0x01, 0xB8, 0xE4,
+ 0xF0, 0x7F, 0x01, 0x22, 0x90, 0x01, 0xB9, 0x74,
+ 0x04, 0xF0, 0x7F, 0x00, 0x22, 0xEF, 0x60, 0x42,
+ 0x90, 0x80, 0xDE, 0xE0, 0x64, 0x01, 0x70, 0x3A,
+ 0x90, 0x81, 0x25, 0xE0, 0x54, 0xFE, 0xF0, 0x90,
+ 0x05, 0x22, 0x74, 0x0F, 0xF0, 0x90, 0x06, 0x04,
+ 0xE0, 0x54, 0xBF, 0xF0, 0xE4, 0xFF, 0x12, 0x4F,
+ 0x0D, 0xBF, 0x01, 0x12, 0x90, 0x81, 0x24, 0xE0,
+ 0x44, 0x40, 0xF0, 0x90, 0x81, 0x2A, 0x74, 0x06,
+ 0xF0, 0x90, 0x81, 0x23, 0xF0, 0x22, 0x90, 0x01,
+ 0xB9, 0x74, 0x01, 0xF0, 0x90, 0x01, 0xB8, 0x74,
+ 0x08, 0xF0, 0x22, 0x90, 0x05, 0x22, 0x74, 0x6F,
+ 0xF0, 0x90, 0x05, 0x27, 0xE0, 0x54, 0xBF, 0xF0,
+ 0x90, 0x81, 0x2A, 0x74, 0x02, 0xF0, 0x90, 0x81,
+ 0x23, 0xF0, 0x22, 0x12, 0x54, 0x65, 0x90, 0x81,
+ 0x2A, 0x74, 0x0C, 0xF0, 0x90, 0x81, 0x23, 0xF0,
+ 0x22, 0x90, 0x81, 0x24, 0xE0, 0xFF, 0x13, 0x13,
+ 0x54, 0x3F, 0x30, 0xE0, 0x11, 0xEF, 0x54, 0xFB,
+ 0xF0, 0x90, 0x81, 0x2B, 0xE0, 0x54, 0xFD, 0xF0,
+ 0x54, 0x07, 0x70, 0x42, 0x80, 0x3D, 0x90, 0x81,
+ 0x30, 0xE0, 0x04, 0xF0, 0x90, 0x81, 0x2B, 0xE0,
+ 0x54, 0xEF, 0xF0, 0x90, 0x81, 0x30, 0xE0, 0xFF,
+ 0xB4, 0x01, 0x02, 0x80, 0x04, 0xEF, 0xB4, 0x02,
+ 0x06, 0x90, 0x05, 0x58, 0xE0, 0x04, 0xF0, 0x90,
+ 0x81, 0x38, 0xE0, 0xFF, 0x90, 0x81, 0x30, 0xE0,
+ 0xD3, 0x9F, 0x40, 0x0F, 0x90, 0x80, 0xDE, 0xE0,
+ 0xB4, 0x01, 0x0B, 0x90, 0x81, 0x25, 0xE0, 0x54,
+ 0xFB, 0xF0, 0x22, 0x12, 0x47, 0x2A, 0x22, 0x22,
+ 0x90, 0x05, 0x2B, 0xE0, 0x7F, 0x00, 0x30, 0xE7,
+ 0x02, 0x7F, 0x01, 0x22, 0x90, 0x05, 0x22, 0x74,
+ 0xFF, 0xF0, 0x90, 0x05, 0x27, 0xE0, 0x44, 0x40,
+ 0xF0, 0x90, 0x81, 0x22, 0x74, 0x03, 0xF0, 0x22,
+ 0x90, 0x05, 0x27, 0xE0, 0x44, 0x40, 0xF0, 0x12,
+ 0x49, 0xDD, 0x90, 0x81, 0x22, 0x74, 0x02, 0xF0,
+ 0x22, 0x12, 0x49, 0xE3, 0x90, 0x81, 0x22, 0x74,
+ 0x02, 0xF0, 0x22, 0x90, 0x05, 0x22, 0x74, 0x6F,
+ 0xF0, 0x90, 0x05, 0x27, 0xE0, 0x54, 0xBF, 0xF0,
+ 0x90, 0x81, 0x22, 0x74, 0x04, 0xF0, 0x22, 0xAE,
+ 0x07, 0x12, 0x51, 0x73, 0xBF, 0x01, 0x12, 0x90,
+ 0x81, 0x23, 0xE0, 0x64, 0x02, 0x60, 0x0A, 0xAF,
+ 0x06, 0x7D, 0x01, 0x12, 0x47, 0x3D, 0x7F, 0x01,
+ 0x22, 0x7F, 0x00, 0x22, 0x90, 0x01, 0x57, 0xE0,
+ 0x60, 0x48, 0xE4, 0xF0, 0x90, 0x01, 0x3C, 0x74,
+ 0x02, 0xF0, 0x90, 0x81, 0x24, 0xE0, 0xFF, 0x13,
+ 0x13, 0x54, 0x3F, 0x30, 0xE0, 0x0C, 0xEF, 0x54,
+ 0xFB, 0xF0, 0x90, 0x81, 0x2B, 0xE0, 0x54, 0xFD,
+ 0xF0, 0x22, 0x90, 0x81, 0x30, 0xE0, 0x04, 0xF0,
+ 0x90, 0x81, 0x2B, 0xE0, 0x54, 0xEF, 0xF0, 0x90,
+ 0x81, 0x38, 0xE0, 0xFF, 0x90, 0x81, 0x30, 0xE0,
+ 0xD3, 0x9F, 0x40, 0x0E, 0x90, 0x80, 0xDE, 0xE0,
+ 0xB4, 0x01, 0x07, 0x90, 0x81, 0x25, 0xE0, 0x54,
+ 0xFB, 0xF0, 0x22, 0x90, 0x80, 0x3F, 0xE0, 0xFF,
+ 0x7D, 0x01, 0x12, 0x6D, 0x69, 0x8E, 0x54, 0x8F,
+ 0x55, 0xAD, 0x55, 0xAC, 0x54, 0xAF, 0x53, 0x12,
+ 0x4F, 0x82, 0xAF, 0x55, 0xAE, 0x54, 0x90, 0x04,
+ 0x80, 0xE0, 0x54, 0x0F, 0xFD, 0xAC, 0x07, 0x74,
+ 0x11, 0x2C, 0xF5, 0x82, 0xE4, 0x34, 0xFC, 0xF5,
+ 0x83, 0xE0, 0x44, 0x01, 0xF0, 0x74, 0x11, 0x2C,
+ 0xF5, 0x82, 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xE0,
+ 0x54, 0xFB, 0xF0, 0xAC, 0x07, 0x74, 0x16, 0x2C,
+ 0xF5, 0x82, 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xE0,
+ 0x44, 0xFA, 0xF0, 0x74, 0x15, 0x2C, 0xF5, 0x82,
+ 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xE0, 0x44, 0x1F,
+ 0xF0, 0xAC, 0x07, 0x74, 0x06, 0x2C, 0xF5, 0x82,
+ 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xE0, 0x44, 0x0F,
+ 0xF0, 0x90, 0x04, 0x53, 0xE4, 0xF0, 0x90, 0x04,
+ 0x52, 0xF0, 0x90, 0x04, 0x51, 0x74, 0xFF, 0xF0,
+ 0x90, 0x04, 0x50, 0x74, 0xFD, 0xF0, 0x74, 0x14,
+ 0x2C, 0xF5, 0x82, 0xE4, 0x34, 0xFC, 0xF5, 0x83,
+ 0xE0, 0x54, 0xC0, 0x4D, 0xFD, 0x74, 0x14, 0x2F,
+ 0xF5, 0x82, 0xE4, 0x34, 0xFC, 0xF5, 0x83, 0xED,
+ 0xF0, 0x22, 0xAB, 0x07, 0xAA, 0x06, 0xED, 0x2B,
+ 0xFB, 0xE4, 0x3A, 0xFA, 0xC3, 0x90, 0x80, 0xDB,
+ 0xE0, 0x9B, 0x90, 0x80, 0xDA, 0xE0, 0x9A, 0x50,
+ 0x13, 0xA3, 0xE0, 0x24, 0x01, 0xFF, 0x90, 0x80,
+ 0xDA, 0xE0, 0x34, 0x00, 0xFE, 0xC3, 0xEB, 0x9F,
+ 0xFB, 0xEA, 0x9E, 0xFA, 0xEA, 0x90, 0xFD, 0x11,
+ 0xF0, 0xAF, 0x03, 0x74, 0x00, 0x2F, 0xF5, 0x82,
+ 0xE4, 0x34, 0xFB, 0xF5, 0x83, 0xE0, 0xFF, 0x22,
+ 0x12, 0x1F, 0xA4, 0xFF, 0x54, 0x01, 0xFE, 0x90,
+ 0x81, 0x42, 0xE0, 0x54, 0xFE, 0x4E, 0xF0, 0xEF,
+ 0xC3, 0x13, 0x30, 0xE0, 0x0A, 0x90, 0x00, 0x01,
+ 0x12, 0x1F, 0xBD, 0x90, 0x81, 0x43, 0xF0, 0x22,
+ 0x90, 0x81, 0x45, 0xE0, 0x30, 0xE0, 0x2D, 0x90,
+ 0x81, 0x48, 0xE0, 0x04, 0xF0, 0xE0, 0xFF, 0x90,
+ 0x81, 0x46, 0xE0, 0xB5, 0x07, 0x1E, 0x90, 0x06,
+ 0x92, 0xE0, 0x54, 0x1C, 0x70, 0x0B, 0x12, 0x4F,
+ 0x0B, 0x90, 0x81, 0x49, 0xE0, 0x04, 0xF0, 0x80,
+ 0x06, 0x90, 0x06, 0x92, 0x74, 0x1C, 0xF0, 0xE4,
+ 0x90, 0x81, 0x48, 0xF0, 0x22, 0x00, 0xBB, 0x8E,
+};
diff --git a/drivers/staging/rtl8188eu/hal/Hal8188EPwrSeq.c b/drivers/staging/rtl8188eu/hal/Hal8188EPwrSeq.c
new file mode 100644
index 00000000000..fc23bf15934
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/Hal8188EPwrSeq.c
@@ -0,0 +1,86 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include "Hal8188EPwrSeq.h"
+#include <rtl8188e_hal.h>
+
+/*
+ * Drivers should parse the arrays below and perform the corresponding
+ * actions.
+ */
+/* 3 Power on Array */
+struct wl_pwr_cfg rtl8188E_power_on_flow[RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS + RTL8188E_TRANS_END_STEPS] = {
+ RTL8188E_TRANS_CARDEMU_TO_ACT
+ RTL8188E_TRANS_END
+};
+
+/* 3 Radio off Array */
+struct wl_pwr_cfg rtl8188E_radio_off_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_END_STEPS] = {
+ RTL8188E_TRANS_ACT_TO_CARDEMU
+ RTL8188E_TRANS_END
+};
+
+/* 3 Card Disable Array */
+struct wl_pwr_cfg rtl8188E_card_disable_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS + RTL8188E_TRANS_END_STEPS] = {
+ RTL8188E_TRANS_ACT_TO_CARDEMU
+ RTL8188E_TRANS_CARDEMU_TO_CARDDIS
+ RTL8188E_TRANS_END
+};
+
+/* 3 Card Enable Array */
+struct wl_pwr_cfg rtl8188E_card_enable_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS + RTL8188E_TRANS_END_STEPS] = {
+ RTL8188E_TRANS_CARDDIS_TO_CARDEMU
+ RTL8188E_TRANS_CARDEMU_TO_ACT
+ RTL8188E_TRANS_END
+};
+
+/* 3 Suspend Array */
+struct wl_pwr_cfg rtl8188E_suspend_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS + RTL8188E_TRANS_END_STEPS] = {
+ RTL8188E_TRANS_ACT_TO_CARDEMU
+ RTL8188E_TRANS_CARDEMU_TO_SUS
+ RTL8188E_TRANS_END
+};
+
+/* 3 Resume Array */
+struct wl_pwr_cfg rtl8188E_resume_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS + RTL8188E_TRANS_END_STEPS] = {
+ RTL8188E_TRANS_SUS_TO_CARDEMU
+ RTL8188E_TRANS_CARDEMU_TO_ACT
+ RTL8188E_TRANS_END
+};
+
+/* 3 HWPDN Array */
+struct wl_pwr_cfg rtl8188E_hwpdn_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS + RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS + RTL8188E_TRANS_END_STEPS] = {
+ RTL8188E_TRANS_ACT_TO_CARDEMU
+ RTL8188E_TRANS_CARDEMU_TO_PDN
+ RTL8188E_TRANS_END
+};
+
+/* 3 Enter LPS */
+struct wl_pwr_cfg rtl8188E_enter_lps_flow[RTL8188E_TRANS_ACT_TO_LPS_STEPS + RTL8188E_TRANS_END_STEPS] = {
+ /* FW behavior */
+ RTL8188E_TRANS_ACT_TO_LPS
+ RTL8188E_TRANS_END
+};
+
+/* 3 Leave LPS */
+struct wl_pwr_cfg rtl8188E_leave_lps_flow[RTL8188E_TRANS_LPS_TO_ACT_STEPS + RTL8188E_TRANS_END_STEPS] = {
+ /* FW behavior */
+ RTL8188E_TRANS_LPS_TO_ACT
+ RTL8188E_TRANS_END
+};
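
The note at the top of Hal8188EPwrSeq.c says drivers are expected to parse these flow arrays and carry out the corresponding actions. A minimal, self-contained sketch of that idea follows; the simplified pwr_step structure, the PWR_CMD_* values and the reg_read()/reg_write() helpers are illustrative assumptions and do not reproduce the driver's real wl_pwr_cfg layout or register API.

#include <stdint.h>

enum pwr_cmd { PWR_CMD_WRITE, PWR_CMD_POLLING, PWR_CMD_DELAY, PWR_CMD_END };

/* simplified stand-in for one step of a power-sequence flow */
struct pwr_step {
	uint16_t offset;	/* register address */
	uint8_t  cmd;		/* one of enum pwr_cmd */
	uint8_t  msk;		/* bits affected by this step */
	uint8_t  value;		/* new value for the masked bits */
};

/* hypothetical register accessors provided by the bus layer */
extern uint8_t reg_read(uint16_t addr);
extern void reg_write(uint16_t addr, uint8_t val);

/* Walk one flow array until the terminating PWR_CMD_END step. */
void run_pwr_flow(const struct pwr_step *flow)
{
	for (; flow->cmd != PWR_CMD_END; flow++) {
		if (flow->cmd == PWR_CMD_WRITE) {
			uint8_t v = reg_read(flow->offset);

			v = (uint8_t)((v & ~flow->msk) | (flow->value & flow->msk));
			reg_write(flow->offset, v);
		}
		/* PWR_CMD_POLLING and PWR_CMD_DELAY handling is omitted here. */
	}
}

In the real flow tables each step also encodes extra selection information (such as which chip cut or interface it applies to), which this sketch deliberately ignores.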
diff --git a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
new file mode 100644
index 00000000000..aaa261771ab
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
@@ -0,0 +1,760 @@
+/*++
+Copyright (c) Realtek Semiconductor Corp. All rights reserved.
+
+Module Name:
+ RateAdaptive.c
+
+Abstract:
+ Implement Rate Adaptive functions for common operations.
+
+Major Change History:
+ When Who What
+ ---------- --------------- -------------------------------
+ 2011-08-12 Page Create.
+
+--*/
+#include "odm_precomp.h"
+
+/* Rate adaptive parameters */
+
+static u8 RETRY_PENALTY[PERENTRY][RETRYSIZE+1] = {
+ {5, 4, 3, 2, 0, 3}, /* 92 , idx = 0 */
+ {6, 5, 4, 3, 0, 4}, /* 86 , idx = 1 */
+ {6, 5, 4, 2, 0, 4}, /* 81 , idx = 2 */
+ {8, 7, 6, 4, 0, 6}, /* 75 , idx = 3 */
+ {10, 9, 8, 6, 0, 8}, /* 71 , idx = 4 */
+ {10, 9, 8, 4, 0, 8}, /* 66 , idx = 5 */
+ {10, 9, 8, 2, 0, 8}, /* 62 , idx = 6 */
+ {10, 9, 8, 0, 0, 8}, /* 59 , idx = 7 */
+ {18, 17, 16, 8, 0, 16}, /* 53 , idx = 8 */
+ {26, 25, 24, 16, 0, 24}, /* 50 , idx = 9 */
+ {34, 33, 32, 24, 0, 32}, /* 47 , idx = 0x0a */
+ {34, 31, 28, 20, 0, 32}, /* 43 , idx = 0x0b */
+ {34, 31, 27, 18, 0, 32}, /* 40 , idx = 0x0c */
+ {34, 31, 26, 16, 0, 32}, /* 37 , idx = 0x0d */
+ {34, 30, 22, 16, 0, 32}, /* 32 , idx = 0x0e */
+ {34, 30, 24, 16, 0, 32}, /* 26 , idx = 0x0f */
+ {49, 46, 40, 16, 0, 48}, /* 20 , idx = 0x10 */
+ {49, 45, 32, 0, 0, 48}, /* 17 , idx = 0x11 */
+ {49, 45, 22, 18, 0, 48}, /* 15 , idx = 0x12 */
+ {49, 40, 24, 16, 0, 48}, /* 12 , idx = 0x13 */
+ {49, 32, 18, 12, 0, 48}, /* 9 , idx = 0x14 */
+ {49, 22, 18, 14, 0, 48}, /* 6 , idx = 0x15 */
+ {49, 16, 16, 0, 0, 48}
+ }; /* 3, idx = 0x16 */
+
+static u8 PT_PENALTY[RETRYSIZE+1] = {34, 31, 30, 24, 0, 32};
+
+/* Wilson's modification */
+static u8 RETRY_PENALTY_IDX[2][RATESIZE] = {
+ {4, 4, 4, 5, 4, 4, 5, 7, 7, 7, 8, 0x0a, /* SS>TH */
+ 4, 4, 4, 4, 6, 0x0a, 0x0b, 0x0d,
+ 5, 5, 7, 7, 8, 0x0b, 0x0d, 0x0f}, /* 0329 R01 */
+ {0x0a, 0x0a, 0x0b, 0x0c, 0x0a,
+ 0x0a, 0x0b, 0x0c, 0x0d, 0x10, 0x13, 0x14, /* SS<TH */
+ 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x11, 0x13, 0x15,
+ 9, 9, 9, 9, 0x0c, 0x0e, 0x11, 0x13}
+ };
+
+static u8 RETRY_PENALTY_UP_IDX[RATESIZE] = {
+ 0x0c, 0x0d, 0x0d, 0x0f, 0x0d, 0x0e,
+ 0x0f, 0x0f, 0x10, 0x12, 0x13, 0x14, /* SS>TH */
+ 0x0f, 0x10, 0x10, 0x12, 0x12, 0x13, 0x14, 0x15,
+ 0x11, 0x11, 0x12, 0x13, 0x13, 0x13, 0x14, 0x15};
+
+static u8 RSSI_THRESHOLD[RATESIZE] = {
+ 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0x24, 0x26, 0x2a,
+ 0x18, 0x1a, 0x1d, 0x1f, 0x21, 0x27, 0x29, 0x2a,
+ 0, 0, 0, 0x1f, 0x23, 0x28, 0x2a, 0x2c};
+
+static u16 N_THRESHOLD_HIGH[RATESIZE] = {
+ 4, 4, 8, 16,
+ 24, 36, 48, 72, 96, 144, 192, 216,
+ 60, 80, 100, 160, 240, 400, 560, 640,
+ 300, 320, 480, 720, 1000, 1200, 1600, 2000};
+static u16 N_THRESHOLD_LOW[RATESIZE] = {
+ 2, 2, 4, 8,
+ 12, 18, 24, 36, 48, 72, 96, 108,
+ 30, 40, 50, 80, 120, 200, 280, 320,
+ 150, 160, 240, 360, 500, 600, 800, 1000};
+
+static u8 DROPING_NECESSARY[RATESIZE] = {
+ 1, 1, 1, 1,
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 1, 2, 3, 4, 5, 6, 7, 8,
+ 5, 6, 7, 8, 9, 10, 11, 12};
+
+static u8 PendingForRateUpFail[5] = {2, 10, 24, 40, 60};
+static u16 DynamicTxRPTTiming[6] = {
+	0x186a, 0x30d4, 0x493e, 0x61a8, 0x7a12, 0x927c}; /* 200ms-1200ms */
+
+/* End Rate adaptive parameters */
+
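+/*
+ * Pick the TX report timing interval from DynamicTxRPTTiming[]:
+ * extend == 0 resets to the default 200 ms, extend == 1 steps the
+ * interval up (towards 1200 ms) and extend == 2 steps it back down.
+ */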
+static void odm_SetTxRPTTiming_8188E(
+ struct odm_dm_struct *dm_odm,
+ struct odm_ra_info *pRaInfo,
+ u8 extend
+ )
+{
+ u8 idx = 0;
+
+ for (idx = 0; idx < 5; idx++)
+ if (DynamicTxRPTTiming[idx] == pRaInfo->RptTime)
+ break;
+
+ if (extend == 0) { /* back to default timing */
+ idx = 0; /* 200ms */
+ } else if (extend == 1) {/* increase the timing */
+ idx += 1;
+ if (idx > 5)
+ idx = 5;
+ } else if (extend == 2) {/* decrease the timing */
+ if (idx != 0)
+ idx -= 1;
+ }
+ pRaInfo->RptTime = DynamicTxRPTTiming[idx];
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("pRaInfo->RptTime = 0x%x\n", pRaInfo->RptTime));
+}
+
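+/*
+ * Fall back to the next lower rate that is enabled in RAUseRate.  Short GI
+ * is dropped before the rate itself is lowered, and the TX report timing
+ * is shortened again via odm_SetTxRPTTiming_8188E().
+ */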
+static int odm_RateDown_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_info *pRaInfo)
+{
+ u8 RateID, LowestRate, HighestRate;
+ u8 i;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE, ("=====>odm_RateDown_8188E()\n"));
+ if (NULL == pRaInfo) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("odm_RateDown_8188E(): pRaInfo is NULL\n"));
+ return -1;
+ }
+ RateID = pRaInfo->PreRate;
+ LowestRate = pRaInfo->LowestRate;
+ HighestRate = pRaInfo->HighestRate;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+ (" RateID =%d LowestRate =%d HighestRate =%d RateSGI =%d\n",
+ RateID, LowestRate, HighestRate, pRaInfo->RateSGI));
+ if (RateID > HighestRate) {
+ RateID = HighestRate;
+ } else if (pRaInfo->RateSGI) {
+ pRaInfo->RateSGI = 0;
+ } else if (RateID > LowestRate) {
+ if (RateID > 0) {
+ for (i = RateID-1; i > LowestRate; i--) {
+ if (pRaInfo->RAUseRate & BIT(i)) {
+ RateID = i;
+ goto RateDownFinish;
+ }
+ }
+ }
+ } else if (RateID <= LowestRate) {
+ RateID = LowestRate;
+ }
+RateDownFinish:
+ if (pRaInfo->RAWaitingCounter == 1) {
+ pRaInfo->RAWaitingCounter += 1;
+ pRaInfo->RAPendingCounter += 1;
+ } else if (pRaInfo->RAWaitingCounter == 0) {
+ ;
+ } else {
+ pRaInfo->RAWaitingCounter = 0;
+ pRaInfo->RAPendingCounter = 0;
+ }
+
+ if (pRaInfo->RAPendingCounter >= 4)
+ pRaInfo->RAPendingCounter = 4;
+
+ pRaInfo->DecisionRate = RateID;
+ odm_SetTxRPTTiming_8188E(dm_odm, pRaInfo, 2);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("Rate down, RPT Timing default\n"));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE, ("RAWaitingCounter %d, RAPendingCounter %d", pRaInfo->RAWaitingCounter, pRaInfo->RAPendingCounter));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("Rate down to RateID %d RateSGI %d\n", RateID, pRaInfo->RateSGI));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE, ("<===== odm_RateDown_8188E()\n"));
+ return 0;
+}
+
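+/*
+ * Try the next higher enabled rate in the RAUseRate bitmap; when already at
+ * HighestRate, enable short GI if SGIEnable is set.  RAWaitingCounter and
+ * RAPendingCounter space out further up-shift attempts after a failed one.
+ */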
+static int odm_RateUp_8188E(
+ struct odm_dm_struct *dm_odm,
+ struct odm_ra_info *pRaInfo
+ )
+{
+ u8 RateID, HighestRate;
+ u8 i;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE, ("=====>odm_RateUp_8188E()\n"));
+ if (NULL == pRaInfo) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("odm_RateUp_8188E(): pRaInfo is NULL\n"));
+ return -1;
+ }
+ RateID = pRaInfo->PreRate;
+ HighestRate = pRaInfo->HighestRate;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+ (" RateID =%d HighestRate =%d\n",
+ RateID, HighestRate));
+ if (pRaInfo->RAWaitingCounter == 1) {
+ pRaInfo->RAWaitingCounter = 0;
+ pRaInfo->RAPendingCounter = 0;
+ } else if (pRaInfo->RAWaitingCounter > 1) {
+ pRaInfo->PreRssiStaRA = pRaInfo->RssiStaRA;
+ goto RateUpfinish;
+ }
+ odm_SetTxRPTTiming_8188E(dm_odm, pRaInfo, 0);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("odm_RateUp_8188E():Decrease RPT Timing\n"));
+
+ if (RateID < HighestRate) {
+ for (i = RateID+1; i <= HighestRate; i++) {
+ if (pRaInfo->RAUseRate & BIT(i)) {
+ RateID = i;
+ goto RateUpfinish;
+ }
+ }
+ } else if (RateID == HighestRate) {
+ if (pRaInfo->SGIEnable && (pRaInfo->RateSGI != 1))
+ pRaInfo->RateSGI = 1;
+ else if ((pRaInfo->SGIEnable) != 1)
+ pRaInfo->RateSGI = 0;
+ } else {
+ RateID = HighestRate;
+ }
+RateUpfinish:
+ if (pRaInfo->RAWaitingCounter == (4+PendingForRateUpFail[pRaInfo->RAPendingCounter]))
+ pRaInfo->RAWaitingCounter = 0;
+ else
+ pRaInfo->RAWaitingCounter++;
+
+ pRaInfo->DecisionRate = RateID;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("Rate up to RateID %d\n", RateID));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE, ("RAWaitingCounter %d, RAPendingCounter %d", pRaInfo->RAWaitingCounter, pRaInfo->RAPendingCounter));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE, ("<===== odm_RateUp_8188E()\n"));
+ return 0;
+}
+
+static void odm_ResetRaCounter_8188E(struct odm_ra_info *pRaInfo)
+{
+ u8 RateID;
+
+ RateID = pRaInfo->DecisionRate;
+ pRaInfo->NscUp = (N_THRESHOLD_HIGH[RateID]+N_THRESHOLD_LOW[RateID])>>1;
+ pRaInfo->NscDown = (N_THRESHOLD_HIGH[RateID]+N_THRESHOLD_LOW[RateID])>>1;
+}
+
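+/*
+ * Core rate decision.  The per-retry-level histogram RTY[0..4] is weighted
+ * by a RETRY_PENALTY row (selected from the current rate and whether RSSI is
+ * above RSSI_THRESHOLD) and accumulated into NscDown/NscUp, while
+ * TOTAL * RETRY_PENALTY[...][5] is subtracted each round.  NscDown below
+ * N_THRESHOLD_LOW or too many drops triggers a rate down; NscUp above
+ * N_THRESHOLD_HIGH triggers a rate up.  If the rate stays unchanged for four
+ * reports in a row, the TX report interval is extended.
+ */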
+static void odm_RateDecision_8188E(struct odm_dm_struct *dm_odm,
+ struct odm_ra_info *pRaInfo
+ )
+{
+ u8 RateID = 0, RtyPtID = 0, PenaltyID1 = 0, PenaltyID2 = 0;
+ /* u32 pool_retry; */
+ static u8 DynamicTxRPTTimingCounter;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE, ("=====>odm_RateDecision_8188E()\n"));
+
+ if (pRaInfo->Active && (pRaInfo->TOTAL > 0)) { /* STA in use and data packets exist */
+ if ((pRaInfo->RssiStaRA < (pRaInfo->PreRssiStaRA - 3)) ||
+ (pRaInfo->RssiStaRA > (pRaInfo->PreRssiStaRA + 3))) {
+ pRaInfo->RAWaitingCounter = 0;
+ pRaInfo->RAPendingCounter = 0;
+ }
+ /* Start RA decision */
+ if (pRaInfo->PreRate > pRaInfo->HighestRate)
+ RateID = pRaInfo->HighestRate;
+ else
+ RateID = pRaInfo->PreRate;
+ if (pRaInfo->RssiStaRA > RSSI_THRESHOLD[RateID])
+ RtyPtID = 0;
+ else
+ RtyPtID = 1;
+ PenaltyID1 = RETRY_PENALTY_IDX[RtyPtID][RateID]; /* TODO by page */
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+ (" NscDown init is %d\n", pRaInfo->NscDown));
+ pRaInfo->NscDown += pRaInfo->RTY[0] * RETRY_PENALTY[PenaltyID1][0];
+ pRaInfo->NscDown += pRaInfo->RTY[1] * RETRY_PENALTY[PenaltyID1][1];
+ pRaInfo->NscDown += pRaInfo->RTY[2] * RETRY_PENALTY[PenaltyID1][2];
+ pRaInfo->NscDown += pRaInfo->RTY[3] * RETRY_PENALTY[PenaltyID1][3];
+ pRaInfo->NscDown += pRaInfo->RTY[4] * RETRY_PENALTY[PenaltyID1][4];
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+ (" NscDown is %d, total*penalty[5] is %d\n",
+ pRaInfo->NscDown, (pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID1][5])));
+ if (pRaInfo->NscDown > (pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID1][5]))
+ pRaInfo->NscDown -= pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID1][5];
+ else
+ pRaInfo->NscDown = 0;
+
+ /* rate up */
+ PenaltyID2 = RETRY_PENALTY_UP_IDX[RateID];
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+ (" NscUp init is %d\n", pRaInfo->NscUp));
+ pRaInfo->NscUp += pRaInfo->RTY[0] * RETRY_PENALTY[PenaltyID2][0];
+ pRaInfo->NscUp += pRaInfo->RTY[1] * RETRY_PENALTY[PenaltyID2][1];
+ pRaInfo->NscUp += pRaInfo->RTY[2] * RETRY_PENALTY[PenaltyID2][2];
+ pRaInfo->NscUp += pRaInfo->RTY[3] * RETRY_PENALTY[PenaltyID2][3];
+ pRaInfo->NscUp += pRaInfo->RTY[4] * RETRY_PENALTY[PenaltyID2][4];
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+ ("NscUp is %d, total*up[5] is %d\n",
+ pRaInfo->NscUp, (pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID2][5])));
+ if (pRaInfo->NscUp > (pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID2][5]))
+ pRaInfo->NscUp -= pRaInfo->TOTAL * RETRY_PENALTY[PenaltyID2][5];
+ else
+ pRaInfo->NscUp = 0;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE|ODM_COMP_INIT, ODM_DBG_LOUD,
+ (" RssiStaRa = %d RtyPtID =%d PenaltyID1 = 0x%x PenaltyID2 = 0x%x RateID =%d NscDown =%d NscUp =%d SGI =%d\n",
+ pRaInfo->RssiStaRA, RtyPtID, PenaltyID1, PenaltyID2, RateID, pRaInfo->NscDown, pRaInfo->NscUp, pRaInfo->RateSGI));
+ if ((pRaInfo->NscDown < N_THRESHOLD_LOW[RateID]) ||
+ (pRaInfo->DROP > DROPING_NECESSARY[RateID]))
+ odm_RateDown_8188E(dm_odm, pRaInfo);
+ else if (pRaInfo->NscUp > N_THRESHOLD_HIGH[RateID])
+ odm_RateUp_8188E(dm_odm, pRaInfo);
+
+ if (pRaInfo->DecisionRate > pRaInfo->HighestRate)
+ pRaInfo->DecisionRate = pRaInfo->HighestRate;
+
+ if ((pRaInfo->DecisionRate) == (pRaInfo->PreRate))
+ DynamicTxRPTTimingCounter += 1;
+ else
+ DynamicTxRPTTimingCounter = 0;
+
+ if (DynamicTxRPTTimingCounter >= 4) {
+ odm_SetTxRPTTiming_8188E(dm_odm, pRaInfo, 1);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE,
+ ODM_DBG_LOUD, ("<===== Rate don't change 4 times, Extend RPT Timing\n"));
+ DynamicTxRPTTimingCounter = 0;
+ }
+
+ pRaInfo->PreRate = pRaInfo->DecisionRate; /* YJ, add, 120120 */
+
+ odm_ResetRaCounter_8188E(pRaInfo);
+ }
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE, ("<===== odm_RateDecision_8188E()\n"));
+}
+
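+/*
+ * Rebuild the usable rate set: mask RateMask by the rate group implied by
+ * RateID (or by the ARFR0-3 registers for IDs 12-15), then derive
+ * HighestRate, LowestRate and the power-training mode PTModeSS from the
+ * resulting bitmap.
+ */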
+static int odm_ARFBRefresh_8188E(struct odm_dm_struct *dm_odm, struct odm_ra_info *pRaInfo)
+{ /* Wilson 2011/10/26 */
+ u32 MaskFromReg;
+ s8 i;
+
+ switch (pRaInfo->RateID) {
+ case RATR_INX_WIRELESS_NGB:
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&0x0f8ff015;
+ break;
+ case RATR_INX_WIRELESS_NG:
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&0x0f8ff010;
+ break;
+ case RATR_INX_WIRELESS_NB:
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&0x0f8ff005;
+ break;
+ case RATR_INX_WIRELESS_N:
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&0x0f8ff000;
+ break;
+ case RATR_INX_WIRELESS_GB:
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&0x00000ff5;
+ break;
+ case RATR_INX_WIRELESS_G:
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&0x00000ff0;
+ break;
+ case RATR_INX_WIRELESS_B:
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&0x0000000d;
+ break;
+ case 12:
+ MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR0);
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&MaskFromReg;
+ break;
+ case 13:
+ MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR1);
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&MaskFromReg;
+ break;
+ case 14:
+ MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR2);
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&MaskFromReg;
+ break;
+ case 15:
+ MaskFromReg = ODM_Read4Byte(dm_odm, REG_ARFR3);
+ pRaInfo->RAUseRate = (pRaInfo->RateMask)&MaskFromReg;
+ break;
+ default:
+ pRaInfo->RAUseRate = (pRaInfo->RateMask);
+ break;
+ }
+ /* Highest rate */
+ if (pRaInfo->RAUseRate) {
+ for (i = RATESIZE - 1; i >= 0; i--) {
+ if ((pRaInfo->RAUseRate)&BIT(i)) {
+ pRaInfo->HighestRate = i;
+ break;
+ }
+ }
+ } else {
+ pRaInfo->HighestRate = 0;
+ }
+ /* Lowest rate */
+ if (pRaInfo->RAUseRate) {
+ for (i = 0; i < RATESIZE; i++) {
+ if ((pRaInfo->RAUseRate) & BIT(i)) {
+ pRaInfo->LowestRate = i;
+ break;
+ }
+ }
+ } else {
+ pRaInfo->LowestRate = 0;
+ }
+ if (pRaInfo->HighestRate > 0x13)
+ pRaInfo->PTModeSS = 3;
+ else if (pRaInfo->HighestRate > 0x0b)
+ pRaInfo->PTModeSS = 2;
+ else if (pRaInfo->HighestRate > 0x03)
+ pRaInfo->PTModeSS = 1;
+ else
+ pRaInfo->PTModeSS = 0;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
+ ("ODM_ARFBRefresh_8188E(): PTModeSS =%d\n", pRaInfo->PTModeSS));
+
+ if (pRaInfo->DecisionRate > pRaInfo->HighestRate)
+ pRaInfo->DecisionRate = pRaInfo->HighestRate;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
+ ("ODM_ARFBRefresh_8188E(): RateID =%d RateMask =%8.8x RAUseRate =%8.8x HighestRate =%d, DecisionRate =%d\n",
+ pRaInfo->RateID, pRaInfo->RateMask, pRaInfo->RAUseRate, pRaInfo->HighestRate, pRaInfo->DecisionRate));
+ return 0;
+}
+
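+/*
+ * Power-training try state: decide whether the current DecisionRate is high
+ * enough (per PTModeSS) to probe a new PT stage, and advance PTStage when
+ * the RSSI or rate has changed or PTStopCount reaches its limit.  Below an
+ * RSSI of 48 power training is abandoned (PTStage = 0).
+ */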
+static void odm_PTTryState_8188E(struct odm_ra_info *pRaInfo)
+{
+ pRaInfo->PTTryState = 0;
+ switch (pRaInfo->PTModeSS) {
+ case 3:
+ if (pRaInfo->DecisionRate >= 0x19)
+ pRaInfo->PTTryState = 1;
+ break;
+ case 2:
+ if (pRaInfo->DecisionRate >= 0x11)
+ pRaInfo->PTTryState = 1;
+ break;
+ case 1:
+ if (pRaInfo->DecisionRate >= 0x0a)
+ pRaInfo->PTTryState = 1;
+ break;
+ case 0:
+ if (pRaInfo->DecisionRate >= 0x03)
+ pRaInfo->PTTryState = 1;
+ break;
+ default:
+ pRaInfo->PTTryState = 0;
+ break;
+ }
+
+ if (pRaInfo->RssiStaRA < 48) {
+ pRaInfo->PTStage = 0;
+ } else if (pRaInfo->PTTryState == 1) {
+ if ((pRaInfo->PTStopCount >= 10) ||
+ (pRaInfo->PTPreRssi > pRaInfo->RssiStaRA + 5) ||
+ (pRaInfo->PTPreRssi < pRaInfo->RssiStaRA - 5) ||
+ (pRaInfo->DecisionRate != pRaInfo->PTPreRate)) {
+ if (pRaInfo->PTStage == 0)
+ pRaInfo->PTStage = 1;
+ else if (pRaInfo->PTStage == 1)
+ pRaInfo->PTStage = 3;
+ else
+ pRaInfo->PTStage = 5;
+
+ pRaInfo->PTPreRssi = pRaInfo->RssiStaRA;
+ pRaInfo->PTStopCount = 0;
+ } else {
+ pRaInfo->RAstage = 0;
+ pRaInfo->PTStopCount++;
+ }
+ } else {
+ pRaInfo->PTStage = 0;
+ pRaInfo->RAstage = 0;
+ }
+ pRaInfo->PTPreRate = pRaInfo->DecisionRate;
+}
+
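+/*
+ * Power-training decision: weigh the retry histogram with PT_PENALTY, fold
+ * the result into the PTSmoothFactor smoothing term (capped at 192) and map
+ * it back to the next PTStage; heavy packet drops force the stage back to 0.
+ */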
+static void odm_PTDecision_8188E(struct odm_ra_info *pRaInfo)
+{
+ u8 j;
+ u8 temp_stage;
+ u32 numsc;
+ u32 num_total;
+ u8 stage_id;
+
+ numsc = 0;
+ num_total = pRaInfo->TOTAL * PT_PENALTY[5];
+ for (j = 0; j <= 4; j++) {
+ numsc += pRaInfo->RTY[j] * PT_PENALTY[j];
+ if (numsc > num_total)
+ break;
+ }
+
+ j = j >> 1;
+ temp_stage = (pRaInfo->PTStage + 1) >> 1;
+ if (temp_stage > j)
+ stage_id = temp_stage-j;
+ else
+ stage_id = 0;
+
+ pRaInfo->PTSmoothFactor = (pRaInfo->PTSmoothFactor>>1) + (pRaInfo->PTSmoothFactor>>2) + stage_id*16+2;
+ if (pRaInfo->PTSmoothFactor > 192)
+ pRaInfo->PTSmoothFactor = 192;
+ stage_id = pRaInfo->PTSmoothFactor >> 6;
+ temp_stage = stage_id*2;
+ if (temp_stage != 0)
+ temp_stage -= 1;
+ if (pRaInfo->DROP > 3)
+ temp_stage = 0;
+ pRaInfo->PTStage = temp_stage;
+}
+
+static void
+odm_RATxRPTTimerSetting(
+ struct odm_dm_struct *dm_odm,
+ u16 minRptTime
+)
+{
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE, (" =====>odm_RATxRPTTimerSetting()\n"));
+
+ if (dm_odm->CurrminRptTime != minRptTime) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
+ (" CurrminRptTime = 0x%04x minRptTime = 0x%04x\n", dm_odm->CurrminRptTime, minRptTime));
+ rtw_rpt_timer_cfg_cmd(dm_odm->Adapter, minRptTime);
+ dm_odm->CurrminRptTime = minRptTime;
+ }
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE, (" <===== odm_RATxRPTTimerSetting()\n"));
+}
+
+void
+ODM_RASupport_Init(
+ struct odm_dm_struct *dm_odm
+ )
+{
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("=====>ODM_RASupport_Init()\n"));
+
+ /* 2012/02/14 MH Be noticed, the init must be after IC type is recognized!!!!! */
+ if (dm_odm->SupportICType == ODM_RTL8188E)
+ dm_odm->RaSupport88E = true;
+}
+
+int ODM_RAInfo_Init(struct odm_dm_struct *dm_odm, u8 macid)
+{
+ struct odm_ra_info *pRaInfo = &dm_odm->RAInfo[macid];
+ u8 WirelessMode = 0xFF; /* invalid value */
+ u8 max_rate_idx = 0x13; /* MCS7 */
+ if (dm_odm->pWirelessMode != NULL)
+ WirelessMode = *(dm_odm->pWirelessMode);
+
+ if (WirelessMode != 0xFF) {
+ if (WirelessMode & ODM_WM_N24G)
+ max_rate_idx = 0x13;
+ else if (WirelessMode & ODM_WM_G)
+ max_rate_idx = 0x0b;
+ else if (WirelessMode & ODM_WM_B)
+ max_rate_idx = 0x03;
+ }
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
+ ("ODM_RAInfo_Init(): WirelessMode:0x%08x , max_raid_idx:0x%02x\n",
+ WirelessMode, max_rate_idx));
+
+ pRaInfo->DecisionRate = max_rate_idx;
+ pRaInfo->PreRate = max_rate_idx;
+ pRaInfo->HighestRate = max_rate_idx;
+ pRaInfo->LowestRate = 0;
+ pRaInfo->RateID = 0;
+ pRaInfo->RateMask = 0xffffffff;
+ pRaInfo->RssiStaRA = 0;
+ pRaInfo->PreRssiStaRA = 0;
+ pRaInfo->SGIEnable = 0;
+ pRaInfo->RAUseRate = 0xffffffff;
+ pRaInfo->NscDown = (N_THRESHOLD_HIGH[0x13]+N_THRESHOLD_LOW[0x13])/2;
+ pRaInfo->NscUp = (N_THRESHOLD_HIGH[0x13]+N_THRESHOLD_LOW[0x13])/2;
+ pRaInfo->RateSGI = 0;
+ pRaInfo->Active = 1; /* Active is not used at present. by page, 110819 */
+ pRaInfo->RptTime = 0x927c;
+ pRaInfo->DROP = 0;
+ pRaInfo->RTY[0] = 0;
+ pRaInfo->RTY[1] = 0;
+ pRaInfo->RTY[2] = 0;
+ pRaInfo->RTY[3] = 0;
+ pRaInfo->RTY[4] = 0;
+ pRaInfo->TOTAL = 0;
+ pRaInfo->RAWaitingCounter = 0;
+ pRaInfo->RAPendingCounter = 0;
+ pRaInfo->PTActive = 1; /* Active when this STA is use */
+ pRaInfo->PTTryState = 0;
+ pRaInfo->PTStage = 5; /* Need to fill into HW_PWR_STATUS */
+ pRaInfo->PTSmoothFactor = 192;
+ pRaInfo->PTStopCount = 0;
+ pRaInfo->PTPreRate = 0;
+ pRaInfo->PTPreRssi = 0;
+ pRaInfo->PTModeSS = 0;
+ pRaInfo->RAstage = 0;
+ return 0;
+}
+
+int ODM_RAInfo_Init_all(struct odm_dm_struct *dm_odm)
+{
+ u8 macid = 0;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("=====>\n"));
+ dm_odm->CurrminRptTime = 0;
+
+ for (macid = 0; macid < ODM_ASSOCIATE_ENTRY_NUM; macid++)
+ ODM_RAInfo_Init(dm_odm, macid);
+
+ return 0;
+}
+
+u8 ODM_RA_GetShortGI_8188E(struct odm_dm_struct *dm_odm, u8 macid)
+{
+ if ((NULL == dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
+ return 0;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+ ("macid =%d SGI =%d\n", macid, dm_odm->RAInfo[macid].RateSGI));
+ return dm_odm->RAInfo[macid].RateSGI;
+}
+
+u8 ODM_RA_GetDecisionRate_8188E(struct odm_dm_struct *dm_odm, u8 macid)
+{
+ u8 DecisionRate = 0;
+
+ if ((NULL == dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
+ return 0;
+ DecisionRate = (dm_odm->RAInfo[macid].DecisionRate);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+ (" macid =%d DecisionRate = 0x%x\n", macid, DecisionRate));
+ return DecisionRate;
+}
+
+u8 ODM_RA_GetHwPwrStatus_8188E(struct odm_dm_struct *dm_odm, u8 macid)
+{
+ u8 PTStage = 5;
+
+ if ((NULL == dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
+ return 0;
+ PTStage = (dm_odm->RAInfo[macid].PTStage);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+ ("macid =%d PTStage = 0x%x\n", macid, PTStage));
+ return PTStage;
+}
+
+void ODM_RA_UpdateRateInfo_8188E(struct odm_dm_struct *dm_odm, u8 macid, u8 RateID, u32 RateMask, u8 SGIEnable)
+{
+ struct odm_ra_info *pRaInfo = NULL;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
+ ("macid =%d RateID = 0x%x RateMask = 0x%x SGIEnable =%d\n",
+ macid, RateID, RateMask, SGIEnable));
+ if ((NULL == dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
+ return;
+
+ pRaInfo = &(dm_odm->RAInfo[macid]);
+ pRaInfo->RateID = RateID;
+ pRaInfo->RateMask = RateMask;
+ pRaInfo->SGIEnable = SGIEnable;
+ odm_ARFBRefresh_8188E(dm_odm, pRaInfo);
+}
+
+void ODM_RA_SetRSSI_8188E(struct odm_dm_struct *dm_odm, u8 macid, u8 Rssi)
+{
+ struct odm_ra_info *pRaInfo = NULL;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_TRACE,
+ (" macid =%d Rssi =%d\n", macid, Rssi));
+ if ((NULL == dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
+ return;
+
+ pRaInfo = &(dm_odm->RAInfo[macid]);
+ pRaInfo->RssiStaRA = Rssi;
+}
+
+void ODM_RA_Set_TxRPT_Time(struct odm_dm_struct *dm_odm, u16 minRptTime)
+{
+ ODM_Write2Byte(dm_odm, REG_TX_RPT_TIME, minRptTime);
+}
+
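+/*
+ * Parse a type-2 TX report: the buffer holds TxRPT_Len >> 3 entries of
+ * TX_RPT2_ITEM_SIZE bytes, one per MAC ID, carrying the per-retry-level
+ * counters and the drop count.  For every station flagged valid in
+ * macid_entry0/1 the counters feed the RA state machine (RAstage 0-4: rate
+ * decision, 5: power-training try state, 6: power-training decision), and
+ * the shortest per-station RptTime is programmed as the next report
+ * interval.
+ */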
+void ODM_RA_TxRPT2Handle_8188E(struct odm_dm_struct *dm_odm, u8 *TxRPT_Buf, u16 TxRPT_Len, u32 macid_entry0, u32 macid_entry1)
+{
+ struct odm_ra_info *pRAInfo = NULL;
+ u8 MacId = 0;
+ u8 *pBuffer = NULL;
+ u32 valid = 0, ItemNum = 0;
+ u16 minRptTime = 0x927c;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
+ ("=====>ODM_RA_TxRPT2Handle_8188E(): valid0 =%d valid1 =%d BufferLength =%d\n",
+ macid_entry0, macid_entry1, TxRPT_Len));
+
+ ItemNum = TxRPT_Len >> 3;
+ pBuffer = TxRPT_Buf;
+
+ do {
+ if (MacId >= ASSOCIATE_ENTRY_NUM)
+ valid = 0;
+ else if (MacId >= 32)
+ valid = (1 << (MacId - 32)) & macid_entry1;
+ else
+ valid = (1 << MacId) & macid_entry0;
+
+ pRAInfo = &(dm_odm->RAInfo[MacId]);
+ if (valid) {
+ pRAInfo->RTY[0] = (u16)GET_TX_REPORT_TYPE1_RERTY_0(pBuffer);
+ pRAInfo->RTY[1] = (u16)GET_TX_REPORT_TYPE1_RERTY_1(pBuffer);
+ pRAInfo->RTY[2] = (u16)GET_TX_REPORT_TYPE1_RERTY_2(pBuffer);
+ pRAInfo->RTY[3] = (u16)GET_TX_REPORT_TYPE1_RERTY_3(pBuffer);
+ pRAInfo->RTY[4] = (u16)GET_TX_REPORT_TYPE1_RERTY_4(pBuffer);
+ pRAInfo->DROP = (u16)GET_TX_REPORT_TYPE1_DROP_0(pBuffer);
+ pRAInfo->TOTAL = pRAInfo->RTY[0] + pRAInfo->RTY[1] +
+ pRAInfo->RTY[2] + pRAInfo->RTY[3] +
+ pRAInfo->RTY[4] + pRAInfo->DROP;
+ if (pRAInfo->TOTAL != 0) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD,
+ ("macid =%d Total =%d R0 =%d R1 =%d R2 =%d R3 =%d R4 =%d D0 =%d valid0 =%x valid1 =%x\n",
+ MacId, pRAInfo->TOTAL,
+ pRAInfo->RTY[0], pRAInfo->RTY[1],
+ pRAInfo->RTY[2], pRAInfo->RTY[3],
+ pRAInfo->RTY[4], pRAInfo->DROP,
+ macid_entry0, macid_entry1));
+ if (pRAInfo->PTActive) {
+ if (pRAInfo->RAstage < 5)
+ odm_RateDecision_8188E(dm_odm, pRAInfo);
+ else if (pRAInfo->RAstage == 5) /* Power training try state */
+ odm_PTTryState_8188E(pRAInfo);
+ else /* RAstage == 6 */
+ odm_PTDecision_8188E(pRAInfo);
+
+ /* Stage_RA counter */
+ if (pRAInfo->RAstage <= 5)
+ pRAInfo->RAstage++;
+ else
+ pRAInfo->RAstage = 0;
+ } else {
+ odm_RateDecision_8188E(dm_odm, pRAInfo);
+ }
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ ("macid =%d R0 =%d R1 =%d R2 =%d R3 =%d R4 =%d drop =%d valid0 =%x RateID =%d SGI =%d\n",
+ MacId,
+ pRAInfo->RTY[0],
+ pRAInfo->RTY[1],
+ pRAInfo->RTY[2],
+ pRAInfo->RTY[3],
+ pRAInfo->RTY[4],
+ pRAInfo->DROP,
+ macid_entry0,
+ pRAInfo->DecisionRate,
+ pRAInfo->RateSGI));
+ } else {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, (" TOTAL = 0!!!!\n"));
+ }
+ }
+
+ if (minRptTime > pRAInfo->RptTime)
+ minRptTime = pRAInfo->RptTime;
+
+ pBuffer += TX_RPT2_ITEM_SIZE;
+ MacId++;
+ } while (MacId < ItemNum);
+
+ odm_RATxRPTTimerSetting(dm_odm, minRptTime);
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_RATE_ADAPTIVE, ODM_DBG_LOUD, ("<===== ODM_RA_TxRPT2Handle_8188E()\n"));
+}
diff --git a/drivers/staging/rtl8188eu/hal/HalHWImg8188E_BB.c b/drivers/staging/rtl8188eu/hal/HalHWImg8188E_BB.c
new file mode 100644
index 00000000000..787e8f1f97f
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/HalHWImg8188E_BB.c
@@ -0,0 +1,721 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along with
+* this program; if not, write to the Free Software Foundation, Inc.,
+* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+*
+*
+******************************************************************************/
+
+#include "odm_precomp.h"
+
+#include <rtw_iol.h>
+
+#define read_next_pair(array, v1, v2, i) \
+ do { \
+ i += 2; \
+ v1 = array[i]; \
+ v2 = array[i+1]; \
+ } while (0)
+
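+/*
+ * Condition words in the config arrays pack a board value in bits 7:0, an
+ * interface mask in bits 15:8 and a platform mask in bits 23:16, matched
+ * against the 'hex' value built from BoardType/SupportInterface/
+ * SupportPlatform; 0xCDCDCDCD means the entry is unconditional.
+ */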
+static bool CheckCondition(const u32 condition, const u32 hex)
+{
+ u32 _board = (hex & 0x000000FF);
+ u32 _interface = (hex & 0x0000FF00) >> 8;
+ u32 _platform = (hex & 0x00FF0000) >> 16;
+ u32 cond = condition;
+
+ if (condition == 0xCDCDCDCD)
+ return true;
+
+ cond = condition & 0x000000FF;
+ if ((_board == cond) && cond != 0x00)
+ return false;
+
+ cond = condition & 0x0000FF00;
+ cond = cond >> 8;
+ if ((_interface & cond) == 0 && cond != 0x07)
+ return false;
+
+ cond = condition & 0x00FF0000;
+ cond = cond >> 16;
+ if ((_platform & cond) == 0 && cond != 0x0F)
+ return false;
+ return true;
+}
+
+
+/******************************************************************************
+* AGC_TAB_1T.TXT
+******************************************************************************/
+
+static u32 array_agc_tab_1t_8188e[] = {
+ 0xC78, 0xFB000001,
+ 0xC78, 0xFB010001,
+ 0xC78, 0xFB020001,
+ 0xC78, 0xFB030001,
+ 0xC78, 0xFB040001,
+ 0xC78, 0xFB050001,
+ 0xC78, 0xFA060001,
+ 0xC78, 0xF9070001,
+ 0xC78, 0xF8080001,
+ 0xC78, 0xF7090001,
+ 0xC78, 0xF60A0001,
+ 0xC78, 0xF50B0001,
+ 0xC78, 0xF40C0001,
+ 0xC78, 0xF30D0001,
+ 0xC78, 0xF20E0001,
+ 0xC78, 0xF10F0001,
+ 0xC78, 0xF0100001,
+ 0xC78, 0xEF110001,
+ 0xC78, 0xEE120001,
+ 0xC78, 0xED130001,
+ 0xC78, 0xEC140001,
+ 0xC78, 0xEB150001,
+ 0xC78, 0xEA160001,
+ 0xC78, 0xE9170001,
+ 0xC78, 0xE8180001,
+ 0xC78, 0xE7190001,
+ 0xC78, 0xE61A0001,
+ 0xC78, 0xE51B0001,
+ 0xC78, 0xE41C0001,
+ 0xC78, 0xE31D0001,
+ 0xC78, 0xE21E0001,
+ 0xC78, 0xE11F0001,
+ 0xC78, 0x8A200001,
+ 0xC78, 0x89210001,
+ 0xC78, 0x88220001,
+ 0xC78, 0x87230001,
+ 0xC78, 0x86240001,
+ 0xC78, 0x85250001,
+ 0xC78, 0x84260001,
+ 0xC78, 0x83270001,
+ 0xC78, 0x82280001,
+ 0xC78, 0x6B290001,
+ 0xC78, 0x6A2A0001,
+ 0xC78, 0x692B0001,
+ 0xC78, 0x682C0001,
+ 0xC78, 0x672D0001,
+ 0xC78, 0x662E0001,
+ 0xC78, 0x652F0001,
+ 0xC78, 0x64300001,
+ 0xC78, 0x63310001,
+ 0xC78, 0x62320001,
+ 0xC78, 0x61330001,
+ 0xC78, 0x46340001,
+ 0xC78, 0x45350001,
+ 0xC78, 0x44360001,
+ 0xC78, 0x43370001,
+ 0xC78, 0x42380001,
+ 0xC78, 0x41390001,
+ 0xC78, 0x403A0001,
+ 0xC78, 0x403B0001,
+ 0xC78, 0x403C0001,
+ 0xC78, 0x403D0001,
+ 0xC78, 0x403E0001,
+ 0xC78, 0x403F0001,
+ 0xC78, 0xFB400001,
+ 0xC78, 0xFB410001,
+ 0xC78, 0xFB420001,
+ 0xC78, 0xFB430001,
+ 0xC78, 0xFB440001,
+ 0xC78, 0xFB450001,
+ 0xC78, 0xFB460001,
+ 0xC78, 0xFB470001,
+ 0xC78, 0xFB480001,
+ 0xC78, 0xFA490001,
+ 0xC78, 0xF94A0001,
+ 0xC78, 0xF84B0001,
+ 0xC78, 0xF74C0001,
+ 0xC78, 0xF64D0001,
+ 0xC78, 0xF54E0001,
+ 0xC78, 0xF44F0001,
+ 0xC78, 0xF3500001,
+ 0xC78, 0xF2510001,
+ 0xC78, 0xF1520001,
+ 0xC78, 0xF0530001,
+ 0xC78, 0xEF540001,
+ 0xC78, 0xEE550001,
+ 0xC78, 0xED560001,
+ 0xC78, 0xEC570001,
+ 0xC78, 0xEB580001,
+ 0xC78, 0xEA590001,
+ 0xC78, 0xE95A0001,
+ 0xC78, 0xE85B0001,
+ 0xC78, 0xE75C0001,
+ 0xC78, 0xE65D0001,
+ 0xC78, 0xE55E0001,
+ 0xC78, 0xE45F0001,
+ 0xC78, 0xE3600001,
+ 0xC78, 0xE2610001,
+ 0xC78, 0xC3620001,
+ 0xC78, 0xC2630001,
+ 0xC78, 0xC1640001,
+ 0xC78, 0x8B650001,
+ 0xC78, 0x8A660001,
+ 0xC78, 0x89670001,
+ 0xC78, 0x88680001,
+ 0xC78, 0x87690001,
+ 0xC78, 0x866A0001,
+ 0xC78, 0x856B0001,
+ 0xC78, 0x846C0001,
+ 0xC78, 0x676D0001,
+ 0xC78, 0x666E0001,
+ 0xC78, 0x656F0001,
+ 0xC78, 0x64700001,
+ 0xC78, 0x63710001,
+ 0xC78, 0x62720001,
+ 0xC78, 0x61730001,
+ 0xC78, 0x60740001,
+ 0xC78, 0x46750001,
+ 0xC78, 0x45760001,
+ 0xC78, 0x44770001,
+ 0xC78, 0x43780001,
+ 0xC78, 0x42790001,
+ 0xC78, 0x417A0001,
+ 0xC78, 0x407B0001,
+ 0xC78, 0x407C0001,
+ 0xC78, 0x407D0001,
+ 0xC78, 0x407E0001,
+ 0xC78, 0x407F0001,
+};
+
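+/*
+ * Walk the (register, value) pairs of the AGC table.  Entries whose first
+ * word is below 0xCDCDCDCD are written unconditionally; larger values start
+ * a conditional branch that runs until a 0xDEAD/0xCDEF/0xCDCD terminator.
+ * When IOL is applied the writes are queued into an xmit frame and executed
+ * in one batch by rtw_IOL_exec_cmds_sync(); otherwise each register is
+ * written immediately through odm_ConfigBB_AGC_8188E().
+ */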
+enum HAL_STATUS ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *dm_odm)
+{
+ u32 hex = 0;
+ u32 i = 0;
+ u8 platform = dm_odm->SupportPlatform;
+ u8 interfaceValue = dm_odm->SupportInterface;
+ u8 board = dm_odm->BoardType;
+ u32 arraylen = sizeof(array_agc_tab_1t_8188e)/sizeof(u32);
+ u32 *array = array_agc_tab_1t_8188e;
+ bool biol = false;
+ struct adapter *adapter = dm_odm->Adapter;
+ struct xmit_frame *pxmit_frame = NULL;
+ u8 bndy_cnt = 1;
+ enum HAL_STATUS rst = HAL_STATUS_SUCCESS;
+
+ hex += board;
+ hex += interfaceValue << 8;
+ hex += platform << 16;
+ hex += 0xFF000000;
+ biol = rtw_IOL_applied(adapter);
+
+ if (biol) {
+ pxmit_frame = rtw_IOL_accquire_xmit_frame(adapter);
+ if (pxmit_frame == NULL) {
+ pr_info("rtw_IOL_accquire_xmit_frame failed\n");
+ return HAL_STATUS_FAILURE;
+ }
+ }
+
+ for (i = 0; i < arraylen; i += 2) {
+ u32 v1 = array[i];
+ u32 v2 = array[i+1];
+
+ /* This (offset, data) pair meets the condition. */
+ if (v1 < 0xCDCDCDCD) {
+ if (biol) {
+ if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
+ bndy_cnt++;
+ rtw_IOL_append_WD_cmd(pxmit_frame, (u16)v1, v2, bMaskDWord);
+ } else {
+ odm_ConfigBB_AGC_8188E(dm_odm, v1, bMaskDWord, v2);
+ }
+ continue;
+ } else {
+ /* This line is the start line of branch. */
+ if (!CheckCondition(array[i], hex)) {
+ /* Discard the following (offset, data) pairs. */
+ read_next_pair(array, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen - 2)
+ read_next_pair(array, v1, v2, i);
+ i -= 2; /* compensate for the for loop's i += 2 */
+ } else { /* Configure matched pairs and skip to end of if-else. */
+ read_next_pair(array, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen - 2) {
+ if (biol) {
+ if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
+ bndy_cnt++;
+ rtw_IOL_append_WD_cmd(pxmit_frame, (u16)v1, v2, bMaskDWord);
+ } else {
+ odm_ConfigBB_AGC_8188E(dm_odm, v1, bMaskDWord, v2);
+ }
+ read_next_pair(array, v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < arraylen - 2)
+ read_next_pair(array, v1, v2, i);
+ }
+ }
+ }
+ if (biol) {
+ if (!rtw_IOL_exec_cmds_sync(dm_odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
+ pr_info("~~~ %s IOL_exec_cmds Failed !!!\n", __func__);
+ rst = HAL_STATUS_FAILURE;
+ }
+ }
+ return rst;
+}
+
+/******************************************************************************
+* PHY_REG_1T.TXT
+******************************************************************************/
+
+static u32 array_phy_reg_1t_8188e[] = {
+ 0x800, 0x80040000,
+ 0x804, 0x00000003,
+ 0x808, 0x0000FC00,
+ 0x80C, 0x0000000A,
+ 0x810, 0x10001331,
+ 0x814, 0x020C3D10,
+ 0x818, 0x02200385,
+ 0x81C, 0x00000000,
+ 0x820, 0x01000100,
+ 0x824, 0x00390204,
+ 0x828, 0x00000000,
+ 0x82C, 0x00000000,
+ 0x830, 0x00000000,
+ 0x834, 0x00000000,
+ 0x838, 0x00000000,
+ 0x83C, 0x00000000,
+ 0x840, 0x00010000,
+ 0x844, 0x00000000,
+ 0x848, 0x00000000,
+ 0x84C, 0x00000000,
+ 0x850, 0x00000000,
+ 0x854, 0x00000000,
+ 0x858, 0x569A11A9,
+ 0x85C, 0x01000014,
+ 0x860, 0x66F60110,
+ 0x864, 0x061F0649,
+ 0x868, 0x00000000,
+ 0x86C, 0x27272700,
+ 0x870, 0x07000760,
+ 0x874, 0x25004000,
+ 0x878, 0x00000808,
+ 0x87C, 0x00000000,
+ 0x880, 0xB0000C1C,
+ 0x884, 0x00000001,
+ 0x888, 0x00000000,
+ 0x88C, 0xCCC000C0,
+ 0x890, 0x00000800,
+ 0x894, 0xFFFFFFFE,
+ 0x898, 0x40302010,
+ 0x89C, 0x00706050,
+ 0x900, 0x00000000,
+ 0x904, 0x00000023,
+ 0x908, 0x00000000,
+ 0x90C, 0x81121111,
+ 0x910, 0x00000002,
+ 0x914, 0x00000201,
+ 0xA00, 0x00D047C8,
+ 0xA04, 0x80FF000C,
+ 0xA08, 0x8C838300,
+ 0xA0C, 0x2E7F120F,
+ 0xA10, 0x9500BB78,
+ 0xA14, 0x1114D028,
+ 0xA18, 0x00881117,
+ 0xA1C, 0x89140F00,
+ 0xA20, 0x1A1B0000,
+ 0xA24, 0x090E1317,
+ 0xA28, 0x00000204,
+ 0xA2C, 0x00D30000,
+ 0xA70, 0x101FBF00,
+ 0xA74, 0x00000007,
+ 0xA78, 0x00000900,
+ 0xA7C, 0x225B0606,
+ 0xA80, 0x218075B1,
+ 0xB2C, 0x80000000,
+ 0xC00, 0x48071D40,
+ 0xC04, 0x03A05611,
+ 0xC08, 0x000000E4,
+ 0xC0C, 0x6C6C6C6C,
+ 0xC10, 0x08800000,
+ 0xC14, 0x40000100,
+ 0xC18, 0x08800000,
+ 0xC1C, 0x40000100,
+ 0xC20, 0x00000000,
+ 0xC24, 0x00000000,
+ 0xC28, 0x00000000,
+ 0xC2C, 0x00000000,
+ 0xC30, 0x69E9AC47,
+ 0xC34, 0x469652AF,
+ 0xC38, 0x49795994,
+ 0xC3C, 0x0A97971C,
+ 0xC40, 0x1F7C403F,
+ 0xC44, 0x000100B7,
+ 0xC48, 0xEC020107,
+ 0xC4C, 0x007F037F,
+ 0xC50, 0x69553420,
+ 0xC54, 0x43BC0094,
+ 0xC58, 0x00013169,
+ 0xC5C, 0x00250492,
+ 0xC60, 0x00000000,
+ 0xC64, 0x7112848B,
+ 0xC68, 0x47C00BFF,
+ 0xC6C, 0x00000036,
+ 0xC70, 0x2C7F000D,
+ 0xC74, 0x020610DB,
+ 0xC78, 0x0000001F,
+ 0xC7C, 0x00B91612,
+ 0xC80, 0x390000E4,
+ 0xC84, 0x20F60000,
+ 0xC88, 0x40000100,
+ 0xC8C, 0x20200000,
+ 0xC90, 0x00091521,
+ 0xC94, 0x00000000,
+ 0xC98, 0x00121820,
+ 0xC9C, 0x00007F7F,
+ 0xCA0, 0x00000000,
+ 0xCA4, 0x000300A0,
+ 0xCA8, 0x00000000,
+ 0xCAC, 0x00000000,
+ 0xCB0, 0x00000000,
+ 0xCB4, 0x00000000,
+ 0xCB8, 0x00000000,
+ 0xCBC, 0x28000000,
+ 0xCC0, 0x00000000,
+ 0xCC4, 0x00000000,
+ 0xCC8, 0x00000000,
+ 0xCCC, 0x00000000,
+ 0xCD0, 0x00000000,
+ 0xCD4, 0x00000000,
+ 0xCD8, 0x64B22427,
+ 0xCDC, 0x00766932,
+ 0xCE0, 0x00222222,
+ 0xCE4, 0x00000000,
+ 0xCE8, 0x37644302,
+ 0xCEC, 0x2F97D40C,
+ 0xD00, 0x00000740,
+ 0xD04, 0x00020401,
+ 0xD08, 0x0000907F,
+ 0xD0C, 0x20010201,
+ 0xD10, 0xA0633333,
+ 0xD14, 0x3333BC43,
+ 0xD18, 0x7A8F5B6F,
+ 0xD2C, 0xCC979975,
+ 0xD30, 0x00000000,
+ 0xD34, 0x80608000,
+ 0xD38, 0x00000000,
+ 0xD3C, 0x00127353,
+ 0xD40, 0x00000000,
+ 0xD44, 0x00000000,
+ 0xD48, 0x00000000,
+ 0xD4C, 0x00000000,
+ 0xD50, 0x6437140A,
+ 0xD54, 0x00000000,
+ 0xD58, 0x00000282,
+ 0xD5C, 0x30032064,
+ 0xD60, 0x4653DE68,
+ 0xD64, 0x04518A3C,
+ 0xD68, 0x00002101,
+ 0xD6C, 0x2A201C16,
+ 0xD70, 0x1812362E,
+ 0xD74, 0x322C2220,
+ 0xD78, 0x000E3C24,
+ 0xE00, 0x2D2D2D2D,
+ 0xE04, 0x2D2D2D2D,
+ 0xE08, 0x0390272D,
+ 0xE10, 0x2D2D2D2D,
+ 0xE14, 0x2D2D2D2D,
+ 0xE18, 0x2D2D2D2D,
+ 0xE1C, 0x2D2D2D2D,
+ 0xE28, 0x00000000,
+ 0xE30, 0x1000DC1F,
+ 0xE34, 0x10008C1F,
+ 0xE38, 0x02140102,
+ 0xE3C, 0x681604C2,
+ 0xE40, 0x01007C00,
+ 0xE44, 0x01004800,
+ 0xE48, 0xFB000000,
+ 0xE4C, 0x000028D1,
+ 0xE50, 0x1000DC1F,
+ 0xE54, 0x10008C1F,
+ 0xE58, 0x02140102,
+ 0xE5C, 0x28160D05,
+ 0xE60, 0x00000008,
+ 0xE68, 0x001B25A4,
+ 0xE6C, 0x00C00014,
+ 0xE70, 0x00C00014,
+ 0xE74, 0x01000014,
+ 0xE78, 0x01000014,
+ 0xE7C, 0x01000014,
+ 0xE80, 0x01000014,
+ 0xE84, 0x00C00014,
+ 0xE88, 0x01000014,
+ 0xE8C, 0x00C00014,
+ 0xED0, 0x00C00014,
+ 0xED4, 0x00C00014,
+ 0xED8, 0x00C00014,
+ 0xEDC, 0x00000014,
+ 0xEE0, 0x00000014,
+ 0xEEC, 0x01C00014,
+ 0xF14, 0x00000003,
+ 0xF4C, 0x00000000,
+ 0xF00, 0x00000300,
+};
+
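+/*
+ * Same table walk as the AGC case, with two additions: in IOL mode the
+ * pseudo-offsets 0xfe..0xf9 are translated into delay commands (50/5/1 ms
+ * and 50/5/1 us), and a write to 0xa24 is also cached in
+ * dm_odm->RFCalibrateInfo.RegA24.
+ */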
+enum HAL_STATUS ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *dm_odm)
+{
+ u32 hex = 0;
+ u32 i = 0;
+ u8 platform = dm_odm->SupportPlatform;
+ u8 interfaceValue = dm_odm->SupportInterface;
+ u8 board = dm_odm->BoardType;
+ u32 arraylen = sizeof(array_phy_reg_1t_8188e)/sizeof(u32);
+ u32 *array = array_phy_reg_1t_8188e;
+ bool biol = false;
+ struct adapter *adapter = dm_odm->Adapter;
+ struct xmit_frame *pxmit_frame = NULL;
+ u8 bndy_cnt = 1;
+ enum HAL_STATUS rst = HAL_STATUS_SUCCESS;
+ hex += board;
+ hex += interfaceValue << 8;
+ hex += platform << 16;
+ hex += 0xFF000000;
+ biol = rtw_IOL_applied(adapter);
+
+ if (biol) {
+ pxmit_frame = rtw_IOL_accquire_xmit_frame(adapter);
+ if (pxmit_frame == NULL) {
+ pr_info("rtw_IOL_accquire_xmit_frame failed\n");
+ return HAL_STATUS_FAILURE;
+ }
+ }
+
+ for (i = 0; i < arraylen; i += 2) {
+ u32 v1 = array[i];
+ u32 v2 = array[i+1];
+
+ /* This (offset, data) pair meets the condition. */
+ if (v1 < 0xCDCDCDCD) {
+ if (biol) {
+ if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
+ bndy_cnt++;
+ if (v1 == 0xfe) {
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 50);
+ } else if (v1 == 0xfd) {
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 5);
+ } else if (v1 == 0xfc) {
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 1);
+ } else if (v1 == 0xfb) {
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 50);
+ } else if (v1 == 0xfa) {
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 5);
+ } else if (v1 == 0xf9) {
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 1);
+ } else {
+ if (v1 == 0xa24)
+ dm_odm->RFCalibrateInfo.RegA24 = v2;
+ rtw_IOL_append_WD_cmd(pxmit_frame, (u16)v1, v2, bMaskDWord);
+ }
+ } else {
+ odm_ConfigBB_PHY_8188E(dm_odm, v1, bMaskDWord, v2);
+ }
+ continue;
+ } else { /* This line is the start line of branch. */
+ if (!CheckCondition(array[i], hex)) {
+ /* Discard the following (offset, data) pairs. */
+ read_next_pair(array, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen - 2)
+ read_next_pair(array, v1, v2, i);
+ i -= 2; /* compensate for the for loop's i += 2 */
+ } else { /* Configure matched pairs and skip to end of if-else. */
+ read_next_pair(array, v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < arraylen - 2) {
+ if (biol) {
+ if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
+ bndy_cnt++;
+ if (v1 == 0xfe) {
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 50);
+ } else if (v1 == 0xfd) {
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 5);
+ } else if (v1 == 0xfc) {
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 1);
+ } else if (v1 == 0xfb) {
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 50);
+ } else if (v1 == 0xfa) {
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 5);
+ } else if (v1 == 0xf9) {
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 1);
+ } else {
+ if (v1 == 0xa24)
+ dm_odm->RFCalibrateInfo.RegA24 = v2;
+
+ rtw_IOL_append_WD_cmd(pxmit_frame, (u16)v1, v2, bMaskDWord);
+ }
+ } else {
+ odm_ConfigBB_PHY_8188E(dm_odm, v1, bMaskDWord, v2);
+ }
+ read_next_pair(array, v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < arraylen - 2)
+ read_next_pair(array, v1, v2, i);
+ }
+ }
+ }
+ if (biol) {
+ if (!rtw_IOL_exec_cmds_sync(dm_odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
+ rst = HAL_STATUS_FAILURE;
+ pr_info("~~~ IOL Config %s Failed !!!\n", __func__);
+ }
+ }
+ return rst;
+}
+
+/******************************************************************************
+* PHY_REG_PG.TXT
+******************************************************************************/
+
+static u32 array_phy_reg_pg_8188e[] = {
+ 0xE00, 0xFFFFFFFF, 0x06070809,
+ 0xE04, 0xFFFFFFFF, 0x02020405,
+ 0xE08, 0x0000FF00, 0x00000006,
+ 0x86C, 0xFFFFFF00, 0x00020400,
+ 0xE10, 0xFFFFFFFF, 0x08090A0B,
+ 0xE14, 0xFFFFFFFF, 0x01030607,
+ 0xE18, 0xFFFFFFFF, 0x08090A0B,
+ 0xE1C, 0xFFFFFFFF, 0x01030607,
+ 0xE00, 0xFFFFFFFF, 0x00000000,
+ 0xE04, 0xFFFFFFFF, 0x00000000,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x00000000,
+ 0xE14, 0xFFFFFFFF, 0x00000000,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x02020202,
+ 0xE04, 0xFFFFFFFF, 0x00020202,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x04040404,
+ 0xE14, 0xFFFFFFFF, 0x00020404,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x02020202,
+ 0xE04, 0xFFFFFFFF, 0x00020202,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x04040404,
+ 0xE14, 0xFFFFFFFF, 0x00020404,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x00000000,
+ 0xE04, 0xFFFFFFFF, 0x00000000,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x00000000,
+ 0xE14, 0xFFFFFFFF, 0x00000000,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x02020202,
+ 0xE04, 0xFFFFFFFF, 0x00020202,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x04040404,
+ 0xE14, 0xFFFFFFFF, 0x00020404,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x00000000,
+ 0xE04, 0xFFFFFFFF, 0x00000000,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x00000000,
+ 0xE14, 0xFFFFFFFF, 0x00000000,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x00000000,
+ 0xE04, 0xFFFFFFFF, 0x00000000,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x00000000,
+ 0xE14, 0xFFFFFFFF, 0x00000000,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x00000000,
+ 0xE04, 0xFFFFFFFF, 0x00000000,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x00000000,
+ 0xE14, 0xFFFFFFFF, 0x00000000,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x00000000,
+ 0xE04, 0xFFFFFFFF, 0x00000000,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x00000000,
+ 0xE14, 0xFFFFFFFF, 0x00000000,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+ 0xE00, 0xFFFFFFFF, 0x00000000,
+ 0xE04, 0xFFFFFFFF, 0x00000000,
+ 0xE08, 0x0000FF00, 0x00000000,
+ 0x86C, 0xFFFFFF00, 0x00000000,
+ 0xE10, 0xFFFFFFFF, 0x00000000,
+ 0xE14, 0xFFFFFFFF, 0x00000000,
+ 0xE18, 0xFFFFFFFF, 0x00000000,
+ 0xE1C, 0xFFFFFFFF, 0x00000000,
+};
+
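+/*
+ * The PHY_REG_PG table uses (register, bitmask, value) triplets rather than
+ * pairs; each unconditional triplet is handed to
+ * odm_ConfigBB_PHY_REG_PG_8188E(), and a failed conditional branch is
+ * skipped in steps of three until the 0xDEAD terminator.
+ */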
+void ODM_ReadAndConfig_PHY_REG_PG_8188E(struct odm_dm_struct *dm_odm)
+{
+ u32 hex;
+ u32 i = 0;
+ u8 platform = dm_odm->SupportPlatform;
+ u8 interfaceValue = dm_odm->SupportInterface;
+ u8 board = dm_odm->BoardType;
+ u32 arraylen = sizeof(array_phy_reg_pg_8188e) / sizeof(u32);
+ u32 *array = array_phy_reg_pg_8188e;
+
+ hex = board + (interfaceValue << 8);
+ hex += (platform << 16) + 0xFF000000;
+
+ for (i = 0; i < arraylen; i += 3) {
+ u32 v1 = array[i];
+ u32 v2 = array[i+1];
+ u32 v3 = array[i+2];
+
+ /* this line is a line of pure_body */
+ if (v1 < 0xCDCDCDCD) {
+ odm_ConfigBB_PHY_REG_PG_8188E(dm_odm, v1, v2, v3);
+ continue;
+ } else { /* this line is the start of branch */
+ if (!CheckCondition(array[i], hex)) {
+ /* don't need the hw_body */
+ i += 2; /* skip the pair of expression */
+ v1 = array[i];
+ v2 = array[i+1];
+ v3 = array[i+2];
+ while (v2 != 0xDEAD) {
+ i += 3;
+ v1 = array[i];
+ v2 = array[i+1];
+ v3 = array[i+2];
+ }
+ }
+ }
+ }
+}
diff --git a/drivers/staging/rtl8188eu/hal/HalHWImg8188E_MAC.c b/drivers/staging/rtl8188eu/hal/HalHWImg8188E_MAC.c
new file mode 100644
index 00000000000..b49b5ab48b1
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/HalHWImg8188E_MAC.c
@@ -0,0 +1,231 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along with
+* this program; if not, write to the Free Software Foundation, Inc.,
+* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+*
+*
+******************************************************************************/
+
+#include "odm_precomp.h"
+#include <rtw_iol.h>
+
+static bool Checkcondition(const u32 condition, const u32 hex)
+{
+ u32 _board = (hex & 0x000000FF);
+ u32 _interface = (hex & 0x0000FF00) >> 8;
+ u32 _platform = (hex & 0x00FF0000) >> 16;
+ u32 cond = condition;
+
+ if (condition == 0xCDCDCDCD)
+ return true;
+
+ cond = condition & 0x000000FF;
+ if ((_board == cond) && cond != 0x00)
+ return false;
+
+ cond = condition & 0x0000FF00;
+ cond = cond >> 8;
+ if ((_interface & cond) == 0 && cond != 0x07)
+ return false;
+
+ cond = condition & 0x00FF0000;
+ cond = cond >> 16;
+ if ((_platform & cond) == 0 && cond != 0x0F)
+ return false;
+ return true;
+}
+
+
+/******************************************************************************
+* MAC_REG.TXT
+******************************************************************************/
+
+static u32 array_MAC_REG_8188E[] = {
+ 0x026, 0x00000041,
+ 0x027, 0x00000035,
+ 0x428, 0x0000000A,
+ 0x429, 0x00000010,
+ 0x430, 0x00000000,
+ 0x431, 0x00000001,
+ 0x432, 0x00000002,
+ 0x433, 0x00000004,
+ 0x434, 0x00000005,
+ 0x435, 0x00000006,
+ 0x436, 0x00000007,
+ 0x437, 0x00000008,
+ 0x438, 0x00000000,
+ 0x439, 0x00000000,
+ 0x43A, 0x00000001,
+ 0x43B, 0x00000002,
+ 0x43C, 0x00000004,
+ 0x43D, 0x00000005,
+ 0x43E, 0x00000006,
+ 0x43F, 0x00000007,
+ 0x440, 0x0000005D,
+ 0x441, 0x00000001,
+ 0x442, 0x00000000,
+ 0x444, 0x00000015,
+ 0x445, 0x000000F0,
+ 0x446, 0x0000000F,
+ 0x447, 0x00000000,
+ 0x458, 0x00000041,
+ 0x459, 0x000000A8,
+ 0x45A, 0x00000072,
+ 0x45B, 0x000000B9,
+ 0x460, 0x00000066,
+ 0x461, 0x00000066,
+ 0x480, 0x00000008,
+ 0x4C8, 0x000000FF,
+ 0x4C9, 0x00000008,
+ 0x4CC, 0x000000FF,
+ 0x4CD, 0x000000FF,
+ 0x4CE, 0x00000001,
+ 0x4D3, 0x00000001,
+ 0x500, 0x00000026,
+ 0x501, 0x000000A2,
+ 0x502, 0x0000002F,
+ 0x503, 0x00000000,
+ 0x504, 0x00000028,
+ 0x505, 0x000000A3,
+ 0x506, 0x0000005E,
+ 0x507, 0x00000000,
+ 0x508, 0x0000002B,
+ 0x509, 0x000000A4,
+ 0x50A, 0x0000005E,
+ 0x50B, 0x00000000,
+ 0x50C, 0x0000004F,
+ 0x50D, 0x000000A4,
+ 0x50E, 0x00000000,
+ 0x50F, 0x00000000,
+ 0x512, 0x0000001C,
+ 0x514, 0x0000000A,
+ 0x516, 0x0000000A,
+ 0x525, 0x0000004F,
+ 0x550, 0x00000010,
+ 0x551, 0x00000010,
+ 0x559, 0x00000002,
+ 0x55D, 0x000000FF,
+ 0x605, 0x00000030,
+ 0x608, 0x0000000E,
+ 0x609, 0x0000002A,
+ 0x620, 0x000000FF,
+ 0x621, 0x000000FF,
+ 0x622, 0x000000FF,
+ 0x623, 0x000000FF,
+ 0x624, 0x000000FF,
+ 0x625, 0x000000FF,
+ 0x626, 0x000000FF,
+ 0x627, 0x000000FF,
+ 0x652, 0x00000020,
+ 0x63C, 0x0000000A,
+ 0x63D, 0x0000000A,
+ 0x63E, 0x0000000E,
+ 0x63F, 0x0000000E,
+ 0x640, 0x00000040,
+ 0x66E, 0x00000005,
+ 0x700, 0x00000021,
+ 0x701, 0x00000043,
+ 0x702, 0x00000065,
+ 0x703, 0x00000087,
+ 0x708, 0x00000021,
+ 0x709, 0x00000043,
+ 0x70A, 0x00000065,
+ 0x70B, 0x00000087,
+};
+
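+/*
+ * MAC register table: (offset, value) pairs written as single bytes, either
+ * queued as IOL write-byte commands or applied directly through
+ * odm_ConfigMAC_8188E(); conditional branches follow the same
+ * 0xCDCDCDCD/0xDEAD convention as the BB tables.
+ */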
+enum HAL_STATUS ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *dm_odm)
+{
+ #define READ_NEXT_PAIR(v1, v2, i) do { i += 2; v1 = array[i]; v2 = array[i+1]; } while (0)
+
+ u32 hex = 0;
+ u32 i;
+ u8 platform = dm_odm->SupportPlatform;
+ u8 interface_val = dm_odm->SupportInterface;
+ u8 board = dm_odm->BoardType;
+ u32 array_len = sizeof(array_MAC_REG_8188E)/sizeof(u32);
+ u32 *array = array_MAC_REG_8188E;
+ bool biol = false;
+
+ struct adapter *adapt = dm_odm->Adapter;
+ struct xmit_frame *pxmit_frame = NULL;
+ u8 bndy_cnt = 1;
+ enum HAL_STATUS rst = HAL_STATUS_SUCCESS;
+ hex += board;
+ hex += interface_val << 8;
+ hex += platform << 16;
+ hex += 0xFF000000;
+
+ biol = rtw_IOL_applied(adapt);
+
+ if (biol) {
+ pxmit_frame = rtw_IOL_accquire_xmit_frame(adapt);
+ if (pxmit_frame == NULL) {
+ pr_info("rtw_IOL_accquire_xmit_frame failed\n");
+ return HAL_STATUS_FAILURE;
+ }
+ }
+
+ for (i = 0; i < array_len; i += 2) {
+ u32 v1 = array[i];
+ u32 v2 = array[i+1];
+
+ /* This (offset, data) pair meets the condition. */
+ if (v1 < 0xCDCDCDCD) {
+ if (biol) {
+ if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
+ bndy_cnt++;
+ rtw_IOL_append_WB_cmd(pxmit_frame, (u16)v1, (u8)v2, 0xFF);
+ } else {
+ odm_ConfigMAC_8188E(dm_odm, v1, (u8)v2);
+ }
+ continue;
+ } else { /* This line is the start line of branch. */
+ if (!Checkcondition(array[i], hex)) {
+ /* Discard the following (offset, data) pairs. */
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < array_len - 2) {
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ i -= 2; /* compensate for the for loop's i += 2 */
+ } else { /* Configure matched pairs and skip to end of if-else. */
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < array_len - 2) {
+ if (biol) {
+ if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
+ bndy_cnt++;
+ rtw_IOL_append_WB_cmd(pxmit_frame, (u16)v1, (u8)v2, 0xFF);
+ } else {
+ odm_ConfigMAC_8188E(dm_odm, v1, (u8)v2);
+ }
+
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ while (v2 != 0xDEAD && i < array_len - 2)
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ }
+ }
+ if (biol) {
+ if (!rtw_IOL_exec_cmds_sync(dm_odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
+ pr_info("~~~ MAC IOL_exec_cmds Failed !!!\n");
+ rst = HAL_STATUS_FAILURE;
+ }
+ }
+ return rst;
+}
diff --git a/drivers/staging/rtl8188eu/hal/HalHWImg8188E_RF.c b/drivers/staging/rtl8188eu/hal/HalHWImg8188E_RF.c
new file mode 100644
index 00000000000..480c810c446
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/HalHWImg8188E_RF.c
@@ -0,0 +1,269 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along with
+* this program; if not, write to the Free Software Foundation, Inc.,
+* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+*
+*
+******************************************************************************/
+
+#include "odm_precomp.h"
+
+#include <rtw_iol.h>
+
+static bool CheckCondition(const u32 Condition, const u32 Hex)
+{
+ u32 _board = (Hex & 0x000000FF);
+ u32 _interface = (Hex & 0x0000FF00) >> 8;
+ u32 _platform = (Hex & 0x00FF0000) >> 16;
+ u32 cond = Condition;
+
+ if (Condition == 0xCDCDCDCD)
+ return true;
+
+ cond = Condition & 0x000000FF;
+ if ((_board == cond) && cond != 0x00)
+ return false;
+
+ cond = Condition & 0x0000FF00;
+ cond = cond >> 8;
+ if ((_interface & cond) == 0 && cond != 0x07)
+ return false;
+
+ cond = Condition & 0x00FF0000;
+ cond = cond >> 16;
+ if ((_platform & cond) == 0 && cond != 0x0F)
+ return false;
+ return true;
+}
+
+
+/******************************************************************************
+* RadioA_1T.TXT
+******************************************************************************/
+
+static u32 Array_RadioA_1T_8188E[] = {
+ 0x000, 0x00030000,
+ 0x008, 0x00084000,
+ 0x018, 0x00000407,
+ 0x019, 0x00000012,
+ 0x01E, 0x00080009,
+ 0x01F, 0x00000880,
+ 0x02F, 0x0001A060,
+ 0x03F, 0x00000000,
+ 0x042, 0x000060C0,
+ 0x057, 0x000D0000,
+ 0x058, 0x000BE180,
+ 0x067, 0x00001552,
+ 0x083, 0x00000000,
+ 0x0B0, 0x000FF8FC,
+ 0x0B1, 0x00054400,
+ 0x0B2, 0x000CCC19,
+ 0x0B4, 0x00043003,
+ 0x0B6, 0x0004953E,
+ 0x0B7, 0x0001C718,
+ 0x0B8, 0x000060FF,
+ 0x0B9, 0x00080001,
+ 0x0BA, 0x00040000,
+ 0x0BB, 0x00000400,
+ 0x0BF, 0x000C0000,
+ 0x0C2, 0x00002400,
+ 0x0C3, 0x00000009,
+ 0x0C4, 0x00040C91,
+ 0x0C5, 0x00099999,
+ 0x0C6, 0x000000A3,
+ 0x0C7, 0x00088820,
+ 0x0C8, 0x00076C06,
+ 0x0C9, 0x00000000,
+ 0x0CA, 0x00080000,
+ 0x0DF, 0x00000180,
+ 0x0EF, 0x000001A0,
+ 0x051, 0x0006B27D,
+ 0xFF0F041F, 0xABCD,
+ 0x052, 0x0007E4DD,
+ 0xCDCDCDCD, 0xCDCD,
+ 0x052, 0x0007E49D,
+ 0xFF0F041F, 0xDEAD,
+ 0x053, 0x00000073,
+ 0x056, 0x00051FF3,
+ 0x035, 0x00000086,
+ 0x035, 0x00000186,
+ 0x035, 0x00000286,
+ 0x036, 0x00001C25,
+ 0x036, 0x00009C25,
+ 0x036, 0x00011C25,
+ 0x036, 0x00019C25,
+ 0x0B6, 0x00048538,
+ 0x018, 0x00000C07,
+ 0x05A, 0x0004BD00,
+ 0x019, 0x000739D0,
+ 0x034, 0x0000ADF3,
+ 0x034, 0x00009DF0,
+ 0x034, 0x00008DED,
+ 0x034, 0x00007DEA,
+ 0x034, 0x00006DE7,
+ 0x034, 0x000054EE,
+ 0x034, 0x000044EB,
+ 0x034, 0x000034E8,
+ 0x034, 0x0000246B,
+ 0x034, 0x00001468,
+ 0x034, 0x0000006D,
+ 0x000, 0x00030159,
+ 0x084, 0x00068200,
+ 0x086, 0x000000CE,
+ 0x087, 0x00048A00,
+ 0x08E, 0x00065540,
+ 0x08F, 0x00088000,
+ 0x0EF, 0x000020A0,
+ 0x03B, 0x000F02B0,
+ 0x03B, 0x000EF7B0,
+ 0x03B, 0x000D4FB0,
+ 0x03B, 0x000CF060,
+ 0x03B, 0x000B0090,
+ 0x03B, 0x000A0080,
+ 0x03B, 0x00090080,
+ 0x03B, 0x0008F780,
+ 0x03B, 0x000722B0,
+ 0x03B, 0x0006F7B0,
+ 0x03B, 0x00054FB0,
+ 0x03B, 0x0004F060,
+ 0x03B, 0x00030090,
+ 0x03B, 0x00020080,
+ 0x03B, 0x00010080,
+ 0x03B, 0x0000F780,
+ 0x0EF, 0x000000A0,
+ 0x000, 0x00010159,
+ 0x018, 0x0000F407,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0x01F, 0x00080003,
+ 0xFFE, 0x00000000,
+ 0xFFE, 0x00000000,
+ 0x01E, 0x00000001,
+ 0x01F, 0x00080000,
+ 0x000, 0x00033E60,
+};
+
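+/*
+ * RF path A table: (RF register, value) pairs.  In IOL mode the 0xffe and
+ * 0xfd..0xf9 pseudo-offsets become delay commands and everything else is
+ * queued as an RF write on ODM_RF_PATH_A; otherwise the values go straight
+ * through odm_ConfigRF_RadioA_8188E().  The 0xFF0F041F/0xCDCDCDCD/0xDEAD
+ * entries form a conditional branch selecting one of two 0x052 values.
+ */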
+enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
+{
+ #define READ_NEXT_PAIR(v1, v2, i) do { i += 2; v1 = Array[i]; v2 = Array[i+1]; } while (0)
+
+ u32 hex = 0;
+ u32 i = 0;
+ u8 platform = pDM_Odm->SupportPlatform;
+ u8 interfaceValue = pDM_Odm->SupportInterface;
+ u8 board = pDM_Odm->BoardType;
+ u32 ArrayLen = sizeof(Array_RadioA_1T_8188E)/sizeof(u32);
+ u32 *Array = Array_RadioA_1T_8188E;
+ bool biol = false;
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ struct xmit_frame *pxmit_frame = NULL;
+ u8 bndy_cnt = 1;
+ enum HAL_STATUS rst = HAL_STATUS_SUCCESS;
+
+ hex += board;
+ hex += interfaceValue << 8;
+ hex += platform << 16;
+ hex += 0xFF000000;
+ biol = rtw_IOL_applied(Adapter);
+
+ if (biol) {
+ pxmit_frame = rtw_IOL_accquire_xmit_frame(Adapter);
+ if (pxmit_frame == NULL) {
+ pr_info("rtw_IOL_accquire_xmit_frame failed\n");
+ return HAL_STATUS_FAILURE;
+ }
+ }
+
+ for (i = 0; i < ArrayLen; i += 2) {
+ u32 v1 = Array[i];
+ u32 v2 = Array[i+1];
+
+ /* This (offset, data) pair meets the condition. */
+ if (v1 < 0xCDCDCDCD) {
+ if (biol) {
+ if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
+ bndy_cnt++;
+
+ if (v1 == 0xffe)
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 50);
+ else if (v1 == 0xfd)
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 5);
+ else if (v1 == 0xfc)
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 1);
+ else if (v1 == 0xfb)
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 50);
+ else if (v1 == 0xfa)
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 5);
+ else if (v1 == 0xf9)
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 1);
+ else
+ rtw_IOL_append_WRF_cmd(pxmit_frame, ODM_RF_PATH_A, (u16)v1, v2, bRFRegOffsetMask);
+ } else {
+ odm_ConfigRF_RadioA_8188E(pDM_Odm, v1, v2);
+ }
+ continue;
+ } else { /* This line is the start line of branch. */
+ if (!CheckCondition(Array[i], hex)) {
+ /* Discard the following (offset, data) pairs. */
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < ArrayLen - 2)
+ READ_NEXT_PAIR(v1, v2, i);
+ i -= 2; /* compensate for the for loop's i += 2 */
+ } else { /* Configure matched pairs and skip to end of if-else. */
+ READ_NEXT_PAIR(v1, v2, i);
+ while (v2 != 0xDEAD &&
+ v2 != 0xCDEF &&
+ v2 != 0xCDCD && i < ArrayLen - 2) {
+ if (biol) {
+ if (rtw_IOL_cmd_boundary_handle(pxmit_frame))
+ bndy_cnt++;
+
+ if (v1 == 0xffe)
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 50);
+ else if (v1 == 0xfd)
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 5);
+ else if (v1 == 0xfc)
+ rtw_IOL_append_DELAY_MS_cmd(pxmit_frame, 1);
+ else if (v1 == 0xfb)
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 50);
+ else if (v1 == 0xfa)
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 5);
+ else if (v1 == 0xf9)
+ rtw_IOL_append_DELAY_US_cmd(pxmit_frame, 1);
+ else
+ rtw_IOL_append_WRF_cmd(pxmit_frame, ODM_RF_PATH_A, (u16)v1, v2, bRFRegOffsetMask);
+ } else {
+ odm_ConfigRF_RadioA_8188E(pDM_Odm, v1, v2);
+ }
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+
+ while (v2 != 0xDEAD && i < ArrayLen - 2)
+ READ_NEXT_PAIR(v1, v2, i);
+ }
+ }
+ }
+ if (biol) {
+ if (!rtw_IOL_exec_cmds_sync(pDM_Odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
+ rst = HAL_STATUS_FAILURE;
+ pr_info("~~~ IOL Config %s Failed !!!\n", __func__);
+ }
+ }
+ return rst;
+}
diff --git a/drivers/staging/rtl8188eu/hal/HalPhyRf.c b/drivers/staging/rtl8188eu/hal/HalPhyRf.c
new file mode 100644
index 00000000000..980f7da8ab3
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/HalPhyRf.c
@@ -0,0 +1,49 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+/* 3============================================================ */
+/* 3 IQ Calibration */
+/* 3============================================================ */
+
+void ODM_ResetIQKResult(struct odm_dm_struct *pDM_Odm)
+{
+}
+
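+/*
+ * Return the slot used for storing IQK results: 5 GHz channels map to their
+ * position in channel_all[] minus 13 (36 -> 1, 38 -> 2, ...); 2.4 GHz and
+ * unlisted channels return 0.
+ */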
+u8 ODM_GetRightChnlPlaceforIQK(u8 chnl)
+{
+ u8 channel_all[ODM_TARGET_CHNL_NUM_2G_5G] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64,
+ 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122,
+ 124, 126, 128, 130, 132, 134, 136, 138, 140, 149, 151, 153,
+ 155, 157, 159, 161, 163, 165
+ };
+ u8 place = chnl;
+
+ if (chnl > 14) {
+ for (place = 14; place < sizeof(channel_all); place++) {
+ if (channel_all[place] == chnl)
+ return place-13;
+ }
+ }
+ return 0;
+}
diff --git a/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c b/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c
new file mode 100644
index 00000000000..e4f20da91b4
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/HalPhyRf_8188e.c
@@ -0,0 +1,1928 @@
+
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+/*---------------------------Define Local Constant---------------------------*/
+/* 2010/04/25 MH Define the max tx power tracking tx agc power. */
+#define ODM_TXPWRTRACK_MAX_IDX_88E 6
+
+/*---------------------------Define Local Constant---------------------------*/
+
+/* 3============================================================ */
+/* 3 Tx Power Tracking */
+/* 3============================================================ */
+/*-----------------------------------------------------------------------------
+ * Function: ODM_TxPwrTrackAdjust88E()
+ *
+ * Overview: On 88E we cannot write 0xc80/c94/c4c/0xa2x directly; the TX AGC
+ * is written instead. OFDM and CCK both use the same method.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 04/23/2012 MHC Create Version 0.
+ * 04/23/2012 MHC Adjust TX agc directly, not through BB digital.
+ *
+ *---------------------------------------------------------------------------*/
+void ODM_TxPwrTrackAdjust88E(struct odm_dm_struct *dm_odm, u8 Type,/* 0 = OFDM, 1 = CCK */
+ u8 *pDirection, /* 1 = +(increase) 2 = -(decrease) */
+ u32 *pOutWriteVal /* Tx tracking CCK/OFDM BB swing index adjust */
+ )
+{
+ u8 pwr_value = 0;
+ /* Tx power tracking BB swing table. */
+ /* The base index = 12. +((12-n)/2)dB 13~?? = decrease tx pwr by -((n-12)/2)dB */
+ if (Type == 0) { /* For OFDM adjust */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
+ ("BbSwingIdxOfdm = %d BbSwingFlagOfdm=%d\n",
+ dm_odm->BbSwingIdxOfdm, dm_odm->BbSwingFlagOfdm));
+
+ if (dm_odm->BbSwingIdxOfdm <= dm_odm->BbSwingIdxOfdmBase) {
+ *pDirection = 1;
+ pwr_value = (dm_odm->BbSwingIdxOfdmBase - dm_odm->BbSwingIdxOfdm);
+ } else {
+ *pDirection = 2;
+ pwr_value = (dm_odm->BbSwingIdxOfdm - dm_odm->BbSwingIdxOfdmBase);
+ }
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
+ ("BbSwingIdxOfdm = %d BbSwingFlagOfdm=%d\n",
+ dm_odm->BbSwingIdxOfdm, dm_odm->BbSwingFlagOfdm));
+ } else if (Type == 1) { /* For CCK adjust. */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD,
+ ("dm_odm->BbSwingIdxCck = %d dm_odm->BbSwingIdxCckBase = %d\n",
+ dm_odm->BbSwingIdxCck, dm_odm->BbSwingIdxCckBase));
+
+ if (dm_odm->BbSwingIdxCck <= dm_odm->BbSwingIdxCckBase) {
+ *pDirection = 1;
+ pwr_value = (dm_odm->BbSwingIdxCckBase - dm_odm->BbSwingIdxCck);
+ } else {
+ *pDirection = 2;
+ pwr_value = (dm_odm->BbSwingIdxCck - dm_odm->BbSwingIdxCckBase);
+ }
+ }
+
+ /* */
+ /* 2012/04/25 MH According to Ed/Luke.Lees estimate for EVM, the max tx power tracking */
+ /* adjustment needs to be less than 6 power index steps for the 88E. */
+ /* */
+ if (pwr_value >= ODM_TXPWRTRACK_MAX_IDX_88E && *pDirection == 1)
+ pwr_value = ODM_TXPWRTRACK_MAX_IDX_88E;
+
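+ /* Replicate the one-byte adjustment into all four byte lanes of the 32-bit write value. */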
+ *pOutWriteVal = pwr_value | (pwr_value<<8) | (pwr_value<<16) | (pwr_value<<24);
+} /* ODM_TxPwrTrackAdjust88E */
+
+/*-----------------------------------------------------------------------------
+ * Function: odm_TxPwrTrackSetPwr88E()
+ *
+ * Overview: 88E changes all channel tx power according to the flags.
+ * OFDM & CCK are all different.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 04/23/2012 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+static void odm_TxPwrTrackSetPwr88E(struct odm_dm_struct *dm_odm)
+{
+ if (dm_odm->BbSwingFlagOfdm || dm_odm->BbSwingFlagCck) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_TX_PWR_TRACK, ODM_DBG_LOUD, ("odm_TxPwrTrackSetPwr88E CH=%d\n", *(dm_odm->pChannel)));
+ PHY_SetTxPowerLevel8188E(dm_odm->Adapter, *(dm_odm->pChannel));
+ dm_odm->BbSwingFlagOfdm = false;
+ dm_odm->BbSwingFlagCck = false;
+ }
+} /* odm_TxPwrTrackSetPwr88E */
+
+/* 091212 chiyokolin */
+void
+odm_TXPowerTrackingCallback_ThermalMeter_8188E(
+ struct adapter *Adapter
+ )
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u8 ThermalValue = 0, delta, delta_LCK, delta_IQK, offset;
+ u8 ThermalValue_AVG_count = 0;
+ u32 ThermalValue_AVG = 0;
+ s32 ele_A = 0, ele_D, TempCCk, X, value32;
+ s32 Y, ele_C = 0;
+ s8 OFDM_index[2], CCK_index = 0;
+ s8 OFDM_index_old[2] = {0, 0}, CCK_index_old = 0;
+ u32 i = 0, j = 0;
+ bool is2t = false;
+
+ u8 OFDM_min_index = 6, rf; /* OFDM BB Swing should be less than +3.0dB, which is required by Arthur */
+ u8 Indexforchannel = 0/*GetRightChnlPlaceforIQK(pHalData->CurrentChannel)*/;
+ s8 OFDM_index_mapping[2][index_mapping_NUM_88E] = {
+ {0, 0, 2, 3, 4, 4, /* 2.4G, decrease power */
+ 5, 6, 7, 7, 8, 9,
+ 10, 10, 11}, /* For lower temperature, updated on 20120220. */
+ {0, 0, -1, -2, -3, -4, /* 2.4G, increase power */
+ -4, -4, -4, -5, -7, -8,
+ -9, -9, -10},
+ };
+ u8 Thermal_mapping[2][index_mapping_NUM_88E] = {
+ {0, 2, 4, 6, 8, 10, /* 2.4G, decrease power */
+ 12, 14, 16, 18, 20, 22,
+ 24, 26, 27},
+ {0, 2, 4, 6, 8, 10, /* 2.4G, increase power */
+ 12, 14, 16, 18, 20, 22,
+ 25, 25, 25},
+ };
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
+ /* 2012/04/25 MH Add for tx power tracking to set tx power in tx agc for 88E. */
+ odm_TxPwrTrackSetPwr88E(dm_odm);
+
+ dm_odm->RFCalibrateInfo.TXPowerTrackingCallbackCnt++; /* cosa add for debug */
+ dm_odm->RFCalibrateInfo.bTXPowerTrackingInit = true;
+
+ /* <Kordan> RFCalibrateInfo.RegA24 will be initialized during ODM HW configuration, but MP configures it with parameter files. */
+ dm_odm->RFCalibrateInfo.RegA24 = 0x090e1317;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("===>dm_TXPowerTrackingCallback_ThermalMeter_8188E txpowercontrol %d\n",
+ dm_odm->RFCalibrateInfo.TxPowerTrackControl));
+
+ ThermalValue = (u8)ODM_GetRFReg(dm_odm, RF_PATH_A, RF_T_METER_88E, 0xfc00); /* 0x42: RF Reg[15:10] 88E */
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x EEPROMthermalmeter 0x%x\n",
+ ThermalValue, dm_odm->RFCalibrateInfo.ThermalValue, pHalData->EEPROMThermalMeter));
+
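+ /* rf is the number of RF paths to process: 2 for 2T parts, 1 otherwise. */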
+ if (is2t)
+ rf = 2;
+ else
+ rf = 1;
+
+ if (ThermalValue) {
+ /* Query OFDM path A default setting */
+ ele_D = ODM_GetBBReg(dm_odm, rOFDM0_XATxIQImbalance, bMaskDWord)&bMaskOFDM_D;
+ for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { /* find the index */
+ if (ele_D == (OFDMSwingTable[i]&bMaskOFDM_D)) {
+ OFDM_index_old[0] = (u8)i;
+ dm_odm->BbSwingIdxOfdmBase = (u8)i;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Initial pathA ele_D reg0x%x = 0x%x, OFDM_index=0x%x\n",
+ rOFDM0_XATxIQImbalance, ele_D, OFDM_index_old[0]));
+ break;
+ }
+ }
+
+ /* Query OFDM path B default setting */
+ if (is2t) {
+ ele_D = ODM_GetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord)&bMaskOFDM_D;
+ for (i = 0; i < OFDM_TABLE_SIZE_92D; i++) { /* find the index */
+ if (ele_D == (OFDMSwingTable[i]&bMaskOFDM_D)) {
+ OFDM_index_old[1] = (u8)i;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Initial pathB ele_D reg0x%x = 0x%x, OFDM_index=0x%x\n",
+ rOFDM0_XBTxIQImbalance, ele_D, OFDM_index_old[1]));
+ break;
+ }
+ }
+ }
+
+ /* Query CCK default setting From 0xa24 */
+ TempCCk = dm_odm->RFCalibrateInfo.RegA24;
+
+ for (i = 0; i < CCK_TABLE_SIZE; i++) {
+ if (dm_odm->RFCalibrateInfo.bCCKinCH14) {
+ if (ODM_CompareMemory(dm_odm, (void *)&TempCCk, (void *)&CCKSwingTable_Ch14[i][2], 4) == 0) {
+ CCK_index_old = (u8)i;
+ dm_odm->BbSwingIdxCckBase = (u8)i;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Initial reg0x%x = 0x%x, CCK_index=0x%x, ch 14 %d\n",
+ rCCK0_TxFilter2, TempCCk, CCK_index_old, dm_odm->RFCalibrateInfo.bCCKinCH14));
+ break;
+ }
+ } else {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("RegA24: 0x%X, CCKSwingTable_Ch1_Ch13[%d][2]: CCKSwingTable_Ch1_Ch13[i][2]: 0x%X\n",
+ TempCCk, i, CCKSwingTable_Ch1_Ch13[i][2]));
+ if (ODM_CompareMemory(dm_odm, (void *)&TempCCk, (void *)&CCKSwingTable_Ch1_Ch13[i][2], 4) == 0) {
+ CCK_index_old = (u8)i;
+ dm_odm->BbSwingIdxCckBase = (u8)i;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Initial reg0x%x = 0x%x, CCK_index=0x%x, ch14 %d\n",
+ rCCK0_TxFilter2, TempCCk, CCK_index_old, dm_odm->RFCalibrateInfo.bCCKinCH14));
+ break;
+ }
+ }
+ }
+
+ if (!dm_odm->RFCalibrateInfo.ThermalValue) {
+ dm_odm->RFCalibrateInfo.ThermalValue = pHalData->EEPROMThermalMeter;
+ dm_odm->RFCalibrateInfo.ThermalValue_LCK = ThermalValue;
+ dm_odm->RFCalibrateInfo.ThermalValue_IQK = ThermalValue;
+
+ for (i = 0; i < rf; i++)
+ dm_odm->RFCalibrateInfo.OFDM_index[i] = OFDM_index_old[i];
+ dm_odm->RFCalibrateInfo.CCK_index = CCK_index_old;
+ }
+
+ if (dm_odm->RFCalibrateInfo.bReloadtxpowerindex)
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("reload ofdm index for band switch\n"));
+
+ /* calculate average thermal meter */
+ dm_odm->RFCalibrateInfo.ThermalValue_AVG[dm_odm->RFCalibrateInfo.ThermalValue_AVG_index] = ThermalValue;
+ dm_odm->RFCalibrateInfo.ThermalValue_AVG_index++;
+ if (dm_odm->RFCalibrateInfo.ThermalValue_AVG_index == AVG_THERMAL_NUM_88E)
+ dm_odm->RFCalibrateInfo.ThermalValue_AVG_index = 0;
+
+ for (i = 0; i < AVG_THERMAL_NUM_88E; i++) {
+ if (dm_odm->RFCalibrateInfo.ThermalValue_AVG[i]) {
+ ThermalValue_AVG += dm_odm->RFCalibrateInfo.ThermalValue_AVG[i];
+ ThermalValue_AVG_count++;
+ }
+ }
+
+ if (ThermalValue_AVG_count) {
+ ThermalValue = (u8)(ThermalValue_AVG / ThermalValue_AVG_count);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("AVG Thermal Meter = 0x%x\n", ThermalValue));
+ }
+
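+ /* delta is the absolute thermal drift relative to the EEPROM calibration value or the last recorded reading. */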
+ if (dm_odm->RFCalibrateInfo.bReloadtxpowerindex) {
+ delta = ThermalValue > pHalData->EEPROMThermalMeter ?
+ (ThermalValue - pHalData->EEPROMThermalMeter) :
+ (pHalData->EEPROMThermalMeter - ThermalValue);
+ dm_odm->RFCalibrateInfo.bReloadtxpowerindex = false;
+ dm_odm->RFCalibrateInfo.bDoneTxpower = false;
+ } else if (dm_odm->RFCalibrateInfo.bDoneTxpower) {
+ delta = (ThermalValue > dm_odm->RFCalibrateInfo.ThermalValue) ?
+ (ThermalValue - dm_odm->RFCalibrateInfo.ThermalValue) :
+ (dm_odm->RFCalibrateInfo.ThermalValue - ThermalValue);
+ } else {
+ delta = ThermalValue > pHalData->EEPROMThermalMeter ?
+ (ThermalValue - pHalData->EEPROMThermalMeter) :
+ (pHalData->EEPROMThermalMeter - ThermalValue);
+ }
+ delta_LCK = (ThermalValue > dm_odm->RFCalibrateInfo.ThermalValue_LCK) ?
+ (ThermalValue - dm_odm->RFCalibrateInfo.ThermalValue_LCK) :
+ (dm_odm->RFCalibrateInfo.ThermalValue_LCK - ThermalValue);
+ delta_IQK = (ThermalValue > dm_odm->RFCalibrateInfo.ThermalValue_IQK) ?
+ (ThermalValue - dm_odm->RFCalibrateInfo.ThermalValue_IQK) :
+ (dm_odm->RFCalibrateInfo.ThermalValue_IQK - ThermalValue);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x EEPROMthermalmeter 0x%x delta 0x%x delta_LCK 0x%x delta_IQK 0x%x\n",
+ ThermalValue, dm_odm->RFCalibrateInfo.ThermalValue,
+ pHalData->EEPROMThermalMeter, delta, delta_LCK, delta_IQK));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("pre thermal meter LCK 0x%x pre thermal meter IQK 0x%x delta_LCK_bound 0x%x delta_IQK_bound 0x%x\n",
+ dm_odm->RFCalibrateInfo.ThermalValue_LCK,
+ dm_odm->RFCalibrateInfo.ThermalValue_IQK,
+ dm_odm->RFCalibrateInfo.Delta_LCK,
+ dm_odm->RFCalibrateInfo.Delta_IQK));
+
+ if (delta_LCK >= 8) { /* Delta temperature is equal to or larger than 20 centigrade. */
+ dm_odm->RFCalibrateInfo.ThermalValue_LCK = ThermalValue;
+ PHY_LCCalibrate_8188E(Adapter);
+ }
+
+ if (delta > 0 && dm_odm->RFCalibrateInfo.TxPowerTrackControl) {
+ delta = ThermalValue > pHalData->EEPROMThermalMeter ?
+ (ThermalValue - pHalData->EEPROMThermalMeter) :
+ (pHalData->EEPROMThermalMeter - ThermalValue);
+ /* calculate new OFDM / CCK offset */
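+ /* Row 1 of the mapping tables is the "increase power" case (die hotter
+ * than the EEPROM calibration value); row 0 is the "decrease power" case. */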
+ if (ThermalValue > pHalData->EEPROMThermalMeter)
+ j = 1;
+ else
+ j = 0;
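+ /* Pick the largest column whose thermal threshold does not exceed delta. */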
+ for (offset = 0; offset < index_mapping_NUM_88E; offset++) {
+ if (delta < Thermal_mapping[j][offset]) {
+ if (offset != 0)
+ offset--;
+ break;
+ }
+ }
+ if (offset >= index_mapping_NUM_88E)
+ offset = index_mapping_NUM_88E-1;
+ for (i = 0; i < rf; i++)
+ OFDM_index[i] = dm_odm->RFCalibrateInfo.OFDM_index[i] + OFDM_index_mapping[j][offset];
+ CCK_index = dm_odm->RFCalibrateInfo.CCK_index + OFDM_index_mapping[j][offset];
+
+ if (is2t) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("temp OFDM_A_index=0x%x, OFDM_B_index=0x%x, CCK_index=0x%x\n",
+ dm_odm->RFCalibrateInfo.OFDM_index[0],
+ dm_odm->RFCalibrateInfo.OFDM_index[1],
+ dm_odm->RFCalibrateInfo.CCK_index));
+ } else {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("temp OFDM_A_index=0x%x, CCK_index=0x%x\n",
+ dm_odm->RFCalibrateInfo.OFDM_index[0],
+ dm_odm->RFCalibrateInfo.CCK_index));
+ }
+
+ for (i = 0; i < rf; i++) {
+ if (OFDM_index[i] > OFDM_TABLE_SIZE_92D-1)
+ OFDM_index[i] = OFDM_TABLE_SIZE_92D-1;
+ else if (OFDM_index[i] < OFDM_min_index)
+ OFDM_index[i] = OFDM_min_index;
+ }
+
+ if (CCK_index > CCK_TABLE_SIZE-1)
+ CCK_index = CCK_TABLE_SIZE-1;
+ else if (CCK_index < 0)
+ CCK_index = 0;
+
+ if (is2t) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("new OFDM_A_index=0x%x, OFDM_B_index=0x%x, CCK_index=0x%x\n",
+ OFDM_index[0], OFDM_index[1], CCK_index));
+ } else {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("new OFDM_A_index=0x%x, CCK_index=0x%x\n",
+ OFDM_index[0], CCK_index));
+ }
+
+ /* 2 temporarily remove bNOPG */
+ /* Config by SwingTable */
+ if (dm_odm->RFCalibrateInfo.TxPowerTrackControl) {
+ dm_odm->RFCalibrateInfo.bDoneTxpower = true;
+
+ /* Adjust OFDM Ant_A according to IQK result */
+ ele_D = (OFDMSwingTable[(u8)OFDM_index[0]] & 0xFFC00000)>>22;
+ X = dm_odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].Value[0][0];
+ Y = dm_odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].Value[0][1];
+
+ /* Revise TX power table. */
+ dm_odm->BbSwingIdxOfdm = (u8)OFDM_index[0];
+ dm_odm->BbSwingIdxCck = (u8)CCK_index;
+
+ if (dm_odm->BbSwingIdxOfdmCurrent != dm_odm->BbSwingIdxOfdm) {
+ dm_odm->BbSwingIdxOfdmCurrent = dm_odm->BbSwingIdxOfdm;
+ dm_odm->BbSwingFlagOfdm = true;
+ }
+
+ if (dm_odm->BbSwingIdxCckCurrent != dm_odm->BbSwingIdxCck) {
+ dm_odm->BbSwingIdxCckCurrent = dm_odm->BbSwingIdxCck;
+ dm_odm->BbSwingFlagCck = true;
+ }
+
+ if (X != 0) {
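+ /* X and Y are 10-bit signed IQK results; sign-extend them before scaling by ele_D. */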
+ if ((X & 0x00000200) != 0)
+ X = X | 0xFFFFFC00;
+ ele_A = ((X * ele_D)>>8)&0x000003FF;
+
+ /* new element C = element D x Y */
+ if ((Y & 0x00000200) != 0)
+ Y = Y | 0xFFFFFC00;
+ ele_C = ((Y * ele_D)>>8)&0x000003FF;
+
+ /* 2012/04/23 MH According to Luke's suggestion, we can not write BB digital */
+ /* to increase TX power. Otherwise, EVM will be bad. */
+ }
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("TxPwrTracking for path A: X=0x%x, Y=0x%x ele_A=0x%x ele_C=0x%x ele_D=0x%x 0xe94=0x%x 0xe9c=0x%x\n",
+ (u32)X, (u32)Y, (u32)ele_A, (u32)ele_C, (u32)ele_D, (u32)X, (u32)Y));
+
+ if (is2t) {
+ ele_D = (OFDMSwingTable[(u8)OFDM_index[1]] & 0xFFC00000)>>22;
+
+ /* new element A = element D x X */
+ X = dm_odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].Value[0][4];
+ Y = dm_odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].Value[0][5];
+
+ if ((X != 0) && (*(dm_odm->pBandType) == ODM_BAND_2_4G)) {
+ if ((X & 0x00000200) != 0) /* consider minus */
+ X = X | 0xFFFFFC00;
+ ele_A = ((X * ele_D)>>8)&0x000003FF;
+
+ /* new element C = element D x Y */
+ if ((Y & 0x00000200) != 0)
+ Y = Y | 0xFFFFFC00;
+ ele_C = ((Y * ele_D)>>8)&0x000003FF;
+
+ /* write new elements A, C, D to regC88 and regC9C, element B is always 0 */
+ value32 = (ele_D<<22) | ((ele_C&0x3F)<<16) | ele_A;
+ ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord, value32);
+
+ value32 = (ele_C&0x000003C0)>>6;
+ ODM_SetBBReg(dm_odm, rOFDM0_XDTxAFE, bMaskH4Bits, value32);
+
+ value32 = ((X * ele_D)>>7)&0x01;
+ ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT28, value32);
+ } else {
+ ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord, OFDMSwingTable[(u8)OFDM_index[1]]);
+ ODM_SetBBReg(dm_odm, rOFDM0_XDTxAFE, bMaskH4Bits, 0x00);
+ ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT28, 0x00);
+ }
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("TxPwrTracking path B: X=0x%x, Y=0x%x ele_A=0x%x ele_C=0x%x ele_D=0x%x 0xeb4=0x%x 0xebc=0x%x\n",
+ (u32)X, (u32)Y, (u32)ele_A,
+ (u32)ele_C, (u32)ele_D, (u32)X, (u32)Y));
+ }
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("TxPwrTracking 0xc80 = 0x%x, 0xc94 = 0x%x RF 0x24 = 0x%x\n",
+ ODM_GetBBReg(dm_odm, 0xc80, bMaskDWord), ODM_GetBBReg(dm_odm,
+ 0xc94, bMaskDWord), ODM_GetRFReg(dm_odm, RF_PATH_A, 0x24, bRFRegOffsetMask)));
+ }
+ }
+
+ if (delta_IQK >= 8) { /* Delta temperature is equal to or larger than 20 centigrade. */
+ ODM_ResetIQKResult(dm_odm);
+
+ dm_odm->RFCalibrateInfo.ThermalValue_IQK = ThermalValue;
+ PHY_IQCalibrate_8188E(Adapter, false);
+ }
+ /* update thermal meter value */
+ if (dm_odm->RFCalibrateInfo.TxPowerTrackControl)
+ dm_odm->RFCalibrateInfo.ThermalValue = ThermalValue;
+ }
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("<===dm_TXPowerTrackingCallback_ThermalMeter_8188E\n"));
+ dm_odm->RFCalibrateInfo.TXPowercount = 0;
+}
+
+/* 1 7. IQK */
+#define MAX_TOLERANCE 5
+#define IQK_DELAY_TIME 1 /* ms */
+
+static u8 /* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+phy_PathA_IQK_8188E(struct adapter *adapt, bool configPathB)
+{
+ u32 regeac, regE94, regE9C, regEA4;
+ u8 result = 0x00;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A IQK!\n"));
+
+ /* 1 Tx IQK */
+ /* path-A IQK setting */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A IQK setting!\n"));
+ ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
+ ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
+ ODM_SetBBReg(dm_odm, rTx_IQK_PI_A, bMaskDWord, 0x8214032a);
+ ODM_SetBBReg(dm_odm, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
+
+ /* LO calibration setting */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n"));
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Rsp, bMaskDWord, 0x00462911);
+
+ /* One shot, path A LOK & IQK */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n"));
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+
+ /* delay x ms */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path A LOK & IQK.\n", IQK_DELAY_TIME_88E));
+ /* PlatformStallExecution(IQK_DELAY_TIME_88E*1000); */
+ ODM_delay_ms(IQK_DELAY_TIME_88E);
+
+ /* Check failed */
+ regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regeac));
+ regE94 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe94 = 0x%x\n", regE94));
+ regE9C = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe9c = 0x%x\n", regE9C));
+ regEA4 = ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_A_2, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xea4 = 0x%x\n", regEA4));
+
+ if (!(regeac & BIT28) &&
+ (((regE94 & 0x03FF0000)>>16) != 0x142) &&
+ (((regE9C & 0x03FF0000)>>16) != 0x42))
+ result |= 0x01;
+ return result;
+}
+
+static u8 /* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+phy_PathA_RxIQK(struct adapter *adapt, bool configPathB)
+{
+ u32 regeac, regE94, regE9C, regEA4, u4tmp;
+ u8 result = 0x00;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK!\n"));
+
+ /* 1 Get TXIMR setting */
+ /* modify RXIQK mode table */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A Rx IQK modify RXIQK mode table!\n"));
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf117B);
+
+ /* PA,PAD off */
+ ODM_SetRFReg(dm_odm, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x980);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, 0x56, bRFRegOffsetMask, 0x51000);
+
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+
+ /* IQK setting */
+ ODM_SetBBReg(dm_odm, rTx_IQK, bMaskDWord, 0x01007c00);
+ ODM_SetBBReg(dm_odm, rRx_IQK, bMaskDWord, 0x81004800);
+
+ /* path-A IQK setting */
+ ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x10008c1c);
+ ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x30008c1c);
+ ODM_SetBBReg(dm_odm, rTx_IQK_PI_A, bMaskDWord, 0x82160c1f);
+ ODM_SetBBReg(dm_odm, rRx_IQK_PI_A, bMaskDWord, 0x28160000);
+
+ /* LO calibration setting */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n"));
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
+
+ /* One shot, path A LOK & IQK */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n"));
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+
+ /* delay x ms */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Delay %d ms for One shot, path A LOK & IQK.\n",
+ IQK_DELAY_TIME_88E));
+ ODM_delay_ms(IQK_DELAY_TIME_88E);
+
+ /* Check failed */
+ regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("0xeac = 0x%x\n", regeac));
+ regE94 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("0xe94 = 0x%x\n", regE94));
+ regE9C = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("0xe9c = 0x%x\n", regE9C));
+
+ if (!(regeac & BIT28) &&
+ (((regE94 & 0x03FF0000)>>16) != 0x142) &&
+ (((regE9C & 0x03FF0000)>>16) != 0x42))
+ result |= 0x01;
+ else /* if Tx not OK, ignore Rx */
+ return result;
+
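+ /* Pack the measured TX IQK terms (0xe94[25:16] and 0xe9c[25:16]) into the 0xe40 setting used for the RX pass. */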
+ u4tmp = 0x80007C00 | (regE94&0x3FF0000) | ((regE9C&0x3FF0000) >> 16);
+ ODM_SetBBReg(dm_odm, rTx_IQK, bMaskDWord, u4tmp);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe40 = 0x%x u4tmp = 0x%x\n", ODM_GetBBReg(dm_odm, rTx_IQK, bMaskDWord), u4tmp));
+
+ /* 1 RX IQK */
+ /* modify RXIQK mode table */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A Rx IQK modify RXIQK mode table 2!\n"));
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_WE_LUT, bRFRegOffsetMask, 0x800a0);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_RCK_OS, bRFRegOffsetMask, 0x30000);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G1, bRFRegOffsetMask, 0x0000f);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_TXPA_G2, bRFRegOffsetMask, 0xf7ffa);
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+
+ /* IQK setting */
+ ODM_SetBBReg(dm_odm, rRx_IQK, bMaskDWord, 0x01004800);
+
+ /* path-A IQK setting */
+ ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x38008c1c);
+ ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x18008c1c);
+ ODM_SetBBReg(dm_odm, rTx_IQK_PI_A, bMaskDWord, 0x82160c05);
+ ODM_SetBBReg(dm_odm, rRx_IQK_PI_A, bMaskDWord, 0x28160c1f);
+
+ /* LO calibration setting */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("LO calibration setting!\n"));
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Rsp, bMaskDWord, 0x0046a911);
+
+ /* One shot, path A LOK & IQK */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n"));
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf9000000);
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+
+ /* delay x ms */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Delay %d ms for One shot, path A LOK & IQK.\n", IQK_DELAY_TIME_88E));
+ /* PlatformStallExecution(IQK_DELAY_TIME_88E*1000); */
+ ODM_delay_ms(IQK_DELAY_TIME_88E);
+
+ /* Check failed */
+ regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xeac = 0x%x\n", regeac));
+ regE94 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe94 = 0x%x\n", regE94));
+ regE9C = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xe9c = 0x%x\n", regE9C));
+ regEA4 = ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_A_2, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("0xea4 = 0x%x\n", regEA4));
+
+ /* reload RF 0xdf */
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, 0xdf, bRFRegOffsetMask, 0x180);
+
+ if (!(regeac & BIT27) && /* if Tx is OK, check whether Rx is OK */
+ (((regEA4 & 0x03FF0000)>>16) != 0x132) &&
+ (((regeac & 0x03FF0000)>>16) != 0x36))
+ result |= 0x02;
+ else
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK fail!!\n"));
+
+ return result;
+}
+
+static u8 /* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+phy_PathB_IQK_8188E(struct adapter *adapt)
+{
+ u32 regeac, regeb4, regebc, regec4, regecc;
+ u8 result = 0x00;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B IQK!\n"));
+
+ /* One shot, path B LOK & IQK */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("One shot, path A LOK & IQK!\n"));
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Cont, bMaskDWord, 0x00000002);
+ ODM_SetBBReg(dm_odm, rIQK_AGC_Cont, bMaskDWord, 0x00000000);
+
+ /* delay x ms */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Delay %d ms for One shot, path B LOK & IQK.\n",
+ IQK_DELAY_TIME_88E));
+ ODM_delay_ms(IQK_DELAY_TIME_88E);
+
+ /* Check failed */
+ regeac = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("0xeac = 0x%x\n", regeac));
+ regeb4 = ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_B, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("0xeb4 = 0x%x\n", regeb4));
+ regebc = ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_B, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("0xebc = 0x%x\n", regebc));
+ regec4 = ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_B_2, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("0xec4 = 0x%x\n", regec4));
+ regecc = ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_B_2, bMaskDWord);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("0xecc = 0x%x\n", regecc));
+
+ if (!(regeac & BIT31) &&
+ (((regeb4 & 0x03FF0000)>>16) != 0x142) &&
+ (((regebc & 0x03FF0000)>>16) != 0x42))
+ result |= 0x01;
+ else
+ return result;
+
+ if (!(regeac & BIT30) &&
+ (((regec4 & 0x03FF0000)>>16) != 0x132) &&
+ (((regecc & 0x03FF0000)>>16) != 0x36))
+ result |= 0x02;
+ else
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B Rx IQK fail!!\n"));
+ return result;
+}
+
+static void patha_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], u8 final_candidate, bool txonly)
+{
+ u32 Oldval_0, X, TX0_A, reg;
+ s32 Y, TX0_C;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Path A IQ Calibration %s !\n",
+ (iqkok) ? "Success" : "Failed"));
+
+ if (final_candidate == 0xFF) {
+ return;
+ } else if (iqkok) {
+ Oldval_0 = (ODM_GetBBReg(dm_odm, rOFDM0_XATxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
+
+ X = result[final_candidate][0];
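+ /* The IQK result is a 10-bit signed value; sign-extend it before scaling by the original register value. */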
+ if ((X & 0x00000200) != 0)
+ X = X | 0xFFFFFC00;
+ TX0_A = (X * Oldval_0) >> 8;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("X = 0x%x, TX0_A = 0x%x, Oldval_0 0x%x\n",
+ X, TX0_A, Oldval_0));
+ ODM_SetBBReg(dm_odm, rOFDM0_XATxIQImbalance, 0x3FF, TX0_A);
+
+ ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(31), ((X * Oldval_0>>7) & 0x1));
+
+ Y = result[final_candidate][1];
+ if ((Y & 0x00000200) != 0)
+ Y = Y | 0xFFFFFC00;
+
+ TX0_C = (Y * Oldval_0) >> 8;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Y = 0x%x, TX = 0x%x\n", Y, TX0_C));
+ ODM_SetBBReg(dm_odm, rOFDM0_XCTxAFE, 0xF0000000, ((TX0_C&0x3C0)>>6));
+ ODM_SetBBReg(dm_odm, rOFDM0_XATxIQImbalance, 0x003F0000, (TX0_C&0x3F));
+
+ ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(29), ((Y * Oldval_0>>7) & 0x1));
+
+ if (txonly) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("patha_fill_iqk only Tx OK\n"));
+ return;
+ }
+
+ reg = result[final_candidate][2];
+ ODM_SetBBReg(dm_odm, rOFDM0_XARxIQImbalance, 0x3FF, reg);
+
+ reg = result[final_candidate][3] & 0x3F;
+ ODM_SetBBReg(dm_odm, rOFDM0_XARxIQImbalance, 0xFC00, reg);
+
+ reg = (result[final_candidate][3] >> 6) & 0xF;
+ ODM_SetBBReg(dm_odm, rOFDM0_RxIQExtAnta, 0xF0000000, reg);
+ }
+}
+
+static void pathb_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], u8 final_candidate, bool txonly)
+{
+ u32 Oldval_1, X, TX1_A, reg;
+ s32 Y, TX1_C;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("Path B IQ Calibration %s !\n",
+ (iqkok) ? "Success" : "Failed"));
+
+ if (final_candidate == 0xFF) {
+ return;
+ } else if (iqkok) {
+ Oldval_1 = (ODM_GetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
+
+ X = result[final_candidate][4];
+ if ((X & 0x00000200) != 0)
+ X = X | 0xFFFFFC00;
+ TX1_A = (X * Oldval_1) >> 8;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("X = 0x%x, TX1_A = 0x%x\n", X, TX1_A));
+ ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, 0x3FF, TX1_A);
+
+ ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(27), ((X * Oldval_1>>7) & 0x1));
+
+ Y = result[final_candidate][5];
+ if ((Y & 0x00000200) != 0)
+ Y = Y | 0xFFFFFC00;
+
+ TX1_C = (Y * Oldval_1) >> 8;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Y = 0x%x, TX1_C = 0x%x\n", Y, TX1_C));
+ ODM_SetBBReg(dm_odm, rOFDM0_XDTxAFE, 0xF0000000, ((TX1_C&0x3C0)>>6));
+ ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, 0x003F0000, (TX1_C&0x3F));
+
+ ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(25), ((Y * Oldval_1>>7) & 0x1));
+
+ if (txonly)
+ return;
+
+ reg = result[final_candidate][6];
+ ODM_SetBBReg(dm_odm, rOFDM0_XBRxIQImbalance, 0x3FF, reg);
+
+ reg = result[final_candidate][7] & 0x3F;
+ ODM_SetBBReg(dm_odm, rOFDM0_XBRxIQImbalance, 0xFC00, reg);
+
+ reg = (result[final_candidate][7] >> 6) & 0xF;
+ ODM_SetBBReg(dm_odm, rOFDM0_AGCRSSITable, 0x0000F000, reg);
+ }
+}
+
+/* */
+/* 2011/07/26 MH Add an API for testing IQK fail case. */
+/* */
+/* MP Already declare in odm.c */
+static bool ODM_CheckPowerStatus(struct adapter *Adapter)
+{
+ return true;
+}
+
+void _PHY_SaveADDARegisters(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup, u32 RegisterNum)
+{
+ u32 i;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
+ if (!ODM_CheckPowerStatus(adapt))
+ return;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Save ADDA parameters.\n"));
+ for (i = 0; i < RegisterNum; i++) {
+ ADDABackup[i] = ODM_GetBBReg(dm_odm, ADDAReg[i], bMaskDWord);
+ }
+}
+
+static void _PHY_SaveMACRegisters(
+ struct adapter *adapt,
+ u32 *MACReg,
+ u32 *MACBackup
+ )
+{
+ u32 i;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Save MAC parameters.\n"));
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) {
+ MACBackup[i] = ODM_Read1Byte(dm_odm, MACReg[i]);
+ }
+ MACBackup[i] = ODM_Read4Byte(dm_odm, MACReg[i]);
+}
+
+static void reload_adda_reg(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup, u32 RegisterNum)
+{
+ u32 i;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Reload ADDA power saving parameters !\n"));
+ for (i = 0; i < RegisterNum; i++)
+ ODM_SetBBReg(dm_odm, ADDAReg[i], bMaskDWord, ADDABackup[i]);
+}
+
+static void
+_PHY_ReloadMACRegisters(
+ struct adapter *adapt,
+ u32 *MACReg,
+ u32 *MACBackup
+ )
+{
+ u32 i;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Reload MAC parameters !\n"));
+ for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) {
+ ODM_Write1Byte(dm_odm, MACReg[i], (u8)MACBackup[i]);
+ }
+ ODM_Write4Byte(dm_odm, MACReg[i], MACBackup[i]);
+}
+
+void
+_PHY_PathADDAOn(
+ struct adapter *adapt,
+ u32 *ADDAReg,
+ bool isPathAOn,
+ bool is2t
+ )
+{
+ u32 pathOn;
+ u32 i;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("ADDA ON.\n"));
+
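+ /* Pick the ADDA-on value for the requested path; 1T1R parts always use a single fixed setting. */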
+ pathOn = isPathAOn ? 0x04db25a4 : 0x0b1b25a4;
+ if (!is2t) {
+ pathOn = 0x0bdb25a0;
+ ODM_SetBBReg(dm_odm, ADDAReg[0], bMaskDWord, 0x0b1b25a0);
+ } else {
+ ODM_SetBBReg(dm_odm, ADDAReg[0], bMaskDWord, pathOn);
+ }
+
+ for (i = 1; i < IQK_ADDA_REG_NUM; i++)
+ ODM_SetBBReg(dm_odm, ADDAReg[i], bMaskDWord, pathOn);
+}
+
+void
+_PHY_MACSettingCalibration(
+ struct adapter *adapt,
+ u32 *MACReg,
+ u32 *MACBackup
+ )
+{
+ u32 i = 0;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("MAC settings for Calibration.\n"));
+
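+ /* Write 0x3F to the first MAC register (TXPAUSE), then clear BIT3 of the middle registers and BIT5 of the last one. */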
+ ODM_Write1Byte(dm_odm, MACReg[i], 0x3F);
+
+ for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++) {
+ ODM_Write1Byte(dm_odm, MACReg[i], (u8)(MACBackup[i]&(~BIT3)));
+ }
+ ODM_Write1Byte(dm_odm, MACReg[i], (u8)(MACBackup[i]&(~BIT5)));
+}
+
+void
+_PHY_PathAStandBy(
+ struct adapter *adapt
+ )
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path-A standby mode!\n"));
+
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x0);
+ ODM_SetBBReg(dm_odm, 0x840, bMaskDWord, 0x00010000);
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+}
+
+static void _PHY_PIModeSwitch(
+ struct adapter *adapt,
+ bool PIMode
+ )
+{
+ u32 mode;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("BB Switch to %s mode!\n", (PIMode ? "PI" : "SI")));
+
+ mode = PIMode ? 0x01000100 : 0x01000000;
+ ODM_SetBBReg(dm_odm, rFPGA0_XA_HSSIParameter1, bMaskDWord, mode);
+ ODM_SetBBReg(dm_odm, rFPGA0_XB_HSSIParameter1, bMaskDWord, mode);
+}
+
+static bool phy_SimularityCompare_8188E(
+ struct adapter *adapt,
+ s32 resulta[][8],
+ u8 c1,
+ u8 c2
+ )
+{
+ u32 i, j, diff, sim_bitmap, bound = 0;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ u8 final_candidate[2] = {0xFF, 0xFF}; /* for path A and path B */
+ bool result = true;
+ bool is2t;
+ s32 tmp1 = 0, tmp2 = 0;
+
+ if ((dm_odm->RFType == ODM_2T2R) || (dm_odm->RFType == ODM_2T3R) || (dm_odm->RFType == ODM_2T4R))
+ is2t = true;
+ else
+ is2t = false;
+
+ if (is2t)
+ bound = 8;
+ else
+ bound = 4;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("===> IQK:phy_SimularityCompare_8188E c1 %d c2 %d!!!\n", c1, c2));
+
+ sim_bitmap = 0;
+
+ for (i = 0; i < bound; i++) {
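+ /* Odd-indexed entries are signed 10-bit values; sign-extend them so the difference is computed correctly. */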
+ if ((i == 1) || (i == 3) || (i == 5) || (i == 7)) {
+ if ((resulta[c1][i] & 0x00000200) != 0)
+ tmp1 = resulta[c1][i] | 0xFFFFFC00;
+ else
+ tmp1 = resulta[c1][i];
+
+ if ((resulta[c2][i] & 0x00000200) != 0)
+ tmp2 = resulta[c2][i] | 0xFFFFFC00;
+ else
+ tmp2 = resulta[c2][i];
+ } else {
+ tmp1 = resulta[c1][i];
+ tmp2 = resulta[c2][i];
+ }
+
+ diff = (tmp1 > tmp2) ? (tmp1 - tmp2) : (tmp2 - tmp1);
+
+ if (diff > MAX_TOLERANCE) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("IQK:phy_SimularityCompare_8188E differnece overflow index %d compare1 0x%x compare2 0x%x!!!\n",
+ i, resulta[c1][i], resulta[c2][i]));
+
+ if ((i == 2 || i == 6) && !sim_bitmap) {
+ if (resulta[c1][i] + resulta[c1][i+1] == 0)
+ final_candidate[(i/4)] = c2;
+ else if (resulta[c2][i] + resulta[c2][i+1] == 0)
+ final_candidate[(i/4)] = c1;
+ else
+ sim_bitmap = sim_bitmap | (1<<i);
+ } else {
+ sim_bitmap = sim_bitmap | (1<<i);
+ }
+ }
+ }
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK:phy_SimularityCompare_8188E sim_bitmap %d !!!\n", sim_bitmap));
+
+ if (sim_bitmap == 0) {
+ for (i = 0; i < (bound/4); i++) {
+ if (final_candidate[i] != 0xFF) {
+ for (j = i*4; j < (i+1)*4-2; j++)
+ resulta[3][j] = resulta[final_candidate[i]][j];
+ result = false;
+ }
+ }
+ return result;
+ } else {
+ if (!(sim_bitmap & 0x03)) { /* path A TX OK */
+ for (i = 0; i < 2; i++)
+ resulta[3][i] = resulta[c1][i];
+ }
+ if (!(sim_bitmap & 0x0c)) { /* path A RX OK */
+ for (i = 2; i < 4; i++)
+ resulta[3][i] = resulta[c1][i];
+ }
+
+ if (!(sim_bitmap & 0x30)) { /* path B TX OK */
+ for (i = 4; i < 6; i++)
+ resulta[3][i] = resulta[c1][i];
+ }
+
+ if (!(sim_bitmap & 0xc0)) { /* path B RX OK */
+ for (i = 6; i < 8; i++)
+ resulta[3][i] = resulta[c1][i];
+ }
+ return false;
+ }
+}
+
+static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t, bool is2t)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ u32 i;
+ u8 PathAOK, PathBOK;
+ u32 ADDA_REG[IQK_ADDA_REG_NUM] = {
+ rFPGA0_XCD_SwitchControl, rBlue_Tooth,
+ rRx_Wait_CCA, rTx_CCK_RFON,
+ rTx_CCK_BBON, rTx_OFDM_RFON,
+ rTx_OFDM_BBON, rTx_To_Rx,
+ rTx_To_Tx, rRx_CCK,
+ rRx_OFDM, rRx_Wait_RIFS,
+ rRx_TO_Rx, rStandby,
+ rSleep, rPMPD_ANAEN };
+ u32 IQK_MAC_REG[IQK_MAC_REG_NUM] = {
+ REG_TXPAUSE, REG_BCN_CTRL,
+ REG_BCN_CTRL_1, REG_GPIO_MUXCFG};
+
+ /* since 92C & 92D have the different define in IQK_BB_REG */
+ u32 IQK_BB_REG_92C[IQK_BB_REG_NUM] = {
+ rOFDM0_TRxPathEnable, rOFDM0_TRMuxPar,
+ rFPGA0_XCD_RFInterfaceSW, rConfig_AntA, rConfig_AntB,
+ rFPGA0_XAB_RFInterfaceSW, rFPGA0_XA_RFInterfaceOE,
+ rFPGA0_XB_RFInterfaceOE, rFPGA0_RFMOD
+ };
+
+ u32 retryCount = (*(dm_odm->mp_mode) == 1) ? 9 : 2;
+ /* Note: IQ calibration must be performed after loading */
+ /* PHY_REG.txt , and radio_a, radio_b.txt */
+
+ if (t == 0) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQ Calibration for %s for %d times\n", (is2t ? "2T2R" : "1T1R"), t));
+
+ /* Save ADDA parameters, turn Path A ADDA on */
+ _PHY_SaveADDARegisters(adapt, ADDA_REG, dm_odm->RFCalibrateInfo.ADDA_backup, IQK_ADDA_REG_NUM);
+ _PHY_SaveMACRegisters(adapt, IQK_MAC_REG, dm_odm->RFCalibrateInfo.IQK_MAC_backup);
+ _PHY_SaveADDARegisters(adapt, IQK_BB_REG_92C, dm_odm->RFCalibrateInfo.IQK_BB_backup, IQK_BB_REG_NUM);
+ }
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQ Calibration for %s for %d times\n", (is2t ? "2T2R" : "1T1R"), t));
+
+ _PHY_PathADDAOn(adapt, ADDA_REG, true, is2t);
+ if (t == 0)
+ dm_odm->RFCalibrateInfo.bRfPiEnable = (u8)ODM_GetBBReg(dm_odm, rFPGA0_XA_HSSIParameter1, BIT(8));
+
+ if (!dm_odm->RFCalibrateInfo.bRfPiEnable) {
+ /* Switch BB to PI mode to do IQ Calibration. */
+ _PHY_PIModeSwitch(adapt, true);
+ }
+
+ /* BB setting */
+ ODM_SetBBReg(dm_odm, rFPGA0_RFMOD, BIT24, 0x00);
+ ODM_SetBBReg(dm_odm, rOFDM0_TRxPathEnable, bMaskDWord, 0x03a05600);
+ ODM_SetBBReg(dm_odm, rOFDM0_TRMuxPar, bMaskDWord, 0x000800e4);
+ ODM_SetBBReg(dm_odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, 0x22204000);
+
+ ODM_SetBBReg(dm_odm, rFPGA0_XAB_RFInterfaceSW, BIT10, 0x01);
+ ODM_SetBBReg(dm_odm, rFPGA0_XAB_RFInterfaceSW, BIT26, 0x01);
+ ODM_SetBBReg(dm_odm, rFPGA0_XA_RFInterfaceOE, BIT10, 0x00);
+ ODM_SetBBReg(dm_odm, rFPGA0_XB_RFInterfaceOE, BIT10, 0x00);
+
+ if (is2t) {
+ ODM_SetBBReg(dm_odm, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00010000);
+ ODM_SetBBReg(dm_odm, rFPGA0_XB_LSSIParameter, bMaskDWord, 0x00010000);
+ }
+
+ /* MAC settings */
+ _PHY_MACSettingCalibration(adapt, IQK_MAC_REG, dm_odm->RFCalibrateInfo.IQK_MAC_backup);
+
+ /* Page B init */
+ /* AP or IQK */
+ ODM_SetBBReg(dm_odm, rConfig_AntA, bMaskDWord, 0x0f600000);
+
+ if (is2t)
+ ODM_SetBBReg(dm_odm, rConfig_AntB, bMaskDWord, 0x0f600000);
+
+ /* IQ calibration setting */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK setting!\n"));
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ ODM_SetBBReg(dm_odm, rTx_IQK, bMaskDWord, 0x01007c00);
+ ODM_SetBBReg(dm_odm, rRx_IQK, bMaskDWord, 0x81004800);
+
+ for (i = 0; i < retryCount; i++) {
+ PathAOK = phy_PathA_IQK_8188E(adapt, is2t);
+ if (PathAOK == 0x01) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Tx IQK Success!!\n"));
+ result[t][0] = (ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_A, bMaskDWord)&0x3FF0000)>>16;
+ result[t][1] = (ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_A, bMaskDWord)&0x3FF0000)>>16;
+ break;
+ }
+ }
+
+ for (i = 0; i < retryCount; i++) {
+ PathAOK = phy_PathA_RxIQK(adapt, is2t);
+ if (PathAOK == 0x03) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK Success!!\n"));
+ result[t][2] = (ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
+ result[t][3] = (ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_A_2, bMaskDWord)&0x3FF0000)>>16;
+ break;
+ } else {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A Rx IQK Fail!!\n"));
+ }
+ }
+
+ if (0x00 == PathAOK) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path A IQK failed!!\n"));
+ }
+
+ if (is2t) {
+ _PHY_PathAStandBy(adapt);
+
+ /* Turn Path B ADDA on */
+ _PHY_PathADDAOn(adapt, ADDA_REG, false, is2t);
+
+ for (i = 0; i < retryCount; i++) {
+ PathBOK = phy_PathB_IQK_8188E(adapt);
+ if (PathBOK == 0x03) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B IQK Success!!\n"));
+ result[t][4] = (ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_B, bMaskDWord)&0x3FF0000)>>16;
+ result[t][5] = (ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_B, bMaskDWord)&0x3FF0000)>>16;
+ result[t][6] = (ODM_GetBBReg(dm_odm, rRx_Power_Before_IQK_B_2, bMaskDWord)&0x3FF0000)>>16;
+ result[t][7] = (ODM_GetBBReg(dm_odm, rRx_Power_After_IQK_B_2, bMaskDWord)&0x3FF0000)>>16;
+ break;
+ } else if (i == (retryCount - 1) && PathBOK == 0x01) { /* Tx IQK OK */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B Only Tx IQK Success!!\n"));
+ result[t][4] = (ODM_GetBBReg(dm_odm, rTx_Power_Before_IQK_B, bMaskDWord)&0x3FF0000)>>16;
+ result[t][5] = (ODM_GetBBReg(dm_odm, rTx_Power_After_IQK_B, bMaskDWord)&0x3FF0000)>>16;
+ }
+ }
+
+ if (0x00 == PathBOK) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("Path B IQK failed!!\n"));
+ }
+ }
+
+ /* Back to BB mode, load original value */
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK:Back to BB mode, load original value!\n"));
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0);
+
+ if (t != 0) {
+ if (!dm_odm->RFCalibrateInfo.bRfPiEnable) {
+ /* Switch back BB to SI mode after finish IQ Calibration. */
+ _PHY_PIModeSwitch(adapt, false);
+ }
+
+ /* Reload ADDA power saving parameters */
+ reload_adda_reg(adapt, ADDA_REG, dm_odm->RFCalibrateInfo.ADDA_backup, IQK_ADDA_REG_NUM);
+
+ /* Reload MAC parameters */
+ _PHY_ReloadMACRegisters(adapt, IQK_MAC_REG, dm_odm->RFCalibrateInfo.IQK_MAC_backup);
+
+ reload_adda_reg(adapt, IQK_BB_REG_92C, dm_odm->RFCalibrateInfo.IQK_BB_backup, IQK_BB_REG_NUM);
+
+ /* Restore RX initial gain */
+ ODM_SetBBReg(dm_odm, rFPGA0_XA_LSSIParameter, bMaskDWord, 0x00032ed3);
+ if (is2t)
+ ODM_SetBBReg(dm_odm, rFPGA0_XB_LSSIParameter, bMaskDWord, 0x00032ed3);
+
+ /* load 0xe30 IQC default value */
+ ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x01008c00);
+ ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x01008c00);
+ }
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_IQCalibrate_8188E() <==\n"));
+}
+
+static void phy_LCCalibrate_8188E(struct adapter *adapt, bool is2t)
+{
+ u8 tmpreg;
+ u32 RF_Amode = 0, RF_Bmode = 0, LC_Cal;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
+ /* Check continuous TX and Packet TX */
+ tmpreg = ODM_Read1Byte(dm_odm, 0xd03);
+
+ if ((tmpreg&0x70) != 0) /* Deal with continuous TX case */
+ ODM_Write1Byte(dm_odm, 0xd03, tmpreg&0x8F); /* disable all continuous TX */
+ else /* Deal with Packet TX case */
+ ODM_Write1Byte(dm_odm, REG_TXPAUSE, 0xFF); /* block all queues */
+
+ if ((tmpreg&0x70) != 0) {
+ /* 1. Read original RF mode */
+ /* Path-A */
+ RF_Amode = PHY_QueryRFReg(adapt, RF_PATH_A, RF_AC, bMask12Bits);
+
+ /* Path-B */
+ if (is2t)
+ RF_Bmode = PHY_QueryRFReg(adapt, RF_PATH_B, RF_AC, bMask12Bits);
+
+ /* 2. Set RF mode = standby mode */
+ /* Path-A */
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_AC, bMask12Bits, (RF_Amode&0x8FFFF)|0x10000);
+
+ /* Path-B */
+ if (is2t)
+ ODM_SetRFReg(dm_odm, RF_PATH_B, RF_AC, bMask12Bits, (RF_Bmode&0x8FFFF)|0x10000);
+ }
+
+ /* 3. Read RF reg18 */
+ LC_Cal = PHY_QueryRFReg(adapt, RF_PATH_A, RF_CHNLBW, bMask12Bits);
+
+ /* 4. Set LC calibration begin bit15 */
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_CHNLBW, bMask12Bits, LC_Cal|0x08000);
+
+ ODM_sleep_ms(100);
+
+ /* Restore original situation */
+ if ((tmpreg&0x70) != 0) {
+ /* Deal with continuous TX case */
+ /* Path-A */
+ ODM_Write1Byte(dm_odm, 0xd03, tmpreg);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_AC, bMask12Bits, RF_Amode);
+
+ /* Path-B */
+ if (is2t)
+ ODM_SetRFReg(dm_odm, RF_PATH_B, RF_AC, bMask12Bits, RF_Bmode);
+ } else {
+ /* Deal with Packet TX case */
+ ODM_Write1Byte(dm_odm, REG_TXPAUSE, 0x00);
+ }
+}
+
+/* Analog Pre-distortion calibration */
+#define APK_BB_REG_NUM 8
+#define APK_CURVE_REG_NUM 4
+#define PATH_NUM 2
+
+static void phy_APCalibrate_8188E(struct adapter *adapt, s8 delta, bool is2t)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ u32 regD[PATH_NUM];
+ u32 tmpreg, index, offset, apkbound;
+ u8 path, i, pathbound = PATH_NUM;
+ u32 BB_backup[APK_BB_REG_NUM];
+ u32 BB_REG[APK_BB_REG_NUM] = {
+ rFPGA1_TxBlock, rOFDM0_TRxPathEnable,
+ rFPGA0_RFMOD, rOFDM0_TRMuxPar,
+ rFPGA0_XCD_RFInterfaceSW, rFPGA0_XAB_RFInterfaceSW,
+ rFPGA0_XA_RFInterfaceOE, rFPGA0_XB_RFInterfaceOE };
+ u32 BB_AP_MODE[APK_BB_REG_NUM] = {
+ 0x00000020, 0x00a05430, 0x02040000,
+ 0x000800e4, 0x00204000 };
+ u32 BB_normal_AP_MODE[APK_BB_REG_NUM] = {
+ 0x00000020, 0x00a05430, 0x02040000,
+ 0x000800e4, 0x22204000 };
+
+ u32 AFE_backup[IQK_ADDA_REG_NUM];
+ u32 AFE_REG[IQK_ADDA_REG_NUM] = {
+ rFPGA0_XCD_SwitchControl, rBlue_Tooth,
+ rRx_Wait_CCA, rTx_CCK_RFON,
+ rTx_CCK_BBON, rTx_OFDM_RFON,
+ rTx_OFDM_BBON, rTx_To_Rx,
+ rTx_To_Tx, rRx_CCK,
+ rRx_OFDM, rRx_Wait_RIFS,
+ rRx_TO_Rx, rStandby,
+ rSleep, rPMPD_ANAEN };
+
+ u32 MAC_backup[IQK_MAC_REG_NUM];
+ u32 MAC_REG[IQK_MAC_REG_NUM] = {
+ REG_TXPAUSE, REG_BCN_CTRL,
+ REG_BCN_CTRL_1, REG_GPIO_MUXCFG};
+
+ u32 APK_RF_init_value[PATH_NUM][APK_BB_REG_NUM] = {
+ {0x0852c, 0x1852c, 0x5852c, 0x1852c, 0x5852c},
+ {0x2852e, 0x0852e, 0x3852e, 0x0852e, 0x0852e}
+ };
+
+ u32 APK_normal_RF_init_value[PATH_NUM][APK_BB_REG_NUM] = {
+ {0x0852c, 0x0a52c, 0x3a52c, 0x5a52c, 0x5a52c}, /* path settings equal to path b settings */
+ {0x0852c, 0x0a52c, 0x5a52c, 0x5a52c, 0x5a52c}
+ };
+
+ u32 APK_RF_value_0[PATH_NUM][APK_BB_REG_NUM] = {
+ {0x52019, 0x52014, 0x52013, 0x5200f, 0x5208d},
+ {0x5201a, 0x52019, 0x52016, 0x52033, 0x52050}
+ };
+
+ u32 APK_normal_RF_value_0[PATH_NUM][APK_BB_REG_NUM] = {
+ {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}, /* path settings equal to path b settings */
+ {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}
+ };
+
+ u32 AFE_on_off[PATH_NUM] = {
+ 0x04db25a4, 0x0b1b25a4}; /* path A on path B off / path A off path B on */
+
+ u32 APK_offset[PATH_NUM] = {
+ rConfig_AntA, rConfig_AntB};
+
+ u32 APK_normal_offset[PATH_NUM] = {
+ rConfig_Pmpd_AntA, rConfig_Pmpd_AntB};
+
+ u32 APK_value[PATH_NUM] = {
+ 0x92fc0000, 0x12fc0000};
+
+ u32 APK_normal_value[PATH_NUM] = {
+ 0x92680000, 0x12680000};
+
+ s8 APK_delta_mapping[APK_BB_REG_NUM][13] = {
+ {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+ {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+ {-6, -4, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+ {-1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6},
+ {-11, -9, -7, -5, -3, -1, 0, 0, 0, 0, 0, 0, 0}
+ };
+
+ u32 APK_normal_setting_value_1[13] = {
+ 0x01017018, 0xf7ed8f84, 0x1b1a1816, 0x2522201e, 0x322e2b28,
+ 0x433f3a36, 0x5b544e49, 0x7b726a62, 0xa69a8f84, 0xdfcfc0b3,
+ 0x12680000, 0x00880000, 0x00880000
+ };
+
+ u32 APK_normal_setting_value_2[16] = {
+ 0x01c7021d, 0x01670183, 0x01000123, 0x00bf00e2, 0x008d00a3,
+ 0x0068007b, 0x004d0059, 0x003a0042, 0x002b0031, 0x001f0025,
+ 0x0017001b, 0x00110014, 0x000c000f, 0x0009000b, 0x00070008,
+ 0x00050006
+ };
+
+ u32 APK_result[PATH_NUM][APK_BB_REG_NUM]; /* val_1_1a, val_1_2a, val_2a, val_3a, val_4a */
+ s32 BB_offset, delta_V, delta_offset;
+
+ if (*(dm_odm->mp_mode) == 1) {
+ struct mpt_context *pMptCtx = &(adapt->mppriv.MptCtx);
+ pMptCtx->APK_bound[0] = 45;
+ pMptCtx->APK_bound[1] = 52;
+ }
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("==>phy_APCalibrate_8188E() delta %d\n", delta));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("AP Calibration for %s\n", (is2t ? "2T2R" : "1T1R")));
+ if (!is2t)
+ pathbound = 1;
+
+ /* 2 FOR NORMAL CHIP SETTINGS */
+
+/* Temporarily do not allow the normal driver to do the following settings,
+ * because these offsets and values will cause the RF internal PA to be
+ * unpredictably disabled by HW, such that the RF Tx signal will disappear
+ * after disabling/enabling the card many times on 88CU. RF SD and DD have
+ * not found the root cause, so we remove these actions temporarily.
+ */
+ if (*(dm_odm->mp_mode) != 1)
+ return;
+ /* settings adjust for normal chip */
+ for (index = 0; index < PATH_NUM; index++) {
+ APK_offset[index] = APK_normal_offset[index];
+ APK_value[index] = APK_normal_value[index];
+ AFE_on_off[index] = 0x6fdb25a4;
+ }
+
+ for (index = 0; index < APK_BB_REG_NUM; index++) {
+ for (path = 0; path < pathbound; path++) {
+ APK_RF_init_value[path][index] = APK_normal_RF_init_value[path][index];
+ APK_RF_value_0[path][index] = APK_normal_RF_value_0[path][index];
+ }
+ BB_AP_MODE[index] = BB_normal_AP_MODE[index];
+ }
+
+ apkbound = 6;
+
+ /* save BB default value */
+ for (index = 0; index < APK_BB_REG_NUM; index++) {
+ if (index == 0) /* skip */
+ continue;
+ BB_backup[index] = ODM_GetBBReg(dm_odm, BB_REG[index], bMaskDWord);
+ }
+
+ /* save MAC default value */
+ _PHY_SaveMACRegisters(adapt, MAC_REG, MAC_backup);
+
+ /* save AFE default value */
+ _PHY_SaveADDARegisters(adapt, AFE_REG, AFE_backup, IQK_ADDA_REG_NUM);
+
+ for (path = 0; path < pathbound; path++) {
+ if (path == RF_PATH_A) {
+ /* path A APK */
+ /* load APK setting */
+ /* path-A */
+ offset = rPdp_AntA;
+ for (index = 0; index < 11; index++) {
+ ODM_SetBBReg(dm_odm, offset, bMaskDWord, APK_normal_setting_value_1[index]);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n",
+ offset, ODM_GetBBReg(dm_odm, offset, bMaskDWord)));
+ offset += 0x04;
+ }
+
+ ODM_SetBBReg(dm_odm, rConfig_Pmpd_AntB, bMaskDWord, 0x12680000);
+
+ offset = rConfig_AntA;
+ for (; index < 13; index++) {
+ ODM_SetBBReg(dm_odm, offset, bMaskDWord, APK_normal_setting_value_1[index]);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n",
+ offset, ODM_GetBBReg(dm_odm, offset, bMaskDWord)));
+ offset += 0x04;
+ }
+
+ /* page-B1 */
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x40000000);
+
+ /* path A */
+ offset = rPdp_AntA;
+ for (index = 0; index < 16; index++) {
+ ODM_SetBBReg(dm_odm, offset, bMaskDWord, APK_normal_setting_value_2[index]);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n",
+ offset, ODM_GetBBReg(dm_odm, offset, bMaskDWord)));
+
+ offset += 0x04;
+ }
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ } else if (path == RF_PATH_B) {
+ /* path B APK */
+ /* load APK setting */
+ /* path-B */
+ offset = rPdp_AntB;
+ for (index = 0; index < 10; index++) {
+ ODM_SetBBReg(dm_odm, offset, bMaskDWord, APK_normal_setting_value_1[index]);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n",
+ offset, ODM_GetBBReg(dm_odm, offset, bMaskDWord)));
+
+ offset += 0x04;
+ }
+ ODM_SetBBReg(dm_odm, rConfig_Pmpd_AntA, bMaskDWord, 0x12680000);
+ PHY_SetBBReg(adapt, rConfig_Pmpd_AntB, bMaskDWord, 0x12680000);
+
+ offset = rConfig_AntA;
+ index = 11;
+ for (; index < 13; index++) { /* offset 0xb68, 0xb6c */
+ ODM_SetBBReg(dm_odm, offset, bMaskDWord, APK_normal_setting_value_1[index]);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n",
+ offset, ODM_GetBBReg(dm_odm, offset, bMaskDWord)));
+ offset += 0x04;
+ }
+
+ /* page-B1 */
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x40000000);
+
+ /* path B */
+ offset = 0xb60;
+ for (index = 0; index < 16; index++) {
+ ODM_SetBBReg(dm_odm, offset, bMaskDWord, APK_normal_setting_value_2[index]);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n",
+ offset, ODM_GetBBReg(dm_odm, offset, bMaskDWord)));
+
+ offset += 0x04;
+ }
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0);
+ }
+
+ /* save RF default value */
+ regD[path] = PHY_QueryRFReg(adapt, path, RF_TXBIAS_A, bMaskDWord);
+
+ /* Path A AFE all on and path B AFE all off, or vice versa */
+ for (index = 0; index < IQK_ADDA_REG_NUM; index++)
+ ODM_SetBBReg(dm_odm, AFE_REG[index], bMaskDWord, AFE_on_off[path]);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("phy_APCalibrate_8188E() offset 0xe70 %x\n",
+ ODM_GetBBReg(dm_odm, rRx_Wait_CCA, bMaskDWord)));
+
+ /* BB to AP mode */
+ if (path == 0) {
+ for (index = 0; index < APK_BB_REG_NUM; index++) {
+ if (index == 0) /* skip */
+ continue;
+ else if (index < 5)
+ ODM_SetBBReg(dm_odm, BB_REG[index], bMaskDWord, BB_AP_MODE[index]);
+ else if (BB_REG[index] == 0x870)
+ ODM_SetBBReg(dm_odm, BB_REG[index], bMaskDWord, BB_backup[index]|BIT10|BIT26);
+ else
+ ODM_SetBBReg(dm_odm, BB_REG[index], BIT10, 0x0);
+ }
+
+ ODM_SetBBReg(dm_odm, rTx_IQK_Tone_A, bMaskDWord, 0x01008c00);
+ ODM_SetBBReg(dm_odm, rRx_IQK_Tone_A, bMaskDWord, 0x01008c00);
+ } else {
+ /* path B */
+ ODM_SetBBReg(dm_odm, rTx_IQK_Tone_B, bMaskDWord, 0x01008c00);
+ ODM_SetBBReg(dm_odm, rRx_IQK_Tone_B, bMaskDWord, 0x01008c00);
+ }
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("phy_APCalibrate_8188E() offset 0x800 %x\n",
+ ODM_GetBBReg(dm_odm, 0x800, bMaskDWord)));
+
+ /* MAC settings */
+ _PHY_MACSettingCalibration(adapt, MAC_REG, MAC_backup);
+
+ if (path == RF_PATH_A) {
+ /* Path B to standby mode */
+ ODM_SetRFReg(dm_odm, RF_PATH_B, RF_AC, bMaskDWord, 0x10000);
+ } else {
+ /* Path A to standby mode */
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_AC, bMaskDWord, 0x10000);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_MODE1, bMaskDWord, 0x1000f);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_MODE2, bMaskDWord, 0x20103);
+ }
+
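+ /* Map the thermal delta to a column index (clamped to 0..12) of APK_delta_mapping. */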
+ delta_offset = ((delta+14)/2);
+ if (delta_offset < 0)
+ delta_offset = 0;
+ else if (delta_offset > 12)
+ delta_offset = 12;
+
+ /* AP calibration */
+ for (index = 0; index < APK_BB_REG_NUM; index++) {
+ if (index != 1) /* only DO PA11+PAD01001, AP RF setting */
+ continue;
+
+ tmpreg = APK_RF_init_value[path][index];
+ if (!dm_odm->RFCalibrateInfo.bAPKThermalMeterIgnore) {
+ BB_offset = (tmpreg & 0xF0000) >> 16;
+
+ if (!(tmpreg & BIT15)) /* sign bit 0 */
+ BB_offset = -BB_offset;
+
+ delta_V = APK_delta_mapping[index][delta_offset];
+
+ BB_offset += delta_V;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("phy_APCalibrate_8188E() APK index %d tmpreg 0x%x delta_V %d delta_offset %d\n",
+ index, tmpreg, delta_V, delta_offset));
+
+ if (BB_offset < 0) {
+ tmpreg = tmpreg & (~BIT15);
+ BB_offset = -BB_offset;
+ } else {
+ tmpreg = tmpreg | BIT15;
+ }
+ tmpreg = (tmpreg & 0xFFF0FFFF) | (BB_offset << 16);
+ }
+
+ ODM_SetRFReg(dm_odm, path, RF_IPA_A, bMaskDWord, 0x8992e);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0xc %x\n", PHY_QueryRFReg(adapt, path, RF_IPA_A, bMaskDWord)));
+ ODM_SetRFReg(dm_odm, path, RF_AC, bMaskDWord, APK_RF_value_0[path][index]);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0x0 %x\n", PHY_QueryRFReg(adapt, path, RF_AC, bMaskDWord)));
+ ODM_SetRFReg(dm_odm, path, RF_TXBIAS_A, bMaskDWord, tmpreg);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0xd %x\n", PHY_QueryRFReg(adapt, path, RF_TXBIAS_A, bMaskDWord)));
+ /* PA11+PAD01111, one shot */
+ i = 0;
+ do {
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x80000000);
+ ODM_SetBBReg(dm_odm, APK_offset[path], bMaskDWord, APK_value[0]);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n", APK_offset[path], ODM_GetBBReg(dm_odm, APK_offset[path], bMaskDWord)));
+ ODM_delay_ms(3);
+ ODM_SetBBReg(dm_odm, APK_offset[path], bMaskDWord, APK_value[1]);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0x%x value 0x%x\n", APK_offset[path], ODM_GetBBReg(dm_odm, APK_offset[path], bMaskDWord)));
+
+ ODM_delay_ms(20);
+ ODM_SetBBReg(dm_odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
+
+ if (path == RF_PATH_A)
+ tmpreg = ODM_GetBBReg(dm_odm, rAPK, 0x03E00000);
+ else
+ tmpreg = ODM_GetBBReg(dm_odm, rAPK, 0xF8000000);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("phy_APCalibrate_8188E() offset 0xbd8[25:21] %x\n", tmpreg));
+
+ i++;
+ } while (tmpreg > apkbound && i < 4);
+
+ APK_result[path][index] = tmpreg;
+ }
+ }
+
+ /* reload MAC default value */
+ _PHY_ReloadMACRegisters(adapt, MAC_REG, MAC_backup);
+
+ /* reload BB default value */
+ for (index = 0; index < APK_BB_REG_NUM; index++) {
+ if (index == 0) /* skip */
+ continue;
+ ODM_SetBBReg(dm_odm, BB_REG[index], bMaskDWord, BB_backup[index]);
+ }
+
+ /* reload AFE default value */
+ reload_adda_reg(adapt, AFE_REG, AFE_backup, IQK_ADDA_REG_NUM);
+
+ /* reload RF path default value */
+ for (path = 0; path < pathbound; path++) {
+ ODM_SetRFReg(dm_odm, path, 0xd, bMaskDWord, regD[path]);
+ if (path == RF_PATH_B) {
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_MODE1, bMaskDWord, 0x1000f);
+ ODM_SetRFReg(dm_odm, RF_PATH_A, RF_MODE2, bMaskDWord, 0x20101);
+ }
+
+ /* note no index == 0 */
+ if (APK_result[path][1] > 6)
+ APK_result[path][1] = 6;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("apk path %d result %d 0x%x \t", path, 1, APK_result[path][1]));
+ }
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("\n"));
+
+ for (path = 0; path < pathbound; path++) {
+ ODM_SetRFReg(dm_odm, path, 0x3, bMaskDWord,
+ ((APK_result[path][1] << 15) | (APK_result[path][1] << 10) | (APK_result[path][1] << 5) | APK_result[path][1]));
+ if (path == RF_PATH_A)
+ ODM_SetRFReg(dm_odm, path, 0x4, bMaskDWord,
+ ((APK_result[path][1] << 15) | (APK_result[path][1] << 10) | (0x00 << 5) | 0x05));
+ else
+ ODM_SetRFReg(dm_odm, path, 0x4, bMaskDWord,
+ ((APK_result[path][1] << 15) | (APK_result[path][1] << 10) | (0x02 << 5) | 0x05));
+ ODM_SetRFReg(dm_odm, path, RF_BS_PA_APSET_G9_G11, bMaskDWord,
+ ((0x08 << 15) | (0x08 << 10) | (0x08 << 5) | 0x08));
+ }
+
+ dm_odm->RFCalibrateInfo.bAPKdone = true;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("<==phy_APCalibrate_8188E()\n"));
+}
+
+#define DP_BB_REG_NUM 7
+#define DP_RF_REG_NUM 1
+#define DP_RETRY_LIMIT 10
+#define DP_PATH_NUM 2
+#define DP_DPK_NUM 3
+#define DP_DPK_VALUE_NUM 2
+
+void PHY_IQCalibrate_8188E(struct adapter *adapt, bool recovery)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ struct mpt_context *pMptCtx = &(adapt->mppriv.MptCtx);
+ s32 result[4][8]; /* last is final result */
+ u8 i, final_candidate, Indexforchannel;
+ bool pathaok, pathbok;
+ s32 RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4, RegECC;
+ bool is12simular, is13simular, is23simular;
+ bool singletone = false, carrier_sup = false;
+ u32 IQK_BB_REG_92C[IQK_BB_REG_NUM] = {
+ rOFDM0_XARxIQImbalance, rOFDM0_XBRxIQImbalance,
+ rOFDM0_ECCAThreshold, rOFDM0_AGCRSSITable,
+ rOFDM0_XATxIQImbalance, rOFDM0_XBTxIQImbalance,
+ rOFDM0_XCTxAFE, rOFDM0_XDTxAFE,
+ rOFDM0_RxIQExtAnta};
+ bool is2t;
+
+ is2t = (dm_odm->RFType == ODM_2T2R) ? true : false;
+ if (ODM_CheckPowerStatus(adapt) == false)
+ return;
+
+ if (!(dm_odm->SupportAbility & ODM_RF_CALIBRATION))
+ return;
+
+ if (*(dm_odm->mp_mode) == 1) {
+ singletone = pMptCtx->bSingleTone;
+ carrier_sup = pMptCtx->bCarrierSuppression;
+ }
+
+	/* 20120213 <Kordan> Turn on when doing continuous Tx to pass lab testing. (required by Edlu) */
+ if (singletone || carrier_sup)
+ return;
+
+ if (recovery) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD, ("PHY_IQCalibrate_8188E: Return due to recovery!\n"));
+ reload_adda_reg(adapt, IQK_BB_REG_92C, dm_odm->RFCalibrateInfo.IQK_BB_backup_recover, 9);
+ return;
+ }
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK:Start!!!\n"));
+
+ for (i = 0; i < 8; i++) {
+ result[0][i] = 0;
+ result[1][i] = 0;
+ result[2][i] = 0;
+ if ((i == 0) || (i == 2) || (i == 4) || (i == 6))
+ result[3][i] = 0x100;
+ else
+ result[3][i] = 0;
+ }
+ final_candidate = 0xff;
+ pathaok = false;
+ pathbok = false;
+ is12simular = false;
+ is23simular = false;
+ is13simular = false;
+
+ for (i = 0; i < 3; i++) {
+ phy_IQCalibrate_8188E(adapt, result, i, is2t);
+
+ if (i == 1) {
+ is12simular = phy_SimularityCompare_8188E(adapt, result, 0, 1);
+ if (is12simular) {
+ final_candidate = 0;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: is12simular final_candidate is %x\n", final_candidate));
+ break;
+ }
+ }
+
+ if (i == 2) {
+ is13simular = phy_SimularityCompare_8188E(adapt, result, 0, 2);
+ if (is13simular) {
+ final_candidate = 0;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: is13simular final_candidate is %x\n", final_candidate));
+
+ break;
+ }
+ is23simular = phy_SimularityCompare_8188E(adapt, result, 1, 2);
+ if (is23simular) {
+ final_candidate = 1;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: is23simular final_candidate is %x\n", final_candidate));
+ } else {
+ final_candidate = 3;
+ }
+ }
+ }
+
+ for (i = 0; i < 4; i++) {
+ RegE94 = result[i][0];
+ RegE9C = result[i][1];
+ RegEA4 = result[i][2];
+ RegEAC = result[i][3];
+ RegEB4 = result[i][4];
+ RegEBC = result[i][5];
+ RegEC4 = result[i][6];
+ RegECC = result[i][7];
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("IQK: RegE94=%x RegE9C=%x RegEA4=%x RegEAC=%x RegEB4=%x RegEBC=%x RegEC4=%x RegECC=%x\n",
+ RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4, RegECC));
+ }
+
+ if (final_candidate != 0xff) {
+ RegE94 = result[final_candidate][0];
+ RegE9C = result[final_candidate][1];
+ RegEA4 = result[final_candidate][2];
+ RegEAC = result[final_candidate][3];
+ RegEB4 = result[final_candidate][4];
+ RegEBC = result[final_candidate][5];
+ dm_odm->RFCalibrateInfo.RegE94 = RegE94;
+ dm_odm->RFCalibrateInfo.RegE9C = RegE9C;
+ dm_odm->RFCalibrateInfo.RegEB4 = RegEB4;
+ dm_odm->RFCalibrateInfo.RegEBC = RegEBC;
+ RegEC4 = result[final_candidate][6];
+ RegECC = result[final_candidate][7];
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("IQK: final_candidate is %x\n", final_candidate));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("IQK: RegE94=%x RegE9C=%x RegEA4=%x RegEAC=%x RegEB4=%x RegEBC=%x RegEC4=%x RegECC=%x\n",
+ RegE94, RegE9C, RegEA4, RegEAC, RegEB4, RegEBC, RegEC4, RegECC));
+ pathaok = true;
+ pathbok = true;
+ } else {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK: FAIL use default value\n"));
+ dm_odm->RFCalibrateInfo.RegE94 = 0x100;
+ dm_odm->RFCalibrateInfo.RegEB4 = 0x100; /* X default value */
+ dm_odm->RFCalibrateInfo.RegE9C = 0x0;
+ dm_odm->RFCalibrateInfo.RegEBC = 0x0; /* Y default value */
+ }
+ if (RegE94 != 0)
+ patha_fill_iqk(adapt, pathaok, result, final_candidate, (RegEA4 == 0));
+ if (is2t) {
+ if (RegEB4 != 0)
+ pathb_fill_iqk(adapt, pathbok, result, final_candidate, (RegEC4 == 0));
+ }
+
+ Indexforchannel = ODM_GetRightChnlPlaceforIQK(pHalData->CurrentChannel);
+
+/* To Fix BSOD when final_candidate is 0xff */
+/* by sherry 20120321 */
+ if (final_candidate < 4) {
+ for (i = 0; i < IQK_Matrix_REG_NUM; i++)
+ dm_odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].Value[0][i] = result[final_candidate][i];
+ dm_odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].bIQKDone = true;
+ }
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("\nIQK OK Indexforchannel %d.\n", Indexforchannel));
+
+ _PHY_SaveADDARegisters(adapt, IQK_BB_REG_92C, dm_odm->RFCalibrateInfo.IQK_BB_backup_recover, 9);
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD, ("IQK finished\n"));
+}
+
+void PHY_LCCalibrate_8188E(struct adapter *adapt)
+{
+ bool singletone = false, carrier_sup = false;
+ u32 timeout = 2000, timecount = 0;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+ struct mpt_context *pMptCtx = &(adapt->mppriv.MptCtx);
+
+ if (*(dm_odm->mp_mode) == 1) {
+ singletone = pMptCtx->bSingleTone;
+ carrier_sup = pMptCtx->bCarrierSuppression;
+ }
+ if (!(dm_odm->SupportAbility & ODM_RF_CALIBRATION))
+ return;
+	/* 20120213 <Kordan> Turn on when doing continuous Tx to pass lab testing. (required by Edlu) */
+ if (singletone || carrier_sup)
+ return;
+
+ while (*(dm_odm->pbScanInProcess) && timecount < timeout) {
+ ODM_delay_ms(50);
+ timecount += 50;
+ }
+
+ dm_odm->RFCalibrateInfo.bLCKInProgress = true;
+
+ if (dm_odm->RFType == ODM_2T2R) {
+ phy_LCCalibrate_8188E(adapt, true);
+ } else {
+ /* For 88C 1T1R */
+ phy_LCCalibrate_8188E(adapt, false);
+ }
+
+ dm_odm->RFCalibrateInfo.bLCKInProgress = false;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_CALIBRATION, ODM_DBG_LOUD,
+ ("LCK:Finish!!!interface %d\n", dm_odm->InterfaceIndex));
+}
+
+void PHY_APCalibrate_8188E(struct adapter *adapt, s8 delta)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
+	/* AP calibration is disabled: return immediately, so the code below is not reached. */
+	return;
+ if (!(dm_odm->SupportAbility & ODM_RF_CALIBRATION))
+ return;
+
+#if FOR_BRAZIL_PRETEST != 1
+ if (dm_odm->RFCalibrateInfo.bAPKdone)
+#endif
+ return;
+
+ if (dm_odm->RFType == ODM_2T2R) {
+ phy_APCalibrate_8188E(adapt, delta, true);
+ } else {
+ /* For 88C 1T1R */
+ phy_APCalibrate_8188E(adapt, delta, false);
+ }
+}
+
+static void phy_setrfpathswitch_8188e(struct adapter *adapt, bool main, bool is2t)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
+ if (!adapt->hw_init_completed) {
+ u8 u1btmp;
+ u1btmp = ODM_Read1Byte(dm_odm, REG_LEDCFG2) | BIT7;
+ ODM_Write1Byte(dm_odm, REG_LEDCFG2, u1btmp);
+ ODM_SetBBReg(dm_odm, rFPGA0_XAB_RFParameter, BIT13, 0x01);
+ }
+
+ if (is2t) { /* 92C */
+ if (main)
+ ODM_SetBBReg(dm_odm, rFPGA0_XB_RFInterfaceOE, BIT5|BIT6, 0x1); /* 92C_Path_A */
+ else
+ ODM_SetBBReg(dm_odm, rFPGA0_XB_RFInterfaceOE, BIT5|BIT6, 0x2); /* BT */
+ } else { /* 88C */
+ if (main)
+ ODM_SetBBReg(dm_odm, rFPGA0_XA_RFInterfaceOE, BIT8|BIT9, 0x2); /* Main */
+ else
+ ODM_SetBBReg(dm_odm, rFPGA0_XA_RFInterfaceOE, BIT8|BIT9, 0x1); /* Aux */
+ }
+}
+
+void PHY_SetRFPathSwitch_8188E(struct adapter *adapt, bool main)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
+ struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
+
+ if (dm_odm->RFType == ODM_2T2R) {
+ phy_setrfpathswitch_8188e(adapt, main, true);
+ } else {
+ /* For 88C 1T1R */
+ phy_setrfpathswitch_8188e(adapt, main, false);
+ }
+}
diff --git a/drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c b/drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c
new file mode 100644
index 00000000000..e913a22a642
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/HalPwrSeqCmd.c
@@ -0,0 +1,132 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+/*++
+Copyright (c) Realtek Semiconductor Corp. All rights reserved.
+
+Module Name:
+ HalPwrSeqCmd.c
+
+Abstract:
+ Implement HW Power sequence configuration CMD handling routine for Realtek devices.
+
+Major Change History:
+ When Who What
+ ---------- --------------- -------------------------------
+ 2011-10-26 Lucas Modify to be compatible with SD4-CE driver.
+ 2011-07-07 Roger Create.
+
+--*/
+
+#include <HalPwrSeqCmd.h>
+
+/*
+ * Description:
+ *	This routine handles parsing of the Power Configuration CMDs
+ *	for the RTL8723/RTL8188E series ICs.
+ * Assumption:
+ *	The command tables follow the specific format released by HW SD.
+ */
+u8 HalPwrSeqCmdParsing(struct adapter *padapter, u8 cut_vers, u8 fab_vers,
+ u8 ifacetype, struct wl_pwr_cfg pwrseqcmd[])
+{
+ struct wl_pwr_cfg pwrcfgcmd = {0};
+ u8 poll_bit = false;
+ u32 aryidx = 0;
+ u8 value = 0;
+ u32 offset = 0;
+ u32 poll_count = 0; /* polling autoload done. */
+ u32 max_poll_count = 5000;
+
+ do {
+ pwrcfgcmd = pwrseqcmd[aryidx];
+
+		RT_TRACE(_module_hal_init_c_, _drv_info_,
+ ("HalPwrSeqCmdParsing: offset(%#x) cut_msk(%#x) fab_msk(%#x) interface_msk(%#x) base(%#x) cmd(%#x) msk(%#x) value(%#x)\n",
+ GET_PWR_CFG_OFFSET(pwrcfgcmd),
+ GET_PWR_CFG_CUT_MASK(pwrcfgcmd),
+ GET_PWR_CFG_FAB_MASK(pwrcfgcmd),
+ GET_PWR_CFG_INTF_MASK(pwrcfgcmd),
+ GET_PWR_CFG_BASE(pwrcfgcmd),
+ GET_PWR_CFG_CMD(pwrcfgcmd),
+ GET_PWR_CFG_MASK(pwrcfgcmd),
+ GET_PWR_CFG_VALUE(pwrcfgcmd)));
+
+ /* 2 Only Handle the command whose FAB, CUT, and Interface are matched */
+ if ((GET_PWR_CFG_FAB_MASK(pwrcfgcmd) & fab_vers) &&
+ (GET_PWR_CFG_CUT_MASK(pwrcfgcmd) & cut_vers) &&
+ (GET_PWR_CFG_INTF_MASK(pwrcfgcmd) & ifacetype)) {
+ switch (GET_PWR_CFG_CMD(pwrcfgcmd)) {
+ case PWR_CMD_READ:
+				RT_TRACE(_module_hal_init_c_, _drv_info_, ("HalPwrSeqCmdParsing: PWR_CMD_READ\n"));
+ break;
+ case PWR_CMD_WRITE:
+				RT_TRACE(_module_hal_init_c_, _drv_info_, ("HalPwrSeqCmdParsing: PWR_CMD_WRITE\n"));
+ offset = GET_PWR_CFG_OFFSET(pwrcfgcmd);
+
+ /* Read the value from system register */
+ value = rtw_read8(padapter, offset);
+
+ value &= ~(GET_PWR_CFG_MASK(pwrcfgcmd));
+ value |= (GET_PWR_CFG_VALUE(pwrcfgcmd) & GET_PWR_CFG_MASK(pwrcfgcmd));
+
+				/* Write the value back to system register */
+ rtw_write8(padapter, offset, value);
+ break;
+ case PWR_CMD_POLLING:
+				RT_TRACE(_module_hal_init_c_, _drv_info_, ("HalPwrSeqCmdParsing: PWR_CMD_POLLING\n"));
+
+ poll_bit = false;
+ offset = GET_PWR_CFG_OFFSET(pwrcfgcmd);
+ do {
+ value = rtw_read8(padapter, offset);
+
+ value &= GET_PWR_CFG_MASK(pwrcfgcmd);
+ if (value == (GET_PWR_CFG_VALUE(pwrcfgcmd) & GET_PWR_CFG_MASK(pwrcfgcmd)))
+ poll_bit = true;
+ else
+ rtw_udelay_os(10);
+
+ if (poll_count++ > max_poll_count) {
+						DBG_88E("Failed to poll Offset[%#x]\n", offset);
+ return false;
+ }
+ } while (!poll_bit);
+ break;
+ case PWR_CMD_DELAY:
+				RT_TRACE(_module_hal_init_c_, _drv_info_, ("HalPwrSeqCmdParsing: PWR_CMD_DELAY\n"));
+ if (GET_PWR_CFG_VALUE(pwrcfgcmd) == PWRSEQ_DELAY_US)
+ rtw_udelay_os(GET_PWR_CFG_OFFSET(pwrcfgcmd));
+ else
+ rtw_udelay_os(GET_PWR_CFG_OFFSET(pwrcfgcmd)*1000);
+ break;
+ case PWR_CMD_END:
+ /* When this command is parsed, end the process */
+				RT_TRACE(_module_hal_init_c_, _drv_info_, ("HalPwrSeqCmdParsing: PWR_CMD_END\n"));
+				return true;
+ default:
+				RT_TRACE(_module_hal_init_c_, _drv_err_, ("HalPwrSeqCmdParsing: Unknown CMD!!\n"));
+ break;
+ }
+ }
+
+		aryidx++;	/* advance to the next command descriptor */
+ } while (1);
+ return true;
+}
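+
+/*
+ * Call-site sketch (illustrative; the table and mask names below are
+ * hypothetical placeholders): the parser walks a descriptor array built
+ * from PWR_CMD_* entries and terminated by PWR_CMD_END, returning false
+ * if a PWR_CMD_POLLING entry times out.
+ *
+ *	if (!HalPwrSeqCmdParsing(padapter, cut_vers, fab_vers,
+ *				 iface_mask, example_power_on_flow))
+ *		DBG_88E("power-on sequence failed\n");
+ */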
diff --git a/drivers/staging/rtl8188eu/hal/hal_com.c b/drivers/staging/rtl8188eu/hal/hal_com.c
new file mode 100644
index 00000000000..829b900ee93
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/hal_com.c
@@ -0,0 +1,381 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#include <hal_intf.h>
+#include <hal_com.h>
+#include <rtl8188e_hal.h>
+
+#define _HAL_INIT_C_
+
+void dump_chip_info(struct HAL_VERSION chip_vers)
+{
+ uint cnt = 0;
+ char buf[128];
+
+ if (IS_81XXC(chip_vers)) {
+ cnt += sprintf((buf+cnt), "Chip Version Info: %s_",
+ IS_92C_SERIAL(chip_vers) ?
+ "CHIP_8192C" : "CHIP_8188C");
+ } else if (IS_92D(chip_vers)) {
+ cnt += sprintf((buf+cnt), "Chip Version Info: CHIP_8192D_");
+ } else if (IS_8723_SERIES(chip_vers)) {
+ cnt += sprintf((buf+cnt), "Chip Version Info: CHIP_8723A_");
+ } else if (IS_8188E(chip_vers)) {
+ cnt += sprintf((buf+cnt), "Chip Version Info: CHIP_8188E_");
+ }
+
+ cnt += sprintf((buf+cnt), "%s_", IS_NORMAL_CHIP(chip_vers) ?
+ "Normal_Chip" : "Test_Chip");
+ cnt += sprintf((buf+cnt), "%s_", IS_CHIP_VENDOR_TSMC(chip_vers) ?
+ "TSMC" : "UMC");
+ if (IS_A_CUT(chip_vers))
+ cnt += sprintf((buf+cnt), "A_CUT_");
+ else if (IS_B_CUT(chip_vers))
+ cnt += sprintf((buf+cnt), "B_CUT_");
+ else if (IS_C_CUT(chip_vers))
+ cnt += sprintf((buf+cnt), "C_CUT_");
+ else if (IS_D_CUT(chip_vers))
+ cnt += sprintf((buf+cnt), "D_CUT_");
+ else if (IS_E_CUT(chip_vers))
+ cnt += sprintf((buf+cnt), "E_CUT_");
+ else
+ cnt += sprintf((buf+cnt), "UNKNOWN_CUT(%d)_",
+ chip_vers.CUTVersion);
+
+ if (IS_1T1R(chip_vers))
+ cnt += sprintf((buf+cnt), "1T1R_");
+ else if (IS_1T2R(chip_vers))
+ cnt += sprintf((buf+cnt), "1T2R_");
+ else if (IS_2T2R(chip_vers))
+ cnt += sprintf((buf+cnt), "2T2R_");
+ else
+ cnt += sprintf((buf+cnt), "UNKNOWN_RFTYPE(%d)_",
+ chip_vers.RFType);
+
+ cnt += sprintf((buf+cnt), "RomVer(%d)\n", chip_vers.ROMVer);
+
+ pr_info("%s", buf);
+}
+
+#define CHAN_PLAN_HW 0x80
+
+u8 /* return the final channel plan decision */
+hal_com_get_channel_plan(struct adapter *padapter, u8 hw_channel_plan,
+ u8 sw_channel_plan, u8 def_channel_plan,
+ bool load_fail)
+{
+ u8 sw_cfg;
+ u8 chnlplan;
+
+ sw_cfg = true;
+ if (!load_fail) {
+ if (!rtw_is_channel_plan_valid(sw_channel_plan))
+ sw_cfg = false;
+ if (hw_channel_plan & CHAN_PLAN_HW)
+ sw_cfg = false;
+ }
+
+ if (sw_cfg)
+ chnlplan = sw_channel_plan;
+ else
+ chnlplan = hw_channel_plan & (~CHAN_PLAN_HW);
+
+ if (!rtw_is_channel_plan_valid(chnlplan))
+ chnlplan = def_channel_plan;
+
+ return chnlplan;
+}
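+
+/*
+ * Decision sketch (illustrative, with made-up plan numbers): when the
+ * EEPROM loaded successfully, the SW plan is used unless it is invalid or
+ * the HW plan has CHAN_PLAN_HW (bit 7) set; otherwise the HW plan with
+ * bit 7 cleared is taken, and an invalid result falls back to the default.
+ *
+ *	chnlplan = hal_com_get_channel_plan(padapter, 0x80 | 0x20, 0x21,
+ *					    0x7f, false);
+ *	// HW bit 7 forces the HW plan -> 0x20, assuming 0x20 is valid
+ */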
+
+u8 MRateToHwRate(u8 rate)
+{
+ u8 ret = DESC_RATE1M;
+
+ switch (rate) {
+ /* CCK and OFDM non-HT rates */
+ case IEEE80211_CCK_RATE_1MB:
+ ret = DESC_RATE1M;
+ break;
+ case IEEE80211_CCK_RATE_2MB:
+ ret = DESC_RATE2M;
+ break;
+ case IEEE80211_CCK_RATE_5MB:
+ ret = DESC_RATE5_5M;
+ break;
+ case IEEE80211_CCK_RATE_11MB:
+ ret = DESC_RATE11M;
+ break;
+ case IEEE80211_OFDM_RATE_6MB:
+ ret = DESC_RATE6M;
+ break;
+ case IEEE80211_OFDM_RATE_9MB:
+ ret = DESC_RATE9M;
+ break;
+ case IEEE80211_OFDM_RATE_12MB:
+ ret = DESC_RATE12M;
+ break;
+ case IEEE80211_OFDM_RATE_18MB:
+ ret = DESC_RATE18M;
+ break;
+ case IEEE80211_OFDM_RATE_24MB:
+ ret = DESC_RATE24M;
+ break;
+ case IEEE80211_OFDM_RATE_36MB:
+ ret = DESC_RATE36M;
+ break;
+ case IEEE80211_OFDM_RATE_48MB:
+ ret = DESC_RATE48M;
+ break;
+ case IEEE80211_OFDM_RATE_54MB:
+ ret = DESC_RATE54M;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+void HalSetBrateCfg(struct adapter *adapt, u8 *brates, u16 *rate_cfg)
+{
+ u8 i, is_brate, brate;
+
+ for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
+ is_brate = brates[i] & IEEE80211_BASIC_RATE_MASK;
+ brate = brates[i] & 0x7f;
+
+ if (is_brate) {
+ switch (brate) {
+ case IEEE80211_CCK_RATE_1MB:
+ *rate_cfg |= RATE_1M;
+ break;
+ case IEEE80211_CCK_RATE_2MB:
+ *rate_cfg |= RATE_2M;
+ break;
+ case IEEE80211_CCK_RATE_5MB:
+ *rate_cfg |= RATE_5_5M;
+ break;
+ case IEEE80211_CCK_RATE_11MB:
+ *rate_cfg |= RATE_11M;
+ break;
+ case IEEE80211_OFDM_RATE_6MB:
+ *rate_cfg |= RATE_6M;
+ break;
+ case IEEE80211_OFDM_RATE_9MB:
+ *rate_cfg |= RATE_9M;
+ break;
+ case IEEE80211_OFDM_RATE_12MB:
+ *rate_cfg |= RATE_12M;
+ break;
+ case IEEE80211_OFDM_RATE_18MB:
+ *rate_cfg |= RATE_18M;
+ break;
+ case IEEE80211_OFDM_RATE_24MB:
+ *rate_cfg |= RATE_24M;
+ break;
+ case IEEE80211_OFDM_RATE_36MB:
+ *rate_cfg |= RATE_36M;
+ break;
+ case IEEE80211_OFDM_RATE_48MB:
+ *rate_cfg |= RATE_48M;
+ break;
+ case IEEE80211_OFDM_RATE_54MB:
+ *rate_cfg |= RATE_54M;
+ break;
+ }
+ }
+ }
+}
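+
+/*
+ * Usage sketch (illustrative; the rate buffer and hwreg id shown are
+ * assumptions, not taken from this patch): the caller passes the 802.11
+ * supported-rates array, and rates flagged with IEEE80211_BASIC_RATE_MASK
+ * are accumulated into a RATE_* bitmap.
+ *
+ *	u16 brate_cfg = 0;
+ *
+ *	HalSetBrateCfg(adapt, supported_rates, &brate_cfg);
+ *	rtw_hal_set_hwreg(adapt, HW_VAR_BASIC_RATE, (u8 *)&brate_cfg);
+ */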
+
+static void one_out_pipe(struct adapter *adapter)
+{
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
+
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[0];/* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0];/* BK */
+
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
+}
+
+static void two_out_pipe(struct adapter *adapter, bool wifi_cfg)
+{
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
+
+ if (wifi_cfg) { /* WMM */
+ /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
+ /* 0, 1, 0, 1, 0, 0, 0, 0, 0}; */
+ /* 0:H, 1:L */
+
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[1];/* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[1];/* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[0];/* BK */
+
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
+
+ } else {/* typical setting */
+ /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
+ /* 1, 1, 0, 0, 0, 0, 0, 0, 0}; */
+ /* 0:H, 1:L */
+
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[0];/* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[1];/* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[1];/* BK */
+
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
+ }
+}
+
+static void three_out_pipe(struct adapter *adapter, bool wifi_cfg)
+{
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapter);
+
+ if (wifi_cfg) {/* for WMM */
+ /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
+ /* 1, 2, 1, 0, 0, 0, 0, 0, 0}; */
+ /* 0:H, 1:N, 2:L */
+
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[1];/* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[2];/* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[1];/* BK */
+
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
+
+ } else {/* typical setting */
+ /* BK, BE, VI, VO, BCN, CMD, MGT, HIGH, HCCA */
+ /* 2, 2, 1, 0, 0, 0, 0, 0, 0}; */
+ /* 0:H, 1:N, 2:L */
+
+ pdvobjpriv->Queue2Pipe[0] = pdvobjpriv->RtOutPipe[0];/* VO */
+ pdvobjpriv->Queue2Pipe[1] = pdvobjpriv->RtOutPipe[1];/* VI */
+ pdvobjpriv->Queue2Pipe[2] = pdvobjpriv->RtOutPipe[2];/* BE */
+ pdvobjpriv->Queue2Pipe[3] = pdvobjpriv->RtOutPipe[2];/* BK */
+
+ pdvobjpriv->Queue2Pipe[4] = pdvobjpriv->RtOutPipe[0];/* BCN */
+ pdvobjpriv->Queue2Pipe[5] = pdvobjpriv->RtOutPipe[0];/* MGT */
+ pdvobjpriv->Queue2Pipe[6] = pdvobjpriv->RtOutPipe[0];/* HIGH */
+ pdvobjpriv->Queue2Pipe[7] = pdvobjpriv->RtOutPipe[0];/* TXCMD */
+ }
+}
+
+bool Hal_MappingOutPipe(struct adapter *adapter, u8 numoutpipe)
+{
+ struct registry_priv *pregistrypriv = &adapter->registrypriv;
+ bool wifi_cfg = (pregistrypriv->wifi_spec) ? true : false;
+ bool result = true;
+
+ switch (numoutpipe) {
+ case 2:
+ two_out_pipe(adapter, wifi_cfg);
+ break;
+ case 3:
+ three_out_pipe(adapter, wifi_cfg);
+ break;
+ case 1:
+ one_out_pipe(adapter);
+ break;
+ default:
+ result = false;
+ break;
+ }
+ return result;
+}
+
+void hal_init_macaddr(struct adapter *adapter)
+{
+ rtw_hal_set_hwreg(adapter, HW_VAR_MAC_ADDR,
+ adapter->eeprompriv.mac_addr);
+}
+
+/*
+* C2H event format:
+* Field TRIGGER CONTENT CMD_SEQ CMD_LEN CMD_ID
+* BITS [127:120] [119:16] [15:8] [7:4] [3:0]
+*/
+
+void c2h_evt_clear(struct adapter *adapter)
+{
+ rtw_write8(adapter, REG_C2HEVT_CLEAR, C2H_EVT_HOST_CLOSE);
+}
+
+s32 c2h_evt_read(struct adapter *adapter, u8 *buf)
+{
+ s32 ret = _FAIL;
+ struct c2h_evt_hdr *c2h_evt;
+ int i;
+ u8 trigger;
+
+ if (buf == NULL)
+ goto exit;
+
+ trigger = rtw_read8(adapter, REG_C2HEVT_CLEAR);
+
+ if (trigger == C2H_EVT_HOST_CLOSE)
+ goto exit; /* Not ready */
+ else if (trigger != C2H_EVT_FW_CLOSE)
+ goto clear_evt; /* Not a valid value */
+
+ c2h_evt = (struct c2h_evt_hdr *)buf;
+
+ _rtw_memset(c2h_evt, 0, 16);
+
+ *buf = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL);
+ *(buf+1) = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL + 1);
+
+ RT_PRINT_DATA(_module_hal_init_c_, _drv_info_, "c2h_evt_read(): ",
+		      c2h_evt, sizeof(*c2h_evt));
+
+ /* Read the content */
+ for (i = 0; i < c2h_evt->plen; i++)
+ c2h_evt->payload[i] = rtw_read8(adapter, REG_C2HEVT_MSG_NORMAL +
+ sizeof(*c2h_evt) + i);
+
+ RT_PRINT_DATA(_module_hal_init_c_, _drv_info_,
+ "c2h_evt_read(): Command Content:\n",
+ c2h_evt->payload, c2h_evt->plen);
+
+ ret = _SUCCESS;
+
+clear_evt:
+ /*
+ * Clear event to notify FW we have read the command.
+ * If this field isn't clear, the FW won't update the next
+ * command message.
+ */
+ c2h_evt_clear(adapter);
+exit:
+ return ret;
+}
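+
+/*
+ * Field-extraction sketch matching the layout documented above
+ * (illustrative; the local variable names are hypothetical).  With the
+ * first two header bytes read into buf[0] and buf[1]:
+ *
+ *	u8 cmd_id  = buf[0] & 0x0f;		// CMD_ID,  bits [3:0]
+ *	u8 cmd_len = (buf[0] >> 4) & 0x0f;	// CMD_LEN, bits [7:4]
+ *	u8 cmd_seq = buf[1];			// CMD_SEQ, bits [15:8]
+ *
+ * This is the same information c2h_evt_read() exposes through the
+ * struct c2h_evt_hdr overlay on buf.
+ */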
diff --git a/drivers/staging/rtl8188eu/hal/hal_intf.c b/drivers/staging/rtl8188eu/hal/hal_intf.c
new file mode 100644
index 00000000000..598140464f0
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/hal_intf.c
@@ -0,0 +1,464 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#define _HAL_INTF_C_
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <hal_intf.h>
+#include <usb_hal.h>
+
+void rtw_hal_chip_configure(struct adapter *adapt)
+{
+ if (adapt->HalFunc.intf_chip_configure)
+ adapt->HalFunc.intf_chip_configure(adapt);
+}
+
+void rtw_hal_read_chip_info(struct adapter *adapt)
+{
+ if (adapt->HalFunc.read_adapter_info)
+ adapt->HalFunc.read_adapter_info(adapt);
+}
+
+void rtw_hal_read_chip_version(struct adapter *adapt)
+{
+ if (adapt->HalFunc.read_chip_version)
+ adapt->HalFunc.read_chip_version(adapt);
+}
+
+void rtw_hal_def_value_init(struct adapter *adapt)
+{
+ if (adapt->HalFunc.init_default_value)
+ adapt->HalFunc.init_default_value(adapt);
+}
+
+void rtw_hal_free_data(struct adapter *adapt)
+{
+ if (adapt->HalFunc.free_hal_data)
+ adapt->HalFunc.free_hal_data(adapt);
+}
+
+void rtw_hal_dm_init(struct adapter *adapt)
+{
+ if (adapt->HalFunc.dm_init)
+ adapt->HalFunc.dm_init(adapt);
+}
+
+void rtw_hal_dm_deinit(struct adapter *adapt)
+{
+ /* cancel dm timer */
+ if (adapt->HalFunc.dm_deinit)
+ adapt->HalFunc.dm_deinit(adapt);
+}
+
+void rtw_hal_sw_led_init(struct adapter *adapt)
+{
+ if (adapt->HalFunc.InitSwLeds)
+ adapt->HalFunc.InitSwLeds(adapt);
+}
+
+void rtw_hal_sw_led_deinit(struct adapter *adapt)
+{
+ if (adapt->HalFunc.DeInitSwLeds)
+ adapt->HalFunc.DeInitSwLeds(adapt);
+}
+
+u32 rtw_hal_power_on(struct adapter *adapt)
+{
+ if (adapt->HalFunc.hal_power_on)
+ return adapt->HalFunc.hal_power_on(adapt);
+ return _FAIL;
+}
+
+uint rtw_hal_init(struct adapter *adapt)
+{
+ uint status = _SUCCESS;
+
+ adapt->hw_init_completed = false;
+
+ status = adapt->HalFunc.hal_init(adapt);
+
+ if (status == _SUCCESS) {
+ adapt->hw_init_completed = true;
+
+ if (adapt->registrypriv.notch_filter == 1)
+ rtw_hal_notch_filter(adapt, 1);
+
+ rtw_hal_reset_security_engine(adapt);
+ } else {
+ adapt->hw_init_completed = false;
+		DBG_88E("rtw_hal_init: hal_init fail\n");
+ }
+
+ RT_TRACE(_module_hal_init_c_, _drv_err_,
+ ("-rtl871x_hal_init:status=0x%x\n", status));
+
+ return status;
+}
+
+uint rtw_hal_deinit(struct adapter *adapt)
+{
+ uint status = _SUCCESS;
+
+_func_enter_;
+
+ status = adapt->HalFunc.hal_deinit(adapt);
+
+ if (status == _SUCCESS)
+ adapt->hw_init_completed = false;
+ else
+		DBG_88E("\n rtw_hal_deinit: hal_deinit fail\n");
+
+_func_exit_;
+
+ return status;
+}
+
+void rtw_hal_set_hwreg(struct adapter *adapt, u8 variable, u8 *val)
+{
+ if (adapt->HalFunc.SetHwRegHandler)
+ adapt->HalFunc.SetHwRegHandler(adapt, variable, val);
+}
+
+void rtw_hal_get_hwreg(struct adapter *adapt, u8 variable, u8 *val)
+{
+ if (adapt->HalFunc.GetHwRegHandler)
+ adapt->HalFunc.GetHwRegHandler(adapt, variable, val);
+}
+
+u8 rtw_hal_set_def_var(struct adapter *adapt, enum hal_def_variable var,
+ void *val)
+{
+ if (adapt->HalFunc.SetHalDefVarHandler)
+ return adapt->HalFunc.SetHalDefVarHandler(adapt, var, val);
+ return _FAIL;
+}
+
+u8 rtw_hal_get_def_var(struct adapter *adapt,
+ enum hal_def_variable var, void *val)
+{
+ if (adapt->HalFunc.GetHalDefVarHandler)
+ return adapt->HalFunc.GetHalDefVarHandler(adapt, var, val);
+ return _FAIL;
+}
+
+void rtw_hal_set_odm_var(struct adapter *adapt,
+ enum hal_odm_variable var, void *val1,
+ bool set)
+{
+ if (adapt->HalFunc.SetHalODMVarHandler)
+ adapt->HalFunc.SetHalODMVarHandler(adapt, var,
+ val1, set);
+}
+
+void rtw_hal_get_odm_var(struct adapter *adapt,
+ enum hal_odm_variable var, void *val1,
+ bool set)
+{
+ if (adapt->HalFunc.GetHalODMVarHandler)
+ adapt->HalFunc.GetHalODMVarHandler(adapt, var,
+ val1, set);
+}
+
+void rtw_hal_enable_interrupt(struct adapter *adapt)
+{
+ if (adapt->HalFunc.enable_interrupt)
+ adapt->HalFunc.enable_interrupt(adapt);
+ else
+ DBG_88E("%s: HalFunc.enable_interrupt is NULL!\n", __func__);
+}
+
+void rtw_hal_disable_interrupt(struct adapter *adapt)
+{
+ if (adapt->HalFunc.disable_interrupt)
+ adapt->HalFunc.disable_interrupt(adapt);
+ else
+ DBG_88E("%s: HalFunc.disable_interrupt is NULL!\n", __func__);
+}
+
+u32 rtw_hal_inirp_init(struct adapter *adapt)
+{
+ u32 rst = _FAIL;
+
+ if (adapt->HalFunc.inirp_init)
+ rst = adapt->HalFunc.inirp_init(adapt);
+ else
+ DBG_88E(" %s HalFunc.inirp_init is NULL!!!\n", __func__);
+ return rst;
+}
+
+u32 rtw_hal_inirp_deinit(struct adapter *adapt)
+{
+ if (adapt->HalFunc.inirp_deinit)
+ return adapt->HalFunc.inirp_deinit(adapt);
+
+ return _FAIL;
+}
+
+u8 rtw_hal_intf_ps_func(struct adapter *adapt,
+ enum hal_intf_ps_func efunc_id, u8 *val)
+{
+ if (adapt->HalFunc.interface_ps_func)
+ return adapt->HalFunc.interface_ps_func(adapt, efunc_id,
+ val);
+ return _FAIL;
+}
+
+s32 rtw_hal_xmit(struct adapter *adapt, struct xmit_frame *pxmitframe)
+{
+ if (adapt->HalFunc.hal_xmit)
+ return adapt->HalFunc.hal_xmit(adapt, pxmitframe);
+
+ return false;
+}
+
+s32 rtw_hal_mgnt_xmit(struct adapter *adapt, struct xmit_frame *pmgntframe)
+{
+ s32 ret = _FAIL;
+ if (adapt->HalFunc.mgnt_xmit)
+ ret = adapt->HalFunc.mgnt_xmit(adapt, pmgntframe);
+ return ret;
+}
+
+s32 rtw_hal_init_xmit_priv(struct adapter *adapt)
+{
+ if (adapt->HalFunc.init_xmit_priv != NULL)
+ return adapt->HalFunc.init_xmit_priv(adapt);
+ return _FAIL;
+}
+
+void rtw_hal_free_xmit_priv(struct adapter *adapt)
+{
+ if (adapt->HalFunc.free_xmit_priv != NULL)
+ adapt->HalFunc.free_xmit_priv(adapt);
+}
+
+s32 rtw_hal_init_recv_priv(struct adapter *adapt)
+{
+ if (adapt->HalFunc.init_recv_priv)
+ return adapt->HalFunc.init_recv_priv(adapt);
+
+ return _FAIL;
+}
+
+void rtw_hal_free_recv_priv(struct adapter *adapt)
+{
+ if (adapt->HalFunc.free_recv_priv)
+ adapt->HalFunc.free_recv_priv(adapt);
+}
+
+void rtw_hal_update_ra_mask(struct adapter *adapt, u32 mac_id, u8 rssi_level)
+{
+ struct mlme_priv *pmlmepriv = &(adapt->mlmepriv);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true) {
+#ifdef CONFIG_88EU_AP_MODE
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &adapt->stapriv;
+ if ((mac_id-1) > 0)
+ psta = pstapriv->sta_aid[(mac_id-1) - 1];
+ if (psta)
+ add_RATid(adapt, psta, 0);/* todo: based on rssi_level*/
+#endif
+ } else {
+ if (adapt->HalFunc.UpdateRAMaskHandler)
+ adapt->HalFunc.UpdateRAMaskHandler(adapt, mac_id,
+ rssi_level);
+ }
+}
+
+void rtw_hal_add_ra_tid(struct adapter *adapt, u32 bitmap, u8 arg,
+ u8 rssi_level)
+{
+ if (adapt->HalFunc.Add_RateATid)
+ adapt->HalFunc.Add_RateATid(adapt, bitmap, arg,
+ rssi_level);
+}
+
+/* Start the interface-specific thread */
+void rtw_hal_start_thread(struct adapter *adapt)
+{
+ if (adapt->HalFunc.run_thread)
+ adapt->HalFunc.run_thread(adapt);
+}
+
+/* Stop the interface-specific thread */
+void rtw_hal_stop_thread(struct adapter *adapt)
+{
+ if (adapt->HalFunc.cancel_thread)
+ adapt->HalFunc.cancel_thread(adapt);
+}
+
+u32 rtw_hal_read_bbreg(struct adapter *adapt, u32 regaddr, u32 bitmask)
+{
+ u32 data = 0;
+
+ if (adapt->HalFunc.read_bbreg)
+ data = adapt->HalFunc.read_bbreg(adapt, regaddr, bitmask);
+ return data;
+}
+
+void rtw_hal_write_bbreg(struct adapter *adapt, u32 regaddr, u32 bitmask,
+ u32 data)
+{
+ if (adapt->HalFunc.write_bbreg)
+ adapt->HalFunc.write_bbreg(adapt, regaddr, bitmask, data);
+}
+
+u32 rtw_hal_read_rfreg(struct adapter *adapt, enum rf_radio_path rfpath,
+ u32 regaddr, u32 bitmask)
+{
+ u32 data = 0;
+
+ if (adapt->HalFunc.read_rfreg)
+ data = adapt->HalFunc.read_rfreg(adapt, rfpath, regaddr,
+ bitmask);
+ return data;
+}
+
+void rtw_hal_write_rfreg(struct adapter *adapt, enum rf_radio_path rfpath,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ if (adapt->HalFunc.write_rfreg)
+ adapt->HalFunc.write_rfreg(adapt, rfpath, regaddr,
+ bitmask, data);
+}
+
+s32 rtw_hal_interrupt_handler(struct adapter *adapt)
+{
+ if (adapt->HalFunc.interrupt_handler)
+ return adapt->HalFunc.interrupt_handler(adapt);
+ return _FAIL;
+}
+
+void rtw_hal_set_bwmode(struct adapter *adapt,
+ enum ht_channel_width bandwidth, u8 offset)
+{
+ if (adapt->HalFunc.set_bwmode_handler)
+ adapt->HalFunc.set_bwmode_handler(adapt, bandwidth,
+ offset);
+}
+
+void rtw_hal_set_chan(struct adapter *adapt, u8 channel)
+{
+ if (adapt->HalFunc.set_channel_handler)
+ adapt->HalFunc.set_channel_handler(adapt, channel);
+}
+
+void rtw_hal_dm_watchdog(struct adapter *adapt)
+{
+ if (adapt->HalFunc.hal_dm_watchdog)
+ adapt->HalFunc.hal_dm_watchdog(adapt);
+}
+
+void rtw_hal_bcn_related_reg_setting(struct adapter *adapt)
+{
+ if (adapt->HalFunc.SetBeaconRelatedRegistersHandler)
+ adapt->HalFunc.SetBeaconRelatedRegistersHandler(adapt);
+}
+
+u8 rtw_hal_antdiv_before_linked(struct adapter *adapt)
+{
+ if (adapt->HalFunc.AntDivBeforeLinkHandler)
+ return adapt->HalFunc.AntDivBeforeLinkHandler(adapt);
+ return false;
+}
+
+void rtw_hal_antdiv_rssi_compared(struct adapter *adapt,
+ struct wlan_bssid_ex *dst,
+ struct wlan_bssid_ex *src)
+{
+ if (adapt->HalFunc.AntDivCompareHandler)
+ adapt->HalFunc.AntDivCompareHandler(adapt, dst, src);
+}
+
+void rtw_hal_sreset_init(struct adapter *adapt)
+{
+ if (adapt->HalFunc.sreset_init_value)
+ adapt->HalFunc.sreset_init_value(adapt);
+}
+
+void rtw_hal_sreset_reset(struct adapter *adapt)
+{
+ if (adapt->HalFunc.silentreset)
+ adapt->HalFunc.silentreset(adapt);
+}
+
+void rtw_hal_sreset_reset_value(struct adapter *adapt)
+{
+ if (adapt->HalFunc.sreset_reset_value)
+ adapt->HalFunc.sreset_reset_value(adapt);
+}
+
+void rtw_hal_sreset_xmit_status_check(struct adapter *adapt)
+{
+ if (adapt->HalFunc.sreset_xmit_status_check)
+ adapt->HalFunc.sreset_xmit_status_check(adapt);
+}
+
+void rtw_hal_sreset_linked_status_check(struct adapter *adapt)
+{
+ if (adapt->HalFunc.sreset_linked_status_check)
+ adapt->HalFunc.sreset_linked_status_check(adapt);
+}
+
+u8 rtw_hal_sreset_get_wifi_status(struct adapter *adapt)
+{
+ u8 status = 0;
+
+ if (adapt->HalFunc.sreset_get_wifi_status)
+ status = adapt->HalFunc.sreset_get_wifi_status(adapt);
+ return status;
+}
+
+int rtw_hal_iol_cmd(struct adapter *adapter, struct xmit_frame *xmit_frame,
+ u32 max_wating_ms, u32 bndy_cnt)
+{
+ if (adapter->HalFunc.IOL_exec_cmds_sync)
+ return adapter->HalFunc.IOL_exec_cmds_sync(adapter, xmit_frame,
+ max_wating_ms,
+ bndy_cnt);
+ return _FAIL;
+}
+
+void rtw_hal_notch_filter(struct adapter *adapter, bool enable)
+{
+ if (adapter->HalFunc.hal_notch_filter)
+ adapter->HalFunc.hal_notch_filter(adapter, enable);
+}
+
+void rtw_hal_reset_security_engine(struct adapter *adapter)
+{
+ if (adapter->HalFunc.hal_reset_security_engine)
+ adapter->HalFunc.hal_reset_security_engine(adapter);
+}
+
+s32 rtw_hal_c2h_handler(struct adapter *adapter, struct c2h_evt_hdr *c2h_evt)
+{
+ s32 ret = _FAIL;
+
+ if (adapter->HalFunc.c2h_handler)
+ ret = adapter->HalFunc.c2h_handler(adapter, c2h_evt);
+ return ret;
+}
+
+c2h_id_filter rtw_hal_c2h_id_filter_ccx(struct adapter *adapter)
+{
+ return adapter->HalFunc.c2h_id_filter_ccx;
+}
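+
+/*
+ * Registration sketch (illustrative; the backend function names are
+ * hypothetical): every wrapper above forwards to a HalFunc callback only
+ * when one has been installed, so a HAL backend fills in the callbacks it
+ * supports, e.g.
+ *
+ *	adapt->HalFunc.hal_init   = example_usb_hal_init;
+ *	adapt->HalFunc.hal_deinit = example_usb_hal_deinit;
+ *	adapt->HalFunc.hal_xmit   = example_usb_hal_xmit;
+ *
+ * Callbacks left unset are tolerated: the wrappers either skip the call,
+ * return _FAIL/false, or log a DBG_88E message instead of dereferencing a
+ * NULL pointer.
+ */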
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
new file mode 100644
index 00000000000..285475f9613
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -0,0 +1,2171 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+/* include files */
+
+#include "odm_precomp.h"
+
+static const u16 dB_Invert_Table[8][12] = {
+ {1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4},
+ {4, 5, 6, 6, 7, 8, 9, 10, 11, 13, 14, 16},
+ {18, 20, 22, 25, 28, 32, 35, 40, 45, 50, 56, 63},
+ {71, 79, 89, 100, 112, 126, 141, 158, 178, 200, 224, 251},
+ {282, 316, 355, 398, 447, 501, 562, 631, 708, 794, 891, 1000},
+ {1122, 1259, 1413, 1585, 1778, 1995, 2239, 2512, 2818, 3162, 3548, 3981},
+ {4467, 5012, 5623, 6310, 7079, 7943, 8913, 10000, 11220, 12589, 14125, 15849},
+ {17783, 19953, 22387, 25119, 28184, 31623, 35481, 39811, 44668, 50119, 56234, 65535}
+};
+
+/* Avoid a warning on FreeBSD ==> TODO: modify */
+static u32 EDCAParam[HT_IOT_PEER_MAX][3] = {
+ /* UL DL */
+ {0x5ea42b, 0x5ea42b, 0x5ea42b}, /* 0:unknown AP */
+ {0xa44f, 0x5ea44f, 0x5e431c}, /* 1:realtek AP */
+ {0x5ea42b, 0x5ea42b, 0x5ea42b}, /* 2:unknown AP => realtek_92SE */
+ {0x5ea32b, 0x5ea42b, 0x5e4322}, /* 3:broadcom AP */
+ {0x5ea422, 0x00a44f, 0x00a44f}, /* 4:ralink AP */
+ {0x5ea322, 0x00a630, 0x00a44f}, /* 5:atheros AP */
+	{0x5e4322, 0x5e4322, 0x5e4322}, /* 6:cisco AP */
+	{0x5ea44f, 0x00a44f, 0x5ea42b}, /* 8:marvell AP */
+	{0x5ea42b, 0x5ea42b, 0x5ea42b}, /* 10:unknown AP => 92U AP */
+ {0x5ea42b, 0xa630, 0x5e431c}, /* 11:airgocap AP */
+};
+
+/* Global var */
+u32 OFDMSwingTable[OFDM_TABLE_SIZE_92D] = {
+ 0x7f8001fe, /* 0, +6.0dB */
+ 0x788001e2, /* 1, +5.5dB */
+ 0x71c001c7, /* 2, +5.0dB */
+ 0x6b8001ae, /* 3, +4.5dB */
+ 0x65400195, /* 4, +4.0dB */
+ 0x5fc0017f, /* 5, +3.5dB */
+ 0x5a400169, /* 6, +3.0dB */
+ 0x55400155, /* 7, +2.5dB */
+ 0x50800142, /* 8, +2.0dB */
+ 0x4c000130, /* 9, +1.5dB */
+ 0x47c0011f, /* 10, +1.0dB */
+ 0x43c0010f, /* 11, +0.5dB */
+ 0x40000100, /* 12, +0dB */
+ 0x3c8000f2, /* 13, -0.5dB */
+ 0x390000e4, /* 14, -1.0dB */
+ 0x35c000d7, /* 15, -1.5dB */
+ 0x32c000cb, /* 16, -2.0dB */
+ 0x300000c0, /* 17, -2.5dB */
+ 0x2d4000b5, /* 18, -3.0dB */
+ 0x2ac000ab, /* 19, -3.5dB */
+ 0x288000a2, /* 20, -4.0dB */
+ 0x26000098, /* 21, -4.5dB */
+ 0x24000090, /* 22, -5.0dB */
+ 0x22000088, /* 23, -5.5dB */
+ 0x20000080, /* 24, -6.0dB */
+ 0x1e400079, /* 25, -6.5dB */
+ 0x1c800072, /* 26, -7.0dB */
+	0x1b00006c, /* 27, -7.5dB */
+ 0x19800066, /* 28, -8.0dB */
+ 0x18000060, /* 29, -8.5dB */
+ 0x16c0005b, /* 30, -9.0dB */
+ 0x15800056, /* 31, -9.5dB */
+ 0x14400051, /* 32, -10.0dB */
+ 0x1300004c, /* 33, -10.5dB */
+ 0x12000048, /* 34, -11.0dB */
+ 0x11000044, /* 35, -11.5dB */
+ 0x10000040, /* 36, -12.0dB */
+	0x0f00003c, /* 37, -12.5dB */
+	0x0e400039, /* 38, -13.0dB */
+	0x0d800036, /* 39, -13.5dB */
+	0x0cc00033, /* 40, -14.0dB */
+	0x0c000030, /* 41, -14.5dB */
+	0x0b40002d, /* 42, -15.0dB */
+};
+
+u8 CCKSwingTable_Ch1_Ch13[CCK_TABLE_SIZE][8] = {
+ {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04}, /* 0, +0dB */
+ {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04}, /* 1, -0.5dB */
+ {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03}, /* 2, -1.0dB */
+ {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03}, /* 3, -1.5dB */
+ {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03}, /* 4, -2.0dB */
+ {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03}, /* 5, -2.5dB */
+ {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03}, /* 6, -3.0dB */
+ {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03}, /* 7, -3.5dB */
+ {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02}, /* 8, -4.0dB */
+ {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02}, /* 9, -4.5dB */
+ {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02}, /* 10, -5.0dB */
+ {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02}, /* 11, -5.5dB */
+ {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02}, /* 12, -6.0dB */
+ {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02}, /* 13, -6.5dB */
+ {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02}, /* 14, -7.0dB */
+ {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02}, /* 15, -7.5dB */
+ {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01}, /* 16, -8.0dB */
+ {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02}, /* 17, -8.5dB */
+ {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01}, /* 18, -9.0dB */
+ {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 19, -9.5dB */
+ {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01}, /* 20, -10.0dB */
+ {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 21, -10.5dB */
+ {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01}, /* 22, -11.0dB */
+ {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01}, /* 23, -11.5dB */
+ {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01}, /* 24, -12.0dB */
+ {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01}, /* 25, -12.5dB */
+ {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01}, /* 26, -13.0dB */
+ {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 27, -13.5dB */
+ {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01}, /* 28, -14.0dB */
+ {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 29, -14.5dB */
+ {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01}, /* 30, -15.0dB */
+ {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01}, /* 31, -15.5dB */
+ {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01} /* 32, -16.0dB */
+};
+
+u8 CCKSwingTable_Ch14[CCK_TABLE_SIZE][8] = {
+ {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00}, /* 0, +0dB */
+ {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00}, /* 1, -0.5dB */
+ {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00}, /* 2, -1.0dB */
+ {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00}, /* 3, -1.5dB */
+ {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00}, /* 4, -2.0dB */
+ {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00}, /* 5, -2.5dB */
+ {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00}, /* 6, -3.0dB */
+ {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00}, /* 7, -3.5dB */
+ {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00}, /* 8, -4.0dB */
+ {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00}, /* 9, -4.5dB */
+ {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00}, /* 10, -5.0dB */
+ {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 11, -5.5dB */
+ {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00}, /* 12, -6.0dB */
+ {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00}, /* 13, -6.5dB */
+ {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00}, /* 14, -7.0dB */
+ {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 15, -7.5dB */
+ {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00}, /* 16, -8.0dB */
+ {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 17, -8.5dB */
+ {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00}, /* 18, -9.0dB */
+ {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 19, -9.5dB */
+ {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00}, /* 20, -10.0dB */
+ {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 21, -10.5dB */
+ {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00}, /* 22, -11.0dB */
+ {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 23, -11.5dB */
+ {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00}, /* 24, -12.0dB */
+ {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 25, -12.5dB */
+ {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 26, -13.0dB */
+ {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00}, /* 27, -13.5dB */
+ {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 28, -14.0dB */
+ {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 29, -14.5dB */
+ {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 30, -15.0dB */
+ {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00}, /* 31, -15.5dB */
+ {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00} /* 32, -16.0dB */
+};
+
+
+#define RxDefaultAnt1 0x65a9
+#define RxDefaultAnt2 0x569a
+
+/* 3 Export Interface */
+
+/* 2011/09/21 MH Added to describe the resources each team needs to allocate. */
+void ODM_DMInit(struct odm_dm_struct *pDM_Odm)
+{
+ /* 2012.05.03 Luke: For all IC series */
+ odm_CommonInfoSelfInit(pDM_Odm);
+ odm_CmnInfoInit_Debug(pDM_Odm);
+ odm_DIGInit(pDM_Odm);
+ odm_RateAdaptiveMaskInit(pDM_Odm);
+
+ if (pDM_Odm->SupportICType & ODM_IC_11AC_SERIES) {
+ ;
+ } else if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) {
+ odm_PrimaryCCA_Init(pDM_Odm); /* Gary */
+ odm_DynamicBBPowerSavingInit(pDM_Odm);
+ odm_DynamicTxPowerInit(pDM_Odm);
+ odm_TXPowerTrackingInit(pDM_Odm);
+ ODM_EdcaTurboInit(pDM_Odm);
+ ODM_RAInfo_Init_all(pDM_Odm);
+ if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
+ odm_InitHybridAntDiv(pDM_Odm);
+ else if (pDM_Odm->AntDivType == CGCS_RX_SW_ANTDIV)
+ odm_SwAntDivInit(pDM_Odm);
+ }
+}
+
+/* 2011/09/20 MH This is the entry point for all teams to execute HW outsourced DM. */
+/* Do not add any dummy functions here; be careful, you may only use the DM structure */
+/* to implement any new ODM_DM. */
+void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm)
+{
+ /* 2012.05.03 Luke: For all IC series */
+ odm_GlobalAdapterCheck();
+ odm_CmnInfoHook_Debug(pDM_Odm);
+ odm_CmnInfoUpdate_Debug(pDM_Odm);
+ odm_CommonInfoSelfUpdate(pDM_Odm);
+ odm_FalseAlarmCounterStatistics(pDM_Odm);
+ odm_RSSIMonitorCheck(pDM_Odm);
+
+ /* For CE Platform(SPRD or Tablet) */
+ /* 8723A or 8189ES platform */
+ /* NeilChen--2012--08--24-- */
+ /* Fix Leave LPS issue */
+	if ((pDM_Odm->Adapter->pwrctrlpriv.pwr_mode != PS_MODE_ACTIVE) && /* in LPS mode */
+ ((pDM_Odm->SupportICType & (ODM_RTL8723A)) ||
+ (pDM_Odm->SupportICType & (ODM_RTL8188E) &&
+ ((pDM_Odm->SupportInterface == ODM_ITRF_SDIO))))) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("----Step1: odm_DIG is in LPS mode\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("---Step2: 8723AS is in LPS mode\n"));
+ odm_DIGbyRSSI_LPS(pDM_Odm);
+ } else {
+ odm_DIG(pDM_Odm);
+ }
+ odm_CCKPacketDetectionThresh(pDM_Odm);
+
+ if (*(pDM_Odm->pbPowerSaving))
+ return;
+
+ odm_RefreshRateAdaptiveMask(pDM_Odm);
+
+ odm_DynamicBBPowerSaving(pDM_Odm);
+ odm_DynamicPrimaryCCA(pDM_Odm);
+ if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
+ odm_HwAntDiv(pDM_Odm);
+ else if (pDM_Odm->AntDivType == CGCS_RX_SW_ANTDIV)
+ odm_SwAntDivChkAntSwitch(pDM_Odm, SWAW_STEP_PEAK);
+
+ if (pDM_Odm->SupportICType & ODM_IC_11AC_SERIES) {
+ ;
+ } else if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) {
+ ODM_TXPowerTrackingCheck(pDM_Odm);
+ odm_EdcaTurboCheck(pDM_Odm);
+ odm_DynamicTxPower(pDM_Odm);
+ }
+ odm_dtc(pDM_Odm);
+}
+
+/* Init fixed HW values; called only at init time. */
+void ODM_CmnInfoInit(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def CmnInfo, u32 Value)
+{
+ /* This section is used for init value */
+ switch (CmnInfo) {
+ /* Fixed ODM value. */
+ case ODM_CMNINFO_ABILITY:
+ pDM_Odm->SupportAbility = (u32)Value;
+ break;
+ case ODM_CMNINFO_PLATFORM:
+ pDM_Odm->SupportPlatform = (u8)Value;
+ break;
+ case ODM_CMNINFO_INTERFACE:
+ pDM_Odm->SupportInterface = (u8)Value;
+ break;
+ case ODM_CMNINFO_MP_TEST_CHIP:
+ pDM_Odm->bIsMPChip = (u8)Value;
+ break;
+ case ODM_CMNINFO_IC_TYPE:
+ pDM_Odm->SupportICType = Value;
+ break;
+ case ODM_CMNINFO_CUT_VER:
+ pDM_Odm->CutVersion = (u8)Value;
+ break;
+ case ODM_CMNINFO_FAB_VER:
+ pDM_Odm->FabVersion = (u8)Value;
+ break;
+ case ODM_CMNINFO_RF_TYPE:
+ pDM_Odm->RFType = (u8)Value;
+ break;
+ case ODM_CMNINFO_RF_ANTENNA_TYPE:
+ pDM_Odm->AntDivType = (u8)Value;
+ break;
+ case ODM_CMNINFO_BOARD_TYPE:
+ pDM_Odm->BoardType = (u8)Value;
+ break;
+ case ODM_CMNINFO_EXT_LNA:
+ pDM_Odm->ExtLNA = (u8)Value;
+ break;
+ case ODM_CMNINFO_EXT_PA:
+ pDM_Odm->ExtPA = (u8)Value;
+ break;
+ case ODM_CMNINFO_EXT_TRSW:
+ pDM_Odm->ExtTRSW = (u8)Value;
+ break;
+ case ODM_CMNINFO_PATCH_ID:
+ pDM_Odm->PatchID = (u8)Value;
+ break;
+ case ODM_CMNINFO_BINHCT_TEST:
+ pDM_Odm->bInHctTest = (bool)Value;
+ break;
+ case ODM_CMNINFO_BWIFI_TEST:
+ pDM_Odm->bWIFITest = (bool)Value;
+ break;
+ case ODM_CMNINFO_SMART_CONCURRENT:
+ pDM_Odm->bDualMacSmartConcurrent = (bool)Value;
+ break;
+	/* An empty default case is needed here to silence the compiler warning about unhandled values. */
+ default:
+ /* do nothing */
+ break;
+ }
+
+	/* Tx power tracking BB swing table. */
+	/* The base index is 12: index n below 12 adds ((12-n)/2) dB (e.g. index 10 gives +1 dB), */
+	/* while index 13 and above decreases Tx power by ((n-12)/2) dB. */
+	pDM_Odm->BbSwingIdxOfdm = 12; /* Set the default value to index 12. */
+ pDM_Odm->BbSwingIdxOfdmCurrent = 12;
+ pDM_Odm->BbSwingFlagOfdm = false;
+}
+
+void ODM_CmnInfoHook(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def CmnInfo, void *pValue)
+{
+	/* Hook call by reference pointer. */
+ switch (CmnInfo) {
+ /* Dynamic call by reference pointer. */
+ case ODM_CMNINFO_MAC_PHY_MODE:
+ pDM_Odm->pMacPhyMode = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_TX_UNI:
+ pDM_Odm->pNumTxBytesUnicast = (u64 *)pValue;
+ break;
+ case ODM_CMNINFO_RX_UNI:
+ pDM_Odm->pNumRxBytesUnicast = (u64 *)pValue;
+ break;
+ case ODM_CMNINFO_WM_MODE:
+ pDM_Odm->pWirelessMode = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_BAND:
+ pDM_Odm->pBandType = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_SEC_CHNL_OFFSET:
+ pDM_Odm->pSecChOffset = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_SEC_MODE:
+ pDM_Odm->pSecurity = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_BW:
+ pDM_Odm->pBandWidth = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_CHNL:
+ pDM_Odm->pChannel = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_DMSP_GET_VALUE:
+ pDM_Odm->pbGetValueFromOtherMac = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_BUDDY_ADAPTOR:
+ pDM_Odm->pBuddyAdapter = (struct adapter **)pValue;
+ break;
+ case ODM_CMNINFO_DMSP_IS_MASTER:
+ pDM_Odm->pbMasterOfDMSP = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_SCAN:
+ pDM_Odm->pbScanInProcess = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_POWER_SAVING:
+ pDM_Odm->pbPowerSaving = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_ONE_PATH_CCA:
+ pDM_Odm->pOnePathCCA = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_DRV_STOP:
+ pDM_Odm->pbDriverStopped = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_PNP_IN:
+ pDM_Odm->pbDriverIsGoingToPnpSetPowerSleep = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_INIT_ON:
+ pDM_Odm->pinit_adpt_in_progress = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_ANT_TEST:
+ pDM_Odm->pAntennaTest = (u8 *)pValue;
+ break;
+ case ODM_CMNINFO_NET_CLOSED:
+ pDM_Odm->pbNet_closed = (bool *)pValue;
+ break;
+ case ODM_CMNINFO_MP_MODE:
+ pDM_Odm->mp_mode = (u8 *)pValue;
+ break;
+	/* An empty default case is needed here to silence the compiler warning about unhandled values. */
+ default:
+ /* do nothing */
+ break;
+ }
+}
+
+void ODM_CmnInfoPtrArrayHook(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def CmnInfo, u16 Index, void *pValue)
+{
+ /* Hook call by reference pointer. */
+ switch (CmnInfo) {
+ /* Dynamic call by reference pointer. */
+ case ODM_CMNINFO_STA_STATUS:
+ pDM_Odm->pODM_StaInfo[Index] = (struct sta_info *)pValue;
+ break;
+	/* An empty default case is needed here to silence the compiler warning about unhandled values. */
+ default:
+ /* do nothing */
+ break;
+ }
+}
+
+/* Update Band/Channel/etc. These values are dynamic but not per-packet. */
+void ODM_CmnInfoUpdate(struct odm_dm_struct *pDM_Odm, u32 CmnInfo, u64 Value)
+{
+	/* These values may be changed at run time. */
+ switch (CmnInfo) {
+ case ODM_CMNINFO_ABILITY:
+ pDM_Odm->SupportAbility = (u32)Value;
+ break;
+ case ODM_CMNINFO_RF_TYPE:
+ pDM_Odm->RFType = (u8)Value;
+ break;
+ case ODM_CMNINFO_WIFI_DIRECT:
+ pDM_Odm->bWIFI_Direct = (bool)Value;
+ break;
+ case ODM_CMNINFO_WIFI_DISPLAY:
+ pDM_Odm->bWIFI_Display = (bool)Value;
+ break;
+ case ODM_CMNINFO_LINK:
+ pDM_Odm->bLinked = (bool)Value;
+ break;
+ case ODM_CMNINFO_RSSI_MIN:
+ pDM_Odm->RSSI_Min = (u8)Value;
+ break;
+ case ODM_CMNINFO_DBG_COMP:
+ pDM_Odm->DebugComponents = Value;
+ break;
+ case ODM_CMNINFO_DBG_LEVEL:
+ pDM_Odm->DebugLevel = (u32)Value;
+ break;
+ case ODM_CMNINFO_RA_THRESHOLD_HIGH:
+ pDM_Odm->RateAdaptive.HighRSSIThresh = (u8)Value;
+ break;
+ case ODM_CMNINFO_RA_THRESHOLD_LOW:
+ pDM_Odm->RateAdaptive.LowRSSIThresh = (u8)Value;
+ break;
+ }
+}
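+
+/*
+ * Calling-pattern sketch (illustrative; the driver-side variables are
+ * hypothetical): fixed values go through ODM_CmnInfoInit() once at init
+ * time, variables the ODM should track by reference are registered with
+ * ODM_CmnInfoHook(), and values that change at run time are pushed with
+ * ODM_CmnInfoUpdate().
+ *
+ *	ODM_CmnInfoInit(pDM_Odm, ODM_CMNINFO_RF_TYPE, ODM_2T2R);
+ *	ODM_CmnInfoHook(pDM_Odm, ODM_CMNINFO_SCAN, &scan_in_progress);
+ *	ODM_CmnInfoUpdate(pDM_Odm, ODM_CMNINFO_RSSI_MIN, rssi_min);
+ */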
+
+void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm)
+{
+ pDM_Odm->bCckHighPower = (bool) ODM_GetBBReg(pDM_Odm, 0x824, BIT9);
+ pDM_Odm->RFPathRxEnable = (u8) ODM_GetBBReg(pDM_Odm, 0xc04, 0x0F);
+ if (pDM_Odm->SupportICType & (ODM_RTL8192C|ODM_RTL8192D))
+ pDM_Odm->AntDivType = CG_TRX_HW_ANTDIV;
+ if (pDM_Odm->SupportICType & (ODM_RTL8723A))
+ pDM_Odm->AntDivType = CGCS_RX_SW_ANTDIV;
+
+ ODM_InitDebugSetting(pDM_Odm);
+}
+
+void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm)
+{
+ u8 EntryCnt = 0;
+ u8 i;
+ struct sta_info *pEntry;
+
+ if (*(pDM_Odm->pBandWidth) == ODM_BW40M) {
+ if (*(pDM_Odm->pSecChOffset) == 1)
+ pDM_Odm->ControlChannel = *(pDM_Odm->pChannel) - 2;
+ else if (*(pDM_Odm->pSecChOffset) == 2)
+ pDM_Odm->ControlChannel = *(pDM_Odm->pChannel) + 2;
+ } else {
+ pDM_Odm->ControlChannel = *(pDM_Odm->pChannel);
+ }
+
+ for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
+ pEntry = pDM_Odm->pODM_StaInfo[i];
+ if (IS_STA_VALID(pEntry))
+ EntryCnt++;
+ }
+ if (EntryCnt == 1)
+ pDM_Odm->bOneEntryOnly = true;
+ else
+ pDM_Odm->bOneEntryOnly = false;
+}
+
+void odm_CmnInfoInit_Debug(struct odm_dm_struct *pDM_Odm)
+{
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_CmnInfoInit_Debug==>\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportPlatform=%d\n", pDM_Odm->SupportPlatform));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportAbility=0x%x\n", pDM_Odm->SupportAbility));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportInterface=%d\n", pDM_Odm->SupportInterface));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("SupportICType=0x%x\n", pDM_Odm->SupportICType));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("CutVersion=%d\n", pDM_Odm->CutVersion));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("FabVersion=%d\n", pDM_Odm->FabVersion));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("RFType=%d\n", pDM_Odm->RFType));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("BoardType=%d\n", pDM_Odm->BoardType));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtLNA=%d\n", pDM_Odm->ExtLNA));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtPA=%d\n", pDM_Odm->ExtPA));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("ExtTRSW=%d\n", pDM_Odm->ExtTRSW));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("PatchID=%d\n", pDM_Odm->PatchID));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bInHctTest=%d\n", pDM_Odm->bInHctTest));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bWIFITest=%d\n", pDM_Odm->bWIFITest));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bDualMacSmartConcurrent=%d\n", pDM_Odm->bDualMacSmartConcurrent));
+}
+
+void odm_CmnInfoHook_Debug(struct odm_dm_struct *pDM_Odm)
+{
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_CmnInfoHook_Debug==>\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pNumTxBytesUnicast=%llu\n", *(pDM_Odm->pNumTxBytesUnicast)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pNumRxBytesUnicast=%llu\n", *(pDM_Odm->pNumRxBytesUnicast)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pWirelessMode=0x%x\n", *(pDM_Odm->pWirelessMode)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pSecChOffset=%d\n", *(pDM_Odm->pSecChOffset)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pSecurity=%d\n", *(pDM_Odm->pSecurity)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pBandWidth=%d\n", *(pDM_Odm->pBandWidth)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pChannel=%d\n", *(pDM_Odm->pChannel)));
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pbScanInProcess=%d\n", *(pDM_Odm->pbScanInProcess)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pbPowerSaving=%d\n", *(pDM_Odm->pbPowerSaving)));
+
+ if (pDM_Odm->SupportPlatform & (ODM_AP|ODM_ADSL))
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("pOnePathCCA=%d\n", *(pDM_Odm->pOnePathCCA)));
+}
+
+void odm_CmnInfoUpdate_Debug(struct odm_dm_struct *pDM_Odm)
+{
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("odm_CmnInfoUpdate_Debug==>\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bWIFI_Direct=%d\n", pDM_Odm->bWIFI_Direct));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bWIFI_Display=%d\n", pDM_Odm->bWIFI_Display));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("bLinked=%d\n", pDM_Odm->bLinked));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_COMMON, ODM_DBG_LOUD, ("RSSI_Min=%d\n", pDM_Odm->RSSI_Min));
+}
+
+static int getIGIForDiff(int value_IGI)
+{
+ #define ONERCCA_LOW_TH 0x30
+ #define ONERCCA_LOW_DIFF 8
+
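+ /* Raise a low IGI by ONERCCA_LOW_DIFF, capped at ONERCCA_LOW_TH; values at or above */
+ /* the threshold pass through unchanged. For example, 0x2a -> 0x30 (difference under */
+ /* 8), while 0x20 -> 0x28 (0x20 + ONERCCA_LOW_DIFF). */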
+ if (value_IGI < ONERCCA_LOW_TH) {
+ if ((ONERCCA_LOW_TH - value_IGI) < ONERCCA_LOW_DIFF)
+ return ONERCCA_LOW_TH;
+ else
+ return value_IGI + ONERCCA_LOW_DIFF;
+ } else {
+ return value_IGI;
+ }
+}
+
+void ODM_Write_DIG(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI)
+{
+ struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("ODM_REG(IGI_A,pDM_Odm)=0x%x, ODM_BIT(IGI,pDM_Odm)=0x%x\n",
+ ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm)));
+
+ if (pDM_DigTable->CurIGValue != CurrentIGI) {
+ if (pDM_Odm->SupportPlatform & (ODM_CE|ODM_MP)) {
+ ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
+ if (pDM_Odm->SupportICType != ODM_RTL8188E)
+ ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_B, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
+ } else if (pDM_Odm->SupportPlatform & (ODM_AP|ODM_ADSL)) {
+ switch (*(pDM_Odm->pOnePathCCA)) {
+ case ODM_CCA_2R:
+ ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
+ if (pDM_Odm->SupportICType != ODM_RTL8188E)
+ ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_B, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
+ break;
+ case ODM_CCA_1R_A:
+ ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
+ if (pDM_Odm->SupportICType != ODM_RTL8188E)
+ ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_B, pDM_Odm), ODM_BIT(IGI, pDM_Odm), getIGIForDiff(CurrentIGI));
+ break;
+ case ODM_CCA_1R_B:
+ ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), getIGIForDiff(CurrentIGI));
+ if (pDM_Odm->SupportICType != ODM_RTL8188E)
+ ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_B, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
+ break;
+ }
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("CurrentIGI(0x%02x).\n", CurrentIGI));
+ /* pDM_DigTable->PreIGValue = pDM_DigTable->CurIGValue; */
+ pDM_DigTable->CurIGValue = CurrentIGI;
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("ODM_Write_DIG():CurrentIGI=0x%x\n", CurrentIGI));
+
+/* Added by Neil Chen to enable EDCCA on the MP platform */
+}
+
+/* LPS mode is needed for the CE platform -- 2012-08-24 */
+/* 8723AS/8189ES */
+void odm_DIGbyRSSI_LPS(struct odm_dm_struct *pDM_Odm)
+{
+ struct adapter *pAdapter = pDM_Odm->Adapter;
+ struct false_alarm_stats *pFalseAlmCnt = &pDM_Odm->FalseAlmCnt;
+
+ u8 RSSI_Lower = DM_DIG_MIN_NIC; /* 0x1E or 0x1C */
+ u8 bFwCurrentInPSMode = false;
+ u8 CurrentIGI = pDM_Odm->RSSI_Min;
+
+ if (!(pDM_Odm->SupportICType & (ODM_RTL8723A | ODM_RTL8188E)))
+ return;
+
+ CurrentIGI = CurrentIGI + RSSI_OFFSET_DIG;
+ bFwCurrentInPSMode = pAdapter->pwrctrlpriv.bFwCurrentInPSMode;
+
+ /* Using FW PS mode to make IGI */
+ if (bFwCurrentInPSMode) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("---Neil---odm_DIG is in LPS mode\n"));
+ /* Adjust by FA in LPS MODE */
+ if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2_LPS)
+ CurrentIGI = CurrentIGI+2;
+ else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1_LPS)
+ CurrentIGI = CurrentIGI+1;
+ else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0_LPS)
+ CurrentIGI = CurrentIGI-1;
+ } else {
+ CurrentIGI = RSSI_Lower;
+ }
+
+ /* Lower bound checking */
+
+ /* RSSI Lower bound check */
+ if ((pDM_Odm->RSSI_Min-10) > DM_DIG_MIN_NIC)
+ RSSI_Lower = (pDM_Odm->RSSI_Min-10);
+ else
+ RSSI_Lower = DM_DIG_MIN_NIC;
+
+ /* Upper and Lower Bound checking */
+ if (CurrentIGI > DM_DIG_MAX_NIC)
+ CurrentIGI = DM_DIG_MAX_NIC;
+ else if (CurrentIGI < RSSI_Lower)
+ CurrentIGI = RSSI_Lower;
+
+ ODM_Write_DIG(pDM_Odm, CurrentIGI);/* ODM_Write_DIG(pDM_Odm, pDM_DigTable->CurIGValue); */
+}
+
+void odm_DIGInit(struct odm_dm_struct *pDM_Odm)
+{
+ struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
+
+ pDM_DigTable->CurIGValue = (u8) ODM_GetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm));
+ pDM_DigTable->RssiLowThresh = DM_DIG_THRESH_LOW;
+ pDM_DigTable->RssiHighThresh = DM_DIG_THRESH_HIGH;
+ pDM_DigTable->FALowThresh = DM_false_ALARM_THRESH_LOW;
+ pDM_DigTable->FAHighThresh = DM_false_ALARM_THRESH_HIGH;
+ if (pDM_Odm->BoardType == ODM_BOARD_HIGHPWR) {
+ pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
+ pDM_DigTable->rx_gain_range_min = DM_DIG_MIN_NIC;
+ } else {
+ pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
+ pDM_DigTable->rx_gain_range_min = DM_DIG_MIN_NIC;
+ }
+ pDM_DigTable->BackoffVal = DM_DIG_BACKOFF_DEFAULT;
+ pDM_DigTable->BackoffVal_range_max = DM_DIG_BACKOFF_MAX;
+ pDM_DigTable->BackoffVal_range_min = DM_DIG_BACKOFF_MIN;
+ pDM_DigTable->PreCCK_CCAThres = 0xFF;
+ pDM_DigTable->CurCCK_CCAThres = 0x83;
+ pDM_DigTable->ForbiddenIGI = DM_DIG_MIN_NIC;
+ pDM_DigTable->LargeFAHit = 0;
+ pDM_DigTable->Recover_cnt = 0;
+ pDM_DigTable->DIG_Dynamic_MIN_0 = DM_DIG_MIN_NIC;
+ pDM_DigTable->DIG_Dynamic_MIN_1 = DM_DIG_MIN_NIC;
+ pDM_DigTable->bMediaConnect_0 = false;
+ pDM_DigTable->bMediaConnect_1 = false;
+
+ /* Initialize bDMInitialGainEnable to true; it is cleared while PSD is in progress to avoid DIG errors */
+ pDM_Odm->bDMInitialGainEnable = true;
+}
+
+void odm_DIG(struct odm_dm_struct *pDM_Odm)
+{
+ struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
+ struct false_alarm_stats *pFalseAlmCnt = &pDM_Odm->FalseAlmCnt;
+ u8 DIG_Dynamic_MIN;
+ u8 DIG_MaxOfMin;
+ bool FirstConnect, FirstDisConnect;
+ u8 dm_dig_max, dm_dig_min;
+ u8 CurrentIGI = pDM_DigTable->CurIGValue;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG()==>\n"));
+ if ((!(pDM_Odm->SupportAbility&ODM_BB_DIG)) || (!(pDM_Odm->SupportAbility&ODM_BB_FA_CNT))) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("odm_DIG() Return: SupportAbility ODM_BB_DIG or ODM_BB_FA_CNT is disabled\n"));
+ return;
+ }
+
+ if (*(pDM_Odm->pbScanInProcess)) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG() Return: In Scan Progress\n"));
+ return;
+ }
+
+ /* Added by Neil Chen: skip DIG while PSD is processing */
+ if (!pDM_Odm->bDMInitialGainEnable) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG() Return: PSD is Processing\n"));
+ return;
+ }
+
+ if (pDM_Odm->SupportICType == ODM_RTL8192D) {
+ if (*(pDM_Odm->pMacPhyMode) == ODM_DMSP) {
+ if (*(pDM_Odm->pbMasterOfDMSP)) {
+ DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
+ FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_0);
+ FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_0);
+ } else {
+ DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_1;
+ FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_1);
+ FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_1);
+ }
+ } else {
+ if (*(pDM_Odm->pBandType) == ODM_BAND_5G) {
+ DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
+ FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_0);
+ FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_0);
+ } else {
+ DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_1;
+ FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_1);
+ FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_1);
+ }
+ }
+ } else {
+ DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
+ FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_0);
+ FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_0);
+ }
+
+ /* 1 Boundary Decision */
+ if ((pDM_Odm->SupportICType & (ODM_RTL8192C|ODM_RTL8723A)) &&
+ ((pDM_Odm->BoardType == ODM_BOARD_HIGHPWR) || pDM_Odm->ExtLNA)) {
+ if (pDM_Odm->SupportPlatform & (ODM_AP|ODM_ADSL)) {
+ dm_dig_max = DM_DIG_MAX_AP_HP;
+ dm_dig_min = DM_DIG_MIN_AP_HP;
+ } else {
+ dm_dig_max = DM_DIG_MAX_NIC_HP;
+ dm_dig_min = DM_DIG_MIN_NIC_HP;
+ }
+ DIG_MaxOfMin = DM_DIG_MAX_AP_HP;
+ } else {
+ if (pDM_Odm->SupportPlatform & (ODM_AP|ODM_ADSL)) {
+ dm_dig_max = DM_DIG_MAX_AP;
+ dm_dig_min = DM_DIG_MIN_AP;
+ DIG_MaxOfMin = dm_dig_max;
+ } else {
+ dm_dig_max = DM_DIG_MAX_NIC;
+ dm_dig_min = DM_DIG_MIN_NIC;
+ DIG_MaxOfMin = DM_DIG_MAX_AP;
+ }
+ }
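+ /* Boundary summary: 8192C/8723A high-power or external-LNA boards use the *_HP limits, */
+ /* AP/ADSL platforms use the AP limits, and everything else uses the NIC limits. */
+ /* DIG_MaxOfMin caps how high the dynamic lower bound (DIG_Dynamic_MIN) may rise below. */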
+ if (pDM_Odm->bLinked) {
+ /* 2 8723A series: the offset needs to be 10 */
+ if (pDM_Odm->SupportICType == (ODM_RTL8723A)) {
+ /* 2 Upper Bound */
+ if ((pDM_Odm->RSSI_Min + 10) > DM_DIG_MAX_NIC)
+ pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
+ else if ((pDM_Odm->RSSI_Min + 10) < DM_DIG_MIN_NIC)
+ pDM_DigTable->rx_gain_range_max = DM_DIG_MIN_NIC;
+ else
+ pDM_DigTable->rx_gain_range_max = pDM_Odm->RSSI_Min + 10;
+ /* 2 If BT is Concurrent, need to set Lower Bound */
+ DIG_Dynamic_MIN = DM_DIG_MIN_NIC;
+ } else {
+ /* 2 Modify DIG upper bound */
+ if ((pDM_Odm->RSSI_Min + 20) > dm_dig_max)
+ pDM_DigTable->rx_gain_range_max = dm_dig_max;
+ else if ((pDM_Odm->RSSI_Min + 20) < dm_dig_min)
+ pDM_DigTable->rx_gain_range_max = dm_dig_min;
+ else
+ pDM_DigTable->rx_gain_range_max = pDM_Odm->RSSI_Min + 20;
+ /* 2 Modify DIG lower bound */
+ if (pDM_Odm->bOneEntryOnly) {
+ if (pDM_Odm->RSSI_Min < dm_dig_min)
+ DIG_Dynamic_MIN = dm_dig_min;
+ else if (pDM_Odm->RSSI_Min > DIG_MaxOfMin)
+ DIG_Dynamic_MIN = DIG_MaxOfMin;
+ else
+ DIG_Dynamic_MIN = pDM_Odm->RSSI_Min;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("odm_DIG() : bOneEntryOnly=true, DIG_Dynamic_MIN=0x%x\n",
+ DIG_Dynamic_MIN));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("odm_DIG() : pDM_Odm->RSSI_Min=%d\n",
+ pDM_Odm->RSSI_Min));
+ } else if ((pDM_Odm->SupportICType == ODM_RTL8188E) &&
+ (pDM_Odm->SupportAbility & ODM_BB_ANT_DIV)) {
+ /* 1 Lower Bound for 88E AntDiv */
+ if (pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) {
+ DIG_Dynamic_MIN = (u8) pDM_DigTable->AntDiv_RSSI_max;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
+ ("odm_DIG(): pDM_DigTable->AntDiv_RSSI_max=%d\n",
+ pDM_DigTable->AntDiv_RSSI_max));
+ }
+ } else {
+ DIG_Dynamic_MIN = dm_dig_min;
+ }
+ }
+ } else {
+ pDM_DigTable->rx_gain_range_max = dm_dig_max;
+ DIG_Dynamic_MIN = dm_dig_min;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG() : No Link\n"));
+ }
+
+ /* 1 Modify DIG lower bound, deal with abnormally large false alarm */
+ if (pFalseAlmCnt->Cnt_all > 10000) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Abnormally high false alarm case.\n"));
+
+ if (pDM_DigTable->LargeFAHit != 3)
+ pDM_DigTable->LargeFAHit++;
+ if (pDM_DigTable->ForbiddenIGI < CurrentIGI) {
+ pDM_DigTable->ForbiddenIGI = CurrentIGI;
+ pDM_DigTable->LargeFAHit = 1;
+ }
+
+ if (pDM_DigTable->LargeFAHit >= 3) {
+ if ((pDM_DigTable->ForbiddenIGI+1) > pDM_DigTable->rx_gain_range_max)
+ pDM_DigTable->rx_gain_range_min = pDM_DigTable->rx_gain_range_max;
+ else
+ pDM_DigTable->rx_gain_range_min = (pDM_DigTable->ForbiddenIGI + 1);
+ pDM_DigTable->Recover_cnt = 3600; /* 3600=2hr */
+ }
+
+ } else {
+ /* Recovery mechanism for IGI lower bound */
+ if (pDM_DigTable->Recover_cnt != 0) {
+ pDM_DigTable->Recover_cnt--;
+ } else {
+ if (pDM_DigTable->LargeFAHit < 3) {
+ if ((pDM_DigTable->ForbiddenIGI-1) < DIG_Dynamic_MIN) { /* DM_DIG_MIN) */
+ pDM_DigTable->ForbiddenIGI = DIG_Dynamic_MIN; /* DM_DIG_MIN; */
+ pDM_DigTable->rx_gain_range_min = DIG_Dynamic_MIN; /* DM_DIG_MIN; */
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Normal Case: At Lower Bound\n"));
+ } else {
+ pDM_DigTable->ForbiddenIGI--;
+ pDM_DigTable->rx_gain_range_min = (pDM_DigTable->ForbiddenIGI + 1);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): Normal Case: Approach Lower Bound\n"));
+ }
+ } else {
+ pDM_DigTable->LargeFAHit = 0;
+ }
+ }
+ }
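+ /* Large-FA handling: once Cnt_all exceeds 10000, the current IGI is recorded as */
+ /* ForbiddenIGI; after it has been hit three times, the rx gain lower bound is raised */
+ /* above it and held for Recover_cnt watchdog periods before it may step back down */
+ /* toward DIG_Dynamic_MIN. */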
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("odm_DIG(): pDM_DigTable->LargeFAHit=%d\n",
+ pDM_DigTable->LargeFAHit));
+
+ /* 1 Adjust initial gain by false alarm */
+ if (pDM_Odm->bLinked) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): DIG AfterLink\n"));
+ if (FirstConnect) {
+ CurrentIGI = pDM_Odm->RSSI_Min;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("DIG: First Connect\n"));
+ } else {
+ if (pDM_Odm->SupportICType == ODM_RTL8192D) {
+ if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2_92D)
+ CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
+ else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1_92D)
+ CurrentIGI = CurrentIGI + 1; /* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
+ else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0_92D)
+ CurrentIGI = CurrentIGI - 1;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
+ } else {
+ if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2)
+ CurrentIGI = CurrentIGI + 4;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
+ else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1)
+ CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
+ else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0)
+ CurrentIGI = CurrentIGI - 2;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
+ }
+ }
+ } else {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): DIG BeforeLink\n"));
+ if (FirstDisConnect) {
+ CurrentIGI = pDM_DigTable->rx_gain_range_min;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): First DisConnect\n"));
+ } else {
+ /* 2012.03.30 LukeLee: enable DIG before link but with very high thresholds */
+ if (pFalseAlmCnt->Cnt_all > 10000)
+ CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
+ else if (pFalseAlmCnt->Cnt_all > 8000)
+ CurrentIGI = CurrentIGI + 1;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
+ else if (pFalseAlmCnt->Cnt_all < 500)
+ CurrentIGI = CurrentIGI - 1;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): England DIG\n"));
+ }
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): DIG End Adjust IGI\n"));
+ /* 1 Check initial gain by upper/lower bound */
+ if (CurrentIGI > pDM_DigTable->rx_gain_range_max)
+ CurrentIGI = pDM_DigTable->rx_gain_range_max;
+ if (CurrentIGI < pDM_DigTable->rx_gain_range_min)
+ CurrentIGI = pDM_DigTable->rx_gain_range_min;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
+ ("odm_DIG(): rx_gain_range_max=0x%x, rx_gain_range_min=0x%x\n",
+ pDM_DigTable->rx_gain_range_max, pDM_DigTable->rx_gain_range_min));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): TotalFA=%d\n", pFalseAlmCnt->Cnt_all));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD, ("odm_DIG(): CurIGValue=0x%x\n", CurrentIGI));
+
+ /* 2 High power RSSI threshold */
+
+ ODM_Write_DIG(pDM_Odm, CurrentIGI);/* ODM_Write_DIG(pDM_Odm, pDM_DigTable->CurIGValue); */
+ pDM_DigTable->bMediaConnect_0 = pDM_Odm->bLinked;
+ pDM_DigTable->DIG_Dynamic_MIN_0 = DIG_Dynamic_MIN;
+}
+
+/* 3============================================================ */
+/* 3 FALSE ALARM CHECK */
+/* 3============================================================ */
+
+void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm)
+{
+ u32 ret_value;
+ struct false_alarm_stats *FalseAlmCnt = &(pDM_Odm->FalseAlmCnt);
+
+ if (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT))
+ return;
+
+ if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) {
+ /* hold ofdm counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_HOLDC_11N, BIT31, 1); /* hold page C counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT31, 1); /* hold page D counter */
+
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE1_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_Fast_Fsync = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_SB_Search_fail = ((ret_value&0xffff0000)>>16);
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE2_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_OFDM_CCA = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_Parity_Fail = ((ret_value&0xffff0000)>>16);
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE3_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_Rate_Illegal = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_Crc8_fail = ((ret_value&0xffff0000)>>16);
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE4_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_Mcs_fail = (ret_value&0xffff);
+
+ FalseAlmCnt->Cnt_Ofdm_fail = FalseAlmCnt->Cnt_Parity_Fail + FalseAlmCnt->Cnt_Rate_Illegal +
+ FalseAlmCnt->Cnt_Crc8_fail + FalseAlmCnt->Cnt_Mcs_fail +
+ FalseAlmCnt->Cnt_Fast_Fsync + FalseAlmCnt->Cnt_SB_Search_fail;
+
+ if (pDM_Odm->SupportICType == ODM_RTL8188E) {
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_SC_CNT_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_BW_LSC = (ret_value&0xffff);
+ FalseAlmCnt->Cnt_BW_USC = ((ret_value&0xffff0000)>>16);
+ }
+
+ /* hold cck counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT12, 1);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT14, 1);
+
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_LSB_11N, bMaskByte0);
+ FalseAlmCnt->Cnt_Cck_fail = ret_value;
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_MSB_11N, bMaskByte3);
+ FalseAlmCnt->Cnt_Cck_fail += (ret_value & 0xff)<<8;
+
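+ /* The CCK CCA count is reported with its two bytes swapped, so reassemble it below */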
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_CCA_CNT_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_CCK_CCA = ((ret_value&0xFF)<<8) | ((ret_value&0xFF00)>>8);
+
+ FalseAlmCnt->Cnt_all = (FalseAlmCnt->Cnt_Fast_Fsync +
+ FalseAlmCnt->Cnt_SB_Search_fail +
+ FalseAlmCnt->Cnt_Parity_Fail +
+ FalseAlmCnt->Cnt_Rate_Illegal +
+ FalseAlmCnt->Cnt_Crc8_fail +
+ FalseAlmCnt->Cnt_Mcs_fail +
+ FalseAlmCnt->Cnt_Cck_fail);
+
+ FalseAlmCnt->Cnt_CCA_all = FalseAlmCnt->Cnt_OFDM_CCA + FalseAlmCnt->Cnt_CCK_CCA;
+
+ if (pDM_Odm->SupportICType >= ODM_RTL8723A) {
+ /* reset false alarm counter registers */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTC_11N, BIT31, 1);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTC_11N, BIT31, 0);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT27, 1);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT27, 0);
+ /* update ofdm counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_HOLDC_11N, BIT31, 0); /* update page C counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT31, 0); /* update page D counter */
+
+ /* reset CCK CCA counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT13|BIT12, 0);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT13|BIT12, 2);
+ /* reset CCK FA counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT15|BIT14, 0);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT15|BIT14, 2);
+ }
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Enter odm_FalseAlarmCounterStatistics\n"));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
+ ("Cnt_Fast_Fsync=%d, Cnt_SB_Search_fail=%d\n",
+ FalseAlmCnt->Cnt_Fast_Fsync, FalseAlmCnt->Cnt_SB_Search_fail));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
+ ("Cnt_Parity_Fail=%d, Cnt_Rate_Illegal=%d\n",
+ FalseAlmCnt->Cnt_Parity_Fail, FalseAlmCnt->Cnt_Rate_Illegal));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD,
+ ("Cnt_Crc8_fail=%d, Cnt_Mcs_fail=%d\n",
+ FalseAlmCnt->Cnt_Crc8_fail, FalseAlmCnt->Cnt_Mcs_fail));
+ } else { /* FOR ODM_IC_11AC_SERIES */
+ /* read OFDM FA counter */
+ FalseAlmCnt->Cnt_Ofdm_fail = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_11AC, bMaskLWord);
+ FalseAlmCnt->Cnt_Cck_fail = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_11AC, bMaskLWord);
+ FalseAlmCnt->Cnt_all = FalseAlmCnt->Cnt_Ofdm_fail + FalseAlmCnt->Cnt_Cck_fail;
+
+ /* reset OFDM FA counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RST_11AC, BIT17, 1);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RST_11AC, BIT17, 0);
+ /* reset CCK FA counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11AC, BIT15, 0);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11AC, BIT15, 1);
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Cnt_Cck_fail=%d\n", FalseAlmCnt->Cnt_Cck_fail));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Cnt_Ofdm_fail=%d\n", FalseAlmCnt->Cnt_Ofdm_fail));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_FA_CNT, ODM_DBG_LOUD, ("Total False Alarm=%d\n", FalseAlmCnt->Cnt_all));
+}
+
+/* 3============================================================ */
+/* 3 CCK Packet Detect Threshold */
+/* 3============================================================ */
+
+void odm_CCKPacketDetectionThresh(struct odm_dm_struct *pDM_Odm)
+{
+ u8 CurCCK_CCAThres;
+ struct false_alarm_stats *FalseAlmCnt = &(pDM_Odm->FalseAlmCnt);
+
+ if (!(pDM_Odm->SupportAbility & (ODM_BB_CCK_PD|ODM_BB_FA_CNT)))
+ return;
+ if (pDM_Odm->ExtLNA)
+ return;
+ if (pDM_Odm->bLinked) {
+ if (pDM_Odm->RSSI_Min > 25) {
+ CurCCK_CCAThres = 0xcd;
+ } else if ((pDM_Odm->RSSI_Min <= 25) && (pDM_Odm->RSSI_Min > 10)) {
+ CurCCK_CCAThres = 0x83;
+ } else {
+ if (FalseAlmCnt->Cnt_Cck_fail > 1000)
+ CurCCK_CCAThres = 0x83;
+ else
+ CurCCK_CCAThres = 0x40;
+ }
+ } else {
+ if (FalseAlmCnt->Cnt_Cck_fail > 1000)
+ CurCCK_CCAThres = 0x83;
+ else
+ CurCCK_CCAThres = 0x40;
+ }
+ ODM_Write_CCK_CCA_Thres(pDM_Odm, CurCCK_CCAThres);
+}
+
+void ODM_Write_CCK_CCA_Thres(struct odm_dm_struct *pDM_Odm, u8 CurCCK_CCAThres)
+{
+ struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
+
+ if (pDM_DigTable->CurCCK_CCAThres != CurCCK_CCAThres) /* modify by Guo.Mingzhi 2012-01-03 */
+ ODM_Write1Byte(pDM_Odm, ODM_REG(CCK_CCA, pDM_Odm), CurCCK_CCAThres);
+ pDM_DigTable->PreCCK_CCAThres = pDM_DigTable->CurCCK_CCAThres;
+ pDM_DigTable->CurCCK_CCAThres = CurCCK_CCAThres;
+}
+
+/* 3============================================================ */
+/* 3 BB Power Save */
+/* 3============================================================ */
+void odm_DynamicBBPowerSavingInit(struct odm_dm_struct *pDM_Odm)
+{
+ struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
+
+ pDM_PSTable->PreCCAState = CCA_MAX;
+ pDM_PSTable->CurCCAState = CCA_MAX;
+ pDM_PSTable->PreRFState = RF_MAX;
+ pDM_PSTable->CurRFState = RF_MAX;
+ pDM_PSTable->Rssi_val_min = 0;
+ pDM_PSTable->initialize = 0;
+}
+
+void odm_DynamicBBPowerSaving(struct odm_dm_struct *pDM_Odm)
+{
+ if ((pDM_Odm->SupportICType != ODM_RTL8192C) && (pDM_Odm->SupportICType != ODM_RTL8723A))
+ return;
+ if (!(pDM_Odm->SupportAbility & ODM_BB_PWR_SAVE))
+ return;
+ if (!(pDM_Odm->SupportPlatform & (ODM_MP|ODM_CE)))
+ return;
+
+ /* 1 2.Power Saving for 92C */
+ if ((pDM_Odm->SupportICType == ODM_RTL8192C) && (pDM_Odm->RFType == ODM_2T2R)) {
+ odm_1R_CCA(pDM_Odm);
+ } else {
+ /* 20100628 Joseph: Turn off BB power save for 88CE because it makes throughput unstable. */
+ /* 20100831 Joseph: Turn ON BB power save again after modifying AGC delay from 900ns to 600ns. */
+ /* 1 3.Power Saving for 88C */
+ ODM_RF_Saving(pDM_Odm, false);
+ }
+}
+
+void odm_1R_CCA(struct odm_dm_struct *pDM_Odm)
+{
+ struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
+
+ if (pDM_Odm->RSSI_Min != 0xFF) {
+ if (pDM_PSTable->PreCCAState == CCA_2R) {
+ if (pDM_Odm->RSSI_Min >= 35)
+ pDM_PSTable->CurCCAState = CCA_1R;
+ else
+ pDM_PSTable->CurCCAState = CCA_2R;
+ } else {
+ if (pDM_Odm->RSSI_Min <= 30)
+ pDM_PSTable->CurCCAState = CCA_2R;
+ else
+ pDM_PSTable->CurCCAState = CCA_1R;
+ }
+ } else {
+ pDM_PSTable->CurCCAState = CCA_MAX;
+ }
+
+ if (pDM_PSTable->PreCCAState != pDM_PSTable->CurCCAState) {
+ if (pDM_PSTable->CurCCAState == CCA_1R) {
+ if (pDM_Odm->RFType == ODM_2T2R)
+ ODM_SetBBReg(pDM_Odm, 0xc04, bMaskByte0, 0x13);
+ else
+ ODM_SetBBReg(pDM_Odm, 0xc04, bMaskByte0, 0x23);
+ } else {
+ ODM_SetBBReg(pDM_Odm, 0xc04, bMaskByte0, 0x33);
+ }
+ pDM_PSTable->PreCCAState = pDM_PSTable->CurCCAState;
+ }
+}
+
+void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal)
+{
+ struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
+ u8 Rssi_Up_bound = 30;
+ u8 Rssi_Low_bound = 25;
+
+ if (pDM_Odm->PatchID == 40) { /* RT_CID_819x_FUNAI_TV */
+ Rssi_Up_bound = 50;
+ Rssi_Low_bound = 45;
+ }
+ if (pDM_PSTable->initialize == 0) {
+ pDM_PSTable->Reg874 = (ODM_GetBBReg(pDM_Odm, 0x874, bMaskDWord)&0x1CC000)>>14;
+ pDM_PSTable->RegC70 = (ODM_GetBBReg(pDM_Odm, 0xc70, bMaskDWord)&BIT3)>>3;
+ pDM_PSTable->Reg85C = (ODM_GetBBReg(pDM_Odm, 0x85c, bMaskDWord)&0xFF000000)>>24;
+ pDM_PSTable->RegA74 = (ODM_GetBBReg(pDM_Odm, 0xa74, bMaskDWord)&0xF000)>>12;
+ pDM_PSTable->initialize = 1;
+ }
+
+ if (!bForceInNormal) {
+ if (pDM_Odm->RSSI_Min != 0xFF) {
+ if (pDM_PSTable->PreRFState == RF_Normal) {
+ if (pDM_Odm->RSSI_Min >= Rssi_Up_bound)
+ pDM_PSTable->CurRFState = RF_Save;
+ else
+ pDM_PSTable->CurRFState = RF_Normal;
+ } else {
+ if (pDM_Odm->RSSI_Min <= Rssi_Low_bound)
+ pDM_PSTable->CurRFState = RF_Normal;
+ else
+ pDM_PSTable->CurRFState = RF_Save;
+ }
+ } else {
+ pDM_PSTable->CurRFState = RF_MAX;
+ }
+ } else {
+ pDM_PSTable->CurRFState = RF_Normal;
+ }
+
+ if (pDM_PSTable->PreRFState != pDM_PSTable->CurRFState) {
+ if (pDM_PSTable->CurRFState == RF_Save) {
+ /* <tynli_note> The 8723 RSSI report will be wrong otherwise. Set 0x874[5]=1 when entering BB power saving mode. */
+ /* Suggested by SD3 Yu-Nan. 2011.01.20. */
+ if (pDM_Odm->SupportICType == ODM_RTL8723A)
+ ODM_SetBBReg(pDM_Odm, 0x874, BIT5, 0x1); /* Reg874[5]=1b'1 */
+ ODM_SetBBReg(pDM_Odm, 0x874, 0x1C0000, 0x2); /* Reg874[20:18]=3'b010 */
+ ODM_SetBBReg(pDM_Odm, 0xc70, BIT3, 0); /* RegC70[3]=1'b0 */
+ ODM_SetBBReg(pDM_Odm, 0x85c, 0xFF000000, 0x63); /* Reg85C[31:24]=0x63 */
+ ODM_SetBBReg(pDM_Odm, 0x874, 0xC000, 0x2); /* Reg874[15:14]=2'b10 */
+ ODM_SetBBReg(pDM_Odm, 0xa74, 0xF000, 0x3); /* RegA75[7:4]=0x3 */
+ ODM_SetBBReg(pDM_Odm, 0x818, BIT28, 0x0); /* Reg818[28]=1'b0 */
+ ODM_SetBBReg(pDM_Odm, 0x818, BIT28, 0x1); /* Reg818[28]=1'b1 */
+ } else {
+ ODM_SetBBReg(pDM_Odm, 0x874, 0x1CC000, pDM_PSTable->Reg874);
+ ODM_SetBBReg(pDM_Odm, 0xc70, BIT3, pDM_PSTable->RegC70);
+ ODM_SetBBReg(pDM_Odm, 0x85c, 0xFF000000, pDM_PSTable->Reg85C);
+ ODM_SetBBReg(pDM_Odm, 0xa74, 0xF000, pDM_PSTable->RegA74);
+ ODM_SetBBReg(pDM_Odm, 0x818, BIT28, 0x0);
+
+ if (pDM_Odm->SupportICType == ODM_RTL8723A)
+ ODM_SetBBReg(pDM_Odm, 0x874, BIT5, 0x0); /* Reg874[5]=1b'0 */
+ }
+ pDM_PSTable->PreRFState = pDM_PSTable->CurRFState;
+ }
+}
+
+/* 3============================================================ */
+/* 3 RATR MASK */
+/* 3============================================================ */
+/* 3============================================================ */
+/* 3 Rate Adaptive */
+/* 3============================================================ */
+
+void odm_RateAdaptiveMaskInit(struct odm_dm_struct *pDM_Odm)
+{
+ struct odm_rate_adapt *pOdmRA = &pDM_Odm->RateAdaptive;
+
+ pOdmRA->Type = DM_Type_ByDriver;
+ if (pOdmRA->Type == DM_Type_ByDriver)
+ pDM_Odm->bUseRAMask = true;
+ else
+ pDM_Odm->bUseRAMask = false;
+
+ pOdmRA->RATRState = DM_RATR_STA_INIT;
+ pOdmRA->HighRSSIThresh = 50;
+ pOdmRA->LowRSSIThresh = 20;
+}
+
+u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid, u32 ra_mask, u8 rssi_level)
+{
+ struct sta_info *pEntry;
+ u32 rate_bitmap = 0x0fffffff;
+ u8 WirelessMode;
+
+ pEntry = pDM_Odm->pODM_StaInfo[macid];
+ if (!IS_STA_VALID(pEntry))
+ return ra_mask;
+
+ WirelessMode = pEntry->wireless_mode;
+
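+ /* Note: in these bitmaps, bits 0..3 appear to select the CCK rates (1/2/5.5/11M), */
+ /* bits 4..11 the OFDM rates (6..54M) and bits 12 and up the MCS rates, matching the */
+ /* masks below (e.g. 0x0000000f = all CCK, 0x00000ff0 = all OFDM). */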
+ switch (WirelessMode) {
+ case ODM_WM_B:
+ if (ra_mask & 0x0000000c) /* 11M or 5.5M enable */
+ rate_bitmap = 0x0000000d;
+ else
+ rate_bitmap = 0x0000000f;
+ break;
+ case (ODM_WM_A|ODM_WM_G):
+ if (rssi_level == DM_RATR_STA_HIGH)
+ rate_bitmap = 0x00000f00;
+ else
+ rate_bitmap = 0x00000ff0;
+ break;
+ case (ODM_WM_B|ODM_WM_G):
+ if (rssi_level == DM_RATR_STA_HIGH)
+ rate_bitmap = 0x00000f00;
+ else if (rssi_level == DM_RATR_STA_MIDDLE)
+ rate_bitmap = 0x00000ff0;
+ else
+ rate_bitmap = 0x00000ff5;
+ break;
+ case (ODM_WM_B|ODM_WM_G|ODM_WM_N24G):
+ case (ODM_WM_A|ODM_WM_B|ODM_WM_G|ODM_WM_N24G):
+ if (pDM_Odm->RFType == ODM_1T2R || pDM_Odm->RFType == ODM_1T1R) {
+ if (rssi_level == DM_RATR_STA_HIGH) {
+ rate_bitmap = 0x000f0000;
+ } else if (rssi_level == DM_RATR_STA_MIDDLE) {
+ rate_bitmap = 0x000ff000;
+ } else {
+ if (*(pDM_Odm->pBandWidth) == ODM_BW40M)
+ rate_bitmap = 0x000ff015;
+ else
+ rate_bitmap = 0x000ff005;
+ }
+ } else {
+ if (rssi_level == DM_RATR_STA_HIGH) {
+ rate_bitmap = 0x0f8f0000;
+ } else if (rssi_level == DM_RATR_STA_MIDDLE) {
+ rate_bitmap = 0x0f8ff000;
+ } else {
+ if (*(pDM_Odm->pBandWidth) == ODM_BW40M)
+ rate_bitmap = 0x0f8ff015;
+ else
+ rate_bitmap = 0x0f8ff005;
+ }
+ }
+ break;
+ default:
+ /* case WIRELESS_11_24N: */
+ /* case WIRELESS_11_5N: */
+ if (pDM_Odm->RFType == RF_1T2R)
+ rate_bitmap = 0x000fffff;
+ else
+ rate_bitmap = 0x0fffffff;
+ break;
+ }
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD,
+ (" ==> rssi_level:0x%02x, WirelessMode:0x%02x, rate_bitmap:0x%08x\n",
+ rssi_level, WirelessMode, rate_bitmap));
+
+ return rate_bitmap;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: odm_RefreshRateAdaptiveMask()
+ *
+ * Overview: Update rate table mask according to RSSI
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 05/27/2009 hpfan Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+void odm_RefreshRateAdaptiveMask(struct odm_dm_struct *pDM_Odm)
+{
+ if (!(pDM_Odm->SupportAbility & ODM_BB_RA_MASK))
+ return;
+ /* */
+ /* 2011/09/29 MH In the first stage of HW integration we provide four different handlers */
+ /* operating at the same time. In stage 2/3 we need to provide a universal interface and */
+ /* merge all HW dynamic mechanisms. */
+ /* */
+ switch (pDM_Odm->SupportPlatform) {
+ case ODM_MP:
+ odm_RefreshRateAdaptiveMaskMP(pDM_Odm);
+ break;
+ case ODM_CE:
+ odm_RefreshRateAdaptiveMaskCE(pDM_Odm);
+ break;
+ case ODM_AP:
+ case ODM_ADSL:
+ odm_RefreshRateAdaptiveMaskAPADSL(pDM_Odm);
+ break;
+ }
+}
+
+void odm_RefreshRateAdaptiveMaskMP(struct odm_dm_struct *pDM_Odm)
+{
+}
+
+void odm_RefreshRateAdaptiveMaskCE(struct odm_dm_struct *pDM_Odm)
+{
+ u8 i;
+ struct adapter *pAdapter = pDM_Odm->Adapter;
+
+ if (pAdapter->bDriverStopped) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_TRACE, ("<---- odm_RefreshRateAdaptiveMask(): driver is going to unload\n"));
+ return;
+ }
+
+ if (!pDM_Odm->bUseRAMask) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD, ("<---- odm_RefreshRateAdaptiveMask(): driver does not control rate adaptive mask\n"));
+ return;
+ }
+
+ for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
+ struct sta_info *pstat = pDM_Odm->pODM_StaInfo[i];
+ if (IS_STA_VALID(pstat)) {
+ if (ODM_RAStateCheck(pDM_Odm, pstat->rssi_stat.UndecoratedSmoothedPWDB, false, &pstat->rssi_level)) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD,
+ ("RSSI:%d, RSSI_LEVEL:%d\n",
+ pstat->rssi_stat.UndecoratedSmoothedPWDB, pstat->rssi_level));
+ rtw_hal_update_ra_mask(pAdapter, i, pstat->rssi_level);
+ }
+ }
+ }
+}
+
+void odm_RefreshRateAdaptiveMaskAPADSL(struct odm_dm_struct *pDM_Odm)
+{
+}
+
+/* Return Value: bool */
+/* - true: RATRState is changed. */
+bool ODM_RAStateCheck(struct odm_dm_struct *pDM_Odm, s32 RSSI, bool bForceUpdate, u8 *pRATRState)
+{
+ struct odm_rate_adapt *pRA = &pDM_Odm->RateAdaptive;
+ const u8 GoUpGap = 5;
+ u8 HighRSSIThreshForRA = pRA->HighRSSIThresh;
+ u8 LowRSSIThreshForRA = pRA->LowRSSIThresh;
+ u8 RATRState;
+
+ /* Threshold Adjustment: */
+ /* when RSSI state trends to go up one or two levels, make sure RSSI is high enough. */
+ /* Here GoUpGap is added to solve the boundary's level alternation issue. */
+ switch (*pRATRState) {
+ case DM_RATR_STA_INIT:
+ case DM_RATR_STA_HIGH:
+ break;
+ case DM_RATR_STA_MIDDLE:
+ HighRSSIThreshForRA += GoUpGap;
+ break;
+ case DM_RATR_STA_LOW:
+ HighRSSIThreshForRA += GoUpGap;
+ LowRSSIThreshForRA += GoUpGap;
+ break;
+ default:
+ ODM_RT_ASSERT(pDM_Odm, false, ("wrong rssi level setting %d !", *pRATRState));
+ break;
+ }
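+ /* With the defaults from odm_RateAdaptiveMaskInit (High=50, Low=20) and GoUpGap=5, a */
+ /* station in the MIDDLE state must report RSSI above 55 to move to HIGH, while a */
+ /* station already in HIGH only needs to stay above 50; this is the hysteresis. */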
+
+ /* Decide RATRState by RSSI. */
+ if (RSSI > HighRSSIThreshForRA)
+ RATRState = DM_RATR_STA_HIGH;
+ else if (RSSI > LowRSSIThreshForRA)
+ RATRState = DM_RATR_STA_MIDDLE;
+ else
+ RATRState = DM_RATR_STA_LOW;
+
+ if (*pRATRState != RATRState || bForceUpdate) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_RA_MASK, ODM_DBG_LOUD, ("RSSI Level %d -> %d\n", *pRATRState, RATRState));
+ *pRATRState = RATRState;
+ return true;
+ }
+ return false;
+}
+
+/* 3============================================================ */
+/* 3 Dynamic Tx Power */
+/* 3============================================================ */
+
+void odm_DynamicTxPowerInit(struct odm_dm_struct *pDM_Odm)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ pdmpriv->bDynamicTxPowerEnable = false;
+ pdmpriv->LastDTPLvl = TxHighPwrLevel_Normal;
+ pdmpriv->DynamicTxHighPowerLvl = TxHighPwrLevel_Normal;
+}
+
+void odm_DynamicTxPower(struct odm_dm_struct *pDM_Odm)
+{
+ /* For AP/ADSL use struct rtl8192cd_priv * */
+ /* For CE/NIC use struct adapter * */
+
+ if (!(pDM_Odm->SupportAbility & ODM_BB_DYNAMIC_TXPWR))
+ return;
+
+ /* 2012/01/12 MH According to Luke's suggestion, only high power will support the feature. */
+ if (!pDM_Odm->ExtPA)
+ return;
+
+ /* 2011/09/29 MH In the first stage of HW integration we provide four different handlers */
+ /* operating at the same time. In stage 2/3 we need to provide a universal interface and */
+ /* merge all HW dynamic mechanisms. */
+ switch (pDM_Odm->SupportPlatform) {
+ case ODM_MP:
+ case ODM_CE:
+ odm_DynamicTxPowerNIC(pDM_Odm);
+ break;
+ case ODM_AP:
+ odm_DynamicTxPowerAP(pDM_Odm);
+ break;
+ case ODM_ADSL:
+ break;
+ }
+}
+
+void odm_DynamicTxPowerNIC(struct odm_dm_struct *pDM_Odm)
+{
+ if (!(pDM_Odm->SupportAbility & ODM_BB_DYNAMIC_TXPWR))
+ return;
+
+ if (pDM_Odm->SupportICType == ODM_RTL8188E) {
+ /* ??? */
+ /* This part need to be redefined. */
+ }
+}
+
+void odm_DynamicTxPowerAP(struct odm_dm_struct *pDM_Odm)
+{
+}
+
+/* 3============================================================ */
+/* 3 RSSI Monitor */
+/* 3============================================================ */
+
+void odm_RSSIMonitorCheck(struct odm_dm_struct *pDM_Odm)
+{
+ if (!(pDM_Odm->SupportAbility & ODM_BB_RSSI_MONITOR))
+ return;
+
+ /* */
+ /* 2011/09/29 MH In the first stage of HW integration we provide four different handlers */
+ /* operating at the same time. In stage 2/3 we need to provide a universal interface and */
+ /* merge all HW dynamic mechanisms. */
+ /* */
+ switch (pDM_Odm->SupportPlatform) {
+ case ODM_MP:
+ odm_RSSIMonitorCheckMP(pDM_Odm);
+ break;
+ case ODM_CE:
+ odm_RSSIMonitorCheckCE(pDM_Odm);
+ break;
+ case ODM_AP:
+ odm_RSSIMonitorCheckAP(pDM_Odm);
+ break;
+ case ODM_ADSL:
+ /* odm_DIGAP(pDM_Odm); */
+ break;
+ }
+
+} /* odm_RSSIMonitorCheck */
+
+void odm_RSSIMonitorCheckMP(struct odm_dm_struct *pDM_Odm)
+{
+}
+
+static void FindMinimumRSSI(struct adapter *pAdapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ struct mlme_priv *pmlmepriv = &pAdapter->mlmepriv;
+
+ /* 1 1.Determine the minimum RSSI */
+ if (!check_fwstate(pmlmepriv, _FW_LINKED) &&
+ (pdmpriv->EntryMinUndecoratedSmoothedPWDB == 0))
+ pdmpriv->MinUndecoratedPWDBForDM = 0;
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) /* Default port */
+ pdmpriv->MinUndecoratedPWDBForDM = pdmpriv->EntryMinUndecoratedSmoothedPWDB;
+ else /* associated entry pwdb */
+ pdmpriv->MinUndecoratedPWDBForDM = pdmpriv->EntryMinUndecoratedSmoothedPWDB;
+}
+
+void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ int i;
+ int tmpEntryMaxPWDB = 0, tmpEntryMinPWDB = 0xff;
+ u8 sta_cnt = 0;
+ u32 PWDB_rssi[NUM_STA] = {0}; /* [0~15]:MACID, [16~31]:PWDB_rssi */
+ struct sta_info *psta;
+ u8 bcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ if (!check_fwstate(&Adapter->mlmepriv, _FW_LINKED))
+ return;
+
+ for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
+ psta = pDM_Odm->pODM_StaInfo[i];
+ if (IS_STA_VALID(psta) &&
+ (psta->state & WIFI_ASOC_STATE) &&
+ !_rtw_memcmp(psta->hwaddr, bcast_addr, ETH_ALEN) &&
+ !_rtw_memcmp(psta->hwaddr, myid(&Adapter->eeprompriv), ETH_ALEN)) {
+ if (psta->rssi_stat.UndecoratedSmoothedPWDB < tmpEntryMinPWDB)
+ tmpEntryMinPWDB = psta->rssi_stat.UndecoratedSmoothedPWDB;
+
+ if (psta->rssi_stat.UndecoratedSmoothedPWDB > tmpEntryMaxPWDB)
+ tmpEntryMaxPWDB = psta->rssi_stat.UndecoratedSmoothedPWDB;
+ if (psta->rssi_stat.UndecoratedSmoothedPWDB != (-1))
+ PWDB_rssi[sta_cnt++] = (psta->mac_id | (psta->rssi_stat.UndecoratedSmoothedPWDB<<16));
+ }
+ }
+
+ for (i = 0; i < sta_cnt; i++) {
+ if (PWDB_rssi[i] != (0)) {
+ if (pHalData->fw_ractrl) {
+ /* Report every sta's RSSI to FW */
+ } else {
+ ODM_RA_SetRSSI_8188E(
+ &(pHalData->odmpriv), (PWDB_rssi[i]&0xFF), (u8)((PWDB_rssi[i]>>16) & 0xFF));
+ }
+ }
+ }
+
+ if (tmpEntryMaxPWDB != 0) /* If associated entry is found */
+ pdmpriv->EntryMaxUndecoratedSmoothedPWDB = tmpEntryMaxPWDB;
+ else
+ pdmpriv->EntryMaxUndecoratedSmoothedPWDB = 0;
+
+ if (tmpEntryMinPWDB != 0xff) /* If associated entry is found */
+ pdmpriv->EntryMinUndecoratedSmoothedPWDB = tmpEntryMinPWDB;
+ else
+ pdmpriv->EntryMinUndecoratedSmoothedPWDB = 0;
+
+ FindMinimumRSSI(Adapter);
+ ODM_CmnInfoUpdate(&pHalData->odmpriv, ODM_CMNINFO_RSSI_MIN, pdmpriv->MinUndecoratedPWDBForDM);
+}
+
+void odm_RSSIMonitorCheckAP(struct odm_dm_struct *pDM_Odm)
+{
+}
+
+void ODM_InitAllTimers(struct odm_dm_struct *pDM_Odm)
+{
+ ODM_InitializeTimer(pDM_Odm, &pDM_Odm->DM_SWAT_Table.SwAntennaSwitchTimer,
+ (void *)odm_SwAntDivChkAntSwitchCallback, NULL, "SwAntennaSwitchTimer");
+}
+
+void ODM_CancelAllTimers(struct odm_dm_struct *pDM_Odm)
+{
+ ODM_CancelTimer(pDM_Odm, &pDM_Odm->DM_SWAT_Table.SwAntennaSwitchTimer);
+}
+
+void ODM_ReleaseAllTimers(struct odm_dm_struct *pDM_Odm)
+{
+ ODM_ReleaseTimer(pDM_Odm, &pDM_Odm->DM_SWAT_Table.SwAntennaSwitchTimer);
+
+ ODM_ReleaseTimer(pDM_Odm, &pDM_Odm->FastAntTrainingTimer);
+}
+
+/* 3============================================================ */
+/* 3 Tx Power Tracking */
+/* 3============================================================ */
+
+void odm_TXPowerTrackingInit(struct odm_dm_struct *pDM_Odm)
+{
+ odm_TXPowerTrackingThermalMeterInit(pDM_Odm);
+}
+
+void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm)
+{
+ pDM_Odm->RFCalibrateInfo.bTXPowerTracking = true;
+ pDM_Odm->RFCalibrateInfo.TXPowercount = 0;
+ pDM_Odm->RFCalibrateInfo.bTXPowerTrackingInit = false;
+ if (*(pDM_Odm->mp_mode) != 1)
+ pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true;
+ MSG_88E("pDM_Odm TxPowerTrackControl = %d\n", pDM_Odm->RFCalibrateInfo.TxPowerTrackControl);
+
+ pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true;
+}
+
+void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm)
+{
+ /* 2011/09/29 MH In the first stage of HW integration we provide four different handlers */
+ /* operating at the same time. In stage 2/3 we need to provide a universal interface and */
+ /* merge all HW dynamic mechanisms. */
+ switch (pDM_Odm->SupportPlatform) {
+ case ODM_MP:
+ odm_TXPowerTrackingCheckMP(pDM_Odm);
+ break;
+ case ODM_CE:
+ odm_TXPowerTrackingCheckCE(pDM_Odm);
+ break;
+ case ODM_AP:
+ odm_TXPowerTrackingCheckAP(pDM_Odm);
+ break;
+ case ODM_ADSL:
+ break;
+ }
+}
+
+void odm_TXPowerTrackingCheckCE(struct odm_dm_struct *pDM_Odm)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+
+ if (!(pDM_Odm->SupportAbility & ODM_RF_TX_PWR_TRACK))
+ return;
+
+ if (!pDM_Odm->RFCalibrateInfo.TM_Trigger) { /* at least delay 1 sec */
+ PHY_SetRFReg(Adapter, RF_PATH_A, RF_T_METER_88E, BIT17 | BIT16, 0x03);
+
+ pDM_Odm->RFCalibrateInfo.TM_Trigger = 1;
+ return;
+ } else {
+ odm_TXPowerTrackingCallback_ThermalMeter_8188E(Adapter);
+ pDM_Odm->RFCalibrateInfo.TM_Trigger = 0;
+ }
+}
+
+void odm_TXPowerTrackingCheckMP(struct odm_dm_struct *pDM_Odm)
+{
+}
+
+void odm_TXPowerTrackingCheckAP(struct odm_dm_struct *pDM_Odm)
+{
+}
+
+/* antenna mapping info */
+/* 1: right-side antenna */
+/* 2/0: left-side antenna */
+/* PDM_SWAT_Table->CCK_Ant1_Cnt /OFDM_Ant1_Cnt: for right-side antenna: Ant:1 RxDefaultAnt1 */
+/* PDM_SWAT_Table->CCK_Ant2_Cnt /OFDM_Ant2_Cnt: for left-side antenna: Ant:0 RxDefaultAnt2 */
+/* We select the left antenna as the default antenna during initialization; modify it as needed */
+/* */
+
+/* 3============================================================ */
+/* 3 SW Antenna Diversity */
+/* 3============================================================ */
+void odm_SwAntDivInit(struct odm_dm_struct *pDM_Odm)
+{
+}
+
+void ODM_SwAntDivChkPerPktRssi(struct odm_dm_struct *pDM_Odm, u8 StationID, struct odm_phy_status_info *pPhyInfo)
+{
+}
+
+void odm_SwAntDivChkAntSwitch(struct odm_dm_struct *pDM_Odm, u8 Step)
+{
+}
+
+void ODM_SwAntDivRestAfterLink(struct odm_dm_struct *pDM_Odm)
+{
+}
+
+void odm_SwAntDivChkAntSwitchCallback(void *FunctionContext)
+{
+}
+
+/* 3============================================================ */
+/* 3 HW Antenna Diversity */
+/* 3============================================================ */
+
+void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm)
+{
+ if (!(pDM_Odm->SupportAbility & ODM_BB_ANT_DIV)) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Return: Not Support HW AntDiv\n"));
+ return;
+ }
+
+ if (pDM_Odm->SupportICType & (ODM_RTL8192C | ODM_RTL8192D))
+ ;
+ else if (pDM_Odm->SupportICType == ODM_RTL8188E)
+ ODM_AntennaDiversityInit_88E(pDM_Odm);
+}
+
+void ODM_AntselStatistics_88C(struct odm_dm_struct *pDM_Odm, u8 MacId, u32 PWDBAll, bool isCCKrate)
+{
+ struct sw_ant_switch *pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
+
+ if (pDM_SWAT_Table->antsel == 1) {
+ if (isCCKrate) {
+ pDM_SWAT_Table->CCK_Ant1_Cnt[MacId]++;
+ } else {
+ pDM_SWAT_Table->OFDM_Ant1_Cnt[MacId]++;
+ pDM_SWAT_Table->RSSI_Ant1_Sum[MacId] += PWDBAll;
+ }
+ } else {
+ if (isCCKrate) {
+ pDM_SWAT_Table->CCK_Ant2_Cnt[MacId]++;
+ } else {
+ pDM_SWAT_Table->OFDM_Ant2_Cnt[MacId]++;
+ pDM_SWAT_Table->RSSI_Ant2_Sum[MacId] += PWDBAll;
+ }
+ }
+}
+
+void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm)
+{
+ if (!(pDM_Odm->SupportAbility & ODM_BB_ANT_DIV)) {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Return: Not Support HW AntDiv\n"));
+ return;
+ }
+
+ if (pDM_Odm->SupportICType == ODM_RTL8188E)
+ ODM_AntennaDiversity_88E(pDM_Odm);
+}
+
+/* EDCA Turbo */
+void ODM_EdcaTurboInit(struct odm_dm_struct *pDM_Odm)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
+ pDM_Odm->DM_EDCA_Table.bIsCurRDLState = false;
+ Adapter->recvpriv.bIsAnyNonBEPkts = false;
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Original VO PARAM: 0x%x\n", ODM_Read4Byte(pDM_Odm, ODM_EDCA_VO_PARAM)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Original VI PARAM: 0x%x\n", ODM_Read4Byte(pDM_Odm, ODM_EDCA_VI_PARAM)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Original BE PARAM: 0x%x\n", ODM_Read4Byte(pDM_Odm, ODM_EDCA_BE_PARAM)));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("Original BK PARAM: 0x%x\n", ODM_Read4Byte(pDM_Odm, ODM_EDCA_BK_PARAM)));
+} /* ODM_InitEdcaTurbo */
+
+void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm)
+{
+ /* 2011/09/29 MH In the first stage of HW integration we provide four different handlers */
+ /* operating at the same time. In stage 2/3 we need to provide a universal interface and */
+ /* merge all HW dynamic mechanisms. */
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("odm_EdcaTurboCheck========================>\n"));
+
+ if (!(pDM_Odm->SupportAbility & ODM_MAC_EDCA_TURBO))
+ return;
+
+ switch (pDM_Odm->SupportPlatform) {
+ case ODM_MP:
+ break;
+ case ODM_CE:
+ odm_EdcaTurboCheckCE(pDM_Odm);
+ break;
+ case ODM_AP:
+ case ODM_ADSL:
+ break;
+ }
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_EDCA_TURBO, ODM_DBG_LOUD, ("<========================odm_EdcaTurboCheck\n"));
+} /* odm_CheckEdcaTurbo */
+
+void odm_EdcaTurboCheckCE(struct odm_dm_struct *pDM_Odm)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ u32 trafficIndex;
+ u32 edca_param;
+ u64 cur_tx_bytes = 0;
+ u64 cur_rx_bytes = 0;
+ u8 bbtchange = false;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct xmit_priv *pxmitpriv = &(Adapter->xmitpriv);
+ struct recv_priv *precvpriv = &(Adapter->recvpriv);
+ struct registry_priv *pregpriv = &Adapter->registrypriv;
+ struct mlme_ext_priv *pmlmeext = &(Adapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if (pregpriv->wifi_spec == 1) /* (pmlmeinfo->HT_enable == 0) */
+ goto dm_CheckEdcaTurbo_EXIT;
+
+ if (pmlmeinfo->assoc_AP_vendor >= HT_IOT_PEER_MAX)
+ goto dm_CheckEdcaTurbo_EXIT;
+
+ /* Check if the status needs to be changed. */
+ if ((bbtchange) || (!precvpriv->bIsAnyNonBEPkts)) {
+ cur_tx_bytes = pxmitpriv->tx_bytes - pxmitpriv->last_tx_bytes;
+ cur_rx_bytes = precvpriv->rx_bytes - precvpriv->last_rx_bytes;
+
+ /* traffic, TX or RX */
+ if ((pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_RALINK) ||
+ (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_ATHEROS)) {
+ if (cur_tx_bytes > (cur_rx_bytes << 2)) {
+ /* Uplink TP is present. */
+ trafficIndex = UP_LINK;
+ } else {
+ /* Balance TP is present. */
+ trafficIndex = DOWN_LINK;
+ }
+ } else {
+ if (cur_rx_bytes > (cur_tx_bytes << 2)) {
+ /* Downlink TP is present. */
+ trafficIndex = DOWN_LINK;
+ } else {
+ /* Balance TP is present. */
+ trafficIndex = UP_LINK;
+ }
+ }
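+ /* The link is treated as uplink-dominated when TX bytes exceed four times RX bytes */
+ /* (and downlink-dominated in the mirror case); balanced traffic defaults to DOWN_LINK */
+ /* for Ralink/Atheros APs and to UP_LINK otherwise. */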
+
+ if ((pDM_Odm->DM_EDCA_Table.prv_traffic_idx != trafficIndex) || (!pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA)) {
+ if ((pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_CISCO) && (pmlmeext->cur_wireless_mode & WIRELESS_11_24N))
+ edca_param = EDCAParam[pmlmeinfo->assoc_AP_vendor][trafficIndex];
+ else
+ edca_param = EDCAParam[HT_IOT_PEER_UNKNOWN][trafficIndex];
+
+ rtw_write32(Adapter, REG_EDCA_BE_PARAM, edca_param);
+
+ pDM_Odm->DM_EDCA_Table.prv_traffic_idx = trafficIndex;
+ }
+
+ pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = true;
+ } else {
+ /* Turn Off EDCA turbo here. */
+ /* Restore original EDCA according to the declaration of AP. */
+ if (pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA) {
+ rtw_write32(Adapter, REG_EDCA_BE_PARAM, pHalData->AcParam_BE);
+ pDM_Odm->DM_EDCA_Table.bCurrentTurboEDCA = false;
+ }
+ }
+
+dm_CheckEdcaTurbo_EXIT:
+ /* Set variables for next time. */
+ precvpriv->bIsAnyNonBEPkts = false;
+ pxmitpriv->last_tx_bytes = pxmitpriv->tx_bytes;
+ precvpriv->last_rx_bytes = precvpriv->rx_bytes;
+}
+
+/* Needed for the ODM CE platform */
+/* Moved here for use by the antenna detection mechanism */
+
+u32 GetPSDData(struct odm_dm_struct *pDM_Odm, unsigned int point, u8 initial_gain_psd)
+{
+ u32 psd_report;
+
+ /* Set DCO frequency index, offset=(40MHz/SamplePts)*point */
+ ODM_SetBBReg(pDM_Odm, 0x808, 0x3FF, point);
+
+ /* Start PSD calculation, Reg808[22]=0->1 */
+ ODM_SetBBReg(pDM_Odm, 0x808, BIT22, 1);
+ /* Need to wait for HW PSD report */
+ ODM_StallExecution(30);
+ ODM_SetBBReg(pDM_Odm, 0x808, BIT22, 0);
+ /* Read PSD report, Reg8B4[15:0] */
+ psd_report = ODM_GetBBReg(pDM_Odm, 0x8B4, bMaskDWord) & 0x0000FFFF;
+
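+ /* Convert the raw 16-bit PSD sample to dB and compensate for the initial gain used */
+ /* during the measurement, apparently relative to a 0x1c reference gain */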
+ psd_report = (u32) (ConvertTo_dB(psd_report))+(u32)(initial_gain_psd-0x1c);
+
+ return psd_report;
+}
+
+u32 ConvertTo_dB(u32 Value)
+{
+ u8 i;
+ u8 j;
+ u32 dB;
+
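+ /* Look up the 16-bit linear value in the 8x12 dB_Invert_Table: row i spans 12 dB and */
+ /* the result is i*12 + j + 1 dB, saturating at 96 dB when the value is off the table */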
+ Value = Value & 0xFFFF;
+ for (i = 0; i < 8; i++) {
+ if (Value <= dB_Invert_Table[i][11])
+ break;
+ }
+
+ if (i >= 8)
+ return 96; /* maximum 96 dB */
+
+ for (j = 0; j < 12; j++) {
+ if (Value <= dB_Invert_Table[i][j])
+ break;
+ }
+
+ dB = i*12 + j + 1;
+
+ return dB;
+}
+
+/* 2011/09/22 MH Add for 92D global spin lock utilization. */
+void odm_GlobalAdapterCheck(void)
+{
+} /* odm_GlobalAdapterCheck */
+
+/* Description: */
+/* Set Single/Dual Antenna default setting for products that do not do detection in advance. */
+/* Added by Joseph, 2012.03.22 */
+void ODM_SingleDualAntennaDefaultSetting(struct odm_dm_struct *pDM_Odm)
+{
+ struct sw_ant_switch *pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
+
+ pDM_SWAT_Table->ANTA_ON = true;
+ pDM_SWAT_Table->ANTB_ON = true;
+}
+
+
+/* 2 8723A ANT DETECT */
+
+static void odm_PHY_SaveAFERegisters(struct odm_dm_struct *pDM_Odm, u32 *AFEReg, u32 *AFEBackup, u32 RegisterNum)
+{
+ u32 i;
+
+ /* RTPRINT(FINIT, INIT_IQK, ("Save ADDA parameters.\n")); */
+ for (i = 0; i < RegisterNum; i++)
+ AFEBackup[i] = ODM_GetBBReg(pDM_Odm, AFEReg[i], bMaskDWord);
+}
+
+static void odm_PHY_ReloadAFERegisters(struct odm_dm_struct *pDM_Odm, u32 *AFEReg, u32 *AFEBackup, u32 RegisterNum)
+{
+ u32 i;
+
+ for (i = 0; i < RegisterNum; i++)
+ ODM_SetBBReg(pDM_Odm, AFEReg[i], bMaskDWord, AFEBackup[i]);
+}
+
+/* 2 8723A ANT DETECT */
+/* Description: */
+/* Implement IQK single tone for RF DPK loopback and BB PSD scanning. */
+/* This function was developed in cooperation with Neil of the BB team. */
+bool ODM_SingleDualAntennaDetection(struct odm_dm_struct *pDM_Odm, u8 mode)
+{
+ struct sw_ant_switch *pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
+ u32 CurrentChannel, RfLoopReg;
+ u8 n;
+ u32 Reg88c, Regc08, Reg874, Regc50;
+ u8 initial_gain = 0x5a;
+ u32 PSD_report_tmp;
+ u32 AntA_report = 0x0, AntB_report = 0x0, AntO_report = 0x0;
+ bool bResult = true;
+ u32 AFE_Backup[16];
+ u32 AFE_REG_8723A[16] = {
+ rRx_Wait_CCA, rTx_CCK_RFON,
+ rTx_CCK_BBON, rTx_OFDM_RFON,
+ rTx_OFDM_BBON, rTx_To_Rx,
+ rTx_To_Tx, rRx_CCK,
+ rRx_OFDM, rRx_Wait_RIFS,
+ rRx_TO_Rx, rStandby,
+ rSleep, rPMPD_ANAEN,
+ rFPGA0_XCD_SwitchControl, rBlue_Tooth};
+
+ if (!(pDM_Odm->SupportICType & (ODM_RTL8723A|ODM_RTL8192C)))
+ return bResult;
+
+ if (!(pDM_Odm->SupportAbility&ODM_BB_ANT_DIV))
+ return bResult;
+
+ if (pDM_Odm->SupportICType == ODM_RTL8192C) {
+ /* Which path in the ADC/DAC is turned on for PSD: both I/Q */
+ ODM_SetBBReg(pDM_Odm, 0x808, BIT10|BIT11, 0x3);
+ /* Averaged number: 8 */
+ ODM_SetBBReg(pDM_Odm, 0x808, BIT12|BIT13, 0x1);
+ /* pts = 128; */
+ ODM_SetBBReg(pDM_Odm, 0x808, BIT14|BIT15, 0x0);
+ }
+
+ /* 1 Backup Current RF/BB Settings */
+
+ CurrentChannel = ODM_GetRFReg(pDM_Odm, RF_PATH_A, ODM_CHANNEL, bRFRegOffsetMask);
+ RfLoopReg = ODM_GetRFReg(pDM_Odm, RF_PATH_A, 0x00, bRFRegOffsetMask);
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, ODM_DPDT, Antenna_A); /* change to Antenna A */
+ /* Step 1: Use IQK to transmit a single tone */
+
+ ODM_StallExecution(10);
+
+ /* Store A Path Register 88c, c08, 874, c50 */
+ Reg88c = ODM_GetBBReg(pDM_Odm, rFPGA0_AnalogParameter4, bMaskDWord);
+ Regc08 = ODM_GetBBReg(pDM_Odm, rOFDM0_TRMuxPar, bMaskDWord);
+ Reg874 = ODM_GetBBReg(pDM_Odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord);
+ Regc50 = ODM_GetBBReg(pDM_Odm, rOFDM0_XAAGCCore1, bMaskDWord);
+
+ /* Store AFE Registers */
+ odm_PHY_SaveAFERegisters(pDM_Odm, AFE_REG_8723A, AFE_Backup, 16);
+
+ /* Set PSD 128 pts */
+ ODM_SetBBReg(pDM_Odm, rFPGA0_PSDFunction, BIT14|BIT15, 0x0); /* 128 pts */
+
+ /* Set channel 1 for the measurement */
+ ODM_SetRFReg(pDM_Odm, RF_PATH_A, ODM_CHANNEL, bRFRegOffsetMask, 0x01); /* Channel 1 */
+
+ /* AFE all on step */
+ ODM_SetBBReg(pDM_Odm, rRx_Wait_CCA, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rTx_CCK_RFON, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rTx_CCK_BBON, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rTx_OFDM_RFON, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rTx_OFDM_BBON, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rTx_To_Rx, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rTx_To_Tx, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rRx_CCK, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rRx_OFDM, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rRx_Wait_RIFS, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rRx_TO_Rx, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rStandby, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rSleep, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rPMPD_ANAEN, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XCD_SwitchControl, bMaskDWord, 0x6FDB25A4);
+ ODM_SetBBReg(pDM_Odm, rBlue_Tooth, bMaskDWord, 0x6FDB25A4);
+
+ /* Disable the 3-wire interface */
+ ODM_SetBBReg(pDM_Odm, rFPGA0_AnalogParameter4, bMaskDWord, 0xCCF000C0);
+
+ /* BB IQK Setting */
+ ODM_SetBBReg(pDM_Odm, rOFDM0_TRMuxPar, bMaskDWord, 0x000800E4);
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, 0x22208000);
+
+ /* IQK setting, tone at 4.34 MHz */
+ ODM_SetBBReg(pDM_Odm, rTx_IQK_Tone_A, bMaskDWord, 0x10008C1C);
+ ODM_SetBBReg(pDM_Odm, rTx_IQK, bMaskDWord, 0x01007c00);
+
+
+ /* Page B init */
+ ODM_SetBBReg(pDM_Odm, rConfig_AntA, bMaskDWord, 0x00080000);
+ ODM_SetBBReg(pDM_Odm, rConfig_AntA, bMaskDWord, 0x0f600000);
+ ODM_SetBBReg(pDM_Odm, rRx_IQK, bMaskDWord, 0x01004800);
+ ODM_SetBBReg(pDM_Odm, rRx_IQK_Tone_A, bMaskDWord, 0x10008c1f);
+ ODM_SetBBReg(pDM_Odm, rTx_IQK_PI_A, bMaskDWord, 0x82150008);
+ ODM_SetBBReg(pDM_Odm, rRx_IQK_PI_A, bMaskDWord, 0x28150008);
+ ODM_SetBBReg(pDM_Odm, rIQK_AGC_Rsp, bMaskDWord, 0x001028d0);
+
+ /* RF loop Setting */
+ ODM_SetRFReg(pDM_Odm, RF_PATH_A, 0x0, 0xFFFFF, 0x50008);
+
+ /* IQK Single tone start */
+ ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
+ ODM_SetBBReg(pDM_Odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
+ ODM_StallExecution(1000);
+ PSD_report_tmp = 0x0;
+
+ for (n = 0; n < 2; n++) {
+ PSD_report_tmp = GetPSDData(pDM_Odm, 14, initial_gain);
+ if (PSD_report_tmp > AntA_report)
+ AntA_report = PSD_report_tmp;
+ }
+
+ PSD_report_tmp = 0x0;
+
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_B); /* change to Antenna B */
+ ODM_StallExecution(10);
+
+
+ for (n = 0; n < 2; n++) {
+ PSD_report_tmp = GetPSDData(pDM_Odm, 14, initial_gain);
+ if (PSD_report_tmp > AntB_report)
+ AntB_report = PSD_report_tmp;
+ }
+
+ /* change to open case */
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, 0); /* change to Ant A and B all open case */
+ ODM_StallExecution(10);
+
+ for (n = 0; n < 2; n++) {
+ PSD_report_tmp = GetPSDData(pDM_Odm, 14, initial_gain);
+ if (PSD_report_tmp > AntO_report)
+ AntO_report = PSD_report_tmp;
+ }
+
+ /* Close IQK Single Tone function */
+ ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
+ PSD_report_tmp = 0x0;
+
+ /* 1 Return to antenna A */
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_A);
+ ODM_SetBBReg(pDM_Odm, rFPGA0_AnalogParameter4, bMaskDWord, Reg88c);
+ ODM_SetBBReg(pDM_Odm, rOFDM0_TRMuxPar, bMaskDWord, Regc08);
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, Reg874);
+ ODM_SetBBReg(pDM_Odm, rOFDM0_XAAGCCore1, 0x7F, 0x40);
+ ODM_SetBBReg(pDM_Odm, rOFDM0_XAAGCCore1, bMaskDWord, Regc50);
+ ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, CurrentChannel);
+ ODM_SetRFReg(pDM_Odm, RF_PATH_A, 0x00, bRFRegOffsetMask, RfLoopReg);
+
+ /* Reload AFE Registers */
+ odm_PHY_ReloadAFERegisters(pDM_Odm, AFE_REG_8723A, AFE_Backup, 16);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("psd_report_A[%d]= %d\n", 2416, AntA_report));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("psd_report_B[%d]= %d\n", 2416, AntB_report));
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("psd_report_O[%d]= %d\n", 2416, AntO_report));
+
+
+ if (pDM_Odm->SupportICType == ODM_RTL8723A) {
+ /* 2 Test Ant B based on Ant A is ON */
+ if (mode == ANTTESTB) {
+ if (AntA_report >= 100) {
+ if (AntB_report > (AntA_report+1)) {
+ pDM_SWAT_Table->ANTB_ON = false;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Single Antenna A\n"));
+ } else {
+ pDM_SWAT_Table->ANTB_ON = true;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Dual Antenna is A and B\n"));
+ }
+ } else {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Need to check again\n"));
+ pDM_SWAT_Table->ANTB_ON = false; /* Set Antenna B off as default */
+ bResult = false;
+ }
+ } else if (mode == ANTTESTALL) {
+ /* 2 Test Ant A and B based on DPDT Open */
+ if ((AntO_report >= 100) && (AntO_report < 118)) {
+ if (AntA_report > (AntO_report+1)) {
+ pDM_SWAT_Table->ANTA_ON = false;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Ant A is OFF"));
+ } else {
+ pDM_SWAT_Table->ANTA_ON = true;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Ant A is ON"));
+ }
+
+ if (AntB_report > (AntO_report+2)) {
+ pDM_SWAT_Table->ANTB_ON = false;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Ant B is OFF"));
+ } else {
+ pDM_SWAT_Table->ANTB_ON = true;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Ant B is ON"));
+ }
+ }
+ }
+ } else if (pDM_Odm->SupportICType == ODM_RTL8192C) {
+ if (AntA_report >= 100) {
+ if (AntB_report > (AntA_report+2)) {
+ pDM_SWAT_Table->ANTA_ON = false;
+ pDM_SWAT_Table->ANTB_ON = true;
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_B);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Single Antenna B\n"));
+ } else if (AntA_report > (AntB_report+2)) {
+ pDM_SWAT_Table->ANTA_ON = true;
+ pDM_SWAT_Table->ANTB_ON = false;
+ ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_A);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Single Antenna A\n"));
+ } else {
+ pDM_SWAT_Table->ANTA_ON = true;
+ pDM_SWAT_Table->ANTB_ON = true;
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
+ ("ODM_SingleDualAntennaDetection(): Dual Antenna\n"));
+ }
+ } else {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_SingleDualAntennaDetection(): Need to check again\n"));
+ pDM_SWAT_Table->ANTA_ON = true; /* Set Antenna A on as default */
+ pDM_SWAT_Table->ANTB_ON = false; /* Set Antenna B off as default */
+ bResult = false;
+ }
+ }
+ return bResult;
+}
+
+/* Justin: Adjust the response frame TX power according to the current RSSI, 2012/11/05 */
+void odm_dtc(struct odm_dm_struct *pDM_Odm)
+{
+}
diff --git a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
new file mode 100644
index 00000000000..19c509a2beb
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
@@ -0,0 +1,596 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+/* include files */
+
+#include "odm_precomp.h"
+
+#define READ_AND_CONFIG READ_AND_CONFIG_MP
+
+#define READ_AND_CONFIG_MP(ic, txt) (ODM_ReadAndConfig##txt##ic(dm_odm))
+#define READ_AND_CONFIG_TC(ic, txt) (ODM_ReadAndConfig_TC##txt##ic(dm_odm))
+
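+/* Convert an antenna power in dBm to a 0..100 percentage: -100 dBm maps to 0, 0 dBm maps to 100 (linear in between), and values outside the expected range (<= -100 or >= 20) return 0. */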
+static u8 odm_QueryRxPwrPercentage(s8 AntPower)
+{
+ if ((AntPower <= -100) || (AntPower >= 20))
+ return 0;
+ else if (AntPower >= 0)
+ return 100;
+ else
+ return 100+AntPower;
+}
+
+/* 2012/01/12 MH Move some signal strength smoothing methods to the MP HAL layer. */
+/* If other SW teams do not support the feature, remove this section. */
+static s32 odm_sig_patch_lenove(struct odm_dm_struct *dm_odm, s32 CurrSig)
+{
+ return 0;
+}
+
+static s32 odm_sig_patch_netcore(struct odm_dm_struct *dm_odm, s32 CurrSig)
+{
+ return 0;
+}
+
+static s32 odm_SignalScaleMapping_92CSeries(struct odm_dm_struct *dm_odm, s32 CurrSig)
+{
+ s32 RetSig = 0;
+
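+ /* Piecewise-linear remapping of the 0..100 signal for USB/SDIO interfaces: low readings are stretched and anything from 51 up saturates at 100. */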
+ if ((dm_odm->SupportInterface == ODM_ITRF_USB) ||
+ (dm_odm->SupportInterface == ODM_ITRF_SDIO)) {
+ if (CurrSig >= 51 && CurrSig <= 100)
+ RetSig = 100;
+ else if (CurrSig >= 41 && CurrSig <= 50)
+ RetSig = 80 + ((CurrSig - 40)*2);
+ else if (CurrSig >= 31 && CurrSig <= 40)
+ RetSig = 66 + (CurrSig - 30);
+ else if (CurrSig >= 21 && CurrSig <= 30)
+ RetSig = 54 + (CurrSig - 20);
+ else if (CurrSig >= 10 && CurrSig <= 20)
+ RetSig = 42 + (((CurrSig - 10) * 2) / 3);
+ else if (CurrSig >= 5 && CurrSig <= 9)
+ RetSig = 22 + (((CurrSig - 5) * 3) / 2);
+ else if (CurrSig >= 1 && CurrSig <= 4)
+ RetSig = 6 + (((CurrSig - 1) * 3) / 2);
+ else
+ RetSig = CurrSig;
+ }
+ return RetSig;
+}
+
+static s32 odm_SignalScaleMapping(struct odm_dm_struct *dm_odm, s32 CurrSig)
+{
+ if ((dm_odm->SupportPlatform == ODM_MP) &&
+ (dm_odm->SupportInterface != ODM_ITRF_PCIE) && /* USB & SDIO */
+ (dm_odm->PatchID == 10))
+ return odm_sig_patch_netcore(dm_odm, CurrSig);
+ else if ((dm_odm->SupportPlatform == ODM_MP) &&
+ (dm_odm->SupportInterface == ODM_ITRF_PCIE) &&
+ (dm_odm->PatchID == 19))
+ return odm_sig_patch_lenove(dm_odm, CurrSig);
+ else
+ return odm_SignalScaleMapping_92CSeries(dm_odm, CurrSig);
+}
+
+/* pMgntInfo->CustomerID == RT_CID_819x_Lenovo */
+static u8 odm_SQ_process_patch_RT_CID_819x_Lenovo(struct odm_dm_struct *dm_odm,
+ u8 isCCKrate, u8 PWDB_ALL, u8 path, u8 RSSI)
+{
+ return 0;
+}
+
+static u8 odm_EVMdbToPercentage(s8 Value)
+{
+ /* -33dB~0dB to 0%~99% */
+ s8 ret_val;
+
+ ret_val = Value;
+
+ if (ret_val >= 0)
+ ret_val = 0;
+ if (ret_val <= -33)
+ ret_val = -33;
+
+ ret_val = 0 - ret_val;
+ ret_val *= 3;
+
+ if (ret_val == 99)
+ ret_val = 100;
+ return ret_val;
+}
+
+static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
+ struct odm_phy_status_info *pPhyInfo,
+ u8 *pPhyStatus,
+ struct odm_per_pkt_info *pPktinfo)
+{
+ struct sw_ant_switch *pDM_SWAT_Table = &dm_odm->DM_SWAT_Table;
+ u8 i, Max_spatial_stream;
+ s8 rx_pwr[4], rx_pwr_all = 0;
+ u8 EVM, PWDB_ALL = 0, PWDB_ALL_BT;
+ u8 RSSI, total_rssi = 0;
+ u8 isCCKrate = 0;
+ u8 rf_rx_num = 0;
+ u8 cck_highpwr = 0;
+ u8 LNA_idx, VGA_idx;
+
+ struct phy_status_rpt *pPhyStaRpt = (struct phy_status_rpt *)pPhyStatus;
+
+ isCCKrate = ((pPktinfo->Rate >= DESC92C_RATE1M) && (pPktinfo->Rate <= DESC92C_RATE11M)) ? true : false;
+
+ pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_A] = -1;
+ pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_B] = -1;
+
+ if (isCCKrate) {
+ u8 report;
+ u8 cck_agc_rpt;
+
+ dm_odm->PhyDbgInfo.NumQryPhyStatusCCK++;
+ /* (1)Hardware does not provide RSSI for CCK */
+ /* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
+
+ cck_highpwr = dm_odm->bCckHighPower;
+
+ cck_agc_rpt = pPhyStaRpt->cck_agc_rpt_ofdm_cfosho_a;
+
+ /* 2011.11.28 LukeLee: 88E use different LNA & VGA gain table */
+ /* The RSSI formula should be modified according to the gain table */
+ /* In 88E, cck_highpwr is always set to 1 */
+ if (dm_odm->SupportICType & (ODM_RTL8188E|ODM_RTL8812)) {
+ LNA_idx = ((cck_agc_rpt & 0xE0) >> 5);
+ VGA_idx = (cck_agc_rpt & 0x1F);
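+ /* Each LNA index selects a base power level; every VGA step below the case's reference adds 2 dB to the estimated rx power. */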
+ switch (LNA_idx) {
+ case 7:
+ if (VGA_idx <= 27)
+ rx_pwr_all = -100 + 2*(27-VGA_idx); /* VGA_idx = 27~2 */
+ else
+ rx_pwr_all = -100;
+ break;
+ case 6:
+ rx_pwr_all = -48 + 2*(2-VGA_idx); /* VGA_idx = 2~0 */
+ break;
+ case 5:
+ rx_pwr_all = -42 + 2*(7-VGA_idx); /* VGA_idx = 7~5 */
+ break;
+ case 4:
+ rx_pwr_all = -36 + 2*(7-VGA_idx); /* VGA_idx = 7~4 */
+ break;
+ case 3:
+ rx_pwr_all = -24 + 2*(7-VGA_idx); /* VGA_idx = 7~0 */
+ break;
+ case 2:
+ if (cck_highpwr)
+ rx_pwr_all = -12 + 2*(5-VGA_idx); /* VGA_idx = 5~0 */
+ else
+ rx_pwr_all = -6 + 2*(5-VGA_idx);
+ break;
+ case 1:
+ rx_pwr_all = 8-2*VGA_idx;
+ break;
+ case 0:
+ rx_pwr_all = 14-2*VGA_idx;
+ break;
+ default:
+ break;
+ }
+ rx_pwr_all += 6;
+ PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
+ if (!cck_highpwr) {
+ if (PWDB_ALL >= 80)
+ PWDB_ALL = ((PWDB_ALL-80)<<1)+((PWDB_ALL-80)>>1)+80;
+ else if ((PWDB_ALL <= 78) && (PWDB_ALL >= 20))
+ PWDB_ALL += 3;
+ if (PWDB_ALL > 100)
+ PWDB_ALL = 100;
+ }
+ } else {
+ if (!cck_highpwr) {
+ report = (cck_agc_rpt & 0xc0)>>6;
+ switch (report) {
+ /* 03312009 modified by cosa */
+ /* Modify the RF LNA gain value to -40, -20, -2, 14 by Jenyu's suggestion */
+ /* Note: different RF chips have different LNA gains. */
+ case 0x3:
+ rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x2:
+ rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x1:
+ rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
+ break;
+ case 0x0:
+ rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
+ break;
+ }
+ } else {
+ report = (cck_agc_rpt & 0x60)>>5;
+ switch (report) {
+ case 0x3:
+ rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f)<<1);
+ break;
+ case 0x2:
+ rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f)<<1);
+ break;
+ case 0x1:
+ rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f)<<1);
+ break;
+ case 0x0:
+ rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f)<<1);
+ break;
+ }
+ }
+
+ PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
+
+ /* Modification for ext-LNA board */
+ if (dm_odm->BoardType == ODM_BOARD_HIGHPWR) {
+ if ((cck_agc_rpt>>7) == 0) {
+ PWDB_ALL = (PWDB_ALL > 94) ? 100 : (PWDB_ALL+6);
+ } else {
+ if (PWDB_ALL > 38)
+ PWDB_ALL -= 16;
+ else
+ PWDB_ALL = (PWDB_ALL <= 16) ? (PWDB_ALL>>2) : (PWDB_ALL-12);
+ }
+
+ /* CCK modification */
+ if (PWDB_ALL > 25 && PWDB_ALL <= 60)
+ PWDB_ALL += 6;
+ } else {/* Modification for int-LNA board */
+ if (PWDB_ALL > 99)
+ PWDB_ALL -= 8;
+ else if (PWDB_ALL > 50 && PWDB_ALL <= 68)
+ PWDB_ALL += 4;
+ }
+ }
+
+ pPhyInfo->RxPWDBAll = PWDB_ALL;
+ pPhyInfo->BTRxRSSIPercentage = PWDB_ALL;
+ pPhyInfo->RecvSignalPower = rx_pwr_all;
+ /* (3) Get Signal Quality (EVM) */
+ if (pPktinfo->bPacketMatchBSSID) {
+ u8 SQ, SQ_rpt;
+
+ if ((dm_odm->SupportPlatform == ODM_MP) && (dm_odm->PatchID == 19)) {
+ SQ = odm_SQ_process_patch_RT_CID_819x_Lenovo(dm_odm, isCCKrate, PWDB_ALL, 0, 0);
+ } else if (pPhyInfo->RxPWDBAll > 40 && !dm_odm->bInHctTest) {
+ SQ = 100;
+ } else {
+ SQ_rpt = pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all;
+
+ if (SQ_rpt > 64)
+ SQ = 0;
+ else if (SQ_rpt < 20)
+ SQ = 100;
+ else
+ SQ = ((64-SQ_rpt) * 100) / 44;
+ }
+ pPhyInfo->SignalQuality = SQ;
+ pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_A] = SQ;
+ pPhyInfo->RxMIMOSignalQuality[ODM_RF_PATH_B] = -1;
+ }
+ } else { /* is OFDM rate */
+ dm_odm->PhyDbgInfo.NumQryPhyStatusOFDM++;
+
+ /* (1)Get RSSI for HT rate */
+
+ for (i = ODM_RF_PATH_A; i < ODM_RF_PATH_MAX; i++) {
+ /* 2008/01/30 MH: judge the RF RX path here. */
+ if (dm_odm->RFPathRxEnable & BIT(i))
+ rf_rx_num++;
+
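+ /* The 6-bit path AGC gain is in 2 dB steps with a -110 dBm offset. */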
+ rx_pwr[i] = ((pPhyStaRpt->path_agc[i].gain & 0x3F)*2) - 110;
+
+ pPhyInfo->RxPwr[i] = rx_pwr[i];
+
+ /* Translate dBm to percentage. */
+ RSSI = odm_QueryRxPwrPercentage(rx_pwr[i]);
+ total_rssi += RSSI;
+
+ /* Modification for ext-LNA board */
+ if (dm_odm->BoardType == ODM_BOARD_HIGHPWR) {
+ if ((pPhyStaRpt->path_agc[i].trsw) == 1)
+ RSSI = (RSSI > 94) ? 100 : (RSSI + 6);
+ else
+ RSSI = (RSSI <= 16) ? (RSSI >> 3) : (RSSI - 16);
+
+ if ((RSSI <= 34) && (RSSI >= 4))
+ RSSI -= 4;
+ }
+
+ pPhyInfo->RxMIMOSignalStrength[i] = (u8)RSSI;
+
+ /* Get Rx snr value in DB */
+ pPhyInfo->RxSNR[i] = (s32)(pPhyStaRpt->path_rxsnr[i]/2);
+ dm_odm->PhyDbgInfo.RxSNRdB[i] = (s32)(pPhyStaRpt->path_rxsnr[i]/2);
+
+ /* Record Signal Strength for next packet */
+ if (pPktinfo->bPacketMatchBSSID) {
+ if ((dm_odm->SupportPlatform == ODM_MP) && (dm_odm->PatchID == 19)) {
+ if (i == ODM_RF_PATH_A)
+ pPhyInfo->SignalQuality = odm_SQ_process_patch_RT_CID_819x_Lenovo(dm_odm, isCCKrate, PWDB_ALL, i, RSSI);
+ }
+ }
+ }
+ /* (2)PWDB, Average PWDB calculated by hardware (for rate adaptive) */
+ rx_pwr_all = (((pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all) >> 1) & 0x7f) - 110;
+
+ PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
+ PWDB_ALL_BT = PWDB_ALL;
+
+ pPhyInfo->RxPWDBAll = PWDB_ALL;
+ pPhyInfo->BTRxRSSIPercentage = PWDB_ALL_BT;
+ pPhyInfo->RxPower = rx_pwr_all;
+ pPhyInfo->RecvSignalPower = rx_pwr_all;
+
+ if ((dm_odm->SupportPlatform == ODM_MP) && (dm_odm->PatchID == 19)) {
+ /* do nothing */
+ } else {
+ /* (3)EVM of HT rate */
+ if (pPktinfo->Rate >= DESC92C_RATEMCS8 && pPktinfo->Rate <= DESC92C_RATEMCS15)
+ Max_spatial_stream = 2; /* both spatial stream make sense */
+ else
+ Max_spatial_stream = 1; /* only spatial stream 1 makes sense */
+
+ for (i = 0; i < Max_spatial_stream; i++) {
+ /* Do not use a shift operation like "rx_evmX >>= 1", because the compiler in the free build */
+ /* environment fills the most significant bit with zero when shifting, which can turn a */
+ /* negative value positive and corrupt the dBm value (which is supposed to be negative). */
+ EVM = odm_EVMdbToPercentage((pPhyStaRpt->stream_rxevm[i])); /* dbm */
+
+ if (pPktinfo->bPacketMatchBSSID) {
+ if (i == ODM_RF_PATH_A) /* Fill value in RFD, Get the first spatial stream only */
+ pPhyInfo->SignalQuality = (u8)(EVM & 0xff);
+ pPhyInfo->RxMIMOSignalQuality[i] = (u8)(EVM & 0xff);
+ }
+ }
+ }
+ }
+ /* UI BSS list signal strength (in percent), scaled to look reasonable, from 0 to 100. */
+ /* It is assigned to the BSS List in GetValueFromBeaconOrProbeRsp(). */
+ if (isCCKrate) {
+ pPhyInfo->SignalStrength = (u8)(odm_SignalScaleMapping(dm_odm, PWDB_ALL));/* PWDB_ALL; */
+ } else {
+ if (rf_rx_num != 0)
+ pPhyInfo->SignalStrength = (u8)(odm_SignalScaleMapping(dm_odm, total_rssi /= rf_rx_num));
+ }
+
+ /* For 92C/92D HW (Hybrid) Antenna Diversity */
+ pDM_SWAT_Table->antsel = pPhyStaRpt->ant_sel;
+ /* For 88E HW Antenna Diversity */
+ dm_odm->DM_FatTable.antsel_rx_keep_0 = pPhyStaRpt->ant_sel;
+ dm_odm->DM_FatTable.antsel_rx_keep_1 = pPhyStaRpt->ant_sel_b;
+ dm_odm->DM_FatTable.antsel_rx_keep_2 = pPhyStaRpt->antsel_rx_keep_2;
+}
+
+void odm_Init_RSSIForDM(struct odm_dm_struct *dm_odm)
+{
+}
+
+static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
+ struct odm_phy_status_info *pPhyInfo,
+ struct odm_per_pkt_info *pPktinfo)
+{
+ s32 UndecoratedSmoothedPWDB, UndecoratedSmoothedCCK;
+ s32 UndecoratedSmoothedOFDM, RSSI_Ave;
+ u8 isCCKrate = 0;
+ u8 RSSI_max, RSSI_min, i;
+ u32 OFDM_pkt = 0;
+ u32 Weighting = 0;
+ struct sta_info *pEntry;
+
+ if (pPktinfo->StationID == 0xFF)
+ return;
+ pEntry = dm_odm->pODM_StaInfo[pPktinfo->StationID];
+ if (!IS_STA_VALID(pEntry))
+ return;
+ if (!pPktinfo->bPacketMatchBSSID)
+ return;
+
+ isCCKrate = ((pPktinfo->Rate >= DESC92C_RATE1M) && (pPktinfo->Rate <= DESC92C_RATE11M)) ? true : false;
+
+ /* Smart Antenna Debug Message------------------ */
+ if (dm_odm->SupportICType == ODM_RTL8188E) {
+ u8 antsel_tr_mux;
+ struct fast_ant_train *pDM_FatTable = &dm_odm->DM_FatTable;
+
+ if (dm_odm->AntDivType == CG_TRX_SMART_ANTDIV) {
+ if (pDM_FatTable->FAT_State == FAT_TRAINING_STATE) {
+ if (pPktinfo->bPacketToSelf) {
+ antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2<<2) |
+ (pDM_FatTable->antsel_rx_keep_1<<1) |
+ pDM_FatTable->antsel_rx_keep_0;
+ pDM_FatTable->antSumRSSI[antsel_tr_mux] += pPhyInfo->RxPWDBAll;
+ pDM_FatTable->antRSSIcnt[antsel_tr_mux]++;
+ }
+ }
+ } else if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)) {
+ if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon) {
+ antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2<<2) |
+ (pDM_FatTable->antsel_rx_keep_1<<1) | pDM_FatTable->antsel_rx_keep_0;
+ ODM_AntselStatistics_88E(dm_odm, antsel_tr_mux, pPktinfo->StationID, pPhyInfo->RxPWDBAll);
+ }
+ }
+ }
+ /* Smart Antenna Debug Message------------------ */
+
+ UndecoratedSmoothedCCK = pEntry->rssi_stat.UndecoratedSmoothedCCK;
+ UndecoratedSmoothedOFDM = pEntry->rssi_stat.UndecoratedSmoothedOFDM;
+ UndecoratedSmoothedPWDB = pEntry->rssi_stat.UndecoratedSmoothedPWDB;
+
+ if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon) {
+ if (!isCCKrate) { /* ofdm rate */
+ if (pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_B] == 0) {
+ RSSI_Ave = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A];
+ } else {
+ if (pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A] > pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_B]) {
+ RSSI_max = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A];
+ RSSI_min = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_B];
+ } else {
+ RSSI_max = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_B];
+ RSSI_min = pPhyInfo->RxMIMOSignalStrength[ODM_RF_PATH_A];
+ }
+ if ((RSSI_max - RSSI_min) < 3)
+ RSSI_Ave = RSSI_max;
+ else if ((RSSI_max - RSSI_min) < 6)
+ RSSI_Ave = RSSI_max - 1;
+ else if ((RSSI_max - RSSI_min) < 10)
+ RSSI_Ave = RSSI_max - 2;
+ else
+ RSSI_Ave = RSSI_max - 3;
+ }
+
+ /* 1 Process OFDM RSSI */
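+ /* Exponential smoothing: new = (old * (Rx_Smooth_Factor - 1) + sample) / Rx_Smooth_Factor, with a +1 bias when the sample is above the running value so the average can ramp up. */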
+ if (UndecoratedSmoothedOFDM <= 0) { /* initialize */
+ UndecoratedSmoothedOFDM = pPhyInfo->RxPWDBAll;
+ } else {
+ if (pPhyInfo->RxPWDBAll > (u32)UndecoratedSmoothedOFDM) {
+ UndecoratedSmoothedOFDM =
+ (((UndecoratedSmoothedOFDM)*(Rx_Smooth_Factor-1)) +
+ (RSSI_Ave)) / (Rx_Smooth_Factor);
+ UndecoratedSmoothedOFDM = UndecoratedSmoothedOFDM + 1;
+ } else {
+ UndecoratedSmoothedOFDM =
+ (((UndecoratedSmoothedOFDM)*(Rx_Smooth_Factor-1)) +
+ (RSSI_Ave)) / (Rx_Smooth_Factor);
+ }
+ }
+
+ pEntry->rssi_stat.PacketMap = (pEntry->rssi_stat.PacketMap<<1) | BIT0;
+
+ } else {
+ RSSI_Ave = pPhyInfo->RxPWDBAll;
+
+ /* 1 Process CCK RSSI */
+ if (UndecoratedSmoothedCCK <= 0) { /* initialize */
+ UndecoratedSmoothedCCK = pPhyInfo->RxPWDBAll;
+ } else {
+ if (pPhyInfo->RxPWDBAll > (u32)UndecoratedSmoothedCCK) {
+ UndecoratedSmoothedCCK =
+ ((UndecoratedSmoothedCCK * (Rx_Smooth_Factor-1)) +
+ pPhyInfo->RxPWDBAll) / Rx_Smooth_Factor;
+ UndecoratedSmoothedCCK = UndecoratedSmoothedCCK + 1;
+ } else {
+ UndecoratedSmoothedCCK =
+ ((UndecoratedSmoothedCCK * (Rx_Smooth_Factor-1)) +
+ pPhyInfo->RxPWDBAll) / Rx_Smooth_Factor;
+ }
+ }
+ pEntry->rssi_stat.PacketMap = pEntry->rssi_stat.PacketMap<<1;
+ }
+ /* 2011.07.28 LukeLee: modified to prevent unstable CCK RSSI */
+ if (pEntry->rssi_stat.ValidBit >= 64)
+ pEntry->rssi_stat.ValidBit = 64;
+ else
+ pEntry->rssi_stat.ValidBit++;
+
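+ /* PacketMap is a sliding bitmap of the last ValidBit packets (1 = OFDM, 0 = CCK); OFDM_pkt counts the OFDM ones and weights the blend between the OFDM and CCK smoothed values. */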
+ for (i = 0; i < pEntry->rssi_stat.ValidBit; i++)
+ OFDM_pkt += (u8)(pEntry->rssi_stat.PacketMap>>i)&BIT0;
+
+ if (pEntry->rssi_stat.ValidBit == 64) {
+ Weighting = ((OFDM_pkt<<4) > 64) ? 64 : (OFDM_pkt<<4);
+ UndecoratedSmoothedPWDB = (Weighting*UndecoratedSmoothedOFDM+(64-Weighting)*UndecoratedSmoothedCCK)>>6;
+ } else {
+ if (pEntry->rssi_stat.ValidBit != 0)
+ UndecoratedSmoothedPWDB = (OFDM_pkt * UndecoratedSmoothedOFDM +
+ (pEntry->rssi_stat.ValidBit-OFDM_pkt) *
+ UndecoratedSmoothedCCK)/pEntry->rssi_stat.ValidBit;
+ else
+ UndecoratedSmoothedPWDB = 0;
+ }
+ pEntry->rssi_stat.UndecoratedSmoothedCCK = UndecoratedSmoothedCCK;
+ pEntry->rssi_stat.UndecoratedSmoothedOFDM = UndecoratedSmoothedOFDM;
+ pEntry->rssi_stat.UndecoratedSmoothedPWDB = UndecoratedSmoothedPWDB;
+ }
+}
+
+/* Endianness must already be handled before calling this API */
+static void ODM_PhyStatusQuery_92CSeries(struct odm_dm_struct *dm_odm,
+ struct odm_phy_status_info *pPhyInfo,
+ u8 *pPhyStatus,
+ struct odm_per_pkt_info *pPktinfo)
+{
+ odm_RxPhyStatus92CSeries_Parsing(dm_odm, pPhyInfo, pPhyStatus,
+ pPktinfo);
+ if (dm_odm->RSSI_test) {
+ /* Select the packets to do RSSI checking for antenna switching. */
+ if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon)
+ ODM_SwAntDivChkPerPktRssi(dm_odm, pPktinfo->StationID, pPhyInfo);
+ } else {
+ odm_Process_RSSIForDM(dm_odm, pPhyInfo, pPktinfo);
+ }
+}
+
+void ODM_PhyStatusQuery(struct odm_dm_struct *dm_odm,
+ struct odm_phy_status_info *pPhyInfo,
+ u8 *pPhyStatus, struct odm_per_pkt_info *pPktinfo)
+{
+ ODM_PhyStatusQuery_92CSeries(dm_odm, pPhyInfo, pPhyStatus, pPktinfo);
+}
+
+/* For future use. */
+void ODM_MacStatusQuery(struct odm_dm_struct *dm_odm, u8 *mac_stat,
+ u8 macid, bool pkt_match_bssid,
+ bool pkttoself, bool pkt_beacon)
+{
+ /* 2011/10/19 Driver team will handle in the future. */
+}
+
+enum HAL_STATUS ODM_ConfigRFWithHeaderFile(struct odm_dm_struct *dm_odm,
+ enum ODM_RF_RADIO_PATH content,
+ enum ODM_RF_RADIO_PATH rfpath)
+{
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD, ("===>ODM_ConfigRFWithHeaderFile\n"));
+ if (dm_odm->SupportICType == ODM_RTL8188E) {
+ if (rfpath == ODM_RF_PATH_A)
+ READ_AND_CONFIG(8188E, _RadioA_1T_);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD, (" ===> ODM_ConfigRFWithHeaderFile() Radio_A:Rtl8188ERadioA_1TArray\n"));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD, (" ===> ODM_ConfigRFWithHeaderFile() Radio_B:Rtl8188ERadioB_1TArray\n"));
+ }
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_TRACE, ("ODM_ConfigRFWithHeaderFile: Radio No %x\n", rfpath));
+ return HAL_STATUS_SUCCESS;
+}
+
+enum HAL_STATUS ODM_ConfigBBWithHeaderFile(struct odm_dm_struct *dm_odm,
+ enum odm_bb_config_type config_tp)
+{
+ if (dm_odm->SupportICType == ODM_RTL8188E) {
+ if (config_tp == CONFIG_BB_PHY_REG) {
+ READ_AND_CONFIG(8188E, _PHY_REG_1T_);
+ } else if (config_tp == CONFIG_BB_AGC_TAB) {
+ READ_AND_CONFIG(8188E, _AGC_TAB_1T_);
+ } else if (config_tp == CONFIG_BB_PHY_REG_PG) {
+ READ_AND_CONFIG(8188E, _PHY_REG_PG_);
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ (" ===> phy_ConfigBBWithHeaderFile() agc:Rtl8188EPHY_REG_PGArray\n"));
+ }
+ }
+ return HAL_STATUS_SUCCESS;
+}
+
+enum HAL_STATUS ODM_ConfigMACWithHeaderFile(struct odm_dm_struct *dm_odm)
+{
+ u8 result = HAL_STATUS_SUCCESS;
+ if (dm_odm->SupportICType == ODM_RTL8188E)
+ result = READ_AND_CONFIG(8188E, _MAC_REG_);
+ return result;
+}
diff --git a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
new file mode 100644
index 00000000000..58410f3e531
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
@@ -0,0 +1,399 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+void ODM_DIG_LowerBound_88E(struct odm_dm_struct *dm_odm)
+{
+ struct rtw_dig *pDM_DigTable = &dm_odm->DM_DigTable;
+
+ if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) {
+ pDM_DigTable->rx_gain_range_min = (u8) pDM_DigTable->AntDiv_RSSI_max;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
+ ("ODM_DIG_LowerBound_88E(): pDM_DigTable->AntDiv_RSSI_max=%d\n", pDM_DigTable->AntDiv_RSSI_max));
+ }
+ /* If only one Entry connected */
+}
+
+static void odm_RX_HWAntDivInit(struct odm_dm_struct *dm_odm)
+{
+ u32 value32;
+
+ if (*(dm_odm->mp_mode) == 1) {
+ dm_odm->AntDivType = CGCS_RX_SW_ANTDIV;
+ ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT7, 0); /* disable HW AntDiv */
+ ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT31, 1); /* 1:CG, 0:CS */
+ return;
+ }
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("odm_RX_HWAntDivInit()\n"));
+
+ /* MAC Setting */
+ value32 = ODM_GetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
+ ODM_SetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32|(BIT23|BIT25)); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
+ /* Pin Settings */
+ ODM_SetBBReg(dm_odm, ODM_REG_PIN_CTRL_11N, BIT9|BIT8, 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
+ ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT10, 0); /* Reg864[10]=1'b0 antsel2 by HW */
+ ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT22, 1); /* Regb2c[22]=1'b1 */
+ ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT31, 1); /* Regb2c[31]=1'b1 output at CG only */
+ /* OFDM Settings */
+ ODM_SetBBReg(dm_odm, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
+ /* CCK Settings */
+ ODM_SetBBReg(dm_odm, ODM_REG_BB_PWR_SAV4_11N, BIT7, 1); /* Fix CCK PHY status report issue */
+ ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT4, 1); /* CCK complete HW AntDiv within 64 samples */
+ ODM_UpdateRxIdleAnt_88E(dm_odm, MAIN_ANT);
+ ODM_SetBBReg(dm_odm, ODM_REG_ANT_MAPPING1_11N, 0xFFFF, 0x0201); /* antenna mapping table */
+}
+
+static void odm_TRX_HWAntDivInit(struct odm_dm_struct *dm_odm)
+{
+ u32 value32;
+
+ if (*(dm_odm->mp_mode) == 1) {
+ dm_odm->AntDivType = CGCS_RX_SW_ANTDIV;
+ ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT7, 0); /* disable HW AntDiv */
+ ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT5|BIT4|BIT3, 0); /* Default RX (0/1) */
+ return;
+ }
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("odm_TRX_HWAntDivInit()\n"));
+
+ /* MAC Setting */
+ value32 = ODM_GetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
+ ODM_SetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32|(BIT23|BIT25)); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
+ /* Pin Settings */
+ ODM_SetBBReg(dm_odm, ODM_REG_PIN_CTRL_11N, BIT9|BIT8, 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
+ ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT10, 0); /* Reg864[10]=1'b0 antsel2 by HW */
+ ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT22, 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
+ ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT31, 1); /* Regb2c[31]=1'b1 output at CG only */
+ /* OFDM Settings */
+ ODM_SetBBReg(dm_odm, ODM_REG_ANTDIV_PARA1_11N, bMaskDWord, 0x000000a0);
+ /* CCK Settings */
+ ODM_SetBBReg(dm_odm, ODM_REG_BB_PWR_SAV4_11N, BIT7, 1); /* Fix CCK PHY status report issue */
+ ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA2_11N, BIT4, 1); /* CCK complete HW AntDiv within 64 samples */
+ /* Tx Settings */
+ ODM_SetBBReg(dm_odm, ODM_REG_TX_ANT_CTRL_11N, BIT21, 0); /* Reg80c[21]=1'b0 from TX Reg */
+ ODM_UpdateRxIdleAnt_88E(dm_odm, MAIN_ANT);
+
+ /* antenna mapping table */
+ if (!dm_odm->bIsMPChip) { /* testchip */
+ ODM_SetBBReg(dm_odm, ODM_REG_RX_DEFUALT_A_11N, BIT10|BIT9|BIT8, 1); /* Reg858[10:8]=3'b001 */
+ ODM_SetBBReg(dm_odm, ODM_REG_RX_DEFUALT_A_11N, BIT13|BIT12|BIT11, 2); /* Reg858[13:11]=3'b010 */
+ } else { /* MPchip */
+ ODM_SetBBReg(dm_odm, ODM_REG_ANT_MAPPING1_11N, bMaskDWord, 0x0201); /* Reg914=3'b010, Reg915=3'b001 */
+ }
+}
+
+static void odm_FastAntTrainingInit(struct odm_dm_struct *dm_odm)
+{
+ u32 value32, i;
+ struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
+ u32 AntCombination = 2;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("odm_FastAntTrainingInit()\n"));
+
+ if (*(dm_odm->mp_mode) == 1) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_INIT, ODM_DBG_LOUD, ("dm_odm->AntDivType: %d\n", dm_odm->AntDivType));
+ return;
+ }
+
+ for (i = 0; i < 6; i++) {
+ dm_fat_tbl->Bssid[i] = 0;
+ dm_fat_tbl->antSumRSSI[i] = 0;
+ dm_fat_tbl->antRSSIcnt[i] = 0;
+ dm_fat_tbl->antAveRSSI[i] = 0;
+ }
+ dm_fat_tbl->TrainIdx = 0;
+ dm_fat_tbl->FAT_State = FAT_NORMAL_STATE;
+
+ /* MAC Setting */
+ value32 = ODM_GetMACReg(dm_odm, 0x4c, bMaskDWord);
+ ODM_SetMACReg(dm_odm, 0x4c, bMaskDWord, value32|(BIT23|BIT25)); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
+ value32 = ODM_GetMACReg(dm_odm, 0x7B4, bMaskDWord);
+ ODM_SetMACReg(dm_odm, 0x7b4, bMaskDWord, value32|(BIT16|BIT17)); /* Reg7B4[16]=1 enable antenna training, Reg7B4[17]=1 enable A2 match */
+
+ /* Match MAC ADDR */
+ ODM_SetMACReg(dm_odm, 0x7b4, 0xFFFF, 0);
+ ODM_SetMACReg(dm_odm, 0x7b0, bMaskDWord, 0);
+
+ ODM_SetBBReg(dm_odm, 0x870, BIT9|BIT8, 0);/* Reg870[8]=1'b0, Reg870[9]=1'b0 antsel antselb by HW */
+ ODM_SetBBReg(dm_odm, 0x864, BIT10, 0); /* Reg864[10]=1'b0 antsel2 by HW */
+ ODM_SetBBReg(dm_odm, 0xb2c, BIT22, 0); /* Regb2c[22]=1'b0 disable CS/CG switch */
+ ODM_SetBBReg(dm_odm, 0xb2c, BIT31, 1); /* Regb2c[31]=1'b1 output at CG only */
+ ODM_SetBBReg(dm_odm, 0xca4, bMaskDWord, 0x000000a0);
+
+ /* antenna mapping table */
+ if (AntCombination == 2) {
+ if (!dm_odm->bIsMPChip) { /* testchip */
+ ODM_SetBBReg(dm_odm, 0x858, BIT10|BIT9|BIT8, 1); /* Reg858[10:8]=3'b001 */
+ ODM_SetBBReg(dm_odm, 0x858, BIT13|BIT12|BIT11, 2); /* Reg858[13:11]=3'b010 */
+ } else { /* MPchip */
+ ODM_SetBBReg(dm_odm, 0x914, bMaskByte0, 1);
+ ODM_SetBBReg(dm_odm, 0x914, bMaskByte1, 2);
+ }
+ } else if (AntCombination == 7) {
+ if (!dm_odm->bIsMPChip) { /* testchip */
+ ODM_SetBBReg(dm_odm, 0x858, BIT10|BIT9|BIT8, 0); /* Reg858[10:8]=3'b000 */
+ ODM_SetBBReg(dm_odm, 0x858, BIT13|BIT12|BIT11, 1); /* Reg858[13:11]=3'b001 */
+ ODM_SetBBReg(dm_odm, 0x878, BIT16, 0);
+ ODM_SetBBReg(dm_odm, 0x858, BIT15|BIT14, 2); /* Reg878[0],Reg858[14:15])=3'b010 */
+ ODM_SetBBReg(dm_odm, 0x878, BIT19|BIT18|BIT17, 3);/* Reg878[3:1]=3b'011 */
+ ODM_SetBBReg(dm_odm, 0x878, BIT22|BIT21|BIT20, 4);/* Reg878[6:4]=3b'100 */
+ ODM_SetBBReg(dm_odm, 0x878, BIT25|BIT24|BIT23, 5);/* Reg878[9:7]=3b'101 */
+ ODM_SetBBReg(dm_odm, 0x878, BIT28|BIT27|BIT26, 6);/* Reg878[12:10]=3b'110 */
+ ODM_SetBBReg(dm_odm, 0x878, BIT31|BIT30|BIT29, 7);/* Reg878[15:13]=3b'111 */
+ } else { /* MPchip */
+ ODM_SetBBReg(dm_odm, 0x914, bMaskByte0, 0);
+ ODM_SetBBReg(dm_odm, 0x914, bMaskByte1, 1);
+ ODM_SetBBReg(dm_odm, 0x914, bMaskByte2, 2);
+ ODM_SetBBReg(dm_odm, 0x914, bMaskByte3, 3);
+ ODM_SetBBReg(dm_odm, 0x918, bMaskByte0, 4);
+ ODM_SetBBReg(dm_odm, 0x918, bMaskByte1, 5);
+ ODM_SetBBReg(dm_odm, 0x918, bMaskByte2, 6);
+ ODM_SetBBReg(dm_odm, 0x918, bMaskByte3, 7);
+ }
+ }
+
+ /* Default Ant Setting when no fast training */
+ ODM_SetBBReg(dm_odm, 0x80c, BIT21, 1); /* Reg80c[21]=1'b1 from TX Info */
+ ODM_SetBBReg(dm_odm, 0x864, BIT5|BIT4|BIT3, 0); /* Default RX */
+ ODM_SetBBReg(dm_odm, 0x864, BIT8|BIT7|BIT6, 1); /* Optional RX */
+
+ /* Enter training state */
+ ODM_SetBBReg(dm_odm, 0x864, BIT2|BIT1|BIT0, (AntCombination-1)); /* Reg864[2:0]=3'd6 ant combination=reg864[2:0]+1 */
+ ODM_SetBBReg(dm_odm, 0xc50, BIT7, 1); /* RegC50[7]=1'b1 enable HW AntDiv */
+}
+
+void ODM_AntennaDiversityInit_88E(struct odm_dm_struct *dm_odm)
+{
+ if (dm_odm->SupportICType != ODM_RTL8188E)
+ return;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("dm_odm->AntDivType=%d\n", dm_odm->AntDivType));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("dm_odm->bIsMPChip=%s\n", (dm_odm->bIsMPChip ? "true" : "false")));
+
+ if (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)
+ odm_RX_HWAntDivInit(dm_odm);
+ else if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
+ odm_TRX_HWAntDivInit(dm_odm);
+ else if (dm_odm->AntDivType == CG_TRX_SMART_ANTDIV)
+ odm_FastAntTrainingInit(dm_odm);
+}
+
+void ODM_UpdateRxIdleAnt_88E(struct odm_dm_struct *dm_odm, u8 Ant)
+{
+ struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
+ u32 DefaultAnt, OptionalAnt;
+
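+ /* Map the logical MAIN/AUX request onto the register encoding used by the active diversity scheme, then program the default/optional RX (and, for CG_TRX, TX and response-TX) antenna fields. */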
+ if (dm_fat_tbl->RxIdleAnt != Ant) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Need to Update Rx Idle Ant\n"));
+ if (Ant == MAIN_ANT) {
+ DefaultAnt = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ? MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX;
+ OptionalAnt = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ? AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX;
+ } else {
+ DefaultAnt = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ? AUX_ANT_CG_TRX : AUX_ANT_CGCS_RX;
+ OptionalAnt = (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) ? MAIN_ANT_CG_TRX : MAIN_ANT_CGCS_RX;
+ }
+
+ if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) {
+ ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT5|BIT4|BIT3, DefaultAnt); /* Default RX */
+ ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT8|BIT7|BIT6, OptionalAnt); /* Optional RX */
+ ODM_SetBBReg(dm_odm, ODM_REG_ANTSEL_CTRL_11N, BIT14|BIT13|BIT12, DefaultAnt); /* Default TX */
+ ODM_SetMACReg(dm_odm, ODM_REG_RESP_TX_11N, BIT6|BIT7, DefaultAnt); /* Resp Tx */
+ } else if (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV) {
+ ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT5|BIT4|BIT3, DefaultAnt); /* Default RX */
+ ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT8|BIT7|BIT6, OptionalAnt); /* Optional RX */
+ }
+ }
+ dm_fat_tbl->RxIdleAnt = Ant;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("RxIdleAnt=%s\n", (Ant == MAIN_ANT) ? "MAIN_ANT" : "AUX_ANT"));
+ pr_info("RxIdleAnt=%s\n", (Ant == MAIN_ANT) ? "MAIN_ANT" : "AUX_ANT");
+}
+
+static void odm_UpdateTxAnt_88E(struct odm_dm_struct *dm_odm, u8 Ant, u32 MacId)
+{
+ struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
+ u8 TargetAnt;
+
+ if (Ant == MAIN_ANT)
+ TargetAnt = MAIN_ANT_CG_TRX;
+ else
+ TargetAnt = AUX_ANT_CG_TRX;
+ dm_fat_tbl->antsel_a[MacId] = TargetAnt&BIT0;
+ dm_fat_tbl->antsel_b[MacId] = (TargetAnt&BIT1)>>1;
+ dm_fat_tbl->antsel_c[MacId] = (TargetAnt&BIT2)>>2;
+
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
+ ("Tx from TxInfo, TargetAnt=%s\n",
+ (Ant == MAIN_ANT) ? "MAIN_ANT" : "AUX_ANT"));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
+ ("antsel_tr_mux=3'b%d%d%d\n",
+ dm_fat_tbl->antsel_c[MacId], dm_fat_tbl->antsel_b[MacId], dm_fat_tbl->antsel_a[MacId]));
+}
+
+void ODM_SetTxAntByTxInfo_88E(struct odm_dm_struct *dm_odm, u8 *pDesc, u8 macId)
+{
+ struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
+
+ if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CG_TRX_SMART_ANTDIV)) {
+ SET_TX_DESC_ANTSEL_A_88E(pDesc, dm_fat_tbl->antsel_a[macId]);
+ SET_TX_DESC_ANTSEL_B_88E(pDesc, dm_fat_tbl->antsel_b[macId]);
+ SET_TX_DESC_ANTSEL_C_88E(pDesc, dm_fat_tbl->antsel_c[macId]);
+ }
+}
+
+void ODM_AntselStatistics_88E(struct odm_dm_struct *dm_odm, u8 antsel_tr_mux, u32 MacId, u8 RxPWDBAll)
+{
+ struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
+ if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV) {
+ if (antsel_tr_mux == MAIN_ANT_CG_TRX) {
+ dm_fat_tbl->MainAnt_Sum[MacId] += RxPWDBAll;
+ dm_fat_tbl->MainAnt_Cnt[MacId]++;
+ } else {
+ dm_fat_tbl->AuxAnt_Sum[MacId] += RxPWDBAll;
+ dm_fat_tbl->AuxAnt_Cnt[MacId]++;
+ }
+ } else if (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV) {
+ if (antsel_tr_mux == MAIN_ANT_CGCS_RX) {
+ dm_fat_tbl->MainAnt_Sum[MacId] += RxPWDBAll;
+ dm_fat_tbl->MainAnt_Cnt[MacId]++;
+ } else {
+ dm_fat_tbl->AuxAnt_Sum[MacId] += RxPWDBAll;
+ dm_fat_tbl->AuxAnt_Cnt[MacId]++;
+ }
+ }
+}
+
+static void odm_HWAntDiv(struct odm_dm_struct *dm_odm)
+{
+ u32 i, MinRSSI = 0xFF, AntDivMaxRSSI = 0, MaxRSSI = 0, LocalMinRSSI, LocalMaxRSSI;
+ u32 Main_RSSI, Aux_RSSI;
+ u8 RxIdleAnt = 0, TargetAnt = 7;
+ struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
+ struct rtw_dig *pDM_DigTable = &dm_odm->DM_DigTable;
+ struct sta_info *pEntry;
+
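+ /* For every associated station, average the accumulated RSSI per antenna, pick the stronger one as that station's TX antenna, and let the RX idle antenna follow the station with the weakest RSSI; the accumulators are cleared each round. */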
+ for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
+ pEntry = dm_odm->pODM_StaInfo[i];
+ if (IS_STA_VALID(pEntry)) {
+ /* 2 Calculate RSSI per antenna */
+ Main_RSSI = (dm_fat_tbl->MainAnt_Cnt[i] != 0) ? (dm_fat_tbl->MainAnt_Sum[i]/dm_fat_tbl->MainAnt_Cnt[i]) : 0;
+ Aux_RSSI = (dm_fat_tbl->AuxAnt_Cnt[i] != 0) ? (dm_fat_tbl->AuxAnt_Sum[i]/dm_fat_tbl->AuxAnt_Cnt[i]) : 0;
+ TargetAnt = (Main_RSSI >= Aux_RSSI) ? MAIN_ANT : AUX_ANT;
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
+ ("MacID=%d, MainAnt_Sum=%d, MainAnt_Cnt=%d\n",
+ i, dm_fat_tbl->MainAnt_Sum[i],
+ dm_fat_tbl->MainAnt_Cnt[i]));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
+ ("MacID=%d, AuxAnt_Sum=%d, AuxAnt_Cnt=%d\n",
+ i, dm_fat_tbl->AuxAnt_Sum[i], dm_fat_tbl->AuxAnt_Cnt[i]));
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD,
+ ("MacID=%d, Main_RSSI= %d, Aux_RSSI= %d\n",
+ i, Main_RSSI, Aux_RSSI));
+ /* 2 Select MaxRSSI for DIG */
+ LocalMaxRSSI = (Main_RSSI > Aux_RSSI) ? Main_RSSI : Aux_RSSI;
+ if ((LocalMaxRSSI > AntDivMaxRSSI) && (LocalMaxRSSI < 40))
+ AntDivMaxRSSI = LocalMaxRSSI;
+ if (LocalMaxRSSI > MaxRSSI)
+ MaxRSSI = LocalMaxRSSI;
+
+ /* 2 Select RX Idle Antenna */
+ if ((dm_fat_tbl->RxIdleAnt == MAIN_ANT) && (Main_RSSI == 0))
+ Main_RSSI = Aux_RSSI;
+ else if ((dm_fat_tbl->RxIdleAnt == AUX_ANT) && (Aux_RSSI == 0))
+ Aux_RSSI = Main_RSSI;
+
+ LocalMinRSSI = (Main_RSSI > Aux_RSSI) ? Aux_RSSI : Main_RSSI;
+ if (LocalMinRSSI < MinRSSI) {
+ MinRSSI = LocalMinRSSI;
+ RxIdleAnt = TargetAnt;
+ }
+ /* 2 Select TRX Antenna */
+ if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
+ odm_UpdateTxAnt_88E(dm_odm, TargetAnt, i);
+ }
+ dm_fat_tbl->MainAnt_Sum[i] = 0;
+ dm_fat_tbl->AuxAnt_Sum[i] = 0;
+ dm_fat_tbl->MainAnt_Cnt[i] = 0;
+ dm_fat_tbl->AuxAnt_Cnt[i] = 0;
+ }
+
+ /* 2 Set RX Idle Antenna */
+ ODM_UpdateRxIdleAnt_88E(dm_odm, RxIdleAnt);
+
+ pDM_DigTable->AntDiv_RSSI_max = AntDivMaxRSSI;
+ pDM_DigTable->RSSI_max = MaxRSSI;
+}
+
+void ODM_AntennaDiversity_88E(struct odm_dm_struct *dm_odm)
+{
+ struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
+ if ((dm_odm->SupportICType != ODM_RTL8188E) || (!(dm_odm->SupportAbility & ODM_BB_ANT_DIV)))
+ return;
+ if (!dm_odm->bLinked) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("ODM_AntennaDiversity_88E(): No Link.\n"));
+ if (dm_fat_tbl->bBecomeLinked) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Need to Turn off HW AntDiv\n"));
+ ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT7, 0); /* RegC50[7]=1'b0 disable HW AntDiv */
+ ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT15, 0); /* Disable CCK AntDiv */
+ if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
+ ODM_SetBBReg(dm_odm, ODM_REG_TX_ANT_CTRL_11N, BIT21, 0); /* Reg80c[21]=1'b0 from TX Reg */
+ dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
+ }
+ return;
+ } else {
+ if (!dm_fat_tbl->bBecomeLinked) {
+ ODM_RT_TRACE(dm_odm, ODM_COMP_ANT_DIV, ODM_DBG_LOUD, ("Need to Turn on HW AntDiv\n"));
+ /* Because HW AntDiv is disabled before Link, we enable HW AntDiv after link */
+ ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT7, 1); /* RegC50[7]=1'b1 enable HW AntDiv */
+ ODM_SetBBReg(dm_odm, ODM_REG_CCK_ANTDIV_PARA1_11N, BIT15, 1); /* Enable CCK AntDiv */
+ if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
+ ODM_SetBBReg(dm_odm, ODM_REG_TX_ANT_CTRL_11N, BIT21, 1); /* Reg80c[21]=1'b1 from TX Info */
+ dm_fat_tbl->bBecomeLinked = dm_odm->bLinked;
+ }
+ }
+ if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV))
+ odm_HWAntDiv(dm_odm);
+}
+
+/* 3============================================================ */
+/* 3 Dynamic Primary CCA */
+/* 3============================================================ */
+
+void odm_PrimaryCCA_Init(struct odm_dm_struct *dm_odm)
+{
+ struct dyn_primary_cca *PrimaryCCA = &(dm_odm->DM_PriCCA);
+
+ PrimaryCCA->DupRTS_flag = 0;
+ PrimaryCCA->intf_flag = 0;
+ PrimaryCCA->intf_type = 0;
+ PrimaryCCA->Monitor_flag = 0;
+ PrimaryCCA->PriCCA_flag = 0;
+}
+
+bool ODM_DynamicPrimaryCCA_DupRTS(struct odm_dm_struct *dm_odm)
+{
+ struct dyn_primary_cca *PrimaryCCA = &(dm_odm->DM_PriCCA);
+
+ return PrimaryCCA->DupRTS_flag;
+}
+
+void odm_DynamicPrimaryCCA(struct odm_dm_struct *dm_odm)
+{
+}
diff --git a/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c b/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c
new file mode 100644
index 00000000000..18c0533fbd0
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/odm_RegConfig8188E.c
@@ -0,0 +1,130 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+
+void odm_ConfigRFReg_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
+ u32 Data, enum ODM_RF_RADIO_PATH RF_PATH,
+ u32 RegAddr)
+{
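+ /* Addresses 0xf9-0xfd and 0xffe in the config tables are delay markers, not real RF registers; everything else is written through ODM_SetRFReg with a 1 us settle delay. */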
+ if (Addr == 0xffe) {
+ ODM_sleep_ms(50);
+ } else if (Addr == 0xfd) {
+ ODM_delay_ms(5);
+ } else if (Addr == 0xfc) {
+ ODM_delay_ms(1);
+ } else if (Addr == 0xfb) {
+ ODM_delay_us(50);
+ } else if (Addr == 0xfa) {
+ ODM_delay_us(5);
+ } else if (Addr == 0xf9) {
+ ODM_delay_us(1);
+ } else {
+ ODM_SetRFReg(pDM_Odm, RF_PATH, RegAddr, bRFRegOffsetMask, Data);
+ /* Add 1us delay between BB/RF register setting. */
+ ODM_delay_us(1);
+ }
+}
+
+void odm_ConfigRF_RadioA_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data)
+{
+ u32 content = 0x1000; /* RF_Content: radioa_txt */
+ u32 maskforPhySet = (u32)(content&0xE000);
+
+ odm_ConfigRFReg_8188E(pDM_Odm, Addr, Data, ODM_RF_PATH_A, Addr|maskforPhySet);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE, ("===> ODM_ConfigRFWithHeaderFile: [RadioA] %08X %08X\n", Addr, Data));
+}
+
+void odm_ConfigRF_RadioB_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data)
+{
+ u32 content = 0x1001; /* RF_Content: radiob_txt */
+ u32 maskforPhySet = (u32)(content&0xE000);
+
+ odm_ConfigRFReg_8188E(pDM_Odm, Addr, Data, ODM_RF_PATH_B, Addr|maskforPhySet);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE, ("===> ODM_ConfigRFWithHeaderFile: [RadioB] %08X %08X\n", Addr, Data));
+}
+
+void odm_ConfigMAC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u8 Data)
+{
+ ODM_Write1Byte(pDM_Odm, Addr, Data);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE, ("===> ODM_ConfigMACWithHeaderFile: [MAC_REG] %08X %08X\n", Addr, Data));
+}
+
+void odm_ConfigBB_AGC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
+{
+ ODM_SetBBReg(pDM_Odm, Addr, Bitmask, Data);
+ /* Add 1us delay between BB/RF register setting. */
+ ODM_delay_us(1);
+
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE,
+ ("===> ODM_ConfigBBWithHeaderFile: [AGC_TAB] %08X %08X\n",
+ Addr, Data));
+}
+
+void odm_ConfigBB_PHY_REG_PG_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
+ u32 Bitmask, u32 Data)
+{
+ if (Addr == 0xfe) {
+ ODM_sleep_ms(50);
+ } else if (Addr == 0xfd) {
+ ODM_delay_ms(5);
+ } else if (Addr == 0xfc) {
+ ODM_delay_ms(1);
+ } else if (Addr == 0xfb) {
+ ODM_delay_us(50);
+ } else if (Addr == 0xfa) {
+ ODM_delay_us(5);
+ } else if (Addr == 0xf9) {
+ ODM_delay_us(1);
+ } else {
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_LOUD,
+ ("===> @@@@@@@ ODM_ConfigBBWithHeaderFile: [PHY_REG] %08X %08X %08X\n",
+ Addr, Bitmask, Data));
+ storePwrIndexDiffRateOffset(pDM_Odm->Adapter, Addr, Bitmask, Data);
+ }
+}
+
+void odm_ConfigBB_PHY_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Bitmask, u32 Data)
+{
+ if (Addr == 0xfe) {
+ ODM_sleep_ms(50);
+ } else if (Addr == 0xfd) {
+ ODM_delay_ms(5);
+ } else if (Addr == 0xfc) {
+ ODM_delay_ms(1);
+ } else if (Addr == 0xfb) {
+ ODM_delay_us(50);
+ } else if (Addr == 0xfa) {
+ ODM_delay_us(5);
+ } else if (Addr == 0xf9) {
+ ODM_delay_us(1);
+ } else {
+ if (Addr == 0xa24)
+ pDM_Odm->RFCalibrateInfo.RegA24 = Data;
+ ODM_SetBBReg(pDM_Odm, Addr, Bitmask, Data);
+
+ /* Add 1us delay between BB/RF register setting. */
+ ODM_delay_us(1);
+ ODM_RT_TRACE(pDM_Odm, ODM_COMP_INIT, ODM_DBG_TRACE,
+ ("===> ODM_ConfigBBWithHeaderFile: [PHY_REG] %08X %08X\n",
+ Addr, Data));
+ }
+}
diff --git a/drivers/staging/rtl8188eu/hal/odm_debug.c b/drivers/staging/rtl8188eu/hal/odm_debug.c
new file mode 100644
index 00000000000..84caadd6c8e
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/odm_debug.c
@@ -0,0 +1,32 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+/* include files */
+
+#include "odm_precomp.h"
+
+void ODM_InitDebugSetting(struct odm_dm_struct *pDM_Odm)
+{
+ pDM_Odm->DebugLevel = ODM_DBG_TRACE;
+
+ pDM_Odm->DebugComponents = 0;
+}
+
+u32 GlobalDebugLevel;
diff --git a/drivers/staging/rtl8188eu/hal/odm_interface.c b/drivers/staging/rtl8188eu/hal/odm_interface.c
new file mode 100644
index 00000000000..59ad5bf4d94
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/odm_interface.c
@@ -0,0 +1,203 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include "odm_precomp.h"
+/* ODM IO Relative API. */
+
+u8 ODM_Read1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ return rtw_read8(Adapter, RegAddr);
+}
+
+u16 ODM_Read2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ return rtw_read16(Adapter, RegAddr);
+}
+
+u32 ODM_Read4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ return rtw_read32(Adapter, RegAddr);
+}
+
+void ODM_Write1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u8 Data)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ rtw_write8(Adapter, RegAddr, Data);
+}
+
+void ODM_Write2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u16 Data)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ rtw_write16(Adapter, RegAddr, Data);
+}
+
+void ODM_Write4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 Data)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ rtw_write32(Adapter, RegAddr, Data);
+}
+
+void ODM_SetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask, u32 Data)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ PHY_SetBBReg(Adapter, RegAddr, BitMask, Data);
+}
+
+u32 ODM_GetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ return PHY_QueryBBReg(Adapter, RegAddr, BitMask);
+}
+
+void ODM_SetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask, u32 Data)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ PHY_SetBBReg(Adapter, RegAddr, BitMask, Data);
+}
+
+u32 ODM_GetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ return PHY_QueryBBReg(Adapter, RegAddr, BitMask);
+}
+
+void ODM_SetRFReg(struct odm_dm_struct *pDM_Odm, enum ODM_RF_RADIO_PATH eRFPath, u32 RegAddr, u32 BitMask, u32 Data)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ PHY_SetRFReg(Adapter, (enum rf_radio_path)eRFPath, RegAddr, BitMask, Data);
+}
+
+u32 ODM_GetRFReg(struct odm_dm_struct *pDM_Odm, enum ODM_RF_RADIO_PATH eRFPath, u32 RegAddr, u32 BitMask)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ return PHY_QueryRFReg(Adapter, (enum rf_radio_path)eRFPath, RegAddr, BitMask);
+}
+
+/* ODM Memory relative API. */
+void ODM_AllocateMemory(struct odm_dm_struct *pDM_Odm, void **pPtr, u32 length)
+{
+ *pPtr = rtw_zvmalloc(length);
+}
+
+/* length could be ignored, used to detect memory leakage. */
+void ODM_FreeMemory(struct odm_dm_struct *pDM_Odm, void *pPtr, u32 length)
+{
+ rtw_vmfree(pPtr, length);
+}
+
+s32 ODM_CompareMemory(struct odm_dm_struct *pDM_Odm, void *pBuf1, void *pBuf2, u32 length)
+{
+ return _rtw_memcmp(pBuf1, pBuf2, length);
+}
+
+/* ODM MISC relative API. */
+void ODM_AcquireSpinLock(struct odm_dm_struct *pDM_Odm, enum RT_SPINLOCK_TYPE type)
+{
+}
+
+void ODM_ReleaseSpinLock(struct odm_dm_struct *pDM_Odm, enum RT_SPINLOCK_TYPE type)
+{
+}
+
+/* Work item relative API. For MP driver only. */
+void ODM_InitializeWorkItem(struct odm_dm_struct *pDM_Odm, void *pRtWorkItem,
+ RT_WORKITEM_CALL_BACK RtWorkItemCallback,
+ void *pContext, const char *szID)
+{
+}
+
+void ODM_StartWorkItem(void *pRtWorkItem)
+{
+}
+
+void ODM_StopWorkItem(void *pRtWorkItem)
+{
+}
+
+void ODM_FreeWorkItem(void *pRtWorkItem)
+{
+}
+
+void ODM_ScheduleWorkItem(void *pRtWorkItem)
+{
+}
+
+void ODM_IsWorkItemScheduled(void *pRtWorkItem)
+{
+}
+
+/* ODM Timer relative API. */
+void ODM_StallExecution(u32 usDelay)
+{
+ rtw_udelay_os(usDelay);
+}
+
+void ODM_delay_ms(u32 ms)
+{
+ rtw_mdelay_os(ms);
+}
+
+void ODM_delay_us(u32 us)
+{
+ rtw_udelay_os(us);
+}
+
+void ODM_sleep_ms(u32 ms)
+{
+ rtw_msleep_os(ms);
+}
+
+void ODM_sleep_us(u32 us)
+{
+ rtw_usleep_os(us);
+}
+
+void ODM_SetTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer, u32 msDelay)
+{
+ _set_timer(pTimer, msDelay); /* ms */
+}
+
+void ODM_InitializeTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer,
+ void *CallBackFunc, void *pContext,
+ const char *szID)
+{
+ struct adapter *Adapter = pDM_Odm->Adapter;
+ _init_timer(pTimer, Adapter->pnetdev, CallBackFunc, pDM_Odm);
+}
+
+void ODM_CancelTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer)
+{
+ _cancel_timer_ex(pTimer);
+}
+
+void ODM_ReleaseTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer)
+{
+}
+
+/* ODM FW relative API. */
+u32 ODM_FillH2CCmd(u8 *pH2CBuffer, u32 H2CBufferLen, u32 CmdNum,
+ u32 *pElementID, u32 *pCmdLen,
+ u8 **pCmbBuffer, u8 *CmdStartSeq)
+{
+ return true;
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
new file mode 100644
index 00000000000..8c858775451
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -0,0 +1,779 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTL8188E_CMD_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <cmd_osdep.h>
+#include <mlme_osdep.h>
+#include <rtw_ioctl_set.h>
+
+#include <rtl8188e_hal.h>
+
+#define RTL88E_MAX_H2C_BOX_NUMS 4
+#define RTL88E_MAX_CMD_LEN 7
+#define RTL88E_MESSAGE_BOX_SIZE 4
+#define RTL88E_EX_MESSAGE_BOX_SIZE 4
+
+static u8 _is_fw_read_cmd_down(struct adapter *adapt, u8 msgbox_num)
+{
+ u8 read_down = false;
+ int retry_cnts = 100;
+
+ u8 valid;
+
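+ /* Poll REG_HMETFR until the firmware clears the busy bit for this message box, or the retry budget is exhausted. */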
+ do {
+ valid = rtw_read8(adapt, REG_HMETFR) & BIT(msgbox_num);
+ if (0 == valid)
+ read_down = true;
+ } while ((!read_down) && (retry_cnts--));
+
+ return read_down;
+}
+
+/*****************************************
+* H2C Msg format :
+* 0x1DF - 0x1D0
+*| 31 - 8 | 7-5 4 - 0 |
+*| h2c_msg |Class_ID CMD_ID |
+*
+* Extend 0x1FF - 0x1F0
+*|31 - 0 |
+*|ext_msg|
+******************************************/
+static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *pCmdBuffer)
+{
+ u8 bcmd_down = false;
+ s32 retry_cnts = 100;
+ u8 h2c_box_num;
+ u32 msgbox_addr;
+ u32 msgbox_ex_addr;
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ u8 cmd_idx, ext_cmd_len;
+ u32 h2c_cmd = 0;
+ u32 h2c_cmd_ex = 0;
+ s32 ret = _FAIL;
+
+_func_enter_;
+
+ if (!adapt->bFWReady) {
+ DBG_88E("FillH2CCmd_88E(): return H2C cmd because fw is not ready\n");
+ return ret;
+ }
+
+ if (!pCmdBuffer)
+ goto exit;
+ if (CmdLen > RTL88E_MAX_CMD_LEN)
+ goto exit;
+ if (adapt->bSurpriseRemoved)
+ goto exit;
+
+ /* Be careful of race conditions when setting the H2C command. */
+ do {
+ h2c_box_num = haldata->LastHMEBoxNum;
+
+ if (!_is_fw_read_cmd_down(adapt, h2c_box_num)) {
+ DBG_88E(" fw read cmd failed...\n");
+ goto exit;
+ }
+
+ *(u8 *)(&h2c_cmd) = ElementID;
+
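+ /* Byte 0 of the H2C command is the element ID; up to 3 payload bytes fit in the primary message box and any remainder is written to the extension box first. */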
+ if (CmdLen <= 3) {
+ memcpy((u8 *)(&h2c_cmd)+1, pCmdBuffer, CmdLen);
+ } else {
+ memcpy((u8 *)(&h2c_cmd)+1, pCmdBuffer, 3);
+ ext_cmd_len = CmdLen-3;
+ memcpy((u8 *)(&h2c_cmd_ex), pCmdBuffer+3, ext_cmd_len);
+
+ /* Write Ext command */
+ msgbox_ex_addr = REG_HMEBOX_EXT_0 + (h2c_box_num * RTL88E_EX_MESSAGE_BOX_SIZE);
+ for (cmd_idx = 0; cmd_idx < ext_cmd_len; cmd_idx++) {
+ rtw_write8(adapt, msgbox_ex_addr+cmd_idx, *((u8 *)(&h2c_cmd_ex)+cmd_idx));
+ }
+ }
+ /* Write command */
+ msgbox_addr = REG_HMEBOX_0 + (h2c_box_num * RTL88E_MESSAGE_BOX_SIZE);
+ for (cmd_idx = 0; cmd_idx < RTL88E_MESSAGE_BOX_SIZE; cmd_idx++) {
+ rtw_write8(adapt, msgbox_addr+cmd_idx, *((u8 *)(&h2c_cmd)+cmd_idx));
+ }
+ bcmd_down = true;
+
+ haldata->LastHMEBoxNum = (h2c_box_num+1) % RTL88E_MAX_H2C_BOX_NUMS;
+
+ } while ((!bcmd_down) && (retry_cnts--));
+
+ ret = _SUCCESS;
+
+exit:
+
+_func_exit_;
+
+ return ret;
+}
+
+u8 rtl8188e_set_rssi_cmd(struct adapter *adapt, u8 *param)
+{
+ u8 res = _SUCCESS;
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+_func_enter_;
+
+ if (haldata->fw_ractrl) {
+ ;
+ } else {
+ DBG_88E("==>%s fw dont support RA\n", __func__);
+ res = _FAIL;
+ }
+
+_func_exit_;
+
+ return res;
+}
+
+u8 rtl8188e_set_raid_cmd(struct adapter *adapt, u32 mask)
+{
+ u8 buf[3];
+ u8 res = _SUCCESS;
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+
+_func_enter_;
+ if (haldata->fw_ractrl) {
+ __le32 lmask;
+
+ _rtw_memset(buf, 0, 3);
+ lmask = cpu_to_le32(mask);
+ memcpy(buf, &lmask, 3);
+
+ FillH2CCmd_88E(adapt, H2C_DM_MACID_CFG, 3, buf);
+ } else {
+ DBG_88E("==>%s fw dont support RA\n", __func__);
+ res = _FAIL;
+ }
+
+_func_exit_;
+
+ return res;
+}
+
+/* bitmap[0:27] = tx_rate_bitmap */
+/* bitmap[28:31]= Rate Adaptive id */
+/* arg[0:4] = macid */
+/* arg[5] = Short GI */
+void rtl8188e_Add_RateATid(struct adapter *pAdapter, u32 bitmap, u8 arg, u8 rssi_level)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(pAdapter);
+
+ u8 macid, init_rate, raid, shortGIrate = false;
+
+ macid = arg&0x1f;
+
+ raid = (bitmap>>28) & 0x0f;
+ bitmap &= 0x0fffffff;
+
+ if (rssi_level != DM_RATR_STA_INIT)
+ bitmap = ODM_Get_Rate_Bitmap(&haldata->odmpriv, macid, bitmap, rssi_level);
+
+ bitmap |= ((raid<<28)&0xf0000000);
+
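+ /* init_rate is the index of the highest rate left in the bitmap; BIT(6) flags short GI. */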
+ init_rate = get_highest_rate_idx(bitmap&0x0fffffff)&0x3f;
+
+ shortGIrate = (arg&BIT(5)) ? true : false;
+
+ if (shortGIrate)
+ init_rate |= BIT(6);
+
+ raid = (bitmap>>28) & 0x0f;
+
+ bitmap &= 0x0fffffff;
+
+ DBG_88E("%s=> mac_id:%d, raid:%d, ra_bitmap=0x%x, shortGIrate=0x%02x\n",
+ __func__, macid, raid, bitmap, shortGIrate);
+
+ ODM_RA_UpdateRateInfo_8188E(&(haldata->odmpriv), macid, raid, bitmap, shortGIrate);
+}
+
+void rtl8188e_set_FwPwrMode_cmd(struct adapter *adapt, u8 Mode)
+{
+ struct setpwrmode_parm H2CSetPwrMode;
+ struct pwrctrl_priv *pwrpriv = &adapt->pwrctrlpriv;
+ u8 RLBM = 0; /* 0:Min, 1:Max, 2:User define */
+_func_enter_;
+
+ DBG_88E("%s: Mode=%d SmartPS=%d UAPSD=%d\n", __func__,
+ Mode, pwrpriv->smart_ps, adapt->registrypriv.uapsd_enable);
+
+ switch (Mode) {
+ case PS_MODE_ACTIVE:
+ H2CSetPwrMode.Mode = 0;
+ break;
+ case PS_MODE_MIN:
+ H2CSetPwrMode.Mode = 1;
+ break;
+ case PS_MODE_MAX:
+ RLBM = 1;
+ H2CSetPwrMode.Mode = 1;
+ break;
+ case PS_MODE_DTIM:
+ RLBM = 2;
+ H2CSetPwrMode.Mode = 1;
+ break;
+ case PS_MODE_UAPSD_WMM:
+ H2CSetPwrMode.Mode = 2;
+ break;
+ default:
+ H2CSetPwrMode.Mode = 0;
+ break;
+ }
+
+ H2CSetPwrMode.SmartPS_RLBM = (((pwrpriv->smart_ps<<4)&0xf0) | (RLBM & 0x0f));
+
+ H2CSetPwrMode.AwakeInterval = 1;
+
+ H2CSetPwrMode.bAllQueueUAPSD = adapt->registrypriv.uapsd_enable;
+
+ if (Mode > 0)
+ H2CSetPwrMode.PwrState = 0x00;/* AllON(0x0C), RFON(0x04), RFOFF(0x00) */
+ else
+ H2CSetPwrMode.PwrState = 0x0C;/* AllON(0x0C), RFON(0x04), RFOFF(0x00) */
+
+ FillH2CCmd_88E(adapt, H2C_PS_PWR_MODE, sizeof(H2CSetPwrMode), (u8 *)&H2CSetPwrMode);
+
+_func_exit_;
+}
+
+void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, __le16 mstatus_rpt)
+{
+ u8 opmode, macid;
+ u16 mst_rpt = le16_to_cpu(mstatus_rpt);
+ opmode = (u8) mst_rpt;
+ macid = (u8)(mst_rpt >> 8);
+
+ DBG_88E("### %s: MStatus=%x MACID=%d\n", __func__, opmode, macid);
+ FillH2CCmd_88E(adapt, H2C_COM_MEDIA_STATUS_RPT, sizeof(mst_rpt), (u8 *)&mst_rpt);
+}
+
+static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
+{
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ u16 *fctrl;
+ u32 rate_len, pktlen;
+ struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+
+ memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(cur_network), ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, 0/*pmlmeext->mgnt_seq*/);
+ SetFrameSubType(pframe, WIFI_BEACON);
+
+ pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
+ pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+
+ /* timestamp will be inserted by hardware */
+ pframe += 8;
+ pktlen += 8;
+
+ /* beacon interval: 2 bytes */
+ memcpy(pframe, (unsigned char *)(rtw_get_beacon_interval_from_ie(cur_network->IEs)), 2);
+
+ pframe += 2;
+ pktlen += 2;
+
+ /* capability info: 2 bytes */
+ memcpy(pframe, (unsigned char *)(rtw_get_capability_from_ie(cur_network->IEs)), 2);
+
+ pframe += 2;
+ pktlen += 2;
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
+ u32 ie_len = cur_network->IELength - sizeof(struct ndis_802_11_fixed_ie);
+
+ /* copy only the variable IEs; the fixed fields were filled above */
+ memcpy(pframe, cur_network->IEs+sizeof(struct ndis_802_11_fixed_ie), ie_len);
+ pktlen += ie_len;
+
+ goto _ConstructBeacon;
+ }
+
+ /* below for ad-hoc mode */
+
+ /* SSID */
+ pframe = rtw_set_ie(pframe, _SSID_IE_, cur_network->Ssid.SsidLength, cur_network->Ssid.Ssid, &pktlen);
+
+ /* supported rates... */
+ rate_len = rtw_get_rateset_len(cur_network->SupportedRates);
+ pframe = rtw_set_ie(pframe, _SUPPORTEDRATES_IE_, ((rate_len > 8) ? 8 : rate_len), cur_network->SupportedRates, &pktlen);
+
+ /* DS parameter set */
+ pframe = rtw_set_ie(pframe, _DSSET_IE_, 1, (unsigned char *)&(cur_network->Configuration.DSConfig), &pktlen);
+
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) {
+ u32 ATIMWindow;
+ /* IBSS Parameter Set... */
+ ATIMWindow = 0;
+ pframe = rtw_set_ie(pframe, _IBSS_PARA_IE_, 2, (unsigned char *)(&ATIMWindow), &pktlen);
+ }
+
+ /* todo: ERP IE */
+
+ /* EXTENDED SUPPORTED RATES */
+ if (rate_len > 8)
+ pframe = rtw_set_ie(pframe, _EXT_SUPPORTEDRATES_IE_, (rate_len - 8), (cur_network->SupportedRates + 8), &pktlen);
+
+ /* todo:HT for adhoc */
+
+_ConstructBeacon:
+
+ if ((pktlen + TXDESC_SIZE) > 512) {
+ DBG_88E("beacon frame too large\n");
+ return;
+ }
+
+ *pLength = pktlen;
+}
+
+static void ConstructPSPoll(struct adapter *adapt, u8 *pframe, u32 *pLength)
+{
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u16 *fctrl;
+
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ /* Frame control. */
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+ SetPwrMgt(fctrl);
+ SetFrameSubType(pframe, WIFI_PSPOLL);
+
+ /* AID. */
+ SetDuration(pframe, (pmlmeinfo->aid | 0xc000));
+
+ /* BSSID. */
+ memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+
+ /* TA. */
+ memcpy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)), ETH_ALEN);
+
+ *pLength = 16;
+}
+
+static void ConstructNullFunctionData(struct adapter *adapt, u8 *pframe,
+ u32 *pLength,
+ u8 *StaAddr,
+ u8 bQoS,
+ u8 AC,
+ u8 bEosp,
+ u8 bForcePowerSave)
+{
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ u16 *fctrl;
+ u32 pktlen;
+ struct mlme_priv *pmlmepriv = &adapt->mlmepriv;
+ struct wlan_network *cur_network = &pmlmepriv->cur_network;
+ struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ fctrl = &pwlanhdr->frame_ctl;
+ *(fctrl) = 0;
+ if (bForcePowerSave)
+ SetPwrMgt(fctrl);
+
+ switch (cur_network->network.InfrastructureMode) {
+ case Ndis802_11Infrastructure:
+ SetToDs(fctrl);
+ memcpy(pwlanhdr->addr1, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, StaAddr, ETH_ALEN);
+ break;
+ case Ndis802_11APMode:
+ SetFrDs(fctrl);
+ memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, myid(&(adapt->eeprompriv)), ETH_ALEN);
+ break;
+ case Ndis802_11IBSS:
+ default:
+ memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, myid(&(adapt->eeprompriv)), ETH_ALEN);
+ memcpy(pwlanhdr->addr3, get_my_bssid(&(pmlmeinfo->network)), ETH_ALEN);
+ break;
+ }
+
+ SetSeqNum(pwlanhdr, 0);
+
+ if (bQoS) {
+ struct rtw_ieee80211_hdr_3addr_qos *pwlanqoshdr;
+
+ SetFrameSubType(pframe, WIFI_QOS_DATA_NULL);
+
+ pwlanqoshdr = (struct rtw_ieee80211_hdr_3addr_qos *)pframe;
+ SetPriority(&pwlanqoshdr->qc, AC);
+ SetEOSP(&pwlanqoshdr->qc, bEosp);
+
+ pktlen = sizeof(struct rtw_ieee80211_hdr_3addr_qos);
+ } else {
+ SetFrameSubType(pframe, WIFI_DATA_NULL);
+
+ pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ }
+
+ *pLength = pktlen;
+}
+
+static void ConstructProbeRsp(struct adapter *adapt, u8 *pframe, u32 *pLength, u8 *StaAddr, bool bHideSSID)
+{
+ struct rtw_ieee80211_hdr *pwlanhdr;
+ u16 *fctrl;
+ u8 *mac, *bssid;
+ u32 pktlen;
+ struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+
+ pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
+
+ mac = myid(&(adapt->eeprompriv));
+ bssid = cur_network->MacAddress;
+
+ fctrl = &(pwlanhdr->frame_ctl);
+ *(fctrl) = 0;
+ memcpy(pwlanhdr->addr1, StaAddr, ETH_ALEN);
+ memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
+ memcpy(pwlanhdr->addr3, bssid, ETH_ALEN);
+
+ SetSeqNum(pwlanhdr, 0);
+ SetFrameSubType(fctrl, WIFI_PROBERSP);
+
+ pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
+ pframe += pktlen;
+
+ if (cur_network->IELength > MAX_IE_SZ)
+ return;
+
+ memcpy(pframe, cur_network->IEs, cur_network->IELength);
+ pframe += cur_network->IELength;
+ pktlen += cur_network->IELength;
+
+ *pLength = pktlen;
+}
+
+/* To check if the reserved page content is destroyed by the beacon because the beacon is too large. */
+/* 2010.06.23. Added by tynli. */
+void CheckFwRsvdPageContent(struct adapter *Adapter)
+{
+}
+
+/* */
+/* Description: Fill the reserved page with the packets that FW will use. */
+/* Currently we download four types of packets to the rsvd page: */
+/* (1) Beacon, (2) PS-Poll, (3) Null data, (4) Probe response. */
+/* Input: */
+/* bDLFinished - false: the first time, we send all the packets as one large packet to Hw, */
+/* so we need to set the packet length to the total length. */
+/* true: the second time, we send the first packet (default: beacon) */
+/* to Hw again and set the length in the descriptor to the real beacon length. */
+/* 2009.10.15 by tynli. */
+static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
+{
+ struct hal_data_8188e *haldata;
+ struct xmit_frame *pmgntframe;
+ struct pkt_attrib *pattrib;
+ struct xmit_priv *pxmitpriv;
+ struct mlme_ext_priv *pmlmeext;
+ struct mlme_ext_info *pmlmeinfo;
+ u32 BeaconLength = 0, ProbeRspLength = 0, PSPollLength;
+ u32 NullDataLength, QosNullLength;
+ u8 *ReservedPagePacket;
+ u8 PageNum, PageNeed, TxDescLen;
+ u16 BufIndex;
+ u32 TotalPacketLen;
+ struct rsvdpage_loc RsvdPageLoc;
+
+ DBG_88E("%s\n", __func__);
+ ReservedPagePacket = (u8 *)rtw_zmalloc(1000);
+ if (ReservedPagePacket == NULL) {
+ DBG_88E("%s: alloc ReservedPagePacket fail!\n", __func__);
+ return;
+ }
+
+ haldata = GET_HAL_DATA(adapt);
+ pxmitpriv = &adapt->xmitpriv;
+ pmlmeext = &adapt->mlmeextpriv;
+ pmlmeinfo = &pmlmeext->mlmext_info;
+
+ TxDescLen = TXDESC_SIZE;
+ PageNum = 0;
+
+ /* 3 (1) beacon * 2 pages */
+ BufIndex = TXDESC_OFFSET;
+ ConstructBeacon(adapt, &ReservedPagePacket[BufIndex], &BeaconLength);
+
+ /* When we count the first page size, we need to reserve the descriptor size for the RSVD */
+ /* packet; it will be filled in front of the packet in TXPKTBUF. */
+ PageNeed = (u8)PageNum_128(TxDescLen + BeaconLength);
+ /* Reserve 2 pages for the beacon buffer. 2010.06.24. */
+ if (PageNeed == 1)
+ PageNeed += 1;
+ PageNum += PageNeed;
+ haldata->FwRsvdPageStartOffset = PageNum;
+
+ BufIndex += PageNeed*128;
+
+ /* 3 (2) ps-poll *1 page */
+ RsvdPageLoc.LocPsPoll = PageNum;
+ ConstructPSPoll(adapt, &ReservedPagePacket[BufIndex], &PSPollLength);
+ rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex-TxDescLen], PSPollLength, true, false);
+
+ PageNeed = (u8)PageNum_128(TxDescLen + PSPollLength);
+ PageNum += PageNeed;
+
+ BufIndex += PageNeed*128;
+
+ /* 3 (3) null data * 1 page */
+ RsvdPageLoc.LocNullData = PageNum;
+ ConstructNullFunctionData(adapt, &ReservedPagePacket[BufIndex], &NullDataLength, get_my_bssid(&pmlmeinfo->network), false, 0, 0, false);
+ rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex-TxDescLen], NullDataLength, false, false);
+
+ PageNeed = (u8)PageNum_128(TxDescLen + NullDataLength);
+ PageNum += PageNeed;
+
+ BufIndex += PageNeed*128;
+
+ /* 3 (4) probe response * 1page */
+ RsvdPageLoc.LocProbeRsp = PageNum;
+ ConstructProbeRsp(adapt, &ReservedPagePacket[BufIndex], &ProbeRspLength, get_my_bssid(&pmlmeinfo->network), false);
+ rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex-TxDescLen], ProbeRspLength, false, false);
+
+ PageNeed = (u8)PageNum_128(TxDescLen + ProbeRspLength);
+ PageNum += PageNeed;
+
+ BufIndex += PageNeed*128;
+
+ /* 3 (5) Qos null data */
+ RsvdPageLoc.LocQosNull = PageNum;
+ ConstructNullFunctionData(adapt, &ReservedPagePacket[BufIndex],
+ &QosNullLength, get_my_bssid(&pmlmeinfo->network), true, 0, 0, false);
+ rtl8188e_fill_fake_txdesc(adapt, &ReservedPagePacket[BufIndex-TxDescLen], QosNullLength, false, false);
+
+ PageNeed = (u8)PageNum_128(TxDescLen + QosNullLength);
+ PageNum += PageNeed;
+
+ TotalPacketLen = BufIndex + QosNullLength;
+ pmgntframe = alloc_mgtxmitframe(pxmitpriv);
+ if (pmgntframe == NULL)
+ goto exit;
+
+ /* update attribute */
+ pattrib = &pmgntframe->attrib;
+ update_mgntframe_attrib(adapt, pattrib);
+ pattrib->qsel = 0x10;
+ pattrib->last_txcmdsz = TotalPacketLen - TXDESC_OFFSET;
+ pattrib->pktlen = pattrib->last_txcmdsz;
+ memcpy(pmgntframe->buf_addr, ReservedPagePacket, TotalPacketLen);
+
+ rtw_hal_mgnt_xmit(adapt, pmgntframe);
+
+ DBG_88E("%s: Set RSVD page location to Fw\n", __func__);
+ FillH2CCmd_88E(adapt, H2C_COM_RSVD_PAGE, sizeof(RsvdPageLoc), (u8 *)&RsvdPageLoc);
+
+exit:
+ kfree(ReservedPagePacket);
+}
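+
+/*
+ * Editor's worked example (assumption: PageNum_128() rounds a byte count up to
+ * 128-byte tx-buffer pages, as its use above suggests).  A hypothetical
+ * descriptor-plus-beacon of 262 bytes needs PageNum_128(262) = 3 pages; only a
+ * result of exactly 1 page is bumped to 2, so very short beacons still reserve
+ * two pages before the PS-Poll, null data, probe response and QoS null frames
+ * are laid out at the following 128-byte boundaries.
+ */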
+
+void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ bool bSendBeacon = false;
+ bool bcn_valid = false;
+ u8 DLBcnCount = 0;
+ u32 poll = 0;
+
+_func_enter_;
+
+ DBG_88E("%s mstatus(%x)\n", __func__, mstatus);
+
+ if (mstatus == 1) {
+ /* We should set AID, correct TSF and HW seq enable before setting JoinBssReport to Fw in 88/92C. */
+ /* Suggested by filen. Added by tynli. */
+ rtw_write16(adapt, REG_BCN_PSR_RPT, (0xC000|pmlmeinfo->aid));
+ /* Do not set TSF again here or vWiFi beacon DMA INT will not work. */
+
+ /* Set REG_CR bit 8. DMA beacon by SW. */
+ haldata->RegCR_1 |= BIT0;
+ rtw_write8(adapt, REG_CR+1, haldata->RegCR_1);
+
+ /* Disable Hw protection for the time reserved for Hw beacon transmission. */
+ /* Fix reserved-page download failures caused by access collisions during the protection time. */
+ /* 2010.05.11. Added by tynli. */
+ rtw_write8(adapt, REG_BCN_CTRL, rtw_read8(adapt, REG_BCN_CTRL)&(~BIT(3)));
+ rtw_write8(adapt, REG_BCN_CTRL, rtw_read8(adapt, REG_BCN_CTRL)|BIT(4));
+
+ if (haldata->RegFwHwTxQCtrl&BIT6) {
+ DBG_88E("HalDownloadRSVDPage(): There is an Adapter is sending beacon.\n");
+ bSendBeacon = true;
+ }
+
+ /* Set FWHW_TXQ_CTRL 0x422[6]=0 to tell Hw the packet is not a real beacon frame. */
+ rtw_write8(adapt, REG_FWHW_TXQ_CTRL+2, (haldata->RegFwHwTxQCtrl&(~BIT6)));
+ haldata->RegFwHwTxQCtrl &= (~BIT6);
+
+ /* Clear beacon valid check bit. */
+ rtw_hal_set_hwreg(adapt, HW_VAR_BCN_VALID, NULL);
+ DLBcnCount = 0;
+ poll = 0;
+ do {
+ /* download rsvd page. */
+ SetFwRsvdPagePkt(adapt, false);
+ DLBcnCount++;
+ do {
+ rtw_yield_os();
+ /* rtw_mdelay_os(10); */
+ /* check rsvd page download OK. */
+ rtw_hal_get_hwreg(adapt, HW_VAR_BCN_VALID, (u8 *)(&bcn_valid));
+ poll++;
+ } while (!bcn_valid && (poll%10) != 0 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped);
+ } while (!bcn_valid && DLBcnCount <= 100 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped);
+
+ if (!adapt->bSurpriseRemoved && !adapt->bDriverStopped) {
+ if (!bcn_valid)
+ DBG_88E("%s: 1 Download RSVD page failed! DLBcnCount:%u, poll:%u\n", __func__, DLBcnCount, poll);
+ else
+ DBG_88E("%s: 1 Download RSVD success! DLBcnCount:%u, poll:%u\n", __func__, DLBcnCount, poll);
+ }
+ /* */
+ /* We can only send the reserved page twice while the Tx thread is stopped (e.g. pnpsetpower) */
+ /* because we need to free the Tx BCN Desc which is used by the first reserved page packet. */
+ /* At run time, we cannot get the Tx Desc until it is released in TxHandleInterrupt(), so we return */
+ /* the beacon TCB in the following code. 2011.11.23. by tynli. */
+ /* */
+
+ /* Enable Bcn */
+ rtw_write8(adapt, REG_BCN_CTRL, rtw_read8(adapt, REG_BCN_CTRL)|BIT(3));
+ rtw_write8(adapt, REG_BCN_CTRL, rtw_read8(adapt, REG_BCN_CTRL)&(~BIT(4)));
+
+ /* Check whether another adapter still needs to send beacons. */
+ /* If so, the original value of 0x422[6] will be 1; we must restore it instead of */
+ /* leaving 0x422[6] cleared after downloading the reserved page, or HW will be */
+ /* unable to send the beacon. */
+ /* 2010.06.23. Added by tynli. */
+ if (bSendBeacon) {
+ rtw_write8(adapt, REG_FWHW_TXQ_CTRL+2, (haldata->RegFwHwTxQCtrl|BIT6));
+ haldata->RegFwHwTxQCtrl |= BIT6;
+ }
+
+ /* Update RSVD page location H2C to Fw. */
+ if (bcn_valid) {
+ rtw_hal_set_hwreg(adapt, HW_VAR_BCN_VALID, NULL);
+ DBG_88E("Set RSVD page location to Fw.\n");
+ }
+
+ /* Do not enable HW DMA BCN or it will hang the PCIe interface due to a timing issue. 2011.11.24. by tynli. */
+ /* Clear CR[8] or the beacon packet will not be sent to TxBuf anymore. */
+ haldata->RegCR_1 &= (~BIT0);
+ rtw_write8(adapt, REG_CR+1, haldata->RegCR_1);
+ }
+_func_exit_;
+}
+
+void rtl8188e_set_p2p_ps_offload_cmd(struct adapter *adapt, u8 p2p_ps_state)
+{
+#ifdef CONFIG_88EU_P2P
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct wifidirect_info *pwdinfo = &(adapt->wdinfo);
+ struct P2P_PS_Offload_t *p2p_ps_offload = &haldata->p2p_ps_offload;
+ u8 i;
+
+_func_enter_;
+
+ switch (p2p_ps_state) {
+ case P2P_PS_DISABLE:
+ DBG_88E("P2P_PS_DISABLE\n");
+ _rtw_memset(p2p_ps_offload, 0, 1);
+ break;
+ case P2P_PS_ENABLE:
+ DBG_88E("P2P_PS_ENABLE\n");
+ /* update CTWindow value. */
+ if (pwdinfo->ctwindow > 0) {
+ p2p_ps_offload->CTWindow_En = 1;
+ rtw_write8(adapt, REG_P2P_CTWIN, pwdinfo->ctwindow);
+ }
+
+ /* hw only supports 2 sets of NoA */
+ for (i = 0; i < pwdinfo->noa_num; i++) {
+ /* Select which NoA descriptor set the following register writes configure */
+ rtw_write8(adapt, REG_NOA_DESC_SEL, (i << 4));
+ if (i == 0)
+ p2p_ps_offload->NoA0_En = 1;
+ else
+ p2p_ps_offload->NoA1_En = 1;
+
+ /* config P2P NoA Descriptor Register */
+ rtw_write32(adapt, REG_NOA_DESC_DURATION, pwdinfo->noa_duration[i]);
+ rtw_write32(adapt, REG_NOA_DESC_INTERVAL, pwdinfo->noa_interval[i]);
+ rtw_write32(adapt, REG_NOA_DESC_START, pwdinfo->noa_start_time[i]);
+ rtw_write8(adapt, REG_NOA_DESC_COUNT, pwdinfo->noa_count[i]);
+ }
+
+ if ((pwdinfo->opp_ps == 1) || (pwdinfo->noa_num > 0)) {
+ /* rst p2p circuit */
+ rtw_write8(adapt, REG_DUAL_TSF_RST, BIT(4));
+
+ p2p_ps_offload->Offload_En = 1;
+
+ if (pwdinfo->role == P2P_ROLE_GO) {
+ p2p_ps_offload->role = 1;
+ p2p_ps_offload->AllStaSleep = 0;
+ } else {
+ p2p_ps_offload->role = 0;
+ }
+
+ p2p_ps_offload->discovery = 0;
+ }
+ break;
+ case P2P_PS_SCAN:
+ DBG_88E("P2P_PS_SCAN\n");
+ p2p_ps_offload->discovery = 1;
+ break;
+ case P2P_PS_SCAN_DONE:
+ DBG_88E("P2P_PS_SCAN_DONE\n");
+ p2p_ps_offload->discovery = 0;
+ pwdinfo->p2p_ps_state = P2P_PS_ENABLE;
+ break;
+ default:
+ break;
+ }
+
+ FillH2CCmd_88E(adapt, H2C_PS_P2P_OFFLOAD, 1, (u8 *)p2p_ps_offload);
+#endif
+
+_func_exit_;
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
new file mode 100644
index 00000000000..9c2e7a20c09
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -0,0 +1,268 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+/* */
+/* Description: */
+/* */
+/* This file is for the RTL8188E dynamic mechanism only */
+/* */
+/* */
+/* */
+#define _RTL8188E_DM_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#include <rtl8188e_hal.h>
+
+static void dm_CheckStatistics(struct adapter *Adapter)
+{
+}
+
+/* Initialize GPIO setting registers */
+static void dm_InitGPIOSetting(struct adapter *Adapter)
+{
+ u8 tmp1byte;
+
+ tmp1byte = rtw_read8(Adapter, REG_GPIO_MUXCFG);
+ tmp1byte &= (GPIOSEL_GPIO | ~GPIOSEL_ENBT);
+
+ rtw_write8(Adapter, REG_GPIO_MUXCFG, tmp1byte);
+}
+
+/* */
+/* functions */
+/* */
+static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
+{
+ struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &hal_data->dmpriv;
+ struct odm_dm_struct *dm_odm = &(hal_data->odmpriv);
+ u8 cut_ver, fab_ver;
+
+ /* Init Value */
+ _rtw_memset(dm_odm, 0, sizeof(*dm_odm));
+
+ dm_odm->Adapter = Adapter;
+
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_PLATFORM, ODM_CE);
+
+ if (Adapter->interface_type == RTW_GSPI)
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_INTERFACE, ODM_ITRF_SDIO);
+ else
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_INTERFACE, Adapter->interface_type);/* RTL871X_HCI_TYPE */
+
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_IC_TYPE, ODM_RTL8188E);
+
+ fab_ver = ODM_TSMC;
+ cut_ver = ODM_CUT_A;
+
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_FAB_VER, fab_ver);
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_CUT_VER, cut_ver);
+
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_MP_TEST_CHIP, IS_NORMAL_CHIP(hal_data->VersionID));
+
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_PATCH_ID, hal_data->CustomerID);
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_BWIFI_TEST, Adapter->registrypriv.wifi_spec);
+
+
+ if (hal_data->rf_type == RF_1T1R)
+ ODM_CmnInfoUpdate(dm_odm, ODM_CMNINFO_RF_TYPE, ODM_1T1R);
+ else if (hal_data->rf_type == RF_2T2R)
+ ODM_CmnInfoUpdate(dm_odm, ODM_CMNINFO_RF_TYPE, ODM_2T2R);
+ else if (hal_data->rf_type == RF_1T2R)
+ ODM_CmnInfoUpdate(dm_odm, ODM_CMNINFO_RF_TYPE, ODM_1T2R);
+
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_RF_ANTENNA_TYPE, hal_data->TRxAntDivType);
+
+ pdmpriv->InitODMFlag = ODM_RF_CALIBRATION |
+ ODM_RF_TX_PWR_TRACK;
+
+ ODM_CmnInfoUpdate(dm_odm, ODM_CMNINFO_ABILITY, pdmpriv->InitODMFlag);
+}
+
+static void Update_ODM_ComInfo_88E(struct adapter *Adapter)
+{
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
+ struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
+ struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct odm_dm_struct *dm_odm = &(hal_data->odmpriv);
+ struct dm_priv *pdmpriv = &hal_data->dmpriv;
+ int i;
+
+ pdmpriv->InitODMFlag = ODM_BB_DIG |
+ ODM_BB_RA_MASK |
+ ODM_BB_DYNAMIC_TXPWR |
+ ODM_BB_FA_CNT |
+ ODM_BB_RSSI_MONITOR |
+ ODM_BB_CCK_PD |
+ ODM_BB_PWR_SAVE |
+ ODM_MAC_EDCA_TURBO |
+ ODM_RF_CALIBRATION |
+ ODM_RF_TX_PWR_TRACK;
+ if (hal_data->AntDivCfg)
+ pdmpriv->InitODMFlag |= ODM_BB_ANT_DIV;
+
+ if (Adapter->registrypriv.mp_mode == 1) {
+ pdmpriv->InitODMFlag = ODM_RF_CALIBRATION |
+ ODM_RF_TX_PWR_TRACK;
+ }
+
+ ODM_CmnInfoUpdate(dm_odm, ODM_CMNINFO_ABILITY, pdmpriv->InitODMFlag);
+
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_TX_UNI, &(Adapter->xmitpriv.tx_bytes));
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_RX_UNI, &(Adapter->recvpriv.rx_bytes));
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_WM_MODE, &(pmlmeext->cur_wireless_mode));
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_SEC_CHNL_OFFSET, &(hal_data->nCur40MhzPrimeSC));
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_SEC_MODE, &(Adapter->securitypriv.dot11PrivacyAlgrthm));
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_BW, &(hal_data->CurrentChannelBW));
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_CHNL, &(hal_data->CurrentChannel));
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_NET_CLOSED, &(Adapter->net_closed));
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_MP_MODE, &(Adapter->registrypriv.mp_mode));
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_SCAN, &(pmlmepriv->bScanInProcess));
+ ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_POWER_SAVING, &(pwrctrlpriv->bpower_saving));
+ ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_RF_ANTENNA_TYPE, hal_data->TRxAntDivType);
+
+ for (i = 0; i < NUM_STA; i++)
+ ODM_CmnInfoPtrArrayHook(dm_odm, ODM_CMNINFO_STA_STATUS, i, NULL);
+}
+
+void rtl8188e_InitHalDm(struct adapter *Adapter)
+{
+ struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &hal_data->dmpriv;
+ struct odm_dm_struct *dm_odm = &(hal_data->odmpriv);
+
+ dm_InitGPIOSetting(Adapter);
+ pdmpriv->DM_Type = DM_Type_ByDriver;
+ pdmpriv->DMFlag = DYNAMIC_FUNC_DISABLE;
+ Update_ODM_ComInfo_88E(Adapter);
+ ODM_DMInit(dm_odm);
+ Adapter->fix_rate = 0xFF;
+}
+
+void rtl8188e_HalDmWatchDog(struct adapter *Adapter)
+{
+ bool fw_cur_in_ps = false;
+ bool fw_ps_awake = true;
+ u8 hw_init_completed = false;
+ struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+
+ _func_enter_;
+ hw_init_completed = Adapter->hw_init_completed;
+
+ if (!hw_init_completed)
+ goto skip_dm;
+
+ fw_cur_in_ps = Adapter->pwrctrlpriv.bFwCurrentInPSMode;
+ rtw_hal_get_hwreg(Adapter, HW_VAR_FWLPS_RF_ON, (u8 *)(&fw_ps_awake));
+
+ /* Fw is in p2p power-saving mode; the driver should stop the dynamic mechanism. */
+ /* modified by thomas. 2011.06.11. */
+ if (Adapter->wdinfo.p2p_ps_mode)
+ fw_ps_awake = false;
+
+ if (hw_init_completed && ((!fw_cur_in_ps) && fw_ps_awake)) {
+ /* Calculate Tx/Rx statistics. */
+ dm_CheckStatistics(Adapter);
+
+ _func_exit_;
+ }
+
+ /* ODM */
+ if (hw_init_completed) {
+ struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
+ u8 bLinked = false;
+
+ if ((check_fwstate(pmlmepriv, WIFI_AP_STATE)) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE))) {
+ if (Adapter->stapriv.asoc_sta_count > 2)
+ bLinked = true;
+ } else {/* Station mode */
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
+ bLinked = true;
+ }
+
+ ODM_CmnInfoUpdate(&hal_data->odmpriv, ODM_CMNINFO_LINK, bLinked);
+ ODM_DMWatchdog(&hal_data->odmpriv);
+ }
+skip_dm:
+ /* Check GPIO to determine current RF on/off and Pbc status. */
+ /* Check Hardware Radio ON/OFF or not */
+ return;
+}
+
+void rtl8188e_init_dm_priv(struct adapter *Adapter)
+{
+ struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &hal_data->dmpriv;
+ struct odm_dm_struct *podmpriv = &hal_data->odmpriv;
+
+ _rtw_memset(pdmpriv, 0, sizeof(struct dm_priv));
+ Init_ODM_ComInfo_88E(Adapter);
+ ODM_InitDebugSetting(podmpriv);
+}
+
+void rtl8188e_deinit_dm_priv(struct adapter *Adapter)
+{
+}
+
+/* Add new function to reset the state of antenna diversity before link. */
+/* Compare RSSI for deciding antenna */
+void AntDivCompare8188E(struct adapter *Adapter, struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src)
+{
+ struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+
+ if (0 != hal_data->AntDivCfg) {
+ /* select the optimum antenna before the link is established (antenna diversity) */
+ if (dst->Rssi >= src->Rssi) {/* keep org parameter */
+ src->Rssi = dst->Rssi;
+ src->PhyInfo.Optimum_antenna = dst->PhyInfo.Optimum_antenna;
+ }
+ }
+}
+
+/* Add new function to reset the state of antenna diversity before link. */
+u8 AntDivBeforeLink8188E(struct adapter *Adapter)
+{
+ struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
+ struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
+ struct sw_ant_switch *dm_swat_tbl = &dm_odm->DM_SWAT_Table;
+ struct mlme_priv *pmlmepriv = &(Adapter->mlmepriv);
+
+ /* Condition that does not need to use antenna diversity. */
+ if (hal_data->AntDivCfg == 0)
+ return false;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
+ return false;
+
+ if (dm_swat_tbl->SWAS_NoLink_State == 0) {
+ /* switch channel */
+ dm_swat_tbl->SWAS_NoLink_State = 1;
+ dm_swat_tbl->CurAntenna = (dm_swat_tbl->CurAntenna == Antenna_A) ? Antenna_B : Antenna_A;
+
+ rtw_antenna_select_cmd(Adapter, dm_swat_tbl->CurAntenna, false);
+ return true;
+ } else {
+ dm_swat_tbl->SWAS_NoLink_State = 0;
+ return false;
+ }
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
new file mode 100644
index 00000000000..292ba62d722
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -0,0 +1,2378 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _HAL_INIT_C_
+
+#include <drv_types.h>
+#include <rtw_efuse.h>
+
+#include <rtl8188e_hal.h>
+
+#include <rtw_iol.h>
+
+#include <usb_ops.h>
+
+static void iol_mode_enable(struct adapter *padapter, u8 enable)
+{
+ u8 reg_0xf0 = 0;
+
+ if (enable) {
+ /* Enable initial offload */
+ reg_0xf0 = rtw_read8(padapter, REG_SYS_CFG);
+ rtw_write8(padapter, REG_SYS_CFG, reg_0xf0|SW_OFFLOAD_EN);
+
+ if (!padapter->bFWReady) {
+ DBG_88E("bFWReady == false call reset 8051...\n");
+ _8051Reset88E(padapter);
+ }
+
+ } else {
+ /* disable initial offload */
+ reg_0xf0 = rtw_read8(padapter, REG_SYS_CFG);
+ rtw_write8(padapter, REG_SYS_CFG, reg_0xf0 & ~SW_OFFLOAD_EN);
+ }
+}
+
+static s32 iol_execute(struct adapter *padapter, u8 control)
+{
+ s32 status = _FAIL;
+ u8 reg_0x88 = 0;
+ u32 start = 0, passing_time = 0;
+
+ control = control&0x0f;
+ reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0);
+ rtw_write8(padapter, REG_HMEBOX_E0, reg_0x88|control);
+
+ start = rtw_get_current_time();
+ while ((reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0)) & control &&
+ (passing_time = rtw_get_passing_time_ms(start)) < 1000) {
+ ;
+ }
+
+ reg_0x88 = rtw_read8(padapter, REG_HMEBOX_E0);
+ status = (reg_0x88 & control) ? _FAIL : _SUCCESS;
+ if (reg_0x88 & control<<4)
+ status = _FAIL;
+ return status;
+}
+
+static s32 iol_InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy)
+{
+ s32 rst = _SUCCESS;
+ iol_mode_enable(padapter, 1);
+ rtw_write8(padapter, REG_TDECTRL+1, txpktbuf_bndy);
+ rst = iol_execute(padapter, CMD_INIT_LLT);
+ iol_mode_enable(padapter, 0);
+ return rst;
+}
+
+static void
+efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
+{
+ u8 *efuseTbl = NULL;
+ u8 rtemp8;
+ u16 eFuse_Addr = 0;
+ u8 offset, wren;
+ u16 i, j;
+ u16 **eFuseWord = NULL;
+ u16 efuse_utilized = 0;
+ u8 u1temp = 0;
+
+ efuseTbl = (u8 *)rtw_zmalloc(EFUSE_MAP_LEN_88E);
+ if (efuseTbl == NULL) {
+ DBG_88E("%s: alloc efuseTbl fail!\n", __func__);
+ goto exit;
+ }
+
+ eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16));
+ if (eFuseWord == NULL) {
+ DBG_88E("%s: alloc eFuseWord fail!\n", __func__);
+ goto exit;
+ }
+
+ /* 0. Refresh the efuse init map as all 0xFF. */
+ for (i = 0; i < EFUSE_MAX_SECTION_88E; i++)
+ for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++)
+ eFuseWord[i][j] = 0xFFFF;
+
+ /* */
+ /* 1. Read the first byte to check if efuse is empty!!! */
+ /* */
+ /* */
+ rtemp8 = *(phymap+eFuse_Addr);
+ if (rtemp8 != 0xFF) {
+ efuse_utilized++;
+ eFuse_Addr++;
+ } else {
+ DBG_88E("EFUSE is empty efuse_Addr-%d efuse_data =%x\n", eFuse_Addr, rtemp8);
+ goto exit;
+ }
+
+ /* */
+ /* 2. Read real efuse content. Filter PG header and every section data. */
+ /* */
+ while ((rtemp8 != 0xFF) && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E)) {
+ /* Check PG header for section num. */
+ if ((rtemp8 & 0x1F) == 0x0F) { /* extended header */
+ u1temp = ((rtemp8 & 0xE0) >> 5);
+ rtemp8 = *(phymap+eFuse_Addr);
+ if ((rtemp8 & 0x0F) == 0x0F) {
+ eFuse_Addr++;
+ rtemp8 = *(phymap+eFuse_Addr);
+
+ if (rtemp8 != 0xFF && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E))
+ eFuse_Addr++;
+ continue;
+ } else {
+ offset = ((rtemp8 & 0xF0) >> 1) | u1temp;
+ wren = (rtemp8 & 0x0F);
+ eFuse_Addr++;
+ }
+ } else {
+ offset = ((rtemp8 >> 4) & 0x0f);
+ wren = (rtemp8 & 0x0f);
+ }
+
+ if (offset < EFUSE_MAX_SECTION_88E) {
+ /* Get word enable value from PG header */
+ for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
+ /* Check word enable condition in the section */
+ if (!(wren & 0x01)) {
+ rtemp8 = *(phymap+eFuse_Addr);
+ eFuse_Addr++;
+ efuse_utilized++;
+ eFuseWord[offset][i] = (rtemp8 & 0xff);
+ if (eFuse_Addr >= EFUSE_REAL_CONTENT_LEN_88E)
+ break;
+ rtemp8 = *(phymap+eFuse_Addr);
+ eFuse_Addr++;
+ efuse_utilized++;
+ eFuseWord[offset][i] |= (((u16)rtemp8 << 8) & 0xff00);
+
+ if (eFuse_Addr >= EFUSE_REAL_CONTENT_LEN_88E)
+ break;
+ }
+ wren >>= 1;
+ }
+ }
+ /* Read next PG header */
+ rtemp8 = *(phymap+eFuse_Addr);
+
+ if (rtemp8 != 0xFF && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E)) {
+ efuse_utilized++;
+ eFuse_Addr++;
+ }
+ }
+
+ /* */
+ /* 3. Collect 16 sections and 4 word unit into Efuse map. */
+ /* */
+ for (i = 0; i < EFUSE_MAX_SECTION_88E; i++) {
+ for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++) {
+ efuseTbl[(i*8)+(j*2)] = (eFuseWord[i][j] & 0xff);
+ efuseTbl[(i*8)+((j*2)+1)] = ((eFuseWord[i][j] >> 8) & 0xff);
+ }
+ }
+
+ /* */
+ /* 4. Copy from Efuse map to output pointer memory!!! */
+ /* */
+ for (i = 0; i < _size_byte; i++)
+ pbuf[i] = efuseTbl[_offset+i];
+
+ /* */
+ /* 5. Calculate Efuse utilization. */
+ /* */
+
+exit:
+ kfree(efuseTbl);
+
+ if (eFuseWord)
+ rtw_mfree2d((void *)eFuseWord, EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16));
+}
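+
+/*
+ * Editor's worked example for the PG-header decoding above (the header value
+ * is hypothetical): a plain header byte of 0x2C gives offset = (0x2C >> 4) &
+ * 0x0F = 2 and wren = 0x0C; since a cleared bit means "word written", only
+ * words 0 and 1 of section 2 are present, so four data bytes follow before the
+ * next PG header.
+ */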
+
+static void efuse_read_phymap_from_txpktbuf(
+ struct adapter *adapter,
+ int bcnhead, /* beacon head, where FW stores the length (2 bytes) and the efuse physical map. */
+ u8 *content, /* buffer to store the efuse physical map */
+ u16 *size /* in: max bytes to read; out: updated to the number of bytes read */
+ )
+{
+ u16 dbg_addr = 0;
+ u32 start = 0, passing_time = 0;
+ u8 reg_0x143 = 0;
+ u32 lo32 = 0, hi32 = 0;
+ u16 len = 0, count = 0;
+ int i = 0;
+ u16 limit = *size;
+
+ u8 *pos = content;
+
+ if (bcnhead < 0) /* if not valid */
+ bcnhead = rtw_read8(adapter, REG_TDECTRL+1);
+
+ DBG_88E("%s bcnhead:%d\n", __func__, bcnhead);
+
+ rtw_write8(adapter, REG_PKT_BUFF_ACCESS_CTRL, TXPKT_BUF_SELECT);
+
+ dbg_addr = bcnhead*128/8; /* 8-bytes addressing */
+
+ while (1) {
+ rtw_write16(adapter, REG_PKTBUF_DBG_ADDR, dbg_addr+i);
+
+ rtw_write8(adapter, REG_TXPKTBUF_DBG, 0);
+ start = rtw_get_current_time();
+ while (!(reg_0x143 = rtw_read8(adapter, REG_TXPKTBUF_DBG)) &&
+ (passing_time = rtw_get_passing_time_ms(start)) < 1000) {
+ DBG_88E("%s polling reg_0x143:0x%02x, reg_0x106:0x%02x\n", __func__, reg_0x143, rtw_read8(adapter, 0x106));
+ rtw_usleep_os(100);
+ }
+
+ lo32 = rtw_read32(adapter, REG_PKTBUF_DBG_DATA_L);
+ hi32 = rtw_read32(adapter, REG_PKTBUF_DBG_DATA_H);
+
+ if (i == 0) {
+ u8 lenc[2];
+ u16 lenbak, aaabak;
+ u16 aaa;
+ lenc[0] = rtw_read8(adapter, REG_PKTBUF_DBG_DATA_L);
+ lenc[1] = rtw_read8(adapter, REG_PKTBUF_DBG_DATA_L+1);
+
+ aaabak = le16_to_cpup((__le16 *)lenc);
+ lenbak = le16_to_cpu(*((__le16 *)lenc));
+ aaa = le16_to_cpup((__le16 *)&lo32);
+ len = le16_to_cpu(*((__le16 *)&lo32));
+
+ limit = (len-2 < limit) ? len-2 : limit;
+
+ DBG_88E("%s len:%u, lenbak:%u, aaa:%u, aaabak:%u\n", __func__, len, lenbak, aaa, aaabak);
+
+ memcpy(pos, ((u8 *)&lo32)+2, (limit >= count+2) ? 2 : limit-count);
+ count += (limit >= count+2) ? 2 : limit-count;
+ pos = content+count;
+
+ } else {
+ memcpy(pos, ((u8 *)&lo32), (limit >= count+4) ? 4 : limit-count);
+ count += (limit >= count+4) ? 4 : limit-count;
+ pos = content+count;
+ }
+
+ if (limit > count && len-2 > count) {
+ memcpy(pos, (u8 *)&hi32, (limit >= count+4) ? 4 : limit-count);
+ count += (limit >= count+4) ? 4 : limit-count;
+ pos = content+count;
+ }
+
+ if (limit <= count || len-2 <= count)
+ break;
+ i++;
+ }
+ rtw_write8(adapter, REG_PKT_BUFF_ACCESS_CTRL, DISABLE_TRXPKT_BUF_ACCESS);
+ DBG_88E("%s read count:%u\n", __func__, count);
+ *size = count;
+}
+
+static s32 iol_read_efuse(struct adapter *padapter, u8 txpktbuf_bndy, u16 offset, u16 size_byte, u8 *logical_map)
+{
+ s32 status = _FAIL;
+ u8 physical_map[512];
+ u16 size = 512;
+
+ rtw_write8(padapter, REG_TDECTRL+1, txpktbuf_bndy);
+ _rtw_memset(physical_map, 0xFF, 512);
+ rtw_write8(padapter, REG_PKT_BUFF_ACCESS_CTRL, TXPKT_BUF_SELECT);
+ status = iol_execute(padapter, CMD_READ_EFUSE_MAP);
+ if (status == _SUCCESS)
+ efuse_read_phymap_from_txpktbuf(padapter, txpktbuf_bndy, physical_map, &size);
+ efuse_phymap_to_logical(physical_map, offset, size_byte, logical_map);
+ return status;
+}
+
+s32 rtl8188e_iol_efuse_patch(struct adapter *padapter)
+{
+ s32 result = _SUCCESS;
+
+ DBG_88E("==> %s\n", __func__);
+ if (rtw_IOL_applied(padapter)) {
+ iol_mode_enable(padapter, 1);
+ result = iol_execute(padapter, CMD_READ_EFUSE_MAP);
+ if (result == _SUCCESS)
+ result = iol_execute(padapter, CMD_EFUSE_PATCH);
+
+ iol_mode_enable(padapter, 0);
+ }
+ return result;
+}
+
+static s32 iol_ioconfig(struct adapter *padapter, u8 iocfg_bndy)
+{
+ s32 rst = _SUCCESS;
+
+ rtw_write8(padapter, REG_TDECTRL+1, iocfg_bndy);
+ rst = iol_execute(padapter, CMD_IOCONFIG);
+ return rst;
+}
+
+static int rtl8188e_IOL_exec_cmds_sync(struct adapter *adapter, struct xmit_frame *xmit_frame, u32 max_wating_ms, u32 bndy_cnt)
+{
+ struct pkt_attrib *pattrib = &xmit_frame->attrib;
+ u8 i;
+ int ret = _FAIL;
+
+ if (rtw_IOL_append_END_cmd(xmit_frame) != _SUCCESS)
+ goto exit;
+ if (rtw_usb_bulk_size_boundary(adapter, TXDESC_SIZE+pattrib->last_txcmdsz)) {
+ if (rtw_IOL_append_END_cmd(xmit_frame) != _SUCCESS)
+ goto exit;
+ }
+
+ dump_mgntframe_and_wait(adapter, xmit_frame, max_wating_ms);
+
+ iol_mode_enable(adapter, 1);
+ for (i = 0; i < bndy_cnt; i++) {
+ u8 page_no = 0;
+ page_no = i*2;
+ ret = iol_ioconfig(adapter, page_no);
+ if (ret != _SUCCESS)
+ break;
+ }
+ iol_mode_enable(adapter, 0);
+exit:
+ /* restore BCN_HEAD */
+ rtw_write8(adapter, REG_TDECTRL+1, 0);
+ return ret;
+}
+
+void rtw_IOL_cmd_tx_pkt_buf_dump(struct adapter *Adapter, int data_len)
+{
+ u32 fifo_data, reg_140;
+ u32 addr, rstatus, loop = 0;
+ u16 data_cnts = (data_len/8)+1;
+ u8 *pbuf = rtw_zvmalloc(data_len+10);
+ DBG_88E("###### %s ######\n", __func__);
+
+ rtw_write8(Adapter, REG_PKT_BUFF_ACCESS_CTRL, TXPKT_BUF_SELECT);
+ if (pbuf) {
+ for (addr = 0; addr < data_cnts; addr++) {
+ rtw_write32(Adapter, 0x140, addr);
+ rtw_usleep_os(2);
+ loop = 0;
+ do {
+ rstatus = (reg_140 = rtw_read32(Adapter, REG_PKTBUF_DBG_CTRL)&BIT24);
+ if (rstatus) {
+ fifo_data = rtw_read32(Adapter, REG_PKTBUF_DBG_DATA_L);
+ memcpy(pbuf+(addr*8), &fifo_data, 4);
+
+ fifo_data = rtw_read32(Adapter, REG_PKTBUF_DBG_DATA_H);
+ memcpy(pbuf+(addr*8+4), &fifo_data, 4);
+ }
+ rtw_usleep_os(2);
+ } while (!rstatus && (loop++ < 10));
+ }
+ rtw_IOL_cmd_buf_dump(Adapter, data_len, pbuf);
+ rtw_vmfree(pbuf, data_len+10);
+ }
+ DBG_88E("###### %s ######\n", __func__);
+}
+
+static void _FWDownloadEnable(struct adapter *padapter, bool enable)
+{
+ u8 tmp;
+
+ if (enable) {
+ /* MCU firmware download enable. */
+ tmp = rtw_read8(padapter, REG_MCUFWDL);
+ rtw_write8(padapter, REG_MCUFWDL, tmp | 0x01);
+
+ /* 8051 reset */
+ tmp = rtw_read8(padapter, REG_MCUFWDL+2);
+ rtw_write8(padapter, REG_MCUFWDL+2, tmp&0xf7);
+ } else {
+ /* MCU firmware download disable. */
+ tmp = rtw_read8(padapter, REG_MCUFWDL);
+ rtw_write8(padapter, REG_MCUFWDL, tmp&0xfe);
+
+ /* Reserved for fw extension. */
+ rtw_write8(padapter, REG_MCUFWDL+1, 0x00);
+ }
+}
+
+#define MAX_REG_BOLCK_SIZE 196
+
+static int _BlockWrite(struct adapter *padapter, void *buffer, u32 buffSize)
+{
+ int ret = _SUCCESS;
+ u32 blockSize_p1 = 4; /* (Default) Phase #1 : PCI must use 4-byte writes to download FW */
+ u32 blockSize_p2 = 8; /* Phase #2 : Use 8-byte writes if Phase #1 used a larger block size. */
+ u32 blockSize_p3 = 1; /* Phase #3 : Use 1-byte, the remnant of FW image. */
+ u32 blockCount_p1 = 0, blockCount_p2 = 0, blockCount_p3 = 0;
+ u32 remainSize_p1 = 0, remainSize_p2 = 0;
+ u8 *bufferPtr = (u8 *)buffer;
+ u32 i = 0, offset = 0;
+
+ blockSize_p1 = MAX_REG_BOLCK_SIZE;
+
+ /* 3 Phase #1 */
+ blockCount_p1 = buffSize / blockSize_p1;
+ remainSize_p1 = buffSize % blockSize_p1;
+
+ if (blockCount_p1) {
+ RT_TRACE(_module_hal_init_c_, _drv_notice_,
+ ("_BlockWrite: [P1] buffSize(%d) blockSize_p1(%d) blockCount_p1(%d) remainSize_p1(%d)\n",
+ buffSize, blockSize_p1, blockCount_p1, remainSize_p1));
+ }
+
+ for (i = 0; i < blockCount_p1; i++) {
+ ret = rtw_writeN(padapter, (FW_8188E_START_ADDRESS + i * blockSize_p1), blockSize_p1, (bufferPtr + i * blockSize_p1));
+ if (ret == _FAIL)
+ goto exit;
+ }
+
+ /* 3 Phase #2 */
+ if (remainSize_p1) {
+ offset = blockCount_p1 * blockSize_p1;
+
+ blockCount_p2 = remainSize_p1/blockSize_p2;
+ remainSize_p2 = remainSize_p1%blockSize_p2;
+
+ if (blockCount_p2) {
+ RT_TRACE(_module_hal_init_c_, _drv_notice_,
+ ("_BlockWrite: [P2] buffSize_p2(%d) blockSize_p2(%d) blockCount_p2(%d) remainSize_p2(%d)\n",
+ (buffSize-offset), blockSize_p2, blockCount_p2, remainSize_p2));
+ }
+
+ for (i = 0; i < blockCount_p2; i++) {
+ ret = rtw_writeN(padapter, (FW_8188E_START_ADDRESS + offset + i*blockSize_p2), blockSize_p2, (bufferPtr + offset + i*blockSize_p2));
+
+ if (ret == _FAIL)
+ goto exit;
+ }
+ }
+
+ /* 3 Phase #3 */
+ if (remainSize_p2) {
+ offset = (blockCount_p1 * blockSize_p1) + (blockCount_p2 * blockSize_p2);
+
+ blockCount_p3 = remainSize_p2 / blockSize_p3;
+
+ RT_TRACE(_module_hal_init_c_, _drv_notice_,
+ ("_BlockWrite: [P3] buffSize_p3(%d) blockSize_p3(%d) blockCount_p3(%d)\n",
+ (buffSize-offset), blockSize_p3, blockCount_p3));
+
+ for (i = 0; i < blockCount_p3; i++) {
+ ret = rtw_write8(padapter, (FW_8188E_START_ADDRESS + offset + i), *(bufferPtr + offset + i));
+
+ if (ret == _FAIL)
+ goto exit;
+ }
+ }
+
+exit:
+ return ret;
+}
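+
+/*
+ * Editor's worked example for the three-phase split above (the buffer size is
+ * hypothetical): with buffSize = 1000 and blockSize_p1 = 196, phase 1 writes
+ * 5 blocks (980 bytes) leaving 20, phase 2 writes 2 eight-byte blocks (16
+ * bytes) leaving 4, and phase 3 writes the last 4 bytes one byte at a time.
+ */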
+
+static int _PageWrite(struct adapter *padapter, u32 page, void *buffer, u32 size)
+{
+ u8 value8;
+ u8 u8Page = (u8)(page & 0x07);
+
+ value8 = (rtw_read8(padapter, REG_MCUFWDL+2) & 0xF8) | u8Page;
+ rtw_write8(padapter, REG_MCUFWDL+2, value8);
+
+ return _BlockWrite(padapter, buffer, size);
+}
+
+static int _WriteFW(struct adapter *padapter, void *buffer, u32 size)
+{
+ /* Since we need to decide the FW download method dynamically, we call this function to get the chip version. */
+ /* We can remove _ReadChipVersion from ReadpadapterInfo8192C later. */
+ int ret = _SUCCESS;
+ u32 pageNums, remainSize;
+ u32 page, offset;
+ u8 *bufferPtr = (u8 *)buffer;
+
+ pageNums = size / MAX_PAGE_SIZE;
+ remainSize = size % MAX_PAGE_SIZE;
+
+ for (page = 0; page < pageNums; page++) {
+ offset = page * MAX_PAGE_SIZE;
+ ret = _PageWrite(padapter, page, bufferPtr+offset, MAX_PAGE_SIZE);
+
+ if (ret == _FAIL)
+ goto exit;
+ }
+ if (remainSize) {
+ offset = pageNums * MAX_PAGE_SIZE;
+ page = pageNums;
+ ret = _PageWrite(padapter, page, bufferPtr+offset, remainSize);
+
+ if (ret == _FAIL)
+ goto exit;
+ }
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("_WriteFW Done- for Normal chip.\n"));
+exit:
+ return ret;
+}
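+
+/*
+ * Editor's worked example (assumption: MAX_PAGE_SIZE is 4096 bytes for this
+ * chip): a hypothetical 13000-byte image is written as full pages 0-2 of 4096
+ * bytes each via _PageWrite(), followed by a final partial page 3 of 712 bytes.
+ */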
+
+void _8051Reset88E(struct adapter *padapter)
+{
+ u8 u1bTmp;
+
+ u1bTmp = rtw_read8(padapter, REG_SYS_FUNC_EN+1);
+ rtw_write8(padapter, REG_SYS_FUNC_EN+1, u1bTmp&(~BIT2));
+ rtw_write8(padapter, REG_SYS_FUNC_EN+1, u1bTmp|(BIT2));
+ DBG_88E("=====> _8051Reset88E(): 8051 reset success .\n");
+}
+
+static s32 _FWFreeToGo(struct adapter *padapter)
+{
+ u32 counter = 0;
+ u32 value32;
+
+ /* polling CheckSum report */
+ do {
+ value32 = rtw_read32(padapter, REG_MCUFWDL);
+ if (value32 & FWDL_ChkSum_rpt)
+ break;
+ } while (counter++ < POLLING_READY_TIMEOUT_COUNT);
+
+ if (counter >= POLLING_READY_TIMEOUT_COUNT) {
+ DBG_88E("%s: chksum report fail! REG_MCUFWDL:0x%08x\n", __func__, value32);
+ return _FAIL;
+ }
+ DBG_88E("%s: Checksum report OK! REG_MCUFWDL:0x%08x\n", __func__, value32);
+
+ value32 = rtw_read32(padapter, REG_MCUFWDL);
+ value32 |= MCUFWDL_RDY;
+ value32 &= ~WINTINI_RDY;
+ rtw_write32(padapter, REG_MCUFWDL, value32);
+
+ _8051Reset88E(padapter);
+
+ /* polling for FW ready */
+ counter = 0;
+ do {
+ value32 = rtw_read32(padapter, REG_MCUFWDL);
+ if (value32 & WINTINI_RDY) {
+ DBG_88E("%s: Polling FW ready success!! REG_MCUFWDL:0x%08x\n", __func__, value32);
+ return _SUCCESS;
+ }
+ rtw_udelay_os(5);
+ } while (counter++ < POLLING_READY_TIMEOUT_COUNT);
+
+ DBG_88E("%s: Polling FW ready fail!! REG_MCUFWDL:0x%08x\n", __func__, value32);
+ return _FAIL;
+}
+
+#define IS_FW_81xxC(padapter) (((GET_HAL_DATA(padapter))->FirmwareSignature & 0xFFF0) == 0x88C0)
+
+s32 rtl8188e_FirmwareDownload(struct adapter *padapter)
+{
+ s32 rtStatus = _SUCCESS;
+ u8 writeFW_retry = 0;
+ u32 fwdl_start_time;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+
+ u8 *FwImage;
+ u32 FwImageLen;
+ struct rt_firmware *pFirmware = NULL;
+ struct rt_firmware_hdr *pFwHdr = NULL;
+ u8 *pFirmwareBuf;
+ u32 FirmwareLen;
+
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("+%s\n", __func__));
+ pFirmware = (struct rt_firmware *)rtw_zmalloc(sizeof(struct rt_firmware));
+ if (!pFirmware) {
+ rtStatus = _FAIL;
+ goto Exit;
+ }
+
+ FwImage = (u8 *)Rtl8188E_FwImageArray;
+ FwImageLen = Rtl8188E_FWImgArrayLength;
+
+ pFirmware->eFWSource = FW_SOURCE_HEADER_FILE;
+
+ switch (pFirmware->eFWSource) {
+ case FW_SOURCE_IMG_FILE:
+ break;
+ case FW_SOURCE_HEADER_FILE:
+ if (FwImageLen > FW_8188E_SIZE) {
+ rtStatus = _FAIL;
+ RT_TRACE(_module_hal_init_c_, _drv_err_, ("Firmware size exceeds 0x%X. Check it.\n", FW_8188E_SIZE));
+ goto Exit;
+ }
+
+ pFirmware->szFwBuffer = FwImage;
+ pFirmware->ulFwLength = FwImageLen;
+ break;
+ }
+ pFirmwareBuf = pFirmware->szFwBuffer;
+ FirmwareLen = pFirmware->ulFwLength;
+ DBG_88E_LEVEL(_drv_info_, "+%s: !bUsedWoWLANFw, FirmwareLen:%d+\n", __func__, FirmwareLen);
+
+ /* Check the Fw header. Added by tynli. 2009.12.04. */
+ pFwHdr = (struct rt_firmware_hdr *)pFirmware->szFwBuffer;
+
+ pHalData->FirmwareVersion = le16_to_cpu(pFwHdr->Version);
+ pHalData->FirmwareSubVersion = pFwHdr->Subversion;
+ pHalData->FirmwareSignature = le16_to_cpu(pFwHdr->Signature);
+
+ DBG_88E("%s: fw_ver =%d fw_subver =%d sig = 0x%x\n",
+ __func__, pHalData->FirmwareVersion, pHalData->FirmwareSubVersion, pHalData->FirmwareSignature);
+
+ if (IS_FW_HEADER_EXIST(pFwHdr)) {
+ /* Shift 32 bytes for FW header */
+ pFirmwareBuf = pFirmwareBuf + 32;
+ FirmwareLen = FirmwareLen - 32;
+ }
+
+ /* Suggested by Filen. If 8051 is running RAM code, the driver should tell the Fw to reset itself, */
+ /* or the Fw download will fail. 2010.02.01. by tynli. */
+ if (rtw_read8(padapter, REG_MCUFWDL) & RAM_DL_SEL) { /* 8051 RAM code */
+ rtw_write8(padapter, REG_MCUFWDL, 0x00);
+ _8051Reset88E(padapter);
+ }
+
+ _FWDownloadEnable(padapter, true);
+ fwdl_start_time = rtw_get_current_time();
+ while (1) {
+ /* reset the FWDL chksum */
+ rtw_write8(padapter, REG_MCUFWDL, rtw_read8(padapter, REG_MCUFWDL) | FWDL_ChkSum_rpt);
+
+ rtStatus = _WriteFW(padapter, pFirmwareBuf, FirmwareLen);
+
+ if (rtStatus == _SUCCESS ||
+ (rtw_get_passing_time_ms(fwdl_start_time) > 500 && writeFW_retry++ >= 3))
+ break;
+
+ DBG_88E("%s writeFW_retry:%u, time after fwdl_start_time:%ums\n",
+ __func__, writeFW_retry, rtw_get_passing_time_ms(fwdl_start_time)
+ );
+ }
+ _FWDownloadEnable(padapter, false);
+ if (_SUCCESS != rtStatus) {
+ DBG_88E("DL Firmware failed!\n");
+ goto Exit;
+ }
+
+ rtStatus = _FWFreeToGo(padapter);
+ if (_SUCCESS != rtStatus) {
+ DBG_88E("DL Firmware failed!\n");
+ goto Exit;
+ }
+ RT_TRACE(_module_hal_init_c_, _drv_info_, ("Firmware is ready to run!\n"));
+
+Exit:
+
+ kfree(pFirmware);
+ return rtStatus;
+}
+
+void rtl8188e_InitializeFirmwareVars(struct adapter *padapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+
+ /* Init Fw LPS related. */
+ padapter->pwrctrlpriv.bFwCurrentInPSMode = false;
+
+ /* Init H2C counter. by tynli. 2009.12.09. */
+ pHalData->LastHMEBoxNum = 0;
+}
+
+static void rtl8188e_free_hal_data(struct adapter *padapter)
+{
+_func_enter_;
+ kfree(padapter->HalData);
+ padapter->HalData = NULL;
+_func_exit_;
+}
+
+/* */
+/* Efuse related code */
+/* */
+enum {
+ VOLTAGE_V25 = 0x03,
+ LDOE25_SHIFT = 28,
+};
+
+static bool
+hal_EfusePgPacketWrite2ByteHeader(
+ struct adapter *pAdapter,
+ u8 efuseType,
+ u16 *pAddr,
+ struct pgpkt *pTargetPkt,
+ bool bPseudoTest);
+static bool
+hal_EfusePgPacketWrite1ByteHeader(
+ struct adapter *pAdapter,
+ u8 efuseType,
+ u16 *pAddr,
+ struct pgpkt *pTargetPkt,
+ bool bPseudoTest);
+static bool
+hal_EfusePgPacketWriteData(
+ struct adapter *pAdapter,
+ u8 efuseType,
+ u16 *pAddr,
+ struct pgpkt *pTargetPkt,
+ bool bPseudoTest);
+
+static void
+hal_EfusePowerSwitch_RTL8188E(
+ struct adapter *pAdapter,
+ u8 bWrite,
+ u8 PwrState)
+{
+ u8 tempval;
+ u16 tmpV16;
+
+ if (PwrState) {
+ rtw_write8(pAdapter, REG_EFUSE_ACCESS, EFUSE_ACCESS_ON);
+
+ /* 1.2V Power: From VDDON with Power Cut(0x0000h[15]), default valid */
+ tmpV16 = rtw_read16(pAdapter, REG_SYS_ISO_CTRL);
+ if (!(tmpV16 & PWC_EV12V)) {
+ tmpV16 |= PWC_EV12V;
+ rtw_write16(pAdapter, REG_SYS_ISO_CTRL, tmpV16);
+ }
+ /* Reset: 0x0000h[28], default valid */
+ tmpV16 = rtw_read16(pAdapter, REG_SYS_FUNC_EN);
+ if (!(tmpV16 & FEN_ELDR)) {
+ tmpV16 |= FEN_ELDR;
+ rtw_write16(pAdapter, REG_SYS_FUNC_EN, tmpV16);
+ }
+
+ /* Clock: Gated(0x0008h[5]) 8M(0x0008h[1]) clock from ANA, default valid */
+ tmpV16 = rtw_read16(pAdapter, REG_SYS_CLKR);
+ if ((!(tmpV16 & LOADER_CLK_EN)) || (!(tmpV16 & ANA8M))) {
+ tmpV16 |= (LOADER_CLK_EN | ANA8M);
+ rtw_write16(pAdapter, REG_SYS_CLKR, tmpV16);
+ }
+
+ if (bWrite) {
+ /* Enable LDO 2.5V before read/write action */
+ tempval = rtw_read8(pAdapter, EFUSE_TEST+3);
+ tempval &= 0x0F;
+ tempval |= (VOLTAGE_V25 << 4);
+ rtw_write8(pAdapter, EFUSE_TEST+3, (tempval | 0x80));
+ }
+ } else {
+ rtw_write8(pAdapter, REG_EFUSE_ACCESS, EFUSE_ACCESS_OFF);
+
+ if (bWrite) {
+ /* Disable LDO 2.5V after read/write action */
+ tempval = rtw_read8(pAdapter, EFUSE_TEST+3);
+ rtw_write8(pAdapter, EFUSE_TEST+3, (tempval & 0x7F));
+ }
+ }
+}
+
+static void
+rtl8188e_EfusePowerSwitch(
+ struct adapter *pAdapter,
+ u8 bWrite,
+ u8 PwrState)
+{
+ hal_EfusePowerSwitch_RTL8188E(pAdapter, bWrite, PwrState);
+}
+
+
+static void Hal_EfuseReadEFuse88E(struct adapter *Adapter,
+ u16 _offset,
+ u16 _size_byte,
+ u8 *pbuf,
+ bool bPseudoTest
+ )
+{
+ u8 *efuseTbl = NULL;
+ u8 rtemp8[1];
+ u16 eFuse_Addr = 0;
+ u8 offset, wren;
+ u16 i, j;
+ u16 **eFuseWord = NULL;
+ u16 efuse_utilized = 0;
+ u8 u1temp = 0;
+
+ /* */
+ /* Do NOT exceed the total size of the EFuse table. Added by Roger, 2008.11.10. */
+ /* */
+ if ((_offset + _size_byte) > EFUSE_MAP_LEN_88E) {/* total E-Fuse table is 512bytes */
+ DBG_88E("Hal_EfuseReadEFuse88E(): Invalid offset(%#x) with read bytes(%#x)!!\n", _offset, _size_byte);
+ goto exit;
+ }
+
+ efuseTbl = (u8 *)rtw_zmalloc(EFUSE_MAP_LEN_88E);
+ if (efuseTbl == NULL) {
+ DBG_88E("%s: alloc efuseTbl fail!\n", __func__);
+ goto exit;
+ }
+
+ eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16));
+ if (eFuseWord == NULL) {
+ DBG_88E("%s: alloc eFuseWord fail!\n", __func__);
+ goto exit;
+ }
+
+ /* 0. Refresh the efuse init map as all 0xFF. */
+ for (i = 0; i < EFUSE_MAX_SECTION_88E; i++)
+ for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++)
+ eFuseWord[i][j] = 0xFFFF;
+
+ /* */
+ /* 1. Read the first byte to check if efuse is empty!!! */
+ /* */
+ /* */
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+ if (*rtemp8 != 0xFF) {
+ efuse_utilized++;
+ eFuse_Addr++;
+ } else {
+ DBG_88E("EFUSE is empty efuse_Addr-%d efuse_data =%x\n", eFuse_Addr, *rtemp8);
+ goto exit;
+ }
+
+ /* */
+ /* 2. Read real efuse content. Filter PG header and every section data. */
+ /* */
+ while ((*rtemp8 != 0xFF) && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E)) {
+ /* Check PG header for section num. */
+ if ((*rtemp8 & 0x1F) == 0x0F) { /* extended header */
+ u1temp = ((*rtemp8 & 0xE0) >> 5);
+
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+
+ if ((*rtemp8 & 0x0F) == 0x0F) {
+ eFuse_Addr++;
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+
+ if (*rtemp8 != 0xFF && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E))
+ eFuse_Addr++;
+ continue;
+ } else {
+ offset = ((*rtemp8 & 0xF0) >> 1) | u1temp;
+ wren = (*rtemp8 & 0x0F);
+ eFuse_Addr++;
+ }
+ } else {
+ offset = ((*rtemp8 >> 4) & 0x0f);
+ wren = (*rtemp8 & 0x0f);
+ }
+
+ if (offset < EFUSE_MAX_SECTION_88E) {
+ /* Get word enable value from PG header */
+
+ for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
+ /* Check word enable condition in the section */
+ if (!(wren & 0x01)) {
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+ eFuse_Addr++;
+ efuse_utilized++;
+ eFuseWord[offset][i] = (*rtemp8 & 0xff);
+ if (eFuse_Addr >= EFUSE_REAL_CONTENT_LEN_88E)
+ break;
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+ eFuse_Addr++;
+ efuse_utilized++;
+ eFuseWord[offset][i] |= (((u16)*rtemp8 << 8) & 0xff00);
+ if (eFuse_Addr >= EFUSE_REAL_CONTENT_LEN_88E)
+ break;
+ }
+ wren >>= 1;
+ }
+ }
+
+ /* Read next PG header */
+ ReadEFuseByte(Adapter, eFuse_Addr, rtemp8, bPseudoTest);
+
+ if (*rtemp8 != 0xFF && (eFuse_Addr < EFUSE_REAL_CONTENT_LEN_88E)) {
+ efuse_utilized++;
+ eFuse_Addr++;
+ }
+ }
+
+ /* 3. Collect 16 sections and 4 word unit into Efuse map. */
+ for (i = 0; i < EFUSE_MAX_SECTION_88E; i++) {
+ for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++) {
+ efuseTbl[(i*8)+(j*2)] = (eFuseWord[i][j] & 0xff);
+ efuseTbl[(i*8)+((j*2)+1)] = ((eFuseWord[i][j] >> 8) & 0xff);
+ }
+ }
+
+ /* 4. Copy from Efuse map to output pointer memory!!! */
+ for (i = 0; i < _size_byte; i++)
+ pbuf[i] = efuseTbl[_offset+i];
+
+ /* 5. Calculate Efuse utilization. */
+ rtw_hal_set_hwreg(Adapter, HW_VAR_EFUSE_BYTES, (u8 *)&eFuse_Addr);
+
+exit:
+ kfree(efuseTbl);
+
+ if (eFuseWord)
+ rtw_mfree2d((void *)eFuseWord, EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16));
+}
+
+static void ReadEFuseByIC(struct adapter *Adapter, u8 efuseType, u16 _offset, u16 _size_byte, u8 *pbuf, bool bPseudoTest)
+{
+ if (!bPseudoTest) {
+ int ret = _FAIL;
+ if (rtw_IOL_applied(Adapter)) {
+ rtw_hal_power_on(Adapter);
+
+ iol_mode_enable(Adapter, 1);
+ ret = iol_read_efuse(Adapter, 0, _offset, _size_byte, pbuf);
+ iol_mode_enable(Adapter, 0);
+
+ if (_SUCCESS == ret)
+ goto exit;
+ }
+ }
+ Hal_EfuseReadEFuse88E(Adapter, _offset, _size_byte, pbuf, bPseudoTest);
+
+exit:
+ return;
+}
+
+static void ReadEFuse_Pseudo(struct adapter *Adapter, u8 efuseType, u16 _offset, u16 _size_byte, u8 *pbuf, bool bPseudoTest)
+{
+ Hal_EfuseReadEFuse88E(Adapter, _offset, _size_byte, pbuf, bPseudoTest);
+}
+
+static void rtl8188e_ReadEFuse(struct adapter *Adapter, u8 efuseType,
+ u16 _offset, u16 _size_byte, u8 *pbuf,
+ bool bPseudoTest)
+{
+ if (bPseudoTest)
+ ReadEFuse_Pseudo(Adapter, efuseType, _offset, _size_byte, pbuf, bPseudoTest);
+ else
+ ReadEFuseByIC(Adapter, efuseType, _offset, _size_byte, pbuf, bPseudoTest);
+}
+
+/* Do not support BT */
+static void Hal_EFUSEGetEfuseDefinition88E(struct adapter *pAdapter, u8 efuseType, u8 type, void *pOut)
+{
+ switch (type) {
+ case TYPE_EFUSE_MAX_SECTION:
+ {
+ u8 *pMax_section;
+ pMax_section = (u8 *)pOut;
+ *pMax_section = EFUSE_MAX_SECTION_88E;
+ }
+ break;
+ case TYPE_EFUSE_REAL_CONTENT_LEN:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+ *pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
+ }
+ break;
+ case TYPE_EFUSE_CONTENT_LEN_BANK:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+ *pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
+ }
+ break;
+ case TYPE_AVAILABLE_EFUSE_BYTES_BANK:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+ *pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E-EFUSE_OOB_PROTECT_BYTES_88E);
+ }
+ break;
+ case TYPE_AVAILABLE_EFUSE_BYTES_TOTAL:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+ *pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E-EFUSE_OOB_PROTECT_BYTES_88E);
+ }
+ break;
+ case TYPE_EFUSE_MAP_LEN:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+ *pu2Tmp = (u16)EFUSE_MAP_LEN_88E;
+ }
+ break;
+ case TYPE_EFUSE_PROTECT_BYTES_BANK:
+ {
+ u8 *pu1Tmp;
+ pu1Tmp = (u8 *)pOut;
+ *pu1Tmp = (u8)(EFUSE_OOB_PROTECT_BYTES_88E);
+ }
+ break;
+ default:
+ {
+ u8 *pu1Tmp;
+ pu1Tmp = (u8 *)pOut;
+ *pu1Tmp = 0;
+ }
+ break;
+ }
+}
+
+static void Hal_EFUSEGetEfuseDefinition_Pseudo88E(struct adapter *pAdapter, u8 efuseType, u8 type, void *pOut)
+{
+ switch (type) {
+ case TYPE_EFUSE_MAX_SECTION:
+ {
+ u8 *pMax_section;
+ pMax_section = (u8 *)pOut;
+ *pMax_section = EFUSE_MAX_SECTION_88E;
+ }
+ break;
+ case TYPE_EFUSE_REAL_CONTENT_LEN:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+ *pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
+ }
+ break;
+ case TYPE_EFUSE_CONTENT_LEN_BANK:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+ *pu2Tmp = EFUSE_REAL_CONTENT_LEN_88E;
+ }
+ break;
+ case TYPE_AVAILABLE_EFUSE_BYTES_BANK:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+ *pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E-EFUSE_OOB_PROTECT_BYTES_88E);
+ }
+ break;
+ case TYPE_AVAILABLE_EFUSE_BYTES_TOTAL:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+ *pu2Tmp = (u16)(EFUSE_REAL_CONTENT_LEN_88E-EFUSE_OOB_PROTECT_BYTES_88E);
+ }
+ break;
+ case TYPE_EFUSE_MAP_LEN:
+ {
+ u16 *pu2Tmp;
+ pu2Tmp = (u16 *)pOut;
+ *pu2Tmp = (u16)EFUSE_MAP_LEN_88E;
+ }
+ break;
+ case TYPE_EFUSE_PROTECT_BYTES_BANK:
+ {
+ u8 *pu1Tmp;
+ pu1Tmp = (u8 *)pOut;
+ *pu1Tmp = (u8)(EFUSE_OOB_PROTECT_BYTES_88E);
+ }
+ break;
+ default:
+ {
+ u8 *pu1Tmp;
+ pu1Tmp = (u8 *)pOut;
+ *pu1Tmp = 0;
+ }
+ break;
+ }
+}
+
+static void rtl8188e_EFUSE_GetEfuseDefinition(struct adapter *pAdapter, u8 efuseType, u8 type, void *pOut, bool bPseudoTest)
+{
+ if (bPseudoTest)
+ Hal_EFUSEGetEfuseDefinition_Pseudo88E(pAdapter, efuseType, type, pOut);
+ else
+ Hal_EFUSEGetEfuseDefinition88E(pAdapter, efuseType, type, pOut);
+}
+
+static u8 Hal_EfuseWordEnableDataWrite(struct adapter *pAdapter, u16 efuse_addr, u8 word_en, u8 *data, bool bPseudoTest)
+{
+ u16 tmpaddr = 0;
+ u16 start_addr = efuse_addr;
+ u8 badworden = 0x0F;
+ u8 tmpdata[8];
+
+ _rtw_memset((void *)tmpdata, 0xff, PGPKT_DATA_SIZE);
+
+ if (!(word_en&BIT0)) {
+ tmpaddr = start_addr;
+ efuse_OneByteWrite(pAdapter, start_addr++, data[0], bPseudoTest);
+ efuse_OneByteWrite(pAdapter, start_addr++, data[1], bPseudoTest);
+
+ efuse_OneByteRead(pAdapter, tmpaddr, &tmpdata[0], bPseudoTest);
+ efuse_OneByteRead(pAdapter, tmpaddr+1, &tmpdata[1], bPseudoTest);
+ if ((data[0] != tmpdata[0]) || (data[1] != tmpdata[1]))
+ badworden &= (~BIT0);
+ }
+ if (!(word_en&BIT1)) {
+ tmpaddr = start_addr;
+ efuse_OneByteWrite(pAdapter, start_addr++, data[2], bPseudoTest);
+ efuse_OneByteWrite(pAdapter, start_addr++, data[3], bPseudoTest);
+
+ efuse_OneByteRead(pAdapter, tmpaddr, &tmpdata[2], bPseudoTest);
+ efuse_OneByteRead(pAdapter, tmpaddr+1, &tmpdata[3], bPseudoTest);
+ if ((data[2] != tmpdata[2]) || (data[3] != tmpdata[3]))
+ badworden &= (~BIT1);
+ }
+ if (!(word_en&BIT2)) {
+ tmpaddr = start_addr;
+ efuse_OneByteWrite(pAdapter, start_addr++, data[4], bPseudoTest);
+ efuse_OneByteWrite(pAdapter, start_addr++, data[5], bPseudoTest);
+
+ efuse_OneByteRead(pAdapter, tmpaddr, &tmpdata[4], bPseudoTest);
+ efuse_OneByteRead(pAdapter, tmpaddr+1, &tmpdata[5], bPseudoTest);
+ if ((data[4] != tmpdata[4]) || (data[5] != tmpdata[5]))
+ badworden &= (~BIT2);
+ }
+ if (!(word_en&BIT3)) {
+ tmpaddr = start_addr;
+ efuse_OneByteWrite(pAdapter, start_addr++, data[6], bPseudoTest);
+ efuse_OneByteWrite(pAdapter, start_addr++, data[7], bPseudoTest);
+
+ efuse_OneByteRead(pAdapter, tmpaddr, &tmpdata[6], bPseudoTest);
+ efuse_OneByteRead(pAdapter, tmpaddr+1, &tmpdata[7], bPseudoTest);
+ if ((data[6] != tmpdata[6]) || (data[7] != tmpdata[7]))
+ badworden &= (~BIT3);
+ }
+ return badworden;
+}
+
+static u8 Hal_EfuseWordEnableDataWrite_Pseudo(struct adapter *pAdapter, u16 efuse_addr, u8 word_en, u8 *data, bool bPseudoTest)
+{
+ u8 ret;
+
+ ret = Hal_EfuseWordEnableDataWrite(pAdapter, efuse_addr, word_en, data, bPseudoTest);
+ return ret;
+}
+
+static u8 rtl8188e_Efuse_WordEnableDataWrite(struct adapter *pAdapter, u16 efuse_addr, u8 word_en, u8 *data, bool bPseudoTest)
+{
+ u8 ret = 0;
+
+ if (bPseudoTest)
+ ret = Hal_EfuseWordEnableDataWrite_Pseudo(pAdapter, efuse_addr, word_en, data, bPseudoTest);
+ else
+ ret = Hal_EfuseWordEnableDataWrite(pAdapter, efuse_addr, word_en, data, bPseudoTest);
+ return ret;
+}
+
+static u16 hal_EfuseGetCurrentSize_8188e(struct adapter *pAdapter, bool bPseudoTest)
+{
+ int bContinual = true;
+ u16 efuse_addr = 0;
+ u8 hoffset = 0, hworden = 0;
+ u8 efuse_data, word_cnts = 0;
+
+ if (bPseudoTest)
+ efuse_addr = (u16)(fakeEfuseUsedBytes);
+ else
+ rtw_hal_get_hwreg(pAdapter, HW_VAR_EFUSE_BYTES, (u8 *)&efuse_addr);
+
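+ /* Walk the physical efuse header by header until an unused (0xFF) byte is hit. A 1-byte */
+ /* header carries the section offset in its high nibble and word_en in its low nibble; a */
+ /* low nibble of 0xF marks a 2-byte extended header, whose offset is rebuilt from the top */
+ /* three bits of the first byte and the high nibble of the second byte. */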
+ while (bContinual &&
+ efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data, bPseudoTest) &&
+ AVAILABLE_EFUSE_ADDR(efuse_addr)) {
+ if (efuse_data != 0xFF) {
+ if ((efuse_data&0x1F) == 0x0F) { /* extended header */
+ hoffset = efuse_data;
+ efuse_addr++;
+ efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data, bPseudoTest);
+ if ((efuse_data & 0x0F) == 0x0F) {
+ efuse_addr++;
+ continue;
+ } else {
+ hoffset = ((hoffset & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
+ hworden = efuse_data & 0x0F;
+ }
+ } else {
+ hoffset = (efuse_data>>4) & 0x0F;
+ hworden = efuse_data & 0x0F;
+ }
+ word_cnts = Efuse_CalculateWordCnts(hworden);
+ /* read next header */
+ efuse_addr = efuse_addr + (word_cnts*2)+1;
+ } else {
+ bContinual = false;
+ }
+ }
+
+ if (bPseudoTest)
+ fakeEfuseUsedBytes = efuse_addr;
+ else
+ rtw_hal_set_hwreg(pAdapter, HW_VAR_EFUSE_BYTES, (u8 *)&efuse_addr);
+
+ return efuse_addr;
+}
+
+static u16 Hal_EfuseGetCurrentSize_Pseudo(struct adapter *pAdapter, bool bPseudoTest)
+{
+ u16 ret = 0;
+
+ ret = hal_EfuseGetCurrentSize_8188e(pAdapter, bPseudoTest);
+ return ret;
+}
+
+static u16 rtl8188e_EfuseGetCurrentSize(struct adapter *pAdapter, u8 efuseType, bool bPseudoTest)
+{
+ u16 ret = 0;
+
+ if (bPseudoTest)
+ ret = Hal_EfuseGetCurrentSize_Pseudo(pAdapter, bPseudoTest);
+ else
+ ret = hal_EfuseGetCurrentSize_8188e(pAdapter, bPseudoTest);
+ return ret;
+}
+
+static int hal_EfusePgPacketRead_8188e(struct adapter *pAdapter, u8 offset, u8 *data, bool bPseudoTest)
+{
+ u8 ReadState = PG_STATE_HEADER;
+ int bContinual = true;
+ int bDataEmpty = true;
+ u8 efuse_data, word_cnts = 0;
+ u16 efuse_addr = 0;
+ u8 hoffset = 0, hworden = 0;
+ u8 tmpidx = 0;
+ u8 tmpdata[8];
+ u8 max_section = 0;
+ u8 tmp_header = 0;
+
+ EFUSE_GetEfuseDefinition(pAdapter, EFUSE_WIFI, TYPE_EFUSE_MAX_SECTION, (void *)&max_section, bPseudoTest);
+
+ if (data == NULL)
+ return false;
+ if (offset > max_section)
+ return false;
+
+ _rtw_memset((void *)data, 0xff, sizeof(u8)*PGPKT_DATA_SIZE);
+ _rtw_memset((void *)tmpdata, 0xff, sizeof(u8)*PGPKT_DATA_SIZE);
+
+ /* <Roger_TODO> The efuse has been pre-programmed with 5 dummy bytes at the end by CP. */
+ /* Skip dummy parts to prevent unexpected data read from Efuse. */
+ /* By pass right now. 2009.02.19. */
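+ /* Two-state scan: in PG_STATE_HEADER, decode each header (1-byte or extended) and, when the */
+ /* section offset matches the requested one and the data behind it is not all 0xFF, buffer the */
+ /* raw words and switch to PG_STATE_DATA, where efuse_WordEnableDataRead() merges only the */
+ /* enabled words into the caller's buffer before returning to header scanning. */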
+ while (bContinual && AVAILABLE_EFUSE_ADDR(efuse_addr)) {
+ /* Header Read ------------- */
+ if (ReadState & PG_STATE_HEADER) {
+ if (efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data, bPseudoTest) && (efuse_data != 0xFF)) {
+ if (EXT_HEADER(efuse_data)) {
+ tmp_header = efuse_data;
+ efuse_addr++;
+ efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data, bPseudoTest);
+ if (!ALL_WORDS_DISABLED(efuse_data)) {
+ hoffset = ((tmp_header & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
+ hworden = efuse_data & 0x0F;
+ } else {
+ DBG_88E("Error, All words disabled\n");
+ efuse_addr++;
+ continue;
+ }
+ } else {
+ hoffset = (efuse_data>>4) & 0x0F;
+ hworden = efuse_data & 0x0F;
+ }
+ word_cnts = Efuse_CalculateWordCnts(hworden);
+ bDataEmpty = true;
+
+ if (hoffset == offset) {
+ for (tmpidx = 0; tmpidx < word_cnts*2; tmpidx++) {
+ if (efuse_OneByteRead(pAdapter, efuse_addr+1+tmpidx, &efuse_data, bPseudoTest)) {
+ tmpdata[tmpidx] = efuse_data;
+ if (efuse_data != 0xff)
+ bDataEmpty = false;
+ }
+ }
+ if (bDataEmpty == false) {
+ ReadState = PG_STATE_DATA;
+ } else {/* read next header */
+ efuse_addr = efuse_addr + (word_cnts*2)+1;
+ ReadState = PG_STATE_HEADER;
+ }
+ } else {/* read next header */
+ efuse_addr = efuse_addr + (word_cnts*2)+1;
+ ReadState = PG_STATE_HEADER;
+ }
+ } else {
+ bContinual = false;
+ }
+ } else if (ReadState & PG_STATE_DATA) {
+ /* Data section Read ------------- */
+ efuse_WordEnableDataRead(hworden, tmpdata, data);
+ efuse_addr = efuse_addr + (word_cnts*2)+1;
+ ReadState = PG_STATE_HEADER;
+ }
+
+ }
+
+ if ((data[0] == 0xff) && (data[1] == 0xff) && (data[2] == 0xff) && (data[3] == 0xff) &&
+ (data[4] == 0xff) && (data[5] == 0xff) && (data[6] == 0xff) && (data[7] == 0xff))
+ return false;
+ else
+ return true;
+}
+
+static int Hal_EfusePgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data, bool bPseudoTest)
+{
+ int ret;
+
+ ret = hal_EfusePgPacketRead_8188e(pAdapter, offset, data, bPseudoTest);
+ return ret;
+}
+
+static int Hal_EfusePgPacketRead_Pseudo(struct adapter *pAdapter, u8 offset, u8 *data, bool bPseudoTest)
+{
+ int ret;
+
+ ret = hal_EfusePgPacketRead_8188e(pAdapter, offset, data, bPseudoTest);
+ return ret;
+}
+
+static int rtl8188e_Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data, bool bPseudoTest)
+{
+ int ret;
+
+ if (bPseudoTest)
+ ret = Hal_EfusePgPacketRead_Pseudo(pAdapter, offset, data, bPseudoTest);
+ else
+ ret = Hal_EfusePgPacketRead(pAdapter, offset, data, bPseudoTest);
+ return ret;
+}
+
+static bool hal_EfuseFixHeaderProcess(struct adapter *pAdapter, u8 efuseType, struct pgpkt *pFixPkt, u16 *pAddr, bool bPseudoTest)
+{
+ u8 originaldata[8], badworden = 0;
+ u16 efuse_addr = *pAddr;
+ u32 PgWriteSuccess = 0;
+
+ _rtw_memset((void *)originaldata, 0xff, 8);
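+ /* Recover from a corrupted header: re-read the logical packet at pFixPkt->offset and program */
+ /* its words again right behind the bad header; if some words still fail verification, fall */
+ /* back to Efuse_PgPacketWrite() for the bad words, otherwise just step past the packet. */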
+
+ if (Efuse_PgPacketRead(pAdapter, pFixPkt->offset, originaldata, bPseudoTest)) {
+ /* check if data exist */
+ badworden = Efuse_WordEnableDataWrite(pAdapter, efuse_addr+1, pFixPkt->word_en, originaldata, bPseudoTest);
+
+ if (badworden != 0xf) { /* write fail */
+ PgWriteSuccess = Efuse_PgPacketWrite(pAdapter, pFixPkt->offset, badworden, originaldata, bPseudoTest);
+
+ if (!PgWriteSuccess)
+ return false;
+ else
+ efuse_addr = Efuse_GetCurrentSize(pAdapter, efuseType, bPseudoTest);
+ } else {
+ efuse_addr = efuse_addr + (pFixPkt->word_cnts*2) + 1;
+ }
+ } else {
+ efuse_addr = efuse_addr + (pFixPkt->word_cnts*2) + 1;
+ }
+ *pAddr = efuse_addr;
+ return true;
+}
+
+static bool hal_EfusePgPacketWrite2ByteHeader(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt, bool bPseudoTest)
+{
+ bool bRet = false;
+ u16 efuse_addr = *pAddr, efuse_max_available_len = 0;
+ u8 pg_header = 0, tmp_header = 0, pg_header_temp = 0;
+ u8 repeatcnt = 0;
+
+ EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_AVAILABLE_EFUSE_BYTES_BANK, (void *)&efuse_max_available_len, bPseudoTest);
+
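+ /* Offsets too large for the single-byte header (which only has a 4-bit offset field) use a */
+ /* 2-byte extended header: byte 0 carries offset bits 0-2 in its top three bits with the low */
+ /* nibble fixed at 0xF, byte 1 carries offset bits 3-6 in its high nibble and word_en in its */
+ /* low nibble. Each byte is written and read back, retrying while the cell still reads 0xFF, */
+ /* up to EFUSE_REPEAT_THRESHOLD_ attempts. */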
+ while (efuse_addr < efuse_max_available_len) {
+ pg_header = ((pTargetPkt->offset & 0x07) << 5) | 0x0F;
+ efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
+ efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
+
+ while (tmp_header == 0xFF) {
+ if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_)
+ return false;
+
+ efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
+ efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
+ }
+
+ /* to write ext_header */
+ if (tmp_header == pg_header) {
+ efuse_addr++;
+ pg_header_temp = pg_header;
+ pg_header = ((pTargetPkt->offset & 0x78) << 1) | pTargetPkt->word_en;
+
+ efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
+ efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
+
+ while (tmp_header == 0xFF) {
+ if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_)
+ return false;
+
+ efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
+ efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
+ }
+
+ if ((tmp_header & 0x0F) == 0x0F) { /* word_en PG fail */
+ if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_) {
+ return false;
+ } else {
+ efuse_addr++;
+ continue;
+ }
+ } else if (pg_header != tmp_header) { /* offset PG fail */
+ struct pgpkt fixPkt;
+ fixPkt.offset = ((pg_header_temp & 0xE0) >> 5) | ((tmp_header & 0xF0) >> 1);
+ fixPkt.word_en = tmp_header & 0x0F;
+ fixPkt.word_cnts = Efuse_CalculateWordCnts(fixPkt.word_en);
+ if (!hal_EfuseFixHeaderProcess(pAdapter, efuseType, &fixPkt, &efuse_addr, bPseudoTest))
+ return false;
+ } else {
+ bRet = true;
+ break;
+ }
+ } else if ((tmp_header & 0x1F) == 0x0F) { /* wrong extended header */
+ efuse_addr += 2;
+ continue;
+ }
+ }
+
+ *pAddr = efuse_addr;
+ return bRet;
+}
+
+static bool hal_EfusePgPacketWrite1ByteHeader(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt, bool bPseudoTest)
+{
+ bool bRet = false;
+ u8 pg_header = 0, tmp_header = 0;
+ u16 efuse_addr = *pAddr;
+ u8 repeatcnt = 0;
+
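+ /* Single-byte header: offset in the high nibble, word_en in the low nibble. Write it, read it */
+ /* back, and retry while the cell still reads 0xFF; if a different header comes back, the spot */
+ /* was already used, so repair it via hal_EfuseFixHeaderProcess(). */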
+ pg_header = ((pTargetPkt->offset << 4) & 0xf0) | pTargetPkt->word_en;
+
+ efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
+ efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
+
+ while (tmp_header == 0xFF) {
+ if (repeatcnt++ > EFUSE_REPEAT_THRESHOLD_)
+ return false;
+ efuse_OneByteWrite(pAdapter, efuse_addr, pg_header, bPseudoTest);
+ efuse_OneByteRead(pAdapter, efuse_addr, &tmp_header, bPseudoTest);
+ }
+
+ if (pg_header == tmp_header) {
+ bRet = true;
+ } else {
+ struct pgpkt fixPkt;
+ fixPkt.offset = (tmp_header>>4) & 0x0F;
+ fixPkt.word_en = tmp_header & 0x0F;
+ fixPkt.word_cnts = Efuse_CalculateWordCnts(fixPkt.word_en);
+ if (!hal_EfuseFixHeaderProcess(pAdapter, efuseType, &fixPkt, &efuse_addr, bPseudoTest))
+ return false;
+ }
+
+ *pAddr = efuse_addr;
+ return bRet;
+}
+
+static bool hal_EfusePgPacketWriteData(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt, bool bPseudoTest)
+{
+ bool bRet = false;
+ u16 efuse_addr = *pAddr;
+ u8 badworden = 0;
+ u32 PgWriteSuccess = 0;
+
+ badworden = 0x0f;
+ badworden = Efuse_WordEnableDataWrite(pAdapter, efuse_addr+1, pTargetPkt->word_en, pTargetPkt->data, bPseudoTest);
+ if (badworden == 0x0F) {
+ /* write ok */
+ return true;
+ } else {
+ /* reorganize other pg packet */
+ PgWriteSuccess = Efuse_PgPacketWrite(pAdapter, pTargetPkt->offset, badworden, pTargetPkt->data, bPseudoTest);
+ if (!PgWriteSuccess)
+ return false;
+ else
+ return true;
+ }
+ return bRet;
+}
+
+static bool
+hal_EfusePgPacketWriteHeader(
+ struct adapter *pAdapter,
+ u8 efuseType,
+ u16 *pAddr,
+ struct pgpkt *pTargetPkt,
+ bool bPseudoTest)
+{
+ bool bRet = false;
+
+ if (pTargetPkt->offset >= EFUSE_MAX_SECTION_BASE)
+ bRet = hal_EfusePgPacketWrite2ByteHeader(pAdapter, efuseType, pAddr, pTargetPkt, bPseudoTest);
+ else
+ bRet = hal_EfusePgPacketWrite1ByteHeader(pAdapter, efuseType, pAddr, pTargetPkt, bPseudoTest);
+
+ return bRet;
+}
+
+static bool wordEnMatched(struct pgpkt *pTargetPkt, struct pgpkt *pCurPkt,
+ u8 *pWden)
+{
+ u8 match_word_en = 0x0F; /* default all words are disabled */
+
+ /* check which words are enabled in both the target and the current PG packet */
+ if (((pTargetPkt->word_en & BIT0) == 0) &&
+ ((pCurPkt->word_en & BIT0) == 0))
+ match_word_en &= ~BIT0; /* enable word 0 */
+ if (((pTargetPkt->word_en & BIT1) == 0) &&
+ ((pCurPkt->word_en & BIT1) == 0))
+ match_word_en &= ~BIT1; /* enable word 1 */
+ if (((pTargetPkt->word_en & BIT2) == 0) &&
+ ((pCurPkt->word_en & BIT2) == 0))
+ match_word_en &= ~BIT2; /* enable word 2 */
+ if (((pTargetPkt->word_en & BIT3) == 0) &&
+ ((pCurPkt->word_en & BIT3) == 0))
+ match_word_en &= ~BIT3; /* enable word 3 */
+
+ *pWden = match_word_en;
+
+ if (match_word_en != 0xf)
+ return true;
+ else
+ return false;
+}
+
+static bool hal_EfuseCheckIfDatafollowed(struct adapter *pAdapter, u8 word_cnts, u16 startAddr, bool bPseudoTest)
+{
+ bool bRet = false;
+ u8 i, efuse_data;
+
+ for (i = 0; i < (word_cnts*2); i++) {
+ if (efuse_OneByteRead(pAdapter, (startAddr+i), &efuse_data, bPseudoTest) && (efuse_data != 0xFF))
+ bRet = true;
+ }
+ return bRet;
+}
+
+static bool hal_EfusePartialWriteCheck(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt, bool bPseudoTest)
+{
+ bool bRet = false;
+ u8 i, efuse_data = 0, cur_header = 0;
+ u8 matched_wden = 0, badworden = 0;
+ u16 startAddr = 0, efuse_max_available_len = 0, efuse_max = 0;
+ struct pgpkt curPkt;
+
+ EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_AVAILABLE_EFUSE_BYTES_BANK, (void *)&efuse_max_available_len, bPseudoTest);
+ EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_EFUSE_REAL_CONTENT_LEN, (void *)&efuse_max, bPseudoTest);
+
+ if (efuseType == EFUSE_WIFI) {
+ if (bPseudoTest) {
+ startAddr = (u16)(fakeEfuseUsedBytes%EFUSE_REAL_CONTENT_LEN);
+ } else {
+ rtw_hal_get_hwreg(pAdapter, HW_VAR_EFUSE_BYTES, (u8 *)&startAddr);
+ startAddr %= EFUSE_REAL_CONTENT_LEN;
+ }
+ } else {
+ if (bPseudoTest)
+ startAddr = (u16)(fakeBTEfuseUsedBytes%EFUSE_REAL_CONTENT_LEN);
+ else
+ startAddr = (u16)(BTEfuseUsedBytes%EFUSE_REAL_CONTENT_LEN);
+ }
+
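+ /* Scan forward from the current used size. If a header with the same section offset exists */
+ /* but no data follows it, and the enabled words overlap the target packet, write the */
+ /* overlapping words directly behind that header and mark them as done in the target packet; */
+ /* otherwise the first unused (0xFF) position becomes the place for a fresh header. */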
+ while (1) {
+ if (startAddr >= efuse_max_available_len) {
+ bRet = false;
+ break;
+ }
+
+ if (efuse_OneByteRead(pAdapter, startAddr, &efuse_data, bPseudoTest) && (efuse_data != 0xFF)) {
+ if (EXT_HEADER(efuse_data)) {
+ cur_header = efuse_data;
+ startAddr++;
+ efuse_OneByteRead(pAdapter, startAddr, &efuse_data, bPseudoTest);
+ if (ALL_WORDS_DISABLED(efuse_data)) {
+ bRet = false;
+ break;
+ } else {
+ curPkt.offset = ((cur_header & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
+ curPkt.word_en = efuse_data & 0x0F;
+ }
+ } else {
+ cur_header = efuse_data;
+ curPkt.offset = (cur_header>>4) & 0x0F;
+ curPkt.word_en = cur_header & 0x0F;
+ }
+
+ curPkt.word_cnts = Efuse_CalculateWordCnts(curPkt.word_en);
+ /* if the same header is found but no data follows it, */
+ /* write the matching part of the data right after that header. */
+ if ((curPkt.offset == pTargetPkt->offset) &&
+ (!hal_EfuseCheckIfDatafollowed(pAdapter, curPkt.word_cnts, startAddr+1, bPseudoTest)) &&
+ wordEnMatched(pTargetPkt, &curPkt, &matched_wden)) {
+ /* Here to write partial data */
+ badworden = Efuse_WordEnableDataWrite(pAdapter, startAddr+1, matched_wden, pTargetPkt->data, bPseudoTest);
+ if (badworden != 0x0F) {
+ u32 PgWriteSuccess = 0;
+ /* if write fail on some words, write these bad words again */
+
+ PgWriteSuccess = Efuse_PgPacketWrite(pAdapter, pTargetPkt->offset, badworden, pTargetPkt->data, bPseudoTest);
+
+ if (!PgWriteSuccess) {
+ bRet = false; /* write fail, return */
+ break;
+ }
+ }
+ /* partial write ok, update the target packet for later use */
+ for (i = 0; i < 4; i++) {
+ if ((matched_wden & (0x1<<i)) == 0) /* this word has been written */
+ pTargetPkt->word_en |= (0x1<<i); /* disable the word */
+ }
+ pTargetPkt->word_cnts = Efuse_CalculateWordCnts(pTargetPkt->word_en);
+ }
+ /* read from next header */
+ startAddr = startAddr + (curPkt.word_cnts*2) + 1;
+ } else {
+ /* not used header, 0xff */
+ *pAddr = startAddr;
+ bRet = true;
+ break;
+ }
+ }
+ return bRet;
+}
+
+static bool
+hal_EfusePgCheckAvailableAddr(
+ struct adapter *pAdapter,
+ u8 efuseType,
+ bool bPseudoTest
+ )
+{
+ u16 efuse_max_available_len = 0;
+
+ /* Check TYPE_EFUSE_MAP_LEN instead, because the 8188E raw efuse is 256 bytes while the logical map is larger than 256. */
+ EFUSE_GetEfuseDefinition(pAdapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&efuse_max_available_len, false);
+
+ if (Efuse_GetCurrentSize(pAdapter, efuseType, bPseudoTest) >= efuse_max_available_len)
+ return false;
+ return true;
+}
+
+static void hal_EfuseConstructPGPkt(u8 offset, u8 word_en, u8 *pData, struct pgpkt *pTargetPkt)
+{
+ _rtw_memset((void *)pTargetPkt->data, 0xFF, sizeof(u8)*8);
+ pTargetPkt->offset = offset;
+ pTargetPkt->word_en = word_en;
+ efuse_WordEnableDataRead(word_en, pData, pTargetPkt->data);
+ pTargetPkt->word_cnts = Efuse_CalculateWordCnts(pTargetPkt->word_en);
+}
+
+static bool hal_EfusePgPacketWrite_8188e(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *pData, bool bPseudoTest)
+{
+ struct pgpkt targetPkt;
+ u16 startAddr = 0;
+ u8 efuseType = EFUSE_WIFI;
+
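+ /* Programming one packet is a four step sequence: make sure the logical map is not already */
+ /* full, build the pg packet from offset/word_en/data, let the partial-write check reuse an */
+ /* existing header when possible (updating the start address), then write the header and the */
+ /* remaining data words at that address. */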
+ if (!hal_EfusePgCheckAvailableAddr(pAdapter, efuseType, bPseudoTest))
+ return false;
+
+ hal_EfuseConstructPGPkt(offset, word_en, pData, &targetPkt);
+
+ if (!hal_EfusePartialWriteCheck(pAdapter, efuseType, &startAddr, &targetPkt, bPseudoTest))
+ return false;
+
+ if (!hal_EfusePgPacketWriteHeader(pAdapter, efuseType, &startAddr, &targetPkt, bPseudoTest))
+ return false;
+
+ if (!hal_EfusePgPacketWriteData(pAdapter, efuseType, &startAddr, &targetPkt, bPseudoTest))
+ return false;
+
+ return true;
+}
+
+static int Hal_EfusePgPacketWrite_Pseudo(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool bPseudoTest)
+{
+ int ret;
+
+ ret = hal_EfusePgPacketWrite_8188e(pAdapter, offset, word_en, data, bPseudoTest);
+ return ret;
+}
+
+static int Hal_EfusePgPacketWrite(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool bPseudoTest)
+{
+ int ret = 0;
+ ret = hal_EfusePgPacketWrite_8188e(pAdapter, offset, word_en, data, bPseudoTest);
+
+ return ret;
+}
+
+static int rtl8188e_Efuse_PgPacketWrite(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool bPseudoTest)
+{
+ int ret;
+
+ if (bPseudoTest)
+ ret = Hal_EfusePgPacketWrite_Pseudo(pAdapter, offset, word_en, data, bPseudoTest);
+ else
+ ret = Hal_EfusePgPacketWrite(pAdapter, offset, word_en, data, bPseudoTest);
+ return ret;
+}
+
+static struct HAL_VERSION ReadChipVersion8188E(struct adapter *padapter)
+{
+ u32 value32;
+ struct HAL_VERSION ChipVersion;
+ struct hal_data_8188e *pHalData;
+
+ pHalData = GET_HAL_DATA(padapter);
+
+ value32 = rtw_read32(padapter, REG_SYS_CFG);
+ ChipVersion.ICType = CHIP_8188E;
+ ChipVersion.ChipType = ((value32 & RTL_ID) ? TEST_CHIP : NORMAL_CHIP);
+
+ ChipVersion.RFType = RF_TYPE_1T1R;
+ ChipVersion.VendorType = ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : CHIP_VENDOR_TSMC);
+ ChipVersion.CUTVersion = (value32 & CHIP_VER_RTL_MASK)>>CHIP_VER_RTL_SHIFT; /* IC version (CUT) */
+
+ /* For regulator mode. by tynli. 2011.01.14 */
+ pHalData->RegulatorMode = ((value32 & TRP_BT_EN) ? RT_LDO_REGULATOR : RT_SWITCHING_REGULATOR);
+
+ ChipVersion.ROMVer = 0; /* ROM code version. */
+ pHalData->MultiFunc = RT_MULTI_FUNC_NONE;
+
+ dump_chip_info(ChipVersion);
+
+ pHalData->VersionID = ChipVersion;
+
+ if (IS_1T2R(ChipVersion)) {
+ pHalData->rf_type = RF_1T2R;
+ pHalData->NumTotalRFPath = 2;
+ } else if (IS_2T2R(ChipVersion)) {
+ pHalData->rf_type = RF_2T2R;
+ pHalData->NumTotalRFPath = 2;
+ } else {
+ pHalData->rf_type = RF_1T1R;
+ pHalData->NumTotalRFPath = 1;
+ }
+
+ MSG_88E("RF_Type is %x!!\n", pHalData->rf_type);
+
+ return ChipVersion;
+}
+
+static void rtl8188e_read_chip_version(struct adapter *padapter)
+{
+ ReadChipVersion8188E(padapter);
+}
+
+static void rtl8188e_GetHalODMVar(struct adapter *Adapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet)
+{
+}
+
+static void rtl8188e_SetHalODMVar(struct adapter *Adapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct odm_dm_struct *podmpriv = &pHalData->odmpriv;
+ switch (eVariable) {
+ case HAL_ODM_STA_INFO:
+ {
+ struct sta_info *psta = (struct sta_info *)pValue1;
+ if (bSet) {
+ DBG_88E("### Set STA_(%d) info\n", psta->mac_id);
+ ODM_CmnInfoPtrArrayHook(podmpriv, ODM_CMNINFO_STA_STATUS, psta->mac_id, psta);
+ ODM_RAInfo_Init(podmpriv, psta->mac_id);
+ } else {
+ DBG_88E("### Clean STA_(%d) info\n", psta->mac_id);
+ ODM_CmnInfoPtrArrayHook(podmpriv, ODM_CMNINFO_STA_STATUS, psta->mac_id, NULL);
+ }
+ }
+ break;
+ case HAL_ODM_P2P_STATE:
+ ODM_CmnInfoUpdate(podmpriv, ODM_CMNINFO_WIFI_DIRECT, bSet);
+ break;
+ case HAL_ODM_WIFI_DISPLAY_STATE:
+ ODM_CmnInfoUpdate(podmpriv, ODM_CMNINFO_WIFI_DISPLAY, bSet);
+ break;
+ default:
+ break;
+ }
+}
+
+void rtl8188e_clone_haldata(struct adapter *dst_adapter, struct adapter *src_adapter)
+{
+ memcpy(dst_adapter->HalData, src_adapter->HalData, dst_adapter->hal_data_sz);
+}
+
+void rtl8188e_start_thread(struct adapter *padapter)
+{
+}
+
+void rtl8188e_stop_thread(struct adapter *padapter)
+{
+}
+
+static void hal_notch_filter_8188e(struct adapter *adapter, bool enable)
+{
+ if (enable) {
+ DBG_88E("Enable notch filter\n");
+ rtw_write8(adapter, rOFDM0_RxDSP+1, rtw_read8(adapter, rOFDM0_RxDSP+1) | BIT1);
+ } else {
+ DBG_88E("Disable notch filter\n");
+ rtw_write8(adapter, rOFDM0_RxDSP+1, rtw_read8(adapter, rOFDM0_RxDSP+1) & ~BIT1);
+ }
+}
+void rtl8188e_set_hal_ops(struct hal_ops *pHalFunc)
+{
+ pHalFunc->free_hal_data = &rtl8188e_free_hal_data;
+
+ pHalFunc->dm_init = &rtl8188e_init_dm_priv;
+ pHalFunc->dm_deinit = &rtl8188e_deinit_dm_priv;
+
+ pHalFunc->read_chip_version = &rtl8188e_read_chip_version;
+
+ pHalFunc->set_bwmode_handler = &PHY_SetBWMode8188E;
+ pHalFunc->set_channel_handler = &PHY_SwChnl8188E;
+
+ pHalFunc->hal_dm_watchdog = &rtl8188e_HalDmWatchDog;
+
+ pHalFunc->Add_RateATid = &rtl8188e_Add_RateATid;
+ pHalFunc->run_thread = &rtl8188e_start_thread;
+ pHalFunc->cancel_thread = &rtl8188e_stop_thread;
+
+ pHalFunc->AntDivBeforeLinkHandler = &AntDivBeforeLink8188E;
+ pHalFunc->AntDivCompareHandler = &AntDivCompare8188E;
+ pHalFunc->read_bbreg = &rtl8188e_PHY_QueryBBReg;
+ pHalFunc->write_bbreg = &rtl8188e_PHY_SetBBReg;
+ pHalFunc->read_rfreg = &rtl8188e_PHY_QueryRFReg;
+ pHalFunc->write_rfreg = &rtl8188e_PHY_SetRFReg;
+
+ /* Efuse related function */
+ pHalFunc->EfusePowerSwitch = &rtl8188e_EfusePowerSwitch;
+ pHalFunc->ReadEFuse = &rtl8188e_ReadEFuse;
+ pHalFunc->EFUSEGetEfuseDefinition = &rtl8188e_EFUSE_GetEfuseDefinition;
+ pHalFunc->EfuseGetCurrentSize = &rtl8188e_EfuseGetCurrentSize;
+ pHalFunc->Efuse_PgPacketRead = &rtl8188e_Efuse_PgPacketRead;
+ pHalFunc->Efuse_PgPacketWrite = &rtl8188e_Efuse_PgPacketWrite;
+ pHalFunc->Efuse_WordEnableDataWrite = &rtl8188e_Efuse_WordEnableDataWrite;
+
+ pHalFunc->sreset_init_value = &sreset_init_value;
+ pHalFunc->sreset_reset_value = &sreset_reset_value;
+ pHalFunc->silentreset = &rtl8188e_silentreset_for_specific_platform;
+ pHalFunc->sreset_xmit_status_check = &rtl8188e_sreset_xmit_status_check;
+ pHalFunc->sreset_linked_status_check = &rtl8188e_sreset_linked_status_check;
+ pHalFunc->sreset_get_wifi_status = &sreset_get_wifi_status;
+
+ pHalFunc->GetHalODMVarHandler = &rtl8188e_GetHalODMVar;
+ pHalFunc->SetHalODMVarHandler = &rtl8188e_SetHalODMVar;
+
+ pHalFunc->IOL_exec_cmds_sync = &rtl8188e_IOL_exec_cmds_sync;
+
+ pHalFunc->hal_notch_filter = &hal_notch_filter_8188e;
+}
+
+u8 GetEEPROMSize8188E(struct adapter *padapter)
+{
+ u8 size = 0;
+ u32 cr;
+
+ cr = rtw_read16(padapter, REG_9346CR);
+ /* 6: EEPROM used is 93C46, 4: boot from E-Fuse. */
+ size = (cr & BOOT_FROM_EEPROM) ? 6 : 4;
+
+ MSG_88E("EEPROM type is %s\n", size == 4 ? "E-FUSE" : "93C46");
+
+ return size;
+}
+
+/* */
+/* */
+/* LLT R/W/Init function */
+/* */
+/* */
+static s32 _LLTWrite(struct adapter *padapter, u32 address, u32 data)
+{
+ s32 status = _SUCCESS;
+ s32 count = 0;
+ u32 value = _LLT_INIT_ADDR(address) | _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
+ u16 LLTReg = REG_LLT_INIT;
+
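+ /* Issue one LLT write: the command register packs the entry address, the data (next-pointer) */
+ /* and the write opcode; then poll until the op field returns to _LLT_NO_ACTIVE or */
+ /* POLLING_LLT_THRESHOLD is exceeded. */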
+ rtw_write32(padapter, LLTReg, value);
+
+ /* polling */
+ do {
+ value = rtw_read32(padapter, LLTReg);
+ if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
+ break;
+
+ if (count > POLLING_LLT_THRESHOLD) {
+ RT_TRACE(_module_hal_init_c_, _drv_err_, ("Failed to polling write LLT done at address %d!\n", address));
+ status = _FAIL;
+ break;
+ }
+ count++;
+ } while (1);
+
+ return status;
+}
+
+s32 InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy)
+{
+ s32 status = _FAIL;
+ u32 i;
+ u32 Last_Entry_Of_TxPktBuf = LAST_ENTRY_OF_TX_PKT_BUFFER;/* 176, 22k */
+
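+ /* The LLT is a linked list of TX buffer pages: pages 0 .. txpktbuf_bndy-2 each point to the */
+ /* next page, the boundary page is terminated with 0xFF, and the remaining pages up to */
+ /* LAST_ENTRY_OF_TX_PKT_BUFFER are chained into a ring (used for beacon or loopback data). */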
+ if (rtw_IOL_applied(padapter)) {
+ status = iol_InitLLTTable(padapter, txpktbuf_bndy);
+ } else {
+ for (i = 0; i < (txpktbuf_bndy - 1); i++) {
+ status = _LLTWrite(padapter, i, i + 1);
+ if (_SUCCESS != status)
+ return status;
+ }
+
+ /* end of list */
+ status = _LLTWrite(padapter, (txpktbuf_bndy - 1), 0xFF);
+ if (_SUCCESS != status)
+ return status;
+
+ /* Make the other pages as ring buffer */
+ /* This ring buffer is used as the beacon buffer if we configure this MAC for two-MAC transfer. */
+ /* Otherwise used as local loopback buffer. */
+ for (i = txpktbuf_bndy; i < Last_Entry_Of_TxPktBuf; i++) {
+ status = _LLTWrite(padapter, i, (i + 1));
+ if (_SUCCESS != status)
+ return status;
+ }
+
+ /* Let last entry point to the start entry of ring buffer */
+ status = _LLTWrite(padapter, Last_Entry_Of_TxPktBuf, txpktbuf_bndy);
+ if (_SUCCESS != status) {
+ return status;
+ }
+ }
+
+ return status;
+}
+
+void
+Hal_InitPGData88E(struct adapter *padapter)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+
+ if (!pEEPROM->bautoload_fail_flag) { /* autoload OK. */
+ if (!is_boot_from_eeprom(padapter)) {
+ /* Read EFUSE real map to shadow. */
+ EFUSE_ShadowMapUpdate(padapter, EFUSE_WIFI, false);
+ }
+ } else {/* autoload fail */
+ RT_TRACE(_module_hci_hal_init_c_, _drv_notice_, ("AutoLoad Fail reported from CR9346!!\n"));
+ /* update to default value 0xFF */
+ if (!is_boot_from_eeprom(padapter))
+ EFUSE_ShadowMapUpdate(padapter, EFUSE_WIFI, false);
+ }
+}
+
+void
+Hal_EfuseParseIDCode88E(
+ struct adapter *padapter,
+ u8 *hwinfo
+ )
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+ u16 EEPROMId;
+
+ /* Check 0x8129 again to make sure of the autoload status. */
+ EEPROMId = le16_to_cpu(*((__le16 *)hwinfo));
+ if (EEPROMId != RTL_EEPROM_ID) {
+ DBG_88E("EEPROM ID(%#x) is invalid!!\n", EEPROMId);
+ pEEPROM->bautoload_fail_flag = true;
+ } else {
+ pEEPROM->bautoload_fail_flag = false;
+ }
+
+ DBG_88E("EEPROM ID = 0x%04x\n", EEPROMId);
+}
+
+static void Hal_ReadPowerValueFromPROM_8188E(struct txpowerinfo24g *pwrInfo24G, u8 *PROMContent, bool AutoLoadFail)
+{
+ u32 rfPath, eeAddr = EEPROM_TX_PWR_INX_88E, group, TxCount = 0;
+
+ _rtw_memset(pwrInfo24G, 0, sizeof(struct txpowerinfo24g));
+
+ if (AutoLoadFail) {
+ for (rfPath = 0; rfPath < MAX_RF_PATH; rfPath++) {
+ /* 2.4G default value */
+ for (group = 0; group < MAX_CHNL_GROUP_24G; group++) {
+ pwrInfo24G->IndexCCK_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX;
+ pwrInfo24G->IndexBW40_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX;
+ }
+ for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+ if (TxCount == 0) {
+ pwrInfo24G->BW20_Diff[rfPath][0] = EEPROM_DEFAULT_24G_HT20_DIFF;
+ pwrInfo24G->OFDM_Diff[rfPath][0] = EEPROM_DEFAULT_24G_OFDM_DIFF;
+ } else {
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ pwrInfo24G->BW40_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ pwrInfo24G->CCK_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ }
+ }
+ }
+ return;
+ }
+
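+ /* Per RF path the PROM layout is: one CCK base index per 2.4G channel group, then one BW40 */
+ /* base index per group except the last, then per-TX-count bytes holding two 4-bit signed */
+ /* diffs each (high/low nibble), which the code below sign-extends to 8 bits. */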
+ for (rfPath = 0; rfPath < MAX_RF_PATH; rfPath++) {
+ /* 2.4G default value */
+ for (group = 0; group < MAX_CHNL_GROUP_24G; group++) {
+ pwrInfo24G->IndexCCK_Base[rfPath][group] = PROMContent[eeAddr++];
+ if (pwrInfo24G->IndexCCK_Base[rfPath][group] == 0xFF)
+ pwrInfo24G->IndexCCK_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX;
+ }
+ for (group = 0; group < MAX_CHNL_GROUP_24G-1; group++) {
+ pwrInfo24G->IndexBW40_Base[rfPath][group] = PROMContent[eeAddr++];
+ if (pwrInfo24G->IndexBW40_Base[rfPath][group] == 0xFF)
+ pwrInfo24G->IndexBW40_Base[rfPath][group] = EEPROM_DEFAULT_24G_INDEX;
+ }
+ for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+ if (TxCount == 0) {
+ pwrInfo24G->BW40_Diff[rfPath][TxCount] = 0;
+ if (PROMContent[eeAddr] == 0xFF) {
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] = EEPROM_DEFAULT_24G_HT20_DIFF;
+ } else {
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0xf0)>>4;
+ if (pwrInfo24G->BW20_Diff[rfPath][TxCount] & BIT3) /* 4bit sign number to 8 bit sign number */
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ if (PROMContent[eeAddr] == 0xFF) {
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] = EEPROM_DEFAULT_24G_OFDM_DIFF;
+ } else {
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0x0f);
+ if (pwrInfo24G->OFDM_Diff[rfPath][TxCount] & BIT3) /* 4bit sign number to 8 bit sign number */
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] |= 0xF0;
+ }
+ pwrInfo24G->CCK_Diff[rfPath][TxCount] = 0;
+ eeAddr++;
+ } else {
+ if (PROMContent[eeAddr] == 0xFF) {
+ pwrInfo24G->BW40_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ } else {
+ pwrInfo24G->BW40_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0xf0)>>4;
+ if (pwrInfo24G->BW40_Diff[rfPath][TxCount] & BIT3) /* 4bit sign number to 8 bit sign number */
+ pwrInfo24G->BW40_Diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ if (PROMContent[eeAddr] == 0xFF) {
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ } else {
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0x0f);
+ if (pwrInfo24G->BW20_Diff[rfPath][TxCount] & BIT3) /* 4bit sign number to 8 bit sign number */
+ pwrInfo24G->BW20_Diff[rfPath][TxCount] |= 0xF0;
+ }
+ eeAddr++;
+
+ if (PROMContent[eeAddr] == 0xFF) {
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ } else {
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0xf0)>>4;
+ if (pwrInfo24G->OFDM_Diff[rfPath][TxCount] & BIT3) /* 4bit sign number to 8 bit sign number */
+ pwrInfo24G->OFDM_Diff[rfPath][TxCount] |= 0xF0;
+ }
+
+ if (PROMContent[eeAddr] == 0xFF) {
+ pwrInfo24G->CCK_Diff[rfPath][TxCount] = EEPROM_DEFAULT_DIFF;
+ } else {
+ pwrInfo24G->CCK_Diff[rfPath][TxCount] = (PROMContent[eeAddr]&0x0f);
+ if (pwrInfo24G->CCK_Diff[rfPath][TxCount] & BIT3) /* 4bit sign number to 8 bit sign number */
+ pwrInfo24G->CCK_Diff[rfPath][TxCount] |= 0xF0;
+ }
+ eeAddr++;
+ }
+ }
+ }
+}
+
+static u8 Hal_GetChnlGroup88E(u8 chnl, u8 *pGroup)
+{
+ u8 bIn24G = true;
+
+ if (chnl <= 14) {
+ bIn24G = true;
+
+ if (chnl < 3) /* Channel 1-2 */
+ *pGroup = 0;
+ else if (chnl < 6) /* Channel 3-5 */
+ *pGroup = 1;
+ else if (chnl < 9) /* Channel 6-8 */
+ *pGroup = 2;
+ else if (chnl < 12) /* Channel 9-11 */
+ *pGroup = 3;
+ else if (chnl < 14) /* Channel 12-13 */
+ *pGroup = 4;
+ else if (chnl == 14) /* Channel 14 */
+ *pGroup = 5;
+ } else {
+ bIn24G = false;
+
+ if (chnl <= 40)
+ *pGroup = 0;
+ else if (chnl <= 48)
+ *pGroup = 1;
+ else if (chnl <= 56)
+ *pGroup = 2;
+ else if (chnl <= 64)
+ *pGroup = 3;
+ else if (chnl <= 104)
+ *pGroup = 4;
+ else if (chnl <= 112)
+ *pGroup = 5;
+ else if (chnl <= 120)
+ *pGroup = 5;
+ else if (chnl <= 128)
+ *pGroup = 6;
+ else if (chnl <= 136)
+ *pGroup = 7;
+ else if (chnl <= 144)
+ *pGroup = 8;
+ else if (chnl <= 153)
+ *pGroup = 9;
+ else if (chnl <= 161)
+ *pGroup = 10;
+ else if (chnl <= 177)
+ *pGroup = 11;
+ }
+ return bIn24G;
+}
+
+void Hal_ReadPowerSavingMode88E(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
+{
+ if (AutoLoadFail) {
+ padapter->pwrctrlpriv.bHWPowerdown = false;
+ padapter->pwrctrlpriv.bSupportRemoteWakeup = false;
+ } else {
+ /* hw power down mode selection, 0: rf-off / 1: power down */
+
+ if (padapter->registrypriv.hwpdn_mode == 2)
+ padapter->pwrctrlpriv.bHWPowerdown = (hwinfo[EEPROM_RF_FEATURE_OPTION_88E] & BIT4);
+ else
+ padapter->pwrctrlpriv.bHWPowerdown = padapter->registrypriv.hwpdn_mode;
+
+ /* decide whether the hw supports the remote wakeup function */
+ /* if supported by hw, the 8051 (SIE) will generate a WakeUp signal (D+/D- toggle) on autoresume */
+ padapter->pwrctrlpriv.bSupportRemoteWakeup = (hwinfo[EEPROM_USB_OPTIONAL_FUNCTION0] & BIT1) ? true : false;
+
+ DBG_88E("%s...bHWPwrPindetect(%x)-bHWPowerdown(%x) , bSupportRemoteWakeup(%x)\n", __func__,
+ padapter->pwrctrlpriv.bHWPwrPindetect, padapter->pwrctrlpriv.bHWPowerdown , padapter->pwrctrlpriv.bSupportRemoteWakeup);
+
+ DBG_88E("### PS params => power_mgnt(%x), usbss_enable(%x) ###\n", padapter->registrypriv.power_mgnt, padapter->registrypriv.usbss_enable);
+ }
+}
+
+void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool AutoLoadFail)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct txpowerinfo24g pwrInfo24G;
+ u8 rfPath, ch, group;
+ u8 bIn24G, TxCount;
+
+ Hal_ReadPowerValueFromPROM_8188E(&pwrInfo24G, PROMContent, AutoLoadFail);
+
+ if (!AutoLoadFail)
+ pHalData->bTXPowerDataReadFromEEPORM = true;
+
+ for (rfPath = 0; rfPath < pHalData->NumTotalRFPath; rfPath++) {
+ for (ch = 0; ch <= CHANNEL_MAX_NUMBER; ch++) {
+ bIn24G = Hal_GetChnlGroup88E(ch, &group);
+ if (bIn24G) {
+ pHalData->Index24G_CCK_Base[rfPath][ch] = pwrInfo24G.IndexCCK_Base[rfPath][group];
+ if (ch == 14)
+ pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][4];
+ else
+ pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][group];
+ }
+ if (bIn24G) {
+ DBG_88E("======= Path %d, Channel %d =======\n", rfPath, ch);
+ DBG_88E("Index24G_CCK_Base[%d][%d] = 0x%x\n", rfPath, ch , pHalData->Index24G_CCK_Base[rfPath][ch]);
+ DBG_88E("Index24G_BW40_Base[%d][%d] = 0x%x\n", rfPath, ch , pHalData->Index24G_BW40_Base[rfPath][ch]);
+ }
+ }
+ for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+ pHalData->CCK_24G_Diff[rfPath][TxCount] = pwrInfo24G.CCK_Diff[rfPath][TxCount];
+ pHalData->OFDM_24G_Diff[rfPath][TxCount] = pwrInfo24G.OFDM_Diff[rfPath][TxCount];
+ pHalData->BW20_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW20_Diff[rfPath][TxCount];
+ pHalData->BW40_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW40_Diff[rfPath][TxCount];
+ DBG_88E("======= TxCount %d =======\n", TxCount);
+ DBG_88E("CCK_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->CCK_24G_Diff[rfPath][TxCount]);
+ DBG_88E("OFDM_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->OFDM_24G_Diff[rfPath][TxCount]);
+ DBG_88E("BW20_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW20_24G_Diff[rfPath][TxCount]);
+ DBG_88E("BW40_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW40_24G_Diff[rfPath][TxCount]);
+ }
+ }
+
+ /* 2010/10/19 MH Add regulatory recognition for CU. */
+ if (!AutoLoadFail) {
+ pHalData->EEPROMRegulatory = (PROMContent[EEPROM_RF_BOARD_OPTION_88E]&0x7); /* bit0~2 */
+ if (PROMContent[EEPROM_RF_BOARD_OPTION_88E] == 0xFF)
+ pHalData->EEPROMRegulatory = (EEPROM_DEFAULT_BOARD_OPTION&0x7); /* bit0~2 */
+ } else {
+ pHalData->EEPROMRegulatory = 0;
+ }
+ DBG_88E("EEPROMRegulatory = 0x%x\n", pHalData->EEPROMRegulatory);
+}
+
+void Hal_EfuseParseXtal_8188E(struct adapter *pAdapter, u8 *hwinfo, bool AutoLoadFail)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+
+ if (!AutoLoadFail) {
+ pHalData->CrystalCap = hwinfo[EEPROM_XTAL_88E];
+ if (pHalData->CrystalCap == 0xFF)
+ pHalData->CrystalCap = EEPROM_Default_CrystalCap_88E;
+ } else {
+ pHalData->CrystalCap = EEPROM_Default_CrystalCap_88E;
+ }
+ DBG_88E("CrystalCap: 0x%2x\n", pHalData->CrystalCap);
+}
+
+void Hal_EfuseParseBoardType88E(struct adapter *pAdapter, u8 *hwinfo, bool AutoLoadFail)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+
+ if (!AutoLoadFail)
+ pHalData->BoardType = ((hwinfo[EEPROM_RF_BOARD_OPTION_88E]&0xE0)>>5);
+ else
+ pHalData->BoardType = 0;
+ DBG_88E("Board Type: 0x%2x\n", pHalData->BoardType);
+}
+
+void Hal_EfuseParseEEPROMVer88E(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+
+ if (!AutoLoadFail) {
+ pHalData->EEPROMVersion = hwinfo[EEPROM_VERSION_88E];
+ if (pHalData->EEPROMVersion == 0xFF)
+ pHalData->EEPROMVersion = EEPROM_Default_Version;
+ } else {
+ pHalData->EEPROMVersion = 1;
+ }
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
+ ("Hal_EfuseParseEEPROMVer(), EEVer = %d\n",
+ pHalData->EEPROMVersion));
+}
+
+void rtl8188e_EfuseParseChnlPlan(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
+{
+ padapter->mlmepriv.ChannelPlan =
+ hal_com_get_channel_plan(padapter,
+ hwinfo ? hwinfo[EEPROM_ChannelPlan_88E] : 0xFF,
+ padapter->registrypriv.channel_plan,
+ RT_CHANNEL_DOMAIN_WORLD_WIDE_13, AutoLoadFail);
+
+ DBG_88E("mlmepriv.ChannelPlan = 0x%02x\n", padapter->mlmepriv.ChannelPlan);
+}
+
+void Hal_EfuseParseCustomerID88E(struct adapter *padapter, u8 *hwinfo, bool AutoLoadFail)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+
+ if (!AutoLoadFail) {
+ pHalData->EEPROMCustomerID = hwinfo[EEPROM_CUSTOMERID_88E];
+ } else {
+ pHalData->EEPROMCustomerID = 0;
+ pHalData->EEPROMSubCustomerID = 0;
+ }
+ DBG_88E("EEPROM Customer ID: 0x%2x\n", pHalData->EEPROMCustomerID);
+}
+
+void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent, bool AutoLoadFail)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ struct registry_priv *registry_par = &pAdapter->registrypriv;
+
+ if (!AutoLoadFail) {
+ /* Antenna Diversity setting. */
+ if (registry_par->antdiv_cfg == 2) { /* 2:By EFUSE */
+ pHalData->AntDivCfg = (PROMContent[EEPROM_RF_BOARD_OPTION_88E]&0x18)>>3;
+ if (PROMContent[EEPROM_RF_BOARD_OPTION_88E] == 0xFF)
+ pHalData->AntDivCfg = (EEPROM_DEFAULT_BOARD_OPTION&0x18)>>3;
+ } else {
+ pHalData->AntDivCfg = registry_par->antdiv_cfg; /* 0:OFF , 1:ON, 2:By EFUSE */
+ }
+
+ if (registry_par->antdiv_type == 0) {
+ /* If TRxAntDivType is AUTO in advanced setting, use EFUSE value instead. */
+ pHalData->TRxAntDivType = PROMContent[EEPROM_RF_ANTENNA_OPT_88E];
+ if (pHalData->TRxAntDivType == 0xFF)
+ pHalData->TRxAntDivType = CG_TRX_HW_ANTDIV; /* For 88EE, 1Tx and 1RxCG are fixed.(1Ant, Tx and RxCG are both on aux port) */
+ } else {
+ pHalData->TRxAntDivType = registry_par->antdiv_type;
+ }
+
+ if (pHalData->TRxAntDivType == CG_TRX_HW_ANTDIV || pHalData->TRxAntDivType == CGCS_RX_HW_ANTDIV)
+ pHalData->AntDivCfg = 1; /* 0xC1[3] is ignored. */
+ } else {
+ pHalData->AntDivCfg = 0;
+ pHalData->TRxAntDivType = pHalData->TRxAntDivType; /* The value in the driver setting of device manager. */
+ }
+ DBG_88E("EEPROM : AntDivCfg = %x, TRxAntDivType = %x\n", pHalData->AntDivCfg, pHalData->TRxAntDivType);
+}
+
+void Hal_ReadThermalMeter_88E(struct adapter *Adapter, u8 *PROMContent, bool AutoloadFail)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+
+ /* ThermalMeter from EEPROM */
+ if (!AutoloadFail)
+ pHalData->EEPROMThermalMeter = PROMContent[EEPROM_THERMAL_METER_88E];
+ else
+ pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter_88E;
+
+ if (pHalData->EEPROMThermalMeter == 0xff || AutoloadFail) {
+ pHalData->bAPKThermalMeterIgnore = true;
+ pHalData->EEPROMThermalMeter = EEPROM_Default_ThermalMeter_88E;
+ }
+ DBG_88E("ThermalMeter = 0x%x\n", pHalData->EEPROMThermalMeter);
+}
+
+void Hal_InitChannelPlan(struct adapter *padapter)
+{
+}
+
+bool HalDetectPwrDownMode88E(struct adapter *Adapter)
+{
+ u8 tmpvalue = 0;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
+
+ EFUSE_ShadowRead(Adapter, 1, EEPROM_RF_FEATURE_OPTION_88E, (u32 *)&tmpvalue);
+
+ /* 2010/08/25 MH INF priority > PDN Efuse value. */
+ if (tmpvalue & BIT(4) && pwrctrlpriv->reg_pdnmode)
+ pHalData->pwrdown = true;
+ else
+ pHalData->pwrdown = false;
+
+ DBG_88E("HalDetectPwrDownMode(): PDN =%d\n", pHalData->pwrdown);
+
+ return pHalData->pwrdown;
+} /* HalDetectPwrDownMode */
+
+/* This function is used only for 92C to set REG_BCN_CTRL(0x550) register. */
+/* We just reserve the value of the register in variable pHalData->RegBcnCtrlVal and then operate */
+/* the value of the register via atomic operation. */
+/* This prevents a race condition when setting this register. */
+/* The value of pHalData->RegBcnCtrlVal is initialized in HwConfigureRTL8192CE() function. */
+
+void SetBcnCtrlReg(struct adapter *padapter, u8 SetBits, u8 ClearBits)
+{
+ struct hal_data_8188e *pHalData;
+
+ pHalData = GET_HAL_DATA(padapter);
+
+ pHalData->RegBcnCtrlVal |= SetBits;
+ pHalData->RegBcnCtrlVal &= ~ClearBits;
+
+ rtw_write8(padapter, REG_BCN_CTRL, (u8)pHalData->RegBcnCtrlVal);
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c b/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c
new file mode 100644
index 00000000000..e97ba02fa04
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_mp.c
@@ -0,0 +1,860 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTL8188E_MP_C_
+
+#include <drv_types.h>
+#include <rtw_mp.h>
+#include <rtl8188e_hal.h>
+#include <rtl8188e_dm.h>
+
+s32 Hal_SetPowerTracking(struct adapter *padapter, u8 enable)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct odm_dm_struct *pDM_Odm = &(pHalData->odmpriv);
+
+ if (!netif_running(padapter->pnetdev)) {
+ RT_TRACE(_module_mp_, _drv_warning_,
+ ("SetPowerTracking! Fail: interface not opened!\n"));
+ return _FAIL;
+ }
+
+ if (!check_fwstate(&padapter->mlmepriv, WIFI_MP_STATE)) {
+ RT_TRACE(_module_mp_, _drv_warning_,
+ ("SetPowerTracking! Fail: not in MP mode!\n"));
+ return _FAIL;
+ }
+
+ if (enable)
+ pDM_Odm->RFCalibrateInfo.bTXPowerTracking = true;
+ else
+ pDM_Odm->RFCalibrateInfo.bTXPowerTrackingInit = false;
+
+ return _SUCCESS;
+}
+
+void Hal_GetPowerTracking(struct adapter *padapter, u8 *enable)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct odm_dm_struct *pDM_Odm = &(pHalData->odmpriv);
+
+ *enable = pDM_Odm->RFCalibrateInfo.TxPowerTrackControl;
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: mpt_SwitchRfSetting
+ *
+ * Overview: Change the RF setting when we switch channel/rate/BW for MP.
+ *
+ * Input: struct adapter * pAdapter
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 01/08/2009 MHC Suggestion from SD3 Willis for 92S series.
+ * 01/09/2009 MHC Add CCK modification for 40MHZ. Suggestion from SD3.
+ *
+ *---------------------------------------------------------------------------*/
+void Hal_mpt_SwitchRfSetting(struct adapter *pAdapter)
+{
+ struct mp_priv *pmp = &pAdapter->mppriv;
+
+ /* <20120525, Kordan> Dynamic mechanism for APK, asked by Dennis. */
+ pmp->MptCtx.backup0x52_RF_A = (u8)PHY_QueryRFReg(pAdapter, RF_PATH_A, RF_0x52, 0x000F0);
+ pmp->MptCtx.backup0x52_RF_B = (u8)PHY_QueryRFReg(pAdapter, RF_PATH_B, RF_0x52, 0x000F0);
+ PHY_SetRFReg(pAdapter, RF_PATH_A, RF_0x52, 0x000F0, 0xD);
+ PHY_SetRFReg(pAdapter, RF_PATH_B, RF_0x52, 0x000F0, 0xD);
+}
+/*---------------------------hal\rtl8192c\MPT_Phy.c---------------------------*/
+
+/*---------------------------hal\rtl8192c\MPT_HelperFunc.c---------------------------*/
+void Hal_MPT_CCKTxPowerAdjust(struct adapter *Adapter, bool bInCH14)
+{
+ u32 TempVal = 0, TempVal2 = 0, TempVal3 = 0;
+ u32 CurrCCKSwingVal = 0, CCKSwingIndex = 12;
+ u8 i;
+
+ /* get current cck swing value and check 0xa22 & 0xa23 later to match the table. */
+ CurrCCKSwingVal = read_bbreg(Adapter, rCCK0_TxFilter1, bMaskHWord);
+
+ if (!bInCH14) {
+ /* Readback the current bb cck swing value and compare with the table to */
+ /* get the current swing index */
+ for (i = 0; i < CCK_TABLE_SIZE; i++) {
+ if (((CurrCCKSwingVal&0xff) == (u32)CCKSwingTable_Ch1_Ch13[i][0]) &&
+ (((CurrCCKSwingVal&0xff00)>>8) == (u32)CCKSwingTable_Ch1_Ch13[i][1])) {
+ CCKSwingIndex = i;
+ break;
+ }
+ }
+
+ /* Write 0xa22 0xa23 */
+ TempVal = CCKSwingTable_Ch1_Ch13[CCKSwingIndex][0] +
+ (CCKSwingTable_Ch1_Ch13[CCKSwingIndex][1]<<8);
+
+
+ /* Write 0xa24 ~ 0xa27 */
+ TempVal2 = 0;
+ TempVal2 = CCKSwingTable_Ch1_Ch13[CCKSwingIndex][2] +
+ (CCKSwingTable_Ch1_Ch13[CCKSwingIndex][3]<<8) +
+ (CCKSwingTable_Ch1_Ch13[CCKSwingIndex][4]<<16)+
+ (CCKSwingTable_Ch1_Ch13[CCKSwingIndex][5]<<24);
+
+ /* Write 0xa28 0xa29 */
+ TempVal3 = 0;
+ TempVal3 = CCKSwingTable_Ch1_Ch13[CCKSwingIndex][6] +
+ (CCKSwingTable_Ch1_Ch13[CCKSwingIndex][7]<<8);
+ } else {
+ for (i = 0; i < CCK_TABLE_SIZE; i++) {
+ if (((CurrCCKSwingVal&0xff) == (u32)CCKSwingTable_Ch14[i][0]) &&
+ (((CurrCCKSwingVal&0xff00)>>8) == (u32)CCKSwingTable_Ch14[i][1])) {
+ CCKSwingIndex = i;
+ break;
+ }
+ }
+
+ /* Write 0xa22 0xa23 */
+ TempVal = CCKSwingTable_Ch14[CCKSwingIndex][0] +
+ (CCKSwingTable_Ch14[CCKSwingIndex][1]<<8);
+
+ /* Write 0xa24 ~ 0xa27 */
+ TempVal2 = 0;
+ TempVal2 = CCKSwingTable_Ch14[CCKSwingIndex][2] +
+ (CCKSwingTable_Ch14[CCKSwingIndex][3]<<8) +
+ (CCKSwingTable_Ch14[CCKSwingIndex][4]<<16)+
+ (CCKSwingTable_Ch14[CCKSwingIndex][5]<<24);
+
+ /* Write 0xa28 0xa29 */
+ TempVal3 = 0;
+ TempVal3 = CCKSwingTable_Ch14[CCKSwingIndex][6] +
+ (CCKSwingTable_Ch14[CCKSwingIndex][7]<<8);
+ }
+
+ write_bbreg(Adapter, rCCK0_TxFilter1, bMaskHWord, TempVal);
+ write_bbreg(Adapter, rCCK0_TxFilter2, bMaskDWord, TempVal2);
+ write_bbreg(Adapter, rCCK0_DebugPort, bMaskLWord, TempVal3);
+}
+
+void Hal_MPT_CCKTxPowerAdjustbyIndex(struct adapter *pAdapter, bool beven)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ struct mpt_context *pMptCtx = &pAdapter->mppriv.MptCtx;
+ struct odm_dm_struct *pDM_Odm = &(pHalData->odmpriv);
+ s32 TempCCk;
+ u8 CCK_index, CCK_index_old = 0;
+ u8 Action = 0; /* 0: no action, 1: even->odd, 2:odd->even */
+ s32 i = 0;
+
+
+ if (!IS_92C_SERIAL(pHalData->VersionID))
+ return;
+ if (beven && !pMptCtx->bMptIndexEven) {
+ /* odd->even */
+ Action = 2;
+ pMptCtx->bMptIndexEven = true;
+ } else if (!beven && pMptCtx->bMptIndexEven) {
+ /* even->odd */
+ Action = 1;
+ pMptCtx->bMptIndexEven = false;
+ }
+
+ if (Action != 0) {
+ /* Query CCK default setting From 0xa24 */
+ TempCCk = read_bbreg(pAdapter, rCCK0_TxFilter2, bMaskDWord) & bMaskCCK;
+ for (i = 0; i < CCK_TABLE_SIZE; i++) {
+ if (pDM_Odm->RFCalibrateInfo.bCCKinCH14) {
+ if (_rtw_memcmp((void *)&TempCCk, (void *)&CCKSwingTable_Ch14[i][2], 4)) {
+ CCK_index_old = (u8)i;
+ break;
+ }
+ } else {
+ if (_rtw_memcmp((void *)&TempCCk, (void *)&CCKSwingTable_Ch1_Ch13[i][2], 4)) {
+ CCK_index_old = (u8)i;
+ break;
+ }
+ }
+ }
+
+ if (Action == 1)
+ CCK_index = CCK_index_old - 1;
+ else
+ CCK_index = CCK_index_old + 1;
+
+ /* Adjust CCK according to gain index */
+ if (!pDM_Odm->RFCalibrateInfo.bCCKinCH14) {
+ rtw_write8(pAdapter, 0xa22, CCKSwingTable_Ch1_Ch13[CCK_index][0]);
+ rtw_write8(pAdapter, 0xa23, CCKSwingTable_Ch1_Ch13[CCK_index][1]);
+ rtw_write8(pAdapter, 0xa24, CCKSwingTable_Ch1_Ch13[CCK_index][2]);
+ rtw_write8(pAdapter, 0xa25, CCKSwingTable_Ch1_Ch13[CCK_index][3]);
+ rtw_write8(pAdapter, 0xa26, CCKSwingTable_Ch1_Ch13[CCK_index][4]);
+ rtw_write8(pAdapter, 0xa27, CCKSwingTable_Ch1_Ch13[CCK_index][5]);
+ rtw_write8(pAdapter, 0xa28, CCKSwingTable_Ch1_Ch13[CCK_index][6]);
+ rtw_write8(pAdapter, 0xa29, CCKSwingTable_Ch1_Ch13[CCK_index][7]);
+ } else {
+ rtw_write8(pAdapter, 0xa22, CCKSwingTable_Ch14[CCK_index][0]);
+ rtw_write8(pAdapter, 0xa23, CCKSwingTable_Ch14[CCK_index][1]);
+ rtw_write8(pAdapter, 0xa24, CCKSwingTable_Ch14[CCK_index][2]);
+ rtw_write8(pAdapter, 0xa25, CCKSwingTable_Ch14[CCK_index][3]);
+ rtw_write8(pAdapter, 0xa26, CCKSwingTable_Ch14[CCK_index][4]);
+ rtw_write8(pAdapter, 0xa27, CCKSwingTable_Ch14[CCK_index][5]);
+ rtw_write8(pAdapter, 0xa28, CCKSwingTable_Ch14[CCK_index][6]);
+ rtw_write8(pAdapter, 0xa29, CCKSwingTable_Ch14[CCK_index][7]);
+ }
+ }
+}
+/*---------------------------hal\rtl8192c\MPT_HelperFunc.c---------------------------*/
+
+/*
+ * SetChannel
+ * Description
+ * Use H2C command to change channel,
+ * not only modify the rf register, but other settings also need to be updated.
+ */
+void Hal_SetChannel(struct adapter *pAdapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ struct mp_priv *pmp = &pAdapter->mppriv;
+ struct odm_dm_struct *pDM_Odm = &(pHalData->odmpriv);
+ u8 eRFPath;
+ u8 channel = pmp->channel;
+
+ /* set RF channel register */
+ for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++)
+ _write_rfreg(pAdapter, eRFPath, ODM_CHANNEL, 0x3FF, channel);
+ Hal_mpt_SwitchRfSetting(pAdapter);
+
+ SelectChannel(pAdapter, channel);
+
+ if (pHalData->CurrentChannel == 14 && !pDM_Odm->RFCalibrateInfo.bCCKinCH14) {
+ pDM_Odm->RFCalibrateInfo.bCCKinCH14 = true;
+ Hal_MPT_CCKTxPowerAdjust(pAdapter, pDM_Odm->RFCalibrateInfo.bCCKinCH14);
+ } else if (pHalData->CurrentChannel != 14 && pDM_Odm->RFCalibrateInfo.bCCKinCH14) {
+ pDM_Odm->RFCalibrateInfo.bCCKinCH14 = false;
+ Hal_MPT_CCKTxPowerAdjust(pAdapter, pDM_Odm->RFCalibrateInfo.bCCKinCH14);
+ }
+}
+
+/*
+ * Notice
+ * Switching bandwidth may change the center frequency (channel)
+ */
+void Hal_SetBandwidth(struct adapter *pAdapter)
+{
+ struct mp_priv *pmp = &pAdapter->mppriv;
+
+
+ SetBWMode(pAdapter, pmp->bandwidth, pmp->prime_channel_offset);
+ Hal_mpt_SwitchRfSetting(pAdapter);
+}
+
+void Hal_SetCCKTxPower(struct adapter *pAdapter, u8 *TxPower)
+{
+ u32 tmpval = 0;
+
+
+ /* rf-A cck tx power */
+ write_bbreg(pAdapter, rTxAGC_A_CCK1_Mcs32, bMaskByte1, TxPower[RF_PATH_A]);
+ tmpval = (TxPower[RF_PATH_A]<<16) | (TxPower[RF_PATH_A]<<8) | TxPower[RF_PATH_A];
+ write_bbreg(pAdapter, rTxAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
+
+ /* rf-B cck tx power */
+ write_bbreg(pAdapter, rTxAGC_B_CCK11_A_CCK2_11, bMaskByte0, TxPower[RF_PATH_B]);
+ tmpval = (TxPower[RF_PATH_B]<<16) | (TxPower[RF_PATH_B]<<8) | TxPower[RF_PATH_B];
+ write_bbreg(pAdapter, rTxAGC_B_CCK1_55_Mcs32, 0xffffff00, tmpval);
+
+ RT_TRACE(_module_mp_, _drv_notice_,
+ ("-SetCCKTxPower: A[0x%02x] B[0x%02x]\n",
+ TxPower[RF_PATH_A], TxPower[RF_PATH_B]));
+}
+
+void Hal_SetOFDMTxPower(struct adapter *pAdapter, u8 *TxPower)
+{
+ u32 TxAGC = 0;
+ u8 tmpval = 0;
+
+ /* HT Tx-rf(A) */
+ tmpval = TxPower[RF_PATH_A];
+ TxAGC = (tmpval<<24) | (tmpval<<16) | (tmpval<<8) | tmpval;
+
+ write_bbreg(pAdapter, rTxAGC_A_Rate18_06, bMaskDWord, TxAGC);
+ write_bbreg(pAdapter, rTxAGC_A_Rate54_24, bMaskDWord, TxAGC);
+ write_bbreg(pAdapter, rTxAGC_A_Mcs03_Mcs00, bMaskDWord, TxAGC);
+ write_bbreg(pAdapter, rTxAGC_A_Mcs07_Mcs04, bMaskDWord, TxAGC);
+ write_bbreg(pAdapter, rTxAGC_A_Mcs11_Mcs08, bMaskDWord, TxAGC);
+ write_bbreg(pAdapter, rTxAGC_A_Mcs15_Mcs12, bMaskDWord, TxAGC);
+
+ /* HT Tx-rf(B) */
+ tmpval = TxPower[RF_PATH_B];
+ TxAGC = (tmpval<<24) | (tmpval<<16) | (tmpval<<8) | tmpval;
+
+ write_bbreg(pAdapter, rTxAGC_B_Rate18_06, bMaskDWord, TxAGC);
+ write_bbreg(pAdapter, rTxAGC_B_Rate54_24, bMaskDWord, TxAGC);
+ write_bbreg(pAdapter, rTxAGC_B_Mcs03_Mcs00, bMaskDWord, TxAGC);
+ write_bbreg(pAdapter, rTxAGC_B_Mcs07_Mcs04, bMaskDWord, TxAGC);
+ write_bbreg(pAdapter, rTxAGC_B_Mcs11_Mcs08, bMaskDWord, TxAGC);
+ write_bbreg(pAdapter, rTxAGC_B_Mcs15_Mcs12, bMaskDWord, TxAGC);
+}
+
+void Hal_SetAntennaPathPower(struct adapter *pAdapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ u8 TxPowerLevel[MAX_RF_PATH_NUMS];
+ u8 rfPath;
+
+ TxPowerLevel[RF_PATH_A] = pAdapter->mppriv.txpoweridx;
+ TxPowerLevel[RF_PATH_B] = pAdapter->mppriv.txpoweridx_b;
+
+ switch (pAdapter->mppriv.antenna_tx) {
+ case ANTENNA_A:
+ default:
+ rfPath = RF_PATH_A;
+ break;
+ case ANTENNA_B:
+ rfPath = RF_PATH_B;
+ break;
+ case ANTENNA_C:
+ rfPath = RF_PATH_C;
+ break;
+ }
+
+ switch (pHalData->rf_chip) {
+ case RF_8225:
+ case RF_8256:
+ case RF_6052:
+ Hal_SetCCKTxPower(pAdapter, TxPowerLevel);
+ if (pAdapter->mppriv.rateidx < MPT_RATE_6M) /* CCK rate */
+ Hal_MPT_CCKTxPowerAdjustbyIndex(pAdapter, TxPowerLevel[rfPath]%2 == 0);
+ Hal_SetOFDMTxPower(pAdapter, TxPowerLevel);
+ break;
+ default:
+ break;
+ }
+}
+
+void Hal_SetTxPower(struct adapter *pAdapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ u8 TxPower = pAdapter->mppriv.txpoweridx;
+ u8 TxPowerLevel[MAX_RF_PATH_NUMS];
+ u8 rf, rfPath;
+
+ for (rf = 0; rf < MAX_RF_PATH_NUMS; rf++)
+ TxPowerLevel[rf] = TxPower;
+
+ switch (pAdapter->mppriv.antenna_tx) {
+ case ANTENNA_A:
+ default:
+ rfPath = RF_PATH_A;
+ break;
+ case ANTENNA_B:
+ rfPath = RF_PATH_B;
+ break;
+ case ANTENNA_C:
+ rfPath = RF_PATH_C;
+ break;
+ }
+
+ switch (pHalData->rf_chip) {
+ /* 2008/09/12 MH Test only !! We enable the TX power tracking for MP!!!!! */
+ /* We should call normal driver API later!! */
+ case RF_8225:
+ case RF_8256:
+ case RF_6052:
+ Hal_SetCCKTxPower(pAdapter, TxPowerLevel);
+ if (pAdapter->mppriv.rateidx < MPT_RATE_6M) /* CCK rate */
+ Hal_MPT_CCKTxPowerAdjustbyIndex(pAdapter, TxPowerLevel[rfPath]%2 == 0);
+ Hal_SetOFDMTxPower(pAdapter, TxPowerLevel);
+ break;
+ default:
+ break;
+ }
+}
+
+void Hal_SetDataRate(struct adapter *pAdapter)
+{
+ Hal_mpt_SwitchRfSetting(pAdapter);
+}
+
+void Hal_SetAntenna(struct adapter *pAdapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+
+ struct ant_sel_ofdm *p_ofdm_tx; /* OFDM Tx register */
+ struct ant_sel_cck *p_cck_txrx;
+ u8 r_rx_antenna_ofdm = 0, r_ant_select_cck_val = 0;
+ u8 chgTx = 0, chgRx = 0;
+ u32 r_ant_select_ofdm_val = 0, r_ofdm_tx_en_val = 0;
+
+
+ p_ofdm_tx = (struct ant_sel_ofdm *)&r_ant_select_ofdm_val;
+ p_cck_txrx = (struct ant_sel_cck *)&r_ant_select_cck_val;
+
+ p_ofdm_tx->r_ant_ht1 = 0x1;
+ p_ofdm_tx->r_ant_ht2 = 0x2; /* Second TX RF path is A */
+ p_ofdm_tx->r_ant_non_ht = 0x3; /* 0x1+0x2=0x3 */
+
+ switch (pAdapter->mppriv.antenna_tx) {
+ case ANTENNA_A:
+ p_ofdm_tx->r_tx_antenna = 0x1;
+ r_ofdm_tx_en_val = 0x1;
+ p_ofdm_tx->r_ant_l = 0x1;
+ p_ofdm_tx->r_ant_ht_s1 = 0x1;
+ p_ofdm_tx->r_ant_non_ht_s1 = 0x1;
+ p_cck_txrx->r_ccktx_enable = 0x8;
+ chgTx = 1;
+
+ /* From SD3 Willis suggestion !!! Set RF A=TX and B as standby */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter2, 0xe, 2);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter2, 0xe, 1);
+ r_ofdm_tx_en_val = 0x3;
+
+ /* Power save */
+
+ /* We need to close RFB by SW control */
+ if (pHalData->rf_type == RF_2T2R) {
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT10, 0);
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT26, 1);
+ PHY_SetBBReg(pAdapter, rFPGA0_XB_RFInterfaceOE, BIT10, 0);
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT1, 1);
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT17, 0);
+ }
+ break;
+ case ANTENNA_B:
+ p_ofdm_tx->r_tx_antenna = 0x2;
+ r_ofdm_tx_en_val = 0x2;
+ p_ofdm_tx->r_ant_l = 0x2;
+ p_ofdm_tx->r_ant_ht_s1 = 0x2;
+ p_ofdm_tx->r_ant_non_ht_s1 = 0x2;
+ p_cck_txrx->r_ccktx_enable = 0x4;
+ chgTx = 1;
+ /* From SD3 Willis suggestion !!! Set RF A as standby */
+ PHY_SetBBReg(pAdapter, rFPGA0_XA_HSSIParameter2, 0xe, 1);
+ PHY_SetBBReg(pAdapter, rFPGA0_XB_HSSIParameter2, 0xe, 2);
+
+ /* Power save */
+ /* cosa r_ant_select_ofdm_val = 0x22222222; */
+
+ /* 2008/10/31 MH From SD3 Willis' suggestion. We must read the RF 1T table. */
+ /* 2009/01/08 MH From Sd3 Willis. We need to close RFA by SW control */
+ if (pHalData->rf_type == RF_2T2R || pHalData->rf_type == RF_1T2R) {
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT10, 1);
+ PHY_SetBBReg(pAdapter, rFPGA0_XA_RFInterfaceOE, BIT10, 0);
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT26, 0);
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT1, 0);
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT17, 1);
+ }
+ break;
+ case ANTENNA_AB: /* For 8192S */
+ p_ofdm_tx->r_tx_antenna = 0x3;
+ r_ofdm_tx_en_val = 0x3;
+ p_ofdm_tx->r_ant_l = 0x3;
+ p_ofdm_tx->r_ant_ht_s1 = 0x3;
+ p_ofdm_tx->r_ant_non_ht_s1 = 0x3;
+ p_cck_txrx->r_ccktx_enable = 0xC;
+ chgTx = 1;
+
+ /* From SD3 Willis suggestion !!! Set RF B as standby */
+ PHY_SetBBReg(pAdapter, rFPGA0_XA_HSSIParameter2, 0xe, 2);
+ PHY_SetBBReg(pAdapter, rFPGA0_XB_HSSIParameter2, 0xe, 2);
+
+ /* Disable Power save */
+ /* cosa r_ant_select_ofdm_val = 0x3321333; */
+ /* 2009/01/08 MH From Sd3 Willis. We need to enable RFA/B by SW control */
+ if (pHalData->rf_type == RF_2T2R) {
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT10, 0);
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT26, 0);
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT1, 1);
+ PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT17, 1);
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* r_rx_antenna_ofdm, bit0=A, bit1=B, bit2=C, bit3=D */
+ /* r_cckrx_enable : CCK default, 0=A, 1=B, 2=C, 3=D */
+ /* r_cckrx_enable_2 : CCK option, 0=A, 1=B, 2=C, 3=D */
+ switch (pAdapter->mppriv.antenna_rx) {
+ case ANTENNA_A:
+ r_rx_antenna_ofdm = 0x1; /* A */
+ p_cck_txrx->r_cckrx_enable = 0x0; /* default: A */
+ p_cck_txrx->r_cckrx_enable_2 = 0x0; /* option: A */
+ chgRx = 1;
+ break;
+ case ANTENNA_B:
+ r_rx_antenna_ofdm = 0x2; /* B */
+ p_cck_txrx->r_cckrx_enable = 0x1; /* default: B */
+ p_cck_txrx->r_cckrx_enable_2 = 0x1; /* option: B */
+ chgRx = 1;
+ break;
+ case ANTENNA_AB:
+ r_rx_antenna_ofdm = 0x3; /* AB */
+ p_cck_txrx->r_cckrx_enable = 0x0; /* default:A */
+ p_cck_txrx->r_cckrx_enable_2 = 0x1; /* option:B */
+ chgRx = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (chgTx && chgRx) {
+ switch (pHalData->rf_chip) {
+ case RF_8225:
+ case RF_8256:
+ case RF_6052:
+ /* r_ant_sel_cck_val = r_ant_select_cck_val; */
+ PHY_SetBBReg(pAdapter, rFPGA1_TxInfo, 0x7fffffff, r_ant_select_ofdm_val); /* OFDM Tx */
+ PHY_SetBBReg(pAdapter, rFPGA0_TxInfo, 0x0000000f, r_ofdm_tx_en_val); /* OFDM Tx */
+ PHY_SetBBReg(pAdapter, rOFDM0_TRxPathEnable, 0x0000000f, r_rx_antenna_ofdm); /* OFDM Rx */
+ PHY_SetBBReg(pAdapter, rOFDM1_TRxPathEnable, 0x0000000f, r_rx_antenna_ofdm); /* OFDM Rx */
+ PHY_SetBBReg(pAdapter, rCCK0_AFESetting, bMaskByte3, r_ant_select_cck_val); /* CCK TxRx */
+
+ break;
+ default:
+ break;
+ }
+ }
+
+ RT_TRACE(_module_mp_, _drv_notice_, ("-SwitchAntenna: finished\n"));
+}
+
+s32 Hal_SetThermalMeter(struct adapter *pAdapter, u8 target_ther)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+
+
+ if (!netif_running(pAdapter->pnetdev)) {
+ RT_TRACE(_module_mp_, _drv_warning_, ("SetThermalMeter! Fail: interface not opened!\n"));
+ return _FAIL;
+ }
+
+ if (check_fwstate(&pAdapter->mlmepriv, WIFI_MP_STATE) == false) {
+ RT_TRACE(_module_mp_, _drv_warning_, ("SetThermalMeter: Fail! not in MP mode!\n"));
+ return _FAIL;
+ }
+
+ target_ther &= 0xff;
+ if (target_ther < 0x07)
+ target_ther = 0x07;
+ else if (target_ther > 0x1d)
+ target_ther = 0x1d;
+
+ pHalData->EEPROMThermalMeter = target_ther;
+
+ return _SUCCESS;
+}
+
+void Hal_TriggerRFThermalMeter(struct adapter *pAdapter)
+{
+	_write_rfreg(pAdapter, RF_PATH_A, RF_T_METER_88E, BIT17 | BIT16, 0x03);
+}
+
+u8 Hal_ReadRFThermalMeter(struct adapter *pAdapter)
+{
+ u32 ThermalValue = 0;
+
+ ThermalValue = _read_rfreg(pAdapter, RF_PATH_A, RF_T_METER_88E, 0xfc00);
+ return (u8)ThermalValue;
+}
+
+void Hal_GetThermalMeter(struct adapter *pAdapter, u8 *value)
+{
+ Hal_TriggerRFThermalMeter(pAdapter);
+ rtw_msleep_os(1000);
+ *value = Hal_ReadRFThermalMeter(pAdapter);
+}
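+/* Rough flow of Hal_GetThermalMeter(): trigger the meter on RF path A, wait about
+ * one second for the measurement to settle, then read the raw value back; the
+ * 0xfc00 mask above suggests the reading occupies bits [15:10] of RF_T_METER_88E. */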
+
+void Hal_SetSingleCarrierTx(struct adapter *pAdapter, u8 bStart)
+{
+ pAdapter->mppriv.MptCtx.bSingleCarrier = bStart;
+ if (bStart) {
+ /* Start Single Carrier. */
+ RT_TRACE(_module_mp_, _drv_alert_, ("SetSingleCarrierTx: test start\n"));
+ /* 1. if OFDM block on? */
+ if (!read_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn))
+ write_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn, bEnable);/* set OFDM block on */
+
+ /* 2. set CCK test mode off, set to CCK normal mode */
+ write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, bDisable);
+ /* 3. turn on scramble setting */
+ write_bbreg(pAdapter, rCCK0_System, bCCKScramble, bEnable);
+ /* 4. Turn On Single Carrier Tx and turn off the other test modes. */
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bEnable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
+ /* for dynamic set Power index. */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000500);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000500);
+ } else {
+ /* Stop Single Carrier. */
+ RT_TRACE(_module_mp_, _drv_alert_, ("SetSingleCarrierTx: test stop\n"));
+
+ /* Turn off all test modes. */
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
+ rtw_msleep_os(10);
+
+ /* BB Reset */
+ write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x0);
+ write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x1);
+
+ /* Stop for dynamic set Power index. */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000100);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000100);
+ }
+}
+
+
+void Hal_SetSingleToneTx(struct adapter *pAdapter, u8 bStart)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
+ bool is92C = IS_92C_SERIAL(pHalData->VersionID);
+
+ u8 rfPath;
+ u32 reg58 = 0x0;
+ switch (pAdapter->mppriv.antenna_tx) {
+ case ANTENNA_A:
+ default:
+ rfPath = RF_PATH_A;
+ break;
+ case ANTENNA_B:
+ rfPath = RF_PATH_B;
+ break;
+ case ANTENNA_C:
+ rfPath = RF_PATH_C;
+ break;
+ }
+
+ pAdapter->mppriv.MptCtx.bSingleTone = bStart;
+ if (bStart) {
+ /* Start Single Tone. */
+ RT_TRACE(_module_mp_, _drv_alert_, ("SetSingleToneTx: test start\n"));
+ /* <20120326, Kordan> To amplify the power of tone for Xtal calibration. (asked by Edlu) */
+ if (IS_HARDWARE_TYPE_8188E(pAdapter)) {
+ reg58 = PHY_QueryRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask);
+ reg58 &= 0xFFFFFFF0;
+ reg58 += 2;
+ PHY_SetRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask, reg58);
+ }
+ PHY_SetBBReg(pAdapter, rFPGA0_RFMOD, bCCKEn, 0x0);
+ PHY_SetBBReg(pAdapter, rFPGA0_RFMOD, bOFDMEn, 0x0);
+
+ if (is92C) {
+ _write_rfreg(pAdapter, RF_PATH_A, 0x21, BIT19, 0x01);
+ rtw_usleep_os(100);
+ if (rfPath == RF_PATH_A)
+ write_rfreg(pAdapter, RF_PATH_B, 0x00, 0x10000); /* PAD all on. */
+ else if (rfPath == RF_PATH_B)
+ write_rfreg(pAdapter, RF_PATH_A, 0x00, 0x10000); /* PAD all on. */
+ write_rfreg(pAdapter, rfPath, 0x00, 0x2001f); /* PAD all on. */
+ rtw_usleep_os(100);
+ } else {
+ write_rfreg(pAdapter, rfPath, 0x21, 0xd4000);
+ rtw_usleep_os(100);
+ write_rfreg(pAdapter, rfPath, 0x00, 0x2001f); /* PAD all on. */
+ rtw_usleep_os(100);
+ }
+
+ /* for dynamic set Power index. */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000500);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000500);
+
+ } else {
+ /* Stop Single Tone. */
+ RT_TRACE(_module_mp_, _drv_alert_, ("SetSingleToneTx: test stop\n"));
+
+ /* <20120326, Kordan> To amplify the power of tone for Xtal calibration. (asked by Edlu) */
+ /* <20120326, Kordan> Only in single tone mode. (asked by Edlu) */
+ if (IS_HARDWARE_TYPE_8188E(pAdapter)) {
+ reg58 = PHY_QueryRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask);
+ reg58 &= 0xFFFFFFF0;
+ PHY_SetRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask, reg58);
+ }
+ write_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn, 0x1);
+ write_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn, 0x1);
+ if (is92C) {
+ _write_rfreg(pAdapter, RF_PATH_A, 0x21, BIT19, 0x00);
+ rtw_usleep_os(100);
+ write_rfreg(pAdapter, RF_PATH_A, 0x00, 0x32d75); /* PAD all on. */
+ write_rfreg(pAdapter, RF_PATH_B, 0x00, 0x32d75); /* PAD all on. */
+ rtw_usleep_os(100);
+ } else {
+ write_rfreg(pAdapter, rfPath, 0x21, 0x54000);
+ rtw_usleep_os(100);
+ write_rfreg(pAdapter, rfPath, 0x00, 0x30000); /* PAD all on. */
+ rtw_usleep_os(100);
+ }
+
+ /* Stop for dynamic set Power index. */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000100);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000100);
+ }
+}
+
+
+
+void Hal_SetCarrierSuppressionTx(struct adapter *pAdapter, u8 bStart)
+{
+ pAdapter->mppriv.MptCtx.bCarrierSuppression = bStart;
+ if (bStart) {
+ /* Start Carrier Suppression. */
+ RT_TRACE(_module_mp_, _drv_alert_, ("SetCarrierSuppressionTx: test start\n"));
+ if (pAdapter->mppriv.rateidx <= MPT_RATE_11M) {
+ /* 1. if CCK block on? */
+ if (!read_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn))
+ write_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn, bEnable);/* set CCK block on */
+
+ /* Turn Off All Test Mode */
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
+
+ write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, 0x2); /* transmit mode */
+ write_bbreg(pAdapter, rCCK0_System, bCCKScramble, 0x0); /* turn off scramble setting */
+
+ /* Set CCK Tx Test Rate */
+ write_bbreg(pAdapter, rCCK0_System, bCCKTxRate, 0x0); /* Set FTxRate to 1Mbps */
+ }
+
+ /* for dynamic set Power index. */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000500);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000500);
+ } else {
+ /* Stop Carrier Suppression. */
+ RT_TRACE(_module_mp_, _drv_alert_, ("SetCarrierSuppressionTx: test stop\n"));
+ if (pAdapter->mppriv.rateidx <= MPT_RATE_11M) {
+ write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, 0x0); /* normal mode */
+ write_bbreg(pAdapter, rCCK0_System, bCCKScramble, 0x1); /* turn on scramble setting */
+
+ /* BB Reset */
+ write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x0);
+ write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x1);
+ }
+
+ /* Stop for dynamic set Power index. */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000100);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000100);
+ }
+}
+
+void Hal_SetCCKContinuousTx(struct adapter *pAdapter, u8 bStart)
+{
+ u32 cckrate;
+
+ if (bStart) {
+ RT_TRACE(_module_mp_, _drv_alert_,
+ ("SetCCKContinuousTx: test start\n"));
+
+ /* 1. if CCK block on? */
+ if (!read_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn))
+ write_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn, bEnable);/* set CCK block on */
+
+ /* Turn Off All Test Mode */
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
+ /* Set CCK Tx Test Rate */
+ cckrate = pAdapter->mppriv.rateidx;
+ write_bbreg(pAdapter, rCCK0_System, bCCKTxRate, cckrate);
+ write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, 0x2); /* transmit mode */
+ write_bbreg(pAdapter, rCCK0_System, bCCKScramble, bEnable); /* turn on scramble setting */
+
+ /* for dynamic set Power index. */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000500);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000500);
+ } else {
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("SetCCKContinuousTx: test stop\n"));
+
+ write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, 0x0); /* normal mode */
+ write_bbreg(pAdapter, rCCK0_System, bCCKScramble, bEnable); /* turn on scramble setting */
+
+ /* BB Reset */
+ write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x0);
+ write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x1);
+
+ /* Stop for dynamic set Power index. */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000100);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000100);
+ }
+
+ pAdapter->mppriv.MptCtx.bCckContTx = bStart;
+ pAdapter->mppriv.MptCtx.bOfdmContTx = false;
+} /* mpt_StartCckContTx */
+
+void Hal_SetOFDMContinuousTx(struct adapter *pAdapter, u8 bStart)
+{
+ if (bStart) {
+ RT_TRACE(_module_mp_, _drv_info_, ("SetOFDMContinuousTx: test start\n"));
+ /* 1. if OFDM block on? */
+ if (!read_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn))
+ write_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn, bEnable);/* set OFDM block on */
+
+ /* 2. set CCK test mode off, set to CCK normal mode */
+ write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, bDisable);
+
+ /* 3. turn on scramble setting */
+ write_bbreg(pAdapter, rCCK0_System, bCCKScramble, bEnable);
+ /* 4. Turn On Continue Tx and turn off the other test modes. */
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bEnable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
+
+ /* for dynamic set Power index. */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000500);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000500);
+
+ } else {
+ RT_TRACE(_module_mp_, _drv_info_, ("SetOFDMContinuousTx: test stop\n"));
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
+ write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
+ /* Delay 10 ms */
+ rtw_msleep_os(10);
+ /* BB Reset */
+ write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x0);
+ write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x1);
+
+ /* Stop for dynamic set Power index. */
+ write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000100);
+ write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000100);
+ }
+
+ pAdapter->mppriv.MptCtx.bCckContTx = false;
+ pAdapter->mppriv.MptCtx.bOfdmContTx = bStart;
+} /* mpt_StartOfdmContTx */
+
+void Hal_SetContinuousTx(struct adapter *pAdapter, u8 bStart)
+{
+ RT_TRACE(_module_mp_, _drv_info_,
+ ("SetContinuousTx: rate:%d\n", pAdapter->mppriv.rateidx));
+
+ pAdapter->mppriv.MptCtx.bStartContTx = bStart;
+ if (pAdapter->mppriv.rateidx <= MPT_RATE_11M)
+ Hal_SetCCKContinuousTx(pAdapter, bStart);
+ else if ((pAdapter->mppriv.rateidx >= MPT_RATE_6M) &&
+ (pAdapter->mppriv.rateidx <= MPT_RATE_MCS15))
+ Hal_SetOFDMContinuousTx(pAdapter, bStart);
+}
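+/* Dispatch sketch: rate indices up to MPT_RATE_11M are treated as CCK and go to the
+ * CCK continuous-Tx path, MPT_RATE_6M..MPT_RATE_MCS15 go to the OFDM path, and any
+ * other index is silently ignored. */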
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c b/drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c
new file mode 100644
index 00000000000..ff468a68e32
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_phycfg.c
@@ -0,0 +1,1144 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTL8188E_PHYCFG_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtw_iol.h>
+#include <rtl8188e_hal.h>
+
+/*---------------------------Define Local Constant---------------------------*/
+/* Channel switch:The size of command tables for switch channel*/
+#define MAX_PRECMD_CNT 16
+#define MAX_RFDEPENDCMD_CNT 16
+#define MAX_POSTCMD_CNT 16
+
+#define MAX_DOZE_WAITING_TIMES_9x 64
+
+/*---------------------------Define Local Constant---------------------------*/
+
+
+/*------------------------Define global variable-----------------------------*/
+
+/*------------------------Define local variable------------------------------*/
+
+
+/*--------------------Define export function prototype-----------------------*/
+/* Please refer to header file */
+/*--------------------Define export function prototype-----------------------*/
+
+/*----------------------------Function Body----------------------------------*/
+/* */
+/* 1. BB register R/W API */
+/* */
+
+/**
+* Function: phy_CalculateBitShift
+*
+* OverView: Get shifted position of the BitMask
+*
+* Input:
+* u32 BitMask,
+*
+* Output: none
+* Return:		u32	Return the bit shift position of the mask
+*/
+static u32 phy_CalculateBitShift(u32 BitMask)
+{
+ u32 i;
+
+ for (i = 0; i <= 31; i++) {
+ if (((BitMask>>i) & 0x1) == 1)
+ break;
+ }
+ return i;
+}
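+/* Worked example (hypothetical mask): for BitMask = 0x00F0 the lowest set bit is
+ * bit 4, so phy_CalculateBitShift() returns 4 and a field read becomes
+ * (reg_value & 0x00F0) >> 4, while a field write shifts the new value left by 4
+ * before merging it into the register. */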
+
+/**
+* Function: PHY_QueryBBReg
+*
+* OverView:	Read "specific bits" from BB register
+*
+* Input:
+* struct adapter *Adapter,
+* u32 RegAddr, The target address to be readback
+* u32 BitMask The target bit position in the target address
+* to be readback
+* Output: None
+* Return: u32 Data The readback register value
+* Note: This function is equal to "GetRegSetting" in PHY programming guide
+*/
+u32
+rtl8188e_PHY_QueryBBReg(
+ struct adapter *Adapter,
+ u32 RegAddr,
+ u32 BitMask
+ )
+{
+ u32 ReturnValue = 0, OriginalValue, BitShift;
+
+ OriginalValue = rtw_read32(Adapter, RegAddr);
+ BitShift = phy_CalculateBitShift(BitMask);
+ ReturnValue = (OriginalValue & BitMask) >> BitShift;
+ return ReturnValue;
+}
+
+
+/**
+* Function: PHY_SetBBReg
+*
+* OverView: Write "Specific bits" to BB register (page 8~)
+*
+* Input:
+* struct adapter *Adapter,
+* u32 RegAddr, The target address to be modified
+* u32 BitMask The target bit position in the target address
+* to be modified
+* u32 Data The new register value in the target bit position
+* of the target address
+*
+* Output: None
+* Return: None
+* Note: This function is equal to "PutRegSetting" in PHY programming guide
+*/
+
+void rtl8188e_PHY_SetBBReg(struct adapter *Adapter, u32 RegAddr, u32 BitMask, u32 Data)
+{
+ u32 OriginalValue, BitShift;
+
+ if (BitMask != bMaskDWord) { /* if not "double word" write */
+ OriginalValue = rtw_read32(Adapter, RegAddr);
+ BitShift = phy_CalculateBitShift(BitMask);
+ Data = ((OriginalValue & (~BitMask)) | (Data << BitShift));
+ }
+
+ rtw_write32(Adapter, RegAddr, Data);
+}
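+/* Usage sketch (hypothetical values): writing 0x5 with BitMask = 0x0000F000 reads
+ * the register, clears bits [15:12] and ORs in (0x5 << 12) before writing back;
+ * with BitMask == bMaskDWord the read-modify-write is skipped and the 32-bit value
+ * is written as-is. */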
+
+
+/* */
+/* 2. RF register R/W API */
+/* */
+/**
+* Function: phy_RFSerialRead
+*
+* OverView:	Read register from RF chips
+*
+* Input:
+* struct adapter *Adapter,
+* enum rf_radio_path eRFPath, Radio path of A/B/C/D
+* u32 Offset, The target address to be read
+*
+* Output: None
+* Return:		u32			readback value
+* Note:		There are three types of serial operations:
+*		1. Software serial write
+*		2. Hardware LSSI - Low Speed Serial Interface
+*		3. Hardware HSSI - High Speed Serial Interface (serial write)
+*		The driver needs to implement (1) and (2).
+* This function is equal to the combination of RF_ReadReg() and RFLSSIRead()
+*/
+static u32
+phy_RFSerialRead(
+ struct adapter *Adapter,
+ enum rf_radio_path eRFPath,
+ u32 Offset
+ )
+{
+ u32 retValue = 0;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct bb_reg_def *pPhyReg = &pHalData->PHYRegDef[eRFPath];
+ u32 NewOffset;
+ u32 tmplong, tmplong2;
+ u8 RfPiEnable = 0;
+ /* */
+ /* Make sure RF register offset is correct */
+ /* */
+ Offset &= 0xff;
+
+ /* */
+ /* Switch page for 8256 RF IC */
+ /* */
+ NewOffset = Offset;
+
+ /* For 92S LSSI Read RFLSSIRead */
+ /* For RF A/B write 0x824/82c(does not work in the future) */
+	/* For RF A/B, write 0x824/0x82c (this may not work in the future) */
+ tmplong = PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter2, bMaskDWord);
+ if (eRFPath == RF_PATH_A)
+ tmplong2 = tmplong;
+ else
+ tmplong2 = PHY_QueryBBReg(Adapter, pPhyReg->rfHSSIPara2, bMaskDWord);
+
+ tmplong2 = (tmplong2 & (~bLSSIReadAddress)) | (NewOffset<<23) | bLSSIReadEdge; /* T65 RF */
+
+ PHY_SetBBReg(Adapter, rFPGA0_XA_HSSIParameter2, bMaskDWord, tmplong&(~bLSSIReadEdge));
+ rtw_udelay_os(10);/* PlatformStallExecution(10); */
+
+ PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, bMaskDWord, tmplong2);
+ rtw_udelay_os(100);/* PlatformStallExecution(100); */
+
+ rtw_udelay_os(10);/* PlatformStallExecution(10); */
+
+ if (eRFPath == RF_PATH_A)
+ RfPiEnable = (u8)PHY_QueryBBReg(Adapter, rFPGA0_XA_HSSIParameter1, BIT8);
+ else if (eRFPath == RF_PATH_B)
+ RfPiEnable = (u8)PHY_QueryBBReg(Adapter, rFPGA0_XB_HSSIParameter1, BIT8);
+
+ if (RfPiEnable) { /* Read from BBreg8b8, 12 bits for 8190, 20bits for T65 RF */
+ retValue = PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBackPi, bLSSIReadBackData);
+ } else { /* Read from BBreg8a0, 12 bits for 8190, 20 bits for T65 RF */
+ retValue = PHY_QueryBBReg(Adapter, pPhyReg->rfLSSIReadBack, bLSSIReadBackData);
+ }
+ return retValue;
+}
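+/* Read-sequence sketch: the offset is placed in HSSI parameter 2 together with the
+ * LSSI read-edge bit, and after the short delays the result is fetched from either
+ * the PI or the SI read-back register, chosen by BIT8 of HSSI parameter 1 for the
+ * selected path. */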
+
+/**
+* Function: phy_RFSerialWrite
+*
+* OverView: Write data to RF register (page 8~)
+*
+* Input:
+* struct adapter *Adapter,
+* enum rf_radio_path eRFPath, Radio path of A/B/C/D
+* u32 Offset, The target address to be read
+* u32 Data The new register Data in the target bit position
+* of the target to be read
+*
+* Output: None
+* Return: None
+* Note:		There are three types of serial operations:
+*		1. Software serial write
+*		2. Hardware LSSI - Low Speed Serial Interface
+*		3. Hardware HSSI - High Speed Serial Interface (serial write)
+*		The driver needs to implement (1) and (2).
+* This function is equal to the combination of RF_ReadReg() and RFLSSIRead()
+ *
+ * Note: For RF8256 only
+ * The total count of RTL8256 (Zebra4) registers is around 36, but it only employs a
+ * 4-bit RF address. RTL8256 uses "register mode control bits" (Reg00[12], Reg00[10])
+ * to access register addresses bigger than 0xf. See "Appendix-4 in PHY Configuration
+ * programming guide" for more details.
+ * Thus, we define a sub-function for RTL8256 register address conversion
+ * ===========================================================
+ * Register Mode RegCTL[1] RegCTL[0] Note
+ * (Reg00[12]) (Reg00[10])
+ * ===========================================================
+ * Reg_Mode0 0 x Reg 0 ~15(0x0 ~ 0xf)
+ * ------------------------------------------------------------------
+ * Reg_Mode1 1 0 Reg 16 ~30(0x1 ~ 0xf)
+ * ------------------------------------------------------------------
+ * Reg_Mode2 1 1 Reg 31 ~ 45(0x1 ~ 0xf)
+ * ------------------------------------------------------------------
+ *
+ * 2008/09/02 MH Add 92S RF definition
+ *
+ *
+ *
+*/
+static void
+phy_RFSerialWrite(
+ struct adapter *Adapter,
+ enum rf_radio_path eRFPath,
+ u32 Offset,
+ u32 Data
+ )
+{
+ u32 DataAndAddr = 0;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct bb_reg_def *pPhyReg = &pHalData->PHYRegDef[eRFPath];
+ u32 NewOffset;
+
+
+ /* 2009/06/17 MH We can not execute IO for power save or other accident mode. */
+
+ Offset &= 0xff;
+
+ /* */
+ /* Switch page for 8256 RF IC */
+ /* */
+ NewOffset = Offset;
+
+ /* */
+ /* Put write addr in [5:0] and write data in [31:16] */
+ /* */
+ DataAndAddr = ((NewOffset<<20) | (Data&0x000fffff)) & 0x0fffffff; /* T65 RF */
+
+ /* */
+ /* Write Operation */
+ /* */
+ PHY_SetBBReg(Adapter, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);
+}
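+/* Packing example (hypothetical values): Offset = 0x18, Data = 0x12345 gives
+ * DataAndAddr = ((0x18 << 20) | 0x12345) & 0x0fffffff = 0x01812345, i.e. the RF
+ * register address ends up in bits [27:20] and the 20-bit data in bits [19:0]. */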
+
+/**
+* Function: PHY_QueryRFReg
+*
+* OverView:	Query "specific bits" from RF register (page 8~)
+*
+* Input:
+* struct adapter *Adapter,
+* enum rf_radio_path eRFPath, Radio path of A/B/C/D
+* u32 RegAddr, The target address to be read
+* u32 BitMask The target bit position in the target address
+* to be read
+*
+* Output: None
+* Return: u32 Readback value
+* Note: This function is equal to "GetRFRegSetting" in PHY programming guide
+*/
+u32 rtl8188e_PHY_QueryRFReg(struct adapter *Adapter, enum rf_radio_path eRFPath,
+ u32 RegAddr, u32 BitMask)
+{
+ u32 Original_Value, Readback_Value, BitShift;
+
+ Original_Value = phy_RFSerialRead(Adapter, eRFPath, RegAddr);
+
+ BitShift = phy_CalculateBitShift(BitMask);
+ Readback_Value = (Original_Value & BitMask) >> BitShift;
+ return Readback_Value;
+}
+
+/**
+* Function: PHY_SetRFReg
+*
+* OverView: Write "Specific bits" to RF register (page 8~)
+*
+* Input:
+* struct adapter *Adapter,
+* enum rf_radio_path eRFPath, Radio path of A/B/C/D
+* u32 RegAddr, The target address to be modified
+* u32 BitMask The target bit position in the target address
+* to be modified
+* u32 Data The new register Data in the target bit position
+* of the target address
+*
+* Output: None
+* Return: None
+* Note: This function is equal to "PutRFRegSetting" in PHY programming guide
+*/
+void
+rtl8188e_PHY_SetRFReg(
+ struct adapter *Adapter,
+ enum rf_radio_path eRFPath,
+ u32 RegAddr,
+ u32 BitMask,
+ u32 Data
+ )
+{
+ u32 Original_Value, BitShift;
+
+ /* RF data is 12 bits only */
+ if (BitMask != bRFRegOffsetMask) {
+ Original_Value = phy_RFSerialRead(Adapter, eRFPath, RegAddr);
+ BitShift = phy_CalculateBitShift(BitMask);
+ Data = ((Original_Value & (~BitMask)) | (Data << BitShift));
+ }
+
+ phy_RFSerialWrite(Adapter, eRFPath, RegAddr, Data);
+}
+
+/* */
+/* 3. Initial MAC/BB/RF config by reading MAC/BB/RF txt. */
+/* */
+
+/*-----------------------------------------------------------------------------
+ * Function: PHY_MACConfig8192C
+ *
+ * Overview:	Config MAC by header file or parameter file.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 08/12/2008 MHC Create Version 0.
+ *
+ *---------------------------------------------------------------------------*/
+s32 PHY_MACConfig8188E(struct adapter *Adapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ int rtStatus = _SUCCESS;
+
+ /* */
+ /* Config MAC */
+ /* */
+ if (HAL_STATUS_FAILURE == ODM_ConfigMACWithHeaderFile(&pHalData->odmpriv))
+ rtStatus = _FAIL;
+
+ /* 2010.07.13 AMPDU aggregation number B */
+ rtw_write16(Adapter, REG_MAX_AGGR_NUM, MAX_AGGR_NUM);
+
+ return rtStatus;
+}
+
+/**
+* Function: phy_InitBBRFRegisterDefinition
+*
+* OverView: Initialize Register definition offset for Radio Path A/B/C/D
+*
+* Input:
+* struct adapter *Adapter,
+*
+* Output: None
+* Return: None
+* Note:	The initialization value is constant and it should never be changed
+*/
+static void
+phy_InitBBRFRegisterDefinition(
+ struct adapter *Adapter
+)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+
+	/*  RF Interface Software Control */
+ pHalData->PHYRegDef[RF_PATH_A].rfintfs = rFPGA0_XAB_RFInterfaceSW; /* 16 LSBs if read 32-bit from 0x870 */
+ pHalData->PHYRegDef[RF_PATH_B].rfintfs = rFPGA0_XAB_RFInterfaceSW; /* 16 MSBs if read 32-bit from 0x870 (16-bit for 0x872) */
+ pHalData->PHYRegDef[RF_PATH_C].rfintfs = rFPGA0_XCD_RFInterfaceSW;/* 16 LSBs if read 32-bit from 0x874 */
+ pHalData->PHYRegDef[RF_PATH_D].rfintfs = rFPGA0_XCD_RFInterfaceSW;/* 16 MSBs if read 32-bit from 0x874 (16-bit for 0x876) */
+
+ /* RF Interface Readback Value */
+ pHalData->PHYRegDef[RF_PATH_A].rfintfi = rFPGA0_XAB_RFInterfaceRB; /* 16 LSBs if read 32-bit from 0x8E0 */
+ pHalData->PHYRegDef[RF_PATH_B].rfintfi = rFPGA0_XAB_RFInterfaceRB;/* 16 MSBs if read 32-bit from 0x8E0 (16-bit for 0x8E2) */
+ pHalData->PHYRegDef[RF_PATH_C].rfintfi = rFPGA0_XCD_RFInterfaceRB;/* 16 LSBs if read 32-bit from 0x8E4 */
+ pHalData->PHYRegDef[RF_PATH_D].rfintfi = rFPGA0_XCD_RFInterfaceRB;/* 16 MSBs if read 32-bit from 0x8E4 (16-bit for 0x8E6) */
+
+ /* RF Interface Output (and Enable) */
+ pHalData->PHYRegDef[RF_PATH_A].rfintfo = rFPGA0_XA_RFInterfaceOE; /* 16 LSBs if read 32-bit from 0x860 */
+ pHalData->PHYRegDef[RF_PATH_B].rfintfo = rFPGA0_XB_RFInterfaceOE; /* 16 LSBs if read 32-bit from 0x864 */
+
+ /* RF Interface (Output and) Enable */
+ pHalData->PHYRegDef[RF_PATH_A].rfintfe = rFPGA0_XA_RFInterfaceOE; /* 16 MSBs if read 32-bit from 0x860 (16-bit for 0x862) */
+ pHalData->PHYRegDef[RF_PATH_B].rfintfe = rFPGA0_XB_RFInterfaceOE; /* 16 MSBs if read 32-bit from 0x864 (16-bit for 0x866) */
+
+	/* Addr of LSSI. Write RF register by driver */
+ pHalData->PHYRegDef[RF_PATH_A].rf3wireOffset = rFPGA0_XA_LSSIParameter; /* LSSI Parameter */
+ pHalData->PHYRegDef[RF_PATH_B].rf3wireOffset = rFPGA0_XB_LSSIParameter;
+
+ /* RF parameter */
+ pHalData->PHYRegDef[RF_PATH_A].rfLSSI_Select = rFPGA0_XAB_RFParameter; /* BB Band Select */
+ pHalData->PHYRegDef[RF_PATH_B].rfLSSI_Select = rFPGA0_XAB_RFParameter;
+ pHalData->PHYRegDef[RF_PATH_C].rfLSSI_Select = rFPGA0_XCD_RFParameter;
+ pHalData->PHYRegDef[RF_PATH_D].rfLSSI_Select = rFPGA0_XCD_RFParameter;
+
+ /* Tx AGC Gain Stage (same for all path. Should we remove this?) */
+ pHalData->PHYRegDef[RF_PATH_A].rfTxGainStage = rFPGA0_TxGainStage; /* Tx gain stage */
+ pHalData->PHYRegDef[RF_PATH_B].rfTxGainStage = rFPGA0_TxGainStage; /* Tx gain stage */
+ pHalData->PHYRegDef[RF_PATH_C].rfTxGainStage = rFPGA0_TxGainStage; /* Tx gain stage */
+ pHalData->PHYRegDef[RF_PATH_D].rfTxGainStage = rFPGA0_TxGainStage; /* Tx gain stage */
+
+	/* Transceiver A~D HSSI Parameter-1 */
+ pHalData->PHYRegDef[RF_PATH_A].rfHSSIPara1 = rFPGA0_XA_HSSIParameter1; /* wire control parameter1 */
+ pHalData->PHYRegDef[RF_PATH_B].rfHSSIPara1 = rFPGA0_XB_HSSIParameter1; /* wire control parameter1 */
+
+	/* Transceiver A~D HSSI Parameter-2 */
+ pHalData->PHYRegDef[RF_PATH_A].rfHSSIPara2 = rFPGA0_XA_HSSIParameter2; /* wire control parameter2 */
+ pHalData->PHYRegDef[RF_PATH_B].rfHSSIPara2 = rFPGA0_XB_HSSIParameter2; /* wire control parameter2 */
+
+ /* RF switch Control */
+ pHalData->PHYRegDef[RF_PATH_A].rfSwitchControl = rFPGA0_XAB_SwitchControl; /* TR/Ant switch control */
+ pHalData->PHYRegDef[RF_PATH_B].rfSwitchControl = rFPGA0_XAB_SwitchControl;
+ pHalData->PHYRegDef[RF_PATH_C].rfSwitchControl = rFPGA0_XCD_SwitchControl;
+ pHalData->PHYRegDef[RF_PATH_D].rfSwitchControl = rFPGA0_XCD_SwitchControl;
+
+ /* AGC control 1 */
+ pHalData->PHYRegDef[RF_PATH_A].rfAGCControl1 = rOFDM0_XAAGCCore1;
+ pHalData->PHYRegDef[RF_PATH_B].rfAGCControl1 = rOFDM0_XBAGCCore1;
+ pHalData->PHYRegDef[RF_PATH_C].rfAGCControl1 = rOFDM0_XCAGCCore1;
+ pHalData->PHYRegDef[RF_PATH_D].rfAGCControl1 = rOFDM0_XDAGCCore1;
+
+ /* AGC control 2 */
+ pHalData->PHYRegDef[RF_PATH_A].rfAGCControl2 = rOFDM0_XAAGCCore2;
+ pHalData->PHYRegDef[RF_PATH_B].rfAGCControl2 = rOFDM0_XBAGCCore2;
+ pHalData->PHYRegDef[RF_PATH_C].rfAGCControl2 = rOFDM0_XCAGCCore2;
+ pHalData->PHYRegDef[RF_PATH_D].rfAGCControl2 = rOFDM0_XDAGCCore2;
+
+ /* RX AFE control 1 */
+ pHalData->PHYRegDef[RF_PATH_A].rfRxIQImbalance = rOFDM0_XARxIQImbalance;
+ pHalData->PHYRegDef[RF_PATH_B].rfRxIQImbalance = rOFDM0_XBRxIQImbalance;
+ pHalData->PHYRegDef[RF_PATH_C].rfRxIQImbalance = rOFDM0_XCRxIQImbalance;
+ pHalData->PHYRegDef[RF_PATH_D].rfRxIQImbalance = rOFDM0_XDRxIQImbalance;
+
+ /* RX AFE control 1 */
+ pHalData->PHYRegDef[RF_PATH_A].rfRxAFE = rOFDM0_XARxAFE;
+ pHalData->PHYRegDef[RF_PATH_B].rfRxAFE = rOFDM0_XBRxAFE;
+ pHalData->PHYRegDef[RF_PATH_C].rfRxAFE = rOFDM0_XCRxAFE;
+ pHalData->PHYRegDef[RF_PATH_D].rfRxAFE = rOFDM0_XDRxAFE;
+
+ /* Tx AFE control 1 */
+ pHalData->PHYRegDef[RF_PATH_A].rfTxIQImbalance = rOFDM0_XATxIQImbalance;
+ pHalData->PHYRegDef[RF_PATH_B].rfTxIQImbalance = rOFDM0_XBTxIQImbalance;
+ pHalData->PHYRegDef[RF_PATH_C].rfTxIQImbalance = rOFDM0_XCTxIQImbalance;
+ pHalData->PHYRegDef[RF_PATH_D].rfTxIQImbalance = rOFDM0_XDTxIQImbalance;
+
+ /* Tx AFE control 2 */
+ pHalData->PHYRegDef[RF_PATH_A].rfTxAFE = rOFDM0_XATxAFE;
+ pHalData->PHYRegDef[RF_PATH_B].rfTxAFE = rOFDM0_XBTxAFE;
+ pHalData->PHYRegDef[RF_PATH_C].rfTxAFE = rOFDM0_XCTxAFE;
+ pHalData->PHYRegDef[RF_PATH_D].rfTxAFE = rOFDM0_XDTxAFE;
+
+	/* Transceiver LSSI Readback SI mode */
+ pHalData->PHYRegDef[RF_PATH_A].rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
+ pHalData->PHYRegDef[RF_PATH_B].rfLSSIReadBack = rFPGA0_XB_LSSIReadBack;
+ pHalData->PHYRegDef[RF_PATH_C].rfLSSIReadBack = rFPGA0_XC_LSSIReadBack;
+ pHalData->PHYRegDef[RF_PATH_D].rfLSSIReadBack = rFPGA0_XD_LSSIReadBack;
+
+	/* Transceiver LSSI Readback PI mode */
+ pHalData->PHYRegDef[RF_PATH_A].rfLSSIReadBackPi = TransceiverA_HSPI_Readback;
+ pHalData->PHYRegDef[RF_PATH_B].rfLSSIReadBackPi = TransceiverB_HSPI_Readback;
+}
+
+void storePwrIndexDiffRateOffset(struct adapter *Adapter, u32 RegAddr, u32 BitMask, u32 Data)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+
+ if (RegAddr == rTxAGC_A_Rate18_06)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][0] = Data;
+ if (RegAddr == rTxAGC_A_Rate54_24)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][1] = Data;
+ if (RegAddr == rTxAGC_A_CCK1_Mcs32)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][6] = Data;
+ if (RegAddr == rTxAGC_B_CCK11_A_CCK2_11 && BitMask == 0xffffff00)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][7] = Data;
+ if (RegAddr == rTxAGC_A_Mcs03_Mcs00)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][2] = Data;
+ if (RegAddr == rTxAGC_A_Mcs07_Mcs04)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][3] = Data;
+ if (RegAddr == rTxAGC_A_Mcs11_Mcs08)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][4] = Data;
+ if (RegAddr == rTxAGC_A_Mcs15_Mcs12) {
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][5] = Data;
+ if (pHalData->rf_type == RF_1T1R)
+ pHalData->pwrGroupCnt++;
+ }
+ if (RegAddr == rTxAGC_B_Rate18_06)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][8] = Data;
+ if (RegAddr == rTxAGC_B_Rate54_24)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][9] = Data;
+ if (RegAddr == rTxAGC_B_CCK1_55_Mcs32)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][14] = Data;
+ if (RegAddr == rTxAGC_B_CCK11_A_CCK2_11 && BitMask == 0x000000ff)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][15] = Data;
+ if (RegAddr == rTxAGC_B_Mcs03_Mcs00)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][10] = Data;
+ if (RegAddr == rTxAGC_B_Mcs07_Mcs04)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][11] = Data;
+ if (RegAddr == rTxAGC_B_Mcs11_Mcs08)
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][12] = Data;
+ if (RegAddr == rTxAGC_B_Mcs15_Mcs12) {
+ pHalData->MCSTxPowerLevelOriginalOffset[pHalData->pwrGroupCnt][13] = Data;
+ if (pHalData->rf_type != RF_1T1R)
+ pHalData->pwrGroupCnt++;
+ }
+}
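+/* Indexing sketch: each PHY_REG_PG write lands in
+ * MCSTxPowerLevelOriginalOffset[pwrGroupCnt][0..15] according to its register
+ * address (path A entries in slots 0-7, path B in slots 8-15), and pwrGroupCnt is
+ * advanced after the last register of a group (path A's MCS15_12 on 1T1R parts,
+ * path B's otherwise). */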
+
+static int phy_BB8188E_Config_ParaFile(struct adapter *Adapter)
+{
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(Adapter);
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ int rtStatus = _SUCCESS;
+
+ /* */
+ /* 1. Read PHY_REG.TXT BB INIT!! */
+	/*  We will separate as 88C / 92C according to chip version */
+ /* */
+ if (HAL_STATUS_FAILURE == ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_PHY_REG))
+ rtStatus = _FAIL;
+ if (rtStatus != _SUCCESS)
+ goto phy_BB8190_Config_ParaFile_Fail;
+
+ /* 2. If EEPROM or EFUSE autoload OK, We must config by PHY_REG_PG.txt */
+ if (!pEEPROM->bautoload_fail_flag) {
+ pHalData->pwrGroupCnt = 0;
+
+ if (HAL_STATUS_FAILURE == ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_PHY_REG_PG))
+ rtStatus = _FAIL;
+ }
+
+ if (rtStatus != _SUCCESS)
+ goto phy_BB8190_Config_ParaFile_Fail;
+
+ /* 3. BB AGC table Initialization */
+ if (HAL_STATUS_FAILURE == ODM_ConfigBBWithHeaderFile(&pHalData->odmpriv, CONFIG_BB_AGC_TAB))
+ rtStatus = _FAIL;
+
+ if (rtStatus != _SUCCESS)
+ goto phy_BB8190_Config_ParaFile_Fail;
+
+phy_BB8190_Config_ParaFile_Fail:
+
+ return rtStatus;
+}
+
+int
+PHY_BBConfig8188E(
+ struct adapter *Adapter
+ )
+{
+ int rtStatus = _SUCCESS;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u32 RegVal;
+ u8 CrystalCap;
+
+ phy_InitBBRFRegisterDefinition(Adapter);
+
+
+ /* Enable BB and RF */
+ RegVal = rtw_read16(Adapter, REG_SYS_FUNC_EN);
+ rtw_write16(Adapter, REG_SYS_FUNC_EN, (u16)(RegVal|BIT13|BIT0|BIT1));
+
+ /* 20090923 Joseph: Advised by Steven and Jenyu. Power sequence before init RF. */
+
+ rtw_write8(Adapter, REG_RF_CTRL, RF_EN|RF_RSTB|RF_SDMRSTB);
+
+ rtw_write8(Adapter, REG_SYS_FUNC_EN, FEN_USBA | FEN_USBD | FEN_BB_GLB_RSTn | FEN_BBRSTB);
+
+ /* Config BB and AGC */
+ rtStatus = phy_BB8188E_Config_ParaFile(Adapter);
+
+ /* write 0x24[16:11] = 0x24[22:17] = CrystalCap */
+ CrystalCap = pHalData->CrystalCap & 0x3F;
+ PHY_SetBBReg(Adapter, REG_AFE_XTAL_CTRL, 0x7ff800, (CrystalCap | (CrystalCap << 6)));
+
+ return rtStatus;
+}
+
+int PHY_RFConfig8188E(struct adapter *Adapter)
+{
+ int rtStatus = _SUCCESS;
+
+ /* RF config */
+ rtStatus = PHY_RF6052_Config8188E(Adapter);
+ return rtStatus;
+}
+
+
+/*-----------------------------------------------------------------------------
+ * Function: PHY_ConfigRFWithParaFile()
+ *
 + * Overview:	This function reads RF parameters from a general file format and does the RF 3-wire writes
+ *
+ * Input: struct adapter *Adapter
+ * ps8 pFileName
+ * enum rf_radio_path eRFPath
+ *
+ * Output: NONE
+ *
+ * Return: RT_STATUS_SUCCESS: configuration file exist
+ *
+ * Note: Delay may be required for RF configuration
+ *---------------------------------------------------------------------------*/
+int rtl8188e_PHY_ConfigRFWithParaFile(struct adapter *Adapter, u8 *pFileName, enum rf_radio_path eRFPath)
+{
+ return _SUCCESS;
+}
+
+void
+rtl8192c_PHY_GetHWRegOriginalValue(
+ struct adapter *Adapter
+ )
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+
+ /* read rx initial gain */
+ pHalData->DefaultInitialGain[0] = (u8)PHY_QueryBBReg(Adapter, rOFDM0_XAAGCCore1, bMaskByte0);
+ pHalData->DefaultInitialGain[1] = (u8)PHY_QueryBBReg(Adapter, rOFDM0_XBAGCCore1, bMaskByte0);
+ pHalData->DefaultInitialGain[2] = (u8)PHY_QueryBBReg(Adapter, rOFDM0_XCAGCCore1, bMaskByte0);
+ pHalData->DefaultInitialGain[3] = (u8)PHY_QueryBBReg(Adapter, rOFDM0_XDAGCCore1, bMaskByte0);
+
+ /* read framesync */
+ pHalData->framesync = (u8)PHY_QueryBBReg(Adapter, rOFDM0_RxDetector3, bMaskByte0);
+ pHalData->framesyncC34 = PHY_QueryBBReg(Adapter, rOFDM0_RxDetector2, bMaskDWord);
+}
+
+/* */
+/* Description: */
+/* Map dBm into Tx power index according to */
+/* current HW model, for example, RF and PA, and */
+/* current wireless mode. */
+/* By Bruce, 2008-01-29. */
+/* */
+static u8 phy_DbmToTxPwrIdx(struct adapter *Adapter, enum wireless_mode WirelessMode, int PowerInDbm)
+{
+ u8 TxPwrIdx = 0;
+ int Offset = 0;
+
+
+ /* */
+	/*  Tested by MP, we found that CCK index 0 equals 8 dBm, OFDM legacy equals */
+	/*  3 dBm, and OFDM HT equals 0 dBm, respectively. */
+	/*  Note: */
+	/*  The mapping may differ between NICs. Do not use this formula where an accurate result is needed. */
+ /* By Bruce, 2008-01-29. */
+ /* */
+ switch (WirelessMode) {
+ case WIRELESS_MODE_B:
+ Offset = -7;
+ break;
+
+ case WIRELESS_MODE_G:
+ case WIRELESS_MODE_N_24G:
+ default:
+ Offset = -8;
+ break;
+ }
+
+ if ((PowerInDbm - Offset) > 0)
+ TxPwrIdx = (u8)((PowerInDbm - Offset) * 2);
+ else
+ TxPwrIdx = 0;
+
+ /* Tx Power Index is too large. */
+ if (TxPwrIdx > MAX_TXPWR_IDX_NMODE_92S)
+ TxPwrIdx = MAX_TXPWR_IDX_NMODE_92S;
+
+ return TxPwrIdx;
+}
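+/* Worked example (hypothetical input): in WIRELESS_MODE_B (Offset = -7) a request
+ * of 12 dBm maps to TxPwrIdx = (12 - (-7)) * 2 = 38, which is then clamped to
+ * MAX_TXPWR_IDX_NMODE_92S if it exceeds that limit. */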
+
+/* */
+/* Description: */
+/* Map Tx power index into dBm according to */
+/* current HW model, for example, RF and PA, and */
+/* current wireless mode. */
+/* By Bruce, 2008-01-29. */
+/* */
+static int phy_TxPwrIdxToDbm(struct adapter *Adapter, enum wireless_mode WirelessMode, u8 TxPwrIdx)
+{
+ int Offset = 0;
+ int PwrOutDbm = 0;
+
+ /* */
+	/*  Tested by MP, we found that CCK index 0 equals -7 dBm and OFDM legacy equals -8 dBm. */
+	/*  Note: */
+	/*  The mapping may differ between NICs. Do not use this formula where an accurate result is needed. */
+ /* By Bruce, 2008-01-29. */
+ /* */
+ switch (WirelessMode) {
+ case WIRELESS_MODE_B:
+ Offset = -7;
+ break;
+ case WIRELESS_MODE_G:
+ case WIRELESS_MODE_N_24G:
+ default:
+ Offset = -8;
+ break;
+ }
+
+ PwrOutDbm = TxPwrIdx / 2 + Offset; /* Discard the decimal part. */
+
+ return PwrOutDbm;
+}
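+/* Worked example (hypothetical input): in WIRELESS_MODE_B a TxPwrIdx of 38 maps
+ * back to 38 / 2 + (-7) = 12 dBm; the integer division drops the 0.5 dBm step. */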
+
+
+/*-----------------------------------------------------------------------------
+ * Function: GetTxPowerLevel8190()
+ *
 + * Overview:	This function is exported to the "common" module
+ *
+ * Input: struct adapter *Adapter
+ * psByte Power Level
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ *---------------------------------------------------------------------------*/
+void PHY_GetTxPowerLevel8188E(struct adapter *Adapter, u32 *powerlevel)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u8 TxPwrLevel = 0;
+ int TxPwrDbm;
+
+ /* */
+ /* Because the Tx power indexes are different, we report the maximum of them to */
+ /* meet the CCX TPC request. By Bruce, 2008-01-31. */
+ /* */
+
+ /* CCK */
+ TxPwrLevel = pHalData->CurrentCckTxPwrIdx;
+ TxPwrDbm = phy_TxPwrIdxToDbm(Adapter, WIRELESS_MODE_B, TxPwrLevel);
+
+ /* Legacy OFDM */
+ TxPwrLevel = pHalData->CurrentOfdm24GTxPwrIdx + pHalData->LegacyHTTxPowerDiff;
+
+ /* Compare with Legacy OFDM Tx power. */
+ if (phy_TxPwrIdxToDbm(Adapter, WIRELESS_MODE_G, TxPwrLevel) > TxPwrDbm)
+ TxPwrDbm = phy_TxPwrIdxToDbm(Adapter, WIRELESS_MODE_G, TxPwrLevel);
+
+ /* HT OFDM */
+ TxPwrLevel = pHalData->CurrentOfdm24GTxPwrIdx;
+
+ /* Compare with HT OFDM Tx power. */
+ if (phy_TxPwrIdxToDbm(Adapter, WIRELESS_MODE_N_24G, TxPwrLevel) > TxPwrDbm)
+ TxPwrDbm = phy_TxPwrIdxToDbm(Adapter, WIRELESS_MODE_N_24G, TxPwrLevel);
+
+ *powerlevel = TxPwrDbm;
+}
+
+static void getTxPowerIndex88E(struct adapter *Adapter, u8 channel, u8 *cckPowerLevel,
+ u8 *ofdmPowerLevel, u8 *BW20PowerLevel,
+ u8 *BW40PowerLevel)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u8 index = (channel - 1);
+ u8 TxCount = 0, path_nums;
+
+ if ((RF_1T2R == pHalData->rf_type) || (RF_1T1R == pHalData->rf_type))
+ path_nums = 1;
+ else
+ path_nums = 2;
+
+ for (TxCount = 0; TxCount < path_nums; TxCount++) {
+ if (TxCount == RF_PATH_A) {
+ /* 1. CCK */
+ cckPowerLevel[TxCount] = pHalData->Index24G_CCK_Base[TxCount][index];
+ /* 2. OFDM */
+ ofdmPowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index]+
+ pHalData->OFDM_24G_Diff[TxCount][RF_PATH_A];
+ /* 1. BW20 */
+ BW20PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[TxCount][RF_PATH_A];
+ /* 2. BW40 */
+ BW40PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[TxCount][index];
+ } else if (TxCount == RF_PATH_B) {
+ /* 1. CCK */
+ cckPowerLevel[TxCount] = pHalData->Index24G_CCK_Base[TxCount][index];
+ /* 2. OFDM */
+ ofdmPowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[TxCount][index];
+ /* 1. BW20 */
+ BW20PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[TxCount][RF_PATH_A]+
+ pHalData->BW20_24G_Diff[TxCount][index];
+ /* 2. BW40 */
+ BW40PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[TxCount][index];
+ } else if (TxCount == RF_PATH_C) {
+ /* 1. CCK */
+ cckPowerLevel[TxCount] = pHalData->Index24G_CCK_Base[TxCount][index];
+ /* 2. OFDM */
+ ofdmPowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_B][index]+
+ pHalData->BW20_24G_Diff[TxCount][index];
+ /* 1. BW20 */
+ BW20PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_B][index]+
+ pHalData->BW20_24G_Diff[TxCount][index];
+ /* 2. BW40 */
+ BW40PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[TxCount][index];
+ } else if (TxCount == RF_PATH_D) {
+ /* 1. CCK */
+ cckPowerLevel[TxCount] = pHalData->Index24G_CCK_Base[TxCount][index];
+ /* 2. OFDM */
+ ofdmPowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_B][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_C][index]+
+ pHalData->BW20_24G_Diff[TxCount][index];
+
+ /* 1. BW20 */
+ BW20PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_A][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_B][index]+
+ pHalData->BW20_24G_Diff[RF_PATH_C][index]+
+ pHalData->BW20_24G_Diff[TxCount][index];
+
+ /* 2. BW40 */
+ BW40PowerLevel[TxCount] = pHalData->Index24G_BW40_Base[TxCount][index];
+ }
+ }
+}
+
+static void phy_PowerIndexCheck88E(struct adapter *Adapter, u8 channel, u8 *cckPowerLevel,
+ u8 *ofdmPowerLevel, u8 *BW20PowerLevel, u8 *BW40PowerLevel)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+
+ pHalData->CurrentCckTxPwrIdx = cckPowerLevel[0];
+ pHalData->CurrentOfdm24GTxPwrIdx = ofdmPowerLevel[0];
+ pHalData->CurrentBW2024GTxPwrIdx = BW20PowerLevel[0];
+ pHalData->CurrentBW4024GTxPwrIdx = BW40PowerLevel[0];
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: SetTxPowerLevel8190()
+ *
 + * Overview:	This function is exported to the "HalCommon" module
+ * We must consider RF path later!!!!!!!
+ *
+ * Input: struct adapter *Adapter
+ * u8 channel
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ * 2008/11/04 MHC We remove EEPROM_93C56.
 + *				We need to move CCX-related code to an independent file.
+ * 2009/01/21 MHC Support new EEPROM format from SD3 requirement.
+ *
+ *---------------------------------------------------------------------------*/
+void
+PHY_SetTxPowerLevel8188E(
+ struct adapter *Adapter,
+ u8 channel
+ )
+{
+ u8 cckPowerLevel[MAX_TX_COUNT] = {0};
+ u8 ofdmPowerLevel[MAX_TX_COUNT] = {0};/* [0]:RF-A, [1]:RF-B */
+ u8 BW20PowerLevel[MAX_TX_COUNT] = {0};
+ u8 BW40PowerLevel[MAX_TX_COUNT] = {0};
+
+ getTxPowerIndex88E(Adapter, channel, &cckPowerLevel[0], &ofdmPowerLevel[0], &BW20PowerLevel[0], &BW40PowerLevel[0]);
+
+ phy_PowerIndexCheck88E(Adapter, channel, &cckPowerLevel[0], &ofdmPowerLevel[0], &BW20PowerLevel[0], &BW40PowerLevel[0]);
+
+ rtl8188e_PHY_RF6052SetCckTxPower(Adapter, &cckPowerLevel[0]);
+ rtl8188e_PHY_RF6052SetOFDMTxPower(Adapter, &ofdmPowerLevel[0], &BW20PowerLevel[0], &BW40PowerLevel[0], channel);
+}
+
+/* */
+/* Description: */
+/* Update transmit power level of all channel supported. */
+/* */
+/* TODO: */
+/* A mode. */
+/* By Bruce, 2008-02-04. */
+/* */
+bool
+PHY_UpdateTxPowerDbm8188E(
+ struct adapter *Adapter,
+ int powerInDbm
+ )
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u8 idx;
+ u8 rf_path;
+
+ /* TODO: A mode Tx power. */
+ u8 CckTxPwrIdx = phy_DbmToTxPwrIdx(Adapter, WIRELESS_MODE_B, powerInDbm);
+ u8 OfdmTxPwrIdx = phy_DbmToTxPwrIdx(Adapter, WIRELESS_MODE_N_24G, powerInDbm);
+
+ if (OfdmTxPwrIdx - pHalData->LegacyHTTxPowerDiff > 0)
+ OfdmTxPwrIdx -= pHalData->LegacyHTTxPowerDiff;
+ else
+ OfdmTxPwrIdx = 0;
+
+ for (idx = 0; idx < 14; idx++) {
+ for (rf_path = 0; rf_path < 2; rf_path++) {
+ pHalData->TxPwrLevelCck[rf_path][idx] = CckTxPwrIdx;
+ pHalData->TxPwrLevelHT40_1S[rf_path][idx] =
+ pHalData->TxPwrLevelHT40_2S[rf_path][idx] = OfdmTxPwrIdx;
+ }
+ }
+ return true;
+}
+
+void
+PHY_ScanOperationBackup8188E(
+ struct adapter *Adapter,
+ u8 Operation
+ )
+{
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: PHY_SetBWModeCallback8192C()
+ *
 + * Overview:	Timer callback function for SetBWMode
+ *
+ * Input: PRT_TIMER pTimer
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Note: (1) We do not take j mode into consideration now
 + *			(2) Will the two workitems of "switch channel" and "switch channel bandwidth" run
+ * concurrently?
+ *---------------------------------------------------------------------------*/
+static void
+_PHY_SetBWMode92C(
+ struct adapter *Adapter
+)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u8 regBwOpMode;
+ u8 regRRSR_RSC;
+
+ if (pHalData->rf_chip == RF_PSEUDO_11N)
+ return;
+
+ /* There is no 40MHz mode in RF_8225. */
+ if (pHalData->rf_chip == RF_8225)
+ return;
+
+ if (Adapter->bDriverStopped)
+ return;
+
+ /* 3 */
+ /* 3<1>Set MAC register */
+ /* 3 */
+
+ regBwOpMode = rtw_read8(Adapter, REG_BWOPMODE);
+ regRRSR_RSC = rtw_read8(Adapter, REG_RRSR+2);
+
+ switch (pHalData->CurrentChannelBW) {
+ case HT_CHANNEL_WIDTH_20:
+ regBwOpMode |= BW_OPMODE_20MHZ;
+		/* 2007/02/07 Marked by Emily because we have not verified whether this register works */
+ rtw_write8(Adapter, REG_BWOPMODE, regBwOpMode);
+ break;
+ case HT_CHANNEL_WIDTH_40:
+ regBwOpMode &= ~BW_OPMODE_20MHZ;
+		/* 2007/02/07 Marked by Emily because we have not verified whether this register works */
+ rtw_write8(Adapter, REG_BWOPMODE, regBwOpMode);
+ regRRSR_RSC = (regRRSR_RSC&0x90) | (pHalData->nCur40MhzPrimeSC<<5);
+ rtw_write8(Adapter, REG_RRSR+2, regRRSR_RSC);
+ break;
+ default:
+ break;
+ }
+
+ /* 3 */
+ /* 3 <2>Set PHY related register */
+ /* 3 */
+ switch (pHalData->CurrentChannelBW) {
+ /* 20 MHz channel*/
+ case HT_CHANNEL_WIDTH_20:
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bRFMOD, 0x0);
+ PHY_SetBBReg(Adapter, rFPGA1_RFMOD, bRFMOD, 0x0);
+ break;
+ /* 40 MHz channel*/
+ case HT_CHANNEL_WIDTH_40:
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bRFMOD, 0x1);
+ PHY_SetBBReg(Adapter, rFPGA1_RFMOD, bRFMOD, 0x1);
+ /* Set Control channel to upper or lower. These settings are required only for 40MHz */
+ PHY_SetBBReg(Adapter, rCCK0_System, bCCKSideBand, (pHalData->nCur40MhzPrimeSC>>1));
+ PHY_SetBBReg(Adapter, rOFDM1_LSTF, 0xC00, pHalData->nCur40MhzPrimeSC);
+ PHY_SetBBReg(Adapter, 0x818, (BIT26 | BIT27),
+ (pHalData->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
+ break;
+ default:
+ break;
+ }
+ /* Skip over setting of J-mode in BB register here. Default value is "None J mode". Emily 20070315 */
+
+ /* 3<3>Set RF related register */
+ switch (pHalData->rf_chip) {
+ case RF_8225:
+ break;
+ case RF_8256:
+ /* Please implement this function in Hal8190PciPhy8256.c */
+ break;
+ case RF_8258:
+ /* Please implement this function in Hal8190PciPhy8258.c */
+ break;
+ case RF_PSEUDO_11N:
+ break;
+ case RF_6052:
+ rtl8188e_PHY_RF6052SetBandwidth(Adapter, pHalData->CurrentChannelBW);
+ break;
+ default:
+ break;
+ }
+}
+
+ /*-----------------------------------------------------------------------------
+ * Function: SetBWMode8190Pci()
+ *
 + * Overview:	This function is exported to the "HalCommon" module
+ *
+ * Input: struct adapter *Adapter
+ * enum ht_channel_width Bandwidth 20M or 40M
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Note: We do not take j mode into consideration now
+ *---------------------------------------------------------------------------*/
+void PHY_SetBWMode8188E(struct adapter *Adapter, enum ht_channel_width Bandwidth, /* 20M or 40M */
+ unsigned char Offset) /* Upper, Lower, or Don't care */
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ enum ht_channel_width tmpBW = pHalData->CurrentChannelBW;
+
+ pHalData->CurrentChannelBW = Bandwidth;
+
+ pHalData->nCur40MhzPrimeSC = Offset;
+
+ if ((!Adapter->bDriverStopped) && (!Adapter->bSurpriseRemoved))
+ _PHY_SetBWMode92C(Adapter);
+ else
+ pHalData->CurrentChannelBW = tmpBW;
+}
+
+static void _PHY_SwChnl8192C(struct adapter *Adapter, u8 channel)
+{
+ u8 eRFPath;
+ u32 param1, param2;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+
+ if (Adapter->bNotifyChannelChange)
+ DBG_88E("[%s] ch = %d\n", __func__, channel);
+
+ /* s1. pre common command - CmdID_SetTxPowerLevel */
+ PHY_SetTxPowerLevel8188E(Adapter, channel);
+
+ /* s2. RF dependent command - CmdID_RF_WriteReg, param1=RF_CHNLBW, param2=channel */
+ param1 = RF_CHNLBW;
+ param2 = channel;
+ for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++) {
+ pHalData->RfRegChnlVal[eRFPath] = ((pHalData->RfRegChnlVal[eRFPath] & 0xfffffc00) | param2);
+ PHY_SetRFReg(Adapter, (enum rf_radio_path)eRFPath, param1, bRFRegOffsetMask, pHalData->RfRegChnlVal[eRFPath]);
+ }
+}
+
+void PHY_SwChnl8188E(struct adapter *Adapter, u8 channel)
+{
+ /* Call after initialization */
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u8 tmpchannel = pHalData->CurrentChannel;
+ bool bResult = true;
+
+ if (pHalData->rf_chip == RF_PSEUDO_11N)
+		return;	/* return immediately if it is pseudo-phy */
+
+ if (channel == 0)
+ channel = 1;
+
+ pHalData->CurrentChannel = channel;
+
+ if ((!Adapter->bDriverStopped) && (!Adapter->bSurpriseRemoved)) {
+ _PHY_SwChnl8192C(Adapter, channel);
+
+		if (!bResult)
+			pHalData->CurrentChannel = tmpchannel;
+
+ } else {
+ pHalData->CurrentChannel = tmpchannel;
+ }
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c b/drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c
new file mode 100644
index 00000000000..bfdf9b3ce77
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_rf6052.c
@@ -0,0 +1,572 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+/******************************************************************************
+ *
+ *
+ * Module: rtl8192c_rf6052.c ( Source C File)
+ *
+ * Note: Provide RF 6052 series relative API.
+ *
+ * Function:
+ *
+ * Export:
+ *
+ * Abbrev:
+ *
+ * History:
+ * Data Who Remark
+ *
+ * 09/25/2008 MHC Create initial version.
 + *	11/05/2008	MHC		Add API for tx power setting.
+ *
+ *
+******************************************************************************/
+
+#define _RTL8188E_RF6052_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#include <rtl8188e_hal.h>
+
+/*---------------------------Define Local Constant---------------------------*/
+/* Define local structure for debug!!!!! */
+struct rf_shadow {
+ /* Shadow register value */
+ u32 Value;
+ /* Compare or not flag */
+ u8 Compare;
+	/*  Record if it has ever been modified unexpectedly */
+ u8 ErrorOrNot;
+	/*  Recover flag */
+ u8 Recorver;
+ /* */
+ u8 Driver_Write;
+};
+
+/*---------------------------Define Local Constant---------------------------*/
+
+
+/*------------------------Define global variable-----------------------------*/
+
+/*------------------------Define local variable------------------------------*/
+
+/*-----------------------------------------------------------------------------
+ * Function: RF_ChangeTxPath
+ *
 + * Overview:	For RF6052, we must change some RF settings for 1T or 2T.
+ *
+ * Input: u16 DataRate 0x80-8f, 0x90-9f
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 09/25/2008 MHC Create Version 0.
 + *				Firmware will support the utility later.
+ *
+ *---------------------------------------------------------------------------*/
+void rtl8188e_RF_ChangeTxPath(struct adapter *Adapter, u16 DataRate)
+{
+/* We do not support gain table change in A-CUT now!!!! Delete later!!! */
+} /* RF_ChangeTxPath */
+
+
+/*-----------------------------------------------------------------------------
+ * Function: PHY_RF6052SetBandwidth()
+ *
+ * Overview: This function is called by SetBWModeCallback8190Pci() only
+ *
+ * Input: struct adapter *Adapter
+ * WIRELESS_BANDWIDTH_E Bandwidth 20M or 40M
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Note: For RF type 0222D
+ *---------------------------------------------------------------------------*/
+void rtl8188e_PHY_RF6052SetBandwidth(struct adapter *Adapter,
+ enum ht_channel_width Bandwidth)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+
+ switch (Bandwidth) {
+ case HT_CHANNEL_WIDTH_20:
+ pHalData->RfRegChnlVal[0] = ((pHalData->RfRegChnlVal[0] & 0xfffff3ff) | BIT(10) | BIT(11));
+ PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
+ break;
+ case HT_CHANNEL_WIDTH_40:
+ pHalData->RfRegChnlVal[0] = ((pHalData->RfRegChnlVal[0] & 0xfffff3ff) | BIT(10));
+ PHY_SetRFReg(Adapter, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, pHalData->RfRegChnlVal[0]);
+ break;
+ default:
+ break;
+ }
+}
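+/* Encoding note, as implied by the masks above: bits [11:10] of RfRegChnlVal select
+ * the bandwidth (both bits set for 20 MHz, only BIT(10) for 40 MHz), while the
+ * 0xfffff3ff mask preserves the rest of the register, including the channel number
+ * in the low bits. */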
+
+/*-----------------------------------------------------------------------------
+ * Function: PHY_RF6052SetCckTxPower
+ *
+ * Overview:
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/05/2008 MHC Simulate 8192series..
+ *
+ *---------------------------------------------------------------------------*/
+
+void
+rtl8188e_PHY_RF6052SetCckTxPower(
+ struct adapter *Adapter,
+ u8 *pPowerlevel)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ u32 TxAGC[2] = {0, 0}, tmpval = 0, pwrtrac_value;
+ bool TurboScanOff = false;
+ u8 idx1, idx2;
+ u8 *ptr;
+ u8 direction;
+	/* For CE, we must disable turbo scan */
+ TurboScanOff = true;
+
+
+ if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
+ TxAGC[RF_PATH_A] = 0x3f3f3f3f;
+ TxAGC[RF_PATH_B] = 0x3f3f3f3f;
+
+ TurboScanOff = true;/* disable turbo scan */
+
+ if (TurboScanOff) {
+ for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
+ TxAGC[idx1] =
+ pPowerlevel[idx1] | (pPowerlevel[idx1]<<8) |
+ (pPowerlevel[idx1]<<16) | (pPowerlevel[idx1]<<24);
+ /* 2010/10/18 MH For external PA module. We need to limit power index to be less than 0x20. */
+ if (TxAGC[idx1] > 0x20 && pHalData->ExternalPA)
+ TxAGC[idx1] = 0x20;
+ }
+ }
+ } else {
+		/* Driver dynamic Tx power shall not affect Tx power.
+		 * It shall be determined by the power training mechanism.
+		 * Currently, we cannot fully disable the driver dynamic
+		 * tx power mechanism because it is referenced by the BT
+		 * coexist mechanism.
+		 * In the future, the two mechanisms shall be separated from
+		 * each other and maintained independently. */
+ if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1) {
+ TxAGC[RF_PATH_A] = 0x10101010;
+ TxAGC[RF_PATH_B] = 0x10101010;
+ } else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level2) {
+ TxAGC[RF_PATH_A] = 0x00000000;
+ TxAGC[RF_PATH_B] = 0x00000000;
+ } else {
+ for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
+ TxAGC[idx1] =
+ pPowerlevel[idx1] | (pPowerlevel[idx1]<<8) |
+ (pPowerlevel[idx1]<<16) | (pPowerlevel[idx1]<<24);
+ }
+ if (pHalData->EEPROMRegulatory == 0) {
+ tmpval = (pHalData->MCSTxPowerLevelOriginalOffset[0][6]) +
+ (pHalData->MCSTxPowerLevelOriginalOffset[0][7]<<8);
+ TxAGC[RF_PATH_A] += tmpval;
+
+ tmpval = (pHalData->MCSTxPowerLevelOriginalOffset[0][14]) +
+ (pHalData->MCSTxPowerLevelOriginalOffset[0][15]<<24);
+ TxAGC[RF_PATH_B] += tmpval;
+ }
+ }
+ }
+ for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
+ ptr = (u8 *)(&(TxAGC[idx1]));
+ for (idx2 = 0; idx2 < 4; idx2++) {
+ if (*ptr > RF6052_MAX_TX_PWR)
+ *ptr = RF6052_MAX_TX_PWR;
+ ptr++;
+ }
+ }
+ ODM_TxPwrTrackAdjust88E(&pHalData->odmpriv, 1, &direction, &pwrtrac_value);
+
+ if (direction == 1) {
+		/* Increase TX power */
+ TxAGC[0] += pwrtrac_value;
+ TxAGC[1] += pwrtrac_value;
+ } else if (direction == 2) {
+		/* Decrease TX power */
+ TxAGC[0] -= pwrtrac_value;
+ TxAGC[1] -= pwrtrac_value;
+ }
+
+ /* rf-A cck tx power */
+ tmpval = TxAGC[RF_PATH_A]&0xff;
+ PHY_SetBBReg(Adapter, rTxAGC_A_CCK1_Mcs32, bMaskByte1, tmpval);
+ tmpval = TxAGC[RF_PATH_A]>>8;
+ PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
+
+ /* rf-B cck tx power */
+ tmpval = TxAGC[RF_PATH_B]>>24;
+ PHY_SetBBReg(Adapter, rTxAGC_B_CCK11_A_CCK2_11, bMaskByte0, tmpval);
+ tmpval = TxAGC[RF_PATH_B]&0x00ffffff;
+ PHY_SetBBReg(Adapter, rTxAGC_B_CCK1_55_Mcs32, 0xffffff00, tmpval);
+} /* PHY_RF6052SetCckTxPower */
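+
+/*
+ * In the CCK path above, one per-path power level is replicated into all
+ * four bytes of a 32-bit TxAGC word and each byte is then clamped to
+ * RF6052_MAX_TX_PWR before it is split across the CCK rate registers.
+ * A minimal sketch of that packing and clamping step (illustrative only;
+ * the name cck_pack_and_clamp is made up and nothing calls it):
+ */
+static inline u32 cck_pack_and_clamp(u8 powerlevel)
+{
+	u32 txagc = powerlevel | (powerlevel << 8) |
+		    (powerlevel << 16) | ((u32)powerlevel << 24);
+	u8 *p = (u8 *)&txagc;
+	u8 i;
+
+	for (i = 0; i < 4; i++) {
+		if (p[i] > RF6052_MAX_TX_PWR)
+			p[i] = RF6052_MAX_TX_PWR;
+	}
+	return txagc;
+}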
+
+/* */
+/* powerbase0 for OFDM rates */
+/* powerbase1 for HT MCS rates */
+/* */
+static void getpowerbase88e(struct adapter *Adapter, u8 *pPowerLevelOFDM,
+ u8 *pPowerLevelBW20, u8 *pPowerLevelBW40, u8 Channel, u32 *OfdmBase, u32 *MCSBase)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u32 powerBase0, powerBase1;
+ u8 i, powerlevel[2];
+
+ for (i = 0; i < 2; i++) {
+ powerBase0 = pPowerLevelOFDM[i];
+
+ powerBase0 = (powerBase0<<24) | (powerBase0<<16) | (powerBase0<<8) | powerBase0;
+ *(OfdmBase+i) = powerBase0;
+ }
+ for (i = 0; i < pHalData->NumTotalRFPath; i++) {
+ /* Check HT20 to HT40 diff */
+ if (pHalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
+ powerlevel[i] = pPowerLevelBW20[i];
+ else
+ powerlevel[i] = pPowerLevelBW40[i];
+ powerBase1 = powerlevel[i];
+ powerBase1 = (powerBase1<<24) | (powerBase1<<16) | (powerBase1<<8) | powerBase1;
+ *(MCSBase+i) = powerBase1;
+ }
+}
+static void get_rx_power_val_by_reg(struct adapter *Adapter, u8 Channel,
+ u8 index, u32 *powerBase0, u32 *powerBase1,
+ u32 *pOutWriteVal)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &pHalData->dmpriv;
+ u8 i, chnlGroup = 0, pwr_diff_limit[4], customer_pwr_limit;
+ s8 pwr_diff = 0;
+ u32 writeVal, customer_limit, rf;
+ u8 Regulatory = pHalData->EEPROMRegulatory;
+
+ /* Index 0 & 1= legacy OFDM, 2-5=HT_MCS rate */
+
+ for (rf = 0; rf < 2; rf++) {
+ switch (Regulatory) {
+ case 0: /* Realtek better performance */
+ /* increase power diff defined by Realtek for large power */
+ chnlGroup = 0;
+ writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf ? 8 : 0)] +
+ ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ break;
+ case 1: /* Realtek regulatory */
+ /* increase power diff defined by Realtek for regulatory */
+ if (pHalData->pwrGroupCnt == 1)
+ chnlGroup = 0;
+ if (pHalData->pwrGroupCnt >= pHalData->PGMaxGroup) {
+				if (Channel < 3) /* Channel 1-2 */
+ chnlGroup = 0;
+ else if (Channel < 6) /* Channel 3-5 */
+ chnlGroup = 1;
+ else if (Channel < 9) /* Channel 6-8 */
+ chnlGroup = 2;
+ else if (Channel < 12) /* Channel 9-11 */
+ chnlGroup = 3;
+ else if (Channel < 14) /* Channel 12-13 */
+ chnlGroup = 4;
+ else if (Channel == 14) /* Channel 14 */
+ chnlGroup = 5;
+ }
+ writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf ? 8 : 0)] +
+ ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ break;
+ case 2: /* Better regulatory */
+ /* don't increase any power diff */
+ writeVal = ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ break;
+ case 3: /* Customer defined power diff. */
+ /* increase power diff defined by customer. */
+ chnlGroup = 0;
+
+ if (index < 2)
+ pwr_diff = pHalData->TxPwrLegacyHtDiff[rf][Channel-1];
+ else if (pHalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
+ pwr_diff = pHalData->TxPwrHt20Diff[rf][Channel-1];
+
+ if (pHalData->CurrentChannelBW == HT_CHANNEL_WIDTH_40)
+ customer_pwr_limit = pHalData->PwrGroupHT40[rf][Channel-1];
+ else
+ customer_pwr_limit = pHalData->PwrGroupHT20[rf][Channel-1];
+
+ if (pwr_diff >= customer_pwr_limit)
+ pwr_diff = 0;
+ else
+ pwr_diff = customer_pwr_limit - pwr_diff;
+
+ for (i = 0; i < 4; i++) {
+ pwr_diff_limit[i] = (u8)((pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf ? 8 : 0)]&(0x7f<<(i*8)))>>(i*8));
+
+ if (pwr_diff_limit[i] > pwr_diff)
+ pwr_diff_limit[i] = pwr_diff;
+ }
+ customer_limit = (pwr_diff_limit[3]<<24) | (pwr_diff_limit[2]<<16) |
+ (pwr_diff_limit[1]<<8) | (pwr_diff_limit[0]);
+ writeVal = customer_limit + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ break;
+ default:
+ chnlGroup = 0;
+ writeVal = pHalData->MCSTxPowerLevelOriginalOffset[chnlGroup][index+(rf ? 8 : 0)] +
+ ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
+ break;
+ }
+/* 20100427 Joseph: Driver dynamic Tx power shall not affect Tx power. It shall be determined by the power training mechanism. */
+/* Currently, we cannot fully disable the driver dynamic tx power mechanism because it is referenced by the BT coexist mechanism. */
+/* In the future, the two mechanisms shall be separated from each other and maintained independently. Thanks for Lanhsin's reminder. */
+		/* 92d does not need this */
+ if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1)
+ writeVal = 0x14141414;
+ else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level2)
+ writeVal = 0x00000000;
+
+ /* 20100628 Joseph: High power mode for BT-Coexist mechanism. */
+ /* This mechanism is only applied when Driver-Highpower-Mechanism is OFF. */
+ if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_BT1)
+ writeVal = writeVal - 0x06060606;
+ else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_BT2)
+ writeVal = writeVal;
+ *(pOutWriteVal+rf) = writeVal;
+ }
+}
+static void writeOFDMPowerReg88E(struct adapter *Adapter, u8 index, u32 *pValue)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u16 regoffset_a[6] = {
+ rTxAGC_A_Rate18_06, rTxAGC_A_Rate54_24,
+ rTxAGC_A_Mcs03_Mcs00, rTxAGC_A_Mcs07_Mcs04,
+ rTxAGC_A_Mcs11_Mcs08, rTxAGC_A_Mcs15_Mcs12};
+ u16 regoffset_b[6] = {
+ rTxAGC_B_Rate18_06, rTxAGC_B_Rate54_24,
+ rTxAGC_B_Mcs03_Mcs00, rTxAGC_B_Mcs07_Mcs04,
+ rTxAGC_B_Mcs11_Mcs08, rTxAGC_B_Mcs15_Mcs12};
+ u8 i, rf, pwr_val[4];
+ u32 writeVal;
+ u16 regoffset;
+
+ for (rf = 0; rf < 2; rf++) {
+ writeVal = pValue[rf];
+ for (i = 0; i < 4; i++) {
+ pwr_val[i] = (u8)((writeVal & (0x7f<<(i*8)))>>(i*8));
+ if (pwr_val[i] > RF6052_MAX_TX_PWR)
+ pwr_val[i] = RF6052_MAX_TX_PWR;
+ }
+ writeVal = (pwr_val[3]<<24) | (pwr_val[2]<<16) | (pwr_val[1]<<8) | pwr_val[0];
+
+ if (rf == 0)
+ regoffset = regoffset_a[index];
+ else
+ regoffset = regoffset_b[index];
+
+ PHY_SetBBReg(Adapter, regoffset, bMaskDWord, writeVal);
+
+ /* 201005115 Joseph: Set Tx Power diff for Tx power training mechanism. */
+ if (((pHalData->rf_type == RF_2T2R) &&
+ (regoffset == rTxAGC_A_Mcs15_Mcs12 || regoffset == rTxAGC_B_Mcs15_Mcs12)) ||
+ ((pHalData->rf_type != RF_2T2R) &&
+ (regoffset == rTxAGC_A_Mcs07_Mcs04 || regoffset == rTxAGC_B_Mcs07_Mcs04))) {
+ writeVal = pwr_val[3];
+ if (regoffset == rTxAGC_A_Mcs15_Mcs12 || regoffset == rTxAGC_A_Mcs07_Mcs04)
+ regoffset = 0xc90;
+ if (regoffset == rTxAGC_B_Mcs15_Mcs12 || regoffset == rTxAGC_B_Mcs07_Mcs04)
+ regoffset = 0xc98;
+ for (i = 0; i < 3; i++) {
+ if (i != 2)
+ writeVal = (writeVal > 8) ? (writeVal-8) : 0;
+ else
+ writeVal = (writeVal > 6) ? (writeVal-6) : 0;
+ rtw_write8(Adapter, (u32)(regoffset+i), (u8)writeVal);
+ }
+ }
+ }
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: PHY_RF6052SetOFDMTxPower
+ *
+ * Overview: For legacy and HT OFDM, we must read the EEPROM TX power index
+ * for each channel and read the original value in the TX power register
+ * area from 0xe00. We add the offset to the original value to get the
+ * correct tx power.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 11/05/2008 MHC Simulate 8192 series method.
+ * 01/06/2009 MHC 1. Prevent Path B tx power overflow or underflow due to
+ * A/B pwr difference or legacy/HT pwr diff.
+ * 2. We are concerned with the path B legacy/HT OFDM difference.
+ * 01/22/2009 MHC Support new EEPROM format from SD3.
+ *
+ *---------------------------------------------------------------------------*/
+
+void
+rtl8188e_PHY_RF6052SetOFDMTxPower(
+ struct adapter *Adapter,
+ u8 *pPowerLevelOFDM,
+ u8 *pPowerLevelBW20,
+ u8 *pPowerLevelBW40,
+ u8 Channel)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u32 writeVal[2], powerBase0[2], powerBase1[2], pwrtrac_value;
+ u8 direction;
+ u8 index = 0;
+
+ getpowerbase88e(Adapter, pPowerLevelOFDM, pPowerLevelBW20, pPowerLevelBW40, Channel, &powerBase0[0], &powerBase1[0]);
+
+ /* 2012/04/23 MH According to power tracking value, we need to revise OFDM tx power. */
+	/* This is used to fix unstable power tracking mode. */
+ ODM_TxPwrTrackAdjust88E(&pHalData->odmpriv, 0, &direction, &pwrtrac_value);
+
+ for (index = 0; index < 6; index++) {
+ get_rx_power_val_by_reg(Adapter, Channel, index,
+ &powerBase0[0], &powerBase1[0],
+ &writeVal[0]);
+
+ if (direction == 1) {
+ writeVal[0] += pwrtrac_value;
+ writeVal[1] += pwrtrac_value;
+ } else if (direction == 2) {
+ writeVal[0] -= pwrtrac_value;
+ writeVal[1] -= pwrtrac_value;
+ }
+ writeOFDMPowerReg88E(Adapter, index, &writeVal[0]);
+ }
+}
+
+static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
+{
+ struct bb_reg_def *pPhyReg;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ u32 u4RegValue = 0;
+ u8 eRFPath;
+ int rtStatus = _SUCCESS;
+
+ /* 3----------------------------------------------------------------- */
+ /* 3 <2> Initialize RF */
+ /* 3----------------------------------------------------------------- */
+ for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++) {
+ pPhyReg = &pHalData->PHYRegDef[eRFPath];
+
+ /*----Store original RFENV control type----*/
+ switch (eRFPath) {
+ case RF_PATH_A:
+ case RF_PATH_C:
+ u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV);
+ break;
+ case RF_PATH_B:
+ case RF_PATH_D:
+ u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV<<16);
+ break;
+ }
+ /*----Set RF_ENV enable----*/
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfe, bRFSI_RFENV<<16, 0x1);
+ rtw_udelay_os(1);/* PlatformStallExecution(1); */
+
+ /*----Set RF_ENV output high----*/
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
+ rtw_udelay_os(1);/* PlatformStallExecution(1); */
+
+ /* Set bit number of Address and Data for RF register */
+ PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); /* Set 1 to 4 bits for 8255 */
+ rtw_udelay_os(1);/* PlatformStallExecution(1); */
+
+ PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); /* Set 0 to 12 bits for 8255 */
+ rtw_udelay_os(1);/* PlatformStallExecution(1); */
+
+		/*----Initialize RF from configuration file----*/
+ switch (eRFPath) {
+ case RF_PATH_A:
+ if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, (enum ODM_RF_RADIO_PATH)eRFPath, (enum ODM_RF_RADIO_PATH)eRFPath))
+ rtStatus = _FAIL;
+ break;
+ case RF_PATH_B:
+ if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, (enum ODM_RF_RADIO_PATH)eRFPath, (enum ODM_RF_RADIO_PATH)eRFPath))
+ rtStatus = _FAIL;
+ break;
+ case RF_PATH_C:
+ break;
+ case RF_PATH_D:
+ break;
+ }
+		/*----Restore RFENV control type----*/
+ switch (eRFPath) {
+ case RF_PATH_A:
+ case RF_PATH_C:
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV, u4RegValue);
+ break;
+ case RF_PATH_B:
+ case RF_PATH_D:
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV<<16, u4RegValue);
+ break;
+ }
+ if (rtStatus != _SUCCESS)
+ goto phy_RF6052_Config_ParaFile_Fail;
+ }
+ return rtStatus;
+
+phy_RF6052_Config_ParaFile_Fail:
+ return rtStatus;
+}
+
+int PHY_RF6052_Config8188E(struct adapter *Adapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
+ int rtStatus = _SUCCESS;
+
+ /* */
+ /* Initialize general global value */
+ /* */
+ /* TODO: Extend RF_PATH_C and RF_PATH_D in the future */
+ if (pHalData->rf_type == RF_1T1R)
+ pHalData->NumTotalRFPath = 1;
+ else
+ pHalData->NumTotalRFPath = 2;
+
+ /* */
+ /* Config BB and RF */
+ /* */
+ rtStatus = phy_RF6052_Config_ParaFile(Adapter);
+ return rtStatus;
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
new file mode 100644
index 00000000000..05e2475cfd6
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
@@ -0,0 +1,202 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTL8188E_REDESC_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtl8188e_hal.h>
+
+static void process_rssi(struct adapter *padapter, union recv_frame *prframe)
+{
+ struct rx_pkt_attrib *pattrib = &prframe->u.hdr.attrib;
+ struct signal_stat *signal_stat = &padapter->recvpriv.signal_strength_data;
+
+ if (signal_stat->update_req) {
+ signal_stat->total_num = 0;
+ signal_stat->total_val = 0;
+ signal_stat->update_req = 0;
+ }
+
+ signal_stat->total_num++;
+ signal_stat->total_val += pattrib->phy_info.SignalStrength;
+ signal_stat->avg_val = signal_stat->total_val / signal_stat->total_num;
+} /* Process_UI_RSSI_8192C */
+
+static void process_link_qual(struct adapter *padapter, union recv_frame *prframe)
+{
+ struct rx_pkt_attrib *pattrib;
+ struct signal_stat *signal_stat;
+
+ if (prframe == NULL || padapter == NULL)
+ return;
+
+ pattrib = &prframe->u.hdr.attrib;
+ signal_stat = &padapter->recvpriv.signal_qual_data;
+
+ if (signal_stat->update_req) {
+ signal_stat->total_num = 0;
+ signal_stat->total_val = 0;
+ signal_stat->update_req = 0;
+ }
+
+ signal_stat->total_num++;
+ signal_stat->total_val += pattrib->phy_info.SignalQuality;
+ signal_stat->avg_val = signal_stat->total_val / signal_stat->total_num;
+}
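+
+/*
+ * process_rssi() and process_link_qual() above share one accumulate-and-
+ * average pattern: reset the window when update_req is set, then add the
+ * new sample and recompute the mean. A sketch of that pattern on its own
+ * (signal_stat_add_sample is a made-up name, nothing calls it):
+ */
+static inline void signal_stat_add_sample(struct signal_stat *stat, u8 sample)
+{
+	if (stat->update_req) {
+		/* consumer asked for a fresh averaging window */
+		stat->total_num = 0;
+		stat->total_val = 0;
+		stat->update_req = 0;
+	}
+	stat->total_num++;
+	stat->total_val += sample;
+	stat->avg_val = stat->total_val / stat->total_num;
+}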
+
+void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe)
+{
+ union recv_frame *precvframe = (union recv_frame *)prframe;
+
+ /* Check RSSI */
+ process_rssi(padapter, precvframe);
+ /* Check EVM */
+ process_link_qual(padapter, precvframe);
+}
+
+void update_recvframe_attrib_88e(union recv_frame *precvframe, struct recv_stat *prxstat)
+{
+ struct rx_pkt_attrib *pattrib;
+ struct recv_stat report;
+
+ report.rxdw0 = prxstat->rxdw0;
+ report.rxdw1 = prxstat->rxdw1;
+ report.rxdw2 = prxstat->rxdw2;
+ report.rxdw3 = prxstat->rxdw3;
+ report.rxdw4 = prxstat->rxdw4;
+ report.rxdw5 = prxstat->rxdw5;
+
+ pattrib = &precvframe->u.hdr.attrib;
+ _rtw_memset(pattrib, 0, sizeof(struct rx_pkt_attrib));
+
+	pattrib->crc_err = (u8)((le32_to_cpu(report.rxdw0) >> 14) & 0x1);/* u8)prxreport->crc32; */
+
+ /* update rx report to recv_frame attribute */
+ pattrib->pkt_rpt_type = (u8)((le32_to_cpu(report.rxdw3) >> 14) & 0x3);/* prxreport->rpt_sel; */
+
+ if (pattrib->pkt_rpt_type == NORMAL_RX) { /* Normal rx packet */
+ pattrib->pkt_len = (u16)(le32_to_cpu(report.rxdw0) & 0x00003fff);/* u16)prxreport->pktlen; */
+ pattrib->drvinfo_sz = (u8)((le32_to_cpu(report.rxdw0) >> 16) & 0xf) * 8;/* u8)(prxreport->drvinfosize << 3); */
+
+ pattrib->physt = (u8)((le32_to_cpu(report.rxdw0) >> 26) & 0x1);/* u8)prxreport->physt; */
+
+ pattrib->bdecrypted = (le32_to_cpu(report.rxdw0) & BIT(27)) ? 0 : 1;/* u8)(prxreport->swdec ? 0 : 1); */
+ pattrib->encrypt = (u8)((le32_to_cpu(report.rxdw0) >> 20) & 0x7);/* u8)prxreport->security; */
+
+ pattrib->qos = (u8)((le32_to_cpu(report.rxdw0) >> 23) & 0x1);/* u8)prxreport->qos; */
+ pattrib->priority = (u8)((le32_to_cpu(report.rxdw1) >> 8) & 0xf);/* u8)prxreport->tid; */
+
+ pattrib->amsdu = (u8)((le32_to_cpu(report.rxdw1) >> 13) & 0x1);/* u8)prxreport->amsdu; */
+
+ pattrib->seq_num = (u16)(le32_to_cpu(report.rxdw2) & 0x00000fff);/* u16)prxreport->seq; */
+ pattrib->frag_num = (u8)((le32_to_cpu(report.rxdw2) >> 12) & 0xf);/* u8)prxreport->frag; */
+ pattrib->mfrag = (u8)((le32_to_cpu(report.rxdw1) >> 27) & 0x1);/* u8)prxreport->mf; */
+ pattrib->mdata = (u8)((le32_to_cpu(report.rxdw1) >> 26) & 0x1);/* u8)prxreport->md; */
+
+ pattrib->mcs_rate = (u8)(le32_to_cpu(report.rxdw3) & 0x3f);/* u8)prxreport->rxmcs; */
+ pattrib->rxht = (u8)((le32_to_cpu(report.rxdw3) >> 6) & 0x1);/* u8)prxreport->rxht; */
+
+ pattrib->icv_err = (u8)((le32_to_cpu(report.rxdw0) >> 15) & 0x1);/* u8)prxreport->icverr; */
+ pattrib->shift_sz = (u8)((le32_to_cpu(report.rxdw0) >> 24) & 0x3);
+ } else if (pattrib->pkt_rpt_type == TX_REPORT1) { /* CCX */
+ pattrib->pkt_len = TX_RPT1_PKT_LEN;
+ pattrib->drvinfo_sz = 0;
+ } else if (pattrib->pkt_rpt_type == TX_REPORT2) { /* TX RPT */
+ pattrib->pkt_len = (u16)(le32_to_cpu(report.rxdw0) & 0x3FF);/* Rx length[9:0] */
+ pattrib->drvinfo_sz = 0;
+
+ /* */
+ /* Get TX report MAC ID valid. */
+ /* */
+ pattrib->MacIDValidEntry[0] = le32_to_cpu(report.rxdw4);
+ pattrib->MacIDValidEntry[1] = le32_to_cpu(report.rxdw5);
+
+ } else if (pattrib->pkt_rpt_type == HIS_REPORT) { /* USB HISR RPT */
+ pattrib->pkt_len = (u16)(le32_to_cpu(report.rxdw0) & 0x00003fff);/* u16)prxreport->pktlen; */
+ }
+}
+
+/*
+ * Notice:
+ * Before calling this function,
+ * precvframe->u.hdr.rx_data should be ready!
+ */
+void update_recvframe_phyinfo_88e(union recv_frame *precvframe, struct phy_stat *pphy_status)
+{
+ struct adapter *padapter = precvframe->u.hdr.adapter;
+ struct rx_pkt_attrib *pattrib = &precvframe->u.hdr.attrib;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct odm_phy_status_info *pPHYInfo = (struct odm_phy_status_info *)(&pattrib->phy_info);
+ u8 *wlanhdr;
+ struct odm_per_pkt_info pkt_info;
+ u8 *sa = NULL;
+ struct sta_priv *pstapriv;
+ struct sta_info *psta;
+
+ pkt_info.bPacketMatchBSSID = false;
+ pkt_info.bPacketToSelf = false;
+ pkt_info.bPacketBeacon = false;
+
+ wlanhdr = get_recvframe_data(precvframe);
+
+ pkt_info.bPacketMatchBSSID = ((!IsFrameTypeCtrl(wlanhdr)) &&
+ !pattrib->icv_err && !pattrib->crc_err &&
+ _rtw_memcmp(get_hdr_bssid(wlanhdr),
+ get_bssid(&padapter->mlmepriv), ETH_ALEN));
+
+ pkt_info.bPacketToSelf = pkt_info.bPacketMatchBSSID &&
+ (_rtw_memcmp(get_da(wlanhdr),
+ myid(&padapter->eeprompriv), ETH_ALEN));
+
+ pkt_info.bPacketBeacon = pkt_info.bPacketMatchBSSID &&
+ (GetFrameSubType(wlanhdr) == WIFI_BEACON);
+
+ if (pkt_info.bPacketBeacon) {
+ if (check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE))
+ sa = padapter->mlmepriv.cur_network.network.MacAddress;
+ /* to do Ad-hoc */
+ } else {
+ sa = get_sa(wlanhdr);
+ }
+
+ pstapriv = &padapter->stapriv;
+ pkt_info.StationID = 0xFF;
+ psta = rtw_get_stainfo(pstapriv, sa);
+ if (psta)
+ pkt_info.StationID = psta->mac_id;
+ pkt_info.Rate = pattrib->mcs_rate;
+
+ ODM_PhyStatusQuery(&pHalData->odmpriv, pPHYInfo, (u8 *)pphy_status, &(pkt_info));
+
+ precvframe->u.hdr.psta = NULL;
+ if (pkt_info.bPacketMatchBSSID &&
+ (check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE))) {
+ if (psta) {
+ precvframe->u.hdr.psta = psta;
+ rtl8188e_process_phy_info(padapter, precvframe);
+ }
+ } else if (pkt_info.bPacketToSelf || pkt_info.bPacketBeacon) {
+ if (check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE)) {
+ if (psta)
+ precvframe->u.hdr.psta = psta;
+ }
+ rtl8188e_process_phy_info(padapter, precvframe);
+ }
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_sreset.c b/drivers/staging/rtl8188eu/hal/rtl8188e_sreset.c
new file mode 100644
index 00000000000..96d698e1f33
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_sreset.c
@@ -0,0 +1,80 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTL8188E_SRESET_C_
+
+#include <rtl8188e_sreset.h>
+#include <rtl8188e_hal.h>
+
+void rtl8188e_silentreset_for_specific_platform(struct adapter *padapter)
+{
+}
+
+void rtl8188e_sreset_xmit_status_check(struct adapter *padapter)
+{
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+ struct sreset_priv *psrtpriv = &pHalData->srestpriv;
+
+ unsigned long current_time;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ unsigned int diff_time;
+ u32 txdma_status;
+
+ txdma_status = rtw_read32(padapter, REG_TXDMA_STATUS);
+ if (txdma_status != 0x00) {
+ DBG_88E("%s REG_TXDMA_STATUS:0x%08x\n", __func__, txdma_status);
+ rtw_write32(padapter, REG_TXDMA_STATUS, txdma_status);
+ rtl8188e_silentreset_for_specific_platform(padapter);
+ }
+ /* total xmit irp = 4 */
+ current_time = rtw_get_current_time();
+ if (0 == pxmitpriv->free_xmitbuf_cnt) {
+ diff_time = jiffies_to_msecs(current_time - psrtpriv->last_tx_time);
+
+ if (diff_time > 2000) {
+ if (psrtpriv->last_tx_complete_time == 0) {
+ psrtpriv->last_tx_complete_time = current_time;
+ } else {
+ diff_time = jiffies_to_msecs(current_time - psrtpriv->last_tx_complete_time);
+ if (diff_time > 4000) {
+ DBG_88E("%s tx hang\n", __func__);
+ rtl8188e_silentreset_for_specific_platform(padapter);
+ }
+ }
+ }
+ }
+}
+
+void rtl8188e_sreset_linked_status_check(struct adapter *padapter)
+{
+ u32 rx_dma_status = 0;
+ u8 fw_status = 0;
+ rx_dma_status = rtw_read32(padapter, REG_RXDMA_STATUS);
+ if (rx_dma_status != 0x00) {
+ DBG_88E("%s REG_RXDMA_STATUS:0x%08x\n", __func__, rx_dma_status);
+ rtw_write32(padapter, REG_RXDMA_STATUS, rx_dma_status);
+ }
+ fw_status = rtw_read8(padapter, REG_FMETHR);
+ if (fw_status != 0x00) {
+ if (fw_status == 1)
+ DBG_88E("%s REG_FW_STATUS (0x%02x), Read_Efuse_Fail !!\n", __func__, fw_status);
+ else if (fw_status == 2)
+ DBG_88E("%s REG_FW_STATUS (0x%02x), Condition_No_Match !!\n", __func__, fw_status);
+ }
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
new file mode 100644
index 00000000000..7ecbcf731ea
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
@@ -0,0 +1,91 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTL8188E_XMIT_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtl8188e_hal.h>
+
+void dump_txrpt_ccx_88e(void *buf)
+{
+ struct txrpt_ccx_88e *txrpt_ccx = (struct txrpt_ccx_88e *)buf;
+
+ DBG_88E("%s:\n"
+ "tag1:%u, pkt_num:%u, txdma_underflow:%u, int_bt:%u, int_tri:%u, int_ccx:%u\n"
+ "mac_id:%u, pkt_ok:%u, bmc:%u\n"
+ "retry_cnt:%u, lifetime_over:%u, retry_over:%u\n"
+ "ccx_qtime:%u\n"
+ "final_data_rate:0x%02x\n"
+ "qsel:%u, sw:0x%03x\n",
+ __func__, txrpt_ccx->tag1, txrpt_ccx->pkt_num,
+ txrpt_ccx->txdma_underflow, txrpt_ccx->int_bt,
+ txrpt_ccx->int_tri, txrpt_ccx->int_ccx,
+ txrpt_ccx->mac_id, txrpt_ccx->pkt_ok, txrpt_ccx->bmc,
+ txrpt_ccx->retry_cnt, txrpt_ccx->lifetime_over,
+ txrpt_ccx->retry_over, txrpt_ccx_qtime_88e(txrpt_ccx),
+ txrpt_ccx->final_data_rate, txrpt_ccx->qsel,
+ txrpt_ccx_sw_88e(txrpt_ccx)
+ );
+}
+
+void handle_txrpt_ccx_88e(struct adapter *adapter, u8 *buf)
+{
+ struct txrpt_ccx_88e *txrpt_ccx = (struct txrpt_ccx_88e *)buf;
+
+ if (txrpt_ccx->int_ccx) {
+ if (txrpt_ccx->pkt_ok)
+ rtw_ack_tx_done(&adapter->xmitpriv,
+ RTW_SCTX_DONE_SUCCESS);
+ else
+ rtw_ack_tx_done(&adapter->xmitpriv,
+ RTW_SCTX_DONE_CCX_PKT_FAIL);
+ }
+}
+
+void _dbg_dump_tx_info(struct adapter *padapter, int frame_tag,
+ struct tx_desc *ptxdesc)
+{
+ u8 dmp_txpkt;
+ bool dump_txdesc = false;
+ rtw_hal_get_def_var(padapter, HAL_DEF_DBG_DUMP_TXPKT, &(dmp_txpkt));
+
+ if (dmp_txpkt == 1) {/* dump txdesc for data frame */
+ DBG_88E("dump tx_desc for data frame\n");
+ if ((frame_tag & 0x0f) == DATA_FRAMETAG)
+ dump_txdesc = true;
+ } else if (dmp_txpkt == 2) {/* dump txdesc for mgnt frame */
+ DBG_88E("dump tx_desc for mgnt frame\n");
+ if ((frame_tag & 0x0f) == MGNT_FRAMETAG)
+ dump_txdesc = true;
+ }
+
+ if (dump_txdesc) {
+ DBG_88E("=====================================\n");
+ DBG_88E("txdw0(0x%08x)\n", ptxdesc->txdw0);
+ DBG_88E("txdw1(0x%08x)\n", ptxdesc->txdw1);
+ DBG_88E("txdw2(0x%08x)\n", ptxdesc->txdw2);
+ DBG_88E("txdw3(0x%08x)\n", ptxdesc->txdw3);
+ DBG_88E("txdw4(0x%08x)\n", ptxdesc->txdw4);
+ DBG_88E("txdw5(0x%08x)\n", ptxdesc->txdw5);
+ DBG_88E("txdw6(0x%08x)\n", ptxdesc->txdw6);
+ DBG_88E("txdw7(0x%08x)\n", ptxdesc->txdw7);
+ DBG_88E("=====================================\n");
+ }
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
new file mode 100644
index 00000000000..08dfd94163e
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
@@ -0,0 +1,111 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtl8188e_hal.h>
+#include <rtl8188e_led.h>
+
+/* LED object. */
+
+/* LED_819xUsb routines. */
+/* Description: */
+/* Turn on LED according to LedPin specified. */
+void SwLedOn(struct adapter *padapter, struct LED_871x *pLed)
+{
+ u8 LedCfg;
+
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ return;
+ LedCfg = rtw_read8(padapter, REG_LEDCFG2);
+ switch (pLed->LedPin) {
+ case LED_PIN_LED0:
+ rtw_write8(padapter, REG_LEDCFG2, (LedCfg&0xf0)|BIT5|BIT6); /* SW control led0 on. */
+ break;
+ case LED_PIN_LED1:
+ rtw_write8(padapter, REG_LEDCFG2, (LedCfg&0x0f)|BIT5); /* SW control led1 on. */
+ break;
+ default:
+ break;
+ }
+ pLed->bLedOn = true;
+}
+
+/* Description: */
+/* Turn off LED according to LedPin specified. */
+void SwLedOff(struct adapter *padapter, struct LED_871x *pLed)
+{
+ u8 LedCfg;
+ struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
+
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
+ goto exit;
+
+ LedCfg = rtw_read8(padapter, REG_LEDCFG2);/* 0x4E */
+
+ switch (pLed->LedPin) {
+ case LED_PIN_LED0:
+ if (pHalData->bLedOpenDrain) {
+			/* Open-drain arrangement for controlling the LED */
+ LedCfg &= 0x90; /* Set to software control. */
+ rtw_write8(padapter, REG_LEDCFG2, (LedCfg|BIT3));
+ LedCfg = rtw_read8(padapter, REG_MAC_PINMUX_CFG);
+ LedCfg &= 0xFE;
+ rtw_write8(padapter, REG_MAC_PINMUX_CFG, LedCfg);
+ } else {
+ rtw_write8(padapter, REG_LEDCFG2, (LedCfg|BIT3|BIT5|BIT6));
+ }
+ break;
+ case LED_PIN_LED1:
+ LedCfg &= 0x0f; /* Set to software control. */
+ rtw_write8(padapter, REG_LEDCFG2, (LedCfg|BIT3));
+ break;
+ default:
+ break;
+ }
+exit:
+ pLed->bLedOn = false;
+}
+
+/* Interface to manipulate LED objects. */
+/* Default LED behavior. */
+
+/* Description: */
+/* Initialize all LED_871x objects. */
+void rtl8188eu_InitSwLeds(struct adapter *padapter)
+{
+ struct led_priv *pledpriv = &(padapter->ledpriv);
+
+ pledpriv->LedControlHandler = LedControl8188eu;
+
+ InitLed871x(padapter, &(pledpriv->SwLed0), LED_PIN_LED0);
+
+ InitLed871x(padapter, &(pledpriv->SwLed1), LED_PIN_LED1);
+}
+
+/* Description: */
+/* DeInitialize all LED_819xUsb objects. */
+void rtl8188eu_DeInitSwLeds(struct adapter *padapter)
+{
+ struct led_priv *ledpriv = &(padapter->ledpriv);
+
+ DeInitLed871x(&(ledpriv->SwLed0));
+ DeInitLed871x(&(ledpriv->SwLed1));
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
new file mode 100644
index 00000000000..0f47b891859
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
@@ -0,0 +1,138 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTL8188EU_RECV_C_
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <mlme_osdep.h>
+#include <ip.h>
+#include <if_ether.h>
+#include <ethernet.h>
+
+#include <usb_ops.h>
+#include <wifi.h>
+
+#include <rtl8188e_hal.h>
+
+void rtl8188eu_init_recvbuf(struct adapter *padapter, struct recv_buf *precvbuf)
+{
+ precvbuf->transfer_len = 0;
+
+ precvbuf->len = 0;
+
+ precvbuf->ref_cnt = 0;
+
+ if (precvbuf->pbuf) {
+ precvbuf->pdata = precvbuf->pbuf;
+ precvbuf->phead = precvbuf->pbuf;
+ precvbuf->ptail = precvbuf->pbuf;
+ precvbuf->pend = precvbuf->pdata + MAX_RECVBUF_SZ;
+ }
+}
+
+int rtl8188eu_init_recv_priv(struct adapter *padapter)
+{
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+ int i, res = _SUCCESS;
+ struct recv_buf *precvbuf;
+
+ tasklet_init(&precvpriv->recv_tasklet,
+ (void(*)(unsigned long))rtl8188eu_recv_tasklet,
+ (unsigned long)padapter);
+
+ /* init recv_buf */
+ _rtw_init_queue(&precvpriv->free_recv_buf_queue);
+
+ precvpriv->pallocated_recv_buf = rtw_zmalloc(NR_RECVBUFF * sizeof(struct recv_buf) + 4);
+ if (precvpriv->pallocated_recv_buf == NULL) {
+ res = _FAIL;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("alloc recv_buf fail!\n"));
+ goto exit;
+ }
+ _rtw_memset(precvpriv->pallocated_recv_buf, 0, NR_RECVBUFF * sizeof(struct recv_buf) + 4);
+
+ precvpriv->precv_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(precvpriv->pallocated_recv_buf), 4);
+
+ precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+
+ for (i = 0; i < NR_RECVBUFF; i++) {
+ _rtw_init_listhead(&precvbuf->list);
+ _rtw_spinlock_init(&precvbuf->recvbuf_lock);
+ precvbuf->alloc_sz = MAX_RECVBUF_SZ;
+ res = rtw_os_recvbuf_resource_alloc(padapter, precvbuf);
+ if (res == _FAIL)
+ break;
+ precvbuf->ref_cnt = 0;
+ precvbuf->adapter = padapter;
+ precvbuf++;
+ }
+ precvpriv->free_recv_buf_queue_cnt = NR_RECVBUFF;
+ skb_queue_head_init(&precvpriv->rx_skb_queue);
+ {
+ int i;
+ size_t tmpaddr = 0;
+ size_t alignment = 0;
+ struct sk_buff *pskb = NULL;
+
+ skb_queue_head_init(&precvpriv->free_recv_skb_queue);
+
+ for (i = 0; i < NR_PREALLOC_RECV_SKB; i++) {
+ pskb = __netdev_alloc_skb(padapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ, GFP_KERNEL);
+ if (pskb) {
+ pskb->dev = padapter->pnetdev;
+ tmpaddr = (size_t)pskb->data;
+ alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
+ skb_reserve(pskb, (RECVBUFF_ALIGN_SZ - alignment));
+
+ skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
+ }
+ pskb = NULL;
+ }
+ }
+exit:
+ return res;
+}
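+
+/*
+ * Two alignment tricks are used in rtl8188eu_init_recv_priv() above: the
+ * recv_buf array is allocated 4 bytes larger than needed so its start can
+ * be rounded up with N_BYTE_ALIGMENT(), and every preallocated skb
+ * reserves enough headroom to land skb->data on a RECVBUFF_ALIGN_SZ
+ * boundary. The headroom arithmetic, shown on a bare address (sketch only;
+ * recvbuf_headroom is a made-up name and nothing calls it):
+ */
+static inline size_t recvbuf_headroom(size_t data_addr, size_t align)
+{
+	size_t misalign = data_addr & (align - 1);
+
+	/*
+	 * e.g. data_addr = 0x1003, align = 8 -> reserve 5 bytes.
+	 * Note that, as in the code above, an already aligned address
+	 * still reserves a full 'align' bytes of headroom.
+	 */
+	return align - misalign;
+}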
+
+void rtl8188eu_free_recv_priv(struct adapter *padapter)
+{
+ int i;
+ struct recv_buf *precvbuf;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+ precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+
+ for (i = 0; i < NR_RECVBUFF; i++) {
+ rtw_os_recvbuf_resource_free(padapter, precvbuf);
+ precvbuf++;
+ }
+
+ kfree(precvpriv->pallocated_recv_buf);
+
+ if (skb_queue_len(&precvpriv->rx_skb_queue))
+ DBG_88E(KERN_WARNING "rx_skb_queue not empty\n");
+ skb_queue_purge(&precvpriv->rx_skb_queue);
+
+ if (skb_queue_len(&precvpriv->free_recv_skb_queue))
+ DBG_88E(KERN_WARNING "free_recv_skb_queue not empty, %d\n", skb_queue_len(&precvpriv->free_recv_skb_queue));
+
+ skb_queue_purge(&precvpriv->free_recv_skb_queue);
+}
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
new file mode 100644
index 00000000000..bd8a9ae5d07
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -0,0 +1,706 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RTL8188E_XMIT_C_
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wifi.h>
+#include <osdep_intf.h>
+#include <usb_ops.h>
+#include <rtl8188e_hal.h>
+
+s32 rtl8188eu_init_xmit_priv(struct adapter *adapt)
+{
+ struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
+
+ tasklet_init(&pxmitpriv->xmit_tasklet,
+ (void(*)(unsigned long))rtl8188eu_xmit_tasklet,
+ (unsigned long)adapt);
+ return _SUCCESS;
+}
+
+void rtl8188eu_free_xmit_priv(struct adapter *adapt)
+{
+}
+
+static u8 urb_zero_packet_chk(struct adapter *adapt, int sz)
+{
+ u8 set_tx_desc_offset;
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ set_tx_desc_offset = (((sz + TXDESC_SIZE) % haldata->UsbBulkOutSize) == 0) ? 1 : 0;
+
+ return set_tx_desc_offset;
+}
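+
+/*
+ * urb_zero_packet_chk() above returns 1 exactly when sz + TXDESC_SIZE is a
+ * multiple of the USB bulk-out size, i.e. when the transfer would end on a
+ * packet boundary and would otherwise need a zero-length packet. A
+ * stand-alone sketch of the same test (would_need_zlp is a made-up name,
+ * nothing calls it):
+ */
+static inline int would_need_zlp(u32 pkt_sz, u32 desc_sz, u32 bulk_sz)
+{
+	/* e.g. pkt_sz = 480, desc_sz = 32, bulk_sz = 512 -> returns 1 */
+	return ((pkt_sz + desc_sz) % bulk_sz) == 0;
+}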
+
+static void rtl8188eu_cal_txdesc_chksum(struct tx_desc *ptxdesc)
+{
+ u16 *usptr = (u16 *)ptxdesc;
+ u32 count = 16; /* (32 bytes / 2 bytes per XOR) => 16 times */
+ u32 index;
+ u16 checksum = 0;
+
+ /* Clear first */
+ ptxdesc->txdw7 &= cpu_to_le32(0xffff0000);
+
+ for (index = 0; index < count; index++)
+ checksum = checksum ^ le16_to_cpu(*(__le16 *)(usptr + index));
+ ptxdesc->txdw7 |= cpu_to_le32(0x0000ffff & checksum);
+}
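+
+/*
+ * The descriptor checksum above is the XOR of the first sixteen
+ * little-endian 16-bit words (32 bytes) of the TX descriptor, written back
+ * into the low half of txdw7 after that half is cleared. The same
+ * computation over a plain 32-byte buffer looks like the sketch below
+ * (txdesc_xor_checksum is a made-up name, nothing calls it):
+ */
+static inline u16 txdesc_xor_checksum(const u8 *buf32)
+{
+	u16 checksum = 0;
+	u32 i;
+
+	for (i = 0; i < 32; i += 2)
+		checksum ^= (u16)(buf32[i] | (buf32[i + 1] << 8));
+	return checksum;
+}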
+
+/* Description: In normal chip, we should send some packets to Hw which will be used by Fw */
+/* in FW LPS mode. This function fills the Tx descriptors of these packets, then */
+/* Fw can tell Hw to send these packets directly. */
+void rtl8188e_fill_fake_txdesc(struct adapter *adapt, u8 *desc, u32 BufferLen, u8 ispspoll, u8 is_btqosnull)
+{
+ struct tx_desc *ptxdesc;
+
+ /* Clear all status */
+ ptxdesc = (struct tx_desc *)desc;
+ _rtw_memset(desc, 0, TXDESC_SIZE);
+
+ /* offset 0 */
+ ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG); /* own, bFirstSeg, bLastSeg; */
+
+ ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE+OFFSET_SZ)<<OFFSET_SHT)&0x00ff0000); /* 32 bytes for TX Desc */
+
+ ptxdesc->txdw0 |= cpu_to_le32(BufferLen&0x0000ffff); /* Buffer size + command header */
+
+ /* offset 4 */
+ ptxdesc->txdw1 |= cpu_to_le32((QSLT_MGNT<<QSEL_SHT)&0x00001f00); /* Fixed queue of Mgnt queue */
+
+	/* Set NAVUSEHDR to prevent the Ps-poll AID field from being changed to an error value by Hw. */
+ if (ispspoll) {
+ ptxdesc->txdw1 |= cpu_to_le32(NAVUSEHDR);
+ } else {
+ ptxdesc->txdw4 |= cpu_to_le32(BIT(7)); /* Hw set sequence number */
+		ptxdesc->txdw3 |= cpu_to_le32((8 << 28)); /* set bit3 to 1. Suggested by TimChen. 2009.12.29. */
+ }
+
+ if (is_btqosnull)
+ ptxdesc->txdw2 |= cpu_to_le32(BIT(23)); /* BT NULL */
+
+ /* offset 16 */
+ ptxdesc->txdw4 |= cpu_to_le32(BIT(8));/* driver uses rate */
+
+	/* The USB interface drops the packet if the descriptor checksum isn't correct. */
+	/* Using this checksum lets the hardware recover from packet bulk out errors (e.g. Cancel URC, bulk out error). */
+ rtl8188eu_cal_txdesc_chksum(ptxdesc);
+}
+
+static void fill_txdesc_sectype(struct pkt_attrib *pattrib, struct tx_desc *ptxdesc)
+{
+ if ((pattrib->encrypt > 0) && !pattrib->bswenc) {
+ switch (pattrib->encrypt) {
+ /* SEC_TYPE : 0:NO_ENC,1:WEP40/TKIP,2:WAPI,3:AES */
+ case _WEP40_:
+ case _WEP104_:
+ ptxdesc->txdw1 |= cpu_to_le32((0x01<<SEC_TYPE_SHT)&0x00c00000);
+ ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
+ break;
+ case _TKIP_:
+ case _TKIP_WTMIC_:
+ ptxdesc->txdw1 |= cpu_to_le32((0x01<<SEC_TYPE_SHT)&0x00c00000);
+ ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
+ break;
+ case _AES_:
+ ptxdesc->txdw1 |= cpu_to_le32((0x03<<SEC_TYPE_SHT)&0x00c00000);
+ ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
+ break;
+ case _NO_PRIVACY_:
+ default:
+ break;
+ }
+ }
+}
+
+static void fill_txdesc_vcs(struct pkt_attrib *pattrib, __le32 *pdw)
+{
+ switch (pattrib->vcs_mode) {
+ case RTS_CTS:
+ *pdw |= cpu_to_le32(RTS_EN);
+ break;
+ case CTS_TO_SELF:
+ *pdw |= cpu_to_le32(CTS_2_SELF);
+ break;
+ case NONE_VCS:
+ default:
+ break;
+ }
+ if (pattrib->vcs_mode) {
+ *pdw |= cpu_to_le32(HW_RTS_EN);
+ /* Set RTS BW */
+ if (pattrib->ht_en) {
+ *pdw |= (pattrib->bwmode&HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(27)) : 0;
+
+ if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
+ *pdw |= cpu_to_le32((0x01 << 28) & 0x30000000);
+ else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
+ *pdw |= cpu_to_le32((0x02 << 28) & 0x30000000);
+ else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
+ *pdw |= 0;
+ else
+ *pdw |= cpu_to_le32((0x03 << 28) & 0x30000000);
+ }
+ }
+}
+
+static void fill_txdesc_phy(struct pkt_attrib *pattrib, __le32 *pdw)
+{
+ if (pattrib->ht_en) {
+ *pdw |= (pattrib->bwmode&HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(25)) : 0;
+
+ if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
+ *pdw |= cpu_to_le32((0x01 << DATA_SC_SHT) & 0x003f0000);
+ else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
+ *pdw |= cpu_to_le32((0x02 << DATA_SC_SHT) & 0x003f0000);
+ else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
+ *pdw |= 0;
+ else
+ *pdw |= cpu_to_le32((0x03 << DATA_SC_SHT) & 0x003f0000);
+ }
+}
+
+static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bagg_pkt)
+{
+ int pull = 0;
+ uint qsel;
+ u8 data_rate, pwr_status, offset;
+ struct adapter *adapt = pxmitframe->padapter;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct tx_desc *ptxdesc = (struct tx_desc *)pmem;
+ struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ int bmcst = IS_MCAST(pattrib->ra);
+
+ if (adapt->registrypriv.mp_mode == 0) {
+ if ((!bagg_pkt) && (urb_zero_packet_chk(adapt, sz) == 0)) {
+ ptxdesc = (struct tx_desc *)(pmem+PACKET_OFFSET_SZ);
+ pull = 1;
+ }
+ }
+
+ _rtw_memset(ptxdesc, 0, sizeof(struct tx_desc));
+
+ /* 4 offset 0 */
+ ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);
+ ptxdesc->txdw0 |= cpu_to_le32(sz & 0x0000ffff);/* update TXPKTSIZE */
+
+ offset = TXDESC_SIZE + OFFSET_SZ;
+
+ ptxdesc->txdw0 |= cpu_to_le32(((offset) << OFFSET_SHT) & 0x00ff0000);/* 32 bytes for TX Desc */
+
+ if (bmcst)
+ ptxdesc->txdw0 |= cpu_to_le32(BMC);
+
+ if (adapt->registrypriv.mp_mode == 0) {
+ if (!bagg_pkt) {
+ if ((pull) && (pxmitframe->pkt_offset > 0))
+ pxmitframe->pkt_offset = pxmitframe->pkt_offset - 1;
+ }
+ }
+
+ /* pkt_offset, unit:8 bytes padding */
+ if (pxmitframe->pkt_offset > 0)
+ ptxdesc->txdw1 |= cpu_to_le32((pxmitframe->pkt_offset << 26) & 0x7c000000);
+
+ /* driver uses rate */
+ ptxdesc->txdw4 |= cpu_to_le32(USERATE);/* rate control always by driver */
+
+ if ((pxmitframe->frame_tag & 0x0f) == DATA_FRAMETAG) {
+ /* offset 4 */
+ ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3F);
+
+ qsel = (uint)(pattrib->qsel & 0x0000001f);
+ ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);
+
+ ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000F0000);
+
+ fill_txdesc_sectype(pattrib, ptxdesc);
+
+ if (pattrib->ampdu_en) {
+ ptxdesc->txdw2 |= cpu_to_le32(AGG_EN);/* AGG EN */
+ ptxdesc->txdw6 = cpu_to_le32(0x6666f800);
+ } else {
+ ptxdesc->txdw2 |= cpu_to_le32(AGG_BK);/* AGG BK */
+ }
+
+ /* offset 8 */
+
+ /* offset 12 */
+ ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << SEQ_SHT) & 0x0FFF0000);
+
+ /* offset 16 , offset 20 */
+ if (pattrib->qos_en)
+ ptxdesc->txdw4 |= cpu_to_le32(QOS);/* QoS */
+
+ /* offset 20 */
+ if (pxmitframe->agg_num > 1)
+ ptxdesc->txdw5 |= cpu_to_le32((pxmitframe->agg_num << USB_TXAGG_NUM_SHT) & 0xFF000000);
+
+ if ((pattrib->ether_type != 0x888e) &&
+ (pattrib->ether_type != 0x0806) &&
+ (pattrib->ether_type != 0x88b4) &&
+ (pattrib->dhcp_pkt != 1)) {
+ /* Non EAP & ARP & DHCP type data packet */
+
+ fill_txdesc_vcs(pattrib, &ptxdesc->txdw4);
+ fill_txdesc_phy(pattrib, &ptxdesc->txdw4);
+
+ ptxdesc->txdw4 |= cpu_to_le32(0x00000008);/* RTS Rate=24M */
+ ptxdesc->txdw5 |= cpu_to_le32(0x0001ff00);/* DATA/RTS Rate FB LMT */
+
+ if (pattrib->ht_en) {
+ if (ODM_RA_GetShortGI_8188E(&haldata->odmpriv, pattrib->mac_id))
+ ptxdesc->txdw5 |= cpu_to_le32(SGI);/* SGI */
+ }
+ data_rate = ODM_RA_GetDecisionRate_8188E(&haldata->odmpriv, pattrib->mac_id);
+ ptxdesc->txdw5 |= cpu_to_le32(data_rate & 0x3F);
+ pwr_status = ODM_RA_GetHwPwrStatus_8188E(&haldata->odmpriv, pattrib->mac_id);
+ ptxdesc->txdw4 |= cpu_to_le32((pwr_status & 0x7) << PWR_STATUS_SHT);
+ } else {
+ /* EAP data packet and ARP packet and DHCP. */
+ /* Use the 1M data rate to send the EAP/ARP packet. */
+			/* This may make the handshake smoother. */
+ ptxdesc->txdw2 |= cpu_to_le32(AGG_BK);/* AGG BK */
+ if (pmlmeinfo->preamble_mode == PREAMBLE_SHORT)
+ ptxdesc->txdw4 |= cpu_to_le32(BIT(24));/* DATA_SHORT */
+ ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
+ }
+ } else if ((pxmitframe->frame_tag&0x0f) == MGNT_FRAMETAG) {
+ /* offset 4 */
+ ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3f);
+
+ qsel = (uint)(pattrib->qsel&0x0000001f);
+ ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);
+
+ ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000f0000);
+
+ /* offset 8 */
+ /* CCX-TXRPT ack for xmit mgmt frames. */
+ if (pxmitframe->ack_report)
+ ptxdesc->txdw2 |= cpu_to_le32(BIT(19));
+
+ /* offset 12 */
+ ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<SEQ_SHT)&0x0FFF0000);
+
+ /* offset 20 */
+ ptxdesc->txdw5 |= cpu_to_le32(RTY_LMT_EN);/* retry limit enable */
+ if (pattrib->retry_ctrl)
+ ptxdesc->txdw5 |= cpu_to_le32(0x00180000);/* retry limit = 6 */
+ else
+ ptxdesc->txdw5 |= cpu_to_le32(0x00300000);/* retry limit = 12 */
+
+ ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
+ } else if ((pxmitframe->frame_tag&0x0f) == TXAGG_FRAMETAG) {
+ DBG_88E("pxmitframe->frame_tag == TXAGG_FRAMETAG\n");
+ } else if (((pxmitframe->frame_tag&0x0f) == MP_FRAMETAG) &&
+ (adapt->registrypriv.mp_mode == 1)) {
+ fill_txdesc_for_mp(adapt, ptxdesc);
+ } else {
+ DBG_88E("pxmitframe->frame_tag = %d\n", pxmitframe->frame_tag);
+
+ /* offset 4 */
+ ptxdesc->txdw1 |= cpu_to_le32((4) & 0x3f);/* CAM_ID(MAC_ID) */
+
+ ptxdesc->txdw1 |= cpu_to_le32((6 << RATE_ID_SHT) & 0x000f0000);/* raid */
+
+ /* offset 8 */
+
+ /* offset 12 */
+ ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<SEQ_SHT)&0x0fff0000);
+
+ /* offset 20 */
+ ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
+ }
+
+ /* 2009.11.05. tynli_test. Suggested by SD4 Filen for FW LPS. */
+ /* (1) The sequence number of each non-Qos frame / broadcast / multicast / */
+	/* mgnt frame should be controlled by Hw because Fw will also send null data */
+	/* which we cannot control when Fw LPS is enabled. */
+	/* --> default enable non-Qos data sequence number. 2010.06.23. by tynli. */
+ /* (2) Enable HW SEQ control for beacon packet, because we use Hw beacon. */
+ /* (3) Use HW Qos SEQ to control the seq num of Ext port non-Qos packets. */
+ /* 2010.06.23. Added by tynli. */
+ if (!pattrib->qos_en) {
+ ptxdesc->txdw3 |= cpu_to_le32(EN_HWSEQ); /* Hw set sequence number */
+ ptxdesc->txdw4 |= cpu_to_le32(HW_SSN); /* Hw set sequence number */
+ }
+
+ ODM_SetTxAntByTxInfo_88E(&haldata->odmpriv, pmem, pattrib->mac_id);
+
+ rtl8188eu_cal_txdesc_chksum(ptxdesc);
+ _dbg_dump_tx_info(adapt, pxmitframe->frame_tag, ptxdesc);
+ return pull;
+}
+
+/* for non-agg data frame or management frame */
+static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
+{
+ s32 ret = _SUCCESS;
+ s32 inner_ret = _SUCCESS;
+ int t, sz, w_sz, pull = 0;
+ u8 *mem_addr;
+ u32 ff_hwaddr;
+ struct xmit_buf *pxmitbuf = pxmitframe->pxmitbuf;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
+ struct security_priv *psecuritypriv = &adapt->securitypriv;
+ if ((pxmitframe->frame_tag == DATA_FRAMETAG) &&
+ (pxmitframe->attrib.ether_type != 0x0806) &&
+ (pxmitframe->attrib.ether_type != 0x888e) &&
+ (pxmitframe->attrib.ether_type != 0x88b4) &&
+ (pxmitframe->attrib.dhcp_pkt != 1))
+ rtw_issue_addbareq_cmd(adapt, pxmitframe);
+ mem_addr = pxmitframe->buf_addr;
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_dump_xframe()\n"));
+
+ for (t = 0; t < pattrib->nr_frags; t++) {
+ if (inner_ret != _SUCCESS && ret == _SUCCESS)
+ ret = _FAIL;
+
+ if (t != (pattrib->nr_frags - 1)) {
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("pattrib->nr_frags=%d\n", pattrib->nr_frags));
+
+ sz = pxmitpriv->frag_len;
+ sz = sz - 4 - (psecuritypriv->sw_encrypt ? 0 : pattrib->icv_len);
+ } else {
+ /* no frag */
+ sz = pattrib->last_txcmdsz;
+ }
+
+ pull = update_txdesc(pxmitframe, mem_addr, sz, false);
+
+ if (pull) {
+ mem_addr += PACKET_OFFSET_SZ; /* pull txdesc head */
+ pxmitframe->buf_addr = mem_addr;
+ w_sz = sz + TXDESC_SIZE;
+ } else {
+ w_sz = sz + TXDESC_SIZE + PACKET_OFFSET_SZ;
+ }
+ ff_hwaddr = rtw_get_ff_hwaddr(pxmitframe);
+
+ inner_ret = rtw_write_port(adapt, ff_hwaddr, w_sz, (unsigned char *)pxmitbuf);
+
+ rtw_count_tx_stats(adapt, pxmitframe, sz);
+
+ RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_write_port, w_sz=%d\n", w_sz));
+
+ mem_addr += w_sz;
+
+ mem_addr = (u8 *)RND4(((size_t)(mem_addr)));
+ }
+
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
+
+ if (ret != _SUCCESS)
+ rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_UNKNOWN);
+
+ return ret;
+}
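+
+/*
+ * rtw_dump_xframe() above and the aggregation code below round addresses
+ * and lengths up to 4- and 8-byte multiples with RND4()/_RND8(), defined
+ * elsewhere in this driver. They are assumed to follow the usual
+ * power-of-two round-up idiom sketched here (round_up_pow2 is a made-up
+ * name, nothing calls it):
+ */
+static inline size_t round_up_pow2(size_t val, size_t n)
+{
+	/* n must be a power of two; e.g. round_up_pow2(13, 8) == 16 */
+	return (val + (n - 1)) & ~(n - 1);
+}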
+
+static u32 xmitframe_need_length(struct xmit_frame *pxmitframe)
+{
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+
+ u32 len = 0;
+
+	/* fragmentation is not considered here */
+ len = pattrib->hdrlen + pattrib->iv_len +
+ SNAP_SIZE + sizeof(u16) +
+ pattrib->pktlen +
+ ((pattrib->bswenc) ? pattrib->icv_len : 0);
+
+ if (pattrib->encrypt == _TKIP_)
+ len += 8;
+
+ return len;
+}
+
+s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct xmit_frame *pxmitframe = NULL;
+ struct xmit_frame *pfirstframe = NULL;
+
+ /* aggregate variable */
+ struct hw_xmit *phwxmit;
+ struct sta_info *psta = NULL;
+ struct tx_servq *ptxservq = NULL;
+
+ unsigned long irql;
+ struct list_head *xmitframe_plist = NULL, *xmitframe_phead = NULL;
+
+ u32 pbuf; /* next pkt address */
+ u32 pbuf_tail; /* last pkt tail */
+ u32 len; /* packet length, except TXDESC_SIZE and PKT_OFFSET */
+
+ u32 bulksize = haldata->UsbBulkOutSize;
+ u8 desc_cnt;
+ u32 bulkptr;
+
+ /* dump frame variable */
+ u32 ff_hwaddr;
+
+ RT_TRACE(_module_rtl8192c_xmit_c_, _drv_info_, ("+xmitframe_complete\n"));
+
+ /* check xmitbuffer is ok */
+ if (pxmitbuf == NULL) {
+ pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
+ if (pxmitbuf == NULL)
+ return false;
+ }
+
+ /* 3 1. pick up first frame */
+ do {
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
+
+ pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
+ if (pxmitframe == NULL) {
+ /* no more xmit frame, release xmit buffer */
+ rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
+ return false;
+ }
+
+ pxmitframe->pxmitbuf = pxmitbuf;
+ pxmitframe->buf_addr = pxmitbuf->pbuf;
+ pxmitbuf->priv_data = pxmitframe;
+
+ pxmitframe->agg_num = 1; /* alloc xmitframe should assign to 1. */
+ pxmitframe->pkt_offset = 1; /* first frame of aggregation, reserve offset */
+
+ rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
+
+ /* always return ndis_packet after rtw_xmitframe_coalesce */
+ rtw_os_xmit_complete(adapt, pxmitframe);
+
+ break;
+ } while (1);
+
+ /* 3 2. aggregate same priority and same DA(AP or STA) frames */
+ pfirstframe = pxmitframe;
+ len = xmitframe_need_length(pfirstframe) + TXDESC_SIZE + (pfirstframe->pkt_offset*PACKET_OFFSET_SZ);
+ pbuf_tail = len;
+ pbuf = _RND8(pbuf_tail);
+
+ /* check pkt amount in one bulk */
+ desc_cnt = 0;
+ bulkptr = bulksize;
+ if (pbuf < bulkptr) {
+ desc_cnt++;
+ } else {
+ desc_cnt = 0;
+ bulkptr = ((pbuf / bulksize) + 1) * bulksize; /* round to next bulksize */
+ }
+
+ /* dequeue same priority packet from station tx queue */
+ psta = pfirstframe->attrib.psta;
+ switch (pfirstframe->attrib.priority) {
+ case 1:
+ case 2:
+ ptxservq = &(psta->sta_xmitpriv.bk_q);
+ phwxmit = pxmitpriv->hwxmits + 3;
+ break;
+ case 4:
+ case 5:
+ ptxservq = &(psta->sta_xmitpriv.vi_q);
+ phwxmit = pxmitpriv->hwxmits + 1;
+ break;
+ case 6:
+ case 7:
+ ptxservq = &(psta->sta_xmitpriv.vo_q);
+ phwxmit = pxmitpriv->hwxmits;
+ break;
+ case 0:
+ case 3:
+ default:
+ ptxservq = &(psta->sta_xmitpriv.be_q);
+ phwxmit = pxmitpriv->hwxmits + 2;
+ break;
+ }
+ _enter_critical_bh(&pxmitpriv->lock, &irql);
+
+ xmitframe_phead = get_list_head(&ptxservq->sta_pending);
+ xmitframe_plist = get_next(xmitframe_phead);
+
+ while (!rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
+ pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
+ xmitframe_plist = get_next(xmitframe_plist);
+
+ pxmitframe->agg_num = 0; /* not first frame of aggregation */
+ pxmitframe->pkt_offset = 0; /* not first frame of aggregation, no need to reserve offset */
+
+ len = xmitframe_need_length(pxmitframe) + TXDESC_SIZE + (pxmitframe->pkt_offset*PACKET_OFFSET_SZ);
+
+ if (_RND8(pbuf + len) > MAX_XMITBUF_SZ) {
+ pxmitframe->agg_num = 1;
+ pxmitframe->pkt_offset = 1;
+ break;
+ }
+ rtw_list_delete(&pxmitframe->list);
+ ptxservq->qcnt--;
+ phwxmit->accnt--;
+
+ pxmitframe->buf_addr = pxmitbuf->pbuf + pbuf;
+
+ rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
+ /* always return ndis_packet after rtw_xmitframe_coalesce */
+ rtw_os_xmit_complete(adapt, pxmitframe);
+
+ /* (len - TXDESC_SIZE) == pxmitframe->attrib.last_txcmdsz */
+ update_txdesc(pxmitframe, pxmitframe->buf_addr, pxmitframe->attrib.last_txcmdsz, true);
+
+ /* don't need xmitframe any more */
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
+
+ /* handle pointer and stop condition */
+ pbuf_tail = pbuf + len;
+ pbuf = _RND8(pbuf_tail);
+
+ pfirstframe->agg_num++;
+ if (MAX_TX_AGG_PACKET_NUMBER == pfirstframe->agg_num)
+ break;
+
+ if (pbuf < bulkptr) {
+ desc_cnt++;
+ if (desc_cnt == haldata->UsbTxAggDescNum)
+ break;
+ } else {
+ desc_cnt = 0;
+ bulkptr = ((pbuf / bulksize) + 1) * bulksize;
+ }
+ } /* end while (aggregate same priority and same DA(AP or STA) frames) */
+
+ if (_rtw_queue_empty(&ptxservq->sta_pending) == true)
+ rtw_list_delete(&ptxservq->tx_pending);
+
+ _exit_critical_bh(&pxmitpriv->lock, &irql);
+ if ((pfirstframe->attrib.ether_type != 0x0806) &&
+ (pfirstframe->attrib.ether_type != 0x888e) &&
+ (pfirstframe->attrib.ether_type != 0x88b4) &&
+ (pfirstframe->attrib.dhcp_pkt != 1))
+ rtw_issue_addbareq_cmd(adapt, pfirstframe);
+ /* 3 3. update first frame txdesc */
+ if ((pbuf_tail % bulksize) == 0) {
+ /* remove pkt_offset */
+ pbuf_tail -= PACKET_OFFSET_SZ;
+ pfirstframe->buf_addr += PACKET_OFFSET_SZ;
+ pfirstframe->pkt_offset--;
+ }
+
+ update_txdesc(pfirstframe, pfirstframe->buf_addr, pfirstframe->attrib.last_txcmdsz, true);
+
+ /* 3 4. write xmit buffer to USB FIFO */
+ ff_hwaddr = rtw_get_ff_hwaddr(pfirstframe);
+ rtw_write_port(adapt, ff_hwaddr, pbuf_tail, (u8 *)pxmitbuf);
+
+	/* 3 5. update statistics */
+ pbuf_tail -= (pfirstframe->agg_num * TXDESC_SIZE);
+ pbuf_tail -= (pfirstframe->pkt_offset * PACKET_OFFSET_SZ);
+
+ rtw_count_tx_stats(adapt, pfirstframe, pbuf_tail);
+
+ rtw_free_xmitframe(pxmitpriv, pfirstframe);
+
+ return true;
+}
+
+static s32 xmitframe_direct(struct adapter *adapt, struct xmit_frame *pxmitframe)
+{
+ s32 res = _SUCCESS;
+
+ res = rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
+ if (res == _SUCCESS)
+ rtw_dump_xframe(adapt, pxmitframe);
+ else
+		DBG_88E("==> %s xmitframe_coalesce failed\n", __func__);
+ return res;
+}
+
+/*
+ * Return
+ * true dump packet directly
+ * false enqueue packet
+ */
+static s32 pre_xmitframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
+{
+ unsigned long irql;
+ s32 res;
+ struct xmit_buf *pxmitbuf = NULL;
+ struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct mlme_priv *pmlmepriv = &adapt->mlmepriv;
+
+ _enter_critical_bh(&pxmitpriv->lock, &irql);
+
+ if (rtw_txframes_sta_ac_pending(adapt, pattrib) > 0)
+ goto enqueue;
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING) == true)
+ goto enqueue;
+
+ pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
+ if (pxmitbuf == NULL)
+ goto enqueue;
+
+ _exit_critical_bh(&pxmitpriv->lock, &irql);
+
+ pxmitframe->pxmitbuf = pxmitbuf;
+ pxmitframe->buf_addr = pxmitbuf->pbuf;
+ pxmitbuf->priv_data = pxmitframe;
+
+ if (xmitframe_direct(adapt, pxmitframe) != _SUCCESS) {
+ rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
+ }
+
+ return true;
+
+enqueue:
+ res = rtw_xmitframe_enqueue(adapt, pxmitframe);
+ _exit_critical_bh(&pxmitpriv->lock, &irql);
+
+ if (res != _SUCCESS) {
+ RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("pre_xmitframe: enqueue xmitframe fail\n"));
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
+
+ /* Trick, make the statistics correct */
+ pxmitpriv->tx_pkts--;
+ pxmitpriv->tx_drop++;
+ return true;
+ }
+
+ return false;
+}
+
+s32 rtl8188eu_mgnt_xmit(struct adapter *adapt, struct xmit_frame *pmgntframe)
+{
+ return rtw_dump_xframe(adapt, pmgntframe);
+}
+
+/*
+ * Return
+ * true dump packet directly ok
+ * false temporary can't transmit packets to hardware
+ */
+s32 rtl8188eu_hal_xmit(struct adapter *adapt, struct xmit_frame *pxmitframe)
+{
+ return pre_xmitframe(adapt, pxmitframe);
+}
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
new file mode 100644
index 00000000000..5e656ce4540
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -0,0 +1,2346 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _HCI_HAL_INIT_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtw_efuse.h>
+
+#include <rtl8188e_hal.h>
+#include <rtl8188e_led.h>
+#include <rtw_iol.h>
+#include <usb_ops.h>
+#include <usb_hal.h>
+#include <usb_osintf.h>
+
+#define HAL_MAC_ENABLE 1
+#define HAL_BB_ENABLE 1
+#define HAL_RF_ENABLE 1
+
+static void _ConfigNormalChipOutEP_8188E(struct adapter *adapt, u8 NumOutPipe)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+
+ switch (NumOutPipe) {
+ case 3:
+ haldata->OutEpQueueSel = TX_SELE_HQ | TX_SELE_LQ | TX_SELE_NQ;
+ haldata->OutEpNumber = 3;
+ break;
+ case 2:
+ haldata->OutEpQueueSel = TX_SELE_HQ | TX_SELE_NQ;
+ haldata->OutEpNumber = 2;
+ break;
+ case 1:
+ haldata->OutEpQueueSel = TX_SELE_HQ;
+ haldata->OutEpNumber = 1;
+ break;
+ default:
+ break;
+ }
+ DBG_88E("%s OutEpQueueSel(0x%02x), OutEpNumber(%d)\n", __func__, haldata->OutEpQueueSel, haldata->OutEpNumber);
+}
+
+static bool HalUsbSetQueuePipeMapping8188EUsb(struct adapter *adapt, u8 NumInPipe, u8 NumOutPipe)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ bool result = false;
+
+ _ConfigNormalChipOutEP_8188E(adapt, NumOutPipe);
+
+ /* Normal chip with one IN and one OUT doesn't have interrupt IN EP. */
+ if (1 == haldata->OutEpNumber) {
+ if (1 != NumInPipe)
+ return result;
+ }
+
+ /* All configs other than the above support one Bulk IN and one Interrupt IN. */
+
+ result = Hal_MappingOutPipe(adapt, NumOutPipe);
+
+ return result;
+}
+
+static void rtl8188eu_interface_configure(struct adapter *adapt)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapt);
+
+ if (pdvobjpriv->ishighspeed)
+ haldata->UsbBulkOutSize = USB_HIGH_SPEED_BULK_SIZE;/* 512 bytes */
+ else
+ haldata->UsbBulkOutSize = USB_FULL_SPEED_BULK_SIZE;/* 64 bytes */
+
+ haldata->interfaceIndex = pdvobjpriv->InterfaceNumber;
+
+ haldata->UsbTxAggMode = 1;
+ haldata->UsbTxAggDescNum = 0x6; /* only 4 bits */
+
+ haldata->UsbRxAggMode = USB_RX_AGG_DMA;/* USB_RX_AGG_DMA; */
+ haldata->UsbRxAggBlockCount = 8; /* unit : 512b */
+ haldata->UsbRxAggBlockTimeout = 0x6;
+ haldata->UsbRxAggPageCount = 48; /* unit: 128 bytes; 0x0A; 10 = MAX_RX_DMA_BUFFER_SIZE/2/haldata->UsbBulkOutSize */
+ haldata->UsbRxAggPageTimeout = 0x4; /* 6, absolute time = 34ms/(2^6) */
+
+ HalUsbSetQueuePipeMapping8188EUsb(adapt,
+ pdvobjpriv->RtNumInPipes, pdvobjpriv->RtNumOutPipes);
+}
+
+static u32 rtl8188eu_InitPowerOn(struct adapter *adapt)
+{
+ u16 value16;
+ /* HW Power on sequence */
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ if (haldata->bMacPwrCtrlOn)
+ return _SUCCESS;
+
+ if (!HalPwrSeqCmdParsing(adapt, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, Rtl8188E_NIC_PWR_ON_FLOW)) {
+ DBG_88E(KERN_ERR "%s: run power on flow fail\n", __func__);
+ return _FAIL;
+ }
+
+ /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */
+ /* Set CR bit10 to enable 32k calibration. Suggested by SD1 Gimmy. Added by tynli. 2011.08.31. */
+ rtw_write16(adapt, REG_CR, 0x00); /* suggested by zhouzhou, by page, 20111230 */
+
+ /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */
+ value16 = rtw_read16(adapt, REG_CR);
+ value16 |= (HCI_TXDMA_EN | HCI_RXDMA_EN | TXDMA_EN | RXDMA_EN
+ | PROTOCOL_EN | SCHEDULE_EN | ENSEC | CALTMR_EN);
+ /* for SDIO - Set CR bit10 to enable 32k calibration. Suggested by SD1 Gimmy. Added by tynli. 2011.08.31. */
+
+ rtw_write16(adapt, REG_CR, value16);
+ haldata->bMacPwrCtrlOn = true;
+
+ return _SUCCESS;
+}
+
+/* Shall USB interface init this? */
+static void _InitInterrupt(struct adapter *Adapter)
+{
+ u32 imr, imr_ex;
+ u8 usb_opt;
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+
+ /* HISR write one to clear */
+ rtw_write32(Adapter, REG_HISR_88E, 0xFFFFFFFF);
+ /* HIMR - */
+ imr = IMR_PSTIMEOUT_88E | IMR_TBDER_88E | IMR_CPWM_88E | IMR_CPWM2_88E;
+ rtw_write32(Adapter, REG_HIMR_88E, imr);
+ haldata->IntrMask[0] = imr;
+
+ imr_ex = IMR_TXERR_88E | IMR_RXERR_88E | IMR_TXFOVW_88E | IMR_RXFOVW_88E;
+ rtw_write32(Adapter, REG_HIMRE_88E, imr_ex);
+ haldata->IntrMask[1] = imr_ex;
+
+ /* REG_USB_SPECIAL_OPTION - BIT(4) */
+ /* 0; Use interrupt endpoint to upload interrupt pkt */
+ /* 1; Use bulk endpoint to upload interrupt pkt, */
+ usb_opt = rtw_read8(Adapter, REG_USB_SPECIAL_OPTION);
+
+ if (!adapter_to_dvobj(Adapter)->ishighspeed)
+ usb_opt = usb_opt & (~INT_BULK_SEL);
+ else
+ usb_opt = usb_opt | (INT_BULK_SEL);
+
+ rtw_write8(Adapter, REG_USB_SPECIAL_OPTION, usb_opt);
+}
+
+static void _InitQueueReservedPage(struct adapter *Adapter)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct registry_priv *pregistrypriv = &Adapter->registrypriv;
+ u32 numHQ = 0;
+ u32 numLQ = 0;
+ u32 numNQ = 0;
+ u32 numPubQ;
+ u32 value32;
+ u8 value8;
+ bool bWiFiConfig = pregistrypriv->wifi_spec;
+
+ if (bWiFiConfig) {
+ if (haldata->OutEpQueueSel & TX_SELE_HQ)
+ numHQ = 0x29;
+
+ if (haldata->OutEpQueueSel & TX_SELE_LQ)
+ numLQ = 0x1C;
+
+ /* NOTE: This step shall be done before writing REG_RQPN. */
+ if (haldata->OutEpQueueSel & TX_SELE_NQ)
+ numNQ = 0x1C;
+ value8 = (u8)_NPQ(numNQ);
+ rtw_write8(Adapter, REG_RQPN_NPQ, value8);
+
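+ /* the remaining pages (out of the 0xA8 total) go to the public queue */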
+ numPubQ = 0xA8 - numHQ - numLQ - numNQ;
+
+ /* TX DMA */
+ value32 = _HPQ(numHQ) | _LPQ(numLQ) | _PUBQ(numPubQ) | LD_RQPN;
+ rtw_write32(Adapter, REG_RQPN, value32);
+ } else {
+ rtw_write16(Adapter, REG_RQPN_NPQ, 0x0000);/* Just follow MP Team,??? Georgia 03/28 */
+ rtw_write16(Adapter, REG_RQPN_NPQ, 0x0d);
+ rtw_write32(Adapter, REG_RQPN, 0x808E000d);/* reserve 7 page for LPS */
+ }
+}
+
+static void _InitTxBufferBoundary(struct adapter *Adapter, u8 txpktbuf_bndy)
+{
+ rtw_write8(Adapter, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
+ rtw_write8(Adapter, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
+ rtw_write8(Adapter, REG_TXPKTBUF_WMAC_LBK_BF_HD, txpktbuf_bndy);
+ rtw_write8(Adapter, REG_TRXFF_BNDY, txpktbuf_bndy);
+ rtw_write8(Adapter, REG_TDECTRL+1, txpktbuf_bndy);
+}
+
+static void _InitPageBoundary(struct adapter *Adapter)
+{
+ /* RX Page Boundary */
+ /* */
+ u16 rxff_bndy = MAX_RX_DMA_BUFFER_SIZE_88E-1;
+
+ rtw_write16(Adapter, (REG_TRXFF_BNDY + 2), rxff_bndy);
+}
+
+static void _InitNormalChipRegPriority(struct adapter *Adapter, u16 beQ,
+ u16 bkQ, u16 viQ, u16 voQ, u16 mgtQ,
+ u16 hiQ)
+{
+ u16 value16 = (rtw_read16(Adapter, REG_TRXDMA_CTRL) & 0x7);
+
+ value16 |= _TXDMA_BEQ_MAP(beQ) | _TXDMA_BKQ_MAP(bkQ) |
+ _TXDMA_VIQ_MAP(viQ) | _TXDMA_VOQ_MAP(voQ) |
+ _TXDMA_MGQ_MAP(mgtQ) | _TXDMA_HIQ_MAP(hiQ);
+
+ rtw_write16(Adapter, REG_TRXDMA_CTRL, value16);
+}
+
+static void _InitNormalChipOneOutEpPriority(struct adapter *Adapter)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+
+ u16 value = 0;
+ switch (haldata->OutEpQueueSel) {
+ case TX_SELE_HQ:
+ value = QUEUE_HIGH;
+ break;
+ case TX_SELE_LQ:
+ value = QUEUE_LOW;
+ break;
+ case TX_SELE_NQ:
+ value = QUEUE_NORMAL;
+ break;
+ default:
+ break;
+ }
+ _InitNormalChipRegPriority(Adapter, value, value, value, value,
+ value, value);
+}
+
+static void _InitNormalChipTwoOutEpPriority(struct adapter *Adapter)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct registry_priv *pregistrypriv = &Adapter->registrypriv;
+ u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
+ u16 valueHi = 0;
+ u16 valueLow = 0;
+
+ switch (haldata->OutEpQueueSel) {
+ case (TX_SELE_HQ | TX_SELE_LQ):
+ valueHi = QUEUE_HIGH;
+ valueLow = QUEUE_LOW;
+ break;
+ case (TX_SELE_NQ | TX_SELE_LQ):
+ valueHi = QUEUE_NORMAL;
+ valueLow = QUEUE_LOW;
+ break;
+ case (TX_SELE_HQ | TX_SELE_NQ):
+ valueHi = QUEUE_HIGH;
+ valueLow = QUEUE_NORMAL;
+ break;
+ default:
+ break;
+ }
+
+ if (!pregistrypriv->wifi_spec) {
+ beQ = valueLow;
+ bkQ = valueLow;
+ viQ = valueHi;
+ voQ = valueHi;
+ mgtQ = valueHi;
+ hiQ = valueHi;
+ } else {/* for WMM ,CONFIG_OUT_EP_WIFI_MODE */
+ beQ = valueLow;
+ bkQ = valueHi;
+ viQ = valueHi;
+ voQ = valueLow;
+ mgtQ = valueHi;
+ hiQ = valueHi;
+ }
+ _InitNormalChipRegPriority(Adapter, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
+}
+
+static void _InitNormalChipThreeOutEpPriority(struct adapter *Adapter)
+{
+ struct registry_priv *pregistrypriv = &Adapter->registrypriv;
+ u16 beQ, bkQ, viQ, voQ, mgtQ, hiQ;
+
+ if (!pregistrypriv->wifi_spec) {/* typical setting */
+ beQ = QUEUE_LOW;
+ bkQ = QUEUE_LOW;
+ viQ = QUEUE_NORMAL;
+ voQ = QUEUE_HIGH;
+ mgtQ = QUEUE_HIGH;
+ hiQ = QUEUE_HIGH;
+ } else {/* for WMM */
+ beQ = QUEUE_LOW;
+ bkQ = QUEUE_NORMAL;
+ viQ = QUEUE_NORMAL;
+ voQ = QUEUE_HIGH;
+ mgtQ = QUEUE_HIGH;
+ hiQ = QUEUE_HIGH;
+ }
+ _InitNormalChipRegPriority(Adapter, beQ, bkQ, viQ, voQ, mgtQ, hiQ);
+}
+
+static void _InitQueuePriority(struct adapter *Adapter)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+
+ switch (haldata->OutEpNumber) {
+ case 1:
+ _InitNormalChipOneOutEpPriority(Adapter);
+ break;
+ case 2:
+ _InitNormalChipTwoOutEpPriority(Adapter);
+ break;
+ case 3:
+ _InitNormalChipThreeOutEpPriority(Adapter);
+ break;
+ default:
+ break;
+ }
+}
+
+static void _InitNetworkType(struct adapter *Adapter)
+{
+ u32 value32;
+
+ value32 = rtw_read32(Adapter, REG_CR);
+ /* TODO: use the other function to set network type */
+ value32 = (value32 & ~MASK_NETTYPE) | _NETTYPE(NT_LINK_AP);
+
+ rtw_write32(Adapter, REG_CR, value32);
+}
+
+static void _InitTransferPageSize(struct adapter *Adapter)
+{
+ /* Tx page size is always 128. */
+
+ u8 value8;
+ value8 = _PSRX(PBP_128) | _PSTX(PBP_128);
+ rtw_write8(Adapter, REG_PBP, value8);
+}
+
+static void _InitDriverInfoSize(struct adapter *Adapter, u8 drvInfoSize)
+{
+ rtw_write8(Adapter, REG_RX_DRVINFO_SZ, drvInfoSize);
+}
+
+static void _InitWMACSetting(struct adapter *Adapter)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+
+ haldata->ReceiveConfig = RCR_AAP | RCR_APM | RCR_AM | RCR_AB |
+ RCR_CBSSID_DATA | RCR_CBSSID_BCN |
+ RCR_APP_ICV | RCR_AMF | RCR_HTC_LOC_CTRL |
+ RCR_APP_MIC | RCR_APP_PHYSTS;
+
+ /* some REG_RCR will be modified later by phy_ConfigMACWithHeaderFile() */
+ rtw_write32(Adapter, REG_RCR, haldata->ReceiveConfig);
+
+ /* Accept all multicast address */
+ rtw_write32(Adapter, REG_MAR, 0xFFFFFFFF);
+ rtw_write32(Adapter, REG_MAR + 4, 0xFFFFFFFF);
+}
+
+static void _InitAdaptiveCtrl(struct adapter *Adapter)
+{
+ u16 value16;
+ u32 value32;
+
+ /* Response Rate Set */
+ value32 = rtw_read32(Adapter, REG_RRSR);
+ value32 &= ~RATE_BITMAP_ALL;
+ value32 |= RATE_RRSR_CCK_ONLY_1M;
+ rtw_write32(Adapter, REG_RRSR, value32);
+
+ /* CF-END Threshold */
+
+ /* SIFS (used in NAV) */
+ value16 = _SPEC_SIFS_CCK(0x10) | _SPEC_SIFS_OFDM(0x10);
+ rtw_write16(Adapter, REG_SPEC_SIFS, value16);
+
+ /* Retry Limit */
+ value16 = _LRL(0x30) | _SRL(0x30);
+ rtw_write16(Adapter, REG_RL, value16);
+}
+
+static void _InitEDCA(struct adapter *Adapter)
+{
+ /* Set Spec SIFS (used in NAV) */
+ rtw_write16(Adapter, REG_SPEC_SIFS, 0x100a);
+ rtw_write16(Adapter, REG_MAC_SPEC_SIFS, 0x100a);
+
+ /* Set SIFS for CCK */
+ rtw_write16(Adapter, REG_SIFS_CTX, 0x100a);
+
+ /* Set SIFS for OFDM */
+ rtw_write16(Adapter, REG_SIFS_TRX, 0x100a);
+
+ /* TXOP */
+ rtw_write32(Adapter, REG_EDCA_BE_PARAM, 0x005EA42B);
+ rtw_write32(Adapter, REG_EDCA_BK_PARAM, 0x0000A44F);
+ rtw_write32(Adapter, REG_EDCA_VI_PARAM, 0x005EA324);
+ rtw_write32(Adapter, REG_EDCA_VO_PARAM, 0x002FA226);
+}
+
+static void _InitBeaconMaxError(struct adapter *Adapter, bool InfraMode)
+{
+}
+
+static void _InitHWLed(struct adapter *Adapter)
+{
+ struct led_priv *pledpriv = &(Adapter->ledpriv);
+
+ if (pledpriv->LedStrategy != HW_LED)
+ return;
+
+/* HW led control */
+/* to do .... */
+/* must consider cases of antenna diversity/combo card/solo card/mini card */
+}
+
+static void _InitRDGSetting(struct adapter *Adapter)
+{
+ rtw_write8(Adapter, REG_RD_CTRL, 0xFF);
+ rtw_write16(Adapter, REG_RD_NAV_NXT, 0x200);
+ rtw_write8(Adapter, REG_RD_RESP_PKT_TH, 0x05);
+}
+
+static void _InitRxSetting(struct adapter *Adapter)
+{
+ rtw_write32(Adapter, REG_MACID, 0x87654321);
+ rtw_write32(Adapter, 0x0700, 0x87654321);
+}
+
+static void _InitRetryFunction(struct adapter *Adapter)
+{
+ u8 value8;
+
+ value8 = rtw_read8(Adapter, REG_FWHW_TXQ_CTRL);
+ value8 |= EN_AMPDU_RTY_NEW;
+ rtw_write8(Adapter, REG_FWHW_TXQ_CTRL, value8);
+
+ /* Set ACK timeout */
+ rtw_write8(Adapter, REG_ACKTO, 0x40);
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: usb_AggSettingTxUpdate()
+ *
+ * Overview: Separate TX/RX parameter updates independently for TP detection and
+ * dynamic TX/RX aggregation parameter updates.
+ *
+ * Input: struct adapter *
+ *
+ * Output/Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 12/10/2010 MHC Separate into smaller functions.
+ *
+ *---------------------------------------------------------------------------*/
+static void usb_AggSettingTxUpdate(struct adapter *Adapter)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ u32 value32;
+
+ if (Adapter->registrypriv.wifi_spec)
+ haldata->UsbTxAggMode = false;
+
+ if (haldata->UsbTxAggMode) {
+ value32 = rtw_read32(Adapter, REG_TDECTRL);
+ value32 = value32 & ~(BLK_DESC_NUM_MASK << BLK_DESC_NUM_SHIFT);
+ value32 |= ((haldata->UsbTxAggDescNum & BLK_DESC_NUM_MASK) << BLK_DESC_NUM_SHIFT);
+
+ rtw_write32(Adapter, REG_TDECTRL, value32);
+ }
+} /* usb_AggSettingTxUpdate */
+
+/*-----------------------------------------------------------------------------
+ * Function: usb_AggSettingRxUpdate()
+ *
+ * Overview: Separate TX/RX parameter updates independently for TP detection and
+ * dynamic TX/RX aggregation parameter updates.
+ *
+ * Input: struct adapter *
+ *
+ * Output/Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 12/10/2010 MHC Separate into smaller functions.
+ *
+ *---------------------------------------------------------------------------*/
+static void
+usb_AggSettingRxUpdate(
+ struct adapter *Adapter
+ )
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ u8 valueDMA;
+ u8 valueUSB;
+
+ valueDMA = rtw_read8(Adapter, REG_TRXDMA_CTRL);
+ valueUSB = rtw_read8(Adapter, REG_USB_SPECIAL_OPTION);
+
+ switch (haldata->UsbRxAggMode) {
+ case USB_RX_AGG_DMA:
+ valueDMA |= RXDMA_AGG_EN;
+ valueUSB &= ~USB_AGG_EN;
+ break;
+ case USB_RX_AGG_USB:
+ valueDMA &= ~RXDMA_AGG_EN;
+ valueUSB |= USB_AGG_EN;
+ break;
+ case USB_RX_AGG_MIX:
+ valueDMA |= RXDMA_AGG_EN;
+ valueUSB |= USB_AGG_EN;
+ break;
+ case USB_RX_AGG_DISABLE:
+ default:
+ valueDMA &= ~RXDMA_AGG_EN;
+ valueUSB &= ~USB_AGG_EN;
+ break;
+ }
+
+ rtw_write8(Adapter, REG_TRXDMA_CTRL, valueDMA);
+ rtw_write8(Adapter, REG_USB_SPECIAL_OPTION, valueUSB);
+
+ switch (haldata->UsbRxAggMode) {
+ case USB_RX_AGG_DMA:
+ rtw_write8(Adapter, REG_RXDMA_AGG_PG_TH, haldata->UsbRxAggPageCount);
+ rtw_write8(Adapter, REG_RXDMA_AGG_PG_TH+1, haldata->UsbRxAggPageTimeout);
+ break;
+ case USB_RX_AGG_USB:
+ rtw_write8(Adapter, REG_USB_AGG_TH, haldata->UsbRxAggBlockCount);
+ rtw_write8(Adapter, REG_USB_AGG_TO, haldata->UsbRxAggBlockTimeout);
+ break;
+ case USB_RX_AGG_MIX:
+ rtw_write8(Adapter, REG_RXDMA_AGG_PG_TH, haldata->UsbRxAggPageCount);
+ rtw_write8(Adapter, REG_RXDMA_AGG_PG_TH+1, (haldata->UsbRxAggPageTimeout & 0x1F));/* 0x280[12:8] */
+ rtw_write8(Adapter, REG_USB_AGG_TH, haldata->UsbRxAggBlockCount);
+ rtw_write8(Adapter, REG_USB_AGG_TO, haldata->UsbRxAggBlockTimeout);
+ break;
+ case USB_RX_AGG_DISABLE:
+ default:
+ /* TODO: */
+ break;
+ }
+
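+ /* PBP_128 matches the fixed page size programmed in _InitTransferPageSize(); record it in bytes */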
+ switch (PBP_128) {
+ case PBP_128:
+ haldata->HwRxPageSize = 128;
+ break;
+ case PBP_64:
+ haldata->HwRxPageSize = 64;
+ break;
+ case PBP_256:
+ haldata->HwRxPageSize = 256;
+ break;
+ case PBP_512:
+ haldata->HwRxPageSize = 512;
+ break;
+ case PBP_1024:
+ haldata->HwRxPageSize = 1024;
+ break;
+ default:
+ break;
+ }
+} /* usb_AggSettingRxUpdate */
+
+static void InitUsbAggregationSetting(struct adapter *Adapter)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+
+ /* Tx aggregation setting */
+ usb_AggSettingTxUpdate(Adapter);
+
+ /* Rx aggregation setting */
+ usb_AggSettingRxUpdate(Adapter);
+
+ /* 201/12/10 MH Add for USB agg mode dynamic switch. */
+ haldata->UsbRxHighSpeedMode = false;
+}
+
+static void _InitOperationMode(struct adapter *Adapter)
+{
+}
+
+static void _InitBeaconParameters(struct adapter *Adapter)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+
+ rtw_write16(Adapter, REG_BCN_CTRL, 0x1010);
+
+ /* TODO: Remove these magic number */
+ rtw_write16(Adapter, REG_TBTT_PROHIBIT, 0x6404);/* ms */
+ rtw_write8(Adapter, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME);/* 5ms */
+ rtw_write8(Adapter, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME); /* 2ms */
+
+ /* Suggested by designer timchen. Change beacon AIFS to the largest number */
+ /* because the test chip does not do contention before sending a beacon. by tynli. 2009.11.03 */
+ rtw_write16(Adapter, REG_BCNTCFG, 0x660F);
+
+ haldata->RegBcnCtrlVal = rtw_read8(Adapter, REG_BCN_CTRL);
+ haldata->RegTxPause = rtw_read8(Adapter, REG_TXPAUSE);
+ haldata->RegFwHwTxQCtrl = rtw_read8(Adapter, REG_FWHW_TXQ_CTRL+2);
+ haldata->RegReg542 = rtw_read8(Adapter, REG_TBTT_PROHIBIT+2);
+ haldata->RegCR_1 = rtw_read8(Adapter, REG_CR+1);
+}
+
+static void _BeaconFunctionEnable(struct adapter *Adapter,
+ bool Enable, bool Linked)
+{
+ rtw_write8(Adapter, REG_BCN_CTRL, (BIT4 | BIT3 | BIT1));
+
+ rtw_write8(Adapter, REG_RD_CTRL+1, 0x6F);
+}
+
+/* Set CCK and OFDM Block "ON" */
+static void _BBTurnOnBlock(struct adapter *Adapter)
+{
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bCCKEn, 0x1);
+ PHY_SetBBReg(Adapter, rFPGA0_RFMOD, bOFDMEn, 0x1);
+}
+
+enum {
+ Antenna_Lfet = 1,
+ Antenna_Right = 2,
+};
+
+static void _InitAntenna_Selection(struct adapter *Adapter)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+
+ if (haldata->AntDivCfg == 0)
+ return;
+ DBG_88E("==> %s ....\n", __func__);
+
+ rtw_write32(Adapter, REG_LEDCFG0, rtw_read32(Adapter, REG_LEDCFG0)|BIT23);
+ PHY_SetBBReg(Adapter, rFPGA0_XAB_RFParameter, BIT13, 0x01);
+
+ if (PHY_QueryBBReg(Adapter, rFPGA0_XA_RFInterfaceOE, 0x300) == Antenna_A)
+ haldata->CurAntenna = Antenna_A;
+ else
+ haldata->CurAntenna = Antenna_B;
+ DBG_88E("%s,Cur_ant:(%x)%s\n", __func__, haldata->CurAntenna, (haldata->CurAntenna == Antenna_A) ? "Antenna_A" : "Antenna_B");
+}
+
+/*-----------------------------------------------------------------------------
+ * Function: HwSuspendModeEnable92Cu()
+ *
+ * Overview: HW suspend mode switch.
+ *
+ * Input: NONE
+ *
+ * Output: NONE
+ *
+ * Return: NONE
+ *
+ * Revised History:
+ * When Who Remark
+ * 08/23/2010 MHC HW suspend mode switch test..
+ *---------------------------------------------------------------------------*/
+enum rt_rf_power_state RfOnOffDetect(struct adapter *adapt)
+{
+ u8 val8;
+ enum rt_rf_power_state rfpowerstate = rf_off;
+
+ if (adapt->pwrctrlpriv.bHWPowerdown) {
+ val8 = rtw_read8(adapt, REG_HSISR);
+ DBG_88E("pwrdown, 0x5c(BIT7)=%02x\n", val8);
+ rfpowerstate = (val8 & BIT7) ? rf_off : rf_on;
+ } else { /* rf on/off */
+ rtw_write8(adapt, REG_MAC_PINMUX_CFG, rtw_read8(adapt, REG_MAC_PINMUX_CFG)&~(BIT3));
+ val8 = rtw_read8(adapt, REG_GPIO_IO_SEL);
+ DBG_88E("GPIO_IN=%02x\n", val8);
+ rfpowerstate = (val8 & BIT3) ? rf_on : rf_off;
+ }
+ return rfpowerstate;
+} /* HalDetectPwrDownMode */
+
+static u32 rtl8188eu_hal_init(struct adapter *Adapter)
+{
+ u8 value8 = 0;
+ u16 value16;
+ u8 txpktbuf_bndy;
+ u32 status = _SUCCESS;
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
+ struct registry_priv *pregistrypriv = &Adapter->registrypriv;
+ u32 init_start_time = rtw_get_current_time();
+
+ #define HAL_INIT_PROFILE_TAG(stage) do {} while (0)
+
+_func_enter_;
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_BEGIN);
+
+ if (Adapter->pwrctrlpriv.bkeepfwalive) {
+ _ps_open_RF(Adapter);
+
+ if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
+ PHY_IQCalibrate_8188E(Adapter, true);
+ } else {
+ PHY_IQCalibrate_8188E(Adapter, false);
+ haldata->odmpriv.RFCalibrateInfo.bIQKInitialized = true;
+ }
+
+ ODM_TXPowerTrackingCheck(&haldata->odmpriv);
+ PHY_LCCalibrate_8188E(Adapter);
+
+ goto exit;
+ }
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_PW_ON);
+ status = rtl8188eu_InitPowerOn(Adapter);
+ if (status == _FAIL) {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_err_, ("Failed to init power on!\n"));
+ goto exit;
+ }
+
+ /* Save target channel */
+ haldata->CurrentChannel = 6;/* default set to 6 */
+
+ if (pwrctrlpriv->reg_rfoff) {
+ pwrctrlpriv->rf_pwrstate = rf_off;
+ }
+
+ /* 2010/08/09 MH We need to check if we need to turnon or off RF after detecting */
+ /* HW GPIO pin. Before PHY_RFConfig8192C. */
+ /* 2010/08/26 MH If Efuse does not support selective suspend then disable the function. */
+
+ if (!pregistrypriv->wifi_spec) {
+ txpktbuf_bndy = TX_PAGE_BOUNDARY_88E;
+ } else {
+ /* for WMM */
+ txpktbuf_bndy = WMM_NORMAL_TX_PAGE_BOUNDARY_88E;
+ }
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MISC01);
+ _InitQueueReservedPage(Adapter);
+ _InitQueuePriority(Adapter);
+ _InitPageBoundary(Adapter);
+ _InitTransferPageSize(Adapter);
+
+ _InitTxBufferBoundary(Adapter, 0);
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_DOWNLOAD_FW);
+ if (Adapter->registrypriv.mp_mode == 1) {
+ _InitRxSetting(Adapter);
+ Adapter->bFWReady = false;
+ haldata->fw_ractrl = false;
+ } else {
+ status = rtl8188e_FirmwareDownload(Adapter);
+
+ if (status != _SUCCESS) {
+ DBG_88E("%s: Download Firmware failed!!\n", __func__);
+ Adapter->bFWReady = false;
+ haldata->fw_ractrl = false;
+ return status;
+ } else {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("Initializeadapt8192CSdio(): Download Firmware Success!!\n"));
+ Adapter->bFWReady = true;
+ haldata->fw_ractrl = false;
+ }
+ }
+ rtl8188e_InitializeFirmwareVars(Adapter);
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MAC);
+#if (HAL_MAC_ENABLE == 1)
+ status = PHY_MACConfig8188E(Adapter);
+ if (status == _FAIL) {
+ DBG_88E(" ### Failed to init MAC ......\n ");
+ goto exit;
+ }
+#endif
+
+ /* */
+ /* d. Initialize BB related configurations. */
+ /* */
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_BB);
+#if (HAL_BB_ENABLE == 1)
+ status = PHY_BBConfig8188E(Adapter);
+ if (status == _FAIL) {
+ DBG_88E(" ### Failed to init BB ......\n ");
+ goto exit;
+ }
+#endif
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_RF);
+#if (HAL_RF_ENABLE == 1)
+ status = PHY_RFConfig8188E(Adapter);
+ if (status == _FAIL) {
+ DBG_88E(" ### Failed to init RF ......\n ");
+ goto exit;
+ }
+#endif
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_EFUSE_PATCH);
+ status = rtl8188e_iol_efuse_patch(Adapter);
+ if (status == _FAIL) {
+ DBG_88E("%s rtl8188e_iol_efuse_patch failed\n", __func__);
+ goto exit;
+ }
+
+ _InitTxBufferBoundary(Adapter, txpktbuf_bndy);
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_LLTT);
+ status = InitLLTTable(Adapter, txpktbuf_bndy);
+ if (status == _FAIL) {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_err_, ("Failed to init LLT table\n"));
+ goto exit;
+ }
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MISC02);
+ /* Get Rx PHY status in order to report RSSI and others. */
+ _InitDriverInfoSize(Adapter, DRVINFO_SZ);
+
+ _InitInterrupt(Adapter);
+ hal_init_macaddr(Adapter);/* set mac_address */
+ _InitNetworkType(Adapter);/* set msr */
+ _InitWMACSetting(Adapter);
+ _InitAdaptiveCtrl(Adapter);
+ _InitEDCA(Adapter);
+ _InitRetryFunction(Adapter);
+ InitUsbAggregationSetting(Adapter);
+ _InitOperationMode(Adapter);/* todo */
+ _InitBeaconParameters(Adapter);
+ _InitBeaconMaxError(Adapter, true);
+
+ /* */
+ /* Init CR MACTXEN, MACRXEN after setting RxFF boundary REG_TRXFF_BNDY to patch */
+ /* Hw bug in which Hw initializes the RxFF boundary size to a value larger than the real Rx buffer size in 88E. */
+ /* */
+ /* Enable MACTXEN/MACRXEN block */
+ value16 = rtw_read16(Adapter, REG_CR);
+ value16 |= (MACTXEN | MACRXEN);
+ rtw_write8(Adapter, REG_CR, value16);
+
+ if (haldata->bRDGEnable)
+ _InitRDGSetting(Adapter);
+
+ /* Enable TX Report */
+ /* Enable Tx Report Timer */
+ value8 = rtw_read8(Adapter, REG_TX_RPT_CTRL);
+ rtw_write8(Adapter, REG_TX_RPT_CTRL, (value8|BIT1|BIT0));
+ /* Set MAX RPT MACID */
+ rtw_write8(Adapter, REG_TX_RPT_CTRL+1, 2);/* FOR sta mode ,0: bc/mc ,1:AP */
+ /* Tx RPT Timer. Unit: 32us */
+ rtw_write16(Adapter, REG_TX_RPT_TIME, 0xCdf0);
+
+ rtw_write8(Adapter, REG_EARLY_MODE_CONTROL, 0);
+
+ rtw_write16(Adapter, REG_PKT_VO_VI_LIFE_TIME, 0x0400); /* unit: 256us. 256ms */
+ rtw_write16(Adapter, REG_PKT_BE_BK_LIFE_TIME, 0x0400); /* unit: 256us. 256ms */
+
+ _InitHWLed(Adapter);
+
+ /* Keep RfRegChnlVal for later use. */
+ haldata->RfRegChnlVal[0] = PHY_QueryRFReg(Adapter, (enum rf_radio_path)0, RF_CHNLBW, bRFRegOffsetMask);
+ haldata->RfRegChnlVal[1] = PHY_QueryRFReg(Adapter, (enum rf_radio_path)1, RF_CHNLBW, bRFRegOffsetMask);
+
+HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_TURN_ON_BLOCK);
+ _BBTurnOnBlock(Adapter);
+
+HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_SECURITY);
+ invalidate_cam_all(Adapter);
+
+HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_MISC11);
+ /* 2010/12/17 MH We need to set TX power according to EFUSE content at first. */
+ PHY_SetTxPowerLevel8188E(Adapter, haldata->CurrentChannel);
+
+/* Move by Neo for USB SS to below step */
+/* _RfPowerSave(Adapter); */
+
+ _InitAntenna_Selection(Adapter);
+
+ /* */
+ /* Disable BAR, suggested by Scott */
+ /* 2010.04.09 add by hpfan */
+ /* */
+ rtw_write32(Adapter, REG_BAR_MODE_CTRL, 0x0201ffff);
+
+ /* HW SEQ CTRL */
+ /* set 0x0 to 0xFF by tynli. Default enable HW SEQ NUM. */
+ rtw_write8(Adapter, REG_HWSEQ_CTRL, 0xFF);
+
+ if (pregistrypriv->wifi_spec)
+ rtw_write16(Adapter, REG_FAST_EDCA_CTRL, 0);
+
+ /* Nav limit , suggest by scott */
+ rtw_write8(Adapter, 0x652, 0x0);
+
+HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_HAL_DM);
+ rtl8188e_InitHalDm(Adapter);
+
+ if (Adapter->registrypriv.mp_mode == 1) {
+ Adapter->mppriv.channel = haldata->CurrentChannel;
+ MPT_InitializeAdapter(Adapter, Adapter->mppriv.channel);
+ } else {
+ /* 2010/08/11 MH Merge from 8192SE for Minicard init. We need to confirm the current radio status */
+ /* and then decide whether to enable RF. For Selective suspend mode we may not */
+ /* call init adapter, which may cause some problems. */
+ /* Fix the bug that Hw/Sw radio off before S3/S4, the RF off action will not be executed */
+ /* in MgntActSet_RF_State() after wake up, because the value of haldata->eRFPowerState */
+ /* is the same as eRfOff, we should change it to eRfOn after we config RF parameters. */
+ /* Added by tynli. 2010.03.30. */
+ pwrctrlpriv->rf_pwrstate = rf_on;
+
+ /* enable Tx report. */
+ rtw_write8(Adapter, REG_FWHW_TXQ_CTRL+1, 0x0F);
+
+ /* Suggested by SD1 pisa. Added by tynli. 2011.10.21. */
+ rtw_write8(Adapter, REG_EARLY_MODE_CONTROL+3, 0x01);/* Pretx_en, for WEP/TKIP SEC */
+
+ /* tynli_test_tx_report. */
+ rtw_write16(Adapter, REG_TX_RPT_TIME, 0x3DF0);
+
+ /* enable tx DMA to drop the redundant data of the packet */
+ rtw_write16(Adapter, REG_TXDMA_OFFSET_CHK, (rtw_read16(Adapter, REG_TXDMA_OFFSET_CHK) | DROP_DATA_EN));
+
+HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_IQK);
+ /* 2010/08/26 MH Merge from 8192CE. */
+ if (pwrctrlpriv->rf_pwrstate == rf_on) {
+ if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
+ PHY_IQCalibrate_8188E(Adapter, true);
+ } else {
+ PHY_IQCalibrate_8188E(Adapter, false);
+ haldata->odmpriv.RFCalibrateInfo.bIQKInitialized = true;
+ }
+
+HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_PW_TRACK);
+
+ ODM_TXPowerTrackingCheck(&haldata->odmpriv);
+
+HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_LCK);
+ PHY_LCCalibrate_8188E(Adapter);
+ }
+ }
+
+/* HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_PABIAS); */
+/* _InitPABias(Adapter); */
+ rtw_write8(Adapter, REG_USB_HRPWM, 0);
+
+ /* ack for xmit mgmt frames. */
+ rtw_write32(Adapter, REG_FWHW_TXQ_CTRL, rtw_read32(Adapter, REG_FWHW_TXQ_CTRL)|BIT(12));
+
+exit:
+HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_END);
+
+ DBG_88E("%s in %dms\n", __func__, rtw_get_passing_time_ms(init_start_time));
+
+_func_exit_;
+
+ return status;
+}
+
+void _ps_open_RF(struct adapter *adapt)
+{
+ /* here call with bRegSSPwrLvl 1, bRegSSPwrLvl 2 needs to be verified */
+ /* phy_SsPwrSwitch92CU(adapt, rf_on, 1); */
+}
+
+static void _ps_close_RF(struct adapter *adapt)
+{
+ /* here call with bRegSSPwrLvl 1, bRegSSPwrLvl 2 needs to be verified */
+ /* phy_SsPwrSwitch92CU(adapt, rf_off, 1); */
+}
+
+static void CardDisableRTL8188EU(struct adapter *Adapter)
+{
+ u8 val8;
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("CardDisableRTL8188EU\n"));
+
+ /* Stop Tx Report Timer. 0x4EC[Bit1]=b'0 */
+ val8 = rtw_read8(Adapter, REG_TX_RPT_CTRL);
+ rtw_write8(Adapter, REG_TX_RPT_CTRL, val8&(~BIT1));
+
+ /* stop rx */
+ rtw_write8(Adapter, REG_CR, 0x0);
+
+ /* Run LPS WL RFOFF flow */
+ HalPwrSeqCmdParsing(Adapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, Rtl8188E_NIC_LPS_ENTER_FLOW);
+
+ /* 2. 0x1F[7:0] = 0 turn off RF */
+
+ val8 = rtw_read8(Adapter, REG_MCUFWDL);
+ if ((val8 & RAM_DL_SEL) && Adapter->bFWReady) { /* 8051 RAM code */
+ /* Reset MCU 0x2[10]=0. */
+ val8 = rtw_read8(Adapter, REG_SYS_FUNC_EN+1);
+ val8 &= ~BIT(2); /* 0x2[10], FEN_CPUEN */
+ rtw_write8(Adapter, REG_SYS_FUNC_EN+1, val8);
+ }
+
+ /* reset MCU ready status */
+ rtw_write8(Adapter, REG_MCUFWDL, 0);
+
+ /* YJ,add,111212 */
+ /* Disable 32k */
+ val8 = rtw_read8(Adapter, REG_32K_CTRL);
+ rtw_write8(Adapter, REG_32K_CTRL, val8&(~BIT0));
+
+ /* Card disable power action flow */
+ HalPwrSeqCmdParsing(Adapter, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, Rtl8188E_NIC_DISABLE_FLOW);
+
+ /* Reset MCU IO Wrapper */
+ val8 = rtw_read8(Adapter, REG_RSV_CTRL+1);
+ rtw_write8(Adapter, REG_RSV_CTRL+1, (val8&(~BIT3)));
+ val8 = rtw_read8(Adapter, REG_RSV_CTRL+1);
+ rtw_write8(Adapter, REG_RSV_CTRL+1, val8|BIT3);
+
+ /* YJ,test add, 111207. For Power Consumption. */
+ val8 = rtw_read8(Adapter, GPIO_IN);
+ rtw_write8(Adapter, GPIO_OUT, val8);
+ rtw_write8(Adapter, GPIO_IO_SEL, 0xFF);/* Reg0x46 */
+
+ val8 = rtw_read8(Adapter, REG_GPIO_IO_SEL);
+ rtw_write8(Adapter, REG_GPIO_IO_SEL, (val8<<4));
+ val8 = rtw_read8(Adapter, REG_GPIO_IO_SEL+1);
+ rtw_write8(Adapter, REG_GPIO_IO_SEL+1, val8|0x0F);/* Reg0x43 */
+ rtw_write32(Adapter, REG_BB_PAD_CTRL, 0x00080808);/* set LNA ,TRSW,EX_PA Pin to output mode */
+ haldata->bMacPwrCtrlOn = false;
+ Adapter->bFWReady = false;
+}
+static void rtl8192cu_hw_power_down(struct adapter *adapt)
+{
+ /* 2010/08/09 MH For the power down module, we need to enable the register block control reg at 0x1c. */
+ /* Then enable power down control bit of register 0x04 BIT4 and BIT15 as 1. */
+
+ /* Enable register area 0x0-0xc. */
+ rtw_write8(adapt, REG_RSV_CTRL, 0x0);
+ rtw_write16(adapt, REG_APS_FSMCO, 0x8812);
+}
+
+static u32 rtl8188eu_hal_deinit(struct adapter *Adapter)
+{
+
+ DBG_88E("==> %s\n", __func__);
+
+ rtw_write32(Adapter, REG_HIMR_88E, IMR_DISABLED_88E);
+ rtw_write32(Adapter, REG_HIMRE_88E, IMR_DISABLED_88E);
+
+ DBG_88E("bkeepfwalive(%x)\n", Adapter->pwrctrlpriv.bkeepfwalive);
+ if (Adapter->pwrctrlpriv.bkeepfwalive) {
+ _ps_close_RF(Adapter);
+ if ((Adapter->pwrctrlpriv.bHWPwrPindetect) && (Adapter->pwrctrlpriv.bHWPowerdown))
+ rtl8192cu_hw_power_down(Adapter);
+ } else {
+ if (Adapter->hw_init_completed) {
+ CardDisableRTL8188EU(Adapter);
+
+ if ((Adapter->pwrctrlpriv.bHWPwrPindetect) && (Adapter->pwrctrlpriv.bHWPowerdown))
+ rtl8192cu_hw_power_down(Adapter);
+ }
+ }
+ return _SUCCESS;
+}
+
+static unsigned int rtl8188eu_inirp_init(struct adapter *Adapter)
+{
+ u8 i;
+ struct recv_buf *precvbuf;
+ uint status;
+ struct intf_hdl *pintfhdl = &Adapter->iopriv.intf;
+ struct recv_priv *precvpriv = &(Adapter->recvpriv);
+ u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
+
+_func_enter_;
+
+ _read_port = pintfhdl->io_ops._read_port;
+
+ status = _SUCCESS;
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_,
+ ("===> usb_inirp_init\n"));
+
+ precvpriv->ff_hwaddr = RECV_BULK_IN_ADDR;
+
+ /* issue Rx irp to receive data */
+ precvbuf = (struct recv_buf *)precvpriv->precv_buf;
+ for (i = 0; i < NR_RECVBUFF; i++) {
+ if (_read_port(pintfhdl, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf) == false) {
+ RT_TRACE(_module_hci_hal_init_c_, _drv_err_, ("usb_rx_init: usb_read_port error\n"));
+ status = _FAIL;
+ goto exit;
+ }
+
+ precvbuf++;
+ precvpriv->free_recv_buf_queue_cnt--;
+ }
+
+exit:
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("<=== usb_inirp_init\n"));
+
+_func_exit_;
+
+ return status;
+}
+
+static unsigned int rtl8188eu_inirp_deinit(struct adapter *Adapter)
+{
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("\n ===> usb_rx_deinit\n"));
+
+ rtw_read_port_cancel(Adapter);
+
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("\n <=== usb_rx_deinit\n"));
+
+ return _SUCCESS;
+}
+
+/* */
+/* */
+/* EEPROM/EFUSE Content Parsing */
+/* */
+/* */
+static void _ReadLEDSetting(struct adapter *Adapter, u8 *PROMContent, bool AutoloadFail)
+{
+ struct led_priv *pledpriv = &(Adapter->ledpriv);
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+
+ pledpriv->bRegUseLed = true;
+ pledpriv->LedStrategy = SW_LED_MODE1;
+ haldata->bLedOpenDrain = true;/* Support Open-drain arrangement for controlling the LED. */
+}
+
+static void Hal_EfuseParsePIDVID_8188EU(struct adapter *adapt, u8 *hwinfo, bool AutoLoadFail)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+
+ if (!AutoLoadFail) {
+ /* VID, PID */
+ haldata->EEPROMVID = EF2BYTE(*(__le16 *)&hwinfo[EEPROM_VID_88EU]);
+ haldata->EEPROMPID = EF2BYTE(*(__le16 *)&hwinfo[EEPROM_PID_88EU]);
+
+ /* Customer ID, 0x00 and 0xff are reserved for Realtek. */
+ haldata->EEPROMCustomerID = *(u8 *)&hwinfo[EEPROM_CUSTOMERID_88E];
+ haldata->EEPROMSubCustomerID = EEPROM_Default_SubCustomerID;
+ } else {
+ haldata->EEPROMVID = EEPROM_Default_VID;
+ haldata->EEPROMPID = EEPROM_Default_PID;
+
+ /* Customer ID, 0x00 and 0xff are reserved for Realtek. */
+ haldata->EEPROMCustomerID = EEPROM_Default_CustomerID;
+ haldata->EEPROMSubCustomerID = EEPROM_Default_SubCustomerID;
+ }
+
+ DBG_88E("VID = 0x%04X, PID = 0x%04X\n", haldata->EEPROMVID, haldata->EEPROMPID);
+ DBG_88E("Customer ID: 0x%02X, SubCustomer ID: 0x%02X\n", haldata->EEPROMCustomerID, haldata->EEPROMSubCustomerID);
+}
+
+static void Hal_EfuseParseMACAddr_8188EU(struct adapter *adapt, u8 *hwinfo, bool AutoLoadFail)
+{
+ u16 i;
+ u8 sMacAddr[6] = {0x00, 0xE0, 0x4C, 0x81, 0x88, 0x02};
+ struct eeprom_priv *eeprom = GET_EEPROM_EFUSE_PRIV(adapt);
+
+ if (AutoLoadFail) {
+ for (i = 0; i < 6; i++)
+ eeprom->mac_addr[i] = sMacAddr[i];
+ } else {
+ /* Read Permanent MAC address */
+ memcpy(eeprom->mac_addr, &hwinfo[EEPROM_MAC_ADDR_88EU], ETH_ALEN);
+ }
+ RT_TRACE(_module_hci_hal_init_c_, _drv_notice_,
+ ("Hal_EfuseParseMACAddr_8188EU: Permanent Address = %02x-%02x-%02x-%02x-%02x-%02x\n",
+ eeprom->mac_addr[0], eeprom->mac_addr[1],
+ eeprom->mac_addr[2], eeprom->mac_addr[3],
+ eeprom->mac_addr[4], eeprom->mac_addr[5]));
+}
+
+static void Hal_CustomizeByCustomerID_8188EU(struct adapter *adapt)
+{
+}
+
+static void
+readAdapterInfo_8188EU(
+ struct adapter *adapt
+ )
+{
+ struct eeprom_priv *eeprom = GET_EEPROM_EFUSE_PRIV(adapt);
+
+ /* parse the eeprom/efuse content */
+ Hal_EfuseParseIDCode88E(adapt, eeprom->efuse_eeprom_data);
+ Hal_EfuseParsePIDVID_8188EU(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_EfuseParseMACAddr_8188EU(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+
+ Hal_ReadPowerSavingMode88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_ReadTxPowerInfo88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_EfuseParseEEPROMVer88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ rtl8188e_EfuseParseChnlPlan(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_EfuseParseXtal_8188E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_EfuseParseCustomerID88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_ReadAntennaDiversity88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_EfuseParseBoardType88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+ Hal_ReadThermalMeter_88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+
+ /* */
+ /* The following part initialize some vars by PG info. */
+ /* */
+ Hal_InitChannelPlan(adapt);
+ Hal_CustomizeByCustomerID_8188EU(adapt);
+
+ _ReadLEDSetting(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
+}
+
+static void _ReadPROMContent(
+ struct adapter *Adapter
+ )
+{
+ struct eeprom_priv *eeprom = GET_EEPROM_EFUSE_PRIV(Adapter);
+ u8 eeValue;
+
+ /* check system boot selection */
+ eeValue = rtw_read8(Adapter, REG_9346CR);
+ eeprom->EepromOrEfuse = (eeValue & BOOT_FROM_EEPROM) ? true : false;
+ eeprom->bautoload_fail_flag = (eeValue & EEPROM_EN) ? false : true;
+
+ DBG_88E("Boot from %s, Autoload %s !\n", (eeprom->EepromOrEfuse ? "EEPROM" : "EFUSE"),
+ (eeprom->bautoload_fail_flag ? "Fail" : "OK"));
+
+ Hal_InitPGData88E(Adapter);
+ readAdapterInfo_8188EU(Adapter);
+}
+
+static void _ReadRFType(struct adapter *Adapter)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+
+ haldata->rf_chip = RF_6052;
+}
+
+static int _ReadAdapterInfo8188EU(struct adapter *Adapter)
+{
+ u32 start = rtw_get_current_time();
+
+ MSG_88E("====> %s\n", __func__);
+
+ _ReadRFType(Adapter);/* rf_chip -> _InitRFType() */
+ _ReadPROMContent(Adapter);
+
+ MSG_88E("<==== %s in %d ms\n", __func__, rtw_get_passing_time_ms(start));
+
+ return _SUCCESS;
+}
+
+static void ReadAdapterInfo8188EU(struct adapter *Adapter)
+{
+ /* Read EEPROM size before call any EEPROM function */
+ Adapter->EepromAddressSize = GetEEPROMSize8188E(Adapter);
+
+ _ReadAdapterInfo8188EU(Adapter);
+}
+
+#define GPIO_DEBUG_PORT_NUM 0
+static void rtl8192cu_trigger_gpio_0(struct adapter *adapt)
+{
+}
+
+static void ResumeTxBeacon(struct adapter *adapt)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+
+ /* 2010.03.01. Marked by tynli. No need to call workitem because we record the value */
+ /* which should be read from register to a global variable. */
+
+ rtw_write8(adapt, REG_FWHW_TXQ_CTRL+2, (haldata->RegFwHwTxQCtrl) | BIT6);
+ haldata->RegFwHwTxQCtrl |= BIT6;
+ rtw_write8(adapt, REG_TBTT_PROHIBIT+1, 0xff);
+ haldata->RegReg542 |= BIT0;
+ rtw_write8(adapt, REG_TBTT_PROHIBIT+2, haldata->RegReg542);
+}
+
+static void StopTxBeacon(struct adapter *adapt)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+
+ /* 2010.03.01. Marked by tynli. No need to call workitem because we record the value */
+ /* which should be read from register to a global variable. */
+
+ rtw_write8(adapt, REG_FWHW_TXQ_CTRL+2, (haldata->RegFwHwTxQCtrl) & (~BIT6));
+ haldata->RegFwHwTxQCtrl &= (~BIT6);
+ rtw_write8(adapt, REG_TBTT_PROHIBIT+1, 0x64);
+ haldata->RegReg542 &= ~(BIT0);
+ rtw_write8(adapt, REG_TBTT_PROHIBIT+2, haldata->RegReg542);
+
+ /* todo: CheckFwRsvdPageContent(Adapter); 2010.06.23. Added by tynli. */
+}
+
+static void hw_var_set_opmode(struct adapter *Adapter, u8 variable, u8 *val)
+{
+ u8 val8;
+ u8 mode = *((u8 *)val);
+
+ /* disable Port0 TSF update */
+ rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL)|BIT(4));
+
+ /* set net_type */
+ val8 = rtw_read8(Adapter, MSR)&0x0c;
+ val8 |= mode;
+ rtw_write8(Adapter, MSR, val8);
+
+ DBG_88E("%s()-%d mode = %d\n", __func__, __LINE__, mode);
+
+ if ((mode == _HW_STATE_STATION_) || (mode == _HW_STATE_NOLINK_)) {
+ StopTxBeacon(Adapter);
+
+ rtw_write8(Adapter, REG_BCN_CTRL, 0x19);/* disable atim wnd */
+ } else if ((mode == _HW_STATE_ADHOC_)) {
+ ResumeTxBeacon(Adapter);
+ rtw_write8(Adapter, REG_BCN_CTRL, 0x1a);
+ } else if (mode == _HW_STATE_AP_) {
+ ResumeTxBeacon(Adapter);
+
+ rtw_write8(Adapter, REG_BCN_CTRL, 0x12);
+
+ /* Set RCR */
+ rtw_write32(Adapter, REG_RCR, 0x7000208e);/* CBSSID_DATA must set to 0,reject ICV_ERR packet */
+ /* enable to rx data frame */
+ rtw_write16(Adapter, REG_RXFLTMAP2, 0xFFFF);
+ /* enable to rx ps-poll */
+ rtw_write16(Adapter, REG_RXFLTMAP1, 0x0400);
+
+ /* Beacon Control related register for first time */
+ rtw_write8(Adapter, REG_BCNDMATIM, 0x02); /* 2ms */
+
+ rtw_write8(Adapter, REG_ATIMWND, 0x0a); /* 10ms */
+ rtw_write16(Adapter, REG_BCNTCFG, 0x00);
+ rtw_write16(Adapter, REG_TBTT_PROHIBIT, 0xff04);
+ rtw_write16(Adapter, REG_TSFTR_SYN_OFFSET, 0x7fff);/* +32767 (~32ms) */
+
+ /* reset TSF */
+ rtw_write8(Adapter, REG_DUAL_TSF_RST, BIT(0));
+
+ /* BIT3 - If set 0, hw will clr bcnq when tx beacon ok/fail or port 0 */
+ rtw_write8(Adapter, REG_MBID_NUM, rtw_read8(Adapter, REG_MBID_NUM) | BIT(3) | BIT(4));
+
+ /* enable BCN0 Function for if1 */
+ /* don't enable update TSF0 for if1 (due to TSF update when beacon/probe rsp are received) */
+ rtw_write8(Adapter, REG_BCN_CTRL, (DIS_TSF_UDT0_NORMAL_CHIP|EN_BCN_FUNCTION | BIT(1)));
+
+ /* dis BCN1 ATIM WND if if2 is station */
+ rtw_write8(Adapter, REG_BCN_CTRL_1, rtw_read8(Adapter, REG_BCN_CTRL_1) | BIT(0));
+ }
+}
+
+static void hw_var_set_macaddr(struct adapter *Adapter, u8 variable, u8 *val)
+{
+ u8 idx = 0;
+ u32 reg_macid;
+
+ reg_macid = REG_MACID;
+
+ for (idx = 0; idx < 6; idx++)
+ rtw_write8(Adapter, (reg_macid+idx), val[idx]);
+}
+
+static void hw_var_set_bssid(struct adapter *Adapter, u8 variable, u8 *val)
+{
+ u8 idx = 0;
+ u32 reg_bssid;
+
+ reg_bssid = REG_BSSID;
+
+ for (idx = 0; idx < 6; idx++)
+ rtw_write8(Adapter, (reg_bssid+idx), val[idx]);
+}
+
+static void hw_var_set_bcn_func(struct adapter *Adapter, u8 variable, u8 *val)
+{
+ u32 bcn_ctrl_reg;
+
+ bcn_ctrl_reg = REG_BCN_CTRL;
+
+ if (*((u8 *)val))
+ rtw_write8(Adapter, bcn_ctrl_reg, (EN_BCN_FUNCTION | EN_TXBCN_RPT));
+ else
+ rtw_write8(Adapter, bcn_ctrl_reg, rtw_read8(Adapter, bcn_ctrl_reg)&(~(EN_BCN_FUNCTION | EN_TXBCN_RPT)));
+}
+
+static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct dm_priv *pdmpriv = &haldata->dmpriv;
+ struct odm_dm_struct *podmpriv = &haldata->odmpriv;
+_func_enter_;
+
+ switch (variable) {
+ case HW_VAR_MEDIA_STATUS:
+ {
+ u8 val8;
+
+ val8 = rtw_read8(Adapter, MSR)&0x0c;
+ val8 |= *((u8 *)val);
+ rtw_write8(Adapter, MSR, val8);
+ }
+ break;
+ case HW_VAR_MEDIA_STATUS1:
+ {
+ u8 val8;
+
+ val8 = rtw_read8(Adapter, MSR) & 0x03;
+ val8 |= *((u8 *)val) << 2;
+ rtw_write8(Adapter, MSR, val8);
+ }
+ break;
+ case HW_VAR_SET_OPMODE:
+ hw_var_set_opmode(Adapter, variable, val);
+ break;
+ case HW_VAR_MAC_ADDR:
+ hw_var_set_macaddr(Adapter, variable, val);
+ break;
+ case HW_VAR_BSSID:
+ hw_var_set_bssid(Adapter, variable, val);
+ break;
+ case HW_VAR_BASIC_RATE:
+ {
+ u16 BrateCfg = 0;
+ u8 RateIndex = 0;
+
+ /* 2007.01.16, by Emily */
+ /* Select RRSR (in Legacy-OFDM and CCK) */
+ /* For 8190, we select only 24M, 12M, 6M, 11M, 5.5M, 2M, and 1M from the Basic rate. */
+ /* We do not use other rates. */
+ HalSetBrateCfg(Adapter, val, &BrateCfg);
+ DBG_88E("HW_VAR_BASIC_RATE: BrateCfg(%#x)\n", BrateCfg);
+
+ /* 2011.03.30 add by Luke Lee */
+ /* CCK 2M ACK should be disabled for some BCM and Atheros AP IOT */
+ /* because CCK 2M has poor TXEVM */
+ /* CCK 5.5M & 11M ACK should be enabled for better performance */
+
+ BrateCfg = (BrateCfg | 0xd) & 0x15d;
+ haldata->BasicRateSet = BrateCfg;
+
+ BrateCfg |= 0x01; /* default enable 1M ACK rate */
+ /* Set RRSR rate table. */
+ rtw_write8(Adapter, REG_RRSR, BrateCfg & 0xff);
+ rtw_write8(Adapter, REG_RRSR+1, (BrateCfg >> 8) & 0xff);
+ rtw_write8(Adapter, REG_RRSR+2, rtw_read8(Adapter, REG_RRSR+2)&0xf0);
+
+ /* Set RTS initial rate */
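+ /* RateIndex ends up as the bit position of the highest basic rate set in BrateCfg */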
+ while (BrateCfg > 0x1) {
+ BrateCfg = (BrateCfg >> 1);
+ RateIndex++;
+ }
+ /* Ziv - Check */
+ rtw_write8(Adapter, REG_INIRTS_RATE_SEL, RateIndex);
+ }
+ break;
+ case HW_VAR_TXPAUSE:
+ rtw_write8(Adapter, REG_TXPAUSE, *((u8 *)val));
+ break;
+ case HW_VAR_BCN_FUNC:
+ hw_var_set_bcn_func(Adapter, variable, val);
+ break;
+ case HW_VAR_CORRECT_TSF:
+ {
+ u64 tsf;
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ tsf = pmlmeext->TSFValue - rtw_modular64(pmlmeext->TSFValue, (pmlmeinfo->bcn_interval*1024)) - 1024; /* us */
+
+ if (((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE))
+ StopTxBeacon(Adapter);
+
+ /* disable related TSF function */
+ rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL)&(~BIT(3)));
+
+ rtw_write32(Adapter, REG_TSFTR, tsf);
+ rtw_write32(Adapter, REG_TSFTR+4, tsf>>32);
+
+ /* enable related TSF function */
+ rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL)|BIT(3));
+
+ if (((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE))
+ ResumeTxBeacon(Adapter);
+ }
+ break;
+ case HW_VAR_CHECK_BSSID:
+ if (*((u8 *)val)) {
+ rtw_write32(Adapter, REG_RCR, rtw_read32(Adapter, REG_RCR)|RCR_CBSSID_DATA|RCR_CBSSID_BCN);
+ } else {
+ u32 val32;
+
+ val32 = rtw_read32(Adapter, REG_RCR);
+
+ val32 &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+
+ rtw_write32(Adapter, REG_RCR, val32);
+ }
+ break;
+ case HW_VAR_MLME_DISCONNECT:
+ /* Set RCR to not to receive data frame when NO LINK state */
+ /* reject all data frames */
+ rtw_write16(Adapter, REG_RXFLTMAP2, 0x00);
+
+ /* reset TSF */
+ rtw_write8(Adapter, REG_DUAL_TSF_RST, (BIT(0)|BIT(1)));
+
+ /* disable update TSF */
+ rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL)|BIT(4));
+ break;
+ case HW_VAR_MLME_SITESURVEY:
+ if (*((u8 *)val)) { /* under sitesurvey */
+ /* config RCR to receive different BSSID & not to receive data frame */
+ u32 v = rtw_read32(Adapter, REG_RCR);
+ v &= ~(RCR_CBSSID_BCN);
+ rtw_write32(Adapter, REG_RCR, v);
+ /* reject all data frame */
+ rtw_write16(Adapter, REG_RXFLTMAP2, 0x00);
+
+ /* disable update TSF */
+ rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL)|BIT(4));
+ } else { /* sitesurvey done */
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ if ((is_client_associated_to_ap(Adapter)) ||
+ ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE)) {
+ /* enable to rx data frame */
+ rtw_write16(Adapter, REG_RXFLTMAP2, 0xFFFF);
+
+ /* enable update TSF */
+ rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL)&(~BIT(4)));
+ } else if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
+ rtw_write16(Adapter, REG_RXFLTMAP2, 0xFFFF);
+ /* enable update TSF */
+ rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL)&(~BIT(4)));
+ }
+ if ((pmlmeinfo->state&0x03) == WIFI_FW_AP_STATE) {
+ rtw_write32(Adapter, REG_RCR, rtw_read32(Adapter, REG_RCR)|RCR_CBSSID_BCN);
+ } else {
+ if (Adapter->in_cta_test) {
+ u32 v = rtw_read32(Adapter, REG_RCR);
+ v &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);/* RCR_ADF */
+ rtw_write32(Adapter, REG_RCR, v);
+ } else {
+ rtw_write32(Adapter, REG_RCR, rtw_read32(Adapter, REG_RCR)|RCR_CBSSID_BCN);
+ }
+ }
+ }
+ break;
+ case HW_VAR_MLME_JOIN:
+ {
+ u8 RetryLimit = 0x30;
+ u8 type = *((u8 *)val);
+ struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
+
+ if (type == 0) { /* prepare to join */
+ /* enable to rx data frame.Accept all data frame */
+ rtw_write16(Adapter, REG_RXFLTMAP2, 0xFFFF);
+
+ if (Adapter->in_cta_test) {
+ u32 v = rtw_read32(Adapter, REG_RCR);
+ v &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);/* RCR_ADF */
+ rtw_write32(Adapter, REG_RCR, v);
+ } else {
+ rtw_write32(Adapter, REG_RCR, rtw_read32(Adapter, REG_RCR)|RCR_CBSSID_DATA|RCR_CBSSID_BCN);
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
+ RetryLimit = (haldata->CustomerID == RT_CID_CCX) ? 7 : 48;
+ else /* Ad-hoc Mode */
+ RetryLimit = 0x7;
+ } else if (type == 1) {
+ /* joinbss_event call back when join res < 0 */
+ rtw_write16(Adapter, REG_RXFLTMAP2, 0x00);
+ } else if (type == 2) {
+ /* sta add event call back */
+ /* enable update TSF */
+ rtw_write8(Adapter, REG_BCN_CTRL, rtw_read8(Adapter, REG_BCN_CTRL)&(~BIT(4)));
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE))
+ RetryLimit = 0x7;
+ }
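+ /* program the same limit for both the short and long retry counts */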
+ rtw_write16(Adapter, REG_RL, RetryLimit << RETRY_LIMIT_SHORT_SHIFT | RetryLimit << RETRY_LIMIT_LONG_SHIFT);
+ }
+ break;
+ case HW_VAR_BEACON_INTERVAL:
+ rtw_write16(Adapter, REG_BCN_INTERVAL, *((u16 *)val));
+ break;
+ case HW_VAR_SLOT_TIME:
+ {
+ u8 u1bAIFS, aSifsTime;
+ struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ rtw_write8(Adapter, REG_SLOT, val[0]);
+
+ if (pmlmeinfo->WMM_enable == 0) {
+ if (pmlmeext->cur_wireless_mode == WIRELESS_11B)
+ aSifsTime = 10;
+ else
+ aSifsTime = 16;
+
+ u1bAIFS = aSifsTime + (2 * pmlmeinfo->slotTime);
+
+ /* <Roger_EXP> Temporary removed, 2008.06.20. */
+ rtw_write8(Adapter, REG_EDCA_VO_PARAM, u1bAIFS);
+ rtw_write8(Adapter, REG_EDCA_VI_PARAM, u1bAIFS);
+ rtw_write8(Adapter, REG_EDCA_BE_PARAM, u1bAIFS);
+ rtw_write8(Adapter, REG_EDCA_BK_PARAM, u1bAIFS);
+ }
+ }
+ break;
+ case HW_VAR_RESP_SIFS:
+ /* RESP_SIFS for CCK */
+ rtw_write8(Adapter, REG_R2T_SIFS, val[0]); /* SIFS_T2T_CCK (0x08) */
+ rtw_write8(Adapter, REG_R2T_SIFS+1, val[1]); /* SIFS_R2T_CCK(0x08) */
+ /* RESP_SIFS for OFDM */
+ rtw_write8(Adapter, REG_T2T_SIFS, val[2]); /* SIFS_T2T_OFDM (0x0a) */
+ rtw_write8(Adapter, REG_T2T_SIFS+1, val[3]); /* SIFS_R2T_OFDM(0x0a) */
+ break;
+ case HW_VAR_ACK_PREAMBLE:
+ {
+ u8 regTmp;
+ u8 bShortPreamble = *((bool *)val);
+ /* Joseph marked out for Netgear 3500 TKIP channel 7 issue.(Temporarily) */
+ regTmp = (haldata->nCur40MhzPrimeSC)<<5;
+ if (bShortPreamble)
+ regTmp |= 0x80;
+
+ rtw_write8(Adapter, REG_RRSR+2, regTmp);
+ }
+ break;
+ case HW_VAR_SEC_CFG:
+ rtw_write8(Adapter, REG_SECCFG, *((u8 *)val));
+ break;
+ case HW_VAR_DM_FLAG:
+ podmpriv->SupportAbility = *((u8 *)val);
+ break;
+ case HW_VAR_DM_FUNC_OP:
+ if (val[0])
+ podmpriv->BK_SupportAbility = podmpriv->SupportAbility;
+ else
+ podmpriv->SupportAbility = podmpriv->BK_SupportAbility;
+ break;
+ case HW_VAR_DM_FUNC_SET:
+ if (*((u32 *)val) == DYNAMIC_ALL_FUNC_ENABLE) {
+ pdmpriv->DMFlag = pdmpriv->InitDMFlag;
+ podmpriv->SupportAbility = pdmpriv->InitODMFlag;
+ } else {
+ podmpriv->SupportAbility |= *((u32 *)val);
+ }
+ break;
+ case HW_VAR_DM_FUNC_CLR:
+ podmpriv->SupportAbility &= *((u32 *)val);
+ break;
+ case HW_VAR_CAM_EMPTY_ENTRY:
+ {
+ u8 ucIndex = *((u8 *)val);
+ u8 i;
+ u32 ulCommand = 0;
+ u32 ulContent = 0;
+ u32 ulEncAlgo = CAM_AES;
+
+ for (i = 0; i < CAM_CONTENT_COUNT; i++) {
+ /* filled id in CAM config 2 byte */
+ if (i == 0)
+ ulContent |= (ucIndex & 0x03) | ((u16)(ulEncAlgo)<<2);
+ else
+ ulContent = 0;
+ /* polling bit, and No Write enable, and address */
+ ulCommand = CAM_CONTENT_COUNT*ucIndex+i;
+ ulCommand = ulCommand | CAM_POLLINIG|CAM_WRITE;
+ /* writing content 0 is equal to marking the entry invalid */
+ rtw_write32(Adapter, WCAMI, ulContent); /* delay_ms(40); */
+ rtw_write32(Adapter, RWCAM, ulCommand); /* delay_ms(40); */
+ }
+ }
+ break;
+ case HW_VAR_CAM_INVALID_ALL:
+ rtw_write32(Adapter, RWCAM, BIT(31)|BIT(30));
+ break;
+ case HW_VAR_CAM_WRITE:
+ {
+ u32 cmd;
+ u32 *cam_val = (u32 *)val;
+ rtw_write32(Adapter, WCAMI, cam_val[0]);
+
+ cmd = CAM_POLLINIG | CAM_WRITE | cam_val[1];
+ rtw_write32(Adapter, RWCAM, cmd);
+ }
+ break;
+ case HW_VAR_AC_PARAM_VO:
+ rtw_write32(Adapter, REG_EDCA_VO_PARAM, ((u32 *)(val))[0]);
+ break;
+ case HW_VAR_AC_PARAM_VI:
+ rtw_write32(Adapter, REG_EDCA_VI_PARAM, ((u32 *)(val))[0]);
+ break;
+ case HW_VAR_AC_PARAM_BE:
+ haldata->AcParam_BE = ((u32 *)(val))[0];
+ rtw_write32(Adapter, REG_EDCA_BE_PARAM, ((u32 *)(val))[0]);
+ break;
+ case HW_VAR_AC_PARAM_BK:
+ rtw_write32(Adapter, REG_EDCA_BK_PARAM, ((u32 *)(val))[0]);
+ break;
+ case HW_VAR_ACM_CTRL:
+ {
+ u8 acm_ctrl = *((u8 *)val);
+ u8 AcmCtrl = rtw_read8(Adapter, REG_ACMHWCTRL);
+
+ if (acm_ctrl > 1)
+ AcmCtrl = AcmCtrl | 0x1;
+
+ if (acm_ctrl & BIT(3))
+ AcmCtrl |= AcmHw_VoqEn;
+ else
+ AcmCtrl &= (~AcmHw_VoqEn);
+
+ if (acm_ctrl & BIT(2))
+ AcmCtrl |= AcmHw_ViqEn;
+ else
+ AcmCtrl &= (~AcmHw_ViqEn);
+
+ if (acm_ctrl & BIT(1))
+ AcmCtrl |= AcmHw_BeqEn;
+ else
+ AcmCtrl &= (~AcmHw_BeqEn);
+
+ DBG_88E("[HW_VAR_ACM_CTRL] Write 0x%X\n", AcmCtrl);
+ rtw_write8(Adapter, REG_ACMHWCTRL, AcmCtrl);
+ }
+ break;
+ case HW_VAR_AMPDU_MIN_SPACE:
+ {
+ u8 MinSpacingToSet;
+ u8 SecMinSpace;
+
+ MinSpacingToSet = *((u8 *)val);
+ if (MinSpacingToSet <= 7) {
+ switch (Adapter->securitypriv.dot11PrivacyAlgrthm) {
+ case _NO_PRIVACY_:
+ case _AES_:
+ SecMinSpace = 0;
+ break;
+ case _WEP40_:
+ case _WEP104_:
+ case _TKIP_:
+ case _TKIP_WTMIC_:
+ SecMinSpace = 6;
+ break;
+ default:
+ SecMinSpace = 7;
+ break;
+ }
+ if (MinSpacingToSet < SecMinSpace)
+ MinSpacingToSet = SecMinSpace;
+ rtw_write8(Adapter, REG_AMPDU_MIN_SPACE, (rtw_read8(Adapter, REG_AMPDU_MIN_SPACE) & 0xf8) | MinSpacingToSet);
+ }
+ }
+ break;
+ case HW_VAR_AMPDU_FACTOR:
+ {
+ u8 RegToSet_Normal[4] = {0x41, 0xa8, 0x72, 0xb9};
+ u8 FactorToSet;
+ u8 *pRegToSet;
+ u8 index = 0;
+
+ pRegToSet = RegToSet_Normal; /* 0xb972a841; */
+ FactorToSet = *((u8 *)val);
+ if (FactorToSet <= 3) {
+ FactorToSet = (1<<(FactorToSet + 2));
+ if (FactorToSet > 0xf)
+ FactorToSet = 0xf;
+
+ for (index = 0; index < 4; index++) {
+ if ((pRegToSet[index] & 0xf0) > (FactorToSet<<4))
+ pRegToSet[index] = (pRegToSet[index] & 0x0f) | (FactorToSet<<4);
+
+ if ((pRegToSet[index] & 0x0f) > FactorToSet)
+ pRegToSet[index] = (pRegToSet[index] & 0xf0) | (FactorToSet);
+
+ rtw_write8(Adapter, (REG_AGGLEN_LMT+index), pRegToSet[index]);
+ }
+ }
+ }
+ break;
+ case HW_VAR_RXDMA_AGG_PG_TH:
+ {
+ u8 threshold = *((u8 *)val);
+ if (threshold == 0)
+ threshold = haldata->UsbRxAggPageCount;
+ rtw_write8(Adapter, REG_RXDMA_AGG_PG_TH, threshold);
+ }
+ break;
+ case HW_VAR_SET_RPWM:
+ break;
+ case HW_VAR_H2C_FW_PWRMODE:
+ {
+ u8 psmode = (*(u8 *)val);
+
+ /* Force leaving RF low power mode for 1T1R to prevent conflicting settings in Fw power */
+ /* saving sequence. 2010.06.07. Added by tynli. Suggested by SD3 yschang. */
+ if ((psmode != PS_MODE_ACTIVE) && (!IS_92C_SERIAL(haldata->VersionID)))
+ ODM_RF_Saving(podmpriv, true);
+ rtl8188e_set_FwPwrMode_cmd(Adapter, psmode);
+ }
+ break;
+ case HW_VAR_H2C_FW_JOINBSSRPT:
+ {
+ u8 mstatus = (*(u8 *)val);
+ rtl8188e_set_FwJoinBssReport_cmd(Adapter, mstatus);
+ }
+ break;
+#ifdef CONFIG_88EU_P2P
+ case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
+ {
+ u8 p2p_ps_state = (*(u8 *)val);
+ rtl8188e_set_p2p_ps_offload_cmd(Adapter, p2p_ps_state);
+ }
+ break;
+#endif
+ case HW_VAR_INITIAL_GAIN:
+ {
+ struct rtw_dig *pDigTable = &podmpriv->DM_DigTable;
+ u32 rx_gain = ((u32 *)(val))[0];
+
+ if (rx_gain == 0xff) {/* restore rx gain */
+ ODM_Write_DIG(podmpriv, pDigTable->BackupIGValue);
+ } else {
+ pDigTable->BackupIGValue = pDigTable->CurIGValue;
+ ODM_Write_DIG(podmpriv, rx_gain);
+ }
+ }
+ break;
+ case HW_VAR_TRIGGER_GPIO_0:
+ rtl8192cu_trigger_gpio_0(Adapter);
+ break;
+ case HW_VAR_RPT_TIMER_SETTING:
+ {
+ u16 min_rpt_time = (*(u16 *)val);
+ ODM_RA_Set_TxRPT_Time(podmpriv, min_rpt_time);
+ }
+ break;
+ case HW_VAR_ANTENNA_DIVERSITY_SELECT:
+ {
+ u8 Optimum_antenna = (*(u8 *)val);
+ u8 Ant;
+ /* switch antenna to Optimum_antenna */
+ if (haldata->CurAntenna != Optimum_antenna) {
+ Ant = (Optimum_antenna == 2) ? MAIN_ANT : AUX_ANT;
+ ODM_UpdateRxIdleAnt_88E(&haldata->odmpriv, Ant);
+
+ haldata->CurAntenna = Optimum_antenna;
+ }
+ }
+ break;
+ case HW_VAR_EFUSE_BYTES: /* To set EFUSE total used bytes, added by Roger, 2008.12.22. */
+ haldata->EfuseUsedBytes = *((u16 *)val);
+ break;
+ case HW_VAR_FIFO_CLEARN_UP:
+ {
+ struct pwrctrl_priv *pwrpriv = &Adapter->pwrctrlpriv;
+ u8 trycnt = 100;
+
+ /* pause tx */
+ rtw_write8(Adapter, REG_TXPAUSE, 0xff);
+
+ /* keep sn */
+ Adapter->xmitpriv.nqos_ssn = rtw_read16(Adapter, REG_NQOS_SEQ);
+
+ if (!pwrpriv->bkeepfwalive) {
+ /* RX DMA stop */
+ rtw_write32(Adapter, REG_RXPKT_NUM, (rtw_read32(Adapter, REG_RXPKT_NUM)|RW_RELEASE_EN));
+ do {
+ if (!(rtw_read32(Adapter, REG_RXPKT_NUM)&RXDMA_IDLE))
+ break;
+ } while (trycnt--);
+ if (trycnt == 0)
+ DBG_88E("Stop RX DMA failed......\n");
+
+ /* RQPN Load 0 */
+ rtw_write16(Adapter, REG_RQPN_NPQ, 0x0);
+ rtw_write32(Adapter, REG_RQPN, 0x80000000);
+ rtw_mdelay_os(10);
+ }
+ }
+ break;
+ case HW_VAR_CHECK_TXBUF:
+ break;
+ case HW_VAR_APFM_ON_MAC:
+ haldata->bMacPwrCtrlOn = *val;
+ DBG_88E("%s: bMacPwrCtrlOn=%d\n", __func__, haldata->bMacPwrCtrlOn);
+ break;
+ case HW_VAR_TX_RPT_MAX_MACID:
+ {
+ u8 maxMacid = *val;
+ DBG_88E("### MacID(%d),Set Max Tx RPT MID(%d)\n", maxMacid, maxMacid+1);
+ rtw_write8(Adapter, REG_TX_RPT_CTRL+1, maxMacid+1);
+ }
+ break;
+ case HW_VAR_H2C_MEDIA_STATUS_RPT:
+ rtl8188e_set_FwMediaStatus_cmd(Adapter, (*(__le16 *)val));
+ break;
+ case HW_VAR_BCN_VALID:
+ /* BCN_VALID, BIT16 of REG_TDECTRL = BIT0 of REG_TDECTRL+2, write 1 to clear, Clear by sw */
+ rtw_write8(Adapter, REG_TDECTRL+2, rtw_read8(Adapter, REG_TDECTRL+2) | BIT0);
+ break;
+ default:
+ break;
+ }
+_func_exit_;
+}
+
+static void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ struct odm_dm_struct *podmpriv = &haldata->odmpriv;
+_func_enter_;
+
+ switch (variable) {
+ case HW_VAR_BASIC_RATE:
+ *((u16 *)(val)) = haldata->BasicRateSet;
+ break;
+ case HW_VAR_TXPAUSE:
+ val[0] = rtw_read8(Adapter, REG_TXPAUSE);
+ break;
+ case HW_VAR_BCN_VALID:
+ /* BCN_VALID, BIT16 of REG_TDECTRL = BIT0 of REG_TDECTRL+2 */
+ val[0] = (BIT0 & rtw_read8(Adapter, REG_TDECTRL+2)) ? true : false;
+ break;
+ case HW_VAR_DM_FLAG:
+ val[0] = podmpriv->SupportAbility;
+ break;
+ case HW_VAR_RF_TYPE:
+ val[0] = haldata->rf_type;
+ break;
+ case HW_VAR_FWLPS_RF_ON:
+ {
+ /* When we halt the NIC, we should check whether FW LPS has been left. */
+ if (Adapter->pwrctrlpriv.rf_pwrstate == rf_off) {
+ /* If it is in HW/SW Radio OFF or IPS state, we do not check Fw LPS Leave, */
+ /* because the Fw is unloaded. */
+ val[0] = true;
+ } else {
+ u32 valRCR;
+ valRCR = rtw_read32(Adapter, REG_RCR);
+ valRCR &= 0x00070000;
+ if (valRCR)
+ val[0] = false;
+ else
+ val[0] = true;
+ }
+ }
+ break;
+ case HW_VAR_CURRENT_ANTENNA:
+ val[0] = haldata->CurAntenna;
+ break;
+ case HW_VAR_EFUSE_BYTES: /* To get EFUSE total used bytes, added by Roger, 2008.12.22. */
+ *((u16 *)(val)) = haldata->EfuseUsedBytes;
+ break;
+ case HW_VAR_APFM_ON_MAC:
+ *val = haldata->bMacPwrCtrlOn;
+ break;
+ case HW_VAR_CHK_HI_QUEUE_EMPTY:
+ *val = ((rtw_read32(Adapter, REG_HGQ_INFORMATION)&0x0000ff00) == 0) ? true : false;
+ break;
+ default:
+ break;
+ }
+
+_func_exit_;
+}
+
+/* */
+/* Description: */
+/* Query setting of specified variable. */
+/* */
+static u8
+GetHalDefVar8188EUsb(
+ struct adapter *Adapter,
+ enum hal_def_variable eVariable,
+ void *pValue
+ )
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ u8 bResult = _SUCCESS;
+
+ switch (eVariable) {
+ case HAL_DEF_UNDERCORATEDSMOOTHEDPWDB:
+ {
+ struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
+ struct sta_priv *pstapriv = &Adapter->stapriv;
+ struct sta_info *psta;
+ psta = rtw_get_stainfo(pstapriv, pmlmepriv->cur_network.network.MacAddress);
+ if (psta)
+ *((int *)pValue) = psta->rssi_stat.UndecoratedSmoothedPWDB;
+ }
+ break;
+ case HAL_DEF_IS_SUPPORT_ANT_DIV:
+ *((u8 *)pValue) = (haldata->AntDivCfg == 0) ? false : true;
+ break;
+ case HAL_DEF_CURRENT_ANTENNA:
+ *((u8 *)pValue) = haldata->CurAntenna;
+ break;
+ case HAL_DEF_DRVINFO_SZ:
+ *((u32 *)pValue) = DRVINFO_SZ;
+ break;
+ case HAL_DEF_MAX_RECVBUF_SZ:
+ *((u32 *)pValue) = MAX_RECVBUF_SZ;
+ break;
+ case HAL_DEF_RX_PACKET_OFFSET:
+ *((u32 *)pValue) = RXDESC_SIZE + DRVINFO_SZ;
+ break;
+ case HAL_DEF_DBG_DM_FUNC:
+ *((u32 *)pValue) = haldata->odmpriv.SupportAbility;
+ break;
+ case HAL_DEF_RA_DECISION_RATE:
+ {
+ u8 MacID = *((u8 *)pValue);
+ *((u8 *)pValue) = ODM_RA_GetDecisionRate_8188E(&(haldata->odmpriv), MacID);
+ }
+ break;
+ case HAL_DEF_RA_SGI:
+ {
+ u8 MacID = *((u8 *)pValue);
+ *((u8 *)pValue) = ODM_RA_GetShortGI_8188E(&(haldata->odmpriv), MacID);
+ }
+ break;
+ case HAL_DEF_PT_PWR_STATUS:
+ {
+ u8 MacID = *((u8 *)pValue);
+ *((u8 *)pValue) = ODM_RA_GetHwPwrStatus_8188E(&(haldata->odmpriv), MacID);
+ }
+ break;
+ case HW_VAR_MAX_RX_AMPDU_FACTOR:
+ *((u32 *)pValue) = MAX_AMPDU_FACTOR_64K;
+ break;
+ case HW_DEF_RA_INFO_DUMP:
+ {
+ u8 entry_id = *((u8 *)pValue);
+ if (check_fwstate(&Adapter->mlmepriv, _FW_LINKED)) {
+ DBG_88E("============ RA status check ===================\n");
+ DBG_88E("Mac_id:%d , RateID = %d, RAUseRate = 0x%08x, RateSGI = %d, DecisionRate = 0x%02x ,PTStage = %d\n",
+ entry_id,
+ haldata->odmpriv.RAInfo[entry_id].RateID,
+ haldata->odmpriv.RAInfo[entry_id].RAUseRate,
+ haldata->odmpriv.RAInfo[entry_id].RateSGI,
+ haldata->odmpriv.RAInfo[entry_id].DecisionRate,
+ haldata->odmpriv.RAInfo[entry_id].PTStage);
+ }
+ }
+ break;
+ case HW_DEF_ODM_DBG_FLAG:
+ {
+ struct odm_dm_struct *dm_ocm = &(haldata->odmpriv);
+ pr_info("dm_ocm->DebugComponents = 0x%llx\n", dm_ocm->DebugComponents);
+ }
+ break;
+ case HAL_DEF_DBG_DUMP_RXPKT:
+ *((u8 *)pValue) = haldata->bDumpRxPkt;
+ break;
+ case HAL_DEF_DBG_DUMP_TXPKT:
+ *((u8 *)pValue) = haldata->bDumpTxPkt;
+ break;
+ default:
+ bResult = _FAIL;
+ break;
+ }
+
+ return bResult;
+}
+
+/* */
+/* Description: */
+/* Change default setting of specified variable. */
+/* */
+static u8 SetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
+ u8 bResult = _SUCCESS;
+
+ switch (eVariable) {
+ case HAL_DEF_DBG_DM_FUNC:
+ {
+ u8 dm_func = *((u8 *)pValue);
+ struct odm_dm_struct *podmpriv = &haldata->odmpriv;
+
+ if (dm_func == 0) { /* disable all dynamic func */
+ podmpriv->SupportAbility = DYNAMIC_FUNC_DISABLE;
+ DBG_88E("==> Disable all dynamic function...\n");
+ } else if (dm_func == 1) {/* disable DIG */
+ podmpriv->SupportAbility &= (~DYNAMIC_BB_DIG);
+ DBG_88E("==> Disable DIG...\n");
+ } else if (dm_func == 2) {/* disable High power */
+ podmpriv->SupportAbility &= (~DYNAMIC_BB_DYNAMIC_TXPWR);
+ } else if (dm_func == 3) {/* disable tx power tracking */
+ podmpriv->SupportAbility &= (~DYNAMIC_RF_CALIBRATION);
+ DBG_88E("==> Disable tx power tracking...\n");
+ } else if (dm_func == 5) {/* disable antenna diversity */
+ podmpriv->SupportAbility &= (~DYNAMIC_BB_ANT_DIV);
+ } else if (dm_func == 6) {/* turn on all dynamic func */
+ if (!(podmpriv->SupportAbility & DYNAMIC_BB_DIG)) {
+ struct rtw_dig *pDigTable = &podmpriv->DM_DigTable;
+ pDigTable->CurIGValue = rtw_read8(Adapter, 0xc50);
+ }
+ podmpriv->SupportAbility = DYNAMIC_ALL_FUNC_ENABLE;
+ DBG_88E("==> Turn on all dynamic function...\n");
+ }
+ }
+ break;
+ case HAL_DEF_DBG_DUMP_RXPKT:
+ haldata->bDumpRxPkt = *((u8 *)pValue);
+ break;
+ case HAL_DEF_DBG_DUMP_TXPKT:
+ haldata->bDumpTxPkt = *((u8 *)pValue);
+ break;
+ case HW_DEF_FA_CNT_DUMP:
+ {
+ u8 bRSSIDump = *((u8 *)pValue);
+ struct odm_dm_struct *dm_ocm = &(haldata->odmpriv);
+ if (bRSSIDump)
+ dm_ocm->DebugComponents = ODM_COMP_DIG | ODM_COMP_FA_CNT;
+ else
+ dm_ocm->DebugComponents = 0;
+ }
+ break;
+ case HW_DEF_ODM_DBG_FLAG:
+ {
+ u64 DebugComponents = *((u64 *)pValue);
+ struct odm_dm_struct *dm_ocm = &(haldata->odmpriv);
+ dm_ocm->DebugComponents = DebugComponents;
+ }
+ break;
+ default:
+ bResult = _FAIL;
+ break;
+ }
+
+ return bResult;
+}
+
+static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
+{
+ u8 init_rate = 0;
+ u8 networkType, raid;
+ u32 mask, rate_bitmap;
+ u8 shortGIrate = false;
+ int supportRateNum = 0;
+ struct sta_info *psta;
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+
+ if (mac_id >= NUM_STA) /* CAM_SIZE */
+ return;
+ psta = pmlmeinfo->FW_sta_info[mac_id].psta;
+ if (psta == NULL)
+ return;
+ switch (mac_id) {
+ case 0:/* for infra mode */
+ supportRateNum = rtw_get_rateset_len(cur_network->SupportedRates);
+ networkType = judge_network_type(adapt, cur_network->SupportedRates, supportRateNum) & 0xf;
+ raid = networktype_to_raid(networkType);
+ mask = update_supported_rate(cur_network->SupportedRates, supportRateNum);
+ mask |= (pmlmeinfo->HT_enable) ? update_MSC_rate(&(pmlmeinfo->HT_caps)) : 0;
+ if (support_short_GI(adapt, &(pmlmeinfo->HT_caps)))
+ shortGIrate = true;
+ break;
+ case 1:/* for broadcast/multicast */
+ supportRateNum = rtw_get_rateset_len(pmlmeinfo->FW_sta_info[mac_id].SupportedRates);
+ if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
+ networkType = WIRELESS_11B;
+ else
+ networkType = WIRELESS_11G;
+ raid = networktype_to_raid(networkType);
+ mask = update_basic_rate(cur_network->SupportedRates, supportRateNum);
+ break;
+ default: /* for each sta in IBSS */
+ supportRateNum = rtw_get_rateset_len(pmlmeinfo->FW_sta_info[mac_id].SupportedRates);
+ networkType = judge_network_type(adapt, pmlmeinfo->FW_sta_info[mac_id].SupportedRates, supportRateNum) & 0xf;
+ raid = networktype_to_raid(networkType);
+ mask = update_supported_rate(cur_network->SupportedRates, supportRateNum);
+
+ /* todo: support HT in IBSS */
+ break;
+ }
+
+ rate_bitmap = 0x0fffffff;
+ rate_bitmap = ODM_Get_Rate_Bitmap(&haldata->odmpriv, mac_id, mask, rssi_level);
+ DBG_88E("%s => mac_id:%d, networkType:0x%02x, mask:0x%08x\n\t ==> rssi_level:%d, rate_bitmap:0x%08x\n",
+ __func__, mac_id, networkType, mask, rssi_level, rate_bitmap);
+
+ mask &= rate_bitmap;
+
+ init_rate = get_highest_rate_idx(mask)&0x3f;
+
+ if (haldata->fw_ractrl) {
+ u8 arg;
+
+ arg = mac_id & 0x1f;/* MACID */
+ arg |= BIT(7);
+ if (shortGIrate)
+ arg |= BIT(5);
+ mask |= ((raid << 28) & 0xf0000000);
+ DBG_88E("update raid entry, mask=0x%x, arg=0x%x\n", mask, arg);
+ psta->ra_mask = mask;
+ mask |= ((raid << 28) & 0xf0000000);
+
+ /* to do ,for 8188E-SMIC */
+ rtl8188e_set_raid_cmd(adapt, mask);
+ } else {
+ ODM_RA_UpdateRateInfo_8188E(&(haldata->odmpriv),
+ mac_id,
+ raid,
+ mask,
+ shortGIrate
+ );
+ }
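+ /*
+  * Illustrative recap of the packing above (derived only from this
+  * function, not from firmware documentation): bits 0-4 of 'arg' carry
+  * the MACID, BIT(5) is set when short GI is usable and BIT(7) is always
+  * set, while the RAID rides in the top nibble of the rate mask
+  * (raid << 28). E.g. mac_id 0 with short GI yields arg == 0xa0.
+  */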
+ /* set ra_id */
+ psta->raid = raid;
+ psta->init_rate = init_rate;
+}
+
+static void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt)
+{
+ u32 value32;
+ struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ u32 bcn_ctrl_reg = REG_BCN_CTRL;
+ /* reset TSF, enable update TSF, correcting TSF On Beacon */
+
+ /* BCN interval */
+ rtw_write16(adapt, REG_BCN_INTERVAL, pmlmeinfo->bcn_interval);
+ rtw_write8(adapt, REG_ATIMWND, 0x02);/* 2ms */
+
+ _InitBeaconParameters(adapt);
+
+ rtw_write8(adapt, REG_SLOT, 0x09);
+
+ value32 = rtw_read32(adapt, REG_TCR);
+ value32 &= ~TSFRST;
+ rtw_write32(adapt, REG_TCR, value32);
+
+ value32 |= TSFRST;
+ rtw_write32(adapt, REG_TCR, value32);
+
+ /* NOTE: Fix test chip's bug (about the contention window's randomness) */
+ rtw_write8(adapt, REG_RXTSF_OFFSET_CCK, 0x50);
+ rtw_write8(adapt, REG_RXTSF_OFFSET_OFDM, 0x50);
+
+ _BeaconFunctionEnable(adapt, true, true);
+
+ ResumeTxBeacon(adapt);
+
+ rtw_write8(adapt, bcn_ctrl_reg, rtw_read8(adapt, bcn_ctrl_reg)|BIT(1));
+}
+
+static void rtl8188eu_init_default_value(struct adapter *adapt)
+{
+ struct hal_data_8188e *haldata;
+ struct pwrctrl_priv *pwrctrlpriv;
+ u8 i;
+
+ haldata = GET_HAL_DATA(adapt);
+ pwrctrlpriv = &adapt->pwrctrlpriv;
+
+ /* init default value */
+ haldata->fw_ractrl = false;
+ if (!pwrctrlpriv->bkeepfwalive)
+ haldata->LastHMEBoxNum = 0;
+
+ /* init dm default value */
+ haldata->odmpriv.RFCalibrateInfo.bIQKInitialized = false;
+ haldata->odmpriv.RFCalibrateInfo.TM_Trigger = 0;/* for IQK */
+ haldata->pwrGroupCnt = 0;
+ haldata->PGMaxGroup = 13;
+ haldata->odmpriv.RFCalibrateInfo.ThermalValue_HP_index = 0;
+ for (i = 0; i < HP_THERMAL_NUM; i++)
+ haldata->odmpriv.RFCalibrateInfo.ThermalValue_HP[i] = 0;
+}
+
+static u8 rtl8188eu_ps_func(struct adapter *Adapter, enum hal_intf_ps_func efunc_id, u8 *val)
+{
+ u8 bResult = true;
+ return bResult;
+}
+
+void rtl8188eu_set_hal_ops(struct adapter *adapt)
+{
+ struct hal_ops *halfunc = &adapt->HalFunc;
+
+_func_enter_;
+
+ adapt->HalData = rtw_zmalloc(sizeof(struct hal_data_8188e));
+ if (adapt->HalData == NULL)
+ DBG_88E("can't alloc memory for HAL DATA\n");
+ adapt->hal_data_sz = sizeof(struct hal_data_8188e);
+
+ halfunc->hal_power_on = rtl8188eu_InitPowerOn;
+ halfunc->hal_init = &rtl8188eu_hal_init;
+ halfunc->hal_deinit = &rtl8188eu_hal_deinit;
+
+ halfunc->inirp_init = &rtl8188eu_inirp_init;
+ halfunc->inirp_deinit = &rtl8188eu_inirp_deinit;
+
+ halfunc->init_xmit_priv = &rtl8188eu_init_xmit_priv;
+ halfunc->free_xmit_priv = &rtl8188eu_free_xmit_priv;
+
+ halfunc->init_recv_priv = &rtl8188eu_init_recv_priv;
+ halfunc->free_recv_priv = &rtl8188eu_free_recv_priv;
+ halfunc->InitSwLeds = &rtl8188eu_InitSwLeds;
+ halfunc->DeInitSwLeds = &rtl8188eu_DeInitSwLeds;
+
+ halfunc->init_default_value = &rtl8188eu_init_default_value;
+ halfunc->intf_chip_configure = &rtl8188eu_interface_configure;
+ halfunc->read_adapter_info = &ReadAdapterInfo8188EU;
+
+ halfunc->SetHwRegHandler = &SetHwReg8188EU;
+ halfunc->GetHwRegHandler = &GetHwReg8188EU;
+ halfunc->GetHalDefVarHandler = &GetHalDefVar8188EUsb;
+ halfunc->SetHalDefVarHandler = &SetHalDefVar8188EUsb;
+
+ halfunc->UpdateRAMaskHandler = &UpdateHalRAMask8188EUsb;
+ halfunc->SetBeaconRelatedRegistersHandler = &SetBeaconRelatedRegisters8188EUsb;
+
+ halfunc->hal_xmit = &rtl8188eu_hal_xmit;
+ halfunc->mgnt_xmit = &rtl8188eu_mgnt_xmit;
+
+ halfunc->interface_ps_func = &rtl8188eu_ps_func;
+
+ rtl8188e_set_hal_ops(halfunc);
+_func_exit_;
+}
diff --git a/drivers/staging/rtl8188eu/hal/usb_ops_linux.c b/drivers/staging/rtl8188eu/hal/usb_ops_linux.c
new file mode 100644
index 00000000000..bc564169b2f
--- /dev/null
+++ b/drivers/staging/rtl8188eu/hal/usb_ops_linux.c
@@ -0,0 +1,726 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _HCI_OPS_OS_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <osdep_intf.h>
+#include <usb_ops.h>
+#include <recv_osdep.h>
+#include <rtl8188e_hal.h>
+
+static int usbctrl_vendorreq(struct intf_hdl *pintfhdl, u8 request, u16 value, u16 index, void *pdata, u16 len, u8 requesttype)
+{
+ struct adapter *adapt = pintfhdl->padapter;
+ struct dvobj_priv *dvobjpriv = adapter_to_dvobj(adapt);
+ struct usb_device *udev = dvobjpriv->pusbdev;
+ unsigned int pipe;
+ int status = 0;
+ u8 reqtype;
+ u8 *pIo_buf;
+ int vendorreq_times = 0;
+
+ if ((adapt->bSurpriseRemoved) || (adapt->pwrctrlpriv.pnp_bstop_trx)) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usbctrl_vendorreq:(adapt->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n"));
+ status = -EPERM;
+ goto exit;
+ }
+
+ if (len > MAX_VENDOR_REQ_CMD_SIZE) {
+ DBG_88E("[%s] Buffer len error, vendor request failed\n", __func__);
+ status = -EINVAL;
+ goto exit;
+ }
+
+ _enter_critical_mutex(&dvobjpriv->usb_vendor_req_mutex, NULL);
+
+ /* Acquire IO memory for vendorreq */
+ pIo_buf = dvobjpriv->usb_vendor_req_buf;
+
+ if (pIo_buf == NULL) {
+ DBG_88E("[%s] pIo_buf == NULL\n", __func__);
+ status = -ENOMEM;
+ goto release_mutex;
+ }
+
+ while (++vendorreq_times <= MAX_USBCTRL_VENDORREQ_TIMES) {
+ _rtw_memset(pIo_buf, 0, len);
+
+ if (requesttype == 0x01) {
+ pipe = usb_rcvctrlpipe(udev, 0);/* read_in */
+ reqtype = REALTEK_USB_VENQT_READ;
+ } else {
+ pipe = usb_sndctrlpipe(udev, 0);/* write_out */
+ reqtype = REALTEK_USB_VENQT_WRITE;
+ memcpy(pIo_buf, pdata, len);
+ }
+
+ status = rtw_usb_control_msg(udev, pipe, request, reqtype, value, index, pIo_buf, len, RTW_USB_CONTROL_MSG_TIMEOUT);
+
+ if (status == len) { /* This control transfer succeeded. */
+ rtw_reset_continual_urb_error(dvobjpriv);
+ if (requesttype == 0x01)
+ memcpy(pdata, pIo_buf, len);
+ } else { /* error cases */
+ DBG_88E("reg 0x%x, usb %s %u fail, status:%d value=0x%x, vendorreq_times:%d\n",
+ value, (requesttype == 0x01) ? "read" : "write",
+ len, status, *(u32 *)pdata, vendorreq_times);
+
+ if (status < 0) {
+ if (status == (-ESHUTDOWN) || status == -ENODEV) {
+ adapt->bSurpriseRemoved = true;
+ } else {
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ haldata->srestpriv.Wifi_Error_Status = USB_VEN_REQ_CMD_FAIL;
+ }
+ } else { /* status != len && status >= 0 */
+ if (status > 0) {
+ if (requesttype == 0x01) {
+ /* For Control read transfer, we have to copy the read data from pIo_buf to pdata. */
+ memcpy(pdata, pIo_buf, len);
+ }
+ }
+ }
+
+ if (rtw_inc_and_chk_continual_urb_error(dvobjpriv)) {
+ adapt->bSurpriseRemoved = true;
+ break;
+ }
+
+ }
+
+ /* firmware download is checksummed, don't retry */
+ if ((value >= FW_8188E_START_ADDRESS && value <= FW_8188E_END_ADDRESS) || status == len)
+ break;
+ }
+release_mutex:
+ _exit_critical_mutex(&dvobjpriv->usb_vendor_req_mutex, NULL);
+exit:
+ return status;
+}
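+/*
+ * All of the register accessors below funnel through usbctrl_vendorreq():
+ * a read issues vendor request 0x05 with requesttype 0x01 and
+ * wvalue = addr & 0xffff, while a write uses requesttype 0x00 and copies
+ * the payload into the shared usb_vendor_req_buf first. The helper
+ * retries up to MAX_USBCTRL_VENDORREQ_TIMES, except for firmware
+ * download addresses, whose transfers are covered by their own checksum.
+ */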
+
+static u8 usb_read8(struct intf_hdl *pintfhdl, u32 addr)
+{
+ u8 request;
+ u8 requesttype;
+ u16 wvalue;
+ u16 index;
+ u16 len;
+ u8 data = 0;
+
+ _func_enter_;
+
+ request = 0x05;
+ requesttype = 0x01;/* read_in */
+ index = 0;/* n/a */
+
+ wvalue = (u16)(addr&0x0000ffff);
+ len = 1;
+
+ usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
+
+ _func_exit_;
+
+ return data;
+
+}
+
+static u16 usb_read16(struct intf_hdl *pintfhdl, u32 addr)
+{
+ u8 request;
+ u8 requesttype;
+ u16 wvalue;
+ u16 index;
+ u16 len;
+ __le32 data;
+
+_func_enter_;
+ request = 0x05;
+ requesttype = 0x01;/* read_in */
+ index = 0;/* n/a */
+ wvalue = (u16)(addr&0x0000ffff);
+ len = 2;
+ usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
+_func_exit_;
+
+ return (u16)(le32_to_cpu(data)&0xffff);
+}
+
+static u32 usb_read32(struct intf_hdl *pintfhdl, u32 addr)
+{
+ u8 request;
+ u8 requesttype;
+ u16 wvalue;
+ u16 index;
+ u16 len;
+ __le32 data;
+
+_func_enter_;
+
+ request = 0x05;
+ requesttype = 0x01;/* read_in */
+ index = 0;/* n/a */
+
+ wvalue = (u16)(addr&0x0000ffff);
+ len = 4;
+
+ usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
+
+_func_exit_;
+
+ return le32_to_cpu(data);
+}
+
+static int usb_write8(struct intf_hdl *pintfhdl, u32 addr, u8 val)
+{
+ u8 request;
+ u8 requesttype;
+ u16 wvalue;
+ u16 index;
+ u16 len;
+ u8 data;
+ int ret;
+
+ _func_enter_;
+ request = 0x05;
+ requesttype = 0x00;/* write_out */
+ index = 0;/* n/a */
+ wvalue = (u16)(addr&0x0000ffff);
+ len = 1;
+ data = val;
+ ret = usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
+ _func_exit_;
+ return ret;
+}
+
+static int usb_write16(struct intf_hdl *pintfhdl, u32 addr, u16 val)
+{
+ u8 request;
+ u8 requesttype;
+ u16 wvalue;
+ u16 index;
+ u16 len;
+ __le32 data;
+ int ret;
+
+ _func_enter_;
+
+ request = 0x05;
+ requesttype = 0x00;/* write_out */
+ index = 0;/* n/a */
+
+ wvalue = (u16)(addr&0x0000ffff);
+ len = 2;
+
+ data = cpu_to_le32(val & 0x0000ffff);
+
+ ret = usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
+
+ _func_exit_;
+
+ return ret;
+}
+
+static int usb_write32(struct intf_hdl *pintfhdl, u32 addr, u32 val)
+{
+ u8 request;
+ u8 requesttype;
+ u16 wvalue;
+ u16 index;
+ u16 len;
+ __le32 data;
+ int ret;
+
+ _func_enter_;
+
+ request = 0x05;
+ requesttype = 0x00;/* write_out */
+ index = 0;/* n/a */
+
+ wvalue = (u16)(addr&0x0000ffff);
+ len = 4;
+ data = cpu_to_le32(val);
+
+ ret = usbctrl_vendorreq(pintfhdl, request, wvalue, index, &data, len, requesttype);
+
+ _func_exit_;
+
+ return ret;
+}
+
+static int usb_writeN(struct intf_hdl *pintfhdl, u32 addr, u32 length, u8 *pdata)
+{
+ u8 request;
+ u8 requesttype;
+ u16 wvalue;
+ u16 index;
+ u16 len;
+ u8 buf[VENDOR_CMD_MAX_DATA_LEN] = {0};
+ int ret;
+
+ _func_enter_;
+
+ request = 0x05;
+ requesttype = 0x00;/* write_out */
+ index = 0;/* n/a */
+
+ wvalue = (u16)(addr&0x0000ffff);
+ len = length;
+ memcpy(buf, pdata, len);
+
+ ret = usbctrl_vendorreq(pintfhdl, request, wvalue, index, buf, len, requesttype);
+
+ _func_exit_;
+
+ return ret;
+}
+
+static void interrupt_handler_8188eu(struct adapter *adapt, u16 pkt_len, u8 *pbuf)
+{
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+
+ if (pkt_len != INTERRUPT_MSG_FORMAT_LEN) {
+ DBG_88E("%s Invalid interrupt content length (%d)!\n", __func__, pkt_len);
+ return;
+ }
+
+ /* HISR */
+ memcpy(&(haldata->IntArray[0]), &(pbuf[USB_INTR_CONTENT_HISR_OFFSET]), 4);
+ memcpy(&(haldata->IntArray[1]), &(pbuf[USB_INTR_CONTENT_HISRE_OFFSET]), 4);
+
+ /* C2H Event */
+ if (pbuf[0] != 0)
+ memcpy(&(haldata->C2hArray[0]), &(pbuf[USB_INTR_CONTENT_C2H_OFFSET]), 16);
+}
+
+static int recvbuf2recvframe(struct adapter *adapt, struct sk_buff *pskb)
+{
+ u8 *pbuf;
+ u8 shift_sz = 0;
+ u16 pkt_cnt;
+ u32 pkt_offset, skb_len, alloc_sz;
+ s32 transfer_len;
+ struct recv_stat *prxstat;
+ struct phy_stat *pphy_status = NULL;
+ struct sk_buff *pkt_copy = NULL;
+ union recv_frame *precvframe = NULL;
+ struct rx_pkt_attrib *pattrib = NULL;
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ struct recv_priv *precvpriv = &adapt->recvpriv;
+ struct __queue *pfree_recv_queue = &precvpriv->free_recv_queue;
+
+ transfer_len = (s32)pskb->len;
+ pbuf = pskb->data;
+
+ prxstat = (struct recv_stat *)pbuf;
+ pkt_cnt = (le32_to_cpu(prxstat->rxdw2) >> 16) & 0xff;
+
+ do {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("recvbuf2recvframe: rxdesc offset 0:0x%08x, 4:0x%08x, 8:0x%08x, C:0x%08x\n",
+ prxstat->rxdw0, prxstat->rxdw1, prxstat->rxdw2, prxstat->rxdw4));
+
+ prxstat = (struct recv_stat *)pbuf;
+
+ precvframe = rtw_alloc_recvframe(pfree_recv_queue);
+ if (precvframe == NULL) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("recvbuf2recvframe: precvframe==NULL\n"));
+ DBG_88E("%s()-%d: rtw_alloc_recvframe() failed! RX Drop!\n", __func__, __LINE__);
+ goto _exit_recvbuf2recvframe;
+ }
+
+ _rtw_init_listhead(&precvframe->u.hdr.list);
+ precvframe->u.hdr.precvbuf = NULL; /* can't access the precvbuf for new arch. */
+ precvframe->u.hdr.len = 0;
+
+ update_recvframe_attrib_88e(precvframe, prxstat);
+
+ pattrib = &precvframe->u.hdr.attrib;
+
+ if ((pattrib->crc_err) || (pattrib->icv_err)) {
+ DBG_88E("%s: RX Warning! crc_err=%d icv_err=%d, skip!\n", __func__, pattrib->crc_err, pattrib->icv_err);
+
+ rtw_free_recvframe(precvframe, pfree_recv_queue);
+ goto _exit_recvbuf2recvframe;
+ }
+
+ if ((pattrib->physt) && (pattrib->pkt_rpt_type == NORMAL_RX))
+ pphy_status = (struct phy_stat *)(pbuf + RXDESC_OFFSET);
+
+ pkt_offset = RXDESC_SIZE + pattrib->drvinfo_sz + pattrib->shift_sz + pattrib->pkt_len;
+
+ if ((pattrib->pkt_len <= 0) || (pkt_offset > transfer_len)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("recvbuf2recvframe: pkt_len<=0\n"));
+ DBG_88E("%s()-%d: RX Warning! pkt_len<=0 or pkt_offset > transfer_len\n", __func__, __LINE__);
+ rtw_free_recvframe(precvframe, pfree_recv_queue);
+ goto _exit_recvbuf2recvframe;
+ }
+
+ /* Modified by Albert 20101213 */
+ /* For 8 bytes IP header alignment. */
+ if (pattrib->qos) /* QoS data, wireless LAN header length is 26 */
+ shift_sz = 6;
+ else
+ shift_sz = 0;
+
+ skb_len = pattrib->pkt_len;
+
+ /* For the first fragment packet, the driver needs to allocate 1536+drvinfo_sz+RXDESC_SIZE to defrag the packet. */
+ /* modify alloc_sz for receiving crc error packets, by thomas 2011-06-02 */
+ if ((pattrib->mfrag == 1) && (pattrib->frag_num == 0)) {
+ if (skb_len <= 1650)
+ alloc_sz = 1664;
+ else
+ alloc_sz = skb_len + 14;
+ } else {
+ alloc_sz = skb_len;
+ /* 6 is for 8-byte IP header alignment in the QoS packet case. */
+ /* 8 is for the skb->data alignment reserve (see skb_reserve() below). */
+ alloc_sz += 14;
+ }
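+ /*
+  * Sketch of the arithmetic above: the 14 spare bytes are split into up
+  * to 8 bytes for aligning skb->data to an 8-byte boundary plus the
+  * 6-byte QoS shift, so that the 26-byte QoS 802.11 header ends on a
+  * multiple of 8 and the IP header that follows it is 8-byte aligned
+  * (see the two skb_reserve() calls below).
+  */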
+
+ pkt_copy = netdev_alloc_skb(adapt->pnetdev, alloc_sz);
+ if (pkt_copy) {
+ pkt_copy->dev = adapt->pnetdev;
+ precvframe->u.hdr.pkt = pkt_copy;
+ precvframe->u.hdr.rx_head = pkt_copy->data;
+ precvframe->u.hdr.rx_end = pkt_copy->data + alloc_sz;
+ skb_reserve(pkt_copy, 8 - ((size_t)(pkt_copy->data) & 7));/* force pkt_copy->data at 8-byte alignment address */
+ skb_reserve(pkt_copy, shift_sz);/* force ip_hdr at 8-byte alignment address according to shift_sz. */
+ memcpy(pkt_copy->data, (pbuf + pattrib->drvinfo_sz + RXDESC_SIZE), skb_len);
+ precvframe->u.hdr.rx_tail = pkt_copy->data;
+ precvframe->u.hdr.rx_data = pkt_copy->data;
+ } else {
+ if ((pattrib->mfrag == 1) && (pattrib->frag_num == 0)) {
+ DBG_88E("recvbuf2recvframe: alloc_skb fail, drop frag frame\n");
+ rtw_free_recvframe(precvframe, pfree_recv_queue);
+ goto _exit_recvbuf2recvframe;
+ }
+ precvframe->u.hdr.pkt = skb_clone(pskb, GFP_ATOMIC);
+ if (precvframe->u.hdr.pkt) {
+ precvframe->u.hdr.rx_tail = pbuf + pattrib->drvinfo_sz + RXDESC_SIZE;
+ precvframe->u.hdr.rx_head = precvframe->u.hdr.rx_tail;
+ precvframe->u.hdr.rx_data = precvframe->u.hdr.rx_tail;
+ precvframe->u.hdr.rx_end = pbuf + pattrib->drvinfo_sz + RXDESC_SIZE + alloc_sz;
+ } else {
+ DBG_88E("recvbuf2recvframe: skb_clone fail\n");
+ rtw_free_recvframe(precvframe, pfree_recv_queue);
+ goto _exit_recvbuf2recvframe;
+ }
+ }
+
+ recvframe_put(precvframe, skb_len);
+
+ switch (haldata->UsbRxAggMode) {
+ case USB_RX_AGG_DMA:
+ case USB_RX_AGG_MIX:
+ pkt_offset = (u16)_RND128(pkt_offset);
+ break;
+ case USB_RX_AGG_USB:
+ pkt_offset = (u16)_RND4(pkt_offset);
+ break;
+ case USB_RX_AGG_DISABLE:
+ default:
+ break;
+ }
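+ /*
+  * Assuming _RND128()/_RND4() round up to the next multiple of 128/4
+  * (as their names suggest), a 1000-byte pkt_offset advances by 1024
+  * bytes under DMA/MIX aggregation and by 1000 bytes (already a
+  * multiple of 4) under USB aggregation, so the next rx descriptor is
+  * fetched from an aligned offset inside the aggregated transfer.
+  */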
+ if (pattrib->pkt_rpt_type == NORMAL_RX) { /* Normal rx packet */
+ if (pattrib->physt)
+ update_recvframe_phyinfo_88e(precvframe, (struct phy_stat *)pphy_status);
+ if (rtw_recv_entry(precvframe) != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("recvbuf2recvframe: rtw_recv_entry(precvframe) != _SUCCESS\n"));
+ }
+ } else {
+ /* enqueue recvframe to txrtp queue */
+ if (pattrib->pkt_rpt_type == TX_REPORT1) {
+ /* CCX-TXRPT ack for xmit mgmt frames. */
+ handle_txrpt_ccx_88e(adapt, precvframe->u.hdr.rx_data);
+ } else if (pattrib->pkt_rpt_type == TX_REPORT2) {
+ ODM_RA_TxRPT2Handle_8188E(
+ &haldata->odmpriv,
+ precvframe->u.hdr.rx_data,
+ pattrib->pkt_len,
+ pattrib->MacIDValidEntry[0],
+ pattrib->MacIDValidEntry[1]
+ );
+ } else if (pattrib->pkt_rpt_type == HIS_REPORT) {
+ interrupt_handler_8188eu(adapt, pattrib->pkt_len, precvframe->u.hdr.rx_data);
+ }
+ rtw_free_recvframe(precvframe, pfree_recv_queue);
+ }
+ pkt_cnt--;
+ transfer_len -= pkt_offset;
+ pbuf += pkt_offset;
+ precvframe = NULL;
+ pkt_copy = NULL;
+
+ if (transfer_len > 0 && pkt_cnt == 0)
+ pkt_cnt = (le32_to_cpu(prxstat->rxdw2)>>16) & 0xff;
+
+ } while ((transfer_len > 0) && (pkt_cnt > 0));
+
+_exit_recvbuf2recvframe:
+
+ return _SUCCESS;
+}
+
+void rtl8188eu_recv_tasklet(void *priv)
+{
+ struct sk_buff *pskb;
+ struct adapter *adapt = (struct adapter *)priv;
+ struct recv_priv *precvpriv = &adapt->recvpriv;
+
+ while (NULL != (pskb = skb_dequeue(&precvpriv->rx_skb_queue))) {
+ if ((adapt->bDriverStopped) || (adapt->bSurpriseRemoved)) {
+ DBG_88E("recv_tasklet => bDriverStopped or bSurpriseRemoved\n");
+ dev_kfree_skb_any(pskb);
+ break;
+ }
+ recvbuf2recvframe(adapt, pskb);
+ skb_reset_tail_pointer(pskb);
+ pskb->len = 0;
+ skb_queue_tail(&precvpriv->free_recv_skb_queue, pskb);
+ }
+}
+
+static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
+{
+ struct recv_buf *precvbuf = (struct recv_buf *)purb->context;
+ struct adapter *adapt = (struct adapter *)precvbuf->adapter;
+ struct recv_priv *precvpriv = &adapt->recvpriv;
+
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete!!!\n"));
+
+ precvpriv->rx_pending_cnt--;
+
+ if (adapt->bSurpriseRemoved || adapt->bDriverStopped || adapt->bReadPortCancel) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_read_port_complete:bDriverStopped(%d) OR bSurpriseRemoved(%d)\n",
+ adapt->bDriverStopped, adapt->bSurpriseRemoved));
+
+ precvbuf->reuse = true;
+ DBG_88E("%s() RX Warning! bDriverStopped(%d) OR bSurpriseRemoved(%d) bReadPortCancel(%d)\n",
+ __func__, adapt->bDriverStopped,
+ adapt->bSurpriseRemoved, adapt->bReadPortCancel);
+ goto exit;
+ }
+
+ if (purb->status == 0) { /* SUCCESS */
+ if ((purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_read_port_complete: (purb->actual_length > MAX_RECVBUF_SZ) || (purb->actual_length < RXDESC_SIZE)\n"));
+ precvbuf->reuse = true;
+ rtw_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
+ DBG_88E("%s()-%d: RX Warning!\n", __func__, __LINE__);
+ } else {
+ rtw_reset_continual_urb_error(adapter_to_dvobj(adapt));
+
+ precvbuf->transfer_len = purb->actual_length;
+ skb_put(precvbuf->pskb, purb->actual_length);
+ skb_queue_tail(&precvpriv->rx_skb_queue, precvbuf->pskb);
+
+ if (skb_queue_len(&precvpriv->rx_skb_queue) <= 1)
+ tasklet_schedule(&precvpriv->recv_tasklet);
+
+ precvbuf->pskb = NULL;
+ precvbuf->reuse = false;
+ rtw_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
+ }
+ } else {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete : purb->status(%d) != 0\n", purb->status));
+
+ DBG_88E("###=> usb_read_port_complete => urb status(%d)\n", purb->status);
+
+ if (rtw_inc_and_chk_continual_urb_error(adapter_to_dvobj(adapt)))
+ adapt->bSurpriseRemoved = true;
+
+ switch (purb->status) {
+ case -EINVAL:
+ case -EPIPE:
+ case -ENODEV:
+ case -ESHUTDOWN:
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete:bSurpriseRemoved=true\n"));
+ case -ENOENT:
+ adapt->bDriverStopped = true;
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_read_port_complete:bDriverStopped=true\n"));
+ break;
+ case -EPROTO:
+ case -EOVERFLOW:
+ {
+ struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
+ haldata->srestpriv.Wifi_Error_Status = USB_READ_PORT_FAIL;
+ }
+ precvbuf->reuse = true;
+ rtw_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
+ break;
+ case -EINPROGRESS:
+ DBG_88E("ERROR: URB IS IN PROGRESS!\n");
+ break;
+ default:
+ break;
+ }
+ }
+
+exit:
+_func_exit_;
+}
+
+static u32 usb_read_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem)
+{
+ struct urb *purb = NULL;
+ struct recv_buf *precvbuf = (struct recv_buf *)rmem;
+ struct adapter *adapter = pintfhdl->padapter;
+ struct dvobj_priv *pdvobj = adapter_to_dvobj(adapter);
+ struct recv_priv *precvpriv = &adapter->recvpriv;
+ struct usb_device *pusbd = pdvobj->pusbdev;
+ int err;
+ unsigned int pipe;
+ size_t tmpaddr = 0;
+ size_t alignment = 0;
+ u32 ret = _SUCCESS;
+
+_func_enter_;
+
+ if (adapter->bDriverStopped || adapter->bSurpriseRemoved ||
+ adapter->pwrctrlpriv.pnp_bstop_trx) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_read_port:(adapt->bDriverStopped ||adapt->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n"));
+ return _FAIL;
+ }
+
+ if ((!precvbuf->reuse) || (precvbuf->pskb == NULL)) {
+ precvbuf->pskb = skb_dequeue(&precvpriv->free_recv_skb_queue);
+ if (NULL != precvbuf->pskb)
+ precvbuf->reuse = true;
+ }
+
+ if (precvbuf != NULL) {
+ rtl8188eu_init_recvbuf(adapter, precvbuf);
+
+ /* re-assign for linux based on skb */
+ if ((!precvbuf->reuse) || (precvbuf->pskb == NULL)) {
+ precvbuf->pskb = netdev_alloc_skb(adapter->pnetdev, MAX_RECVBUF_SZ + RECVBUFF_ALIGN_SZ);
+ if (precvbuf->pskb == NULL) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("init_recvbuf(): alloc_skb fail!\n"));
+ DBG_88E("#### usb_read_port() alloc_skb fail!#####\n");
+ return _FAIL;
+ }
+
+ tmpaddr = (size_t)precvbuf->pskb->data;
+ alignment = tmpaddr & (RECVBUFF_ALIGN_SZ-1);
+ skb_reserve(precvbuf->pskb, (RECVBUFF_ALIGN_SZ - alignment));
+
+ precvbuf->phead = precvbuf->pskb->head;
+ precvbuf->pdata = precvbuf->pskb->data;
+ precvbuf->ptail = skb_tail_pointer(precvbuf->pskb);
+ precvbuf->pend = skb_end_pointer(precvbuf->pskb);
+ precvbuf->pbuf = precvbuf->pskb->data;
+ } else { /* reuse skb */
+ precvbuf->phead = precvbuf->pskb->head;
+ precvbuf->pdata = precvbuf->pskb->data;
+ precvbuf->ptail = skb_tail_pointer(precvbuf->pskb);
+ precvbuf->pend = skb_end_pointer(precvbuf->pskb);
+ precvbuf->pbuf = precvbuf->pskb->data;
+
+ precvbuf->reuse = false;
+ }
+
+ precvpriv->rx_pending_cnt++;
+
+ purb = precvbuf->purb;
+
+ /* translate DMA FIFO addr to pipehandle */
+ pipe = ffaddr2pipehdl(pdvobj, addr);
+
+ usb_fill_bulk_urb(purb, pusbd, pipe,
+ precvbuf->pbuf,
+ MAX_RECVBUF_SZ,
+ usb_read_port_complete,
+ precvbuf);/* context is precvbuf */
+
+ err = usb_submit_urb(purb, GFP_ATOMIC);
+ if ((err) && (err != (-EPERM))) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("cannot submit rx in-token(err=0x%.8x), URB_STATUS =0x%.8x",
+ err, purb->status));
+ DBG_88E("cannot submit rx in-token(err = 0x%08x),urb_status = %d\n",
+ err, purb->status);
+ ret = _FAIL;
+ }
+ } else {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_read_port:precvbuf ==NULL\n"));
+ ret = _FAIL;
+ }
+
+_func_exit_;
+ return ret;
+}
+
+void rtl8188eu_xmit_tasklet(void *priv)
+{
+ int ret = false;
+ struct adapter *adapt = (struct adapter *)priv;
+ struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
+
+ if (check_fwstate(&adapt->mlmepriv, _FW_UNDER_SURVEY))
+ return;
+
+ while (1) {
+ if ((adapt->bDriverStopped) ||
+ (adapt->bSurpriseRemoved) ||
+ (adapt->bWritePortCancel)) {
+ DBG_88E("xmit_tasklet => bDriverStopped or bSurpriseRemoved or bWritePortCancel\n");
+ break;
+ }
+
+ ret = rtl8188eu_xmitframe_complete(adapt, pxmitpriv, NULL);
+
+ if (!ret)
+ break;
+ }
+}
+
+void rtl8188eu_set_intf_ops(struct _io_ops *pops)
+{
+ _func_enter_;
+ _rtw_memset((u8 *)pops, 0, sizeof(struct _io_ops));
+ pops->_read8 = &usb_read8;
+ pops->_read16 = &usb_read16;
+ pops->_read32 = &usb_read32;
+ pops->_read_mem = &usb_read_mem;
+ pops->_read_port = &usb_read_port;
+ pops->_write8 = &usb_write8;
+ pops->_write16 = &usb_write16;
+ pops->_write32 = &usb_write32;
+ pops->_writeN = &usb_writeN;
+ pops->_write_mem = &usb_write_mem;
+ pops->_write_port = &usb_write_port;
+ pops->_read_port_cancel = &usb_read_port_cancel;
+ pops->_write_port_cancel = &usb_write_port_cancel;
+ _func_exit_;
+}
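+/*
+ * These _io_ops back the generic rtw_read8/16/32 and rtw_write8/16/32
+ * wrappers used throughout the HAL above, so register accesses on this
+ * device ultimately become USB vendor requests built by
+ * usbctrl_vendorreq().
+ */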
+
+void rtl8188eu_set_hw_type(struct adapter *adapt)
+{
+ adapt->chip_type = RTL8188E;
+ adapt->HardwareType = HARDWARE_TYPE_RTL8188EU;
+ DBG_88E("CHIP TYPE: RTL8188E\n");
+}
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EFWImg_CE.h b/drivers/staging/rtl8188eu/include/Hal8188EFWImg_CE.h
new file mode 100644
index 00000000000..949c33b9ed6
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/Hal8188EFWImg_CE.h
@@ -0,0 +1,28 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along with
+* this program; if not, write to the Free Software Foundation, Inc.,
+* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+*
+*
+******************************************************************************/
+#ifndef __INC_HAL8188E_FW_IMG_H
+#define __INC_HAL8188E_FW_IMG_H
+
+/* V10(1641) */
+#define Rtl8188EFWImgArrayLength 13904
+
+extern const u8 Rtl8188EFwImgArray[Rtl8188EFWImgArrayLength];
+
+#endif /* __INC_HAL8188E_FW_IMG_H */
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
new file mode 100644
index 00000000000..c4769e20a5c
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
@@ -0,0 +1,276 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __INC_HAL8188EPHYCFG_H__
+#define __INC_HAL8188EPHYCFG_H__
+
+
+/*--------------------------Define Parameters-------------------------------*/
+#define LOOP_LIMIT 5
+#define MAX_STALL_TIME 50 /* us */
+#define AntennaDiversityValue 0x80
+#define MAX_TXPWR_IDX_NMODE_92S 63
+#define Reset_Cnt_Limit 3
+
+#define IQK_MAC_REG_NUM 4
+#define IQK_ADDA_REG_NUM 16
+#define IQK_BB_REG_NUM 9
+#define HP_THERMAL_NUM 8
+
+#define MAX_AGGR_NUM 0x07
+
+
+/*--------------------------Define Parameters-------------------------------*/
+
+
+/*------------------------------Define structure----------------------------*/
+enum sw_chnl_cmd_id {
+ CmdID_End,
+ CmdID_SetTxPowerLevel,
+ CmdID_BBRegWrite10,
+ CmdID_WritePortUlong,
+ CmdID_WritePortUshort,
+ CmdID_WritePortUchar,
+ CmdID_RF_WriteReg,
+};
+
+/* 1. Switch channel related */
+struct sw_chnl_cmd {
+ enum sw_chnl_cmd_id CmdID;
+ u32 Para1;
+ u32 Para2;
+ u32 msDelay;
+};
+
+enum hw90_block {
+ HW90_BLOCK_MAC = 0,
+ HW90_BLOCK_PHY0 = 1,
+ HW90_BLOCK_PHY1 = 2,
+ HW90_BLOCK_RF = 3,
+ HW90_BLOCK_MAXIMUM = 4, /* Never use this */
+};
+
+enum rf_radio_path {
+ RF_PATH_A = 0, /* Radio Path A */
+ RF_PATH_B = 1, /* Radio Path B */
+ RF_PATH_C = 2, /* Radio Path C */
+ RF_PATH_D = 3, /* Radio Path D */
+};
+
+#define MAX_PG_GROUP 13
+
+#define RF_PATH_MAX 2
+#define MAX_RF_PATH RF_PATH_MAX
+#define MAX_TX_COUNT 4 /* path numbers */
+
+#define CHANNEL_MAX_NUMBER 14 /* 14 is the max chnl number */
+#define MAX_CHNL_GROUP_24G 6 /* ch1~2, ch3~5, ch6~8,
+ * ch9~11, ch12~13, ch14:
+ * six groups in total */
+#define CHANNEL_GROUP_MAX_88E 6
+
+enum wireless_mode {
+ WIRELESS_MODE_UNKNOWN = 0x00,
+ WIRELESS_MODE_A = BIT2,
+ WIRELESS_MODE_B = BIT0,
+ WIRELESS_MODE_G = BIT1,
+ WIRELESS_MODE_AUTO = BIT5,
+ WIRELESS_MODE_N_24G = BIT3,
+ WIRELESS_MODE_N_5G = BIT4,
+ WIRELESS_MODE_AC = BIT6
+};
+
+enum phy_rate_tx_offset_area {
+ RA_OFFSET_LEGACY_OFDM1,
+ RA_OFFSET_LEGACY_OFDM2,
+ RA_OFFSET_HT_OFDM1,
+ RA_OFFSET_HT_OFDM2,
+ RA_OFFSET_HT_OFDM3,
+ RA_OFFSET_HT_OFDM4,
+ RA_OFFSET_HT_CCK,
+};
+
+/* BB/RF related */
+enum RF_TYPE_8190P {
+ RF_TYPE_MIN, /* 0 */
+ RF_8225 = 1, /* 1 11b/g RF for verification only */
+ RF_8256 = 2, /* 2 11b/g/n */
+ RF_8258 = 3, /* 3 11a/b/g/n RF */
+ RF_6052 = 4, /* 4 11b/g/n RF */
+ /* TODO: We should remove this pseudo PHY RF after we get the new RF. */
+ RF_PSEUDO_11N = 5, /* 5, It is a temporary RF. */
+};
+
+struct bb_reg_def {
+ u32 rfintfs; /* set software control: */
+ /* 0x870~0x877[8 bytes] */
+ u32 rfintfi; /* readback data: */
+ /* 0x8e0~0x8e7[8 bytes] */
+ u32 rfintfo; /* output data: */
+ /* 0x860~0x86f [16 bytes] */
+ u32 rfintfe; /* output enable: */
+ /* 0x860~0x86f [16 bytes] */
+ u32 rf3wireOffset; /* LSSI data: */
+ /* 0x840~0x84f [16 bytes] */
+ u32 rfLSSI_Select; /* BB Band Select: */
+ /* 0x878~0x87f [8 bytes] */
+ u32 rfTxGainStage; /* Tx gain stage: */
+ /* 0x80c~0x80f [4 bytes] */
+ u32 rfHSSIPara1; /* wire parameter control1 : */
+ /* 0x820~0x823,0x828~0x82b,
+ * 0x830~0x833, 0x838~0x83b [16 bytes] */
+ u32 rfHSSIPara2; /* wire parameter control2 : */
+ /* 0x824~0x827,0x82c~0x82f, 0x834~0x837,
+ * 0x83c~0x83f [16 bytes] */
+ u32 rfSwitchControl; /* Tx Rx antenna control : */
+ /* 0x858~0x85f [16 bytes] */
+ u32 rfAGCControl1; /* AGC parameter control1 : */
+ /* 0xc50~0xc53,0xc58~0xc5b, 0xc60~0xc63,
+ * 0xc68~0xc6b [16 bytes] */
+ u32 rfAGCControl2; /* AGC parameter control2 : */
+ /* 0xc54~0xc57,0xc5c~0xc5f, 0xc64~0xc67,
+ * 0xc6c~0xc6f [16 bytes] */
+ u32 rfRxIQImbalance; /* OFDM Rx IQ imbalance matrix : */
+ /* 0xc14~0xc17,0xc1c~0xc1f, 0xc24~0xc27,
+ * 0xc2c~0xc2f [16 bytes] */
+ u32 rfRxAFE; /* Rx IQ DC offset and Rx digital filter,
+ * Rx DC notch filter : */
+ /* 0xc10~0xc13,0xc18~0xc1b, 0xc20~0xc23,
+ * 0xc28~0xc2b [16 bytes] */
+ u32 rfTxIQImbalance; /* OFDM Tx IQ imbalance matrix */
+ /* 0xc80~0xc83,0xc88~0xc8b, 0xc90~0xc93,
+ * 0xc98~0xc9b [16 bytes] */
+ u32 rfTxAFE; /* Tx IQ DC Offset and Tx DFIR type */
+ /* 0xc84~0xc87,0xc8c~0xc8f, 0xc94~0xc97,
+ * 0xc9c~0xc9f [16 bytes] */
+ u32 rfLSSIReadBack; /* LSSI RF readback data SI mode */
+ /* 0x8a0~0x8af [16 bytes] */
+ u32 rfLSSIReadBackPi; /* LSSI RF readback data PI mode 0x8b8-8bc for
+ * Path A and B */
+};
+
+struct ant_sel_ofdm {
+ u32 r_tx_antenna:4;
+ u32 r_ant_l:4;
+ u32 r_ant_non_ht:4;
+ u32 r_ant_ht1:4;
+ u32 r_ant_ht2:4;
+ u32 r_ant_ht_s1:4;
+ u32 r_ant_non_ht_s1:4;
+ u32 OFDM_TXSC:2;
+ u32 reserved:2;
+};
+
+struct ant_sel_cck {
+ u8 r_cckrx_enable_2:2;
+ u8 r_cckrx_enable:2;
+ u8 r_ccktx_enable:4;
+};
+
+/*------------------------------Define structure----------------------------*/
+
+
+/*------------------------Export global variable----------------------------*/
+/*------------------------Export global variable----------------------------*/
+
+
+/*------------------------Export Macro Definition---------------------------*/
+/*------------------------Export Macro Definition---------------------------*/
+
+
+/*--------------------------Exported Function prototype---------------------*/
+/* */
+/* BB and RF register read/write */
+/* */
+u32 rtl8188e_PHY_QueryBBReg(struct adapter *adapter, u32 regaddr, u32 mask);
+void rtl8188e_PHY_SetBBReg(struct adapter *Adapter, u32 RegAddr,
+ u32 mask, u32 data);
+u32 rtl8188e_PHY_QueryRFReg(struct adapter *adapter, enum rf_radio_path rfpath,
+ u32 regaddr, u32 mask);
+void rtl8188e_PHY_SetRFReg(struct adapter *adapter, enum rf_radio_path rfpath,
+ u32 regaddr, u32 mask, u32 data);
+
+/* Initialization related function */
+/* MAC/BB/RF HAL config */
+int PHY_MACConfig8188E(struct adapter *adapter);
+int PHY_BBConfig8188E(struct adapter *adapter);
+int PHY_RFConfig8188E(struct adapter *adapter);
+
+/* RF config */
+int rtl8188e_PHY_ConfigRFWithParaFile(struct adapter *adapter, u8 *filename,
+ enum rf_radio_path rfpath);
+int rtl8188e_PHY_ConfigRFWithHeaderFile(struct adapter *adapter,
+ enum rf_radio_path rfpath);
+
+/* Read initial reg value for tx power setting. */
+void rtl8192c_PHY_GetHWRegOriginalValue(struct adapter *adapter);
+
+/* BB TX Power R/W */
+void PHY_GetTxPowerLevel8188E(struct adapter *adapter, u32 *powerlevel);
+void PHY_SetTxPowerLevel8188E(struct adapter *adapter, u8 channel);
+bool PHY_UpdateTxPowerDbm8188E(struct adapter *adapter, int power);
+
+void PHY_ScanOperationBackup8188E(struct adapter *Adapter, u8 Operation);
+
+/* Switch bandwidth for 8192S */
+void PHY_SetBWMode8188E(struct adapter *adapter,
+ enum ht_channel_width chnlwidth, unsigned char offset);
+
+/* channel switch related function */
+void PHY_SwChnl8188E(struct adapter *adapter, u8 channel);
+/* Call after initialization */
+void ChkFwCmdIoDone(struct adapter *adapter);
+
+/* BB/MAC/RF other monitor API */
+void PHY_SetRFPathSwitch_8188E(struct adapter *adapter, bool main);
+
+void PHY_SwitchEphyParameter(struct adapter *adapter);
+
+void PHY_EnableHostClkReq(struct adapter *adapter);
+
+bool SetAntennaConfig92C(struct adapter *adapter, u8 defaultant);
+
+void storePwrIndexDiffRateOffset(struct adapter *adapter, u32 regaddr,
+ u32 mask, u32 data);
+/*--------------------------Exported Function prototype---------------------*/
+
+#define PHY_QueryBBReg(adapt, regaddr, mask) \
+ rtl8188e_PHY_QueryBBReg((adapt), (regaddr), (mask))
+#define PHY_SetBBReg(adapt, regaddr, bitmask, data) \
+ rtl8188e_PHY_SetBBReg((adapt), (regaddr), (bitmask), (data))
+#define PHY_QueryRFReg(adapt, rfpath, regaddr, bitmask) \
+ rtl8188e_PHY_QueryRFReg((adapt), (rfpath), (regaddr), (bitmask))
+#define PHY_SetRFReg(adapt, rfpath, regaddr, bitmask, data) \
+ rtl8188e_PHY_SetRFReg((adapt), (rfpath), (regaddr), (bitmask), (data))
+
+#define PHY_SetMacReg PHY_SetBBReg
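+/*
+ * Illustrative usage of the wrappers above (the register and bit mask
+ * below are examples only, not taken from callers of this header):
+ *
+ *	u32 igi = PHY_QueryBBReg(adapt, rOFDM0_XAAGCCore1, 0x7f);
+ *	PHY_SetBBReg(adapt, rOFDM0_XAAGCCore1, 0x7f, igi + 2);
+ */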
+
+#define SIC_HW_SUPPORT 0
+
+#define SIC_MAX_POLL_CNT 5
+
+#define SIC_CMD_READY 0
+#define SIC_CMD_WRITE 1
+#define SIC_CMD_READ 2
+
+#define SIC_CMD_REG 0x1EB /* 1byte */
+#define SIC_ADDR_REG 0x1E8 /* 1b9~1ba, 2 bytes */
+#define SIC_DATA_REG 0x1EC /* 1bc~1bf */
+
+#endif /* __INC_HAL8188EPHYCFG_H__ */
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
new file mode 100644
index 00000000000..0e06d29b2d2
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
@@ -0,0 +1,1094 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __INC_HAL8188EPHYREG_H__
+#define __INC_HAL8188EPHYREG_H__
+/*--------------------------Define Parameters-------------------------------*/
+/* */
+/* BB-PHY register PMAC 0x100 PHY 0x800 - 0xEFF */
+/* 1. PMAC duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF */
+/* 2. 0x800/0x900/0xA00/0xC00/0xD00/0xE00 */
+/* 3. RF register 0x00-2E */
+/* 4. Bit Mask for BB/RF register */
+/* 5. Other definitions for BB/RF R/W */
+/* */
+
+
+/* */
+/* 1. PMAC duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF */
+/* 1. Page1(0x100) */
+/* */
+#define rPMAC_Reset 0x100
+#define rPMAC_TxStart 0x104
+#define rPMAC_TxLegacySIG 0x108
+#define rPMAC_TxHTSIG1 0x10c
+#define rPMAC_TxHTSIG2 0x110
+#define rPMAC_PHYDebug 0x114
+#define rPMAC_TxPacketNum 0x118
+#define rPMAC_TxIdle 0x11c
+#define rPMAC_TxMACHeader0 0x120
+#define rPMAC_TxMACHeader1 0x124
+#define rPMAC_TxMACHeader2 0x128
+#define rPMAC_TxMACHeader3 0x12c
+#define rPMAC_TxMACHeader4 0x130
+#define rPMAC_TxMACHeader5 0x134
+#define rPMAC_TxDataType 0x138
+#define rPMAC_TxRandomSeed 0x13c
+#define rPMAC_CCKPLCPPreamble 0x140
+#define rPMAC_CCKPLCPHeader 0x144
+#define rPMAC_CCKCRC16 0x148
+#define rPMAC_OFDMRxCRC32OK 0x170
+#define rPMAC_OFDMRxCRC32Er 0x174
+#define rPMAC_OFDMRxParityEr 0x178
+#define rPMAC_OFDMRxCRC8Er 0x17c
+#define rPMAC_CCKCRxRC16Er 0x180
+#define rPMAC_CCKCRxRC32Er 0x184
+#define rPMAC_CCKCRxRC32OK 0x188
+#define rPMAC_TxStatus 0x18c
+
+/* 2. Page2(0x200) */
+/* The following two definition are only used for USB interface. */
+#define RF_BB_CMD_ADDR 0x02c0 /* RF/BB r/w cmd address. */
+#define RF_BB_CMD_DATA 0x02c4 /* RF/BB r/w cmd data. */
+
+/* 3. Page8(0x800) */
+#define rFPGA0_RFMOD 0x800 /* RF mode & CCK TxSC RF BW Setting */
+
+#define rFPGA0_TxInfo 0x804 /* Status report?? */
+#define rFPGA0_PSDFunction 0x808
+
+#define rFPGA0_TxGainStage 0x80c /* Set TX PWR init gain? */
+
+#define rFPGA0_RFTiming1 0x810 /* Useless now */
+#define rFPGA0_RFTiming2 0x814
+
+#define rFPGA0_XA_HSSIParameter1 0x820 /* RF 3 wire register */
+#define rFPGA0_XA_HSSIParameter2 0x824
+#define rFPGA0_XB_HSSIParameter1 0x828
+#define rFPGA0_XB_HSSIParameter2 0x82c
+
+#define rFPGA0_XA_LSSIParameter 0x840
+#define rFPGA0_XB_LSSIParameter 0x844
+
+#define rFPGA0_RFWakeUpParameter 0x850 /* Useless now */
+#define rFPGA0_RFSleepUpParameter 0x854
+
+#define rFPGA0_XAB_SwitchControl 0x858 /* RF Channel switch */
+#define rFPGA0_XCD_SwitchControl 0x85c
+
+#define rFPGA0_XA_RFInterfaceOE 0x860 /* RF Channel switch */
+#define rFPGA0_XB_RFInterfaceOE 0x864
+
+#define rFPGA0_XAB_RFInterfaceSW 0x870 /* RF Iface Software Control */
+#define rFPGA0_XCD_RFInterfaceSW 0x874
+
+#define rFPGA0_XAB_RFParameter 0x878 /* RF Parameter */
+#define rFPGA0_XCD_RFParameter 0x87c
+
+/* Crystal cap setting RF-R/W protection for parameter4?? */
+#define rFPGA0_AnalogParameter1 0x880
+#define rFPGA0_AnalogParameter2 0x884
+#define rFPGA0_AnalogParameter3 0x888
+/* enable ad/da clock1 for dual-phy */
+#define rFPGA0_AdDaClockEn 0x888
+#define rFPGA0_AnalogParameter4 0x88c
+
+#define rFPGA0_XA_LSSIReadBack 0x8a0 /* Transceiver LSSI Readback */
+#define rFPGA0_XB_LSSIReadBack 0x8a4
+#define rFPGA0_XC_LSSIReadBack 0x8a8
+#define rFPGA0_XD_LSSIReadBack 0x8ac
+
+#define rFPGA0_PSDReport 0x8b4 /* Useless now */
+/* Transceiver A HSPI Readback */
+#define TransceiverA_HSPI_Readback 0x8b8
+/* Transceiver B HSPI Readback */
+#define TransceiverB_HSPI_Readback 0x8bc
+/* Useless now RF Interface Readback Value */
+#define rFPGA0_XAB_RFInterfaceRB 0x8e0
+#define rFPGA0_XCD_RFInterfaceRB 0x8e4 /* Useless now */
+
+/* 4. Page9(0x900) */
+/* RF mode & OFDM TxSC RF BW Setting?? */
+#define rFPGA1_RFMOD 0x900
+
+#define rFPGA1_TxBlock 0x904 /* Useless now */
+#define rFPGA1_DebugSelect 0x908 /* Useless now */
+#define rFPGA1_TxInfo 0x90c /* Useless now Status report */
+
+/* 5. PageA(0xA00) */
+/* Set Control channel to upper or lower - required only for 40MHz */
+#define rCCK0_System 0xa00
+
+/* Disable init gain now Select RX path by RSSI */
+#define rCCK0_AFESetting 0xa04
+/* Disable init gain now Init gain */
+#define rCCK0_CCA 0xa08
+
+/* AGC default value, saturation level; Antenna Diversity, RX AGC, LNA Threshold,
+ * RX LNA Threshold are useless now. Not the same as the 90 series. */
+#define rCCK0_RxAGC1 0xa0c
+#define rCCK0_RxAGC2 0xa10 /* AGC & DAGC */
+
+#define rCCK0_RxHP 0xa14
+
+/* Timing recovery & Channel estimation threshold */
+#define rCCK0_DSPParameter1 0xa18
+#define rCCK0_DSPParameter2 0xa1c /* SQ threshold */
+
+#define rCCK0_TxFilter1 0xa20
+#define rCCK0_TxFilter2 0xa24
+#define rCCK0_DebugPort 0xa28 /* debug port and Tx filter3 */
+#define rCCK0_FalseAlarmReport 0xa2c /* 0xa2d useless now */
+#define rCCK0_TRSSIReport 0xa50
+#define rCCK0_RxReport 0xa54 /* 0xa57 */
+#define rCCK0_FACounterLower 0xa5c /* 0xa5b */
+#define rCCK0_FACounterUpper 0xa58 /* 0xa5c */
+
+/* */
+/* PageB(0xB00) */
+/* */
+#define rPdp_AntA 0xb00
+#define rPdp_AntA_4 0xb04
+#define rConfig_Pmpd_AntA 0xb28
+#define rConfig_AntA 0xb68
+#define rConfig_AntB 0xb6c
+#define rPdp_AntB 0xb70
+#define rPdp_AntB_4 0xb74
+#define rConfig_Pmpd_AntB 0xb98
+#define rAPK 0xbd8
+
+/* */
+/* 6. PageC(0xC00) */
+/* */
+#define rOFDM0_LSTF 0xc00
+
+#define rOFDM0_TRxPathEnable 0xc04
+#define rOFDM0_TRMuxPar 0xc08
+#define rOFDM0_TRSWIsolation 0xc0c
+
+/* RxIQ DC offset, Rx digital filter, DC notch filter */
+#define rOFDM0_XARxAFE 0xc10
+#define rOFDM0_XARxIQImbalance 0xc14 /* RxIQ imbalance matrix */
+#define rOFDM0_XBRxAFE 0xc18
+#define rOFDM0_XBRxIQImbalance 0xc1c
+#define rOFDM0_XCRxAFE 0xc20
+#define rOFDM0_XCRxIQImbalance 0xc24
+#define rOFDM0_XDRxAFE 0xc28
+#define rOFDM0_XDRxIQImbalance 0xc2c
+
+#define rOFDM0_RxDetector1 0xc30 /* PD, BW & SBD DM tune init gain */
+#define rOFDM0_RxDetector2 0xc34 /* SBD & Frame Sync. */
+#define rOFDM0_RxDetector3 0xc38 /* Frame Sync. */
+#define rOFDM0_RxDetector4 0xc3c /* PD, SBD, Frame Sync & Short-GI */
+
+#define rOFDM0_RxDSP 0xc40 /* Rx Sync Path */
+#define rOFDM0_CFOandDAGC 0xc44 /* CFO & DAGC */
+#define rOFDM0_CCADropThreshold 0xc48 /* CCA Drop threshold */
+#define rOFDM0_ECCAThreshold 0xc4c /* energy CCA */
+
+#define rOFDM0_XAAGCCore1 0xc50 /* DIG */
+#define rOFDM0_XAAGCCore2 0xc54
+#define rOFDM0_XBAGCCore1 0xc58
+#define rOFDM0_XBAGCCore2 0xc5c
+#define rOFDM0_XCAGCCore1 0xc60
+#define rOFDM0_XCAGCCore2 0xc64
+#define rOFDM0_XDAGCCore1 0xc68
+#define rOFDM0_XDAGCCore2 0xc6c
+
+#define rOFDM0_AGCParameter1 0xc70
+#define rOFDM0_AGCParameter2 0xc74
+#define rOFDM0_AGCRSSITable 0xc78
+#define rOFDM0_HTSTFAGC 0xc7c
+
+#define rOFDM0_XATxIQImbalance 0xc80 /* TX PWR TRACK and DIG */
+#define rOFDM0_XATxAFE 0xc84
+#define rOFDM0_XBTxIQImbalance 0xc88
+#define rOFDM0_XBTxAFE 0xc8c
+#define rOFDM0_XCTxIQImbalance 0xc90
+#define rOFDM0_XCTxAFE 0xc94
+#define rOFDM0_XDTxIQImbalance 0xc98
+#define rOFDM0_XDTxAFE 0xc9c
+
+#define rOFDM0_RxIQExtAnta 0xca0
+#define rOFDM0_TxCoeff1 0xca4
+#define rOFDM0_TxCoeff2 0xca8
+#define rOFDM0_TxCoeff3 0xcac
+#define rOFDM0_TxCoeff4 0xcb0
+#define rOFDM0_TxCoeff5 0xcb4
+#define rOFDM0_TxCoeff6 0xcb8
+#define rOFDM0_RxHPParameter 0xce0
+#define rOFDM0_TxPseudoNoiseWgt 0xce4
+#define rOFDM0_FrameSync 0xcf0
+#define rOFDM0_DFSReport 0xcf4
+
+
+/* */
+/* 7. PageD(0xD00) */
+/* */
+#define rOFDM1_LSTF 0xd00
+#define rOFDM1_TRxPathEnable 0xd04
+
+#define rOFDM1_CFO 0xd08 /* No setting now */
+#define rOFDM1_CSI1 0xd10
+#define rOFDM1_SBD 0xd14
+#define rOFDM1_CSI2 0xd18
+#define rOFDM1_CFOTracking 0xd2c
+#define rOFDM1_TRxMesaure1 0xd34
+#define rOFDM1_IntfDet 0xd3c
+#define rOFDM1_PseudoNoiseStateAB 0xd50
+#define rOFDM1_PseudoNoiseStateCD 0xd54
+#define rOFDM1_RxPseudoNoiseWgt 0xd58
+
+#define rOFDM_PHYCounter1 0xda0 /* cca, parity fail */
+#define rOFDM_PHYCounter2 0xda4 /* rate illegal, crc8 fail */
+#define rOFDM_PHYCounter3 0xda8 /* MCS not support */
+
+#define rOFDM_ShortCFOAB 0xdac /* No setting now */
+#define rOFDM_ShortCFOCD 0xdb0
+#define rOFDM_LongCFOAB 0xdb4
+#define rOFDM_LongCFOCD 0xdb8
+#define rOFDM_TailCFOAB 0xdbc
+#define rOFDM_TailCFOCD 0xdc0
+#define rOFDM_PWMeasure1 0xdc4
+#define rOFDM_PWMeasure2 0xdc8
+#define rOFDM_BWReport 0xdcc
+#define rOFDM_AGCReport 0xdd0
+#define rOFDM_RxSNR 0xdd4
+#define rOFDM_RxEVMCSI 0xdd8
+#define rOFDM_SIGReport 0xddc
+
+
+/* */
+/* 8. PageE(0xE00) */
+/* */
+#define rTxAGC_A_Rate18_06 0xe00
+#define rTxAGC_A_Rate54_24 0xe04
+#define rTxAGC_A_CCK1_Mcs32 0xe08
+#define rTxAGC_A_Mcs03_Mcs00 0xe10
+#define rTxAGC_A_Mcs07_Mcs04 0xe14
+#define rTxAGC_A_Mcs11_Mcs08 0xe18
+#define rTxAGC_A_Mcs15_Mcs12 0xe1c
+
+#define rTxAGC_B_Rate18_06 0x830
+#define rTxAGC_B_Rate54_24 0x834
+#define rTxAGC_B_CCK1_55_Mcs32 0x838
+#define rTxAGC_B_Mcs03_Mcs00 0x83c
+#define rTxAGC_B_Mcs07_Mcs04 0x848
+#define rTxAGC_B_Mcs11_Mcs08 0x84c
+#define rTxAGC_B_Mcs15_Mcs12 0x868
+#define rTxAGC_B_CCK11_A_CCK2_11 0x86c
+
+#define rFPGA0_IQK 0xe28
+#define rTx_IQK_Tone_A 0xe30
+#define rRx_IQK_Tone_A 0xe34
+#define rTx_IQK_PI_A 0xe38
+#define rRx_IQK_PI_A 0xe3c
+
+#define rTx_IQK 0xe40
+#define rRx_IQK 0xe44
+#define rIQK_AGC_Pts 0xe48
+#define rIQK_AGC_Rsp 0xe4c
+#define rTx_IQK_Tone_B 0xe50
+#define rRx_IQK_Tone_B 0xe54
+#define rTx_IQK_PI_B 0xe58
+#define rRx_IQK_PI_B 0xe5c
+#define rIQK_AGC_Cont 0xe60
+
+#define rBlue_Tooth 0xe6c
+#define rRx_Wait_CCA 0xe70
+#define rTx_CCK_RFON 0xe74
+#define rTx_CCK_BBON 0xe78
+#define rTx_OFDM_RFON 0xe7c
+#define rTx_OFDM_BBON 0xe80
+#define rTx_To_Rx 0xe84
+#define rTx_To_Tx 0xe88
+#define rRx_CCK 0xe8c
+
+#define rTx_Power_Before_IQK_A 0xe94
+#define rTx_Power_After_IQK_A 0xe9c
+
+#define rRx_Power_Before_IQK_A 0xea0
+#define rRx_Power_Before_IQK_A_2 0xea4
+#define rRx_Power_After_IQK_A 0xea8
+#define rRx_Power_After_IQK_A_2 0xeac
+
+#define rTx_Power_Before_IQK_B 0xeb4
+#define rTx_Power_After_IQK_B 0xebc
+
+#define rRx_Power_Before_IQK_B 0xec0
+#define rRx_Power_Before_IQK_B_2 0xec4
+#define rRx_Power_After_IQK_B 0xec8
+#define rRx_Power_After_IQK_B_2 0xecc
+
+#define rRx_OFDM 0xed0
+#define rRx_Wait_RIFS 0xed4
+#define rRx_TO_Rx 0xed8
+#define rStandby 0xedc
+#define rSleep 0xee0
+#define rPMPD_ANAEN 0xeec
+
+/* */
+/* 7. RF Register 0x00-0x2E (RF 8256) */
+/* RF-0222D 0x00-3F */
+/* */
+/* Zebra1 */
+#define rZebra1_HSSIEnable 0x0 /* Useless now */
+#define rZebra1_TRxEnable1 0x1
+#define rZebra1_TRxEnable2 0x2
+#define rZebra1_AGC 0x4
+#define rZebra1_ChargePump 0x5
+#define rZebra1_Channel 0x7 /* RF channel switch */
+
+/* endif */
+#define rZebra1_TxGain 0x8 /* Useless now */
+#define rZebra1_TxLPF 0x9
+#define rZebra1_RxLPF 0xb
+#define rZebra1_RxHPFCorner 0xc
+
+/* Zebra4 */
+#define rGlobalCtrl 0 /* Useless now */
+#define rRTL8256_TxLPF 19
+#define rRTL8256_RxLPF 11
+
+/* RTL8258 */
+#define rRTL8258_TxLPF 0x11 /* Useless now */
+#define rRTL8258_RxLPF 0x13
+#define rRTL8258_RSSILPF 0xa
+
+/* */
+/* RL6052 Register definition */
+/* */
+#define RF_AC 0x00 /* */
+
+#define RF_IQADJ_G1 0x01 /* */
+#define RF_IQADJ_G2 0x02 /* */
+
+#define RF_POW_TRSW 0x05 /* */
+
+#define RF_GAIN_RX 0x06 /* */
+#define RF_GAIN_TX 0x07 /* */
+
+#define RF_TXM_IDAC 0x08 /* */
+#define RF_IPA_G 0x09 /* */
+#define RF_TXBIAS_G 0x0A
+#define RF_TXPA_AG 0x0B
+#define RF_IPA_A 0x0C /* */
+#define RF_TXBIAS_A 0x0D
+#define RF_BS_PA_APSET_G9_G11 0x0E
+#define RF_BS_IQGEN 0x0F /* */
+
+#define RF_MODE1 0x10 /* */
+#define RF_MODE2 0x11 /* */
+
+#define RF_RX_AGC_HP 0x12 /* */
+#define RF_TX_AGC 0x13 /* */
+#define RF_BIAS 0x14 /* */
+#define RF_IPA 0x15 /* */
+#define RF_TXBIAS 0x16
+#define RF_POW_ABILITY 0x17 /* */
+#define RF_CHNLBW 0x18 /* RF channel and BW switch */
+#define RF_TOP 0x19 /* */
+
+#define RF_RX_G1 0x1A /* */
+#define RF_RX_G2 0x1B /* */
+
+#define RF_RX_BB2 0x1C /* */
+#define RF_RX_BB1 0x1D /* */
+
+#define RF_RCK1 0x1E /* */
+#define RF_RCK2 0x1F /* */
+
+#define RF_TX_G1 0x20 /* */
+#define RF_TX_G2 0x21 /* */
+#define RF_TX_G3 0x22 /* */
+
+#define RF_TX_BB1 0x23 /* */
+
+#define RF_T_METER_92D 0x42 /* */
+#define RF_T_METER_88E 0x42 /* */
+#define RF_T_METER 0x24 /* */
+
+#define RF_SYN_G1 0x25 /* RF TX Power control */
+#define RF_SYN_G2 0x26 /* RF TX Power control */
+#define RF_SYN_G3 0x27 /* RF TX Power control */
+#define RF_SYN_G4 0x28 /* RF TX Power control */
+#define RF_SYN_G5 0x29 /* RF TX Power control */
+#define RF_SYN_G6 0x2A /* RF TX Power control */
+#define RF_SYN_G7 0x2B /* RF TX Power control */
+#define RF_SYN_G8 0x2C /* RF TX Power control */
+
+#define RF_RCK_OS 0x30 /* RF TX PA control */
+#define RF_TXPA_G1 0x31 /* RF TX PA control */
+#define RF_TXPA_G2 0x32 /* RF TX PA control */
+#define RF_TXPA_G3 0x33 /* RF TX PA control */
+#define RF_TX_BIAS_A 0x35
+#define RF_TX_BIAS_D 0x36
+#define RF_LOBF_9 0x38
+#define RF_RXRF_A3 0x3C /* */
+#define RF_TRSW 0x3F
+
+#define RF_TXRF_A2 0x41
+#define RF_TXPA_G4 0x46
+#define RF_TXPA_A4 0x4B
+#define RF_0x52 0x52
+#define RF_WE_LUT 0xEF
+
+
+/* */
+/* Bit Mask */
+/* */
+/* 1. Page1(0x100) */
+#define bBBResetB 0x100 /* Useless now? */
+#define bGlobalResetB 0x200
+#define bOFDMTxStart 0x4
+#define bCCKTxStart 0x8
+#define bCRC32Debug 0x100
+#define bPMACLoopback 0x10
+#define bTxLSIG 0xffffff
+#define bOFDMTxRate 0xf
+#define bOFDMTxReserved 0x10
+#define bOFDMTxLength 0x1ffe0
+#define bOFDMTxParity 0x20000
+#define bTxHTSIG1 0xffffff
+#define bTxHTMCSRate 0x7f
+#define bTxHTBW 0x80
+#define bTxHTLength 0xffff00
+#define bTxHTSIG2 0xffffff
+#define bTxHTSmoothing 0x1
+#define bTxHTSounding 0x2
+#define bTxHTReserved 0x4
+#define bTxHTAggreation 0x8
+#define bTxHTSTBC 0x30
+#define bTxHTAdvanceCoding 0x40
+#define bTxHTShortGI 0x80
+#define bTxHTNumberHT_LTF 0x300
+#define bTxHTCRC8 0x3fc00
+#define bCounterReset 0x10000
+#define bNumOfOFDMTx 0xffff
+#define bNumOfCCKTx 0xffff0000
+#define bTxIdleInterval 0xffff
+#define bOFDMService 0xffff0000
+#define bTxMACHeader 0xffffffff
+#define bTxDataInit 0xff
+#define bTxHTMode 0x100
+#define bTxDataType 0x30000
+#define bTxRandomSeed 0xffffffff
+#define bCCKTxPreamble 0x1
+#define bCCKTxSFD 0xffff0000
+#define bCCKTxSIG 0xff
+#define bCCKTxService 0xff00
+#define bCCKLengthExt 0x8000
+#define bCCKTxLength 0xffff0000
+#define bCCKTxCRC16 0xffff
+#define bCCKTxStatus 0x1
+#define bOFDMTxStatus 0x2
+
+#define IS_BB_REG_OFFSET_92S(_Offset) \
+ (((_Offset) >= 0x800) && ((_Offset) <= 0xfff))
+
+/* 2. Page8(0x800) */
+#define bRFMOD 0x1 /* Reg 0x800 rFPGA0_RFMOD */
+#define bJapanMode 0x2
+#define bCCKTxSC 0x30
+#define bCCKEn 0x1000000
+#define bOFDMEn 0x2000000
+
+#define bOFDMRxADCPhase 0x10000 /* Useless now */
+#define bOFDMTxDACPhase 0x40000
+#define bXATxAGC 0x3f
+
+#define bAntennaSelect 0x0300
+
+#define bXBTxAGC 0xf00 /* Reg 80c rFPGA0_TxGainStage */
+#define bXCTxAGC 0xf000
+#define bXDTxAGC 0xf0000
+
+#define bPAStart 0xf0000000 /* Useless now */
+#define bTRStart 0x00f00000
+#define bRFStart 0x0000f000
+#define bBBStart 0x000000f0
+#define bBBCCKStart 0x0000000f
+#define bPAEnd 0xf /* Reg0x814 */
+#define bTREnd 0x0f000000
+#define bRFEnd 0x000f0000
+#define bCCAMask 0x000000f0 /* T2R */
+#define bR2RCCAMask 0x00000f00
+#define bHSSI_R2TDelay 0xf8000000
+#define bHSSI_T2RDelay 0xf80000
+#define bContTxHSSI 0x400 /* change gain at continuous Tx */
+#define bIGFromCCK 0x200
+#define bAGCAddress 0x3f
+#define bRxHPTx 0x7000
+#define bRxHPT2R 0x38000
+#define bRxHPCCKIni 0xc0000
+#define bAGCTxCode 0xc00000
+#define bAGCRxCode 0x300000
+
+/* Reg 0x820~84f rFPGA0_XA_HSSIParameter1 */
+#define b3WireDataLength 0x800
+#define b3WireAddressLength 0x400
+
+#define b3WireRFPowerDown 0x1 /* Useless now */
+#define b5GPAPEPolarity 0x40000000
+#define b2GPAPEPolarity 0x80000000
+#define bRFSW_TxDefaultAnt 0x3
+#define bRFSW_TxOptionAnt 0x30
+#define bRFSW_RxDefaultAnt 0x300
+#define bRFSW_RxOptionAnt 0x3000
+#define bRFSI_3WireData 0x1
+#define bRFSI_3WireClock 0x2
+#define bRFSI_3WireLoad 0x4
+#define bRFSI_3WireRW 0x8
+#define bRFSI_3Wire 0xf
+
+#define bRFSI_RFENV 0x10 /* Reg 0x870 rFPGA0_XAB_RFInterfaceSW */
+
+#define bRFSI_TRSW 0x20 /* Useless now */
+#define bRFSI_TRSWB 0x40
+#define bRFSI_ANTSW 0x100
+#define bRFSI_ANTSWB 0x200
+#define bRFSI_PAPE 0x400
+#define bRFSI_PAPE5G 0x800
+#define bBandSelect 0x1
+#define bHTSIG2_GI 0x80
+#define bHTSIG2_Smoothing 0x01
+#define bHTSIG2_Sounding 0x02
+#define bHTSIG2_Aggreaton 0x08
+#define bHTSIG2_STBC 0x30
+#define bHTSIG2_AdvCoding 0x40
+#define bHTSIG2_NumOfHTLTF 0x300
+#define bHTSIG2_CRC8 0x3fc
+#define bHTSIG1_MCS 0x7f
+#define bHTSIG1_BandWidth 0x80
+#define bHTSIG1_HTLength 0xffff
+#define bLSIG_Rate 0xf
+#define bLSIG_Reserved 0x10
+#define bLSIG_Length 0x1fffe
+#define bLSIG_Parity 0x20
+#define bCCKRxPhase 0x4
+
+#define bLSSIReadAddress 0x7f800000 /* T65 RF */
+
+#define bLSSIReadEdge 0x80000000 /* LSSI "Read" edge signal */
+
+#define bLSSIReadBackData 0xfffff /* T65 RF */
+
+#define bLSSIReadOKFlag 0x1000 /* Useless now */
+#define bCCKSampleRate 0x8 /* 0: 44MHz, 1:88MHz */
+#define bRegulator0Standby 0x1
+#define bRegulatorPLLStandby 0x2
+#define bRegulator1Standby 0x4
+#define bPLLPowerUp 0x8
+#define bDPLLPowerUp 0x10
+#define bDA10PowerUp 0x20
+#define bAD7PowerUp 0x200
+#define bDA6PowerUp 0x2000
+#define bXtalPowerUp 0x4000
+#define b40MDClkPowerUP 0x8000
+#define bDA6DebugMode 0x20000
+#define bDA6Swing 0x380000
+
+/* Reg 0x880 rFPGA0_AnalogParameter1 20/40 CCK support switch 40/80 BB MHZ */
+#define bADClkPhase 0x4000000
+
+#define b80MClkDelay 0x18000000 /* Useless */
+#define bAFEWatchDogEnable 0x20000000
+
+/* Reg 0x884 rFPGA0_AnalogParameter2 Crystal cap */
+#define bXtalCap01 0xc0000000
+#define bXtalCap23 0x3
+#define bXtalCap92x 0x0f000000
+#define bXtalCap 0x0f000000
+
+#define bIntDifClkEnable 0x400 /* Useless */
+#define bExtSigClkEnable 0x800
+#define bBandgapMbiasPowerUp 0x10000
+#define bAD11SHGain 0xc0000
+#define bAD11InputRange 0x700000
+#define bAD11OPCurrent 0x3800000
+#define bIPathLoopback 0x4000000
+#define bQPathLoopback 0x8000000
+#define bAFELoopback 0x10000000
+#define bDA10Swing 0x7e0
+#define bDA10Reverse 0x800
+#define bDAClkSource 0x1000
+#define bAD7InputRange 0x6000
+#define bAD7Gain 0x38000
+#define bAD7OutputCMMode 0x40000
+#define bAD7InputCMMode 0x380000
+#define bAD7Current 0xc00000
+#define bRegulatorAdjust 0x7000000
+#define bAD11PowerUpAtTx 0x1
+#define bDA10PSAtTx 0x10
+#define bAD11PowerUpAtRx 0x100
+#define bDA10PSAtRx 0x1000
+#define bCCKRxAGCFormat 0x200
+#define bPSDFFTSamplepPoint 0xc000
+#define bPSDAverageNum 0x3000
+#define bIQPathControl 0xc00
+#define bPSDFreq 0x3ff
+#define bPSDAntennaPath 0x30
+#define bPSDIQSwitch 0x40
+#define bPSDRxTrigger 0x400000
+#define bPSDTxTrigger 0x80000000
+#define bPSDSineToneScale 0x7f000000
+#define bPSDReport 0xffff
+
+/* 3. Page9(0x900) */
+#define bOFDMTxSC 0x30000000 /* Useless */
+#define bCCKTxOn 0x1
+#define bOFDMTxOn 0x2
+#define bDebugPage 0xfff /* reset debug page and HWord, LWord */
+#define bDebugItem 0xff /* reset debug page and LWord */
+#define bAntL 0x10
+#define bAntNonHT 0x100
+#define bAntHT1 0x1000
+#define bAntHT2 0x10000
+#define bAntHT1S1 0x100000
+#define bAntNonHTS1 0x1000000
+
+/* 4. PageA(0xA00) */
+#define bCCKBBMode 0x3 /* Useless */
+#define bCCKTxPowerSaving 0x80
+#define bCCKRxPowerSaving 0x40
+
+#define bCCKSideBand 0x10 /* Reg 0xa00 rCCK0_System 20/40 */
+
+#define bCCKScramble 0x8 /* Useless */
+#define bCCKAntDiversity 0x8000
+#define bCCKCarrierRecovery 0x4000
+#define bCCKTxRate 0x3000
+#define bCCKDCCancel 0x0800
+#define bCCKISICancel 0x0400
+#define bCCKMatchFilter 0x0200
+#define bCCKEqualizer 0x0100
+#define bCCKPreambleDetect 0x800000
+#define bCCKFastFalseCCA 0x400000
+#define bCCKChEstStart 0x300000
+#define bCCKCCACount 0x080000
+#define bCCKcs_lim 0x070000
+#define bCCKBistMode 0x80000000
+#define bCCKCCAMask 0x40000000
+#define bCCKTxDACPhase 0x4
+#define bCCKRxADCPhase 0x20000000 /* r_rx_clk */
+#define bCCKr_cp_mode0 0x0100
+#define bCCKTxDCOffset 0xf0
+#define bCCKRxDCOffset 0xf
+#define bCCKCCAMode 0xc000
+#define bCCKFalseCS_lim 0x3f00
+#define bCCKCS_ratio 0xc00000
+#define bCCKCorgBit_sel 0x300000
+#define bCCKPD_lim 0x0f0000
+#define bCCKNewCCA 0x80000000
+#define bCCKRxHPofIG 0x8000
+#define bCCKRxIG 0x7f00
+#define bCCKLNAPolarity 0x800000
+#define bCCKRx1stGain 0x7f0000
+#define bCCKRFExtend 0x20000000 /* CCK Rx initial gain polarity */
+#define bCCKRxAGCSatLevel 0x1f000000
+#define bCCKRxAGCSatCount 0xe0
+#define bCCKRxRFSettle 0x1f /* AGCsamp_dly */
+#define bCCKFixedRxAGC 0x8000
+#define bCCKAntennaPolarity 0x2000
+#define bCCKTxFilterType 0x0c00
+#define bCCKRxAGCReportType 0x0300
+#define bCCKRxDAGCEn 0x80000000
+#define bCCKRxDAGCPeriod 0x20000000
+#define bCCKRxDAGCSatLevel 0x1f000000
+#define bCCKTimingRecovery 0x800000
+#define bCCKTxC0 0x3f0000
+#define bCCKTxC1 0x3f000000
+#define bCCKTxC2 0x3f
+#define bCCKTxC3 0x3f00
+#define bCCKTxC4 0x3f0000
+#define bCCKTxC5 0x3f000000
+#define bCCKTxC6 0x3f
+#define bCCKTxC7 0x3f00
+#define bCCKDebugPort 0xff0000
+#define bCCKDACDebug 0x0f000000
+#define bCCKFalseAlarmEnable 0x8000
+#define bCCKFalseAlarmRead 0x4000
+#define bCCKTRSSI 0x7f
+#define bCCKRxAGCReport 0xfe
+#define bCCKRxReport_AntSel 0x80000000
+#define bCCKRxReport_MFOff 0x40000000
+#define bCCKRxRxReport_SQLoss 0x20000000
+#define bCCKRxReport_Pktloss 0x10000000
+#define bCCKRxReport_Lockedbit 0x08000000
+#define bCCKRxReport_RateError 0x04000000
+#define bCCKRxReport_RxRate 0x03000000
+#define bCCKRxFACounterLower 0xff
+#define bCCKRxFACounterUpper 0xff000000
+#define bCCKRxHPAGCStart 0xe000
+#define bCCKRxHPAGCFinal 0x1c00
+#define bCCKRxFalseAlarmEnable 0x8000
+#define bCCKFACounterFreeze 0x4000
+#define bCCKTxPathSel 0x10000000
+#define bCCKDefaultRxPath 0xc000000
+#define bCCKOptionRxPath 0x3000000
+
+/* 5. PageC(0xC00) */
+#define bNumOfSTF 0x3 /* Useless */
+#define bShift_L 0xc0
+#define bGI_TH 0xc
+#define bRxPathA 0x1
+#define bRxPathB 0x2
+#define bRxPathC 0x4
+#define bRxPathD 0x8
+#define bTxPathA 0x1
+#define bTxPathB 0x2
+#define bTxPathC 0x4
+#define bTxPathD 0x8
+#define bTRSSIFreq 0x200
+#define bADCBackoff 0x3000
+#define bDFIRBackoff 0xc000
+#define bTRSSILatchPhase 0x10000
+#define bRxIDCOffset 0xff
+#define bRxQDCOffset 0xff00
+#define bRxDFIRMode 0x1800000
+#define bRxDCNFType 0xe000000
+#define bRXIQImb_A 0x3ff
+#define bRXIQImb_B 0xfc00
+#define bRXIQImb_C 0x3f0000
+#define bRXIQImb_D 0xffc00000
+#define bDC_dc_Notch 0x60000
+#define bRxNBINotch 0x1f000000
+#define bPD_TH 0xf
+#define bPD_TH_Opt2 0xc000
+#define bPWED_TH 0x700
+#define bIfMF_Win_L 0x800
+#define bPD_Option 0x1000
+#define bMF_Win_L 0xe000
+#define bBW_Search_L 0x30000
+#define bwin_enh_L 0xc0000
+#define bBW_TH 0x700000
+#define bED_TH2 0x3800000
+#define bBW_option 0x4000000
+#define bRatio_TH 0x18000000
+#define bWindow_L 0xe0000000
+#define bSBD_Option 0x1
+#define bFrame_TH 0x1c
+#define bFS_Option 0x60
+#define bDC_Slope_check 0x80
+#define bFGuard_Counter_DC_L 0xe00
+#define bFrame_Weight_Short 0x7000
+#define bSub_Tune 0xe00000
+#define bFrame_DC_Length 0xe000000
+#define bSBD_start_offset 0x30000000
+#define bFrame_TH_2 0x7
+#define bFrame_GI2_TH 0x38
+#define bGI2_Sync_en 0x40
+#define bSarch_Short_Early 0x300
+#define bSarch_Short_Late 0xc00
+#define bSarch_GI2_Late 0x70000
+#define bCFOAntSum 0x1
+#define bCFOAcc 0x2
+#define bCFOStartOffset 0xc
+#define bCFOLookBack 0x70
+#define bCFOSumWeight 0x80
+#define bDAGCEnable 0x10000
+#define bTXIQImb_A 0x3ff
+#define bTXIQImb_B 0xfc00
+#define bTXIQImb_C 0x3f0000
+#define bTXIQImb_D 0xffc00000
+#define bTxIDCOffset 0xff
+#define bTxQDCOffset 0xff00
+#define bTxDFIRMode 0x10000
+#define bTxPesudoNoiseOn 0x4000000
+#define bTxPesudoNoise_A 0xff
+#define bTxPesudoNoise_B 0xff00
+#define bTxPesudoNoise_C 0xff0000
+#define bTxPesudoNoise_D 0xff000000
+#define bCCADropOption 0x20000
+#define bCCADropThres 0xfff00000
+#define bEDCCA_H 0xf
+#define bEDCCA_L 0xf0
+#define bLambda_ED 0x300
+#define bRxInitialGain 0x7f
+#define bRxAntDivEn 0x80
+#define bRxAGCAddressForLNA 0x7f00
+#define bRxHighPowerFlow 0x8000
+#define bRxAGCFreezeThres 0xc0000
+#define bRxFreezeStep_AGC1 0x300000
+#define bRxFreezeStep_AGC2 0xc00000
+#define bRxFreezeStep_AGC3 0x3000000
+#define bRxFreezeStep_AGC0 0xc000000
+#define bRxRssi_Cmp_En 0x10000000
+#define bRxQuickAGCEn 0x20000000
+#define bRxAGCFreezeThresMode 0x40000000
+#define bRxOverFlowCheckType 0x80000000
+#define bRxAGCShift 0x7f
+#define bTRSW_Tri_Only 0x80
+#define bPowerThres 0x300
+#define bRxAGCEn 0x1
+#define bRxAGCTogetherEn 0x2
+#define bRxAGCMin 0x4
+#define bRxHP_Ini 0x7
+#define bRxHP_TRLNA 0x70
+#define bRxHP_RSSI 0x700
+#define bRxHP_BBP1 0x7000
+#define bRxHP_BBP2 0x70000
+#define bRxHP_BBP3 0x700000
+#define bRSSI_H 0x7f0000 /* threshold for high power */
+#define bRSSI_Gen 0x7f000000 /* threshold for ant diversity */
+#define bRxSettle_TRSW 0x7
+#define bRxSettle_LNA 0x38
+#define bRxSettle_RSSI 0x1c0
+#define bRxSettle_BBP 0xe00
+#define bRxSettle_RxHP 0x7000
+#define bRxSettle_AntSW_RSSI 0x38000
+#define bRxSettle_AntSW 0xc0000
+#define bRxProcessTime_DAGC 0x300000
+#define bRxSettle_HSSI 0x400000
+#define bRxProcessTime_BBPPW 0x800000
+#define bRxAntennaPowerShift 0x3000000
+#define bRSSITableSelect 0xc000000
+#define bRxHP_Final 0x7000000
+#define bRxHTSettle_BBP 0x7
+#define bRxHTSettle_HSSI 0x8
+#define bRxHTSettle_RxHP 0x70
+#define bRxHTSettle_BBPPW 0x80
+#define bRxHTSettle_Idle 0x300
+#define bRxHTSettle_Reserved 0x1c00
+#define bRxHTRxHPEn 0x8000
+#define bRxHTAGCFreezeThres 0x30000
+#define bRxHTAGCTogetherEn 0x40000
+#define bRxHTAGCMin 0x80000
+#define bRxHTAGCEn 0x100000
+#define bRxHTDAGCEn 0x200000
+#define bRxHTRxHP_BBP 0x1c00000
+#define bRxHTRxHP_Final 0xe0000000
+#define bRxPWRatioTH 0x3
+#define bRxPWRatioEn 0x4
+#define bRxMFHold 0x3800
+#define bRxPD_Delay_TH1 0x38
+#define bRxPD_Delay_TH2 0x1c0
+#define bRxPD_DC_COUNT_MAX 0x600
+#define bRxPD_Delay_TH 0x8000
+#define bRxProcess_Delay 0xf0000
+#define bRxSearchrange_GI2_Early 0x700000
+#define bRxFrame_Guard_Counter_L 0x3800000
+#define bRxSGI_Guard_L 0xc000000
+#define bRxSGI_Search_L 0x30000000
+#define bRxSGI_TH 0xc0000000
+#define bDFSCnt0 0xff
+#define bDFSCnt1 0xff00
+#define bDFSFlag 0xf0000
+#define bMFWeightSum 0x300000
+#define bMinIdxTH 0x7f000000
+#define bDAFormat 0x40000
+#define bTxChEmuEnable 0x01000000
+#define bTRSWIsolation_A 0x7f
+#define bTRSWIsolation_B 0x7f00
+#define bTRSWIsolation_C 0x7f0000
+#define bTRSWIsolation_D 0x7f000000
+#define bExtLNAGain 0x7c00
+
+/* 6. PageE(0xE00) */
+#define bSTBCEn 0x4 /* Useless */
+#define bAntennaMapping 0x10
+#define bNss 0x20
+#define bCFOAntSumD 0x200
+#define bPHYCounterReset 0x8000000
+#define bCFOReportGet 0x4000000
+#define bOFDMContinueTx 0x10000000
+#define bOFDMSingleCarrier 0x20000000
+#define bOFDMSingleTone 0x40000000
+#define bHTDetect 0x100
+#define bCFOEn 0x10000
+#define bCFOValue 0xfff00000
+#define bSigTone_Re 0x3f
+#define bSigTone_Im 0x7f00
+#define bCounter_CCA 0xffff
+#define bCounter_ParityFail 0xffff0000
+#define bCounter_RateIllegal 0xffff
+#define bCounter_CRC8Fail 0xffff0000
+#define bCounter_MCSNoSupport 0xffff
+#define bCounter_FastSync 0xffff
+#define bShortCFO 0xfff
+#define bShortCFOTLength 12 /* total */
+#define bShortCFOFLength 11 /* fraction */
+#define bLongCFO 0x7ff
+#define bLongCFOTLength 11
+#define bLongCFOFLength 11
+#define bTailCFO 0x1fff
+#define bTailCFOTLength 13
+#define bTailCFOFLength 12
+#define bmax_en_pwdB 0xffff
+#define bCC_power_dB 0xffff0000
+#define bnoise_pwdB 0xffff
+#define bPowerMeasTLength 10
+#define bPowerMeasFLength 3
+#define bRx_HT_BW 0x1
+#define bRxSC 0x6
+#define bRx_HT 0x8
+#define bNB_intf_det_on 0x1
+#define bIntf_win_len_cfg 0x30
+#define bNB_Intf_TH_cfg 0x1c0
+#define bRFGain 0x3f
+#define bTableSel 0x40
+#define bTRSW 0x80
+#define bRxSNR_A 0xff
+#define bRxSNR_B 0xff00
+#define bRxSNR_C 0xff0000
+#define bRxSNR_D 0xff000000
+#define bSNREVMTLength 8
+#define bSNREVMFLength 1
+#define bCSI1st 0xff
+#define bCSI2nd 0xff00
+#define bRxEVM1st 0xff0000
+#define bRxEVM2nd 0xff000000
+#define bSIGEVM 0xff
+#define bPWDB 0xff00
+#define bSGIEN 0x10000
+
+#define bSFactorQAM1 0xf /* Useless */
+#define bSFactorQAM2 0xf0
+#define bSFactorQAM3 0xf00
+#define bSFactorQAM4 0xf000
+#define bSFactorQAM5 0xf0000
+#define bSFactorQAM6 0xf0000
+#define bSFactorQAM7 0xf00000
+#define bSFactorQAM8 0xf000000
+#define bSFactorQAM9 0xf0000000
+#define bCSIScheme 0x100000
+
+#define bNoiseLvlTopSet 0x3 /* Useless */
+#define bChSmooth 0x4
+#define bChSmoothCfg1 0x38
+#define bChSmoothCfg2 0x1c0
+#define bChSmoothCfg3 0xe00
+#define bChSmoothCfg4 0x7000
+#define bMRCMode 0x800000
+#define bTHEVMCfg 0x7000000
+
+#define bLoopFitType 0x1 /* Useless */
+#define bUpdCFO 0x40
+#define bUpdCFOOffData 0x80
+#define bAdvUpdCFO 0x100
+#define bAdvTimeCtrl 0x800
+#define bUpdClko 0x1000
+#define bFC 0x6000
+#define bTrackingMode 0x8000
+#define bPhCmpEnable 0x10000
+#define bUpdClkoLTF 0x20000
+#define bComChCFO 0x40000
+#define bCSIEstiMode 0x80000
+#define bAdvUpdEqz 0x100000
+#define bUChCfg 0x7000000
+#define bUpdEqz 0x8000000
+
+/* Rx Pseudo noise */
+#define bRxPesudoNoiseOn 0x20000000 /* Useless */
+#define bRxPesudoNoise_A 0xff
+#define bRxPesudoNoise_B 0xff00
+#define bRxPesudoNoise_C 0xff0000
+#define bRxPesudoNoise_D 0xff000000
+#define bPesudoNoiseState_A 0xffff
+#define bPesudoNoiseState_B 0xffff0000
+#define bPesudoNoiseState_C 0xffff
+#define bPesudoNoiseState_D 0xffff0000
+
+/* 7. RF Register */
+/* Zebra1 */
+#define bZebra1_HSSIEnable 0x8 /* Useless */
+#define bZebra1_TRxControl 0xc00
+#define bZebra1_TRxGainSetting 0x07f
+#define bZebra1_RxCorner 0xc00
+#define bZebra1_TxChargePump 0x38
+#define bZebra1_RxChargePump 0x7
+#define bZebra1_ChannelNum 0xf80
+#define bZebra1_TxLPFBW 0x400
+#define bZebra1_RxLPFBW 0x600
+
+/* Zebra4 */
+#define bRTL8256RegModeCtrl1 0x100 /* Useless */
+#define bRTL8256RegModeCtrl0 0x40
+#define bRTL8256_TxLPFBW 0x18
+#define bRTL8256_RxLPFBW 0x600
+
+/* RTL8258 */
+#define bRTL8258_TxLPFBW 0xc /* Useless */
+#define bRTL8258_RxLPFBW 0xc00
+#define bRTL8258_RSSILPFBW 0xc0
+
+
+/* */
+/* Other Definition */
+/* */
+
+/* byte enable for sb_write */
+#define bByte0 0x1 /* Useless */
+#define bByte1 0x2
+#define bByte2 0x4
+#define bByte3 0x8
+#define bWord0 0x3
+#define bWord1 0xc
+#define bDWord 0xf
+
+/* for PutRegSetting & GetRegSetting BitMask */
+#define bMaskByte0 0xff /* Reg 0xc50 rOFDM0_XAAGCCore~0xC6f */
+#define bMaskByte1 0xff00
+#define bMaskByte2 0xff0000
+#define bMaskByte3 0xff000000
+#define bMaskHWord 0xffff0000
+#define bMaskLWord 0x0000ffff
+#define bMaskDWord 0xffffffff
+#define bMask12Bits 0xfff
+#define bMaskH4Bits 0xf0000000
+#define bMaskOFDM_D 0xffc00000
+#define bMaskCCK 0x3f3f3f3f
+
+/* for PutRFRegsetting & GetRFRegSetting BitMask */
+#define bRFRegOffsetMask 0xfffff
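
The bMask*/bRFRegOffsetMask values above are field masks intended for the PutRegSetting/GetRegSetting style register helpers named in the comments. The following is a minimal sketch of the shift-from-mask idiom such helpers rely on; the helper names are illustrative assumptions, not the driver's actual API.

    /* Sketch only: how a bMask*-style mask selects and aligns a register field.
     * u32 is the kernel type from <linux/types.h>. */
    static inline u32 mask_shift(u32 bitmask)
    {
    	u32 shift = 0;

    	while (shift < 32 && !((bitmask >> shift) & 0x1))
    		shift++;
    	return shift;
    }

    static inline u32 get_reg_bits(u32 reg_val, u32 bitmask)
    {
    	return (reg_val & bitmask) >> mask_shift(bitmask);
    }

    static inline u32 set_reg_bits(u32 reg_val, u32 bitmask, u32 field)
    {
    	return (reg_val & ~bitmask) | ((field << mask_shift(bitmask)) & bitmask);
    }

For example, get_reg_bits(val, bMaskByte1) returns bits [15:8] of val, while bMaskDWord passes the whole 32-bit word through unchanged.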
+
+#define bEnable 0x1 /* Useless */
+#define bDisable 0x0
+
+#define LeftAntenna 0x0 /* Useless */
+#define RightAntenna 0x1
+
+#define tCheckTxStatus 500 /* 500ms Useless */
+#define tUpdateRxCounter 100 /* 100ms */
+
+#define rateCCK 0 /* Useless */
+#define rateOFDM 1
+#define rateHT 2
+
+/* define Register-End */
+#define bPMAC_End 0x1ff /* Useless */
+#define bFPGAPHY0_End 0x8ff
+#define bFPGAPHY1_End 0x9ff
+#define bCCKPHY0_End 0xaff
+#define bOFDMPHY0_End 0xcff
+#define bOFDMPHY1_End 0xdff
+
+#define bPMACControl 0x0 /* Useless */
+#define bWMACControl 0x1
+#define bWNICControl 0x2
+
+#define PathA 0x0 /* Useless */
+#define PathB 0x1
+#define PathC 0x2
+#define PathD 0x3
+
+/*--------------------------Define Parameters-------------------------------*/
+
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPwrSeq.h b/drivers/staging/rtl8188eu/include/Hal8188EPwrSeq.h
new file mode 100644
index 00000000000..20d0b3e3ad7
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPwrSeq.h
@@ -0,0 +1,176 @@
+
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef __HAL8188EPWRSEQ_H__
+#define __HAL8188EPWRSEQ_H__
+
+#include "HalPwrSeqCmd.h"
+
+/*
+ Check document WM-20110607-Paul-RTL8188E_Power_Architecture-R02.vsd
+ There are 6 HW Power States:
+ 0: POFF--Power Off
+ 1: PDN--Power Down
+ 2: CARDEMU--Card Emulation
+ 3: ACT--Active Mode
+ 4: LPS--Low Power State
+ 5: SUS--Suspend
+
+ The transitions between the different states are defined below
+ TRANS_CARDEMU_TO_ACT
+ TRANS_ACT_TO_CARDEMU
+ TRANS_CARDEMU_TO_SUS
+ TRANS_SUS_TO_CARDEMU
+ TRANS_CARDEMU_TO_PDN
+ TRANS_ACT_TO_LPS
+ TRANS_LPS_TO_ACT
+
+ TRANS_END
+
+ PWR SEQ Version: rtl8188E_PwrSeq_V09.h
+*/
+#define RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS 10
+#define RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS 10
+#define RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS 10
+#define RTL8188E_TRANS_SUS_TO_CARDEMU_STEPS 10
+#define RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS 10
+#define RTL8188E_TRANS_PDN_TO_CARDEMU_STEPS 10
+#define RTL8188E_TRANS_ACT_TO_LPS_STEPS 15
+#define RTL8188E_TRANS_LPS_TO_ACT_STEPS 15
+#define RTL8188E_TRANS_END_STEPS 1
+
+
+#define RTL8188E_TRANS_CARDEMU_TO_ACT \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT1, BIT1},/* wait till 0x04[17] = 1 power ready*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0|BIT1, 0}, /* 0x02[1:0] = 0 reset BB*/ \
+ {0x0026, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, BIT7}, /*0x24[23] = 2b'01 Schmitt trigger */ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, 0}, /* 0x04[15] = 0 disable HWPDN (control by DRV)*/\
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4|BIT3, 0}, /*0x04[12:11] = 2b'00 disable WL suspend*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, BIT0}, /*0x04[8] = 1 polling until return 0*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT0, 0}, /*wait till 0x04[8] = 0*/ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0}, /*LDO normal mode*/ \
+ {0x0074, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, BIT4}, /*SDIO Driving*/ \
+
+#define RTL8188E_TRANS_ACT_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x001F, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0},/*0x1F[7:0] = 0 turn off RF*/ \
+ {0x0023, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, BIT4}, /*LDO Sleep mode*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, BIT1}, /*0x04[9] = 1 turn off MAC by HW state machine*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT1, 0}, /*wait till 0x04[9] = 0 polling until return 0 to disable*/ \
+
+#define RTL8188E_TRANS_CARDEMU_TO_SUS \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, BIT3}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, BIT3|BIT4}, /*0x04[12:11] = 2b'11enable WL suspend for PCIe*/ \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, BIT7}, /* 0x04[31:30] = 2b'10 enable bandgap mbias in suspend */ \
+ {0x0041, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0}, /*Clear SIC_EN register 0x40[12] = 1'b0 */ \
+ {0xfe10, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, BIT4}, /*Set USB suspend enable local register 0xfe10[4]=1 */ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, BIT0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, 0}, /*wait power state to suspend*/
+
+#define RTL8188E_TRANS_SUS_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, 0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, BIT1}, /*wait power state to suspend*/\
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, 0}, /*0x04[12:11] = 2b'01 enable WL suspend*/
+
+#define RTL8188E_TRANS_CARDEMU_TO_CARDDIS \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0026, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, BIT7}, /*0x24[23] = 2b'01 Schmitt trigger */ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, BIT3}, /*0x04[12:11] = 2b'01 enable WL suspend*/ \
+ {0x0007, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, /* 0x04[31:30] = 2b'10 enable bandgap mbias in suspend */ \
+ {0x0041, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK|PWR_INTF_SDIO_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0}, /*Clear SIC_EN register 0x40[12] = 1'b0 */ \
+ {0xfe10, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, BIT4}, /*Set USB suspend enable local register 0xfe10[4]=1 */ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, BIT0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, 0}, /*wait power state to suspend*/
+
+#define RTL8188E_TRANS_CARDDIS_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, BIT0, 0}, /*Set SDIO suspend local register*/ \
+ {0x0086, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_POLLING, BIT1, BIT1}, /*wait power state to suspend*/\
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT3|BIT4, 0}, /*0x04[12:11] = 2b'01 enable WL suspend*/
+
+#define RTL8188E_TRANS_CARDEMU_TO_PDN \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0x0006, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0},/* 0x04[16] = 0*/\
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, BIT7},/* 0x04[15] = 1*/
+
+#define RTL8188E_TRANS_PDN_TO_CARDEMU \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here */ \
+ {0x0005, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT7, 0},/* 0x04[15] = 0*/
+
+/* This is used by the driver for the LPSRadioOff procedure, not for the FW LPS step */
+#define RTL8188E_TRANS_ACT_TO_LPS \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here */ \
+ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x7F},/*Tx Pause*/ \
+ {0x05F8, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05F9, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05FA, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x05FB, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, 0xFF, 0},/*Should be zero if no packet is transmitting*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT0, 0},/*CCK and OFDM are disabled, and clocks are gated*/ \
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_US},/*Delay 1us*/ \
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x3F},/*Reset MAC TRX*/ \
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, 0},/*check if removed later*/ \
+ {0x0553, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT5, BIT5},/*Respond TxOK to scheduler*/ \
+
+
+#define RTL8188E_TRANS_LPS_TO_ACT \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here */ \
+ {0x0080, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_SDIO_MSK, PWR_BASEADDR_SDIO, PWR_CMD_WRITE, 0xFF, 0x84}, /*SDIO RPWM*/\
+ {0xFE58, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_USB_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*USB RPWM*/\
+ {0x0361, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_PCI_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0x84}, /*PCIe RPWM*/\
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_DELAY, 0, PWRSEQ_DELAY_MS}, /*Delay*/\
+ {0x0008, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT4, 0}, /*. 0x08[4] = 0 switch TSF to 40M*/\
+ {0x0109, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_POLLING, BIT7, 0}, /*Polling 0x109[7]=0 TSF in 40M*/\
+ {0x0029, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT6|BIT7, 0}, /*. 0x29[7:6] = 2b'00 enable BB clock*/\
+ {0x0101, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1, BIT1}, /*. 0x101[1] = 1*/\
+ {0x0100, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0xFF}, /*. 0x100[7:0] = 0xFF enable WMAC TRX*/\
+ {0x0002, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, BIT1|BIT0, BIT1|BIT0}, /*. 0x02[1:0] = 2b'11 enable BB macro*/\
+ {0x0522, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, PWR_BASEADDR_MAC, PWR_CMD_WRITE, 0xFF, 0}, /*. 0x522 = 0*/
+
+#define RTL8188E_TRANS_END \
+ /* format */ \
+ /* { offset, cut_msk, fab_msk|interface_msk, base|cmd, msk, value }, comments here*/ \
+ {0xFFFF, PWR_CUT_ALL_MSK, PWR_FAB_ALL_MSK, PWR_INTF_ALL_MSK, 0, PWR_CMD_END, 0, 0}, /* */
+
+
+extern struct wl_pwr_cfg rtl8188E_power_on_flow[RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS+RTL8188E_TRANS_END_STEPS];
+extern struct wl_pwr_cfg rtl8188E_radio_off_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_END_STEPS];
+extern struct wl_pwr_cfg rtl8188E_card_disable_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS+RTL8188E_TRANS_END_STEPS];
+extern struct wl_pwr_cfg rtl8188E_card_enable_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS+RTL8188E_TRANS_END_STEPS];
+extern struct wl_pwr_cfg rtl8188E_suspend_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS+RTL8188E_TRANS_END_STEPS];
+extern struct wl_pwr_cfg rtl8188E_resume_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_CARDEMU_TO_SUS_STEPS+RTL8188E_TRANS_END_STEPS];
+extern struct wl_pwr_cfg rtl8188E_hwpdn_flow[RTL8188E_TRANS_ACT_TO_CARDEMU_STEPS+RTL8188E_TRANS_CARDEMU_TO_PDN_STEPS+RTL8188E_TRANS_END_STEPS];
+extern struct wl_pwr_cfg rtl8188E_enter_lps_flow[RTL8188E_TRANS_ACT_TO_LPS_STEPS+RTL8188E_TRANS_END_STEPS];
+extern struct wl_pwr_cfg rtl8188E_leave_lps_flow[RTL8188E_TRANS_LPS_TO_ACT_STEPS+RTL8188E_TRANS_END_STEPS];
+
+#endif /* __HAL8188EPWRSEQ_H__ */
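
Each RTL8188E_TRANS_* macro above expands to a comma-terminated run of struct wl_pwr_cfg initializers, so a companion C file can splice them into the extern flow tables declared at the end of this header. A hedged sketch of that composition follows; the .c side is not part of this patch, so treat the exact array contents as an assumption.

    /* Sketch, assuming the usual pattern: each flow is the matching transition
     * fragment(s) followed by RTL8188E_TRANS_END. */
    struct wl_pwr_cfg rtl8188E_power_on_flow[RTL8188E_TRANS_CARDEMU_TO_ACT_STEPS +
    					 RTL8188E_TRANS_END_STEPS] = {
    	RTL8188E_TRANS_CARDEMU_TO_ACT
    	RTL8188E_TRANS_END
    };

    struct wl_pwr_cfg rtl8188E_enter_lps_flow[RTL8188E_TRANS_ACT_TO_LPS_STEPS +
    					  RTL8188E_TRANS_END_STEPS] = {
    	RTL8188E_TRANS_ACT_TO_LPS
    	RTL8188E_TRANS_END
    };

The *_STEPS counts are generous upper bounds on the number of entries each fragment contributes; the parser is presumably meant to stop at the PWR_CMD_END entry rather than rely on the array length.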
diff --git a/drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h b/drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h
new file mode 100644
index 00000000000..21996a1173e
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/Hal8188ERateAdaptive.h
@@ -0,0 +1,75 @@
+#ifndef __INC_RA_H
+#define __INC_RA_H
+/*++
+Copyright (c) Realtek Semiconductor Corp. All rights reserved.
+
+Module Name:
+ RateAdaptive.h
+
+Abstract:
+ Prototype of RA and related data structure.
+
+Major Change History:
+ When Who What
+ ---------- --------------- -------------------------------
+ 2011-08-12 Page Create.
+--*/
+
+/* Rate adaptive define */
+#define PERENTRY 23
+#define RETRYSIZE 5
+#define RATESIZE 28
+#define TX_RPT2_ITEM_SIZE 8
+
+/* */
+/* TX report 2 format in Rx desc */
+/* */
+#define GET_TX_RPT2_DESC_PKT_LEN_88E(__pRxStatusDesc) \
+ LE_BITS_TO_4BYTE(__pRxStatusDesc, 0, 9)
+#define GET_TX_RPT2_DESC_MACID_VALID_1_88E(__pRxStatusDesc) \
+ LE_BITS_TO_4BYTE(__pRxStatusDesc+16, 0, 32)
+#define GET_TX_RPT2_DESC_MACID_VALID_2_88E(__pRxStatusDesc) \
+ LE_BITS_TO_4BYTE(__pRxStatusDesc+20, 0, 32)
+
+#define GET_TX_REPORT_TYPE1_RERTY_0(__pAddr) \
+ LE_BITS_TO_4BYTE(__pAddr, 0, 16)
+#define GET_TX_REPORT_TYPE1_RERTY_1(__pAddr) \
+ LE_BITS_TO_1BYTE(__pAddr+2, 0, 8)
+#define GET_TX_REPORT_TYPE1_RERTY_2(__pAddr) \
+ LE_BITS_TO_1BYTE(__pAddr+3, 0, 8)
+#define GET_TX_REPORT_TYPE1_RERTY_3(__pAddr) \
+ LE_BITS_TO_1BYTE(__pAddr+4, 0, 8)
+#define GET_TX_REPORT_TYPE1_RERTY_4(__pAddr) \
+ LE_BITS_TO_1BYTE(__pAddr+4+1, 0, 8)
+#define GET_TX_REPORT_TYPE1_DROP_0(__pAddr) \
+ LE_BITS_TO_1BYTE(__pAddr+4+2, 0, 8)
+#define GET_TX_REPORT_TYPE1_DROP_1(__pAddr) \
+ LE_BITS_TO_1BYTE(__pAddr+4+3, 0, 8)
+
+/* End rate adaptive define */
+
+void ODM_RASupport_Init(struct odm_dm_struct *dm_odm);
+
+int ODM_RAInfo_Init_all(struct odm_dm_struct *dm_odm);
+
+int ODM_RAInfo_Init(struct odm_dm_struct *dm_odm, u8 MacID);
+
+u8 ODM_RA_GetShortGI_8188E(struct odm_dm_struct *dm_odm, u8 MacID);
+
+u8 ODM_RA_GetDecisionRate_8188E(struct odm_dm_struct *dm_odm, u8 MacID);
+
+u8 ODM_RA_GetHwPwrStatus_8188E(struct odm_dm_struct *dm_odm, u8 MacID);
+void ODM_RA_UpdateRateInfo_8188E(struct odm_dm_struct *dm_odm, u8 MacID,
+ u8 RateID, u32 RateMask,
+ u8 SGIEnable);
+
+void ODM_RA_SetRSSI_8188E(struct odm_dm_struct *dm_odm, u8 macid,
+ u8 rssi);
+
+void ODM_RA_TxRPT2Handle_8188E(struct odm_dm_struct *dm_odm,
+ u8 *txrpt_buf, u16 txrpt_len,
+ u32 validentry0, u32 validentry1);
+
+void ODM_RA_Set_TxRPT_Time(struct odm_dm_struct *dm_odm, u16 minRptTime);
+
+#endif
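
The GET_TX_RPT2_DESC_* and GET_TX_REPORT_TYPE1_* accessors above build on the driver's LE_BITS_TO_4BYTE/LE_BITS_TO_1BYTE helpers, which are defined elsewhere in the tree. The sketch below only illustrates the kind of little-endian bit-field extraction they perform; it is not the driver's definition.

    /* Illustrative only: pull bit_len bits starting at bit_offset out of a
     * little-endian 4-byte field. */
    static u32 le_bits_to_u32(const u8 *start, unsigned int bit_offset,
    			  unsigned int bit_len)
    {
    	u32 dword = start[0] | (start[1] << 8) | (start[2] << 16) |
    		    ((u32)start[3] << 24);
    	u32 mask = (bit_len >= 32) ? 0xffffffff : ((1u << bit_len) - 1);

    	return (dword >> bit_offset) & mask;
    }

Under that reading, GET_TX_RPT2_DESC_PKT_LEN_88E extracts the 9-bit packet length from bits [8:0] of the first report dword, while the MACID_VALID accessors read full 32-bit bitmaps at byte offsets 16 and 20.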
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EReg.h b/drivers/staging/rtl8188eu/include/Hal8188EReg.h
new file mode 100644
index 00000000000..d880b0cc803
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/Hal8188EReg.h
@@ -0,0 +1,46 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+/* */
+/* File Name: Hal8188EReg.h */
+/* */
+/* Description: */
+/* */
+/* This file is for RTL8188E register definition. */
+/* */
+/* */
+/* */
+#ifndef __HAL_8188E_REG_H__
+#define __HAL_8188E_REG_H__
+
+/* */
+/* Register Definition */
+/* */
+#define TRX_ANTDIV_PATH 0x860
+#define RX_ANTDIV_PATH 0xb2c
+#define ODM_R_A_AGC_CORE1_8188E 0xc50
+
+
+/* */
+/* Bitmap Definition */
+/* */
+#define BIT_FA_RESET_8188E BIT0
+
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/HalHWImg8188E_BB.h b/drivers/staging/rtl8188eu/include/HalHWImg8188E_BB.h
new file mode 100644
index 00000000000..e57452104bf
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/HalHWImg8188E_BB.h
@@ -0,0 +1,44 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along with
+* this program; if not, write to the Free Software Foundation, Inc.,
+* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+*
+*
+******************************************************************************/
+
+#ifndef __INC_BB_8188E_HW_IMG_H
+#define __INC_BB_8188E_HW_IMG_H
+
+/* static bool CheckCondition(const u32 Condition, const u32 Hex); */
+
+/******************************************************************************
+* AGC_TAB_1T.TXT
+******************************************************************************/
+
+enum HAL_STATUS ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *odm);
+
+/******************************************************************************
+* PHY_REG_1T.TXT
+******************************************************************************/
+
+enum HAL_STATUS ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *odm);
+
+/******************************************************************************
+* PHY_REG_PG.TXT
+******************************************************************************/
+
+void ODM_ReadAndConfig_PHY_REG_PG_8188E(struct odm_dm_struct *dm_odm);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h b/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h
new file mode 100644
index 00000000000..1bf9bc70a69
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h
@@ -0,0 +1,34 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along with
+* this program; if not, write to the Free Software Foundation, Inc.,
+* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+*
+*
+******************************************************************************/
+
+#ifndef __INC_FW_8188E_HW_IMG_H
+#define __INC_FW_8188E_HW_IMG_H
+
+
+/******************************************************************************
+* FW_AP.TXT
+******************************************************************************/
+/******************************************************************************
+* FW_WoWLAN.TXT
+******************************************************************************/
+#define ArrayLength_8188E_FW_WoWLAN 15764
+extern const u8 Array_8188E_FW_WoWLAN[ArrayLength_8188E_FW_WoWLAN];
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/HalHWImg8188E_MAC.h b/drivers/staging/rtl8188eu/include/HalHWImg8188E_MAC.h
new file mode 100644
index 00000000000..acf78b94fdd
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/HalHWImg8188E_MAC.h
@@ -0,0 +1,30 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along with
+* this program; if not, write to the Free Software Foundation, Inc.,
+* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+*
+*
+******************************************************************************/
+
+#ifndef __INC_MAC_8188E_HW_IMG_H
+#define __INC_MAC_8188E_HW_IMG_H
+
+/******************************************************************************
+* MAC_REG.TXT
+******************************************************************************/
+
+enum HAL_STATUS ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *pDM_Odm);
+
+#endif /* end of HWIMG_SUPPORT */
diff --git a/drivers/staging/rtl8188eu/include/HalHWImg8188E_RF.h b/drivers/staging/rtl8188eu/include/HalHWImg8188E_RF.h
new file mode 100644
index 00000000000..8ecb40d26c7
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/HalHWImg8188E_RF.h
@@ -0,0 +1,30 @@
+/******************************************************************************
+*
+* Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify it
+* under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but WITHOUT
+* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+* more details.
+*
+* You should have received a copy of the GNU General Public License along with
+* this program; if not, write to the Free Software Foundation, Inc.,
+* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+*
+*
+******************************************************************************/
+
+#ifndef __INC_RF_8188E_HW_IMG_H
+#define __INC_RF_8188E_HW_IMG_H
+
+/******************************************************************************
+ * RadioA_1T.TXT
+ ******************************************************************************/
+
+enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *odm);
+
+#endif /* end of HWIMG_SUPPORT */
diff --git a/drivers/staging/rtl8188eu/include/HalPhyRf.h b/drivers/staging/rtl8188eu/include/HalPhyRf.h
new file mode 100644
index 00000000000..1ec497100da
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/HalPhyRf.h
@@ -0,0 +1,30 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef __HAL_PHY_RF_H__
+#define __HAL_PHY_RF_H__
+
+#define ODM_TARGET_CHNL_NUM_2G_5G 59
+
+void ODM_ResetIQKResult(struct odm_dm_struct *pDM_Odm);
+
+u8 ODM_GetRightChnlPlaceforIQK(u8 chnl);
+
+#endif /* #ifndef __HAL_PHY_RF_H__ */
diff --git a/drivers/staging/rtl8188eu/include/HalPhyRf_8188e.h b/drivers/staging/rtl8188eu/include/HalPhyRf_8188e.h
new file mode 100644
index 00000000000..fa583f24832
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/HalPhyRf_8188e.h
@@ -0,0 +1,63 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef __HAL_PHY_RF_8188E_H__
+#define __HAL_PHY_RF_8188E_H__
+
+/*--------------------------Define Parameters-------------------------------*/
+#define IQK_DELAY_TIME_88E 10 /* ms */
+#define index_mapping_NUM_88E 15
+#define AVG_THERMAL_NUM_88E 4
+
+
+void ODM_TxPwrTrackAdjust88E(struct odm_dm_struct *pDM_Odm,
+ u8 Type, /* 0 = OFDM, 1 = CCK */
+ u8 *pDirection,/* 1 = +(incr) 2 = -(decr) */
+ u32 *pOutWriteVal); /* Tx tracking CCK/OFDM BB
+ * swing index adjust */
+
+
+void odm_TXPowerTrackingCallback_ThermalMeter_8188E(struct adapter *Adapter);
+
+
+/* 1 7. IQK */
+
+void PHY_IQCalibrate_8188E(struct adapter *Adapter, bool ReCovery);
+
+/* LC calibrate */
+void PHY_LCCalibrate_8188E(struct adapter *pAdapter);
+
+/* AP calibrate */
+void PHY_APCalibrate_8188E(struct adapter *pAdapter, s8 delta);
+
+void PHY_DigitalPredistortion_8188E(struct adapter *pAdapter);
+
+void _PHY_SaveADDARegisters(struct adapter *pAdapter, u32 *ADDAReg,
+ u32 *ADDABackup, u32 RegisterNum);
+
+void _PHY_PathADDAOn(struct adapter *pAdapter, u32 *ADDAReg,
+ bool isPathAOn, bool is2T);
+
+void _PHY_MACSettingCalibration(struct adapter *pAdapter, u32 *MACReg,
+ u32 *MACBackup);
+
+void _PHY_PathAStandBy(struct adapter *pAdapter);
+
+#endif /* #ifndef __HAL_PHY_RF_8188E_H__ */
diff --git a/drivers/staging/rtl8188eu/include/HalPwrSeqCmd.h b/drivers/staging/rtl8188eu/include/HalPwrSeqCmd.h
new file mode 100644
index 00000000000..d945784ed5d
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/HalPwrSeqCmd.h
@@ -0,0 +1,128 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __HALPWRSEQCMD_H__
+#define __HALPWRSEQCMD_H__
+
+#include <drv_types.h>
+
+/*---------------------------------------------*/
+/* 3 The value of cmd: 4 bits */
+/*---------------------------------------------*/
+#define PWR_CMD_READ 0x00
+ /* offset: the read register offset */
+ /* msk: the mask of the read value */
+ /* value: N/A, left by 0 */
+ /* note: driver shall implement this function by read & msk */
+
+#define PWR_CMD_WRITE 0x01
+ /* offset: the read register offset */
+ /* msk: the mask of the write bits */
+ /* value: write value */
+ /* note: driver shall implement this cmd by read & msk after write */
+
+#define PWR_CMD_POLLING 0x02
+ /* offset: the read register offset */
+ /* msk: the mask of the polled value */
+ /* value: the value to be polled, masked by the msk field. */
+ /* note: driver shall implement this cmd by */
+ /* do{ */
+ /* if ( (Read(offset) & msk) == (value & msk) ) */
+ /* break; */
+ /* } while (not timeout); */
+
+#define PWR_CMD_DELAY 0x03
+ /* offset: the value to delay */
+ /* msk: N/A */
+ /* value: the unit of delay, 0: us, 1: ms */
+
+#define PWR_CMD_END 0x04
+ /* offset: N/A */
+ /* msk: N/A */
+ /* value: N/A */
+
+/*---------------------------------------------*/
+/* 3 The value of base: 4 bits */
+/*---------------------------------------------*/
+ /* define the base address of each block */
+#define PWR_BASEADDR_MAC 0x00
+#define PWR_BASEADDR_USB 0x01
+#define PWR_BASEADDR_PCIE 0x02
+#define PWR_BASEADDR_SDIO 0x03
+
+/*---------------------------------------------*/
+/* 3 The value of interface_msk: 4 bits */
+/*---------------------------------------------*/
+#define PWR_INTF_SDIO_MSK BIT(0)
+#define PWR_INTF_USB_MSK BIT(1)
+#define PWR_INTF_PCI_MSK BIT(2)
+#define PWR_INTF_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
+
+/*---------------------------------------------*/
+/* 3 The value of fab_msk: 4 bits */
+/*---------------------------------------------*/
+#define PWR_FAB_TSMC_MSK BIT(0)
+#define PWR_FAB_UMC_MSK BIT(1)
+#define PWR_FAB_ALL_MSK (BIT(0)|BIT(1)|BIT(2)|BIT(3))
+
+/*---------------------------------------------*/
+/* 3 The value of cut_msk: 8 bits */
+/*---------------------------------------------*/
+#define PWR_CUT_TESTCHIP_MSK BIT(0)
+#define PWR_CUT_A_MSK BIT(1)
+#define PWR_CUT_B_MSK BIT(2)
+#define PWR_CUT_C_MSK BIT(3)
+#define PWR_CUT_D_MSK BIT(4)
+#define PWR_CUT_E_MSK BIT(5)
+#define PWR_CUT_F_MSK BIT(6)
+#define PWR_CUT_G_MSK BIT(7)
+#define PWR_CUT_ALL_MSK 0xFF
+
+
+enum pwrseq_cmd_delat_unit {
+ PWRSEQ_DELAY_US,
+ PWRSEQ_DELAY_MS,
+};
+
+struct wl_pwr_cfg {
+ u16 offset;
+ u8 cut_msk;
+ u8 fab_msk:4;
+ u8 interface_msk:4;
+ u8 base:4;
+ u8 cmd:4;
+ u8 msk;
+ u8 value;
+};
+
+#define GET_PWR_CFG_OFFSET(__PWR_CMD) __PWR_CMD.offset
+#define GET_PWR_CFG_CUT_MASK(__PWR_CMD) __PWR_CMD.cut_msk
+#define GET_PWR_CFG_FAB_MASK(__PWR_CMD) __PWR_CMD.fab_msk
+#define GET_PWR_CFG_INTF_MASK(__PWR_CMD) __PWR_CMD.interface_msk
+#define GET_PWR_CFG_BASE(__PWR_CMD) __PWR_CMD.base
+#define GET_PWR_CFG_CMD(__PWR_CMD) __PWR_CMD.cmd
+#define GET_PWR_CFG_MASK(__PWR_CMD) __PWR_CMD.msk
+#define GET_PWR_CFG_VALUE(__PWR_CMD) __PWR_CMD.value
+
+
+/* Prototype of protected function. */
+u8 HalPwrSeqCmdParsing(struct adapter *padapter, u8 CutVersion, u8 FabVersion,
+ u8 InterfaceType, struct wl_pwr_cfg PwrCfgCmd[]);
+
+#endif
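
HalPwrSeqCmdParsing() is expected to walk one of the wl_pwr_cfg tables, skip entries whose cut/fab/interface masks do not match the running chip, and dispatch on the cmd field, implementing PWR_CMD_POLLING as the read-and-compare loop described in the comments above. A rough sketch under those assumptions follows; rtw_read8()/rtw_write8(), the polling bound and the delay calls are placeholders, not a claim about the real implementation.

    /* Sketch only: not the driver's HalPwrSeqCmdParsing(). Accessor names and
     * the polling bound are placeholders. */
    static u8 pwr_seq_parse_sketch(struct adapter *adapt, u8 cut, u8 fab, u8 intf,
    			       struct wl_pwr_cfg cfg[])
    {
    	unsigned int i, poll;
    	u8 val;

    	for (i = 0; GET_PWR_CFG_CMD(cfg[i]) != PWR_CMD_END; i++) {
    		/* skip entries that do not apply to this chip/interface */
    		if (!(GET_PWR_CFG_CUT_MASK(cfg[i]) & cut) ||
    		    !(GET_PWR_CFG_FAB_MASK(cfg[i]) & fab) ||
    		    !(GET_PWR_CFG_INTF_MASK(cfg[i]) & intf))
    			continue;

    		switch (GET_PWR_CFG_CMD(cfg[i])) {
    		case PWR_CMD_WRITE:	/* read-modify-write under msk */
    			val = rtw_read8(adapt, GET_PWR_CFG_OFFSET(cfg[i]));
    			val &= ~GET_PWR_CFG_MASK(cfg[i]);
    			val |= GET_PWR_CFG_VALUE(cfg[i]) & GET_PWR_CFG_MASK(cfg[i]);
    			rtw_write8(adapt, GET_PWR_CFG_OFFSET(cfg[i]), val);
    			break;
    		case PWR_CMD_POLLING:	/* wait until (reg & msk) == (value & msk) */
    			for (poll = 0; poll < 5000; poll++) {
    				val = rtw_read8(adapt, GET_PWR_CFG_OFFSET(cfg[i]));
    				if ((val & GET_PWR_CFG_MASK(cfg[i])) ==
    				    (GET_PWR_CFG_VALUE(cfg[i]) & GET_PWR_CFG_MASK(cfg[i])))
    					break;
    				udelay(10);
    			}
    			if (poll == 5000)
    				return false;	/* polling timed out */
    			break;
    		case PWR_CMD_DELAY:	/* offset carries the amount, value the unit */
    			if (GET_PWR_CFG_VALUE(cfg[i]) == PWRSEQ_DELAY_US)
    				udelay(GET_PWR_CFG_OFFSET(cfg[i]));
    			else
    				mdelay(GET_PWR_CFG_OFFSET(cfg[i]));
    			break;
    		default:
    			break;
    		}
    	}
    	return true;
    }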
diff --git a/drivers/staging/rtl8188eu/include/HalVerDef.h b/drivers/staging/rtl8188eu/include/HalVerDef.h
new file mode 100644
index 00000000000..97047cf0678
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/HalVerDef.h
@@ -0,0 +1,167 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __HAL_VERSION_DEF_H__
+#define __HAL_VERSION_DEF_H__
+
+enum HAL_IC_TYPE {
+ CHIP_8192S = 0,
+ CHIP_8188C = 1,
+ CHIP_8192C = 2,
+ CHIP_8192D = 3,
+ CHIP_8723A = 4,
+ CHIP_8188E = 5,
+ CHIP_8881A = 6,
+ CHIP_8812A = 7,
+ CHIP_8821A = 8,
+ CHIP_8723B = 9,
+ CHIP_8192E = 10,
+};
+
+enum HAL_CHIP_TYPE {
+ TEST_CHIP = 0,
+ NORMAL_CHIP = 1,
+ FPGA = 2,
+};
+
+enum HAL_CUT_VERSION {
+ A_CUT_VERSION = 0,
+ B_CUT_VERSION = 1,
+ C_CUT_VERSION = 2,
+ D_CUT_VERSION = 3,
+ E_CUT_VERSION = 4,
+ F_CUT_VERSION = 5,
+ G_CUT_VERSION = 6,
+};
+
+enum HAL_VENDOR {
+ CHIP_VENDOR_TSMC = 0,
+ CHIP_VENDOR_UMC = 1,
+};
+
+enum HAL_RF_TYPE {
+ RF_TYPE_1T1R = 0,
+ RF_TYPE_1T2R = 1,
+ RF_TYPE_2T2R = 2,
+ RF_TYPE_2T3R = 3,
+ RF_TYPE_2T4R = 4,
+ RF_TYPE_3T3R = 5,
+ RF_TYPE_3T4R = 6,
+ RF_TYPE_4T4R = 7,
+};
+
+struct HAL_VERSION {
+ enum HAL_IC_TYPE ICType;
+ enum HAL_CHIP_TYPE ChipType;
+ enum HAL_CUT_VERSION CUTVersion;
+ enum HAL_VENDOR VendorType;
+ enum HAL_RF_TYPE RFType;
+ u8 ROMVer;
+};
+
+/* Get element */
+#define GET_CVID_IC_TYPE(version) (((version).ICType))
+#define GET_CVID_CHIP_TYPE(version) (((version).ChipType))
+#define GET_CVID_RF_TYPE(version) (((version).RFType))
+#define GET_CVID_MANUFACTUER(version) (((version).VendorType))
+#define GET_CVID_CUT_VERSION(version) (((version).CUTVersion))
+#define GET_CVID_ROM_VERSION(version) (((version).ROMVer) & ROM_VERSION_MASK)
+
+/* Common Macro. -- */
+/* HAL_VERSION VersionID */
+
+/* HAL_IC_TYPE_E */
+#define IS_81XXC(version) \
+ (((GET_CVID_IC_TYPE(version) == CHIP_8192C) || \
+ (GET_CVID_IC_TYPE(version) == CHIP_8188C)) ? true : false)
+#define IS_8723_SERIES(version) \
+ ((GET_CVID_IC_TYPE(version) == CHIP_8723A) ? true : false)
+#define IS_92D(version) \
+ ((GET_CVID_IC_TYPE(version) == CHIP_8192D) ? true : false)
+#define IS_8188E(version) \
+ ((GET_CVID_IC_TYPE(version) == CHIP_8188E) ? true : false)
+
+/* HAL_CHIP_TYPE_E */
+#define IS_TEST_CHIP(version) \
+ ((GET_CVID_CHIP_TYPE(version) == TEST_CHIP) ? true : false)
+#define IS_NORMAL_CHIP(version) \
+ ((GET_CVID_CHIP_TYPE(version) == NORMAL_CHIP) ? true : false)
+
+/* HAL_CUT_VERSION_E */
+#define IS_A_CUT(version) \
+ ((GET_CVID_CUT_VERSION(version) == A_CUT_VERSION) ? true : false)
+#define IS_B_CUT(version) \
+ ((GET_CVID_CUT_VERSION(version) == B_CUT_VERSION) ? true : false)
+#define IS_C_CUT(version) \
+ ((GET_CVID_CUT_VERSION(version) == C_CUT_VERSION) ? true : false)
+#define IS_D_CUT(version) \
+ ((GET_CVID_CUT_VERSION(version) == D_CUT_VERSION) ? true : false)
+#define IS_E_CUT(version) \
+ ((GET_CVID_CUT_VERSION(version) == E_CUT_VERSION) ? true : false)
+
+
+/* HAL_VENDOR_E */
+#define IS_CHIP_VENDOR_TSMC(version) \
+ ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_TSMC) ? true : false)
+#define IS_CHIP_VENDOR_UMC(version) \
+ ((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_UMC) ? true : false)
+
+/* HAL_RF_TYPE_E */
+#define IS_1T1R(version) \
+ ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T1R) ? true : false)
+#define IS_1T2R(version) \
+ ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R) ? true : false)
+#define IS_2T2R(version) \
+ ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R) ? true : false)
+
+/* Chip version Macro. -- */
+#define IS_81XXC_TEST_CHIP(version) \
+ ((IS_81XXC(version) && (!IS_NORMAL_CHIP(version))) ? true : false)
+
+#define IS_92C_SERIAL(version) \
+ ((IS_81XXC(version) && IS_2T2R(version)) ? true : false)
+#define IS_81xxC_VENDOR_UMC_A_CUT(version) \
+ (IS_81XXC(version) ? (IS_CHIP_VENDOR_UMC(version) ? \
+ (IS_A_CUT(version) ? true : false) : false) : false)
+#define IS_81xxC_VENDOR_UMC_B_CUT(version) \
+ (IS_81XXC(version) ? (IS_CHIP_VENDOR_UMC(version) ? \
+ (IS_B_CUT(version) ? true : false) : false) : false)
+#define IS_81xxC_VENDOR_UMC_C_CUT(version) \
+ (IS_81XXC(version) ? (IS_CHIP_VENDOR_UMC(version) ? \
+ (IS_C_CUT(version) ? true : false) : false) : false)
+
+#define IS_NORMAL_CHIP92D(version) \
+ ((IS_92D(version)) ? \
+ ((GET_CVID_CHIP_TYPE(version) == NORMAL_CHIP) ? true : false) : false)
+
+#define IS_92D_SINGLEPHY(version) \
+ ((IS_92D(version)) ? (IS_2T2R(version) ? true : false) : false)
+#define IS_92D_C_CUT(version) \
+ ((IS_92D(version)) ? (IS_C_CUT(version) ? true : false) : false)
+#define IS_92D_D_CUT(version) \
+ ((IS_92D(version)) ? (IS_D_CUT(version) ? true : false) : false)
+#define IS_92D_E_CUT(version) \
+ ((IS_92D(version)) ? (IS_E_CUT(version) ? true : false) : false)
+
+#define IS_8723A_A_CUT(version) \
+ ((IS_8723_SERIES(version)) ? (IS_A_CUT(version) ? true : false) : false)
+#define IS_8723A_B_CUT(version) \
+ ((IS_8723_SERIES(version)) ? (IS_B_CUT(version) ? true : false) : false)
+
+#endif
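
A standalone sketch of how the HAL_VERSION query macros compose; the enums and macros are copied in trimmed form from HalVerDef.h so the example builds outside the kernel, and the hard-coded version values are made up for the demonstration (the real driver fills the struct from hardware in read_chip_version()).

/* Sketch: querying a HAL_VERSION with the IS_* predicates. */
#include <stdio.h>

enum HAL_IC_TYPE   { CHIP_8192C = 2, CHIP_8188E = 5 };
enum HAL_CHIP_TYPE { TEST_CHIP = 0, NORMAL_CHIP = 1 };
enum HAL_VENDOR    { CHIP_VENDOR_TSMC = 0, CHIP_VENDOR_UMC = 1 };

struct HAL_VERSION {
	enum HAL_IC_TYPE   ICType;
	enum HAL_CHIP_TYPE ChipType;
	enum HAL_VENDOR    VendorType;
};

#define GET_CVID_IC_TYPE(v)	((v).ICType)
#define GET_CVID_CHIP_TYPE(v)	((v).ChipType)
#define IS_8188E(v)		(GET_CVID_IC_TYPE(v) == CHIP_8188E)
#define IS_NORMAL_CHIP(v)	(GET_CVID_CHIP_TYPE(v) == NORMAL_CHIP)

int main(void)
{
	struct HAL_VERSION ver = {
		.ICType     = CHIP_8188E,
		.ChipType   = NORMAL_CHIP,
		.VendorType = CHIP_VENDOR_TSMC,
	};

	printf("8188E: %d, normal chip: %d\n",
	       IS_8188E(ver), IS_NORMAL_CHIP(ver));
	return 0;
}
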
diff --git a/drivers/staging/rtl8188eu/include/basic_types.h b/drivers/staging/rtl8188eu/include/basic_types.h
new file mode 100644
index 00000000000..8a7ca992674
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/basic_types.h
@@ -0,0 +1,184 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __BASIC_TYPES_H__
+#define __BASIC_TYPES_H__
+
+#define SUCCESS 0
+#define FAIL (-1)
+
+#include <linux/types.h>
+#define NDIS_OID uint
+
+typedef void (*proc_t)(void *);
+
+#define FIELD_OFFSET(s, field) ((ssize_t)&((s *)(0))->field)
+
+#define MEM_ALIGNMENT_OFFSET (sizeof(size_t))
+#define MEM_ALIGNMENT_PADDING (sizeof(size_t) - 1)
+
+/* port from fw */
+/* TODO: The macros below are synced from the SD7 driver and still
+ * need to be checked for correctness. */
+
+/*
+ * Call the endian-conversion helpers when:
+ * 1. Reading/writing packet content.
+ * 2. Before writing an integer to IO.
+ * 3. After reading an integer from IO.
+ */
+
+/* Convert little data endian to host ordering */
+#define EF1BYTE(_val) \
+ ((u8)(_val))
+#define EF2BYTE(_val) \
+ (le16_to_cpu(_val))
+#define EF4BYTE(_val) \
+ (le32_to_cpu(_val))
+
+/* Read data from memory */
+#define READEF1BYTE(_ptr) \
+ EF1BYTE(*((u8 *)(_ptr)))
+/* Read le16 data from memory and convert to host ordering */
+#define READEF2BYTE(_ptr) \
+ EF2BYTE(*(_ptr))
+#define READEF4BYTE(_ptr) \
+ EF4BYTE(*(_ptr))
+
+/* Write data to memory */
+#define WRITEEF1BYTE(_ptr, _val) \
+	do { \
+		(*((u8 *)(_ptr))) = EF1BYTE(_val); \
+	} while (0)
+/* Write le data to memory in host ordering */
+#define WRITEEF2BYTE(_ptr, _val) \
+	do { \
+		(*((u16 *)(_ptr))) = EF2BYTE(_val); \
+	} while (0)
+
+#define WRITEEF4BYTE(_ptr, _val) \
+	do { \
+		(*((u32 *)(_ptr))) = EF4BYTE(_val); \
+	} while (0)
+
+/* Create a bit mask
+ * Examples:
+ * BIT_LEN_MASK_32(0) => 0x00000000
+ * BIT_LEN_MASK_32(1) => 0x00000001
+ * BIT_LEN_MASK_32(2) => 0x00000003
+ * BIT_LEN_MASK_32(32) => 0xFFFFFFFF
+ */
+#define BIT_LEN_MASK_32(__bitlen) \
+ (0xFFFFFFFF >> (32 - (__bitlen)))
+#define BIT_LEN_MASK_16(__bitlen) \
+ (0xFFFF >> (16 - (__bitlen)))
+#define BIT_LEN_MASK_8(__bitlen) \
+ (0xFF >> (8 - (__bitlen)))
+
+/* Create an offset bit mask
+ * Examples:
+ * BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
+ * BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000
+ */
+#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \
+ (BIT_LEN_MASK_32(__bitlen) << (__bitoffset))
+#define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \
+ (BIT_LEN_MASK_16(__bitlen) << (__bitoffset))
+#define BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen) \
+ (BIT_LEN_MASK_8(__bitlen) << (__bitoffset))
+
+/*Description:
+ * Return 4-byte value in host byte ordering from
+ * 4-byte pointer in little-endian system.
+ */
+#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
+ (EF4BYTE(*((__le32 *)(__pstart))))
+#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
+ (EF2BYTE(*((__le16 *)(__pstart))))
+#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
+ (EF1BYTE(*((u8 *)(__pstart))))
+
+/*Description:
+Translate subfield (continuous bits in little-endian) of 4-byte
+value to host byte ordering.*/
+#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \
+ BIT_LEN_MASK_32(__bitlen) \
+ )
+#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ (LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset)) & \
+ BIT_LEN_MASK_16(__bitlen) \
+ )
+#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ (LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \
+ BIT_LEN_MASK_8(__bitlen) \
+ )
+
+/* Description:
+ * Mask subfield (continuous bits in little-endian) of 4-byte value
+ * and return the result in 4-byte value in host byte ordering.
+ */
+#define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ LE_P4BYTE_TO_HOST_4BYTE(__pstart) & \
+ (~BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen)) \
+ )
+#define LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ LE_P2BYTE_TO_HOST_2BYTE(__pstart) & \
+ (~BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen)) \
+ )
+#define LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
+ ( \
+ LE_P1BYTE_TO_HOST_1BYTE(__pstart) & \
+ (~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen)) \
+ )
+
+/* Description:
+ * Set subfield of little-endian 4-byte value to specified value.
+ */
+#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
+ *((u32 *)(__pstart)) = \
+ ( \
+ LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
+ ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
+ )
+
+#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
+ *((u16 *)(__pstart)) = \
+ ( \
+ LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
+ ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
+	)
+
+#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \
+ *((u8 *)(__pstart)) = EF1BYTE \
+ ( \
+ LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) | \
+ ((((u8)__val) & BIT_LEN_MASK_8(__bitlen)) << (__bitoffset)) \
+ )
+
+/* Get the N-byte alignment offset from the current length */
+#define N_BYTE_ALIGMENT(__value, __aligment) ((__aligment == 1) ? \
+ (__value) : (((__value + __aligment - 1) / __aligment) * __aligment))
+
+#endif /* __BASIC_TYPES_H__ */
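
A userspace sketch of the little-endian bit-field helpers above: read a sub-field out of a 4-byte LE descriptor word, then write a new value back. The kernel's le32_to_cpu()/cpu_to_le32() are replaced here with explicit byte shuffling, and the helper names are illustrative.

/* Sketch: LE_BITS_TO_4BYTE / SET_BITS_TO_LE_4BYTE in plain C. */
#include <stdint.h>
#include <stdio.h>

static uint32_t le32_get(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

static void le32_put(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

#define BIT_LEN_MASK_32(len)	(0xFFFFFFFFu >> (32 - (len)))

static uint32_t le_bits_to_4byte(const uint8_t *p, int off, int len)
{
	return (le32_get(p) >> off) & BIT_LEN_MASK_32(len);
}

static void set_bits_to_le_4byte(uint8_t *p, int off, int len, uint32_t val)
{
	uint32_t v = le32_get(p);

	v &= ~(BIT_LEN_MASK_32(len) << off);		/* clear the field */
	v |= (val & BIT_LEN_MASK_32(len)) << off;	/* insert new value */
	le32_put(p, v);			/* store back in little-endian order */
}

int main(void)
{
	uint8_t desc[4] = { 0x78, 0x56, 0x34, 0x12 };	/* LE 0x12345678 */

	printf("bits[8..15] = 0x%02x\n",
	       (unsigned int)le_bits_to_4byte(desc, 8, 8));
	set_bits_to_le_4byte(desc, 8, 8, 0xAB);
	printf("word now    = 0x%08x\n", (unsigned int)le32_get(desc));
	return 0;
}
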
diff --git a/drivers/staging/rtl8188eu/include/cmd_osdep.h b/drivers/staging/rtl8188eu/include/cmd_osdep.h
new file mode 100644
index 00000000000..5a8465e147b
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/cmd_osdep.h
@@ -0,0 +1,32 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __CMD_OSDEP_H_
+#define __CMD_OSDEP_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+extern int _rtw_init_cmd_priv(struct cmd_priv *pcmdpriv);
+extern int _rtw_init_evt_priv(struct evt_priv *pevtpriv);
+extern void _rtw_free_cmd_priv(struct cmd_priv *pcmdpriv);
+extern int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj);
+extern struct cmd_obj *_rtw_dequeue_cmd(struct __queue *queue);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
new file mode 100644
index 00000000000..ad073c8af27
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -0,0 +1,334 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+/*-----------------------------------------------------------------------------
+
+ For type defines and data structure defines
+
+------------------------------------------------------------------------------*/
+
+
+#ifndef __DRV_TYPES_H__
+#define __DRV_TYPES_H__
+
+#define DRV_NAME "r8188eu"
+
+#include <osdep_service.h>
+#include <wlan_bssdef.h>
+#include <drv_types_linux.h>
+#include <rtw_ht.h>
+#include <rtw_cmd.h>
+#include <rtw_xmit.h>
+#include <rtw_recv.h>
+#include <hal_intf.h>
+#include <hal_com.h>
+#include <rtw_qos.h>
+#include <rtw_security.h>
+#include <rtw_pwrctrl.h>
+#include <rtw_io.h>
+#include <rtw_eeprom.h>
+#include <sta_info.h>
+#include <rtw_mlme.h>
+#include <rtw_debug.h>
+#include <rtw_rf.h>
+#include <rtw_event.h>
+#include <rtw_led.h>
+#include <rtw_mlme_ext.h>
+#include <rtw_p2p.h>
+#include <rtw_ap.h>
+#include <rtw_mp.h>
+#include <rtw_br_ext.h>
+
+enum _NIC_VERSION {
+ RTL8711_NIC,
+ RTL8712_NIC,
+ RTL8713_NIC,
+ RTL8716_NIC
+};
+
+#define SPEC_DEV_ID_NONE BIT(0)
+#define SPEC_DEV_ID_DISABLE_HT BIT(1)
+#define SPEC_DEV_ID_ENABLE_PS BIT(2)
+#define SPEC_DEV_ID_RF_CONFIG_1T1R BIT(3)
+#define SPEC_DEV_ID_RF_CONFIG_2T2R BIT(4)
+#define SPEC_DEV_ID_ASSIGN_IFNAME BIT(5)
+
+struct specific_device_id {
+ u32 flags;
+ u16 idVendor;
+ u16 idProduct;
+};
+
+struct registry_priv {
+ u8 chip_version;
+ u8 rfintfs;
+ u8 lbkmode;
+ u8 hci;
+ struct ndis_802_11_ssid ssid;
+ u8 network_mode; /* infra, ad-hoc, auto */
+ u8 channel;/* ad-hoc support requirement */
+ u8 wireless_mode;/* A, B, G, auto */
+ u8 scan_mode;/* active, passive */
+ u8 radio_enable;
+ u8 preamble;/* long, short, auto */
+ u8 vrtl_carrier_sense;/* Enable, Disable, Auto */
+ u8 vcs_type;/* RTS/CTS, CTS-to-self */
+ u16 rts_thresh;
+ u16 frag_thresh;
+ u8 adhoc_tx_pwr;
+ u8 soft_ap;
+ u8 power_mgnt;
+ u8 ips_mode;
+ u8 smart_ps;
+ u8 long_retry_lmt;
+ u8 short_retry_lmt;
+ u16 busy_thresh;
+ u8 ack_policy;
+ u8 mp_mode;
+ u8 software_encrypt;
+ u8 software_decrypt;
+ u8 acm_method;
+ /* UAPSD */
+ u8 wmm_enable;
+ u8 uapsd_enable;
+ u8 uapsd_max_sp;
+ u8 uapsd_acbk_en;
+ u8 uapsd_acbe_en;
+ u8 uapsd_acvi_en;
+ u8 uapsd_acvo_en;
+
+ struct wlan_bssid_ex dev_network;
+
+ u8 ht_enable;
+ u8 cbw40_enable;
+ u8 ampdu_enable;/* for tx */
+ u8 rx_stbc;
+	u8 ampdu_amsdu;/* A-MSDU in A-MPDU is permitted */
+ u8 lowrate_two_xmit;
+
+ u8 rf_config;
+ u8 low_power;
+
+ u8 wifi_spec;/* !turbo_mode */
+
+ u8 channel_plan;
+ bool bAcceptAddbaReq;
+
+ u8 antdiv_cfg;
+ u8 antdiv_type;
+
+ u8 usbss_enable;/* 0:disable,1:enable */
+ u8 hwpdn_mode;/* 0:disable,1:enable,2:decide by EFUSE config */
+ u8 hwpwrp_detect;/* 0:disable,1:enable */
+
+ u8 hw_wps_pbc;/* 0:disable,1:enable */
+
+ u8 max_roaming_times; /* the max number driver will try */
+
+ u8 fw_iol; /* enable iol without other concern */
+
+ u8 enable80211d;
+
+ u8 ifname[16];
+ u8 if2name[16];
+
+ u8 notch_filter;
+};
+
+/* For registry parameters */
+#define RGTRY_OFT(field) ((u32)FIELD_OFFSET(struct registry_priv, field))
+#define RGTRY_SZ(field) sizeof(((struct registry_priv *)0)->field)
+#define BSSID_OFT(field) ((u32)FIELD_OFFSET(struct wlan_bssid_ex, field))
+#define BSSID_SZ(field) sizeof(((struct wlan_bssid_ex *)0)->field)
+
+#define MAX_CONTINUAL_URB_ERR 4
+
+struct dvobj_priv {
+ struct adapter *if1;
+ struct adapter *if2;
+
+	/* For 92D, DMDP has 2 interfaces. */
+ u8 InterfaceNumber;
+ u8 NumInterfaces;
+
+ /* In /Out Pipe information */
+ int RtInPipe[2];
+ int RtOutPipe[3];
+ u8 Queue2Pipe[HW_QUEUE_ENTRY];/* for out pipe mapping */
+
+ u8 irq_alloc;
+
+/*-------- below is for USB INTERFACE --------*/
+
+ u8 nr_endpoint;
+ u8 ishighspeed;
+ u8 RtNumInPipes;
+ u8 RtNumOutPipes;
+ int ep_num[5]; /* endpoint number */
+ int RegUsbSS;
+ struct semaphore usb_suspend_sema;
+ struct mutex usb_vendor_req_mutex;
+
+ u8 *usb_alloc_vendor_req_buf;
+ u8 *usb_vendor_req_buf;
+
+ struct usb_interface *pusbintf;
+ struct usb_device *pusbdev;
+
+ ATOMIC_T continual_urb_error;
+};
+
+static inline struct device *dvobj_to_dev(struct dvobj_priv *dvobj)
+{
+	/* todo: get the interface type from dvobj and then return
+	 * the dev accordingly */
+ return &dvobj->pusbintf->dev;
+}
+
+enum _IFACE_TYPE {
+ IFACE_PORT0, /* mapping to port0 for C/D series chips */
+ IFACE_PORT1, /* mapping to port1 for C/D series chip */
+ MAX_IFACE_PORT,
+};
+
+enum _ADAPTER_TYPE {
+ PRIMARY_ADAPTER,
+ SECONDARY_ADAPTER,
+ MAX_ADAPTER,
+};
+
+enum driver_state {
+ DRIVER_NORMAL = 0,
+ DRIVER_DISAPPEAR = 1,
+ DRIVER_REPLACE_DONGLE = 2,
+};
+
+struct adapter {
+	int DriverState;/* for disabling the driver using the module;
+			 * use the dongle to replace the module. */
+ int pid[3];/* process id from UI, 0:wps, 1:hostapd, 2:dhcpcd */
+	int bDongle;/* built-in module or external dongle */
+ u16 chip_type;
+ u16 HardwareType;
+ u16 interface_type;/* USB,SDIO,SPI,PCI */
+
+ struct dvobj_priv *dvobj;
+ struct mlme_priv mlmepriv;
+ struct mlme_ext_priv mlmeextpriv;
+ struct cmd_priv cmdpriv;
+ struct evt_priv evtpriv;
+ struct io_priv iopriv;
+ struct xmit_priv xmitpriv;
+ struct recv_priv recvpriv;
+ struct sta_priv stapriv;
+ struct security_priv securitypriv;
+ struct registry_priv registrypriv;
+ struct pwrctrl_priv pwrctrlpriv;
+ struct eeprom_priv eeprompriv;
+ struct led_priv ledpriv;
+ struct mp_priv mppriv;
+
+#ifdef CONFIG_88EU_AP_MODE
+ struct hostapd_priv *phostapdpriv;
+#endif
+
+ struct wifidirect_info wdinfo;
+
+ void *HalData;
+ u32 hal_data_sz;
+ struct hal_ops HalFunc;
+
+ s32 bDriverStopped;
+ s32 bSurpriseRemoved;
+ s32 bCardDisableWOHSM;
+
+ u32 IsrContent;
+ u32 ImrContent;
+
+ u8 EepromAddressSize;
+ u8 hw_init_completed;
+ u8 bDriverIsGoingToUnload;
+ u8 init_adpt_in_progress;
+ u8 bHaltInProgress;
+
+ void *cmdThread;
+ void *evtThread;
+ void *xmitThread;
+ void *recvThread;
+ void (*intf_start)(struct adapter *adapter);
+ void (*intf_stop)(struct adapter *adapter);
+ struct net_device *pnetdev;
+
+ /* used by rtw_rereg_nd_name related function */
+ struct rereg_nd_name_data {
+ struct net_device *old_pnetdev;
+ char old_ifname[IFNAMSIZ];
+ u8 old_ips_mode;
+ u8 old_bRegUseLed;
+ } rereg_nd_name_priv;
+
+ int bup;
+ struct net_device_stats stats;
+ struct iw_statistics iwstats;
+ struct proc_dir_entry *dir_dev;/* for proc directory */
+
+ int net_closed;
+ u8 bFWReady;
+ u8 bBTFWReady;
+ u8 bReadPortCancel;
+ u8 bWritePortCancel;
+ u8 bRxRSSIDisplay;
+ /* The driver will show up the desired channel number
+ * when this flag is 1. */
+ u8 bNotifyChannelChange;
+#ifdef CONFIG_88EU_P2P
+ /* The driver will show the current P2P status when the
+ * upper application reads it. */
+ u8 bShowGetP2PState;
+#endif
+ struct adapter *pbuddy_adapter;
+
+ struct mutex *hw_init_mutex;
+
+ spinlock_t br_ext_lock;
+ struct nat25_network_db_entry *nethash[NAT25_HASH_SIZE];
+ int pppoe_connection_in_progress;
+ unsigned char pppoe_addr[MACADDRLEN];
+ unsigned char scdb_mac[MACADDRLEN];
+ unsigned char scdb_ip[4];
+ struct nat25_network_db_entry *scdb_entry;
+ unsigned char br_mac[MACADDRLEN];
+ unsigned char br_ip[4];
+ struct br_ext_info ethBrExtInfo;
+
+ u8 fix_rate;
+
+ unsigned char in_cta_test;
+};
+
+#define adapter_to_dvobj(adapter) (adapter->dvobj)
+
+int rtw_handle_dualmac(struct adapter *adapter, bool init);
+
+static inline u8 *myid(struct eeprom_priv *peepriv)
+{
+ return peepriv->mac_addr;
+}
+
+#endif /* __DRV_TYPES_H__ */
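
A trimmed, self-contained illustration of how the helper accessors around struct adapter are used: adapter_to_dvobj() reaches the bus-level object and myid() returns the EEPROM MAC address. Only a couple of fields are kept so the example compiles alone, and the sample MAC address is made up.

/* Sketch: adapter_to_dvobj() and myid() usage. */
#include <stdint.h>
#include <stdio.h>

struct eeprom_priv { uint8_t mac_addr[6]; };
struct dvobj_priv  { int nr_endpoint; };

struct adapter {
	struct dvobj_priv  *dvobj;
	struct eeprom_priv  eeprompriv;
};

#define adapter_to_dvobj(a)	((a)->dvobj)

static inline uint8_t *myid(struct eeprom_priv *eep)
{
	return eep->mac_addr;
}

int main(void)
{
	struct dvobj_priv dv = { .nr_endpoint = 4 };
	struct adapter adapt = {
		.dvobj = &dv,
		.eeprompriv = { .mac_addr = { 0x00, 0xe0, 0x4c, 0x81, 0x88, 0xee } },
	};
	const uint8_t *mac = myid(&adapt.eeprompriv);

	printf("endpoints: %d, MAC starts with %02x:%02x:%02x\n",
	       adapter_to_dvobj(&adapt)->nr_endpoint, mac[0], mac[1], mac[2]);
	return 0;
}
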
diff --git a/drivers/staging/rtl8188eu/include/drv_types_linux.h b/drivers/staging/rtl8188eu/include/drv_types_linux.h
new file mode 100644
index 00000000000..812b7440d4b
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/drv_types_linux.h
@@ -0,0 +1,24 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __DRV_TYPES_LINUX_H__
+#define __DRV_TYPES_LINUX_H__
+
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/ethernet.h b/drivers/staging/rtl8188eu/include/ethernet.h
new file mode 100644
index 00000000000..a59f9120cd7
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/ethernet.h
@@ -0,0 +1,42 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+/*! \file */
+#ifndef __INC_ETHERNET_H
+#define __INC_ETHERNET_H
+
+#define ETHERNET_ADDRESS_LENGTH 6 /* Ethernet Address Length */
+#define ETHERNET_HEADER_SIZE 14 /* Ethernet Header Length */
+#define LLC_HEADER_SIZE 6 /* LLC Header Length */
+#define TYPE_LENGTH_FIELD_SIZE 2 /* Type/Length Size */
+#define MINIMUM_ETHERNET_PACKET_SIZE 60 /* Min Ethernet Packet Size */
+#define MAXIMUM_ETHERNET_PACKET_SIZE 1514 /* Max Ethernet Packet Size */
+
+/* Is Multicast Address? */
+#define RT_ETH_IS_MULTICAST(_addr) ((((u8 *)(_addr))[0]&0x01) != 0)
+#define RT_ETH_IS_BROADCAST(_addr) ( \
+ ((u8 *)(_addr))[0] == 0xff && \
+ ((u8 *)(_addr))[1] == 0xff && \
+ ((u8 *)(_addr))[2] == 0xff && \
+ ((u8 *)(_addr))[3] == 0xff && \
+ ((u8 *)(_addr))[4] == 0xff && \
+ ((u8 *)(_addr))[5] == 0xff) /* Is Broadcast Address? */
+
+
+#endif /* #ifndef __INC_ETHERNET_H */
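
A quick standalone check of the address-class macros against sample MAC addresses; the macro bodies are copied from ethernet.h and only the u8 typedef is added so the snippet builds on its own.

/* Sketch: RT_ETH_IS_MULTICAST / RT_ETH_IS_BROADCAST usage. */
#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;

#define RT_ETH_IS_MULTICAST(_addr) ((((u8 *)(_addr))[0] & 0x01) != 0)
#define RT_ETH_IS_BROADCAST(_addr) ( \
	((u8 *)(_addr))[0] == 0xff && ((u8 *)(_addr))[1] == 0xff && \
	((u8 *)(_addr))[2] == 0xff && ((u8 *)(_addr))[3] == 0xff && \
	((u8 *)(_addr))[4] == 0xff && ((u8 *)(_addr))[5] == 0xff)

int main(void)
{
	u8 bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	u8 ucast[6] = { 0x00, 0xe0, 0x4c, 0x00, 0x00, 0x01 };

	printf("bcast: multicast=%d broadcast=%d\n",
	       RT_ETH_IS_MULTICAST(bcast), RT_ETH_IS_BROADCAST(bcast));
	printf("ucast: multicast=%d broadcast=%d\n",
	       RT_ETH_IS_MULTICAST(ucast), RT_ETH_IS_BROADCAST(ucast));
	return 0;
}
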
diff --git a/drivers/staging/rtl8188eu/include/h2clbk.h b/drivers/staging/rtl8188eu/include/h2clbk.h
new file mode 100644
index 00000000000..e595030ac8a
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/h2clbk.h
@@ -0,0 +1,35 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+
+#ifndef _H2CLBK_H_
+#define _H2CLBK_H_
+
+
+#include <rtl8711_spec.h>
+#include <TypeDef.h>
+
+
+void _lbk_cmd(struct adapter *adapter);
+
+void _lbk_rsp(struct adapter *adapter);
+
+void _lbk_evt(struct adapter *adapter);
+
+void h2c_event_callback(unsigned char *dev, unsigned char *pbuf);
+
+#endif /* _H2CLBK_H_ */
diff --git a/drivers/staging/rtl8188eu/include/hal_com.h b/drivers/staging/rtl8188eu/include/hal_com.h
new file mode 100644
index 00000000000..81c27090dd5
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/hal_com.h
@@ -0,0 +1,173 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __HAL_COMMON_H__
+#define __HAL_COMMON_H__
+
+/* */
+/* Rate Definition */
+/* */
+/* CCK */
+#define RATR_1M 0x00000001
+#define RATR_2M 0x00000002
+#define RATR_55M 0x00000004
+#define RATR_11M 0x00000008
+/* OFDM */
+#define RATR_6M 0x00000010
+#define RATR_9M 0x00000020
+#define RATR_12M 0x00000040
+#define RATR_18M 0x00000080
+#define RATR_24M 0x00000100
+#define RATR_36M 0x00000200
+#define RATR_48M 0x00000400
+#define RATR_54M 0x00000800
+/* MCS 1 Spatial Stream */
+#define RATR_MCS0 0x00001000
+#define RATR_MCS1 0x00002000
+#define RATR_MCS2 0x00004000
+#define RATR_MCS3 0x00008000
+#define RATR_MCS4 0x00010000
+#define RATR_MCS5 0x00020000
+#define RATR_MCS6 0x00040000
+#define RATR_MCS7 0x00080000
+/* MCS 2 Spatial Stream */
+#define RATR_MCS8 0x00100000
+#define RATR_MCS9 0x00200000
+#define RATR_MCS10 0x00400000
+#define RATR_MCS11 0x00800000
+#define RATR_MCS12 0x01000000
+#define RATR_MCS13 0x02000000
+#define RATR_MCS14 0x04000000
+#define RATR_MCS15 0x08000000
+
+/* CCK */
+#define RATE_1M BIT(0)
+#define RATE_2M BIT(1)
+#define RATE_5_5M BIT(2)
+#define RATE_11M BIT(3)
+/* OFDM */
+#define RATE_6M BIT(4)
+#define RATE_9M BIT(5)
+#define RATE_12M BIT(6)
+#define RATE_18M BIT(7)
+#define RATE_24M BIT(8)
+#define RATE_36M BIT(9)
+#define RATE_48M BIT(10)
+#define RATE_54M BIT(11)
+/* MCS 1 Spatial Stream */
+#define RATE_MCS0 BIT(12)
+#define RATE_MCS1 BIT(13)
+#define RATE_MCS2 BIT(14)
+#define RATE_MCS3 BIT(15)
+#define RATE_MCS4 BIT(16)
+#define RATE_MCS5 BIT(17)
+#define RATE_MCS6 BIT(18)
+#define RATE_MCS7 BIT(19)
+/* MCS 2 Spatial Stream */
+#define RATE_MCS8 BIT(20)
+#define RATE_MCS9 BIT(21)
+#define RATE_MCS10 BIT(22)
+#define RATE_MCS11 BIT(23)
+#define RATE_MCS12 BIT(24)
+#define RATE_MCS13 BIT(25)
+#define RATE_MCS14 BIT(26)
+#define RATE_MCS15 BIT(27)
+
+/* ALL CCK Rate */
+#define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M)
+#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M | \
+ RATR_24M | RATR_36M | RATR_48M | RATR_54M)
+#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 | \
+ RATR_MCS3 | RATR_MCS4 | RATR_MCS5|RATR_MCS6 | \
+ RATR_MCS7)
+#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 | \
+ RATR_MCS11 | RATR_MCS12 | RATR_MCS13 | \
+ RATR_MCS14 | RATR_MCS15)
+
+/*------------------------------ Tx Desc definition Macro --------------------*/
+/* Tx Desc related definitions. */
+/* Rate */
+/* CCK Rates, TxHT = 0 */
+#define DESC_RATE1M 0x00
+#define DESC_RATE2M 0x01
+#define DESC_RATE5_5M 0x02
+#define DESC_RATE11M 0x03
+
+/* OFDM Rates, TxHT = 0 */
+#define DESC_RATE6M 0x04
+#define DESC_RATE9M 0x05
+#define DESC_RATE12M 0x06
+#define DESC_RATE18M 0x07
+#define DESC_RATE24M 0x08
+#define DESC_RATE36M 0x09
+#define DESC_RATE48M 0x0a
+#define DESC_RATE54M 0x0b
+
+/* MCS Rates, TxHT = 1 */
+#define DESC_RATEMCS0 0x0c
+#define DESC_RATEMCS1 0x0d
+#define DESC_RATEMCS2 0x0e
+#define DESC_RATEMCS3 0x0f
+#define DESC_RATEMCS4 0x10
+#define DESC_RATEMCS5 0x11
+#define DESC_RATEMCS6 0x12
+#define DESC_RATEMCS7 0x13
+#define DESC_RATEMCS8 0x14
+#define DESC_RATEMCS9 0x15
+#define DESC_RATEMCS10 0x16
+#define DESC_RATEMCS11 0x17
+#define DESC_RATEMCS12 0x18
+#define DESC_RATEMCS13 0x19
+#define DESC_RATEMCS14 0x1a
+#define DESC_RATEMCS15 0x1b
+#define DESC_RATEMCS15_SG 0x1c
+#define DESC_RATEMCS32 0x20
+
+/* 1 Byte long (in unit of TU) */
+#define REG_P2P_CTWIN 0x0572
+#define REG_NOA_DESC_SEL 0x05CF
+#define REG_NOA_DESC_DURATION 0x05E0
+#define REG_NOA_DESC_INTERVAL 0x05E4
+#define REG_NOA_DESC_START 0x05E8
+#define REG_NOA_DESC_COUNT 0x05EC
+
+#include "HalVerDef.h"
+void dump_chip_info(struct HAL_VERSION ChipVersion);
+
+
+/* return the final channel plan decision */
+u8 hal_com_get_channel_plan(struct adapter *padapter,
+ u8 hw_channel_plan,
+ u8 sw_channel_plan,
+ u8 def_channel_plan,
+ bool AutoLoadFail
+);
+
+u8 MRateToHwRate(u8 rate);
+
+void HalSetBrateCfg(struct adapter *Adapter, u8 *mBratesOS, u16 *pBrateCfg);
+
+bool Hal_MappingOutPipe(struct adapter *pAdapter, u8 NumOutPipe);
+
+void hal_init_macaddr(struct adapter *adapter);
+
+void c2h_evt_clear(struct adapter *adapter);
+s32 c2h_evt_read(struct adapter *adapter, u8 *buf);
+
+#endif /* __HAL_COMMON_H__ */
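
A small sketch of composing a rate bitmap from the RATR_* bits, in the same spirit as the RATE_ALL_CCK / RATE_ALL_OFDM_AG composites above; the particular choice of basic rates in the example is illustrative, not taken from the driver.

/* Sketch: building and testing a rate bitmap from RATR_* bits. */
#include <stdint.h>
#include <stdio.h>

#define RATR_1M		0x00000001
#define RATR_2M		0x00000002
#define RATR_55M	0x00000004
#define RATR_11M	0x00000008
#define RATR_6M		0x00000010
#define RATR_12M	0x00000040
#define RATR_24M	0x00000100

#define RATE_ALL_CCK	(RATR_1M | RATR_2M | RATR_55M | RATR_11M)

int main(void)
{
	/* Example 2.4 GHz basic-rate set: all CCK plus three OFDM rates. */
	uint32_t basic = RATE_ALL_CCK | RATR_6M | RATR_12M | RATR_24M;

	printf("basic rate bitmap: 0x%08x\n", (unsigned int)basic);
	printf("11M enabled: %s\n", (basic & RATR_11M) ? "yes" : "no");
	return 0;
}
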
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
new file mode 100644
index 00000000000..439c3c941ba
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -0,0 +1,426 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __HAL_INTF_H__
+#define __HAL_INTF_H__
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <Hal8188EPhyCfg.h>
+
+enum RTL871X_HCI_TYPE {
+ RTW_PCIE = BIT0,
+ RTW_USB = BIT1,
+ RTW_SDIO = BIT2,
+ RTW_GSPI = BIT3,
+};
+
+enum _CHIP_TYPE {
+ NULL_CHIP_TYPE,
+ RTL8712_8188S_8191S_8192S,
+ RTL8188C_8192C,
+ RTL8192D,
+ RTL8723A,
+ RTL8188E,
+ MAX_CHIP_TYPE
+};
+
+enum hw_variables {
+ HW_VAR_MEDIA_STATUS,
+ HW_VAR_MEDIA_STATUS1,
+ HW_VAR_SET_OPMODE,
+ HW_VAR_MAC_ADDR,
+ HW_VAR_BSSID,
+ HW_VAR_INIT_RTS_RATE,
+ HW_VAR_BASIC_RATE,
+ HW_VAR_TXPAUSE,
+ HW_VAR_BCN_FUNC,
+ HW_VAR_CORRECT_TSF,
+ HW_VAR_CHECK_BSSID,
+ HW_VAR_MLME_DISCONNECT,
+ HW_VAR_MLME_SITESURVEY,
+ HW_VAR_MLME_JOIN,
+ HW_VAR_BEACON_INTERVAL,
+ HW_VAR_SLOT_TIME,
+ HW_VAR_RESP_SIFS,
+ HW_VAR_ACK_PREAMBLE,
+ HW_VAR_SEC_CFG,
+ HW_VAR_BCN_VALID,
+ HW_VAR_RF_TYPE,
+ HW_VAR_DM_FLAG,
+ HW_VAR_DM_FUNC_OP,
+ HW_VAR_DM_FUNC_SET,
+ HW_VAR_DM_FUNC_CLR,
+ HW_VAR_CAM_EMPTY_ENTRY,
+ HW_VAR_CAM_INVALID_ALL,
+ HW_VAR_CAM_WRITE,
+ HW_VAR_CAM_READ,
+ HW_VAR_AC_PARAM_VO,
+ HW_VAR_AC_PARAM_VI,
+ HW_VAR_AC_PARAM_BE,
+ HW_VAR_AC_PARAM_BK,
+ HW_VAR_ACM_CTRL,
+ HW_VAR_AMPDU_MIN_SPACE,
+ HW_VAR_AMPDU_FACTOR,
+ HW_VAR_RXDMA_AGG_PG_TH,
+ HW_VAR_SET_RPWM,
+ HW_VAR_H2C_FW_PWRMODE,
+ HW_VAR_H2C_FW_JOINBSSRPT,
+ HW_VAR_FWLPS_RF_ON,
+ HW_VAR_H2C_FW_P2P_PS_OFFLOAD,
+ HW_VAR_TDLS_WRCR,
+ HW_VAR_TDLS_INIT_CH_SEN,
+ HW_VAR_TDLS_RS_RCR,
+ HW_VAR_TDLS_DONE_CH_SEN,
+ HW_VAR_INITIAL_GAIN,
+ HW_VAR_TRIGGER_GPIO_0,
+ HW_VAR_BT_SET_COEXIST,
+ HW_VAR_BT_ISSUE_DELBA,
+ HW_VAR_CURRENT_ANTENNA,
+ HW_VAR_ANTENNA_DIVERSITY_LINK,
+ HW_VAR_ANTENNA_DIVERSITY_SELECT,
+ HW_VAR_SWITCH_EPHY_WoWLAN,
+ HW_VAR_EFUSE_USAGE,
+ HW_VAR_EFUSE_BYTES,
+ HW_VAR_EFUSE_BT_USAGE,
+ HW_VAR_EFUSE_BT_BYTES,
+ HW_VAR_FIFO_CLEARN_UP,
+ HW_VAR_CHECK_TXBUF,
+	HW_VAR_APFM_ON_MAC, /* Auto FSM to turn on; includes clock, isolation,
+			     * and power control for MAC only */
+	/* The valid upper NAV range for HW updating; if the true value is
+	 * larger than the upper range, the HW won't update it. */
+	/* Unit: microseconds. 0 means this function is disabled. */
+ HW_VAR_NAV_UPPER,
+ HW_VAR_RPT_TIMER_SETTING,
+ HW_VAR_TX_RPT_MAX_MACID,
+ HW_VAR_H2C_MEDIA_STATUS_RPT,
+ HW_VAR_CHK_HI_QUEUE_EMPTY,
+};
+
+enum hal_def_variable {
+ HAL_DEF_UNDERCORATEDSMOOTHEDPWDB,
+ HAL_DEF_IS_SUPPORT_ANT_DIV,
+ HAL_DEF_CURRENT_ANTENNA,
+ HAL_DEF_DRVINFO_SZ,
+ HAL_DEF_MAX_RECVBUF_SZ,
+ HAL_DEF_RX_PACKET_OFFSET,
+ HAL_DEF_DBG_DUMP_RXPKT,/* for dbg */
+ HAL_DEF_DBG_DM_FUNC,/* for dbg */
+ HAL_DEF_RA_DECISION_RATE,
+ HAL_DEF_RA_SGI,
+ HAL_DEF_PT_PWR_STATUS,
+ HW_VAR_MAX_RX_AMPDU_FACTOR,
+ HW_DEF_RA_INFO_DUMP,
+ HAL_DEF_DBG_DUMP_TXPKT,
+ HW_DEF_FA_CNT_DUMP,
+ HW_DEF_ODM_DBG_FLAG,
+};
+
+enum hal_odm_variable {
+ HAL_ODM_STA_INFO,
+ HAL_ODM_P2P_STATE,
+ HAL_ODM_WIFI_DISPLAY_STATE,
+};
+
+enum hal_intf_ps_func {
+ HAL_USB_SELECT_SUSPEND,
+ HAL_MAX_ID,
+};
+
+typedef s32 (*c2h_id_filter)(u8 id);
+
+struct hal_ops {
+ u32 (*hal_power_on)(struct adapter *padapter);
+ u32 (*hal_init)(struct adapter *padapter);
+ u32 (*hal_deinit)(struct adapter *padapter);
+
+ void (*free_hal_data)(struct adapter *padapter);
+
+ u32 (*inirp_init)(struct adapter *padapter);
+ u32 (*inirp_deinit)(struct adapter *padapter);
+
+ s32 (*init_xmit_priv)(struct adapter *padapter);
+ void (*free_xmit_priv)(struct adapter *padapter);
+
+ s32 (*init_recv_priv)(struct adapter *padapter);
+ void (*free_recv_priv)(struct adapter *padapter);
+
+ void (*InitSwLeds)(struct adapter *padapter);
+ void (*DeInitSwLeds)(struct adapter *padapter);
+
+ void (*dm_init)(struct adapter *padapter);
+ void (*dm_deinit)(struct adapter *padapter);
+ void (*read_chip_version)(struct adapter *padapter);
+
+ void (*init_default_value)(struct adapter *padapter);
+
+ void (*intf_chip_configure)(struct adapter *padapter);
+
+ void (*read_adapter_info)(struct adapter *padapter);
+
+ void (*enable_interrupt)(struct adapter *padapter);
+ void (*disable_interrupt)(struct adapter *padapter);
+ s32 (*interrupt_handler)(struct adapter *padapter);
+
+ void (*set_bwmode_handler)(struct adapter *padapter,
+ enum ht_channel_width Bandwidth,
+ u8 Offset);
+ void (*set_channel_handler)(struct adapter *padapter, u8 channel);
+
+ void (*hal_dm_watchdog)(struct adapter *padapter);
+
+ void (*SetHwRegHandler)(struct adapter *padapter, u8 variable,
+ u8 *val);
+ void (*GetHwRegHandler)(struct adapter *padapter, u8 variable,
+ u8 *val);
+
+ u8 (*GetHalDefVarHandler)(struct adapter *padapter,
+ enum hal_def_variable eVariable,
+ void *pValue);
+ u8 (*SetHalDefVarHandler)(struct adapter *padapter,
+ enum hal_def_variable eVariable,
+ void *pValue);
+
+ void (*GetHalODMVarHandler)(struct adapter *padapter,
+ enum hal_odm_variable eVariable,
+ void *pValue1, bool bSet);
+ void (*SetHalODMVarHandler)(struct adapter *padapter,
+ enum hal_odm_variable eVariable,
+ void *pValue1, bool bSet);
+
+ void (*UpdateRAMaskHandler)(struct adapter *padapter,
+ u32 mac_id, u8 rssi_level);
+ void (*SetBeaconRelatedRegistersHandler)(struct adapter *padapter);
+
+ void (*Add_RateATid)(struct adapter *adapter, u32 bitmap, u8 arg,
+ u8 rssi_level);
+ void (*run_thread)(struct adapter *adapter);
+ void (*cancel_thread)(struct adapter *adapter);
+
+ u8 (*AntDivBeforeLinkHandler)(struct adapter *adapter);
+ void (*AntDivCompareHandler)(struct adapter *adapter,
+ struct wlan_bssid_ex *dst,
+ struct wlan_bssid_ex *src);
+ u8 (*interface_ps_func)(struct adapter *padapter,
+ enum hal_intf_ps_func efunc_id, u8 *val);
+
+ s32 (*hal_xmit)(struct adapter *padapter,
+ struct xmit_frame *pxmitframe);
+ s32 (*mgnt_xmit)(struct adapter *padapter,
+ struct xmit_frame *pmgntframe);
+
+ u32 (*read_bbreg)(struct adapter *padapter, u32 RegAddr,
+ u32 BitMask);
+ void (*write_bbreg)(struct adapter *padapter, u32 RegAddr,
+ u32 BitMask, u32 Data);
+ u32 (*read_rfreg)(struct adapter *padapter,
+ enum rf_radio_path eRFPath, u32 RegAddr,
+ u32 BitMask);
+ void (*write_rfreg)(struct adapter *padapter,
+ enum rf_radio_path eRFPath, u32 RegAddr,
+ u32 BitMask, u32 Data);
+
+ void (*EfusePowerSwitch)(struct adapter *padapter, u8 bWrite,
+ u8 PwrState);
+ void (*ReadEFuse)(struct adapter *padapter, u8 efuseType, u16 _offset,
+ u16 _size_byte, u8 *pbuf, bool bPseudoTest);
+ void (*EFUSEGetEfuseDefinition)(struct adapter *padapter, u8 efuseType,
+ u8 type, void *pOut, bool bPseudoTest);
+ u16 (*EfuseGetCurrentSize)(struct adapter *padapter, u8 efuseType,
+ bool bPseudoTest);
+ int (*Efuse_PgPacketRead)(struct adapter *adapter, u8 offset,
+ u8 *data, bool bPseudoTest);
+ int (*Efuse_PgPacketWrite)(struct adapter *padapter, u8 offset,
+ u8 word_en, u8 *data, bool bPseudoTest);
+ u8 (*Efuse_WordEnableDataWrite)(struct adapter *padapter,
+ u16 efuse_addr, u8 word_en,
+ u8 *data, bool bPseudoTest);
+ bool (*Efuse_PgPacketWrite_BT)(struct adapter *padapter, u8 offset,
+ u8 word_en, u8 *data, bool test);
+
+ void (*sreset_init_value)(struct adapter *padapter);
+ void (*sreset_reset_value)(struct adapter *padapter);
+ void (*silentreset)(struct adapter *padapter);
+ void (*sreset_xmit_status_check)(struct adapter *padapter);
+ void (*sreset_linked_status_check) (struct adapter *padapter);
+ u8 (*sreset_get_wifi_status)(struct adapter *padapter);
+
+ int (*IOL_exec_cmds_sync)(struct adapter *padapter,
+ struct xmit_frame *frame, u32 max_wait,
+ u32 bndy_cnt);
+
+ void (*hal_notch_filter)(struct adapter *adapter, bool enable);
+ void (*hal_reset_security_engine)(struct adapter *adapter);
+ s32 (*c2h_handler)(struct adapter *padapter,
+ struct c2h_evt_hdr *c2h_evt);
+ c2h_id_filter c2h_id_filter_ccx;
+};
+
+enum rt_eeprom_type {
+ EEPROM_93C46,
+ EEPROM_93C56,
+ EEPROM_BOOT_EFUSE,
+};
+
+#define RF_CHANGE_BY_INIT 0
+#define RF_CHANGE_BY_IPS BIT28
+#define RF_CHANGE_BY_PS BIT29
+#define RF_CHANGE_BY_HW BIT30
+#define RF_CHANGE_BY_SW BIT31
+
+enum hardware_type {
+ HARDWARE_TYPE_RTL8180,
+ HARDWARE_TYPE_RTL8185,
+ HARDWARE_TYPE_RTL8187,
+ HARDWARE_TYPE_RTL8188,
+ HARDWARE_TYPE_RTL8190P,
+ HARDWARE_TYPE_RTL8192E,
+ HARDWARE_TYPE_RTL819xU,
+ HARDWARE_TYPE_RTL8192SE,
+ HARDWARE_TYPE_RTL8192SU,
+ HARDWARE_TYPE_RTL8192CE,
+ HARDWARE_TYPE_RTL8192CU,
+ HARDWARE_TYPE_RTL8192DE,
+ HARDWARE_TYPE_RTL8192DU,
+ HARDWARE_TYPE_RTL8723AE,
+ HARDWARE_TYPE_RTL8723AU,
+ HARDWARE_TYPE_RTL8723AS,
+ HARDWARE_TYPE_RTL8188EE,
+ HARDWARE_TYPE_RTL8188EU,
+ HARDWARE_TYPE_RTL8188ES,
+ HARDWARE_TYPE_MAX,
+};
+
+/* RTL8188E Series */
+#define IS_HARDWARE_TYPE_8188EE(_Adapter) \
+(((struct adapter *)_Adapter)->HardwareType == HARDWARE_TYPE_RTL8188EE)
+#define IS_HARDWARE_TYPE_8188EU(_Adapter) \
+(((struct adapter *)_Adapter)->HardwareType == HARDWARE_TYPE_RTL8188EU)
+#define IS_HARDWARE_TYPE_8188ES(_Adapter) \
+(((struct adapter *)_Adapter)->HardwareType == HARDWARE_TYPE_RTL8188ES)
+#define IS_HARDWARE_TYPE_8188E(_Adapter) \
+(IS_HARDWARE_TYPE_8188EE(_Adapter) || IS_HARDWARE_TYPE_8188EU(_Adapter) || \
+ IS_HARDWARE_TYPE_8188ES(_Adapter))
+
+#define GET_EEPROM_EFUSE_PRIV(adapter) (&adapter->eeprompriv)
+
+#define is_boot_from_eeprom(adapter) (adapter->eeprompriv.EepromOrEfuse)
+
+void rtw_hal_def_value_init(struct adapter *padapter);
+
+void rtw_hal_free_data(struct adapter *padapter);
+
+void rtw_hal_dm_init(struct adapter *padapter);
+void rtw_hal_dm_deinit(struct adapter *padapter);
+void rtw_hal_sw_led_init(struct adapter *padapter);
+void rtw_hal_sw_led_deinit(struct adapter *padapter);
+
+u32 rtw_hal_power_on(struct adapter *padapter);
+uint rtw_hal_init(struct adapter *padapter);
+uint rtw_hal_deinit(struct adapter *padapter);
+void rtw_hal_stop(struct adapter *padapter);
+void rtw_hal_set_hwreg(struct adapter *padapter, u8 variable, u8 *val);
+void rtw_hal_get_hwreg(struct adapter *padapter, u8 variable, u8 *val);
+
+void rtw_hal_chip_configure(struct adapter *padapter);
+void rtw_hal_read_chip_info(struct adapter *padapter);
+void rtw_hal_read_chip_version(struct adapter *padapter);
+
+u8 rtw_hal_set_def_var(struct adapter *padapter,
+ enum hal_def_variable eVariable, void *pValue);
+u8 rtw_hal_get_def_var(struct adapter *padapter,
+ enum hal_def_variable eVariable, void *pValue);
+
+void rtw_hal_set_odm_var(struct adapter *padapter,
+ enum hal_odm_variable eVariable, void *pValue1,
+ bool bSet);
+void rtw_hal_get_odm_var(struct adapter *padapter,
+ enum hal_odm_variable eVariable,
+ void *pValue1, bool bSet);
+
+void rtw_hal_enable_interrupt(struct adapter *padapter);
+void rtw_hal_disable_interrupt(struct adapter *padapter);
+
+u32 rtw_hal_inirp_init(struct adapter *padapter);
+u32 rtw_hal_inirp_deinit(struct adapter *padapter);
+
+u8 rtw_hal_intf_ps_func(struct adapter *padapter,
+ enum hal_intf_ps_func efunc_id, u8 *val);
+
+s32 rtw_hal_xmit(struct adapter *padapter, struct xmit_frame *pxmitframe);
+s32 rtw_hal_mgnt_xmit(struct adapter *padapter,
+ struct xmit_frame *pmgntframe);
+
+s32 rtw_hal_init_xmit_priv(struct adapter *padapter);
+void rtw_hal_free_xmit_priv(struct adapter *padapter);
+
+s32 rtw_hal_init_recv_priv(struct adapter *padapter);
+void rtw_hal_free_recv_priv(struct adapter *padapter);
+
+void rtw_hal_update_ra_mask(struct adapter *padapter, u32 mac_id, u8 level);
+void rtw_hal_add_ra_tid(struct adapter *adapt, u32 bitmap, u8 arg, u8 level);
+void rtw_hal_clone_data(struct adapter *dst_adapt,
+ struct adapter *src_adapt);
+void rtw_hal_start_thread(struct adapter *padapter);
+void rtw_hal_stop_thread(struct adapter *padapter);
+
+void rtw_hal_bcn_related_reg_setting(struct adapter *padapter);
+
+u32 rtw_hal_read_bbreg(struct adapter *padapter, u32 RegAddr, u32 BitMask);
+void rtw_hal_write_bbreg(struct adapter *padapter, u32 RegAddr, u32 BitMask,
+ u32 Data);
+u32 rtw_hal_read_rfreg(struct adapter *padapter, enum rf_radio_path eRFPath,
+ u32 RegAddr, u32 BitMask);
+void rtw_hal_write_rfreg(struct adapter *padapter,
+ enum rf_radio_path eRFPath, u32 RegAddr,
+ u32 BitMask, u32 Data);
+
+s32 rtw_hal_interrupt_handler(struct adapter *padapter);
+
+void rtw_hal_set_bwmode(struct adapter *padapter,
+ enum ht_channel_width Bandwidth, u8 Offset);
+void rtw_hal_set_chan(struct adapter *padapter, u8 channel);
+void rtw_hal_dm_watchdog(struct adapter *padapter);
+
+u8 rtw_hal_antdiv_before_linked(struct adapter *padapter);
+void rtw_hal_antdiv_rssi_compared(struct adapter *padapter,
+ struct wlan_bssid_ex *dst,
+ struct wlan_bssid_ex *src);
+
+void rtw_hal_sreset_init(struct adapter *padapter);
+void rtw_hal_sreset_reset(struct adapter *padapter);
+void rtw_hal_sreset_reset_value(struct adapter *padapter);
+void rtw_hal_sreset_xmit_status_check(struct adapter *padapter);
+void rtw_hal_sreset_linked_status_check(struct adapter *padapter);
+u8 rtw_hal_sreset_get_wifi_status(struct adapter *padapter);
+
+int rtw_hal_iol_cmd(struct adapter *adapter, struct xmit_frame *xmit_frame,
+ u32 max_wating_ms, u32 bndy_cnt);
+
+void rtw_hal_notch_filter(struct adapter *adapter, bool enable);
+void rtw_hal_reset_security_engine(struct adapter *adapter);
+
+s32 rtw_hal_c2h_handler(struct adapter *adapter,
+ struct c2h_evt_hdr *c2h_evt);
+c2h_id_filter rtw_hal_c2h_id_filter_ccx(struct adapter *adapter);
+void indicate_wx_scan_complete_event(struct adapter *padapter);
+u8 rtw_do_join(struct adapter *padapter);
+
+#endif /* __HAL_INTF_H__ */
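
A miniature model of the hal_ops dispatch pattern suggested by this header: the rtw_hal_* wrappers forward to function pointers in adapter->HalFunc that the chip-specific code fills in. The wrapper and callback names below are hypothetical stand-ins, and the adapter struct is trimmed to the single field the sketch needs.

/* Sketch: hal_ops-style dispatch through a function-pointer table. */
#include <stdio.h>

struct adapter;

struct hal_ops {
	unsigned int (*hal_init)(struct adapter *adapt);
};

struct adapter {
	struct hal_ops HalFunc;
};

/* Wrapper in the style of rtw_hal_init(): forward to the registered op. */
static unsigned int rtw_hal_init_sketch(struct adapter *adapt)
{
	return adapt->HalFunc.hal_init(adapt);
}

/* Chip-specific implementation registered into the ops table. */
static unsigned int rtl8188eu_hal_init(struct adapter *adapt)
{
	(void)adapt;
	printf("8188eu-specific init\n");
	return 0;
}

int main(void)
{
	struct adapter adapt = {
		.HalFunc = { .hal_init = rtl8188eu_hal_init },
	};

	return (int)rtw_hal_init_sketch(&adapt);
}
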
diff --git a/drivers/staging/rtl8188eu/include/ieee80211.h b/drivers/staging/rtl8188eu/include/ieee80211.h
new file mode 100644
index 00000000000..cd37ea4df4c
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/ieee80211.h
@@ -0,0 +1,1274 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __IEEE80211_H
+#define __IEEE80211_H
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include "wifi.h"
+#include <linux/wireless.h>
+
+#define MGMT_QUEUE_NUM 5
+
+#define ETH_ALEN 6
+#define ETH_TYPE_LEN 2
+#define PAYLOAD_TYPE_LEN 1
+
+#ifdef CONFIG_88EU_AP_MODE
+
+#define RTL_IOCTL_HOSTAPD (SIOCIWFIRSTPRIV + 28)
+
+/* RTL871X_IOCTL_HOSTAPD ioctl() cmd: */
+enum {
+ RTL871X_HOSTAPD_FLUSH = 1,
+ RTL871X_HOSTAPD_ADD_STA = 2,
+ RTL871X_HOSTAPD_REMOVE_STA = 3,
+ RTL871X_HOSTAPD_GET_INFO_STA = 4,
+ /* REMOVED: PRISM2_HOSTAPD_RESET_TXEXC_STA = 5, */
+ RTL871X_HOSTAPD_GET_WPAIE_STA = 5,
+ RTL871X_SET_ENCRYPTION = 6,
+ RTL871X_GET_ENCRYPTION = 7,
+ RTL871X_HOSTAPD_SET_FLAGS_STA = 8,
+ RTL871X_HOSTAPD_GET_RID = 9,
+ RTL871X_HOSTAPD_SET_RID = 10,
+ RTL871X_HOSTAPD_SET_ASSOC_AP_ADDR = 11,
+ RTL871X_HOSTAPD_SET_GENERIC_ELEMENT = 12,
+ RTL871X_HOSTAPD_MLME = 13,
+ RTL871X_HOSTAPD_SCAN_REQ = 14,
+ RTL871X_HOSTAPD_STA_CLEAR_STATS = 15,
+ RTL871X_HOSTAPD_SET_BEACON = 16,
+ RTL871X_HOSTAPD_SET_WPS_BEACON = 17,
+ RTL871X_HOSTAPD_SET_WPS_PROBE_RESP = 18,
+ RTL871X_HOSTAPD_SET_WPS_ASSOC_RESP = 19,
+ RTL871X_HOSTAPD_SET_HIDDEN_SSID = 20,
+ RTL871X_HOSTAPD_SET_MACADDR_ACL = 21,
+ RTL871X_HOSTAPD_ACL_ADD_STA = 22,
+ RTL871X_HOSTAPD_ACL_REMOVE_STA = 23,
+};
+
+/* STA flags */
+#define WLAN_STA_AUTH BIT(0)
+#define WLAN_STA_ASSOC BIT(1)
+#define WLAN_STA_PS BIT(2)
+#define WLAN_STA_TIM BIT(3)
+#define WLAN_STA_PERM BIT(4)
+#define WLAN_STA_AUTHORIZED BIT(5)
+#define WLAN_STA_PENDING_POLL BIT(6) /* pending activity poll not ACKed */
+#define WLAN_STA_SHORT_PREAMBLE BIT(7)
+#define WLAN_STA_PREAUTH BIT(8)
+#define WLAN_STA_WME BIT(9)
+#define WLAN_STA_MFP BIT(10)
+#define WLAN_STA_HT BIT(11)
+#define WLAN_STA_WPS BIT(12)
+#define WLAN_STA_MAYBE_WPS BIT(13)
+#define WLAN_STA_NONERP BIT(31)
+
+#endif
+
+#define IEEE_CMD_SET_WPA_PARAM 1
+#define IEEE_CMD_SET_WPA_IE 2
+#define IEEE_CMD_SET_ENCRYPTION 3
+#define IEEE_CMD_MLME 4
+
+#define IEEE_PARAM_WPA_ENABLED 1
+#define IEEE_PARAM_TKIP_COUNTERMEASURES 2
+#define IEEE_PARAM_DROP_UNENCRYPTED 3
+#define IEEE_PARAM_PRIVACY_INVOKED 4
+#define IEEE_PARAM_AUTH_ALGS 5
+#define IEEE_PARAM_IEEE_802_1X 6
+#define IEEE_PARAM_WPAX_SELECT 7
+
+#define AUTH_ALG_OPEN_SYSTEM 0x1
+#define AUTH_ALG_SHARED_KEY 0x2
+#define AUTH_ALG_LEAP 0x00000004
+
+#define IEEE_MLME_STA_DEAUTH 1
+#define IEEE_MLME_STA_DISASSOC 2
+
+#define IEEE_CRYPT_ERR_UNKNOWN_ALG 2
+#define IEEE_CRYPT_ERR_UNKNOWN_ADDR 3
+#define IEEE_CRYPT_ERR_CRYPT_INIT_FAILED 4
+#define IEEE_CRYPT_ERR_KEY_SET_FAILED 5
+#define IEEE_CRYPT_ERR_TX_KEY_SET_FAILED 6
+#define IEEE_CRYPT_ERR_CARD_CONF_FAILED 7
+
+
+#define IEEE_CRYPT_ALG_NAME_LEN 16
+
+#define WPA_CIPHER_NONE BIT(0)
+#define WPA_CIPHER_WEP40 BIT(1)
+#define WPA_CIPHER_WEP104 BIT(2)
+#define WPA_CIPHER_TKIP BIT(3)
+#define WPA_CIPHER_CCMP BIT(4)
+
+
+
+#define WPA_SELECTOR_LEN 4
+extern u8 RTW_WPA_OUI_TYPE[];
+extern u16 RTW_WPA_VERSION;
+extern u8 WPA_AUTH_KEY_MGMT_NONE[];
+extern u8 WPA_AUTH_KEY_MGMT_UNSPEC_802_1X[];
+extern u8 WPA_AUTH_KEY_MGMT_PSK_OVER_802_1X[];
+extern u8 WPA_CIPHER_SUITE_NONE[];
+extern u8 WPA_CIPHER_SUITE_WEP40[];
+extern u8 WPA_CIPHER_SUITE_TKIP[];
+extern u8 WPA_CIPHER_SUITE_WRAP[];
+extern u8 WPA_CIPHER_SUITE_CCMP[];
+extern u8 WPA_CIPHER_SUITE_WEP104[];
+
+
+#define RSN_HEADER_LEN 4
+#define RSN_SELECTOR_LEN 4
+
+extern u16 RSN_VERSION_BSD;
+extern u8 RSN_AUTH_KEY_MGMT_UNSPEC_802_1X[];
+extern u8 RSN_AUTH_KEY_MGMT_PSK_OVER_802_1X[];
+extern u8 RSN_CIPHER_SUITE_NONE[];
+extern u8 RSN_CIPHER_SUITE_WEP40[];
+extern u8 RSN_CIPHER_SUITE_TKIP[];
+extern u8 RSN_CIPHER_SUITE_WRAP[];
+extern u8 RSN_CIPHER_SUITE_CCMP[];
+extern u8 RSN_CIPHER_SUITE_WEP104[];
+
+enum ratr_table_mode {
+	RATR_INX_WIRELESS_NGB = 0, /* BGN 40 MHz 2SS 1SS */
+	RATR_INX_WIRELESS_NG = 1, /* GN or N */
+	RATR_INX_WIRELESS_NB = 2, /* BGN 20 MHz 2SS 1SS or BN */
+ RATR_INX_WIRELESS_N = 3,
+ RATR_INX_WIRELESS_GB = 4,
+ RATR_INX_WIRELESS_G = 5,
+ RATR_INX_WIRELESS_B = 6,
+ RATR_INX_WIRELESS_MC = 7,
+ RATR_INX_WIRELESS_AC_N = 8,
+};
+
+enum NETWORK_TYPE {
+ WIRELESS_INVALID = 0,
+ /* Sub-Element */
+ WIRELESS_11B = BIT(0), /* tx:cck only, rx:cck only, hw: cck */
+ WIRELESS_11G = BIT(1), /* tx:ofdm only, rx:ofdm & cck, hw:cck & ofdm*/
+ WIRELESS_11A = BIT(2), /* tx:ofdm only, rx: ofdm only, hw:ofdm only */
+ WIRELESS_11_24N = BIT(3), /* tx:MCS only, rx:MCS & cck, hw:MCS & cck */
+ WIRELESS_11_5N = BIT(4), /* tx:MCS only, rx:MCS & ofdm, hw:ofdm only */
+ WIRELESS_AC = BIT(6),
+
+ /* Combination */
+ /* tx: cck & ofdm, rx: cck & ofdm & MCS, hw: cck & ofdm */
+ WIRELESS_11BG = (WIRELESS_11B | WIRELESS_11G),
+ /* tx: ofdm & MCS, rx: ofdm & cck & MCS, hw: cck & ofdm */
+ WIRELESS_11G_24N = (WIRELESS_11G | WIRELESS_11_24N),
+ /* tx: ofdm & MCS, rx: ofdm & MCS, hw: ofdm only */
+ WIRELESS_11A_5N = (WIRELESS_11A | WIRELESS_11_5N),
+ /* tx: ofdm & cck & MCS, rx: ofdm & cck & MCS, hw: ofdm & cck */
+ WIRELESS_11BG_24N = (WIRELESS_11B | WIRELESS_11G | WIRELESS_11_24N),
+ /* tx: ofdm & MCS, rx: ofdm & MCS, hw: ofdm only */
+ WIRELESS_11AGN = (WIRELESS_11A | WIRELESS_11G | WIRELESS_11_24N |
+ WIRELESS_11_5N),
+ WIRELESS_11ABGN = (WIRELESS_11A | WIRELESS_11B | WIRELESS_11G |
+ WIRELESS_11_24N | WIRELESS_11_5N),
+};
+
+#define SUPPORTED_24G_NETTYPE_MSK \
+ (WIRELESS_11B | WIRELESS_11G | WIRELESS_11_24N)
+#define SUPPORTED_5G_NETTYPE_MSK \
+ (WIRELESS_11A | WIRELESS_11_5N)
+
+#define IsSupported24G(NetType) \
+ ((NetType) & SUPPORTED_24G_NETTYPE_MSK ? true : false)
+#define IsSupported5G(NetType) \
+ ((NetType) & SUPPORTED_5G_NETTYPE_MSK ? true : false)
+
+#define IsEnableHWCCK(NetType) \
+ IsSupported24G(NetType)
+#define IsEnableHWOFDM(NetType) \
+ ((NetType) & (WIRELESS_11G | WIRELESS_11_24N | \
+ SUPPORTED_5G_NETTYPE_MSK) ? true : false)
+
+#define IsSupportedRxCCK(NetType) IsEnableHWCCK(NetType)
+#define IsSupportedRxOFDM(NetType) IsEnableHWOFDM(NetType)
+#define IsSupportedRxMCS(NetType) IsEnableHWOFDM(NetType)
+
+#define IsSupportedTxCCK(NetType) \
+ ((NetType) & (WIRELESS_11B) ? true : false)
+#define IsSupportedTxOFDM(NetType) \
+ ((NetType) & (WIRELESS_11G|WIRELESS_11A) ? true : false)
+#define IsSupportedTxMCS(NetType) \
+ ((NetType) & (WIRELESS_11_24N|WIRELESS_11_5N) ? true : false)
+
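
As a standalone check of the NETWORK_TYPE bit compositions above: a BGN station should register as 2.4 GHz capable but not 5 GHz capable. The masks are copied here in simplified form so the snippet builds on its own.

/* Sketch: NETWORK_TYPE masks and the IsSupported24G/5G predicates. */
#include <stdio.h>

#define BIT(x)		(1 << (x))
#define WIRELESS_11B	BIT(0)
#define WIRELESS_11G	BIT(1)
#define WIRELESS_11A	BIT(2)
#define WIRELESS_11_24N	BIT(3)
#define WIRELESS_11_5N	BIT(4)

#define SUPPORTED_24G_NETTYPE_MSK (WIRELESS_11B | WIRELESS_11G | WIRELESS_11_24N)
#define SUPPORTED_5G_NETTYPE_MSK  (WIRELESS_11A | WIRELESS_11_5N)

#define IsSupported24G(t) (((t) & SUPPORTED_24G_NETTYPE_MSK) ? 1 : 0)
#define IsSupported5G(t)  (((t) & SUPPORTED_5G_NETTYPE_MSK) ? 1 : 0)

int main(void)
{
	int bgn = WIRELESS_11B | WIRELESS_11G | WIRELESS_11_24N;

	printf("BGN: 2.4G=%d 5G=%d\n", IsSupported24G(bgn), IsSupported5G(bgn));
	return 0;
}
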
+
+struct ieee_param {
+ u32 cmd;
+ u8 sta_addr[ETH_ALEN];
+ union {
+ struct {
+ u8 name;
+ u32 value;
+ } wpa_param;
+ struct {
+ u32 len;
+ u8 reserved[32];
+ u8 data[0];
+ } wpa_ie;
+ struct {
+ int command;
+ int reason_code;
+ } mlme;
+ struct {
+ u8 alg[IEEE_CRYPT_ALG_NAME_LEN];
+ u8 set_tx;
+ u32 err;
+ u8 idx;
+ u8 seq[8]; /* sequence counter (set: RX, get: TX) */
+ u16 key_len;
+ u8 key[0];
+ } crypt;
+#ifdef CONFIG_88EU_AP_MODE
+ struct {
+ u16 aid;
+ u16 capability;
+ int flags;
+ u8 tx_supp_rates[16];
+ struct rtw_ieee80211_ht_cap ht_cap;
+ } add_sta;
+ struct {
+ u8 reserved[2];/* for set max_num_sta */
+ u8 buf[0];
+ } bcn_ie;
+#endif
+
+ } u;
+};
+
+#ifdef CONFIG_88EU_AP_MODE
+struct ieee_param_ex {
+ u32 cmd;
+ u8 sta_addr[ETH_ALEN];
+ u8 data[0];
+};
+
+struct sta_data {
+ u16 aid;
+ u16 capability;
+ int flags;
+ u32 sta_set;
+ u8 tx_supp_rates[16];
+ u32 tx_supp_rates_len;
+ struct rtw_ieee80211_ht_cap ht_cap;
+ u64 rx_pkts;
+ u64 rx_bytes;
+ u64 rx_drops;
+ u64 tx_pkts;
+ u64 tx_bytes;
+ u64 tx_drops;
+};
+#endif
+
+#define IEEE80211_DATA_LEN 2304
+/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
+   6.2.1.1.2.
+
+   The figure in section 7.1.2 suggests a body size of up to 2312
+   bytes is allowed, which is a bit confusing; I suspect this
+   represents the 2304 bytes of real data plus a possible 8 bytes of
+   WEP IV and ICV. (This interpretation was suggested by Ramiro Barreiro.) */
+
+
+#define IEEE80211_HLEN 30
+#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+
+/* this is stolen from ipw2200 driver */
+#define IEEE_IBSS_MAC_HASH_SIZE 31
+
+struct ieee_ibss_seq {
+ u8 mac[ETH_ALEN];
+ u16 seq_num;
+ u16 frag_num;
+ unsigned long packet_time;
+ struct list_head list;
+};
+
+struct rtw_ieee80211_hdr {
+ u16 frame_ctl;
+ u16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ u16 seq_ctl;
+ u8 addr4[ETH_ALEN];
+} __packed;
+
+struct rtw_ieee80211_hdr_3addr {
+ u16 frame_ctl;
+ u16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ u16 seq_ctl;
+} __packed;
+
+struct rtw_ieee80211_hdr_qos {
+ u16 frame_ctl;
+ u16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ u16 seq_ctl;
+ u8 addr4[ETH_ALEN];
+ u16 qc;
+} __packed;
+
+struct rtw_ieee80211_hdr_3addr_qos {
+ u16 frame_ctl;
+ u16 duration_id;
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
+ u16 seq_ctl;
+ u16 qc;
+} __packed;
+
+struct eapol {
+ u8 snap[6];
+ u16 ethertype;
+ u8 version;
+ u8 type;
+ u16 length;
+} __packed;
+
+enum eap_type {
+ EAP_PACKET = 0,
+ EAPOL_START,
+ EAPOL_LOGOFF,
+ EAPOL_KEY,
+ EAPOL_ENCAP_ASF_ALERT
+};
+
+#define IEEE80211_3ADDR_LEN 24
+#define IEEE80211_4ADDR_LEN 30
+#define IEEE80211_FCS_LEN 4
+
+#define MIN_FRAG_THRESHOLD 256U
+#define MAX_FRAG_THRESHOLD 2346U
+
+/* Frame control field constants */
+#define RTW_IEEE80211_FCTL_VERS 0x0003
+#define RTW_IEEE80211_FCTL_FTYPE 0x000c
+#define RTW_IEEE80211_FCTL_STYPE 0x00f0
+#define RTW_IEEE80211_FCTL_TODS 0x0100
+#define RTW_IEEE80211_FCTL_FROMDS 0x0200
+#define RTW_IEEE80211_FCTL_MOREFRAGS 0x0400
+#define RTW_IEEE80211_FCTL_RETRY 0x0800
+#define RTW_IEEE80211_FCTL_PM 0x1000
+#define RTW_IEEE80211_FCTL_MOREDATA 0x2000
+#define RTW_IEEE80211_FCTL_PROTECTED 0x4000
+#define RTW_IEEE80211_FCTL_ORDER 0x8000
+#define RTW_IEEE80211_FCTL_CTL_EXT 0x0f00
+
+#define RTW_IEEE80211_FTYPE_MGMT 0x0000
+#define RTW_IEEE80211_FTYPE_CTL 0x0004
+#define RTW_IEEE80211_FTYPE_DATA 0x0008
+#define RTW_IEEE80211_FTYPE_EXT 0x000c
+
+/* management */
+#define RTW_IEEE80211_STYPE_ASSOC_REQ 0x0000
+#define RTW_IEEE80211_STYPE_ASSOC_RESP 0x0010
+#define RTW_IEEE80211_STYPE_REASSOC_REQ 0x0020
+#define RTW_IEEE80211_STYPE_REASSOC_RESP 0x0030
+#define RTW_IEEE80211_STYPE_PROBE_REQ 0x0040
+#define RTW_IEEE80211_STYPE_PROBE_RESP 0x0050
+#define RTW_IEEE80211_STYPE_BEACON 0x0080
+#define RTW_IEEE80211_STYPE_ATIM 0x0090
+#define RTW_IEEE80211_STYPE_DISASSOC 0x00A0
+#define RTW_IEEE80211_STYPE_AUTH 0x00B0
+#define RTW_IEEE80211_STYPE_DEAUTH 0x00C0
+#define RTW_IEEE80211_STYPE_ACTION 0x00D0
+
+/* control */
+#define RTW_IEEE80211_STYPE_CTL_EXT 0x0060
+#define RTW_IEEE80211_STYPE_BACK_REQ 0x0080
+#define RTW_IEEE80211_STYPE_BACK 0x0090
+#define RTW_IEEE80211_STYPE_PSPOLL 0x00A0
+#define RTW_IEEE80211_STYPE_RTS 0x00B0
+#define RTW_IEEE80211_STYPE_CTS 0x00C0
+#define RTW_IEEE80211_STYPE_ACK 0x00D0
+#define RTW_IEEE80211_STYPE_CFEND 0x00E0
+#define RTW_IEEE80211_STYPE_CFENDACK 0x00F0
+
+/* data */
+#define RTW_IEEE80211_STYPE_DATA 0x0000
+#define RTW_IEEE80211_STYPE_DATA_CFACK 0x0010
+#define RTW_IEEE80211_STYPE_DATA_CFPOLL 0x0020
+#define RTW_IEEE80211_STYPE_DATA_CFACKPOLL 0x0030
+#define RTW_IEEE80211_STYPE_NULLFUNC 0x0040
+#define RTW_IEEE80211_STYPE_CFACK 0x0050
+#define RTW_IEEE80211_STYPE_CFPOLL 0x0060
+#define RTW_IEEE80211_STYPE_CFACKPOLL 0x0070
+#define RTW_IEEE80211_STYPE_QOS_DATA 0x0080
+#define RTW_IEEE80211_STYPE_QOS_DATA_CFACK 0x0090
+#define RTW_IEEE80211_STYPE_QOS_DATA_CFPOLL 0x00A0
+#define RTW_IEEE80211_STYPE_QOS_DATA_CFACKPOLL 0x00B0
+#define RTW_IEEE80211_STYPE_QOS_NULLFUNC 0x00C0
+#define RTW_IEEE80211_STYPE_QOS_CFACK 0x00D0
+#define RTW_IEEE80211_STYPE_QOS_CFPOLL 0x00E0
+#define RTW_IEEE80211_STYPE_QOS_CFACKPOLL 0x00F0
+
+/* sequence control field */
+#define RTW_IEEE80211_SCTL_FRAG 0x000F
+#define RTW_IEEE80211_SCTL_SEQ 0xFFF0
+
+
+#define RTW_ERP_INFO_NON_ERP_PRESENT BIT(0)
+#define RTW_ERP_INFO_USE_PROTECTION BIT(1)
+#define RTW_ERP_INFO_BARKER_PREAMBLE_MODE BIT(2)
+
+/* QoS ACK policy */
+#define NORMAL_ACK 0
+#define NO_ACK 1
+#define NON_EXPLICIT_ACK 2
+#define BLOCK_ACK 3
+
+#ifndef ETH_P_PAE
+#define ETH_P_PAE 0x888E /* Port Access Entity (IEEE 802.1X) */
+#endif /* ETH_P_PAE */
+
+#define ETH_P_PREAUTH 0x88C7 /* IEEE 802.11i pre-authentication */
+
+#define ETH_P_ECONET 0x0018
+
+#ifndef ETH_P_80211_RAW
+#define ETH_P_80211_RAW (ETH_P_ECONET + 1)
+#endif
+
+/* IEEE 802.11 defines */
+
+#define P80211_OUI_LEN 3
+
+struct ieee80211_snap_hdr {
+ u8 dsap; /* always 0xAA */
+ u8 ssap; /* always 0xAA */
+ u8 ctrl; /* always 0x03 */
+ u8 oui[P80211_OUI_LEN]; /* organizationally unique identifier */
+} __packed;
+
+#define SNAP_SIZE sizeof(struct ieee80211_snap_hdr)
+
+#define WLAN_FC_GET_TYPE(fc) ((fc) & RTW_IEEE80211_FCTL_FTYPE)
+#define WLAN_FC_GET_STYPE(fc) ((fc) & RTW_IEEE80211_FCTL_STYPE)
+
+#define WLAN_QC_GET_TID(qc) ((qc) & 0x0f)
+
+#define WLAN_GET_SEQ_FRAG(seq) ((seq) & RTW_IEEE80211_SCTL_FRAG)
+#define WLAN_GET_SEQ_SEQ(seq) ((seq) & RTW_IEEE80211_SCTL_SEQ)
+
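+/* Illustrative sketch (hypothetical helper name): the FTYPE/STYPE masks and
+ * the WLAN_FC_GET_* helpers above are combined like this to classify a frame
+ * from its frame control value, assumed already in CPU byte order.
+ */
+static inline int rtw_example_fc_is_beacon(u16 fc)
+{
+ return WLAN_FC_GET_TYPE(fc) == RTW_IEEE80211_FTYPE_MGMT &&
+ WLAN_FC_GET_STYPE(fc) == RTW_IEEE80211_STYPE_BEACON;
+}
+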
+/* Authentication algorithms */
+#define WLAN_AUTH_OPEN 0
+#define WLAN_AUTH_SHARED_KEY 1
+
+#define WLAN_AUTH_CHALLENGE_LEN 128
+
+#define WLAN_CAPABILITY_BSS (1<<0)
+#define WLAN_CAPABILITY_IBSS (1<<1)
+#define WLAN_CAPABILITY_CF_POLLABLE (1<<2)
+#define WLAN_CAPABILITY_CF_POLL_REQUEST (1<<3)
+#define WLAN_CAPABILITY_PRIVACY (1<<4)
+#define WLAN_CAPABILITY_SHORT_PREAMBLE (1<<5)
+#define WLAN_CAPABILITY_PBCC (1<<6)
+#define WLAN_CAPABILITY_CHANNEL_AGILITY (1<<7)
+#define WLAN_CAPABILITY_SHORT_SLOT (1<<10)
+
+/* Status codes */
+#define WLAN_STATUS_SUCCESS 0
+#define WLAN_STATUS_UNSPECIFIED_FAILURE 1
+#define WLAN_STATUS_CAPS_UNSUPPORTED 10
+#define WLAN_STATUS_REASSOC_NO_ASSOC 11
+#define WLAN_STATUS_ASSOC_DENIED_UNSPEC 12
+#define WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG 13
+#define WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION 14
+#define WLAN_STATUS_CHALLENGE_FAIL 15
+#define WLAN_STATUS_AUTH_TIMEOUT 16
+#define WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA 17
+#define WLAN_STATUS_ASSOC_DENIED_RATES 18
+/* 802.11b */
+#define WLAN_STATUS_ASSOC_DENIED_NOSHORT 19
+#define WLAN_STATUS_ASSOC_DENIED_NOPBCC 20
+#define WLAN_STATUS_ASSOC_DENIED_NOAGILITY 21
+
+/* Reason codes */
+#define WLAN_REASON_UNSPECIFIED 1
+#define WLAN_REASON_PREV_AUTH_NOT_VALID 2
+#define WLAN_REASON_DEAUTH_LEAVING 3
+#define WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY 4
+#define WLAN_REASON_DISASSOC_AP_BUSY 5
+#define WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA 6
+#define WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA 7
+#define WLAN_REASON_DISASSOC_STA_HAS_LEFT 8
+#define WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH 9
+#define WLAN_REASON_JOIN_WRONG_CHANNEL 65534
+#define WLAN_REASON_EXPIRATION_CHK 65535
+
+/* Information Element IDs */
+#define WLAN_EID_SSID 0
+#define WLAN_EID_SUPP_RATES 1
+#define WLAN_EID_FH_PARAMS 2
+#define WLAN_EID_DS_PARAMS 3
+#define WLAN_EID_CF_PARAMS 4
+#define WLAN_EID_TIM 5
+#define WLAN_EID_IBSS_PARAMS 6
+#define WLAN_EID_CHALLENGE 16
+/* EIDs defined by IEEE 802.11h - START */
+#define WLAN_EID_PWR_CONSTRAINT 32
+#define WLAN_EID_PWR_CAPABILITY 33
+#define WLAN_EID_TPC_REQUEST 34
+#define WLAN_EID_TPC_REPORT 35
+#define WLAN_EID_SUPPORTED_CHANNELS 36
+#define WLAN_EID_CHANNEL_SWITCH 37
+#define WLAN_EID_MEASURE_REQUEST 38
+#define WLAN_EID_MEASURE_REPORT 39
+#define WLAN_EID_QUITE 40
+#define WLAN_EID_IBSS_DFS 41
+/* EIDs defined by IEEE 802.11h - END */
+#define WLAN_EID_ERP_INFO 42
+#define WLAN_EID_HT_CAP 45
+#define WLAN_EID_RSN 48
+#define WLAN_EID_EXT_SUPP_RATES 50
+#define WLAN_EID_MOBILITY_DOMAIN 54
+#define WLAN_EID_FAST_BSS_TRANSITION 55
+#define WLAN_EID_TIMEOUT_INTERVAL 56
+#define WLAN_EID_RIC_DATA 57
+#define WLAN_EID_HT_OPERATION 61
+#define WLAN_EID_SECONDARY_CHANNEL_OFFSET 62
+#define WLAN_EID_20_40_BSS_COEXISTENCE 72
+#define WLAN_EID_20_40_BSS_INTOLERANT 73
+#define WLAN_EID_OVERLAPPING_BSS_SCAN_PARAMS 74
+#define WLAN_EID_MMIE 76
+#define WLAN_EID_VENDOR_SPECIFIC 221
+#define WLAN_EID_GENERIC (WLAN_EID_VENDOR_SPECIFIC)
+
+#define IEEE80211_MGMT_HDR_LEN 24
+#define IEEE80211_DATA_HDR3_LEN 24
+#define IEEE80211_DATA_HDR4_LEN 30
+
+
+#define IEEE80211_STATMASK_SIGNAL (1<<0)
+#define IEEE80211_STATMASK_RSSI (1<<1)
+#define IEEE80211_STATMASK_NOISE (1<<2)
+#define IEEE80211_STATMASK_RATE (1<<3)
+#define IEEE80211_STATMASK_WEMASK 0x7
+
+
+#define IEEE80211_CCK_MODULATION (1<<0)
+#define IEEE80211_OFDM_MODULATION (1<<1)
+
+#define IEEE80211_24GHZ_BAND (1<<0)
+#define IEEE80211_52GHZ_BAND (1<<1)
+
+#define IEEE80211_CCK_RATE_LEN 4
+#define IEEE80211_NUM_OFDM_RATESLEN 8
+
+
+#define IEEE80211_CCK_RATE_1MB 0x02
+#define IEEE80211_CCK_RATE_2MB 0x04
+#define IEEE80211_CCK_RATE_5MB 0x0B
+#define IEEE80211_CCK_RATE_11MB 0x16
+#define IEEE80211_OFDM_RATE_LEN 8
+#define IEEE80211_OFDM_RATE_6MB 0x0C
+#define IEEE80211_OFDM_RATE_9MB 0x12
+#define IEEE80211_OFDM_RATE_12MB 0x18
+#define IEEE80211_OFDM_RATE_18MB 0x24
+#define IEEE80211_OFDM_RATE_24MB 0x30
+#define IEEE80211_OFDM_RATE_36MB 0x48
+#define IEEE80211_OFDM_RATE_48MB 0x60
+#define IEEE80211_OFDM_RATE_54MB 0x6C
+#define IEEE80211_BASIC_RATE_MASK 0x80
+
+#define IEEE80211_CCK_RATE_1MB_MASK (1<<0)
+#define IEEE80211_CCK_RATE_2MB_MASK (1<<1)
+#define IEEE80211_CCK_RATE_5MB_MASK (1<<2)
+#define IEEE80211_CCK_RATE_11MB_MASK (1<<3)
+#define IEEE80211_OFDM_RATE_6MB_MASK (1<<4)
+#define IEEE80211_OFDM_RATE_9MB_MASK (1<<5)
+#define IEEE80211_OFDM_RATE_12MB_MASK (1<<6)
+#define IEEE80211_OFDM_RATE_18MB_MASK (1<<7)
+#define IEEE80211_OFDM_RATE_24MB_MASK (1<<8)
+#define IEEE80211_OFDM_RATE_36MB_MASK (1<<9)
+#define IEEE80211_OFDM_RATE_48MB_MASK (1<<10)
+#define IEEE80211_OFDM_RATE_54MB_MASK (1<<11)
+
+#define IEEE80211_CCK_RATES_MASK 0x0000000F
+#define IEEE80211_CCK_BASIC_RATES_MASK (IEEE80211_CCK_RATE_1MB_MASK | \
+ IEEE80211_CCK_RATE_2MB_MASK)
+#define IEEE80211_CCK_DEFAULT_RATES_MASK \
+ (IEEE80211_CCK_BASIC_RATES_MASK | \
+ IEEE80211_CCK_RATE_5MB_MASK | \
+ IEEE80211_CCK_RATE_11MB_MASK)
+
+#define IEEE80211_OFDM_RATES_MASK 0x00000FF0
+#define IEEE80211_OFDM_BASIC_RATES_MASK (IEEE80211_OFDM_RATE_6MB_MASK | \
+ IEEE80211_OFDM_RATE_12MB_MASK | \
+ IEEE80211_OFDM_RATE_24MB_MASK)
+#define IEEE80211_OFDM_DEFAULT_RATES_MASK \
+ (IEEE80211_OFDM_BASIC_RATES_MASK | \
+ IEEE80211_OFDM_RATE_9MB_MASK | \
+ IEEE80211_OFDM_RATE_18MB_MASK | \
+ IEEE80211_OFDM_RATE_36MB_MASK | \
+ IEEE80211_OFDM_RATE_48MB_MASK | \
+ IEEE80211_OFDM_RATE_54MB_MASK)
+#define IEEE80211_DEFAULT_RATES_MASK \
+ (IEEE80211_OFDM_DEFAULT_RATES_MASK | \
+ IEEE80211_CCK_DEFAULT_RATES_MASK)
+
+#define IEEE80211_NUM_OFDM_RATES 8
+#define IEEE80211_NUM_CCK_RATES 4
+#define IEEE80211_OFDM_SHIFT_MASK_A 4
+
+/* NOTE: This data is for statistical purposes; not all hardware provides this
+ * information for frames received. Not setting these will not cause
+ * any adverse effects. */
+struct ieee80211_rx_stats {
+ /* u32 mac_time[2]; */
+ s8 rssi;
+ u8 signal;
+ u8 noise;
+ u8 received_channel;
+ u16 rate; /* in 100 kbps */
+ /* u8 control; */
+ u8 mask;
+ u8 freq;
+ u16 len;
+};
+
+/* IEEE 802.11 requires that STA supports concurrent reception of at least
+ * three fragmented frames. This define can be increased to support more
+ * concurrent frames, but it should be noted that each entry can consume about
+ * 2 kB of RAM and increasing cache size will slow down frame reassembly. */
+#define IEEE80211_FRAG_CACHE_LEN 4
+
+struct ieee80211_frag_entry {
+ u32 first_frag_time;
+ uint seq;
+ uint last_frag;
+ uint qos; /* jackson */
+ uint tid; /* jackson */
+ struct sk_buff *skb;
+ u8 src_addr[ETH_ALEN];
+ u8 dst_addr[ETH_ALEN];
+};
+
+struct ieee80211_stats {
+ uint tx_unicast_frames;
+ uint tx_multicast_frames;
+ uint tx_fragments;
+ uint tx_unicast_octets;
+ uint tx_multicast_octets;
+ uint tx_deferred_transmissions;
+ uint tx_single_retry_frames;
+ uint tx_multiple_retry_frames;
+ uint tx_retry_limit_exceeded;
+ uint tx_discards;
+ uint rx_unicast_frames;
+ uint rx_multicast_frames;
+ uint rx_fragments;
+ uint rx_unicast_octets;
+ uint rx_multicast_octets;
+ uint rx_fcs_errors;
+ uint rx_discards_no_buffer;
+ uint tx_discards_wrong_sa;
+ uint rx_discards_undecryptable;
+ uint rx_message_in_msg_fragments;
+ uint rx_message_in_bad_msg_fragments;
+};
+
+struct ieee80211_softmac_stats {
+ uint rx_ass_ok;
+ uint rx_ass_err;
+ uint rx_probe_rq;
+ uint tx_probe_rs;
+ uint tx_beacons;
+ uint rx_auth_rq;
+ uint rx_auth_rs_ok;
+ uint rx_auth_rs_err;
+ uint tx_auth_rq;
+ uint no_auth_rs;
+ uint no_ass_rs;
+ uint tx_ass_rq;
+ uint rx_ass_rq;
+ uint tx_probe_rq;
+ uint reassoc;
+ uint swtxstop;
+ uint swtxawake;
+};
+
+#define SEC_KEY_1 (1<<0)
+#define SEC_KEY_2 (1<<1)
+#define SEC_KEY_3 (1<<2)
+#define SEC_KEY_4 (1<<3)
+#define SEC_ACTIVE_KEY (1<<4)
+#define SEC_AUTH_MODE (1<<5)
+#define SEC_UNICAST_GROUP (1<<6)
+#define SEC_LEVEL (1<<7)
+#define SEC_ENABLED (1<<8)
+
+#define SEC_LEVEL_0 0 /* None */
+#define SEC_LEVEL_1 1 /* WEP 40 and 104 bit */
+#define SEC_LEVEL_2 2 /* Level 1 + TKIP */
+#define SEC_LEVEL_2_CKIP 3 /* Level 1 + CKIP */
+#define SEC_LEVEL_3 4 /* Level 2 + CCMP */
+
+#define WEP_KEYS 4
+#define WEP_KEY_LEN 13
+
+struct ieee80211_security {
+ u16 active_key:2,
+ enabled:1,
+ auth_mode:2,
+ auth_algo:4,
+ unicast_uses_group:1;
+ u8 key_sizes[WEP_KEYS];
+ u8 keys[WEP_KEYS][WEP_KEY_LEN];
+ u8 level;
+ u16 flags;
+} __packed;
+
+/*
+
+ 802.11 data frame from AP
+
+ ,-------------------------------------------------------------------.
+Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
+ |------|------|---------|---------|---------|------|---------|------|
+Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | frame | fcs |
+ | | tion | (BSSID) | | | ence | data | |
+ `-------------------------------------------------------------------'
+
+Total: 28-2340 bytes
+
+*/
+
+struct ieee80211_header_data {
+ u16 frame_ctl;
+ u16 duration_id;
+ u8 addr1[6];
+ u8 addr2[6];
+ u8 addr3[6];
+ u16 seq_ctrl;
+};
+
+#define BEACON_PROBE_SSID_ID_POSITION 12
+
+/* Management Frame Information Element Types */
+#define MFIE_TYPE_SSID 0
+#define MFIE_TYPE_RATES 1
+#define MFIE_TYPE_FH_SET 2
+#define MFIE_TYPE_DS_SET 3
+#define MFIE_TYPE_CF_SET 4
+#define MFIE_TYPE_TIM 5
+#define MFIE_TYPE_IBSS_SET 6
+#define MFIE_TYPE_CHALLENGE 16
+#define MFIE_TYPE_ERP 42
+#define MFIE_TYPE_RSN 48
+#define MFIE_TYPE_RATES_EX 50
+#define MFIE_TYPE_GENERIC 221
+
+struct ieee80211_info_element_hdr {
+ u8 id;
+ u8 len;
+} __packed;
+
+struct ieee80211_info_element {
+ u8 id;
+ u8 len;
+ u8 data[0];
+} __packed;
+
+/*
+ * These are the data types that can make up management packets
+ *
+ u16 auth_algorithm;
+ u16 auth_sequence;
+ u16 beacon_interval;
+ u16 capability;
+ u8 current_ap[ETH_ALEN];
+ u16 listen_interval;
+ struct {
+ u16 association_id:14, reserved:2;
+ } __packed;
+ u32 time_stamp[2];
+ u16 reason;
+ u16 status;
+*/
+
+#define IEEE80211_DEFAULT_TX_ESSID "Penguin"
+#define IEEE80211_DEFAULT_BASIC_RATE 10
+
+struct ieee80211_authentication {
+ struct ieee80211_header_data header;
+ u16 algorithm;
+ u16 transaction;
+ u16 status;
+ /* struct ieee80211_info_element_hdr info_element; */
+} __packed;
+
+struct ieee80211_probe_response {
+ struct ieee80211_header_data header;
+ u32 time_stamp[2];
+ u16 beacon_interval;
+ u16 capability;
+ struct ieee80211_info_element info_element;
+} __packed;
+
+struct ieee80211_probe_request {
+ struct ieee80211_header_data header;
+} __packed;
+
+struct ieee80211_assoc_request_frame {
+ struct rtw_ieee80211_hdr_3addr header;
+ u16 capability;
+ u16 listen_interval;
+ struct ieee80211_info_element_hdr info_element;
+} __packed;
+
+struct ieee80211_assoc_response_frame {
+ struct rtw_ieee80211_hdr_3addr header;
+ u16 capability;
+ u16 status;
+ u16 aid;
+} __packed;
+
+struct ieee80211_txb {
+ u8 nr_frags;
+ u8 encrypted;
+ u16 reserved;
+ u16 frag_size;
+ u16 payload_size;
+ struct sk_buff *fragments[0];
+};
+
+
+/* SWEEP TABLE ENTRIES NUMBER */
+#define MAX_SWEEP_TAB_ENTRIES 42
+#define MAX_SWEEP_TAB_ENTRIES_PER_PACKET 7
+/* MAX_RATES_LENGTH needs to be 12. The spec says 8, and many APs
+ * only use 8, and then use extended rates for the remaining supported
+ * rates. Other APs, however, stick all of their supported rates on the
+ * main rates information element... */
+#define MAX_RATES_LENGTH ((u8)12)
+#define MAX_RATES_EX_LENGTH ((u8)16)
+#define MAX_NETWORK_COUNT 128
+#define MAX_CHANNEL_NUMBER 161
+#define IEEE80211_SOFTMAC_SCAN_TIME 400
+/* (HZ / 2) */
+#define IEEE80211_SOFTMAC_ASSOC_RETRY_TIME (HZ * 2)
+
+#define CRC_LENGTH 4U
+
+#define MAX_WPA_IE_LEN (256)
+#define MAX_WPS_IE_LEN (512)
+#define MAX_P2P_IE_LEN (256)
+#define MAX_WFD_IE_LEN (128)
+
+#define NETWORK_EMPTY_ESSID (1<<0)
+#define NETWORK_HAS_OFDM (1<<1)
+#define NETWORK_HAS_CCK (1<<2)
+
+#define IEEE80211_DTIM_MBCAST 4
+#define IEEE80211_DTIM_UCAST 2
+#define IEEE80211_DTIM_VALID 1
+#define IEEE80211_DTIM_INVALID 0
+
+#define IEEE80211_PS_DISABLED 0
+#define IEEE80211_PS_UNICAST IEEE80211_DTIM_UCAST
+#define IEEE80211_PS_MBCAST IEEE80211_DTIM_MBCAST
+#define IW_ESSID_MAX_SIZE 32
+/*
+join_res:
+-1: authentication fail
+-2: association fail
+> 0: TID
+*/
+
+enum ieee80211_state {
+ /* the card is not linked at all */
+ IEEE80211_NOLINK = 0,
+
+ /* The IEEE80211_ASSOCIATING* states are for BSS client mode.
+ * The driver shall not perform RX filtering unless the state
+ * is LINKED; it shall just check for the LINKED state and
+ * default to NOLINK for all the other states (including
+ * LINKED_SCANNING).
+ */
+
+ /* the association procedure will start (wq scheduling)*/
+ IEEE80211_ASSOCIATING,
+ IEEE80211_ASSOCIATING_RETRY,
+
+ /* the association procedure is sending AUTH request*/
+ IEEE80211_ASSOCIATING_AUTHENTICATING,
+
+ /* the association procedure has successfully authenticated
+ * and is sending association request
+ */
+ IEEE80211_ASSOCIATING_AUTHENTICATED,
+
+ /* the link is ok: the card is associated to a BSS, joined
+ * to an IBSS cell, or acting as an AP and creating the BSS
+ */
+ IEEE80211_LINKED,
+
+ /* same as LINKED, but the driver shall apply the RX filter
+ * rules as if in NOLINK mode. The card is still logically
+ * linked, but it is doing a synchronous site survey; once the
+ * survey is done it will go back to the LINKED state.
+ */
+ IEEE80211_LINKED_SCANNING,
+
+};
+
+#define DEFAULT_MAX_SCAN_AGE (15 * HZ)
+#define DEFAULT_FTS 2346
+
+static inline int is_multicast_mac_addr(const u8 *addr)
+{
+ return ((addr[0] != 0xff) && (0x01 & addr[0]));
+}
+
+static inline int is_broadcast_mac_addr(const u8 *addr)
+{
+ return (addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) &&
+ (addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff);
+}
+
+#define CFG_IEEE80211_RESERVE_FCS (1<<0)
+#define CFG_IEEE80211_COMPUTE_FCS (1<<1)
+
+struct tx_pending {
+ int frag;
+ struct ieee80211_txb *txb;
+};
+
+#define MAXTID 16
+
+#define IEEE_A (1<<0)
+#define IEEE_B (1<<1)
+#define IEEE_G (1<<2)
+#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G)
+
+/* Baron move to ieee80211.c */
+int ieee80211_is_empty_essid(const char *essid, int essid_len);
+int ieee80211_get_hdrlen(u16 fc);
+
+/* Action category code */
+enum rtw_ieee80211_category {
+ RTW_WLAN_CATEGORY_SPECTRUM_MGMT = 0,
+ RTW_WLAN_CATEGORY_QOS = 1,
+ RTW_WLAN_CATEGORY_DLS = 2,
+ RTW_WLAN_CATEGORY_BACK = 3,
+ RTW_WLAN_CATEGORY_PUBLIC = 4, /* IEEE 802.11 public action frames */
+ RTW_WLAN_CATEGORY_RADIO_MEASUREMENT = 5,
+ RTW_WLAN_CATEGORY_FT = 6,
+ RTW_WLAN_CATEGORY_HT = 7,
+ RTW_WLAN_CATEGORY_SA_QUERY = 8,
+ RTW_WLAN_CATEGORY_TDLS = 12,
+ RTW_WLAN_CATEGORY_WMM = 17,
+ RTW_WLAN_CATEGORY_P2P = 0x7f,/* P2P action frames */
+};
+
+/* SPECTRUM_MGMT action code */
+enum rtw_ieee80211_spectrum_mgmt_actioncode {
+ RTW_WLAN_ACTION_SPCT_MSR_REQ = 0,
+ RTW_WLAN_ACTION_SPCT_MSR_RPRT = 1,
+ RTW_WLAN_ACTION_SPCT_TPC_REQ = 2,
+ RTW_WLAN_ACTION_SPCT_TPC_RPRT = 3,
+ RTW_WLAN_ACTION_SPCT_CHL_SWITCH = 4,
+ RTW_WLAN_ACTION_SPCT_EXT_CHL_SWITCH = 5,
+};
+
+enum _PUBLIC_ACTION {
+ ACT_PUBLIC_BSSCOEXIST = 0, /* 20/40 BSS Coexistence */
+ ACT_PUBLIC_DSE_ENABLE = 1,
+ ACT_PUBLIC_DSE_DEENABLE = 2,
+ ACT_PUBLIC_DSE_REG_LOCATION = 3,
+ ACT_PUBLIC_EXT_CHL_SWITCH = 4,
+ ACT_PUBLIC_DSE_MSR_REQ = 5,
+ ACT_PUBLIC_DSE_MSR_RPRT = 6,
+ ACT_PUBLIC_MP = 7, /* Measurement Pilot */
+ ACT_PUBLIC_DSE_PWR_CONSTRAINT = 8,
+ ACT_PUBLIC_VENDOR = 9, /* for WIFI_DIRECT */
+ ACT_PUBLIC_GAS_INITIAL_REQ = 10,
+ ACT_PUBLIC_GAS_INITIAL_RSP = 11,
+ ACT_PUBLIC_GAS_COMEBACK_REQ = 12,
+ ACT_PUBLIC_GAS_COMEBACK_RSP = 13,
+ ACT_PUBLIC_TDLS_DISCOVERY_RSP = 14,
+ ACT_PUBLIC_LOCATION_TRACK = 15,
+ ACT_PUBLIC_MAX
+};
+
+/* BACK action code */
+enum rtw_ieee80211_back_actioncode {
+ RTW_WLAN_ACTION_ADDBA_REQ = 0,
+ RTW_WLAN_ACTION_ADDBA_RESP = 1,
+ RTW_WLAN_ACTION_DELBA = 2,
+};
+
+/* HT features action code */
+enum rtw_ieee80211_ht_actioncode {
+ RTW_WLAN_ACTION_NOTIFY_CH_WIDTH = 0,
+ RTW_WLAN_ACTION_SM_PS = 1,
+ RTW_WLAN_ACTION_PSPM = 2,
+ RTW_WLAN_ACTION_PCO_PHASE = 3,
+ RTW_WLAN_ACTION_MIMO_CSI_MX = 4,
+ RTW_WLAN_ACTION_MIMO_NONCP_BF = 5,
+ RTW_WLAN_ACTION_MIMP_CP_BF = 6,
+ RTW_WLAN_ACTION_ASEL_INDICATES_FB = 7,
+ RTW_WLAN_ACTION_HI_INFO_EXCHG = 8,
+};
+
+/* BACK (block-ack) parties */
+enum rtw_ieee80211_back_parties {
+ RTW_WLAN_BACK_RECIPIENT = 0,
+ RTW_WLAN_BACK_INITIATOR = 1,
+ RTW_WLAN_BACK_TIMER = 2,
+};
+
+#define OUI_MICROSOFT 0x0050f2 /* Microsoft (also used in Wi-Fi specs)
+ * 00:50:F2 */
+#define WME_OUI_TYPE 2
+#define WME_OUI_SUBTYPE_INFORMATION_ELEMENT 0
+#define WME_OUI_SUBTYPE_PARAMETER_ELEMENT 1
+#define WME_OUI_SUBTYPE_TSPEC_ELEMENT 2
+#define WME_VERSION 1
+
+#define WME_ACTION_CODE_SETUP_REQUEST 0
+#define WME_ACTION_CODE_SETUP_RESPONSE 1
+#define WME_ACTION_CODE_TEARDOWN 2
+
+#define WME_SETUP_RESPONSE_STATUS_ADMISSION_ACCEPTED 0
+#define WME_SETUP_RESPONSE_STATUS_INVALID_PARAMETERS 1
+#define WME_SETUP_RESPONSE_STATUS_REFUSED 3
+
+#define WME_TSPEC_DIRECTION_UPLINK 0
+#define WME_TSPEC_DIRECTION_DOWNLINK 1
+#define WME_TSPEC_DIRECTION_BI_DIRECTIONAL 3
+
+
+#define OUI_BROADCOM 0x00904c /* Broadcom (Epigram) */
+
+#define VENDOR_HT_CAPAB_OUI_TYPE 0x33 /* 00-90-4c:0x33 */
+
+/**
+ * enum rtw_ieee80211_channel_flags - channel flags
+ *
+ * Channel flags set by the regulatory control code.
+ *
+ * @RTW_IEEE80211_CHAN_DISABLED: This channel is disabled.
+ * @RTW_IEEE80211_CHAN_PASSIVE_SCAN: Only passive scanning is permitted
+ * on this channel.
+ * @RTW_IEEE80211_CHAN_NO_IBSS: IBSS is not allowed on this channel.
+ * @RTW_IEEE80211_CHAN_RADAR: Radar detection is required on this channel.
+ * @RTW_IEEE80211_CHAN_NO_HT40PLUS: extension channel above this channel
+ * is not permitted.
+ * @RTW_IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel
+ * is not permitted.
+ */
+enum rtw_ieee80211_channel_flags {
+ RTW_IEEE80211_CHAN_DISABLED = 1<<0,
+ RTW_IEEE80211_CHAN_PASSIVE_SCAN = 1<<1,
+ RTW_IEEE80211_CHAN_NO_IBSS = 1<<2,
+ RTW_IEEE80211_CHAN_RADAR = 1<<3,
+ RTW_IEEE80211_CHAN_NO_HT40PLUS = 1<<4,
+ RTW_IEEE80211_CHAN_NO_HT40MINUS = 1<<5,
+};
+
+#define RTW_IEEE80211_CHAN_NO_HT40 \
+ (RTW_IEEE80211_CHAN_NO_HT40PLUS | RTW_IEEE80211_CHAN_NO_HT40MINUS)
+
+/* Represent channel details, subset of ieee80211_channel */
+struct rtw_ieee80211_channel {
+ u16 hw_value;
+ u32 flags;
+};
+
+#define CHAN_FMT \
+ "hw_value:%u, " \
+ "flags:0x%08x"
+
+#define CHAN_ARG(channel) \
+ (channel)->hw_value \
+ , (channel)->flags
+
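+/* Illustrative sketch (hypothetical helper name): CHAN_FMT/CHAN_ARG are meant
+ * to be pasted into a printk-style format string and argument list.
+ */
+static inline void rtw_example_dump_channel(struct rtw_ieee80211_channel *ch)
+{
+ printk(KERN_DEBUG "channel: " CHAN_FMT "\n", CHAN_ARG(ch));
+}
+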
+/* Parsed Information Elements */
+struct rtw_ieee802_11_elems {
+ u8 *ssid;
+ u8 ssid_len;
+ u8 *supp_rates;
+ u8 supp_rates_len;
+ u8 *fh_params;
+ u8 fh_params_len;
+ u8 *ds_params;
+ u8 ds_params_len;
+ u8 *cf_params;
+ u8 cf_params_len;
+ u8 *tim;
+ u8 tim_len;
+ u8 *ibss_params;
+ u8 ibss_params_len;
+ u8 *challenge;
+ u8 challenge_len;
+ u8 *erp_info;
+ u8 erp_info_len;
+ u8 *ext_supp_rates;
+ u8 ext_supp_rates_len;
+ u8 *wpa_ie;
+ u8 wpa_ie_len;
+ u8 *rsn_ie;
+ u8 rsn_ie_len;
+ u8 *wme;
+ u8 wme_len;
+ u8 *wme_tspec;
+ u8 wme_tspec_len;
+ u8 *wps_ie;
+ u8 wps_ie_len;
+ u8 *power_cap;
+ u8 power_cap_len;
+ u8 *supp_channels;
+ u8 supp_channels_len;
+ u8 *mdie;
+ u8 mdie_len;
+ u8 *ftie;
+ u8 ftie_len;
+ u8 *timeout_int;
+ u8 timeout_int_len;
+ u8 *ht_capabilities;
+ u8 ht_capabilities_len;
+ u8 *ht_operation;
+ u8 ht_operation_len;
+ u8 *vendor_ht_cap;
+ u8 vendor_ht_cap_len;
+};
+
+enum parse_res {
+ ParseOK = 0,
+ ParseUnknown = 1,
+ ParseFailed = -1
+};
+
+enum parse_res rtw_ieee802_11_parse_elems(u8 *start, uint len,
+ struct rtw_ieee802_11_elems *elems,
+ int show_errors);
+
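+/* Illustrative sketch (hypothetical helper and locals): parse the variable
+ * IEs that follow the fixed fields of a beacon or probe response body with
+ * rtw_ieee802_11_parse_elems() and read back the SSID length.
+ */
+static inline int rtw_example_get_ssid_len(u8 *ies, uint ies_len)
+{
+ struct rtw_ieee802_11_elems elems;
+
+ if (rtw_ieee802_11_parse_elems(ies, ies_len, &elems, 0) == ParseFailed)
+ return -1;
+ return elems.ssid ? elems.ssid_len : 0;
+}
+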
+u8 *rtw_set_fixed_ie(unsigned char *pbuf, unsigned int len,
+ unsigned char *source, unsigned int *frlen);
+u8 *rtw_set_ie(u8 *pbuf, int index, uint len, u8 *source, uint *frlen);
+
+enum secondary_ch_offset {
+ SCN = 0, /* no secondary channel */
+ SCA = 1, /* secondary channel above */
+ SCB = 3, /* secondary channel below */
+};
+u8 secondary_ch_offset_to_hal_ch_offset(u8 ch_offset);
+u8 hal_ch_offset_to_secondary_ch_offset(u8 ch_offset);
+u8 *rtw_set_ie_ch_switch(u8 *buf, u32 *buf_len, u8 ch_switch_mode,
+ u8 new_ch, u8 ch_switch_cnt);
+u8 *rtw_set_ie_secondary_ch_offset(u8 *buf, u32 *buf_len,
+ u8 secondary_ch_offset);
+u8 *rtw_set_ie_mesh_ch_switch_parm(u8 *buf, u32 *buf_len, u8 ttl,
+ u8 flags, u16 reason, u16 precedence);
+
+u8 *rtw_get_ie(u8 *pbuf, int index, int *len, int limit);
+u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui,
+ u8 oui_len, u8 *ie, uint *ielen);
+int rtw_ies_remove_ie(u8 *ies, uint *ies_len, uint offset,
+ u8 eid, u8 *oui, u8 oui_len);
+
+void rtw_set_supported_rate(u8 *SupportedRates, uint mode);
+
+unsigned char *rtw_get_wpa_ie(unsigned char *pie, int *wpa_ie_len, int limit);
+unsigned char *rtw_get_wpa2_ie(unsigned char *pie, int *rsn_ie_len, int limit);
+int rtw_get_wpa_cipher_suite(u8 *s);
+int rtw_get_wpa2_cipher_suite(u8 *s);
+int rtw_get_wapi_ie(u8 *in_ie, uint in_len, u8 *wapi_ie, u16 *wapi_len);
+int rtw_parse_wpa_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher,
+ int *pairwise_cipher, int *is_8021x);
+int rtw_parse_wpa2_ie(u8 *wpa_ie, int wpa_ie_len, int *group_cipher,
+ int *pairwise_cipher, int *is_8021x);
+
+int rtw_get_sec_ie(u8 *in_ie, uint in_len, u8 *rsn_ie, u16 *rsn_len,
+ u8 *wpa_ie, u16 *wpa_len);
+
+u8 rtw_is_wps_ie(u8 *ie_ptr, uint *wps_ielen);
+u8 *rtw_get_wps_ie(u8 *in_ie, uint in_len, u8 *wps_ie, uint *wps_ielen);
+u8 *rtw_get_wps_attr(u8 *wps_ie, uint wps_ielen, u16 target_attr_id,
+ u8 *buf_attr, u32 *len_attr);
+u8 *rtw_get_wps_attr_content(u8 *wps_ie, uint wps_ielen, u16 target_attr_id,
+ u8 *buf_content, uint *len_content);
+
+/**
+ * for_each_ie - iterate over consecutive IEs in a buffer
+ * @ie: cursor pointer, set to each information element in turn
+ * @buf: buffer holding the consecutive IEs
+ * @buf_len: total length of @buf in bytes
+ */
+#define for_each_ie(ie, buf, buf_len) \
+ for (ie = (void *)buf; (((u8 *)ie) - ((u8 *)buf) + 1) < buf_len; \
+ ie = (void *)(((u8 *)ie) + *(((u8 *)ie)+1) + 2))
+
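+/* Illustrative sketch (hypothetical helper name): walk a buffer of
+ * consecutive IEs with for_each_ie(); each element begins with a one-byte ID
+ * and a one-byte length, so the macro advances by len + 2 each step.
+ */
+static inline int rtw_example_count_ies(u8 *buf, u32 buf_len)
+{
+ struct ieee80211_info_element *ie;
+ int n = 0;
+
+ for_each_ie(ie, buf, buf_len)
+ n++;
+ return n;
+}
+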
+void dump_ies(u8 *buf, u32 buf_len);
+void dump_wps_ie(u8 *ie, u32 ie_len);
+
+#ifdef CONFIG_88EU_P2P
+void dump_p2p_ie(u8 *ie, u32 ie_len);
+u8 *rtw_get_p2p_ie(u8 *in_ie, int in_len, u8 *p2p_ie, uint *p2p_ielen);
+u8 *rtw_get_p2p_attr(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id,
+ u8 *buf_attr, u32 *len_attr);
+u8 *rtw_get_p2p_attr_content(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id,
+ u8 *buf_content, uint *len_content);
+u32 rtw_set_p2p_attr_content(u8 *pbuf, u8 attr_id, u16 attr_len,
+ u8 *pdata_attr);
+void rtw_wlan_bssid_ex_remove_p2p_attr(struct wlan_bssid_ex *bss_ex,
+ u8 attr_id);
+#endif
+
+uint rtw_get_rateset_len(u8 *rateset);
+
+struct registry_priv;
+int rtw_generate_ie(struct registry_priv *pregistrypriv);
+
+
+int rtw_get_bit_value_from_ieee_value(u8 val);
+
+uint rtw_is_cckrates_included(u8 *rate);
+
+uint rtw_is_cckratesonly_included(u8 *rate);
+
+int rtw_check_network_type(unsigned char *rate, int ratelen, int channel);
+
+void rtw_get_bcn_info(struct wlan_network *pnetwork);
+
+void rtw_macaddr_cfg(u8 *mac_addr);
+
+u16 rtw_mcs_rate(u8 rf_type, u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40,
+ unsigned char *MCS_rate);
+
+int rtw_action_frame_parse(const u8 *frame, u32 frame_len, u8 *category,
+ u8 *action);
+const char *action_public_str(u8 action);
+
+#endif /* IEEE80211_H */
diff --git a/drivers/staging/rtl8188eu/include/ieee80211_ext.h b/drivers/staging/rtl8188eu/include/ieee80211_ext.h
new file mode 100644
index 00000000000..1052d1817a9
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/ieee80211_ext.h
@@ -0,0 +1,290 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __IEEE80211_EXT_H
+#define __IEEE80211_EXT_H
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#define WMM_OUI_TYPE 2
+#define WMM_OUI_SUBTYPE_INFORMATION_ELEMENT 0
+#define WMM_OUI_SUBTYPE_PARAMETER_ELEMENT 1
+#define WMM_OUI_SUBTYPE_TSPEC_ELEMENT 2
+#define WMM_VERSION 1
+
+#define WPA_PROTO_WPA BIT(0)
+#define WPA_PROTO_RSN BIT(1)
+
+#define WPA_KEY_MGMT_IEEE8021X BIT(0)
+#define WPA_KEY_MGMT_PSK BIT(1)
+#define WPA_KEY_MGMT_NONE BIT(2)
+#define WPA_KEY_MGMT_IEEE8021X_NO_WPA BIT(3)
+#define WPA_KEY_MGMT_WPA_NONE BIT(4)
+
+
+#define WPA_CAPABILITY_PREAUTH BIT(0)
+#define WPA_CAPABILITY_MGMT_FRAME_PROTECTION BIT(6)
+#define WPA_CAPABILITY_PEERKEY_ENABLED BIT(9)
+
+
+#define PMKID_LEN 16
+
+
+struct wpa_ie_hdr {
+ u8 elem_id;
+ u8 len;
+ u8 oui[4]; /* 24-bit OUI followed by 8-bit OUI type */
+ u8 version[2]; /* little endian */
+} __packed;
+
+struct rsn_ie_hdr {
+ u8 elem_id; /* WLAN_EID_RSN */
+ u8 len;
+ u8 version[2]; /* little endian */
+} __packed;
+
+struct wme_ac_parameter {
+#if defined(__LITTLE_ENDIAN)
+ /* byte 1 */
+ u8 aifsn:4,
+ acm:1,
+ aci:2,
+ reserved:1;
+
+ /* byte 2 */
+ u8 eCWmin:4,
+ eCWmax:4;
+#elif defined(__BIG_ENDIAN)
+ /* byte 1 */
+ u8 reserved:1,
+ aci:2,
+ acm:1,
+ aifsn:4;
+
+ /* byte 2 */
+ u8 eCWmax:4,
+ eCWmin:4;
+#else
+#error "Please fix <endian.h>"
+#endif
+
+ /* bytes 3 & 4 */
+ u16 txopLimit;
+} __packed;
+
+struct wme_parameter_element {
+ /* required fields for WME version 1 */
+ u8 oui[3];
+ u8 oui_type;
+ u8 oui_subtype;
+ u8 version;
+ u8 acInfo;
+ u8 reserved;
+ struct wme_ac_parameter ac[4];
+
+} __packed;
+
+#define WPA_PUT_LE16(a, val) \
+ do { \
+ (a)[1] = ((u16) (val)) >> 8; \
+ (a)[0] = ((u16) (val)) & 0xff; \
+ } while (0)
+
+#define WPA_PUT_BE32(a, val) \
+ do { \
+ (a)[0] = (u8) ((((u32) (val)) >> 24) & 0xff); \
+ (a)[1] = (u8) ((((u32) (val)) >> 16) & 0xff); \
+ (a)[2] = (u8) ((((u32) (val)) >> 8) & 0xff); \
+ (a)[3] = (u8) (((u32) (val)) & 0xff); \
+ } while (0)
+
+#define WPA_PUT_LE32(a, val) \
+ do { \
+ (a)[3] = (u8) ((((u32) (val)) >> 24) & 0xff); \
+ (a)[2] = (u8) ((((u32) (val)) >> 16) & 0xff); \
+ (a)[1] = (u8) ((((u32) (val)) >> 8) & 0xff); \
+ (a)[0] = (u8) (((u32) (val)) & 0xff); \
+ } while (0)
+
+#define RSN_SELECTOR_PUT(a, val) WPA_PUT_BE32((u8 *)(a), (val))
+
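+/* Illustrative sketch (hypothetical helper name): the byte-order macros above
+ * store multi-byte values into raw IE buffers; here the little-endian version
+ * field of a struct wpa_ie_hdr is set to WPA version 1.
+ */
+static inline void wpa_example_set_version(struct wpa_ie_hdr *hdr)
+{
+ WPA_PUT_LE16(hdr->version, 1);
+}
+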
+/* Action category code */
+enum ieee80211_category {
+ WLAN_CATEGORY_SPECTRUM_MGMT = 0,
+ WLAN_CATEGORY_QOS = 1,
+ WLAN_CATEGORY_DLS = 2,
+ WLAN_CATEGORY_BACK = 3,
+ WLAN_CATEGORY_HT = 7,
+ WLAN_CATEGORY_WMM = 17,
+};
+
+/* SPECTRUM_MGMT action code */
+enum ieee80211_spectrum_mgmt_actioncode {
+ WLAN_ACTION_SPCT_MSR_REQ = 0,
+ WLAN_ACTION_SPCT_MSR_RPRT = 1,
+ WLAN_ACTION_SPCT_TPC_REQ = 2,
+ WLAN_ACTION_SPCT_TPC_RPRT = 3,
+ WLAN_ACTION_SPCT_CHL_SWITCH = 4,
+ WLAN_ACTION_SPCT_EXT_CHL_SWITCH = 5,
+};
+
+/* BACK action code */
+enum ieee80211_back_actioncode {
+ WLAN_ACTION_ADDBA_REQ = 0,
+ WLAN_ACTION_ADDBA_RESP = 1,
+ WLAN_ACTION_DELBA = 2,
+};
+
+/* HT features action code */
+enum ieee80211_ht_actioncode {
+ WLAN_ACTION_NOTIFY_CH_WIDTH = 0,
+ WLAN_ACTION_SM_PS = 1,
+ WLAN_ACTION_PSPM = 2,
+ WLAN_ACTION_PCO_PHASE = 3,
+ WLAN_ACTION_MIMO_CSI_MX = 4,
+ WLAN_ACTION_MIMO_NONCP_BF = 5,
+ WLAN_ACTION_MIMP_CP_BF = 6,
+ WLAN_ACTION_ASEL_INDICATES_FB = 7,
+ WLAN_ACTION_HI_INFO_EXCHG = 8,
+};
+
+/* BACK (block-ack) parties */
+enum ieee80211_back_parties {
+ WLAN_BACK_RECIPIENT = 0,
+ WLAN_BACK_INITIATOR = 1,
+ WLAN_BACK_TIMER = 2,
+};
+
+struct ieee80211_mgmt {
+ u16 frame_control;
+ u16 duration;
+ u8 da[6];
+ u8 sa[6];
+ u8 bssid[6];
+ u16 seq_ctrl;
+ union {
+ struct {
+ u16 auth_alg;
+ u16 auth_transaction;
+ u16 status_code;
+ /* possibly followed by Challenge text */
+ u8 variable[0];
+ } __packed auth;
+ struct {
+ u16 reason_code;
+ } __packed deauth;
+ struct {
+ u16 capab_info;
+ u16 listen_interval;
+ /* followed by SSID and Supported rates */
+ u8 variable[0];
+ } __packed assoc_req;
+ struct {
+ u16 capab_info;
+ u16 status_code;
+ u16 aid;
+ /* followed by Supported rates */
+ u8 variable[0];
+ } __packed assoc_resp, reassoc_resp;
+ struct {
+ u16 capab_info;
+ u16 listen_interval;
+ u8 current_ap[6];
+ /* followed by SSID and Supported rates */
+ u8 variable[0];
+ } __packed reassoc_req;
+ struct {
+ u16 reason_code;
+ } __packed disassoc;
+ struct {
+ __le64 timestamp;
+ u16 beacon_int;
+ u16 capab_info;
+ /* followed by some of SSID, Supported rates,
+ * FH Params, DS Params, CF Params, IBSS Params, TIM */
+ u8 variable[0];
+ } __packed beacon;
+ struct {
+ /* only variable items: SSID, Supported rates */
+ u8 variable[0];
+ } __packed probe_req;
+ struct {
+ __le64 timestamp;
+ u16 beacon_int;
+ u16 capab_info;
+ /* followed by some of SSID, Supported rates,
+ * FH Params, DS Params, CF Params, IBSS Params */
+ u8 variable[0];
+ } __packed probe_resp;
+ struct {
+ u8 category;
+ union {
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u8 status_code;
+ u8 variable[0];
+ } __packed wme_action;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u16 capab;
+ u16 timeout;
+ u16 start_seq_num;
+ } __packed addba_req;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ u16 status;
+ u16 capab;
+ u16 timeout;
+ } __packed addba_resp;
+ struct {
+ u8 action_code;
+ u16 params;
+ u16 reason_code;
+ } __packed delba;
+ struct {
+ u8 action_code;
+ /* capab_info for open and confirm,
+ * reason for close
+ */
+ u16 aux;
+ /* Followed in plink_confirm by status
+ * code, AID and supported rates,
+ * and directly by supported rates in
+ * plink_open and plink_close
+ */
+ u8 variable[0];
+ } __packed plink_action;
+ struct {
+ u8 action_code;
+ u8 variable[0];
+ } __packed mesh_action;
+ } __packed u;
+ } __packed action;
+ } __packed u;
+} __packed;
+
+/* mgmt header + 1 byte category code */
+#define IEEE80211_MIN_ACTION_SIZE \
+ FIELD_OFFSET(struct ieee80211_mgmt, u.action.u)
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/if_ether.h b/drivers/staging/rtl8188eu/include/if_ether.h
new file mode 100644
index 00000000000..db157712a20
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/if_ether.h
@@ -0,0 +1,111 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef _LINUX_IF_ETHER_H
+#define _LINUX_IF_ETHER_H
+
+/*
+ * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
+ * and FCS/CRC (frame check sequence).
+ */
+
+#define ETH_ALEN 6 /* Octets in one ethernet addr */
+#define ETH_HLEN 14 /* Total octets in header. */
+#define ETH_ZLEN 60 /* Min. octets in frame sans FCS */
+#define ETH_DATA_LEN 1500 /* Max. octets in payload */
+#define ETH_FRAME_LEN 1514 /* Max. octets in frame sans FCS */
+
+/*
+ * These are the defined Ethernet Protocol ID's.
+ */
+
+#define ETH_P_LOOP 0x0060 /* Ethernet Loopback packet */
+#define ETH_P_PUP 0x0200 /* Xerox PUP packet */
+#define ETH_P_PUPAT 0x0201 /* Xerox PUP Addr Trans packet */
+#define ETH_P_IP 0x0800 /* Internet Protocol packet */
+#define ETH_P_X25 0x0805 /* CCITT X.25 */
+#define ETH_P_ARP 0x0806 /* Address Resolution packet */
+#define ETH_P_BPQ 0x08FF /* G8BPQ AX.25 Ethernet Packet */
+#define ETH_P_IEEEPUP 0x0a00 /* Xerox IEEE802.3 PUP packet */
+#define ETH_P_IEEEPUPAT 0x0a01 /* Xerox IEEE802.3 PUP */
+#define ETH_P_DEC 0x6000 /* DEC Assigned proto */
+#define ETH_P_DNA_DL 0x6001 /* DEC DNA Dump/Load */
+#define ETH_P_DNA_RC 0x6002 /* DEC DNA Remote Console */
+#define ETH_P_DNA_RT 0x6003 /* DEC DNA Routing */
+#define ETH_P_LAT 0x6004 /* DEC LAT */
+#define ETH_P_DIAG 0x6005 /* DEC Diagnostics */
+#define ETH_P_CUST 0x6006 /* DEC Customer use */
+#define ETH_P_SCA 0x6007 /* DEC Systems Comms Arch */
+#define ETH_P_RARP 0x8035 /* Reverse Addr Res packet */
+#define ETH_P_ATALK 0x809B /* Appletalk DDP */
+#define ETH_P_AARP 0x80F3 /* Appletalk AARP */
+#define ETH_P_8021Q 0x8100 /* 802.1Q VLAN Extended Header */
+#define ETH_P_IPX 0x8137 /* IPX over DIX */
+#define ETH_P_IPV6 0x86DD /* IPv6 over bluebook */
+#define ETH_P_PPP_DISC 0x8863 /* PPPoE discovery messages */
+#define ETH_P_PPP_SES 0x8864 /* PPPoE session messages */
+#define ETH_P_ATMMPOA 0x884c /* MultiProtocol Over ATM */
+#define ETH_P_ATMFATE 0x8884 /* Frame-based ATM Transport
+ * over Ethernet
+ */
+
+/*
+ * Non DIX types. Won't clash for 1500 types.
+ */
+
+#define ETH_P_802_3 0x0001 /* Dummy type for 802.3 frames */
+#define ETH_P_AX25 0x0002 /* Dummy protocol id for AX.25 */
+#define ETH_P_ALL 0x0003 /* Every packet (be careful!!!) */
+#define ETH_P_802_2 0x0004 /* 802.2 frames */
+#define ETH_P_SNAP 0x0005 /* Internal only */
+#define ETH_P_DDCMP 0x0006 /* DEC DDCMP: Internal only */
+#define ETH_P_WAN_PPP 0x0007 /* Dummy type for WAN PPP frames*/
+#define ETH_P_PPP_MP 0x0008 /* Dummy type for PPP MP frames */
+#define ETH_P_LOCALTALK 0x0009 /* Localtalk pseudo type */
+#define ETH_P_PPPTALK 0x0010 /* Dummy type for Atalk over PPP*/
+#define ETH_P_TR_802_2 0x0011 /* 802.2 frames */
+#define ETH_P_MOBITEX 0x0015 /* Mobitex (kaz@cafe.net) */
+#define ETH_P_CONTROL 0x0016 /* Card specific control frames */
+#define ETH_P_IRDA 0x0017 /* Linux-IrDA */
+#define ETH_P_ECONET 0x0018 /* Acorn Econet */
+
+/*
+ * This is an Ethernet frame header.
+ */
+
+struct ethhdr {
+ unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
+ unsigned char h_source[ETH_ALEN]; /* source ether addr */
+ unsigned short h_proto; /* packet type ID field */
+};
+
+struct _vlan {
+ unsigned short h_vlan_TCI; /* Encap prio and VLAN ID */
+ unsigned short h_vlan_encapsulated_proto;
+};
+
+#define get_vlan_id(pvlan) \
+ ((ntohs((unsigned short)pvlan->h_vlan_TCI)) & 0xfff)
+#define get_vlan_priority(pvlan) \
+ ((ntohs((unsigned short)pvlan->h_vlan_TCI))>>13)
+#define get_vlan_encap_proto(pvlan) \
+ (ntohs((unsigned short)pvlan->h_vlan_encapsulated_proto))
+
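+/* Illustrative sketch (hypothetical helper name): the VLAN accessors above
+ * expect a pointer to the 4-byte 802.1Q tag that follows the Ethernet header
+ * when h_proto is ETH_P_8021Q.
+ */
+static inline int example_get_frame_vlan_id(unsigned char *pkt)
+{
+ struct ethhdr *eth = (struct ethhdr *)pkt;
+ struct _vlan *pvlan = (struct _vlan *)(pkt + ETH_HLEN);
+
+ if (ntohs(eth->h_proto) != ETH_P_8021Q)
+ return -1;
+ return get_vlan_id(pvlan);
+}
+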
+#endif /* _LINUX_IF_ETHER_H */
diff --git a/drivers/staging/rtl8188eu/include/ioctl_cfg80211.h b/drivers/staging/rtl8188eu/include/ioctl_cfg80211.h
new file mode 100644
index 00000000000..037e9a5e5af
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/ioctl_cfg80211.h
@@ -0,0 +1,107 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __IOCTL_CFG80211_H__
+#define __IOCTL_CFG80211_H__
+
+struct rtw_wdev_invit_info {
+ u8 token;
+ u8 flags;
+ u8 status;
+ u8 req_op_ch;
+ u8 rsp_op_ch;
+};
+
+#define rtw_wdev_invit_info_init(invit_info) \
+ do { \
+ (invit_info)->token = 0; \
+ (invit_info)->flags = 0x00; \
+ (invit_info)->status = 0xff; \
+ (invit_info)->req_op_ch = 0; \
+ (invit_info)->rsp_op_ch = 0; \
+ } while (0)
+
+struct rtw_wdev_priv {
+ struct wireless_dev *rtw_wdev;
+
+ struct adapter *padapter;
+
+ struct cfg80211_scan_request *scan_request;
+ spinlock_t scan_req_lock;
+
+ struct net_device *pmon_ndev;/* for monitor interface */
+ char ifname_mon[IFNAMSIZ + 1]; /* name of monitor interface */
+
+ u8 p2p_enabled;
+
+ u8 provdisc_req_issued;
+
+ struct rtw_wdev_invit_info invit_info;
+
+ u8 bandroid_scan;
+ bool block;
+ bool power_mgmt;
+};
+
+#define wdev_to_priv(w) ((struct rtw_wdev_priv *)(wdev_priv(w)))
+
+#define wiphy_to_wdev(x) \
+((struct wireless_dev *)(((struct rtw_wdev_priv *)wiphy_priv(x))->rtw_wdev))
+
+int rtw_wdev_alloc(struct adapter *padapter, struct device *dev);
+void rtw_wdev_free(struct wireless_dev *wdev);
+void rtw_wdev_unregister(struct wireless_dev *wdev);
+
+void rtw_cfg80211_init_wiphy(struct adapter *padapter);
+
+void rtw_cfg80211_surveydone_event_callback(struct adapter *padapter);
+
+void rtw_cfg80211_indicate_connect(struct adapter *padapter);
+void rtw_cfg80211_indicate_disconnect(struct adapter *padapter);
+void rtw_cfg80211_indicate_scan_done(struct rtw_wdev_priv *pwdev_priv,
+ bool aborted);
+
+#ifdef CONFIG_88EU_AP_MODE
+void rtw_cfg80211_indicate_sta_assoc(struct adapter *padapter,
+ u8 *pmgmt_frame, uint frame_len);
+void rtw_cfg80211_indicate_sta_disassoc(struct adapter *padapter,
+ unsigned char *da,
+ unsigned short reason);
+#endif /* CONFIG_88EU_AP_MODE */
+
+void rtw_cfg80211_issue_p2p_provision_request(struct adapter *padapter,
+ const u8 *buf, size_t len);
+void rtw_cfg80211_rx_p2p_action_public(struct adapter *padapter,
+ u8 *pmgmt_frame, uint frame_len);
+void rtw_cfg80211_rx_action_p2p(struct adapter *padapter, u8 *pmgmt_frame,
+ uint frame_len);
+void rtw_cfg80211_rx_action(struct adapter *adapter, u8 *frame,
+ uint frame_len, const char *msg);
+
+int rtw_cfg80211_set_mgnt_wpsp2pie(struct net_device *net,
+ char *buf, int len, int type);
+
+bool rtw_cfg80211_pwr_mgmt(struct adapter *adapter);
+
+#define rtw_cfg80211_rx_mgmt(dev, freq, sig_dbm, buf, len, gfp) \
+ cfg80211_rx_mgmt(dev, freq, sig_dbm, buf, len, gfp)
+#define rtw_cfg80211_send_rx_assoc(dev, bss, buf, len) \
+ cfg80211_send_rx_assoc(dev, bss, buf, len)
+
+#endif /* __IOCTL_CFG80211_H__ */
diff --git a/drivers/staging/rtl8188eu/include/ip.h b/drivers/staging/rtl8188eu/include/ip.h
new file mode 100644
index 00000000000..9fdac6d42d1
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/ip.h
@@ -0,0 +1,126 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _LINUX_IP_H
+#define _LINUX_IP_H
+
+/* SOL_IP socket options */
+
+#define IPTOS_TOS_MASK 0x1E
+#define IPTOS_TOS(tos) ((tos)&IPTOS_TOS_MASK)
+#define IPTOS_LOWDELAY 0x10
+#define IPTOS_THROUGHPUT 0x08
+#define IPTOS_RELIABILITY 0x04
+#define IPTOS_MINCOST 0x02
+
+#define IPTOS_PREC_MASK 0xE0
+#define IPTOS_PREC(tos) ((tos)&IPTOS_PREC_MASK)
+#define IPTOS_PREC_NETCONTROL 0xe0
+#define IPTOS_PREC_INTERNETCONTROL 0xc0
+#define IPTOS_PREC_CRITIC_ECP 0xa0
+#define IPTOS_PREC_FLASHOVERRIDE 0x80
+#define IPTOS_PREC_FLASH 0x60
+#define IPTOS_PREC_IMMEDIATE 0x40
+#define IPTOS_PREC_PRIORITY 0x20
+#define IPTOS_PREC_ROUTINE 0x00
+
+
+/* IP options */
+#define IPOPT_COPY 0x80
+#define IPOPT_CLASS_MASK 0x60
+#define IPOPT_NUMBER_MASK 0x1f
+
+#define IPOPT_COPIED(o) ((o)&IPOPT_COPY)
+#define IPOPT_CLASS(o) ((o)&IPOPT_CLASS_MASK)
+#define IPOPT_NUMBER(o) ((o)&IPOPT_NUMBER_MASK)
+
+#define IPOPT_CONTROL 0x00
+#define IPOPT_RESERVED1 0x20
+#define IPOPT_MEASUREMENT 0x40
+#define IPOPT_RESERVED2 0x60
+
+#define IPOPT_END (0 | IPOPT_CONTROL)
+#define IPOPT_NOOP (1 | IPOPT_CONTROL)
+#define IPOPT_SEC (2 | IPOPT_CONTROL | IPOPT_COPY)
+#define IPOPT_LSRR (3 | IPOPT_CONTROL | IPOPT_COPY)
+#define IPOPT_TIMESTAMP (4 | IPOPT_MEASUREMENT)
+#define IPOPT_RR (7 | IPOPT_CONTROL)
+#define IPOPT_SID (8 | IPOPT_CONTROL | IPOPT_COPY)
+#define IPOPT_SSRR (9 | IPOPT_CONTROL | IPOPT_COPY)
+#define IPOPT_RA (20 | IPOPT_CONTROL | IPOPT_COPY)
+
+#define IPVERSION 4
+#define MAXTTL 255
+#define IPDEFTTL 64
+#define IPOPT_OPTVAL 0
+#define IPOPT_OLEN 1
+#define IPOPT_OFFSET 2
+#define IPOPT_MINOFF 4
+#define MAX_IPOPTLEN 40
+#define IPOPT_NOP IPOPT_NOOP
+#define IPOPT_EOL IPOPT_END
+#define IPOPT_TS IPOPT_TIMESTAMP
+
+#define IPOPT_TS_TSONLY 0 /* timestamps only */
+#define IPOPT_TS_TSANDADDR 1 /* timestamps and addresses */
+#define IPOPT_TS_PRESPEC 3 /* specified modules only */
+
+struct ip_options {
+ __u32 faddr; /* Saved first hop address */
+ unsigned char optlen;
+ unsigned char srr;
+ unsigned char rr;
+ unsigned char ts;
+ unsigned char is_setbyuser:1, /* Set by setsockopt? */
+ is_data:1, /* Options in __data, rather than skb*/
+ is_strictroute:1,/* Strict source route */
+ srr_is_hit:1, /* Packet destn addr was ours */
+ is_changed:1, /* IP checksum no longer valid */
+ rr_needaddr:1, /* Need to record addr of out dev*/
+ ts_needtime:1, /* Need to record timestamp */
+ ts_needaddr:1; /* Need to record addr of out dev */
+ unsigned char router_alert;
+ unsigned char __pad1;
+ unsigned char __pad2;
+ unsigned char __data[0];
+};
+
+#define optlength(opt) (sizeof(struct ip_options) + opt->optlen)
+
+struct iphdr {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+ __u8 ihl:4,
+ version:4;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+ __u8 version:4,
+ ihl:4;
+#endif
+ __u8 tos;
+ __u16 tot_len;
+ __u16 id;
+ __u16 frag_off;
+ __u8 ttl;
+ __u8 protocol;
+ __u16 check;
+ __u32 saddr;
+ __u32 daddr;
+ /* The options start here. */
+};
+
+#endif /* _LINUX_IP_H */
diff --git a/drivers/staging/rtl8188eu/include/mlme_osdep.h b/drivers/staging/rtl8188eu/include/mlme_osdep.h
new file mode 100644
index 00000000000..ae1722c6703
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/mlme_osdep.h
@@ -0,0 +1,35 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __MLME_OSDEP_H_
+#define __MLME_OSDEP_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+void rtw_init_mlme_timer(struct adapter *padapter);
+void rtw_os_indicate_disconnect(struct adapter *adapter);
+void rtw_os_indicate_connect(struct adapter *adapter);
+void rtw_os_indicate_scan_done(struct adapter *padapter, bool aborted);
+void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie);
+
+void rtw_reset_securitypriv(struct adapter *adapter);
+void indicate_wx_scan_complete_event(struct adapter *padapter);
+
+#endif /* __MLME_OSDEP_H_ */
diff --git a/drivers/staging/rtl8188eu/include/mp_custom_oid.h b/drivers/staging/rtl8188eu/include/mp_custom_oid.h
new file mode 100644
index 00000000000..6fa52cf99c4
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/mp_custom_oid.h
@@ -0,0 +1,352 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __CUSTOM_OID_H
+#define __CUSTOM_OID_H
+
+/* by Owen */
+/* 0xFF818000 - 0xFF81802F RTL8180 Mass Production Kit */
+/* 0xFF818500 - 0xFF81850F RTL8185 Setup Utility */
+/* 0xFF818580 - 0xFF81858F RTL8185 Phy Status Utility */
+
+/* */
+
+/* by Owen for Production Kit */
+/* For Production Kit with Agilent Equipments */
+/* in order to make our custom oids hopefully somewhat unique */
+/* we will use 0xFF (indicating implementation specific OID) */
+/* 81(first byte of non zero Realtek unique identifier) */
+/* 80 (second byte of non zero Realtek unique identifier) */
+/* XX (the custom OID number - providing 255 possible custom oids) */
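+/* Worked example of the scheme: OID_RT_PRO_SET_DATA_RATE = 0xFF818001, */
+/* i.e. 0xFF (implementation specific), 0x81 0x80 (Realtek identifier), */
+/* and 0x01 as the custom OID number. */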
+
+#define OID_RT_PRO_RESET_DUT 0xFF818000
+#define OID_RT_PRO_SET_DATA_RATE 0xFF818001
+#define OID_RT_PRO_START_TEST 0xFF818002
+#define OID_RT_PRO_STOP_TEST 0xFF818003
+#define OID_RT_PRO_SET_PREAMBLE 0xFF818004
+#define OID_RT_PRO_SET_SCRAMBLER 0xFF818005
+#define OID_RT_PRO_SET_FILTER_BB 0xFF818006
+#define OID_RT_PRO_SET_MANUAL_DIVERSITY_BB 0xFF818007
+#define OID_RT_PRO_SET_CHANNEL_DIRECT_CALL 0xFF818008
+#define OID_RT_PRO_SET_SLEEP_MODE_DIRECT_CALL 0xFF818009
+#define OID_RT_PRO_SET_WAKE_MODE_DIRECT_CALL 0xFF81800A
+
+#define OID_RT_PRO_SET_TX_ANTENNA_BB 0xFF81800D
+#define OID_RT_PRO_SET_ANTENNA_BB 0xFF81800E
+#define OID_RT_PRO_SET_CR_SCRAMBLER 0xFF81800F
+#define OID_RT_PRO_SET_CR_NEW_FILTER 0xFF818010
+#define OID_RT_PRO_SET_TX_POWER_CONTROL 0xFF818011
+#define OID_RT_PRO_SET_CR_TX_CONFIG 0xFF818012
+#define OID_RT_PRO_GET_TX_POWER_CONTROL 0xFF818013
+#define OID_RT_PRO_GET_CR_SIGNAL_QUALITY 0xFF818014
+#define OID_RT_PRO_SET_CR_SETPOINT 0xFF818015
+#define OID_RT_PRO_SET_INTEGRATOR 0xFF818016
+#define OID_RT_PRO_SET_SIGNAL_QUALITY 0xFF818017
+#define OID_RT_PRO_GET_INTEGRATOR 0xFF818018
+#define OID_RT_PRO_GET_SIGNAL_QUALITY 0xFF818019
+#define OID_RT_PRO_QUERY_EEPROM_TYPE 0xFF81801A
+#define OID_RT_PRO_WRITE_MAC_ADDRESS 0xFF81801B
+#define OID_RT_PRO_READ_MAC_ADDRESS 0xFF81801C
+#define OID_RT_PRO_WRITE_CIS_DATA 0xFF81801D
+#define OID_RT_PRO_READ_CIS_DATA 0xFF81801E
+#define OID_RT_PRO_WRITE_POWER_CONTROL 0xFF81801F
+#define OID_RT_PRO_READ_POWER_CONTROL 0xFF818020
+#define OID_RT_PRO_WRITE_EEPROM 0xFF818021
+#define OID_RT_PRO_READ_EEPROM 0xFF818022
+#define OID_RT_PRO_RESET_TX_PACKET_SENT 0xFF818023
+#define OID_RT_PRO_QUERY_TX_PACKET_SENT 0xFF818024
+#define OID_RT_PRO_RESET_RX_PACKET_RECEIVED 0xFF818025
+#define OID_RT_PRO_QUERY_RX_PACKET_RECEIVED 0xFF818026
+#define OID_RT_PRO_QUERY_RX_PACKET_CRC32_ERROR 0xFF818027
+#define OID_RT_PRO_QUERY_CURRENT_ADDRESS 0xFF818028
+#define OID_RT_PRO_QUERY_PERMANENT_ADDRESS 0xFF818029
+#define OID_RT_PRO_SET_PHILIPS_RF_PARAMETERS 0xFF81802A
+#define OID_RT_PRO_RECEIVE_PACKET 0xFF81802C
+/* added by Owen on 04/08/03 for Cameo's request */
+#define OID_RT_PRO_WRITE_EEPROM_BYTE 0xFF81802D
+#define OID_RT_PRO_READ_EEPROM_BYTE 0xFF81802E
+#define OID_RT_PRO_SET_MODULATION 0xFF81802F
+/* */
+
+/* Sean */
+#define OID_RT_DRIVER_OPTION 0xFF818080
+#define OID_RT_RF_OFF 0xFF818081
+#define OID_RT_AUTH_STATUS 0xFF818082
+
+/* */
+#define OID_RT_PRO_SET_CONTINUOUS_TX 0xFF81800B
+#define OID_RT_PRO_SET_SINGLE_CARRIER_TX 0xFF81800C
+#define OID_RT_PRO_SET_CARRIER_SUPPRESSION_TX 0xFF81802B
+#define OID_RT_PRO_SET_SINGLE_TONE_TX 0xFF818043
+/* */
+
+
+/* by Owen for RTL8185 Phy Status Report Utility */
+#define OID_RT_UTILITY_false_ALARM_COUNTERS 0xFF818580
+#define OID_RT_UTILITY_SELECT_DEBUG_MODE 0xFF818581
+#define OID_RT_UTILITY_SELECT_SUBCARRIER_NUMBER 0xFF818582
+#define OID_RT_UTILITY_GET_RSSI_STATUS 0xFF818583
+#define OID_RT_UTILITY_GET_FRAME_DETECTION_STATUS 0xFF818584
+#define OID_RT_UTILITY_GET_AGC_AND_FREQUENCY_OFFSET_ESTIMATION_STATUS \
+ 0xFF818585
+#define OID_RT_UTILITY_GET_CHANNEL_ESTIMATION_STATUS 0xFF818586
+/* */
+
+/* by Owen on 03/09/19-03/09/22 for RTL8185 */
+#define OID_RT_WIRELESS_MODE 0xFF818500
+#define OID_RT_SUPPORTED_RATES 0xFF818501
+#define OID_RT_DESIRED_RATES 0xFF818502
+#define OID_RT_WIRELESS_MODE_STARTING_ADHOC 0xFF818503
+/* */
+
+#define OID_RT_GET_CONNECT_STATE 0xFF030001
+#define OID_RT_RESCAN 0xFF030002
+#define OID_RT_SET_KEY_LENGTH 0xFF030003
+#define OID_RT_SET_DEFAULT_KEY_ID 0xFF030004
+
+#define OID_RT_SET_CHANNEL 0xFF010182
+#define OID_RT_SET_SNIFFER_MODE 0xFF010183
+#define OID_RT_GET_SIGNAL_QUALITY 0xFF010184
+#define OID_RT_GET_SMALL_PACKET_CRC 0xFF010185
+#define OID_RT_GET_MIDDLE_PACKET_CRC 0xFF010186
+#define OID_RT_GET_LARGE_PACKET_CRC 0xFF010187
+#define OID_RT_GET_TX_RETRY 0xFF010188
+#define OID_RT_GET_RX_RETRY 0xFF010189
+#define OID_RT_PRO_SET_FW_DIG_STATE 0xFF01018A/* S */
+#define OID_RT_PRO_SET_FW_RA_STATE 0xFF01018B/* S */
+
+#define OID_RT_GET_RX_TOTAL_PACKET 0xFF010190
+#define OID_RT_GET_TX_BEACON_OK 0xFF010191
+#define OID_RT_GET_TX_BEACON_ERR 0xFF010192
+#define OID_RT_GET_RX_ICV_ERR 0xFF010193
+#define OID_RT_SET_ENCRYPTION_ALGORITHM 0xFF010194
+#define OID_RT_SET_NO_AUTO_RESCAN 0xFF010195
+#define OID_RT_GET_PREAMBLE_MODE 0xFF010196
+#define OID_RT_GET_DRIVER_UP_DELTA_TIME 0xFF010197
+#define OID_RT_GET_AP_IP 0xFF010198
+#define OID_RT_GET_CHANNELPLAN 0xFF010199
+#define OID_RT_SET_PREAMBLE_MODE 0xFF01019A
+#define OID_RT_SET_BCN_INTVL 0xFF01019B
+#define OID_RT_GET_RF_VENDER 0xFF01019C
+#define OID_RT_DEDICATE_PROBE 0xFF01019D
+#define OID_RT_PRO_RX_FILTER_PATTERN 0xFF01019E
+
+#define OID_RT_GET_DCST_CURRENT_THRESHOLD 0xFF01019F
+
+#define OID_RT_GET_CCA_ERR 0xFF0101A0
+#define OID_RT_GET_CCA_UPGRADE_THRESHOLD 0xFF0101A1
+#define OID_RT_GET_CCA_FALLBACK_THRESHOLD 0xFF0101A2
+
+#define OID_RT_GET_CCA_UPGRADE_EVALUATE_TIMES 0xFF0101A3
+#define OID_RT_GET_CCA_FALLBACK_EVALUATE_TIMES 0xFF0101A4
+
+/* by Owen on 03/31/03 for Cameo's request */
+#define OID_RT_SET_RATE_ADAPTIVE 0xFF0101A5
+/* */
+#define OID_RT_GET_DCST_EVALUATE_PERIOD 0xFF0101A5
+#define OID_RT_GET_DCST_TIME_UNIT_INDEX 0xFF0101A6
+#define OID_RT_GET_TOTAL_TX_BYTES 0xFF0101A7
+#define OID_RT_GET_TOTAL_RX_BYTES 0xFF0101A8
+#define OID_RT_CURRENT_TX_POWER_LEVEL 0xFF0101A9
+#define OID_RT_GET_ENC_KEY_MISMATCH_COUNT 0xFF0101AA
+#define OID_RT_GET_ENC_KEY_MATCH_COUNT 0xFF0101AB
+#define OID_RT_GET_CHANNEL 0xFF0101AC
+
+#define OID_RT_SET_CHANNELPLAN 0xFF0101AD
+#define OID_RT_GET_HARDWARE_RADIO_OFF 0xFF0101AE
+#define OID_RT_CHANNELPLAN_BY_COUNTRY 0xFF0101AF
+#define OID_RT_SCAN_AVAILABLE_BSSID 0xFF0101B0
+#define OID_RT_GET_HARDWARE_VERSION 0xFF0101B1
+#define OID_RT_GET_IS_ROAMING 0xFF0101B2
+#define OID_RT_GET_IS_PRIVACY 0xFF0101B3
+#define OID_RT_GET_KEY_MISMATCH 0xFF0101B4
+#define OID_RT_SET_RSSI_ROAM_TRAFFIC_TH 0xFF0101B5
+#define OID_RT_SET_RSSI_ROAM_SIGNAL_TH 0xFF0101B6
+#define OID_RT_RESET_LOG 0xFF0101B7
+#define OID_RT_GET_LOG 0xFF0101B8
+#define OID_RT_SET_INDICATE_HIDDEN_AP 0xFF0101B9
+#define OID_RT_GET_HEADER_FAIL 0xFF0101BA
+#define OID_RT_SUPPORTED_WIRELESS_MODE 0xFF0101BB
+#define OID_RT_GET_CHANNEL_LIST 0xFF0101BC
+#define OID_RT_GET_SCAN_IN_PROGRESS 0xFF0101BD
+#define OID_RT_GET_TX_INFO 0xFF0101BE
+#define OID_RT_RF_READ_WRITE_OFFSET 0xFF0101BF
+#define OID_RT_RF_READ_WRITE 0xFF0101C0
+
+/* For Netgear request. 2005.01.13, by rcnjko. */
+#define OID_RT_FORCED_DATA_RATE 0xFF0101C1
+#define OID_RT_WIRELESS_MODE_FOR_SCAN_LIST 0xFF0101C2
+/* For Netgear request. 2005.02.17, by rcnjko. */
+#define OID_RT_GET_BSS_WIRELESS_MODE 0xFF0101C3
+/* For AZ project. 2005.06.27, by rcnjko. */
+#define OID_RT_SCAN_WITH_MAGIC_PACKET 0xFF0101C4
+
+/* Vincent 8185MP */
+#define OID_RT_PRO_RX_FILTER 0xFF0111C0
+
+#define OID_CE_USB_WRITE_REGISTRY 0xFF0111C1
+#define OID_CE_USB_READ_REGISTRY 0xFF0111C2
+
+#define OID_RT_PRO_SET_INITIAL_GA 0xFF0111C3
+#define OID_RT_PRO_SET_BB_RF_STANDBY_MODE 0xFF0111C4
+#define OID_RT_PRO_SET_BB_RF_SHUTDOWN_MODE 0xFF0111C5
+#define OID_RT_PRO_SET_TX_CHARGE_PUMP 0xFF0111C6
+#define OID_RT_PRO_SET_RX_CHARGE_PUMP 0xFF0111C7
+#define OID_RT_PRO_RF_WRITE_REGISTRY 0xFF0111C8
+#define OID_RT_PRO_RF_READ_REGISTRY 0xFF0111C9
+#define OID_RT_PRO_QUERY_RF_TYPE 0xFF0111CA
+
+/* AP OID */
+#define OID_RT_AP_GET_ASSOCIATED_STATION_LIST 0xFF010300
+#define OID_RT_AP_GET_CURRENT_TIME_STAMP 0xFF010301
+#define OID_RT_AP_SWITCH_INTO_AP_MODE 0xFF010302
+#define OID_RT_AP_SET_DTIM_PERIOD 0xFF010303
+/* Determine if driver supports AP mode. */
+#define OID_RT_AP_SUPPORTED 0xFF010304
+/* Set WPA-PSK passphrase into authenticator. */
+#define OID_RT_AP_SET_PASSPHRASE 0xFF010305
+
+/* 8187MP. 2004.09.06, by rcnjko. */
+#define OID_RT_PRO8187_WI_POLL 0xFF818780
+#define OID_RT_PRO_WRITE_BB_REG 0xFF818781
+#define OID_RT_PRO_READ_BB_REG 0xFF818782
+#define OID_RT_PRO_WRITE_RF_REG 0xFF818783
+#define OID_RT_PRO_READ_RF_REG 0xFF818784
+
+/* Meeting House. added by Annie, 2005-07-20. */
+#define OID_RT_MH_VENDER_ID 0xFFEDC100
+
+/* 8711 MP OID added 20051230. */
+#define OID_RT_PRO8711_JOIN_BSS 0xFF871100/* S */
+
+#define OID_RT_PRO_READ_REGISTER 0xFF871101 /* Q */
+#define OID_RT_PRO_WRITE_REGISTER 0xFF871102 /* S */
+
+#define OID_RT_PRO_BURST_READ_REGISTER 0xFF871103 /* Q */
+#define OID_RT_PRO_BURST_WRITE_REGISTER 0xFF871104 /* S */
+
+#define OID_RT_PRO_WRITE_TXCMD 0xFF871105 /* S */
+
+#define OID_RT_PRO_READ16_EEPROM 0xFF871106 /* Q */
+#define OID_RT_PRO_WRITE16_EEPROM 0xFF871107 /* S */
+
+#define OID_RT_PRO_H2C_SET_COMMAND 0xFF871108 /* S */
+#define OID_RT_PRO_H2C_QUERY_RESULT 0xFF871109 /* Q */
+
+#define OID_RT_PRO8711_WI_POLL 0xFF87110A /* Q */
+#define OID_RT_PRO8711_PKT_LOSS 0xFF87110B /* Q */
+#define OID_RT_RD_ATTRIB_MEM 0xFF87110C/* Q */
+#define OID_RT_WR_ATTRIB_MEM 0xFF87110D/* S */
+
+
+/* Method 2 for H2C/C2H */
+#define OID_RT_PRO_H2C_CMD_MODE 0xFF871110 /* S */
+#define OID_RT_PRO_H2C_CMD_RSP_MODE 0xFF871111 /* Q */
+#define OID_RT_PRO_H2C_CMD_EVENT_MODE 0xFF871112 /* S */
+#define OID_RT_PRO_WAIT_C2H_EVENT 0xFF871113 /* Q */
+#define OID_RT_PRO_RW_ACCESS_PROTOCOL_TEST 0xFF871114/* Q */
+
+#define OID_RT_PRO_SCSI_ACCESS_TEST 0xFF871115 /* Q, S */
+
+#define OID_RT_PRO_SCSI_TCPIPOFFLOAD_OUT 0xFF871116 /* S */
+#define OID_RT_PRO_SCSI_TCPIPOFFLOAD_IN 0xFF871117 /* Q,S */
+#define OID_RT_RRO_RX_PKT_VIA_IOCTRL 0xFF871118 /* Q */
+#define OID_RT_RRO_RX_PKTARRAY_VIA_IOCTRL 0xFF871119 /* Q */
+
+#define OID_RT_RPO_SET_PWRMGT_TEST 0xFF87111A /* S */
+#define OID_RT_PRO_QRY_PWRMGT_TEST	0xFF87111B /* Q */
+#define OID_RT_RPO_ASYNC_RWIO_TEST 0xFF87111C /* S */
+#define OID_RT_RPO_ASYNC_RWIO_POLL 0xFF87111D /* Q */
+#define OID_RT_PRO_SET_RF_INTFS 0xFF87111E /* S */
+#define OID_RT_POLL_RX_STATUS 0xFF87111F /* Q */
+
+#define OID_RT_PRO_CFG_DEBUG_MESSAGE 0xFF871120 /* Q,S */
+#define OID_RT_PRO_SET_DATA_RATE_EX 0xFF871121/* S */
+#define OID_RT_PRO_SET_BASIC_RATE 0xFF871122/* S */
+#define OID_RT_PRO_READ_TSSI 0xFF871123/* S */
+#define OID_RT_PRO_SET_POWER_TRACKING 0xFF871124/* S */
+
+
+#define OID_RT_PRO_QRY_PWRSTATE 0xFF871150 /* Q */
+#define OID_RT_PRO_SET_PWRSTATE 0xFF871151 /* S */
+
+/* Method 2 , using workitem */
+#define OID_RT_SET_READ_REG 0xFF871181 /* S */
+#define OID_RT_SET_WRITE_REG 0xFF871182 /* S */
+#define OID_RT_SET_BURST_READ_REG 0xFF871183 /* S */
+#define OID_RT_SET_BURST_WRITE_REG 0xFF871184 /* S */
+#define OID_RT_SET_WRITE_TXCMD 0xFF871185 /* S */
+#define OID_RT_SET_READ16_EEPROM 0xFF871186 /* S */
+#define OID_RT_SET_WRITE16_EEPROM 0xFF871187 /* S */
+#define OID_RT_QRY_POLL_WKITEM 0xFF871188 /* Q */
+
+/* For SDIO INTERFACE only */
+#define OID_RT_PRO_SYNCPAGERW_SRAM 0xFF8711A0 /* Q, S */
+#define OID_RT_PRO_871X_DRV_EXT 0xFF8711A1
+
+/* For USB INTERFACE only */
+#define OID_RT_PRO_USB_VENDOR_REQ 0xFF8711B0 /* Q, S */
+#define OID_RT_PRO_SCSI_AUTO_TEST 0xFF8711B1 /* S */
+#define OID_RT_PRO_USB_MAC_AC_FIFO_WRITE 0xFF8711B2 /* S */
+#define OID_RT_PRO_USB_MAC_RX_FIFO_READ 0xFF8711B3 /* Q */
+#define OID_RT_PRO_USB_MAC_RX_FIFO_POLLING 0xFF8711B4 /* Q */
+
+#define OID_RT_PRO_H2C_SET_RATE_TABLE 0xFF8711FB /* S */
+#define OID_RT_PRO_H2C_GET_RATE_TABLE 0xFF8711FC /* S */
+#define OID_RT_PRO_H2C_C2H_LBK_TEST 0xFF8711FE
+
+#define OID_RT_PRO_ENCRYPTION_CTRL 0xFF871200 /* Q, S */
+#define OID_RT_PRO_ADD_STA_INFO 0xFF871201 /* S */
+#define OID_RT_PRO_DELE_STA_INFO 0xFF871202 /* S */
+#define OID_RT_PRO_QUERY_DR_VARIABLE 0xFF871203 /* Q */
+
+#define OID_RT_PRO_RX_PACKET_TYPE 0xFF871204 /* Q, S */
+
+#define OID_RT_PRO_READ_EFUSE 0xFF871205 /* Q */
+#define OID_RT_PRO_WRITE_EFUSE 0xFF871206 /* S */
+#define OID_RT_PRO_RW_EFUSE_PGPKT 0xFF871207 /* Q, S */
+#define OID_RT_GET_EFUSE_CURRENT_SIZE 0xFF871208 /* Q */
+
+#define OID_RT_SET_BANDWIDTH 0xFF871209 /* S */
+#define OID_RT_SET_CRYSTAL_CAP 0xFF87120A /* S */
+
+#define OID_RT_SET_RX_PACKET_TYPE 0xFF87120B /* S */
+
+#define OID_RT_GET_EFUSE_MAX_SIZE 0xFF87120C /* Q */
+
+#define OID_RT_PRO_SET_TX_AGC_OFFSET 0xFF87120D /* S */
+
+#define OID_RT_PRO_SET_PKT_TEST_MODE 0xFF87120E /* S */
+
+#define OID_RT_PRO_FOR_EVM_TEST_SETTING 0xFF87120F /* S */
+
+#define OID_RT_PRO_GET_THERMAL_METER 0xFF871210 /* Q */
+
+#define OID_RT_RESET_PHY_RX_PACKET_COUNT 0xFF871211 /* S */
+#define OID_RT_GET_PHY_RX_PACKET_RECEIVED 0xFF871212 /* Q */
+#define OID_RT_GET_PHY_RX_PACKET_CRC32_ERROR 0xFF871213 /* Q */
+
+#define OID_RT_SET_POWER_DOWN 0xFF871214 /* S */
+
+#define OID_RT_GET_POWER_MODE 0xFF871215 /* Q */
+
+#define OID_RT_PRO_EFUSE 0xFF871216 /* Q, S */
+#define OID_RT_PRO_EFUSE_MAP 0xFF871217 /* Q, S */
+
+#endif /* ifndef __CUSTOM_OID_H */
diff --git a/drivers/staging/rtl8188eu/include/nic_spec.h b/drivers/staging/rtl8188eu/include/nic_spec.h
new file mode 100644
index 00000000000..d42244788ca
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/nic_spec.h
@@ -0,0 +1,44 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+
+#ifndef __NIC_SPEC_H__
+#define __NIC_SPEC_H__
+
+#define RTL8711_MCTRL_ (0x20000)
+#define RTL8711_UART_ (0x30000)
+#define RTL8711_TIMER_ (0x40000)
+#define RTL8711_FINT_ (0x50000)
+#define RTL8711_HINT_ (0x50000)
+#define RTL8711_GPIO_ (0x60000)
+#define RTL8711_WLANCTRL_ (0x200000)
+#define RTL8711_WLANFF_ (0xe00000)
+#define RTL8711_HCICTRL_ (0x600000)
+#define RTL8711_SYSCFG_ (0x620000)
+#define RTL8711_SYSCTRL_ (0x620000)
+#define RTL8711_MCCTRL_ (0x020000)
+
+
+#include <rtl8711_regdef.h>
+
+#include <rtl8711_bitdef.h>
+
+
+#endif /* __NIC_SPEC_H__ */
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
new file mode 100644
index 00000000000..2bfe7284192
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -0,0 +1,1198 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+
+#ifndef __HALDMOUTSRC_H__
+#define __HALDMOUTSRC_H__
+
+/* Definition */
+/* Define all team support ability. */
+
+/* Define for all teams. Please Define the constant in your precomp header. */
+
+/* define DM_ODM_SUPPORT_AP 0 */
+/* define DM_ODM_SUPPORT_ADSL 0 */
+/* define DM_ODM_SUPPORT_CE 0 */
+/* define DM_ODM_SUPPORT_MP 1 */
+
+/* Define ODM SW team support flag. */
+
+/* Antenna Switch Relative Definition. */
+
+/* Add new function SwAntDivCheck8192C(). */
+/* This is the main antenna diversity function used before link. */
+/* Mainly, it just retains the last scan result and scans again. */
+/* After that, it compares the scan results to see which one gets better
+ * RSSI. It selects the antenna with the better receive power and returns
+ * the better scan result. */
+
+#define TP_MODE 0
+#define RSSI_MODE 1
+#define TRAFFIC_LOW 0
+#define TRAFFIC_HIGH 1
+
+/* 3 Tx Power Tracking */
+/* 3============================================================ */
+#define DPK_DELTA_MAPPING_NUM 13
+#define index_mapping_HP_NUM 15
+
+
+/* */
+/* 3 PSD Handler */
+/* 3============================================================ */
+
+#define AFH_PSD 1 /* 0:normal PSD scan, 1: only do 20 pts PSD */
+#define MODE_40M 0 /* 0:20M, 1:40M */
+#define PSD_TH2 3
+#define PSD_CHM 20 /* Minimum channel number for BT AFH */
+#define SIR_STEP_SIZE 3
+#define Smooth_Size_1 5
+#define Smooth_TH_1 3
+#define Smooth_Size_2 10
+#define Smooth_TH_2 4
+#define Smooth_Size_3 20
+#define Smooth_TH_3 4
+#define Smooth_Step_Size 5
+#define Adaptive_SIR 1
+#define PSD_RESCAN 4
+#define PSD_SCAN_INTERVAL 700 /* ms */
+
+/* 8723A High Power IGI Setting */
+#define DM_DIG_HIGH_PWR_IGI_LOWER_BOUND 0x22
+#define DM_DIG_Gmode_HIGH_PWR_IGI_LOWER_BOUND 0x28
+#define DM_DIG_HIGH_PWR_THRESHOLD 0x3a
+
+/* LPS define */
+#define DM_DIG_FA_TH0_LPS 4 /* 4 in lps */
+#define DM_DIG_FA_TH1_LPS 15 /* 15 lps */
+#define DM_DIG_FA_TH2_LPS 30 /* 30 lps */
+#define RSSI_OFFSET_DIG			0x05
+
+/* ANT Test */
+#define ANTTESTALL	0x00	/* Ant A or B will be tested */
+#define ANTTESTA	0x01	/* Ant A will be tested */
+#define ANTTESTB	0x02	/* Ant B will be tested */
+
+/* structure and define */
+
+/* Added for the AP/ADSL pseudo DM structure requirement. */
+/* Should this be moved to another position? */
+struct rtl8192cd_priv {
+ u8 temp;
+};
+
+struct rtw_dig {
+ u8 Dig_Enable_Flag;
+ u8 Dig_Ext_Port_Stage;
+
+ int RssiLowThresh;
+ int RssiHighThresh;
+
+ u32 FALowThresh;
+ u32 FAHighThresh;
+
+ u8 CurSTAConnectState;
+ u8 PreSTAConnectState;
+ u8 CurMultiSTAConnectState;
+
+ u8 PreIGValue;
+ u8 CurIGValue;
+ u8 BackupIGValue;
+
+ s8 BackoffVal;
+ s8 BackoffVal_range_max;
+ s8 BackoffVal_range_min;
+ u8 rx_gain_range_max;
+ u8 rx_gain_range_min;
+ u8 Rssi_val_min;
+
+ u8 PreCCK_CCAThres;
+ u8 CurCCK_CCAThres;
+ u8 PreCCKPDState;
+ u8 CurCCKPDState;
+
+ u8 LargeFAHit;
+ u8 ForbiddenIGI;
+ u32 Recover_cnt;
+
+ u8 DIG_Dynamic_MIN_0;
+ u8 DIG_Dynamic_MIN_1;
+ bool bMediaConnect_0;
+ bool bMediaConnect_1;
+
+ u32 AntDiv_RSSI_max;
+ u32 RSSI_max;
+};
+
+struct rtl_ps {
+ u8 PreCCAState;
+ u8 CurCCAState;
+
+ u8 PreRFState;
+ u8 CurRFState;
+
+ int Rssi_val_min;
+
+ u8 initialize;
+	u32 Reg874, RegC70, Reg85C, RegA74;
+
+};
+
+struct false_alarm_stats {
+ u32 Cnt_Parity_Fail;
+ u32 Cnt_Rate_Illegal;
+ u32 Cnt_Crc8_fail;
+ u32 Cnt_Mcs_fail;
+ u32 Cnt_Ofdm_fail;
+ u32 Cnt_Cck_fail;
+ u32 Cnt_all;
+ u32 Cnt_Fast_Fsync;
+ u32 Cnt_SB_Search_fail;
+ u32 Cnt_OFDM_CCA;
+ u32 Cnt_CCK_CCA;
+ u32 Cnt_CCA_all;
+ u32 Cnt_BW_USC; /* Gary */
+ u32 Cnt_BW_LSC; /* Gary */
+};
+
+struct dyn_primary_cca {
+ u8 PriCCA_flag;
+ u8 intf_flag;
+ u8 intf_type;
+ u8 DupRTS_flag;
+ u8 Monitor_flag;
+};
+
+struct rx_hpc {
+ u8 RXHP_flag;
+ u8 PSD_func_trigger;
+ u8 PSD_bitmap_RXHP[80];
+ u8 Pre_IGI;
+ u8 Cur_IGI;
+ u8 Pre_pw_th;
+ u8 Cur_pw_th;
+ bool First_time_enter;
+ bool RXHP_enable;
+ u8 TP_Mode;
+ struct timer_list PSDTimer;
+};
+
+#define ASSOCIATE_ENTRY_NUM 32 /* Max size of AsocEntry[]. */
+#define ODM_ASSOCIATE_ENTRY_NUM ASSOCIATE_ENTRY_NUM
+
+/* This indicates two different steps. */
+/* In SWAW_STEP_PEAK, driver needs to switch antenna and listen to
+ * the signal on the air. */
+/* In SWAW_STEP_DETERMINE, driver just compares the signal captured in
+ * SWAW_STEP_PEAK with original RSSI to determine if it is necessary to
+ * switch antenna. */
+
+#define SWAW_STEP_PEAK 0
+#define SWAW_STEP_DETERMINE 1
+
+#define TP_MODE 0
+#define RSSI_MODE 1
+#define TRAFFIC_LOW 0
+#define TRAFFIC_HIGH 1
+
+struct sw_ant_switch {
+ u8 try_flag;
+ s32 PreRSSI;
+ u8 CurAntenna;
+ u8 PreAntenna;
+ u8 RSSI_Trying;
+ u8 TestMode;
+ u8 bTriggerAntennaSwitch;
+ u8 SelectAntennaMap;
+ u8 RSSI_target;
+
+ /* Before link Antenna Switch check */
+ u8 SWAS_NoLink_State;
+ u32 SWAS_NoLink_BK_Reg860;
+	bool ANTA_ON;	/* To indicate whether Ant A is on or not */
+ bool ANTB_ON; /* To indicate Ant B is on or not */
+
+ s32 RSSI_sum_A;
+ s32 RSSI_sum_B;
+ s32 RSSI_cnt_A;
+ s32 RSSI_cnt_B;
+ u64 lastTxOkCnt;
+ u64 lastRxOkCnt;
+ u64 TXByteCnt_A;
+ u64 TXByteCnt_B;
+ u64 RXByteCnt_A;
+ u64 RXByteCnt_B;
+ u8 TrafficLoad;
+ struct timer_list SwAntennaSwitchTimer;
+ /* Hybrid Antenna Diversity */
+ u32 CCK_Ant1_Cnt[ASSOCIATE_ENTRY_NUM];
+ u32 CCK_Ant2_Cnt[ASSOCIATE_ENTRY_NUM];
+ u32 OFDM_Ant1_Cnt[ASSOCIATE_ENTRY_NUM];
+ u32 OFDM_Ant2_Cnt[ASSOCIATE_ENTRY_NUM];
+ u32 RSSI_Ant1_Sum[ASSOCIATE_ENTRY_NUM];
+ u32 RSSI_Ant2_Sum[ASSOCIATE_ENTRY_NUM];
+ u8 TxAnt[ASSOCIATE_ENTRY_NUM];
+ u8 TargetSTA;
+ u8 antsel;
+ u8 RxIdleAnt;
+};
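
The SWAW_STEP_PEAK / SWAW_STEP_DETERMINE pair and the sw_ant_switch bookkeeping above amount to a record-then-compare decision. Below is a minimal sketch of that flow, not taken from the driver (the real routine also programs the antenna-select registers, which is omitted here):

static u8 swaw_step_example(struct sw_ant_switch *swat, u8 step,
			    u8 other_antenna, s32 rssi)
{
	if (step == SWAW_STEP_PEAK) {
		/* Record what the current antenna hears, then listen on the other one. */
		swat->PreRSSI = rssi;
		swat->PreAntenna = swat->CurAntenna;
		swat->CurAntenna = other_antenna;
	} else {	/* SWAW_STEP_DETERMINE */
		/* Switch back unless the new antenna heard a stronger signal. */
		if (rssi <= swat->PreRSSI)
			swat->CurAntenna = swat->PreAntenna;
	}
	return swat->CurAntenna;	/* antenna that should stay selected */
}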
+
+struct edca_turbo {
+ bool bCurrentTurboEDCA;
+ bool bIsCurRDLState;
+ u32 prv_traffic_idx; /* edca turbo */
+};
+
+struct odm_rate_adapt {
+ u8 Type; /* DM_Type_ByFW/DM_Type_ByDriver */
+ u8 HighRSSIThresh; /* if RSSI > HighRSSIThresh => RATRState is DM_RATR_STA_HIGH */
+ u8 LowRSSIThresh; /* if RSSI <= LowRSSIThresh => RATRState is DM_RATR_STA_LOW */
+ u8 RATRState; /* Current RSSI level, DM_RATR_STA_HIGH/DM_RATR_STA_MIDDLE/DM_RATR_STA_LOW */
+ u32 LastRATR; /* RATR Register Content */
+};
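
The threshold comments in odm_rate_adapt describe a three-level RSSI classifier over the DM_RATR_STA_* states defined later in this header. The mapping below is an assumption-based illustration of those comments, not the driver's ODM_RAStateCheck():

static u8 ra_state_from_rssi(const struct odm_rate_adapt *ra, s32 rssi)
{
	if (rssi > ra->HighRSSIThresh)
		return DM_RATR_STA_HIGH;
	if (rssi > ra->LowRSSIThresh)	/* between the two thresholds */
		return DM_RATR_STA_MIDDLE;
	return DM_RATR_STA_LOW;		/* RSSI <= LowRSSIThresh */
}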
+
+#define IQK_MAC_REG_NUM 4
+#define IQK_ADDA_REG_NUM 16
+#define IQK_BB_REG_NUM_MAX 10
+#define IQK_BB_REG_NUM 9
+#define HP_THERMAL_NUM 8
+
+#define AVG_THERMAL_NUM 8
+#define IQK_Matrix_REG_NUM 8
+#define IQK_Matrix_Settings_NUM	(1 + 24 + 21)
+
+#define DM_Type_ByFWi 0
+#define DM_Type_ByDriver 1
+
+/* Declare for common info */
+
+#define MAX_PATH_NUM_92CS 2
+
+struct odm_phy_status_info {
+ u8 RxPWDBAll;
+ u8 SignalQuality; /* in 0-100 index. */
+ u8 RxMIMOSignalQuality[MAX_PATH_NUM_92CS]; /* EVM */
+ u8 RxMIMOSignalStrength[MAX_PATH_NUM_92CS];/* in 0~100 index */
+ s8 RxPower; /* in dBm Translate from PWdB */
+ s8 RecvSignalPower;/* Real power in dBm for this packet, no
+ * beautification and aggregation. Keep this raw
+ * info to be used for the other procedures. */
+ u8 BTRxRSSIPercentage;
+ u8 SignalStrength; /* in 0-100 index. */
+ u8 RxPwr[MAX_PATH_NUM_92CS];/* per-path's pwdb */
+ u8 RxSNR[MAX_PATH_NUM_92CS];/* per-path's SNR */
+};
+
+struct odm_phy_dbg_info {
+ /* ODM Write,debug info */
+ s8 RxSNRdB[MAX_PATH_NUM_92CS];
+ u64 NumQryPhyStatus;
+ u64 NumQryPhyStatusCCK;
+ u64 NumQryPhyStatusOFDM;
+ /* Others */
+ s32 RxEVM[MAX_PATH_NUM_92CS];
+};
+
+struct odm_per_pkt_info {
+ s8 Rate;
+ u8 StationID;
+ bool bPacketMatchBSSID;
+ bool bPacketToSelf;
+ bool bPacketBeacon;
+};
+
+struct odm_mac_status_info {
+ u8 test;
+};
+
+enum odm_ability {
+ /* BB Team */
+ ODM_DIG = 0x00000001,
+ ODM_HIGH_POWER = 0x00000002,
+ ODM_CCK_CCA_TH = 0x00000004,
+ ODM_FA_STATISTICS = 0x00000008,
+ ODM_RAMASK = 0x00000010,
+ ODM_RSSI_MONITOR = 0x00000020,
+ ODM_SW_ANTDIV = 0x00000040,
+ ODM_HW_ANTDIV = 0x00000080,
+ ODM_BB_PWRSV = 0x00000100,
+ ODM_2TPATHDIV = 0x00000200,
+ ODM_1TPATHDIV = 0x00000400,
+ ODM_PSD2AFH = 0x00000800
+};
+
+/* 2011/20/20 MH For MP driver RT_WLAN_STA = struct sta_info */
+/* Please declare the ODM-related info below in your STA info structure. */
+
+struct odm_sta_info {
+ /* Driver Write */
+	bool bUsed;	/* record whether the STA is linked or not */
+ u8 IOTPeer; /* Enum value. HT_IOT_PEER_E */
+
+ /* ODM Write */
+ /* 1 PHY_STATUS_INFO */
+ u8 RSSI_Path[4]; /* */
+ u8 RSSI_Ave;
+ u8 RXEVM[4];
+ u8 RXSNR[4];
+};
+
+/* 2011/10/20 MH Define Common info enum for all team. */
+
+enum odm_common_info_def {
+ /* Fixed value: */
+
+ /* HOOK BEFORE REG INIT----------- */
+ ODM_CMNINFO_PLATFORM = 0,
+ ODM_CMNINFO_ABILITY, /* ODM_ABILITY_E */
+ ODM_CMNINFO_INTERFACE, /* ODM_INTERFACE_E */
+ ODM_CMNINFO_MP_TEST_CHIP,
+ ODM_CMNINFO_IC_TYPE, /* ODM_IC_TYPE_E */
+ ODM_CMNINFO_CUT_VER, /* ODM_CUT_VERSION_E */
+ ODM_CMNINFO_FAB_VER, /* ODM_FAB_E */
+ ODM_CMNINFO_RF_TYPE, /* ODM_RF_PATH_E or ODM_RF_TYPE_E? */
+ ODM_CMNINFO_BOARD_TYPE, /* ODM_BOARD_TYPE_E */
+ ODM_CMNINFO_EXT_LNA, /* true */
+ ODM_CMNINFO_EXT_PA,
+ ODM_CMNINFO_EXT_TRSW,
+ ODM_CMNINFO_PATCH_ID, /* CUSTOMER ID */
+ ODM_CMNINFO_BINHCT_TEST,
+ ODM_CMNINFO_BWIFI_TEST,
+ ODM_CMNINFO_SMART_CONCURRENT,
+ /* HOOK BEFORE REG INIT----------- */
+
+ /* Dynamic value: */
+/* POINTER REFERENCE----------- */
+ ODM_CMNINFO_MAC_PHY_MODE, /* ODM_MAC_PHY_MODE_E */
+ ODM_CMNINFO_TX_UNI,
+ ODM_CMNINFO_RX_UNI,
+ ODM_CMNINFO_WM_MODE, /* ODM_WIRELESS_MODE_E */
+ ODM_CMNINFO_BAND, /* ODM_BAND_TYPE_E */
+ ODM_CMNINFO_SEC_CHNL_OFFSET, /* ODM_SEC_CHNL_OFFSET_E */
+ ODM_CMNINFO_SEC_MODE, /* ODM_SECURITY_E */
+ ODM_CMNINFO_BW, /* ODM_BW_E */
+ ODM_CMNINFO_CHNL,
+
+ ODM_CMNINFO_DMSP_GET_VALUE,
+ ODM_CMNINFO_BUDDY_ADAPTOR,
+ ODM_CMNINFO_DMSP_IS_MASTER,
+ ODM_CMNINFO_SCAN,
+ ODM_CMNINFO_POWER_SAVING,
+ ODM_CMNINFO_ONE_PATH_CCA, /* ODM_CCA_PATH_E */
+ ODM_CMNINFO_DRV_STOP,
+ ODM_CMNINFO_PNP_IN,
+ ODM_CMNINFO_INIT_ON,
+ ODM_CMNINFO_ANT_TEST,
+ ODM_CMNINFO_NET_CLOSED,
+ ODM_CMNINFO_MP_MODE,
+/* POINTER REFERENCE----------- */
+
+/* CALL BY VALUE------------- */
+ ODM_CMNINFO_WIFI_DIRECT,
+ ODM_CMNINFO_WIFI_DISPLAY,
+ ODM_CMNINFO_LINK,
+ ODM_CMNINFO_RSSI_MIN,
+ ODM_CMNINFO_DBG_COMP, /* u64 */
+ ODM_CMNINFO_DBG_LEVEL, /* u32 */
+ ODM_CMNINFO_RA_THRESHOLD_HIGH, /* u8 */
+ ODM_CMNINFO_RA_THRESHOLD_LOW, /* u8 */
+ ODM_CMNINFO_RF_ANTENNA_TYPE, /* u8 */
+ ODM_CMNINFO_BT_DISABLED,
+ ODM_CMNINFO_BT_OPERATION,
+ ODM_CMNINFO_BT_DIG,
+	ODM_CMNINFO_BT_BUSY,	/* Check whether BT is in use or not */
+ ODM_CMNINFO_BT_DISABLE_EDCA,
+/* CALL BY VALUE-------------*/
+
+ /* Dynamic ptr array hook itms. */
+ ODM_CMNINFO_STA_STATUS,
+ ODM_CMNINFO_PHY_STATUS,
+ ODM_CMNINFO_MAC_STATUS,
+ ODM_CMNINFO_MAX,
+};
+
+/* 2011/10/20 MH Define ODM support ability. ODM_CMNINFO_ABILITY */
+
+enum odm_ability_def {
+ /* BB ODM section BIT 0-15 */
+ ODM_BB_DIG = BIT0,
+ ODM_BB_RA_MASK = BIT1,
+ ODM_BB_DYNAMIC_TXPWR = BIT2,
+ ODM_BB_FA_CNT = BIT3,
+ ODM_BB_RSSI_MONITOR = BIT4,
+ ODM_BB_CCK_PD = BIT5,
+ ODM_BB_ANT_DIV = BIT6,
+ ODM_BB_PWR_SAVE = BIT7,
+ ODM_BB_PWR_TRA = BIT8,
+ ODM_BB_RATE_ADAPTIVE = BIT9,
+ ODM_BB_PATH_DIV = BIT10,
+ ODM_BB_PSD = BIT11,
+ ODM_BB_RXHP = BIT12,
+
+ /* MAC DM section BIT 16-23 */
+ ODM_MAC_EDCA_TURBO = BIT16,
+ ODM_MAC_EARLY_MODE = BIT17,
+
+ /* RF ODM section BIT 24-31 */
+ ODM_RF_TX_PWR_TRACK = BIT24,
+ ODM_RF_RX_GAIN_TRACK = BIT25,
+ ODM_RF_CALIBRATION = BIT26,
+};
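
These ability bits are intended to be OR'ed into a single mask (the SupportAbility field of odm_dm_struct, declared further down in this header) and tested before each dynamic mechanism runs. A trivial sketch of that check, not part of the patch:

static inline bool odm_ability_set(u32 support_ability, u32 ability)
{
	/* e.g. odm_ability_set(dm->SupportAbility, ODM_BB_DIG | ODM_BB_FA_CNT) */
	return (support_ability & ability) != 0;
}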
+
+/* ODM_CMNINFO_INTERFACE */
+enum odm_interface_def {
+ ODM_ITRF_PCIE = 0x1,
+ ODM_ITRF_USB = 0x2,
+ ODM_ITRF_SDIO = 0x4,
+ ODM_ITRF_ALL = 0x7,
+};
+
+/* ODM_CMNINFO_IC_TYPE */
+enum odm_ic_type {
+ ODM_RTL8192S = BIT0,
+ ODM_RTL8192C = BIT1,
+ ODM_RTL8192D = BIT2,
+ ODM_RTL8723A = BIT3,
+ ODM_RTL8188E = BIT4,
+ ODM_RTL8812 = BIT5,
+ ODM_RTL8821 = BIT6,
+};
+
+#define ODM_IC_11N_SERIES \
+ (ODM_RTL8192S | ODM_RTL8192C | ODM_RTL8192D | \
+ ODM_RTL8723A | ODM_RTL8188E)
+#define ODM_IC_11AC_SERIES (ODM_RTL8812)
+
+/* ODM_CMNINFO_CUT_VER */
+enum odm_cut_version {
+ ODM_CUT_A = 1,
+ ODM_CUT_B = 2,
+ ODM_CUT_C = 3,
+ ODM_CUT_D = 4,
+ ODM_CUT_E = 5,
+ ODM_CUT_F = 6,
+ ODM_CUT_TEST = 7,
+};
+
+/* ODM_CMNINFO_FAB_VER */
+enum odm_fab_Version {
+ ODM_TSMC = 0,
+ ODM_UMC = 1,
+};
+
+/* ODM_CMNINFO_RF_TYPE */
+/* For example 1T2R (A+AB = BIT0|BIT4|BIT5) */
+enum odm_rf_path {
+ ODM_RF_TX_A = BIT0,
+ ODM_RF_TX_B = BIT1,
+ ODM_RF_TX_C = BIT2,
+ ODM_RF_TX_D = BIT3,
+ ODM_RF_RX_A = BIT4,
+ ODM_RF_RX_B = BIT5,
+ ODM_RF_RX_C = BIT6,
+ ODM_RF_RX_D = BIT7,
+};
+
+enum odm_rf_type {
+ ODM_1T1R = 0,
+ ODM_1T2R = 1,
+ ODM_2T2R = 2,
+ ODM_2T3R = 3,
+ ODM_2T4R = 4,
+ ODM_3T3R = 5,
+ ODM_3T4R = 6,
+ ODM_4T4R = 7,
+};
+
+/* ODM Dynamic common info value definition */
+
+enum odm_mac_phy_mode {
+ ODM_SMSP = 0,
+ ODM_DMSP = 1,
+ ODM_DMDP = 2,
+};
+
+enum odm_bt_coexist {
+ ODM_BT_BUSY = 1,
+ ODM_BT_ON = 2,
+ ODM_BT_OFF = 3,
+ ODM_BT_NONE = 4,
+};
+
+/* ODM_CMNINFO_OP_MODE */
+enum odm_operation_mode {
+ ODM_NO_LINK = BIT0,
+ ODM_LINK = BIT1,
+ ODM_SCAN = BIT2,
+ ODM_POWERSAVE = BIT3,
+ ODM_AP_MODE = BIT4,
+ ODM_CLIENT_MODE = BIT5,
+ ODM_AD_HOC = BIT6,
+ ODM_WIFI_DIRECT = BIT7,
+ ODM_WIFI_DISPLAY = BIT8,
+};
+
+/* ODM_CMNINFO_WM_MODE */
+enum odm_wireless_mode {
+ ODM_WM_UNKNOW = 0x0,
+ ODM_WM_B = BIT0,
+ ODM_WM_G = BIT1,
+ ODM_WM_A = BIT2,
+ ODM_WM_N24G = BIT3,
+ ODM_WM_N5G = BIT4,
+ ODM_WM_AUTO = BIT5,
+ ODM_WM_AC = BIT6,
+};
+
+/* ODM_CMNINFO_BAND */
+enum odm_band_type {
+ ODM_BAND_2_4G = BIT0,
+ ODM_BAND_5G = BIT1,
+};
+
+/* ODM_CMNINFO_SEC_CHNL_OFFSET */
+enum odm_sec_chnl_offset {
+ ODM_DONT_CARE = 0,
+ ODM_BELOW = 1,
+ ODM_ABOVE = 2
+};
+
+/* ODM_CMNINFO_SEC_MODE */
+enum odm_security {
+ ODM_SEC_OPEN = 0,
+ ODM_SEC_WEP40 = 1,
+ ODM_SEC_TKIP = 2,
+ ODM_SEC_RESERVE = 3,
+ ODM_SEC_AESCCMP = 4,
+ ODM_SEC_WEP104 = 5,
+ ODM_WEP_WPA_MIXED = 6, /* WEP + WPA */
+ ODM_SEC_SMS4 = 7,
+};
+
+/* ODM_CMNINFO_BW */
+enum odm_bw {
+ ODM_BW20M = 0,
+ ODM_BW40M = 1,
+ ODM_BW80M = 2,
+ ODM_BW160M = 3,
+ ODM_BW10M = 4,
+};
+
+/* ODM_CMNINFO_BOARD_TYPE */
+enum odm_board_type {
+ ODM_BOARD_NORMAL = 0,
+ ODM_BOARD_HIGHPWR = 1,
+ ODM_BOARD_MINICARD = 2,
+ ODM_BOARD_SLIM = 3,
+ ODM_BOARD_COMBO = 4,
+};
+
+/* ODM_CMNINFO_ONE_PATH_CCA */
+enum odm_cca_path {
+ ODM_CCA_2R = 0,
+ ODM_CCA_1R_A = 1,
+ ODM_CCA_1R_B = 2,
+};
+
+struct odm_ra_info {
+ u8 RateID;
+ u32 RateMask;
+ u32 RAUseRate;
+ u8 RateSGI;
+ u8 RssiStaRA;
+ u8 PreRssiStaRA;
+ u8 SGIEnable;
+ u8 DecisionRate;
+ u8 PreRate;
+ u8 HighestRate;
+ u8 LowestRate;
+ u32 NscUp;
+ u32 NscDown;
+ u16 RTY[5];
+ u32 TOTAL;
+ u16 DROP;
+ u8 Active;
+ u16 RptTime;
+ u8 RAWaitingCounter;
+ u8 RAPendingCounter;
+ u8 PTActive; /* on or off */
+ u8 PTTryState; /* 0 trying state, 1 for decision state */
+ u8 PTStage; /* 0~6 */
+ u8 PTStopCount; /* Stop PT counter */
+	u8 PTPreRate;	/* do PT if the rate changes */
+	u8 PTPreRssi;	/* do PT if the RSSI changes by 5% */
+	u8 PTModeSS;	/* decide which rate should do PT */
+ u8 RAstage; /* StageRA, decide how many times RA will be done
+ * between PT */
+ u8 PTSmoothFactor;
+};
+
+struct ijk_matrix_regs_set {
+ bool bIQKDone;
+ s32 Value[1][IQK_Matrix_REG_NUM];
+};
+
+struct odm_rf_cal {
+ /* for tx power tracking */
+ u32 RegA24; /* for TempCCK */
+ s32 RegE94;
+ s32 RegE9C;
+ s32 RegEB4;
+ s32 RegEBC;
+
+ u8 TXPowercount;
+ bool bTXPowerTrackingInit;
+ bool bTXPowerTracking;
+ u8 TxPowerTrackControl; /* for mp mode, turn off txpwrtracking
+ * as default */
+ u8 TM_Trigger;
+ u8 InternalPA5G[2]; /* pathA / pathB */
+
+ u8 ThermalMeter[2]; /* ThermalMeter, index 0 for RFIC0,
+ * and 1 for RFIC1 */
+ u8 ThermalValue;
+ u8 ThermalValue_LCK;
+ u8 ThermalValue_IQK;
+ u8 ThermalValue_DPK;
+ u8 ThermalValue_AVG[AVG_THERMAL_NUM];
+ u8 ThermalValue_AVG_index;
+ u8 ThermalValue_RxGain;
+ u8 ThermalValue_Crystal;
+ u8 ThermalValue_DPKstore;
+ u8 ThermalValue_DPKtrack;
+ bool TxPowerTrackingInProgress;
+ bool bDPKenable;
+
+ bool bReloadtxpowerindex;
+ u8 bRfPiEnable;
+ u32 TXPowerTrackingCallbackCnt; /* cosa add for debug */
+
+ u8 bCCKinCH14;
+ u8 CCK_index;
+ u8 OFDM_index[2];
+ bool bDoneTxpower;
+
+ u8 ThermalValue_HP[HP_THERMAL_NUM];
+ u8 ThermalValue_HP_index;
+ struct ijk_matrix_regs_set IQKMatrixRegSetting[IQK_Matrix_Settings_NUM];
+
+ u8 Delta_IQK;
+ u8 Delta_LCK;
+
+ /* for IQK */
+ u32 RegC04;
+ u32 Reg874;
+ u32 RegC08;
+ u32 RegB68;
+ u32 RegB6C;
+ u32 Reg870;
+ u32 Reg860;
+ u32 Reg864;
+
+ bool bIQKInitialized;
+ bool bLCKInProgress;
+ bool bAntennaDetected;
+ u32 ADDA_backup[IQK_ADDA_REG_NUM];
+ u32 IQK_MAC_backup[IQK_MAC_REG_NUM];
+ u32 IQK_BB_backup_recover[9];
+ u32 IQK_BB_backup[IQK_BB_REG_NUM];
+
+ /* for APK */
+ u32 APKoutput[2][2]; /* path A/B; output1_1a/output1_2a */
+ u8 bAPKdone;
+ u8 bAPKThermalMeterIgnore;
+ u8 bDPdone;
+ u8 bDPPathAOK;
+ u8 bDPPathBOK;
+};
+
+/* ODM Dynamic common info value definition */
+
+struct fast_ant_train {
+ u8 Bssid[6];
+ u8 antsel_rx_keep_0;
+ u8 antsel_rx_keep_1;
+ u8 antsel_rx_keep_2;
+ u32 antSumRSSI[7];
+ u32 antRSSIcnt[7];
+ u32 antAveRSSI[7];
+ u8 FAT_State;
+ u32 TrainIdx;
+ u8 antsel_a[ODM_ASSOCIATE_ENTRY_NUM];
+ u8 antsel_b[ODM_ASSOCIATE_ENTRY_NUM];
+ u8 antsel_c[ODM_ASSOCIATE_ENTRY_NUM];
+ u32 MainAnt_Sum[ODM_ASSOCIATE_ENTRY_NUM];
+ u32 AuxAnt_Sum[ODM_ASSOCIATE_ENTRY_NUM];
+ u32 MainAnt_Cnt[ODM_ASSOCIATE_ENTRY_NUM];
+ u32 AuxAnt_Cnt[ODM_ASSOCIATE_ENTRY_NUM];
+ u8 RxIdleAnt;
+ bool bBecomeLinked;
+};
+
+enum fat_state {
+ FAT_NORMAL_STATE = 0,
+ FAT_TRAINING_STATE = 1,
+};
+
+enum ant_div_type {
+ NO_ANTDIV = 0xFF,
+ CG_TRX_HW_ANTDIV = 0x01,
+ CGCS_RX_HW_ANTDIV = 0x02,
+ FIXED_HW_ANTDIV = 0x03,
+ CG_TRX_SMART_ANTDIV = 0x04,
+ CGCS_RX_SW_ANTDIV = 0x05,
+};
+
+/* Copy from SD4 defined structure. We use to support PHY DM integration. */
+struct odm_dm_struct {
+ /* Add for different team use temporarily */
+ struct adapter *Adapter; /* For CE/NIC team */
+ struct rtl8192cd_priv *priv; /* For AP/ADSL team */
+	/* When you use the above pointers, they must be initialized. */
+ bool odm_ready;
+
+ struct rtl8192cd_priv *fake_priv;
+ u64 DebugComponents;
+ u32 DebugLevel;
+
+/* ODM HANDLE, DRIVER NEEDS NOT TO HOOK------ */
+ bool bCckHighPower;
+ u8 RFPathRxEnable; /* ODM_CMNINFO_RFPATH_ENABLE */
+ u8 ControlChannel;
+/* ODM HANDLE, DRIVER NEEDS NOT TO HOOK------ */
+
+/* 1 COMMON INFORMATION */
+ /* Init Value */
+/* HOOK BEFORE REG INIT----------- */
+ /* ODM Platform info AP/ADSL/CE/MP = 1/2/3/4 */
+ u8 SupportPlatform;
+	/* ODM Support Ability DIG/RATR/TX_PWR_TRACK/... = 1/2/3/... */
+ u32 SupportAbility;
+ /* ODM PCIE/USB/SDIO/GSPI = 0/1/2/3 */
+ u8 SupportInterface;
+ /* ODM composite or independent. Bit oriented/ 92C+92D+ .... or any
+ * other type = 1/2/3/... */
+ u32 SupportICType;
+ /* Cut Version TestChip/A-cut/B-cut... = 0/1/2/3/... */
+ u8 CutVersion;
+ /* Fab Version TSMC/UMC = 0/1 */
+ u8 FabVersion;
+ /* RF Type 4T4R/3T3R/2T2R/1T2R/1T1R/... */
+ u8 RFType;
+ /* Board Type Normal/HighPower/MiniCard/SLIM/Combo/. = 0/1/2/3/4/. */
+ u8 BoardType;
+ /* with external LNA NO/Yes = 0/1 */
+ u8 ExtLNA;
+ /* with external PA NO/Yes = 0/1 */
+ u8 ExtPA;
+ /* with external TRSW NO/Yes = 0/1 */
+ u8 ExtTRSW;
+ u8 PatchID; /* Customer ID */
+ bool bInHctTest;
+ bool bWIFITest;
+
+ bool bDualMacSmartConcurrent;
+ u32 BK_SupportAbility;
+ u8 AntDivType;
+/* HOOK BEFORE REG INIT----------- */
+
+ /* Dynamic Value */
+/* POINTER REFERENCE----------- */
+
+ u8 u8_temp;
+ bool bool_temp;
+ struct adapter *adapter_temp;
+
+ /* MAC PHY Mode SMSP/DMSP/DMDP = 0/1/2 */
+ u8 *pMacPhyMode;
+ /* TX Unicast byte count */
+ u64 *pNumTxBytesUnicast;
+ /* RX Unicast byte count */
+ u64 *pNumRxBytesUnicast;
+ /* Wireless mode B/G/A/N = BIT0/BIT1/BIT2/BIT3 */
+ u8 *pWirelessMode; /* ODM_WIRELESS_MODE_E */
+ /* Frequence band 2.4G/5G = 0/1 */
+ u8 *pBandType;
+ /* Secondary channel offset don't_care/below/above = 0/1/2 */
+ u8 *pSecChOffset;
+ /* Security mode Open/WEP/AES/TKIP = 0/1/2/3 */
+ u8 *pSecurity;
+ /* BW info 20M/40M/80M = 0/1/2 */
+ u8 *pBandWidth;
+ /* Central channel location Ch1/Ch2/.... */
+ u8 *pChannel; /* central channel number */
+ /* Common info for 92D DMSP */
+
+ bool *pbGetValueFromOtherMac;
+ struct adapter **pBuddyAdapter;
+ bool *pbMasterOfDMSP; /* MAC0: master, MAC1: slave */
+ /* Common info for Status */
+ bool *pbScanInProcess;
+ bool *pbPowerSaving;
+ /* CCA Path 2-path/path-A/path-B = 0/1/2; using ODM_CCA_PATH_E. */
+ u8 *pOnePathCCA;
+ /* pMgntInfo->AntennaTest */
+ u8 *pAntennaTest;
+ bool *pbNet_closed;
+/* POINTER REFERENCE----------- */
+ /* */
+/* CALL BY VALUE------------- */
+ bool bWIFI_Direct;
+ bool bWIFI_Display;
+ bool bLinked;
+ u8 RSSI_Min;
+ u8 InterfaceIndex; /* Add for 92D dual MAC: 0--Mac0 1--Mac1 */
+ bool bIsMPChip;
+ bool bOneEntryOnly;
+ /* Common info for BTDM */
+ bool bBtDisabled; /* BT is disabled */
+ bool bBtHsOperation; /* BT HS mode is under progress */
+ u8 btHsDigVal; /* use BT rssi to decide the DIG value */
+	bool bBtDisableEdcaTurbo;/* Under some conditions, don't enable
+				  * EDCA Turbo */
+ bool bBtBusy; /* BT is busy. */
+/* CALL BY VALUE------------- */
+
+ /* 2 Define STA info. */
+ /* _ODM_STA_INFO */
+	/* For MP, we need to reduce one array pointer for the default port. */
+ struct sta_info *pODM_StaInfo[ODM_ASSOCIATE_ENTRY_NUM];
+
+ u16 CurrminRptTime;
+ struct odm_ra_info RAInfo[ODM_ASSOCIATE_ENTRY_NUM]; /* Use MacID as
+ * array index. STA MacID=0,
+ * VWiFi Client MacID={1, ODM_ASSOCIATE_ENTRY_NUM-1} */
+ /* */
+ /* 2012/02/14 MH Add to share 88E ra with other SW team. */
+	/* We need to collect all supported abilities into a proper area. */
+ /* */
+ bool RaSupport88E;
+
+ /* Define ........... */
+
+ /* Latest packet phy info (ODM write) */
+ struct odm_phy_dbg_info PhyDbgInfo;
+
+ /* Latest packet phy info (ODM write) */
+ struct odm_mac_status_info *pMacInfo;
+
+	/* Different team independent structure? */
+
+ /* ODM Structure */
+ struct fast_ant_train DM_FatTable;
+ struct rtw_dig DM_DigTable;
+ struct rtl_ps DM_PSTable;
+ struct dyn_primary_cca DM_PriCCA;
+ struct rx_hpc DM_RXHP_Table;
+ struct false_alarm_stats FalseAlmCnt;
+ struct false_alarm_stats FlaseAlmCntBuddyAdapter;
+ struct sw_ant_switch DM_SWAT_Table;
+ bool RSSI_test;
+
+ struct edca_turbo DM_EDCA_Table;
+ u32 WMMEDCA_BE;
+ /* Copy from SD4 structure */
+ /* */
+ /* ================================================== */
+ /* */
+
+ bool *pbDriverStopped;
+ bool *pbDriverIsGoingToPnpSetPowerSleep;
+ bool *pinit_adpt_in_progress;
+
+ /* PSD */
+ bool bUserAssignLevel;
+ struct timer_list PSDTimer;
+ u8 RSSI_BT; /* come from BT */
+ bool bPSDinProcess;
+ bool bDMInitialGainEnable;
+
+ /* for rate adaptive, in fact, 88c/92c fw will handle this */
+ u8 bUseRAMask;
+
+ struct odm_rate_adapt RateAdaptive;
+
+ struct odm_rf_cal RFCalibrateInfo;
+
+ /* TX power tracking */
+ u8 BbSwingIdxOfdm;
+ u8 BbSwingIdxOfdmCurrent;
+ u8 BbSwingIdxOfdmBase;
+ bool BbSwingFlagOfdm;
+ u8 BbSwingIdxCck;
+ u8 BbSwingIdxCckCurrent;
+ u8 BbSwingIdxCckBase;
+ bool BbSwingFlagCck;
+ u8 *mp_mode;
+ /* ODM system resource. */
+
+ /* ODM relative time. */
+ struct timer_list PathDivSwitchTimer;
+ /* 2011.09.27 add for Path Diversity */
+ struct timer_list CCKPathDiversityTimer;
+ struct timer_list FastAntTrainingTimer;
+}; /* DM_Dynamic_Mechanism_Structure */
+
+#define ODM_RF_PATH_MAX 2
+
+enum ODM_RF_RADIO_PATH {
+ ODM_RF_PATH_A = 0, /* Radio Path A */
+ ODM_RF_PATH_B = 1, /* Radio Path B */
+ ODM_RF_PATH_C = 2, /* Radio Path C */
+ ODM_RF_PATH_D = 3, /* Radio Path D */
+};
+
+enum ODM_RF_CONTENT {
+ odm_radioa_txt = 0x1000,
+ odm_radiob_txt = 0x1001,
+ odm_radioc_txt = 0x1002,
+ odm_radiod_txt = 0x1003
+};
+
+enum odm_bb_config_type {
+ CONFIG_BB_PHY_REG,
+ CONFIG_BB_AGC_TAB,
+ CONFIG_BB_AGC_TAB_2G,
+ CONFIG_BB_AGC_TAB_5G,
+ CONFIG_BB_PHY_REG_PG,
+};
+
+/* Status code */
+enum rt_status {
+ RT_STATUS_SUCCESS,
+ RT_STATUS_FAILURE,
+ RT_STATUS_PENDING,
+ RT_STATUS_RESOURCE,
+ RT_STATUS_INVALID_CONTEXT,
+ RT_STATUS_INVALID_PARAMETER,
+ RT_STATUS_NOT_SUPPORT,
+ RT_STATUS_OS_API_FAILED,
+};
+
+/* 3=========================================================== */
+/* 3 DIG */
+/* 3=========================================================== */
+
+enum dm_dig_op {
+ RT_TYPE_THRESH_HIGH = 0,
+ RT_TYPE_THRESH_LOW = 1,
+ RT_TYPE_BACKOFF = 2,
+ RT_TYPE_RX_GAIN_MIN = 3,
+ RT_TYPE_RX_GAIN_MAX = 4,
+ RT_TYPE_ENABLE = 5,
+ RT_TYPE_DISABLE = 6,
+ DIG_OP_TYPE_MAX
+};
+
+#define DM_DIG_THRESH_HIGH 40
+#define DM_DIG_THRESH_LOW 35
+
+#define DM_SCAN_RSSI_TH 0x14 /* scan return issue for LC */
+
+
+#define DM_false_ALARM_THRESH_LOW 400
+#define DM_false_ALARM_THRESH_HIGH 1000
+
+#define DM_DIG_MAX_NIC 0x3e
+#define DM_DIG_MIN_NIC 0x1e /* 0x22/0x1c */
+
+#define DM_DIG_MAX_AP 0x32
+#define DM_DIG_MIN_AP 0x20
+
+#define DM_DIG_MAX_NIC_HP 0x46
+#define DM_DIG_MIN_NIC_HP 0x2e
+
+#define DM_DIG_MAX_AP_HP 0x42
+#define DM_DIG_MIN_AP_HP 0x30
+
+/* vivi: 92c and 92d have different definitions, 20110504 */
+/* this is for 92c */
+#define DM_DIG_FA_TH0 0x200/* 0x20 */
+#define DM_DIG_FA_TH1 0x300/* 0x100 */
+#define DM_DIG_FA_TH2 0x400/* 0x200 */
+/* this is for 92d */
+#define DM_DIG_FA_TH0_92D 0x100
+#define DM_DIG_FA_TH1_92D 0x400
+#define DM_DIG_FA_TH2_92D 0x600
+
+#define DM_DIG_BACKOFF_MAX 12
+#define DM_DIG_BACKOFF_MIN -4
+#define DM_DIG_BACKOFF_DEFAULT 10
+
+/* 3=========================================================== */
+/* 3 AGC RX High Power Mode */
+/* 3=========================================================== */
+#define LNA_Low_Gain_1 0x64
+#define LNA_Low_Gain_2 0x5A
+#define LNA_Low_Gain_3 0x58
+
+#define FA_RXHP_TH1 5000
+#define FA_RXHP_TH2 1500
+#define FA_RXHP_TH3 800
+#define FA_RXHP_TH4 600
+#define FA_RXHP_TH5 500
+
+/* 3=========================================================== */
+/* 3 EDCA */
+/* 3=========================================================== */
+
+/* 3=========================================================== */
+/* 3 Dynamic Tx Power */
+/* 3=========================================================== */
+/* Dynamic Tx Power Control Threshold */
+#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
+#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
+#define TX_POWER_NEAR_FIELD_THRESH_AP 0x3F
+
+#define TxHighPwrLevel_Normal 0
+#define TxHighPwrLevel_Level1 1
+#define TxHighPwrLevel_Level2 2
+#define TxHighPwrLevel_BT1 3
+#define TxHighPwrLevel_BT2 4
+#define TxHighPwrLevel_15 5
+#define TxHighPwrLevel_35 6
+#define TxHighPwrLevel_50 7
+#define TxHighPwrLevel_70 8
+#define TxHighPwrLevel_100 9
+
+/* 3=========================================================== */
+/* 3 Rate Adaptive */
+/* 3=========================================================== */
+#define DM_RATR_STA_INIT 0
+#define DM_RATR_STA_HIGH 1
+#define DM_RATR_STA_MIDDLE 2
+#define DM_RATR_STA_LOW 3
+
+/* 3=========================================================== */
+/* 3 BB Power Save */
+/* 3=========================================================== */
+
+
+enum dm_1r_cca {
+ CCA_1R = 0,
+ CCA_2R = 1,
+ CCA_MAX = 2,
+};
+
+enum dm_rf {
+ RF_Save = 0,
+ RF_Normal = 1,
+ RF_MAX = 2,
+};
+
+/* 3=========================================================== */
+/* 3 Antenna Diversity */
+/* 3=========================================================== */
+enum dm_swas {
+ Antenna_A = 1,
+ Antenna_B = 2,
+ Antenna_MAX = 3,
+};
+
+/* Maximum number of antenna detection attempts to perform. */
+#define MAX_ANTENNA_DETECTION_CNT 10
+
+/* Extern Global Variables. */
+#define OFDM_TABLE_SIZE_92C 37
+#define OFDM_TABLE_SIZE_92D 43
+#define CCK_TABLE_SIZE 33
+
+extern u32 OFDMSwingTable[OFDM_TABLE_SIZE_92D];
+extern u8 CCKSwingTable_Ch1_Ch13[CCK_TABLE_SIZE][8];
+extern u8 CCKSwingTable_Ch14 [CCK_TABLE_SIZE][8];
+
+/* check Sta pointer valid or not */
+#define IS_STA_VALID(pSta) (pSta)
+/* 20100514 Joseph: Add definition for antenna switching test after link. */
+/* This indicates two different steps. */
+/* In SWAW_STEP_PEAK, driver needs to switch antenna and listen to the
+ * signal on the air. */
+/* In SWAW_STEP_DETERMINE, driver just compares the signal captured in
+ * SWAW_STEP_PEAK */
+/* with original RSSI to determine if it is necessary to switch antenna. */
+#define SWAW_STEP_PEAK 0
+#define SWAW_STEP_DETERMINE 1
+
+void ODM_Write_DIG(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI);
+void ODM_Write_CCK_CCA_Thres(struct odm_dm_struct *pDM_Odm, u8 CurCCK_CCAThres);
+
+void ODM_SetAntenna(struct odm_dm_struct *pDM_Odm, u8 Antenna);
+
+
+#define dm_RF_Saving ODM_RF_Saving
+void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal);
+
+#define SwAntDivRestAfterLink ODM_SwAntDivRestAfterLink
+void ODM_SwAntDivRestAfterLink(struct odm_dm_struct *pDM_Odm);
+
+#define dm_CheckTXPowerTracking ODM_TXPowerTrackingCheck
+void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm);
+
+bool ODM_RAStateCheck(struct odm_dm_struct *pDM_Odm, s32 RSSI,
+ bool bForceUpdate, u8 *pRATRState);
+
+#define dm_SWAW_RSSI_Check ODM_SwAntDivChkPerPktRssi
+void ODM_SwAntDivChkPerPktRssi(struct odm_dm_struct *pDM_Odm, u8 StationID,
+ struct odm_phy_status_info *pPhyInfo);
+
+u32 ConvertTo_dB(u32 Value);
+
+u32 GetPSDData(struct odm_dm_struct *pDM_Odm, unsigned int point,
+ u8 initial_gain_psd);
+
+void odm_DIGbyRSSI_LPS(struct odm_dm_struct *pDM_Odm);
+
+u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid,
+ u32 ra_mask, u8 rssi_level);
+
+void ODM_DMInit(struct odm_dm_struct *pDM_Odm);
+
+void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm);
+
+void ODM_CmnInfoInit(struct odm_dm_struct *pDM_Odm,
+ enum odm_common_info_def CmnInfo, u32 Value);
+
+void ODM_CmnInfoHook(struct odm_dm_struct *pDM_Odm,
+ enum odm_common_info_def CmnInfo, void *pValue);
+
+void ODM_CmnInfoPtrArrayHook(struct odm_dm_struct *pDM_Odm,
+ enum odm_common_info_def CmnInfo,
+ u16 Index, void *pValue);
+
+void ODM_CmnInfoUpdate(struct odm_dm_struct *pDM_Odm, u32 CmnInfo, u64 Value);
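
The section comments in enum odm_common_info_def split its entries into fixed init values, driver-owned pointers, and values updated at runtime; the three setters above appear to map onto those groups. The hookup sketch below is hedged: struct hal_data and its fields are hypothetical stand-ins for the driver's own HAL state, and the pairing is inferred from the enum comments rather than stated in the patch.

struct hal_data {			/* hypothetical driver-side state */
	bool scan_in_progress;
	u8 rssi_min;
};

static void example_odm_hookup(struct odm_dm_struct *dm, struct hal_data *hal)
{
	/* Fixed, init-time values ("HOOK BEFORE REG INIT"). */
	ODM_CmnInfoInit(dm, ODM_CMNINFO_IC_TYPE, ODM_RTL8188E);
	ODM_CmnInfoInit(dm, ODM_CMNINFO_INTERFACE, ODM_ITRF_USB);

	/* Driver-owned variables handed over by pointer ("POINTER REFERENCE"). */
	ODM_CmnInfoHook(dm, ODM_CMNINFO_SCAN, &hal->scan_in_progress);

	/* Values that change at runtime ("CALL BY VALUE"). */
	ODM_CmnInfoUpdate(dm, ODM_CMNINFO_RSSI_MIN, hal->rssi_min);
}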
+
+void ODM_InitAllTimers(struct odm_dm_struct *pDM_Odm);
+
+void ODM_CancelAllTimers(struct odm_dm_struct *pDM_Odm);
+
+void ODM_ReleaseAllTimers(struct odm_dm_struct *pDM_Odm);
+
+void ODM_ResetIQKResult(struct odm_dm_struct *pDM_Odm);
+
+void ODM_AntselStatistics_88C(struct odm_dm_struct *pDM_Odm, u8 MacId,
+ u32 PWDBAll, bool isCCKrate);
+
+void ODM_SingleDualAntennaDefaultSetting(struct odm_dm_struct *pDM_Odm);
+
+bool ODM_SingleDualAntennaDetection(struct odm_dm_struct *pDM_Odm, u8 mode);
+
+void odm_dtc(struct odm_dm_struct *pDM_Odm);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/odm_HWConfig.h b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
new file mode 100644
index 00000000000..63779f5b2a3
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
@@ -0,0 +1,132 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef __HALHWOUTSRC_H__
+#define __HALHWOUTSRC_H__
+
+/* Definition */
+/* CCK Rates, TxHT = 0 */
+#define DESC92C_RATE1M 0x00
+#define DESC92C_RATE2M 0x01
+#define DESC92C_RATE5_5M 0x02
+#define DESC92C_RATE11M 0x03
+
+/* OFDM Rates, TxHT = 0 */
+#define DESC92C_RATE6M 0x04
+#define DESC92C_RATE9M 0x05
+#define DESC92C_RATE12M 0x06
+#define DESC92C_RATE18M 0x07
+#define DESC92C_RATE24M 0x08
+#define DESC92C_RATE36M 0x09
+#define DESC92C_RATE48M 0x0a
+#define DESC92C_RATE54M 0x0b
+
+/* MCS Rates, TxHT = 1 */
+#define DESC92C_RATEMCS0 0x0c
+#define DESC92C_RATEMCS1 0x0d
+#define DESC92C_RATEMCS2 0x0e
+#define DESC92C_RATEMCS3 0x0f
+#define DESC92C_RATEMCS4 0x10
+#define DESC92C_RATEMCS5 0x11
+#define DESC92C_RATEMCS6 0x12
+#define DESC92C_RATEMCS7 0x13
+#define DESC92C_RATEMCS8 0x14
+#define DESC92C_RATEMCS9 0x15
+#define DESC92C_RATEMCS10 0x16
+#define DESC92C_RATEMCS11 0x17
+#define DESC92C_RATEMCS12 0x18
+#define DESC92C_RATEMCS13 0x19
+#define DESC92C_RATEMCS14 0x1a
+#define DESC92C_RATEMCS15 0x1b
+#define DESC92C_RATEMCS15_SG 0x1c
+#define DESC92C_RATEMCS32 0x20
+
+/* structure and define */
+
+struct phy_rx_agc_info {
+ #ifdef __LITTLE_ENDIAN
+ u8 gain:7, trsw:1;
+ #else
+ u8 trsw:1, gain:7;
+ #endif
+};
+
+struct phy_status_rpt {
+ struct phy_rx_agc_info path_agc[2];
+ u8 ch_corr[2];
+ u8 cck_sig_qual_ofdm_pwdb_all;
+ u8 cck_agc_rpt_ofdm_cfosho_a;
+ u8 cck_rpt_b_ofdm_cfosho_b;
+ u8 rsvd_1;/* ch_corr_msb; */
+ u8 noise_power_db_msb;
+ u8 path_cfotail[2];
+ u8 pcts_mask[2];
+ s8 stream_rxevm[2];
+ u8 path_rxsnr[2];
+ u8 noise_power_db_lsb;
+ u8 rsvd_2[3];
+ u8 stream_csi[2];
+ u8 stream_target_csi[2];
+ s8 sig_evm;
+ u8 rsvd_3;
+
+#ifdef __LITTLE_ENDIAN
+ u8 antsel_rx_keep_2:1; /* ex_intf_flg:1; */
+ u8 sgi_en:1;
+ u8 rxsc:2;
+ u8 idle_long:1;
+ u8 r_ant_train_en:1;
+ u8 ant_sel_b:1;
+ u8 ant_sel:1;
+#else /* _BIG_ENDIAN_ */
+ u8 ant_sel:1;
+ u8 ant_sel_b:1;
+ u8 r_ant_train_en:1;
+ u8 idle_long:1;
+ u8 rxsc:2;
+ u8 sgi_en:1;
+ u8 antsel_rx_keep_2:1; /* ex_intf_flg:1; */
+#endif
+};
+
+void odm_Init_RSSIForDM(struct odm_dm_struct *pDM_Odm);
+
+void ODM_PhyStatusQuery(struct odm_dm_struct *pDM_Odm,
+ struct odm_phy_status_info *pPhyInfo,
+ u8 *pPhyStatus,
+ struct odm_per_pkt_info *pPktinfo);
+
+void ODM_MacStatusQuery(struct odm_dm_struct *pDM_Odm,
+ u8 *pMacStatus,
+ u8 MacID,
+ bool bPacketMatchBSSID,
+ bool bPacketToSelf,
+ bool bPacketBeacon);
+
+enum HAL_STATUS ODM_ConfigRFWithHeaderFile(struct odm_dm_struct *pDM_Odm,
+ enum ODM_RF_RADIO_PATH Content,
+ enum ODM_RF_RADIO_PATH eRFPath);
+
+enum HAL_STATUS ODM_ConfigBBWithHeaderFile(struct odm_dm_struct *pDM_Odm,
+ enum odm_bb_config_type ConfigType);
+
+enum HAL_STATUS ODM_ConfigMACWithHeaderFile(struct odm_dm_struct *pDM_Odm);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/odm_RTL8188E.h b/drivers/staging/rtl8188eu/include/odm_RTL8188E.h
new file mode 100644
index 00000000000..f96ad5af4bd
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm_RTL8188E.h
@@ -0,0 +1,56 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __ODM_RTL8188E_H__
+#define __ODM_RTL8188E_H__
+
+#define MAIN_ANT 0
+#define AUX_ANT 1
+#define MAIN_ANT_CG_TRX 1
+#define AUX_ANT_CG_TRX 0
+#define MAIN_ANT_CGCS_RX 0
+#define AUX_ANT_CGCS_RX 1
+
+void ODM_DIG_LowerBound_88E(struct odm_dm_struct *pDM_Odm);
+
+void ODM_AntennaDiversityInit_88E(struct odm_dm_struct *pDM_Odm);
+
+void ODM_AntennaDiversity_88E(struct odm_dm_struct *pDM_Odm);
+
+void ODM_SetTxAntByTxInfo_88E(struct odm_dm_struct *pDM_Odm, u8 *pDesc,
+ u8 macId);
+
+void ODM_UpdateRxIdleAnt_88E(struct odm_dm_struct *pDM_Odm, u8 Ant);
+
+void ODM_AntselStatistics_88E(struct odm_dm_struct *pDM_Odm, u8 antsel_tr_mux,
+ u32 MacId, u8 RxPWDBAll);
+
+void odm_FastAntTraining(struct odm_dm_struct *pDM_Odm);
+
+void odm_FastAntTrainingCallback(struct odm_dm_struct *pDM_Odm);
+
+void odm_FastAntTrainingWorkItemCallback(struct odm_dm_struct *pDM_Odm);
+
+void odm_PrimaryCCA_Init(struct odm_dm_struct *pDM_Odm);
+
+bool ODM_DynamicPrimaryCCA_DupRTS(struct odm_dm_struct *pDM_Odm);
+
+void odm_DynamicPrimaryCCA(struct odm_dm_struct *pDM_Odm);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/odm_RegConfig8188E.h b/drivers/staging/rtl8188eu/include/odm_RegConfig8188E.h
new file mode 100644
index 00000000000..727e6b26fb0
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm_RegConfig8188E.h
@@ -0,0 +1,43 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __INC_ODM_REGCONFIG_H_8188E
+#define __INC_ODM_REGCONFIG_H_8188E
+
+void odm_ConfigRFReg_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data,
+ enum ODM_RF_RADIO_PATH RF_PATH, u32 RegAddr);
+
+void odm_ConfigRF_RadioA_8188E(struct odm_dm_struct *pDM_Odm,
+ u32 Addr, u32 Data);
+
+void odm_ConfigRF_RadioB_8188E(struct odm_dm_struct *pDM_Odm,
+ u32 Addr, u32 Data);
+
+void odm_ConfigMAC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u8 Data);
+
+void odm_ConfigBB_AGC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
+ u32 Bitmask, u32 Data);
+
+void odm_ConfigBB_PHY_REG_PG_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
+ u32 Bitmask, u32 Data);
+
+void odm_ConfigBB_PHY_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
+ u32 Bitmask, u32 Data);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/odm_RegDefine11AC.h b/drivers/staging/rtl8188eu/include/odm_RegDefine11AC.h
new file mode 100644
index 00000000000..f08775c0dd8
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm_RegDefine11AC.h
@@ -0,0 +1,54 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef __ODM_REGDEFINE11AC_H__
+#define __ODM_REGDEFINE11AC_H__
+
+/* 2 RF REG LIST */
+
+
+
+/* 2 BB REG LIST */
+/* PAGE 8 */
+/* PAGE 9 */
+#define ODM_REG_OFDM_FA_RST_11AC 0x9A4
+/* PAGE A */
+#define ODM_REG_CCK_CCA_11AC 0xA0A
+#define ODM_REG_CCK_FA_RST_11AC 0xA2C
+#define ODM_REG_CCK_FA_11AC 0xA5C
+/* PAGE C */
+#define ODM_REG_IGI_A_11AC 0xC50
+/* PAGE E */
+#define ODM_REG_IGI_B_11AC 0xE50
+/* PAGE F */
+#define ODM_REG_OFDM_FA_11AC 0xF48
+
+
+/* 2 MAC REG LIST */
+
+
+
+
+/* DIG Related */
+#define ODM_BIT_IGI_11AC 0xFFFFFFFF
+
+
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h b/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h
new file mode 100644
index 00000000000..5a61f902bc1
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h
@@ -0,0 +1,171 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef __ODM_REGDEFINE11N_H__
+#define __ODM_REGDEFINE11N_H__
+
+
+/* 2 RF REG LIST */
+#define ODM_REG_RF_MODE_11N 0x00
+#define ODM_REG_RF_0B_11N 0x0B
+#define ODM_REG_CHNBW_11N 0x18
+#define ODM_REG_T_METER_11N 0x24
+#define ODM_REG_RF_25_11N 0x25
+#define ODM_REG_RF_26_11N 0x26
+#define ODM_REG_RF_27_11N 0x27
+#define ODM_REG_RF_2B_11N 0x2B
+#define ODM_REG_RF_2C_11N 0x2C
+#define ODM_REG_RXRF_A3_11N 0x3C
+#define ODM_REG_T_METER_92D_11N 0x42
+#define ODM_REG_T_METER_88E_11N 0x42
+
+
+
+/* 2 BB REG LIST */
+/* PAGE 8 */
+#define ODM_REG_BB_CTRL_11N 0x800
+#define ODM_REG_RF_PIN_11N 0x804
+#define ODM_REG_PSD_CTRL_11N 0x808
+#define ODM_REG_TX_ANT_CTRL_11N 0x80C
+#define ODM_REG_BB_PWR_SAV5_11N 0x818
+#define ODM_REG_CCK_RPT_FORMAT_11N 0x824
+#define ODM_REG_RX_DEFUALT_A_11N 0x858
+#define ODM_REG_RX_DEFUALT_B_11N 0x85A
+#define ODM_REG_BB_PWR_SAV3_11N 0x85C
+#define ODM_REG_ANTSEL_CTRL_11N 0x860
+#define ODM_REG_RX_ANT_CTRL_11N 0x864
+#define ODM_REG_PIN_CTRL_11N 0x870
+#define ODM_REG_BB_PWR_SAV1_11N 0x874
+#define ODM_REG_ANTSEL_PATH_11N 0x878
+#define ODM_REG_BB_3WIRE_11N 0x88C
+#define ODM_REG_SC_CNT_11N 0x8C4
+#define ODM_REG_PSD_DATA_11N 0x8B4
+/* PAGE 9 */
+#define ODM_REG_ANT_MAPPING1_11N 0x914
+#define ODM_REG_ANT_MAPPING2_11N 0x918
+/* PAGE A */
+#define ODM_REG_CCK_ANTDIV_PARA1_11N 0xA00
+#define ODM_REG_CCK_CCA_11N 0xA0A
+#define ODM_REG_CCK_ANTDIV_PARA2_11N 0xA0C
+#define ODM_REG_CCK_ANTDIV_PARA3_11N 0xA10
+#define ODM_REG_CCK_ANTDIV_PARA4_11N 0xA14
+#define ODM_REG_CCK_FILTER_PARA1_11N 0xA22
+#define ODM_REG_CCK_FILTER_PARA2_11N 0xA23
+#define ODM_REG_CCK_FILTER_PARA3_11N 0xA24
+#define ODM_REG_CCK_FILTER_PARA4_11N 0xA25
+#define ODM_REG_CCK_FILTER_PARA5_11N 0xA26
+#define ODM_REG_CCK_FILTER_PARA6_11N 0xA27
+#define ODM_REG_CCK_FILTER_PARA7_11N 0xA28
+#define ODM_REG_CCK_FILTER_PARA8_11N 0xA29
+#define ODM_REG_CCK_FA_RST_11N 0xA2C
+#define ODM_REG_CCK_FA_MSB_11N 0xA58
+#define ODM_REG_CCK_FA_LSB_11N 0xA5C
+#define ODM_REG_CCK_CCA_CNT_11N 0xA60
+#define ODM_REG_BB_PWR_SAV4_11N 0xA74
+/* PAGE B */
+#define ODM_REG_LNA_SWITCH_11N 0xB2C
+#define ODM_REG_PATH_SWITCH_11N 0xB30
+#define ODM_REG_RSSI_CTRL_11N 0xB38
+#define ODM_REG_CONFIG_ANTA_11N 0xB68
+#define ODM_REG_RSSI_BT_11N 0xB9C
+/* PAGE C */
+#define ODM_REG_OFDM_FA_HOLDC_11N 0xC00
+#define ODM_REG_RX_PATH_11N 0xC04
+#define ODM_REG_TRMUX_11N 0xC08
+#define ODM_REG_OFDM_FA_RSTC_11N 0xC0C
+#define ODM_REG_RXIQI_MATRIX_11N 0xC14
+#define ODM_REG_TXIQK_MATRIX_LSB1_11N 0xC4C
+#define ODM_REG_IGI_A_11N 0xC50
+#define ODM_REG_ANTDIV_PARA2_11N 0xC54
+#define ODM_REG_IGI_B_11N 0xC58
+#define ODM_REG_ANTDIV_PARA3_11N 0xC5C
+#define ODM_REG_BB_PWR_SAV2_11N 0xC70
+#define ODM_REG_RX_OFF_11N 0xC7C
+#define ODM_REG_TXIQK_MATRIXA_11N 0xC80
+#define ODM_REG_TXIQK_MATRIXB_11N 0xC88
+#define ODM_REG_TXIQK_MATRIXA_LSB2_11N 0xC94
+#define ODM_REG_TXIQK_MATRIXB_LSB2_11N 0xC9C
+#define ODM_REG_RXIQK_MATRIX_LSB_11N 0xCA0
+#define ODM_REG_ANTDIV_PARA1_11N 0xCA4
+#define ODM_REG_OFDM_FA_TYPE1_11N 0xCF0
+/* PAGE D */
+#define ODM_REG_OFDM_FA_RSTD_11N 0xD00
+#define ODM_REG_OFDM_FA_TYPE2_11N 0xDA0
+#define ODM_REG_OFDM_FA_TYPE3_11N 0xDA4
+#define ODM_REG_OFDM_FA_TYPE4_11N 0xDA8
+/* PAGE E */
+#define ODM_REG_TXAGC_A_6_18_11N 0xE00
+#define ODM_REG_TXAGC_A_24_54_11N 0xE04
+#define ODM_REG_TXAGC_A_1_MCS32_11N 0xE08
+#define ODM_REG_TXAGC_A_MCS0_3_11N 0xE10
+#define ODM_REG_TXAGC_A_MCS4_7_11N 0xE14
+#define ODM_REG_TXAGC_A_MCS8_11_11N 0xE18
+#define ODM_REG_TXAGC_A_MCS12_15_11N 0xE1C
+#define ODM_REG_FPGA0_IQK_11N 0xE28
+#define ODM_REG_TXIQK_TONE_A_11N 0xE30
+#define ODM_REG_RXIQK_TONE_A_11N 0xE34
+#define ODM_REG_TXIQK_PI_A_11N 0xE38
+#define ODM_REG_RXIQK_PI_A_11N 0xE3C
+#define ODM_REG_TXIQK_11N 0xE40
+#define ODM_REG_RXIQK_11N 0xE44
+#define ODM_REG_IQK_AGC_PTS_11N 0xE48
+#define ODM_REG_IQK_AGC_RSP_11N 0xE4C
+#define ODM_REG_BLUETOOTH_11N 0xE6C
+#define ODM_REG_RX_WAIT_CCA_11N 0xE70
+#define ODM_REG_TX_CCK_RFON_11N 0xE74
+#define ODM_REG_TX_CCK_BBON_11N 0xE78
+#define ODM_REG_OFDM_RFON_11N 0xE7C
+#define ODM_REG_OFDM_BBON_11N 0xE80
+#define ODM_REG_TX2RX_11N 0xE84
+#define ODM_REG_TX2TX_11N 0xE88
+#define ODM_REG_RX_CCK_11N 0xE8C
+#define ODM_REG_RX_OFDM_11N 0xED0
+#define ODM_REG_RX_WAIT_RIFS_11N 0xED4
+#define ODM_REG_RX2RX_11N 0xED8
+#define ODM_REG_STANDBY_11N 0xEDC
+#define ODM_REG_SLEEP_11N 0xEE0
+#define ODM_REG_PMPD_ANAEN_11N 0xEEC
+
+
+
+
+
+
+
+/* 2 MAC REG LIST */
+#define ODM_REG_BB_RST_11N 0x02
+#define ODM_REG_ANTSEL_PIN_11N 0x4C
+#define ODM_REG_EARLY_MODE_11N 0x4D0
+#define ODM_REG_RSSI_MONITOR_11N 0x4FE
+#define ODM_REG_EDCA_VO_11N 0x500
+#define ODM_REG_EDCA_VI_11N 0x504
+#define ODM_REG_EDCA_BE_11N 0x508
+#define ODM_REG_EDCA_BK_11N 0x50C
+#define ODM_REG_TXPAUSE_11N 0x522
+#define ODM_REG_RESP_TX_11N 0x6D8
+#define ODM_REG_ANT_TRAIN_PARA1_11N 0x7b0
+#define ODM_REG_ANT_TRAIN_PARA2_11N 0x7b4
+
+
+/* DIG Related */
+#define ODM_BIT_IGI_11N 0x0000007F
+
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/odm_debug.h b/drivers/staging/rtl8188eu/include/odm_debug.h
new file mode 100644
index 00000000000..a9ba6df26b9
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm_debug.h
@@ -0,0 +1,145 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+
+#ifndef __ODM_DBG_H__
+#define __ODM_DBG_H__
+
+
+/* */
+/* Define the debug levels */
+/* */
+/* 1. DBG_TRACE and DBG_LOUD are used for normal cases. */
+/* They help SW engineers develop or trace state changes, */
+/* and also help HW engineers trace every operation to and from HW, */
+/* e.g. IO, Tx, Rx. */
+/* */
+/* 2. DBG_WARNING and DBG_SERIOUS are used for unusual or error cases, */
+/* which help us to debug SW or HW. */
+
+/* Never used in a call to ODM_RT_TRACE()! */
+#define ODM_DBG_OFF 1
+
+/* Fatal bug. */
+/* For example, Tx/Rx/IO locked up, OS hangs, memory access violation, */
+/* resource allocation failed, unexpected HW behavior, HW BUG and so on. */
+#define ODM_DBG_SERIOUS 2
+
+/* Abnormal, rare, or unexpected cases. */
+/* For example, IRP/Packet/OID canceled, device surprise-removed, and so on. */
+#define ODM_DBG_WARNING 3
+
+/* Normal case with useful information about current SW or HW state. */
+/* For example, Tx/Rx descriptor to fill, Tx/Rx descr. completed status, */
+/* SW protocol state change, dynamic mechanism state change and so on. */
+/* */
+#define ODM_DBG_LOUD 4
+
+/* Normal case with detail execution flow or information. */
+#define ODM_DBG_TRACE 5
+
+/* Define the tracing components */
+/* BB Functions */
+#define ODM_COMP_DIG BIT0
+#define ODM_COMP_RA_MASK BIT1
+#define ODM_COMP_DYNAMIC_TXPWR BIT2
+#define ODM_COMP_FA_CNT BIT3
+#define ODM_COMP_RSSI_MONITOR BIT4
+#define ODM_COMP_CCK_PD BIT5
+#define ODM_COMP_ANT_DIV BIT6
+#define ODM_COMP_PWR_SAVE BIT7
+#define ODM_COMP_PWR_TRA BIT8
+#define ODM_COMP_RATE_ADAPTIVE BIT9
+#define ODM_COMP_PATH_DIV BIT10
+#define ODM_COMP_PSD BIT11
+#define ODM_COMP_DYNAMIC_PRICCA BIT12
+#define ODM_COMP_RXHP BIT13
+/* MAC Functions */
+#define ODM_COMP_EDCA_TURBO BIT16
+#define ODM_COMP_EARLY_MODE BIT17
+/* RF Functions */
+#define ODM_COMP_TX_PWR_TRACK BIT24
+#define ODM_COMP_RX_GAIN_TRACK BIT25
+#define ODM_COMP_CALIBRATION BIT26
+/* Common Functions */
+#define ODM_COMP_COMMON BIT30
+#define ODM_COMP_INIT BIT31
+
+/*------------------------Export Macro Definition---------------------------*/
+#define DbgPrint pr_info
+#define RT_PRINTK(fmt, args...) \
+	DbgPrint("%s(): " fmt, __func__, ## args);
+
+#ifndef ASSERT
+ #define ASSERT(expr)
+#endif
+
+#define ODM_RT_TRACE(pDM_Odm, comp, level, fmt) \
+ if (((comp) & pDM_Odm->DebugComponents) && \
+ (level <= pDM_Odm->DebugLevel)) { \
+ if (pDM_Odm->SupportICType == ODM_RTL8192C) \
+ DbgPrint("[ODM-92C] "); \
+ else if (pDM_Odm->SupportICType == ODM_RTL8192D) \
+ DbgPrint("[ODM-92D] "); \
+ else if (pDM_Odm->SupportICType == ODM_RTL8723A) \
+ DbgPrint("[ODM-8723A] "); \
+ else if (pDM_Odm->SupportICType == ODM_RTL8188E) \
+ DbgPrint("[ODM-8188E] "); \
+ else if (pDM_Odm->SupportICType == ODM_RTL8812) \
+ DbgPrint("[ODM-8812] "); \
+ else if (pDM_Odm->SupportICType == ODM_RTL8821) \
+ DbgPrint("[ODM-8821] "); \
+ RT_PRINTK fmt; \
+ }
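
Because the macro body ends with "RT_PRINTK fmt;", the printf-style format string and its arguments must be passed to ODM_RT_TRACE() as one parenthesized group. A usage sketch (not part of the patch):

static void example_trace(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI)
{
	/* Note the extra parentheses around the format string and arguments. */
	ODM_RT_TRACE(pDM_Odm, ODM_COMP_DIG, ODM_DBG_LOUD,
		     ("CurrentIGI = 0x%x\n", CurrentIGI));
}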
+
+#define ODM_RT_TRACE_F(pDM_Odm, comp, level, fmt) \
+ if (((comp) & pDM_Odm->DebugComponents) && \
+ (level <= pDM_Odm->DebugLevel)) { \
+ RT_PRINTK fmt; \
+ }
+
+#define ODM_RT_ASSERT(pDM_Odm, expr, fmt) \
+ if (!(expr)) { \
+		DbgPrint("Assertion failed! %s at ......\n", #expr); \
+		DbgPrint(" ......%s,%s,line=%d\n", __FILE__, \
+ __func__, __LINE__); \
+ RT_PRINTK fmt; \
+ ASSERT(false); \
+ }
+#define ODM_dbg_enter() { DbgPrint("==> %s\n", __func__); }
+#define ODM_dbg_exit() { DbgPrint("<== %s\n", __func__); }
+#define ODM_dbg_trace(str) { DbgPrint("%s:%s\n", __func__, str); }
+
+#define ODM_PRINT_ADDR(pDM_Odm, comp, level, title_str, ptr) \
+ if (((comp) & pDM_Odm->DebugComponents) && \
+ (level <= pDM_Odm->DebugLevel)) { \
+ int __i; \
+ u8 *__ptr = (u8 *)ptr; \
+ DbgPrint("[ODM] "); \
+ DbgPrint(title_str); \
+ DbgPrint(" "); \
+ for (__i = 0; __i < 6; __i++) \
+ DbgPrint("%02X%s", __ptr[__i], (__i==5)?"":"-");\
+ DbgPrint("\n"); \
+ }
+
+void ODM_InitDebugSetting(struct odm_dm_struct *pDM_Odm);
+
+#endif /* __ODM_DBG_H__ */
diff --git a/drivers/staging/rtl8188eu/include/odm_interface.h b/drivers/staging/rtl8188eu/include/odm_interface.h
new file mode 100644
index 00000000000..e5c8704ac01
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm_interface.h
@@ -0,0 +1,164 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef __ODM_INTERFACE_H__
+#define __ODM_INTERFACE_H__
+
+/* */
+/* =========== Constant/Structure/Enum/... Define */
+/* */
+
+/* */
+/* =========== Macro Define */
+/* */
+
+#define _reg_all(_name) ODM_##_name
+#define _reg_ic(_name, _ic) ODM_##_name##_ic
+#define _bit_all(_name) BIT_##_name
+#define _bit_ic(_name, _ic) BIT_##_name##_ic
+
+/* _cat: implemented by Token-Pasting Operator. */
+
+/*===================================
+
+#define ODM_REG_DIG_11N 0xC50
+#define ODM_REG_DIG_11AC 0xDDD
+
+ODM_REG(DIG,_pDM_Odm)
+=====================================*/
+
+#define _reg_11N(_name) ODM_REG_##_name##_11N
+#define _reg_11AC(_name) ODM_REG_##_name##_11AC
+#define _bit_11N(_name) ODM_BIT_##_name##_11N
+#define _bit_11AC(_name) ODM_BIT_##_name##_11AC
+
+#define _cat(_name, _ic_type, _func) \
+ ( \
+ ((_ic_type) & ODM_IC_11N_SERIES) ? _func##_11N(_name) : \
+ _func##_11AC(_name) \
+ )
+
+/* _name: name of register or bit. */
+/* Example: "ODM_REG(R_A_AGC_CORE1, pDM_Odm)" */
+/* gets "ODM_R_A_AGC_CORE1" or "ODM_R_A_AGC_CORE1_8192C",
+ * depending on SupportICType. */
+#define ODM_REG(_name, _pDM_Odm) _cat(_name, _pDM_Odm->SupportICType, _reg)
+#define ODM_BIT(_name, _pDM_Odm) _cat(_name, _pDM_Odm->SupportICType, _bit)
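+
+/* Illustrative expansion: with the defines sketched above, ODM_REG(DIG, pDM_Odm) */
+/* becomes ((pDM_Odm->SupportICType & ODM_IC_11N_SERIES) ? ODM_REG_DIG_11N : */
+/* ODM_REG_DIG_11AC), i.e. 0xC50 on 11N-series ICs. */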
+
+enum odm_h2c_cmd {
+ ODM_H2C_RSSI_REPORT = 0,
+ ODM_H2C_PSD_RESULT = 1,
+ ODM_H2C_PathDiv = 2,
+ ODM_MAX_H2CCMD
+};
+
+/* 2012/02/17 MH For non-MP compile pass only. Linux does not support workitems. */
+/* Suggest the HW team use threads instead of workitems; Windows supports threads as well. */
+typedef void (*RT_WORKITEM_CALL_BACK)(void *pContext);
+
+/* =========== Extern Variable ??? It should be forbidden. */
+
+/* =========== Extern Function Prototype */
+
+u8 ODM_Read1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr);
+
+u16 ODM_Read2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr);
+
+u32 ODM_Read4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr);
+
+void ODM_Write1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u8 Data);
+
+void ODM_Write2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u16 Data);
+
+void ODM_Write4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 Data);
+
+void ODM_SetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr,
+ u32 BitMask, u32 Data);
+
+u32 ODM_GetMACReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask);
+
+void ODM_SetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr,
+ u32 BitMask, u32 Data);
+
+u32 ODM_GetBBReg(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u32 BitMask);
+
+void ODM_SetRFReg(struct odm_dm_struct *pDM_Odm, enum ODM_RF_RADIO_PATH eRFPath,
+ u32 RegAddr, u32 BitMask, u32 Data);
+
+u32 ODM_GetRFReg(struct odm_dm_struct *pDM_Odm, enum ODM_RF_RADIO_PATH eRFPath,
+ u32 RegAddr, u32 BitMask);
+
+/* Memory-related functions. */
+void ODM_AllocateMemory(struct odm_dm_struct *pDM_Odm, void **pPtr, u32 length);
+void ODM_FreeMemory(struct odm_dm_struct *pDM_Odm, void *pPtr, u32 length);
+
+s32 ODM_CompareMemory(struct odm_dm_struct *pDM_Odm, void *pBuf1, void *pBuf2,
+ u32 length);
+
+/* ODM MISC - spinlock-related API. */
+void ODM_AcquireSpinLock(struct odm_dm_struct *pDM_Odm,
+ enum RT_SPINLOCK_TYPE type);
+
+void ODM_ReleaseSpinLock(struct odm_dm_struct *pDM_Odm,
+ enum RT_SPINLOCK_TYPE type);
+
+/* ODM MISC - workitem-related API. */
+void ODM_InitializeWorkItem(struct odm_dm_struct *pDM_Odm, void *pRtWorkItem,
+ RT_WORKITEM_CALL_BACK RtWorkItemCallback,
+ void *pContext, const char *szID);
+
+void ODM_StartWorkItem(void *pRtWorkItem);
+
+void ODM_StopWorkItem(void *pRtWorkItem);
+
+void ODM_FreeWorkItem(void *pRtWorkItem);
+
+void ODM_ScheduleWorkItem(void *pRtWorkItem);
+
+void ODM_IsWorkItemScheduled(void *pRtWorkItem);
+
+/* ODM timer-related API. */
+void ODM_StallExecution(u32 usDelay);
+
+void ODM_delay_ms(u32 ms);
+
+void ODM_delay_us(u32 us);
+
+void ODM_sleep_ms(u32 ms);
+
+void ODM_sleep_us(u32 us);
+
+void ODM_SetTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer,
+ u32 msDelay);
+
+void ODM_InitializeTimer(struct odm_dm_struct *pDM_Odm,
+ struct timer_list *pTimer, void *CallBackFunc,
+ void *pContext, const char *szID);
+
+void ODM_CancelTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer);
+
+void ODM_ReleaseTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer);
+
+/* ODM FW-related API. */
+u32 ODM_FillH2CCmd(u8 *pH2CBuffer, u32 H2CBufferLen, u32 CmdNum,
+ u32 *pElementID, u32 *pCmdLen, u8 **pCmbBuffer,
+ u8 *CmdStartSeq);
+
+#endif /* __ODM_INTERFACE_H__ */
diff --git a/drivers/staging/rtl8188eu/include/odm_precomp.h b/drivers/staging/rtl8188eu/include/odm_precomp.h
new file mode 100644
index 00000000000..520cbbaac35
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm_precomp.h
@@ -0,0 +1,104 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef __ODM_PRECOMP_H__
+#define __ODM_PRECOMP_H__
+
+#include "odm_types.h"
+
+#define TEST_FALG___ 1
+
+/* 2 Config Flags and Structs - defined by each ODM Type */
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <hal_intf.h>
+
+/* 2 Hardware Parameter Files */
+
+#include "Hal8188EFWImg_CE.h"
+
+
+/* 2 OutSrc Header Files */
+
+#include "odm.h"
+#include "odm_HWConfig.h"
+#include "odm_debug.h"
+#include "odm_RegDefine11AC.h"
+#include "odm_RegDefine11N.h"
+
+#include "HalPhyRf.h"
+#include "HalPhyRf_8188e.h"/* for IQK,LCK,Power-tracking */
+#include "Hal8188ERateAdaptive.h"/* for RA,Power training */
+#include "rtl8188e_hal.h"
+
+#include "odm_interface.h"
+#include "odm_reg.h"
+
+#include "HalHWImg8188E_MAC.h"
+#include "HalHWImg8188E_RF.h"
+#include "HalHWImg8188E_BB.h"
+#include "Hal8188EReg.h"
+
+#include "odm_RegConfig8188E.h"
+#include "odm_RTL8188E.h"
+
+void odm_CmnInfoHook_Debug(struct odm_dm_struct *pDM_Odm);
+void odm_CmnInfoInit_Debug(struct odm_dm_struct *pDM_Odm);
+void odm_DIGInit(struct odm_dm_struct *pDM_Odm);
+void odm_RateAdaptiveMaskInit(struct odm_dm_struct *pDM_Odm);
+void odm_DynamicBBPowerSavingInit(struct odm_dm_struct *pDM_Odm);
+void odm_DynamicTxPowerInit(struct odm_dm_struct *pDM_Odm);
+void odm_TXPowerTrackingInit(struct odm_dm_struct *pDM_Odm);
+void ODM_EdcaTurboInit(struct odm_dm_struct *pDM_Odm);
+void odm_SwAntDivInit_NIC(struct odm_dm_struct *pDM_Odm);
+void odm_GlobalAdapterCheck(void);
+void odm_CmnInfoUpdate_Debug(struct odm_dm_struct *pDM_Odm);
+void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm);
+void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm);
+void odm_DIG(struct odm_dm_struct *pDM_Odm);
+void odm_CCKPacketDetectionThresh(struct odm_dm_struct *pDM_Odm);
+void odm_RefreshRateAdaptiveMaskMP(struct odm_dm_struct *pDM_Odm);
+void odm_DynamicBBPowerSaving(struct odm_dm_struct *pDM_Odm);
+void odm_SwAntDivChkAntSwitch(struct odm_dm_struct *pDM_Odm, u8 Step);
+void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm);
+void odm_DynamicTxPower(struct odm_dm_struct *pDM_Odm);
+void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm);
+void odm_SwAntDivInit(struct odm_dm_struct *pDM_Odm);
+void odm_RSSIMonitorCheck(struct odm_dm_struct *pDM_Odm);
+void odm_RefreshRateAdaptiveMask(struct odm_dm_struct *pDM_Odm);
+void odm_1R_CCA(struct odm_dm_struct *pDM_Odm);
+void odm_RefreshRateAdaptiveMaskCE(struct odm_dm_struct *pDM_Odm);
+void odm_RefreshRateAdaptiveMaskAPADSL(struct odm_dm_struct *pDM_Odm);
+void odm_DynamicTxPowerNIC(struct odm_dm_struct *pDM_Odm);
+void odm_DynamicTxPowerAP(struct odm_dm_struct *pDM_Odm);
+void odm_RSSIMonitorCheckMP(struct odm_dm_struct *pDM_Odm);
+void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm);
+void odm_RSSIMonitorCheckAP(struct odm_dm_struct *pDM_Odm);
+void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm);
+void odm_EdcaTurboCheckCE(struct odm_dm_struct *pDM_Odm);
+void odm_TXPowerTrackingCheckCE(struct odm_dm_struct *pDM_Odm);
+void odm_TXPowerTrackingCheckMP(struct odm_dm_struct *pDM_Odm);
+void odm_TXPowerTrackingCheckAP(struct odm_dm_struct *pDM_Odm);
+void odm_SwAntDivChkAntSwitchCallback(void *FunctionContext);
+void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm);
+void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm);
+
+#endif /* __ODM_PRECOMP_H__ */
diff --git a/drivers/staging/rtl8188eu/include/odm_reg.h b/drivers/staging/rtl8188eu/include/odm_reg.h
new file mode 100644
index 00000000000..89bc46bc71b
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm_reg.h
@@ -0,0 +1,119 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+/* */
+/* File Name: odm_reg.h */
+/* */
+/* Description: */
+/* */
+/* This file is for general register definition. */
+/* */
+/* */
+/* */
+#ifndef __HAL_ODM_REG_H__
+#define __HAL_ODM_REG_H__
+
+/* */
+/* Register Definition */
+/* */
+
+/* MAC REG */
+#define ODM_BB_RESET 0x002
+#define ODM_DUMMY 0x4fe
+#define ODM_EDCA_VO_PARAM 0x500
+#define ODM_EDCA_VI_PARAM 0x504
+#define ODM_EDCA_BE_PARAM 0x508
+#define ODM_EDCA_BK_PARAM 0x50C
+#define ODM_TXPAUSE 0x522
+
+/* BB REG */
+#define ODM_FPGA_PHY0_PAGE8 0x800
+#define ODM_PSD_SETTING 0x808
+#define ODM_AFE_SETTING 0x818
+#define ODM_TXAGC_B_6_18 0x830
+#define ODM_TXAGC_B_24_54 0x834
+#define ODM_TXAGC_B_MCS32_5 0x838
+#define ODM_TXAGC_B_MCS0_MCS3 0x83c
+#define ODM_TXAGC_B_MCS4_MCS7 0x848
+#define ODM_TXAGC_B_MCS8_MCS11 0x84c
+#define ODM_ANALOG_REGISTER 0x85c
+#define ODM_RF_INTERFACE_OUTPUT 0x860
+#define ODM_TXAGC_B_MCS12_MCS15 0x868
+#define ODM_TXAGC_B_11_A_2_11 0x86c
+#define ODM_AD_DA_LSB_MASK 0x874
+#define ODM_ENABLE_3_WIRE 0x88c
+#define ODM_PSD_REPORT 0x8b4
+#define ODM_R_ANT_SELECT 0x90c
+#define ODM_CCK_ANT_SELECT 0xa07
+#define ODM_CCK_PD_THRESH 0xa0a
+#define ODM_CCK_RF_REG1 0xa11
+#define ODM_CCK_MATCH_FILTER 0xa20
+#define ODM_CCK_RAKE_MAC 0xa2e
+#define ODM_CCK_CNT_RESET 0xa2d
+#define ODM_CCK_TX_DIVERSITY 0xa2f
+#define ODM_CCK_FA_CNT_MSB 0xa5b
+#define ODM_CCK_FA_CNT_LSB 0xa5c
+#define ODM_CCK_NEW_FUNCTION 0xa75
+#define ODM_OFDM_PHY0_PAGE_C 0xc00
+#define ODM_OFDM_RX_ANT 0xc04
+#define ODM_R_A_RXIQI 0xc14
+#define ODM_R_A_AGC_CORE1 0xc50
+#define ODM_R_A_AGC_CORE2 0xc54
+#define ODM_R_B_AGC_CORE1 0xc58
+#define ODM_R_AGC_PAR 0xc70
+#define ODM_R_HTSTF_AGC_PAR 0xc7c
+#define ODM_TX_PWR_TRAINING_A 0xc90
+#define ODM_TX_PWR_TRAINING_B 0xc98
+#define ODM_OFDM_FA_CNT1 0xcf0
+#define ODM_OFDM_PHY0_PAGE_D 0xd00
+#define ODM_OFDM_FA_CNT2 0xda0
+#define ODM_OFDM_FA_CNT3 0xda4
+#define ODM_OFDM_FA_CNT4 0xda8
+#define ODM_TXAGC_A_6_18 0xe00
+#define ODM_TXAGC_A_24_54 0xe04
+#define ODM_TXAGC_A_1_MCS32 0xe08
+#define ODM_TXAGC_A_MCS0_MCS3 0xe10
+#define ODM_TXAGC_A_MCS4_MCS7 0xe14
+#define ODM_TXAGC_A_MCS8_MCS11 0xe18
+#define ODM_TXAGC_A_MCS12_MCS15 0xe1c
+
+/* RF REG */
+#define ODM_GAIN_SETTING 0x00
+#define ODM_CHANNEL 0x18
+
+/* Ant Detect Reg */
+#define ODM_DPDT 0x300
+
+/* PSD Init */
+#define ODM_PSDREG 0x808
+
+/* 92D Path Div */
+#define PATHDIV_REG 0xB30
+#define PATHDIV_TRI 0xBA0
+
+
+/* */
+/* Bitmap Definition */
+/* */
+
+#define BIT_FA_RESET BIT0
+
+
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/odm_types.h b/drivers/staging/rtl8188eu/include/odm_types.h
new file mode 100644
index 00000000000..78ee2bac0d8
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/odm_types.h
@@ -0,0 +1,62 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __ODM_TYPES_H__
+#define __ODM_TYPES_H__
+
+/* */
+/* Define Different SW team support */
+/* */
+#define ODM_AP 0x01 /* BIT0 */
+#define ODM_ADSL 0x02 /* BIT1 */
+#define ODM_CE 0x04 /* BIT2 */
+#define ODM_MP 0x08 /* BIT3 */
+
+#define RT_PCI_INTERFACE 1
+#define RT_USB_INTERFACE 2
+#define RT_SDIO_INTERFACE 3
+
+enum HAL_STATUS {
+ HAL_STATUS_SUCCESS,
+ HAL_STATUS_FAILURE,
+};
+
+enum RT_SPINLOCK_TYPE {
+ RT_TEMP = 1,
+};
+
+#include <basic_types.h>
+
+#define DEV_BUS_TYPE RT_USB_INTERFACE
+
+#define SET_TX_DESC_ANTSEL_A_88E(__pTxDesc, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pTxDesc+8, 24, 1, __Value)
+#define SET_TX_DESC_ANTSEL_B_88E(__pTxDesc, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pTxDesc+8, 25, 1, __Value)
+#define SET_TX_DESC_ANTSEL_C_88E(__pTxDesc, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pTxDesc+28, 29, 1, __Value)
+
+/* define useless flag to avoid compile warning */
+#define USE_WORKITEM 0
+#define FOR_BRAZIL_PRETEST 0
+#define BT_30_SUPPORT 0
+#define FPGA_TWO_MAC_VERIFICATION 0
+
+
+#endif /* __ODM_TYPES_H__ */
diff --git a/drivers/staging/rtl8188eu/include/osdep_intf.h b/drivers/staging/rtl8188eu/include/osdep_intf.h
new file mode 100644
index 00000000000..c4599c583b5
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/osdep_intf.h
@@ -0,0 +1,83 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef __OSDEP_INTF_H_
+#define __OSDEP_INTF_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+struct intf_priv {
+ u8 *intf_dev;
+ u32 max_iosz; /* USB2.0: 128, USB1.1: 64, SDIO:64 */
+ u32 max_xmitsz; /* USB2.0: unlimited, SDIO:512 */
+ u32 max_recvsz; /* USB2.0: unlimited, SDIO:512 */
+
+ u8 *io_rwmem;
+ u8 *allocated_io_rwmem;
+ u32 io_wsz; /* unit: 4bytes */
+ u32 io_rsz;/* unit: 4bytes */
+ u8 intf_status;
+
+ void (*_bus_io)(u8 *priv);
+
+/*
+Under Sync. IRP (SDIO/USB)
+A protection mechanism is necessary for the io_rwmem (read/write protocol).
+
+Under Async. IRP (SDIO/USB)
+The protection mechanism is through the pending queue.
+*/
+ struct mutex ioctl_mutex;
+ /* when in USB, IO is through interrupt in/out endpoints */
+ struct usb_device *udev;
+ struct urb *piorw_urb;
+ u8 io_irp_cnt;
+ u8 bio_irp_pending;
+ struct semaphore io_retevt;
+ struct timer_list io_timer;
+ u8 bio_irp_timeout;
+ u8 bio_timer_cancel;
+};
+
+u8 rtw_init_drv_sw(struct adapter *padapter);
+u8 rtw_free_drv_sw(struct adapter *padapter);
+u8 rtw_reset_drv_sw(struct adapter *padapter);
+
+u32 rtw_start_drv_threads(struct adapter *padapter);
+void rtw_stop_drv_threads (struct adapter *padapter);
+void rtw_cancel_all_timer(struct adapter *padapter);
+
+int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+
+int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname);
+struct net_device *rtw_init_netdev(struct adapter *padapter);
+u16 rtw_recv_select_queue(struct sk_buff *skb);
+void rtw_proc_init_one(struct net_device *dev);
+void rtw_proc_remove_one(struct net_device *dev);
+
+void rtw_ips_dev_unload(struct adapter *padapter);
+
+int rtw_ips_pwr_up(struct adapter *padapter);
+void rtw_ips_pwr_down(struct adapter *padapter);
+int rtw_hw_suspend(struct adapter *padapter);
+int rtw_hw_resume(struct adapter *padapter);
+
+#endif /* _OSDEP_INTF_H_ */
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
new file mode 100644
index 00000000000..44f24fa31a3
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -0,0 +1,547 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __OSDEP_SERVICE_H_
+#define __OSDEP_SERVICE_H_
+
+#include <basic_types.h>
+
+#define _FAIL 0
+#define _SUCCESS 1
+#define RTW_RX_HANDLED 2
+
+#include <linux/spinlock.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kref.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/circ_buf.h>
+#include <linux/uaccess.h>
+#include <asm/byteorder.h>
+#include <linux/atomic.h>
+#include <linux/io.h>
+#include <linux/semaphore.h>
+#include <linux/sem.h>
+#include <linux/sched.h>
+#include <linux/etherdevice.h>
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+#include <linux/if_arp.h>
+#include <linux/rtnetlink.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h> /* Necessary because we use the proc fs */
+#include <linux/interrupt.h> /* for struct tasklet_struct */
+#include <linux/ip.h>
+#include <linux/kthread.h>
+
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+
+struct __queue {
+ struct list_head queue;
+ spinlock_t lock;
+};
+
+#define thread_exit() complete_and_exit(NULL, 0)
+
+static inline struct list_head *get_next(struct list_head *list)
+{
+ return list->next;
+}
+
+static inline struct list_head *get_list_head(struct __queue *queue)
+{
+ return (&(queue->queue));
+}
+
+
+#define LIST_CONTAINOR(ptr, type, member) \
+ ((type *)((char *)(ptr)-(size_t)(&((type *)0)->member)))
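+
+/* LIST_CONTAINOR is this driver's equivalent of the kernel's container_of(): */
+/* given (hypothetically) struct foo { struct list_head list; ... }, */
+/* LIST_CONTAINOR(plist, struct foo, list) recovers the enclosing struct foo. */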
+
+
+static inline void _enter_critical(spinlock_t *plock, unsigned long *pirqL)
+{
+ spin_lock_irqsave(plock, *pirqL);
+}
+
+static inline void _exit_critical(spinlock_t *plock, unsigned long *pirqL)
+{
+ spin_unlock_irqrestore(plock, *pirqL);
+}
+
+static inline void _enter_critical_ex(spinlock_t *plock, unsigned long *pirqL)
+{
+ spin_lock_irqsave(plock, *pirqL);
+}
+
+static inline void _exit_critical_ex(spinlock_t *plock, unsigned long *pirqL)
+{
+ spin_unlock_irqrestore(plock, *pirqL);
+}
+
+static inline void _enter_critical_bh(spinlock_t *plock, unsigned long *pirqL)
+{
+ spin_lock_bh(plock);
+}
+
+static inline void _exit_critical_bh(spinlock_t *plock, unsigned long *pirqL)
+{
+ spin_unlock_bh(plock);
+}
+
+static inline int _enter_critical_mutex(struct mutex *pmutex, unsigned long *pirqL)
+{
+ int ret;
+
+ ret = mutex_lock_interruptible(pmutex);
+ return ret;
+}
+
+
+static inline void _exit_critical_mutex(struct mutex *pmutex, unsigned long *pirqL)
+{
+ mutex_unlock(pmutex);
+}
+
+static inline void rtw_list_delete(struct list_head *plist)
+{
+ list_del_init(plist);
+}
+
+static inline void _init_timer(struct timer_list *ptimer,
+ struct net_device *nic_hdl,
+ void *pfunc, void *cntx)
+{
+ ptimer->function = pfunc;
+ ptimer->data = (unsigned long)cntx;
+ init_timer(ptimer);
+}
+
+static inline void _set_timer(struct timer_list *ptimer, u32 delay_time)
+{
+ mod_timer(ptimer, (jiffies + (delay_time * HZ / 1000)));
+}
+
+static inline void _cancel_timer(struct timer_list *ptimer, u8 *bcancelled)
+{
+ del_timer_sync(ptimer);
+ *bcancelled = true; /* true == 1; false == 0 */
+}
+
+#define RTW_TIMER_HDL_ARGS void *FunctionContext
+#define RTW_TIMER_HDL_NAME(name) rtw_##name##_timer_hdl
+#define RTW_DECLARE_TIMER_HDL(name) void RTW_TIMER_HDL_NAME(name)(RTW_TIMER_HDL_ARGS)
+
+static inline void _init_workitem(struct work_struct *pwork, void *pfunc, void *cntx)
+{
+ INIT_WORK(pwork, pfunc);
+}
+
+static inline void _set_workitem(struct work_struct *pwork)
+{
+ schedule_work(pwork);
+}
+
+static inline void _cancel_workitem_sync(struct work_struct *pwork)
+{
+ cancel_work_sync(pwork);
+}
+/* */
+/* Global Mutex: can only be used at PASSIVE level. */
+/* */
+
+#define ACQUIRE_GLOBAL_MUTEX(_MutexCounter) \
+{ \
+ while (atomic_inc_return((atomic_t *)&(_MutexCounter)) != 1)\
+ { \
+ atomic_dec((atomic_t *)&(_MutexCounter)); \
+ msleep(10); \
+ } \
+}
+
+#define RELEASE_GLOBAL_MUTEX(_MutexCounter) \
+{ \
+ atomic_dec((atomic_t *)&(_MutexCounter)); \
+}
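+
+/* Behaviour sketch: the "mutex" is a plain counter (assumed to start at 0); */
+/* ACQUIRE_GLOBAL_MUTEX atomically increments it and, if it was already held */
+/* (result != 1), backs off, sleeps 10 ms and retries, so it may only be used */
+/* in sleepable (PASSIVE-level) context. */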
+
+static inline int rtw_netif_queue_stopped(struct net_device *pnetdev)
+{
+ return netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 0)) &&
+ netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 1)) &&
+ netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 2)) &&
+ netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 3));
+}
+
+static inline void rtw_netif_wake_queue(struct net_device *pnetdev)
+{
+ netif_tx_wake_all_queues(pnetdev);
+}
+
+static inline void rtw_netif_start_queue(struct net_device *pnetdev)
+{
+ netif_tx_start_all_queues(pnetdev);
+}
+
+static inline void rtw_netif_stop_queue(struct net_device *pnetdev)
+{
+ netif_tx_stop_all_queues(pnetdev);
+}
+
+#ifndef BIT
+ #define BIT(x) (1 << (x))
+#endif
+
+#define BIT0 0x00000001
+#define BIT1 0x00000002
+#define BIT2 0x00000004
+#define BIT3 0x00000008
+#define BIT4 0x00000010
+#define BIT5 0x00000020
+#define BIT6 0x00000040
+#define BIT7 0x00000080
+#define BIT8 0x00000100
+#define BIT9 0x00000200
+#define BIT10 0x00000400
+#define BIT11 0x00000800
+#define BIT12 0x00001000
+#define BIT13 0x00002000
+#define BIT14 0x00004000
+#define BIT15 0x00008000
+#define BIT16 0x00010000
+#define BIT17 0x00020000
+#define BIT18 0x00040000
+#define BIT19 0x00080000
+#define BIT20 0x00100000
+#define BIT21 0x00200000
+#define BIT22 0x00400000
+#define BIT23 0x00800000
+#define BIT24 0x01000000
+#define BIT25 0x02000000
+#define BIT26 0x04000000
+#define BIT27 0x08000000
+#define BIT28 0x10000000
+#define BIT29 0x20000000
+#define BIT30 0x40000000
+#define BIT31 0x80000000
+#define BIT32 0x0100000000
+#define BIT33 0x0200000000
+#define BIT34 0x0400000000
+#define BIT35 0x0800000000
+#define BIT36 0x1000000000
+
+extern int RTW_STATUS_CODE(int error_code);
+
+/* flags used for rtw_update_mem_stat() */
+enum {
+ MEM_STAT_VIR_ALLOC_SUCCESS,
+ MEM_STAT_VIR_ALLOC_FAIL,
+ MEM_STAT_VIR_FREE,
+ MEM_STAT_PHY_ALLOC_SUCCESS,
+ MEM_STAT_PHY_ALLOC_FAIL,
+ MEM_STAT_PHY_FREE,
+ MEM_STAT_TX, /* used to distinguish TX/RX, assigned by the caller */
+ MEM_STAT_TX_ALLOC_SUCCESS,
+ MEM_STAT_TX_ALLOC_FAIL,
+ MEM_STAT_TX_FREE,
+ MEM_STAT_RX, /* used to distinguish TX/RX, assigned by the caller */
+ MEM_STAT_RX_ALLOC_SUCCESS,
+ MEM_STAT_RX_ALLOC_FAIL,
+ MEM_STAT_RX_FREE
+};
+
+extern unsigned char MCS_rate_2R[16];
+extern unsigned char MCS_rate_1R[16];
+extern unsigned char RTW_WPA_OUI[];
+extern unsigned char WPA_TKIP_CIPHER[4];
+extern unsigned char RSN_TKIP_CIPHER[4];
+
+#define rtw_update_mem_stat(flag, sz) do {} while (0)
+u8 *_rtw_vmalloc(u32 sz);
+u8 *_rtw_zvmalloc(u32 sz);
+void _rtw_vmfree(u8 *pbuf, u32 sz);
+u8 *_rtw_zmalloc(u32 sz);
+u8 *_rtw_malloc(u32 sz);
+void _rtw_mfree(u8 *pbuf, u32 sz);
+#define rtw_vmalloc(sz) _rtw_vmalloc((sz))
+#define rtw_zvmalloc(sz) _rtw_zvmalloc((sz))
+#define rtw_vmfree(pbuf, sz) _rtw_vmfree((pbuf), (sz))
+#define rtw_malloc(sz) _rtw_malloc((sz))
+#define rtw_zmalloc(sz) _rtw_zmalloc((sz))
+#define rtw_mfree(pbuf, sz) _rtw_mfree((pbuf), (sz))
+
+void *rtw_malloc2d(int h, int w, int size);
+void rtw_mfree2d(void *pbuf, int h, int w, int size);
+
+void _rtw_memcpy(void *dec, void *sour, u32 sz);
+int _rtw_memcmp(void *dst, void *src, u32 sz);
+void _rtw_memset(void *pbuf, int c, u32 sz);
+
+void _rtw_init_listhead(struct list_head *list);
+u32 rtw_is_list_empty(struct list_head *phead);
+void rtw_list_insert_head(struct list_head *plist, struct list_head *phead);
+void rtw_list_insert_tail(struct list_head *plist, struct list_head *phead);
+void rtw_list_delete(struct list_head *plist);
+
+void _rtw_init_sema(struct semaphore *sema, int init_val);
+void _rtw_free_sema(struct semaphore *sema);
+void _rtw_up_sema(struct semaphore *sema);
+u32 _rtw_down_sema(struct semaphore *sema);
+void _rtw_mutex_init(struct mutex *pmutex);
+void _rtw_mutex_free(struct mutex *pmutex);
+void _rtw_spinlock_init(spinlock_t *plock);
+void _rtw_spinlock_free(spinlock_t *plock);
+
+void _rtw_init_queue(struct __queue *pqueue);
+u32 _rtw_queue_empty(struct __queue *pqueue);
+u32 rtw_end_of_queue_search(struct list_head *queue, struct list_head *pelement);
+
+u32 rtw_get_current_time(void);
+u32 rtw_systime_to_ms(u32 systime);
+u32 rtw_ms_to_systime(u32 ms);
+s32 rtw_get_passing_time_ms(u32 start);
+s32 rtw_get_time_interval_ms(u32 start, u32 end);
+
+void rtw_sleep_schedulable(int ms);
+
+void rtw_msleep_os(int ms);
+void rtw_usleep_os(int us);
+
+u32 rtw_atoi(u8 *s);
+
+void rtw_mdelay_os(int ms);
+void rtw_udelay_os(int us);
+
+void rtw_yield_os(void);
+
+static inline unsigned char _cancel_timer_ex(struct timer_list *ptimer)
+{
+ return del_timer_sync(ptimer);
+}
+
+static __inline void thread_enter(char *name)
+{
+#ifdef daemonize
+ daemonize("%s", name);
+#endif
+ allow_signal(SIGTERM);
+}
+
+static inline void flush_signals_thread(void)
+{
+ if (signal_pending (current))
+ flush_signals(current);
+}
+
+static inline int res_to_status(int res)
+{
+ return res;
+}
+
+#define _RND(sz, r) ((((sz)+((r)-1))/(r))*(r))
+#define RND4(x) (((x >> 2) + (((x & 3) == 0) ? 0: 1)) << 2)
+
+static inline u32 _RND4(u32 sz)
+{
+ u32 val;
+
+ val = ((sz >> 2) + ((sz & 3) ? 1: 0)) << 2;
+ return val;
+}
+
+static inline u32 _RND8(u32 sz)
+{
+ u32 val;
+
+ val = ((sz >> 3) + ((sz & 7) ? 1: 0)) << 3;
+ return val;
+}
+
+static inline u32 _RND128(u32 sz)
+{
+ u32 val;
+
+ val = ((sz >> 7) + ((sz & 127) ? 1: 0)) << 7;
+ return val;
+}
+
+static inline u32 _RND256(u32 sz)
+{
+ u32 val;
+
+ val = ((sz >> 8) + ((sz & 255) ? 1: 0)) << 8;
+ return val;
+}
+
+static inline u32 _RND512(u32 sz)
+{
+ u32 val;
+
+ val = ((sz >> 9) + ((sz & 511) ? 1: 0)) << 9;
+ return val;
+}
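+
+/* Illustrative values: each helper rounds sz up to the next multiple of the */
+/* given power of two, e.g. _RND8(13) == 16 and _RND8(16) == 16; _RND4, */
+/* _RND128, _RND256 and _RND512 behave the same way for 4/128/256/512. */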
+
+static inline u32 bitshift(u32 bitmask)
+{
+ u32 i;
+
+ for (i = 0; i <= 31; i++)
+ if (((bitmask>>i) & 0x1) == 1) break;
+ return i;
+}
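+
+/* Illustrative: bitshift() returns the index of the lowest set bit, e.g. */
+/* bitshift(0x00F0) == 4; for a zero bitmask the loop falls through and 32 */
+/* is returned. */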
+
+/* limitation of path length */
+#define PATH_LENGTH_MAX PATH_MAX
+
+void rtw_suspend_lock_init(void);
+void rtw_suspend_lock_uninit(void);
+void rtw_lock_suspend(void);
+void rtw_unlock_suspend(void);
+
+/* Atomic integer operations */
+#define ATOMIC_T atomic_t
+
+void ATOMIC_SET(ATOMIC_T *v, int i);
+int ATOMIC_READ(ATOMIC_T *v);
+void ATOMIC_ADD(ATOMIC_T *v, int i);
+void ATOMIC_SUB(ATOMIC_T *v, int i);
+void ATOMIC_INC(ATOMIC_T *v);
+void ATOMIC_DEC(ATOMIC_T *v);
+int ATOMIC_ADD_RETURN(ATOMIC_T *v, int i);
+int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i);
+int ATOMIC_INC_RETURN(ATOMIC_T *v);
+int ATOMIC_DEC_RETURN(ATOMIC_T *v);
+
+/* File operation APIs, just for linux now */
+int rtw_is_file_readable(char *path);
+int rtw_retrive_from_file(char *path, u8 __user *buf, u32 sz);
+int rtw_store_to_file(char *path, u8 __user *buf, u32 sz);
+
+struct rtw_netdev_priv_indicator {
+ void *priv;
+ u32 sizeof_priv;
+};
+struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv,
+ void *old_priv);
+struct net_device *rtw_alloc_etherdev(int sizeof_priv);
+
+#define rtw_netdev_priv(netdev) \
+ (((struct rtw_netdev_priv_indicator *)netdev_priv(netdev))->priv)
+void rtw_free_netdev(struct net_device *netdev);
+
+#define NDEV_FMT "%s"
+#define NDEV_ARG(ndev) ndev->name
+#define ADPT_FMT "%s"
+#define ADPT_ARG(adapter) adapter->pnetdev->name
+#define FUNC_NDEV_FMT "%s(%s)"
+#define FUNC_NDEV_ARG(ndev) __func__, ndev->name
+#define FUNC_ADPT_FMT "%s(%s)"
+#define FUNC_ADPT_ARG(adapter) __func__, adapter->pnetdev->name
+
+#define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)),(sig), 1)
+
+u64 rtw_modular64(u64 x, u64 y);
+u64 rtw_division64(u64 x, u64 y);
+
+/* Macros for handling unaligned memory accesses */
+
+#define RTW_GET_BE16(a) ((u16) (((a)[0] << 8) | (a)[1]))
+#define RTW_PUT_BE16(a, val) \
+ do { \
+ (a)[0] = ((u16) (val)) >> 8; \
+ (a)[1] = ((u16) (val)) & 0xff; \
+ } while (0)
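+
+/* Worked example (illustrative; "b" is an assumed u8[2] buffer): */
+/* RTW_PUT_BE16(b, 0x1234) stores b[0] = 0x12 and b[1] = 0x34, and */
+/* RTW_GET_BE16(b) then yields 0x1234; the LE variants below swap the order. */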
+
+#define RTW_GET_LE16(a) ((u16) (((a)[1] << 8) | (a)[0]))
+#define RTW_PUT_LE16(a, val) \
+ do { \
+ (a)[1] = ((u16) (val)) >> 8; \
+ (a)[0] = ((u16) (val)) & 0xff; \
+ } while (0)
+
+#define RTW_GET_BE24(a) ((((u32) (a)[0]) << 16) | (((u32) (a)[1]) << 8) | \
+ ((u32) (a)[2]))
+#define RTW_PUT_BE24(a, val) \
+ do { \
+ (a)[0] = (u8) ((((u32) (val)) >> 16) & 0xff); \
+ (a)[1] = (u8) ((((u32) (val)) >> 8) & 0xff); \
+ (a)[2] = (u8) (((u32) (val)) & 0xff); \
+ } while (0)
+
+#define RTW_GET_BE32(a) ((((u32) (a)[0]) << 24) | (((u32) (a)[1]) << 16) | \
+ (((u32) (a)[2]) << 8) | ((u32) (a)[3]))
+#define RTW_PUT_BE32(a, val) \
+ do { \
+ (a)[0] = (u8) ((((u32) (val)) >> 24) & 0xff); \
+ (a)[1] = (u8) ((((u32) (val)) >> 16) & 0xff); \
+ (a)[2] = (u8) ((((u32) (val)) >> 8) & 0xff); \
+ (a)[3] = (u8) (((u32) (val)) & 0xff); \
+ } while (0)
+
+#define RTW_GET_LE32(a) ((((u32) (a)[3]) << 24) | (((u32) (a)[2]) << 16) | \
+ (((u32) (a)[1]) << 8) | ((u32) (a)[0]))
+#define RTW_PUT_LE32(a, val) \
+ do { \
+ (a)[3] = (u8) ((((u32) (val)) >> 24) & 0xff); \
+ (a)[2] = (u8) ((((u32) (val)) >> 16) & 0xff); \
+ (a)[1] = (u8) ((((u32) (val)) >> 8) & 0xff); \
+ (a)[0] = (u8) (((u32) (val)) & 0xff); \
+ } while (0)
+
+#define RTW_GET_BE64(a) ((((u64) (a)[0]) << 56) | (((u64) (a)[1]) << 48) | \
+ (((u64) (a)[2]) << 40) | (((u64) (a)[3]) << 32) | \
+ (((u64) (a)[4]) << 24) | (((u64) (a)[5]) << 16) | \
+ (((u64) (a)[6]) << 8) | ((u64) (a)[7]))
+#define RTW_PUT_BE64(a, val) \
+ do { \
+ (a)[0] = (u8) (((u64) (val)) >> 56); \
+ (a)[1] = (u8) (((u64) (val)) >> 48); \
+ (a)[2] = (u8) (((u64) (val)) >> 40); \
+ (a)[3] = (u8) (((u64) (val)) >> 32); \
+ (a)[4] = (u8) (((u64) (val)) >> 24); \
+ (a)[5] = (u8) (((u64) (val)) >> 16); \
+ (a)[6] = (u8) (((u64) (val)) >> 8); \
+ (a)[7] = (u8) (((u64) (val)) & 0xff); \
+ } while (0)
+
+#define RTW_GET_LE64(a) ((((u64) (a)[7]) << 56) | (((u64) (a)[6]) << 48) | \
+ (((u64) (a)[5]) << 40) | (((u64) (a)[4]) << 32) | \
+ (((u64) (a)[3]) << 24) | (((u64) (a)[2]) << 16) | \
+ (((u64) (a)[1]) << 8) | ((u64) (a)[0]))
+
+void rtw_buf_free(u8 **buf, u32 *buf_len);
+void rtw_buf_update(u8 **buf, u32 *buf_len, u8 *src, u32 src_len);
+
+struct rtw_cbuf {
+ u32 write;
+ u32 read;
+ u32 size;
+ void *bufs[0];
+};
+
+bool rtw_cbuf_full(struct rtw_cbuf *cbuf);
+bool rtw_cbuf_empty(struct rtw_cbuf *cbuf);
+bool rtw_cbuf_push(struct rtw_cbuf *cbuf, void *buf);
+void *rtw_cbuf_pop(struct rtw_cbuf *cbuf);
+struct rtw_cbuf *rtw_cbuf_alloc(u32 size);
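+
+/* Usage sketch (illustrative, based only on the prototypes above): */
+/* struct rtw_cbuf *c = rtw_cbuf_alloc(16); */
+/* if (c && !rtw_cbuf_full(c)) */
+/* rtw_cbuf_push(c, some_buf); */
+/* void *p = rtw_cbuf_empty(c) ? NULL : rtw_cbuf_pop(c); */
+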
+int wifirate2_ratetbl_inx(unsigned char rate);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/recv_osdep.h b/drivers/staging/rtl8188eu/include/recv_osdep.h
new file mode 100644
index 00000000000..69123807807
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/recv_osdep.h
@@ -0,0 +1,56 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RECV_OSDEP_H_
+#define __RECV_OSDEP_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+
+int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
+void _rtw_free_recv_priv(struct recv_priv *precvpriv);
+
+
+s32 rtw_recv_entry(union recv_frame *precv_frame);
+int rtw_recv_indicatepkt(struct adapter *adapter, union recv_frame *recv_frame);
+void rtw_recv_returnpacket(struct net_device *cnxt, struct sk_buff *retpkt);
+
+void rtw_hostapd_mlme_rx(struct adapter *padapter, union recv_frame *recv_fr);
+void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup);
+
+int rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter);
+void rtw_free_recv_priv(struct recv_priv *precvpriv);
+
+int rtw_os_recv_resource_init(struct recv_priv *recvpr, struct adapter *adapt);
+int rtw_os_recv_resource_alloc(struct adapter *adapt, union recv_frame *recvfr);
+void rtw_os_recv_resource_free(struct recv_priv *precvpriv);
+
+int rtw_os_recvbuf_resource_alloc(struct adapter *adapt, struct recv_buf *buf);
+int rtw_os_recvbuf_resource_free(struct adapter *adapt, struct recv_buf *buf);
+
+void rtw_os_read_port(struct adapter *padapter, struct recv_buf *precvbuf);
+
+void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl);
+int nat25_handle_frame(struct adapter *priv, struct sk_buff *skb);
+int _netdev_open(struct net_device *pnetdev);
+int netdev_open(struct net_device *pnetdev);
+int netdev_close(struct net_device *pnetdev);
+
+#endif /* */
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
new file mode 100644
index 00000000000..b32bc28503d
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
@@ -0,0 +1,122 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTL8188E_CMD_H__
+#define __RTL8188E_CMD_H__
+
+enum RTL8188E_H2C_CMD_ID {
+ /* Class Common */
+ H2C_COM_RSVD_PAGE = 0x00,
+ H2C_COM_MEDIA_STATUS_RPT = 0x01,
+ H2C_COM_SCAN = 0x02,
+ H2C_COM_KEEP_ALIVE = 0x03,
+ H2C_COM_DISCNT_DECISION = 0x04,
+ H2C_COM_INIT_OFFLOAD = 0x06,
+ H2C_COM_REMOTE_WAKE_CTL = 0x07,
+ H2C_COM_AP_OFFLOAD = 0x08,
+ H2C_COM_BCN_RSVD_PAGE = 0x09,
+ H2C_COM_PROB_RSP_RSVD_PAGE = 0x0A,
+
+ /* Class PS */
+ H2C_PS_PWR_MODE = 0x20,
+ H2C_PS_TUNE_PARA = 0x21,
+ H2C_PS_TUNE_PARA_2 = 0x22,
+ H2C_PS_LPS_PARA = 0x23,
+ H2C_PS_P2P_OFFLOAD = 0x24,
+
+ /* Class DM */
+ H2C_DM_MACID_CFG = 0x40,
+ H2C_DM_TXBF = 0x41,
+
+ /* Class BT */
+ H2C_BT_COEX_MASK = 0x60,
+ H2C_BT_COEX_GPIO_MODE = 0x61,
+ H2C_BT_DAC_SWING_VAL = 0x62,
+ H2C_BT_PSD_RST = 0x63,
+
+ /* Class */
+ H2C_RESET_TSF = 0xc0,
+};
+
+struct cmd_msg_parm {
+ u8 eid; /* element id */
+ u8 sz; /* sz */
+ u8 buf[6];
+};
+
+enum {
+ PWRS
+};
+
+struct setpwrmode_parm {
+ u8 Mode;/* 0:Active,1:LPS,2:WMMPS */
+ u8 SmartPS_RLBM;/* LPS= 0:PS_Poll,1:PS_Poll,2:NullData,WMM= 0:PS_Poll,1:NullData */
+ u8 AwakeInterval; /* unit: beacon interval */
+ u8 bAllQueueUAPSD;
+ u8 PwrState;/* AllON(0x0c),RFON(0x04),RFOFF(0x00) */
+};
+
+struct H2C_SS_RFOFF_PARAM {
+ u8 ROFOn; /* 1: on, 0:off */
+ u16 gpio_period; /* unit: 1024 us */
+} __packed;
+
+struct joinbssrpt_parm {
+ u8 OpMode; /* RT_MEDIA_STATUS */
+};
+
+struct rsvdpage_loc {
+ u8 LocProbeRsp;
+ u8 LocPsPoll;
+ u8 LocNullData;
+ u8 LocQosNull;
+ u8 LocBTQosNull;
+};
+
+struct P2P_PS_Offload_t {
+ u8 Offload_En:1;
+ u8 role:1; /* 1: Owner, 0: Client */
+ u8 CTWindow_En:1;
+ u8 NoA0_En:1;
+ u8 NoA1_En:1;
+ u8 AllStaSleep:1; /* Only valid in Owner */
+ u8 discovery:1;
+ u8 rsvd:1;
+};
+
+struct P2P_PS_CTWPeriod_t {
+ u8 CTWPeriod; /* TU */
+};
+
+/* host message to firmware cmd */
+void rtl8188e_set_FwPwrMode_cmd(struct adapter *padapter, u8 Mode);
+void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *padapter, u8 mstatus);
+u8 rtl8188e_set_rssi_cmd(struct adapter *padapter, u8 *param);
+u8 rtl8188e_set_raid_cmd(struct adapter *padapter, u32 mask);
+void rtl8188e_Add_RateATid(struct adapter *padapter, u32 bitmap, u8 arg,
+ u8 rssi_level);
+
+#ifdef CONFIG_88EU_P2P
+void rtl8188e_set_p2p_ps_offload_cmd(struct adapter *adapt, u8 p2p_ps_state);
+#endif /* CONFIG_88EU_P2P */
+
+void CheckFwRsvdPageContent(struct adapter *adapt);
+void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, __le16 mstatus_rpt);
+
+#endif/* __RTL8188E_CMD_H__ */
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_dm.h b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
new file mode 100644
index 00000000000..97a3175250e
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
@@ -0,0 +1,62 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTL8188E_DM_H__
+#define __RTL8188E_DM_H__
+enum {
+ UP_LINK,
+ DOWN_LINK,
+};
+/* duplicate code,will move to ODM ######### */
+#define IQK_MAC_REG_NUM 4
+#define IQK_ADDA_REG_NUM 16
+#define IQK_BB_REG_NUM 9
+#define HP_THERMAL_NUM 8
+/* duplicate code,will move to ODM ######### */
+struct dm_priv {
+ u8 DM_Type;
+ u8 DMFlag;
+ u8 InitDMFlag;
+ u32 InitODMFlag;
+
+ /* Upper and Lower Signal threshold for Rate Adaptive*/
+ int UndecoratedSmoothedPWDB;
+ int UndecoratedSmoothedCCK;
+ int EntryMinUndecoratedSmoothedPWDB;
+ int EntryMaxUndecoratedSmoothedPWDB;
+ int MinUndecoratedPWDBForDM;
+ int LastMinUndecoratedPWDBForDM;
+
+ /* for High Power */
+ u8 bDynamicTxPowerEnable;
+ u8 LastDTPLvl;
+ u8 DynamicTxHighPowerLvl;/* Tx Power Control for Near/Far Range */
+ u8 PowerIndex_backup[6];
+};
+
+void rtl8188e_init_dm_priv(struct adapter *adapt);
+void rtl8188e_deinit_dm_priv(struct adapter *adapt);
+void rtl8188e_InitHalDm(struct adapter *adapt);
+void rtl8188e_HalDmWatchDog(struct adapter *adapt);
+
+void AntDivCompare8188E(struct adapter *adapt, struct wlan_bssid_ex *dst,
+ struct wlan_bssid_ex *src);
+u8 AntDivBeforeLink8188E(struct adapter *adapt);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
new file mode 100644
index 00000000000..52b280165a9
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -0,0 +1,487 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTL8188E_HAL_H__
+#define __RTL8188E_HAL_H__
+
+
+/* include HAL Related header after HAL Related compiling flags */
+#include "rtl8188e_spec.h"
+#include "Hal8188EPhyReg.h"
+#include "Hal8188EPhyCfg.h"
+#include "rtl8188e_rf.h"
+#include "rtl8188e_dm.h"
+#include "rtl8188e_recv.h"
+#include "rtl8188e_xmit.h"
+#include "rtl8188e_cmd.h"
+#include "Hal8188EPwrSeq.h"
+#include "rtl8188e_sreset.h"
+#include "rtw_efuse.h"
+
+#include "odm_precomp.h"
+
+/* Fw Array */
+#define Rtl8188E_FwImageArray Rtl8188EFwImgArray
+#define Rtl8188E_FWImgArrayLength Rtl8188EFWImgArrayLength
+
+#define RTL8188E_FW_UMC_IMG "rtl8188E\\rtl8188efw.bin"
+#define RTL8188E_PHY_REG "rtl8188E\\PHY_REG_1T.txt"
+#define RTL8188E_PHY_RADIO_A "rtl8188E\\radio_a_1T.txt"
+#define RTL8188E_PHY_RADIO_B "rtl8188E\\radio_b_1T.txt"
+#define RTL8188E_AGC_TAB "rtl8188E\\AGC_TAB_1T.txt"
+#define RTL8188E_PHY_MACREG "rtl8188E\\MAC_REG.txt"
+#define RTL8188E_PHY_REG_PG "rtl8188E\\PHY_REG_PG.txt"
+#define RTL8188E_PHY_REG_MP "rtl8188E\\PHY_REG_MP.txt"
+
+/* RTL8188E Power Configuration CMDs for USB/SDIO interfaces */
+#define Rtl8188E_NIC_PWR_ON_FLOW rtl8188E_power_on_flow
+#define Rtl8188E_NIC_RF_OFF_FLOW rtl8188E_radio_off_flow
+#define Rtl8188E_NIC_DISABLE_FLOW rtl8188E_card_disable_flow
+#define Rtl8188E_NIC_ENABLE_FLOW rtl8188E_card_enable_flow
+#define Rtl8188E_NIC_SUSPEND_FLOW rtl8188E_suspend_flow
+#define Rtl8188E_NIC_RESUME_FLOW rtl8188E_resume_flow
+#define Rtl8188E_NIC_PDN_FLOW rtl8188E_hwpdn_flow
+#define Rtl8188E_NIC_LPS_ENTER_FLOW rtl8188E_enter_lps_flow
+#define Rtl8188E_NIC_LPS_LEAVE_FLOW rtl8188E_leave_lps_flow
+
+#define DRVINFO_SZ 4 /* unit is 8bytes */
+#define PageNum_128(_Len) (u32)(((_Len)>>7) + ((_Len) & 0x7F ? 1 : 0))
+
+/* download firmware related data structure */
+#define FW_8188E_SIZE 0x4000 /* 16384,16k */
+#define FW_8188E_START_ADDRESS 0x1000
+#define FW_8188E_END_ADDRESS 0x1FFF /* 0x5FFF */
+
+#define MAX_PAGE_SIZE 4096 /* @ page : 4k bytes */
+
+#define IS_FW_HEADER_EXIST(_pFwHdr) \
+ ((le16_to_cpu(_pFwHdr->Signature)&0xFFF0) == 0x92C0 || \
+ (le16_to_cpu(_pFwHdr->Signature)&0xFFF0) == 0x88C0 || \
+ (le16_to_cpu(_pFwHdr->Signature)&0xFFF0) == 0x2300 || \
+ (le16_to_cpu(_pFwHdr->Signature)&0xFFF0) == 0x88E0)
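+
+/* Illustrative: a firmware image whose Signature field reads, say, 0x88E1 */
+/* passes the last test above, since 0x88E1 & 0xFFF0 == 0x88E0. */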
+
+enum firmware_source {
+ FW_SOURCE_IMG_FILE = 0,
+ FW_SOURCE_HEADER_FILE = 1, /* from header file */
+};
+
+struct rt_firmware {
+ enum firmware_source eFWSource;
+ u8 *szFwBuffer;
+ u32 ulFwLength;
+};
+
+/* This structure must be careful with byte-ordering */
+
+struct rt_firmware_hdr {
+ /* 8-byte alignment required */
+ /* LONG WORD 0 ---- */
+ __le16 Signature; /* 92C0: test chip; 92C,
+ * 88C0: test chip; 88C1: MP A-cut;
+ * 92C1: MP A-cut */
+ u8 Category; /* AP/NIC and USB/PCI */
+ u8 Function; /* Reserved for different FW function
+ * indication, for further use when
+ * driver needs to download different
+ * FW for different conditions */
+ __le16 Version; /* FW Version */
+ u8 Subversion; /* FW Subversion, default 0x00 */
+ u16 Rsvd1;
+
+ /* LONG WORD 1 ---- */
+ u8 Month; /* Release time Month field */
+ u8 Date; /* Release time Date field */
+ u8 Hour; /* Release time Hour field */
+ u8 Minute; /* Release time Minute field */
+ __le16 RamCodeSize; /* The size of RAM code */
+ u8 Foundry;
+ u8 Rsvd2;
+
+ /* LONG WORD 2 ---- */
+ __le32 SvnIdx; /* The SVN entry index */
+ u32 Rsvd3;
+
+ /* LONG WORD 3 ---- */
+ u32 Rsvd4;
+ u32 Rsvd5;
+};
+
+#define DRIVER_EARLY_INT_TIME 0x05
+#define BCN_DMA_ATIME_INT_TIME 0x02
+
+enum usb_rx_agg_mode {
+ USB_RX_AGG_DISABLE,
+ USB_RX_AGG_DMA,
+ USB_RX_AGG_USB,
+ USB_RX_AGG_MIX
+};
+
+#define MAX_RX_DMA_BUFFER_SIZE_88E \
+ 0x2400 /* 9k for 88E normal chip, MaxRxBuff=10k-max(TxReportSize(64*8),
+ * WOLPattern(16*24)) */
+
+#define MAX_TX_REPORT_BUFFER_SIZE 0x0400 /* 1k */
+
+
+/* BK, BE, VI, VO, HCCA, MANAGEMENT, COMMAND, HIGH, BEACON. */
+#define MAX_TX_QUEUE 9
+
+#define TX_SELE_HQ BIT(0) /* High Queue */
+#define TX_SELE_LQ BIT(1) /* Low Queue */
+#define TX_SELE_NQ BIT(2) /* Normal Queue */
+
+/* Note: We will divide the number of pages equally among each queue other
+ * than the public queue. */
+/* 22k = 22528 bytes = 176 pages (@page = 128 bytes) */
+/* We must reserve about 7 pages for LPS => 176 - 7 = 169 (0xA9). */
+/* 2*BCN / 1*ps-poll / 1*null-data /1*prob_rsp /1*QOS null-data /1*BT QOS
+ * null-data */
+
+#define TX_TOTAL_PAGE_NUMBER_88E 0xA9/* 169 (21632=> 21k) */
+
+#define TX_PAGE_BOUNDARY_88E (TX_TOTAL_PAGE_NUMBER_88E + 1)
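+
+/* Arithmetic sketch: 176 pages * 128 bytes = 22528 bytes (22k); reserving */
+/* about 7 pages for LPS leaves 176 - 7 = 169 = 0xA9 pages, so the boundary */
+/* above is 0xA9 + 1 = 0xAA. */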
+
+/* Note: For Normal Chip Setting, modify later. */
+#define WMM_NORMAL_TX_TOTAL_PAGE_NUMBER \
+ TX_TOTAL_PAGE_NUMBER_88E /* 0xA9 , 0xb0=>176=>22k */
+#define WMM_NORMAL_TX_PAGE_BOUNDARY_88E \
+ (WMM_NORMAL_TX_TOTAL_PAGE_NUMBER + 1) /* 0xA9 */
+
+/* Chip specific */
+#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3)
+#define CHIP_BONDING_92C_1T2R 0x1
+#define CHIP_BONDING_88C_USB_MCARD 0x2
+#define CHIP_BONDING_88C_USB_HP 0x1
+#include "HalVerDef.h"
+#include "hal_com.h"
+
+/* Channel Plan */
+enum ChannelPlan {
+ CHPL_FCC = 0,
+ CHPL_IC = 1,
+ CHPL_ETSI = 2,
+ CHPL_SPA = 3,
+ CHPL_FRANCE = 4,
+ CHPL_MKK = 5,
+ CHPL_MKK1 = 6,
+ CHPL_ISRAEL = 7,
+ CHPL_TELEC = 8,
+ CHPL_GLOBAL = 9,
+ CHPL_WORLD = 10,
+};
+
+struct txpowerinfo24g {
+ u8 IndexCCK_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G];
+ u8 IndexBW40_Base[MAX_RF_PATH][MAX_CHNL_GROUP_24G-1];
+ /* If only one tx, only BW20 and OFDM are used. */
+ s8 CCK_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 OFDM_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 BW20_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 BW40_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+};
+
+#define EFUSE_REAL_CONTENT_LEN 512
+#define EFUSE_MAX_SECTION 16
+#define EFUSE_IC_ID_OFFSET 506 /* For some inferior IC purpose*/
+#define AVAILABLE_EFUSE_ADDR(addr) (addr < EFUSE_REAL_CONTENT_LEN)
+/* To prevent an out-of-boundary programming case, */
+/* leave 1 byte and program the full section: */
+/* 9 bytes + 1 byte + 5 bytes and a pre 1 byte. */
+/* For worst case: */
+/* | 1byte|----8bytes----|1byte|--5bytes--| */
+/* | | Reserved(14bytes) | */
+
+/* PG data excludes the header, dummy 6 bytes from CP test and 1 reserved byte. */
+#define EFUSE_OOB_PROTECT_BYTES 15
+
+#define HWSET_MAX_SIZE_88E 512
+
+#define EFUSE_REAL_CONTENT_LEN_88E 256
+#define EFUSE_MAP_LEN_88E 512
+#define EFUSE_MAP_LEN EFUSE_MAP_LEN_88E
+#define EFUSE_MAX_SECTION_88E 64
+#define EFUSE_MAX_WORD_UNIT_88E 4
+#define EFUSE_IC_ID_OFFSET_88E 506
+#define AVAILABLE_EFUSE_ADDR_88E(addr) \
+ (addr < EFUSE_REAL_CONTENT_LEN_88E)
+/* To prevent an out-of-boundary programming case, leave 1 byte and program
+ * the full section. */
+/* 9 bytes + 1 byte + 5 bytes and a pre 1 byte. */
+/* For worst case: */
+/* | 2byte|----8bytes----|1byte|--7bytes--| 92D */
+/* PG data excludes the header, dummy 7 bytes from CP test and 1 reserved byte. */
+#define EFUSE_OOB_PROTECT_BYTES_88E 18
+#define EFUSE_PROTECT_BYTES_BANK_88E 16
+
+/* EFUSE for BT definition */
+#define EFUSE_BT_REAL_CONTENT_LEN 1536 /* 512*3 */
+#define EFUSE_BT_MAP_LEN 1024 /* 1k bytes */
+#define EFUSE_BT_MAX_SECTION 128 /* 1024/8 */
+
+#define EFUSE_PROTECT_BYTES_BANK 16
+
+/* For RTL8723 WiFi/BT/GPS multi-function configuration. */
+enum rt_multi_func {
+ RT_MULTI_FUNC_NONE = 0x00,
+ RT_MULTI_FUNC_WIFI = 0x01,
+ RT_MULTI_FUNC_BT = 0x02,
+ RT_MULTI_FUNC_GPS = 0x04,
+};
+
+/* For RTL8723 regulator mode. */
+enum rt_regulator_mode {
+ RT_SWITCHING_REGULATOR = 0,
+ RT_LDO_REGULATOR = 1,
+};
+
+struct hal_data_8188e {
+ struct HAL_VERSION VersionID;
+ enum rt_multi_func MultiFunc; /* For multi-function consideration. */
+ enum rt_regulator_mode RegulatorMode; /* switching regulator or LDO */
+ u16 CustomerID;
+
+ u16 FirmwareVersion;
+ u16 FirmwareVersionRev;
+ u16 FirmwareSubVersion;
+ u16 FirmwareSignature;
+ u8 PGMaxGroup;
+ /* current WIFI_PHY values */
+ u32 ReceiveConfig;
+ enum wireless_mode CurrentWirelessMode;
+ enum ht_channel_width CurrentChannelBW;
+ u8 CurrentChannel;
+ u8 nCur40MhzPrimeSC;/* Control channel sub-carrier */
+
+ u16 BasicRateSet;
+
+ /* rf_ctrl */
+ u8 rf_chip;
+ u8 rf_type;
+ u8 NumTotalRFPath;
+
+ u8 BoardType;
+
+ /* EEPROM setting. */
+ u16 EEPROMVID;
+ u16 EEPROMPID;
+ u16 EEPROMSVID;
+ u16 EEPROMSDID;
+ u8 EEPROMCustomerID;
+ u8 EEPROMSubCustomerID;
+ u8 EEPROMVersion;
+ u8 EEPROMRegulatory;
+
+ u8 bTXPowerDataReadFromEEPORM;
+ u8 EEPROMThermalMeter;
+ u8 bAPKThermalMeterIgnore;
+
+ bool EepromOrEfuse;
+ /* 92C:256bytes, 88E:512bytes, we use union set (512bytes) */
+ u8 EfuseMap[2][HWSET_MAX_SIZE_512];
+ u8 EfuseUsedPercentage;
+ struct efuse_hal EfuseHal;
+
+ u8 Index24G_CCK_Base[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ u8 Index24G_BW40_Base[MAX_RF_PATH][CHANNEL_MAX_NUMBER];
+ /* If only one tx, only BW20 and OFDM are used. */
+ s8 CCK_24G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 OFDM_24G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 BW20_24G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+ s8 BW40_24G_Diff[MAX_RF_PATH][MAX_TX_COUNT];
+
+ u8 TxPwrLevelCck[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
+ /* For HT 40MHZ pwr */
+ u8 TxPwrLevelHT40_1S[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
+ /* For HT 40MHZ pwr */
+ u8 TxPwrLevelHT40_2S[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
+ /* HT 20<->40 Pwr diff */
+ u8 TxPwrHt20Diff[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
+ /* For HT<->legacy pwr diff */
+ u8 TxPwrLegacyHtDiff[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
+ /* For power group */
+ u8 PwrGroupHT20[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
+ u8 PwrGroupHT40[RF_PATH_MAX][CHANNEL_MAX_NUMBER];
+
+ u8 LegacyHTTxPowerDiff;/* Legacy to HT rate power diff */
+ /* The current Tx Power Level */
+ u8 CurrentCckTxPwrIdx;
+ u8 CurrentOfdm24GTxPwrIdx;
+ u8 CurrentBW2024GTxPwrIdx;
+ u8 CurrentBW4024GTxPwrIdx;
+
+
+ /* Read/write are allow for following hardware information variables */
+ u8 framesync;
+ u32 framesyncC34;
+ u8 framesyncMonitor;
+ u8 DefaultInitialGain[4];
+ u8 pwrGroupCnt;
+ u32 MCSTxPowerLevelOriginalOffset[MAX_PG_GROUP][16];
+ u32 CCKTxPowerLevelOriginalOffset;
+
+ u8 CrystalCap;
+ u32 AntennaTxPath; /* Antenna path Tx */
+ u32 AntennaRxPath; /* Antenna path Rx */
+ u8 BluetoothCoexist;
+ u8 ExternalPA;
+
+ u8 bLedOpenDrain; /* Open-drain support for controlling the LED.*/
+
+ u8 b1x1RecvCombine; /* for 1T1R receive combining */
+
+ u32 AcParam_BE; /* Original parameter for BE, use for EDCA turbo. */
+
+ struct bb_reg_def PHYRegDef[4]; /* Radio A/B/C/D */
+
+ u32 RfRegChnlVal[2];
+
+ /* RDG enable */
+ bool bRDGEnable;
+
+ /* for host message to fw */
+ u8 LastHMEBoxNum;
+
+ u8 fw_ractrl;
+ u8 RegTxPause;
+ /* Beacon function related global variable. */
+ u32 RegBcnCtrlVal;
+ u8 RegFwHwTxQCtrl;
+ u8 RegReg542;
+ u8 RegCR_1;
+
+ struct dm_priv dmpriv;
+ struct odm_dm_struct odmpriv;
+ struct sreset_priv srestpriv;
+
+ u8 CurAntenna;
+ u8 AntDivCfg;
+ u8 TRxAntDivType;
+
+
+ u8 bDumpRxPkt;/* for debug */
+ u8 bDumpTxPkt;/* for debug */
+ u8 FwRsvdPageStartOffset; /* Reserve page start offset except
+ * beacon in TxQ. */
+
+ /* 2010/08/09 MH Add CU power down mode. */
+ bool pwrdown;
+
+ /* Add for dual MAC 0--Mac0 1--Mac1 */
+ u32 interfaceIndex;
+
+ u8 OutEpQueueSel;
+ u8 OutEpNumber;
+
+ /* Add for USB aggregation mode dynamic scheme. */
+ bool UsbRxHighSpeedMode;
+
+ /* 2010/11/22 MH Add for slim combo debug mode selection. */
+ /* This is used to fix a drawback of the CU TSMC-A/UMC-A cut
+ * (HW auto suspend ability; close BT clock). */
+ bool SlimComboDbg;
+
+ u16 EfuseUsedBytes;
+
+#ifdef CONFIG_88EU_P2P
+ struct P2P_PS_Offload_t p2p_ps_offload;
+#endif
+
+ /* Auto FSM to turn on; includes clock, isolation, and power control
+ * for MAC only. */
+ u8 bMacPwrCtrlOn;
+
+ u32 UsbBulkOutSize;
+
+ /* Interrupt related register information. */
+ u32 IntArray[3];/* HISR0,HISR1,HSISR */
+ u32 IntrMask[3];
+ u8 C2hArray[16];
+ u8 UsbTxAggMode;
+ u8 UsbTxAggDescNum;
+ u16 HwRxPageSize; /* Hardware setting */
+ u32 MaxUsbRxAggBlock;
+
+ enum usb_rx_agg_mode UsbRxAggMode;
+ u8 UsbRxAggBlockCount; /* USB Block count. Block size is
+ * 512-byte in high speed and 64-byte
+ * in full speed */
+ u8 UsbRxAggBlockTimeout;
+ u8 UsbRxAggPageCount; /* 8192C DMA page count */
+ u8 UsbRxAggPageTimeout;
+};
+
+#define GET_HAL_DATA(__pAdapter) \
+ ((struct hal_data_8188e *)((__pAdapter)->HalData))
+#define GET_RF_TYPE(priv) (GET_HAL_DATA(priv)->rf_type)
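+
+/* Usage sketch (illustrative; "padapter" is an assumed struct adapter *): */
+/* struct hal_data_8188e *haldata = GET_HAL_DATA(padapter); */
+/* u8 rf = GET_RF_TYPE(padapter); */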
+
+#define INCLUDE_MULTI_FUNC_BT(_Adapter) \
+ (GET_HAL_DATA(_Adapter)->MultiFunc & RT_MULTI_FUNC_BT)
+#define INCLUDE_MULTI_FUNC_GPS(_Adapter) \
+ (GET_HAL_DATA(_Adapter)->MultiFunc & RT_MULTI_FUNC_GPS)
+
+/* rtl8188e_hal_init.c */
+s32 rtl8188e_FirmwareDownload(struct adapter *padapter);
+void _8051Reset88E(struct adapter *padapter);
+void rtl8188e_InitializeFirmwareVars(struct adapter *padapter);
+
+
+s32 InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy);
+
+/* EFuse */
+u8 GetEEPROMSize8188E(struct adapter *padapter);
+void Hal_InitPGData88E(struct adapter *padapter);
+void Hal_EfuseParseIDCode88E(struct adapter *padapter, u8 *hwinfo);
+void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *hwinfo,
+ bool AutoLoadFail);
+
+void Hal_EfuseParseEEPROMVer88E(struct adapter *padapter, u8 *hwinfo,
+ bool AutoLoadFail);
+void rtl8188e_EfuseParseChnlPlan(struct adapter *padapter, u8 *hwinfo,
+ bool AutoLoadFail);
+void Hal_EfuseParseCustomerID88E(struct adapter *padapter, u8 *hwinfo,
+ bool AutoLoadFail);
+void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent,
+ bool AutoLoadFail);
+void Hal_ReadThermalMeter_88E(struct adapter *padapter, u8 *PROMContent,
+ bool AutoloadFail);
+void Hal_EfuseParseXtal_8188E(struct adapter *pAdapter, u8 *hwinfo,
+ bool AutoLoadFail);
+void Hal_EfuseParseBoardType88E(struct adapter *pAdapter, u8 *hwinfo,
+ bool AutoLoadFail);
+void Hal_ReadPowerSavingMode88E(struct adapter *pAdapter, u8 *hwinfo,
+ bool AutoLoadFail);
+
+bool HalDetectPwrDownMode88E(struct adapter *Adapter);
+
+void Hal_InitChannelPlan(struct adapter *padapter);
+void rtl8188e_set_hal_ops(struct hal_ops *pHalFunc);
+
+/* register */
+void SetBcnCtrlReg(struct adapter *padapter, u8 SetBits, u8 ClearBits);
+
+void rtl8188e_clone_haldata(struct adapter *dst, struct adapter *src);
+void rtl8188e_start_thread(struct adapter *padapter);
+void rtl8188e_stop_thread(struct adapter *padapter);
+
+void rtw_IOL_cmd_tx_pkt_buf_dump(struct adapter *Adapter, int len);
+s32 rtl8188e_iol_efuse_patch(struct adapter *padapter);
+void rtw_cancel_all_timer(struct adapter *padapter);
+void _ps_open_RF(struct adapter *adapt);
+
+#endif /* __RTL8188E_HAL_H__ */
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_led.h b/drivers/staging/rtl8188eu/include/rtl8188e_led.h
new file mode 100644
index 00000000000..c0147e73cd8
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_led.h
@@ -0,0 +1,35 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTL8188E_LED_H__
+#define __RTL8188E_LED_H__
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+
+/* */
+/* Interface to manipulate LED objects. */
+/* */
+void rtl8188eu_InitSwLeds(struct adapter *padapter);
+void rtl8188eu_DeInitSwLeds(struct adapter *padapter);
+void SwLedOn(struct adapter *padapter, struct LED_871x *pLed);
+void SwLedOff(struct adapter *padapter, struct LED_871x *pLed);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
new file mode 100644
index 00000000000..02ccb404f53
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
@@ -0,0 +1,69 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTL8188E_RECV_H__
+#define __RTL8188E_RECV_H__
+
+#define TX_RPT1_PKT_LEN 8
+
+#define RECV_BLK_SZ 512
+#define RECV_BLK_CNT 16
+#define RECV_BLK_TH RECV_BLK_CNT
+#define RECV_BULK_IN_ADDR 0x80
+#define RECV_INT_IN_ADDR 0x81
+
+#define NR_PREALLOC_RECV_SKB (8)
+
+#define NR_RECVBUFF (4)
+
+#define MAX_RECVBUF_SZ (15360) /* 15k < 16k */
+
+struct phy_stat {
+ unsigned int phydw0;
+ unsigned int phydw1;
+ unsigned int phydw2;
+ unsigned int phydw3;
+ unsigned int phydw4;
+ unsigned int phydw5;
+ unsigned int phydw6;
+ unsigned int phydw7;
+};
+
+/* Rx smooth factor */
+#define Rx_Smooth_Factor (20)
+
+enum rx_packet_type {
+ NORMAL_RX,/* Normal rx packet */
+ TX_REPORT1,/* CCX */
+ TX_REPORT2,/* TX RPT */
+ HIS_REPORT,/* USB HISR RPT */
+};
+
+#define INTERRUPT_MSG_FORMAT_LEN 60
+void rtl8188eu_init_recvbuf(struct adapter *padapter, struct recv_buf *buf);
+s32 rtl8188eu_init_recv_priv(struct adapter *padapter);
+void rtl8188eu_free_recv_priv(struct adapter *padapter);
+void rtl8188eu_recv_hdl(struct adapter *padapter, struct recv_buf *precvbuf);
+void rtl8188eu_recv_tasklet(void *priv);
+void rtl8188e_query_rx_phy_status(union recv_frame *fr, struct phy_stat *phy);
+void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe);
+void update_recvframe_phyinfo_88e(union recv_frame *fra, struct phy_stat *phy);
+void update_recvframe_attrib_88e(union recv_frame *fra, struct recv_stat *stat);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_rf.h b/drivers/staging/rtl8188eu/include/rtl8188e_rf.h
new file mode 100644
index 00000000000..10fc356e020
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_rf.h
@@ -0,0 +1,36 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTL8188E_RF_H__
+#define __RTL8188E_RF_H__
+
+#define RF6052_MAX_TX_PWR 0x3F
+#define RF6052_MAX_REG 0x3F
+#define RF6052_MAX_PATH 2
+
+
+int PHY_RF6052_Config8188E(struct adapter *Adapter);
+void rtl8188e_RF_ChangeTxPath(struct adapter *Adapter, u16 DataRate);
+void rtl8188e_PHY_RF6052SetBandwidth(struct adapter *Adapter,
+ enum ht_channel_width Bandwidth);
+void rtl8188e_PHY_RF6052SetCckTxPower(struct adapter *Adapter, u8 *level);
+void rtl8188e_PHY_RF6052SetOFDMTxPower(struct adapter *Adapter, u8 *ofdm,
+ u8 *pwrbw20, u8 *pwrbw40, u8 channel);
+
+#endif/* __RTL8188E_RF_H__ */
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
new file mode 100644
index 00000000000..c12c56b9734
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
@@ -0,0 +1,1439 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *******************************************************************************/
+#ifndef __RTL8188E_SPEC_H__
+#define __RTL8188E_SPEC_H__
+
+#ifndef BIT
+#define BIT(x) (1 << (x))
+#endif
+
+#define BIT0 0x00000001
+#define BIT1 0x00000002
+#define BIT2 0x00000004
+#define BIT3 0x00000008
+#define BIT4 0x00000010
+#define BIT5 0x00000020
+#define BIT6 0x00000040
+#define BIT7 0x00000080
+#define BIT8 0x00000100
+#define BIT9 0x00000200
+#define BIT10 0x00000400
+#define BIT11 0x00000800
+#define BIT12 0x00001000
+#define BIT13 0x00002000
+#define BIT14 0x00004000
+#define BIT15 0x00008000
+#define BIT16 0x00010000
+#define BIT17 0x00020000
+#define BIT18 0x00040000
+#define BIT19 0x00080000
+#define BIT20 0x00100000
+#define BIT21 0x00200000
+#define BIT22 0x00400000
+#define BIT23 0x00800000
+#define BIT24 0x01000000
+#define BIT25 0x02000000
+#define BIT26 0x04000000
+#define BIT27 0x08000000
+#define BIT28 0x10000000
+#define BIT29 0x20000000
+#define BIT30 0x40000000
+#define BIT31 0x80000000
+
+/* 8192C Register offset definition */
+
+#define HAL_PS_TIMER_INT_DELAY 50 /* 50 microseconds */
+#define HAL_92C_NAV_UPPER_UNIT 128 /* micro-second */
+
+#define MAC_ADDR_LEN 6
+/* 8188E PKT_BUFF_ACCESS_CTRL value */
+#define TXPKT_BUF_SELECT 0x69
+#define RXPKT_BUF_SELECT 0xA5
+#define DISABLE_TRXPKT_BUF_ACCESS 0x0
+
+
+/* 0x0000h ~ 0x00FFh System Configuration */
+#define REG_SYS_ISO_CTRL 0x0000
+#define REG_SYS_FUNC_EN 0x0002
+#define REG_APS_FSMCO 0x0004
+#define REG_SYS_CLKR 0x0008
+#define REG_9346CR 0x000A
+#define REG_EE_VPD 0x000C
+#define REG_AFE_MISC 0x0010
+#define REG_SPS0_CTRL 0x0011
+#define REG_SPS_OCP_CFG 0x0018
+#define REG_RSV_CTRL 0x001C
+#define REG_RF_CTRL 0x001F
+#define REG_LDOA15_CTRL 0x0020
+#define REG_LDOV12D_CTRL 0x0021
+#define REG_LDOHCI12_CTRL 0x0022
+#define REG_LPLDO_CTRL 0x0023
+#define REG_AFE_XTAL_CTRL 0x0024
+#define REG_AFE_PLL_CTRL 0x0028
+#define REG_APE_PLL_CTRL_EXT 0x002c
+#define REG_EFUSE_CTRL 0x0030
+#define REG_EFUSE_TEST 0x0034
+#define REG_GPIO_MUXCFG 0x0040
+#define REG_GPIO_IO_SEL 0x0042
+#define REG_MAC_PINMUX_CFG 0x0043
+#define REG_GPIO_PIN_CTRL 0x0044
+#define REG_GPIO_INTM 0x0048
+#define REG_LEDCFG0 0x004C
+#define REG_LEDCFG1 0x004D
+#define REG_LEDCFG2 0x004E
+#define REG_LEDCFG3 0x004F
+#define REG_FSIMR 0x0050
+#define REG_FSISR 0x0054
+#define REG_HSIMR 0x0058
+#define REG_HSISR 0x005c
+#define REG_GPIO_PIN_CTRL_2 0x0060 /* RTL8723 WIFI/BT/GPS
+ * Multi-Function GPIO Pin Control. */
+#define REG_GPIO_IO_SEL_2 0x0062 /* RTL8723 WIFI/BT/GPS
+ * Multi-Function GPIO Select. */
+#define REG_BB_PAD_CTRL 0x0064
+#define REG_MULTI_FUNC_CTRL 0x0068 /* RTL8723 WIFI/BT/GPS
+ * Multi-Function control source. */
+#define REG_GPIO_OUTPUT 0x006c
+#define REG_AFE_XTAL_CTRL_EXT 0x0078 /* RTL8188E */
+#define REG_XCK_OUT_CTRL 0x007c /* RTL8188E */
+#define REG_MCUFWDL 0x0080
+#define REG_WOL_EVENT 0x0081 /* RTL8188E */
+#define REG_MCUTSTCFG 0x0084
+#define REG_HMEBOX_E0 0x0088
+#define REG_HMEBOX_E1 0x008A
+#define REG_HMEBOX_E2 0x008C
+#define REG_HMEBOX_E3 0x008E
+#define REG_HMEBOX_EXT_0 0x01F0
+#define REG_HMEBOX_EXT_1 0x01F4
+#define REG_HMEBOX_EXT_2 0x01F8
+#define REG_HMEBOX_EXT_3 0x01FC
+#define REG_HIMR_88E 0x00B0
+#define REG_HISR_88E 0x00B4
+#define REG_HIMRE_88E 0x00B8
+#define REG_HISRE_88E 0x00BC
+#define REG_EFUSE_ACCESS 0x00CF /* Efuse access protection
+ * for RTL8723 */
+#define REG_BIST_SCAN 0x00D0
+#define REG_BIST_RPT 0x00D4
+#define REG_BIST_ROM_RPT 0x00D8
+#define REG_USB_SIE_INTF 0x00E0
+#define REG_PCIE_MIO_INTF 0x00E4
+#define REG_PCIE_MIO_INTD 0x00E8
+#define REG_HPON_FSM 0x00EC
+#define REG_SYS_CFG 0x00F0
+#define REG_GPIO_OUTSTS 0x00F4 /* For RTL8723 only. */
+#define REG_TYPE_ID 0x00FC
+
+#define REG_MAC_PHY_CTRL_NORMAL 0x00f8
+
+/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
+#define REG_CR 0x0100
+#define REG_PBP 0x0104
+#define REG_PKT_BUFF_ACCESS_CTRL 0x0106
+#define REG_TRXDMA_CTRL 0x010C
+#define REG_TRXFF_BNDY 0x0114
+#define REG_TRXFF_STATUS 0x0118
+#define REG_RXFF_PTR 0x011C
+/* define REG_HIMR 0x0120 */
+/* define REG_HISR 0x0124 */
+#define REG_HIMRE 0x0128
+#define REG_HISRE 0x012C
+#define REG_CPWM 0x012F
+#define REG_FWIMR 0x0130
+#define REG_FTIMR 0x0138
+#define REG_FWISR 0x0134
+#define REG_PKTBUF_DBG_CTRL 0x0140
+#define REG_PKTBUF_DBG_ADDR (REG_PKTBUF_DBG_CTRL)
+#define REG_RXPKTBUF_DBG (REG_PKTBUF_DBG_CTRL+2)
+#define REG_TXPKTBUF_DBG (REG_PKTBUF_DBG_CTRL+3)
+#define REG_RXPKTBUF_CTRL (REG_PKTBUF_DBG_CTRL+2)
+#define REG_PKTBUF_DBG_DATA_L 0x0144
+#define REG_PKTBUF_DBG_DATA_H 0x0148
+
+#define REG_TC0_CTRL 0x0150
+#define REG_TC1_CTRL 0x0154
+#define REG_TC2_CTRL 0x0158
+#define REG_TC3_CTRL 0x015C
+#define REG_TC4_CTRL 0x0160
+#define REG_TCUNIT_BASE 0x0164
+#define REG_MBIST_START 0x0174
+#define REG_MBIST_DONE 0x0178
+#define REG_MBIST_FAIL 0x017C
+#define REG_32K_CTRL 0x0194 /* RTL8188E */
+#define REG_C2HEVT_MSG_NORMAL 0x01A0
+#define REG_C2HEVT_CLEAR 0x01AF
+#define REG_MCUTST_1 0x01c0
+#define REG_FMETHR 0x01C8
+#define REG_HMETFR 0x01CC
+#define REG_HMEBOX_0 0x01D0
+#define REG_HMEBOX_1 0x01D4
+#define REG_HMEBOX_2 0x01D8
+#define REG_HMEBOX_3 0x01DC
+
+#define REG_LLT_INIT 0x01E0
+
+/* 0x0200h ~ 0x027Fh TXDMA Configuration */
+#define REG_RQPN 0x0200
+#define REG_FIFOPAGE 0x0204
+#define REG_TDECTRL 0x0208
+#define REG_TXDMA_OFFSET_CHK 0x020C
+#define REG_TXDMA_STATUS 0x0210
+#define REG_RQPN_NPQ 0x0214
+
+/* 0x0280h ~ 0x02FFh RXDMA Configuration */
+#define REG_RXDMA_AGG_PG_TH 0x0280
+#define REG_RXPKT_NUM 0x0284
+#define REG_RXDMA_STATUS 0x0288
+
+/* 0x0300h ~ 0x03FFh PCIe */
+#define REG_PCIE_CTRL_REG 0x0300
+#define REG_INT_MIG 0x0304 /* Interrupt Migration */
+#define REG_BCNQ_DESA 0x0308 /* TX Beacon Descr Address */
+#define REG_HQ_DESA 0x0310 /* TX High Queue Descr Addr */
+#define REG_MGQ_DESA 0x0318 /* TX Manage Queue Descr Addr*/
+#define REG_VOQ_DESA 0x0320 /* TX VO Queue Descr Addr */
+#define REG_VIQ_DESA 0x0328 /* TX VI Queue Descr Addr */
+#define REG_BEQ_DESA 0x0330 /* TX BE Queue Descr Addr */
+#define REG_BKQ_DESA 0x0338 /* TX BK Queue Descr Addr */
+#define REG_RX_DESA 0x0340 /* RX Queue Descr Addr */
+#define REG_MDIO 0x0354 /* MDIO for Access PCIE PHY */
+#define REG_DBG_SEL 0x0360 /* Debug Selection Register */
+#define REG_PCIE_HRPWM 0x0361 /* PCIe RPWM */
+#define REG_PCIE_HCPWM 0x0363 /* PCIe CPWM */
+#define REG_WATCH_DOG 0x0368
+
+/* RTL8723 series ------------------------------ */
+#define REG_PCIE_HISR 0x03A0
+
+/* spec version 11 */
+/* 0x0400h ~ 0x047Fh Protocol Configuration */
+#define REG_VOQ_INFORMATION 0x0400
+#define REG_VIQ_INFORMATION 0x0404
+#define REG_BEQ_INFORMATION 0x0408
+#define REG_BKQ_INFORMATION 0x040C
+#define REG_MGQ_INFORMATION 0x0410
+#define REG_HGQ_INFORMATION 0x0414
+#define REG_BCNQ_INFORMATION 0x0418
+#define REG_TXPKT_EMPTY 0x041A
+
+#define REG_CPU_MGQ_INFORMATION 0x041C
+#define REG_FWHW_TXQ_CTRL 0x0420
+#define REG_HWSEQ_CTRL 0x0423
+#define REG_TXPKTBUF_BCNQ_BDNY 0x0424
+#define REG_TXPKTBUF_MGQ_BDNY 0x0425
+#define REG_LIFETIME_EN 0x0426
+#define REG_MULTI_BCNQ_OFFSET 0x0427
+#define REG_SPEC_SIFS 0x0428
+#define REG_RL 0x042A
+#define REG_DARFRC 0x0430
+#define REG_RARFRC 0x0438
+#define REG_RRSR 0x0440
+#define REG_ARFR0 0x0444
+#define REG_ARFR1 0x0448
+#define REG_ARFR2 0x044C
+#define REG_ARFR3 0x0450
+#define REG_AGGLEN_LMT 0x0458
+#define REG_AMPDU_MIN_SPACE 0x045C
+#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D
+#define REG_FAST_EDCA_CTRL 0x0460
+#define REG_RD_RESP_PKT_TH 0x0463
+#define REG_INIRTS_RATE_SEL 0x0480
+/* define REG_INIDATA_RATE_SEL 0x0484 */
+#define REG_POWER_STATUS 0x04A4
+#define REG_POWER_STAGE1 0x04B4
+#define REG_POWER_STAGE2 0x04B8
+#define REG_PKT_VO_VI_LIFE_TIME 0x04C0
+#define REG_PKT_BE_BK_LIFE_TIME 0x04C2
+#define REG_STBC_SETTING 0x04C4
+#define REG_PROT_MODE_CTRL 0x04C8
+#define REG_MAX_AGGR_NUM 0x04CA
+#define REG_RTS_MAX_AGGR_NUM 0x04CB
+#define REG_BAR_MODE_CTRL 0x04CC
+#define REG_RA_TRY_RATE_AGG_LMT 0x04CF
+#define REG_EARLY_MODE_CONTROL 0x4D0
+#define REG_NQOS_SEQ 0x04DC
+#define REG_QOS_SEQ 0x04DE
+#define REG_NEED_CPU_HANDLE 0x04E0
+#define REG_PKT_LOSE_RPT 0x04E1
+#define REG_PTCL_ERR_STATUS 0x04E2
+#define REG_TX_RPT_CTRL 0x04EC
+#define REG_TX_RPT_TIME 0x04F0 /* 2 byte */
+#define REG_DUMMY 0x04FC
+
+/* 0x0500h ~ 0x05FFh EDCA Configuration */
+#define REG_EDCA_VO_PARAM 0x0500
+#define REG_EDCA_VI_PARAM 0x0504
+#define REG_EDCA_BE_PARAM 0x0508
+#define REG_EDCA_BK_PARAM 0x050C
+#define REG_BCNTCFG 0x0510
+#define REG_PIFS 0x0512
+#define REG_RDG_PIFS 0x0513
+#define REG_SIFS_CTX 0x0514
+#define REG_SIFS_TRX 0x0516
+#define REG_TSFTR_SYN_OFFSET 0x0518
+#define REG_AGGR_BREAK_TIME 0x051A
+#define REG_SLOT 0x051B
+#define REG_TX_PTCL_CTRL 0x0520
+#define REG_TXPAUSE 0x0522
+#define REG_DIS_TXREQ_CLR 0x0523
+#define REG_RD_CTRL 0x0524
+/* Format for offset 540h-542h: */
+/* [3:0]: TBTT prohibit setup in units of 32us. The time for HW to fetch
+ * the beacon content before TBTT. */
+/* [7:4]: Reserved. */
+/* [19:8]: TBTT prohibit hold in units of 32us. The time the HW holds
+ * in order to send the beacon packet. */
+/* [23:20]: Reserved */
+/* Description: */
+/* | */
+/* |<--Setup--|--Hold------------>| */
+/* --------------|---------------------- */
+/* | */
+/* TBTT */
+/* Note: We cannot update beacon content to HW or send any AC packets during
+ * the time between Setup and Hold. */
+#define REG_TBTT_PROHIBIT 0x0540
+#define REG_RD_NAV_NXT 0x0544
+#define REG_NAV_PROT_LEN 0x0546
+#define REG_BCN_CTRL 0x0550
+#define REG_BCN_CTRL_1 0x0551
+#define REG_MBID_NUM 0x0552
+#define REG_DUAL_TSF_RST 0x0553
+#define REG_BCN_INTERVAL 0x0554
+#define REG_DRVERLYINT 0x0558
+#define REG_BCNDMATIM 0x0559
+#define REG_ATIMWND 0x055A
+#define REG_BCN_MAX_ERR 0x055D
+#define REG_RXTSF_OFFSET_CCK 0x055E
+#define REG_RXTSF_OFFSET_OFDM 0x055F
+#define REG_TSFTR 0x0560
+#define REG_TSFTR1 0x0568
+#define REG_ATIMWND_1 0x0570
+#define REG_PSTIMER 0x0580
+#define REG_TIMER0 0x0584
+#define REG_TIMER1 0x0588
+#define REG_ACMHWCTRL 0x05C0
+
+/* define REG_FW_TSF_SYNC_CNT 0x04A0 */
+#define REG_FW_RESET_TSF_CNT_1 0x05FC
+#define REG_FW_RESET_TSF_CNT_0 0x05FD
+#define REG_FW_BCN_DIS_CNT 0x05FE
+
+/* 0x0600h ~ 0x07FFh WMAC Configuration */
+#define REG_APSD_CTRL 0x0600
+#define REG_BWOPMODE 0x0603
+#define REG_TCR 0x0604
+#define REG_RCR 0x0608
+#define REG_RX_PKT_LIMIT 0x060C
+#define REG_RX_DLK_TIME 0x060D
+#define REG_RX_DRVINFO_SZ 0x060F
+
+#define REG_MACID 0x0610
+#define REG_BSSID 0x0618
+#define REG_MAR 0x0620
+#define REG_MBIDCAMCFG 0x0628
+
+#define REG_USTIME_EDCA 0x0638
+#define REG_MAC_SPEC_SIFS 0x063A
+
+/* 20100719 Joseph: Hardware register definition change. (HW datasheet v54) */
+/* [15:8]SIFS_R2T_OFDM, [7:0]SIFS_R2T_CCK */
+#define REG_R2T_SIFS 0x063C
+/* [15:8]SIFS_T2T_OFDM, [7:0]SIFS_T2T_CCK */
+#define REG_T2T_SIFS 0x063E
+#define REG_ACKTO 0x0640
+#define REG_CTS2TO 0x0641
+#define REG_EIFS 0x0642
+
+/* RXERR_RPT */
+#define RXERR_TYPE_OFDM_PPDU 0
+#define RXERR_TYPE_OFDM_false_ALARM 1
+#define RXERR_TYPE_OFDM_MPDU_OK 2
+#define RXERR_TYPE_OFDM_MPDU_FAIL 3
+#define RXERR_TYPE_CCK_PPDU 4
+#define RXERR_TYPE_CCK_false_ALARM 5
+#define RXERR_TYPE_CCK_MPDU_OK 6
+#define RXERR_TYPE_CCK_MPDU_FAIL 7
+#define RXERR_TYPE_HT_PPDU 8
+#define RXERR_TYPE_HT_false_ALARM 9
+#define RXERR_TYPE_HT_MPDU_TOTAL 10
+#define RXERR_TYPE_HT_MPDU_OK 11
+#define RXERR_TYPE_HT_MPDU_FAIL 12
+#define RXERR_TYPE_RX_FULL_DROP 15
+
+#define RXERR_COUNTER_MASK 0xFFFFF
+#define RXERR_RPT_RST BIT(27)
+#define _RXERR_RPT_SEL(type) ((type) << 28)
+
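+/*
+ * Illustrative sketch, not part of the original header: a read of one RX
+ * error counter is set up by writing a type selector (bits [31:28]) plus
+ * the reset bit into the RXERR report register, and the counter itself is
+ * then taken from the low 20 bits (RXERR_COUNTER_MASK).  The helper name
+ * is hypothetical; the register write itself is driver-specific and
+ * omitted here.
+ */
+static inline u32 rxerr_rpt_cmd_example(u32 type)
+{
+	/* value to write to REG_RXERR_RPT (0x0664, defined below) */
+	return _RXERR_RPT_SEL(type) | RXERR_RPT_RST;
+}
+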
+/* Note: */
+/* The NAV upper value is very important to the WiFi 11n 5.2.3 NAV test.
+ * The default value is always too small, and the WiFi TestPlan tests
+ * with 25,000 microseconds of NAV by sending CTS in the air.
+ * We must set this value to more than 25,000 microseconds to pass
+ * that item. The offset of NAV_UPPER in the 8192C spec is incorrect;
+ * the correct offset is 0x0652. */
+#define REG_NAV_UPPER 0x0652 /* unit of 128 */
+
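+/*
+ * Illustrative sketch, not part of the original header: REG_NAV_UPPER is
+ * programmed in units of 128 microseconds (HAL_92C_NAV_UPPER_UNIT), so
+ * covering the 25,000 us required by the NAV test item needs
+ * ceil(25000 / 128) = 196 units.  The _EXAMPLE names are hypothetical.
+ */
+#define NAV_UPPER_US_EXAMPLE	25000
+#define NAV_UPPER_VAL_EXAMPLE	\
+	((NAV_UPPER_US_EXAMPLE + HAL_92C_NAV_UPPER_UNIT - 1) / \
+	 HAL_92C_NAV_UPPER_UNIT)	/* 196, written to REG_NAV_UPPER */
+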
+/* WMA, BA, CCX */
+/* define REG_NAV_CTRL 0x0650 */
+#define REG_BACAMCMD 0x0654
+#define REG_BACAMCONTENT 0x0658
+#define REG_LBDLY 0x0660
+#define REG_FWDLY 0x0661
+#define REG_RXERR_RPT 0x0664
+#define REG_WMAC_TRXPTCL_CTL 0x0668
+
+/* Security */
+#define REG_CAMCMD 0x0670
+#define REG_CAMWRITE 0x0674
+#define REG_CAMREAD 0x0678
+#define REG_CAMDBG 0x067C
+#define REG_SECCFG 0x0680
+
+/* Power */
+#define REG_WOW_CTRL 0x0690
+#define REG_PS_RX_INFO 0x0692
+#define REG_UAPSD_TID 0x0693
+#define REG_WKFMCAM_CMD 0x0698
+#define REG_WKFMCAM_NUM_88E 0x698
+#define REG_RXFLTMAP0 0x06A0
+#define REG_RXFLTMAP1 0x06A2
+#define REG_RXFLTMAP2 0x06A4
+#define REG_BCN_PSR_RPT 0x06A8
+#define REG_BT_COEX_TABLE 0x06C0
+
+/* Hardware Port 2 */
+#define REG_MACID1 0x0700
+#define REG_BSSID1 0x0708
+
+/* 0xFE00h ~ 0xFE55h USB Configuration */
+#define REG_USB_INFO 0xFE17
+#define REG_USB_SPECIAL_OPTION 0xFE55
+#define REG_USB_DMA_AGG_TO 0xFE5B
+#define REG_USB_AGG_TO 0xFE5C
+#define REG_USB_AGG_TH 0xFE5D
+
+/* For normal chip */
+#define REG_NORMAL_SIE_VID 0xFE60 /* 0xFE60~0xFE61 */
+#define REG_NORMAL_SIE_PID 0xFE62 /* 0xFE62~0xFE63 */
+#define REG_NORMAL_SIE_OPTIONAL 0xFE64
+#define REG_NORMAL_SIE_EP 0xFE65 /* 0xFE65~0xFE67 */
+#define REG_NORMAL_SIE_PHY 0xFE68 /* 0xFE68~0xFE6B */
+#define REG_NORMAL_SIE_OPTIONAL2 0xFE6C
+#define REG_NORMAL_SIE_GPS_EP 0xFE6D /* 0xFE6D, for RTL8723 only. */
+#define REG_NORMAL_SIE_MAC_ADDR 0xFE70 /* 0xFE70~0xFE75 */
+#define REG_NORMAL_SIE_STRING 0xFE80 /* 0xFE80~0xFEDF */
+
+/* TODO: use these definitions when following the REG_xxx naming rule. */
+/* NOTE: DO NOT remove these definitions; they will be used later. */
+
+#define EFUSE_CTRL REG_EFUSE_CTRL /* E-Fuse Control. */
+#define EFUSE_TEST REG_EFUSE_TEST /* E-Fuse Test. */
+#define MSR (REG_CR + 2) /* Media Status reg */
+#define ISR REG_HISR_88E
+/* Timing Sync Function Timer Register. */
+#define TSFR REG_TSFTR
+
+#define PBP REG_PBP
+
+/* Redefine the MACID register to be compatible with prior ICs. */
+/* MAC ID Register, Offset 0x0050-0x0053 */
+#define IDR0 REG_MACID
+/* MAC ID Register, Offset 0x0054-0x0055 */
+#define IDR4 (REG_MACID + 4)
+
+/* 9. Security Control Registers (Offset: ) */
+/* In the 8190 datasheet this is called CAMcmd */
+#define RWCAM REG_CAMCMD
+/* Software write CAM input content */
+#define WCAMI REG_CAMWRITE
+/* Software read/write CAM config */
+#define RCAMO REG_CAMREAD
+#define CAMDBG REG_CAMDBG
+/* Security Configuration Register */
+#define SECR REG_SECCFG
+
+/* Unused register */
+#define UnusedRegister 0x1BF
+#define DCAM UnusedRegister
+#define PSR UnusedRegister
+#define BBAddr UnusedRegister
+#define PhyDataR UnusedRegister
+
+/* Min Spacing related settings. */
+#define MAX_MSS_DENSITY_2T 0x13
+#define MAX_MSS_DENSITY_1T 0x0A
+
+/* EEPROM enable when set to 1 */
+#define CmdEEPROM_En BIT5
+/* System EEPROM select, 0: boot from E-FUSE, 1: The EEPROM used is 9346 */
+#define CmdEERPOMSEL BIT4
+#define Cmd9346CR_9356SEL BIT4
+
+/* 8192C GPIO MUX Configuration Register (offset 0x40, 4 byte) */
+#define GPIOSEL_GPIO 0
+#define GPIOSEL_ENBT BIT5
+
+/* 8192C GPIO PIN Control Register (offset 0x44, 4 byte) */
+/* GPIO pins input value */
+#define GPIO_IN REG_GPIO_PIN_CTRL
+/* GPIO pins output value */
+#define GPIO_OUT (REG_GPIO_PIN_CTRL+1)
+/* GPIO pins output enable when a bit is set to "1"; otherwise,
+ * input is configured. */
+#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2)
+#define GPIO_MOD (REG_GPIO_PIN_CTRL+3)
+
+/* 8723/8188E Host System Interrupt Mask Register (offset 0x58, 32 bits) */
+#define HSIMR_GPIO12_0_INT_EN BIT0
+#define HSIMR_SPS_OCP_INT_EN BIT5
+#define HSIMR_RON_INT_EN BIT6
+#define HSIMR_PDN_INT_EN BIT7
+#define HSIMR_GPIO9_INT_EN BIT25
+
+/* 8723/8188E Host System Interrupt Status Register (offset 0x5C, 32 bits) */
+#define HSISR_GPIO12_0_INT BIT0
+#define HSISR_SPS_OCP_INT BIT5
+#define HSISR_RON_INT_EN BIT6
+#define HSISR_PDNINT BIT7
+#define HSISR_GPIO9_INT BIT25
+
+/* 8192C (MSR) Media Status Register (Offset 0x4C, 8 bits) */
+/*
+Network Type
+00: No link
+01: Link in ad hoc network
+10: Link in infrastructure network
+11: AP mode
+Default: 00b.
+*/
+#define MSR_NOLINK 0x00
+#define MSR_ADHOC 0x01
+#define MSR_INFRA 0x02
+#define MSR_AP 0x03
+
+/* 88EU USB interrupt content offsets */
+#define USB_INTR_CONTENT_C2H_OFFSET 0
+#define USB_INTR_CONTENT_CPWM1_OFFSET 16
+#define USB_INTR_CONTENT_CPWM2_OFFSET 20
+#define USB_INTR_CONTENT_HISR_OFFSET 48
+#define USB_INTR_CONTENT_HISRE_OFFSET 52
+
+/* 88E Driver Initialization Offload REG_FDHM0(Offset 0x88, 8 bits) */
+/* IOL config for REG_FDHM0(Reg0x88) */
+#define CMD_INIT_LLT BIT0
+#define CMD_READ_EFUSE_MAP BIT1
+#define CMD_EFUSE_PATCH BIT2
+#define CMD_IOCONFIG BIT3
+#define CMD_INIT_LLT_ERR BIT4
+#define CMD_READ_EFUSE_MAP_ERR BIT5
+#define CMD_EFUSE_PATCH_ERR BIT6
+#define CMD_IOCONFIG_ERR BIT7
+
+/* 6. Adaptive Control Registers (Offset: 0x0160 - 0x01CF) */
+/* 8192C Response Rate Set Register (offset 0x181, 24bits) */
+#define RRSR_1M BIT0
+#define RRSR_2M BIT1
+#define RRSR_5_5M BIT2
+#define RRSR_11M BIT3
+#define RRSR_6M BIT4
+#define RRSR_9M BIT5
+#define RRSR_12M BIT6
+#define RRSR_18M BIT7
+#define RRSR_24M BIT8
+#define RRSR_36M BIT9
+#define RRSR_48M BIT10
+#define RRSR_54M BIT11
+#define RRSR_MCS0 BIT12
+#define RRSR_MCS1 BIT13
+#define RRSR_MCS2 BIT14
+#define RRSR_MCS3 BIT15
+#define RRSR_MCS4 BIT16
+#define RRSR_MCS5 BIT17
+#define RRSR_MCS6 BIT18
+#define RRSR_MCS7 BIT19
+
+/* 8192C Response Rate Set Register (offset 0x1BF, 8bits) */
+/* WOL bit information */
+#define HAL92C_WOL_PTK_UPDATE_EVENT BIT0
+#define HAL92C_WOL_GTK_UPDATE_EVENT BIT1
+
+/* 8192C BW_OPMODE bits (Offset 0x203, 8bit) */
+#define BW_OPMODE_20MHZ BIT2
+#define BW_OPMODE_5G BIT1
+
+/* 8192C CAM Config Setting (offset 0x250, 1 byte) */
+#define CAM_VALID BIT15
+#define CAM_NOTVALID 0x0000
+#define CAM_USEDK BIT5
+
+#define CAM_CONTENT_COUNT 8
+
+#define CAM_NONE 0x0
+#define CAM_WEP40 0x01
+#define CAM_TKIP 0x02
+#define CAM_AES 0x04
+#define CAM_WEP104 0x05
+#define CAM_SMS4 0x6
+
+#define TOTAL_CAM_ENTRY 32
+#define HALF_CAM_ENTRY 16
+
+#define CAM_CONFIG_USEDK true
+#define CAM_CONFIG_NO_USEDK false
+
+#define CAM_WRITE BIT16
+#define CAM_READ 0x00000000
+#define CAM_POLLINIG BIT31
+
+#define SCR_UseDK 0x01
+#define SCR_TxSecEnable 0x02
+#define SCR_RxSecEnable 0x04
+
+/* 10. Power Save Control Registers (Offset: 0x0260 - 0x02DF) */
+#define WOW_PMEN BIT0 /* Power management Enable. */
+#define WOW_WOMEN BIT1 /* WoW function on or off. */
+#define WOW_MAGIC BIT2 /* Magic packet */
+#define WOW_UWF BIT3 /* Unicast Wakeup frame. */
+
+/* 12. Host Interrupt Status Registers (Offset: 0x0300 - 0x030F) */
+/* 8188 IMR/ISR bits */
+#define IMR_DISABLED_88E 0x0
+/* IMR DW0(0x0060-0063) Bit 0-31 */
+#define IMR_TXCCK_88E BIT30 /* TXRPT interrupt when CCX bit of the packet is set */
+#define IMR_PSTIMEOUT_88E BIT29 /* Power Save Time Out Interrupt */
+#define IMR_GTINT4_88E BIT28 /* When GTIMER4 expires, this bit is set to 1 */
+#define IMR_GTINT3_88E BIT27 /* When GTIMER3 expires, this bit is set to 1 */
+#define IMR_TBDER_88E BIT26 /* Transmit Beacon0 Error */
+#define IMR_TBDOK_88E BIT25 /* Transmit Beacon0 OK */
+#define IMR_TSF_BIT32_TOGGLE_88E BIT24 /* TSF Timer BIT32 toggle indication interrupt */
+#define IMR_BCNDMAINT0_88E BIT20 /* Beacon DMA Interrupt 0 */
+#define IMR_BCNDERR0_88E BIT16 /* Beacon Queue DMA Error 0 */
+#define IMR_HSISR_IND_ON_INT_88E BIT15 /* HSISR Indicator (HSIMR & HSISR is true, this bit is set to 1) */
+#define IMR_BCNDMAINT_E_88E BIT14 /* Beacon DMA Interrupt Extension for Win7 */
+#define IMR_ATIMEND_88E			BIT12	/* CTWindow End or ATIM Window End */
+#define IMR_HISR1_IND_INT_88E BIT11 /* HISR1 Indicator (HISR1 & HIMR1 is true, this bit is set to 1) */
+#define IMR_C2HCMD_88E BIT10 /* CPU to Host Command INT Status, Write 1 clear */
+#define IMR_CPWM2_88E BIT9 /* CPU power Mode exchange INT Status, Write 1 clear */
+#define IMR_CPWM_88E BIT8 /* CPU power Mode exchange INT Status, Write 1 clear */
+#define IMR_HIGHDOK_88E BIT7 /* High Queue DMA OK */
+#define IMR_MGNTDOK_88E BIT6 /* Management Queue DMA OK */
+#define IMR_BKDOK_88E BIT5 /* AC_BK DMA OK */
+#define IMR_BEDOK_88E BIT4 /* AC_BE DMA OK */
+#define IMR_VIDOK_88E BIT3 /* AC_VI DMA OK */
+#define IMR_VODOK_88E BIT2 /* AC_VO DMA OK */
+#define IMR_RDU_88E BIT1 /* Rx Descriptor Unavailable */
+#define IMR_ROK_88E BIT0 /* Receive DMA OK */
+
+/* IMR DW1(0x00B4-00B7) Bit 0-31 */
+#define IMR_BCNDMAINT7_88E BIT27 /* Beacon DMA Interrupt 7 */
+#define IMR_BCNDMAINT6_88E BIT26 /* Beacon DMA Interrupt 6 */
+#define IMR_BCNDMAINT5_88E BIT25 /* Beacon DMA Interrupt 5 */
+#define IMR_BCNDMAINT4_88E BIT24 /* Beacon DMA Interrupt 4 */
+#define IMR_BCNDMAINT3_88E BIT23 /* Beacon DMA Interrupt 3 */
+#define IMR_BCNDMAINT2_88E BIT22 /* Beacon DMA Interrupt 2 */
+#define IMR_BCNDMAINT1_88E BIT21 /* Beacon DMA Interrupt 1 */
+#define IMR_BCNDERR7_88E BIT20 /* Beacon DMA Error Int 7 */
+#define IMR_BCNDERR6_88E BIT19 /* Beacon DMA Error Int 6 */
+#define IMR_BCNDERR5_88E BIT18 /* Beacon DMA Error Int 5 */
+#define IMR_BCNDERR4_88E BIT17 /* Beacon DMA Error Int 4 */
+#define IMR_BCNDERR3_88E BIT16 /* Beacon DMA Error Int 3 */
+#define IMR_BCNDERR2_88E BIT15 /* Beacon DMA Error Int 2 */
+#define IMR_BCNDERR1_88E BIT14 /* Beacon DMA Error Int 1 */
+#define IMR_ATIMEND_E_88E BIT13 /* ATIM Window End Ext for Win7 */
+#define IMR_TXERR_88E BIT11 /* Tx Err Flag Int Status, write 1 clear. */
+#define IMR_RXERR_88E BIT10 /* Rx Err Flag INT Status, Write 1 clear */
+#define IMR_TXFOVW_88E BIT9 /* Transmit FIFO Overflow */
+#define IMR_RXFOVW_88E BIT8 /* Receive FIFO Overflow */
+
+#define HAL_NIC_UNPLUG_ISR 0xFFFFFFFF /* The value when the NIC is unplugged for PCI. */
+
+/* 8192C EFUSE */
+#define HWSET_MAX_SIZE 256
+#define HWSET_MAX_SIZE_88E 512
+
+/*===================================================================
+=====================================================================
+The register defines below are for the 92C. When a define is the same as
+on the 92C, we reuse the 92C define for consistency.
+So the following 92C defines are not complete.
+=====================================================================
+=====================================================================*/
+/*
+Based on Datasheet V33---090401
+Register Summary
+Current IOREG MAP
+0x0000h ~ 0x00FFh System Configuration (256 Bytes)
+0x0100h ~ 0x01FFh MACTOP General Configuration (256 Bytes)
+0x0200h ~ 0x027Fh TXDMA Configuration (128 Bytes)
+0x0280h ~ 0x02FFh RXDMA Configuration (128 Bytes)
+0x0300h ~ 0x03FFh PCIE EMAC Reserved Region (256 Bytes)
+0x0400h ~ 0x04FFh Protocol Configuration (256 Bytes)
+0x0500h ~ 0x05FFh EDCA Configuration (256 Bytes)
+0x0600h ~ 0x07FFh WMAC Configuration (512 Bytes)
+0x2000h ~ 0x3FFFh	8051 FW Download Region (8192 Bytes)
+*/
+/* 8192C (TXPAUSE) transmission pause (Offset 0x522, 8 bits) */
+/* Note: */
+/* The bits for stopping the AC (VO/VI/BE/BK) queues in the
+ * RTL8192S/RTL8192C datasheets are wrong; */
+/* the correct arrangement is VO - Bit0, VI - Bit1, BE - Bit2,
+ * and BK - Bit3. */
+/* 8723 and 88E may not be correct either in earlier versions. */
+#define StopBecon BIT6
+#define StopHigh BIT5
+#define StopMgt BIT4
+#define StopBK BIT3
+#define StopBE BIT2
+#define StopVI BIT1
+#define StopVO BIT0
+
+/* 8192C (RCR) Receive Configuration Register(Offset 0x608, 32 bits) */
+#define RCR_APPFCS BIT31 /* WMAC append FCS after payload */
+#define RCR_APP_MIC BIT30
+#define RCR_APP_PHYSTS BIT28
+#define RCR_APP_ICV BIT29
+#define RCR_APP_PHYST_RXFF BIT28
+#define RCR_APP_BA_SSN BIT27 /* Accept BA SSN */
+#define RCR_ENMBID BIT24 /* Enable Multiple BssId. */
+#define RCR_LSIGEN BIT23
+#define RCR_MFBEN BIT22
+#define RCR_HTC_LOC_CTRL BIT14 /* MFC<--HTC=1 MFC-->HTC=0 */
+#define RCR_AMF BIT13 /* Accept management type frame */
+#define RCR_ACF BIT12 /* Accept control type frame */
+#define RCR_ADF BIT11 /* Accept data type frame */
+#define RCR_AICV BIT9 /* Accept ICV error packet */
+#define RCR_ACRC32 BIT8 /* Accept CRC32 error packet */
+#define RCR_CBSSID_BCN BIT7 /* Accept BSSID match packet
+ * (Rx beacon, probe rsp) */
+#define RCR_CBSSID_DATA BIT6 /* Accept BSSID match (Data)*/
+#define RCR_CBSSID RCR_CBSSID_DATA /* Accept BSSID match */
+#define RCR_APWRMGT BIT5 /* Accept power management pkt*/
+#define RCR_ADD3 BIT4 /* Accept address 3 match pkt */
+#define RCR_AB BIT3 /* Accept broadcast packet */
+#define RCR_AM BIT2 /* Accept multicast packet */
+#define RCR_APM BIT1 /* Accept physical match pkt */
+#define RCR_AAP BIT0 /* Accept all unicast packet */
+#define RCR_MXDMA_OFFSET 8
+#define RCR_FIFO_OFFSET 13
+
+/* 0xFE00h ~ 0xFE55h USB Configuration */
+#define REG_USB_INFO 0xFE17
+#define REG_USB_SPECIAL_OPTION 0xFE55
+#define REG_USB_DMA_AGG_TO 0xFE5B
+#define REG_USB_AGG_TO 0xFE5C
+#define REG_USB_AGG_TH 0xFE5D
+
+#define REG_USB_HRPWM 0xFE58
+#define REG_USB_HCPWM 0xFE57
+/* 8192C Register Bit and Content definition */
+/* 0x0000h ~ 0x00FFh System Configuration */
+
+/* 2 SYS_ISO_CTRL */
+#define ISO_MD2PP BIT(0)
+#define ISO_UA2USB BIT(1)
+#define ISO_UD2CORE BIT(2)
+#define ISO_PA2PCIE BIT(3)
+#define ISO_PD2CORE BIT(4)
+#define ISO_IP2MAC BIT(5)
+#define ISO_DIOP BIT(6)
+#define ISO_DIOE BIT(7)
+#define ISO_EB2CORE BIT(8)
+#define ISO_DIOR BIT(9)
+#define PWC_EV12V BIT(15)
+
+/* 2 SYS_FUNC_EN */
+#define FEN_BBRSTB BIT(0)
+#define FEN_BB_GLB_RSTn BIT(1)
+#define FEN_USBA BIT(2)
+#define FEN_UPLL BIT(3)
+#define FEN_USBD BIT(4)
+#define FEN_DIO_PCIE BIT(5)
+#define FEN_PCIEA BIT(6)
+#define FEN_PPLL BIT(7)
+#define FEN_PCIED BIT(8)
+#define FEN_DIOE BIT(9)
+#define FEN_CPUEN BIT(10)
+#define FEN_DCORE BIT(11)
+#define FEN_ELDR BIT(12)
+#define FEN_DIO_RF BIT(13)
+#define FEN_HWPDN BIT(14)
+#define FEN_MREGEN BIT(15)
+
+/* 2 APS_FSMCO */
+#define PFM_LDALL BIT(0)
+#define PFM_ALDN BIT(1)
+#define PFM_LDKP BIT(2)
+#define PFM_WOWL BIT(3)
+#define EnPDN BIT(4)
+#define PDN_PL BIT(5)
+#define APFM_ONMAC BIT(8)
+#define APFM_OFF BIT(9)
+#define APFM_RSM BIT(10)
+#define AFSM_HSUS BIT(11)
+#define AFSM_PCIE BIT(12)
+#define APDM_MAC BIT(13)
+#define APDM_HOST BIT(14)
+#define APDM_HPDN BIT(15)
+#define RDY_MACON BIT(16)
+#define SUS_HOST BIT(17)
+#define ROP_ALD BIT(20)
+#define ROP_PWR BIT(21)
+#define ROP_SPS BIT(22)
+#define SOP_MRST BIT(25)
+#define SOP_FUSE BIT(26)
+#define SOP_ABG BIT(27)
+#define SOP_AMB BIT(28)
+#define SOP_RCK BIT(29)
+#define SOP_A8M BIT(30)
+#define XOP_BTCK BIT(31)
+
+/* 2 SYS_CLKR */
+#define ANAD16V_EN BIT(0)
+#define ANA8M BIT(1)
+#define MACSLP BIT(4)
+#define LOADER_CLK_EN BIT(5)
+
+/* 2 9346CR */
+
+#define BOOT_FROM_EEPROM BIT(4)
+#define EEPROM_EN BIT(5)
+
+/* 2 SPS0_CTRL */
+
+/* 2 SPS_OCP_CFG */
+
+/* 2 RF_CTRL */
+#define RF_EN BIT(0)
+#define RF_RSTB BIT(1)
+#define RF_SDMRSTB BIT(2)
+
+/* 2 LDOV12D_CTRL */
+#define LDV12_EN BIT(0)
+#define LDV12_SDBY BIT(1)
+#define LPLDO_HSM BIT(2)
+#define LPLDO_LSM_DIS BIT(3)
+#define _LDV12_VADJ(x) (((x) & 0xF) << 4)
+
+/* 2EFUSE_CTRL */
+#define ALD_EN BIT(18)
+#define EF_PD BIT(19)
+#define EF_FLAG BIT(31)
+
+/* 2 EFUSE_TEST (For RTL8723 partially) */
+#define EF_TRPT BIT(7)
+/* 00: Wifi Efuse, 01: BT Efuse0, 10: BT Efuse1, 11: BT Efuse2 */
+#define EF_CELL_SEL (BIT(8)|BIT(9))
+#define LDOE25_EN BIT(31)
+#define EFUSE_SEL(x) (((x) & 0x3) << 8)
+#define EFUSE_SEL_MASK 0x300
+#define EFUSE_WIFI_SEL_0 0x0
+#define EFUSE_BT_SEL_0 0x1
+#define EFUSE_BT_SEL_1 0x2
+#define EFUSE_BT_SEL_2 0x3
+
+#define EFUSE_ACCESS_ON 0x69 /* For RTL8723 only. */
+#define EFUSE_ACCESS_OFF 0x00 /* For RTL8723 only. */
+
+/* 2 8051FWDL */
+/* 2 MCUFWDL */
+#define MCUFWDL_EN BIT(0)
+#define MCUFWDL_RDY BIT(1)
+#define FWDL_ChkSum_rpt BIT(2)
+#define MACINI_RDY BIT(3)
+#define BBINI_RDY BIT(4)
+#define RFINI_RDY BIT(5)
+#define WINTINI_RDY BIT(6)
+#define RAM_DL_SEL BIT(7) /* 1:RAM, 0:ROM */
+#define ROM_DLEN BIT(19)
+#define CPRST BIT(23)
+
+/* 2 REG_SYS_CFG */
+#define XCLK_VLD BIT(0)
+#define ACLK_VLD BIT(1)
+#define UCLK_VLD BIT(2)
+#define PCLK_VLD BIT(3)
+#define PCIRSTB BIT(4)
+#define V15_VLD BIT(5)
+#define SW_OFFLOAD_EN BIT(7)
+#define SIC_IDLE BIT(8)
+#define BD_MAC2 BIT(9)
+#define BD_MAC1 BIT(10)
+#define IC_MACPHY_MODE BIT(11)
+#define CHIP_VER (BIT(12)|BIT(13)|BIT(14)|BIT(15))
+#define BT_FUNC BIT(16)
+#define VENDOR_ID BIT(19)
+#define PAD_HWPD_IDN BIT(22)
+#define TRP_VAUX_EN BIT(23) /* RTL ID */
+#define TRP_BT_EN BIT(24)
+#define BD_PKG_SEL BIT(25)
+#define BD_HCI_SEL BIT(26)
+#define TYPE_ID BIT(27)
+
+#define CHIP_VER_RTL_MASK 0xF000 /* Bit 12 ~ 15 */
+#define CHIP_VER_RTL_SHIFT 12
+
+/* 2REG_GPIO_OUTSTS (For RTL8723 only) */
+#define EFS_HCI_SEL (BIT(0)|BIT(1))
+#define PAD_HCI_SEL (BIT(2)|BIT(3))
+#define HCI_SEL (BIT(4)|BIT(5))
+#define PKG_SEL_HCI BIT(6)
+#define FEN_GPS BIT(7)
+#define FEN_BT BIT(8)
+#define FEN_WL BIT(9)
+#define FEN_PCI BIT(10)
+#define FEN_USB BIT(11)
+#define BTRF_HWPDN_N BIT(12)
+#define WLRF_HWPDN_N BIT(13)
+#define PDN_BT_N BIT(14)
+#define PDN_GPS_N BIT(15)
+#define BT_CTL_HWPDN BIT(16)
+#define GPS_CTL_HWPDN BIT(17)
+#define PPHY_SUSB BIT(20)
+#define UPHY_SUSB BIT(21)
+#define PCI_SUSEN BIT(22)
+#define USB_SUSEN BIT(23)
+#define RF_RL_ID (BIT(31)|BIT(30)|BIT(29)|BIT(28))
+
+/* 2SYS_CFG */
+#define RTL_ID BIT(23) /* TestChip ID, 1:Test(RLE); 0:MP(RL) */
+
+/* 0x0100h ~ 0x01FFh MACTOP General Configuration */
+
+/* 2 Function Enable Registers */
+/* 2 CR */
+
+#define HCI_TXDMA_EN BIT(0)
+#define HCI_RXDMA_EN BIT(1)
+#define TXDMA_EN BIT(2)
+#define RXDMA_EN BIT(3)
+#define PROTOCOL_EN BIT(4)
+#define SCHEDULE_EN BIT(5)
+#define MACTXEN BIT(6)
+#define MACRXEN BIT(7)
+#define ENSWBCN BIT(8)
+#define ENSEC BIT(9)
+#define CALTMR_EN BIT(10) /* 32k CAL TMR enable */
+
+/* Network type */
+#define _NETTYPE(x) (((x) & 0x3) << 16)
+#define MASK_NETTYPE 0x30000
+#define NT_NO_LINK 0x0
+#define NT_LINK_AD_HOC 0x1
+#define NT_LINK_AP 0x2
+#define NT_AS_AP 0x3
+
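+/*
+ * Illustrative sketch, not part of the original header: the network type
+ * occupies bits [17:16] of REG_CR, so switching the port to
+ * infrastructure-client mode means clearing MASK_NETTYPE and OR-ing in
+ * _NETTYPE(NT_LINK_AP).  The helper name is hypothetical and the register
+ * read/write itself is driver-specific and omitted.
+ */
+static inline u32 cr_set_nettype_example(u32 cr, u32 type)
+{
+	return (cr & ~MASK_NETTYPE) | _NETTYPE(type);
+}
+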
+/* 2 PBP - Page Size Register */
+#define GET_RX_PAGE_SIZE(value) ((value) & 0xF)
+#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4)
+#define _PSRX_MASK 0xF
+#define _PSTX_MASK 0xF0
+#define _PSRX(x) (x)
+#define _PSTX(x) ((x) << 4)
+
+#define PBP_64 0x0
+#define PBP_128 0x1
+#define PBP_256 0x2
+#define PBP_512 0x3
+#define PBP_1024 0x4
+
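+/*
+ * Illustrative sketch, not part of the original header: PBP packs the RX
+ * page size in its low nibble and the TX page size in the high nibble, so
+ * a 128-byte RX / 256-byte TX page layout would be encoded as below and
+ * decoded again with GET_RX_PAGE_SIZE()/GET_TX_PAGE_SIZE().  The _EXAMPLE
+ * name is hypothetical.
+ */
+#define PBP_VAL_EXAMPLE		(_PSRX(PBP_128) | _PSTX(PBP_256))
+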
+/* 2 TX/RXDMA */
+#define RXDMA_ARBBW_EN BIT(0)
+#define RXSHFT_EN BIT(1)
+#define RXDMA_AGG_EN BIT(2)
+#define QS_VO_QUEUE BIT(8)
+#define QS_VI_QUEUE BIT(9)
+#define QS_BE_QUEUE BIT(10)
+#define QS_BK_QUEUE BIT(11)
+#define QS_MANAGER_QUEUE BIT(12)
+#define QS_HIGH_QUEUE BIT(13)
+
+#define HQSEL_VOQ BIT(0)
+#define HQSEL_VIQ BIT(1)
+#define HQSEL_BEQ BIT(2)
+#define HQSEL_BKQ BIT(3)
+#define HQSEL_MGTQ BIT(4)
+#define HQSEL_HIQ BIT(5)
+
+/* For normal driver, 0x10C */
+#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14)
+#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12)
+#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10)
+#define _TXDMA_BEQ_MAP(x)	(((x)&0x3) << 8)
+#define _TXDMA_VIQ_MAP(x)	(((x)&0x3) << 6)
+#define _TXDMA_VOQ_MAP(x)	(((x)&0x3) << 4)
+
+#define QUEUE_LOW 1
+#define QUEUE_NORMAL 2
+#define QUEUE_HIGH 3
+
+/* 2 TRXFF_BNDY */
+
+/* 2 LLT_INIT */
+#define _LLT_NO_ACTIVE 0x0
+#define _LLT_WRITE_ACCESS 0x1
+#define _LLT_READ_ACCESS 0x2
+
+#define _LLT_INIT_DATA(x) ((x) & 0xFF)
+#define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8)
+#define _LLT_OP(x) (((x) & 0x3) << 30)
+#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3)
+
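+/*
+ * Illustrative sketch, not part of the original header: one LLT entry is
+ * programmed by composing address, data and the WRITE_ACCESS opcode into
+ * a single REG_LLT_INIT value, then polling until _LLT_OP_VALUE() of the
+ * read-back value returns _LLT_NO_ACTIVE.  This is the kind of value
+ * InitLLTTable()-style code would write; the helper name is hypothetical
+ * and the actual register access is driver-specific and omitted.
+ */
+static inline u32 llt_write_cmd_example(u32 address, u32 data)
+{
+	return _LLT_INIT_ADDR(address) | _LLT_INIT_DATA(data) |
+	       _LLT_OP(_LLT_WRITE_ACCESS);
+}
+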
+/* 0x0200h ~ 0x027Fh TXDMA Configuration */
+/* 2RQPN */
+#define _HPQ(x) ((x) & 0xFF)
+#define _LPQ(x) (((x) & 0xFF) << 8)
+#define _PUBQ(x) (((x) & 0xFF) << 16)
+/* NOTE: in RQPN_NPQ register */
+#define _NPQ(x) ((x) & 0xFF)
+
+#define HPQ_PUBLIC_DIS BIT(24)
+#define LPQ_PUBLIC_DIS BIT(25)
+#define LD_RQPN BIT(31)
+
+/* 2TDECTRL */
+#define BCN_VALID BIT(16)
+#define BCN_HEAD(x) (((x) & 0xFF) << 8)
+#define BCN_HEAD_MASK 0xFF00
+
+/* 2 TDECTL */
+#define BLK_DESC_NUM_SHIFT 4
+#define BLK_DESC_NUM_MASK 0xF
+
+/* 2 TXDMA_OFFSET_CHK */
+#define DROP_DATA_EN BIT(9)
+
+/* 0x0280h ~ 0x028Bh RX DMA Configuration */
+
+/* REG_RXDMA_CONTROL, 0x0286h */
+
+/* 2 REG_RXPKT_NUM, 0x0284 */
+#define RXPKT_RELEASE_POLL BIT(16)
+#define RXDMA_IDLE BIT(17)
+#define RW_RELEASE_EN BIT(18)
+
+/* 0x0400h ~ 0x047Fh Protocol Configuration */
+/* 2 FWHW_TXQ_CTRL */
+#define EN_AMPDU_RTY_NEW BIT(7)
+
+/* 2 SPEC SIFS */
+#define _SPEC_SIFS_CCK(x) ((x) & 0xFF)
+#define _SPEC_SIFS_OFDM(x) (((x) & 0xFF) << 8)
+
+/* 2 RL */
+#define RETRY_LIMIT_SHORT_SHIFT 8
+#define RETRY_LIMIT_LONG_SHIFT 0
+
+/* 0x0500h ~ 0x05FFh EDCA Configuration */
+
+/* 2 EDCA setting */
+#define AC_PARAM_TXOP_LIMIT_OFFSET 16
+#define AC_PARAM_ECW_MAX_OFFSET 12
+#define AC_PARAM_ECW_MIN_OFFSET 8
+#define AC_PARAM_AIFS_OFFSET 0
+
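+/*
+ * Illustrative sketch, not part of the original header: an EDCA parameter
+ * register (REG_EDCA_*_PARAM) packs AIFS, ECWmin, ECWmax and the TXOP
+ * limit at the offsets above.  Composing an AC_BE value for AIFS=2,
+ * ECWmin=4, ECWmax=10, TXOP=0 would look like this; the _EXAMPLE name and
+ * the parameter values are hypothetical.
+ */
+#define EDCA_BE_PARAM_EXAMPLE				\
+	((0 << AC_PARAM_TXOP_LIMIT_OFFSET) |		\
+	 (10 << AC_PARAM_ECW_MAX_OFFSET) |		\
+	 (4 << AC_PARAM_ECW_MIN_OFFSET) |		\
+	 (2 << AC_PARAM_AIFS_OFFSET))
+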
+#define _LRL(x) ((x) & 0x3F)
+#define _SRL(x) (((x) & 0x3F) << 8)
+
+/* 2 BCN_CTRL */
+#define EN_MBSSID BIT(1)
+#define EN_TXBCN_RPT BIT(2)
+#define EN_BCN_FUNCTION BIT(3)
+#define DIS_TSF_UPDATE BIT(3)
+
+/* The same function but different bit field. */
+#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4)
+#define DIS_TSF_UDT0_TEST_CHIP BIT(5)
+#define STOP_BCNQ BIT(6)
+
+/* 2 ACMHWCTRL */
+#define AcmHw_HwEn BIT(0)
+#define AcmHw_BeqEn BIT(1)
+#define AcmHw_ViqEn BIT(2)
+#define AcmHw_VoqEn BIT(3)
+#define AcmHw_BeqStatus BIT(4)
+#define AcmHw_ViqStatus BIT(5)
+#define AcmHw_VoqStatus BIT(6)
+
+/* 0x0600h ~ 0x07FFh WMAC Configuration */
+/* 2APSD_CTRL */
+#define APSDOFF BIT(6)
+#define APSDOFF_STATUS BIT(7)
+
+#define RATE_BITMAP_ALL 0xFFFFF
+
+/* Only use CCK 1M rate for ACK */
+#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1
+
+/* 2 TCR */
+#define TSFRST BIT(0)
+#define DIS_GCLK BIT(1)
+#define PAD_SEL BIT(2)
+#define PWR_ST BIT(6)
+#define PWRBIT_OW_EN BIT(7)
+#define ACRC BIT(8)
+#define CFENDFORM BIT(9)
+#define ICV BIT(10)
+
+/* 2 RCR */
+#define AAP BIT(0)
+#define APM BIT(1)
+#define AM BIT(2)
+#define AB BIT(3)
+#define ADD3 BIT(4)
+#define APWRMGT BIT(5)
+#define CBSSID BIT(6)
+#define CBSSID_DATA BIT(6)
+#define CBSSID_BCN BIT(7)
+#define ACRC32 BIT(8)
+#define AICV BIT(9)
+#define ADF BIT(11)
+#define ACF BIT(12)
+#define AMF BIT(13)
+#define HTC_LOC_CTRL BIT(14)
+#define UC_DATA_EN BIT(16)
+#define BM_DATA_EN BIT(17)
+#define MFBEN BIT(22)
+#define LSIGEN BIT(23)
+#define EnMBID BIT(24)
+#define APP_BASSN BIT(27)
+#define APP_PHYSTS BIT(28)
+#define APP_ICV BIT(29)
+#define APP_MIC BIT(30)
+#define APP_FCS BIT(31)
+
+/* 2 SECCFG */
+#define SCR_TxUseDK BIT(0) /* Force Tx Use Default Key */
+#define SCR_RxUseDK BIT(1) /* Force Rx Use Default Key */
+#define SCR_TxEncEnable BIT(2) /* Enable Tx Encryption */
+#define SCR_RxDecEnable BIT(3) /* Enable Rx Decryption */
+#define SCR_SKByA2		BIT(4)	/* Search key by A2 */
+#define SCR_NoSKMC BIT(5) /* No Key Search Multicast */
+#define SCR_TXBCUSEDK BIT(6) /* Force Tx Bcast pkt Use Default Key */
+#define SCR_RXBCUSEDK BIT(7) /* Force Rx Bcast pkt Use Default Key */
+
+/* RTL8188E SDIO Configuration */
+
+/* I/O bus domain address mapping */
+#define SDIO_LOCAL_BASE 0x10250000
+#define WLAN_IOREG_BASE 0x10260000
+#define FIRMWARE_FIFO_BASE 0x10270000
+#define TX_HIQ_BASE 0x10310000
+#define TX_MIQ_BASE 0x10320000
+#define TX_LOQ_BASE 0x10330000
+#define RX_RX0FF_BASE 0x10340000
+
+/* SDIO host local register space mapping. */
+#define SDIO_LOCAL_MSK 0x0FFF
+#define WLAN_IOREG_MSK 0x7FFF
+#define WLAN_FIFO_MSK 0x1FFF /* Aggregation Length[12:0] */
+#define WLAN_RX0FF_MSK 0x0003
+
+/* Without ref to the SDIO Device ID */
+#define SDIO_WITHOUT_REF_DEVICE_ID 0
+#define SDIO_LOCAL_DEVICE_ID 0 /* 0b[16], 000b[15:13] */
+#define WLAN_TX_HIQ_DEVICE_ID 4 /* 0b[16], 100b[15:13] */
+#define WLAN_TX_MIQ_DEVICE_ID 5 /* 0b[16], 101b[15:13] */
+#define WLAN_TX_LOQ_DEVICE_ID 6 /* 0b[16], 110b[15:13] */
+#define WLAN_RX0FF_DEVICE_ID 7 /* 0b[16], 111b[15:13] */
+#define WLAN_IOREG_DEVICE_ID 8 /* 1b[16] */
+
+/* SDIO Tx Free Page Index */
+#define HI_QUEUE_IDX 0
+#define MID_QUEUE_IDX 1
+#define LOW_QUEUE_IDX 2
+#define PUBLIC_QUEUE_IDX 3
+
+#define SDIO_MAX_TX_QUEUE 3 /* HIQ, MIQ and LOQ */
+#define SDIO_MAX_RX_QUEUE 1
+
+/* SDIO Tx Control */
+#define SDIO_REG_TX_CTRL 0x0000
+/* SDIO Host Interrupt Mask */
+#define SDIO_REG_HIMR 0x0014
+/* SDIO Host Interrupt Service Routine */
+#define SDIO_REG_HISR 0x0018
+/* HCI Current Power Mode */
+#define SDIO_REG_HCPWM 0x0019
+/* RXDMA Request Length */
+#define SDIO_REG_RX0_REQ_LEN 0x001C
+/* Free Tx Buffer Page */
+#define SDIO_REG_FREE_TXPG 0x0020
+/* HCI Current Power Mode 1 */
+#define SDIO_REG_HCPWM1 0x0024
+/* HCI Current Power Mode 2 */
+#define SDIO_REG_HCPWM2 0x0026
+/* HTSF Information */
+#define SDIO_REG_HTSFR_INFO 0x0030
+/* HCI Request Power Mode 1 */
+#define SDIO_REG_HRPWM1 0x0080
+/* HCI Request Power Mode 2 */
+#define SDIO_REG_HRPWM2 0x0082
+/* HCI Power Save Clock */
+#define SDIO_REG_HPS_CLKR 0x0084
+/* SDIO HCI Suspend Control */
+#define SDIO_REG_HSUS_CTRL 0x0086
+/* SDIO Host Extension Interrupt Mask Always */
+#define SDIO_REG_HIMR_ON 0x0090
+/* SDIO Host Extension Interrupt Status Always */
+#define SDIO_REG_HISR_ON 0x0091
+
+#define SDIO_HIMR_DISABLED 0
+
+/* RTL8188E SDIO Host Interrupt Mask Register */
+#define SDIO_HIMR_RX_REQUEST_MSK BIT0
+#define SDIO_HIMR_AVAL_MSK BIT1
+#define SDIO_HIMR_TXERR_MSK BIT2
+#define SDIO_HIMR_RXERR_MSK BIT3
+#define SDIO_HIMR_TXFOVW_MSK BIT4
+#define SDIO_HIMR_RXFOVW_MSK BIT5
+#define SDIO_HIMR_TXBCNOK_MSK BIT6
+#define SDIO_HIMR_TXBCNERR_MSK BIT7
+#define SDIO_HIMR_BCNERLY_INT_MSK BIT16
+#define SDIO_HIMR_C2HCMD_MSK BIT17
+#define SDIO_HIMR_CPWM1_MSK BIT18
+#define SDIO_HIMR_CPWM2_MSK BIT19
+#define SDIO_HIMR_HSISR_IND_MSK BIT20
+#define SDIO_HIMR_GTINT3_IND_MSK BIT21
+#define SDIO_HIMR_GTINT4_IND_MSK BIT22
+#define SDIO_HIMR_PSTIMEOUT_MSK BIT23
+#define SDIO_HIMR_OCPINT_MSK BIT24
+#define SDIO_HIMR_ATIMEND_MSK BIT25
+#define SDIO_HIMR_ATIMEND_E_MSK BIT26
+#define SDIO_HIMR_CTWEND_MSK BIT27
+
+/* RTL8188E SDIO Specific */
+#define SDIO_HIMR_MCU_ERR_MSK BIT28
+#define SDIO_HIMR_TSF_BIT32_TOGGLE_MSK BIT29
+
+/* SDIO Host Interrupt Service Routine */
+#define SDIO_HISR_RX_REQUEST BIT0
+#define SDIO_HISR_AVAL BIT1
+#define SDIO_HISR_TXERR BIT2
+#define SDIO_HISR_RXERR BIT3
+#define SDIO_HISR_TXFOVW BIT4
+#define SDIO_HISR_RXFOVW BIT5
+#define SDIO_HISR_TXBCNOK BIT6
+#define SDIO_HISR_TXBCNERR BIT7
+#define SDIO_HISR_BCNERLY_INT BIT16
+#define SDIO_HISR_C2HCMD BIT17
+#define SDIO_HISR_CPWM1 BIT18
+#define SDIO_HISR_CPWM2 BIT19
+#define SDIO_HISR_HSISR_IND BIT20
+#define SDIO_HISR_GTINT3_IND BIT21
+#define SDIO_HISR_GTINT4_IND BIT22
+#define SDIO_HISR_PSTIMEOUT		BIT23
+#define SDIO_HISR_OCPINT BIT24
+#define SDIO_HISR_ATIMEND BIT25
+#define SDIO_HISR_ATIMEND_E BIT26
+#define SDIO_HISR_CTWEND BIT27
+
+/* RTL8188E SDIO Specific */
+#define SDIO_HISR_MCU_ERR BIT28
+#define SDIO_HISR_TSF_BIT32_TOGGLE BIT29
+
+#define MASK_SDIO_HISR_CLEAR \
+ (SDIO_HISR_TXERR | SDIO_HISR_RXERR | SDIO_HISR_TXFOVW |\
+ SDIO_HISR_RXFOVW | SDIO_HISR_TXBCNOK | SDIO_HISR_TXBCNERR |\
+ SDIO_HISR_C2HCMD | SDIO_HISR_CPWM1 | SDIO_HISR_CPWM2 |\
+ SDIO_HISR_HSISR_IND | SDIO_HISR_GTINT3_IND | SDIO_HISR_GTINT4_IND |\
+ SDIO_HISR_PSTIMEOUT | SDIO_HISR_OCPINT)
+
+/* SDIO HCI Suspend Control Register */
+#define HCI_RESUME_PWR_RDY BIT1
+#define HCI_SUS_CTRL BIT0
+
+/* SDIO Tx FIFO related */
+/* The number of Tx FIFO free page */
+#define SDIO_TX_FREE_PG_QUEUE 4
+#define SDIO_TX_FIFO_PAGE_SZ 128
+
+/* 0xFE00h ~ 0xFE55h USB Configuration */
+
+/* 2 USB Information (0xFE17) */
+#define USB_IS_HIGH_SPEED 0
+#define USB_IS_FULL_SPEED 1
+#define USB_SPEED_MASK BIT(5)
+
+#define USB_NORMAL_SIE_EP_MASK 0xF
+#define USB_NORMAL_SIE_EP_SHIFT 4
+
+/* 2 Special Option */
+#define USB_AGG_EN BIT(3)
+
+/* 0: Use interrupt endpoint to upload interrupt pkt */
+/* 1: Use bulk endpoint to upload interrupt pkt */
+#define INT_BULK_SEL BIT(4)
+
+/* 2REG_C2HEVT_CLEAR */
+/* Set by the driver to notify FW that the driver has read
+ * the C2H command message */
+#define C2H_EVT_HOST_CLOSE 0x00
+/* Set by FW to indicate that FW has written the C2H command
+ * message and it has not yet been read by the driver. */
+#define C2H_EVT_FW_CLOSE 0xFF
+
+/* 2REG_MULTI_FUNC_CTRL(For RTL8723 Only) */
+/* Enable GPIO[9] as WiFi HW PDn source */
+#define WL_HWPDN_EN BIT0
+/* WiFi HW PDn polarity control */
+#define WL_HWPDN_SL BIT1
+/* WiFi function enable */
+#define WL_FUNC_EN BIT2
+/* Enable GPIO[9] as WiFi RF HW PDn source */
+#define WL_HWROF_EN BIT3
+/* Enable GPIO[11] as BT HW PDn source */
+#define BT_HWPDN_EN BIT16
+/* BT HW PDn polarity control */
+#define BT_HWPDN_SL BIT17
+/* BT function enable */
+#define BT_FUNC_EN BIT18
+/* Enable GPIO[11] as BT/GPS RF HW PDn source */
+#define BT_HWROF_EN BIT19
+/* Enable GPIO[10] as GPS HW PDn source */
+#define GPS_HWPDN_EN BIT20
+/* GPS HW PDn polarity control */
+#define GPS_HWPDN_SL BIT21
+/* GPS function enable */
+#define GPS_FUNC_EN BIT22
+
+/* 3 REG_LIFECTRL_CTRL */
+#define HAL92C_EN_PKT_LIFE_TIME_BK BIT3
+#define HAL92C_EN_PKT_LIFE_TIME_BE BIT2
+#define HAL92C_EN_PKT_LIFE_TIME_VI BIT1
+#define HAL92C_EN_PKT_LIFE_TIME_VO BIT0
+
+#define HAL92C_MSDU_LIFE_TIME_UNIT 128 /* in us */
+
+/* General definitions */
+#define LAST_ENTRY_OF_TX_PKT_BUFFER 176 /* 22k 22528 bytes */
+
+#define POLLING_LLT_THRESHOLD 20
+#define POLLING_READY_TIMEOUT_COUNT 1000
+/* GPIO BIT */
+#define HAL_8192C_HW_GPIO_WPS_BIT BIT2
+
+/* 8192C EEPROM/EFUSE share register definition. */
+
+/* EEPROM/Efuse PG Offset for 88EE/88EU/88ES */
+#define EEPROM_TX_PWR_INX_88E 0x10
+
+#define EEPROM_ChannelPlan_88E 0xB8
+#define EEPROM_XTAL_88E 0xB9
+#define EEPROM_THERMAL_METER_88E 0xBA
+#define EEPROM_IQK_LCK_88E 0xBB
+
+#define EEPROM_RF_BOARD_OPTION_88E 0xC1
+#define EEPROM_RF_FEATURE_OPTION_88E 0xC2
+#define EEPROM_RF_BT_SETTING_88E 0xC3
+#define EEPROM_VERSION_88E 0xC4
+#define EEPROM_CUSTOMERID_88E 0xC5
+#define EEPROM_RF_ANTENNA_OPT_88E 0xC9
+
+/* RTL88EE */
+#define EEPROM_MAC_ADDR_88EE 0xD0
+#define EEPROM_VID_88EE 0xD6
+#define EEPROM_DID_88EE 0xD8
+#define EEPROM_SVID_88EE 0xDA
+#define EEPROM_SMID_88EE 0xDC
+
+/* RTL88EU */
+#define EEPROM_MAC_ADDR_88EU 0xD7
+#define EEPROM_VID_88EU 0xD0
+#define EEPROM_PID_88EU 0xD2
+#define EEPROM_USB_OPTIONAL_FUNCTION0 0xD4
+
+/* RTL88ES */
+#define EEPROM_MAC_ADDR_88ES 0x11A
+
+/* EEPROM/Efuse Value Type */
+#define EETYPE_TX_PWR 0x0
+
+/* Default Value for EEPROM or EFUSE!!! */
+#define EEPROM_Default_TSSI 0x0
+#define EEPROM_Default_TxPowerDiff 0x0
+#define EEPROM_Default_CrystalCap 0x5
+/* Default: 2X2, RTL8192CE(QFPN68) */
+#define EEPROM_Default_BoardType 0x02
+#define EEPROM_Default_TxPower 0x1010
+#define EEPROM_Default_HT2T_TxPwr 0x10
+
+#define EEPROM_Default_LegacyHTTxPowerDiff 0x3
+#define EEPROM_Default_ThermalMeter 0x12
+
+#define EEPROM_Default_AntTxPowerDiff 0x0
+#define EEPROM_Default_TxPwDiff_CrystalCap 0x5
+#define EEPROM_Default_TxPowerLevel 0x2A
+
+#define EEPROM_Default_HT40_2SDiff 0x0
+/* HT20<->40 default Tx Power Index Difference */
+#define EEPROM_Default_HT20_Diff 2
+#define EEPROM_Default_LegacyHTTxPowerDiff 0x3
+#define EEPROM_Default_HT40_PwrMaxOffset 0
+#define EEPROM_Default_HT20_PwrMaxOffset 0
+
+#define EEPROM_Default_CrystalCap_88E 0x20
+#define EEPROM_Default_ThermalMeter_88E 0x18
+
+/* New EFUSE default value */
+#define EEPROM_DEFAULT_24G_INDEX 0x2D
+#define EEPROM_DEFAULT_24G_HT20_DIFF 0X02
+#define EEPROM_DEFAULT_24G_OFDM_DIFF 0X04
+
+#define EEPROM_DEFAULT_5G_INDEX 0X2A
+#define EEPROM_DEFAULT_5G_HT20_DIFF 0X00
+#define EEPROM_DEFAULT_5G_OFDM_DIFF 0X04
+
+#define EEPROM_DEFAULT_DIFF 0XFE
+#define EEPROM_DEFAULT_CHANNEL_PLAN 0x7F
+#define EEPROM_DEFAULT_BOARD_OPTION 0x00
+#define EEPROM_DEFAULT_FEATURE_OPTION 0x00
+#define EEPROM_DEFAULT_BT_OPTION 0x10
+
+/* For debug */
+#define EEPROM_Default_PID 0x1234
+#define EEPROM_Default_VID 0x5678
+#define EEPROM_Default_CustomerID 0xAB
+#define EEPROM_Default_CustomerID_8188E 0x00
+#define EEPROM_Default_SubCustomerID 0xCD
+#define EEPROM_Default_Version 0
+
+#define EEPROM_CHANNEL_PLAN_FCC 0x0
+#define EEPROM_CHANNEL_PLAN_IC 0x1
+#define EEPROM_CHANNEL_PLAN_ETSI 0x2
+#define EEPROM_CHANNEL_PLAN_SPA 0x3
+#define EEPROM_CHANNEL_PLAN_FRANCE 0x4
+#define EEPROM_CHANNEL_PLAN_MKK 0x5
+#define EEPROM_CHANNEL_PLAN_MKK1 0x6
+#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7
+#define EEPROM_CHANNEL_PLAN_TELEC 0x8
+#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMA 0x9
+#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA
+#define EEPROM_CHANNEL_PLAN_NCC 0xB
+#define EEPROM_USB_OPTIONAL1 0xE
+#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
+
+#define EEPROM_CID_DEFAULT 0x0
+#define EEPROM_CID_TOSHIBA 0x4
+#define EEPROM_CID_CCX 0x10 /* CCX test. */
+#define EEPROM_CID_QMI 0x0D
+#define EEPROM_CID_WHQL 0xFE
+#define RTL_EEPROM_ID 0x8129
+
+#endif /* __RTL8188E_SPEC_H__ */
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_sreset.h b/drivers/staging/rtl8188eu/include/rtl8188e_sreset.h
new file mode 100644
index 00000000000..a29e6951979
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_sreset.h
@@ -0,0 +1,31 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTL8188E_SRESET_H_
+#define _RTL8188E_SRESET_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <rtw_sreset.h>
+
+void rtl8188e_silentreset_for_specific_platform(struct adapter *padapter);
+void rtl8188e_sreset_xmit_status_check(struct adapter *padapter);
+void rtl8188e_sreset_linked_status_check(struct adapter *padapter);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
new file mode 100644
index 00000000000..cf7267a5365
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
@@ -0,0 +1,178 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTL8188E_XMIT_H__
+#define __RTL8188E_XMIT_H__
+
+#define MAX_TX_AGG_PACKET_NUMBER 0xFF
+/* */
+/* Queue Select Value in TxDesc */
+/* */
+#define QSLT_BK 0x2/* 0x01 */
+#define QSLT_BE 0x0
+#define QSLT_VI 0x5/* 0x4 */
+#define QSLT_VO 0x7/* 0x6 */
+#define QSLT_BEACON 0x10
+#define QSLT_HIGH 0x11
+#define QSLT_MGNT 0x12
+#define QSLT_CMD 0x13
+
+/* For 88e early mode */
+#define SET_EARLYMODE_PKTNUM(__pAddr, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pAddr, 0, 3, __Value)
+#define SET_EARLYMODE_LEN0(__pAddr, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pAddr, 4, 12, __Value)
+#define SET_EARLYMODE_LEN1(__pAddr, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pAddr, 16, 12, __Value)
+#define SET_EARLYMODE_LEN2_1(__pAddr, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pAddr, 28, 4, __Value)
+#define SET_EARLYMODE_LEN2_2(__pAddr, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pAddr+4, 0, 8, __Value)
+#define SET_EARLYMODE_LEN3(__pAddr, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pAddr+4, 8, 12, __Value)
+#define SET_EARLYMODE_LEN4(__pAddr, __Value) \
+ SET_BITS_TO_LE_4BYTE(__pAddr+4, 20, 12, __Value)
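+
+/*
+ * Illustrative note (added for clarity; not part of the original patch):
+ * the early-mode header is an 8-byte little-endian block placed in front
+ * of the first TX descriptor.  A transmit path aggregating two packets
+ * might fill it roughly as:
+ *
+ *	SET_EARLYMODE_PKTNUM(pbuf, 2);
+ *	SET_EARLYMODE_LEN0(pbuf, len0);
+ *	SET_EARLYMODE_LEN1(pbuf, len1);
+ *
+ * where pbuf and the lengths are hypothetical local variables.  Note that
+ * the LEN2_1/LEN2_2 pair splits one 12-bit length across the 4-byte
+ * boundary (4 low bits in the first dword, 8 high bits in the second).
+ */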
+
+/* */
+/* defined for TX DESC Operation */
+/* */
+
+#define MAX_TID (15)
+
+/* OFFSET 0 */
+#define OFFSET_SZ 0
+#define OFFSET_SHT 16
+#define BMC BIT(24)
+#define LSG BIT(26)
+#define FSG BIT(27)
+#define OWN BIT(31)
+
+
+/* OFFSET 4 */
+#define PKT_OFFSET_SZ 0
+#define QSEL_SHT 8
+#define RATE_ID_SHT 16
+#define NAVUSEHDR BIT(20)
+#define SEC_TYPE_SHT 22
+#define PKT_OFFSET_SHT 26
+
+/* OFFSET 8 */
+#define AGG_EN BIT(12)
+#define AGG_BK BIT(16)
+#define AMPDU_DENSITY_SHT 20
+#define ANTSEL_A BIT(24)
+#define ANTSEL_B BIT(25)
+#define TX_ANT_CCK_SHT 26
+#define TX_ANTL_SHT 28
+#define TX_ANT_HT_SHT 30
+
+/* OFFSET 12 */
+#define SEQ_SHT 16
+#define EN_HWSEQ BIT(31)
+
+/* OFFSET 16 */
+#define QOS BIT(6)
+#define HW_SSN BIT(7)
+#define USERATE BIT(8)
+#define DISDATAFB BIT(10)
+#define CTS_2_SELF BIT(11)
+#define RTS_EN BIT(12)
+#define HW_RTS_EN BIT(13)
+#define DATA_SHORT BIT(24)
+#define PWR_STATUS_SHT 15
+#define DATA_SC_SHT 20
+#define DATA_BW BIT(25)
+
+/* OFFSET 20 */
+#define RTY_LMT_EN BIT(17)
+
+enum TXDESC_SC {
+ SC_DONT_CARE = 0x00,
+ SC_UPPER = 0x01,
+ SC_LOWER = 0x02,
+ SC_DUPLICATE = 0x03
+};
+/* OFFSET 20 */
+#define SGI BIT(6)
+#define USB_TXAGG_NUM_SHT 24
+
+#define txdesc_set_ccx_sw_88e(txdesc, value) \
+ do { \
+ ((struct txdesc_88e *)(txdesc))->sw1 = (((value)>>8) & 0x0f); \
+ ((struct txdesc_88e *)(txdesc))->sw0 = ((value) & 0xff); \
+ } while (0)
+
+struct txrpt_ccx_88e {
+ /* offset 0 */
+ u8 tag1:1;
+ u8 pkt_num:3;
+ u8 txdma_underflow:1;
+ u8 int_bt:1;
+ u8 int_tri:1;
+ u8 int_ccx:1;
+
+ /* offset 1 */
+ u8 mac_id:6;
+ u8 pkt_ok:1;
+ u8 bmc:1;
+
+ /* offset 2 */
+ u8 retry_cnt:6;
+ u8 lifetime_over:1;
+ u8 retry_over:1;
+
+ /* offset 3 */
+ u8 ccx_qtime0;
+ u8 ccx_qtime1;
+
+ /* offset 5 */
+ u8 final_data_rate;
+
+ /* offset 6 */
+ u8 sw1:4;
+ u8 qsel:4;
+
+ /* offset 7 */
+ u8 sw0;
+};
+
+#define txrpt_ccx_sw_88e(txrpt_ccx) ((txrpt_ccx)->sw0 + ((txrpt_ccx)->sw1<<8))
+#define txrpt_ccx_qtime_88e(txrpt_ccx) \
+ ((txrpt_ccx)->ccx_qtime0+((txrpt_ccx)->ccx_qtime1<<8))
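+
+/*
+ * Illustrative sketch (added comment, not in the original patch): the two
+ * helpers above reassemble little-endian 16-bit fields from the TX report,
+ * e.g. for a report pointer 'rpt' (hypothetical name):
+ *
+ *	u16 sw = txrpt_ccx_sw_88e(rpt);		// rpt->sw0 | (rpt->sw1 << 8)
+ *	u16 qtime = txrpt_ccx_qtime_88e(rpt);	// ccx_qtime0 | (ccx_qtime1 << 8)
+ */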
+
+void rtl8188e_fill_fake_txdesc(struct adapter *padapter, u8 *pDesc,
+ u32 BufferLen, u8 IsPsPoll, u8 IsBTQosNull);
+s32 rtl8188eu_init_xmit_priv(struct adapter *padapter);
+void rtl8188eu_free_xmit_priv(struct adapter *padapter);
+s32 rtl8188eu_hal_xmit(struct adapter *padapter, struct xmit_frame *frame);
+s32 rtl8188eu_mgnt_xmit(struct adapter *padapter, struct xmit_frame *frame);
+s32 rtl8188eu_xmit_buf_handler(struct adapter *padapter);
+#define hal_xmit_handler rtl8188eu_xmit_buf_handler
+void rtl8188eu_xmit_tasklet(void *priv);
+s32 rtl8188eu_xmitframe_complete(struct adapter *padapter,
+ struct xmit_priv *pxmitpriv,
+ struct xmit_buf *pxmitbuf);
+
+void dump_txrpt_ccx_88e(void *buf);
+void handle_txrpt_ccx_88e(struct adapter *adapter, u8 *buf);
+
+void _dbg_dump_tx_info(struct adapter *padapter, int frame_tag,
+ struct tx_desc *ptxdesc);
+
+#endif /* __RTL8188E_XMIT_H__ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_android.h b/drivers/staging/rtl8188eu/include/rtw_android.h
new file mode 100644
index 00000000000..e85bf1ff01f
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_android.h
@@ -0,0 +1,64 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef __RTW_ANDROID_H__
+#define __RTW_ANDROID_H__
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+enum ANDROID_WIFI_CMD {
+ ANDROID_WIFI_CMD_START,
+ ANDROID_WIFI_CMD_STOP,
+ ANDROID_WIFI_CMD_SCAN_ACTIVE,
+ ANDROID_WIFI_CMD_SCAN_PASSIVE,
+ ANDROID_WIFI_CMD_RSSI,
+ ANDROID_WIFI_CMD_LINKSPEED,
+ ANDROID_WIFI_CMD_RXFILTER_START,
+ ANDROID_WIFI_CMD_RXFILTER_STOP,
+ ANDROID_WIFI_CMD_RXFILTER_ADD,
+ ANDROID_WIFI_CMD_RXFILTER_REMOVE,
+ ANDROID_WIFI_CMD_BTCOEXSCAN_START,
+ ANDROID_WIFI_CMD_BTCOEXSCAN_STOP,
+ ANDROID_WIFI_CMD_BTCOEXMODE,
+ ANDROID_WIFI_CMD_SETSUSPENDOPT,
+ ANDROID_WIFI_CMD_P2P_DEV_ADDR,
+ ANDROID_WIFI_CMD_SETFWPATH,
+ ANDROID_WIFI_CMD_SETBAND,
+ ANDROID_WIFI_CMD_GETBAND,
+ ANDROID_WIFI_CMD_COUNTRY,
+ ANDROID_WIFI_CMD_P2P_SET_NOA,
+ ANDROID_WIFI_CMD_P2P_GET_NOA,
+ ANDROID_WIFI_CMD_P2P_SET_PS,
+ ANDROID_WIFI_CMD_SET_AP_WPS_P2P_IE,
+ ANDROID_WIFI_CMD_MACADDR,
+ ANDROID_WIFI_CMD_BLOCK,
+ ANDROID_WIFI_CMD_WFD_ENABLE,
+ ANDROID_WIFI_CMD_WFD_DISABLE,
+ ANDROID_WIFI_CMD_WFD_SET_TCPPORT,
+ ANDROID_WIFI_CMD_WFD_SET_MAX_TPUT,
+ ANDROID_WIFI_CMD_WFD_SET_DEVTYPE,
+ ANDROID_WIFI_CMD_MAX
+};
+
+int rtw_android_cmdstr_to_num(char *cmdstr);
+int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd);
+
+#endif /* __RTW_ANDROID_H__ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_ap.h b/drivers/staging/rtl8188eu/include/rtw_ap.h
new file mode 100644
index 00000000000..92334015979
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_ap.h
@@ -0,0 +1,65 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_AP_H_
+#define __RTW_AP_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#ifdef CONFIG_88EU_AP_MODE
+
+/* external function */
+void rtw_indicate_sta_assoc_event(struct adapter *padapter,
+ struct sta_info *psta);
+void rtw_indicate_sta_disassoc_event(struct adapter *padapter,
+ struct sta_info *psta);
+void init_mlme_ap_info(struct adapter *padapter);
+void free_mlme_ap_info(struct adapter *padapter);
+void rtw_add_bcn_ie(struct adapter *padapter, struct wlan_bssid_ex *pnetwork,
+ u8 index, u8 *data, u8 len);
+void rtw_remove_bcn_ie(struct adapter *padapter,
+ struct wlan_bssid_ex *pnetwork, u8 index);
+void update_beacon(struct adapter *padapter, u8 ie_id,
+ u8 *oui, u8 tx);
+void add_RATid(struct adapter *padapter, struct sta_info *psta,
+ u8 rssi_level);
+void expire_timeout_chk(struct adapter *padapter);
+void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta);
+int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len);
+void rtw_set_macaddr_acl(struct adapter *padapter, int mode);
+int rtw_acl_add_sta(struct adapter *padapter, u8 *addr);
+int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr);
+
+#ifdef CONFIG_88EU_AP_MODE
+void associated_clients_update(struct adapter *padapter, u8 updated);
+void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta);
+u8 bss_cap_update_on_sta_leave(struct adapter *padapter, struct sta_info *psta);
+void sta_info_update(struct adapter *padapter, struct sta_info *psta);
+void ap_sta_info_defer_update(struct adapter *padapter, struct sta_info *psta);
+u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
+ bool active, u16 reason);
+int rtw_sta_flush(struct adapter *padapter);
+int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset);
+void start_ap_mode(struct adapter *padapter);
+void stop_ap_mode(struct adapter *padapter);
+#endif
+#endif /* end of CONFIG_88EU_AP_MODE */
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_br_ext.h b/drivers/staging/rtl8188eu/include/rtw_br_ext.h
new file mode 100644
index 00000000000..f21e7a4515d
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_br_ext.h
@@ -0,0 +1,66 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_BR_EXT_H_
+#define _RTW_BR_EXT_H_
+
+#define MACADDRLEN 6
+#define _DEBUG_ERR DBG_88E
+#define _DEBUG_INFO DBG_88E
+#define DEBUG_WARN DBG_88E
+#define DEBUG_INFO DBG_88E
+#define DEBUG_ERR DBG_88E
+#define GET_MY_HWADDR(padapter) ((padapter)->eeprompriv.mac_addr)
+
+#define NAT25_HASH_BITS 4
+#define NAT25_HASH_SIZE (1 << NAT25_HASH_BITS)
+#define NAT25_AGEING_TIME 300
+
+#define MAX_NETWORK_ADDR_LEN 17
+
+struct nat25_network_db_entry {
+ struct nat25_network_db_entry *next_hash;
+ struct nat25_network_db_entry **pprev_hash;
+ atomic_t use_count;
+ unsigned char macAddr[6];
+ unsigned long ageing_timer;
+ unsigned char networkAddr[MAX_NETWORK_ADDR_LEN];
+};
+
+enum NAT25_METHOD {
+ NAT25_MIN,
+ NAT25_CHECK,
+ NAT25_INSERT,
+ NAT25_LOOKUP,
+ NAT25_PARSE,
+ NAT25_MAX
+};
+
+struct br_ext_info {
+ unsigned int nat25_disable;
+ unsigned int macclone_enable;
+ unsigned int dhcp_bcst_disable;
+ int addPPPoETag; /* 1: Add PPPoE relay-SID, 0: disable */
+ unsigned char nat25_dmzMac[MACADDRLEN];
+ unsigned int nat25sc_disable;
+};
+
+void nat25_db_cleanup(struct adapter *priv);
+
+#endif /* _RTW_BR_EXT_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_cmd.h b/drivers/staging/rtl8188eu/include/rtw_cmd.h
new file mode 100644
index 00000000000..819285b9a78
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_cmd.h
@@ -0,0 +1,991 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_CMD_H_
+#define __RTW_CMD_H_
+
+#include <wlan_bssdef.h>
+#include <rtw_rf.h>
+#include <rtw_led.h>
+
+#define C2H_MEM_SZ (16*1024)
+
+#include <osdep_service.h>
+#include <ieee80211.h> /* <ieee80211/ieee80211.h> */
+
+#define FREE_CMDOBJ_SZ 128
+
+#define MAX_CMDSZ 1024
+#define MAX_RSPSZ 512
+#define MAX_EVTSZ 1024
+
+#define CMDBUFF_ALIGN_SZ 512
+
+struct cmd_obj {
+ struct adapter *padapter;
+ u16 cmdcode;
+ u8 res;
+ u8 *parmbuf;
+ u32 cmdsz;
+ u8 *rsp;
+ u32 rspsz;
+ struct list_head list;
+};
+
+struct cmd_priv {
+ struct semaphore cmd_queue_sema;
+ struct semaphore terminate_cmdthread_sema;
+ struct __queue cmd_queue;
+ u8 cmd_seq;
+ u8 *cmd_buf; /* shall be non-paged, and 4 bytes aligned */
+ u8 *cmd_allocated_buf;
+ u8 *rsp_buf; /* shall be non-paged, and 4 bytes aligned */
+ u8 *rsp_allocated_buf;
+ u32 cmd_issued_cnt;
+ u32 cmd_done_cnt;
+ u32 rsp_cnt;
+ u8 cmdthd_running;
+ struct adapter *padapter;
+};
+
+struct evt_priv {
+ struct work_struct c2h_wk;
+ bool c2h_wk_alive;
+ struct rtw_cbuf *c2h_queue;
+ #define C2H_QUEUE_MAX_LEN 10
+ ATOMIC_T event_seq;
+ u8 *evt_buf; /* shall be non-paged, and 4 bytes aligned */
+ u8 *evt_allocated_buf;
+ u32 evt_done_cnt;
+};
+
+#define init_h2fwcmd_w_parm_no_rsp(pcmd, pparm, code) \
+do {\
+ _rtw_init_listhead(&pcmd->list);\
+ pcmd->cmdcode = code;\
+ pcmd->parmbuf = (u8 *)(pparm);\
+ pcmd->cmdsz = sizeof(*pparm);\
+ pcmd->rsp = NULL;\
+ pcmd->rspsz = 0;\
+} while (0)
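+
+/*
+ * Illustrative usage sketch (added for clarity; not part of the original
+ * patch).  A typical issuer allocates a cmd_obj plus a parameter struct,
+ * binds them with the macro above and queues the pair, e.g.:
+ *
+ *	struct cmd_obj *pcmd = kzalloc(sizeof(*pcmd), GFP_KERNEL);
+ *	struct disconnect_parm *parm = kzalloc(sizeof(*parm), GFP_KERNEL);
+ *
+ *	parm->deauth_timeout_ms = 500;
+ *	init_h2fwcmd_w_parm_no_rsp(pcmd, parm, GEN_CMD_CODE(_DisConnect));
+ *	rtw_enqueue_cmd(pcmdpriv, pcmd);
+ *
+ * Error handling is omitted and pcmdpriv (the adapter's cmd_priv) is a
+ * hypothetical local; the command thread is expected to release both
+ * allocations via rtw_free_cmd_obj() once the handler has run.
+ */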
+
+struct c2h_evt_hdr {
+ u8 id:4;
+ u8 plen:4;
+ u8 seq;
+ u8 payload[0];
+};
+
+#define c2h_evt_exist(c2h_evt) ((c2h_evt)->id || (c2h_evt)->plen)
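+
+/*
+ * Illustrative sketch (added, not in the original patch): a C2H event read
+ * back from the firmware is this header followed by 'plen' payload bytes,
+ * so a consumer could do roughly
+ *
+ *	struct c2h_evt_hdr *evt = (struct c2h_evt_hdr *)buf;
+ *
+ *	if (c2h_evt_exist(evt))
+ *		handle_c2h(evt->id, evt->payload, evt->plen);
+ *
+ * where buf and handle_c2h() are hypothetical names used only for the
+ * example.
+ */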
+
+u32 rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *obj);
+struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv);
+void rtw_free_cmd_obj(struct cmd_obj *pcmd);
+
+int rtw_cmd_thread(void *context);
+
+u32 rtw_init_cmd_priv(struct cmd_priv *pcmdpriv);
+void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv);
+
+u32 rtw_init_evt_priv(struct evt_priv *pevtpriv);
+void rtw_free_evt_priv(struct evt_priv *pevtpriv);
+void rtw_cmd_clr_isr(struct cmd_priv *pcmdpriv);
+void rtw_evt_notify_isr(struct evt_priv *pevtpriv);
+#ifdef CONFIG_88EU_P2P
+u8 p2p_protocol_wk_cmd(struct adapter *padapter, int intCmdType);
+#endif /* CONFIG_88EU_P2P */
+
+enum rtw_drvextra_cmd_id {
+ NONE_WK_CID,
+ DYNAMIC_CHK_WK_CID,
+ DM_CTRL_WK_CID,
+ PBC_POLLING_WK_CID,
+ POWER_SAVING_CTRL_WK_CID,/* IPS,AUTOSuspend */
+ LPS_CTRL_WK_CID,
+ ANT_SELECT_WK_CID,
+ P2P_PS_WK_CID,
+ P2P_PROTO_WK_CID,
+ CHECK_HIQ_WK_CID,/* for softap mode, check hi queue if empty */
+ INTEl_WIDI_WK_CID,
+ C2H_WK_CID,
+ RTP_TIMER_CFG_WK_CID,
+ MAX_WK_CID
+};
+
+enum LPS_CTRL_TYPE {
+ LPS_CTRL_SCAN = 0,
+ LPS_CTRL_JOINBSS = 1,
+ LPS_CTRL_CONNECT = 2,
+ LPS_CTRL_DISCONNECT = 3,
+ LPS_CTRL_SPECIAL_PACKET = 4,
+ LPS_CTRL_LEAVE = 5,
+};
+
+enum RFINTFS {
+ SWSI,
+ HWSI,
+ HWPI,
+};
+
+/*
+Caller Mode: Infra, Ad-HoC(C)
+
+Notes: To enter USB suspend mode
+
+Command Mode
+
+*/
+struct usb_suspend_parm {
+ u32 action;/* 1: sleep, 0:resume */
+};
+
+/*
+Caller Mode: Infra, Ad-HoC
+
+Notes: To join a known BSS.
+
+Command-Event Mode
+
+*/
+
+/*
+Caller Mode: Infra, Ad-Hoc
+
+Notes: To join the specified bss
+
+Command Event Mode
+
+*/
+struct joinbss_parm {
+ struct wlan_bssid_ex network;
+};
+
+/*
+Caller Mode: Infra, Ad-HoC(C)
+
+Notes: To disconnect the current associated BSS
+
+Command Mode
+
+*/
+struct disconnect_parm {
+ u32 deauth_timeout_ms;
+};
+
+/*
+Caller Mode: AP, Ad-HoC(M)
+
+Notes: To create a BSS
+
+Command Mode
+*/
+struct createbss_parm {
+ struct wlan_bssid_ex network;
+};
+
+struct setopmode_parm {
+ u8 mode;
+ u8 rsvd[3];
+};
+
+/*
+Caller Mode: AP, Ad-HoC, Infra
+
+Notes: To ask the RTL8711 to perform a site survey
+
+Command-Event Mode
+
+*/
+
+#define RTW_SSID_SCAN_AMOUNT 9 /* for WEXT_CSCAN_AMOUNT 9 */
+#define RTW_CHANNEL_SCAN_AMOUNT (14+37)
+struct sitesurvey_parm {
+ int scan_mode; /* active: 1, passive: 0 */
+ u8 ssid_num;
+ u8 ch_num;
+ struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
+ struct rtw_ieee80211_channel ch[RTW_CHANNEL_SCAN_AMOUNT];
+};
+
+/*
+Caller Mode: Any
+
+Notes: To set the auth type of the RTL8711: open/shared/802.1x
+
+Command Mode
+
+*/
+struct setauth_parm {
+	u8 mode;  /* 0: legacy open, 1: legacy shared, 2: 802.1x */
+ u8 _1x; /* 0: PSK, 1: TLS */
+ u8 rsvd[2];
+};
+
+/*
+Caller Mode: Infra
+
+a. algorithm: wep40, wep104, tkip & aes
+b. keytype: grp key/unicast key
+c. key contents
+
+when shared key ==> keyid is the camid
+when 802.1x ==> keyid [0:1] ==> grp key
+when 802.1x ==> keyid > 2 ==> unicast key
+
+*/
+struct setkey_parm {
+ u8 algorithm; /* could be none, wep40, TKIP, CCMP, wep104 */
+ u8 keyid;
+ u8 grpkey; /* 1: this is the grpkey for 802.1x.
+ * 0: this is the unicast key for 802.1x */
+ u8 set_tx; /* 1: main tx key for wep. 0: other key. */
+ u8 key[16]; /* this could be 40 or 104 */
+};
+
+/*
+When in AP or Ad-Hoc mode, this is used to
+allocate a sw/hw entry for a newly associated sta.
+
+Command
+
+when shared key ==> algorithm/keyid
+
+*/
+struct set_stakey_parm {
+ u8 addr[ETH_ALEN];
+ u8 algorithm;
+ u8 id;/* currently for erasing cam entry if
+ * algorithm == _NO_PRIVACY_ */
+ u8 key[16];
+};
+
+struct set_stakey_rsp {
+ u8 addr[ETH_ALEN];
+ u8 keyid;
+ u8 rsvd;
+};
+
+/*
+Caller Ad-Hoc/AP
+
+Command -Rsp(AID == CAMID) mode
+
+This is to force the fw to add a sta_data entry per the driver's request.
+
+FW will write a cam entry associated with it.
+
+*/
+struct set_assocsta_parm {
+ u8 addr[ETH_ALEN];
+};
+
+struct set_assocsta_rsp {
+ u8 cam_id;
+ u8 rsvd[3];
+};
+
+/*
+ Caller Ad-Hoc/AP
+
+ Command mode
+
+ This is to force fw to del an sta_data entry per driver's request
+
+ FW will invalidate the cam entry associated with it.
+
+*/
+struct del_assocsta_parm {
+ u8 addr[ETH_ALEN];
+};
+
+/*
+Caller Mode: AP/Ad-HoC(M)
+
+Notes: To notify the fw that a given staid has changed its power state
+
+Command Mode
+
+*/
+struct setstapwrstate_parm {
+ u8 staid;
+ u8 status;
+ u8 hwaddr[6];
+};
+
+/*
+Caller Mode: Any
+
+Notes: To set up the basic rate of the RTL8711
+
+Command Mode
+
+*/
+struct setbasicrate_parm {
+ u8 basicrates[NumRates];
+};
+
+/*
+Caller Mode: Any
+
+Notes: To read the current basic rate
+
+Command-Rsp Mode
+
+*/
+struct getbasicrate_parm {
+ u32 rsvd;
+};
+
+struct getbasicrate_rsp {
+ u8 basicrates[NumRates];
+};
+
+/*
+Caller Mode: Any
+
+Notes: To set up the data rate of the RTL8711
+
+Command Mode
+
+*/
+struct setdatarate_parm {
+ u8 mac_id;
+ u8 datarates[NumRates];
+};
+
+/*
+Caller Mode: Any
+
+Notes: To read the current data rate
+
+Command-Rsp Mode
+
+*/
+struct getdatarate_parm {
+	u32 rsvd;
+};
+struct getdatarate_rsp {
+ u8 datarates[NumRates];
+};
+
+/*
+Caller Mode: Any
+AP: AP can use the info for the contents of the beacon frame
+Infra: STA can use the info when site surveying
+Ad-HoC(M): Like AP
+Ad-HoC(C): Like STA
+
+Notes: To set the phy capability of the NIC
+
+Command Mode
+
+*/
+
+struct setphyinfo_parm {
+ struct regulatory_class class_sets[NUM_REGULATORYS];
+ u8 status;
+};
+
+struct getphyinfo_parm {
+ u32 rsvd;
+};
+
+struct getphyinfo_rsp {
+ struct regulatory_class class_sets[NUM_REGULATORYS];
+ u8 status;
+};
+
+/*
+Caller Mode: Any
+
+Notes: To set the channel/modem/band
+This command will be used when channel/modem/band is changed.
+
+Command Mode
+
+*/
+struct setphy_parm {
+ u8 rfchannel;
+ u8 modem;
+};
+
+/*
+Caller Mode: Any
+
+Notes: To get the current setting of channel/modem/band
+
+Command-Rsp Mode
+
+*/
+struct getphy_parm {
+	u32 rsvd;
+};
+struct getphy_rsp {
+ u8 rfchannel;
+ u8 modem;
+};
+
+struct readBB_parm {
+ u8 offset;
+};
+struct readBB_rsp {
+ u8 value;
+};
+
+struct readTSSI_parm {
+ u8 offset;
+};
+struct readTSSI_rsp {
+ u8 value;
+};
+
+struct writeBB_parm {
+ u8 offset;
+ u8 value;
+};
+
+struct readRF_parm {
+ u8 offset;
+};
+struct readRF_rsp {
+ u32 value;
+};
+
+struct writeRF_parm {
+ u32 offset;
+ u32 value;
+};
+
+struct getrfintfs_parm {
+ u8 rfintfs;
+};
+
+struct Tx_Beacon_param {
+ struct wlan_bssid_ex network;
+};
+
+/*
+ Notes: This command is used for H2C/C2H loopback testing
+
+ mac[0] == 0
+ ==> CMD mode, return H2C_SUCCESS.
+	The following condition must be true under CMD mode
+ mac[1] == mac[4], mac[2] == mac[3], mac[0]=mac[5]= 0;
+ s0 == 0x1234, s1 == 0xabcd, w0 == 0x78563412, w1 == 0x5aa5def7;
+ s2 == (b1 << 8 | b0);
+
+ mac[0] == 1
+ ==> CMD_RSP mode, return H2C_SUCCESS_RSP
+
+ The rsp layout shall be:
+ rsp: parm:
+ mac[0] = mac[5];
+ mac[1] = mac[4];
+ mac[2] = mac[3];
+ mac[3] = mac[2];
+ mac[4] = mac[1];
+ mac[5] = mac[0];
+ s0 = s1;
+ s1 = swap16(s0);
+ w0 = swap32(w1);
+ b0 = b1
+ s2 = s0 + s1
+ b1 = b0
+ w1 = w0
+
+ mac[0] == 2
+ ==> CMD_EVENT mode, return H2C_SUCCESS
+ The event layout shall be:
+ event: parm:
+ mac[0] = mac[5];
+ mac[1] = mac[4];
+		mac[2] = event's seq no, starting from 1 to parm's mac[3]
+ mac[3] = mac[2];
+ mac[4] = mac[1];
+ mac[5] = mac[0];
+ s0 = swap16(s0) - event.mac[2];
+ s1 = s1 + event.mac[2];
+ w0 = swap32(w0);
+ b0 = b1
+ s2 = s0 + event.mac[2]
+ b1 = b0
+ w1 = swap32(w1) - event.mac[2];
+
+ parm->mac[3] is the total event counts that host requested.
+ event will be the same with the cmd's param.
+*/
+
+/* CMD param Format for driver extra cmd handler */
+struct drvextra_cmd_parm {
+ int ec_id; /* extra cmd id */
+ int type_size; /* Can use this field as the type id or command size */
+ unsigned char *pbuf;
+};
+
+/*------------------- Below are used for RF/BB tuning ----------------------*/
+
+struct setantenna_parm {
+ u8 tx_antset;
+ u8 rx_antset;
+ u8 tx_antenna;
+ u8 rx_antenna;
+};
+
+struct enrateadaptive_parm {
+ u32 en;
+};
+
+struct settxagctbl_parm {
+ u32 txagc[MAX_RATES_LENGTH];
+};
+
+struct gettxagctbl_parm {
+ u32 rsvd;
+};
+struct gettxagctbl_rsp {
+ u32 txagc[MAX_RATES_LENGTH];
+};
+
+struct setagcctrl_parm {
+ u32 agcctrl; /* 0: pure hw, 1: fw */
+};
+
+struct setssup_parm {
+ u32 ss_ForceUp[MAX_RATES_LENGTH];
+};
+
+struct getssup_parm {
+ u32 rsvd;
+};
+
+struct getssup_rsp {
+ u8 ss_ForceUp[MAX_RATES_LENGTH];
+};
+
+struct setssdlevel_parm {
+ u8 ss_DLevel[MAX_RATES_LENGTH];
+};
+
+struct getssdlevel_parm {
+ u32 rsvd;
+};
+
+struct getssdlevel_rsp {
+ u8 ss_DLevel[MAX_RATES_LENGTH];
+};
+
+struct setssulevel_parm {
+ u8 ss_ULevel[MAX_RATES_LENGTH];
+};
+
+struct getssulevel_parm {
+ u32 rsvd;
+};
+
+struct getssulevel_rsp {
+ u8 ss_ULevel[MAX_RATES_LENGTH];
+};
+
+struct setcountjudge_parm {
+ u8 count_judge[MAX_RATES_LENGTH];
+};
+
+struct getcountjudge_parm {
+ u32 rsvd;
+};
+
+struct getcountjudge_rsp {
+ u8 count_judge[MAX_RATES_LENGTH];
+};
+
+struct setratable_parm {
+ u8 ss_ForceUp[NumRates];
+ u8 ss_ULevel[NumRates];
+ u8 ss_DLevel[NumRates];
+ u8 count_judge[NumRates];
+};
+
+struct getratable_parm {
+ uint rsvd;
+};
+
+struct getratable_rsp {
+ u8 ss_ForceUp[NumRates];
+ u8 ss_ULevel[NumRates];
+ u8 ss_DLevel[NumRates];
+ u8 count_judge[NumRates];
+};
+
+/* to get TX,RX retry count */
+
+struct gettxretrycnt_parm {
+ unsigned int rsvd;
+};
+
+struct gettxretrycnt_rsp {
+ unsigned long tx_retrycnt;
+};
+
+struct getrxretrycnt_parm {
+ unsigned int rsvd;
+};
+
+struct getrxretrycnt_rsp {
+ unsigned long rx_retrycnt;
+};
+
+/* to get BCNOK,BCNERR count */
+struct getbcnokcnt_parm {
+ unsigned int rsvd;
+};
+
+struct getbcnokcnt_rsp {
+ unsigned long bcnokcnt;
+};
+
+struct getbcnerrcnt_parm {
+ unsigned int rsvd;
+};
+
+struct getbcnerrcnt_rsp {
+ unsigned long bcnerrcnt;
+};
+
+/* to get current TX power level */
+struct getcurtxpwrlevel_parm {
+ unsigned int rsvd;
+};
+struct getcurtxpwrlevel_rspi {
+ unsigned short tx_power;
+};
+
+struct setprobereqextraie_parm {
+ unsigned char e_id;
+ unsigned char ie_len;
+ unsigned char ie[0];
+};
+
+struct setassocreqextraie_parm {
+ unsigned char e_id;
+ unsigned char ie_len;
+ unsigned char ie[0];
+};
+
+struct setproberspextraie_parm {
+ unsigned char e_id;
+ unsigned char ie_len;
+ unsigned char ie[0];
+};
+
+struct setassocrspextraie_parm {
+ unsigned char e_id;
+ unsigned char ie_len;
+ unsigned char ie[0];
+};
+
+struct addBaReq_parm {
+ unsigned int tid;
+ u8 addr[ETH_ALEN];
+};
+
+/*H2C Handler index: 46 */
+struct set_ch_parm {
+ u8 ch;
+ u8 bw;
+ u8 ch_offset;
+};
+
+/*H2C Handler index: 59 */
+struct SetChannelPlan_param {
+	u8 channel_plan;
+};
+
+/*H2C Handler index: 60 */
+struct LedBlink_param {
+	struct LED_871x *pLed;
+};
+
+/*H2C Handler index: 61 */
+struct SetChannelSwitch_param {
+	u8 new_ch_no;
+};
+
+/*H2C Handler index: 62 */
+struct TDLSoption_param {
+	u8 addr[ETH_ALEN];
+	u8 option;
+};
+
+#define GEN_CMD_CODE(cmd) cmd ## _CMD_
+
+/*
+
+Result:
+0x00: success
+0x01: success, and check Response.
+0x02: cmd ignored due to duplicated sequence number
+0x03: cmd dropped due to invalid cmd code
+0x04: reserved.
+
+*/
+
+#define H2C_RSP_OFFSET 512
+
+#define H2C_SUCCESS 0x00
+#define H2C_SUCCESS_RSP 0x01
+#define H2C_DUPLICATED 0x02
+#define H2C_DROPPED 0x03
+#define H2C_PARAMETERS_ERROR 0x04
+#define H2C_REJECTED 0x05
+#define H2C_CMD_OVERFLOW 0x06
+#define H2C_RESERVED 0x07
+
+u8 rtw_setassocsta_cmd(struct adapter *padapter, u8 *mac_addr);
+u8 rtw_setstandby_cmd(struct adapter *padapter, uint action);
+u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
+ int ssid_num, struct rtw_ieee80211_channel *ch,
+ int ch_num);
+u8 rtw_createbss_cmd(struct adapter *padapter);
+u8 rtw_createbss_cmd_ex(struct adapter *padapter, unsigned char *pbss,
+ unsigned int sz);
+u8 rtw_setphy_cmd(struct adapter *padapter, u8 modem, u8 ch);
+u8 rtw_setstakey_cmd(struct adapter *padapter, u8 *psta, u8 unicast_key);
+u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue);
+u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork);
+u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueue);
+u8 rtw_setopmode_cmd(struct adapter *padapter, enum ndis_802_11_network_infra networktype);
+u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset);
+u8 rtw_setbasicrate_cmd(struct adapter *padapter, u8 *rateset);
+u8 rtw_setbbreg_cmd(struct adapter *padapter, u8 offset, u8 val);
+u8 rtw_setrfreg_cmd(struct adapter *padapter, u8 offset, u32 val);
+u8 rtw_getbbreg_cmd(struct adapter *padapter, u8 offset, u8 *pval);
+u8 rtw_getrfreg_cmd(struct adapter *padapter, u8 offset, u8 *pval);
+u8 rtw_setrfintfs_cmd(struct adapter *padapter, u8 mode);
+u8 rtw_setrttbl_cmd(struct adapter *padapter, struct setratable_parm *prate_table);
+u8 rtw_getrttbl_cmd(struct adapter *padapter, struct getratable_rsp *pval);
+
+u8 rtw_gettssi_cmd(struct adapter *padapter, u8 offset, u8 *pval);
+u8 rtw_setfwdig_cmd(struct adapter *padapter, u8 type);
+u8 rtw_setfwra_cmd(struct adapter *padapter, u8 type);
+
+u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr);
+
+u8 rtw_dynamic_chk_wk_cmd(struct adapter *adapter);
+
+u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue);
+u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 minRptTime);
+
+u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue);
+u8 rtw_ps_cmd(struct adapter *padapter);
+
+#ifdef CONFIG_88EU_AP_MODE
+u8 rtw_chk_hi_queue_cmd(struct adapter *padapter);
+#endif
+
+u8 rtw_set_ch_cmd(struct adapter *padapter, u8 ch, u8 bw, u8 ch_offset, u8 enqueue);
+u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue);
+u8 rtw_led_blink_cmd(struct adapter *padapter, struct LED_871x *pLed);
+u8 rtw_set_csa_cmd(struct adapter *padapter, u8 new_ch_no);
+u8 rtw_tdls_cmd(struct adapter *padapter, u8 *addr, u8 option);
+
+u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt);
+
+u8 rtw_drvextra_cmd_hdl(struct adapter *padapter, unsigned char *pbuf);
+
+void rtw_survey_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd);
+void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd);
+void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd);
+void rtw_createbss_cmd_callback(struct adapter *adapt, struct cmd_obj *pcmd);
+void rtw_getbbrfreg_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
+void rtw_readtssi_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
+
+void rtw_setstaKey_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
+void rtw_setassocsta_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cm);
+void rtw_getrttbl_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
+
+struct _cmd_callback {
+ u32 cmd_code;
+ void (*callback)(struct adapter *padapter, struct cmd_obj *cmd);
+};
+
+enum rtw_h2c_cmd {
+ GEN_CMD_CODE(_Read_MACREG), /*0*/
+ GEN_CMD_CODE(_Write_MACREG),
+ GEN_CMD_CODE(_Read_BBREG),
+ GEN_CMD_CODE(_Write_BBREG),
+ GEN_CMD_CODE(_Read_RFREG),
+ GEN_CMD_CODE(_Write_RFREG), /*5*/
+ GEN_CMD_CODE(_Read_EEPROM),
+ GEN_CMD_CODE(_Write_EEPROM),
+ GEN_CMD_CODE(_Read_EFUSE),
+ GEN_CMD_CODE(_Write_EFUSE),
+
+ GEN_CMD_CODE(_Read_CAM), /*10*/
+ GEN_CMD_CODE(_Write_CAM),
+ GEN_CMD_CODE(_setBCNITV),
+ GEN_CMD_CODE(_setMBIDCFG),
+ GEN_CMD_CODE(_JoinBss), /*14*/
+ GEN_CMD_CODE(_DisConnect), /*15*/
+ GEN_CMD_CODE(_CreateBss),
+ GEN_CMD_CODE(_SetOpMode),
+ GEN_CMD_CODE(_SiteSurvey), /*18*/
+ GEN_CMD_CODE(_SetAuth),
+
+ GEN_CMD_CODE(_SetKey), /*20*/
+ GEN_CMD_CODE(_SetStaKey),
+ GEN_CMD_CODE(_SetAssocSta),
+ GEN_CMD_CODE(_DelAssocSta),
+ GEN_CMD_CODE(_SetStaPwrState),
+ GEN_CMD_CODE(_SetBasicRate), /*25*/
+ GEN_CMD_CODE(_GetBasicRate),
+ GEN_CMD_CODE(_SetDataRate),
+ GEN_CMD_CODE(_GetDataRate),
+ GEN_CMD_CODE(_SetPhyInfo),
+
+ GEN_CMD_CODE(_GetPhyInfo), /*30*/
+ GEN_CMD_CODE(_SetPhy),
+ GEN_CMD_CODE(_GetPhy),
+ GEN_CMD_CODE(_readRssi),
+ GEN_CMD_CODE(_readGain),
+ GEN_CMD_CODE(_SetAtim), /*35*/
+ GEN_CMD_CODE(_SetPwrMode),
+ GEN_CMD_CODE(_JoinbssRpt),
+ GEN_CMD_CODE(_SetRaTable),
+ GEN_CMD_CODE(_GetRaTable),
+
+ GEN_CMD_CODE(_GetCCXReport), /*40*/
+ GEN_CMD_CODE(_GetDTMReport),
+ GEN_CMD_CODE(_GetTXRateStatistics),
+ GEN_CMD_CODE(_SetUsbSuspend),
+ GEN_CMD_CODE(_SetH2cLbk),
+ GEN_CMD_CODE(_AddBAReq), /*45*/
+ GEN_CMD_CODE(_SetChannel), /*46*/
+ GEN_CMD_CODE(_SetTxPower),
+ GEN_CMD_CODE(_SwitchAntenna),
+ GEN_CMD_CODE(_SetCrystalCap),
+ GEN_CMD_CODE(_SetSingleCarrierTx), /*50*/
+
+ GEN_CMD_CODE(_SetSingleToneTx),/*51*/
+ GEN_CMD_CODE(_SetCarrierSuppressionTx),
+ GEN_CMD_CODE(_SetContinuousTx),
+ GEN_CMD_CODE(_SwitchBandwidth), /*54*/
+ GEN_CMD_CODE(_TX_Beacon), /*55*/
+
+ GEN_CMD_CODE(_Set_MLME_EVT), /*56*/
+ GEN_CMD_CODE(_Set_Drv_Extra), /*57*/
+ GEN_CMD_CODE(_Set_H2C_MSG), /*58*/
+
+ GEN_CMD_CODE(_SetChannelPlan), /*59*/
+ GEN_CMD_CODE(_LedBlink), /*60*/
+
+ GEN_CMD_CODE(_SetChannelSwitch), /*61*/
+ GEN_CMD_CODE(_TDLS), /*62*/
+
+ MAX_H2CCMD
+};
+
+#define _GetBBReg_CMD_ _Read_BBREG_CMD_
+#define _SetBBReg_CMD_ _Write_BBREG_CMD_
+#define _GetRFReg_CMD_ _Read_RFREG_CMD_
+#define _SetRFReg_CMD_ _Write_RFREG_CMD_
+
+#ifdef _RTW_CMD_C_
+static struct _cmd_callback rtw_cmd_callback[] = {
+ {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/
+ {GEN_CMD_CODE(_Write_MACREG), NULL},
+ {GEN_CMD_CODE(_Read_BBREG), &rtw_getbbrfreg_cmdrsp_callback},
+ {GEN_CMD_CODE(_Write_BBREG), NULL},
+ {GEN_CMD_CODE(_Read_RFREG), &rtw_getbbrfreg_cmdrsp_callback},
+ {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/
+ {GEN_CMD_CODE(_Read_EEPROM), NULL},
+ {GEN_CMD_CODE(_Write_EEPROM), NULL},
+ {GEN_CMD_CODE(_Read_EFUSE), NULL},
+ {GEN_CMD_CODE(_Write_EFUSE), NULL},
+
+ {GEN_CMD_CODE(_Read_CAM), NULL}, /*10*/
+ {GEN_CMD_CODE(_Write_CAM), NULL},
+ {GEN_CMD_CODE(_setBCNITV), NULL},
+ {GEN_CMD_CODE(_setMBIDCFG), NULL},
+ {GEN_CMD_CODE(_JoinBss), &rtw_joinbss_cmd_callback}, /*14*/
+ {GEN_CMD_CODE(_DisConnect), &rtw_disassoc_cmd_callback}, /*15*/
+ {GEN_CMD_CODE(_CreateBss), &rtw_createbss_cmd_callback},
+ {GEN_CMD_CODE(_SetOpMode), NULL},
+ {GEN_CMD_CODE(_SiteSurvey), &rtw_survey_cmd_callback}, /*18*/
+ {GEN_CMD_CODE(_SetAuth), NULL},
+
+ {GEN_CMD_CODE(_SetKey), NULL}, /*20*/
+ {GEN_CMD_CODE(_SetStaKey), &rtw_setstaKey_cmdrsp_callback},
+ {GEN_CMD_CODE(_SetAssocSta), &rtw_setassocsta_cmdrsp_callback},
+ {GEN_CMD_CODE(_DelAssocSta), NULL},
+ {GEN_CMD_CODE(_SetStaPwrState), NULL},
+ {GEN_CMD_CODE(_SetBasicRate), NULL}, /*25*/
+ {GEN_CMD_CODE(_GetBasicRate), NULL},
+ {GEN_CMD_CODE(_SetDataRate), NULL},
+ {GEN_CMD_CODE(_GetDataRate), NULL},
+ {GEN_CMD_CODE(_SetPhyInfo), NULL},
+
+ {GEN_CMD_CODE(_GetPhyInfo), NULL}, /*30*/
+ {GEN_CMD_CODE(_SetPhy), NULL},
+ {GEN_CMD_CODE(_GetPhy), NULL},
+ {GEN_CMD_CODE(_readRssi), NULL},
+ {GEN_CMD_CODE(_readGain), NULL},
+ {GEN_CMD_CODE(_SetAtim), NULL}, /*35*/
+ {GEN_CMD_CODE(_SetPwrMode), NULL},
+ {GEN_CMD_CODE(_JoinbssRpt), NULL},
+ {GEN_CMD_CODE(_SetRaTable), NULL},
+ {GEN_CMD_CODE(_GetRaTable), NULL},
+
+ {GEN_CMD_CODE(_GetCCXReport), NULL}, /*40*/
+ {GEN_CMD_CODE(_GetDTMReport), NULL},
+ {GEN_CMD_CODE(_GetTXRateStatistics), NULL},
+ {GEN_CMD_CODE(_SetUsbSuspend), NULL},
+ {GEN_CMD_CODE(_SetH2cLbk), NULL},
+ {GEN_CMD_CODE(_AddBAReq), NULL}, /*45*/
+ {GEN_CMD_CODE(_SetChannel), NULL}, /*46*/
+ {GEN_CMD_CODE(_SetTxPower), NULL},
+ {GEN_CMD_CODE(_SwitchAntenna), NULL},
+ {GEN_CMD_CODE(_SetCrystalCap), NULL},
+ {GEN_CMD_CODE(_SetSingleCarrierTx), NULL}, /*50*/
+
+ {GEN_CMD_CODE(_SetSingleToneTx), NULL}, /*51*/
+ {GEN_CMD_CODE(_SetCarrierSuppressionTx), NULL},
+ {GEN_CMD_CODE(_SetContinuousTx), NULL},
+ {GEN_CMD_CODE(_SwitchBandwidth), NULL}, /*54*/
+ {GEN_CMD_CODE(_TX_Beacon), NULL},/*55*/
+
+ {GEN_CMD_CODE(_Set_MLME_EVT), NULL},/*56*/
+ {GEN_CMD_CODE(_Set_Drv_Extra), NULL},/*57*/
+ {GEN_CMD_CODE(_Set_H2C_MSG), NULL},/*58*/
+ {GEN_CMD_CODE(_SetChannelPlan), NULL},/*59*/
+ {GEN_CMD_CODE(_LedBlink), NULL},/*60*/
+
+ {GEN_CMD_CODE(_SetChannelSwitch), NULL},/*61*/
+ {GEN_CMD_CODE(_TDLS), NULL},/*62*/
+};
+#endif
+
+#endif /* __RTW_CMD_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_debug.h b/drivers/staging/rtl8188eu/include/rtw_debug.h
new file mode 100644
index 00000000000..c6b193a2e79
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_debug.h
@@ -0,0 +1,290 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_DEBUG_H__
+#define __RTW_DEBUG_H__
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+
+#define _drv_always_ 1
+#define _drv_emerg_ 2
+#define _drv_alert_ 3
+#define _drv_crit_ 4
+#define _drv_err_ 5
+#define _drv_warning_ 6
+#define _drv_notice_ 7
+#define _drv_info_ 8
+#define _drv_debug_ 9
+
+
+#define _module_rtl871x_xmit_c_ BIT(0)
+#define _module_xmit_osdep_c_ BIT(1)
+#define _module_rtl871x_recv_c_ BIT(2)
+#define _module_recv_osdep_c_ BIT(3)
+#define _module_rtl871x_mlme_c_ BIT(4)
+#define _module_mlme_osdep_c_ BIT(5)
+#define _module_rtl871x_sta_mgt_c_ BIT(6)
+#define _module_rtl871x_cmd_c_ BIT(7)
+#define _module_cmd_osdep_c_ BIT(8)
+#define _module_rtl871x_io_c_ BIT(9)
+#define _module_io_osdep_c_ BIT(10)
+#define _module_os_intfs_c_ BIT(11)
+#define _module_rtl871x_security_c_ BIT(12)
+#define _module_rtl871x_eeprom_c_ BIT(13)
+#define _module_hal_init_c_ BIT(14)
+#define _module_hci_hal_init_c_ BIT(15)
+#define _module_rtl871x_ioctl_c_ BIT(16)
+#define _module_rtl871x_ioctl_set_c_ BIT(17)
+#define _module_rtl871x_ioctl_query_c_ BIT(18)
+#define _module_rtl871x_pwrctrl_c_ BIT(19)
+#define _module_hci_intfs_c_ BIT(20)
+#define _module_hci_ops_c_ BIT(21)
+#define _module_osdep_service_c_ BIT(22)
+#define _module_mp_ BIT(23)
+#define _module_hci_ops_os_c_ BIT(24)
+#define _module_rtl871x_ioctl_os_c BIT(25)
+#define _module_rtl8712_cmd_c_ BIT(26)
+#define _module_rtl8192c_xmit_c_ BIT(27)
+#define _module_hal_xmit_c_ BIT(28)
+#define _module_efuse_ BIT(29)
+#define _module_rtl8712_recv_c_ BIT(30)
+#define _module_rtl8712_led_c_ BIT(31)
+
+#define DRIVER_PREFIX "R8188EU: "
+
+extern u32 GlobalDebugLevel;
+
+#define DBG_88E_LEVEL(_level, fmt, arg...) \
+ do { \
+ if (_level <= GlobalDebugLevel) \
+ pr_info(DRIVER_PREFIX"ERROR " fmt, ##arg); \
+ } while (0)
+
+#define DBG_88E(...) \
+ do { \
+ if (_drv_err_ <= GlobalDebugLevel) \
+ pr_info(DRIVER_PREFIX __VA_ARGS__); \
+ } while (0)
+
+#define MSG_88E(...) \
+ do { \
+ if (_drv_err_ <= GlobalDebugLevel) \
+ pr_info(DRIVER_PREFIX __VA_ARGS__); \
+ } while (0)
+
+#define RT_TRACE(_comp, _level, fmt) \
+ do { \
+ if (_level <= GlobalDebugLevel) { \
+ pr_info("%s [0x%08x,%d]", DRIVER_PREFIX, \
+ (unsigned int)_comp, _level); \
+ pr_info fmt; \
+ } \
+ } while (0)
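+
+/*
+ * Illustrative usage (added for clarity; not part of the original patch):
+ *
+ *	DBG_88E("%s: rx_len=%u\n", __func__, rx_len);
+ *	RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ *		 ("recv_func: packet dropped\n"));
+ *
+ * rx_len is a hypothetical variable.  Note that RT_TRACE takes its format
+ * string and arguments as one parenthesized argument, since the macro
+ * expands that argument directly after pr_info.
+ */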
+
+#define _func_enter_ \
+ do { \
+ if (GlobalDebugLevel >= _drv_debug_) \
+ pr_info("%s : %s enters at %d\n", \
+ DRIVER_PREFIX, __func__, __LINE__); \
+ } while (0)
+
+#define _func_exit_ \
+ do { \
+ if (GlobalDebugLevel >= _drv_debug_) \
+ pr_info("%s : %s exits at %d\n", \
+ DRIVER_PREFIX, __func__, __LINE__); \
+ } while (0)
+
+#define RT_PRINT_DATA(_comp, _level, _titlestring, _hexdata, _hexdatalen)\
+ do { \
+ if (_level <= GlobalDebugLevel) { \
+ int __i; \
+ u8 *ptr = (u8 *)_hexdata; \
+ pr_info("%s", DRIVER_PREFIX); \
+ pr_info(_titlestring); \
+			for (__i = 0; __i < (int)_hexdatalen; __i++) { \
+ pr_info("%02X%s", ptr[__i], \
+ (((__i + 1) % 4) == 0) ? \
+ " " : " "); \
+ if (((__i + 1) % 16) == 0) \
+ printk("\n"); \
+ } \
+ printk("\n"); \
+ } \
+ } while (0)
+
+int proc_get_drv_version(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_write_reg(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_set_write_reg(struct file *file, const char __user *buffer,
+ unsigned long count, void *data);
+int proc_get_read_reg(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_set_read_reg(struct file *file, const char __user *buffer,
+ unsigned long count, void *data);
+
+int proc_get_fwstate(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+int proc_get_sec_info(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+int proc_get_mlmext_state(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_qos_option(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+int proc_get_ht_option(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+int proc_get_rf_info(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+int proc_get_ap_info(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_adapter_state(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_trx_info(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_mac_reg_dump1(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_mac_reg_dump2(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_mac_reg_dump3(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_bb_reg_dump1(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_bb_reg_dump2(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_bb_reg_dump3(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_rf_reg_dump1(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_rf_reg_dump2(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_rf_reg_dump3(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_rf_reg_dump4(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+#ifdef CONFIG_88EU_AP_MODE
+
+int proc_get_all_sta_info(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+#endif
+
+int proc_get_best_channel(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_rx_signal(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_set_rx_signal(struct file *file, const char __user *buffer,
+ unsigned long count, void *data);
+
+int proc_get_ht_enable(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_set_ht_enable(struct file *file, const char __user *buffer,
+ unsigned long count, void *data);
+
+int proc_get_cbw40_enable(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_set_cbw40_enable(struct file *file, const char __user *buffer,
+ unsigned long count, void *data);
+
+int proc_get_ampdu_enable(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_set_ampdu_enable(struct file *file, const char __user *buffer,
+ unsigned long count, void *data);
+
+int proc_get_rx_stbc(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_set_rx_stbc(struct file *file, const char __user *buffer,
+ unsigned long count, void *data);
+
+int proc_get_two_path_rssi(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_get_rssi_disp(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_set_rssi_disp(struct file *file, const char __user *buffer,
+ unsigned long count, void *data);
+
+#ifdef CONFIG_BT_COEXIST
+int proc_get_btcoex_dbg(char *page, char **start,
+ off_t offset, int count,
+ int *eof, void *data);
+
+int proc_set_btcoex_dbg(struct file *file, const char *buffer,
+ signed long count, void *data);
+
+#endif /* CONFIG_BT_COEXIST */
+
+#endif /* __RTW_DEBUG_H__ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_eeprom.h b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
new file mode 100644
index 00000000000..b2672c3febd
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
@@ -0,0 +1,130 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_EEPROM_H__
+#define __RTW_EEPROM_H__
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#define RTL8712_EEPROM_ID 0x8712
+
+#define HWSET_MAX_SIZE_512 512
+#define EEPROM_MAX_SIZE HWSET_MAX_SIZE_512
+
+#define CLOCK_RATE 50 /* 100us */
+
+/* EEPROM opcodes */
+#define EEPROM_READ_OPCODE 06
+#define EEPROM_WRITE_OPCODE 05
+#define EEPROM_ERASE_OPCODE 07
+#define EEPROM_EWEN_OPCODE 19 /* Erase/write enable */
+#define EEPROM_EWDS_OPCODE 16 /* Erase/write disable */
+
+/* Country codes */
+#define USA 0x555320
+#define EUROPE 0x1 /* temp, should be provided later */
+#define JAPAN 0x2 /* temp, should be provided later */
+
+#define EEPROM_CID_DEFAULT 0x0
+#define EEPROM_CID_ALPHA 0x1
+#define EEPROM_CID_Senao 0x3
+#define EEPROM_CID_NetCore 0x5
+#define EEPROM_CID_CAMEO 0X8
+#define EEPROM_CID_SITECOM 0x9
+#define EEPROM_CID_COREGA 0xB
+#define EEPROM_CID_EDIMAX_BELK 0xC
+#define EEPROM_CID_SERCOMM_BELK 0xE
+#define EEPROM_CID_CAMEO1 0xF
+#define EEPROM_CID_WNC_COREGA 0x12
+#define EEPROM_CID_CLEVO 0x13
+#define EEPROM_CID_WHQL 0xFE
+
+/* Customer ID, note that:
+ * This variable is initialized through EEPROM or the registry; however,
+ * its definition may differ from that in the EEPROM for EEPROM size
+ * considerations, so proper translation between them is required.
+ * Besides, the CustomerID from the registry takes precedence over the one
+ * in EEPROM.  Defined below. 060703, by rcnjko. */
+enum RT_CUSTOMER_ID {
+ RT_CID_DEFAULT = 0,
+ RT_CID_8187_ALPHA0 = 1,
+ RT_CID_8187_SERCOMM_PS = 2,
+ RT_CID_8187_HW_LED = 3,
+ RT_CID_8187_NETGEAR = 4,
+ RT_CID_WHQL = 5,
+ RT_CID_819x_CAMEO = 6,
+ RT_CID_819x_RUNTOP = 7,
+ RT_CID_819x_Senao = 8,
+ RT_CID_TOSHIBA = 9, /* Merge by Jacken, 2008/01/31. */
+ RT_CID_819x_Netcore = 10,
+ RT_CID_Nettronix = 11,
+ RT_CID_DLINK = 12,
+ RT_CID_PRONET = 13,
+ RT_CID_COREGA = 14,
+ RT_CID_CHINA_MOBILE = 15,
+ RT_CID_819x_ALPHA = 16,
+ RT_CID_819x_Sitecom = 17,
+ RT_CID_CCX = 18, /* It's set under CCX logo test and isn't demanded
+ * for CCX functions, but for test behavior like retry
+ * limit and tx report. By Bruce, 2009-02-17. */
+ RT_CID_819x_Lenovo = 19,
+ RT_CID_819x_QMI = 20,
+ RT_CID_819x_Edimax_Belkin = 21,
+ RT_CID_819x_Sercomm_Belkin = 22,
+ RT_CID_819x_CAMEO1 = 23,
+ RT_CID_819x_MSI = 24,
+ RT_CID_819x_Acer = 25,
+ RT_CID_819x_AzWave_ASUS = 26,
+	RT_CID_819x_AzWave = 27, /* For AzWave in PCIe.
+				  * The ID is used by AzWave, not only ASUS */
+ RT_CID_819x_HP = 28,
+ RT_CID_819x_WNC_COREGA = 29,
+ RT_CID_819x_Arcadyan_Belkin = 30,
+ RT_CID_819x_SAMSUNG = 31,
+ RT_CID_819x_CLEVO = 32,
+ RT_CID_819x_DELL = 33,
+ RT_CID_819x_PRONETS = 34,
+ RT_CID_819x_Edimax_ASUS = 35,
+ RT_CID_819x_CAMEO_NETGEAR = 36,
+ RT_CID_PLANEX = 37,
+ RT_CID_CC_C = 38,
+ RT_CID_819x_Xavi = 39,
+ RT_CID_819x_FUNAI_TV = 40,
+	RT_CID_819x_ALPHA_WD = 41,
+};
+
+struct eeprom_priv {
+ u8 bautoload_fail_flag;
+ u8 bloadfile_fail_flag;
+ u8 bloadmac_fail_flag;
+ u8 mac_addr[6]; /* PermanentAddress */
+ u16 channel_plan;
+ u8 EepromOrEfuse;
+ u8 efuse_eeprom_data[HWSET_MAX_SIZE_512];
+};
+
+void eeprom_write16(struct adapter *padapter, u16 reg, u16 data);
+u16 eeprom_read16(struct adapter *padapter, u16 reg);
+void read_eeprom_content(struct adapter *padapter);
+void eeprom_read_sz(struct adapter *adapt, u16 reg, u8 *data, u32 sz);
+void read_eeprom_content_by_attrib(struct adapter *padapter);
+
+#endif /* __RTW_EEPROM_H__ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_efuse.h b/drivers/staging/rtl8188eu/include/rtw_efuse.h
new file mode 100644
index 00000000000..cee6b5e8b07
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_efuse.h
@@ -0,0 +1,150 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_EFUSE_H__
+#define __RTW_EFUSE_H__
+
+#include <osdep_service.h>
+
+#define EFUSE_ERROE_HANDLE 1
+
+#define PG_STATE_HEADER 0x01
+#define PG_STATE_WORD_0 0x02
+#define PG_STATE_WORD_1 0x04
+#define PG_STATE_WORD_2 0x08
+#define PG_STATE_WORD_3 0x10
+#define PG_STATE_DATA 0x20
+
+#define PG_SWBYTE_H 0x01
+#define PG_SWBYTE_L 0x02
+
+#define PGPKT_DATA_SIZE 8
+
+#define EFUSE_WIFI 0
+#define EFUSE_BT 1
+
+enum _EFUSE_DEF_TYPE {
+ TYPE_EFUSE_MAX_SECTION = 0,
+ TYPE_EFUSE_REAL_CONTENT_LEN = 1,
+ TYPE_AVAILABLE_EFUSE_BYTES_BANK = 2,
+ TYPE_AVAILABLE_EFUSE_BYTES_TOTAL = 3,
+ TYPE_EFUSE_MAP_LEN = 4,
+ TYPE_EFUSE_PROTECT_BYTES_BANK = 5,
+ TYPE_EFUSE_CONTENT_LEN_BANK = 6,
+};
+
+/* E-Fuse */
+#define EFUSE_MAP_SIZE 512
+#define EFUSE_MAX_SIZE 256
+/* end of E-Fuse */
+
+#define EFUSE_MAX_MAP_LEN 512
+#define EFUSE_MAX_HW_SIZE 512
+#define EFUSE_MAX_SECTION_BASE 16
+
+#define EXT_HEADER(header)		(((header) & 0x1F) == 0x0F)
+#define ALL_WORDS_DISABLED(wde)		(((wde) & 0x0F) == 0x0F)
+#define GET_HDR_OFFSET_2_0(header)	(((header) & 0xE0) >> 5)
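+
+/*
+ * Illustrative decode sketch (added comment, not in the original patch):
+ * for a packet header byte 'hdr' read from the efuse, roughly
+ *
+ *	if (EXT_HEADER(hdr)) {
+ *		// two-byte header: bits 7:5 of hdr carry offset[2:0]; the
+ *		// next byte (hdr2) carries offset[6:3] and the word-enable
+ *		offset = GET_HDR_OFFSET_2_0(hdr) | ((hdr2 & 0xF0) >> 1);
+ *		word_en = hdr2 & 0x0F;
+ *	} else {
+ *		offset = (hdr >> 4) & 0x0F;
+ *		word_en = hdr & 0x0F;
+ *	}
+ *	if (ALL_WORDS_DISABLED(word_en))
+ *		continue;	// nothing programmed in this packet
+ *
+ * hdr, hdr2, offset and word_en are hypothetical local variables.
+ */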
+
+#define EFUSE_REPEAT_THRESHOLD_ 3
+
+/* The following is for BT Efuse definition */
+#define EFUSE_BT_MAX_MAP_LEN 1024
+#define EFUSE_MAX_BANK 4
+#define EFUSE_MAX_BT_BANK (EFUSE_MAX_BANK-1)
+/*--------------------------Define Parameters-------------------------------*/
+#define EFUSE_MAX_WORD_UNIT 4
+
+/*------------------------------Define structure----------------------------*/
+struct pgpkt {
+ u8 offset;
+ u8 word_en;
+ u8 data[8];
+ u8 word_cnts;
+};
+
+/*------------------------------Define structure----------------------------*/
+struct efuse_hal {
+ u8 fakeEfuseBank;
+ u32 fakeEfuseUsedBytes;
+ u8 fakeEfuseContent[EFUSE_MAX_HW_SIZE];
+ u8 fakeEfuseInitMap[EFUSE_MAX_MAP_LEN];
+ u8 fakeEfuseModifiedMap[EFUSE_MAX_MAP_LEN];
+
+ u16 BTEfuseUsedBytes;
+ u8 BTEfuseUsedPercentage;
+ u8 BTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
+ u8 BTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN];
+ u8 BTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN];
+
+ u16 fakeBTEfuseUsedBytes;
+ u8 fakeBTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
+ u8 fakeBTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN];
+ u8 fakeBTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN];
+};
+
+/*------------------------Export global variable----------------------------*/
+extern u8 fakeEfuseBank;
+extern u32 fakeEfuseUsedBytes;
+extern u8 fakeEfuseContent[];
+extern u8 fakeEfuseInitMap[];
+extern u8 fakeEfuseModifiedMap[];
+
+extern u32 BTEfuseUsedBytes;
+extern u8 BTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
+extern u8 BTEfuseInitMap[];
+extern u8 BTEfuseModifiedMap[];
+
+extern u32 fakeBTEfuseUsedBytes;
+extern u8 fakeBTEfuseContent[EFUSE_MAX_BT_BANK][EFUSE_MAX_HW_SIZE];
+extern u8 fakeBTEfuseInitMap[];
+extern u8 fakeBTEfuseModifiedMap[];
+/*------------------------Export global variable----------------------------*/
+
+u8 efuse_GetCurrentSize(struct adapter *adapter, u16 *size);
+u16 efuse_GetMaxSize(struct adapter *adapter);
+u8 rtw_efuse_access(struct adapter *adapter, u8 read, u16 start_addr,
+ u16 cnts, u8 *data);
+u8 rtw_efuse_map_read(struct adapter *adapter, u16 addr, u16 cnts, u8 *data);
+u8 rtw_efuse_map_write(struct adapter *adapter, u16 addr, u16 cnts, u8 *data);
+u8 rtw_BT_efuse_map_read(struct adapter *adapter, u16 addr,
+ u16 cnts, u8 *data);
+u8 rtw_BT_efuse_map_write(struct adapter *adapter, u16 addr,
+ u16 cnts, u8 *data);
+u16 Efuse_GetCurrentSize(struct adapter *adapter, u8 efusetype, bool test);
+u8 Efuse_CalculateWordCnts(u8 word_en);
+void ReadEFuseByte(struct adapter *adapter, u16 _offset, u8 *pbuf, bool test);
+void EFUSE_GetEfuseDefinition(struct adapter *adapt, u8 type, u8 type1,
+ void *out, bool bPseudoTest);
+u8 efuse_OneByteRead(struct adapter *adapter, u16 addr, u8 *data, bool test);
+u8 efuse_OneByteWrite(struct adapter *adapter, u16 addr, u8 data, bool test);
+
+void Efuse_PowerSwitch(struct adapter *adapt, u8 bWrite, u8 PwrState);
+int Efuse_PgPacketRead(struct adapter *adapt, u8 offset, u8 *data, bool test);
+int Efuse_PgPacketWrite(struct adapter *adapter, u8 offset, u8 word, u8 *data,
+ bool test);
+void efuse_WordEnableDataRead(u8 word_en, u8 *sourdata, u8 *targetdata);
+u8 Efuse_WordEnableDataWrite(struct adapter *adapter, u16 efuse_addr,
+ u8 word_en, u8 *data, bool test);
+
+u8 EFUSE_Read1Byte(struct adapter *adapter, u16 address);
+void EFUSE_ShadowMapUpdate(struct adapter *adapter, u8 efusetype, bool test);
+void EFUSE_ShadowRead(struct adapter *adapt, u8 type, u16 offset, u32 *val);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_event.h b/drivers/staging/rtl8188eu/include/rtw_event.h
new file mode 100644
index 00000000000..52151dc4495
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_event.h
@@ -0,0 +1,115 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_EVENT_H_
+#define _RTW_EVENT_H_
+
+#include <osdep_service.h>
+
+#include <wlan_bssdef.h>
+#include <linux/semaphore.h>
+#include <linux/sem.h>
+
+/*
+Used to report that a bss has been scanned
+*/
+struct survey_event {
+ struct wlan_bssid_ex bss;
+};
+
+/*
+Used to report that the requested site survey has been done.
+
+bss_cnt indicates the number of bss that has been reported.
+
+
+*/
+struct surveydone_event {
+	unsigned int bss_cnt;
+};
+
+/*
+Used to report the link result of joining the given bss
+
+
+join_res:
+-1: authentication fail
+-2: association fail
+> 0: TID
+
+*/
+struct joinbss_event {
+ struct wlan_network network;
+};
+
+/*
+Used to report that a given STA has joined the created BSS.
+It is used in AP/Ad-HoC(M) mode.
+*/
+
+struct stassoc_event {
+ unsigned char macaddr[6];
+ unsigned char rsvd[2];
+ int cam_id;
+};
+
+struct stadel_event {
+ unsigned char macaddr[6];
+ unsigned char rsvd[2]; /* for reason */
+ int mac_id;
+};
+
+struct addba_event {
+ unsigned int tid;
+};
+
+#define GEN_EVT_CODE(event) event ## _EVT_
+
+struct fwevent {
+ u32 parmsize;
+ void (*event_callback)(struct adapter *dev, u8 *pbuf);
+};
+
+#define C2HEVENT_SZ 32
+
+struct event_node {
+ unsigned char *node;
+ unsigned char evt_code;
+ unsigned short evt_sz;
+ int *caller_ff_tail;
+ int caller_ff_sz;
+};
+
+struct c2hevent_queue {
+ int head;
+ int tail;
+ struct event_node nodes[C2HEVENT_SZ];
+ unsigned char seq;
+};
+
+#define NETWORK_QUEUE_SZ 4
+
+struct network_queue {
+ int head;
+ int tail;
+ struct wlan_bssid_ex networks[NETWORK_QUEUE_SZ];
+};
+
+#endif /* _RTW_EVENT_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_ht.h b/drivers/staging/rtl8188eu/include/rtw_ht.h
new file mode 100644
index 00000000000..beb210b3708
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_ht.h
@@ -0,0 +1,44 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_HT_H_
+#define _RTW_HT_H_
+
+#include <osdep_service.h>
+#include "wifi.h"
+
+struct ht_priv {
+ u32 ht_option;
+	u32 ampdu_enable;	/* for enabling Tx A-MPDU */
+	u32 tx_amsdu_enable;	/* for enabling Tx A-MSDU */
+	u32 tx_amdsu_maxlen;	/* 1: 8k, 0: 4k; default: 8k, for tx */
+ u32 rx_ampdu_maxlen; /* for rx reordering ctrl win_sz,
+ * updated when join_callback. */
+ u8 bwmode;/* */
+ u8 ch_offset;/* PRIME_CHNL_OFFSET */
+ u8 sgi;/* short GI */
+
+ /* for processing Tx A-MPDU */
+ u8 agg_enable_bitmap;
+ u8 candidate_tid_bitmap;
+
+ struct rtw_ieee80211_ht_cap ht_cap;
+};
+
+#endif /* _RTW_HT_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_io.h b/drivers/staging/rtl8188eu/include/rtw_io.h
new file mode 100644
index 00000000000..eb6f0e550ac
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_io.h
@@ -0,0 +1,387 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#ifndef _RTW_IO_H_
+#define _RTW_IO_H_
+
+#include <osdep_service.h>
+#include <osdep_intf.h>
+
+#include <asm/byteorder.h>
+#include <linux/semaphore.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+
+#define rtw_usb_buffer_alloc(dev, size, dma) \
+ usb_alloc_coherent((dev), (size), (in_interrupt() ? \
+ GFP_ATOMIC : GFP_KERNEL), (dma))
+#define rtw_usb_buffer_free(dev, size, addr, dma) \
+ usb_free_coherent((dev), (size), (addr), (dma))
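+
+/* Hedged usage sketch only; "pusbd", "buf" and "dma" are assumed local names
+ * and are not declared in this header. The GFP flags are picked automatically
+ * based on in_interrupt():
+ *
+ *	dma_addr_t dma;
+ *	u8 *buf = rtw_usb_buffer_alloc(pusbd, 512, &dma);
+ *
+ *	if (buf) {
+ *		...
+ *		rtw_usb_buffer_free(pusbd, 512, buf, dma);
+ *	}
+ */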
+
+#define NUM_IOREQ 8
+
+#define MAX_PROT_SZ (64-16)
+
+#define _IOREADY 0
+#define _IO_WAIT_COMPLETE 1
+#define _IO_WAIT_RSP 2
+
+/* IO COMMAND TYPE */
+#define _IOSZ_MASK_ (0x7F)
+#define _IO_WRITE_ BIT(7)
+#define _IO_FIXED_ BIT(8)
+#define _IO_BURST_ BIT(9)
+#define _IO_BYTE_ BIT(10)
+#define _IO_HW_ BIT(11)
+#define _IO_WORD_ BIT(12)
+#define _IO_SYNC_ BIT(13)
+#define _IO_CMDMASK_ (0x1F80)
+
+/*
+ For prompt-mode access, the caller shall free the io_req;
+ otherwise, the io_handler will free it.
+*/
+
+/* IO STATUS TYPE */
+#define _IO_ERR_ BIT(2)
+#define _IO_SUCCESS_ BIT(1)
+#define _IO_DONE_ BIT(0)
+
+#define IO_RD32 (_IO_SYNC_ | _IO_WORD_)
+#define IO_RD16 (_IO_SYNC_ | _IO_HW_)
+#define IO_RD8 (_IO_SYNC_ | _IO_BYTE_)
+
+#define IO_RD32_ASYNC (_IO_WORD_)
+#define IO_RD16_ASYNC (_IO_HW_)
+#define IO_RD8_ASYNC (_IO_BYTE_)
+
+#define IO_WR32 (_IO_WRITE_ | _IO_SYNC_ | _IO_WORD_)
+#define IO_WR16 (_IO_WRITE_ | _IO_SYNC_ | _IO_HW_)
+#define IO_WR8 (_IO_WRITE_ | _IO_SYNC_ | _IO_BYTE_)
+
+#define IO_WR32_ASYNC (_IO_WRITE_ | _IO_WORD_)
+#define IO_WR16_ASYNC (_IO_WRITE_ | _IO_HW_)
+#define IO_WR8_ASYNC (_IO_WRITE_ | _IO_BYTE_)
+
+/*
+ Only synchronous burst access is provided.
+*/
+
+#define IO_WR_BURST(x) \
+ (_IO_WRITE_ | _IO_SYNC_ | _IO_BURST_ | ((x) & _IOSZ_MASK_))
+#define IO_RD_BURST(x) \
+ (_IO_SYNC_ | _IO_BURST_ | ((x) & _IOSZ_MASK_))
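+/* Worked example of how these command words compose (values follow from the
+ * BIT() definitions above and are illustrative only):
+ *
+ *	IO_WR32         = _IO_WRITE_ | _IO_SYNC_ | _IO_WORD_       = 0x3080
+ *	IO_WR_BURST(16) = _IO_WRITE_ | _IO_SYNC_ | _IO_BURST_ | 16 = 0x2290
+ *
+ * For burst commands the low 7 bits (_IOSZ_MASK_) carry the transfer size.
+ */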
+
+/* Below are the intf_option bit definitions. */
+
+#define _INTF_ASYNC_ BIT(0) /* support async io */
+
+struct intf_priv;
+struct intf_hdl;
+struct io_queue;
+
+struct _io_ops {
+ u8 (*_read8)(struct intf_hdl *pintfhdl, u32 addr);
+ u16 (*_read16)(struct intf_hdl *pintfhdl, u32 addr);
+ u32 (*_read32)(struct intf_hdl *pintfhdl, u32 addr);
+ int (*_write8)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
+ int (*_write16)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
+ int (*_write32)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
+ int (*_writeN)(struct intf_hdl *pintfhdl, u32 addr, u32 length,
+ u8 *pdata);
+ int (*_write8_async)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
+ int (*_write16_async)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
+ int (*_write32_async)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
+ void (*_read_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
+ u8 *pmem);
+ void (*_write_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
+ u8 *pmem);
+ void (*_sync_irp_protocol_rw)(struct io_queue *pio_q);
+ u32 (*_read_interrupt)(struct intf_hdl *pintfhdl, u32 addr);
+ u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
+ u8 *pmem);
+ u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
+ u8 *pmem);
+ u32 (*_write_scsi)(struct intf_hdl *pintfhdl, u32 cnt, u8 *pmem);
+ void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
+ void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
+};
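+
+/* Minimal sketch of how a bus back-end registers its accessors; the
+ * usb_read8()/usb_write8() names are assumptions for illustration and are
+ * not declared in this header:
+ *
+ *	static void usb_set_intf_ops(struct _io_ops *pops)
+ *	{
+ *		memset(pops, 0, sizeof(*pops));
+ *		pops->_read8 = usb_read8;
+ *		pops->_write8 = usb_write8;
+ *	}
+ *
+ * Such a function is what rtw_init_io_priv() expects as its set_intf_ops
+ * argument (declared later in this header).
+ */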
+
+struct io_req {
+ struct list_head list;
+ u32 addr;
+ u32 val;
+ u32 command;
+ u32 status;
+ u8 *pbuf;
+ struct semaphore sema;
+
+ void (*_async_io_callback)(struct adapter *padater,
+ struct io_req *pio_req, u8 *cnxt);
+ u8 *cnxt;
+};
+
+struct intf_hdl {
+ struct adapter *padapter;
+ struct dvobj_priv *pintf_dev;
+ struct _io_ops io_ops;
+};
+
+struct reg_protocol_rd {
+#ifdef __LITTLE_ENDIAN
+ /* DW1 */
+ u32 NumOfTrans:4;
+ u32 Reserved1:4;
+ u32 Reserved2:24;
+ /* DW2 */
+ u32 ByteCount:7;
+ u32 WriteEnable:1; /* 0:read, 1:write */
+ u32 FixOrContinuous:1; /* 0:continuous, 1: Fix */
+ u32 BurstMode:1;
+ u32 Byte1Access:1;
+ u32 Byte2Access:1;
+ u32 Byte4Access:1;
+ u32 Reserved3:3;
+ u32 Reserved4:16;
+ /* DW3 */
+ u32 BusAddress;
+ /* DW4 */
+ /* u32 Value; */
+#else
+/* DW1 */
+ u32 Reserved1:4;
+ u32 NumOfTrans:4;
+ u32 Reserved2:24;
+ /* DW2 */
+ u32 WriteEnable:1;
+ u32 ByteCount:7;
+ u32 Reserved3:3;
+ u32 Byte4Access:1;
+
+ u32 Byte2Access:1;
+ u32 Byte1Access:1;
+ u32 BurstMode:1;
+ u32 FixOrContinuous:1;
+ u32 Reserved4:16;
+ /* DW3 */
+ u32 BusAddress;
+
+ /* DW4 */
+#endif
+};
+
+struct reg_protocol_wt {
+#ifdef __LITTLE_ENDIAN
+ /* DW1 */
+ u32 NumOfTrans:4;
+ u32 Reserved1:4;
+ u32 Reserved2:24;
+ /* DW2 */
+ u32 ByteCount:7;
+ u32 WriteEnable:1; /* 0:read, 1:write */
+ u32 FixOrContinuous:1; /* 0:continuous, 1: Fix */
+ u32 BurstMode:1;
+ u32 Byte1Access:1;
+ u32 Byte2Access:1;
+ u32 Byte4Access:1;
+ u32 Reserved3:3;
+ u32 Reserved4:16;
+ /* DW3 */
+ u32 BusAddress;
+ /* DW4 */
+ u32 Value;
+#else
+ /* DW1 */
+ u32 Reserved1 :4;
+ u32 NumOfTrans:4;
+ u32 Reserved2:24;
+ /* DW2 */
+ u32 WriteEnable:1;
+ u32 ByteCount:7;
+ u32 Reserved3:3;
+ u32 Byte4Access:1;
+ u32 Byte2Access:1;
+ u32 Byte1Access:1;
+ u32 BurstMode:1;
+ u32 FixOrContinuous:1;
+ u32 Reserved4:16;
+ /* DW3 */
+ u32 BusAddress;
+ /* DW4 */
+ u32 Value;
+#endif
+};
+
+/*
+Below is the data structure used by _io_handler
+*/
+
+struct io_queue {
+ spinlock_t lock;
+ struct list_head free_ioreqs;
+ struct list_head pending; /* The io_req list that will be served
+ * in the single protocol read/write.*/
+ struct list_head processing;
+ u8 *free_ioreqs_buf; /* 4-byte aligned */
+ u8 *pallocated_free_ioreqs_buf;
+ struct intf_hdl intf;
+};
+
+struct io_priv {
+ struct adapter *padapter;
+ struct intf_hdl intf;
+};
+
+uint ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue);
+void sync_ioreq_enqueue(struct io_req *preq,struct io_queue *ioqueue);
+uint sync_ioreq_flush(struct adapter *adapter, struct io_queue *ioqueue);
+uint free_ioreq(struct io_req *preq, struct io_queue *pio_queue);
+struct io_req *alloc_ioreq(struct io_queue *pio_q);
+
+uint register_intf_hdl(u8 *dev, struct intf_hdl *pintfhdl);
+void unregister_intf_hdl(struct intf_hdl *pintfhdl);
+
+void _rtw_attrib_read(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+void _rtw_attrib_write(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+
+u8 _rtw_read8(struct adapter *adapter, u32 addr);
+u16 _rtw_read16(struct adapter *adapter, u32 addr);
+u32 _rtw_read32(struct adapter *adapter, u32 addr);
+void _rtw_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+void _rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+void _rtw_read_port_cancel(struct adapter *adapter);
+
+int _rtw_write8(struct adapter *adapter, u32 addr, u8 val);
+int _rtw_write16(struct adapter *adapter, u32 addr, u16 val);
+int _rtw_write32(struct adapter *adapter, u32 addr, u32 val);
+int _rtw_writeN(struct adapter *adapter, u32 addr, u32 length, u8 *pdata);
+
+int _rtw_write8_async(struct adapter *adapter, u32 addr, u8 val);
+int _rtw_write16_async(struct adapter *adapter, u32 addr, u16 val);
+int _rtw_write32_async(struct adapter *adapter, u32 addr, u32 val);
+
+void _rtw_write_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+u32 _rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+u32 _rtw_write_port_and_wait(struct adapter *adapter, u32 addr, u32 cnt,
+ u8 *pmem, int timeout_ms);
+void _rtw_write_port_cancel(struct adapter *adapter);
+
+#define rtw_read8(adapter, addr) _rtw_read8((adapter), (addr))
+#define rtw_read16(adapter, addr) _rtw_read16((adapter), (addr))
+#define rtw_read32(adapter, addr) _rtw_read32((adapter), (addr))
+#define rtw_read_mem(adapter, addr, cnt, mem) \
+ _rtw_read_mem((adapter), (addr), (cnt), (mem))
+#define rtw_read_port(adapter, addr, cnt, mem) \
+ _rtw_read_port((adapter), (addr), (cnt), (mem))
+#define rtw_read_port_cancel(adapter) _rtw_read_port_cancel((adapter))
+
+#define rtw_write8(adapter, addr, val) \
+ _rtw_write8((adapter), (addr), (val))
+#define rtw_write16(adapter, addr, val) \
+ _rtw_write16((adapter), (addr), (val))
+#define rtw_write32(adapter, addr, val) \
+ _rtw_write32((adapter), (addr), (val))
+#define rtw_writeN(adapter, addr, length, data) \
+ _rtw_writeN((adapter), (addr), (length), (data))
+#define rtw_write8_async(adapter, addr, val) \
+ _rtw_write8_async((adapter), (addr), (val))
+#define rtw_write16_async(adapter, addr, val) \
+ _rtw_write16_async((adapter), (addr), (val))
+#define rtw_write32_async(adapter, addr, val) \
+ _rtw_write32_async((adapter), (addr), (val))
+#define rtw_write_mem(adapter, addr, cnt, mem) \
+ _rtw_write_mem((adapter), (addr), (cnt), (mem))
+#define rtw_write_port(adapter, addr, cnt, mem) \
+ _rtw_write_port((adapter), (addr), (cnt), (mem))
+#define rtw_write_port_and_wait(adapter, addr, cnt, mem, timeout_ms) \
+ _rtw_write_port_and_wait((adapter), (addr), (cnt), (mem), (timeout_ms))
+#define rtw_write_port_cancel(adapter) _rtw_write_port_cancel((adapter))
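+
+/* Illustrative register access only; 0x0100 is an arbitrary example offset,
+ * not a documented register of this chip:
+ *
+ *	u8 val = rtw_read8(adapter, 0x0100);
+ *	rtw_write8(adapter, 0x0100, val | BIT(0));
+ */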
+
+void rtw_write_scsi(struct adapter *adapter, u32 cnt, u8 *pmem);
+
+/* ioreq */
+void ioreq_read8(struct adapter *adapter, u32 addr, u8 *pval);
+void ioreq_read16(struct adapter *adapter, u32 addr, u16 *pval);
+void ioreq_read32(struct adapter *adapter, u32 addr, u32 *pval);
+void ioreq_write8(struct adapter *adapter, u32 addr, u8 val);
+void ioreq_write16(struct adapter *adapter, u32 addr, u16 val);
+void ioreq_write32(struct adapter *adapter, u32 addr, u32 val);
+
+uint async_read8(struct adapter *adapter, u32 addr, u8 *pbuff,
+ void (*_async_io_callback)(struct adapter *padater,
+ struct io_req *pio_req,
+ u8 *cnxt), u8 *cnxt);
+uint async_read16(struct adapter *adapter, u32 addr, u8 *pbuff,
+ void (*_async_io_callback)(struct adapter *padater,
+ struct io_req *pio_req,
+ u8 *cnxt), u8 *cnxt);
+uint async_read32(struct adapter *adapter, u32 addr, u8 *pbuff,
+ void (*_async_io_callback)(struct adapter *padater,
+ struct io_req *pio_req,
+ u8 *cnxt), u8 *cnxt);
+
+void async_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+void async_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+
+void async_write8(struct adapter *adapter, u32 addr, u8 val,
+ void (*_async_io_callback)(struct adapter *padater,
+ struct io_req *pio_req,
+ u8 *cnxt), u8 *cnxt);
+void async_write16(struct adapter *adapter, u32 addr, u16 val,
+ void (*_async_io_callback)(struct adapter *padater,
+ struct io_req *pio_req,
+ u8 *cnxt), u8 *cnxt);
+void async_write32(struct adapter *adapter, u32 addr, u32 val,
+ void (*_async_io_callback)(struct adapter *padater,
+ struct io_req *pio_req,
+ u8 *cnxt), u8 *cnxt);
+
+void async_write_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+void async_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+
+int rtw_init_io_priv(struct adapter *padapter,
+ void (*set_intf_ops)(struct _io_ops *pops));
+
+uint alloc_io_queue(struct adapter *adapter);
+void free_io_queue(struct adapter *adapter);
+void async_bus_io(struct io_queue *pio_q);
+void bus_sync_io(struct io_queue *pio_q);
+u32 _ioreq2rwmem(struct io_queue *pio_q);
+void dev_power_down(struct adapter *Adapter, u8 bpwrup);
+
+#define PlatformEFIOWrite1Byte(_a,_b,_c) \
+ rtw_write8(_a,_b,_c)
+#define PlatformEFIOWrite2Byte(_a,_b,_c) \
+ rtw_write16(_a,_b,_c)
+#define PlatformEFIOWrite4Byte(_a,_b,_c) \
+ rtw_write32(_a,_b,_c)
+
+#define PlatformEFIORead1Byte(_a,_b) \
+ rtw_read8(_a,_b)
+#define PlatformEFIORead2Byte(_a,_b) \
+ rtw_read16(_a,_b)
+#define PlatformEFIORead4Byte(_a,_b) \
+ rtw_read32(_a,_b)
+
+#endif /* _RTW_IO_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl.h b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
new file mode 100644
index 00000000000..8772d1d178c
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
@@ -0,0 +1,124 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_IOCTL_H_
+#define _RTW_IOCTL_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+
+#ifndef OID_802_11_CAPABILITY
+ #define OID_802_11_CAPABILITY 0x0d010122
+#endif
+
+#ifndef OID_802_11_PMKID
+ #define OID_802_11_PMKID 0x0d010123
+#endif
+
+
+/* For DDK-defined OIDs */
+#define OID_NDIS_SEG1 0x00010100
+#define OID_NDIS_SEG2 0x00010200
+#define OID_NDIS_SEG3 0x00020100
+#define OID_NDIS_SEG4 0x01010100
+#define OID_NDIS_SEG5 0x01020100
+#define OID_NDIS_SEG6 0x01020200
+#define OID_NDIS_SEG7 0xFD010100
+#define OID_NDIS_SEG8 0x0D010100
+#define OID_NDIS_SEG9 0x0D010200
+#define OID_NDIS_SEG10 0x0D020200
+
+#define SZ_OID_NDIS_SEG1 23
+#define SZ_OID_NDIS_SEG2 3
+#define SZ_OID_NDIS_SEG3 6
+#define SZ_OID_NDIS_SEG4 6
+#define SZ_OID_NDIS_SEG5 4
+#define SZ_OID_NDIS_SEG6 8
+#define SZ_OID_NDIS_SEG7 7
+#define SZ_OID_NDIS_SEG8 36
+#define SZ_OID_NDIS_SEG9 24
+#define SZ_OID_NDIS_SEG10 19
+
+/* For Realtek-defined OIDs */
+#define OID_MP_SEG1 0xFF871100
+#define OID_MP_SEG2 0xFF818000
+
+#define OID_MP_SEG3 0xFF818700
+#define OID_MP_SEG4 0xFF011100
+
+#define DEBUG_OID(dbg, str) \
+ do { \
+ if (!(dbg)) { \
+ RT_TRACE(_module_rtl871x_ioctl_c_, _drv_info_, \
+ ("%s(%d): %s", __func__, __LINE__, str)); \
+ } \
+ } while (0)
+
+enum oid_type {
+ QUERY_OID,
+ SET_OID
+};
+
+struct oid_funs_node {
+ unsigned int oid_start; /* the starting number for OID */
+ unsigned int oid_end; /* the ending number for OID */
+ struct oid_obj_priv *node_array;
+ unsigned int array_sz; /* the size of node_array */
+ int query_counter; /* count the number of query hits for this segment */
+ int set_counter; /* count the number of set hits for this segment */
+};
+
+struct oid_par_priv {
+ void *adapter_context;
+ NDIS_OID oid;
+ void *information_buf;
+ u32 information_buf_len;
+ u32 *bytes_rw;
+ u32 *bytes_needed;
+ enum oid_type type_of_oid;
+ u32 dbg;
+};
+
+struct oid_obj_priv {
+ unsigned char dbg; /* 0: without OID debug message
+ * 1: with OID debug message */
+ int (*oidfuns)(struct oid_par_priv *poid_par_priv);
+};
+
+#if defined(_RTW_MP_IOCTL_C_)
+static int oid_null_function(struct oid_par_priv *poid_par_priv) {
+ _func_enter_;
+ _func_exit_;
+ return NDIS_STATUS_SUCCESS;
+}
+#endif
+
+extern struct iw_handler_def rtw_handlers_def;
+
+int drv_query_info(struct net_device *miniportadaptercontext, NDIS_OID oid,
+ void *informationbuffer, u32 informationbufferlength,
+ u32 *byteswritten, u32 *bytesneeded);
+
+int drv_set_info(struct net_device *MiniportAdapterContext,
+ NDIS_OID oid, void *informationbuffer,
+ u32 informationbufferlength, u32 *bytesread,
+ u32 *bytesneeded);
+
+extern int ui_pid[3];
+
+#endif /* _RTW_IOCTL_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h b/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h
new file mode 100644
index 00000000000..8fa3858cb77
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h
@@ -0,0 +1,79 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_IOCTL_RTL_H_
+#define _RTW_IOCTL_RTL_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+/* oid_rtl_seg_01_01 ************** */
+int oid_rt_get_signal_quality_hdl(struct oid_par_priv *poid_par_priv);/* 84 */
+int oid_rt_get_small_packet_crc_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_middle_packet_crc_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_large_packet_crc_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_tx_retry_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_rx_retry_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_rx_total_packet_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_tx_beacon_ok_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_tx_beacon_err_hdl(struct oid_par_priv *poid_par_priv);
+
+int oid_rt_pro_set_fw_dig_state_hdl(struct oid_par_priv *poid_par_priv);/* 8a */
+int oid_rt_pro_set_fw_ra_state_hdl(struct oid_par_priv *poid_par_priv); /* 8b */
+
+int oid_rt_get_rx_icv_err_hdl(struct oid_par_priv *poid_par_priv);/* 93 */
+int oid_rt_set_encryption_algorithm_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_preamble_mode_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_ap_ip_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_channelplan_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_set_channelplan_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_set_preamble_mode_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_set_bcn_intvl_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_dedicate_probe_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_total_tx_bytes_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_total_rx_bytes_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_current_tx_power_level_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_enc_key_mismatch_count_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_enc_key_match_count_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_channel_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_hardware_radio_off_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_key_mismatch_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_supported_wireless_mode_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_channel_list_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_scan_in_progress_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_forced_data_rate_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_wireless_mode_for_scan_list_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_get_bss_wireless_mode_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_scan_with_magic_packet_hdl(struct oid_par_priv *poid_par_priv);
+
+/* oid_rtl_seg_01_03 section start ************** */
+int oid_rt_ap_get_associated_station_list_hdl(struct oid_par_priv *priv);
+int oid_rt_ap_switch_into_ap_mode_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_ap_supported_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_ap_set_passphrase_hdl(struct oid_par_priv *poid_par_priv);
+
+/* oid_rtl_seg_01_11 */
+int oid_rt_pro_rf_write_registry_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_pro_rf_read_registry_hdl(struct oid_par_priv *poid_par_priv);
+
+/* oid_rtl_seg_03_00 section start ************** */
+int oid_rt_get_connect_state_hdl(struct oid_par_priv *poid_par_priv);
+int oid_rt_set_default_key_id_hdl(struct oid_par_priv *poid_par_priv);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h b/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
new file mode 100644
index 00000000000..49efb23747d
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
@@ -0,0 +1,50 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_IOCTL_SET_H_
+#define __RTW_IOCTL_SET_H_
+
+#include <drv_types.h>
+
+
+typedef u8 NDIS_802_11_PMKID_VALUE[16];
+
+u8 rtw_set_802_11_add_key(struct adapter *adapt, struct ndis_802_11_key *key);
+u8 rtw_set_802_11_authentication_mode(struct adapter *adapt,
+ enum ndis_802_11_auth_mode authmode);
+u8 rtw_set_802_11_bssid(struct adapter *adapter, u8 *bssid);
+u8 rtw_set_802_11_add_wep(struct adapter *adapter, struct ndis_802_11_wep *wep);
+u8 rtw_set_802_11_disassociate(struct adapter *adapter);
+u8 rtw_set_802_11_bssid_list_scan(struct adapter *adapter,
+ struct ndis_802_11_ssid *pssid,
+ int ssid_max_num);
+u8 rtw_set_802_11_infrastructure_mode(struct adapter *adapter,
+ enum ndis_802_11_network_infra type);
+u8 rtw_set_802_11_remove_wep(struct adapter *adapter, u32 keyindex);
+u8 rtw_set_802_11_ssid(struct adapter *adapt, struct ndis_802_11_ssid *ssid);
+u8 rtw_set_802_11_remove_key(struct adapter *adapt,
+ struct ndis_802_11_remove_key *key);
+u8 rtw_validate_ssid(struct ndis_802_11_ssid *ssid);
+u16 rtw_get_cur_max_rate(struct adapter *adapter);
+int rtw_set_scan_mode(struct adapter *adapter, enum rt_scan_type scan_mode);
+int rtw_set_channel_plan(struct adapter *adapter, u8 channel_plan);
+int rtw_set_country(struct adapter *adapter, const char *country_code);
+int rtw_change_ifname(struct adapter *padapter, const char *ifname);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_iol.h b/drivers/staging/rtl8188eu/include/rtw_iol.h
new file mode 100644
index 00000000000..6949922baa6
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_iol.h
@@ -0,0 +1,84 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_IOL_H_
+#define __RTW_IOL_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#define IOREG_CMD_END_LEN 4
+
+struct ioreg_cfg {
+ u8 length;
+ u8 cmd_id;
+ __le16 address;
+ __le32 data;
+ __le32 mask;
+};
+
+enum ioreg_cmd {
+ IOREG_CMD_LLT = 0x01,
+ IOREG_CMD_REFUSE = 0x02,
+ IOREG_CMD_EFUSE_PATH = 0x03,
+ IOREG_CMD_WB_REG = 0x04,
+ IOREG_CMD_WW_REG = 0x05,
+ IOREG_CMD_WD_REG = 0x06,
+ IOREG_CMD_W_RF = 0x07,
+ IOREG_CMD_DELAY_US = 0x10,
+ IOREG_CMD_DELAY_MS = 0x11,
+ IOREG_CMD_END = 0xFF,
+};
+
+struct xmit_frame *rtw_IOL_accquire_xmit_frame(struct adapter *adapter);
+int rtw_IOL_append_cmds(struct xmit_frame *xmit_frame, u8 *IOL_cmds,
+ u32 cmd_len);
+int rtw_IOL_append_LLT_cmd(struct xmit_frame *xmit_frame, u8 page_boundary);
+int rtw_IOL_exec_cmds_sync(struct adapter *adapter,
+ struct xmit_frame *xmit_frame, u32 max_wating_ms,
+ u32 bndy_cnt);
+bool rtw_IOL_applied(struct adapter *adapter);
+int rtw_IOL_append_DELAY_US_cmd(struct xmit_frame *xmit_frame, u16 us);
+int rtw_IOL_append_DELAY_MS_cmd(struct xmit_frame *xmit_frame, u16 ms);
+int rtw_IOL_append_END_cmd(struct xmit_frame *xmit_frame);
+
+void read_efuse_from_txpktbuf(struct adapter *adapter, int bcnhead,
+ u8 *content, u16 *size);
+
+int _rtw_IOL_append_WB_cmd(struct xmit_frame *xmit_frame, u16 addr,
+ u8 value, u8 mask);
+int _rtw_IOL_append_WW_cmd(struct xmit_frame *xmit_frame, u16 addr,
+ u16 value, u16 mask);
+int _rtw_IOL_append_WD_cmd(struct xmit_frame *xmit_frame, u16 addr,
+ u32 value, u32 mask);
+int _rtw_IOL_append_WRF_cmd(struct xmit_frame *xmit_frame, u8 rf_path,
+ u16 addr, u32 value, u32 mask);
+#define rtw_IOL_append_WB_cmd(xmit_frame, addr, value, mask) \
+ _rtw_IOL_append_WB_cmd((xmit_frame), (addr), (value), (mask))
+#define rtw_IOL_append_WW_cmd(xmit_frame, addr, value, mask) \
+ _rtw_IOL_append_WW_cmd((xmit_frame), (addr), (value), (mask))
+#define rtw_IOL_append_WD_cmd(xmit_frame, addr, value, mask) \
+ _rtw_IOL_append_WD_cmd((xmit_frame), (addr), (value), (mask))
+#define rtw_IOL_append_WRF_cmd(xmit_frame, rf_path, addr, value, mask) \
+ _rtw_IOL_append_WRF_cmd((xmit_frame), (rf_path), (addr), (value), (mask))
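+
+/* Hedged usage sketch (error handling omitted; the register address 0x02,
+ * the value, the mask and the timeout are arbitrary example numbers):
+ *
+ *	struct xmit_frame *xf = rtw_IOL_accquire_xmit_frame(adapter);
+ *
+ *	rtw_IOL_append_WB_cmd(xf, 0x02, 0x01, 0xFF);
+ *	rtw_IOL_append_END_cmd(xf);
+ *	rtw_IOL_exec_cmds_sync(adapter, xf, 500, 0);
+ */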
+
+u8 rtw_IOL_cmd_boundary_handle(struct xmit_frame *pxmit_frame);
+void rtw_IOL_cmd_buf_dump(struct adapter *Adapter, int buf_len, u8 *pbuf);
+
+#endif /* __RTW_IOL_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_led.h b/drivers/staging/rtl8188eu/include/rtw_led.h
new file mode 100644
index 00000000000..2e618043d35
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_led.h
@@ -0,0 +1,197 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_LED_H_
+#define __RTW_LED_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#define MSECS(t) (HZ * ((t) / 1000) + (HZ * ((t) % 1000)) / 1000)
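+/* MSECS() converts milliseconds to jiffies; for example, with HZ = 100,
+ * MSECS(1500) = 100 * 1 + (100 * 500) / 1000 = 150 jiffies (the same result
+ * msecs_to_jiffies() would give for these values). */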
+
+#define LED_BLINK_NORMAL_INTERVAL 100
+#define LED_BLINK_SLOWLY_INTERVAL 200
+#define LED_BLINK_LONG_INTERVAL 400
+
+#define LED_BLINK_NO_LINK_INTERVAL_ALPHA 1000
+#define LED_BLINK_LINK_INTERVAL_ALPHA 500 /* 500 */
+#define LED_BLINK_SCAN_INTERVAL_ALPHA 180 /* 150 */
+#define LED_BLINK_FASTER_INTERVAL_ALPHA 50
+#define LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA 5000
+
+#define LED_BLINK_NORMAL_INTERVAL_NETTRONIX 100
+#define LED_BLINK_SLOWLY_INTERVAL_NETTRONIX 2000
+
+#define LED_BLINK_SLOWLY_INTERVAL_PORNET 1000
+#define LED_BLINK_NORMAL_INTERVAL_PORNET 100
+
+#define LED_BLINK_FAST_INTERVAL_BITLAND 30
+
+/* 060403, rcnjko: Customized for AzWave. */
+#define LED_CM2_BLINK_ON_INTERVAL 250
+#define LED_CM2_BLINK_OFF_INTERVAL 4750
+
+#define LED_CM8_BLINK_INTERVAL 500 /* for QMI */
+#define LED_CM8_BLINK_OFF_INTERVAL 3750 /* for QMI */
+
+/* 080124, lanhsin: Customized for RunTop */
+#define LED_RunTop_BLINK_INTERVAL 300
+
+/* 060421, rcnjko: Customized for Sercomm Printer Server case. */
+#define LED_CM3_BLINK_INTERVAL 1500
+
+enum LED_CTL_MODE {
+ LED_CTL_POWER_ON = 1,
+ LED_CTL_LINK = 2,
+ LED_CTL_NO_LINK = 3,
+ LED_CTL_TX = 4,
+ LED_CTL_RX = 5,
+ LED_CTL_SITE_SURVEY = 6,
+ LED_CTL_POWER_OFF = 7,
+ LED_CTL_START_TO_LINK = 8,
+ LED_CTL_START_WPS = 9,
+ LED_CTL_STOP_WPS = 10,
+ LED_CTL_START_WPS_BOTTON = 11, /* added for runtop */
+ LED_CTL_STOP_WPS_FAIL = 12, /* added for ALPHA */
+ LED_CTL_STOP_WPS_FAIL_OVERLAP = 13, /* added for BELKIN */
+ LED_CTL_CONNECTION_NO_TRANSFER = 14,
+};
+
+enum LED_STATE_871x {
+ LED_UNKNOWN = 0,
+ RTW_LED_ON = 1,
+ RTW_LED_OFF = 2,
+ LED_BLINK_NORMAL = 3,
+ LED_BLINK_SLOWLY = 4,
+ LED_BLINK_POWER_ON = 5,
+ LED_BLINK_SCAN = 6, /* LED is blinking during the scanning period;
+ * the number of times to blink depends on
+ * the time spent scanning. */
+ LED_BLINK_NO_LINK = 7, /* LED is blinking during no link state. */
+ LED_BLINK_StartToBlink = 8, /* Customized for the Sercomm Printer
+ * Server case */
+ LED_BLINK_TXRX = 9,
+ LED_BLINK_WPS = 10, /* LED is blinking during WPS communication */
+ LED_BLINK_WPS_STOP = 11, /* for ALPHA */
+ LED_BLINK_WPS_STOP_OVERLAP = 12, /* for BELKIN */
+ LED_BLINK_RUNTOP = 13, /* Customized for RunTop */
+ LED_BLINK_CAMEO = 14,
+ LED_BLINK_XAVI = 15,
+ LED_BLINK_ALWAYS_ON = 16,
+};
+
+enum LED_PIN_871x {
+ LED_PIN_NULL = 0,
+ LED_PIN_LED0 = 1,
+ LED_PIN_LED1 = 2,
+ LED_PIN_LED2 = 3,
+ LED_PIN_GPIO0 = 4,
+};
+
+struct LED_871x {
+ struct adapter *padapter;
+
+ enum LED_PIN_871x LedPin; /* Identify how to implement this
+ * SW led. */
+ enum LED_STATE_871x CurrLedState; /* Current LED state. */
+ enum LED_STATE_871x BlinkingLedState; /* Next state for blinking,
+ * either RTW_LED_ON or RTW_LED_OFF. */
+
+ u8 bLedOn; /* true if LED is ON, false if LED is OFF. */
+
+ u8 bLedBlinkInProgress; /* true if it is blinking, false otherwise. */
+
+ u8 bLedWPSBlinkInProgress;
+
+ u32 BlinkTimes; /* Number of times to toggle led state for blinking. */
+
+ struct timer_list BlinkTimer; /* Timer object for led blinking. */
+
+ u8 bSWLedCtrl;
+
+ /* ALPHA, added by chiyoko, 20090106 */
+ u8 bLedNoLinkBlinkInProgress;
+ u8 bLedLinkBlinkInProgress;
+ u8 bLedStartToLinkBlinkInProgress;
+ u8 bLedScanBlinkInProgress;
+ struct work_struct BlinkWorkItem; /* Workitem used by BlinkTimer to
+ * manipulate H/W to blink LED. */
+};
+
+#define IS_LED_WPS_BLINKING(_LED_871x) \
+ (((struct LED_871x *)_LED_871x)->CurrLedState == LED_BLINK_WPS || \
+ ((struct LED_871x *)_LED_871x)->CurrLedState == LED_BLINK_WPS_STOP || \
+ ((struct LED_871x *)_LED_871x)->bLedWPSBlinkInProgress)
+
+#define IS_LED_BLINKING(_LED_871x) \
+ (((struct LED_871x *)_LED_871x)->bLedWPSBlinkInProgress || \
+ ((struct LED_871x *)_LED_871x)->bLedScanBlinkInProgress)
+
+/* LED customization. */
+
+enum LED_STRATEGY_871x {
+ SW_LED_MODE0 = 0, /* SW control 1 LED via GPIO0. It is the default option.*/
+ SW_LED_MODE1 = 1, /* 2 LEDs, through LED0 and LED1. For ALPHA. */
+ SW_LED_MODE2 = 2, /* SW control 1 LED via GPIO0, customized for AzWave
+ * 8187 minicard. */
+ SW_LED_MODE3 = 3, /* SW control 1 LED via GPIO0, customized for Sercomm
+ * Printer Server case. */
+ SW_LED_MODE4 = 4, /* for Edimax / Belkin */
+ SW_LED_MODE5 = 5, /* for Sercomm / Belkin */
+ SW_LED_MODE6 = 6, /* for 88CU minicard, porting from ce SW_LED_MODE7 */
+ HW_LED = 50, /* HW control 2 LEDs, LED0 and LED1 (there are 4
+ * different control modes, see MAC.CONFIG1 for details.)*/
+ LED_ST_NONE = 99,
+};
+
+void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction);
+
+struct led_priv {
+ /* added for LED control */
+ struct LED_871x SwLed0;
+ struct LED_871x SwLed1;
+ enum LED_STRATEGY_871x LedStrategy;
+ u8 bRegUseLed;
+ void (*LedControlHandler)(struct adapter *padapter,
+ enum LED_CTL_MODE LedAction);
+ /* added for LED control */
+};
+
+#define rtw_led_control(adapt, action) \
+ do { \
+ if ((adapt)->ledpriv.LedControlHandler) \
+ (adapt)->ledpriv.LedControlHandler((adapt), (action)); \
+ } while (0)
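+
+/* Illustrative call sites only; padapter is assumed to be an initialized
+ * struct adapter:
+ *
+ *	rtw_led_control(padapter, LED_CTL_SITE_SURVEY);
+ *	rtw_led_control(padapter, LED_CTL_TX);
+ */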
+
+void BlinkTimerCallback(void *data);
+void BlinkWorkItemCallback(struct work_struct *work);
+
+void ResetLedStatus(struct LED_871x *pLed);
+
+void InitLed871x(struct adapter *padapter, struct LED_871x *pLed,
+ enum LED_PIN_871x LedPin);
+
+void DeInitLed871x(struct LED_871x *pLed);
+
+/* hal... */
+void BlinkHandler(struct LED_871x *pLed);
+void SwLedOn(struct adapter *padapter, struct LED_871x *pLed);
+void SwLedOff(struct adapter *padapter, struct LED_871x *pLed);
+
+#endif /* __RTW_LED_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
new file mode 100644
index 00000000000..22538e61695
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -0,0 +1,655 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_MLME_H_
+#define __RTW_MLME_H_
+
+#include <osdep_service.h>
+#include <mlme_osdep.h>
+#include <drv_types.h>
+#include <wlan_bssdef.h>
+
+#define MAX_BSS_CNT 128
+#define MAX_JOIN_TIMEOUT 6500
+
+/* Increase the scanning timeout because of increasing the SURVEY_TO value. */
+
+#define SCANNING_TIMEOUT 8000
+
+#define SCAN_INTERVAL (30) /* unit:2sec, 30*2=60sec */
+
+#define SCANQUEUE_LIFETIME 20 /* unit:sec */
+
+#define WIFI_NULL_STATE 0x00000000
+
+#define WIFI_ASOC_STATE 0x00000001 /* Under Linked state */
+#define WIFI_REASOC_STATE 0x00000002
+#define WIFI_SLEEP_STATE 0x00000004
+#define WIFI_STATION_STATE 0x00000008
+
+#define WIFI_AP_STATE 0x00000010
+#define WIFI_ADHOC_STATE 0x00000020
+#define WIFI_ADHOC_MASTER_STATE 0x00000040
+#define WIFI_UNDER_LINKING 0x00000080
+
+#define WIFI_UNDER_WPS 0x00000100
+#define WIFI_STA_ALIVE_CHK_STATE 0x00000400
+#define WIFI_SITE_MONITOR 0x00000800 /* to indicate the station is under site surveying */
+
+#define WIFI_MP_STATE 0x00010000
+#define WIFI_MP_CTX_BACKGROUND 0x00020000 /* in continuous tx background */
+#define WIFI_MP_CTX_ST 0x00040000 /* in continuous tx with single-tone */
+#define WIFI_MP_CTX_BACKGROUND_PENDING 0x00080000 /* pending in continuous tx background due to running out of skbs */
+#define WIFI_MP_CTX_CCK_HW 0x00100000 /* in continuous tx */
+#define WIFI_MP_CTX_CCK_CS 0x00200000 /* in continuous tx with carrier suppression */
+#define WIFI_MP_LPBK_STATE 0x00400000
+
+#define _FW_UNDER_LINKING WIFI_UNDER_LINKING
+#define _FW_LINKED WIFI_ASOC_STATE
+#define _FW_UNDER_SURVEY WIFI_SITE_MONITOR
+
+enum dot11AuthAlgrthmNum {
+ dot11AuthAlgrthm_Open = 0,
+ dot11AuthAlgrthm_Shared,
+ dot11AuthAlgrthm_8021X,
+ dot11AuthAlgrthm_Auto,
+ dot11AuthAlgrthm_WAPI,
+ dot11AuthAlgrthm_MaxNum
+};
+
+/* Scan type including active and passive scan. */
+enum rt_scan_type {
+ SCAN_PASSIVE,
+ SCAN_ACTIVE,
+ SCAN_MIX,
+};
+
+enum SCAN_RESULT_TYPE {
+ SCAN_RESULT_P2P_ONLY = 0, /* Will return all the P2P devices. */
+ SCAN_RESULT_ALL = 1, /* Will return all scanned devices,
+ * including APs. */
+ SCAN_RESULT_WFD_TYPE = 2 /* Will just return the correct WFD
+ * device. */
+ /* If this device is Miracast sink
+ * device, it will just return all the
+ * Miracast source devices. */
+};
+
+/*
+There are several "locks" in mlme_priv,
+since mlme_priv is a resource shared by many threads,
+such as ISR/callback functions, the OID handlers, and even timer functions.
+
+Each _queue already has its own lock.
+Other items are protected by mlme_priv.lock.
+
+To avoid possible deadlock, any thread trying to modify mlme_priv
+SHALL NOT hold more than one lock at a time!
+*/
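+
+/* Illustrative locking pattern only, using the helpers this driver already
+ * provides (pmlmepriv and irql are assumed locals):
+ *
+ *	unsigned long irql;
+ *
+ *	_enter_critical_bh(&pmlmepriv->lock, &irql);
+ *	set_fwstate(pmlmepriv, _FW_UNDER_SURVEY);
+ *	_exit_critical_bh(&pmlmepriv->lock, &irql);
+ */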
+
+#define traffic_threshold 10
+#define traffic_scan_period 500
+
+struct sitesurvey_ctrl {
+ u64 last_tx_pkts;
+ uint last_rx_pkts;
+ int traffic_busy;
+ struct timer_list sitesurvey_ctrl_timer;
+};
+
+struct rt_link_detect {
+ u32 NumTxOkInPeriod;
+ u32 NumRxOkInPeriod;
+ u32 NumRxUnicastOkInPeriod;
+ bool bBusyTraffic;
+ bool bTxBusyTraffic;
+ bool bRxBusyTraffic;
+ bool bHigherBusyTraffic; /* For interrupt migration purpose. */
+ bool bHigherBusyRxTraffic; /* We may disable Tx interrupt according
+ * to Rx traffic. */
+ bool bHigherBusyTxTraffic; /* We may disable Tx interrupt according
+ * to Tx traffic. */
+};
+
+struct profile_info {
+ u8 ssidlen;
+ u8 ssid[WLAN_SSID_MAXLEN];
+ u8 peermac[ETH_ALEN];
+};
+
+struct tx_invite_req_info {
+ u8 token;
+ u8 benable;
+ u8 go_ssid[WLAN_SSID_MAXLEN];
+ u8 ssidlen;
+ u8 go_bssid[ETH_ALEN];
+ u8 peer_macaddr[ETH_ALEN];
+ u8 operating_ch; /* This information will be set by using the
+ * p2p_set op_ch=x */
+ u8 peer_ch; /* The listen channel for peer P2P device */
+};
+
+struct tx_invite_resp_info {
+ u8 token; /* Used to record the dialog token of p2p invitation
+ * request frame. */
+};
+
+struct tx_provdisc_req_info {
+ u16 wps_config_method_request; /* Used when sending the
+ * provisioning request frame*/
+ u16 peer_channel_num[2]; /* The channel number on which the
+ * receiver operates. */
+ struct ndis_802_11_ssid ssid;
+ u8 peerDevAddr[ETH_ALEN]; /* Peer device address */
+ u8 peerIFAddr[ETH_ALEN]; /* Peer interface address */
+ u8 benable; /* Whether this provision discovery
+ * request frame is to be sent
+ * or not */
+};
+
+/* When the peer device issues prov_disc_req first, we should store the
+ * following information. */
+/* The UI must know this information to determine which config method the
+ * remote P2P device needs. */
+struct rx_provdisc_req_info {
+ u8 peerDevAddr[ETH_ALEN]; /* Peer device address */
+ u8 strconfig_method_desc_of_prov_disc_req[4]; /* description
+ * for the config method located in the provisioning
+ * discovery request frame. */
+};
+
+struct tx_nego_req_info {
+ u16 peer_channel_num[2]; /* The channel number. */
+ u8 peerDevAddr[ETH_ALEN]; /* Peer device address */
+ u8 benable; /* Whether this negotiation request
+ * frame is to be sent or not */
+};
+
+struct group_id_info {
+ u8 go_device_addr[ETH_ALEN]; /* The GO's device address of
+ * this P2P group */
+ u8 ssid[WLAN_SSID_MAXLEN]; /* The SSID of this P2P group */
+};
+
+struct scan_limit_info {
+ u8 scan_op_ch_only; /* When this flag is set, the driver
+ * should only scan the op. channel */
+ u8 operation_ch[2]; /* Store the op. chan of invitation */
+};
+
+struct wifidirect_info {
+ struct adapter *padapter;
+ struct timer_list find_phase_timer;
+ struct timer_list restore_p2p_state_timer;
+
+ /* Used to do the scanning. After confirming the peer is available,
+ * the driver transmits the P2P frame to the peer. */
+ struct timer_list pre_tx_scan_timer;
+ struct timer_list reset_ch_sitesurvey;
+ struct timer_list reset_ch_sitesurvey2; /* Just for resetting the scan
+ * limit function by using p2p nego */
+ struct tx_provdisc_req_info tx_prov_disc_info;
+ struct rx_provdisc_req_info rx_prov_disc_info;
+ struct tx_invite_req_info invitereq_info;
+ /* Store the profile information of persistent group */
+ struct profile_info profileinfo[P2P_MAX_PERSISTENT_GROUP_NUM];
+ struct tx_invite_resp_info inviteresp_info;
+ struct tx_nego_req_info nego_req_info;
+ /* Store the group id info when doing the group negotiation handshake. */
+ struct group_id_info groupid_info;
+ /* Used for get the limit scan channel from the Invitation procedure */
+ struct scan_limit_info rx_invitereq_info;
+ /* Used for get the limit scan chan from the P2P negotiation handshake*/
+ struct scan_limit_info p2p_info;
+ enum P2P_ROLE role;
+ enum P2P_STATE pre_p2p_state;
+ enum P2P_STATE p2p_state;
+ /* The device address should be the mac address of this device. */
+ u8 device_addr[ETH_ALEN];
+ u8 interface_addr[ETH_ALEN];
+ u8 social_chan[4];
+ u8 listen_channel;
+ u8 operating_channel;
+ u8 listen_dwell; /* This value should be between 1 and 3 */
+ u8 support_rate[8];
+ u8 p2p_wildcard_ssid[P2P_WILDCARD_SSID_LEN];
+ u8 intent; /* should only include the intent value. */
+ u8 p2p_peer_interface_addr[ETH_ALEN];
+ u8 p2p_peer_device_addr[ETH_ALEN];
+ u8 peer_intent; /* Included the intent value and tie breaker value. */
+ /* Device name for displaying on searching device screen */
+ u8 device_name[WPS_MAX_DEVICE_NAME_LEN];
+ u8 device_name_len;
+ u8 profileindex; /* Used to point to the index of profileinfo array */
+ u8 peer_operating_ch;
+ u8 find_phase_state_exchange_cnt;
+ /* The device password ID for group negotiation */
+ u16 device_password_id_for_nego;
+ u8 negotiation_dialog_token;
+ /* SSID information for group negotiation */
+ u8 nego_ssid[WLAN_SSID_MAXLEN];
+ u8 nego_ssidlen;
+ u8 p2p_group_ssid[WLAN_SSID_MAXLEN];
+ u8 p2p_group_ssid_len;
+ /* Flag to know if the persistent function should be supported or not.*/
+ u8 persistent_supported;
+ /* In the Sigma test, the Sigma will provide this enable from the
+ * sta_set_p2p CAPI. */
+ /* 0: disable */
+ /* 1: enable */
+ u8 session_available; /* Flag to set the WFD session available to
+ * enable or disable "by Sigma" */
+ /* In the Sigma test, the Sigma will disable the session available
+ * by using the sta_preset CAPI. */
+ /* 0: disable */
+ /* 1: enable */
+ u8 wfd_tdls_enable; /* Flag to enable or disable the TDLS by WFD Sigma*/
+ /* 0: disable */
+ /* 1: enable */
+ u8 wfd_tdls_weaksec; /* Flag to enable or disable the weak security
+ * function for TDLS by WFD Sigma */
+ /* 0: disable */
+ /* In this case, the driver can't issue the tdls
+ * setup request frame. */
+ /* 1: enable */
+ /* In this case, the driver can issue the tdls
+ * setup request frame */
+ /* even the current security is weak security. */
+
+ /* This field will store the WPS value (PIN value or PBC) that UI had
+ * got from the user. */
+ enum P2P_WPSINFO ui_got_wps_info;
+ u16 supported_wps_cm; /* This field describes the WPS config method
+ * which this driver supported. */
+ /* The value should be the combination of config
+ * method defined in page104 of WPS v2.0 spec.*/
+ /* This field will contain the length of body of P2P Channel List
+ * attribute of group negotiation response frame. */
+ uint channel_list_attr_len;
+ /* This field will contain the body of P2P Channel List attribute of
+ * group negotiation response frame. */
+ /* We will use the channel_cnt and channel_list fields when constructing
+ * the group negotiation confirm frame. */
+ u8 channel_list_attr[100];
+ enum P2P_PS_MODE p2p_ps_mode; /* indicate p2p ps mode */
+ enum P2P_PS_STATE p2p_ps_state; /* indicate p2p ps state */
+ u8 noa_index; /* Identifies an instance of Notice of Absence timing. */
+ u8 ctwindow; /* Client traffic window. A period of time in TU after TBTT. */
+ u8 opp_ps; /* opportunistic power save. */
+ u8 noa_num; /* number of NoA descriptor in P2P IE. */
+ u8 noa_count[P2P_MAX_NOA_NUM]; /* Count for owner, Type of client. */
+ /* Max duration for owner, preferred or min acceptable duration for
+ * client. */
+ u32 noa_duration[P2P_MAX_NOA_NUM];
+ /* Length of interval for owner, preferred or max acceptable interval
+ * of client. */
+ u32 noa_interval[P2P_MAX_NOA_NUM];
+ /* schedule expressed in terms of the lower 4 bytes of the TSF timer. */
+ u32 noa_start_time[P2P_MAX_NOA_NUM];
+};
+
+struct tdls_ss_record { /* signal strength record */
+ u8 macaddr[ETH_ALEN];
+ u8 RxPWDBAll;
+ u8 is_tdls_sta; /* true: direct link sta, false: else */
+};
+
+struct tdls_info {
+ u8 ap_prohibited;
+ uint setup_state;
+ u8 sta_cnt;
+ u8 sta_maximum; /* 1: the number of TDLS STAs equals (NUM_STA-1), i.e. the max direct-link count is reached; 0: otherwise */
+ struct tdls_ss_record ss_record;
+ u8 macid_index; /* macid entry that is ready to write */
+ u8 clear_cam; /* cam entry that is trying to clear, using it in direct link teardown */
+ u8 ch_sensing;
+ u8 cur_channel;
+ u8 candidate_ch;
+ u8 collect_pkt_num[MAX_CHANNEL_NUM];
+ spinlock_t cmd_lock;
+ spinlock_t hdl_lock;
+ u8 watchdog_count;
+ u8 dev_discovered; /* WFD_TDLS: for sigma test */
+ u8 enable;
+};
+
+struct mlme_priv {
+ spinlock_t lock;
+ int fw_state; /* shall we protect this variable? maybe not necessarily... */
+ u8 bScanInProcess;
+ u8 to_join; /* flag */
+ u8 to_roaming; /* roaming retry count */
+
+ u8 *nic_hdl;
+
+ u8 not_indic_disco;
+ struct list_head *pscanned;
+ struct __queue free_bss_pool;
+ struct __queue scanned_queue;
+ u8 *free_bss_buf;
+ u32 num_of_scanned;
+
+ struct ndis_802_11_ssid assoc_ssid;
+ u8 assoc_bssid[6];
+
+ struct wlan_network cur_network;
+
+ u32 scan_interval;
+
+ struct timer_list assoc_timer;
+
+ uint assoc_by_bssid;
+ uint assoc_by_rssi;
+
+ struct timer_list scan_to_timer; /* driver itself handles scan_timeout status. */
+ u32 scan_start_time; /* used to evaluate the time spent in scanning */
+
+ struct qos_priv qospriv;
+
+ /* Number of non-HT AP/stations */
+ int num_sta_no_ht;
+
+ /* Number of HT AP/stations 20 MHz */
+ /* int num_sta_ht_20mhz; */
+
+ int num_FortyMHzIntolerant;
+ struct ht_priv htpriv;
+ struct rt_link_detect LinkDetectInfo;
+ struct timer_list dynamic_chk_timer; /* dynamic/periodic check timer */
+
+ u8 key_mask; /* use for ips to set wep key after ips_leave */
+ u8 acm_mask; /* for wmm acm mask */
+ u8 ChannelPlan;
+ enum rt_scan_type scan_mode; /* active: 1, passive: 0 */
+
+ /* u8 probereq_wpsie[MAX_WPS_IE_LEN];added in probe req */
+ /* int probereq_wpsie_len; */
+ u8 *wps_probe_req_ie;
+ u32 wps_probe_req_ie_len;
+
+ u8 *assoc_req;
+ u32 assoc_req_len;
+ u8 *assoc_rsp;
+ u32 assoc_rsp_len;
+
+#if defined(CONFIG_88EU_AP_MODE)
+ /* Number of associated Non-ERP stations (i.e., stations using 802.11b
+ * in 802.11g BSS) */
+ int num_sta_non_erp;
+
+ /* Number of associated stations that do not support Short Slot Time */
+ int num_sta_no_short_slot_time;
+
+ /* Number of associated stations that do not support Short Preamble */
+ int num_sta_no_short_preamble;
+
+ int olbc; /* Overlapping Legacy BSS Condition */
+
+ /* Number of HT assoc sta that do not support greenfield */
+ int num_sta_ht_no_gf;
+
+ /* Number of associated non-HT stations */
+ /* int num_sta_no_ht; */
+
+ /* Number of HT associated stations 20 MHz */
+ int num_sta_ht_20mhz;
+
+ /* Overlapping BSS information */
+ int olbc_ht;
+
+ u16 ht_op_mode;
+
+ u8 *wps_beacon_ie;
+ /* u8 *wps_probe_req_ie; */
+ u8 *wps_probe_resp_ie;
+ u8 *wps_assoc_resp_ie;
+
+ u32 wps_beacon_ie_len;
+ u32 wps_probe_resp_ie_len;
+ u32 wps_assoc_resp_ie_len;
+
+ u8 *p2p_beacon_ie;
+ u8 *p2p_probe_req_ie;
+ u8 *p2p_probe_resp_ie;
+ u8 *p2p_go_probe_resp_ie; /* for GO */
+ u8 *p2p_assoc_req_ie;
+
+ u32 p2p_beacon_ie_len;
+ u32 p2p_probe_req_ie_len;
+ u32 p2p_probe_resp_ie_len;
+ u32 p2p_go_probe_resp_ie_len; /* for GO */
+ u32 p2p_assoc_req_ie_len;
+ spinlock_t bcn_update_lock;
+ u8 update_bcn;
+#endif /* if defined (CONFIG_88EU_AP_MODE) */
+};
+
+#ifdef CONFIG_88EU_AP_MODE
+
+struct hostapd_priv {
+ struct adapter *padapter;
+};
+
+int hostapd_mode_init(struct adapter *padapter);
+void hostapd_mode_unload(struct adapter *padapter);
+#endif
+
+extern unsigned char WPA_TKIP_CIPHER[4];
+extern unsigned char RSN_TKIP_CIPHER[4];
+extern unsigned char REALTEK_96B_IE[];
+extern unsigned char MCS_rate_2R[16];
+extern unsigned char MCS_rate_1R[16];
+
+void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf);
+void rtw_survey_event_callback(struct adapter *adapter, u8 *pbuf);
+void rtw_surveydone_event_callback(struct adapter *adapter, u8 *pbuf);
+void rtw_joinbss_event_callback(struct adapter *adapter, u8 *pbuf);
+void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf);
+void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf);
+void rtw_atimdone_event_callback(struct adapter *adapter, u8 *pbuf);
+void rtw_cpwm_event_callback(struct adapter *adapter, u8 *pbuf);
+void indicate_wx_scan_complete_event(struct adapter *padapter);
+void rtw_indicate_wx_assoc_event(struct adapter *padapter);
+void rtw_indicate_wx_disassoc_event(struct adapter *padapter);
+int event_thread(void *context);
+void rtw_join_timeout_handler(void *FunctionContext);
+void _rtw_scan_timeout_handler(void *FunctionContext);
+void rtw_free_network_queue(struct adapter *adapter, u8 isfreeall);
+int rtw_init_mlme_priv(struct adapter *adapter);
+void rtw_free_mlme_priv(struct mlme_priv *pmlmepriv);
+int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv);
+int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv,
+ int keyid, u8 set_tx);
+int rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv);
+
+static inline u8 *get_bssid(struct mlme_priv *pmlmepriv)
+{ /* if sta_mode:pmlmepriv->cur_network.network.MacAddress=> bssid */
+ /* if adhoc_mode:pmlmepriv->cur_network.network.MacAddress=> ibss mac address */
+ return pmlmepriv->cur_network.network.MacAddress;
+}
+
+static inline int check_fwstate(struct mlme_priv *pmlmepriv, int state)
+{
+ if (pmlmepriv->fw_state & state)
+ return true;
+
+ return false;
+}
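+
+/* fw_state is a bitmask, so several states can be tested in one call; a
+ * hedged example:
+ *
+ *	if (check_fwstate(pmlmepriv, _FW_LINKED | _FW_UNDER_SURVEY))
+ *		return;
+ */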
+
+static inline int get_fwstate(struct mlme_priv *pmlmepriv)
+{
+ return pmlmepriv->fw_state;
+}
+
+/*
+ * No limit on the calling context,
+ * therefore treat it as a critical section.
+ *
+ * NOTE:
+ * Before calling this function, the caller MUST already hold pmlmepriv->lock.
+ */
+static inline void set_fwstate(struct mlme_priv *pmlmepriv, int state)
+{
+ pmlmepriv->fw_state |= state;
+ /* FOR HW integration */
+ if (_FW_UNDER_SURVEY == state)
+ pmlmepriv->bScanInProcess = true;
+}
+
+static inline void _clr_fwstate_(struct mlme_priv *pmlmepriv, int state)
+{
+ pmlmepriv->fw_state &= ~state;
+ /* FOR HW integration */
+ if (_FW_UNDER_SURVEY == state)
+ pmlmepriv->bScanInProcess = false;
+}
+
+/*
+ * No limit on the calling context,
+ * therefore treat it as a critical section.
+ */
+static inline void clr_fwstate(struct mlme_priv *pmlmepriv, int state)
+{
+ unsigned long irql;
+
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+ if (check_fwstate(pmlmepriv, state) == true)
+ pmlmepriv->fw_state ^= state;
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+}
+
+static inline void clr_fwstate_ex(struct mlme_priv *pmlmepriv, int state)
+{
+ unsigned long irql;
+
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+ _clr_fwstate_(pmlmepriv, state);
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+}
+
+static inline void up_scanned_network(struct mlme_priv *pmlmepriv)
+{
+ unsigned long irql;
+
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+ pmlmepriv->num_of_scanned++;
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+}
+
+static inline void down_scanned_network(struct mlme_priv *pmlmepriv)
+{
+ unsigned long irql;
+
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+ pmlmepriv->num_of_scanned--;
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+}
+
+static inline void set_scanned_network_val(struct mlme_priv *pmlmepriv, int val)
+{
+ unsigned long irql;
+
+ _enter_critical_bh(&pmlmepriv->lock, &irql);
+ pmlmepriv->num_of_scanned = val;
+ _exit_critical_bh(&pmlmepriv->lock, &irql);
+}
+
+u16 rtw_get_capability(struct wlan_bssid_ex *bss);
+void rtw_update_scanned_network(struct adapter *adapter,
+ struct wlan_bssid_ex *target);
+void rtw_disconnect_hdl_under_linked(struct adapter *adapter,
+ struct sta_info *psta, u8 free_assoc);
+void rtw_generate_random_ibss(u8 *pibss);
+struct wlan_network *rtw_find_network(struct __queue *scanned_queue, u8 *addr);
+struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue);
+
+void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue);
+void rtw_indicate_disconnect(struct adapter *adapter);
+void rtw_indicate_connect(struct adapter *adapter);
+void rtw_indicate_scan_done(struct adapter *padapter, bool aborted);
+void rtw_scan_abort(struct adapter *adapter);
+
+int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie,
+ uint in_len);
+int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie,
+ uint in_len, uint initial_out_len);
+void rtw_init_registrypriv_dev_network(struct adapter *adapter);
+
+void rtw_update_registrypriv_dev_network(struct adapter *adapter);
+
+void rtw_get_encrypt_decrypt_from_registrypriv(struct adapter *adapter);
+
+void _rtw_join_timeout_handler(struct adapter *adapter);
+void rtw_scan_timeout_handler(struct adapter *adapter);
+
+void rtw_dynamic_check_timer_handlder(struct adapter *adapter);
+#define rtw_is_scan_deny(adapter) false
+#define rtw_clear_scan_deny(adapter) do {} while (0)
+#define rtw_set_scan_deny_timer_hdl(adapter) do {} while (0)
+#define rtw_set_scan_deny(adapter, ms) do {} while (0)
+
+
+int _rtw_init_mlme_priv(struct adapter *padapter);
+
+void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv);
+
+void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv);
+
+int _rtw_enqueue_network(struct __queue *queue, struct wlan_network *pnetwork);
+
+struct wlan_network *_rtw_dequeue_network(struct __queue *queue);
+
+struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv);
+
+
+void _rtw_free_network(struct mlme_priv *pmlmepriv,
+ struct wlan_network *pnetwork, u8 isfreeall);
+void _rtw_free_network_nolock(struct mlme_priv *pmlmepriv,
+ struct wlan_network *pnetwork);
+
+
+struct wlan_network *_rtw_find_network(struct __queue *scanned_queue, u8 *addr);
+
+void _rtw_free_network_queue(struct adapter *padapter, u8 isfreeall);
+
+int rtw_if_up(struct adapter *padapter);
+
+
+u8 *rtw_get_capability_from_ie(u8 *ie);
+u8 *rtw_get_timestampe_from_ie(u8 *ie);
+u8 *rtw_get_beacon_interval_from_ie(u8 *ie);
+
+
+void rtw_joinbss_reset(struct adapter *padapter);
+
+unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie,
+ u8 *out_ie, uint in_len, uint *pout_len);
+void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len);
+void rtw_issue_addbareq_cmd(struct adapter *padapter,
+ struct xmit_frame *pxmitframe);
+
+int rtw_is_same_ibss(struct adapter *adapter, struct wlan_network *pnetwork);
+int is_same_network(struct wlan_bssid_ex *src, struct wlan_bssid_ex *dst);
+
+void rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network);
+void _rtw_roaming(struct adapter *padapter, struct wlan_network *tgt_network);
+
+void rtw_stassoc_hw_rpt(struct adapter *adapter,struct sta_info *psta);
+
+#endif /* __RTW_MLME_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
new file mode 100644
index 00000000000..a96b018e5e6
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -0,0 +1,877 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_MLME_EXT_H_
+#define __RTW_MLME_EXT_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wlan_bssdef.h>
+
+
+/* Commented by Albert 20101105 */
+/* Increase the SURVEY_TO value from 100 to 150 (100 ms to 150 ms). */
+/* The Realtek 8188CE SoftAP takes around 100 ms to send the probe response after receiving a probe request, */
+/* so this driver extends the dwell time on each scanned channel. */
+/* This increases the chance of receiving the probe response from the SoftAP. */
+
+#define SURVEY_TO (100)
+#define REAUTH_TO (300) /* 50) */
+#define REASSOC_TO (300) /* 50) */
+/* define DISCONNECT_TO (3000) */
+#define ADDBA_TO (2000)
+
+#define LINKED_TO (1) /* unit:2 sec, 1x2=2 sec */
+
+#define REAUTH_LIMIT (4)
+#define REASSOC_LIMIT (4)
+#define READDBA_LIMIT (2)
+
+#define ROAMING_LIMIT 8
+
+#define DYNAMIC_FUNC_DISABLE (0x0)
+
+/* ====== ODM_ABILITY_E ======== */
+/* BB ODM section BIT 0-15 */
+#define DYNAMIC_BB_DIG BIT(0)
+#define DYNAMIC_BB_RA_MASK BIT(1)
+#define DYNAMIC_BB_DYNAMIC_TXPWR BIT(2)
+#define DYNAMIC_BB_BB_FA_CNT BIT(3)
+
+#define DYNAMIC_BB_RSSI_MONITOR BIT(4)
+#define DYNAMIC_BB_CCK_PD BIT(5)
+#define DYNAMIC_BB_ANT_DIV BIT(6)
+#define DYNAMIC_BB_PWR_SAVE BIT(7)
+#define DYNAMIC_BB_PWR_TRA BIT(8)
+#define DYNAMIC_BB_RATE_ADAPTIVE BIT(9)
+#define DYNAMIC_BB_PATH_DIV BIT(10)
+#define DYNAMIC_BB_PSD BIT(11)
+
+/* MAC DM section BIT 16-23 */
+#define DYNAMIC_MAC_EDCA_TURBO BIT(16)
+#define DYNAMIC_MAC_EARLY_MODE BIT(17)
+
+/* RF ODM section BIT 24-31 */
+#define DYNAMIC_RF_TX_PWR_TRACK BIT(24)
+#define DYNAMIC_RF_RX_GAIN_TRACK BIT(25)
+#define DYNAMIC_RF_CALIBRATION BIT(26)
+
+#define DYNAMIC_ALL_FUNC_ENABLE 0xFFFFFFF
+
+#define _HW_STATE_NOLINK_ 0x00
+#define _HW_STATE_ADHOC_ 0x01
+#define _HW_STATE_STATION_ 0x02
+#define _HW_STATE_AP_ 0x03
+
+
+#define _1M_RATE_ 0
+#define _2M_RATE_ 1
+#define _5M_RATE_ 2
+#define _11M_RATE_ 3
+#define _6M_RATE_ 4
+#define _9M_RATE_ 5
+#define _12M_RATE_ 6
+#define _18M_RATE_ 7
+#define _24M_RATE_ 8
+#define _36M_RATE_ 9
+#define _48M_RATE_ 10
+#define _54M_RATE_ 11
+
+
+extern unsigned char RTW_WPA_OUI[];
+extern unsigned char WMM_OUI[];
+extern unsigned char WPS_OUI[];
+extern unsigned char WFD_OUI[];
+extern unsigned char P2P_OUI[];
+
+extern unsigned char WMM_INFO_OUI[];
+extern unsigned char WMM_PARA_OUI[];
+
+/* Channel Plan Type. */
+/* Note: */
+/* A new channel plan is added only when it differs from every channel
+ * plan listed below. */
+/* If you only want to customize the actions (scan period or join actions)
+ * of one of these channel plans, */
+/* customize them in struct rt_channel_info in the RT_CHANNEL_LIST. */
+enum RT_CHANNEL_DOMAIN {
+ /* old channel plan mapping ===== */
+ RT_CHANNEL_DOMAIN_FCC = 0x00,
+ RT_CHANNEL_DOMAIN_IC = 0x01,
+ RT_CHANNEL_DOMAIN_ETSI = 0x02,
+ RT_CHANNEL_DOMAIN_SPAIN = 0x03,
+ RT_CHANNEL_DOMAIN_FRANCE = 0x04,
+ RT_CHANNEL_DOMAIN_MKK = 0x05,
+ RT_CHANNEL_DOMAIN_MKK1 = 0x06,
+ RT_CHANNEL_DOMAIN_ISRAEL = 0x07,
+ RT_CHANNEL_DOMAIN_TELEC = 0x08,
+ RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN = 0x09,
+ RT_CHANNEL_DOMAIN_WORLD_WIDE_13 = 0x0A,
+ RT_CHANNEL_DOMAIN_TAIWAN = 0x0B,
+ RT_CHANNEL_DOMAIN_CHINA = 0x0C,
+ RT_CHANNEL_DOMAIN_SINGAPORE_INDIA_MEXICO = 0x0D,
+ RT_CHANNEL_DOMAIN_KOREA = 0x0E,
+ RT_CHANNEL_DOMAIN_TURKEY = 0x0F,
+ RT_CHANNEL_DOMAIN_JAPAN = 0x10,
+ RT_CHANNEL_DOMAIN_FCC_NO_DFS = 0x11,
+ RT_CHANNEL_DOMAIN_JAPAN_NO_DFS = 0x12,
+ RT_CHANNEL_DOMAIN_WORLD_WIDE_5G = 0x13,
+ RT_CHANNEL_DOMAIN_TAIWAN_NO_DFS = 0x14,
+
+ /* new channel plan mapping, (2GDOMAIN_5GDOMAIN) ===== */
+ RT_CHANNEL_DOMAIN_WORLD_NULL = 0x20,
+ RT_CHANNEL_DOMAIN_ETSI1_NULL = 0x21,
+ RT_CHANNEL_DOMAIN_FCC1_NULL = 0x22,
+ RT_CHANNEL_DOMAIN_MKK1_NULL = 0x23,
+ RT_CHANNEL_DOMAIN_ETSI2_NULL = 0x24,
+ RT_CHANNEL_DOMAIN_FCC1_FCC1 = 0x25,
+ RT_CHANNEL_DOMAIN_WORLD_ETSI1 = 0x26,
+ RT_CHANNEL_DOMAIN_MKK1_MKK1 = 0x27,
+ RT_CHANNEL_DOMAIN_WORLD_KCC1 = 0x28,
+ RT_CHANNEL_DOMAIN_WORLD_FCC2 = 0x29,
+ RT_CHANNEL_DOMAIN_WORLD_FCC3 = 0x30,
+ RT_CHANNEL_DOMAIN_WORLD_FCC4 = 0x31,
+ RT_CHANNEL_DOMAIN_WORLD_FCC5 = 0x32,
+ RT_CHANNEL_DOMAIN_WORLD_FCC6 = 0x33,
+ RT_CHANNEL_DOMAIN_FCC1_FCC7 = 0x34,
+ RT_CHANNEL_DOMAIN_WORLD_ETSI2 = 0x35,
+ RT_CHANNEL_DOMAIN_WORLD_ETSI3 = 0x36,
+ RT_CHANNEL_DOMAIN_MKK1_MKK2 = 0x37,
+ RT_CHANNEL_DOMAIN_MKK1_MKK3 = 0x38,
+ RT_CHANNEL_DOMAIN_FCC1_NCC1 = 0x39,
+ RT_CHANNEL_DOMAIN_FCC1_NCC2 = 0x40,
+ RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G = 0x41,
+ /* Add new channel plan above this line=============== */
+ RT_CHANNEL_DOMAIN_MAX,
+ RT_CHANNEL_DOMAIN_REALTEK_DEFINE = 0x7F,
+};
+
+enum RT_CHANNEL_DOMAIN_2G {
+ RT_CHANNEL_DOMAIN_2G_WORLD = 0x00, /* Worldwide 13 */
+ RT_CHANNEL_DOMAIN_2G_ETSI1 = 0x01, /* Europe */
+ RT_CHANNEL_DOMAIN_2G_FCC1 = 0x02, /* US */
+ RT_CHANNEL_DOMAIN_2G_MKK1 = 0x03, /* Japan */
+ RT_CHANNEL_DOMAIN_2G_ETSI2 = 0x04, /* France */
+ RT_CHANNEL_DOMAIN_2G_NULL = 0x05,
+ /* Add new channel plan above this line=============== */
+ RT_CHANNEL_DOMAIN_2G_MAX,
+};
+
+#define rtw_is_channel_plan_valid(chplan) \
+ (chplan < RT_CHANNEL_DOMAIN_MAX || \
+ chplan == RT_CHANNEL_DOMAIN_REALTEK_DEFINE)
+
+struct rt_channel_plan {
+ unsigned char Channel[MAX_CHANNEL_NUM];
+ unsigned char Len;
+};
+
+struct rt_channel_plan_2g {
+ unsigned char Channel[MAX_CHANNEL_NUM_2G];
+ unsigned char Len;
+};
+
+struct rt_channel_plan_map {
+ unsigned char Index2G;
+};
+
+enum Associated_AP {
+ atherosAP = 0,
+ broadcomAP = 1,
+ ciscoAP = 2,
+ marvellAP = 3,
+ ralinkAP = 4,
+ realtekAP = 5,
+ airgocapAP = 6,
+ unknownAP = 7,
+ maxAP,
+};
+
+enum HT_IOT_PEER {
+ HT_IOT_PEER_UNKNOWN = 0,
+ HT_IOT_PEER_REALTEK = 1,
+ HT_IOT_PEER_REALTEK_92SE = 2,
+ HT_IOT_PEER_BROADCOM = 3,
+ HT_IOT_PEER_RALINK = 4,
+ HT_IOT_PEER_ATHEROS = 5,
+ HT_IOT_PEER_CISCO = 6,
+ HT_IOT_PEER_MERU = 7,
+ HT_IOT_PEER_MARVELL = 8,
+ HT_IOT_PEER_REALTEK_SOFTAP = 9,/* peer is RealTek SOFT_AP */
+ HT_IOT_PEER_SELF_SOFTAP = 10, /* Self is SoftAP */
+ HT_IOT_PEER_AIRGO = 11,
+ HT_IOT_PEER_INTEL = 12,
+ HT_IOT_PEER_RTK_APCLIENT = 13,
+ HT_IOT_PEER_REALTEK_81XX = 14,
+ HT_IOT_PEER_REALTEK_WOW = 15,
+ HT_IOT_PEER_TENDA = 16,
+ HT_IOT_PEER_MAX = 17
+};
+
+enum SCAN_STATE {
+ SCAN_DISABLE = 0,
+ SCAN_START = 1,
+ SCAN_TXNULL = 2,
+ SCAN_PROCESS = 3,
+ SCAN_COMPLETE = 4,
+ SCAN_STATE_MAX,
+};
+
+struct mlme_handler {
+ unsigned int num;
+ char *str;
+ unsigned int (*func)(struct adapter *adapt, union recv_frame *frame);
+};
+
+struct action_handler {
+ unsigned int num;
+ char *str;
+ unsigned int (*func)(struct adapter *adapt, union recv_frame *frame);
+};
+
+struct ss_res {
+ int state;
+ int bss_cnt;
+ int channel_idx;
+ int scan_mode;
+ u8 ssid_num;
+ u8 ch_num;
+ struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
+ struct rtw_ieee80211_channel ch[RTW_CHANNEL_SCAN_AMOUNT];
+};
+
+/* define AP_MODE 0x0C */
+/* define STATION_MODE 0x08 */
+/* define AD_HOC_MODE 0x04 */
+/* define NO_LINK_MODE 0x00 */
+
+#define WIFI_FW_NULL_STATE _HW_STATE_NOLINK_
+#define WIFI_FW_STATION_STATE _HW_STATE_STATION_
+#define WIFI_FW_AP_STATE _HW_STATE_AP_
+#define WIFI_FW_ADHOC_STATE _HW_STATE_ADHOC_
+
+#define WIFI_FW_AUTH_NULL 0x00000100
+#define WIFI_FW_AUTH_STATE 0x00000200
+#define WIFI_FW_AUTH_SUCCESS 0x00000400
+
+#define WIFI_FW_ASSOC_STATE 0x00002000
+#define WIFI_FW_ASSOC_SUCCESS 0x00004000
+
+#define WIFI_FW_LINKING_STATE (WIFI_FW_AUTH_NULL | \
+ WIFI_FW_AUTH_STATE | \
+ WIFI_FW_AUTH_SUCCESS | \
+ WIFI_FW_ASSOC_STATE)
+
+struct FW_Sta_Info {
+ struct sta_info *psta;
+ u32 status;
+ u32 rx_pkt;
+ u32 retry;
+ unsigned char SupportedRates[NDIS_802_11_LENGTH_RATES_EX];
+};
+
+/*
+ * Usage:
+ * When one iface acts as an AP and another iface is in STA mode and scanning,
+ * the scan should switch back to the AP's operating channel periodically.
+ * Parameters:
+ * After the driver has scanned RTW_SCAN_NUM_OF_CH channels, it switches back
+ * to the AP's operating channel for
+ * RTW_STAY_AP_CH_MILLISECOND * SURVEY_TO milliseconds.
+ * Example:
+ * For a chip supporting 2.4 GHz + 5 GHz with AP mode operating on channel 1,
+ * RTW_SCAN_NUM_OF_CH is 8, RTW_STAY_AP_CH_MS is 3 and SURVEY_TO is 100.
+ * When the STA-mode iface gets a set_scan command, it will
+ * 1. Scan channels 1.2.3.4.5.6.7.8
+ * 2. Go back to channel 1 for 300 milliseconds (3 * 100 ms)
+ * 3. Continue the site survey on channels 9.10.11.36.40.44.48.52
+ * 4. Go back to channel 1 for 300 milliseconds
+ * 5. ... and so on, until the survey is done.
+ */
+
+struct mlme_ext_info {
+ u32 state;
+ u32 reauth_count;
+ u32 reassoc_count;
+ u32 link_count;
+ u32 auth_seq;
+ u32 auth_algo; /* 802.11 auth, could be open, shared, auto */
+ u32 authModeToggle;
+ u32 enc_algo;/* encrypt algorithm; */
+ u32 key_index; /* this is only valid for legacy wep,
+ * 0~3 for key id. */
+ u32 iv;
+ u8 chg_txt[128];
+ u16 aid;
+ u16 bcn_interval;
+ u16 capability;
+ u8 assoc_AP_vendor;
+ u8 slotTime;
+ u8 preamble_mode;
+ u8 WMM_enable;
+ u8 ERP_enable;
+ u8 ERP_IE;
+ u8 HT_enable;
+ u8 HT_caps_enable;
+ u8 HT_info_enable;
+ u8 HT_protection;
+ u8 turboMode_cts2self;
+ u8 turboMode_rtsen;
+ u8 SM_PS;
+ u8 agg_enable_bitmap;
+ u8 ADDBA_retry_count;
+ u8 candidate_tid_bitmap;
+ u8 dialogToken;
+ /* Accept ADDBA Request */
+ bool bAcceptAddbaReq;
+ u8 bwmode_updated;
+ u8 hidden_ssid_mode;
+
+ struct ADDBA_request ADDBA_req;
+ struct WMM_para_element WMM_param;
+ struct HT_caps_element HT_caps;
+ struct HT_info_element HT_info;
+ struct wlan_bssid_ex network;/* join network or bss_network,
+ * if in ap mode, it is the same
+ * as cur_network.network */
+ struct FW_Sta_Info FW_sta_info[NUM_STA];
+};
+
+/* The channel information about this channel including joining,
+ * scanning, and power constraints. */
+struct rt_channel_info {
+ u8 ChannelNum; /* The channel number. */
+ enum rt_scan_type ScanType; /* Scan type such as passive
+ * or active scan. */
+ u32 rx_count;
+};
+
+int rtw_ch_set_search_ch(struct rt_channel_info *ch_set, const u32 ch);
+
+/* P2P_MAX_REG_CLASSES - Maximum number of regulatory classes */
+#define P2P_MAX_REG_CLASSES 10
+
+/* P2P_MAX_REG_CLASS_CHANNELS - Maximum number of chan per regulatory class */
+#define P2P_MAX_REG_CLASS_CHANNELS 20
+
+/* struct p2p_channels - List of supported channels */
+struct p2p_channels {
+ /* struct p2p_reg_class - Supported regulatory class */
+ struct p2p_reg_class {
+ /* reg_class - Regulatory class (IEEE 802.11-2007, Annex J) */
+ u8 reg_class;
+
+ /* channel - Supported channels */
+ u8 channel[P2P_MAX_REG_CLASS_CHANNELS];
+
+ /* channels - Number of channel entries in use */
+ size_t channels;
+ } reg_class[P2P_MAX_REG_CLASSES];
+
+ /* reg_classes - Number of reg_class entries in use */
+ size_t reg_classes;
+};
+
+struct p2p_oper_class_map {
+ enum hw_mode {IEEE80211G} mode;
+ u8 op_class;
+ u8 min_chan;
+ u8 max_chan;
+ u8 inc;
+ enum {BW20, BW40PLUS, BW40MINUS} bw;
+};
+
+struct mlme_ext_priv {
+ struct adapter *padapter;
+ u8 mlmeext_init;
+ ATOMIC_T event_seq;
+ u16 mgnt_seq;
+
+ unsigned char cur_channel;
+ unsigned char cur_bwmode;
+ unsigned char cur_ch_offset;/* PRIME_CHNL_OFFSET */
+ unsigned char cur_wireless_mode; /* NETWORK_TYPE */
+
+ unsigned char oper_channel; /* saved channel info when calling
+ * set_channel_bw */
+ unsigned char oper_bwmode;
+ unsigned char oper_ch_offset;/* PRIME_CHNL_OFFSET */
+
+ unsigned char max_chan_nums;
+ struct rt_channel_info channel_set[MAX_CHANNEL_NUM];
+ struct p2p_channels channel_list;
+ unsigned char basicrate[NumRates];
+ unsigned char datarate[NumRates];
+
+ struct ss_res sitesurvey_res;
+ struct mlme_ext_info mlmext_info;/* for sta/adhoc mode, including
+ * current scan/connecting/connected
+ * related info. For ap mode,
+ * network includes ap's cap_info*/
+ struct timer_list survey_timer;
+ struct timer_list link_timer;
+ u16 chan_scan_time;
+
+ u8 scan_abort;
+ u8 tx_rate; /* TXRATE when USERATE is set. */
+
+ u32 retry; /* retry for issue probereq */
+
+ u64 TSFValue;
+
+#ifdef CONFIG_88EU_AP_MODE
+ unsigned char bstart_bss;
+#endif
+ u8 update_channel_plan_by_ap_done;
+ /* recv_decache check for Action_public frame */
+ u8 action_public_dialog_token;
+ u16 action_public_rxseq;
+ u8 active_keep_alive_check;
+};
+
+int init_mlme_ext_priv(struct adapter *adapter);
+int init_hw_mlme_ext(struct adapter *padapter);
+void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext);
+extern void init_mlme_ext_timer(struct adapter *padapter);
+extern void init_addba_retry_timer(struct adapter *adapt, struct sta_info *sta);
+extern struct xmit_frame *alloc_mgtxmitframe(struct xmit_priv *pxmitpriv);
+
+unsigned char networktype_to_raid(unsigned char network_type);
+u8 judge_network_type(struct adapter *padapter, unsigned char *rate, int len);
+void get_rate_set(struct adapter *padapter, unsigned char *pbssrate, int *len);
+void UpdateBrateTbl(struct adapter *padapter, u8 *mBratesOS);
+void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen);
+
+void Save_DM_Func_Flag(struct adapter *padapter);
+void Restore_DM_Func_Flag(struct adapter *padapter);
+void Switch_DM_Func(struct adapter *padapter, u32 mode, u8 enable);
+
+void Set_MSR(struct adapter *padapter, u8 type);
+
+u8 rtw_get_oper_ch(struct adapter *adapter);
+void rtw_set_oper_ch(struct adapter *adapter, u8 ch);
+u8 rtw_get_oper_bw(struct adapter *adapter);
+void rtw_set_oper_bw(struct adapter *adapter, u8 bw);
+u8 rtw_get_oper_choffset(struct adapter *adapter);
+void rtw_set_oper_choffset(struct adapter *adapter, u8 offset);
+
+void set_channel_bwmode(struct adapter *padapter, unsigned char channel,
+ unsigned char channel_offset, unsigned short bwmode);
+void SelectChannel(struct adapter *padapter, unsigned char channel);
+void SetBWMode(struct adapter *padapter, unsigned short bwmode,
+ unsigned char channel_offset);
+
+unsigned int decide_wait_for_beacon_timeout(unsigned int bcn_interval);
+
+void write_cam(struct adapter *padapter, u8 entry, u16 ctrl, u8 *mac, u8 *key);
+void clear_cam_entry(struct adapter *padapter, u8 entry);
+
+void invalidate_cam_all(struct adapter *padapter);
+void CAM_empty_entry(struct adapter *Adapter, u8 ucIndex);
+
+int allocate_fw_sta_entry(struct adapter *padapter);
+void flush_all_cam_entry(struct adapter *padapter);
+
+void site_survey(struct adapter *padapter);
+u8 collect_bss_info(struct adapter *padapter, union recv_frame *precv_frame,
+ struct wlan_bssid_ex *bssid);
+void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
+ struct adapter *adapter, bool update_ie);
+
+int get_bsstype(unsigned short capability);
+u8 *get_my_bssid(struct wlan_bssid_ex *pnetwork);
+u16 get_beacon_interval(struct wlan_bssid_ex *bss);
+
+int is_client_associated_to_ap(struct adapter *padapter);
+int is_client_associated_to_ibss(struct adapter *padapter);
+int is_IBSS_empty(struct adapter *padapter);
+
+unsigned char check_assoc_AP(u8 *pframe, uint len);
+
+int WMM_param_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE);
+void WMMOnAssocRsp(struct adapter *padapter);
+
+void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE);
+void HT_info_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE);
+void HTOnAssocRsp(struct adapter *padapter);
+
+void ERP_IE_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE);
+void VCS_update(struct adapter *padapter, struct sta_info *psta);
+
+void update_beacon_info(struct adapter *padapter, u8 *pframe, uint len,
+ struct sta_info *psta);
+int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len);
+void update_IOT_info(struct adapter *padapter);
+void update_capinfo(struct adapter *adapter, u16 updatecap);
+void update_wireless_mode(struct adapter *padapter);
+void update_tx_basic_rate(struct adapter *padapter, u8 modulation);
+void update_bmc_sta_support_rate(struct adapter *padapter, u32 mac_id);
+int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie,
+ uint var_ie_len, int cam_idx);
+
+/* for sta/adhoc mode */
+void update_sta_info(struct adapter *padapter, struct sta_info *psta);
+unsigned int update_basic_rate(unsigned char *ptn, unsigned int ptn_sz);
+unsigned int update_supported_rate(unsigned char *ptn, unsigned int ptn_sz);
+unsigned int update_MSC_rate(struct HT_caps_element *pHT_caps);
+void Update_RA_Entry(struct adapter *padapter, u32 mac_id);
+void set_sta_rate(struct adapter *padapter, struct sta_info *psta);
+
+unsigned int receive_disconnect(struct adapter *padapter,
+ unsigned char *macaddr, unsigned short reason);
+
+unsigned char get_highest_rate_idx(u32 mask);
+int support_short_GI(struct adapter *padapter, struct HT_caps_element *caps);
+unsigned int is_ap_in_tkip(struct adapter *padapter);
+unsigned int is_ap_in_wep(struct adapter *padapter);
+unsigned int should_forbid_n_rate(struct adapter *padapter);
+
+void report_join_res(struct adapter *padapter, int res);
+void report_survey_event(struct adapter *padapter, union recv_frame *precv_frame);
+void report_surveydone_event(struct adapter *padapter);
+void report_del_sta_event(struct adapter *padapter,
+ unsigned char *addr, unsigned short reason);
+void report_add_sta_event(struct adapter *padapter, unsigned char *addr,
+ int cam_idx);
+
+void beacon_timing_control(struct adapter *padapter);
+extern u8 set_tx_beacon_cmd(struct adapter *padapter);
+unsigned int setup_beacon_frame(struct adapter *padapter,
+ unsigned char *beacon_frame);
+void update_mgnt_tx_rate(struct adapter *padapter, u8 rate);
+void update_mgntframe_attrib(struct adapter *padapter,
+ struct pkt_attrib *pattrib);
+void dump_mgntframe(struct adapter *padapter, struct xmit_frame *pmgntframe);
+s32 dump_mgntframe_and_wait(struct adapter *padapter,
+ struct xmit_frame *pmgntframe, int timeout_ms);
+s32 dump_mgntframe_and_wait_ack(struct adapter *padapter,
+ struct xmit_frame *pmgntframe);
+
+#ifdef CONFIG_88EU_P2P
+void issue_probersp_p2p(struct adapter *padapter, unsigned char *da);
+void issue_p2p_provision_request(struct adapter *padapter, u8 *pssid,
+ u8 ussidlen, u8 *pdev_raddr);
+void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr);
+void issue_probereq_p2p(struct adapter *padapter, u8 *da);
+int issue_probereq_p2p_ex(struct adapter *adapter, u8 *da, int try_cnt,
+ int wait_ms);
+void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr,
+ u8 dialogToken, u8 success);
+void issue_p2p_invitation_request(struct adapter *padapter, u8 *raddr);
+#endif /* CONFIG_88EU_P2P */
+void issue_beacon(struct adapter *padapter, int timeout_ms);
+void issue_probersp(struct adapter *padapter, unsigned char *da,
+ u8 is_valid_p2p_probereq);
+void issue_assocreq(struct adapter *padapter);
+void issue_asocrsp(struct adapter *padapter, unsigned short status,
+ struct sta_info *pstat, int pkt_type);
+void issue_auth(struct adapter *padapter, struct sta_info *psta,
+ unsigned short status);
+void issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid,
+ u8 *da);
+s32 issue_probereq_ex(struct adapter *adapter, struct ndis_802_11_ssid *pssid,
+ u8 *da, int try_cnt, int wait_ms);
+int issue_nulldata(struct adapter *padapter, unsigned char *da,
+ unsigned int power_mode, int try_cnt, int wait_ms);
+int issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
+ u16 tid, int try_cnt, int wait_ms);
+int issue_deauth(struct adapter *padapter, unsigned char *da,
+ unsigned short reason);
+int issue_deauth_ex(struct adapter *padapter, u8 *da, unsigned short reason,
+ int try_cnt, int wait_ms);
+void issue_action_spct_ch_switch(struct adapter *padapter, u8 *ra, u8 new_ch,
+ u8 ch_offset);
+void issue_action_BA(struct adapter *padapter, unsigned char *raddr,
+ unsigned char action, unsigned short status);
+unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr);
+unsigned int send_beacon(struct adapter *padapter);
+
+void start_clnt_assoc(struct adapter *padapter);
+void start_clnt_auth(struct adapter *padapter);
+void start_clnt_join(struct adapter *padapter);
+void start_create_ibss(struct adapter *padapter);
+
+unsigned int OnAssocReq(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAssocRsp(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnProbeReq(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnProbeRsp(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int DoReserved(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnBeacon(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAtim(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnDisassoc(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAuth(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAuthClient(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnDeAuth(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAction(struct adapter *padapter,
+ union recv_frame *precv_frame);
+
+unsigned int on_action_spct(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAction_qos(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAction_dls(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAction_back(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int on_action_public(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAction_ht(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAction_wmm(struct adapter *padapter,
+ union recv_frame *precv_frame);
+unsigned int OnAction_p2p(struct adapter *padapter,
+ union recv_frame *precv_frame);
+
+void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res);
+void mlmeext_sta_del_event_callback(struct adapter *padapter);
+void mlmeext_sta_add_event_callback(struct adapter *padapter,
+ struct sta_info *psta);
+
+void linked_status_chk(struct adapter *padapter);
+
+void survey_timer_hdl(struct adapter *padapter);
+void link_timer_hdl(struct adapter *padapter);
+void addba_timer_hdl(struct sta_info *psta);
+
+#define set_survey_timer(mlmeext, ms) \
+ do { \
+ _set_timer(&(mlmeext)->survey_timer, (ms)); \
+ } while (0)
+
+#define set_link_timer(mlmeext, ms) \
+ do { \
+ _set_timer(&(mlmeext)->link_timer, (ms)); \
+ } while (0)
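+
+/*
+ * Usage sketch (illustrative, not a prototype from this driver): arming the
+ * survey timer for one scan dwell, assuming pmlmeext points to the adapter's
+ * mlme_ext_priv:
+ *
+ *	set_survey_timer(pmlmeext, pmlmeext->chan_scan_time);
+ *
+ * which expands to _set_timer(&pmlmeext->survey_timer, pmlmeext->chan_scan_time).
+ */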
+
+int cckrates_included(unsigned char *rate, int ratelen);
+int cckratesonly_included(unsigned char *rate, int ratelen);
+
+void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr);
+
+void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len);
+void correct_TSF(struct adapter *padapter, struct mlme_ext_priv *pmlmeext);
+
+struct cmd_hdl {
+ uint parmsize;
+ u8 (*h2cfuns)(struct adapter *padapter, u8 *pbuf);
+};
+
+u8 read_macreg_hdl(struct adapter *padapter, u8 *pbuf);
+u8 write_macreg_hdl(struct adapter *padapter, u8 *pbuf);
+u8 read_bbreg_hdl(struct adapter *padapter, u8 *pbuf);
+u8 write_bbreg_hdl(struct adapter *padapter, u8 *pbuf);
+u8 read_rfreg_hdl(struct adapter *padapter, u8 *pbuf);
+u8 write_rfreg_hdl(struct adapter *padapter, u8 *pbuf);
+u8 NULL_hdl(struct adapter *padapter, u8 *pbuf);
+u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf);
+u8 disconnect_hdl(struct adapter *padapter, u8 *pbuf);
+u8 createbss_hdl(struct adapter *padapter, u8 *pbuf);
+u8 setopmode_hdl(struct adapter *padapter, u8 *pbuf);
+u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf);
+u8 setauth_hdl(struct adapter *padapter, u8 *pbuf);
+u8 setkey_hdl(struct adapter *padapter, u8 *pbuf);
+u8 set_stakey_hdl(struct adapter *padapter, u8 *pbuf);
+u8 set_assocsta_hdl(struct adapter *padapter, u8 *pbuf);
+u8 del_assocsta_hdl(struct adapter *padapter, u8 *pbuf);
+u8 add_ba_hdl(struct adapter *padapter, unsigned char *pbuf);
+
+u8 mlme_evt_hdl(struct adapter *padapter, unsigned char *pbuf);
+u8 h2c_msg_hdl(struct adapter *padapter, unsigned char *pbuf);
+u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf);
+u8 set_ch_hdl(struct adapter *padapter, u8 *pbuf);
+u8 set_chplan_hdl(struct adapter *padapter, unsigned char *pbuf);
+u8 led_blink_hdl(struct adapter *padapter, unsigned char *pbuf);
+/* Handling DFS channel switch announcement ie. */
+u8 set_csa_hdl(struct adapter *padapter, unsigned char *pbuf);
+u8 tdls_hdl(struct adapter *padapter, unsigned char *pbuf);
+
+#define GEN_DRV_CMD_HANDLER(size, cmd) {size, &cmd ## _hdl},
+#define GEN_MLME_EXT_HANDLER(size, cmd) {size, cmd},
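+
+/*
+ * Expansion sketch (illustrative): GEN_DRV_CMD_HANDLER pastes "_hdl" onto its
+ * second argument, so GEN_DRV_CMD_HANDLER(0, NULL) becomes {0, &NULL_hdl},
+ * (see NULL_hdl declared above), while GEN_MLME_EXT_HANDLER emits the handler
+ * as given, e.g. GEN_MLME_EXT_HANDLER(sizeof(struct joinbss_parm), join_cmd_hdl)
+ * becomes {sizeof(struct joinbss_parm), join_cmd_hdl}, in the wlancmds table.
+ */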
+
+#ifdef _RTW_CMD_C_
+
+static struct cmd_hdl wlancmds[] = {
+ GEN_DRV_CMD_HANDLER(0, NULL) /*0*/
+ GEN_DRV_CMD_HANDLER(0, NULL)
+ GEN_DRV_CMD_HANDLER(0, NULL)
+ GEN_DRV_CMD_HANDLER(0, NULL)
+ GEN_DRV_CMD_HANDLER(0, NULL)
+ GEN_DRV_CMD_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL) /*10*/
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct joinbss_parm), join_cmd_hdl) /*14*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct disconnect_parm), disconnect_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct createbss_parm), createbss_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setopmode_parm), setopmode_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct sitesurvey_parm),
+ sitesurvey_cmd_hdl) /*18*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct setauth_parm), setauth_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setkey_parm), setkey_hdl) /*20*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct set_stakey_parm), set_stakey_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct set_assocsta_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct del_assocsta_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setstapwrstate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setbasicrate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct getbasicrate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setdatarate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct getdatarate_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct setphyinfo_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct getphyinfo_parm), NULL) /*30*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct setphy_parm), NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct getphy_parm), NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL) /*40*/
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct addBaReq_parm), add_ba_hdl)
+ GEN_MLME_EXT_HANDLER(sizeof(struct set_ch_parm), set_ch_hdl) /* 46 */
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL) /*50*/
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(0, NULL)
+ GEN_MLME_EXT_HANDLER(sizeof(struct Tx_Beacon_param),
+ tx_beacon_hdl) /*55*/
+
+ GEN_MLME_EXT_HANDLER(0, mlme_evt_hdl) /*56*/
+ GEN_MLME_EXT_HANDLER(0, rtw_drvextra_cmd_hdl) /*57*/
+
+ GEN_MLME_EXT_HANDLER(0, h2c_msg_hdl) /*58*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct SetChannelPlan_param),
+ set_chplan_hdl) /*59*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct LedBlink_param),
+ led_blink_hdl) /*60*/
+
+ GEN_MLME_EXT_HANDLER(sizeof(struct SetChannelSwitch_param),
+ set_csa_hdl) /*61*/
+ GEN_MLME_EXT_HANDLER(sizeof(struct TDLSoption_param),
+ tdls_hdl) /*62*/
+};
+
+#endif
+
+struct C2HEvent_Header {
+#ifdef __LITTLE_ENDIAN
+ unsigned int len:16;
+ unsigned int ID:8;
+ unsigned int seq:8;
+#elif defined(__BIG_ENDIAN)
+ unsigned int seq:8;
+ unsigned int ID:8;
+ unsigned int len:16;
+#endif
+ unsigned int rsvd;
+};
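+
+/*
+ * Note (assumption, inferred from the #ifdef above): the bit-field order is
+ * swapped between little- and big-endian builds, presumably so len, ID and
+ * seq land in the intended positions of the 32-bit event header word on
+ * either host. A filled header might look like (payload_len and seq_num are
+ * hypothetical locals):
+ *
+ *	struct C2HEvent_Header hdr;
+ *	hdr.len = payload_len;
+ *	hdr.ID = GEN_EVT_CODE(_Survey);
+ *	hdr.seq = seq_num;
+ */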
+
+void rtw_dummy_event_callback(struct adapter *adapter, u8 *pbuf);
+void rtw_fwdbg_event_callback(struct adapter *adapter, u8 *pbuf);
+
+enum rtw_c2h_event {
+ GEN_EVT_CODE(_Read_MACREG) = 0, /*0*/
+ GEN_EVT_CODE(_Read_BBREG),
+ GEN_EVT_CODE(_Read_RFREG),
+ GEN_EVT_CODE(_Read_EEPROM),
+ GEN_EVT_CODE(_Read_EFUSE),
+ GEN_EVT_CODE(_Read_CAM), /*5*/
+ GEN_EVT_CODE(_Get_BasicRate),
+ GEN_EVT_CODE(_Get_DataRate),
+ GEN_EVT_CODE(_Survey), /*8*/
+ GEN_EVT_CODE(_SurveyDone), /*9*/
+
+ GEN_EVT_CODE(_JoinBss), /*10*/
+ GEN_EVT_CODE(_AddSTA),
+ GEN_EVT_CODE(_DelSTA),
+ GEN_EVT_CODE(_AtimDone),
+ GEN_EVT_CODE(_TX_Report),
+ GEN_EVT_CODE(_CCX_Report), /*15*/
+ GEN_EVT_CODE(_DTM_Report),
+ GEN_EVT_CODE(_TX_Rate_Statistics),
+ GEN_EVT_CODE(_C2HLBK),
+ GEN_EVT_CODE(_FWDBG),
+ GEN_EVT_CODE(_C2HFEEDBACK), /*20*/
+ GEN_EVT_CODE(_ADDBA),
+ GEN_EVT_CODE(_C2HBCN),
+ GEN_EVT_CODE(_ReportPwrState), /* filen: only for PCIE, USB */
+ GEN_EVT_CODE(_CloseRF), /* filen: only for PCIE,
+ * work around ASPM */
+ MAX_C2HEVT
+};
+
+
+#ifdef _RTW_MLME_EXT_C_
+
+static struct fwevent wlanevents[] = {
+ {0, rtw_dummy_event_callback}, /*0*/
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, &rtw_survey_event_callback}, /*8*/
+ {sizeof(struct surveydone_event), &rtw_surveydone_event_callback}, /*9*/
+ {0, &rtw_joinbss_event_callback}, /*10*/
+ {sizeof(struct stassoc_event), &rtw_stassoc_event_callback},
+ {sizeof(struct stadel_event), &rtw_stadel_event_callback},
+ {0, &rtw_atimdone_event_callback},
+ {0, rtw_dummy_event_callback},
+ {0, NULL}, /*15*/
+ {0, NULL},
+ {0, NULL},
+ {0, NULL},
+ {0, rtw_fwdbg_event_callback},
+ {0, NULL}, /*20*/
+ {0, NULL},
+ {0, NULL},
+ {0, &rtw_cpwm_event_callback},
+};
+
+#endif /* _RTW_MLME_EXT_C_ */
+
+#endif /* __RTW_MLME_EXT_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_mp.h b/drivers/staging/rtl8188eu/include/rtw_mp.h
new file mode 100644
index 00000000000..59bdbb5f396
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_mp.h
@@ -0,0 +1,495 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_MP_H_
+#define _RTW_MP_H_
+
+/* Severity (top two bits) of the status codes below: */
+/* 00 - Success */
+/* 11 - Error */
+#define STATUS_SUCCESS (0x00000000L)
+#define STATUS_PENDING (0x00000103L)
+
+#define STATUS_UNSUCCESSFUL (0xC0000001L)
+#define STATUS_INSUFFICIENT_RESOURCES (0xC000009AL)
+#define STATUS_NOT_SUPPORTED (0xC00000BBL)
+
+#define NDIS_STATUS_SUCCESS ((int)STATUS_SUCCESS)
+#define NDIS_STATUS_PENDING ((int)STATUS_PENDING)
+#define NDIS_STATUS_NOT_RECOGNIZED ((int)0x00010001L)
+#define NDIS_STATUS_NOT_COPIED ((int)0x00010002L)
+#define NDIS_STATUS_NOT_ACCEPTED ((int)0x00010003L)
+#define NDIS_STATUS_CALL_ACTIVE ((int)0x00010007L)
+
+#define NDIS_STATUS_FAILURE ((int)STATUS_UNSUCCESSFUL)
+#define NDIS_STATUS_RESOURCES ((int)STATUS_INSUFFICIENT_RESOURCES)
+#define NDIS_STATUS_CLOSING ((int)0xC0010002L)
+#define NDIS_STATUS_BAD_VERSION ((int)0xC0010004L)
+#define NDIS_STATUS_BAD_CHARACTERISTICS ((int)0xC0010005L)
+#define NDIS_STATUS_ADAPTER_NOT_FOUND ((int)0xC0010006L)
+#define NDIS_STATUS_OPEN_FAILED ((int)0xC0010007L)
+#define NDIS_STATUS_DEVICE_FAILED ((int)0xC0010008L)
+#define NDIS_STATUS_MULTICAST_FULL ((int)0xC0010009L)
+#define NDIS_STATUS_MULTICAST_EXISTS ((int)0xC001000AL)
+#define NDIS_STATUS_MULTICAST_NOT_FOUND ((int)0xC001000BL)
+#define NDIS_STATUS_REQUEST_ABORTED ((int)0xC001000CL)
+#define NDIS_STATUS_RESET_IN_PROGRESS ((int)0xC001000DL)
+#define NDIS_STATUS_CLOSING_INDICATING ((int)0xC001000EL)
+#define NDIS_STATUS_NOT_SUPPORTED ((int)STATUS_NOT_SUPPORTED)
+#define NDIS_STATUS_INVALID_PACKET ((int)0xC001000FL)
+#define NDIS_STATUS_OPEN_LIST_FULL ((int)0xC0010010L)
+#define NDIS_STATUS_ADAPTER_NOT_READY ((int)0xC0010011L)
+#define NDIS_STATUS_ADAPTER_NOT_OPEN ((int)0xC0010012L)
+#define NDIS_STATUS_NOT_INDICATING ((int)0xC0010013L)
+#define NDIS_STATUS_INVALID_LENGTH ((int)0xC0010014L)
+#define NDIS_STATUS_INVALID_DATA ((int)0xC0010015L)
+#define NDIS_STATUS_BUFFER_TOO_SHORT ((int)0xC0010016L)
+#define NDIS_STATUS_INVALID_OID ((int)0xC0010017L)
+#define NDIS_STATUS_ADAPTER_REMOVED ((int)0xC0010018L)
+#define NDIS_STATUS_UNSUPPORTED_MEDIA ((int)0xC0010019L)
+#define NDIS_STATUS_GROUP_ADDRESS_IN_USE ((int)0xC001001AL)
+#define NDIS_STATUS_FILE_NOT_FOUND ((int)0xC001001BL)
+#define NDIS_STATUS_ERROR_READING_FILE ((int)0xC001001CL)
+#define NDIS_STATUS_ALREADY_MAPPED ((int)0xC001001DL)
+#define NDIS_STATUS_RESOURCE_CONFLICT ((int)0xC001001EL)
+#define NDIS_STATUS_NO_CABLE ((int)0xC001001FL)
+
+#define NDIS_STATUS_INVALID_SAP ((int)0xC0010020L)
+#define NDIS_STATUS_SAP_IN_USE ((int)0xC0010021L)
+#define NDIS_STATUS_INVALID_ADDRESS ((int)0xC0010022L)
+#define NDIS_STATUS_VC_NOT_ACTIVATED ((int)0xC0010023L)
+#define NDIS_STATUS_DEST_OUT_OF_ORDER ((int)0xC0010024L) /*cause 27*/
+#define NDIS_STATUS_VC_NOT_AVAILABLE ((int)0xC0010025L) /*cause 35,45 */
+#define NDIS_STATUS_CELLRATE_NOT_AVAILABLE ((int)0xC0010026L) /*cause 37*/
+#define NDIS_STATUS_INCOMPATABLE_QOS ((int)0xC0010027L) /*cause 49*/
+#define NDIS_STATUS_AAL_PARAMS_UNSUPPORTED ((int)0xC0010028L) /*cause 93*/
+#define NDIS_STATUS_NO_ROUTE_TO_DESTINATION ((int)0xC0010029L) /*cause 3 */
+
+enum antenna_path {
+ ANTENNA_NONE = 0x00,
+ ANTENNA_D,
+ ANTENNA_C,
+ ANTENNA_CD,
+ ANTENNA_B,
+ ANTENNA_BD,
+ ANTENNA_BC,
+ ANTENNA_BCD,
+ ANTENNA_A,
+ ANTENNA_AD,
+ ANTENNA_AC,
+ ANTENNA_ACD,
+ ANTENNA_AB,
+ ANTENNA_ABD,
+ ANTENNA_ABC,
+ ANTENNA_ABCD
+};
+
+
+#define MAX_MP_XMITBUF_SZ 2048
+#define NR_MP_XMITFRAME 8
+
+struct mp_xmit_frame {
+ struct list_head list;
+ struct pkt_attrib attrib;
+ struct sk_buff *pkt;
+ int frame_tag;
+ struct adapter *padapter;
+ struct urb *pxmit_urb[8];
+ /* insert urb, irp, and irpcnt info below... */
+ u8 *mem_addr;
+ u32 sz[8];
+ u8 bpending[8];
+ int ac_tag[8];
+ int last[8];
+ uint irpcnt;
+ uint fragcnt;
+ uint mem[(MAX_MP_XMITBUF_SZ >> 2)];
+};
+
+struct mp_wiparam {
+ u32 bcompleted;
+ u32 act_type;
+ u32 io_offset;
+ u32 io_value;
+};
+
+typedef void (*wi_act_func)(void *padapter);
+
+struct mp_tx {
+ u8 stop;
+ u32 count, sended;
+ u8 payload;
+ struct pkt_attrib attrib;
+ struct tx_desc desc;
+ u8 *pallocated_buf;
+ u8 *buf;
+ u32 buf_size, write_size;
+ void *PktTxThread;
+};
+
+#include <Hal8188EPhyCfg.h>
+
+#define MP_MAX_LINES 1000
+#define MP_MAX_LINES_BYTES 256
+
+typedef void (*MPT_WORK_ITEM_HANDLER)(void *Adapter);
+
+struct mpt_context {
+ /* Indicate if we have started Mass Production Test. */
+ bool bMassProdTest;
+
+ /* Indicate if the driver is unloading or unloaded. */
+ bool bMptDrvUnload;
+
+ struct semaphore MPh2c_Sema;
+ struct timer_list MPh2c_timeout_timer;
+/* Event used to sync H2c for BT control */
+
+ bool MptH2cRspEvent;
+ bool MptBtC2hEvent;
+ bool bMPh2c_timeout;
+
+ /* 8190 PCI does not support NDIS_WORK_ITEM. */
+ /* Work Item for Mass Production Test. */
+ /* Event used to sync the case unloading driver and MptWorkItem
+ * is still in progress. */
+ /* Indicate a MptWorkItem is scheduled and not yet finished. */
+ bool bMptWorkItemInProgress;
+ /* An instance which implements function and context of MptWorkItem. */
+ MPT_WORK_ITEM_HANDLER CurrMptAct;
+
+ /* 1=Start, 0=Stop from UI. */
+ u32 MptTestStart;
+ /* _TEST_MODE, defined in MPT_Req2.h */
+ u32 MptTestItem;
+ /* Variable needed in each implementation of CurrMptAct. */
+ u32 MptActType; /* Type of action performed in CurrMptAct. */
+ /* The offset of the IO operation depends on MptActType. */
+ u32 MptIoOffset;
+ /* The value of the IO operation depends on MptActType. */
+ u32 MptIoValue;
+ /* The RfPath of the IO operation depends on MptActType. */
+ u32 MptRfPath;
+
+ enum wireless_mode MptWirelessModeToSw; /* Wireless mode to switch. */
+ u8 MptChannelToSw; /* Channel to switch. */
+ u8 MptInitGainToSet; /* Initial gain to set. */
+ u32 MptBandWidth; /* bandwidth to switch. */
+ u32 MptRateIndex; /* rate index. */
+ /* Register value kept for Single Carrier Tx test. */
+ u8 btMpCckTxPower;
+ /* Register value kept for Single Carrier Tx test. */
+ u8 btMpOfdmTxPower;
+ /* For MP Tx Power index */
+ u8 TxPwrLevel[2]; /* rf-A, rf-B */
+
+ /* Content of the RCR register for Mass Production Test. */
+ u32 MptRCR;
+ /* true if we only receive packets with specific pattern. */
+ bool bMptFilterPattern;
+ /* Rx OK count, statistics used in Mass Production Test. */
+ u32 MptRxOkCnt;
+ /* Rx CRC32 error count, statistics used in Mass Production Test. */
+ u32 MptRxCrcErrCnt;
+
+ bool bCckContTx; /* true if we are in CCK Continuous Tx test. */
+ bool bOfdmContTx; /* true if we are in OFDM Continuous Tx test. */
+ bool bStartContTx; /* true if we have started the Continuous Tx test. */
+ /* true if we are in Single Carrier Tx test. */
+ bool bSingleCarrier;
+ /* true if we are in Carrier Suppression Tx Test. */
+ bool bCarrierSuppression;
+ /* true if we are in Single Tone Tx test. */
+ bool bSingleTone;
+
+ /* ACK counter requested by K.Y. */
+ bool bMptEnableAckCounter;
+ u32 MptAckCounter;
+
+ u8 APK_bound[2]; /* for APK path A/path B */
+ bool bMptIndexEven;
+
+ u8 backup0xc50;
+ u8 backup0xc58;
+ u8 backup0xc30;
+ u8 backup0x52_RF_A;
+ u8 backup0x52_RF_B;
+
+ u8 h2cReqNum;
+ u8 c2hBuf[20];
+
+ u8 btInBuf[100];
+ u32 mptOutLen;
+ u8 mptOutBuf[100];
+};
+
+enum {
+ WRITE_REG = 1,
+ READ_REG,
+ WRITE_RF,
+ READ_RF,
+ MP_START,
+ MP_STOP,
+ MP_RATE,
+ MP_CHANNEL,
+ MP_BANDWIDTH,
+ MP_TXPOWER,
+ MP_ANT_TX,
+ MP_ANT_RX,
+ MP_CTX,
+ MP_QUERY,
+ MP_ARX,
+ MP_PSD,
+ MP_PWRTRK,
+ MP_THER,
+ MP_IOCTL,
+ EFUSE_GET,
+ EFUSE_SET,
+ MP_RESET_STATS,
+ MP_DUMP,
+ MP_PHYPARA,
+ MP_SetRFPathSwh,
+ MP_QueryDrvStats,
+ MP_SetBT,
+ CTA_TEST,
+ MP_NULL,
+};
+
+struct mp_priv {
+ struct adapter *papdater;
+
+ /* Testing Flag */
+ /* 0 for normal type packet, 1 for loopback packet (16bytes TXCMD) */
+ u32 mode;
+
+ u32 prev_fw_state;
+
+ /* OID cmd handler */
+ struct mp_wiparam workparam;
+
+ /* Tx Section */
+ u8 TID;
+ u32 tx_pktcount;
+ struct mp_tx tx;
+
+ /* Rx Section */
+ u32 rx_pktcount;
+ u32 rx_crcerrpktcount;
+ u32 rx_pktloss;
+
+ struct recv_stat rxstat;
+
+ /* RF/BB relative */
+ u8 channel;
+ u8 bandwidth;
+ u8 prime_channel_offset;
+ u8 txpoweridx;
+ u8 txpoweridx_b;
+ u8 rateidx;
+ u32 preamble;
+ u32 CrystalCap;
+
+ u16 antenna_tx;
+ u16 antenna_rx;
+
+ u8 check_mp_pkt;
+
+ u8 bSetTxPower;
+
+ struct wlan_network mp_network;
+ unsigned char network_macaddr[ETH_ALEN];
+
+ u8 *pallocated_mp_xmitframe_buf;
+ u8 *pmp_xmtframe_buf;
+ struct __queue free_mp_xmitqueue;
+ u32 free_mp_xmitframe_cnt;
+
+ struct mpt_context MptCtx;
+};
+
+struct iocmd_struct {
+ u8 cmdclass;
+ u16 value;
+ u8 index;
+};
+
+struct rf_reg_param {
+ u32 path;
+ u32 offset;
+ u32 value;
+};
+
+struct bb_reg_param {
+ u32 offset;
+ u32 value;
+};
+/* */
+
+#define LOWER true
+#define RAISE false
+
+/* Hardware Registers */
+#define BB_REG_BASE_ADDR 0x800
+
+/* MP variables */
+enum mp_mode_ {
+ MP_OFF,
+ MP_ON,
+ MP_ERR,
+ MP_CONTINUOUS_TX,
+ MP_SINGLE_CARRIER_TX,
+ MP_CARRIER_SUPPRISSION_TX,
+ MP_SINGLE_TONE_TX,
+ MP_PACKET_TX,
+ MP_PACKET_RX
+};
+
+#define MAX_RF_PATH_NUMS RF_PATH_MAX
+
+extern u8 mpdatarate[NumRates];
+
+/* MP sets the forced data rate based on the definitions below. */
+enum mpt_rate_index {
+ /* CCK rate. */
+ MPT_RATE_1M, /* 0 */
+ MPT_RATE_2M,
+ MPT_RATE_55M,
+ MPT_RATE_11M, /* 3 */
+
+ /* OFDM rate. */
+ MPT_RATE_6M, /* 4 */
+ MPT_RATE_9M,
+ MPT_RATE_12M,
+ MPT_RATE_18M,
+ MPT_RATE_24M,
+ MPT_RATE_36M,
+ MPT_RATE_48M,
+ MPT_RATE_54M, /* 11 */
+
+ /* HT rate. */
+ MPT_RATE_MCS0, /* 12 */
+ MPT_RATE_MCS1,
+ MPT_RATE_MCS2,
+ MPT_RATE_MCS3,
+ MPT_RATE_MCS4,
+ MPT_RATE_MCS5,
+ MPT_RATE_MCS6,
+ MPT_RATE_MCS7, /* 19 */
+ MPT_RATE_MCS8,
+ MPT_RATE_MCS9,
+ MPT_RATE_MCS10,
+ MPT_RATE_MCS11,
+ MPT_RATE_MCS12,
+ MPT_RATE_MCS13,
+ MPT_RATE_MCS14,
+ MPT_RATE_MCS15, /* 27 */
+ MPT_RATE_LAST
+};
+
+#define MAX_TX_PWR_INDEX_N_MODE 64 /* 0x3F */
+
+enum power_mode {
+ POWER_LOW = 0,
+ POWER_NORMAL
+};
+
+#define RX_PKT_BROADCAST 1
+#define RX_PKT_DEST_ADDR 2
+#define RX_PKT_PHY_MATCH 3
+
+enum encry_ctrl_state {
+ HW_CONTROL, /* hw encryption & decryption */
+ SW_CONTROL, /* sw encryption & decryption */
+ HW_ENCRY_SW_DECRY, /* hw encryption & sw decryption */
+ SW_ENCRY_HW_DECRY /* sw encryption & hw decryption */
+};
+
+s32 init_mp_priv(struct adapter *padapter);
+void free_mp_priv(struct mp_priv *pmp_priv);
+s32 MPT_InitializeAdapter(struct adapter *padapter, u8 Channel);
+void MPT_DeInitAdapter(struct adapter *padapter);
+s32 mp_start_test(struct adapter *padapter);
+void mp_stop_test(struct adapter *padapter);
+
+u32 _read_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 bitmask);
+void _write_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 bitmask, u32 val);
+
+u32 read_macreg(struct adapter *padapter, u32 addr, u32 sz);
+void write_macreg(struct adapter *padapter, u32 addr, u32 val, u32 sz);
+u32 read_bbreg(struct adapter *padapter, u32 addr, u32 bitmask);
+void write_bbreg(struct adapter *padapter, u32 addr, u32 bitmask, u32 val);
+u32 read_rfreg(struct adapter *padapter, u8 rfpath, u32 addr);
+void write_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 val);
+
+void SetChannel(struct adapter *pAdapter);
+void SetBandwidth(struct adapter *pAdapter);
+void SetTxPower(struct adapter *pAdapter);
+void SetAntennaPathPower(struct adapter *pAdapter);
+void SetDataRate(struct adapter *pAdapter);
+
+void SetAntenna(struct adapter *pAdapter);
+
+s32 SetThermalMeter(struct adapter *pAdapter, u8 target_ther);
+void GetThermalMeter(struct adapter *pAdapter, u8 *value);
+
+void SetContinuousTx(struct adapter *pAdapter, u8 bStart);
+void SetSingleCarrierTx(struct adapter *pAdapter, u8 bStart);
+void SetSingleToneTx(struct adapter *pAdapter, u8 bStart);
+void SetCarrierSuppressionTx(struct adapter *pAdapter, u8 bStart);
+void PhySetTxPowerLevel(struct adapter *pAdapter);
+
+void fill_txdesc_for_mp(struct adapter *padapter, struct tx_desc *ptxdesc);
+void SetPacketTx(struct adapter *padapter);
+void SetPacketRx(struct adapter *pAdapter, u8 bStartRx);
+
+void ResetPhyRxPktCount(struct adapter *pAdapter);
+u32 GetPhyRxPktReceived(struct adapter *pAdapter);
+u32 GetPhyRxPktCRC32Error(struct adapter *pAdapter);
+
+s32 SetPowerTracking(struct adapter *padapter, u8 enable);
+void GetPowerTracking(struct adapter *padapter, u8 *enable);
+u32 mp_query_psd(struct adapter *pAdapter, u8 *data);
+void Hal_SetAntenna(struct adapter *pAdapter);
+void Hal_SetBandwidth(struct adapter *pAdapter);
+void Hal_SetTxPower(struct adapter *pAdapter);
+void Hal_SetCarrierSuppressionTx(struct adapter *pAdapter, u8 bStart);
+void Hal_SetSingleToneTx(struct adapter *pAdapter, u8 bStart);
+void Hal_SetSingleCarrierTx(struct adapter *pAdapter, u8 bStart);
+void Hal_SetContinuousTx(struct adapter *pAdapter, u8 bStart);
+void Hal_SetDataRate(struct adapter *pAdapter);
+void Hal_SetChannel(struct adapter *pAdapter);
+void Hal_SetAntennaPathPower(struct adapter *pAdapter);
+s32 Hal_SetThermalMeter(struct adapter *pAdapter, u8 target_ther);
+s32 Hal_SetPowerTracking(struct adapter *padapter, u8 enable);
+void Hal_GetPowerTracking(struct adapter *padapter, u8 *enable);
+void Hal_GetThermalMeter(struct adapter *pAdapter, u8 *value);
+void Hal_mpt_SwitchRfSetting(struct adapter *pAdapter);
+void Hal_MPT_CCKTxPowerAdjust(struct adapter *Adapter, bool bInCH14);
+void Hal_MPT_CCKTxPowerAdjustbyIndex(struct adapter *pAdapter, bool beven);
+void Hal_SetCCKTxPower(struct adapter *pAdapter, u8 *TxPower);
+void Hal_SetOFDMTxPower(struct adapter *pAdapter, u8 *TxPower);
+void Hal_TriggerRFThermalMeter(struct adapter *pAdapter);
+u8 Hal_ReadRFThermalMeter(struct adapter *pAdapter);
+void Hal_SetCCKContinuousTx(struct adapter *pAdapter, u8 bStart);
+void Hal_SetOFDMContinuousTx(struct adapter *pAdapter, u8 bStart);
+void Hal_ProSetCrystalCap(struct adapter *pAdapter, u32 CrystalCapVal);
+void _rtw_mp_xmit_priv(struct xmit_priv *pxmitpriv);
+void MP_PHY_SetRFPathSwitch(struct adapter *pAdapter, bool bMain);
+
+#endif /* _RTW_MP_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_mp_ioctl.h b/drivers/staging/rtl8188eu/include/rtw_mp_ioctl.h
new file mode 100644
index 00000000000..494e90e5a75
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_mp_ioctl.h
@@ -0,0 +1,340 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_MP_IOCTL_H_
+#define _RTW_MP_IOCTL_H_
+
+#include <drv_types.h>
+#include <mp_custom_oid.h>
+#include <rtw_ioctl.h>
+#include <rtw_ioctl_rtl.h>
+#include <rtw_efuse.h>
+#include <rtw_mp.h>
+
+/* */
+struct cfg_dbg_msg_struct {
+ u32 DebugLevel;
+ u32 DebugComponent_H32;
+ u32 DebugComponent_L32;
+};
+
+struct mp_rw_reg {
+ u32 offset;
+ u32 width;
+ u32 value;
+};
+
+struct efuse_access_struct {
+ u16 start_addr;
+ u16 cnts;
+ u8 data[0];
+};
+
+struct burst_rw_reg {
+ u32 offset;
+ u32 len;
+ u8 Data[256];
+};
+
+struct usb_vendor_req {
+ u8 bRequest;
+ u16 wValue;
+ u16 wIndex;
+ u16 wLength;
+ u8 u8Dir;/* 0:OUT, 1:IN */
+ u8 u8InData;
+};
+
+struct dr_variable_struct {
+ u8 offset;
+ u32 variable;
+};
+
+#define _irqlevel_changed_(a, b)
+
+/* rtl8188eu_oid_rtl_seg_81_80_00 */
+int rtl8188eu_oid_rt_pro_set_data_rate_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_start_test_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_stop_test_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_channel_direct_call_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_antenna_bb_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_tx_power_control_hdl(struct oid_par_priv *poid_par_priv);
+
+/* rtl8188eu_oid_rtl_seg_81_80_20 */
+int rtl8188eu_oid_rt_pro_query_tx_packet_sent_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_query_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_query_rx_packet_crc32_error_hdl(struct oid_par_priv *par_priv);
+int rtl8188eu_oid_rt_pro_reset_tx_packet_sent_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_reset_rx_packet_received_hdl(struct oid_par_priv *par_priv);
+int rtl8188eu_oid_rt_pro_set_modulation_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_continuous_tx_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_single_carrier_tx_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_carrier_suppression_tx_hdl(struct oid_par_priv *par_priv);
+int rtl8188eu_oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv *poid_par_priv);
+
+/* rtl8188eu_oid_rtl_seg_81_87 */
+int rtl8188eu_oid_rt_pro_write_bb_reg_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_read_bb_reg_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_write_rf_reg_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_read_rf_reg_hdl(struct oid_par_priv *poid_par_priv);
+
+/* rtl8188eu_oid_rtl_seg_81_85 */
+int rtl8188eu_oid_rt_wireless_mode_hdl(struct oid_par_priv *poid_par_priv);
+
+/* rtl8188eu_oid_rtl_seg_87_11_00 */
+int rtl8188eu_oid_rt_pro8711_join_bss_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_read_register_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_write_register_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_burst_read_register_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_burst_write_register_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_write_txcmd_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_read16_eeprom_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_write16_eeprom_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro8711_wi_poll_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro8711_pkt_loss_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_rd_attrib_mem_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_wr_attrib_mem_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_rf_intfs_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_poll_rx_status_hdl(struct oid_par_priv *poid_par_priv);
+/* rtl8188eu_oid_rtl_seg_87_11_20 */
+int rtl8188eu_oid_rt_pro_cfg_debug_message_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_data_rate_ex_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_basic_rate_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_read_tssi_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_power_tracking_hdl(struct oid_par_priv *poid_par_priv);
+/* rtl8188eu_oid_rtl_seg_87_11_50 */
+int rtl8188eu_oid_rt_pro_qry_pwrstate_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_pwrstate_hdl(struct oid_par_priv *poid_par_priv);
+/* rtl8188eu_oid_rtl_seg_87_11_F0 */
+int rtl8188eu_oid_rt_pro_h2c_set_rate_table_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_h2c_get_rate_table_hdl(struct oid_par_priv *poid_par_priv);
+
+/* rtl8188eu_oid_rtl_seg_87_12_00 */
+int rtl8188eu_oid_rt_pro_encryption_ctrl_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_add_sta_info_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_dele_sta_info_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_query_dr_variable_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_read_efuse_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_write_efuse_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_rw_efuse_pgpkt_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_get_efuse_current_size_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_efuse_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_efuse_map_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_set_bandwidth_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_set_crystal_cap_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_set_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_get_efuse_max_size_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_tx_agc_offset_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_set_pkt_test_mode_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_get_thermal_meter_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_reset_phy_rx_packet_count_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_get_phy_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_get_phy_rx_packet_crc32_error_hdl(struct oid_par_priv *par_priv);
+int rtl8188eu_oid_rt_set_power_down_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_get_power_mode_hdl(struct oid_par_priv *poid_par_priv);
+int rtl8188eu_oid_rt_pro_trigger_gpio_hdl(struct oid_par_priv *poid_par_priv);
+
+#ifdef _RTW_MP_IOCTL_C_
+
+static const struct oid_obj_priv rtl8188eu_oid_rtl_seg_81_80_00[] = {
+ {1, &oid_null_function}, /* 0x00 OID_RT_PRO_RESET_DUT */
+ {1, &rtl8188eu_oid_rt_pro_set_data_rate_hdl}, /* 0x01 */
+ {1, &rtl8188eu_oid_rt_pro_start_test_hdl}, /* 0x02 */
+ {1, &rtl8188eu_oid_rt_pro_stop_test_hdl}, /* 0x03 */
+ {1, &oid_null_function}, /* 0x04 OID_RT_PRO_SET_PREAMBLE */
+ {1, &oid_null_function}, /* 0x05 OID_RT_PRO_SET_SCRAMBLER */
+ {1, &oid_null_function}, /* 0x06 OID_RT_PRO_SET_FILTER_BB */
+ {1, &oid_null_function},/* 0x07 OID_RT_PRO_SET_MANUAL_DIVERSITY_BB */
+ {1, &rtl8188eu_oid_rt_pro_set_channel_direct_call_hdl}, /* 0x08 */
+ {1, &oid_null_function},/* 0x09 OID_RT_PRO_SET_SLEEP_MODE_DIRECT_CALL */
+ {1, &oid_null_function},/* 0x0A OID_RT_PRO_SET_WAKE_MODE_DIRECT_CALL */
+ {1, &rtl8188eu_oid_rt_pro_set_continuous_tx_hdl}, /* 0x0B OID_RT_PRO_SET_TX_CONTINUOUS_DIRECT_CALL */
+ {1, &rtl8188eu_oid_rt_pro_set_single_carrier_tx_hdl},/* 0x0C OID_RT_PRO_SET_SINGLE_CARRIER_TX_CONTINUOUS */
+ {1, &oid_null_function}, /* 0x0D OID_RT_PRO_SET_TX_ANTENNA_BB */
+ {1, &rtl8188eu_oid_rt_pro_set_antenna_bb_hdl}, /* 0x0E */
+ {1, &oid_null_function}, /* 0x0F OID_RT_PRO_SET_CR_SCRAMBLER */
+ {1, &oid_null_function}, /* 0x10 OID_RT_PRO_SET_CR_NEW_FILTER */
+ {1, &rtl8188eu_oid_rt_pro_set_tx_power_control_hdl},/* 0x11 OID_RT_PRO_SET_TX_POWER_CONTROL */
+ {1, &oid_null_function}, /* 0x12 OID_RT_PRO_SET_CR_TX_CONFIG */
+ {1, &oid_null_function}, /* 0x13 OID_RT_PRO_GET_TX_POWER_CONTROL */
+ {1, &oid_null_function}, /* 0x14 OID_RT_PRO_GET_CR_SIGNAL_QUALITY */
+ {1, &oid_null_function}, /* 0x15 OID_RT_PRO_SET_CR_SETPOINT */
+ {1, &oid_null_function}, /* 0x16 OID_RT_PRO_SET_INTEGRATOR */
+ {1, &oid_null_function}, /* 0x17 OID_RT_PRO_SET_SIGNAL_QUALITY */
+ {1, &oid_null_function}, /* 0x18 OID_RT_PRO_GET_INTEGRATOR */
+ {1, &oid_null_function}, /* 0x19 OID_RT_PRO_GET_SIGNAL_QUALITY */
+ {1, &oid_null_function}, /* 0x1A OID_RT_PRO_QUERY_EEPROM_TYPE */
+ {1, &oid_null_function}, /* 0x1B OID_RT_PRO_WRITE_MAC_ADDRESS */
+ {1, &oid_null_function}, /* 0x1C OID_RT_PRO_READ_MAC_ADDRESS */
+ {1, &oid_null_function}, /* 0x1D OID_RT_PRO_WRITE_CIS_DATA */
+ {1, &oid_null_function}, /* 0x1E OID_RT_PRO_READ_CIS_DATA */
+ {1, &oid_null_function} /* 0x1F OID_RT_PRO_WRITE_POWER_CONTROL */
+};
+
+static const struct oid_obj_priv rtl8188eu_oid_rtl_seg_81_80_20[] = {
+ {1, &oid_null_function}, /* 0x20 OID_RT_PRO_READ_POWER_CONTROL */
+ {1, &oid_null_function}, /* 0x21 OID_RT_PRO_WRITE_EEPROM */
+ {1, &oid_null_function}, /* 0x22 OID_RT_PRO_READ_EEPROM */
+ {1, &rtl8188eu_oid_rt_pro_reset_tx_packet_sent_hdl}, /* 0x23 */
+ {1, &rtl8188eu_oid_rt_pro_query_tx_packet_sent_hdl}, /* 0x24 */
+ {1, &rtl8188eu_oid_rt_pro_reset_rx_packet_received_hdl}, /* 0x25 */
+ {1, &rtl8188eu_oid_rt_pro_query_rx_packet_received_hdl}, /* 0x26 */
+ {1, &rtl8188eu_oid_rt_pro_query_rx_packet_crc32_error_hdl}, /* 0x27 */
+ {1, &oid_null_function}, /* 0x28 OID_RT_PRO_QUERY_CURRENT_ADDRESS */
+ {1, &oid_null_function}, /* 0x29 OID_RT_PRO_QUERY_PERMANENT_ADDRESS */
+ {1, &oid_null_function}, /* 0x2A OID_RT_PRO_SET_PHILIPS_RF_PARAMETERS */
+ {1, &rtl8188eu_oid_rt_pro_set_carrier_suppression_tx_hdl},/* 0x2B OID_RT_PRO_SET_CARRIER_SUPPRESSION_TX */
+ {1, &oid_null_function}, /* 0x2C OID_RT_PRO_RECEIVE_PACKET */
+ {1, &oid_null_function}, /* 0x2D OID_RT_PRO_WRITE_EEPROM_BYTE */
+ {1, &oid_null_function}, /* 0x2E OID_RT_PRO_READ_EEPROM_BYTE */
+ {1, &rtl8188eu_oid_rt_pro_set_modulation_hdl} /* 0x2F */
+};
+
+static const struct oid_obj_priv rtl8188eu_oid_rtl_seg_81_80_40[] = {
+ {1, &oid_null_function}, /* 0x40 */
+ {1, &oid_null_function}, /* 0x41 */
+ {1, &oid_null_function}, /* 0x42 */
+ {1, &rtl8188eu_oid_rt_pro_set_single_tone_tx_hdl}, /* 0x43 */
+ {1, &oid_null_function}, /* 0x44 */
+ {1, &oid_null_function} /* 0x45 */
+};
+
+static const struct oid_obj_priv rtl8188eu_oid_rtl_seg_81_80_80[] = {
+ {1, &oid_null_function}, /* 0x80 OID_RT_DRIVER_OPTION */
+ {1, &oid_null_function}, /* 0x81 OID_RT_RF_OFF */
+ {1, &oid_null_function} /* 0x82 OID_RT_AUTH_STATUS */
+};
+
+static const struct oid_obj_priv rtl8188eu_oid_rtl_seg_81_85[] = {
+ {1, &rtl8188eu_oid_rt_wireless_mode_hdl} /* 0x00 OID_RT_WIRELESS_MODE */
+};
+
+#endif /* _RTL871X_MP_IOCTL_C_ */
+
+struct rwreg_param {
+ u32 offset;
+ u32 width;
+ u32 value;
+};
+
+struct bbreg_param {
+ u32 offset;
+ u32 phymask;
+ u32 value;
+};
+
+struct txpower_param {
+ u32 pwr_index;
+};
+
+struct datarate_param {
+ u32 rate_index;
+};
+
+struct rfintfs_parm {
+ u32 rfintfs;
+};
+
+struct mp_xmit_parm {
+ u8 enable;
+ u32 count;
+ u16 length;
+ u8 payload_type;
+ u8 da[ETH_ALEN];
+};
+
+struct mp_xmit_packet {
+ u32 len;
+ u32 mem[MAX_MP_XMITBUF_SZ >> 2];
+};
+
+struct psmode_param {
+ u32 ps_mode;
+ u32 smart_ps;
+};
+
+/* for OID_RT_PRO_READ16_EEPROM & OID_RT_PRO_WRITE16_EEPROM */
+struct eeprom_rw_param {
+ u32 offset;
+ u16 value;
+};
+
+struct mp_ioctl_handler {
+ u32 paramsize;
+ s32 (*handler)(struct oid_par_priv *poid_par_priv);
+ u32 oid;
+};
+
+struct mp_ioctl_param {
+ u32 subcode;
+ u32 len;
+ u8 data[0];
+};
+
+#define GEN_MP_IOCTL_SUBCODE(code) _MP_IOCTL_ ## code ## _CMD_
+
+enum RTL871X_MP_IOCTL_SUBCODE {
+ GEN_MP_IOCTL_SUBCODE(MP_START), /*0*/
+ GEN_MP_IOCTL_SUBCODE(MP_STOP),
+ GEN_MP_IOCTL_SUBCODE(READ_REG),
+ GEN_MP_IOCTL_SUBCODE(WRITE_REG),
+ GEN_MP_IOCTL_SUBCODE(READ_BB_REG),
+ GEN_MP_IOCTL_SUBCODE(WRITE_BB_REG), /*5*/
+ GEN_MP_IOCTL_SUBCODE(READ_RF_REG),
+ GEN_MP_IOCTL_SUBCODE(WRITE_RF_REG),
+ GEN_MP_IOCTL_SUBCODE(SET_CHANNEL),
+ GEN_MP_IOCTL_SUBCODE(SET_TXPOWER),
+ GEN_MP_IOCTL_SUBCODE(SET_DATARATE), /*10*/
+ GEN_MP_IOCTL_SUBCODE(SET_BANDWIDTH),
+ GEN_MP_IOCTL_SUBCODE(SET_ANTENNA),
+ GEN_MP_IOCTL_SUBCODE(CNTU_TX),
+ GEN_MP_IOCTL_SUBCODE(SC_TX),
+ GEN_MP_IOCTL_SUBCODE(CS_TX), /*15*/
+ GEN_MP_IOCTL_SUBCODE(ST_TX),
+ GEN_MP_IOCTL_SUBCODE(IOCTL_XMIT_PACKET),
+ GEN_MP_IOCTL_SUBCODE(SET_RX_PKT_TYPE),
+ GEN_MP_IOCTL_SUBCODE(RESET_PHY_RX_PKT_CNT),
+ GEN_MP_IOCTL_SUBCODE(GET_PHY_RX_PKT_RECV), /*20*/
+ GEN_MP_IOCTL_SUBCODE(GET_PHY_RX_PKT_ERROR),
+ GEN_MP_IOCTL_SUBCODE(READ16_EEPROM),
+ GEN_MP_IOCTL_SUBCODE(WRITE16_EEPROM),
+ GEN_MP_IOCTL_SUBCODE(EFUSE),
+ GEN_MP_IOCTL_SUBCODE(EFUSE_MAP), /*25*/
+ GEN_MP_IOCTL_SUBCODE(GET_EFUSE_MAX_SIZE),
+ GEN_MP_IOCTL_SUBCODE(GET_EFUSE_CURRENT_SIZE),
+ GEN_MP_IOCTL_SUBCODE(GET_THERMAL_METER),
+ GEN_MP_IOCTL_SUBCODE(SET_PTM),
+ GEN_MP_IOCTL_SUBCODE(SET_POWER_DOWN), /*30*/
+ GEN_MP_IOCTL_SUBCODE(TRIGGER_GPIO),
+ GEN_MP_IOCTL_SUBCODE(SET_DM_BT), /*35*/
+ GEN_MP_IOCTL_SUBCODE(DEL_BA), /*36*/
+ GEN_MP_IOCTL_SUBCODE(GET_WIFI_STATUS), /*37*/
+ MAX_MP_IOCTL_SUBCODE,
+};
+
+s32 rtl8188eu_mp_ioctl_xmit_packet_hdl(struct oid_par_priv *poid_par_priv);
+
+#define GEN_HANDLER(sz, hdl, oid) {sz, hdl, oid},
+
+#define EXT_MP_IOCTL_HANDLER(sz, subcode, oid) \
+ {sz, rtl8188eu_mp_ioctl_##subcode##_hdl, oid},
+
+
+#endif
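A minimal sketch of how the handler-table macros above expand, assuming a hypothetical table name; only the structs, the macros and the rtl8188eu_mp_ioctl_xmit_packet_hdl prototype come from this header:

static const struct mp_ioctl_handler example_mp_ioctl_tbl[] = {
	/* GEN_HANDLER emits a plain {paramsize, handler, oid} initializer */
	GEN_HANDLER(sizeof(struct mp_xmit_parm),
		    rtl8188eu_mp_ioctl_xmit_packet_hdl,
		    GEN_MP_IOCTL_SUBCODE(IOCTL_XMIT_PACKET))
	/* EXT_MP_IOCTL_HANDLER pastes the subcode into the handler name,
	 * so "xmit_packet" becomes rtl8188eu_mp_ioctl_xmit_packet_hdl */
	EXT_MP_IOCTL_HANDLER(sizeof(struct mp_xmit_parm), xmit_packet,
			     GEN_MP_IOCTL_SUBCODE(IOCTL_XMIT_PACKET))
};

GEN_MP_IOCTL_SUBCODE(MP_START) likewise pastes to _MP_IOCTL_MP_START_CMD_, so the enum above yields consecutive subcode values starting at 0.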
diff --git a/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h b/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
new file mode 100644
index 00000000000..3ad22076de3
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
@@ -0,0 +1,1084 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+/*****************************************************************************
+ *
+ * Module: __RTW_MP_PHY_REGDEF_H_
+ *
+ *
+ * Note: 1. Define PMAC/BB register map
+ * 2. Define RF register map
+ * 3. PMAC/BB register bit mask.
+ * 4. RF reg bit mask.
+ * 5. Other BB/RF relative definition.
+ *
+ *
+ * Export: Constants, macro, functions(API), global variables(None).
+ *
+ * Abbrev:
+ *
+ * History:
+ * Date Who Remark
+ * 08/07/2007 MHC 1. Porting from 9x series PHYCFG.h.
+ * 2. Reorganize code architecture.
+ * 09/25/2008 MH 1. Add RL6052 register definition
+ *
+ *****************************************************************************/
+#ifndef __RTW_MP_PHY_REGDEF_H_
+#define __RTW_MP_PHY_REGDEF_H_
+
+
+/*--------------------------Define Parameters-------------------------------*/
+
+/* */
+/* 8192S Register offset definition */
+/* */
+
+/* */
+/* BB-PHY register PMAC 0x100 PHY 0x800 - 0xEFF */
+/* 1. PMAC duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF */
+/* 2. 0x800/0x900/0xA00/0xC00/0xD00/0xE00 */
+/* 3. RF register 0x00-2E */
+/* 4. Bit Mask for BB/RF register */
+/* 5. Other definitions for BB/RF R/W */
+/* */
+
+
+/* */
+/* 1. PMAC duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF */
+/* 1. Page1(0x100) */
+/* */
+#define rPMAC_Reset 0x100
+#define rPMAC_TxStart 0x104
+#define rPMAC_TxLegacySIG 0x108
+#define rPMAC_TxHTSIG1 0x10c
+#define rPMAC_TxHTSIG2 0x110
+#define rPMAC_PHYDebug 0x114
+#define rPMAC_TxPacketNum 0x118
+#define rPMAC_TxIdle 0x11c
+#define rPMAC_TxMACHeader0 0x120
+#define rPMAC_TxMACHeader1 0x124
+#define rPMAC_TxMACHeader2 0x128
+#define rPMAC_TxMACHeader3 0x12c
+#define rPMAC_TxMACHeader4 0x130
+#define rPMAC_TxMACHeader5 0x134
+#define rPMAC_TxDataType 0x138
+#define rPMAC_TxRandomSeed 0x13c
+#define rPMAC_CCKPLCPPreamble 0x140
+#define rPMAC_CCKPLCPHeader 0x144
+#define rPMAC_CCKCRC16 0x148
+#define rPMAC_OFDMRxCRC32OK 0x170
+#define rPMAC_OFDMRxCRC32Er 0x174
+#define rPMAC_OFDMRxParityEr 0x178
+#define rPMAC_OFDMRxCRC8Er 0x17c
+#define rPMAC_CCKCRxRC16Er 0x180
+#define rPMAC_CCKCRxRC32Er 0x184
+#define rPMAC_CCKCRxRC32OK 0x188
+#define rPMAC_TxStatus 0x18c
+
+/* */
+/* 2. Page2(0x200) */
+/* */
+/* The following two definitions are only used for the USB interface. */
+/* define RF_BB_CMD_ADDR 0x02c0 RF/BB read/write command address. */
+/* define RF_BB_CMD_DATA 0x02c4 RF/BB read/write command data. */
+
+/* */
+/* 3. Page8(0x800) */
+/* */
+#define rFPGA0_RFMOD 0x800 /* RF mode & CCK TxSC RF BW Setting?? */
+
+#define rFPGA0_TxInfo 0x804 /* Status report?? */
+#define rFPGA0_PSDFunction 0x808
+
+#define rFPGA0_TxGainStage 0x80c /* Set TX PWR init gain? */
+
+#define rFPGA0_RFTiming1 0x810 /* Useless now */
+#define rFPGA0_RFTiming2 0x814
+/* define rFPGA0_XC_RFTiming 0x818 */
+/* define rFPGA0_XD_RFTiming 0x81c */
+
+#define rFPGA0_XA_HSSIParameter1 0x820 /* RF 3 wire register */
+#define rFPGA0_XA_HSSIParameter2 0x824
+#define rFPGA0_XB_HSSIParameter1 0x828
+#define rFPGA0_XB_HSSIParameter2 0x82c
+#define rFPGA0_XC_HSSIParameter1 0x830
+#define rFPGA0_XC_HSSIParameter2 0x834
+#define rFPGA0_XD_HSSIParameter1 0x838
+#define rFPGA0_XD_HSSIParameter2 0x83c
+#define rFPGA0_XA_LSSIParameter 0x840
+#define rFPGA0_XB_LSSIParameter 0x844
+#define rFPGA0_XC_LSSIParameter 0x848
+#define rFPGA0_XD_LSSIParameter 0x84c
+
+#define rFPGA0_RFWakeUpParameter 0x850 /* Useless now */
+#define rFPGA0_RFSleepUpParameter 0x854
+
+#define rFPGA0_XAB_SwitchControl 0x858 /* RF Channel switch */
+#define rFPGA0_XCD_SwitchControl 0x85c
+
+#define rFPGA0_XA_RFInterfaceOE 0x860 /* RF Channel switch */
+#define rFPGA0_XB_RFInterfaceOE 0x864
+#define rFPGA0_XC_RFInterfaceOE 0x868
+#define rFPGA0_XD_RFInterfaceOE 0x86c
+
+#define rFPGA0_XAB_RFInterfaceSW 0x870 /* RF Interface Software Control */
+#define rFPGA0_XCD_RFInterfaceSW 0x874
+
+#define rFPGA0_XAB_RFParameter 0x878 /* RF Parameter */
+#define rFPGA0_XCD_RFParameter 0x87c
+
+#define rFPGA0_AnalogParameter1 0x880 /* Crystal cap setting RF-R/W protection for parameter4?? */
+#define rFPGA0_AnalogParameter2 0x884
+#define rFPGA0_AnalogParameter3 0x888 /* Useless now */
+#define rFPGA0_AnalogParameter4 0x88c
+
+#define rFPGA0_XA_LSSIReadBack 0x8a0 /* Transceiver LSSI Readback */
+#define rFPGA0_XB_LSSIReadBack 0x8a4
+#define rFPGA0_XC_LSSIReadBack 0x8a8
+#define rFPGA0_XD_LSSIReadBack 0x8ac
+
+#define rFPGA0_PSDReport 0x8b4 /* Useless now */
+#define rFPGA0_XAB_RFInterfaceRB 0x8e0 /* Useless now RF Interface Readback Value */
+#define rFPGA0_XCD_RFInterfaceRB 0x8e4 /* Useless now */
+
+/* */
+/* 4. Page9(0x900) */
+/* */
+#define rFPGA1_RFMOD 0x900 /* RF mode & OFDM TxSC RF BW Setting?? */
+
+#define rFPGA1_TxBlock 0x904 /* Useless now */
+#define rFPGA1_DebugSelect 0x908 /* Useless now */
+#define rFPGA1_TxInfo 0x90c /* Useless now Status report?? */
+
+/* */
+/* 5. PageA(0xA00) */
+/* */
+/* Set Control channel to upper or lower. These settings are required only for 40MHz */
+#define rCCK0_System 0xa00
+
+#define rCCK0_AFESetting 0xa04 /* Disable init gain now Select RX path by RSSI */
+#define rCCK0_CCA 0xa08 /* Disable init gain now Init gain */
+
+#define rCCK0_RxAGC1 0xa0c /* AGC default value, saturation level Antenna Diversity, RX AGC, LNA Threshold, RX LNA Threshold useless now. Not the same as 90 series */
+#define rCCK0_RxAGC2 0xa10 /* AGC & DAGC */
+
+#define rCCK0_RxHP 0xa14
+
+#define rCCK0_DSPParameter1 0xa18 /* Timing recovery & Channel estimation threshold */
+#define rCCK0_DSPParameter2 0xa1c /* SQ threshold */
+
+#define rCCK0_TxFilter1 0xa20
+#define rCCK0_TxFilter2 0xa24
+#define rCCK0_DebugPort 0xa28 /* debug port and Tx filter3 */
+#define rCCK0_FalseAlarmReport 0xa2c /* 0xa2d useless now 0xa30-a4f channel report */
+#define rCCK0_TRSSIReport 0xa50
+#define rCCK0_RxReport 0xa54 /* 0xa57 */
+#define rCCK0_FACounterLower 0xa5c /* 0xa5b */
+#define rCCK0_FACounterUpper 0xa58 /* 0xa5c */
+
+/* */
+/* 6. PageC(0xC00) */
+/* */
+#define rOFDM0_LSTF 0xc00
+
+#define rOFDM0_TRxPathEnable 0xc04
+#define rOFDM0_TRMuxPar 0xc08
+#define rOFDM0_TRSWIsolation 0xc0c
+
+#define rOFDM0_XARxAFE 0xc10 /* RxIQ DC offset, Rx digital filter, DC notch filter */
+#define rOFDM0_XARxIQImbalance 0xc14 /* RxIQ imbalance matrix */
+#define rOFDM0_XBRxAFE 0xc18
+#define rOFDM0_XBRxIQImbalance 0xc1c
+#define rOFDM0_XCRxAFE 0xc20
+#define rOFDM0_XCRxIQImbalance 0xc24
+#define rOFDM0_XDRxAFE 0xc28
+#define rOFDM0_XDRxIQImbalance 0xc2c
+
+#define rOFDM0_RxDetector1 0xc30 /* PD,BW & SBD DM tune init gain */
+#define rOFDM0_RxDetector2 0xc34 /* SBD & Frame Sync. */
+#define rOFDM0_RxDetector3 0xc38 /* Frame Sync. */
+#define rOFDM0_RxDetector4 0xc3c /* PD, SBD, Frame Sync & Short-GI */
+
+#define rOFDM0_RxDSP 0xc40 /* Rx Sync Path */
+#define rOFDM0_CFOandDAGC 0xc44 /* CFO & DAGC */
+#define rOFDM0_CCADropThreshold 0xc48 /* CCA Drop threshold */
+#define rOFDM0_ECCAThreshold 0xc4c /* energy CCA */
+
+#define rOFDM0_XAAGCCore1 0xc50 /* DIG */
+#define rOFDM0_XAAGCCore2 0xc54
+#define rOFDM0_XBAGCCore1 0xc58
+#define rOFDM0_XBAGCCore2 0xc5c
+#define rOFDM0_XCAGCCore1 0xc60
+#define rOFDM0_XCAGCCore2 0xc64
+#define rOFDM0_XDAGCCore1 0xc68
+#define rOFDM0_XDAGCCore2 0xc6c
+
+#define rOFDM0_AGCParameter1 0xc70
+#define rOFDM0_AGCParameter2 0xc74
+#define rOFDM0_AGCRSSITable 0xc78
+#define rOFDM0_HTSTFAGC 0xc7c
+
+#define rOFDM0_XATxIQImbalance 0xc80 /* TX PWR TRACK and DIG */
+#define rOFDM0_XATxAFE 0xc84
+#define rOFDM0_XBTxIQImbalance 0xc88
+#define rOFDM0_XBTxAFE 0xc8c
+#define rOFDM0_XCTxIQImbalance 0xc90
+#define rOFDM0_XCTxAFE 0xc94
+#define rOFDM0_XDTxIQImbalance 0xc98
+#define rOFDM0_XDTxAFE 0xc9c
+#define rOFDM0_RxIQExtAnta 0xca0
+
+#define rOFDM0_RxHPParameter 0xce0
+#define rOFDM0_TxPseudoNoiseWgt 0xce4
+#define rOFDM0_FrameSync 0xcf0
+#define rOFDM0_DFSReport 0xcf4
+#define rOFDM0_TxCoeff1 0xca4
+#define rOFDM0_TxCoeff2 0xca8
+#define rOFDM0_TxCoeff3 0xcac
+#define rOFDM0_TxCoeff4 0xcb0
+#define rOFDM0_TxCoeff5 0xcb4
+#define rOFDM0_TxCoeff6 0xcb8
+
+/* 7. PageD(0xD00) */
+#define rOFDM1_LSTF 0xd00
+#define rOFDM1_TRxPathEnable 0xd04
+
+#define rOFDM1_CFO 0xd08 /* No setting now */
+#define rOFDM1_CSI1 0xd10
+#define rOFDM1_SBD 0xd14
+#define rOFDM1_CSI2 0xd18
+#define rOFDM1_CFOTracking 0xd2c
+#define rOFDM1_TRxMesaure1 0xd34
+#define rOFDM1_IntfDet 0xd3c
+#define rOFDM1_PseudoNoiseStateAB 0xd50
+#define rOFDM1_PseudoNoiseStateCD 0xd54
+#define rOFDM1_RxPseudoNoiseWgt 0xd58
+
+#define rOFDM_PHYCounter1 0xda0 /* cca, parity fail */
+#define rOFDM_PHYCounter2 0xda4 /* rate illegal, crc8 fail */
+#define rOFDM_PHYCounter3 0xda8 /* MCS not support */
+
+#define rOFDM_ShortCFOAB 0xdac /* No setting now */
+#define rOFDM_ShortCFOCD 0xdb0
+#define rOFDM_LongCFOAB 0xdb4
+#define rOFDM_LongCFOCD 0xdb8
+#define rOFDM_TailCFOAB 0xdbc
+#define rOFDM_TailCFOCD 0xdc0
+#define rOFDM_PWMeasure1 0xdc4
+#define rOFDM_PWMeasure2 0xdc8
+#define rOFDM_BWReport 0xdcc
+#define rOFDM_AGCReport 0xdd0
+#define rOFDM_RxSNR 0xdd4
+#define rOFDM_RxEVMCSI 0xdd8
+#define rOFDM_SIGReport 0xddc
+
+
+/* */
+/* 8. PageE(0xE00) */
+/* */
+#define rTxAGC_Rate18_06 0xe00
+#define rTxAGC_Rate54_24 0xe04
+#define rTxAGC_CCK_Mcs32 0xe08
+#define rTxAGC_Mcs03_Mcs00 0xe10
+#define rTxAGC_Mcs07_Mcs04 0xe14
+#define rTxAGC_Mcs11_Mcs08 0xe18
+#define rTxAGC_Mcs15_Mcs12 0xe1c
+
+/* Analog control in RX_WAIT_CCA: REG 0xEE0 [Analog Power & Control Register] */
+#define rRx_Wait_CCCA 0xe70
+#define rAnapar_Ctrl_BB 0xee0
+
+/* */
+/* 7. RF Register 0x00-0x2E (RF 8256) */
+/* RF-0222D 0x00-3F */
+/* */
+/* Zebra1 */
+#define RTL92SE_FPGA_VERIFY 0
+#define rZebra1_HSSIEnable 0x0 /* Useless now */
+#define rZebra1_TRxEnable1 0x1
+#define rZebra1_TRxEnable2 0x2
+#define rZebra1_AGC 0x4
+#define rZebra1_ChargePump 0x5
+/* if (RTL92SE_FPGA_VERIFY == 1) */
+#define rZebra1_Channel 0x7 /* RF channel switch */
+/* else */
+
+/* endif */
+#define rZebra1_TxGain 0x8 /* Useless now */
+#define rZebra1_TxLPF 0x9
+#define rZebra1_RxLPF 0xb
+#define rZebra1_RxHPFCorner 0xc
+
+/* Zebra4 */
+#define rGlobalCtrl 0 /* Useless now */
+#define rRTL8256_TxLPF 19
+#define rRTL8256_RxLPF 11
+
+/* RTL8258 */
+#define rRTL8258_TxLPF 0x11 /* Useless now */
+#define rRTL8258_RxLPF 0x13
+#define rRTL8258_RSSILPF 0xa
+
+/* */
+/* RL6052 Register definition */
+#define RF_AC 0x00 /* */
+
+#define RF_IQADJ_G1 0x01 /* */
+#define RF_IQADJ_G2 0x02 /* */
+#define RF_POW_TRSW 0x05 /* */
+
+#define RF_GAIN_RX 0x06 /* */
+#define RF_GAIN_TX 0x07 /* */
+
+#define RF_TXM_IDAC 0x08 /* */
+#define RF_BS_IQGEN 0x0F /* */
+
+#define RF_MODE1 0x10 /* */
+#define RF_MODE2 0x11 /* */
+
+#define RF_RX_AGC_HP 0x12 /* */
+#define RF_TX_AGC 0x13 /* */
+#define RF_BIAS 0x14 /* */
+#define RF_IPA 0x15 /* */
+#define RF_TXBIAS 0x16 /* */
+#define RF_POW_ABILITY 0x17 /* */
+#define RF_MODE_AG 0x18 /* */
+#define rRfChannel 0x18 /* RF channel and BW switch */
+#define RF_CHNLBW 0x18 /* RF channel and BW switch */
+#define RF_TOP 0x19 /* */
+
+#define RF_RX_G1 0x1A /* */
+#define RF_RX_G2 0x1B /* */
+
+#define RF_RX_BB2 0x1C /* */
+#define RF_RX_BB1 0x1D /* */
+
+#define RF_RCK1 0x1E /* */
+#define RF_RCK2 0x1F /* */
+
+#define RF_TX_G1 0x20 /* */
+#define RF_TX_G2 0x21 /* */
+#define RF_TX_G3 0x22 /* */
+
+#define RF_TX_BB1 0x23 /* */
+
+#define RF_T_METER 0x24 /* */
+
+#define RF_SYN_G1 0x25 /* RF TX Power control */
+#define RF_SYN_G2 0x26 /* RF TX Power control */
+#define RF_SYN_G3 0x27 /* RF TX Power control */
+#define RF_SYN_G4 0x28 /* RF TX Power control */
+#define RF_SYN_G5 0x29 /* RF TX Power control */
+#define RF_SYN_G6 0x2A /* RF TX Power control */
+#define RF_SYN_G7 0x2B /* RF TX Power control */
+#define RF_SYN_G8 0x2C /* RF TX Power control */
+
+#define RF_RCK_OS 0x30 /* RF TX PA control */
+#define RF_TXPA_G1 0x31 /* RF TX PA control */
+#define RF_TXPA_G2 0x32 /* RF TX PA control */
+#define RF_TXPA_G3 0x33 /* RF TX PA control */
+
+/* */
+/* Bit Mask */
+/* */
+/* 1. Page1(0x100) */
+#define bBBResetB 0x100 /* Useless now? */
+#define bGlobalResetB 0x200
+#define bOFDMTxStart 0x4
+#define bCCKTxStart 0x8
+#define bCRC32Debug 0x100
+#define bPMACLoopback 0x10
+#define bTxLSIG 0xffffff
+#define bOFDMTxRate 0xf
+#define bOFDMTxReserved 0x10
+#define bOFDMTxLength 0x1ffe0
+#define bOFDMTxParity 0x20000
+#define bTxHTSIG1 0xffffff
+#define bTxHTMCSRate 0x7f
+#define bTxHTBW 0x80
+#define bTxHTLength 0xffff00
+#define bTxHTSIG2 0xffffff
+#define bTxHTSmoothing 0x1
+#define bTxHTSounding 0x2
+#define bTxHTReserved 0x4
+#define bTxHTAggreation 0x8
+#define bTxHTSTBC 0x30
+#define bTxHTAdvanceCoding 0x40
+#define bTxHTShortGI 0x80
+#define bTxHTNumberHT_LTF 0x300
+#define bTxHTCRC8 0x3fc00
+#define bCounterReset 0x10000
+#define bNumOfOFDMTx 0xffff
+#define bNumOfCCKTx 0xffff0000
+#define bTxIdleInterval 0xffff
+#define bOFDMService 0xffff0000
+#define bTxMACHeader 0xffffffff
+#define bTxDataInit 0xff
+#define bTxHTMode 0x100
+#define bTxDataType 0x30000
+#define bTxRandomSeed 0xffffffff
+#define bCCKTxPreamble 0x1
+#define bCCKTxSFD 0xffff0000
+#define bCCKTxSIG 0xff
+#define bCCKTxService 0xff00
+#define bCCKLengthExt 0x8000
+#define bCCKTxLength 0xffff0000
+#define bCCKTxCRC16 0xffff
+#define bCCKTxStatus 0x1
+#define bOFDMTxStatus 0x2
+
+#define IS_BB_REG_OFFSET_92S(_Offset) (((_Offset) >= 0x800) && ((_Offset) <= 0xfff))
+
+/* 2. Page8(0x800) */
+#define bRFMOD 0x1 /* Reg 0x800 rFPGA0_RFMOD */
+#define bJapanMode 0x2
+#define bCCKTxSC 0x30
+#define bCCKEn 0x1000000
+#define bOFDMEn 0x2000000
+
+#define bOFDMRxADCPhase 0x10000 /* Useless now */
+#define bOFDMTxDACPhase 0x40000
+#define bXATxAGC 0x3f
+
+#define bXBTxAGC 0xf00 /* Reg 80c rFPGA0_TxGainStage */
+#define bXCTxAGC 0xf000
+#define bXDTxAGC 0xf0000
+
+#define bPAStart 0xf0000000 /* Useless now */
+#define bTRStart 0x00f00000
+#define bRFStart 0x0000f000
+#define bBBStart 0x000000f0
+#define bBBCCKStart 0x0000000f
+#define bPAEnd 0xf /* Reg0x814 */
+#define bTREnd 0x0f000000
+#define bRFEnd 0x000f0000
+#define bCCAMask 0x000000f0 /* T2R */
+#define bR2RCCAMask 0x00000f00
+#define bHSSI_R2TDelay 0xf8000000
+#define bHSSI_T2RDelay 0xf80000
+#define bContTxHSSI 0x400 /* change gain at continuous Tx */
+#define bIGFromCCK 0x200
+#define bAGCAddress 0x3f
+#define bRxHPTx 0x7000
+#define bRxHPT2R 0x38000
+#define bRxHPCCKIni 0xc0000
+#define bAGCTxCode 0xc00000
+#define bAGCRxCode 0x300000
+
+#define b3WireDataLength 0x800 /* Reg 0x820~84f rFPGA0_XA_HSSIParameter1 */
+#define b3WireAddressLength 0x400
+
+#define b3WireRFPowerDown 0x1 /* Useless now */
+/* define bHWSISelect 0x8 */
+#define b5GPAPEPolarity 0x40000000
+#define b2GPAPEPolarity 0x80000000
+#define bRFSW_TxDefaultAnt 0x3
+#define bRFSW_TxOptionAnt 0x30
+#define bRFSW_RxDefaultAnt 0x300
+#define bRFSW_RxOptionAnt 0x3000
+#define bRFSI_3WireData 0x1
+#define bRFSI_3WireClock 0x2
+#define bRFSI_3WireLoad 0x4
+#define bRFSI_3WireRW 0x8
+#define bRFSI_3Wire 0xf
+
+#define bRFSI_RFENV 0x10 /* Reg 0x870 rFPGA0_XAB_RFInterfaceSW */
+
+#define bRFSI_TRSW 0x20 /* Useless now */
+#define bRFSI_TRSWB 0x40
+#define bRFSI_ANTSW 0x100
+#define bRFSI_ANTSWB 0x200
+#define bRFSI_PAPE 0x400
+#define bRFSI_PAPE5G 0x800
+#define bBandSelect 0x1
+#define bHTSIG2_GI 0x80
+#define bHTSIG2_Smoothing 0x01
+#define bHTSIG2_Sounding 0x02
+#define bHTSIG2_Aggreaton 0x08
+#define bHTSIG2_STBC 0x30
+#define bHTSIG2_AdvCoding 0x40
+#define bHTSIG2_NumOfHTLTF 0x300
+#define bHTSIG2_CRC8 0x3fc
+#define bHTSIG1_MCS 0x7f
+#define bHTSIG1_BandWidth 0x80
+#define bHTSIG1_HTLength 0xffff
+#define bLSIG_Rate 0xf
+#define bLSIG_Reserved 0x10
+#define bLSIG_Length 0x1fffe
+#define bLSIG_Parity 0x20
+#define bCCKRxPhase 0x4
+#if (RTL92SE_FPGA_VERIFY == 1)
+#define bLSSIReadAddress 0x3f000000 /* LSSI "Read" Address
+ Reg 0x824 rFPGA0_XA_HSSIParameter2 */
+#else
+#define bLSSIReadAddress 0x7f800000 /* T65 RF */
+#endif
+#define bLSSIReadEdge 0x80000000 /* LSSI "Read" edge signal */
+#if (RTL92SE_FPGA_VERIFY == 1)
+#define bLSSIReadBackData 0xfff /* Reg 0x8a0
+ rFPGA0_XA_LSSIReadBack */
+#else
+#define bLSSIReadBackData 0xfffff /* T65 RF */
+#endif
+#define bLSSIReadOKFlag 0x1000 /* Useless now */
+#define bCCKSampleRate 0x8 /* 0: 44MHz, 1:88MHz */
+#define bRegulator0Standby 0x1
+#define bRegulatorPLLStandby 0x2
+#define bRegulator1Standby 0x4
+#define bPLLPowerUp 0x8
+#define bDPLLPowerUp 0x10
+#define bDA10PowerUp 0x20
+#define bAD7PowerUp 0x200
+#define bDA6PowerUp 0x2000
+#define bXtalPowerUp 0x4000
+#define b40MDClkPowerUP 0x8000
+#define bDA6DebugMode 0x20000
+#define bDA6Swing 0x380000
+
+#define bADClkPhase 0x4000000 /* Reg 0x880
+ rFPGA0_AnalogParameter1 20/40 CCK support switch 40/80 BB MHZ */
+
+#define b80MClkDelay 0x18000000 /* Useless */
+#define bAFEWatchDogEnable 0x20000000
+
+#define bXtalCap01 0xc0000000 /* Reg 0x884
+ rFPGA0_AnalogParameter2 Crystal cap */
+#define bXtalCap23 0x3
+#define bXtalCap92x 0x0f000000
+#define bXtalCap 0x0f000000
+
+#define bIntDifClkEnable 0x400 /* Useless */
+#define bExtSigClkEnable 0x800
+#define bBandgapMbiasPowerUp 0x10000
+#define bAD11SHGain 0xc0000
+#define bAD11InputRange 0x700000
+#define bAD11OPCurrent 0x3800000
+#define bIPathLoopback 0x4000000
+#define bQPathLoopback 0x8000000
+#define bAFELoopback 0x10000000
+#define bDA10Swing 0x7e0
+#define bDA10Reverse 0x800
+#define bDAClkSource 0x1000
+#define bAD7InputRange 0x6000
+#define bAD7Gain 0x38000
+#define bAD7OutputCMMode 0x40000
+#define bAD7InputCMMode 0x380000
+#define bAD7Current 0xc00000
+#define bRegulatorAdjust 0x7000000
+#define bAD11PowerUpAtTx 0x1
+#define bDA10PSAtTx 0x10
+#define bAD11PowerUpAtRx 0x100
+#define bDA10PSAtRx 0x1000
+#define bCCKRxAGCFormat 0x200
+#define bPSDFFTSamplepPoint 0xc000
+#define bPSDAverageNum 0x3000
+#define bIQPathControl 0xc00
+#define bPSDFreq 0x3ff
+#define bPSDAntennaPath 0x30
+#define bPSDIQSwitch 0x40
+#define bPSDRxTrigger 0x400000
+#define bPSDTxTrigger 0x80000000
+#define bPSDSineToneScale 0x7f000000
+#define bPSDReport 0xffff
+
+/* 3. Page9(0x900) */
+#define bOFDMTxSC 0x30000000 /* Useless */
+#define bCCKTxOn 0x1
+#define bOFDMTxOn 0x2
+#define bDebugPage 0xfff /* reset debug page and HWord,
+ * LWord */
+#define bDebugItem 0xff /* reset debug page and LWord */
+#define bAntL 0x10
+#define bAntNonHT 0x100
+#define bAntHT1 0x1000
+#define bAntHT2 0x10000
+#define bAntHT1S1 0x100000
+#define bAntNonHTS1 0x1000000
+
+/* 4. PageA(0xA00) */
+#define bCCKBBMode 0x3 /* Useless */
+#define bCCKTxPowerSaving 0x80
+#define bCCKRxPowerSaving 0x40
+
+#define bCCKSideBand 0x10 /* Reg 0xa00 rCCK0 20/40 sw */
+
+#define bCCKScramble 0x8 /* Useless */
+#define bCCKAntDiversity 0x8000
+#define bCCKCarrierRecovery 0x4000
+#define bCCKTxRate 0x3000
+#define bCCKDCCancel 0x0800
+#define bCCKISICancel 0x0400
+#define bCCKMatchFilter 0x0200
+#define bCCKEqualizer 0x0100
+#define bCCKPreambleDetect 0x800000
+#define bCCKFastFalseCCA 0x400000
+#define bCCKChEstStart 0x300000
+#define bCCKCCACount 0x080000
+#define bCCKcs_lim 0x070000
+#define bCCKBistMode 0x80000000
+#define bCCKCCAMask 0x40000000
+#define bCCKTxDACPhase 0x4
+#define bCCKRxADCPhase 0x20000000 /* r_rx_clk */
+#define bCCKr_cp_mode0 0x0100
+#define bCCKTxDCOffset 0xf0
+#define bCCKRxDCOffset 0xf
+#define bCCKCCAMode 0xc000
+#define bCCKFalseCS_lim 0x3f00
+#define bCCKCS_ratio 0xc00000
+#define bCCKCorgBit_sel 0x300000
+#define bCCKPD_lim 0x0f0000
+#define bCCKNewCCA 0x80000000
+#define bCCKRxHPofIG 0x8000
+#define bCCKRxIG 0x7f00
+#define bCCKLNAPolarity 0x800000
+#define bCCKRx1stGain 0x7f0000
+#define bCCKRFExtend 0x20000000 /* CCK Rx init gain polar */
+#define bCCKRxAGCSatLevel 0x1f000000
+#define bCCKRxAGCSatCount 0xe0
+#define bCCKRxRFSettle 0x1f /* AGCsamp_dly */
+#define bCCKFixedRxAGC 0x8000
+#define bCCKAntennaPolarity 0x2000
+#define bCCKTxFilterType 0x0c00
+#define bCCKRxAGCReportType 0x0300
+#define bCCKRxDAGCEn 0x80000000
+#define bCCKRxDAGCPeriod 0x20000000
+#define bCCKRxDAGCSatLevel 0x1f000000
+#define bCCKTimingRecovery 0x800000
+#define bCCKTxC0 0x3f0000
+#define bCCKTxC1 0x3f000000
+#define bCCKTxC2 0x3f
+#define bCCKTxC3 0x3f00
+#define bCCKTxC4 0x3f0000
+#define bCCKTxC5 0x3f000000
+#define bCCKTxC6 0x3f
+#define bCCKTxC7 0x3f00
+#define bCCKDebugPort 0xff0000
+#define bCCKDACDebug 0x0f000000
+#define bCCKFalseAlarmEnable 0x8000
+#define bCCKFalseAlarmRead 0x4000
+#define bCCKTRSSI 0x7f
+#define bCCKRxAGCReport 0xfe
+#define bCCKRxReport_AntSel 0x80000000
+#define bCCKRxReport_MFOff 0x40000000
+#define bCCKRxRxReport_SQLoss 0x20000000
+#define bCCKRxReport_Pktloss 0x10000000
+#define bCCKRxReport_Lockedbit 0x08000000
+#define bCCKRxReport_RateError 0x04000000
+#define bCCKRxReport_RxRate 0x03000000
+#define bCCKRxFACounterLower 0xff
+#define bCCKRxFACounterUpper 0xff000000
+#define bCCKRxHPAGCStart 0xe000
+#define bCCKRxHPAGCFinal 0x1c00
+#define bCCKRxFalseAlarmEnable 0x8000
+#define bCCKFACounterFreeze 0x4000
+#define bCCKTxPathSel 0x10000000
+#define bCCKDefaultRxPath 0xc000000
+#define bCCKOptionRxPath 0x3000000
+
+/* 5. PageC(0xC00) */
+#define bNumOfSTF 0x3 /* Useless */
+#define bShift_L 0xc0
+#define bGI_TH 0xc
+#define bRxPathA 0x1
+#define bRxPathB 0x2
+#define bRxPathC 0x4
+#define bRxPathD 0x8
+#define bTxPathA 0x1
+#define bTxPathB 0x2
+#define bTxPathC 0x4
+#define bTxPathD 0x8
+#define bTRSSIFreq 0x200
+#define bADCBackoff 0x3000
+#define bDFIRBackoff 0xc000
+#define bTRSSILatchPhase 0x10000
+#define bRxIDCOffset 0xff
+#define bRxQDCOffset 0xff00
+#define bRxDFIRMode 0x1800000
+#define bRxDCNFType 0xe000000
+#define bRXIQImb_A 0x3ff
+#define bRXIQImb_B 0xfc00
+#define bRXIQImb_C 0x3f0000
+#define bRXIQImb_D 0xffc00000
+#define bDC_dc_Notch 0x60000
+#define bRxNBINotch 0x1f000000
+#define bPD_TH 0xf
+#define bPD_TH_Opt2 0xc000
+#define bPWED_TH 0x700
+#define bIfMF_Win_L 0x800
+#define bPD_Option 0x1000
+#define bMF_Win_L 0xe000
+#define bBW_Search_L 0x30000
+#define bwin_enh_L 0xc0000
+#define bBW_TH 0x700000
+#define bED_TH2 0x3800000
+#define bBW_option 0x4000000
+#define bRatio_TH 0x18000000
+#define bWindow_L 0xe0000000
+#define bSBD_Option 0x1
+#define bFrame_TH 0x1c
+#define bFS_Option 0x60
+#define bDC_Slope_check 0x80
+#define bFGuard_Counter_DC_L 0xe00
+#define bFrame_Weight_Short 0x7000
+#define bSub_Tune 0xe00000
+#define bFrame_DC_Length 0xe000000
+#define bSBD_start_offset 0x30000000
+#define bFrame_TH_2 0x7
+#define bFrame_GI2_TH 0x38
+#define bGI2_Sync_en 0x40
+#define bSarch_Short_Early 0x300
+#define bSarch_Short_Late 0xc00
+#define bSarch_GI2_Late 0x70000
+#define bCFOAntSum 0x1
+#define bCFOAcc 0x2
+#define bCFOStartOffset 0xc
+#define bCFOLookBack 0x70
+#define bCFOSumWeight 0x80
+#define bDAGCEnable 0x10000
+#define bTXIQImb_A 0x3ff
+#define bTXIQImb_B 0xfc00
+#define bTXIQImb_C 0x3f0000
+#define bTXIQImb_D 0xffc00000
+#define bTxIDCOffset 0xff
+#define bTxQDCOffset 0xff00
+#define bTxDFIRMode 0x10000
+#define bTxPesudoNoiseOn 0x4000000
+#define bTxPesudoNoise_A 0xff
+#define bTxPesudoNoise_B 0xff00
+#define bTxPesudoNoise_C 0xff0000
+#define bTxPesudoNoise_D 0xff000000
+#define bCCADropOption 0x20000
+#define bCCADropThres 0xfff00000
+#define bEDCCA_H 0xf
+#define bEDCCA_L 0xf0
+#define bLambda_ED 0x300
+#define bRxInitialGain 0x7f
+#define bRxAntDivEn 0x80
+#define bRxAGCAddressForLNA 0x7f00
+#define bRxHighPowerFlow 0x8000
+#define bRxAGCFreezeThres 0xc0000
+#define bRxFreezeStep_AGC1 0x300000
+#define bRxFreezeStep_AGC2 0xc00000
+#define bRxFreezeStep_AGC3 0x3000000
+#define bRxFreezeStep_AGC0 0xc000000
+#define bRxRssi_Cmp_En 0x10000000
+#define bRxQuickAGCEn 0x20000000
+#define bRxAGCFreezeThresMode 0x40000000
+#define bRxOverFlowCheckType 0x80000000
+#define bRxAGCShift 0x7f
+#define bTRSW_Tri_Only 0x80
+#define bPowerThres 0x300
+#define bRxAGCEn 0x1
+#define bRxAGCTogetherEn 0x2
+#define bRxAGCMin 0x4
+#define bRxHP_Ini 0x7
+#define bRxHP_TRLNA 0x70
+#define bRxHP_RSSI 0x700
+#define bRxHP_BBP1 0x7000
+#define bRxHP_BBP2 0x70000
+#define bRxHP_BBP3 0x700000
+#define bRSSI_H 0x7f0000 /* thresh for hi power */
+#define bRSSI_Gen 0x7f000000 /* thresh for ant div */
+#define bRxSettle_TRSW 0x7
+#define bRxSettle_LNA 0x38
+#define bRxSettle_RSSI 0x1c0
+#define bRxSettle_BBP 0xe00
+#define bRxSettle_RxHP 0x7000
+#define bRxSettle_AntSW_RSSI 0x38000
+#define bRxSettle_AntSW 0xc0000
+#define bRxProcessTime_DAGC 0x300000
+#define bRxSettle_HSSI 0x400000
+#define bRxProcessTime_BBPPW 0x800000
+#define bRxAntennaPowerShift 0x3000000
+#define bRSSITableSelect 0xc000000
+#define bRxHP_Final 0x7000000
+#define bRxHTSettle_BBP 0x7
+#define bRxHTSettle_HSSI 0x8
+#define bRxHTSettle_RxHP 0x70
+#define bRxHTSettle_BBPPW 0x80
+#define bRxHTSettle_Idle 0x300
+#define bRxHTSettle_Reserved 0x1c00
+#define bRxHTRxHPEn 0x8000
+#define bRxHTAGCFreezeThres 0x30000
+#define bRxHTAGCTogetherEn 0x40000
+#define bRxHTAGCMin 0x80000
+#define bRxHTAGCEn 0x100000
+#define bRxHTDAGCEn 0x200000
+#define bRxHTRxHP_BBP 0x1c00000
+#define bRxHTRxHP_Final 0xe0000000
+#define bRxPWRatioTH 0x3
+#define bRxPWRatioEn 0x4
+#define bRxMFHold 0x3800
+#define bRxPD_Delay_TH1 0x38
+#define bRxPD_Delay_TH2 0x1c0
+#define bRxPD_DC_COUNT_MAX 0x600
+/* define bRxMF_Hold 0x3800 */
+#define bRxPD_Delay_TH 0x8000
+#define bRxProcess_Delay 0xf0000
+#define bRxSearchrange_GI2_Early 0x700000
+#define bRxFrame_Guard_Counter_L 0x3800000
+#define bRxSGI_Guard_L 0xc000000
+#define bRxSGI_Search_L 0x30000000
+#define bRxSGI_TH 0xc0000000
+#define bDFSCnt0 0xff
+#define bDFSCnt1 0xff00
+#define bDFSFlag 0xf0000
+#define bMFWeightSum 0x300000
+#define bMinIdxTH 0x7f000000
+#define bDAFormat 0x40000
+#define bTxChEmuEnable 0x01000000
+#define bTRSWIsolation_A 0x7f
+#define bTRSWIsolation_B 0x7f00
+#define bTRSWIsolation_C 0x7f0000
+#define bTRSWIsolation_D 0x7f000000
+#define bExtLNAGain 0x7c00
+
+/* 6. PageE(0xE00) */
+#define bSTBCEn 0x4 /* Useless */
+#define bAntennaMapping 0x10
+#define bNss 0x20
+#define bCFOAntSumD 0x200
+#define bPHYCounterReset 0x8000000
+#define bCFOReportGet 0x4000000
+#define bOFDMContinueTx 0x10000000
+#define bOFDMSingleCarrier 0x20000000
+#define bOFDMSingleTone 0x40000000
+/* define bRxPath1 0x01 */
+/* define bRxPath2 0x02 */
+/* define bRxPath3 0x04 */
+/* define bRxPath4 0x08 */
+/* define bTxPath1 0x10 */
+/* define bTxPath2 0x20 */
+#define bHTDetect 0x100
+#define bCFOEn 0x10000
+#define bCFOValue 0xfff00000
+#define bSigTone_Re 0x3f
+#define bSigTone_Im 0x7f00
+#define bCounter_CCA 0xffff
+#define bCounter_ParityFail 0xffff0000
+#define bCounter_RateIllegal 0xffff
+#define bCounter_CRC8Fail 0xffff0000
+#define bCounter_MCSNoSupport 0xffff
+#define bCounter_FastSync 0xffff
+#define bShortCFO 0xfff
+#define bShortCFOTLength 12 /* total */
+#define bShortCFOFLength 11 /* fraction */
+#define bLongCFO 0x7ff
+#define bLongCFOTLength 11
+#define bLongCFOFLength 11
+#define bTailCFO 0x1fff
+#define bTailCFOTLength 13
+#define bTailCFOFLength 12
+#define bmax_en_pwdB 0xffff
+#define bCC_power_dB 0xffff0000
+#define bnoise_pwdB 0xffff
+#define bPowerMeasTLength 10
+#define bPowerMeasFLength 3
+#define bRx_HT_BW 0x1
+#define bRxSC 0x6
+#define bRx_HT 0x8
+#define bNB_intf_det_on 0x1
+#define bIntf_win_len_cfg 0x30
+#define bNB_Intf_TH_cfg 0x1c0
+#define bRFGain 0x3f
+#define bTableSel 0x40
+#define bTRSW 0x80
+#define bRxSNR_A 0xff
+#define bRxSNR_B 0xff00
+#define bRxSNR_C 0xff0000
+#define bRxSNR_D 0xff000000
+#define bSNREVMTLength 8
+#define bSNREVMFLength 1
+#define bCSI1st 0xff
+#define bCSI2nd 0xff00
+#define bRxEVM1st 0xff0000
+#define bRxEVM2nd 0xff000000
+#define bSIGEVM 0xff
+#define bPWDB 0xff00
+#define bSGIEN 0x10000
+
+#define bSFactorQAM1 0xf /* Useless */
+#define bSFactorQAM2 0xf0
+#define bSFactorQAM3 0xf00
+#define bSFactorQAM4 0xf000
+#define bSFactorQAM5 0xf0000
+#define bSFactorQAM6 0xf0000
+#define bSFactorQAM7 0xf00000
+#define bSFactorQAM8 0xf000000
+#define bSFactorQAM9 0xf0000000
+#define bCSIScheme 0x100000
+
+#define bNoiseLvlTopSet 0x3 /* Useless */
+#define bChSmooth 0x4
+#define bChSmoothCfg1 0x38
+#define bChSmoothCfg2 0x1c0
+#define bChSmoothCfg3 0xe00
+#define bChSmoothCfg4 0x7000
+#define bMRCMode 0x800000
+#define bTHEVMCfg 0x7000000
+
+#define bLoopFitType 0x1 /* Useless */
+#define bUpdCFO 0x40
+#define bUpdCFOOffData 0x80
+#define bAdvUpdCFO 0x100
+#define bAdvTimeCtrl 0x800
+#define bUpdClko 0x1000
+#define bFC 0x6000
+#define bTrackingMode 0x8000
+#define bPhCmpEnable 0x10000
+#define bUpdClkoLTF 0x20000
+#define bComChCFO 0x40000
+#define bCSIEstiMode 0x80000
+#define bAdvUpdEqz 0x100000
+#define bUChCfg 0x7000000
+#define bUpdEqz 0x8000000
+
+#define bTxAGCRate18_06 0x7f7f7f7f /* Useless */
+#define bTxAGCRate54_24 0x7f7f7f7f
+#define bTxAGCRateMCS32 0x7f
+#define bTxAGCRateCCK 0x7f00
+#define bTxAGCRateMCS3_MCS0 0x7f7f7f7f
+#define bTxAGCRateMCS7_MCS4 0x7f7f7f7f
+#define bTxAGCRateMCS11_MCS8 0x7f7f7f7f
+#define bTxAGCRateMCS15_MCS12 0x7f7f7f7f
+
+/* Rx Pseudo noise */
+#define bRxPesudoNoiseOn 0x20000000 /* Useless */
+#define bRxPesudoNoise_A 0xff
+#define bRxPesudoNoise_B 0xff00
+#define bRxPesudoNoise_C 0xff0000
+#define bRxPesudoNoise_D 0xff000000
+#define bPesudoNoiseState_A 0xffff
+#define bPesudoNoiseState_B 0xffff0000
+#define bPesudoNoiseState_C 0xffff
+#define bPesudoNoiseState_D 0xffff0000
+
+/* 7. RF Register */
+/* Zebra1 */
+#define bZebra1_HSSIEnable 0x8 /* Useless */
+#define bZebra1_TRxControl 0xc00
+#define bZebra1_TRxGainSetting 0x07f
+#define bZebra1_RxCorner 0xc00
+#define bZebra1_TxChargePump 0x38
+#define bZebra1_RxChargePump 0x7
+#define bZebra1_ChannelNum 0xf80
+#define bZebra1_TxLPFBW 0x400
+#define bZebra1_RxLPFBW 0x600
+
+/* Zebra4 */
+#define bRTL8256RegModeCtrl1 0x100 /* Useless */
+#define bRTL8256RegModeCtrl0 0x40
+#define bRTL8256_TxLPFBW 0x18
+#define bRTL8256_RxLPFBW 0x600
+
+/* RTL8258 */
+#define bRTL8258_TxLPFBW 0xc /* Useless */
+#define bRTL8258_RxLPFBW 0xc00
+#define bRTL8258_RSSILPFBW 0xc0
+
+
+/* */
+/* Other Definition */
+/* */
+
+/* byte enable for sb_write */
+#define bByte0 0x1 /* Useless */
+#define bByte1 0x2
+#define bByte2 0x4
+#define bByte3 0x8
+#define bWord0 0x3
+#define bWord1 0xc
+#define bDWord 0xf
+
+/* for PutRegsetting & GetRegSetting BitMask */
+#define bMaskByte0 0xff /* Reg 0xc50 rOFDM0_XAAGCCore~0xC6f */
+#define bMaskByte1 0xff00
+#define bMaskByte2 0xff0000
+#define bMaskByte3 0xff000000
+#define bMaskHWord 0xffff0000
+#define bMaskLWord 0x0000ffff
+#define bMaskDWord 0xffffffff
+#define bMaskH4Bits 0xf0000000
+#define bMaskOFDM_D 0xffc00000
+#define bMaskCCK 0x3f3f3f3f
+#define bMask12Bits 0xfff
+
+/* for PutRFRegsetting & GetRFRegSetting BitMask */
+#if (RTL92SE_FPGA_VERIFY == 1)
+#define bRFRegOffsetMask 0xfff
+#else
+#define bRFRegOffsetMask 0xfffff
+#endif
+#define bEnable 0x1 /* Useless */
+#define bDisabl 0x0
+
+#define LeftAntenna 0x0 /* Useless */
+#define RightAntenna 0x1
+
+#define tCheckTxStatus 500 /* 500ms Useless */
+#define tUpdateRxCounter 100 /* 100ms */
+
+#define rateCCK 0 /* Useless */
+#define rateOFDM 1
+#define rateHT 2
+
+/* define Register-End */
+#define bPMAC_End 0x1ff /* Useless */
+#define bFPGAPHY0_End 0x8ff
+#define bFPGAPHY1_End 0x9ff
+#define bCCKPHY0_End 0xaff
+#define bOFDMPHY0_End 0xcff
+#define bOFDMPHY1_End 0xdff
+
+/* define max debug item in each debug page */
+/* define bMaxItem_FPGA_PHY0 0x9 */
+/* define bMaxItem_FPGA_PHY1 0x3 */
+/* define bMaxItem_PHY_11B 0x16 */
+/* define bMaxItem_OFDM_PHY0 0x29 */
+/* define bMaxItem_OFDM_PHY1 0x0 */
+
+#define bPMACControl 0x0 /* Useless */
+#define bWMACControl 0x1
+#define bWNICControl 0x2
+
+#define RCR_AAP BIT(0) /* accept all physical address */
+#define RCR_APM BIT(1) /* accept physical match */
+#define RCR_AM BIT(2) /* accept multicast */
+#define RCR_AB BIT(3) /* accept broadcast */
+#define RCR_ACRC32 BIT(5) /* accept error packet */
+#define RCR_9356SEL BIT(6)
+#define RCR_AICV BIT(12) /* Accept ICV error packet */
+#define RCR_RXFTH0 (BIT(13)|BIT(14)|BIT(15)) /* Rx FIFO threshold */
+#define RCR_ADF BIT(18) /* Accept Data(frame type) frame */
+#define RCR_ACF BIT(19) /* Accept control frame */
+#define RCR_AMF BIT(20) /* Accept management frame */
+#define RCR_ADD3 BIT(21)
+#define RCR_APWRMGT BIT(22) /* Accept power management packet */
+#define RCR_CBSSID BIT(23) /* Accept BSSID match packet */
+#define RCR_ENMARP BIT(28) /* enable mac auto reset phy */
+#define RCR_EnCS1 BIT(29) /* enable carrier sense method 1 */
+#define RCR_EnCS2 BIT(30) /* enable carrier sense method 2 */
+#define RCR_OnlyErlPkt BIT(31) /* Rx Early mode is performed for
+ * packet size greater than 1536 */
+
+/*--------------------------Define Parameters-------------------------------*/
+
+
+#endif /* __RTW_MP_PHY_REGDEF_H_ */
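A minimal sketch of how the bit-mask constants defined above are typically consumed, assuming hypothetical helper names (they are not driver API); a field is extracted or inserted by shifting down to the mask's lowest set bit:

#include <linux/bitops.h>	/* __ffs() */

static inline u32 example_get_field(u32 regval, u32 bitmask)
{
	/* e.g. bitmask == bXBTxAGC (0xf00) picks bits [11:8] */
	return (regval & bitmask) >> __ffs(bitmask);
}

static inline u32 example_set_field(u32 regval, u32 bitmask, u32 field)
{
	/* clear the field, then merge the new value under the same mask */
	return (regval & ~bitmask) | ((field << __ffs(bitmask)) & bitmask);
}

With bitmask == bMaskDWord the whole 32-bit register value passes through unchanged.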
diff --git a/drivers/staging/rtl8188eu/include/rtw_p2p.h b/drivers/staging/rtl8188eu/include/rtw_p2p.h
new file mode 100644
index 00000000000..a3e3adc92b9
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_p2p.h
@@ -0,0 +1,135 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_P2P_H_
+#define __RTW_P2P_H_
+
+#include <drv_types.h>
+
+u32 build_beacon_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf);
+u32 build_probe_resp_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf);
+u32 build_prov_disc_request_p2p_ie(struct wifidirect_info *pwdinfo,
+ u8 *pbuf, u8 *pssid, u8 ussidlen,
+ u8 *pdev_raddr);
+u32 build_assoc_resp_p2p_ie(struct wifidirect_info *pwdinfo,
+ u8 *pbuf, u8 status_code);
+u32 build_deauth_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf);
+u32 process_probe_req_p2p_ie(struct wifidirect_info *pwdinfo,
+ u8 *pframe, uint len);
+u32 process_assoc_req_p2p_ie(struct wifidirect_info *pwdinfo,
+ u8 *pframe, uint len, struct sta_info *psta);
+u32 process_p2p_devdisc_req(struct wifidirect_info *pwdinfo,
+ u8 *pframe, uint len);
+u32 process_p2p_devdisc_resp(struct wifidirect_info *pwdinfo,
+ u8 *pframe, uint len);
+u8 process_p2p_provdisc_req(struct wifidirect_info *pwdinfo,
+ u8 *pframe, uint len);
+u8 process_p2p_provdisc_resp(struct wifidirect_info *pwdinfo, u8 *pframe);
+u8 process_p2p_group_negotation_req(struct wifidirect_info *pwdinfo,
+ u8 *pframe, uint len);
+u8 process_p2p_group_negotation_resp(struct wifidirect_info *pwdinfo,
+ u8 *pframe, uint len);
+u8 process_p2p_group_negotation_confirm(struct wifidirect_info *pwdinfo,
+ u8 *pframe, uint len);
+u8 process_p2p_presence_req(struct wifidirect_info *pwdinfo, u8 *pframe,
+ uint len);
+void p2p_protocol_wk_hdl(struct adapter *padapter, int intcmdtype);
+void process_p2p_ps_ie(struct adapter *padapter, u8 *ies, u32 ielength);
+void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state);
+u8 p2p_ps_wk_cmd(struct adapter *padapter, u8 p2p_ps_state, u8 enqueue);
+void reset_global_wifidirect_info(struct adapter *padapter);
+int rtw_init_wifi_display_info(struct adapter *padapter);
+void rtw_init_wifidirect_timers(struct adapter *padapter);
+void rtw_init_wifidirect_addrs(struct adapter *padapter, u8 *dev_addr,
+ u8 *iface_addr);
+void init_wifidirect_info(struct adapter *padapter, enum P2P_ROLE role);
+int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role);
+
+static inline void _rtw_p2p_set_state(struct wifidirect_info *wdinfo,
+ enum P2P_STATE state)
+{
+ if (wdinfo->p2p_state != state)
+ wdinfo->p2p_state = state;
+}
+
+static inline void _rtw_p2p_set_pre_state(struct wifidirect_info *wdinfo,
+ enum P2P_STATE state)
+{
+ if (wdinfo->pre_p2p_state != state)
+ wdinfo->pre_p2p_state = state;
+}
+
+static inline void _rtw_p2p_set_role(struct wifidirect_info *wdinfo,
+ enum P2P_ROLE role)
+{
+ if (wdinfo->role != role)
+ wdinfo->role = role;
+}
+
+static inline int _rtw_p2p_state(struct wifidirect_info *wdinfo)
+{
+ return wdinfo->p2p_state;
+}
+
+static inline int _rtw_p2p_pre_state(struct wifidirect_info *wdinfo)
+{
+ return wdinfo->pre_p2p_state;
+}
+
+static inline int _rtw_p2p_role(struct wifidirect_info *wdinfo)
+{
+ return wdinfo->role;
+}
+
+static inline bool _rtw_p2p_chk_state(struct wifidirect_info *wdinfo,
+ enum P2P_STATE state)
+{
+ return wdinfo->p2p_state == state;
+}
+
+static inline bool _rtw_p2p_chk_role(struct wifidirect_info *wdinfo,
+ enum P2P_ROLE role)
+{
+ return wdinfo->role == role;
+}
+
+#define rtw_p2p_set_state(wdinfo, state) _rtw_p2p_set_state(wdinfo, state)
+#define rtw_p2p_set_pre_state(wdinfo, state) \
+ _rtw_p2p_set_pre_state(wdinfo, state)
+#define rtw_p2p_set_role(wdinfo, role) _rtw_p2p_set_role(wdinfo, role)
+
+#define rtw_p2p_state(wdinfo) _rtw_p2p_state(wdinfo)
+#define rtw_p2p_pre_state(wdinfo) _rtw_p2p_pre_state(wdinfo)
+#define rtw_p2p_role(wdinfo) _rtw_p2p_role(wdinfo)
+#define rtw_p2p_chk_state(wdinfo, state) _rtw_p2p_chk_state(wdinfo, state)
+#define rtw_p2p_chk_role(wdinfo, role) _rtw_p2p_chk_role(wdinfo, role)
+
+#define rtw_p2p_findphase_ex_set(wdinfo, value) \
+ ((wdinfo)->find_phase_state_exchange_cnt = (value))
+
+/* is this find phase exchange for social channel scan? */
+#define rtw_p2p_findphase_ex_is_social(wdinfo) \
+((wdinfo)->find_phase_state_exchange_cnt >= P2P_FINDPHASE_EX_SOCIAL_FIRST)
+
+/* should we need find phase exchange anymore? */
+#define rtw_p2p_findphase_ex_is_needed(wdinfo) \
+ ((wdinfo)->find_phase_state_exchange_cnt < P2P_FINDPHASE_EX_MAX && \
+ (wdinfo)->find_phase_state_exchange_cnt != P2P_FINDPHASE_EX_NONE)
+
+#endif
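A minimal sketch of how the wrappers above are typically used when advancing the P2P state machine; the function name is hypothetical and the state value is left to the caller:

static inline void example_p2p_transition(struct wifidirect_info *wdinfo,
					  enum P2P_STATE new_state)
{
	/* remember the state we are leaving, then switch to the new one */
	rtw_p2p_set_pre_state(wdinfo, rtw_p2p_state(wdinfo));
	rtw_p2p_set_state(wdinfo, new_state);
}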
diff --git a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
new file mode 100644
index 00000000000..d4b8acb8025
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
@@ -0,0 +1,283 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_PWRCTRL_H_
+#define __RTW_PWRCTRL_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#define FW_PWR0 0
+#define FW_PWR1 1
+#define FW_PWR2 2
+#define FW_PWR3 3
+#define HW_PWR0 7
+#define HW_PWR1 6
+#define HW_PWR2 2
+#define HW_PWR3 0
+#define HW_PWR4 8
+
+#define FW_PWRMSK 0x7
+
+#define XMIT_ALIVE BIT(0)
+#define RECV_ALIVE BIT(1)
+#define CMD_ALIVE BIT(2)
+#define EVT_ALIVE BIT(3)
+
+enum power_mgnt {
+ PS_MODE_ACTIVE = 0,
+ PS_MODE_MIN,
+ PS_MODE_MAX,
+ PS_MODE_DTIM,
+ PS_MODE_VOIP,
+ PS_MODE_UAPSD_WMM,
+ PS_MODE_UAPSD,
+ PS_MODE_IBSS,
+ PS_MODE_WWLAN,
+ PM_Radio_Off,
+ PM_Card_Disable,
+ PS_MODE_NUM
+};
+
+/*
+ * BIT[2:0] = HW state
+ * BIT[3]   = Protocol PS state, 0: register active state,
+ *            1: register sleep state
+ * BIT[4]   = sub-state
+ */
+
+#define PS_DPS BIT(0)
+#define PS_LCLK (PS_DPS)
+#define PS_RF_OFF BIT(1)
+#define PS_ALL_ON BIT(2)
+#define PS_ST_ACTIVE BIT(3)
+
+#define PS_ISR_ENABLE BIT(4)
+#define PS_IMR_ENABLE BIT(5)
+#define PS_ACK BIT(6)
+#define PS_TOGGLE BIT(7)
+
+#define PS_STATE_MASK (0x0F)
+#define PS_STATE_HW_MASK (0x07)
+#define PS_SEQ_MASK (0xc0)
+
+#define PS_STATE(x) (PS_STATE_MASK & (x))
+#define PS_STATE_HW(x) (PS_STATE_HW_MASK & (x))
+#define PS_SEQ(x) (PS_SEQ_MASK & (x))
+
+#define PS_STATE_S0 (PS_DPS)
+#define PS_STATE_S1 (PS_LCLK)
+#define PS_STATE_S2 (PS_RF_OFF)
+#define PS_STATE_S3 (PS_ALL_ON)
+#define PS_STATE_S4 ((PS_ST_ACTIVE) | (PS_ALL_ON))
+
+#define PS_IS_RF_ON(x) ((x) & (PS_ALL_ON))
+#define PS_IS_ACTIVE(x) ((x) & (PS_ST_ACTIVE))
+#define CLR_PS_STATE(x) ((x) = ((x) & (0xF0)))
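/*
 * Editorial sketch (not part of the patch): the macros above slice one
 * rpwm/cpwm byte into a hardware state (BIT[2:0], PS_STATE_HW), a
 * protocol-active flag (BIT[3]) and toggle/ack sequence bits (BIT[7:6],
 * PS_SEQ).  The helper name below is illustrative only.
 */
static inline bool example_is_fully_awake(u8 val)
{
	/* PS_STATE_S4 is (PS_ST_ACTIVE | PS_ALL_ON): RF on, protocol active */
	return PS_IS_ACTIVE(val) && PS_IS_RF_ON(val);
}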
+
+struct reportpwrstate_parm {
+ unsigned char mode;
+ unsigned char state; /* the CPWM value */
+ unsigned short rsvd;
+};
+
+static inline void _init_pwrlock(struct semaphore *plock)
+{
+ _rtw_init_sema(plock, 1);
+}
+
+static inline void _free_pwrlock(struct semaphore *plock)
+{
+ _rtw_free_sema(plock);
+}
+
+static inline void _enter_pwrlock(struct semaphore *plock)
+{
+ _rtw_down_sema(plock);
+}
+
+static inline void _exit_pwrlock(struct semaphore *plock)
+{
+ _rtw_up_sema(plock);
+}
+
+#define LPS_DELAY_TIME (1 * HZ) /* 1 sec */
+
+#define EXE_PWR_NONE 0x01
+#define EXE_PWR_IPS 0x02
+#define EXE_PWR_LPS 0x04
+
+/* RF state. */
+enum rt_rf_power_state {
+ rf_on, /* RF is on after RFSleep or RFOff */
+ rf_sleep, /* 802.11 Power Save mode */
+ rf_off, /* HW/SW Radio OFF or Inactive Power Save */
+ /* Add the new RF state above this line===== */
+ rf_max
+};
+
+/* RF Off Level for IPS or HW/SW radio off */
+#define RT_RF_OFF_LEVL_ASPM BIT(0) /* PCI ASPM */
+#define RT_RF_OFF_LEVL_CLK_REQ BIT(1) /* PCI clock request */
+#define RT_RF_OFF_LEVL_PCI_D3 BIT(2) /* PCI D3 mode */
+#define RT_RF_OFF_LEVL_HALT_NIC BIT(3) /* NIC halt, re-init hw param*/
+#define RT_RF_OFF_LEVL_FREE_FW BIT(4) /* FW free, re-download the FW*/
+#define RT_RF_OFF_LEVL_FW_32K BIT(5) /* FW in 32k */
+#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT(6) /* Always enable ASPM and Clock
+ * Req in initialization. */
+#define RT_RF_LPS_DISALBE_2R BIT(30) /* When LPS is on, disable 2R
+ * if no packet is RX or TX. */
+#define RT_RF_LPS_LEVEL_ASPM BIT(31) /* LPS with ASPM */
+
+#define RT_IN_PS_LEVEL(ppsc, _PS_FLAG) \
+ ((ppsc->cur_ps_level & _PS_FLAG) ? true : false)
+#define RT_CLEAR_PS_LEVEL(ppsc, _PS_FLAG) \
+ (ppsc->cur_ps_level &= (~(_PS_FLAG)))
+#define RT_SET_PS_LEVEL(ppsc, _PS_FLAG) \
+ (ppsc->cur_ps_level |= _PS_FLAG)
+
+enum _PS_BBRegBackup_ {
+ PSBBREG_RF0 = 0,
+ PSBBREG_RF1,
+ PSBBREG_RF2,
+ PSBBREG_AFE0,
+ PSBBREG_TOTALCNT
+};
+
+enum { /* for ips_mode */
+ IPS_NONE = 0,
+ IPS_NORMAL,
+ IPS_LEVEL_2,
+};
+
+struct pwrctrl_priv {
+ struct semaphore lock;
+ volatile u8 rpwm; /* requested power state for fw */
+ volatile u8 cpwm; /* fw current power state. updated when
+ * 1. read from HCPWM 2. driver lowers power level */
+ volatile u8 tog; /* toggling */
+ volatile u8 cpwm_tog; /* toggling */
+
+ u8 pwr_mode;
+ u8 smart_ps;
+ u8 bcn_ant_mode;
+
+ u32 alives;
+ struct work_struct cpwm_event;
+ u8 bpower_saving;
+
+ u8 b_hw_radio_off;
+ u8 reg_rfoff;
+ u8 reg_pdnmode; /* powerdown mode */
+ u32 rfoff_reason;
+
+ /* RF OFF Level */
+ u32 cur_ps_level;
+ u32 reg_rfps_level;
+ uint ips_enter_cnts;
+ uint ips_leave_cnts;
+
+ u8 ips_mode;
+ u8 ips_mode_req; /* used to accept the mode setting request,
+ * will update to ipsmode later */
+ uint bips_processing;
+ u32 ips_deny_time; /* deny IPS while system time is less than this */
+ u8 ps_processing; /* temp used to mark whether in rtw_ps_processor */
+
+ u8 bLeisurePs;
+ u8 LpsIdleCount;
+ u8 power_mgnt;
+ u8 bFwCurrentInPSMode;
+ u32 DelayLPSLastTimeStamp;
+ u8 btcoex_rfon;
+ s32 pnp_current_pwr_state;
+ u8 pnp_bstop_trx;
+
+ u8 bInternalAutoSuspend;
+ u8 bInSuspend;
+#ifdef CONFIG_BT_COEXIST
+ u8 bAutoResume;
+ u8 autopm_cnt;
+#endif
+ u8 bSupportRemoteWakeup;
+ struct timer_list pwr_state_check_timer;
+ int pwr_state_check_interval;
+ u8 pwr_state_check_cnts;
+
+ int ps_flag;
+
+ enum rt_rf_power_state rf_pwrstate;/* cur power state */
+ enum rt_rf_power_state change_rfpwrstate;
+
+ u8 wepkeymask;
+ u8 bHWPowerdown;/* if support hw power down */
+ u8 bHWPwrPindetect;
+ u8 bkeepfwalive;
+ u8 brfoffbyhw;
+ unsigned long PS_BBRegBackup[PSBBREG_TOTALCNT];
+};
+
+#define rtw_get_ips_mode_req(pwrctrlpriv) \
+ (pwrctrlpriv)->ips_mode_req
+
+#define rtw_ips_mode_req(pwrctrlpriv, ips_mode) \
+ ((pwrctrlpriv)->ips_mode_req = (ips_mode))
+
+#define RTW_PWR_STATE_CHK_INTERVAL 2000
+
+#define _rtw_set_pwr_state_check_timer(pwrctrlpriv, ms) \
+ do { \
+ _set_timer(&(pwrctrlpriv)->pwr_state_check_timer, (ms)); \
+ } while (0)
+
+#define rtw_set_pwr_state_check_timer(pwrctrl) \
+ _rtw_set_pwr_state_check_timer((pwrctrl), \
+ (pwrctrl)->pwr_state_check_interval)
+
+void rtw_init_pwrctrl_priv(struct adapter *adapter);
+void rtw_free_pwrctrl_priv(struct adapter *adapter);
+
+void rtw_set_ps_mode(struct adapter *adapter, u8 ps_mode, u8 smart_ps,
+ u8 bcn_ant_mode);
+void rtw_set_rpwm(struct adapter *adapter, u8 val8);
+void LeaveAllPowerSaveMode(struct adapter *adapter);
+void ips_enter(struct adapter *padapter);
+int ips_leave(struct adapter *padapter);
+
+void rtw_ps_processor(struct adapter *padapter);
+
+enum rt_rf_power_state RfOnOffDetect(struct adapter *iadapter);
+
+s32 LPS_RF_ON_check(struct adapter *adapter, u32 delay_ms);
+void LPS_Enter(struct adapter *adapter);
+void LPS_Leave(struct adapter *adapter);
+
+u8 rtw_interface_ps_func(struct adapter *adapter,
+ enum hal_intf_ps_func efunc_id, u8 *val);
+void rtw_set_ips_deny(struct adapter *adapter, u32 ms);
+int _rtw_pwr_wakeup(struct adapter *adapter, u32 ips_defer_ms,
+ const char *caller);
+#define rtw_pwr_wakeup(adapter) \
+ _rtw_pwr_wakeup(adapter, RTW_PWR_STATE_CHK_INTERVAL, __func__)
+#define rtw_pwr_wakeup_ex(adapter, ips_deffer_ms) \
+ _rtw_pwr_wakeup(adapter, ips_deffer_ms, __func__)
+int rtw_pm_set_ips(struct adapter *adapter, u8 mode);
+int rtw_pm_set_lps(struct adapter *adapter, u8 mode);
+
+#endif /* __RTW_PWRCTRL_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_qos.h b/drivers/staging/rtl8188eu/include/rtw_qos.h
new file mode 100644
index 00000000000..bbee1ddc00b
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_qos.h
@@ -0,0 +1,30 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_QOS_H_
+#define _RTW_QOS_H_
+
+#include <osdep_service.h>
+
+struct qos_priv {
+ unsigned int qos_option; /* bit mask option: u-apsd,
+ * s-apsd, ts, block ack... */
+};
+
+#endif /* _RTW_QOS_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_recv.h b/drivers/staging/rtl8188eu/include/rtw_recv.h
new file mode 100644
index 00000000000..bae8885c57f
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_recv.h
@@ -0,0 +1,485 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_RECV_H_
+#define _RTW_RECV_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+
+#define NR_RECVFRAME 256
+
+#define RXFRAME_ALIGN 8
+#define RXFRAME_ALIGN_SZ (1<<RXFRAME_ALIGN)
+
+#define MAX_RXFRAME_CNT 512
+#define MAX_RX_NUMBLKS (32)
+#define RECVFRAME_HDR_ALIGN 128
+
+#define SNAP_SIZE sizeof(struct ieee80211_snap_hdr)
+
+#define MAX_SUBFRAME_COUNT 64
+
+/* for Rx reordering buffer control */
+struct recv_reorder_ctrl {
+ struct adapter *padapter;
+ u8 enable;
+ u16 indicate_seq;/* wstart_b, init_value=0xffff */
+ u16 wend_b;
+ u8 wsize_b;
+ struct __queue pending_recvframe_queue;
+ struct timer_list reordering_ctrl_timer;
+};
+
+struct stainfo_rxcache {
+ u16 tid_rxseq[16];
+/*
+ unsigned short tid0_rxseq;
+ unsigned short tid1_rxseq;
+ unsigned short tid2_rxseq;
+ unsigned short tid3_rxseq;
+ unsigned short tid4_rxseq;
+ unsigned short tid5_rxseq;
+ unsigned short tid6_rxseq;
+ unsigned short tid7_rxseq;
+ unsigned short tid8_rxseq;
+ unsigned short tid9_rxseq;
+ unsigned short tid10_rxseq;
+ unsigned short tid11_rxseq;
+ unsigned short tid12_rxseq;
+ unsigned short tid13_rxseq;
+ unsigned short tid14_rxseq;
+ unsigned short tid15_rxseq;
+*/
+};
+
+struct smooth_rssi_data {
+ u32 elements[100]; /* array to store values */
+ u32 index; /* index to current array to store */
+ u32 total_num; /* num of valid elements */
+ u32 total_val; /* sum of valid elements */
+};
+
+struct signal_stat {
+ u8 update_req; /* used to indicate */
+ u8 avg_val; /* avg of valid elements */
+ u32 total_num; /* num of valid elements */
+ u32 total_val; /* sum of valid elements */
+};
+#define MAX_PATH_NUM_92CS 2
+struct phy_info {
+ u8 RxPWDBAll;
+ u8 SignalQuality; /* in 0-100 index. */
+ u8 RxMIMOSignalQuality[MAX_PATH_NUM_92CS]; /* EVM */
+ u8 RxMIMOSignalStrength[MAX_PATH_NUM_92CS];/* in 0~100 index */
+ s8 RxPower; /* in dBm Translate from PWdB */
+/* Real power in dBm for this packet, no beautification and aggregation.
+ * Keep this raw info to be used for the other procedures. */
+ s8 recvpower;
+ u8 BTRxRSSIPercentage;
+ u8 SignalStrength; /* in 0-100 index. */
+ u8 RxPwr[MAX_PATH_NUM_92CS];/* per-path's pwdb */
+ u8 RxSNR[MAX_PATH_NUM_92CS];/* per-path's SNR */
+};
+
+struct rx_pkt_attrib {
+ u16 pkt_len;
+ u8 physt;
+ u8 drvinfo_sz;
+ u8 shift_sz;
+ u8 hdrlen; /* the WLAN Header Len */
+ u8 to_fr_ds;
+ u8 amsdu;
+ u8 qos;
+ u8 priority;
+ u8 pw_save;
+ u8 mdata;
+ u16 seq_num;
+ u8 frag_num;
+ u8 mfrag;
+ u8 order;
+ u8 privacy; /* in frame_ctrl field */
+ u8 bdecrypted;
+ u8 encrypt; /* 0: no encryption; non-zero: indicates the
+ * encryption algorithm in use */
+ u8 iv_len;
+ u8 icv_len;
+ u8 crc_err;
+ u8 icv_err;
+
+ u16 eth_type;
+
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ u8 ta[ETH_ALEN];
+ u8 ra[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+
+ u8 ack_policy;
+
+ u8 key_index;
+
+ u8 mcs_rate;
+ u8 rxht;
+ u8 sgi;
+ u8 pkt_rpt_type;
+ u32 MacIDValidEntry[2]; /* 64 bits present 64 entry. */
+
+ struct phy_info phy_info;
+};
+
+
+/* These definitions are used for Rx packet reordering. */
+#define SN_LESS(a, b) ((((a) - (b)) & 0x800) != 0)
+#define SN_EQUAL(a, b) ((a) == (b))
+#define REORDER_WAIT_TIME (50) /* (ms) */
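/*
 * Editorial sketch (not part of the patch): SN_LESS() is a wraparound-safe
 * "comes before" test for 12-bit 802.11 sequence numbers; it is true when
 * the difference (a - b) lands in the upper half of the 4096-wide circle,
 * i.e. bit 11 of the difference is set.  For example, SN_LESS(10, 20) and
 * SN_LESS(4090, 5) are both true, while SN_LESS(20, 10) is false.
 */
static inline bool example_seq_before(u16 a, u16 b)
{
	return SN_LESS(a, b);
}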
+
+#define RECVBUFF_ALIGN_SZ 8
+
+#define RXDESC_SIZE 24
+#define RXDESC_OFFSET RXDESC_SIZE
+
+struct recv_stat {
+ __le32 rxdw0;
+ __le32 rxdw1;
+ __le32 rxdw2;
+ __le32 rxdw3;
+ __le32 rxdw4;
+ __le32 rxdw5;
+};
+
+#define EOR BIT(30)
+
+/*
+ * Accessors of recv_priv: rtw_recv_entry (dispatch/passive level),
+ * recv_thread (passive), returnpkt (dispatch) and halt (passive).
+ *
+ * Use critical sections to protect it.
+ */
+struct recv_priv {
+ spinlock_t lock;
+ struct __queue free_recv_queue;
+ struct __queue recv_pending_queue;
+ struct __queue uc_swdec_pending_queue;
+ u8 *pallocated_frame_buf;
+ u8 *precv_frame_buf;
+ uint free_recvframe_cnt;
+ struct adapter *adapter;
+ u32 bIsAnyNonBEPkts;
+ u64 rx_bytes;
+ u64 rx_pkts;
+ u64 rx_drop;
+ u64 last_rx_bytes;
+
+ uint rx_icv_err;
+ uint rx_largepacket_crcerr;
+ uint rx_smallpacket_crcerr;
+ uint rx_middlepacket_crcerr;
+ struct semaphore allrxreturnevt;
+ uint ff_hwaddr;
+ u8 rx_pending_cnt;
+
+ struct tasklet_struct irq_prepare_beacon_tasklet;
+ struct tasklet_struct recv_tasklet;
+ struct sk_buff_head free_recv_skb_queue;
+ struct sk_buff_head rx_skb_queue;
+ u8 *pallocated_recv_buf;
+ u8 *precv_buf; /* 4-byte aligned */
+ struct __queue free_recv_buf_queue;
+ u32 free_recv_buf_queue_cnt;
+ /* For displaying the PHY information */
+ u8 is_signal_dbg; /* for debug */
+ u8 signal_strength_dbg; /* for debug */
+ s8 rssi;
+ s8 rxpwdb;
+ u8 signal_strength;
+ u8 signal_qual;
+ u8 noise;
+ int RxSNRdB[2];
+ s8 RxRssi[2];
+ int FalseAlmCnt_all;
+
+ struct timer_list signal_stat_timer;
+ u32 signal_stat_sampling_interval;
+ struct signal_stat signal_qual_data;
+ struct signal_stat signal_strength_data;
+};
+
+#define rtw_set_signal_stat_timer(recvpriv) \
+ _set_timer(&(recvpriv)->signal_stat_timer, \
+ (recvpriv)->signal_stat_sampling_interval)
+
+struct sta_recv_priv {
+ spinlock_t lock;
+ int option;
+ struct __queue defrag_q; /* keeping the fragment frame until defrag */
+ struct stainfo_rxcache rxcache;
+};
+
+struct recv_buf {
+ struct list_head list;
+ spinlock_t recvbuf_lock;
+ u32 ref_cnt;
+ struct adapter *adapter;
+ u8 *pbuf;
+ u8 *pallocated_buf;
+ u32 len;
+ u8 *phead;
+ u8 *pdata;
+ u8 *ptail;
+ u8 *pend;
+ struct urb *purb;
+ dma_addr_t dma_transfer_addr; /* (in) dma addr for transfer_buffer */
+ u32 alloc_sz;
+ u8 irp_pending;
+ int transfer_len;
+ struct sk_buff *pskb;
+ u8 reuse;
+};
+
+/*
+ head ----->
+
+ data ----->
+
+ payload
+
+ tail ----->
+
+
+ end ----->
+
+ len = (unsigned int )(tail - data);
+
+*/
+struct recv_frame_hdr {
+ struct list_head list;
+ struct sk_buff *pkt;
+ struct sk_buff *pkt_newalloc;
+ struct adapter *adapter;
+ u8 fragcnt;
+ int frame_tag;
+ struct rx_pkt_attrib attrib;
+ uint len;
+ u8 *rx_head;
+ u8 *rx_data;
+ u8 *rx_tail;
+ u8 *rx_end;
+ void *precvbuf;
+ struct sta_info *psta;
+ /* for A-MPDU Rx reordering buffer control */
+ struct recv_reorder_ctrl *preorder_ctrl;
+};
+
+union recv_frame {
+ union {
+ struct list_head list;
+ struct recv_frame_hdr hdr;
+ uint mem[RECVFRAME_HDR_ALIGN>>2];
+ } u;
+};
+
+union recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
+union recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
+void rtw_init_recvframe(union recv_frame *precvframe,
+ struct recv_priv *precvpriv);
+int rtw_free_recvframe(union recv_frame *precvframe,
+ struct __queue *pfree_recv_queue);
+#define rtw_dequeue_recvframe(queue) rtw_alloc_recvframe(queue)
+int _rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue);
+int rtw_enqueue_recvframe(union recv_frame *precvframe, struct __queue *queue);
+void rtw_free_recvframe_queue(struct __queue *pframequeue,
+ struct __queue *pfree_recv_queue);
+u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter);
+int rtw_enqueue_recvbuf_to_head(struct recv_buf *buf, struct __queue *queue);
+int rtw_enqueue_recvbuf(struct recv_buf *precvbuf, struct __queue *queue);
+struct recv_buf *rtw_dequeue_recvbuf(struct __queue *queue);
+
+void rtw_reordering_ctrl_timeout_handler(void *pcontext);
+
+static inline u8 *get_rxmem(union recv_frame *precvframe)
+{
+ /* always return rx_head... */
+ if (precvframe == NULL)
+ return NULL;
+ return precvframe->u.hdr.rx_head;
+}
+
+static inline u8 *get_rx_status(union recv_frame *precvframe)
+{
+ return get_rxmem(precvframe);
+}
+
+static inline u8 *get_recvframe_data(union recv_frame *precvframe)
+{
+ /* always return rx_data */
+ if (precvframe == NULL)
+ return NULL;
+
+ return precvframe->u.hdr.rx_data;
+}
+
+static inline u8 *recvframe_push(union recv_frame *precvframe, int sz)
+{
+ /* append data before rx_data */
+
+ /* Add data to the start of the recv_frame.
+ *
+ * This function extends the used data area of the recv_frame at the
+ * buffer start; rx_data must not fall below rx_head after pushing.
+ */
+ if (precvframe == NULL)
+ return NULL;
+ precvframe->u.hdr.rx_data -= sz ;
+ if (precvframe->u.hdr.rx_data < precvframe->u.hdr.rx_head) {
+ precvframe->u.hdr.rx_data += sz;
+ return NULL;
+ }
+ precvframe->u.hdr.len += sz;
+ return precvframe->u.hdr.rx_data;
+}
+
+static inline u8 *recvframe_pull(union recv_frame *precvframe, int sz)
+{
+ /* rx_data += sz; advance rx_data by sz bytes */
+
+ /* Used to remove sz bytes from the front of the data area: update
+ * rx_data and return the updated rx_data to the caller. */
+
+ if (precvframe == NULL)
+ return NULL;
+ precvframe->u.hdr.rx_data += sz;
+ if (precvframe->u.hdr.rx_data > precvframe->u.hdr.rx_tail) {
+ precvframe->u.hdr.rx_data -= sz;
+ return NULL;
+ }
+ precvframe->u.hdr.len -= sz;
+ return precvframe->u.hdr.rx_data;
+}
+
+static inline u8 *recvframe_put(union recv_frame *precvframe, int sz)
+{
+ /* Used to append sz bytes at rx_tail: update rx_tail and return the
+ * updated rx_tail to the caller. */
+ /* After putting, rx_tail must not exceed rx_end. */
+
+ if (precvframe == NULL)
+ return NULL;
+
+ precvframe->u.hdr.rx_tail += sz;
+
+ if (precvframe->u.hdr.rx_tail > precvframe->u.hdr.rx_end) {
+ precvframe->u.hdr.rx_tail -= sz;
+ return NULL;
+ }
+ precvframe->u.hdr.len += sz;
+ return precvframe->u.hdr.rx_tail;
+}
+
+static inline u8 *recvframe_pull_tail(union recv_frame *precvframe, int sz)
+{
+ /* remove data from rx_tail (by yitsen) */
+
+ /* Used to trim sz bytes from the end of the data area: update rx_tail
+ * and return the updated rx_tail to the caller. */
+ /* After pulling, rx_tail must not fall below rx_data. */
+
+ if (precvframe == NULL)
+ return NULL;
+ precvframe->u.hdr.rx_tail -= sz;
+ if (precvframe->u.hdr.rx_tail < precvframe->u.hdr.rx_data) {
+ precvframe->u.hdr.rx_tail += sz;
+ return NULL;
+ }
+ precvframe->u.hdr.len -= sz;
+ return precvframe->u.hdr.rx_tail;
+}
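+
+/*
+ * Usage sketch (illustrative only, not the driver's actual rx path):
+ * once a frame has been validated, the helpers above could strip the
+ * 802.11 header, IV and ICV around the payload:
+ *
+ *   struct rx_pkt_attrib *pattrib = &precvframe->u.hdr.attrib;
+ *
+ *   recvframe_pull(precvframe, pattrib->hdrlen + pattrib->iv_len);
+ *   recvframe_pull_tail(precvframe, pattrib->icv_len);
+ *   // rx_data now points at the payload and u.hdr.len is its length
+ */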
+
+static inline unsigned char *get_rxbuf_desc(union recv_frame *precvframe)
+{
+ unsigned char *buf_desc;
+
+ if (precvframe == NULL)
+ return NULL;
+ /* assume the rx buffer descriptor sits at the head of the rx memory */
+ buf_desc = precvframe->u.hdr.rx_head;
+ return buf_desc;
+}
+
+static inline union recv_frame *rxmem_to_recvframe(u8 *rxmem)
+{
+ /* Because recv_frame is allocated with 2048-byte alignment, the
+ * union recv_frame can be recovered from the address of any of its
+ * members; rxmem may point anywhere inside the recv_frame. */
+
+ return (union recv_frame *)(((size_t)rxmem >> RXFRAME_ALIGN) << RXFRAME_ALIGN);
+}
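+
+/*
+ * Example (hedged; this assumes RXFRAME_ALIGN is the log2 of the
+ * recv_frame alignment, e.g. 11 for the 2048-byte alignment noted
+ * above): shifting the address down and back up rounds it down to the
+ * enclosing alignment boundary, so 0x...1234 maps to the recv_frame
+ * starting at 0x...1000.
+ */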
+
+static inline union recv_frame *pkt_to_recvframe(struct sk_buff *pkt)
+{
+ /* assume the skb data buffer lives inside the recv_frame memory */
+ u8 *buf_start = pkt->data;
+ union recv_frame *precv_frame;
+
+ precv_frame = rxmem_to_recvframe((unsigned char *)buf_start);
+
+ return precv_frame;
+}
+
+static inline u8 *pkt_to_recvmem(struct sk_buff *pkt)
+{
+ /* return the rx_head */
+
+ union recv_frame *precv_frame = pkt_to_recvframe(pkt);
+
+ return precv_frame->u.hdr.rx_head;
+}
+
+static inline u8 *pkt_to_recvdata(struct sk_buff *pkt)
+{
+ /* return the rx_data */
+
+ union recv_frame *precv_frame = pkt_to_recvframe(pkt);
+
+ return precv_frame->u.hdr.rx_data;
+}
+
+static inline int get_recvframe_len(union recv_frame *precvframe)
+{
+ return precvframe->u.hdr.len;
+}
+
+static inline s32 translate_percentage_to_dbm(u32 sig_stren_index)
+{
+ s32 power; /* in dBm. */
+
+ /* Translate to dBm (x=0.5y-95). */
+ power = (s32)((sig_stren_index + 1) >> 1);
+ power -= 95;
+
+ return power;
+}
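+
+/*
+ * Worked example (illustrative): a signal-strength index of 60 gives
+ * power = ((60 + 1) >> 1) - 95 = 30 - 95 = -65 dBm; index 0 maps to
+ * -95 dBm and index 100 to -45 dBm.
+ */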
+
+
+struct sta_info;
+
+void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv);
+
+void mgt_dispatcher(struct adapter *padapter, union recv_frame *precv_frame);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_rf.h b/drivers/staging/rtl8188eu/include/rtw_rf.h
new file mode 100644
index 00000000000..089ecee6c1f
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_rf.h
@@ -0,0 +1,146 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_RF_H_
+#define __RTW_RF_H_
+
+#include <rtw_cmd.h>
+
+#define OFDM_PHY 1
+#define MIXED_PHY 2
+#define CCK_PHY 3
+
+#define NumRates (13)
+
+/* slot time for 11g */
+#define SHORT_SLOT_TIME 9
+#define NON_SHORT_SLOT_TIME 20
+
+#define RTL8711_RF_MAX_SENS 6
+#define RTL8711_RF_DEF_SENS 4
+
+/* We define the following as the maximum channels in each
+ * channel plan. */
+/* 2G, total 14 chnls */
+/* {1,2,3,4,5,6,7,8,9,10,11,12,13,14} */
+#define MAX_CHANNEL_NUM_2G 14
+#define MAX_CHANNEL_NUM 14 /* 2.4 GHz only */
+
+#define NUM_REGULATORYS 1
+
+/* Country codes */
+#define USA 0x555320
+#define EUROPE 0x1 /* temp, should be provided later */
+#define JAPAN 0x2 /* temp, should be provided later */
+
+struct regulatory_class {
+ u32 starting_freq; /* MHz */
+ u8 channel_set[MAX_CHANNEL_NUM];
+ u8 channel_cck_power[MAX_CHANNEL_NUM]; /* dbm */
+ u8 channel_ofdm_power[MAX_CHANNEL_NUM]; /* dbm */
+ u8 txpower_limit; /* dbm */
+ u8 channel_spacing; /* MHz */
+ u8 modem;
+};
+
+enum capability {
+ cESS = 0x0001,
+ cIBSS = 0x0002,
+ cPollable = 0x0004,
+ cPollReq = 0x0008,
+ cPrivacy = 0x0010,
+ cShortPreamble = 0x0020,
+ cPBCC = 0x0040,
+ cChannelAgility = 0x0080,
+ cSpectrumMgnt = 0x0100,
+ cQos = 0x0200, /* For HCCA, use with CF-Pollable
+ * and CF-PollReq */
+ cShortSlotTime = 0x0400,
+ cAPSD = 0x0800,
+ cRM = 0x1000, /* RRM (Radio Resource Measurement) */
+ cDSSS_OFDM = 0x2000,
+ cDelayedBA = 0x4000,
+ cImmediateBA = 0x8000,
+};
+
+enum _REG_PREAMBLE_MODE {
+ PREAMBLE_LONG = 1,
+ PREAMBLE_AUTO = 2,
+ PREAMBLE_SHORT = 3,
+};
+
+enum _RTL8712_RF_MIMO_CONFIG_ {
+ RTL8712_RFCONFIG_1T = 0x10,
+ RTL8712_RFCONFIG_2T = 0x20,
+ RTL8712_RFCONFIG_1R = 0x01,
+ RTL8712_RFCONFIG_2R = 0x02,
+ RTL8712_RFCONFIG_1T1R = 0x11,
+ RTL8712_RFCONFIG_1T2R = 0x12,
+ RTL8712_RFCONFIG_TURBO = 0x92,
+ RTL8712_RFCONFIG_2T2R = 0x22
+};
+
+enum rf90_radio_path {
+ RF90_PATH_A = 0, /* Radio Path A */
+ RF90_PATH_B = 1, /* Radio Path B */
+ RF90_PATH_C = 2, /* Radio Path C */
+ RF90_PATH_D = 3 /* Radio Path D */
+};
+
+/* Bandwidth Offset */
+#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
+#define HAL_PRIME_CHNL_OFFSET_LOWER 1
+#define HAL_PRIME_CHNL_OFFSET_UPPER 2
+
+/* Represent the Channel Width in HT Capabilities */
+enum ht_channel_width {
+ HT_CHANNEL_WIDTH_20 = 0,
+ HT_CHANNEL_WIDTH_40 = 1,
+ HT_CHANNEL_WIDTH_80 = 2,
+ HT_CHANNEL_WIDTH_160 = 3,
+ HT_CHANNEL_WIDTH_10 = 4,
+};
+
+/*
+ * Represent the Extension Channel Offset in HT Capabilities.
+ * This is available only in 40 MHz mode.
+ */
+enum ht_extchnl_offset {
+ HT_EXTCHNL_OFFSET_NO_EXT = 0,
+ HT_EXTCHNL_OFFSET_UPPER = 1,
+ HT_EXTCHNL_OFFSET_NO_DEF = 2,
+ HT_EXTCHNL_OFFSET_LOWER = 3,
+};
+
+/* 2007/11/15 MH Define different RF type. */
+enum rt_rf_type_def {
+ RF_1T2R = 0,
+ RF_2T4R = 1,
+ RF_2T2R = 2,
+ RF_1T1R = 3,
+ RF_2T2R_GREEN = 4,
+ RF_819X_MAX_TYPE = 5,
+};
+
+u32 rtw_ch2freq(u32 ch);
+u32 rtw_freq2ch(u32 freq);
+
+
+#endif /* __RTW_RF_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_security.h b/drivers/staging/rtl8188eu/include/rtw_security.h
new file mode 100644
index 00000000000..23c7814a50e
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_security.h
@@ -0,0 +1,383 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __RTW_SECURITY_H_
+#define __RTW_SECURITY_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#define _NO_PRIVACY_ 0x0
+#define _WEP40_ 0x1
+#define _TKIP_ 0x2
+#define _TKIP_WTMIC_ 0x3
+#define _AES_ 0x4
+#define _WEP104_ 0x5
+#define _WEP_WPA_MIXED_ 0x07 /* WEP + WPA */
+#define _SMS4_ 0x06
+
+#define is_wep_enc(alg) (((alg) == _WEP40_) || ((alg) == _WEP104_))
+
+#define _WPA_IE_ID_ 0xdd
+#define _WPA2_IE_ID_ 0x30
+
+#define SHA256_MAC_LEN 32
+#define AES_BLOCK_SIZE 16
+#define AES_PRIV_SIZE (4 * 44)
+
+enum {
+ ENCRYP_PROTOCOL_OPENSYS, /* open system */
+ ENCRYP_PROTOCOL_WEP, /* WEP */
+ ENCRYP_PROTOCOL_WPA, /* WPA */
+ ENCRYP_PROTOCOL_WPA2, /* WPA2 */
+ ENCRYP_PROTOCOL_WAPI, /* WAPI: not supported in this version */
+ ENCRYP_PROTOCOL_MAX
+};
+
+
+#ifndef Ndis802_11AuthModeWPA2
+#define Ndis802_11AuthModeWPA2 (Ndis802_11AuthModeWPANone + 1)
+#endif
+
+#ifndef Ndis802_11AuthModeWPA2PSK
+#define Ndis802_11AuthModeWPA2PSK (Ndis802_11AuthModeWPANone + 2)
+#endif
+
+union pn48 {
+ u64 val;
+
+#ifdef __LITTLE_ENDIAN
+ struct {
+ u8 TSC0;
+ u8 TSC1;
+ u8 TSC2;
+ u8 TSC3;
+ u8 TSC4;
+ u8 TSC5;
+ u8 TSC6;
+ u8 TSC7;
+ } _byte_;
+
+#elif defined(__BIG_ENDIAN)
+
+ struct {
+ u8 TSC7;
+ u8 TSC6;
+ u8 TSC5;
+ u8 TSC4;
+ u8 TSC3;
+ u8 TSC2;
+ u8 TSC1;
+ u8 TSC0;
+ } _byte_;
+#endif
+};
+
+union Keytype {
+ u8 skey[16];
+ u32 lkey[4];
+};
+
+struct rt_pmkid_list {
+ u8 bUsed;
+ u8 Bssid[6];
+ u8 PMKID[16];
+ u8 SsidBuf[33];
+ u8 *ssid_octet;
+ u16 ssid_length;
+};
+
+struct security_priv {
+ u32 dot11AuthAlgrthm; /* 802.11 auth, could be open,
+ * shared, 8021x and authswitch */
+ u32 dot11PrivacyAlgrthm; /* This specify the privacy for
+ * shared auth. algorithm. */
+ /* WEP */
+ u32 dot11PrivacyKeyIndex; /* only valid for legacy WEP,
+ * 0~3 for key id (tx key index) */
+ union Keytype dot11DefKey[4]; /* this is only valid for def. key */
+ u32 dot11DefKeylen[4];
+ u32 dot118021XGrpPrivacy; /* This specify the privacy algthm.
+ * used for Grp key */
+ u32 dot118021XGrpKeyid; /* key id used for Grp Key
+ * ( tx key index) */
+ union Keytype dot118021XGrpKey[4]; /* 802.1x Group Key,
+ * for inx0 and inx1 */
+ union Keytype dot118021XGrptxmickey[4];
+ union Keytype dot118021XGrprxmickey[4];
+ union pn48 dot11Grptxpn; /* PN48 used for Grp Key xmit.*/
+ union pn48 dot11Grprxpn; /* PN48 used for Grp Key recv.*/
+#ifdef CONFIG_88EU_AP_MODE
+ /* extend security capabilities for AP_MODE */
+ unsigned int dot8021xalg;/* 0:disable, 1:psk, 2:802.1x */
+ unsigned int wpa_psk;/* 0:disable, bit(0): WPA, bit(1):WPA2 */
+ unsigned int wpa_group_cipher;
+ unsigned int wpa2_group_cipher;
+ unsigned int wpa_pairwise_cipher;
+ unsigned int wpa2_pairwise_cipher;
+#endif
+ u8 wps_ie[MAX_WPS_IE_LEN];/* added in assoc req */
+ int wps_ie_len;
+ u8 binstallGrpkey;
+ u8 busetkipkey;
+ u8 bcheck_grpkey;
+ u8 bgrpkey_handshake;
+ s32 sw_encrypt;/* from registry_priv */
+ s32 sw_decrypt;/* from registry_priv */
+ s32 hw_decrypted;/* if hw_decrypted is false for rx packets,
+ * the hardware is not ready yet. */
+
+ /* keeps the auth_type & enc_status from upper layer
+ * ioctl(wpa_supplicant or wzc) */
+ u32 ndisauthtype; /* NDIS_802_11_AUTHENTICATION_MODE */
+ u32 ndisencryptstatus; /* NDIS_802_11_ENCRYPTION_STATUS */
+ struct wlan_bssid_ex sec_bss; /* for joinbss (h2c buffer) usage */
+ struct ndis_802_11_wep ndiswep;
+ u8 assoc_info[600];
+ u8 szofcapability[256]; /* for wpa2 usage */
+ u8 oidassociation[512]; /* for wpa/wpa2 usage */
+ u8 authenticator_ie[256]; /* store ap security information element */
+ u8 supplicant_ie[256]; /* store sta security information element */
+
+ /* for tkip countermeasure */
+ u32 last_mic_err_time;
+ u8 btkip_countermeasure;
+ u8 btkip_wait_report;
+ u32 btkip_countermeasure_time;
+
+ /* For WPA2 Pre-Authentication. */
+ struct rt_pmkid_list PMKIDList[NUM_PMKID_CACHE];
+ u8 PMKIDIndex;
+ u8 bWepDefaultKeyIdxSet;
+};
+
+struct sha256_state {
+ u64 length;
+ u32 state[8], curlen;
+ u8 buf[64];
+};
+
+#define GET_ENCRY_ALGO(psecuritypriv, psta, encry_algo, bmcst) \
+do { \
+ switch (psecuritypriv->dot11AuthAlgrthm) { \
+ case dot11AuthAlgrthm_Open: \
+ case dot11AuthAlgrthm_Shared: \
+ case dot11AuthAlgrthm_Auto: \
+ encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm; \
+ break; \
+ case dot11AuthAlgrthm_8021X: \
+ if (bmcst) \
+ encry_algo = (u8)psecuritypriv->dot118021XGrpPrivacy;\
+ else \
+ encry_algo = (u8)psta->dot118021XPrivacy; \
+ break; \
+ case dot11AuthAlgrthm_WAPI: \
+ encry_algo = (u8)psecuritypriv->dot11PrivacyAlgrthm; \
+ break; \
+ } \
+} while (0)
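+
+/*
+ * Usage sketch (illustrative only): select the cipher for an outgoing
+ * frame, falling back to the group cipher for multicast under 802.1X
+ * (bmcst is the caller's multicast flag):
+ *
+ *   u8 encry_algo;
+ *   GET_ENCRY_ALGO(psecuritypriv, psta, encry_algo, bmcst);
+ */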
+
+#define SET_ICE_IV_LEN(iv_len, icv_len, encrypt) \
+do { \
+ switch (encrypt) { \
+ case _WEP40_: \
+ case _WEP104_: \
+ iv_len = 4; \
+ icv_len = 4; \
+ break; \
+ case _TKIP_: \
+ iv_len = 8; \
+ icv_len = 4; \
+ break; \
+ case _AES_: \
+ iv_len = 8; \
+ icv_len = 8; \
+ break; \
+ case _SMS4_: \
+ iv_len = 18; \
+ icv_len = 16; \
+ break; \
+ default: \
+ iv_len = 0; \
+ icv_len = 0; \
+ break; \
+ } \
+} while (0)
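+
+/*
+ * Usage sketch (illustrative only): fill the per-packet IV/ICV lengths
+ * from the negotiated cipher before building a frame:
+ *
+ *   SET_ICE_IV_LEN(pattrib->iv_len, pattrib->icv_len, pattrib->encrypt);
+ *   // _TKIP_ -> iv_len = 8, icv_len = 4;  _AES_ -> iv_len = 8, icv_len = 8
+ */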
+
+
+#define GET_TKIP_PN(iv, dot11txpn) \
+do { \
+ dot11txpn._byte_.TSC0 = iv[2]; \
+ dot11txpn._byte_.TSC1 = iv[0]; \
+ dot11txpn._byte_.TSC2 = iv[4]; \
+ dot11txpn._byte_.TSC3 = iv[5]; \
+ dot11txpn._byte_.TSC4 = iv[6]; \
+ dot11txpn._byte_.TSC5 = iv[7]; \
+} while (0)
+
+
+#define ROL32(A, n) (((A) << (n)) | (((A)>>(32-(n))) & ((1UL << (n)) - 1)))
+#define ROR32(A, n) ROL32((A), 32-(n))
+
+struct mic_data {
+ u32 K0, K1; /* Key */
+ u32 L, R; /* Current state */
+ u32 M; /* Message accumulator (single word) */
+ u32 nBytesInM; /* # bytes in M */
+};
+
+extern const u32 Te0[256];
+extern const u32 Te1[256];
+extern const u32 Te2[256];
+extern const u32 Te3[256];
+extern const u32 Te4[256];
+extern const u32 Td0[256];
+extern const u32 Td1[256];
+extern const u32 Td2[256];
+extern const u32 Td3[256];
+extern const u32 Td4[256];
+extern const u32 rcon[10];
+extern const u8 Td4s[256];
+extern const u8 rcons[10];
+
+#define RCON(i) (rcons[(i)] << 24)
+
+static inline u32 rotr(u32 val, int bits)
+{
+ return (val >> bits) | (val << (32 - bits));
+}
+
+#define TE0(i) Te0[((i) >> 24) & 0xff]
+#define TE1(i) rotr(Te0[((i) >> 16) & 0xff], 8)
+#define TE2(i) rotr(Te0[((i) >> 8) & 0xff], 16)
+#define TE3(i) rotr(Te0[(i) & 0xff], 24)
+#define TE41(i) ((Te0[((i) >> 24) & 0xff] << 8) & 0xff000000)
+#define TE42(i) (Te0[((i) >> 16) & 0xff] & 0x00ff0000)
+#define TE43(i) (Te0[((i) >> 8) & 0xff] & 0x0000ff00)
+#define TE44(i) ((Te0[(i) & 0xff] >> 8) & 0x000000ff)
+#define TE421(i) ((Te0[((i) >> 16) & 0xff] << 8) & 0xff000000)
+#define TE432(i) (Te0[((i) >> 8) & 0xff] & 0x00ff0000)
+#define TE443(i) (Te0[(i) & 0xff] & 0x0000ff00)
+#define TE414(i) ((Te0[((i) >> 24) & 0xff] >> 8) & 0x000000ff)
+#define TE4(i) ((Te0[(i)] >> 8) & 0x000000ff)
+
+#define TD0(i) Td0[((i) >> 24) & 0xff]
+#define TD1(i) rotr(Td0[((i) >> 16) & 0xff], 8)
+#define TD2(i) rotr(Td0[((i) >> 8) & 0xff], 16)
+#define TD3(i) rotr(Td0[(i) & 0xff], 24)
+#define TD41(i) (Td4s[((i) >> 24) & 0xff] << 24)
+#define TD42(i) (Td4s[((i) >> 16) & 0xff] << 16)
+#define TD43(i) (Td4s[((i) >> 8) & 0xff] << 8)
+#define TD44(i) (Td4s[(i) & 0xff])
+#define TD0_(i) Td0[(i) & 0xff]
+#define TD1_(i) rotr(Td0[(i) & 0xff], 8)
+#define TD2_(i) rotr(Td0[(i) & 0xff], 16)
+#define TD3_(i) rotr(Td0[(i) & 0xff], 24)
+
+#define GETU32(pt) (((u32)(pt)[0] << 24) ^ ((u32)(pt)[1] << 16) ^ \
+ ((u32)(pt)[2] << 8) ^ ((u32)(pt)[3]))
+
+#define PUTU32(ct, st) { \
+(ct)[0] = (u8)((st) >> 24); (ct)[1] = (u8)((st) >> 16); \
+(ct)[2] = (u8)((st) >> 8); (ct)[3] = (u8)(st); }
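+
+/*
+ * Example (illustrative): GETU32()/PUTU32() convert between a
+ * big-endian byte stream and a host u32:
+ *
+ *   u8 b[4] = { 0x12, 0x34, 0x56, 0x78 };
+ *   u32 w = GETU32(b);   // w == 0x12345678
+ *   PUTU32(b, w);        // writes back the same four bytes
+ */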
+
+#define WPA_GET_BE32(a) ((((u32)(a)[0]) << 24) | (((u32)(a)[1]) << 16) | \
+ (((u32)(a)[2]) << 8) | ((u32)(a)[3]))
+
+#define WPA_PUT_LE16(a, val) \
+ do { \
+ (a)[1] = ((u16)(val)) >> 8; \
+ (a)[0] = ((u16)(val)) & 0xff; \
+ } while (0)
+
+#define WPA_PUT_BE32(a, val) \
+ do { \
+ (a)[0] = (u8)((((u32)(val)) >> 24) & 0xff); \
+ (a)[1] = (u8)((((u32)(val)) >> 16) & 0xff); \
+ (a)[2] = (u8)((((u32)(val)) >> 8) & 0xff); \
+ (a)[3] = (u8)(((u32)(val)) & 0xff); \
+ } while (0)
+
+#define WPA_PUT_BE64(a, val) \
+ do { \
+ (a)[0] = (u8)(((u64)(val)) >> 56); \
+ (a)[1] = (u8)(((u64)(val)) >> 48); \
+ (a)[2] = (u8)(((u64)(val)) >> 40); \
+ (a)[3] = (u8)(((u64)(val)) >> 32); \
+ (a)[4] = (u8)(((u64)(val)) >> 24); \
+ (a)[5] = (u8)(((u64)(val)) >> 16); \
+ (a)[6] = (u8)(((u64)(val)) >> 8); \
+ (a)[7] = (u8)(((u64)(val)) & 0xff); \
+ } while (0)
+
+/* ===== start - public domain SHA256 implementation ===== */
+
+/* This is based on SHA256 implementation in LibTomCrypt that was released into
+ * public domain by Tom St Denis. */
+
+/* the K array */
+static const unsigned long K[64] = {
+ 0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL, 0x3956c25bUL,
+ 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL, 0xd807aa98UL, 0x12835b01UL,
+ 0x243185beUL, 0x550c7dc3UL, 0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL,
+ 0xc19bf174UL, 0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL,
+ 0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL, 0x983e5152UL,
+ 0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL, 0xc6e00bf3UL, 0xd5a79147UL,
+ 0x06ca6351UL, 0x14292967UL, 0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL,
+ 0x53380d13UL, 0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL,
+ 0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL, 0xd192e819UL,
+ 0xd6990624UL, 0xf40e3585UL, 0x106aa070UL, 0x19a4c116UL, 0x1e376c08UL,
+ 0x2748774cUL, 0x34b0bcb5UL, 0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL,
+ 0x682e6ff3UL, 0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
+ 0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL
+};
+
+/* Various logical functions */
+#define RORc(x, y) \
+ (((((unsigned long)(x) & 0xFFFFFFFFUL) >> (unsigned long)((y)&31)) | \
+ ((unsigned long)(x) << (unsigned long)(32-((y)&31)))) & 0xFFFFFFFFUL)
+#define Ch(x, y, z) (z ^ (x & (y ^ z)))
+#define Maj(x, y, z) (((x | y) & z) | (x & y))
+#define S(x, n) RORc((x), (n))
+#define R(x, n) (((x)&0xFFFFFFFFUL)>>(n))
+#define Sigma0(x) (S(x, 2) ^ S(x, 13) ^ S(x, 22))
+#define Sigma1(x) (S(x, 6) ^ S(x, 11) ^ S(x, 25))
+#define Gamma0(x) (S(x, 7) ^ S(x, 18) ^ R(x, 3))
+#define Gamma1(x) (S(x, 17) ^ S(x, 19) ^ R(x, 10))
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+#endif
+
+void rtw_secmicsetkey(struct mic_data *pmicdata, u8 *key);
+void rtw_secmicappendbyte(struct mic_data *pmicdata, u8 b);
+void rtw_secmicappend(struct mic_data *pmicdata, u8 *src, u32 nBytes);
+void rtw_secgetmic(struct mic_data *pmicdata, u8 *dst);
+void rtw_seccalctkipmic(u8 *key, u8 *header, u8 *data, u32 data_len,
+ u8 *Miccode, u8 priority);
+u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe);
+u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe);
+void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe);
+u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe);
+u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe);
+void rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe);
+void rtw_use_tkipkey_handler(void *FunctionContext);
+
+#endif /* __RTW_SECURITY_H_ */
diff --git a/drivers/staging/rtl8188eu/include/rtw_sreset.h b/drivers/staging/rtl8188eu/include/rtw_sreset.h
new file mode 100644
index 00000000000..2a1244f7579
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_sreset.h
@@ -0,0 +1,50 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_SRESET_C_
+#define _RTW_SRESET_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+struct sreset_priv {
+ struct mutex silentreset_mutex;
+ u8 silent_reset_inprogress;
+ u8 Wifi_Error_Status;
+ unsigned long last_tx_time;
+ unsigned long last_tx_complete_time;
+};
+
+#include <rtl8188e_hal.h>
+
+#define WIFI_STATUS_SUCCESS 0
+#define USB_VEN_REQ_CMD_FAIL BIT0
+#define USB_READ_PORT_FAIL BIT1
+#define USB_WRITE_PORT_FAIL BIT2
+#define WIFI_MAC_TXDMA_ERROR BIT3
+#define WIFI_TX_HANG BIT4
+#define WIFI_RX_HANG BIT5
+#define WIFI_IF_NOT_EXIST BIT6
+
+void sreset_init_value(struct adapter *padapter);
+void sreset_reset_value(struct adapter *padapter);
+u8 sreset_get_wifi_status(struct adapter *padapter);
+void sreset_set_wifi_error_status(struct adapter *padapter, u32 status);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/rtw_version.h b/drivers/staging/rtl8188eu/include/rtw_version.h
new file mode 100644
index 00000000000..6d2d52cbb3d
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_version.h
@@ -0,0 +1 @@
+#define DRIVERVERSION "v4.1.4_6773.20130222"
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
new file mode 100644
index 00000000000..1ac1dd31db6
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
@@ -0,0 +1,384 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _RTW_XMIT_H_
+#define _RTW_XMIT_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#define MAX_XMITBUF_SZ (20480) /* 20k */
+#define NR_XMITBUFF (4)
+
+#define XMITBUF_ALIGN_SZ 4
+
+/* xmit extension buffer definition */
+#define MAX_XMIT_EXTBUF_SZ (1536)
+#define NR_XMIT_EXTBUFF (32)
+
+#define MAX_NUMBLKS (1)
+
+#define XMIT_VO_QUEUE (0)
+#define XMIT_VI_QUEUE (1)
+#define XMIT_BE_QUEUE (2)
+#define XMIT_BK_QUEUE (3)
+
+#define VO_QUEUE_INX 0
+#define VI_QUEUE_INX 1
+#define BE_QUEUE_INX 2
+#define BK_QUEUE_INX 3
+#define BCN_QUEUE_INX 4
+#define MGT_QUEUE_INX 5
+#define HIGH_QUEUE_INX 6
+#define TXCMD_QUEUE_INX 7
+
+#define HW_QUEUE_ENTRY 8
+
+#define WEP_IV(pattrib_iv, dot11txpn, keyidx)\
+do {\
+ pattrib_iv[0] = dot11txpn._byte_.TSC0;\
+ pattrib_iv[1] = dot11txpn._byte_.TSC1;\
+ pattrib_iv[2] = dot11txpn._byte_.TSC2;\
+ pattrib_iv[3] = ((keyidx & 0x3)<<6);\
+ dot11txpn.val = (dot11txpn.val == 0xffffff) ? 0 : (dot11txpn.val+1);\
+} while (0)
+
+
+#define TKIP_IV(pattrib_iv, dot11txpn, keyidx)\
+do {\
+ pattrib_iv[0] = dot11txpn._byte_.TSC1;\
+ pattrib_iv[1] = (dot11txpn._byte_.TSC1 | 0x20) & 0x7f;\
+ pattrib_iv[2] = dot11txpn._byte_.TSC0;\
+ pattrib_iv[3] = BIT(5) | ((keyidx & 0x3)<<6);\
+ pattrib_iv[4] = dot11txpn._byte_.TSC2;\
+ pattrib_iv[5] = dot11txpn._byte_.TSC3;\
+ pattrib_iv[6] = dot11txpn._byte_.TSC4;\
+ pattrib_iv[7] = dot11txpn._byte_.TSC5;\
+ dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0 : (dot11txpn.val+1);\
+} while (0)
+
+#define AES_IV(pattrib_iv, dot11txpn, keyidx)\
+do { \
+ pattrib_iv[0] = dot11txpn._byte_.TSC0; \
+ pattrib_iv[1] = dot11txpn._byte_.TSC1; \
+ pattrib_iv[2] = 0; \
+ pattrib_iv[3] = BIT(5) | ((keyidx & 0x3)<<6); \
+ pattrib_iv[4] = dot11txpn._byte_.TSC2; \
+ pattrib_iv[5] = dot11txpn._byte_.TSC3; \
+ pattrib_iv[6] = dot11txpn._byte_.TSC4; \
+ pattrib_iv[7] = dot11txpn._byte_.TSC5; \
+ dot11txpn.val = dot11txpn.val == 0xffffffffffffULL ? 0 : (dot11txpn.val+1);\
+} while (0)
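+
+/*
+ * Usage sketch (illustrative only): build the 8-byte CCMP IV/PN header
+ * for a unicast frame and advance the station's 48-bit packet number:
+ *
+ *   AES_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
+ */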
+
+#define HWXMIT_ENTRY 4
+
+#define TXDESC_SIZE 32
+
+#define PACKET_OFFSET_SZ (8)
+#define TXDESC_OFFSET (TXDESC_SIZE + PACKET_OFFSET_SZ)
+
+struct tx_desc {
+ /* DWORD 0 */
+ __le32 txdw0;
+ __le32 txdw1;
+ __le32 txdw2;
+ __le32 txdw3;
+ __le32 txdw4;
+ __le32 txdw5;
+ __le32 txdw6;
+ __le32 txdw7;
+};
+
+union txdesc {
+ struct tx_desc txdesc;
+ unsigned int value[TXDESC_SIZE>>2];
+};
+
+struct hw_xmit {
+ struct __queue *sta_queue;
+ int accnt;
+};
+
+/* reduce size */
+struct pkt_attrib {
+ u8 type;
+ u8 subtype;
+ u8 bswenc;
+ u8 dhcp_pkt;
+ u16 ether_type;
+ u16 seqnum;
+ u16 pkt_hdrlen; /* the original 802.3 pkt header len */
+ u16 hdrlen; /* the WLAN Header Len */
+ u32 pktlen; /* the original 802.3 pkt raw_data len (not including
+ * the ether_hdr data) */
+ u32 last_txcmdsz;
+ u8 nr_frags;
+ u8 encrypt; /* when 0, no encryption is used; when non-zero,
+ * it indicates the encryption algorithm */
+ u8 iv_len;
+ u8 icv_len;
+ u8 iv[18];
+ u8 icv[16];
+ u8 priority;
+ u8 ack_policy;
+ u8 mac_id;
+ u8 vcs_mode; /* virtual carrier sense method */
+ u8 dst[ETH_ALEN];
+ u8 src[ETH_ALEN];
+ u8 ta[ETH_ALEN];
+ u8 ra[ETH_ALEN];
+ u8 key_idx;
+ u8 qos_en;
+ u8 ht_en;
+ u8 raid;/* rate adaptive id */
+ u8 bwmode;
+ u8 ch_offset;/* PRIME_CHNL_OFFSET */
+ u8 sgi;/* short GI */
+ u8 ampdu_en;/* tx ampdu enable */
+ u8 mdata;/* more data bit */
+ u8 pctrl;/* per packet txdesc control enable */
+ u8 triggered;/* for ap mode handling Power Saving sta */
+ u8 qsel;
+ u8 eosp;
+ u8 rate;
+ u8 intel_proxim;
+ u8 retry_ctrl;
+ struct sta_info *psta;
+};
+
+#define WLANHDR_OFFSET 64
+
+#define NULL_FRAMETAG (0x0)
+#define DATA_FRAMETAG 0x01
+#define L2_FRAMETAG 0x02
+#define MGNT_FRAMETAG 0x03
+#define AMSDU_FRAMETAG 0x04
+
+#define EII_FRAMETAG 0x05
+#define IEEE8023_FRAMETAG 0x06
+
+#define MP_FRAMETAG 0x07
+
+#define TXAGG_FRAMETAG 0x08
+
+struct submit_ctx {
+ u32 submit_time; /* */
+ u32 timeout_ms; /* <0: not synchronous, 0: wait forever, >0: up to ms waiting */
+ int status; /* status for operation */
+ struct completion done;
+};
+
+enum {
+ RTW_SCTX_SUBMITTED = -1,
+ RTW_SCTX_DONE_SUCCESS = 0,
+ RTW_SCTX_DONE_UNKNOWN,
+ RTW_SCTX_DONE_TIMEOUT,
+ RTW_SCTX_DONE_BUF_ALLOC,
+ RTW_SCTX_DONE_BUF_FREE,
+ RTW_SCTX_DONE_WRITE_PORT_ERR,
+ RTW_SCTX_DONE_TX_DESC_NA,
+ RTW_SCTX_DONE_TX_DENY,
+ RTW_SCTX_DONE_CCX_PKT_FAIL,
+ RTW_SCTX_DONE_DRV_STOP,
+ RTW_SCTX_DONE_DEV_REMOVE,
+};
+
+void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms);
+int rtw_sctx_wait(struct submit_ctx *sctx);
+void rtw_sctx_done_err(struct submit_ctx **sctx, int status);
+void rtw_sctx_done(struct submit_ctx **sctx);
+
+struct xmit_buf {
+ struct list_head list;
+ struct adapter *padapter;
+ u8 *pallocated_buf;
+ u8 *pbuf;
+ void *priv_data;
+ u16 ext_tag; /* 0: Normal xmitbuf, 1: extension xmitbuf. */
+ u16 flags;
+ u32 alloc_sz;
+ u32 len;
+ struct submit_ctx *sctx;
+ u32 ff_hwaddr;
+ struct urb *pxmit_urb[8];
+ dma_addr_t dma_transfer_addr; /* (in) dma addr for transfer_buffer */
+ u8 bpending[8];
+ int last[8];
+};
+
+struct xmit_frame {
+ struct list_head list;
+ struct pkt_attrib attrib;
+ struct sk_buff *pkt;
+ int frame_tag;
+ struct adapter *padapter;
+ u8 *buf_addr;
+ struct xmit_buf *pxmitbuf;
+
+ u8 agg_num;
+ s8 pkt_offset;
+ u8 ack_report;
+};
+
+struct tx_servq {
+ struct list_head tx_pending;
+ struct __queue sta_pending;
+ int qcnt;
+};
+
+struct sta_xmit_priv {
+ spinlock_t lock;
+ int option;
+ int apsd_setting; /* When bit mask is on, the associated edca
+ * queue supports APSD. */
+ struct tx_servq be_q; /* priority == 0,3 */
+ struct tx_servq bk_q; /* priority == 1,2 */
+ struct tx_servq vi_q; /* priority == 4,5 */
+ struct tx_servq vo_q; /* priority == 6,7 */
+ struct list_head legacy_dz;
+ struct list_head apsd;
+ u16 txseq_tid[16];
+};
+
+struct hw_txqueue {
+ volatile int head;
+ volatile int tail;
+ volatile int free_sz; /* in units of 64 bytes */
+ volatile int free_cmdsz;
+ volatile int txsz[8];
+ uint ff_hwaddr;
+ uint cmd_hwaddr;
+ int ac_tag;
+};
+
+struct agg_pkt_info {
+ u16 offset;
+ u16 pkt_len;
+};
+
+struct xmit_priv {
+ spinlock_t lock;
+ struct semaphore xmit_sema;
+ struct semaphore terminate_xmitthread_sema;
+ struct __queue be_pending;
+ struct __queue bk_pending;
+ struct __queue vi_pending;
+ struct __queue vo_pending;
+ struct __queue bm_pending;
+ u8 *pallocated_frame_buf;
+ u8 *pxmit_frame_buf;
+ uint free_xmitframe_cnt;
+ struct __queue free_xmit_queue;
+ uint frag_len;
+ struct adapter *adapter;
+ u8 vcs_setting;
+ u8 vcs;
+ u8 vcs_type;
+ u64 tx_bytes;
+ u64 tx_pkts;
+ u64 tx_drop;
+ u64 last_tx_bytes;
+ u64 last_tx_pkts;
+ struct hw_xmit *hwxmits;
+ u8 hwxmit_entry;
+ u8 wmm_para_seq[4];/* sequence of WMM AC parameter strength,
+ * from large to small; its values are 0->vo,
+ * 1->vi, 2->be, 3->bk. */
+ struct semaphore tx_retevt;/* all tx return event; */
+ u8 txirp_cnt;/* */
+ struct tasklet_struct xmit_tasklet;
+ /* per AC pending irp */
+ int beq_cnt;
+ int bkq_cnt;
+ int viq_cnt;
+ int voq_cnt;
+ struct __queue free_xmitbuf_queue;
+ struct __queue pending_xmitbuf_queue;
+ u8 *pallocated_xmitbuf;
+ u8 *pxmitbuf;
+ uint free_xmitbuf_cnt;
+ struct __queue free_xmit_extbuf_queue;
+ u8 *pallocated_xmit_extbuf;
+ u8 *pxmit_extbuf;
+ uint free_xmit_extbuf_cnt;
+ u16 nqos_ssn;
+ int ack_tx;
+ struct mutex ack_tx_mutex;
+ struct submit_ctx ack_tx_ops;
+};
+
+struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv);
+s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv,
+ struct xmit_buf *pxmitbuf);
+struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv);
+s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv,
+ struct xmit_buf *pxmitbuf);
+void rtw_count_tx_stats(struct adapter *padapter,
+ struct xmit_frame *pxmitframe, int sz);
+void rtw_update_protection(struct adapter *padapter, u8 *ie, uint ie_len);
+s32 rtw_make_wlanhdr(struct adapter *padapter, u8 *hdr,
+ struct pkt_attrib *pattrib);
+s32 rtw_put_snap(u8 *data, u16 h_proto);
+
+struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv);
+s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv,
+ struct xmit_frame *pxmitframe);
+void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv,
+ struct __queue *pframequeue);
+struct tx_servq *rtw_get_sta_pending(struct adapter *padapter,
+ struct sta_info *psta, int up, u8 *ac);
+s32 rtw_xmitframe_enqueue(struct adapter *padapter,
+ struct xmit_frame *pxmitframe);
+struct xmit_frame *rtw_dequeue_xframe(struct xmit_priv *pxmitpriv,
+ struct hw_xmit *phwxmit_i, int entry);
+
+s32 rtw_xmit_classifier(struct adapter *padapter,
+ struct xmit_frame *pxmitframe);
+u32 rtw_calculate_wlan_pkt_size_by_attribue(struct pkt_attrib *pattrib);
+#define rtw_wlan_pkt_size(f) rtw_calculate_wlan_pkt_size_by_attribue(&f->attrib)
+s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt,
+ struct xmit_frame *pxmitframe);
+s32 _rtw_init_hw_txqueue(struct hw_txqueue *phw_txqueue, u8 ac_tag);
+void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv);
+s32 rtw_txframes_pending(struct adapter *padapter);
+s32 rtw_txframes_sta_ac_pending(struct adapter *padapter,
+ struct pkt_attrib *pattrib);
+void rtw_init_hwxmits(struct hw_xmit *phwxmit, int entry);
+s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter);
+void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv);
+void rtw_alloc_hwxmits(struct adapter *padapter);
+void rtw_free_hwxmits(struct adapter *padapter);
+s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
+
+#if defined(CONFIG_88EU_AP_MODE)
+int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_frame *pxmitframe);
+void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta);
+void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta);
+void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *psta);
+#endif
+
+u8 qos_acm(u8 acm_mask, u8 priority);
+u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe);
+int rtw_ack_tx_wait(struct xmit_priv *pxmitpriv, u32 timeout_ms);
+void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status);
+
+/* include after declaring struct xmit_buf, in order to avoid warning */
+#include <xmit_osdep.h>
+
+#endif /* _RTW_XMIT_H_ */
diff --git a/drivers/staging/rtl8188eu/include/sta_info.h b/drivers/staging/rtl8188eu/include/sta_info.h
new file mode 100644
index 00000000000..3ed2a39741a
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/sta_info.h
@@ -0,0 +1,384 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __STA_INFO_H_
+#define __STA_INFO_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wifi.h>
+
+#define IBSS_START_MAC_ID 2
+#define NUM_STA 32
+#define NUM_ACL 16
+
+/* if mode == 0, the sta is allowed when its address matches an entry. */
+/* if mode == 1, the sta is rejected unless its address matches an entry. */
+struct rtw_wlan_acl_node {
+ struct list_head list;
+ u8 addr[ETH_ALEN];
+ u8 valid;
+};
+
+/* mode=0, disable */
+/* mode=1, accept unless in deny list */
+/* mode=2, deny unless in accept list */
+struct wlan_acl_pool {
+ int mode;
+ int num;
+ struct rtw_wlan_acl_node aclnode[NUM_ACL];
+ struct __queue acl_node_q;
+};
+
+struct rssi_sta {
+ s32 UndecoratedSmoothedPWDB;
+ s32 UndecoratedSmoothedCCK;
+ s32 UndecoratedSmoothedOFDM;
+ u64 PacketMap;
+ u8 ValidBit;
+};
+
+struct stainfo_stats {
+ u64 rx_mgnt_pkts;
+ u64 rx_beacon_pkts;
+ u64 rx_probereq_pkts;
+ u64 rx_probersp_pkts;
+ u64 rx_probersp_bm_pkts;
+ u64 rx_probersp_uo_pkts;
+ u64 rx_ctrl_pkts;
+ u64 rx_data_pkts;
+
+ u64 last_rx_mgnt_pkts;
+ u64 last_rx_beacon_pkts;
+ u64 last_rx_probereq_pkts;
+ u64 last_rx_probersp_pkts;
+ u64 last_rx_probersp_bm_pkts;
+ u64 last_rx_probersp_uo_pkts;
+ u64 last_rx_ctrl_pkts;
+ u64 last_rx_data_pkts;
+ u64 rx_bytes;
+ u64 rx_drops;
+ u64 tx_pkts;
+ u64 tx_bytes;
+ u64 tx_drops;
+};
+
+struct sta_info {
+ spinlock_t lock;
+ struct list_head list; /* free_sta_queue */
+ struct list_head hash_list; /* sta_hash */
+
+ struct sta_xmit_priv sta_xmitpriv;
+ struct sta_recv_priv sta_recvpriv;
+
+ struct __queue sleep_q;
+ unsigned int sleepq_len;
+
+ uint state;
+ uint aid;
+ uint mac_id;
+ uint qos_option;
+ u8 hwaddr[ETH_ALEN];
+
+ uint ieee8021x_blocked; /* 0: allowed, 1:blocked */
+ uint dot118021XPrivacy; /* aes, tkip... */
+ union Keytype dot11tkiptxmickey;
+ union Keytype dot11tkiprxmickey;
+ union Keytype dot118021x_UncstKey;
+ union pn48 dot11txpn; /* PN48 used for Unicast xmit. */
+ union pn48 dot11rxpn; /* PN48 used for Unicast recv. */
+ u8 bssrateset[16];
+ u32 bssratelen;
+ s32 rssi;
+ s32 signal_quality;
+
+ u8 cts2self;
+ u8 rtsen;
+
+ u8 raid;
+ u8 init_rate;
+ u32 ra_mask;
+ u8 wireless_mode; /* NETWORK_TYPE */
+ struct stainfo_stats sta_stats;
+
+ /* for A-MPDU TX, ADDBA timeout check */
+ struct timer_list addba_retry_timer;
+
+ /* for A-MPDU Rx reordering buffer control */
+ struct recv_reorder_ctrl recvreorder_ctrl[16];
+
+ /* for A-MPDU Tx */
+ /* unsigned char ampdu_txen_bitmap; */
+ u16 BA_starting_seqctrl[16];
+
+ struct ht_priv htpriv;
+
+ /* Notes: */
+ /* STA_Mode: */
+ /* curr_network(mlme_priv/security_priv/qos/ht) +
+ * sta_info: (STA & AP) CAP/INFO */
+ /* scan_q: AP CAP/INFO */
+
+ /* AP_Mode: */
+ /* curr_network(mlme_priv/security_priv/qos/ht) : AP CAP/INFO */
+ /* sta_info: (AP & STA) CAP/INFO */
+
+ struct list_head asoc_list;
+#ifdef CONFIG_88EU_AP_MODE
+ struct list_head auth_list;
+
+ unsigned int expire_to;
+ unsigned int auth_seq;
+ unsigned int authalg;
+ unsigned char chg_txt[128];
+
+ u16 capability;
+ int flags;
+
+ int dot8021xalg;/* 0:disable, 1:psk, 2:802.1x */
+ int wpa_psk;/* 0:disable, bit(0): WPA, bit(1):WPA2 */
+ int wpa_group_cipher;
+ int wpa2_group_cipher;
+ int wpa_pairwise_cipher;
+ int wpa2_pairwise_cipher;
+
+ u8 bpairwise_key_installed;
+
+ u8 wpa_ie[32];
+
+ u8 nonerp_set;
+ u8 no_short_slot_time_set;
+ u8 no_short_preamble_set;
+ u8 no_ht_gf_set;
+ u8 no_ht_set;
+ u8 ht_20mhz_set;
+
+ unsigned int tx_ra_bitmap;
+ u8 qos_info;
+
+ u8 max_sp_len;
+ u8 uapsd_bk;/* BIT(0): Delivery enabled, BIT(1): Trigger enabled */
+ u8 uapsd_be;
+ u8 uapsd_vi;
+ u8 uapsd_vo;
+
+ u8 has_legacy_ac;
+ unsigned int sleepq_ac_len;
+#endif /* CONFIG_88EU_AP_MODE */
+
+#ifdef CONFIG_88EU_P2P
+ /* p2p priv data */
+ u8 is_p2p_device;
+ u8 p2p_status_code;
+
+ /* p2p client info */
+ u8 dev_addr[ETH_ALEN];
+ u8 dev_cap;
+ u16 config_methods;
+ u8 primary_dev_type[8];
+ u8 num_of_secdev_type;
+ u8 secdev_types_list[32];/* 32/8 == 4; */
+ u16 dev_name_len;
+ u8 dev_name[32];
+#endif /* CONFIG_88EU_P2P */
+ u8 under_exist_checking;
+ u8 keep_alive_trycnt;
+
+ /* for DM */
+ struct rssi_sta rssi_stat;
+
+ /* ================ODM Relative Info======================= */
+ /* Be careful not to declare too many fields here;
+ * the cost is memory times the number of supported STAs. */
+ /* 2011/10/20 MH Add for ODM STA info. */
+ /* Driver Write */
+ u8 bValid; /* records whether the sta is linked */
+ u8 IOTPeer; /* Enum value. HT_IOT_PEER_E */
+ u8 rssi_level; /* for Refresh RA mask */
+ /* ODM Write */
+ /* 1 PHY_STATUS_INFO */
+ u8 RSSI_Path[4]; /* */
+ u8 RSSI_Ave;
+ u8 RXEVM[4];
+ u8 RXSNR[4];
+
+ /* ================ODM Relative Info======================= */
+ /* */
+
+ /* To store the sequence number of received management frame */
+ u16 RxMgmtFrameSeqNum;
+};
+
+#define sta_rx_pkts(sta) \
+ (sta->sta_stats.rx_mgnt_pkts \
+ + sta->sta_stats.rx_ctrl_pkts \
+ + sta->sta_stats.rx_data_pkts)
+
+#define sta_last_rx_pkts(sta) \
+ (sta->sta_stats.last_rx_mgnt_pkts \
+ + sta->sta_stats.last_rx_ctrl_pkts \
+ + sta->sta_stats.last_rx_data_pkts)
+
+#define sta_rx_data_pkts(sta) \
+ (sta->sta_stats.rx_data_pkts)
+
+#define sta_last_rx_data_pkts(sta) \
+ (sta->sta_stats.last_rx_data_pkts)
+
+#define sta_rx_mgnt_pkts(sta) \
+ (sta->sta_stats.rx_mgnt_pkts)
+
+#define sta_last_rx_mgnt_pkts(sta) \
+ (sta->sta_stats.last_rx_mgnt_pkts)
+
+#define sta_rx_beacon_pkts(sta) \
+ (sta->sta_stats.rx_beacon_pkts)
+
+#define sta_last_rx_beacon_pkts(sta) \
+ (sta->sta_stats.last_rx_beacon_pkts)
+
+#define sta_rx_probereq_pkts(sta) \
+ (sta->sta_stats.rx_probereq_pkts)
+
+#define sta_last_rx_probereq_pkts(sta) \
+ (sta->sta_stats.last_rx_probereq_pkts)
+
+#define sta_rx_probersp_pkts(sta) \
+ (sta->sta_stats.rx_probersp_pkts)
+
+#define sta_last_rx_probersp_pkts(sta) \
+ (sta->sta_stats.last_rx_probersp_pkts)
+
+#define sta_rx_probersp_bm_pkts(sta) \
+ (sta->sta_stats.rx_probersp_bm_pkts)
+
+#define sta_last_rx_probersp_bm_pkts(sta) \
+ (sta->sta_stats.last_rx_probersp_bm_pkts)
+
+#define sta_rx_probersp_uo_pkts(sta) \
+ (sta->sta_stats.rx_probersp_uo_pkts)
+
+#define sta_last_rx_probersp_uo_pkts(sta) \
+ (sta->sta_stats.last_rx_probersp_uo_pkts)
+
+#define sta_update_last_rx_pkts(sta) \
+do { \
+ sta->sta_stats.last_rx_mgnt_pkts = sta->sta_stats.rx_mgnt_pkts; \
+ sta->sta_stats.last_rx_beacon_pkts = sta->sta_stats.rx_beacon_pkts; \
+ sta->sta_stats.last_rx_probereq_pkts = sta->sta_stats.rx_probereq_pkts; \
+ sta->sta_stats.last_rx_probersp_pkts = sta->sta_stats.rx_probersp_pkts; \
+ sta->sta_stats.last_rx_probersp_bm_pkts = sta->sta_stats.rx_probersp_bm_pkts; \
+ sta->sta_stats.last_rx_probersp_uo_pkts = sta->sta_stats.rx_probersp_uo_pkts; \
+ sta->sta_stats.last_rx_ctrl_pkts = sta->sta_stats.rx_ctrl_pkts; \
+ sta->sta_stats.last_rx_data_pkts = sta->sta_stats.rx_data_pkts; \
+} while (0)
+
+#define STA_RX_PKTS_ARG(sta) \
+ sta->sta_stats.rx_mgnt_pkts \
+ , sta->sta_stats.rx_ctrl_pkts \
+ , sta->sta_stats.rx_data_pkts
+
+#define STA_LAST_RX_PKTS_ARG(sta) \
+ sta->sta_stats.last_rx_mgnt_pkts \
+ , sta->sta_stats.last_rx_ctrl_pkts \
+ , sta->sta_stats.last_rx_data_pkts
+
+#define STA_RX_PKTS_DIFF_ARG(sta) \
+ sta->sta_stats.rx_mgnt_pkts - sta->sta_stats.last_rx_mgnt_pkts \
+ , sta->sta_stats.rx_ctrl_pkts - sta->sta_stats.last_rx_ctrl_pkts \
+ , sta->sta_stats.rx_data_pkts - sta->sta_stats.last_rx_data_pkts
+
+#define STA_PKTS_FMT "(m:%llu, c:%llu, d:%llu)"
+
+struct sta_priv {
+ u8 *pallocated_stainfo_buf;
+ u8 *pstainfo_buf;
+ struct __queue free_sta_queue;
+
+ spinlock_t sta_hash_lock;
+ struct list_head sta_hash[NUM_STA];
+ int asoc_sta_count;
+ struct __queue sleep_q;
+ struct __queue wakeup_q;
+
+ struct adapter *padapter;
+
+ spinlock_t asoc_list_lock;
+ struct list_head asoc_list;
+
+#ifdef CONFIG_88EU_AP_MODE
+ struct list_head auth_list;
+ spinlock_t auth_list_lock;
+ u8 asoc_list_cnt;
+ u8 auth_list_cnt;
+
+ unsigned int auth_to; /* sec, time to expire in authenticating. */
+ unsigned int assoc_to; /* sec, time to expire before associating. */
+ unsigned int expire_to; /* sec , time to expire after associated. */
+
+ /* pointers to STA info, indexed by allocated AID, or NULL if the AID
+ * is free. AID is in the range 1-2007, so sta_aid[0] corresponds to
+ * AID 1, and so on.
+ */
+ struct sta_info *sta_aid[NUM_STA];
+
+ u16 sta_dz_bitmap;/* only supports 15 stations, station aid bitmap
+ * for sleeping stas. */
+ u16 tim_bitmap; /* only supports 15 stations, aid=0~15 mapping to
+ * bit0~bit15 */
+
+ u16 max_num_sta;
+
+ struct wlan_acl_pool acl_list;
+#endif
+
+};
+
+static inline u32 wifi_mac_hash(u8 *mac)
+{
+ u32 x;
+
+ x = mac[0];
+ x = (x << 2) ^ mac[1];
+ x = (x << 2) ^ mac[2];
+ x = (x << 2) ^ mac[3];
+ x = (x << 2) ^ mac[4];
+ x = (x << 2) ^ mac[5];
+
+ x ^= x >> 8;
+ x = x & (NUM_STA - 1);
+ return x;
+}
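+
+/*
+ * Usage sketch (illustrative only): the hash selects a bucket in
+ * sta_priv.sta_hash[], which is then searched linearly:
+ *
+ *   u32 index = wifi_mac_hash(hwaddr);   // 0 .. NUM_STA-1
+ *   struct list_head *head = &stapriv->sta_hash[index];
+ *   // walk 'head', comparing each sta_info's hwaddr with the target
+ */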
+
+extern u32 _rtw_init_sta_priv(struct sta_priv *pstapriv);
+extern u32 _rtw_free_sta_priv(struct sta_priv *pstapriv);
+
+#define stainfo_offset_valid(offset) ((offset) < NUM_STA && (offset) >= 0)
+int rtw_stainfo_offset(struct sta_priv *stapriv, struct sta_info *sta);
+struct sta_info *rtw_get_stainfo_by_offset(struct sta_priv *stapriv, int off);
+
+extern struct sta_info *rtw_alloc_stainfo(struct sta_priv *stapriv, u8 *hwaddr);
+extern u32 rtw_free_stainfo(struct adapter *adapt, struct sta_info *psta);
+extern void rtw_free_all_stainfo(struct adapter *adapt);
+extern struct sta_info *rtw_get_stainfo(struct sta_priv *stapriv, u8 *hwaddr);
+extern u32 rtw_init_bcmc_stainfo(struct adapter *adapt);
+extern struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter);
+extern u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr);
+
+#endif /* __STA_INFO_H_ */
diff --git a/drivers/staging/rtl8188eu/include/usb_hal.h b/drivers/staging/rtl8188eu/include/usb_hal.h
new file mode 100644
index 00000000000..8a65995d5e4
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/usb_hal.h
@@ -0,0 +1,26 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __USB_HAL_H__
+#define __USB_HAL_H__
+
+void rtl8188eu_set_hal_ops(struct adapter *padapter);
+#define hal_set_hal_ops rtl8188eu_set_hal_ops
+
+#endif /* __USB_HAL_H__ */
diff --git a/drivers/staging/rtl8188eu/include/usb_ops.h b/drivers/staging/rtl8188eu/include/usb_ops.h
new file mode 100644
index 00000000000..df342376553
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/usb_ops.h
@@ -0,0 +1,115 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __USB_OPS_H_
+#define __USB_OPS_H_
+
+#include <linux/version.h>
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <osdep_intf.h>
+
+#define REALTEK_USB_VENQT_READ 0xC0
+#define REALTEK_USB_VENQT_WRITE 0x40
+#define REALTEK_USB_VENQT_CMD_REQ 0x05
+#define REALTEK_USB_VENQT_CMD_IDX 0x00
+
+enum{
+ VENDOR_WRITE = 0x00,
+ VENDOR_READ = 0x01,
+};
+#define ALIGNMENT_UNIT 16
+#define MAX_VENDOR_REQ_CMD_SIZE 254 /* 8188cu SIE Support */
+#define MAX_USB_IO_CTL_SIZE (MAX_VENDOR_REQ_CMD_SIZE + ALIGNMENT_UNIT)
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 12))
+#define rtw_usb_control_msg(dev, pipe, request, requesttype, \
+ value, index, data, size, timeout_ms) \
+ usb_control_msg((dev), (pipe), (request), (requesttype), (value),\
+ (index), (data), (size), (timeout_ms))
+#define rtw_usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout_ms) \
+ usb_bulk_msg((usb_dev), (pipe), (data), (len), \
+ (actual_length), (timeout_ms))
+#else
+#define rtw_usb_control_msg(dev, pipe, request, requesttype, \
+ value, index, data, size, timeout_ms) \
+ usb_control_msg((dev), (pipe), (request), (requesttype), \
+ (value), (index), (data), (size), \
+ ((timeout_ms) == 0) || \
+ ((timeout_ms)*HZ/1000 > 0) ? \
+ ((timeout_ms)*HZ/1000) : 1)
+#define rtw_usb_bulk_msg(usb_dev, pipe, data, len, \
+ actual_length, timeout_ms) \
+ usb_bulk_msg((usb_dev), (pipe), (data), (len), (actual_length), \
+ ((timeout_ms) == 0) || ((timeout_ms)*HZ/1000 > 0) ?\
+ ((timeout_ms)*HZ/1000) : 1)
+#endif
+#include <usb_ops_linux.h>
+
+void rtl8188eu_set_hw_type(struct adapter *padapter);
+#define hal_set_hw_type rtl8188eu_set_hw_type
+void rtl8188eu_set_intf_ops(struct _io_ops *pops);
+#define usb_set_intf_ops rtl8188eu_set_intf_ops
+
+/*
+ * Increase and check if the continual_urb_error of this @param dvobjprivei
+ * is larger than MAX_CONTINUAL_URB_ERR
+ * @return true:
+ * @return false:
+ */
+static inline int rtw_inc_and_chk_continual_urb_error(struct dvobj_priv *dvobj)
+{
+ int ret = false;
+ int value;
+ value = ATOMIC_INC_RETURN(&dvobj->continual_urb_error);
+ if (value > MAX_CONTINUAL_URB_ERR) {
+ DBG_88E("[dvobj:%p][ERROR] continual_urb_error:%d > %d\n",
+ dvobj, value, MAX_CONTINUAL_URB_ERR);
+ ret = true;
+ }
+ return ret;
+}
+
+/*
+ * Set the continual_urb_error of this @dvobj to 0.
+ */
+static inline void rtw_reset_continual_urb_error(struct dvobj_priv *dvobj)
+{
+ ATOMIC_SET(&dvobj->continual_urb_error, 0);
+}
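+
+/*
+ * Usage sketch (illustrative only): a URB completion handler could use
+ * the two helpers above to detect a persistently failing endpoint:
+ *
+ *   if (purb->status == 0)
+ *           rtw_reset_continual_urb_error(dvobj);
+ *   else if (rtw_inc_and_chk_continual_urb_error(dvobj))
+ *           ; // too many consecutive URB errors; start recovery
+ */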
+
+#define USB_HIGH_SPEED_BULK_SIZE 512
+#define USB_FULL_SPEED_BULK_SIZE 64
+
+static inline u8 rtw_usb_bulk_size_boundary(struct adapter *padapter,
+ int buf_len)
+{
+ u8 rst = true;
+ struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
+
+ if (pdvobjpriv->ishighspeed)
+ rst = (0 == (buf_len) % USB_HIGH_SPEED_BULK_SIZE) ?
+ true : false;
+ else
+ rst = (0 == (buf_len) % USB_FULL_SPEED_BULK_SIZE) ?
+ true : false;
+ return rst;
+}
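+
+/*
+ * Usage sketch (illustrative only): a bulk-out transfer whose length is
+ * an exact multiple of the endpoint's max packet size typically needs a
+ * trailing zero-length packet, which is what a check like this helps
+ * decide:
+ *
+ *   if (rtw_usb_bulk_size_boundary(padapter, pxmitbuf->len))
+ *           ; // queue a zero-length bulk-out packet after the data
+ */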
+
+#endif /* __USB_OPS_H_ */
diff --git a/drivers/staging/rtl8188eu/include/usb_ops_linux.h b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
new file mode 100644
index 00000000000..e5b758a81a5
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
@@ -0,0 +1,55 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __USB_OPS_LINUX_H__
+#define __USB_OPS_LINUX_H__
+
+#define VENDOR_CMD_MAX_DATA_LEN 254
+
+#define RTW_USB_CONTROL_MSG_TIMEOUT_TEST 10/* ms */
+#define RTW_USB_CONTROL_MSG_TIMEOUT 500/* ms */
+
+#define MAX_USBCTRL_VENDORREQ_TIMES 10
+
+#define RTW_USB_BULKOUT_TIME 5000/* ms */
+
+#define _usbctrl_vendorreq_async_callback(urb, regs) \
+ _usbctrl_vendorreq_async_callback(urb)
+#define usb_bulkout_zero_complete(purb, regs) \
+ usb_bulkout_zero_complete(purb)
+#define usb_write_mem_complete(purb, regs) \
+ usb_write_mem_complete(purb)
+#define usb_write_port_complete(purb, regs) \
+ usb_write_port_complete(purb)
+#define usb_read_port_complete(purb, regs) \
+ usb_read_port_complete(purb)
+#define usb_read_interrupt_complete(purb, regs) \
+ usb_read_interrupt_complete(purb)
+
+unsigned int ffaddr2pipehdl(struct dvobj_priv *pdvobj, u32 addr);
+
+void usb_read_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem);
+void usb_write_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem);
+
+void usb_read_port_cancel(struct intf_hdl *pintfhdl);
+
+u32 usb_write_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem);
+void usb_write_port_cancel(struct intf_hdl *pintfhdl);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/usb_osintf.h b/drivers/staging/rtl8188eu/include/usb_osintf.h
new file mode 100644
index 00000000000..9de99ca9799
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/usb_osintf.h
@@ -0,0 +1,45 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __USB_OSINTF_H
+#define __USB_OSINTF_H
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <usb_vendor_req.h>
+
+extern char *rtw_initmac;
+extern int rtw_mc2u_disable;
+
+#define USBD_HALTED(Status) ((u32)(Status) >> 30 == 3)
+
+u8 usbvendorrequest(struct dvobj_priv *pdvobjpriv, enum bt_usb_request brequest,
+ enum rt_usb_wvalue wvalue, u8 windex, void *data,
+ u8 datalen, u8 isdirectionin);
+int pm_netdev_open(struct net_device *pnetdev, u8 bnormal);
+void netdev_br_init(struct net_device *netdev);
+void dhcp_flag_bcast(struct adapter *priv, struct sk_buff *skb);
+void *scdb_findEntry(struct adapter *priv, unsigned char *macAddr,
+ unsigned char *ipAddr);
+void nat25_db_expire(struct adapter *priv);
+int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method);
+
+int rtw_resume_process(struct adapter *padapter);
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/usb_vendor_req.h b/drivers/staging/rtl8188eu/include/usb_vendor_req.h
new file mode 100644
index 00000000000..7f26c8f2c78
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/usb_vendor_req.h
@@ -0,0 +1,52 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _USB_VENDOR_REQUEST_H_
+#define _USB_VENDOR_REQUEST_H_
+
+/* 4 Set/Get Register related wIndex/Data */
+#define RT_USB_RESET_MASK_OFF 0
+#define RT_USB_RESET_MASK_ON 1
+#define RT_USB_SLEEP_MASK_OFF 0
+#define RT_USB_SLEEP_MASK_ON 1
+#define RT_USB_LDO_ON 1
+#define RT_USB_LDO_OFF 0
+
+/* 4 Set/Get SYSCLK related wValue or Data */
+#define RT_USB_SYSCLK_32KHZ 0
+#define RT_USB_SYSCLK_40MHZ 1
+#define RT_USB_SYSCLK_60MHZ 2
+
+
+enum bt_usb_request {
+ RT_USB_SET_REGISTER = 1,
+ RT_USB_SET_SYSCLK = 2,
+ RT_USB_GET_SYSCLK = 3,
+ RT_USB_GET_REGISTER = 4
+};
+
+enum rt_usb_wvalue {
+ RT_USB_RESET_MASK = 1,
+ RT_USB_SLEEP_MASK = 2,
+ RT_USB_USB_HRCPWM = 3,
+ RT_USB_LDO = 4,
+ RT_USB_BOOT_TYPE = 5
+};
+
+#endif
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
new file mode 100644
index 00000000000..a615659f947
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -0,0 +1,1127 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef _WIFI_H_
+#define _WIFI_H_
+
+
+#ifdef BIT
+/* error "BIT define occurred earlier elsewhere!\n" */
+#undef BIT
+#endif
+#define BIT(x) (1 << (x))
+
+
+#define WLAN_ETHHDR_LEN 14
+#define WLAN_ETHADDR_LEN 6
+#define WLAN_IEEE_OUI_LEN 3
+#define WLAN_ADDR_LEN 6
+#define WLAN_CRC_LEN 4
+#define WLAN_BSSID_LEN 6
+#define WLAN_BSS_TS_LEN 8
+#define WLAN_HDR_A3_LEN 24
+#define WLAN_HDR_A4_LEN 30
+#define WLAN_HDR_A3_QOS_LEN 26
+#define WLAN_HDR_A4_QOS_LEN 32
+#define WLAN_SSID_MAXLEN 32
+#define WLAN_DATA_MAXLEN 2312
+
+#define WLAN_A3_PN_OFFSET 24
+#define WLAN_A4_PN_OFFSET 30
+
+#define WLAN_MIN_ETHFRM_LEN 60
+#define WLAN_MAX_ETHFRM_LEN 1514
+#define WLAN_ETHHDR_LEN 14
+
+#define P80211CAPTURE_VERSION 0x80211001
+
+/* This value is tested by WiFi 11n Test Plan 5.2.3. */
+/* This test verifies that the WLAN NIC can update the NAV by sending
+ * a CTS with a large duration. */
+#define WiFiNavUpperUs 30000 /* 30 ms */
+
+enum WIFI_FRAME_TYPE {
+ WIFI_MGT_TYPE = (0),
+ WIFI_CTRL_TYPE = (BIT(2)),
+ WIFI_DATA_TYPE = (BIT(3)),
+ WIFI_QOS_DATA_TYPE = (BIT(7)|BIT(3)), /* QoS Data */
+};
+
+enum WIFI_FRAME_SUBTYPE {
+ /* below is for mgt frame */
+ WIFI_ASSOCREQ = (0 | WIFI_MGT_TYPE),
+ WIFI_ASSOCRSP = (BIT(4) | WIFI_MGT_TYPE),
+ WIFI_REASSOCREQ = (BIT(5) | WIFI_MGT_TYPE),
+ WIFI_REASSOCRSP = (BIT(5) | BIT(4) | WIFI_MGT_TYPE),
+ WIFI_PROBEREQ = (BIT(6) | WIFI_MGT_TYPE),
+ WIFI_PROBERSP = (BIT(6) | BIT(4) | WIFI_MGT_TYPE),
+ WIFI_BEACON = (BIT(7) | WIFI_MGT_TYPE),
+ WIFI_ATIM = (BIT(7) | BIT(4) | WIFI_MGT_TYPE),
+ WIFI_DISASSOC = (BIT(7) | BIT(5) | WIFI_MGT_TYPE),
+ WIFI_AUTH = (BIT(7) | BIT(5) | BIT(4) | WIFI_MGT_TYPE),
+ WIFI_DEAUTH = (BIT(7) | BIT(6) | WIFI_MGT_TYPE),
+ WIFI_ACTION = (BIT(7) | BIT(6) | BIT(4) | WIFI_MGT_TYPE),
+
+ /* below is for control frame */
+ WIFI_PSPOLL = (BIT(7) | BIT(5) | WIFI_CTRL_TYPE),
+ WIFI_RTS = (BIT(7) | BIT(5) | BIT(4) | WIFI_CTRL_TYPE),
+ WIFI_CTS = (BIT(7) | BIT(6) | WIFI_CTRL_TYPE),
+ WIFI_ACK = (BIT(7) | BIT(6) | BIT(4) | WIFI_CTRL_TYPE),
+ WIFI_CFEND = (BIT(7) | BIT(6) | BIT(5) | WIFI_CTRL_TYPE),
+ WIFI_CFEND_CFACK = (BIT(7) | BIT(6) | BIT(5) | BIT(4) |
+ WIFI_CTRL_TYPE),
+
+ /* below is for data frame */
+ WIFI_DATA = (0 | WIFI_DATA_TYPE),
+ WIFI_DATA_CFACK = (BIT(4) | WIFI_DATA_TYPE),
+ WIFI_DATA_CFPOLL = (BIT(5) | WIFI_DATA_TYPE),
+ WIFI_DATA_CFACKPOLL = (BIT(5) | BIT(4) | WIFI_DATA_TYPE),
+ WIFI_DATA_NULL = (BIT(6) | WIFI_DATA_TYPE),
+ WIFI_CF_ACK = (BIT(6) | BIT(4) | WIFI_DATA_TYPE),
+ WIFI_CF_POLL = (BIT(6) | BIT(5) | WIFI_DATA_TYPE),
+ WIFI_CF_ACKPOLL = (BIT(6) | BIT(5) | BIT(4) | WIFI_DATA_TYPE),
+ WIFI_QOS_DATA_NULL = (BIT(6) | WIFI_QOS_DATA_TYPE),
+};
+
+enum WIFI_REASON_CODE {
+ _RSON_RESERVED_ = 0,
+ _RSON_UNSPECIFIED_ = 1,
+ _RSON_AUTH_NO_LONGER_VALID_ = 2,
+ _RSON_DEAUTH_STA_LEAVING_ = 3,
+ _RSON_INACTIVITY_ = 4,
+ _RSON_UNABLE_HANDLE_ = 5,
+ _RSON_CLS2_ = 6,
+ _RSON_CLS3_ = 7,
+ _RSON_DISAOC_STA_LEAVING_ = 8,
+ _RSON_ASOC_NOT_AUTH_ = 9,
+
+ /* WPA reason */
+ _RSON_INVALID_IE_ = 13,
+ _RSON_MIC_FAILURE_ = 14,
+ _RSON_4WAY_HNDSHK_TIMEOUT_ = 15,
+ _RSON_GROUP_KEY_UPDATE_TIMEOUT_ = 16,
+ _RSON_DIFF_IE_ = 17,
+ _RSON_MLTCST_CIPHER_NOT_VALID_ = 18,
+ _RSON_UNICST_CIPHER_NOT_VALID_ = 19,
+ _RSON_AKMP_NOT_VALID_ = 20,
+ _RSON_UNSUPPORT_RSNE_VER_ = 21,
+ _RSON_INVALID_RSNE_CAP_ = 22,
+ _RSON_IEEE_802DOT1X_AUTH_FAIL_ = 23,
+
+	/* below are Realtek definitions */
+ _RSON_PMK_NOT_AVAILABLE_ = 24,
+ _RSON_TDLS_TEAR_TOOFAR_ = 25,
+ _RSON_TDLS_TEAR_UN_RSN_ = 26,
+};
+
+/* Reason codes (IEEE 802.11-2007, 7.3.1.7, Table 7-22)
+
+#define WLAN_REASON_UNSPECIFIED 1
+#define WLAN_REASON_PREV_AUTH_NOT_VALID 2
+#define WLAN_REASON_DEAUTH_LEAVING 3
+#define WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY 4
+#define WLAN_REASON_DISASSOC_AP_BUSY 5
+#define WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA 6
+#define WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA 7
+#define WLAN_REASON_DISASSOC_STA_HAS_LEFT 8
+#define WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH 9 */
+/* IEEE 802.11h */
+#define WLAN_REASON_PWR_CAPABILITY_NOT_VALID 10
+#define WLAN_REASON_SUPPORTED_CHANNEL_NOT_VALID 11
+
+/* IEEE 802.11i
+#define WLAN_REASON_INVALID_IE 13
+#define WLAN_REASON_MICHAEL_MIC_FAILURE 14
+#define WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT 15
+#define WLAN_REASON_GROUP_KEY_UPDATE_TIMEOUT 16
+#define WLAN_REASON_IE_IN_4WAY_DIFFERS 17
+#define WLAN_REASON_GROUP_CIPHER_NOT_VALID 18
+#define WLAN_REASON_PAIRWISE_CIPHER_NOT_VALID 19
+#define WLAN_REASON_AKMP_NOT_VALID 20
+#define WLAN_REASON_UNSUPPORTED_RSN_IE_VERSION 21
+#define WLAN_REASON_INVALID_RSN_IE_CAPAB 22
+#define WLAN_REASON_IEEE_802_1X_AUTH_FAILED 23
+#define WLAN_REASON_CIPHER_SUITE_REJECTED 24 */
+
+enum WIFI_STATUS_CODE {
+ _STATS_SUCCESSFUL_ = 0,
+ _STATS_FAILURE_ = 1,
+ _STATS_CAP_FAIL_ = 10,
+ _STATS_NO_ASOC_ = 11,
+ _STATS_OTHER_ = 12,
+ _STATS_NO_SUPP_ALG_ = 13,
+ _STATS_OUT_OF_AUTH_SEQ_ = 14,
+ _STATS_CHALLENGE_FAIL_ = 15,
+ _STATS_AUTH_TIMEOUT_ = 16,
+ _STATS_UNABLE_HANDLE_STA_ = 17,
+ _STATS_RATE_FAIL_ = 18,
+};
+
+/* Status codes (IEEE 802.11-2007, 7.3.1.9, Table 7-23)
+#define WLAN_STATUS_SUCCESS 0
+#define WLAN_STATUS_UNSPECIFIED_FAILURE 1
+#define WLAN_STATUS_CAPS_UNSUPPORTED 10
+#define WLAN_STATUS_REASSOC_NO_ASSOC 11
+#define WLAN_STATUS_ASSOC_DENIED_UNSPEC 12
+#define WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG 13
+#define WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION 14
+#define WLAN_STATUS_CHALLENGE_FAIL 15
+#define WLAN_STATUS_AUTH_TIMEOUT 16
+#define WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA 17
+#define WLAN_STATUS_ASSOC_DENIED_RATES 18 */
+
+/* extended */
+/* IEEE 802.11b */
+#define WLAN_STATUS_ASSOC_DENIED_NOSHORT 19
+#define WLAN_STATUS_ASSOC_DENIED_NOPBCC 20
+#define WLAN_STATUS_ASSOC_DENIED_NOAGILITY 21
+/* IEEE 802.11h */
+#define WLAN_STATUS_SPEC_MGMT_REQUIRED 22
+#define WLAN_STATUS_PWR_CAPABILITY_NOT_VALID 23
+#define WLAN_STATUS_SUPPORTED_CHANNEL_NOT_VALID 24
+/* IEEE 802.11g */
+#define WLAN_STATUS_ASSOC_DENIED_NO_SHORT_SLOT_TIME 25
+#define WLAN_STATUS_ASSOC_DENIED_NO_ER_PBCC 26
+#define WLAN_STATUS_ASSOC_DENIED_NO_DSSS_OFDM 27
+/* IEEE 802.11w */
+#define WLAN_STATUS_ASSOC_REJECTED_TEMPORARILY 30
+#define WLAN_STATUS_ROBUST_MGMT_FRAME_POLICY_VIOLATION 31
+/* IEEE 802.11i */
+#define WLAN_STATUS_INVALID_IE 40
+#define WLAN_STATUS_GROUP_CIPHER_NOT_VALID 41
+#define WLAN_STATUS_PAIRWISE_CIPHER_NOT_VALID 42
+#define WLAN_STATUS_AKMP_NOT_VALID 43
+#define WLAN_STATUS_UNSUPPORTED_RSN_IE_VERSION 44
+#define WLAN_STATUS_INVALID_RSN_IE_CAPAB 45
+#define WLAN_STATUS_CIPHER_REJECTED_PER_POLICY 46
+#define WLAN_STATUS_TS_NOT_CREATED 47
+#define WLAN_STATUS_DIRECT_LINK_NOT_ALLOWED 48
+#define WLAN_STATUS_DEST_STA_NOT_PRESENT 49
+#define WLAN_STATUS_DEST_STA_NOT_QOS_STA 50
+#define WLAN_STATUS_ASSOC_DENIED_LISTEN_INT_TOO_LARGE 51
+/* IEEE 802.11r */
+#define WLAN_STATUS_INVALID_FT_ACTION_FRAME_COUNT 52
+#define WLAN_STATUS_INVALID_PMKID 53
+#define WLAN_STATUS_INVALID_MDIE 54
+#define WLAN_STATUS_INVALID_FTIE 55
+
+enum WIFI_REG_DOMAIN {
+ DOMAIN_FCC = 1,
+ DOMAIN_IC = 2,
+ DOMAIN_ETSI = 3,
+ DOMAIN_SPA = 4,
+ DOMAIN_FRANCE = 5,
+ DOMAIN_MKK = 6,
+ DOMAIN_ISRAEL = 7,
+ DOMAIN_MKK1 = 8,
+ DOMAIN_MKK2 = 9,
+ DOMAIN_MKK3 = 10,
+ DOMAIN_MAX
+};
+
+#define _TO_DS_ BIT(8)
+#define _FROM_DS_ BIT(9)
+#define _MORE_FRAG_ BIT(10)
+#define _RETRY_ BIT(11)
+#define _PWRMGT_ BIT(12)
+#define _MORE_DATA_ BIT(13)
+#define _PRIVACY_ BIT(14)
+#define _ORDER_ BIT(15)
+
+#define SetToDs(pbuf) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(_TO_DS_)
+
+#define GetToDs(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_TO_DS_)) != 0)
+
+#define ClearToDs(pbuf) \
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(_TO_DS_))
+
+#define SetFrDs(pbuf) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(_FROM_DS_)
+
+#define GetFrDs(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_FROM_DS_)) != 0)
+
+#define ClearFrDs(pbuf) \
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(_FROM_DS_))
+
+#define get_tofr_ds(pframe) ((GetToDs(pframe) << 1) | GetFrDs(pframe))
+
+
+#define SetMFrag(pbuf) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(_MORE_FRAG_)
+
+#define GetMFrag(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_MORE_FRAG_)) != 0)
+
+#define ClearMFrag(pbuf) \
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(_MORE_FRAG_))
+
+#define SetRetry(pbuf) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(_RETRY_)
+
+#define GetRetry(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_RETRY_)) != 0)
+
+#define ClearRetry(pbuf) \
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(_RETRY_))
+
+#define SetPwrMgt(pbuf) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(_PWRMGT_)
+
+#define GetPwrMgt(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_PWRMGT_)) != 0)
+
+#define ClearPwrMgt(pbuf) \
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(_PWRMGT_))
+
+#define SetMData(pbuf) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(_MORE_DATA_)
+
+#define GetMData(pbuf) (((*(__le16 *)(pbuf)) & cpu_to_le16(_MORE_DATA_)) != 0)
+
+#define ClearMData(pbuf) \
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(_MORE_DATA_))
+
+#define SetPrivacy(pbuf) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(_PRIVACY_)
+
+#define GetPrivacy(pbuf) \
+ (((*(__le16 *)(pbuf)) & cpu_to_le16(_PRIVACY_)) != 0)
+
+#define ClearPrivacy(pbuf) \
+ *(__le16 *)(pbuf) &= (~cpu_to_le16(_PRIVACY_))
+
+
+#define GetOrder(pbuf) \
+ (((*(__le16 *)(pbuf)) & cpu_to_le16(_ORDER_)) != 0)
+
+#define GetFrameType(pbuf) \
+ (le16_to_cpu(*(__le16 *)(pbuf)) & (BIT(3) | BIT(2)))
+
+#define SetFrameType(pbuf, type) \
+ do { \
+ *(unsigned short *)(pbuf) &= __constant_cpu_to_le16(~(BIT(3) | BIT(2))); \
+ *(unsigned short *)(pbuf) |= __constant_cpu_to_le16(type); \
+ } while (0)
+
+#define GetFrameSubType(pbuf) (le16_to_cpu(*(__le16 *)(pbuf)) & (BIT(7) |\
+ BIT(6) | BIT(5) | BIT(4) | BIT(3) | BIT(2)))
+
+#define SetFrameSubType(pbuf, type) \
+ do { \
+ *(__le16 *)(pbuf) &= cpu_to_le16(~(BIT(7) | BIT(6) | \
+ BIT(5) | BIT(4) | BIT(3) | BIT(2))); \
+ *(__le16 *)(pbuf) |= cpu_to_le16(type); \
+ } while (0)
+
+#define GetSequence(pbuf) \
+ (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + 22)) >> 4)
+
+#define GetFragNum(pbuf) \
+ (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + 22)) & 0x0f)
+
+#define GetTupleCache(pbuf) \
+ (cpu_to_le16(*(unsigned short *)((size_t)(pbuf) + 22)))
+
+#define SetFragNum(pbuf, num) \
+ do { \
+ *(unsigned short *)((size_t)(pbuf) + 22) = \
+ ((*(unsigned short *)((size_t)(pbuf) + 22)) & \
+ le16_to_cpu(~(0x000f))) | \
+ cpu_to_le16(0x0f & (num)); \
+ } while (0)
+
+#define SetSeqNum(pbuf, num) \
+ do { \
+ *(__le16 *)((size_t)(pbuf) + 22) = \
+ ((*(__le16 *)((size_t)(pbuf) + 22)) & cpu_to_le16((unsigned short)0x000f)) | \
+ cpu_to_le16((unsigned short)(0xfff0 & (num << 4))); \
+ } while (0)
+
+#define SetDuration(pbuf, dur) \
+ *(__le16 *)((size_t)(pbuf) + 2) = cpu_to_le16(0xffff & (dur))
+
+
+#define SetPriority(pbuf, tid) \
+ *(__le16 *)(pbuf) |= cpu_to_le16(tid & 0xf)
+
+#define GetPriority(pbuf) ((le16_to_cpu(*(__le16 *)(pbuf))) & 0xf)
+
+#define SetEOSP(pbuf, eosp) \
+ *(__le16 *)(pbuf) |= cpu_to_le16((eosp & 1) << 4)
+
+#define SetAckpolicy(pbuf, ack) \
+ *(__le16 *)(pbuf) |= cpu_to_le16((ack & 3) << 5)
+
+#define GetAckpolicy(pbuf) (((le16_to_cpu(*(__le16 *)pbuf)) >> 5) & 0x3)
+
+#define GetAMsdu(pbuf) (((le16_to_cpu(*(__le16 *)pbuf)) >> 7) & 0x1)
+
+#define SetAMsdu(pbuf, amsdu) \
+ *(__le16 *)(pbuf) |= cpu_to_le16((amsdu & 1) << 7)
+
+#define GetAid(pbuf) (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + 2)) & 0x3fff)
+
+#define GetTid(pbuf) (le16_to_cpu(*(__le16 *)((size_t)(pbuf) + \
+ (((GetToDs(pbuf)<<1) | GetFrDs(pbuf)) == 3 ? \
+ 30 : 24))) & 0x000f)
+
+#define GetAddr1Ptr(pbuf) ((unsigned char *)((size_t)(pbuf) + 4))
+
+#define GetAddr2Ptr(pbuf) ((unsigned char *)((size_t)(pbuf) + 10))
+
+#define GetAddr3Ptr(pbuf) ((unsigned char *)((size_t)(pbuf) + 16))
+
+#define GetAddr4Ptr(pbuf) ((unsigned char *)((size_t)(pbuf) + 24))
+
+#define MacAddr_isBcst(addr) \
+ ( \
+ ((addr[0] == 0xff) && (addr[1] == 0xff) && \
+ (addr[2] == 0xff) && (addr[3] == 0xff) && \
+ (addr[4] == 0xff) && (addr[5] == 0xff)) ? true : false \
+)
+
+static inline int IS_MCAST(unsigned char *da)
+{
+ if ((*da) & 0x01)
+ return true;
+ else
+ return false;
+}
+
+static inline unsigned char *get_da(unsigned char *pframe)
+{
+ unsigned char *da;
+ unsigned int to_fr_ds = (GetToDs(pframe) << 1) | GetFrDs(pframe);
+
+ switch (to_fr_ds) {
+ case 0x00: /* ToDs=0, FromDs=0 */
+ da = GetAddr1Ptr(pframe);
+ break;
+ case 0x01: /* ToDs=0, FromDs=1 */
+ da = GetAddr1Ptr(pframe);
+ break;
+ case 0x02: /* ToDs=1, FromDs=0 */
+ da = GetAddr3Ptr(pframe);
+ break;
+ default: /* ToDs=1, FromDs=1 */
+ da = GetAddr3Ptr(pframe);
+ break;
+ }
+ return da;
+}
+
+static inline unsigned char *get_sa(unsigned char *pframe)
+{
+ unsigned char *sa;
+ unsigned int to_fr_ds = (GetToDs(pframe) << 1) | GetFrDs(pframe);
+
+ switch (to_fr_ds) {
+ case 0x00: /* ToDs=0, FromDs=0 */
+ sa = GetAddr2Ptr(pframe);
+ break;
+ case 0x01: /* ToDs=0, FromDs=1 */
+ sa = GetAddr3Ptr(pframe);
+ break;
+ case 0x02: /* ToDs=1, FromDs=0 */
+ sa = GetAddr2Ptr(pframe);
+ break;
+ default: /* ToDs=1, FromDs=1 */
+ sa = GetAddr4Ptr(pframe);
+ break;
+ }
+ return sa;
+}
+
+static inline unsigned char *get_hdr_bssid(unsigned char *pframe)
+{
+ unsigned char *sa;
+ unsigned int to_fr_ds = (GetToDs(pframe) << 1) | GetFrDs(pframe);
+
+ switch (to_fr_ds) {
+ case 0x00: /* ToDs=0, FromDs=0 */
+ sa = GetAddr3Ptr(pframe);
+ break;
+ case 0x01: /* ToDs=0, FromDs=1 */
+ sa = GetAddr2Ptr(pframe);
+ break;
+ case 0x02: /* ToDs=1, FromDs=0 */
+ sa = GetAddr1Ptr(pframe);
+ break;
+ case 0x03: /* ToDs=1, FromDs=1 */
+ sa = GetAddr1Ptr(pframe);
+ break;
+ default:
+ sa = NULL; /* */
+ break;
+ }
+ return sa;
+}
+
+static inline int IsFrameTypeCtrl(unsigned char *pframe)
+{
+ if (WIFI_CTRL_TYPE == GetFrameType(pframe))
+ return true;
+ else
+ return false;
+}
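The frame-control accessors above all operate on the first little-endian 16-bit word of an 802.11 MAC header. A short illustrative sketch (not part of the patch; the helper names are invented) of how they combine when classifying a received frame and stamping an outgoing one:

/* Illustrative only -- not code from the original patch. */
static inline int frame_is_beacon(unsigned char *pframe)
{
	return GetFrameSubType(pframe) == WIFI_BEACON;
}

static inline void stamp_probe_req_fctrl(unsigned char *hdr)
{
	*(__le16 *)hdr = 0;			/* clear the frame-control word */
	SetFrameSubType(hdr, WIFI_PROBEREQ);	/* type = mgmt, subtype = probe req */
	/* ToDS/FromDS stay 0 for management frames */
}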
+/*-----------------------------------------------------------------------------
+ Below is for the security related definition
+------------------------------------------------------------------------------*/
+#define _RESERVED_FRAME_TYPE_ 0
+#define _SKB_FRAME_TYPE_ 2
+#define _PRE_ALLOCMEM_ 1
+#define _PRE_ALLOCHDR_ 3
+#define _PRE_ALLOCLLCHDR_ 4
+#define _PRE_ALLOCICVHDR_ 5
+#define _PRE_ALLOCMICHDR_ 6
+
+#define _SIFSTIME_ \
+ ((priv->pmib->dot11BssType.net_work_type & WIRELESS_11A) ? 16 : 10)
+#define _ACKCTSLNG_ 14 /* 14 bytes long, including crclng */
+#define _CRCLNG_ 4
+
+#define _ASOCREQ_IE_OFFSET_ 4 /* excluding wlan_hdr */
+#define _ASOCRSP_IE_OFFSET_ 6
+#define _REASOCREQ_IE_OFFSET_ 10
+#define _REASOCRSP_IE_OFFSET_ 6
+#define _PROBEREQ_IE_OFFSET_ 0
+#define _PROBERSP_IE_OFFSET_ 12
+#define _AUTH_IE_OFFSET_ 6
+#define _DEAUTH_IE_OFFSET_ 0
+#define _BEACON_IE_OFFSET_ 12
+#define _PUBLIC_ACTION_IE_OFFSET_ 8
+
+#define _FIXED_IE_LENGTH_ _BEACON_IE_OFFSET_
+
+#define _SSID_IE_ 0
+#define _SUPPORTEDRATES_IE_ 1
+#define _DSSET_IE_ 3
+#define _TIM_IE_ 5
+#define _IBSS_PARA_IE_ 6
+#define _COUNTRY_IE_ 7
+#define _CHLGETXT_IE_ 16
+#define _SUPPORTED_CH_IE_ 36
+#define _CH_SWTICH_ANNOUNCE_	37	/* Channel Switch Announcement */
+#define _RSN_IE_2_ 48
+#define _SSN_IE_1_ 221
+#define _ERPINFO_IE_ 42
+#define _EXT_SUPPORTEDRATES_IE_ 50
+
+#define _HT_CAPABILITY_IE_ 45
+#define _FTIE_ 55
+#define _TIMEOUT_ITVL_IE_ 56
+#define _SRC_IE_ 59
+#define _HT_EXTRA_INFO_IE_ 61
+#define _HT_ADD_INFO_IE_ 61 /* _HT_EXTRA_INFO_IE_ */
+#define _WAPI_IE_ 68
+
+
+#define EID_BSSCoexistence 72 /* 20/40 BSS Coexistence */
+#define EID_BSSIntolerantChlReport 73
+#define _RIC_Descriptor_IE_ 75
+
+#define _LINK_ID_IE_ 101
+#define _CH_SWITCH_TIMING_ 104
+#define _PTI_BUFFER_STATUS_ 106
+#define _EXT_CAP_IE_ 127
+#define _VENDOR_SPECIFIC_IE_ 221
+
+#define _RESERVED47_ 47
+
+/* ---------------------------------------------------------------------------
+ Below is the fixed elements...
+-----------------------------------------------------------------------------*/
+#define _AUTH_ALGM_NUM_ 2
+#define _AUTH_SEQ_NUM_ 2
+#define _BEACON_ITERVAL_ 2
+#define _CAPABILITY_ 2
+#define _CURRENT_APADDR_ 6
+#define _LISTEN_INTERVAL_ 2
+#define _RSON_CODE_ 2
+#define _ASOC_ID_ 2
+#define _STATUS_CODE_ 2
+#define _TIMESTAMP_ 8
+
+#define AUTH_ODD_TO 0
+#define AUTH_EVEN_TO 1
+
+#define WLAN_ETHCONV_ENCAP 1
+#define WLAN_ETHCONV_RFC1042 2
+#define WLAN_ETHCONV_8021h 3
+
+#define cap_ESS BIT(0)
+#define cap_IBSS BIT(1)
+#define cap_CFPollable BIT(2)
+#define cap_CFRequest BIT(3)
+#define cap_Privacy BIT(4)
+#define cap_ShortPremble BIT(5)
+#define cap_PBCC BIT(6)
+#define cap_ChAgility BIT(7)
+#define cap_SpecMgmt BIT(8)
+#define cap_QoSi BIT(9)
+#define cap_ShortSlot BIT(10)
+
+/*-----------------------------------------------------------------------------
+ Below is the definition for 802.11i / 802.1x
+------------------------------------------------------------------------------*/
+#define _IEEE8021X_MGT_ 1 /* WPA */
+#define _IEEE8021X_PSK_ 2 /* WPA with pre-shared key */
+
+/*
+#define _NO_PRIVACY_ 0
+#define _WEP_40_PRIVACY_ 1
+#define _TKIP_PRIVACY_ 2
+#define _WRAP_PRIVACY_ 3
+#define _CCMP_PRIVACY_ 4
+#define _WEP_104_PRIVACY_ 5
+#define _WEP_WPA_MIXED_PRIVACY_ 6 WEP + WPA
+*/
+
+/*-----------------------------------------------------------------------------
+ Below is the definition for WMM
+------------------------------------------------------------------------------*/
+#define _WMM_IE_Length_ 7 /* for WMM STA */
+#define _WMM_Para_Element_Length_ 24
+
+
+/*-----------------------------------------------------------------------------
+ Below is the definition for 802.11n
+------------------------------------------------------------------------------*/
+
+#define SetOrderBit(pbuf) \
+ do { \
+ *(unsigned short *)(pbuf) |= cpu_to_le16(_ORDER_); \
+ } while (0)
+
+#define GetOrderBit(pbuf) \
+ (((*(unsigned short *)(pbuf)) & le16_to_cpu(_ORDER_)) != 0)
+
+
+/**
+ * struct rtw_ieee80211_bar - HT Block Ack Request
+ *
+ * This structure refers to "HT BlockAckReq" as
+ * described in 802.11n draft section 7.2.1.7.1
+ */
+struct rtw_ieee80211_bar {
+ unsigned short frame_control;
+ unsigned short duration;
+ unsigned char ra[6];
+ unsigned char ta[6];
+ unsigned short control;
+ unsigned short start_seq_num;
+} __packed;
+
+/* 802.11 BAR control masks */
+#define IEEE80211_BAR_CTRL_ACK_POLICY_NORMAL 0x0000
+#define IEEE80211_BAR_CTRL_CBMTID_COMPRESSED_BA 0x0004
+
+/**
+ * struct rtw_ieee80211_ht_cap - HT capabilities
+ *
+ * This structure refers to "HT capabilities element" as
+ * described in 802.11n draft section 7.3.2.52
+ */
+
+struct rtw_ieee80211_ht_cap {
+ unsigned short cap_info;
+ unsigned char ampdu_params_info;
+ unsigned char supp_mcs_set[16];
+ unsigned short extended_ht_cap_info;
+ unsigned int tx_BF_cap_info;
+ unsigned char antenna_selection_info;
+} __packed;
+
+/**
+ * struct ieee80211_ht_addt_info - HT additional information
+ *
+ * This structure refers to "HT information element" as
+ * described in 802.11n draft section 7.3.2.53
+ */
+struct ieee80211_ht_addt_info {
+ unsigned char control_chan;
+ unsigned char ht_param;
+ unsigned short operation_mode;
+ unsigned short stbc_param;
+ unsigned char basic_set[16];
+} __packed;
+
+struct HT_caps_element {
+ union {
+ struct {
+ __le16 HT_caps_info;
+ unsigned char AMPDU_para;
+ unsigned char MCS_rate[16];
+ unsigned short HT_ext_caps;
+ unsigned int Beamforming_caps;
+ unsigned char ASEL_caps;
+ } HT_cap_element;
+ unsigned char HT_cap[26];
+ } u;
+} __packed;
+
+struct HT_info_element {
+ unsigned char primary_channel;
+ unsigned char infos[5];
+ unsigned char MCS_rate[16];
+} __packed;
+
+struct AC_param {
+ unsigned char ACI_AIFSN;
+ unsigned char CW;
+ __le16 TXOP_limit;
+} __packed;
+
+struct WMM_para_element {
+ unsigned char QoS_info;
+ unsigned char reserved;
+ struct AC_param ac_param[4];
+} __packed;
+
+struct ADDBA_request {
+ unsigned char dialog_token;
+ unsigned short BA_para_set;
+ unsigned short BA_timeout_value;
+ unsigned short BA_starting_seqctrl;
+} __packed;
+
+enum ht_cap_ampdu_factor {
+ MAX_AMPDU_FACTOR_8K = 0,
+ MAX_AMPDU_FACTOR_16K = 1,
+ MAX_AMPDU_FACTOR_32K = 2,
+ MAX_AMPDU_FACTOR_64K = 3,
+};
+
+/* 802.11n HT capabilities masks */
+#define IEEE80211_HT_CAP_SUP_WIDTH 0x0002
+#define IEEE80211_HT_CAP_SM_PS 0x000C
+#define IEEE80211_HT_CAP_GRN_FLD 0x0010
+#define IEEE80211_HT_CAP_SGI_20 0x0020
+#define IEEE80211_HT_CAP_SGI_40 0x0040
+#define IEEE80211_HT_CAP_TX_STBC 0x0080
+#define IEEE80211_HT_CAP_RX_STBC 0x0300
+#define IEEE80211_HT_CAP_DELAY_BA 0x0400
+#define IEEE80211_HT_CAP_MAX_AMSDU 0x0800
+#define IEEE80211_HT_CAP_DSSSCCK40 0x1000
+/* 802.11n HT capability AMPDU settings */
+#define IEEE80211_HT_CAP_AMPDU_FACTOR 0x03
+#define IEEE80211_HT_CAP_AMPDU_DENSITY 0x1C
+/* 802.11n HT capability MSC set */
+#define IEEE80211_SUPP_MCS_SET_UEQM 4
+#define IEEE80211_HT_CAP_MAX_STREAMS 4
+#define IEEE80211_SUPP_MCS_SET_LEN 10
+/* maximum streams the spec allows */
+#define IEEE80211_HT_CAP_MCS_TX_DEFINED 0x01
+#define IEEE80211_HT_CAP_MCS_TX_RX_DIFF 0x02
+#define IEEE80211_HT_CAP_MCS_TX_STREAMS 0x0C
+#define IEEE80211_HT_CAP_MCS_TX_UEQM 0x10
+/* 802.11n HT IE masks */
+#define IEEE80211_HT_IE_CHA_SEC_OFFSET 0x03
+#define IEEE80211_HT_IE_CHA_SEC_NONE 0x00
+#define IEEE80211_HT_IE_CHA_SEC_ABOVE 0x01
+#define IEEE80211_HT_IE_CHA_SEC_BELOW 0x03
+#define IEEE80211_HT_IE_CHA_WIDTH 0x04
+#define IEEE80211_HT_IE_HT_PROTECTION 0x0003
+#define IEEE80211_HT_IE_NON_GF_STA_PRSNT 0x0004
+#define IEEE80211_HT_IE_NON_HT_STA_PRSNT 0x0010
+
+/* block-ack parameters */
+#define IEEE80211_ADDBA_PARAM_POLICY_MASK 0x0002
+#define IEEE80211_ADDBA_PARAM_TID_MASK 0x003C
+#define RTW_IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK 0xFFC0
+#define IEEE80211_DELBA_PARAM_TID_MASK 0xF000
+#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
+
+/*
+ * A-MPDU buffer sizes
+ * According to the IEEE 802.11n spec, the size varies from 8K to 64K (in powers of 2)
+ */
+#define IEEE80211_MIN_AMPDU_BUF 0x8
+#define IEEE80211_MAX_AMPDU_BUF 0x40
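The block-ack masks and A-MPDU limits above are what a receiver needs to decode the BA parameter set carried in struct ADDBA_request. A hedged sketch of that decoding (illustrative only; the function, and the assumption that the caller has already converted the field to CPU byte order, are not from the patch):

/* Illustrative only: pull the TID and reorder-buffer size out of an ADDBA
 * request.  "para" is assumed to already be in CPU byte order. */
static inline void parse_addba_param_set(u16 para, u8 *tid, u16 *buf_size)
{
	*tid = (para & IEEE80211_ADDBA_PARAM_TID_MASK) >> 2;
	*buf_size = (para & RTW_IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK) >> 6;
	if (*buf_size == 0 || *buf_size > IEEE80211_MAX_AMPDU_BUF)
		*buf_size = IEEE80211_MAX_AMPDU_BUF;	/* clamp to spec maximum */
}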
+
+
+/* Spatial Multiplexing Power Save Modes */
+#define WLAN_HT_CAP_SM_PS_STATIC 0
+#define WLAN_HT_CAP_SM_PS_DYNAMIC 1
+#define WLAN_HT_CAP_SM_PS_INVALID 2
+#define WLAN_HT_CAP_SM_PS_DISABLED 3
+
+
+#define OP_MODE_PURE 0
+#define OP_MODE_MAY_BE_LEGACY_STAS 1
+#define OP_MODE_20MHZ_HT_STA_ASSOCED 2
+#define OP_MODE_MIXED 3
+
+#define HT_INFO_HT_PARAM_SECONDARY_CHNL_OFF_MASK ((u8) BIT(0) | BIT(1))
+#define HT_INFO_HT_PARAM_SECONDARY_CHNL_ABOVE ((u8) BIT(0))
+#define HT_INFO_HT_PARAM_SECONDARY_CHNL_BELOW ((u8) BIT(0) | BIT(1))
+#define HT_INFO_HT_PARAM_REC_TRANS_CHNL_WIDTH ((u8) BIT(2))
+#define HT_INFO_HT_PARAM_RIFS_MODE ((u8) BIT(3))
+#define HT_INFO_HT_PARAM_CTRL_ACCESS_ONLY ((u8) BIT(4))
+#define HT_INFO_HT_PARAM_SRV_INTERVAL_GRANULARITY ((u8) BIT(5))
+
+#define HT_INFO_OPERATION_MODE_OP_MODE_MASK \
+ ((u16) (0x0001 | 0x0002))
+#define HT_INFO_OPERATION_MODE_OP_MODE_OFFSET 0
+#define HT_INFO_OPERATION_MODE_NON_GF_DEVS_PRESENT ((u8) BIT(2))
+#define HT_INFO_OPERATION_MODE_TRANSMIT_BURST_LIMIT ((u8) BIT(3))
+#define HT_INFO_OPERATION_MODE_NON_HT_STA_PRESENT ((u8) BIT(4))
+
+#define HT_INFO_STBC_PARAM_DUAL_BEACON ((u16) BIT(6))
+#define HT_INFO_STBC_PARAM_DUAL_STBC_PROTECT ((u16) BIT(7))
+#define HT_INFO_STBC_PARAM_SECONDARY_BC ((u16) BIT(8))
+#define HT_INFO_STBC_PARAM_LSIG_TXOP_PROTECT_ALLOWED ((u16) BIT(9))
+#define HT_INFO_STBC_PARAM_PCO_ACTIVE ((u16) BIT(10))
+#define HT_INFO_STBC_PARAM_PCO_PHASE ((u16) BIT(11))
+
+/* ===============WPS Section=============== */
+/* For WPSv1.0 */
+#define WPSOUI 0x0050f204
+/* WPS attribute ID */
+#define WPS_ATTR_VER1 0x104A
+#define WPS_ATTR_SIMPLE_CONF_STATE 0x1044
+#define WPS_ATTR_RESP_TYPE 0x103B
+#define WPS_ATTR_UUID_E 0x1047
+#define WPS_ATTR_MANUFACTURER 0x1021
+#define WPS_ATTR_MODEL_NAME 0x1023
+#define WPS_ATTR_MODEL_NUMBER 0x1024
+#define WPS_ATTR_SERIAL_NUMBER 0x1042
+#define WPS_ATTR_PRIMARY_DEV_TYPE 0x1054
+#define WPS_ATTR_SEC_DEV_TYPE_LIST 0x1055
+#define WPS_ATTR_DEVICE_NAME 0x1011
+#define WPS_ATTR_CONF_METHOD 0x1008
+#define WPS_ATTR_RF_BANDS 0x103C
+#define WPS_ATTR_DEVICE_PWID 0x1012
+#define WPS_ATTR_REQUEST_TYPE 0x103A
+#define WPS_ATTR_ASSOCIATION_STATE 0x1002
+#define WPS_ATTR_CONFIG_ERROR 0x1009
+#define WPS_ATTR_VENDOR_EXT 0x1049
+#define WPS_ATTR_SELECTED_REGISTRAR 0x1041
+
+/* Value of WPS attribute "WPS_ATTR_DEVICE_NAME" */
+#define WPS_MAX_DEVICE_NAME_LEN 32
+
+/* Value of WPS Request Type Attribute */
+#define WPS_REQ_TYPE_ENROLLEE_INFO_ONLY 0x00
+#define WPS_REQ_TYPE_ENROLLEE_OPEN_8021X 0x01
+#define WPS_REQ_TYPE_REGISTRAR 0x02
+#define WPS_REQ_TYPE_WLAN_MANAGER_REGISTRAR 0x03
+
+/* Value of WPS Response Type Attribute */
+#define WPS_RESPONSE_TYPE_INFO_ONLY 0x00
+#define WPS_RESPONSE_TYPE_8021X 0x01
+#define WPS_RESPONSE_TYPE_REGISTRAR 0x02
+#define WPS_RESPONSE_TYPE_AP 0x03
+
+/* Value of WPS WiFi Simple Configuration State Attribute */
+#define WPS_WSC_STATE_NOT_CONFIG 0x01
+#define WPS_WSC_STATE_CONFIG 0x02
+
+/* Value of WPS Version Attribute */
+#define WPS_VERSION_1 0x10
+
+/* Value of WPS Configuration Method Attribute */
+#define WPS_CONFIG_METHOD_FLASH 0x0001
+#define WPS_CONFIG_METHOD_ETHERNET 0x0002
+#define WPS_CONFIG_METHOD_LABEL 0x0004
+#define WPS_CONFIG_METHOD_DISPLAY 0x0008
+#define WPS_CONFIG_METHOD_E_NFC 0x0010
+#define WPS_CONFIG_METHOD_I_NFC 0x0020
+#define WPS_CONFIG_METHOD_NFC 0x0040
+#define WPS_CONFIG_METHOD_PBC 0x0080
+#define WPS_CONFIG_METHOD_KEYPAD 0x0100
+#define WPS_CONFIG_METHOD_VPBC 0x0280
+#define WPS_CONFIG_METHOD_PPBC 0x0480
+#define WPS_CONFIG_METHOD_VDISPLAY 0x2008
+#define WPS_CONFIG_METHOD_PDISPLAY 0x4008
+
+/* Value of Category ID of WPS Primary Device Type Attribute */
+#define WPS_PDT_CID_DISPLAYS 0x0007
+#define WPS_PDT_CID_MULIT_MEDIA 0x0008
+#define WPS_PDT_CID_RTK_WIDI WPS_PDT_CID_MULIT_MEDIA
+
+/* Value of Sub Category ID of WPS Primary Device Type Attribute */
+#define WPS_PDT_SCID_MEDIA_SERVER 0x0005
+#define WPS_PDT_SCID_RTK_DMP WPS_PDT_SCID_MEDIA_SERVER
+
+/* Value of Device Password ID */
+#define WPS_DPID_P 0x0000
+#define WPS_DPID_USER_SPEC 0x0001
+#define WPS_DPID_MACHINE_SPEC 0x0002
+#define WPS_DPID_REKEY 0x0003
+#define WPS_DPID_PBC 0x0004
+#define WPS_DPID_REGISTRAR_SPEC 0x0005
+
+/* Value of WPS RF Bands Attribute */
+#define WPS_RF_BANDS_2_4_GHZ 0x01
+#define WPS_RF_BANDS_5_GHZ 0x02
+
+/* Value of WPS Association State Attribute */
+#define WPS_ASSOC_STATE_NOT_ASSOCIATED 0x00
+#define WPS_ASSOC_STATE_CONNECTION_SUCCESS 0x01
+#define WPS_ASSOC_STATE_CONFIGURATION_FAILURE 0x02
+#define WPS_ASSOC_STATE_ASSOCIATION_FAILURE 0x03
+#define WPS_ASSOC_STATE_IP_FAILURE 0x04
+
+/* =====================P2P Section===================== */
+/* For P2P */
+#define P2POUI 0x506F9A09
+
+/* P2P Attribute ID */
+#define P2P_ATTR_STATUS 0x00
+#define P2P_ATTR_MINOR_REASON_CODE 0x01
+#define P2P_ATTR_CAPABILITY 0x02
+#define P2P_ATTR_DEVICE_ID 0x03
+#define P2P_ATTR_GO_INTENT 0x04
+#define P2P_ATTR_CONF_TIMEOUT 0x05
+#define P2P_ATTR_LISTEN_CH 0x06
+#define P2P_ATTR_GROUP_BSSID 0x07
+#define P2P_ATTR_EX_LISTEN_TIMING 0x08
+#define P2P_ATTR_INTENTED_IF_ADDR 0x09
+#define P2P_ATTR_MANAGEABILITY 0x0A
+#define P2P_ATTR_CH_LIST 0x0B
+#define P2P_ATTR_NOA 0x0C
+#define P2P_ATTR_DEVICE_INFO 0x0D
+#define P2P_ATTR_GROUP_INFO 0x0E
+#define P2P_ATTR_GROUP_ID 0x0F
+#define P2P_ATTR_INTERFACE 0x10
+#define P2P_ATTR_OPERATING_CH 0x11
+#define P2P_ATTR_INVITATION_FLAGS 0x12
+
+/* Value of Status Attribute */
+#define P2P_STATUS_SUCCESS 0x00
+#define P2P_STATUS_FAIL_INFO_UNAVAILABLE 0x01
+#define P2P_STATUS_FAIL_INCOMPATIBLE_PARAM 0x02
+#define P2P_STATUS_FAIL_LIMIT_REACHED 0x03
+#define P2P_STATUS_FAIL_INVALID_PARAM 0x04
+#define P2P_STATUS_FAIL_REQUEST_UNABLE 0x05
+#define P2P_STATUS_FAIL_PREVOUS_PROTO_ERR 0x06
+#define P2P_STATUS_FAIL_NO_COMMON_CH 0x07
+#define P2P_STATUS_FAIL_UNKNOWN_P2PGROUP 0x08
+#define P2P_STATUS_FAIL_BOTH_GOINTENT_15 0x09
+#define P2P_STATUS_FAIL_INCOMPATIBLE_PROVSION 0x0A
+#define P2P_STATUS_FAIL_USER_REJECT 0x0B
+
+/* Value of Invitation Flags Attribute */
+#define P2P_INVITATION_FLAGS_PERSISTENT BIT(0)
+
+#define DMP_P2P_DEVCAP_SUPPORT (P2P_DEVCAP_SERVICE_DISCOVERY | \
+ P2P_DEVCAP_CLIENT_DISCOVERABILITY | \
+ P2P_DEVCAP_CONCURRENT_OPERATION | \
+ P2P_DEVCAP_INVITATION_PROC)
+
+#define DMP_P2P_GRPCAP_SUPPORT (P2P_GRPCAP_INTRABSS)
+
+/* Value of Device Capability Bitmap */
+#define P2P_DEVCAP_SERVICE_DISCOVERY BIT(0)
+#define P2P_DEVCAP_CLIENT_DISCOVERABILITY BIT(1)
+#define P2P_DEVCAP_CONCURRENT_OPERATION BIT(2)
+#define P2P_DEVCAP_INFRA_MANAGED BIT(3)
+#define P2P_DEVCAP_DEVICE_LIMIT BIT(4)
+#define P2P_DEVCAP_INVITATION_PROC BIT(5)
+
+/* Value of Group Capability Bitmap */
+#define P2P_GRPCAP_GO BIT(0)
+#define P2P_GRPCAP_PERSISTENT_GROUP BIT(1)
+#define P2P_GRPCAP_GROUP_LIMIT BIT(2)
+#define P2P_GRPCAP_INTRABSS BIT(3)
+#define P2P_GRPCAP_CROSS_CONN BIT(4)
+#define P2P_GRPCAP_PERSISTENT_RECONN BIT(5)
+#define P2P_GRPCAP_GROUP_FORMATION BIT(6)
+
+/* P2P Public Action Frame (Management Frame) */
+#define P2P_PUB_ACTION_ACTION 0x09
+
+/* P2P Public Action Frame Type */
+#define P2P_GO_NEGO_REQ 0
+#define P2P_GO_NEGO_RESP 1
+#define P2P_GO_NEGO_CONF 2
+#define P2P_INVIT_REQ 3
+#define P2P_INVIT_RESP 4
+#define P2P_DEVDISC_REQ 5
+#define P2P_DEVDISC_RESP 6
+#define P2P_PROVISION_DISC_REQ 7
+#define P2P_PROVISION_DISC_RESP 8
+
+/* P2P Action Frame Type */
+#define P2P_NOTICE_OF_ABSENCE 0
+#define P2P_PRESENCE_REQUEST 1
+#define P2P_PRESENCE_RESPONSE 2
+#define P2P_GO_DISC_REQUEST 3
+
+
+#define P2P_MAX_PERSISTENT_GROUP_NUM 10
+
+#define P2P_PROVISIONING_SCAN_CNT 3
+
+#define P2P_WILDCARD_SSID_LEN 7
+
+/* default value, used when: (1) p2p is disabled, or (2) p2p is enabled
+ * but only one scan phase is done */
+#define P2P_FINDPHASE_EX_NONE 0
+/* used when p2p is enabled and we want to do one scan phase and
+ * P2P_FINDPHASE_EX_MAX-1 find phases */
+#define P2P_FINDPHASE_EX_FULL 1
+#define P2P_FINDPHASE_EX_SOCIAL_FIRST (P2P_FINDPHASE_EX_FULL+1)
+#define P2P_FINDPHASE_EX_MAX 4
+#define P2P_FINDPHASE_EX_SOCIAL_LAST P2P_FINDPHASE_EX_MAX
+
+/* 5 seconds timeout for sending the provision discovery request */
+#define P2P_PROVISION_TIMEOUT 5000
+/* 3 seconds timeout for sending the prov disc request concurrent mode */
+#define P2P_CONCURRENT_PROVISION_TIME 3000
+/* 5 seconds timeout for receiving the group negotiation response */
+#define P2P_GO_NEGO_TIMEOUT 5000
+/* 3 seconds timeout for sending the negotiation request under concurrent mode */
+#define P2P_CONCURRENT_GO_NEGO_TIME 3000
+/* 100ms */
+#define P2P_TX_PRESCAN_TIMEOUT 100
+/* 5 seconds timeout for sending the invitation request */
+#define P2P_INVITE_TIMEOUT 5000
+/* 3 seconds timeout for sending the invitation request under concurrent mode */
+#define P2P_CONCURRENT_INVITE_TIME 3000
+/* 25 seconds timeout to reset the scan channel (based on channel plan) */
+#define P2P_RESET_SCAN_CH 25000
+#define P2P_MAX_INTENT 15
+
+#define P2P_MAX_NOA_NUM 2
+
+/* WPS Configuration Method */
+#define WPS_CM_NONE 0x0000
+#define WPS_CM_LABEL 0x0004
+#define WPS_CM_DISPLYA 0x0008
+#define WPS_CM_EXTERNAL_NFC_TOKEN 0x0010
+#define WPS_CM_INTEGRATED_NFC_TOKEN 0x0020
+#define WPS_CM_NFC_INTERFACE 0x0040
+#define WPS_CM_PUSH_BUTTON 0x0080
+#define WPS_CM_KEYPAD 0x0100
+#define WPS_CM_SW_PUHS_BUTTON 0x0280
+#define WPS_CM_HW_PUHS_BUTTON 0x0480
+#define WPS_CM_SW_DISPLAY_P 0x2008
+#define WPS_CM_LCD_DISPLAY_P 0x4008
+
+enum P2P_ROLE {
+ P2P_ROLE_DISABLE = 0,
+ P2P_ROLE_DEVICE = 1,
+ P2P_ROLE_CLIENT = 2,
+ P2P_ROLE_GO = 3
+};
+
+enum P2P_STATE {
+ P2P_STATE_NONE = 0, /* P2P disable */
+	/* P2P is enabled but doing nothing */
+ P2P_STATE_IDLE = 1,
+ P2P_STATE_LISTEN = 2, /* In pure listen state */
+ P2P_STATE_SCAN = 3, /* In scan phase */
+ /* In the listen state of find phase */
+ P2P_STATE_FIND_PHASE_LISTEN = 4,
+ /* In the search state of find phase */
+ P2P_STATE_FIND_PHASE_SEARCH = 5,
+ /* In P2P provisioning discovery */
+ P2P_STATE_TX_PROVISION_DIS_REQ = 6,
+ P2P_STATE_RX_PROVISION_DIS_RSP = 7,
+ P2P_STATE_RX_PROVISION_DIS_REQ = 8,
+	/* Doing the group owner negotiation handshake */
+ P2P_STATE_GONEGO_ING = 9,
+	/* finish the group negotiation handshake with success */
+ P2P_STATE_GONEGO_OK = 10,
+	/* finish the group negotiation handshake with failure */
+ P2P_STATE_GONEGO_FAIL = 11,
+	/* receiving a P2P Invitation request that matches the profile */
+ P2P_STATE_RECV_INVITE_REQ_MATCH = 12,
+ /* Doing the P2P WPS */
+ P2P_STATE_PROVISIONING_ING = 13,
+ /* Finish the P2P WPS */
+ P2P_STATE_PROVISIONING_DONE = 14,
+ /* Transmit the P2P Invitation request */
+ P2P_STATE_TX_INVITE_REQ = 15,
+ /* Receiving the P2P Invitation response */
+ P2P_STATE_RX_INVITE_RESP_OK = 16,
+	/* receiving a P2P Invitation request that does not match the profile */
+ P2P_STATE_RECV_INVITE_REQ_DISMATCH = 17,
+	/* receiving a P2P Invitation request while this device is the GO */
+ P2P_STATE_RECV_INVITE_REQ_GO = 18,
+	/* receiving a P2P Invitation request to join an existing P2P group */
+ P2P_STATE_RECV_INVITE_REQ_JOIN = 19,
+	/* receiving a P2P Invitation response indicating failure */
+ P2P_STATE_RX_INVITE_RESP_FAIL = 20,
+	/* receiving a P2P negotiation response indicating the information is not available */
+ P2P_STATE_RX_INFOR_NOREADY = 21,
+	/* sending a P2P negotiation response indicating the information is not available */
+ P2P_STATE_TX_INFOR_NOREADY = 22,
+};
+
+enum P2P_WPSINFO {
+ P2P_NO_WPSINFO = 0,
+ P2P_GOT_WPSINFO_PEER_DISPLAY_PIN = 1,
+ P2P_GOT_WPSINFO_SELF_DISPLAY_PIN = 2,
+ P2P_GOT_WPSINFO_PBC = 3,
+};
+
+#define P2P_PRIVATE_IOCTL_SET_LEN 64
+
+enum P2P_PROTO_WK_ID {
+ P2P_FIND_PHASE_WK = 0,
+ P2P_RESTORE_STATE_WK = 1,
+ P2P_PRE_TX_PROVDISC_PROCESS_WK = 2,
+ P2P_PRE_TX_NEGOREQ_PROCESS_WK = 3,
+ P2P_PRE_TX_INVITEREQ_PROCESS_WK = 4,
+	P2P_AP_P2P_CH_SWITCH_PROCESS_WK = 5,
+ P2P_RO_CH_WK = 6,
+};
+
+enum P2P_PS_STATE {
+ P2P_PS_DISABLE = 0,
+ P2P_PS_ENABLE = 1,
+ P2P_PS_SCAN = 2,
+ P2P_PS_SCAN_DONE = 3,
+ P2P_PS_ALLSTASLEEP = 4, /* for P2P GO */
+};
+
+enum P2P_PS_MODE {
+ P2P_PS_NONE = 0,
+ P2P_PS_CTWINDOW = 1,
+ P2P_PS_NOA = 2,
+ P2P_PS_MIX = 3, /* CTWindow and NoA */
+};
+
+/* =====================WFD Section===================== */
+/* For Wi-Fi Display */
+#define WFD_ATTR_DEVICE_INFO 0x00
+#define WFD_ATTR_ASSOC_BSSID 0x01
+#define WFD_ATTR_COUPLED_SINK_INFO 0x06
+#define WFD_ATTR_LOCAL_IP_ADDR 0x08
+#define WFD_ATTR_SESSION_INFO 0x09
+#define WFD_ATTR_ALTER_MAC 0x0a
+
+/* For WFD Device Information Attribute */
+#define WFD_DEVINFO_SOURCE 0x0000
+#define WFD_DEVINFO_PSINK 0x0001
+#define WFD_DEVINFO_SSINK 0x0002
+#define WFD_DEVINFO_DUAL 0x0003
+
+#define WFD_DEVINFO_SESSION_AVAIL 0x0010
+#define WFD_DEVINFO_WSD 0x0040
+#define WFD_DEVINFO_PC_TDLS 0x0080
+#define WFD_DEVINFO_HDCP_SUPPORT 0x0100
+
+#define IP_MCAST_MAC(mac) \
+ ((mac[0] == 0x01) && (mac[1] == 0x00) && (mac[2] == 0x5e))
+#define ICMPV6_MCAST_MAC(mac) \
+ ((mac[0] == 0x33) && (mac[1] == 0x33) && (mac[2] != 0xff))
+
+#endif /* _WIFI_H_ */
diff --git a/drivers/staging/rtl8188eu/include/wlan_bssdef.h b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
new file mode 100644
index 00000000000..e70075d586d
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
@@ -0,0 +1,347 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __WLAN_BSSDEF_H__
+#define __WLAN_BSSDEF_H__
+
+
+#define MAX_IE_SZ 768
+
+#define NDIS_802_11_LENGTH_SSID 32
+#define NDIS_802_11_LENGTH_RATES 8
+#define NDIS_802_11_LENGTH_RATES_EX 16
+
+#define NDIS_802_11_RSSI long /* in dBm */
+
+struct ndis_802_11_ssid {
+ u32 SsidLength;
+ u8 Ssid[32];
+};
+
+enum NDIS_802_11_NETWORK_TYPE {
+ Ndis802_11FH,
+ Ndis802_11DS,
+ Ndis802_11OFDM5,
+ Ndis802_11OFDM24,
+ Ndis802_11NetworkTypeMax /* dummy upper bound */
+};
+
+struct ndis_802_11_config_fh {
+ u32 Length; /* Length of structure */
+ u32 HopPattern; /* As defined by 802.11, MSB set */
+ u32 HopSet; /* to one if non-802.11 */
+ u32 DwellTime; /* units are Kusec */
+};
+
+/*
+ * FW will only save the channel number in DSConfig.
+ * ODI Handler will convert the channel number to freq. number.
+ */
+struct ndis_802_11_config {
+ u32 Length; /* Length of structure */
+ u32 BeaconPeriod; /* units are Kusec */
+ u32 ATIMWindow; /* units are Kusec */
+ u32 DSConfig; /* Frequency, units are kHz */
+ struct ndis_802_11_config_fh FHConfig;
+};
+
+enum ndis_802_11_network_infra {
+ Ndis802_11IBSS,
+ Ndis802_11Infrastructure,
+ Ndis802_11AutoUnknown,
+ Ndis802_11InfrastructureMax, /* dummy upper bound */
+ Ndis802_11APMode
+};
+
+struct ndis_802_11_fixed_ie {
+ u8 Timestamp[8];
+ u16 BeaconInterval;
+ u16 Capabilities;
+};
+
+
+
+struct ndis_802_11_var_ie {
+ u8 ElementID;
+ u8 Length;
+ u8 data[1];
+};
+
+/*
+ * Length is the sum, rounded up to a multiple of 4 bytes, of
+ * [ETH_ALEN] + 2 + sizeof (struct ndis_802_11_ssid) + sizeof (u32)
+ * + sizeof (NDIS_802_11_RSSI) + sizeof (enum NDIS_802_11_NETWORK_TYPE)
+ * + sizeof (struct ndis_802_11_config)
+ * + NDIS_802_11_LENGTH_RATES_EX + IELength
+ *
+ * Except the IELength, all other fields are fixed length.
+ * Therefore, we can define a macro to represent the partial sum. */
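A sketch of the fixed-length partial sum the comment above alludes to (illustrative; no such macro is actually defined in this header):

/* Illustrative only: the fixed-size portion of a BSS descriptor, per the
 * comment above.  IELength is the only variable-length contribution. */
#define BSSID_EX_FIXED_IE_SZ (ETH_ALEN + 2 +				\
	sizeof(struct ndis_802_11_ssid) + sizeof(u32) +			\
	sizeof(NDIS_802_11_RSSI) +					\
	sizeof(enum NDIS_802_11_NETWORK_TYPE) +				\
	sizeof(struct ndis_802_11_config) +				\
	NDIS_802_11_LENGTH_RATES_EX)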
+
+enum ndis_802_11_auth_mode {
+ Ndis802_11AuthModeOpen,
+ Ndis802_11AuthModeShared,
+ Ndis802_11AuthModeAutoSwitch,
+ Ndis802_11AuthModeWPA,
+ Ndis802_11AuthModeWPAPSK,
+ Ndis802_11AuthModeWPANone,
+ Ndis802_11AuthModeWAPI,
+ Ndis802_11AuthModeMax /* Not a real mode, upper bound */
+};
+
+enum ndis_802_11_wep_status {
+ Ndis802_11WEPEnabled,
+ Ndis802_11Encryption1Enabled = Ndis802_11WEPEnabled,
+ Ndis802_11WEPDisabled,
+ Ndis802_11EncryptionDisabled = Ndis802_11WEPDisabled,
+ Ndis802_11WEPKeyAbsent,
+ Ndis802_11Encryption1KeyAbsent = Ndis802_11WEPKeyAbsent,
+ Ndis802_11WEPNotSupported,
+ Ndis802_11EncryptionNotSupported = Ndis802_11WEPNotSupported,
+ Ndis802_11Encryption2Enabled,
+ Ndis802_11Encryption2KeyAbsent,
+ Ndis802_11Encryption3Enabled,
+ Ndis802_11Encryption3KeyAbsent,
+ Ndis802_11_EncryptionWAPI
+};
+
+#define NDIS_802_11_AI_REQFI_CAPABILITIES 1
+#define NDIS_802_11_AI_REQFI_LISTENINTERVAL 2
+#define NDIS_802_11_AI_REQFI_CURRENTAPADDRESS 4
+
+#define NDIS_802_11_AI_RESFI_CAPABILITIES 1
+#define NDIS_802_11_AI_RESFI_STATUSCODE 2
+#define NDIS_802_11_AI_RESFI_ASSOCIATIONID 4
+
+struct ndis_802_11_ai_reqfi {
+ u16 Capabilities;
+ u16 ListenInterval;
+ unsigned char CurrentAPAddress[ETH_ALEN];
+};
+
+struct ndis_802_11_ai_resfi {
+ u16 Capabilities;
+ u16 StatusCode;
+ u16 AssociationId;
+};
+
+struct ndis_802_11_assoc_info {
+ u32 Length;
+ u16 AvailableRequestFixedIEs;
+ struct ndis_802_11_ai_reqfi RequestFixedIEs;
+ u32 RequestIELength;
+ u32 OffsetRequestIEs;
+ u16 AvailableResponseFixedIEs;
+ struct ndis_802_11_ai_resfi ResponseFixedIEs;
+ u32 ResponseIELength;
+ u32 OffsetResponseIEs;
+};
+
+enum ndis_802_11_reload_def {
+ Ndis802_11ReloadWEPKeys
+};
+
+/* Key mapping keys require a BSSID */
+struct ndis_802_11_key {
+ u32 Length; /* Length of this structure */
+ u32 KeyIndex;
+ u32 KeyLength; /* length of key in bytes */
+ unsigned char BSSID[ETH_ALEN];
+ unsigned long long KeyRSC;
+ u8 KeyMaterial[32]; /* var len depending on above field */
+};
+
+struct ndis_802_11_remove_key {
+ u32 Length; /* Length */
+ u32 KeyIndex;
+ unsigned char BSSID[ETH_ALEN];
+};
+
+struct ndis_802_11_wep {
+ u32 Length; /* Length of this structure */
+ u32 KeyIndex; /* 0 is the per-client key,
+ * 1-N are the global keys */
+ u32 KeyLength; /* length of key in bytes */
+ u8 KeyMaterial[16];/* variable len depending on above field */
+};
+
+struct ndis_802_11_auth_req {
+ u32 Length; /* Length of structure */
+ unsigned char Bssid[ETH_ALEN];
+ u32 Flags;
+};
+
+enum ndis_802_11_status_type {
+ Ndis802_11StatusType_Authentication,
+ Ndis802_11StatusType_MediaStreamMode,
+ Ndis802_11StatusType_PMKID_CandidateList,
+ Ndis802_11StatusTypeMax /* not a real type, defined as
+ * an upper bound */
+};
+
+struct ndis_802_11_status_ind {
+ enum ndis_802_11_status_type StatusType;
+};
+
+/* mask for authentication/integrity fields */
+#define NDIS_802_11_AUTH_REQUEST_AUTH_FIELDS 0x0f
+#define NDIS_802_11_AUTH_REQUEST_REAUTH 0x01
+#define NDIS_802_11_AUTH_REQUEST_KEYUPDATE 0x02
+#define NDIS_802_11_AUTH_REQUEST_PAIRWISE_ERROR 0x06
+#define NDIS_802_11_AUTH_REQUEST_GROUP_ERROR 0x0E
+
+/* MIC check time, 60 seconds. */
+#define MIC_CHECK_TIME 60000000
+
+struct ndis_802_11_auth_evt {
+ struct ndis_802_11_status_ind Status;
+ struct ndis_802_11_auth_req Request[1];
+};
+
+struct ndis_802_11_test {
+ u32 Length;
+ u32 Type;
+ union {
+ struct ndis_802_11_auth_evt AuthenticationEvent;
+ NDIS_802_11_RSSI RssiTrigger;
+ } tt;
+};
+
+
+#ifndef Ndis802_11APMode
+#define Ndis802_11APMode (Ndis802_11InfrastructureMax+1)
+#endif
+
+struct wlan_phy_info {
+	u8 SignalStrength;/* in percentage */
+	u8 SignalQuality;/* in percentage */
+ u8 Optimum_antenna; /* for Antenna diversity */
+ u8 Reserved_0;
+};
+
+struct wlan_bcn_info {
+	/* this info is obtained from rtw_get_encrypt_info when
+	 * translating scan results for the UI */
+ u8 encryp_protocol;/* ENCRYP_PROTOCOL_E: OPEN/WEP/WPA/WPA2/WAPI */
+ int group_cipher; /* WPA/WPA2 group cipher */
+ int pairwise_cipher;/* WPA/WPA2/WEP pairwise cipher */
+ int is_8021x;
+
+ /* bwmode 20/40 and ch_offset UP/LOW */
+ unsigned short ht_cap_info;
+ unsigned char ht_info_infos_0;
+};
+
+/* temporarily add #pragma pack for the structure alignment issue of
+ * struct wlan_bssid_ex and get_wlan_bssid_ex_sz()
+ */
+struct wlan_bssid_ex {
+ u32 Length;
+ unsigned char MacAddress[ETH_ALEN];
+	u8 Reserved[2];/* Reserved[0]: is beacon frame */
+ struct ndis_802_11_ssid Ssid;
+ u32 Privacy;
+	NDIS_802_11_RSSI Rssi;/* in dBm, raw data from the PHY */
+ enum NDIS_802_11_NETWORK_TYPE NetworkTypeInUse;
+ struct ndis_802_11_config Configuration;
+ enum ndis_802_11_network_infra InfrastructureMode;
+ unsigned char SupportedRates[NDIS_802_11_LENGTH_RATES_EX];
+ struct wlan_phy_info PhyInfo;
+ u32 IELength;
+	u8 IEs[MAX_IE_SZ];	/* timestamp, beacon interval, and
+				 * capability information */
+} __packed;
+
+static inline uint get_wlan_bssid_ex_sz(struct wlan_bssid_ex *bss)
+{
+ return sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + bss->IELength;
+}
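Because IEs[] is declared with the fixed MAX_IE_SZ size while only IELength bytes of it carry data, get_wlan_bssid_ex_sz() gives the number of bytes that are actually meaningful. A small usage sketch (illustrative, not from the patch; memcpy is assumed to come in through the usual kernel headers):

/* Illustrative only: duplicate just the meaningful part of a descriptor. */
static inline void copy_bssid_ex(struct wlan_bssid_ex *dst,
				 struct wlan_bssid_ex *src)
{
	memcpy(dst, src, get_wlan_bssid_ex_sz(src));
}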
+
+struct wlan_network {
+ struct list_head list;
+ int network_type; /* refer to ieee80211.h for WIRELESS_11A/B/G */
+ int fixed; /* set fixed when not to be removed
+ * in site-surveying */
+ unsigned long last_scanned; /* timestamp for the network */
+	int aid; /* will only be valid when a BSS is joined. */
+ int join_res;
+ struct wlan_bssid_ex network; /* must be the last item */
+ struct wlan_bcn_info BcnInfo;
+};
+
+enum VRTL_CARRIER_SENSE {
+ DISABLE_VCS,
+ ENABLE_VCS,
+ AUTO_VCS
+};
+
+enum VCS_TYPE {
+ NONE_VCS,
+ RTS_CTS,
+ CTS_TO_SELF
+};
+
+#define PWR_CAM 0
+#define PWR_MINPS 1
+#define PWR_MAXPS 2
+#define PWR_UAPSD 3
+#define PWR_VOIP 4
+
+enum UAPSD_MAX_SP {
+ NO_LIMIT,
+ TWO_MSDU,
+ FOUR_MSDU,
+ SIX_MSDU
+};
+
+#define NUM_PRE_AUTH_KEY 16
+#define NUM_PMKID_CACHE NUM_PRE_AUTH_KEY
+
+/*
+* WPA2
+*/
+
+struct pmkid_candidate {
+ unsigned char BSSID[ETH_ALEN];
+ u32 Flags;
+};
+
+struct ndis_802_11_pmkid_list {
+ u32 Version; /* Version of the structure */
+ u32 NumCandidates; /* No. of pmkid candidates */
+ struct pmkid_candidate CandidateList[1];
+};
+
+struct ndis_802_11_auth_encrypt {
+ enum ndis_802_11_auth_mode AuthModeSupported;
+ enum ndis_802_11_wep_status EncryptStatusSupported;
+};
+
+struct ndis_802_11_cap {
+ u32 Length;
+ u32 Version;
+ u32 NoOfPMKIDs;
+ u32 NoOfAuthEncryptPairsSupported;
+ struct ndis_802_11_auth_encrypt AuthenticationEncryptionSupported[1];
+};
+
+u8 key_2char2num(u8 hch, u8 lch);
+u8 key_char2num(u8 ch);
+u8 str_2char2num(u8 hch, u8 lch);
+
+#endif /* __WLAN_BSSDEF_H__ */
diff --git a/drivers/staging/rtl8188eu/include/xmit_osdep.h b/drivers/staging/rtl8188eu/include/xmit_osdep.h
new file mode 100644
index 00000000000..2ff622ba24f
--- /dev/null
+++ b/drivers/staging/rtl8188eu/include/xmit_osdep.h
@@ -0,0 +1,67 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#ifndef __XMIT_OSDEP_H_
+#define __XMIT_OSDEP_H_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+struct pkt_file {
+ struct sk_buff *pkt;
+	size_t pkt_len; /* the remaining length of the open file */
+ unsigned char *cur_buffer;
+ u8 *buf_start;
+ u8 *cur_addr;
+ size_t buf_len;
+};
+
+extern int rtw_ht_enable;
+extern int rtw_cbw40_enable;
+extern int rtw_ampdu_enable;/* for enable tx_ampdu */
+
+#define NR_XMITFRAME 256
+
+struct xmit_priv;
+struct pkt_attrib;
+struct sta_xmit_priv;
+struct xmit_frame;
+struct xmit_buf;
+
+int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev);
+
+void rtw_os_xmit_schedule(struct adapter *padapter);
+
+int rtw_os_xmit_resource_alloc(struct adapter *padapter,
+ struct xmit_buf *pxmitbuf, u32 alloc_sz);
+void rtw_os_xmit_resource_free(struct adapter *padapter,
+ struct xmit_buf *pxmitbuf, u32 free_sz);
+
+void rtw_set_tx_chksum_offload(struct sk_buff *pkt, struct pkt_attrib *pattrib);
+
+uint rtw_remainder_len(struct pkt_file *pfile);
+void _rtw_open_pktfile(struct sk_buff *pkt, struct pkt_file *pfile);
+uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen);
+int rtw_endofpktfile(struct pkt_file *pfile);
+
+void rtw_os_pkt_complete(struct adapter *padapter, struct sk_buff *pkt);
+void rtw_os_xmit_complete(struct adapter *padapter,
+ struct xmit_frame *pxframe);
+
+#endif /* __XMIT_OSDEP_H_ */
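The pkt_file accessors declared above wrap an skb so that transmit code can consume it in chunks. A usage sketch (illustrative only; the function and buffer names are hypothetical):

/* Illustrative only: drain an skb into a flat buffer via struct pkt_file. */
static void drain_pkt_example(struct sk_buff *skb, u8 *buf, uint buflen)
{
	struct pkt_file file;
	uint copied = 0, chunk;

	_rtw_open_pktfile(skb, &file);
	while (!rtw_endofpktfile(&file) && copied < buflen) {
		chunk = rtw_remainder_len(&file);
		if (chunk > buflen - copied)
			chunk = buflen - copied;
		copied += _rtw_pktfile_read(&file, buf + copied, chunk);
	}
}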
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
new file mode 100644
index 00000000000..cd4100fb364
--- /dev/null
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -0,0 +1,8222 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _IOCTL_LINUX_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <wlan_bssdef.h>
+#include <rtw_debug.h>
+#include <wifi.h>
+#include <rtw_mlme.h>
+#include <rtw_mlme_ext.h>
+#include <rtw_ioctl.h>
+#include <rtw_ioctl_set.h>
+#include <rtw_mp_ioctl.h>
+#include <usb_ops.h>
+#include <rtw_version.h>
+#include <rtl8188e_hal.h>
+
+#include <rtw_mp.h>
+#include <rtw_iol.h>
+
+#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV + 30)
+
+#define SCAN_ITEM_SIZE 768
+#define MAX_CUSTOM_LEN 64
+#define RATE_COUNT 4
+
+/* combo scan */
+#define WEXT_CSCAN_AMOUNT 9
+#define WEXT_CSCAN_BUF_LEN 360
+#define WEXT_CSCAN_HEADER "CSCAN S\x01\x00\x00S\x00"
+#define WEXT_CSCAN_HEADER_SIZE 12
+#define WEXT_CSCAN_SSID_SECTION 'S'
+#define WEXT_CSCAN_CHANNEL_SECTION 'C'
+#define WEXT_CSCAN_NPROBE_SECTION 'N'
+#define WEXT_CSCAN_ACTV_DWELL_SECTION 'A'
+#define WEXT_CSCAN_PASV_DWELL_SECTION 'P'
+#define WEXT_CSCAN_HOME_DWELL_SECTION 'H'
+#define WEXT_CSCAN_TYPE_SECTION 'T'
+
+static struct mp_ioctl_handler mp_ioctl_hdl[] = {
+/*0*/ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_start_test_hdl, OID_RT_PRO_START_TEST)
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_stop_test_hdl, OID_RT_PRO_STOP_TEST)
+
+ GEN_HANDLER(sizeof(struct rwreg_param), rtl8188eu_oid_rt_pro_read_register_hdl, OID_RT_PRO_READ_REGISTER)
+ GEN_HANDLER(sizeof(struct rwreg_param), rtl8188eu_oid_rt_pro_write_register_hdl, OID_RT_PRO_WRITE_REGISTER)
+ GEN_HANDLER(sizeof(struct bb_reg_param), rtl8188eu_oid_rt_pro_read_bb_reg_hdl, OID_RT_PRO_READ_BB_REG)
+/*5*/ GEN_HANDLER(sizeof(struct bb_reg_param), rtl8188eu_oid_rt_pro_write_bb_reg_hdl, OID_RT_PRO_WRITE_BB_REG)
+ GEN_HANDLER(sizeof(struct rf_reg_param), rtl8188eu_oid_rt_pro_read_rf_reg_hdl, OID_RT_PRO_RF_READ_REGISTRY)
+ GEN_HANDLER(sizeof(struct rf_reg_param), rtl8188eu_oid_rt_pro_write_rf_reg_hdl, OID_RT_PRO_RF_WRITE_REGISTRY)
+
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_set_channel_direct_call_hdl, OID_RT_PRO_SET_CHANNEL_DIRECT_CALL)
+ GEN_HANDLER(sizeof(struct txpower_param), rtl8188eu_oid_rt_pro_set_tx_power_control_hdl, OID_RT_PRO_SET_TX_POWER_CONTROL)
+/*10*/ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_set_data_rate_hdl, OID_RT_PRO_SET_DATA_RATE)
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_set_bandwidth_hdl, OID_RT_SET_BANDWIDTH)
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_set_antenna_bb_hdl, OID_RT_PRO_SET_ANTENNA_BB)
+
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_set_continuous_tx_hdl, OID_RT_PRO_SET_CONTINUOUS_TX)
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_set_single_carrier_tx_hdl, OID_RT_PRO_SET_SINGLE_CARRIER_TX)
+/*15*/ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_set_carrier_suppression_tx_hdl, OID_RT_PRO_SET_CARRIER_SUPPRESSION_TX)
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_set_single_tone_tx_hdl, OID_RT_PRO_SET_SINGLE_TONE_TX)
+
+ EXT_MP_IOCTL_HANDLER(0, xmit_packet, 0)
+
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_set_rx_packet_type_hdl, OID_RT_SET_RX_PACKET_TYPE)
+ GEN_HANDLER(0, rtl8188eu_oid_rt_reset_phy_rx_packet_count_hdl, OID_RT_RESET_PHY_RX_PACKET_COUNT)
+/*20*/ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_get_phy_rx_packet_received_hdl, OID_RT_GET_PHY_RX_PACKET_RECEIVED)
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_get_phy_rx_packet_crc32_error_hdl, OID_RT_GET_PHY_RX_PACKET_CRC32_ERROR)
+
+ GEN_HANDLER(sizeof(struct eeprom_rw_param), NULL, 0)
+ GEN_HANDLER(sizeof(struct eeprom_rw_param), NULL, 0)
+ GEN_HANDLER(sizeof(struct efuse_access_struct), rtl8188eu_oid_rt_pro_efuse_hdl, OID_RT_PRO_EFUSE)
+/*25*/ GEN_HANDLER(0, rtl8188eu_oid_rt_pro_efuse_map_hdl, OID_RT_PRO_EFUSE_MAP)
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_get_efuse_max_size_hdl, OID_RT_GET_EFUSE_MAX_SIZE)
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_get_efuse_current_size_hdl, OID_RT_GET_EFUSE_CURRENT_SIZE)
+
+ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_get_thermal_meter_hdl, OID_RT_PRO_GET_THERMAL_METER)
+ GEN_HANDLER(sizeof(u8), rtl8188eu_oid_rt_pro_set_power_tracking_hdl, OID_RT_PRO_SET_POWER_TRACKING)
+/*30*/ GEN_HANDLER(sizeof(u8), rtl8188eu_oid_rt_set_power_down_hdl, OID_RT_SET_POWER_DOWN)
+/*31*/ GEN_HANDLER(0, rtl8188eu_oid_rt_pro_trigger_gpio_hdl, 0)
+};
+
+static u32 rtw_rates[] = {1000000, 2000000, 5500000, 11000000,
+ 6000000, 9000000, 12000000, 18000000, 24000000, 36000000,
+ 48000000, 54000000};
+
+static const char * const iw_operation_mode[] = {
+ "Auto", "Ad-Hoc", "Managed", "Master", "Repeater",
+ "Secondary", "Monitor"
+};
+
+static int hex2num_i(char c)
+{
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ if (c >= 'a' && c <= 'f')
+ return c - 'a' + 10;
+ if (c >= 'A' && c <= 'F')
+ return c - 'A' + 10;
+ return -1;
+}
+
+/**
+ * hwaddr_aton - Convert ASCII string to MAC address
+ * @txt: MAC address as a string (e.g., "00:11:22:33:44:55")
+ * @addr: Buffer for the MAC address (ETH_ALEN = 6 bytes)
+ * Returns: 0 on success, -1 on failure (e.g., string not a MAC address)
+ */
+static int hwaddr_aton_i(const char *txt, u8 *addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ int a, b;
+
+ a = hex2num_i(*txt++);
+ if (a < 0)
+ return -1;
+ b = hex2num_i(*txt++);
+ if (b < 0)
+ return -1;
+ *addr++ = (a << 4) | b;
+ if (i < 5 && *txt++ != ':')
+ return -1;
+ }
+
+ return 0;
+}
+
+void indicate_wx_scan_complete_event(struct adapter *padapter)
+{
+ union iwreq_data wrqu;
+
+ _rtw_memset(&wrqu, 0, sizeof(union iwreq_data));
+ wireless_send_event(padapter->pnetdev, SIOCGIWSCAN, &wrqu, NULL);
+}
+
+void rtw_indicate_wx_assoc_event(struct adapter *padapter)
+{
+ union iwreq_data wrqu;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ _rtw_memset(&wrqu, 0, sizeof(union iwreq_data));
+
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+
+ memcpy(wrqu.ap_addr.sa_data, pmlmepriv->cur_network.network.MacAddress, ETH_ALEN);
+
+ DBG_88E_LEVEL(_drv_always_, "assoc success\n");
+ wireless_send_event(padapter->pnetdev, SIOCGIWAP, &wrqu, NULL);
+}
+
+void rtw_indicate_wx_disassoc_event(struct adapter *padapter)
+{
+ union iwreq_data wrqu;
+
+ _rtw_memset(&wrqu, 0, sizeof(union iwreq_data));
+
+ wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+ _rtw_memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+
+ DBG_88E_LEVEL(_drv_always_, "indicate disassoc\n");
+ wireless_send_event(padapter->pnetdev, SIOCGIWAP, &wrqu, NULL);
+}
+
+static char *translate_scan(struct adapter *padapter,
+ struct iw_request_info *info,
+ struct wlan_network *pnetwork,
+ char *start, char *stop)
+{
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct iw_event iwe;
+ u16 cap;
+ __le16 le_tmp;
+ u32 ht_ielen = 0;
+ char custom[MAX_CUSTOM_LEN];
+ char *p;
+ u16 max_rate = 0, rate, ht_cap = false;
+ u32 i = 0;
+ u8 bw_40MHz = 0, short_GI = 0;
+ u16 mcs_rate = 0;
+ u8 ss, sq;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
+ u32 blnGotP2PIE = false;
+
+ /* The user is doing P2P device discovery. */
+ /* The SSID prefix should be "DIRECT-" and the IEs should contain the P2P IE. */
+ /* If not, the driver should ignore this AP and move on to the next one. */
+
+ /* Verifying the SSID */
+ if (!memcmp(pnetwork->network.Ssid.Ssid, pwdinfo->p2p_wildcard_ssid, P2P_WILDCARD_SSID_LEN)) {
+ u32 p2pielen = 0;
+
+ if (pnetwork->network.Reserved[0] == 2) {/* Probe Request */
+ /* Verifying the P2P IE */
+ if (rtw_get_p2p_ie(pnetwork->network.IEs, pnetwork->network.IELength, NULL, &p2pielen))
+ blnGotP2PIE = true;
+ } else {/* Beacon or Probe Response */
+ /* Verifying the P2P IE */
+ if (rtw_get_p2p_ie(&pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &p2pielen))
+ blnGotP2PIE = true;
+ }
+ }
+
+ if (!blnGotP2PIE)
+ return start;
+ }
+#endif /* CONFIG_88EU_P2P */
+
+ /* AP MAC address */
+ iwe.cmd = SIOCGIWAP;
+ iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
+
+ memcpy(iwe.u.ap_addr.sa_data, pnetwork->network.MacAddress, ETH_ALEN);
+ start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_ADDR_LEN);
+
+ /* Add the ESSID */
+ iwe.cmd = SIOCGIWESSID;
+ iwe.u.data.flags = 1;
+ iwe.u.data.length = min_t(u16, pnetwork->network.Ssid.SsidLength, 32);
+ start = iwe_stream_add_point(info, start, stop, &iwe, pnetwork->network.Ssid.Ssid);
+
+ /* parsing HT_CAP_IE */
+ p = rtw_get_ie(&pnetwork->network.IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pnetwork->network.IELength-12);
+
+ if (p && ht_ielen > 0) {
+ struct rtw_ieee80211_ht_cap *pht_capie;
+ ht_cap = true;
+ pht_capie = (struct rtw_ieee80211_ht_cap *)(p+2);
+ memcpy(&mcs_rate, pht_capie->supp_mcs_set, 2);
+ bw_40MHz = (pht_capie->cap_info&IEEE80211_HT_CAP_SUP_WIDTH) ? 1 : 0;
+ short_GI = (pht_capie->cap_info&(IEEE80211_HT_CAP_SGI_20|IEEE80211_HT_CAP_SGI_40)) ? 1 : 0;
+ }
+
+ /* Add the protocol name */
+ iwe.cmd = SIOCGIWNAME;
+ if ((rtw_is_cckratesonly_included((u8 *)&pnetwork->network.SupportedRates))) {
+ if (ht_cap)
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bn");
+ else
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11b");
+ } else if ((rtw_is_cckrates_included((u8 *)&pnetwork->network.SupportedRates))) {
+ if (ht_cap)
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bgn");
+ else
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11bg");
+ } else {
+ if (pnetwork->network.Configuration.DSConfig > 14) {
+ if (ht_cap)
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11an");
+ else
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11a");
+ } else {
+ if (ht_cap)
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11gn");
+ else
+ snprintf(iwe.u.name, IFNAMSIZ, "IEEE 802.11g");
+ }
+ }
+
+ start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_CHAR_LEN);
+
+ /* Add mode */
+ iwe.cmd = SIOCGIWMODE;
+ memcpy(&le_tmp, rtw_get_capability_from_ie(pnetwork->network.IEs), 2);
+
+ cap = le16_to_cpu(le_tmp);
+
+ if (cap & (WLAN_CAPABILITY_IBSS | WLAN_CAPABILITY_BSS)) {
+ if (cap & WLAN_CAPABILITY_BSS)
+ iwe.u.mode = IW_MODE_MASTER;
+ else
+ iwe.u.mode = IW_MODE_ADHOC;
+
+ start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_UINT_LEN);
+ }
+
+ if (pnetwork->network.Configuration.DSConfig < 1)
+ pnetwork->network.Configuration.DSConfig = 1;
+
+ /* Add frequency/channel */
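+ /* rtw_ch2freq() returns MHz; with m = MHz * 100000 and e = 1, m * 10^e is the frequency in Hz. */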
+ iwe.cmd = SIOCGIWFREQ;
+ iwe.u.freq.m = rtw_ch2freq(pnetwork->network.Configuration.DSConfig) * 100000;
+ iwe.u.freq.e = 1;
+ iwe.u.freq.i = pnetwork->network.Configuration.DSConfig;
+ start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_FREQ_LEN);
+
+ /* Add encryption capability */
+ iwe.cmd = SIOCGIWENCODE;
+ if (cap & WLAN_CAPABILITY_PRIVACY)
+ iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
+ else
+ iwe.u.data.flags = IW_ENCODE_DISABLED;
+ iwe.u.data.length = 0;
+ start = iwe_stream_add_point(info, start, stop, &iwe, pnetwork->network.Ssid.Ssid);
+
+ /* Add basic and extended rates */
+ max_rate = 0;
+ p = custom;
+ p += snprintf(p, MAX_CUSTOM_LEN - (p - custom), " Rates (Mb/s): ");
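+ /* SupportedRates entries are in 500 kb/s units; bit 0x80 marks a basic rate. */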
+ while (pnetwork->network.SupportedRates[i] != 0) {
+ rate = pnetwork->network.SupportedRates[i]&0x7F;
+ if (rate > max_rate)
+ max_rate = rate;
+ p += snprintf(p, MAX_CUSTOM_LEN - (p - custom),
+ "%d%s ", rate >> 1, (rate & 1) ? ".5" : "");
+ i++;
+ }
+
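+ /* mcs_rate holds the first two bytes of the Supported MCS Set:
+ * bit 15 (0x8000) means MCS15 (two spatial streams) and bit 7
+ * (0x0080) means MCS7. The peak rates below are in Mb/s and are
+ * doubled afterwards to get the 500 kb/s units used for SIOCGIWRATE.
+ */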
+ if (ht_cap) {
+ if (mcs_rate&0x8000)/* MCS15 */
+ max_rate = (bw_40MHz) ? ((short_GI) ? 300 : 270) : ((short_GI) ? 144 : 130);
+ else if (mcs_rate&0x0080)/* MCS7 */
+ ;
+ else/* default MCS7 */
+ max_rate = (bw_40MHz) ? ((short_GI) ? 150 : 135) : ((short_GI) ? 72 : 65);
+
+ max_rate = max_rate*2;/* convert Mb/s to 500 kb/s units */
+ }
+
+ iwe.cmd = SIOCGIWRATE;
+ iwe.u.bitrate.fixed = 0;
+ iwe.u.bitrate.disabled = 0;
+ iwe.u.bitrate.value = max_rate * 500000;
+ start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_PARAM_LEN);
+
+ /* parsing WPA/WPA2 IE */
+ {
+ u8 buf[MAX_WPA_IE_LEN];
+ u8 wpa_ie[255], rsn_ie[255];
+ u16 wpa_len = 0, rsn_len = 0;
+ u8 *p;
+
+ rtw_get_sec_ie(pnetwork->network.IEs, pnetwork->network.IELength, rsn_ie, &rsn_len, wpa_ie, &wpa_len);
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: ssid =%s\n", pnetwork->network.Ssid.Ssid));
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan: wpa_len =%d rsn_len =%d\n", wpa_len, rsn_len));
+
+ if (wpa_len > 0) {
+ p = buf;
+ _rtw_memset(buf, 0, MAX_WPA_IE_LEN);
+ p += sprintf(p, "wpa_ie =");
+ for (i = 0; i < wpa_len; i++)
+ p += sprintf(p, "%02x", wpa_ie[i]);
+
+ _rtw_memset(&iwe, 0, sizeof(iwe));
+ iwe.cmd = IWEVCUSTOM;
+ iwe.u.data.length = strlen(buf);
+ start = iwe_stream_add_point(info, start, stop, &iwe, buf);
+
+ _rtw_memset(&iwe, 0, sizeof(iwe));
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = wpa_len;
+ start = iwe_stream_add_point(info, start, stop, &iwe, wpa_ie);
+ }
+ if (rsn_len > 0) {
+ p = buf;
+ _rtw_memset(buf, 0, MAX_WPA_IE_LEN);
+ p += sprintf(p, "rsn_ie =");
+ for (i = 0; i < rsn_len; i++)
+ p += sprintf(p, "%02x", rsn_ie[i]);
+ _rtw_memset(&iwe, 0, sizeof(iwe));
+ iwe.cmd = IWEVCUSTOM;
+ iwe.u.data.length = strlen(buf);
+ start = iwe_stream_add_point(info, start, stop, &iwe, buf);
+
+ _rtw_memset(&iwe, 0, sizeof(iwe));
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = rsn_len;
+ start = iwe_stream_add_point(info, start, stop, &iwe, rsn_ie);
+ }
+ }
+
+ {/* parsing WPS IE */
+ uint cnt = 0, total_ielen;
+ u8 *wpsie_ptr = NULL;
+ uint wps_ielen = 0;
+
+ u8 *ie_ptr = pnetwork->network.IEs + _FIXED_IE_LENGTH_;
+ total_ielen = pnetwork->network.IELength - _FIXED_IE_LENGTH_;
+
+ while (cnt < total_ielen) {
+ if (rtw_is_wps_ie(&ie_ptr[cnt], &wps_ielen) && (wps_ielen > 2)) {
+ wpsie_ptr = &ie_ptr[cnt];
+ iwe.cmd = IWEVGENIE;
+ iwe.u.data.length = (u16)wps_ielen;
+ start = iwe_stream_add_point(info, start, stop, &iwe, wpsie_ptr);
+ }
+ cnt += ie_ptr[cnt+1]+2; /* goto next */
+ }
+ }
+
+ /* Add quality statistics */
+ iwe.cmd = IWEVQUAL;
+ iwe.u.qual.updated = IW_QUAL_QUAL_UPDATED | IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_INVALID;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) == true &&
+ is_same_network(&pmlmepriv->cur_network.network, &pnetwork->network)) {
+ ss = padapter->recvpriv.signal_strength;
+ sq = padapter->recvpriv.signal_qual;
+ } else {
+ ss = pnetwork->network.PhyInfo.SignalStrength;
+ sq = pnetwork->network.PhyInfo.SignalQuality;
+ }
+
+ iwe.u.qual.level = (u8)ss;
+ iwe.u.qual.qual = (u8)sq; /* signal quality */
+ iwe.u.qual.noise = 0; /* noise level */
+ start = iwe_stream_add_event(info, start, stop, &iwe, IW_EV_QUAL_LEN);
+ return start;
+}
+
+static int wpa_set_auth_algs(struct net_device *dev, u32 value)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ int ret = 0;
+
+ if ((value & AUTH_ALG_SHARED_KEY) && (value & AUTH_ALG_OPEN_SYSTEM)) {
+ DBG_88E("wpa_set_auth_algs, AUTH_ALG_SHARED_KEY and AUTH_ALG_OPEN_SYSTEM [value:0x%x]\n", value);
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeAutoSwitch;
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Auto;
+ } else if (value & AUTH_ALG_SHARED_KEY) {
+ DBG_88E("wpa_set_auth_algs, AUTH_ALG_SHARED_KEY [value:0x%x]\n", value);
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeShared;
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Shared;
+ } else if (value & AUTH_ALG_OPEN_SYSTEM) {
+ DBG_88E("wpa_set_auth_algs, AUTH_ALG_OPEN_SYSTEM\n");
+ if (padapter->securitypriv.ndisauthtype < Ndis802_11AuthModeWPAPSK) {
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
+ }
+ } else if (value & AUTH_ALG_LEAP) {
+ DBG_88E("wpa_set_auth_algs, AUTH_ALG_LEAP\n");
+ } else {
+ DBG_88E("wpa_set_auth_algs, error!\n");
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
+{
+ int ret = 0;
+ u32 wep_key_idx, wep_key_len, wep_total_len;
+ struct ndis_802_11_wep *pwep = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#endif /* CONFIG_88EU_P2P */
+
+_func_enter_;
+
+ param->u.crypt.err = 0;
+ param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
+
+ if (param_len < (u32) ((u8 *)param->u.crypt.key - (u8 *)param) + param->u.crypt.key_len) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
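+ /* Only the broadcast sta_addr (ff:ff:ff:ff:ff:ff) is accepted here; any other address is rejected. */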
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
+ if (param->u.crypt.idx >= WEP_KEYS) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (strcmp(param->u.crypt.alg, "WEP") == 0) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("wpa_set_encryption, crypt.alg = WEP\n"));
+ DBG_88E("wpa_set_encryption, crypt.alg = WEP\n");
+
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
+
+ wep_key_idx = param->u.crypt.idx;
+ wep_key_len = param->u.crypt.key_len;
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("(1)wep_key_idx =%d\n", wep_key_idx));
+ DBG_88E("(1)wep_key_idx =%d\n", wep_key_idx);
+
+ if (wep_key_idx > WEP_KEYS)
+ return -EINVAL;
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("(2)wep_key_idx =%d\n", wep_key_idx));
+
+ if (wep_key_len > 0) {
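+ /* Normalize the key length to 5 bytes (WEP-40) or 13 bytes (WEP-104). */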
+ wep_key_len = wep_key_len <= 5 ? 5 : 13;
+ wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
+ pwep = (struct ndis_802_11_wep *)rtw_malloc(wep_total_len);
+ if (pwep == NULL) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, (" wpa_set_encryption: pwep allocate fail !!!\n"));
+ goto exit;
+ }
+ _rtw_memset(pwep, 0, wep_total_len);
+ pwep->KeyLength = wep_key_len;
+ pwep->Length = wep_total_len;
+ if (wep_key_len == 13) {
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP104_;
+ }
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+ pwep->KeyIndex = wep_key_idx;
+ pwep->KeyIndex |= 0x80000000;
+ memcpy(pwep->KeyMaterial, param->u.crypt.key, pwep->KeyLength);
+ if (param->u.crypt.set_tx) {
+ DBG_88E("wep, set_tx = 1\n");
+ if (rtw_set_802_11_add_wep(padapter, pwep) == (u8)_FAIL)
+ ret = -EOPNOTSUPP;
+ } else {
+ DBG_88E("wep, set_tx = 0\n");
+ if (wep_key_idx >= WEP_KEYS) {
+ ret = -EOPNOTSUPP;
+ goto exit;
+ }
+ memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength);
+ psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->KeyLength;
+ rtw_set_key(padapter, psecuritypriv, wep_key_idx, 0);
+ }
+ goto exit;
+ }
+
+ if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) { /* 802_1x */
+ struct sta_info *psta, *pbcmc_sta;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_MP_STATE)) { /* sta mode */
+ psta = rtw_get_stainfo(pstapriv, get_bssid(pmlmepriv));
+ if (psta == NULL) {
+ ;
+ } else {
+ if (strcmp(param->u.crypt.alg, "none") != 0)
+ psta->ieee8021x_blocked = false;
+
+ if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
+ (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled))
+ psta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
+
+ if (param->u.crypt.set_tx == 1) { /* pairwise key */
+ memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+
+ if (strcmp(param->u.crypt.alg, "TKIP") == 0) { /* set mic key */
+ memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8);
+ memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8);
+ padapter->securitypriv.busetkipkey = false;
+ }
+
+ DBG_88E(" ~~~~set sta key:unicastkey\n");
+
+ rtw_setstakey_cmd(padapter, (unsigned char *)psta, true);
+ } else { /* group key */
+ memcpy(padapter->securitypriv.dot118021XGrpKey[param->u.crypt.idx].skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ memcpy(padapter->securitypriv.dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
+ memcpy(padapter->securitypriv.dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+ padapter->securitypriv.binstallGrpkey = true;
+ DBG_88E(" ~~~~set sta key:groupkey\n");
+
+ padapter->securitypriv.dot118021XGrpKeyid = param->u.crypt.idx;
+
+ rtw_set_key(padapter, &padapter->securitypriv, param->u.crypt.idx, 1);
+#ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_PROVISIONING_ING))
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_PROVISIONING_DONE);
+#endif /* CONFIG_88EU_P2P */
+ }
+ }
+ pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
+ if (pbcmc_sta == NULL) {
+ ;
+ } else {
+ /* Jeff: don't disable ieee8021x_blocked while clearing key */
+ if (strcmp(param->u.crypt.alg, "none") != 0)
+ pbcmc_sta->ieee8021x_blocked = false;
+
+ if ((padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption2Enabled) ||
+ (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption3Enabled))
+ pbcmc_sta->dot118021XPrivacy = padapter->securitypriv.dot11PrivacyAlgrthm;
+ }
+ }
+ }
+
+exit:
+
+ kfree(pwep);
+
+_func_exit_;
+
+ return ret;
+}
+
+static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ielen)
+{
+ u8 *buf = NULL;
+ int group_cipher = 0, pairwise_cipher = 0;
+ int ret = 0;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#endif /* CONFIG_88EU_P2P */
+
+ if ((ielen > MAX_WPA_IE_LEN) || (pie == NULL)) {
+ _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
+ if (pie == NULL)
+ return ret;
+ else
+ return -EINVAL;
+ }
+
+ if (ielen) {
+ buf = rtw_zmalloc(ielen);
+ if (buf == NULL) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ memcpy(buf, pie, ielen);
+
+ /* dump */
+ {
+ int i;
+ DBG_88E("\n wpa_ie(length:%d):\n", ielen);
+ for (i = 0; i < ielen; i += 8)
+ DBG_88E("0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x\n", buf[i], buf[i+1], buf[i+2], buf[i+3], buf[i+4], buf[i+5], buf[i+6], buf[i+7]);
+ }
+
+ if (ielen < RSN_HEADER_LEN) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("Ie len too short %d\n", ielen));
+ ret = -1;
+ goto exit;
+ }
+
+ if (rtw_parse_wpa_ie(buf, ielen, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPAPSK;
+ memcpy(padapter->securitypriv.supplicant_ie, &buf[0], ielen);
+ }
+
+ if (rtw_parse_wpa2_ie(buf, ielen, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPA2PSK;
+ memcpy(padapter->securitypriv.supplicant_ie, &buf[0], ielen);
+ }
+
+ switch (group_cipher) {
+ case WPA_CIPHER_NONE:
+ padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ break;
+ case WPA_CIPHER_WEP40:
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ case WPA_CIPHER_TKIP:
+ padapter->securitypriv.dot118021XGrpPrivacy = _TKIP_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
+ break;
+ case WPA_CIPHER_CCMP:
+ padapter->securitypriv.dot118021XGrpPrivacy = _AES_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
+ break;
+ case WPA_CIPHER_WEP104:
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP104_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ }
+
+ switch (pairwise_cipher) {
+ case WPA_CIPHER_NONE:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ break;
+ case WPA_CIPHER_WEP40:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ case WPA_CIPHER_TKIP:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _TKIP_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
+ break;
+ case WPA_CIPHER_CCMP:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _AES_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
+ break;
+ case WPA_CIPHER_WEP104:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ break;
+ }
+
+ _clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
+ {/* set wps_ie */
+ u16 cnt = 0;
+ u8 eid, wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
+
+ while (cnt < ielen) {
+ eid = buf[cnt];
+ if ((eid == _VENDOR_SPECIFIC_IE_) && (!memcmp(&buf[cnt+2], wps_oui, 4))) {
+ DBG_88E("SET WPS_IE\n");
+
+ padapter->securitypriv.wps_ie_len = ((buf[cnt+1]+2) < (MAX_WPA_IE_LEN<<2)) ? (buf[cnt+1]+2) : (MAX_WPA_IE_LEN<<2);
+
+ memcpy(padapter->securitypriv.wps_ie, &buf[cnt], padapter->securitypriv.wps_ie_len);
+
+ set_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS);
+#ifdef CONFIG_88EU_P2P
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_OK))
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_PROVISIONING_ING);
+#endif /* CONFIG_88EU_P2P */
+ cnt += buf[cnt+1]+2;
+ break;
+ } else {
+ cnt += buf[cnt+1]+2; /* goto next */
+ }
+ }
+ }
+ }
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
+ ("rtw_set_wpa_ie: pairwise_cipher = 0x%08x padapter->securitypriv.ndisencryptstatus =%d padapter->securitypriv.ndisauthtype =%d\n",
+ pairwise_cipher, padapter->securitypriv.ndisencryptstatus, padapter->securitypriv.ndisauthtype));
+exit:
+ kfree(buf);
+ return ret;
+}
+
+typedef unsigned char NDIS_802_11_RATES_EX[NDIS_802_11_LENGTH_RATES_EX];
+
+static int rtw_wx_get_name(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u32 ht_ielen = 0;
+ char *p;
+ u8 ht_cap = false;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+ NDIS_802_11_RATES_EX *prates = NULL;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("cmd_code =%x\n", info->cmd));
+
+ _func_enter_;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED|WIFI_ADHOC_MASTER_STATE) == true) {
+ /* parsing HT_CAP_IE */
+ p = rtw_get_ie(&pcur_bss->IEs[12], _HT_CAPABILITY_IE_, &ht_ielen, pcur_bss->IELength-12);
+ if (p && ht_ielen > 0)
+ ht_cap = true;
+
+ prates = &pcur_bss->SupportedRates;
+
+ if (rtw_is_cckratesonly_included((u8 *)prates) == true) {
+ if (ht_cap)
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bn");
+ else
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11b");
+ } else if ((rtw_is_cckrates_included((u8 *)prates)) == true) {
+ if (ht_cap)
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bgn");
+ else
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11bg");
+ } else {
+ if (pcur_bss->Configuration.DSConfig > 14) {
+ if (ht_cap)
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11an");
+ else
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11a");
+ } else {
+ if (ht_cap)
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11gn");
+ else
+ snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11g");
+ }
+ }
+ } else {
+ snprintf(wrqu->name, IFNAMSIZ, "unassociated");
+ }
+
+ _func_exit_;
+
+ return 0;
+}
+
+static int rtw_wx_set_freq(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ _func_enter_;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_notice_, ("+rtw_wx_set_freq\n"));
+
+ _func_exit_;
+
+ return 0;
+}
+
+static int rtw_wx_get_freq(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ /* wrqu->freq.m = ieee80211_wlan_frequencies[pcur_bss->Configuration.DSConfig-1] * 100000; */
+ wrqu->freq.m = rtw_ch2freq(pcur_bss->Configuration.DSConfig) * 100000;
+ wrqu->freq.e = 1;
+ wrqu->freq.i = pcur_bss->Configuration.DSConfig;
+ } else {
+ wrqu->freq.m = rtw_ch2freq(padapter->mlmeextpriv.cur_channel) * 100000;
+ wrqu->freq.e = 1;
+ wrqu->freq.i = padapter->mlmeextpriv.cur_channel;
+ }
+
+ return 0;
+}
+
+static int rtw_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ enum ndis_802_11_network_infra networkType;
+ int ret = 0;
+
+ _func_enter_;
+
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ret = -EPERM;
+ goto exit;
+ }
+
+ if (!padapter->hw_init_completed) {
+ ret = -EPERM;
+ goto exit;
+ }
+
+ switch (wrqu->mode) {
+ case IW_MODE_AUTO:
+ networkType = Ndis802_11AutoUnknown;
+ DBG_88E("set_mode = IW_MODE_AUTO\n");
+ break;
+ case IW_MODE_ADHOC:
+ networkType = Ndis802_11IBSS;
+ DBG_88E("set_mode = IW_MODE_ADHOC\n");
+ break;
+ case IW_MODE_MASTER:
+ networkType = Ndis802_11APMode;
+ DBG_88E("set_mode = IW_MODE_MASTER\n");
+ break;
+ case IW_MODE_INFRA:
+ networkType = Ndis802_11Infrastructure;
+ DBG_88E("set_mode = IW_MODE_INFRA\n");
+ break;
+ default:
+ ret = -EINVAL;
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("\n Mode: %s is not supported\n", iw_operation_mode[wrqu->mode]));
+ goto exit;
+ }
+ if (rtw_set_802_11_infrastructure_mode(padapter, networkType) == false) {
+ ret = -EPERM;
+ goto exit;
+ }
+ rtw_setopmode_cmd(padapter, networkType);
+exit:
+ _func_exit_;
+ return ret;
+}
+
+static int rtw_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, (" rtw_wx_get_mode\n"));
+
+ _func_enter_;
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
+ wrqu->mode = IW_MODE_INFRA;
+ else if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)))
+ wrqu->mode = IW_MODE_ADHOC;
+ else if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
+ wrqu->mode = IW_MODE_MASTER;
+ else
+ wrqu->mode = IW_MODE_AUTO;
+
+ _func_exit_;
+
+ return 0;
+}
+
+static int rtw_wx_set_pmkid(struct net_device *dev,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u8 j, blInserted = false;
+ int ret = false;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct iw_pmksa *pPMK = (struct iw_pmksa *)extra;
+ u8 strZeroMacAddress[ETH_ALEN] = {0x00};
+ u8 strIssueBssid[ETH_ALEN] = {0x00};
+
+ memcpy(strIssueBssid, pPMK->bssid.sa_data, ETH_ALEN);
+ if (pPMK->cmd == IW_PMKSA_ADD) {
+ DBG_88E("[rtw_wx_set_pmkid] IW_PMKSA_ADD!\n");
+ if (!memcmp(strIssueBssid, strZeroMacAddress, ETH_ALEN) == true)
+ return ret;
+ else
+ ret = true;
+ blInserted = false;
+
+ /* If this BSSID is already in the cache, overwrite its PMKID. */
+ for (j = 0; j < NUM_PMKID_CACHE; j++) {
+ if (!memcmp(psecuritypriv->PMKIDList[j].Bssid, strIssueBssid, ETH_ALEN)) {
+ /* BSSID is matched, the same AP => rewrite with new PMKID. */
+ DBG_88E("[rtw_wx_set_pmkid] BSSID exists in the PMKList.\n");
+ memcpy(psecuritypriv->PMKIDList[j].PMKID, pPMK->pmkid, IW_PMKID_LEN);
+ psecuritypriv->PMKIDList[j].bUsed = true;
+ psecuritypriv->PMKIDIndex = j+1;
+ blInserted = true;
+ break;
+ }
+ }
+
+ if (!blInserted) {
+ /* Find a new entry */
+ DBG_88E("[rtw_wx_set_pmkid] Use the new entry index = %d for this PMKID.\n",
+ psecuritypriv->PMKIDIndex);
+
+ memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].Bssid, strIssueBssid, ETH_ALEN);
+ memcpy(psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].PMKID, pPMK->pmkid, IW_PMKID_LEN);
+
+ psecuritypriv->PMKIDList[psecuritypriv->PMKIDIndex].bUsed = true;
+ psecuritypriv->PMKIDIndex++;
+ if (psecuritypriv->PMKIDIndex == 16)
+ psecuritypriv->PMKIDIndex = 0;
+ }
+ } else if (pPMK->cmd == IW_PMKSA_REMOVE) {
+ DBG_88E("[rtw_wx_set_pmkid] IW_PMKSA_REMOVE!\n");
+ ret = true;
+ for (j = 0; j < NUM_PMKID_CACHE; j++) {
+ if (!memcmp(psecuritypriv->PMKIDList[j].Bssid, strIssueBssid, ETH_ALEN)) {
+ /* BSSID is matched, the same AP => Remove this PMKID information and reset it. */
+ _rtw_memset(psecuritypriv->PMKIDList[j].Bssid, 0x00, ETH_ALEN);
+ psecuritypriv->PMKIDList[j].bUsed = false;
+ break;
+ }
+ }
+ } else if (pPMK->cmd == IW_PMKSA_FLUSH) {
+ DBG_88E("[rtw_wx_set_pmkid] IW_PMKSA_FLUSH!\n");
+ _rtw_memset(&psecuritypriv->PMKIDList[0], 0x00, sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
+ psecuritypriv->PMKIDIndex = 0;
+ ret = true;
+ }
+ return ret;
+}
+
+static int rtw_wx_get_sens(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ wrqu->sens.value = 0;
+ wrqu->sens.fixed = 0; /* no auto select */
+ wrqu->sens.disabled = 1;
+ return 0;
+}
+
+static int rtw_wx_get_range(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct iw_range *range = (struct iw_range *)extra;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ u16 val;
+ int i;
+
+ _func_enter_;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_range. cmd_code =%x\n", info->cmd));
+
+ wrqu->data.length = sizeof(*range);
+ _rtw_memset(range, 0, sizeof(*range));
+
+ /* Let's try to keep this struct in the same order as in
+ * linux/include/wireless.h
+ */
+
+ /* TODO: See what values we can set, and remove the ones we can't
+ * set, or fill them with some default data.
+ */
+
+ /* ~5 Mb/s real (802.11b) */
+ range->throughput = 5 * 1000 * 1000;
+
+ /* signal level threshold range */
+
+ /* percent values between 0 and 100. */
+ range->max_qual.qual = 100;
+ range->max_qual.level = 100;
+ range->max_qual.noise = 100;
+ range->max_qual.updated = 7; /* Updated all three */
+
+ range->avg_qual.qual = 92; /* > 8% missed beacons is 'bad' */
+ /* TODO: Find the real 'good' to 'bad' threshold value for RSSI */
+ range->avg_qual.level = 20 + -98;
+ range->avg_qual.noise = 0;
+ range->avg_qual.updated = 7; /* Updated all three */
+
+ range->num_bitrates = RATE_COUNT;
+
+ for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++)
+ range->bitrate[i] = rtw_rates[i];
+
+ range->min_frag = MIN_FRAG_THRESHOLD;
+ range->max_frag = MAX_FRAG_THRESHOLD;
+
+ range->pm_capa = 0;
+
+ range->we_version_compiled = WIRELESS_EXT;
+ range->we_version_source = 16;
+
+ for (i = 0, val = 0; i < MAX_CHANNEL_NUM; i++) {
+ /* Include only legal frequencies for some countries */
+ if (pmlmeext->channel_set[i].ChannelNum != 0) {
+ range->freq[val].i = pmlmeext->channel_set[i].ChannelNum;
+ range->freq[val].m = rtw_ch2freq(pmlmeext->channel_set[i].ChannelNum) * 100000;
+ range->freq[val].e = 1;
+ val++;
+ }
+
+ if (val == IW_MAX_FREQUENCIES)
+ break;
+ }
+
+ range->num_channels = val;
+ range->num_frequency = val;
+
+/* The following code provides the security capability to the network manager. */
+/* If the driver does not report this capability to the network manager, */
+/* WPA/WPA2 routers cannot be chosen in the network manager. */
+
+/*
+#define IW_SCAN_CAPA_NONE 0x00
+#define IW_SCAN_CAPA_ESSID 0x01
+#define IW_SCAN_CAPA_BSSID 0x02
+#define IW_SCAN_CAPA_CHANNEL 0x04
+#define IW_SCAN_CAPA_MODE 0x08
+#define IW_SCAN_CAPA_RATE 0x10
+#define IW_SCAN_CAPA_TYPE 0x20
+#define IW_SCAN_CAPA_TIME 0x40
+*/
+
+ range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
+ IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
+
+ range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE |
+ IW_SCAN_CAPA_BSSID | IW_SCAN_CAPA_CHANNEL |
+ IW_SCAN_CAPA_MODE | IW_SCAN_CAPA_RATE;
+ _func_exit_;
+
+ return 0;
+}
+
+/* set bssid flow */
+/* s1. rtw_set_802_11_infrastructure_mode() */
+/* s2. rtw_set_802_11_authentication_mode() */
+/* s3. set_802_11_encryption_mode() */
+/* s4. rtw_set_802_11_bssid() */
+static int rtw_wx_set_wap(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *awrq,
+ char *extra)
+{
+ unsigned long irqL;
+ uint ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct sockaddr *temp = (struct sockaddr *)awrq;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct list_head *phead;
+ u8 *dst_bssid, *src_bssid;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ enum ndis_802_11_auth_mode authmode;
+
+ _func_enter_;
+
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ret = -1;
+ goto exit;
+ }
+
+ if (!padapter->bup) {
+ ret = -1;
+ goto exit;
+ }
+
+ if (temp->sa_family != ARPHRD_ETHER) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ authmode = padapter->securitypriv.ndisauthtype;
+ _enter_critical_bh(&queue->lock, &irqL);
+ phead = get_list_head(queue);
+ pmlmepriv->pscanned = get_next(phead);
+
+ while (1) {
+ if ((rtw_end_of_queue_search(phead, pmlmepriv->pscanned)) == true)
+ break;
+
+ pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list);
+
+ pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
+
+ dst_bssid = pnetwork->network.MacAddress;
+
+ src_bssid = temp->sa_data;
+
+ if ((!memcmp(dst_bssid, src_bssid, ETH_ALEN))) {
+ if (!rtw_set_802_11_infrastructure_mode(padapter, pnetwork->network.InfrastructureMode)) {
+ ret = -1;
+ _exit_critical_bh(&queue->lock, &irqL);
+ goto exit;
+ }
+
+ break;
+ }
+ }
+ _exit_critical_bh(&queue->lock, &irqL);
+
+ rtw_set_802_11_authentication_mode(padapter, authmode);
+ /* set_802_11_encryption_mode(padapter, padapter->securitypriv.ndisencryptstatus); */
+ if (rtw_set_802_11_bssid(padapter, temp->sa_data) == false) {
+ ret = -1;
+ goto exit;
+ }
+
+exit:
+
+ _func_exit_;
+
+ return ret;
+}
+
+static int rtw_wx_get_wap(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+
+ wrqu->ap_addr.sa_family = ARPHRD_ETHER;
+
+ _rtw_memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_wap\n"));
+
+ _func_enter_;
+
+ if (((check_fwstate(pmlmepriv, _FW_LINKED)) == true) ||
+ ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) == true) ||
+ ((check_fwstate(pmlmepriv, WIFI_AP_STATE)) == true))
+ memcpy(wrqu->ap_addr.sa_data, pcur_bss->MacAddress, ETH_ALEN);
+ else
+ _rtw_memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN);
+
+ _func_exit_;
+
+ return 0;
+}
+
+static int rtw_wx_set_mlme(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ u16 reason;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct iw_mlme *mlme = (struct iw_mlme *)extra;
+
+ if (mlme == NULL)
+ return -1;
+
+ DBG_88E("%s\n", __func__);
+
+ reason = mlme->reason_code;
+
+ DBG_88E("%s, cmd =%d, reason =%d\n", __func__, mlme->cmd, reason);
+
+ switch (mlme->cmd) {
+ case IW_MLME_DEAUTH:
+ if (!rtw_set_802_11_disassociate(padapter))
+ ret = -1;
+ break;
+ case IW_MLME_DISASSOC:
+ if (!rtw_set_802_11_disassociate(padapter))
+ ret = -1;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return ret;
+}
+
+static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
+{
+ u8 _status = false;
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
+ unsigned long irqL;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+#endif /* CONFIG_88EU_P2P */
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_set_scan\n"));
+
+_func_enter_;
+ if (padapter->registrypriv.mp_mode == 1) {
+ if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
+ ret = -1;
+ goto exit;
+ }
+ }
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ret = -1;
+ goto exit;
+ }
+
+ if (padapter->bDriverStopped) {
+ DBG_88E("bDriverStopped =%d\n", padapter->bDriverStopped);
+ ret = -1;
+ goto exit;
+ }
+
+ if (!padapter->bup) {
+ ret = -1;
+ goto exit;
+ }
+
+ if (!padapter->hw_init_completed) {
+ ret = -1;
+ goto exit;
+ }
+
+ /* When traffic is busy, the driver does not do a site survey and returns success instead. */
+ /* wpa_supplicant will not issue the SIOCSIWSCAN cmd again after the scan timeout. */
+ /* Modified by thomas, 2011-02-22. */
+ if (pmlmepriv->LinkDetectInfo.bBusyTraffic) {
+ indicate_wx_scan_complete_event(padapter);
+ goto exit;
+ }
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING)) {
+ indicate_wx_scan_complete_event(padapter);
+ goto exit;
+ }
+
+/* For the DMP WiFi Display project, the driver won't scan because */
+/* pmlmepriv->scan_interval is always equal to 3. */
+/* As a result, wpa_supplicant won't find the WPS SoftAP. */
+
+#ifdef CONFIG_88EU_P2P
+ if (pwdinfo->p2p_state != P2P_STATE_NONE) {
+ rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH);
+ rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_FULL);
+ rtw_free_network_queue(padapter, true);
+ }
+#endif /* CONFIG_88EU_P2P */
+
+ _rtw_memset(ssid, 0, sizeof(struct ndis_802_11_ssid)*RTW_SSID_SCAN_AMOUNT);
+
+ if (wrqu->data.length == sizeof(struct iw_scan_req)) {
+ struct iw_scan_req *req = (struct iw_scan_req *)extra;
+
+ if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
+ int len = min((int)req->essid_len, IW_ESSID_MAX_SIZE);
+
+ memcpy(ssid[0].Ssid, req->essid, len);
+ ssid[0].SsidLength = len;
+
+ DBG_88E("IW_SCAN_THIS_ESSID, ssid =%s, len =%d\n", req->essid, req->essid_len);
+
+ _enter_critical_bh(&pmlmepriv->lock, &irqL);
+
+ _status = rtw_sitesurvey_cmd(padapter, ssid, 1, NULL, 0);
+
+ _exit_critical_bh(&pmlmepriv->lock, &irqL);
+ } else if (req->scan_type == IW_SCAN_TYPE_PASSIVE) {
+ DBG_88E("rtw_wx_set_scan, req->scan_type == IW_SCAN_TYPE_PASSIVE\n");
+ }
+ } else {
+ if (wrqu->data.length >= WEXT_CSCAN_HEADER_SIZE &&
+ !memcmp(extra, WEXT_CSCAN_HEADER, WEXT_CSCAN_HEADER_SIZE)) {
+ int len = wrqu->data.length - WEXT_CSCAN_HEADER_SIZE;
+ char *pos = extra+WEXT_CSCAN_HEADER_SIZE;
+ char section;
+ char sec_len;
+ int ssid_index = 0;
+
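+ /* The combo scan request is a sequence of sections: a one-byte tag,
+ * then a one-byte length plus the SSID for SSID sections, one byte of
+ * payload for type/channel sections, and two bytes for the dwell time
+ * sections.
+ */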
+ while (len >= 1) {
+ section = *(pos++);
+ len -= 1;
+
+ switch (section) {
+ case WEXT_CSCAN_SSID_SECTION:
+ if (len < 1) {
+ len = 0;
+ break;
+ }
+ sec_len = *(pos++); len -= 1;
+ if (sec_len > 0 && sec_len <= len) {
+ ssid[ssid_index].SsidLength = sec_len;
+ memcpy(ssid[ssid_index].Ssid, pos, ssid[ssid_index].SsidLength);
+ ssid_index++;
+ }
+ pos += sec_len;
+ len -= sec_len;
+ break;
+ case WEXT_CSCAN_TYPE_SECTION:
+ case WEXT_CSCAN_CHANNEL_SECTION:
+ pos += 1;
+ len -= 1;
+ break;
+ case WEXT_CSCAN_PASV_DWELL_SECTION:
+ case WEXT_CSCAN_HOME_DWELL_SECTION:
+ case WEXT_CSCAN_ACTV_DWELL_SECTION:
+ pos += 2;
+ len -= 2;
+ break;
+ default:
+ len = 0; /* stop parsing */
+ }
+ }
+
+ /* There are still scan parameters left to parse; for now we only use the SSIDs. */
+ _status = rtw_set_802_11_bssid_list_scan(padapter, ssid, RTW_SSID_SCAN_AMOUNT);
+ } else {
+ _status = rtw_set_802_11_bssid_list_scan(padapter, NULL, 0);
+ }
+ }
+
+ if (!_status)
+ ret = -1;
+
+exit:
+
+_func_exit_;
+ return ret;
+}
+
+static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
+{
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ char *ev = extra;
+ char *stop = ev + wrqu->data.length;
+ u32 ret = 0;
+ u32 cnt = 0;
+ u32 wait_for_surveydone;
+ int wait_status;
+#ifdef CONFIG_88EU_P2P
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+#endif /* CONFIG_88EU_P2P */
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_scan\n"));
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, (" Start of Query SIOCGIWSCAN .\n"));
+
+ _func_enter_;
+
+ if (padapter->pwrctrlpriv.brfoffbyhw && padapter->bDriverStopped) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+#ifdef CONFIG_88EU_P2P
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
+ /* P2P is enabled */
+ wait_for_surveydone = 200;
+ } else {
+ /* P2P is disabled */
+ wait_for_surveydone = 100;
+ }
+#else
+ {
+ wait_for_surveydone = 100;
+ }
+#endif /* CONFIG_88EU_P2P */
+
+ wait_status = _FW_UNDER_SURVEY | _FW_UNDER_LINKING;
+
+ while (check_fwstate(pmlmepriv, wait_status)) {
+ rtw_msleep_os(30);
+ cnt++;
+ if (cnt > wait_for_surveydone)
+ break;
+ }
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist))
+ break;
+
+ if ((stop - ev) < SCAN_ITEM_SIZE) {
+ ret = -E2BIG;
+ break;
+ }
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ /* report network only if the current channel set contains the channel to which this network belongs */
+ if (rtw_ch_set_search_ch(padapter->mlmeextpriv.channel_set, pnetwork->network.Configuration.DSConfig) >= 0)
+ ev = translate_scan(padapter, a, pnetwork, ev, stop);
+
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ wrqu->data.length = ev-extra;
+ wrqu->data.flags = 0;
+
+exit:
+ _func_exit_;
+ return ret;
+}
+
+/* set ssid flow */
+/* s1. rtw_set_802_11_infrastructure_mode() */
+/* s2. rtw_set_802_11_authentication_mode() */
+/* s3. set_802_11_encryption_mode() */
+/* s4. rtw_set_802_11_ssid() */
+static int rtw_wx_set_essid(struct net_device *dev,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
+{
+ unsigned long irqL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct __queue *queue = &pmlmepriv->scanned_queue;
+ struct list_head *phead;
+ struct wlan_network *pnetwork = NULL;
+ enum ndis_802_11_auth_mode authmode;
+ struct ndis_802_11_ssid ndis_ssid;
+ u8 *dst_ssid, *src_ssid;
+
+ uint ret = 0, len;
+
+ _func_enter_;
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
+ ("+rtw_wx_set_essid: fw_state = 0x%08x\n", get_fwstate(pmlmepriv)));
+ if (_FAIL == rtw_pwr_wakeup(padapter)) {
+ ret = -1;
+ goto exit;
+ }
+
+ if (!padapter->bup) {
+ ret = -1;
+ goto exit;
+ }
+
+ if (wrqu->essid.length > IW_ESSID_MAX_SIZE) {
+ ret = -E2BIG;
+ goto exit;
+ }
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ ret = -1;
+ goto exit;
+ }
+
+ authmode = padapter->securitypriv.ndisauthtype;
+ DBG_88E("=>%s\n", __func__);
+ if (wrqu->essid.flags && wrqu->essid.length) {
+ len = (wrqu->essid.length < IW_ESSID_MAX_SIZE) ? wrqu->essid.length : IW_ESSID_MAX_SIZE;
+
+ if (wrqu->essid.length != 33)
+ DBG_88E("ssid =%s, len =%d\n", extra, wrqu->essid.length);
+
+ _rtw_memset(&ndis_ssid, 0, sizeof(struct ndis_802_11_ssid));
+ ndis_ssid.SsidLength = len;
+ memcpy(ndis_ssid.Ssid, extra, len);
+ src_ssid = ndis_ssid.Ssid;
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("rtw_wx_set_essid: ssid =[%s]\n", src_ssid));
+ _enter_critical_bh(&queue->lock, &irqL);
+ phead = get_list_head(queue);
+ pmlmepriv->pscanned = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, pmlmepriv->pscanned) == true) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_warning_,
+ ("rtw_wx_set_essid: scan_q is empty, set ssid to check if scanning again!\n"));
+
+ break;
+ }
+
+ pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned, struct wlan_network, list);
+
+ pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
+
+ dst_ssid = pnetwork->network.Ssid.Ssid;
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
+ ("rtw_wx_set_essid: dst_ssid =%s\n",
+ pnetwork->network.Ssid.Ssid));
+
+ if ((!memcmp(dst_ssid, src_ssid, ndis_ssid.SsidLength)) &&
+ (pnetwork->network.Ssid.SsidLength == ndis_ssid.SsidLength)) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
+ ("rtw_wx_set_essid: find match, set infra mode\n"));
+
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) {
+ if (pnetwork->network.InfrastructureMode != pmlmepriv->cur_network.network.InfrastructureMode)
+ continue;
+ }
+
+ if (!rtw_set_802_11_infrastructure_mode(padapter, pnetwork->network.InfrastructureMode)) {
+ ret = -1;
+ _exit_critical_bh(&queue->lock, &irqL);
+ goto exit;
+ }
+
+ break;
+ }
+ }
+ _exit_critical_bh(&queue->lock, &irqL);
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
+ ("set ssid: set_802_11_auth. mode =%d\n", authmode));
+ rtw_set_802_11_authentication_mode(padapter, authmode);
+ if (rtw_set_802_11_ssid(padapter, &ndis_ssid) == false) {
+ ret = -1;
+ goto exit;
+ }
+ }
+
+exit:
+
+ DBG_88E("<=%s, ret %d\n", __func__, ret);
+
+ _func_exit_;
+
+ return ret;
+}
+
+static int rtw_wx_get_essid(struct net_device *dev,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
+{
+ u32 len, ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_bssid_ex *pcur_bss = &pmlmepriv->cur_network.network;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("rtw_wx_get_essid\n"));
+
+ _func_enter_;
+
+ if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) {
+ len = pcur_bss->Ssid.SsidLength;
+
+ wrqu->essid.length = len;
+
+ memcpy(extra, pcur_bss->Ssid.Ssid, len);
+
+ wrqu->essid.flags = 1;
+ } else {
+ ret = -1;
+ goto exit;
+ }
+
+exit:
+
+ _func_exit_;
+
+ return ret;
+}
+
+static int rtw_wx_set_rate(struct net_device *dev,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *extra)
+{
+ int i, ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u8 datarates[NumRates];
+ u32 target_rate = wrqu->bitrate.value;
+ u32 fixed = wrqu->bitrate.fixed;
+ u32 ratevalue = 0;
+ u8 mpdatarate[NumRates] = {11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0xff};
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, (" rtw_wx_set_rate\n"));
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("target_rate = %d, fixed = %d\n", target_rate, fixed));
+
+ if (target_rate == -1) {
+ ratevalue = 11;
+ goto set_rate;
+ }
+ target_rate = target_rate/100000;
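+ /* target_rate is now in units of 100 kb/s: 10 = 1 Mb/s, 55 = 5.5 Mb/s, ..., 540 = 54 Mb/s. */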
+
+ switch (target_rate) {
+ case 10:
+ ratevalue = 0;
+ break;
+ case 20:
+ ratevalue = 1;
+ break;
+ case 55:
+ ratevalue = 2;
+ break;
+ case 60:
+ ratevalue = 3;
+ break;
+ case 90:
+ ratevalue = 4;
+ break;
+ case 110:
+ ratevalue = 5;
+ break;
+ case 120:
+ ratevalue = 6;
+ break;
+ case 180:
+ ratevalue = 7;
+ break;
+ case 240:
+ ratevalue = 8;
+ break;
+ case 360:
+ ratevalue = 9;
+ break;
+ case 480:
+ ratevalue = 10;
+ break;
+ case 540:
+ ratevalue = 11;
+ break;
+ default:
+ ratevalue = 11;
+ break;
+ }
+
+set_rate:
+
+ for (i = 0; i < NumRates; i++) {
+ if (ratevalue == mpdatarate[i]) {
+ datarates[i] = mpdatarate[i];
+ if (fixed == 0)
+ break;
+ } else {
+ datarates[i] = 0xff;
+ }
+
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_, ("datarate_inx =%d\n", datarates[i]));
+ }
+
+ if (rtw_setdatarate_cmd(padapter, datarates) != _SUCCESS) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("rtw_wx_set_rate Fail!!!\n"));
+ ret = -1;
+ }
+
+_func_exit_;
+
+ return ret;
+}
+
+static int rtw_wx_get_rate(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ u16 max_rate = 0;
+
+ max_rate = rtw_get_cur_max_rate((struct adapter *)rtw_netdev_priv(dev));
+
+ if (max_rate == 0)
+ return -EPERM;
+
+ wrqu->bitrate.fixed = 0; /* no auto select */
+ wrqu->bitrate.value = max_rate * 100000;
+
+ return 0;
+}
+
+static int rtw_wx_set_rts(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ _func_enter_;
+
+ if (wrqu->rts.disabled) {
+ padapter->registrypriv.rts_thresh = 2347;
+ } else {
+ if (wrqu->rts.value < 0 ||
+ wrqu->rts.value > 2347)
+ return -EINVAL;
+
+ padapter->registrypriv.rts_thresh = wrqu->rts.value;
+ }
+
+ DBG_88E("%s, rts_thresh =%d\n", __func__, padapter->registrypriv.rts_thresh);
+
+ _func_exit_;
+
+ return 0;
+}
+
+static int rtw_wx_get_rts(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ _func_enter_;
+
+ DBG_88E("%s, rts_thresh =%d\n", __func__, padapter->registrypriv.rts_thresh);
+
+ wrqu->rts.value = padapter->registrypriv.rts_thresh;
+ wrqu->rts.fixed = 0; /* no auto select */
+ /* wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); */
+
+ _func_exit_;
+
+ return 0;
+}
+
+static int rtw_wx_set_frag(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ _func_enter_;
+
+ if (wrqu->frag.disabled) {
+ padapter->xmitpriv.frag_len = MAX_FRAG_THRESHOLD;
+ } else {
+ if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
+ wrqu->frag.value > MAX_FRAG_THRESHOLD)
+ return -EINVAL;
+
+ padapter->xmitpriv.frag_len = wrqu->frag.value & ~0x1;
+ }
+
+ DBG_88E("%s, frag_len =%d\n", __func__, padapter->xmitpriv.frag_len);
+
+ _func_exit_;
+
+ return 0;
+}
+
+static int rtw_wx_get_frag(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ _func_enter_;
+
+ DBG_88E("%s, frag_len =%d\n", __func__, padapter->xmitpriv.frag_len);
+
+ wrqu->frag.value = padapter->xmitpriv.frag_len;
+ wrqu->frag.fixed = 0; /* no auto select */
+
+ _func_exit_;
+
+ return 0;
+}
+
+static int rtw_wx_get_retry(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ wrqu->retry.value = 7;
+ wrqu->retry.fixed = 0; /* no auto select */
+ wrqu->retry.disabled = 1;
+
+ return 0;
+}
+
+static int rtw_wx_set_enc(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *keybuf)
+{
+ u32 key, ret = 0;
+ u32 keyindex_provided;
+ struct ndis_802_11_wep wep;
+ enum ndis_802_11_auth_mode authmode;
+
+ struct iw_point *erq = &(wrqu->encoding);
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ DBG_88E("+rtw_wx_set_enc, flags = 0x%x\n", erq->flags);
+
+ _rtw_memset(&wep, 0, sizeof(struct ndis_802_11_wep));
+
+ key = erq->flags & IW_ENCODE_INDEX;
+
+ _func_enter_;
+
+ if (erq->flags & IW_ENCODE_DISABLED) {
+ DBG_88E("EncryptionDisabled\n");
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
+ authmode = Ndis802_11AuthModeOpen;
+ padapter->securitypriv.ndisauthtype = authmode;
+
+ goto exit;
+ }
+
+ if (key) {
+ if (key > WEP_KEYS)
+ return -EINVAL;
+ key--;
+ keyindex_provided = 1;
+ } else {
+ keyindex_provided = 0;
+ key = padapter->securitypriv.dot11PrivacyKeyIndex;
+ DBG_88E("rtw_wx_set_enc, key =%d\n", key);
+ }
+
+ /* set authentication mode */
+ if (erq->flags & IW_ENCODE_OPEN) {
+ DBG_88E("rtw_wx_set_enc():IW_ENCODE_OPEN\n");
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;/* Ndis802_11EncryptionDisabled; */
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
+ authmode = Ndis802_11AuthModeOpen;
+ padapter->securitypriv.ndisauthtype = authmode;
+ } else if (erq->flags & IW_ENCODE_RESTRICTED) {
+ DBG_88E("rtw_wx_set_enc():IW_ENCODE_RESTRICTED\n");
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Shared;
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _WEP40_;
+ authmode = Ndis802_11AuthModeShared;
+ padapter->securitypriv.ndisauthtype = authmode;
+ } else {
+ DBG_88E("rtw_wx_set_enc():erq->flags = 0x%x\n", erq->flags);
+
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption1Enabled;/* Ndis802_11EncryptionDisabled; */
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
+ authmode = Ndis802_11AuthModeOpen;
+ padapter->securitypriv.ndisauthtype = authmode;
+ }
+
+ wep.KeyIndex = key;
+ if (erq->length > 0) {
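+ /* Keys of up to 5 bytes select WEP-40; longer keys are treated as 13-byte WEP-104. */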
+ wep.KeyLength = erq->length <= 5 ? 5 : 13;
+
+ wep.Length = wep.KeyLength + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
+ } else {
+ wep.KeyLength = 0;
+
+ if (keyindex_provided == 1) {
+ /* Set the key_id only; no KeyMaterial is given (erq->length == 0). */
+ padapter->securitypriv.dot11PrivacyKeyIndex = key;
+
+ DBG_88E("(keyindex_provided == 1), keyid =%d, key_len =%d\n", key, padapter->securitypriv.dot11DefKeylen[key]);
+
+ switch (padapter->securitypriv.dot11DefKeylen[key]) {
+ case 5:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
+ break;
+ case 13:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
+ break;
+ default:
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ break;
+ }
+
+ goto exit;
+ }
+ }
+
+ wep.KeyIndex |= 0x80000000;
+
+ memcpy(wep.KeyMaterial, keybuf, wep.KeyLength);
+
+ if (rtw_set_802_11_add_wep(padapter, &wep) == false) {
+ if (rf_on == pwrpriv->rf_pwrstate)
+ ret = -EOPNOTSUPP;
+ goto exit;
+ }
+
+exit:
+
+ _func_exit_;
+
+ return ret;
+}
+
+static int rtw_wx_get_enc(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *keybuf)
+{
+ uint key, ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct iw_point *erq = &(wrqu->encoding);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ _func_enter_;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED) != true) {
+ if (!check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) {
+ erq->length = 0;
+ erq->flags |= IW_ENCODE_DISABLED;
+ return 0;
+ }
+ }
+
+ key = erq->flags & IW_ENCODE_INDEX;
+
+ if (key) {
+ if (key > WEP_KEYS)
+ return -EINVAL;
+ key--;
+ } else {
+ key = padapter->securitypriv.dot11PrivacyKeyIndex;
+ }
+
+ erq->flags = key + 1;
+
+ switch (padapter->securitypriv.ndisencryptstatus) {
+ case Ndis802_11EncryptionNotSupported:
+ case Ndis802_11EncryptionDisabled:
+ erq->length = 0;
+ erq->flags |= IW_ENCODE_DISABLED;
+ break;
+ case Ndis802_11Encryption1Enabled:
+ erq->length = padapter->securitypriv.dot11DefKeylen[key];
+ if (erq->length) {
+ memcpy(keybuf, padapter->securitypriv.dot11DefKey[key].skey, padapter->securitypriv.dot11DefKeylen[key]);
+
+ erq->flags |= IW_ENCODE_ENABLED;
+
+ if (padapter->securitypriv.ndisauthtype == Ndis802_11AuthModeOpen)
+ erq->flags |= IW_ENCODE_OPEN;
+ else if (padapter->securitypriv.ndisauthtype == Ndis802_11AuthModeShared)
+ erq->flags |= IW_ENCODE_RESTRICTED;
+ } else {
+ erq->length = 0;
+ erq->flags |= IW_ENCODE_DISABLED;
+ }
+ break;
+ case Ndis802_11Encryption2Enabled:
+ case Ndis802_11Encryption3Enabled:
+ erq->length = 16;
+ erq->flags |= (IW_ENCODE_ENABLED | IW_ENCODE_OPEN | IW_ENCODE_NOKEY);
+ break;
+ default:
+ erq->length = 0;
+ erq->flags |= IW_ENCODE_DISABLED;
+ break;
+ }
+ _func_exit_;
+
+ return ret;
+}
+
+static int rtw_wx_get_power(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ wrqu->power.value = 0;
+ wrqu->power.fixed = 0; /* no auto select */
+ wrqu->power.disabled = 1;
+
+ return 0;
+}
+
+static int rtw_wx_set_gen_ie(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ ret = rtw_set_wpa_ie(padapter, extra, wrqu->data.length);
+ return ret;
+}
+
+static int rtw_wx_set_auth(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct iw_param *param = (struct iw_param *)&(wrqu->param);
+ int ret = 0;
+
+ switch (param->flags & IW_AUTH_INDEX) {
+ case IW_AUTH_WPA_VERSION:
+ break;
+ case IW_AUTH_CIPHER_PAIRWISE:
+
+ break;
+ case IW_AUTH_CIPHER_GROUP:
+
+ break;
+ case IW_AUTH_KEY_MGMT:
+ /*
+ * ??? does not use these parameters
+ */
+ break;
+ case IW_AUTH_TKIP_COUNTERMEASURES:
+ if (param->value) {
+ /* wpa_supplicant is enabling the tkip countermeasure. */
+ padapter->securitypriv.btkip_countermeasure = true;
+ } else {
+ /* wpa_supplicant is disabling the tkip countermeasure. */
+ padapter->securitypriv.btkip_countermeasure = false;
+ }
+ break;
+ case IW_AUTH_DROP_UNENCRYPTED:
+ /* HACK:
+ *
+ * wpa_supplicant calls set_wpa_enabled when the driver
+ * is loaded and unloaded, regardless of whether WPA is
+ * being used. No other calls are made before association
+ * that could tell us whether encryption will be used.
+ * If encryption is not being used, drop_unencrypted is set
+ * to false, otherwise true -- we can use this to decide
+ * whether the CAP_PRIVACY_ON bit should be set.
+ */
+
+ if (padapter->securitypriv.ndisencryptstatus == Ndis802_11Encryption1Enabled)
+ break;/* Ndis802_11Encryption1Enabled is either the initial value or means WEP is in use, */
+ /* so there is no need to reset it. */
+
+ if (param->value) {
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11EncryptionDisabled;
+ padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ padapter->securitypriv.dot118021XGrpPrivacy = _NO_PRIVACY_;
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
+ }
+
+ break;
+ case IW_AUTH_80211_AUTH_ALG:
+ /*
+ * It's the starting point of a link layer connection using wpa_supplicant
+ */
+ if (check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
+ LeaveAllPowerSaveMode(padapter);
+ rtw_disassoc_cmd(padapter, 500, false);
+ DBG_88E("%s...call rtw_indicate_disconnect\n ", __func__);
+ rtw_indicate_disconnect(padapter);
+ rtw_free_assoc_resources(padapter, 1);
+ }
+ ret = wpa_set_auth_algs(dev, (u32)param->value);
+ break;
+ case IW_AUTH_WPA_ENABLED:
+ break;
+ case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+ break;
+ case IW_AUTH_PRIVACY_INVOKED:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int rtw_wx_set_enc_ext(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ char *alg_name;
+ u32 param_len;
+ struct ieee_param *param = NULL;
+ struct iw_point *pencoding = &wrqu->encoding;
+ struct iw_encode_ext *pext = (struct iw_encode_ext *)extra;
+ int ret = 0;
+
+ param_len = sizeof(struct ieee_param) + pext->key_len;
+ param = (struct ieee_param *)rtw_malloc(param_len);
+ if (param == NULL)
+ return -ENOMEM;
+
+ _rtw_memset(param, 0, param_len);
+
+ param->cmd = IEEE_CMD_SET_ENCRYPTION;
+ _rtw_memset(param->sta_addr, 0xff, ETH_ALEN);
+
+ switch (pext->alg) {
+ case IW_ENCODE_ALG_NONE:
+ /* todo: remove key */
+ /* remove = 1; */
+ alg_name = "none";
+ break;
+ case IW_ENCODE_ALG_WEP:
+ alg_name = "WEP";
+ break;
+ case IW_ENCODE_ALG_TKIP:
+ alg_name = "TKIP";
+ break;
+ case IW_ENCODE_ALG_CCMP:
+ alg_name = "CCMP";
+ break;
+ default:
+ kfree(param); /* do not leak the allocation on an unknown algorithm */
+ return -EINVAL;
+ }
+
+ strncpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);
+
+ if (pext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
+ param->u.crypt.set_tx = 1;
+
+ /* cliW: WEP does not have a group key,
+ * so skip the GROUP key check for it.
+ */
+ if ((pext->alg != IW_ENCODE_ALG_WEP) &&
+ (pext->ext_flags & IW_ENCODE_EXT_GROUP_KEY))
+ param->u.crypt.set_tx = 0;
+
+ param->u.crypt.idx = (pencoding->flags&0x00FF) - 1;
+
+ if (pext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID)
+ memcpy(param->u.crypt.seq, pext->rx_seq, 8);
+
+ if (pext->key_len) {
+ param->u.crypt.key_len = pext->key_len;
+ memcpy(param->u.crypt.key, pext + 1, pext->key_len);
+ }
+
+ ret = wpa_set_encryption(dev, param, param_len);
+
+ kfree(param);
+ return ret;
+}
+
+static int rtw_wx_get_nick(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ if (extra) {
+ wrqu->data.length = 14;
+ wrqu->data.flags = 1;
+ memcpy(extra, "<WIFI@REALTEK>", 14);
+ }
+
+ /* dump debug info here */
+ return 0;
+}
+
+static int rtw_wx_read32(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter;
+ struct iw_point *p;
+ u16 len;
+ u32 addr;
+ u32 data32;
+ u32 bytes;
+ u8 *ptmp;
+
+ padapter = (struct adapter *)rtw_netdev_priv(dev);
+ p = &wrqu->data;
+ len = p->length;
+ ptmp = (u8 *)rtw_malloc(len);
+ if (!ptmp)
+ return -ENOMEM;
+
+ if (copy_from_user(ptmp, p->pointer, len)) {
+ kfree(ptmp);
+ return -EFAULT;
+ }
+
+ bytes = 0;
+ addr = 0;
+ sscanf(ptmp, "%d,%x", &bytes, &addr);
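+ /*
+ * Illustrative input only: a string such as "4,100" asks for a 4-byte
+ * read at the (hypothetical) register offset 0x100; the value is then
+ * formatted back into "extra" as a hex string below.
+ */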
+
+ switch (bytes) {
+ case 1:
+ data32 = rtw_read8(padapter, addr);
+ sprintf(extra, "0x%02X", data32);
+ break;
+ case 2:
+ data32 = rtw_read16(padapter, addr);
+ sprintf(extra, "0x%04X", data32);
+ break;
+ case 4:
+ data32 = rtw_read32(padapter, addr);
+ sprintf(extra, "0x%08X", data32);
+ break;
+ default:
+ DBG_88E(KERN_INFO "%s: usage> read [bytes],[address(hex)]\n", __func__);
+ kfree(ptmp); /* free the parse buffer before bailing out */
+ return -EINVAL;
+ }
+ DBG_88E(KERN_INFO "%s: addr = 0x%08X data =%s\n", __func__, addr, extra);
+
+ kfree(ptmp);
+ return 0;
+}
+
+static int rtw_wx_write32(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ u32 addr;
+ u32 data32;
+ u32 bytes;
+
+ bytes = 0;
+ addr = 0;
+ data32 = 0;
+ sscanf(extra, "%d,%x,%x", &bytes, &addr, &data32);
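+ /*
+ * Illustrative input only: a string such as "4,100,11223344" writes
+ * the 32-bit value 0x11223344 to the (hypothetical) register offset
+ * 0x100.
+ */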
+
+ switch (bytes) {
+ case 1:
+ rtw_write8(padapter, addr, (u8)data32);
+ DBG_88E(KERN_INFO "%s: addr = 0x%08X data = 0x%02X\n", __func__, addr, (u8)data32);
+ break;
+ case 2:
+ rtw_write16(padapter, addr, (u16)data32);
+ DBG_88E(KERN_INFO "%s: addr = 0x%08X data = 0x%04X\n", __func__, addr, (u16)data32);
+ break;
+ case 4:
+ rtw_write32(padapter, addr, data32);
+ DBG_88E(KERN_INFO "%s: addr = 0x%08X data = 0x%08X\n", __func__, addr, data32);
+ break;
+ default:
+ DBG_88E(KERN_INFO "%s: usage> write [bytes],[address(hex)],[data(hex)]\n", __func__);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rtw_wx_read_rf(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u32 path, addr, data32;
+
+ path = *(u32 *)extra;
+ addr = *((u32 *)extra + 1);
+ data32 = rtw_hal_read_rfreg(padapter, path, addr, 0xFFFFF);
+ /*
+ * IMPORTANT!!
+ * "extra" is copied back to user space only when the wireless
+ * private ioctl has an odd ordinal (order).
+ */
+ sprintf(extra, "0x%05x", data32);
+
+ return 0;
+}
+
+static int rtw_wx_write_rf(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u32 path, addr, data32;
+
+ path = *(u32 *)extra;
+ addr = *((u32 *)extra + 1);
+ data32 = *((u32 *)extra + 2);
+ rtw_hal_write_rfreg(padapter, path, addr, 0xFFFFF, data32);
+
+ return 0;
+}
+
+static int rtw_wx_priv_null(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ return -1;
+}
+
+static int dummy(struct net_device *dev, struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ return -1;
+}
+
+static int rtw_wx_set_channel_plan(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ u8 channel_plan_req = (u8) (*((int *)wrqu));
+
+ if (_SUCCESS == rtw_set_chplan_cmd(padapter, channel_plan_req, 1))
+ DBG_88E("%s set channel_plan = 0x%02X\n", __func__, pmlmepriv->ChannelPlan);
+ else
+ return -EPERM;
+
+ return 0;
+}
+
+static int rtw_wx_set_mtk_wps_probe_ie(struct net_device *dev,
+ struct iw_request_info *a,
+ union iwreq_data *wrqu, char *b)
+{
+ return 0;
+}
+
+static int rtw_wx_get_sensitivity(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *buf)
+{
+ return 0;
+}
+
+static int rtw_wx_set_mtk_wps_ie(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ return 0;
+}
+
+/*
+ * For all data larger than 16 octets, we need to use a
+ * pointer to memory allocated in user space.
+ */
+static int rtw_drvext_hdl(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ return 0;
+}
+
+static void rtw_dbg_mode_hdl(struct adapter *padapter, u32 id, u8 *pdata, u32 len)
+{
+ struct mp_rw_reg *RegRWStruct;
+ struct rf_reg_param *prfreg;
+ u8 path;
+ u8 offset;
+ u32 value;
+
+ DBG_88E("%s\n", __func__);
+
+ switch (id) {
+ case GEN_MP_IOCTL_SUBCODE(MP_START):
+ DBG_88E("871x_driver is only for normal mode, can't enter mp mode\n");
+ break;
+ case GEN_MP_IOCTL_SUBCODE(READ_REG):
+ RegRWStruct = (struct mp_rw_reg *)pdata;
+ switch (RegRWStruct->width) {
+ case 1:
+ RegRWStruct->value = rtw_read8(padapter, RegRWStruct->offset);
+ break;
+ case 2:
+ RegRWStruct->value = rtw_read16(padapter, RegRWStruct->offset);
+ break;
+ case 4:
+ RegRWStruct->value = rtw_read32(padapter, RegRWStruct->offset);
+ break;
+ default:
+ break;
+ }
+
+ break;
+ case GEN_MP_IOCTL_SUBCODE(WRITE_REG):
+ RegRWStruct = (struct mp_rw_reg *)pdata;
+ switch (RegRWStruct->width) {
+ case 1:
+ rtw_write8(padapter, RegRWStruct->offset, (u8)RegRWStruct->value);
+ break;
+ case 2:
+ rtw_write16(padapter, RegRWStruct->offset, (u16)RegRWStruct->value);
+ break;
+ case 4:
+ rtw_write32(padapter, RegRWStruct->offset, (u32)RegRWStruct->value);
+ break;
+ default:
+ break;
+ }
+
+ break;
+ case GEN_MP_IOCTL_SUBCODE(READ_RF_REG):
+
+ prfreg = (struct rf_reg_param *)pdata;
+
+ path = (u8)prfreg->path;
+ offset = (u8)prfreg->offset;
+
+ value = rtw_hal_read_rfreg(padapter, path, offset, 0xffffffff);
+
+ prfreg->value = value;
+
+ break;
+ case GEN_MP_IOCTL_SUBCODE(WRITE_RF_REG):
+
+ prfreg = (struct rf_reg_param *)pdata;
+
+ path = (u8)prfreg->path;
+ offset = (u8)prfreg->offset;
+ value = prfreg->value;
+
+ rtw_hal_write_rfreg(padapter, path, offset, 0xffffffff, value);
+
+ break;
+ case GEN_MP_IOCTL_SUBCODE(TRIGGER_GPIO):
+ DBG_88E("==> trigger gpio 0\n");
+ rtw_hal_set_hwreg(padapter, HW_VAR_TRIGGER_GPIO_0, NULL);
+ break;
+ case GEN_MP_IOCTL_SUBCODE(GET_WIFI_STATUS):
+ *pdata = rtw_hal_sreset_get_wifi_status(padapter);
+ break;
+ default:
+ break;
+ }
+}
+
+static int rtw_mp_ioctl_hdl(struct net_device *dev, struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ u32 BytesRead, BytesWritten, BytesNeeded;
+ struct oid_par_priv oid_par;
+ struct mp_ioctl_handler *phandler;
+ struct mp_ioctl_param *poidparam;
+ uint status = 0;
+ u16 len;
+ u8 *pparmbuf = NULL, bset;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct iw_point *p = &wrqu->data;
+
+ if ((!p->length) || (!p->pointer)) {
+ ret = -EINVAL;
+ goto _rtw_mp_ioctl_hdl_exit;
+ }
+ pparmbuf = NULL;
+ bset = (u8)(p->flags & 0xFFFF);
+ len = p->length;
+ pparmbuf = (u8 *)rtw_malloc(len);
+ if (pparmbuf == NULL) {
+ ret = -ENOMEM;
+ goto _rtw_mp_ioctl_hdl_exit;
+ }
+
+ if (copy_from_user(pparmbuf, p->pointer, len)) {
+ ret = -EFAULT;
+ goto _rtw_mp_ioctl_hdl_exit;
+ }
+
+ poidparam = (struct mp_ioctl_param *)pparmbuf;
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
+ ("rtw_mp_ioctl_hdl: subcode [%d], len[%d], buffer_len[%d]\r\n",
+ poidparam->subcode, poidparam->len, len));
+
+ if (poidparam->subcode >= MAX_MP_IOCTL_SUBCODE) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("no matching drvext subcodes\r\n"));
+ ret = -EINVAL;
+ goto _rtw_mp_ioctl_hdl_exit;
+ }
+
+ if (padapter->registrypriv.mp_mode == 1) {
+ phandler = mp_ioctl_hdl + poidparam->subcode;
+
+ if ((phandler->paramsize != 0) && (poidparam->len < phandler->paramsize)) {
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_,
+ ("no matching drvext param size %d vs %d\r\n",
+ poidparam->len, phandler->paramsize));
+ ret = -EINVAL;
+ goto _rtw_mp_ioctl_hdl_exit;
+ }
+
+ if (phandler->handler) {
+ oid_par.adapter_context = padapter;
+ oid_par.oid = phandler->oid;
+ oid_par.information_buf = poidparam->data;
+ oid_par.information_buf_len = poidparam->len;
+ oid_par.dbg = 0;
+
+ BytesWritten = 0;
+ BytesNeeded = 0;
+
+ if (bset) {
+ oid_par.bytes_rw = &BytesRead;
+ oid_par.bytes_needed = &BytesNeeded;
+ oid_par.type_of_oid = SET_OID;
+ } else {
+ oid_par.bytes_rw = &BytesWritten;
+ oid_par.bytes_needed = &BytesNeeded;
+ oid_par.type_of_oid = QUERY_OID;
+ }
+
+ status = phandler->handler(&oid_par);
+ } else {
+ DBG_88E("rtw_mp_ioctl_hdl(): err!, subcode =%d, oid =%d, handler =%p\n",
+ poidparam->subcode, phandler->oid, phandler->handler);
+ ret = -EFAULT;
+ goto _rtw_mp_ioctl_hdl_exit;
+ }
+ } else {
+ rtw_dbg_mode_hdl(padapter, poidparam->subcode, poidparam->data, poidparam->len);
+ }
+
+ if (bset == 0x00) {/* query info */
+ if (copy_to_user(p->pointer, pparmbuf, len))
+ ret = -EFAULT;
+ }
+
+ if (status) {
+ ret = -EFAULT;
+ goto _rtw_mp_ioctl_hdl_exit;
+ }
+
+_rtw_mp_ioctl_hdl_exit:
+
+ kfree(pparmbuf);
+ return ret;
+}
+
+static int rtw_get_ap_info(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ u32 cnt = 0, wpa_ielen;
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ unsigned char *pbuf;
+ u8 bssid[ETH_ALEN];
+ char data[32];
+ struct wlan_network *pnetwork = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct iw_point *pdata = &wrqu->data;
+
+ DBG_88E("+rtw_get_aplist_info\n");
+
+ if ((padapter->bDriverStopped) || (pdata == NULL)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ while ((check_fwstate(pmlmepriv, (_FW_UNDER_SURVEY|_FW_UNDER_LINKING)))) {
+ rtw_msleep_os(30);
+ cnt++;
+ if (cnt > 100)
+ break;
+ }
+ pdata->flags = 0;
+ if (pdata->length >= 32) {
+ if (copy_from_user(data, pdata->pointer, 32)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ } else {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ if (hwaddr_aton_i(data, bssid)) {
+ DBG_88E("Invalid BSSID '%s'.\n", (u8 *)data);
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+ return -EINVAL;
+ }
+
+ if (!memcmp(bssid, pnetwork->network.MacAddress, ETH_ALEN)) {
+ /* BSSID match, then check if supporting wpa/wpa2 */
+ DBG_88E("BSSID:%pM\n", (bssid));
+
+ pbuf = rtw_get_wpa_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12);
+ if (pbuf && (wpa_ielen > 0)) {
+ pdata->flags = 1;
+ break;
+ }
+
+ pbuf = rtw_get_wpa2_ie(&pnetwork->network.IEs[12], &wpa_ielen, pnetwork->network.IELength-12);
+ if (pbuf && (wpa_ielen > 0)) {
+ pdata->flags = 2;
+ break;
+ }
+ }
+
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ if (pdata->length >= 34) {
+ if (copy_to_user(pdata->pointer+32, (u8 *)&pdata->flags, 1)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ }
+
+exit:
+
+ return ret;
+}
+
+static int rtw_set_pid(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ int *pdata = (int *)wrqu;
+ int selector;
+
+ if ((padapter->bDriverStopped) || (pdata == NULL)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ selector = *pdata;
+ if (selector < 3 && selector >= 0) {
+ padapter->pid[selector] = *(pdata+1);
+ ui_pid[selector] = *(pdata+1);
+ DBG_88E("%s set pid[%d] =%d\n", __func__, selector, padapter->pid[selector]);
+ } else {
+ DBG_88E("%s selector %d error\n", __func__, selector);
+ }
+exit:
+ return ret;
+}
+
+static int rtw_wps_start(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct iw_point *pdata = &wrqu->data;
+ u32 u32wps_start = 0;
+
+ ret = copy_from_user((void *)&u32wps_start, pdata->pointer, 4);
+ if (ret) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if ((padapter->bDriverStopped) || (pdata == NULL)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (u32wps_start == 0)
+ u32wps_start = *extra;
+
+ DBG_88E("[%s] wps_start = %d\n", __func__, u32wps_start);
+
+ if (u32wps_start == 1) /* WPS Start */
+ rtw_led_control(padapter, LED_CTL_START_WPS);
+ else if (u32wps_start == 2) /* WPS Stop because of wps success */
+ rtw_led_control(padapter, LED_CTL_STOP_WPS);
+ else if (u32wps_start == 3) /* WPS Stop because of wps fail */
+ rtw_led_control(padapter, LED_CTL_STOP_WPS_FAIL);
+
+exit:
+ return ret;
+}
+
+#ifdef CONFIG_88EU_P2P
+static int rtw_wext_p2p_enable(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ enum P2P_ROLE init_role = P2P_ROLE_DISABLE;
+
+ if (*extra == '0')
+ init_role = P2P_ROLE_DISABLE;
+ else if (*extra == '1')
+ init_role = P2P_ROLE_DEVICE;
+ else if (*extra == '2')
+ init_role = P2P_ROLE_CLIENT;
+ else if (*extra == '3')
+ init_role = P2P_ROLE_GO;
+
+ if (_FAIL == rtw_p2p_enable(padapter, init_role)) {
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ /* set channel/bandwidth */
+ if (init_role != P2P_ROLE_DISABLE) {
+ u8 channel, ch_offset;
+ u16 bwmode;
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_LISTEN)) {
+ /* Stay at the listen state and wait for discovery. */
+ channel = pwdinfo->listen_channel;
+ pwdinfo->operating_channel = pwdinfo->listen_channel;
+ ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
+ bwmode = HT_CHANNEL_WIDTH_20;
+ } else {
+ pwdinfo->operating_channel = pmlmeext->cur_channel;
+
+ channel = pwdinfo->operating_channel;
+ ch_offset = pmlmeext->cur_ch_offset;
+ bwmode = pmlmeext->cur_bwmode;
+ }
+
+ set_channel_bwmode(padapter, channel, ch_offset, bwmode);
+ }
+
+exit:
+ return ret;
+}
+
+static int rtw_p2p_set_go_nego_ssid(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ DBG_88E("[%s] ssid = %s, len = %zu\n", __func__, extra, strlen(extra));
+ memcpy(pwdinfo->nego_ssid, extra, strlen(extra));
+ pwdinfo->nego_ssidlen = strlen(extra);
+
+ return ret;
+}
+
+static int rtw_p2p_set_intent(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 intent = pwdinfo->intent;
+
+ switch (wrqu->data.length) {
+ case 1:
+ intent = extra[0] - '0';
+ break;
+ case 2:
+ intent = str_2char2num(extra[0], extra[1]);
+ break;
+ }
+ if (intent <= 15)
+ pwdinfo->intent = intent;
+ else
+ ret = -1;
+ DBG_88E("[%s] intent = %d\n", __func__, intent);
+ return ret;
+}
+
+static int rtw_p2p_set_listen_ch(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 listen_ch = pwdinfo->listen_channel; /* Listen channel number */
+
+ switch (wrqu->data.length) {
+ case 1:
+ listen_ch = extra[0] - '0';
+ break;
+ case 2:
+ listen_ch = str_2char2num(extra[0], extra[1]);
+ break;
+ }
+
+ if ((listen_ch == 1) || (listen_ch == 6) || (listen_ch == 11)) {
+ pwdinfo->listen_channel = listen_ch;
+ set_channel_bwmode(padapter, pwdinfo->listen_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+ } else {
+ ret = -1;
+ }
+
+ DBG_88E("[%s] listen_ch = %d\n", __func__, pwdinfo->listen_channel);
+
+ return ret;
+}
+
+static int rtw_p2p_set_op_ch(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+/* Commented by Albert 20110524 */
+/* This function is used to set the operating channel if the driver will become the group owner */
+
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 op_ch = pwdinfo->operating_channel; /* Operating channel number */
+
+ switch (wrqu->data.length) {
+ case 1:
+ op_ch = extra[0] - '0';
+ break;
+ case 2:
+ op_ch = str_2char2num(extra[0], extra[1]);
+ break;
+ }
+
+ if (op_ch > 0)
+ pwdinfo->operating_channel = op_ch;
+ else
+ ret = -1;
+
+ DBG_88E("[%s] op_ch = %d\n", __func__, pwdinfo->operating_channel);
+
+ return ret;
+}
+
+static int rtw_p2p_profilefound(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ /* Comment by Albert 2010/10/13 */
+ /* Input data format: */
+ /* Ex: 0 */
+ /* Ex: 1XX:XX:XX:XX:XX:XXYYSSID */
+ /* 0 => Flush the profile record list. */
+ /* 1 => Add an entry to the profile list */
+ /* XX:XX:XX:XX:XX:XX => peer's MAC address (ex: 00:E0:4C:00:00:01) */
+ /* YY => SSID length (two decimal digits) */
+ /* SSID => SSID of the persistent group */
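+ /*
+ * Illustrative example: the input "100:E0:4C:00:00:0109DIRECT-xy"
+ * parses below as peer MAC 00:E0:4C:00:00:01 (extra[1..17]), SSID
+ * length 09 (extra[18..19]) and SSID "DIRECT-xy" (extra[20..]).
+ */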
+
+ DBG_88E("[%s] In value = %s, len = %d\n", __func__, extra, wrqu->data.length - 1);
+
+ /* The upper-layer application should pass the SSID to the driver through this rtw_p2p_profilefound function. */
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
+ if (extra[0] == '0') {
+ /* Remove all the profile information of wifidirect_info structure. */
+ _rtw_memset(&pwdinfo->profileinfo[0], 0x00, sizeof(struct profile_info) * P2P_MAX_PERSISTENT_GROUP_NUM);
+ pwdinfo->profileindex = 0;
+ } else {
+ if (pwdinfo->profileindex >= P2P_MAX_PERSISTENT_GROUP_NUM) {
+ ret = -1;
+ } else {
+ int jj, kk;
+
+ /* Add this profile information into pwdinfo->profileinfo */
+ /* Ex: 1XX:XX:XX:XX:XX:XXYYSSID */
+ for (jj = 0, kk = 1; jj < ETH_ALEN; jj++, kk += 3)
+ pwdinfo->profileinfo[pwdinfo->profileindex].peermac[jj] = key_2char2num(extra[kk], extra[kk + 1]);
+
+ pwdinfo->profileinfo[pwdinfo->profileindex].ssidlen = (extra[18] - '0') * 10 + (extra[19] - '0');
+ memcpy(pwdinfo->profileinfo[pwdinfo->profileindex].ssid, &extra[20], pwdinfo->profileinfo[pwdinfo->profileindex].ssidlen);
+ pwdinfo->profileindex++;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static int rtw_p2p_setDN(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ DBG_88E("[%s] %s %d\n", __func__, extra, wrqu->data.length - 1);
+ _rtw_memset(pwdinfo->device_name, 0x00, WPS_MAX_DEVICE_NAME_LEN);
+ memcpy(pwdinfo->device_name, extra, wrqu->data.length - 1);
+ pwdinfo->device_name_len = wrqu->data.length - 1;
+
+ return ret;
+}
+
+static int rtw_p2p_get_status(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ if (padapter->bShowGetP2PState)
+ DBG_88E("[%s] Role = %d, Status = %d, peer addr = %.2X:%.2X:%.2X:%.2X:%.2X:%.2X\n", __func__, rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo),
+ pwdinfo->p2p_peer_interface_addr[0], pwdinfo->p2p_peer_interface_addr[1], pwdinfo->p2p_peer_interface_addr[2],
+ pwdinfo->p2p_peer_interface_addr[3], pwdinfo->p2p_peer_interface_addr[4], pwdinfo->p2p_peer_interface_addr[5]);
+
+ /* Commented by Albert 2010/10/12 */
+ /* Because of the output size limitation, the "Role" information has been removed here. */
+ /* The "Role" information is retrieved through a separate private ioctl instead. */
+ sprintf(extra, "\n\nStatus =%.2d\n", rtw_p2p_state(pwdinfo));
+ wrqu->data.length = strlen(extra);
+
+ return ret;
+}
+
+/* Commented by Albert 20110520 */
+/* This function returns the config method description, which tells us which */
+/* config method the remote P2P device intends to use when sending the */
+/* provisioning discovery request frame. */
+
+static int rtw_p2p_get_req_cm(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ sprintf(extra, "\n\nCM =%s\n", pwdinfo->rx_prov_disc_info.strconfig_method_desc_of_prov_disc_req);
+ wrqu->data.length = strlen(extra);
+ return ret;
+}
+
+static int rtw_p2p_get_role(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ DBG_88E("[%s] Role = %d, Status = %d, peer addr = %.2X:%.2X:%.2X:%.2X:%.2X:%.2X\n", __func__, rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo),
+ pwdinfo->p2p_peer_interface_addr[0], pwdinfo->p2p_peer_interface_addr[1], pwdinfo->p2p_peer_interface_addr[2],
+ pwdinfo->p2p_peer_interface_addr[3], pwdinfo->p2p_peer_interface_addr[4], pwdinfo->p2p_peer_interface_addr[5]);
+
+ sprintf(extra, "\n\nRole =%.2d\n", rtw_p2p_role(pwdinfo));
+ wrqu->data.length = strlen(extra);
+ return ret;
+}
+
+static int rtw_p2p_get_peer_ifaddr(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ DBG_88E("[%s] Role = %d, Status = %d, peer addr = %pM\n", __func__,
+ rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo),
+ pwdinfo->p2p_peer_interface_addr);
+ sprintf(extra, "\nMAC %pM",
+ pwdinfo->p2p_peer_interface_addr);
+ wrqu->data.length = strlen(extra);
+ return ret;
+}
+
+static int rtw_p2p_get_peer_devaddr(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ DBG_88E("[%s] Role = %d, Status = %d, peer addr = %pM\n", __func__,
+ rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo),
+ pwdinfo->rx_prov_disc_info.peerDevAddr);
+ sprintf(extra, "\n%pM",
+ pwdinfo->rx_prov_disc_info.peerDevAddr);
+ wrqu->data.length = strlen(extra);
+ return ret;
+}
+
+static int rtw_p2p_get_peer_devaddr_by_invitation(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ DBG_88E("[%s] Role = %d, Status = %d, peer addr = %pM\n",
+ __func__, rtw_p2p_role(pwdinfo), rtw_p2p_state(pwdinfo),
+ pwdinfo->p2p_peer_device_addr);
+ sprintf(extra, "\nMAC %pM",
+ pwdinfo->p2p_peer_device_addr);
+ wrqu->data.length = strlen(extra);
+ return ret;
+}
+
+static int rtw_p2p_get_groupid(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ sprintf(extra, "\n%.2X:%.2X:%.2X:%.2X:%.2X:%.2X %s",
+ pwdinfo->groupid_info.go_device_addr[0], pwdinfo->groupid_info.go_device_addr[1],
+ pwdinfo->groupid_info.go_device_addr[2], pwdinfo->groupid_info.go_device_addr[3],
+ pwdinfo->groupid_info.go_device_addr[4], pwdinfo->groupid_info.go_device_addr[5],
+ pwdinfo->groupid_info.ssid);
+ wrqu->data.length = strlen(extra);
+ return ret;
+}
+
+static int rtw_p2p_get_op_ch(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ DBG_88E("[%s] Op_ch = %02x\n", __func__, pwdinfo->operating_channel);
+
+ sprintf(extra, "\n\nOp_ch =%.2d\n", pwdinfo->operating_channel);
+ wrqu->data.length = strlen(extra);
+ return ret;
+}
+
+static int rtw_p2p_get_wps_configmethod(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u8 peerMAC[ETH_ALEN] = {0x00};
+ int jj, kk;
+ u8 peerMACStr[17] = {0x00};
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ u8 blnMatch = 0;
+ u16 attr_content = 0;
+ uint attr_contentlen = 0;
+ /* 6 bytes for the "wpsCM=" prefix plus 17 for the MAC address string; the reply overwrites them at wrqu->data.pointer */
+ u8 attr_content_str[6 + 17] = {0x00};
+
+ /* Commented by Albert 20110727 */
+ /* The input data is the MAC address of the device whose WPS config method the application wants to know. */
+ /* After knowing its WPS config method, the application can decide the config method for provisioning discovery. */
+ /* Format: iwpriv wlanx p2p_get_wpsCM 00:E0:4C:00:00:05 */
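+ /*
+ * Illustrative layout: at this point wrqu->data.pointer holds
+ * "wpsCM=00:E0:4C:00:00:05", so the 17-character MAC string starts at
+ * offset 6 and the 6 + 17 byte reply below overwrites the same buffer.
+ */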
+
+ DBG_88E("[%s] data = %s\n", __func__, (char *)extra);
+ if (copy_from_user(peerMACStr, wrqu->data.pointer + 6, 17))
+ return -EFAULT;
+
+ for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
+ peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
+ u8 *wpsie;
+ uint wpsie_len = 0;
+ __be16 be_tmp;
+
+ /* The mac address is matched. */
+ wpsie = rtw_get_wps_ie(&pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &wpsie_len);
+ if (wpsie) {
+ rtw_get_wps_attr_content(wpsie, wpsie_len, WPS_ATTR_CONF_METHOD, (u8 *) &be_tmp, &attr_contentlen);
+ if (attr_contentlen) {
+ attr_content = be16_to_cpu(be_tmp);
+ sprintf(attr_content_str, "\n\nM =%.4d", attr_content);
+ blnMatch = 1;
+ }
+ }
+ break;
+ }
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ if (!blnMatch)
+ sprintf(attr_content_str, "\n\nM = 0000");
+
+ if (copy_to_user(wrqu->data.pointer, attr_content_str, 6 + 17))
+ return -EFAULT;
+ return ret;
+}
+
+static int rtw_p2p_get_go_device_address(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u8 peerMAC[ETH_ALEN] = {0x00};
+ int jj, kk;
+ u8 peerMACStr[17] = {0x00};
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ u8 blnMatch = 0;
+ u8 *p2pie;
+ uint p2pielen = 0, attr_contentlen = 0;
+ u8 attr_content[100] = {0x00};
+
+ u8 go_devadd_str[17 + 10] = {0x00};
+ /* +10 is for the "go_devadd=" prefix, which the reply overwrites at wrqu->data.pointer */
+
+ /* Commented by Albert 20121209 */
+ /* The input data is the GO's interface address; the application wants to know the corresponding device address. */
+ /* Format: iwpriv wlanx p2p_get2 go_devadd=00:E0:4C:00:00:05 */
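+ /*
+ * Illustrative layout: wrqu->data.pointer holds
+ * "go_devadd=00:E0:4C:00:00:05", so the MAC string is read from
+ * offset 10 and the 10 + 17 byte go_devadd_str reply is written back
+ * over the same buffer.
+ */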
+
+ DBG_88E("[%s] data = %s\n", __func__, (char *)extra);
+ if (copy_from_user(peerMACStr, wrqu->data.pointer + 10, 17))
+ return -EFAULT;
+
+ for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
+ peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
+ /* Commented by Albert 2011/05/18 */
+ /* Match the device address located in the P2P IE */
+ /* This is for the case that the P2P device address is not the same as the P2P interface address. */
+
+ p2pie = rtw_get_p2p_ie(&pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &p2pielen);
+ if (p2pie) {
+ while (p2pie) {
+ /* The P2P Device ID attribute is included in the Beacon frame. */
+ /* The P2P Device Info attribute is included in the probe response frame. */
+
+ _rtw_memset(attr_content, 0x00, 100);
+ if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_ID, attr_content, &attr_contentlen)) {
+ /* Handle the P2P Device ID attribute of Beacon first */
+ blnMatch = 1;
+ break;
+ } else if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_INFO, attr_content, &attr_contentlen)) {
+ /* Handle the P2P Device Info attribute of probe response */
+ blnMatch = 1;
+ break;
+ }
+
+ /* Get the next P2P IE */
+ p2pie = rtw_get_p2p_ie(p2pie+p2pielen, pnetwork->network.IELength - 12 - (p2pie - &pnetwork->network.IEs[12] + p2pielen), NULL, &p2pielen);
+ }
+ }
+ }
+
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ if (!blnMatch)
+ sprintf(go_devadd_str, "\n\ndev_add = NULL");
+ else
+ sprintf(go_devadd_str, "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
+ attr_content[0], attr_content[1], attr_content[2], attr_content[3], attr_content[4], attr_content[5]);
+
+ if (copy_to_user(wrqu->data.pointer, go_devadd_str, 10 + 17))
+ return -EFAULT;
+ return ret;
+}
+
+static int rtw_p2p_get_device_type(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u8 peerMAC[ETH_ALEN] = {0x00};
+ int jj, kk;
+ u8 peerMACStr[17] = {0x00};
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ u8 blnMatch = 0;
+ u8 dev_type[8] = {0x00};
+ uint dev_type_len = 0;
+ u8 dev_type_str[17 + 9] = {0x00}; /* +9 is for the "dev_type=" prefix, which the reply overwrites at wrqu->data.pointer */
+
+ /* Commented by Albert 20121209 */
+ /* The input data is the MAC address of the device whose primary device type the application wants to know. */
+ /* This lets the user interface report the peer's device type. */
+ /* Format: iwpriv wlanx p2p_get2 dev_type=00:E0:4C:00:00:05 */
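+ /*
+ * Illustrative layout: wrqu->data.pointer holds
+ * "dev_type=00:E0:4C:00:00:05", so the MAC string is read from offset
+ * 9 and the 9 + 17 byte dev_type_str reply is written back over it.
+ */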
+
+ DBG_88E("[%s] data = %s\n", __func__, (char *)extra);
+ if (copy_from_user(peerMACStr, wrqu->data.pointer + 9, 17))
+ return -EFAULT;
+
+ for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
+ peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
+ u8 *wpsie;
+ uint wpsie_len = 0;
+
+ /* The mac address is matched. */
+
+ wpsie = rtw_get_wps_ie(&pnetwork->network.IEs[12],
+ pnetwork->network.IELength - 12,
+ NULL, &wpsie_len);
+ if (wpsie) {
+ rtw_get_wps_attr_content(wpsie, wpsie_len, WPS_ATTR_PRIMARY_DEV_TYPE, dev_type, &dev_type_len);
+ if (dev_type_len) {
+ u16 type = 0;
+ __be16 be_tmp;
+
+ memcpy(&be_tmp, dev_type, 2);
+ type = be16_to_cpu(be_tmp);
+ sprintf(dev_type_str, "\n\nN =%.2d", type);
+ blnMatch = 1;
+ }
+ }
+ break;
+ }
+
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ if (!blnMatch)
+ sprintf(dev_type_str, "\n\nN = 00");
+
+ if (copy_to_user(wrqu->data.pointer, dev_type_str, 9 + 17)) {
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+static int rtw_p2p_get_device_name(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u8 peerMAC[ETH_ALEN] = {0x00};
+ int jj, kk;
+ u8 peerMACStr[17] = {0x00};
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ u8 blnMatch = 0;
+ u8 dev_name[WPS_MAX_DEVICE_NAME_LEN] = {0x00};
+ uint dev_len = 0;
+ u8 dev_name_str[WPS_MAX_DEVICE_NAME_LEN + 5] = {0x00}; /* +5 is for the "devN=" prefix, which the reply overwrites at wrqu->data.pointer */
+
+ /* Commented by Albert 20121225 */
+ /* The input data is the MAC address of the device whose device name the application wants to know. */
+ /* This lets the user interface show the peer's device name instead of its SSID. */
+ /* Format: iwpriv wlanx p2p_get2 devN=00:E0:4C:00:00:05 */
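+ /*
+ * Illustrative layout: wrqu->data.pointer holds
+ * "devN=00:E0:4C:00:00:05", so the MAC string is read from offset 5
+ * and a reply of 5 + max(dev_len, 17) bytes is written back over it.
+ */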
+
+ DBG_88E("[%s] data = %s\n", __func__, (char *)extra);
+ if (copy_from_user(peerMACStr, wrqu->data.pointer + 5, 17))
+ return -EFAULT;
+
+ for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
+ peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
+ u8 *wpsie;
+ uint wpsie_len = 0;
+
+ /* The mac address is matched. */
+ wpsie = rtw_get_wps_ie(&pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &wpsie_len);
+ if (wpsie) {
+ rtw_get_wps_attr_content(wpsie, wpsie_len, WPS_ATTR_DEVICE_NAME, dev_name, &dev_len);
+ if (dev_len) {
+ sprintf(dev_name_str, "\n\nN =%s", dev_name);
+ blnMatch = 1;
+ }
+ }
+ break;
+ }
+
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ if (!blnMatch)
+ sprintf(dev_name_str, "\n\nN = 0000");
+
+ if (copy_to_user(wrqu->data.pointer, dev_name_str, 5 + ((dev_len > 17) ? dev_len : 17)))
+ return -EFAULT;
+ return ret;
+}
+
+static int rtw_p2p_get_invitation_procedure(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ u8 peerMAC[ETH_ALEN] = {0x00};
+ int jj, kk;
+ u8 peerMACStr[17] = {0x00};
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ u8 blnMatch = 0;
+ u8 *p2pie;
+ uint p2pielen = 0, attr_contentlen = 0;
+ u8 attr_content[2] = {0x00};
+
+ u8 inv_proc_str[17 + 8] = {0x00};
+ /* +8 is for the "InvProc=" prefix, which the reply overwrites at wrqu->data.pointer */
+
+ /* Commented by Ouden 20121226 */
+ /* The application wants to know whether the peer supports the P2P invitation procedure. */
+ /* Format: iwpriv wlanx p2p_get2 InvProc=00:E0:4C:00:00:05 */
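+ /*
+ * Illustrative layout: wrqu->data.pointer holds
+ * "InvProc=00:E0:4C:00:00:05"; the MAC string is read from offset 8,
+ * and the 8 + 17 byte reply reports "IP = 1" when bit 0x20 of the
+ * first byte of the peer's P2P capability attribute is set.
+ */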
+
+ DBG_88E("[%s] data = %s\n", __func__, (char *)extra);
+ if (copy_from_user(peerMACStr, wrqu->data.pointer + 8, 17))
+ return -EFAULT;
+
+ for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
+ peerMAC[jj] = key_2char2num(peerMACStr[kk], peerMACStr[kk + 1]);
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
+ /* Commented by Albert 20121226 */
+ /* Match the device address located in the P2P IE */
+ /* This is for the case that the P2P device address is not the same as the P2P interface address. */
+
+ p2pie = rtw_get_p2p_ie(&pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &p2pielen);
+ if (p2pie) {
+ while (p2pie) {
+ if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_CAPABILITY, attr_content, &attr_contentlen)) {
+ /* Handle the P2P capability attribute */
+ blnMatch = 1;
+ break;
+ }
+
+ /* Get the next P2P IE */
+ p2pie = rtw_get_p2p_ie(p2pie+p2pielen, pnetwork->network.IELength - 12 - (p2pie - &pnetwork->network.IEs[12] + p2pielen), NULL, &p2pielen);
+ }
+ }
+ }
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ if (!blnMatch) {
+ sprintf(inv_proc_str, "\nIP =-1");
+ } else {
+ if (attr_content[0] & 0x20)
+ sprintf(inv_proc_str, "\nIP = 1");
+ else
+ sprintf(inv_proc_str, "\nIP = 0");
+ }
+ if (copy_to_user(wrqu->data.pointer, inv_proc_str, 8 + 17))
+ return -EFAULT;
+ return ret;
+}
+
+static int rtw_p2p_connect(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 peerMAC[ETH_ALEN] = {0x00};
+ int jj, kk;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ unsigned long irqL;
+ struct list_head *plist, *phead;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ uint uintPeerChannel = 0;
+
+ /* Commented by Albert 20110304 */
+ /* The input data contains two pieces of information. */
+ /* 1. The MAC address of the peer to form a group with. */
+ /* 2. The WPS PIN code, or the "pbc" string for the push-button method. */
+ /* Format: 00:E0:4C:00:00:05 */
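+ /*
+ * Illustrative example: with the dispatcher prefix this would be
+ * issued as "iwpriv wlan0 p2p_set nego=00:E0:4C:00:00:05"; only the
+ * leading 17-character MAC string is parsed below.
+ */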
+
+ DBG_88E("[%s] data = %s\n", __func__, extra);
+
+ if (pwdinfo->p2p_state == P2P_STATE_NONE) {
+ DBG_88E("[%s] WiFi Direct is disabled!\n", __func__);
+ return ret;
+ }
+
+ if (pwdinfo->ui_got_wps_info == P2P_NO_WPSINFO)
+ return -1;
+
+ for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
+ peerMAC[jj] = key_2char2num(extra[kk], extra[kk + 1]);
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ if (!memcmp(pnetwork->network.MacAddress, peerMAC, ETH_ALEN)) {
+ uintPeerChannel = pnetwork->network.Configuration.DSConfig;
+ break;
+ }
+
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ if (uintPeerChannel) {
+ _rtw_memset(&pwdinfo->nego_req_info, 0x00, sizeof(struct tx_nego_req_info));
+ _rtw_memset(&pwdinfo->groupid_info, 0x00, sizeof(struct group_id_info));
+
+ pwdinfo->nego_req_info.peer_channel_num[0] = uintPeerChannel;
+ memcpy(pwdinfo->nego_req_info.peerDevAddr, pnetwork->network.MacAddress, ETH_ALEN);
+ pwdinfo->nego_req_info.benable = true;
+
+ _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
+ if (rtw_p2p_state(pwdinfo) != P2P_STATE_GONEGO_OK) {
+ /* Restore to the listen state if the current p2p state is not nego OK */
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_LISTEN);
+ }
+
+ rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_GONEGO_ING);
+
+ DBG_88E("[%s] Start PreTx Procedure!\n", __func__);
+ _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
+ _set_timer(&pwdinfo->restore_p2p_state_timer, P2P_GO_NEGO_TIMEOUT);
+ } else {
+ DBG_88E("[%s] Not Found in Scanning Queue~\n", __func__);
+ ret = -1;
+ }
+ return ret;
+}
+
+static int rtw_p2p_invite_req(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ int jj, kk;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct list_head *plist, *phead;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ uint uintPeerChannel = 0;
+ u8 attr_content[50] = {0x00};
+ u8 *p2pie;
+ uint p2pielen = 0, attr_contentlen = 0;
+ unsigned long irqL;
+ struct tx_invite_req_info *pinvite_req_info = &pwdinfo->invitereq_info;
+
+ /* The input data contains two pieces of information. */
+ /* 1. The P2P device address the invitation should be sent to. */
+ /* 2. The group id, which is the GO's MAC address, a space, and the GO's SSID. */
+ /* Command line sample: iwpriv wlan0 p2p_set invite="00:11:22:33:44:55 00:E0:4C:00:00:05 DIRECT-xy" */
+ /* Format: 00:11:22:33:44:55 00:E0:4C:00:00:05 DIRECT-xy */
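+ /*
+ * Illustrative parse of the format above: bytes 0-16 carry the peer
+ * device address string, bytes 18-34 the GO's BSSID string, and the
+ * SSID starts at byte 36, so ssidlen = wrqu->data.length - 36 below.
+ */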
+
+ DBG_88E("[%s] data = %s\n", __func__, extra);
+
+ if (wrqu->data.length <= 37) {
+ DBG_88E("[%s] Wrong format!\n", __func__);
+ return ret;
+ }
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
+ DBG_88E("[%s] WiFi Direct is disabled!\n", __func__);
+ return ret;
+ } else {
+ /* Reset the content of struct tx_invite_req_info */
+ pinvite_req_info->benable = false;
+ _rtw_memset(pinvite_req_info->go_bssid, 0x00, ETH_ALEN);
+ _rtw_memset(pinvite_req_info->go_ssid, 0x00, WLAN_SSID_MAXLEN);
+ pinvite_req_info->ssidlen = 0x00;
+ pinvite_req_info->operating_ch = pwdinfo->operating_channel;
+ _rtw_memset(pinvite_req_info->peer_macaddr, 0x00, ETH_ALEN);
+ pinvite_req_info->token = 3;
+ }
+
+ for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
+ pinvite_req_info->peer_macaddr[jj] = key_2char2num(extra[kk], extra[kk + 1]);
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ /* Commented by Albert 2011/05/18 */
+ /* Match the device address located in the P2P IE */
+ /* This is for the case that the P2P device address is not the same as the P2P interface address. */
+
+ p2pie = rtw_get_p2p_ie(&pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &p2pielen);
+ if (p2pie) {
+ /* The P2P Device ID attribute is included in the Beacon frame. */
+ /* The P2P Device Info attribute is included in the probe response frame. */
+
+ if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_ID, attr_content, &attr_contentlen)) {
+ /* Handle the P2P Device ID attribute of Beacon first */
+ if (!memcmp(attr_content, pinvite_req_info->peer_macaddr, ETH_ALEN)) {
+ uintPeerChannel = pnetwork->network.Configuration.DSConfig;
+ break;
+ }
+ } else if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_INFO, attr_content, &attr_contentlen)) {
+ /* Handle the P2P Device Info attribute of probe response */
+ if (!memcmp(attr_content, pinvite_req_info->peer_macaddr, ETH_ALEN)) {
+ uintPeerChannel = pnetwork->network.Configuration.DSConfig;
+ break;
+ }
+ }
+ }
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ if (uintPeerChannel) {
+ /* Store the GO's bssid */
+ for (jj = 0, kk = 18; jj < ETH_ALEN; jj++, kk += 3)
+ pinvite_req_info->go_bssid[jj] = key_2char2num(extra[kk], extra[kk + 1]);
+
+ /* Store the GO's ssid */
+ pinvite_req_info->ssidlen = wrqu->data.length - 36;
+ memcpy(pinvite_req_info->go_ssid, &extra[36], (u32) pinvite_req_info->ssidlen);
+ pinvite_req_info->benable = true;
+ pinvite_req_info->peer_ch = uintPeerChannel;
+
+ rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_TX_INVITE_REQ);
+
+ set_channel_bwmode(padapter, uintPeerChannel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+
+ _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
+
+ _set_timer(&pwdinfo->restore_p2p_state_timer, P2P_INVITE_TIMEOUT);
+ } else {
+ DBG_88E("[%s] NOT Found in the Scanning Queue!\n", __func__);
+ }
+ return ret;
+}
+
+static int rtw_p2p_set_persistent(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ /* The input data is 0 or 1 */
+ /* 0: disable persistent group functionality */
+ /* 1: enable persistent group functionality */
+
+ DBG_88E("[%s] data = %s\n", __func__, extra);
+
+ if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
+ DBG_88E("[%s] WiFi Direct is disabled!\n", __func__);
+ return ret;
+ } else {
+ if (extra[0] == '0') /* Disable the persistent group function. */
+ pwdinfo->persistent_supported = false;
+ else if (extra[0] == '1') /* Enable the persistent group function. */
+ pwdinfo->persistent_supported = true;
+ else
+ pwdinfo->persistent_supported = false;
+ }
+ pr_info("[%s] persistent_supported = %d\n", __func__, pwdinfo->persistent_supported);
+ return ret;
+}
+
+static int rtw_p2p_prov_disc(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+ u8 peerMAC[ETH_ALEN] = {0x00};
+ int jj, kk;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct list_head *plist, *phead;
+ struct __queue *queue = &(pmlmepriv->scanned_queue);
+ struct wlan_network *pnetwork = NULL;
+ uint uintPeerChannel = 0;
+ u8 attr_content[100] = {0x00};
+ u8 *p2pie;
+ uint p2pielen = 0, attr_contentlen = 0;
+ unsigned long irqL;
+
+ /* The input data contains two pieces of information. */
+ /* 1. The MAC address of the peer the provision discovery request frame should be sent to. */
+ /* 2. The WPS configuration method to request. */
+ /* Format: 00:E0:4C:00:00:05_display */
+ /* Format: 00:E0:4C:00:00:05_keypad */
+ /* Format: 00:E0:4C:00:00:05_pbc */
+ /* Format: 00:E0:4C:00:00:05_label */
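+ /*
+ * Illustrative parse of the formats above: bytes 0-16 carry the peer
+ * MAC string and the method keyword starts at byte 18, after the '_'
+ * separator, which is what the checks on &extra[18] below rely on.
+ */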
+
+ DBG_88E("[%s] data = %s\n", __func__, extra);
+
+ if (pwdinfo->p2p_state == P2P_STATE_NONE) {
+ DBG_88E("[%s] WiFi Direct is disabled!\n", __func__);
+ return ret;
+ } else {
+ /* Reset the content of struct tx_provdisc_req_info, excluding the wps_config_method_request. */
+ _rtw_memset(pwdinfo->tx_prov_disc_info.peerDevAddr, 0x00, ETH_ALEN);
+ _rtw_memset(pwdinfo->tx_prov_disc_info.peerIFAddr, 0x00, ETH_ALEN);
+ _rtw_memset(&pwdinfo->tx_prov_disc_info.ssid, 0x00, sizeof(struct ndis_802_11_ssid));
+ pwdinfo->tx_prov_disc_info.peer_channel_num[0] = 0;
+ pwdinfo->tx_prov_disc_info.peer_channel_num[1] = 0;
+ pwdinfo->tx_prov_disc_info.benable = false;
+ }
+
+ for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
+ peerMAC[jj] = key_2char2num(extra[kk], extra[kk + 1]);
+
+ if (!memcmp(&extra[18], "display", 7)) {
+ pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_DISPLYA;
+ } else if (!memcmp(&extra[18], "keypad", 7)) {
+ pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_KEYPAD;
+ } else if (!memcmp(&extra[18], "pbc", 3)) {
+ pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_PUSH_BUTTON;
+ } else if (!memcmp(&extra[18], "label", 5)) {
+ pwdinfo->tx_prov_disc_info.wps_config_method_request = WPS_CM_LABEL;
+ } else {
+ DBG_88E("[%s] Unknown WPS config method\n", __func__);
+ return ret;
+ }
+
+ _enter_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ phead = get_list_head(queue);
+ plist = get_next(phead);
+
+ while (1) {
+ if (rtw_end_of_queue_search(phead, plist) == true)
+ break;
+
+ if (uintPeerChannel != 0)
+ break;
+
+ pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+
+ /* Commented by Albert 2011/05/18 */
+ /* Match the device address located in the P2P IE */
+ /* This is for the case that the P2P device address is not the same as the P2P interface address. */
+
+ p2pie = rtw_get_p2p_ie(&pnetwork->network.IEs[12], pnetwork->network.IELength - 12, NULL, &p2pielen);
+ if (p2pie) {
+ while (p2pie) {
+ /* The P2P Device ID attribute is included in the Beacon frame. */
+ /* The P2P Device Info attribute is included in the probe response frame. */
+
+ if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_ID, attr_content, &attr_contentlen)) {
+ /* Handle the P2P Device ID attribute of Beacon first */
+ if (!memcmp(attr_content, peerMAC, ETH_ALEN)) {
+ uintPeerChannel = pnetwork->network.Configuration.DSConfig;
+ break;
+ }
+ } else if (rtw_get_p2p_attr_content(p2pie, p2pielen, P2P_ATTR_DEVICE_INFO, attr_content, &attr_contentlen)) {
+ /* Handle the P2P Device Info attribute of probe response */
+ if (!memcmp(attr_content, peerMAC, ETH_ALEN)) {
+ uintPeerChannel = pnetwork->network.Configuration.DSConfig;
+ break;
+ }
+ }
+
+ /* Get the next P2P IE */
+ p2pie = rtw_get_p2p_ie(p2pie+p2pielen, pnetwork->network.IELength - 12 - (p2pie - &pnetwork->network.IEs[12] + p2pielen), NULL, &p2pielen);
+ }
+ }
+
+ plist = get_next(plist);
+ }
+
+ _exit_critical_bh(&(pmlmepriv->scanned_queue.lock), &irqL);
+
+ if (uintPeerChannel) {
+ DBG_88E("[%s] peer channel: %d!\n", __func__, uintPeerChannel);
+ memcpy(pwdinfo->tx_prov_disc_info.peerIFAddr, pnetwork->network.MacAddress, ETH_ALEN);
+ memcpy(pwdinfo->tx_prov_disc_info.peerDevAddr, peerMAC, ETH_ALEN);
+ pwdinfo->tx_prov_disc_info.peer_channel_num[0] = (u16) uintPeerChannel;
+ pwdinfo->tx_prov_disc_info.benable = true;
+ rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ);
+
+ if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_CLIENT)) {
+ memcpy(&pwdinfo->tx_prov_disc_info.ssid, &pnetwork->network.Ssid, sizeof(struct ndis_802_11_ssid));
+ } else if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_DEVICE) || rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
+ memcpy(pwdinfo->tx_prov_disc_info.ssid.Ssid, pwdinfo->p2p_wildcard_ssid, P2P_WILDCARD_SSID_LEN);
+ pwdinfo->tx_prov_disc_info.ssid.SsidLength = P2P_WILDCARD_SSID_LEN;
+ }
+
+ set_channel_bwmode(padapter, uintPeerChannel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
+
+ _set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
+
+ _set_timer(&pwdinfo->restore_p2p_state_timer, P2P_PROVISION_TIMEOUT);
+ } else {
+ DBG_88E("[%s] NOT Found in the Scanning Queue!\n", __func__);
+ }
+ return ret;
+}
+
+/* This function is used to inform the driver that the user has supplied the PIN code value, or chosen PBC, */
+/* through the application. */
+
+static int rtw_p2p_got_wpsinfo(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct wifidirect_info *pwdinfo = &(padapter->wdinfo);
+
+ DBG_88E("[%s] data = %s\n", __func__, extra);
+ /* Added by Albert 20110328 */
+ /* if the input data is P2P_NO_WPSINFO -> reset the wpsinfo */
+ /* if the input data is P2P_GOT_WPSINFO_PEER_DISPLAY_PIN -> the utility entered the PIN code obtained from the peer P2P device. */
+ /* if the input data is P2P_GOT_WPSINFO_SELF_DISPLAY_PIN -> the utility obtained the PIN code from the local device itself. */
+ /* if the input data is P2P_GOT_WPSINFO_PBC -> the utility decided to use PBC */
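+ /*
+ * Illustrative example: with the dispatcher prefix this would be
+ * issued as "iwpriv wlan0 p2p_set got_wpsinfo=3", which selects the
+ * PBC case below.
+ */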
+
+ if (*extra == '0')
+ pwdinfo->ui_got_wps_info = P2P_NO_WPSINFO;
+ else if (*extra == '1')
+ pwdinfo->ui_got_wps_info = P2P_GOT_WPSINFO_PEER_DISPLAY_PIN;
+ else if (*extra == '2')
+ pwdinfo->ui_got_wps_info = P2P_GOT_WPSINFO_SELF_DISPLAY_PIN;
+ else if (*extra == '3')
+ pwdinfo->ui_got_wps_info = P2P_GOT_WPSINFO_PBC;
+ else
+ pwdinfo->ui_got_wps_info = P2P_NO_WPSINFO;
+ return ret;
+}
+
+#endif /* CONFIG_88EU_P2P */
+
+static int rtw_p2p_set(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+
+#ifdef CONFIG_88EU_P2P
+ DBG_88E("[%s] extra = %s\n", __func__, extra);
+ if (!memcmp(extra, "enable=", 7)) {
+ rtw_wext_p2p_enable(dev, info, wrqu, &extra[7]);
+ } else if (!memcmp(extra, "setDN=", 6)) {
+ wrqu->data.length -= 6;
+ rtw_p2p_setDN(dev, info, wrqu, &extra[6]);
+ } else if (!memcmp(extra, "profilefound=", 13)) {
+ wrqu->data.length -= 13;
+ rtw_p2p_profilefound(dev, info, wrqu, &extra[13]);
+ } else if (!memcmp(extra, "prov_disc=", 10)) {
+ wrqu->data.length -= 10;
+ rtw_p2p_prov_disc(dev, info, wrqu, &extra[10]);
+ } else if (!memcmp(extra, "nego=", 5)) {
+ wrqu->data.length -= 5;
+ rtw_p2p_connect(dev, info, wrqu, &extra[5]);
+ } else if (!memcmp(extra, "intent=", 7)) {
+ /* Commented by Albert 2011/03/23 */
+ /* The wrqu->data.length will include the null character */
+ /* So, we will decrease 7 + 1 */
+ wrqu->data.length -= 8;
+ rtw_p2p_set_intent(dev, info, wrqu, &extra[7]);
+ } else if (!memcmp(extra, "ssid=", 5)) {
+ wrqu->data.length -= 5;
+ rtw_p2p_set_go_nego_ssid(dev, info, wrqu, &extra[5]);
+ } else if (!memcmp(extra, "got_wpsinfo=", 12)) {
+ wrqu->data.length -= 12;
+ rtw_p2p_got_wpsinfo(dev, info, wrqu, &extra[12]);
+ } else if (!memcmp(extra, "listen_ch=", 10)) {
+ /* Commented by Albert 2011/05/24 */
+ /* The wrqu->data.length will include the null character */
+ /* So, we will decrease (10 + 1) */
+ wrqu->data.length -= 11;
+ rtw_p2p_set_listen_ch(dev, info, wrqu, &extra[10]);
+ } else if (!memcmp(extra, "op_ch=", 6)) {
+ /* Commented by Albert 2011/05/24 */
+ /* The wrqu->data.length will include the null character */
+ /* So, we will decrease (6 + 1) */
+ wrqu->data.length -= 7;
+ rtw_p2p_set_op_ch(dev, info, wrqu, &extra[6]);
+ } else if (!memcmp(extra, "invite=", 7)) {
+ wrqu->data.length -= 8;
+ rtw_p2p_invite_req(dev, info, wrqu, &extra[7]);
+ } else if (!memcmp(extra, "persistent=", 11)) {
+ wrqu->data.length -= 11;
+ rtw_p2p_set_persistent(dev, info, wrqu, &extra[11]);
+ }
+#endif /* CONFIG_88EU_P2P */
+
+ return ret;
+}
+
+static int rtw_p2p_get(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+
+#ifdef CONFIG_88EU_P2P
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ if (padapter->bShowGetP2PState)
+ DBG_88E("[%s] extra = %s\n", __func__, (char *)wrqu->data.pointer);
+ if (!memcmp(wrqu->data.pointer, "status", 6)) {
+ rtw_p2p_get_status(dev, info, wrqu, extra);
+ } else if (!memcmp(wrqu->data.pointer, "role", 4)) {
+ rtw_p2p_get_role(dev, info, wrqu, extra);
+ } else if (!memcmp(wrqu->data.pointer, "peer_ifa", 8)) {
+ rtw_p2p_get_peer_ifaddr(dev, info, wrqu, extra);
+ } else if (!memcmp(wrqu->data.pointer, "req_cm", 6)) {
+ rtw_p2p_get_req_cm(dev, info, wrqu, extra);
+ } else if (!memcmp(wrqu->data.pointer, "peer_deva", 9)) {
+ /* Get the P2P device address when receiving the provision discovery request frame. */
+ rtw_p2p_get_peer_devaddr(dev, info, wrqu, extra);
+ } else if (!memcmp(wrqu->data.pointer, "group_id", 8)) {
+ rtw_p2p_get_groupid(dev, info, wrqu, extra);
+ } else if (!memcmp(wrqu->data.pointer, "peer_deva_inv", 9)) {
+ /* Get the P2P device address when receiving the P2P Invitation request frame. */
+ rtw_p2p_get_peer_devaddr_by_invitation(dev, info, wrqu, extra);
+ } else if (!memcmp(wrqu->data.pointer, "op_ch", 5)) {
+ rtw_p2p_get_op_ch(dev, info, wrqu, extra);
+ }
+#endif /* CONFIG_88EU_P2P */
+ return ret;
+}
+
+static int rtw_p2p_get2(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+
+#ifdef CONFIG_88EU_P2P
+ DBG_88E("[%s] extra = %s\n", __func__, (char *)wrqu->data.pointer);
+ if (!memcmp(extra, "wpsCM =", 6)) {
+ wrqu->data.length -= 6;
+ rtw_p2p_get_wps_configmethod(dev, info, wrqu, &extra[6]);
+ } else if (!memcmp(extra, "devN =", 5)) {
+ wrqu->data.length -= 5;
+ rtw_p2p_get_device_name(dev, info, wrqu, &extra[5]);
+ } else if (!memcmp(extra, "dev_type =", 9)) {
+ wrqu->data.length -= 9;
+ rtw_p2p_get_device_type(dev, info, wrqu, &extra[9]);
+ } else if (!memcmp(extra, "go_devadd =", 10)) {
+ wrqu->data.length -= 10;
+ rtw_p2p_get_go_device_address(dev, info, wrqu, &extra[10]);
+ } else if (!memcmp(extra, "InvProc =", 8)) {
+ wrqu->data.length -= 8;
+ rtw_p2p_get_invitation_procedure(dev, info, wrqu, &extra[8]);
+ }
+
+#endif /* CONFIG_88EU_P2P */
+
+ return ret;
+}
+
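+/*
+ * "1" enters CTA test mode, anything else leaves it.  While the test is
+ * active the RCR_CBSSID_DATA/RCR_CBSSID_BCN receive-filter bits are
+ * cleared (apparently so frames are accepted regardless of BSSID during
+ * the test) and they are set again when the test mode is turned off.
+ */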
+static int rtw_cta_test_start(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ DBG_88E("%s %s\n", __func__, extra);
+ if (!strcmp(extra, "1"))
+ padapter->in_cta_test = 1;
+ else
+ padapter->in_cta_test = 0;
+
+ if (padapter->in_cta_test) {
+ u32 v = rtw_read32(padapter, REG_RCR);
+ v &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);/* RCR_ADF */
+ rtw_write32(padapter, REG_RCR, v);
+ DBG_88E("enable RCR_ADF\n");
+ } else {
+ u32 v = rtw_read32(padapter, REG_RCR);
+ v |= RCR_CBSSID_DATA | RCR_CBSSID_BCN;/* RCR_ADF */
+ rtw_write32(padapter, REG_RCR, v);
+ DBG_88E("disable RCR_ADF\n");
+ }
+ return ret;
+}
+
+static int rtw_rereg_nd_name(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ struct rereg_nd_name_data *rereg_priv = &padapter->rereg_nd_name_priv;
+ char new_ifname[IFNAMSIZ];
+
+ if (rereg_priv->old_ifname[0] == 0) {
+ char *reg_ifname;
+ reg_ifname = padapter->registrypriv.if2name;
+
+ strncpy(rereg_priv->old_ifname, reg_ifname, IFNAMSIZ);
+ rereg_priv->old_ifname[IFNAMSIZ-1] = 0;
+ }
+
+ if (wrqu->data.length > IFNAMSIZ)
+ return -EFAULT;
+
+ if (copy_from_user(new_ifname, wrqu->data.pointer, IFNAMSIZ))
+ return -EFAULT;
+
+ if (0 == strcmp(rereg_priv->old_ifname, new_ifname))
+ return ret;
+
+ DBG_88E("%s new_ifname:%s\n", __func__, new_ifname);
+ ret = rtw_change_ifname(padapter, new_ifname);
+ if (0 != ret)
+ goto exit;
+
+ if (!memcmp(rereg_priv->old_ifname, "disable%d", 9)) {
+ padapter->ledpriv.bRegUseLed = rereg_priv->old_bRegUseLed;
+ rtw_hal_sw_led_init(padapter);
+ rtw_ips_mode_req(&padapter->pwrctrlpriv, rereg_priv->old_ips_mode);
+ }
+
+ strncpy(rereg_priv->old_ifname, new_ifname, IFNAMSIZ);
+ rereg_priv->old_ifname[IFNAMSIZ-1] = 0;
+
+ if (!memcmp(new_ifname, "disable%d", 9)) {
+ DBG_88E("%s disable\n", __func__);
+ /* free network queue for Android's timing issue */
+ rtw_free_network_queue(padapter, true);
+
+ /* close led */
+ rtw_led_control(padapter, LED_CTL_POWER_OFF);
+ rereg_priv->old_bRegUseLed = padapter->ledpriv.bRegUseLed;
+ padapter->ledpriv.bRegUseLed = false;
+ rtw_hal_sw_led_deinit(padapter);
+
+ /* the interface is being "disabled", we can do deeper IPS */
+ rereg_priv->old_ips_mode = rtw_get_ips_mode_req(&padapter->pwrctrlpriv);
+ rtw_ips_mode_req(&padapter->pwrctrlpriv, IPS_NORMAL);
+ }
+exit:
+ return ret;
+}
+
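+/*
+ * Debug helpers for the register-dump command below: dump the MAC,
+ * baseband and RF register blocks via pr_info(), four 32-bit values
+ * per line.
+ */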
+static void mac_reg_dump(struct adapter *padapter)
+{
+ int i, j = 1;
+ pr_info("\n ======= MAC REG =======\n");
+ for (i = 0x0; i < 0x300; i += 4) {
+ if (j%4 == 1)
+ pr_info("0x%02x", i);
+ pr_info(" 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ pr_info("\n");
+ }
+ for (i = 0x400; i < 0x800; i += 4) {
+ if (j%4 == 1)
+ pr_info("0x%02x", i);
+ pr_info(" 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ pr_info("\n");
+ }
+}
+
+static void bb_reg_dump(struct adapter *padapter)
+{
+ int i, j = 1;
+ pr_info("\n ======= BB REG =======\n");
+ for (i = 0x800; i < 0x1000; i += 4) {
+ if (j%4 == 1)
+ pr_info("0x%02x", i);
+
+ pr_info(" 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ pr_info("\n");
+ }
+}
+
+static void rf_reg_dump(struct adapter *padapter)
+{
+ int i, j = 1, path;
+ u32 value;
+ u8 rf_type, path_nums = 0;
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+
+ pr_info("\n ======= RF REG =======\n");
+ if ((RF_1T2R == rf_type) || (RF_1T1R == rf_type))
+ path_nums = 1;
+ else
+ path_nums = 2;
+
+ for (path = 0; path < path_nums; path++) {
+ pr_info("\nRF_Path(%x)\n", path);
+ for (i = 0; i < 0x100; i++) {
+ value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
+ if (j%4 == 1)
+ pr_info("0x%02x ", i);
+ pr_info(" 0x%08x ", value);
+ if ((j++)%4 == 0)
+ pr_info("\n");
+ }
+ }
+}
+
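+/*
+ * Debug port ioctl.  The first 32-bit word of wrqu->data encodes the
+ * command: bits 31-24 are the major command, bits 23-16 the minor command
+ * and bits 15-0 the argument; a second 32-bit word carries extra_arg.
+ * For example, 0x70010100 selects major 0x70 (read_reg), minor 1 (byte
+ * access) at offset 0x100.
+ */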
+static int rtw_dbg_port(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ unsigned long irqL;
+ int ret = 0;
+ u8 major_cmd, minor_cmd;
+ u16 arg;
+ s32 extra_arg;
+ u32 *pdata, val32;
+ struct sta_info *psta;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct wlan_network *cur_network = &(pmlmepriv->cur_network);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ pdata = (u32 *)&wrqu->data;
+
+ val32 = *pdata;
+ arg = (u16)(val32 & 0x0000ffff);
+ major_cmd = (u8)(val32 >> 24);
+ minor_cmd = (u8)((val32 >> 16) & 0x00ff);
+
+ extra_arg = *(pdata+1);
+
+ switch (major_cmd) {
+ case 0x70:/* read_reg */
+ switch (minor_cmd) {
+ case 1:
+ DBG_88E("rtw_read8(0x%x) = 0x%02x\n", arg, rtw_read8(padapter, arg));
+ break;
+ case 2:
+ DBG_88E("rtw_read16(0x%x) = 0x%04x\n", arg, rtw_read16(padapter, arg));
+ break;
+ case 4:
+ DBG_88E("rtw_read32(0x%x) = 0x%08x\n", arg, rtw_read32(padapter, arg));
+ break;
+ }
+ break;
+ case 0x71:/* write_reg */
+ switch (minor_cmd) {
+ case 1:
+ rtw_write8(padapter, arg, extra_arg);
+ DBG_88E("rtw_write8(0x%x) = 0x%02x\n", arg, rtw_read8(padapter, arg));
+ break;
+ case 2:
+ rtw_write16(padapter, arg, extra_arg);
+ DBG_88E("rtw_write16(0x%x) = 0x%04x\n", arg, rtw_read16(padapter, arg));
+ break;
+ case 4:
+ rtw_write32(padapter, arg, extra_arg);
+ DBG_88E("rtw_write32(0x%x) = 0x%08x\n", arg, rtw_read32(padapter, arg));
+ break;
+ }
+ break;
+ case 0x72:/* read_bb */
+ DBG_88E("read_bbreg(0x%x) = 0x%x\n", arg, rtw_hal_read_bbreg(padapter, arg, 0xffffffff));
+ break;
+ case 0x73:/* write_bb */
+ rtw_hal_write_bbreg(padapter, arg, 0xffffffff, extra_arg);
+ DBG_88E("write_bbreg(0x%x) = 0x%x\n", arg, rtw_hal_read_bbreg(padapter, arg, 0xffffffff));
+ break;
+ case 0x74:/* read_rf */
+ DBG_88E("read RF_reg path(0x%02x), offset(0x%x), value(0x%08x)\n", minor_cmd, arg, rtw_hal_read_rfreg(padapter, minor_cmd, arg, 0xffffffff));
+ break;
+ case 0x75:/* write_rf */
+ rtw_hal_write_rfreg(padapter, minor_cmd, arg, 0xffffffff, extra_arg);
+ DBG_88E("write RF_reg path(0x%02x), offset(0x%x), value(0x%08x)\n", minor_cmd, arg, rtw_hal_read_rfreg(padapter, minor_cmd, arg, 0xffffffff));
+ break;
+
+ case 0x76:
+ switch (minor_cmd) {
+ case 0x00: /* normal mode, */
+ padapter->recvpriv.is_signal_dbg = 0;
+ break;
+ case 0x01: /* dbg mode */
+ padapter->recvpriv.is_signal_dbg = 1;
+ extra_arg = extra_arg > 100 ? 100 : extra_arg;
+ extra_arg = extra_arg < 0 ? 0 : extra_arg;
+ padapter->recvpriv.signal_strength_dbg = extra_arg;
+ break;
+ }
+ break;
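+ /*
+ * The IOL tests below queue a list of IO-offload commands into an
+ * xmit_frame and run them synchronously through the firmware via
+ * rtw_IOL_exec_cmds_sync(), then verify the result where possible.
+ */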
+ case 0x78: /* IOL test */
+ switch (minor_cmd) {
+ case 0x04: /* LLT table initialization test */
+ {
+ u8 page_boundary = 0xf9;
+ struct xmit_frame *xmit_frame;
+
+ xmit_frame = rtw_IOL_accquire_xmit_frame(padapter);
+ if (xmit_frame == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ rtw_IOL_append_LLT_cmd(xmit_frame, page_boundary);
+
+ if (_SUCCESS != rtw_IOL_exec_cmds_sync(padapter, xmit_frame, 500, 0))
+ ret = -EPERM;
+ }
+ break;
+ case 0x05: /* blink LED test */
+ {
+ u16 reg = 0x4c;
+ u32 blink_num = 50;
+ u32 blink_delay_ms = 200;
+ int i;
+ struct xmit_frame *xmit_frame;
+
+ xmit_frame = rtw_IOL_accquire_xmit_frame(padapter);
+ if (xmit_frame == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ for (i = 0; i < blink_num; i++) {
+ rtw_IOL_append_WB_cmd(xmit_frame, reg, 0x00, 0xff);
+ rtw_IOL_append_DELAY_MS_cmd(xmit_frame, blink_delay_ms);
+ rtw_IOL_append_WB_cmd(xmit_frame, reg, 0x08, 0xff);
+ rtw_IOL_append_DELAY_MS_cmd(xmit_frame, blink_delay_ms);
+ }
+ if (_SUCCESS != rtw_IOL_exec_cmds_sync(padapter, xmit_frame, (blink_delay_ms*blink_num*2)+200, 0))
+ ret = -EPERM;
+ }
+ break;
+
+ case 0x06: /* continuous write byte test */
+ {
+ u16 reg = arg;
+ u16 start_value = 0;
+ u32 write_num = extra_arg;
+ int i;
+ u8 final;
+ struct xmit_frame *xmit_frame;
+
+ xmit_frame = rtw_IOL_accquire_xmit_frame(padapter);
+ if (xmit_frame == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ for (i = 0; i < write_num; i++)
+ rtw_IOL_append_WB_cmd(xmit_frame, reg, i+start_value, 0xFF);
+ if (_SUCCESS != rtw_IOL_exec_cmds_sync(padapter, xmit_frame, 5000, 0))
+ ret = -EPERM;
+
+ final = rtw_read8(padapter, reg);
+ if (start_value+write_num-1 == final)
+ DBG_88E("continuous IOL_CMD_WB_REG to 0x%x %u times Success, start:%u, final:%u\n", reg, write_num, start_value, final);
+ else
+ DBG_88E("continuous IOL_CMD_WB_REG to 0x%x %u times Fail, start:%u, final:%u\n", reg, write_num, start_value, final);
+ }
+ break;
+
+ case 0x07: /* continuous write word test */
+ {
+ u16 reg = arg;
+ u16 start_value = 200;
+ u32 write_num = extra_arg;
+
+ int i;
+ u16 final;
+ struct xmit_frame *xmit_frame;
+
+ xmit_frame = rtw_IOL_accquire_xmit_frame(padapter);
+ if (xmit_frame == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ for (i = 0; i < write_num; i++)
+ rtw_IOL_append_WW_cmd(xmit_frame, reg, i+start_value, 0xFFFF);
+ if (_SUCCESS != rtw_IOL_exec_cmds_sync(padapter, xmit_frame, 5000, 0))
+ ret = -EPERM;
+
+ final = rtw_read16(padapter, reg);
+ if (start_value+write_num-1 == final)
+ DBG_88E("continuous IOL_CMD_WW_REG to 0x%x %u times Success, start:%u, final:%u\n", reg, write_num, start_value, final);
+ else
+ DBG_88E("continuous IOL_CMD_WW_REG to 0x%x %u times Fail, start:%u, final:%u\n", reg, write_num, start_value, final);
+ }
+ break;
+ case 0x08: /* continuous write dword test */
+ {
+ u16 reg = arg;
+ u32 start_value = 0x110000c7;
+ u32 write_num = extra_arg;
+
+ int i;
+ u32 final;
+ struct xmit_frame *xmit_frame;
+
+ xmit_frame = rtw_IOL_accquire_xmit_frame(padapter);
+ if (xmit_frame == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ for (i = 0; i < write_num; i++)
+ rtw_IOL_append_WD_cmd(xmit_frame, reg, i+start_value, 0xFFFFFFFF);
+ if (_SUCCESS != rtw_IOL_exec_cmds_sync(padapter, xmit_frame, 5000, 0))
+ ret = -EPERM;
+
+ final = rtw_read32(padapter, reg);
+ if (start_value+write_num-1 == final)
+ DBG_88E("continuous IOL_CMD_WD_REG to 0x%x %u times Success, start:%u, final:%u\n",
+ reg, write_num, start_value, final);
+ else
+ DBG_88E("continuous IOL_CMD_WD_REG to 0x%x %u times Fail, start:%u, final:%u\n",
+ reg, write_num, start_value, final);
+ }
+ break;
+ }
+ break;
+ case 0x79:
+ {
+ /*
+ * dbg 0x79000000 [value], set RESP_TXAGC to + value, value:0~15
+ * dbg 0x79010000 [value], set RESP_TXAGC to - value, value:0~15
+ */
+ u8 value = extra_arg & 0x0f;
+ u8 sign = minor_cmd;
+ u16 write_value = 0;
+
+ DBG_88E("%s set RESP_TXAGC to %s %u\n", __func__, sign ? "minus" : "plus", value);
+
+ if (sign)
+ value = value | 0x10;
+
+ write_value = value | (value << 5);
+ rtw_write16(padapter, 0x6d9, write_value);
+ }
+ break;
+ case 0x7a:
+ receive_disconnect(padapter, pmlmeinfo->network.MacAddress
+ , WLAN_REASON_EXPIRATION_CHK);
+ break;
+ case 0x7F:
+ switch (minor_cmd) {
+ case 0x0:
+ DBG_88E("fwstate = 0x%x\n", get_fwstate(pmlmepriv));
+ break;
+ case 0x01:
+ DBG_88E("auth_alg = 0x%x, enc_alg = 0x%x, auth_type = 0x%x, enc_type = 0x%x\n",
+ psecuritypriv->dot11AuthAlgrthm, psecuritypriv->dot11PrivacyAlgrthm,
+ psecuritypriv->ndisauthtype, psecuritypriv->ndisencryptstatus);
+ break;
+ case 0x02:
+ DBG_88E("pmlmeinfo->state = 0x%x\n", pmlmeinfo->state);
+ break;
+ case 0x03:
+ DBG_88E("qos_option =%d\n", pmlmepriv->qospriv.qos_option);
+ DBG_88E("ht_option =%d\n", pmlmepriv->htpriv.ht_option);
+ break;
+ case 0x04:
+ DBG_88E("cur_ch =%d\n", pmlmeext->cur_channel);
+ DBG_88E("cur_bw =%d\n", pmlmeext->cur_bwmode);
+ DBG_88E("cur_ch_off =%d\n", pmlmeext->cur_ch_offset);
+ break;
+ case 0x05:
+ psta = rtw_get_stainfo(pstapriv, cur_network->network.MacAddress);
+ if (psta) {
+ int i;
+ struct recv_reorder_ctrl *preorder_ctrl;
+
+ DBG_88E("SSID =%s\n", cur_network->network.Ssid.Ssid);
+ DBG_88E("sta's macaddr: %pM\n", psta->hwaddr);
+ DBG_88E("cur_channel =%d, cur_bwmode =%d, cur_ch_offset =%d\n", pmlmeext->cur_channel, pmlmeext->cur_bwmode, pmlmeext->cur_ch_offset);
+ DBG_88E("rtsen =%d, cts2slef =%d\n", psta->rtsen, psta->cts2self);
+ DBG_88E("state = 0x%x, aid =%d, macid =%d, raid =%d\n", psta->state, psta->aid, psta->mac_id, psta->raid);
+ DBG_88E("qos_en =%d, ht_en =%d, init_rate =%d\n", psta->qos_option, psta->htpriv.ht_option, psta->init_rate);
+ DBG_88E("bwmode =%d, ch_offset =%d, sgi =%d\n", psta->htpriv.bwmode, psta->htpriv.ch_offset, psta->htpriv.sgi);
+ DBG_88E("ampdu_enable = %d\n", psta->htpriv.ampdu_enable);
+ DBG_88E("agg_enable_bitmap =%x, candidate_tid_bitmap =%x\n", psta->htpriv.agg_enable_bitmap, psta->htpriv.candidate_tid_bitmap);
+ for (i = 0; i < 16; i++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[i];
+ if (preorder_ctrl->enable)
+ DBG_88E("tid =%d, indicate_seq =%d\n", i, preorder_ctrl->indicate_seq);
+ }
+ } else {
+ DBG_88E("can't get sta's macaddr, cur_network's macaddr:%pM\n", (cur_network->network.MacAddress));
+ }
+ break;
+ case 0x06:
+ {
+ u32 ODMFlag;
+ rtw_hal_get_hwreg(padapter, HW_VAR_DM_FLAG, (u8 *)(&ODMFlag));
+ DBG_88E("(B)DMFlag = 0x%x, arg = 0x%x\n", ODMFlag, arg);
+ ODMFlag = (u32)(0x0f&arg);
+ DBG_88E("(A)DMFlag = 0x%x\n", ODMFlag);
+ rtw_hal_set_hwreg(padapter, HW_VAR_DM_FLAG, (u8 *)(&ODMFlag));
+ }
+ break;
+ case 0x07:
+ DBG_88E("bSurpriseRemoved =%d, bDriverStopped =%d\n",
+ padapter->bSurpriseRemoved, padapter->bDriverStopped);
+ break;
+ case 0x08:
+ {
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+ DBG_88E("free_xmitbuf_cnt =%d, free_xmitframe_cnt =%d, free_xmit_extbuf_cnt =%d\n",
+ pxmitpriv->free_xmitbuf_cnt, pxmitpriv->free_xmitframe_cnt, pxmitpriv->free_xmit_extbuf_cnt);
+ DBG_88E("rx_urb_pending_cn =%d\n", precvpriv->rx_pending_cnt);
+ }
+ break;
+ case 0x09:
+ {
+ int i, j;
+ struct list_head *plist, *phead;
+ struct recv_reorder_ctrl *preorder_ctrl;
+
+#ifdef CONFIG_88EU_AP_MODE
+ DBG_88E("sta_dz_bitmap = 0x%x, tim_bitmap = 0x%x\n", pstapriv->sta_dz_bitmap, pstapriv->tim_bitmap);
+#endif
+ _enter_critical_bh(&pstapriv->sta_hash_lock, &irqL);
+
+ for (i = 0; i < NUM_STA; i++) {
+ phead = &(pstapriv->sta_hash[i]);
+ plist = get_next(phead);
+
+ while ((rtw_end_of_queue_search(phead, plist)) == false) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+
+ plist = get_next(plist);
+
+ if (extra_arg == psta->aid) {
+ DBG_88E("sta's macaddr:%pM\n", (psta->hwaddr));
+ DBG_88E("rtsen =%d, cts2slef =%d\n", psta->rtsen, psta->cts2self);
+ DBG_88E("state = 0x%x, aid =%d, macid =%d, raid =%d\n", psta->state, psta->aid, psta->mac_id, psta->raid);
+ DBG_88E("qos_en =%d, ht_en =%d, init_rate =%d\n", psta->qos_option, psta->htpriv.ht_option, psta->init_rate);
+ DBG_88E("bwmode =%d, ch_offset =%d, sgi =%d\n", psta->htpriv.bwmode, psta->htpriv.ch_offset, psta->htpriv.sgi);
+ DBG_88E("ampdu_enable = %d\n", psta->htpriv.ampdu_enable);
+ DBG_88E("agg_enable_bitmap =%x, candidate_tid_bitmap =%x\n", psta->htpriv.agg_enable_bitmap, psta->htpriv.candidate_tid_bitmap);
+
+#ifdef CONFIG_88EU_AP_MODE
+ DBG_88E("capability = 0x%x\n", psta->capability);
+ DBG_88E("flags = 0x%x\n", psta->flags);
+ DBG_88E("wpa_psk = 0x%x\n", psta->wpa_psk);
+ DBG_88E("wpa2_group_cipher = 0x%x\n", psta->wpa2_group_cipher);
+ DBG_88E("wpa2_pairwise_cipher = 0x%x\n", psta->wpa2_pairwise_cipher);
+ DBG_88E("qos_info = 0x%x\n", psta->qos_info);
+#endif
+ DBG_88E("dot118021XPrivacy = 0x%x\n", psta->dot118021XPrivacy);
+
+ for (j = 0; j < 16; j++) {
+ preorder_ctrl = &psta->recvreorder_ctrl[j];
+ if (preorder_ctrl->enable)
+ DBG_88E("tid =%d, indicate_seq =%d\n", j, preorder_ctrl->indicate_seq);
+ }
+ }
+ }
+ }
+ _exit_critical_bh(&pstapriv->sta_hash_lock, &irqL);
+ }
+ break;
+ case 0x0c:/* dump rx/tx packet */
+ if (arg == 0) {
+ DBG_88E("dump rx packet (%d)\n", extra_arg);
+ rtw_hal_set_def_var(padapter, HAL_DEF_DBG_DUMP_RXPKT, &(extra_arg));
+ } else if (arg == 1) {
+ DBG_88E("dump tx packet (%d)\n", extra_arg);
+ rtw_hal_set_def_var(padapter, HAL_DEF_DBG_DUMP_TXPKT, &(extra_arg));
+ }
+ break;
+ case 0x0f:
+ if (extra_arg == 0) {
+ DBG_88E("###### silent reset test.......#####\n");
+ rtw_hal_sreset_reset(padapter);
+ }
+ break;
+ case 0x15:
+ {
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ DBG_88E("==>silent resete cnts:%d\n", pwrpriv->ips_enter_cnts);
+ }
+ break;
+ case 0x10:/* driver version display */
+ DBG_88E("rtw driver version =%s\n", DRIVERVERSION);
+ break;
+ case 0x11:
+ DBG_88E("turn %s Rx RSSI display function\n", (extra_arg == 1) ? "on" : "off");
+ padapter->bRxRSSIDisplay = extra_arg;
+ rtw_hal_set_def_var(padapter, HW_DEF_FA_CNT_DUMP, &extra_arg);
+ break;
+ case 0x12: /* set rx_stbc */
+ {
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ /* 0: disable, bit(0): enable 2.4G, bit(1): enable 5G, 0x3: enable both 2.4G and 5G */
+ /* default is set to enable 2.4 GHz, due to an IOT issue with Buffalo's AP at 5 GHz */
+ if (pregpriv &&
+ (extra_arg == 0 ||
+ extra_arg == 1 ||
+ extra_arg == 2 ||
+ extra_arg == 3)) {
+ pregpriv->rx_stbc = extra_arg;
+ DBG_88E("set rx_stbc =%d\n", pregpriv->rx_stbc);
+ } else {
+ DBG_88E("get rx_stbc =%d\n", pregpriv->rx_stbc);
+ }
+ }
+ break;
+ case 0x13: /* set ampdu_enable */
+ {
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ /* 0: disable, 0x1:enable (but wifi_spec should be 0), 0x2: force enable (don't care wifi_spec) */
+ if (pregpriv && extra_arg >= 0 && extra_arg < 3) {
+ pregpriv->ampdu_enable = extra_arg;
+ DBG_88E("set ampdu_enable =%d\n", pregpriv->ampdu_enable);
+ } else {
+ DBG_88E("get ampdu_enable =%d\n", pregpriv->ampdu_enable);
+ }
+ }
+ break;
+ case 0x14: /* get wifi_spec */
+ {
+ struct registry_priv *pregpriv = &padapter->registrypriv;
+ DBG_88E("get wifi_spec =%d\n", pregpriv->wifi_spec);
+ }
+ break;
+ case 0x16:
+ if (arg == 0xff) {
+ pr_info("ODM_COMP_DIG\t\tBIT0\n");
+ pr_info("ODM_COMP_RA_MASK\t\tBIT1\n");
+ pr_info("ODM_COMP_DYNAMIC_TXPWR\tBIT2\n");
+ pr_info("ODM_COMP_FA_CNT\t\tBIT3\n");
+ pr_info("ODM_COMP_RSSI_MONITOR\tBIT4\n");
+ pr_info("ODM_COMP_CCK_PD\t\tBIT5\n");
+ pr_info("ODM_COMP_ANT_DIV\t\tBIT6\n");
+ pr_info("ODM_COMP_PWR_SAVE\t\tBIT7\n");
+ pr_info("ODM_COMP_PWR_TRAIN\tBIT8\n");
+ pr_info("ODM_COMP_RATE_ADAPTIVE\tBIT9\n");
+ pr_info("ODM_COMP_PATH_DIV\t\tBIT10\n");
+ pr_info("ODM_COMP_PSD \tBIT11\n");
+ pr_info("ODM_COMP_DYNAMIC_PRICCA\tBIT12\n");
+ pr_info("ODM_COMP_RXHP\t\tBIT13\n");
+ pr_info("ODM_COMP_EDCA_TURBO\tBIT16\n");
+ pr_info("ODM_COMP_EARLY_MODE\tBIT17\n");
+ pr_info("ODM_COMP_TX_PWR_TRACK\tBIT24\n");
+ pr_info("ODM_COMP_RX_GAIN_TRACK\tBIT25\n");
+ pr_info("ODM_COMP_CALIBRATION\tBIT26\n");
+ rtw_hal_get_def_var(padapter, HW_DEF_ODM_DBG_FLAG, &extra_arg);
+ } else {
+ rtw_hal_set_def_var(padapter, HW_DEF_ODM_DBG_FLAG, &extra_arg);
+ }
+ break;
+ case 0x23:
+ DBG_88E("turn %s the bNotifyChannelChange Variable\n", (extra_arg == 1) ? "on" : "off");
+ padapter->bNotifyChannelChange = extra_arg;
+ break;
+ case 0x24:
+#ifdef CONFIG_88EU_P2P
+ DBG_88E("turn %s the bShowGetP2PState Variable\n", (extra_arg == 1) ? "on" : "off");
+ padapter->bShowGetP2PState = extra_arg;
+#endif /* CONFIG_88EU_P2P */
+ break;
+ case 0xaa:
+ if (extra_arg > 0x13)
+ extra_arg = 0xFF;
+ DBG_88E("chang data rate to :0x%02x\n", extra_arg);
+ padapter->fix_rate = extra_arg;
+ break;
+ case 0xdd:/* registers dump, 0 for mac reg, 1 for bb reg, 2 for rf reg */
+ if (extra_arg == 0)
+ mac_reg_dump(padapter);
+ else if (extra_arg == 1)
+ bb_reg_dump(padapter);
+ else if (extra_arg == 2)
+ rf_reg_dump(padapter);
+ break;
+ case 0xee:/* turn on/off dynamic funcs */
+ {
+ u32 odm_flag;
+
+ if (0xf == extra_arg) {
+ rtw_hal_get_def_var(padapter, HAL_DEF_DBG_DM_FUNC, &odm_flag);
+ DBG_88E(" === DMFlag(0x%08x) ===\n", odm_flag);
+ DBG_88E("extra_arg = 0 - disable all dynamic func\n");
+ DBG_88E("extra_arg = 1 - disable DIG- BIT(0)\n");
+ DBG_88E("extra_arg = 2 - disable High power - BIT(1)\n");
+ DBG_88E("extra_arg = 3 - disable tx power tracking - BIT(2)\n");
+ DBG_88E("extra_arg = 4 - disable BT coexistence - BIT(3)\n");
+ DBG_88E("extra_arg = 5 - disable antenna diversity - BIT(4)\n");
+ DBG_88E("extra_arg = 6 - enable all dynamic func\n");
+ } else {
+ /*
+ * extra_arg = 0 - disable all dynamic func
+ * extra_arg = 1 - disable DIG
+ * extra_arg = 2 - disable tx power tracking
+ * extra_arg = 3 - turn on all dynamic func
+ */
+ rtw_hal_set_def_var(padapter, HAL_DEF_DBG_DM_FUNC, &(extra_arg));
+ rtw_hal_get_def_var(padapter, HAL_DEF_DBG_DM_FUNC, &odm_flag);
+ DBG_88E(" === DMFlag(0x%08x) ===\n", odm_flag);
+ }
+ }
+ break;
+
+ case 0xfd:
+ rtw_write8(padapter, 0xc50, arg);
+ DBG_88E("wr(0xc50) = 0x%x\n", rtw_read8(padapter, 0xc50));
+ rtw_write8(padapter, 0xc58, arg);
+ DBG_88E("wr(0xc58) = 0x%x\n", rtw_read8(padapter, 0xc58));
+ break;
+ case 0xfe:
+ DBG_88E("rd(0xc50) = 0x%x\n", rtw_read8(padapter, 0xc50));
+ DBG_88E("rd(0xc58) = 0x%x\n", rtw_read8(padapter, 0xc58));
+ break;
+ case 0xff:
+ DBG_88E("dbg(0x210) = 0x%x\n", rtw_read32(padapter, 0x210));
+ DBG_88E("dbg(0x608) = 0x%x\n", rtw_read32(padapter, 0x608));
+ DBG_88E("dbg(0x280) = 0x%x\n", rtw_read32(padapter, 0x280));
+ DBG_88E("dbg(0x284) = 0x%x\n", rtw_read32(padapter, 0x284));
+ DBG_88E("dbg(0x288) = 0x%x\n", rtw_read32(padapter, 0x288));
+
+ DBG_88E("dbg(0x664) = 0x%x\n", rtw_read32(padapter, 0x664));
+
+ DBG_88E("\n");
+
+ DBG_88E("dbg(0x430) = 0x%x\n", rtw_read32(padapter, 0x430));
+ DBG_88E("dbg(0x438) = 0x%x\n", rtw_read32(padapter, 0x438));
+
+ DBG_88E("dbg(0x440) = 0x%x\n", rtw_read32(padapter, 0x440));
+
+ DBG_88E("dbg(0x458) = 0x%x\n", rtw_read32(padapter, 0x458));
+
+ DBG_88E("dbg(0x484) = 0x%x\n", rtw_read32(padapter, 0x484));
+ DBG_88E("dbg(0x488) = 0x%x\n", rtw_read32(padapter, 0x488));
+
+ DBG_88E("dbg(0x444) = 0x%x\n", rtw_read32(padapter, 0x444));
+ DBG_88E("dbg(0x448) = 0x%x\n", rtw_read32(padapter, 0x448));
+ DBG_88E("dbg(0x44c) = 0x%x\n", rtw_read32(padapter, 0x44c));
+ DBG_88E("dbg(0x450) = 0x%x\n", rtw_read32(padapter, 0x450));
+ break;
+ }
+ break;
+ default:
+ DBG_88E("error dbg cmd!\n");
+ break;
+ }
+ return ret;
+}
+
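+/*
+ * Handle IEEE_CMD_SET_WPA_PARAM requests from wpa_supplicant:
+ * IEEE_PARAM_WPA_ENABLED selects the WPA/WPA2-PSK auth and encryption
+ * status, IEEE_PARAM_AUTH_ALGS is forwarded to wpa_set_auth_algs(), and
+ * the remaining parameters are accepted as no-ops.
+ */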
+static int wpa_set_param(struct net_device *dev, u8 name, u32 value)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ switch (name) {
+ case IEEE_PARAM_WPA_ENABLED:
+ padapter->securitypriv.dot11AuthAlgrthm = dot11AuthAlgrthm_8021X; /* 802.1x */
+ switch ((value)&0xff) {
+ case 1: /* WPA */
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPAPSK; /* WPA_PSK */
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption2Enabled;
+ break;
+ case 2: /* WPA2 */
+ padapter->securitypriv.ndisauthtype = Ndis802_11AuthModeWPA2PSK; /* WPA2_PSK */
+ padapter->securitypriv.ndisencryptstatus = Ndis802_11Encryption3Enabled;
+ break;
+ }
+ RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_info_,
+ ("wpa_set_param:padapter->securitypriv.ndisauthtype =%d\n", padapter->securitypriv.ndisauthtype));
+ break;
+ case IEEE_PARAM_TKIP_COUNTERMEASURES:
+ break;
+ case IEEE_PARAM_DROP_UNENCRYPTED: {
+ /* HACK:
+ *
+ * wpa_supplicant calls set_wpa_enabled when the driver
+ * is loaded and unloaded, regardless of if WPA is being
+ * used. No other calls are made which can be used to
+ * determine if encryption will be used or not prior to
+ * association being expected. If encryption is not being
+ * used, drop_unencrypted is set to false, else true -- we
+ * can use this to determine if the CAP_PRIVACY_ON bit should
+ * be set.
+ */
+
+ break;
+ }
+ case IEEE_PARAM_PRIVACY_INVOKED:
+ break;
+
+ case IEEE_PARAM_AUTH_ALGS:
+ ret = wpa_set_auth_algs(dev, value);
+ break;
+ case IEEE_PARAM_IEEE_802_1X:
+ break;
+ case IEEE_PARAM_WPAX_SELECT:
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ return ret;
+}
+
+static int wpa_mlme(struct net_device *dev, u32 command, u32 reason)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ switch (command) {
+ case IEEE_MLME_STA_DEAUTH:
+ if (!rtw_set_802_11_disassociate(padapter))
+ ret = -1;
+ break;
+ case IEEE_MLME_STA_DISASSOC:
+ if (!rtw_set_802_11_disassociate(padapter))
+ ret = -1;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
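+/*
+ * Entry point for the wpa_supplicant private ioctl: copy the ieee_param
+ * blob in from user space, dispatch on param->cmd and, on success, copy
+ * the (possibly updated) parameter block back to the caller.
+ */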
+static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
+{
+ struct ieee_param *param;
+ int ret = 0;
+
+ if (p->length < sizeof(struct ieee_param) || !p->pointer) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ param = (struct ieee_param *)rtw_malloc(p->length);
+ if (param == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(param, p->pointer, p->length)) {
+ kfree(param);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ switch (param->cmd) {
+ case IEEE_CMD_SET_WPA_PARAM:
+ ret = wpa_set_param(dev, param->u.wpa_param.name, param->u.wpa_param.value);
+ break;
+
+ case IEEE_CMD_SET_WPA_IE:
+ ret = rtw_set_wpa_ie((struct adapter *)rtw_netdev_priv(dev),
+ (char *)param->u.wpa_ie.data, (u16)param->u.wpa_ie.len);
+ break;
+
+ case IEEE_CMD_SET_ENCRYPTION:
+ ret = wpa_set_encryption(dev, param, p->length);
+ break;
+
+ case IEEE_CMD_MLME:
+ ret = wpa_mlme(dev, param->u.mlme.command, param->u.mlme.reason_code);
+ break;
+
+ default:
+ DBG_88E("Unknown WPA supplicant request: %d\n", param->cmd);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (ret == 0 && copy_to_user(p->pointer, param, p->length))
+ ret = -EFAULT;
+
+ kfree(param);
+
+out:
+
+ return ret;
+}
+
+#ifdef CONFIG_88EU_AP_MODE
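+/*
+ * Install a per-station (pairwise) key by queueing a _SetStaKey_CMD_ to
+ * the command thread rather than programming the hardware CAM directly.
+ */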
+static u8 set_pairwise_key(struct adapter *padapter, struct sta_info *psta)
+{
+ struct cmd_obj *ph2c;
+ struct set_stakey_parm *psetstakey_para;
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
+ u8 res = _SUCCESS;
+
+ ph2c = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (ph2c == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+
+ psetstakey_para = (struct set_stakey_parm *)rtw_zmalloc(sizeof(struct set_stakey_parm));
+ if (psetstakey_para == NULL) {
+ kfree(ph2c);
+ res = _FAIL;
+ goto exit;
+ }
+
+ init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
+
+ psetstakey_para->algorithm = (u8)psta->dot118021XPrivacy;
+
+ memcpy(psetstakey_para->addr, psta->hwaddr, ETH_ALEN);
+
+ memcpy(psetstakey_para->key, &psta->dot118021x_UncstKey, 16);
+
+ res = rtw_enqueue_cmd(pcmdpriv, ph2c);
+
+exit:
+
+ return res;
+}
+
+static int set_group_key(struct adapter *padapter, u8 *key, u8 alg, int keyid)
+{
+ u8 keylen;
+ struct cmd_obj *pcmd;
+ struct setkey_parm *psetkeyparm;
+ struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
+ int res = _SUCCESS;
+
+ DBG_88E("%s\n", __func__);
+
+ pcmd = (struct cmd_obj *)rtw_zmalloc(sizeof(struct cmd_obj));
+ if (pcmd == NULL) {
+ res = _FAIL;
+ goto exit;
+ }
+ psetkeyparm = (struct setkey_parm *)rtw_zmalloc(sizeof(struct setkey_parm));
+ if (psetkeyparm == NULL) {
+ kfree(pcmd);
+ res = _FAIL;
+ goto exit;
+ }
+
+ _rtw_memset(psetkeyparm, 0, sizeof(struct setkey_parm));
+
+ psetkeyparm->keyid = (u8)keyid;
+
+ psetkeyparm->algorithm = alg;
+
+ psetkeyparm->set_tx = 1;
+
+ switch (alg) {
+ case _WEP40_:
+ keylen = 5;
+ break;
+ case _WEP104_:
+ keylen = 13;
+ break;
+ case _TKIP_:
+ case _TKIP_WTMIC_:
+ case _AES_:
+ keylen = 16;
+ break;
+ default:
+ keylen = 16;
+ break;
+ }
+
+ memcpy(&(psetkeyparm->key[0]), key, keylen);
+
+ pcmd->cmdcode = _SetKey_CMD_;
+ pcmd->parmbuf = (u8 *)psetkeyparm;
+ pcmd->cmdsz = (sizeof(struct setkey_parm));
+ pcmd->rsp = NULL;
+ pcmd->rspsz = 0;
+
+ _rtw_init_listhead(&pcmd->list);
+
+ res = rtw_enqueue_cmd(pcmdpriv, pcmd);
+
+exit:
+
+ return res;
+}
+
+static int set_wep_key(struct adapter *padapter, u8 *key, u8 keylen, int keyid)
+{
+ u8 alg;
+
+ switch (keylen) {
+ case 5:
+ alg = _WEP40_;
+ break;
+ case 13:
+ alg = _WEP104_;
+ break;
+ default:
+ alg = _NO_PRIVACY_;
+ }
+
+ return set_group_key(padapter, key, alg, keyid);
+}
+
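+/*
+ * hostapd RTL871X_SET_ENCRYPTION handler.  Three cases are distinguished:
+ * legacy WEP default keys (broadcast sta_addr, alg "WEP"), AP-mode group
+ * keys (no station entry, set_tx == 1) and 802.1X pairwise keys for a
+ * known station.  For TKIP the TX/RX MIC keys are taken from key bytes
+ * 16-23 and 24-31 respectively.
+ */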
+static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
+{
+ int ret = 0;
+ u32 wep_key_idx, wep_key_len, wep_total_len;
+ struct ndis_802_11_wep *pwep = NULL;
+ struct sta_info *psta = NULL, *pbcmc_sta = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &(padapter->securitypriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_88E("%s\n", __func__);
+ param->u.crypt.err = 0;
+ param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
+ if (param_len != sizeof(struct ieee_param) + param->u.crypt.key_len) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
+ if (param->u.crypt.idx >= WEP_KEYS) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ } else {
+ psta = rtw_get_stainfo(pstapriv, param->sta_addr);
+ if (!psta) {
+ DBG_88E("rtw_set_encryption(), sta has already been removed or never been added\n");
+ goto exit;
+ }
+ }
+
+ if (strcmp(param->u.crypt.alg, "none") == 0 && (psta == NULL)) {
+ /* todo:clear default encryption keys */
+
+ DBG_88E("clear default encryption keys, keyid =%d\n", param->u.crypt.idx);
+ goto exit;
+ }
+ if (strcmp(param->u.crypt.alg, "WEP") == 0 && (psta == NULL)) {
+ DBG_88E("r871x_set_encryption, crypt.alg = WEP\n");
+ wep_key_idx = param->u.crypt.idx;
+ wep_key_len = param->u.crypt.key_len;
+ DBG_88E("r871x_set_encryption, wep_key_idx=%d, len=%d\n", wep_key_idx, wep_key_len);
+ if ((wep_key_idx >= WEP_KEYS) || (wep_key_len <= 0)) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ if (wep_key_len > 0) {
+ wep_key_len = wep_key_len <= 5 ? 5 : 13;
+ wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
+ pwep = (struct ndis_802_11_wep *)rtw_malloc(wep_total_len);
+ if (pwep == NULL) {
+ DBG_88E(" r871x_set_encryption: pwep allocate fail !!!\n");
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ _rtw_memset(pwep, 0, wep_total_len);
+
+ pwep->KeyLength = wep_key_len;
+ pwep->Length = wep_total_len;
+ }
+
+ pwep->KeyIndex = wep_key_idx;
+
+ memcpy(pwep->KeyMaterial, param->u.crypt.key, pwep->KeyLength);
+
+ if (param->u.crypt.set_tx) {
+ DBG_88E("wep, set_tx = 1\n");
+
+ psecuritypriv->ndisencryptstatus = Ndis802_11Encryption1Enabled;
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP40_;
+ psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
+
+ if (pwep->KeyLength == 13) {
+ psecuritypriv->dot11PrivacyAlgrthm = _WEP104_;
+ psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
+ }
+
+ psecuritypriv->dot11PrivacyKeyIndex = wep_key_idx;
+
+ memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength);
+
+ psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->KeyLength;
+
+ set_wep_key(padapter, pwep->KeyMaterial, pwep->KeyLength, wep_key_idx);
+ } else {
+ DBG_88E("wep, set_tx = 0\n");
+
+ /* don't update "psecuritypriv->dot11PrivacyAlgrthm" and */
+ /* psecuritypriv->dot11PrivacyKeyIndex = keyid", but can rtw_set_key to cam */
+
+ memcpy(&(psecuritypriv->dot11DefKey[wep_key_idx].skey[0]), pwep->KeyMaterial, pwep->KeyLength);
+
+ psecuritypriv->dot11DefKeylen[wep_key_idx] = pwep->KeyLength;
+
+ set_wep_key(padapter, pwep->KeyMaterial, pwep->KeyLength, wep_key_idx);
+ }
+
+ goto exit;
+ }
+
+ if (!psta && check_fwstate(pmlmepriv, WIFI_AP_STATE)) { /* group key */
+ if (param->u.crypt.set_tx == 1) {
+ if (strcmp(param->u.crypt.alg, "WEP") == 0) {
+ DBG_88E("%s, set group_key, WEP\n", __func__);
+
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
+ param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+
+ psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
+ if (param->u.crypt.key_len == 13)
+ psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
+ } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
+ DBG_88E("%s, set group_key, TKIP\n", __func__);
+ psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
+ param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ /* set mic key */
+ memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
+ memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+
+ psecuritypriv->busetkipkey = true;
+ } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
+ DBG_88E("%s, set group_key, CCMP\n", __func__);
+ psecuritypriv->dot118021XGrpPrivacy = _AES_;
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
+ param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ } else {
+ DBG_88E("%s, set group_key, none\n", __func__);
+ psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+ }
+ psecuritypriv->dot118021XGrpKeyid = param->u.crypt.idx;
+ psecuritypriv->binstallGrpkey = true;
+ psecuritypriv->dot11PrivacyAlgrthm = psecuritypriv->dot118021XGrpPrivacy;/* */
+ set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx);
+ pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
+ if (pbcmc_sta) {
+ pbcmc_sta->ieee8021x_blocked = false;
+ pbcmc_sta->dot118021XPrivacy = psecuritypriv->dot118021XGrpPrivacy;/* rx will use bmc_sta's dot118021XPrivacy */
+ }
+ }
+ goto exit;
+ }
+
+ if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X && psta) { /* psk/802_1x */
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ if (param->u.crypt.set_tx == 1) {
+ memcpy(psta->dot118021x_UncstKey.skey, param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+
+ if (strcmp(param->u.crypt.alg, "WEP") == 0) {
+ DBG_88E("%s, set pairwise key, WEP\n", __func__);
+
+ psta->dot118021XPrivacy = _WEP40_;
+ if (param->u.crypt.key_len == 13)
+ psta->dot118021XPrivacy = _WEP104_;
+ } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
+ DBG_88E("%s, set pairwise key, TKIP\n", __func__);
+
+ psta->dot118021XPrivacy = _TKIP_;
+
+ /* set mic key */
+ memcpy(psta->dot11tkiptxmickey.skey, &(param->u.crypt.key[16]), 8);
+ memcpy(psta->dot11tkiprxmickey.skey, &(param->u.crypt.key[24]), 8);
+
+ psecuritypriv->busetkipkey = true;
+ } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
+ DBG_88E("%s, set pairwise key, CCMP\n", __func__);
+
+ psta->dot118021XPrivacy = _AES_;
+ } else {
+ DBG_88E("%s, set pairwise key, none\n", __func__);
+
+ psta->dot118021XPrivacy = _NO_PRIVACY_;
+ }
+
+ set_pairwise_key(padapter, psta);
+
+ psta->ieee8021x_blocked = false;
+ } else { /* group key??? */
+ if (strcmp(param->u.crypt.alg, "WEP") == 0) {
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
+ param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ psecuritypriv->dot118021XGrpPrivacy = _WEP40_;
+ if (param->u.crypt.key_len == 13)
+ psecuritypriv->dot118021XGrpPrivacy = _WEP104_;
+ } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
+ psecuritypriv->dot118021XGrpPrivacy = _TKIP_;
+
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
+ param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+
+ /* set mic key */
+ memcpy(psecuritypriv->dot118021XGrptxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[16]), 8);
+ memcpy(psecuritypriv->dot118021XGrprxmickey[param->u.crypt.idx].skey, &(param->u.crypt.key[24]), 8);
+
+ psecuritypriv->busetkipkey = true;
+ } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
+ psecuritypriv->dot118021XGrpPrivacy = _AES_;
+
+ memcpy(psecuritypriv->dot118021XGrpKey[param->u.crypt.idx].skey,
+ param->u.crypt.key, (param->u.crypt.key_len > 16 ? 16 : param->u.crypt.key_len));
+ } else {
+ psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+ }
+
+ psecuritypriv->dot118021XGrpKeyid = param->u.crypt.idx;
+
+ psecuritypriv->binstallGrpkey = true;
+
+ psecuritypriv->dot11PrivacyAlgrthm = psecuritypriv->dot118021XGrpPrivacy;/* */
+
+ set_group_key(padapter, param->u.crypt.key, psecuritypriv->dot118021XGrpPrivacy, param->u.crypt.idx);
+
+ pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
+ if (pbcmc_sta) {
+ pbcmc_sta->ieee8021x_blocked = false;
+ pbcmc_sta->dot118021XPrivacy = psecuritypriv->dot118021XGrpPrivacy;/* rx will use bmc_sta's dot118021XPrivacy */
+ }
+ }
+ }
+ }
+
+exit:
+
+ kfree(pwep);
+
+ return ret;
+}
+
+static int rtw_set_beacon(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ unsigned char *pbuf = param->u.bcn_ie.buf;
+
+ DBG_88E("%s, len =%d\n", __func__, len);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ memcpy(&pstapriv->max_num_sta, param->u.bcn_ie.reserved, 2);
+
+ if ((pstapriv->max_num_sta > NUM_STA) || (pstapriv->max_num_sta <= 0))
+ pstapriv->max_num_sta = NUM_STA;
+
+ if (rtw_check_beacon_data(padapter, pbuf, (len-12-2)) == _SUCCESS)/* 12 = param header, 2:no packed */
+ ret = 0;
+ else
+ ret = -EINVAL;
+
+ return ret;
+}
+
+static int rtw_hostapd_sta_flush(struct net_device *dev)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ DBG_88E("%s\n", __func__);
+
+ flush_all_cam_entry(padapter); /* clear CAM */
+
+ ret = rtw_sta_flush(padapter);
+
+ return ret;
+}
+
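+/*
+ * hostapd "add station" handler: the station entry must already exist in
+ * the driver (created at association time); this call only refreshes its
+ * AID, supported rates and WMM/HT capabilities from the ieee_param data.
+ */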
+static int rtw_add_sta(struct net_device *dev, struct ieee_param *param)
+{
+ int ret = 0;
+ struct sta_info *psta = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_88E("rtw_add_sta(aid =%d) =%pM\n", param->u.add_sta.aid, (param->sta_addr));
+
+ if (!check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)))
+ return -EINVAL;
+
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ return -EINVAL;
+
+ psta = rtw_get_stainfo(pstapriv, param->sta_addr);
+ if (psta) {
+ int flags = param->u.add_sta.flags;
+
+ psta->aid = param->u.add_sta.aid;/* aid = 1~2007 */
+
+ memcpy(psta->bssrateset, param->u.add_sta.tx_supp_rates, 16);
+
+ /* check wmm cap. */
+ if (WLAN_STA_WME&flags)
+ psta->qos_option = 1;
+ else
+ psta->qos_option = 0;
+
+ if (pmlmepriv->qospriv.qos_option == 0)
+ psta->qos_option = 0;
+
+ /* check 802.11n HT cap. */
+ if (WLAN_STA_HT&flags) {
+ psta->htpriv.ht_option = true;
+ psta->qos_option = 1;
+ memcpy((void *)&psta->htpriv.ht_cap, (void *)&param->u.add_sta.ht_cap, sizeof(struct rtw_ieee80211_ht_cap));
+ } else {
+ psta->htpriv.ht_option = false;
+ }
+
+ if (pmlmepriv->htpriv.ht_option == false)
+ psta->htpriv.ht_option = false;
+
+ update_sta_info_apmode(padapter, psta);
+ } else {
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+
+static int rtw_del_sta(struct net_device *dev, struct ieee_param *param)
+{
+ unsigned long irqL;
+ int ret = 0;
+ struct sta_info *psta = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ int updated = 0;
+
+ DBG_88E("rtw_del_sta =%pM\n", (param->sta_addr));
+
+ if (check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != true)
+ return -EINVAL;
+
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ return -EINVAL;
+
+ psta = rtw_get_stainfo(pstapriv, param->sta_addr);
+ if (psta) {
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ if (!rtw_is_list_empty(&psta->asoc_list)) {
+ rtw_list_delete(&psta->asoc_list);
+ pstapriv->asoc_list_cnt--;
+ updated = ap_free_sta(padapter, psta, true, WLAN_REASON_DEAUTH_LEAVING);
+ }
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irqL);
+ associated_clients_update(padapter, updated);
+ psta = NULL;
+ } else {
+ DBG_88E("rtw_del_sta(), sta has already been removed or never been added\n");
+ }
+
+ return ret;
+}
+
+static int rtw_ioctl_get_sta_data(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct sta_info *psta = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct ieee_param_ex *param_ex = (struct ieee_param_ex *)param;
+ struct sta_data *psta_data = (struct sta_data *)param_ex->data;
+
+ DBG_88E("rtw_ioctl_get_sta_info, sta_addr: %pM\n", (param_ex->sta_addr));
+
+ if (check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != true)
+ return -EINVAL;
+
+ if (param_ex->sta_addr[0] == 0xff && param_ex->sta_addr[1] == 0xff &&
+ param_ex->sta_addr[2] == 0xff && param_ex->sta_addr[3] == 0xff &&
+ param_ex->sta_addr[4] == 0xff && param_ex->sta_addr[5] == 0xff)
+ return -EINVAL;
+
+ psta = rtw_get_stainfo(pstapriv, param_ex->sta_addr);
+ if (psta) {
+ psta_data->aid = (u16)psta->aid;
+ psta_data->capability = psta->capability;
+ psta_data->flags = psta->flags;
+
+/*
+ nonerp_set : BIT(0)
+ no_short_slot_time_set : BIT(1)
+ no_short_preamble_set : BIT(2)
+ no_ht_gf_set : BIT(3)
+ no_ht_set : BIT(4)
+ ht_20mhz_set : BIT(5)
+*/
+
+ psta_data->sta_set = ((psta->nonerp_set) |
+ (psta->no_short_slot_time_set << 1) |
+ (psta->no_short_preamble_set << 2) |
+ (psta->no_ht_gf_set << 3) |
+ (psta->no_ht_set << 4) |
+ (psta->ht_20mhz_set << 5));
+ psta_data->tx_supp_rates_len = psta->bssratelen;
+ memcpy(psta_data->tx_supp_rates, psta->bssrateset, psta->bssratelen);
+ memcpy(&psta_data->ht_cap, &psta->htpriv.ht_cap, sizeof(struct rtw_ieee80211_ht_cap));
+ psta_data->rx_pkts = psta->sta_stats.rx_data_pkts;
+ psta_data->rx_bytes = psta->sta_stats.rx_bytes;
+ psta_data->rx_drops = psta->sta_stats.rx_drops;
+ psta_data->tx_pkts = psta->sta_stats.tx_pkts;
+ psta_data->tx_bytes = psta->sta_stats.tx_bytes;
+ psta_data->tx_drops = psta->sta_stats.tx_drops;
+ } else {
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static int rtw_get_sta_wpaie(struct net_device *dev, struct ieee_param *param)
+{
+ int ret = 0;
+ struct sta_info *psta = NULL;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ DBG_88E("rtw_get_sta_wpaie, sta_addr: %pM\n", (param->sta_addr));
+
+ if (check_fwstate(pmlmepriv, (_FW_LINKED|WIFI_AP_STATE)) != true)
+ return -EINVAL;
+
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ return -EINVAL;
+
+ psta = rtw_get_stainfo(pstapriv, param->sta_addr);
+ if (psta) {
+ if ((psta->wpa_ie[0] == WLAN_EID_RSN) || (psta->wpa_ie[0] == WLAN_EID_GENERIC)) {
+ int wpa_ie_len;
+ int copy_len;
+
+ wpa_ie_len = psta->wpa_ie[1];
+ copy_len = ((wpa_ie_len+2) > sizeof(psta->wpa_ie)) ? (sizeof(psta->wpa_ie)) : (wpa_ie_len+2);
+ param->u.wpa_ie.len = copy_len;
+ memcpy(param->u.wpa_ie.reserved, psta->wpa_ie, copy_len);
+ } else {
+ DBG_88E("sta's wpa_ie is NONE\n");
+ }
+ } else {
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static int rtw_set_wps_beacon(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ unsigned char wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ int ie_len;
+
+ DBG_88E("%s, len =%d\n", __func__, len);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ ie_len = len-12-2;/* 12 = param header, 2:no packed */
+
+ if (pmlmepriv->wps_beacon_ie) {
+ kfree(pmlmepriv->wps_beacon_ie);
+ pmlmepriv->wps_beacon_ie = NULL;
+ }
+
+ if (ie_len > 0) {
+ pmlmepriv->wps_beacon_ie = rtw_malloc(ie_len);
+ pmlmepriv->wps_beacon_ie_len = ie_len;
+ if (pmlmepriv->wps_beacon_ie == NULL) {
+ DBG_88E("%s()-%d: rtw_malloc() ERROR!\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ memcpy(pmlmepriv->wps_beacon_ie, param->u.bcn_ie.buf, ie_len);
+
+ update_beacon(padapter, _VENDOR_SPECIFIC_IE_, wps_oui, true);
+
+ pmlmeext->bstart_bss = true;
+ }
+
+ return ret;
+}
+
+static int rtw_set_wps_probe_resp(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ int ie_len;
+
+ DBG_88E("%s, len =%d\n", __func__, len);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ ie_len = len-12-2;/* 12 = param header, 2:no packed */
+
+ if (pmlmepriv->wps_probe_resp_ie) {
+ kfree(pmlmepriv->wps_probe_resp_ie);
+ pmlmepriv->wps_probe_resp_ie = NULL;
+ }
+
+ if (ie_len > 0) {
+ pmlmepriv->wps_probe_resp_ie = rtw_malloc(ie_len);
+ pmlmepriv->wps_probe_resp_ie_len = ie_len;
+ if (pmlmepriv->wps_probe_resp_ie == NULL) {
+ DBG_88E("%s()-%d: rtw_malloc() ERROR!\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+ memcpy(pmlmepriv->wps_probe_resp_ie, param->u.bcn_ie.buf, ie_len);
+ }
+
+ return ret;
+}
+
+static int rtw_set_wps_assoc_resp(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ int ie_len;
+
+ DBG_88E("%s, len =%d\n", __func__, len);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ ie_len = len-12-2;/* 12 = param header, 2:no packed */
+
+ if (pmlmepriv->wps_assoc_resp_ie) {
+ kfree(pmlmepriv->wps_assoc_resp_ie);
+ pmlmepriv->wps_assoc_resp_ie = NULL;
+ }
+
+ if (ie_len > 0) {
+ pmlmepriv->wps_assoc_resp_ie = rtw_malloc(ie_len);
+ pmlmepriv->wps_assoc_resp_ie_len = ie_len;
+ if (pmlmepriv->wps_assoc_resp_ie == NULL) {
+ DBG_88E("%s()-%d: rtw_malloc() ERROR!\n", __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ memcpy(pmlmepriv->wps_assoc_resp_ie, param->u.bcn_ie.buf, ie_len);
+ }
+
+ return ret;
+}
+
+static int rtw_set_hidden_ssid(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ u8 value;
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ if (param->u.wpa_param.name != 0) /* dummy test... */
+ DBG_88E("%s name(%u) != 0\n", __func__, param->u.wpa_param.name);
+ value = param->u.wpa_param.value;
+
+ /* use the same definition of hostapd's ignore_broadcast_ssid */
+ if (value != 1 && value != 2)
+ value = 0;
+ DBG_88E("%s value(%u)\n", __func__, value);
+ pmlmeinfo->hidden_ssid_mode = value;
+ return ret;
+}
+
+static int rtw_ioctl_acl_remove_sta(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ return -EINVAL;
+ ret = rtw_acl_remove_sta(padapter, param->sta_addr);
+ return ret;
+}
+
+static int rtw_ioctl_acl_add_sta(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
+ param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
+ param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff)
+ return -EINVAL;
+ ret = rtw_acl_add_sta(padapter, param->sta_addr);
+ return ret;
+}
+
+static int rtw_ioctl_set_macaddr_acl(struct net_device *dev, struct ieee_param *param, int len)
+{
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
+ return -EINVAL;
+
+ rtw_set_macaddr_acl(padapter, param->u.mlme.command);
+
+ return ret;
+}
+
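+/*
+ * Main dispatcher for hostapd's RTL871X_HOSTAPD_* private ioctls.
+ * Requires hw_init_completed, copies the ieee_param blob in from user
+ * space, routes it to the handlers above and copies the result back on
+ * success.
+ */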
+static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
+{
+ struct ieee_param *param;
+ int ret = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ /*
+ * This function is expected to be called in master mode, which allows no
+ * power saving, so we just check hw_init_completed.
+ */
+
+ if (!padapter->hw_init_completed) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ if (!p->pointer) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ param = (struct ieee_param *)rtw_malloc(p->length);
+ if (param == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(param, p->pointer, p->length)) {
+ kfree(param);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ switch (param->cmd) {
+ case RTL871X_HOSTAPD_FLUSH:
+ ret = rtw_hostapd_sta_flush(dev);
+ break;
+ case RTL871X_HOSTAPD_ADD_STA:
+ ret = rtw_add_sta(dev, param);
+ break;
+ case RTL871X_HOSTAPD_REMOVE_STA:
+ ret = rtw_del_sta(dev, param);
+ break;
+ case RTL871X_HOSTAPD_SET_BEACON:
+ ret = rtw_set_beacon(dev, param, p->length);
+ break;
+ case RTL871X_SET_ENCRYPTION:
+ ret = rtw_set_encryption(dev, param, p->length);
+ break;
+ case RTL871X_HOSTAPD_GET_WPAIE_STA:
+ ret = rtw_get_sta_wpaie(dev, param);
+ break;
+ case RTL871X_HOSTAPD_SET_WPS_BEACON:
+ ret = rtw_set_wps_beacon(dev, param, p->length);
+ break;
+ case RTL871X_HOSTAPD_SET_WPS_PROBE_RESP:
+ ret = rtw_set_wps_probe_resp(dev, param, p->length);
+ break;
+ case RTL871X_HOSTAPD_SET_WPS_ASSOC_RESP:
+ ret = rtw_set_wps_assoc_resp(dev, param, p->length);
+ break;
+ case RTL871X_HOSTAPD_SET_HIDDEN_SSID:
+ ret = rtw_set_hidden_ssid(dev, param, p->length);
+ break;
+ case RTL871X_HOSTAPD_GET_INFO_STA:
+ ret = rtw_ioctl_get_sta_data(dev, param, p->length);
+ break;
+ case RTL871X_HOSTAPD_SET_MACADDR_ACL:
+ ret = rtw_ioctl_set_macaddr_acl(dev, param, p->length);
+ break;
+ case RTL871X_HOSTAPD_ACL_ADD_STA:
+ ret = rtw_ioctl_acl_add_sta(dev, param, p->length);
+ break;
+ case RTL871X_HOSTAPD_ACL_REMOVE_STA:
+ ret = rtw_ioctl_acl_remove_sta(dev, param, p->length);
+ break;
+ default:
+ DBG_88E("Unknown hostapd request: %d\n", param->cmd);
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ if (ret == 0 && copy_to_user(p->pointer, param, p->length))
+ ret = -EFAULT;
+ kfree(param);
+out:
+ return ret;
+}
+#endif
+
+#include <rtw_android.h>
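+/*
+ * wext "set_priv" handler.  Two payloads are recognised: a WPS
+ * probe-request IE (dwrq->flags == 0x8766), which is cached in
+ * pmlmepriv->wps_probe_req_ie, and a combo-scan request
+ * (WEXT_CSCAN_HEADER), which is forwarded to rtw_wx_set_scan().
+ */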
+static int rtw_wx_set_priv(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *awrq,
+ char *extra)
+{
+ int ret = 0;
+ int len = 0;
+ char *ext;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct iw_point *dwrq = (struct iw_point *)awrq;
+
+ if (dwrq->length == 0)
+ return -EFAULT;
+
+ len = dwrq->length;
+ ext = rtw_vmalloc(len);
+ if (!ext)
+ return -ENOMEM;
+
+ if (copy_from_user(ext, dwrq->pointer, len)) {
+ rtw_vmfree(ext, len);
+ return -EFAULT;
+ }
+
+ /* added for wps2.0 @20110524 */
+ if (dwrq->flags == 0x8766 && len > 8) {
+ u32 cp_sz;
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ u8 *probereq_wpsie = ext;
+ int probereq_wpsie_len = len;
+ u8 wps_oui[4] = {0x0, 0x50, 0xf2, 0x04};
+
+ if ((_VENDOR_SPECIFIC_IE_ == probereq_wpsie[0]) &&
+ (!memcmp(&probereq_wpsie[2], wps_oui, 4))) {
+ cp_sz = probereq_wpsie_len > MAX_WPS_IE_LEN ? MAX_WPS_IE_LEN : probereq_wpsie_len;
+
+ pmlmepriv->wps_probe_req_ie_len = 0;
+ kfree(pmlmepriv->wps_probe_req_ie);
+ pmlmepriv->wps_probe_req_ie = NULL;
+
+ pmlmepriv->wps_probe_req_ie = rtw_malloc(cp_sz);
+ if (pmlmepriv->wps_probe_req_ie == NULL) {
+ pr_info("%s()-%d: rtw_malloc() ERROR!\n", __func__, __LINE__);
+ ret = -EINVAL;
+ goto FREE_EXT;
+ }
+ memcpy(pmlmepriv->wps_probe_req_ie, probereq_wpsie, cp_sz);
+ pmlmepriv->wps_probe_req_ie_len = cp_sz;
+ }
+ goto FREE_EXT;
+ }
+
+ if (len >= WEXT_CSCAN_HEADER_SIZE &&
+ !memcmp(ext, WEXT_CSCAN_HEADER, WEXT_CSCAN_HEADER_SIZE)) {
+ ret = rtw_wx_set_scan(dev, info, awrq, ext);
+ goto FREE_EXT;
+ }
+
+FREE_EXT:
+
+ rtw_vmfree(ext, len);
+
+ return ret;
+}
+
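+/*
+ * Power-management private command.  The keyword is matched on its first
+ * four bytes ("lps " or "ips "), and the mode value parsed from the rest
+ * of the string is passed to rtw_pm_set_lps()/rtw_pm_set_ips().
+ */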
+static int rtw_pm_set(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ int ret = 0;
+ unsigned mode = 0;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+
+ DBG_88E("[%s] extra = %s\n", __func__, extra);
+
+ if (!memcmp(extra, "lps =", 4)) {
+ sscanf(extra+4, "%u", &mode);
+ ret = rtw_pm_set_lps(padapter, mode);
+ } else if (!memcmp(extra, "ips =", 4)) {
+ sscanf(extra+4, "%u", &mode);
+ ret = rtw_pm_set_ips(padapter, mode);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
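+/*
+ * Efuse read/query private command.  The request string is split into up
+ * to three comma-separated tokens; the first selects the operation
+ * ("status", "filemap", "realmap", "rmap", "realraw", "mac", "vidpid", ...)
+ * and the optional second/third tokens give the map offset and byte
+ * count, e.g. "rmap,0,2".  Results are formatted back into 'extra'.
+ */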
+static int rtw_mp_efuse_get(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wdata, char *extra)
+{
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+ struct hal_data_8188e *haldata = GET_HAL_DATA(padapter);
+ struct efuse_hal *pEfuseHal;
+ struct iw_point *wrqu;
+
+ u8 *PROMContent = pEEPROM->efuse_eeprom_data;
+ u8 ips_mode = 0, lps_mode = 0;
+ struct pwrctrl_priv *pwrctrlpriv;
+ u8 *data = NULL;
+ u8 *rawdata = NULL;
+ char *pch, *ptmp, *token, *tmp[3] = {NULL, NULL, NULL};
+ u16 i = 0, j = 0, mapLen = 0, addr = 0, cnts = 0;
+ u16 max_available_size = 0, raw_cursize = 0, raw_maxsize = 0;
+ int err;
+ u8 org_fw_iol = padapter->registrypriv.fw_iol;/* 0:Disable, 1:enable, 2:by usb speed */
+
+ wrqu = (struct iw_point *)wdata;
+ pwrctrlpriv = &padapter->pwrctrlpriv;
+ pEfuseHal = &haldata->EfuseHal;
+
+ err = 0;
+ data = _rtw_zmalloc(EFUSE_BT_MAX_MAP_LEN);
+ if (data == NULL) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ rawdata = _rtw_zmalloc(EFUSE_BT_MAX_MAP_LEN);
+ if (rawdata == NULL) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ if (copy_from_user(extra, wrqu->pointer, wrqu->length)) {
+ err = -EFAULT;
+ goto exit;
+ }
+ lps_mode = pwrctrlpriv->power_mgnt;/* keep org value */
+ rtw_pm_set_lps(padapter, PS_MODE_ACTIVE);
+
+ ips_mode = pwrctrlpriv->ips_mode;/* keep org value */
+ rtw_pm_set_ips(padapter, IPS_NONE);
+
+ pch = extra;
+ DBG_88E("%s: in =%s\n", __func__, extra);
+
+ i = 0;
+ /* mac 16 "00e04c871200" rmap, 00, 2 */
+ while ((token = strsep(&pch, ",")) != NULL) {
+ if (i > 2)
+ break;
+ tmp[i] = token;
+ i++;
+ }
+ padapter->registrypriv.fw_iol = 0;/* 0:Disable, 1:enable, 2:by usb speed */
+
+ if (strcmp(tmp[0], "status") == 0) {
+ sprintf(extra, "Load File efuse =%s, Load File MAC =%s", (pEEPROM->bloadfile_fail_flag ? "FAIL" : "OK"), (pEEPROM->bloadmac_fail_flag ? "FAIL" : "OK"));
+
+ goto exit;
+ } else if (strcmp(tmp[0], "filemap") == 0) {
+ mapLen = EFUSE_MAP_SIZE;
+
+ sprintf(extra, "\n");
+ for (i = 0; i < EFUSE_MAP_SIZE; i += 16) {
+ sprintf(extra, "%s0x%02x\t", extra, i);
+ for (j = 0; j < 8; j++)
+ sprintf(extra, "%s%02X ", extra, PROMContent[i+j]);
+ sprintf(extra, "%s\t", extra);
+ for (; j < 16; j++)
+ sprintf(extra, "%s%02X ", extra, PROMContent[i+j]);
+ sprintf(extra, "%s\n", extra);
+ }
+ } else if (strcmp(tmp[0], "realmap") == 0) {
+ mapLen = EFUSE_MAP_SIZE;
+ if (rtw_efuse_map_read(padapter, 0, mapLen, pEfuseHal->fakeEfuseInitMap) == _FAIL) {
+ DBG_88E("%s: read realmap Fail!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ sprintf(extra, "\n");
+ for (i = 0; i < EFUSE_MAP_SIZE; i += 16) {
+ sprintf(extra, "%s0x%02x\t", extra, i);
+ for (j = 0; j < 8; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->fakeEfuseInitMap[i+j]);
+ sprintf(extra, "%s\t", extra);
+ for (; j < 16; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->fakeEfuseInitMap[i+j]);
+ sprintf(extra, "%s\n", extra);
+ }
+ } else if (strcmp(tmp[0], "rmap") == 0) {
+ if ((tmp[1] == NULL) || (tmp[2] == NULL)) {
+ DBG_88E("%s: rmap Fail!! Parameters error!\n", __func__);
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* rmap addr cnts */
+ addr = simple_strtoul(tmp[1], &ptmp, 16);
+ DBG_88E("%s: addr =%x\n", __func__, addr);
+
+ cnts = simple_strtoul(tmp[2], &ptmp, 10);
+ if (cnts == 0) {
+ DBG_88E("%s: rmap Fail!! cnts error!\n", __func__);
+ err = -EINVAL;
+ goto exit;
+ }
+ DBG_88E("%s: cnts =%d\n", __func__, cnts);
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+ if ((addr + cnts) > max_available_size) {
+ DBG_88E("%s: addr(0x%X)+cnts(%d) parameter error!\n", __func__, addr, cnts);
+ err = -EINVAL;
+ goto exit;
+ }
+
+ if (rtw_efuse_map_read(padapter, addr, cnts, data) == _FAIL) {
+ DBG_88E("%s: rtw_efuse_map_read error!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ *extra = 0;
+ for (i = 0; i < cnts; i++)
+ sprintf(extra, "%s0x%02X ", extra, data[i]);
+ } else if (strcmp(tmp[0], "realraw") == 0) {
+ addr = 0;
+ mapLen = EFUSE_MAX_SIZE;
+ if (rtw_efuse_access(padapter, false, addr, mapLen, rawdata) == _FAIL) {
+ DBG_88E("%s: rtw_efuse_access Fail!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ sprintf(extra, "\n");
+ for (i = 0; i < mapLen; i++) {
+ sprintf(extra, "%s%02X", extra, rawdata[i]);
+
+ if ((i & 0xF) == 0xF)
+ sprintf(extra, "%s\n", extra);
+ else if ((i & 0x7) == 0x7)
+ sprintf(extra, "%s\t", extra);
+ else
+ sprintf(extra, "%s ", extra);
+ }
+ } else if (strcmp(tmp[0], "mac") == 0) {
+ cnts = 6;
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+ if ((addr + cnts) > max_available_size) {
+ DBG_88E("%s: addr(0x%02x)+cnts(%d) parameter error!\n", __func__, addr, cnts);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ if (rtw_efuse_map_read(padapter, addr, cnts, data) == _FAIL) {
+ DBG_88E("%s: rtw_efuse_map_read error!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ *extra = 0;
+ for (i = 0; i < cnts; i++) {
+ sprintf(extra, "%s%02X", extra, data[i]);
+ if (i != (cnts-1))
+ sprintf(extra, "%s:", extra);
+ }
+ } else if (strcmp(tmp[0], "vidpid") == 0) {
+ cnts = 4;
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+ if ((addr + cnts) > max_available_size) {
+ DBG_88E("%s: addr(0x%02x)+cnts(%d) parameter error!\n", __func__, addr, cnts);
+ err = -EFAULT;
+ goto exit;
+ }
+ if (rtw_efuse_map_read(padapter, addr, cnts, data) == _FAIL) {
+ DBG_88E("%s: rtw_efuse_access error!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ *extra = 0;
+ for (i = 0; i < cnts; i++) {
+ sprintf(extra, "%s0x%02X", extra, data[i]);
+ if (i != (cnts-1))
+ sprintf(extra, "%s,", extra);
+ }
+ } else if (strcmp(tmp[0], "ableraw") == 0) {
+ efuse_GetCurrentSize(padapter, &raw_cursize);
+ raw_maxsize = efuse_GetMaxSize(padapter);
+ sprintf(extra, "[available raw size] = %d bytes", raw_maxsize-raw_cursize);
+ } else if (strcmp(tmp[0], "btfmap") == 0) {
+ mapLen = EFUSE_BT_MAX_MAP_LEN;
+ if (rtw_BT_efuse_map_read(padapter, 0, mapLen, pEfuseHal->BTEfuseInitMap) == _FAIL) {
+ DBG_88E("%s: rtw_BT_efuse_map_read Fail!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ sprintf(extra, "\n");
+ for (i = 0; i < 512; i += 16) {
+ /* set 512 because the iwpriv's extra size have limit 0x7FF */
+ sprintf(extra, "%s0x%03x\t", extra, i);
+ for (j = 0; j < 8; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->BTEfuseInitMap[i+j]);
+ sprintf(extra, "%s\t", extra);
+ for (; j < 16; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->BTEfuseInitMap[i+j]);
+ sprintf(extra, "%s\n", extra);
+ }
+ } else if (strcmp(tmp[0], "btbmap") == 0) {
+ mapLen = EFUSE_BT_MAX_MAP_LEN;
+ if (rtw_BT_efuse_map_read(padapter, 0, mapLen, pEfuseHal->BTEfuseInitMap) == _FAIL) {
+ DBG_88E("%s: rtw_BT_efuse_map_read Fail!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ sprintf(extra, "\n");
+ for (i = 512; i < 1024; i += 16) {
+ sprintf(extra, "%s0x%03x\t", extra, i);
+ for (j = 0; j < 8; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->BTEfuseInitMap[i+j]);
+ sprintf(extra, "%s\t", extra);
+ for (; j < 16; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->BTEfuseInitMap[i+j]);
+ sprintf(extra, "%s\n", extra);
+ }
+ } else if (strcmp(tmp[0], "btrmap") == 0) {
+ if ((tmp[1] == NULL) || (tmp[2] == NULL)) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* rmap addr cnts */
+ addr = simple_strtoul(tmp[1], &ptmp, 16);
+ DBG_88E("%s: addr = 0x%X\n", __func__, addr);
+
+ cnts = simple_strtoul(tmp[2], &ptmp, 10);
+ if (cnts == 0) {
+ DBG_88E("%s: btrmap Fail!! cnts error!\n", __func__);
+ err = -EINVAL;
+ goto exit;
+ }
+ DBG_88E("%s: cnts =%d\n", __func__, cnts);
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_BT, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+ if ((addr + cnts) > max_available_size) {
+ DBG_88E("%s: addr(0x%X)+cnts(%d) parameter error!\n", __func__, addr, cnts);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ if (rtw_BT_efuse_map_read(padapter, addr, cnts, data) == _FAIL) {
+ DBG_88E("%s: rtw_BT_efuse_map_read error!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ *extra = 0;
+ for (i = 0; i < cnts; i++)
+ sprintf(extra, "%s 0x%02X ", extra, data[i]);
+ } else if (strcmp(tmp[0], "btffake") == 0) {
+ sprintf(extra, "\n");
+ for (i = 0; i < 512; i += 16) {
+ sprintf(extra, "%s0x%03x\t", extra, i);
+ for (j = 0; j < 8; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->fakeBTEfuseModifiedMap[i+j]);
+ sprintf(extra, "%s\t", extra);
+ for (; j < 16; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->fakeBTEfuseModifiedMap[i+j]);
+ sprintf(extra, "%s\n", extra);
+ }
+ } else if (strcmp(tmp[0], "btbfake") == 0) {
+ sprintf(extra, "\n");
+ for (i = 512; i < 1024; i += 16) {
+ sprintf(extra, "%s0x%03x\t", extra, i);
+ for (j = 0; j < 8; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->fakeBTEfuseModifiedMap[i+j]);
+ sprintf(extra, "%s\t", extra);
+ for (; j < 16; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->fakeBTEfuseModifiedMap[i+j]);
+ sprintf(extra, "%s\n", extra);
+ }
+ } else if (strcmp(tmp[0], "wlrfkmap") == 0) {
+ sprintf(extra, "\n");
+ for (i = 0; i < EFUSE_MAP_SIZE; i += 16) {
+ sprintf(extra, "%s0x%02x\t", extra, i);
+ for (j = 0; j < 8; j++)
+ sprintf(extra, "%s%02X ", extra, pEfuseHal->fakeEfuseModifiedMap[i+j]);
+ sprintf(extra, "%s\t", extra);
+ for (; j < 16; j++)
+ sprintf(extra, "%s %02X", extra, pEfuseHal->fakeEfuseModifiedMap[i+j]);
+ sprintf(extra, "%s\n", extra);
+ }
+ } else {
+ sprintf(extra, "Command not found!");
+ }
+
+exit:
+ kfree(data);
+ kfree(rawdata);
+ if (!err)
+ wrqu->length = strlen(extra);
+
+ rtw_pm_set_ips(padapter, ips_mode);
+ rtw_pm_set_lps(padapter, lps_mode);
+ padapter->registrypriv.fw_iol = org_fw_iol;/* 0:Disable, 1:enable, 2:by usb speed */
+ return err;
+}
+
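+/*
+ * efuse_set: write-side efuse debug interface (the "efuse_set" private
+ * command).  The first comma-separated token selects the action: wmap,
+ * wraw, mac, vidpid, btwmap, btwfake, btdumpfake, wldumpfake, btfk2map,
+ * wlfk2map or wlwfake.  Most write commands take a hex address and a
+ * hex byte string, e.g. "wmap,00,00e04c871200"; "mac" and "vidpid"
+ * take only the byte string ("mac,00e04c871200", "vidpid,da0b7881").
+ * All values here are illustrative.
+ */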
+static int rtw_mp_efuse_set(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wdata, char *extra)
+{
+ struct adapter *padapter;
+ struct pwrctrl_priv *pwrctrlpriv;
+ struct hal_data_8188e *haldata;
+ struct efuse_hal *pEfuseHal;
+
+ u8 ips_mode = 0, lps_mode = 0;
+ u32 i, jj, kk;
+ u8 *setdata = NULL;
+ u8 *ShadowMapBT = NULL;
+ u8 *ShadowMapWiFi = NULL;
+ u8 *setrawdata = NULL;
+ char *pch, *ptmp, *token, *tmp[3] = {NULL, NULL, NULL};
+ u16 addr = 0, cnts = 0, max_available_size = 0;
+ int err;
+
+ padapter = rtw_netdev_priv(dev);
+ pwrctrlpriv = &padapter->pwrctrlpriv;
+ haldata = GET_HAL_DATA(padapter);
+ pEfuseHal = &haldata->EfuseHal;
+ err = 0;
+ setdata = _rtw_zmalloc(1024);
+ if (setdata == NULL) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ ShadowMapBT = _rtw_malloc(EFUSE_BT_MAX_MAP_LEN);
+ if (ShadowMapBT == NULL) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ ShadowMapWiFi = _rtw_malloc(EFUSE_MAP_SIZE);
+ if (ShadowMapWiFi == NULL) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ setrawdata = _rtw_malloc(EFUSE_MAX_SIZE);
+ if (setrawdata == NULL) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ lps_mode = pwrctrlpriv->power_mgnt;/* keep org value */
+ rtw_pm_set_lps(padapter, PS_MODE_ACTIVE);
+
+ ips_mode = pwrctrlpriv->ips_mode;/* keep org value */
+ rtw_pm_set_ips(padapter, IPS_NONE);
+
+ pch = extra;
+ DBG_88E("%s: in =%s\n", __func__, extra);
+
+ i = 0;
+ while ((token = strsep(&pch, ",")) != NULL) {
+ if (i > 2)
+ break;
+ tmp[i] = token;
+ i++;
+ }
+
+ /* tmp[0],[1],[2] */
+ /* wmap, addr, 00e04c871200 */
+ if (strcmp(tmp[0], "wmap") == 0) {
+ if ((tmp[1] == NULL) || (tmp[2] == NULL)) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ addr = simple_strtoul(tmp[1], &ptmp, 16);
+ addr &= 0xFFF;
+
+ cnts = strlen(tmp[2]);
+ if (cnts%2) {
+ err = -EINVAL;
+ goto exit;
+ }
+ cnts /= 2;
+ if (cnts == 0) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ DBG_88E("%s: addr = 0x%X\n", __func__, addr);
+ DBG_88E("%s: cnts =%d\n", __func__, cnts);
+ DBG_88E("%s: map data =%s\n", __func__, tmp[2]);
+
+ for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
+ setdata[jj] = key_2char2num(tmp[2][kk], tmp[2][kk + 1]);
+ /* Check TYPE_EFUSE_MAP_LEN because the 8188E raw efuse is 256 bytes while the logical map is larger than 256. */
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&max_available_size, false);
+ if ((addr+cnts) > max_available_size) {
+ DBG_88E("%s: addr(0x%X)+cnts(%d) parameter error!\n", __func__, addr, cnts);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ if (rtw_efuse_map_write(padapter, addr, cnts, setdata) == _FAIL) {
+ DBG_88E("%s: rtw_efuse_map_write error!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+ } else if (strcmp(tmp[0], "wraw") == 0) {
+ if ((tmp[1] == NULL) || (tmp[2] == NULL)) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ addr = simple_strtoul(tmp[1], &ptmp, 16);
+ addr &= 0xFFF;
+
+ cnts = strlen(tmp[2]);
+ if (cnts%2) {
+ err = -EINVAL;
+ goto exit;
+ }
+ cnts /= 2;
+ if (cnts == 0) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ DBG_88E("%s: addr = 0x%X\n", __func__, addr);
+ DBG_88E("%s: cnts =%d\n", __func__, cnts);
+ DBG_88E("%s: raw data =%s\n", __func__, tmp[2]);
+
+ for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
+ setrawdata[jj] = key_2char2num(tmp[2][kk], tmp[2][kk + 1]);
+
+ if (rtw_efuse_access(padapter, true, addr, cnts, setrawdata) == _FAIL) {
+ DBG_88E("%s: rtw_efuse_access error!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+ } else if (strcmp(tmp[0], "mac") == 0) {
+ if (tmp[1] == NULL) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* mac, 00e04c871200 */
+ addr = EEPROM_MAC_ADDR_88EU;
+ cnts = strlen(tmp[1]);
+ if (cnts%2) {
+ err = -EINVAL;
+ goto exit;
+ }
+ cnts /= 2;
+ if (cnts == 0) {
+ err = -EINVAL;
+ goto exit;
+ }
+ if (cnts > 6) {
+ DBG_88E("%s: error data for mac addr =\"%s\"\n", __func__, tmp[1]);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ DBG_88E("%s: addr = 0x%X\n", __func__, addr);
+ DBG_88E("%s: cnts =%d\n", __func__, cnts);
+ DBG_88E("%s: MAC address =%s\n", __func__, tmp[1]);
+
+ for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
+ setdata[jj] = key_2char2num(tmp[1][kk], tmp[1][kk + 1]);
+ /* Check TYPE_EFUSE_MAP_LEN because the 8188E raw efuse is 256 bytes while the logical map is larger than 256. */
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&max_available_size, false);
+ if ((addr+cnts) > max_available_size) {
+ DBG_88E("%s: addr(0x%X)+cnts(%d) parameter error!\n", __func__, addr, cnts);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ if (rtw_efuse_map_write(padapter, addr, cnts, setdata) == _FAIL) {
+ DBG_88E("%s: rtw_efuse_map_write error!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+ } else if (strcmp(tmp[0], "vidpid") == 0) {
+ if (tmp[1] == NULL) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* pidvid, da0b7881 */
+ addr = EEPROM_VID_88EE;
+ cnts = strlen(tmp[1]);
+ if (cnts%2) {
+ err = -EINVAL;
+ goto exit;
+ }
+ cnts /= 2;
+ if (cnts == 0) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ DBG_88E("%s: addr = 0x%X\n", __func__, addr);
+ DBG_88E("%s: cnts =%d\n", __func__, cnts);
+ DBG_88E("%s: VID/PID =%s\n", __func__, tmp[1]);
+
+ for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
+ setdata[jj] = key_2char2num(tmp[1][kk], tmp[1][kk + 1]);
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+ if ((addr+cnts) > max_available_size) {
+ DBG_88E("%s: addr(0x%X)+cnts(%d) parameter error!\n", __func__, addr, cnts);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ if (rtw_efuse_map_write(padapter, addr, cnts, setdata) == _FAIL) {
+ DBG_88E("%s: rtw_efuse_map_write error!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+ } else if (strcmp(tmp[0], "btwmap") == 0) {
+ if ((tmp[1] == NULL) || (tmp[2] == NULL)) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ addr = simple_strtoul(tmp[1], &ptmp, 16);
+ addr &= 0xFFF;
+
+ cnts = strlen(tmp[2]);
+ if (cnts%2) {
+ err = -EINVAL;
+ goto exit;
+ }
+ cnts /= 2;
+ if (cnts == 0) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ DBG_88E("%s: addr = 0x%X\n", __func__, addr);
+ DBG_88E("%s: cnts =%d\n", __func__, cnts);
+ DBG_88E("%s: BT data =%s\n", __func__, tmp[2]);
+
+ for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
+ setdata[jj] = key_2char2num(tmp[2][kk], tmp[2][kk + 1]);
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_BT, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+ if ((addr+cnts) > max_available_size) {
+ DBG_88E("%s: addr(0x%X)+cnts(%d) parameter error!\n", __func__, addr, cnts);
+ err = -EFAULT;
+ goto exit;
+ }
+
+ if (rtw_BT_efuse_map_write(padapter, addr, cnts, setdata) == _FAIL) {
+ DBG_88E("%s: rtw_BT_efuse_map_write error!!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+ } else if (strcmp(tmp[0], "btwfake") == 0) {
+ if ((tmp[1] == NULL) || (tmp[2] == NULL)) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ addr = simple_strtoul(tmp[1], &ptmp, 16);
+ addr &= 0xFFF;
+
+ cnts = strlen(tmp[2]);
+ if (cnts%2) {
+ err = -EINVAL;
+ goto exit;
+ }
+ cnts /= 2;
+ if (cnts == 0) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ DBG_88E("%s: addr = 0x%X\n", __func__, addr);
+ DBG_88E("%s: cnts =%d\n", __func__, cnts);
+ DBG_88E("%s: BT tmp data =%s\n", __func__, tmp[2]);
+
+ for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
+ pEfuseHal->fakeBTEfuseModifiedMap[addr+jj] = key_2char2num(tmp[2][kk], tmp[2][kk + 1]);
+ } else if (strcmp(tmp[0], "btdumpfake") == 0) {
+ if (rtw_BT_efuse_map_read(padapter, 0, EFUSE_BT_MAX_MAP_LEN, pEfuseHal->fakeBTEfuseModifiedMap) == _SUCCESS) {
+ DBG_88E("%s: BT read all map success\n", __func__);
+ } else {
+ DBG_88E("%s: BT read all map Fail!\n", __func__);
+ err = -EFAULT;
+ }
+ } else if (strcmp(tmp[0], "wldumpfake") == 0) {
+ if (rtw_efuse_map_read(padapter, 0, EFUSE_BT_MAX_MAP_LEN, pEfuseHal->fakeEfuseModifiedMap) == _SUCCESS) {
+ DBG_88E("%s: BT read all map success\n", __func__);
+ } else {
+ DBG_88E("%s: BT read all map Fail\n", __func__);
+ err = -EFAULT;
+ }
+ } else if (strcmp(tmp[0], "btfk2map") == 0) {
+ memcpy(pEfuseHal->BTEfuseModifiedMap, pEfuseHal->fakeBTEfuseModifiedMap, EFUSE_BT_MAX_MAP_LEN);
+
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_BT, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+ if (max_available_size < 1) {
+ err = -EFAULT;
+ goto exit;
+ }
+
+ if (rtw_BT_efuse_map_write(padapter, 0x00, EFUSE_BT_MAX_MAP_LEN, pEfuseHal->fakeBTEfuseModifiedMap) == _FAIL) {
+ DBG_88E("%s: rtw_BT_efuse_map_write error!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+ } else if (strcmp(tmp[0], "wlfk2map") == 0) {
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+ if (max_available_size < 1) {
+ err = -EFAULT;
+ goto exit;
+ }
+
+ if (rtw_efuse_map_write(padapter, 0x00, EFUSE_MAX_MAP_LEN, pEfuseHal->fakeEfuseModifiedMap) == _FAIL) {
+ DBG_88E("%s: rtw_efuse_map_write error!\n", __func__);
+ err = -EFAULT;
+ goto exit;
+ }
+ } else if (strcmp(tmp[0], "wlwfake") == 0) {
+ if ((tmp[1] == NULL) || (tmp[2] == NULL)) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ addr = simple_strtoul(tmp[1], &ptmp, 16);
+ addr &= 0xFFF;
+
+ cnts = strlen(tmp[2]);
+ if (cnts%2) {
+ err = -EINVAL;
+ goto exit;
+ }
+ cnts /= 2;
+ if (cnts == 0) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ DBG_88E("%s: addr = 0x%X\n", __func__, addr);
+ DBG_88E("%s: cnts =%d\n", __func__, cnts);
+ DBG_88E("%s: map tmp data =%s\n", __func__, tmp[2]);
+
+ for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
+ pEfuseHal->fakeEfuseModifiedMap[addr+jj] = key_2char2num(tmp[2][kk], tmp[2][kk + 1]);
+ }
+
+exit:
+ kfree(setdata);
+ kfree(ShadowMapBT);
+ kfree(ShadowMapWiFi);
+ kfree(setrawdata);
+
+ rtw_pm_set_ips(padapter, ips_mode);
+ rtw_pm_set_lps(padapter, lps_mode);
+
+ return err;
+}
+
+/*
+ * Input Format: %s,%d,%d
+ * %s is width, could be
+ * "b" for 1 byte
+ * "w" for WORD (2 bytes)
+ * "dw" for DWORD (4 bytes)
+ * 1st %d is the address (offset), parsed as hex
+ * 2nd %d is the data to write, parsed as hex
+ */
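+/*
+ * Example (offset and value are illustrative): "w,102,3040" writes the
+ * 16-bit value 0x3040 to offset 0x102.  Address and data are parsed as
+ * hex and the offset must not exceed 0x3FFF.
+ */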
+static int rtw_mp_write_reg(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ char *pch, *pnext, *ptmp;
+ char *width_str;
+ char width;
+ u32 addr, data;
+ int ret;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ pch = extra;
+ pnext = strpbrk(pch, ",.-");
+ if (pnext == NULL)
+ return -EINVAL;
+ *pnext = 0;
+ width_str = pch;
+
+ pch = pnext + 1;
+ pnext = strpbrk(pch, ",.-");
+ if (pnext == NULL)
+ return -EINVAL;
+ *pnext = 0;
+ addr = simple_strtoul(pch, &ptmp, 16);
+ if (addr > 0x3FFF)
+ return -EINVAL;
+
+ pch = pnext + 1;
+ if ((pch - extra) >= wrqu->length)
+ return -EINVAL;
+ data = simple_strtoul(pch, &ptmp, 16);
+
+ ret = 0;
+ width = width_str[0];
+ switch (width) {
+ case 'b':
+ /* 1 byte */
+ if (data > 0xFF) {
+ ret = -EINVAL;
+ break;
+ }
+ rtw_write8(padapter, addr, data);
+ break;
+ case 'w':
+ /* 2 bytes */
+ if (data > 0xFFFF) {
+ ret = -EINVAL;
+ break;
+ }
+ rtw_write16(padapter, addr, data);
+ break;
+ case 'd':
+ /* 4 bytes */
+ rtw_write32(padapter, addr, data);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Input Format: %s,%d
+ * %s is width, could be
+ * "b" for 1 byte
+ * "w" for WORD (2 bytes)
+ * "dw" for DWORD (4 bytes)
+ * %d is address(offset)
+ *
+ * Return:
+ * %d for the data read
+ */
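+/*
+ * Example (offset is illustrative): "dw,800" reads the 32-bit register
+ * at offset 0x800.  The offset is parsed as hex and must not exceed
+ * 0x3FFF.
+ */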
+static int rtw_mp_read_reg(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+ char *pch, *pnext, *ptmp;
+ char *width_str;
+ char width;
+ char data[20], tmp[20];
+ u32 addr;
+ u32 ret, i = 0, j = 0, strtout = 0;
+
+ if (!input)
+ return -ENOMEM;
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+ _rtw_memset(data, 0, 20);
+ _rtw_memset(tmp, 0, 20);
+ _rtw_memset(extra, 0, wrqu->length);
+
+ pch = input;
+ pnext = strpbrk(pch, ",.-");
+ if (pnext == NULL) {
+ kfree(input);
+ return -EINVAL;
+ }
+ *pnext = 0;
+ width_str = pch;
+
+ pch = pnext + 1;
+ if ((pch - input) >= wrqu->length) {
+ kfree(input);
+ return -EINVAL;
+ }
+ kfree(input);
+ addr = simple_strtoul(pch, &ptmp, 16);
+ if (addr > 0x3FFF)
+ return -EINVAL;
+
+ ret = 0;
+ width = width_str[0];
+ switch (width) {
+ case 'b':
+ /* 1 byte */
+ sprintf(extra, "%d\n", rtw_read8(padapter, addr));
+ wrqu->length = strlen(extra);
+ break;
+ case 'w':
+ /* 2 bytes */
+ sprintf(data, "%04x\n", rtw_read16(padapter, addr));
+ for (i = 0; i <= strlen(data); i++) {
+ if (i%2 == 0) {
+ tmp[j] = ' ';
+ j++;
+ }
+ if (data[i] != '\0')
+ tmp[j] = data[i];
+ j++;
+ }
+ pch = tmp;
+ DBG_88E("pch =%s", pch);
+
+ while (*pch != '\0') {
+ pnext = strpbrk(pch, " ");
+ if (!pnext)
+ break;
+
+ pnext++;
+ if (*pnext != '\0') {
+ strtout = simple_strtoul(pnext, &ptmp, 16);
+ sprintf(extra, "%s %d", extra, strtout);
+ } else {
+ break;
+ }
+ pch = pnext;
+ }
+ wrqu->length = 6;
+ break;
+ case 'd':
+ /* 4 bytes */
+ sprintf(data, "%08x", rtw_read32(padapter, addr));
+ /* add read data format blank */
+ for (i = 0; i <= strlen(data); i++) {
+ if (i%2 == 0) {
+ tmp[j] = ' ';
+ j++;
+ }
+ if (data[i] != '\0')
+ tmp[j] = data[i];
+
+ j++;
+ }
+ pch = tmp;
+ DBG_88E("pch =%s", pch);
+
+ while (*pch != '\0') {
+ pnext = strpbrk(pch, " ");
+ if (!pnext)
+ break;
+ pnext++;
+ if (*pnext != '\0') {
+ strtout = simple_strtoul(pnext, &ptmp, 16);
+ sprintf(extra, "%s %d", extra, strtout);
+ } else {
+ break;
+ }
+ pch = pnext;
+ }
+ wrqu->length = strlen(extra);
+ break;
+ default:
+ wrqu->length = 0;
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Input Format: %d,%x,%x
+ * %d is RF path, should be smaller than MAX_RF_PATH_NUMS
+ * 1st %x is address(offset)
+ * 2nd %x is the data to write
+ */
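+/*
+ * Example (values are illustrative): "0,1A,12345" writes 0x12345 to RF
+ * register 0x1A on path 0.  The path is decimal; address (<= 0xFF) and
+ * data (<= 0xFFFFF) are hex.
+ */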
+static int rtw_mp_write_rf(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u32 path, addr, data;
+ int ret;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ ret = sscanf(extra, "%d,%x,%x", &path, &addr, &data);
+ if (ret < 3)
+ return -EINVAL;
+
+ if (path >= MAX_RF_PATH_NUMS)
+ return -EINVAL;
+ if (addr > 0xFF)
+ return -EINVAL;
+ if (data > 0xFFFFF)
+ return -EINVAL;
+
+ _rtw_memset(extra, 0, wrqu->length);
+
+ write_rfreg(padapter, path, addr, data);
+
+ sprintf(extra, "write_rf completed\n");
+ wrqu->length = strlen(extra);
+
+ return 0;
+}
+
+/*
+ * Input Format: %d,%x
+ * %d is RF path, should be smaller than MAX_RF_PATH_NUMS
+ * %x is address(offset)
+ *
+ * Return:
+ * %d for the data read
+ */
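+/*
+ * Example (values are illustrative): "0,1A" reads RF register 0x1A on
+ * path 0.  The path is decimal and the address is hex, limited to 0xFF.
+ */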
+static int rtw_mp_read_rf(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+ char *pch, *pnext, *ptmp;
+ char data[20], tmp[20];
+ u32 path, addr;
+ u32 ret, i = 0, j = 0, strtou = 0;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (!input)
+ return -ENOMEM;
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+ ret = sscanf(input, "%d,%x", &path, &addr);
+ kfree(input);
+ if (ret < 2)
+ return -EINVAL;
+
+ if (path >= MAX_RF_PATH_NUMS)
+ return -EINVAL;
+ if (addr > 0xFF)
+ return -EINVAL;
+
+ _rtw_memset(extra, 0, wrqu->length);
+
+ sprintf(data, "%08x", read_rfreg(padapter, path, addr));
+ /* add read data format blank */
+ for (i = 0; i <= strlen(data); i++) {
+ if (i%2 == 0) {
+ tmp[j] = ' ';
+ j++;
+ }
+ tmp[j] = data[i];
+ j++;
+ }
+ pch = tmp;
+ DBG_88E("pch =%s", pch);
+
+ while (*pch != '\0') {
+ pnext = strpbrk(pch, " ");
+ pnext++;
+ if (*pnext != '\0') {
+ strtou = simple_strtoul(pnext, &ptmp, 16);
+ sprintf(extra, "%s %d", extra, strtou);
+ } else {
+ break;
+ }
+ pch = pnext;
+ }
+ wrqu->length = strlen(extra);
+ return 0;
+}
+
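+/*
+ * MP (manufacturing test) mode entry/exit.  rtw_mp_start() turns on
+ * registrypriv.mp_mode, disables IPS/LPS power saving, initializes the
+ * MPT adapter state and calls mp_start_test(); rtw_mp_stop() reverses
+ * this through MPT_DeInitAdapter() and mp_stop_test().
+ */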
+static int rtw_mp_start(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (padapter->registrypriv.mp_mode == 0) {
+ padapter->registrypriv.mp_mode = 1;
+
+ rtw_pm_set_ips(padapter, IPS_NONE);
+ LeaveAllPowerSaveMode(padapter);
+
+ MPT_InitializeAdapter(padapter, 1);
+ }
+ if (padapter->registrypriv.mp_mode == 0)
+ return -EPERM;
+ if (padapter->mppriv.mode == MP_OFF) {
+ if (mp_start_test(padapter) == _FAIL)
+ return -EPERM;
+ padapter->mppriv.mode = MP_ON;
+ }
+ return 0;
+}
+
+static int rtw_mp_stop(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (padapter->registrypriv.mp_mode == 1) {
+ MPT_DeInitAdapter(padapter);
+ padapter->registrypriv.mp_mode = 0;
+ }
+
+ if (padapter->mppriv.mode != MP_OFF) {
+ mp_stop_test(padapter);
+ padapter->mppriv.mode = MP_OFF;
+ }
+
+ return 0;
+}
+
+extern int wifirate2_ratetbl_inx(unsigned char rate);
+
+static int rtw_mp_rate(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u32 rate = MPT_RATE_1M;
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (!input)
+ return -ENOMEM;
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+ rate = rtw_atoi(input);
+ sprintf(extra, "Set data rate to %d", rate);
+ kfree(input);
+ if (rate <= 0x7f)
+ rate = wifirate2_ratetbl_inx((u8)rate);
+ else
+ rate = (rate-0x80+MPT_RATE_MCS0);
+
+ if (rate >= MPT_RATE_LAST)
+ return -EINVAL;
+
+ padapter->mppriv.rateidx = rate;
+ Hal_SetDataRate(padapter);
+
+ wrqu->length = strlen(extra) + 1;
+ return 0;
+}
+
+static int rtw_mp_channel(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+ u32 channel = 1;
+
+ if (!input)
+ return -ENOMEM;
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+ channel = rtw_atoi(input);
+ sprintf(extra, "Change channel %d to channel %d", padapter->mppriv.channel, channel);
+
+ padapter->mppriv.channel = channel;
+ Hal_SetChannel(padapter);
+
+ wrqu->length = strlen(extra) + 1;
+ kfree(input);
+ return 0;
+}
+
+static int rtw_mp_bandwidth(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u32 bandwidth = 0, sg = 0;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ sscanf(extra, "40M =%d, shortGI =%d", &bandwidth, &sg);
+
+ if (bandwidth != HT_CHANNEL_WIDTH_40)
+ bandwidth = HT_CHANNEL_WIDTH_20;
+
+ padapter->mppriv.bandwidth = (u8)bandwidth;
+ padapter->mppriv.preamble = sg;
+
+ SetBandwidth(padapter);
+
+ return 0;
+}
+
+static int rtw_mp_txpower(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u32 idx_a = 0, idx_b = 0;
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (!input)
+ return -ENOMEM;
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+ sscanf(input, "patha =%d, pathb =%d", &idx_a, &idx_b);
+
+ sprintf(extra, "Set power level path_A:%d path_B:%d", idx_a, idx_b);
+ padapter->mppriv.txpoweridx = (u8)idx_a;
+ padapter->mppriv.txpoweridx_b = (u8)idx_b;
+ padapter->mppriv.bSetTxPower = 1;
+ Hal_SetAntennaPathPower(padapter);
+
+ wrqu->length = strlen(extra) + 1;
+ kfree(input);
+ return 0;
+}
+
+static int rtw_mp_ant_tx(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u8 i;
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+ u16 antenna = 0;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (!input)
+ return -ENOMEM;
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+
+ sprintf(extra, "switch Tx antenna to %s", input);
+
+ for (i = 0; i < strlen(input); i++) {
+ switch (input[i]) {
+ case 'a':
+ antenna |= ANTENNA_A;
+ break;
+ case 'b':
+ antenna |= ANTENNA_B;
+ break;
+ }
+ }
+ padapter->mppriv.antenna_tx = antenna;
+
+ Hal_SetAntenna(padapter);
+
+ wrqu->length = strlen(extra) + 1;
+ kfree(input);
+ return 0;
+}
+
+static int rtw_mp_ant_rx(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u8 i;
+ u16 antenna = 0;
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (!input)
+ return -ENOMEM;
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+ _rtw_memset(extra, 0, wrqu->length);
+
+ sprintf(extra, "switch Rx antenna to %s", input);
+
+ for (i = 0; i < strlen(input); i++) {
+ switch (input[i]) {
+ case 'a':
+ antenna |= ANTENNA_A;
+ break;
+ case 'b':
+ antenna |= ANTENNA_B;
+ break;
+ }
+ }
+
+ padapter->mppriv.antenna_rx = antenna;
+ Hal_SetAntenna(padapter);
+ wrqu->length = strlen(extra);
+ kfree(input);
+ return 0;
+}
+
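+/*
+ * mp_ctx: continuous-TX control.  The request string selects the test
+ * mode: "count =<n>, pkt" or "background, pkt" for packet TX,
+ * "background, stone" for single tone, "background" for continuous TX,
+ * "background, cs" for carrier suppression, "background, sc" for single
+ * carrier, and "stop" to end the running test.
+ */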
+static int rtw_mp_ctx(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u32 pkTx = 1, countPkTx = 1, cotuTx = 1, CarrSprTx = 1, scTx = 1, sgleTx = 1, stop = 1;
+ u32 bStartTest = 1;
+ u32 count = 0;
+ struct mp_priv *pmp_priv;
+ struct pkt_attrib *pattrib;
+
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ pmp_priv = &padapter->mppriv;
+
+ if (copy_from_user(extra, wrqu->pointer, wrqu->length))
+ return -EFAULT;
+
+ DBG_88E("%s: in =%s\n", __func__, extra);
+
+ countPkTx = strncmp(extra, "count =", 5); /* strncmp true is 0 */
+ cotuTx = strncmp(extra, "background", 20);
+ CarrSprTx = strncmp(extra, "background, cs", 20);
+ scTx = strncmp(extra, "background, sc", 20);
+ sgleTx = strncmp(extra, "background, stone", 20);
+ pkTx = strncmp(extra, "background, pkt", 20);
+ stop = strncmp(extra, "stop", 4);
+ sscanf(extra, "count =%d, pkt", &count);
+
+ _rtw_memset(extra, '\0', wrqu->length);
+
+ if (stop == 0) {
+ bStartTest = 0; /* To set Stop */
+ pmp_priv->tx.stop = 1;
+ sprintf(extra, "Stop continuous Tx");
+ } else {
+ bStartTest = 1;
+ if (pmp_priv->mode != MP_ON) {
+ if (pmp_priv->tx.stop != 1) {
+ DBG_88E("%s: MP_MODE != ON %d\n", __func__, pmp_priv->mode);
+ return -EFAULT;
+ }
+ }
+ }
+
+ if (pkTx == 0 || countPkTx == 0)
+ pmp_priv->mode = MP_PACKET_TX;
+ if (sgleTx == 0)
+ pmp_priv->mode = MP_SINGLE_TONE_TX;
+ if (cotuTx == 0)
+ pmp_priv->mode = MP_CONTINUOUS_TX;
+ if (CarrSprTx == 0)
+ pmp_priv->mode = MP_CARRIER_SUPPRISSION_TX;
+ if (scTx == 0)
+ pmp_priv->mode = MP_SINGLE_CARRIER_TX;
+
+ switch (pmp_priv->mode) {
+ case MP_PACKET_TX:
+ if (bStartTest == 0) {
+ pmp_priv->tx.stop = 1;
+ pmp_priv->mode = MP_ON;
+ sprintf(extra, "Stop continuous Tx");
+ } else if (pmp_priv->tx.stop == 1) {
+ sprintf(extra, "Start continuous DA = ffffffffffff len = 1500 count =%u,\n", count);
+ pmp_priv->tx.stop = 0;
+ pmp_priv->tx.count = count;
+ pmp_priv->tx.payload = 2;
+ pattrib = &pmp_priv->tx.attrib;
+ pattrib->pktlen = 1500;
+ _rtw_memset(pattrib->dst, 0xFF, ETH_ALEN);
+ SetPacketTx(padapter);
+ } else {
+ return -EFAULT;
+ }
+ wrqu->length = strlen(extra);
+ return 0;
+ case MP_SINGLE_TONE_TX:
+ if (bStartTest != 0)
+ sprintf(extra, "Start continuous DA = ffffffffffff len = 1500\n infinite = yes.");
+ Hal_SetSingleToneTx(padapter, (u8)bStartTest);
+ break;
+ case MP_CONTINUOUS_TX:
+ if (bStartTest != 0)
+ sprintf(extra, "Start continuous DA = ffffffffffff len = 1500\n infinite = yes.");
+ Hal_SetContinuousTx(padapter, (u8)bStartTest);
+ break;
+ case MP_CARRIER_SUPPRISSION_TX:
+ if (bStartTest != 0) {
+ if (pmp_priv->rateidx <= MPT_RATE_11M) {
+ sprintf(extra, "Start continuous DA = ffffffffffff len = 1500\n infinite = yes.");
+ Hal_SetCarrierSuppressionTx(padapter, (u8)bStartTest);
+ } else {
+ sprintf(extra, "Specify carrier suppression but not CCK rate");
+ }
+ }
+ break;
+ case MP_SINGLE_CARRIER_TX:
+ if (bStartTest != 0)
+ sprintf(extra, "Start continuous DA = ffffffffffff len = 1500\n infinite = yes.");
+ Hal_SetSingleCarrierTx(padapter, (u8)bStartTest);
+ break;
+ default:
+ sprintf(extra, "Error! Continuous-Tx is not on-going.");
+ return -EFAULT;
+ }
+
+ if (bStartTest == 1 && pmp_priv->mode != MP_ON) {
+ struct mp_priv *pmp_priv = &padapter->mppriv;
+ if (pmp_priv->tx.stop == 0) {
+ pmp_priv->tx.stop = 1;
+ rtw_msleep_os(5);
+ }
+ pmp_priv->tx.stop = 0;
+ pmp_priv->tx.count = 1;
+ SetPacketTx(padapter);
+ } else {
+ pmp_priv->mode = MP_ON;
+ }
+
+ wrqu->length = strlen(extra);
+ return 0;
+}
+
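+/*
+ * mp_arx: RX test control.  "start" enables packet RX, "stop" disables
+ * it and reports the OK/CRC-error packet counters, and "phy" sums the
+ * CCK/OFDM/HT OK, CRC and false-alarm counters from the baseband
+ * registers listed in the comment below.
+ */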
+static int rtw_mp_arx(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u8 bStartRx = 0, bStopRx = 0, bQueryPhy;
+ u32 cckok = 0, cckcrc = 0, ofdmok = 0, ofdmcrc = 0, htok = 0, htcrc = 0, OFDM_FA = 0, CCK_FA = 0;
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (!input)
+ return -ENOMEM;
+
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+ DBG_88E("%s: %s\n", __func__, input);
+
+ bStartRx = (strncmp(input, "start", 5) == 0) ? 1 : 0; /* strncmp true is 0 */
+ bStopRx = (strncmp(input, "stop", 5) == 0) ? 1 : 0; /* strncmp true is 0 */
+ bQueryPhy = (strncmp(input, "phy", 3) == 0) ? 1 : 0; /* strncmp true is 0 */
+
+ if (bStartRx) {
+ sprintf(extra, "start");
+ SetPacketRx(padapter, bStartRx);
+ } else if (bStopRx) {
+ SetPacketRx(padapter, 0);
+ sprintf(extra, "Received packet OK:%d CRC error:%d", padapter->mppriv.rx_pktcount, padapter->mppriv.rx_crcerrpktcount);
+ } else if (bQueryPhy) {
+ /*
+ OFDM FA
+ RegCF0[15:0]
+ RegCF2[31:16]
+ RegDA0[31:16]
+ RegDA4[15:0]
+ RegDA4[31:16]
+ RegDA8[15:0]
+ CCK FA
+ (RegA5B<<8) | RegA5C
+ */
+ cckok = read_bbreg(padapter, 0xf88, 0xffffffff);
+ cckcrc = read_bbreg(padapter, 0xf84, 0xffffffff);
+ ofdmok = read_bbreg(padapter, 0xf94, 0x0000FFFF);
+ ofdmcrc = read_bbreg(padapter, 0xf94, 0xFFFF0000);
+ htok = read_bbreg(padapter, 0xf90, 0x0000FFFF);
+ htcrc = read_bbreg(padapter, 0xf90, 0xFFFF0000);
+
+ OFDM_FA = read_bbreg(padapter, 0xcf0, 0x0000FFFF);
+ OFDM_FA += read_bbreg(padapter, 0xcf2, 0xFFFF0000);
+ OFDM_FA += read_bbreg(padapter, 0xda0, 0xFFFF0000);
+ OFDM_FA += read_bbreg(padapter, 0xda4, 0x0000FFFF);
+ OFDM_FA += read_bbreg(padapter, 0xda4, 0xFFFF0000);
+ OFDM_FA += read_bbreg(padapter, 0xda8, 0x0000FFFF);
+ CCK_FA = (rtw_read8(padapter, 0xa5b)<<8) | (rtw_read8(padapter, 0xa5c));
+
+ sprintf(extra, "Phy Received packet OK:%d CRC error:%d FA Counter: %d", cckok+ofdmok+htok, cckcrc+ofdmcrc+htcrc, OFDM_FA+CCK_FA);
+ }
+ wrqu->length = strlen(extra) + 1;
+ kfree(input);
+ return 0;
+}
+
+static int rtw_mp_trx_query(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u32 txok, txfail, rxok, rxfail;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ txok = padapter->mppriv.tx.sended;
+ txfail = 0;
+ rxok = padapter->mppriv.rx_pktcount;
+ rxfail = padapter->mppriv.rx_crcerrpktcount;
+
+ _rtw_memset(extra, '\0', 128);
+
+ sprintf(extra, "Tx OK:%d, Tx Fail:%d, Rx OK:%d, CRC error:%d ", txok, txfail, rxok, rxfail);
+
+ wrqu->length = strlen(extra)+1;
+
+ return 0;
+}
+
+static int rtw_mp_pwrtrk(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u8 enable;
+ u32 thermal;
+ s32 ret;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+
+ if (!input)
+ return -ENOMEM;
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+ _rtw_memset(extra, 0, wrqu->length);
+
+ enable = 1;
+ if (wrqu->length > 1) {/* not empty string */
+ if (strncmp(input, "stop", 4) == 0) {
+ enable = 0;
+ sprintf(extra, "mp tx power tracking stop");
+ } else if (sscanf(input, "ther =%d", &thermal)) {
+ ret = Hal_SetThermalMeter(padapter, (u8)thermal);
+ if (ret == _FAIL) {
+ kfree(input);
+ return -EPERM;
+ }
+ sprintf(extra, "mp tx power tracking start, target value =%d ok ", thermal);
+ } else {
+ kfree(input);
+ return -EINVAL;
+ }
+ }
+
+ kfree(input);
+ ret = Hal_SetPowerTracking(padapter, enable);
+ if (ret == _FAIL)
+ return -EPERM;
+
+ wrqu->length = strlen(extra);
+ return 0;
+}
+
+static int rtw_mp_psd(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+
+ if (!input)
+ return -ENOMEM;
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+
+ strcpy(extra, input);
+
+ wrqu->length = mp_query_psd(padapter, extra);
+ kfree(input);
+ return 0;
+}
+
+static int rtw_mp_thermal(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u8 val;
+ u16 bwrite = 1;
+ u16 addr = EEPROM_THERMAL_METER_88E;
+
+ u16 cnt = 1;
+ u16 max_available_size = 0;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (copy_from_user(extra, wrqu->pointer, wrqu->length))
+ return -EFAULT;
+
+ bwrite = strncmp(extra, "write", 6); /* strncmp true is 0 */
+
+ Hal_GetThermalMeter(padapter, &val);
+
+ if (bwrite == 0) {
+ EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
+ if (2 > max_available_size) {
+ DBG_88E("no available efuse!\n");
+ return -EFAULT;
+ }
+ if (rtw_efuse_map_write(padapter, addr, cnt, &val) == _FAIL) {
+ DBG_88E("rtw_efuse_map_write error\n");
+ return -EFAULT;
+ } else {
+ sprintf(extra, " efuse write ok :%d", val);
+ }
+ } else {
+ sprintf(extra, "%d", val);
+ }
+ wrqu->length = strlen(extra);
+
+ return 0;
+}
+
+static int rtw_mp_reset_stats(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ struct mp_priv *pmp_priv;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ pmp_priv = &padapter->mppriv;
+
+ pmp_priv->tx.sended = 0;
+ pmp_priv->tx_pktcount = 0;
+ pmp_priv->rx_pktcount = 0;
+ pmp_priv->rx_crcerrpktcount = 0;
+
+ /* reset phy counter */
+ write_bbreg(padapter, 0xf14, BIT16, 0x1);
+ rtw_msleep_os(10);
+ write_bbreg(padapter, 0xf14, BIT16, 0x0);
+
+ return 0;
+}
+
+static int rtw_mp_dump(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ u32 value;
+ u8 rf_type, path_nums = 0;
+ u32 i, j = 1, path;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (strncmp(extra, "all", 4) == 0) {
+ DBG_88E("\n ======= MAC REG =======\n");
+ for (i = 0x0; i < 0x300; i += 4) {
+ if (j%4 == 1)
+ DBG_88E("0x%02x", i);
+ DBG_88E(" 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ DBG_88E("\n");
+ }
+ for (i = 0x400; i < 0x1000; i += 4) {
+ if (j%4 == 1)
+ DBG_88E("0x%02x", i);
+ DBG_88E(" 0x%08x ", rtw_read32(padapter, i));
+ if ((j++)%4 == 0)
+ DBG_88E("\n");
+ }
+
+ j = 1;
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+
+ DBG_88E("\n ======= RF REG =======\n");
+ if ((RF_1T2R == rf_type) || (RF_1T1R == rf_type))
+ path_nums = 1;
+ else
+ path_nums = 2;
+
+ for (path = 0; path < path_nums; path++) {
+ for (i = 0; i < 0x34; i++) {
+ value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
+ if (j%4 == 1)
+ DBG_88E("0x%02x ", i);
+ DBG_88E(" 0x%08x ", value);
+ if ((j++)%4 == 0)
+ DBG_88E("\n");
+ }
+ }
+ }
+ return 0;
+}
+
+static int rtw_mp_phypara(struct net_device *dev,
+ struct iw_request_info *info,
+ struct iw_point *wrqu, char *extra)
+{
+ char *input = kmalloc(wrqu->length, GFP_KERNEL);
+ u32 valxcap;
+
+ if (!input)
+ return -ENOMEM;
+ if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+
+ DBG_88E("%s:iwpriv in =%s\n", __func__, input);
+
+ sscanf(input, "xcap =%d", &valxcap);
+
+ kfree(input);
+ return 0;
+}
+
+static int rtw_mp_SetRFPath(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ char *input = kmalloc(wrqu->data.length, GFP_KERNEL);
+ u8 bMain = 1, bTurnoff = 1;
+
+ if (!input)
+ return -ENOMEM;
+ if (copy_from_user(input, wrqu->data.pointer, wrqu->data.length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+ DBG_88E("%s:iwpriv in =%s\n", __func__, input);
+
+ bMain = strncmp(input, "1", 2); /* strncmp true is 0 */
+ bTurnoff = strncmp(input, "0", 3); /* strncmp true is 0 */
+
+ if (bMain == 0) {
+ MP_PHY_SetRFPathSwitch(padapter, true);
+ DBG_88E("%s:PHY_SetRFPathSwitch = true\n", __func__);
+ } else if (bTurnoff == 0) {
+ MP_PHY_SetRFPathSwitch(padapter, false);
+ DBG_88E("%s:PHY_SetRFPathSwitch = false\n", __func__);
+ }
+ kfree(input);
+ return 0;
+}
+
+static int rtw_mp_QueryDrv(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ char *input = kmalloc(wrqu->data.length, GFP_KERNEL);
+ u8 qAutoLoad = 1;
+ struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+
+ if (!input)
+ return -ENOMEM;
+
+ if (copy_from_user(input, wrqu->data.pointer, wrqu->data.length)) {
+ kfree(input);
+ return -EFAULT;
+ }
+ DBG_88E("%s:iwpriv in =%s\n", __func__, input);
+
+ qAutoLoad = strncmp(input, "autoload", 8); /* strncmp true is 0 */
+
+ if (qAutoLoad == 0) {
+ DBG_88E("%s:qAutoLoad\n", __func__);
+
+ if (pEEPROM->bautoload_fail_flag)
+ sprintf(extra, "fail");
+ else
+ sprintf(extra, "ok");
+ }
+ wrqu->data.length = strlen(extra) + 1;
+ kfree(input);
+ return 0;
+}
+
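+/*
+ * rtw_mp_set()/rtw_mp_get() are the generic MP dispatchers registered
+ * at SIOCIWFIRSTPRIV + 0x0E and + 0x0F; the sub-ioctl number arrives in
+ * wrqu->flags and is routed to the rtw_mp_* helpers above.  Userspace
+ * normally reaches them through the named sub-commands declared in
+ * rtw_private_args, e.g. (interface name is illustrative):
+ *	iwpriv wlan0 mp_start
+ */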
+static int rtw_mp_set(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wdata, char *extra)
+{
+ struct iw_point *wrqu = (struct iw_point *)wdata;
+ u32 subcmd = wrqu->flags;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (padapter == NULL)
+ return -ENETDOWN;
+
+ if (extra == NULL) {
+ wrqu->length = 0;
+ return -EIO;
+ }
+
+ switch (subcmd) {
+ case MP_START:
+ DBG_88E("set case mp_start\n");
+ rtw_mp_start(dev, info, wrqu, extra);
+ break;
+ case MP_STOP:
+ DBG_88E("set case mp_stop\n");
+ rtw_mp_stop(dev, info, wrqu, extra);
+ break;
+ case MP_BANDWIDTH:
+ DBG_88E("set case mp_bandwidth\n");
+ rtw_mp_bandwidth(dev, info, wrqu, extra);
+ break;
+ case MP_RESET_STATS:
+ DBG_88E("set case MP_RESET_STATS\n");
+ rtw_mp_reset_stats(dev, info, wrqu, extra);
+ break;
+ case MP_SetRFPathSwh:
+ DBG_88E("set MP_SetRFPathSwitch\n");
+ rtw_mp_SetRFPath(dev, info, wdata, extra);
+ break;
+ case CTA_TEST:
+ DBG_88E("set CTA_TEST\n");
+ rtw_cta_test_start(dev, info, wdata, extra);
+ break;
+ }
+
+ return 0;
+}
+
+static int rtw_mp_get(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wdata, char *extra)
+{
+ struct iw_point *wrqu = (struct iw_point *)wdata;
+ u32 subcmd = wrqu->flags;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+
+ if (padapter == NULL)
+ return -ENETDOWN;
+ if (extra == NULL) {
+ wrqu->length = 0;
+ return -EIO;
+ }
+
+ switch (subcmd) {
+ case WRITE_REG:
+ rtw_mp_write_reg(dev, info, wrqu, extra);
+ break;
+ case WRITE_RF:
+ rtw_mp_write_rf(dev, info, wrqu, extra);
+ break;
+ case MP_PHYPARA:
+ DBG_88E("mp_get MP_PHYPARA\n");
+ rtw_mp_phypara(dev, info, wrqu, extra);
+ break;
+ case MP_CHANNEL:
+ DBG_88E("set case mp_channel\n");
+ rtw_mp_channel(dev, info, wrqu, extra);
+ break;
+ case READ_REG:
+ DBG_88E("mp_get READ_REG\n");
+ rtw_mp_read_reg(dev, info, wrqu, extra);
+ break;
+ case READ_RF:
+ DBG_88E("mp_get READ_RF\n");
+ rtw_mp_read_rf(dev, info, wrqu, extra);
+ break;
+ case MP_RATE:
+ DBG_88E("set case mp_rate\n");
+ rtw_mp_rate(dev, info, wrqu, extra);
+ break;
+ case MP_TXPOWER:
+ DBG_88E("set case MP_TXPOWER\n");
+ rtw_mp_txpower(dev, info, wrqu, extra);
+ break;
+ case MP_ANT_TX:
+ DBG_88E("set case MP_ANT_TX\n");
+ rtw_mp_ant_tx(dev, info, wrqu, extra);
+ break;
+ case MP_ANT_RX:
+ DBG_88E("set case MP_ANT_RX\n");
+ rtw_mp_ant_rx(dev, info, wrqu, extra);
+ break;
+ case MP_QUERY:
+ rtw_mp_trx_query(dev, info, wrqu, extra);
+ break;
+ case MP_CTX:
+ DBG_88E("set case MP_CTX\n");
+ rtw_mp_ctx(dev, info, wrqu, extra);
+ break;
+ case MP_ARX:
+ DBG_88E("set case MP_ARX\n");
+ rtw_mp_arx(dev, info, wrqu, extra);
+ break;
+ case EFUSE_GET:
+ DBG_88E("efuse get EFUSE_GET\n");
+ rtw_mp_efuse_get(dev, info, wdata, extra);
+ break;
+ case MP_DUMP:
+ DBG_88E("set case MP_DUMP\n");
+ rtw_mp_dump(dev, info, wrqu, extra);
+ break;
+ case MP_PSD:
+ DBG_88E("set case MP_PSD\n");
+ rtw_mp_psd(dev, info, wrqu, extra);
+ break;
+ case MP_THER:
+ DBG_88E("set case MP_THER\n");
+ rtw_mp_thermal(dev, info, wrqu, extra);
+ break;
+ case MP_QueryDrvStats:
+ DBG_88E("mp_get MP_QueryDrvStats\n");
+ rtw_mp_QueryDrv(dev, info, wdata, extra);
+ break;
+ case MP_PWRTRK:
+ DBG_88E("set case MP_PWRTRK\n");
+ rtw_mp_pwrtrk(dev, info, wrqu, extra);
+ break;
+ case EFUSE_SET:
+ DBG_88E("set case efuse set\n");
+ rtw_mp_efuse_set(dev, info, wdata, extra);
+ break;
+ }
+
+ rtw_msleep_os(10); /* delay 10 ms so pending packets go out before the adb shell operation exits */
+ return 0;
+}
+
+static int rtw_tdls(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ return 0;
+}
+
+static int rtw_tdls_get(struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ return 0;
+}
+
+static int rtw_test(
+ struct net_device *dev,
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
+{
+ u32 len;
+ u8 *pbuf, *pch;
+ char *ptmp;
+ u8 *delim = ",";
+
+ DBG_88E("+%s\n", __func__);
+ len = wrqu->data.length;
+
+ pbuf = (u8 *)rtw_zmalloc(len);
+ if (pbuf == NULL) {
+ DBG_88E("%s: no memory!\n", __func__);
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(pbuf, wrqu->data.pointer, len)) {
+ kfree(pbuf);
+ DBG_88E("%s: copy from user fail!\n", __func__);
+ return -EFAULT;
+ }
+ DBG_88E("%s: string =\"%s\"\n", __func__, pbuf);
+
+ ptmp = (char *)pbuf;
+ pch = strsep(&ptmp, delim);
+ if ((pch == NULL) || (strlen(pch) == 0)) {
+ kfree(pbuf);
+ DBG_88E("%s: parameter error(level 1)!\n", __func__);
+ return -EFAULT;
+ }
+ kfree(pbuf);
+ return 0;
+}
+
+static iw_handler rtw_handlers[] = {
+ NULL, /* SIOCSIWCOMMIT */
+ rtw_wx_get_name, /* SIOCGIWNAME */
+ dummy, /* SIOCSIWNWID */
+ dummy, /* SIOCGIWNWID */
+ rtw_wx_set_freq, /* SIOCSIWFREQ */
+ rtw_wx_get_freq, /* SIOCGIWFREQ */
+ rtw_wx_set_mode, /* SIOCSIWMODE */
+ rtw_wx_get_mode, /* SIOCGIWMODE */
+ dummy, /* SIOCSIWSENS */
+ rtw_wx_get_sens, /* SIOCGIWSENS */
+ NULL, /* SIOCSIWRANGE */
+ rtw_wx_get_range, /* SIOCGIWRANGE */
+ rtw_wx_set_priv, /* SIOCSIWPRIV */
+ NULL, /* SIOCGIWPRIV */
+ NULL, /* SIOCSIWSTATS */
+ NULL, /* SIOCGIWSTATS */
+ dummy, /* SIOCSIWSPY */
+ dummy, /* SIOCGIWSPY */
+ NULL, /* SIOCSIWTHRSPY */
+ NULL, /* SIOCGIWTHRSPY */
+ rtw_wx_set_wap, /* SIOCSIWAP */
+ rtw_wx_get_wap, /* SIOCGIWAP */
+ rtw_wx_set_mlme, /* SIOCSIWMLME; request MLME operation using struct iw_mlme */
+ dummy, /* SIOCGIWAPLIST -- deprecated */
+ rtw_wx_set_scan, /* SIOCSIWSCAN */
+ rtw_wx_get_scan, /* SIOCGIWSCAN */
+ rtw_wx_set_essid, /* SIOCSIWESSID */
+ rtw_wx_get_essid, /* SIOCGIWESSID */
+ dummy, /* SIOCSIWNICKN */
+ rtw_wx_get_nick, /* SIOCGIWNICKN */
+ NULL, /* -- hole -- */
+ NULL, /* -- hole -- */
+ rtw_wx_set_rate, /* SIOCSIWRATE */
+ rtw_wx_get_rate, /* SIOCGIWRATE */
+ rtw_wx_set_rts, /* SIOCSIWRTS */
+ rtw_wx_get_rts, /* SIOCGIWRTS */
+ rtw_wx_set_frag, /* SIOCSIWFRAG */
+ rtw_wx_get_frag, /* SIOCGIWFRAG */
+ dummy, /* SIOCSIWTXPOW */
+ dummy, /* SIOCGIWTXPOW */
+ dummy, /* SIOCSIWRETRY */
+ rtw_wx_get_retry, /* SIOCGIWRETRY */
+ rtw_wx_set_enc, /* SIOCSIWENCODE */
+ rtw_wx_get_enc, /* SIOCGIWENCODE */
+ dummy, /* SIOCSIWPOWER */
+ rtw_wx_get_power, /* SIOCGIWPOWER */
+ NULL, /*---hole---*/
+ NULL, /*---hole---*/
+ rtw_wx_set_gen_ie, /* SIOCSIWGENIE */
+ NULL, /* SIOCGIWGENIE */
+ rtw_wx_set_auth, /* SIOCSIWAUTH */
+ NULL, /* SIOCGIWAUTH */
+ rtw_wx_set_enc_ext, /* SIOCSIWENCODEEXT */
+ NULL, /* SIOCGIWENCODEEXT */
+ rtw_wx_set_pmkid, /* SIOCSIWPMKSA */
+ NULL, /*---hole---*/
+};
+
+static const struct iw_priv_args rtw_private_args[] = {
+ {
+ SIOCIWFIRSTPRIV + 0x0,
+ IW_PRIV_TYPE_CHAR | 0x7FF, 0, "write"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x1,
+ IW_PRIV_TYPE_CHAR | 0x7FF,
+ IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, "read"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x2, 0, 0, "driver_ext"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x3, 0, 0, "mp_ioctl"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x4,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "apinfo"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x5,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "setpid"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x6,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wps_start"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x7,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "get_sensitivity"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x8,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wps_prob_req_ie"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x9,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wps_assoc_req_ie"
+ },
+
+ {
+ SIOCIWFIRSTPRIV + 0xA,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "channel_plan"
+ },
+
+ {
+ SIOCIWFIRSTPRIV + 0xB,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "dbg"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0xC,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3, 0, "rfw"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0xD,
+ IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, "rfr"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x10,
+ IW_PRIV_TYPE_CHAR | P2P_PRIVATE_IOCTL_SET_LEN, 0, "p2p_set"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x11,
+ IW_PRIV_TYPE_CHAR | P2P_PRIVATE_IOCTL_SET_LEN, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | P2P_PRIVATE_IOCTL_SET_LEN, "p2p_get"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x12,
+ IW_PRIV_TYPE_CHAR | P2P_PRIVATE_IOCTL_SET_LEN, IW_PRIV_TYPE_CHAR | IFNAMSIZ, "p2p_get2"
+ },
+ {SIOCIWFIRSTPRIV + 0x13, IW_PRIV_TYPE_CHAR | 128, 0, "NULL"},
+ {
+ SIOCIWFIRSTPRIV + 0x14,
+ IW_PRIV_TYPE_CHAR | 64, 0, "tdls"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x15,
+ IW_PRIV_TYPE_CHAR | P2P_PRIVATE_IOCTL_SET_LEN, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | P2P_PRIVATE_IOCTL_SET_LEN, "tdls_get"
+ },
+ {
+ SIOCIWFIRSTPRIV + 0x16,
+ IW_PRIV_TYPE_CHAR | 64, 0, "pm_set"
+ },
+
+ {SIOCIWFIRSTPRIV + 0x18, IW_PRIV_TYPE_CHAR | IFNAMSIZ, 0, "rereg_nd_name"},
+
+ {SIOCIWFIRSTPRIV + 0x1A, IW_PRIV_TYPE_CHAR | 1024, 0, "efuse_set"},
+ {SIOCIWFIRSTPRIV + 0x1B, IW_PRIV_TYPE_CHAR | 128, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "efuse_get"},
+ {SIOCIWFIRSTPRIV + 0x1D, IW_PRIV_TYPE_CHAR | 40, IW_PRIV_TYPE_CHAR | 0x7FF, "test"
+ },
+
+ {SIOCIWFIRSTPRIV + 0x0E, IW_PRIV_TYPE_CHAR | 1024, 0, ""}, /* set */
+ {SIOCIWFIRSTPRIV + 0x0F, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, ""},/* get */
+/* --- sub-ioctls definitions --- */
+
+ {MP_START, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_start"}, /* set */
+ {MP_PHYPARA, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_phypara"},/* get */
+ {MP_STOP, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_stop"}, /* set */
+ {MP_CHANNEL, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_channel"},/* get */
+ {MP_BANDWIDTH, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_bandwidth"}, /* set */
+ {MP_RATE, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_rate"},/* get */
+ {MP_RESET_STATS, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_reset_stats"},
+ {MP_QUERY, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_query"}, /* get */
+ {READ_REG, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "read_reg"},
+ {MP_RATE, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_rate"},
+ {READ_RF, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "read_rf"},
+ {MP_PSD, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_psd"},
+ {MP_DUMP, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_dump"},
+ {MP_TXPOWER, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_txpower"},
+ {MP_ANT_TX, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_ant_tx"},
+ {MP_ANT_RX, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_ant_rx"},
+ {WRITE_REG, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "write_reg"},
+ {WRITE_RF, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "write_rf"},
+ {MP_CTX, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_ctx"},
+ {MP_ARX, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_arx"},
+ {MP_THER, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_ther"},
+ {EFUSE_SET, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "efuse_set"},
+ {EFUSE_GET, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "efuse_get"},
+ {MP_PWRTRK, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_pwrtrk"},
+ {MP_QueryDrvStats, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_drvquery"},
+ {MP_IOCTL, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_ioctl"}, /* mp_ioctl */
+ {MP_SetRFPathSwh, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_setrfpath"},
+ {CTA_TEST, IW_PRIV_TYPE_CHAR | 1024, 0, "cta_test"},
+};
+
+static iw_handler rtw_private_handler[] = {
+rtw_wx_write32, /* 0x00 */
+rtw_wx_read32, /* 0x01 */
+rtw_drvext_hdl, /* 0x02 */
+rtw_mp_ioctl_hdl, /* 0x03 */
+
+/* for MM DTV platform */
+ rtw_get_ap_info, /* 0x04 */
+
+ rtw_set_pid, /* 0x05 */
+ rtw_wps_start, /* 0x06 */
+
+ rtw_wx_get_sensitivity, /* 0x07 */
+ rtw_wx_set_mtk_wps_probe_ie, /* 0x08 */
+ rtw_wx_set_mtk_wps_ie, /* 0x09 */
+
+/* Set Channel depend on the country code */
+ rtw_wx_set_channel_plan, /* 0x0A */
+
+ rtw_dbg_port, /* 0x0B */
+ rtw_wx_write_rf, /* 0x0C */
+ rtw_wx_read_rf, /* 0x0D */
+
+ rtw_mp_set, /* 0x0E */
+ rtw_mp_get, /* 0x0F */
+ rtw_p2p_set, /* 0x10 */
+ rtw_p2p_get, /* 0x11 */
+ rtw_p2p_get2, /* 0x12 */
+
+ NULL, /* 0x13 */
+ rtw_tdls, /* 0x14 */
+ rtw_tdls_get, /* 0x15 */
+
+ rtw_pm_set, /* 0x16 */
+ rtw_wx_priv_null, /* 0x17 */
+ rtw_rereg_nd_name, /* 0x18 */
+ rtw_wx_priv_null, /* 0x19 */
+
+ rtw_mp_efuse_set, /* 0x1A */
+ rtw_mp_efuse_get, /* 0x1B */
+ NULL, /* 0x1C is reserved for hostapd */
+ rtw_test, /* 0x1D */
+};
+
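+/*
+ * Wireless statistics callback: while associated (_FW_LINKED) it
+ * reports signal level, quality and noise from recvpriv; otherwise the
+ * quality fields are zeroed.
+ */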
+static struct iw_statistics *rtw_get_wireless_stats(struct net_device *dev)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
+ struct iw_statistics *piwstats = &padapter->iwstats;
+ int tmp_level = 0;
+ int tmp_qual = 0;
+ int tmp_noise = 0;
+
+ if (!check_fwstate(&padapter->mlmepriv, _FW_LINKED)) {
+ piwstats->qual.qual = 0;
+ piwstats->qual.level = 0;
+ piwstats->qual.noise = 0;
+ } else {
+ tmp_level = padapter->recvpriv.signal_strength;
+ tmp_qual = padapter->recvpriv.signal_qual;
+ tmp_noise = padapter->recvpriv.noise;
+
+ piwstats->qual.level = tmp_level;
+ piwstats->qual.qual = tmp_qual;
+ piwstats->qual.noise = tmp_noise;
+ }
+ piwstats->qual.updated = IW_QUAL_ALL_UPDATED;/* IW_QUAL_DBM; */
+ return &padapter->iwstats;
+}
+
+struct iw_handler_def rtw_handlers_def = {
+ .standard = rtw_handlers,
+ .num_standard = ARRAY_SIZE(rtw_handlers),
+ .private = rtw_private_handler,
+ .private_args = (struct iw_priv_args *)rtw_private_args,
+ .num_private = ARRAY_SIZE(rtw_private_handler),
+ .num_private_args = ARRAY_SIZE(rtw_private_args),
+ .get_wireless_stats = rtw_get_wireless_stats,
+};
+
+/* copy from net/wireless/wext.c start */
+/* ---------------------------------------------------------------- */
+/*
+ * Calculate size of private arguments
+ */
+static const char iw_priv_type_size[] = {
+ 0, /* IW_PRIV_TYPE_NONE */
+ 1, /* IW_PRIV_TYPE_BYTE */
+ 1, /* IW_PRIV_TYPE_CHAR */
+ 0, /* Not defined */
+ sizeof(__u32), /* IW_PRIV_TYPE_INT */
+ sizeof(struct iw_freq), /* IW_PRIV_TYPE_FLOAT */
+ sizeof(struct sockaddr), /* IW_PRIV_TYPE_ADDR */
+ 0, /* Not defined */
+};
+
+static int get_priv_size(__u16 args)
+{
+ int num = args & IW_PRIV_SIZE_MASK;
+ int type = (args & IW_PRIV_TYPE_MASK) >> 12;
+
+ return num * iw_priv_type_size[type];
+}
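+/*
+ * Example: for "setpid" (IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2)
+ * get_priv_size() returns 2 * sizeof(__u32) = 8 bytes of fixed
+ * SET arguments.
+ */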
+/* copy from net/wireless/wext.c end */
+
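+/*
+ * rtw_ioctl_wext_private() handles private commands delivered as a
+ * single "name arguments" string: it looks the name up in
+ * rtw_private_args (resolving sub-ioctl entries to the real
+ * SIOCIWFIRSTPRIV command), converts the arguments according to the
+ * declared IW_PRIV_TYPE_*, and prepares the request for the matching
+ * rtw_private_handler[] entry, mirroring the argument marshalling that
+ * iwpriv and net/wireless/wext.c perform.
+ */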
+static int rtw_ioctl_wext_private(struct net_device *dev, union iwreq_data *wrq_data)
+{
+ int err = 0;
+ u8 *input = NULL;
+ u32 input_len = 0;
+ const char delim[] = " ";
+ u8 *output = NULL;
+ u32 output_len = 0;
+ u32 count = 0;
+ u8 *buffer = NULL;
+ u32 buffer_len = 0;
+ char *ptr = NULL;
+ u8 cmdname[17] = {0}; /* IFNAMSIZ+1 */
+ u32 cmdlen;
+ s32 len;
+ u8 *extra = NULL;
+ u32 extra_size = 0;
+
+ s32 k;
+ const iw_handler *priv; /* Private ioctl */
+ const struct iw_priv_args *priv_args; /* Private ioctl description */
+ u32 num_priv_args; /* Number of descriptions */
+ iw_handler handler;
+ int temp;
+ int subcmd = 0; /* sub-ioctl index */
+ int offset = 0; /* Space for sub-ioctl index */
+
+ union iwreq_data wdata;
+
+ memcpy(&wdata, wrq_data, sizeof(wdata));
+
+ input_len = wdata.data.length;
+ input = rtw_zmalloc(input_len);
+ if (NULL == input)
+ return -ENOMEM;
+ if (copy_from_user(input, wdata.data.pointer, input_len)) {
+ err = -EFAULT;
+ goto exit;
+ }
+ ptr = input;
+ len = input_len;
+
+ sscanf(ptr, "%16s", cmdname);
+ cmdlen = strlen(cmdname);
+ DBG_88E("%s: cmd =%s\n", __func__, cmdname);
+
+ /* skip command string */
+ if (cmdlen > 0)
+ cmdlen += 1; /* skip one space */
+ ptr += cmdlen;
+ len -= cmdlen;
+ DBG_88E("%s: parameters =%s\n", __func__, ptr);
+
+ priv = rtw_private_handler;
+ priv_args = rtw_private_args;
+ num_priv_args = ARRAY_SIZE(rtw_private_args);
+
+ if (num_priv_args == 0) {
+ err = -EOPNOTSUPP;
+ goto exit;
+ }
+
+ /* Search the correct ioctl */
+ k = -1;
+ while ((++k < num_priv_args) && strcmp(priv_args[k].name, cmdname));
+
+ /* If not found... */
+ if (k == num_priv_args) {
+ err = -EOPNOTSUPP;
+ goto exit;
+ }
+
+ /* Watch out for sub-ioctls ! */
+ if (priv_args[k].cmd < SIOCDEVPRIVATE) {
+ int j = -1;
+
+ /* Find the matching *real* ioctl */
+ while ((++j < num_priv_args) && ((priv_args[j].name[0] != '\0') ||
+ (priv_args[j].set_args != priv_args[k].set_args) ||
+ (priv_args[j].get_args != priv_args[k].get_args)));
+
+ /* If not found... */
+ if (j == num_priv_args) {
+ err = -EINVAL;
+ goto exit;
+ }
+
+ /* Save sub-ioctl number */
+ subcmd = priv_args[k].cmd;
+ /* Reserve one int (simplify alignment issues) */
+ offset = sizeof(__u32);
+ /* Use real ioctl definition from now on */
+ k = j;
+ }
+
+ buffer = rtw_zmalloc(4096);
+ if (NULL == buffer) {
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ /* If we have to set some data */
+ if ((priv_args[k].set_args & IW_PRIV_TYPE_MASK) &&
+ (priv_args[k].set_args & IW_PRIV_SIZE_MASK)) {
+ u8 *str;
+
+ switch (priv_args[k].set_args & IW_PRIV_TYPE_MASK) {
+ case IW_PRIV_TYPE_BYTE:
+ /* Fetch args */
+ count = 0;
+ do {
+ str = strsep(&ptr, delim);
+ if (NULL == str)
+ break;
+ sscanf(str, "%i", &temp);
+ buffer[count++] = (u8)temp;
+ } while (1);
+ buffer_len = count;
+ /* Number of args to fetch */
+ wdata.data.length = count;
+ if (wdata.data.length > (priv_args[k].set_args & IW_PRIV_SIZE_MASK))
+ wdata.data.length = priv_args[k].set_args & IW_PRIV_SIZE_MASK;
+ break;
+ case IW_PRIV_TYPE_INT:
+ /* Fetch args */
+ count = 0;
+ do {
+ str = strsep(&ptr, delim);
+ if (NULL == str)
+ break;
+ sscanf(str, "%i", &temp);
+ ((s32 *)buffer)[count++] = (s32)temp;
+ } while (1);
+ buffer_len = count * sizeof(s32);
+ /* Number of args to fetch */
+ wdata.data.length = count;
+ if (wdata.data.length > (priv_args[k].set_args & IW_PRIV_SIZE_MASK))
+ wdata.data.length = priv_args[k].set_args & IW_PRIV_SIZE_MASK;
+ break;
+ case IW_PRIV_TYPE_CHAR:
+ if (len > 0) {
+ /* Size of the string to fetch */
+ wdata.data.length = len;
+ if (wdata.data.length > (priv_args[k].set_args & IW_PRIV_SIZE_MASK))
+ wdata.data.length = priv_args[k].set_args & IW_PRIV_SIZE_MASK;
+
+ /* Fetch string */
+ memcpy(buffer, ptr, wdata.data.length);
+ } else {
+ wdata.data.length = 1;
+ buffer[0] = '\0';
+ }
+ buffer_len = wdata.data.length;
+ break;
+ default:
+ DBG_88E("%s: Not yet implemented...\n", __func__);
+ err = -1;
+ goto exit;
+ }
+
+ if ((priv_args[k].set_args & IW_PRIV_SIZE_FIXED) &&
+ (wdata.data.length != (priv_args[k].set_args & IW_PRIV_SIZE_MASK))) {
+ DBG_88E("%s: The command %s needs exactly %d argument(s)...\n",
+ __func__, cmdname, priv_args[k].set_args & IW_PRIV_SIZE_MASK);
+ err = -EINVAL;
+ goto exit;
+ }
+ } else {
+ /* no args to set */
+ wdata.data.length = 0L;
+ }
+
+ /* Those two tests are important. They define how the driver
+ * will have to handle the data */
+ if ((priv_args[k].set_args & IW_PRIV_SIZE_FIXED) &&
+ ((get_priv_size(priv_args[k].set_args) + offset) <= IFNAMSIZ)) {
+ /* First case : all SET args fit within wrq */
+ if (offset)
+ wdata.mode = subcmd;
+ memcpy(wdata.name + offset, buffer, IFNAMSIZ - offset);
+ } else {
+ if ((priv_args[k].set_args == 0) &&
+ (priv_args[k].get_args & IW_PRIV_SIZE_FIXED) &&
+ (get_priv_size(priv_args[k].get_args) <= IFNAMSIZ)) {
+ /* Second case : no SET args, GET args fit within wrq */
+ if (offset)
+ wdata.mode = subcmd;
+ } else {
+ /* Third case : args won't fit in wrq, or variable number of args */
+ if (copy_to_user(wdata.data.pointer, buffer, buffer_len)) {
+ err = -EFAULT;
+ goto exit;
+ }
+ wdata.data.flags = subcmd;
+ }
+ }
+
+ kfree(input);
+ input = NULL;
+
+ extra_size = 0;
+ if (IW_IS_SET(priv_args[k].cmd)) {
+ /* Size of set arguments */
+ extra_size = get_priv_size(priv_args[k].set_args);
+
+ /* Does it fit in iwr? */
+ if ((priv_args[k].set_args & IW_PRIV_SIZE_FIXED) &&
+ ((extra_size + offset) <= IFNAMSIZ))
+ extra_size = 0;
+ } else {
+ /* Size of get arguments */
+ extra_size = get_priv_size(priv_args[k].get_args);
+
+ /* Does it fit in iwr? */
+ if ((priv_args[k].get_args & IW_PRIV_SIZE_FIXED) &&
+ (extra_size <= IFNAMSIZ))
+ extra_size = 0;
+ }
+
+ if (extra_size == 0) {
+ extra = (u8 *)&wdata;
+ kfree(buffer);
+ buffer = NULL;
+ } else {
+ extra = buffer;
+ }
+
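+ /* Private handlers are indexed by ioctl number relative to SIOCIWFIRSTPRIV. */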
+ handler = priv[priv_args[k].cmd - SIOCIWFIRSTPRIV];
+ err = handler(dev, NULL, &wdata, extra);
+
+ /* If we have to get some data */
+ if ((priv_args[k].get_args & IW_PRIV_TYPE_MASK) &&
+ (priv_args[k].get_args & IW_PRIV_SIZE_MASK)) {
+ int j;
+ int n = 0; /* number of args */
+ u8 str[20] = {0};
+
+ /* Check where is the returned data */
+ if ((priv_args[k].get_args & IW_PRIV_SIZE_FIXED) &&
+ (get_priv_size(priv_args[k].get_args) <= IFNAMSIZ))
+ n = priv_args[k].get_args & IW_PRIV_SIZE_MASK;
+ else
+ n = wdata.data.length;
+
+ output = rtw_zmalloc(4096);
+ if (NULL == output) {
+ err = -ENOMEM;
+ goto exit;
+ }
+ switch (priv_args[k].get_args & IW_PRIV_TYPE_MASK) {
+ case IW_PRIV_TYPE_BYTE:
+ /* Display args */
+ for (j = 0; j < n; j++) {
+ sprintf(str, "%d ", extra[j]);
+ len = strlen(str);
+ output_len = strlen(output);
+ if ((output_len + len + 1) > 4096) {
+ err = -E2BIG;
+ goto exit;
+ }
+ memcpy(output+output_len, str, len);
+ }
+ break;
+ case IW_PRIV_TYPE_INT:
+ /* Display args */
+ for (j = 0; j < n; j++) {
+ sprintf(str, "%d ", ((__s32 *)extra)[j]);
+ len = strlen(str);
+ output_len = strlen(output);
+ if ((output_len + len + 1) > 4096) {
+ err = -E2BIG;
+ goto exit;
+ }
+ memcpy(output+output_len, str, len);
+ }
+ break;
+ case IW_PRIV_TYPE_CHAR:
+ /* Display args */
+ memcpy(output, extra, n);
+ break;
+ default:
+ DBG_88E("%s: Not yet implemented...\n", __func__);
+ err = -1;
+ goto exit;
+ }
+
+ output_len = strlen(output) + 1;
+ wrq_data->data.length = output_len;
+ if (copy_to_user(wrq_data->data.pointer, output, output_len)) {
+ err = -EFAULT;
+ goto exit;
+ }
+ } else {
+ /* no args to get */
+ wrq_data->data.length = 0;
+ }
+
+exit:
+ kfree(input);
+ kfree(buffer);
+ kfree(output);
+ return err;
+}
+
+#include <rtw_android.h>
+int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct iwreq *wrq = (struct iwreq *)rq;
+ int ret = 0;
+
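+ /* SIOCDEVPRIVATE carries wext-style private ioctls; SIOCDEVPRIVATE+1 carries Android private commands. */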
+ switch (cmd) {
+ case RTL_IOCTL_WPA_SUPPLICANT:
+ ret = wpa_supplicant_ioctl(dev, &wrq->u.data);
+ break;
+#ifdef CONFIG_88EU_AP_MODE
+ case RTL_IOCTL_HOSTAPD:
+ ret = rtw_hostapd_ioctl(dev, &wrq->u.data);
+ break;
+#endif /* CONFIG_88EU_AP_MODE */
+ case SIOCDEVPRIVATE:
+ ret = rtw_ioctl_wext_private(dev, &wrq->u);
+ break;
+ case (SIOCDEVPRIVATE+1):
+ ret = rtw_android_priv_cmd(dev, rq, cmd);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ return ret;
+}
diff --git a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
new file mode 100644
index 00000000000..57d1ff750d5
--- /dev/null
+++ b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
@@ -0,0 +1,246 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+
+#define _MLME_OSDEP_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <mlme_osdep.h>
+
+void rtw_join_timeout_handler(void *FunctionContext)
+{
+ struct adapter *adapter = (struct adapter *)FunctionContext;
+
+ _rtw_join_timeout_handler(adapter);
+}
+
+
+void _rtw_scan_timeout_handler(void *FunctionContext)
+{
+ struct adapter *adapter = (struct adapter *)FunctionContext;
+
+ rtw_scan_timeout_handler(adapter);
+}
+
+static void _dynamic_check_timer_handlder(void *FunctionContext)
+{
+ struct adapter *adapter = (struct adapter *)FunctionContext;
+
+ if (adapter->registrypriv.mp_mode == 1)
+ return;
+ rtw_dynamic_check_timer_handlder(adapter);
+ _set_timer(&adapter->mlmepriv.dynamic_chk_timer, 2000);
+}
+
+void rtw_init_mlme_timer(struct adapter *padapter)
+{
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ _init_timer(&(pmlmepriv->assoc_timer), padapter->pnetdev, rtw_join_timeout_handler, padapter);
+ _init_timer(&(pmlmepriv->scan_to_timer), padapter->pnetdev, _rtw_scan_timeout_handler, padapter);
+ _init_timer(&(pmlmepriv->dynamic_chk_timer), padapter->pnetdev, _dynamic_check_timer_handlder, padapter);
+}
+
+void rtw_os_indicate_connect(struct adapter *adapter)
+{
+_func_enter_;
+ rtw_indicate_wx_assoc_event(adapter);
+ netif_carrier_on(adapter->pnetdev);
+ if (adapter->pid[2] != 0)
+ rtw_signal_process(adapter->pid[2], SIGALRM);
+_func_exit_;
+}
+
+void rtw_os_indicate_scan_done(struct adapter *padapter, bool aborted)
+{
+ indicate_wx_scan_complete_event(padapter);
+}
+
+static struct rt_pmkid_list backup_pmkid[NUM_PMKID_CACHE];
+
+void rtw_reset_securitypriv(struct adapter *adapter)
+{
+ u8 backup_index = 0;
+ u8 backup_counter = 0x00;
+ u32 backup_time = 0;
+
+ if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
+ /* 802.1x */
+ /* We have to back up the PMK information for the WiFi PMK Caching test item. */
+ /* Back up the btkip_countermeasure information. */
+ /* When the countermeasure is triggered, the driver has to disconnect from the AP for 60 seconds. */
+ _rtw_memset(&backup_pmkid[0], 0x00, sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
+ memcpy(&backup_pmkid[0], &adapter->securitypriv.PMKIDList[0], sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
+ backup_index = adapter->securitypriv.PMKIDIndex;
+ backup_counter = adapter->securitypriv.btkip_countermeasure;
+ backup_time = adapter->securitypriv.btkip_countermeasure_time;
+ _rtw_memset((unsigned char *)&adapter->securitypriv, 0, sizeof(struct security_priv));
+
+ /* Restore the PMK information to securitypriv structure for the following connection. */
+ memcpy(&adapter->securitypriv.PMKIDList[0],
+ &backup_pmkid[0],
+ sizeof(struct rt_pmkid_list) * NUM_PMKID_CACHE);
+ adapter->securitypriv.PMKIDIndex = backup_index;
+ adapter->securitypriv.btkip_countermeasure = backup_counter;
+ adapter->securitypriv.btkip_countermeasure_time = backup_time;
+ adapter->securitypriv.ndisauthtype = Ndis802_11AuthModeOpen;
+ adapter->securitypriv.ndisencryptstatus = Ndis802_11WEPDisabled;
+ } else {
+ /* reset values in securitypriv */
+ struct security_priv *psec_priv = &adapter->securitypriv;
+
+ psec_priv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
+ psec_priv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ psec_priv->dot11PrivacyKeyIndex = 0;
+ psec_priv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+ psec_priv->dot118021XGrpKeyid = 1;
+ psec_priv->ndisauthtype = Ndis802_11AuthModeOpen;
+ psec_priv->ndisencryptstatus = Ndis802_11WEPDisabled;
+ }
+}
+
+void rtw_os_indicate_disconnect(struct adapter *adapter)
+{
+_func_enter_;
+ netif_carrier_off(adapter->pnetdev); /* Do it first for tx broadcast pkt after disconnection issue! */
+ rtw_indicate_wx_disassoc_event(adapter);
+ rtw_reset_securitypriv(adapter);
+_func_exit_;
+}
+
+void rtw_report_sec_ie(struct adapter *adapter, u8 authmode, u8 *sec_ie)
+{
+ uint len;
+ u8 *buff, *p, i;
+ union iwreq_data wrqu;
+
+_func_enter_;
+ RT_TRACE(_module_mlme_osdep_c_, _drv_info_,
+ ("+rtw_report_sec_ie, authmode=%d\n", authmode));
+ buff = NULL;
+ if (authmode == _WPA_IE_ID_) {
+ RT_TRACE(_module_mlme_osdep_c_, _drv_info_,
+ ("rtw_report_sec_ie, authmode=%d\n", authmode));
+ buff = rtw_malloc(IW_CUSTOM_MAX);
+ if (!buff)
+ goto exit;
+ _rtw_memset(buff, 0, IW_CUSTOM_MAX);
+ p = buff;
+ p += sprintf(p, "ASSOCINFO(ReqIEs =");
+ len = sec_ie[1]+2;
+ len = (len < IW_CUSTOM_MAX) ? len : IW_CUSTOM_MAX;
+ for (i = 0; i < len; i++)
+ p += sprintf(p, "%02x", sec_ie[i]);
+ p += sprintf(p, ")");
+ _rtw_memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.data.length = p-buff;
+ wrqu.data.length = (wrqu.data.length < IW_CUSTOM_MAX) ?
+ wrqu.data.length : IW_CUSTOM_MAX;
+ wireless_send_event(adapter->pnetdev, IWEVCUSTOM, &wrqu, buff);
+ kfree(buff);
+ }
+exit:
+_func_exit_;
+}
+
+static void _survey_timer_hdl(void *FunctionContext)
+{
+ struct adapter *padapter = (struct adapter *)FunctionContext;
+
+ survey_timer_hdl(padapter);
+}
+
+static void _link_timer_hdl(void *FunctionContext)
+{
+ struct adapter *padapter = (struct adapter *)FunctionContext;
+ link_timer_hdl(padapter);
+}
+
+static void _addba_timer_hdl(void *FunctionContext)
+{
+ struct sta_info *psta = (struct sta_info *)FunctionContext;
+ addba_timer_hdl(psta);
+}
+
+void init_addba_retry_timer(struct adapter *padapter, struct sta_info *psta)
+{
+ _init_timer(&psta->addba_retry_timer, padapter->pnetdev, _addba_timer_hdl, psta);
+}
+
+void init_mlme_ext_timer(struct adapter *padapter)
+{
+ struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
+
+ _init_timer(&pmlmeext->survey_timer, padapter->pnetdev, _survey_timer_hdl, padapter);
+ _init_timer(&pmlmeext->link_timer, padapter->pnetdev, _link_timer_hdl, padapter);
+}
+
+#ifdef CONFIG_88EU_AP_MODE
+
+void rtw_indicate_sta_assoc_event(struct adapter *padapter, struct sta_info *psta)
+{
+ union iwreq_data wrqu;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ if (psta == NULL)
+ return;
+
+ if (psta->aid > NUM_STA)
+ return;
+
+ if (pstapriv->sta_aid[psta->aid - 1] != psta)
+ return;
+
+
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+
+ memcpy(wrqu.addr.sa_data, psta->hwaddr, ETH_ALEN);
+
+ DBG_88E("+rtw_indicate_sta_assoc_event\n");
+
+ wireless_send_event(padapter->pnetdev, IWEVREGISTERED, &wrqu, NULL);
+}
+
+void rtw_indicate_sta_disassoc_event(struct adapter *padapter, struct sta_info *psta)
+{
+ union iwreq_data wrqu;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+
+ if (psta == NULL)
+ return;
+
+ if (psta->aid > NUM_STA)
+ return;
+
+ if (pstapriv->sta_aid[psta->aid - 1] != psta)
+ return;
+
+
+ wrqu.addr.sa_family = ARPHRD_ETHER;
+
+ memcpy(wrqu.addr.sa_data, psta->hwaddr, ETH_ALEN);
+
+ DBG_88E("+rtw_indicate_sta_disassoc_event\n");
+
+ wireless_send_event(padapter->pnetdev, IWEVEXPIRED, &wrqu, NULL);
+}
+
+#endif
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
new file mode 100644
index 00000000000..63bc913eba6
--- /dev/null
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -0,0 +1,1251 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _OS_INTFS_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <xmit_osdep.h>
+#include <recv_osdep.h>
+#include <hal_intf.h>
+#include <rtw_ioctl.h>
+#include <rtw_version.h>
+
+#include <usb_osintf.h>
+#include <usb_hal.h>
+#include <rtw_br_ext.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Realtek Wireless LAN Driver");
+MODULE_AUTHOR("Realtek Semiconductor Corp.");
+MODULE_VERSION(DRIVERVERSION);
+
+#define CONFIG_BR_EXT_BRNAME "br0"
+#define RTW_NOTCH_FILTER 0 /* 0:Disable, 1:Enable, */
+
+/* module param defaults */
+static int rtw_chip_version = 0x00;
+static int rtw_rfintfs = HWPI;
+static int rtw_lbkmode;/* RTL8712_AIR_TRX; */
+static int rtw_network_mode = Ndis802_11IBSS;/* Ndis802_11Infrastructure; infra, ad-hoc, auto */
+static int rtw_channel = 1;/* ad-hoc support requirement */
+static int rtw_wireless_mode = WIRELESS_11BG_24N;
+static int rtw_vrtl_carrier_sense = AUTO_VCS;
+static int rtw_vcs_type = RTS_CTS;
+static int rtw_rts_thresh = 2347;
+static int rtw_frag_thresh = 2346;
+static int rtw_preamble = PREAMBLE_LONG;/* long, short, auto */
+static int rtw_scan_mode = 1;/* active, passive */
+static int rtw_adhoc_tx_pwr = 1;
+static int rtw_soft_ap;
+static int rtw_power_mgnt = 1;
+static int rtw_ips_mode = IPS_NORMAL;
+
+static int rtw_smart_ps = 2;
+
+module_param(rtw_ips_mode, int, 0644);
+MODULE_PARM_DESC(rtw_ips_mode, "The default IPS mode");
+
+static int rtw_debug = 1;
+static int rtw_radio_enable = 1;
+static int rtw_long_retry_lmt = 7;
+static int rtw_short_retry_lmt = 7;
+static int rtw_busy_thresh = 40;
+static int rtw_ack_policy = NORMAL_ACK;
+
+static int rtw_mp_mode;
+
+static int rtw_software_encrypt;
+static int rtw_software_decrypt;
+
+static int rtw_acm_method;/* 0:By SW 1:By HW. */
+
+static int rtw_wmm_enable = 1;/* default is set to enable the wmm. */
+static int rtw_uapsd_enable;
+static int rtw_uapsd_max_sp = NO_LIMIT;
+static int rtw_uapsd_acbk_en;
+static int rtw_uapsd_acbe_en;
+static int rtw_uapsd_acvi_en;
+static int rtw_uapsd_acvo_en;
+
+int rtw_ht_enable = 1;
+int rtw_cbw40_enable = 3; /* 0: disable, bit(0): enable 2.4g, bit(1): enable 5g */
+int rtw_ampdu_enable = 1;/* for enable tx_ampdu */
+static int rtw_rx_stbc = 1;/* 0: disable, bit(0): enable 2.4g, bit(1): enable 5g, default is set to enable 2.4GHz for an IOT issue with Buffalo's AP at 5GHz */
+static int rtw_ampdu_amsdu;/* 0: disabled, 1:enabled, 2:auto */
+
+static int rtw_lowrate_two_xmit = 1;/* Use 2 path Tx to transmit MCS0~7 and legacy mode */
+
+static int rtw_rf_config = RF_819X_MAX_TYPE; /* auto */
+static int rtw_low_power;
+static int rtw_wifi_spec;
+static int rtw_channel_plan = RT_CHANNEL_DOMAIN_MAX;
+static int rtw_AcceptAddbaReq = true;/* 0:Reject AP's Add BA req, 1:Accept AP's Add BA req. */
+
+static int rtw_antdiv_cfg = 2; /* 0:OFF , 1:ON, 2:decide by Efuse config */
+/* Antenna diversity type:
+ * 0: decide by efuse
+ * 1: for 88EE, 1Tx and 1RxCG are diversity (2 antennas with SPDT)
+ * 2: for 88EE, 1Tx and 2Rx are diversity (2 antennas, Tx and RxCG both on aux port, RxCS on main port)
+ * 3: for 88EE, 1Tx and 1RxCG are fixed (1 antenna, Tx and RxCG both on aux port)
+ */
+static int rtw_antdiv_type;
+
+static int rtw_enusbss;/* 0:disable, 1:enable */
+
+static int rtw_hwpdn_mode = 2;/* 0:disable, 1:enable, 2: by EFUSE config */
+
+static int rtw_hwpwrp_detect; /* HW power pin detect 0:disable, 1:enable */
+
+static int rtw_hw_wps_pbc = 1;
+
+int rtw_mc2u_disable;
+
+static int rtw_80211d;
+
+static char *ifname = "wlan%d";
+module_param(ifname, charp, 0644);
+MODULE_PARM_DESC(ifname, "The default name to allocate for first interface");
+
+static char *if2name = "wlan%d";
+module_param(if2name, charp, 0644);
+MODULE_PARM_DESC(if2name, "The default name to allocate for second interface");
+
+char *rtw_initmac; /* temp mac address if users want to use instead of the mac address in Efuse */
+
+module_param(rtw_initmac, charp, 0644);
+module_param(rtw_channel_plan, int, 0644);
+module_param(rtw_chip_version, int, 0644);
+module_param(rtw_rfintfs, int, 0644);
+module_param(rtw_lbkmode, int, 0644);
+module_param(rtw_network_mode, int, 0644);
+module_param(rtw_channel, int, 0644);
+module_param(rtw_mp_mode, int, 0644);
+module_param(rtw_wmm_enable, int, 0644);
+module_param(rtw_vrtl_carrier_sense, int, 0644);
+module_param(rtw_vcs_type, int, 0644);
+module_param(rtw_busy_thresh, int, 0644);
+module_param(rtw_ht_enable, int, 0644);
+module_param(rtw_cbw40_enable, int, 0644);
+module_param(rtw_ampdu_enable, int, 0644);
+module_param(rtw_rx_stbc, int, 0644);
+module_param(rtw_ampdu_amsdu, int, 0644);
+module_param(rtw_lowrate_two_xmit, int, 0644);
+module_param(rtw_rf_config, int, 0644);
+module_param(rtw_power_mgnt, int, 0644);
+module_param(rtw_smart_ps, int, 0644);
+module_param(rtw_low_power, int, 0644);
+module_param(rtw_wifi_spec, int, 0644);
+module_param(rtw_antdiv_cfg, int, 0644);
+module_param(rtw_antdiv_type, int, 0644);
+module_param(rtw_enusbss, int, 0644);
+module_param(rtw_hwpdn_mode, int, 0644);
+module_param(rtw_hwpwrp_detect, int, 0644);
+module_param(rtw_hw_wps_pbc, int, 0644);
+
+static uint rtw_max_roaming_times = 2;
+module_param(rtw_max_roaming_times, uint, 0644);
+MODULE_PARM_DESC(rtw_max_roaming_times, "The max roaming times to try");
+
+static int rtw_fw_iol = 1;/* 0:Disable, 1:enable, 2:by usb speed */
+module_param(rtw_fw_iol, int, 0644);
+MODULE_PARM_DESC(rtw_fw_iol, "FW IOL");
+
+module_param(rtw_mc2u_disable, int, 0644);
+
+module_param(rtw_80211d, int, 0644);
+MODULE_PARM_DESC(rtw_80211d, "Enable 802.11d mechanism");
+
+static uint rtw_notch_filter = RTW_NOTCH_FILTER;
+module_param(rtw_notch_filter, uint, 0644);
+MODULE_PARM_DESC(rtw_notch_filter, "0:Disable, 1:Enable, 2:Enable only for P2P");
+module_param_named(debug, rtw_debug, int, 0444);
+MODULE_PARM_DESC(debug, "Set debug level (1-9) (default 1)");
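+
+/*
+ * Typical usage (the module name is assumed here to be r8188eu):
+ *   modprobe r8188eu rtw_ips_mode=1 rtw_power_mgnt=0 debug=3
+ */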
+
+/* dummy routines */
+void rtw_proc_remove_one(struct net_device *dev)
+{
+}
+
+void rtw_proc_init_one(struct net_device *dev)
+{
+}
+
+#if 0 /* TODO: Convert these to /sys */
+void rtw_proc_init_one(struct net_device *dev)
+{
+ struct proc_dir_entry *dir_dev = NULL;
+ struct proc_dir_entry *entry = NULL;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ u8 rf_type;
+
+ if (rtw_proc == NULL) {
+ memcpy(rtw_proc_name, DRV_NAME, sizeof(DRV_NAME));
+
+ rtw_proc = create_proc_entry(rtw_proc_name, S_IFDIR, init_net.proc_net);
+ if (rtw_proc == NULL) {
+ DBG_88E(KERN_ERR "Unable to create rtw_proc directory\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("ver_info", S_IFREG | S_IRUGO, rtw_proc, proc_get_drv_version, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ }
+
+ if (padapter->dir_dev == NULL) {
+ padapter->dir_dev = create_proc_entry(dev->name,
+ S_IFDIR | S_IRUGO | S_IXUGO,
+ rtw_proc);
+ dir_dev = padapter->dir_dev;
+ if (dir_dev == NULL) {
+ if (rtw_proc_cnt == 0) {
+ if (rtw_proc) {
+ remove_proc_entry(rtw_proc_name, init_net.proc_net);
+ rtw_proc = NULL;
+ }
+ }
+
+ pr_info("Unable to create dir_dev directory\n");
+ return;
+ }
+ } else {
+ return;
+ }
+
+ rtw_proc_cnt++;
+
+ entry = create_proc_read_entry("write_reg", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_write_reg, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ entry->write_proc = proc_set_write_reg;
+
+ entry = create_proc_read_entry("read_reg", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_read_reg, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ entry->write_proc = proc_set_read_reg;
+
+
+ entry = create_proc_read_entry("fwstate", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_fwstate, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("sec_info", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_sec_info, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("mlmext_state", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_mlmext_state, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("qos_option", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_qos_option, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("ht_option", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_ht_option, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("rf_info", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_rf_info, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("ap_info", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_ap_info, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("adapter_state", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_adapter_state, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("trx_info", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_trx_info, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("mac_reg_dump1", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_mac_reg_dump1, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("mac_reg_dump2", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_mac_reg_dump2, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("mac_reg_dump3", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_mac_reg_dump3, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("bb_reg_dump1", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_bb_reg_dump1, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("bb_reg_dump2", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_bb_reg_dump2, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("bb_reg_dump3", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_bb_reg_dump3, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("rf_reg_dump1", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_rf_reg_dump1, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("rf_reg_dump2", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_rf_reg_dump2, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+ if ((RF_1T2R == rf_type) || (RF_1T1R == rf_type)) {
+ entry = create_proc_read_entry("rf_reg_dump3", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_rf_reg_dump3, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("rf_reg_dump4", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_rf_reg_dump4, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ }
+
+#ifdef CONFIG_88EU_AP_MODE
+
+ entry = create_proc_read_entry("all_sta_info", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_all_sta_info, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+#endif
+
+ entry = create_proc_read_entry("best_channel", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_best_channel, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+
+ entry = create_proc_read_entry("rx_signal", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_rx_signal, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ entry->write_proc = proc_set_rx_signal;
+ entry = create_proc_read_entry("ht_enable", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_ht_enable, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ entry->write_proc = proc_set_ht_enable;
+
+ entry = create_proc_read_entry("cbw40_enable", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_cbw40_enable, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ entry->write_proc = proc_set_cbw40_enable;
+
+ entry = create_proc_read_entry("ampdu_enable", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_ampdu_enable, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ entry->write_proc = proc_set_ampdu_enable;
+
+ entry = create_proc_read_entry("rx_stbc", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_rx_stbc, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ entry->write_proc = proc_set_rx_stbc;
+
+ entry = create_proc_read_entry("path_rssi", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_two_path_rssi, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ entry = create_proc_read_entry("rssi_disp", S_IFREG | S_IRUGO,
+ dir_dev, proc_get_rssi_disp, dev);
+ if (!entry) {
+ pr_info("Unable to create_proc_read_entry!\n");
+ return;
+ }
+ entry->write_proc = proc_set_rssi_disp;
+}
+
+void rtw_proc_remove_one(struct net_device *dev)
+{
+ struct proc_dir_entry *dir_dev = NULL;
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ u8 rf_type;
+
+ dir_dev = padapter->dir_dev;
+ padapter->dir_dev = NULL;
+
+ if (dir_dev) {
+ remove_proc_entry("write_reg", dir_dev);
+ remove_proc_entry("read_reg", dir_dev);
+ remove_proc_entry("fwstate", dir_dev);
+ remove_proc_entry("sec_info", dir_dev);
+ remove_proc_entry("mlmext_state", dir_dev);
+ remove_proc_entry("qos_option", dir_dev);
+ remove_proc_entry("ht_option", dir_dev);
+ remove_proc_entry("rf_info", dir_dev);
+ remove_proc_entry("ap_info", dir_dev);
+ remove_proc_entry("adapter_state", dir_dev);
+ remove_proc_entry("trx_info", dir_dev);
+ remove_proc_entry("mac_reg_dump1", dir_dev);
+ remove_proc_entry("mac_reg_dump2", dir_dev);
+ remove_proc_entry("mac_reg_dump3", dir_dev);
+ remove_proc_entry("bb_reg_dump1", dir_dev);
+ remove_proc_entry("bb_reg_dump2", dir_dev);
+ remove_proc_entry("bb_reg_dump3", dir_dev);
+ remove_proc_entry("rf_reg_dump1", dir_dev);
+ remove_proc_entry("rf_reg_dump2", dir_dev);
+ rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+ if ((RF_1T2R == rf_type) || (RF_1T1R == rf_type)) {
+ remove_proc_entry("rf_reg_dump3", dir_dev);
+ remove_proc_entry("rf_reg_dump4", dir_dev);
+ }
+#ifdef CONFIG_88EU_AP_MODE
+ remove_proc_entry("all_sta_info", dir_dev);
+#endif
+
+ remove_proc_entry("best_channel", dir_dev);
+ remove_proc_entry("rx_signal", dir_dev);
+ remove_proc_entry("cbw40_enable", dir_dev);
+ remove_proc_entry("ht_enable", dir_dev);
+ remove_proc_entry("ampdu_enable", dir_dev);
+ remove_proc_entry("rx_stbc", dir_dev);
+ remove_proc_entry("path_rssi", dir_dev);
+ remove_proc_entry("rssi_disp", dir_dev);
+ remove_proc_entry(dev->name, rtw_proc);
+ dir_dev = NULL;
+ } else {
+ return;
+ }
+ rtw_proc_cnt--;
+
+ if (rtw_proc_cnt == 0) {
+ if (rtw_proc) {
+ remove_proc_entry("ver_info", rtw_proc);
+
+ remove_proc_entry(rtw_proc_name, init_net.proc_net);
+ rtw_proc = NULL;
+ }
+ }
+}
+#endif
+
+static uint loadparam(struct adapter *padapter, struct net_device *pnetdev)
+{
+ uint status = _SUCCESS;
+ struct registry_priv *registry_par = &padapter->registrypriv;
+
+_func_enter_;
+
+ GlobalDebugLevel = rtw_debug;
+ registry_par->chip_version = (u8)rtw_chip_version;
+ registry_par->rfintfs = (u8)rtw_rfintfs;
+ registry_par->lbkmode = (u8)rtw_lbkmode;
+ registry_par->network_mode = (u8)rtw_network_mode;
+
+ memcpy(registry_par->ssid.Ssid, "ANY", 3);
+ registry_par->ssid.SsidLength = 3;
+
+ registry_par->channel = (u8)rtw_channel;
+ registry_par->wireless_mode = (u8)rtw_wireless_mode;
+ registry_par->vrtl_carrier_sense = (u8)rtw_vrtl_carrier_sense;
+ registry_par->vcs_type = (u8)rtw_vcs_type;
+ registry_par->rts_thresh = (u16)rtw_rts_thresh;
+ registry_par->frag_thresh = (u16)rtw_frag_thresh;
+ registry_par->preamble = (u8)rtw_preamble;
+ registry_par->scan_mode = (u8)rtw_scan_mode;
+ registry_par->adhoc_tx_pwr = (u8)rtw_adhoc_tx_pwr;
+ registry_par->soft_ap = (u8)rtw_soft_ap;
+ registry_par->smart_ps = (u8)rtw_smart_ps;
+ registry_par->power_mgnt = (u8)rtw_power_mgnt;
+ registry_par->ips_mode = (u8)rtw_ips_mode;
+ registry_par->radio_enable = (u8)rtw_radio_enable;
+ registry_par->long_retry_lmt = (u8)rtw_long_retry_lmt;
+ registry_par->short_retry_lmt = (u8)rtw_short_retry_lmt;
+ registry_par->busy_thresh = (u16)rtw_busy_thresh;
+ registry_par->ack_policy = (u8)rtw_ack_policy;
+ registry_par->mp_mode = (u8)rtw_mp_mode;
+ registry_par->software_encrypt = (u8)rtw_software_encrypt;
+ registry_par->software_decrypt = (u8)rtw_software_decrypt;
+ registry_par->acm_method = (u8)rtw_acm_method;
+
+ /* UAPSD */
+ registry_par->wmm_enable = (u8)rtw_wmm_enable;
+ registry_par->uapsd_enable = (u8)rtw_uapsd_enable;
+ registry_par->uapsd_max_sp = (u8)rtw_uapsd_max_sp;
+ registry_par->uapsd_acbk_en = (u8)rtw_uapsd_acbk_en;
+ registry_par->uapsd_acbe_en = (u8)rtw_uapsd_acbe_en;
+ registry_par->uapsd_acvi_en = (u8)rtw_uapsd_acvi_en;
+ registry_par->uapsd_acvo_en = (u8)rtw_uapsd_acvo_en;
+
+ registry_par->ht_enable = (u8)rtw_ht_enable;
+ registry_par->cbw40_enable = (u8)rtw_cbw40_enable;
+ registry_par->ampdu_enable = (u8)rtw_ampdu_enable;
+ registry_par->rx_stbc = (u8)rtw_rx_stbc;
+ registry_par->ampdu_amsdu = (u8)rtw_ampdu_amsdu;
+ registry_par->lowrate_two_xmit = (u8)rtw_lowrate_two_xmit;
+ registry_par->rf_config = (u8)rtw_rf_config;
+ registry_par->low_power = (u8)rtw_low_power;
+ registry_par->wifi_spec = (u8)rtw_wifi_spec;
+ registry_par->channel_plan = (u8)rtw_channel_plan;
+ registry_par->bAcceptAddbaReq = (u8)rtw_AcceptAddbaReq;
+ registry_par->antdiv_cfg = (u8)rtw_antdiv_cfg;
+ registry_par->antdiv_type = (u8)rtw_antdiv_type;
+ registry_par->hwpdn_mode = (u8)rtw_hwpdn_mode;/* 0:disable, 1:enable, 2:by EFUSE config */
+ registry_par->hwpwrp_detect = (u8)rtw_hwpwrp_detect;/* 0:disable, 1:enable */
+ registry_par->hw_wps_pbc = (u8)rtw_hw_wps_pbc;
+
+ registry_par->max_roaming_times = (u8)rtw_max_roaming_times;
+
+ registry_par->fw_iol = rtw_fw_iol;
+
+ registry_par->enable80211d = (u8)rtw_80211d;
+ snprintf(registry_par->ifname, 16, "%s", ifname);
+ snprintf(registry_par->if2name, 16, "%s", if2name);
+ registry_par->notch_filter = (u8)rtw_notch_filter;
+_func_exit_;
+ return status;
+}
+
+static int rtw_net_set_mac_address(struct net_device *pnetdev, void *p)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct sockaddr *addr = p;
+
+ if (!padapter->bup)
+ memcpy(padapter->eeprompriv.mac_addr, addr->sa_data, ETH_ALEN);
+
+ return 0;
+}
+
+static struct net_device_stats *rtw_net_get_stats(struct net_device *pnetdev)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
+ struct recv_priv *precvpriv = &(padapter->recvpriv);
+
+ padapter->stats.tx_packets = pxmitpriv->tx_pkts;/* pxmitpriv->tx_pkts++; */
+ padapter->stats.rx_packets = precvpriv->rx_pkts;/* precvpriv->rx_pkts++; */
+ padapter->stats.tx_dropped = pxmitpriv->tx_drop;
+ padapter->stats.rx_dropped = precvpriv->rx_drop;
+ padapter->stats.tx_bytes = pxmitpriv->tx_bytes;
+ padapter->stats.rx_bytes = precvpriv->rx_bytes;
+ return &padapter->stats;
+}
+
+/*
+ * AC to queue mapping
+ *
+ * AC_VO -> queue 0
+ * AC_VI -> queue 1
+ * AC_BE -> queue 2
+ * AC_BK -> queue 3
+ */
+static const u16 rtw_1d_to_queue[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };
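+/* rtw_1d_to_queue is indexed by 802.1d priority: 0,3 -> AC_BE; 1,2 -> AC_BK; 4,5 -> AC_VI; 6,7 -> AC_VO. */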
+
+/* Given a data frame, determine the 802.1p/1d tag to use. */
+static unsigned int rtw_classify8021d(struct sk_buff *skb)
+{
+ unsigned int dscp;
+
+ /* skb->priority values from 256->263 are magic values to
+ * directly indicate a specific 802.1d priority. This is used
+ * to allow 802.1d priority to be passed directly in from VLAN
+ * tags, etc.
+ */
+ if (skb->priority >= 256 && skb->priority <= 263)
+ return skb->priority - 256;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ dscp = ip_hdr(skb)->tos & 0xfc;
+ break;
+ default:
+ return 0;
+ }
+
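+ /* The top three TOS bits (IP precedence) select the 802.1d priority. */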
+ return dscp >> 5;
+}
+
+static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ struct adapter *padapter = rtw_netdev_priv(dev);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ skb->priority = rtw_classify8021d(skb);
+
+ if (pmlmepriv->acm_mask != 0)
+ skb->priority = qos_acm(pmlmepriv->acm_mask, skb->priority);
+
+ return rtw_1d_to_queue[skb->priority];
+}
+
+u16 rtw_recv_select_queue(struct sk_buff *skb)
+{
+ struct iphdr *piphdr;
+ unsigned int dscp;
+ __be16 eth_type;
+ u32 priority;
+ u8 *pdata = skb->data;
+
+ memcpy(&eth_type, pdata+(ETH_ALEN<<1), 2);
+
+ switch (eth_type) {
+ case htons(ETH_P_IP):
+ piphdr = (struct iphdr *)(pdata+ETH_HLEN);
+ dscp = piphdr->tos & 0xfc;
+ priority = dscp >> 5;
+ break;
+ default:
+ priority = 0;
+ }
+
+ return rtw_1d_to_queue[priority];
+}
+
+static const struct net_device_ops rtw_netdev_ops = {
+ .ndo_open = netdev_open,
+ .ndo_stop = netdev_close,
+ .ndo_start_xmit = rtw_xmit_entry,
+ .ndo_select_queue = rtw_select_queue,
+ .ndo_set_mac_address = rtw_net_set_mac_address,
+ .ndo_get_stats = rtw_net_get_stats,
+ .ndo_do_ioctl = rtw_ioctl,
+};
+
+int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname)
+{
+ if (dev_alloc_name(pnetdev, ifname) < 0)
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("dev_alloc_name, fail!\n"));
+
+ netif_carrier_off(pnetdev);
+ return 0;
+}
+
+struct net_device *rtw_init_netdev(struct adapter *old_padapter)
+{
+ struct adapter *padapter;
+ struct net_device *pnetdev;
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+init_net_dev\n"));
+
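+ /* A non-NULL old_padapter means the net_device is being re-registered (e.g. by rtw_rereg_nd_name) and the existing adapter private data must be reused. */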
+ if (old_padapter != NULL)
+ pnetdev = rtw_alloc_etherdev_with_old_priv(sizeof(struct adapter), (void *)old_padapter);
+ else
+ pnetdev = rtw_alloc_etherdev(sizeof(struct adapter));
+
+ if (!pnetdev)
+ return NULL;
+
+ padapter = rtw_netdev_priv(pnetdev);
+ padapter->pnetdev = pnetdev;
+ DBG_88E("register rtw_netdev_ops to netdev_ops\n");
+ pnetdev->netdev_ops = &rtw_netdev_ops;
+ pnetdev->watchdog_timeo = HZ*3; /* 3 second timeout */
+ pnetdev->wireless_handlers = (struct iw_handler_def *)&rtw_handlers_def;
+
+ /* step 2. */
+ loadparam(padapter, pnetdev);
+
+ return pnetdev;
+}
+
+u32 rtw_start_drv_threads(struct adapter *padapter)
+{
+ u32 _status = _SUCCESS;
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_start_drv_threads\n"));
+
+ padapter->cmdThread = kthread_run(rtw_cmd_thread, padapter, "RTW_CMD_THREAD");
+ if (IS_ERR(padapter->cmdThread))
+ _status = _FAIL;
+ else
+ _rtw_down_sema(&padapter->cmdpriv.terminate_cmdthread_sema); /* wait for cmd_thread to run */
+
+ rtw_hal_start_thread(padapter);
+ return _status;
+}
+
+void rtw_stop_drv_threads(struct adapter *padapter)
+{
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_stop_drv_threads\n"));
+
+ /* Below is to terminate rtw_cmd_thread & event_thread... */
+ _rtw_up_sema(&padapter->cmdpriv.cmd_queue_sema);
+ if (padapter->cmdThread)
+ _rtw_down_sema(&padapter->cmdpriv.terminate_cmdthread_sema);
+
+ rtw_hal_stop_thread(padapter);
+}
+
+static u8 rtw_init_default_value(struct adapter *padapter)
+{
+ u8 ret = _SUCCESS;
+ struct registry_priv *pregistrypriv = &padapter->registrypriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+
+ /* xmit_priv */
+ pxmitpriv->vcs_setting = pregistrypriv->vrtl_carrier_sense;
+ pxmitpriv->vcs = pregistrypriv->vcs_type;
+ pxmitpriv->vcs_type = pregistrypriv->vcs_type;
+ pxmitpriv->frag_len = pregistrypriv->frag_thresh;
+
+ /* mlme_priv */
+ pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
+ pmlmepriv->scan_mode = SCAN_ACTIVE;
+
+ /* ht_priv */
+ pmlmepriv->htpriv.ampdu_enable = false;/* set to disabled */
+
+ /* security_priv */
+ psecuritypriv->binstallGrpkey = _FAIL;
+ psecuritypriv->sw_encrypt = pregistrypriv->software_encrypt;
+ psecuritypriv->sw_decrypt = pregistrypriv->software_decrypt;
+ psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open; /* open system */
+ psecuritypriv->dot11PrivacyAlgrthm = _NO_PRIVACY_;
+ psecuritypriv->dot11PrivacyKeyIndex = 0;
+ psecuritypriv->dot118021XGrpPrivacy = _NO_PRIVACY_;
+ psecuritypriv->dot118021XGrpKeyid = 1;
+ psecuritypriv->ndisauthtype = Ndis802_11AuthModeOpen;
+ psecuritypriv->ndisencryptstatus = Ndis802_11WEPDisabled;
+
+ /* registry_priv */
+ rtw_init_registrypriv_dev_network(padapter);
+ rtw_update_registrypriv_dev_network(padapter);
+
+ /* hal_priv */
+ rtw_hal_def_value_init(padapter);
+
+ /* misc. */
+ padapter->bReadPortCancel = false;
+ padapter->bWritePortCancel = false;
+ padapter->bRxRSSIDisplay = 0;
+ padapter->bNotifyChannelChange = 0;
+#ifdef CONFIG_88EU_P2P
+ padapter->bShowGetP2PState = 1;
+#endif
+ return ret;
+}
+
+u8 rtw_reset_drv_sw(struct adapter *padapter)
+{
+ u8 ret8 = _SUCCESS;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
+
+ /* hal_priv */
+ rtw_hal_def_value_init(padapter);
+ padapter->bReadPortCancel = false;
+ padapter->bWritePortCancel = false;
+ padapter->bRxRSSIDisplay = 0;
+ pmlmepriv->scan_interval = SCAN_INTERVAL;/* 30*2 sec = 60sec */
+
+ padapter->xmitpriv.tx_pkts = 0;
+ padapter->recvpriv.rx_pkts = 0;
+
+ pmlmepriv->LinkDetectInfo.bBusyTraffic = false;
+
+ _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING);
+
+ rtw_hal_sreset_reset_value(padapter);
+ pwrctrlpriv->pwr_state_check_cnts = 0;
+
+ /* mlmeextpriv */
+ padapter->mlmeextpriv.sitesurvey_res.state = SCAN_DISABLE;
+
+ rtw_set_signal_stat_timer(&padapter->recvpriv);
+
+ return ret8;
+}
+
+u8 rtw_init_drv_sw(struct adapter *padapter)
+{
+ u8 ret8 = _SUCCESS;
+
+_func_enter_;
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_init_drv_sw\n"));
+
+ if ((rtw_init_cmd_priv(&padapter->cmdpriv)) == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init cmd_priv\n"));
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ padapter->cmdpriv.padapter = padapter;
+
+ if ((rtw_init_evt_priv(&padapter->evtpriv)) == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init evt_priv\n"));
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ if (rtw_init_mlme_priv(padapter) == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init mlme_priv\n"));
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+#ifdef CONFIG_88EU_P2P
+ rtw_init_wifidirect_timers(padapter);
+ init_wifidirect_info(padapter, P2P_ROLE_DISABLE);
+ reset_global_wifidirect_info(padapter);
+#endif /* CONFIG_88EU_P2P */
+
+ if (init_mlme_ext_priv(padapter) == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("\n Can't init mlme_ext_priv\n"));
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ if (_rtw_init_xmit_priv(&padapter->xmitpriv, padapter) == _FAIL) {
+ DBG_88E("Can't _rtw_init_xmit_priv\n");
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ if (_rtw_init_recv_priv(&padapter->recvpriv, padapter) == _FAIL) {
+ DBG_88E("Can't _rtw_init_recv_priv\n");
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ if (_rtw_init_sta_priv(&padapter->stapriv) == _FAIL) {
+ DBG_88E("Can't _rtw_init_sta_priv\n");
+ ret8 = _FAIL;
+ goto exit;
+ }
+
+ padapter->stapriv.padapter = padapter;
+
+ rtw_init_bcmc_stainfo(padapter);
+
+ rtw_init_pwrctrl_priv(padapter);
+
+ if (init_mp_priv(padapter) == _FAIL)
+ DBG_88E("%s: initialize MP private data Fail!\n", __func__);
+
+ ret8 = rtw_init_default_value(padapter);
+
+ rtw_hal_dm_init(padapter);
+ rtw_hal_sw_led_init(padapter);
+
+ rtw_hal_sreset_init(padapter);
+
+ _rtw_spinlock_init(&padapter->br_ext_lock);
+
+exit:
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_init_drv_sw\n"));
+
+ _func_exit_;
+
+ return ret8;
+}
+
+void rtw_cancel_all_timer(struct adapter *padapter)
+{
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+rtw_cancel_all_timer\n"));
+
+ _cancel_timer_ex(&padapter->mlmepriv.assoc_timer);
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer:cancel association timer complete!\n"));
+
+ _cancel_timer_ex(&padapter->mlmepriv.scan_to_timer);
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer:cancel scan_to_timer!\n"));
+
+ _cancel_timer_ex(&padapter->mlmepriv.dynamic_chk_timer);
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer:cancel dynamic_chk_timer!\n"));
+
+ /* cancel sw led timer */
+ rtw_hal_sw_led_deinit(padapter);
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("rtw_cancel_all_timer:cancel DeInitSwLeds!\n"));
+
+ _cancel_timer_ex(&padapter->pwrctrlpriv.pwr_state_check_timer);
+
+ _cancel_timer_ex(&padapter->recvpriv.signal_stat_timer);
+ /* cancel dm timer */
+ rtw_hal_dm_deinit(padapter);
+}
+
+u8 rtw_free_drv_sw(struct adapter *padapter)
+{
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("==>rtw_free_drv_sw"));
+
+ /* we can call rtw_p2p_enable here, but: */
+ /* 1. rtw_p2p_enable may have IO operation */
+ /* 2. rtw_p2p_enable is bundled with wext interface */
+ #ifdef CONFIG_88EU_P2P
+ {
+ struct wifidirect_info *pwdinfo = &padapter->wdinfo;
+ if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
+ _cancel_timer_ex(&pwdinfo->find_phase_timer);
+ _cancel_timer_ex(&pwdinfo->restore_p2p_state_timer);
+ _cancel_timer_ex(&pwdinfo->pre_tx_scan_timer);
+ rtw_p2p_set_state(pwdinfo, P2P_STATE_NONE);
+ }
+ }
+ #endif
+
+
+ _rtw_spinlock_free(&padapter->br_ext_lock);
+
+ free_mlme_ext_priv(&padapter->mlmeextpriv);
+
+ rtw_free_cmd_priv(&padapter->cmdpriv);
+
+ rtw_free_evt_priv(&padapter->evtpriv);
+
+ rtw_free_mlme_priv(&padapter->mlmepriv);
+ _rtw_free_xmit_priv(&padapter->xmitpriv);
+
+ _rtw_free_sta_priv(&padapter->stapriv); /* will free bcmc_stainfo here */
+
+ _rtw_free_recv_priv(&padapter->recvpriv);
+
+ rtw_free_pwrctrl_priv(padapter);
+
+ rtw_hal_free_data(padapter);
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("<== rtw_free_drv_sw\n"));
+
+ /* free the old_pnetdev */
+ if (padapter->rereg_nd_name_priv.old_pnetdev) {
+ free_netdev(padapter->rereg_nd_name_priv.old_pnetdev);
+ padapter->rereg_nd_name_priv.old_pnetdev = NULL;
+ }
+
+ /* clear pbuddy_adapter to avoid accessing a wrong pointer. */
+ if (padapter->pbuddy_adapter != NULL)
+ padapter->pbuddy_adapter->pbuddy_adapter = NULL;
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-rtw_free_drv_sw\n"));
+
+ return _SUCCESS;
+}
+
+void netdev_br_init(struct net_device *netdev)
+{
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(netdev);
+
+ rcu_read_lock();
+
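+ /* rx_handler_data is set only when an rx_handler is attached, e.g. when this interface is enslaved to a bridge. */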
+ if (rcu_dereference(adapter->pnetdev->rx_handler_data)) {
+ struct net_device *br_netdev;
+ struct net *devnet = NULL;
+
+ devnet = dev_net(netdev);
+ br_netdev = dev_get_by_name(devnet, CONFIG_BR_EXT_BRNAME);
+ if (br_netdev) {
+ memcpy(adapter->br_mac, br_netdev->dev_addr, ETH_ALEN);
+ dev_put(br_netdev);
+ } else {
+ pr_info("%s()-%d: dev_get_by_name(%s) failed!",
+ __func__, __LINE__, CONFIG_BR_EXT_BRNAME);
+ }
+ }
+ adapter->ethBrExtInfo.addPPPoETag = 1;
+
+ rcu_read_unlock();
+}
+
+int _netdev_open(struct net_device *pnetdev)
+{
+ uint status;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+88eu_drv - dev_open\n"));
+ DBG_88E("+88eu_drv - drv_open, bup =%d\n", padapter->bup);
+
+ if (pwrctrlpriv->ps_flag) {
+ padapter->net_closed = false;
+ goto netdev_open_normal_process;
+ }
+
+ if (!padapter->bup) {
+ padapter->bDriverStopped = false;
+ padapter->bSurpriseRemoved = false;
+ padapter->bCardDisableWOHSM = false;
+
+ status = rtw_hal_init(padapter);
+ if (status == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("rtl88eu_hal_init(): Can't init h/w!\n"));
+ goto netdev_open_error;
+ }
+
+ pr_info("MAC Address = %pM\n", pnetdev->dev_addr);
+
+ status = rtw_start_drv_threads(padapter);
+ if (status == _FAIL) {
+ pr_info("Initialize driver software resource Failed!\n");
+ goto netdev_open_error;
+ }
+
+ if (init_hw_mlme_ext(padapter) == _FAIL) {
+ pr_info("can't init mlme_ext_priv\n");
+ goto netdev_open_error;
+ }
+ if (padapter->intf_start)
+ padapter->intf_start(padapter);
+ rtw_proc_init_one(pnetdev);
+
+ rtw_led_control(padapter, LED_CTL_NO_LINK);
+
+ padapter->bup = true;
+ }
+ padapter->net_closed = false;
+
+ _set_timer(&padapter->mlmepriv.dynamic_chk_timer, 2000);
+
+ padapter->pwrctrlpriv.bips_processing = false;
+ rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
+
+ if (!rtw_netif_queue_stopped(pnetdev))
+ rtw_netif_start_queue(pnetdev);
+ else
+ rtw_netif_wake_queue(pnetdev);
+
+ netdev_br_init(pnetdev);
+
+netdev_open_normal_process:
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-88eu_drv - dev_open\n"));
+ DBG_88E("-88eu_drv - drv_open, bup =%d\n", padapter->bup);
+ return 0;
+
+netdev_open_error:
+ padapter->bup = false;
+ netif_carrier_off(pnetdev);
+ rtw_netif_stop_queue(pnetdev);
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("-88eu_drv - dev_open, fail!\n"));
+ DBG_88E("-88eu_drv - drv_open fail, bup =%d\n", padapter->bup);
+ return -1;
+}
+
+int netdev_open(struct net_device *pnetdev)
+{
+ int ret;
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+
+ _enter_critical_mutex(padapter->hw_init_mutex, NULL);
+ ret = _netdev_open(pnetdev);
+ _exit_critical_mutex(padapter->hw_init_mutex, NULL);
+ return ret;
+}
+
+static int ips_netdrv_open(struct adapter *padapter)
+{
+ int status = _SUCCESS;
+ padapter->net_closed = false;
+ DBG_88E("===> %s.........\n", __func__);
+
+ padapter->bDriverStopped = false;
+ padapter->bSurpriseRemoved = false;
+ padapter->bCardDisableWOHSM = false;
+
+ status = rtw_hal_init(padapter);
+ if (status == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_, ("ips_netdrv_open(): Can't init h/w!\n"));
+ goto netdev_open_error;
+ }
+
+ if (padapter->intf_start)
+ padapter->intf_start(padapter);
+
+ rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
+ _set_timer(&padapter->mlmepriv.dynamic_chk_timer, 5000);
+
+ return _SUCCESS;
+
+netdev_open_error:
+ DBG_88E("-ips_netdrv_open - drv_open failure, bup =%d\n", padapter->bup);
+
+ return _FAIL;
+}
+
+
+int rtw_ips_pwr_up(struct adapter *padapter)
+{
+ int result;
+ u32 start_time = rtw_get_current_time();
+ DBG_88E("===> rtw_ips_pwr_up..............\n");
+ rtw_reset_drv_sw(padapter);
+
+ result = ips_netdrv_open(padapter);
+
+ rtw_led_control(padapter, LED_CTL_NO_LINK);
+
+ DBG_88E("<=== rtw_ips_pwr_up.............. in %dms\n", rtw_get_passing_time_ms(start_time));
+ return result;
+}
+
+void rtw_ips_pwr_down(struct adapter *padapter)
+{
+ u32 start_time = rtw_get_current_time();
+ DBG_88E("===> rtw_ips_pwr_down...................\n");
+
+ padapter->bCardDisableWOHSM = true;
+ padapter->net_closed = true;
+
+ rtw_led_control(padapter, LED_CTL_POWER_OFF);
+
+ rtw_ips_dev_unload(padapter);
+ padapter->bCardDisableWOHSM = false;
+ DBG_88E("<=== rtw_ips_pwr_down..................... in %dms\n", rtw_get_passing_time_ms(start_time));
+}
+
+void rtw_ips_dev_unload(struct adapter *padapter)
+{
+ DBG_88E("====> %s...\n", __func__);
+
+ rtw_hal_set_hwreg(padapter, HW_VAR_FIFO_CLEARN_UP, NULL);
+
+ if (padapter->intf_stop)
+ padapter->intf_stop(padapter);
+
+ /* s5. */
+ if (!padapter->bSurpriseRemoved)
+ rtw_hal_deinit(padapter);
+}
+
+int pm_netdev_open(struct net_device *pnetdev, u8 bnormal)
+{
+ int status;
+
+ if (bnormal)
+ status = netdev_open(pnetdev);
+ else
+ status = (_SUCCESS == ips_netdrv_open((struct adapter *)rtw_netdev_priv(pnetdev))) ? (0) : (-1);
+ return status;
+}
+
+int netdev_close(struct net_device *pnetdev)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("+88eu_drv - drv_close\n"));
+
+ if (padapter->pwrctrlpriv.bInternalAutoSuspend) {
+ if (padapter->pwrctrlpriv.rf_pwrstate == rf_off)
+ padapter->pwrctrlpriv.ps_flag = true;
+ }
+ padapter->net_closed = true;
+
+ if (padapter->pwrctrlpriv.rf_pwrstate == rf_on) {
+ DBG_88E("(2)88eu_drv - drv_close, bup =%d, hw_init_completed =%d\n",
+ padapter->bup, padapter->hw_init_completed);
+
+ /* s1. */
+ if (pnetdev) {
+ if (!rtw_netif_queue_stopped(pnetdev))
+ rtw_netif_stop_queue(pnetdev);
+ }
+
+ /* s2. */
+ LeaveAllPowerSaveMode(padapter);
+ rtw_disassoc_cmd(padapter, 500, false);
+ /* s2-2. indicate disconnect to os */
+ rtw_indicate_disconnect(padapter);
+ /* s2-3. */
+ rtw_free_assoc_resources(padapter, 1);
+ /* s2-4. */
+ rtw_free_network_queue(padapter, true);
+ /* Close LED */
+ rtw_led_control(padapter, LED_CTL_POWER_OFF);
+ }
+
+ nat25_db_cleanup(padapter);
+
+#ifdef CONFIG_88EU_P2P
+ rtw_p2p_enable(padapter, P2P_ROLE_DISABLE);
+#endif /* CONFIG_88EU_P2P */
+
+ RT_TRACE(_module_os_intfs_c_, _drv_info_, ("-88eu_drv - drv_close\n"));
+ DBG_88E("-88eu_drv - drv_close, bup =%d\n", padapter->bup);
+ return 0;
+}
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
new file mode 100644
index 00000000000..4e0bfb7e153
--- /dev/null
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -0,0 +1,815 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+
+#define _OSDEP_SERVICE_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <linux/vmalloc.h>
+#include <rtw_ioctl_set.h>
+
+/*
+ * Translate the OS-dependent @error_code to an OS-independent RTW_STATUS_CODE
+ * @return: one of RTW_STATUS_CODE
+ */
+inline int RTW_STATUS_CODE(int error_code)
+{
+ if (error_code >= 0)
+ return _SUCCESS;
+ return _FAIL;
+}
+
+u32 rtw_atoi(u8 *s)
+{
+ int num = 0, flag = 0;
+ int i;
+ for (i = 0; i <= strlen(s); i++) {
+ if (s[i] >= '0' && s[i] <= '9')
+ num = num * 10 + s[i] - '0';
+ else if (s[0] == '-' && i == 0)
+ flag = 1;
+ else
+ break;
+ }
+ if (flag == 1)
+ num = num * -1;
+ return num;
+}
+
+inline u8 *_rtw_vmalloc(u32 sz)
+{
+ u8 *pbuf;
+ pbuf = vmalloc(sz);
+ return pbuf;
+}
+
+inline u8 *_rtw_zvmalloc(u32 sz)
+{
+ u8 *pbuf;
+ pbuf = _rtw_vmalloc(sz);
+ if (pbuf != NULL)
+ memset(pbuf, 0, sz);
+ return pbuf;
+}
+
+inline void _rtw_vmfree(u8 *pbuf, u32 sz)
+{
+ vfree(pbuf);
+}
+
+u8 *_rtw_malloc(u32 sz)
+{
+ u8 *pbuf = NULL;
+
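+ /* This may be called from interrupt context, so pick the allocation flags accordingly. */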
+ pbuf = kmalloc(sz, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
+ return pbuf;
+}
+
+u8 *_rtw_zmalloc(u32 sz)
+{
+ u8 *pbuf = _rtw_malloc(sz);
+
+ if (pbuf != NULL)
+ memset(pbuf, 0, sz);
+ return pbuf;
+}
+
+void *rtw_malloc2d(int h, int w, int size)
+{
+ int j;
+
+ void **a = (void **)rtw_zmalloc(h*sizeof(void *) + h*w*size);
+ if (a == NULL) {
+ pr_info("%s: alloc memory fail!\n", __func__);
+ return NULL;
+ }
+
+ for (j = 0; j < h; j++)
+ a[j] = ((char *)(a+h)) + j*w*size;
+
+ return a;
+}
+
+void rtw_mfree2d(void *pbuf, int h, int w, int size)
+{
+ kfree(pbuf);
+}
+
+int _rtw_memcmp(void *dst, void *src, u32 sz)
+{
+/* Under Linux/GNU/glibc, memcmp returns 0 when the two memory
+ * chunks are identical. */
+ if (!(memcmp(dst, src, sz)))
+ return true;
+ else
+ return false;
+}
+
+void _rtw_memset(void *pbuf, int c, u32 sz)
+{
+ memset(pbuf, c, sz);
+}
+
+void _rtw_init_listhead(struct list_head *list)
+{
+ INIT_LIST_HEAD(list);
+}
+
+/*
+ * For the following list_xxx operations, the caller must guarantee
+ * an atomic context. Otherwise, there will be race conditions.
+ */
+u32 rtw_is_list_empty(struct list_head *phead)
+{
+ if (list_empty(phead))
+ return true;
+ else
+ return false;
+}
+
+void rtw_list_insert_head(struct list_head *plist, struct list_head *phead)
+{
+ list_add(plist, phead);
+}
+
+void rtw_list_insert_tail(struct list_head *plist, struct list_head *phead)
+{
+ list_add_tail(plist, phead);
+}
+
+/*
+Caller must check if the list is empty before calling rtw_list_delete
+*/
+
+void _rtw_init_sema(struct semaphore *sema, int init_val)
+{
+ sema_init(sema, init_val);
+}
+
+void _rtw_free_sema(struct semaphore *sema)
+{
+}
+
+void _rtw_up_sema(struct semaphore *sema)
+{
+ up(sema);
+}
+
+u32 _rtw_down_sema(struct semaphore *sema)
+{
+ if (down_interruptible(sema))
+ return _FAIL;
+ else
+ return _SUCCESS;
+}
+
+void _rtw_mutex_init(struct mutex *pmutex)
+{
+ mutex_init(pmutex);
+}
+
+void _rtw_mutex_free(struct mutex *pmutex)
+{
+ mutex_destroy(pmutex);
+}
+
+void _rtw_spinlock_init(spinlock_t *plock)
+{
+ spin_lock_init(plock);
+}
+
+void _rtw_spinlock_free(spinlock_t *plock)
+{
+}
+
+void _rtw_init_queue(struct __queue *pqueue)
+{
+ _rtw_init_listhead(&(pqueue->queue));
+ _rtw_spinlock_init(&(pqueue->lock));
+}
+
+u32 _rtw_queue_empty(struct __queue *pqueue)
+{
+ return rtw_is_list_empty(&(pqueue->queue));
+}
+
+u32 rtw_end_of_queue_search(struct list_head *head, struct list_head *plist)
+{
+ if (head == plist)
+ return true;
+ else
+ return false;
+}
+
+u32 rtw_get_current_time(void)
+{
+ return jiffies;
+}
+
+inline u32 rtw_systime_to_ms(u32 systime)
+{
+ return systime * 1000 / HZ;
+}
+
+inline u32 rtw_ms_to_systime(u32 ms)
+{
+ return ms * HZ / 1000;
+}
+
+/* the input parameter start use the same unit as returned by
+ * rtw_get_current_time */
+inline s32 rtw_get_passing_time_ms(u32 start)
+{
+ return rtw_systime_to_ms(jiffies-start);
+}
+
+inline s32 rtw_get_time_interval_ms(u32 start, u32 end)
+{
+ return rtw_systime_to_ms(end-start);
+}
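+
+/*
+ * Timing sketch (do_work() is hypothetical): timestamps come from
+ * rtw_get_current_time() (jiffies) and are converted back to milliseconds
+ * with the helpers above.
+ *
+ *    u32 start = rtw_get_current_time();
+ *
+ *    do_work();
+ *    DBG_88E("took %d ms\n", rtw_get_passing_time_ms(start));
+ */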
+
+void rtw_sleep_schedulable(int ms)
+{
+ u32 delta;
+
+ delta = (ms * HZ) / 1000; /* convert ms to jiffies */
+ if (delta == 0)
+ delta = 1;/* 1 ms */
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (schedule_timeout(delta) != 0)
+ return;
+}
+
+void rtw_msleep_os(int ms)
+{
+ msleep((unsigned int)ms);
+}
+
+void rtw_usleep_os(int us)
+{
+ /* msleep() only has millisecond resolution; round sub-ms delays up */
+ if (us >= 1000)
+ msleep(us / 1000);
+ else
+ msleep(1);
+}
+
+void rtw_mdelay_os(int ms)
+{
+ mdelay((unsigned long)ms);
+}
+
+void rtw_udelay_os(int us)
+{
+ udelay((unsigned long)us);
+}
+
+void rtw_yield_os(void)
+{
+ yield();
+}
+
+#define RTW_SUSPEND_LOCK_NAME "rtw_wifi"
+
+inline void rtw_suspend_lock_init(void)
+{
+}
+
+inline void rtw_suspend_lock_uninit(void)
+{
+}
+
+inline void rtw_lock_suspend(void)
+{
+}
+
+inline void rtw_unlock_suspend(void)
+{
+}
+
+inline void ATOMIC_SET(ATOMIC_T *v, int i)
+{
+ atomic_set(v, i);
+}
+
+inline int ATOMIC_READ(ATOMIC_T *v)
+{
+ return atomic_read(v);
+}
+
+inline void ATOMIC_ADD(ATOMIC_T *v, int i)
+{
+ atomic_add(i, v);
+}
+
+inline void ATOMIC_SUB(ATOMIC_T *v, int i)
+{
+ atomic_sub(i, v);
+}
+
+inline void ATOMIC_INC(ATOMIC_T *v)
+{
+ atomic_inc(v);
+}
+
+inline void ATOMIC_DEC(ATOMIC_T *v)
+{
+ atomic_dec(v);
+}
+
+inline int ATOMIC_ADD_RETURN(ATOMIC_T *v, int i)
+{
+ return atomic_add_return(i, v);
+}
+
+inline int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i)
+{
+ return atomic_sub_return(i, v);
+}
+
+inline int ATOMIC_INC_RETURN(ATOMIC_T *v)
+{
+ return atomic_inc_return(v);
+}
+
+inline int ATOMIC_DEC_RETURN(ATOMIC_T *v)
+{
+ return atomic_dec_return(v);
+}
+
+/* Open a file with the specified @param path, @param flag and @param mode
+ * @param fpp pointer to a struct file pointer, set when the file is opened successfully
+ * @param path the path of the file to open
+ * @param flag file operation flags, please refer to the Linux documentation
+ * @param mode please refer to the Linux documentation
+ * @return Linux specific error code
+ */
+static int openfile(struct file **fpp, char *path, int flag, int mode)
+{
+ struct file *fp;
+
+ fp = filp_open(path, flag, mode);
+ if (IS_ERR(fp)) {
+ *fpp = NULL;
+ return PTR_ERR(fp);
+ } else {
+ *fpp = fp;
+ return 0;
+ }
+}
+
+/* Close the file with the specific @param fp
+ * @param fp the pointer of struct file to close
+ * @return always 0
+ */
+static int closefile(struct file *fp)
+{
+ filp_close(fp, NULL);
+ return 0;
+}
+
+static int readfile(struct file *fp, char __user *buf, int len)
+{
+ int rlen = 0, sum = 0;
+
+ if (!fp->f_op || !fp->f_op->read)
+ return -EPERM;
+
+ while (sum < len) {
+ rlen = fp->f_op->read(fp, buf+sum, len-sum, &fp->f_pos);
+ if (rlen > 0)
+ sum += rlen;
+ else if (0 != rlen)
+ return rlen;
+ else
+ break;
+ }
+ return sum;
+}
+
+static int writefile(struct file *fp, char __user *buf, int len)
+{
+ int wlen = 0, sum = 0;
+
+ if (!fp->f_op || !fp->f_op->write)
+ return -EPERM;
+
+ while (sum < len) {
+ wlen = fp->f_op->write(fp, buf+sum, len-sum, &fp->f_pos);
+ if (wlen > 0)
+ sum += wlen;
+ else if (0 != wlen)
+ return wlen;
+ else
+ break;
+ }
+ return sum;
+}
+
+/* Test if the file at the specified @param path exists and is readable
+ * @param path the path of the file to test
+ * @return Linux specific error code
+ */
+static int isfilereadable(char *path)
+{
+ struct file *fp;
+ int ret = 0;
+ mm_segment_t oldfs;
+ char __user buf;
+
+ fp = filp_open(path, O_RDONLY, 0);
+ if (IS_ERR(fp)) {
+ ret = PTR_ERR(fp);
+ } else {
+ oldfs = get_fs(); set_fs(get_ds());
+
+ if (1 != readfile(fp, &buf, 1))
+ ret = -EINVAL; /* fp is valid here, so PTR_ERR() does not apply */
+
+ set_fs(oldfs);
+ filp_close(fp, NULL);
+ }
+ return ret;
+}
+
+/* Open the file with @param path and retrieve the file content into
+ * memory starting from @param buf for @param sz at most
+ * @param path the path of the file to open and read
+ * @param buf the starting address of the buffer to store file content
+ * @param sz how many bytes to read at most
+ * @return the byte we've read, or Linux specific error code
+ */
+static int retrievefromfile(char *path, u8 __user *buf, u32 sz)
+{
+ int ret = -1;
+ mm_segment_t oldfs;
+ struct file *fp;
+
+ if (path && buf) {
+ ret = openfile(&fp, path, O_RDONLY, 0);
+ if (0 == ret) {
+ DBG_88E("%s openfile path:%s fp =%p\n", __func__,
+ path, fp);
+
+ oldfs = get_fs(); set_fs(get_ds());
+ ret = readfile(fp, buf, sz);
+ set_fs(oldfs);
+ closefile(fp);
+
+ DBG_88E("%s readfile, ret:%d\n", __func__, ret);
+
+ } else {
+ DBG_88E("%s openfile path:%s Fail, ret:%d\n", __func__,
+ path, ret);
+ }
+ } else {
+ DBG_88E("%s NULL pointer\n", __func__);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/*
+* Open the file with @param path and write @param sz bytes of data starting from @param buf into the file
+* @param path the path of the file to open and write
+* @param buf the starting address of the data to write into file
+* @param sz how many bytes to write at most
+* @return the byte we've written, or Linux specific error code
+*/
+static int storetofile(char *path, u8 __user *buf, u32 sz)
+{
+ int ret = 0;
+ mm_segment_t oldfs;
+ struct file *fp;
+
+ if (path && buf) {
+ ret = openfile(&fp, path, O_CREAT|O_WRONLY, 0666);
+ if (0 == ret) {
+ DBG_88E("%s openfile path:%s fp =%p\n", __func__, path, fp);
+
+ oldfs = get_fs(); set_fs(get_ds());
+ ret = writefile(fp, buf, sz);
+ set_fs(oldfs);
+ closefile(fp);
+
+ DBG_88E("%s writefile, ret:%d\n", __func__, ret);
+
+ } else {
+ DBG_88E("%s openfile path:%s Fail, ret:%d\n", __func__, path, ret);
+ }
+ } else {
+ DBG_88E("%s NULL pointer\n", __func__);
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/*
+* Test if the file at the specified @param path exists and is readable
+* @param path the path of the file to test
+* @return true or false
+*/
+int rtw_is_file_readable(char *path)
+{
+ if (isfilereadable(path) == 0)
+ return true;
+ else
+ return false;
+}
+
+/*
+* Open the file with @param path and retrieve the file content into memory starting from @param buf for @param sz at most
+* @param path the path of the file to open and read
+* @param buf the starting address of the buffer to store file content
+* @param sz how many bytes to read at most
+* @return the byte we've read
+*/
+int rtw_retrive_from_file(char *path, u8 __user *buf, u32 sz)
+{
+ int ret = retrievefromfile(path, buf, sz);
+
+ return ret >= 0 ? ret : 0;
+}
+
+/*
+ * Open the file with @param path and write @param sz bytes of data
+ * starting from @param buf into the file
+ * @param path the path of the file to open and write
+ * @param buf the starting address of the data to write into file
+ * @param sz how many bytes to write at most
+ * @return the byte we've written
+ */
+int rtw_store_to_file(char *path, u8 __user *buf, u32 sz)
+{
+ int ret = storetofile(path, buf, sz);
+ return ret >= 0 ? ret : 0;
+}
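+
+/*
+ * File helper sketch (the path is an example, not something the driver
+ * requires): check readability first, then pull the contents into a
+ * caller-supplied buffer.
+ *
+ *    u8 buf[64];
+ *
+ *    if (rtw_is_file_readable("/lib/firmware/rtl8188e.cfg"))
+ *        rtw_retrive_from_file("/lib/firmware/rtl8188e.cfg",
+ *                              buf, sizeof(buf));
+ */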
+
+struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv,
+ void *old_priv)
+{
+ struct net_device *pnetdev;
+ struct rtw_netdev_priv_indicator *pnpi;
+
+ pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
+ if (!pnetdev)
+ goto RETURN;
+
+ pnpi = netdev_priv(pnetdev);
+ pnpi->priv = old_priv;
+ pnpi->sizeof_priv = sizeof_priv;
+
+RETURN:
+ return pnetdev;
+}
+
+struct net_device *rtw_alloc_etherdev(int sizeof_priv)
+{
+ struct net_device *pnetdev;
+ struct rtw_netdev_priv_indicator *pnpi;
+
+ pnetdev = alloc_etherdev_mq(sizeof(struct rtw_netdev_priv_indicator), 4);
+ if (!pnetdev)
+ goto RETURN;
+
+ pnpi = netdev_priv(pnetdev);
+
+ pnpi->priv = rtw_zvmalloc(sizeof_priv);
+ if (!pnpi->priv) {
+ free_netdev(pnetdev);
+ pnetdev = NULL;
+ goto RETURN;
+ }
+
+ pnpi->sizeof_priv = sizeof_priv;
+RETURN:
+ return pnetdev;
+}
+
+void rtw_free_netdev(struct net_device *netdev)
+{
+ struct rtw_netdev_priv_indicator *pnpi;
+
+ if (!netdev)
+ goto RETURN;
+
+ pnpi = netdev_priv(netdev);
+
+ if (!pnpi->priv)
+ goto RETURN;
+
+ rtw_vmfree(pnpi->priv, pnpi->sizeof_priv);
+ free_netdev(netdev);
+
+RETURN:
+ return;
+}
+
+int rtw_change_ifname(struct adapter *padapter, const char *ifname)
+{
+ struct net_device *pnetdev;
+ struct net_device *cur_pnetdev;
+ struct rereg_nd_name_data *rereg_priv;
+ int ret;
+
+ if (!padapter)
+ goto error;
+
+ cur_pnetdev = padapter->pnetdev;
+ rereg_priv = &padapter->rereg_nd_name_priv;
+
+ /* free the old_pnetdev */
+ if (rereg_priv->old_pnetdev) {
+ free_netdev(rereg_priv->old_pnetdev);
+ rereg_priv->old_pnetdev = NULL;
+ }
+
+ if (!rtnl_is_locked())
+ unregister_netdev(cur_pnetdev);
+ else
+ unregister_netdevice(cur_pnetdev);
+
+ rtw_proc_remove_one(cur_pnetdev);
+
+ rereg_priv->old_pnetdev = cur_pnetdev;
+
+ pnetdev = rtw_init_netdev(padapter);
+ if (!pnetdev) {
+ ret = -1;
+ goto error;
+ }
+
+ SET_NETDEV_DEV(pnetdev, dvobj_to_dev(adapter_to_dvobj(padapter)));
+
+ rtw_init_netdev_name(pnetdev, ifname);
+
+ memcpy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
+
+ if (!rtnl_is_locked())
+ ret = register_netdev(pnetdev);
+ else
+ ret = register_netdevice(pnetdev);
+ if (ret != 0) {
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_,
+ ("register_netdev() failed\n"));
+ goto error;
+ }
+ rtw_proc_init_one(pnetdev);
+ return 0;
+error:
+ return -1;
+}
+
+u64 rtw_modular64(u64 x, u64 y)
+{
+ return do_div(x, y);
+}
+
+u64 rtw_division64(u64 x, u64 y)
+{
+ do_div(x, y);
+ return x;
+}
+
+void rtw_buf_free(u8 **buf, u32 *buf_len)
+{
+ *buf_len = 0;
+ kfree(*buf);
+ *buf = NULL;
+}
+
+void rtw_buf_update(u8 **buf, u32 *buf_len, u8 *src, u32 src_len)
+{
+ u32 ori_len = 0, dup_len = 0;
+ u8 *ori = NULL;
+ u8 *dup = NULL;
+
+ if (!buf || !buf_len)
+ return;
+
+ if (!src || !src_len)
+ goto keep_ori;
+
+ /* duplicate src */
+ dup = rtw_malloc(src_len);
+ if (dup) {
+ dup_len = src_len;
+ memcpy(dup, src, dup_len);
+ }
+
+keep_ori:
+ ori = *buf;
+ ori_len = *buf_len;
+
+ /* replace buf with dup */
+ *buf_len = 0;
+ *buf = dup;
+ *buf_len = dup_len;
+
+ /* free ori */
+ kfree(ori);
+}
+
+
+/**
+ * rtw_cbuf_full - test if cbuf is full
+ * @cbuf: pointer to struct rtw_cbuf
+ *
+ * Returns: true if cbuf is full
+ */
+inline bool rtw_cbuf_full(struct rtw_cbuf *cbuf)
+{
+ return (cbuf->write + 1) % cbuf->size == cbuf->read;
+}
+
+/**
+ * rtw_cbuf_empty - test if cbuf is empty
+ * @cbuf: pointer to struct rtw_cbuf
+ *
+ * Returns: true if cbuf is empty
+ */
+inline bool rtw_cbuf_empty(struct rtw_cbuf *cbuf)
+{
+ return cbuf->write == cbuf->read;
+}
+
+/**
+ * rtw_cbuf_push - push a pointer into cbuf
+ * @cbuf: pointer to struct rtw_cbuf
+ * @buf: pointer to push in
+ *
+ * Lock-free operation, be careful about the usage context
+ * Returns: _SUCCESS if the push succeeded, _FAIL if cbuf is full
+ */
+bool rtw_cbuf_push(struct rtw_cbuf *cbuf, void *buf)
+{
+ if (rtw_cbuf_full(cbuf))
+ return _FAIL;
+
+ if (0)
+ DBG_88E("%s on %u\n", __func__, cbuf->write);
+ cbuf->bufs[cbuf->write] = buf;
+ cbuf->write = (cbuf->write+1)%cbuf->size;
+
+ return _SUCCESS;
+}
+
+/**
+ * rtw_cbuf_pop - pop a pointer from cbuf
+ * @cbuf: pointer to struct rtw_cbuf
+ *
+ * Lock-free operation, be careful about the usage context
+ * Returns: the pointer popped out, or NULL if cbuf is empty
+ */
+void *rtw_cbuf_pop(struct rtw_cbuf *cbuf)
+{
+ void *buf;
+ if (rtw_cbuf_empty(cbuf))
+ return NULL;
+
+ if (0)
+ DBG_88E("%s on %u\n", __func__, cbuf->read);
+ buf = cbuf->bufs[cbuf->read];
+ cbuf->read = (cbuf->read+1)%cbuf->size;
+
+ return buf;
+}
+
+/**
+ * rtw_cbuf_alloc - allocate a rtw_cbuf of the given size and initialize it
+ * @size: number of pointer slots
+ *
+ * Returns: pointer to struct rtw_cbuf, NULL on allocation failure
+ */
+struct rtw_cbuf *rtw_cbuf_alloc(u32 size)
+{
+ struct rtw_cbuf *cbuf;
+
+ cbuf = (struct rtw_cbuf *)rtw_malloc(sizeof(*cbuf) +
+ sizeof(void *)*size);
+
+ if (cbuf) {
+ cbuf->write = 0;
+ cbuf->read = 0;
+ cbuf->size = size;
+ }
+ return cbuf;
+}
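+
+/*
+ * rtw_cbuf usage sketch (cb and ptr are illustrative names; single
+ * producer / single consumer assumed, since the operations above are
+ * lock-free): one slot is always left unused so that the full and empty
+ * states stay distinguishable.
+ *
+ *    struct rtw_cbuf *cb = rtw_cbuf_alloc(8);
+ *
+ *    if (cb && rtw_cbuf_push(cb, ptr) == _SUCCESS)
+ *        ptr = rtw_cbuf_pop(cb);    (pointers come back in FIFO order)
+ */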
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
new file mode 100644
index 00000000000..e2f4e7d7717
--- /dev/null
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -0,0 +1,261 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _RECV_OSDEP_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#include <wifi.h>
+#include <recv_osdep.h>
+
+#include <osdep_intf.h>
+#include <ethernet.h>
+#include <usb_ops.h>
+
+/* init os related resource in struct recv_priv */
+int rtw_os_recv_resource_init(struct recv_priv *precvpriv,
+ struct adapter *padapter)
+{
+ return _SUCCESS;
+}
+
+/* alloc os related resource in union recv_frame */
+int rtw_os_recv_resource_alloc(struct adapter *padapter,
+ union recv_frame *precvframe)
+{
+ precvframe->u.hdr.pkt_newalloc = NULL;
+ precvframe->u.hdr.pkt = NULL;
+ return _SUCCESS;
+}
+
+/* free os related resource in union recv_frame */
+void rtw_os_recv_resource_free(struct recv_priv *precvpriv)
+{
+}
+
+/* alloc os related resource in struct recv_buf */
+int rtw_os_recvbuf_resource_alloc(struct adapter *padapter,
+ struct recv_buf *precvbuf)
+{
+ int res = _SUCCESS;
+
+ precvbuf->irp_pending = false;
+ precvbuf->purb = usb_alloc_urb(0, GFP_KERNEL);
+ if (precvbuf->purb == NULL)
+ res = _FAIL;
+ precvbuf->pskb = NULL;
+ precvbuf->reuse = false;
+ precvbuf->pallocated_buf = NULL;
+ precvbuf->pbuf = NULL;
+ precvbuf->pdata = NULL;
+ precvbuf->phead = NULL;
+ precvbuf->ptail = NULL;
+ precvbuf->pend = NULL;
+ precvbuf->transfer_len = 0;
+ precvbuf->len = 0;
+ return res;
+}
+
+/* free os related resource in struct recv_buf */
+int rtw_os_recvbuf_resource_free(struct adapter *padapter,
+ struct recv_buf *precvbuf)
+{
+ if (precvbuf->purb)
+ usb_free_urb(precvbuf->purb);
+ return _SUCCESS;
+}
+
+void rtw_handle_tkip_mic_err(struct adapter *padapter, u8 bgroup)
+{
+ union iwreq_data wrqu;
+ struct iw_michaelmicfailure ev;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ u32 cur_time = 0;
+
+ if (psecuritypriv->last_mic_err_time == 0) {
+ psecuritypriv->last_mic_err_time = rtw_get_current_time();
+ } else {
+ cur_time = rtw_get_current_time();
+
+ if (cur_time - psecuritypriv->last_mic_err_time < 60*HZ) {
+ psecuritypriv->btkip_countermeasure = true;
+ psecuritypriv->last_mic_err_time = 0;
+ psecuritypriv->btkip_countermeasure_time = cur_time;
+ } else {
+ psecuritypriv->last_mic_err_time = rtw_get_current_time();
+ }
+ }
+
+ _rtw_memset(&ev, 0x00, sizeof(ev));
+ if (bgroup)
+ ev.flags |= IW_MICFAILURE_GROUP;
+ else
+ ev.flags |= IW_MICFAILURE_PAIRWISE;
+
+ ev.src_addr.sa_family = ARPHRD_ETHER;
+ memcpy(ev.src_addr.sa_data, &pmlmepriv->assoc_bssid[0], ETH_ALEN);
+ _rtw_memset(&wrqu, 0x00, sizeof(wrqu));
+ wrqu.data.length = sizeof(ev);
+ wireless_send_event(padapter->pnetdev, IWEVMICHAELMICFAILURE,
+ &wrqu, (char *)&ev);
+}
+
+void rtw_hostapd_mlme_rx(struct adapter *padapter,
+ union recv_frame *precv_frame)
+{
+}
+
+int rtw_recv_indicatepkt(struct adapter *padapter,
+ union recv_frame *precv_frame)
+{
+ struct recv_priv *precvpriv;
+ struct __queue *pfree_recv_queue;
+ struct sk_buff *skb;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+_func_enter_;
+
+ precvpriv = &(padapter->recvpriv);
+ pfree_recv_queue = &(precvpriv->free_recv_queue);
+
+ skb = precv_frame->u.hdr.pkt;
+ if (skb == NULL) {
+ RT_TRACE(_module_recv_osdep_c_, _drv_err_,
+ ("rtw_recv_indicatepkt():skb == NULL something wrong!!!!\n"));
+ goto _recv_indicatepkt_drop;
+ }
+
+ RT_TRACE(_module_recv_osdep_c_, _drv_info_,
+ ("rtw_recv_indicatepkt():skb != NULL !!!\n"));
+ RT_TRACE(_module_recv_osdep_c_, _drv_info_,
+ ("rtw_recv_indicatepkt():precv_frame->u.hdr.rx_head =%p precv_frame->hdr.rx_data =%p\n",
+ precv_frame->u.hdr.rx_head, precv_frame->u.hdr.rx_data));
+ RT_TRACE(_module_recv_osdep_c_, _drv_info_,
+ ("precv_frame->hdr.rx_tail =%p precv_frame->u.hdr.rx_end =%p precv_frame->hdr.len =%d\n",
+ precv_frame->u.hdr.rx_tail, precv_frame->u.hdr.rx_end,
+ precv_frame->u.hdr.len));
+
+ skb->data = precv_frame->u.hdr.rx_data;
+
+ skb_set_tail_pointer(skb, precv_frame->u.hdr.len);
+
+ skb->len = precv_frame->u.hdr.len;
+
+ RT_TRACE(_module_recv_osdep_c_, _drv_info_,
+ ("skb->head =%p skb->data =%p skb->tail =%p skb->end =%p skb->len =%d\n",
+ skb->head, skb->data, skb_tail_pointer(skb),
+ skb_end_pointer(skb), skb->len));
+
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
+ struct sk_buff *pskb2 = NULL;
+ struct sta_info *psta = NULL;
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct rx_pkt_attrib *pattrib = &precv_frame->u.hdr.attrib;
+ int bmcast = IS_MCAST(pattrib->dst);
+
+ if (!_rtw_memcmp(pattrib->dst, myid(&padapter->eeprompriv),
+ ETH_ALEN)) {
+ if (bmcast) {
+ psta = rtw_get_bcmc_stainfo(padapter);
+ pskb2 = skb_clone(skb, GFP_ATOMIC);
+ } else {
+ psta = rtw_get_stainfo(pstapriv, pattrib->dst);
+ }
+
+ if (psta) {
+ struct net_device *pnetdev;
+
+ pnetdev = (struct net_device *)padapter->pnetdev;
+ skb->dev = pnetdev;
+ skb_set_queue_mapping(skb, rtw_recv_select_queue(skb));
+
+ rtw_xmit_entry(skb, pnetdev);
+
+ if (bmcast)
+ skb = pskb2;
+ else
+ goto _recv_indicatepkt_end;
+ }
+ }
+ }
+
+ rcu_read_lock();
+ rcu_dereference(padapter->pnetdev->rx_handler_data);
+ rcu_read_unlock();
+
+ skb->ip_summed = CHECKSUM_NONE;
+ skb->dev = padapter->pnetdev;
+ skb->protocol = eth_type_trans(skb, padapter->pnetdev);
+
+ netif_rx(skb);
+
+_recv_indicatepkt_end:
+
+ /* set the pkt pointer to NULL before rtw_free_recvframe() */
+ precv_frame->u.hdr.pkt = NULL;
+
+ rtw_free_recvframe(precv_frame, pfree_recv_queue);
+
+ RT_TRACE(_module_recv_osdep_c_, _drv_info_,
+ ("\n rtw_recv_indicatepkt :after netif_rx!!!!\n"));
+
+_func_exit_;
+
+ return _SUCCESS;
+
+_recv_indicatepkt_drop:
+
+ /* enqueue back to free_recv_queue */
+ if (precv_frame)
+ rtw_free_recvframe(precv_frame, pfree_recv_queue);
+
+_func_exit_;
+ return _FAIL;
+}
+
+void rtw_os_read_port(struct adapter *padapter, struct recv_buf *precvbuf)
+{
+ struct recv_priv *precvpriv = &padapter->recvpriv;
+
+ precvbuf->ref_cnt--;
+ /* free skb in recv_buf */
+ dev_kfree_skb_any(precvbuf->pskb);
+ precvbuf->pskb = NULL;
+ precvbuf->reuse = false;
+ if (!precvbuf->irp_pending)
+ rtw_read_port(padapter, precvpriv->ff_hwaddr, 0,
+ (unsigned char *)precvbuf);
+}
+
+static void _rtw_reordering_ctrl_timeout_handler(void *func_context)
+{
+ struct recv_reorder_ctrl *preorder_ctrl;
+
+ preorder_ctrl = (struct recv_reorder_ctrl *)func_context;
+ rtw_reordering_ctrl_timeout_handler(preorder_ctrl);
+}
+
+void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl)
+{
+ struct adapter *padapter = preorder_ctrl->padapter;
+
+ _init_timer(&(preorder_ctrl->reordering_ctrl_timer), padapter->pnetdev, _rtw_reordering_ctrl_timeout_handler, preorder_ctrl);
+}
diff --git a/drivers/staging/rtl8188eu/os_dep/rtw_android.c b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
new file mode 100644
index 00000000000..6cf71cc2ca2
--- /dev/null
+++ b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
@@ -0,0 +1,293 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/netdevice.h>
+
+#include <rtw_android.h>
+#include <osdep_service.h>
+#include <rtw_debug.h>
+#include <ioctl_cfg80211.h>
+#include <rtw_ioctl_set.h>
+
+static const char *android_wifi_cmd_str[ANDROID_WIFI_CMD_MAX] = {
+ "START",
+ "STOP",
+ "SCAN-ACTIVE",
+ "SCAN-PASSIVE",
+ "RSSI",
+ "LINKSPEED",
+ "RXFILTER-START",
+ "RXFILTER-STOP",
+ "RXFILTER-ADD",
+ "RXFILTER-REMOVE",
+ "BTCOEXSCAN-START",
+ "BTCOEXSCAN-STOP",
+ "BTCOEXMODE",
+ "SETSUSPENDOPT",
+ "P2P_DEV_ADDR",
+ "SETFWPATH",
+ "SETBAND",
+ "GETBAND",
+ "COUNTRY",
+ "P2P_SET_NOA",
+ "P2P_GET_NOA",
+ "P2P_SET_PS",
+ "SET_AP_WPS_P2P_IE",
+ "MACADDR",
+ "BLOCK",
+ "WFD-ENABLE",
+ "WFD-DISABLE",
+ "WFD-SET-TCPPORT",
+ "WFD-SET-MAXTPUT",
+ "WFD-SET-DEVTYPE",
+};
+
+struct android_wifi_priv_cmd {
+ const char __user *buf;
+ int used_len;
+ int total_len;
+};
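+
+/*
+ * Userspace view (sketch; the socket, the interface name and the exact
+ * private ioctl number are assumptions that depend on how the net_device
+ * wires up rtw_android_priv_cmd()): the command string travels in a user
+ * buffer described by this struct.
+ *
+ *    char cmd_buf[64] = "RSSI";
+ *    struct android_wifi_priv_cmd priv = {
+ *        .buf = cmd_buf,
+ *        .total_len = sizeof(cmd_buf),
+ *    };
+ *    struct ifreq ifr;
+ *
+ *    memset(&ifr, 0, sizeof(ifr));
+ *    strncpy(ifr.ifr_name, "wlan0", IFNAMSIZ);
+ *    ifr.ifr_data = (void *)&priv;
+ *    ioctl(sock, SIOCDEVPRIVATE + 1, &ifr);
+ */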
+
+/**
+ * Local (static) functions and variables
+ */
+
+/* Initialize g_wifi_on to 1 so dhd_bus_start will be called for the first
+ * time (only) in dhd_open; subsequent wifi-on requests will be handled by
+ * wl_android_wifi_on
+ */
+static int g_wifi_on = true;
+
+int rtw_android_cmdstr_to_num(char *cmdstr)
+{
+ int cmd_num;
+ for (cmd_num = 0; cmd_num < ANDROID_WIFI_CMD_MAX; cmd_num++)
+ if (0 == strnicmp(cmdstr, android_wifi_cmd_str[cmd_num],
+ strlen(android_wifi_cmd_str[cmd_num])))
+ break;
+ return cmd_num;
+}
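+
+/*
+ * Example (illustrative): the lookup matches on the command prefix, so both
+ * "LINKSPEED" and "LINKSPEED wlan0" resolve to ANDROID_WIFI_CMD_LINKSPEED,
+ * while an unknown string falls through and returns ANDROID_WIFI_CMD_MAX.
+ *
+ *    cmd_num = rtw_android_cmdstr_to_num("LINKSPEED wlan0");
+ */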
+
+static int rtw_android_get_rssi(struct net_device *net, char *command,
+ int total_len)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(net);
+ struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct wlan_network *pcur_network = &pmlmepriv->cur_network;
+ int bytes_written = 0;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ bytes_written += snprintf(&command[bytes_written], total_len,
+ "%s rssi %d",
+ pcur_network->network.Ssid.Ssid,
+ padapter->recvpriv.rssi);
+ }
+ return bytes_written;
+}
+
+static int rtw_android_get_link_speed(struct net_device *net, char *command,
+ int total_len)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(net);
+ int bytes_written;
+ u16 link_speed;
+
+ link_speed = rtw_get_cur_max_rate(padapter) / 10;
+ bytes_written = snprintf(command, total_len, "LinkSpeed %d",
+ link_speed);
+ return bytes_written;
+}
+
+static int rtw_android_get_macaddr(struct net_device *net, char *command,
+ int total_len)
+{
+ int bytes_written;
+
+ bytes_written = snprintf(command, total_len, "Macaddr = %pM",
+ net->dev_addr);
+ return bytes_written;
+}
+
+static int android_set_cntry(struct net_device *net, char *command,
+ int total_len)
+{
+ struct adapter *adapter = (struct adapter *)rtw_netdev_priv(net);
+ char *country_code = command + strlen(android_wifi_cmd_str[ANDROID_WIFI_CMD_COUNTRY]) + 1;
+ int ret;
+
+ ret = rtw_set_country(adapter, country_code);
+ return (ret == _SUCCESS) ? 0 : -1;
+}
+
+static int android_get_p2p_addr(struct net_device *net, char *command,
+ int total_len)
+{
+ /* We use the same address as our HW MAC address */
+ memcpy(command, net->dev_addr, ETH_ALEN);
+ return ETH_ALEN;
+}
+
+static int rtw_android_set_block(struct net_device *net, char *command,
+ int total_len)
+{
+ return 0;
+}
+
+int rtw_android_priv_cmd(struct net_device *net, struct ifreq *ifr, int cmd)
+{
+ int ret = 0;
+ char *command = NULL;
+ int cmd_num;
+ int bytes_written = 0;
+ struct android_wifi_priv_cmd priv_cmd;
+
+ rtw_lock_suspend();
+ if (!ifr->ifr_data) {
+ ret = -EINVAL;
+ goto exit;
+ }
+ if (copy_from_user(&priv_cmd, ifr->ifr_data,
+ sizeof(struct android_wifi_priv_cmd))) {
+ ret = -EFAULT;
+ goto exit;
+ }
+ command = kmalloc(priv_cmd.total_len, GFP_KERNEL);
+ if (!command) {
+ DBG_88E("%s: failed to allocate memory\n", __func__);
+ ret = -ENOMEM;
+ goto exit;
+ }
+ if (!access_ok(VERIFY_READ, priv_cmd.buf, priv_cmd.total_len)) {
+ DBG_88E("%s: failed to access memory\n", __func__);
+ ret = -EFAULT;
+ goto exit;
+ }
+ if (copy_from_user(command, (char __user *)priv_cmd.buf,
+ priv_cmd.total_len)) {
+ ret = -EFAULT;
+ goto exit;
+ }
+ DBG_88E("%s: Android private cmd \"%s\" on %s\n",
+ __func__, command, ifr->ifr_name);
+ cmd_num = rtw_android_cmdstr_to_num(command);
+ switch (cmd_num) {
+ case ANDROID_WIFI_CMD_START:
+ goto response;
+ case ANDROID_WIFI_CMD_SETFWPATH:
+ goto response;
+ }
+ if (!g_wifi_on) {
+ DBG_88E("%s: Ignore private cmd \"%s\" - iface %s is down\n",
+ __func__, command, ifr->ifr_name);
+ ret = 0;
+ goto exit;
+ }
+ switch (cmd_num) {
+ case ANDROID_WIFI_CMD_STOP:
+ break;
+ case ANDROID_WIFI_CMD_SCAN_ACTIVE:
+ break;
+ case ANDROID_WIFI_CMD_SCAN_PASSIVE:
+ break;
+ case ANDROID_WIFI_CMD_RSSI:
+ bytes_written = rtw_android_get_rssi(net, command,
+ priv_cmd.total_len);
+ break;
+ case ANDROID_WIFI_CMD_LINKSPEED:
+ bytes_written = rtw_android_get_link_speed(net, command,
+ priv_cmd.total_len);
+ break;
+ case ANDROID_WIFI_CMD_MACADDR:
+ bytes_written = rtw_android_get_macaddr(net, command,
+ priv_cmd.total_len);
+ break;
+ case ANDROID_WIFI_CMD_BLOCK:
+ bytes_written = rtw_android_set_block(net, command,
+ priv_cmd.total_len);
+ break;
+ case ANDROID_WIFI_CMD_RXFILTER_START:
+ break;
+ case ANDROID_WIFI_CMD_RXFILTER_STOP:
+ break;
+ case ANDROID_WIFI_CMD_RXFILTER_ADD:
+ break;
+ case ANDROID_WIFI_CMD_RXFILTER_REMOVE:
+ break;
+ case ANDROID_WIFI_CMD_BTCOEXSCAN_START:
+ /* TBD: BTCOEXSCAN-START */
+ break;
+ case ANDROID_WIFI_CMD_BTCOEXSCAN_STOP:
+ /* TBD: BTCOEXSCAN-STOP */
+ break;
+ case ANDROID_WIFI_CMD_BTCOEXMODE:
+ break;
+ case ANDROID_WIFI_CMD_SETSUSPENDOPT:
+ break;
+ case ANDROID_WIFI_CMD_SETBAND:
+ break;
+ case ANDROID_WIFI_CMD_GETBAND:
+ break;
+ case ANDROID_WIFI_CMD_COUNTRY:
+ bytes_written = android_set_cntry(net, command,
+ priv_cmd.total_len);
+ break;
+ case ANDROID_WIFI_CMD_P2P_DEV_ADDR:
+ bytes_written = android_get_p2p_addr(net, command,
+ priv_cmd.total_len);
+ break;
+ case ANDROID_WIFI_CMD_P2P_SET_NOA:
+ break;
+ case ANDROID_WIFI_CMD_P2P_GET_NOA:
+ break;
+ case ANDROID_WIFI_CMD_P2P_SET_PS:
+ break;
+ default:
+ DBG_88E("Unknown PRIVATE command %s - ignored\n", command);
+ snprintf(command, 3, "OK");
+ bytes_written = strlen("OK");
+ }
+
+response:
+ if (bytes_written >= 0) {
+ if ((bytes_written == 0) && (priv_cmd.total_len > 0))
+ command[0] = '\0';
+ if (bytes_written >= priv_cmd.total_len) {
+ DBG_88E("%s: bytes_written = %d\n", __func__,
+ bytes_written);
+ bytes_written = priv_cmd.total_len;
+ } else {
+ bytes_written++;
+ }
+ priv_cmd.used_len = bytes_written;
+ if (copy_to_user((char __user *)priv_cmd.buf, command,
+ bytes_written)) {
+ DBG_88E("%s: failed to copy data to user buffer\n",
+ __func__);
+ ret = -EFAULT;
+ }
+ } else {
+ ret = bytes_written;
+ }
+exit:
+ rtw_unlock_suspend();
+ kfree(command);
+ return ret;
+}
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
new file mode 100644
index 00000000000..d3078d200e5
--- /dev/null
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -0,0 +1,892 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2011 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _HCI_INTF_C_
+
+#include <osdep_service.h>
+#include <drv_types.h>
+#include <recv_osdep.h>
+#include <xmit_osdep.h>
+#include <hal_intf.h>
+#include <rtw_version.h>
+#include <linux/usb.h>
+#include <osdep_intf.h>
+
+#include <usb_vendor_req.h>
+#include <usb_ops.h>
+#include <usb_osintf.h>
+#include <usb_hal.h>
+#include <rtw_ioctl.h>
+
+int ui_pid[3] = {0, 0, 0};
+
+static int rtw_suspend(struct usb_interface *intf, pm_message_t message);
+static int rtw_resume(struct usb_interface *intf);
+
+
+static int rtw_drv_init(struct usb_interface *pusb_intf, const struct usb_device_id *pdid);
+static void rtw_dev_remove(struct usb_interface *pusb_intf);
+
+
+#define USB_VENDER_ID_REALTEK 0x0bda
+
+/* DID_USB_v916_20130116 */
+static struct usb_device_id rtw_usb_id_tbl[] = {
+ /*=== Realtek demoboard ===*/
+ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8179)}, /* 8188EUS */
+ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
+ /*=== Customer ID ===*/
+ /****** 8188EUS ********/
+ {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */
+ {} /* Terminating entry */
+};
+
+MODULE_DEVICE_TABLE(usb, rtw_usb_id_tbl);
+
+static struct specific_device_id specific_device_id_tbl[] = {
+ {} /* empty table for now */
+};
+
+struct rtw_usb_drv {
+ struct usb_driver usbdrv;
+ int drv_registered;
+ struct mutex hw_init_mutex;
+};
+
+static struct rtw_usb_drv rtl8188e_usb_drv = {
+ .usbdrv.name = (char *)"r8188eu",
+ .usbdrv.probe = rtw_drv_init,
+ .usbdrv.disconnect = rtw_dev_remove,
+ .usbdrv.id_table = rtw_usb_id_tbl,
+ .usbdrv.suspend = rtw_suspend,
+ .usbdrv.resume = rtw_resume,
+ .usbdrv.reset_resume = rtw_resume,
+};
+
+static struct rtw_usb_drv *usb_drv = &rtl8188e_usb_drv;
+
+static inline int RT_usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
+{
+ return (epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN;
+}
+
+static inline int RT_usb_endpoint_dir_out(const struct usb_endpoint_descriptor *epd)
+{
+ return (epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT;
+}
+
+static inline int RT_usb_endpoint_xfer_int(const struct usb_endpoint_descriptor *epd)
+{
+ return (epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT;
+}
+
+static inline int RT_usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd)
+{
+ return (epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK;
+}
+
+static inline int RT_usb_endpoint_is_bulk_in(const struct usb_endpoint_descriptor *epd)
+{
+ return RT_usb_endpoint_xfer_bulk(epd) && RT_usb_endpoint_dir_in(epd);
+}
+
+static inline int RT_usb_endpoint_is_bulk_out(const struct usb_endpoint_descriptor *epd)
+{
+ return RT_usb_endpoint_xfer_bulk(epd) && RT_usb_endpoint_dir_out(epd);
+}
+
+static inline int usb_endpoint_is_int(const struct usb_endpoint_descriptor *epd)
+{
+ return RT_usb_endpoint_xfer_int(epd) && RT_usb_endpoint_dir_in(epd);
+}
+
+static inline int RT_usb_endpoint_num(const struct usb_endpoint_descriptor *epd)
+{
+ return epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+}
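+
+/*
+ * Worked example (descriptor values are illustrative): an endpoint with
+ * bEndpointAddress = 0x81 and bmAttributes = 0x02 is bulk-IN endpoint 1:
+ *
+ *    RT_usb_endpoint_dir_in():    (0x81 & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN
+ *    RT_usb_endpoint_xfer_bulk(): (0x02 & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK
+ *    RT_usb_endpoint_num():       0x81 & USB_ENDPOINT_NUMBER_MASK == 1
+ */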
+
+static u8 rtw_init_intf_priv(struct dvobj_priv *dvobj)
+{
+ u8 rst = _SUCCESS;
+
+ _rtw_mutex_init(&dvobj->usb_vendor_req_mutex);
+
+ dvobj->usb_alloc_vendor_req_buf = rtw_zmalloc(MAX_USB_IO_CTL_SIZE);
+ if (dvobj->usb_alloc_vendor_req_buf == NULL) {
+ DBG_88E("alloc usb_vendor_req_buf failed... /n");
+ rst = _FAIL;
+ goto exit;
+ }
+ dvobj->usb_vendor_req_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(dvobj->usb_alloc_vendor_req_buf), ALIGNMENT_UNIT);
+exit:
+ return rst;
+}
+
+static u8 rtw_deinit_intf_priv(struct dvobj_priv *dvobj)
+{
+ u8 rst = _SUCCESS;
+
+ kfree(dvobj->usb_alloc_vendor_req_buf);
+ _rtw_mutex_free(&dvobj->usb_vendor_req_mutex);
+ return rst;
+}
+
+static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
+{
+ int i;
+ int status = _FAIL;
+ struct dvobj_priv *pdvobjpriv;
+ struct usb_host_config *phost_conf;
+ struct usb_config_descriptor *pconf_desc;
+ struct usb_host_interface *phost_iface;
+ struct usb_interface_descriptor *piface_desc;
+ struct usb_host_endpoint *phost_endp;
+ struct usb_endpoint_descriptor *pendp_desc;
+ struct usb_device *pusbd;
+
+_func_enter_;
+
+ pdvobjpriv = (struct dvobj_priv *)rtw_zmalloc(sizeof(*pdvobjpriv));
+ if (pdvobjpriv == NULL)
+ goto exit;
+
+ pdvobjpriv->pusbintf = usb_intf;
+ pusbd = interface_to_usbdev(usb_intf);
+ pdvobjpriv->pusbdev = pusbd;
+ usb_set_intfdata(usb_intf, pdvobjpriv);
+
+ pdvobjpriv->RtNumInPipes = 0;
+ pdvobjpriv->RtNumOutPipes = 0;
+
+ phost_conf = pusbd->actconfig;
+ pconf_desc = &phost_conf->desc;
+
+ phost_iface = &usb_intf->altsetting[0];
+ piface_desc = &phost_iface->desc;
+
+ pdvobjpriv->NumInterfaces = pconf_desc->bNumInterfaces;
+ pdvobjpriv->InterfaceNumber = piface_desc->bInterfaceNumber;
+ pdvobjpriv->nr_endpoint = piface_desc->bNumEndpoints;
+
+ for (i = 0; i < pdvobjpriv->nr_endpoint; i++) {
+ phost_endp = phost_iface->endpoint + i;
+ if (phost_endp) {
+ pendp_desc = &phost_endp->desc;
+
+ DBG_88E("\nusb_endpoint_descriptor(%d):\n", i);
+ DBG_88E("bLength=%x\n", pendp_desc->bLength);
+ DBG_88E("bDescriptorType=%x\n",
+ pendp_desc->bDescriptorType);
+ DBG_88E("bEndpointAddress=%x\n",
+ pendp_desc->bEndpointAddress);
+ DBG_88E("wMaxPacketSize=%d\n",
+ le16_to_cpu(pendp_desc->wMaxPacketSize));
+ DBG_88E("bInterval=%x\n", pendp_desc->bInterval);
+
+ if (RT_usb_endpoint_is_bulk_in(pendp_desc)) {
+ DBG_88E("RT_usb_endpoint_is_bulk_in = %x\n",
+ RT_usb_endpoint_num(pendp_desc));
+ pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] = RT_usb_endpoint_num(pendp_desc);
+ pdvobjpriv->RtNumInPipes++;
+ } else if (usb_endpoint_is_int(pendp_desc)) {
+ DBG_88E("usb_endpoint_is_int = %x, Interval = %x\n",
+ RT_usb_endpoint_num(pendp_desc),
+ pendp_desc->bInterval);
+ pdvobjpriv->RtInPipe[pdvobjpriv->RtNumInPipes] = RT_usb_endpoint_num(pendp_desc);
+ pdvobjpriv->RtNumInPipes++;
+ } else if (RT_usb_endpoint_is_bulk_out(pendp_desc)) {
+ DBG_88E("RT_usb_endpoint_is_bulk_out = %x\n",
+ RT_usb_endpoint_num(pendp_desc));
+ pdvobjpriv->RtOutPipe[pdvobjpriv->RtNumOutPipes] = RT_usb_endpoint_num(pendp_desc);
+ pdvobjpriv->RtNumOutPipes++;
+ }
+ pdvobjpriv->ep_num[i] = RT_usb_endpoint_num(pendp_desc);
+ }
+ }
+
+ DBG_88E("nr_endpoint=%d, in_num=%d, out_num=%d\n\n",
+ pdvobjpriv->nr_endpoint, pdvobjpriv->RtNumInPipes,
+ pdvobjpriv->RtNumOutPipes);
+
+ if (pusbd->speed == USB_SPEED_HIGH) {
+ pdvobjpriv->ishighspeed = true;
+ DBG_88E("USB_SPEED_HIGH\n");
+ } else {
+ pdvobjpriv->ishighspeed = false;
+ DBG_88E("NON USB_SPEED_HIGH\n");
+ }
+
+ if (rtw_init_intf_priv(pdvobjpriv) == _FAIL) {
+ RT_TRACE(_module_os_intfs_c_, _drv_err_,
+ ("\n Can't INIT rtw_init_intf_priv\n"));
+ goto free_dvobj;
+ }
+
+ /* 3 misc */
+ _rtw_init_sema(&(pdvobjpriv->usb_suspend_sema), 0);
+ rtw_reset_continual_urb_error(pdvobjpriv);
+
+ usb_get_dev(pusbd);
+
+ status = _SUCCESS;
+
+free_dvobj:
+ if (status != _SUCCESS && pdvobjpriv) {
+ usb_set_intfdata(usb_intf, NULL);
+ kfree(pdvobjpriv);
+ pdvobjpriv = NULL;
+ }
+exit:
+_func_exit_;
+ return pdvobjpriv;
+}
+
+static void usb_dvobj_deinit(struct usb_interface *usb_intf)
+{
+ struct dvobj_priv *dvobj = usb_get_intfdata(usb_intf);
+
+_func_enter_;
+
+ usb_set_intfdata(usb_intf, NULL);
+ if (dvobj) {
+ /* Modify condition for 92DU DMDP 2010.11.18, by Thomas */
+ if ((dvobj->NumInterfaces != 2 &&
+ dvobj->NumInterfaces != 3) ||
+ (dvobj->InterfaceNumber == 1)) {
+ if (interface_to_usbdev(usb_intf)->state !=
+ USB_STATE_NOTATTACHED) {
+ /* If the usb dongle is not unplugged across a module
+ * remove/insert cycle, the driver fails the first
+ * sitesurvey after the device comes up. Reset the usb
+ * port to work around this sitesurvey failure. */
+ DBG_88E("usb attached..., try to reset usb device\n");
+ usb_reset_device(interface_to_usbdev(usb_intf));
+ }
+ }
+ rtw_deinit_intf_priv(dvobj);
+ kfree(dvobj);
+ }
+
+ usb_put_dev(interface_to_usbdev(usb_intf));
+
+_func_exit_;
+}
+
+static void chip_by_usb_id(struct adapter *padapter,
+ const struct usb_device_id *pdid)
+{
+ padapter->chip_type = NULL_CHIP_TYPE;
+ hal_set_hw_type(padapter);
+}
+
+static void usb_intf_start(struct adapter *padapter)
+{
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+usb_intf_start\n"));
+
+ rtw_hal_inirp_init(padapter);
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-usb_intf_start\n"));
+}
+
+static void usb_intf_stop(struct adapter *padapter)
+{
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+usb_intf_stop\n"));
+
+ /* disable hw interrupt */
+ if (!padapter->bSurpriseRemoved) {
+ /* device still exists, so driver can do i/o operation */
+ /* TODO: */
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_,
+ ("SurpriseRemoved == false\n"));
+ }
+
+ /* cancel in irp */
+ rtw_hal_inirp_deinit(padapter);
+
+ /* cancel out irp */
+ rtw_write_port_cancel(padapter);
+
+ /* todo:cancel other irps */
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-usb_intf_stop\n"));
+}
+
+static void rtw_dev_unload(struct adapter *padapter)
+{
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_dev_unload\n"));
+
+ if (padapter->bup) {
+ DBG_88E("===> rtw_dev_unload\n");
+ padapter->bDriverStopped = true;
+ if (padapter->xmitpriv.ack_tx)
+ rtw_ack_tx_done(&padapter->xmitpriv, RTW_SCTX_DONE_DRV_STOP);
+ /* s3. */
+ if (padapter->intf_stop)
+ padapter->intf_stop(padapter);
+ /* s4. */
+ if (!padapter->pwrctrlpriv.bInternalAutoSuspend)
+ rtw_stop_drv_threads(padapter);
+
+ /* s5. */
+ if (!padapter->bSurpriseRemoved) {
+ rtw_hal_deinit(padapter);
+ padapter->bSurpriseRemoved = true;
+ }
+
+ padapter->bup = false;
+ } else {
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_,
+ ("r871x_dev_unload():padapter->bup == false\n"));
+ }
+
+ DBG_88E("<=== rtw_dev_unload\n");
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-rtw_dev_unload\n"));
+}
+
+static void process_spec_devid(const struct usb_device_id *pdid)
+{
+ u16 vid, pid;
+ u32 flags;
+ int i;
+ int num = sizeof(specific_device_id_tbl) /
+ sizeof(struct specific_device_id);
+
+ for (i = 0; i < num; i++) {
+ vid = specific_device_id_tbl[i].idVendor;
+ pid = specific_device_id_tbl[i].idProduct;
+ flags = specific_device_id_tbl[i].flags;
+
+ if ((pdid->idVendor == vid) && (pdid->idProduct == pid) &&
+ (flags&SPEC_DEV_ID_DISABLE_HT)) {
+ rtw_ht_enable = 0;
+ rtw_cbw40_enable = 0;
+ rtw_ampdu_enable = 0;
+ }
+ }
+}
+
+int rtw_hw_suspend(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct net_device *pnetdev = padapter->pnetdev;
+
+ _func_enter_;
+
+ if ((!padapter->bup) || (padapter->bDriverStopped) ||
+ (padapter->bSurpriseRemoved)) {
+ DBG_88E("padapter->bup=%d bDriverStopped=%d bSurpriseRemoved = %d\n",
+ padapter->bup, padapter->bDriverStopped,
+ padapter->bSurpriseRemoved);
+ goto error_exit;
+ }
+
+ if (padapter) { /* system suspend */
+ LeaveAllPowerSaveMode(padapter);
+
+ DBG_88E("==> rtw_hw_suspend\n");
+ _enter_pwrlock(&pwrpriv->lock);
+ pwrpriv->bips_processing = true;
+ /* s1. */
+ if (pnetdev) {
+ netif_carrier_off(pnetdev);
+ rtw_netif_stop_queue(pnetdev);
+ }
+
+ /* s2. */
+ rtw_disassoc_cmd(padapter, 500, false);
+
+ /* s2-2. indicate disconnect to os */
+ {
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
+ _clr_fwstate_(pmlmepriv, _FW_LINKED);
+
+ rtw_led_control(padapter, LED_CTL_NO_LINK);
+
+ rtw_os_indicate_disconnect(padapter);
+
+ /* do not enqueue cmd */
+ rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_DISCONNECT, 0);
+ }
+ }
+ /* s2-3. */
+ rtw_free_assoc_resources(padapter, 1);
+
+ /* s2-4. */
+ rtw_free_network_queue(padapter, true);
+ rtw_ips_dev_unload(padapter);
+ pwrpriv->rf_pwrstate = rf_off;
+ pwrpriv->bips_processing = false;
+
+ _exit_pwrlock(&pwrpriv->lock);
+ } else {
+ goto error_exit;
+ }
+ _func_exit_;
+ return 0;
+
+error_exit:
+ DBG_88E("%s, failed\n", __func__);
+ return -1;
+}
+
+int rtw_hw_resume(struct adapter *padapter)
+{
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+ struct net_device *pnetdev = padapter->pnetdev;
+
+ _func_enter_;
+
+ if (padapter) { /* system resume */
+ DBG_88E("==> rtw_hw_resume\n");
+ _enter_pwrlock(&pwrpriv->lock);
+ pwrpriv->bips_processing = true;
+ rtw_reset_drv_sw(padapter);
+
+ if (pm_netdev_open(pnetdev, false) != 0) {
+ _exit_pwrlock(&pwrpriv->lock);
+ goto error_exit;
+ }
+
+ netif_device_attach(pnetdev);
+ netif_carrier_on(pnetdev);
+
+ if (!netif_queue_stopped(pnetdev))
+ netif_start_queue(pnetdev);
+ else
+ netif_wake_queue(pnetdev);
+
+ pwrpriv->bkeepfwalive = false;
+ pwrpriv->brfoffbyhw = false;
+
+ pwrpriv->rf_pwrstate = rf_on;
+ pwrpriv->bips_processing = false;
+
+ _exit_pwrlock(&pwrpriv->lock);
+ } else {
+ goto error_exit;
+ }
+
+ _func_exit_;
+
+ return 0;
+error_exit:
+ DBG_88E("%s, Open net dev failed\n", __func__);
+ return -1;
+}
+
+static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
+{
+ struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
+ struct adapter *padapter = dvobj->if1;
+ struct net_device *pnetdev = padapter->pnetdev;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
+
+ int ret = 0;
+ u32 start_time = rtw_get_current_time();
+
+ _func_enter_;
+
+ DBG_88E("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
+
+ if ((!padapter->bup) || (padapter->bDriverStopped) ||
+ (padapter->bSurpriseRemoved)) {
+ DBG_88E("padapter->bup=%d bDriverStopped=%d bSurpriseRemoved = %d\n",
+ padapter->bup, padapter->bDriverStopped,
+ padapter->bSurpriseRemoved);
+ goto exit;
+ }
+
+ pwrpriv->bInSuspend = true;
+ rtw_cancel_all_timer(padapter);
+ LeaveAllPowerSaveMode(padapter);
+
+ _enter_pwrlock(&pwrpriv->lock);
+ /* s1. */
+ if (pnetdev) {
+ netif_carrier_off(pnetdev);
+ rtw_netif_stop_queue(pnetdev);
+ }
+
+ /* s2. */
+ rtw_disassoc_cmd(padapter, 0, false);
+
+ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) &&
+ check_fwstate(pmlmepriv, _FW_LINKED)) {
+ DBG_88E("%s:%d %s(%pM), length:%d assoc_ssid.length:%d\n",
+ __func__, __LINE__,
+ pmlmepriv->cur_network.network.Ssid.Ssid,
+ pmlmepriv->cur_network.network.MacAddress,
+ pmlmepriv->cur_network.network.Ssid.SsidLength,
+ pmlmepriv->assoc_ssid.SsidLength);
+
+ pmlmepriv->to_roaming = 1;
+ }
+ /* s2-2. indicate disconnect to os */
+ rtw_indicate_disconnect(padapter);
+ /* s2-3. */
+ rtw_free_assoc_resources(padapter, 1);
+ /* s2-4. */
+ rtw_free_network_queue(padapter, true);
+
+ rtw_dev_unload(padapter);
+ _exit_pwrlock(&pwrpriv->lock);
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
+ rtw_indicate_scan_done(padapter, 1);
+
+ if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
+ rtw_indicate_disconnect(padapter);
+
+exit:
+ DBG_88E("<=== %s return %d.............. in %dms\n", __func__
+ , ret, rtw_get_passing_time_ms(start_time));
+
+ _func_exit_;
+ return ret;
+}
+
+static int rtw_resume(struct usb_interface *pusb_intf)
+{
+ struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
+ struct adapter *padapter = dvobj->if1;
+
+ /* both the autosuspend and the regular resume path end up in
+ * rtw_resume_process() */
+ return rtw_resume_process(padapter);
+}
+
+int rtw_resume_process(struct adapter *padapter)
+{
+ struct net_device *pnetdev;
+ struct pwrctrl_priv *pwrpriv = NULL;
+ int ret = -1;
+ u32 start_time = rtw_get_current_time();
+ _func_enter_;
+
+ DBG_88E("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
+
+ if (padapter) {
+ pnetdev = padapter->pnetdev;
+ pwrpriv = &padapter->pwrctrlpriv;
+ } else {
+ goto exit;
+ }
+
+ _enter_pwrlock(&pwrpriv->lock);
+ rtw_reset_drv_sw(padapter);
+ if (pwrpriv)
+ pwrpriv->bkeepfwalive = false;
+
+ DBG_88E("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
+ if (pm_netdev_open(pnetdev, true) != 0)
+ goto exit;
+
+ netif_device_attach(pnetdev);
+ netif_carrier_on(pnetdev);
+
+ _exit_pwrlock(&pwrpriv->lock);
+
+ if (padapter->pid[1] != 0) {
+ DBG_88E("pid[1]:%d\n", padapter->pid[1]);
+ rtw_signal_process(padapter->pid[1], SIGUSR2);
+ }
+
+ rtw_roaming(padapter, NULL);
+
+ ret = 0;
+exit:
+ if (pwrpriv)
+ pwrpriv->bInSuspend = false;
+ DBG_88E("<=== %s return %d.............. in %dms\n", __func__,
+ ret, rtw_get_passing_time_ms(start_time));
+
+ _func_exit_;
+
+ return ret;
+}
+
+/*
+ * drv_init() - a device potentially for us
+ *
+ * notes: drv_init() is called when the bus driver has located
+ * a card for us to support.
+ * We accept the new device by returning 0.
+ */
+
+static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
+ struct usb_interface *pusb_intf, const struct usb_device_id *pdid)
+{
+ struct adapter *padapter = NULL;
+ struct net_device *pnetdev = NULL;
+ int status = _FAIL;
+
+ padapter = (struct adapter *)rtw_zvmalloc(sizeof(*padapter));
+ if (padapter == NULL)
+ goto exit;
+ padapter->dvobj = dvobj;
+ dvobj->if1 = padapter;
+
+ padapter->bDriverStopped = true;
+
+ padapter->hw_init_mutex = &usb_drv->hw_init_mutex;
+
+ /* step 1-1., decide the chip_type via vid/pid */
+ padapter->interface_type = RTW_USB;
+ chip_by_usb_id(padapter, pdid);
+
+ if (rtw_handle_dualmac(padapter, 1) != _SUCCESS)
+ goto free_adapter;
+
+ pnetdev = rtw_init_netdev(padapter);
+ if (pnetdev == NULL)
+ goto handle_dualmac;
+ SET_NETDEV_DEV(pnetdev, dvobj_to_dev(dvobj));
+ padapter = rtw_netdev_priv(pnetdev);
+
+ /* step 2. hook HalFunc, allocate HalData */
+ hal_set_hal_ops(padapter);
+
+ padapter->intf_start = &usb_intf_start;
+ padapter->intf_stop = &usb_intf_stop;
+
+ /* step init_io_priv */
+ rtw_init_io_priv(padapter, usb_set_intf_ops);
+
+ /* step read_chip_version */
+ rtw_hal_read_chip_version(padapter);
+
+ /* step usb endpoint mapping */
+ rtw_hal_chip_configure(padapter);
+
+ /* step read efuse/eeprom data and get mac_addr */
+ rtw_hal_read_chip_info(padapter);
+
+ /* step 5. */
+ if (rtw_init_drv_sw(padapter) == _FAIL) {
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_,
+ ("Initialize driver software resource Failed!\n"));
+ goto free_hal_data;
+ }
+
+#ifdef CONFIG_PM
+ if (padapter->pwrctrlpriv.bSupportRemoteWakeup) {
+ dvobj->pusbdev->do_remote_wakeup = 1;
+ pusb_intf->needs_remote_wakeup = 1;
+ device_init_wakeup(&pusb_intf->dev, 1);
+ DBG_88E("\n padapter->pwrctrlpriv.bSupportRemoteWakeup~~~~~~\n");
+ DBG_88E("\n padapter->pwrctrlpriv.bSupportRemoteWakeup~~~[%d]~~~\n",
+ device_may_wakeup(&pusb_intf->dev));
+ }
+#endif
+
+ /* 2012-07-11 Moved here to avoid interference from the
+ * 8723AS-VAU BT autosuspend */
+ if (usb_autopm_get_interface(pusb_intf) < 0)
+ DBG_88E("can't get autopm:\n");
+
+ /* alloc dev name after read efuse. */
+ rtw_init_netdev_name(pnetdev, padapter->registrypriv.ifname);
+ rtw_macaddr_cfg(padapter->eeprompriv.mac_addr);
+#ifdef CONFIG_88EU_P2P
+ rtw_init_wifidirect_addrs(padapter, padapter->eeprompriv.mac_addr,
+ padapter->eeprompriv.mac_addr);
+#endif
+ memcpy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
+ DBG_88E("MAC Address from pnetdev->dev_addr = %pM\n",
+ pnetdev->dev_addr);
+
+ /* step 6. Tell the network stack we exist */
+ if (register_netdev(pnetdev) != 0) {
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("register_netdev() failed\n"));
+ goto free_hal_data;
+ }
+
+ DBG_88E("bDriverStopped:%d, bSurpriseRemoved:%d, bup:%d, hw_init_completed:%d\n"
+ , padapter->bDriverStopped
+ , padapter->bSurpriseRemoved
+ , padapter->bup
+ , padapter->hw_init_completed
+ );
+
+ status = _SUCCESS;
+
+free_hal_data:
+ if (status != _SUCCESS && padapter->HalData)
+ kfree(padapter->HalData);
+handle_dualmac:
+ if (status != _SUCCESS)
+ rtw_handle_dualmac(padapter, 0);
+free_adapter:
+ if (status != _SUCCESS) {
+ if (pnetdev)
+ rtw_free_netdev(pnetdev);
+ else if (padapter)
+ rtw_vmfree((u8 *)padapter, sizeof(*padapter));
+ padapter = NULL;
+ }
+exit:
+ return padapter;
+}
+
+static void rtw_usb_if1_deinit(struct adapter *if1)
+{
+ struct net_device *pnetdev = if1->pnetdev;
+ struct mlme_priv *pmlmepriv = &if1->mlmepriv;
+
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
+ rtw_disassoc_cmd(if1, 0, false);
+
+#ifdef CONFIG_88EU_AP_MODE
+ free_mlme_ap_info(if1);
+#endif
+
+ if (if1->DriverState != DRIVER_DISAPPEAR) {
+ if (pnetdev) {
+ /* will call netdev_close() */
+ unregister_netdev(pnetdev);
+ rtw_proc_remove_one(pnetdev);
+ }
+ }
+ rtw_cancel_all_timer(if1);
+
+ rtw_dev_unload(if1);
+ DBG_88E("+r871xu_dev_remove, hw_init_completed=%d\n",
+ if1->hw_init_completed);
+ rtw_handle_dualmac(if1, 0);
+ rtw_free_drv_sw(if1);
+ if (pnetdev)
+ rtw_free_netdev(pnetdev);
+}
+
+static int rtw_drv_init(struct usb_interface *pusb_intf, const struct usb_device_id *pdid)
+{
+ struct adapter *if1 = NULL;
+ int status = _FAIL;
+ struct dvobj_priv *dvobj;
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_drv_init\n"));
+
+ /* step 0. */
+ process_spec_devid(pdid);
+
+ /* Initialize dvobj_priv */
+ dvobj = usb_dvobj_init(pusb_intf);
+ if (dvobj == NULL) {
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_,
+ ("initialize device object priv Failed!\n"));
+ goto exit;
+ }
+
+ if1 = rtw_usb_if1_init(dvobj, pusb_intf, pdid);
+ if (if1 == NULL) {
+ DBG_88E("rtw_init_primarystruct adapter Failed!\n");
+ goto free_dvobj;
+ }
+
+ if (ui_pid[1] != 0) {
+ DBG_88E("ui_pid[1]:%d\n", ui_pid[1]);
+ rtw_signal_process(ui_pid[1], SIGUSR2);
+ }
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-871x_drv - drv_init, success!\n"));
+
+ status = _SUCCESS;
+
+ if (status != _SUCCESS && if1)
+ rtw_usb_if1_deinit(if1);
+free_dvobj:
+ if (status != _SUCCESS)
+ usb_dvobj_deinit(pusb_intf);
+exit:
+ return status == _SUCCESS ? 0 : -ENODEV;
+}
+
+/*
+ * dev_remove() - our device is being removed
+*/
+/* Both "rmmod" and unplug (SurpriseRemoved) end up calling r871xu_dev_remove(); how do we tell them apart? */
+static void rtw_dev_remove(struct usb_interface *pusb_intf)
+{
+ struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
+ struct adapter *padapter = dvobj->if1;
+
+_func_enter_;
+
+ DBG_88E("+rtw_dev_remove\n");
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+dev_remove()\n"));
+
+ if (usb_drv->drv_registered)
+ padapter->bSurpriseRemoved = true;
+
+ rtw_pm_set_ips(padapter, IPS_NONE);
+ rtw_pm_set_lps(padapter, PS_MODE_ACTIVE);
+
+ LeaveAllPowerSaveMode(padapter);
+
+ rtw_usb_if1_deinit(padapter);
+
+ usb_dvobj_deinit(pusb_intf);
+
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("-dev_remove()\n"));
+ DBG_88E("-r871xu_dev_remove, done\n");
+_func_exit_;
+
+ return;
+}
+
+static int __init rtw_drv_entry(void)
+{
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_drv_entry\n"));
+
+ DBG_88E(DRV_NAME " driver version=%s\n", DRIVERVERSION);
+ DBG_88E("build time: %s %s\n", __DATE__, __TIME__);
+
+ rtw_suspend_lock_init();
+
+ _rtw_mutex_init(&usb_drv->hw_init_mutex);
+
+ usb_drv->drv_registered = true;
+ return usb_register(&usb_drv->usbdrv);
+}
+
+static void __exit rtw_drv_halt(void)
+{
+ RT_TRACE(_module_hci_intfs_c_, _drv_err_, ("+rtw_drv_halt\n"));
+ DBG_88E("+rtw_drv_halt\n");
+
+ rtw_suspend_lock_uninit();
+
+ usb_drv->drv_registered = false;
+ usb_deregister(&usb_drv->usbdrv);
+
+ _rtw_mutex_free(&usb_drv->hw_init_mutex);
+ DBG_88E("-rtw_drv_halt\n");
+}
+
+module_init(rtw_drv_entry);
+module_exit(rtw_drv_halt);
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
new file mode 100644
index 00000000000..4c71e3b93b5
--- /dev/null
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -0,0 +1,288 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ ******************************************************************************/
+#define _USB_OPS_LINUX_C_
+
+#include <drv_types.h>
+#include <usb_ops_linux.h>
+#include <rtw_sreset.h>
+
+unsigned int ffaddr2pipehdl(struct dvobj_priv *pdvobj, u32 addr)
+{
+ unsigned int pipe = 0, ep_num = 0;
+ struct usb_device *pusbd = pdvobj->pusbdev;
+
+ if (addr == RECV_BULK_IN_ADDR) {
+ pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[0]);
+ } else if (addr == RECV_INT_IN_ADDR) {
+ pipe = usb_rcvbulkpipe(pusbd, pdvobj->RtInPipe[1]);
+ } else if (addr < HW_QUEUE_ENTRY) {
+ ep_num = pdvobj->Queue2Pipe[addr];
+ pipe = usb_sndbulkpipe(pusbd, ep_num);
+ }
+
+ return pipe;
+}
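+
+/*
+ * Mapping sketch (illustrative): a TX queue address below HW_QUEUE_ENTRY is
+ * looked up in Queue2Pipe[] and turned into a bulk-out pipe, while the two
+ * RECV_*_IN_ADDR values select the bulk-in pipes, e.g.
+ *
+ *    pipe = ffaddr2pipehdl(pdvobj, VO_QUEUE_INX);
+ *        == usb_sndbulkpipe(pusbd, pdvobj->Queue2Pipe[VO_QUEUE_INX])
+ */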
+
+struct zero_bulkout_context {
+ void *pbuf;
+ void *purb;
+ void *pirp;
+ void *padapter;
+};
+
+void usb_read_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem)
+{
+}
+
+void usb_write_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
+{
+}
+
+void usb_read_port_cancel(struct intf_hdl *pintfhdl)
+{
+ int i;
+ struct recv_buf *precvbuf;
+ struct adapter *padapter = pintfhdl->padapter;
+ precvbuf = (struct recv_buf *)padapter->recvpriv.precv_buf;
+
+ DBG_88E("%s\n", __func__);
+
+ padapter->bReadPortCancel = true;
+
+ for (i = 0; i < NR_RECVBUFF; i++) {
+ precvbuf->reuse = true;
+ if (precvbuf->purb)
+ usb_kill_urb(precvbuf->purb);
+ precvbuf++;
+ }
+}
+
+static void usb_write_port_complete(struct urb *purb, struct pt_regs *regs)
+{
+ struct xmit_buf *pxmitbuf = (struct xmit_buf *)purb->context;
+ struct adapter *padapter = pxmitbuf->padapter;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct hal_data_8188e *haldata;
+
+_func_enter_;
+
+ switch (pxmitbuf->flags) {
+ case VO_QUEUE_INX:
+ pxmitpriv->voq_cnt--;
+ break;
+ case VI_QUEUE_INX:
+ pxmitpriv->viq_cnt--;
+ break;
+ case BE_QUEUE_INX:
+ pxmitpriv->beq_cnt--;
+ break;
+ case BK_QUEUE_INX:
+ pxmitpriv->bkq_cnt--;
+ break;
+ case HIGH_QUEUE_INX:
+#ifdef CONFIG_88EU_AP_MODE
+ rtw_chk_hi_queue_cmd(padapter);
+#endif
+ break;
+ default:
+ break;
+ }
+
+ if (padapter->bSurpriseRemoved || padapter->bDriverStopped ||
+ padapter->bWritePortCancel) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_write_port_complete:bDriverStopped(%d) OR bSurpriseRemoved(%d)",
+ padapter->bDriverStopped, padapter->bSurpriseRemoved));
+ DBG_88E("%s(): TX Warning! bDriverStopped(%d) OR bSurpriseRemoved(%d) bWritePortCancel(%d) pxmitbuf->ext_tag(%x)\n",
+ __func__, padapter->bDriverStopped,
+ padapter->bSurpriseRemoved, padapter->bWritePortCancel,
+ pxmitbuf->ext_tag);
+
+ goto check_completion;
+ }
+
+ if (purb->status) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port_complete : purb->status(%d) != 0\n", purb->status));
+ DBG_88E("###=> urb_write_port_complete status(%d)\n", purb->status);
+ if ((purb->status == -EPIPE) || (purb->status == -EPROTO)) {
+ sreset_set_wifi_error_status(padapter, USB_WRITE_PORT_FAIL);
+ } else if (purb->status == -EINPROGRESS) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port_complete: EINPROGESS\n"));
+ goto check_completion;
+ } else if (purb->status == -ENOENT) {
+ DBG_88E("%s: -ENOENT\n", __func__);
+ goto check_completion;
+ } else if (purb->status == -ECONNRESET) {
+ DBG_88E("%s: -ECONNRESET\n", __func__);
+ goto check_completion;
+ } else if (purb->status == -ESHUTDOWN) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port_complete: ESHUTDOWN\n"));
+ padapter->bDriverStopped = true;
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port_complete:bDriverStopped = true\n"));
+ goto check_completion;
+ } else {
+ padapter->bSurpriseRemoved = true;
+ DBG_88E("bSurpriseRemoved = true\n");
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port_complete:bSurpriseRemoved = true\n"));
+
+ goto check_completion;
+ }
+ }
+
+ haldata = GET_HAL_DATA(padapter);
+ haldata->srestpriv.last_tx_complete_time = rtw_get_current_time();
+
+check_completion:
+ rtw_sctx_done_err(&pxmitbuf->sctx,
+ purb->status ? RTW_SCTX_DONE_WRITE_PORT_ERR :
+ RTW_SCTX_DONE_SUCCESS);
+
+ rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
+
+ tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
+
+_func_exit_;
+}
+
+u32 usb_write_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
+{
+ unsigned long irqL;
+ unsigned int pipe;
+ int status;
+ u32 ret = _FAIL;
+ struct urb *purb = NULL;
+ struct adapter *padapter = (struct adapter *)pintfhdl->padapter;
+ struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct xmit_buf *pxmitbuf = (struct xmit_buf *)wmem;
+ struct xmit_frame *pxmitframe = (struct xmit_frame *)pxmitbuf->priv_data;
+ struct usb_device *pusbd = pdvobj->pusbdev;
+
+_func_enter_;
+
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("+usb_write_port\n"));
+
+ if ((padapter->bDriverStopped) || (padapter->bSurpriseRemoved) ||
+ (padapter->pwrctrlpriv.pnp_bstop_trx)) {
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_,
+ ("usb_write_port:( padapter->bDriverStopped ||padapter->bSurpriseRemoved ||adapter->pwrctrlpriv.pnp_bstop_trx)!!!\n"));
+ rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_TX_DENY);
+ goto exit;
+ }
+
+ _enter_critical(&pxmitpriv->lock, &irqL);
+
+ switch (addr) {
+ case VO_QUEUE_INX:
+ pxmitpriv->voq_cnt++;
+ pxmitbuf->flags = VO_QUEUE_INX;
+ break;
+ case VI_QUEUE_INX:
+ pxmitpriv->viq_cnt++;
+ pxmitbuf->flags = VI_QUEUE_INX;
+ break;
+ case BE_QUEUE_INX:
+ pxmitpriv->beq_cnt++;
+ pxmitbuf->flags = BE_QUEUE_INX;
+ break;
+ case BK_QUEUE_INX:
+ pxmitpriv->bkq_cnt++;
+ pxmitbuf->flags = BK_QUEUE_INX;
+ break;
+ case HIGH_QUEUE_INX:
+ pxmitbuf->flags = HIGH_QUEUE_INX;
+ break;
+ default:
+ pxmitbuf->flags = MGT_QUEUE_INX;
+ break;
+ }
+
+ _exit_critical(&pxmitpriv->lock, &irqL);
+
+ purb = pxmitbuf->pxmit_urb[0];
+
+ /* translate DMA FIFO addr to pipehandle */
+ pipe = ffaddr2pipehdl(pdvobj, addr);
+
+ usb_fill_bulk_urb(purb, pusbd, pipe,
+ pxmitframe->buf_addr, /* pxmitbuf->pbuf */
+ cnt,
+ usb_write_port_complete,
+ pxmitbuf);/* context is pxmitbuf */
+
+ status = usb_submit_urb(purb, GFP_ATOMIC);
+ if (!status) {
+ struct hal_data_8188e *haldata = GET_HAL_DATA(padapter);
+
+ haldata->srestpriv.last_tx_time = rtw_get_current_time();
+ } else {
+ rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_WRITE_PORT_ERR);
+ DBG_88E("usb_write_port, status =%d\n", status);
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("usb_write_port(): usb_submit_urb, status =%x\n", status));
+
+ switch (status) {
+ case -ENODEV:
+ padapter->bDriverStopped = true;
+ break;
+ default:
+ break;
+ }
+ goto exit;
+ }
+
+ ret = _SUCCESS;
+
+/* We add the URB_ZERO_PACKET flag to urb so that the host will send the zero packet automatically. */
+
+ RT_TRACE(_module_hci_ops_os_c_, _drv_err_, ("-usb_write_port\n"));
+
+exit:
+ if (ret != _SUCCESS)
+ rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
+_func_exit_;
+ return ret;
+}
+
+void usb_write_port_cancel(struct intf_hdl *pintfhdl)
+{
+ int i, j;
+ struct adapter *padapter = pintfhdl->padapter;
+ struct xmit_buf *pxmitbuf = (struct xmit_buf *)padapter->xmitpriv.pxmitbuf;
+
+ DBG_88E("%s\n", __func__);
+
+ padapter->bWritePortCancel = true;
+
+ for (i = 0; i < NR_XMITBUFF; i++) {
+ for (j = 0; j < 8; j++) {
+ if (pxmitbuf->pxmit_urb[j])
+ usb_kill_urb(pxmitbuf->pxmit_urb[j]);
+ }
+ pxmitbuf++;
+ }
+
+ pxmitbuf = (struct xmit_buf *)padapter->xmitpriv.pxmit_extbuf;
+ for (i = 0; i < NR_XMIT_EXTBUFF; i++) {
+ for (j = 0; j < 8; j++) {
+ if (pxmitbuf->pxmit_urb[j])
+ usb_kill_urb(pxmitbuf->pxmit_urb[j]);
+ }
+ pxmitbuf++;
+ }
+}
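The queue handling above hinges on ffaddr2pipehdl(): the two fixed receive addresses select the bulk and interrupt IN endpoints, and every hardware TX queue index below HW_QUEUE_ENTRY is looked up in the per-device Queue2Pipe table before usb_write_port() builds its bulk URB. The standalone C sketch below models only that table lookup; the queue names mirror the driver, but the endpoint numbers are illustrative assumptions, not what the 8188EU HAL actually programs.

/* Sketch only: models the Queue2Pipe lookup done by ffaddr2pipehdl().
 * Queue indices mirror the driver's names; the bulk OUT endpoint
 * numbers below are example values, not the real HAL mapping.
 */
#include <stdio.h>

enum { VO_QUEUE_INX, VI_QUEUE_INX, BE_QUEUE_INX, BK_QUEUE_INX,
       BCN_QUEUE_INX, MGT_QUEUE_INX, HIGH_QUEUE_INX, TXCMD_QUEUE_INX,
       HW_QUEUE_ENTRY };

/* Hypothetical queue-to-bulk-OUT-endpoint table (the real driver fills
 * this in at init time according to how many OUT endpoints exist). */
static const unsigned int queue2pipe[HW_QUEUE_ENTRY] = {
	[VO_QUEUE_INX]    = 1,
	[VI_QUEUE_INX]    = 1,
	[BE_QUEUE_INX]    = 2,
	[BK_QUEUE_INX]    = 2,
	[BCN_QUEUE_INX]   = 3,
	[MGT_QUEUE_INX]   = 3,
	[HIGH_QUEUE_INX]  = 3,
	[TXCMD_QUEUE_INX] = 3,
};

static unsigned int addr_to_ep(unsigned int addr)
{
	return addr < HW_QUEUE_ENTRY ? queue2pipe[addr] : 0;
}

int main(void)
{
	unsigned int q;

	for (q = 0; q < HW_QUEUE_ENTRY; q++)
		printf("queue %u -> bulk OUT ep %u\n", q, addr_to_ep(q));
	return 0;
}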
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
new file mode 100644
index 00000000000..2e586c063ab
--- /dev/null
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -0,0 +1,290 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ *
+ ******************************************************************************/
+#define _XMIT_OSDEP_C_
+
+#include <linux/version.h>
+#include <osdep_service.h>
+#include <drv_types.h>
+
+#include <if_ether.h>
+#include <ip.h>
+#include <wifi.h>
+#include <mlme_osdep.h>
+#include <xmit_osdep.h>
+#include <osdep_intf.h>
+#include <usb_osintf.h>
+
+uint rtw_remainder_len(struct pkt_file *pfile)
+{
+ return pfile->buf_len - ((size_t)(pfile->cur_addr) -
+ (size_t)(pfile->buf_start));
+}
+
+void _rtw_open_pktfile(struct sk_buff *pktptr, struct pkt_file *pfile)
+{
+_func_enter_;
+
+ pfile->pkt = pktptr;
+ pfile->cur_addr = pktptr->data;
+ pfile->buf_start = pktptr->data;
+ pfile->pkt_len = pktptr->len;
+ pfile->buf_len = pktptr->len;
+
+ pfile->cur_buffer = pfile->buf_start;
+
+_func_exit_;
+}
+
+uint _rtw_pktfile_read (struct pkt_file *pfile, u8 *rmem, uint rlen)
+{
+ uint len = 0;
+
+_func_enter_;
+
+ len = rtw_remainder_len(pfile);
+ len = (rlen > len) ? len : rlen;
+
+ if (rmem)
+ skb_copy_bits(pfile->pkt, pfile->buf_len-pfile->pkt_len, rmem, len);
+
+ pfile->cur_addr += len;
+ pfile->pkt_len -= len;
+
+_func_exit_;
+
+ return len;
+}
+
+int rtw_endofpktfile(struct pkt_file *pfile)
+{
+_func_enter_;
+
+ if (pfile->pkt_len == 0) {
+ _func_exit_;
+ return true;
+ }
+
+_func_exit_;
+
+ return false;
+}
+
+void rtw_set_tx_chksum_offload(struct sk_buff *pkt, struct pkt_attrib *pattrib)
+{
+}
+
+int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz)
+{
+ int i;
+
+ pxmitbuf->pallocated_buf = rtw_zmalloc(alloc_sz);
+ if (pxmitbuf->pallocated_buf == NULL)
+ return _FAIL;
+
+ pxmitbuf->pbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitbuf->pallocated_buf), XMITBUF_ALIGN_SZ);
+ pxmitbuf->dma_transfer_addr = 0;
+
+ for (i = 0; i < 8; i++) {
+ pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
+ if (pxmitbuf->pxmit_urb[i] == NULL) {
+ DBG_88E("pxmitbuf->pxmit_urb[i]==NULL");
+ return _FAIL;
+ }
+ }
+ return _SUCCESS;
+}
+
+void rtw_os_xmit_resource_free(struct adapter *padapter,
+ struct xmit_buf *pxmitbuf, u32 free_sz)
+{
+ int i;
+
+ for (i = 0; i < 8; i++)
+ usb_free_urb(pxmitbuf->pxmit_urb[i]);
+
+ kfree(pxmitbuf->pallocated_buf);
+}
+
+#define WMM_XMIT_THRESHOLD (NR_XMITFRAME*2/5)
+
+void rtw_os_pkt_complete(struct adapter *padapter, struct sk_buff *pkt)
+{
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
+ u16 queue;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+
+ queue = skb_get_queue_mapping(pkt);
+ if (padapter->registrypriv.wifi_spec) {
+ if (__netif_subqueue_stopped(padapter->pnetdev, queue) &&
+ (pxmitpriv->hwxmits[queue].accnt < WMM_XMIT_THRESHOLD))
+ netif_wake_subqueue(padapter->pnetdev, queue);
+ } else {
+ if (__netif_subqueue_stopped(padapter->pnetdev, queue))
+ netif_wake_subqueue(padapter->pnetdev, queue);
+ }
+#else
+ if (netif_queue_stopped(padapter->pnetdev))
+ netif_wake_queue(padapter->pnetdev);
+#endif
+
+ dev_kfree_skb_any(pkt);
+}
+
+void rtw_os_xmit_complete(struct adapter *padapter, struct xmit_frame *pxframe)
+{
+ if (pxframe->pkt)
+ rtw_os_pkt_complete(padapter, pxframe->pkt);
+ pxframe->pkt = NULL;
+}
+
+void rtw_os_xmit_schedule(struct adapter *padapter)
+{
+ unsigned long irql;
+ struct xmit_priv *pxmitpriv;
+
+ if (!padapter)
+ return;
+
+ pxmitpriv = &padapter->xmitpriv;
+
+ _enter_critical_bh(&pxmitpriv->lock, &irql);
+
+ if (rtw_txframes_pending(padapter))
+ tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
+
+ _exit_critical_bh(&pxmitpriv->lock, &irql);
+}
+
+static void rtw_check_xmit_resource(struct adapter *padapter, struct sk_buff *pkt)
+{
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ u16 queue;
+
+ queue = skb_get_queue_mapping(pkt);
+ if (padapter->registrypriv.wifi_spec) {
+ /* No free space for Tx, tx_worker is too slow */
+ if (pxmitpriv->hwxmits[queue].accnt > WMM_XMIT_THRESHOLD)
+ netif_stop_subqueue(padapter->pnetdev, queue);
+ } else {
+ if (pxmitpriv->free_xmitframe_cnt <= 4) {
+ if (!netif_tx_queue_stopped(netdev_get_tx_queue(padapter->pnetdev, queue)))
+ netif_stop_subqueue(padapter->pnetdev, queue);
+ }
+ }
+}
+
+static int rtw_mlcst2unicst(struct adapter *padapter, struct sk_buff *skb)
+{
+ struct sta_priv *pstapriv = &padapter->stapriv;
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ unsigned long irql;
+ struct list_head *phead, *plist;
+ struct sk_buff *newskb;
+ struct sta_info *psta = NULL;
+ s32 res;
+
+ _enter_critical_bh(&pstapriv->asoc_list_lock, &irql);
+ phead = &pstapriv->asoc_list;
+ plist = get_next(phead);
+
+ /* free sta asoc_queue */
+ while (!rtw_end_of_queue_search(phead, plist)) {
+ psta = LIST_CONTAINOR(plist, struct sta_info, asoc_list);
+
+ plist = get_next(plist);
+
+ /* avoid sending the frame back to the STA it came from */
+ if (!memcmp(psta->hwaddr, &skb->data[6], 6))
+ continue;
+
+ newskb = skb_copy(skb, GFP_ATOMIC);
+
+ if (newskb) {
+ memcpy(newskb->data, psta->hwaddr, 6);
+ res = rtw_xmit(padapter, &newskb);
+ if (res < 0) {
+ DBG_88E("%s()-%d: rtw_xmit() return error!\n", __func__, __LINE__);
+ pxmitpriv->tx_drop++;
+ dev_kfree_skb_any(newskb);
+ } else {
+ pxmitpriv->tx_pkts++;
+ }
+ } else {
+ DBG_88E("%s-%d: skb_copy() failed!\n", __func__, __LINE__);
+ pxmitpriv->tx_drop++;
+
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irql);
+ return false; /* Caller shall tx this multicast frame via the normal path. */
+ }
+ }
+
+ _exit_critical_bh(&pstapriv->asoc_list_lock, &irql);
+ dev_kfree_skb_any(skb);
+ return true;
+}
+
+int rtw_xmit_entry(struct sk_buff *pkt, struct net_device *pnetdev)
+{
+ struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
+ struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
+ s32 res = 0;
+
+_func_enter_;
+
+ RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("+xmit_entry\n"));
+
+ if (rtw_if_up(padapter) == false) {
+ RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("rtw_xmit_entry: rtw_if_up fail\n"));
+ goto drop_packet;
+ }
+
+ rtw_check_xmit_resource(padapter, pkt);
+
+ if (!rtw_mc2u_disable && check_fwstate(pmlmepriv, WIFI_AP_STATE) &&
+ (IP_MCAST_MAC(pkt->data) || ICMPV6_MCAST_MAC(pkt->data)) &&
+ (padapter->registrypriv.wifi_spec == 0)) {
+ if (pxmitpriv->free_xmitframe_cnt > (NR_XMITFRAME/4)) {
+ res = rtw_mlcst2unicst(padapter, pkt);
+ if (res)
+ goto exit;
+ }
+ }
+
+ res = rtw_xmit(padapter, &pkt);
+ if (res < 0)
+ goto drop_packet;
+
+ pxmitpriv->tx_pkts++;
+ RT_TRACE(_module_xmit_osdep_c_, _drv_info_, ("rtw_xmit_entry: tx_pkts=%d\n", (u32)pxmitpriv->tx_pkts));
+ goto exit;
+
+drop_packet:
+ pxmitpriv->tx_drop++;
+ dev_kfree_skb_any(pkt);
+ RT_TRACE(_module_xmit_osdep_c_, _drv_notice_, ("rtw_xmit_entry: drop, tx_drop=%d\n", (u32)pxmitpriv->tx_drop));
+
+exit:
+
+_func_exit_;
+
+ return 0;
+}
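rtw_mlcst2unicst() above is the AP-mode multicast-to-unicast path: for every associated station except the one the frame came from, the skb is copied, its destination address is overwritten with that station's hardware address, and the copy is handed to rtw_xmit(); if cloning fails the function bails out and the caller transmits the original multicast frame normally. The sketch below is a stripped-down user-space model of that loop, with a plain array standing in for the asoc_list and memcpy() standing in for skb_copy().

/* Sketch only: models the multicast->unicast replication performed by
 * rtw_mlcst2unicst().  The real code walks stapriv->asoc_list and
 * clones sk_buffs; an array and a local buffer stand in for both.
 */
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct sta { unsigned char hwaddr[ETH_ALEN]; };

/* frame[0..5] = destination MAC, frame[6..11] = source MAC */
static void tx_one(const unsigned char *frame, size_t len)
{
	printf("tx to %02x:%02x:%02x:%02x:%02x:%02x (%zu bytes)\n",
	       frame[0], frame[1], frame[2], frame[3], frame[4], frame[5], len);
}

static void mcast_to_unicast(const unsigned char *frame, size_t len,
			     const struct sta *stas, size_t nsta)
{
	unsigned char copy[1600];
	size_t i;

	if (len > sizeof(copy))
		return;

	for (i = 0; i < nsta; i++) {
		/* skip the station the frame came from */
		if (!memcmp(stas[i].hwaddr, frame + ETH_ALEN, ETH_ALEN))
			continue;
		memcpy(copy, frame, len);
		memcpy(copy, stas[i].hwaddr, ETH_ALEN); /* rewrite DA */
		tx_one(copy, len);
	}
}

int main(void)
{
	struct sta stas[2] = {
		{ { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } },
		{ { 0x02, 0x00, 0x00, 0x00, 0x00, 0x02 } },
	};
	unsigned char frame[64] = {
		0x01, 0x00, 0x5e, 0x00, 0x00, 0x01,	/* multicast DA */
		0x02, 0x00, 0x00, 0x00, 0x00, 0x01,	/* SA = station 1 */
	};

	mcast_to_unicast(frame, sizeof(frame), stas, 2);
	return 0;
}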
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 50c7bb77398..74fbd70d583 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
index b9b3b52f912..dbe0e1c8705 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
index baf3b6342e4..fa5603a562c 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
index fa607f98b17..7d075d3cbe6 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 2b6c61c5d3d..e0684435555 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -94,6 +94,7 @@ MODULE_DEVICE_TABLE(pci, rtl8192_pci_id_tbl);
static int rtl8192_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id);
static void rtl8192_pci_disconnect(struct pci_dev *pdev);
+static irqreturn_t rtl8192_interrupt(int irq, void *netdev);
static struct pci_driver rtl8192_pci_driver = {
.name = DRV_NAME, /* Driver name */
@@ -1324,7 +1325,7 @@ static short rtl8192_init(struct net_device *dev)
(unsigned long)dev);
rtl8192_irq_disable(dev);
- if (request_irq(dev->irq, (void *)rtl8192_interrupt_rsl, IRQF_SHARED,
+ if (request_irq(dev->irq, rtl8192_interrupt, IRQF_SHARED,
dev->name, dev)) {
printk(KERN_ERR "Error allocating IRQ %d", dev->irq);
return -1;
@@ -2704,7 +2705,7 @@ out:
}
-irqreturn_type rtl8192_interrupt(int irq, void *netdev, struct pt_regs *regs)
+irqreturn_t rtl8192_interrupt(int irq, void *netdev)
{
struct net_device *dev = (struct net_device *) netdev;
struct r8192_priv *priv = (struct r8192_priv *)rtllib_priv(dev);
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index 87d4d349c89..b015bf61cf0 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
@@ -88,10 +88,6 @@
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID , \
.driver_data = (kernel_ulong_t)&(cfg)
-#define irqreturn_type irqreturn_t
-
-#define rtl8192_interrupt(x, y, z) rtl8192_interrupt_rsl(x, y)
-
#define RTL_MAX_SCAN_SIZE 128
#define RTL_RATE_MAX 30
@@ -1044,8 +1040,6 @@ void rtl8192_set_chan(struct net_device *dev, short ch);
void check_rfctrl_gpio_timer(unsigned long data);
void rtl8192_hw_wakeup_wq(void *data);
-irqreturn_type rtl8192_interrupt(int irq, void *netdev, struct pt_regs *regs);
-
short rtl8192_pci_initdescring(struct net_device *dev);
void rtl8192_cancel_deferred_work(struct r8192_priv *priv);
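The rtl_core changes above retire the pt_regs-based rtl8192_interrupt_rsl() wrapper macro and register a plain irqreturn_t handler directly with request_irq(). A minimal sketch of that modern pattern follows; the example device structure, register accesses and the "example" name are placeholders, not rtl8192e code.

/* Sketch only: the request_irq()/irqreturn_t pattern the conversion
 * above moves to.  Device layout and register offsets are invented.
 */
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/types.h>

struct example_dev {
	void __iomem *regs;	/* mapped interrupt status register */
	int irq;
};

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;
	u32 status = readl(dev->regs);

	if (!status)
		return IRQ_NONE;	/* shared line, not our device */

	/* acknowledge and handle the events indicated by status ... */
	writel(status, dev->regs);
	return IRQ_HANDLED;
}

static int example_setup_irq(struct example_dev *dev)
{
	/* dev_id must be unique per handler when IRQF_SHARED is used. */
	return request_irq(dev->irq, example_interrupt, IRQF_SHARED,
			   "example", dev);
}

static void example_teardown_irq(struct example_dev *dev)
{
	free_irq(dev->irq, dev);
}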
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
index c1ccff4a832..a6778e0853c 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
index 9452e1683a7..adea2b4c7a4 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
index 0cfb3ecaade..529ea54d168 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
index 5abbee37cdc..2ad92eee50c 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
index 28c7da677a8..356aec43796 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
index c9a7c563b68..a8c2ade4f43 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
index df79d6c4ca0..962f2e5b8bf 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index 3485ef1dfab..05ef49f24cd 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -14,7 +14,7 @@
* Copyright (c) 2004, Intel Corporation
*
* Modified for Realtek's wi-fi cards by Andrea Merello
- * <andreamrl@tiscali.it>
+ * <andrea.merello@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/staging/rtl8192e/rtllib_debug.h b/drivers/staging/rtl8192e/rtllib_debug.h
index 2bfc1155f50..c59f67b2636 100644
--- a/drivers/staging/rtl8192e/rtllib_debug.h
+++ b/drivers/staging/rtl8192e/rtllib_debug.h
@@ -2,7 +2,7 @@
* Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
*
* Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
index 84ea721d5d8..51d46e04d3f 100644
--- a/drivers/staging/rtl8192e/rtllib_module.c
+++ b/drivers/staging/rtl8192e/rtllib_module.c
@@ -233,7 +233,8 @@ static const struct file_operations fops = {
.open = open_debug_level,
.read = seq_read,
.llseek = seq_lseek,
- .write = write_debug_level
+ .write = write_debug_level,
+ .release = single_release,
};
int __init rtllib_init(void)
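The one-line fops additions here and in ieee80211_module.c further down exist because the debug-level file is backed by a seq_file created with single_open() (which the added single_release() pairing implies): without a matching .release handler, every open of the entry leaks the seq_file state. A minimal, self-contained module showing the complete pairing follows; the proc file name and the value it prints are placeholders.

/* Sketch only: the single_open()/single_release() pairing that the
 * .release additions above complete.  Names and values are examples.
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static unsigned int example_debug_level = 0x1f;

static int example_show(struct seq_file *m, void *v)
{
	seq_printf(m, "0x%08x\n", example_debug_level);
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

static const struct file_operations example_fops = {
	.owner   = THIS_MODULE,
	.open    = example_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	/* Without this, each open leaks the seq_file allocated above. */
	.release = single_release,
};

static int __init example_init(void)
{
	if (!proc_create("example_debug_level", 0444, NULL, &example_fops))
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	remove_proc_entry("example_debug_level", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");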
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index e75364e3eb4..8aeaed5a987 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -14,7 +14,7 @@
******************************************************************************
Few modifications for Realtek's Wi-Fi drivers by
- Andrea Merello <andreamrl@tiscali.it>
+ Andrea Merello <andrea.merello@gmail.com>
A special thanks goes to Realtek for their support !
@@ -777,6 +777,8 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
/* Allocate new skb for releasing to upper layer */
sub_skb = dev_alloc_skb(RTLLIB_SKBBUFFER_SIZE);
+ if (!sub_skb)
+ return 0;
skb_reserve(sub_skb, 12);
data_ptr = (u8 *)skb_put(sub_skb, skb->len);
memcpy(data_ptr, skb->data, skb->len);
@@ -825,6 +827,8 @@ static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb,
/* Allocate new skb for releasing to upper layer */
sub_skb = dev_alloc_skb(nSubframe_Length + 12);
+ if (!sub_skb)
+ return 0;
skb_reserve(sub_skb, 12);
data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length);
memcpy(data_ptr, skb->data, nSubframe_Length);
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index aefffac556a..0cbf6f5593a 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -1,5 +1,5 @@
/* IEEE 802.11 SoftMAC layer
- * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
*
* Mostly extracted from the rtl8180-sa2400 driver for the
* in-kernel generic ieee802.11 stack.
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index 740cf85e9d5..e6af8cfab12 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -1,5 +1,5 @@
/* IEEE 802.11 SoftMAC layer
- * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
*
* Mostly extracted from the rtl8180-sa2400 driver for the
* in-kernel generic ieee802.11 stack.
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 759d7c7d78e..1cc6a9d5e8a 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -25,7 +25,7 @@
******************************************************************************
Few modifications for Realtek's Wi-Fi drivers by
- Andrea Merello <andreamrl@tiscali.it>
+ Andrea Merello <andrea.merello@gmail.com>
A special thanks goes to Realtek for their support !
diff --git a/drivers/staging/rtl8192u/authors b/drivers/staging/rtl8192u/authors
index b08bbae39e7..0fab11228b4 100644
--- a/drivers/staging/rtl8192u/authors
+++ b/drivers/staging/rtl8192u/authors
@@ -1 +1 @@
-Andrea Merello <andreamrl@tiscali.it>
+Andrea Merello <andrea.merello@gmail.com>
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211.h b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
index c9f3bb363be..bc64f05a7e6 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211.h
@@ -14,7 +14,7 @@
* Copyright (c) 2004, Intel Corporation
*
* Modified for Realtek's wi-fi cards by Andrea Merello
- * <andreamrl@tiscali.it>
+ * <andrea.merello@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
index e0870c05a5e..434c4312718 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_module.c
@@ -268,7 +268,8 @@ static const struct file_operations fops = {
.open = open_debug_level,
.read = seq_read,
.llseek = seq_lseek,
- .write = write_debug_level
+ .write = write_debug_level,
+ .release = single_release,
};
int __init ieee80211_debug_init(void)
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index a6b18409103..59900bfa1c1 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -14,7 +14,7 @@
******************************************************************************
Few modifications for Realtek's Wi-Fi drivers by
- Andrea Merello <andreamrl@tiscali.it>
+ Andrea Merello <andrea.merello@gmail.com>
A special thanks goes to Realtek for their support !
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
index 8a0075db925..5fd696926ee 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac.c
@@ -1,5 +1,5 @@
/* IEEE 802.11 SoftMAC layer
- * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
*
* Mostly extracted from the rtl8180-sa2400 driver for the
* in-kernel generic ieee802.11 stack.
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
index 60746b8b1eb..7b7d929f153 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_softmac_wx.c
@@ -1,5 +1,5 @@
/* IEEE 802.11 SoftMAC layer
- * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
+ * Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
*
* Mostly extracted from the rtl8180-sa2400 driver for the
* in-kernel generic ieee802.11 stack.
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
index 995504207fc..a7bcc64ff22 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c
@@ -25,7 +25,7 @@
******************************************************************************
Few modifications for Realtek's Wi-Fi drivers by
- Andrea Merello <andreamrl@tiscali.it>
+ Andrea Merello <andrea.merello@gmail.com>
A special thanks goes to Realtek for their support !
diff --git a/drivers/staging/rtl8192u/r8180_93cx6.c b/drivers/staging/rtl8192u/r8180_93cx6.c
index d2199986d13..c61729b727e 100644
--- a/drivers/staging/rtl8192u/r8180_93cx6.c
+++ b/drivers/staging/rtl8192u/r8180_93cx6.c
@@ -3,7 +3,7 @@
memory is addressed by 16 bits words.
This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
Parts of this driver are based on the GPL part of the
diff --git a/drivers/staging/rtl8192u/r8180_93cx6.h b/drivers/staging/rtl8192u/r8180_93cx6.h
index 5cea51e1142..ee55dbffe32 100644
--- a/drivers/staging/rtl8192u/r8180_93cx6.h
+++ b/drivers/staging/rtl8192u/r8180_93cx6.h
@@ -1,6 +1,6 @@
/*
This is part of rtl8187 OpenSource driver
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
Parts of this driver are based on the GPL part of the official realtek driver
diff --git a/drivers/staging/rtl8192u/r8180_pm.c b/drivers/staging/rtl8192u/r8180_pm.c
index 0c58d0ed502..999968d4172 100644
--- a/drivers/staging/rtl8192u/r8180_pm.c
+++ b/drivers/staging/rtl8192u/r8180_pm.c
@@ -5,7 +5,7 @@
does not do anything useful.
This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
*/
diff --git a/drivers/staging/rtl8192u/r8180_pm.h b/drivers/staging/rtl8192u/r8180_pm.h
index 52d6fba99de..4be63da0b78 100644
--- a/drivers/staging/rtl8192u/r8180_pm.h
+++ b/drivers/staging/rtl8192u/r8180_pm.h
@@ -5,7 +5,7 @@
does not do anything useful.
This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
*/
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.h b/drivers/staging/rtl8192u/r8190_rtl8256.h
index b64dd662761..592e7807fa4 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.h
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.h
@@ -1,7 +1,7 @@
/*
This is part of the rtl8180-sa2400 driver
released under the GPL (See file COPYING for details).
- Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it>
+ Copyright (c) 2005 Andrea Merello <andrea.merello@gmail.com>
This files contains programming code for the rtl8256
radio frontend.
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index 338e7bc237c..b484ee128c1 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -1,6 +1,6 @@
/*
* This is part of rtl8187 OpenSource driver.
- * Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
+ * Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
* Released under the terms of GPL (General Public Licence)
*
* Parts of this driver are based on the GPL part of the
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 14c14c24ac5..cd0946db025 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -3,7 +3,7 @@
* Linux device driver for RTL8192U
*
* Based on the r8187 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * Copyright 2004-2005 Andrea Merello <andrea.merello@gmail.com>, et al.
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
diff --git a/drivers/staging/rtl8192u/r8192U_hw.h b/drivers/staging/rtl8192u/r8192U_hw.h
index 7e612aa56fa..dd07a735b53 100644
--- a/drivers/staging/rtl8192u/r8192U_hw.h
+++ b/drivers/staging/rtl8192u/r8192U_hw.h
@@ -1,6 +1,6 @@
/*
This is part of rtl8187 OpenSource driver.
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
Parts of this driver are based on the GPL part of the
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index 3e2576347d2..61f6620213e 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -2,7 +2,7 @@
This file contains wireless extension handlers.
This is part of rtl8180 OpenSource driver.
- Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004-2005 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
Parts of this driver are based on the GPL part
diff --git a/drivers/staging/rtl8192u/r8192U_wx.h b/drivers/staging/rtl8192u/r8192U_wx.h
index 9f6b1050542..ae7a617740a 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.h
+++ b/drivers/staging/rtl8192u/r8192U_wx.h
@@ -1,6 +1,6 @@
/*
This is part of rtl8180 OpenSource driver - v 0.3
- Copyright (C) Andrea Merello 2004 <andreamrl@tiscali.it>
+ Copyright (C) Andrea Merello 2004 <andrea.merello@gmail.com>
Released under the terms of GPL (General Public Licence)
Parts of this driver are based on the GPL part of the official realtek driver
diff --git a/drivers/staging/rtl8192u/r819xU_cmdpkt.c b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
index 6810766edfc..5bc361b16d4 100644
--- a/drivers/staging/rtl8192u/r819xU_cmdpkt.c
+++ b/drivers/staging/rtl8192u/r819xU_cmdpkt.c
@@ -1,81 +1,61 @@
/******************************************************************************
-
- (c) Copyright 2008, RealTEK Technologies Inc. All Rights Reserved.
-
- Module: r819xusb_cmdpkt.c (RTL8190 TX/RX command packet handler Source C File)
-
- Note: The module is responsible for handling TX and RX command packet.
- 1. TX : Send set and query configuration command packet.
- 2. RX : Receive tx feedback, beacon state, query configuration
- command packet.
-
- Function:
-
- Export:
-
- Abbrev:
-
- History:
- Data Who Remark
-
- 05/06/2008 amy Create initial version porting from windows driver.
-
-******************************************************************************/
+ *
+ * (c) Copyright 2008, RealTEK Technologies Inc. All Rights Reserved.
+ *
+ * Module: r819xusb_cmdpkt.c
+ * (RTL8190 TX/RX command packet handler Source C File)
+ *
+ * Note: The module is responsible for handling TX and RX command packet.
+ * 1. TX : Send set and query configuration command packet.
+ * 2. RX : Receive tx feedback, beacon state, query configuration
+ * command packet.
+ *
+ * Function:
+ *
+ * Export:
+ *
+ * Abbrev:
+ *
+ * History:
+ *
+ * Date Who Remark
+ * 05/06/2008 amy Create initial version porting from
+ * windows driver.
+ *
+ ******************************************************************************/
#include "r8192U.h"
#include "r819xU_cmdpkt.h"
-/*---------------------------Define Local Constant---------------------------*/
-/* Debug constant*/
-#define CMPK_DEBOUNCE_CNT 1
-/* 2007/10/24 MH Add for printing a range of data. */
-#define CMPK_PRINT(Address)\
-{\
- unsigned char i;\
- u32 temp[10];\
- \
- memcpy(temp, Address, 40);\
- for (i = 0; i <40; i+=4)\
- printk("\r\n %08x", temp[i]);\
-}\
-/*---------------------------Define functions---------------------------------*/
-
-rt_status
-SendTxCommandPacket(
- struct net_device *dev,
- void *pData,
- u32 DataLen
- )
+
+rt_status SendTxCommandPacket(struct net_device *dev, void *pData, u32 DataLen)
{
rt_status rtStatus = RT_STATUS_SUCCESS;
struct r8192_priv *priv = ieee80211_priv(dev);
struct sk_buff *skb;
cb_desc *tcb_desc;
unsigned char *ptr_buf;
- //bool bLastInitPacket = false;
- //PlatformAcquireSpinLock(Adapter, RT_TX_SPINLOCK);
-
- //Get TCB and local buffer from common pool. (It is shared by CmdQ, MgntQ, and USB coalesce DataQ)
+ /* Get TCB and local buffer from common pool.
+ (It is shared by CmdQ, MgntQ, and USB coalesce DataQ) */
skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4);
- memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev));
+ memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->queue_index = TXCMD_QUEUE;
tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_NORMAL;
tcb_desc->bLastIniPkt = 0;
skb_reserve(skb, USB_HWDESC_HEADER_LEN);
ptr_buf = skb_put(skb, DataLen);
- memcpy(ptr_buf,pData,DataLen);
- tcb_desc->txbuf_size= (u16)DataLen;
-
- if (!priv->ieee80211->check_nic_enough_desc(dev,tcb_desc->queue_index)||
- (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index]))||\
- (priv->ieee80211->queue_stop) ) {
- RT_TRACE(COMP_FIRMWARE,"===================NULL packet==================================> tx full!\n");
- skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
- } else {
- priv->ieee80211->softmac_hard_start_xmit(skb,dev);
- }
+ memcpy(ptr_buf, pData, DataLen);
+ tcb_desc->txbuf_size = (u16)DataLen;
+
+ if (!priv->ieee80211->check_nic_enough_desc(dev, tcb_desc->queue_index) ||
+ (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index])) ||
+ (priv->ieee80211->queue_stop)) {
+ RT_TRACE(COMP_FIRMWARE, "=== NULL packet ======> tx full!\n");
+ skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
+ } else {
+ priv->ieee80211->softmac_hard_start_xmit(skb, dev);
+ }
- //PlatformReleaseSpinLock(Adapter, RT_TX_SPINLOCK);
return rtStatus;
}
@@ -83,27 +63,25 @@ SendTxCommandPacket(
* Function: cmpk_message_handle_tx()
*
* Overview: Driver internal module can call the API to send message to
- * firmware side. For example, you can send a debug command packet.
- * Or you can send a request for FW to modify RLX4181 LBUS HW bank.
- * Otherwise, you can change MAC/PHT/RF register by firmware at
- * run time. We do not support message more than one segment now.
+ * firmware side. For example, you can send a debug command packet.
+ * Or you can send a request for FW to modify RLX4181 LBUS HW bank.
+ * Otherwise, you can change MAC/PHT/RF register by firmware at
+ * run time. We do not support message more than one segment now.
*
- * Input: NONE
+ * Input: NONE
*
- * Output: NONE
+ * Output: NONE
*
- * Return: NONE
+ * Return: NONE
*
* Revised History:
* When Who Remark
* 05/06/2008 amy porting from windows code.
*
*---------------------------------------------------------------------------*/
- extern rt_status cmpk_message_handle_tx(
- struct net_device *dev,
- u8 *codevirtualaddress,
- u32 packettype,
- u32 buffer_len)
+extern rt_status cmpk_message_handle_tx(struct net_device *dev,
+ u8 *codevirtualaddress,
+ u32 packettype, u32 buffer_len)
{
bool rt_status = true;
@@ -113,8 +91,6 @@ SendTxCommandPacket(
struct r8192_priv *priv = ieee80211_priv(dev);
u16 frag_threshold;
u16 frag_length, frag_offset = 0;
- //u16 total_size;
- //int i;
rt_firmware *pfirmware = priv->pFirmware;
struct sk_buff *skb;
@@ -123,11 +99,11 @@ SendTxCommandPacket(
u8 bLastIniPkt;
firmware_init_param(dev);
- //Fragmentation might be required
+ /* Fragmentation might be required */
frag_threshold = pfirmware->cmdpacket_frag_thresold;
do {
if ((buffer_len - frag_offset) > frag_threshold) {
- frag_length = frag_threshold ;
+ frag_length = frag_threshold;
bLastIniPkt = 0;
} else {
@@ -136,146 +112,127 @@ SendTxCommandPacket(
}
- /* Allocate skb buffer to contain firmware info and tx descriptor info
- * add 4 to avoid packet appending overflow.
- * */
- #ifdef RTL8192U
+ /* Allocate skb buffer to contain firmware info and tx
+ descriptor info; add 4 to avoid packet appending overflow. */
+#ifdef RTL8192U
skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + frag_length + 4);
- #else
+#else
skb = dev_alloc_skb(frag_length + 4);
- #endif
- memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev));
+#endif
+ memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
tcb_desc->queue_index = TXCMD_QUEUE;
tcb_desc->bCmdOrInit = packettype;
tcb_desc->bLastIniPkt = bLastIniPkt;
- #ifdef RTL8192U
+#ifdef RTL8192U
skb_reserve(skb, USB_HWDESC_HEADER_LEN);
- #endif
+#endif
seg_ptr = skb_put(skb, buffer_len);
/*
* Transform from little endian to big endian
* and pending zero
*/
- memcpy(seg_ptr,codevirtualaddress,buffer_len);
- tcb_desc->txbuf_size= (u16)buffer_len;
+ memcpy(seg_ptr, codevirtualaddress, buffer_len);
+ tcb_desc->txbuf_size = (u16)buffer_len;
- if (!priv->ieee80211->check_nic_enough_desc(dev,tcb_desc->queue_index)||
- (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index]))||\
- (priv->ieee80211->queue_stop) ) {
- RT_TRACE(COMP_FIRMWARE,"=====================================================> tx full!\n");
+ if (!priv->ieee80211->check_nic_enough_desc(dev, tcb_desc->queue_index) ||
+ (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index])) ||
+ (priv->ieee80211->queue_stop)) {
+ RT_TRACE(COMP_FIRMWARE, "======> tx full!\n");
skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb);
} else {
- priv->ieee80211->softmac_hard_start_xmit(skb,dev);
+ priv->ieee80211->softmac_hard_start_xmit(skb, dev);
}
codevirtualaddress += frag_length;
frag_offset += frag_length;
- }while(frag_offset < buffer_len);
+ } while (frag_offset < buffer_len);
return rt_status;
#endif
-} /* CMPK_Message_Handle_Tx */
+}
/*-----------------------------------------------------------------------------
* Function: cmpk_counttxstatistic()
*
* Overview:
*
- * Input: PADAPTER pAdapter - .
- * CMPK_TXFB_T *psTx_FB - .
+ * Input: PADAPTER pAdapter
+ * CMPK_TXFB_T *psTx_FB
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
- * When Who Remark
- * 05/12/2008 amy Create Version 0 porting from windows code.
+ * When Who Remark
+ * 05/12/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-static void
-cmpk_count_txstatistic(
- struct net_device *dev,
- cmpk_txfb_t *pstx_fb)
+static void cmpk_count_txstatistic(struct net_device *dev, cmpk_txfb_t *pstx_fb)
{
struct r8192_priv *priv = ieee80211_priv(dev);
#ifdef ENABLE_PS
RT_RF_POWER_STATE rtState;
- pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState));
+ pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE,
+ (pu1Byte)(&rtState));
- // When RF is off, we should not count the packet for hw/sw synchronize
- // reason, ie. there may be a duration while sw switch is changed and hw
- // switch is being changed. 2006.12.04, by shien chang.
+ /* When RF is off, we should not count the packet, for hw/sw
+ synchronization reasons, i.e. there may be a period while the sw
+ switch has changed but the hw switch is still changing. */
if (rtState == eRfOff)
- {
return;
- }
#endif
#ifdef TODO
if (pAdapter->bInHctTest)
return;
#endif
- /* We can not know the packet length and transmit type: broadcast or uni
- or multicast. So the relative statistics must be collected in tx
- feedback info. */
- if (pstx_fb->tok)
- {
+ /* We cannot know the packet length and transmit type:
+ broadcast, unicast or multicast. So the related statistics
+ must be collected from the tx feedback info. */
+ if (pstx_fb->tok) {
priv->stats.txfeedbackok++;
priv->stats.txoktotal++;
priv->stats.txokbytestotal += pstx_fb->pkt_length;
priv->stats.txokinperiod++;
/* We can not make sure broadcast/multicast or unicast mode. */
- if (pstx_fb->pkt_type == PACKET_MULTICAST)
- {
+ if (pstx_fb->pkt_type == PACKET_MULTICAST) {
priv->stats.txmulticast++;
priv->stats.txbytesmulticast += pstx_fb->pkt_length;
- }
- else if (pstx_fb->pkt_type == PACKET_BROADCAST)
- {
+ } else if (pstx_fb->pkt_type == PACKET_BROADCAST) {
priv->stats.txbroadcast++;
priv->stats.txbytesbroadcast += pstx_fb->pkt_length;
- }
- else
- {
+ } else {
priv->stats.txunicast++;
priv->stats.txbytesunicast += pstx_fb->pkt_length;
}
- }
- else
- {
+ } else {
priv->stats.txfeedbackfail++;
priv->stats.txerrtotal++;
priv->stats.txerrbytestotal += pstx_fb->pkt_length;
/* We can not make sure broadcast/multicast or unicast mode. */
if (pstx_fb->pkt_type == PACKET_MULTICAST)
- {
priv->stats.txerrmulticast++;
- }
else if (pstx_fb->pkt_type == PACKET_BROADCAST)
- {
priv->stats.txerrbroadcast++;
- }
else
- {
priv->stats.txerrunicast++;
- }
}
priv->stats.txretrycount += pstx_fb->retry_cnt;
priv->stats.txfeedbackretry += pstx_fb->retry_cnt;
-} /* cmpk_CountTxStatistic */
+}
@@ -283,80 +240,63 @@ cmpk_count_txstatistic(
* Function: cmpk_handle_tx_feedback()
*
* Overview: The function is responsible for extract the message inside TX
- * feedbck message from firmware. It will contain dedicated info in
- * ws-06-0063-rtl8190-command-packet-specification. Please
- * refer to chapter "TX Feedback Element". We have to read 20 bytes
- * in the command packet.
+ * feedback message from firmware. It will contain dedicated info in
+ * ws-06-0063-rtl8190-command-packet-specification.
+ * Please refer to chapter "TX Feedback Element".
+ * We have to read 20 bytes in the command packet.
*
- * Input: struct net_device * dev
- * u8 * pmsg - Msg Ptr of the command packet.
+ * Input: struct net_device *dev
+ * u8 *pmsg - Msg Ptr of the command packet.
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
- * When Who Remark
- * 05/08/2008 amy Create Version 0 porting from windows code.
+ * When Who Remark
+ * 05/08/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-static void
-cmpk_handle_tx_feedback(
- struct net_device *dev,
- u8 *pmsg)
+static void cmpk_handle_tx_feedback(struct net_device *dev, u8 *pmsg)
{
struct r8192_priv *priv = ieee80211_priv(dev);
- cmpk_txfb_t rx_tx_fb; /* */
+ cmpk_txfb_t rx_tx_fb;
priv->stats.txfeedback++;
- /* 0. Display received message. */
- //cmpk_Display_Message(CMPK_RX_TX_FB_SIZE, pMsg);
-
/* 1. Extract TX feedback info from RFD to temp structure buffer. */
/* It seems that FW use big endian(MIPS) and DRV use little endian in
windows OS. So we have to read the content byte by byte or transfer
endian type before copy the message copy. */
- /* 2007/07/05 MH Use pointer to transfer structure memory. */
- //memcpy((UINT8 *)&rx_tx_fb, pMsg, sizeof(CMPK_TXFB_T));
+ /* Use pointer to transfer structure memory. */
memcpy((u8 *)&rx_tx_fb, pmsg, sizeof(cmpk_txfb_t));
/* 2. Use tx feedback info to count TX statistics. */
cmpk_count_txstatistic(dev, &rx_tx_fb);
- /* 2007/01/17 MH Comment previous method for TX statistic function. */
+ /* Comment previous method for TX statistic function. */
/* Collect info TX feedback packet to fill TCB. */
/* We can not know the packet length and transmit type: broadcast or uni
or multicast. */
- //CountTxStatistics( pAdapter, &tcb );
-} /* cmpk_Handle_Tx_Feedback */
+}
-void
-cmdpkt_beacontimerinterrupt_819xusb(
- struct net_device *dev
-)
+void cmdpkt_beacontimerinterrupt_819xusb(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u16 tx_rate;
- {
- //
- // 070117, rcnjko: 87B have to S/W beacon for DTM encryption_cmn.
- //
- if (priv->ieee80211->current_network.mode == IEEE_A ||
+ /* 87B have to S/W beacon for DTM encryption_cmn. */
+ if (priv->ieee80211->current_network.mode == IEEE_A ||
priv->ieee80211->current_network.mode == IEEE_N_5G ||
- (priv->ieee80211->current_network.mode == IEEE_N_24G && (!priv->ieee80211->pHTInfo->bCurSuppCCK)))
- {
+ (priv->ieee80211->current_network.mode == IEEE_N_24G &&
+ (!priv->ieee80211->pHTInfo->bCurSuppCCK))) {
tx_rate = 60;
DMESG("send beacon frame tx rate is 6Mbpm\n");
- }
- else
- {
- tx_rate =10;
+ } else {
+ tx_rate = 10;
DMESG("send beacon frame tx rate is 1Mbpm\n");
}
- rtl819xusb_beacon_tx(dev,tx_rate); // HW Beacon
+ rtl819xusb_beacon_tx(dev, tx_rate); /* HW Beacon */
- }
}
@@ -367,151 +307,129 @@ cmdpkt_beacontimerinterrupt_819xusb(
* Function: cmpk_handle_interrupt_status()
*
* Overview: The function is responsible for extract the message from
- * firmware. It will contain dedicated info in
- * ws-07-0063-v06-rtl819x-command-packet-specification-070315.doc.
- * Please refer to chapter "Interrupt Status Element".
+ * firmware. It will contain dedicated info in
+ * ws-07-0063-v06-rtl819x-command-packet-specification-070315.doc.
+ * Please refer to chapter "Interrupt Status Element".
*
- * Input: struct net_device *dev,
- * u8* pmsg - Message Pointer of the command packet.
+ * Input: struct net_device *dev
+ * u8 *pmsg - Message Pointer of the command packet.
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
- * When Who Remark
- * 05/12/2008 amy Add this for rtl8192 porting from windows code.
+ * When Who Remark
+ * 05/12/2008 amy Add this for rtl8192 porting from windows code.
*
*---------------------------------------------------------------------------*/
-static void
-cmpk_handle_interrupt_status(
- struct net_device *dev,
- u8 *pmsg)
+static void cmpk_handle_interrupt_status(struct net_device *dev, u8 *pmsg)
{
cmpk_intr_sta_t rx_intr_status; /* */
struct r8192_priv *priv = ieee80211_priv(dev);
DMESG("---> cmpk_Handle_Interrupt_Status()\n");
- /* 0. Display received message. */
- //cmpk_Display_Message(CMPK_RX_BEACON_STATE_SIZE, pMsg);
-
/* 1. Extract TX feedback info from RFD to temp structure buffer. */
/* It seems that FW use big endian(MIPS) and DRV use little endian in
windows OS. So we have to read the content byte by byte or transfer
endian type before copy the message copy. */
- //rx_bcn_state.Element_ID = pMsg[0];
- //rx_bcn_state.Length = pMsg[1];
rx_intr_status.length = pmsg[1];
- if (rx_intr_status.length != (sizeof(cmpk_intr_sta_t) - 2))
- {
+ if (rx_intr_status.length != (sizeof(cmpk_intr_sta_t) - 2)) {
DMESG("cmpk_Handle_Interrupt_Status: wrong length!\n");
return;
}
- // Statistics of beacon for ad-hoc mode.
- if ( priv->ieee80211->iw_mode == IW_MODE_ADHOC)
- {
- //2 maybe need endian transform?
+ /* Statistics of beacon for ad-hoc mode. */
+ if (priv->ieee80211->iw_mode == IW_MODE_ADHOC) {
+ /* 2 maybe need endian transform? */
rx_intr_status.interrupt_status = *((u32 *)(pmsg + 4));
- //rx_intr_status.InterruptStatus = N2H4BYTE(*((UINT32 *)(pMsg + 4)));
- DMESG("interrupt status = 0x%x\n", rx_intr_status.interrupt_status);
+ DMESG("interrupt status = 0x%x\n",
+ rx_intr_status.interrupt_status);
- if (rx_intr_status.interrupt_status & ISR_TxBcnOk)
- {
+ if (rx_intr_status.interrupt_status & ISR_TxBcnOk) {
priv->ieee80211->bibsscoordinator = true;
priv->stats.txbeaconokint++;
- }
- else if (rx_intr_status.interrupt_status & ISR_TxBcnErr)
- {
+ } else if (rx_intr_status.interrupt_status & ISR_TxBcnErr) {
priv->ieee80211->bibsscoordinator = false;
priv->stats.txbeaconerr++;
}
if (rx_intr_status.interrupt_status & ISR_BcnTimerIntr)
- {
cmdpkt_beacontimerinterrupt_819xusb(dev);
- }
}
- // Other informations in interrupt status we need?
+ /* Do we need any other information from the interrupt status? */
DMESG("<---- cmpk_handle_interrupt_status()\n");
-} /* cmpk_handle_interrupt_status */
+}
/*-----------------------------------------------------------------------------
* Function: cmpk_handle_query_config_rx()
*
* Overview: The function is responsible for extract the message from
- * firmware. It will contain dedicated info in
- * ws-06-0063-rtl8190-command-packet-specification. Please
- * refer to chapter "Beacon State Element".
+ * firmware. It will contain dedicated info in
+ * ws-06-0063-rtl8190-command-packet-specification. Please
+ * refer to chapter "Beacon State Element".
*
- * Input: u8 * pmsg - Message Pointer of the command packet.
+ * Input: u8 *pmsg - Message Pointer of the command packet.
*
* Output: NONE
*
* Return: NONE
*
* Revised History:
- * When Who Remark
- * 05/12/2008 amy Create Version 0 porting from windows code.
+ * When Who Remark
+ * 05/12/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-static void
-cmpk_handle_query_config_rx(
- struct net_device *dev,
- u8 *pmsg)
+static void cmpk_handle_query_config_rx(struct net_device *dev, u8 *pmsg)
{
- cmpk_query_cfg_t rx_query_cfg; /* */
+ cmpk_query_cfg_t rx_query_cfg;
- /* 0. Display received message. */
- //cmpk_Display_Message(CMPK_RX_BEACON_STATE_SIZE, pMsg);
/* 1. Extract TX feedback info from RFD to temp structure buffer. */
/* It seems that FW use big endian(MIPS) and DRV use little endian in
windows OS. So we have to read the content byte by byte or transfer
endian type before copy the message copy. */
- //rx_query_cfg.Element_ID = pMsg[0];
- //rx_query_cfg.Length = pMsg[1];
- rx_query_cfg.cfg_action = (pmsg[4] & 0x80000000)>>31;
+ rx_query_cfg.cfg_action = (pmsg[4] & 0x80000000) >> 31;
rx_query_cfg.cfg_type = (pmsg[4] & 0x60) >> 5;
rx_query_cfg.cfg_size = (pmsg[4] & 0x18) >> 3;
rx_query_cfg.cfg_page = (pmsg[6] & 0x0F) >> 0;
- rx_query_cfg.cfg_offset = pmsg[7];
- rx_query_cfg.value = (pmsg[8] << 24) | (pmsg[9] << 16) |
- (pmsg[10] << 8) | (pmsg[11] << 0);
- rx_query_cfg.mask = (pmsg[12] << 24) | (pmsg[13] << 16) |
- (pmsg[14] << 8) | (pmsg[15] << 0);
+ rx_query_cfg.cfg_offset = pmsg[7];
+ rx_query_cfg.value = (pmsg[8] << 24) | (pmsg[9] << 16) |
+ (pmsg[10] << 8) | (pmsg[11] << 0);
+ rx_query_cfg.mask = (pmsg[12] << 24) | (pmsg[13] << 16) |
+ (pmsg[14] << 8) | (pmsg[15] << 0);
-} /* cmpk_Handle_Query_Config_Rx */
+}
/*-----------------------------------------------------------------------------
* Function: cmpk_count_tx_status()
*
 * Overview: Count aggregated tx status from firmware of one type rx command
- * packet element id = RX_TX_STATUS.
+ * packet element id = RX_TX_STATUS.
*
- * Input: NONE
+ * Input: NONE
*
- * Output: NONE
+ * Output: NONE
*
- * Return: NONE
+ * Return: NONE
*
* Revised History:
- * When Who Remark
- * 05/12/2008 amy Create Version 0 porting from windows code.
+ * When Who Remark
+ * 05/12/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-static void cmpk_count_tx_status( struct net_device *dev,
- cmpk_tx_status_t *pstx_status)
+static void cmpk_count_tx_status(struct net_device *dev,
+ cmpk_tx_status_t *pstx_status)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -519,15 +437,14 @@ static void cmpk_count_tx_status( struct net_device *dev,
RT_RF_POWER_STATE rtstate;
- pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState));
+ pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE,
+ (pu1Byte)(&rtState));
- // When RF is off, we should not count the packet for hw/sw synchronize
- // reason, ie. there may be a duration while sw switch is changed and hw
- // switch is being changed. 2006.12.04, by shien chang.
+ /* When RF is off, we should not count the packet, for hw/sw
+ synchronization reasons, i.e. there may be a period while the sw
+ switch has changed but the hw switch is still changing. */
if (rtState == eRfOff)
- {
return;
- }
#endif
priv->stats.txfeedbackok += pstx_status->txok;
@@ -536,15 +453,12 @@ static void cmpk_count_tx_status( struct net_device *dev,
priv->stats.txfeedbackfail += pstx_status->txfail;
priv->stats.txerrtotal += pstx_status->txfail;
- priv->stats.txretrycount += pstx_status->txretry;
+ priv->stats.txretrycount += pstx_status->txretry;
priv->stats.txfeedbackretry += pstx_status->txretry;
- //pAdapter->TxStats.NumTxOkBytesTotal += psTx_FB->pkt_length;
- //pAdapter->TxStats.NumTxErrBytesTotal += psTx_FB->pkt_length;
- //pAdapter->MgntInfo.LinkDetectInfo.NumTxOkInPeriod++;
- priv->stats.txmulticast += pstx_status->txmcok;
- priv->stats.txbroadcast += pstx_status->txbcok;
+ priv->stats.txmulticast += pstx_status->txmcok;
+ priv->stats.txbroadcast += pstx_status->txbcok;
priv->stats.txunicast += pstx_status->txucok;
priv->stats.txerrmulticast += pstx_status->txmcfail;
@@ -553,10 +467,10 @@ static void cmpk_count_tx_status( struct net_device *dev,
priv->stats.txbytesmulticast += pstx_status->txmclength;
priv->stats.txbytesbroadcast += pstx_status->txbclength;
- priv->stats.txbytesunicast += pstx_status->txuclength;
+ priv->stats.txbytesunicast += pstx_status->txuclength;
- priv->stats.last_packet_rate = pstx_status->rate;
-} /* cmpk_CountTxStatus */
+ priv->stats.last_packet_rate = pstx_status->rate;
+}
@@ -564,7 +478,7 @@ static void cmpk_count_tx_status( struct net_device *dev,
* Function: cmpk_handle_tx_status()
*
* Overview: Firmware add a new tx feedback status to reduce rx command
- * packet buffer operation load.
+ * packet buffer operation load.
*
* Input: NONE
*
@@ -573,22 +487,19 @@ static void cmpk_count_tx_status( struct net_device *dev,
* Return: NONE
*
* Revised History:
- * When Who Remark
- * 05/12/2008 amy Create Version 0 porting from windows code.
+ * When Who Remark
+ * 05/12/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-static void
-cmpk_handle_tx_status(
- struct net_device *dev,
- u8 *pmsg)
+static void cmpk_handle_tx_status(struct net_device *dev, u8 *pmsg)
{
- cmpk_tx_status_t rx_tx_sts; /* */
+ cmpk_tx_status_t rx_tx_sts;
memcpy((void *)&rx_tx_sts, (void *)pmsg, sizeof(cmpk_tx_status_t));
/* 2. Use tx feedback info to count TX statistics. */
cmpk_count_tx_status(dev, &rx_tx_sts);
-} /* cmpk_Handle_Tx_Status */
+}
/*-----------------------------------------------------------------------------
@@ -603,82 +514,71 @@ cmpk_handle_tx_status(
* Return: NONE
*
* Revised History:
- * When Who Remark
- * 05/12/2008 amy Create Version 0 porting from windows code.
+ * When Who Remark
+ * 05/12/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-static void
-cmpk_handle_tx_rate_history(
- struct net_device *dev,
- u8 *pmsg)
+static void cmpk_handle_tx_rate_history(struct net_device *dev, u8 *pmsg)
{
cmpk_tx_rahis_t *ptxrate;
-// RT_RF_POWER_STATE rtState;
- u8 i, j;
- u16 length = sizeof(cmpk_tx_rahis_t);
- u32 *ptemp;
+ u8 i, j;
+ u16 length = sizeof(cmpk_tx_rahis_t);
+ u32 *ptemp;
struct r8192_priv *priv = ieee80211_priv(dev);
#ifdef ENABLE_PS
- pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState));
+ pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE,
+ (pu1Byte)(&rtState));
- // When RF is off, we should not count the packet for hw/sw synchronize
- // reason, ie. there may be a duration while sw switch is changed and hw
- // switch is being changed. 2006.12.04, by shien chang.
+ /* When RF is off, we should not count the packet, for hw/sw
+ synchronization reasons, i.e. there may be a period while the sw
+ switch has changed but the hw switch is still changing. */
if (rtState == eRfOff)
- {
return;
- }
#endif
ptemp = (u32 *)pmsg;
- //
- // Do endian transfer to word alignment(16 bits) for windows system.
- // You must do different endian transfer for linux and MAC OS
- //
- for (i = 0; i < (length/4); i++)
- {
+ /* Do endian transfer to word alignment (16 bits) for the Windows
+ system. A different endian transfer is needed for Linux and Mac OS. */
+ for (i = 0; i < (length/4); i++) {
u16 temp1, temp2;
- temp1 = ptemp[i]&0x0000FFFF;
- temp2 = ptemp[i]>>16;
- ptemp[i] = (temp1<<16)|temp2;
+ temp1 = ptemp[i] & 0x0000FFFF;
+ temp2 = ptemp[i] >> 16;
+ ptemp[i] = (temp1 << 16) | temp2;
}
ptxrate = (cmpk_tx_rahis_t *)pmsg;
- if (ptxrate == NULL )
- {
+ if (ptxrate == NULL)
return;
- }
- for (i = 0; i < 16; i++)
- {
- // Collect CCK rate packet num
+ for (i = 0; i < 16; i++) {
+ /* Collect CCK rate packet num */
if (i < 4)
priv->stats.txrate.cck[i] += ptxrate->cck[i];
- // Collect OFDM rate packet num
- if (i< 8)
+ /* Collect OFDM rate packet num */
+ if (i < 8)
priv->stats.txrate.ofdm[i] += ptxrate->ofdm[i];
for (j = 0; j < 4; j++)
priv->stats.txrate.ht_mcs[j][i] += ptxrate->ht_mcs[j][i];
}
-} /* cmpk_Handle_Tx_Rate_History */
+}
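
The loop above swaps the two 16-bit halves of every 32-bit word before the rate counters are interpreted. The same transform in isolation, as a sketch rather than the driver's code:

	#include <stdint.h>
	#include <stddef.h>

	/* Exchange the upper and lower 16-bit halves of each 32-bit word. */
	static void swap_halfwords(uint32_t *buf, size_t nwords)
	{
		size_t i;

		for (i = 0; i < nwords; i++)
			buf[i] = (buf[i] << 16) | (buf[i] >> 16);
	}
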
/*-----------------------------------------------------------------------------
* Function: cmpk_message_handle_rx()
*
* Overview: In the function, we will capture different RX command packet
- * info. Every RX command packet element has different message
- * length and meaning in content. We only support three type of RX
- * command packet now. Please refer to document
- * ws-06-0063-rtl8190-command-packet-specification.
+ * info. Every RX command packet element has a different message
+ * length and meaning. We only support three types of RX
+ * command packet now. Please refer to document
+ * ws-06-0063-rtl8190-command-packet-specification.
*
* Input: NONE
*
@@ -687,30 +587,22 @@ cmpk_handle_tx_rate_history(
* Return: NONE
*
* Revised History:
- * When Who Remark
- * 05/06/2008 amy Create Version 0 porting from windows code.
+ * When Who Remark
+ * 05/06/2008 amy Create Version 0 porting from windows code.
*
*---------------------------------------------------------------------------*/
-extern u32
-cmpk_message_handle_rx(
- struct net_device *dev,
- struct ieee80211_rx_stats *pstats)
+extern u32 cmpk_message_handle_rx(struct net_device *dev,
+ struct ieee80211_rx_stats *pstats)
{
-// u32 debug_level = DBG_LOUD;
int total_length;
u8 cmd_length, exe_cnt = 0;
u8 element_id;
u8 *pcmd_buff;
- /* 0. Check inpt arguments. If is is a command queue message or pointer is
- null. */
- if (/*(prfd->queue_id != CMPK_RX_QUEUE_ID) || */(pstats== NULL))
- {
- /* Print error message. */
- /*RT_TRACE(COMP_SEND, DebugLevel,
- ("\n\r[CMPK]-->Err queue id or pointer"));*/
+ /* 0. Check input arguments. If it is a command queue message or
+ the pointer is null. */
+ if (pstats == NULL)
return 0; /* This is not a command packet. */
- }
/* 1. Read received command packet message length from RFD. */
total_length = pstats->Length;
@@ -720,67 +612,58 @@ cmpk_message_handle_rx(
/* 3. Read command packet element id and length. */
element_id = pcmd_buff[0];
- /*RT_TRACE(COMP_SEND, DebugLevel,
- ("\n\r[CMPK]-->element ID=%d Len=%d", element_id, total_length));*/
/* 4. Check every received command packet content according to different
- element type. Because FW may aggregate RX command packet to minimize
- transmit time between DRV and FW.*/
- // Add a counter to prevent the lock in the loop from being held too long
- while (total_length > 0 && exe_cnt++ < 100)
- {
- /* 2007/01/17 MH We support aggregation of different cmd in the same packet. */
+ element type, because FW may aggregate RX command packets to
+ minimize transmit time between DRV and FW. */
+ /* Add a counter to prevent the lock in the loop from being held too
+ long */
+ while (total_length > 0 && exe_cnt++ < 100) {
+ /* We support aggregation of different cmd in the same packet */
element_id = pcmd_buff[0];
- switch (element_id)
- {
- case RX_TX_FEEDBACK:
- cmpk_handle_tx_feedback (dev, pcmd_buff);
- cmd_length = CMPK_RX_TX_FB_SIZE;
- break;
-
- case RX_INTERRUPT_STATUS:
- cmpk_handle_interrupt_status(dev, pcmd_buff);
- cmd_length = sizeof(cmpk_intr_sta_t);
- break;
-
- case BOTH_QUERY_CONFIG:
- cmpk_handle_query_config_rx(dev, pcmd_buff);
- cmd_length = CMPK_BOTH_QUERY_CONFIG_SIZE;
- break;
-
- case RX_TX_STATUS:
- cmpk_handle_tx_status(dev, pcmd_buff);
- cmd_length = CMPK_RX_TX_STS_SIZE;
- break;
-
- case RX_TX_PER_PKT_FEEDBACK:
- // You must at lease add a switch case element here,
- // Otherwise, we will jump to default case.
- //DbgPrint("CCX Test\r\n");
- cmd_length = CMPK_RX_TX_FB_SIZE;
- break;
-
- case RX_TX_RATE_HISTORY:
- //DbgPrint(" rx tx rate history\r\n");
- cmpk_handle_tx_rate_history(dev, pcmd_buff);
- cmd_length = CMPK_TX_RAHIS_SIZE;
- break;
-
- default:
-
- RT_TRACE(COMP_ERR, "---->cmpk_message_handle_rx():unknow CMD Element\n");
- return 1; /* This is a command packet. */
+ switch (element_id) {
+ case RX_TX_FEEDBACK:
+ cmpk_handle_tx_feedback(dev, pcmd_buff);
+ cmd_length = CMPK_RX_TX_FB_SIZE;
+ break;
+
+ case RX_INTERRUPT_STATUS:
+ cmpk_handle_interrupt_status(dev, pcmd_buff);
+ cmd_length = sizeof(cmpk_intr_sta_t);
+ break;
+
+ case BOTH_QUERY_CONFIG:
+ cmpk_handle_query_config_rx(dev, pcmd_buff);
+ cmd_length = CMPK_BOTH_QUERY_CONFIG_SIZE;
+ break;
+
+ case RX_TX_STATUS:
+ cmpk_handle_tx_status(dev, pcmd_buff);
+ cmd_length = CMPK_RX_TX_STS_SIZE;
+ break;
+
+ case RX_TX_PER_PKT_FEEDBACK:
+ /* You must at least add a switch case element here;
+ otherwise, we will jump to the default case. */
+ cmd_length = CMPK_RX_TX_FB_SIZE;
+ break;
+
+ case RX_TX_RATE_HISTORY:
+ cmpk_handle_tx_rate_history(dev, pcmd_buff);
+ cmd_length = CMPK_TX_RAHIS_SIZE;
+ break;
+
+ default:
+
+ RT_TRACE(COMP_ERR, "---->%s():unknown CMD Element\n",
+ __func__);
+ return 1; /* This is a command packet. */
}
- // 2007/01/22 MH Display received rx command packet info.
- //cmpk_Display_Message(cmd_length, pcmd_buff);
-
- // 2007/01/22 MH Add to display tx statistic.
- //cmpk_DisplayTxStatistic(pAdapter);
total_length -= cmd_length;
pcmd_buff += cmd_length;
- } /* while (total_length > 0) */
+ }
return 1; /* This is a command packet. */
-} /* CMPK_Message_Handle_Rx */
+}
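
cmpk_message_handle_rx() above walks a buffer that may carry several command elements back to back: each element starts with an id, the id determines its fixed length, and the cursor advances by that length until the buffer is exhausted or an unknown id is hit. A simplified sketch of that pattern, with made-up element ids and sizes rather than the driver's constants:

	#include <stdint.h>

	enum { SK_ELEM_A = 1, SK_ELEM_B = 2 };	/* hypothetical element ids */

	static int walk_aggregated(const uint8_t *buf, int total_len)
	{
		int guard = 0;

		while (total_len > 0 && guard++ < 100) {	/* bound the loop */
			int len;

			switch (buf[0]) {
			case SK_ELEM_A:
				len = 8;	/* hypothetical element size */
				break;
			case SK_ELEM_B:
				len = 16;
				break;
			default:
				return -1;	/* unknown element, stop */
			}
			total_len -= len;
			buf += len;
		}
		return 0;
	}
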
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index f034567122d..d58aa7e3b15 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -1000,12 +1000,8 @@ static int r871x_wx_set_priv(struct net_device *dev,
sprintf(ext, "LINKSPEED %d", mbps);
} else if (0 == strcasecmp(ext, "MACADDR")) {
/*Return mac address of the station */
- /*Macaddr = xx.xx.xx.xx.xx.xx */
- sprintf(ext,
- "MACADDR = %02x.%02x.%02x.%02x.%02x.%02x",
- *(dev->dev_addr), *(dev->dev_addr+1),
- *(dev->dev_addr+2), *(dev->dev_addr+3),
- *(dev->dev_addr+4), *(dev->dev_addr+5));
+ /* Macaddr = xx:xx:xx:xx:xx:xx */
+ sprintf(ext, "MACADDR = %pM", dev->dev_addr);
} else if (0 == strcasecmp(ext, "SCAN-ACTIVE")) {
/*Set scan type to active */
/*OK if successful */
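
%pM is the kernel's vsnprintf extension that prints a 6-byte MAC address as colon-separated hex, which is why the six %02x conversions above collapse into a single specifier. A minimal sketch of the same idiom (illustrative only, not driver code):

	#include <linux/kernel.h>

	static void show_mac(const u8 *addr)
	{
		char buf[32];

		/* "%pM" expands to xx:xx:xx:xx:xx:xx */
		snprintf(buf, sizeof(buf), "MACADDR = %pM", addr);
		pr_info("%s\n", buf);
	}
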
diff --git a/drivers/staging/silicom/bpctl_mod.c b/drivers/staging/silicom/bpctl_mod.c
index 48b9fb110ac..495272d0134 100644
--- a/drivers/staging/silicom/bpctl_mod.c
+++ b/drivers/staging/silicom/bpctl_mod.c
@@ -59,7 +59,7 @@ struct bypass_pfs_sd {
struct proc_dir_entry *bypass_entry;
};
-typedef struct _bpctl_dev {
+struct bpctl_dev {
char *name;
char *desc;
struct pci_dev *pdev; /* PCI device */
@@ -102,26 +102,26 @@ typedef struct _bpctl_dev {
char *bp_tx_data;
struct bypass_pfs_sd bypass_pfs_set;
-} bpctl_dev_t;
+};
-static bpctl_dev_t *bpctl_dev_arr;
+static struct bpctl_dev *bpctl_dev_arr;
static struct semaphore bpctl_sema;
static int device_num;
static int get_dev_idx(int ifindex);
-static bpctl_dev_t *get_master_port_fn(bpctl_dev_t *pbpctl_dev);
-static int disc_status(bpctl_dev_t *pbpctl_dev);
-static int bypass_status(bpctl_dev_t *pbpctl_dev);
-static int wdt_timer(bpctl_dev_t *pbpctl_dev, int *time_left);
-static bpctl_dev_t *get_status_port_fn(bpctl_dev_t *pbpctl_dev);
+static struct bpctl_dev *get_master_port_fn(struct bpctl_dev *pbpctl_dev);
+static int disc_status(struct bpctl_dev *pbpctl_dev);
+static int bypass_status(struct bpctl_dev *pbpctl_dev);
+static int wdt_timer(struct bpctl_dev *pbpctl_dev, int *time_left);
+static struct bpctl_dev *get_status_port_fn(struct bpctl_dev *pbpctl_dev);
static void if_scan_init(void);
-int bypass_proc_create_dev_sd(bpctl_dev_t *pbp_device_block);
-int bypass_proc_remove_dev_sd(bpctl_dev_t *pbp_device_block);
+int bypass_proc_create_dev_sd(struct bpctl_dev *pbp_device_block);
+int bypass_proc_remove_dev_sd(struct bpctl_dev *pbp_device_block);
int bp_proc_create(void);
-int is_bypass_fn(bpctl_dev_t *pbpctl_dev);
+int is_bypass_fn(struct bpctl_dev *pbpctl_dev);
int get_dev_idx_bsf(int bus, int slot, int func);
static unsigned long str_to_hex(char *p);
@@ -129,7 +129,7 @@ static int bp_device_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- static bpctl_dev_t *pbpctl_dev, *pbpctl_dev_m;
+ static struct bpctl_dev *pbpctl_dev, *pbpctl_dev_m;
int dev_num = 0, ret = 0, ret_d = 0, time_left = 0;
/* printk("BP_PROC_SUPPORT event =%d %s %d\n", event,dev->name, dev->ifindex ); */
/* return NOTIFY_DONE; */
@@ -284,17 +284,17 @@ static struct notifier_block bp_notifier_block = {
.notifier_call = bp_device_event,
};
-int is_bypass_fn(bpctl_dev_t *pbpctl_dev);
-int wdt_time_left(bpctl_dev_t *pbpctl_dev);
+int is_bypass_fn(struct bpctl_dev *pbpctl_dev);
+int wdt_time_left(struct bpctl_dev *pbpctl_dev);
-static void write_pulse(bpctl_dev_t *pbpctl_dev,
+static void write_pulse(struct bpctl_dev *pbpctl_dev,
unsigned int ctrl_ext,
unsigned char value, unsigned char len)
{
unsigned char ctrl_val = 0;
unsigned int i = len;
unsigned int ctrl = 0;
- bpctl_dev_t *pbpctl_dev_c = NULL;
+ struct bpctl_dev *pbpctl_dev_c = NULL;
if (pbpctl_dev->bp_i80)
ctrl = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
@@ -590,13 +590,13 @@ static void write_pulse(bpctl_dev_t *pbpctl_dev,
}
}
-static int read_pulse(bpctl_dev_t *pbpctl_dev, unsigned int ctrl_ext,
+static int read_pulse(struct bpctl_dev *pbpctl_dev, unsigned int ctrl_ext,
unsigned char len)
{
unsigned char ctrl_val = 0;
unsigned int i = len;
unsigned int ctrl = 0;
- bpctl_dev_t *pbpctl_dev_c = NULL;
+ struct bpctl_dev *pbpctl_dev_c = NULL;
if (pbpctl_dev->bp_i80)
ctrl = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
@@ -765,11 +765,11 @@ static int read_pulse(bpctl_dev_t *pbpctl_dev, unsigned int ctrl_ext,
return ctrl_val;
}
-static void write_reg(bpctl_dev_t *pbpctl_dev, unsigned char value,
+static void write_reg(struct bpctl_dev *pbpctl_dev, unsigned char value,
unsigned char addr)
{
uint32_t ctrl_ext = 0, ctrl = 0;
- bpctl_dev_t *pbpctl_dev_c = NULL;
+ struct bpctl_dev *pbpctl_dev_c = NULL;
unsigned long flags;
if (pbpctl_dev->bp_10g9) {
pbpctl_dev_c = get_status_port_fn(pbpctl_dev);
@@ -934,15 +934,15 @@ static void write_reg(bpctl_dev_t *pbpctl_dev, unsigned char value,
}
-static void write_data(bpctl_dev_t *pbpctl_dev, unsigned char value)
+static void write_data(struct bpctl_dev *pbpctl_dev, unsigned char value)
{
write_reg(pbpctl_dev, value, CMND_REG_ADDR);
}
-static int read_reg(bpctl_dev_t *pbpctl_dev, unsigned char addr)
+static int read_reg(struct bpctl_dev *pbpctl_dev, unsigned char addr)
{
uint32_t ctrl_ext = 0, ctrl = 0, ctrl_value = 0;
- bpctl_dev_t *pbpctl_dev_c = NULL;
+ struct bpctl_dev *pbpctl_dev_c = NULL;
#ifdef BP_SYNC_FLAG
unsigned long flags;
@@ -1208,10 +1208,10 @@ static int read_reg(bpctl_dev_t *pbpctl_dev, unsigned char addr)
return ctrl_value;
}
-static int wdt_pulse(bpctl_dev_t *pbpctl_dev)
+static int wdt_pulse(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0, ctrl = 0;
- bpctl_dev_t *pbpctl_dev_c = NULL;
+ struct bpctl_dev *pbpctl_dev_c = NULL;
#ifdef BP_SYNC_FLAG
unsigned long flags;
@@ -1424,7 +1424,7 @@ static int wdt_pulse(bpctl_dev_t *pbpctl_dev)
return 0;
}
-static void data_pulse(bpctl_dev_t *pbpctl_dev, unsigned char value)
+static void data_pulse(struct bpctl_dev *pbpctl_dev, unsigned char value)
{
uint32_t ctrl_ext = 0;
@@ -1490,7 +1490,7 @@ static void data_pulse(bpctl_dev_t *pbpctl_dev, unsigned char value)
}
-static int send_wdt_pulse(bpctl_dev_t *pbpctl_dev)
+static int send_wdt_pulse(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0;
@@ -1524,7 +1524,7 @@ static int send_wdt_pulse(bpctl_dev_t *pbpctl_dev)
return 0;
}
-void send_bypass_clear_pulse(bpctl_dev_t *pbpctl_dev, unsigned int value)
+void send_bypass_clear_pulse(struct bpctl_dev *pbpctl_dev, unsigned int value)
{
uint32_t ctrl_ext = 0;
@@ -1550,7 +1550,7 @@ void send_bypass_clear_pulse(bpctl_dev_t *pbpctl_dev, unsigned int value)
/* #endif OLD_FW */
#ifdef BYPASS_DEBUG
-int pulse_set_fn(bpctl_dev_t *pbpctl_dev, unsigned int counter)
+int pulse_set_fn(struct bpctl_dev *pbpctl_dev, unsigned int counter)
{
uint32_t ctrl_ext = 0;
@@ -1578,7 +1578,7 @@ int pulse_set_fn(bpctl_dev_t *pbpctl_dev, unsigned int counter)
return 0;
}
-int zero_set_fn(bpctl_dev_t *pbpctl_dev)
+int zero_set_fn(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0, ctrl_value = 0;
if (!pbpctl_dev)
@@ -1603,7 +1603,7 @@ int zero_set_fn(bpctl_dev_t *pbpctl_dev)
return ctrl_value;
}
-int pulse_get2_fn(bpctl_dev_t *pbpctl_dev)
+int pulse_get2_fn(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0, ctrl_value = 0;
if (!pbpctl_dev)
@@ -1618,7 +1618,7 @@ int pulse_get2_fn(bpctl_dev_t *pbpctl_dev)
return ctrl_value;
}
-int pulse_get1_fn(bpctl_dev_t *pbpctl_dev)
+int pulse_get1_fn(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0, ctrl_value = 0;
if (!pbpctl_dev)
@@ -1635,7 +1635,7 @@ int pulse_get1_fn(bpctl_dev_t *pbpctl_dev)
return ctrl_value;
}
-int gpio6_set_fn(bpctl_dev_t *pbpctl_dev)
+int gpio6_set_fn(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0;
@@ -1646,7 +1646,7 @@ int gpio6_set_fn(bpctl_dev_t *pbpctl_dev)
return 0;
}
-int gpio7_set_fn(bpctl_dev_t *pbpctl_dev)
+int gpio7_set_fn(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0;
@@ -1657,7 +1657,7 @@ int gpio7_set_fn(bpctl_dev_t *pbpctl_dev)
return 0;
}
-int gpio7_clear_fn(bpctl_dev_t *pbpctl_dev)
+int gpio7_clear_fn(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0;
@@ -1668,7 +1668,7 @@ int gpio7_clear_fn(bpctl_dev_t *pbpctl_dev)
return 0;
}
-int gpio6_clear_fn(bpctl_dev_t *pbpctl_dev)
+int gpio6_clear_fn(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0;
@@ -1680,9 +1680,9 @@ int gpio6_clear_fn(bpctl_dev_t *pbpctl_dev)
}
#endif /*BYPASS_DEBUG */
-static bpctl_dev_t *lookup_port(bpctl_dev_t *dev)
+static struct bpctl_dev *lookup_port(struct bpctl_dev *dev)
{
- bpctl_dev_t *p;
+ struct bpctl_dev *p;
int n;
for (n = 0, p = bpctl_dev_arr; n < device_num && p->pdev; n++) {
if (p->bus == dev->bus
@@ -1693,7 +1693,7 @@ static bpctl_dev_t *lookup_port(bpctl_dev_t *dev)
return NULL;
}
-static bpctl_dev_t *get_status_port_fn(bpctl_dev_t *pbpctl_dev)
+static struct bpctl_dev *get_status_port_fn(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev) {
if (pbpctl_dev->func == 0 || pbpctl_dev->func == 2)
@@ -1702,7 +1702,7 @@ static bpctl_dev_t *get_status_port_fn(bpctl_dev_t *pbpctl_dev)
return NULL;
}
-static bpctl_dev_t *get_master_port_fn(bpctl_dev_t *pbpctl_dev)
+static struct bpctl_dev *get_master_port_fn(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev) {
if (pbpctl_dev->func == 1 || pbpctl_dev->func == 3)
@@ -1715,7 +1715,7 @@ static bpctl_dev_t *get_master_port_fn(bpctl_dev_t *pbpctl_dev)
/**************INTEL API***************/
/**************************************/
-static void write_data_port_int(bpctl_dev_t *pbpctl_dev,
+static void write_data_port_int(struct bpctl_dev *pbpctl_dev,
unsigned char ctrl_value)
{
uint32_t value;
@@ -1740,9 +1740,9 @@ static void write_data_port_int(bpctl_dev_t *pbpctl_dev,
}
-static int write_data_int(bpctl_dev_t *pbpctl_dev, unsigned char value)
+static int write_data_int(struct bpctl_dev *pbpctl_dev, unsigned char value)
{
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
if (!pbpctl_dev_b)
@@ -1755,7 +1755,7 @@ static int write_data_int(bpctl_dev_t *pbpctl_dev, unsigned char value)
return 0;
}
-static int wdt_pulse_int(bpctl_dev_t *pbpctl_dev)
+static int wdt_pulse_int(struct bpctl_dev *pbpctl_dev)
{
if ((atomic_read(&pbpctl_dev->wdt_busy)) == 1)
@@ -1779,7 +1779,7 @@ static int wdt_pulse_int(bpctl_dev_t *pbpctl_dev)
/*************************************/
/* CMND_ON 0x4 (100)*/
-int cmnd_on(bpctl_dev_t *pbpctl_dev)
+int cmnd_on(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -1796,7 +1796,7 @@ int cmnd_on(bpctl_dev_t *pbpctl_dev)
}
/* CMND_OFF 0x2 (10)*/
-int cmnd_off(bpctl_dev_t *pbpctl_dev)
+int cmnd_off(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -1809,12 +1809,12 @@ int cmnd_off(bpctl_dev_t *pbpctl_dev)
else
data_pulse(pbpctl_dev, CMND_OFF);
ret = 0;
- };
+ }
return ret;
}
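
Several hunks here also drop a stray semicolon after a closing brace; it is an empty statement that does nothing and that static checkers commonly flag. In miniature (hypothetical function):

	int set_cap_before(int cap)
	{
		int ret = -1;

		if (cap) {
			ret = 0;
		};	/* <-- spurious empty statement */
		return ret;
	}

	int set_cap_after(int cap)
	{
		int ret = -1;

		if (cap) {
			ret = 0;
		}
		return ret;
	}
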
/* BYPASS_ON (0xa)*/
-int bypass_on(bpctl_dev_t *pbpctl_dev)
+int bypass_on(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -1830,12 +1830,12 @@ int bypass_on(bpctl_dev_t *pbpctl_dev)
} else
data_pulse(pbpctl_dev, BYPASS_ON);
ret = 0;
- };
+ }
return ret;
}
/* BYPASS_OFF (0x8 111)*/
-int bypass_off(bpctl_dev_t *pbpctl_dev)
+int bypass_off(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -1858,7 +1858,7 @@ int bypass_off(bpctl_dev_t *pbpctl_dev)
}
/* TAP_OFF (0x9)*/
-int tap_off(bpctl_dev_t *pbpctl_dev)
+int tap_off(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
if ((pbpctl_dev->bp_caps & TAP_CAP)
@@ -1866,12 +1866,12 @@ int tap_off(bpctl_dev_t *pbpctl_dev)
write_data(pbpctl_dev, TAP_OFF);
msec_delay_bp(LATCH_DELAY);
ret = 0;
- };
+ }
return ret;
}
/* TAP_ON (0xb)*/
-int tap_on(bpctl_dev_t *pbpctl_dev)
+int tap_on(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
if ((pbpctl_dev->bp_caps & TAP_CAP)
@@ -1879,12 +1879,12 @@ int tap_on(bpctl_dev_t *pbpctl_dev)
write_data(pbpctl_dev, TAP_ON);
msec_delay_bp(LATCH_DELAY);
ret = 0;
- };
+ }
return ret;
}
/* DISC_OFF (0x9)*/
-int disc_off(bpctl_dev_t *pbpctl_dev)
+int disc_off(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if ((pbpctl_dev->bp_caps & DISC_CAP) && (pbpctl_dev->bp_ext_ver >= 0x8)) {
@@ -1896,7 +1896,7 @@ int disc_off(bpctl_dev_t *pbpctl_dev)
}
/* DISC_ON (0xb)*/
-int disc_on(bpctl_dev_t *pbpctl_dev)
+int disc_on(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if ((pbpctl_dev->bp_caps & DISC_CAP) && (pbpctl_dev->bp_ext_ver >= 0x8)) {
@@ -1908,10 +1908,10 @@ int disc_on(bpctl_dev_t *pbpctl_dev)
}
/* DISC_PORT_ON */
-int disc_port_on(bpctl_dev_t *pbpctl_dev)
+int disc_port_on(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
- bpctl_dev_t *pbpctl_dev_m;
+ struct bpctl_dev *pbpctl_dev_m;
if ((is_bypass_fn(pbpctl_dev)) == 1)
pbpctl_dev_m = pbpctl_dev;
@@ -1933,10 +1933,10 @@ int disc_port_on(bpctl_dev_t *pbpctl_dev)
}
/* DISC_PORT_OFF */
-int disc_port_off(bpctl_dev_t *pbpctl_dev)
+int disc_port_off(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
- bpctl_dev_t *pbpctl_dev_m;
+ struct bpctl_dev *pbpctl_dev_m;
if ((is_bypass_fn(pbpctl_dev)) == 1)
pbpctl_dev_m = pbpctl_dev;
@@ -1958,10 +1958,10 @@ int disc_port_off(bpctl_dev_t *pbpctl_dev)
}
/*TWO_PORT_LINK_HW_EN (0xe)*/
-int tpl_hw_on(bpctl_dev_t *pbpctl_dev)
+int tpl_hw_on(struct bpctl_dev *pbpctl_dev)
{
int ret = 0, ctrl = 0;
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
if (!pbpctl_dev_b)
@@ -1986,10 +1986,10 @@ int tpl_hw_on(bpctl_dev_t *pbpctl_dev)
}
/*TWO_PORT_LINK_HW_DIS (0xc)*/
-int tpl_hw_off(bpctl_dev_t *pbpctl_dev)
+int tpl_hw_off(struct bpctl_dev *pbpctl_dev)
{
int ret = 0, ctrl = 0;
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
if (!pbpctl_dev_b)
@@ -2012,7 +2012,7 @@ int tpl_hw_off(bpctl_dev_t *pbpctl_dev)
}
/* WDT_OFF (0x6 110)*/
-int wdt_off(bpctl_dev_t *pbpctl_dev)
+int wdt_off(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -2025,7 +2025,7 @@ int wdt_off(bpctl_dev_t *pbpctl_dev)
data_pulse(pbpctl_dev, WDT_OFF);
pbpctl_dev->wdt_status = WDT_STATUS_DIS;
ret = 0;
- };
+ }
return ret;
}
@@ -2035,7 +2035,7 @@ int wdt_off(bpctl_dev_t *pbpctl_dev)
static unsigned int
wdt_val_array[] = { 1000, 1500, 2000, 3000, 4000, 8000, 16000, 32000, 0 };
-int wdt_on(bpctl_dev_t *pbpctl_dev, unsigned int timeout)
+int wdt_on(struct bpctl_dev *pbpctl_dev, unsigned int timeout)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
@@ -2087,7 +2087,7 @@ int wdt_on(bpctl_dev_t *pbpctl_dev, unsigned int timeout)
return BP_NOT_CAP;
}
-void bp75_put_hw_semaphore_generic(bpctl_dev_t *pbpctl_dev)
+void bp75_put_hw_semaphore_generic(struct bpctl_dev *pbpctl_dev)
{
u32 swsm;
@@ -2098,7 +2098,7 @@ void bp75_put_hw_semaphore_generic(bpctl_dev_t *pbpctl_dev)
BPCTL_WRITE_REG(pbpctl_dev, SWSM, swsm);
}
-s32 bp75_get_hw_semaphore_generic(bpctl_dev_t *pbpctl_dev)
+s32 bp75_get_hw_semaphore_generic(struct bpctl_dev *pbpctl_dev)
{
u32 swsm;
s32 ret_val = 0;
@@ -2146,7 +2146,7 @@ s32 bp75_get_hw_semaphore_generic(bpctl_dev_t *pbpctl_dev)
return ret_val;
}
-static void bp75_release_phy(bpctl_dev_t *pbpctl_dev)
+static void bp75_release_phy(struct bpctl_dev *pbpctl_dev)
{
u16 mask = BPCTLI_SWFW_PHY0_SM;
u32 swfw_sync;
@@ -2166,7 +2166,7 @@ static void bp75_release_phy(bpctl_dev_t *pbpctl_dev)
bp75_put_hw_semaphore_generic(pbpctl_dev);
}
-static s32 bp75_acquire_phy(bpctl_dev_t *pbpctl_dev)
+static s32 bp75_acquire_phy(struct bpctl_dev *pbpctl_dev)
{
u16 mask = BPCTLI_SWFW_PHY0_SM;
u32 swfw_sync;
@@ -2212,7 +2212,7 @@ static s32 bp75_acquire_phy(bpctl_dev_t *pbpctl_dev)
return ret_val;
}
-s32 bp75_read_phy_reg_mdic(bpctl_dev_t *pbpctl_dev, u32 offset, u16 *data)
+s32 bp75_read_phy_reg_mdic(struct bpctl_dev *pbpctl_dev, u32 offset, u16 *data)
{
u32 i, mdic = 0;
s32 ret_val = 0;
@@ -2245,7 +2245,7 @@ s32 bp75_read_phy_reg_mdic(bpctl_dev_t *pbpctl_dev, u32 offset, u16 *data)
return ret_val;
}
-s32 bp75_write_phy_reg_mdic(bpctl_dev_t *pbpctl_dev, u32 offset, u16 data)
+s32 bp75_write_phy_reg_mdic(struct bpctl_dev *pbpctl_dev, u32 offset, u16 data)
{
u32 i, mdic = 0;
s32 ret_val = 0;
@@ -2278,7 +2278,7 @@ s32 bp75_write_phy_reg_mdic(bpctl_dev_t *pbpctl_dev, u32 offset, u16 data)
return ret_val;
}
-static s32 bp75_read_phy_reg(bpctl_dev_t *pbpctl_dev, u32 offset, u16 *data)
+static s32 bp75_read_phy_reg(struct bpctl_dev *pbpctl_dev, u32 offset, u16 *data)
{
s32 ret_val = 0;
@@ -2304,7 +2304,7 @@ static s32 bp75_read_phy_reg(bpctl_dev_t *pbpctl_dev, u32 offset, u16 *data)
return ret_val;
}
-static s32 bp75_write_phy_reg(bpctl_dev_t *pbpctl_dev, u32 offset, u16 data)
+static s32 bp75_write_phy_reg(struct bpctl_dev *pbpctl_dev, u32 offset, u16 data)
{
s32 ret_val = 0;
@@ -2332,10 +2332,10 @@ static s32 bp75_write_phy_reg(bpctl_dev_t *pbpctl_dev, u32 offset, u16 data)
}
/* SET_TX (non-Bypass command :)) */
-static int set_tx(bpctl_dev_t *pbpctl_dev, int tx_state)
+static int set_tx(struct bpctl_dev *pbpctl_dev, int tx_state)
{
int ret = 0, ctrl = 0;
- bpctl_dev_t *pbpctl_dev_m;
+ struct bpctl_dev *pbpctl_dev_m;
if ((is_bypass_fn(pbpctl_dev)) == 1)
pbpctl_dev_m = pbpctl_dev;
else
@@ -2532,7 +2532,7 @@ static int set_tx(bpctl_dev_t *pbpctl_dev, int tx_state)
}
/* SET_FORCE_LINK (non-Bypass command :)) */
-static int set_bp_force_link(bpctl_dev_t *pbpctl_dev, int tx_state)
+static int set_bp_force_link(struct bpctl_dev *pbpctl_dev, int tx_state)
{
int ret = 0, ctrl = 0;
@@ -2556,7 +2556,7 @@ static int set_bp_force_link(bpctl_dev_t *pbpctl_dev, int tx_state)
}
/*RESET_CONT 0x20 */
-int reset_cont(bpctl_dev_t *pbpctl_dev)
+int reset_cont(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -2568,12 +2568,12 @@ int reset_cont(bpctl_dev_t *pbpctl_dev)
else
data_pulse(pbpctl_dev, RESET_CONT);
ret = 0;
- };
+ }
return ret;
}
/*DIS_BYPASS_CAP 0x22 */
-int dis_bypass_cap(bpctl_dev_t *pbpctl_dev)
+int dis_bypass_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_DIS_CAP) {
@@ -2592,7 +2592,7 @@ int dis_bypass_cap(bpctl_dev_t *pbpctl_dev)
}
/*EN_BYPASS_CAP 0x24 */
-int en_bypass_cap(bpctl_dev_t *pbpctl_dev)
+int en_bypass_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_DIS_CAP) {
if (INTEL_IF_SERIES(pbpctl_dev->subdevice)) {
@@ -2608,7 +2608,7 @@ int en_bypass_cap(bpctl_dev_t *pbpctl_dev)
}
/* BYPASS_STATE_PWRON 0x26*/
-int bypass_state_pwron(bpctl_dev_t *pbpctl_dev)
+int bypass_state_pwron(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_PWUP_CTL_CAP) {
write_data(pbpctl_dev, BYPASS_STATE_PWRON);
@@ -2622,7 +2622,7 @@ int bypass_state_pwron(bpctl_dev_t *pbpctl_dev)
}
/* NORMAL_STATE_PWRON 0x28*/
-int normal_state_pwron(bpctl_dev_t *pbpctl_dev)
+int normal_state_pwron(struct bpctl_dev *pbpctl_dev)
{
if ((pbpctl_dev->bp_caps & BP_PWUP_CTL_CAP)
|| (pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP)) {
@@ -2637,7 +2637,7 @@ int normal_state_pwron(bpctl_dev_t *pbpctl_dev)
}
/* BYPASS_STATE_PWROFF 0x27*/
-int bypass_state_pwroff(bpctl_dev_t *pbpctl_dev)
+int bypass_state_pwroff(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_PWOFF_CTL_CAP) {
write_data(pbpctl_dev, BYPASS_STATE_PWROFF);
@@ -2648,7 +2648,7 @@ int bypass_state_pwroff(bpctl_dev_t *pbpctl_dev)
}
/* NORMAL_STATE_PWROFF 0x29*/
-int normal_state_pwroff(bpctl_dev_t *pbpctl_dev)
+int normal_state_pwroff(struct bpctl_dev *pbpctl_dev)
{
if ((pbpctl_dev->bp_caps & BP_PWOFF_CTL_CAP)) {
write_data(pbpctl_dev, NORMAL_STATE_PWROFF);
@@ -2659,7 +2659,7 @@ int normal_state_pwroff(bpctl_dev_t *pbpctl_dev)
}
/*TAP_STATE_PWRON 0x2a*/
-int tap_state_pwron(bpctl_dev_t *pbpctl_dev)
+int tap_state_pwron(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP) {
write_data(pbpctl_dev, TAP_STATE_PWRON);
@@ -2670,7 +2670,7 @@ int tap_state_pwron(bpctl_dev_t *pbpctl_dev)
}
/*DIS_TAP_CAP 0x2c*/
-int dis_tap_cap(bpctl_dev_t *pbpctl_dev)
+int dis_tap_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_DIS_CAP) {
write_data(pbpctl_dev, DIS_TAP_CAP);
@@ -2681,7 +2681,7 @@ int dis_tap_cap(bpctl_dev_t *pbpctl_dev)
}
/*EN_TAP_CAP 0x2e*/
-int en_tap_cap(bpctl_dev_t *pbpctl_dev)
+int en_tap_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_DIS_CAP) {
write_data(pbpctl_dev, EN_TAP_CAP);
@@ -2692,7 +2692,7 @@ int en_tap_cap(bpctl_dev_t *pbpctl_dev)
}
/*DISC_STATE_PWRON 0x2a*/
-int disc_state_pwron(bpctl_dev_t *pbpctl_dev)
+int disc_state_pwron(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DISC_PWUP_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8) {
@@ -2705,7 +2705,7 @@ int disc_state_pwron(bpctl_dev_t *pbpctl_dev)
}
/*DIS_DISC_CAP 0x2c*/
-int dis_disc_cap(bpctl_dev_t *pbpctl_dev)
+int dis_disc_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DISC_DIS_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8) {
@@ -2718,10 +2718,10 @@ int dis_disc_cap(bpctl_dev_t *pbpctl_dev)
}
/*DISC_STATE_PWRON 0x2a*/
-int disc_port_state_pwron(bpctl_dev_t *pbpctl_dev)
+int disc_port_state_pwron(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
- bpctl_dev_t *pbpctl_dev_m;
+ struct bpctl_dev *pbpctl_dev_m;
return BP_NOT_CAP;
@@ -2744,10 +2744,10 @@ int disc_port_state_pwron(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int normal_port_state_pwron(bpctl_dev_t *pbpctl_dev)
+int normal_port_state_pwron(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
- bpctl_dev_t *pbpctl_dev_m;
+ struct bpctl_dev *pbpctl_dev_m;
return BP_NOT_CAP;
if ((is_bypass_fn(pbpctl_dev)) == 1)
@@ -2770,7 +2770,7 @@ int normal_port_state_pwron(bpctl_dev_t *pbpctl_dev)
}
/*EN_TAP_CAP 0x2e*/
-int en_disc_cap(bpctl_dev_t *pbpctl_dev)
+int en_disc_cap(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DISC_DIS_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8) {
@@ -2782,7 +2782,7 @@ int en_disc_cap(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int std_nic_on(bpctl_dev_t *pbpctl_dev)
+int std_nic_on(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & STD_NIC_CAP) {
@@ -2836,7 +2836,7 @@ int std_nic_on(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int std_nic_off(bpctl_dev_t *pbpctl_dev)
+int std_nic_off(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & STD_NIC_CAP) {
@@ -2888,7 +2888,7 @@ int std_nic_off(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int wdt_time_left(bpctl_dev_t *pbpctl_dev)
+int wdt_time_left(struct bpctl_dev *pbpctl_dev)
{
/* unsigned long curr_time=((long long)(jiffies*1000))/HZ, delta_time=0,wdt_on_time=((long long)(pbpctl_dev->bypass_wdt_on_time*1000))/HZ; */
@@ -2920,7 +2920,7 @@ int wdt_time_left(bpctl_dev_t *pbpctl_dev)
return time_left;
}
-static int wdt_timer(bpctl_dev_t *pbpctl_dev, int *time_left)
+static int wdt_timer(struct bpctl_dev *pbpctl_dev, int *time_left)
{
int ret = 0;
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
@@ -2936,7 +2936,7 @@ static int wdt_timer(bpctl_dev_t *pbpctl_dev, int *time_left)
return ret;
}
-static int wdt_timer_reload(bpctl_dev_t *pbpctl_dev)
+static int wdt_timer_reload(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
@@ -2960,7 +2960,7 @@ static int wdt_timer_reload(bpctl_dev_t *pbpctl_dev)
static void wd_reset_timer(unsigned long param)
{
- bpctl_dev_t *pbpctl_dev = (bpctl_dev_t *) param;
+ struct bpctl_dev *pbpctl_dev = (struct bpctl_dev *) param;
#ifdef BP_SELF_TEST
struct sk_buff *skb_tmp;
#endif
@@ -2999,7 +2999,7 @@ static void wd_reset_timer(unsigned long param)
}
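
wd_reset_timer() above is an old-style (pre-timer_setup()) kernel timer callback: it receives the device through the timer's unsigned long data field and re-arms itself with mod_timer(). A minimal sketch of wiring such a timer under that era's API, with hypothetical names:

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct wd_example {
		struct timer_list timer;
		unsigned int period_ms;
	};

	static void wd_example_fn(unsigned long param)
	{
		struct wd_example *ex = (struct wd_example *)param;

		/* ... service the watchdog here, then re-arm ... */
		mod_timer(&ex->timer,
			  jiffies + msecs_to_jiffies(ex->period_ms));
	}

	static void wd_example_start(struct wd_example *ex)
	{
		init_timer(&ex->timer);
		ex->timer.function = wd_example_fn;
		ex->timer.data = (unsigned long)ex;
		mod_timer(&ex->timer,
			  jiffies + msecs_to_jiffies(ex->period_ms));
	}
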
/*WAIT_AT_PWRUP 0x80 */
-int bp_wait_at_pwup_en(bpctl_dev_t *pbpctl_dev)
+int bp_wait_at_pwup_en(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3014,7 +3014,7 @@ int bp_wait_at_pwup_en(bpctl_dev_t *pbpctl_dev)
}
/*DIS_WAIT_AT_PWRUP 0x81 */
-int bp_wait_at_pwup_dis(bpctl_dev_t *pbpctl_dev)
+int bp_wait_at_pwup_dis(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3031,7 +3031,7 @@ int bp_wait_at_pwup_dis(bpctl_dev_t *pbpctl_dev)
/*EN_HW_RESET 0x82 */
-int bp_hw_reset_en(bpctl_dev_t *pbpctl_dev)
+int bp_hw_reset_en(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3047,7 +3047,7 @@ int bp_hw_reset_en(bpctl_dev_t *pbpctl_dev)
/*DIS_HW_RESET 0x83 */
-int bp_hw_reset_dis(bpctl_dev_t *pbpctl_dev)
+int bp_hw_reset_dis(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3062,7 +3062,7 @@ int bp_hw_reset_dis(bpctl_dev_t *pbpctl_dev)
}
-int wdt_exp_mode(bpctl_dev_t *pbpctl_dev, int mode)
+int wdt_exp_mode(struct bpctl_dev *pbpctl_dev, int mode)
{
uint32_t status_reg = 0, status_reg1 = 0;
@@ -3113,7 +3113,7 @@ int wdt_exp_mode(bpctl_dev_t *pbpctl_dev, int mode)
return BP_NOT_CAP;
}
-int bypass_fw_ver(bpctl_dev_t *pbpctl_dev)
+int bypass_fw_ver(struct bpctl_dev *pbpctl_dev)
{
if (is_bypass_fn(pbpctl_dev))
return read_reg(pbpctl_dev, VER_REG_ADDR);
@@ -3121,7 +3121,7 @@ int bypass_fw_ver(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_sign_check(bpctl_dev_t *pbpctl_dev)
+int bypass_sign_check(struct bpctl_dev *pbpctl_dev)
{
if (is_bypass_fn(pbpctl_dev))
@@ -3131,10 +3131,10 @@ int bypass_sign_check(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-static int tx_status(bpctl_dev_t *pbpctl_dev)
+static int tx_status(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl = 0;
- bpctl_dev_t *pbpctl_dev_m;
+ struct bpctl_dev *pbpctl_dev_m;
if ((is_bypass_fn(pbpctl_dev)) == 1)
pbpctl_dev_m = pbpctl_dev;
else
@@ -3218,7 +3218,7 @@ static int tx_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-static int bp_force_link_status(bpctl_dev_t *pbpctl_dev)
+static int bp_force_link_status(struct bpctl_dev *pbpctl_dev)
{
if (DBI_IF_SERIES(pbpctl_dev->subdevice)) {
@@ -3232,10 +3232,10 @@ static int bp_force_link_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_from_last_read(bpctl_dev_t *pbpctl_dev)
+int bypass_from_last_read(struct bpctl_dev *pbpctl_dev)
{
uint32_t ctrl_ext = 0;
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
@@ -3252,9 +3252,9 @@ int bypass_from_last_read(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_status_clear(bpctl_dev_t *pbpctl_dev)
+int bypass_status_clear(struct bpctl_dev *pbpctl_dev)
{
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
@@ -3266,7 +3266,7 @@ int bypass_status_clear(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_flag_status(bpctl_dev_t *pbpctl_dev)
+int bypass_flag_status(struct bpctl_dev *pbpctl_dev)
{
if ((pbpctl_dev->bp_caps & BP_CAP)) {
@@ -3279,7 +3279,7 @@ int bypass_flag_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_flag_status_clear(bpctl_dev_t *pbpctl_dev)
+int bypass_flag_status_clear(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_CAP) {
@@ -3294,7 +3294,7 @@ int bypass_flag_status_clear(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int bypass_change_status(bpctl_dev_t *pbpctl_dev)
+int bypass_change_status(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -3313,7 +3313,7 @@ int bypass_change_status(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int bypass_off_status(bpctl_dev_t *pbpctl_dev)
+int bypass_off_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_CAP) {
@@ -3325,12 +3325,12 @@ int bypass_off_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-static int bypass_status(bpctl_dev_t *pbpctl_dev)
+static int bypass_status(struct bpctl_dev *pbpctl_dev)
{
u32 ctrl_ext = 0;
if (pbpctl_dev->bp_caps & BP_CAP) {
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
if (!pbpctl_dev_b)
@@ -3408,7 +3408,7 @@ static int bypass_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int default_pwron_status(bpctl_dev_t *pbpctl_dev)
+int default_pwron_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3426,7 +3426,7 @@ int default_pwron_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-static int default_pwroff_status(bpctl_dev_t *pbpctl_dev)
+static int default_pwroff_status(struct bpctl_dev *pbpctl_dev)
{
/*if ((!pbpctl_dev->bp_caps&BP_DIS_CAP)&&
@@ -3440,7 +3440,7 @@ static int default_pwroff_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int dis_bypass_cap_status(bpctl_dev_t *pbpctl_dev)
+int dis_bypass_cap_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & BP_DIS_CAP) {
@@ -3453,7 +3453,7 @@ int dis_bypass_cap_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int cmd_en_status(bpctl_dev_t *pbpctl_dev)
+int cmd_en_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3465,7 +3465,7 @@ int cmd_en_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int wdt_en_status(bpctl_dev_t *pbpctl_dev)
+int wdt_en_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
@@ -3477,7 +3477,7 @@ int wdt_en_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int wdt_programmed(bpctl_dev_t *pbpctl_dev, int *timeout)
+int wdt_programmed(struct bpctl_dev *pbpctl_dev, int *timeout)
{
int ret = 0;
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
@@ -3497,13 +3497,13 @@ int wdt_programmed(bpctl_dev_t *pbpctl_dev, int *timeout)
*timeout =
curr_wdt_status ==
0 ? 0 : pbpctl_dev->bypass_timer_interval;
- };
+ }
} else
ret = BP_NOT_CAP;
return ret;
}
-int bypass_support(bpctl_dev_t *pbpctl_dev)
+int bypass_support(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
@@ -3520,7 +3520,7 @@ int bypass_support(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int tap_support(bpctl_dev_t *pbpctl_dev)
+int tap_support(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
@@ -3536,7 +3536,7 @@ int tap_support(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int normal_support(bpctl_dev_t *pbpctl_dev)
+int normal_support(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
@@ -3548,11 +3548,11 @@ int normal_support(bpctl_dev_t *pbpctl_dev)
NORMAL_UNSUPPORT_MASK) ? 0 : 1);
} else
ret = 1;
- };
+ }
return ret;
}
-int get_bp_prod_caps(bpctl_dev_t *pbpctl_dev)
+int get_bp_prod_caps(struct bpctl_dev *pbpctl_dev)
{
if ((pbpctl_dev->bp_caps & SW_CTL_CAP) &&
(pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER))
@@ -3561,7 +3561,7 @@ int get_bp_prod_caps(bpctl_dev_t *pbpctl_dev)
}
-int tap_flag_status(bpctl_dev_t *pbpctl_dev)
+int tap_flag_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_STATUS_CAP) {
@@ -3573,7 +3573,7 @@ int tap_flag_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int tap_flag_status_clear(bpctl_dev_t *pbpctl_dev)
+int tap_flag_status_clear(struct bpctl_dev *pbpctl_dev)
{
uint32_t status_reg = 0;
if (pbpctl_dev->bp_caps & TAP_STATUS_CAP) {
@@ -3587,7 +3587,7 @@ int tap_flag_status_clear(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int tap_change_status(bpctl_dev_t *pbpctl_dev)
+int tap_change_status(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER) {
@@ -3604,7 +3604,7 @@ int tap_change_status(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int tap_off_status(bpctl_dev_t *pbpctl_dev)
+int tap_off_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_CAP) {
if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER)
@@ -3614,12 +3614,12 @@ int tap_off_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int tap_status(bpctl_dev_t *pbpctl_dev)
+int tap_status(struct bpctl_dev *pbpctl_dev)
{
u32 ctrl_ext = 0;
if (pbpctl_dev->bp_caps & TAP_CAP) {
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
if (!pbpctl_dev_b)
@@ -3653,7 +3653,7 @@ int tap_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int default_pwron_tap_status(bpctl_dev_t *pbpctl_dev)
+int default_pwron_tap_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER)
@@ -3664,7 +3664,7 @@ int default_pwron_tap_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int dis_tap_cap_status(bpctl_dev_t *pbpctl_dev)
+int dis_tap_cap_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER)
@@ -3675,7 +3675,7 @@ int dis_tap_cap_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int disc_flag_status(bpctl_dev_t *pbpctl_dev)
+int disc_flag_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DISC_CAP) {
@@ -3687,7 +3687,7 @@ int disc_flag_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int disc_flag_status_clear(bpctl_dev_t *pbpctl_dev)
+int disc_flag_status_clear(struct bpctl_dev *pbpctl_dev)
{
uint32_t status_reg = 0;
if (pbpctl_dev->bp_caps & DISC_CAP) {
@@ -3701,7 +3701,7 @@ int disc_flag_status_clear(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int disc_change_status(bpctl_dev_t *pbpctl_dev)
+int disc_change_status(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
if (pbpctl_dev->bp_caps & DISC_CAP) {
@@ -3712,9 +3712,9 @@ int disc_change_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int disc_off_status(bpctl_dev_t *pbpctl_dev)
+int disc_off_status(struct bpctl_dev *pbpctl_dev)
{
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
u32 ctrl_ext = 0;
if (pbpctl_dev->bp_caps & DISC_CAP) {
@@ -3796,7 +3796,7 @@ int disc_off_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-static int disc_status(bpctl_dev_t *pbpctl_dev)
+static int disc_status(struct bpctl_dev *pbpctl_dev)
{
int ctrl = 0;
if (pbpctl_dev->bp_caps & DISC_CAP) {
@@ -3808,7 +3808,7 @@ static int disc_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int default_pwron_disc_status(bpctl_dev_t *pbpctl_dev)
+int default_pwron_disc_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DISC_PWUP_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8)
@@ -3819,7 +3819,7 @@ int default_pwron_disc_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int dis_disc_cap_status(bpctl_dev_t *pbpctl_dev)
+int dis_disc_cap_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & DIS_DISC_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8)
@@ -3830,10 +3830,10 @@ int dis_disc_cap_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int disc_port_status(bpctl_dev_t *pbpctl_dev)
+int disc_port_status(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
- bpctl_dev_t *pbpctl_dev_m;
+ struct bpctl_dev *pbpctl_dev_m;
if ((is_bypass_fn(pbpctl_dev)) == 1)
pbpctl_dev_m = pbpctl_dev;
@@ -3854,10 +3854,10 @@ int disc_port_status(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int default_pwron_disc_port_status(bpctl_dev_t *pbpctl_dev)
+int default_pwron_disc_port_status(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
- bpctl_dev_t *pbpctl_dev_m;
+ struct bpctl_dev *pbpctl_dev_m;
if ((is_bypass_fn(pbpctl_dev)) == 1)
pbpctl_dev_m = pbpctl_dev;
@@ -3878,7 +3878,7 @@ int default_pwron_disc_port_status(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int wdt_exp_mode_status(bpctl_dev_t *pbpctl_dev)
+int wdt_exp_mode_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver <= PXG2BPI_VER)
@@ -3901,7 +3901,7 @@ int wdt_exp_mode_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int tpl2_flag_status(bpctl_dev_t *pbpctl_dev)
+int tpl2_flag_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps_ex & TPL2_CAP_EX) {
@@ -3912,9 +3912,9 @@ int tpl2_flag_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int tpl_hw_status(bpctl_dev_t *pbpctl_dev)
+int tpl_hw_status(struct bpctl_dev *pbpctl_dev)
{
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
if (!pbpctl_dev_b)
@@ -3927,7 +3927,7 @@ int tpl_hw_status(bpctl_dev_t *pbpctl_dev)
}
-int bp_wait_at_pwup_status(bpctl_dev_t *pbpctl_dev)
+int bp_wait_at_pwup_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
if (pbpctl_dev->bp_ext_ver >= 0x8)
@@ -3938,7 +3938,7 @@ int bp_wait_at_pwup_status(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int bp_hw_reset_status(bpctl_dev_t *pbpctl_dev)
+int bp_hw_reset_status(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & SW_CTL_CAP) {
@@ -3952,7 +3952,7 @@ int bp_hw_reset_status(bpctl_dev_t *pbpctl_dev)
}
-int std_nic_status(bpctl_dev_t *pbpctl_dev)
+int std_nic_status(struct bpctl_dev *pbpctl_dev)
{
int status_val = 0;
@@ -4000,10 +4000,10 @@ int std_nic_status(bpctl_dev_t *pbpctl_dev)
/******************************************************/
/**************SW_INIT*********************************/
/******************************************************/
-void bypass_caps_init(bpctl_dev_t *pbpctl_dev)
+void bypass_caps_init(struct bpctl_dev *pbpctl_dev)
{
u_int32_t ctrl_ext = 0;
- bpctl_dev_t *pbpctl_dev_m = NULL;
+ struct bpctl_dev *pbpctl_dev_m = NULL;
#ifdef BYPASS_DEBUG
int ret = 0;
@@ -4218,7 +4218,7 @@ void bypass_caps_init(bpctl_dev_t *pbpctl_dev)
}
}
-int bypass_off_init(bpctl_dev_t *pbpctl_dev)
+int bypass_off_init(struct bpctl_dev *pbpctl_dev)
{
int ret = cmnd_on(pbpctl_dev);
if (ret < 0)
@@ -4234,10 +4234,10 @@ int bypass_off_init(bpctl_dev_t *pbpctl_dev)
return 0;
}
-void remove_bypass_wd_auto(bpctl_dev_t *pbpctl_dev)
+void remove_bypass_wd_auto(struct bpctl_dev *pbpctl_dev)
{
#ifdef BP_SELF_TEST
- bpctl_dev_t *pbpctl_dev_sl = NULL;
+ struct bpctl_dev *pbpctl_dev_sl = NULL;
#endif
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
@@ -4263,7 +4263,7 @@ void remove_bypass_wd_auto(bpctl_dev_t *pbpctl_dev)
}
-int init_bypass_wd_auto(bpctl_dev_t *pbpctl_dev)
+int init_bypass_wd_auto(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
init_timer(&pbpctl_dev->bp_timer);
@@ -4277,7 +4277,7 @@ int init_bypass_wd_auto(bpctl_dev_t *pbpctl_dev)
#ifdef BP_SELF_TEST
int bp_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- bpctl_dev_t *pbpctl_dev = NULL, *pbpctl_dev_m = NULL;
+ struct bpctl_dev *pbpctl_dev = NULL, *pbpctl_dev_m = NULL;
int idx_dev = 0;
struct ethhdr *eth = (struct ethhdr *)skb->data;
@@ -4310,7 +4310,7 @@ int bp_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
#endif
-int set_bypass_wd_auto(bpctl_dev_t *pbpctl_dev, unsigned int param)
+int set_bypass_wd_auto(struct bpctl_dev *pbpctl_dev, unsigned int param)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
if (pbpctl_dev->reset_time != param) {
@@ -4329,7 +4329,7 @@ int set_bypass_wd_auto(bpctl_dev_t *pbpctl_dev, unsigned int param)
return BP_NOT_CAP;
}
-int get_bypass_wd_auto(bpctl_dev_t *pbpctl_dev)
+int get_bypass_wd_auto(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP)
return pbpctl_dev->reset_time;
@@ -4339,9 +4339,9 @@ int get_bypass_wd_auto(bpctl_dev_t *pbpctl_dev)
#ifdef BP_SELF_TEST
-int set_bp_self_test(bpctl_dev_t *pbpctl_dev, unsigned int param)
+int set_bp_self_test(struct bpctl_dev *pbpctl_dev, unsigned int param)
{
- bpctl_dev_t *pbpctl_dev_sl = NULL;
+ struct bpctl_dev *pbpctl_dev_sl = NULL;
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
pbpctl_dev->bp_self_test_flag = param == 0 ? 0 : 1;
@@ -4374,7 +4374,7 @@ int set_bp_self_test(bpctl_dev_t *pbpctl_dev, unsigned int param)
return BP_NOT_CAP;
}
-int get_bp_self_test(bpctl_dev_t *pbpctl_dev)
+int get_bp_self_test(struct bpctl_dev *pbpctl_dev)
{
if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
@@ -4392,7 +4392,7 @@ int get_bp_self_test(bpctl_dev_t *pbpctl_dev)
/************************* API ********************************/
/**************************************************************/
-int is_bypass_fn(bpctl_dev_t *pbpctl_dev)
+int is_bypass_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4400,7 +4400,7 @@ int is_bypass_fn(bpctl_dev_t *pbpctl_dev)
return (((pbpctl_dev->func == 0) || (pbpctl_dev->func == 2)) ? 1 : 0);
}
-int set_bypass_fn(bpctl_dev_t *pbpctl_dev, int bypass_mode)
+int set_bypass_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
{
int ret = 0;
@@ -4418,12 +4418,12 @@ int set_bypass_fn(bpctl_dev_t *pbpctl_dev, int bypass_mode)
return ret;
}
-int get_bypass_fn(bpctl_dev_t *pbpctl_dev)
+int get_bypass_fn(struct bpctl_dev *pbpctl_dev)
{
return bypass_status(pbpctl_dev);
}
-int get_bypass_change_fn(bpctl_dev_t *pbpctl_dev)
+int get_bypass_change_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4431,7 +4431,7 @@ int get_bypass_change_fn(bpctl_dev_t *pbpctl_dev)
return bypass_change_status(pbpctl_dev);
}
-int set_dis_bypass_fn(bpctl_dev_t *pbpctl_dev, int dis_param)
+int set_dis_bypass_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4450,7 +4450,7 @@ int set_dis_bypass_fn(bpctl_dev_t *pbpctl_dev, int dis_param)
return ret;
}
-int get_dis_bypass_fn(bpctl_dev_t *pbpctl_dev)
+int get_dis_bypass_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4458,7 +4458,7 @@ int get_dis_bypass_fn(bpctl_dev_t *pbpctl_dev)
return dis_bypass_cap_status(pbpctl_dev);
}
-int set_bypass_pwoff_fn(bpctl_dev_t *pbpctl_dev, int bypass_mode)
+int set_bypass_pwoff_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4477,7 +4477,7 @@ int set_bypass_pwoff_fn(bpctl_dev_t *pbpctl_dev, int bypass_mode)
return ret;
}
-int get_bypass_pwoff_fn(bpctl_dev_t *pbpctl_dev)
+int get_bypass_pwoff_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4485,7 +4485,7 @@ int get_bypass_pwoff_fn(bpctl_dev_t *pbpctl_dev)
return default_pwroff_status(pbpctl_dev);
}
-int set_bypass_pwup_fn(bpctl_dev_t *pbpctl_dev, int bypass_mode)
+int set_bypass_pwup_fn(struct bpctl_dev *pbpctl_dev, int bypass_mode)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4504,7 +4504,7 @@ int set_bypass_pwup_fn(bpctl_dev_t *pbpctl_dev, int bypass_mode)
return ret;
}
-int get_bypass_pwup_fn(bpctl_dev_t *pbpctl_dev)
+int get_bypass_pwup_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4512,7 +4512,7 @@ int get_bypass_pwup_fn(bpctl_dev_t *pbpctl_dev)
return default_pwron_status(pbpctl_dev);
}
-int set_bypass_wd_fn(bpctl_dev_t *pbpctl_dev, int timeout)
+int set_bypass_wd_fn(struct bpctl_dev *pbpctl_dev, int timeout)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4534,7 +4534,7 @@ int set_bypass_wd_fn(bpctl_dev_t *pbpctl_dev, int timeout)
return ret;
}
-int get_bypass_wd_fn(bpctl_dev_t *pbpctl_dev, int *timeout)
+int get_bypass_wd_fn(struct bpctl_dev *pbpctl_dev, int *timeout)
{
if (!pbpctl_dev)
return -1;
@@ -4542,7 +4542,7 @@ int get_bypass_wd_fn(bpctl_dev_t *pbpctl_dev, int *timeout)
return wdt_programmed(pbpctl_dev, timeout);
}
-int get_wd_expire_time_fn(bpctl_dev_t *pbpctl_dev, int *time_left)
+int get_wd_expire_time_fn(struct bpctl_dev *pbpctl_dev, int *time_left)
{
if (!pbpctl_dev)
return -1;
@@ -4550,7 +4550,7 @@ int get_wd_expire_time_fn(bpctl_dev_t *pbpctl_dev, int *time_left)
return wdt_timer(pbpctl_dev, time_left);
}
-int reset_bypass_wd_timer_fn(bpctl_dev_t *pbpctl_dev)
+int reset_bypass_wd_timer_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4558,7 +4558,7 @@ int reset_bypass_wd_timer_fn(bpctl_dev_t *pbpctl_dev)
return wdt_timer_reload(pbpctl_dev);
}
-int get_wd_set_caps_fn(bpctl_dev_t *pbpctl_dev)
+int get_wd_set_caps_fn(struct bpctl_dev *pbpctl_dev)
{
int bp_status = 0;
@@ -4582,7 +4582,7 @@ int get_wd_set_caps_fn(bpctl_dev_t *pbpctl_dev)
return bp_status;
}
-int set_std_nic_fn(bpctl_dev_t *pbpctl_dev, int nic_mode)
+int set_std_nic_fn(struct bpctl_dev *pbpctl_dev, int nic_mode)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4602,7 +4602,7 @@ int set_std_nic_fn(bpctl_dev_t *pbpctl_dev, int nic_mode)
return ret;
}
-int get_std_nic_fn(bpctl_dev_t *pbpctl_dev)
+int get_std_nic_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4610,7 +4610,7 @@ int get_std_nic_fn(bpctl_dev_t *pbpctl_dev)
return std_nic_status(pbpctl_dev);
}
-int set_tap_fn(bpctl_dev_t *pbpctl_dev, int tap_mode)
+int set_tap_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
{
if (!pbpctl_dev)
return -1;
@@ -4626,7 +4626,7 @@ int set_tap_fn(bpctl_dev_t *pbpctl_dev, int tap_mode)
return BP_NOT_CAP;
}
-int get_tap_fn(bpctl_dev_t *pbpctl_dev)
+int get_tap_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4634,7 +4634,7 @@ int get_tap_fn(bpctl_dev_t *pbpctl_dev)
return tap_status(pbpctl_dev);
}
-int set_tap_pwup_fn(bpctl_dev_t *pbpctl_dev, int tap_mode)
+int set_tap_pwup_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4652,7 +4652,7 @@ int set_tap_pwup_fn(bpctl_dev_t *pbpctl_dev, int tap_mode)
return ret;
}
-int get_tap_pwup_fn(bpctl_dev_t *pbpctl_dev)
+int get_tap_pwup_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4664,7 +4664,7 @@ int get_tap_pwup_fn(bpctl_dev_t *pbpctl_dev)
return ((ret == 0) ? 1 : 0);
}
-int get_tap_change_fn(bpctl_dev_t *pbpctl_dev)
+int get_tap_change_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4672,7 +4672,7 @@ int get_tap_change_fn(bpctl_dev_t *pbpctl_dev)
return tap_change_status(pbpctl_dev);
}
-int set_dis_tap_fn(bpctl_dev_t *pbpctl_dev, int dis_param)
+int set_dis_tap_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4689,7 +4689,7 @@ int set_dis_tap_fn(bpctl_dev_t *pbpctl_dev, int dis_param)
return BP_NOT_CAP;
}
-int get_dis_tap_fn(bpctl_dev_t *pbpctl_dev)
+int get_dis_tap_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4697,7 +4697,7 @@ int get_dis_tap_fn(bpctl_dev_t *pbpctl_dev)
return dis_tap_cap_status(pbpctl_dev);
}
-int set_disc_fn(bpctl_dev_t *pbpctl_dev, int disc_mode)
+int set_disc_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
{
if (!pbpctl_dev)
return -1;
@@ -4714,7 +4714,7 @@ int set_disc_fn(bpctl_dev_t *pbpctl_dev, int disc_mode)
return BP_NOT_CAP;
}
-int get_disc_fn(bpctl_dev_t *pbpctl_dev)
+int get_disc_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4725,7 +4725,7 @@ int get_disc_fn(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int set_disc_pwup_fn(bpctl_dev_t *pbpctl_dev, int disc_mode)
+int set_disc_pwup_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4743,7 +4743,7 @@ int set_disc_pwup_fn(bpctl_dev_t *pbpctl_dev, int disc_mode)
return ret;
}
-int get_disc_pwup_fn(bpctl_dev_t *pbpctl_dev)
+int get_disc_pwup_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4753,7 +4753,7 @@ int get_disc_pwup_fn(bpctl_dev_t *pbpctl_dev)
return (ret == 0 ? 1 : (ret < 0 ? BP_NOT_CAP : 0));
}
-int get_disc_change_fn(bpctl_dev_t *pbpctl_dev)
+int get_disc_change_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4763,7 +4763,7 @@ int get_disc_change_fn(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int set_dis_disc_fn(bpctl_dev_t *pbpctl_dev, int dis_param)
+int set_dis_disc_fn(struct bpctl_dev *pbpctl_dev, int dis_param)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4781,7 +4781,7 @@ int set_dis_disc_fn(bpctl_dev_t *pbpctl_dev, int dis_param)
return BP_NOT_CAP;
}
-int get_dis_disc_fn(bpctl_dev_t *pbpctl_dev)
+int get_dis_disc_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4792,7 +4792,7 @@ int get_dis_disc_fn(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int set_disc_port_fn(bpctl_dev_t *pbpctl_dev, int disc_mode)
+int set_disc_port_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
{
int ret = BP_NOT_CAP;
if (!pbpctl_dev)
@@ -4806,7 +4806,7 @@ int set_disc_port_fn(bpctl_dev_t *pbpctl_dev, int disc_mode)
return ret;
}
-int get_disc_port_fn(bpctl_dev_t *pbpctl_dev)
+int get_disc_port_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4814,7 +4814,7 @@ int get_disc_port_fn(bpctl_dev_t *pbpctl_dev)
return disc_port_status(pbpctl_dev);
}
-int set_disc_port_pwup_fn(bpctl_dev_t *pbpctl_dev, int disc_mode)
+int set_disc_port_pwup_fn(struct bpctl_dev *pbpctl_dev, int disc_mode)
{
int ret = BP_NOT_CAP;
if (!pbpctl_dev)
@@ -4828,7 +4828,7 @@ int set_disc_port_pwup_fn(bpctl_dev_t *pbpctl_dev, int disc_mode)
return ret;
}
-int get_disc_port_pwup_fn(bpctl_dev_t *pbpctl_dev)
+int get_disc_port_pwup_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4840,7 +4840,7 @@ int get_disc_port_pwup_fn(bpctl_dev_t *pbpctl_dev)
return ((ret == 0) ? 1 : 0);
}
-int get_wd_exp_mode_fn(bpctl_dev_t *pbpctl_dev)
+int get_wd_exp_mode_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4848,7 +4848,7 @@ int get_wd_exp_mode_fn(bpctl_dev_t *pbpctl_dev)
return wdt_exp_mode_status(pbpctl_dev);
}
-int set_wd_exp_mode_fn(bpctl_dev_t *pbpctl_dev, int param)
+int set_wd_exp_mode_fn(struct bpctl_dev *pbpctl_dev, int param)
{
if (!pbpctl_dev)
return -1;
@@ -4856,7 +4856,7 @@ int set_wd_exp_mode_fn(bpctl_dev_t *pbpctl_dev, int param)
return wdt_exp_mode(pbpctl_dev, param);
}
-int reset_cont_fn(bpctl_dev_t *pbpctl_dev)
+int reset_cont_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -4868,10 +4868,10 @@ int reset_cont_fn(bpctl_dev_t *pbpctl_dev)
return reset_cont(pbpctl_dev);
}
-int set_tx_fn(bpctl_dev_t *pbpctl_dev, int tx_state)
+int set_tx_fn(struct bpctl_dev *pbpctl_dev, int tx_state)
{
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
if (!pbpctl_dev)
return -1;
@@ -4891,7 +4891,7 @@ int set_tx_fn(bpctl_dev_t *pbpctl_dev, int tx_state)
int set_bp_force_link_fn(int dev_num, int tx_state)
{
- static bpctl_dev_t *bpctl_dev_curr;
+ static struct bpctl_dev *bpctl_dev_curr;
if ((dev_num < 0) || (dev_num > device_num)
|| (bpctl_dev_arr[dev_num].pdev == NULL))
@@ -4901,7 +4901,7 @@ int set_bp_force_link_fn(int dev_num, int tx_state)
return set_bp_force_link(bpctl_dev_curr, tx_state);
}
-int set_wd_autoreset_fn(bpctl_dev_t *pbpctl_dev, int param)
+int set_wd_autoreset_fn(struct bpctl_dev *pbpctl_dev, int param)
{
if (!pbpctl_dev)
return -1;
@@ -4909,7 +4909,7 @@ int set_wd_autoreset_fn(bpctl_dev_t *pbpctl_dev, int param)
return set_bypass_wd_auto(pbpctl_dev, param);
}
-int get_wd_autoreset_fn(bpctl_dev_t *pbpctl_dev)
+int get_wd_autoreset_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4918,7 +4918,7 @@ int get_wd_autoreset_fn(bpctl_dev_t *pbpctl_dev)
}
#ifdef BP_SELF_TEST
-int set_bp_self_test_fn(bpctl_dev_t *pbpctl_dev, int param)
+int set_bp_self_test_fn(struct bpctl_dev *pbpctl_dev, int param)
{
if (!pbpctl_dev)
return -1;
@@ -4926,7 +4926,7 @@ int set_bp_self_test_fn(bpctl_dev_t *pbpctl_dev, int param)
return set_bp_self_test(pbpctl_dev, param);
}
-int get_bp_self_test_fn(bpctl_dev_t *pbpctl_dev)
+int get_bp_self_test_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4936,7 +4936,7 @@ int get_bp_self_test_fn(bpctl_dev_t *pbpctl_dev)
#endif
-int get_bypass_caps_fn(bpctl_dev_t *pbpctl_dev)
+int get_bypass_caps_fn(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4945,7 +4945,7 @@ int get_bypass_caps_fn(bpctl_dev_t *pbpctl_dev)
}
-int get_bypass_slave_fn(bpctl_dev_t *pbpctl_dev, bpctl_dev_t **pbpctl_dev_out)
+int get_bypass_slave_fn(struct bpctl_dev *pbpctl_dev, struct bpctl_dev **pbpctl_dev_out)
{
int idx_dev = 0;
if (!pbpctl_dev)
@@ -4977,7 +4977,7 @@ int get_bypass_slave_fn(bpctl_dev_t *pbpctl_dev, bpctl_dev_t **pbpctl_dev_out)
return 0;
}
-int is_bypass(bpctl_dev_t *pbpctl_dev)
+int is_bypass(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -4988,9 +4988,9 @@ int is_bypass(bpctl_dev_t *pbpctl_dev)
return 0;
}
-int get_tx_fn(bpctl_dev_t *pbpctl_dev)
+int get_tx_fn(struct bpctl_dev *pbpctl_dev)
{
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
if (!pbpctl_dev)
return -1;
@@ -5010,7 +5010,7 @@ int get_tx_fn(bpctl_dev_t *pbpctl_dev)
int get_bp_force_link_fn(int dev_num)
{
- static bpctl_dev_t *bpctl_dev_curr;
+ static struct bpctl_dev *bpctl_dev_curr;
if ((dev_num < 0) || (dev_num > device_num)
|| (bpctl_dev_arr[dev_num].pdev == NULL))
@@ -5020,7 +5020,7 @@ int get_bp_force_link_fn(int dev_num)
return bp_force_link_status(bpctl_dev_curr);
}
-static int get_bypass_link_status(bpctl_dev_t *pbpctl_dev)
+static int get_bypass_link_status(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -5036,9 +5036,9 @@ static int get_bypass_link_status(bpctl_dev_t *pbpctl_dev)
static void bp_tpl_timer_fn(unsigned long param)
{
- bpctl_dev_t *pbpctl_dev = (bpctl_dev_t *) param;
+ struct bpctl_dev *pbpctl_dev = (struct bpctl_dev *) param;
uint32_t link1, link2;
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
if (!pbpctl_dev_b)
@@ -5071,9 +5071,9 @@ static void bp_tpl_timer_fn(unsigned long param)
mod_timer(&pbpctl_dev->bp_tpl_timer, jiffies + BP_LINK_MON_DELAY * HZ);
}
-void remove_bypass_tpl_auto(bpctl_dev_t *pbpctl_dev)
+void remove_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev)
{
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
if (!pbpctl_dev)
return;
pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
@@ -5089,7 +5089,7 @@ void remove_bypass_tpl_auto(bpctl_dev_t *pbpctl_dev)
return;
}
-int init_bypass_tpl_auto(bpctl_dev_t *pbpctl_dev)
+int init_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -5102,7 +5102,7 @@ int init_bypass_tpl_auto(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int set_bypass_tpl_auto(bpctl_dev_t *pbpctl_dev, unsigned int param)
+int set_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev, unsigned int param)
{
if (!pbpctl_dev)
return -1;
@@ -5111,7 +5111,7 @@ int set_bypass_tpl_auto(bpctl_dev_t *pbpctl_dev, unsigned int param)
pbpctl_dev->bp_tpl_flag = param;
mod_timer(&pbpctl_dev->bp_tpl_timer, jiffies + 1);
return BP_OK;
- };
+ }
if ((!param) && (pbpctl_dev->bp_tpl_flag))
remove_bypass_tpl_auto(pbpctl_dev);
@@ -5120,7 +5120,7 @@ int set_bypass_tpl_auto(bpctl_dev_t *pbpctl_dev, unsigned int param)
return BP_NOT_CAP;
}
-int get_bypass_tpl_auto(bpctl_dev_t *pbpctl_dev)
+int get_bypass_tpl_auto(struct bpctl_dev *pbpctl_dev)
{
if (!pbpctl_dev)
return -1;
@@ -5130,10 +5130,10 @@ int get_bypass_tpl_auto(bpctl_dev_t *pbpctl_dev)
return BP_NOT_CAP;
}
-int set_tpl_fn(bpctl_dev_t *pbpctl_dev, int tpl_mode)
+int set_tpl_fn(struct bpctl_dev *pbpctl_dev, int tpl_mode)
{
- bpctl_dev_t *pbpctl_dev_b = NULL;
+ struct bpctl_dev *pbpctl_dev_b = NULL;
if (!pbpctl_dev)
return -1;
@@ -5160,7 +5160,7 @@ int set_tpl_fn(bpctl_dev_t *pbpctl_dev, int tpl_mode)
return BP_NOT_CAP;
}
-int get_tpl_fn(bpctl_dev_t *pbpctl_dev)
+int get_tpl_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = BP_NOT_CAP;
if (!pbpctl_dev)
@@ -5174,7 +5174,7 @@ int get_tpl_fn(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int set_bp_wait_at_pwup_fn(bpctl_dev_t *pbpctl_dev, int tap_mode)
+int set_bp_wait_at_pwup_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
{
if (!pbpctl_dev)
return -1;
@@ -5194,7 +5194,7 @@ int set_bp_wait_at_pwup_fn(bpctl_dev_t *pbpctl_dev, int tap_mode)
return BP_NOT_CAP;
}
-int get_bp_wait_at_pwup_fn(bpctl_dev_t *pbpctl_dev)
+int get_bp_wait_at_pwup_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -5207,7 +5207,7 @@ int get_bp_wait_at_pwup_fn(bpctl_dev_t *pbpctl_dev)
return ret;
}
-int set_bp_hw_reset_fn(bpctl_dev_t *pbpctl_dev, int tap_mode)
+int set_bp_hw_reset_fn(struct bpctl_dev *pbpctl_dev, int tap_mode)
{
if (!pbpctl_dev)
return -1;
@@ -5227,7 +5227,7 @@ int set_bp_hw_reset_fn(bpctl_dev_t *pbpctl_dev, int tap_mode)
return BP_NOT_CAP;
}
-int get_bp_hw_reset_fn(bpctl_dev_t *pbpctl_dev)
+int get_bp_hw_reset_fn(struct bpctl_dev *pbpctl_dev)
{
int ret = 0;
if (!pbpctl_dev)
@@ -5242,7 +5242,7 @@ int get_bp_hw_reset_fn(bpctl_dev_t *pbpctl_dev)
}
-int get_bypass_info_fn(bpctl_dev_t *pbpctl_dev, char *dev_name,
+int get_bypass_info_fn(struct bpctl_dev *pbpctl_dev, char *dev_name,
char *add_param)
{
if (!pbpctl_dev)
@@ -5313,7 +5313,7 @@ static int get_dev_idx(int ifindex)
return -1;
}
-static bpctl_dev_t *get_dev_idx_p(int ifindex)
+static struct bpctl_dev *get_dev_idx_p(int ifindex)
{
int idx_dev = 0;
@@ -5401,12 +5401,12 @@ static long device_ioctl(struct file *file, /* see include/linux/fs.h */
{
struct bpctl_cmd bpctl_cmd;
int dev_idx = 0;
- bpctl_dev_t *pbpctl_dev_out;
+ struct bpctl_dev *pbpctl_dev_out;
void __user *argp = (void __user *)ioctl_param;
int ret = 0;
unsigned long flags;
- static bpctl_dev_t *pbpctl_dev;
+ static struct bpctl_dev *pbpctl_dev;
/* lock_kernel(); */
if (down_interruptible(&bpctl_sema))
@@ -5971,7 +5971,7 @@ enum board_type {
PE210G2BPi40,
};
-typedef struct _bpmod_info_t {
+struct bpmod_info {
unsigned int vendor;
unsigned int device;
unsigned int subvendor;
@@ -5979,13 +5979,11 @@ typedef struct _bpmod_info_t {
unsigned int index;
char *bp_name;
-} bpmod_info_t;
+};
-typedef struct _dev_desc {
+struct {
char *name;
-} dev_desc_t;
-
-dev_desc_t dev_desc[] = {
+} dev_desc[] = {
{"Silicom Bypass PXG2BPFI-SD series adapter"},
{"Silicom Bypass PXG2BPFIL-SD series adapter"},
{"Silicom Bypass PXG2BPFILX-SD series adapter"},
@@ -6155,7 +6153,7 @@ dev_desc_t dev_desc[] = {
{0},
};
-static bpmod_info_t tx_ctl_pci_tbl[] = {
+static struct bpmod_info tx_ctl_pci_tbl[] = {
{0x8086, 0x107a, SILICOM_SVID, SILICOM_PXG2BPFI_SSID, PXG2BPFI,
"PXG2BPFI-SD"},
{0x8086, 0x107a, SILICOM_SVID, SILICOM_PXG2BPFIL_SSID, PXG2BPFIL,
@@ -6623,7 +6621,7 @@ static bpmod_info_t tx_ctl_pci_tbl[] = {
{0,}
};
-static void find_fw(bpctl_dev_t *dev)
+static void find_fw(struct bpctl_dev *dev)
{
unsigned long mmio_start, mmio_len;
struct pci_dev *pdev1 = dev->pdev;
@@ -6653,7 +6651,7 @@ static void find_fw(bpctl_dev_t *dev)
printk("firmware version: 0x%x\n", dev->bp_fw_ver);
}
-static int init_one(bpctl_dev_t *dev, bpmod_info_t *info, struct pci_dev *pdev1)
+static int init_one(struct bpctl_dev *dev, struct bpmod_info *info, struct pci_dev *pdev1)
{
unsigned long mmio_start, mmio_len;
@@ -6744,7 +6742,7 @@ static int __init bypass_init_module(void)
{
int ret_val, idx, idx_dev = 0;
struct pci_dev *pdev1 = NULL;
- bpctl_dev_t *dev;
+ struct bpctl_dev *dev;
printk(BP_MOD_DESCR " v" BP_MOD_VER "\n");
ret_val = register_chrdev(major_num, DEVICE_NAME, &Fops);
@@ -6769,14 +6767,14 @@ static int __init bypass_init_module(void)
return -1;
}
- bpctl_dev_arr = kmalloc((device_num) * sizeof(bpctl_dev_t), GFP_KERNEL);
+ bpctl_dev_arr = kmalloc((device_num) * sizeof(struct bpctl_dev), GFP_KERNEL);
if (!bpctl_dev_arr) {
printk("Allocation error\n");
unregister_chrdev(major_num, DEVICE_NAME);
return -1;
}
- memset(bpctl_dev_arr, 0, ((device_num) * sizeof(bpctl_dev_t)));
+ memset(bpctl_dev_arr, 0, ((device_num) * sizeof(struct bpctl_dev)));
pdev1 = NULL;
dev = bpctl_dev_arr;
@@ -6797,7 +6795,7 @@ static int __init bypass_init_module(void)
spin_lock_init(&bpvm_lock);
{
- bpctl_dev_t *pbpctl_dev_c = NULL;
+ struct bpctl_dev *pbpctl_dev_c = NULL;
for (idx_dev = 0, dev = bpctl_dev_arr;
idx_dev < device_num && dev->pdev;
idx_dev++, dev++) {
@@ -7169,7 +7167,7 @@ EXPORT_SYMBOL(get_bypass_caps_sd);
int get_bypass_slave_sd(int ifindex)
{
- bpctl_dev_t *pbpctl_dev_out;
+ struct bpctl_dev *pbpctl_dev_out;
int ret = get_bypass_slave_fn(get_dev_idx_p(ifindex), &pbpctl_dev_out);
if (ret == 1)
return pbpctl_dev_out->ifindex;
@@ -7229,7 +7227,7 @@ int bp_proc_create(void)
}
static int procfs_add(char *proc_name, const struct file_operations *fops,
- bpctl_dev_t *dev)
+ struct bpctl_dev *dev)
{
struct bypass_pfs_sd *pfs = &dev->bypass_pfs_set;
if (!proc_create_data(proc_name, 0644, pfs->bypass_entry, fops, dev))
@@ -7264,7 +7262,7 @@ static const struct file_operations name##_ops = { \
static int show_bypass_info(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
seq_printf(m, "Name\t\t\t%s\n", dev->name);
seq_printf(m, "Firmware version\t0x%x\n", dev->bp_fw_ver);
@@ -7274,8 +7272,8 @@ RO_FOPS(bypass_info)
static int show_bypass_slave(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
- bpctl_dev_t *slave = get_status_port_fn(dev);
+ struct bpctl_dev *dev = m->private;
+ struct bpctl_dev *slave = get_status_port_fn(dev);
if (!slave)
slave = dev;
if (!slave)
@@ -7288,7 +7286,7 @@ RO_FOPS(bypass_slave)
static int show_bypass_caps(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_bypass_caps_fn(dev);
if (ret == BP_NOT_CAP)
seq_puts(m, "-1\n");
@@ -7300,7 +7298,7 @@ RO_FOPS(bypass_caps)
static int show_wd_set_caps(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_wd_set_caps_fn(dev);
if (ret == BP_NOT_CAP)
seq_puts(m, "-1\n");
@@ -7346,7 +7344,7 @@ static ssize_t bypass_write(struct file *file, const char __user *buffer,
}
static int show_bypass(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_bypass_fn(dev);
if (ret == BP_NOT_CAP)
seq_puts(m, "fail\n");
@@ -7370,7 +7368,7 @@ static ssize_t tap_write(struct file *file, const char __user *buffer,
}
static int show_tap(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_tap_fn(dev);
if (ret == BP_NOT_CAP)
seq_puts(m, "fail\n");
@@ -7394,7 +7392,7 @@ static ssize_t disc_write(struct file *file, const char __user *buffer,
}
static int show_disc(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_disc_fn(dev);
if (ret == BP_NOT_CAP)
seq_puts(m, "fail\n");
@@ -7408,7 +7406,7 @@ RW_FOPS(disc)
static int show_bypass_change(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_bypass_change_fn(dev);
if (ret == 1)
seq_puts(m, "on\n");
@@ -7422,7 +7420,7 @@ RO_FOPS(bypass_change)
static int show_tap_change(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_tap_change_fn(dev);
if (ret == 1)
seq_puts(m, "on\n");
@@ -7436,7 +7434,7 @@ RO_FOPS(tap_change)
static int show_disc_change(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_disc_change_fn(dev);
if (ret == 1)
seq_puts(m, "on\n");
@@ -7451,7 +7449,7 @@ RO_FOPS(disc_change)
static ssize_t bypass_wd_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- bpctl_dev_t *dev = PDE_DATA(file_inode(file));
+ struct bpctl_dev *dev = PDE_DATA(file_inode(file));
int timeout;
int ret = kstrtoint_from_user(buffer, count, 10, &timeout);
if (ret)
@@ -7461,7 +7459,7 @@ static ssize_t bypass_wd_write(struct file *file, const char __user *buffer,
}
static int show_bypass_wd(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = 0, timeout = 0;
ret = get_bypass_wd_fn(dev, &timeout);
@@ -7479,7 +7477,7 @@ RW_FOPS(bypass_wd)
static int show_wd_expire_time(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = 0, timeout = 0;
ret = get_wd_expire_time_fn(dev, &timeout);
if (ret == BP_NOT_CAP)
@@ -7497,7 +7495,7 @@ RO_FOPS(wd_expire_time)
static ssize_t tpl_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- bpctl_dev_t *dev = PDE_DATA(file_inode(file));
+ struct bpctl_dev *dev = PDE_DATA(file_inode(file));
int tpl_param = user_on_off(buffer, count);
if (tpl_param < 0)
return -1;
@@ -7507,7 +7505,7 @@ static ssize_t tpl_write(struct file *file, const char __user *buffer,
}
static int show_tpl(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_tpl_fn(dev);
if (ret == BP_NOT_CAP)
seq_puts(m, "fail\n");
@@ -7523,7 +7521,7 @@ RW_FOPS(tpl)
static ssize_t wait_at_pwup_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- bpctl_dev_t *dev = PDE_DATA(file_inode(file));
+ struct bpctl_dev *dev = PDE_DATA(file_inode(file));
int tpl_param = user_on_off(buffer, count);
if (tpl_param < 0)
return -1;
@@ -7533,7 +7531,7 @@ static ssize_t wait_at_pwup_write(struct file *file, const char __user *buffer,
}
static int show_wait_at_pwup(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_bp_wait_at_pwup_fn(dev);
if (ret == BP_NOT_CAP)
seq_puts(m, "fail\n");
@@ -7548,7 +7546,7 @@ RW_FOPS(wait_at_pwup)
static ssize_t hw_reset_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
- bpctl_dev_t *dev = PDE_DATA(file_inode(file));
+ struct bpctl_dev *dev = PDE_DATA(file_inode(file));
int tpl_param = user_on_off(buffer, count);
if (tpl_param < 0)
return -1;
@@ -7558,7 +7556,7 @@ static ssize_t hw_reset_write(struct file *file, const char __user *buffer,
}
static int show_hw_reset(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_bp_hw_reset_fn(dev);
if (ret == BP_NOT_CAP)
seq_puts(m, "fail\n");
@@ -7574,7 +7572,7 @@ RW_FOPS(hw_reset)
static int show_reset_bypass_wd(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = reset_bypass_wd_timer_fn(dev);
if (ret == BP_NOT_CAP)
seq_puts(m, "fail\n");
@@ -7598,7 +7596,7 @@ static ssize_t dis_bypass_write(struct file *file, const char __user *buffer,
}
static int show_dis_bypass(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_dis_bypass_fn(dev);
if (ret == BP_NOT_CAP)
seq_puts(m, "fail\n");
@@ -7622,7 +7620,7 @@ static ssize_t dis_tap_write(struct file *file, const char __user *buffer,
}
static int show_dis_tap(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_dis_tap_fn(dev);
if (ret == BP_NOT_CAP)
seq_puts(m, "fail\n");
@@ -7646,7 +7644,7 @@ static ssize_t dis_disc_write(struct file *file, const char __user *buffer,
}
static int show_dis_disc(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_dis_disc_fn(dev);
if (ret == BP_NOT_CAP)
seq_puts(m, "fail\n");
@@ -7670,7 +7668,7 @@ static ssize_t bypass_pwup_write(struct file *file, const char __user *buffer,
}
static int show_bypass_pwup(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_bypass_pwup_fn(dev);
if (ret == BP_NOT_CAP)
seq_puts(m, "fail\n");
@@ -7694,7 +7692,7 @@ static ssize_t bypass_pwoff_write(struct file *file, const char __user *buffer,
}
static int show_bypass_pwoff(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_bypass_pwoff_fn(dev);
if (ret == BP_NOT_CAP)
seq_puts(m, "fail\n");
@@ -7718,7 +7716,7 @@ static ssize_t tap_pwup_write(struct file *file, const char __user *buffer,
}
static int show_tap_pwup(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_tap_pwup_fn(dev);
if (ret == BP_NOT_CAP)
seq_puts(m, "fail\n");
@@ -7742,7 +7740,7 @@ static ssize_t disc_pwup_write(struct file *file, const char __user *buffer,
}
static int show_disc_pwup(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_disc_pwup_fn(dev);
if (ret == BP_NOT_CAP)
seq_puts(m, "fail\n");
@@ -7766,7 +7764,7 @@ static ssize_t std_nic_write(struct file *file, const char __user *buffer,
}
static int show_std_nic(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_std_nic_fn(dev);
if (ret == BP_NOT_CAP)
seq_puts(m, "fail\n");
@@ -7808,7 +7806,7 @@ static ssize_t wd_exp_mode_write(struct file *file, const char __user *buffer,
}
static int show_wd_exp_mode(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_wd_exp_mode_fn(dev);
if (ret == 1)
seq_puts(m, "tap\n");
@@ -7834,7 +7832,7 @@ static ssize_t wd_autoreset_write(struct file *file, const char __user *buffer,
}
static int show_wd_autoreset(struct seq_file *m, void *v)
{
- bpctl_dev_t *dev = m->private;
+ struct bpctl_dev *dev = m->private;
int ret = get_wd_autoreset_fn(dev);
if (ret >= 0)
seq_printf(m, "%d\n", ret);
@@ -7844,7 +7842,7 @@ static int show_wd_autoreset(struct seq_file *m, void *v)
}
RW_FOPS(wd_autoreset)
-int bypass_proc_create_dev_sd(bpctl_dev_t *pbp_device_block)
+int bypass_proc_create_dev_sd(struct bpctl_dev *pbp_device_block)
{
struct bypass_pfs_sd *current_pfs = &(pbp_device_block->bypass_pfs_set);
static struct proc_dir_entry *procfs_dir;
@@ -7914,7 +7912,7 @@ int bypass_proc_create_dev_sd(bpctl_dev_t *pbp_device_block)
return ret;
}
-int bypass_proc_remove_dev_sd(bpctl_dev_t *pbp_device_block)
+int bypass_proc_remove_dev_sd(struct bpctl_dev *pbp_device_block)
{
struct bypass_pfs_sd *current_pfs = &pbp_device_block->bypass_pfs_set;
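Note: the bpctl_mod.c hunks above are a mechanical conversion from the private typedef bpctl_dev_t (and bpmod_info_t / dev_desc_t) to plain struct tags, in line with the kernel coding-style rule that discourages typedefs for structures. A minimal sketch of the pattern, with illustrative names that are not part of the driver:

    /* Before: the typedef hides that the object is a struct. */
    typedef struct _example_dev {
            int index;
    } example_dev_t;

    static int example_probe_old(example_dev_t *dev)
    {
            return dev ? dev->index : -1;
    }

    /* After: the struct tag is used directly at every declaration site. */
    struct example_dev {
            int index;
    };

    static int example_probe_new(struct example_dev *dev)
    {
            return dev ? dev->index : -1;
    }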
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cmm.h b/drivers/staging/tidspbridge/include/dspbridge/cmm.h
index c66bcf7ea90..2adf9ecdf07 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cmm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cmm.h
@@ -293,7 +293,7 @@ extern int cmm_xlator_free_buf(struct cmm_xlatorobject *xlator,
* ======== cmm_xlator_info ========
* Purpose:
* Set/Get process specific "translator" address info.
- * This is used to perform fast virtaul address translation
+ * This is used to perform fast virtual address translation
* for shared memory buffers between the GPP and DSP.
* Parameters:
* xlator: handle to translator.
diff --git a/drivers/staging/tidspbridge/include/dspbridge/proc.h b/drivers/staging/tidspbridge/include/dspbridge/proc.h
index 774a3f6ff20..64c2457aae9 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/proc.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/proc.h
@@ -284,7 +284,7 @@ extern int proc_get_trace(void *hprocessor, u8 * pbuf, u32 max_size);
* user_envp: An Array of Environment settings(Unicode Strings)
* Returns:
* 0: Success.
- * -ENOENT: The DSP Execuetable was not found.
+ * -ENOENT: The DSP Executable was not found.
* -EFAULT: Invalid processor handle.
* -EPERM : Unable to Load the Processor
* Requires:
diff --git a/drivers/staging/tidspbridge/pmgr/dbll.c b/drivers/staging/tidspbridge/pmgr/dbll.c
index c191ae20356..41e88abe47a 100644
--- a/drivers/staging/tidspbridge/pmgr/dbll.c
+++ b/drivers/staging/tidspbridge/pmgr/dbll.c
@@ -1120,8 +1120,11 @@ static int dbll_rmm_alloc(struct dynamic_loader_allocate *this,
or DYN_EXTERNAL, then mem granularity information is present
within the section name - only process if there are at least three
tokens within the section name (just a minor optimization) */
- if (count >= 3)
- strict_strtol(sz_last_token, 10, (long *)&req);
+ if (count >= 3) {
+ status = kstrtos32(sz_last_token, 10, &req);
+ if (status)
+ goto func_cont;
+ }
if ((req == 0) || (req == 1)) {
if (strcmp(sz_sec_last_token, "DYN_DARAM") == 0) {
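Note: the dbll.c hunk replaces the removed strict_strtol() helper with kstrtos32() and, unlike the old call, actually checks the return value before using the parsed number. A hedged, self-contained sketch of the same idiom (function and variable names invented for illustration):

    #include <linux/kernel.h>

    /* Parse a decimal granularity token: 0 on success, -EINVAL or -ERANGE
     * from kstrtos32() on malformed or out-of-range input. */
    static int example_parse_granularity(const char *token, s32 *req)
    {
            return kstrtos32(token, 10, req);
    }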
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index 83d629afdfe..d8957a55662 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -56,8 +56,8 @@ MODULE_DEVICE_TABLE(usb, stub_table);
* usbip_status shows the status of usbip-host as long as this driver is bound
* to the target device.
*/
-static ssize_t show_status(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t usbip_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct stub_device *sdev = dev_get_drvdata(dev);
int status;
@@ -73,7 +73,7 @@ static ssize_t show_status(struct device *dev, struct device_attribute *attr,
return snprintf(buf, PAGE_SIZE, "%d\n", status);
}
-static DEVICE_ATTR(usbip_status, S_IRUGO, show_status, NULL);
+static DEVICE_ATTR_RO(usbip_status);
/*
* usbip_sockfd gets a socket descriptor of an established TCP connection that
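Note: the show_status() to usbip_status_show() rename above is what makes DEVICE_ATTR_RO() usable: the macro generates dev_attr_<name> with mode 0444 and expects a function literally named <name>_show. The DEVICE_ATTR_RW() conversion in usbip_common.c below follows the same convention and additionally expects <name>_store. A small hedged sketch with an invented attribute name:

    #include <linux/kernel.h>
    #include <linux/device.h>

    /* DEVICE_ATTR_RO(foo) requires this exact function name. */
    static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
    {
            return sprintf(buf, "%d\n", 42);        /* placeholder value */
    }
    static DEVICE_ATTR_RO(foo);     /* creates dev_attr_foo, mode 0444 */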
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index 7b97df6f2a4..e3fc749c1e7 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -45,19 +45,20 @@ MODULE_PARM_DESC(usbip_debug_flag, "debug flags (defined in usbip_common.h)");
struct device_attribute dev_attr_usbip_debug;
EXPORT_SYMBOL_GPL(dev_attr_usbip_debug);
-static ssize_t show_flag(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t usbip_debug_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%lx\n", usbip_debug_flag);
}
-static ssize_t store_flag(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t usbip_debug_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
sscanf(buf, "%lx", &usbip_debug_flag);
return count;
}
-DEVICE_ATTR(usbip_debug, (S_IRUGO | S_IWUSR), show_flag, store_flag);
+DEVICE_ATTR_RW(usbip_debug);
static void usbip_dump_buffer(char *buff, int bufflen)
{
diff --git a/drivers/staging/usbip/userspace/libsrc/vhci_driver.c b/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
index 25e62e9f0a3..1091bb20de1 100644
--- a/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
+++ b/drivers/staging/usbip/userspace/libsrc/vhci_driver.c
@@ -230,7 +230,7 @@ static int refresh_class_device_list(void)
sysfs_close_list(cname_list);
- /* seach under /sys/block */
+ /* search under /sys/block */
ret = search_class_for_usbip_device(SYSFS_BLOCK_NAME);
if (ret < 0)
return -1;
diff --git a/drivers/staging/usbip/userspace/src/usbip.c b/drivers/staging/usbip/userspace/src/usbip.c
index fff4b768e70..04a5f20bea6 100644
--- a/drivers/staging/usbip/userspace/src/usbip.c
+++ b/drivers/staging/usbip/userspace/src/usbip.c
@@ -26,6 +26,7 @@
#include <syslog.h>
#include "usbip_common.h"
+#include "usbip_network.h"
#include "usbip.h"
static int usbip_help(int argc, char *argv[]);
@@ -34,7 +35,7 @@ static int usbip_version(int argc, char *argv[]);
static const char usbip_version_string[] = PACKAGE_STRING;
static const char usbip_usage_string[] =
- "usbip [--debug] [--log] [version]\n"
+ "usbip [--debug] [--log] [--tcp-port PORT] [version]\n"
" [help] <command> <args>\n";
static void usbip_usage(void)
@@ -138,9 +139,10 @@ static int run_command(const struct command *cmd, int argc, char *argv[])
int main(int argc, char *argv[])
{
static const struct option opts[] = {
- { "debug", no_argument, NULL, 'd' },
- { "log", no_argument, NULL, 'l' },
- { NULL, 0, NULL, 0 }
+ { "debug", no_argument, NULL, 'd' },
+ { "log", no_argument, NULL, 'l' },
+ { "tcp-port", required_argument, NULL, 't' },
+ { NULL, 0, NULL, 0 }
};
char *cmd;
@@ -150,7 +152,7 @@ int main(int argc, char *argv[])
usbip_use_stderr = 1;
opterr = 0;
for (;;) {
- opt = getopt_long(argc, argv, "+d", opts, NULL);
+ opt = getopt_long(argc, argv, "+dlt:", opts, NULL);
if (opt == -1)
break;
@@ -163,6 +165,9 @@ int main(int argc, char *argv[])
usbip_use_syslog = 1;
openlog("", LOG_PID, LOG_USER);
break;
+ case 't':
+ usbip_setup_port_number(optarg);
+ break;
case '?':
printf("usbip: invalid option\n");
default:
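Note: with the option table and getopt string extended as above, the client can be pointed at a non-default server port; a plausible invocation would be something like the following (host, bus id and port are invented for illustration):

    usbip --tcp-port 3241 attach -r 10.0.0.2 -b 1-1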
diff --git a/drivers/staging/usbip/userspace/src/usbip_attach.c b/drivers/staging/usbip/userspace/src/usbip_attach.c
index 0ec16e54fb0..08584119652 100644
--- a/drivers/staging/usbip/userspace/src/usbip_attach.c
+++ b/drivers/staging/usbip/userspace/src/usbip_attach.c
@@ -144,7 +144,7 @@ static int query_import_device(int sockfd, char *busid)
return -1;
}
- /* recieve a reply */
+ /* receive a reply */
rc = usbip_net_recv_op_common(sockfd, &code);
if (rc < 0) {
err("recv op_common");
@@ -175,7 +175,7 @@ static int attach_device(char *host, char *busid)
int rc;
int rhport;
- sockfd = usbip_net_tcp_connect(host, USBIP_PORT_STRING);
+ sockfd = usbip_net_tcp_connect(host, usbip_port_string);
if (sockfd < 0) {
err("tcp connect");
return -1;
@@ -189,7 +189,7 @@ static int attach_device(char *host, char *busid)
close(sockfd);
- rc = record_connection(host, USBIP_PORT_STRING, busid, rhport);
+ rc = record_connection(host, usbip_port_string, busid, rhport);
if (rc < 0) {
err("record connection");
return -1;
diff --git a/drivers/staging/usbip/userspace/src/usbip_list.c b/drivers/staging/usbip/userspace/src/usbip_list.c
index ff56255f497..237e099337a 100644
--- a/drivers/staging/usbip/userspace/src/usbip_list.c
+++ b/drivers/staging/usbip/userspace/src/usbip_list.c
@@ -131,13 +131,13 @@ static int list_exported_devices(char *host)
int rc;
int sockfd;
- sockfd = usbip_net_tcp_connect(host, USBIP_PORT_STRING);
+ sockfd = usbip_net_tcp_connect(host, usbip_port_string);
if (sockfd < 0) {
err("could not connect to %s:%s: %s", host,
- USBIP_PORT_STRING, gai_strerror(sockfd));
+ usbip_port_string, gai_strerror(sockfd));
return -1;
}
- dbg("connected to %s:%s", host, USBIP_PORT_STRING);
+ dbg("connected to %s:%s", host, usbip_port_string);
rc = get_exported_devices(host, sockfd);
if (rc < 0) {
diff --git a/drivers/staging/usbip/userspace/src/usbip_network.c b/drivers/staging/usbip/userspace/src/usbip_network.c
index b12448ec69a..c39a07f1d38 100644
--- a/drivers/staging/usbip/userspace/src/usbip_network.c
+++ b/drivers/staging/usbip/userspace/src/usbip_network.c
@@ -28,6 +28,36 @@
#include "usbip_common.h"
#include "usbip_network.h"
+int usbip_port = 3240;
+char *usbip_port_string = "3240";
+
+void usbip_setup_port_number(char *arg)
+{
+ dbg("parsing port arg '%s'", arg);
+ char *end;
+ unsigned long int port = strtoul(arg, &end, 10);
+
+ if (end == arg) {
+ err("port: could not parse '%s' as a decimal integer", arg);
+ return;
+ }
+
+ if (*end != '\0') {
+ err("port: garbage at end of '%s'", arg);
+ return;
+ }
+
+ if (port > UINT16_MAX) {
+ err("port: %s too high (max=%d)",
+ arg, UINT16_MAX);
+ return;
+ }
+
+ usbip_port = port;
+ usbip_port_string = arg;
+ info("using port %d (\"%s\")", usbip_port, usbip_port_string);
+}
+
void usbip_net_pack_uint32_t(int pack, uint32_t *num)
{
uint32_t i;
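Note: usbip_setup_port_number() validates its argument with strtoul()'s end pointer rather than atoi(), so empty strings, trailing garbage and values above 65535 are rejected and the previous port is kept. A standalone, hedged userspace sketch of the same check (names invented):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Return the parsed TCP port, or -1 if arg is not a clean decimal in range. */
    static int parse_port(const char *arg)
    {
            char *end;
            unsigned long port = strtoul(arg, &end, 10);

            if (end == arg || *end != '\0')
                    return -1;      /* empty string or trailing garbage */
            if (port > UINT16_MAX)
                    return -1;      /* does not fit a TCP port */
            return (int)port;
    }

    int main(void)
    {
            /* prints "3240 -1 -1" */
            printf("%d %d %d\n", parse_port("3240"), parse_port("3240x"),
                   parse_port("70000"));
            return 0;
    }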
diff --git a/drivers/staging/usbip/userspace/src/usbip_network.h b/drivers/staging/usbip/userspace/src/usbip_network.h
index 1bbefc993fb..2d0e4277b62 100644
--- a/drivers/staging/usbip/userspace/src/usbip_network.h
+++ b/drivers/staging/usbip/userspace/src/usbip_network.h
@@ -14,8 +14,9 @@
#include <stdint.h>
-#define USBIP_PORT 3240
-#define USBIP_PORT_STRING "3240"
+extern int usbip_port;
+extern char *usbip_port_string;
+void usbip_setup_port_number(char *arg);
/* ---------------------------------------------------------------------- */
/* Common header for all the kinds of PDUs. */
diff --git a/drivers/staging/usbip/userspace/src/usbipd.c b/drivers/staging/usbip/userspace/src/usbipd.c
index 3e913b861dc..1c76cfd274d 100644
--- a/drivers/staging/usbip/userspace/src/usbipd.c
+++ b/drivers/staging/usbip/userspace/src/usbipd.c
@@ -50,21 +50,30 @@
#define MAIN_LOOP_TIMEOUT 10
+#define DEFAULT_PID_FILE "/var/run/" PROGNAME ".pid"
+
static const char usbip_version_string[] = PACKAGE_STRING;
static const char usbipd_help_string[] =
- "usage: usbipd [options] \n"
- " -D, --daemon \n"
- " Run as a daemon process. \n"
- " \n"
- " -d, --debug \n"
- " Print debugging information. \n"
- " \n"
- " -h, --help \n"
- " Print this help. \n"
- " \n"
- " -v, --version \n"
- " Show version. \n";
+ "usage: usbipd [options]\n"
+ " -D, --daemon\n"
+ " Run as a daemon process.\n"
+ "\n"
+ " -d, --debug\n"
+ " Print debugging information.\n"
+ "\n"
+ " -PFILE, --pid FILE\n"
+ " Write process id to FILE.\n"
+ " If no FILE specified, use " DEFAULT_PID_FILE "\n"
+ "\n"
+ " -tPORT, --tcp-port PORT\n"
+ " Listen on TCP/IP port PORT.\n"
+ "\n"
+ " -h, --help\n"
+ " Print this help.\n"
+ "\n"
+ " -v, --version\n"
+ " Show version.\n";
static void usbipd_help(void)
{
@@ -286,13 +295,13 @@ static int do_accept(int listenfd)
memset(&ss, 0, sizeof(ss));
- connfd = accept(listenfd, (struct sockaddr *) &ss, &len);
+ connfd = accept(listenfd, (struct sockaddr *)&ss, &len);
if (connfd < 0) {
err("failed to accept connection");
return -1;
}
- rc = getnameinfo((struct sockaddr *) &ss, len, host, sizeof(host),
+ rc = getnameinfo((struct sockaddr *)&ss, len, host, sizeof(host),
port, sizeof(port), NI_NUMERICHOST | NI_NUMERICSERV);
if (rc)
err("getnameinfo: %s", gai_strerror(rc));
@@ -328,56 +337,69 @@ int process_request(int listenfd)
return 0;
}
-static void log_addrinfo(struct addrinfo *ai)
+static void addrinfo_to_text(struct addrinfo *ai, char buf[],
+ const size_t buf_size)
{
char hbuf[NI_MAXHOST];
char sbuf[NI_MAXSERV];
int rc;
+ buf[0] = '\0';
+
rc = getnameinfo(ai->ai_addr, ai->ai_addrlen, hbuf, sizeof(hbuf),
sbuf, sizeof(sbuf), NI_NUMERICHOST | NI_NUMERICSERV);
if (rc)
err("getnameinfo: %s", gai_strerror(rc));
- info("listening on %s:%s", hbuf, sbuf);
+ snprintf(buf, buf_size, "%s:%s", hbuf, sbuf);
}
static int listen_all_addrinfo(struct addrinfo *ai_head, int sockfdlist[])
{
struct addrinfo *ai;
int ret, nsockfd = 0;
+ const size_t ai_buf_size = NI_MAXHOST + NI_MAXSERV + 2;
+ char ai_buf[ai_buf_size];
for (ai = ai_head; ai && nsockfd < MAXSOCKFD; ai = ai->ai_next) {
- sockfdlist[nsockfd] = socket(ai->ai_family, ai->ai_socktype,
- ai->ai_protocol);
- if (sockfdlist[nsockfd] < 0)
+ int sock;
+ addrinfo_to_text(ai, ai_buf, ai_buf_size);
+ dbg("opening %s", ai_buf);
+ sock = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol);
+ if (sock < 0) {
+ err("socket: %s: %d (%s)",
+ ai_buf, errno, strerror(errno));
continue;
+ }
- usbip_net_set_reuseaddr(sockfdlist[nsockfd]);
- usbip_net_set_nodelay(sockfdlist[nsockfd]);
+ usbip_net_set_reuseaddr(sock);
+ usbip_net_set_nodelay(sock);
- if (sockfdlist[nsockfd] >= FD_SETSIZE) {
- close(sockfdlist[nsockfd]);
- sockfdlist[nsockfd] = -1;
+ if (sock >= FD_SETSIZE) {
+ err("FD_SETSIZE: %s: sock=%d, max=%d",
+ ai_buf, sock, FD_SETSIZE);
+ close(sock);
continue;
}
- ret = bind(sockfdlist[nsockfd], ai->ai_addr, ai->ai_addrlen);
+ ret = bind(sock, ai->ai_addr, ai->ai_addrlen);
if (ret < 0) {
- close(sockfdlist[nsockfd]);
- sockfdlist[nsockfd] = -1;
+ err("bind: %s: %d (%s)",
+ ai_buf, errno, strerror(errno));
+ close(sock);
continue;
}
- ret = listen(sockfdlist[nsockfd], SOMAXCONN);
+ ret = listen(sock, SOMAXCONN);
if (ret < 0) {
- close(sockfdlist[nsockfd]);
- sockfdlist[nsockfd] = -1;
+ err("listen: %s: %d (%s)",
+ ai_buf, errno, strerror(errno));
+ close(sock);
continue;
}
- log_addrinfo(ai);
- nsockfd++;
+ info("listening on %s", ai_buf);
+ sockfdlist[nsockfd++] = sock;
}
if (nsockfd == 0)
@@ -398,9 +420,9 @@ static struct addrinfo *do_getaddrinfo(char *host, int ai_family)
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = AI_PASSIVE;
- rc = getaddrinfo(host, USBIP_PORT_STRING, &hints, &ai_head);
+ rc = getaddrinfo(host, usbip_port_string, &hints, &ai_head);
if (rc) {
- err("failed to get a network address %s: %s", USBIP_PORT_STRING,
+ err("failed to get a network address %s: %s", usbip_port_string,
gai_strerror(rc));
return NULL;
}
@@ -426,6 +448,31 @@ static void set_signal(void)
sigaction(SIGCLD, &act, NULL);
}
+static const char *pid_file;
+
+static void write_pid_file()
+{
+ if (pid_file) {
+ dbg("creating pid file %s", pid_file);
+ FILE *fp = fopen(pid_file, "w");
+ if (!fp) {
+ err("pid_file: %s: %d (%s)",
+ pid_file, errno, strerror(errno));
+ return;
+ }
+ fprintf(fp, "%d\n", getpid());
+ fclose(fp);
+ }
+}
+
+static void remove_pid_file()
+{
+ if (pid_file) {
+ dbg("removing pid file %s", pid_file);
+ unlink(pid_file);
+ }
+}
+
static int do_standalone_mode(int daemonize)
{
struct addrinfo *ai_head;
@@ -452,6 +499,7 @@ static int do_standalone_mode(int daemonize)
usbip_use_syslog = 1;
}
set_signal();
+ write_pid_file();
ai_head = do_getaddrinfo(NULL, PF_UNSPEC);
if (!ai_head) {
@@ -496,8 +544,9 @@ static int do_standalone_mode(int daemonize)
process_request(sockfdlist[i]);
}
}
- } else
+ } else {
dbg("heartbeat timeout on ppoll()");
+ }
}
info("shutting down " PROGNAME);
@@ -511,11 +560,13 @@ static int do_standalone_mode(int daemonize)
int main(int argc, char *argv[])
{
static const struct option longopts[] = {
- { "daemon", no_argument, NULL, 'D' },
- { "debug", no_argument, NULL, 'd' },
- { "help", no_argument, NULL, 'h' },
- { "version", no_argument, NULL, 'v' },
- { NULL, 0, NULL, 0 }
+ { "daemon", no_argument, NULL, 'D' },
+ { "debug", no_argument, NULL, 'd' },
+ { "pid", optional_argument, NULL, 'P' },
+ { "tcp-port", required_argument, NULL, 't' },
+ { "help", no_argument, NULL, 'h' },
+ { "version", no_argument, NULL, 'v' },
+ { NULL, 0, NULL, 0 }
};
enum {
@@ -526,6 +577,7 @@ int main(int argc, char *argv[])
int daemonize = 0;
int opt, rc = -1;
+ pid_file = NULL;
usbip_use_stderr = 1;
usbip_use_syslog = 0;
@@ -535,7 +587,7 @@ int main(int argc, char *argv[])
cmd = cmd_standalone_mode;
for (;;) {
- opt = getopt_long(argc, argv, "Ddhv", longopts, NULL);
+ opt = getopt_long(argc, argv, "DdP::t:hv", longopts, NULL);
if (opt == -1)
break;
@@ -550,6 +602,12 @@ int main(int argc, char *argv[])
case 'h':
cmd = cmd_help;
break;
+ case 'P':
+ pid_file = optarg ? optarg : DEFAULT_PID_FILE;
+ break;
+ case 't':
+ usbip_setup_port_number(optarg);
+ break;
case 'v':
cmd = cmd_version;
break;
@@ -563,6 +621,7 @@ int main(int argc, char *argv[])
switch (cmd) {
case cmd_standalone_mode:
rc = do_standalone_mode(daemonize);
+ remove_pid_file();
break;
case cmd_version:
printf(PROGNAME " (%s)\n", usbip_version_string);
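Note: taken together, the usbipd.c changes let the daemon record its PID and listen on a non-default port, and the listen_all_addrinfo() rework logs which address a failing socket(), bind() or listen() call belongs to instead of skipping it silently. A plausible invocation (port and path illustrative; --pid with no value falls back to /var/run/usbipd.pid):

    usbipd -D --tcp-port 3241 --pid=/var/run/usbipd.pid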
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c
index c66e9c05c76..9b51586d11d 100644
--- a/drivers/staging/usbip/vhci_sysfs.c
+++ b/drivers/staging/usbip/vhci_sysfs.c
@@ -27,7 +27,7 @@
/* TODO: refine locking ?*/
/* Sysfs entry to show port status */
-static ssize_t show_status(struct device *dev, struct device_attribute *attr,
+static ssize_t status_show(struct device *dev, struct device_attribute *attr,
char *out)
{
char *s = out;
@@ -74,7 +74,7 @@ static ssize_t show_status(struct device *dev, struct device_attribute *attr,
return out - s;
}
-static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);
+static DEVICE_ATTR_RO(status);
/* Sysfs entry to shutdown a virtual connection */
static int vhci_port_disconnect(__u32 rhport)
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 08b250f01da..7f36a7103c3 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -3370,8 +3370,8 @@ viawget_resume(struct pci_dev *pcid)
PSMgmtObject pMgmt = pDevice->pMgmt;
int power_status; // to silence the compiler
- power_status = pci_set_power_state(pcid, 0);
- power_status = pci_enable_wake(pcid, 0, 0);
+ power_status = pci_set_power_state(pcid, PCI_D0);
+ power_status = pci_enable_wake(pcid, PCI_D0, 0);
pci_restore_state(pcid);
if (netif_running(pDevice->dev)) {
spin_lock_irq(&pDevice->lock);
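Note: the resume hunk above swaps the bare 0 arguments for the PCI_D0 constant from linux/pci.h, which names the fully-on power state. A hedged sketch of a minimal resume path using the same calls (driver-specific restart steps omitted):

    #include <linux/pci.h>

    static int example_resume(struct pci_dev *pdev)
    {
            pci_set_power_state(pdev, PCI_D0);      /* return to full power */
            pci_enable_wake(pdev, PCI_D0, 0);       /* no wake events needed in D0 */
            pci_restore_state(pdev);
            return 0;
    }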
diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
index 57a08c5771f..8acff44a9e7 100644
--- a/drivers/staging/vt6655/hostap.c
+++ b/drivers/staging/vt6655/hostap.c
@@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
apdev_priv = netdev_priv(pDevice->apdev);
*apdev_priv = *pDevice;
- memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
+ eth_hw_addr_inherit(pDevice->apdev, dev);
pDevice->apdev->netdev_ops = &apdev_netdev_ops;
diff --git a/drivers/staging/vt6655/ioctl.c b/drivers/staging/vt6655/ioctl.c
index 46e0e41e7e6..b5cd2e44e53 100644
--- a/drivers/staging/vt6655/ioctl.c
+++ b/drivers/staging/vt6655/ioctl.c
@@ -460,7 +460,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
}
if (sValue.dwValue == 1) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "up wpadev\n");
- memcpy(pDevice->wpadev->dev_addr, pDevice->dev->dev_addr, ETH_ALEN);
+ eth_hw_addr_inherit(pDevice->wpadev, pDevice->dev);
pDevice->bWPADEVUp = true;
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "close wpadev\n");
diff --git a/drivers/staging/vt6655/wpactl.c b/drivers/staging/vt6655/wpactl.c
index 869f62c678e..e8d9ecd2913 100644
--- a/drivers/staging/vt6655/wpactl.c
+++ b/drivers/staging/vt6655/wpactl.c
@@ -96,7 +96,7 @@ static int wpa_init_wpadev(PSDevice pDevice)
wpadev_priv = netdev_priv(pDevice->wpadev);
*wpadev_priv = *pDevice;
- memcpy(pDevice->wpadev->dev_addr, dev->dev_addr, ETH_ALEN);
+ eth_hw_addr_inherit(pDevice->wpadev, dev);
pDevice->wpadev->base_addr = dev->base_addr;
pDevice->wpadev->irq = dev->irq;
pDevice->wpadev->mem_start = dev->mem_start;
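Note: the three vt6655 hunks above (hostap.c, ioctl.c, wpactl.c) replace an open-coded memcpy() of dev_addr with eth_hw_addr_inherit(), which copies the MAC address and also propagates addr_assign_type from the parent device. A minimal hedged sketch (function name invented):

    #include <linux/etherdevice.h>

    /* Give a child net_device the same MAC as its parent, keeping the
     * address-assignment bookkeeping consistent. */
    static void example_adopt_mac(struct net_device *child,
                                  struct net_device *parent)
    {
            eth_hw_addr_inherit(child, parent);
    }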
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 33fa76759bf..1e8b8412e67 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -680,7 +680,6 @@ BBuGetFrameTime(
unsigned int uRate = 0;
if (uRateIdx > RATE_54M) {
- ASSERT(0);
return 0;
}
@@ -724,16 +723,16 @@ BBuGetFrameTime(
* cbFrameLength - Tx Frame Length
* wRate - Tx Rate
* Out:
- * pwPhyLen - pointer to Phy Length field
- * pbyPhySrv - pointer to Phy Service field
- * pbyPhySgn - pointer to Phy Signal field
+ * struct vnt_phy_field *phy
+ * - pointer to Phy Length field
+ * - pointer to Phy Service field
+ * - pointer to Phy Signal field
*
* Return Value: none
*
*/
void BBvCalculateParameter(struct vnt_private *pDevice, u32 cbFrameLength,
- u16 wRate, u8 byPacketType, u16 *pwPhyLen, u8 *pbyPhySrv,
- u8 *pbyPhySgn)
+ u16 wRate, u8 byPacketType, struct vnt_phy_field *phy)
{
u32 cbBitCount;
u32 cbUsCount = 0;
@@ -748,15 +747,15 @@ void BBvCalculateParameter(struct vnt_private *pDevice, u32 cbFrameLength,
switch (wRate) {
case RATE_1M :
cbUsCount = cbBitCount;
- *pbyPhySgn = 0x00;
+ phy->signal = 0x00;
break;
case RATE_2M :
cbUsCount = cbBitCount / 2;
if (byPreambleType == 1)
- *pbyPhySgn = 0x09;
+ phy->signal = 0x09;
else // long preamble
- *pbyPhySgn = 0x01;
+ phy->signal = 0x01;
break;
case RATE_5M :
@@ -767,9 +766,9 @@ void BBvCalculateParameter(struct vnt_private *pDevice, u32 cbFrameLength,
if (cbTmp != cbBitCount)
cbUsCount ++;
if (byPreambleType == 1)
- *pbyPhySgn = 0x0a;
+ phy->signal = 0x0a;
else // long preamble
- *pbyPhySgn = 0x02;
+ phy->signal = 0x02;
break;
case RATE_11M :
@@ -784,103 +783,102 @@ void BBvCalculateParameter(struct vnt_private *pDevice, u32 cbFrameLength,
bExtBit = true;
}
if (byPreambleType == 1)
- *pbyPhySgn = 0x0b;
+ phy->signal = 0x0b;
else // long preamble
- *pbyPhySgn = 0x03;
+ phy->signal = 0x03;
break;
case RATE_6M :
if(byPacketType == PK_TYPE_11A) {//11a, 5GHZ
- *pbyPhySgn = 0x9B; //1001 1011
+ phy->signal = 0x9b;
}
else {//11g, 2.4GHZ
- *pbyPhySgn = 0x8B; //1000 1011
+ phy->signal = 0x8b;
}
break;
case RATE_9M :
if(byPacketType == PK_TYPE_11A) {//11a, 5GHZ
- *pbyPhySgn = 0x9F; //1001 1111
+ phy->signal = 0x9f;
}
else {//11g, 2.4GHZ
- *pbyPhySgn = 0x8F; //1000 1111
+ phy->signal = 0x8f;
}
break;
case RATE_12M :
if(byPacketType == PK_TYPE_11A) {//11a, 5GHZ
- *pbyPhySgn = 0x9A; //1001 1010
+ phy->signal = 0x9a;
}
else {//11g, 2.4GHZ
- *pbyPhySgn = 0x8A; //1000 1010
+ phy->signal = 0x8a;
}
break;
case RATE_18M :
if(byPacketType == PK_TYPE_11A) {//11a, 5GHZ
- *pbyPhySgn = 0x9E; //1001 1110
+ phy->signal = 0x9e;
}
else {//11g, 2.4GHZ
- *pbyPhySgn = 0x8E; //1000 1110
+ phy->signal = 0x8e;
}
break;
case RATE_24M :
if(byPacketType == PK_TYPE_11A) {//11a, 5GHZ
- *pbyPhySgn = 0x99; //1001 1001
+ phy->signal = 0x99;
}
else {//11g, 2.4GHZ
- *pbyPhySgn = 0x89; //1000 1001
+ phy->signal = 0x89;
}
break;
case RATE_36M :
if(byPacketType == PK_TYPE_11A) {//11a, 5GHZ
- *pbyPhySgn = 0x9D; //1001 1101
+ phy->signal = 0x9d;
}
else {//11g, 2.4GHZ
- *pbyPhySgn = 0x8D; //1000 1101
+ phy->signal = 0x8d;
}
break;
case RATE_48M :
if(byPacketType == PK_TYPE_11A) {//11a, 5GHZ
- *pbyPhySgn = 0x98; //1001 1000
+ phy->signal = 0x98;
}
else {//11g, 2.4GHZ
- *pbyPhySgn = 0x88; //1000 1000
+ phy->signal = 0x88;
}
break;
case RATE_54M :
if (byPacketType == PK_TYPE_11A) {//11a, 5GHZ
- *pbyPhySgn = 0x9C; //1001 1100
+ phy->signal = 0x9c;
}
else {//11g, 2.4GHZ
- *pbyPhySgn = 0x8C; //1000 1100
+ phy->signal = 0x8c;
}
break;
default :
if (byPacketType == PK_TYPE_11A) {//11a, 5GHZ
- *pbyPhySgn = 0x9C; //1001 1100
+ phy->signal = 0x9c;
}
else {//11g, 2.4GHZ
- *pbyPhySgn = 0x8C; //1000 1100
+ phy->signal = 0x8c;
}
break;
}
- if (byPacketType == PK_TYPE_11B) {
- *pbyPhySrv = 0x00;
- if (bExtBit)
- *pbyPhySrv = *pbyPhySrv | 0x80;
- *pwPhyLen = (u16) cbUsCount;
- }
- else {
- *pbyPhySrv = 0x00;
- *pwPhyLen = (u16)cbFrameLength;
- }
+ if (byPacketType == PK_TYPE_11B) {
+ phy->service = 0x00;
+ if (bExtBit)
+ phy->service |= 0x80;
+ phy->len = cpu_to_le16((u16)cbUsCount);
+ } else {
+ phy->service = 0x00;
+ phy->len = cpu_to_le16((u16)cbFrameLength);
+ }
}
/*
diff --git a/drivers/staging/vt6656/baseband.h b/drivers/staging/vt6656/baseband.h
index 0a634adabf0..79faedf4a5e 100644
--- a/drivers/staging/vt6656/baseband.h
+++ b/drivers/staging/vt6656/baseband.h
@@ -81,6 +81,13 @@
#define TOP_RATE_2M 0x00200000
#define TOP_RATE_1M 0x00100000
+/* Length, Service, and Signal fields of Phy for Tx */
+struct vnt_phy_field {
+ u8 signal;
+ u8 service;
+ __le16 len;
+} __packed;
+
unsigned int
BBuGetFrameTime(
u8 byPreambleType,
@@ -90,8 +97,7 @@ BBuGetFrameTime(
);
void BBvCalculateParameter(struct vnt_private *, u32 cbFrameLength,
- u16 wRate, u8 byPacketType, u16 *pwPhyLen, u8 *pbyPhySrv,
- u8 *pbyPhySgn);
+ u16 wRate, u8 byPacketType, struct vnt_phy_field *);
/* timer for antenna diversity */
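Note: BBvCalculateParameter() now fills a single packed struct vnt_phy_field instead of three separate out-parameters, and the length member is explicitly little-endian. A hedged sketch of declaring and filling such a field (struct and values are illustrative, not the driver's):

    #include <linux/kernel.h>
    #include <asm/byteorder.h>

    struct example_phy_field {
            u8 signal;
            u8 service;
            __le16 len;
    } __packed;

    static void example_fill_phy(struct example_phy_field *phy, u16 frame_len)
    {
            phy->signal = 0x8c;                     /* example signal byte */
            phy->service = 0x00;
            phy->len = cpu_to_le16(frame_len);      /* stored in wire byte order */
    }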
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index 24291aee58b..dbf11ecb794 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -319,53 +319,27 @@ CARDvCalculateOFDMRParameter (
*/
void CARDvSetRSPINF(struct vnt_private *pDevice, u8 byBBType)
{
- u8 abyServ[4] = {0, 0, 0, 0}; /* For CCK */
- u8 abySignal[4] = {0, 0, 0, 0};
- u16 awLen[4] = {0, 0, 0, 0};
+ struct vnt_phy_field phy[4];
u8 abyTxRate[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0}; /* For OFDM */
u8 abyRsvTime[9] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
u8 abyData[34];
int i;
//RSPINF_b_1
- BBvCalculateParameter(pDevice,
- 14,
- swGetCCKControlRate(pDevice, RATE_1M),
- PK_TYPE_11B,
- &awLen[0],
- &abyServ[0],
- &abySignal[0]
- );
+ BBvCalculateParameter(pDevice, 14,
+ swGetCCKControlRate(pDevice, RATE_1M), PK_TYPE_11B, &phy[0]);
///RSPINF_b_2
- BBvCalculateParameter(pDevice,
- 14,
- swGetCCKControlRate(pDevice, RATE_2M),
- PK_TYPE_11B,
- &awLen[1],
- &abyServ[1],
- &abySignal[1]
- );
+ BBvCalculateParameter(pDevice, 14,
+ swGetCCKControlRate(pDevice, RATE_2M), PK_TYPE_11B, &phy[1]);
//RSPINF_b_5
- BBvCalculateParameter(pDevice,
- 14,
- swGetCCKControlRate(pDevice, RATE_5M),
- PK_TYPE_11B,
- &awLen[2],
- &abyServ[2],
- &abySignal[2]
- );
+ BBvCalculateParameter(pDevice, 14,
+ swGetCCKControlRate(pDevice, RATE_5M), PK_TYPE_11B, &phy[2]);
//RSPINF_b_11
- BBvCalculateParameter(pDevice,
- 14,
- swGetCCKControlRate(pDevice, RATE_11M),
- PK_TYPE_11B,
- &awLen[3],
- &abyServ[3],
- &abySignal[3]
- );
+ BBvCalculateParameter(pDevice, 14,
+ swGetCCKControlRate(pDevice, RATE_11M), PK_TYPE_11B, &phy[3]);
//RSPINF_a_6
CARDvCalculateOFDMRParameter (RATE_6M,
@@ -421,25 +395,21 @@ void CARDvSetRSPINF(struct vnt_private *pDevice, u8 byBBType)
&abyTxRate[8],
&abyRsvTime[8]);
- abyData[0] = (u8)(awLen[0]&0xFF);
- abyData[1] = (u8)(awLen[0]>>8);
- abyData[2] = abySignal[0];
- abyData[3] = abyServ[0];
-
- abyData[4] = (u8)(awLen[1]&0xFF);
- abyData[5] = (u8)(awLen[1]>>8);
- abyData[6] = abySignal[1];
- abyData[7] = abyServ[1];
-
- abyData[8] = (u8)(awLen[2]&0xFF);
- abyData[9] = (u8)(awLen[2]>>8);
- abyData[10] = abySignal[2];
- abyData[11] = abyServ[2];
-
- abyData[12] = (u8)(awLen[3]&0xFF);
- abyData[13] = (u8)(awLen[3]>>8);
- abyData[14] = abySignal[3];
- abyData[15] = abyServ[3];
+ put_unaligned(phy[0].len, (u16 *)&abyData[0]);
+ abyData[2] = phy[0].signal;
+ abyData[3] = phy[0].service;
+
+ put_unaligned(phy[1].len, (u16 *)&abyData[4]);
+ abyData[6] = phy[1].signal;
+ abyData[7] = phy[1].service;
+
+ put_unaligned(phy[2].len, (u16 *)&abyData[8]);
+ abyData[10] = phy[2].signal;
+ abyData[11] = phy[2].service;
+
+ put_unaligned(phy[3].len, (u16 *)&abyData[12]);
+ abyData[14] = phy[3].signal;
+ abyData[15] = phy[3].service;
for (i = 0; i < 9; i++) {
abyData[16+i*2] = abyTxRate[i];
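Note: the card.c hunk packs each little-endian length into the register-image buffer with put_unaligned() instead of manual &0xFF / >>8 byte splitting. A hedged sketch using the explicitly sized put_unaligned_le16() variant (helper name invented):

    #include <linux/types.h>
    #include <asm/unaligned.h>

    /* Store a 16-bit little-endian value at a possibly unaligned offset. */
    static void example_pack_len(u8 *buf, u16 len)
    {
            put_unaligned_le16(len, buf);
    }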
diff --git a/drivers/staging/vt6656/desc.h b/drivers/staging/vt6656/desc.h
index 64cb046fe98..4675135aa25 100644
--- a/drivers/staging/vt6656/desc.h
+++ b/drivers/staging/vt6656/desc.h
@@ -144,160 +144,6 @@
#define TD_FLAGS_PS_RETRY 0x04 /* check if PS STA frame re-transmit */
/*
- * RsvTime buffer header
- */
-typedef struct tagSRrvTime_gRTS {
- u16 wRTSTxRrvTime_ba;
- u16 wRTSTxRrvTime_aa;
- u16 wRTSTxRrvTime_bb;
- u16 wReserved;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
-} __attribute__ ((__packed__))
-SRrvTime_gRTS, *PSRrvTime_gRTS;
-
-typedef const SRrvTime_gRTS *PCSRrvTime_gRTS;
-
-typedef struct tagSRrvTime_gCTS {
- u16 wCTSTxRrvTime_ba;
- u16 wReserved;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
-} __attribute__ ((__packed__))
-SRrvTime_gCTS, *PSRrvTime_gCTS;
-
-typedef const SRrvTime_gCTS *PCSRrvTime_gCTS;
-
-typedef struct tagSRrvTime_ab {
- u16 wRTSTxRrvTime;
- u16 wTxRrvTime;
-} __attribute__ ((__packed__))
-SRrvTime_ab, *PSRrvTime_ab;
-
-typedef const SRrvTime_ab *PCSRrvTime_ab;
-
-typedef struct tagSRrvTime_atim {
- u16 wCTSTxRrvTime_ba;
- u16 wTxRrvTime_a;
-} __attribute__ ((__packed__))
-SRrvTime_atim, *PSRrvTime_atim;
-
-typedef const SRrvTime_atim *PCSRrvTime_atim;
-
-/*
- * RTS buffer header
- */
-typedef struct tagSRTSData {
- u16 wFrameControl;
- u16 wDurationID;
- u8 abyRA[ETH_ALEN];
- u8 abyTA[ETH_ALEN];
-} __attribute__ ((__packed__))
-SRTSData, *PSRTSData;
-
-typedef const SRTSData *PCSRTSData;
-
-typedef struct tagSRTS_g {
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_ba;
- u16 wDuration_aa;
- u16 wDuration_bb;
- u16 wReserved;
- SRTSData Data;
-} __attribute__ ((__packed__))
-SRTS_g, *PSRTS_g;
-typedef const SRTS_g *PCSRTS_g;
-
-typedef struct tagSRTS_g_FB {
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_ba;
- u16 wDuration_aa;
- u16 wDuration_bb;
- u16 wReserved;
- u16 wRTSDuration_ba_f0;
- u16 wRTSDuration_aa_f0;
- u16 wRTSDuration_ba_f1;
- u16 wRTSDuration_aa_f1;
- SRTSData Data;
-} __attribute__ ((__packed__))
-SRTS_g_FB, *PSRTS_g_FB;
-
-typedef const SRTS_g_FB *PCSRTS_g_FB;
-
-typedef struct tagSRTS_ab {
- u8 bySignalField;
- u8 byServiceField;
- u16 wTransmitLength;
- u16 wDuration;
- u16 wReserved;
- SRTSData Data;
-} __attribute__ ((__packed__))
-SRTS_ab, *PSRTS_ab;
-
-typedef const SRTS_ab *PCSRTS_ab;
-
-typedef struct tagSRTS_a_FB {
- u8 bySignalField;
- u8 byServiceField;
- u16 wTransmitLength;
- u16 wDuration;
- u16 wReserved;
- u16 wRTSDuration_f0;
- u16 wRTSDuration_f1;
- SRTSData Data;
-} __attribute__ ((__packed__))
-SRTS_a_FB, *PSRTS_a_FB;
-
-typedef const SRTS_a_FB *PCSRTS_a_FB;
-
-/*
- * CTS buffer header
- */
-typedef struct tagSCTSData {
- u16 wFrameControl;
- u16 wDurationID;
- u8 abyRA[ETH_ALEN];
- u16 wReserved;
-} __attribute__ ((__packed__))
-SCTSData, *PSCTSData;
-
-typedef struct tagSCTS {
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u16 wDuration_ba;
- u16 wReserved;
- SCTSData Data;
-} __attribute__ ((__packed__))
-SCTS, *PSCTS;
-
-typedef const SCTS *PCSCTS;
-
-typedef struct tagSCTS_FB {
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u16 wDuration_ba;
- u16 wReserved;
- u16 wCTSDuration_ba_f0;
- u16 wCTSDuration_ba_f1;
- SCTSData Data;
-} __attribute__ ((__packed__))
-SCTS_FB, *PSCTS_FB;
-
-typedef const SCTS_FB *PCSCTS_FB;
-
-/*
* TX FIFO header
*/
typedef struct tagSTxBufHead {
@@ -317,76 +163,6 @@ typedef struct tagSTxShortBufHead {
STxShortBufHead, *PSTxShortBufHead;
typedef const STxShortBufHead *PCSTxShortBufHead;
-/*
- * TX data header
- */
-typedef struct tagSTxDataHead_g {
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
-} __attribute__ ((__packed__))
-STxDataHead_g, *PSTxDataHead_g;
-
-typedef const STxDataHead_g *PCSTxDataHead_g;
-
-typedef struct tagSTxDataHead_g_FB {
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wDuration_a_f0;
- u16 wDuration_a_f1;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
-} __attribute__ ((__packed__))
-STxDataHead_g_FB, *PSTxDataHead_g_FB;
-typedef const STxDataHead_g_FB *PCSTxDataHead_g_FB;
-
-typedef struct tagSTxDataHead_ab {
- u8 bySignalField;
- u8 byServiceField;
- u16 wTransmitLength;
- u16 wDuration;
- u16 wTimeStampOff;
-} __attribute__ ((__packed__))
-STxDataHead_ab, *PSTxDataHead_ab;
-typedef const STxDataHead_ab *PCSTxDataHead_ab;
-
-typedef struct tagSTxDataHead_a_FB {
- u8 bySignalField;
- u8 byServiceField;
- u16 wTransmitLength;
- u16 wDuration;
- u16 wTimeStampOff;
- u16 wDuration_f0;
- u16 wDuration_f1;
-} __attribute__ ((__packed__))
-STxDataHead_a_FB, *PSTxDataHead_a_FB;
-typedef const STxDataHead_a_FB *PCSTxDataHead_a_FB;
-
-/*
- * MICHDR data header
- */
-typedef struct tagSMICHDRHead {
- u32 adwHDR0[4];
- u32 adwHDR1[4];
- u32 adwHDR2[4];
-} __attribute__ ((__packed__))
-SMICHDRHead, *PSMICHDRHead;
-
-typedef const SMICHDRHead *PCSMICHDRHead;
-
typedef struct tagSBEACONCtl {
u32 BufReady:1;
u32 TSF:15;
diff --git a/drivers/staging/vt6656/device.h b/drivers/staging/vt6656/device.h
index f07ba242811..8e396341c5e 100644
--- a/drivers/staging/vt6656/device.h
+++ b/drivers/staging/vt6656/device.h
@@ -166,8 +166,7 @@ typedef enum _CONTEXT_TYPE {
} CONTEXT_TYPE;
/* RCB (Receive Control Block) */
-typedef struct _RCB
-{
+struct vnt_rcb {
void *Next;
signed long Ref;
void *pDevice;
@@ -175,21 +174,20 @@ typedef struct _RCB
struct vnt_rx_mgmt sMngPacket;
struct sk_buff *skb;
int bBoolInUse;
-
-} RCB, *PRCB;
+};
/* used to track bulk out irps */
-typedef struct _USB_SEND_CONTEXT {
- void *pDevice;
- struct sk_buff *pPacket;
- struct urb *pUrb;
- unsigned int uBufLen;
- CONTEXT_TYPE Type;
- struct ethhdr sEthHeader;
- void *Next;
- bool bBoolInUse;
- unsigned char Data[MAX_TOTAL_SIZE_WITH_ALL_HEADERS];
-} USB_SEND_CONTEXT, *PUSB_SEND_CONTEXT;
+struct vnt_usb_send_context {
+ void *pDevice;
+ struct sk_buff *pPacket;
+ struct urb *pUrb;
+ unsigned int uBufLen;
+ CONTEXT_TYPE Type;
+ struct ethhdr sEthHeader;
+ void *Next;
+ bool bBoolInUse;
+ unsigned char Data[MAX_TOTAL_SIZE_WITH_ALL_HEADERS];
+};
/* structure got from configuration file as user-desired default settings */
typedef struct _DEFAULT_CONFIG {
@@ -416,21 +414,21 @@ struct vnt_private {
u32 int_interval;
/* Variables to track resources for the BULK In Pipe */
- PRCB pRCBMem;
- PRCB apRCB[CB_MAX_RX_DESC];
+ struct vnt_rcb *pRCBMem;
+ struct vnt_rcb *apRCB[CB_MAX_RX_DESC];
u32 cbRD;
- PRCB FirstRecvFreeList;
- PRCB LastRecvFreeList;
+ struct vnt_rcb *FirstRecvFreeList;
+ struct vnt_rcb *LastRecvFreeList;
u32 NumRecvFreeList;
- PRCB FirstRecvMngList;
- PRCB LastRecvMngList;
+ struct vnt_rcb *FirstRecvMngList;
+ struct vnt_rcb *LastRecvMngList;
u32 NumRecvMngList;
int bIsRxWorkItemQueued;
int bIsRxMngWorkItemQueued;
unsigned long ulRcvRefCount; /* packets that have not returned back */
/* Variables to track resources for the BULK Out Pipe */
- PUSB_SEND_CONTEXT apTD[CB_MAX_TX_DESC];
+ struct vnt_usb_send_context *apTD[CB_MAX_TX_DESC];
u32 cbTD;
/* Variables to track resources for the Interrupt In Pipe */
@@ -591,18 +589,11 @@ struct vnt_private {
u8 abyBSSID[ETH_ALEN];
u8 abyDesireBSSID[ETH_ALEN];
- u16 wCTSDuration; /* update while speed change */
- u16 wACKDuration;
- u16 wRTSTransmitLen;
- u8 byRTSServiceField;
- u8 byRTSSignalField;
-
u32 dwMaxReceiveLifetime; /* dot11MaxReceiveLifetime */
int bCCK;
int bEncryptionEnable;
int bLongHeader;
- int bSoftwareGenCrcErr;
int bShortSlotTime;
int bProtectMode;
int bNonERPPresent;
@@ -781,7 +772,7 @@ struct vnt_private {
#define DequeueRCB(Head, Tail) \
{ \
- PRCB RCB = Head; \
+ struct vnt_rcb *RCB = Head; \
if (!RCB->Next) { \
Tail = NULL; \
} \
diff --git a/drivers/staging/vt6656/device_cfg.h b/drivers/staging/vt6656/device_cfg.h
index ea66b975fa5..a97f7bb13db 100644
--- a/drivers/staging/vt6656/device_cfg.h
+++ b/drivers/staging/vt6656/device_cfg.h
@@ -82,18 +82,4 @@ typedef enum _chip_type {
VT3184 = 1
} CHIP_TYPE, *PCHIP_TYPE;
-#ifdef VIAWET_DEBUG
-#define ASSERT(x) { \
- if (!(x)) { \
- printk(KERN_ERR "assertion %s failed: file %s line %d\n", #x, \
- __FUNCTION__, __LINE__);\
- *(int *) 0 = 0; \
- } \
-}
-#define DBG_PORT80(value) outb(value, 0x80)
-#else
-#define ASSERT(x)
-#define DBG_PORT80(value)
-#endif
-
#endif
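
The ASSERT() deleted here was compiled in only under VIAWET_DEBUG and, on failure, crashed the machine with a deliberate NULL store. The later hunks simply remove its call sites and rely on ordinary error handling. A small userspace sketch of the two styles, with names (HARD_ASSERT, process) invented purely for illustration:

/* Illustrative sketch only. Build: cc -Wall assert_sketch.c */
#include <stdio.h>

/* Old style: a failed check was meant to stop the machine on the spot. */
#define HARD_ASSERT(x) do { \
	if (!(x)) { \
		fprintf(stderr, "assertion %s failed\n", #x); \
		/* the removed macro then did: *(int *)0 = 0; */ \
	} \
} while (0)

/* Style kept by the patched code: validate the input and bail out. */
static int process(const char *buf)
{
	if (!buf) {
		fprintf(stderr, "process: NULL buffer, dropping\n");
		return -1;
	}
	printf("processing \"%s\"\n", buf);
	return 0;
}

int main(void)
{
	HARD_ASSERT(1 == 1);   /* passes, prints nothing */
	process(NULL);         /* handled gracefully, no crash */
	process("frame");
	return 0;
}
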
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index 7ec166a2ac8..ea7d443b11d 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -246,7 +246,7 @@ s_vGetDASA (
*pcbHeaderSize = cbHeaderSize;
}
-int RXbBulkInProcessData(struct vnt_private *pDevice, PRCB pRCB,
+int RXbBulkInProcessData(struct vnt_private *pDevice, struct vnt_rcb *pRCB,
unsigned long BytesToIndicate)
{
struct net_device_stats *pStats = &pDevice->stats;
@@ -271,7 +271,7 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, PRCB pRCB,
/* signed long ldBm = 0; */
int bIsWEP = false; int bExtIV = false;
u32 dwWbkStatus;
- PRCB pRCBIndicate = pRCB;
+ struct vnt_rcb *pRCBIndicate = pRCB;
u8 *pbyDAddress;
u16 *pwPLCP_Length;
u8 abyVaildRate[MAX_RATE]
@@ -314,7 +314,6 @@ int RXbBulkInProcessData(struct vnt_private *pDevice, PRCB pRCB,
(BytesToIndicate < (*pwPLCP_Length)) ) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Wrong PLCP Length %x\n", (int) *pwPLCP_Length);
- ASSERT(0);
return false;
}
for ( ii=RATE_1M;ii<MAX_RATE;ii++) {
@@ -1337,7 +1336,7 @@ static int s_bAPModeRxData(struct vnt_private *pDevice, struct sk_buff *skb,
void RXvWorkItem(struct vnt_private *pDevice)
{
int ntStatus;
- PRCB pRCB = NULL;
+ struct vnt_rcb *pRCB = NULL;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->Rx Polling Thread\n");
spin_lock_irq(&pDevice->lock);
@@ -1347,7 +1346,6 @@ void RXvWorkItem(struct vnt_private *pDevice)
(pDevice->NumRecvFreeList != 0) ) {
pRCB = pDevice->FirstRecvFreeList;
pDevice->NumRecvFreeList--;
- ASSERT(pRCB);// cannot be NULL
DequeueRCB(pDevice->FirstRecvFreeList, pDevice->LastRecvFreeList);
ntStatus = PIPEnsBulkInUsbRead(pDevice, pRCB);
}
@@ -1356,15 +1354,12 @@ void RXvWorkItem(struct vnt_private *pDevice)
}
-void RXvFreeRCB(PRCB pRCB, int bReAllocSkb)
+void RXvFreeRCB(struct vnt_rcb *pRCB, int bReAllocSkb)
{
struct vnt_private *pDevice = pRCB->pDevice;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->RXvFreeRCB\n");
- ASSERT(!pRCB->Ref); // should be 0
- ASSERT(pRCB->pDevice); // shouldn't be NULL
-
if (bReAllocSkb == false) {
kfree_skb(pRCB->skb);
bReAllocSkb = true;
@@ -1396,7 +1391,7 @@ void RXvFreeRCB(PRCB pRCB, int bReAllocSkb)
void RXvMngWorkItem(struct vnt_private *pDevice)
{
- PRCB pRCB = NULL;
+ struct vnt_rcb *pRCB = NULL;
struct vnt_rx_mgmt *pRxPacket;
int bReAllocSkb = false;
@@ -1411,7 +1406,6 @@ void RXvMngWorkItem(struct vnt_private *pDevice)
if(!pRCB){
break;
}
- ASSERT(pRCB);// cannot be NULL
pRxPacket = &(pRCB->sMngPacket);
vMgrRxManagePacket(pDevice, &pDevice->vnt_mgmt, pRxPacket);
pRCB->Ref--;
diff --git a/drivers/staging/vt6656/dpc.h b/drivers/staging/vt6656/dpc.h
index 876468f2c3d..95388dc03ee 100644
--- a/drivers/staging/vt6656/dpc.h
+++ b/drivers/staging/vt6656/dpc.h
@@ -36,9 +36,9 @@ void RXvWorkItem(void *Context);
void RXvMngWorkItem(void *Context);
-void RXvFreeRCB(PRCB pRCB, int bReAllocSkb);
+void RXvFreeRCB(struct vnt_rcb *pRCB, int bReAllocSkb);
-int RXbBulkInProcessData(struct vnt_private *, PRCB pRCB,
+int RXbBulkInProcessData(struct vnt_private *, struct vnt_rcb *pRCB,
unsigned long BytesToIndicate);
#endif /* __RXTX_H__ */
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 3a3fdc58b6d..536971786ae 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -267,7 +267,6 @@ device_set_options(struct vnt_private *pDevice) {
pDevice->bUpdateBBVGA = true;
pDevice->byFOETuning = 0;
pDevice->byAutoPwrTunning = 0;
- pDevice->wCTSDuration = 0;
pDevice->byPreambleType = 0;
pDevice->bExistSWNetAddr = false;
/* pDevice->bDiversityRegCtlON = true; */
@@ -734,7 +733,7 @@ err_nomem:
static void device_free_tx_bufs(struct vnt_private *pDevice)
{
- PUSB_SEND_CONTEXT pTxContext;
+ struct vnt_usb_send_context *pTxContext;
int ii;
for (ii = 0; ii < pDevice->cbTD; ii++) {
@@ -752,8 +751,8 @@ static void device_free_tx_bufs(struct vnt_private *pDevice)
static void device_free_rx_bufs(struct vnt_private *pDevice)
{
- PRCB pRCB;
- int ii;
+ struct vnt_rcb *pRCB;
+ int ii;
for (ii = 0; ii < pDevice->cbRD; ii++) {
@@ -789,14 +788,13 @@ static void device_free_int_bufs(struct vnt_private *pDevice)
static bool device_alloc_bufs(struct vnt_private *pDevice)
{
-
- PUSB_SEND_CONTEXT pTxContext;
- PRCB pRCB;
- int ii;
+ struct vnt_usb_send_context *pTxContext;
+ struct vnt_rcb *pRCB;
+ int ii;
for (ii = 0; ii < pDevice->cbTD; ii++) {
- pTxContext = kmalloc(sizeof(USB_SEND_CONTEXT), GFP_KERNEL);
+ pTxContext = kmalloc(sizeof(struct vnt_usb_send_context), GFP_KERNEL);
if (pTxContext == NULL) {
DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s : allocate tx usb context failed\n", pDevice->dev->name);
goto free_tx;
@@ -813,7 +811,8 @@ static bool device_alloc_bufs(struct vnt_private *pDevice)
}
/* allocate RCB mem */
- pDevice->pRCBMem = kzalloc((sizeof(RCB) * pDevice->cbRD), GFP_KERNEL);
+ pDevice->pRCBMem = kzalloc((sizeof(struct vnt_rcb) * pDevice->cbRD),
+ GFP_KERNEL);
if (pDevice->pRCBMem == NULL) {
DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s : alloc rx usb context failed\n", pDevice->dev->name);
goto free_tx;
@@ -824,7 +823,8 @@ static bool device_alloc_bufs(struct vnt_private *pDevice)
pDevice->FirstRecvMngList = NULL;
pDevice->LastRecvMngList = NULL;
pDevice->NumRecvFreeList = 0;
- pRCB = (PRCB) pDevice->pRCBMem;
+
+ pRCB = (struct vnt_rcb *)pDevice->pRCBMem;
for (ii = 0; ii < pDevice->cbRD; ii++) {
@@ -925,7 +925,6 @@ int device_alloc_frag_buf(struct vnt_private *pDevice,
pDeF->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
if (pDeF->skb == NULL)
return false;
- ASSERT(pDeF->skb);
pDeF->skb->dev = pDevice->dev;
return true;
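
The allocation hunk above sizes the receive pool as cbRD copies of struct vnt_rcb in one zeroed block and then walks the entries to build the free list. A userspace analogue of that pattern, with simplified fields and invented names; this is a sketch of the idea, not the driver's code:

/* Illustrative sketch only. Build: cc -Wall rcb_pool.c */
#include <stdio.h>
#include <stdlib.h>

struct rcb {
	struct rcb *next;
	int in_use;
};

int main(void)
{
	size_t count = 8;
	struct rcb *pool, *head = NULL, *p;
	size_t i;

	/* One zeroed block for the whole pool (kzalloc analogue). */
	pool = calloc(count, sizeof(*pool));
	if (!pool)
		return 1;

	/* Thread the entries onto a simple free list, newest first. */
	for (i = 0; i < count; i++) {
		p = &pool[i];
		p->next = head;
		head = p;
	}

	/* Pop one entry, as the bulk-in path does with FirstRecvFreeList. */
	p = head;
	head = head->next;
	p->in_use = 1;
	printf("took entry %td, %zu left on the free list\n", p - pool, count - 1);

	free(pool);
	return 0;
}
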
diff --git a/drivers/staging/vt6656/rxtx.c b/drivers/staging/vt6656/rxtx.c
index 9bf2f8d562c..fb743a8811b 100644
--- a/drivers/staging/vt6656/rxtx.c
+++ b/drivers/staging/vt6656/rxtx.c
@@ -52,7 +52,6 @@
#include "card.h"
#include "bssdb.h"
#include "mac.h"
-#include "baseband.h"
#include "michael.h"
#include "tkip.h"
#include "tcrc.h"
@@ -101,13 +100,12 @@ static void *s_vGetFreeContext(struct vnt_private *pDevice);
static void s_vGenerateTxParameter(struct vnt_private *pDevice,
u8 byPktType, u16 wCurrentRate, void *pTxBufHead, void *pvRrvTime,
- void *pvRTS, void *pvCTS, u32 cbFrameSize, int bNeedACK, u32 uDMAIdx,
- struct ethhdr *psEthHeader);
+ void *rts_cts, u32 cbFrameSize, int bNeedACK, u32 uDMAIdx,
+ struct ethhdr *psEthHeader, bool need_rts);
static u32 s_uFillDataHead(struct vnt_private *pDevice,
u8 byPktType, u16 wCurrentRate, void *pTxDataHead, u32 cbFrameLength,
- u32 uDMAIdx, int bNeedAck, u32 uFragIdx, u32 cbLastFragmentSize,
- u32 uMACfragNum, u8 byFBOption);
+ u32 uDMAIdx, int bNeedAck, u8 byFBOption);
static void s_vGenerateMACHeader(struct vnt_private *pDevice,
u8 *pbyBufferAddr, u16 wDuration, struct ethhdr *psEthHeader,
@@ -115,7 +113,7 @@ static void s_vGenerateMACHeader(struct vnt_private *pDevice,
static void s_vFillTxKey(struct vnt_private *pDevice, u8 *pbyBuf,
u8 *pbyIVHead, PSKeyItem pTransmitKey, u8 *pbyHdrBuf, u16 wPayloadLen,
- u8 *pMICHDR);
+ struct vnt_mic_hdr *mic_hdr);
static void s_vSWencryption(struct vnt_private *pDevice,
PSKeyItem pTransmitKey, u8 *pbyPayloadHead, u16 wPayloadSize);
@@ -123,30 +121,28 @@ static void s_vSWencryption(struct vnt_private *pDevice,
static unsigned int s_uGetTxRsvTime(struct vnt_private *pDevice, u8 byPktType,
u32 cbFrameLength, u16 wRate, int bNeedAck);
-static u32 s_uGetRTSCTSRsvTime(struct vnt_private *pDevice, u8 byRTSRsvType,
+static u16 s_uGetRTSCTSRsvTime(struct vnt_private *pDevice, u8 byRTSRsvType,
u8 byPktType, u32 cbFrameLength, u16 wCurrentRate);
static void s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
- u8 byPktType, void *pvCTS, u32 cbFrameLength, int bNeedAck,
- int bDisCRC, u16 wCurrentRate, u8 byFBOption);
+ u8 byPktType, union vnt_tx_data_head *head, u32 cbFrameLength,
+ int bNeedAck, u16 wCurrentRate, u8 byFBOption);
static void s_vFillRTSHead(struct vnt_private *pDevice, u8 byPktType,
- void *pvRTS, u32 cbFrameLength, int bNeedAck, int bDisCRC,
+ union vnt_tx_data_head *head, u32 cbFrameLength, int bNeedAck,
struct ethhdr *psEthHeader, u16 wCurrentRate, u8 byFBOption);
-static u32 s_uGetDataDuration(struct vnt_private *pDevice, u8 byDurType,
- u32 cbFrameLength, u8 byPktType, u16 wRate, int bNeedAck,
- u32 uFragIdx, u32 cbLastFragmentSize, u32 uMACfragNum,
- u8 byFBOption);
+static u16 s_uGetDataDuration(struct vnt_private *pDevice,
+ u8 byPktType, int bNeedAck);
-static unsigned int s_uGetRTSCTSDuration(struct vnt_private *pDevice,
+static u16 s_uGetRTSCTSDuration(struct vnt_private *pDevice,
u8 byDurType, u32 cbFrameLength, u8 byPktType, u16 wRate,
int bNeedAck, u8 byFBOption);
static void *s_vGetFreeContext(struct vnt_private *pDevice)
{
- PUSB_SEND_CONTEXT pContext = NULL;
- PUSB_SEND_CONTEXT pReturnContext = NULL;
+ struct vnt_usb_send_context *pContext = NULL;
+ struct vnt_usb_send_context *pReturnContext = NULL;
int ii;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"GetFreeContext()\n");
@@ -155,6 +151,7 @@ static void *s_vGetFreeContext(struct vnt_private *pDevice)
pContext = pDevice->apTD[ii];
if (pContext->bBoolInUse == false) {
pContext->bBoolInUse = true;
+ memset(pContext->Data, 0, MAX_TOTAL_SIZE_WITH_ALL_HEADERS);
pReturnContext = pContext;
break;
}
@@ -186,109 +183,117 @@ static void s_vSaveTxPktInfo(struct vnt_private *pDevice, u8 byPktNum,
static void s_vFillTxKey(struct vnt_private *pDevice, u8 *pbyBuf,
u8 *pbyIVHead, PSKeyItem pTransmitKey, u8 *pbyHdrBuf,
- u16 wPayloadLen, u8 *pMICHDR)
+ u16 wPayloadLen, struct vnt_mic_hdr *mic_hdr)
{
u32 *pdwIV = (u32 *)pbyIVHead;
u32 *pdwExtIV = (u32 *)((u8 *)pbyIVHead + 4);
- u16 wValue;
struct ieee80211_hdr *pMACHeader = (struct ieee80211_hdr *)pbyHdrBuf;
u32 dwRevIVCounter;
- //Fill TXKEY
- if (pTransmitKey == NULL)
- return;
+ /* Fill TXKEY */
+ if (pTransmitKey == NULL)
+ return;
- dwRevIVCounter = cpu_to_le32(pDevice->dwIVCounter);
- *pdwIV = pDevice->dwIVCounter;
- pDevice->byKeyIndex = pTransmitKey->dwKeyIndex & 0xf;
+ dwRevIVCounter = cpu_to_le32(pDevice->dwIVCounter);
+ *pdwIV = pDevice->dwIVCounter;
+ pDevice->byKeyIndex = pTransmitKey->dwKeyIndex & 0xf;
- if (pTransmitKey->byCipherSuite == KEY_CTL_WEP) {
- if (pTransmitKey->uKeyLength == WLAN_WEP232_KEYLEN ){
- memcpy(pDevice->abyPRNG, (u8 *)&(dwRevIVCounter), 3);
- memcpy(pDevice->abyPRNG+3, pTransmitKey->abyKey, pTransmitKey->uKeyLength);
- } else {
- memcpy(pbyBuf, (u8 *)&(dwRevIVCounter), 3);
- memcpy(pbyBuf+3, pTransmitKey->abyKey, pTransmitKey->uKeyLength);
- if(pTransmitKey->uKeyLength == WLAN_WEP40_KEYLEN) {
- memcpy(pbyBuf+8, (u8 *)&(dwRevIVCounter), 3);
- memcpy(pbyBuf+11, pTransmitKey->abyKey, pTransmitKey->uKeyLength);
- }
- memcpy(pDevice->abyPRNG, pbyBuf, 16);
- }
- // Append IV after Mac Header
- *pdwIV &= WEP_IV_MASK;//00000000 11111111 11111111 11111111
- *pdwIV |= (u32)pDevice->byKeyIndex << 30;
- *pdwIV = cpu_to_le32(*pdwIV);
- pDevice->dwIVCounter++;
- if (pDevice->dwIVCounter > WEP_IV_MASK) {
- pDevice->dwIVCounter = 0;
- }
- } else if (pTransmitKey->byCipherSuite == KEY_CTL_TKIP) {
- pTransmitKey->wTSC15_0++;
- if (pTransmitKey->wTSC15_0 == 0) {
- pTransmitKey->dwTSC47_16++;
- }
- TKIPvMixKey(pTransmitKey->abyKey, pDevice->abyCurrentNetAddr,
- pTransmitKey->wTSC15_0, pTransmitKey->dwTSC47_16, pDevice->abyPRNG);
- memcpy(pbyBuf, pDevice->abyPRNG, 16);
- // Make IV
- memcpy(pdwIV, pDevice->abyPRNG, 3);
-
- *(pbyIVHead+3) = (u8)(((pDevice->byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV
- // Append IV&ExtIV after Mac Header
- *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
- DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"vFillTxKey()---- pdwExtIV: %x\n",
- *pdwExtIV);
-
- } else if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
- pTransmitKey->wTSC15_0++;
- if (pTransmitKey->wTSC15_0 == 0) {
- pTransmitKey->dwTSC47_16++;
- }
- memcpy(pbyBuf, pTransmitKey->abyKey, 16);
-
- // Make IV
- *pdwIV = 0;
- *(pbyIVHead+3) = (u8)(((pDevice->byKeyIndex << 6) & 0xc0) | 0x20); // 0x20 is ExtIV
- *pdwIV |= cpu_to_le16((u16)(pTransmitKey->wTSC15_0));
- //Append IV&ExtIV after Mac Header
- *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
-
- //Fill MICHDR0
- *pMICHDR = 0x59;
- *((u8 *)(pMICHDR+1)) = 0; // TxPriority
- memcpy(pMICHDR+2, &(pMACHeader->addr2[0]), 6);
- *((u8 *)(pMICHDR+8)) = HIBYTE(HIWORD(pTransmitKey->dwTSC47_16));
- *((u8 *)(pMICHDR+9)) = LOBYTE(HIWORD(pTransmitKey->dwTSC47_16));
- *((u8 *)(pMICHDR+10)) = HIBYTE(LOWORD(pTransmitKey->dwTSC47_16));
- *((u8 *)(pMICHDR+11)) = LOBYTE(LOWORD(pTransmitKey->dwTSC47_16));
- *((u8 *)(pMICHDR+12)) = HIBYTE(pTransmitKey->wTSC15_0);
- *((u8 *)(pMICHDR+13)) = LOBYTE(pTransmitKey->wTSC15_0);
- *((u8 *)(pMICHDR+14)) = HIBYTE(wPayloadLen);
- *((u8 *)(pMICHDR+15)) = LOBYTE(wPayloadLen);
-
- //Fill MICHDR1
- *((u8 *)(pMICHDR+16)) = 0; // HLEN[15:8]
- if (pDevice->bLongHeader) {
- *((u8 *)(pMICHDR+17)) = 28; // HLEN[7:0]
- } else {
- *((u8 *)(pMICHDR+17)) = 22; // HLEN[7:0]
- }
- wValue = cpu_to_le16(pMACHeader->frame_control & 0xC78F);
- memcpy(pMICHDR+18, (u8 *)&wValue, 2); // MSKFRACTL
- memcpy(pMICHDR+20, &(pMACHeader->addr1[0]), 6);
- memcpy(pMICHDR+26, &(pMACHeader->addr2[0]), 6);
-
- //Fill MICHDR2
- memcpy(pMICHDR+32, &(pMACHeader->addr3[0]), 6);
- wValue = pMACHeader->seq_ctrl;
- wValue &= 0x000F;
- wValue = cpu_to_le16(wValue);
- memcpy(pMICHDR+38, (u8 *)&wValue, 2); // MSKSEQCTL
- if (pDevice->bLongHeader) {
- memcpy(pMICHDR+40, &(pMACHeader->addr4[0]), 6);
- }
- }
+ switch (pTransmitKey->byCipherSuite) {
+ case KEY_CTL_WEP:
+ if (pTransmitKey->uKeyLength == WLAN_WEP232_KEYLEN) {
+ memcpy(pDevice->abyPRNG, (u8 *)&dwRevIVCounter, 3);
+ memcpy(pDevice->abyPRNG + 3, pTransmitKey->abyKey,
+ pTransmitKey->uKeyLength);
+ } else {
+ memcpy(pbyBuf, (u8 *)&dwRevIVCounter, 3);
+ memcpy(pbyBuf + 3, pTransmitKey->abyKey,
+ pTransmitKey->uKeyLength);
+ if (pTransmitKey->uKeyLength == WLAN_WEP40_KEYLEN) {
+ memcpy(pbyBuf+8, (u8 *)&dwRevIVCounter, 3);
+ memcpy(pbyBuf+11, pTransmitKey->abyKey,
+ pTransmitKey->uKeyLength);
+ }
+
+ memcpy(pDevice->abyPRNG, pbyBuf, 16);
+ }
+ /* Append IV after Mac Header */
+ *pdwIV &= WEP_IV_MASK;
+ *pdwIV |= (u32)pDevice->byKeyIndex << 30;
+ *pdwIV = cpu_to_le32(*pdwIV);
+
+ pDevice->dwIVCounter++;
+ if (pDevice->dwIVCounter > WEP_IV_MASK)
+ pDevice->dwIVCounter = 0;
+
+ break;
+ case KEY_CTL_TKIP:
+ pTransmitKey->wTSC15_0++;
+ if (pTransmitKey->wTSC15_0 == 0)
+ pTransmitKey->dwTSC47_16++;
+
+ TKIPvMixKey(pTransmitKey->abyKey, pDevice->abyCurrentNetAddr,
+ pTransmitKey->wTSC15_0, pTransmitKey->dwTSC47_16,
+ pDevice->abyPRNG);
+ memcpy(pbyBuf, pDevice->abyPRNG, 16);
+
+ /* Make IV */
+ memcpy(pdwIV, pDevice->abyPRNG, 3);
+
+ *(pbyIVHead+3) = (u8)(((pDevice->byKeyIndex << 6) &
+ 0xc0) | 0x20);
+ /* Append IV&ExtIV after Mac Header */
+ *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
+
+ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO
+ "vFillTxKey()---- pdwExtIV: %x\n", *pdwExtIV);
+
+ break;
+ case KEY_CTL_CCMP:
+ pTransmitKey->wTSC15_0++;
+ if (pTransmitKey->wTSC15_0 == 0)
+ pTransmitKey->dwTSC47_16++;
+
+ memcpy(pbyBuf, pTransmitKey->abyKey, 16);
+
+ /* Make IV */
+ *pdwIV = 0;
+ *(pbyIVHead+3) = (u8)(((pDevice->byKeyIndex << 6) &
+ 0xc0) | 0x20);
+
+ *pdwIV |= cpu_to_le16((u16)(pTransmitKey->wTSC15_0));
+
+ /* Append IV&ExtIV after Mac Header */
+ *pdwExtIV = cpu_to_le32(pTransmitKey->dwTSC47_16);
+
+ if (!mic_hdr)
+ return;
+
+ /* MICHDR0 */
+ mic_hdr->id = 0x59;
+ mic_hdr->payload_len = cpu_to_be16(wPayloadLen);
+ memcpy(mic_hdr->mic_addr2, pMACHeader->addr2, ETH_ALEN);
+
+ mic_hdr->tsc_47_16 = cpu_to_be32(pTransmitKey->dwTSC47_16);
+ mic_hdr->tsc_15_0 = cpu_to_be16(pTransmitKey->wTSC15_0);
+
+ /* MICHDR1 */
+ if (pDevice->bLongHeader)
+ mic_hdr->hlen = cpu_to_be16(28);
+ else
+ mic_hdr->hlen = cpu_to_be16(22);
+
+ memcpy(mic_hdr->addr1, pMACHeader->addr1, ETH_ALEN);
+ memcpy(mic_hdr->addr2, pMACHeader->addr2, ETH_ALEN);
+
+ /* MICHDR2 */
+ memcpy(mic_hdr->addr3, pMACHeader->addr3, ETH_ALEN);
+ mic_hdr->frame_control = cpu_to_le16(pMACHeader->frame_control
+ & 0xc78f);
+ mic_hdr->seq_ctrl = cpu_to_le16(pMACHeader->seq_ctrl & 0xf);
+
+ if (pDevice->bLongHeader)
+ memcpy(mic_hdr->addr4, pMACHeader->addr4, ETH_ALEN);
+ }
}
static void s_vSWencryption(struct vnt_private *pDevice,
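
The rewritten s_vFillTxKey stops poking MIC header bytes at numeric offsets (pMICHDR+8, +17, +38, ...) and instead fills named fields of struct vnt_mic_hdr, storing multi-byte values with cpu_to_be16/cpu_to_be32. A userspace sketch of the same idea using glibc's htobe16/htobe32; the struct layout below is simplified and is not the driver's real vnt_mic_hdr:

/* Illustrative sketch only. Build: cc -Wall mic_hdr_sketch.c (glibc endian.h) */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-in for a device header with big-endian fields. */
struct mic_hdr_sketch {
	uint8_t  id;
	uint8_t  tx_priority;
	uint8_t  addr2[6];
	uint32_t tsc_47_16;     /* stored big-endian */
	uint16_t tsc_15_0;      /* stored big-endian */
	uint16_t payload_len;   /* stored big-endian */
} __attribute__((packed));

int main(void)
{
	struct mic_hdr_sketch hdr;
	const uint8_t addr2[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	memset(&hdr, 0, sizeof(hdr));
	hdr.id = 0x59;
	memcpy(hdr.addr2, addr2, sizeof(addr2));

	/* Named fields plus explicit byte-order helpers replace the old
	 * HIBYTE/LOBYTE pokes at hand-counted offsets. */
	hdr.tsc_47_16   = htobe32(0x00010203);
	hdr.tsc_15_0    = htobe16(0x0405);
	hdr.payload_len = htobe16(1500);

	printf("header is %zu bytes, first byte 0x%02x\n", sizeof(hdr), hdr.id);
	return 0;
}
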
@@ -326,6 +331,12 @@ static void s_vSWencryption(struct vnt_private *pDevice,
}
}
+static u16 vnt_time_stamp_off(struct vnt_private *priv, u16 rate)
+{
+ return cpu_to_le16(wTimeStampOff[priv->byPreambleType % 2]
+ [rate % MAX_RATE]);
+}
+
/*byPktType : PK_TYPE_11A 0
PK_TYPE_11B 1
PK_TYPE_11GB 2
@@ -351,8 +362,15 @@ static u32 s_uGetTxRsvTime(struct vnt_private *pDevice, u8 byPktType,
}
}
+static u16 vnt_rxtx_rsvtime_le16(struct vnt_private *priv, u8 pkt_type,
+ u32 frame_length, u16 rate, int need_ack)
+{
+ return cpu_to_le16((u16)s_uGetTxRsvTime(priv, pkt_type,
+ frame_length, rate, need_ack));
+}
+
//byFreqType: 0=>5GHZ 1=>2.4GHZ
-static u32 s_uGetRTSCTSRsvTime(struct vnt_private *pDevice,
+static u16 s_uGetRTSCTSRsvTime(struct vnt_private *pDevice,
u8 byRTSRsvType, u8 byPktType, u32 cbFrameLength, u16 wCurrentRate)
{
u32 uRrvTime, uRTSTime, uCTSTime, uAckTime, uDataTime;
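
From this point on, the duration and reserve-time helpers are changed to return u16 values already converted with cpu_to_le16, so callers assign them straight into the little-endian descriptor fields instead of wrapping every call site. A userspace sketch of that convention using glibc's htole16 and an invented helper name:

/* Illustrative sketch only. Build: cc -Wall le16_sketch.c (glibc endian.h) */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Helper that hands back a value already in wire (little-endian) order,
 * so the caller can store it into a descriptor field directly. */
static uint16_t rsvtime_le16(unsigned int usecs)
{
	return htole16((uint16_t)usecs);
}

int main(void)
{
	uint16_t wire = rsvtime_le16(314);
	const uint8_t *bytes = (const uint8_t *)&wire;

	/* Low byte first regardless of host byte order: prints "3a 01". */
	printf("%02x %02x\n", bytes[0], bytes[1]);
	return 0;
}
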
@@ -382,168 +400,30 @@ static u32 s_uGetRTSCTSRsvTime(struct vnt_private *pDevice,
//RTSRrvTime
uRrvTime = uRTSTime + uCTSTime + uAckTime + uDataTime + 3*pDevice->uSIFS;
- return uRrvTime;
+ return cpu_to_le16((u16)uRrvTime);
}
//byFreqType 0: 5GHz, 1:2.4Ghz
-static u32 s_uGetDataDuration(struct vnt_private *pDevice, u8 byDurType,
- u32 cbFrameLength, u8 byPktType, u16 wRate, int bNeedAck,
- u32 uFragIdx, u32 cbLastFragmentSize, u32 uMACfragNum,
- u8 byFBOption)
+static u16 s_uGetDataDuration(struct vnt_private *pDevice,
+ u8 byPktType, int bNeedAck)
{
- int bLastFrag = 0;
- u32 uAckTime = 0, uNextPktTime = 0;
-
- if (uFragIdx == (uMACfragNum-1)) {
- bLastFrag = 1;
- }
-
- switch (byDurType) {
-
- case DATADUR_B: //DATADUR_B
- if (((uMACfragNum == 1)) || (bLastFrag == 1)) {//Non Frag or Last Frag
- if (bNeedAck) {
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- return (pDevice->uSIFS + uAckTime);
- } else {
- return 0;
- }
- }
- else {//First Frag or Mid Frag
- if (uFragIdx == (uMACfragNum-2)) {
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wRate, bNeedAck);
- } else {
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
- }
- if (bNeedAck) {
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
- return (pDevice->uSIFS + uAckTime + uNextPktTime);
- } else {
- return (pDevice->uSIFS + uNextPktTime);
- }
- }
- break;
-
- case DATADUR_A: //DATADUR_A
- if (((uMACfragNum==1)) || (bLastFrag==1)) {//Non Frag or Last Frag
- if(bNeedAck){
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return (pDevice->uSIFS + uAckTime);
- } else {
- return 0;
- }
- }
- else {//First Frag or Mid Frag
- if(uFragIdx == (uMACfragNum-2)){
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wRate, bNeedAck);
- } else {
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
- }
- if(bNeedAck){
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return (pDevice->uSIFS + uAckTime + uNextPktTime);
- } else {
- return (pDevice->uSIFS + uNextPktTime);
- }
- }
- break;
-
- case DATADUR_A_F0: //DATADUR_A_F0
- if (((uMACfragNum==1)) || (bLastFrag==1)) {//Non Frag or Last Frag
- if(bNeedAck){
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return (pDevice->uSIFS + uAckTime);
- } else {
- return 0;
- }
- }
- else { //First Frag or Mid Frag
- if (byFBOption == AUTO_FB_0) {
- if (wRate < RATE_18M)
- wRate = RATE_18M;
- else if (wRate > RATE_54M)
- wRate = RATE_54M;
-
- if(uFragIdx == (uMACfragNum-2)){
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
- } else {
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate-RATE_18M], bNeedAck);
- }
- } else { // (byFBOption == AUTO_FB_1)
- if (wRate < RATE_18M)
- wRate = RATE_18M;
- else if (wRate > RATE_54M)
- wRate = RATE_54M;
-
- if(uFragIdx == (uMACfragNum-2)){
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
- } else {
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE0][wRate-RATE_18M], bNeedAck);
- }
- }
-
- if(bNeedAck){
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return (pDevice->uSIFS + uAckTime + uNextPktTime);
- } else {
- return (pDevice->uSIFS + uNextPktTime);
- }
- }
- break;
-
- case DATADUR_A_F1: //DATADUR_A_F1
- if (((uMACfragNum==1)) || (bLastFrag==1)) {//Non Frag or Last Frag
- if(bNeedAck){
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return (pDevice->uSIFS + uAckTime);
- } else {
- return 0;
- }
- }
- else { //First Frag or Mid Frag
- if (byFBOption == AUTO_FB_0) {
- if (wRate < RATE_18M)
- wRate = RATE_18M;
- else if (wRate > RATE_54M)
- wRate = RATE_54M;
-
- if(uFragIdx == (uMACfragNum-2)){
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
- } else {
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate-RATE_18M], bNeedAck);
- }
-
- } else { // (byFBOption == AUTO_FB_1)
- if (wRate < RATE_18M)
- wRate = RATE_18M;
- else if (wRate > RATE_54M)
- wRate = RATE_54M;
-
- if(uFragIdx == (uMACfragNum-2)){
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbLastFragmentSize, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
- } else {
- uNextPktTime = s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt1[FB_RATE1][wRate-RATE_18M], bNeedAck);
- }
- }
- if(bNeedAck){
- uAckTime = BBuGetFrameTime(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
- return (pDevice->uSIFS + uAckTime + uNextPktTime);
- } else {
- return (pDevice->uSIFS + uNextPktTime);
- }
- }
- break;
-
- default:
- break;
- }
+ u32 uAckTime = 0;
+
+ if (bNeedAck) {
+ if (byPktType == PK_TYPE_11B)
+ uAckTime = BBuGetFrameTime(pDevice->byPreambleType,
+ byPktType, 14, pDevice->byTopCCKBasicRate);
+ else
+ uAckTime = BBuGetFrameTime(pDevice->byPreambleType,
+ byPktType, 14, pDevice->byTopOFDMBasicRate);
+ return cpu_to_le16((u16)(pDevice->uSIFS + uAckTime));
+ }
- ASSERT(false);
return 0;
}
//byFreqType: 0=>5GHZ 1=>2.4GHZ
-static u32 s_uGetRTSCTSDuration(struct vnt_private *pDevice, u8 byDurType,
+static u16 s_uGetRTSCTSDuration(struct vnt_private *pDevice, u8 byDurType,
u32 cbFrameLength, u8 byPktType, u16 wRate, int bNeedAck,
u8 byFBOption)
{
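
The simplified s_uGetDataDuration above keeps only the non-fragmented case: SIFS plus the airtime of a 14-byte ACK at the basic rate when an ACK is expected, otherwise 0. As a rough worked example with textbook 802.11b numbers (long preamble 192 us, SIFS 10 us, 1 Mbit/s; these constants are not read out of the driver): 192 + 14*8/1 = 304 us of ACK airtime, so the duration field comes to about 314 us. A tiny sketch of that arithmetic:

/* Illustrative sketch only. Build: cc -Wall ack_time.c
 * Constants are the textbook 802.11b values, not taken from the driver. */
#include <stdio.h>

int main(void)
{
	unsigned int sifs_us = 10;        /* 802.11b/g SIFS */
	unsigned int preamble_us = 192;   /* long preamble + PLCP header */
	unsigned int ack_bytes = 14;      /* ACK control frame */
	unsigned int rate_mbps = 1;       /* basic CCK rate */

	unsigned int ack_air_us = preamble_us + (ack_bytes * 8) / rate_mbps;
	unsigned int duration_us = sifs_us + ack_air_us;

	/* Mirrors the simplified rule: need_ack ? SIFS + ACK airtime : 0 */
	printf("ACK airtime %u us, duration field %u us\n",
	       ack_air_us, duration_us);
	return 0;
}
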
@@ -626,14 +506,12 @@ static u32 s_uGetRTSCTSDuration(struct vnt_private *pDevice, u8 byDurType,
break;
}
- return uDurTime;
-
+ return cpu_to_le16((u16)uDurTime);
}
static u32 s_uFillDataHead(struct vnt_private *pDevice,
u8 byPktType, u16 wCurrentRate, void *pTxDataHead, u32 cbFrameLength,
- u32 uDMAIdx, int bNeedAck, u32 uFragIdx, u32 cbLastFragmentSize,
- u32 uMACfragNum, u8 byFBOption)
+ u32 uDMAIdx, int bNeedAck, u8 byFBOption)
{
if (pTxDataHead == NULL) {
@@ -641,409 +519,301 @@ static u32 s_uFillDataHead(struct vnt_private *pDevice,
}
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
- if ((uDMAIdx == TYPE_ATIMDMA) || (uDMAIdx == TYPE_BEACONDMA)) {
- PSTxDataHead_ab pBuf = (PSTxDataHead_ab) pTxDataHead;
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
- (u16 *)&(pBuf->wTransmitLength), (u8 *)&(pBuf->byServiceField), (u8 *)&(pBuf->bySignalField)
- );
- //Get Duration and TimeStampOff
- pBuf->wDuration = (u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx,
- cbLastFragmentSize, uMACfragNum,
- byFBOption); //1: 2.4GHz
- if(uDMAIdx!=TYPE_ATIMDMA) {
- pBuf->wTimeStampOff = wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE];
- }
- return (pBuf->wDuration);
- }
- else { // DATA & MANAGE Frame
if (byFBOption == AUTO_FB_NONE) {
- PSTxDataHead_g pBuf = (PSTxDataHead_g)pTxDataHead;
+ struct vnt_tx_datahead_g *pBuf =
+ (struct vnt_tx_datahead_g *)pTxDataHead;
//Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
- (u16 *)&(pBuf->wTransmitLength_a), (u8 *)&(pBuf->byServiceField_a), (u8 *)&(pBuf->bySignalField_a)
- );
- BBvCalculateParameter(pDevice, cbFrameLength, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
- (u16 *)&(pBuf->wTransmitLength_b), (u8 *)&(pBuf->byServiceField_b), (u8 *)&(pBuf->bySignalField_b)
- );
+ BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate,
+ byPktType, &pBuf->a);
+ BBvCalculateParameter(pDevice, cbFrameLength,
+ pDevice->byTopCCKBasicRate, PK_TYPE_11B, &pBuf->b);
//Get Duration and TimeStamp
- pBuf->wDuration_a = (u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength,
- byPktType, wCurrentRate, bNeedAck, uFragIdx,
- cbLastFragmentSize, uMACfragNum,
- byFBOption); //1: 2.4GHz
- pBuf->wDuration_b = (u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength,
- PK_TYPE_11B, pDevice->byTopCCKBasicRate,
- bNeedAck, uFragIdx, cbLastFragmentSize,
- uMACfragNum, byFBOption); //1: 2.4GHz
-
- pBuf->wTimeStampOff_a = wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE];
- pBuf->wTimeStampOff_b = wTimeStampOff[pDevice->byPreambleType%2][pDevice->byTopCCKBasicRate%MAX_RATE];
+ pBuf->wDuration_a = s_uGetDataDuration(pDevice,
+ byPktType, bNeedAck);
+ pBuf->wDuration_b = s_uGetDataDuration(pDevice,
+ PK_TYPE_11B, bNeedAck);
+
+ pBuf->wTimeStampOff_a = vnt_time_stamp_off(pDevice,
+ wCurrentRate);
+ pBuf->wTimeStampOff_b = vnt_time_stamp_off(pDevice,
+ pDevice->byTopCCKBasicRate);
return (pBuf->wDuration_a);
} else {
// Auto Fallback
- PSTxDataHead_g_FB pBuf = (PSTxDataHead_g_FB)pTxDataHead;
+ struct vnt_tx_datahead_g_fb *pBuf =
+ (struct vnt_tx_datahead_g_fb *)pTxDataHead;
//Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
- (u16 *)&(pBuf->wTransmitLength_a), (u8 *)&(pBuf->byServiceField_a), (u8 *)&(pBuf->bySignalField_a)
- );
- BBvCalculateParameter(pDevice, cbFrameLength, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
- (u16 *)&(pBuf->wTransmitLength_b), (u8 *)&(pBuf->byServiceField_b), (u8 *)&(pBuf->bySignalField_b)
- );
+ BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate,
+ byPktType, &pBuf->a);
+ BBvCalculateParameter(pDevice, cbFrameLength,
+ pDevice->byTopCCKBasicRate, PK_TYPE_11B, &pBuf->b);
//Get Duration and TimeStamp
- pBuf->wDuration_a = (u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption); //1: 2.4GHz
- pBuf->wDuration_b = (u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, PK_TYPE_11B,
- pDevice->byTopCCKBasicRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption); //1: 2.4GHz
- pBuf->wDuration_a_f0 = (u16)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption); //1: 2.4GHz
- pBuf->wDuration_a_f1 = (u16)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption); //1: 2.4GHz
- pBuf->wTimeStampOff_a = wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE];
- pBuf->wTimeStampOff_b = wTimeStampOff[pDevice->byPreambleType%2][pDevice->byTopCCKBasicRate%MAX_RATE];
+ pBuf->wDuration_a = s_uGetDataDuration(pDevice,
+ byPktType, bNeedAck);
+ pBuf->wDuration_b = s_uGetDataDuration(pDevice,
+ PK_TYPE_11B, bNeedAck);
+ pBuf->wDuration_a_f0 = s_uGetDataDuration(pDevice,
+ byPktType, bNeedAck);
+ pBuf->wDuration_a_f1 = s_uGetDataDuration(pDevice,
+ byPktType, bNeedAck);
+ pBuf->wTimeStampOff_a = vnt_time_stamp_off(pDevice,
+ wCurrentRate);
+ pBuf->wTimeStampOff_b = vnt_time_stamp_off(pDevice,
+ pDevice->byTopCCKBasicRate);
return (pBuf->wDuration_a);
} //if (byFBOption == AUTO_FB_NONE)
- }
}
else if (byPktType == PK_TYPE_11A) {
- if ((byFBOption != AUTO_FB_NONE) && (uDMAIdx != TYPE_ATIMDMA) && (uDMAIdx != TYPE_BEACONDMA)) {
- // Auto Fallback
- PSTxDataHead_a_FB pBuf = (PSTxDataHead_a_FB)pTxDataHead;
+ if (byFBOption != AUTO_FB_NONE) {
+ struct vnt_tx_datahead_a_fb *pBuf =
+ (struct vnt_tx_datahead_a_fb *)pTxDataHead;
//Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
- (u16 *)&(pBuf->wTransmitLength), (u8 *)&(pBuf->byServiceField), (u8 *)&(pBuf->bySignalField)
- );
+ BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate,
+ byPktType, &pBuf->a);
//Get Duration and TimeStampOff
- pBuf->wDuration = (u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption); //0: 5GHz
- pBuf->wDuration_f0 = (u16)s_uGetDataDuration(pDevice, DATADUR_A_F0, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption); //0: 5GHz
- pBuf->wDuration_f1 = (u16)s_uGetDataDuration(pDevice, DATADUR_A_F1, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx, cbLastFragmentSize, uMACfragNum, byFBOption); //0: 5GHz
- if(uDMAIdx!=TYPE_ATIMDMA) {
- pBuf->wTimeStampOff = wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE];
- }
+ pBuf->wDuration = s_uGetDataDuration(pDevice,
+ byPktType, bNeedAck);
+ pBuf->wDuration_f0 = s_uGetDataDuration(pDevice,
+ byPktType, bNeedAck);
+ pBuf->wDuration_f1 = s_uGetDataDuration(pDevice,
+ byPktType, bNeedAck);
+ pBuf->wTimeStampOff = vnt_time_stamp_off(pDevice,
+ wCurrentRate);
return (pBuf->wDuration);
} else {
- PSTxDataHead_ab pBuf = (PSTxDataHead_ab)pTxDataHead;
+ struct vnt_tx_datahead_ab *pBuf =
+ (struct vnt_tx_datahead_ab *)pTxDataHead;
//Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
- (u16 *)&(pBuf->wTransmitLength), (u8 *)&(pBuf->byServiceField), (u8 *)&(pBuf->bySignalField)
- );
+ BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate,
+ byPktType, &pBuf->ab);
//Get Duration and TimeStampOff
- pBuf->wDuration = (u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx,
- cbLastFragmentSize, uMACfragNum,
- byFBOption);
-
- if(uDMAIdx!=TYPE_ATIMDMA) {
- pBuf->wTimeStampOff = wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE];
- }
+ pBuf->wDuration = s_uGetDataDuration(pDevice,
+ byPktType, bNeedAck);
+ pBuf->wTimeStampOff = vnt_time_stamp_off(pDevice,
+ wCurrentRate);
return (pBuf->wDuration);
}
}
else if (byPktType == PK_TYPE_11B) {
- PSTxDataHead_ab pBuf = (PSTxDataHead_ab)pTxDataHead;
+ struct vnt_tx_datahead_ab *pBuf =
+ (struct vnt_tx_datahead_ab *)pTxDataHead;
//Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate, byPktType,
- (u16 *)&(pBuf->wTransmitLength), (u8 *)&(pBuf->byServiceField), (u8 *)&(pBuf->bySignalField)
- );
+ BBvCalculateParameter(pDevice, cbFrameLength, wCurrentRate,
+ byPktType, &pBuf->ab);
//Get Duration and TimeStampOff
- pBuf->wDuration = (u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameLength, byPktType,
- wCurrentRate, bNeedAck, uFragIdx,
- cbLastFragmentSize, uMACfragNum,
- byFBOption);
- if (uDMAIdx != TYPE_ATIMDMA) {
- pBuf->wTimeStampOff = wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE];
- }
+ pBuf->wDuration = s_uGetDataDuration(pDevice,
+ byPktType, bNeedAck);
+ pBuf->wTimeStampOff = vnt_time_stamp_off(pDevice,
+ wCurrentRate);
return (pBuf->wDuration);
}
return 0;
}
-static void s_vFillRTSHead(struct vnt_private *pDevice, u8 byPktType,
- void *pvRTS, u32 cbFrameLength, int bNeedAck, int bDisCRC,
- struct ethhdr *psEthHeader, u16 wCurrentRate, u8 byFBOption)
+static int vnt_fill_ieee80211_rts(struct vnt_private *priv,
+ struct ieee80211_rts *rts, struct ethhdr *eth_hdr,
+ u16 duration)
{
- u32 uRTSFrameLen = 20;
- u16 wLen = 0;
+ rts->duration = duration;
+ rts->frame_control = TYPE_CTL_RTS;
- if (pvRTS == NULL)
- return;
+ if (priv->eOPMode == OP_MODE_ADHOC || priv->eOPMode == OP_MODE_AP)
+ memcpy(rts->ra, eth_hdr->h_dest, ETH_ALEN);
+ else
+ memcpy(rts->ra, priv->abyBSSID, ETH_ALEN);
- if (bDisCRC) {
- // When CRCDIS bit is on, H/W forgot to generate FCS for RTS frame,
- // in this case we need to decrease its length by 4.
- uRTSFrameLen -= 4;
- }
+ if (priv->eOPMode == OP_MODE_AP)
+ memcpy(rts->ta, priv->abyBSSID, ETH_ALEN);
+ else
+ memcpy(rts->ta, eth_hdr->h_source, ETH_ALEN);
- // Note: So far RTSHead doesn't appear in ATIM & Beacom DMA, so we don't need to take them into account.
- // Otherwise, we need to modified codes for them.
- if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
- if (byFBOption == AUTO_FB_NONE) {
- PSRTS_g pBuf = (PSRTS_g)pvRTS;
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, uRTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
- (u16 *)&(wLen), (u8 *)&(pBuf->byServiceField_b), (u8 *)&(pBuf->bySignalField_b)
- );
- pBuf->wTransmitLength_b = cpu_to_le16(wLen);
- BBvCalculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType,
- (u16 *)&(wLen), (u8 *)&(pBuf->byServiceField_a), (u8 *)&(pBuf->bySignalField_a)
- );
- pBuf->wTransmitLength_a = cpu_to_le16(wLen);
- //Get Duration
- pBuf->wDuration_bb = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength, PK_TYPE_11B, pDevice->byTopCCKBasicRate, bNeedAck, byFBOption)); //0:RTSDuration_bb, 1:2.4G, 1:CCKData
- pBuf->wDuration_aa = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //2:RTSDuration_aa, 1:2.4G, 2,3: 2.4G OFDMData
- pBuf->wDuration_ba = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //1:RTSDuration_ba, 1:2.4G, 2,3:2.4G OFDM Data
-
- pBuf->Data.wDurationID = pBuf->wDuration_aa;
- //Get RTS Frame body
- pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
+ return 0;
+}
- if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
- (pDevice->eOPMode == OP_MODE_AP)) {
- memcpy(&(pBuf->Data.abyRA[0]),
- &(psEthHeader->h_dest[0]),
- ETH_ALEN);
- }
- else {
- memcpy(&(pBuf->Data.abyRA[0]),
- &(pDevice->abyBSSID[0]),
- ETH_ALEN);
- }
- if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pBuf->Data.abyTA[0]),
- &(pDevice->abyBSSID[0]),
- ETH_ALEN);
- }
- else {
- memcpy(&(pBuf->Data.abyTA[0]),
- &(psEthHeader->h_source[0]),
- ETH_ALEN);
- }
- }
- else {
- PSRTS_g_FB pBuf = (PSRTS_g_FB)pvRTS;
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, uRTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
- (u16 *)&(wLen), (u8 *)&(pBuf->byServiceField_b), (u8 *)&(pBuf->bySignalField_b)
- );
- pBuf->wTransmitLength_b = cpu_to_le16(wLen);
- BBvCalculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType,
- (u16 *)&(wLen), (u8 *)&(pBuf->byServiceField_a), (u8 *)&(pBuf->bySignalField_a)
- );
- pBuf->wTransmitLength_a = cpu_to_le16(wLen);
- //Get Duration
- pBuf->wDuration_bb = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength, PK_TYPE_11B, pDevice->byTopCCKBasicRate, bNeedAck, byFBOption)); //0:RTSDuration_bb, 1:2.4G, 1:CCKData
- pBuf->wDuration_aa = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //2:RTSDuration_aa, 1:2.4G, 2,3:2.4G OFDMData
- pBuf->wDuration_ba = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //1:RTSDuration_ba, 1:2.4G, 2,3:2.4G OFDMData
- pBuf->wRTSDuration_ba_f0 = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //4:wRTSDuration_ba_f0, 1:2.4G, 1:CCKData
- pBuf->wRTSDuration_aa_f0 = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //5:wRTSDuration_aa_f0, 1:2.4G, 1:CCKData
- pBuf->wRTSDuration_ba_f1 = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_BA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //6:wRTSDuration_ba_f1, 1:2.4G, 1:CCKData
- pBuf->wRTSDuration_aa_f1 = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //7:wRTSDuration_aa_f1, 1:2.4G, 1:CCKData
- pBuf->Data.wDurationID = pBuf->wDuration_aa;
- //Get RTS Frame body
- pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
+static int vnt_rxtx_rts_g_head(struct vnt_private *priv,
+ struct vnt_rts_g *buf, struct ethhdr *eth_hdr,
+ u8 pkt_type, u32 frame_len, int need_ack,
+ u16 current_rate, u8 fb_option)
+{
+ u16 rts_frame_len = 20;
- if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
- (pDevice->eOPMode == OP_MODE_AP)) {
- memcpy(&(pBuf->Data.abyRA[0]),
- &(psEthHeader->h_dest[0]),
- ETH_ALEN);
- }
- else {
- memcpy(&(pBuf->Data.abyRA[0]),
- &(pDevice->abyBSSID[0]),
- ETH_ALEN);
- }
+ BBvCalculateParameter(priv, rts_frame_len, priv->byTopCCKBasicRate,
+ PK_TYPE_11B, &buf->b);
+ BBvCalculateParameter(priv, rts_frame_len,
+ priv->byTopOFDMBasicRate, pkt_type, &buf->a);
- if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pBuf->Data.abyTA[0]),
- &(pDevice->abyBSSID[0]),
- ETH_ALEN);
- }
- else {
- memcpy(&(pBuf->Data.abyTA[0]),
- &(psEthHeader->h_source[0]),
- ETH_ALEN);
- }
+ buf->wDuration_bb = s_uGetRTSCTSDuration(priv, RTSDUR_BB, frame_len,
+ PK_TYPE_11B, priv->byTopCCKBasicRate, need_ack, fb_option);
+ buf->wDuration_aa = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
+ pkt_type, current_rate, need_ack, fb_option);
+ buf->wDuration_ba = s_uGetRTSCTSDuration(priv, RTSDUR_BA, frame_len,
+ pkt_type, current_rate, need_ack, fb_option);
- } // if (byFBOption == AUTO_FB_NONE)
- }
- else if (byPktType == PK_TYPE_11A) {
- if (byFBOption == AUTO_FB_NONE) {
- PSRTS_ab pBuf = (PSRTS_ab)pvRTS;
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType,
- (u16 *)&(wLen), (u8 *)&(pBuf->byServiceField), (u8 *)&(pBuf->bySignalField)
- );
- pBuf->wTransmitLength = cpu_to_le16(wLen);
- //Get Duration
- pBuf->wDuration = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //0:RTSDuration_aa, 0:5G, 0: 5G OFDMData
- pBuf->Data.wDurationID = pBuf->wDuration;
- //Get RTS Frame body
- pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
+ vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->wDuration_aa);
- if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
- (pDevice->eOPMode == OP_MODE_AP)) {
- memcpy(&(pBuf->Data.abyRA[0]),
- &(psEthHeader->h_dest[0]),
- ETH_ALEN);
- } else {
- memcpy(&(pBuf->Data.abyRA[0]),
- &(pDevice->abyBSSID[0]),
- ETH_ALEN);
- }
+ return 0;
+}
- if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pBuf->Data.abyTA[0]),
- &(pDevice->abyBSSID[0]),
- ETH_ALEN);
- } else {
- memcpy(&(pBuf->Data.abyTA[0]),
- &(psEthHeader->h_source[0]),
- ETH_ALEN);
- }
+static int vnt_rxtx_rts_g_fb_head(struct vnt_private *priv,
+ struct vnt_rts_g_fb *buf, struct ethhdr *eth_hdr,
+ u8 pkt_type, u32 frame_len, int need_ack,
+ u16 current_rate, u8 fb_option)
+{
+ u16 rts_frame_len = 20;
- }
- else {
- PSRTS_a_FB pBuf = (PSRTS_a_FB)pvRTS;
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, uRTSFrameLen, pDevice->byTopOFDMBasicRate, byPktType,
- (u16 *)&(wLen), (u8 *)&(pBuf->byServiceField), (u8 *)&(pBuf->bySignalField)
- );
- pBuf->wTransmitLength = cpu_to_le16(wLen);
- //Get Duration
- pBuf->wDuration = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //0:RTSDuration_aa, 0:5G, 0: 5G OFDMData
- pBuf->wRTSDuration_f0 = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //5:RTSDuration_aa_f0, 0:5G, 0: 5G OFDMData
- pBuf->wRTSDuration_f1 = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_AA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //7:RTSDuration_aa_f1, 0:5G, 0:
- pBuf->Data.wDurationID = pBuf->wDuration;
- //Get RTS Frame body
- pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
+ BBvCalculateParameter(priv, rts_frame_len, priv->byTopCCKBasicRate,
+ PK_TYPE_11B, &buf->b);
+ BBvCalculateParameter(priv, rts_frame_len,
+ priv->byTopOFDMBasicRate, pkt_type, &buf->a);
- if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
- (pDevice->eOPMode == OP_MODE_AP)) {
- memcpy(&(pBuf->Data.abyRA[0]),
- &(psEthHeader->h_dest[0]),
- ETH_ALEN);
- } else {
- memcpy(&(pBuf->Data.abyRA[0]),
- &(pDevice->abyBSSID[0]),
- ETH_ALEN);
- }
- if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pBuf->Data.abyTA[0]),
- &(pDevice->abyBSSID[0]),
- ETH_ALEN);
- } else {
- memcpy(&(pBuf->Data.abyTA[0]),
- &(psEthHeader->h_source[0]),
- ETH_ALEN);
- }
- }
- }
- else if (byPktType == PK_TYPE_11B) {
- PSRTS_ab pBuf = (PSRTS_ab)pvRTS;
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, uRTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
- (u16 *)&(wLen), (u8 *)&(pBuf->byServiceField), (u8 *)&(pBuf->bySignalField)
- );
- pBuf->wTransmitLength = cpu_to_le16(wLen);
- //Get Duration
- pBuf->wDuration = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, RTSDUR_BB, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //0:RTSDuration_bb, 1:2.4G, 1:CCKData
- pBuf->Data.wDurationID = pBuf->wDuration;
- //Get RTS Frame body
- pBuf->Data.wFrameControl = TYPE_CTL_RTS;//0x00B4
- if ((pDevice->eOPMode == OP_MODE_ADHOC) ||
- (pDevice->eOPMode == OP_MODE_AP)) {
- memcpy(&(pBuf->Data.abyRA[0]),
- &(psEthHeader->h_dest[0]),
- ETH_ALEN);
- }
- else {
- memcpy(&(pBuf->Data.abyRA[0]),
- &(pDevice->abyBSSID[0]),
- ETH_ALEN);
- }
+ buf->wDuration_bb = s_uGetRTSCTSDuration(priv, RTSDUR_BB, frame_len,
+ PK_TYPE_11B, priv->byTopCCKBasicRate, need_ack, fb_option);
+ buf->wDuration_aa = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
+ pkt_type, current_rate, need_ack, fb_option);
+ buf->wDuration_ba = s_uGetRTSCTSDuration(priv, RTSDUR_BA, frame_len,
+ pkt_type, current_rate, need_ack, fb_option);
- if (pDevice->eOPMode == OP_MODE_AP) {
- memcpy(&(pBuf->Data.abyTA[0]),
- &(pDevice->abyBSSID[0]),
- ETH_ALEN);
- } else {
- memcpy(&(pBuf->Data.abyTA[0]),
- &(psEthHeader->h_source[0]),
- ETH_ALEN);
- }
- }
+
+ buf->wRTSDuration_ba_f0 = s_uGetRTSCTSDuration(priv, RTSDUR_BA_F0,
+ frame_len, pkt_type, current_rate, need_ack, fb_option);
+ buf->wRTSDuration_aa_f0 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F0,
+ frame_len, pkt_type, current_rate, need_ack, fb_option);
+ buf->wRTSDuration_ba_f1 = s_uGetRTSCTSDuration(priv, RTSDUR_BA_F1,
+ frame_len, pkt_type, current_rate, need_ack, fb_option);
+ buf->wRTSDuration_aa_f1 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F1,
+ frame_len, pkt_type, current_rate, need_ack, fb_option);
+
+ vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->wDuration_aa);
+
+ return 0;
}
-static void s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
- u8 byPktType, void *pvCTS, u32 cbFrameLength, int bNeedAck,
- int bDisCRC, u16 wCurrentRate, u8 byFBOption)
+static int vnt_rxtx_rts_ab_head(struct vnt_private *priv,
+ struct vnt_rts_ab *buf, struct ethhdr *eth_hdr,
+ u8 pkt_type, u32 frame_len, int need_ack,
+ u16 current_rate, u8 fb_option)
{
- u32 uCTSFrameLen = 14;
- u16 wLen = 0;
+ u16 rts_frame_len = 20;
- if (pvCTS == NULL) {
- return;
- }
+ BBvCalculateParameter(priv, rts_frame_len,
+ priv->byTopOFDMBasicRate, pkt_type, &buf->ab);
- if (bDisCRC) {
- // When CRCDIS bit is on, H/W forgot to generate FCS for CTS frame,
- // in this case we need to decrease its length by 4.
- uCTSFrameLen -= 4;
- }
+ buf->wDuration = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
+ pkt_type, current_rate, need_ack, fb_option);
- if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
- if (byFBOption != AUTO_FB_NONE && uDMAIdx != TYPE_ATIMDMA && uDMAIdx != TYPE_BEACONDMA) {
- // Auto Fall back
- PSCTS_FB pBuf = (PSCTS_FB)pvCTS;
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, uCTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
- (u16 *)&(wLen), (u8 *)&(pBuf->byServiceField_b), (u8 *)&(pBuf->bySignalField_b)
- );
- pBuf->wTransmitLength_b = cpu_to_le16(wLen);
- pBuf->wDuration_ba = (u16)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //3:CTSDuration_ba, 1:2.4G, 2,3:2.4G OFDM Data
- pBuf->wDuration_ba += pDevice->wCTSDuration;
- pBuf->wDuration_ba = cpu_to_le16(pBuf->wDuration_ba);
- //Get CTSDuration_ba_f0
- pBuf->wCTSDuration_ba_f0 = (u16)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA_F0, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //8:CTSDuration_ba_f0, 1:2.4G, 2,3:2.4G OFDM Data
- pBuf->wCTSDuration_ba_f0 += pDevice->wCTSDuration;
- pBuf->wCTSDuration_ba_f0 = cpu_to_le16(pBuf->wCTSDuration_ba_f0);
- //Get CTSDuration_ba_f1
- pBuf->wCTSDuration_ba_f1 = (u16)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA_F1, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption); //9:CTSDuration_ba_f1, 1:2.4G, 2,3:2.4G OFDM Data
- pBuf->wCTSDuration_ba_f1 += pDevice->wCTSDuration;
- pBuf->wCTSDuration_ba_f1 = cpu_to_le16(pBuf->wCTSDuration_ba_f1);
- //Get CTS Frame body
- pBuf->Data.wDurationID = pBuf->wDuration_ba;
- pBuf->Data.wFrameControl = TYPE_CTL_CTS;//0x00C4
- pBuf->Data.wReserved = 0x0000;
- memcpy(&(pBuf->Data.abyRA[0]),
- &(pDevice->abyCurrentNetAddr[0]),
- ETH_ALEN);
- } else { //if (byFBOption != AUTO_FB_NONE && uDMAIdx != TYPE_ATIMDMA && uDMAIdx != TYPE_BEACONDMA)
- PSCTS pBuf = (PSCTS)pvCTS;
- //Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, uCTSFrameLen, pDevice->byTopCCKBasicRate, PK_TYPE_11B,
- (u16 *)&(wLen), (u8 *)&(pBuf->byServiceField_b), (u8 *)&(pBuf->bySignalField_b)
- );
- pBuf->wTransmitLength_b = cpu_to_le16(wLen);
- //Get CTSDuration_ba
- pBuf->wDuration_ba = cpu_to_le16((u16)s_uGetRTSCTSDuration(pDevice, CTSDUR_BA, cbFrameLength, byPktType, wCurrentRate, bNeedAck, byFBOption)); //3:CTSDuration_ba, 1:2.4G, 2,3:2.4G OFDM Data
- pBuf->wDuration_ba += pDevice->wCTSDuration;
- pBuf->wDuration_ba = cpu_to_le16(pBuf->wDuration_ba);
-
- //Get CTS Frame body
- pBuf->Data.wDurationID = pBuf->wDuration_ba;
- pBuf->Data.wFrameControl = TYPE_CTL_CTS;//0x00C4
- pBuf->Data.wReserved = 0x0000;
- memcpy(&(pBuf->Data.abyRA[0]),
- &(pDevice->abyCurrentNetAddr[0]),
- ETH_ALEN);
+ vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->wDuration);
+
+ return 0;
+}
+
+static int vnt_rxtx_rts_a_fb_head(struct vnt_private *priv,
+ struct vnt_rts_a_fb *buf, struct ethhdr *eth_hdr,
+ u8 pkt_type, u32 frame_len, int need_ack,
+ u16 current_rate, u8 fb_option)
+{
+ u16 rts_frame_len = 20;
+
+ BBvCalculateParameter(priv, rts_frame_len,
+ priv->byTopOFDMBasicRate, pkt_type, &buf->a);
+
+ buf->wDuration = s_uGetRTSCTSDuration(priv, RTSDUR_AA, frame_len,
+ pkt_type, current_rate, need_ack, fb_option);
+
+ buf->wRTSDuration_f0 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F0,
+ frame_len, pkt_type, current_rate, need_ack, fb_option);
+
+ buf->wRTSDuration_f1 = s_uGetRTSCTSDuration(priv, RTSDUR_AA_F1,
+ frame_len, pkt_type, current_rate, need_ack, fb_option);
+
+ vnt_fill_ieee80211_rts(priv, &buf->data, eth_hdr, buf->wDuration);
+
+ return 0;
+}
+
+static void s_vFillRTSHead(struct vnt_private *pDevice, u8 byPktType,
+ union vnt_tx_data_head *head, u32 cbFrameLength, int bNeedAck,
+ struct ethhdr *psEthHeader, u16 wCurrentRate, u8 byFBOption)
+{
+
+ if (!head)
+ return;
+
+ /* Note: So far RTSHead doesn't appear in ATIM
+ * & Beacom DMA, so we don't need to take them
+ * into account.
+ * Otherwise, we need to modified codes for them.
+ */
+ switch (byPktType) {
+ case PK_TYPE_11GB:
+ case PK_TYPE_11GA:
+ if (byFBOption == AUTO_FB_NONE)
+ vnt_rxtx_rts_g_head(pDevice, &head->rts_g,
+ psEthHeader, byPktType, cbFrameLength,
+ bNeedAck, wCurrentRate, byFBOption);
+ else
+ vnt_rxtx_rts_g_fb_head(pDevice, &head->rts_g_fb,
+ psEthHeader, byPktType, cbFrameLength,
+ bNeedAck, wCurrentRate, byFBOption);
+ break;
+ case PK_TYPE_11A:
+ if (byFBOption) {
+ vnt_rxtx_rts_a_fb_head(pDevice, &head->rts_a_fb,
+ psEthHeader, byPktType, cbFrameLength,
+ bNeedAck, wCurrentRate, byFBOption);
+ break;
+ }
+ case PK_TYPE_11B:
+ vnt_rxtx_rts_ab_head(pDevice, &head->rts_ab,
+ psEthHeader, byPktType, cbFrameLength,
+ bNeedAck, wCurrentRate, byFBOption);
+ }
+}
+
+static void s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
+ u8 byPktType, union vnt_tx_data_head *head, u32 cbFrameLength,
+ int bNeedAck, u16 wCurrentRate, u8 byFBOption)
+{
+ u32 uCTSFrameLen = 14;
+
+ if (!head)
+ return;
+
+ if (byFBOption != AUTO_FB_NONE) {
+ /* Auto Fall back */
+ struct vnt_cts_fb *pBuf = &head->cts_g_fb;
+ /* Get SignalField,ServiceField,Length */
+ BBvCalculateParameter(pDevice, uCTSFrameLen,
+ pDevice->byTopCCKBasicRate, PK_TYPE_11B, &pBuf->b);
+ pBuf->wDuration_ba = s_uGetRTSCTSDuration(pDevice, CTSDUR_BA,
+ cbFrameLength, byPktType,
+ wCurrentRate, bNeedAck, byFBOption);
+ /* Get CTSDuration_ba_f0 */
+ pBuf->wCTSDuration_ba_f0 = s_uGetRTSCTSDuration(pDevice,
+ CTSDUR_BA_F0, cbFrameLength, byPktType, wCurrentRate,
+ bNeedAck, byFBOption);
+ /* Get CTSDuration_ba_f1 */
+ pBuf->wCTSDuration_ba_f1 = s_uGetRTSCTSDuration(pDevice,
+ CTSDUR_BA_F1, cbFrameLength, byPktType, wCurrentRate,
+ bNeedAck, byFBOption);
+ /* Get CTS Frame body */
+ pBuf->data.duration = pBuf->wDuration_ba;
+ pBuf->data.frame_control = TYPE_CTL_CTS;
+ memcpy(pBuf->data.ra, pDevice->abyCurrentNetAddr, ETH_ALEN);
+ } else {
+ struct vnt_cts *pBuf = &head->cts_g;
+ /* Get SignalField,ServiceField,Length */
+ BBvCalculateParameter(pDevice, uCTSFrameLen,
+ pDevice->byTopCCKBasicRate, PK_TYPE_11B, &pBuf->b);
+ /* Get CTSDuration_ba */
+ pBuf->wDuration_ba = s_uGetRTSCTSDuration(pDevice,
+ CTSDUR_BA, cbFrameLength, byPktType,
+ wCurrentRate, bNeedAck, byFBOption);
+ /*Get CTS Frame body*/
+ pBuf->data.duration = pBuf->wDuration_ba;
+ pBuf->data.frame_control = TYPE_CTL_CTS;
+ memcpy(pBuf->data.ra, pDevice->abyCurrentNetAddr, ETH_ALEN);
}
- }
}
/*+
@@ -1071,12 +841,12 @@ static void s_vFillCTSHead(struct vnt_private *pDevice, u32 uDMAIdx,
static void s_vGenerateTxParameter(struct vnt_private *pDevice,
u8 byPktType, u16 wCurrentRate, void *pTxBufHead, void *pvRrvTime,
- void *pvRTS, void *pvCTS, u32 cbFrameSize, int bNeedACK, u32 uDMAIdx,
- struct ethhdr *psEthHeader)
+ void *rts_cts, u32 cbFrameSize, int bNeedACK, u32 uDMAIdx,
+ struct ethhdr *psEthHeader, bool need_rts)
{
+ union vnt_tx_data_head *head = rts_cts;
u32 cbMACHdLen = WLAN_HDR_ADDR3_LEN; /* 24 */
u16 wFifoCtl;
- int bDisCRC = false;
u8 byFBOption = AUTO_FB_NONE;
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_vGenerateTxParameter...\n");
@@ -1084,10 +854,6 @@ static void s_vGenerateTxParameter(struct vnt_private *pDevice,
pFifoHead->wReserved = wCurrentRate;
wFifoCtl = pFifoHead->wFIFOCtl;
- if (wFifoCtl & FIFOCTL_CRCDIS) {
- bDisCRC = true;
- }
-
if (wFifoCtl & FIFOCTL_AUTO_FB_0) {
byFBOption = AUTO_FB_0;
}
@@ -1095,75 +861,87 @@ static void s_vGenerateTxParameter(struct vnt_private *pDevice,
byFBOption = AUTO_FB_1;
}
+ if (!pvRrvTime)
+ return;
+
if (pDevice->bLongHeader)
cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
-
- if (pvRTS != NULL) { //RTS_need
+ if (need_rts) {
//Fill RsvTime
- if (pvRrvTime) {
- PSRrvTime_gRTS pBuf = (PSRrvTime_gRTS)pvRrvTime;
- pBuf->wRTSTxRrvTime_aa = cpu_to_le16((u16)s_uGetRTSCTSRsvTime(pDevice, 2, byPktType, cbFrameSize, wCurrentRate));//2:RTSTxRrvTime_aa, 1:2.4GHz
- pBuf->wRTSTxRrvTime_ba = cpu_to_le16((u16)s_uGetRTSCTSRsvTime(pDevice, 1, byPktType, cbFrameSize, wCurrentRate));//1:RTSTxRrvTime_ba, 1:2.4GHz
- pBuf->wRTSTxRrvTime_bb = cpu_to_le16((u16)s_uGetRTSCTSRsvTime(pDevice, 0, byPktType, cbFrameSize, wCurrentRate));//0:RTSTxRrvTime_bb, 1:2.4GHz
- pBuf->wTxRrvTime_a = cpu_to_le16((u16) s_uGetTxRsvTime(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK));//2.4G OFDM
- pBuf->wTxRrvTime_b = cpu_to_le16((u16) s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, pDevice->byTopCCKBasicRate, bNeedACK));//1:CCK
- }
- //Fill RTS
- s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
+ struct vnt_rrv_time_rts *pBuf =
+ (struct vnt_rrv_time_rts *)pvRrvTime;
+ pBuf->wRTSTxRrvTime_aa = s_uGetRTSCTSRsvTime(pDevice, 2,
+ byPktType, cbFrameSize, wCurrentRate);
+ pBuf->wRTSTxRrvTime_ba = s_uGetRTSCTSRsvTime(pDevice, 1,
+ byPktType, cbFrameSize, wCurrentRate);
+ pBuf->wRTSTxRrvTime_bb = s_uGetRTSCTSRsvTime(pDevice, 0,
+ byPktType, cbFrameSize, wCurrentRate);
+ pBuf->wTxRrvTime_a = vnt_rxtx_rsvtime_le16(pDevice,
+ byPktType, cbFrameSize, wCurrentRate, bNeedACK);
+ pBuf->wTxRrvTime_b = vnt_rxtx_rsvtime_le16(pDevice,
+ PK_TYPE_11B, cbFrameSize, pDevice->byTopCCKBasicRate,
+ bNeedACK);
+ /* Fill RTS */
+ s_vFillRTSHead(pDevice, byPktType, head, cbFrameSize,
+ bNeedACK, psEthHeader, wCurrentRate, byFBOption);
}
else {//RTS_needless, PCF mode
-
//Fill RsvTime
- if (pvRrvTime) {
- PSRrvTime_gCTS pBuf = (PSRrvTime_gCTS)pvRrvTime;
- pBuf->wTxRrvTime_a = cpu_to_le16((u16)s_uGetTxRsvTime(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK));//2.4G OFDM
- pBuf->wTxRrvTime_b = cpu_to_le16((u16)s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, pDevice->byTopCCKBasicRate, bNeedACK));//1:CCK
- pBuf->wCTSTxRrvTime_ba = cpu_to_le16((u16)s_uGetRTSCTSRsvTime(pDevice, 3, byPktType, cbFrameSize, wCurrentRate));//3:CTSTxRrvTime_Ba, 1:2.4GHz
- }
- //Fill CTS
- s_vFillCTSHead(pDevice, uDMAIdx, byPktType, pvCTS, cbFrameSize, bNeedACK, bDisCRC, wCurrentRate, byFBOption);
+ struct vnt_rrv_time_cts *pBuf =
+ (struct vnt_rrv_time_cts *)pvRrvTime;
+ pBuf->wTxRrvTime_a = vnt_rxtx_rsvtime_le16(pDevice, byPktType,
+ cbFrameSize, wCurrentRate, bNeedACK);
+ pBuf->wTxRrvTime_b = vnt_rxtx_rsvtime_le16(pDevice,
+ PK_TYPE_11B, cbFrameSize,
+ pDevice->byTopCCKBasicRate, bNeedACK);
+ pBuf->wCTSTxRrvTime_ba = s_uGetRTSCTSRsvTime(pDevice, 3,
+ byPktType, cbFrameSize, wCurrentRate);
+ /* Fill CTS */
+ s_vFillCTSHead(pDevice, uDMAIdx, byPktType, head,
+ cbFrameSize, bNeedACK, wCurrentRate, byFBOption);
}
}
else if (byPktType == PK_TYPE_11A) {
-
- if (pvRTS != NULL) {//RTS_need, non PCF mode
+ if (need_rts) {
//Fill RsvTime
- if (pvRrvTime) {
- PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
- pBuf->wRTSTxRrvTime = cpu_to_le16((u16)s_uGetRTSCTSRsvTime(pDevice, 2, byPktType, cbFrameSize, wCurrentRate));//2:RTSTxRrvTime_aa, 0:5GHz
- pBuf->wTxRrvTime = cpu_to_le16((u16)s_uGetTxRsvTime(pDevice, byPktType, cbFrameSize, wCurrentRate, bNeedACK));//0:OFDM
- }
- //Fill RTS
- s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
- }
- else if (pvRTS == NULL) {//RTS_needless, non PCF mode
+ struct vnt_rrv_time_ab *pBuf =
+ (struct vnt_rrv_time_ab *)pvRrvTime;
+ pBuf->wRTSTxRrvTime = s_uGetRTSCTSRsvTime(pDevice, 2,
+ byPktType, cbFrameSize, wCurrentRate);
+ pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice, byPktType,
+ cbFrameSize, wCurrentRate, bNeedACK);
+ /* Fill RTS */
+ s_vFillRTSHead(pDevice, byPktType, head, cbFrameSize,
+ bNeedACK, psEthHeader, wCurrentRate, byFBOption);
+ } else {
//Fill RsvTime
- if (pvRrvTime) {
- PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
- pBuf->wTxRrvTime = cpu_to_le16((u16)s_uGetTxRsvTime(pDevice, PK_TYPE_11A, cbFrameSize, wCurrentRate, bNeedACK)); //0:OFDM
- }
+ struct vnt_rrv_time_ab *pBuf =
+ (struct vnt_rrv_time_ab *)pvRrvTime;
+ pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice, PK_TYPE_11A,
+ cbFrameSize, wCurrentRate, bNeedACK);
}
}
else if (byPktType == PK_TYPE_11B) {
-
- if ((pvRTS != NULL)) {//RTS_need, non PCF mode
+ if (need_rts) {
//Fill RsvTime
- if (pvRrvTime) {
- PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
- pBuf->wRTSTxRrvTime = cpu_to_le16((u16)s_uGetRTSCTSRsvTime(pDevice, 0, byPktType, cbFrameSize, wCurrentRate));//0:RTSTxRrvTime_bb, 1:2.4GHz
- pBuf->wTxRrvTime = cpu_to_le16((u16)s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, wCurrentRate, bNeedACK));//1:CCK
- }
- //Fill RTS
- s_vFillRTSHead(pDevice, byPktType, pvRTS, cbFrameSize, bNeedACK, bDisCRC, psEthHeader, wCurrentRate, byFBOption);
+ struct vnt_rrv_time_ab *pBuf =
+ (struct vnt_rrv_time_ab *)pvRrvTime;
+ pBuf->wRTSTxRrvTime = s_uGetRTSCTSRsvTime(pDevice, 0,
+ byPktType, cbFrameSize, wCurrentRate);
+ pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice, PK_TYPE_11B,
+ cbFrameSize, wCurrentRate, bNeedACK);
+ /* Fill RTS */
+ s_vFillRTSHead(pDevice, byPktType, head, cbFrameSize,
+ bNeedACK, psEthHeader, wCurrentRate, byFBOption);
}
else { //RTS_needless, non PCF mode
//Fill RsvTime
- if (pvRrvTime) {
- PSRrvTime_ab pBuf = (PSRrvTime_ab)pvRrvTime;
- pBuf->wTxRrvTime = cpu_to_le16((u16)s_uGetTxRsvTime(pDevice, PK_TYPE_11B, cbFrameSize, wCurrentRate, bNeedACK)); //1:CCK
- }
+ struct vnt_rrv_time_ab *pBuf =
+ (struct vnt_rrv_time_ab *)pvRrvTime;
+ pBuf->wTxRrvTime = vnt_rxtx_rsvtime_le16(pDevice, PK_TYPE_11B,
+ cbFrameSize, wCurrentRate, bNeedACK);
}
}
//DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_vGenerateTxParameter END.\n");
@@ -1175,17 +953,18 @@ static void s_vGenerateTxParameter(struct vnt_private *pDevice,
*/
static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
- u8 *usbPacketBuf, int bNeedEncryption, u32 uSkbPacketLen, u32 uDMAIdx,
- struct ethhdr *psEthHeader, u8 *pPacket, PSKeyItem pTransmitKey,
- u32 uNodeIndex, u16 wCurrentRate, u32 *pcbHeaderLen, u32 *pcbTotalLen)
+ struct vnt_tx_buffer *pTxBufHead, int bNeedEncryption,
+ u32 uSkbPacketLen, u32 uDMAIdx, struct ethhdr *psEthHeader,
+ u8 *pPacket, PSKeyItem pTransmitKey, u32 uNodeIndex, u16 wCurrentRate,
+ u32 *pcbHeaderLen, u32 *pcbTotalLen)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
u32 cbFrameSize, cbFrameBodySize;
- PTX_BUFFER pTxBufHead;
u32 cb802_1_H_len;
u32 cbIVlen = 0, cbICVlen = 0, cbMIClen = 0, cbMACHdLen = 0;
u32 cbFCSlen = 4, cbMICHDR = 0;
- int bNeedACK, bRTS;
+ int bNeedACK;
+ bool bRTS = false;
u8 *pbyType, *pbyMacHdr, *pbyIVHead, *pbyPayloadHead, *pbyTxBufferAddr;
u8 abySNAP_RFC1042[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00};
u8 abySNAP_Bridgetunnel[ETH_ALEN]
@@ -1193,26 +972,22 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
u32 uDuration;
u32 cbHeaderLength = 0, uPadding = 0;
void *pvRrvTime;
- PSMICHDRHead pMICHDR;
- void *pvRTS;
- void *pvCTS;
+ struct vnt_mic_hdr *pMICHDR;
+ void *rts_cts = NULL;
void *pvTxDataHd;
u8 byFBOption = AUTO_FB_NONE, byFragType;
u16 wTxBufSize;
- u32 dwMICKey0, dwMICKey1, dwMIC_Priority, dwCRC;
+ u32 dwMICKey0, dwMICKey1, dwMIC_Priority;
u32 *pdwMIC_L, *pdwMIC_R;
int bSoftWEP = false;
- pvRrvTime = pMICHDR = pvRTS = pvCTS = pvTxDataHd = NULL;
+ pvRrvTime = pMICHDR = pvTxDataHd = NULL;
if (bNeedEncryption && pTransmitKey->pvKeyTable) {
if (((PSKeyTable)pTransmitKey->pvKeyTable)->bSoftWEP == true)
bSoftWEP = true; /* WEP 256 */
}
- pTxBufHead = (PTX_BUFFER) usbPacketBuf;
- memset(pTxBufHead, 0, sizeof(TX_BUFFER));
-
// Get pkt type
if (ntohs(psEthHeader->h_proto) > ETH_DATA_LEN) {
if (pDevice->dwDiagRefCount == 0) {
@@ -1257,10 +1032,6 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
if (pDevice->bLongHeader)
pTxBufHead->wFIFOCtl |= FIFOCTL_LHEAD;
- if (pDevice->bSoftwareGenCrcErr) {
- pTxBufHead->wFIFOCtl |= FIFOCTL_CRCDIS; // set tx descriptors to NO hardware CRC
- }
-
//Set FRAGCTL_MACHDCNT
if (pDevice->bLongHeader) {
cbMACHdLen = WLAN_HDR_ADDR3_LEN + 6;
@@ -1313,7 +1084,7 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
if (pTransmitKey->byCipherSuite == KEY_CTL_CCMP) {
cbIVlen = 8;//RSN Header
cbICVlen = 8;//MIC
- cbMICHDR = sizeof(SMICHDRHead);
+ cbMICHDR = sizeof(struct vnt_mic_hdr);
}
if (bSoftWEP == false) {
//MAC Header should be padding 0 to DW alignment.
@@ -1336,76 +1107,116 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet
if (byFBOption == AUTO_FB_NONE) {
if (bRTS == true) {//RTS_need
- pvRrvTime = (PSRrvTime_gRTS) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS));
- pvRTS = (PSRTS_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR);
- pvCTS = NULL;
- pvTxDataHd = (PSTxDataHead_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g));
- cbHeaderLength = wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g) + sizeof(STxDataHead_g);
+ pvRrvTime = (struct vnt_rrv_time_rts *)
+ (pbyTxBufferAddr + wTxBufSize);
+ pMICHDR = (struct vnt_mic_hdr *)(pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_rts));
+ rts_cts = (struct vnt_rts_g *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_rts) + cbMICHDR);
+ pvTxDataHd = (struct vnt_tx_datahead_g *) (pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_rts) +
+ cbMICHDR + sizeof(struct vnt_rts_g));
+ cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_rts) +
+ cbMICHDR + sizeof(struct vnt_rts_g) +
+ sizeof(struct vnt_tx_datahead_g);
}
else { //RTS_needless
- pvRrvTime = (PSRrvTime_gCTS) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS));
- pvRTS = NULL;
- pvCTS = (PSCTS) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR);
- pvTxDataHd = (PSTxDataHead_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS));
- cbHeaderLength = wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS) + sizeof(STxDataHead_g);
+ pvRrvTime = (struct vnt_rrv_time_cts *)
+ (pbyTxBufferAddr + wTxBufSize);
+ pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_cts));
+ rts_cts = (struct vnt_cts *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_cts) + cbMICHDR);
+ pvTxDataHd = (struct vnt_tx_datahead_g *)(pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
+ cbMICHDR + sizeof(struct vnt_cts));
+ cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
+ cbMICHDR + sizeof(struct vnt_cts) +
+ sizeof(struct vnt_tx_datahead_g);
}
} else {
// Auto Fall Back
if (bRTS == true) {//RTS_need
- pvRrvTime = (PSRrvTime_gRTS) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS));
- pvRTS = (PSRTS_g_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR);
- pvCTS = NULL;
- pvTxDataHd = (PSTxDataHead_g_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g_FB));
- cbHeaderLength = wTxBufSize + sizeof(SRrvTime_gRTS) + cbMICHDR + sizeof(SRTS_g_FB) + sizeof(STxDataHead_g_FB);
+ pvRrvTime = (struct vnt_rrv_time_rts *)(pbyTxBufferAddr +
+ wTxBufSize);
+ pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_rts));
+ rts_cts = (struct vnt_rts_g_fb *)(pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_rts) + cbMICHDR);
+ pvTxDataHd = (struct vnt_tx_datahead_g_fb *) (pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_rts) +
+ cbMICHDR + sizeof(struct vnt_rts_g_fb));
+ cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_rts) +
+ cbMICHDR + sizeof(struct vnt_rts_g_fb) +
+ sizeof(struct vnt_tx_datahead_g_fb);
}
else if (bRTS == false) { //RTS_needless
- pvRrvTime = (PSRrvTime_gCTS) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS));
- pvRTS = NULL;
- pvCTS = (PSCTS_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR);
- pvTxDataHd = (PSTxDataHead_g_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS_FB));
- cbHeaderLength = wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS_FB) + sizeof(STxDataHead_g_FB);
+ pvRrvTime = (struct vnt_rrv_time_cts *)
+ (pbyTxBufferAddr + wTxBufSize);
+ pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_cts));
+ rts_cts = (struct vnt_cts_fb *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_cts) + cbMICHDR);
+ pvTxDataHd = (struct vnt_tx_datahead_g_fb *) (pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
+ cbMICHDR + sizeof(struct vnt_cts_fb));
+ cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
+ cbMICHDR + sizeof(struct vnt_cts_fb) +
+ sizeof(struct vnt_tx_datahead_g_fb);
}
} // Auto Fall Back
}
else {//802.11a/b packet
if (byFBOption == AUTO_FB_NONE) {
if (bRTS == true) {//RTS_need
- pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
- pvRTS = (PSRTS_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
- pvCTS = NULL;
- pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(SRTS_ab));
- cbHeaderLength = wTxBufSize + sizeof(PSRrvTime_ab) + cbMICHDR + sizeof(SRTS_ab) + sizeof(STxDataHead_ab);
+ pvRrvTime = (struct vnt_rrv_time_ab *) (pbyTxBufferAddr +
+ wTxBufSize);
+ pMICHDR = (struct vnt_mic_hdr *)(pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_ab));
+ rts_cts = (struct vnt_rts_ab *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
+ pvTxDataHd = (struct vnt_tx_datahead_ab *)(pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR +
+ sizeof(struct vnt_rts_ab));
+ cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
+ cbMICHDR + sizeof(struct vnt_rts_ab) +
+ sizeof(struct vnt_tx_datahead_ab);
}
else if (bRTS == false) { //RTS_needless, no MICHDR
- pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
- pvRTS = NULL;
- pvCTS = NULL;
- pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
- cbHeaderLength = wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(STxDataHead_ab);
+ pvRrvTime = (struct vnt_rrv_time_ab *)(pbyTxBufferAddr +
+ wTxBufSize);
+ pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_ab));
+ pvTxDataHd = (struct vnt_tx_datahead_ab *)(pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
+ cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
+ cbMICHDR + sizeof(struct vnt_tx_datahead_ab);
}
} else {
// Auto Fall Back
if (bRTS == true) {//RTS_need
- pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
- pvRTS = (PSRTS_a_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
- pvCTS = NULL;
- pvTxDataHd = (PSTxDataHead_a_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(SRTS_a_FB));
- cbHeaderLength = wTxBufSize + sizeof(PSRrvTime_ab) + cbMICHDR + sizeof(SRTS_a_FB) + sizeof(STxDataHead_a_FB);
+ pvRrvTime = (struct vnt_rrv_time_ab *)(pbyTxBufferAddr +
+ wTxBufSize);
+ pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_ab));
+ rts_cts = (struct vnt_rts_a_fb *)(pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
+ pvTxDataHd = (struct vnt_tx_datahead_a_fb *)(pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR +
+ sizeof(struct vnt_rts_a_fb));
+ cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
+ cbMICHDR + sizeof(struct vnt_rts_a_fb) +
+ sizeof(struct vnt_tx_datahead_a_fb);
}
else if (bRTS == false) { //RTS_needless
- pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
- pvRTS = NULL;
- pvCTS = NULL;
- pvTxDataHd = (PSTxDataHead_a_FB) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
- cbHeaderLength = wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(STxDataHead_a_FB);
+ pvRrvTime = (struct vnt_rrv_time_ab *)(pbyTxBufferAddr +
+ wTxBufSize);
+ pMICHDR = (struct vnt_mic_hdr *)(pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_ab));
+ pvTxDataHd = (struct vnt_tx_datahead_a_fb *)(pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
+ cbHeaderLength = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
+ cbMICHDR + sizeof(struct vnt_tx_datahead_a_fb);
}
} // Auto Fall Back
}
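
Every branch in the hunk above computes the same back-to-back layout: reserved-time block, optional MIC header, RTS or CTS header, then the data header, all packed immediately after the FIFO header in the tx buffer. A compressed sketch of that pattern for the 11g/CTS case, equivalent to the assignments shown above (illustrative only, not part of the patch):

	u8 *p = pbyTxBufferAddr + wTxBufSize;

	pvRrvTime = (struct vnt_rrv_time_cts *)p;
	p += sizeof(struct vnt_rrv_time_cts);

	pMICHDR = (struct vnt_mic_hdr *)p;	/* cbMICHDR is 0 unless CCMP is in use */
	p += cbMICHDR;

	rts_cts = (struct vnt_cts *)p;
	p += sizeof(struct vnt_cts);

	pvTxDataHd = (struct vnt_tx_datahead_g *)p;
	cbHeaderLength = p + sizeof(struct vnt_tx_datahead_g) - pbyTxBufferAddr;
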
@@ -1424,11 +1235,11 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
//Fill FIFO,RrvTime,RTS,and CTS
s_vGenerateTxParameter(pDevice, byPktType, wCurrentRate,
- (void *)pbyTxBufferAddr, pvRrvTime, pvRTS, pvCTS,
- cbFrameSize, bNeedACK, uDMAIdx, psEthHeader);
+ (void *)pbyTxBufferAddr, pvRrvTime, rts_cts,
+ cbFrameSize, bNeedACK, uDMAIdx, psEthHeader, bRTS);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, wCurrentRate, pvTxDataHd, cbFrameSize, uDMAIdx, bNeedACK,
- 0, 0, 1/*uMACfragNum*/, byFBOption);
+ byFBOption);
// Generate TX MAC Header
s_vGenerateMACHeader(pDevice, pbyMacHdr, (u16)uDuration, psEthHeader, bNeedEncryption,
byFragType, uDMAIdx, 0);
@@ -1436,7 +1247,7 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
if (bNeedEncryption == true) {
//Fill TXKEY
s_vFillTxKey(pDevice, (u8 *)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey,
- pbyMacHdr, (u16)cbFrameBodySize, (u8 *)pMICHDR);
+ pbyMacHdr, (u16)cbFrameBodySize, pMICHDR);
if (pDevice->bEnableHostWEP) {
pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
@@ -1475,8 +1286,6 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
memcpy((pbyPayloadHead + cb802_1_H_len), ((u8 *)psEthHeader) + ETH_HLEN, uSkbPacketLen - ETH_HLEN);
}
- ASSERT(uLength == cbNdisBodySize);
-
if ((bNeedEncryption == true) && (pTransmitKey != NULL) && (pTransmitKey->byCipherSuite == KEY_CTL_TKIP)) {
///////////////////////////////////////////////////////////////////
@@ -1537,22 +1346,7 @@ static int s_bPacketToWirelessUsb(struct vnt_private *pDevice, u8 byPktType,
cbFrameSize -= cbICVlen;
}
- if (pDevice->bSoftwareGenCrcErr == true) {
- unsigned int cbLen;
- u32 * pdwCRC;
-
- dwCRC = 0xFFFFFFFFL;
- cbLen = cbFrameSize - cbFCSlen;
- // calculate CRC, and wrtie CRC value to end of TD
- dwCRC = CRCdwGetCrc32Ex(pbyMacHdr, cbLen, dwCRC);
- pdwCRC = (u32 *)(pbyMacHdr + cbLen);
- // finally, we must invert dwCRC to get the correct answer
- *pdwCRC = ~dwCRC;
- // Force Error
- *pdwCRC -= 1;
- } else {
cbFrameSize -= cbFCSlen;
- }
*pcbHeaderLen = cbHeaderLength;
*pcbTotalLen = cbHeaderLength + cbFrameSize ;
@@ -1589,13 +1383,7 @@ static void s_vGenerateMACHeader(struct vnt_private *pDevice,
{
struct ieee80211_hdr *pMACHeader = (struct ieee80211_hdr *)pbyBufferAddr;
- memset(pMACHeader, 0, (sizeof(struct ieee80211_hdr)));
-
- if (uDMAIdx == TYPE_ATIMDMA) {
- pMACHeader->frame_control = TYPE_802_11_ATIM;
- } else {
- pMACHeader->frame_control = TYPE_802_11_DATA;
- }
+ pMACHeader->frame_control = TYPE_802_11_DATA;
if (pDevice->eOPMode == OP_MODE_AP) {
memcpy(&(pMACHeader->addr1[0]),
@@ -1678,14 +1466,14 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
struct vnt_tx_mgmt *pPacket)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
- PTX_BUFFER pTX_Buffer;
+ struct vnt_tx_buffer *pTX_Buffer;
PSTxBufHead pTxBufHead;
- PUSB_SEND_CONTEXT pContext;
+ struct vnt_usb_send_context *pContext;
struct ieee80211_hdr *pMACHeader;
- PSCTS pCTS;
struct ethhdr sEthHeader;
u8 byPktType, *pbyTxBufferAddr;
- void *pvRTS, *pvTxDataHd, *pvRrvTime, *pMICHDR;
+ void *rts_cts = NULL;
+ void *pvTxDataHd, *pvRrvTime, *pMICHDR;
u32 uDuration, cbReqCount, cbHeaderSize, cbFrameBodySize, cbFrameSize;
int bNeedACK, bIsPSPOLL = false;
u32 cbIVlen = 0, cbICVlen = 0, cbMIClen = 0, cbFCSlen = 4;
@@ -1694,19 +1482,18 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
u32 cbMacHdLen;
u16 wCurrentRate = RATE_1M;
- pContext = (PUSB_SEND_CONTEXT)s_vGetFreeContext(pDevice);
+ pContext = (struct vnt_usb_send_context *)s_vGetFreeContext(pDevice);
if (NULL == pContext) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ManagementSend TX...NO CONTEXT!\n");
return CMD_STATUS_RESOURCES;
}
- pTX_Buffer = (PTX_BUFFER) (&pContext->Data[0]);
+ pTX_Buffer = (struct vnt_tx_buffer *)&pContext->Data[0];
pbyTxBufferAddr = (u8 *)&(pTX_Buffer->adwTxKey[0]);
cbFrameBodySize = pPacket->cbPayloadLen;
pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
wTxBufSize = sizeof(STxBufHead);
- memset(pTxBufHead, 0, wTxBufSize);
if (pDevice->byBBType == BB_TYPE_11A) {
wCurrentRate = RATE_6M;
@@ -1819,25 +1606,24 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
//Set RrvTime/RTS/CTS Buffer
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet
- pvRrvTime = (PSRrvTime_gCTS) (pbyTxBufferAddr + wTxBufSize);
+ pvRrvTime = (struct vnt_rrv_time_cts *) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = NULL;
- pvRTS = NULL;
- pCTS = (PSCTS) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS));
- pvTxDataHd = (PSTxDataHead_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + sizeof(SCTS));
- cbHeaderSize = wTxBufSize + sizeof(SRrvTime_gCTS) + sizeof(SCTS) + sizeof(STxDataHead_g);
+ rts_cts = (struct vnt_cts *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_cts));
+ pvTxDataHd = (struct vnt_tx_datahead_g *)(pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_cts) + sizeof(struct vnt_cts));
+ cbHeaderSize = wTxBufSize + sizeof(struct vnt_rrv_time_cts) +
+ sizeof(struct vnt_cts) + sizeof(struct vnt_tx_datahead_g);
}
else { // 802.11a/b packet
- pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
+ pvRrvTime = (struct vnt_rrv_time_ab *) (pbyTxBufferAddr + wTxBufSize);
pMICHDR = NULL;
- pvRTS = NULL;
- pCTS = NULL;
- pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
- cbHeaderSize = wTxBufSize + sizeof(SRrvTime_ab) + sizeof(STxDataHead_ab);
+ pvTxDataHd = (struct vnt_tx_datahead_ab *) (pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_ab));
+ cbHeaderSize = wTxBufSize + sizeof(struct vnt_rrv_time_ab) +
+ sizeof(struct vnt_tx_datahead_ab);
}
- memset((void *)(pbyTxBufferAddr + wTxBufSize), 0,
- (cbHeaderSize - wTxBufSize));
-
memcpy(&(sEthHeader.h_dest[0]),
&(pPacket->p80211Header->sA3.abyAddr1[0]),
ETH_ALEN);
@@ -1849,13 +1635,14 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
//=========================
pTxBufHead->wFragCtl |= (u16)FRAGCTL_NONFRAG;
- //Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, wCurrentRate, pbyTxBufferAddr, pvRrvTime, pvRTS, pCTS,
- cbFrameSize, bNeedACK, TYPE_TXDMA0, &sEthHeader);
+ /* Fill FIFO,RrvTime,RTS,and CTS */
+ s_vGenerateTxParameter(pDevice, byPktType, wCurrentRate,
+ pbyTxBufferAddr, pvRrvTime, rts_cts,
+ cbFrameSize, bNeedACK, TYPE_TXDMA0, &sEthHeader, false);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, wCurrentRate, pvTxDataHd, cbFrameSize, TYPE_TXDMA0, bNeedACK,
- 0, 0, 1, AUTO_FB_NONE);
+ AUTO_FB_NONE);
pMACHeader = (struct ieee80211_hdr *) (pbyTxBufferAddr + cbHeaderSize);
@@ -1918,12 +1705,15 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
// This will cause AID-field of PS-POLL packet be incorrect (Because PS-POLL's AID field is
// in the same place of other packet's Duration-field).
// And it will cause Cisco-AP to issue Disassociation-packet
- if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
- ((PSTxDataHead_g)pvTxDataHd)->wDuration_a = cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
- ((PSTxDataHead_g)pvTxDataHd)->wDuration_b = cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
- } else {
- ((PSTxDataHead_ab)pvTxDataHd)->wDuration = cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
- }
+ if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
+ ((struct vnt_tx_datahead_g *)pvTxDataHd)->wDuration_a =
+ cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
+ ((struct vnt_tx_datahead_g *)pvTxDataHd)->wDuration_b =
+ cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
+ } else {
+ ((struct vnt_tx_datahead_ab *)pvTxDataHd)->wDuration =
+ cpu_to_le16(pPacket->p80211Header->sA2.wDurationID);
+ }
}
pTX_Buffer->wTxByteCount = cpu_to_le16((u16)(cbReqCount));
@@ -1948,60 +1738,60 @@ CMD_STATUS csMgmt_xmit(struct vnt_private *pDevice,
CMD_STATUS csBeacon_xmit(struct vnt_private *pDevice,
struct vnt_tx_mgmt *pPacket)
{
+ struct vnt_beacon_buffer *pTX_Buffer;
u32 cbFrameSize = pPacket->cbMPDULen + WLAN_FCS_LEN;
u32 cbHeaderSize = 0;
u16 wTxBufSize = sizeof(STxShortBufHead);
PSTxShortBufHead pTxBufHead;
struct ieee80211_hdr *pMACHeader;
- PSTxDataHead_ab pTxDataHead;
+ struct vnt_tx_datahead_ab *pTxDataHead;
u16 wCurrentRate;
u32 cbFrameBodySize;
u32 cbReqCount;
- PBEACON_BUFFER pTX_Buffer;
u8 *pbyTxBufferAddr;
- PUSB_SEND_CONTEXT pContext;
+ struct vnt_usb_send_context *pContext;
CMD_STATUS status;
- pContext = (PUSB_SEND_CONTEXT)s_vGetFreeContext(pDevice);
+ pContext = (struct vnt_usb_send_context *)s_vGetFreeContext(pDevice);
if (NULL == pContext) {
status = CMD_STATUS_RESOURCES;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ManagementSend TX...NO CONTEXT!\n");
return status ;
}
- pTX_Buffer = (PBEACON_BUFFER) (&pContext->Data[0]);
+
+ pTX_Buffer = (struct vnt_beacon_buffer *)&pContext->Data[0];
pbyTxBufferAddr = (u8 *)&(pTX_Buffer->wFIFOCtl);
cbFrameBodySize = pPacket->cbPayloadLen;
pTxBufHead = (PSTxShortBufHead) pbyTxBufferAddr;
wTxBufSize = sizeof(STxShortBufHead);
- memset(pTxBufHead, 0, wTxBufSize);
if (pDevice->byBBType == BB_TYPE_11A) {
wCurrentRate = RATE_6M;
- pTxDataHead = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize);
+ pTxDataHead = (struct vnt_tx_datahead_ab *)
+ (pbyTxBufferAddr + wTxBufSize);
//Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameSize, wCurrentRate, PK_TYPE_11A,
- (u16 *)&(pTxDataHead->wTransmitLength), (u8 *)&(pTxDataHead->byServiceField), (u8 *)&(pTxDataHead->bySignalField)
- );
+ BBvCalculateParameter(pDevice, cbFrameSize, wCurrentRate, PK_TYPE_11A,
+ &pTxDataHead->ab);
//Get Duration and TimeStampOff
- pTxDataHead->wDuration = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_A, cbFrameSize, PK_TYPE_11A,
- wCurrentRate, false, 0, 0, 1, AUTO_FB_NONE));
- pTxDataHead->wTimeStampOff = wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE];
- cbHeaderSize = wTxBufSize + sizeof(STxDataHead_ab);
+ pTxDataHead->wDuration = s_uGetDataDuration(pDevice,
+ PK_TYPE_11A, false);
+ pTxDataHead->wTimeStampOff = vnt_time_stamp_off(pDevice, wCurrentRate);
+ cbHeaderSize = wTxBufSize + sizeof(struct vnt_tx_datahead_ab);
} else {
wCurrentRate = RATE_1M;
pTxBufHead->wFIFOCtl |= FIFOCTL_11B;
- pTxDataHead = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize);
+ pTxDataHead = (struct vnt_tx_datahead_ab *)
+ (pbyTxBufferAddr + wTxBufSize);
//Get SignalField,ServiceField,Length
- BBvCalculateParameter(pDevice, cbFrameSize, wCurrentRate, PK_TYPE_11B,
- (u16 *)&(pTxDataHead->wTransmitLength), (u8 *)&(pTxDataHead->byServiceField), (u8 *)&(pTxDataHead->bySignalField)
- );
+ BBvCalculateParameter(pDevice, cbFrameSize, wCurrentRate, PK_TYPE_11B,
+ &pTxDataHead->ab);
//Get Duration and TimeStampOff
- pTxDataHead->wDuration = cpu_to_le16((u16)s_uGetDataDuration(pDevice, DATADUR_B, cbFrameSize, PK_TYPE_11B,
- wCurrentRate, false, 0, 0, 1, AUTO_FB_NONE));
- pTxDataHead->wTimeStampOff = wTimeStampOff[pDevice->byPreambleType%2][wCurrentRate%MAX_RATE];
- cbHeaderSize = wTxBufSize + sizeof(STxDataHead_ab);
+ pTxDataHead->wDuration = s_uGetDataDuration(pDevice,
+ PK_TYPE_11B, false);
+ pTxDataHead->wTimeStampOff = vnt_time_stamp_off(pDevice, wCurrentRate);
+ cbHeaderSize = wTxBufSize + sizeof(struct vnt_tx_datahead_ab);
}
//Generate Beacon Header
@@ -2032,9 +1822,11 @@ CMD_STATUS csBeacon_xmit(struct vnt_private *pDevice,
void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ struct vnt_tx_buffer *pTX_Buffer;
u8 byPktType;
u8 *pbyTxBufferAddr;
- void *pvRTS, *pvCTS, *pvTxDataHd;
+ void *rts_cts = NULL;
+ void *pvTxDataHd;
u32 uDuration, cbReqCount;
struct ieee80211_hdr *pMACHeader;
u32 cbHeaderSize, cbFrameBodySize;
@@ -2059,10 +1851,9 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
PSKeyItem pTransmitKey = NULL;
u8 *pbyIVHead, *pbyPayloadHead, *pbyMacHdr;
u32 cbExtSuppRate = 0;
- PTX_BUFFER pTX_Buffer;
- PUSB_SEND_CONTEXT pContext;
+ struct vnt_usb_send_context *pContext;
- pvRrvTime = pMICHDR = pvRTS = pvCTS = pvTxDataHd = NULL;
+ pvRrvTime = pMICHDR = pvTxDataHd = NULL;
if(skb->len <= WLAN_HDR_ADDR3_LEN) {
cbFrameBodySize = 0;
@@ -2072,7 +1863,7 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
}
p80211Header = (PUWLAN_80211HDR)skb->data;
- pContext = (PUSB_SEND_CONTEXT)s_vGetFreeContext(pDevice);
+ pContext = (struct vnt_usb_send_context *)s_vGetFreeContext(pDevice);
if (NULL == pContext) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"DMA0 TX...NO CONTEXT!\n");
@@ -2080,11 +1871,10 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
return ;
}
- pTX_Buffer = (PTX_BUFFER)(&pContext->Data[0]);
+ pTX_Buffer = (struct vnt_tx_buffer *)&pContext->Data[0];
pbyTxBufferAddr = (u8 *)(&pTX_Buffer->adwTxKey[0]);
pTxBufHead = (PSTxBufHead) pbyTxBufferAddr;
wTxBufSize = sizeof(STxBufHead);
- memset(pTxBufHead, 0, wTxBufSize);
if (pDevice->byBBType == BB_TYPE_11A) {
wCurrentRate = RATE_6M;
@@ -2204,7 +1994,7 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
else if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
cbIVlen = 8;//RSN Header
cbICVlen = 8;//MIC
- cbMICHDR = sizeof(SMICHDRHead);
+ cbMICHDR = sizeof(struct vnt_mic_hdr);
pTxBufHead->wFragCtl |= FRAGCTL_AES;
pDevice->bAES = true;
}
@@ -2222,26 +2012,28 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
//the rest of pTxBufHead->wFragCtl:FragTyp will be set later in s_vFillFragParameter()
if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {//802.11g packet
-
- pvRrvTime = (PSRrvTime_gCTS) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS));
- pvRTS = NULL;
- pvCTS = (PSCTS) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR);
- pvTxDataHd = (PSTxDataHead_g) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS));
- cbHeaderSize = wTxBufSize + sizeof(SRrvTime_gCTS) + cbMICHDR + sizeof(SCTS) + sizeof(STxDataHead_g);
+ pvRrvTime = (struct vnt_rrv_time_cts *) (pbyTxBufferAddr + wTxBufSize);
+ pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_cts));
+ rts_cts = (struct vnt_cts *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_cts) + cbMICHDR);
+ pvTxDataHd = (struct vnt_tx_datahead_g *) (pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_cts) + cbMICHDR +
+ sizeof(struct vnt_cts));
+ cbHeaderSize = wTxBufSize + sizeof(struct vnt_rrv_time_cts) + cbMICHDR +
+ sizeof(struct vnt_cts) + sizeof(struct vnt_tx_datahead_g);
}
else {//802.11a/b packet
- pvRrvTime = (PSRrvTime_ab) (pbyTxBufferAddr + wTxBufSize);
- pMICHDR = (PSMICHDRHead) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab));
- pvRTS = NULL;
- pvCTS = NULL;
- pvTxDataHd = (PSTxDataHead_ab) (pbyTxBufferAddr + wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR);
- cbHeaderSize = wTxBufSize + sizeof(SRrvTime_ab) + cbMICHDR + sizeof(STxDataHead_ab);
+ pvRrvTime = (struct vnt_rrv_time_ab *) (pbyTxBufferAddr + wTxBufSize);
+ pMICHDR = (struct vnt_mic_hdr *) (pbyTxBufferAddr + wTxBufSize +
+ sizeof(struct vnt_rrv_time_ab));
+ pvTxDataHd = (struct vnt_tx_datahead_ab *)(pbyTxBufferAddr +
+ wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR);
+ cbHeaderSize = wTxBufSize + sizeof(struct vnt_rrv_time_ab) + cbMICHDR +
+ sizeof(struct vnt_tx_datahead_ab);
}
- memset((void *)(pbyTxBufferAddr + wTxBufSize), 0,
- (cbHeaderSize - wTxBufSize));
memcpy(&(sEthHeader.h_dest[0]),
&(p80211Header->sA3.abyAddr1[0]),
ETH_ALEN);
@@ -2253,13 +2045,14 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
//=========================
pTxBufHead->wFragCtl |= (u16)FRAGCTL_NONFRAG;
- //Fill FIFO,RrvTime,RTS,and CTS
- s_vGenerateTxParameter(pDevice, byPktType, wCurrentRate, pbyTxBufferAddr, pvRrvTime, pvRTS, pvCTS,
- cbFrameSize, bNeedACK, TYPE_TXDMA0, &sEthHeader);
+ /* Fill FIFO,RrvTime,RTS,and CTS */
+ s_vGenerateTxParameter(pDevice, byPktType, wCurrentRate,
+ pbyTxBufferAddr, pvRrvTime, rts_cts,
+ cbFrameSize, bNeedACK, TYPE_TXDMA0, &sEthHeader, false);
//Fill DataHead
uDuration = s_uFillDataHead(pDevice, byPktType, wCurrentRate, pvTxDataHd, cbFrameSize, TYPE_TXDMA0, bNeedACK,
- 0, 0, 1, AUTO_FB_NONE);
+ AUTO_FB_NONE);
pMACHeader = (struct ieee80211_hdr *) (pbyTxBufferAddr + cbHeaderSize);
@@ -2345,7 +2138,7 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
}
s_vFillTxKey(pDevice, (u8 *)(pTxBufHead->adwTxKey), pbyIVHead, pTransmitKey,
- pbyMacHdr, (u16)cbFrameBodySize, (u8 *)pMICHDR);
+ pbyMacHdr, (u16)cbFrameBodySize, pMICHDR);
if (pDevice->bEnableHostWEP) {
pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16 = pTransmitKey->dwTSC47_16;
@@ -2368,12 +2161,15 @@ void vDMA0_tx_80211(struct vnt_private *pDevice, struct sk_buff *skb)
// This will cause AID-field of PS-POLL packet be incorrect (Because PS-POLL's AID field is
// in the same place of other packet's Duration-field).
// And it will cause Cisco-AP to issue Disassociation-packet
- if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
- ((PSTxDataHead_g)pvTxDataHd)->wDuration_a = cpu_to_le16(p80211Header->sA2.wDurationID);
- ((PSTxDataHead_g)pvTxDataHd)->wDuration_b = cpu_to_le16(p80211Header->sA2.wDurationID);
- } else {
- ((PSTxDataHead_ab)pvTxDataHd)->wDuration = cpu_to_le16(p80211Header->sA2.wDurationID);
- }
+ if (byPktType == PK_TYPE_11GB || byPktType == PK_TYPE_11GA) {
+ ((struct vnt_tx_datahead_g *)pvTxDataHd)->wDuration_a =
+ cpu_to_le16(p80211Header->sA2.wDurationID);
+ ((struct vnt_tx_datahead_g *)pvTxDataHd)->wDuration_b =
+ cpu_to_le16(p80211Header->sA2.wDurationID);
+ } else {
+ ((struct vnt_tx_datahead_ab *)pvTxDataHd)->wDuration =
+ cpu_to_le16(p80211Header->sA2.wDurationID);
+ }
}
pTX_Buffer->wTxByteCount = cpu_to_le16((u16)(cbReqCount));
@@ -2415,6 +2211,7 @@ int nsDMA_tx_packet(struct vnt_private *pDevice,
{
struct net_device_stats *pStats = &pDevice->stats;
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ struct vnt_tx_buffer *pTX_Buffer;
u32 BytesToWrite = 0, uHeaderLen = 0;
u32 uNodeIndex = 0;
u8 byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
@@ -2428,9 +2225,8 @@ int nsDMA_tx_packet(struct vnt_private *pDevice,
int bNeedDeAuth = false;
u8 *pbyBSSID;
int bNodeExist = false;
- PUSB_SEND_CONTEXT pContext;
+ struct vnt_usb_send_context *pContext;
bool fConvertedPacket;
- PTX_BUFFER pTX_Buffer;
u32 status;
u16 wKeepRate = pDevice->wCurrentRate;
int bTxeapol_key = false;
@@ -2500,7 +2296,7 @@ int nsDMA_tx_packet(struct vnt_private *pDevice,
}
}
- pContext = (PUSB_SEND_CONTEXT)s_vGetFreeContext(pDevice);
+ pContext = (struct vnt_usb_send_context *)s_vGetFreeContext(pDevice);
if (pContext == NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG" pContext == NULL\n");
@@ -2738,8 +2534,10 @@ int nsDMA_tx_packet(struct vnt_private *pDevice,
}
}
+ pTX_Buffer = (struct vnt_tx_buffer *)&pContext->Data[0];
+
fConvertedPacket = s_bPacketToWirelessUsb(pDevice, byPktType,
- (u8 *)(&pContext->Data[0]), bNeedEncryption,
+ pTX_Buffer, bNeedEncryption,
skb->len, uDMAIdx, &pDevice->sTxEthHeader,
(u8 *)skb->data, pTransmitKey, uNodeIndex,
pDevice->wCurrentRate,
@@ -2761,7 +2559,6 @@ int nsDMA_tx_packet(struct vnt_private *pDevice,
}
}
- pTX_Buffer = (PTX_BUFFER)&(pContext->Data[0]);
pTX_Buffer->byPKTNO = (u8) (((pDevice->wCurrentRate<<4) &0x00F0) | ((pDevice->wSeqCounter - 1) & 0x000F));
pTX_Buffer->wTxByteCount = (u16)BytesToWrite;
@@ -2808,20 +2605,20 @@ int bRelayPacketSend(struct vnt_private *pDevice, u8 *pbySkbData, u32 uDataLen,
u32 uNodeIndex)
{
struct vnt_manager *pMgmt = &pDevice->vnt_mgmt;
+ struct vnt_tx_buffer *pTX_Buffer;
u32 BytesToWrite = 0, uHeaderLen = 0;
u8 byPktType = PK_TYPE_11B;
int bNeedEncryption = false;
SKeyItem STempKey;
PSKeyItem pTransmitKey = NULL;
u8 *pbyBSSID;
- PUSB_SEND_CONTEXT pContext;
+ struct vnt_usb_send_context *pContext;
u8 byPktTyp;
int fConvertedPacket;
- PTX_BUFFER pTX_Buffer;
u32 status;
u16 wKeepRate = pDevice->wCurrentRate;
- pContext = (PUSB_SEND_CONTEXT)s_vGetFreeContext(pDevice);
+ pContext = (struct vnt_usb_send_context *)s_vGetFreeContext(pDevice);
if (NULL == pContext) {
return false;
@@ -2898,8 +2695,10 @@ int bRelayPacketSend(struct vnt_private *pDevice, u8 *pbySkbData, u32 uDataLen,
// Convert the packet to an usb frame and copy into our buffer
// and send the irp.
+ pTX_Buffer = (struct vnt_tx_buffer *)&pContext->Data[0];
+
fConvertedPacket = s_bPacketToWirelessUsb(pDevice, byPktType,
- (u8 *)(&pContext->Data[0]), bNeedEncryption,
+ pTX_Buffer, bNeedEncryption,
uDataLen, TYPE_AC0DMA, &pDevice->sTxEthHeader,
pbySkbData, pTransmitKey, uNodeIndex,
pDevice->wCurrentRate,
@@ -2911,7 +2710,6 @@ int bRelayPacketSend(struct vnt_private *pDevice, u8 *pbySkbData, u32 uDataLen,
return false;
}
- pTX_Buffer = (PTX_BUFFER)&(pContext->Data[0]);
pTX_Buffer->byPKTNO = (u8) (((pDevice->wCurrentRate<<4) &0x00F0) | ((pDevice->wSeqCounter - 1) & 0x000F));
pTX_Buffer->wTxByteCount = (u16)BytesToWrite;
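
Throughout rxtx.c the open-coded conversions are replaced by small helpers: cpu_to_le16((u16)s_uGetTxRsvTime(...)) becomes vnt_rxtx_rsvtime_le16(...), and the wTimeStampOff[preamble][rate] table lookup becomes vnt_time_stamp_off(...). Minimal sketches of what such wrappers are assumed to look like, inferred from the lines they replace (the bodies are illustrative, not the driver's actual code):

	static __le16 vnt_rxtx_rsvtime_le16(struct vnt_private *priv, u8 pkt_type,
		u32 frame_length, u16 rate, int need_ack)
	{
		return cpu_to_le16((u16)s_uGetTxRsvTime(priv, pkt_type,
				frame_length, rate, need_ack));
	}

	static __le16 vnt_time_stamp_off(struct vnt_private *priv, u16 rate)
	{
		return cpu_to_le16(wTimeStampOff[priv->byPreambleType % 2]
							[rate % MAX_RATE]);
	}
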
diff --git a/drivers/staging/vt6656/rxtx.h b/drivers/staging/vt6656/rxtx.h
index dd7e85dde1a..4bbee1c2fca 100644
--- a/drivers/staging/vt6656/rxtx.h
+++ b/drivers/staging/vt6656/rxtx.h
@@ -31,602 +31,173 @@
#include "device.h"
#include "wcmd.h"
-
-//
-// RTS buffer header
-//
-typedef struct tagSRTSDataF {
- u16 wFrameControl;
- u16 wDurationID;
- u8 abyRA[ETH_ALEN];
- u8 abyTA[ETH_ALEN];
-} SRTSDataF, *PSRTSDataF;
-
-//
-// CTS buffer header
-//
-typedef struct tagSCTSDataF {
- u16 wFrameControl;
- u16 wDurationID;
- u8 abyRA[ETH_ALEN];
- u16 wReserved;
-} SCTSDataF, *PSCTSDataF;
-
-//
-// MICHDR data header
-//
-typedef struct tagSMICHDR {
- u32 adwHDR0[4];
- u32 adwHDR1[4];
- u32 adwHDR2[4];
-} SMICHDR, *PSMICHDR;
-
-typedef struct tagSTX_NAF_G_RTS
-{
- //RsvTime
- u16 wRTSTxRrvTime_ba;
- u16 wRTSTxRrvTime_aa;
- u16 wRTSTxRrvTime_bb;
- u16 wReserved2;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
-
- //RTS
- u8 byRTSSignalField_b;
- u8 byRTSServiceField_b;
- u16 wRTSTransmitLength_b;
- u8 byRTSSignalField_a;
- u8 byRTSServiceField_a;
- u16 wRTSTransmitLength_a;
- u16 wRTSDuration_ba;
- u16 wRTSDuration_aa;
- u16 wRTSDuration_bb;
- u16 wReserved3;
- SRTSDataF sRTS;
-
- //Data
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
-
-} TX_NAF_G_RTS, *PTX_NAF_G_RTS;
-
-typedef struct tagSTX_NAF_G_RTS_MIC
-{
- //RsvTime
- u16 wRTSTxRrvTime_ba;
- u16 wRTSTxRrvTime_aa;
- u16 wRTSTxRrvTime_bb;
- u16 wReserved2;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
-
- SMICHDR sMICHDR;
-
- //RTS
- u8 byRTSSignalField_b;
- u8 byRTSServiceField_b;
- u16 wRTSTransmitLength_b;
- u8 byRTSSignalField_a;
- u8 byRTSServiceField_a;
- u16 wRTSTransmitLength_a;
- u16 wRTSDuration_ba;
- u16 wRTSDuration_aa;
- u16 wRTSDuration_bb;
- u16 wReserved3;
- SRTSDataF sRTS;
-
- //Data
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
-
-} TX_NAF_G_RTS_MIC, *PTX_NAF_G_RTS_MIC;
-
-typedef struct tagSTX_NAF_G_CTS
-{
- //RsvTime
- u16 wCTSTxRrvTime_ba;
- u16 wReserved2;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
-
- //CTS
- u8 byCTSSignalField_b;
- u8 byCTSServiceField_b;
- u16 wCTSTransmitLength_b;
- u16 wCTSDuration_ba;
- u16 wReserved3;
- SCTSDataF sCTS;
-
- //Data
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
-
-} TX_NAF_G_CTS, *PTX_NAF_G_CTS;
-
-typedef struct tagSTX_NAF_G_CTS_MIC
-{
- //RsvTime
- u16 wCTSTxRrvTime_ba;
- u16 wReserved2;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
-
- SMICHDR sMICHDR;
-
- //CTS
- u8 byCTSSignalField_b;
- u8 byCTSServiceField_b;
- u16 wCTSTransmitLength_b;
- u16 wCTSDuration_ba;
- u16 wReserved3;
- SCTSDataF sCTS;
-
- //Data
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
-
-} TX_NAF_G_CTS_MIC, *PTX_NAF_G_CTS_MIC;
-
-typedef struct tagSTX_NAF_G_BEACON
-{
- u16 wFIFOCtl;
- u16 wTimeStamp;
-
- //CTS
- u8 byCTSSignalField_b;
- u8 byCTSServiceField_b;
- u16 wCTSTransmitLength_b;
- u16 wCTSDuration_ba;
- u16 wReserved1;
- SCTSDataF sCTS;
-
- //Data
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_a;
- u16 wTimeStampOff_a;
-
-} TX_NAF_G_BEACON, *PTX_NAF_G_BEACON;
-
-typedef struct tagSTX_NAF_AB_RTS
-{
- //RsvTime
- u16 wRTSTxRrvTime_ab;
- u16 wTxRrvTime_ab;
-
- //RTS
- u8 byRTSSignalField_ab;
- u8 byRTSServiceField_ab;
- u16 wRTSTransmitLength_ab;
- u16 wRTSDuration_ab;
- u16 wReserved2;
- SRTSDataF sRTS;
-
- //Data
- u8 bySignalField_ab;
- u8 byServiceField_ab;
- u16 wTransmitLength_ab;
- u16 wDuration_ab;
- u16 wTimeStampOff_ab;
-
-} TX_NAF_AB_RTS, *PTX_NAF_AB_RTS;
-
-typedef struct tagSTX_NAF_AB_RTS_MIC
-{
- //RsvTime
- u16 wRTSTxRrvTime_ab;
- u16 wTxRrvTime_ab;
-
- SMICHDR sMICHDR;
-
- //RTS
- u8 byRTSSignalField_ab;
- u8 byRTSServiceField_ab;
- u16 wRTSTransmitLength_ab;
- u16 wRTSDuration_ab;
- u16 wReserved2;
- SRTSDataF sRTS;
-
- //Data
- u8 bySignalField_ab;
- u8 byServiceField_ab;
- u16 wTransmitLength_ab;
- u16 wDuration_ab;
- u16 wTimeStampOff_ab;
-
-} TX_NAF_AB_RTS_MIC, *PTX_NAF_AB_RTS_MIC;
-
-typedef struct tagSTX_NAF_AB_CTS
-{
- //RsvTime
- u16 wReserved2;
- u16 wTxRrvTime_ab;
-
- //Data
- u8 bySignalField_ab;
- u8 byServiceField_ab;
- u16 wTransmitLength_ab;
- u16 wDuration_ab;
- u16 wTimeStampOff_ab;
-
-} TX_NAF_AB_CTS, *PTX_NAF_AB_CTS;
-
-typedef struct tagSTX_NAF_AB_CTS_MIC
-{
- //RsvTime
- u16 wReserved2;
- u16 wTxRrvTime_ab;
-
- SMICHDR sMICHDR;
-
- //Data
- u8 bySignalField_ab;
- u8 byServiceField_ab;
- u16 wTransmitLength_ab;
- u16 wDuration_ab;
- u16 wTimeStampOff_ab;
-
-} TX_NAF_AB_CTS_MIC, *PTX_NAF_AB_CTS_MIC;
-
-typedef struct tagSTX_NAF_AB_BEACON
-{
- u16 wFIFOCtl;
- u16 wTimeStamp;
-
- //Data
- u8 bySignalField_ab;
- u8 byServiceField_ab;
- u16 wTransmitLength_ab;
- u16 wDuration_ab;
- u16 wTimeStampOff_ab;
-
-} TX_NAF_AB_BEACON, *PTX_NAF_AB_BEACON;
-
-typedef struct tagSTX_AF_G_RTS
-{
- //RsvTime
- u16 wRTSTxRrvTime_ba;
- u16 wRTSTxRrvTime_aa;
- u16 wRTSTxRrvTime_bb;
- u16 wReserved2;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
-
- //RTS
- u8 byRTSSignalField_b;
- u8 byRTSServiceField_b;
- u16 wRTSTransmitLength_b;
- u8 byRTSSignalField_a;
- u8 byRTSServiceField_a;
- u16 wRTSTransmitLength_a;
- u16 wRTSDuration_ba;
- u16 wRTSDuration_aa;
- u16 wRTSDuration_bb;
- u16 wReserved3;
- u16 wRTSDuration_ba_f0;
- u16 wRTSDuration_aa_f0;
- u16 wRTSDuration_ba_f1;
- u16 wRTSDuration_aa_f1;
- SRTSDataF sRTS;
-
- //Data
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wDuration_a_f0;
- u16 wDuration_a_f1;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
-
-} TX_AF_G_RTS, *PTX_AF_G_RTS;
-
-typedef struct tagSTX_AF_G_RTS_MIC
-{
- //RsvTime
- u16 wRTSTxRrvTime_ba;
- u16 wRTSTxRrvTime_aa;
- u16 wRTSTxRrvTime_bb;
- u16 wReserved2;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
-
- SMICHDR sMICHDR;
-
- //RTS
- u8 byRTSSignalField_b;
- u8 byRTSServiceField_b;
- u16 wRTSTransmitLength_b;
- u8 byRTSSignalField_a;
- u8 byRTSServiceField_a;
- u16 wRTSTransmitLength_a;
- u16 wRTSDuration_ba;
- u16 wRTSDuration_aa;
- u16 wRTSDuration_bb;
- u16 wReserved3;
- u16 wRTSDuration_ba_f0;
- u16 wRTSDuration_aa_f0;
- u16 wRTSDuration_ba_f1;
- u16 wRTSDuration_aa_f1;
- SRTSDataF sRTS;
-
- //Data
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wDuration_a_f0;
- u16 wDuration_a_f1;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
-
-} TX_AF_G_RTS_MIC, *PTX_AF_G_RTS_MIC;
-
-typedef struct tagSTX_AF_G_CTS
-{
- //RsvTime
- u16 wCTSTxRrvTime_ba;
- u16 wReserved2;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
-
- //CTS
- u8 byCTSSignalField_b;
- u8 byCTSServiceField_b;
- u16 wCTSTransmitLength_b;
- u16 wCTSDuration_ba;
- u16 wReserved3;
- u16 wCTSDuration_ba_f0;
- u16 wCTSDuration_ba_f1;
- SCTSDataF sCTS;
-
- //Data
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wDuration_a_f0;
- u16 wDuration_a_f1;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
-
-} TX_AF_G_CTS, *PTX_AF_G_CTS;
-
-typedef struct tagSTX_AF_G_CTS_MIC
-{
- //RsvTime
- u16 wCTSTxRrvTime_ba;
- u16 wReserved2;
- u16 wTxRrvTime_b;
- u16 wTxRrvTime_a;
-
- SMICHDR sMICHDR;
-
- //CTS
- u8 byCTSSignalField_b;
- u8 byCTSServiceField_b;
- u16 wCTSTransmitLength_b;
- u16 wCTSDuration_ba;
- u16 wReserved3;
- u16 wCTSDuration_ba_f0;
- u16 wCTSDuration_ba_f1;
- SCTSDataF sCTS;
-
- //Data
- u8 bySignalField_b;
- u8 byServiceField_b;
- u16 wTransmitLength_b;
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_b;
- u16 wDuration_a;
- u16 wDuration_a_f0;
- u16 wDuration_a_f1;
- u16 wTimeStampOff_b;
- u16 wTimeStampOff_a;
-
-} TX_AF_G_CTS_MIC, *PTX_AF_G_CTS_MIC;
-
-typedef struct tagSTX_AF_A_RTS
-{
- //RsvTime
- u16 wRTSTxRrvTime_a;
- u16 wTxRrvTime_a;
-
- //RTS
- u8 byRTSSignalField_a;
- u8 byRTSServiceField_a;
- u16 wRTSTransmitLength_a;
- u16 wRTSDuration_a;
- u16 wReserved2;
- u16 wRTSDuration_a_f0;
- u16 wRTSDuration_a_f1;
- SRTSDataF sRTS;
-
- //Data
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_a;
- u16 wTimeStampOff_a;
- u16 wDuration_a_f0;
- u16 wDuration_a_f1;
-
-} TX_AF_A_RTS, *PTX_AF_A_RTS;
-
-typedef struct tagSTX_AF_A_RTS_MIC
-{
- //RsvTime
- u16 wRTSTxRrvTime_a;
- u16 wTxRrvTime_a;
-
- SMICHDR sMICHDR;
-
- //RTS
- u8 byRTSSignalField_a;
- u8 byRTSServiceField_a;
- u16 wRTSTransmitLength_a;
- u16 wRTSDuration_a;
- u16 wReserved2;
- u16 wRTSDuration_a_f0;
- u16 wRTSDuration_a_f1;
- SRTSDataF sRTS;
-
- //Data
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_a;
- u16 wTimeStampOff_a;
- u16 wDuration_a_f0;
- u16 wDuration_a_f1;
-
-} TX_AF_A_RTS_MIC, *PTX_AF_A_RTS_MIC;
-
-typedef struct tagSTX_AF_A_CTS
-{
- //RsvTime
- u16 wReserved2;
- u16 wTxRrvTime_a;
-
- //Data
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_a;
- u16 wTimeStampOff_a;
- u16 wDuration_a_f0;
- u16 wDuration_a_f1;
-
-} TX_AF_A_CTS, *PTX_AF_A_CTS;
-
-typedef struct tagSTX_AF_A_CTS_MIC
-{
- //RsvTime
- u16 wReserved2;
- u16 wTxRrvTime_a;
-
- SMICHDR sMICHDR;
-
- //Data
- u8 bySignalField_a;
- u8 byServiceField_a;
- u16 wTransmitLength_a;
- u16 wDuration_a;
- u16 wTimeStampOff_a;
- u16 wDuration_a_f0;
- u16 wDuration_a_f1;
-
-} TX_AF_A_CTS_MIC, *PTX_AF_A_CTS_MIC;
-
-//
-// union with all of the TX Buffer Type
-//
-typedef union tagUTX_BUFFER_CONTAINER
-{
- TX_NAF_G_RTS RTS_G;
- TX_NAF_G_RTS_MIC RTS_G_MIC;
- TX_NAF_G_CTS CTS_G;
- TX_NAF_G_CTS_MIC CTS_G_MIC;
- //TX_NAF_G_BEACON Beacon_G;
- TX_NAF_AB_RTS RTS_AB;
- TX_NAF_AB_RTS_MIC RTS_AB_MIC;
- TX_NAF_AB_CTS CTS_AB;
- TX_NAF_AB_CTS_MIC CTS_AB_MIC;
- //TX_NAF_AB_BEACON Beacon_AB;
- TX_AF_G_RTS RTS_G_AutoFB;
- TX_AF_G_RTS_MIC RTS_G_AutoFB_MIC;
- TX_AF_G_CTS CTS_G_AutoFB;
- TX_AF_G_CTS_MIC CTS_G_AutoFB_MIC;
- TX_AF_A_RTS RTS_A_AutoFB;
- TX_AF_A_RTS_MIC RTS_A_AutoFB_MIC;
- TX_AF_A_CTS CTS_A_AutoFB;
- TX_AF_A_CTS_MIC CTS_A_AutoFB_MIC;
-
-} TX_BUFFER_CONTAINER, *PTX_BUFFER_CONTAINER;
-
-//
-// Remote NDIS message format
-//
-typedef struct tagSTX_BUFFER
-{
- u8 byType;
- u8 byPKTNO;
- u16 wTxByteCount;
-
+#include "baseband.h"
+
+/* MIC HDR data header */
+struct vnt_mic_hdr {
+ u8 id;
+ u8 tx_priority;
+ u8 mic_addr2[6];
+ __be32 tsc_47_16;
+ __be16 tsc_15_0;
+ __be16 payload_len;
+ __be16 hlen;
+ __le16 frame_control;
+ u8 addr1[6];
+ u8 addr2[6];
+ u8 addr3[6];
+ __le16 seq_ctrl;
+ u8 addr4[6];
+ u16 packing; /* packing to 48 bytes */
+} __packed;
+
+/* RsvTime buffer header */
+struct vnt_rrv_time_rts {
+ u16 wRTSTxRrvTime_ba;
+ u16 wRTSTxRrvTime_aa;
+ u16 wRTSTxRrvTime_bb;
+ u16 wReserved;
+ u16 wTxRrvTime_b;
+ u16 wTxRrvTime_a;
+} __packed;
+
+struct vnt_rrv_time_cts {
+ u16 wCTSTxRrvTime_ba;
+ u16 wReserved;
+ u16 wTxRrvTime_b;
+ u16 wTxRrvTime_a;
+} __packed;
+
+struct vnt_rrv_time_ab {
+ u16 wRTSTxRrvTime;
+ u16 wTxRrvTime;
+} __packed;
+
+/* TX data header */
+struct vnt_tx_datahead_g {
+ struct vnt_phy_field b;
+ struct vnt_phy_field a;
+ u16 wDuration_b;
+ u16 wDuration_a;
+ u16 wTimeStampOff_b;
+ u16 wTimeStampOff_a;
+} __packed;
+
+struct vnt_tx_datahead_g_fb {
+ struct vnt_phy_field b;
+ struct vnt_phy_field a;
+ u16 wDuration_b;
+ u16 wDuration_a;
+ u16 wDuration_a_f0;
+ u16 wDuration_a_f1;
+ u16 wTimeStampOff_b;
+ u16 wTimeStampOff_a;
+} __packed;
+
+struct vnt_tx_datahead_ab {
+ struct vnt_phy_field ab;
+ u16 wDuration;
+ u16 wTimeStampOff;
+} __packed;
+
+struct vnt_tx_datahead_a_fb {
+ struct vnt_phy_field a;
+ u16 wDuration;
+ u16 wTimeStampOff;
+ u16 wDuration_f0;
+ u16 wDuration_f1;
+} __packed;
+
+/* RTS buffer header */
+struct vnt_rts_g {
+ struct vnt_phy_field b;
+ struct vnt_phy_field a;
+ u16 wDuration_ba;
+ u16 wDuration_aa;
+ u16 wDuration_bb;
+ u16 wReserved;
+ struct ieee80211_rts data;
+} __packed;
+
+struct vnt_rts_g_fb {
+ struct vnt_phy_field b;
+ struct vnt_phy_field a;
+ u16 wDuration_ba;
+ u16 wDuration_aa;
+ u16 wDuration_bb;
+ u16 wReserved;
+ u16 wRTSDuration_ba_f0;
+ u16 wRTSDuration_aa_f0;
+ u16 wRTSDuration_ba_f1;
+ u16 wRTSDuration_aa_f1;
+ struct ieee80211_rts data;
+} __packed;
+
+struct vnt_rts_ab {
+ struct vnt_phy_field ab;
+ u16 wDuration;
+ u16 wReserved;
+ struct ieee80211_rts data;
+} __packed;
+
+struct vnt_rts_a_fb {
+ struct vnt_phy_field a;
+ u16 wDuration;
+ u16 wReserved;
+ u16 wRTSDuration_f0;
+ u16 wRTSDuration_f1;
+ struct ieee80211_rts data;
+} __packed;
+
+/* CTS buffer header */
+struct vnt_cts {
+ struct vnt_phy_field b;
+ u16 wDuration_ba;
+ u16 wReserved;
+ struct ieee80211_cts data;
+ u16 reserved2;
+} __packed;
+
+struct vnt_cts_fb {
+ struct vnt_phy_field b;
+ u16 wDuration_ba;
+ u16 wReserved;
+ u16 wCTSDuration_ba_f0;
+ u16 wCTSDuration_ba_f1;
+ struct ieee80211_cts data;
+ u16 reserved2;
+} __packed;
+
+union vnt_tx_data_head {
+ /* rts g */
+ struct vnt_rts_g rts_g;
+ struct vnt_rts_g_fb rts_g_fb;
+ /* rts a/b */
+ struct vnt_rts_ab rts_ab;
+ struct vnt_rts_a_fb rts_a_fb;
+ /* cts g */
+ struct vnt_cts cts_g;
+ struct vnt_cts_fb cts_g_fb;
+};
+
+struct vnt_tx_buffer {
+ u8 byType;
+ u8 byPKTNO;
+ u16 wTxByteCount;
u32 adwTxKey[4];
- u16 wFIFOCtl;
- u16 wTimeStamp;
- u16 wFragCtl;
- u16 wReserved;
-
- // Actual message
- TX_BUFFER_CONTAINER BufferHeader;
-
-} TX_BUFFER, *PTX_BUFFER;
-
-//
-// Remote NDIS message format
-//
-typedef struct tagSBEACON_BUFFER
-{
- u8 byType;
- u8 byPKTNO;
- u16 wTxByteCount;
-
- u16 wFIFOCtl;
- u16 wTimeStamp;
-
- // Actual message
- TX_BUFFER_CONTAINER BufferHeader;
-
-} BEACON_BUFFER, *PBEACON_BUFFER;
+ u16 wFIFOCtl;
+ u16 wTimeStamp;
+ u16 wFragCtl;
+ u16 wReserved;
+} __packed;
+
+struct vnt_beacon_buffer {
+ u8 byType;
+ u8 byPKTNO;
+ u16 wTxByteCount;
+ u16 wFIFOCtl;
+ u16 wTimeStamp;
+} __packed;
void vDMA0_tx_80211(struct vnt_private *, struct sk_buff *skb);
int nsDMA_tx_packet(struct vnt_private *, u32 uDMAIdx, struct sk_buff *skb);
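
The new rxtx.h headers drop the per-band signal/service/length triplets (bySignalField_*, byServiceField_*, wTransmitLength_*) in favour of an embedded struct vnt_phy_field from the newly included baseband.h, which BBvCalculateParameter() now fills in a single call. A hedged sketch of that layout, inferred from the three fields the old structures carried (the member names here are assumptions):

	struct vnt_phy_field {
		u8 signal;
		u8 service;
		__le16 len;
	} __packed;

The RTS and CTS trailers likewise reuse the generic struct ieee80211_rts and struct ieee80211_cts from <linux/ieee80211.h> in place of the driver-local SRTSDataF/SCTSDataF frame layouts.
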
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index 098be609107..3a03f1d5b68 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -421,7 +421,7 @@ static void s_nsInterruptUsbIoCompleteRead(struct urb *urb)
*
*/
-int PIPEnsBulkInUsbRead(struct vnt_private *pDevice, PRCB pRCB)
+int PIPEnsBulkInUsbRead(struct vnt_private *pDevice, struct vnt_rcb *pRCB)
{
int ntStatus = 0;
struct urb *pUrb;
@@ -479,7 +479,7 @@ int PIPEnsBulkInUsbRead(struct vnt_private *pDevice, PRCB pRCB)
static void s_nsBulkInUsbIoCompleteRead(struct urb *urb)
{
- PRCB pRCB = (PRCB)urb->context;
+ struct vnt_rcb *pRCB = (struct vnt_rcb *)urb->context;
struct vnt_private *pDevice = pRCB->pDevice;
unsigned long bytesRead;
int bIndicateReceive = false;
@@ -546,7 +546,8 @@ static void s_nsBulkInUsbIoCompleteRead(struct urb *urb)
*
*/
-int PIPEnsSendBulkOut(struct vnt_private *pDevice, PUSB_SEND_CONTEXT pContext)
+int PIPEnsSendBulkOut(struct vnt_private *pDevice,
+ struct vnt_usb_send_context *pContext)
{
int status;
struct urb *pUrb;
@@ -628,14 +629,13 @@ static void s_nsBulkOutIoCompleteWrite(struct urb *urb)
int status;
CONTEXT_TYPE ContextType;
unsigned long ulBufLen;
- PUSB_SEND_CONTEXT pContext;
+ struct vnt_usb_send_context *pContext;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsBulkOutIoCompleteWrite\n");
//
// The context given to IoSetCompletionRoutine is an USB_CONTEXT struct
//
- pContext = (PUSB_SEND_CONTEXT) urb->context;
- ASSERT( NULL != pContext );
+ pContext = (struct vnt_usb_send_context *)urb->context;
pDevice = pContext->pDevice;
ContextType = pContext->Type;
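
With the ASSERT gone, the completion handler simply casts urb->context back to the send context it was submitted with; that pointer is established when the bulk urb is filled. A generic sketch of the usual pattern (endpoint, buffer, and length names here are placeholders, not the driver's exact call):

	usb_fill_bulk_urb(pUrb, pDevice->usb,
			usb_sndbulkpipe(pDevice->usb, ep_out),
			pContext->Data, buf_len,
			s_nsBulkOutIoCompleteWrite, pContext);
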
diff --git a/drivers/staging/vt6656/usbpipe.h b/drivers/staging/vt6656/usbpipe.h
index bb7a61111a6..f53770329e7 100644
--- a/drivers/staging/vt6656/usbpipe.h
+++ b/drivers/staging/vt6656/usbpipe.h
@@ -40,7 +40,8 @@ int PIPEnsControlIn(struct vnt_private *, u8 byRequest, u16 wValue,
u16 wIndex, u16 wLength, u8 *pbyBuffer);
int PIPEnsInterruptRead(struct vnt_private *);
-int PIPEnsBulkInUsbRead(struct vnt_private *, PRCB pRCB);
-int PIPEnsSendBulkOut(struct vnt_private *, PUSB_SEND_CONTEXT pContext);
+int PIPEnsBulkInUsbRead(struct vnt_private *, struct vnt_rcb *pRCB);
+int PIPEnsSendBulkOut(struct vnt_private *,
+ struct vnt_usb_send_context *pContext);
#endif /* __USBPIPE_H__ */
diff --git a/drivers/staging/vt6656/wmgr.c b/drivers/staging/vt6656/wmgr.c
index 6d1ff5eeafa..b6cbd138a2b 100644
--- a/drivers/staging/vt6656/wmgr.c
+++ b/drivers/staging/vt6656/wmgr.c
@@ -751,7 +751,6 @@ static void s_vMgrRxAssocResponse(struct vnt_private *pDevice,
|| (sFrame.pwStatus == NULL)
|| (sFrame.pwAid == NULL)
|| (sFrame.pSuppRates == NULL)) {
- DBG_PORT80(0xCC);
return;
}
@@ -3750,7 +3749,6 @@ static void s_vMgrRxProbeResponse(struct vnt_private *pDevice,
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Probe resp:Fail addr:[%p]\n",
pRxPacket->p80211Header);
- DBG_PORT80(0xCC);
return;
}
diff --git a/drivers/staging/winbond/mds.c b/drivers/staging/winbond/mds.c
index faa93f0ee10..fcc3d2165ba 100644
--- a/drivers/staging/winbond/mds.c
+++ b/drivers/staging/winbond/mds.c
@@ -15,7 +15,8 @@ Mds_initial(struct wbsoft_priv *adapter)
return hal_get_tx_buffer(&adapter->sHwData, &pMds->pTxBuffer);
}
-static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor *pDes, u8 *buffer)
+static void Mds_DurationSet(struct wbsoft_priv *adapter,
+ struct wb35_descriptor *pDes, u8 *buffer)
{
struct T00_descriptor *pT00;
struct T01_descriptor *pT01;
@@ -43,10 +44,11 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor
* Set RTS/CTS mechanism
******************************************/
if (!boGroupAddr) {
- /* NOTE : If the protection mode is enabled and the MSDU will be fragmented,
- * the tx rates of MPDUs will all be DSSS rates. So it will not use
- * CTS-to-self in this case. CTS-To-self will only be used when without
- * fragmentation. -- 20050112 */
+ /* NOTE : If the protection mode is enabled and the MSDU will
+ * be fragmented, the tx rates of MPDUs will all be DSSS
+ * rates. So it will not use CTS-to-self in this case.
+ * CTS-To-self will only be used when without
+ * fragmentation. -- 20050112 */
BodyLen = (u16)pT00->T00_frame_length; /* include 802.11 header */
BodyLen += 4; /* CRC */
@@ -90,8 +92,8 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor
* CTS Rate : 24 Mega bps
* CTS frame length = 14 bytes */
Duration += (DEFAULT_SIFSTIME +
- PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION +
- ((112 + 22 + 95)/96)*Tsym);
+ PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION +
+ ((112 + 22 + 95)/96)*Tsym);
} else {
/* CTS + 1 SIFS + CTS duration
* CTS Rate : ?? Mega bps
@@ -101,7 +103,8 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor
else
Duration += SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME;
- Duration += (((112 + Rate-1) / Rate) + DEFAULT_SIFSTIME);
+ Duration += (((112 + Rate-1) / Rate) +
+ DEFAULT_SIFSTIME);
}
}
@@ -127,9 +130,10 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor
* Rate : ??Mega bps
* ACK frame length = 14 bytes, tx rate = 24M */
Duration = PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION * 3;
- Duration += (((NextBodyLen*8 + 22 + Rate*4 - 1)/(Rate*4)) * Tsym +
- (((2*14)*8 + 22 + 95)/96)*Tsym +
- DEFAULT_SIFSTIME*3);
+ Duration += (((NextBodyLen*8 + 22 + Rate*4 - 1)
+ /(Rate*4)) * Tsym +
+ (((2*14)*8 + 22 + 95)/96)*Tsym +
+ DEFAULT_SIFSTIME*3);
} else {
/* DSSS
* data transmit time + 2 ACK + 3 SIFS
@@ -141,8 +145,9 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor
else
Duration = SHORT_PREAMBLE_PLUS_PLCPHEADER_TIME*3;
- Duration += (((NextBodyLen + (2*14))*8 + Rate-1) / Rate +
- DEFAULT_SIFSTIME*3);
+ Duration += (((NextBodyLen + (2*14))*8
+ + Rate-1) / Rate +
+ DEFAULT_SIFSTIME*3);
}
((u16 *)buffer)[5] = cpu_to_le16(Duration); /* 4 USHOR for skip 8B USB, 2USHORT=FC + Duration */
@@ -168,7 +173,8 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor
* ACK frame length = 14 bytes */
Duration = PREAMBLE_PLUS_SIGNAL_PLUS_SIGNALEXTENSION;
/* The Tx rate of ACK use 24M */
- Duration += (((112 + 22 + 95)/96)*Tsym + DEFAULT_SIFSTIME);
+ Duration += (((112 + 22 + 95)/96)*Tsym +
+ DEFAULT_SIFSTIME);
} else {
/* DSSS
* 1 ACK + 1 SIFS
@@ -191,7 +197,8 @@ static void Mds_DurationSet(struct wbsoft_priv *adapter, struct wb35_descriptor
}
/* The function return the 4n size of usb pk */
-static u16 Mds_BodyCopy(struct wbsoft_priv *adapter, struct wb35_descriptor *pDes, u8 *TargetBuffer)
+static u16 Mds_BodyCopy(struct wbsoft_priv *adapter,
+ struct wb35_descriptor *pDes, u8 *TargetBuffer)
{
struct T00_descriptor *pT00;
struct wb35_mds *pMds = &adapter->Mds;
@@ -246,7 +253,7 @@ static u16 Mds_BodyCopy(struct wbsoft_priv *adapter, struct wb35_descriptor *pDe
buf_index++;
buf_index %= MAX_DESCRIPTOR_BUFFER_INDEX;
} else {
- u8 *pctmp = pDes->buffer_address[buf_index];
+ u8 *pctmp = pDes->buffer_address[buf_index];
pctmp += CopySize;
pDes->buffer_address[buf_index] = pctmp;
pDes->buffer_size[buf_index] -= CopySize;
@@ -290,7 +297,8 @@ static u16 Mds_BodyCopy(struct wbsoft_priv *adapter, struct wb35_descriptor *pDe
return Size;
}
-static void Mds_HeaderCopy(struct wbsoft_priv *adapter, struct wb35_descriptor *pDes, u8 *TargetBuffer)
+static void Mds_HeaderCopy(struct wbsoft_priv *adapter,
+ struct wb35_descriptor *pDes, u8 *TargetBuffer)
{
struct wb35_mds *pMds = &adapter->Mds;
u8 *src_buffer = pDes->buffer_address[0]; /* 931130.5.g */
@@ -391,11 +399,12 @@ static void Mds_HeaderCopy(struct wbsoft_priv *adapter, struct wb35_descriptor *
pDes->PreambleMode = WLAN_PREAMBLE_TYPE_LONG;
else
pDes->PreambleMode = CURRENT_PREAMBLE_MODE;
- pT01->T01_plcp_header_length = pDes->PreambleMode; /* Set preamble */
+ pT01->T01_plcp_header_length = pDes->PreambleMode; /* Set preamble */
}
-static void MLME_GetNextPacket(struct wbsoft_priv *adapter, struct wb35_descriptor *desc)
+static void MLME_GetNextPacket(struct wbsoft_priv *adapter,
+ struct wb35_descriptor *desc)
{
desc->InternalUsed = desc->buffer_start_index + desc->buffer_number;
desc->InternalUsed %= MAX_DESCRIPTOR_BUFFER_INDEX;
@@ -423,7 +432,8 @@ static void MLMEfreeMMPDUBuffer(struct wbsoft_priv *adapter, s8 *pData)
}
}
-static void MLME_SendComplete(struct wbsoft_priv *adapter, u8 PacketID, unsigned char SendOK)
+static void MLME_SendComplete(struct wbsoft_priv *adapter, u8 PacketID,
+ unsigned char SendOK)
{
/* Reclaim the data buffer */
adapter->sMlmeFrame.len = 0;
@@ -440,9 +450,9 @@ Mds_Tx(struct wbsoft_priv *adapter)
struct wb35_mds *pMds = &adapter->Mds;
struct wb35_descriptor TxDes;
struct wb35_descriptor *pTxDes = &TxDes;
- u8 *XmitBufAddress;
- u16 XmitBufSize, PacketSize, stmp, CurrentSize, FragmentThreshold;
- u8 FillIndex, TxDesIndex, FragmentCount, FillCount;
+ u8 *XmitBufAddress;
+ u16 XmitBufSize, PacketSize, stmp, CurrentSize, FragmentThreshold;
+ u8 FillIndex, TxDesIndex, FragmentCount, FillCount;
unsigned char BufferFilled = false;
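
The duration terms reflowed above follow the usual 802.11 timing arithmetic: at the fixed 24 Mbit/s OFDM control rate each 4 us symbol carries 96 data bits, and a 14-byte CTS or ACK contributes 14*8 = 112 bits plus the 16 service and 6 tail bits, so the symbol count is the rounded-up quotient (112 + 22 + 95) / 96 that appears in the code. A one-line restatement of that ceiling division (illustrative only):

	/* symbols = ceil((bits + 22) / 96), written as (bits + 22 + 95) / 96 in integer math */
	u32 ofdm_symbols = (14 * 8 + 22 + 96 - 1) / 96;	/* = 2 for a CTS/ACK frame */
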
diff --git a/drivers/staging/winbond/mds_f.h b/drivers/staging/winbond/mds_f.h
index ce8be079e95..159b2eb366e 100644
--- a/drivers/staging/winbond/mds_f.h
+++ b/drivers/staging/winbond/mds_f.h
@@ -7,13 +7,16 @@
unsigned char Mds_initial(struct wbsoft_priv *adapter);
void Mds_Tx(struct wbsoft_priv *adapter);
void Mds_SendComplete(struct wbsoft_priv *adapter, struct T02_descriptor *pt02);
-void Mds_MpduProcess(struct wbsoft_priv *adapter, struct wb35_descriptor *prxdes);
-extern void DataDmp(u8 *pdata, u32 len, u32 offset);
+void Mds_MpduProcess(struct wbsoft_priv *adapter,
+ struct wb35_descriptor *prxdes);
/* For data frame sending */
u16 MDS_GetPacketSize(struct wbsoft_priv *adapter);
-void MDS_GetNextPacket(struct wbsoft_priv *adapter, struct wb35_descriptor *pdes);
-void MDS_GetNextPacketComplete(struct wbsoft_priv *adapter, struct wb35_descriptor *pdes);
-void MDS_SendResult(struct wbsoft_priv *adapter, u8 packetid, unsigned char sendok);
+void MDS_GetNextPacket(struct wbsoft_priv *adapter,
+ struct wb35_descriptor *pdes);
+void MDS_GetNextPacketComplete(struct wbsoft_priv *adapter,
+ struct wb35_descriptor *pdes);
+void MDS_SendResult(struct wbsoft_priv *adapter, u8 packetid,
+ unsigned char sendok);
#endif
diff --git a/drivers/staging/winbond/phy_calibration.h b/drivers/staging/winbond/phy_calibration.h
index 84f6e840a47..78fc6805860 100644
--- a/drivers/staging/winbond/phy_calibration.h
+++ b/drivers/staging/winbond/phy_calibration.h
@@ -79,6 +79,7 @@
#define SHIFT_IQCAL_TONE_Q(x) ((x) >> 13)
void phy_set_rf_data(struct hw_data *pHwData, u32 index, u32 value);
+void phy_calibration_winbond(struct hw_data *phw_data, u32 frequency);
#define phy_init_rf(_A) /* RFSynthesizer_initial(_A) */
#endif
diff --git a/drivers/staging/winbond/wb35reg.c b/drivers/staging/winbond/wb35reg.c
index 9be1b3b004b..a5e255bb0f8 100644
--- a/drivers/staging/winbond/wb35reg.c
+++ b/drivers/staging/winbond/wb35reg.c
@@ -1,10 +1,9 @@
#include "wb35reg_f.h"
+#include "phy_calibration.h"
#include <linux/usb.h>
#include <linux/slab.h>
-extern void phy_calibration_winbond(struct hw_data *phw_data, u32 frequency);
-
/*
 * true  : command processed successfully
 * false : register not supported
@@ -14,7 +13,8 @@ extern void phy_calibration_winbond(struct hw_data *phw_data, u32 frequency);
* Flag : AUTO_INCREMENT - RegisterNo will auto increment 4
* NO_INCREMENT - Function will write data into the same register
*/
-unsigned char Wb35Reg_BurstWrite(struct hw_data *pHwData, u16 RegisterNo, u32 *pRegisterData, u8 NumberOfData, u8 Flag)
+unsigned char Wb35Reg_BurstWrite(struct hw_data *pHwData, u16 RegisterNo,
+ u32 *pRegisterData, u8 NumberOfData, u8 Flag)
{
struct wb35_reg *reg = &pHwData->reg;
struct urb *urb = NULL;
@@ -44,7 +44,7 @@ unsigned char Wb35Reg_BurstWrite(struct hw_data *pHwData, u16 RegisterNo, u32 *p
reg_queue->pBuffer = (u32 *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue));
memcpy(reg_queue->pBuffer, pRegisterData, DataSize);
 /* convert the register data to little-endian byte order for the USB transfer */
- for (i = 0; i < NumberOfData ; i++)
+ for (i = 0; i < NumberOfData; i++)
reg_queue->pBuffer[i] = cpu_to_le32(reg_queue->pBuffer[i]);
dr = (struct usb_ctrlrequest *)((u8 *)reg_queue + sizeof(struct wb35_reg_queue) + DataSize);
@@ -72,7 +72,7 @@ unsigned char Wb35Reg_BurstWrite(struct hw_data *pHwData, u16 RegisterNo, u32 *p
return true;
}
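The AUTO_INCREMENT/NO_INCREMENT flags described in the comment above decide whether
successive 32-bit words go to consecutive registers or are all written to the same
one. A minimal usage sketch follows; the register number 0x0400 and the wrapper
function are hypothetical and only illustrate the two flag modes, assuming the
declarations from wb35reg_f.h:

/* Hypothetical caller, for illustration only. */
static void example_burst_writes(struct hw_data *pHwData)
{
	u32 values[4] = { 0x0, 0x1, 0x2, 0x3 };

	/* AUTO_INCREMENT: values[0..3] are written to registers 0x0400,
	 * 0x0404, 0x0408 and 0x040C (RegisterNo advances by 4 per word). */
	Wb35Reg_BurstWrite(pHwData, 0x0400, values, 4, AUTO_INCREMENT);

	/* NO_INCREMENT: all four words are written to register 0x0400. */
	Wb35Reg_BurstWrite(pHwData, 0x0400, values, 4, NO_INCREMENT);
}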
-void Wb35Reg_Update(struct hw_data *pHwData, u16 RegisterNo, u32 RegisterValue)
+void Wb35Reg_Update(struct hw_data *pHwData, u16 RegisterNo, u32 RegisterValue)
{
struct wb35_reg *reg = &pHwData->reg;
switch (RegisterNo) {
@@ -118,7 +118,8 @@ void Wb35Reg_Update(struct hw_data *pHwData, u16 RegisterNo, u32 RegisterValue
 * true  : command processed successfully
 * false : register not supported
*/
-unsigned char Wb35Reg_WriteSync(struct hw_data *pHwData, u16 RegisterNo, u32 RegisterValue)
+unsigned char Wb35Reg_WriteSync(struct hw_data *pHwData, u16 RegisterNo,
+ u32 RegisterValue)
{
struct wb35_reg *reg = &pHwData->reg;
int ret = -1;
@@ -139,9 +140,10 @@ unsigned char Wb35Reg_WriteSync(struct hw_data *pHwData, u16 RegisterNo, u32 Reg
/* Sync IoCallDriver */
reg->EP0vm_state = VM_RUNNING;
ret = usb_control_msg(pHwData->udev,
- usb_sndctrlpipe(pHwData->udev, 0),
- 0x03, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
- 0x0, RegisterNo, &RegisterValue, 4, HZ * 100);
+ usb_sndctrlpipe(pHwData->udev, 0),
+ 0x03,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+ 0x0, RegisterNo, &RegisterValue, 4, HZ * 100);
reg->EP0vm_state = VM_STOP;
reg->SyncIoPause = 0;
@@ -159,7 +161,8 @@ unsigned char Wb35Reg_WriteSync(struct hw_data *pHwData, u16 RegisterNo, u32 Reg
 * true  : command processed successfully
 * false : register not supported
*/
-unsigned char Wb35Reg_Write(struct hw_data *pHwData, u16 RegisterNo, u32 RegisterValue)
+unsigned char Wb35Reg_Write(struct hw_data *pHwData, u16 RegisterNo,
+ u32 RegisterValue)
{
struct wb35_reg *reg = &pHwData->reg;
struct usb_ctrlrequest *dr;
@@ -286,7 +289,8 @@ unsigned char Wb35Reg_WriteWithCallbackValue(struct hw_data *pHwData,
 * pRegisterValue : It must be a resident buffer, because the
 *                  register is read asynchronously.
*/
-unsigned char Wb35Reg_ReadSync(struct hw_data *pHwData, u16 RegisterNo, u32 *pRegisterValue)
+unsigned char Wb35Reg_ReadSync(struct hw_data *pHwData, u16 RegisterNo,
+ u32 *pRegisterValue)
{
struct wb35_reg *reg = &pHwData->reg;
u32 *pltmp = pRegisterValue;
@@ -305,9 +309,10 @@ unsigned char Wb35Reg_ReadSync(struct hw_data *pHwData, u16 RegisterNo, u32 *pRe
reg->EP0vm_state = VM_RUNNING;
ret = usb_control_msg(pHwData->udev,
- usb_rcvctrlpipe(pHwData->udev, 0),
- 0x01, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- 0x0, RegisterNo, pltmp, 4, HZ * 100);
+ usb_rcvctrlpipe(pHwData->udev, 0),
+ 0x01,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+ 0x0, RegisterNo, pltmp, 4, HZ * 100);
*pRegisterValue = cpu_to_le32(*pltmp);
@@ -332,7 +337,8 @@ unsigned char Wb35Reg_ReadSync(struct hw_data *pHwData, u16 RegisterNo, u32 *pRe
 * pRegisterValue : It must be a resident buffer, because the
 *                  register is read asynchronously.
*/
-unsigned char Wb35Reg_Read(struct hw_data *pHwData, u16 RegisterNo, u32 *pRegisterValue)
+unsigned char Wb35Reg_Read(struct hw_data *pHwData, u16 RegisterNo,
+ u32 *pRegisterValue)
{
struct wb35_reg *reg = &pHwData->reg;
struct usb_ctrlrequest *dr;
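Per the comment above, the register value is filled in asynchronously, so the
destination passed to Wb35Reg_Read() has to outlive the call. A minimal sketch of
what that implies for callers; the context structure, its fields and the register
number 0x0400 are hypothetical:

/* Hypothetical container; the u32 member is the resident destination. */
struct example_read_ctx {
	struct hw_data *pHwData;
	u32 reg_value;
};

static void example_async_read(struct example_read_ctx *ctx)
{
	/* OK: &ctx->reg_value remains valid after this call returns. */
	Wb35Reg_Read(ctx->pHwData, 0x0400, &ctx->reg_value);

	/* Passing the address of a local u32 here instead would be unsafe:
	 * it could go out of scope before the asynchronous read completes. */
}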
diff --git a/drivers/staging/wlags49_h2/Makefile b/drivers/staging/wlags49_h2/Makefile
index 31e1d89a384..6eeb5d1845e 100644
--- a/drivers/staging/wlags49_h2/Makefile
+++ b/drivers/staging/wlags49_h2/Makefile
@@ -51,5 +51,3 @@ $(WLNAME)-y += wl_profile.o \
mmd.o \
hcf.o \
dhf.o
-
-$(WLNAME)-$(CONFIG_SYSFS) += wl_sysfs.o
diff --git a/drivers/staging/wlags49_h2/wl_cs.c b/drivers/staging/wlags49_h2/wl_cs.c
index b55dc43a1d1..a458705a379 100644
--- a/drivers/staging/wlags49_h2/wl_cs.c
+++ b/drivers/staging/wlags49_h2/wl_cs.c
@@ -99,7 +99,6 @@
#include <wl_main.h>
#include <wl_netdev.h>
#include <wl_cs.h>
-#include <wl_sysfs.h>
/*******************************************************************************
@@ -178,7 +177,6 @@ static void wl_adapter_detach(struct pcmcia_device *link)
wl_adapter_release(link);
if (dev) {
- unregister_wlags_sysfs(dev);
unregister_netdev(dev);
wl_device_dealloc(dev);
}
@@ -265,8 +263,6 @@ int wl_adapter_insert(struct pcmcia_device *link)
goto failed;
}
- register_wlags_sysfs(dev);
-
printk(KERN_INFO "%s: Wireless, io_addr %#03lx, irq %d, mac_address"
" %pM\n", dev->name, dev->base_addr, dev->irq, dev->dev_addr);
diff --git a/drivers/staging/wlags49_h2/wl_internal.h b/drivers/staging/wlags49_h2/wl_internal.h
index b2307816414..78129e93920 100644
--- a/drivers/staging/wlags49_h2/wl_internal.h
+++ b/drivers/staging/wlags49_h2/wl_internal.h
@@ -883,7 +883,6 @@ struct wl_private
int is_registered;
int is_handling_int;
int firmware_present;
- bool sysfsCreated;
CFG_DRV_INFO_STRCT driverInfo;
CFG_IDENTITY_STRCT driverIdentity;
CFG_FW_IDENTITY_STRCT StationIdentity;
diff --git a/drivers/staging/wlags49_h2/wl_sysfs.c b/drivers/staging/wlags49_h2/wl_sysfs.c
deleted file mode 100644
index 1508f04b3c6..00000000000
--- a/drivers/staging/wlags49_h2/wl_sysfs.c
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * ex: sw=4
- */
-
-#include <linux/kernel.h>
-#include <linux/netdevice.h>
-#include <linux/if_arp.h>
-#include <net/sock.h>
-#include <linux/rtnetlink.h>
-#include <linux/wireless.h>
-#include <net/iw_handler.h>
-#include <linux/sysfs.h>
-
-#include <debug.h>
-#include <hcf.h>
-#include <hcfdef.h>
-
-#include <wl_if.h>
-#include <wl_internal.h>
-#include <wl_util.h>
-#include <wl_main.h>
-#include <wl_wext.h>
-#include <wl_priv.h>
-
-static inline int dev_isalive(const struct net_device *dev)
-{
- return dev->reg_state == NETREG_REGISTERED;
-}
-
-/*
- * empirically even if tallies are defined as 32 bits entities, only
- * high 16 bits are relevant; low half is always zero. It means tallies
- * are pretty much useless for traffic counting but at least give overview
- * about where error come from
- */
-static ssize_t show_tallies(struct device *d, struct device_attribute *attr,
- char *buf)
-{
- struct net_device *dev = to_net_dev(d);
- struct wl_private *lp = wl_priv(dev);
- unsigned long flags;
- CFG_HERMES_TALLIES_STRCT tallies;
- ssize_t ret = -EINVAL;
-
- rcu_read_lock();
- if (dev_isalive(dev)) {
- wl_lock(lp, &flags);
-
- ret = wl_get_tallies(lp, &tallies);
- if (ret == 0) {
- wl_unlock(lp, &flags);
- ret = snprintf(buf, PAGE_SIZE,
- "TxUnicastFrames: %u\n"
- "TxMulticastFrames: %u\n"
- "TxFragments: %u\n"
- "TxUnicastOctets: %u\n"
- "TxMulticastOctets: %u\n"
- "TxDeferredTransmissions: %u\n"
- "TxSingleRetryFrames: %u\n"
- "TxMultipleRetryFrames: %u\n"
- "TxRetryLimitExceeded: %u\n"
- "TxDiscards: %u\n"
- "RxUnicastFrames: %u\n"
- "RxMulticastFrames: %u\n"
- "RxFragments: %u\n"
- "RxUnicastOctets: %u\n"
- "RxMulticastOctets: %u\n"
- "RxFCSErrors: %u\n"
- "RxDiscardsNoBuffer: %u\n"
- "TxDiscardsWrongSA: %u\n"
- "RxWEPUndecryptable: %u\n"
- "RxMsgInMsgFragments: %u\n"
- "RxMsgInBadMsgFragments: %u\n"
- "RxDiscardsWEPICVError: %u\n"
- "RxDiscardsWEPExcluded: %u\n"
- ,
- (unsigned int)tallies.TxUnicastFrames,
- (unsigned int)tallies.TxMulticastFrames,
- (unsigned int)tallies.TxFragments,
- (unsigned int)tallies.TxUnicastOctets,
- (unsigned int)tallies.TxMulticastOctets,
- (unsigned int)tallies.TxDeferredTransmissions,
- (unsigned int)tallies.TxSingleRetryFrames,
- (unsigned int)tallies.TxMultipleRetryFrames,
- (unsigned int)tallies.TxRetryLimitExceeded,
- (unsigned int)tallies.TxDiscards,
- (unsigned int)tallies.RxUnicastFrames,
- (unsigned int)tallies.RxMulticastFrames,
- (unsigned int)tallies.RxFragments,
- (unsigned int)tallies.RxUnicastOctets,
- (unsigned int)tallies.RxMulticastOctets,
- (unsigned int)tallies.RxFCSErrors,
- (unsigned int)tallies.RxDiscardsNoBuffer,
- (unsigned int)tallies.TxDiscardsWrongSA,
- (unsigned int)tallies.RxWEPUndecryptable,
- (unsigned int)tallies.RxMsgInMsgFragments,
- (unsigned int)tallies.RxMsgInBadMsgFragments,
- (unsigned int)tallies.RxDiscardsWEPICVError,
- (unsigned int)tallies.RxDiscardsWEPExcluded);
- } else {
- wl_unlock( lp, &flags );
- }
- }
-
- rcu_read_unlock();
- return ret;
-}
-
-static DEVICE_ATTR(tallies, S_IRUGO, show_tallies, NULL);
-
-static struct attribute *wlags_attrs[] = {
- &dev_attr_tallies.attr,
- NULL
-};
-
-static struct attribute_group wlags_group = {
- .name = "wlags",
- .attrs = wlags_attrs,
-};
-
-void register_wlags_sysfs(struct net_device *net)
-{
- struct device *dev = &(net->dev);
- struct wl_private *lp = wl_priv(net);
- int err;
- err = sysfs_create_group(&dev->kobj, &wlags_group);
- if (!err)
- lp->sysfsCreated = true;
-}
-
-void unregister_wlags_sysfs(struct net_device *net)
-{
- struct device *dev = &(net->dev);
- struct wl_private *lp = wl_priv(net);
-
- if (lp->sysfsCreated)
- sysfs_remove_group(&dev->kobj, &wlags_group);
-}
diff --git a/drivers/staging/wlags49_h2/wl_sysfs.h b/drivers/staging/wlags49_h2/wl_sysfs.h
deleted file mode 100644
index fa658c38001..00000000000
--- a/drivers/staging/wlags49_h2/wl_sysfs.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifdef CONFIG_SYSFS
-extern void register_wlags_sysfs(struct net_device *);
-extern void unregister_wlags_sysfs(struct net_device *);
-#else
-static inline void register_wlags_sysfs(struct net_device *net) { }
-static inline void unregister_wlags_sysfs(struct net_device *net) { }
-#endif
diff --git a/drivers/staging/wlags49_h25/Makefile b/drivers/staging/wlags49_h25/Makefile
index 6e0159d0a34..513ba01c2d5 100644
--- a/drivers/staging/wlags49_h25/Makefile
+++ b/drivers/staging/wlags49_h25/Makefile
@@ -50,6 +50,3 @@ $(WLNAME)-y += wl_profile.o \
mmd.o \
hcf.o \
dhf.o
-
-$(WLNAME)-$(CONFIG_SYSFS) += wl_sysfs.o
-
diff --git a/drivers/staging/wlags49_h25/wl_sysfs.c b/drivers/staging/wlags49_h25/wl_sysfs.c
deleted file mode 100644
index 6458ee63350..00000000000
--- a/drivers/staging/wlags49_h25/wl_sysfs.c
+++ /dev/null
@@ -1,2 +0,0 @@
-/* Use common source from wlags49_h2 */
-#include "../wlags49_h2/wl_sysfs.c"
diff --git a/drivers/staging/wlags49_h25/wl_sysfs.h b/drivers/staging/wlags49_h25/wl_sysfs.h
deleted file mode 100644
index eb819a5ef8b..00000000000
--- a/drivers/staging/wlags49_h25/wl_sysfs.h
+++ /dev/null
@@ -1,2 +0,0 @@
-/* Use common source from wlags49_h2 */
-#include "../wlags49_h2/wl_sysfs.h"
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 801ac4053a7..3b3e17d3f6f 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -60,7 +60,7 @@ static inline void dumpVGAReg(void)
static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
struct xgi_hw_device_info *HwDeviceExtension,
- unsigned char modeno, unsigned char rateindex)
+ unsigned char modeno)
{
unsigned short ModeNo = modeno;
unsigned short ModeIdIndex = 0, ClockIndex = 0;
@@ -68,7 +68,7 @@ static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
int Clock;
InitTo330Pointer(HwDeviceExtension->jChipType, XGI_Pr);
- XGI_SearchModeID(ModeNo, &ModeIdIndex, XGI_Pr);
+ XGI_SearchModeID(ModeNo, &ModeIdIndex);
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
ModeIdIndex, XGI_Pr);
@@ -82,7 +82,7 @@ static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
struct xgi_hw_device_info *HwDeviceExtension,
- unsigned char modeno, unsigned char rateindex,
+ unsigned char modeno,
u32 *left_margin, u32 *right_margin, u32 *upper_margin,
u32 *lower_margin, u32 *hsync_len, u32 *vsync_len, u32 *sync,
u32 *vmode)
@@ -96,7 +96,7 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
unsigned char sr_data, cr_data, cr_data2;
int B, C, D, F, temp, j;
InitTo330Pointer(HwDeviceExtension->jChipType, XGI_Pr);
- if (!XGI_SearchModeID(ModeNo, &ModeIdIndex, XGI_Pr))
+ if (!XGI_SearchModeID(ModeNo, &ModeIdIndex))
return 0;
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
ModeIdIndex, XGI_Pr);
@@ -1980,12 +1980,10 @@ static int xgifb_probe(struct pci_dev *pdev,
fb_info->var.pixclock = (u32) (1000000000 /
XGIfb_mode_rate_to_dclock(&xgifb_info->dev_info,
hw_info,
- XGIbios_mode[xgifb_info->mode_idx].mode_no,
- xgifb_info->rate_idx));
+ XGIbios_mode[xgifb_info->mode_idx].mode_no));
if (XGIfb_mode_rate_to_ddata(&xgifb_info->dev_info, hw_info,
XGIbios_mode[xgifb_info->mode_idx].mode_no,
- xgifb_info->rate_idx,
&fb_info->var.left_margin,
&fb_info->var.right_margin,
&fb_info->var.upper_margin,
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 5f1c41ed778..21541720e05 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -102,10 +102,8 @@ static void XGINew_DDR1x_MRS_340(unsigned long P3c4,
xgifb_reg_set(P3c4, 0x1B, 0x00);
}
-static void XGINew_SetMemoryClock(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+static void XGINew_SetMemoryClock(struct vb_device_info *pVBInfo)
{
-
xgifb_reg_set(pVBInfo->P3c4,
0x28,
pVBInfo->MCLKData[pVBInfo->ram_type].SR28);
@@ -133,7 +131,7 @@ static void XGINew_DDRII_Bootup_XG27(
{
unsigned long P3d4 = P3c4 + 0x10;
pVBInfo->ram_type = XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo);
- XGINew_SetMemoryClock(HwDeviceExtension, pVBInfo);
+ XGINew_SetMemoryClock(pVBInfo);
/* Set Double Frequency */
xgifb_reg_set(P3d4, 0x97, pVBInfo->XGINew_CR97); /* CR97 */
@@ -206,7 +204,7 @@ static void XGINew_DDR2_MRS_XG20(struct xgi_hw_device_info *HwDeviceExtension,
unsigned long P3d4 = P3c4 + 0x10;
pVBInfo->ram_type = XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo);
- XGINew_SetMemoryClock(HwDeviceExtension, pVBInfo);
+ XGINew_SetMemoryClock(pVBInfo);
xgifb_reg_set(P3d4, 0x97, 0x11); /* CR97 */
@@ -280,7 +278,7 @@ static void XGINew_DDR1x_DefaultRegister(
unsigned long P3d4 = Port, P3c4 = Port - 0x10;
if (HwDeviceExtension->jChipType >= XG20) {
- XGINew_SetMemoryClock(HwDeviceExtension, pVBInfo);
+ XGINew_SetMemoryClock(pVBInfo);
xgifb_reg_set(P3d4,
0x82,
pVBInfo->CR40[11][pVBInfo->ram_type]); /* CR82 */
@@ -296,7 +294,7 @@ static void XGINew_DDR1x_DefaultRegister(
XGINew_DDR1x_MRS_XG20(P3c4, pVBInfo);
} else {
- XGINew_SetMemoryClock(HwDeviceExtension, pVBInfo);
+ XGINew_SetMemoryClock(pVBInfo);
switch (HwDeviceExtension->jChipType) {
case XG42:
@@ -876,8 +874,7 @@ done:
return rom_copy;
}
-static bool xgifb_read_vbios(struct pci_dev *pdev,
- struct vb_device_info *pVBInfo)
+static bool xgifb_read_vbios(struct pci_dev *pdev)
{
struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev);
u8 *vbios;
@@ -948,8 +945,7 @@ error:
return false;
}
-static void XGINew_ChkSenseStatus(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+static void XGINew_ChkSenseStatus(struct vb_device_info *pVBInfo)
{
unsigned short tempbx = 0, temp, tempcx, CR3CData;
@@ -991,8 +987,7 @@ static void XGINew_ChkSenseStatus(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_set(pVBInfo->P3d4, 0x3e, ((tempbx & 0xFF00) >> 8));
}
-static void XGINew_SetModeScratch(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+static void XGINew_SetModeScratch(struct vb_device_info *pVBInfo)
{
unsigned short temp, tempcl = 0, tempch = 0, CR31Data, CR38Data;
@@ -1102,7 +1097,7 @@ static void XGINew_GetXG21Sense(struct pci_dev *pdev,
struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev);
unsigned char Temp;
- if (xgifb_read_vbios(pdev, pVBInfo)) { /* For XG21 LVDS */
+ if (xgifb_read_vbios(pdev)) { /* For XG21 LVDS */
xgifb_reg_or(pVBInfo->P3d4, 0x32, LCDSense);
/* LVDS on chip */
xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xC0);
@@ -1126,8 +1121,7 @@ static void XGINew_GetXG21Sense(struct pci_dev *pdev,
}
}
-static void XGINew_GetXG27Sense(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+static void XGINew_GetXG27Sense(struct vb_device_info *pVBInfo)
{
unsigned char Temp, bCR4A;
@@ -1222,7 +1216,7 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
XGINew_GetXG21Sense(pdev, pVBInfo);
if (HwDeviceExtension->jChipType == XG27)
- XGINew_GetXG27Sense(HwDeviceExtension, pVBInfo);
+ XGINew_GetXG27Sense(pVBInfo);
/* Reset Extended register */
@@ -1294,7 +1288,7 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
if (HwDeviceExtension->jChipType < XG20) {
/* Set VB */
- XGI_UnLockCRT2(HwDeviceExtension, pVBInfo);
+ XGI_UnLockCRT2(pVBInfo);
/* disable VideoCapture */
xgifb_reg_and_or(pVBInfo->Part0Port, 0x3F, 0xEF, 0x00);
xgifb_reg_set(pVBInfo->Part1Port, 0x00, 0x00);
@@ -1334,7 +1328,7 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
xgifb_reg_set(pVBInfo->Part4Port,
0x10, XGI330_CRT2Data_4_10);
xgifb_reg_set(pVBInfo->Part4Port, 0x0F, 0x3F);
- XGI_LockCRT2(HwDeviceExtension, pVBInfo);
+ XGI_LockCRT2(pVBInfo);
}
} /* != XG20 */
@@ -1370,8 +1364,8 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
xgifb_reg_set(pVBInfo->P3c4, 0x22, 0xfa);
xgifb_reg_set(pVBInfo->P3c4, 0x21, 0xa3);
- XGINew_ChkSenseStatus(HwDeviceExtension, pVBInfo);
- XGINew_SetModeScratch(HwDeviceExtension, pVBInfo);
+ XGINew_ChkSenseStatus(pVBInfo);
+ XGINew_SetModeScratch(pVBInfo);
xgifb_reg_set(pVBInfo->P3d4, 0x8c, 0x87);
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index fcefe5b36cd..46dea3f1088 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -63,9 +63,7 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
}
-static void XGI_SetSeqRegs(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_SetSeqRegs(struct vb_device_info *pVBInfo)
{
unsigned char SRdata, i;
@@ -79,8 +77,7 @@ static void XGI_SetSeqRegs(unsigned short ModeNo,
}
}
-static void XGI_SetCRTCRegs(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+static void XGI_SetCRTCRegs(struct vb_device_info *pVBInfo)
{
unsigned char CRTCdata;
unsigned short i;
@@ -96,8 +93,7 @@ static void XGI_SetCRTCRegs(struct xgi_hw_device_info *HwDeviceExtension,
}
}
-static void XGI_SetATTRegs(unsigned short ModeNo,
- unsigned short ModeIdIndex,
+static void XGI_SetATTRegs(unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned char ARdata;
@@ -171,8 +167,7 @@ static unsigned char XGI_SetDefaultVCLK(struct vb_device_info *pVBInfo)
return 0;
}
-static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
- unsigned short ModeIdIndex,
+static unsigned char XGI_AjustCRT2Rate(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex, unsigned short *i,
struct vb_device_info *pVBInfo)
{
@@ -322,7 +317,6 @@ static void XGI_SetCRT1Timing_H(struct vb_device_info *pVBInfo,
}
static void XGI_SetCRT1Timing_V(unsigned short ModeIdIndex,
- unsigned short ModeNo,
struct vb_device_info *pVBInfo)
{
unsigned char data;
@@ -365,7 +359,7 @@ static void XGI_SetCRT1Timing_V(unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->P3d4, 0x09, data);
}
-static void XGI_SetCRT1CRTC(unsigned short ModeNo, unsigned short ModeIdIndex,
+static void XGI_SetCRT1CRTC(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo,
struct xgi_hw_device_info *HwDeviceExtension)
@@ -391,7 +385,7 @@ static void XGI_SetCRT1CRTC(unsigned short ModeNo, unsigned short ModeIdIndex,
XGI_SetCRT1Timing_H(pVBInfo, HwDeviceExtension);
- XGI_SetCRT1Timing_V(ModeIdIndex, ModeNo, pVBInfo);
+ XGI_SetCRT1Timing_V(ModeIdIndex, pVBInfo);
if (pVBInfo->ModeType > 0x03)
xgifb_reg_set(pVBInfo->P3d4, 0x14, 0x4F);
@@ -403,8 +397,7 @@ static void XGI_SetCRT1CRTC(unsigned short ModeNo, unsigned short ModeIdIndex,
/* Output : Fill CRT Hsync/Vsync to SR2E/SR2F/SR30/SR33/SR34/SR3F */
/* Description : Set LCD timing */
/* --------------------------------------------------------------------- */
-static void XGI_SetXG21CRTC(unsigned short ModeNo, unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
+static void XGI_SetXG21CRTC(unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
unsigned char index, Tempax, Tempbx, Tempcx, Tempdx;
@@ -500,9 +493,7 @@ static void XGI_SetXG21CRTC(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->P3c4, 0x3F, Tempax);
}
-static void XGI_SetXG27CRTC(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
+static void XGI_SetXG27CRTC(unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
unsigned short index, Tempax, Tempbx, Tempcx;
@@ -605,8 +596,7 @@ static void XGI_SetXG27FPBits(struct vb_device_info *pVBInfo)
static void xgifb_set_lcd(int chip_id,
struct vb_device_info *pVBInfo,
- unsigned short RefreshRateTableIndex,
- unsigned short ModeNo)
+ unsigned short RefreshRateTableIndex)
{
unsigned short temp;
@@ -687,8 +677,7 @@ static void XGI_UpdateXG21CRTC(unsigned short ModeNo,
}
}
-static void XGI_SetCRT1DE(struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short ModeNo, unsigned short ModeIdIndex,
+static void XGI_SetCRT1DE(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
@@ -834,10 +823,8 @@ static void XGI_SetCRT1Offset(unsigned short ModeNo,
xgifb_reg_set(pVBInfo->P3c4, 0x10, ah);
}
-static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
- unsigned short ModeIdIndex,
+static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short VCLKIndex, modeflag;
@@ -886,8 +873,7 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
return VCLKIndex;
}
-static void XGI_SetCRT1VCLK(unsigned short ModeNo,
- unsigned short ModeIdIndex,
+static void XGI_SetCRT1VCLK(unsigned short ModeIdIndex,
struct xgi_hw_device_info *HwDeviceExtension,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
@@ -899,9 +885,8 @@ static void XGI_SetCRT1VCLK(unsigned short ModeNo,
(pVBInfo->VBType & (VB_SIS301B | VB_SIS302B | VB_SIS301LV |
VB_SIS302LV | VB_XGI301C)) &&
(pVBInfo->VBInfo & XGI_SetCRT2ToLCDA)) {
- vclkindex = XGI_GetVCLK2Ptr(ModeNo, ModeIdIndex,
- RefreshRateTableIndex, HwDeviceExtension,
- pVBInfo);
+ vclkindex = XGI_GetVCLK2Ptr(ModeIdIndex, RefreshRateTableIndex,
+ pVBInfo);
data = xgifb_reg_get(pVBInfo->P3c4, 0x31) & 0xCF;
xgifb_reg_set(pVBInfo->P3c4, 0x31, data);
data = XGI_VBVCLKData[vclkindex].Part4_A;
@@ -948,9 +933,8 @@ static void XGI_SetXG21FPBits(struct vb_device_info *pVBInfo)
}
-static void XGI_SetCRT1FIFO(unsigned short ModeNo,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+static void XGI_SetCRT1FIFO(struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
unsigned short data;
@@ -971,7 +955,7 @@ static void XGI_SetCRT1FIFO(unsigned short ModeNo,
}
static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short ModeNo, unsigned short RefreshRateTableIndex,
+ unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
unsigned short data, data2 = 0;
@@ -1010,7 +994,7 @@ static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension,
}
static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short ModeNo, unsigned short ModeIdIndex,
+ unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
@@ -1063,8 +1047,7 @@ static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
data = data ^ 0xA0;
xgifb_reg_and_or(pVBInfo->P3c4, 0x21, 0x1F, data);
- XGI_SetVCLKState(HwDeviceExtension, ModeNo, RefreshRateTableIndex,
- pVBInfo);
+ XGI_SetVCLKState(HwDeviceExtension, RefreshRateTableIndex, pVBInfo);
data = xgifb_reg_get(pVBInfo->P3d4, 0x31);
@@ -1122,8 +1105,7 @@ static void XGI_WriteDAC(unsigned short dl,
outb((unsigned short) bl, pVBInfo->P3c9);
}
-static void XGI_LoadDAC(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_LoadDAC(struct vb_device_info *pVBInfo)
{
unsigned short data, data2, i, k, m, n, o, si, di, bx, dl, al, ah, dh;
const unsigned short *table = XGINew_VGA_DAC;
@@ -1188,8 +1170,7 @@ static void XGI_LoadDAC(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
-static void XGI_GetLVDSResInfo(unsigned short ModeNo,
- unsigned short ModeIdIndex,
+static void XGI_GetLVDSResInfo(unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short resindex, xres, yres, modeflag;
@@ -1219,9 +1200,7 @@ static void XGI_GetLVDSResInfo(unsigned short ModeNo,
}
static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table,
- unsigned short ModeNo,
unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
unsigned short i, tempdx, tempbx, modeflag;
@@ -1259,8 +1238,7 @@ static void const *XGI_GetLcdPtr(struct XGI330_LCDDataTablStruct const *table,
return table[i].DATAPTR;
}
-static struct SiS_TVData const *XGI_GetTVPtr(unsigned short ModeNo,
- unsigned short ModeIdIndex,
+static struct SiS_TVData const *XGI_GetTVPtr(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
@@ -1289,17 +1267,15 @@ static struct SiS_TVData const *XGI_GetTVPtr(unsigned short ModeNo,
return &XGI_TVDataTable[i].DATAPTR[tempal];
}
-static void XGI_GetLVDSData(unsigned short ModeNo, unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_GetLVDSData(unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
struct SiS_LVDSData const *LCDPtr;
if (!(pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)))
return;
- LCDPtr = XGI_GetLcdPtr(XGI_EPLLCDDataPtr, ModeNo, ModeIdIndex,
- RefreshRateTableIndex, pVBInfo);
+ LCDPtr = XGI_GetLcdPtr(XGI_EPLLCDDataPtr, ModeIdIndex, pVBInfo);
pVBInfo->VGAHT = LCDPtr->VGAHT;
pVBInfo->VGAVT = LCDPtr->VGAVT;
pVBInfo->HT = LCDPtr->LCDHT;
@@ -1325,18 +1301,17 @@ static void XGI_GetLVDSData(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
-static void XGI_ModCRT1Regs(unsigned short ModeNo, unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+static void XGI_ModCRT1Regs(unsigned short ModeIdIndex,
+ struct xgi_hw_device_info *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
{
unsigned short i;
struct XGI_LVDSCRT1HDataStruct const *LCDPtr = NULL;
struct XGI_LVDSCRT1VDataStruct const *LCDPtr1 = NULL;
if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
- LCDPtr = XGI_GetLcdPtr(xgifb_epllcd_crt1_h, ModeNo, ModeIdIndex,
- RefreshRateTableIndex, pVBInfo);
+ LCDPtr = XGI_GetLcdPtr(xgifb_epllcd_crt1_h, ModeIdIndex,
+ pVBInfo);
for (i = 0; i < 8; i++)
pVBInfo->TimingH.data[i] = LCDPtr[0].Reg[i];
@@ -1345,14 +1320,13 @@ static void XGI_ModCRT1Regs(unsigned short ModeNo, unsigned short ModeIdIndex,
XGI_SetCRT1Timing_H(pVBInfo, HwDeviceExtension);
if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
- LCDPtr1 = XGI_GetLcdPtr(xgifb_epllcd_crt1_v, ModeNo,
- ModeIdIndex, RefreshRateTableIndex,
+ LCDPtr1 = XGI_GetLcdPtr(xgifb_epllcd_crt1_v, ModeIdIndex,
pVBInfo);
for (i = 0; i < 7; i++)
pVBInfo->TimingV.data[i] = LCDPtr1[0].Reg[i];
}
- XGI_SetCRT1Timing_V(ModeIdIndex, ModeNo, pVBInfo);
+ XGI_SetCRT1Timing_V(ModeIdIndex, pVBInfo);
}
static unsigned short XGI_GetLCDCapPtr(struct vb_device_info *pVBInfo)
@@ -1425,17 +1399,15 @@ static void XGI_GetLCDSync(unsigned short *HSyncWidth,
*VSyncWidth = pVBInfo->LCDCapList[Index].LCD_VSyncWidth;
}
-static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_SetLVDSRegs(unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short tempbx, tempax, tempcx, tempdx, push1, push2, modeflag;
unsigned long temp, temp1, temp2, temp3, push3;
struct XGI330_LCDDataDesStruct2 const *LCDPtr1 = NULL;
modeflag = XGI330_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- LCDPtr1 = XGI_GetLcdPtr(XGI_EPLLCDDesDataPtr, ModeNo, ModeIdIndex,
- RefreshRateTableIndex, pVBInfo);
+ LCDPtr1 = XGI_GetLcdPtr(XGI_EPLLCDDesDataPtr, ModeIdIndex, pVBInfo);
XGI_GetLCDSync(&tempax, &tempbx, pVBInfo);
push1 = tempbx;
@@ -1686,8 +1658,7 @@ static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
}
static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
- unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
unsigned short index, modeflag;
@@ -1769,15 +1740,14 @@ static void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0,
}
}
-static void XGI_SetCRT2ECLK(unsigned short ModeNo, unsigned short ModeIdIndex,
+static void XGI_SetCRT2ECLK(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
unsigned char di_0, di_1, tempal;
int i;
- tempal = XGI_GetVCLKPtr(RefreshRateTableIndex, ModeNo, ModeIdIndex,
- pVBInfo);
+ tempal = XGI_GetVCLKPtr(RefreshRateTableIndex, ModeIdIndex, pVBInfo);
XGI_GetVCLKLen(tempal, &di_0, &di_1, pVBInfo);
XGI_GetLCDVCLKPtr(&di_0, &di_1, pVBInfo);
@@ -1795,8 +1765,7 @@ static void XGI_SetCRT2ECLK(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
-static void XGI_UpdateModeInfo(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+static void XGI_UpdateModeInfo(struct vb_device_info *pVBInfo)
{
unsigned short tempcl, tempch, temp, tempbl, tempax;
@@ -1922,8 +1891,7 @@ finish:
pVBInfo->VBType = tempbx;
}
-static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
+static void XGI_GetVBInfo(unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short tempax, push, tempbx, temp, modeflag;
@@ -2048,7 +2016,7 @@ static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->VBInfo = tempbx;
}
-static void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
+static void XGI_GetTVInfo(unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short tempbx = 0, resinfo = 0, modeflag, index1;
@@ -2115,8 +2083,8 @@ static void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->TVInfo = tempbx;
}
-static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
- unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
+static unsigned char XGI_GetLCDInfo(unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short temp, tempax, tempbx, resinfo = 0, LCDIdIndex;
@@ -2196,7 +2164,7 @@ static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
}
unsigned char XGI_SearchModeID(unsigned short ModeNo,
- unsigned short *ModeIdIndex, struct vb_device_info *pVBInfo)
+ unsigned short *ModeIdIndex)
{
for (*ModeIdIndex = 0;; (*ModeIdIndex)++) {
if (XGI330_EModeIDTable[*ModeIdIndex].Ext_ModeID == ModeNo)
@@ -2435,8 +2403,7 @@ static void XGI_SaveCRT2Info(unsigned short ModeNo,
xgifb_reg_and_or(pVBInfo->P3d4, 0x31, temp2, temp1);
}
-static void XGI_GetCRT2ResInfo(unsigned short ModeNo,
- unsigned short ModeIdIndex,
+static void XGI_GetCRT2ResInfo(unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short xres, yres, modeflag, resindex;
@@ -2508,8 +2475,7 @@ static unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo)
return 0;
}
-static void XGI_GetRAMDAC2DATA(unsigned short ModeNo,
- unsigned short ModeIdIndex,
+static void XGI_GetRAMDAC2DATA(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
@@ -2551,7 +2517,7 @@ static void XGI_GetRAMDAC2DATA(unsigned short ModeNo,
pVBInfo->VT = tempbx;
}
-static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
+static void XGI_GetCRT2Data(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
@@ -2566,14 +2532,13 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->RVBHRS = 50;
if (pVBInfo->VBInfo & SetCRT2ToRAMDAC) {
- XGI_GetRAMDAC2DATA(ModeNo, ModeIdIndex, RefreshRateTableIndex,
- pVBInfo);
+ XGI_GetRAMDAC2DATA(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
return;
}
if (pVBInfo->VBInfo & (SetCRT2ToLCD | XGI_SetCRT2ToLCDA)) {
- LCDPtr = XGI_GetLcdPtr(XGI_LCDDataTable, ModeNo, ModeIdIndex,
- RefreshRateTableIndex, pVBInfo);
+ LCDPtr = XGI_GetLcdPtr(XGI_LCDDataTable, ModeIdIndex,
+ pVBInfo);
pVBInfo->RVBHCMAX = LCDPtr->RVBHCMAX;
pVBInfo->RVBHCFACT = LCDPtr->RVBHCFACT;
@@ -2654,7 +2619,7 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & (SetCRT2ToTV)) {
struct SiS_TVData const *TVPtr;
- TVPtr = XGI_GetTVPtr(ModeNo, ModeIdIndex, RefreshRateTableIndex,
+ TVPtr = XGI_GetTVPtr(ModeIdIndex, RefreshRateTableIndex,
pVBInfo);
pVBInfo->RVBHCMAX = TVPtr->RVBHCMAX;
@@ -2722,14 +2687,13 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
-static void XGI_SetCRT2VCLK(unsigned short ModeNo, unsigned short ModeIdIndex,
+static void XGI_SetCRT2VCLK(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
unsigned char di_0, di_1, tempal;
- tempal = XGI_GetVCLKPtr(RefreshRateTableIndex, ModeNo, ModeIdIndex,
- pVBInfo);
+ tempal = XGI_GetVCLKPtr(RefreshRateTableIndex, ModeIdIndex, pVBInfo);
XGI_GetVCLKLen(tempal, &di_0, &di_1, pVBInfo);
XGI_GetLCDVCLKPtr(&di_0, &di_1, pVBInfo);
@@ -2751,8 +2715,7 @@ static void XGI_SetCRT2VCLK(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_or(pVBInfo->Part4Port, 0x12, 0x08);
}
-static unsigned short XGI_GetColorDepth(unsigned short ModeNo,
- unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
+static unsigned short XGI_GetColorDepth(unsigned short ModeIdIndex)
{
unsigned short ColorDepth[6] = { 1, 2, 4, 4, 6, 8 };
short index;
@@ -2769,9 +2732,7 @@ static unsigned short XGI_GetColorDepth(unsigned short ModeNo,
static unsigned short XGI_GetOffset(unsigned short ModeNo,
unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+ unsigned short RefreshRateTableIndex)
{
unsigned short temp, colordepth, modeinfo, index, infoflag,
ColorDepth[] = { 0x01, 0x02, 0x04 };
@@ -2786,7 +2747,7 @@ static unsigned short XGI_GetOffset(unsigned short ModeNo,
if (infoflag & InterlaceMode)
temp = temp << 1;
- colordepth = XGI_GetColorDepth(ModeNo, ModeIdIndex, pVBInfo);
+ colordepth = XGI_GetColorDepth(ModeIdIndex);
if ((ModeNo >= 0x7C) && (ModeNo <= 0x7E)) {
temp = ModeNo - 0x7C;
@@ -2801,7 +2762,6 @@ static unsigned short XGI_GetOffset(unsigned short ModeNo,
static void XGI_SetCRT2Offset(unsigned short ModeNo,
unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short offset;
@@ -2810,8 +2770,7 @@ static void XGI_SetCRT2Offset(unsigned short ModeNo,
if (pVBInfo->VBInfo & SetInSlaveMode)
return;
- offset = XGI_GetOffset(ModeNo, ModeIdIndex, RefreshRateTableIndex,
- HwDeviceExtension, pVBInfo);
+ offset = XGI_GetOffset(ModeNo, ModeIdIndex, RefreshRateTableIndex);
temp = (unsigned char) (offset & 0xFF);
xgifb_reg_set(pVBInfo->Part1Port, 0x07, temp);
temp = (unsigned char) ((offset & 0xFF00) >> 8);
@@ -2829,14 +2788,12 @@ static void XGI_SetCRT2FIFO(struct vb_device_info *pVBInfo)
}
static void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
u8 tempcx;
- XGI_SetCRT2Offset(ModeNo, ModeIdIndex, RefreshRateTableIndex,
- HwDeviceExtension, pVBInfo);
+ XGI_SetCRT2Offset(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
XGI_SetCRT2FIFO(pVBInfo);
for (tempcx = 4; tempcx < 7; tempcx++)
@@ -2846,8 +2803,7 @@ static void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
xgifb_reg_set(pVBInfo->Part1Port, 0x02, 0x44); /* temp 0206 */
}
-static void XGI_SetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
+static void XGI_SetGroup1(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
@@ -3002,8 +2958,6 @@ static unsigned short XGI_GetVGAHT2(struct vb_device_info *pVBInfo)
}
static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
unsigned short push1, push2, tempax, tempbx = 0, tempcx, temp, resinfo,
@@ -3294,8 +3248,6 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
}
static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
- unsigned short RefreshRateTableIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short i, j, tempax, tempbx, tempcx, temp, push1, push2,
@@ -3724,9 +3676,7 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
-static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short RefreshRateTableIndex,
+static void XGI_SetLCDRegs(unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short pushbx, tempax, tempbx, tempcx, temp, tempah,
@@ -3772,11 +3722,10 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
/* Customized LCDB Does not add */
if ((pVBInfo->VBType & VB_SIS301LV) || (pVBInfo->VBType & VB_SIS302LV))
- LCDBDesPtr = XGI_GetLcdPtr(xgifb_lcddldes, ModeNo, ModeIdIndex,
- RefreshRateTableIndex, pVBInfo);
+ LCDBDesPtr = XGI_GetLcdPtr(xgifb_lcddldes, ModeIdIndex,
+ pVBInfo);
else
- LCDBDesPtr = XGI_GetLcdPtr(XGI_LCDDesDataTable, ModeNo,
- ModeIdIndex, RefreshRateTableIndex,
+ LCDBDesPtr = XGI_GetLcdPtr(XGI_LCDDesDataTable, ModeIdIndex,
pVBInfo);
tempah = pVBInfo->LCDResInfo;
@@ -4003,8 +3952,8 @@ static void XGI_SetTap4Regs(struct vb_device_info *pVBInfo)
xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x14, 0x10);
}
-static void XGI_SetGroup3(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_SetGroup3(unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short i;
unsigned char const *tempdi;
@@ -4059,9 +4008,8 @@ static void XGI_SetGroup3(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
-static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
+static void XGI_SetGroup4(unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short tempax, tempcx, tempbx, modeflag, temp, temp2;
@@ -4224,7 +4172,7 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
}
/* end 301b */
- XGI_SetCRT2VCLK(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
+ XGI_SetCRT2VCLK(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
}
static void XGINew_EnableCRT2(struct vb_device_info *pVBInfo)
@@ -4232,8 +4180,7 @@ static void XGINew_EnableCRT2(struct vb_device_info *pVBInfo)
xgifb_reg_and_or(pVBInfo->P3c4, 0x1E, 0xFF, 0x20);
}
-static void XGI_SetGroup5(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_SetGroup5(struct vb_device_info *pVBInfo)
{
if (pVBInfo->ModeType == ModeVGA) {
if (!(pVBInfo->VBInfo & (SetInSlaveMode | LoadDACFlag
@@ -4243,16 +4190,13 @@ static void XGI_SetGroup5(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
-static void XGI_DisableGatingCRT(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+static void XGI_DisableGatingCRT(struct vb_device_info *pVBInfo)
{
-
xgifb_reg_and_or(pVBInfo->P3d4, 0x63, 0xBF, 0x00);
}
static unsigned char XGI_XG21CheckLVDSMode(struct xgifb_video_info *xgifb_info,
- unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+ unsigned short ModeNo, unsigned short ModeIdIndex)
{
unsigned short xres, yres, colordepth, modeflag, resindex;
@@ -4281,7 +4225,7 @@ static unsigned char XGI_XG21CheckLVDSMode(struct xgifb_video_info *xgifb_info,
if (xres != xgifb_info->lvds_data.LVDSHDE ||
yres != xgifb_info->lvds_data.LVDSVDE) {
- colordepth = XGI_GetColorDepth(ModeNo, ModeIdIndex, pVBInfo);
+ colordepth = XGI_GetColorDepth(ModeIdIndex);
if (colordepth > 2)
return 0;
}
@@ -4290,7 +4234,6 @@ static unsigned char XGI_XG21CheckLVDSMode(struct xgifb_video_info *xgifb_info,
static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
int chip_id,
- unsigned short ModeNo,
unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
@@ -4831,9 +4774,7 @@ static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
/* Output : */
/* Description : Set TV Customized Param. */
/* --------------------------------------------------------------------- */
-static void XGI_SetAntiFlicker(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_SetAntiFlicker(struct vb_device_info *pVBInfo)
{
unsigned short tempbx;
@@ -4850,9 +4791,7 @@ static void XGI_SetAntiFlicker(unsigned short ModeNo,
xgifb_reg_and_or(pVBInfo->Part2Port, 0x0A, 0x8F, tempah);
}
-static void XGI_SetEdgeEnhance(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_SetEdgeEnhance(struct vb_device_info *pVBInfo)
{
unsigned short tempbx;
@@ -4887,8 +4826,8 @@ static void XGI_SetPhaseIncr(struct vb_device_info *pVBInfo)
& 0xFF000000) >> 24));
}
-static void XGI_SetYFilter(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static void XGI_SetYFilter(unsigned short ModeIdIndex,
+ struct vb_device_info *pVBInfo)
{
unsigned short tempbx, index;
unsigned char const *filterPtr;
@@ -4957,8 +4896,7 @@ static void XGI_SetYFilter(unsigned short ModeNo, unsigned short ModeIdIndex,
/* Output : */
/* Description : Customized Param. for 301 */
/* --------------------------------------------------------------------- */
-static void XGI_OEM310Setting(unsigned short ModeNo,
- unsigned short ModeIdIndex,
+static void XGI_OEM310Setting(unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
XGI_SetDelayComp(pVBInfo);
@@ -4968,11 +4906,11 @@ static void XGI_OEM310Setting(unsigned short ModeNo,
if (pVBInfo->VBInfo & SetCRT2ToTV) {
XGI_SetPhaseIncr(pVBInfo);
- XGI_SetYFilter(ModeNo, ModeIdIndex, pVBInfo);
- XGI_SetAntiFlicker(ModeNo, ModeIdIndex, pVBInfo);
+ XGI_SetYFilter(ModeIdIndex, pVBInfo);
+ XGI_SetAntiFlicker(pVBInfo);
if (pVBInfo->VBType & VB_SIS301)
- XGI_SetEdgeEnhance(ModeNo, ModeIdIndex, pVBInfo);
+ XGI_SetEdgeEnhance(pVBInfo);
}
}
@@ -4982,9 +4920,7 @@ static void XGI_OEM310Setting(unsigned short ModeNo,
/* Output : */
/* Description : Origin code for crt2group */
/* --------------------------------------------------------------------- */
-static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+static void XGI_SetCRT2ModeRegs(struct vb_device_info *pVBInfo)
{
unsigned short tempbl;
short tempcl;
@@ -5146,20 +5082,14 @@ reg_and_or:
}
-void XGI_UnLockCRT2(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+void XGI_UnLockCRT2(struct vb_device_info *pVBInfo)
{
-
xgifb_reg_and_or(pVBInfo->Part1Port, 0x2f, 0xFF, 0x01);
-
}
-void XGI_LockCRT2(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
+void XGI_LockCRT2(struct vb_device_info *pVBInfo)
{
-
xgifb_reg_and_or(pVBInfo->Part1Port, 0x2F, 0xFE, 0x00);
-
}
unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
@@ -5231,8 +5161,8 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
}
i--;
if ((pVBInfo->SetFlag & ProgrammingCRT2)) {
- temp = XGI_AjustCRT2Rate(ModeNo, ModeIdIndex,
- RefreshRateTableIndex, &i, pVBInfo);
+ temp = XGI_AjustCRT2Rate(ModeIdIndex, RefreshRateTableIndex,
+ &i, pVBInfo);
}
return RefreshRateTableIndex + i;
}
@@ -5246,12 +5176,11 @@ static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->SetFlag |= ProgrammingCRT2;
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
ModeIdIndex, pVBInfo);
- XGI_GetLVDSResInfo(ModeNo, ModeIdIndex, pVBInfo);
- XGI_GetLVDSData(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
- XGI_ModCRT1Regs(ModeNo, ModeIdIndex, RefreshRateTableIndex,
- HwDeviceExtension, pVBInfo);
- XGI_SetLVDSRegs(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
- XGI_SetCRT2ECLK(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
+ XGI_GetLVDSResInfo(ModeIdIndex, pVBInfo);
+ XGI_GetLVDSData(ModeIdIndex, pVBInfo);
+ XGI_ModCRT1Regs(ModeIdIndex, HwDeviceExtension, pVBInfo);
+ XGI_SetLVDSRegs(ModeIdIndex, pVBInfo);
+ XGI_SetCRT2ECLK(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
}
static unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
@@ -5261,29 +5190,23 @@ static unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
unsigned short ModeIdIndex, RefreshRateTableIndex;
pVBInfo->SetFlag |= ProgrammingCRT2;
- XGI_SearchModeID(ModeNo, &ModeIdIndex, pVBInfo);
+ XGI_SearchModeID(ModeNo, &ModeIdIndex);
pVBInfo->SelectCRT2Rate = 4;
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
ModeIdIndex, pVBInfo);
XGI_SaveCRT2Info(ModeNo, pVBInfo);
- XGI_GetCRT2ResInfo(ModeNo, ModeIdIndex, pVBInfo);
- XGI_GetCRT2Data(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
- XGI_PreSetGroup1(ModeNo, ModeIdIndex, HwDeviceExtension,
- RefreshRateTableIndex, pVBInfo);
- XGI_SetGroup1(ModeNo, ModeIdIndex, HwDeviceExtension,
- RefreshRateTableIndex, pVBInfo);
- XGI_SetLockRegs(ModeNo, ModeIdIndex, HwDeviceExtension,
- RefreshRateTableIndex, pVBInfo);
- XGI_SetGroup2(ModeNo, ModeIdIndex, RefreshRateTableIndex,
- HwDeviceExtension, pVBInfo);
- XGI_SetLCDRegs(ModeNo, ModeIdIndex, HwDeviceExtension,
- RefreshRateTableIndex, pVBInfo);
+ XGI_GetCRT2ResInfo(ModeIdIndex, pVBInfo);
+ XGI_GetCRT2Data(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
+ XGI_PreSetGroup1(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
+ XGI_SetGroup1(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
+ XGI_SetLockRegs(ModeNo, ModeIdIndex, pVBInfo);
+ XGI_SetGroup2(ModeNo, ModeIdIndex, pVBInfo);
+ XGI_SetLCDRegs(ModeIdIndex, pVBInfo);
XGI_SetTap4Regs(pVBInfo);
- XGI_SetGroup3(ModeNo, ModeIdIndex, pVBInfo);
- XGI_SetGroup4(ModeNo, ModeIdIndex, RefreshRateTableIndex,
- HwDeviceExtension, pVBInfo);
- XGI_SetCRT2VCLK(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
- XGI_SetGroup5(ModeNo, ModeIdIndex, pVBInfo);
+ XGI_SetGroup3(ModeIdIndex, pVBInfo);
+ XGI_SetGroup4(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
+ XGI_SetCRT2VCLK(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
+ XGI_SetGroup5(pVBInfo);
XGI_AutoThreshold(pVBInfo);
return 1;
}
@@ -5442,7 +5365,7 @@ static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
/* EnablePart4_1F */
xgifb_reg_or(pVBInfo->Part4Port, 0x1F, tempah);
- XGI_DisableGatingCRT(HwDeviceExtension, pVBInfo);
+ XGI_DisableGatingCRT(pVBInfo);
XGI_DisplayOn(xgifb_info, HwDeviceExtension, pVBInfo);
} /* 301 */
else { /* LVDS */
@@ -5467,10 +5390,10 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
{
unsigned short RefreshRateTableIndex, temp;
- XGI_SetSeqRegs(ModeNo, ModeIdIndex, pVBInfo);
+ XGI_SetSeqRegs(pVBInfo);
outb(XGI330_StandTable.MISC, pVBInfo->P3c2);
- XGI_SetCRTCRegs(HwDeviceExtension, pVBInfo);
- XGI_SetATTRegs(ModeNo, ModeIdIndex, pVBInfo);
+ XGI_SetCRTCRegs(pVBInfo);
+ XGI_SetATTRegs(ModeIdIndex, pVBInfo);
XGI_SetGRCRegs(pVBInfo);
XGI_ClearExt1Regs(pVBInfo);
@@ -5495,13 +5418,12 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
ModeIdIndex, pVBInfo);
if (RefreshRateTableIndex != 0xFFFF) {
XGI_SetSync(RefreshRateTableIndex, pVBInfo);
- XGI_SetCRT1CRTC(ModeNo, ModeIdIndex, RefreshRateTableIndex,
+ XGI_SetCRT1CRTC(ModeIdIndex, RefreshRateTableIndex,
pVBInfo, HwDeviceExtension);
- XGI_SetCRT1DE(HwDeviceExtension, ModeNo, ModeIdIndex,
- RefreshRateTableIndex, pVBInfo);
+ XGI_SetCRT1DE(ModeIdIndex, RefreshRateTableIndex, pVBInfo);
XGI_SetCRT1Offset(ModeNo, ModeIdIndex, RefreshRateTableIndex,
HwDeviceExtension, pVBInfo);
- XGI_SetCRT1VCLK(ModeNo, ModeIdIndex, HwDeviceExtension,
+ XGI_SetCRT1VCLK(ModeIdIndex, HwDeviceExtension,
RefreshRateTableIndex, pVBInfo);
}
@@ -5510,30 +5432,28 @@ static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
if (temp & 0xA0) {
if (HwDeviceExtension->jChipType == XG27)
- XGI_SetXG27CRTC(ModeNo, ModeIdIndex,
- RefreshRateTableIndex, pVBInfo);
+ XGI_SetXG27CRTC(RefreshRateTableIndex, pVBInfo);
else
- XGI_SetXG21CRTC(ModeNo, ModeIdIndex,
- RefreshRateTableIndex, pVBInfo);
+ XGI_SetXG21CRTC(RefreshRateTableIndex, pVBInfo);
XGI_UpdateXG21CRTC(ModeNo, pVBInfo,
RefreshRateTableIndex);
xgifb_set_lcd(HwDeviceExtension->jChipType,
- pVBInfo, RefreshRateTableIndex, ModeNo);
+ pVBInfo, RefreshRateTableIndex);
if (pVBInfo->IF_DEF_LVDS == 1)
xgifb_set_lvds(xgifb_info,
HwDeviceExtension->jChipType,
- ModeNo, ModeIdIndex, pVBInfo);
+ ModeIdIndex, pVBInfo);
}
}
pVBInfo->SetFlag &= (~ProgrammingCRT2);
- XGI_SetCRT1FIFO(ModeNo, HwDeviceExtension, pVBInfo);
- XGI_SetCRT1ModeRegs(HwDeviceExtension, ModeNo, ModeIdIndex,
- RefreshRateTableIndex, pVBInfo);
- XGI_LoadDAC(ModeNo, ModeIdIndex, pVBInfo);
+ XGI_SetCRT1FIFO(HwDeviceExtension, pVBInfo);
+ XGI_SetCRT1ModeRegs(HwDeviceExtension, ModeIdIndex,
+ RefreshRateTableIndex, pVBInfo);
+ XGI_LoadDAC(pVBInfo);
}
unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
@@ -5568,14 +5488,14 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
xgifb_reg_set(pVBInfo->P3c4, 0x05, 0x86);
if (HwDeviceExtension->jChipType < XG20)
- XGI_UnLockCRT2(HwDeviceExtension, pVBInfo);
+ XGI_UnLockCRT2(pVBInfo);
- XGI_SearchModeID(ModeNo, &ModeIdIndex, pVBInfo);
+ XGI_SearchModeID(ModeNo, &ModeIdIndex);
if (HwDeviceExtension->jChipType < XG20) {
- XGI_GetVBInfo(ModeNo, ModeIdIndex, HwDeviceExtension, pVBInfo);
- XGI_GetTVInfo(ModeNo, ModeIdIndex, pVBInfo);
- XGI_GetLCDInfo(ModeNo, ModeIdIndex, pVBInfo);
+ XGI_GetVBInfo(ModeIdIndex, pVBInfo);
+ XGI_GetTVInfo(ModeIdIndex, pVBInfo);
+ XGI_GetLCDInfo(ModeIdIndex, pVBInfo);
XGI_DisableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
if (pVBInfo->VBInfo & (SetSimuScanMode | XGI_SetCRT2ToLCDA) ||
@@ -5602,15 +5522,14 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
}
}
- XGI_SetCRT2ModeRegs(ModeNo, HwDeviceExtension, pVBInfo);
- XGI_OEM310Setting(ModeNo, ModeIdIndex, pVBInfo); /*0212*/
+ XGI_SetCRT2ModeRegs(pVBInfo);
+ XGI_OEM310Setting(ModeIdIndex, pVBInfo); /*0212*/
XGI_EnableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
} /* !XG20 */
else {
if (pVBInfo->IF_DEF_LVDS == 1)
if (!XGI_XG21CheckLVDSMode(xgifb_info, ModeNo,
- ModeIdIndex,
- pVBInfo))
+ ModeIdIndex))
return 0;
pVBInfo->ModeType = XGI330_EModeIDTable[ModeIdIndex].
@@ -5627,10 +5546,10 @@ unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
XGI_DisplayOn(xgifb_info, HwDeviceExtension, pVBInfo);
}
- XGI_UpdateModeInfo(HwDeviceExtension, pVBInfo);
+ XGI_UpdateModeInfo(pVBInfo);
if (HwDeviceExtension->jChipType < XG20)
- XGI_LockCRT2(HwDeviceExtension, pVBInfo);
+ XGI_LockCRT2(pVBInfo);
return 1;
}
diff --git a/drivers/staging/xgifb/vb_setmode.h b/drivers/staging/xgifb/vb_setmode.h
index 2c0a31c8dfd..5301bec6440 100644
--- a/drivers/staging/xgifb/vb_setmode.h
+++ b/drivers/staging/xgifb/vb_setmode.h
@@ -2,10 +2,8 @@
#define _VBSETMODE_
extern void InitTo330Pointer(unsigned char, struct vb_device_info *);
-extern void XGI_UnLockCRT2(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *);
-extern void XGI_LockCRT2(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *);
+extern void XGI_UnLockCRT2(struct vb_device_info *);
+extern void XGI_LockCRT2(struct vb_device_info *);
extern void XGI_DisplayOff(struct xgifb_video_info *,
struct xgi_hw_device_info *,
struct vb_device_info *);
@@ -13,11 +11,10 @@ extern void XGI_GetVBType(struct vb_device_info *);
extern void XGI_SenseCRT1(struct vb_device_info *);
extern unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
struct xgi_hw_device_info *HwDeviceExtension,
- unsigned short ModeNo) ;
+ unsigned short ModeNo);
extern unsigned char XGI_SearchModeID(unsigned short ModeNo,
- unsigned short *ModeIdIndex,
- struct vb_device_info *);
+ unsigned short *ModeIdIndex);
extern unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
unsigned short ModeNo,
unsigned short ModeIdIndex,
diff --git a/drivers/staging/xillybus/Kconfig b/drivers/staging/xillybus/Kconfig
new file mode 100644
index 00000000000..8a4181f846a
--- /dev/null
+++ b/drivers/staging/xillybus/Kconfig
@@ -0,0 +1,32 @@
+#
+# Xillybus devices
+#
+
+config XILLYBUS
+ tristate "Xillybus generic FPGA interface"
+ depends on PCI || (OF_ADDRESS && OF_IRQ) && m
+ help
+ Xillybus is a generic interface for peripherals designed on
+ programmable logic (FPGA). The driver probes the hardware for
+ its capabilities, and creates device files accordingly.
+
+ If unsure, say N.
+
+if XILLYBUS
+
+config XILLYBUS_PCIE
+ tristate "Xillybus over PCIe"
+ depends on XILLYBUS && PCI
+ help
+ Set to M if you want Xillybus to use PCI Express for communicating
+ with the FPGA.
+
+config XILLYBUS_OF
+ tristate "Xillybus over Device Tree"
+ depends on XILLYBUS && OF_ADDRESS && OF_IRQ
+ help
+ Set to M if you want Xillybus to find its resources from the
+ Open Firmware Flattened Device Tree. If the target is an embedded
+ system, say M.
+
+endif # if XILLYBUS
diff --git a/drivers/staging/xillybus/Makefile b/drivers/staging/xillybus/Makefile
new file mode 100644
index 00000000000..b68b7ebfd38
--- /dev/null
+++ b/drivers/staging/xillybus/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for Xillybus driver
+#
+
+obj-$(CONFIG_XILLYBUS) += xillybus_core.o
+obj-$(CONFIG_XILLYBUS_PCIE) += xillybus_pcie.o
+obj-$(CONFIG_XILLYBUS_OF) += xillybus_of.o
diff --git a/drivers/staging/xillybus/README b/drivers/staging/xillybus/README
new file mode 100644
index 00000000000..d2d848ae316
--- /dev/null
+++ b/drivers/staging/xillybus/README
@@ -0,0 +1,403 @@
+
+ ==========================================
+ Xillybus driver for generic FPGA interface
+ ==========================================
+
+Author: Eli Billauer, Xillybus Ltd. (http://xillybus.com)
+Email: eli.billauer@gmail.com or as advertised on Xillybus' site.
+
+Contents:
+
+ - Introduction
+ -- Background
+ -- Xillybus Overview
+
+ - Usage
+ -- User interface
+ -- Synchronization
+ -- Seekable pipes
+
+ - Internals
+ -- Source code organization
+ -- Pipe attributes
+ -- Host never reads from the FPGA
+ -- Channels, pipes, and the message channel
+ -- Data streaming
+ -- Data granularity
+ -- Probing
+ -- Buffer allocation
+ -- Memory management
+ -- The "nonempty" message (supporting poll)
+
+
+INTRODUCTION
+============
+
+Background
+----------
+
+An FPGA (Field Programmable Gate Array) is a piece of logic hardware, which
+can be programmed to become virtually anything that is usually found as a
+dedicated chipset: For instance, a display adapter, network interface card,
+or even a processor with its peripherals. FPGAs are the LEGO of hardware:
+Based upon certain building blocks, you make your own toys the way you like
+them. It's usually pointless to reimplement something that is already
+available on the market as a chipset, so FPGAs are mostly used when some
+special functionality is needed, and the production volume is relatively low
+(hence not justifying the development of an ASIC).
+
+The challenge with FPGAs is that everything is implemented at a very low
+level, even lower than assembly language. In order to allow FPGA designers to
+focus on their specific project, and not reinvent the wheel over and over
+again, pre-designed building blocks, IP cores, are often used. These are the
+FPGA parallels of library functions. IP cores may implement certain
+mathematical functions, a functional unit (e.g. a USB interface), an entire
+processor (e.g. ARM) or anything that might come in handy. Think of them as a
+building block, with electrical wires dangling on the sides for connection to
+other blocks.
+
+One of the daunting tasks in FPGA design is communicating with a full-blown
+operating system (actually, with the processor running it): Implementing the
+low-level bus protocol and the somewhat higher-level interface with the host
+(registers, interrupts, DMA etc.) is a project in itself. When the FPGA's
+function is a well-known one (e.g. a video adapter card, or a NIC), it can
+make sense to design the FPGA's interface logic specifically for the project.
+A special driver is then written to present the FPGA as a well-known interface
+to the kernel and/or user space. In that case, there is no reason to treat the
+FPGA differently than any device on the bus.
+
+It's however common that the desired data communication doesn't fit any well-
+known peripheral function. Also, the effort of designing an elegant
+abstraction for the data exchange is often considered too big. In those cases,
+a quicker and possibly less elegant solution is sought: The driver is
+effectively written as a user space program, leaving the kernel space part
+with just elementary data transport. This still requires designing some
+interface logic for the FPGA, and writing a simple ad-hoc driver for the kernel.
+
+Xillybus Overview
+-----------------
+
+Xillybus is an IP core and a Linux driver. Together, they form a kit for
+elementary data transport between an FPGA and the host, providing pipe-like
+data streams with a straightforward user interface. It's intended as a low-
+effort solution for mixed FPGA-host projects, for which it makes sense to
+have the project-specific part of the driver running in a user-space program.
+
+Since the communication requirements may vary significantly from one FPGA
+project to another (the number of data pipes needed in each direction and
+their attributes), there isn't one specific chunk of logic being the Xillybus
+IP core. Rather, the IP core is configured and built based upon a
+specification given by its end user.
+
+Xillybus presents independent data streams, which resemble pipes or TCP/IP
+communication to the user. At the host side, a character device file is used
+just like any pipe file. On the FPGA side, hardware FIFOs are used to stream
+the data. This is contrary to a common method of communicating through fixed-
+sized buffers (even though such buffers are used by Xillybus under the hood).
+There may be more than a hundred of these streams on a single IP core, or
+just one, depending on the configuration.
+
+In order to ease the deployment of the Xillybus IP core, it contains a simple
+data structure which completely defines the core's configuration. The Linux
+driver fetches this data structure during its initialization process, and sets
+up the DMA buffers and character devices accordingly. As a result, a single
+driver is used to work out of the box with any Xillybus IP core.
+
+The data structure just mentioned should not be confused with PCI's
+configuration space or the Flattened Device Tree.
+
+USAGE
+=====
+
+User interface
+--------------
+
+On the host, all interface with Xillybus is done through /dev/xillybus_*
+device files, which are generated automatically as the driver loads. The
+names of these files depend on the IP core that is loaded in the FPGA (see
+Probing below). To communicate with the FPGA, open the device file that
+corresponds to the hardware FIFO you want to send data to or receive data from,
+and use plain write() or read() calls, just like with a regular pipe. In
+particular, it makes perfect sense to go:
+
+$ cat mydata > /dev/xillybus_thisfifo
+
+$ cat /dev/xillybus_thatfifo > hisdata
+
+possibly pressing CTRL-C at some stage, even though the xillybus_* pipes have
+the capability to send an EOF (but may not use it).
+
+The driver and hardware are designed to behave sensibly as pipes, including:
+
+* Supporting non-blocking I/O (by setting O_NONBLOCK on open() ).
+
+* Supporting poll() and select().
+
+* Being bandwidth efficient under load (using DMA), but also handling small
+ pieces of data sent across (like TCP/IP) by autoflushing.
+
+A device file can be read only, write only or bidirectional. Bidirectional
+device files are treated like two independent pipes (except for sharing a
+"channel" structure in the implementation code).
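+
+For illustration only, a minimal user-space reader could look like the sketch
+below. The device file name is made up; the real names depend on the IP core
+loaded in the FPGA (see Probing below).
+
+  #include <fcntl.h>
+  #include <stdio.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          char buf[4096];
+          ssize_t n;
+          int fd = open("/dev/xillybus_somefifo", O_RDONLY);
+
+          if (fd < 0) {
+                  perror("open");
+                  return 1;
+          }
+
+          /* Plain read(), just like reading from a pipe */
+          while ((n = read(fd, buf, sizeof(buf))) > 0)
+                  fwrite(buf, 1, (size_t) n, stdout);
+
+          if (n < 0)
+                  perror("read");
+
+          close(fd);
+          return 0;
+  }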
+
+Synchronization
+---------------
+
+Xillybus pipes are configured (on the IP core) to be either synchronous or
+asynchronous. For a synchronous pipe, write() returns successfully only after
+some data has been submitted and acknowledged by the FPGA. This slows down
+bulk data transfers, and is nearly impossible for use with streams that
+require data at a constant rate: There is no data transmitted to the FPGA
+between write() calls, in particular when the process loses the CPU.
+
+When a pipe is configured as asynchronous, write() returns as soon as there is
+enough room in the DMA buffers to store any of the data.
+
+For FPGA to host pipes, asynchronous pipes allow data transfer from the FPGA
+as soon as the respective device file is opened, regardless of whether the data
+has been requested by a read() call. On synchronous pipes, only the amount
+of data requested by a read() call is transmitted.
+
+In summary, for synchronous pipes, data between the host and FPGA is
+transmitted only to satisfy the read() or write() call currently handled
+by the driver, and those calls wait for the transmission to complete before
+returning.
+
+Note that the synchronization attribute has nothing to do with the possibility
+that read() or write() completes fewer bytes than requested. There is a
+separate configuration flag ("allowpartial") that determines whether such a
+partial completion is allowed.
+
+Seekable pipes
+--------------
+
+A synchronous pipe can be configured to have the stream's position exposed
+to the user logic at the FPGA. Such a pipe is also seekable on the host API.
+With this feature, a memory or register interface can be attached on the
+FPGA side to the seekable stream. Reading or writing to a certain address in
+the attached memory is done by seeking to the desired address, and calling
+read() or write() as required.
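+
+A minimal sketch of accessing such an attached register interface from user
+space (the device file name and the address are made up for the example):
+
+  #include <fcntl.h>
+  #include <stdint.h>
+  #include <stdio.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          uint32_t value;
+          int fd = open("/dev/xillybus_mem", O_RDONLY);
+
+          if (fd < 0) {
+                  perror("open");
+                  return 1;
+          }
+
+          /* Seek to the desired address, then read one word from it */
+          if (lseek(fd, 0x20, SEEK_SET) < 0 ||
+              read(fd, &value, sizeof(value)) != (ssize_t) sizeof(value)) {
+                  perror("lseek/read");
+                  close(fd);
+                  return 1;
+          }
+
+          printf("Word at address 0x20: 0x%08x\n", (unsigned int) value);
+          close(fd);
+          return 0;
+  }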
+
+
+INTERNALS
+=========
+
+Source code organization
+------------------------
+
+The Xillybus driver consists of a core module, xillybus_core.c, and modules
+that depend on the specific bus interface (xillybus_of.c and xillybus_pcie.c).
+
+The bus specific modules are those probed when a suitable device is found by
+the kernel. Since the DMA mapping and synchronization functions, which are bus
+dependent by their nature, are used by the core module, a
+xilly_endpoint_hardware structure is passed to the core module on
+initialization. This structure is populated with pointers to wrapper functions
+which execute the DMA-related operations on the bus.
+
+Pipe attributes
+---------------
+
+Each pipe has a number of attributes which are set when the FPGA component
+(IP core) is built. They are fetched from the IDT (the data structure which
+defines the core's configuration, see Probing below) by xilly_setupchannels()
+in xillybus_core.c as follows:
+
+* is_writebuf: The pipe's direction. A non-zero value means it's an FPGA to
+ host pipe (the FPGA "writes").
+
+* channelnum: The pipe's identification number in communication between the
+ host and FPGA.
+
+* format: The underlying data width. See Data Granularity below.
+
+* allowpartial: A non-zero value means that a read() or write() (whichever
+ applies) may return with less than the requested number of bytes. The common
+ choice is a non-zero value, to match standard UNIX behavior.
+
+* synchronous: A non-zero value means that the pipe is synchronous. See
+ Synchronization above.
+
+* bufsize: Each DMA buffer's size. Always a power of two.
+
+* bufnum: The number of buffers allocated for this pipe. Always a power of two.
+
+* exclusive_open: A non-zero value forces exclusive opening of the associated
+ device file. If the device file is bidirectional, and already opened only in
+ one direction, the opposite direction may be opened once.
+
+* seekable: A non-zero value indicates that the pipe is seekable. See
+ Seekable pipes above.
+
+* supports_nonempty: A non-zero value (which is typical) indicates that the
+ hardware will send the messages that are necessary to support select() and
+ poll() for this pipe.
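+
+The way these attributes are packed into each 4-byte channel descriptor can
+be seen in xilly_setupchannels() further down in this patch. In brief
+(sketch, mirroring the driver code; "chandesc" points at one entry):
+
+  is_writebuf       = chandesc[0] & 0x01;
+  channelnum        = (chandesc[0] >> 1) | ((chandesc[1] & 0x0f) << 7);
+  format            = (chandesc[1] >> 4) & 0x03;
+  allowpartial      = (chandesc[1] >> 6) & 0x01;
+  synchronous       = (chandesc[1] >> 7) & 0x01;
+  bufsize           = 1 << (chandesc[2] & 0x1f);  /* In elements */
+  bufnum            = 1 << (chandesc[3] & 0x0f);
+  exclusive_open    = (chandesc[2] >> 7) & 0x01;
+  seekable          = (chandesc[2] >> 6) & 0x01;
+  supports_nonempty = (chandesc[2] >> 5) & 0x01;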
+
+Host never reads from the FPGA
+------------------------------
+
+Even though PCI Express is hotpluggable in general, a typical motherboard
+doesn't expect a card to go away all of a sudden. But since the PCIe card
+is based upon reprogrammable logic, a sudden disappearance from the bus is
+quite likely as a result of an accidental reprogramming of the FPGA while the
+host is up. In practice, nothing happens immediately in such a situation. But
+if the host attempts to read from an address that is mapped to the PCI Express
+device, that leads to an immediate freeze of the system on some motherboards,
+even though the PCIe standard requires a graceful recovery.
+
+In order to avoid these freezes, the Xillybus driver refrains completely from
+reading from the device's register space. All communication from the FPGA to
+the host is done through DMA. In particular, the Interrupt Service Routine
+doesn't follow the common practice of checking a status register when it's
+invoked. Rather, the FPGA prepares a small buffer which contains short
+messages, which inform the host what the interrupt was about.
+
+This mechanism is used on non-PCIe buses as well for the sake of uniformity.
+
+
+Channels, pipes, and the message channel
+----------------------------------------
+
+Each of the (possibly bidirectional) pipes presented to the user is allocated
+a data channel between the FPGA and the host. The distinction between channels
+and pipes is necessary only because of channel 0, which is used for interrupt-
+related messages from the FPGA, and has no pipe attached to it.
+
+Data streaming
+--------------
+
+Even though a non-segmented data stream is presented to the user at both
+sides, the implementation relies on a set of DMA buffers which is allocated
+for each channel. For the sake of illustration, let's take the FPGA to host
+direction: As data streams into the respective channel's interface in the
+FPGA, the Xillybus IP core writes it to one of the DMA buffers. When the
+buffer is full, the FPGA informs the host about that (appending a
+XILLYMSG_OPCODE_RELEASEBUF message on channel 0 and sending an interrupt if
+necessary). The host responds by making the data available for reading through
+the character device. When all data has been read, the host writes to the
+FPGA's buffer control register, allowing the buffer to be overwritten. Flow
+control mechanisms exist on both sides to prevent underflows and overflows.
+
+This is not good enough for creating a TCP/IP-like stream: If the data flow
+stops momentarily before a DMA buffer is filled, the intuitive expectation is
+that the partial data in the buffer will arrive anyhow, despite the buffer
+not being full. This is implemented by adding a field in the
+XILLYMSG_OPCODE_RELEASEBUF message, through which the FPGA informs not just
+which buffer is submitted, but how much data it contains.
+
+But the FPGA will submit a partially filled buffer only if directed to do so
+by the host. This situation occurs when the read() method has been blocking
+for XILLY_RX_TIMEOUT jiffies (currently 10 ms), after which the host commands
+the FPGA to submit a DMA buffer as soon as it can. This timeout mechanism
+balances bus bandwidth efficiency (avoiding sending many partially filled
+buffers) against keeping the latency for tails of data fairly low.
+
+A similar setting is used in the host to FPGA direction. The handling of
+partial DMA buffers is somewhat different, though. The user can tell the
+driver to submit all data it has in the buffers to the FPGA, by issuing a
+write() with the byte count set to zero. This is similar to a flush request,
+but it doesn't block. There is also an autoflushing mechanism, which triggers
+an equivalent flush roughly XILLY_RX_TIMEOUT jiffies after the last write().
+This allows the user to be oblivious about the underlying buffering mechanism
+and yet enjoy a stream-like interface.
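+
+As a minimal sketch (with "fd" assumed to be an already opened host-to-FPGA
+device file), such an explicit flush request boils down to a zero-count
+write():
+
+  char dummy;
+
+  /* Commit whatever is currently buffered; returns without blocking */
+  if (write(fd, &dummy, 0) < 0)
+          perror("flush request");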
+
+Note that the issue of partial buffer flushing is irrelevant for pipes having
+the "synchronous" attribute nonzero, since synchronous pipes don't allow data
+to lie around in the DMA buffers between read() and write() anyhow.
+
+Data granularity
+----------------
+
+The data arrives or is sent at the FPGA as 8, 16 or 32 bit wide words, as
+configured by the "format" attribute. Whenever possible, the driver attempts
+to hide this when the pipe is accessed differently from its natural alignment.
+For example, reading single bytes from a pipe with 32 bit granularity works
+with no issues. Writing single bytes to pipes with 16 or 32 bit granularity
+will also work, but the driver can't send partially completed words to the
+FPGA, so the transmission of up to one word may be held until it's fully
+occupied with user data.
+
+This somewhat complicates the handling of host to FPGA streams, because
+when a buffer is flushed, it may contain up to 3 bytes that don't form a word in
+the FPGA, and hence can't be sent. To prevent loss of data, these leftover
+bytes need to be moved to the next buffer. The parts in xillybus_core.c
+that mention "leftovers" in some way are related to this complication.
+
+Probing
+-------
+
+As mentioned earlier, the number of pipes that are created when the driver
+loads and their attributes depend on the Xillybus IP core in the FPGA. During
+the driver's initialization, a blob containing configuration info, the
+Interface Description Table (IDT), is sent from the FPGA to the host. The
+bootstrap process is done in three phases:
+
+1. Acquire the length of the IDT, so a buffer can be allocated for it. This
+ is done by sending a quiesce command to the device, since the acknowledge
+ for this command contains the IDT's buffer length.
+
+2. Acquire the IDT itself.
+
+3. Create the interfaces according to the IDT.
+
+Buffer allocation
+-----------------
+
+In order to simplify the logic that prevents illegal boundary crossings of
+PCIe packets, the following rule applies: If a buffer is smaller than 4kB,
+it must not cross a 4kB boundary. Otherwise, it must be 4kB aligned. The
+xilly_setupchannels() function allocates these buffers by requesting whole
+pages from the kernel, and dividing them into DMA buffers as necessary. Since
+all buffers' sizes are powers of two, it's possible to pack any set of such
+buffers, with a maximal waste of one page of memory.
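+
+The page order for a given buffer follows the same simple loop that appears
+in xilly_setupchannels() further down in this patch (sketch):
+
+  int allocorder = 0;
+  int allocsize = PAGE_SIZE;
+
+  while (bytebufsize > allocsize) {
+          allocsize *= 2;
+          allocorder++;
+  }
+
+For example, with 4 kB pages, a 2 kB buffer gets an order-0 allocation (so
+two such buffers share a page), while a 16 kB buffer gets an order-2
+allocation of its own.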
+
+All buffers are allocated when the driver is loaded. This is necessary,
+since large contiguous physical memory segments are sometimes requested,
+which are more likely to be available when the system is freshly booted.
+
+Buffer memory is allocated in the same order as the buffers appear in the
+IDT. The driver relies on a rule that the pipes are sorted with decreasing
+buffer size in the IDT. If a requested buffer is at least a page in size,
+the necessary number of pages is requested from the kernel, and these are
+used for this buffer. If the requested buffer is smaller than a page, one
+single page is requested from the kernel, and that page is partially used.
+Or, if there already is a partially used page at hand, the buffer is packed
+into that page. It can be shown that all pages requested from the kernel
+(except possibly for the last) are 100% utilized this way.
+
+Memory management
+-----------------
+
+The tricky part about the buffer allocation procedure described above is
+freeing and unmapping the buffers, in particular if something goes wrong in
+the middle, and the allocations need to be rolled back. The three-stage
+probing procedure makes this even more crucial, since temporary buffers are
+set up and mapped in its early stages.
+
+To keep the code clean from complicated and bug-prone memory release routines,
+there are special routines for allocating memory. For example, instead of
+calling kzalloc, there's
+
+void *xilly_malloc(struct xilly_cleanup *mem, size_t size)
+
+which effectively allocates a zeroed buffer of size "size". Its first
+argument, "mem", is where this allocation is enlisted, so that it's released
+when xillybus_do_cleanup() is called with the same "mem" structure.
+
+Two other functions enlist allocations in this structure: xilly_pagealloc()
+for page allocations and xilly_map_single_*() for DMA mapping.
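+
+A minimal sketch of the intended usage, based on the prototypes above (the
+function name "example_setup" and the allocation size are made up; "mem"
+would normally live in the endpoint structure):
+
+  static int example_setup(struct xilly_endpoint *endpoint)
+  {
+          struct xilly_cleanup mem;
+          void *mydata;
+
+          INIT_LIST_HEAD(&mem.to_kfree);
+          INIT_LIST_HEAD(&mem.to_pagefree);
+          INIT_LIST_HEAD(&mem.to_unmap);
+
+          mydata = xilly_malloc(&mem, 256); /* Zeroed, enlisted in to_kfree */
+          if (!mydata)
+                  return -ENOMEM; /* Nothing enlisted yet, nothing to undo */
+
+          /* ... more allocations and DMA mappings enlisted in "mem" ... */
+
+          xillybus_do_cleanup(&mem, endpoint); /* Releases all enlisted */
+          return 0;
+  }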
+
+The "nonempty" message (supporting poll)
+---------------------------------------
+
+In order to support the "poll" method (and hence select() ), there is a small
+catch regarding the FPGA to host direction: The FPGA may have filled a DMA
+buffer with some data, but not submitted that buffer. If the host waited for
+the buffer's submission by the FPGA, there would be a possibility that the
+FPGA side has sent data, but a select() call would still block, because the
+host has not received any notification about this. This is solved with
+XILLYMSG_OPCODE_NONEMPTY messages sent by the FPGA when a channel goes from
+completely empty to containing some data.
+
+These messages are used only to support poll() and select(). The IP core can
+be configured not to send them for a slight reduction of bandwidth.
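+
+From user space, this simply means that poll() behaves as expected on an
+FPGA-to-host device file, as in the sketch below (device file name made up):
+
+  #include <fcntl.h>
+  #include <poll.h>
+  #include <stdio.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+          struct pollfd pfd;
+          char buf[512];
+          ssize_t n;
+
+          pfd.fd = open("/dev/xillybus_somefifo", O_RDONLY | O_NONBLOCK);
+          if (pfd.fd < 0) {
+                  perror("open");
+                  return 1;
+          }
+          pfd.events = POLLIN;
+
+          /* Wait up to one second for the FPGA to have data to send */
+          if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN)) {
+                  n = read(pfd.fd, buf, sizeof(buf));
+                  if (n > 0)
+                          fwrite(buf, 1, (size_t) n, stdout);
+          }
+
+          close(pfd.fd);
+          return 0;
+  }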
diff --git a/drivers/staging/xillybus/TODO b/drivers/staging/xillybus/TODO
new file mode 100644
index 00000000000..95cfe2f62fc
--- /dev/null
+++ b/drivers/staging/xillybus/TODO
@@ -0,0 +1,5 @@
+TODO:
+- have the driver reviewed
+
+Please send any patches and/or comments to Eli Billauer,
+<eli.billauer@gmail.com>.
diff --git a/drivers/staging/xillybus/xillybus.h b/drivers/staging/xillybus/xillybus.h
new file mode 100644
index 00000000000..e5e91d61288
--- /dev/null
+++ b/drivers/staging/xillybus/xillybus.h
@@ -0,0 +1,182 @@
+/*
+ * linux/drivers/misc/xillybus.h
+ *
+ * Copyright 2011 Xillybus Ltd, http://xillybus.com
+ *
+ * Header file for the Xillybus FPGA/host framework.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#ifndef __XILLYBUS_H
+#define __XILLYBUS_H
+
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/cdev.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+struct xilly_endpoint_hardware;
+
+struct xilly_page {
+ struct list_head node;
+ unsigned long addr;
+ unsigned int order;
+};
+
+struct xilly_dma {
+ struct list_head node;
+ struct pci_dev *pdev;
+ struct device *dev;
+ dma_addr_t dma_addr;
+ size_t size;
+ int direction;
+};
+
+struct xilly_buffer {
+ void *addr;
+ dma_addr_t dma_addr;
+ int end_offset; /* Counting elements, not bytes */
+};
+
+struct xilly_cleanup {
+ struct list_head to_kfree;
+ struct list_head to_pagefree;
+ struct list_head to_unmap;
+};
+
+struct xilly_idt_handle {
+ unsigned char *chandesc;
+ unsigned char *idt;
+ int entries;
+};
+
+/*
+ * Read-write confusion: wr_* and rd_* notation sticks to FPGA view, so
+ * wr_* buffers are those consumed by read(), since the FPGA writes to them
+ * and vice versa.
+ */
+
+struct xilly_channel {
+ struct xilly_endpoint *endpoint;
+ int chan_num;
+ int log2_element_size;
+ int seekable;
+
+ struct xilly_buffer **wr_buffers; /* FPGA writes, driver reads! */
+ int num_wr_buffers;
+ unsigned int wr_buf_size; /* In bytes */
+ int wr_fpga_buf_idx;
+ int wr_host_buf_idx;
+ int wr_host_buf_pos;
+ int wr_empty;
+ int wr_ready; /* Significant only when wr_empty == 1 */
+ int wr_sleepy;
+ int wr_eof;
+ int wr_hangup;
+ spinlock_t wr_spinlock;
+ struct mutex wr_mutex;
+ wait_queue_head_t wr_wait;
+ wait_queue_head_t wr_ready_wait;
+ int wr_ref_count;
+ int wr_synchronous;
+ int wr_allow_partial;
+ int wr_exclusive_open;
+ int wr_supports_nonempty;
+
+ struct xilly_buffer **rd_buffers; /* FPGA reads, driver writes! */
+ int num_rd_buffers;
+ unsigned int rd_buf_size; /* In bytes */
+ int rd_fpga_buf_idx;
+ int rd_host_buf_pos;
+ int rd_host_buf_idx;
+ int rd_full;
+ spinlock_t rd_spinlock;
+ struct mutex rd_mutex;
+ wait_queue_head_t rd_wait;
+ int rd_ref_count;
+ int rd_allow_partial;
+ int rd_synchronous;
+ int rd_exclusive_open;
+ struct delayed_work rd_workitem;
+ unsigned char rd_leftovers[4];
+};
+
+struct xilly_endpoint {
+ /*
+ * One of pdev and dev is always NULL, and the other is a valid
+ * pointer, depending on the type of device
+ */
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct resource res; /* OF devices only */
+ struct xilly_endpoint_hardware *ephw;
+
+ struct list_head ep_list;
+ int dma_using_dac; /* =1 if 64-bit DMA is used, =0 otherwise. */
+ __iomem u32 *registers;
+ int fatal_error;
+
+ struct mutex register_mutex;
+ wait_queue_head_t ep_wait;
+
+ /* List of memory allocations, to make release easy */
+ struct xilly_cleanup cleanup;
+
+ /* Channels and message handling */
+ struct cdev cdev;
+
+ int major;
+ int lowest_minor; /* Highest minor = lowest_minor + num_channels - 1 */
+
+ int num_channels; /* EXCLUDING message buffer */
+ struct xilly_channel **channels;
+ int msg_counter;
+ int failed_messages;
+ int idtlen;
+
+ u32 *msgbuf_addr;
+ dma_addr_t msgbuf_dma_addr;
+ unsigned int msg_buf_size;
+};
+
+struct xilly_endpoint_hardware {
+ struct module *owner;
+ void (*hw_sync_sgl_for_cpu)(struct xilly_endpoint *,
+ dma_addr_t,
+ size_t,
+ int);
+ void (*hw_sync_sgl_for_device)(struct xilly_endpoint *,
+ dma_addr_t,
+ size_t,
+ int);
+ dma_addr_t (*map_single)(struct xilly_cleanup *,
+ struct xilly_endpoint *,
+ void *,
+ size_t,
+ int);
+ void (*unmap_single)(struct xilly_dma *entry);
+};
+
+irqreturn_t xillybus_isr(int irq, void *data);
+
+void xillybus_do_cleanup(struct xilly_cleanup *mem,
+ struct xilly_endpoint *endpoint);
+
+struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev,
+ struct device *dev,
+ struct xilly_endpoint_hardware
+ *ephw);
+
+int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint);
+
+void xillybus_endpoint_remove(struct xilly_endpoint *endpoint);
+
+#endif /* __XILLYBUS_H */
diff --git a/drivers/staging/xillybus/xillybus_core.c b/drivers/staging/xillybus/xillybus_core.c
new file mode 100644
index 00000000000..efc56987a60
--- /dev/null
+++ b/drivers/staging/xillybus/xillybus_core.c
@@ -0,0 +1,2345 @@
+/*
+ * linux/drivers/misc/xillybus_core.c
+ *
+ * Copyright 2011 Xillybus Ltd, http://xillybus.com
+ *
+ * Driver for the Xillybus FPGA/host framework.
+ *
+ * This driver interfaces with a special IP core in an FPGA, setting up
+ * a pipe between a hardware FIFO in the programmable logic and a device
+ * file in the host. The number of such pipes and their attributes are
+ * set up on the logic. This driver detects these automatically and
+ * creates the device files accordingly.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/list.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/crc32.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include "xillybus.h"
+
+MODULE_DESCRIPTION("Xillybus core functions");
+MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
+MODULE_VERSION("1.07");
+MODULE_ALIAS("xillybus_core");
+MODULE_LICENSE("GPL v2");
+
+/* General timeout is 100 ms, rx timeout is 10 ms */
+#define XILLY_RX_TIMEOUT (10*HZ/1000)
+#define XILLY_TIMEOUT (100*HZ/1000)
+
+#define fpga_msg_ctrl_reg 0x0002
+#define fpga_dma_control_reg 0x0008
+#define fpga_dma_bufno_reg 0x0009
+#define fpga_dma_bufaddr_lowaddr_reg 0x000a
+#define fpga_dma_bufaddr_highaddr_reg 0x000b
+#define fpga_buf_ctrl_reg 0x000c
+#define fpga_buf_offset_reg 0x000d
+#define fpga_endian_reg 0x0010
+
+#define XILLYMSG_OPCODE_RELEASEBUF 1
+#define XILLYMSG_OPCODE_QUIESCEACK 2
+#define XILLYMSG_OPCODE_FIFOEOF 3
+#define XILLYMSG_OPCODE_FATAL_ERROR 4
+#define XILLYMSG_OPCODE_NONEMPTY 5
+
+static const char xillyname[] = "xillybus";
+
+static struct class *xillybus_class;
+
+/*
+ * ep_list_lock is the last lock to be taken; No other lock requests are
+ * allowed while holding it. It merely protects list_of_endpoints, and not
+ * the endpoints listed in it.
+ */
+
+static LIST_HEAD(list_of_endpoints);
+static struct mutex ep_list_lock;
+static struct workqueue_struct *xillybus_wq;
+
+/*
+ * Locking scheme: Mutexes protect invocations of character device methods.
+ * If both locks are taken, wr_mutex is taken first, rd_mutex second.
+ *
+ * wr_spinlock protects wr_*_buf_idx, wr_empty, wr_sleepy, wr_ready and the
+ * buffers' end_offset fields against changes made by IRQ handler (and in
+ * theory, other file request handlers, but the mutex handles that). Nothing
+ * else.
+ * They are held for short direct memory manipulations. Needless to say,
+ * no mutex locking is allowed when a spinlock is held.
+ *
+ * rd_spinlock does the same with rd_*_buf_idx, rd_empty and end_offset.
+ *
+ * register_mutex is endpoint-specific, and is held when non-atomic
+ * register operations are performed. wr_mutex and rd_mutex may be
+ * held when register_mutex is taken, but none of the spinlocks. Note that
+ * register_mutex doesn't protect against sporadic buf_ctrl_reg writes
+ * which are unrelated to buf_offset_reg, since they are harmless.
+ *
+ * Blocking on the wait queues is allowed with mutexes held, but not with
+ * spinlocks.
+ *
+ * Only interruptible blocking is allowed on mutexes and wait queues.
+ *
+ * All in all, the locking order goes (with skips allowed, of course):
+ * wr_mutex -> rd_mutex -> register_mutex -> wr_spinlock -> rd_spinlock
+ */
+
+static void malformed_message(u32 *buf)
+{
+ int opcode;
+ int msg_channel, msg_bufno, msg_data, msg_dir;
+
+ opcode = (buf[0] >> 24) & 0xff;
+ msg_dir = buf[0] & 1;
+ msg_channel = (buf[0] >> 1) & 0x7ff;
+ msg_bufno = (buf[0] >> 12) & 0x3ff;
+ msg_data = buf[1] & 0xfffffff;
+
+ pr_warn("xillybus: Malformed message (skipping): "
+ "opcode=%d, channel=%03x, dir=%d, bufno=%03x, data=%07x\n",
+ opcode, msg_channel, msg_dir, msg_bufno, msg_data);
+}
+
+/*
+ * xillybus_isr assumes the interrupt is allocated exclusively to it,
+ * which is the natural case with MSI and several other hardware-oriented
+ * interrupts. Sharing is not allowed.
+ */
+
+irqreturn_t xillybus_isr(int irq, void *data)
+{
+ struct xilly_endpoint *ep = data;
+ u32 *buf;
+ unsigned int buf_size;
+ int i;
+ int opcode;
+ unsigned int msg_channel, msg_bufno, msg_data, msg_dir;
+ struct xilly_channel *channel;
+
+ /*
+ * The endpoint structure is altered during periods when it's
+ * guaranteed no interrupt will occur, but in theory, the cache
+ * lines may not be updated. So a memory barrier is issued.
+ */
+
+ smp_rmb();
+
+ buf = ep->msgbuf_addr;
+ buf_size = ep->msg_buf_size/sizeof(u32);
+
+
+ ep->ephw->hw_sync_sgl_for_cpu(ep,
+ ep->msgbuf_dma_addr,
+ ep->msg_buf_size,
+ DMA_FROM_DEVICE);
+
+ for (i = 0; i < buf_size; i += 2)
+ if (((buf[i+1] >> 28) & 0xf) != ep->msg_counter) {
+ malformed_message(&buf[i]);
+ pr_warn("xillybus: Sending a NACK on "
+ "counter %x (instead of %x) on entry %d\n",
+ ((buf[i+1] >> 28) & 0xf),
+ ep->msg_counter,
+ i/2);
+
+ if (++ep->failed_messages > 10)
+ pr_err("xillybus: Lost sync with "
+ "interrupt messages. Stopping.\n");
+ else {
+ ep->ephw->hw_sync_sgl_for_device(
+ ep,
+ ep->msgbuf_dma_addr,
+ ep->msg_buf_size,
+ DMA_FROM_DEVICE);
+
+ iowrite32(0x01, /* Message NACK */
+ &ep->registers[fpga_msg_ctrl_reg]);
+ }
+ return IRQ_HANDLED;
+ } else if (buf[i] & (1 << 22)) /* Last message */
+ break;
+
+ if (i >= buf_size) {
+ pr_err("xillybus: Bad interrupt message. Stopping.\n");
+ return IRQ_HANDLED;
+ }
+
+ buf_size = i;
+
+ for (i = 0; i <= buf_size; i += 2) { /* Scan through messages */
+ opcode = (buf[i] >> 24) & 0xff;
+
+ msg_dir = buf[i] & 1;
+ msg_channel = (buf[i] >> 1) & 0x7ff;
+ msg_bufno = (buf[i] >> 12) & 0x3ff;
+ msg_data = buf[i+1] & 0xfffffff;
+
+ switch (opcode) {
+ case XILLYMSG_OPCODE_RELEASEBUF:
+
+ if ((msg_channel > ep->num_channels) ||
+ (msg_channel == 0)) {
+ malformed_message(&buf[i]);
+ break;
+ }
+
+ channel = ep->channels[msg_channel];
+
+ if (msg_dir) { /* Write channel */
+ if (msg_bufno >= channel->num_wr_buffers) {
+ malformed_message(&buf[i]);
+ break;
+ }
+ spin_lock(&channel->wr_spinlock);
+ channel->wr_buffers[msg_bufno]->end_offset =
+ msg_data;
+ channel->wr_fpga_buf_idx = msg_bufno;
+ channel->wr_empty = 0;
+ channel->wr_sleepy = 0;
+ spin_unlock(&channel->wr_spinlock);
+
+ wake_up_interruptible(&channel->wr_wait);
+
+ } else {
+ /* Read channel */
+
+ if (msg_bufno >= channel->num_rd_buffers) {
+ malformed_message(&buf[i]);
+ break;
+ }
+
+ spin_lock(&channel->rd_spinlock);
+ channel->rd_fpga_buf_idx = msg_bufno;
+ channel->rd_full = 0;
+ spin_unlock(&channel->rd_spinlock);
+
+ wake_up_interruptible(&channel->rd_wait);
+ if (!channel->rd_synchronous)
+ queue_delayed_work(
+ xillybus_wq,
+ &channel->rd_workitem,
+ XILLY_RX_TIMEOUT);
+ }
+
+ break;
+ case XILLYMSG_OPCODE_NONEMPTY:
+ if ((msg_channel > ep->num_channels) ||
+ (msg_channel == 0) || (!msg_dir) ||
+ !ep->channels[msg_channel]->wr_supports_nonempty) {
+ malformed_message(&buf[i]);
+ break;
+ }
+
+ channel = ep->channels[msg_channel];
+
+ if (msg_bufno >= channel->num_wr_buffers) {
+ malformed_message(&buf[i]);
+ break;
+ }
+ spin_lock(&channel->wr_spinlock);
+ if (msg_bufno == channel->wr_host_buf_idx)
+ channel->wr_ready = 1;
+ spin_unlock(&channel->wr_spinlock);
+
+ wake_up_interruptible(&channel->wr_ready_wait);
+
+ break;
+ case XILLYMSG_OPCODE_QUIESCEACK:
+ ep->idtlen = msg_data;
+ wake_up_interruptible(&ep->ep_wait);
+
+ break;
+ case XILLYMSG_OPCODE_FIFOEOF:
+ channel = ep->channels[msg_channel];
+ spin_lock(&channel->wr_spinlock);
+ channel->wr_eof = msg_bufno;
+ channel->wr_sleepy = 0;
+
+ channel->wr_hangup = channel->wr_empty &&
+ (channel->wr_host_buf_idx == msg_bufno);
+
+ spin_unlock(&channel->wr_spinlock);
+
+ wake_up_interruptible(&channel->wr_wait);
+
+ break;
+ case XILLYMSG_OPCODE_FATAL_ERROR:
+ ep->fatal_error = 1;
+ wake_up_interruptible(&ep->ep_wait); /* For select() */
+ pr_err("xillybus: FPGA reported a fatal "
+ "error. This means that the low-level "
+ "communication with the device has failed. "
+ "This hardware problem is most likely "
+ "unrelated to xillybus (neither kernel "
+ "module nor FPGA core), but reports are "
+ "still welcome. All I/O is aborted.\n");
+ break;
+ default:
+ malformed_message(&buf[i]);
+ break;
+ }
+ }
+
+ ep->ephw->hw_sync_sgl_for_device(ep,
+ ep->msgbuf_dma_addr,
+ ep->msg_buf_size,
+ DMA_FROM_DEVICE);
+
+ ep->msg_counter = (ep->msg_counter + 1) & 0xf;
+ ep->failed_messages = 0;
+ iowrite32(0x03, &ep->registers[fpga_msg_ctrl_reg]); /* Message ACK */
+
+ return IRQ_HANDLED;
+}
+EXPORT_SYMBOL(xillybus_isr);
+
+/*
+ * A few trivial memory management functions.
+ * NOTE: These functions are used only on probe and remove, and therefore
+ * no locks are applied!
+ */
+
+void xillybus_do_cleanup(struct xilly_cleanup *mem,
+ struct xilly_endpoint *endpoint)
+{
+ struct list_head *this, *next;
+
+ list_for_each_safe(this, next, &mem->to_unmap) {
+ struct xilly_dma *entry =
+ list_entry(this, struct xilly_dma, node);
+
+ endpoint->ephw->unmap_single(entry);
+ kfree(entry);
+ }
+
+ INIT_LIST_HEAD(&mem->to_unmap);
+
+ list_for_each_safe(this, next, &mem->to_kfree)
+ kfree(this);
+
+ INIT_LIST_HEAD(&mem->to_kfree);
+
+ list_for_each_safe(this, next, &mem->to_pagefree) {
+ struct xilly_page *entry =
+ list_entry(this, struct xilly_page, node);
+
+ free_pages(entry->addr, entry->order);
+ kfree(entry);
+ }
+ INIT_LIST_HEAD(&mem->to_pagefree);
+}
+EXPORT_SYMBOL(xillybus_do_cleanup);
+
+static void *xilly_malloc(struct xilly_cleanup *mem, size_t size)
+{
+ void *ptr;
+
+ ptr = kzalloc(sizeof(struct list_head) + size, GFP_KERNEL);
+
+ if (!ptr)
+ return ptr;
+
+ list_add_tail((struct list_head *) ptr, &mem->to_kfree);
+
+ return ptr + sizeof(struct list_head);
+}
+
+static unsigned long xilly_pagealloc(struct xilly_cleanup *mem,
+ unsigned long order)
+{
+ unsigned long addr;
+ struct xilly_page *this;
+
+ this = kmalloc(sizeof(struct xilly_page), GFP_KERNEL);
+ if (!this)
+ return 0;
+
+ addr = __get_free_pages(GFP_KERNEL | __GFP_DMA32 | __GFP_ZERO, order);
+
+ if (!addr) {
+ kfree(this);
+ return 0;
+ }
+
+ this->addr = addr;
+ this->order = order;
+
+ list_add_tail(&this->node, &mem->to_pagefree);
+
+ return addr;
+}
+
+
+static void xillybus_autoflush(struct work_struct *work);
+
+static int xilly_setupchannels(struct xilly_endpoint *ep,
+ struct xilly_cleanup *mem,
+ unsigned char *chandesc,
+ int entries
+ )
+{
+ int i, entry, wr_nbuffer, rd_nbuffer;
+ struct xilly_channel *channel;
+ int channelnum, bufnum, bufsize, format, is_writebuf;
+ int bytebufsize;
+ int synchronous, allowpartial, exclusive_open, seekable;
+ int supports_nonempty;
+ void *wr_salami = NULL;
+ void *rd_salami = NULL;
+ int left_of_wr_salami = 0;
+ int left_of_rd_salami = 0;
+ dma_addr_t dma_addr;
+ int msg_buf_done = 0;
+
+ struct xilly_buffer *this_buffer = NULL; /* Init to silence warning */
+
+ channel = xilly_malloc(mem, ep->num_channels *
+ sizeof(struct xilly_channel));
+
+ if (!channel)
+ goto memfail;
+
+ ep->channels = xilly_malloc(mem, (ep->num_channels + 1) *
+ sizeof(struct xilly_channel *));
+
+ if (!ep->channels)
+ goto memfail;
+
+ ep->channels[0] = NULL; /* Channel 0 is message buf. */
+
+ /* Initialize all channels with defaults */
+
+ for (i = 1; i <= ep->num_channels; i++) {
+ channel->wr_buffers = NULL;
+ channel->rd_buffers = NULL;
+ channel->num_wr_buffers = 0;
+ channel->num_rd_buffers = 0;
+ channel->wr_fpga_buf_idx = -1;
+ channel->wr_host_buf_idx = 0;
+ channel->wr_host_buf_pos = 0;
+ channel->wr_empty = 1;
+ channel->wr_ready = 0;
+ channel->wr_sleepy = 1;
+ channel->rd_fpga_buf_idx = 0;
+ channel->rd_host_buf_idx = 0;
+ channel->rd_host_buf_pos = 0;
+ channel->rd_full = 0;
+ channel->wr_ref_count = 0;
+ channel->rd_ref_count = 0;
+
+ spin_lock_init(&channel->wr_spinlock);
+ spin_lock_init(&channel->rd_spinlock);
+ mutex_init(&channel->wr_mutex);
+ mutex_init(&channel->rd_mutex);
+ init_waitqueue_head(&channel->rd_wait);
+ init_waitqueue_head(&channel->wr_wait);
+ init_waitqueue_head(&channel->wr_ready_wait);
+
+ INIT_DELAYED_WORK(&channel->rd_workitem, xillybus_autoflush);
+
+ channel->endpoint = ep;
+ channel->chan_num = i;
+
+ channel->log2_element_size = 0;
+
+ ep->channels[i] = channel++;
+ }
+
+ /*
+ * The DMA buffer address update is atomic on the FPGA, so even if
+ * it was in the middle of sending messages to some buffer, changing
+ * the address is safe, since the data will go to either of the
+ * buffers. Not that this situation should occur at all anyhow.
+ */
+
+ wr_nbuffer = 1;
+ rd_nbuffer = 1; /* Buffer zero isn't used at all */
+
+ for (entry = 0; entry < entries; entry++, chandesc += 4) {
+ is_writebuf = chandesc[0] & 0x01;
+ channelnum = (chandesc[0] >> 1) | ((chandesc[1] & 0x0f) << 7);
+ format = (chandesc[1] >> 4) & 0x03;
+ allowpartial = (chandesc[1] >> 6) & 0x01;
+ synchronous = (chandesc[1] >> 7) & 0x01;
+ bufsize = 1 << (chandesc[2] & 0x1f);
+ bufnum = 1 << (chandesc[3] & 0x0f);
+ exclusive_open = (chandesc[2] >> 7) & 0x01;
+ seekable = (chandesc[2] >> 6) & 0x01;
+ supports_nonempty = (chandesc[2] >> 5) & 0x01;
+
+ if ((channelnum > ep->num_channels) ||
+ ((channelnum == 0) && !is_writebuf)) {
+ pr_err("xillybus: IDT requests channel out "
+ "of range. Aborting.\n");
+ return -ENODEV;
+ }
+
+ channel = ep->channels[channelnum]; /* NULL for msg channel */
+
+ bytebufsize = bufsize << 2; /* Overwritten just below */
+
+ if (!is_writebuf) {
+ channel->num_rd_buffers = bufnum;
+ channel->log2_element_size = ((format > 2) ?
+ 2 : format);
+ bytebufsize = channel->rd_buf_size = bufsize *
+ (1 << channel->log2_element_size);
+ channel->rd_allow_partial = allowpartial;
+ channel->rd_synchronous = synchronous;
+ channel->rd_exclusive_open = exclusive_open;
+ channel->seekable = seekable;
+
+ channel->rd_buffers = xilly_malloc(
+ mem,
+ bufnum * sizeof(struct xilly_buffer *));
+
+ if (!channel->rd_buffers)
+ goto memfail;
+
+ this_buffer = xilly_malloc(
+ mem,
+ bufnum * sizeof(struct xilly_buffer));
+
+ if (!this_buffer)
+ goto memfail;
+ }
+
+ else if (channelnum > 0) {
+ channel->num_wr_buffers = bufnum;
+ channel->log2_element_size = ((format > 2) ?
+ 2 : format);
+ bytebufsize = channel->wr_buf_size = bufsize *
+ (1 << channel->log2_element_size);
+
+ channel->seekable = seekable;
+ channel->wr_supports_nonempty = supports_nonempty;
+
+ channel->wr_allow_partial = allowpartial;
+ channel->wr_synchronous = synchronous;
+ channel->wr_exclusive_open = exclusive_open;
+
+ channel->wr_buffers = xilly_malloc(
+ mem,
+ bufnum * sizeof(struct xilly_buffer *));
+
+ if (!channel->wr_buffers)
+ goto memfail;
+
+ this_buffer = xilly_malloc(
+ mem,
+ bufnum * sizeof(struct xilly_buffer));
+
+ if (!this_buffer)
+ goto memfail;
+ }
+
+ /*
+ * Although daunting, we cut the chunks for read buffers
+ * from a different salami than the write buffers',
+ * possibly improving performance.
+ */
+
+ if (is_writebuf)
+ for (i = 0; i < bufnum; i++) {
+ /*
+ * Buffers are expected in descending
+ * byte-size order, so there is either
+ * enough for this buffer or none at all.
+ */
+ if ((left_of_wr_salami < bytebufsize) &&
+ (left_of_wr_salami > 0)) {
+ pr_err("xillybus: "
+ "Corrupt buffer allocation "
+ "in IDT. Aborting.\n");
+ return -ENODEV;
+ }
+
+ if (left_of_wr_salami == 0) {
+ int allocorder, allocsize;
+
+ allocsize = PAGE_SIZE;
+ allocorder = 0;
+ while (bytebufsize > allocsize) {
+ allocsize *= 2;
+ allocorder++;
+ }
+
+ wr_salami = (void *)
+ xilly_pagealloc(mem,
+ allocorder);
+ if (!wr_salami)
+ goto memfail;
+ left_of_wr_salami = allocsize;
+ }
+
+ dma_addr = ep->ephw->map_single(
+ mem,
+ ep,
+ wr_salami,
+ bytebufsize,
+ DMA_FROM_DEVICE);
+
+ if (!dma_addr)
+ goto dmafail;
+
+ iowrite32(
+ (u32) (dma_addr & 0xffffffff),
+ &ep->registers[
+ fpga_dma_bufaddr_lowaddr_reg]
+ );
+ iowrite32(
+ ((u32) ((((u64) dma_addr) >> 32)
+ & 0xffffffff)),
+ &ep->registers[
+ fpga_dma_bufaddr_highaddr_reg]
+ );
+ mmiowb();
+
+ if (channelnum > 0) {
+ this_buffer->addr = wr_salami;
+ this_buffer->dma_addr = dma_addr;
+ channel->wr_buffers[i] = this_buffer++;
+
+ iowrite32(
+ 0x80000000 | wr_nbuffer++,
+ &ep->registers[
+ fpga_dma_bufno_reg]);
+ } else {
+ ep->msgbuf_addr = wr_salami;
+ ep->msgbuf_dma_addr = dma_addr;
+ ep->msg_buf_size = bytebufsize;
+ msg_buf_done++;
+
+ iowrite32(
+ 0x80000000, &ep->registers[
+ fpga_dma_bufno_reg]);
+ }
+
+ left_of_wr_salami -= bytebufsize;
+ wr_salami += bytebufsize;
+ }
+ else /* Read buffers */
+ for (i = 0; i < bufnum; i++) {
+ /*
+ * Buffers are expected in descending
+ * byte-size order, so there is either
+ * enough for this buffer or none at all.
+ */
+ if ((left_of_rd_salami < bytebufsize) &&
+ (left_of_rd_salami > 0)) {
+ pr_err("xillybus: "
+ "Corrupt buffer allocation "
+ "in IDT. Aborting.\n");
+ return -ENODEV;
+ }
+
+ if (left_of_rd_salami == 0) {
+ int allocorder, allocsize;
+
+ allocsize = PAGE_SIZE;
+ allocorder = 0;
+ while (bytebufsize > allocsize) {
+ allocsize *= 2;
+ allocorder++;
+ }
+
+ rd_salami = (void *)
+ xilly_pagealloc(
+ mem,
+ allocorder);
+
+ if (!rd_salami)
+ goto memfail;
+ left_of_rd_salami = allocsize;
+ }
+
+ dma_addr = ep->ephw->map_single(
+ mem,
+ ep,
+ rd_salami,
+ bytebufsize,
+ DMA_TO_DEVICE);
+
+ if (!dma_addr)
+ goto dmafail;
+
+ iowrite32(
+ (u32) (dma_addr & 0xffffffff),
+ &ep->registers[
+ fpga_dma_bufaddr_lowaddr_reg]
+ );
+ iowrite32(
+ ((u32) ((((u64) dma_addr) >> 32)
+ & 0xffffffff)),
+ &ep->registers[
+ fpga_dma_bufaddr_highaddr_reg]
+ );
+ mmiowb();
+
+ this_buffer->addr = rd_salami;
+ this_buffer->dma_addr = dma_addr;
+ channel->rd_buffers[i] = this_buffer++;
+
+ iowrite32(rd_nbuffer++,
+ &ep->registers[fpga_dma_bufno_reg]);
+
+ left_of_rd_salami -= bytebufsize;
+ rd_salami += bytebufsize;
+ }
+ }
+
+ if (!msg_buf_done) {
+ pr_err("xillybus: Corrupt IDT: No message buffer. "
+ "Aborting.\n");
+ return -ENODEV;
+ }
+
+ return 0;
+
+memfail:
+ pr_err("xillybus: Failed to allocate write buffer memory. "
+ "Aborting.\n");
+ return -ENOMEM;
+dmafail:
+ pr_err("xillybus: Failed to map DMA memory. Aborting.\n");
+ return -ENOMEM;
+}
+
+static void xilly_scan_idt(struct xilly_endpoint *endpoint,
+ struct xilly_idt_handle *idt_handle)
+{
+ int count = 0;
+ unsigned char *idt = endpoint->channels[1]->wr_buffers[0]->addr;
+ unsigned char *end_of_idt = idt + endpoint->idtlen - 4;
+ unsigned char *scan;
+ int len;
+
+ scan = idt;
+ idt_handle->idt = idt;
+
+ scan++; /* Skip version number */
+
+ while ((scan <= end_of_idt) && *scan) {
+ while ((scan <= end_of_idt) && *scan++)
+ /* Do nothing, just scan thru string */;
+ count++;
+ }
+
+ scan++;
+
+ if (scan > end_of_idt) {
+ pr_err("xillybus: IDT device name list overflow. "
+ "Aborting.\n");
+ idt_handle->chandesc = NULL;
+ return;
+ } else
+ idt_handle->chandesc = scan;
+
+ len = endpoint->idtlen - (3 + ((int) (scan - idt)));
+
+ if (len & 0x03) {
+ idt_handle->chandesc = NULL;
+
+ pr_err("xillybus: Corrupt IDT device name list. "
+ "Aborting.\n");
+ }
+
+ idt_handle->entries = len >> 2;
+
+ endpoint->num_channels = count;
+}
+
+static int xilly_obtain_idt(struct xilly_endpoint *endpoint)
+{
+ int rc = 0;
+ struct xilly_channel *channel;
+ unsigned char *version;
+
+ channel = endpoint->channels[1]; /* This should be generated ad-hoc */
+
+ channel->wr_sleepy = 1;
+ wmb(); /* Setting wr_sleepy must come before the command */
+
+ iowrite32(1 |
+ (3 << 24), /* Opcode 3 for channel 0 = Send IDT */
+ &endpoint->registers[fpga_buf_ctrl_reg]);
+ mmiowb(); /* Just to appear safe */
+
+ wait_event_interruptible_timeout(channel->wr_wait,
+ (!channel->wr_sleepy),
+ XILLY_TIMEOUT);
+
+ if (channel->wr_sleepy) {
+ pr_err("xillybus: Failed to obtain IDT. Aborting.\n");
+
+ if (endpoint->fatal_error)
+ return -EIO;
+
+ rc = -ENODEV;
+ return rc;
+ }
+
+ endpoint->ephw->hw_sync_sgl_for_cpu(
+ channel->endpoint,
+ channel->wr_buffers[0]->dma_addr,
+ channel->wr_buf_size,
+ DMA_FROM_DEVICE);
+
+ if (channel->wr_buffers[0]->end_offset != endpoint->idtlen) {
+ pr_err("xillybus: IDT length mismatch (%d != %d). "
+ "Aborting.\n",
+ channel->wr_buffers[0]->end_offset, endpoint->idtlen);
+ rc = -ENODEV;
+ return rc;
+ }
+
+ if (crc32_le(~0, channel->wr_buffers[0]->addr,
+ endpoint->idtlen+1) != 0) {
+ pr_err("xillybus: IDT failed CRC check. Aborting.\n");
+ rc = -ENODEV;
+ return rc;
+ }
+
+ version = channel->wr_buffers[0]->addr;
+
+ /* Check version number. Accept anything below 0x82 for now. */
+ if (*version > 0x82) {
+ pr_err("xillybus: No support for IDT version 0x%02x. "
+ "Maybe the xillybus driver needs an upgrade. "
+ "Aborting.\n",
+ (int) *version);
+ rc = -ENODEV;
+ return rc;
+ }
+
+ return 0; /* Success */
+}
+
+static ssize_t xillybus_read(struct file *filp, char __user *userbuf,
+ size_t count, loff_t *f_pos)
+{
+ ssize_t rc;
+ unsigned long flags;
+ int bytes_done = 0;
+ int no_time_left = 0;
+ long deadline, left_to_sleep;
+ struct xilly_channel *channel = filp->private_data;
+
+ int empty, reached_eof, exhausted, ready;
+ /* Initializations are there only to silence warnings */
+
+ int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
+ int waiting_bufidx;
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ deadline = jiffies + 1 + XILLY_RX_TIMEOUT;
+
+ rc = mutex_lock_interruptible(&channel->wr_mutex);
+
+ if (rc)
+ return rc;
+
+ rc = 0; /* Just to be clear about it. Compiler optimizes this out */
+
+ while (1) { /* Note that we may drop mutex within this loop */
+ int bytes_to_do = count - bytes_done;
+ spin_lock_irqsave(&channel->wr_spinlock, flags);
+
+ empty = channel->wr_empty;
+ ready = !empty || channel->wr_ready;
+
+ if (!empty) {
+ bufidx = channel->wr_host_buf_idx;
+ bufpos = channel->wr_host_buf_pos;
+ howmany = ((channel->wr_buffers[bufidx]->end_offset
+ + 1) << channel->log2_element_size)
+ - bufpos;
+
+ /* Update wr_host_* to its post-operation state */
+ if (howmany > bytes_to_do) {
+ bufferdone = 0;
+
+ howmany = bytes_to_do;
+ channel->wr_host_buf_pos += howmany;
+ } else {
+ bufferdone = 1;
+
+ channel->wr_host_buf_pos = 0;
+
+ if (bufidx == channel->wr_fpga_buf_idx) {
+ channel->wr_empty = 1;
+ channel->wr_sleepy = 1;
+ channel->wr_ready = 0;
+ }
+
+ if (bufidx >= (channel->num_wr_buffers - 1))
+ channel->wr_host_buf_idx = 0;
+ else
+ channel->wr_host_buf_idx++;
+ }
+ }
+
+ /*
+ * Marking our situation after the possible changes above,
+ * for use after releasing the spinlock.
+ *
+ * empty = empty before change
+ * exhausted = empty after possible change
+ */
+
+ reached_eof = channel->wr_empty &&
+ (channel->wr_host_buf_idx == channel->wr_eof);
+ channel->wr_hangup = reached_eof;
+ exhausted = channel->wr_empty;
+ waiting_bufidx = channel->wr_host_buf_idx;
+
+ spin_unlock_irqrestore(&channel->wr_spinlock, flags);
+
+ if (!empty) { /* Go on, now without the spinlock */
+
+ if (bufpos == 0) /* Position zero means it's virgin */
+ channel->endpoint->ephw->hw_sync_sgl_for_cpu(
+ channel->endpoint,
+ channel->wr_buffers[bufidx]->dma_addr,
+ channel->wr_buf_size,
+ DMA_FROM_DEVICE);
+
+ if (copy_to_user(
+ userbuf,
+ channel->wr_buffers[bufidx]->addr
+ + bufpos, howmany))
+ rc = -EFAULT;
+
+ userbuf += howmany;
+ bytes_done += howmany;
+
+ if (bufferdone) {
+ channel->endpoint->ephw->
+ hw_sync_sgl_for_device
+ (
+ channel->endpoint,
+ channel->wr_buffers[bufidx]->
+ dma_addr,
+ channel->wr_buf_size,
+ DMA_FROM_DEVICE);
+
+ /*
+ * Tell FPGA the buffer is done with. It's an
+ * atomic operation to the FPGA, so what
+ * happens with other channels doesn't matter,
+ * and the certain channel is protected with
+ * the channel-specific mutex.
+ */
+
+ iowrite32(1 | (channel->chan_num << 1)
+ | (bufidx << 12),
+ &channel->endpoint->registers[
+ fpga_buf_ctrl_reg]);
+ mmiowb(); /* Just to appear safe */
+ }
+
+ if (rc) {
+ mutex_unlock(&channel->wr_mutex);
+ return rc;
+ }
+ }
+
+ /* This includes a zero-count return = EOF */
+ if ((bytes_done >= count) || reached_eof)
+ break;
+
+ if (!exhausted)
+ continue; /* More in RAM buffer(s)? Just go on. */
+
+ if ((bytes_done > 0) &&
+ (no_time_left ||
+ (channel->wr_synchronous && channel->wr_allow_partial)))
+ break;
+
+ /*
+ * Nonblocking read: The "ready" flag tells us that the FPGA
+ * has data to send. In non-blocking mode, if it isn't on,
+ * just return. But if there is, we jump directly to the point
+ * where we ask for the FPGA to send all it has, and wait
+ * until that data arrives. So in a sense, we *do* block in
+ * nonblocking mode, but only for a very short time.
+ */
+
+ if (!no_time_left && (filp->f_flags & O_NONBLOCK)) {
+ if (bytes_done > 0)
+ break;
+
+ if (ready)
+ goto desperate;
+
+ bytes_done = -EAGAIN;
+ break;
+ }
+
+ if (!no_time_left || (bytes_done > 0)) {
+ /*
+ * Note that in case of an element-misaligned read
+ * request, offsetlimit will include the last element,
+ * which will be partially read from.
+ */
+ int offsetlimit = ((count - bytes_done) - 1) >>
+ channel->log2_element_size;
+ int buf_elements = channel->wr_buf_size >>
+ channel->log2_element_size;
+
+ /*
+ * In synchronous mode, always send an offset limit.
+ * Just don't send a value too big.
+ */
+
+ if (channel->wr_synchronous) {
+ /* Don't request more than one buffer */
+ if (channel->wr_allow_partial &&
+ (offsetlimit >= buf_elements))
+ offsetlimit = buf_elements - 1;
+
+ /* Don't request more than all buffers */
+ if (!channel->wr_allow_partial &&
+ (offsetlimit >=
+ (buf_elements * channel->num_wr_buffers)))
+ offsetlimit = buf_elements *
+ channel->num_wr_buffers - 1;
+ }
+
+ /*
+ * In asynchronous mode, force early flush of a buffer
+ * only if that will allow returning a full count. The
+ * "offsetlimit < ( ... )" rather than "<=" excludes
+ * requesting a full buffer, which would obviously
+ * cause a buffer transmission anyhow
+ */
+
+ if (channel->wr_synchronous ||
+ (offsetlimit < (buf_elements - 1))) {
+
+ mutex_lock(&channel->endpoint->register_mutex);
+
+ iowrite32(offsetlimit,
+ &channel->endpoint->registers[
+ fpga_buf_offset_reg]);
+ mmiowb();
+
+ iowrite32(1 | (channel->chan_num << 1) |
+ (2 << 24) | /* 2 = offset limit */
+ (waiting_bufidx << 12),
+ &channel->endpoint->registers[
+ fpga_buf_ctrl_reg]);
+
+ mmiowb(); /* Just to appear safe */
+
+ mutex_unlock(&channel->endpoint->
+ register_mutex);
+ }
+
+ }
+
+ /*
+ * If partial completion is disallowed, there is no point in
+ * timeout sleeping. Neither if no_time_left is set and
+ * there's no data.
+ */
+
+ if (!channel->wr_allow_partial ||
+ (no_time_left && (bytes_done == 0))) {
+
+ /*
+ * This do-loop will run more than once if another
+ * thread reasserted wr_sleepy before we got the mutex
+ * back, so we try again.
+ */
+
+ do {
+ mutex_unlock(&channel->wr_mutex);
+
+ if (wait_event_interruptible(
+ channel->wr_wait,
+ (!channel->wr_sleepy)))
+ goto interrupted;
+
+ if (mutex_lock_interruptible(
+ &channel->wr_mutex))
+ goto interrupted;
+ } while (channel->wr_sleepy);
+
+ continue;
+
+interrupted: /* Mutex is not held if got here */
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+ if (bytes_done)
+ return bytes_done;
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN; /* Don't admit snoozing */
+ return -EINTR;
+ }
+
+ left_to_sleep = deadline - ((long) jiffies);
+
+ /*
+ * If our time is out, skip the waiting. We may miss wr_sleepy
+ * being deasserted but hey, almost missing the train is like
+ * missing it.
+ */
+
+ if (left_to_sleep > 0) {
+ left_to_sleep =
+ wait_event_interruptible_timeout(
+ channel->wr_wait,
+ (!channel->wr_sleepy),
+ left_to_sleep);
+
+ if (!channel->wr_sleepy)
+ continue;
+
+ if (left_to_sleep < 0) { /* Interrupt */
+ mutex_unlock(&channel->wr_mutex);
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+ if (bytes_done)
+ return bytes_done;
+ return -EINTR;
+ }
+ }
+
+desperate:
+ no_time_left = 1; /* We're out of sleeping time. Desperate! */
+
+ if (bytes_done == 0) {
+ /*
+ * Reaching here means that we allow partial return,
+ * that we've run out of time, and that we have
+ * nothing to return.
+ * So tell the FPGA to send anything it has or gets.
+ */
+
+ iowrite32(1 | (channel->chan_num << 1) |
+ (3 << 24) | /* Opcode 3, flush it all! */
+ (waiting_bufidx << 12),
+ &channel->endpoint->registers[
+ fpga_buf_ctrl_reg]);
+ mmiowb(); /* Just to appear safe */
+ }
+
+ /*
+ * Formally speaking, we should block for data at this point.
+ * But to keep the code cleaner, we'll just finish the loop,
+ * make the unlikely check for data, and then block at the
+ * usual place.
+ */
+ }
+
+ mutex_unlock(&channel->wr_mutex);
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ return bytes_done;
+}
+
+/*
+ * The timeout argument takes values as follows:
+ * >0 : Flush with timeout
+ * ==0 : Flush, and wait indefinitely for the flush to complete
+ * <0 : Autoflush: Flush only if there's a single buffer occupied
+ */
+
+static int xillybus_myflush(struct xilly_channel *channel, long timeout)
+{
+ int rc = 0;
+ unsigned long flags;
+
+ int end_offset_plus1;
+ int bufidx, bufidx_minus1;
+ int i;
+ int empty;
+ int new_rd_host_buf_pos;
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+ rc = mutex_lock_interruptible(&channel->rd_mutex);
+
+ if (rc)
+ return rc;
+
+ /*
+ * Don't flush a closed channel. This can happen when a queued autoflush
+ * work item fires off after the file has been closed. This is not
+ * an error, just something to dismiss.
+ */
+
+ if (!channel->rd_ref_count)
+ goto done;
+
+ bufidx = channel->rd_host_buf_idx;
+
+ bufidx_minus1 = (bufidx == 0) ? channel->num_rd_buffers - 1 : bufidx-1;
+
+ end_offset_plus1 = channel->rd_host_buf_pos >>
+ channel->log2_element_size;
+
+ new_rd_host_buf_pos = channel->rd_host_buf_pos -
+ (end_offset_plus1 << channel->log2_element_size);
+
+ /* Submit the current buffer if it's nonempty */
+ if (end_offset_plus1) {
+ unsigned char *tail = channel->rd_buffers[bufidx]->addr +
+ (end_offset_plus1 << channel->log2_element_size);
+
+ /* Copy unflushed data, so we can put it in next buffer */
+ for (i = 0; i < new_rd_host_buf_pos; i++)
+ channel->rd_leftovers[i] = *tail++;
+
+ spin_lock_irqsave(&channel->rd_spinlock, flags);
+
+ /* Autoflush only if a single buffer is occupied */
+
+ if ((timeout < 0) &&
+ (channel->rd_full ||
+ (bufidx_minus1 != channel->rd_fpga_buf_idx))) {
+ spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+ /*
+ * A new work item may be queued by the ISR exactly
+ * now, since the execution of a work item allows the
+ * queuing of a new one while it's running.
+ */
+ goto done;
+ }
+
+ /* The 4th element is never needed for data, so it's a flag */
+ channel->rd_leftovers[3] = (new_rd_host_buf_pos != 0);
+
+ /* Set up rd_full to reflect a certain moment's state */
+
+ if (bufidx == channel->rd_fpga_buf_idx)
+ channel->rd_full = 1;
+ spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+
+ if (bufidx >= (channel->num_rd_buffers - 1))
+ channel->rd_host_buf_idx = 0;
+ else
+ channel->rd_host_buf_idx++;
+
+ channel->endpoint->ephw->hw_sync_sgl_for_device(
+ channel->endpoint,
+ channel->rd_buffers[bufidx]->dma_addr,
+ channel->rd_buf_size,
+ DMA_TO_DEVICE);
+
+ mutex_lock(&channel->endpoint->register_mutex);
+
+ iowrite32(end_offset_plus1 - 1,
+ &channel->endpoint->registers[fpga_buf_offset_reg]);
+ mmiowb();
+
+ iowrite32((channel->chan_num << 1) | /* Channel ID */
+ (2 << 24) | /* Opcode 2, submit buffer */
+ (bufidx << 12),
+ &channel->endpoint->registers[fpga_buf_ctrl_reg]);
+ mmiowb(); /* Just to appear safe */
+
+ mutex_unlock(&channel->endpoint->register_mutex);
+ } else if (bufidx == 0)
+ bufidx = channel->num_rd_buffers - 1;
+ else
+ bufidx--;
+
+ channel->rd_host_buf_pos = new_rd_host_buf_pos;
+
+ if (timeout < 0)
+ goto done; /* Autoflush */
+
+
+ /*
+ * bufidx is now the last buffer written to (or equal to
+ * rd_fpga_buf_idx if buffer was never written to), and
+ * channel->rd_host_buf_idx the one after it.
+ *
+ * If bufidx == channel->rd_fpga_buf_idx we're either empty or full.
+ */
+
+ rc = 0;
+
+ while (1) { /* Loop waiting for draining of buffers */
+ spin_lock_irqsave(&channel->rd_spinlock, flags);
+
+ if (bufidx != channel->rd_fpga_buf_idx)
+ channel->rd_full = 1; /*
+ * Not really full,
+ * but needs waiting.
+ */
+
+ empty = !channel->rd_full;
+
+ spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+
+ if (empty)
+ break;
+
+ /*
+ * Indefinite sleep with the mutex taken. With data waiting to be
+ * flushed, the user should not be surprised if open() for write
+ * sleeps.
+ */
+ if (timeout == 0)
+ wait_event_interruptible(channel->rd_wait,
+ (!channel->rd_full));
+
+ else if (wait_event_interruptible_timeout(
+ channel->rd_wait,
+ (!channel->rd_full),
+ timeout) == 0) {
+ pr_warn("xillybus: "
+ "Timed out while flushing. "
+ "Output data may be lost.\n");
+
+ rc = -ETIMEDOUT;
+ break;
+ }
+
+ if (channel->rd_full) {
+ rc = -EINTR;
+ break;
+ }
+ }
+
+done:
+ mutex_unlock(&channel->rd_mutex);
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ return rc;
+}
+
+static int xillybus_flush(struct file *filp, fl_owner_t id)
+{
+ if (!(filp->f_mode & FMODE_WRITE))
+ return 0;
+
+ return xillybus_myflush(filp->private_data, HZ); /* 1 second timeout */
+}
+
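+/*
+ * As the write() path further down shows, this work item is (re)queued
+ * with a delay of XILLY_RX_TIMEOUT on non-synchronous channels, so a
+ * partially filled buffer eventually reaches the FPGA even if the
+ * writer goes idle for a while.
+ */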
+static void xillybus_autoflush(struct work_struct *work)
+{
+ struct delayed_work *workitem = container_of(
+ work, struct delayed_work, work);
+ struct xilly_channel *channel = container_of(
+ workitem, struct xilly_channel, rd_workitem);
+ int rc;
+
+ rc = xillybus_myflush(channel, -1);
+
+ if (rc == -EINTR)
+ pr_warn("xillybus: Autoflush failed because "
+ "work queue thread got a signal.\n");
+ else if (rc)
+ pr_err("xillybus: Autoflush failed under "
+ "weird circumstances.\n");
+
+}
+
+static ssize_t xillybus_write(struct file *filp, const char __user *userbuf,
+ size_t count, loff_t *f_pos)
+{
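+ /*
+ * Note the naming: write() works with the rd_* fields and read()
+ * with the wr_* fields. The prefixes apparently follow the FPGA's
+ * perspective (the FPGA reads what the host writes), not the host's.
+ */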
+ ssize_t rc;
+ unsigned long flags;
+ int bytes_done = 0;
+ struct xilly_channel *channel = filp->private_data;
+
+ int full, exhausted;
+ /* Initializations are there only to silence warnings */
+
+ int howmany = 0, bufpos = 0, bufidx = 0, bufferdone = 0;
+ int end_offset_plus1 = 0;
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ rc = mutex_lock_interruptible(&channel->rd_mutex);
+
+ if (rc)
+ return rc;
+
+ rc = 0; /* Just to be clear about it. Compiler optimizes this out */
+
+ while (1) {
+ int bytes_to_do = count - bytes_done;
+
+ spin_lock_irqsave(&channel->rd_spinlock, flags);
+
+ full = channel->rd_full;
+
+ if (!full) {
+ bufidx = channel->rd_host_buf_idx;
+ bufpos = channel->rd_host_buf_pos;
+ howmany = channel->rd_buf_size - bufpos;
+
+ /*
+ * Update rd_host_* to its state after this operation.
+ * count=0 means committing the buffer immediately,
+ * which is like flushing, but doesn't necessarily block.
+ */
+
+ if ((howmany > bytes_to_do) &&
+ (count ||
+ ((bufpos >> channel->log2_element_size) == 0))) {
+ bufferdone = 0;
+
+ howmany = bytes_to_do;
+ channel->rd_host_buf_pos += howmany;
+ } else {
+ bufferdone = 1;
+
+ if (count) {
+ end_offset_plus1 =
+ channel->rd_buf_size >>
+ channel->log2_element_size;
+ channel->rd_host_buf_pos = 0;
+ } else {
+ unsigned char *tail;
+ int i;
+
+ end_offset_plus1 = bufpos >>
+ channel->log2_element_size;
+
+ channel->rd_host_buf_pos -=
+ end_offset_plus1 <<
+ channel->log2_element_size;
+
+ tail = channel->
+ rd_buffers[bufidx]->addr +
+ (end_offset_plus1 <<
+ channel->log2_element_size);
+
+ for (i = 0;
+ i < channel->rd_host_buf_pos;
+ i++)
+ channel->rd_leftovers[i] =
+ *tail++;
+ }
+
+ if (bufidx == channel->rd_fpga_buf_idx)
+ channel->rd_full = 1;
+
+ if (bufidx >= (channel->num_rd_buffers - 1))
+ channel->rd_host_buf_idx = 0;
+ else
+ channel->rd_host_buf_idx++;
+ }
+ }
+
+ /*
+ * Record our situation after the possible changes above,
+ * for use after releasing the spinlock.
+ *
+ * full = rd_full before the change
+ * exhausted = rd_full after the possible change
+ */
+
+ exhausted = channel->rd_full;
+
+ spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+
+ if (!full) { /* Go on, now without the spinlock */
+ unsigned char *head =
+ channel->rd_buffers[bufidx]->addr;
+ int i;
+
+ if ((bufpos == 0) || /* Zero means it's virgin */
+ (channel->rd_leftovers[3] != 0)) {
+ channel->endpoint->ephw->hw_sync_sgl_for_cpu(
+ channel->endpoint,
+ channel->rd_buffers[bufidx]->dma_addr,
+ channel->rd_buf_size,
+ DMA_TO_DEVICE);
+
+ /* Virgin, but leftovers are due */
+ for (i = 0; i < bufpos; i++)
+ *head++ = channel->rd_leftovers[i];
+
+ channel->rd_leftovers[3] = 0; /* Clear flag */
+ }
+
+ if (copy_from_user(
+ channel->rd_buffers[bufidx]->addr + bufpos,
+ userbuf, howmany))
+ rc = -EFAULT;
+
+ userbuf += howmany;
+ bytes_done += howmany;
+
+ if (bufferdone) {
+ channel->endpoint->ephw->
+ hw_sync_sgl_for_device(
+ channel->endpoint,
+ channel->rd_buffers[bufidx]->
+ dma_addr,
+ channel->rd_buf_size,
+ DMA_TO_DEVICE);
+
+ mutex_lock(&channel->endpoint->register_mutex);
+
+ iowrite32(end_offset_plus1 - 1,
+ &channel->endpoint->registers[
+ fpga_buf_offset_reg]);
+ mmiowb();
+ iowrite32((channel->chan_num << 1) |
+ (2 << 24) | /* 2 = submit buffer */
+ (bufidx << 12),
+ &channel->endpoint->registers[
+ fpga_buf_ctrl_reg]);
+ mmiowb(); /* Just to appear safe */
+
+ mutex_unlock(&channel->endpoint->
+ register_mutex);
+
+ channel->rd_leftovers[3] =
+ (channel->rd_host_buf_pos != 0);
+ }
+
+ if (rc) {
+ mutex_unlock(&channel->rd_mutex);
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ if (!channel->rd_synchronous)
+ queue_delayed_work(
+ xillybus_wq,
+ &channel->rd_workitem,
+ XILLY_RX_TIMEOUT);
+
+ return rc;
+ }
+ }
+
+ if (bytes_done >= count)
+ break;
+
+ if (!exhausted)
+ continue; /* If there's more space, just go on */
+
+ if ((bytes_done > 0) && channel->rd_allow_partial)
+ break;
+
+ /*
+ * Indefinite sleep with the mutex taken. With data waiting to be
+ * flushed, the user should not be surprised if open() for write
+ * sleeps.
+ */
+
+ if (filp->f_flags & O_NONBLOCK) {
+ bytes_done = -EAGAIN;
+ break;
+ }
+
+ wait_event_interruptible(channel->rd_wait,
+ (!channel->rd_full));
+
+ if (channel->rd_full) {
+ mutex_unlock(&channel->rd_mutex);
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ if (bytes_done)
+ return bytes_done;
+ return -EINTR;
+ }
+ }
+
+ mutex_unlock(&channel->rd_mutex);
+
+ if (!channel->rd_synchronous)
+ queue_delayed_work(xillybus_wq,
+ &channel->rd_workitem,
+ XILLY_RX_TIMEOUT);
+
+ if ((channel->rd_synchronous) && (bytes_done > 0)) {
+ rc = xillybus_myflush(filp->private_data, 0); /* No timeout */
+
+ if (rc && (rc != -EINTR))
+ return rc;
+ }
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ return bytes_done;
+}
+
+static int xillybus_open(struct inode *inode, struct file *filp)
+{
+ int rc = 0;
+ unsigned long flags;
+ int minor = iminor(inode);
+ int major = imajor(inode);
+ struct xilly_endpoint *ep_iter, *endpoint = NULL;
+ struct xilly_channel *channel;
+
+ mutex_lock(&ep_list_lock);
+
+ list_for_each_entry(ep_iter, &list_of_endpoints, ep_list) {
+ if ((ep_iter->major == major) &&
+ (minor >= ep_iter->lowest_minor) &&
+ (minor < (ep_iter->lowest_minor +
+ ep_iter->num_channels))) {
+ endpoint = ep_iter;
+ break;
+ }
+ }
+ mutex_unlock(&ep_list_lock);
+
+ if (!endpoint) {
+ pr_err("xillybus: open() failed to find a device "
+ "for major=%d and minor=%d\n", major, minor);
+ return -ENODEV;
+ }
+
+ if (endpoint->fatal_error)
+ return -EIO;
+
+ channel = endpoint->channels[1 + minor - endpoint->lowest_minor];
+ filp->private_data = channel;
+
+ /*
+ * It gets complicated because:
+ * 1. We don't want to take a mutex we don't have to
+ * 2. We don't want to open one direction if the other will fail.
+ */
+
+ if ((filp->f_mode & FMODE_READ) && (!channel->num_wr_buffers))
+ return -ENODEV;
+
+ if ((filp->f_mode & FMODE_WRITE) && (!channel->num_rd_buffers))
+ return -ENODEV;
+
+ if ((filp->f_mode & FMODE_READ) && (filp->f_flags & O_NONBLOCK) &&
+ (channel->wr_synchronous || !channel->wr_allow_partial ||
+ !channel->wr_supports_nonempty)) {
+ pr_err("xillybus: open() failed: "
+ "O_NONBLOCK not allowed for read on this device\n");
+ return -ENODEV;
+ }
+
+ if ((filp->f_mode & FMODE_WRITE) && (filp->f_flags & O_NONBLOCK) &&
+ (channel->rd_synchronous || !channel->rd_allow_partial)) {
+ pr_err("xillybus: open() failed: "
+ "O_NONBLOCK not allowed for write on this device\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Note: open() may block on getting mutexes despite O_NONBLOCK.
+ * This shouldn't occur normally, since multiple opens of the same
+ * device file are almost always prohibited anyhow
+ * (*_exclusive_open is normally set in real-life systems).
+ */
+
+ if (filp->f_mode & FMODE_READ) {
+ rc = mutex_lock_interruptible(&channel->wr_mutex);
+ if (rc)
+ return rc;
+ }
+
+ if (filp->f_mode & FMODE_WRITE) {
+ rc = mutex_lock_interruptible(&channel->rd_mutex);
+ if (rc)
+ goto unlock_wr;
+ }
+
+ if ((filp->f_mode & FMODE_READ) &&
+ (channel->wr_ref_count != 0) &&
+ (channel->wr_exclusive_open)) {
+ rc = -EBUSY;
+ goto unlock;
+ }
+
+ if ((filp->f_mode & FMODE_WRITE) &&
+ (channel->rd_ref_count != 0) &&
+ (channel->rd_exclusive_open)) {
+ rc = -EBUSY;
+ goto unlock;
+ }
+
+ if (filp->f_mode & FMODE_READ) {
+ if (channel->wr_ref_count == 0) { /* First open of file */
+ /* Move the host to first buffer */
+ spin_lock_irqsave(&channel->wr_spinlock, flags);
+ channel->wr_host_buf_idx = 0;
+ channel->wr_host_buf_pos = 0;
+ channel->wr_fpga_buf_idx = -1;
+ channel->wr_empty = 1;
+ channel->wr_ready = 0;
+ channel->wr_sleepy = 1;
+ channel->wr_eof = -1;
+ channel->wr_hangup = 0;
+
+ spin_unlock_irqrestore(&channel->wr_spinlock, flags);
+
+ iowrite32(1 | (channel->chan_num << 1) |
+ (4 << 24) | /* Opcode 4, open channel */
+ ((channel->wr_synchronous & 1) << 23),
+ &channel->endpoint->registers[
+ fpga_buf_ctrl_reg]);
+ mmiowb(); /* Just to appear safe */
+ }
+
+ channel->wr_ref_count++;
+ }
+
+ if (filp->f_mode & FMODE_WRITE) {
+ if (channel->rd_ref_count == 0) { /* First open of file */
+ /* Move the host to first buffer */
+ spin_lock_irqsave(&channel->rd_spinlock, flags);
+ channel->rd_host_buf_idx = 0;
+ channel->rd_host_buf_pos = 0;
+ channel->rd_leftovers[3] = 0; /* No leftovers. */
+ channel->rd_fpga_buf_idx = channel->num_rd_buffers - 1;
+ channel->rd_full = 0;
+
+ spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+
+ iowrite32((channel->chan_num << 1) |
+ (4 << 24), /* Opcode 4, open channel */
+ &channel->endpoint->registers[
+ fpga_buf_ctrl_reg]);
+ mmiowb(); /* Just to appear safe */
+ }
+
+ channel->rd_ref_count++;
+ }
+
+unlock:
+ if (filp->f_mode & FMODE_WRITE)
+ mutex_unlock(&channel->rd_mutex);
+unlock_wr:
+ if (filp->f_mode & FMODE_READ)
+ mutex_unlock(&channel->wr_mutex);
+
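+ /*
+ * nonseekable_open() marks the file as non-seekable, so llseek()
+ * remains usable only on channels flagged as seekable.
+ */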
+ if (!rc && (!channel->seekable))
+ return nonseekable_open(inode, filp);
+
+ return rc;
+}
+
+static int xillybus_release(struct inode *inode, struct file *filp)
+{
+ int rc;
+ unsigned long flags;
+ struct xilly_channel *channel = filp->private_data;
+
+ int buf_idx;
+ int eof;
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ if (filp->f_mode & FMODE_WRITE) {
+ rc = mutex_lock_interruptible(&channel->rd_mutex);
+
+ if (rc) {
+ pr_warn("xillybus: Failed to close file. "
+ "Hardware left in messy state.\n");
+ return rc;
+ }
+
+ channel->rd_ref_count--;
+
+ if (channel->rd_ref_count == 0) {
+
+ /*
+ * We rely on the kernel calling flush()
+ * before we get here.
+ */
+
+ iowrite32((channel->chan_num << 1) | /* Channel ID */
+ (5 << 24), /* Opcode 5, close channel */
+ &channel->endpoint->registers[
+ fpga_buf_ctrl_reg]);
+ mmiowb(); /* Just to appear safe */
+ }
+ mutex_unlock(&channel->rd_mutex);
+ }
+
+ if (filp->f_mode & FMODE_READ) {
+ rc = mutex_lock_interruptible(&channel->wr_mutex);
+ if (rc) {
+ pr_warn("xillybus: Failed to close file. "
+ "Hardware left in messy state.\n");
+ return rc;
+ }
+
+ channel->wr_ref_count--;
+
+ if (channel->wr_ref_count == 0) {
+
+ iowrite32(1 | (channel->chan_num << 1) |
+ (5 << 24), /* Opcode 5, close channel */
+ &channel->endpoint->registers[
+ fpga_buf_ctrl_reg]);
+ mmiowb(); /* Just to appear safe */
+
+ /*
+ * This is crazily cautious: We not only make sure
+ * that we got an EOF (be it because we closed the
+ * channel or because of a user's EOF), but also verify
+ * that it points one past the last buffer that arrived,
+ * so no leftover buffers are pending before wrapping
+ * up (which can only happen in asynchronous channels,
+ * by the way).
+ */
+
+ while (1) {
+ spin_lock_irqsave(&channel->wr_spinlock,
+ flags);
+ buf_idx = channel->wr_fpga_buf_idx;
+ eof = channel->wr_eof;
+ channel->wr_sleepy = 1;
+ spin_unlock_irqrestore(&channel->wr_spinlock,
+ flags);
+
+ /*
+ * Check whether eof points at the buffer after
+ * the last one the FPGA submitted. Note that
+ * a negative eof means no EOF has been marked.
+ */
+
+ buf_idx++;
+ if (buf_idx == channel->num_wr_buffers)
+ buf_idx = 0;
+
+ if (buf_idx == eof)
+ break;
+
+ /*
+ * Steal an extra 100 ms if awakened by a signal.
+ * This is a simple workaround for a signal
+ * pending when entering, which would
+ * otherwise result in declaring the hardware
+ * non-responsive.
+ */
+
+ if (wait_event_interruptible(
+ channel->wr_wait,
+ (!channel->wr_sleepy)))
+ msleep(100);
+
+ if (channel->wr_sleepy) {
+ mutex_unlock(&channel->wr_mutex);
+ pr_warn("xillybus: Hardware failed to "
+ "respond to close command, "
+ "therefore left in "
+ "messy state.\n");
+ return -EINTR;
+ }
+ }
+ }
+
+ mutex_unlock(&channel->wr_mutex);
+ }
+
+ return 0;
+}
+
+static loff_t xillybus_llseek(struct file *filp, loff_t offset, int whence)
+{
+ struct xilly_channel *channel = filp->private_data;
+ loff_t pos = filp->f_pos;
+ int rc = 0;
+
+ /*
+ * Take both mutexes non-interruptibly, since common applications
+ * don't seem to expect an -EINTR here. Besides, concurrent
+ * access to a single file descriptor on seekable devices is a mess
+ * anyhow.
+ */
+
+ if (channel->endpoint->fatal_error)
+ return -EIO;
+
+ mutex_lock(&channel->wr_mutex);
+ mutex_lock(&channel->rd_mutex);
+
+ switch (whence) {
+ case 0:
+ pos = offset;
+ break;
+ case 1:
+ pos += offset;
+ break;
+ case 2:
+ pos = offset; /* Going to the end => to the beginning */
+ break;
+ default:
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* In any case, we must finish on an element boundary */
+ if (pos & ((1 << channel->log2_element_size) - 1)) {
+ rc = -EINVAL;
+ goto end;
+ }
+
+ mutex_lock(&channel->endpoint->register_mutex);
+
+ iowrite32(pos >> channel->log2_element_size,
+ &channel->endpoint->registers[fpga_buf_offset_reg]);
+ mmiowb();
+ iowrite32((channel->chan_num << 1) |
+ (6 << 24), /* Opcode 6, set address */
+ &channel->endpoint->registers[fpga_buf_ctrl_reg]);
+ mmiowb(); /* Just to appear safe */
+
+ mutex_unlock(&channel->endpoint->register_mutex);
+
+end:
+ mutex_unlock(&channel->rd_mutex);
+ mutex_unlock(&channel->wr_mutex);
+
+ if (rc) /* Return error after releasing mutexes */
+ return rc;
+
+ filp->f_pos = pos;
+
+ /*
+ * Since seekable devices are allowed only when the channel is
+ * synchronous, we assume that there is no data pending in either
+ * direction (which holds true as long as no concurrent access on the
+ * file descriptor takes place).
+ * The only thing we may need to throw away is leftovers from partial
+ * write() flush.
+ */
+
+ channel->rd_leftovers[3] = 0;
+
+ return pos;
+}
+
+static unsigned int xillybus_poll(struct file *filp, poll_table *wait)
+{
+ struct xilly_channel *channel = filp->private_data;
+ unsigned int mask = 0;
+ unsigned long flags;
+
+ poll_wait(filp, &channel->endpoint->ep_wait, wait);
+
+ /*
+ * poll() won't play ball with read() channels unless they are
+ * asynchronous and support the nonempty message. Allowing it
+ * otherwise would create situations where data has been delivered
+ * at the FPGA, with users expecting select() to wake up, which it
+ * may not.
+ */
+
+ if (!channel->wr_synchronous && channel->wr_supports_nonempty) {
+ poll_wait(filp, &channel->wr_wait, wait);
+ poll_wait(filp, &channel->wr_ready_wait, wait);
+
+ spin_lock_irqsave(&channel->wr_spinlock, flags);
+ if (!channel->wr_empty || channel->wr_ready)
+ mask |= POLLIN | POLLRDNORM;
+
+ if (channel->wr_hangup)
+ /*
+ * Not POLLHUP, because its behavior is murky,
+ * and POLLIN does what we want: Wake up
+ * the reading file descriptor so it sees EOF.
+ */
+ mask |= POLLIN | POLLRDNORM;
+ spin_unlock_irqrestore(&channel->wr_spinlock, flags);
+ }
+
+ /*
+ * If partial data write is disallowed on a write() channel,
+ * it's pointless to ever signal OK to write, because it could
+ * block despite some space being available.
+ */
+
+ if (channel->rd_allow_partial) {
+ poll_wait(filp, &channel->rd_wait, wait);
+
+ spin_lock_irqsave(&channel->rd_spinlock, flags);
+ if (!channel->rd_full)
+ mask |= POLLOUT | POLLWRNORM;
+ spin_unlock_irqrestore(&channel->rd_spinlock, flags);
+ }
+
+ if (channel->endpoint->fatal_error)
+ mask |= POLLERR;
+
+ return mask;
+}
+
+static const struct file_operations xillybus_fops = {
+ .owner = THIS_MODULE,
+ .read = xillybus_read,
+ .write = xillybus_write,
+ .open = xillybus_open,
+ .flush = xillybus_flush,
+ .release = xillybus_release,
+ .llseek = xillybus_llseek,
+ .poll = xillybus_poll,
+};
+
+static int xillybus_init_chrdev(struct xilly_endpoint *endpoint,
+ const unsigned char *idt)
+{
+ int rc;
+ dev_t dev;
+ int devnum, i, minor, major;
+ char devname[48];
+ struct device *device;
+
+ rc = alloc_chrdev_region(&dev, 0, /* minor start */
+ endpoint->num_channels,
+ xillyname);
+
+ if (rc) {
+ pr_warn("xillybus: Failed to obtain major/minors");
+ goto error1;
+ }
+
+ endpoint->major = major = MAJOR(dev);
+ endpoint->lowest_minor = minor = MINOR(dev);
+
+ cdev_init(&endpoint->cdev, &xillybus_fops);
+ endpoint->cdev.owner = endpoint->ephw->owner;
+ rc = cdev_add(&endpoint->cdev, MKDEV(major, minor),
+ endpoint->num_channels);
+ if (rc) {
+ pr_warn("xillybus: Failed to add cdev. Aborting.\n");
+ goto error2;
+ }
+
+ idt++;
+
+ for (i = minor, devnum = 0;
+ devnum < endpoint->num_channels;
+ devnum++, i++) {
+ snprintf(devname, sizeof(devname)-1, "xillybus_%s", idt);
+
+ devname[sizeof(devname)-1] = 0; /* Should never matter */
+
+ while (*idt++)
+ /* Skip to next */;
+
+ device = device_create(xillybus_class,
+ NULL,
+ MKDEV(major, i),
+ NULL,
+ devname);
+
+ if (IS_ERR(device)) {
+ pr_warn("xillybus: Failed to create %s "
+ "device. Aborting.\n", devname);
+ rc = PTR_ERR(device); /* Don't return 0 despite the failure */
+ goto error3;
+ }
+ }
+
+ pr_info("xillybus: Created %d device files.\n",
+ endpoint->num_channels);
+ return 0; /* succeed */
+
+error3:
+ devnum--; i--;
+ for (; devnum >= 0; devnum--, i--)
+ device_destroy(xillybus_class, MKDEV(major, i));
+
+ cdev_del(&endpoint->cdev);
+error2:
+ unregister_chrdev_region(MKDEV(major, minor), endpoint->num_channels);
+error1:
+
+ return rc;
+}
+
+static void xillybus_cleanup_chrdev(struct xilly_endpoint *endpoint)
+{
+ int minor;
+
+ for (minor = endpoint->lowest_minor;
+ minor < (endpoint->lowest_minor + endpoint->num_channels);
+ minor++)
+ device_destroy(xillybus_class, MKDEV(endpoint->major, minor));
+ cdev_del(&endpoint->cdev);
+ unregister_chrdev_region(MKDEV(endpoint->major,
+ endpoint->lowest_minor),
+ endpoint->num_channels);
+
+ pr_info("xillybus: Removed %d device files.\n",
+ endpoint->num_channels);
+}
+
+struct xilly_endpoint *xillybus_init_endpoint(struct pci_dev *pdev,
+ struct device *dev,
+ struct xilly_endpoint_hardware
+ *ephw)
+{
+ struct xilly_endpoint *endpoint;
+
+ endpoint = kzalloc(sizeof(*endpoint), GFP_KERNEL);
+ if (!endpoint) {
+ pr_err("xillybus: Failed to allocate memory. Aborting.\n");
+ return NULL;
+ }
+
+ endpoint->pdev = pdev;
+ endpoint->dev = dev;
+ endpoint->ephw = ephw;
+ INIT_LIST_HEAD(&endpoint->cleanup.to_kfree);
+ INIT_LIST_HEAD(&endpoint->cleanup.to_pagefree);
+ INIT_LIST_HEAD(&endpoint->cleanup.to_unmap);
+ endpoint->msg_counter = 0x0b;
+ endpoint->failed_messages = 0;
+ endpoint->fatal_error = 0;
+
+ init_waitqueue_head(&endpoint->ep_wait);
+ mutex_init(&endpoint->register_mutex);
+
+ return endpoint;
+}
+EXPORT_SYMBOL(xillybus_init_endpoint);
+
+static int xilly_quiesce(struct xilly_endpoint *endpoint)
+{
+ endpoint->idtlen = -1;
+ wmb(); /* Make sure idtlen is set before sending command */
+ iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
+ &endpoint->registers[fpga_dma_control_reg]);
+ mmiowb();
+
+ wait_event_interruptible_timeout(endpoint->ep_wait,
+ (endpoint->idtlen >= 0),
+ XILLY_TIMEOUT);
+
+ if (endpoint->idtlen < 0) {
+ pr_err("xillybus: Failed to quiesce the device on "
+ "exit. Quitting while leaving a mess.\n");
+ return -ENODEV;
+ }
+ return 0; /* Success */
+}
+
+int xillybus_endpoint_discovery(struct xilly_endpoint *endpoint)
+{
+ int rc = 0;
+
+ struct xilly_cleanup tmpmem;
+ int idtbuffersize = (1 << PAGE_SHIFT);
+
+ /*
+ * The bogus IDT is used during bootstrap, first for allocating the
+ * initial message buffer, and then for the message buffer plus space
+ * for the IDT itself. The initial message buffer is a single page in
+ * size, but it's soon replaced with a more modest one (and the memory
+ * is freed).
+ */
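+
+/*
+ * A guess at the entry format, based only on how bogus_idt is used here:
+ * each channel entry is four bytes, and the third byte holds log2 of the
+ * buffer size, since the loop in bootstrap phase II below increments
+ * bogus_idt[6] every time idtbuffersize is doubled.
+ */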
+
+ unsigned char bogus_idt[8] = { 1, 224, (PAGE_SHIFT)-2, 0,
+ 3, 192, PAGE_SHIFT, 0 };
+ struct xilly_idt_handle idt_handle;
+
+ INIT_LIST_HEAD(&tmpmem.to_kfree);
+ INIT_LIST_HEAD(&tmpmem.to_pagefree);
+ INIT_LIST_HEAD(&tmpmem.to_unmap);
+
+ /*
+ * Writing the value 0x00000001 to the Endianness register signals which
+ * endianness this processor is using, so the FPGA can swap words as
+ * necessary.
+ */
+
+ iowrite32(1, &endpoint->registers[fpga_endian_reg]);
+ mmiowb(); /* Writes below are affected by the one above. */
+
+ /* Bootstrap phase I: Allocate temporary message buffer */
+
+ endpoint->num_channels = 0;
+
+ rc = xilly_setupchannels(endpoint, &tmpmem, bogus_idt, 1);
+
+ if (rc)
+ goto failed_buffers;
+
+ /* Clear the message subsystem (and counter in particular) */
+ iowrite32(0x04, &endpoint->registers[fpga_msg_ctrl_reg]);
+ mmiowb();
+
+ endpoint->idtlen = -1;
+
+ smp_wmb();
+
+ /*
+ * Set DMA 32/64-bit mode, quiesce the device (?!) and get the IDT
+ * buffer size.
+ */
+ iowrite32((u32) (endpoint->dma_using_dac & 0x0001),
+ &endpoint->registers[fpga_dma_control_reg]);
+ mmiowb();
+
+ wait_event_interruptible_timeout(endpoint->ep_wait,
+ (endpoint->idtlen >= 0),
+ XILLY_TIMEOUT);
+
+ if (endpoint->idtlen < 0) {
+ pr_err("xillybus: No response from FPGA. Aborting.\n");
+ rc = -ENODEV;
+ goto failed_quiesce;
+ }
+
+ /* Enable DMA */
+ iowrite32((u32) (0x0002 | (endpoint->dma_using_dac & 0x0001)),
+ &endpoint->registers[fpga_dma_control_reg]);
+ mmiowb();
+
+ /* Bootstrap phase II: Allocate buffer for IDT and obtain it */
+ while (endpoint->idtlen >= idtbuffersize) {
+ idtbuffersize *= 2;
+ bogus_idt[6]++;
+ }
+
+ endpoint->num_channels = 1;
+
+ rc = xilly_setupchannels(endpoint, &tmpmem, bogus_idt, 2);
+
+ if (rc)
+ goto failed_idt;
+
+ smp_wmb();
+
+ rc = xilly_obtain_idt(endpoint);
+
+ if (rc)
+ goto failed_idt;
+
+ xilly_scan_idt(endpoint, &idt_handle);
+
+ if (!idt_handle.chandesc) {
+ rc = -ENODEV;
+ goto failed_idt;
+ }
+ /* Bootstrap phase III: Allocate buffers according to IDT */
+
+ rc = xilly_setupchannels(endpoint,
+ &endpoint->cleanup,
+ idt_handle.chandesc,
+ idt_handle.entries);
+
+ if (rc)
+ goto failed_idt;
+
+ smp_wmb(); /* mutex_lock below should suffice, but won't hurt.*/
+
+ /*
+ * endpoint is now completely configured. We put it on the list
+ * available to open() before registering the char device(s)
+ */
+
+ mutex_lock(&ep_list_lock);
+ list_add_tail(&endpoint->ep_list, &list_of_endpoints);
+ mutex_unlock(&ep_list_lock);
+
+ rc = xillybus_init_chrdev(endpoint, idt_handle.idt);
+
+ if (rc)
+ goto failed_chrdevs;
+
+ xillybus_do_cleanup(&tmpmem, endpoint);
+
+ return 0;
+
+failed_chrdevs:
+ mutex_lock(&ep_list_lock);
+ list_del(&endpoint->ep_list);
+ mutex_unlock(&ep_list_lock);
+
+failed_idt:
+ /* Quiesce the device. Now it really matters to do it. */
+ rc = xilly_quiesce(endpoint);
+
+ if (rc)
+ return rc; /* FPGA may still DMA, so no release */
+
+ flush_workqueue(xillybus_wq);
+failed_quiesce:
+failed_buffers:
+ xillybus_do_cleanup(&tmpmem, endpoint);
+
+ return rc;
+}
+EXPORT_SYMBOL(xillybus_endpoint_discovery);
+
+void xillybus_endpoint_remove(struct xilly_endpoint *endpoint)
+{
+ xillybus_cleanup_chrdev(endpoint);
+
+ mutex_lock(&ep_list_lock);
+ list_del(&endpoint->ep_list);
+ mutex_unlock(&ep_list_lock);
+
+ xilly_quiesce(endpoint);
+
+ /*
+ * Flushing is done upon endpoint release to prevent access to memory
+ * just about to be released. This makes the quiesce complete.
+ */
+ flush_workqueue(xillybus_wq);
+}
+EXPORT_SYMBOL(xillybus_endpoint_remove);
+
+static int __init xillybus_init(void)
+{
+ int rc = 0;
+
+ mutex_init(&ep_list_lock);
+
+ xillybus_class = class_create(THIS_MODULE, xillyname);
+ if (IS_ERR(xillybus_class)) {
+ rc = PTR_ERR(xillybus_class);
+ pr_warn("xillybus: Failed to register class xillybus\n");
+
+ return rc;
+ }
+
+ xillybus_wq = alloc_workqueue(xillyname, 0, 0);
+ if (!xillybus_wq) {
+ class_destroy(xillybus_class);
+ return -ENOMEM; /* Don't report success without a workqueue */
+ }
+
+ return 0; /* Success */
+}
+
+static void __exit xillybus_exit(void)
+{
+ /* flush_workqueue() was called for each endpoint released */
+ destroy_workqueue(xillybus_wq);
+
+ class_destroy(xillybus_class);
+}
+
+module_init(xillybus_init);
+module_exit(xillybus_exit);
diff --git a/drivers/staging/xillybus/xillybus_of.c b/drivers/staging/xillybus/xillybus_of.c
new file mode 100644
index 00000000000..92c2931f434
--- /dev/null
+++ b/drivers/staging/xillybus/xillybus_of.c
@@ -0,0 +1,212 @@
+/*
+ * linux/drivers/misc/xillybus_of.c
+ *
+ * Copyright 2011 Xillybus Ltd, http://xillybus.com
+ *
+ * Driver for the Xillybus FPGA/host framework using Open Firmware.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include "xillybus.h"
+
+MODULE_DESCRIPTION("Xillybus driver for Open Firmware");
+MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
+MODULE_VERSION("1.06");
+MODULE_ALIAS("xillybus_of");
+MODULE_LICENSE("GPL v2");
+
+static const char xillyname[] = "xillybus_of";
+
+/* Match table for of_platform binding */
+static struct of_device_id xillybus_of_match[] = {
+ { .compatible = "xlnx,xillybus-1.00.a", },
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, xillybus_of_match);
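+/*
+ * Exporting the match table lets userspace tooling (udev/modprobe)
+ * autoload this module when a matching device tree node is present.
+ */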
+
+static void xilly_dma_sync_single_for_cpu_of(struct xilly_endpoint *ep,
+ dma_addr_t dma_handle,
+ size_t size,
+ int direction)
+{
+ dma_sync_single_for_cpu(ep->dev, dma_handle, size, direction);
+}
+
+static void xilly_dma_sync_single_for_device_of(struct xilly_endpoint *ep,
+ dma_addr_t dma_handle,
+ size_t size,
+ int direction)
+{
+ dma_sync_single_for_device(ep->dev, dma_handle, size, direction);
+}
+
+static dma_addr_t xilly_map_single_of(struct xilly_cleanup *mem,
+ struct xilly_endpoint *ep,
+ void *ptr,
+ size_t size,
+ int direction
+ )
+{
+
+ dma_addr_t addr = 0;
+ struct xilly_dma *this;
+
+ this = kmalloc(sizeof(struct xilly_dma), GFP_KERNEL);
+ if (!this)
+ return 0;
+
+ addr = dma_map_single(ep->dev, ptr, size, direction);
+ this->direction = direction;
+
+ if (dma_mapping_error(ep->dev, addr)) {
+ kfree(this);
+ return 0;
+ }
+
+ this->dma_addr = addr;
+ this->dev = ep->dev;
+ this->size = size;
+
+ list_add_tail(&this->node, &mem->to_unmap);
+
+ return addr;
+}
+
+static void xilly_unmap_single_of(struct xilly_dma *entry)
+{
+ dma_unmap_single(entry->dev,
+ entry->dma_addr,
+ entry->size,
+ entry->direction);
+}
+
+static struct xilly_endpoint_hardware of_hw = {
+ .owner = THIS_MODULE,
+ .hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_of,
+ .hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_of,
+ .map_single = xilly_map_single_of,
+ .unmap_single = xilly_unmap_single_of
+};
+
+static int xilly_drv_probe(struct platform_device *op)
+{
+ struct device *dev = &op->dev;
+ struct xilly_endpoint *endpoint;
+ int rc = 0;
+ int irq;
+
+ endpoint = xillybus_init_endpoint(NULL, dev, &of_hw);
+
+ if (!endpoint)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, endpoint);
+
+ rc = of_address_to_resource(dev->of_node, 0, &endpoint->res);
+ if (rc) {
+ pr_warn("xillybus: Failed to obtain device tree "
+ "resource\n");
+ goto failed_request_regions;
+ }
+
+ if (!request_mem_region(endpoint->res.start,
+ resource_size(&endpoint->res), xillyname)) {
+ pr_err("xillybus: request_mem_region failed. Aborting.\n");
+ rc = -EBUSY;
+ goto failed_request_regions;
+ }
+
+ endpoint->registers = of_iomap(dev->of_node, 0);
+
+ if (!endpoint->registers) {
+ pr_err("xillybus: Failed to map I/O memory. Aborting.\n");
+ rc = -EIO; /* Otherwise the error path returns 0 */
+ goto failed_iomap0;
+ }
+
+ irq = irq_of_parse_and_map(dev->of_node, 0);
+
+ rc = request_irq(irq, xillybus_isr, 0, xillyname, endpoint);
+
+ if (rc) {
+ pr_err("xillybus: Failed to register IRQ handler. "
+ "Aborting.\n");
+ rc = -ENODEV;
+ goto failed_register_irq;
+ }
+
+ rc = xillybus_endpoint_discovery(endpoint);
+
+ if (!rc)
+ return 0;
+
+ free_irq(irq, endpoint);
+
+failed_register_irq:
+ iounmap(endpoint->registers);
+failed_iomap0:
+ release_mem_region(endpoint->res.start,
+ resource_size(&endpoint->res));
+
+failed_request_regions:
+ xillybus_do_cleanup(&endpoint->cleanup, endpoint);
+
+ kfree(endpoint);
+ return rc;
+}
+
+static int xilly_drv_remove(struct platform_device *op)
+{
+ struct device *dev = &op->dev;
+ struct xilly_endpoint *endpoint = dev_get_drvdata(dev);
+ int irq = irq_of_parse_and_map(dev->of_node, 0);
+
+ xillybus_endpoint_remove(endpoint);
+
+ free_irq(irq, endpoint);
+
+ iounmap(endpoint->registers);
+ release_mem_region(endpoint->res.start,
+ resource_size(&endpoint->res));
+
+ xillybus_do_cleanup(&endpoint->cleanup, endpoint);
+
+ kfree(endpoint);
+
+ return 0;
+}
+
+static struct platform_driver xillybus_platform_driver = {
+ .probe = xilly_drv_probe,
+ .remove = xilly_drv_remove,
+ .driver = {
+ .name = xillyname,
+ .owner = THIS_MODULE,
+ .of_match_table = xillybus_of_match,
+ },
+};
+
+static int __init xillybus_of_init(void)
+{
+ return platform_driver_register(&xillybus_platform_driver);
+}
+
+static void __exit xillybus_of_exit(void)
+{
+ platform_driver_unregister(&xillybus_platform_driver);
+}
+
+module_init(xillybus_of_init);
+module_exit(xillybus_of_exit);
diff --git a/drivers/staging/xillybus/xillybus_pcie.c b/drivers/staging/xillybus/xillybus_pcie.c
new file mode 100644
index 00000000000..67013652358
--- /dev/null
+++ b/drivers/staging/xillybus/xillybus_pcie.c
@@ -0,0 +1,262 @@
+/*
+ * linux/drivers/misc/xillybus_pcie.c
+ *
+ * Copyright 2011 Xillybus Ltd, http://xillybus.com
+ *
+ * Driver for the Xillybus FPGA/host framework using PCI Express.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include "xillybus.h"
+
+MODULE_DESCRIPTION("Xillybus driver for PCIe");
+MODULE_AUTHOR("Eli Billauer, Xillybus Ltd.");
+MODULE_VERSION("1.06");
+MODULE_ALIAS("xillybus_pcie");
+MODULE_LICENSE("GPL v2");
+
+#define PCI_DEVICE_ID_XILLYBUS 0xebeb
+
+#define PCI_VENDOR_ID_ALTERA 0x1172
+#define PCI_VENDOR_ID_ACTEL 0x11aa
+#define PCI_VENDOR_ID_LATTICE 0x1204
+
+static const char xillyname[] = "xillybus_pcie";
+
+static DEFINE_PCI_DEVICE_TABLE(xillyids) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_XILINX, PCI_DEVICE_ID_XILLYBUS)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ALTERA, PCI_DEVICE_ID_XILLYBUS)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ACTEL, PCI_DEVICE_ID_XILLYBUS)},
+ {PCI_DEVICE(PCI_VENDOR_ID_LATTICE, PCI_DEVICE_ID_XILLYBUS)},
+ { /* End: all zeroes */ }
+};
+
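+/*
+ * The endpoint core hands these hooks generic DMA_* direction values;
+ * the legacy pci_* DMA wrappers used below expect PCI_DMA_* constants,
+ * hence this small translation helper.
+ */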
+static int xilly_pci_direction(int direction)
+{
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ return PCI_DMA_TODEVICE;
+ case DMA_FROM_DEVICE:
+ return PCI_DMA_FROMDEVICE;
+ default:
+ return PCI_DMA_BIDIRECTIONAL;
+ }
+}
+
+static void xilly_dma_sync_single_for_cpu_pci(struct xilly_endpoint *ep,
+ dma_addr_t dma_handle,
+ size_t size,
+ int direction)
+{
+ pci_dma_sync_single_for_cpu(ep->pdev,
+ dma_handle,
+ size,
+ xilly_pci_direction(direction));
+}
+
+static void xilly_dma_sync_single_for_device_pci(struct xilly_endpoint *ep,
+ dma_addr_t dma_handle,
+ size_t size,
+ int direction)
+{
+ pci_dma_sync_single_for_device(ep->pdev,
+ dma_handle,
+ size,
+ xilly_pci_direction(direction));
+}
+
+/*
+ * Map either through the PCI DMA mapper or the non-PCI one. Behind the
+ * scenes exactly the same functions are called with the same parameters,
+ * but that can change.
+ */
+
+static dma_addr_t xilly_map_single_pci(struct xilly_cleanup *mem,
+ struct xilly_endpoint *ep,
+ void *ptr,
+ size_t size,
+ int direction
+ )
+{
+
+ dma_addr_t addr = 0;
+ struct xilly_dma *this;
+ int pci_direction;
+
+ this = kmalloc(sizeof(struct xilly_dma), GFP_KERNEL);
+ if (!this)
+ return 0;
+
+ pci_direction = xilly_pci_direction(direction);
+ addr = pci_map_single(ep->pdev, ptr, size, pci_direction);
+ this->direction = pci_direction;
+
+ if (pci_dma_mapping_error(ep->pdev, addr)) {
+ kfree(this);
+ return 0;
+ }
+
+ this->dma_addr = addr;
+ this->pdev = ep->pdev;
+ this->size = size;
+
+ list_add_tail(&this->node, &mem->to_unmap);
+
+ return addr;
+}
+
+static void xilly_unmap_single_pci(struct xilly_dma *entry)
+{
+ pci_unmap_single(entry->pdev,
+ entry->dma_addr,
+ entry->size,
+ entry->direction);
+}
+
+static struct xilly_endpoint_hardware pci_hw = {
+ .owner = THIS_MODULE,
+ .hw_sync_sgl_for_cpu = xilly_dma_sync_single_for_cpu_pci,
+ .hw_sync_sgl_for_device = xilly_dma_sync_single_for_device_pci,
+ .map_single = xilly_map_single_pci,
+ .unmap_single = xilly_unmap_single_pci
+};
+
+static int xilly_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct xilly_endpoint *endpoint;
+ int rc = 0;
+
+ endpoint = xillybus_init_endpoint(pdev, NULL, &pci_hw);
+
+ if (!endpoint)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, endpoint);
+
+ rc = pci_enable_device(pdev);
+
+ /* L0s has caused packet drops. No power saving, thank you. */
+
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
+
+ if (rc) {
+ pr_err("xillybus: pci_enable_device() failed. "
+ "Aborting.\n");
+ goto no_enable;
+ }
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ pr_err("xillybus: Incorrect BAR configuration. "
+ "Aborting.\n");
+ rc = -ENODEV;
+ goto bad_bar;
+ }
+
+ rc = pci_request_regions(pdev, xillyname);
+ if (rc) {
+ pr_err("xillybus: pci_request_regions() failed. "
+ "Aborting.\n");
+ goto failed_request_regions;
+ }
+
+ endpoint->registers = pci_iomap(pdev, 0, 128);
+
+ if (!endpoint->registers) {
+ pr_err("xillybus: Failed to map BAR 0. Aborting.\n");
+ rc = -EIO; /* Otherwise the error path returns 0 */
+ goto failed_iomap0;
+ }
+
+ pci_set_master(pdev);
+
+ /* Set up a single MSI interrupt */
+ if (pci_enable_msi(pdev)) {
+ pr_err("xillybus: Failed to enable MSI interrupts. "
+ "Aborting.\n");
+ rc = -ENODEV;
+ goto failed_enable_msi;
+ }
+ rc = request_irq(pdev->irq, xillybus_isr, 0, xillyname, endpoint);
+
+ if (rc) {
+ pr_err("xillybus: Failed to register MSI handler. "
+ "Aborting.\n");
+ rc = -ENODEV;
+ goto failed_register_msi;
+ }
+
+ /*
+ * In theory, an attempt to set the DMA mask to 64 and dma_using_dac=1
+ * is the right thing. But some careless PCIe implementations report
+ * success even when the hardware drops those 64-bit PCIe packets. So
+ * trust nobody and use 32-bit DMA addressing in any case.
+ */
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
+ endpoint->dma_using_dac = 0;
+ else {
+ pr_err("xillybus: Failed to set DMA mask. "
+ "Aborting.\n");
+ rc = -ENODEV;
+ goto failed_dmamask;
+ }
+
+ rc = xillybus_endpoint_discovery(endpoint);
+
+ if (!rc)
+ return 0;
+
+failed_dmamask:
+ free_irq(pdev->irq, endpoint);
+failed_register_msi:
+ pci_disable_msi(pdev);
+failed_enable_msi:
+ /* pci_clear_master(pdev); Nobody else seems to do this */
+ pci_iounmap(pdev, endpoint->registers);
+failed_iomap0:
+ pci_release_regions(pdev);
+failed_request_regions:
+bad_bar:
+ pci_disable_device(pdev);
+no_enable:
+ xillybus_do_cleanup(&endpoint->cleanup, endpoint);
+
+ kfree(endpoint);
+ return rc;
+}
+
+static void xilly_remove(struct pci_dev *pdev)
+{
+ struct xilly_endpoint *endpoint = pci_get_drvdata(pdev);
+
+ xillybus_endpoint_remove(endpoint);
+
+ free_irq(pdev->irq, endpoint);
+
+ pci_disable_msi(pdev);
+ pci_iounmap(pdev, endpoint->registers);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+
+ xillybus_do_cleanup(&endpoint->cleanup, endpoint);
+
+ kfree(endpoint);
+}
+
+MODULE_DEVICE_TABLE(pci, xillyids);
+
+static struct pci_driver xillybus_driver = {
+ .name = xillyname,
+ .id_table = xillyids,
+ .probe = xilly_probe,
+ .remove = xilly_remove,
+};
+
+module_pci_driver(xillybus_driver);
diff --git a/drivers/staging/zcache/Kconfig b/drivers/staging/zcache/Kconfig
deleted file mode 100644
index 2d7b2da3b9e..00000000000
--- a/drivers/staging/zcache/Kconfig
+++ /dev/null
@@ -1,59 +0,0 @@
-config ZCACHE
- tristate "Dynamic compression of swap pages and clean pagecache pages"
- depends on CRYPTO=y && SWAP=y && CLEANCACHE && FRONTSWAP
- select CRYPTO_LZO
- default n
- help
- Zcache doubles RAM efficiency while providing a significant
- performance boosts on many workloads. Zcache uses
- compression and an in-kernel implementation of transcendent
- memory to store clean page cache pages and swap in RAM,
- providing a noticeable reduction in disk I/O.
-
-config ZCACHE_DEBUG
- bool "Enable debug statistics"
- depends on DEBUG_FS && ZCACHE
- default n
- help
- This is used to provide an debugfs directory with counters of
- how zcache is doing. You probably want to set this to 'N'.
-
-config RAMSTER
- tristate "Cross-machine RAM capacity sharing, aka peer-to-peer tmem"
- depends on CONFIGFS_FS=y && SYSFS=y && !HIGHMEM && ZCACHE
- depends on NET
- # must ensure struct page is 8-byte aligned
- select HAVE_ALIGNED_STRUCT_PAGE if !64BIT
- default n
- help
- RAMster allows RAM on other machines in a cluster to be utilized
- dynamically and symmetrically instead of swapping to a local swap
- disk, thus improving performance on memory-constrained workloads
- while minimizing total RAM across the cluster. RAMster, like
- zcache2, compresses swap pages into local RAM, but then remotifies
- the compressed pages to another node in the RAMster cluster.
-
-config RAMSTER_DEBUG
- bool "Enable ramster debug statistics"
- depends on DEBUG_FS && RAMSTER
- default n
- help
- This is used to provide an debugfs directory with counters of
- how ramster is doing. You probably want to set this to 'N'.
-
-# Depends on not-yet-upstreamed mm patches to export end_swap_bio_write and
-# __add_to_swap_cache, and implement __swap_writepage (which is swap_writepage
-# without the frontswap call. When these are in-tree, the dependency on
-# BROKEN can be removed
-config ZCACHE_WRITEBACK
- bool "Allow compressed swap pages to be writtenback to swap disk"
- depends on ZCACHE=y && BROKEN
- default n
- help
- Zcache caches compressed swap pages (and other data) in RAM which
- often improves performance by avoiding I/O's due to swapping.
- In some workloads with very long-lived large processes, it can
- instead reduce performance. Writeback decompresses zcache-compressed
- pages (in LRU order) when under memory pressure and writes them to
- the backing swap disk to ameliorate this problem. Policy driving
- writeback is still under development.
diff --git a/drivers/staging/zcache/Makefile b/drivers/staging/zcache/Makefile
deleted file mode 100644
index 845a5c2721b..00000000000
--- a/drivers/staging/zcache/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-zcache-y := zcache-main.o tmem.o zbud.o
-zcache-$(CONFIG_ZCACHE_DEBUG) += debug.o
-zcache-$(CONFIG_RAMSTER_DEBUG) += ramster/debug.o
-zcache-$(CONFIG_RAMSTER) += ramster/ramster.o ramster/r2net.o
-zcache-$(CONFIG_RAMSTER) += ramster/nodemanager.o ramster/tcp.o
-zcache-$(CONFIG_RAMSTER) += ramster/heartbeat.o ramster/masklog.o
-
-obj-$(CONFIG_ZCACHE) += zcache.o
diff --git a/drivers/staging/zcache/TODO b/drivers/staging/zcache/TODO
deleted file mode 100644
index d0c18fa9574..00000000000
--- a/drivers/staging/zcache/TODO
+++ /dev/null
@@ -1,64 +0,0 @@
-
-** ZCACHE PLAN FOR PROMOTION FROM STAGING **
-
-Last updated: Feb 13, 2013
-
-PLAN STEPS
-
-1. merge zcache and ramster to eliminate horrible code duplication
-2. converge on a predictable, writeback-capable allocator
-3. use debugfs instead of sysfs (per akpm feedback in 2011)
-4. zcache side of cleancache/mm WasActive patch
-5. zcache side of frontswap exclusive gets
-6. zcache must be able to writeback to physical swap disk
- (per Andrea Arcangeli feedback in 2011)
-7. implement adequate policy for writeback
-8. frontswap/cleancache work to allow zcache to be loaded
- as a module
-9. get core mm developer to review
-10. incorporate feedback from review
-11. get review/acks from 1-2 additional mm developers
-12. incorporate any feedback from additional mm reviews
-13. propose location/file-naming in mm tree
-14. repeat 9-13 as necessary until akpm is happy and merges
-
-STATUS/OWNERSHIP
-
-1. DONE as part of "new" zcache; in staging/zcache for 3.9
-2. DONE as part of "new" zcache (cf zbud.[ch]); in staging/zcache for 3.9
- (this was the core of the zcache1 vs zcache2 flail)
-3. DONE as part of "new" zcache; in staging/zcache for 3.9
-4. DONE (w/caveats) as part of "new" zcache; per cleancache performance
- feedback see https://lkml.org/lkml/2011/8/17/351, in
- staging/zcache for 3.9; dependent on proposed mm patch, see
- https://lkml.org/lkml/2012/1/25/300
-5. DONE as part of "new" zcache; performance tuning only,
- in staging/zcache for 3.9; dependent on frontswap patch
- merged in 3.7 (33c2a174)
-6. DONE (w/caveats), prototyped as part of "new" zcache, had
- bad memory leak; reimplemented to use sjennings clever tricks
- and proposed mm patches with new version in staging/zcache
- for 3.9, see https://lkml.org/lkml/2013/2/6/437;
-7. PROTOTYPED as part of "new" zcache; in staging/zcache for 3.9;
- needs more review (plan to discuss at LSF/MM 2013)
-9. IN PROGRESS; owned by Konrad Wilk; Mel Gorman provided
- great feedback in August 2012 (unfortunately of "old"
- zcache)
-11. NOT DONE; owned by Konrad Wilk and Bob Liu
-12. TBD (depends on quantity of feedback)
-13. PROPOSED; one suggestion proposed by Dan; needs more ideas/feedback
-14. TBD (depends on feedback)
-
-WHO NEEDS TO AGREE
-
-Not sure. Seth Jennings is now pursuing a separate but semi-parallel
-track. Akpm clearly has to approve for any mm merge to happen. Minchan
-Kim has interest but may be happy if/when zram is merged into mm. Konrad
-Wilk may be maintainer if akpm decides compression is maintainable
-separately from the rest of mm. (More LSF/MM 2013 discussion.)
-
-ZCACHE FUTURE NEW FUNCTIONALITY
-
-A. Support zsmalloc as an alternative high-density allocator
- (See https://lkml.org/lkml/2013/1/23/511)
-B. Possibly support three zbuds per pageframe when space allows
diff --git a/drivers/staging/zcache/debug.c b/drivers/staging/zcache/debug.c
deleted file mode 100644
index daa26919b88..00000000000
--- a/drivers/staging/zcache/debug.c
+++ /dev/null
@@ -1,107 +0,0 @@
-#include <linux/atomic.h>
-#include "debug.h"
-
-#ifdef CONFIG_ZCACHE_DEBUG
-#include <linux/debugfs.h>
-
-ssize_t zcache_obj_count;
-ssize_t zcache_obj_count_max;
-ssize_t zcache_objnode_count;
-ssize_t zcache_objnode_count_max;
-u64 zcache_eph_zbytes;
-u64 zcache_eph_zbytes_max;
-u64 zcache_pers_zbytes_max;
-ssize_t zcache_eph_pageframes_max;
-ssize_t zcache_pers_pageframes_max;
-ssize_t zcache_pageframes_alloced;
-ssize_t zcache_pageframes_freed;
-ssize_t zcache_eph_zpages;
-ssize_t zcache_eph_zpages_max;
-ssize_t zcache_pers_zpages_max;
-ssize_t zcache_flush_total;
-ssize_t zcache_flush_found;
-ssize_t zcache_flobj_total;
-ssize_t zcache_flobj_found;
-ssize_t zcache_failed_eph_puts;
-ssize_t zcache_failed_pers_puts;
-ssize_t zcache_failed_getfreepages;
-ssize_t zcache_failed_alloc;
-ssize_t zcache_put_to_flush;
-ssize_t zcache_compress_poor;
-ssize_t zcache_mean_compress_poor;
-ssize_t zcache_eph_ate_tail;
-ssize_t zcache_eph_ate_tail_failed;
-ssize_t zcache_pers_ate_eph;
-ssize_t zcache_pers_ate_eph_failed;
-ssize_t zcache_evicted_eph_zpages;
-ssize_t zcache_evicted_eph_pageframes;
-ssize_t zcache_zero_filled_pages;
-ssize_t zcache_zero_filled_pages_max;
-
-#define ATTR(x) { .name = #x, .val = &zcache_##x, }
-static struct debug_entry {
- const char *name;
- ssize_t *val;
-} attrs[] = {
- ATTR(obj_count), ATTR(obj_count_max),
- ATTR(objnode_count), ATTR(objnode_count_max),
- ATTR(flush_total), ATTR(flush_found),
- ATTR(flobj_total), ATTR(flobj_found),
- ATTR(failed_eph_puts), ATTR(failed_pers_puts),
- ATTR(failed_getfreepages), ATTR(failed_alloc),
- ATTR(put_to_flush),
- ATTR(compress_poor), ATTR(mean_compress_poor),
- ATTR(eph_ate_tail), ATTR(eph_ate_tail_failed),
- ATTR(pers_ate_eph), ATTR(pers_ate_eph_failed),
- ATTR(evicted_eph_zpages), ATTR(evicted_eph_pageframes),
- ATTR(eph_pageframes), ATTR(eph_pageframes_max),
- ATTR(pers_pageframes), ATTR(pers_pageframes_max),
- ATTR(eph_zpages), ATTR(eph_zpages_max),
- ATTR(pers_zpages), ATTR(pers_zpages_max),
- ATTR(last_active_file_pageframes),
- ATTR(last_inactive_file_pageframes),
- ATTR(last_active_anon_pageframes),
- ATTR(last_inactive_anon_pageframes),
- ATTR(eph_nonactive_puts_ignored),
- ATTR(pers_nonactive_puts_ignored),
- ATTR(zero_filled_pages),
-#ifdef CONFIG_ZCACHE_WRITEBACK
- ATTR(outstanding_writeback_pages),
- ATTR(writtenback_pages),
-#endif
-};
-#undef ATTR
-int zcache_debugfs_init(void)
-{
- unsigned int i;
- struct dentry *root = debugfs_create_dir("zcache", NULL);
- if (root == NULL)
- return -ENXIO;
-
- for (i = 0; i < ARRAY_SIZE(attrs); i++)
- if (!debugfs_create_size_t(attrs[i].name, S_IRUGO, root, attrs[i].val))
- goto out;
-
- debugfs_create_u64("eph_zbytes", S_IRUGO, root, &zcache_eph_zbytes);
- debugfs_create_u64("eph_zbytes_max", S_IRUGO, root, &zcache_eph_zbytes_max);
- debugfs_create_u64("pers_zbytes", S_IRUGO, root, &zcache_pers_zbytes);
- debugfs_create_u64("pers_zbytes_max", S_IRUGO, root, &zcache_pers_zbytes_max);
-
- return 0;
-out:
- return -ENODEV;
-}
-
-/* developers can call this in case of ooms, e.g. to find memory leaks */
-void zcache_dump(void)
-{
- unsigned int i;
- for (i = 0; i < ARRAY_SIZE(attrs); i++)
- pr_debug("zcache: %s=%zu\n", attrs[i].name, *attrs[i].val);
-
- pr_debug("zcache: eph_zbytes=%llu\n", (unsigned long long)zcache_eph_zbytes);
- pr_debug("zcache: eph_zbytes_max=%llu\n", (unsigned long long)zcache_eph_zbytes_max);
- pr_debug("zcache: pers_zbytes=%llu\n", (unsigned long long)zcache_pers_zbytes);
- pr_debug("zcache: pers_zbytes_max=%llu\n", (unsigned long long)zcache_pers_zbytes_max);
-}
-#endif
diff --git a/drivers/staging/zcache/debug.h b/drivers/staging/zcache/debug.h
deleted file mode 100644
index 8088d28f2dc..00000000000
--- a/drivers/staging/zcache/debug.h
+++ /dev/null
@@ -1,305 +0,0 @@
-#include <linux/bug.h>
-
-#ifdef CONFIG_ZCACHE_DEBUG
-
-/* we try to keep these statistics SMP-consistent */
-extern ssize_t zcache_obj_count;
-static atomic_t zcache_obj_atomic = ATOMIC_INIT(0);
-extern ssize_t zcache_obj_count_max;
-static inline void inc_zcache_obj_count(void)
-{
- zcache_obj_count = atomic_inc_return(&zcache_obj_atomic);
- if (zcache_obj_count > zcache_obj_count_max)
- zcache_obj_count_max = zcache_obj_count;
-}
-static inline void dec_zcache_obj_count(void)
-{
- zcache_obj_count = atomic_dec_return(&zcache_obj_atomic);
- BUG_ON(zcache_obj_count < 0);
-};
-extern ssize_t zcache_objnode_count;
-static atomic_t zcache_objnode_atomic = ATOMIC_INIT(0);
-extern ssize_t zcache_objnode_count_max;
-static inline void inc_zcache_objnode_count(void)
-{
- zcache_objnode_count = atomic_inc_return(&zcache_objnode_atomic);
- if (zcache_objnode_count > zcache_objnode_count_max)
- zcache_objnode_count_max = zcache_objnode_count;
-};
-static inline void dec_zcache_objnode_count(void)
-{
- zcache_objnode_count = atomic_dec_return(&zcache_objnode_atomic);
- BUG_ON(zcache_objnode_count < 0);
-};
-extern u64 zcache_eph_zbytes;
-static atomic_long_t zcache_eph_zbytes_atomic = ATOMIC_INIT(0);
-extern u64 zcache_eph_zbytes_max;
-static inline void inc_zcache_eph_zbytes(unsigned clen)
-{
- zcache_eph_zbytes = atomic_long_add_return(clen, &zcache_eph_zbytes_atomic);
- if (zcache_eph_zbytes > zcache_eph_zbytes_max)
- zcache_eph_zbytes_max = zcache_eph_zbytes;
-};
-static inline void dec_zcache_eph_zbytes(unsigned zsize)
-{
- zcache_eph_zbytes = atomic_long_sub_return(zsize, &zcache_eph_zbytes_atomic);
-};
-extern u64 zcache_pers_zbytes;
-static atomic_long_t zcache_pers_zbytes_atomic = ATOMIC_INIT(0);
-extern u64 zcache_pers_zbytes_max;
-static inline void inc_zcache_pers_zbytes(unsigned clen)
-{
- zcache_pers_zbytes = atomic_long_add_return(clen, &zcache_pers_zbytes_atomic);
- if (zcache_pers_zbytes > zcache_pers_zbytes_max)
- zcache_pers_zbytes_max = zcache_pers_zbytes;
-}
-static inline void dec_zcache_pers_zbytes(unsigned zsize)
-{
- zcache_pers_zbytes = atomic_long_sub_return(zsize, &zcache_pers_zbytes_atomic);
-}
-extern ssize_t zcache_eph_pageframes;
-static atomic_t zcache_eph_pageframes_atomic = ATOMIC_INIT(0);
-extern ssize_t zcache_eph_pageframes_max;
-static inline void inc_zcache_eph_pageframes(void)
-{
- zcache_eph_pageframes = atomic_inc_return(&zcache_eph_pageframes_atomic);
- if (zcache_eph_pageframes > zcache_eph_pageframes_max)
- zcache_eph_pageframes_max = zcache_eph_pageframes;
-};
-static inline void dec_zcache_eph_pageframes(void)
-{
- zcache_eph_pageframes = atomic_dec_return(&zcache_eph_pageframes_atomic);
-};
-extern ssize_t zcache_pers_pageframes;
-static atomic_t zcache_pers_pageframes_atomic = ATOMIC_INIT(0);
-extern ssize_t zcache_pers_pageframes_max;
-static inline void inc_zcache_pers_pageframes(void)
-{
- zcache_pers_pageframes = atomic_inc_return(&zcache_pers_pageframes_atomic);
- if (zcache_pers_pageframes > zcache_pers_pageframes_max)
- zcache_pers_pageframes_max = zcache_pers_pageframes;
-}
-static inline void dec_zcache_pers_pageframes(void)
-{
- zcache_pers_pageframes = atomic_dec_return(&zcache_pers_pageframes_atomic);
-}
-extern ssize_t zcache_pageframes_alloced;
-static atomic_t zcache_pageframes_alloced_atomic = ATOMIC_INIT(0);
-static inline void inc_zcache_pageframes_alloced(void)
-{
- zcache_pageframes_alloced = atomic_inc_return(&zcache_pageframes_alloced_atomic);
-};
-extern ssize_t zcache_pageframes_freed;
-static atomic_t zcache_pageframes_freed_atomic = ATOMIC_INIT(0);
-static inline void inc_zcache_pageframes_freed(void)
-{
- zcache_pageframes_freed = atomic_inc_return(&zcache_pageframes_freed_atomic);
-}
-extern ssize_t zcache_eph_zpages;
-static atomic_t zcache_eph_zpages_atomic = ATOMIC_INIT(0);
-extern ssize_t zcache_eph_zpages_max;
-static inline void inc_zcache_eph_zpages(void)
-{
- zcache_eph_zpages = atomic_inc_return(&zcache_eph_zpages_atomic);
- if (zcache_eph_zpages > zcache_eph_zpages_max)
- zcache_eph_zpages_max = zcache_eph_zpages;
-}
-static inline void dec_zcache_eph_zpages(unsigned zpages)
-{
- zcache_eph_zpages = atomic_sub_return(zpages, &zcache_eph_zpages_atomic);
-}
-extern ssize_t zcache_pers_zpages;
-static atomic_t zcache_pers_zpages_atomic = ATOMIC_INIT(0);
-extern ssize_t zcache_pers_zpages_max;
-static inline void inc_zcache_pers_zpages(void)
-{
- zcache_pers_zpages = atomic_inc_return(&zcache_pers_zpages_atomic);
- if (zcache_pers_zpages > zcache_pers_zpages_max)
- zcache_pers_zpages_max = zcache_pers_zpages;
-}
-static inline void dec_zcache_pers_zpages(unsigned zpages)
-{
- zcache_pers_zpages = atomic_sub_return(zpages, &zcache_pers_zpages_atomic);
-}
-
-extern ssize_t zcache_zero_filled_pages;
-static atomic_t zcache_zero_filled_pages_atomic = ATOMIC_INIT(0);
-extern ssize_t zcache_zero_filled_pages_max;
-static inline void inc_zcache_zero_filled_pages(void)
-{
- zcache_zero_filled_pages = atomic_inc_return(
- &zcache_zero_filled_pages_atomic);
- if (zcache_zero_filled_pages > zcache_zero_filled_pages_max)
- zcache_zero_filled_pages_max = zcache_zero_filled_pages;
-}
-static inline void dec_zcache_zero_filled_pages(void)
-{
- zcache_zero_filled_pages = atomic_dec_return(
- &zcache_zero_filled_pages_atomic);
-}
-static inline unsigned long curr_pageframes_count(void)
-{
- return zcache_pageframes_alloced -
- atomic_read(&zcache_pageframes_freed_atomic) -
- atomic_read(&zcache_eph_pageframes_atomic) -
- atomic_read(&zcache_pers_pageframes_atomic);
-};
-/* but for the rest of these, counting races are ok */
-extern ssize_t zcache_flush_total;
-extern ssize_t zcache_flush_found;
-extern ssize_t zcache_flobj_total;
-extern ssize_t zcache_flobj_found;
-extern ssize_t zcache_failed_eph_puts;
-extern ssize_t zcache_failed_pers_puts;
-extern ssize_t zcache_failed_getfreepages;
-extern ssize_t zcache_failed_alloc;
-extern ssize_t zcache_put_to_flush;
-extern ssize_t zcache_compress_poor;
-extern ssize_t zcache_mean_compress_poor;
-extern ssize_t zcache_eph_ate_tail;
-extern ssize_t zcache_eph_ate_tail_failed;
-extern ssize_t zcache_pers_ate_eph;
-extern ssize_t zcache_pers_ate_eph_failed;
-extern ssize_t zcache_evicted_eph_zpages;
-extern ssize_t zcache_evicted_eph_pageframes;
-
-extern ssize_t zcache_last_active_file_pageframes;
-extern ssize_t zcache_last_inactive_file_pageframes;
-extern ssize_t zcache_last_active_anon_pageframes;
-extern ssize_t zcache_last_inactive_anon_pageframes;
-static ssize_t zcache_eph_nonactive_puts_ignored;
-static ssize_t zcache_pers_nonactive_puts_ignored;
-#ifdef CONFIG_ZCACHE_WRITEBACK
-extern ssize_t zcache_writtenback_pages;
-extern ssize_t zcache_outstanding_writeback_pages;
-#endif
-
-static inline void inc_zcache_flush_total(void)
-{
- zcache_flush_total++;
-};
-static inline void inc_zcache_flush_found(void)
-{
- zcache_flush_found++;
-};
-static inline void inc_zcache_flobj_total(void)
-{
- zcache_flobj_total++;
-};
-static inline void inc_zcache_flobj_found(void)
-{
- zcache_flobj_found++;
-};
-static inline void inc_zcache_failed_eph_puts(void)
-{
- zcache_failed_eph_puts++;
-};
-static inline void inc_zcache_failed_pers_puts(void)
-{
- zcache_failed_pers_puts++;
-};
-static inline void inc_zcache_failed_getfreepages(void)
-{
- zcache_failed_getfreepages++;
-};
-static inline void inc_zcache_failed_alloc(void)
-{
- zcache_failed_alloc++;
-};
-static inline void inc_zcache_put_to_flush(void)
-{
- zcache_put_to_flush++;
-};
-static inline void inc_zcache_compress_poor(void)
-{
- zcache_compress_poor++;
-};
-static inline void inc_zcache_mean_compress_poor(void)
-{
- zcache_mean_compress_poor++;
-};
-static inline void inc_zcache_eph_ate_tail(void)
-{
- zcache_eph_ate_tail++;
-};
-static inline void inc_zcache_eph_ate_tail_failed(void)
-{
- zcache_eph_ate_tail_failed++;
-};
-static inline void inc_zcache_pers_ate_eph(void)
-{
- zcache_pers_ate_eph++;
-};
-static inline void inc_zcache_pers_ate_eph_failed(void)
-{
- zcache_pers_ate_eph_failed++;
-};
-static inline void inc_zcache_evicted_eph_zpages(unsigned zpages)
-{
- zcache_evicted_eph_zpages += zpages;
-};
-static inline void inc_zcache_evicted_eph_pageframes(void)
-{
- zcache_evicted_eph_pageframes++;
-};
-
-static inline void inc_zcache_eph_nonactive_puts_ignored(void)
-{
- zcache_eph_nonactive_puts_ignored++;
-};
-static inline void inc_zcache_pers_nonactive_puts_ignored(void)
-{
- zcache_pers_nonactive_puts_ignored++;
-};
-
-int zcache_debugfs_init(void);
-#else
-static inline void inc_zcache_obj_count(void) { };
-static inline void dec_zcache_obj_count(void) { };
-static inline void inc_zcache_objnode_count(void) { };
-static inline void dec_zcache_objnode_count(void) { };
-static inline void inc_zcache_eph_zbytes(unsigned clen) { };
-static inline void dec_zcache_eph_zbytes(unsigned zsize) { };
-static inline void inc_zcache_pers_zbytes(unsigned clen) { };
-static inline void dec_zcache_pers_zbytes(unsigned zsize) { };
-static inline void inc_zcache_eph_pageframes(void) { };
-static inline void dec_zcache_eph_pageframes(void) { };
-static inline void inc_zcache_pers_pageframes(void) { };
-static inline void dec_zcache_pers_pageframes(void) { };
-static inline void inc_zcache_pageframes_alloced(void) { };
-static inline void inc_zcache_pageframes_freed(void) { };
-static inline void inc_zcache_eph_zpages(void) { };
-static inline void dec_zcache_eph_zpages(unsigned zpages) { };
-static inline void inc_zcache_pers_zpages(void) { };
-static inline void dec_zcache_pers_zpages(unsigned zpages) { };
-static inline void inc_zcache_zero_filled_pages(void) { };
-static inline void dec_zcache_zero_filled_pages(void) { };
-static inline unsigned long curr_pageframes_count(void)
-{
- return 0;
-};
-static inline int zcache_debugfs_init(void)
-{
- return 0;
-};
-static inline void inc_zcache_flush_total(void) { };
-static inline void inc_zcache_flush_found(void) { };
-static inline void inc_zcache_flobj_total(void) { };
-static inline void inc_zcache_flobj_found(void) { };
-static inline void inc_zcache_failed_eph_puts(void) { };
-static inline void inc_zcache_failed_pers_puts(void) { };
-static inline void inc_zcache_failed_getfreepages(void) { };
-static inline void inc_zcache_failed_alloc(void) { };
-static inline void inc_zcache_put_to_flush(void) { };
-static inline void inc_zcache_compress_poor(void) { };
-static inline void inc_zcache_mean_compress_poor(void) { };
-static inline void inc_zcache_eph_ate_tail(void) { };
-static inline void inc_zcache_eph_ate_tail_failed(void) { };
-static inline void inc_zcache_pers_ate_eph(void) { };
-static inline void inc_zcache_pers_ate_eph_failed(void) { };
-static inline void inc_zcache_evicted_eph_zpages(unsigned zpages) { };
-static inline void inc_zcache_evicted_eph_pageframes(void) { };
-
-static inline void inc_zcache_eph_nonactive_puts_ignored(void) { };
-static inline void inc_zcache_pers_nonactive_puts_ignored(void) { };
-#endif
diff --git a/drivers/staging/zcache/ramster.h b/drivers/staging/zcache/ramster.h
deleted file mode 100644
index a858666eae6..00000000000
--- a/drivers/staging/zcache/ramster.h
+++ /dev/null
@@ -1,59 +0,0 @@
-
-/*
- * zcache/ramster.h
- *
- * Placeholder to resolve ramster references when !CONFIG_RAMSTER
- * Real ramster.h lives in ramster subdirectory.
- *
- * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
- */
-
-#ifndef _ZCACHE_RAMSTER_H_
-#define _ZCACHE_RAMSTER_H_
-
-#ifdef CONFIG_RAMSTER
-#include "ramster/ramster.h"
-#else
-static inline void ramster_init(bool x, bool y, bool z, bool w)
-{
-}
-
-static inline void ramster_register_pamops(struct tmem_pamops *p)
-{
-}
-
-static inline int ramster_remotify_pageframe(bool b)
-{
- return 0;
-}
-
-static inline void *ramster_pampd_free(void *v, struct tmem_pool *p,
- struct tmem_oid *o, uint32_t u, bool b)
-{
- return NULL;
-}
-
-static inline int ramster_do_preload_flnode(struct tmem_pool *p)
-{
- return -1;
-}
-
-static inline bool pampd_is_remote(void *v)
-{
- return false;
-}
-
-static inline void ramster_count_foreign_pages(bool b, int i)
-{
-}
-
-static inline void ramster_cpu_up(int cpu)
-{
-}
-
-static inline void ramster_cpu_down(int cpu)
-{
-}
-#endif
-
-#endif /* _ZCACHE_RAMSTER_H_ */
diff --git a/drivers/staging/zcache/ramster/debug.c b/drivers/staging/zcache/ramster/debug.c
deleted file mode 100644
index 5b26ee977c2..00000000000
--- a/drivers/staging/zcache/ramster/debug.c
+++ /dev/null
@@ -1,68 +0,0 @@
-#include <linux/atomic.h>
-#include "debug.h"
-
-ssize_t ramster_foreign_eph_pages;
-ssize_t ramster_foreign_pers_pages;
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
-
-ssize_t ramster_eph_pages_remoted;
-ssize_t ramster_pers_pages_remoted;
-ssize_t ramster_eph_pages_remote_failed;
-ssize_t ramster_pers_pages_remote_failed;
-ssize_t ramster_remote_eph_pages_succ_get;
-ssize_t ramster_remote_pers_pages_succ_get;
-ssize_t ramster_remote_eph_pages_unsucc_get;
-ssize_t ramster_remote_pers_pages_unsucc_get;
-ssize_t ramster_pers_pages_remote_nomem;
-ssize_t ramster_remote_objects_flushed;
-ssize_t ramster_remote_object_flushes_failed;
-ssize_t ramster_remote_pages_flushed;
-ssize_t ramster_remote_page_flushes_failed;
-
-#define ATTR(x) { .name = #x, .val = &ramster_##x, }
-static struct debug_entry {
- const char *name;
- ssize_t *val;
-} attrs[] = {
- ATTR(eph_pages_remoted),
- ATTR(pers_pages_remoted),
- ATTR(eph_pages_remote_failed),
- ATTR(pers_pages_remote_failed),
- ATTR(remote_eph_pages_succ_get),
- ATTR(remote_pers_pages_succ_get),
- ATTR(remote_eph_pages_unsucc_get),
- ATTR(remote_pers_pages_unsucc_get),
- ATTR(pers_pages_remote_nomem),
- ATTR(remote_objects_flushed),
- ATTR(remote_pages_flushed),
- ATTR(remote_object_flushes_failed),
- ATTR(remote_page_flushes_failed),
- ATTR(foreign_eph_pages),
- ATTR(foreign_eph_pages_max),
- ATTR(foreign_pers_pages),
- ATTR(foreign_pers_pages_max),
-};
-#undef ATTR
-
-int ramster_debugfs_init(void)
-{
- int i;
- struct dentry *root = debugfs_create_dir("ramster", NULL);
- if (root == NULL)
- return -ENXIO;
-
- for (i = 0; i < ARRAY_SIZE(attrs); i++)
- if (!debugfs_create_size_t(attrs[i].name,
- S_IRUGO, root, attrs[i].val))
- goto out;
- return 0;
-out:
- return -ENODEV;
-}
-#else
-static inline int ramster_debugfs_init(void)
-{
- return 0;
-}
-#endif
diff --git a/drivers/staging/zcache/ramster/debug.h b/drivers/staging/zcache/ramster/debug.h
deleted file mode 100644
index 5ffab50807d..00000000000
--- a/drivers/staging/zcache/ramster/debug.h
+++ /dev/null
@@ -1,145 +0,0 @@
-#include <linux/bug.h>
-
-#ifdef CONFIG_RAMSTER_DEBUG
-
-extern long ramster_flnodes;
-static atomic_t ramster_flnodes_atomic = ATOMIC_INIT(0);
-static unsigned long ramster_flnodes_max;
-static inline void inc_ramster_flnodes(void)
-{
- ramster_flnodes = atomic_inc_return(&ramster_flnodes_atomic);
- if (ramster_flnodes > ramster_flnodes_max)
- ramster_flnodes_max = ramster_flnodes;
-}
-static inline void dec_ramster_flnodes(void)
-{
- ramster_flnodes = atomic_dec_return(&ramster_flnodes_atomic);
-}
-extern ssize_t ramster_foreign_eph_pages;
-static atomic_t ramster_foreign_eph_pages_atomic = ATOMIC_INIT(0);
-static ssize_t ramster_foreign_eph_pages_max;
-static inline void inc_ramster_foreign_eph_pages(void)
-{
- ramster_foreign_eph_pages = atomic_inc_return(
- &ramster_foreign_eph_pages_atomic);
- if (ramster_foreign_eph_pages > ramster_foreign_eph_pages_max)
- ramster_foreign_eph_pages_max = ramster_foreign_eph_pages;
-}
-static inline void dec_ramster_foreign_eph_pages(void)
-{
- ramster_foreign_eph_pages = atomic_dec_return(
- &ramster_foreign_eph_pages_atomic);
-}
-extern ssize_t ramster_foreign_pers_pages;
-static atomic_t ramster_foreign_pers_pages_atomic = ATOMIC_INIT(0);
-static ssize_t ramster_foreign_pers_pages_max;
-static inline void inc_ramster_foreign_pers_pages(void)
-{
- ramster_foreign_pers_pages = atomic_inc_return(
- &ramster_foreign_pers_pages_atomic);
- if (ramster_foreign_pers_pages > ramster_foreign_pers_pages_max)
- ramster_foreign_pers_pages_max = ramster_foreign_pers_pages;
-}
-static inline void dec_ramster_foreign_pers_pages(void)
-{
- ramster_foreign_pers_pages = atomic_dec_return(
- &ramster_foreign_pers_pages_atomic);
-}
-
-extern ssize_t ramster_eph_pages_remoted;
-extern ssize_t ramster_pers_pages_remoted;
-extern ssize_t ramster_eph_pages_remote_failed;
-extern ssize_t ramster_pers_pages_remote_failed;
-extern ssize_t ramster_remote_eph_pages_succ_get;
-extern ssize_t ramster_remote_pers_pages_succ_get;
-extern ssize_t ramster_remote_eph_pages_unsucc_get;
-extern ssize_t ramster_remote_pers_pages_unsucc_get;
-extern ssize_t ramster_pers_pages_remote_nomem;
-extern ssize_t ramster_remote_objects_flushed;
-extern ssize_t ramster_remote_object_flushes_failed;
-extern ssize_t ramster_remote_pages_flushed;
-extern ssize_t ramster_remote_page_flushes_failed;
-
-int ramster_debugfs_init(void);
-
-static inline void inc_ramster_eph_pages_remoted(void)
-{
- ramster_eph_pages_remoted++;
-};
-static inline void inc_ramster_pers_pages_remoted(void)
-{
- ramster_pers_pages_remoted++;
-};
-static inline void inc_ramster_eph_pages_remote_failed(void)
-{
- ramster_eph_pages_remote_failed++;
-};
-static inline void inc_ramster_pers_pages_remote_failed(void)
-{
- ramster_pers_pages_remote_failed++;
-};
-static inline void inc_ramster_remote_eph_pages_succ_get(void)
-{
- ramster_remote_eph_pages_succ_get++;
-};
-static inline void inc_ramster_remote_pers_pages_succ_get(void)
-{
- ramster_remote_pers_pages_succ_get++;
-};
-static inline void inc_ramster_remote_eph_pages_unsucc_get(void)
-{
- ramster_remote_eph_pages_unsucc_get++;
-};
-static inline void inc_ramster_remote_pers_pages_unsucc_get(void)
-{
- ramster_remote_pers_pages_unsucc_get++;
-};
-static inline void inc_ramster_pers_pages_remote_nomem(void)
-{
- ramster_pers_pages_remote_nomem++;
-};
-static inline void inc_ramster_remote_objects_flushed(void)
-{
- ramster_remote_objects_flushed++;
-};
-static inline void inc_ramster_remote_object_flushes_failed(void)
-{
- ramster_remote_object_flushes_failed++;
-};
-static inline void inc_ramster_remote_pages_flushed(void)
-{
- ramster_remote_pages_flushed++;
-};
-static inline void inc_ramster_remote_page_flushes_failed(void)
-{
- ramster_remote_page_flushes_failed++;
-};
-
-#else
-
-static inline void inc_ramster_flnodes(void) { };
-static inline void dec_ramster_flnodes(void) { };
-static inline void inc_ramster_foreign_eph_pages(void) { };
-static inline void dec_ramster_foreign_eph_pages(void) { };
-static inline void inc_ramster_foreign_pers_pages(void) { };
-static inline void dec_ramster_foreign_pers_pages(void) { };
-
-static inline void inc_ramster_eph_pages_remoted(void) { };
-static inline void inc_ramster_pers_pages_remoted(void) { };
-static inline void inc_ramster_eph_pages_remote_failed(void) { };
-static inline void inc_ramster_pers_pages_remote_failed(void) { };
-static inline void inc_ramster_remote_eph_pages_succ_get(void) { };
-static inline void inc_ramster_remote_pers_pages_succ_get(void) { };
-static inline void inc_ramster_remote_eph_pages_unsucc_get(void) { };
-static inline void inc_ramster_remote_pers_pages_unsucc_get(void) { };
-static inline void inc_ramster_pers_pages_remote_nomem(void) { };
-static inline void inc_ramster_remote_objects_flushed(void) { };
-static inline void inc_ramster_remote_object_flushes_failed(void) { };
-static inline void inc_ramster_remote_pages_flushed(void) { };
-static inline void inc_ramster_remote_page_flushes_failed(void) { };
-
-static inline int ramster_debugfs_init(void)
-{
- return 0;
-}
-#endif
diff --git a/drivers/staging/zcache/ramster/heartbeat.c b/drivers/staging/zcache/ramster/heartbeat.c
deleted file mode 100644
index 75d3fe80b05..00000000000
--- a/drivers/staging/zcache/ramster/heartbeat.c
+++ /dev/null
@@ -1,462 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/configfs.h>
-
-#include "heartbeat.h"
-#include "tcp.h"
-#include "nodemanager.h"
-
-#include "masklog.h"
-
-/*
- * The first heartbeat pass had one global thread that would serialize all hb
- * callback calls. This global serializing sem should only be removed once
- * we've made sure that all callees can deal with being called concurrently
- * from multiple hb region threads.
- */
-static DECLARE_RWSEM(r2hb_callback_sem);
-
-/*
- * multiple hb threads are watching multiple regions. A node is live
- * whenever any of the threads sees activity from the node in its region.
- */
-static DEFINE_SPINLOCK(r2hb_live_lock);
-static unsigned long r2hb_live_node_bitmap[BITS_TO_LONGS(R2NM_MAX_NODES)];
-
-static struct r2hb_callback {
- struct list_head list;
-} r2hb_callbacks[R2HB_NUM_CB];
-
-enum r2hb_heartbeat_modes {
- R2HB_HEARTBEAT_LOCAL = 0,
- R2HB_HEARTBEAT_GLOBAL,
- R2HB_HEARTBEAT_NUM_MODES,
-};
-
-char *r2hb_heartbeat_mode_desc[R2HB_HEARTBEAT_NUM_MODES] = {
- "local", /* R2HB_HEARTBEAT_LOCAL */
- "global", /* R2HB_HEARTBEAT_GLOBAL */
-};
-
-unsigned int r2hb_dead_threshold = R2HB_DEFAULT_DEAD_THRESHOLD;
-unsigned int r2hb_heartbeat_mode = R2HB_HEARTBEAT_LOCAL;
-
-/* Only sets a new threshold if there are no active regions.
- *
- * No locking or otherwise interesting code is required for reading
- * r2hb_dead_threshold as it can't change once regions are active and
- * it's not interesting to anyone until then anyway. */
-static void r2hb_dead_threshold_set(unsigned int threshold)
-{
- if (threshold > R2HB_MIN_DEAD_THRESHOLD) {
- spin_lock(&r2hb_live_lock);
- r2hb_dead_threshold = threshold;
- spin_unlock(&r2hb_live_lock);
- }
-}
-
-static int r2hb_global_hearbeat_mode_set(unsigned int hb_mode)
-{
- int ret = -1;
-
- if (hb_mode < R2HB_HEARTBEAT_NUM_MODES) {
- spin_lock(&r2hb_live_lock);
- r2hb_heartbeat_mode = hb_mode;
- ret = 0;
- spin_unlock(&r2hb_live_lock);
- }
-
- return ret;
-}
-
-void r2hb_exit(void)
-{
-}
-
-int r2hb_init(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(r2hb_callbacks); i++)
- INIT_LIST_HEAD(&r2hb_callbacks[i].list);
-
- memset(r2hb_live_node_bitmap, 0, sizeof(r2hb_live_node_bitmap));
-
- return 0;
-}
-
-/* if we're already in a callback then we're already serialized by the sem */
-static void r2hb_fill_node_map_from_callback(unsigned long *map,
- unsigned bytes)
-{
- BUG_ON(bytes < (BITS_TO_LONGS(R2NM_MAX_NODES) * sizeof(unsigned long)));
-
- memcpy(map, &r2hb_live_node_bitmap, bytes);
-}
-
-/*
- * get a map of all nodes that are heartbeating in any regions
- */
-void r2hb_fill_node_map(unsigned long *map, unsigned bytes)
-{
- /* callers want to serialize this map and callbacks so that they
- * can trust that they don't miss nodes coming to the party */
- down_read(&r2hb_callback_sem);
- spin_lock(&r2hb_live_lock);
- r2hb_fill_node_map_from_callback(map, bytes);
- spin_unlock(&r2hb_live_lock);
- up_read(&r2hb_callback_sem);
-}
-EXPORT_SYMBOL_GPL(r2hb_fill_node_map);
-
-/*
- * heartbeat configfs bits. The heartbeat set is a default set under
- * the cluster set in nodemanager.c.
- */
-
-/* heartbeat set */
-
-struct r2hb_hb_group {
- struct config_group hs_group;
- /* some stuff? */
-};
-
-static struct r2hb_hb_group *to_r2hb_hb_group(struct config_group *group)
-{
- return group ?
- container_of(group, struct r2hb_hb_group, hs_group)
- : NULL;
-}
-
-static struct config_item r2hb_config_item;
-
-static struct config_item *r2hb_hb_group_make_item(struct config_group *group,
- const char *name)
-{
- int ret;
-
- if (strlen(name) > R2HB_MAX_REGION_NAME_LEN) {
- ret = -ENAMETOOLONG;
- goto free;
- }
-
- config_item_put(&r2hb_config_item);
-
- return &r2hb_config_item;
-free:
- return ERR_PTR(ret);
-}
-
-static void r2hb_hb_group_drop_item(struct config_group *group,
- struct config_item *item)
-{
- if (r2hb_global_heartbeat_active()) {
- pr_notice("ramster: Heartbeat %s on region %s (%s)\n",
- "stopped/aborted", config_item_name(item),
- "no region");
- }
-
- config_item_put(item);
-}
-
-struct r2hb_hb_group_attribute {
- struct configfs_attribute attr;
- ssize_t (*show)(struct r2hb_hb_group *, char *);
- ssize_t (*store)(struct r2hb_hb_group *, const char *, size_t);
-};
-
-static ssize_t r2hb_hb_group_show(struct config_item *item,
- struct configfs_attribute *attr,
- char *page)
-{
- struct r2hb_hb_group *reg = to_r2hb_hb_group(to_config_group(item));
- struct r2hb_hb_group_attribute *r2hb_hb_group_attr =
- container_of(attr, struct r2hb_hb_group_attribute, attr);
- ssize_t ret = 0;
-
- if (r2hb_hb_group_attr->show)
- ret = r2hb_hb_group_attr->show(reg, page);
- return ret;
-}
-
-static ssize_t r2hb_hb_group_store(struct config_item *item,
- struct configfs_attribute *attr,
- const char *page, size_t count)
-{
- struct r2hb_hb_group *reg = to_r2hb_hb_group(to_config_group(item));
- struct r2hb_hb_group_attribute *r2hb_hb_group_attr =
- container_of(attr, struct r2hb_hb_group_attribute, attr);
- ssize_t ret = -EINVAL;
-
- if (r2hb_hb_group_attr->store)
- ret = r2hb_hb_group_attr->store(reg, page, count);
- return ret;
-}
-
-static ssize_t r2hb_hb_group_threshold_show(struct r2hb_hb_group *group,
- char *page)
-{
- return sprintf(page, "%u\n", r2hb_dead_threshold);
-}
-
-static ssize_t r2hb_hb_group_threshold_store(struct r2hb_hb_group *group,
- const char *page,
- size_t count)
-{
- unsigned long tmp;
- char *p = (char *)page;
- int err;
-
- err = kstrtoul(p, 10, &tmp);
- if (err)
- return err;
-
- /* this will validate ranges for us. */
- r2hb_dead_threshold_set((unsigned int) tmp);
-
- return count;
-}
-
-static
-ssize_t r2hb_hb_group_mode_show(struct r2hb_hb_group *group,
- char *page)
-{
- return sprintf(page, "%s\n",
- r2hb_heartbeat_mode_desc[r2hb_heartbeat_mode]);
-}
-
-static
-ssize_t r2hb_hb_group_mode_store(struct r2hb_hb_group *group,
- const char *page, size_t count)
-{
- unsigned int i;
- int ret;
- size_t len;
-
- len = (page[count - 1] == '\n') ? count - 1 : count;
- if (!len)
- return -EINVAL;
-
- for (i = 0; i < R2HB_HEARTBEAT_NUM_MODES; ++i) {
- if (strnicmp(page, r2hb_heartbeat_mode_desc[i], len))
- continue;
-
- ret = r2hb_global_hearbeat_mode_set(i);
- if (!ret)
- pr_notice("ramster: Heartbeat mode set to %s\n",
- r2hb_heartbeat_mode_desc[i]);
- return count;
- }
-
- return -EINVAL;
-
-}
-
-static struct r2hb_hb_group_attribute r2hb_hb_group_attr_threshold = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "dead_threshold",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = r2hb_hb_group_threshold_show,
- .store = r2hb_hb_group_threshold_store,
-};
-
-static struct r2hb_hb_group_attribute r2hb_hb_group_attr_mode = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "mode",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = r2hb_hb_group_mode_show,
- .store = r2hb_hb_group_mode_store,
-};
-
-static struct configfs_attribute *r2hb_hb_group_attrs[] = {
- &r2hb_hb_group_attr_threshold.attr,
- &r2hb_hb_group_attr_mode.attr,
- NULL,
-};
-
-static struct configfs_item_operations r2hb_hearbeat_group_item_ops = {
- .show_attribute = r2hb_hb_group_show,
- .store_attribute = r2hb_hb_group_store,
-};
-
-static struct configfs_group_operations r2hb_hb_group_group_ops = {
- .make_item = r2hb_hb_group_make_item,
- .drop_item = r2hb_hb_group_drop_item,
-};
-
-static struct config_item_type r2hb_hb_group_type = {
- .ct_group_ops = &r2hb_hb_group_group_ops,
- .ct_item_ops = &r2hb_hearbeat_group_item_ops,
- .ct_attrs = r2hb_hb_group_attrs,
- .ct_owner = THIS_MODULE,
-};
-
-/* this is just here to avoid touching group in heartbeat.h which the
- * entire damn world #includes */
-struct config_group *r2hb_alloc_hb_set(void)
-{
- struct r2hb_hb_group *hs = NULL;
- struct config_group *ret = NULL;
-
- hs = kzalloc(sizeof(struct r2hb_hb_group), GFP_KERNEL);
- if (hs == NULL)
- goto out;
-
- config_group_init_type_name(&hs->hs_group, "heartbeat",
- &r2hb_hb_group_type);
-
- ret = &hs->hs_group;
-out:
- if (ret == NULL)
- kfree(hs);
- return ret;
-}
-
-void r2hb_free_hb_set(struct config_group *group)
-{
- struct r2hb_hb_group *hs = to_r2hb_hb_group(group);
- kfree(hs);
-}
-
-/* hb callback registration and issuing */
-
-static struct r2hb_callback *hbcall_from_type(enum r2hb_callback_type type)
-{
- if (type == R2HB_NUM_CB)
- return ERR_PTR(-EINVAL);
-
- return &r2hb_callbacks[type];
-}
-
-void r2hb_setup_callback(struct r2hb_callback_func *hc,
- enum r2hb_callback_type type,
- r2hb_cb_func *func,
- void *data,
- int priority)
-{
- INIT_LIST_HEAD(&hc->hc_item);
- hc->hc_func = func;
- hc->hc_data = data;
- hc->hc_priority = priority;
- hc->hc_type = type;
- hc->hc_magic = R2HB_CB_MAGIC;
-}
-EXPORT_SYMBOL_GPL(r2hb_setup_callback);
-
-int r2hb_register_callback(const char *region_uuid,
- struct r2hb_callback_func *hc)
-{
- struct r2hb_callback_func *tmp;
- struct list_head *iter;
- struct r2hb_callback *hbcall;
- int ret;
-
- BUG_ON(hc->hc_magic != R2HB_CB_MAGIC);
- BUG_ON(!list_empty(&hc->hc_item));
-
- hbcall = hbcall_from_type(hc->hc_type);
- if (IS_ERR(hbcall)) {
- ret = PTR_ERR(hbcall);
- goto out;
- }
-
- down_write(&r2hb_callback_sem);
-
- list_for_each(iter, &hbcall->list) {
- tmp = list_entry(iter, struct r2hb_callback_func, hc_item);
- if (hc->hc_priority < tmp->hc_priority) {
- list_add_tail(&hc->hc_item, iter);
- break;
- }
- }
- if (list_empty(&hc->hc_item))
- list_add_tail(&hc->hc_item, &hbcall->list);
-
- up_write(&r2hb_callback_sem);
- ret = 0;
-out:
- mlog(ML_CLUSTER, "returning %d on behalf of %p for funcs %p\n",
- ret, __builtin_return_address(0), hc);
- return ret;
-}
-EXPORT_SYMBOL_GPL(r2hb_register_callback);
-
-void r2hb_unregister_callback(const char *region_uuid,
- struct r2hb_callback_func *hc)
-{
- BUG_ON(hc->hc_magic != R2HB_CB_MAGIC);
-
- mlog(ML_CLUSTER, "on behalf of %p for funcs %p\n",
- __builtin_return_address(0), hc);
-
- /* XXX Can this happen _with_ a region reference? */
- if (list_empty(&hc->hc_item))
- return;
-
- down_write(&r2hb_callback_sem);
-
- list_del_init(&hc->hc_item);
-
- up_write(&r2hb_callback_sem);
-}
-EXPORT_SYMBOL_GPL(r2hb_unregister_callback);
-
-int r2hb_check_node_heartbeating_from_callback(u8 node_num)
-{
- unsigned long testing_map[BITS_TO_LONGS(R2NM_MAX_NODES)];
-
- r2hb_fill_node_map_from_callback(testing_map, sizeof(testing_map));
- if (!test_bit(node_num, testing_map)) {
- mlog(ML_HEARTBEAT,
- "node (%u) does not have heartbeating enabled.\n",
- node_num);
- return 0;
- }
-
- return 1;
-}
-EXPORT_SYMBOL_GPL(r2hb_check_node_heartbeating_from_callback);
-
-void r2hb_stop_all_regions(void)
-{
-}
-EXPORT_SYMBOL_GPL(r2hb_stop_all_regions);
-
-/*
- * this is just a hack until we get the plumbing which flips file systems
- * read only and drops the hb ref instead of killing the node dead.
- */
-int r2hb_global_heartbeat_active(void)
-{
- return (r2hb_heartbeat_mode == R2HB_HEARTBEAT_GLOBAL);
-}
-EXPORT_SYMBOL(r2hb_global_heartbeat_active);
-
-/* added for RAMster */
-void r2hb_manual_set_node_heartbeating(int node_num)
-{
- if (node_num < R2NM_MAX_NODES)
- set_bit(node_num, r2hb_live_node_bitmap);
-}
-EXPORT_SYMBOL(r2hb_manual_set_node_heartbeating);
diff --git a/drivers/staging/zcache/ramster/heartbeat.h b/drivers/staging/zcache/ramster/heartbeat.h
deleted file mode 100644
index 6cbc775bd63..00000000000
--- a/drivers/staging/zcache/ramster/heartbeat.h
+++ /dev/null
@@ -1,87 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * heartbeat.h
- *
- * Function prototypes
- *
- * Copyright (C) 2004 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- *
- */
-
-#ifndef R2CLUSTER_HEARTBEAT_H
-#define R2CLUSTER_HEARTBEAT_H
-
-#define R2HB_REGION_TIMEOUT_MS 2000
-
-#define R2HB_MAX_REGION_NAME_LEN 32
-
-/* number of changes to be seen as live */
-#define R2HB_LIVE_THRESHOLD 2
-/* number of equal samples to be seen as dead */
-extern unsigned int r2hb_dead_threshold;
-#define R2HB_DEFAULT_DEAD_THRESHOLD 31
-/* Otherwise MAX_WRITE_TIMEOUT will be zero... */
-#define R2HB_MIN_DEAD_THRESHOLD 2
-#define R2HB_MAX_WRITE_TIMEOUT_MS \
- (R2HB_REGION_TIMEOUT_MS * (r2hb_dead_threshold - 1))
-
-#define R2HB_CB_MAGIC 0x51d1e4ec
-
-/* callback stuff */
-enum r2hb_callback_type {
- R2HB_NODE_DOWN_CB = 0,
- R2HB_NODE_UP_CB,
- R2HB_NUM_CB
-};
-
-struct r2nm_node;
-typedef void (r2hb_cb_func)(struct r2nm_node *, int, void *);
-
-struct r2hb_callback_func {
- u32 hc_magic;
- struct list_head hc_item;
- r2hb_cb_func *hc_func;
- void *hc_data;
- int hc_priority;
- enum r2hb_callback_type hc_type;
-};
-
-struct config_group *r2hb_alloc_hb_set(void);
-void r2hb_free_hb_set(struct config_group *group);
-
-void r2hb_setup_callback(struct r2hb_callback_func *hc,
- enum r2hb_callback_type type,
- r2hb_cb_func *func,
- void *data,
- int priority);
-int r2hb_register_callback(const char *region_uuid,
- struct r2hb_callback_func *hc);
-void r2hb_unregister_callback(const char *region_uuid,
- struct r2hb_callback_func *hc);
-void r2hb_fill_node_map(unsigned long *map,
- unsigned bytes);
-void r2hb_exit(void);
-int r2hb_init(void);
-int r2hb_check_node_heartbeating_from_callback(u8 node_num);
-void r2hb_stop_all_regions(void);
-int r2hb_get_all_regions(char *region_uuids, u8 numregions);
-int r2hb_global_heartbeat_active(void);
-void r2hb_manual_set_node_heartbeating(int);
-
-#endif /* R2CLUSTER_HEARTBEAT_H */
diff --git a/drivers/staging/zcache/ramster/masklog.c b/drivers/staging/zcache/ramster/masklog.c
deleted file mode 100644
index 1261d8579aa..00000000000
--- a/drivers/staging/zcache/ramster/masklog.c
+++ /dev/null
@@ -1,155 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/string.h>
-#include <linux/uaccess.h>
-
-#include "masklog.h"
-
-struct mlog_bits r2_mlog_and_bits = MLOG_BITS_RHS(MLOG_INITIAL_AND_MASK);
-EXPORT_SYMBOL_GPL(r2_mlog_and_bits);
-struct mlog_bits r2_mlog_not_bits = MLOG_BITS_RHS(0);
-EXPORT_SYMBOL_GPL(r2_mlog_not_bits);
-
-static ssize_t mlog_mask_show(u64 mask, char *buf)
-{
- char *state;
-
- if (__mlog_test_u64(mask, r2_mlog_and_bits))
- state = "allow";
- else if (__mlog_test_u64(mask, r2_mlog_not_bits))
- state = "deny";
- else
- state = "off";
-
- return snprintf(buf, PAGE_SIZE, "%s\n", state);
-}
-
-static ssize_t mlog_mask_store(u64 mask, const char *buf, size_t count)
-{
- if (!strnicmp(buf, "allow", 5)) {
- __mlog_set_u64(mask, r2_mlog_and_bits);
- __mlog_clear_u64(mask, r2_mlog_not_bits);
- } else if (!strnicmp(buf, "deny", 4)) {
- __mlog_set_u64(mask, r2_mlog_not_bits);
- __mlog_clear_u64(mask, r2_mlog_and_bits);
- } else if (!strnicmp(buf, "off", 3)) {
- __mlog_clear_u64(mask, r2_mlog_not_bits);
- __mlog_clear_u64(mask, r2_mlog_and_bits);
- } else
- return -EINVAL;
-
- return count;
-}
-
-struct mlog_attribute {
- struct attribute attr;
- u64 mask;
-};
-
-#define to_mlog_attr(_attr) container_of(_attr, struct mlog_attribute, attr)
-
-#define define_mask(_name) { \
- .attr = { \
- .name = #_name, \
- .mode = S_IRUGO | S_IWUSR, \
- }, \
- .mask = ML_##_name, \
-}
-
-static struct mlog_attribute mlog_attrs[MLOG_MAX_BITS] = {
- define_mask(TCP),
- define_mask(MSG),
- define_mask(SOCKET),
- define_mask(HEARTBEAT),
- define_mask(HB_BIO),
- define_mask(DLMFS),
- define_mask(DLM),
- define_mask(DLM_DOMAIN),
- define_mask(DLM_THREAD),
- define_mask(DLM_MASTER),
- define_mask(DLM_RECOVERY),
- define_mask(DLM_GLUE),
- define_mask(VOTE),
- define_mask(CONN),
- define_mask(QUORUM),
- define_mask(BASTS),
- define_mask(CLUSTER),
- define_mask(ERROR),
- define_mask(NOTICE),
- define_mask(KTHREAD),
-};
-
-static struct attribute *mlog_attr_ptrs[MLOG_MAX_BITS] = {NULL, };
-
-static ssize_t mlog_show(struct kobject *obj, struct attribute *attr,
- char *buf)
-{
- struct mlog_attribute *mlog_attr = to_mlog_attr(attr);
-
- return mlog_mask_show(mlog_attr->mask, buf);
-}
-
-static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
- const char *buf, size_t count)
-{
- struct mlog_attribute *mlog_attr = to_mlog_attr(attr);
-
- return mlog_mask_store(mlog_attr->mask, buf, count);
-}
-
-static const struct sysfs_ops mlog_attr_ops = {
- .show = mlog_show,
- .store = mlog_store,
-};
-
-static struct kobj_type mlog_ktype = {
- .default_attrs = mlog_attr_ptrs,
- .sysfs_ops = &mlog_attr_ops,
-};
-
-static struct kset mlog_kset = {
- .kobj = {.ktype = &mlog_ktype},
-};
-
-int r2_mlog_sys_init(struct kset *r2cb_kset)
-{
- int i = 0;
-
- while (mlog_attrs[i].attr.mode) {
- mlog_attr_ptrs[i] = &mlog_attrs[i].attr;
- i++;
- }
- mlog_attr_ptrs[i] = NULL;
-
- kobject_set_name(&mlog_kset.kobj, "logmask");
- mlog_kset.kobj.kset = r2cb_kset;
- return kset_register(&mlog_kset);
-}
-
-void r2_mlog_sys_shutdown(void)
-{
- kset_unregister(&mlog_kset);
-}
diff --git a/drivers/staging/zcache/ramster/masklog.h b/drivers/staging/zcache/ramster/masklog.h
deleted file mode 100644
index 918ae110b69..00000000000
--- a/drivers/staging/zcache/ramster/masklog.h
+++ /dev/null
@@ -1,220 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * Copyright (C) 2005, 2012 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- */
-
-#ifndef R2CLUSTER_MASKLOG_H
-#define R2CLUSTER_MASKLOG_H
-
-/*
- * For now this is a trivial wrapper around printk() that gives the critical
- * ability to enable sets of debugging output at run-time. In the future this
- * will almost certainly be redirected to relayfs so that it can pay a
- * substantially lower heisenberg tax.
- *
- * Callers associate the message with a bitmask and a global bitmask is
- * maintained with help from /proc. If any of the bits match the message is
- * output.
- *
- * We must have efficient bit tests on i386 and it seems gcc still emits crazy
- * code for the 64bit compare. It emits very good code for the dual unsigned
- * long tests, though, completely avoiding tests that can never pass if the
- * caller gives a constant bitmask that fills one of the longs with all 0s. So
- * the desire is to have almost all of the calls decided on by comparing just
- * one of the longs. This leads to having infrequently given bits that are
- * frequently matched in the high bits.
- *
- * _ERROR and _NOTICE are used for messages that always go to the console and
- * have appropriate KERN_ prefixes. We wrap these in our function instead of
- * just calling printk() so that this can eventually make its way through
- * relayfs along with the debugging messages. Everything else gets KERN_DEBUG.
- * The inline tests and macro dance give GCC the opportunity to quite cleverly
- * only emit the appropriate printk() when the caller passes in a constant
- * mask, as is almost always the case.
- *
- * All this bitmask nonsense is managed from the files under
- * /sys/fs/r2cb/logmask/. Reading the files gives a straightforward
- * indication of which bits are allowed (allow) or denied (off/deny).
- * ENTRY deny
- * EXIT deny
- * TCP off
- * MSG off
- * SOCKET off
- * ERROR allow
- * NOTICE allow
- *
- * Writing changes the state of a given bit and requires a strictly formatted
- * single write() call:
- *
- * write(fd, "allow", 5);
- *
- * Echoing allow/deny/off string into the logmask files can flip the bits
- * on or off as expected; here is the bash script for example:
- *
- * log_mask="/sys/fs/r2cb/logmask"
- * for node in ENTRY EXIT TCP MSG SOCKET ERROR NOTICE; do
- * echo allow >"$log_mask"/"$node"
- * done
- *
- * The debugfs.ramster tool can also flip the bits with the -l option:
- *
- * debugfs.ramster -l TCP allow
- */
-
-/* for task_struct */
-#include <linux/sched.h>
-
-/* bits that are frequently given and infrequently matched in the low word */
-/* NOTE: If you add a flag, you need to also update masklog.c! */
-#define ML_TCP 0x0000000000000001ULL /* net cluster/tcp.c */
-#define ML_MSG 0x0000000000000002ULL /* net network messages */
-#define ML_SOCKET 0x0000000000000004ULL /* net socket lifetime */
-#define ML_HEARTBEAT 0x0000000000000008ULL /* hb all heartbeat tracking */
-#define ML_HB_BIO 0x0000000000000010ULL /* hb io tracing */
-#define ML_DLMFS 0x0000000000000020ULL /* dlm user dlmfs */
-#define ML_DLM 0x0000000000000040ULL /* dlm general debugging */
-#define ML_DLM_DOMAIN 0x0000000000000080ULL /* dlm domain debugging */
-#define ML_DLM_THREAD 0x0000000000000100ULL /* dlm domain thread */
-#define ML_DLM_MASTER 0x0000000000000200ULL /* dlm master functions */
-#define ML_DLM_RECOVERY 0x0000000000000400ULL /* dlm master functions */
-#define ML_DLM_GLUE 0x0000000000000800ULL /* ramster dlm glue layer */
-#define ML_VOTE 0x0000000000001000ULL /* ramster node messaging */
-#define ML_CONN 0x0000000000002000ULL /* net connection management */
-#define ML_QUORUM 0x0000000000004000ULL /* net connection quorum */
-#define ML_BASTS 0x0000000000008000ULL /* dlmglue asts and basts */
-#define ML_CLUSTER 0x0000000000010000ULL /* cluster stack */
-
-/* bits that are infrequently given and frequently matched in the high word */
-#define ML_ERROR 0x1000000000000000ULL /* sent to KERN_ERR */
-#define ML_NOTICE 0x2000000000000000ULL /* sent to KERN_NOTICE */
-#define ML_KTHREAD 0x4000000000000000ULL /* kernel thread activity */
-
-#define MLOG_INITIAL_AND_MASK (ML_ERROR|ML_NOTICE)
-#ifndef MLOG_MASK_PREFIX
-#define MLOG_MASK_PREFIX 0
-#endif
-
-/*
- * When logging is disabled, force the bit test to 0 for anything other
- * than errors and notices, allowing gcc to remove the code completely.
- * When enabled, allow all masks.
- */
-#if defined(CONFIG_RAMSTER_DEBUG_MASKLOG)
-#define ML_ALLOWED_BITS (~0)
-#else
-#define ML_ALLOWED_BITS (ML_ERROR|ML_NOTICE)
-#endif
-
-#define MLOG_MAX_BITS 64
-
-struct mlog_bits {
- unsigned long words[MLOG_MAX_BITS / BITS_PER_LONG];
-};
-
-extern struct mlog_bits r2_mlog_and_bits, r2_mlog_not_bits;
-
-#if BITS_PER_LONG == 32
-
-#define __mlog_test_u64(mask, bits) \
- ((u32)(mask & 0xffffffff) & bits.words[0] || \
- ((u64)(mask) >> 32) & bits.words[1])
-#define __mlog_set_u64(mask, bits) do { \
- bits.words[0] |= (u32)(mask & 0xffffffff); \
- bits.words[1] |= (u64)(mask) >> 32; \
-} while (0)
-#define __mlog_clear_u64(mask, bits) do { \
- bits.words[0] &= ~((u32)(mask & 0xffffffff)); \
- bits.words[1] &= ~((u64)(mask) >> 32); \
-} while (0)
-#define MLOG_BITS_RHS(mask) { \
- { \
- [0] = (u32)(mask & 0xffffffff), \
- [1] = (u64)(mask) >> 32, \
- } \
-}
-
-#else /* 32bit long above, 64bit long below */
-
-#define __mlog_test_u64(mask, bits) ((mask) & bits.words[0])
-#define __mlog_set_u64(mask, bits) do { \
- bits.words[0] |= (mask); \
-} while (0)
-#define __mlog_clear_u64(mask, bits) do { \
- bits.words[0] &= ~(mask); \
-} while (0)
-#define MLOG_BITS_RHS(mask) { { (mask) } }
-
-#endif
-
-/*
- * smp_processor_id() "helpfully" screams when called outside preemptible
- * regions in current kernels. sles doesn't have the variants that don't
- * scream. just do this instead of trying to guess which we're building
- * against.. *sigh*.
- */
-#define __mlog_cpu_guess ({ \
- unsigned long _cpu = get_cpu(); \
- put_cpu(); \
- _cpu; \
-})
-
-/* In the following two macros, the whitespace after the ',' just
- * before ##args is intentional. Otherwise, gcc 2.95 will eat the
- * previous token if args expands to nothing.
- */
-#define __mlog_printk(level, fmt, args...) \
- printk(level "(%s,%u,%lu):%s:%d " fmt, current->comm, \
- task_pid_nr(current), __mlog_cpu_guess, \
- __PRETTY_FUNCTION__, __LINE__ , ##args)
-
-#define mlog(mask, fmt, args...) do { \
- u64 __m = MLOG_MASK_PREFIX | (mask); \
- if ((__m & ML_ALLOWED_BITS) && \
- __mlog_test_u64(__m, r2_mlog_and_bits) && \
- !__mlog_test_u64(__m, r2_mlog_not_bits)) { \
- if (__m & ML_ERROR) \
- __mlog_printk(KERN_ERR, "ERROR: "fmt , ##args); \
- else if (__m & ML_NOTICE) \
- __mlog_printk(KERN_NOTICE, fmt , ##args); \
- else \
- __mlog_printk(KERN_INFO, fmt , ##args); \
- } \
-} while (0)
-
-#define mlog_errno(st) do { \
- int _st = (st); \
- if (_st != -ERESTARTSYS && _st != -EINTR && \
- _st != AOP_TRUNCATED_PAGE && _st != -ENOSPC) \
- mlog(ML_ERROR, "status = %lld\n", (long long)_st); \
-} while (0)
-
-#define mlog_bug_on_msg(cond, fmt, args...) do { \
- if (cond) { \
- mlog(ML_ERROR, "bug expression: " #cond "\n"); \
- mlog(ML_ERROR, fmt, ##args); \
- BUG(); \
- } \
-} while (0)
-
-#include <linux/kobject.h>
-#include <linux/sysfs.h>
-int r2_mlog_sys_init(struct kset *r2cb_subsys);
-void r2_mlog_sys_shutdown(void);
-
-#endif /* R2CLUSTER_MASKLOG_H */
diff --git a/drivers/staging/zcache/ramster/nodemanager.c b/drivers/staging/zcache/ramster/nodemanager.c
deleted file mode 100644
index 2cfe93342c0..00000000000
--- a/drivers/staging/zcache/ramster/nodemanager.c
+++ /dev/null
@@ -1,996 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * Copyright (C) 2004, 2005, 2012 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- */
-
-#include <linux/slab.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/configfs.h>
-
-#include "tcp.h"
-#include "nodemanager.h"
-#include "heartbeat.h"
-#include "masklog.h"
-
-/* for now we operate under the assertion that there can be only one
- * cluster active at a time. Changing this will require trickling
- * cluster references throughout where nodes are looked up */
-struct r2nm_cluster *r2nm_single_cluster;
-
-char *r2nm_fence_method_desc[R2NM_FENCE_METHODS] = {
- "reset", /* R2NM_FENCE_RESET */
- "panic", /* R2NM_FENCE_PANIC */
-};
-
-struct r2nm_node *r2nm_get_node_by_num(u8 node_num)
-{
- struct r2nm_node *node = NULL;
-
- if (node_num >= R2NM_MAX_NODES || r2nm_single_cluster == NULL)
- goto out;
-
- read_lock(&r2nm_single_cluster->cl_nodes_lock);
- node = r2nm_single_cluster->cl_nodes[node_num];
- if (node)
- config_item_get(&node->nd_item);
- read_unlock(&r2nm_single_cluster->cl_nodes_lock);
-out:
- return node;
-}
-EXPORT_SYMBOL_GPL(r2nm_get_node_by_num);
-
-int r2nm_configured_node_map(unsigned long *map, unsigned bytes)
-{
- struct r2nm_cluster *cluster = r2nm_single_cluster;
-
- BUG_ON(bytes < (sizeof(cluster->cl_nodes_bitmap)));
-
- if (cluster == NULL)
- return -EINVAL;
-
- read_lock(&cluster->cl_nodes_lock);
- memcpy(map, cluster->cl_nodes_bitmap, sizeof(cluster->cl_nodes_bitmap));
- read_unlock(&cluster->cl_nodes_lock);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(r2nm_configured_node_map);
-
-static struct r2nm_node *r2nm_node_ip_tree_lookup(struct r2nm_cluster *cluster,
- __be32 ip_needle,
- struct rb_node ***ret_p,
- struct rb_node **ret_parent)
-{
- struct rb_node **p = &cluster->cl_node_ip_tree.rb_node;
- struct rb_node *parent = NULL;
- struct r2nm_node *node, *ret = NULL;
-
- while (*p) {
- int cmp;
-
- parent = *p;
- node = rb_entry(parent, struct r2nm_node, nd_ip_node);
-
- cmp = memcmp(&ip_needle, &node->nd_ipv4_address,
- sizeof(ip_needle));
- if (cmp < 0)
- p = &(*p)->rb_left;
- else if (cmp > 0)
- p = &(*p)->rb_right;
- else {
- ret = node;
- break;
- }
- }
-
- if (ret_p != NULL)
- *ret_p = p;
- if (ret_parent != NULL)
- *ret_parent = parent;
-
- return ret;
-}
-
-struct r2nm_node *r2nm_get_node_by_ip(__be32 addr)
-{
- struct r2nm_node *node = NULL;
- struct r2nm_cluster *cluster = r2nm_single_cluster;
-
- if (cluster == NULL)
- goto out;
-
- read_lock(&cluster->cl_nodes_lock);
- node = r2nm_node_ip_tree_lookup(cluster, addr, NULL, NULL);
- if (node)
- config_item_get(&node->nd_item);
- read_unlock(&cluster->cl_nodes_lock);
-
-out:
- return node;
-}
-EXPORT_SYMBOL_GPL(r2nm_get_node_by_ip);
-
-void r2nm_node_put(struct r2nm_node *node)
-{
- config_item_put(&node->nd_item);
-}
-EXPORT_SYMBOL_GPL(r2nm_node_put);
-
-void r2nm_node_get(struct r2nm_node *node)
-{
- config_item_get(&node->nd_item);
-}
-EXPORT_SYMBOL_GPL(r2nm_node_get);
-
-u8 r2nm_this_node(void)
-{
- u8 node_num = R2NM_MAX_NODES;
-
- if (r2nm_single_cluster && r2nm_single_cluster->cl_has_local)
- node_num = r2nm_single_cluster->cl_local_node;
-
- return node_num;
-}
-EXPORT_SYMBOL_GPL(r2nm_this_node);
-
-/* node configfs bits */
-
-static struct r2nm_cluster *to_r2nm_cluster(struct config_item *item)
-{
- return item ?
- container_of(to_config_group(item), struct r2nm_cluster,
- cl_group)
- : NULL;
-}
-
-static struct r2nm_node *to_r2nm_node(struct config_item *item)
-{
- return item ? container_of(item, struct r2nm_node, nd_item) : NULL;
-}
-
-static void r2nm_node_release(struct config_item *item)
-{
- struct r2nm_node *node = to_r2nm_node(item);
- kfree(node);
-}
-
-static ssize_t r2nm_node_num_read(struct r2nm_node *node, char *page)
-{
- return sprintf(page, "%d\n", node->nd_num);
-}
-
-static struct r2nm_cluster *to_r2nm_cluster_from_node(struct r2nm_node *node)
-{
- /* through the first node_set .parent
- * mycluster/nodes/mynode == r2nm_cluster->r2nm_node_group->r2nm_node */
- return to_r2nm_cluster(node->nd_item.ci_parent->ci_parent);
-}
-
-enum {
- R2NM_NODE_ATTR_NUM = 0,
- R2NM_NODE_ATTR_PORT,
- R2NM_NODE_ATTR_ADDRESS,
- R2NM_NODE_ATTR_LOCAL,
-};
-
-static ssize_t r2nm_node_num_write(struct r2nm_node *node, const char *page,
- size_t count)
-{
- struct r2nm_cluster *cluster = to_r2nm_cluster_from_node(node);
- unsigned long tmp;
- char *p = (char *)page;
- int err;
-
- err = kstrtoul(p, 10, &tmp);
- if (err)
- return err;
-
- if (tmp >= R2NM_MAX_NODES)
- return -ERANGE;
-
- /* once we're in the cl_nodes tree networking can look us up by
- * node number and try to use our address and port attributes
- * to connect to this node.. make sure that they've been set
- * before writing the node attribute? */
- if (!test_bit(R2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
- !test_bit(R2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
- return -EINVAL; /* XXX */
-
- write_lock(&cluster->cl_nodes_lock);
- if (cluster->cl_nodes[tmp])
- p = NULL;
- else {
- cluster->cl_nodes[tmp] = node;
- node->nd_num = tmp;
- set_bit(tmp, cluster->cl_nodes_bitmap);
- }
- write_unlock(&cluster->cl_nodes_lock);
- if (p == NULL)
- return -EEXIST;
-
- return count;
-}
-static ssize_t r2nm_node_ipv4_port_read(struct r2nm_node *node, char *page)
-{
- return sprintf(page, "%u\n", ntohs(node->nd_ipv4_port));
-}
-
-static ssize_t r2nm_node_ipv4_port_write(struct r2nm_node *node,
- const char *page, size_t count)
-{
- unsigned long tmp;
- char *p = (char *)page;
- int err;
-
- err = kstrtoul(p, 10, &tmp);
- if (err)
- return err;
-
- if (tmp == 0)
- return -EINVAL;
- if (tmp >= (u16)-1)
- return -ERANGE;
-
- node->nd_ipv4_port = htons(tmp);
-
- return count;
-}
-
-static ssize_t r2nm_node_ipv4_address_read(struct r2nm_node *node, char *page)
-{
- return sprintf(page, "%pI4\n", &node->nd_ipv4_address);
-}
-
-static ssize_t r2nm_node_ipv4_address_write(struct r2nm_node *node,
- const char *page,
- size_t count)
-{
- struct r2nm_cluster *cluster = to_r2nm_cluster_from_node(node);
- int ret, i;
- struct rb_node **p, *parent;
- unsigned int octets[4];
- __be32 ipv4_addr = 0;
-
- ret = sscanf(page, "%3u.%3u.%3u.%3u", &octets[3], &octets[2],
- &octets[1], &octets[0]);
- if (ret != 4)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(octets); i++) {
- if (octets[i] > 255)
- return -ERANGE;
- be32_add_cpu(&ipv4_addr, octets[i] << (i * 8));
- }
-
- ret = 0;
- write_lock(&cluster->cl_nodes_lock);
- if (r2nm_node_ip_tree_lookup(cluster, ipv4_addr, &p, &parent))
- ret = -EEXIST;
- else {
- rb_link_node(&node->nd_ip_node, parent, p);
- rb_insert_color(&node->nd_ip_node, &cluster->cl_node_ip_tree);
- }
- write_unlock(&cluster->cl_nodes_lock);
- if (ret)
- return ret;
-
- memcpy(&node->nd_ipv4_address, &ipv4_addr, sizeof(ipv4_addr));
-
- return count;
-}
-
-static ssize_t r2nm_node_local_read(struct r2nm_node *node, char *page)
-{
- return sprintf(page, "%d\n", node->nd_local);
-}
-
-static ssize_t r2nm_node_local_write(struct r2nm_node *node, const char *page,
- size_t count)
-{
- struct r2nm_cluster *cluster = to_r2nm_cluster_from_node(node);
- unsigned long tmp;
- char *p = (char *)page;
- ssize_t ret;
- int err;
-
- err = kstrtoul(p, 10, &tmp);
- if (err)
- return err;
-
- tmp = !!tmp; /* boolean of whether this node wants to be local */
-
- /* setting local turns on networking rx for now so we require having
- * set everything else first */
- if (!test_bit(R2NM_NODE_ATTR_ADDRESS, &node->nd_set_attributes) ||
- !test_bit(R2NM_NODE_ATTR_NUM, &node->nd_set_attributes) ||
- !test_bit(R2NM_NODE_ATTR_PORT, &node->nd_set_attributes))
- return -EINVAL; /* XXX */
-
- /* the only failure case is trying to set a new local node
- * when a different one is already set */
- if (tmp && tmp == cluster->cl_has_local &&
- cluster->cl_local_node != node->nd_num)
- return -EBUSY;
-
- /* bring up the rx thread if we're setting the new local node. */
- if (tmp && !cluster->cl_has_local) {
- ret = r2net_start_listening(node);
- if (ret)
- return ret;
- }
-
- if (!tmp && cluster->cl_has_local &&
- cluster->cl_local_node == node->nd_num) {
- r2net_stop_listening(node);
- cluster->cl_local_node = R2NM_INVALID_NODE_NUM;
- }
-
- node->nd_local = tmp;
- if (node->nd_local) {
- cluster->cl_has_local = tmp;
- cluster->cl_local_node = node->nd_num;
- }
-
- return count;
-}
-
-struct r2nm_node_attribute {
- struct configfs_attribute attr;
- ssize_t (*show)(struct r2nm_node *, char *);
- ssize_t (*store)(struct r2nm_node *, const char *, size_t);
-};
-
-static struct r2nm_node_attribute r2nm_node_attr_num = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "num",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = r2nm_node_num_read,
- .store = r2nm_node_num_write,
-};
-
-static struct r2nm_node_attribute r2nm_node_attr_ipv4_port = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "ipv4_port",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = r2nm_node_ipv4_port_read,
- .store = r2nm_node_ipv4_port_write,
-};
-
-static struct r2nm_node_attribute r2nm_node_attr_ipv4_address = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "ipv4_address",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = r2nm_node_ipv4_address_read,
- .store = r2nm_node_ipv4_address_write,
-};
-
-static struct r2nm_node_attribute r2nm_node_attr_local = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "local",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = r2nm_node_local_read,
- .store = r2nm_node_local_write,
-};
-
-static struct configfs_attribute *r2nm_node_attrs[] = {
- [R2NM_NODE_ATTR_NUM] = &r2nm_node_attr_num.attr,
- [R2NM_NODE_ATTR_PORT] = &r2nm_node_attr_ipv4_port.attr,
- [R2NM_NODE_ATTR_ADDRESS] = &r2nm_node_attr_ipv4_address.attr,
- [R2NM_NODE_ATTR_LOCAL] = &r2nm_node_attr_local.attr,
- NULL,
-};
-
-static int r2nm_attr_index(struct configfs_attribute *attr)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(r2nm_node_attrs); i++) {
- if (attr == r2nm_node_attrs[i])
- return i;
- }
- BUG();
- return 0;
-}
-
-static ssize_t r2nm_node_show(struct config_item *item,
- struct configfs_attribute *attr,
- char *page)
-{
- struct r2nm_node *node = to_r2nm_node(item);
- struct r2nm_node_attribute *r2nm_node_attr =
- container_of(attr, struct r2nm_node_attribute, attr);
- ssize_t ret = 0;
-
- if (r2nm_node_attr->show)
- ret = r2nm_node_attr->show(node, page);
- return ret;
-}
-
-static ssize_t r2nm_node_store(struct config_item *item,
- struct configfs_attribute *attr,
- const char *page, size_t count)
-{
- struct r2nm_node *node = to_r2nm_node(item);
- struct r2nm_node_attribute *r2nm_node_attr =
- container_of(attr, struct r2nm_node_attribute, attr);
- ssize_t ret;
- int attr_index = r2nm_attr_index(attr);
-
- if (r2nm_node_attr->store == NULL) {
- ret = -EINVAL;
- goto out;
- }
-
- if (test_bit(attr_index, &node->nd_set_attributes))
- return -EBUSY;
-
- ret = r2nm_node_attr->store(node, page, count);
- if (ret < count)
- goto out;
-
- set_bit(attr_index, &node->nd_set_attributes);
-out:
- return ret;
-}
-
-static struct configfs_item_operations r2nm_node_item_ops = {
- .release = r2nm_node_release,
- .show_attribute = r2nm_node_show,
- .store_attribute = r2nm_node_store,
-};
-
-static struct config_item_type r2nm_node_type = {
- .ct_item_ops = &r2nm_node_item_ops,
- .ct_attrs = r2nm_node_attrs,
- .ct_owner = THIS_MODULE,
-};
-
-/* node set */
-
-struct r2nm_node_group {
- struct config_group ns_group;
- /* some stuff? */
-};
-
-#if 0
-static struct r2nm_node_group *to_r2nm_node_group(struct config_group *group)
-{
- return group ?
- container_of(group, struct r2nm_node_group, ns_group)
- : NULL;
-}
-#endif
-
-struct r2nm_cluster_attribute {
- struct configfs_attribute attr;
- ssize_t (*show)(struct r2nm_cluster *, char *);
- ssize_t (*store)(struct r2nm_cluster *, const char *, size_t);
-};
-
-static ssize_t r2nm_cluster_attr_write(const char *page, ssize_t count,
- unsigned int *val)
-{
- unsigned long tmp;
- char *p = (char *)page;
- int err;
-
- err = kstrtoul(p, 10, &tmp);
- if (err)
- return err;
-
- if (tmp == 0)
- return -EINVAL;
- if (tmp >= (u32)-1)
- return -ERANGE;
-
- *val = tmp;
-
- return count;
-}
-
-static ssize_t r2nm_cluster_attr_idle_timeout_ms_read(
- struct r2nm_cluster *cluster, char *page)
-{
- return sprintf(page, "%u\n", cluster->cl_idle_timeout_ms);
-}
-
-static ssize_t r2nm_cluster_attr_idle_timeout_ms_write(
- struct r2nm_cluster *cluster, const char *page, size_t count)
-{
- ssize_t ret;
- unsigned int val = 0;
-
- ret = r2nm_cluster_attr_write(page, count, &val);
-
- if (ret > 0) {
- if (cluster->cl_idle_timeout_ms != val
- && r2net_num_connected_peers()) {
- mlog(ML_NOTICE,
- "r2net: cannot change idle timeout after "
- "the first peer has agreed to it."
- " %d connected peers\n",
- r2net_num_connected_peers());
- ret = -EINVAL;
- } else if (val <= cluster->cl_keepalive_delay_ms) {
- mlog(ML_NOTICE,
- "r2net: idle timeout must be larger "
- "than keepalive delay\n");
- ret = -EINVAL;
- } else {
- cluster->cl_idle_timeout_ms = val;
- }
- }
-
- return ret;
-}
-
-static ssize_t r2nm_cluster_attr_keepalive_delay_ms_read(
- struct r2nm_cluster *cluster, char *page)
-{
- return sprintf(page, "%u\n", cluster->cl_keepalive_delay_ms);
-}
-
-static ssize_t r2nm_cluster_attr_keepalive_delay_ms_write(
- struct r2nm_cluster *cluster, const char *page, size_t count)
-{
- ssize_t ret;
- unsigned int val = 0;
-
- ret = r2nm_cluster_attr_write(page, count, &val);
-
- if (ret > 0) {
- if (cluster->cl_keepalive_delay_ms != val
- && r2net_num_connected_peers()) {
- mlog(ML_NOTICE,
- "r2net: cannot change keepalive delay after"
- " the first peer has agreed to it."
- " %d connected peers\n",
- r2net_num_connected_peers());
- ret = -EINVAL;
- } else if (val >= cluster->cl_idle_timeout_ms) {
- mlog(ML_NOTICE,
- "r2net: keepalive delay must be "
- "smaller than idle timeout\n");
- ret = -EINVAL;
- } else {
- cluster->cl_keepalive_delay_ms = val;
- }
- }
-
- return ret;
-}
-
-static ssize_t r2nm_cluster_attr_reconnect_delay_ms_read(
- struct r2nm_cluster *cluster, char *page)
-{
- return sprintf(page, "%u\n", cluster->cl_reconnect_delay_ms);
-}
-
-static ssize_t r2nm_cluster_attr_reconnect_delay_ms_write(
- struct r2nm_cluster *cluster, const char *page, size_t count)
-{
- return r2nm_cluster_attr_write(page, count,
- &cluster->cl_reconnect_delay_ms);
-}
-
-static ssize_t r2nm_cluster_attr_fence_method_read(
- struct r2nm_cluster *cluster, char *page)
-{
- ssize_t ret = 0;
-
- if (cluster)
- ret = sprintf(page, "%s\n",
- r2nm_fence_method_desc[cluster->cl_fence_method]);
- return ret;
-}
-
-static ssize_t r2nm_cluster_attr_fence_method_write(
- struct r2nm_cluster *cluster, const char *page, size_t count)
-{
- unsigned int i;
-
- if (page[count - 1] != '\n')
- goto bail;
-
- for (i = 0; i < R2NM_FENCE_METHODS; ++i) {
- if (count != strlen(r2nm_fence_method_desc[i]) + 1)
- continue;
- if (strncasecmp(page, r2nm_fence_method_desc[i], count - 1))
- continue;
- if (cluster->cl_fence_method != i) {
- pr_info("ramster: Changing fence method to %s\n",
- r2nm_fence_method_desc[i]);
- cluster->cl_fence_method = i;
- }
- return count;
- }
-
-bail:
- return -EINVAL;
-}
-
-static struct r2nm_cluster_attribute r2nm_cluster_attr_idle_timeout_ms = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "idle_timeout_ms",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = r2nm_cluster_attr_idle_timeout_ms_read,
- .store = r2nm_cluster_attr_idle_timeout_ms_write,
-};
-
-static struct r2nm_cluster_attribute r2nm_cluster_attr_keepalive_delay_ms = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "keepalive_delay_ms",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = r2nm_cluster_attr_keepalive_delay_ms_read,
- .store = r2nm_cluster_attr_keepalive_delay_ms_write,
-};
-
-static struct r2nm_cluster_attribute r2nm_cluster_attr_reconnect_delay_ms = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "reconnect_delay_ms",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = r2nm_cluster_attr_reconnect_delay_ms_read,
- .store = r2nm_cluster_attr_reconnect_delay_ms_write,
-};
-
-static struct r2nm_cluster_attribute r2nm_cluster_attr_fence_method = {
- .attr = { .ca_owner = THIS_MODULE,
- .ca_name = "fence_method",
- .ca_mode = S_IRUGO | S_IWUSR },
- .show = r2nm_cluster_attr_fence_method_read,
- .store = r2nm_cluster_attr_fence_method_write,
-};
-
-static struct configfs_attribute *r2nm_cluster_attrs[] = {
- &r2nm_cluster_attr_idle_timeout_ms.attr,
- &r2nm_cluster_attr_keepalive_delay_ms.attr,
- &r2nm_cluster_attr_reconnect_delay_ms.attr,
- &r2nm_cluster_attr_fence_method.attr,
- NULL,
-};
-static ssize_t r2nm_cluster_show(struct config_item *item,
- struct configfs_attribute *attr,
- char *page)
-{
- struct r2nm_cluster *cluster = to_r2nm_cluster(item);
- struct r2nm_cluster_attribute *r2nm_cluster_attr =
- container_of(attr, struct r2nm_cluster_attribute, attr);
- ssize_t ret = 0;
-
- if (r2nm_cluster_attr->show)
- ret = r2nm_cluster_attr->show(cluster, page);
- return ret;
-}
-
-static ssize_t r2nm_cluster_store(struct config_item *item,
- struct configfs_attribute *attr,
- const char *page, size_t count)
-{
- struct r2nm_cluster *cluster = to_r2nm_cluster(item);
- struct r2nm_cluster_attribute *r2nm_cluster_attr =
- container_of(attr, struct r2nm_cluster_attribute, attr);
- ssize_t ret;
-
- if (r2nm_cluster_attr->store == NULL) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = r2nm_cluster_attr->store(cluster, page, count);
- if (ret < count)
- goto out;
-out:
- return ret;
-}
-
-static struct config_item *r2nm_node_group_make_item(struct config_group *group,
- const char *name)
-{
- struct r2nm_node *node = NULL;
-
- if (strlen(name) > R2NM_MAX_NAME_LEN)
- return ERR_PTR(-ENAMETOOLONG);
-
- node = kzalloc(sizeof(struct r2nm_node), GFP_KERNEL);
- if (node == NULL)
- return ERR_PTR(-ENOMEM);
-
- strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */
- config_item_init_type_name(&node->nd_item, name, &r2nm_node_type);
- spin_lock_init(&node->nd_lock);
-
- mlog(ML_CLUSTER, "r2nm: Registering node %s\n", name);
-
- return &node->nd_item;
-}
-
-static void r2nm_node_group_drop_item(struct config_group *group,
- struct config_item *item)
-{
- struct r2nm_node *node = to_r2nm_node(item);
- struct r2nm_cluster *cluster =
- to_r2nm_cluster(group->cg_item.ci_parent);
-
- r2net_disconnect_node(node);
-
- if (cluster->cl_has_local &&
- (cluster->cl_local_node == node->nd_num)) {
- cluster->cl_has_local = 0;
- cluster->cl_local_node = R2NM_INVALID_NODE_NUM;
- r2net_stop_listening(node);
- }
-
- /* XXX call into net to stop this node from trading messages */
-
- write_lock(&cluster->cl_nodes_lock);
-
- /* XXX sloppy */
- if (node->nd_ipv4_address)
- rb_erase(&node->nd_ip_node, &cluster->cl_node_ip_tree);
-
- /* nd_num might be 0 if the node number hasn't been set.. */
- if (cluster->cl_nodes[node->nd_num] == node) {
- cluster->cl_nodes[node->nd_num] = NULL;
- clear_bit(node->nd_num, cluster->cl_nodes_bitmap);
- }
- write_unlock(&cluster->cl_nodes_lock);
-
- mlog(ML_CLUSTER, "r2nm: Unregistered node %s\n",
- config_item_name(&node->nd_item));
-
- config_item_put(item);
-}
-
-static struct configfs_group_operations r2nm_node_group_group_ops = {
- .make_item = r2nm_node_group_make_item,
- .drop_item = r2nm_node_group_drop_item,
-};
-
-static struct config_item_type r2nm_node_group_type = {
- .ct_group_ops = &r2nm_node_group_group_ops,
- .ct_owner = THIS_MODULE,
-};
-
-/* cluster */
-
-static void r2nm_cluster_release(struct config_item *item)
-{
- struct r2nm_cluster *cluster = to_r2nm_cluster(item);
-
- kfree(cluster->cl_group.default_groups);
- kfree(cluster);
-}
-
-static struct configfs_item_operations r2nm_cluster_item_ops = {
- .release = r2nm_cluster_release,
- .show_attribute = r2nm_cluster_show,
- .store_attribute = r2nm_cluster_store,
-};
-
-static struct config_item_type r2nm_cluster_type = {
- .ct_item_ops = &r2nm_cluster_item_ops,
- .ct_attrs = r2nm_cluster_attrs,
- .ct_owner = THIS_MODULE,
-};
-
-/* cluster set */
-
-struct r2nm_cluster_group {
- struct configfs_subsystem cs_subsys;
- /* some stuff? */
-};
-
-#if 0
-static struct r2nm_cluster_group *
-to_r2nm_cluster_group(struct config_group *group)
-{
- return group ?
- container_of(to_configfs_subsystem(group),
- struct r2nm_cluster_group, cs_subsys)
- : NULL;
-}
-#endif
-
-static struct config_group *
-r2nm_cluster_group_make_group(struct config_group *group,
- const char *name)
-{
- struct r2nm_cluster *cluster = NULL;
- struct r2nm_node_group *ns = NULL;
- struct config_group *r2hb_group = NULL, *ret = NULL;
- void *defs = NULL;
-
- /* this runs under the parent dir's i_mutex; there can be only
- * one caller in here at a time */
- if (r2nm_single_cluster)
- return ERR_PTR(-ENOSPC);
-
- cluster = kzalloc(sizeof(struct r2nm_cluster), GFP_KERNEL);
- ns = kzalloc(sizeof(struct r2nm_node_group), GFP_KERNEL);
- defs = kcalloc(3, sizeof(struct config_group *), GFP_KERNEL);
- r2hb_group = r2hb_alloc_hb_set();
- if (cluster == NULL || ns == NULL || r2hb_group == NULL || defs == NULL)
- goto out;
-
- config_group_init_type_name(&cluster->cl_group, name,
- &r2nm_cluster_type);
- config_group_init_type_name(&ns->ns_group, "node",
- &r2nm_node_group_type);
-
- cluster->cl_group.default_groups = defs;
- cluster->cl_group.default_groups[0] = &ns->ns_group;
- cluster->cl_group.default_groups[1] = r2hb_group;
- cluster->cl_group.default_groups[2] = NULL;
- rwlock_init(&cluster->cl_nodes_lock);
- cluster->cl_node_ip_tree = RB_ROOT;
- cluster->cl_reconnect_delay_ms = R2NET_RECONNECT_DELAY_MS_DEFAULT;
- cluster->cl_idle_timeout_ms = R2NET_IDLE_TIMEOUT_MS_DEFAULT;
- cluster->cl_keepalive_delay_ms = R2NET_KEEPALIVE_DELAY_MS_DEFAULT;
- cluster->cl_fence_method = R2NM_FENCE_RESET;
-
- ret = &cluster->cl_group;
- r2nm_single_cluster = cluster;
-
-out:
- if (ret == NULL) {
- kfree(cluster);
- kfree(ns);
- r2hb_free_hb_set(r2hb_group);
- kfree(defs);
- ret = ERR_PTR(-ENOMEM);
- }
-
- return ret;
-}
-
-static void r2nm_cluster_group_drop_item(struct config_group *group,
- struct config_item *item)
-{
- struct r2nm_cluster *cluster = to_r2nm_cluster(item);
- int i;
- struct config_item *killme;
-
- BUG_ON(r2nm_single_cluster != cluster);
- r2nm_single_cluster = NULL;
-
- for (i = 0; cluster->cl_group.default_groups[i]; i++) {
- killme = &cluster->cl_group.default_groups[i]->cg_item;
- cluster->cl_group.default_groups[i] = NULL;
- config_item_put(killme);
- }
-
- config_item_put(item);
-}
-
-static struct configfs_group_operations r2nm_cluster_group_group_ops = {
- .make_group = r2nm_cluster_group_make_group,
- .drop_item = r2nm_cluster_group_drop_item,
-};
-
-static struct config_item_type r2nm_cluster_group_type = {
- .ct_group_ops = &r2nm_cluster_group_group_ops,
- .ct_owner = THIS_MODULE,
-};
-
-static struct r2nm_cluster_group r2nm_cluster_group = {
- .cs_subsys = {
- .su_group = {
- .cg_item = {
- .ci_namebuf = "cluster",
- .ci_type = &r2nm_cluster_group_type,
- },
- },
- },
-};
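Taken together, the objects above define the configfs tree that the ramster-tools
userland drives with mkdir and echo. A rough sketch of the resulting layout
(illustrative only, not part of the deleted file), assuming configfs is mounted
at the conventional /sys/kernel/config and using the cluster and node names from
the HOWTO later in this patch; the heartbeat directory is whatever
r2hb_alloc_hb_set() names it:

    /sys/kernel/config/cluster/        subsystem group ("cluster")
        ramster/                       one cluster, r2nm_cluster_group_make_group()
            idle_timeout_ms            attributes from r2nm_cluster_attrs[]
            keepalive_delay_ms
            reconnect_delay_ms
            fence_method
            node/                      default group, r2nm_node_group_type
                system1/               one directory per node, r2nm_node_group_make_item()
                system2/
            <heartbeat group>/         allocated by r2hb_alloc_hb_set()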
-
-int r2nm_depend_item(struct config_item *item)
-{
- return configfs_depend_item(&r2nm_cluster_group.cs_subsys, item);
-}
-
-void r2nm_undepend_item(struct config_item *item)
-{
- configfs_undepend_item(&r2nm_cluster_group.cs_subsys, item);
-}
-
-int r2nm_depend_this_node(void)
-{
- int ret = 0;
- struct r2nm_node *local_node;
-
- local_node = r2nm_get_node_by_num(r2nm_this_node());
- if (!local_node) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = r2nm_depend_item(&local_node->nd_item);
- r2nm_node_put(local_node);
-
-out:
- return ret;
-}
-
-void r2nm_undepend_this_node(void)
-{
- struct r2nm_node *local_node;
-
- local_node = r2nm_get_node_by_num(r2nm_this_node());
- BUG_ON(!local_node);
-
- r2nm_undepend_item(&local_node->nd_item);
- r2nm_node_put(local_node);
-}
-
-
-static void __exit exit_r2nm(void)
-{
- /* XXX sync with hb callbacks and shut down hb? */
- r2net_unregister_hb_callbacks();
- configfs_unregister_subsystem(&r2nm_cluster_group.cs_subsys);
-
- r2net_exit();
- r2hb_exit();
-}
-
-int r2nm_init(void)
-{
- int ret = -1;
-
- ret = r2hb_init();
- if (ret)
- goto out;
-
- ret = r2net_init();
- if (ret)
- goto out_r2hb;
-
- ret = r2net_register_hb_callbacks();
- if (ret)
- goto out_r2net;
-
- config_group_init(&r2nm_cluster_group.cs_subsys.su_group);
- mutex_init(&r2nm_cluster_group.cs_subsys.su_mutex);
- ret = configfs_register_subsystem(&r2nm_cluster_group.cs_subsys);
- if (ret) {
- pr_err("nodemanager: Registration returned %d\n", ret);
- goto out_callbacks;
- }
-
- if (!ret)
- goto out;
-
- configfs_unregister_subsystem(&r2nm_cluster_group.cs_subsys);
-out_callbacks:
- r2net_unregister_hb_callbacks();
-out_r2net:
- r2net_exit();
-out_r2hb:
- r2hb_exit();
-out:
- return ret;
-}
-EXPORT_SYMBOL_GPL(r2nm_init);
-
-MODULE_AUTHOR("Oracle");
-MODULE_LICENSE("GPL");
-
-#ifndef CONFIG_RAMSTER_MODULE
-late_initcall(r2nm_init);
-#endif
diff --git a/drivers/staging/zcache/ramster/nodemanager.h b/drivers/staging/zcache/ramster/nodemanager.h
deleted file mode 100644
index 41a04df5842..00000000000
--- a/drivers/staging/zcache/ramster/nodemanager.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * nodemanager.h
- *
- * Function prototypes
- *
- * Copyright (C) 2004 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- *
- */
-
-#ifndef R2CLUSTER_NODEMANAGER_H
-#define R2CLUSTER_NODEMANAGER_H
-
-#include "ramster_nodemanager.h"
-
-/* This totally doesn't belong here. */
-#include <linux/configfs.h>
-#include <linux/rbtree.h>
-
-enum r2nm_fence_method {
- R2NM_FENCE_RESET = 0,
- R2NM_FENCE_PANIC,
- R2NM_FENCE_METHODS, /* Number of fence methods */
-};
-
-struct r2nm_node {
- spinlock_t nd_lock;
- struct config_item nd_item;
- char nd_name[R2NM_MAX_NAME_LEN+1]; /* replace? */
- __u8 nd_num;
- /* only one address per node, as attributes, for now. */
- __be32 nd_ipv4_address;
- __be16 nd_ipv4_port;
- struct rb_node nd_ip_node;
- /* there can be only one local node for now */
- int nd_local;
-
- unsigned long nd_set_attributes;
-};
-
-struct r2nm_cluster {
- struct config_group cl_group;
- unsigned cl_has_local:1;
- u8 cl_local_node;
- rwlock_t cl_nodes_lock;
- struct r2nm_node *cl_nodes[R2NM_MAX_NODES];
- struct rb_root cl_node_ip_tree;
- unsigned int cl_idle_timeout_ms;
- unsigned int cl_keepalive_delay_ms;
- unsigned int cl_reconnect_delay_ms;
- enum r2nm_fence_method cl_fence_method;
-
- /* part of a hack for disk bitmap.. will go eventually. - zab */
- unsigned long cl_nodes_bitmap[BITS_TO_LONGS(R2NM_MAX_NODES)];
-};
-
-extern struct r2nm_cluster *r2nm_single_cluster;
-
-u8 r2nm_this_node(void);
-
-int r2nm_configured_node_map(unsigned long *map, unsigned bytes);
-struct r2nm_node *r2nm_get_node_by_num(u8 node_num);
-struct r2nm_node *r2nm_get_node_by_ip(__be32 addr);
-void r2nm_node_get(struct r2nm_node *node);
-void r2nm_node_put(struct r2nm_node *node);
-
-int r2nm_depend_item(struct config_item *item);
-void r2nm_undepend_item(struct config_item *item);
-int r2nm_depend_this_node(void);
-void r2nm_undepend_this_node(void);
-
-#endif /* R2CLUSTER_NODEMANAGER_H */
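The lookup helpers declared above return a referenced struct r2nm_node that the
caller must release with r2nm_node_put(). A minimal sketch of that pattern,
mirroring how the r2net.c code below uses it; the helper itself is hypothetical
and not part of the deleted header:

static int r2nm_example_node_is_configured(u8 node_num)
{
	struct r2nm_node *node;

	node = r2nm_get_node_by_num(node_num);	/* takes a reference, or NULL */
	if (node == NULL)
		return 0;			/* node not configured */
	/* node->nd_ipv4_address etc. are safe to read while the ref is held */
	r2nm_node_put(node);			/* drop the reference */
	return 1;
}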
diff --git a/drivers/staging/zcache/ramster/r2net.c b/drivers/staging/zcache/ramster/r2net.c
deleted file mode 100644
index 34818dc6561..00000000000
--- a/drivers/staging/zcache/ramster/r2net.c
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- * r2net.c
- *
- * Copyright (c) 2011-2012, Dan Magenheimer, Oracle Corp.
- *
- * Ramster_r2net provides an interface between zcache and r2net.
- *
- * FIXME: support more than two nodes
- */
-
-#include <linux/list.h>
-#include "tcp.h"
-#include "nodemanager.h"
-#include "../tmem.h"
-#include "../zcache.h"
-#include "ramster.h"
-
-#define RAMSTER_TESTING
-
-#define RMSTR_KEY 0x77347734
-
-enum {
- RMSTR_TMEM_PUT_EPH = 100,
- RMSTR_TMEM_PUT_PERS,
- RMSTR_TMEM_ASYNC_GET_REQUEST,
- RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST,
- RMSTR_TMEM_ASYNC_GET_REPLY,
- RMSTR_TMEM_FLUSH,
- RMSTR_TMEM_FLOBJ,
- RMSTR_TMEM_DESTROY_POOL,
-};
-
-#define RMSTR_R2NET_MAX_LEN \
- (R2NET_MAX_PAYLOAD_BYTES - sizeof(struct tmem_xhandle))
-
-#include "tcp_internal.h"
-
-static struct r2nm_node *r2net_target_node;
-static int r2net_target_nodenum;
-
-int r2net_remote_target_node_set(int node_num)
-{
- int ret = -1;
-
- r2net_target_node = r2nm_get_node_by_num(node_num);
- if (r2net_target_node != NULL) {
- r2net_target_nodenum = node_num;
- r2nm_node_put(r2net_target_node);
- ret = 0;
- }
- return ret;
-}
-
-/* FIXME following buffer should be per-cpu, protected by preempt_disable */
-static char ramster_async_get_buf[R2NET_MAX_PAYLOAD_BYTES];
-
-static int ramster_remote_async_get_request_handler(struct r2net_msg *msg,
- u32 len, void *data, void **ret_data)
-{
- char *pdata;
- struct tmem_xhandle xh;
- int found;
- size_t size = RMSTR_R2NET_MAX_LEN;
- u16 msgtype = be16_to_cpu(msg->msg_type);
- bool get_and_free = (msgtype == RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST);
- unsigned long flags;
-
- xh = *(struct tmem_xhandle *)msg->buf;
- if (xh.xh_data_size > RMSTR_R2NET_MAX_LEN)
- BUG();
- pdata = ramster_async_get_buf;
- *(struct tmem_xhandle *)pdata = xh;
- pdata += sizeof(struct tmem_xhandle);
- local_irq_save(flags);
- found = zcache_get_page(xh.client_id, xh.pool_id, &xh.oid, xh.index,
- pdata, &size, true, get_and_free ? 1 : -1);
- local_irq_restore(flags);
- if (found < 0) {
- /* a zero size indicates the get failed */
- size = 0;
- }
- if (size > RMSTR_R2NET_MAX_LEN)
- BUG();
- *ret_data = pdata - sizeof(struct tmem_xhandle);
- /* now make caller (r2net_process_message) handle specially */
- r2net_force_data_magic(msg, RMSTR_TMEM_ASYNC_GET_REPLY, RMSTR_KEY);
- return size + sizeof(struct tmem_xhandle);
-}
-
-static int ramster_remote_async_get_reply_handler(struct r2net_msg *msg,
- u32 len, void *data, void **ret_data)
-{
- char *in = (char *)msg->buf;
- int datalen = len - sizeof(struct r2net_msg);
- int ret = -1;
- struct tmem_xhandle *xh = (struct tmem_xhandle *)in;
-
- in += sizeof(struct tmem_xhandle);
- datalen -= sizeof(struct tmem_xhandle);
- BUG_ON(datalen < 0 || datalen > PAGE_SIZE);
- ret = ramster_localify(xh->pool_id, &xh->oid, xh->index,
- in, datalen, xh->extra);
-#ifdef RAMSTER_TESTING
- if (ret == -EEXIST)
- pr_err("TESTING ArrgREP, aborted overwrite on racy put\n");
-#endif
- return ret;
-}
-
-int ramster_remote_put_handler(struct r2net_msg *msg,
- u32 len, void *data, void **ret_data)
-{
- struct tmem_xhandle *xh;
- char *p = (char *)msg->buf;
- int datalen = len - sizeof(struct r2net_msg) -
- sizeof(struct tmem_xhandle);
- u16 msgtype = be16_to_cpu(msg->msg_type);
- bool ephemeral = (msgtype == RMSTR_TMEM_PUT_EPH);
- unsigned long flags;
- int ret;
-
- xh = (struct tmem_xhandle *)p;
- p += sizeof(struct tmem_xhandle);
- zcache_autocreate_pool(xh->client_id, xh->pool_id, ephemeral);
- local_irq_save(flags);
- ret = zcache_put_page(xh->client_id, xh->pool_id, &xh->oid, xh->index,
- p, datalen, true, ephemeral);
- local_irq_restore(flags);
- return ret;
-}
-
-int ramster_remote_flush_handler(struct r2net_msg *msg,
- u32 len, void *data, void **ret_data)
-{
- struct tmem_xhandle *xh;
- char *p = (char *)msg->buf;
-
- xh = (struct tmem_xhandle *)p;
- p += sizeof(struct tmem_xhandle);
- (void)zcache_flush_page(xh->client_id, xh->pool_id,
- &xh->oid, xh->index);
- return 0;
-}
-
-int ramster_remote_flobj_handler(struct r2net_msg *msg,
- u32 len, void *data, void **ret_data)
-{
- struct tmem_xhandle *xh;
- char *p = (char *)msg->buf;
-
- xh = (struct tmem_xhandle *)p;
- p += sizeof(struct tmem_xhandle);
- (void)zcache_flush_object(xh->client_id, xh->pool_id, &xh->oid);
- return 0;
-}
-
-int r2net_remote_async_get(struct tmem_xhandle *xh, bool free, int remotenode,
- size_t expect_size, uint8_t expect_cksum,
- void *extra)
-{
- int nodenum, ret = -1, status;
- struct r2nm_node *node = NULL;
- struct kvec vec[1];
- size_t veclen = 1;
- u32 msg_type;
- struct r2net_node *nn;
-
- node = r2nm_get_node_by_num(remotenode);
- if (node == NULL)
- goto out;
- xh->client_id = r2nm_this_node(); /* which node is getting */
- xh->xh_data_cksum = expect_cksum;
- xh->xh_data_size = expect_size;
- xh->extra = extra;
- vec[0].iov_len = sizeof(*xh);
- vec[0].iov_base = xh;
-
- node = r2net_target_node;
- if (!node)
- goto out;
-
- nodenum = r2net_target_nodenum;
-
- r2nm_node_get(node);
- nn = r2net_nn_from_num(nodenum);
- if (nn->nn_persistent_error || !nn->nn_sc_valid) {
- ret = -ENOTCONN;
- r2nm_node_put(node);
- goto out;
- }
-
- if (free)
- msg_type = RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST;
- else
- msg_type = RMSTR_TMEM_ASYNC_GET_REQUEST;
- ret = r2net_send_message_vec(msg_type, RMSTR_KEY,
- vec, veclen, remotenode, &status);
- r2nm_node_put(node);
- if (ret < 0) {
- if (ret == -ENOTCONN || ret == -EHOSTDOWN)
- goto out;
- if (ret == -EAGAIN)
- goto out;
- /* FIXME handle bad message possibilities here? */
- pr_err("UNTESTED ret<0 in ramster_remote_async_get: ret=%d\n",
- ret);
- }
- ret = status;
-out:
- return ret;
-}
-
-#ifdef RAMSTER_TESTING
-/* leave me here to see if it catches a weird crash */
-static void ramster_check_irq_counts(void)
-{
- static int last_hardirq_cnt, last_softirq_cnt, last_preempt_cnt;
- int cur_hardirq_cnt, cur_softirq_cnt, cur_preempt_cnt;
-
- cur_hardirq_cnt = hardirq_count() >> HARDIRQ_SHIFT;
- if (cur_hardirq_cnt > last_hardirq_cnt) {
- last_hardirq_cnt = cur_hardirq_cnt;
- if (!(last_hardirq_cnt&(last_hardirq_cnt-1)))
- pr_err("RAMSTER TESTING RRP hardirq_count=%d\n",
- last_hardirq_cnt);
- }
- cur_softirq_cnt = softirq_count() >> SOFTIRQ_SHIFT;
- if (cur_softirq_cnt > last_softirq_cnt) {
- last_softirq_cnt = cur_softirq_cnt;
- if (!(last_softirq_cnt&(last_softirq_cnt-1)))
- pr_err("RAMSTER TESTING RRP softirq_count=%d\n",
- last_softirq_cnt);
- }
- cur_preempt_cnt = preempt_count() & PREEMPT_MASK;
- if (cur_preempt_cnt > last_preempt_cnt) {
- last_preempt_cnt = cur_preempt_cnt;
- if (!(last_preempt_cnt&(last_preempt_cnt-1)))
- pr_err("RAMSTER TESTING RRP preempt_count=%d\n",
- last_preempt_cnt);
- }
-}
-#endif
-
-int r2net_remote_put(struct tmem_xhandle *xh, char *data, size_t size,
- bool ephemeral, int *remotenode)
-{
- int nodenum, ret = -1, status;
- struct r2nm_node *node = NULL;
- struct kvec vec[2];
- size_t veclen = 2;
- u32 msg_type;
- struct r2net_node *nn;
-
- BUG_ON(size > RMSTR_R2NET_MAX_LEN);
- xh->client_id = r2nm_this_node(); /* which node is putting */
- vec[0].iov_len = sizeof(*xh);
- vec[0].iov_base = xh;
- vec[1].iov_len = size;
- vec[1].iov_base = data;
-
- node = r2net_target_node;
- if (!node)
- goto out;
-
- nodenum = r2net_target_nodenum;
-
- r2nm_node_get(node);
-
- nn = r2net_nn_from_num(nodenum);
- if (nn->nn_persistent_error || !nn->nn_sc_valid) {
- ret = -ENOTCONN;
- r2nm_node_put(node);
- goto out;
- }
-
- if (ephemeral)
- msg_type = RMSTR_TMEM_PUT_EPH;
- else
- msg_type = RMSTR_TMEM_PUT_PERS;
-#ifdef RAMSTER_TESTING
- /* leave me here to see if it catches a weird crash */
- ramster_check_irq_counts();
-#endif
-
- ret = r2net_send_message_vec(msg_type, RMSTR_KEY, vec, veclen,
- nodenum, &status);
- if (ret < 0)
- ret = -1;
- else {
- ret = status;
- *remotenode = nodenum;
- }
-
- r2nm_node_put(node);
-out:
- return ret;
-}
-
-int r2net_remote_flush(struct tmem_xhandle *xh, int remotenode)
-{
- int ret = -1, status;
- struct r2nm_node *node = NULL;
- struct kvec vec[1];
- size_t veclen = 1;
-
- node = r2nm_get_node_by_num(remotenode);
- BUG_ON(node == NULL);
- xh->client_id = r2nm_this_node(); /* which node is flushing */
- vec[0].iov_len = sizeof(*xh);
- vec[0].iov_base = xh;
- BUG_ON(irqs_disabled());
- BUG_ON(in_softirq());
- ret = r2net_send_message_vec(RMSTR_TMEM_FLUSH, RMSTR_KEY,
- vec, veclen, remotenode, &status);
- r2nm_node_put(node);
- return ret;
-}
-
-int r2net_remote_flush_object(struct tmem_xhandle *xh, int remotenode)
-{
- int ret = -1, status;
- struct r2nm_node *node = NULL;
- struct kvec vec[1];
- size_t veclen = 1;
-
- node = r2nm_get_node_by_num(remotenode);
- BUG_ON(node == NULL);
- xh->client_id = r2nm_this_node(); /* which node is flobjing */
- vec[0].iov_len = sizeof(*xh);
- vec[0].iov_base = xh;
- ret = r2net_send_message_vec(RMSTR_TMEM_FLOBJ, RMSTR_KEY,
- vec, veclen, remotenode, &status);
- r2nm_node_put(node);
- return ret;
-}
-
-/*
- * Handler registration
- */
-
-static LIST_HEAD(r2net_unreg_list);
-
-static void r2net_unregister_handlers(void)
-{
- r2net_unregister_handler_list(&r2net_unreg_list);
-}
-
-int r2net_register_handlers(void)
-{
- int status;
-
- status = r2net_register_handler(RMSTR_TMEM_PUT_EPH, RMSTR_KEY,
- RMSTR_R2NET_MAX_LEN,
- ramster_remote_put_handler,
- NULL, NULL, &r2net_unreg_list);
- if (status)
- goto bail;
-
- status = r2net_register_handler(RMSTR_TMEM_PUT_PERS, RMSTR_KEY,
- RMSTR_R2NET_MAX_LEN,
- ramster_remote_put_handler,
- NULL, NULL, &r2net_unreg_list);
- if (status)
- goto bail;
-
- status = r2net_register_handler(RMSTR_TMEM_ASYNC_GET_REQUEST, RMSTR_KEY,
- RMSTR_R2NET_MAX_LEN,
- ramster_remote_async_get_request_handler,
- NULL, NULL,
- &r2net_unreg_list);
- if (status)
- goto bail;
-
- status = r2net_register_handler(RMSTR_TMEM_ASYNC_GET_AND_FREE_REQUEST,
- RMSTR_KEY, RMSTR_R2NET_MAX_LEN,
- ramster_remote_async_get_request_handler,
- NULL, NULL,
- &r2net_unreg_list);
- if (status)
- goto bail;
-
- status = r2net_register_handler(RMSTR_TMEM_ASYNC_GET_REPLY, RMSTR_KEY,
- RMSTR_R2NET_MAX_LEN,
- ramster_remote_async_get_reply_handler,
- NULL, NULL,
- &r2net_unreg_list);
- if (status)
- goto bail;
-
- status = r2net_register_handler(RMSTR_TMEM_FLUSH, RMSTR_KEY,
- RMSTR_R2NET_MAX_LEN,
- ramster_remote_flush_handler,
- NULL, NULL,
- &r2net_unreg_list);
- if (status)
- goto bail;
-
- status = r2net_register_handler(RMSTR_TMEM_FLOBJ, RMSTR_KEY,
- RMSTR_R2NET_MAX_LEN,
- ramster_remote_flobj_handler,
- NULL, NULL,
- &r2net_unreg_list);
- if (status)
- goto bail;
-
- pr_info("ramster: r2net handlers registered\n");
-
-bail:
- if (status) {
- r2net_unregister_handlers();
- pr_err("ramster: couldn't register r2net handlers\n");
- }
- return status;
-}
diff --git a/drivers/staging/zcache/ramster/ramster-howto.txt b/drivers/staging/zcache/ramster/ramster-howto.txt
deleted file mode 100644
index 7b1ee3bbfdd..00000000000
--- a/drivers/staging/zcache/ramster/ramster-howto.txt
+++ /dev/null
@@ -1,366 +0,0 @@
- RAMSTER HOW-TO
-
-Author: Dan Magenheimer
-Ramster maintainer: Konrad Wilk <konrad.wilk@oracle.com>
-
-This is a HOWTO document for ramster which, as of this writing, is in
-the kernel as a subdirectory of zcache in drivers/staging, called ramster.
-(Zcache can be built with or without ramster functionality.) If enabled
-and properly configured, ramster allows memory capacity load balancing
-across multiple machines in a cluster. Further, the ramster code serves
-as an example of asynchronous access for zcache (as well as cleancache and
-frontswap) that may prove useful for future transcendent memory
-implementations, such as KVM and NVRAM. While ramster works today on
-any network connection that supports kernel sockets, its features may
-become more interesting on future high-speed fabrics/interconnects.
-
-Ramster requires both kernel and userland support. The userland support,
-called ramster-tools, is known to work with EL6-based distros, but is a
-set of poorly-hacked slightly-modified cluster tools based on ocfs2, which
-includes an init file, a config file, and a userland binary that interfaces
-to the kernel. This state of userland support reflects the abysmal userland
-skills of this suitably-embarrassed author; any help/patches to turn
-ramster-tools into more distributable rpms/debs useful for a wider range
-of distros would be appreciated. The source RPM that can be used as a
-starting point is available at:
- http://oss.oracle.com/projects/tmem/files/RAMster/
-
-As a result of this author's ignorance, userland setup described in this
-HOWTO assumes an EL6 distro and is described in EL6 syntax. Apologies
-if this offends anyone!
-
-Kernel support has only been tested on x86_64. Systems with an active
-ocfs2 filesystem should work, but since ramster leverages a lot of
-code from ocfs2, there may be latent issues. A kernel configuration that
-includes CONFIG_OCFS2_FS should build OK, and should certainly run OK
-if no ocfs2 filesystem is mounted.
-
-This HOWTO demonstrates memory capacity load balancing for a two-node
-cluster, where one node called the "local" node becomes overcommitted
-and the other node called the "remote" node provides additional RAM
-capacity for use by the local node. Ramster is capable of more complex
-topologies; see the last section titled "ADVANCED RAMSTER TOPOLOGIES".
-
-If you find any terms in this HOWTO unfamiliar or don't understand the
-motivation for ramster, the following LWN reading is recommended:
--- Transcendent Memory in a Nutshell (lwn.net/Articles/454795)
--- The future calculus of memory management (lwn.net/Articles/475681)
-And since ramster is built on top of zcache, this article may be helpful:
--- In-kernel memory compression (lwn.net/Articles/545244)
-
-Now that you've memorized the contents of those articles, let's get started!
-
-A. PRELIMINARY
-
-1) Install two x86_64 Linux systems that are known to work when
- upgraded to a recent upstream Linux kernel version.
-
-On each system:
-
-2) Configure, build and install, then boot Linux, just to ensure it
- can be done with an unmodified upstream kernel. Confirm you booted
- the upstream kernel with "uname -a".
-
-3) If you plan to do any performance testing or unless you plan to
- test only swapping, the "WasActive" patch is also highly recommended.
- (Search lkml.org for WasActive, apply the patch, rebuild your kernel.)
- For a demo or simple testing, the patch can be ignored.
-
-4) Install ramster-tools as root. An x86_64 rpm for EL6-based systems
- can be found at:
- http://oss.oracle.com/projects/tmem/files/RAMster/
- (Sorry but for now, non-EL6 users must recreate ramster-tools on
- their own from source. See above.)
-
-5) Ensure that debugfs is mounted at each boot. Examples below assume it
- is mounted at /sys/kernel/debug.
-
-B. BUILDING RAMSTER INTO THE KERNEL
-
-Do the following on each system:
-
-1) Using the kernel configuration mechanism of your choice, change
- your config to include:
-
- CONFIG_CLEANCACHE=y
- CONFIG_FRONTSWAP=y
- CONFIG_STAGING=y
- CONFIG_CONFIGFS_FS=y # NOTE: MUST BE y, not m
- CONFIG_ZCACHE=y
- CONFIG_RAMSTER=y
-
- For a linux-3.10 or later kernel, you should also set:
-
- CONFIG_ZCACHE_DEBUG=y
- CONFIG_RAMSTER_DEBUG=y
-
-   Before building the kernel, please double-check your kernel config
- file to ensure all of the settings are correct.
-
-2) Build this kernel and change your boot file (e.g. /etc/grub.conf)
- so that the new kernel will boot.
-
-3) Add "zcache" and "ramster" as kernel boot parameters for the new kernel.
-
-4) Reboot each system approximately simultaneously.
-
-5) Check dmesg to ensure there are some messages from ramster, prefixed
- by "ramster:"
-
- # dmesg | grep ramster
-
- You should also see a lot of files in:
-
- # ls /sys/kernel/debug/zcache
- # ls /sys/kernel/debug/ramster
-
- These are mostly counters for various zcache and ramster activities.
- You should also see files in:
-
- # ls /sys/kernel/mm/ramster
-
- These are sysfs files that control ramster as we shall see.
-
- Ramster now will act as a single-system zcache on each system
- but doesn't yet know anything about the cluster so can't yet do
- anything remotely.
-
-C. CONFIGURING THE RAMSTER CLUSTER
-
-This part can be error prone unless you are familiar with clustering
-filesystems. We need to describe the cluster in a /etc/ramster.conf
-file and the init scripts that parse it are extremely picky about
-the syntax.
-
-1) Create a /etc/ramster.conf file and ensure it is identical on both
- systems. This file mimics the ocfs2 format and there is a good amount
-   of documentation you can find by searching for ocfs2.conf, but you can use:
-
- cluster:
- name = ramster
- node_count = 2
- node:
- name = system1
- cluster = ramster
- number = 0
- ip_address = my.ip.ad.r1
- ip_port = 7777
- node:
- name = system2
- cluster = ramster
- number = 1
- ip_address = my.ip.ad.r2
- ip_port = 7777
-
- You must ensure that the "name" field in the file exactly matches
- the output of "hostname" on each system; if "hostname" shows a
- fully-qualified hostname, ensure the name is fully qualified in
- /etc/ramster.conf. Obviously, substitute my.ip.ad.rx with proper
- ip addresses.
-
-2) Enable the ramster service and configure it. If you used the
- EL6 ramster-tools, this would be:
-
- # chkconfig --add ramster
- # service ramster configure
-
- Set "load on boot" to "y", cluster to start is "ramster" (or whatever
- name you chose in ramster.conf), heartbeat dead threshold as "500",
- network idle timeout as "1000000". Leave the others as default.
-
-3) Reboot both systems. After reboot, try (assuming EL6 ramster-tools):
-
- # service ramster status
-
- You should see "Checking RAMSTER cluster "ramster": Online". If you do
- not, something is wrong and ramster will not work. Note that you
- should also see that the driver for "configfs" is loaded and mounted,
- the driver for ocfs2_dlmfs is not loaded, and some numbers for network
- parameters. You will also see "Checking RAMSTER heartbeat: Not active".
- That's all OK.
-
-4) Now you need to start the cluster heartbeat; the cluster is not "up"
- until all nodes detect a heartbeat. In a real cluster, heartbeat detection
- is done via a cluster filesystem, but ramster doesn't require one. Some
- hack-y kernel code in ramster can start the heartbeat for you though if
- you tell it what nodes are "up". To enable the heartbeat, do:
-
- # echo 0 > /sys/kernel/mm/ramster/manual_node_up
- # echo 1 > /sys/kernel/mm/ramster/manual_node_up
-
- This must be done on BOTH nodes and, to avoid timeouts, must be done
- approximately concurrently on both nodes. On an EL6 system, it is
- convenient to put these lines in /etc/rc.local. To confirm that the
- cluster is now up, on both systems do:
-
- # dmesg | grep ramster
-
- You should see ramster "Accepted connection" messages in dmesg on both
- nodes after this. Note that if you check userland status again with
-
- # service ramster status
-
- you will still see "Checking RAMSTER heartbeat: Not active". That's
- still OK... the ramster kernel heartbeat hack doesn't communicate to
- userland.
-
-5) You now must tell each node the node to which it should "remotify" pages.
- On this two node cluster, we will assume the "local" node, node 0, has
- memory overcommitted and will use ramster to utilize RAM capacity on
- the "remote node", node 1. To configure this, on node 0, you do:
-
- # echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum
-
- You should see "ramster: node 1 set as remotification target" in dmesg
- on node 0. Again, on EL6, /etc/rc.local is a good place to put this
- on node 0 so you don't forget to do it at each boot.
-
-6) One more step: By default, the ramster code does not "remotify" any
- pages; this is primarily for testing purposes, but sometimes it is
- useful. This may change in the future, but for now, on node 0, you do:
-
- # echo 1 > /sys/kernel/mm/ramster/pers_remotify_enable
- # echo 1 > /sys/kernel/mm/ramster/eph_remotify_enable
-
- The first enables remotifying swap (persistent, aka frontswap) pages,
- the second enables remotifying of page cache (ephemeral, cleancache)
- pages.
-
- On EL6, these lines can also be put in /etc/rc.local (AFTER the
- node_up lines), or at the beginning of a script that runs a workload.
-
-7) Note that most testing has been done with both/all machines booted
- roughly simultaneously to avoid cluster timeouts. Ideally, you should
- do this too unless you are trying to break ramster rather than just
- use it. ;-)
-
-D. TESTING RAMSTER
-
-1) Note that ramster has no value unless pages get "remotified". For
- swap/frontswap/persistent pages, this doesn't happen unless/until
- the workload would cause swapping to occur, at which point pages
- are put into frontswap/zcache, and the remotification thread starts
- working. To get to the point where the system swaps, you either
- need a workload for which the working set exceeds the RAM in the
- system; or you need to somehow reduce the amount of RAM one of
-   the systems sees. The latter is easy when testing in a VM, but
- harder on physical systems. In some cases, "mem=xxxM" on the
- kernel command line restricts memory, but for some values of xxx
- the kernel may fail to boot. One may also try creating a fixed
- RAMdisk, doing nothing with it, but ensuring that it eats up a fixed
- amount of RAM.
-
-2) To see if ramster is working, on the "remote node", node 1, try:
-
- # grep . /sys/kernel/debug/ramster/foreign_*
- # # note, that is space-dot-space between grep and the pathname
-
-   to monitor the number (and max) of ephemeral and persistent pages
- that ramster has sent. If these stay at zero, ramster is not working
- either because the workload on the local node (node 0) isn't creating
- enough memory pressure or because "remotifying" isn't working. On the
- local system, node 0, you can watch lots of useful information also.
- Try:
-
- grep . /sys/kernel/debug/zcache/*pageframes* \
- /sys/kernel/debug/zcache/*zbytes* \
- /sys/kernel/debug/zcache/*zpages* \
- /sys/kernel/debug/ramster/*remote*
-
- Of particular note are the remote_*_pages_succ_get counters. These
- show how many disk reads and/or disk writes have been avoided on the
- overcommitted local system by storing pages remotely using ramster.
-
- At the risk of information overload, you can also grep:
-
- /sys/kernel/debug/cleancache/* and /sys/kernel/debug/frontswap/*
-
- These show, for example, how many disk reads and/or disk writes have
- been avoided by using zcache to optimize RAM on the local system.
-
-
-AUTOMATIC SWAP REPATRIATION
-
-You may notice that while the systems are idle, the foreign persistent
-page count on the remote machine slowly decreases. This is because
-ramster implements "frontswap selfshrinking": When possible, swap
-pages that have been remotified are slowly repatriated to the local
-machine. This is so that local RAM can be used when possible and
-so that, in case of remote machine crash, the probability of loss
-of data is reduced.
-
-REBOOTING / POWEROFF
-
-If a system is shut down while some of its swap pages still reside
-on a remote system, the system may lock up during the shutdown
-sequence. This will occur if the network is shut down before the
-swap mechanism is shut down, which is the default ordering on many
-distros. To avoid this annoying problem, simply shut off the swap
-subsystem before starting the shutdown sequence, e.g.:
-
- # swapoff -a
- # reboot
-
-Ideally, this swapoff-before-ifdown ordering should be enforced permanently
-using shutdown scripts.
-
-KNOWN PROBLEMS
-
-1) You may periodically see messages such as:
-
- ramster_r2net, message length problem
-
- This is harmless but indicates that a node is sending messages
- containing compressed pages that exceed the maximum for zcache
- (PAGE_SIZE*15/16). The sender side needs to be fixed.
-
-2) If you see a "No longer connected to node..." message or a "No connection
- established with node X after N seconds", it is possible you may
- be in an unrecoverable state. If you are certain all of the
- appropriate cluster configuration steps described above have been
- performed, try rebooting the two servers concurrently to see if
- the cluster starts.
-
- Note that "Connection to node... shutdown, state 7" is an intermediate
- connection state. As long as you later see "Accepted connection", the
- intermediate states are harmless.
-
-3) There are known issues in counting certain values. As a result
- you may see periodic warnings from the kernel. Almost always you
- will see "ramster: bad accounting for XXX". There are also "WARN_ONCE"
- messages. If you see kernel warnings with a tombstone, please report
- them. They are harmless but reflect bugs that need to be eventually fixed.
-
-ADVANCED RAMSTER TOPOLOGIES
-
-The kernel code for ramster can support up to eight nodes in a cluster,
-but no testing has been done with more than three nodes.
-
-In the example described above, the "remote" node serves as a RAM
-overflow for the "local" node. This can be made symmetric by appropriate
-settings of the sysfs remote_target_nodenum file. For example, by setting:
-
- # echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum
-
-on node 0, and
-
- # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum
-
-on node 1, each node can serve as a RAM overflow for the other.
-
-For more than two nodes, a "RAM server" can be configured. For a
-three node system, set:
-
- # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum
-
-on node 1, and
-
- # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum
-
-on node 2. Then node 0 is a RAM server for node 1 and node 2.
-
-In this implementation of ramster, any remote node is potentially a single
-point of failure (SPOF). Though the probability of failure is reduced
-by automatic swap repatriation (see above), a proposed future enhancement
-to ramster improves high-availability for the cluster by sending a copy
-of each page of data to two other nodes. Patches welcome!
diff --git a/drivers/staging/zcache/ramster/ramster.c b/drivers/staging/zcache/ramster/ramster.c
deleted file mode 100644
index a937ce1fa27..00000000000
--- a/drivers/staging/zcache/ramster/ramster.c
+++ /dev/null
@@ -1,925 +0,0 @@
-/*
- * ramster.c
- *
- * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
- *
- * RAMster implements peer-to-peer transcendent memory, allowing a "cluster" of
- * kernels to dynamically pool their RAM so that a RAM-hungry workload on one
- * machine can temporarily and transparently utilize RAM on another machine
- * which is presumably idle or running a non-RAM-hungry workload.
- *
- * RAMster combines a clustering and messaging foundation based on the ocfs2
- * cluster layer with the in-kernel compression implementation of zcache, and
- * adds code to glue them together. When a page is "put" to RAMster, it is
- * compressed and stored locally. Periodically, a thread will "remotify" these
- * pages by sending them via messages to a remote machine. When the page is
- * later needed as indicated by a page fault, a "get" is issued. If the data
- * is local, it is uncompressed and the fault is resolved. If the data is
- * remote, a message is sent to fetch the data and the faulting thread sleeps;
- * when the data arrives, the thread awakens, the data is decompressed and
- * the fault is resolved.
- *
- * As of V5, clusters up to eight nodes are supported; each node can remotify
- * pages to one specified node, so clusters can be configured as clients to
- * a "memory server". Some simple policy is in place that will need to be
- * refined over time. Larger clusters and fault-resistant protocols can also
- * be added over time.
- */
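A minimal sketch of the remote "get" round trip the comment above describes,
expressed with functions defined later in this file and in r2net.c; the wrapper
itself is hypothetical, error handling is omitted, and the declarations are
assumed to come from this patch's headers:

static int example_remote_get(struct tmem_pool *pool, struct tmem_oid *oidp,
			      uint32_t index, void *remote_pampd,
			      struct page *page)
{
	/*
	 * Ask the node recorded in the remote pampd for the compressed data;
	 * the faulting page rides along as "extra" so the reply path knows
	 * where to put it.
	 */
	return ramster_pampd_repatriate(remote_pampd, NULL, pool, oidp, index,
					false, page);
	/*
	 * The reply lands in r2net.c, which calls ramster_localify(); that
	 * decompresses directly into "page" and resolves the fault.
	 */
}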
-
-#include <linux/module.h>
-#include <linux/cpu.h>
-#include <linux/highmem.h>
-#include <linux/list.h>
-#include <linux/lzo.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/atomic.h>
-#include <linux/frontswap.h>
-#include "../tmem.h"
-#include "../zcache.h"
-#include "../zbud.h"
-#include "ramster.h"
-#include "ramster_nodemanager.h"
-#include "tcp.h"
-#include "debug.h"
-
-#define RAMSTER_TESTING
-
-#ifndef CONFIG_SYSFS
-#error "ramster needs sysfs to define cluster nodes to use"
-#endif
-
-static bool use_cleancache __read_mostly;
-static bool use_frontswap __read_mostly;
-static bool use_frontswap_exclusive_gets __read_mostly;
-
-/* These must be sysfs not debugfs as they are checked/used by userland!! */
-static unsigned long ramster_interface_revision __read_mostly =
- R2NM_API_VERSION; /* interface revision must match userspace! */
-static unsigned long ramster_pers_remotify_enable __read_mostly;
-static unsigned long ramster_eph_remotify_enable __read_mostly;
-static atomic_t ramster_remote_pers_pages = ATOMIC_INIT(0);
-#define MANUAL_NODES 8
-static bool ramster_nodes_manual_up[MANUAL_NODES] __read_mostly;
-static int ramster_remote_target_nodenum __read_mostly = -1;
-
-/* Used by this code. */
-long ramster_flnodes;
-/* FIXME frontswap selfshrinking knobs in debugfs? */
-
-static LIST_HEAD(ramster_rem_op_list);
-static DEFINE_SPINLOCK(ramster_rem_op_list_lock);
-static DEFINE_PER_CPU(struct ramster_preload, ramster_preloads);
-
-static DEFINE_PER_CPU(unsigned char *, ramster_remoteputmem1);
-static DEFINE_PER_CPU(unsigned char *, ramster_remoteputmem2);
-
-static struct kmem_cache *ramster_flnode_cache __read_mostly;
-
-static struct flushlist_node *ramster_flnode_alloc(struct tmem_pool *pool)
-{
- struct flushlist_node *flnode = NULL;
- struct ramster_preload *kp;
-
- kp = &__get_cpu_var(ramster_preloads);
- flnode = kp->flnode;
- BUG_ON(flnode == NULL);
- kp->flnode = NULL;
- inc_ramster_flnodes();
- return flnode;
-}
-
-/* the "flush list" asynchronously collects pages to remotely flush */
-#define FLUSH_ENTIRE_OBJECT ((uint32_t)-1)
-static void ramster_flnode_free(struct flushlist_node *flnode,
- struct tmem_pool *pool)
-{
- dec_ramster_flnodes();
- BUG_ON(ramster_flnodes < 0);
- kmem_cache_free(ramster_flnode_cache, flnode);
-}
-
-int ramster_do_preload_flnode(struct tmem_pool *pool)
-{
- struct ramster_preload *kp;
- struct flushlist_node *flnode;
- int ret = -ENOMEM;
-
- BUG_ON(!irqs_disabled());
- if (unlikely(ramster_flnode_cache == NULL))
- BUG();
- kp = &__get_cpu_var(ramster_preloads);
- flnode = kmem_cache_alloc(ramster_flnode_cache, GFP_ATOMIC);
- if (unlikely(flnode == NULL) && kp->flnode == NULL)
- BUG(); /* FIXME handle more gracefully, but how??? */
- else if (kp->flnode == NULL)
- kp->flnode = flnode;
- else
- kmem_cache_free(ramster_flnode_cache, flnode);
- return ret;
-}
-EXPORT_SYMBOL_GPL(ramster_do_preload_flnode);
-
-/*
- * Called by the message handler after a (still compressed) page has been
- * fetched from the remote machine in response to an "is_remote" tmem_get
- * or persistent tmem_localify. For a tmem_get, "extra" is the address of
- * the page that is to be filled to successfully resolve the tmem_get; for
- * a (persistent) tmem_localify, "extra" is NULL (as the data is placed only
- * in the local zcache). "data" points to "size" bytes of (compressed) data
- * passed in the message. In the case of a persistent remote get, if
- * pre-allocation was successful (see ramster_repatriate_preload), the page
- * is placed into both local zcache and at "extra".
- */
-int ramster_localify(int pool_id, struct tmem_oid *oidp, uint32_t index,
- char *data, unsigned int size, void *extra)
-{
- int ret = -ENOENT;
- unsigned long flags;
- struct tmem_pool *pool;
- bool eph, delete = false;
- void *pampd, *saved_hb;
- struct tmem_obj *obj;
-
- pool = zcache_get_pool_by_id(LOCAL_CLIENT, pool_id);
- if (unlikely(pool == NULL))
- /* pool doesn't exist anymore */
- goto out;
- eph = is_ephemeral(pool);
- local_irq_save(flags); /* FIXME: maybe only disable softirqs? */
- pampd = tmem_localify_get_pampd(pool, oidp, index, &obj, &saved_hb);
- if (pampd == NULL) {
- /* hmmm... must have been a flush while waiting */
-#ifdef RAMSTER_TESTING
- pr_err("UNTESTED pampd==NULL in ramster_localify\n");
-#endif
- if (eph)
- inc_ramster_remote_eph_pages_unsucc_get();
- else
- inc_ramster_remote_pers_pages_unsucc_get();
- obj = NULL;
- goto finish;
- } else if (unlikely(!pampd_is_remote(pampd))) {
- /* hmmm... must have been a dup put while waiting */
-#ifdef RAMSTER_TESTING
- pr_err("UNTESTED dup while waiting in ramster_localify\n");
-#endif
- if (eph)
- inc_ramster_remote_eph_pages_unsucc_get();
- else
- inc_ramster_remote_pers_pages_unsucc_get();
- obj = NULL;
- pampd = NULL;
- ret = -EEXIST;
- goto finish;
- } else if (size == 0) {
- /* no remote data, delete the local is_remote pampd */
- pampd = NULL;
- if (eph)
- inc_ramster_remote_eph_pages_unsucc_get();
- else
- BUG();
- delete = true;
- goto finish;
- }
- if (pampd_is_intransit(pampd)) {
- /*
- * a pampd is marked intransit if it is remote and space has
- * been allocated for it locally (note, only happens for
- * persistent pages, in which case the remote copy is freed)
- */
- BUG_ON(eph);
- pampd = pampd_mask_intransit_and_remote(pampd);
- zbud_copy_to_zbud(pampd, data, size);
- } else {
- /*
- * setting pampd to NULL tells tmem_localify_finish to leave
- * pampd alone... meaning it is left pointing to the
- * remote copy
- */
- pampd = NULL;
- obj = NULL;
- }
- /*
- * but in all cases, we decompress direct-to-memory to complete
- * the remotify and return success
- */
- BUG_ON(extra == NULL);
- zcache_decompress_to_page(data, size, (struct page *)extra);
- if (eph)
- inc_ramster_remote_eph_pages_succ_get();
- else
- inc_ramster_remote_pers_pages_succ_get();
- ret = 0;
-finish:
- tmem_localify_finish(obj, index, pampd, saved_hb, delete);
- zcache_put_pool(pool);
- local_irq_restore(flags);
-out:
- return ret;
-}
-
-void ramster_pampd_new_obj(struct tmem_obj *obj)
-{
- obj->extra = NULL;
-}
-
-void ramster_pampd_free_obj(struct tmem_pool *pool, struct tmem_obj *obj,
- bool pool_destroy)
-{
- struct flushlist_node *flnode;
-
- BUG_ON(preemptible());
- if (obj->extra == NULL)
- return;
- if (pool_destroy && is_ephemeral(pool))
- /* FIXME don't bother with remote eph data for now */
- return;
- BUG_ON(!pampd_is_remote(obj->extra));
- flnode = ramster_flnode_alloc(pool);
- flnode->xh.client_id = pampd_remote_node(obj->extra);
- flnode->xh.pool_id = pool->pool_id;
- flnode->xh.oid = obj->oid;
- flnode->xh.index = FLUSH_ENTIRE_OBJECT;
- flnode->rem_op.op = RAMSTER_REMOTIFY_FLUSH_OBJ;
- spin_lock(&ramster_rem_op_list_lock);
- list_add(&flnode->rem_op.list, &ramster_rem_op_list);
- spin_unlock(&ramster_rem_op_list_lock);
-}
-
-/*
- * Called on a remote persistent tmem_get to attempt to preallocate
- * local storage for the data contained in the remote persistent page.
- * If successfully preallocated, returns the pampd, marked as remote and
- * in_transit. Else returns NULL. Note that the appropriate tmem data
- * structure must be locked.
- */
-void *ramster_pampd_repatriate_preload(void *pampd, struct tmem_pool *pool,
- struct tmem_oid *oidp, uint32_t index,
- bool *intransit)
-{
- int clen = pampd_remote_size(pampd), c;
- void *ret_pampd = NULL;
- unsigned long flags;
- struct tmem_handle th;
-
- BUG_ON(!pampd_is_remote(pampd));
- BUG_ON(is_ephemeral(pool));
- if (use_frontswap_exclusive_gets)
- /* don't need local storage */
- goto out;
- if (pampd_is_intransit(pampd)) {
- /*
- * to avoid multiple allocations (and maybe a memory leak)
- * don't preallocate if already in the process of being
- * repatriated
- */
- *intransit = true;
- goto out;
- }
- *intransit = false;
- local_irq_save(flags);
- th.client_id = pampd_remote_node(pampd);
- th.pool_id = pool->pool_id;
- th.oid = *oidp;
- th.index = index;
- ret_pampd = zcache_pampd_create(NULL, clen, true, false, &th);
- if (ret_pampd != NULL) {
- /*
- * a pampd is marked intransit if it is remote and space has
- * been allocated for it locally (note, only happens for
- * persistent pages, in which case the remote copy is freed)
- */
- ret_pampd = pampd_mark_intransit(ret_pampd);
- c = atomic_dec_return(&ramster_remote_pers_pages);
- WARN_ON_ONCE(c < 0);
- } else {
- inc_ramster_pers_pages_remote_nomem();
- }
- local_irq_restore(flags);
-out:
- return ret_pampd;
-}
-
-/*
- * Called on a remote tmem_get to invoke a message to fetch the page.
- * Might sleep so no tmem locks can be held. "extra" is passed
- * all the way through the round-trip messaging to ramster_localify.
- */
-int ramster_pampd_repatriate(void *fake_pampd, void *real_pampd,
- struct tmem_pool *pool,
- struct tmem_oid *oid, uint32_t index,
- bool free, void *extra)
-{
- struct tmem_xhandle xh;
- int ret;
-
- if (pampd_is_intransit(real_pampd))
- /* have local space pre-reserved, so free remote copy */
- free = true;
- xh = tmem_xhandle_fill(LOCAL_CLIENT, pool, oid, index);
- /* unreliable request/response for now */
- ret = r2net_remote_async_get(&xh, free,
- pampd_remote_node(fake_pampd),
- pampd_remote_size(fake_pampd),
- pampd_remote_cksum(fake_pampd),
- extra);
- return ret;
-}
-
-bool ramster_pampd_is_remote(void *pampd)
-{
- return pampd_is_remote(pampd);
-}
-
-int ramster_pampd_replace_in_obj(void *new_pampd, struct tmem_obj *obj)
-{
- int ret = -1;
-
- if (new_pampd != NULL) {
- if (obj->extra == NULL)
- obj->extra = new_pampd;
- /* enforce that all remote pages in an object reside
- * in the same node! */
- else if (pampd_remote_node(new_pampd) !=
- pampd_remote_node((void *)(obj->extra)))
- BUG();
- ret = 0;
- }
- return ret;
-}
-
-void *ramster_pampd_free(void *pampd, struct tmem_pool *pool,
- struct tmem_oid *oid, uint32_t index, bool acct)
-{
- bool eph = is_ephemeral(pool);
- void *local_pampd = NULL;
- int c;
-
- BUG_ON(preemptible());
- BUG_ON(!pampd_is_remote(pampd));
- WARN_ON(acct == false);
- if (oid == NULL) {
- /*
- * a NULL oid means to ignore this pampd free
- * as the remote freeing will be handled elsewhere
- */
- } else if (eph) {
- /* FIXME remote flush optional but probably good idea */
- } else if (pampd_is_intransit(pampd)) {
- /* did a pers remote get_and_free, so just free local */
- local_pampd = pampd_mask_intransit_and_remote(pampd);
- } else {
- struct flushlist_node *flnode =
- ramster_flnode_alloc(pool);
-
- flnode->xh.client_id = pampd_remote_node(pampd);
- flnode->xh.pool_id = pool->pool_id;
- flnode->xh.oid = *oid;
- flnode->xh.index = index;
- flnode->rem_op.op = RAMSTER_REMOTIFY_FLUSH_PAGE;
- spin_lock(&ramster_rem_op_list_lock);
- list_add(&flnode->rem_op.list, &ramster_rem_op_list);
- spin_unlock(&ramster_rem_op_list_lock);
- c = atomic_dec_return(&ramster_remote_pers_pages);
- WARN_ON_ONCE(c < 0);
- }
- return local_pampd;
-}
-EXPORT_SYMBOL_GPL(ramster_pampd_free);
-
-void ramster_count_foreign_pages(bool eph, int count)
-{
- BUG_ON(count != 1 && count != -1);
- if (eph) {
- if (count > 0) {
- inc_ramster_foreign_eph_pages();
- } else {
- dec_ramster_foreign_eph_pages();
-#ifdef CONFIG_RAMSTER_DEBUG
- WARN_ON_ONCE(ramster_foreign_eph_pages < 0);
-#endif
- }
- } else {
- if (count > 0) {
- inc_ramster_foreign_pers_pages();
- } else {
- dec_ramster_foreign_pers_pages();
-#ifdef CONFIG_RAMSTER_DEBUG
- WARN_ON_ONCE(ramster_foreign_pers_pages < 0);
-#endif
- }
- }
-}
-EXPORT_SYMBOL_GPL(ramster_count_foreign_pages);
-
-/*
- * For now, just push over a few pages every few seconds to
- * ensure that it basically works
- */
-static struct workqueue_struct *ramster_remotify_workqueue;
-static void ramster_remotify_process(struct work_struct *work);
-static DECLARE_DELAYED_WORK(ramster_remotify_worker,
- ramster_remotify_process);
-
-static void ramster_remotify_queue_delayed_work(unsigned long delay)
-{
- if (!queue_delayed_work(ramster_remotify_workqueue,
- &ramster_remotify_worker, delay))
- pr_err("ramster_remotify: bad workqueue\n");
-}
-
-static void ramster_remote_flush_page(struct flushlist_node *flnode)
-{
- struct tmem_xhandle *xh;
- int remotenode, ret;
-
- preempt_disable();
- xh = &flnode->xh;
- remotenode = flnode->xh.client_id;
- ret = r2net_remote_flush(xh, remotenode);
- if (ret >= 0)
- inc_ramster_remote_pages_flushed();
- else
- inc_ramster_remote_page_flushes_failed();
- preempt_enable_no_resched();
- ramster_flnode_free(flnode, NULL);
-}
-
-static void ramster_remote_flush_object(struct flushlist_node *flnode)
-{
- struct tmem_xhandle *xh;
- int remotenode, ret;
-
- preempt_disable();
- xh = &flnode->xh;
- remotenode = flnode->xh.client_id;
- ret = r2net_remote_flush_object(xh, remotenode);
- if (ret >= 0)
- inc_ramster_remote_objects_flushed();
- else
- inc_ramster_remote_object_flushes_failed();
- preempt_enable_no_resched();
- ramster_flnode_free(flnode, NULL);
-}
-
-int ramster_remotify_pageframe(bool eph)
-{
- struct tmem_xhandle xh;
- unsigned int size;
- int remotenode, ret, zbuds;
- struct tmem_pool *pool;
- unsigned long flags;
- unsigned char cksum;
- char *p;
- int i, j;
- unsigned char *tmpmem[2];
- struct tmem_handle th[2];
- unsigned int zsize[2];
-
- tmpmem[0] = __get_cpu_var(ramster_remoteputmem1);
- tmpmem[1] = __get_cpu_var(ramster_remoteputmem2);
- local_bh_disable();
- zbuds = zbud_make_zombie_lru(&th[0], &tmpmem[0], &zsize[0], eph);
- /* now OK to release lock set in caller */
- local_bh_enable();
- if (zbuds == 0)
- goto out;
- BUG_ON(zbuds > 2);
- for (i = 0; i < zbuds; i++) {
- xh.client_id = th[i].client_id;
- xh.pool_id = th[i].pool_id;
- xh.oid = th[i].oid;
- xh.index = th[i].index;
- size = zsize[i];
- BUG_ON(size == 0 || size > zbud_max_buddy_size());
- for (p = tmpmem[i], cksum = 0, j = 0; j < size; j++)
- cksum += *p++;
- ret = r2net_remote_put(&xh, tmpmem[i], size, eph, &remotenode);
- if (ret != 0) {
- /*
- * This is some form of a memory leak... if the remote put
- * fails, there will never be another attempt to remotify
- * this page. But since we've dropped the zv pointer,
- * the page may have been freed or the data replaced
- * so we can't just "put it back" in the remote op list.
- * Even if we could, not sure where to put it in the list
- * because there may be flushes that must be strictly
- * ordered vs the put. So leave this as a FIXME for now.
- * But count them so we know if it becomes a problem.
- */
- if (eph)
- inc_ramster_eph_pages_remote_failed();
- else
- inc_ramster_pers_pages_remote_failed();
- break;
- } else {
- if (!eph)
- atomic_inc(&ramster_remote_pers_pages);
- }
- if (eph)
- inc_ramster_eph_pages_remoted();
- else
- inc_ramster_pers_pages_remoted();
- /*
- * data was successfully remoted so change the local version to
- * point to the remote node where it landed
- */
- local_bh_disable();
- pool = zcache_get_pool_by_id(LOCAL_CLIENT, xh.pool_id);
- local_irq_save(flags);
- (void)tmem_replace(pool, &xh.oid, xh.index,
- pampd_make_remote(remotenode, size, cksum));
- local_irq_restore(flags);
- zcache_put_pool(pool);
- local_bh_enable();
- }
-out:
- return zbuds;
-}
-
-static void zcache_do_remotify_flushes(void)
-{
- struct ramster_remotify_hdr *rem_op;
- union remotify_list_node *u;
-
- while (1) {
- spin_lock(&ramster_rem_op_list_lock);
- if (list_empty(&ramster_rem_op_list)) {
- spin_unlock(&ramster_rem_op_list_lock);
- goto out;
- }
- rem_op = list_first_entry(&ramster_rem_op_list,
- struct ramster_remotify_hdr, list);
- list_del_init(&rem_op->list);
- spin_unlock(&ramster_rem_op_list_lock);
- u = (union remotify_list_node *)rem_op;
- switch (rem_op->op) {
- case RAMSTER_REMOTIFY_FLUSH_PAGE:
- ramster_remote_flush_page((struct flushlist_node *)u);
- break;
- case RAMSTER_REMOTIFY_FLUSH_OBJ:
- ramster_remote_flush_object((struct flushlist_node *)u);
- break;
- default:
- BUG();
- }
- }
-out:
- return;
-}
-
-static void ramster_remotify_process(struct work_struct *work)
-{
- static bool remotify_in_progress;
- int i;
-
- BUG_ON(irqs_disabled());
- if (remotify_in_progress)
- goto requeue;
- if (ramster_remote_target_nodenum == -1)
- goto requeue;
- remotify_in_progress = true;
- if (use_cleancache && ramster_eph_remotify_enable) {
- for (i = 0; i < 100; i++) {
- zcache_do_remotify_flushes();
- (void)ramster_remotify_pageframe(true);
- }
- }
- if (use_frontswap && ramster_pers_remotify_enable) {
- for (i = 0; i < 100; i++) {
- zcache_do_remotify_flushes();
- (void)ramster_remotify_pageframe(false);
- }
- }
- remotify_in_progress = false;
-requeue:
- ramster_remotify_queue_delayed_work(HZ);
-}
-
-void ramster_remotify_init(void)
-{
- unsigned long n = 60UL;
- ramster_remotify_workqueue =
- create_singlethread_workqueue("ramster_remotify");
- ramster_remotify_queue_delayed_work(n * HZ);
-}
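In concrete terms: with the values above, the first remotify pass runs about
sixty seconds after ramster_remotify_init() (n * HZ), and each pass then
re-arms itself roughly once per second (HZ) from ramster_remotify_process().
When the corresponding *_remotify_enable sysfs knobs are set, every pass loops
one hundred times over zcache_do_remotify_flushes() plus
ramster_remotify_pageframe(), once for ephemeral and once for persistent pages.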
-
-static ssize_t ramster_manual_node_up_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- int i;
- char *p = buf;
- for (i = 0; i < MANUAL_NODES; i++)
- if (ramster_nodes_manual_up[i])
- p += sprintf(p, "%d ", i);
- p += sprintf(p, "\n");
- return p - buf;
-}
-
-static ssize_t ramster_manual_node_up_store(struct kobject *kobj,
- struct kobj_attribute *attr, const char *buf, size_t count)
-{
- int err;
- unsigned long node_num;
-
- err = kstrtoul(buf, 10, &node_num);
- if (err) {
- pr_err("ramster: bad strtoul?\n");
- return -EINVAL;
- }
- if (node_num >= MANUAL_NODES) {
- pr_err("ramster: bad node_num=%lu?\n", node_num);
- return -EINVAL;
- }
- if (ramster_nodes_manual_up[node_num]) {
- pr_err("ramster: node %d already up, ignoring\n",
- (int)node_num);
- } else {
- ramster_nodes_manual_up[node_num] = true;
- r2net_hb_node_up_manual((int)node_num);
- }
- return count;
-}
-
-static struct kobj_attribute ramster_manual_node_up_attr = {
- .attr = { .name = "manual_node_up", .mode = 0644 },
- .show = ramster_manual_node_up_show,
- .store = ramster_manual_node_up_store,
-};
-
-static ssize_t ramster_remote_target_nodenum_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- if (ramster_remote_target_nodenum == -1UL)
- return sprintf(buf, "unset\n");
- else
- return sprintf(buf, "%d\n", ramster_remote_target_nodenum);
-}
-
-static ssize_t ramster_remote_target_nodenum_store(struct kobject *kobj,
- struct kobj_attribute *attr, const char *buf, size_t count)
-{
- int err;
- unsigned long node_num;
-
- err = kstrtoul(buf, 10, &node_num);
- if (err) {
- pr_err("ramster: bad strtoul?\n");
- return -EINVAL;
- } else if (node_num == -1UL) {
- pr_err("ramster: disabling all remotification, "
- "data may still reside on remote nodes however\n");
- return -EINVAL;
- } else if (node_num >= MANUAL_NODES) {
- pr_err("ramster: bad node_num=%lu?\n", node_num);
- return -EINVAL;
- } else if (!ramster_nodes_manual_up[node_num]) {
- pr_err("ramster: node %d not up, ignoring setting "
- "of remotification target\n", (int)node_num);
- } else if (r2net_remote_target_node_set((int)node_num) >= 0) {
- pr_info("ramster: node %d set as remotification target\n",
- (int)node_num);
- ramster_remote_target_nodenum = (int)node_num;
- } else {
- pr_err("ramster: bad num to node node_num=%d?\n",
- (int)node_num);
- return -EINVAL;
- }
- return count;
-}
-
-static struct kobj_attribute ramster_remote_target_nodenum_attr = {
- .attr = { .name = "remote_target_nodenum", .mode = 0644 },
- .show = ramster_remote_target_nodenum_show,
- .store = ramster_remote_target_nodenum_store,
-};
-
-#define RAMSTER_SYSFS_RO(_name) \
- static ssize_t ramster_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, char *buf) \
- { \
- return sprintf(buf, "%lu\n", ramster_##_name); \
- } \
- static struct kobj_attribute ramster_##_name##_attr = { \
- .attr = { .name = __stringify(_name), .mode = 0444 }, \
- .show = ramster_##_name##_show, \
- }
-
-#define RAMSTER_SYSFS_RW(_name) \
- static ssize_t ramster_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, char *buf) \
- { \
- return sprintf(buf, "%lu\n", ramster_##_name); \
- } \
- static ssize_t ramster_##_name##_store(struct kobject *kobj, \
- struct kobj_attribute *attr, const char *buf, size_t count) \
- { \
- int err; \
- unsigned long enable; \
- err = kstrtoul(buf, 10, &enable); \
- if (err) \
- return -EINVAL; \
- ramster_##_name = enable; \
- return count; \
- } \
- static struct kobj_attribute ramster_##_name##_attr = { \
- .attr = { .name = __stringify(_name), .mode = 0644 }, \
- .show = ramster_##_name##_show, \
- .store = ramster_##_name##_store, \
- }
-
-#define RAMSTER_SYSFS_RO_ATOMIC(_name) \
- static ssize_t ramster_##_name##_show(struct kobject *kobj, \
- struct kobj_attribute *attr, char *buf) \
- { \
- return sprintf(buf, "%d\n", atomic_read(&ramster_##_name)); \
- } \
- static struct kobj_attribute ramster_##_name##_attr = { \
- .attr = { .name = __stringify(_name), .mode = 0444 }, \
- .show = ramster_##_name##_show, \
- }
-
-RAMSTER_SYSFS_RO(interface_revision);
-RAMSTER_SYSFS_RO_ATOMIC(remote_pers_pages);
-RAMSTER_SYSFS_RW(pers_remotify_enable);
-RAMSTER_SYSFS_RW(eph_remotify_enable);
-
-static struct attribute *ramster_attrs[] = {
- &ramster_interface_revision_attr.attr,
- &ramster_remote_pers_pages_attr.attr,
- &ramster_manual_node_up_attr.attr,
- &ramster_remote_target_nodenum_attr.attr,
- &ramster_pers_remotify_enable_attr.attr,
- &ramster_eph_remotify_enable_attr.attr,
- NULL,
-};
-
-static struct attribute_group ramster_attr_group = {
- .attrs = ramster_attrs,
- .name = "ramster",
-};
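For reference, RAMSTER_SYSFS_RW(pers_remotify_enable) above expands to roughly the
following show/store pair and attribute (a hand-expanded sketch, not verbatim from the
tree); because the group is registered under mm_kobj with .name = "ramster", the knob
would surface as /sys/kernel/mm/ramster/pers_remotify_enable:

    static ssize_t ramster_pers_remotify_enable_show(struct kobject *kobj,
                    struct kobj_attribute *attr, char *buf)
    {
            return sprintf(buf, "%lu\n", ramster_pers_remotify_enable);
    }

    static ssize_t ramster_pers_remotify_enable_store(struct kobject *kobj,
                    struct kobj_attribute *attr, const char *buf, size_t count)
    {
            int err;
            unsigned long enable;

            err = kstrtoul(buf, 10, &enable);
            if (err)
                    return -EINVAL;
            ramster_pers_remotify_enable = enable;
            return count;
    }

    static struct kobj_attribute ramster_pers_remotify_enable_attr = {
            .attr = { .name = "pers_remotify_enable", .mode = 0644 },
            .show = ramster_pers_remotify_enable_show,
            .store = ramster_pers_remotify_enable_store,
    };

    /* enable persistent-page remotification from userspace, e.g.:
     *   echo 1 > /sys/kernel/mm/ramster/pers_remotify_enable
     */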
-
-/*
- * frontswap selfshrinking
- */
-
-/* In HZ, controls frequency of worker invocation. */
-static unsigned int selfshrink_interval __read_mostly = 5;
-/* Enable/disable with sysfs. */
-static bool frontswap_selfshrinking __read_mostly;
-
-static void selfshrink_process(struct work_struct *work);
-static DECLARE_DELAYED_WORK(selfshrink_worker, selfshrink_process);
-
-#ifndef CONFIG_RAMSTER_MODULE
-/* Enable/disable with kernel boot option. */
-static bool use_frontswap_selfshrink = true;
-#endif
-
-/*
- * The default values for the following parameters were deemed reasonable
- * by experimentation, may be workload-dependent, and can all be
- * adjusted via sysfs.
- */
-
-/* Control rate for frontswap shrinking. Higher hysteresis is slower. */
-static unsigned int frontswap_hysteresis __read_mostly = 20;
-
-/*
- * Number of selfshrink worker invocations to wait before observing that
- * frontswap selfshrinking should commence. Note that selfshrinking does
- * not use a separate worker thread.
- */
-static unsigned int frontswap_inertia __read_mostly = 3;
-
-/* Countdown to next invocation of frontswap_shrink() */
-static unsigned long frontswap_inertia_counter;
-
-/*
- * Invoked by the selfshrink worker thread, uses current number of pages
- * in frontswap (frontswap_curr_pages()), previous status, and control
- * values (hysteresis and inertia) to determine if frontswap should be
- * shrunk and what the new frontswap size should be. Note that
- * frontswap_shrink is essentially a partial swapoff that immediately
- * transfers pages from the "swap device" (frontswap) back into kernel
- * RAM; despite the name, frontswap "shrinking" is very different from
- * the "shrinker" interface used by the kernel MM subsystem to reclaim
- * memory.
- */
-static void frontswap_selfshrink(void)
-{
- static unsigned long cur_frontswap_pages;
- static unsigned long last_frontswap_pages;
- static unsigned long tgt_frontswap_pages;
-
- last_frontswap_pages = cur_frontswap_pages;
- cur_frontswap_pages = frontswap_curr_pages();
- if (!cur_frontswap_pages ||
- (cur_frontswap_pages > last_frontswap_pages)) {
- frontswap_inertia_counter = frontswap_inertia;
- return;
- }
- if (frontswap_inertia_counter && --frontswap_inertia_counter)
- return;
- if (cur_frontswap_pages <= frontswap_hysteresis)
- tgt_frontswap_pages = 0;
- else
- tgt_frontswap_pages = cur_frontswap_pages -
- (cur_frontswap_pages / frontswap_hysteresis);
- frontswap_shrink(tgt_frontswap_pages);
-}
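A quick worked pass over the arithmetic above (illustrative numbers, not from the
patch): with the default hysteresis of 20, once frontswap_curr_pages() has been
non-increasing for frontswap_inertia consecutive worker runs, each further run trims
roughly 1/20th of the current pages:

    /* illustrative only: default frontswap_hysteresis = 20 */
    unsigned long cur = 10000;           /* pages currently in frontswap */
    unsigned long tgt = cur - cur / 20;  /* 10000 - 500 = 9500 */
    /* subsequent runs, if pages keep falling: 9500 -> 9025 -> 8574 -> ... */
    /* once cur <= 20 (the hysteresis value), tgt drops straight to 0     */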
-
-#ifndef CONFIG_RAMSTER_MODULE
-static int __init ramster_nofrontswap_selfshrink_setup(char *s)
-{
- use_frontswap_selfshrink = false;
- return 1;
-}
-
-__setup("noselfshrink", ramster_nofrontswap_selfshrink_setup);
-#endif
-
-static void selfshrink_process(struct work_struct *work)
-{
- if (frontswap_selfshrinking && frontswap_enabled) {
- frontswap_selfshrink();
- schedule_delayed_work(&selfshrink_worker,
- selfshrink_interval * HZ);
- }
-}
-
-void ramster_cpu_up(int cpu)
-{
- unsigned char *p1 = kzalloc(PAGE_SIZE, GFP_KERNEL | __GFP_REPEAT);
- unsigned char *p2 = kzalloc(PAGE_SIZE, GFP_KERNEL | __GFP_REPEAT);
- BUG_ON(!p1 || !p2);
- per_cpu(ramster_remoteputmem1, cpu) = p1;
- per_cpu(ramster_remoteputmem2, cpu) = p2;
-}
-EXPORT_SYMBOL_GPL(ramster_cpu_up);
-
-void ramster_cpu_down(int cpu)
-{
- struct ramster_preload *kp;
-
- kfree(per_cpu(ramster_remoteputmem1, cpu));
- per_cpu(ramster_remoteputmem1, cpu) = NULL;
- kfree(per_cpu(ramster_remoteputmem2, cpu));
- per_cpu(ramster_remoteputmem2, cpu) = NULL;
- kp = &per_cpu(ramster_preloads, cpu);
- if (kp->flnode) {
- kmem_cache_free(ramster_flnode_cache, kp->flnode);
- kp->flnode = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(ramster_cpu_down);
-
-void ramster_register_pamops(struct tmem_pamops *pamops)
-{
- pamops->free_obj = ramster_pampd_free_obj;
- pamops->new_obj = ramster_pampd_new_obj;
- pamops->replace_in_obj = ramster_pampd_replace_in_obj;
- pamops->is_remote = ramster_pampd_is_remote;
- pamops->repatriate = ramster_pampd_repatriate;
- pamops->repatriate_preload = ramster_pampd_repatriate_preload;
-}
-EXPORT_SYMBOL_GPL(ramster_register_pamops);
-
-void ramster_init(bool cleancache, bool frontswap,
- bool frontswap_exclusive_gets,
- bool frontswap_selfshrink)
-{
- int ret = 0;
-
- if (cleancache)
- use_cleancache = true;
- if (frontswap)
- use_frontswap = true;
- if (frontswap_exclusive_gets)
- use_frontswap_exclusive_gets = true;
- ramster_debugfs_init();
- ret = sysfs_create_group(mm_kobj, &ramster_attr_group);
- if (ret)
- pr_err("ramster: can't create sysfs for ramster\n");
- (void)r2net_register_handlers();
-#ifdef CONFIG_RAMSTER_MODULE
- ret = r2nm_init();
- if (ret)
- pr_err("ramster: can't init r2net\n");
- frontswap_selfshrinking = frontswap_selfshrink;
-#else
- frontswap_selfshrinking = use_frontswap_selfshrink;
-#endif
- INIT_LIST_HEAD(&ramster_rem_op_list);
- ramster_flnode_cache = kmem_cache_create("ramster_flnode",
- sizeof(struct flushlist_node), 0, 0, NULL);
- if (frontswap_selfshrinking) {
- pr_info("ramster: Initializing frontswap selfshrink driver.\n");
- schedule_delayed_work(&selfshrink_worker,
- selfshrink_interval * HZ);
- }
- ramster_remotify_init();
-}
-EXPORT_SYMBOL_GPL(ramster_init);
diff --git a/drivers/staging/zcache/ramster/ramster.h b/drivers/staging/zcache/ramster/ramster.h
deleted file mode 100644
index 6d41a7a772e..00000000000
--- a/drivers/staging/zcache/ramster/ramster.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * ramster.h
- *
- * Peer-to-peer transcendent memory
- *
- * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
- */
-
-#ifndef _RAMSTER_RAMSTER_H_
-#define _RAMSTER_RAMSTER_H_
-
-#include "../tmem.h"
-
-enum ramster_remotify_op {
- RAMSTER_REMOTIFY_FLUSH_PAGE,
- RAMSTER_REMOTIFY_FLUSH_OBJ,
-};
-
-struct ramster_remotify_hdr {
- enum ramster_remotify_op op;
- struct list_head list;
-};
-
-struct flushlist_node {
- struct ramster_remotify_hdr rem_op;
- struct tmem_xhandle xh;
-};
-
-struct ramster_preload {
- struct flushlist_node *flnode;
-};
-
-union remotify_list_node {
- struct ramster_remotify_hdr rem_op;
- struct {
- struct ramster_remotify_hdr rem_op;
- struct tmem_handle th;
- } zbud_hdr;
- struct flushlist_node flist;
-};
-
-/*
- * format of remote pampd:
- * bit 0 is reserved for zbud (in-page buddy selection)
- * bit 1 == intransit
- * bit 2 == is_remote... if this bit is set, then
- * bit 3-10 == remotenode
- * bit 11-23 == size
- * bit 24-31 == cksum
- */
-#define FAKE_PAMPD_INTRANSIT_BITS 1
-#define FAKE_PAMPD_ISREMOTE_BITS 1
-#define FAKE_PAMPD_REMOTENODE_BITS 8
-#define FAKE_PAMPD_REMOTESIZE_BITS 13
-#define FAKE_PAMPD_CHECKSUM_BITS 8
-
-#define FAKE_PAMPD_INTRANSIT_SHIFT 1
-#define FAKE_PAMPD_ISREMOTE_SHIFT (FAKE_PAMPD_INTRANSIT_SHIFT + \
- FAKE_PAMPD_INTRANSIT_BITS)
-#define FAKE_PAMPD_REMOTENODE_SHIFT (FAKE_PAMPD_ISREMOTE_SHIFT + \
- FAKE_PAMPD_ISREMOTE_BITS)
-#define FAKE_PAMPD_REMOTESIZE_SHIFT (FAKE_PAMPD_REMOTENODE_SHIFT + \
- FAKE_PAMPD_REMOTENODE_BITS)
-#define FAKE_PAMPD_CHECKSUM_SHIFT (FAKE_PAMPD_REMOTESIZE_SHIFT + \
- FAKE_PAMPD_REMOTESIZE_BITS)
-
-#define FAKE_PAMPD_MASK(x) ((1UL << (x)) - 1)
-
-static inline void *pampd_make_remote(int remotenode, size_t size,
- unsigned char cksum)
-{
- unsigned long fake_pampd = 0;
- fake_pampd |= 1UL << FAKE_PAMPD_ISREMOTE_SHIFT;
- fake_pampd |= ((unsigned long)remotenode &
- FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTENODE_BITS)) <<
- FAKE_PAMPD_REMOTENODE_SHIFT;
- fake_pampd |= ((unsigned long)size &
- FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTESIZE_BITS)) <<
- FAKE_PAMPD_REMOTESIZE_SHIFT;
- fake_pampd |= ((unsigned long)cksum &
- FAKE_PAMPD_MASK(FAKE_PAMPD_CHECKSUM_BITS)) <<
- FAKE_PAMPD_CHECKSUM_SHIFT;
- return (void *)fake_pampd;
-}
-
-static inline unsigned int pampd_remote_node(void *pampd)
-{
- unsigned long fake_pampd = (unsigned long)pampd;
- return (fake_pampd >> FAKE_PAMPD_REMOTENODE_SHIFT) &
- FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTENODE_BITS);
-}
-
-static inline unsigned int pampd_remote_size(void *pampd)
-{
- unsigned long fake_pampd = (unsigned long)pampd;
- return (fake_pampd >> FAKE_PAMPD_REMOTESIZE_SHIFT) &
- FAKE_PAMPD_MASK(FAKE_PAMPD_REMOTESIZE_BITS);
-}
-
-static inline unsigned char pampd_remote_cksum(void *pampd)
-{
- unsigned long fake_pampd = (unsigned long)pampd;
- return (fake_pampd >> FAKE_PAMPD_CHECKSUM_SHIFT) &
- FAKE_PAMPD_MASK(FAKE_PAMPD_CHECKSUM_BITS);
-}
-
-static inline bool pampd_is_remote(void *pampd)
-{
- unsigned long fake_pampd = (unsigned long)pampd;
- return (fake_pampd >> FAKE_PAMPD_ISREMOTE_SHIFT) &
- FAKE_PAMPD_MASK(FAKE_PAMPD_ISREMOTE_BITS);
-}
-
-static inline bool pampd_is_intransit(void *pampd)
-{
- unsigned long fake_pampd = (unsigned long)pampd;
- return (fake_pampd >> FAKE_PAMPD_INTRANSIT_SHIFT) &
- FAKE_PAMPD_MASK(FAKE_PAMPD_INTRANSIT_BITS);
-}
-
-/* note that it is a BUG for intransit to be set without isremote also set */
-static inline void *pampd_mark_intransit(void *pampd)
-{
- unsigned long fake_pampd = (unsigned long)pampd;
-
- fake_pampd |= 1UL << FAKE_PAMPD_ISREMOTE_SHIFT;
- fake_pampd |= 1UL << FAKE_PAMPD_INTRANSIT_SHIFT;
- return (void *)fake_pampd;
-}
-
-static inline void *pampd_mask_intransit_and_remote(void *marked_pampd)
-{
- unsigned long pampd = (unsigned long)marked_pampd;
-
- pampd &= ~(1UL << FAKE_PAMPD_INTRANSIT_SHIFT);
- pampd &= ~(1UL << FAKE_PAMPD_ISREMOTE_SHIFT);
- return (void *)pampd;
-}
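As a quick sanity check of the bit layout documented above (illustrative, not part of
the patch), packing node 5, size 3000 and checksum 0xab gives:

    void *p = pampd_make_remote(5, 3000, 0xab);
    /*
     * is_remote bit:  1UL << 2       = 0x00000004
     * remotenode:     5UL << 3       = 0x00000028
     * size:           3000UL << 11   = 0x005dc000
     * checksum:       0xabUL << 24   = 0xab000000
     * so (unsigned long)p            = 0xab5dc02c
     *
     * and the accessors invert it:
     *   pampd_is_remote(p)    == true
     *   pampd_remote_node(p)  == 5
     *   pampd_remote_size(p)  == 3000
     *   pampd_remote_cksum(p) == 0xab
     */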
-
-extern int r2net_remote_async_get(struct tmem_xhandle *,
- bool, int, size_t, uint8_t, void *extra);
-extern int r2net_remote_put(struct tmem_xhandle *, char *, size_t,
- bool, int *);
-extern int r2net_remote_flush(struct tmem_xhandle *, int);
-extern int r2net_remote_flush_object(struct tmem_xhandle *, int);
-extern int r2net_register_handlers(void);
-extern int r2net_remote_target_node_set(int);
-
-extern int ramster_remotify_pageframe(bool);
-extern void ramster_init(bool, bool, bool, bool);
-extern void ramster_register_pamops(struct tmem_pamops *);
-extern int ramster_localify(int, struct tmem_oid *oidp, uint32_t, char *,
- unsigned int, void *);
-extern void *ramster_pampd_free(void *, struct tmem_pool *, struct tmem_oid *,
- uint32_t, bool);
-extern void ramster_count_foreign_pages(bool, int);
-extern int ramster_do_preload_flnode(struct tmem_pool *);
-extern void ramster_cpu_up(int);
-extern void ramster_cpu_down(int);
-
-#endif /* _RAMSTER_RAMSTER_H */
diff --git a/drivers/staging/zcache/ramster/ramster_nodemanager.h b/drivers/staging/zcache/ramster/ramster_nodemanager.h
deleted file mode 100644
index dbaae34ea61..00000000000
--- a/drivers/staging/zcache/ramster/ramster_nodemanager.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * ramster_nodemanager.h
- *
- * Header describing the interface between userspace and the kernel
- * for the ramster_nodemanager module.
- *
- * Copyright (C) 2002, 2004, 2012 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- *
- */
-
-#ifndef _RAMSTER_NODEMANAGER_H
-#define _RAMSTER_NODEMANAGER_H
-
-#define R2NM_API_VERSION 5
-
-#define R2NM_MAX_NODES 255
-#define R2NM_INVALID_NODE_NUM 255
-
-/* host name, group name, cluster name all 64 bytes */
-#define R2NM_MAX_NAME_LEN 64 /* __NEW_UTS_LEN */
-
-extern int r2nm_init(void);
-
-#endif /* _RAMSTER_NODEMANAGER_H */
diff --git a/drivers/staging/zcache/ramster/tcp.c b/drivers/staging/zcache/ramster/tcp.c
deleted file mode 100644
index f6e1e5209d8..00000000000
--- a/drivers/staging/zcache/ramster/tcp.c
+++ /dev/null
@@ -1,2248 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- *
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * Copyright (C) 2004 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- *
- * ----
- *
- * Callers for this were originally written against a very simple synchronous
- * API. This implementation reflects those simple callers. Some day I'm sure
- * we'll need to move to a more robust posting/callback mechanism.
- *
- * Transmit calls pass in kernel virtual addresses and block copying this into
- * the socket's tx buffers via a usual blocking sendmsg. They'll block waiting
- * for a failed socket to timeout. TX callers can also pass in a pointer to an
- * 'int' which gets filled with an errno off the wire in response to the
- * message they send.
- *
- * Handlers for unsolicited messages are registered. Each socket has a page
- * that incoming data is copied into. First the header, then the data.
- * Handlers are called from only one thread with a reference to this per-socket
- * page. This page is destroyed after the handler call, so it can't be
- * referenced beyond the call. Handlers may block but are discouraged from
- * doing so.
- *
- * Any framing errors (bad magic, large payload lengths) close a connection.
- *
- * Our sock_container holds the state we associate with a socket. Its current
- * framing state is held there as well as the refcounting we do around when it
- * is safe to tear down the socket. The socket is only finally torn down from
- * the container when the container loses all of its references -- so as long
- * as you hold a ref on the container you can trust that the socket is valid
- * for use with kernel socket APIs.
- *
- * Connections are initiated between a pair of nodes when the node with the
- * higher node number gets a heartbeat callback which indicates that the lower
- * numbered node has started heartbeating. The lower numbered node is passive
- * and only accepts the connection if the higher numbered node is heartbeating.
- */
-
-#include <linux/kernel.h>
-#include <linux/jiffies.h>
-#include <linux/slab.h>
-#include <linux/idr.h>
-#include <linux/kref.h>
-#include <linux/net.h>
-#include <linux/export.h>
-#include <linux/uaccess.h>
-#include <net/tcp.h>
-
-
-#include "heartbeat.h"
-#include "tcp.h"
-#include "nodemanager.h"
-#define MLOG_MASK_PREFIX ML_TCP
-#include "masklog.h"
-
-#include "tcp_internal.h"
-
-#define SC_NODEF_FMT "node %s (num %u) at %pI4:%u"
-
-/*
- * In the following two log macros, the whitespace after the ',' just
- * before ##args is intentional. Otherwise, gcc 2.95 will eat the
- * previous token if args expands to nothing.
- */
-#define msglog(hdr, fmt, args...) do { \
- typeof(hdr) __hdr = (hdr); \
- mlog(ML_MSG, "[mag %u len %u typ %u stat %d sys_stat %d " \
- "key %08x num %u] " fmt, \
- be16_to_cpu(__hdr->magic), be16_to_cpu(__hdr->data_len), \
- be16_to_cpu(__hdr->msg_type), be32_to_cpu(__hdr->status), \
- be32_to_cpu(__hdr->sys_status), be32_to_cpu(__hdr->key), \
- be32_to_cpu(__hdr->msg_num) , ##args); \
-} while (0)
-
-#define sclog(sc, fmt, args...) do { \
- typeof(sc) __sc = (sc); \
- mlog(ML_SOCKET, "[sc %p refs %d sock %p node %u page %p " \
- "pg_off %zu] " fmt, __sc, \
- atomic_read(&__sc->sc_kref.refcount), __sc->sc_sock, \
- __sc->sc_node->nd_num, __sc->sc_page, __sc->sc_page_off , \
- ##args); \
-} while (0)
-
-static DEFINE_RWLOCK(r2net_handler_lock);
-static struct rb_root r2net_handler_tree = RB_ROOT;
-
-static struct r2net_node r2net_nodes[R2NM_MAX_NODES];
-
-/* XXX someday we'll need better accounting */
-static struct socket *r2net_listen_sock;
-
-/*
- * listen work is only queued by the listening socket callbacks on the
- * r2net_wq. teardown detaches the callbacks before destroying the workqueue.
- * quorum work is queued as sock containers are shut down. stop_listening
- * tears down all the node's sock containers, preventing future shutdowns
- * and queued quorum work, before canceling delayed quorum work and
- * destroying the work queue.
- */
-static struct workqueue_struct *r2net_wq;
-static struct work_struct r2net_listen_work;
-
-static struct r2hb_callback_func r2net_hb_up, r2net_hb_down;
-#define R2NET_HB_PRI 0x1
-
-static struct r2net_handshake *r2net_hand;
-static struct r2net_msg *r2net_keep_req, *r2net_keep_resp;
-
-static int r2net_sys_err_translations[R2NET_ERR_MAX] = {
- [R2NET_ERR_NONE] = 0,
- [R2NET_ERR_NO_HNDLR] = -ENOPROTOOPT,
- [R2NET_ERR_OVERFLOW] = -EOVERFLOW,
- [R2NET_ERR_DIED] = -EHOSTDOWN,};
-
-/* can't quite avoid *all* internal declarations :/ */
-static void r2net_sc_connect_completed(struct work_struct *work);
-static void r2net_rx_until_empty(struct work_struct *work);
-static void r2net_shutdown_sc(struct work_struct *work);
-static void r2net_listen_data_ready(struct sock *sk, int bytes);
-static void r2net_sc_send_keep_req(struct work_struct *work);
-static void r2net_idle_timer(unsigned long data);
-static void r2net_sc_postpone_idle(struct r2net_sock_container *sc);
-static void r2net_sc_reset_idle_timer(struct r2net_sock_container *sc);
-
-#ifdef CONFIG_DEBUG_FS
-static void r2net_init_nst(struct r2net_send_tracking *nst, u32 msgtype,
- u32 msgkey, struct task_struct *task, u8 node)
-{
- INIT_LIST_HEAD(&nst->st_net_debug_item);
- nst->st_task = task;
- nst->st_msg_type = msgtype;
- nst->st_msg_key = msgkey;
- nst->st_node = node;
-}
-
-static inline void r2net_set_nst_sock_time(struct r2net_send_tracking *nst)
-{
- nst->st_sock_time = ktime_get();
-}
-
-static inline void r2net_set_nst_send_time(struct r2net_send_tracking *nst)
-{
- nst->st_send_time = ktime_get();
-}
-
-static inline void r2net_set_nst_status_time(struct r2net_send_tracking *nst)
-{
- nst->st_status_time = ktime_get();
-}
-
-static inline void r2net_set_nst_sock_container(struct r2net_send_tracking *nst,
- struct r2net_sock_container *sc)
-{
- nst->st_sc = sc;
-}
-
-static inline void r2net_set_nst_msg_id(struct r2net_send_tracking *nst,
- u32 msg_id)
-{
- nst->st_id = msg_id;
-}
-
-static inline void r2net_set_sock_timer(struct r2net_sock_container *sc)
-{
- sc->sc_tv_timer = ktime_get();
-}
-
-static inline void r2net_set_data_ready_time(struct r2net_sock_container *sc)
-{
- sc->sc_tv_data_ready = ktime_get();
-}
-
-static inline void r2net_set_advance_start_time(struct r2net_sock_container *sc)
-{
- sc->sc_tv_advance_start = ktime_get();
-}
-
-static inline void r2net_set_advance_stop_time(struct r2net_sock_container *sc)
-{
- sc->sc_tv_advance_stop = ktime_get();
-}
-
-static inline void r2net_set_func_start_time(struct r2net_sock_container *sc)
-{
- sc->sc_tv_func_start = ktime_get();
-}
-
-static inline void r2net_set_func_stop_time(struct r2net_sock_container *sc)
-{
- sc->sc_tv_func_stop = ktime_get();
-}
-
-#else /* CONFIG_DEBUG_FS */
-# define r2net_init_nst(a, b, c, d, e)
-# define r2net_set_nst_sock_time(a)
-# define r2net_set_nst_send_time(a)
-# define r2net_set_nst_status_time(a)
-# define r2net_set_nst_sock_container(a, b)
-# define r2net_set_nst_msg_id(a, b)
-# define r2net_set_sock_timer(a)
-# define r2net_set_data_ready_time(a)
-# define r2net_set_advance_start_time(a)
-# define r2net_set_advance_stop_time(a)
-# define r2net_set_func_start_time(a)
-# define r2net_set_func_stop_time(a)
-#endif /* CONFIG_DEBUG_FS */
-
-#ifdef CONFIG_RAMSTER_FS_STATS
-static ktime_t r2net_get_func_run_time(struct r2net_sock_container *sc)
-{
- return ktime_sub(sc->sc_tv_func_stop, sc->sc_tv_func_start);
-}
-
-static void r2net_update_send_stats(struct r2net_send_tracking *nst,
- struct r2net_sock_container *sc)
-{
- sc->sc_tv_status_total = ktime_add(sc->sc_tv_status_total,
- ktime_sub(ktime_get(),
- nst->st_status_time));
- sc->sc_tv_send_total = ktime_add(sc->sc_tv_send_total,
- ktime_sub(nst->st_status_time,
- nst->st_send_time));
- sc->sc_tv_acquiry_total = ktime_add(sc->sc_tv_acquiry_total,
- ktime_sub(nst->st_send_time,
- nst->st_sock_time));
- sc->sc_send_count++;
-}
-
-static void r2net_update_recv_stats(struct r2net_sock_container *sc)
-{
- sc->sc_tv_process_total = ktime_add(sc->sc_tv_process_total,
- r2net_get_func_run_time(sc));
- sc->sc_recv_count++;
-}
-
-#else
-
-# define r2net_update_send_stats(a, b)
-
-# define r2net_update_recv_stats(sc)
-
-#endif /* CONFIG_RAMSTER_FS_STATS */
-
-static inline int r2net_reconnect_delay(void)
-{
- return r2nm_single_cluster->cl_reconnect_delay_ms;
-}
-
-static inline int r2net_keepalive_delay(void)
-{
- return r2nm_single_cluster->cl_keepalive_delay_ms;
-}
-
-static inline int r2net_idle_timeout(void)
-{
- return r2nm_single_cluster->cl_idle_timeout_ms;
-}
-
-static inline int r2net_sys_err_to_errno(enum r2net_system_error err)
-{
- int trans;
- BUG_ON(err >= R2NET_ERR_MAX);
- trans = r2net_sys_err_translations[err];
-
- /* Just in case we mess up the translation table above */
- BUG_ON(err != R2NET_ERR_NONE && trans == 0);
- return trans;
-}
-
-struct r2net_node *r2net_nn_from_num(u8 node_num)
-{
- BUG_ON(node_num >= ARRAY_SIZE(r2net_nodes));
- return &r2net_nodes[node_num];
-}
-
-static u8 r2net_num_from_nn(struct r2net_node *nn)
-{
- BUG_ON(nn == NULL);
- return nn - r2net_nodes;
-}
-
-/* ------------------------------------------------------------ */
-
-static int r2net_prep_nsw(struct r2net_node *nn, struct r2net_status_wait *nsw)
-{
- int ret;
-
- spin_lock(&nn->nn_lock);
- ret = idr_alloc(&nn->nn_status_idr, nsw, 0, 0, GFP_ATOMIC);
- if (ret >= 0) {
- nsw->ns_id = ret;
- list_add_tail(&nsw->ns_node_item, &nn->nn_status_list);
- }
- spin_unlock(&nn->nn_lock);
-
- if (ret >= 0) {
- init_waitqueue_head(&nsw->ns_wq);
- nsw->ns_sys_status = R2NET_ERR_NONE;
- nsw->ns_status = 0;
- return 0;
- }
- return ret;
-}
-
-static void r2net_complete_nsw_locked(struct r2net_node *nn,
- struct r2net_status_wait *nsw,
- enum r2net_system_error sys_status,
- s32 status)
-{
- assert_spin_locked(&nn->nn_lock);
-
- if (!list_empty(&nsw->ns_node_item)) {
- list_del_init(&nsw->ns_node_item);
- nsw->ns_sys_status = sys_status;
- nsw->ns_status = status;
- idr_remove(&nn->nn_status_idr, nsw->ns_id);
- wake_up(&nsw->ns_wq);
- }
-}
-
-static void r2net_complete_nsw(struct r2net_node *nn,
- struct r2net_status_wait *nsw,
- u64 id, enum r2net_system_error sys_status,
- s32 status)
-{
- spin_lock(&nn->nn_lock);
- if (nsw == NULL) {
- if (id > INT_MAX)
- goto out;
-
- nsw = idr_find(&nn->nn_status_idr, id);
- if (nsw == NULL)
- goto out;
- }
-
- r2net_complete_nsw_locked(nn, nsw, sys_status, status);
-
-out:
- spin_unlock(&nn->nn_lock);
- return;
-}
-
-static void r2net_complete_nodes_nsw(struct r2net_node *nn)
-{
- struct r2net_status_wait *nsw, *tmp;
- unsigned int num_kills = 0;
-
- assert_spin_locked(&nn->nn_lock);
-
- list_for_each_entry_safe(nsw, tmp, &nn->nn_status_list, ns_node_item) {
- r2net_complete_nsw_locked(nn, nsw, R2NET_ERR_DIED, 0);
- num_kills++;
- }
-
- mlog(0, "completed %d messages for node %u\n", num_kills,
- r2net_num_from_nn(nn));
-}
-
-static int r2net_nsw_completed(struct r2net_node *nn,
- struct r2net_status_wait *nsw)
-{
- int completed;
- spin_lock(&nn->nn_lock);
- completed = list_empty(&nsw->ns_node_item);
- spin_unlock(&nn->nn_lock);
- return completed;
-}
-
-/* ------------------------------------------------------------ */
-
-static void sc_kref_release(struct kref *kref)
-{
- struct r2net_sock_container *sc = container_of(kref,
- struct r2net_sock_container, sc_kref);
- BUG_ON(timer_pending(&sc->sc_idle_timeout));
-
- sclog(sc, "releasing\n");
-
- if (sc->sc_sock) {
- sock_release(sc->sc_sock);
- sc->sc_sock = NULL;
- }
-
- r2nm_undepend_item(&sc->sc_node->nd_item);
- r2nm_node_put(sc->sc_node);
- sc->sc_node = NULL;
-
- r2net_debug_del_sc(sc);
- kfree(sc);
-}
-
-static void sc_put(struct r2net_sock_container *sc)
-{
- sclog(sc, "put\n");
- kref_put(&sc->sc_kref, sc_kref_release);
-}
-static void sc_get(struct r2net_sock_container *sc)
-{
- sclog(sc, "get\n");
- kref_get(&sc->sc_kref);
-}
-static struct r2net_sock_container *sc_alloc(struct r2nm_node *node)
-{
- struct r2net_sock_container *sc, *ret = NULL;
- struct page *page = NULL;
- int status = 0;
-
- page = alloc_page(GFP_NOFS);
- sc = kzalloc(sizeof(*sc), GFP_NOFS);
- if (sc == NULL || page == NULL)
- goto out;
-
- kref_init(&sc->sc_kref);
- r2nm_node_get(node);
- sc->sc_node = node;
-
- /* pin the node item of the remote node */
- status = r2nm_depend_item(&node->nd_item);
- if (status) {
- mlog_errno(status);
- r2nm_node_put(node);
- goto out;
- }
- INIT_WORK(&sc->sc_connect_work, r2net_sc_connect_completed);
- INIT_WORK(&sc->sc_rx_work, r2net_rx_until_empty);
- INIT_WORK(&sc->sc_shutdown_work, r2net_shutdown_sc);
- INIT_DELAYED_WORK(&sc->sc_keepalive_work, r2net_sc_send_keep_req);
-
- init_timer(&sc->sc_idle_timeout);
- sc->sc_idle_timeout.function = r2net_idle_timer;
- sc->sc_idle_timeout.data = (unsigned long)sc;
-
- sclog(sc, "alloced\n");
-
- ret = sc;
- sc->sc_page = page;
- r2net_debug_add_sc(sc);
- sc = NULL;
- page = NULL;
-
-out:
- if (page)
- __free_page(page);
- kfree(sc);
-
- return ret;
-}
-
-/* ------------------------------------------------------------ */
-
-static void r2net_sc_queue_work(struct r2net_sock_container *sc,
- struct work_struct *work)
-{
- sc_get(sc);
- if (!queue_work(r2net_wq, work))
- sc_put(sc);
-}
-static void r2net_sc_queue_delayed_work(struct r2net_sock_container *sc,
- struct delayed_work *work,
- int delay)
-{
- sc_get(sc);
- if (!queue_delayed_work(r2net_wq, work, delay))
- sc_put(sc);
-}
-static void r2net_sc_cancel_delayed_work(struct r2net_sock_container *sc,
- struct delayed_work *work)
-{
- if (cancel_delayed_work(work))
- sc_put(sc);
-}
-
-static atomic_t r2net_connected_peers = ATOMIC_INIT(0);
-
-int r2net_num_connected_peers(void)
-{
- return atomic_read(&r2net_connected_peers);
-}
-
-static void r2net_set_nn_state(struct r2net_node *nn,
- struct r2net_sock_container *sc,
- unsigned valid, int err)
-{
- int was_valid = nn->nn_sc_valid;
- int was_err = nn->nn_persistent_error;
- struct r2net_sock_container *old_sc = nn->nn_sc;
-
- assert_spin_locked(&nn->nn_lock);
-
- if (old_sc && !sc)
- atomic_dec(&r2net_connected_peers);
- else if (!old_sc && sc)
- atomic_inc(&r2net_connected_peers);
-
- /* the node num comparison and single connect/accept path should stop
- * a non-null sc from being overwritten with another */
- BUG_ON(sc && nn->nn_sc && nn->nn_sc != sc);
- mlog_bug_on_msg(err && valid, "err %d valid %u\n", err, valid);
- mlog_bug_on_msg(valid && !sc, "valid %u sc %p\n", valid, sc);
-
- if (was_valid && !valid && err == 0)
- err = -ENOTCONN;
-
- mlog(ML_CONN, "node %u sc: %p -> %p, valid %u -> %u, err %d -> %d\n",
- r2net_num_from_nn(nn), nn->nn_sc, sc, nn->nn_sc_valid, valid,
- nn->nn_persistent_error, err);
-
- nn->nn_sc = sc;
- nn->nn_sc_valid = valid ? 1 : 0;
- nn->nn_persistent_error = err;
-
- /* mirrors r2net_tx_can_proceed() */
- if (nn->nn_persistent_error || nn->nn_sc_valid)
- wake_up(&nn->nn_sc_wq);
-
- if (!was_err && nn->nn_persistent_error) {
- queue_delayed_work(r2net_wq, &nn->nn_still_up,
- msecs_to_jiffies(R2NET_QUORUM_DELAY_MS));
- }
-
- if (was_valid && !valid) {
- pr_notice("ramster: No longer connected to " SC_NODEF_FMT "\n",
- old_sc->sc_node->nd_name, old_sc->sc_node->nd_num,
- &old_sc->sc_node->nd_ipv4_address,
- ntohs(old_sc->sc_node->nd_ipv4_port));
- r2net_complete_nodes_nsw(nn);
- }
-
- if (!was_valid && valid) {
- cancel_delayed_work(&nn->nn_connect_expired);
- pr_notice("ramster: %s " SC_NODEF_FMT "\n",
- r2nm_this_node() > sc->sc_node->nd_num ?
- "Connected to" : "Accepted connection from",
- sc->sc_node->nd_name, sc->sc_node->nd_num,
- &sc->sc_node->nd_ipv4_address,
- ntohs(sc->sc_node->nd_ipv4_port));
- }
-
- /* trigger the connecting worker func as long as we're not valid,
- * it will back off if it shouldn't connect. This can be called
- * from node config teardown and so needs to be careful about
- * the work queue actually being up. */
- if (!valid && r2net_wq) {
- unsigned long delay;
- /* delay if we're within a RECONNECT_DELAY of the
- * last attempt */
- delay = (nn->nn_last_connect_attempt +
- msecs_to_jiffies(r2net_reconnect_delay()))
- - jiffies;
- if (delay > msecs_to_jiffies(r2net_reconnect_delay()))
- delay = 0;
- mlog(ML_CONN, "queueing conn attempt in %lu jiffies\n", delay);
- queue_delayed_work(r2net_wq, &nn->nn_connect_work, delay);
-
- /*
- * Delay the expired work after idle timeout.
- *
- * We might have lots of failed connection attempts that run
- * through here but we only cancel the connect_expired work when
- * a connection attempt succeeds. So only the first enqueue of
- * the connect_expired work will do anything. The rest will see
- * that it's already queued and do nothing.
- */
- delay += msecs_to_jiffies(r2net_idle_timeout());
- queue_delayed_work(r2net_wq, &nn->nn_connect_expired, delay);
- }
-
- /* keep track of the nn's sc ref for the caller */
- if ((old_sc == NULL) && sc)
- sc_get(sc);
- if (old_sc && (old_sc != sc)) {
- r2net_sc_queue_work(old_sc, &old_sc->sc_shutdown_work);
- sc_put(old_sc);
- }
-}
-
-/* see r2net_register_callbacks() */
-static void r2net_data_ready(struct sock *sk, int bytes)
-{
- void (*ready)(struct sock *sk, int bytes);
-
- read_lock(&sk->sk_callback_lock);
- if (sk->sk_user_data) {
- struct r2net_sock_container *sc = sk->sk_user_data;
- sclog(sc, "data_ready hit\n");
- r2net_set_data_ready_time(sc);
- r2net_sc_queue_work(sc, &sc->sc_rx_work);
- ready = sc->sc_data_ready;
- } else {
- ready = sk->sk_data_ready;
- }
- read_unlock(&sk->sk_callback_lock);
-
- ready(sk, bytes);
-}
-
-/* see r2net_register_callbacks() */
-static void r2net_state_change(struct sock *sk)
-{
- void (*state_change)(struct sock *sk);
- struct r2net_sock_container *sc;
-
- read_lock(&sk->sk_callback_lock);
- sc = sk->sk_user_data;
- if (sc == NULL) {
- state_change = sk->sk_state_change;
- goto out;
- }
-
- sclog(sc, "state_change to %d\n", sk->sk_state);
-
- state_change = sc->sc_state_change;
-
- switch (sk->sk_state) {
-
- /* ignore connecting sockets as they make progress */
- case TCP_SYN_SENT:
- case TCP_SYN_RECV:
- break;
- case TCP_ESTABLISHED:
- r2net_sc_queue_work(sc, &sc->sc_connect_work);
- break;
- default:
- pr_info("ramster: Connection to "
- SC_NODEF_FMT " shutdown, state %d\n",
- sc->sc_node->nd_name, sc->sc_node->nd_num,
- &sc->sc_node->nd_ipv4_address,
- ntohs(sc->sc_node->nd_ipv4_port), sk->sk_state);
- r2net_sc_queue_work(sc, &sc->sc_shutdown_work);
- break;
-
- }
-out:
- read_unlock(&sk->sk_callback_lock);
- state_change(sk);
-}
-
-/*
- * we register callbacks so we can queue work on events before calling
- * the original callbacks. our callbacks are careful to test user_data
- * to discover when they've raced with r2net_unregister_callbacks().
- */
-static void r2net_register_callbacks(struct sock *sk,
- struct r2net_sock_container *sc)
-{
- write_lock_bh(&sk->sk_callback_lock);
-
- /* accepted sockets inherit the old listen socket data ready */
- if (sk->sk_data_ready == r2net_listen_data_ready) {
- sk->sk_data_ready = sk->sk_user_data;
- sk->sk_user_data = NULL;
- }
-
- BUG_ON(sk->sk_user_data != NULL);
- sk->sk_user_data = sc;
- sc_get(sc);
-
- sc->sc_data_ready = sk->sk_data_ready;
- sc->sc_state_change = sk->sk_state_change;
- sk->sk_data_ready = r2net_data_ready;
- sk->sk_state_change = r2net_state_change;
-
- mutex_init(&sc->sc_send_lock);
-
- write_unlock_bh(&sk->sk_callback_lock);
-}
-
-static int r2net_unregister_callbacks(struct sock *sk,
- struct r2net_sock_container *sc)
-{
- int ret = 0;
-
- write_lock_bh(&sk->sk_callback_lock);
- if (sk->sk_user_data == sc) {
- ret = 1;
- sk->sk_user_data = NULL;
- sk->sk_data_ready = sc->sc_data_ready;
- sk->sk_state_change = sc->sc_state_change;
- }
- write_unlock_bh(&sk->sk_callback_lock);
-
- return ret;
-}
-
-/*
- * this is a little helper that is called by callers who have seen a problem
- * with an sc and want to detach it from the nn if someone hasn't already beaten
- * them to it. if an error is given then the shutdown will be persistent
- * and pending transmits will be canceled.
- */
-static void r2net_ensure_shutdown(struct r2net_node *nn,
- struct r2net_sock_container *sc,
- int err)
-{
- spin_lock(&nn->nn_lock);
- if (nn->nn_sc == sc)
- r2net_set_nn_state(nn, NULL, 0, err);
- spin_unlock(&nn->nn_lock);
-}
-
-/*
- * This work queue function performs the blocking parts of socket shutdown. A
- * few paths lead here. set_nn_state will trigger this callback if it sees an
- * sc detached from the nn. state_change will also trigger this callback
- * directly when it sees errors. In that case we need to call set_nn_state
- * ourselves as state_change couldn't get the nn_lock and call set_nn_state
- * itself.
- */
-static void r2net_shutdown_sc(struct work_struct *work)
-{
- struct r2net_sock_container *sc =
- container_of(work, struct r2net_sock_container,
- sc_shutdown_work);
- struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
-
- sclog(sc, "shutting down\n");
-
- /* drop the callbacks ref and call shutdown only once */
- if (r2net_unregister_callbacks(sc->sc_sock->sk, sc)) {
- /* we shouldn't flush as we're in the thread, the
- * races with pending sc work structs are harmless */
- del_timer_sync(&sc->sc_idle_timeout);
- r2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
- sc_put(sc);
- kernel_sock_shutdown(sc->sc_sock, SHUT_RDWR);
- }
-
- /* not fatal, so failed connects made before the other node has seen
- * our heartbeat can be retried */
- r2net_ensure_shutdown(nn, sc, 0);
- sc_put(sc);
-}
-
-/* ------------------------------------------------------------ */
-
-static int r2net_handler_cmp(struct r2net_msg_handler *nmh, u32 msg_type,
- u32 key)
-{
- int ret = memcmp(&nmh->nh_key, &key, sizeof(key));
-
- if (ret == 0)
- ret = memcmp(&nmh->nh_msg_type, &msg_type, sizeof(msg_type));
-
- return ret;
-}
-
-static struct r2net_msg_handler *
-r2net_handler_tree_lookup(u32 msg_type, u32 key, struct rb_node ***ret_p,
- struct rb_node **ret_parent)
-{
- struct rb_node **p = &r2net_handler_tree.rb_node;
- struct rb_node *parent = NULL;
- struct r2net_msg_handler *nmh, *ret = NULL;
- int cmp;
-
- while (*p) {
- parent = *p;
- nmh = rb_entry(parent, struct r2net_msg_handler, nh_node);
- cmp = r2net_handler_cmp(nmh, msg_type, key);
-
- if (cmp < 0)
- p = &(*p)->rb_left;
- else if (cmp > 0)
- p = &(*p)->rb_right;
- else {
- ret = nmh;
- break;
- }
- }
-
- if (ret_p != NULL)
- *ret_p = p;
- if (ret_parent != NULL)
- *ret_parent = parent;
-
- return ret;
-}
-
-static void r2net_handler_kref_release(struct kref *kref)
-{
- struct r2net_msg_handler *nmh;
- nmh = container_of(kref, struct r2net_msg_handler, nh_kref);
-
- kfree(nmh);
-}
-
-static void r2net_handler_put(struct r2net_msg_handler *nmh)
-{
- kref_put(&nmh->nh_kref, r2net_handler_kref_release);
-}
-
-/* max_len is protection for the handler func. incoming messages won't
- * be given to the handler if their payload is longer than the max. */
-int r2net_register_handler(u32 msg_type, u32 key, u32 max_len,
- r2net_msg_handler_func *func, void *data,
- r2net_post_msg_handler_func *post_func,
- struct list_head *unreg_list)
-{
- struct r2net_msg_handler *nmh = NULL;
- struct rb_node **p, *parent;
- int ret = 0;
-
- if (max_len > R2NET_MAX_PAYLOAD_BYTES) {
- mlog(0, "max_len for message handler out of range: %u\n",
- max_len);
- ret = -EINVAL;
- goto out;
- }
-
- if (!msg_type) {
- mlog(0, "no message type provided: %u, %p\n", msg_type, func);
- ret = -EINVAL;
- goto out;
-
- }
- if (!func) {
- mlog(0, "no message handler provided: %u, %p\n",
- msg_type, func);
- ret = -EINVAL;
- goto out;
- }
-
- nmh = kzalloc(sizeof(struct r2net_msg_handler), GFP_NOFS);
- if (nmh == NULL) {
- ret = -ENOMEM;
- goto out;
- }
-
- nmh->nh_func = func;
- nmh->nh_func_data = data;
- nmh->nh_post_func = post_func;
- nmh->nh_msg_type = msg_type;
- nmh->nh_max_len = max_len;
- nmh->nh_key = key;
- /* the tree and list get this ref.. they're both removed in
- * unregister when this ref is dropped */
- kref_init(&nmh->nh_kref);
- INIT_LIST_HEAD(&nmh->nh_unregister_item);
-
- write_lock(&r2net_handler_lock);
- if (r2net_handler_tree_lookup(msg_type, key, &p, &parent))
- ret = -EEXIST;
- else {
- rb_link_node(&nmh->nh_node, parent, p);
- rb_insert_color(&nmh->nh_node, &r2net_handler_tree);
- list_add_tail(&nmh->nh_unregister_item, unreg_list);
-
- mlog(ML_TCP, "registered handler func %p type %u key %08x\n",
- func, msg_type, key);
- /* we've had some trouble with handlers seemingly vanishing. */
- mlog_bug_on_msg(r2net_handler_tree_lookup(msg_type, key, &p,
- &parent) == NULL,
- "couldn't find handler we *just* registered "
- "for type %u key %08x\n", msg_type, key);
- }
- write_unlock(&r2net_handler_lock);
- if (ret)
- goto out;
-
-out:
- if (ret)
- kfree(nmh);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(r2net_register_handler);
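A hypothetical caller of r2net_register_handler(), to show how the pieces fit
together (all MY_* names and my_* functions are invented for illustration; the
handler signature follows the call site in r2net_process_message() below):

    static LIST_HEAD(my_unreg_list);

    /* invoked with the full message (header + payload); the return value
     * is sent back to the sender as the handler status */
    static int my_handler(struct r2net_msg *msg, u32 len, void *data,
                          void **ret_data)
    {
            return 0;
    }

    static int my_register(void)
    {
            return r2net_register_handler(MY_MSG_TYPE, MY_KEY,
                                          MY_MAX_PAYLOAD, my_handler,
                                          NULL /* data */,
                                          NULL /* post_func */,
                                          &my_unreg_list);
    }

    /* and on teardown: r2net_unregister_handler_list(&my_unreg_list); */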
-
-void r2net_unregister_handler_list(struct list_head *list)
-{
- struct r2net_msg_handler *nmh, *n;
-
- write_lock(&r2net_handler_lock);
- list_for_each_entry_safe(nmh, n, list, nh_unregister_item) {
- mlog(ML_TCP, "unregistering handler func %p type %u key %08x\n",
- nmh->nh_func, nmh->nh_msg_type, nmh->nh_key);
- rb_erase(&nmh->nh_node, &r2net_handler_tree);
- list_del_init(&nmh->nh_unregister_item);
- kref_put(&nmh->nh_kref, r2net_handler_kref_release);
- }
- write_unlock(&r2net_handler_lock);
-}
-EXPORT_SYMBOL_GPL(r2net_unregister_handler_list);
-
-static struct r2net_msg_handler *r2net_handler_get(u32 msg_type, u32 key)
-{
- struct r2net_msg_handler *nmh;
-
- read_lock(&r2net_handler_lock);
- nmh = r2net_handler_tree_lookup(msg_type, key, NULL, NULL);
- if (nmh)
- kref_get(&nmh->nh_kref);
- read_unlock(&r2net_handler_lock);
-
- return nmh;
-}
-
-/* ------------------------------------------------------------ */
-
-static int r2net_recv_tcp_msg(struct socket *sock, void *data, size_t len)
-{
- int ret;
- mm_segment_t oldfs;
- struct kvec vec = {
- .iov_len = len,
- .iov_base = data,
- };
- struct msghdr msg = {
- .msg_iovlen = 1,
- .msg_iov = (struct iovec *)&vec,
- .msg_flags = MSG_DONTWAIT,
- };
-
- oldfs = get_fs();
- set_fs(get_ds());
- ret = sock_recvmsg(sock, &msg, len, msg.msg_flags);
- set_fs(oldfs);
-
- return ret;
-}
-
-static int r2net_send_tcp_msg(struct socket *sock, struct kvec *vec,
- size_t veclen, size_t total)
-{
- int ret;
- mm_segment_t oldfs;
- struct msghdr msg = {
- .msg_iov = (struct iovec *)vec,
- .msg_iovlen = veclen,
- };
-
- if (sock == NULL) {
- ret = -EINVAL;
- goto out;
- }
-
- oldfs = get_fs();
- set_fs(get_ds());
- ret = sock_sendmsg(sock, &msg, total);
- set_fs(oldfs);
- if (ret != total) {
- mlog(ML_ERROR, "sendmsg returned %d instead of %zu\n", ret,
- total);
- if (ret >= 0)
- ret = -EPIPE; /* should be smarter, I bet */
- goto out;
- }
-
- ret = 0;
-out:
- if (ret < 0)
- mlog(0, "returning error: %d\n", ret);
- return ret;
-}
-
-static void r2net_sendpage(struct r2net_sock_container *sc,
- void *kmalloced_virt,
- size_t size)
-{
- struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
- ssize_t ret;
-
- while (1) {
- mutex_lock(&sc->sc_send_lock);
- ret = sc->sc_sock->ops->sendpage(sc->sc_sock,
- virt_to_page(kmalloced_virt),
- (long)kmalloced_virt & ~PAGE_MASK,
- size, MSG_DONTWAIT);
- mutex_unlock(&sc->sc_send_lock);
- if (ret == size)
- break;
- if (ret == (ssize_t)-EAGAIN) {
- mlog(0, "sendpage of size %zu to " SC_NODEF_FMT
- " returned EAGAIN\n", size, sc->sc_node->nd_name,
- sc->sc_node->nd_num,
- &sc->sc_node->nd_ipv4_address,
- ntohs(sc->sc_node->nd_ipv4_port));
- cond_resched();
- continue;
- }
- mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
- " failed with %zd\n", size, sc->sc_node->nd_name,
- sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
- ntohs(sc->sc_node->nd_ipv4_port), ret);
- r2net_ensure_shutdown(nn, sc, 0);
- break;
- }
-}
-
-static void r2net_init_msg(struct r2net_msg *msg, u16 data_len,
- u16 msg_type, u32 key)
-{
- memset(msg, 0, sizeof(struct r2net_msg));
- msg->magic = cpu_to_be16(R2NET_MSG_MAGIC);
- msg->data_len = cpu_to_be16(data_len);
- msg->msg_type = cpu_to_be16(msg_type);
- msg->sys_status = cpu_to_be32(R2NET_ERR_NONE);
- msg->status = 0;
- msg->key = cpu_to_be32(key);
-}
-
-static int r2net_tx_can_proceed(struct r2net_node *nn,
- struct r2net_sock_container **sc_ret,
- int *error)
-{
- int ret = 0;
-
- spin_lock(&nn->nn_lock);
- if (nn->nn_persistent_error) {
- ret = 1;
- *sc_ret = NULL;
- *error = nn->nn_persistent_error;
- } else if (nn->nn_sc_valid) {
- kref_get(&nn->nn_sc->sc_kref);
-
- ret = 1;
- *sc_ret = nn->nn_sc;
- *error = 0;
- }
- spin_unlock(&nn->nn_lock);
-
- return ret;
-}
-
-/* Get a map of all nodes to which this node is currently connected to */
-void r2net_fill_node_map(unsigned long *map, unsigned bytes)
-{
- struct r2net_sock_container *sc;
- int node, ret;
-
- BUG_ON(bytes < (BITS_TO_LONGS(R2NM_MAX_NODES) * sizeof(unsigned long)));
-
- memset(map, 0, bytes);
- for (node = 0; node < R2NM_MAX_NODES; ++node) {
- r2net_tx_can_proceed(r2net_nn_from_num(node), &sc, &ret);
- if (!ret) {
- set_bit(node, map);
- sc_put(sc);
- }
- }
-}
-EXPORT_SYMBOL_GPL(r2net_fill_node_map);
-
-int r2net_send_message_vec(u32 msg_type, u32 key, struct kvec *caller_vec,
- size_t caller_veclen, u8 target_node, int *status)
-{
- int ret = 0;
- struct r2net_msg *msg = NULL;
- size_t veclen, caller_bytes = 0;
- struct kvec *vec = NULL;
- struct r2net_sock_container *sc = NULL;
- struct r2net_node *nn = r2net_nn_from_num(target_node);
- struct r2net_status_wait nsw = {
- .ns_node_item = LIST_HEAD_INIT(nsw.ns_node_item),
- };
- struct r2net_send_tracking nst;
-
- /* this may be a general bug fix */
- init_waitqueue_head(&nsw.ns_wq);
-
- r2net_init_nst(&nst, msg_type, key, current, target_node);
-
- if (r2net_wq == NULL) {
- mlog(0, "attempt to tx without r2netd running\n");
- ret = -ESRCH;
- goto out;
- }
-
- if (caller_veclen == 0) {
- mlog(0, "bad kvec array length\n");
- ret = -EINVAL;
- goto out;
- }
-
- caller_bytes = iov_length((struct iovec *)caller_vec, caller_veclen);
- if (caller_bytes > R2NET_MAX_PAYLOAD_BYTES) {
- mlog(0, "total payload len %zu too large\n", caller_bytes);
- ret = -EINVAL;
- goto out;
- }
-
- if (target_node == r2nm_this_node()) {
- ret = -ELOOP;
- goto out;
- }
-
- r2net_debug_add_nst(&nst);
-
- r2net_set_nst_sock_time(&nst);
-
- wait_event(nn->nn_sc_wq, r2net_tx_can_proceed(nn, &sc, &ret));
- if (ret)
- goto out;
-
- r2net_set_nst_sock_container(&nst, sc);
-
- veclen = caller_veclen + 1;
- vec = kmalloc(sizeof(struct kvec) * veclen, GFP_ATOMIC);
- if (vec == NULL) {
- mlog(0, "failed to allocate %zu element kvec!\n", veclen);
- ret = -ENOMEM;
- goto out;
- }
-
- msg = kmalloc(sizeof(struct r2net_msg), GFP_ATOMIC);
- if (!msg) {
- mlog(0, "failed to allocate a r2net_msg!\n");
- ret = -ENOMEM;
- goto out;
- }
-
- r2net_init_msg(msg, caller_bytes, msg_type, key);
-
- vec[0].iov_len = sizeof(struct r2net_msg);
- vec[0].iov_base = msg;
- memcpy(&vec[1], caller_vec, caller_veclen * sizeof(struct kvec));
-
- ret = r2net_prep_nsw(nn, &nsw);
- if (ret)
- goto out;
-
- msg->msg_num = cpu_to_be32(nsw.ns_id);
- r2net_set_nst_msg_id(&nst, nsw.ns_id);
-
- r2net_set_nst_send_time(&nst);
-
- /* finally, convert the message header to network byte-order
- * and send */
- mutex_lock(&sc->sc_send_lock);
- ret = r2net_send_tcp_msg(sc->sc_sock, vec, veclen,
- sizeof(struct r2net_msg) + caller_bytes);
- mutex_unlock(&sc->sc_send_lock);
- msglog(msg, "sending returned %d\n", ret);
- if (ret < 0) {
- mlog(0, "error returned from r2net_send_tcp_msg=%d\n", ret);
- goto out;
- }
-
- /* wait on other node's handler */
- r2net_set_nst_status_time(&nst);
- wait_event(nsw.ns_wq, r2net_nsw_completed(nn, &nsw) ||
- nn->nn_persistent_error || !nn->nn_sc_valid);
-
- r2net_update_send_stats(&nst, sc);
-
- /* Note that we avoid overwriting the callers status return
- * variable if a system error was reported on the other
- * side. Callers beware. */
- ret = r2net_sys_err_to_errno(nsw.ns_sys_status);
- if (status && !ret)
- *status = nsw.ns_status;
-
- mlog(0, "woken, returning system status %d, user status %d\n",
- ret, nsw.ns_status);
-out:
- r2net_debug_del_nst(&nst); /* must be before dropping sc and node */
- if (sc)
- sc_put(sc);
- kfree(vec);
- kfree(msg);
- r2net_complete_nsw(nn, &nsw, 0, 0, 0);
- return ret;
-}
-EXPORT_SYMBOL_GPL(r2net_send_message_vec);
-
-int r2net_send_message(u32 msg_type, u32 key, void *data, u32 len,
- u8 target_node, int *status)
-{
- struct kvec vec = {
- .iov_base = data,
- .iov_len = len,
- };
- return r2net_send_message_vec(msg_type, key, &vec, 1,
- target_node, status);
-}
-EXPORT_SYMBOL_GPL(r2net_send_message);
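A hypothetical sender sketch for the convenience wrapper above (names invented for
illustration):

    int status = 0;
    int ret;

    ret = r2net_send_message(MY_MSG_TYPE, MY_KEY, payload, payload_len,
                             target_node, &status);
    /* ret < 0:  local or transport failure (e.g. -ENOTCONN, -EHOSTDOWN);
     * ret == 0: the message was delivered and 'status' now holds the
     *           remote handler's return value */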
-
-static int r2net_send_status_magic(struct socket *sock, struct r2net_msg *hdr,
- enum r2net_system_error syserr, int err)
-{
- struct kvec vec = {
- .iov_base = hdr,
- .iov_len = sizeof(struct r2net_msg),
- };
-
- BUG_ON(syserr >= R2NET_ERR_MAX);
-
- /* leave other fields intact from the incoming message, msg_num
- * in particular */
- hdr->sys_status = cpu_to_be32(syserr);
- hdr->status = cpu_to_be32(err);
- /* twiddle the magic */
- hdr->magic = cpu_to_be16(R2NET_MSG_STATUS_MAGIC);
- hdr->data_len = 0;
-
- msglog(hdr, "about to send status magic %d\n", err);
- /* hdr has been in host byteorder this whole time */
- return r2net_send_tcp_msg(sock, &vec, 1, sizeof(struct r2net_msg));
-}
-
-/*
- * "data magic" is a long version of "status magic" where the message
- * payload actually contains data to be passed in reply to certain messages
- */
-static int r2net_send_data_magic(struct r2net_sock_container *sc,
- struct r2net_msg *hdr,
- void *data, size_t data_len,
- enum r2net_system_error syserr, int err)
-{
- struct kvec vec[2];
- int ret;
-
- vec[0].iov_base = hdr;
- vec[0].iov_len = sizeof(struct r2net_msg);
- vec[1].iov_base = data;
- vec[1].iov_len = data_len;
-
- BUG_ON(syserr >= R2NET_ERR_MAX);
-
- /* leave other fields intact from the incoming message, msg_num
- * in particular */
- hdr->sys_status = cpu_to_be32(syserr);
- hdr->status = cpu_to_be32(err);
- hdr->magic = cpu_to_be16(R2NET_MSG_DATA_MAGIC); /* twiddle magic */
- hdr->data_len = cpu_to_be16(data_len);
-
- msglog(hdr, "about to send data magic %d\n", err);
- /* hdr has been in host byteorder this whole time */
- ret = r2net_send_tcp_msg(sc->sc_sock, vec, 2,
- sizeof(struct r2net_msg) + data_len);
- return ret;
-}
-
-/*
- * called by a message handler to convert an otherwise normal reply
- * message into a "data magic" message
- */
-void r2net_force_data_magic(struct r2net_msg *hdr, u16 msgtype, u32 msgkey)
-{
- hdr->magic = cpu_to_be16(R2NET_MSG_DATA_MAGIC);
- hdr->msg_type = cpu_to_be16(msgtype);
- hdr->key = cpu_to_be32(msgkey);
-}
-
-/* this returns -errno if the header was unknown or too large, etc.
- * after this is called the buffer is reused for the next message */
-static int r2net_process_message(struct r2net_sock_container *sc,
- struct r2net_msg *hdr)
-{
- struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
- int ret = 0, handler_status;
- enum r2net_system_error syserr;
- struct r2net_msg_handler *nmh = NULL;
- void *ret_data = NULL;
- int data_magic = 0;
-
- msglog(hdr, "processing message\n");
-
- r2net_sc_postpone_idle(sc);
-
- switch (be16_to_cpu(hdr->magic)) {
-
- case R2NET_MSG_STATUS_MAGIC:
- /* special type for returning message status */
- r2net_complete_nsw(nn, NULL, be32_to_cpu(hdr->msg_num),
- be32_to_cpu(hdr->sys_status),
- be32_to_cpu(hdr->status));
- goto out;
- case R2NET_MSG_KEEP_REQ_MAGIC:
- r2net_sendpage(sc, r2net_keep_resp, sizeof(*r2net_keep_resp));
- goto out;
- case R2NET_MSG_KEEP_RESP_MAGIC:
- goto out;
- case R2NET_MSG_MAGIC:
- break;
- case R2NET_MSG_DATA_MAGIC:
- /*
- * unlike a normal status magic, a data magic DOES
- * (MUST) have a handler, so the control flow is
- * a little funky here as a result
- */
- data_magic = 1;
- break;
- default:
- msglog(hdr, "bad magic\n");
- ret = -EINVAL;
- goto out;
- break;
- }
-
- /* find a handler for it */
- handler_status = 0;
- nmh = r2net_handler_get(be16_to_cpu(hdr->msg_type),
- be32_to_cpu(hdr->key));
- if (!nmh) {
- mlog(ML_TCP, "couldn't find handler for type %u key %08x\n",
- be16_to_cpu(hdr->msg_type), be32_to_cpu(hdr->key));
- syserr = R2NET_ERR_NO_HNDLR;
- goto out_respond;
- }
-
- syserr = R2NET_ERR_NONE;
-
- if (be16_to_cpu(hdr->data_len) > nmh->nh_max_len)
- syserr = R2NET_ERR_OVERFLOW;
-
- if (syserr != R2NET_ERR_NONE) {
- pr_err("ramster_r2net, message length problem\n");
- goto out_respond;
- }
-
- r2net_set_func_start_time(sc);
- sc->sc_msg_key = be32_to_cpu(hdr->key);
- sc->sc_msg_type = be16_to_cpu(hdr->msg_type);
- handler_status = (nmh->nh_func)(hdr, sizeof(struct r2net_msg) +
- be16_to_cpu(hdr->data_len),
- nmh->nh_func_data, &ret_data);
- if (data_magic) {
- /*
- * handler handled data sent in reply to request
- * so complete the transaction
- */
- r2net_complete_nsw(nn, NULL, be32_to_cpu(hdr->msg_num),
- be32_to_cpu(hdr->sys_status), handler_status);
- goto out;
- }
- /*
- * handler changed magic to DATA_MAGIC to reply to request for data,
- * implies ret_data points to data to return and handler_status
- * is the number of bytes of data
- */
- if (be16_to_cpu(hdr->magic) == R2NET_MSG_DATA_MAGIC) {
- ret = r2net_send_data_magic(sc, hdr,
- ret_data, handler_status,
- syserr, 0);
- hdr = NULL;
- mlog(0, "sending data reply %d, syserr %d returned %d\n",
- handler_status, syserr, ret);
- r2net_set_func_stop_time(sc);
-
- r2net_update_recv_stats(sc);
- goto out;
- }
- r2net_set_func_stop_time(sc);
-
- r2net_update_recv_stats(sc);
-
-out_respond:
- /* this destroys the hdr, so don't use it after this */
- mutex_lock(&sc->sc_send_lock);
- ret = r2net_send_status_magic(sc->sc_sock, hdr, syserr,
- handler_status);
- mutex_unlock(&sc->sc_send_lock);
- hdr = NULL;
- mlog(0, "sending handler status %d, syserr %d returned %d\n",
- handler_status, syserr, ret);
-
- if (nmh) {
- BUG_ON(ret_data != NULL && nmh->nh_post_func == NULL);
- if (nmh->nh_post_func)
- (nmh->nh_post_func)(handler_status, nmh->nh_func_data,
- ret_data);
- }
-
-out:
- if (nmh)
- r2net_handler_put(nmh);
- return ret;
-}
-
-static int r2net_check_handshake(struct r2net_sock_container *sc)
-{
- struct r2net_handshake *hand = page_address(sc->sc_page);
- struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
-
- if (hand->protocol_version != cpu_to_be64(R2NET_PROTOCOL_VERSION)) {
- pr_notice("ramster: " SC_NODEF_FMT " Advertised net "
- "protocol version %llu but %llu is required. "
- "Disconnecting.\n", sc->sc_node->nd_name,
- sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
- ntohs(sc->sc_node->nd_ipv4_port),
- (unsigned long long)be64_to_cpu(hand->protocol_version),
- R2NET_PROTOCOL_VERSION);
-
- /* don't bother reconnecting if it's the wrong version. */
- r2net_ensure_shutdown(nn, sc, -ENOTCONN);
- return -1;
- }
-
- /*
- * Ensure timeouts are consistent with other nodes, otherwise
- * we can end up with one node thinking that the other must be down,
- * but isn't. This can ultimately cause corruption.
- */
- if (be32_to_cpu(hand->r2net_idle_timeout_ms) !=
- r2net_idle_timeout()) {
- pr_notice("ramster: " SC_NODEF_FMT " uses a network "
- "idle timeout of %u ms, but we use %u ms locally. "
- "Disconnecting.\n", sc->sc_node->nd_name,
- sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
- ntohs(sc->sc_node->nd_ipv4_port),
- be32_to_cpu(hand->r2net_idle_timeout_ms),
- r2net_idle_timeout());
- r2net_ensure_shutdown(nn, sc, -ENOTCONN);
- return -1;
- }
-
- if (be32_to_cpu(hand->r2net_keepalive_delay_ms) !=
- r2net_keepalive_delay()) {
- pr_notice("ramster: " SC_NODEF_FMT " uses a keepalive "
- "delay of %u ms, but we use %u ms locally. "
- "Disconnecting.\n", sc->sc_node->nd_name,
- sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
- ntohs(sc->sc_node->nd_ipv4_port),
- be32_to_cpu(hand->r2net_keepalive_delay_ms),
- r2net_keepalive_delay());
- r2net_ensure_shutdown(nn, sc, -ENOTCONN);
- return -1;
- }
-
- if (be32_to_cpu(hand->r2hb_heartbeat_timeout_ms) !=
- R2HB_MAX_WRITE_TIMEOUT_MS) {
- pr_notice("ramster: " SC_NODEF_FMT " uses a heartbeat "
- "timeout of %u ms, but we use %u ms locally. "
- "Disconnecting.\n", sc->sc_node->nd_name,
- sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
- ntohs(sc->sc_node->nd_ipv4_port),
- be32_to_cpu(hand->r2hb_heartbeat_timeout_ms),
- R2HB_MAX_WRITE_TIMEOUT_MS);
- r2net_ensure_shutdown(nn, sc, -ENOTCONN);
- return -1;
- }
-
- sc->sc_handshake_ok = 1;
-
- spin_lock(&nn->nn_lock);
- /* set valid and queue the idle timers only if it hasn't been
- * shut down already */
- if (nn->nn_sc == sc) {
- r2net_sc_reset_idle_timer(sc);
- atomic_set(&nn->nn_timeout, 0);
- r2net_set_nn_state(nn, sc, 1, 0);
- }
- spin_unlock(&nn->nn_lock);
-
- /* shift everything up as though it wasn't there */
- sc->sc_page_off -= sizeof(struct r2net_handshake);
- if (sc->sc_page_off)
- memmove(hand, hand + 1, sc->sc_page_off);
-
- return 0;
-}
-
-/* this demuxes the queued rx bytes into header or payload bits and calls
- * handlers as each full message is read off the socket. it returns -error,
- * == 0 eof, or > 0 for progress made.*/
-static int r2net_advance_rx(struct r2net_sock_container *sc)
-{
- struct r2net_msg *hdr;
- int ret = 0;
- void *data;
- size_t datalen;
-
- sclog(sc, "receiving\n");
- r2net_set_advance_start_time(sc);
-
- if (unlikely(sc->sc_handshake_ok == 0)) {
- if (sc->sc_page_off < sizeof(struct r2net_handshake)) {
- data = page_address(sc->sc_page) + sc->sc_page_off;
- datalen = sizeof(struct r2net_handshake) -
- sc->sc_page_off;
- ret = r2net_recv_tcp_msg(sc->sc_sock, data, datalen);
- if (ret > 0)
- sc->sc_page_off += ret;
- }
-
- if (sc->sc_page_off == sizeof(struct r2net_handshake)) {
- r2net_check_handshake(sc);
- if (unlikely(sc->sc_handshake_ok == 0))
- ret = -EPROTO;
- }
- goto out;
- }
-
- /* do we need more header? */
- if (sc->sc_page_off < sizeof(struct r2net_msg)) {
- data = page_address(sc->sc_page) + sc->sc_page_off;
- datalen = sizeof(struct r2net_msg) - sc->sc_page_off;
- ret = r2net_recv_tcp_msg(sc->sc_sock, data, datalen);
- if (ret > 0) {
- sc->sc_page_off += ret;
- /* only swab incoming here.. we can
- * only get here once as we cross from
- * being under to over */
- if (sc->sc_page_off == sizeof(struct r2net_msg)) {
- hdr = page_address(sc->sc_page);
- if (be16_to_cpu(hdr->data_len) >
- R2NET_MAX_PAYLOAD_BYTES)
- ret = -EOVERFLOW;
- WARN_ON_ONCE(ret == -EOVERFLOW);
- }
- }
- if (ret <= 0)
- goto out;
- }
-
- if (sc->sc_page_off < sizeof(struct r2net_msg)) {
- /* oof, still don't have a header */
- goto out;
- }
-
- /* this was swabbed above when we first read it */
- hdr = page_address(sc->sc_page);
-
- msglog(hdr, "at page_off %zu\n", sc->sc_page_off);
-
- /* do we need more payload? */
- if (sc->sc_page_off - sizeof(struct r2net_msg) <
- be16_to_cpu(hdr->data_len)) {
- /* need more payload */
- data = page_address(sc->sc_page) + sc->sc_page_off;
- datalen = (sizeof(struct r2net_msg) +
- be16_to_cpu(hdr->data_len)) -
- sc->sc_page_off;
- ret = r2net_recv_tcp_msg(sc->sc_sock, data, datalen);
- if (ret > 0)
- sc->sc_page_off += ret;
- if (ret <= 0)
- goto out;
- }
-
- if (sc->sc_page_off - sizeof(struct r2net_msg) ==
- be16_to_cpu(hdr->data_len)) {
- /* we can only get here once, the first time we read
- * the payload.. so set ret to progress if the handler
- * works out. after calling this the message is toast */
- ret = r2net_process_message(sc, hdr);
- if (ret == 0)
- ret = 1;
- sc->sc_page_off = 0;
- }
-
-out:
- sclog(sc, "ret = %d\n", ret);
- r2net_set_advance_stop_time(sc);
- return ret;
-}
-
-/* this work func is triggered by data_ready. it reads until it can read no
- * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing
- * our work the work struct will be marked and we'll be called again. */
-static void r2net_rx_until_empty(struct work_struct *work)
-{
- struct r2net_sock_container *sc =
- container_of(work, struct r2net_sock_container, sc_rx_work);
- int ret;
-
- do {
- ret = r2net_advance_rx(sc);
- } while (ret > 0);
-
- if (ret <= 0 && ret != -EAGAIN) {
- struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
- sclog(sc, "saw error %d, closing\n", ret);
- /* not permanent so read failed handshake can retry */
- r2net_ensure_shutdown(nn, sc, 0);
- }
- sc_put(sc);
-}
-
-static int r2net_set_nodelay(struct socket *sock)
-{
- int ret, val = 1;
- mm_segment_t oldfs;
-
- oldfs = get_fs();
- set_fs(KERNEL_DS);
-
- /*
- * Dear unsuspecting programmer,
- *
- * Don't use sock_setsockopt() for SOL_TCP. It doesn't check its level
- * argument and assumes SOL_SOCKET so, say, your TCP_NODELAY will
- * silently turn into SO_DEBUG.
- *
- * Yours,
- * Keeper of hilariously fragile interfaces.
- */
- ret = sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY,
- (char __user *)&val, sizeof(val));
-
- set_fs(oldfs);
- return ret;
-}
-
-static void r2net_initialize_handshake(void)
-{
- r2net_hand->r2hb_heartbeat_timeout_ms = cpu_to_be32(
- R2HB_MAX_WRITE_TIMEOUT_MS);
- r2net_hand->r2net_idle_timeout_ms = cpu_to_be32(r2net_idle_timeout());
- r2net_hand->r2net_keepalive_delay_ms = cpu_to_be32(
- r2net_keepalive_delay());
- r2net_hand->r2net_reconnect_delay_ms = cpu_to_be32(
- r2net_reconnect_delay());
-}
-
-/* ------------------------------------------------------------ */
-
-/* called when a connect completes and after a sock is accepted. the
- * rx path will see the response and mark the sc valid */
-static void r2net_sc_connect_completed(struct work_struct *work)
-{
- struct r2net_sock_container *sc =
- container_of(work, struct r2net_sock_container,
- sc_connect_work);
-
- mlog(ML_MSG, "sc sending handshake with ver %llu id %llx\n",
- (unsigned long long)R2NET_PROTOCOL_VERSION,
- (unsigned long long)be64_to_cpu(r2net_hand->connector_id));
-
- r2net_initialize_handshake();
- r2net_sendpage(sc, r2net_hand, sizeof(*r2net_hand));
- sc_put(sc);
-}
-
-/* this is called as a work_struct func. */
-static void r2net_sc_send_keep_req(struct work_struct *work)
-{
- struct r2net_sock_container *sc =
- container_of(work, struct r2net_sock_container,
- sc_keepalive_work.work);
-
- r2net_sendpage(sc, r2net_keep_req, sizeof(*r2net_keep_req));
- sc_put(sc);
-}
-
-/* socket shutdown does a del_timer_sync against this as it tears down.
- * we can't start this timer until we've got to the point in sc buildup
- * where shutdown is going to be involved */
-static void r2net_idle_timer(unsigned long data)
-{
- struct r2net_sock_container *sc = (struct r2net_sock_container *)data;
- struct r2net_node *nn = r2net_nn_from_num(sc->sc_node->nd_num);
-#ifdef CONFIG_DEBUG_FS
- unsigned long msecs = ktime_to_ms(ktime_get()) -
- ktime_to_ms(sc->sc_tv_timer);
-#else
- unsigned long msecs = r2net_idle_timeout();
-#endif
-
- pr_notice("ramster: Connection to " SC_NODEF_FMT " has been "
- "idle for %lu.%lu secs, shutting it down.\n",
- sc->sc_node->nd_name, sc->sc_node->nd_num,
- &sc->sc_node->nd_ipv4_address, ntohs(sc->sc_node->nd_ipv4_port),
- msecs / 1000, msecs % 1000);
-
- /*
- * Initialize the nn_timeout so that the next connection attempt
- * will continue in r2net_start_connect.
- */
- atomic_set(&nn->nn_timeout, 1);
- r2net_sc_queue_work(sc, &sc->sc_shutdown_work);
-}
-
-static void r2net_sc_reset_idle_timer(struct r2net_sock_container *sc)
-{
- r2net_sc_cancel_delayed_work(sc, &sc->sc_keepalive_work);
- r2net_sc_queue_delayed_work(sc, &sc->sc_keepalive_work,
- msecs_to_jiffies(r2net_keepalive_delay()));
- r2net_set_sock_timer(sc);
- mod_timer(&sc->sc_idle_timeout,
- jiffies + msecs_to_jiffies(r2net_idle_timeout()));
-}
-
-static void r2net_sc_postpone_idle(struct r2net_sock_container *sc)
-{
- /* Only push out an existing timer */
- if (timer_pending(&sc->sc_idle_timeout))
- r2net_sc_reset_idle_timer(sc);
-}
-
-/* this work func is kicked whenever a path sets the nn state which doesn't
- * have valid set. This includes seeing hb come up, losing a connection,
- * having a connect attempt fail, etc. This centralizes the logic which decides
- * if a connect attempt should be made or if we should give up and all future
- * transmit attempts should fail */
-static void r2net_start_connect(struct work_struct *work)
-{
- struct r2net_node *nn =
- container_of(work, struct r2net_node, nn_connect_work.work);
- struct r2net_sock_container *sc = NULL;
- struct r2nm_node *node = NULL, *mynode = NULL;
- struct socket *sock = NULL;
- struct sockaddr_in myaddr = {0, }, remoteaddr = {0, };
- int ret = 0, stop;
- unsigned int timeout;
-
- /* if we're greater we initiate tx, otherwise we accept */
- if (r2nm_this_node() <= r2net_num_from_nn(nn))
- goto out;
-
- /* watch for racing with tearing a node down */
- node = r2nm_get_node_by_num(r2net_num_from_nn(nn));
- if (node == NULL) {
- ret = 0;
- goto out;
- }
-
- mynode = r2nm_get_node_by_num(r2nm_this_node());
- if (mynode == NULL) {
- ret = 0;
- goto out;
- }
-
- spin_lock(&nn->nn_lock);
- /*
- * see if we already have one pending or have given up.
- * For nn_timeout, it is set when we close the connection
- * because of the idle timeout. So it means that we have
- * at least connected to that node successfully once,
- * now try to connect to it again.
- */
- timeout = atomic_read(&nn->nn_timeout);
- stop = (nn->nn_sc ||
- (nn->nn_persistent_error &&
- (nn->nn_persistent_error != -ENOTCONN || timeout == 0)));
- spin_unlock(&nn->nn_lock);
- if (stop)
- goto out;
-
- nn->nn_last_connect_attempt = jiffies;
-
- sc = sc_alloc(node);
- if (sc == NULL) {
- mlog(0, "couldn't allocate sc\n");
- ret = -ENOMEM;
- goto out;
- }
-
- ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
- if (ret < 0) {
- mlog(0, "can't create socket: %d\n", ret);
- goto out;
- }
- sc->sc_sock = sock; /* freed by sc_kref_release */
-
- sock->sk->sk_allocation = GFP_ATOMIC;
-
- myaddr.sin_family = AF_INET;
- myaddr.sin_addr.s_addr = mynode->nd_ipv4_address;
- myaddr.sin_port = htons(0); /* any port */
-
- ret = sock->ops->bind(sock, (struct sockaddr *)&myaddr,
- sizeof(myaddr));
- if (ret) {
- mlog(ML_ERROR, "bind failed with %d at address %pI4\n",
- ret, &mynode->nd_ipv4_address);
- goto out;
- }
-
- ret = r2net_set_nodelay(sc->sc_sock);
- if (ret) {
- mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret);
- goto out;
- }
-
- r2net_register_callbacks(sc->sc_sock->sk, sc);
-
- spin_lock(&nn->nn_lock);
- /* handshake completion will set nn->nn_sc_valid */
- r2net_set_nn_state(nn, sc, 0, 0);
- spin_unlock(&nn->nn_lock);
-
- remoteaddr.sin_family = AF_INET;
- remoteaddr.sin_addr.s_addr = node->nd_ipv4_address;
- remoteaddr.sin_port = node->nd_ipv4_port;
-
- ret = sc->sc_sock->ops->connect(sc->sc_sock,
- (struct sockaddr *)&remoteaddr,
- sizeof(remoteaddr),
- O_NONBLOCK);
- if (ret == -EINPROGRESS)
- ret = 0;
-
-out:
- if (ret) {
- pr_notice("ramster: Connect attempt to " SC_NODEF_FMT
- " failed with errno %d\n", sc->sc_node->nd_name,
- sc->sc_node->nd_num, &sc->sc_node->nd_ipv4_address,
- ntohs(sc->sc_node->nd_ipv4_port), ret);
- /* 0 err so that another will be queued and attempted
- * from set_nn_state */
- if (sc)
- r2net_ensure_shutdown(nn, sc, 0);
- }
- if (sc)
- sc_put(sc);
- if (node)
- r2nm_node_put(node);
- if (mynode)
- r2nm_node_put(mynode);
-
- return;
-}
-
-static void r2net_connect_expired(struct work_struct *work)
-{
- struct r2net_node *nn =
- container_of(work, struct r2net_node, nn_connect_expired.work);
-
- spin_lock(&nn->nn_lock);
- if (!nn->nn_sc_valid) {
- pr_notice("ramster: No connection established with "
- "node %u after %u.%u seconds, giving up.\n",
- r2net_num_from_nn(nn),
- r2net_idle_timeout() / 1000,
- r2net_idle_timeout() % 1000);
-
- r2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
- }
- spin_unlock(&nn->nn_lock);
-}
-
-static void r2net_still_up(struct work_struct *work)
-{
-}
-
-/* ------------------------------------------------------------ */
-
-void r2net_disconnect_node(struct r2nm_node *node)
-{
- struct r2net_node *nn = r2net_nn_from_num(node->nd_num);
-
- /* don't reconnect until it's heartbeating again */
- spin_lock(&nn->nn_lock);
- atomic_set(&nn->nn_timeout, 0);
- r2net_set_nn_state(nn, NULL, 0, -ENOTCONN);
- spin_unlock(&nn->nn_lock);
-
- if (r2net_wq) {
- cancel_delayed_work(&nn->nn_connect_expired);
- cancel_delayed_work(&nn->nn_connect_work);
- cancel_delayed_work(&nn->nn_still_up);
- flush_workqueue(r2net_wq);
- }
-}
-
-static void r2net_hb_node_down_cb(struct r2nm_node *node, int node_num,
- void *data)
-{
- if (!node)
- return;
-
- if (node_num != r2nm_this_node())
- r2net_disconnect_node(node);
-
- BUG_ON(atomic_read(&r2net_connected_peers) < 0);
-}
-
-static void r2net_hb_node_up_cb(struct r2nm_node *node, int node_num,
- void *data)
-{
- struct r2net_node *nn = r2net_nn_from_num(node_num);
-
- BUG_ON(!node);
-
- /* ensure an immediate connect attempt */
- nn->nn_last_connect_attempt = jiffies -
- (msecs_to_jiffies(r2net_reconnect_delay()) + 1);
-
- if (node_num != r2nm_this_node()) {
- /* believe it or not, accept and node heartbeat testing
- * can succeed for this node before we get here.. so
- * only use set_nn_state to clear the persistent error
- * if that hasn't already happened */
- spin_lock(&nn->nn_lock);
- atomic_set(&nn->nn_timeout, 0);
- if (nn->nn_persistent_error)
- r2net_set_nn_state(nn, NULL, 0, 0);
- spin_unlock(&nn->nn_lock);
- }
-}
-
-void r2net_unregister_hb_callbacks(void)
-{
- r2hb_unregister_callback(NULL, &r2net_hb_up);
- r2hb_unregister_callback(NULL, &r2net_hb_down);
-}
-
-int r2net_register_hb_callbacks(void)
-{
- int ret;
-
- r2hb_setup_callback(&r2net_hb_down, R2HB_NODE_DOWN_CB,
- r2net_hb_node_down_cb, NULL, R2NET_HB_PRI);
- r2hb_setup_callback(&r2net_hb_up, R2HB_NODE_UP_CB,
- r2net_hb_node_up_cb, NULL, R2NET_HB_PRI);
-
- ret = r2hb_register_callback(NULL, &r2net_hb_up);
- if (ret == 0)
- ret = r2hb_register_callback(NULL, &r2net_hb_down);
-
- if (ret)
- r2net_unregister_hb_callbacks();
-
- return ret;
-}
-
-/* ------------------------------------------------------------ */
-
-static int r2net_accept_one(struct socket *sock)
-{
- int ret, slen;
- struct sockaddr_in sin;
- struct socket *new_sock = NULL;
- struct r2nm_node *node = NULL;
- struct r2nm_node *local_node = NULL;
- struct r2net_sock_container *sc = NULL;
- struct r2net_node *nn;
-
- BUG_ON(sock == NULL);
- ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
- sock->sk->sk_protocol, &new_sock);
- if (ret)
- goto out;
-
- new_sock->type = sock->type;
- new_sock->ops = sock->ops;
- ret = sock->ops->accept(sock, new_sock, O_NONBLOCK);
- if (ret < 0)
- goto out;
-
- new_sock->sk->sk_allocation = GFP_ATOMIC;
-
- ret = r2net_set_nodelay(new_sock);
- if (ret) {
- mlog(ML_ERROR, "setting TCP_NODELAY failed with %d\n", ret);
- goto out;
- }
-
- slen = sizeof(sin);
- ret = new_sock->ops->getname(new_sock, (struct sockaddr *) &sin,
- &slen, 1);
- if (ret < 0)
- goto out;
-
- node = r2nm_get_node_by_ip(sin.sin_addr.s_addr);
- if (node == NULL) {
- pr_notice("ramster: Attempt to connect from unknown "
- "node at %pI4:%d\n", &sin.sin_addr.s_addr,
- ntohs(sin.sin_port));
- ret = -EINVAL;
- goto out;
- }
-
- if (r2nm_this_node() >= node->nd_num) {
- local_node = r2nm_get_node_by_num(r2nm_this_node());
- pr_notice("ramster: Unexpected connect attempt seen "
- "at node '%s' (%u, %pI4:%d) from node '%s' (%u, "
- "%pI4:%d)\n", local_node->nd_name, local_node->nd_num,
- &(local_node->nd_ipv4_address),
- ntohs(local_node->nd_ipv4_port), node->nd_name,
- node->nd_num, &sin.sin_addr.s_addr, ntohs(sin.sin_port));
- ret = -EINVAL;
- goto out;
- }
-
- /* this happens all the time when the other node sees our heartbeat
- * and tries to connect before we see their heartbeat */
- if (!r2hb_check_node_heartbeating_from_callback(node->nd_num)) {
- mlog(ML_CONN, "attempt to connect from node '%s' at "
- "%pI4:%d but it isn't heartbeating\n",
- node->nd_name, &sin.sin_addr.s_addr,
- ntohs(sin.sin_port));
- ret = -EINVAL;
- goto out;
- }
-
- nn = r2net_nn_from_num(node->nd_num);
-
- spin_lock(&nn->nn_lock);
- if (nn->nn_sc)
- ret = -EBUSY;
- else
- ret = 0;
- spin_unlock(&nn->nn_lock);
- if (ret) {
- pr_notice("ramster: Attempt to connect from node '%s' "
- "at %pI4:%d but it already has an open connection\n",
- node->nd_name, &sin.sin_addr.s_addr,
- ntohs(sin.sin_port));
- goto out;
- }
-
- sc = sc_alloc(node);
- if (sc == NULL) {
- ret = -ENOMEM;
- goto out;
- }
-
- sc->sc_sock = new_sock;
- new_sock = NULL;
-
- spin_lock(&nn->nn_lock);
- atomic_set(&nn->nn_timeout, 0);
- r2net_set_nn_state(nn, sc, 0, 0);
- spin_unlock(&nn->nn_lock);
-
- r2net_register_callbacks(sc->sc_sock->sk, sc);
- r2net_sc_queue_work(sc, &sc->sc_rx_work);
-
- r2net_initialize_handshake();
- r2net_sendpage(sc, r2net_hand, sizeof(*r2net_hand));
-
-out:
- if (new_sock)
- sock_release(new_sock);
- if (node)
- r2nm_node_put(node);
- if (local_node)
- r2nm_node_put(local_node);
- if (sc)
- sc_put(sc);
- return ret;
-}
-
-static void r2net_accept_many(struct work_struct *work)
-{
- struct socket *sock = r2net_listen_sock;
- while (r2net_accept_one(sock) == 0)
- cond_resched();
-}
-
-static void r2net_listen_data_ready(struct sock *sk, int bytes)
-{
- void (*ready)(struct sock *sk, int bytes);
-
- read_lock(&sk->sk_callback_lock);
- ready = sk->sk_user_data;
- if (ready == NULL) { /* check for teardown race */
- ready = sk->sk_data_ready;
- goto out;
- }
-
- /* ->sk_data_ready is also called for a newly established child socket
- * before it has been accepted and the acceptor has set up their
- * data_ready.. we only want to queue listen work for our listening
- * socket */
- if (sk->sk_state == TCP_LISTEN) {
- mlog(ML_TCP, "bytes: %d\n", bytes);
- queue_work(r2net_wq, &r2net_listen_work);
- }
-
-out:
- read_unlock(&sk->sk_callback_lock);
- ready(sk, bytes);
-}
-
-static int r2net_open_listening_sock(__be32 addr, __be16 port)
-{
- struct socket *sock = NULL;
- int ret;
- struct sockaddr_in sin = {
- .sin_family = PF_INET,
- .sin_addr = { .s_addr = addr },
- .sin_port = port,
- };
-
- ret = sock_create(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
- if (ret < 0) {
- pr_err("ramster: Error %d while creating socket\n", ret);
- goto out;
- }
-
- sock->sk->sk_allocation = GFP_ATOMIC;
-
- write_lock_bh(&sock->sk->sk_callback_lock);
- sock->sk->sk_user_data = sock->sk->sk_data_ready;
- sock->sk->sk_data_ready = r2net_listen_data_ready;
- write_unlock_bh(&sock->sk->sk_callback_lock);
-
- r2net_listen_sock = sock;
- INIT_WORK(&r2net_listen_work, r2net_accept_many);
-
- sock->sk->sk_reuse = /* SK_CAN_REUSE FIXME FOR 3.4 */ 1;
- ret = sock->ops->bind(sock, (struct sockaddr *)&sin, sizeof(sin));
- if (ret < 0) {
- pr_err("ramster: Error %d while binding socket at %pI4:%u\n",
- ret, &addr, ntohs(port));
- goto out;
- }
-
- ret = sock->ops->listen(sock, 64);
- if (ret < 0)
- pr_err("ramster: Error %d while listening on %pI4:%u\n",
- ret, &addr, ntohs(port));
-
-out:
- if (ret) {
- r2net_listen_sock = NULL;
- if (sock)
- sock_release(sock);
- }
- return ret;
-}
-
-/*
- * called from node manager when we should bring up our network listening
- * socket. node manager handles all the serialization to only call this
- * once and to match it with r2net_stop_listening(). note,
- * r2nm_this_node() doesn't work yet as we're being called while it
- * is being set up.
- */
-int r2net_start_listening(struct r2nm_node *node)
-{
- int ret = 0;
-
- BUG_ON(r2net_wq != NULL);
- BUG_ON(r2net_listen_sock != NULL);
-
- mlog(ML_KTHREAD, "starting r2net thread...\n");
- r2net_wq = create_singlethread_workqueue("r2net");
- if (r2net_wq == NULL) {
- mlog(ML_ERROR, "unable to launch r2net thread\n");
- return -ENOMEM; /* ? */
- }
-
- ret = r2net_open_listening_sock(node->nd_ipv4_address,
- node->nd_ipv4_port);
- if (ret) {
- destroy_workqueue(r2net_wq);
- r2net_wq = NULL;
- }
-
- return ret;
-}
-
-/* again, r2nm_this_node() doesn't work here as we're involved in
- * tearing it down */
-void r2net_stop_listening(struct r2nm_node *node)
-{
- struct socket *sock = r2net_listen_sock;
- size_t i;
-
- BUG_ON(r2net_wq == NULL);
- BUG_ON(r2net_listen_sock == NULL);
-
- /* stop the listening socket from generating work */
- write_lock_bh(&sock->sk->sk_callback_lock);
- sock->sk->sk_data_ready = sock->sk->sk_user_data;
- sock->sk->sk_user_data = NULL;
- write_unlock_bh(&sock->sk->sk_callback_lock);
-
- for (i = 0; i < ARRAY_SIZE(r2net_nodes); i++) {
- struct r2nm_node *node = r2nm_get_node_by_num(i);
- if (node) {
- r2net_disconnect_node(node);
- r2nm_node_put(node);
- }
- }
-
- /* finish all work and tear down the work queue */
- mlog(ML_KTHREAD, "waiting for r2net thread to exit....\n");
- destroy_workqueue(r2net_wq);
- r2net_wq = NULL;
-
- sock_release(r2net_listen_sock);
- r2net_listen_sock = NULL;
-}
-
-void r2net_hb_node_up_manual(int node_num)
-{
- struct r2nm_node dummy;
- if (r2nm_single_cluster == NULL)
- pr_err("ramster: cluster not alive, node_up_manual ignored\n");
- else {
- r2hb_manual_set_node_heartbeating(node_num);
- r2net_hb_node_up_cb(&dummy, node_num, NULL);
- }
-}
-
-/* ------------------------------------------------------------ */
-
-int r2net_init(void)
-{
- unsigned long i;
-
- if (r2net_debugfs_init())
- return -ENOMEM;
-
- r2net_hand = kzalloc(sizeof(struct r2net_handshake), GFP_KERNEL);
- r2net_keep_req = kzalloc(sizeof(struct r2net_msg), GFP_KERNEL);
- r2net_keep_resp = kzalloc(sizeof(struct r2net_msg), GFP_KERNEL);
- if (!r2net_hand || !r2net_keep_req || !r2net_keep_resp) {
- kfree(r2net_hand);
- kfree(r2net_keep_req);
- kfree(r2net_keep_resp);
- return -ENOMEM;
- }
-
- r2net_hand->protocol_version = cpu_to_be64(R2NET_PROTOCOL_VERSION);
- r2net_hand->connector_id = cpu_to_be64(1);
-
- r2net_keep_req->magic = cpu_to_be16(R2NET_MSG_KEEP_REQ_MAGIC);
- r2net_keep_resp->magic = cpu_to_be16(R2NET_MSG_KEEP_RESP_MAGIC);
-
- for (i = 0; i < ARRAY_SIZE(r2net_nodes); i++) {
- struct r2net_node *nn = r2net_nn_from_num(i);
-
- atomic_set(&nn->nn_timeout, 0);
- spin_lock_init(&nn->nn_lock);
- INIT_DELAYED_WORK(&nn->nn_connect_work, r2net_start_connect);
- INIT_DELAYED_WORK(&nn->nn_connect_expired,
- r2net_connect_expired);
- INIT_DELAYED_WORK(&nn->nn_still_up, r2net_still_up);
- /* until we see hb from a node we'll return -ENOTCONN */
- nn->nn_persistent_error = -ENOTCONN;
- init_waitqueue_head(&nn->nn_sc_wq);
- idr_init(&nn->nn_status_idr);
- INIT_LIST_HEAD(&nn->nn_status_list);
- }
-
- return 0;
-}
-
-void r2net_exit(void)
-{
- kfree(r2net_hand);
- kfree(r2net_keep_req);
- kfree(r2net_keep_resp);
- r2net_debugfs_exit();
-}
diff --git a/drivers/staging/zcache/ramster/tcp.h b/drivers/staging/zcache/ramster/tcp.h
deleted file mode 100644
index 9d05833452b..00000000000
--- a/drivers/staging/zcache/ramster/tcp.h
+++ /dev/null
@@ -1,159 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * tcp.h
- *
- * Function prototypes
- *
- * Copyright (C) 2004 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- *
- */
-
-#ifndef R2CLUSTER_TCP_H
-#define R2CLUSTER_TCP_H
-
-#include <linux/socket.h>
-#ifdef __KERNEL__
-#include <net/sock.h>
-#include <linux/tcp.h>
-#else
-#include <sys/socket.h>
-#endif
-#include <linux/inet.h>
-#include <linux/in.h>
-
-struct r2net_msg {
- __be16 magic;
- __be16 data_len;
- __be16 msg_type;
- __be16 pad1;
- __be32 sys_status;
- __be32 status;
- __be32 key;
- __be32 msg_num;
- __u8 buf[0];
-};
-
-typedef int (r2net_msg_handler_func)(struct r2net_msg *msg, u32 len, void *data,
- void **ret_data);
-typedef void (r2net_post_msg_handler_func)(int status, void *data,
- void *ret_data);
-
-#define R2NET_MAX_PAYLOAD_BYTES (4096 - sizeof(struct r2net_msg))
-
-/* same as hb delay, we're waiting for another node to recognize our hb */
-#define R2NET_RECONNECT_DELAY_MS_DEFAULT 2000
-
-#define R2NET_KEEPALIVE_DELAY_MS_DEFAULT 2000
-#define R2NET_IDLE_TIMEOUT_MS_DEFAULT 30000
-
-
-/* TODO: figure this out.... */
-static inline int r2net_link_down(int err, struct socket *sock)
-{
- if (sock) {
- if (sock->sk->sk_state != TCP_ESTABLISHED &&
- sock->sk->sk_state != TCP_CLOSE_WAIT)
- return 1;
- }
-
- if (err >= 0)
- return 0;
- switch (err) {
-
- /* ????????????????????????? */
- case -ERESTARTSYS:
- case -EBADF:
- /* When the server has died, an ICMP port unreachable
- * message prompts ECONNREFUSED. */
- case -ECONNREFUSED:
- case -ENOTCONN:
- case -ECONNRESET:
- case -EPIPE:
- return 1;
-
- }
- return 0;
-}
-
-enum {
- R2NET_DRIVER_UNINITED,
- R2NET_DRIVER_READY,
-};
-
-int r2net_send_message(u32 msg_type, u32 key, void *data, u32 len,
- u8 target_node, int *status);
-int r2net_send_message_vec(u32 msg_type, u32 key, struct kvec *vec,
- size_t veclen, u8 target_node, int *status);
-
-int r2net_register_handler(u32 msg_type, u32 key, u32 max_len,
- r2net_msg_handler_func *func, void *data,
- r2net_post_msg_handler_func *post_func,
- struct list_head *unreg_list);
-void r2net_unregister_handler_list(struct list_head *list);
-
-void r2net_fill_node_map(unsigned long *map, unsigned bytes);
-
-void r2net_force_data_magic(struct r2net_msg *, u16, u32);
-void r2net_hb_node_up_manual(int);
-struct r2net_node *r2net_nn_from_num(u8);
-
-struct r2nm_node;
-int r2net_register_hb_callbacks(void);
-void r2net_unregister_hb_callbacks(void);
-int r2net_start_listening(struct r2nm_node *node);
-void r2net_stop_listening(struct r2nm_node *node);
-void r2net_disconnect_node(struct r2nm_node *node);
-int r2net_num_connected_peers(void);
-
-int r2net_init(void);
-void r2net_exit(void);
-
-struct r2net_send_tracking;
-struct r2net_sock_container;
-
-#if 0
-int r2net_debugfs_init(void);
-void r2net_debugfs_exit(void);
-void r2net_debug_add_nst(struct r2net_send_tracking *nst);
-void r2net_debug_del_nst(struct r2net_send_tracking *nst);
-void r2net_debug_add_sc(struct r2net_sock_container *sc);
-void r2net_debug_del_sc(struct r2net_sock_container *sc);
-#else
-static inline int r2net_debugfs_init(void)
-{
- return 0;
-}
-static inline void r2net_debugfs_exit(void)
-{
-}
-static inline void r2net_debug_add_nst(struct r2net_send_tracking *nst)
-{
-}
-static inline void r2net_debug_del_nst(struct r2net_send_tracking *nst)
-{
-}
-static inline void r2net_debug_add_sc(struct r2net_sock_container *sc)
-{
-}
-static inline void r2net_debug_del_sc(struct r2net_sock_container *sc)
-{
-}
-#endif /* CONFIG_DEBUG_FS */
-
-#endif /* R2CLUSTER_TCP_H */
diff --git a/drivers/staging/zcache/ramster/tcp_internal.h b/drivers/staging/zcache/ramster/tcp_internal.h
deleted file mode 100644
index 4d8cc9f96fd..00000000000
--- a/drivers/staging/zcache/ramster/tcp_internal.h
+++ /dev/null
@@ -1,248 +0,0 @@
-/* -*- mode: c; c-basic-offset: 8; -*-
- * vim: noexpandtab sw=8 ts=8 sts=0:
- *
- * Copyright (C) 2005 Oracle. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public
- * License along with this program; if not, write to the
- * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
- * Boston, MA 021110-1307, USA.
- */
-
-#ifndef R2CLUSTER_TCP_INTERNAL_H
-#define R2CLUSTER_TCP_INTERNAL_H
-
-#define R2NET_MSG_MAGIC ((u16)0xfa55)
-#define R2NET_MSG_STATUS_MAGIC ((u16)0xfa56)
-#define R2NET_MSG_KEEP_REQ_MAGIC ((u16)0xfa57)
-#define R2NET_MSG_KEEP_RESP_MAGIC ((u16)0xfa58)
-/*
- * "data magic" is a long version of "status magic" where the message
- * payload actually contains data to be passed in reply to certain messages
- */
-#define R2NET_MSG_DATA_MAGIC ((u16)0xfa59)
-
-/* we're delaying our quorum decision so that heartbeat will have timed
- * out truly dead nodes by the time we come around to making decisions
- * on their number */
-#define R2NET_QUORUM_DELAY_MS \
- ((r2hb_dead_threshold + 2) * R2HB_REGION_TIMEOUT_MS)
-
-/*
- * This version number represents quite a lot, unfortunately. It not
- * only represents the raw network message protocol on the wire but also
- * locking semantics of the file system using the protocol. It should
- * be somewhere else, I'm sure, but right now it isn't.
- *
- * With version 11, we separate out the filesystem locking portion. The
- * filesystem now has a major.minor version it negotiates. Version 11
- * introduces this negotiation to the r2dlm protocol, and as such the
- * version here in tcp_internal.h should not need to be bumped for
- * filesystem locking changes.
- *
- * New in version 11
- * - Negotiation of filesystem locking in the dlm join.
- *
- * New in version 10:
- * - Meta/data locks combined
- *
- * New in version 9:
- * - All votes removed
- *
- * New in version 8:
- * - Replace delete inode votes with a cluster lock
- *
- * New in version 7:
- * - DLM join domain includes the live nodemap
- *
- * New in version 6:
- * - DLM lockres remote refcount fixes.
- *
- * New in version 5:
- * - Network timeout checking protocol
- *
- * New in version 4:
- * - Remove i_generation from lock names for better stat performance.
- *
- * New in version 3:
- * - Replace dentry votes with a cluster lock
- *
- * New in version 2:
- * - full 64 bit i_size in the metadata lock lvbs
- * - introduction of "rw" lock and pushing meta/data locking down
- */
-#define R2NET_PROTOCOL_VERSION 11ULL
-struct r2net_handshake {
- __be64 protocol_version;
- __be64 connector_id;
- __be32 r2hb_heartbeat_timeout_ms;
- __be32 r2net_idle_timeout_ms;
- __be32 r2net_keepalive_delay_ms;
- __be32 r2net_reconnect_delay_ms;
-};
-
-struct r2net_node {
- /* this is never called from int/bh */
- spinlock_t nn_lock;
-
- /* set the moment an sc is allocated and a connect is started */
- struct r2net_sock_container *nn_sc;
- /* _valid is only set after the handshake passes and tx can happen */
- unsigned nn_sc_valid:1;
- /* if this is set tx just returns it */
- int nn_persistent_error;
- /* It is only set to 1 after the idle timeout. */
- atomic_t nn_timeout;
-
- /* threads waiting for an sc to arrive wait on the wq for generation
- * to increase. it is increased when a connecting socket succeeds
- * or fails or when an accepted socket is attached. */
- wait_queue_head_t nn_sc_wq;
-
- struct idr nn_status_idr;
- struct list_head nn_status_list;
-
- /* connects are attempted from when heartbeat comes up until either hb
- * goes down, the node is unconfigured, no connect attempts succeed
- * before R2NET_CONN_IDLE_DELAY, or a connect succeeds. connect_work
- * is queued from set_nn_state both from hb up and from itself if a
- * connect attempt fails and so can be self-arming. shutdown is
- * careful to first mark the nn such that no connects will be attempted
- * before canceling delayed connect work and flushing the queue. */
- struct delayed_work nn_connect_work;
- unsigned long nn_last_connect_attempt;
-
- /* this is queued as nodes come up and is canceled when a connection is
- * established. this expiring gives up on the node and errors out
- * transmits */
- struct delayed_work nn_connect_expired;
-
- /* after we give up on a socket we wait a while before deciding
- * that it is still heartbeating and that we should do some
- * quorum work */
- struct delayed_work nn_still_up;
-};
-
-struct r2net_sock_container {
- struct kref sc_kref;
- /* the next two are valid for the life time of the sc */
- struct socket *sc_sock;
- struct r2nm_node *sc_node;
-
- /* all of these sc work structs hold refs on the sc while they are
- * queued. they should not be able to ref a freed sc. the teardown
- * race is with r2net_wq destruction in r2net_stop_listening() */
-
- /* rx and connect work are generated from socket callbacks. sc
- * shutdown removes the callbacks and then flushes the work queue */
- struct work_struct sc_rx_work;
- struct work_struct sc_connect_work;
- /* shutdown work is triggered in two ways. the simple way is
- * for a code path calls ensure_shutdown which gets a lock, removes
- * the sc from the nn, and queues the work. in this case the
- * work is single-shot. the work is also queued from a sock
- * callback, though, and in this case the work will find the sc
- * still on the nn and will call ensure_shutdown itself.. this
- * ends up triggering the shutdown work again, though nothing
- * will be done in that second iteration. so work queue teardown
- * has to be careful to remove the sc from the nn before waiting
- * on the work queue so that the shutdown work doesn't remove the
- * sc and rearm itself.
- */
- struct work_struct sc_shutdown_work;
-
- struct timer_list sc_idle_timeout;
- struct delayed_work sc_keepalive_work;
-
- unsigned sc_handshake_ok:1;
-
- struct page *sc_page;
- size_t sc_page_off;
-
- /* original handlers for the sockets */
- void (*sc_state_change)(struct sock *sk);
- void (*sc_data_ready)(struct sock *sk, int bytes);
-
- u32 sc_msg_key;
- u16 sc_msg_type;
-
-#ifdef CONFIG_DEBUG_FS
- struct list_head sc_net_debug_item;
- ktime_t sc_tv_timer;
- ktime_t sc_tv_data_ready;
- ktime_t sc_tv_advance_start;
- ktime_t sc_tv_advance_stop;
- ktime_t sc_tv_func_start;
- ktime_t sc_tv_func_stop;
-#endif
-#ifdef CONFIG_RAMSTER_FS_STATS
- ktime_t sc_tv_acquiry_total;
- ktime_t sc_tv_send_total;
- ktime_t sc_tv_status_total;
- u32 sc_send_count;
- u32 sc_recv_count;
- ktime_t sc_tv_process_total;
-#endif
- struct mutex sc_send_lock;
-};
-
-struct r2net_msg_handler {
- struct rb_node nh_node;
- u32 nh_max_len;
- u32 nh_msg_type;
- u32 nh_key;
- r2net_msg_handler_func *nh_func;
- r2net_msg_handler_func *nh_func_data;
- r2net_post_msg_handler_func
- *nh_post_func;
- struct kref nh_kref;
- struct list_head nh_unregister_item;
-};
-
-enum r2net_system_error {
- R2NET_ERR_NONE = 0,
- R2NET_ERR_NO_HNDLR,
- R2NET_ERR_OVERFLOW,
- R2NET_ERR_DIED,
- R2NET_ERR_MAX
-};
-
-struct r2net_status_wait {
- enum r2net_system_error ns_sys_status;
- s32 ns_status;
- int ns_id;
- wait_queue_head_t ns_wq;
- struct list_head ns_node_item;
-};
-
-#ifdef CONFIG_DEBUG_FS
-/* just for state dumps */
-struct r2net_send_tracking {
- struct list_head st_net_debug_item;
- struct task_struct *st_task;
- struct r2net_sock_container *st_sc;
- u32 st_id;
- u32 st_msg_type;
- u32 st_msg_key;
- u8 st_node;
- ktime_t st_sock_time;
- ktime_t st_send_time;
- ktime_t st_status_time;
-};
-#else
-struct r2net_send_tracking {
- u32 dummy;
-};
-#endif /* CONFIG_DEBUG_FS */
-
-#endif /* R2CLUSTER_TCP_INTERNAL_H */
diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
deleted file mode 100644
index d7e51e4152e..00000000000
--- a/drivers/staging/zcache/tmem.c
+++ /dev/null
@@ -1,898 +0,0 @@
-/*
- * In-kernel transcendent memory (generic implementation)
- *
- * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
- *
- * The primary purpose of Transcendent Memory ("tmem") is to map object-oriented
- * "handles" (triples containing a pool id, an object id, and an index) to
- * pages in a page-accessible memory (PAM). Tmem references the PAM pages via
- * an abstract "pampd" (PAM page-descriptor), which can be operated on by a
- * set of functions (pamops). Each pampd contains some representation of
- * PAGE_SIZE bytes worth of data. For those familiar with key-value stores,
- * the tmem handle is a three-level hierarchical key, and the value is always
- * reconstituted (but not necessarily stored) as PAGE_SIZE bytes and is
- * referenced in the datastore by the pampd. The hierarchy is required
- * to ensure that certain invalidation functions can be performed efficiently
- * (i.e. flush all indexes associated with this object_id, or
- * flush all objects associated with this pool).
- *
- * Tmem must support potentially millions of pages and must be able to insert,
- * find, and delete these pages at a potential frequency of thousands per
- * second concurrently across many CPUs, (and, if used with KVM, across many
- * vcpus across many guests). Tmem is tracked with a hierarchy of data
- * structures, organized by the elements in the handle-tuple: pool_id,
- * object_id, and page index. One or more "clients" (e.g. guests) each
- * provide one or more tmem_pools. Each pool, contains a hash table of
- * rb_trees of tmem_objs. Each tmem_obj contains a radix-tree-like tree
- * of pointers, with intermediate nodes called tmem_objnodes. Each leaf
- * pointer in this tree points to a pampd, which is accessible only through
- * a small set of callbacks registered by the PAM implementation (see
- * tmem_register_pamops). Tmem only needs memory allocation for objs
- * and objnodes, and this is done via a set of callbacks that must be
- * registered by the tmem host implementation (e.g. see tmem_register_hostops).
- */
-
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/atomic.h>
-#include <linux/export.h>
-#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
-#include <linux/delay.h>
-#endif
-
-#include "tmem.h"
-
-/* data structure sentinels used for debugging... see tmem.h */
-#define POOL_SENTINEL 0x87658765
-#define OBJ_SENTINEL 0x12345678
-#define OBJNODE_SENTINEL 0xfedcba09
-
-/*
- * A tmem host implementation must use this function to register callbacks
- * for memory allocation.
- */
-static struct tmem_hostops tmem_hostops;
-
-static void tmem_objnode_tree_init(void);
-
-void tmem_register_hostops(struct tmem_hostops *m)
-{
- tmem_objnode_tree_init();
- tmem_hostops = *m;
-}
-
-/*
- * A tmem host implementation must use this function to register
- * callbacks for a page-accessible memory (PAM) implementation.
- */
-static struct tmem_pamops tmem_pamops;
-
-void tmem_register_pamops(struct tmem_pamops *m)
-{
- tmem_pamops = *m;
-}
-
-/*
- * Oids are potentially very sparse and tmem_objs may have an indeterminately
- * short life, being added and deleted at a relatively high frequency.
- * So an rb_tree is an ideal data structure to manage tmem_objs. But because
- * of the potentially huge number of tmem_objs, each pool manages a hashtable
- * of rb_trees to reduce search, insert, delete, and rebalancing time.
- * Each hashbucket also has a lock to manage concurrent access and no
- * searches, inserts, or deletions can be performed unless the lock is held.
- * As a result, care must be taken to ensure tmem routines are not called
- * recursively; the vast majority of the time, a recursive call may work
- * but a deadlock will occur a small fraction of the time due to the
- * hashbucket lock.
- *
- * The following routines manage tmem_objs. In all of these routines,
- * the hashbucket lock is already held.
- */
-
-/* Search for object==oid in pool, returns object if found. */
-static struct tmem_obj *__tmem_obj_find(struct tmem_hashbucket *hb,
- struct tmem_oid *oidp,
- struct rb_node **parent,
- struct rb_node ***link)
-{
- struct rb_node *_parent = NULL, **rbnode;
- struct tmem_obj *obj = NULL;
-
- rbnode = &hb->obj_rb_root.rb_node;
- while (*rbnode) {
- BUG_ON(RB_EMPTY_NODE(*rbnode));
- _parent = *rbnode;
- obj = rb_entry(*rbnode, struct tmem_obj,
- rb_tree_node);
- switch (tmem_oid_compare(oidp, &obj->oid)) {
- case 0: /* equal */
- goto out;
- case -1:
- rbnode = &(*rbnode)->rb_left;
- break;
- case 1:
- rbnode = &(*rbnode)->rb_right;
- break;
- }
- }
-
- if (parent)
- *parent = _parent;
- if (link)
- *link = rbnode;
- obj = NULL;
-out:
- return obj;
-}
-
-static struct tmem_obj *tmem_obj_find(struct tmem_hashbucket *hb,
- struct tmem_oid *oidp)
-{
- return __tmem_obj_find(hb, oidp, NULL, NULL);
-}
-
-static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *, bool);
-
-/* Free an object that has no more pampds in it. */
-static void tmem_obj_free(struct tmem_obj *obj, struct tmem_hashbucket *hb)
-{
- struct tmem_pool *pool;
-
- BUG_ON(obj == NULL);
- ASSERT_SENTINEL(obj, OBJ);
- BUG_ON(obj->pampd_count > 0);
- pool = obj->pool;
- BUG_ON(pool == NULL);
- if (obj->objnode_tree_root != NULL) /* may be "stump" with no leaves */
- tmem_pampd_destroy_all_in_obj(obj, false);
- BUG_ON(obj->objnode_tree_root != NULL);
- BUG_ON((long)obj->objnode_count != 0);
- atomic_dec(&pool->obj_count);
- BUG_ON(atomic_read(&pool->obj_count) < 0);
- INVERT_SENTINEL(obj, OBJ);
- obj->pool = NULL;
- tmem_oid_set_invalid(&obj->oid);
- rb_erase(&obj->rb_tree_node, &hb->obj_rb_root);
-}
-
-/*
- * Initialize and insert a tmem_object_root (called only if find failed).
- */
-static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
- struct tmem_pool *pool,
- struct tmem_oid *oidp)
-{
- struct rb_root *root = &hb->obj_rb_root;
- struct rb_node **new = NULL, *parent = NULL;
-
- BUG_ON(pool == NULL);
- atomic_inc(&pool->obj_count);
- obj->objnode_tree_height = 0;
- obj->objnode_tree_root = NULL;
- obj->pool = pool;
- obj->oid = *oidp;
- obj->objnode_count = 0;
- obj->pampd_count = 0;
-#ifdef CONFIG_RAMSTER
- if (tmem_pamops.new_obj != NULL)
- (*tmem_pamops.new_obj)(obj);
-#endif
- SET_SENTINEL(obj, OBJ);
-
- if (__tmem_obj_find(hb, oidp, &parent, &new))
- BUG();
-
- rb_link_node(&obj->rb_tree_node, parent, new);
- rb_insert_color(&obj->rb_tree_node, root);
-}
-
-/*
- * Tmem is managed as a set of tmem_pools with certain attributes, such as
- * "ephemeral" vs "persistent". These attributes apply to all tmem_objs
- * and all pampds that belong to a tmem_pool. A tmem_pool is created
- * or deleted relatively rarely (for example, when a filesystem is
- * mounted or unmounted).
- */
-
-/* flush all data from a pool and, optionally, free it */
-static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
-{
- struct rb_node *rbnode;
- struct tmem_obj *obj;
- struct tmem_hashbucket *hb = &pool->hashbucket[0];
- int i;
-
- BUG_ON(pool == NULL);
- for (i = 0; i < TMEM_HASH_BUCKETS; i++, hb++) {
- spin_lock(&hb->lock);
- rbnode = rb_first(&hb->obj_rb_root);
- while (rbnode != NULL) {
- obj = rb_entry(rbnode, struct tmem_obj, rb_tree_node);
- rbnode = rb_next(rbnode);
- tmem_pampd_destroy_all_in_obj(obj, true);
- tmem_obj_free(obj, hb);
- (*tmem_hostops.obj_free)(obj, pool);
- }
- spin_unlock(&hb->lock);
- }
- if (destroy)
- list_del(&pool->pool_list);
-}
-
-/*
- * A tmem_obj contains a radix-tree-like tree in which the intermediate
- * nodes are called tmem_objnodes. (The kernel lib/radix-tree.c implementation
- * is very specialized and tuned for specific uses and is not particularly
- * suited for use from this code, though some code from the core algorithms has
- * been reused, thus the copyright notices below). Each tmem_objnode contains
- * a set of pointers which point to either a set of intermediate tmem_objnodes
- * or a set of pampds.
- *
- * Portions Copyright (C) 2001 Momchil Velikov
- * Portions Copyright (C) 2001 Christoph Hellwig
- * Portions Copyright (C) 2005 SGI, Christoph Lameter <clameter@sgi.com>
- */
-
-struct tmem_objnode_tree_path {
- struct tmem_objnode *objnode;
- int offset;
-};
-
-/* objnode height_to_maxindex translation */
-static unsigned long tmem_objnode_tree_h2max[OBJNODE_TREE_MAX_PATH + 1];
-
-static void tmem_objnode_tree_init(void)
-{
- unsigned int ht, tmp;
-
- for (ht = 0; ht < ARRAY_SIZE(tmem_objnode_tree_h2max); ht++) {
- tmp = ht * OBJNODE_TREE_MAP_SHIFT;
- if (tmp >= OBJNODE_TREE_INDEX_BITS)
- tmem_objnode_tree_h2max[ht] = ~0UL;
- else
- tmem_objnode_tree_h2max[ht] =
- (~0UL >> (OBJNODE_TREE_INDEX_BITS - tmp - 1)) >> 1;
- }
-}
-
-static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
-{
- struct tmem_objnode *objnode;
-
- ASSERT_SENTINEL(obj, OBJ);
- BUG_ON(obj->pool == NULL);
- ASSERT_SENTINEL(obj->pool, POOL);
- objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
- if (unlikely(objnode == NULL))
- goto out;
- objnode->obj = obj;
- SET_SENTINEL(objnode, OBJNODE);
- memset(&objnode->slots, 0, sizeof(objnode->slots));
- objnode->slots_in_use = 0;
- obj->objnode_count++;
-out:
- return objnode;
-}
-
-static void tmem_objnode_free(struct tmem_objnode *objnode)
-{
- struct tmem_pool *pool;
- int i;
-
- BUG_ON(objnode == NULL);
- for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++)
- BUG_ON(objnode->slots[i] != NULL);
- ASSERT_SENTINEL(objnode, OBJNODE);
- INVERT_SENTINEL(objnode, OBJNODE);
- BUG_ON(objnode->obj == NULL);
- ASSERT_SENTINEL(objnode->obj, OBJ);
- pool = objnode->obj->pool;
- BUG_ON(pool == NULL);
- ASSERT_SENTINEL(pool, POOL);
- objnode->obj->objnode_count--;
- objnode->obj = NULL;
- (*tmem_hostops.objnode_free)(objnode, pool);
-}
-
-/*
- * Lookup index in object and return associated pampd (or NULL if not found).
- */
-static void **__tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
-{
- unsigned int height, shift;
- struct tmem_objnode **slot = NULL;
-
- BUG_ON(obj == NULL);
- ASSERT_SENTINEL(obj, OBJ);
- BUG_ON(obj->pool == NULL);
- ASSERT_SENTINEL(obj->pool, POOL);
-
- height = obj->objnode_tree_height;
- if (index > tmem_objnode_tree_h2max[obj->objnode_tree_height])
- goto out;
- if (height == 0 && obj->objnode_tree_root) {
- slot = &obj->objnode_tree_root;
- goto out;
- }
- shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
- slot = &obj->objnode_tree_root;
- while (height > 0) {
- if (*slot == NULL)
- goto out;
- slot = (struct tmem_objnode **)
- ((*slot)->slots +
- ((index >> shift) & OBJNODE_TREE_MAP_MASK));
- shift -= OBJNODE_TREE_MAP_SHIFT;
- height--;
- }
-out:
- return slot != NULL ? (void **)slot : NULL;
-}
-
-static void *tmem_pampd_lookup_in_obj(struct tmem_obj *obj, uint32_t index)
-{
- struct tmem_objnode **slot;
-
- slot = (struct tmem_objnode **)__tmem_pampd_lookup_in_obj(obj, index);
- return slot != NULL ? *slot : NULL;
-}
-
-#ifdef CONFIG_RAMSTER
-static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
- void *new_pampd, bool no_free)
-{
- struct tmem_objnode **slot;
- void *ret = NULL;
-
- slot = (struct tmem_objnode **)__tmem_pampd_lookup_in_obj(obj, index);
- if ((slot != NULL) && (*slot != NULL)) {
- void *old_pampd = *(void **)slot;
- *(void **)slot = new_pampd;
- if (!no_free)
- (*tmem_pamops.free)(old_pampd, obj->pool,
- NULL, 0, false);
- ret = new_pampd;
- }
- return ret;
-}
-#endif
-
-static int tmem_pampd_add_to_obj(struct tmem_obj *obj, uint32_t index,
- void *pampd)
-{
- int ret = 0;
- struct tmem_objnode *objnode = NULL, *newnode, *slot;
- unsigned int height, shift;
- int offset = 0;
-
- /* if necessary, extend the tree to be higher */
- if (index > tmem_objnode_tree_h2max[obj->objnode_tree_height]) {
- height = obj->objnode_tree_height + 1;
- if (index > tmem_objnode_tree_h2max[height])
- while (index > tmem_objnode_tree_h2max[height])
- height++;
- if (obj->objnode_tree_root == NULL) {
- obj->objnode_tree_height = height;
- goto insert;
- }
- do {
- newnode = tmem_objnode_alloc(obj);
- if (!newnode) {
- ret = -ENOMEM;
- goto out;
- }
- newnode->slots[0] = obj->objnode_tree_root;
- newnode->slots_in_use = 1;
- obj->objnode_tree_root = newnode;
- obj->objnode_tree_height++;
- } while (height > obj->objnode_tree_height);
- }
-insert:
- slot = obj->objnode_tree_root;
- height = obj->objnode_tree_height;
- shift = (height-1) * OBJNODE_TREE_MAP_SHIFT;
- while (height > 0) {
- if (slot == NULL) {
- /* add a child objnode. */
- slot = tmem_objnode_alloc(obj);
- if (!slot) {
- ret = -ENOMEM;
- goto out;
- }
- if (objnode) {
-
- objnode->slots[offset] = slot;
- objnode->slots_in_use++;
- } else
- obj->objnode_tree_root = slot;
- }
- /* go down a level */
- offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
- objnode = slot;
- slot = objnode->slots[offset];
- shift -= OBJNODE_TREE_MAP_SHIFT;
- height--;
- }
- BUG_ON(slot != NULL);
- if (objnode) {
- objnode->slots_in_use++;
- objnode->slots[offset] = pampd;
- } else
- obj->objnode_tree_root = pampd;
- obj->pampd_count++;
-out:
- return ret;
-}
-
-static void *tmem_pampd_delete_from_obj(struct tmem_obj *obj, uint32_t index)
-{
- struct tmem_objnode_tree_path path[OBJNODE_TREE_MAX_PATH + 1];
- struct tmem_objnode_tree_path *pathp = path;
- struct tmem_objnode *slot = NULL;
- unsigned int height, shift;
- int offset;
-
- BUG_ON(obj == NULL);
- ASSERT_SENTINEL(obj, OBJ);
- BUG_ON(obj->pool == NULL);
- ASSERT_SENTINEL(obj->pool, POOL);
- height = obj->objnode_tree_height;
- if (index > tmem_objnode_tree_h2max[height])
- goto out;
- slot = obj->objnode_tree_root;
- if (height == 0 && obj->objnode_tree_root) {
- obj->objnode_tree_root = NULL;
- goto out;
- }
- shift = (height - 1) * OBJNODE_TREE_MAP_SHIFT;
- pathp->objnode = NULL;
- do {
- if (slot == NULL)
- goto out;
- pathp++;
- offset = (index >> shift) & OBJNODE_TREE_MAP_MASK;
- pathp->offset = offset;
- pathp->objnode = slot;
- slot = slot->slots[offset];
- shift -= OBJNODE_TREE_MAP_SHIFT;
- height--;
- } while (height > 0);
- if (slot == NULL)
- goto out;
- while (pathp->objnode) {
- pathp->objnode->slots[pathp->offset] = NULL;
- pathp->objnode->slots_in_use--;
- if (pathp->objnode->slots_in_use) {
- if (pathp->objnode == obj->objnode_tree_root) {
- while (obj->objnode_tree_height > 0 &&
- obj->objnode_tree_root->slots_in_use == 1 &&
- obj->objnode_tree_root->slots[0]) {
- struct tmem_objnode *to_free =
- obj->objnode_tree_root;
-
- obj->objnode_tree_root =
- to_free->slots[0];
- obj->objnode_tree_height--;
- to_free->slots[0] = NULL;
- to_free->slots_in_use = 0;
- tmem_objnode_free(to_free);
- }
- }
- goto out;
- }
- tmem_objnode_free(pathp->objnode); /* 0 slots used, free it */
- pathp--;
- }
- obj->objnode_tree_height = 0;
- obj->objnode_tree_root = NULL;
-
-out:
- if (slot != NULL)
- obj->pampd_count--;
- BUG_ON(obj->pampd_count < 0);
- return slot;
-}
-
-/* Recursively walk the objnode_tree destroying pampds and objnodes. */
-static void tmem_objnode_node_destroy(struct tmem_obj *obj,
- struct tmem_objnode *objnode,
- unsigned int ht)
-{
- int i;
-
- if (ht == 0)
- return;
- for (i = 0; i < OBJNODE_TREE_MAP_SIZE; i++) {
- if (objnode->slots[i]) {
- if (ht == 1) {
- obj->pampd_count--;
- (*tmem_pamops.free)(objnode->slots[i],
- obj->pool, NULL, 0, true);
- objnode->slots[i] = NULL;
- continue;
- }
- tmem_objnode_node_destroy(obj, objnode->slots[i], ht-1);
- tmem_objnode_free(objnode->slots[i]);
- objnode->slots[i] = NULL;
- }
- }
-}
-
-static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
- bool pool_destroy)
-{
- if (obj->objnode_tree_root == NULL)
- return;
- if (obj->objnode_tree_height == 0) {
- obj->pampd_count--;
- (*tmem_pamops.free)(obj->objnode_tree_root,
- obj->pool, NULL, 0, true);
- } else {
- tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
- obj->objnode_tree_height);
- tmem_objnode_free(obj->objnode_tree_root);
- obj->objnode_tree_height = 0;
- }
- obj->objnode_tree_root = NULL;
-#ifdef CONFIG_RAMSTER
- if (tmem_pamops.free_obj != NULL)
- (*tmem_pamops.free_obj)(obj->pool, obj, pool_destroy);
-#endif
-}
-
-/*
- * Tmem is operated on by a set of well-defined actions:
- * "put", "get", "flush", "flush_object", "new pool" and "destroy pool".
- * (The tmem ABI allows for subpages and exchanges but these operations
- * are not included in this implementation.)
- *
- * These "tmem core" operations are implemented in the following functions.
- */
-
-/*
- * "Put" a page, e.g. associate the passed pampd with the passed handle.
- * Tmem_put is complicated by a corner case: What if a page with matching
- * handle already exists in tmem? To guarantee coherency, one of two
- * actions is necessary: Either the data for the page must be overwritten,
- * or the page must be "flushed" so that the data is not accessible to a
- * subsequent "get". Since these "duplicate puts" are relatively rare,
- * this implementation always flushes for simplicity.
- */
-int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
- bool raw, void *pampd_to_use)
-{
- struct tmem_obj *obj = NULL, *objfound = NULL, *objnew = NULL;
- void *pampd = NULL, *pampd_del = NULL;
- int ret = -ENOMEM;
- struct tmem_hashbucket *hb;
-
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- obj = objfound = tmem_obj_find(hb, oidp);
- if (obj != NULL) {
- pampd = tmem_pampd_lookup_in_obj(objfound, index);
- if (pampd != NULL) {
- /* if found, is a dup put, flush the old one */
- pampd_del = tmem_pampd_delete_from_obj(obj, index);
- BUG_ON(pampd_del != pampd);
- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
- if (obj->pampd_count == 0) {
- objnew = obj;
- objfound = NULL;
- }
- pampd = NULL;
- }
- } else {
- obj = objnew = (*tmem_hostops.obj_alloc)(pool);
- if (unlikely(obj == NULL)) {
- ret = -ENOMEM;
- goto out;
- }
- tmem_obj_init(obj, hb, pool, oidp);
- }
- BUG_ON(obj == NULL);
- BUG_ON(((objnew != obj) && (objfound != obj)) || (objnew == objfound));
- pampd = pampd_to_use;
- BUG_ON(pampd_to_use == NULL);
- ret = tmem_pampd_add_to_obj(obj, index, pampd);
- if (unlikely(ret == -ENOMEM))
- /* may have partially built objnode tree ("stump") */
- goto delete_and_free;
- (*tmem_pamops.create_finish)(pampd, is_ephemeral(pool));
- goto out;
-
-delete_and_free:
- (void)tmem_pampd_delete_from_obj(obj, index);
- if (pampd)
- (*tmem_pamops.free)(pampd, pool, NULL, 0, true);
- if (objnew) {
- tmem_obj_free(objnew, hb);
- (*tmem_hostops.obj_free)(objnew, pool);
- }
-out:
- spin_unlock(&hb->lock);
- return ret;
-}
-
-#ifdef CONFIG_RAMSTER
-/*
- * For ramster only: The following routines provide a two-step sequence
- * to allow the caller to replace a pampd in the tmem data structures with
- * another pampd. Here, we lookup the passed handle and, if found, return the
- * associated pampd and object, leaving the hashbucket locked and returning
- * a reference to it. The caller is expected to immediately call the
- * matching tmem_localify_finish routine, which handles the replacement
- * and unlocks the hashbucket.
- */
-void *tmem_localify_get_pampd(struct tmem_pool *pool, struct tmem_oid *oidp,
- uint32_t index, struct tmem_obj **ret_obj,
- void **saved_hb)
-{
- struct tmem_hashbucket *hb;
- struct tmem_obj *obj = NULL;
- void *pampd = NULL;
-
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- obj = tmem_obj_find(hb, oidp);
- if (likely(obj != NULL))
- pampd = tmem_pampd_lookup_in_obj(obj, index);
- *ret_obj = obj;
- *saved_hb = (void *)hb;
- /* note, hashbucket remains locked */
- return pampd;
-}
-EXPORT_SYMBOL_GPL(tmem_localify_get_pampd);
-
-void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
- void *pampd, void *saved_hb, bool delete)
-{
- struct tmem_hashbucket *hb = (struct tmem_hashbucket *)saved_hb;
-
- BUG_ON(!spin_is_locked(&hb->lock));
- if (pampd != NULL) {
- BUG_ON(obj == NULL);
- (void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
- (*tmem_pamops.create_finish)(pampd, is_ephemeral(obj->pool));
- } else if (delete) {
- BUG_ON(obj == NULL);
- (void)tmem_pampd_delete_from_obj(obj, index);
- }
- spin_unlock(&hb->lock);
-}
-EXPORT_SYMBOL_GPL(tmem_localify_finish);
-
-/*
- * For ramster only. Helper function to support asynchronous tmem_get.
- */
-static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
- struct tmem_pool *pool, struct tmem_oid *oidp,
- uint32_t index, bool free, char *data)
-{
- void *old_pampd = *ppampd, *new_pampd = NULL;
- bool intransit = false;
- int ret = 0;
-
- if (!is_ephemeral(pool))
- new_pampd = (*tmem_pamops.repatriate_preload)(
- old_pampd, pool, oidp, index, &intransit);
- if (intransit)
- ret = -EAGAIN;
- else if (new_pampd != NULL)
- *ppampd = new_pampd;
- /* must release the hb->lock else repatriate can't sleep */
- spin_unlock(&hb->lock);
- if (!intransit)
- ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
- oidp, index, free, data);
- if (ret == -EAGAIN) {
- /* rare I think, but should cond_resched()??? */
- usleep_range(10, 1000);
- } else if (ret == -ENOTCONN || ret == -EHOSTDOWN) {
- ret = -1;
- } else if (ret != 0 && ret != -ENOENT) {
- ret = -1;
- }
- /* note hb->lock has now been unlocked */
- return ret;
-}
-
-/*
- * For ramster only. If a page in tmem matches the handle, replace the
- * page so that any subsequent "get" gets the new page. Returns 0 if
- * there was a page to replace, else returns -1.
- */
-int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
- uint32_t index, void *new_pampd)
-{
- struct tmem_obj *obj;
- int ret = -1;
- struct tmem_hashbucket *hb;
-
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- obj = tmem_obj_find(hb, oidp);
- if (obj == NULL)
- goto out;
- new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
- /* if we bug here, pamops wasn't properly set up for ramster */
- BUG_ON(tmem_pamops.replace_in_obj == NULL);
- ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
-out:
- spin_unlock(&hb->lock);
- return ret;
-}
-EXPORT_SYMBOL_GPL(tmem_replace);
-#endif
-
-/*
- * "Get" a page, e.g. if a pampd can be found matching the passed handle,
- * use a pamops callback to recreate the page from the pampd with the
- * matching handle. By tmem definition, when a "get" is successful on
- * an ephemeral page, the page is "flushed", and when a "get" is successful
- * on a persistent page, the page is retained in tmem. Note that to preserve
- * coherency, "get" can never be skipped if tmem contains the data.
- * That is, if a get is done with a certain handle and fails, any
- * subsequent "get" must also fail (unless of course there is a
- * "put" done with the same handle).
- */
-int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
- char *data, size_t *sizep, bool raw, int get_and_free)
-{
- struct tmem_obj *obj;
- void *pampd = NULL;
- bool ephemeral = is_ephemeral(pool);
- int ret = -1;
- struct tmem_hashbucket *hb;
- bool free = (get_and_free == 1) || ((get_and_free == 0) && ephemeral);
- bool lock_held = false;
- void **ppampd;
-
- do {
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- lock_held = true;
- obj = tmem_obj_find(hb, oidp);
- if (obj == NULL)
- goto out;
- ppampd = __tmem_pampd_lookup_in_obj(obj, index);
- if (ppampd == NULL)
- goto out;
-#ifdef CONFIG_RAMSTER
- if ((tmem_pamops.is_remote != NULL) &&
- tmem_pamops.is_remote(*ppampd)) {
- ret = tmem_repatriate(ppampd, hb, pool, oidp,
- index, free, data);
- /* tmem_repatriate releases hb->lock */
- lock_held = false;
- *sizep = PAGE_SIZE;
- if (ret != -EAGAIN)
- goto out;
- }
-#endif
- } while (ret == -EAGAIN);
- if (free)
- pampd = tmem_pampd_delete_from_obj(obj, index);
- else
- pampd = tmem_pampd_lookup_in_obj(obj, index);
- if (pampd == NULL)
- goto out;
- if (free) {
- if (obj->pampd_count == 0) {
- tmem_obj_free(obj, hb);
- (*tmem_hostops.obj_free)(obj, pool);
- obj = NULL;
- }
- }
- if (free)
- ret = (*tmem_pamops.get_data_and_free)(
- data, sizep, raw, pampd, pool, oidp, index);
- else
- ret = (*tmem_pamops.get_data)(
- data, sizep, raw, pampd, pool, oidp, index);
- if (ret < 0)
- goto out;
- ret = 0;
-out:
- if (lock_held)
- spin_unlock(&hb->lock);
- return ret;
-}
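-
-/*
- * Illustrative sketch (hypothetical helper, not part of the original
- * file): a host-side wrapper for an "exclusive" ephemeral get.  With
- * get_and_free == 0 a successful get frees the page from tmem only if
- * the pool is ephemeral, as described above; the interpretation of the
- * data/raw arguments is up to the host's pamops.
- */
-static int __maybe_unused example_exclusive_get(struct tmem_pool *pool,
- struct tmem_oid *oidp, uint32_t index, char *data)
-{
- size_t size = PAGE_SIZE;
-
- return tmem_get(pool, oidp, index, data, &size, false, 0);
-}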
-
-/*
- * If a page in tmem matches the handle, "flush" this page from tmem such
- * that any subsequent "get" does not succeed (unless, of course, there
- * was another "put" with the same handle).
- */
-int tmem_flush_page(struct tmem_pool *pool,
- struct tmem_oid *oidp, uint32_t index)
-{
- struct tmem_obj *obj;
- void *pampd;
- int ret = -1;
- struct tmem_hashbucket *hb;
-
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- obj = tmem_obj_find(hb, oidp);
- if (obj == NULL)
- goto out;
- pampd = tmem_pampd_delete_from_obj(obj, index);
- if (pampd == NULL)
- goto out;
- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
- if (obj->pampd_count == 0) {
- tmem_obj_free(obj, hb);
- (*tmem_hostops.obj_free)(obj, pool);
- }
- ret = 0;
-
-out:
- spin_unlock(&hb->lock);
- return ret;
-}
-
-/*
- * "Flush" all pages in tmem matching this oid.
- */
-int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
-{
- struct tmem_obj *obj;
- struct tmem_hashbucket *hb;
- int ret = -1;
-
- hb = &pool->hashbucket[tmem_oid_hash(oidp)];
- spin_lock(&hb->lock);
- obj = tmem_obj_find(hb, oidp);
- if (obj == NULL)
- goto out;
- tmem_pampd_destroy_all_in_obj(obj, false);
- tmem_obj_free(obj, hb);
- (*tmem_hostops.obj_free)(obj, pool);
- ret = 0;
-
-out:
- spin_unlock(&hb->lock);
- return ret;
-}
-
-/*
- * "Flush" all pages (and tmem_objs) from this tmem_pool and disable
- * all subsequent access to this tmem_pool.
- */
-int tmem_destroy_pool(struct tmem_pool *pool)
-{
- int ret = -1;
-
- if (pool == NULL)
- goto out;
- tmem_pool_flush(pool, 1);
- ret = 0;
-out:
- return ret;
-}
-
-static LIST_HEAD(tmem_global_pool_list);
-
-/*
- * Initialize a new tmem_pool with the provided flags; the pool id
- * is assigned by the tmem host implementation.
- */
-void tmem_new_pool(struct tmem_pool *pool, uint32_t flags)
-{
- int persistent = flags & TMEM_POOL_PERSIST;
- int shared = flags & TMEM_POOL_SHARED;
- struct tmem_hashbucket *hb = &pool->hashbucket[0];
- int i;
-
- for (i = 0; i < TMEM_HASH_BUCKETS; i++, hb++) {
- hb->obj_rb_root = RB_ROOT;
- spin_lock_init(&hb->lock);
- }
- INIT_LIST_HEAD(&pool->pool_list);
- atomic_set(&pool->obj_count, 0);
- SET_SENTINEL(pool, POOL);
- list_add_tail(&pool->pool_list, &tmem_global_pool_list);
- pool->persistent = persistent;
- pool->shared = shared;
-}
diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
deleted file mode 100644
index d128ce290f1..00000000000
--- a/drivers/staging/zcache/tmem.h
+++ /dev/null
@@ -1,259 +0,0 @@
-/*
- * tmem.h
- *
- * Transcendent memory
- *
- * Copyright (c) 2009-2012, Dan Magenheimer, Oracle Corp.
- */
-
-#ifndef _TMEM_H_
-#define _TMEM_H_
-
-#include <linux/types.h>
-#include <linux/highmem.h>
-#include <linux/hash.h>
-#include <linux/atomic.h>
-
-/*
- * These are defined by the Xen<->Linux ABI so should remain consistent
- */
-#define TMEM_POOL_PERSIST 1
-#define TMEM_POOL_SHARED 2
-#define TMEM_POOL_PRECOMPRESSED 4
-#define TMEM_POOL_PAGESIZE_SHIFT 4
-#define TMEM_POOL_PAGESIZE_MASK 0xf
-#define TMEM_POOL_RESERVED_BITS 0x00ffff00
-
-/*
- * sentinels have proven very useful for debugging but can be removed
- * or disabled before final merge.
- */
-#undef SENTINELS
-#ifdef SENTINELS
-#define DECL_SENTINEL uint32_t sentinel;
-#define SET_SENTINEL(_x, _y) (_x->sentinel = _y##_SENTINEL)
-#define INVERT_SENTINEL(_x, _y) (_x->sentinel = ~_y##_SENTINEL)
-#define ASSERT_SENTINEL(_x, _y) WARN_ON(_x->sentinel != _y##_SENTINEL)
-#define ASSERT_INVERTED_SENTINEL(_x, _y) WARN_ON(_x->sentinel != ~_y##_SENTINEL)
-#else
-#define DECL_SENTINEL
-#define SET_SENTINEL(_x, _y) do { } while (0)
-#define INVERT_SENTINEL(_x, _y) do { } while (0)
-#define ASSERT_SENTINEL(_x, _y) do { } while (0)
-#define ASSERT_INVERTED_SENTINEL(_x, _y) do { } while (0)
-#endif
-
-#define ASSERT_SPINLOCK(_l) lockdep_assert_held(_l)
-
-/*
- * A pool is the highest-level data structure managed by tmem and
- * usually corresponds to a large independent set of pages such as
- * a filesystem. Each pool has an id, and certain attributes and counters.
- * It also contains a set of hash buckets, each of which contains an rbtree
- * of objects and a lock to manage concurrency within the pool.
- */
-
-#define TMEM_HASH_BUCKET_BITS 8
-#define TMEM_HASH_BUCKETS (1<<TMEM_HASH_BUCKET_BITS)
-
-struct tmem_hashbucket {
- struct rb_root obj_rb_root;
- spinlock_t lock;
-};
-
-struct tmem_pool {
- void *client; /* "up" for some clients, avoids table lookup */
- struct list_head pool_list;
- uint32_t pool_id;
- bool persistent;
- bool shared;
- atomic_t obj_count;
- atomic_t refcount;
- struct tmem_hashbucket hashbucket[TMEM_HASH_BUCKETS];
- DECL_SENTINEL
-};
-
-#define is_persistent(_p) (_p->persistent)
-#define is_ephemeral(_p) (!(_p->persistent))
-
-/*
- * An object id ("oid") is large: 192 bits (to ensure, for example, files
- * in a modern filesystem can be uniquely identified).
- */
-
-struct tmem_oid {
- uint64_t oid[3];
-};
-
-static inline void tmem_oid_set_invalid(struct tmem_oid *oidp)
-{
- oidp->oid[0] = oidp->oid[1] = oidp->oid[2] = -1UL;
-}
-
-static inline bool tmem_oid_valid(struct tmem_oid *oidp)
-{
- return oidp->oid[0] != -1UL || oidp->oid[1] != -1UL ||
- oidp->oid[2] != -1UL;
-}
-
-static inline int tmem_oid_compare(struct tmem_oid *left,
- struct tmem_oid *right)
-{
- int ret;
-
- if (left->oid[2] == right->oid[2]) {
- if (left->oid[1] == right->oid[1]) {
- if (left->oid[0] == right->oid[0])
- ret = 0;
- else if (left->oid[0] < right->oid[0])
- ret = -1;
- else
- ret = 1;
- } else if (left->oid[1] < right->oid[1])
- ret = -1;
- else
- ret = 1;
- } else if (left->oid[2] < right->oid[2])
- ret = -1;
- else
- ret = 1;
- return ret;
-}
-
-static inline unsigned tmem_oid_hash(struct tmem_oid *oidp)
-{
- return hash_long(oidp->oid[0] ^ oidp->oid[1] ^ oidp->oid[2],
- TMEM_HASH_BUCKET_BITS);
-}
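-
-/*
- * Illustration (hypothetical helper, not part of the original file):
- * all three 64-bit words of the oid feed the hash, which selects one
- * of the TMEM_HASH_BUCKETS (256) buckets of a pool, exactly as the
- * lookup paths in tmem.c do.
- */
-static inline struct tmem_hashbucket *tmem_oid_to_hashbucket_example(
- struct tmem_pool *pool, struct tmem_oid *oidp)
-{
- return &pool->hashbucket[tmem_oid_hash(oidp)];
-}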
-
-#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
-struct tmem_xhandle {
- uint8_t client_id;
- uint8_t xh_data_cksum;
- uint16_t xh_data_size;
- uint16_t pool_id;
- struct tmem_oid oid;
- uint32_t index;
- void *extra;
-};
-
-static inline struct tmem_xhandle tmem_xhandle_fill(uint16_t client_id,
- struct tmem_pool *pool,
- struct tmem_oid *oidp,
- uint32_t index)
-{
- struct tmem_xhandle xh;
- xh.client_id = client_id;
- xh.xh_data_cksum = (uint8_t)-1;
- xh.xh_data_size = (uint16_t)-1;
- xh.pool_id = pool->pool_id;
- xh.oid = *oidp;
- xh.index = index;
- return xh;
-}
-#endif
-
-
-/*
- * A tmem_obj contains an identifier (oid), pointers to the parent
- * pool and the rb_tree to which it belongs, counters, and an ordered
- * set of pampds, structured in a radix-tree-like tree. The intermediate
- * nodes of the tree are called tmem_objnodes.
- */
-
-struct tmem_objnode;
-
-struct tmem_obj {
- struct tmem_oid oid;
- struct tmem_pool *pool;
- struct rb_node rb_tree_node;
- struct tmem_objnode *objnode_tree_root;
- unsigned int objnode_tree_height;
- unsigned long objnode_count;
- long pampd_count;
-#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
- /*
- * for current design of ramster, all pages belonging to
- * an object reside on the same remotenode and extra is
- * used to record the number of the remotenode so a
- * flush-object operation can specify it
- */
- void *extra; /* for private use by pampd implementation */
-#endif
- DECL_SENTINEL
-};
-
-#define OBJNODE_TREE_MAP_SHIFT 6
-#define OBJNODE_TREE_MAP_SIZE (1UL << OBJNODE_TREE_MAP_SHIFT)
-#define OBJNODE_TREE_MAP_MASK (OBJNODE_TREE_MAP_SIZE-1)
-#define OBJNODE_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long))
-#define OBJNODE_TREE_MAX_PATH \
- (OBJNODE_TREE_INDEX_BITS/OBJNODE_TREE_MAP_SHIFT + 2)
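-
-/*
- * Worked numbers for the above (illustration only): each objnode has
- * 1 << 6 == 64 slots; OBJNODE_TREE_INDEX_BITS is 64 on a 64-bit build,
- * so OBJNODE_TREE_MAX_PATH is 64/6 + 2 == 12 (32/6 + 2 == 7 on 32-bit).
- */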
-
-struct tmem_objnode {
- struct tmem_obj *obj;
- DECL_SENTINEL
- void *slots[OBJNODE_TREE_MAP_SIZE];
- unsigned int slots_in_use;
-};
-
-struct tmem_handle {
- struct tmem_oid oid; /* 24 bytes */
- uint32_t index;
- uint16_t pool_id;
- uint16_t client_id;
-};
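-
-/*
- * Worked size (illustration only): oid (24 bytes) + index (4) +
- * pool_id (2) + client_id (2) adds up to 32 bytes; zbud reserves
- * exactly sizeof(struct tmem_handle) at the front of every zbud it
- * stores so the handle can be recovered at eviction time.
- */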
-
-
-/* pampd abstract datatype methods provided by the PAM implementation */
-struct tmem_pamops {
- void (*create_finish)(void *, bool);
- int (*get_data)(char *, size_t *, bool, void *, struct tmem_pool *,
- struct tmem_oid *, uint32_t);
- int (*get_data_and_free)(char *, size_t *, bool, void *,
- struct tmem_pool *, struct tmem_oid *,
- uint32_t);
- void (*free)(void *, struct tmem_pool *,
- struct tmem_oid *, uint32_t, bool);
-#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
- void (*new_obj)(struct tmem_obj *);
- void (*free_obj)(struct tmem_pool *, struct tmem_obj *, bool);
- void *(*repatriate_preload)(void *, struct tmem_pool *,
- struct tmem_oid *, uint32_t, bool *);
- int (*repatriate)(void *, void *, struct tmem_pool *,
- struct tmem_oid *, uint32_t, bool, void *);
- bool (*is_remote)(void *);
- int (*replace_in_obj)(void *, struct tmem_obj *);
-#endif
-};
-extern void tmem_register_pamops(struct tmem_pamops *m);
-
-/* memory allocation methods provided by the host implementation */
-struct tmem_hostops {
- struct tmem_obj *(*obj_alloc)(struct tmem_pool *);
- void (*obj_free)(struct tmem_obj *, struct tmem_pool *);
- struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
- void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
-};
-extern void tmem_register_hostops(struct tmem_hostops *m);
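-
-/*
- * Host-side sketch (hypothetical my_* helpers, for illustration only):
- * a tmem host supplies its allocators once at init time, before any
- * tmem_put()/tmem_get() traffic, e.g.:
- *
- *	static struct tmem_hostops my_hostops = {
- *		.obj_alloc	= my_obj_alloc,
- *		.obj_free	= my_obj_free,
- *		.objnode_alloc	= my_objnode_alloc,
- *		.objnode_free	= my_objnode_free,
- *	};
- *	tmem_register_hostops(&my_hostops);
- *
- * and likewise registers a struct tmem_pamops via tmem_register_pamops();
- * zcache-main.c below defines its zcache_hostops this way.
- */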
-
-/* core tmem accessor functions */
-extern int tmem_put(struct tmem_pool *, struct tmem_oid *, uint32_t index,
- bool, void *);
-extern int tmem_get(struct tmem_pool *, struct tmem_oid *, uint32_t index,
- char *, size_t *, bool, int);
-extern int tmem_flush_page(struct tmem_pool *, struct tmem_oid *,
- uint32_t index);
-extern int tmem_flush_object(struct tmem_pool *, struct tmem_oid *);
-extern int tmem_destroy_pool(struct tmem_pool *);
-extern void tmem_new_pool(struct tmem_pool *, uint32_t);
-#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
-extern int tmem_replace(struct tmem_pool *, struct tmem_oid *, uint32_t index,
- void *);
-extern void *tmem_localify_get_pampd(struct tmem_pool *, struct tmem_oid *,
- uint32_t index, struct tmem_obj **,
- void **);
-extern void tmem_localify_finish(struct tmem_obj *, uint32_t index,
- void *, void *, bool);
-#endif
-#endif /* _TMEM_H_ */
diff --git a/drivers/staging/zcache/zbud.c b/drivers/staging/zcache/zbud.c
deleted file mode 100644
index 6cda4ed9ed3..00000000000
--- a/drivers/staging/zcache/zbud.c
+++ /dev/null
@@ -1,1066 +0,0 @@
-/*
- * zbud.c - Compression buddies allocator
- *
- * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
- *
- * Compression buddies ("zbud") provides for efficiently packing two
- * (or, possibly in the future, more) compressed pages ("zpages") into
- * a single "raw" pageframe and for tracking both zpages and pageframes
- * so that whole pageframes can be easily reclaimed in LRU-like order.
- * It is designed to be used in conjunction with transcendent memory
- * ("tmem"); for example separate LRU lists are maintained for persistent
- * vs. ephemeral pages.
- *
- * A zbudpage is an overlay for a struct page and thus each zbudpage
- * refers to a physical pageframe of RAM. When the caller passes a
- * struct page from the kernel's page allocator, zbud "transforms" it
- * to a zbudpage which sets/uses a different set of fields than the
- * struct-page and thus must "untransform" it back by reinitializing
- * certain fields before the struct-page can be freed. The fields
- * of a zbudpage include a page lock for controlling access to the
- * corresponding pageframe, and there is a size field for each zpage.
- * Each zbudpage also lives on two linked lists: a "budlist" which is
- * used to support efficient buddying of zpages; and an "lru" which
- * is used for reclaiming pageframes in approximately least-recently-used
- * order.
- *
- * A zbudpageframe is a pageframe divided up into aligned 64-byte "chunks"
- * which contain the compressed data for zero, one, or two zbuds. Contained
- * within the compressed data is a tmem_handle which is a key to allow
- * the same data to be found via the tmem interface so the zpage can
- * be invalidated (for ephemeral pages) or repatriated to the swap cache
- * (for persistent pages). The contents of a zbudpageframe must never
- * be accessed without holding the page lock for the corresponding
- * zbudpage and, to accommodate highmem machines, the contents may
- * only be examined or changed when kmapped. Thus, when in use, a
- * kmapped zbudpageframe is referred to in the zbud code as "void *zbpg".
- *
- * Note that the term "zbud" refers to the combination of a zpage and
- * a tmem_handle that is stored as one of possibly two "buddied" zpages;
- * it also generically refers to this allocator... sorry for any confusion.
- *
- * A zbudref is a pointer to a struct zbudpage (which can be cast to a
- * struct page), with the LSB either cleared or set to indicate, respectively,
- * the first or second zpage in the zbudpageframe. Since a zbudref can be
- * cast to a pointer, it is used as the tmem "pampd" pointer and uniquely
- * references a stored tmem page and so is the only zbud data structure
- * externally visible to zbud.c/zbud.h.
- *
- * Since we wish to reclaim entire pageframes but zpages may be randomly
- * added and deleted to any given pageframe, we approximate LRU by
- * promoting a pageframe to MRU when a zpage is added to it, but
- * leaving it at the current place in the list when a zpage is deleted
- * from it. As a side effect, zpages that are difficult to buddy (e.g.
- * very large pages) will be reclaimed faster than average, which seems
- * reasonable.
- *
- * In the current implementation, no more than two zpages may be stored in
- * any pageframe and no zpage ever crosses a pageframe boundary. While
- * other zpage allocation mechanisms may allow greater density, this two
- * zpage-per-pageframe limit both ensures simple reclaim of pageframes
- * (including garbage collection of references to the contents of those
- * pageframes from tmem data structures) AND avoids the need for compaction.
- * With additional complexity, zbud could be modified to support storing
- * up to three zpages per pageframe or, to handle larger average zpages,
- * up to three zpages per pair of pageframes, but it is not clear if the
- * additional complexity would be worth it. So consider it an exercise
- * for future developers.
- *
- * Note also that zbud does no page allocation or freeing. This is so
- * that the caller has complete control over and, for accounting, visibility
- * into if/when pages are allocated and freed.
- *
- * Finally, note that zbud limits the size of zpages it can store; the
- * caller must check the zpage size with zbud_max_buddy_size before
- * storing it, else BUGs will result. User beware.
- */
-
-#include <linux/module.h>
-#include <linux/highmem.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/pagemap.h>
-#include <linux/atomic.h>
-#include <linux/bug.h>
-#include "tmem.h"
-#include "zcache.h"
-#include "zbud.h"
-
-/*
- * We need to ensure that a struct zbudpage is never larger than a
- * struct page. This is checked with a BUG_ON in zbud_init.
- *
- * The unevictable field indicates that a zbud is being added to the
- * zbudpage. Since this is a two-phase process (due to tmem locking),
- * this field locks the zbudpage against eviction when a zbud match
- * or creation is in process. Since this addition process may occur
- * in parallel for two zbuds in one zbudpage, the field is a counter
- * that must not exceed two.
- */
-struct zbudpage {
- union {
- struct page page;
- struct {
- unsigned long space_for_flags;
- struct {
- unsigned zbud0_size:PAGE_SHIFT;
- unsigned zbud1_size:PAGE_SHIFT;
- unsigned unevictable:2;
- };
- struct list_head budlist;
- struct list_head lru;
- };
- };
-};
-#if (PAGE_SHIFT * 2) + 2 > BITS_PER_LONG
-#error "zbud won't work for this arch, PAGE_SIZE is too large"
-#endif
-
-struct zbudref {
- union {
- struct zbudpage *zbudpage;
- unsigned long zbudref;
- };
-};
-
-#define CHUNK_SHIFT 6
-#define CHUNK_SIZE (1 << CHUNK_SHIFT)
-#define CHUNK_MASK (~(CHUNK_SIZE-1))
-#define NCHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
-#define MAX_CHUNK (NCHUNKS-1)
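-
-/*
- * Worked numbers (illustration only, assuming PAGE_SIZE == 4096):
- * CHUNK_SIZE == 64, NCHUNKS == 64 and MAX_CHUNK == 63, so
- * zbud_max_size() == 63 * 64 == 4032 bytes, and after reserving
- * sizeof(struct tmem_handle) (32 bytes) per zbud,
- * zbud_max_buddy_size() == 4000 bytes of compressed data.
- */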
-
-/*
- * The following functions deal with the difference between struct
- * page and struct zbudpage. Note the hack of using the pageflags
- * from struct page; this is to avoid duplicating all the complex
- * pageflag macros.
- */
-static inline void zbudpage_spin_lock(struct zbudpage *zbudpage)
-{
- struct page *page = (struct page *)zbudpage;
-
- while (unlikely(test_and_set_bit_lock(PG_locked, &page->flags))) {
- do {
- cpu_relax();
- } while (test_bit(PG_locked, &page->flags));
- }
-}
-
-static inline void zbudpage_spin_unlock(struct zbudpage *zbudpage)
-{
- struct page *page = (struct page *)zbudpage;
-
- clear_bit(PG_locked, &page->flags);
-}
-
-static inline int zbudpage_spin_trylock(struct zbudpage *zbudpage)
-{
- return trylock_page((struct page *)zbudpage);
-}
-
-static inline int zbudpage_is_locked(struct zbudpage *zbudpage)
-{
- return PageLocked((struct page *)zbudpage);
-}
-
-static inline void *kmap_zbudpage_atomic(struct zbudpage *zbudpage)
-{
- return kmap_atomic((struct page *)zbudpage);
-}
-
-/*
- * A dying zbudpage is an ephemeral page in the process of being evicted.
- * Any data contained in the zbudpage is invalid and we are just waiting for
- * the tmem pampds to be invalidated before freeing the page
- */
-static inline int zbudpage_is_dying(struct zbudpage *zbudpage)
-{
- struct page *page = (struct page *)zbudpage;
-
- return test_bit(PG_reclaim, &page->flags);
-}
-
-static inline void zbudpage_set_dying(struct zbudpage *zbudpage)
-{
- struct page *page = (struct page *)zbudpage;
-
- set_bit(PG_reclaim, &page->flags);
-}
-
-static inline void zbudpage_clear_dying(struct zbudpage *zbudpage)
-{
- struct page *page = (struct page *)zbudpage;
-
- clear_bit(PG_reclaim, &page->flags);
-}
-
-/*
- * A zombie zbudpage is a persistent page in the process of being evicted.
- * The data contained in the zbudpage is valid and we are just waiting for
- * the tmem pampds to be invalidated before freeing the page
- */
-static inline int zbudpage_is_zombie(struct zbudpage *zbudpage)
-{
- struct page *page = (struct page *)zbudpage;
-
- return test_bit(PG_dirty, &page->flags);
-}
-
-static inline void zbudpage_set_zombie(struct zbudpage *zbudpage)
-{
- struct page *page = (struct page *)zbudpage;
-
- set_bit(PG_dirty, &page->flags);
-}
-
-static inline void zbudpage_clear_zombie(struct zbudpage *zbudpage)
-{
- struct page *page = (struct page *)zbudpage;
-
- clear_bit(PG_dirty, &page->flags);
-}
-
-static inline void kunmap_zbudpage_atomic(void *zbpg)
-{
- kunmap_atomic(zbpg);
-}
-
-/*
- * zbud "translation" and helper functions
- */
-
-static inline struct zbudpage *zbudref_to_zbudpage(struct zbudref *zref)
-{
- unsigned long zbud = (unsigned long)zref;
- zbud &= ~1UL;
- return (struct zbudpage *)zbud;
-}
-
-static inline struct zbudref *zbudpage_to_zbudref(struct zbudpage *zbudpage,
- unsigned budnum)
-{
- unsigned long zbud = (unsigned long)zbudpage;
- BUG_ON(budnum > 1);
- zbud |= budnum;
- return (struct zbudref *)zbud;
-}
-
-static inline int zbudref_budnum(struct zbudref *zbudref)
-{
- unsigned long zbud = (unsigned long)zbudref;
- return zbud & 1UL;
-}
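-
-/*
- * Illustration (hypothetical helper, not part of the original file):
- * a struct page pointer is at least word-aligned, so bit 0 is free to
- * select which of the two buddies a zbudref names.
- */
-static inline void zbudref_encoding_example(struct zbudpage *zbudpage)
-{
- struct zbudref *zref0 = zbudpage_to_zbudref(zbudpage, 0);
- struct zbudref *zref1 = zbudpage_to_zbudref(zbudpage, 1);
-
- /* both refs decode back to the same pageframe... */
- BUG_ON(zbudref_to_zbudpage(zref0) != zbudpage);
- BUG_ON(zbudref_to_zbudpage(zref1) != zbudpage);
- /* ...but remember which buddy within it they name */
- BUG_ON(zbudref_budnum(zref0) != 0);
- BUG_ON(zbudref_budnum(zref1) != 1);
-}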
-
-static inline unsigned zbud_max_size(void)
-{
- return MAX_CHUNK << CHUNK_SHIFT;
-}
-
-static inline unsigned zbud_size_to_chunks(unsigned size)
-{
- BUG_ON(size == 0 || size > zbud_max_size());
- return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
-}
-
-/* can only be used between kmap_zbudpage_atomic/kunmap_zbudpage_atomic! */
-static inline char *zbud_data(void *zbpg,
- unsigned budnum, unsigned size)
-{
- char *p;
-
- BUG_ON(size == 0 || size > zbud_max_size());
- p = (char *)zbpg;
- if (budnum == 1)
- p += PAGE_SIZE - ((size + CHUNK_SIZE - 1) & CHUNK_MASK);
- return p;
-}
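-
-/*
- * Layout illustration (not from the original file): zbud 0 starts at
- * the beginning of the pageframe and zbud 1 is placed at the end,
- * rounded up to a CHUNK_SIZE boundary, so any free space is a single
- * contiguous hole in the middle:
- *
- *	0 -> | zbud0 handle+data | ...free chunks... | zbud1 handle+data | <- PAGE_SIZE
- */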
-
-/*
- * These are all informative and exposed through debugfs... except for
- * the arrays... anyone know how to do that? To avoid confusion for
- * debugfs viewers, some of these should also be atomic_long_t, but
- * I don't know how to expose atomics via debugfs either...
- */
-static ssize_t zbud_eph_pageframes;
-static ssize_t zbud_pers_pageframes;
-static ssize_t zbud_eph_zpages;
-static ssize_t zbud_pers_zpages;
-static u64 zbud_eph_zbytes;
-static u64 zbud_pers_zbytes;
-static ssize_t zbud_eph_evicted_pageframes;
-static ssize_t zbud_pers_evicted_pageframes;
-static ssize_t zbud_eph_cumul_zpages;
-static ssize_t zbud_pers_cumul_zpages;
-static u64 zbud_eph_cumul_zbytes;
-static u64 zbud_pers_cumul_zbytes;
-static ssize_t zbud_eph_cumul_chunk_counts[NCHUNKS];
-static ssize_t zbud_pers_cumul_chunk_counts[NCHUNKS];
-static ssize_t zbud_eph_buddied_count;
-static ssize_t zbud_pers_buddied_count;
-static ssize_t zbud_eph_unbuddied_count;
-static ssize_t zbud_pers_unbuddied_count;
-static ssize_t zbud_eph_zombie_count;
-static ssize_t zbud_pers_zombie_count;
-static atomic_t zbud_eph_zombie_atomic;
-static atomic_t zbud_pers_zombie_atomic;
-
-#ifdef CONFIG_DEBUG_FS
-#include <linux/debugfs.h>
-#define zdfs debugfs_create_size_t
-#define zdfs64 debugfs_create_u64
-static int zbud_debugfs_init(void)
-{
- struct dentry *root = debugfs_create_dir("zbud", NULL);
- if (root == NULL)
- return -ENXIO;
-
- /*
- * would be nice to dump the sizes of the unbuddied
- * arrays, like was done with sysfs, but it doesn't
- * look like debugfs is flexible enough to do that
- */
- zdfs64("eph_zbytes", S_IRUGO, root, &zbud_eph_zbytes);
- zdfs64("eph_cumul_zbytes", S_IRUGO, root, &zbud_eph_cumul_zbytes);
- zdfs64("pers_zbytes", S_IRUGO, root, &zbud_pers_zbytes);
- zdfs64("pers_cumul_zbytes", S_IRUGO, root, &zbud_pers_cumul_zbytes);
- zdfs("eph_cumul_zpages", S_IRUGO, root, &zbud_eph_cumul_zpages);
- zdfs("eph_evicted_pageframes", S_IRUGO, root,
- &zbud_eph_evicted_pageframes);
- zdfs("eph_zpages", S_IRUGO, root, &zbud_eph_zpages);
- zdfs("eph_pageframes", S_IRUGO, root, &zbud_eph_pageframes);
- zdfs("eph_buddied_count", S_IRUGO, root, &zbud_eph_buddied_count);
- zdfs("eph_unbuddied_count", S_IRUGO, root, &zbud_eph_unbuddied_count);
- zdfs("pers_cumul_zpages", S_IRUGO, root, &zbud_pers_cumul_zpages);
- zdfs("pers_evicted_pageframes", S_IRUGO, root,
- &zbud_pers_evicted_pageframes);
- zdfs("pers_zpages", S_IRUGO, root, &zbud_pers_zpages);
- zdfs("pers_pageframes", S_IRUGO, root, &zbud_pers_pageframes);
- zdfs("pers_buddied_count", S_IRUGO, root, &zbud_pers_buddied_count);
- zdfs("pers_unbuddied_count", S_IRUGO, root, &zbud_pers_unbuddied_count);
- zdfs("pers_zombie_count", S_IRUGO, root, &zbud_pers_zombie_count);
- return 0;
-}
-#undef zdfs
-#undef zdfs64
-#else
-static inline int zbud_debugfs_init(void)
-{
- return 0;
-}
-#endif
-
-/* each lock protects its buddied, unbuddied, lru, and zombie lists */
-static DEFINE_SPINLOCK(zbud_eph_lists_lock);
-static DEFINE_SPINLOCK(zbud_pers_lists_lock);
-
-struct zbud_unbuddied {
- struct list_head list;
- unsigned count;
-};
-
-/* list N contains pages with N chunks USED and NCHUNKS-N unused */
-/* element 0 is never used but optimizing that isn't worth it */
-static struct zbud_unbuddied zbud_eph_unbuddied[NCHUNKS];
-static struct zbud_unbuddied zbud_pers_unbuddied[NCHUNKS];
-static LIST_HEAD(zbud_eph_lru_list);
-static LIST_HEAD(zbud_pers_lru_list);
-static LIST_HEAD(zbud_eph_buddied_list);
-static LIST_HEAD(zbud_pers_buddied_list);
-static LIST_HEAD(zbud_eph_zombie_list);
-static LIST_HEAD(zbud_pers_zombie_list);
-
-/*
- * Given a struct page, transform it to a zbudpage so that it can be
- * used by zbud and initialize fields as necessary.
- */
-static inline struct zbudpage *zbud_init_zbudpage(struct page *page, bool eph)
-{
- struct zbudpage *zbudpage = (struct zbudpage *)page;
-
- BUG_ON(page == NULL);
- INIT_LIST_HEAD(&zbudpage->budlist);
- INIT_LIST_HEAD(&zbudpage->lru);
- zbudpage->zbud0_size = 0;
- zbudpage->zbud1_size = 0;
- zbudpage->unevictable = 0;
- if (eph)
- zbud_eph_pageframes++;
- else
- zbud_pers_pageframes++;
- return zbudpage;
-}
-
-/* "Transform" a zbudpage back to a struct page suitable to free. */
-static inline struct page *zbud_unuse_zbudpage(struct zbudpage *zbudpage,
- bool eph)
-{
- struct page *page = (struct page *)zbudpage;
-
- BUG_ON(!list_empty(&zbudpage->budlist));
- BUG_ON(!list_empty(&zbudpage->lru));
- BUG_ON(zbudpage->zbud0_size != 0);
- BUG_ON(zbudpage->zbud1_size != 0);
- BUG_ON(!PageLocked(page));
- BUG_ON(zbudpage->unevictable != 0);
- BUG_ON(zbudpage_is_dying(zbudpage));
- BUG_ON(zbudpage_is_zombie(zbudpage));
- if (eph)
- zbud_eph_pageframes--;
- else
- zbud_pers_pageframes--;
- zbudpage_spin_unlock(zbudpage);
- page_mapcount_reset(page);
- init_page_count(page);
- page->index = 0;
- return page;
-}
-
-/* Mark a zbud as unused and do accounting */
-static inline void zbud_unuse_zbud(struct zbudpage *zbudpage,
- int budnum, bool eph)
-{
- unsigned size;
-
- BUG_ON(!zbudpage_is_locked(zbudpage));
- if (budnum == 0) {
- size = zbudpage->zbud0_size;
- zbudpage->zbud0_size = 0;
- } else {
- size = zbudpage->zbud1_size;
- zbudpage->zbud1_size = 0;
- }
- if (eph) {
- zbud_eph_zbytes -= size;
- zbud_eph_zpages--;
- } else {
- zbud_pers_zbytes -= size;
- zbud_pers_zpages--;
- }
-}
-
-/*
- * Given a zbudpage/budnum/size, a tmem handle, and a kmapped pointer
- * to some data, set up the zbud appropriately including data copying
- * and accounting. Note that if cdata is NULL, the data copying is
- * skipped. (This is useful for lazy writes such as for RAMster.)
- */
-static void zbud_init_zbud(struct zbudpage *zbudpage, struct tmem_handle *th,
- bool eph, void *cdata,
- unsigned budnum, unsigned size)
-{
- char *to;
- void *zbpg;
- struct tmem_handle *to_th;
- unsigned nchunks = zbud_size_to_chunks(size);
-
- BUG_ON(!zbudpage_is_locked(zbudpage));
- zbpg = kmap_zbudpage_atomic(zbudpage);
- to = zbud_data(zbpg, budnum, size);
- to_th = (struct tmem_handle *)to;
- to_th->index = th->index;
- to_th->oid = th->oid;
- to_th->pool_id = th->pool_id;
- to_th->client_id = th->client_id;
- to += sizeof(struct tmem_handle);
- if (cdata != NULL)
- memcpy(to, cdata, size - sizeof(struct tmem_handle));
- kunmap_zbudpage_atomic(zbpg);
- if (budnum == 0)
- zbudpage->zbud0_size = size;
- else
- zbudpage->zbud1_size = size;
- if (eph) {
- zbud_eph_cumul_chunk_counts[nchunks]++;
- zbud_eph_zpages++;
- zbud_eph_cumul_zpages++;
- zbud_eph_zbytes += size;
- zbud_eph_cumul_zbytes += size;
- } else {
- zbud_pers_cumul_chunk_counts[nchunks]++;
- zbud_pers_zpages++;
- zbud_pers_cumul_zpages++;
- zbud_pers_zbytes += size;
- zbud_pers_cumul_zbytes += size;
- }
-}
-
-/*
- * Given a locked dying zbudpage, read out the tmem handles from the data,
- * unlock the page, then use the handles to tell tmem to flush out its
- * references
- */
-static void zbud_evict_tmem(struct zbudpage *zbudpage)
-{
- int i, j;
- uint32_t pool_id[2], client_id[2];
- uint32_t index[2];
- struct tmem_oid oid[2];
- struct tmem_pool *pool;
- void *zbpg;
- struct tmem_handle *th;
- unsigned size;
-
- /* read out the tmem handles from the data and set aside */
- zbpg = kmap_zbudpage_atomic(zbudpage);
- for (i = 0, j = 0; i < 2; i++) {
- size = (i == 0) ? zbudpage->zbud0_size : zbudpage->zbud1_size;
- if (size) {
- th = (struct tmem_handle *)zbud_data(zbpg, i, size);
- client_id[j] = th->client_id;
- pool_id[j] = th->pool_id;
- oid[j] = th->oid;
- index[j] = th->index;
- j++;
- zbud_unuse_zbud(zbudpage, i, true);
- }
- }
- kunmap_zbudpage_atomic(zbpg);
- zbudpage_spin_unlock(zbudpage);
- /* zbudpage is now an unlocked dying... tell tmem to flush pointers */
- for (i = 0; i < j; i++) {
- pool = zcache_get_pool_by_id(client_id[i], pool_id[i]);
- if (pool != NULL) {
- tmem_flush_page(pool, &oid[i], index[i]);
- zcache_put_pool(pool);
- }
- }
-}
-
-/*
- * Externally callable zbud handling routines.
- */
-
-/*
- * Return the maximum size compressed page that can be stored (secretly
- * setting aside space for the tmem handle).
- */
-unsigned int zbud_max_buddy_size(void)
-{
- return zbud_max_size() - sizeof(struct tmem_handle);
-}
-
-/*
- * Given a zbud reference, free the corresponding zbud from all lists,
- * mark it as unused, do accounting, and if the freeing of the zbud
- * frees up an entire pageframe, return it to the caller (else NULL).
- */
-struct page *zbud_free_and_delist(struct zbudref *zref, bool eph,
- unsigned int *zsize, unsigned int *zpages)
-{
- unsigned long budnum = zbudref_budnum(zref);
- struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
- struct page *page = NULL;
- unsigned chunks, bud_size, other_bud_size;
- spinlock_t *lists_lock =
- eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
- struct zbud_unbuddied *unbud =
- eph ? zbud_eph_unbuddied : zbud_pers_unbuddied;
-
-
- spin_lock(lists_lock);
- zbudpage_spin_lock(zbudpage);
- if (zbudpage_is_dying(zbudpage)) {
- /* ignore dying zbudpage... see zbud_evict_pageframe_lru() */
- zbudpage_spin_unlock(zbudpage);
- spin_unlock(lists_lock);
- *zpages = 0;
- *zsize = 0;
- goto out;
- }
- if (budnum == 0) {
- bud_size = zbudpage->zbud0_size;
- other_bud_size = zbudpage->zbud1_size;
- } else {
- bud_size = zbudpage->zbud1_size;
- other_bud_size = zbudpage->zbud0_size;
- }
- *zsize = bud_size - sizeof(struct tmem_handle);
- *zpages = 1;
- zbud_unuse_zbud(zbudpage, budnum, eph);
- if (other_bud_size == 0) { /* was unbuddied: unlist and free */
- chunks = zbud_size_to_chunks(bud_size);
- if (zbudpage_is_zombie(zbudpage)) {
- if (eph)
- zbud_eph_zombie_count =
- atomic_dec_return(&zbud_eph_zombie_atomic);
- else
- zbud_pers_zombie_count =
- atomic_dec_return(&zbud_pers_zombie_atomic);
- zbudpage_clear_zombie(zbudpage);
- } else {
- BUG_ON(list_empty(&unbud[chunks].list));
- list_del_init(&zbudpage->budlist);
- unbud[chunks].count--;
- }
- list_del_init(&zbudpage->lru);
- spin_unlock(lists_lock);
- if (eph)
- zbud_eph_unbuddied_count--;
- else
- zbud_pers_unbuddied_count--;
- page = zbud_unuse_zbudpage(zbudpage, eph);
- } else { /* was buddied: move remaining buddy to unbuddied list */
- chunks = zbud_size_to_chunks(other_bud_size);
- if (!zbudpage_is_zombie(zbudpage)) {
- list_del_init(&zbudpage->budlist);
- list_add_tail(&zbudpage->budlist, &unbud[chunks].list);
- unbud[chunks].count++;
- }
- if (eph) {
- zbud_eph_buddied_count--;
- zbud_eph_unbuddied_count++;
- } else {
- zbud_pers_unbuddied_count++;
- zbud_pers_buddied_count--;
- }
- /* don't mess with lru, no need to move it */
- zbudpage_spin_unlock(zbudpage);
- spin_unlock(lists_lock);
- }
-out:
- return page;
-}
-
-/*
- * Given a tmem handle, and a kmapped pointer to compressed data of
- * the given size, try to find an unbuddied zbudpage in which to
- * create a zbud. If found, put it there, mark the zbudpage unevictable,
- * and return a zbudref to it. Else return NULL.
- */
-struct zbudref *zbud_match_prep(struct tmem_handle *th, bool eph,
- void *cdata, unsigned size)
-{
- struct zbudpage *zbudpage = NULL, *zbudpage2;
- unsigned long budnum = 0UL;
- unsigned nchunks;
- int i, found_good_buddy = 0;
- spinlock_t *lists_lock =
- eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
- struct zbud_unbuddied *unbud =
- eph ? zbud_eph_unbuddied : zbud_pers_unbuddied;
-
- size += sizeof(struct tmem_handle);
- nchunks = zbud_size_to_chunks(size);
- for (i = MAX_CHUNK - nchunks + 1; i > 0; i--) {
- spin_lock(lists_lock);
- if (!list_empty(&unbud[i].list)) {
- list_for_each_entry_safe(zbudpage, zbudpage2,
- &unbud[i].list, budlist) {
- if (zbudpage_spin_trylock(zbudpage)) {
- found_good_buddy = i;
- goto found_unbuddied;
- }
- }
- }
- spin_unlock(lists_lock);
- }
- zbudpage = NULL;
- goto out;
-
-found_unbuddied:
- BUG_ON(!zbudpage_is_locked(zbudpage));
- BUG_ON(!((zbudpage->zbud0_size == 0) ^ (zbudpage->zbud1_size == 0)));
- if (zbudpage->zbud0_size == 0)
- budnum = 0UL;
- else if (zbudpage->zbud1_size == 0)
- budnum = 1UL;
- list_del_init(&zbudpage->budlist);
- if (eph) {
- list_add_tail(&zbudpage->budlist, &zbud_eph_buddied_list);
- unbud[found_good_buddy].count--;
- zbud_eph_unbuddied_count--;
- zbud_eph_buddied_count++;
- /* "promote" raw zbudpage to most-recently-used */
- list_del_init(&zbudpage->lru);
- list_add_tail(&zbudpage->lru, &zbud_eph_lru_list);
- } else {
- list_add_tail(&zbudpage->budlist, &zbud_pers_buddied_list);
- unbud[found_good_buddy].count--;
- zbud_pers_unbuddied_count--;
- zbud_pers_buddied_count++;
- /* "promote" raw zbudpage to most-recently-used */
- list_del_init(&zbudpage->lru);
- list_add_tail(&zbudpage->lru, &zbud_pers_lru_list);
- }
- zbud_init_zbud(zbudpage, th, eph, cdata, budnum, size);
- zbudpage->unevictable++;
- BUG_ON(zbudpage->unevictable == 3);
- zbudpage_spin_unlock(zbudpage);
- spin_unlock(lists_lock);
-out:
- return zbudpage_to_zbudref(zbudpage, budnum);
-
-}
-
-/*
- * Given a tmem handle, and a kmapped pointer to compressed data of
- * the given size, and a newly allocated struct page, create an unevictable
- * zbud in that new page and return a zbudref to it.
- */
-struct zbudref *zbud_create_prep(struct tmem_handle *th, bool eph,
- void *cdata, unsigned size,
- struct page *newpage)
-{
- struct zbudpage *zbudpage;
- unsigned long budnum = 0;
- unsigned nchunks;
- spinlock_t *lists_lock =
- eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
- struct zbud_unbuddied *unbud =
- eph ? zbud_eph_unbuddied : zbud_pers_unbuddied;
-
-#if 0
- /* this may be worth it later to support decompress-in-place? */
- static unsigned long counter;
- budnum = counter++ & 1; /* alternate using zbud0 and zbud1 */
-#endif
-
- if (size > zbud_max_buddy_size())
- return NULL;
- if (newpage == NULL)
- return NULL;
-
- size += sizeof(struct tmem_handle);
- nchunks = zbud_size_to_chunks(size);
- spin_lock(lists_lock);
- zbudpage = zbud_init_zbudpage(newpage, eph);
- zbudpage_spin_lock(zbudpage);
- list_add_tail(&zbudpage->budlist, &unbud[nchunks].list);
- if (eph) {
- list_add_tail(&zbudpage->lru, &zbud_eph_lru_list);
- zbud_eph_unbuddied_count++;
- } else {
- list_add_tail(&zbudpage->lru, &zbud_pers_lru_list);
- zbud_pers_unbuddied_count++;
- }
- unbud[nchunks].count++;
- zbud_init_zbud(zbudpage, th, eph, cdata, budnum, size);
- zbudpage->unevictable++;
- BUG_ON(zbudpage->unevictable == 3);
- zbudpage_spin_unlock(zbudpage);
- spin_unlock(lists_lock);
- return zbudpage_to_zbudref(zbudpage, budnum);
-}
-
-/*
- * Finish creation of a zbud by marking it evictable (unless another
- * zbud is still being created in parallel in the same pageframe).
- */
-void zbud_create_finish(struct zbudref *zref, bool eph)
-{
- struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
- spinlock_t *lists_lock =
- eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
-
- spin_lock(lists_lock);
- zbudpage_spin_lock(zbudpage);
- BUG_ON(zbudpage_is_dying(zbudpage));
- zbudpage->unevictable--;
- BUG_ON(zbudpage->unevictable == 3); /* 2-bit field wrapped below zero */
- zbudpage_spin_unlock(zbudpage);
- spin_unlock(lists_lock);
-}
-
-/*
- * Given a zbudref and a struct page, decompress the data from
- * the zbud into the physical page represented by the struct page
- * by upcalling to zcache_decompress
- */
-int zbud_decompress(struct page *data_page, struct zbudref *zref, bool eph,
- void (*decompress)(char *, unsigned int, char *))
-{
- struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
- unsigned long budnum = zbudref_budnum(zref);
- void *zbpg;
- char *to_va, *from_va;
- unsigned size;
- int ret = -1;
- spinlock_t *lists_lock =
- eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
-
- spin_lock(lists_lock);
- zbudpage_spin_lock(zbudpage);
- if (zbudpage_is_dying(zbudpage)) {
- /* ignore dying zbudpage... see zbud_evict_pageframe_lru() */
- goto out;
- }
- zbpg = kmap_zbudpage_atomic(zbudpage);
- to_va = kmap_atomic(data_page);
- if (budnum == 0)
- size = zbudpage->zbud0_size;
- else
- size = zbudpage->zbud1_size;
- BUG_ON(size == 0 || size > zbud_max_size());
- from_va = zbud_data(zbpg, budnum, size);
- from_va += sizeof(struct tmem_handle);
- size -= sizeof(struct tmem_handle);
- decompress(from_va, size, to_va);
- kunmap_atomic(to_va);
- kunmap_zbudpage_atomic(zbpg);
- ret = 0;
-out:
- zbudpage_spin_unlock(zbudpage);
- spin_unlock(lists_lock);
- return ret;
-}
-
-/*
- * Given a zbudref and a kernel pointer, copy the data from
- * the zbud to the kernel pointer.
- */
-int zbud_copy_from_zbud(char *to_va, struct zbudref *zref,
- size_t *sizep, bool eph)
-{
- struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
- unsigned long budnum = zbudref_budnum(zref);
- void *zbpg;
- char *from_va;
- unsigned size;
- int ret = -1;
- spinlock_t *lists_lock =
- eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
-
- spin_lock(lists_lock);
- zbudpage_spin_lock(zbudpage);
- if (zbudpage_is_dying(zbudpage)) {
- /* ignore dying zbudpage... see zbud_evict_pageframe_lru() */
- goto out;
- }
- zbpg = kmap_zbudpage_atomic(zbudpage);
- if (budnum == 0)
- size = zbudpage->zbud0_size;
- else
- size = zbudpage->zbud1_size;
- BUG_ON(size == 0 || size > zbud_max_size());
- from_va = zbud_data(zbpg, budnum, size);
- from_va += sizeof(struct tmem_handle);
- size -= sizeof(struct tmem_handle);
- *sizep = size;
- memcpy(to_va, from_va, size);
-
- kunmap_zbudpage_atomic(zbpg);
- ret = 0;
-out:
- zbudpage_spin_unlock(zbudpage);
- spin_unlock(lists_lock);
- return ret;
-}
-
-/*
- * Given a zbudref and a kernel pointer, copy the data from
- * the kernel pointer to the zbud.
- */
-int zbud_copy_to_zbud(struct zbudref *zref, char *from_va, bool eph)
-{
- struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
- unsigned long budnum = zbudref_budnum(zref);
- void *zbpg;
- char *to_va;
- unsigned size;
- int ret = -1;
- spinlock_t *lists_lock =
- eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
-
- spin_lock(lists_lock);
- zbudpage_spin_lock(zbudpage);
- if (zbudpage_is_dying(zbudpage)) {
- /* ignore dying zbudpage... see zbud_evict_pageframe_lru() */
- goto out;
- }
- zbpg = kmap_zbudpage_atomic(zbudpage);
- if (budnum == 0)
- size = zbudpage->zbud0_size;
- else
- size = zbudpage->zbud1_size;
- BUG_ON(size == 0 || size > zbud_max_size());
- to_va = zbud_data(zbpg, budnum, size);
- to_va += sizeof(struct tmem_handle);
- size -= sizeof(struct tmem_handle);
- memcpy(to_va, from_va, size);
-
- kunmap_zbudpage_atomic(zbpg);
- ret = 0;
-out:
- zbudpage_spin_unlock(zbudpage);
- spin_unlock(lists_lock);
- return ret;
-}
-
-/*
- * Choose an ephemeral LRU zbudpage that is evictable (not locked), ensure
- * there are no references to it remaining, and return the now unused
- * (and re-init'ed) struct page and the total amount of compressed
- * data that was evicted.
- */
-struct page *zbud_evict_pageframe_lru(unsigned int *zsize, unsigned int *zpages)
-{
- struct zbudpage *zbudpage = NULL, *zbudpage2;
- struct zbud_unbuddied *unbud = zbud_eph_unbuddied;
- struct page *page = NULL;
- bool irqs_disabled = irqs_disabled();
-
- /*
- * Since this can be called indirectly from cleancache_put, which
- * has interrupts disabled, as well as frontswap_put, which does not,
- * we need to be able to handle both cases, even though it is ugly.
- */
- if (irqs_disabled)
- spin_lock(&zbud_eph_lists_lock);
- else
- spin_lock_bh(&zbud_eph_lists_lock);
- *zsize = 0;
- if (list_empty(&zbud_eph_lru_list))
- goto unlock_out;
- list_for_each_entry_safe(zbudpage, zbudpage2, &zbud_eph_lru_list, lru) {
- /* skip a locked zbudpage */
- if (unlikely(!zbudpage_spin_trylock(zbudpage)))
- continue;
- /* skip an unevictable zbudpage */
- if (unlikely(zbudpage->unevictable != 0)) {
- zbudpage_spin_unlock(zbudpage);
- continue;
- }
- /* got a locked evictable page */
- goto evict_page;
-
- }
-unlock_out:
- /* no unlocked evictable pages, give up */
- if (irqs_disabled)
- spin_unlock(&zbud_eph_lists_lock);
- else
- spin_unlock_bh(&zbud_eph_lists_lock);
- goto out;
-
-evict_page:
- list_del_init(&zbudpage->budlist);
- list_del_init(&zbudpage->lru);
- zbudpage_set_dying(zbudpage);
- /*
- * the zbudpage is now "dying" and attempts to read, write,
- * or delete data from it will be ignored
- */
- if (zbudpage->zbud0_size != 0 && zbudpage->zbud1_size != 0) {
- *zsize = zbudpage->zbud0_size + zbudpage->zbud1_size -
- (2 * sizeof(struct tmem_handle));
- *zpages = 2;
- } else if (zbudpage->zbud0_size != 0) {
- unbud[zbud_size_to_chunks(zbudpage->zbud0_size)].count--;
- *zsize = zbudpage->zbud0_size - sizeof(struct tmem_handle);
- *zpages = 1;
- } else if (zbudpage->zbud1_size != 0) {
- unbud[zbud_size_to_chunks(zbudpage->zbud1_size)].count--;
- *zsize = zbudpage->zbud1_size - sizeof(struct tmem_handle);
- *zpages = 1;
- } else {
- BUG();
- }
- spin_unlock(&zbud_eph_lists_lock);
- zbud_eph_evicted_pageframes++;
- if (*zpages == 1)
- zbud_eph_unbuddied_count--;
- else
- zbud_eph_buddied_count--;
- zbud_evict_tmem(zbudpage);
- zbudpage_spin_lock(zbudpage);
- zbudpage_clear_dying(zbudpage);
- page = zbud_unuse_zbudpage(zbudpage, true);
- if (!irqs_disabled)
- local_bh_enable();
-out:
- return page;
-}
-
-/*
- * Choose a persistent LRU zbudpage that is evictable (not locked), zombify it,
- * read the tmem_handle(s) out of it into the passed array, and return the
- * number of zbuds. Caller must perform necessary tmem functions and,
- * indirectly, zbud functions to fetch any valid data and cause the
- * now-zombified zbudpage to eventually be freed. We track the zombified
- * zbudpage count so it is possible to observe if there is a leak.
- * FIXME: describe (ramster) case where data pointers are passed in for memcpy
- */
-unsigned int zbud_make_zombie_lru(struct tmem_handle *th, unsigned char **data,
- unsigned int *zsize, bool eph)
-{
- struct zbudpage *zbudpage = NULL, *zbudpage2;
- struct tmem_handle *thfrom;
- char *from_va;
- void *zbpg;
- unsigned size;
- int ret = 0, i;
- spinlock_t *lists_lock =
- eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
- struct list_head *lru_list =
- eph ? &zbud_eph_lru_list : &zbud_pers_lru_list;
-
- spin_lock_bh(lists_lock);
- if (list_empty(lru_list))
- goto out;
- list_for_each_entry_safe(zbudpage, zbudpage2, lru_list, lru) {
- /* skip a locked zbudpage */
- if (unlikely(!zbudpage_spin_trylock(zbudpage)))
- continue;
- /* skip an unevictable zbudpage */
- if (unlikely(zbudpage->unevictable != 0)) {
- zbudpage_spin_unlock(zbudpage);
- continue;
- }
- /* got a locked evictable page */
- goto zombify_page;
- }
- /* no unlocked evictable pages, give up */
- goto out;
-
-zombify_page:
- /* got an unlocked evictable page, zombify it */
- list_del_init(&zbudpage->budlist);
- zbudpage_set_zombie(zbudpage);
- /* FIXME what accounting do I need to do here? */
- list_del_init(&zbudpage->lru);
- if (eph) {
- list_add_tail(&zbudpage->lru, &zbud_eph_zombie_list);
- zbud_eph_zombie_count =
- atomic_inc_return(&zbud_eph_zombie_atomic);
- } else {
- list_add_tail(&zbudpage->lru, &zbud_pers_zombie_list);
- zbud_pers_zombie_count =
- atomic_inc_return(&zbud_pers_zombie_atomic);
- }
- /* FIXME what accounting do I need to do here? */
- zbpg = kmap_zbudpage_atomic(zbudpage);
- for (i = 0; i < 2; i++) {
- size = (i == 0) ? zbudpage->zbud0_size : zbudpage->zbud1_size;
- if (size) {
- from_va = zbud_data(zbpg, i, size);
- thfrom = (struct tmem_handle *)from_va;
- from_va += sizeof(struct tmem_handle);
- size -= sizeof(struct tmem_handle);
- if (th != NULL)
- th[ret] = *thfrom;
- if (data != NULL)
- memcpy(data[ret], from_va, size);
- if (zsize != NULL)
- *zsize++ = size;
- ret++;
- }
- }
- kunmap_zbudpage_atomic(zbpg);
- zbudpage_spin_unlock(zbudpage);
-out:
- spin_unlock_bh(lists_lock);
- return ret;
-}
-
-void zbud_init(void)
-{
- int i;
-
- zbud_debugfs_init();
- BUG_ON((sizeof(struct tmem_handle) * 2 > CHUNK_SIZE));
- BUG_ON(sizeof(struct zbudpage) > sizeof(struct page));
- for (i = 0; i < NCHUNKS; i++) {
- INIT_LIST_HEAD(&zbud_eph_unbuddied[i].list);
- INIT_LIST_HEAD(&zbud_pers_unbuddied[i].list);
- }
-}
diff --git a/drivers/staging/zcache/zbud.h b/drivers/staging/zcache/zbud.h
deleted file mode 100644
index 891e8a7d5aa..00000000000
--- a/drivers/staging/zcache/zbud.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * zbud.h
- *
- * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
- *
- */
-
-#ifndef _ZBUD_H_
-#define _ZBUD_H_
-
-#include "tmem.h"
-
-struct zbudref;
-
-extern unsigned int zbud_max_buddy_size(void);
-extern struct zbudref *zbud_match_prep(struct tmem_handle *th, bool eph,
- void *cdata, unsigned size);
-extern struct zbudref *zbud_create_prep(struct tmem_handle *th, bool eph,
- void *cdata, unsigned size,
- struct page *newpage);
-extern void zbud_create_finish(struct zbudref *, bool);
-extern int zbud_decompress(struct page *, struct zbudref *, bool,
- void (*func)(char *, unsigned int, char *));
-extern int zbud_copy_from_zbud(char *, struct zbudref *, size_t *, bool);
-extern int zbud_copy_to_zbud(struct zbudref *, char *, bool);
-extern struct page *zbud_free_and_delist(struct zbudref *, bool eph,
- unsigned int *, unsigned int *);
-extern struct page *zbud_evict_pageframe_lru(unsigned int *, unsigned int *);
-extern unsigned int zbud_make_zombie_lru(struct tmem_handle *, unsigned char **,
- unsigned int *, bool);
-extern void zbud_init(void);
-
-#endif /* _ZBUD_H_ */
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
deleted file mode 100644
index dcceed29d31..00000000000
--- a/drivers/staging/zcache/zcache-main.c
+++ /dev/null
@@ -1,1941 +0,0 @@
-/*
- * zcache.c
- *
- * Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
- * Copyright (c) 2010,2011, Nitin Gupta
- *
- * Zcache provides an in-kernel "host implementation" for transcendent memory
- * ("tmem") and, thus indirectly, for cleancache and frontswap. Zcache uses
- * lzo1x compression to improve density and an embedded allocator called
- * "zbud" which "buddies" two compressed pages semi-optimally in each physical
- * pageframe. Zbud is integrally tied into tmem to allow pageframes to
- * be "reclaimed" efficiently.
- */
-
-#include <linux/module.h>
-#include <linux/cpu.h>
-#include <linux/highmem.h>
-#include <linux/list.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/atomic.h>
-#include <linux/math64.h>
-#include <linux/crypto.h>
-#include <linux/swap.h>
-#include <linux/swapops.h>
-#include <linux/pagemap.h>
-#include <linux/writeback.h>
-
-#include <linux/cleancache.h>
-#include <linux/frontswap.h>
-#include "tmem.h"
-#include "zcache.h"
-#include "zbud.h"
-#include "ramster.h"
-#include "debug.h"
-#ifdef CONFIG_RAMSTER
-static bool ramster_enabled __read_mostly;
-static int disable_frontswap_selfshrink;
-#else
-#define ramster_enabled false
-#define disable_frontswap_selfshrink 0
-#endif
-
-#ifndef __PG_WAS_ACTIVE
-static inline bool PageWasActive(struct page *page)
-{
- return true;
-}
-
-static inline void SetPageWasActive(struct page *page)
-{
-}
-#endif
-
-#ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS
-static bool frontswap_has_exclusive_gets __read_mostly = true;
-#else
-static bool frontswap_has_exclusive_gets __read_mostly;
-static inline void frontswap_tmem_exclusive_gets(bool b)
-{
-}
-#endif
-
-/*
- * Mark the pampd with a special value so that a later retrieval
- * can identify zero-filled pages.
- */
-#define ZERO_FILLED 0x2
-
-/* enable (or fix code) when Seth's patches are accepted upstream */
-#define zcache_writeback_enabled 0
-
-static bool zcache_enabled __read_mostly;
-static bool disable_cleancache __read_mostly;
-static bool disable_frontswap __read_mostly;
-static bool disable_frontswap_ignore_nonactive __read_mostly;
-static bool disable_cleancache_ignore_nonactive __read_mostly;
-static char *namestr __read_mostly = "zcache";
-
-#define ZCACHE_GFP_MASK \
- (__GFP_FS | __GFP_NORETRY | __GFP_NOWARN | __GFP_NOMEMALLOC)
-
-/* crypto API for zcache */
-#ifdef CONFIG_ZCACHE_MODULE
-static char *zcache_comp_name = "lzo";
-#else
-#define ZCACHE_COMP_NAME_SZ CRYPTO_MAX_ALG_NAME
-static char zcache_comp_name[ZCACHE_COMP_NAME_SZ] __read_mostly;
-#endif
-static struct crypto_comp * __percpu *zcache_comp_pcpu_tfms __read_mostly;
-
-enum comp_op {
- ZCACHE_COMPOP_COMPRESS,
- ZCACHE_COMPOP_DECOMPRESS
-};
-
-static inline int zcache_comp_op(enum comp_op op,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- struct crypto_comp *tfm;
- int ret = -1;
-
- BUG_ON(!zcache_comp_pcpu_tfms);
- tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
- BUG_ON(!tfm);
- switch (op) {
- case ZCACHE_COMPOP_COMPRESS:
- ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
- break;
- case ZCACHE_COMPOP_DECOMPRESS:
- ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
- break;
- default:
- ret = -EINVAL;
- }
- put_cpu();
- return ret;
-}
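-
-/*
- * Usage sketch (hypothetical helper, for illustration only): compress
- * one kmapped pageframe through the per-cpu crypto_comp transform.
- * The caller sets *dlen to the capacity of dst; on success it is
- * updated to the compressed length.
- */
-static int __maybe_unused example_compress_page(struct page *page,
- u8 *dst, unsigned int *dlen)
-{
- u8 *src;
- int ret;
-
- src = kmap_atomic(page);
- ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, src, PAGE_SIZE, dst, dlen);
- kunmap_atomic(src);
- return ret;
-}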
-
-/*
- * policy parameters
- */
-
-/*
- * byte count defining poor compression; pages with greater zsize will be
- * rejected
- */
-static unsigned int zbud_max_zsize __read_mostly = (PAGE_SIZE / 8) * 7;
-/*
- * byte count defining poor *mean* compression; pages with greater zsize
- * will be rejected until sufficient better-compressed pages are accepted
- * driving the mean below this threshold
- */
-static unsigned int zbud_max_mean_zsize __read_mostly = (PAGE_SIZE / 8) * 5;
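-
-/*
- * Worked numbers (illustration only, assuming PAGE_SIZE == 4096):
- * zbud_max_zsize == 3584 and zbud_max_mean_zsize == 2560, i.e. a
- * persistent zpage compressing to more than 3584 bytes is always
- * rejected, and while the mean zsize of stored persistent zpages
- * exceeds 2560 bytes, further zpages larger than 2560 bytes are also
- * rejected until better-compressed pages bring the mean back down.
- */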
-
-/*
- * for now, use named slabs so we can easily track usage; later we can
- * either just use kmalloc, or perhaps add a slab-like allocator
- * to more carefully manage total memory utilization
- */
-static struct kmem_cache *zcache_objnode_cache;
-static struct kmem_cache *zcache_obj_cache;
-
-static DEFINE_PER_CPU(struct zcache_preload, zcache_preloads) = { 0, };
-
-/* Used by debug.c */
-ssize_t zcache_pers_zpages;
-u64 zcache_pers_zbytes;
-ssize_t zcache_eph_pageframes;
-ssize_t zcache_pers_pageframes;
-
-/* Used by this code. */
-ssize_t zcache_last_active_file_pageframes;
-ssize_t zcache_last_inactive_file_pageframes;
-ssize_t zcache_last_active_anon_pageframes;
-ssize_t zcache_last_inactive_anon_pageframes;
-#ifdef CONFIG_ZCACHE_WRITEBACK
-ssize_t zcache_writtenback_pages;
-ssize_t zcache_outstanding_writeback_pages;
-#endif
-/*
- * zcache core code starts here
- */
-
-static struct zcache_client zcache_host;
-static struct zcache_client zcache_clients[MAX_CLIENTS];
-
-static inline bool is_local_client(struct zcache_client *cli)
-{
- return cli == &zcache_host;
-}
-
-static struct zcache_client *zcache_get_client_by_id(uint16_t cli_id)
-{
- struct zcache_client *cli = &zcache_host;
-
- if (cli_id != LOCAL_CLIENT) {
- if (cli_id >= MAX_CLIENTS)
- goto out;
- cli = &zcache_clients[cli_id];
- }
-out:
- return cli;
-}
-
-/*
- * Tmem operations assume the poolid implies the invoking client.
- * Zcache only has one client (the kernel itself): LOCAL_CLIENT.
- * RAMster has each client numbered by cluster node, and a KVM version
- * of zcache would have one client per guest and each client might
- * have a poolid==N.
- */
-struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id, uint16_t poolid)
-{
- struct tmem_pool *pool = NULL;
- struct zcache_client *cli = NULL;
-
- cli = zcache_get_client_by_id(cli_id);
- if (cli == NULL)
- goto out;
- if (!is_local_client(cli))
- atomic_inc(&cli->refcount);
- if (poolid < MAX_POOLS_PER_CLIENT) {
- pool = cli->tmem_pools[poolid];
- if (pool != NULL)
- atomic_inc(&pool->refcount);
- }
-out:
- return pool;
-}
-
-void zcache_put_pool(struct tmem_pool *pool)
-{
- struct zcache_client *cli = NULL;
-
- if (pool == NULL)
- BUG();
- cli = pool->client;
- atomic_dec(&pool->refcount);
- if (!is_local_client(cli))
- atomic_dec(&cli->refcount);
-}
-
-int zcache_new_client(uint16_t cli_id)
-{
- struct zcache_client *cli;
- int ret = -1;
-
- cli = zcache_get_client_by_id(cli_id);
- if (cli == NULL)
- goto out;
- if (cli->allocated)
- goto out;
- cli->allocated = 1;
- ret = 0;
-out:
- return ret;
-}
-
-/*
- * zcache implementation for tmem host ops
- */
-
-static struct tmem_objnode *zcache_objnode_alloc(struct tmem_pool *pool)
-{
- struct tmem_objnode *objnode = NULL;
- struct zcache_preload *kp;
- int i;
-
- kp = &__get_cpu_var(zcache_preloads);
- for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
- objnode = kp->objnodes[i];
- if (objnode != NULL) {
- kp->objnodes[i] = NULL;
- break;
- }
- }
- BUG_ON(objnode == NULL);
- inc_zcache_objnode_count();
- return objnode;
-}
-
-static void zcache_objnode_free(struct tmem_objnode *objnode,
- struct tmem_pool *pool)
-{
- dec_zcache_objnode_count();
- kmem_cache_free(zcache_objnode_cache, objnode);
-}
-
-static struct tmem_obj *zcache_obj_alloc(struct tmem_pool *pool)
-{
- struct tmem_obj *obj = NULL;
- struct zcache_preload *kp;
-
- kp = &__get_cpu_var(zcache_preloads);
- obj = kp->obj;
- BUG_ON(obj == NULL);
- kp->obj = NULL;
- inc_zcache_obj_count();
- return obj;
-}
-
-static void zcache_obj_free(struct tmem_obj *obj, struct tmem_pool *pool)
-{
- dec_zcache_obj_count();
- kmem_cache_free(zcache_obj_cache, obj);
-}
-
-/*
- * Compressing zero-filled pages will waste memory and introduce
- * serious fragmentation, so skip it to avoid the overhead.
- */
-static bool page_is_zero_filled(struct page *p)
-{
- unsigned int pos;
- char *page;
-
- page = kmap_atomic(p);
- for (pos = 0; pos < PAGE_SIZE / sizeof(*page); pos++) {
- if (page[pos]) {
- kunmap_atomic(page);
- return false;
- }
- }
- kunmap_atomic(page);
-
- return true;
-}
-
-static void handle_zero_filled_page(void *p)
-{
- void *user_mem;
- struct page *page = (struct page *)p;
-
- user_mem = kmap_atomic(page);
- memset(user_mem, 0, PAGE_SIZE);
- kunmap_atomic(user_mem);
-
- flush_dcache_page(page);
-}
-
-static struct tmem_hostops zcache_hostops = {
- .obj_alloc = zcache_obj_alloc,
- .obj_free = zcache_obj_free,
- .objnode_alloc = zcache_objnode_alloc,
- .objnode_free = zcache_objnode_free,
-};
-
-static struct page *zcache_alloc_page(void)
-{
- struct page *page = alloc_page(ZCACHE_GFP_MASK);
-
- if (page != NULL)
- inc_zcache_pageframes_alloced();
- return page;
-}
-
-static void zcache_free_page(struct page *page)
-{
- long curr_pageframes;
- static long max_pageframes, min_pageframes;
-
- if (page == NULL)
- BUG();
- __free_page(page);
- inc_zcache_pageframes_freed();
- curr_pageframes = curr_pageframes_count();
- if (curr_pageframes > max_pageframes)
- max_pageframes = curr_pageframes;
- if (curr_pageframes < min_pageframes)
- min_pageframes = curr_pageframes;
-#ifdef CONFIG_ZCACHE_DEBUG
- if (curr_pageframes > 2L || curr_pageframes < -2L) {
- /* pr_info here */
- }
-#endif
-}
-
-/*
- * zcache implementations for PAM page descriptor ops
- */
-
-/* forward reference */
-static void zcache_compress(struct page *from,
- void **out_va, unsigned *out_len);
-
-static struct page *zcache_evict_eph_pageframe(void);
-
-static void *zcache_pampd_eph_create(char *data, size_t size, bool raw,
- struct tmem_handle *th)
-{
- void *pampd = NULL, *cdata = data;
- unsigned clen = size;
- bool zero_filled = false;
- struct page *page = (struct page *)(data), *newpage;
-
- if (page_is_zero_filled(page)) {
- clen = 0;
- zero_filled = true;
- inc_zcache_zero_filled_pages();
- goto got_pampd;
- }
-
- if (!raw) {
- zcache_compress(page, &cdata, &clen);
- if (clen > zbud_max_buddy_size()) {
- inc_zcache_compress_poor();
- goto out;
- }
- } else {
- BUG_ON(clen > zbud_max_buddy_size());
- }
-
- /* look for space via an existing match first */
- pampd = (void *)zbud_match_prep(th, true, cdata, clen);
- if (pampd != NULL)
- goto got_pampd;
-
- /* no match, now we need to find (or free up) a full page */
- newpage = zcache_alloc_page();
- if (newpage != NULL)
- goto create_in_new_page;
-
- inc_zcache_failed_getfreepages();
- /* can't allocate a page, evict an ephemeral page via LRU */
- newpage = zcache_evict_eph_pageframe();
- if (newpage == NULL) {
- inc_zcache_eph_ate_tail_failed();
- goto out;
- }
- inc_zcache_eph_ate_tail();
-
-create_in_new_page:
- pampd = (void *)zbud_create_prep(th, true, cdata, clen, newpage);
- BUG_ON(pampd == NULL);
- inc_zcache_eph_pageframes();
-
-got_pampd:
- inc_zcache_eph_zbytes(clen);
- inc_zcache_eph_zpages();
- if (ramster_enabled && raw && !zero_filled)
- ramster_count_foreign_pages(true, 1);
- if (zero_filled)
- pampd = (void *)ZERO_FILLED;
-out:
- return pampd;
-}
-
-static void *zcache_pampd_pers_create(char *data, size_t size, bool raw,
- struct tmem_handle *th)
-{
- void *pampd = NULL, *cdata = data;
- unsigned clen = size;
- bool zero_filled = false;
- struct page *page = (struct page *)(data), *newpage;
- unsigned long zbud_mean_zsize;
- unsigned long curr_pers_zpages, total_zsize;
-
- if (data == NULL) {
- BUG_ON(!ramster_enabled);
- goto create_pampd;
- }
-
- if (page_is_zero_filled(page)) {
- clen = 0;
- zero_filled = true;
- inc_zcache_zero_filled_pages();
- goto got_pampd;
- }
-
- curr_pers_zpages = zcache_pers_zpages;
-/* FIXME CONFIG_RAMSTER... subtract atomic remote_pers_pages here? */
- if (!raw)
- zcache_compress(page, &cdata, &clen);
- /* reject if compression is too poor */
- if (clen > zbud_max_zsize) {
- inc_zcache_compress_poor();
- goto out;
- }
- /* reject if mean compression is too poor */
- if ((clen > zbud_max_mean_zsize) && (curr_pers_zpages > 0)) {
- total_zsize = zcache_pers_zbytes;
- if ((long)total_zsize < 0)
- total_zsize = 0;
- zbud_mean_zsize = div_u64(total_zsize,
- curr_pers_zpages);
- if (zbud_mean_zsize > zbud_max_mean_zsize) {
- inc_zcache_mean_compress_poor();
- goto out;
- }
- }
-
-create_pampd:
- /* look for space via an existing match first */
- pampd = (void *)zbud_match_prep(th, false, cdata, clen);
- if (pampd != NULL)
- goto got_pampd;
-
- /* no match, now we need to find (or free up) a full page */
- newpage = zcache_alloc_page();
- if (newpage != NULL)
- goto create_in_new_page;
- /*
- * FIXME do the following only if eph is oversized?
- * if (zcache_eph_pageframes >
- * (global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE) +
- * global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE)))
- */
- inc_zcache_failed_getfreepages();
- /* can't allocate a page, evict an ephemeral page via LRU */
- newpage = zcache_evict_eph_pageframe();
- if (newpage == NULL) {
- inc_zcache_pers_ate_eph_failed();
- goto out;
- }
- inc_zcache_pers_ate_eph();
-
-create_in_new_page:
- pampd = (void *)zbud_create_prep(th, false, cdata, clen, newpage);
- BUG_ON(pampd == NULL);
- inc_zcache_pers_pageframes();
-
-got_pampd:
- inc_zcache_pers_zpages();
- inc_zcache_pers_zbytes(clen);
- if (ramster_enabled && raw && !zero_filled)
- ramster_count_foreign_pages(false, 1);
- if (zero_filled)
- pampd = (void *)ZERO_FILLED;
-out:
- return pampd;
-}
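/*
 * A minimal model of the persistent-page acceptance policy above, assuming
 * the thresholds behave like zbud_max_zsize and zbud_max_mean_zsize (which
 * are defined in zbud, not here); the limit values below are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

#define SKETCH_MAX_ZSIZE	(4096 * 7 / 8)	/* hard per-zpage limit */
#define SKETCH_MAX_MEAN_ZSIZE	(4096 * 5 / 8)	/* limit on the running mean */

static bool accept_pers_zpage(unsigned int clen, uint64_t total_zbytes,
			      uint64_t total_zpages)
{
	if (clen > SKETCH_MAX_ZSIZE)
		return false;			/* compression too poor */
	if (clen > SKETCH_MAX_MEAN_ZSIZE && total_zpages > 0 &&
	    total_zbytes / total_zpages > SKETCH_MAX_MEAN_ZSIZE)
		return false;			/* mean compression too poor */
	return true;				/* worth storing compressed */
}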
-
-/*
- * This is called directly from zcache_put_page to pre-allocate space
- * to store a zpage.
- */
-void *zcache_pampd_create(char *data, unsigned int size, bool raw,
- int eph, struct tmem_handle *th)
-{
- void *pampd = NULL;
- struct zcache_preload *kp;
- struct tmem_objnode *objnode;
- struct tmem_obj *obj;
- int i;
-
- BUG_ON(!irqs_disabled());
- /* pre-allocate per-cpu metadata */
- BUG_ON(zcache_objnode_cache == NULL);
- BUG_ON(zcache_obj_cache == NULL);
- kp = &__get_cpu_var(zcache_preloads);
- for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
- objnode = kp->objnodes[i];
- if (objnode == NULL) {
- objnode = kmem_cache_alloc(zcache_objnode_cache,
- ZCACHE_GFP_MASK);
- if (unlikely(objnode == NULL)) {
- inc_zcache_failed_alloc();
- goto out;
- }
- kp->objnodes[i] = objnode;
- }
- }
- if (kp->obj == NULL) {
- obj = kmem_cache_alloc(zcache_obj_cache, ZCACHE_GFP_MASK);
- kp->obj = obj;
- }
- if (unlikely(kp->obj == NULL)) {
- inc_zcache_failed_alloc();
- goto out;
- }
- /*
- * ok, we have all the metadata pre-allocated; now do the data. Since
- * how the data is allocated depends on whether the page is ephemeral
- * or persistent, the call is split between the two sub-functions above.
- */
- if (eph)
- pampd = zcache_pampd_eph_create(data, size, raw, th);
- else
- pampd = zcache_pampd_pers_create(data, size, raw, th);
-out:
- return pampd;
-}
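/*
 * The function above implements a "preload" pattern: with interrupts off,
 * top up a small per-cpu cache of metadata objects so the subsequent tmem
 * insertion never has to allocate. A user-space model of that pattern;
 * the sketch_* names and fixed-size array are illustrative stand-ins.
 */
#include <stdlib.h>

#define SKETCH_PRELOAD_MAX 8

struct sketch_preload {
	void *nodes[SKETCH_PRELOAD_MAX];
};

/* Fill every empty slot now, while allocation is still allowed. */
static int sketch_preload_fill(struct sketch_preload *kp, size_t objsize)
{
	int i;

	for (i = 0; i < SKETCH_PRELOAD_MAX; i++) {
		if (kp->nodes[i] == NULL) {
			kp->nodes[i] = malloc(objsize);
			if (kp->nodes[i] == NULL)
				return -1;	/* caller fails the whole put */
		}
	}
	return 0;
}

/* Later, in the no-allocation region, just take a preloaded node. */
static void *sketch_preload_take(struct sketch_preload *kp)
{
	int i;

	for (i = 0; i < SKETCH_PRELOAD_MAX; i++) {
		if (kp->nodes[i] != NULL) {
			void *node = kp->nodes[i];

			kp->nodes[i] = NULL;
			return node;
		}
	}
	return NULL;
}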
-
-/*
- * This is a pamops called via tmem_put and is necessary to "finish"
- * a pampd creation.
- */
-void zcache_pampd_create_finish(void *pampd, bool eph)
-{
- if (pampd != (void *)ZERO_FILLED)
- zbud_create_finish((struct zbudref *)pampd, eph);
-}
-
-/*
- * This is passed as a function parameter to zbud_decompress so that
- * zbud need not be familiar with the details of crypto. It assumes that
- * the byte ranges from_va..from_va+size-1 and to_va..to_va+size-1 are
- * kmapped. It must succeed, else there is a logic bug somewhere.
- */
-static void zcache_decompress(char *from_va, unsigned int size, char *to_va)
-{
- int ret;
- unsigned int outlen = PAGE_SIZE;
-
- ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
- to_va, &outlen);
- BUG_ON(ret);
- BUG_ON(outlen != PAGE_SIZE);
-}
-
-/*
- * Decompress from the kernel va to a pageframe
- */
-void zcache_decompress_to_page(char *from_va, unsigned int size,
- struct page *to_page)
-{
- char *to_va = kmap_atomic(to_page);
- zcache_decompress(from_va, size, to_va);
- kunmap_atomic(to_va);
-}
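/*
 * zbud_decompress() takes the routine above as a callback so that zbud
 * itself stays ignorant of the crypto details. A minimal model of that
 * inversion of control; the typedef and consumer below are illustrative,
 * not zbud's actual interface.
 */
typedef void (*sketch_decompress_fn)(char *from_va, unsigned int size,
				     char *to_va);

/* The buddy allocator only knows where the bytes live, not how to decode. */
static void sketch_consume(char *zdata, unsigned int zsize, char *out,
			   sketch_decompress_fn decompress)
{
	decompress(zdata, zsize, out);	/* policy supplied by the caller */
}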
-
-/*
- * fill the pageframe corresponding to the struct page with the data
- * from the passed pampd
- */
-static int zcache_pampd_get_data(char *data, size_t *sizep, bool raw,
- void *pampd, struct tmem_pool *pool,
- struct tmem_oid *oid, uint32_t index)
-{
- int ret;
- bool eph = !is_persistent(pool);
-
- BUG_ON(preemptible());
- BUG_ON(eph); /* fix later if shared pools get implemented */
- BUG_ON(pampd_is_remote(pampd));
-
- if (pampd == (void *)ZERO_FILLED) {
- handle_zero_filled_page(data);
- if (!raw)
- *sizep = PAGE_SIZE;
- return 0;
- }
-
- if (raw)
- ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
- sizep, eph);
- else {
- ret = zbud_decompress((struct page *)(data),
- (struct zbudref *)pampd, false,
- zcache_decompress);
- *sizep = PAGE_SIZE;
- }
- return ret;
-}
-
-/*
- * fill the pageframe corresponding to the struct page with the data
- * from the passed pampd
- */
-static int zcache_pampd_get_data_and_free(char *data, size_t *sizep, bool raw,
- void *pampd, struct tmem_pool *pool,
- struct tmem_oid *oid, uint32_t index)
-{
- int ret = 0;
- bool eph = !is_persistent(pool), zero_filled = false;
- struct page *page = NULL;
- unsigned int zsize, zpages;
-
- BUG_ON(preemptible());
- BUG_ON(pampd_is_remote(pampd));
-
- if (pampd == (void *)ZERO_FILLED) {
- handle_zero_filled_page(data);
- zero_filled = true;
- zsize = 0;
- zpages = 1;
- if (!raw)
- *sizep = PAGE_SIZE;
- dec_zcache_zero_filled_pages();
- goto zero_fill;
- }
-
- if (raw)
- ret = zbud_copy_from_zbud(data, (struct zbudref *)pampd,
- sizep, eph);
- else {
- ret = zbud_decompress((struct page *)(data),
- (struct zbudref *)pampd, eph,
- zcache_decompress);
- *sizep = PAGE_SIZE;
- }
- page = zbud_free_and_delist((struct zbudref *)pampd, eph,
- &zsize, &zpages);
-zero_fill:
- if (eph) {
- if (page)
- dec_zcache_eph_pageframes();
- dec_zcache_eph_zpages(zpages);
- dec_zcache_eph_zbytes(zsize);
- } else {
- if (page)
- dec_zcache_pers_pageframes();
- dec_zcache_pers_zpages(zpages);
- dec_zcache_pers_zbytes(zsize);
- }
- if (!is_local_client(pool->client) && !zero_filled)
- ramster_count_foreign_pages(eph, -1);
- if (page && !zero_filled)
- zcache_free_page(page);
- return ret;
-}
-
-/*
- * free the pampd and remove it from any zcache lists
- * pampd must no longer be pointed to from any tmem data structures!
- */
-static void zcache_pampd_free(void *pampd, struct tmem_pool *pool,
- struct tmem_oid *oid, uint32_t index, bool acct)
-{
- struct page *page = NULL;
- unsigned int zsize, zpages;
- bool zero_filled = false;
-
- BUG_ON(preemptible());
-
- if (pampd == (void *)ZERO_FILLED) {
- zero_filled = true;
- zsize = 0;
- zpages = 1;
- dec_zcache_zero_filled_pages();
- }
-
- if (pampd_is_remote(pampd) && !zero_filled) {
- BUG_ON(!ramster_enabled);
- pampd = ramster_pampd_free(pampd, pool, oid, index, acct);
- if (pampd == NULL)
- return;
- }
- if (is_ephemeral(pool)) {
- if (!zero_filled)
- page = zbud_free_and_delist((struct zbudref *)pampd,
- true, &zsize, &zpages);
- if (page)
- dec_zcache_eph_pageframes();
- dec_zcache_eph_zpages(zpages);
- dec_zcache_eph_zbytes(zsize);
- /* FIXME CONFIG_RAMSTER... check acct parameter? */
- } else {
- if (!zero_filled)
- page = zbud_free_and_delist((struct zbudref *)pampd,
- false, &zsize, &zpages);
- if (page)
- dec_zcache_pers_pageframes();
- dec_zcache_pers_zpages(zpages);
- dec_zcache_pers_zbytes(zsize);
- }
- if (!is_local_client(pool->client) && !zero_filled)
- ramster_count_foreign_pages(is_ephemeral(pool), -1);
- if (page && !zero_filled)
- zcache_free_page(page);
-}
-
-static struct tmem_pamops zcache_pamops = {
- .create_finish = zcache_pampd_create_finish,
- .get_data = zcache_pampd_get_data,
- .get_data_and_free = zcache_pampd_get_data_and_free,
- .free = zcache_pampd_free,
-};
-
-/*
- * zcache compression/decompression and related per-cpu stuff
- */
-
-static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
-#define ZCACHE_DSTMEM_ORDER 1
-
-static void zcache_compress(struct page *from, void **out_va, unsigned *out_len)
-{
- int ret;
- unsigned char *dmem = __get_cpu_var(zcache_dstmem);
- char *from_va;
-
- BUG_ON(!irqs_disabled());
- /* no buffer or no compressor so can't compress */
- BUG_ON(dmem == NULL);
- *out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
- from_va = kmap_atomic(from);
- mb();
- ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
- out_len);
- BUG_ON(ret);
- *out_va = dmem;
- kunmap_atomic(from_va);
-}
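/*
 * zcache_comp_op() is used above but defined earlier in this file; a
 * sketch of how such a helper plausibly drives the per-cpu crypto_comp
 * transform through the kernel crypto API -- a reconstruction under that
 * assumption, not a verbatim copy of the original.
 */
static int sketch_comp_op(int op, const u8 *src, unsigned int slen,
			  u8 *dst, unsigned int *dlen)
{
	struct crypto_comp *tfm;
	int ret = -EINVAL;

	tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, get_cpu());
	BUG_ON(tfm == NULL);
	switch (op) {
	case ZCACHE_COMPOP_COMPRESS:
		ret = crypto_comp_compress(tfm, src, slen, dst, dlen);
		break;
	case ZCACHE_COMPOP_DECOMPRESS:
		ret = crypto_comp_decompress(tfm, src, slen, dst, dlen);
		break;
	}
	put_cpu();
	return ret;
}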
-
-static int zcache_comp_cpu_up(int cpu)
-{
- struct crypto_comp *tfm;
-
- tfm = crypto_alloc_comp(zcache_comp_name, 0, 0);
- if (IS_ERR(tfm))
- return NOTIFY_BAD;
- *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = tfm;
- return NOTIFY_OK;
-}
-
-static void zcache_comp_cpu_down(int cpu)
-{
- struct crypto_comp *tfm;
-
- tfm = *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu);
- crypto_free_comp(tfm);
- *per_cpu_ptr(zcache_comp_pcpu_tfms, cpu) = NULL;
-}
-
-static int zcache_cpu_notifier(struct notifier_block *nb,
- unsigned long action, void *pcpu)
-{
- int ret, i, cpu = (long)pcpu;
- struct zcache_preload *kp;
-
- switch (action) {
- case CPU_UP_PREPARE:
- ret = zcache_comp_cpu_up(cpu);
- if (ret != NOTIFY_OK) {
- pr_err("%s: can't allocate compressor xform\n",
- namestr);
- return ret;
- }
- per_cpu(zcache_dstmem, cpu) = (void *)__get_free_pages(
- GFP_KERNEL | __GFP_REPEAT, ZCACHE_DSTMEM_ORDER);
- if (ramster_enabled)
- ramster_cpu_up(cpu);
- break;
- case CPU_DEAD:
- case CPU_UP_CANCELED:
- zcache_comp_cpu_down(cpu);
- free_pages((unsigned long)per_cpu(zcache_dstmem, cpu),
- ZCACHE_DSTMEM_ORDER);
- per_cpu(zcache_dstmem, cpu) = NULL;
- kp = &per_cpu(zcache_preloads, cpu);
- for (i = 0; i < ARRAY_SIZE(kp->objnodes); i++) {
- if (kp->objnodes[i])
- kmem_cache_free(zcache_objnode_cache,
- kp->objnodes[i]);
- }
- if (kp->obj) {
- kmem_cache_free(zcache_obj_cache, kp->obj);
- kp->obj = NULL;
- }
- if (ramster_enabled)
- ramster_cpu_down(cpu);
- break;
- default:
- break;
- }
- return NOTIFY_OK;
-}
-
-static struct notifier_block zcache_cpu_notifier_block = {
- .notifier_call = zcache_cpu_notifier
-};
-
-/*
- * The following code interacts with the zbud eviction and zbud
- * zombify code to access LRU pages
- */
-
-static struct page *zcache_evict_eph_pageframe(void)
-{
- struct page *page;
- unsigned int zsize = 0, zpages = 0;
-
- page = zbud_evict_pageframe_lru(&zsize, &zpages);
- if (page == NULL)
- goto out;
- dec_zcache_eph_zbytes(zsize);
- dec_zcache_eph_zpages(zpages);
- inc_zcache_evicted_eph_zpages(zpages);
- dec_zcache_eph_pageframes();
- inc_zcache_evicted_eph_pageframes();
-out:
- return page;
-}
-
-#ifdef CONFIG_ZCACHE_WRITEBACK
-
-static atomic_t zcache_outstanding_writeback_pages_atomic = ATOMIC_INIT(0);
-
-static inline void inc_zcache_outstanding_writeback_pages(void)
-{
- zcache_outstanding_writeback_pages =
- atomic_inc_return(&zcache_outstanding_writeback_pages_atomic);
-}
-static inline void dec_zcache_outstanding_writeback_pages(void)
-{
- zcache_outstanding_writeback_pages =
- atomic_dec_return(&zcache_outstanding_writeback_pages_atomic);
-};
-static void unswiz(struct tmem_oid oid, u32 index,
- unsigned *type, pgoff_t *offset);
-
-/*
- * Choose an LRU persistent pageframe and attempt to write it back to
- * the backing swap disk by calling frontswap_writeback on both zpages.
- *
- * This is work-in-progress.
- */
-
-static void zcache_end_swap_write(struct bio *bio, int err)
-{
- end_swap_bio_write(bio, err);
- dec_zcache_outstanding_writeback_pages();
- zcache_writtenback_pages++;
-}
-
-/*
- * zcache_get_swap_cache_page
- *
- * This is an adaptation of read_swap_cache_async()
- *
- * On success, the page is returned in new_page.
- * Returns 0 if the page was already in the swap cache; the page is not locked.
- * Returns 1 if the new page needs to be populated; the page is locked.
- */
-static int zcache_get_swap_cache_page(int type, pgoff_t offset,
- struct page *new_page)
-{
- struct page *found_page;
- swp_entry_t entry = swp_entry(type, offset);
- int err;
-
- BUG_ON(new_page == NULL);
- do {
- /*
- * First check the swap cache. Since this is normally
- * called after lookup_swap_cache() failed, re-calling
- * that would confuse statistics.
- */
- found_page = find_get_page(&swapper_space, entry.val);
- if (found_page)
- return 0;
-
- /*
- * call radix_tree_preload() while we can wait.
- */
- err = radix_tree_preload(GFP_KERNEL);
- if (err)
- break;
-
- /*
- * Swap entry may have been freed since our caller observed it.
- */
- err = swapcache_prepare(entry);
- if (err == -EEXIST) { /* seems racy */
- radix_tree_preload_end();
- continue;
- }
- if (err) { /* swp entry is obsolete ? */
- radix_tree_preload_end();
- break;
- }
-
- /* May fail (-ENOMEM) if radix-tree node allocation failed. */
- __set_page_locked(new_page);
- SetPageSwapBacked(new_page);
- err = __add_to_swap_cache(new_page, entry);
- if (likely(!err)) {
- radix_tree_preload_end();
- lru_cache_add_anon(new_page);
- return 1;
- }
- radix_tree_preload_end();
- ClearPageSwapBacked(new_page);
- __clear_page_locked(new_page);
- /*
- * add_to_swap_cache() doesn't return -EEXIST, so we can safely
- * clear SWAP_HAS_CACHE flag.
- */
- swapcache_free(entry, NULL);
- /* FIXME: is it possible to get here without err==-ENOMEM?
- * If not, we can dispense with the do loop, use goto retry */
- } while (err != -ENOMEM);
-
- return -ENOMEM;
-}
-
-/*
- * Given a frontswap zpage in zcache (identified by type/offset) and
- * an empty page, put the page into the swap cache, use frontswap
- * to get the page from zcache into the empty page, then give it
- * to the swap subsystem to send to disk (carefully avoiding the
- * possibility that frontswap might snatch it back).
- * Returns < 0 if error, 0 if successful, and 1 if successful but
- * the newpage passed in is not needed and should be freed.
- */
-static int zcache_frontswap_writeback_zpage(int type, pgoff_t offset,
- struct page *newpage)
-{
- struct page *page = newpage;
- int ret;
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_NONE,
- };
-
- ret = zcache_get_swap_cache_page(type, offset, page);
- if (ret < 0)
- return ret;
- else if (ret == 0) {
- /* more uptodate page is already in swapcache */
- __frontswap_invalidate_page(type, offset);
- return 1;
- }
-
- BUG_ON(!frontswap_has_exclusive_gets); /* load must also invalidate */
- /* FIXME: how is it possible to get here when page is unlocked? */
- __frontswap_load(page);
- SetPageUptodate(page); /* above does SetPageDirty, is that enough? */
-
- /* start writeback */
- SetPageReclaim(page);
- /*
- * Return value is ignored here because it doesn't change anything
- * for us. Page is returned unlocked.
- */
- (void)__swap_writepage(page, &wbc, zcache_end_swap_write);
- page_cache_release(page);
- inc_zcache_outstanding_writeback_pages();
-
- return 0;
-}
-
-/*
- * The following is still a magic number... we want to allow forward progress
- * for writeback because it clears out needed RAM when under pressure, but
- * we don't want to allow writeback to absorb and queue too many GFP_KERNEL
- * pages if the swap device is very slow.
- */
-#define ZCACHE_MAX_OUTSTANDING_WRITEBACK_PAGES 6400
-
-/*
- * Try to allocate two free pages, first using a non-aggressive alloc,
- * then by evicting zcache ephemeral (clean pagecache) pages, and last
- * by aggressive GFP_KERNEL alloc. We allow zbud to choose a pageframe
- * consisting of 1-2 zbuds/zpages, then call the writeback_zpage helper
- * function above for each.
- */
-static int zcache_frontswap_writeback(void)
-{
- struct tmem_handle th[2];
- int ret = 0;
- int nzbuds, writeback_ret;
- unsigned type;
- struct page *znewpage1 = NULL, *znewpage2 = NULL;
- struct page *evictpage1 = NULL, *evictpage2 = NULL;
- struct page *newpage1 = NULL, *newpage2 = NULL;
- struct page *page1 = NULL, *page2 = NULL;
- pgoff_t offset;
-
- znewpage1 = alloc_page(ZCACHE_GFP_MASK);
- znewpage2 = alloc_page(ZCACHE_GFP_MASK);
- if (znewpage1 == NULL)
- evictpage1 = zcache_evict_eph_pageframe();
- if (znewpage2 == NULL)
- evictpage2 = zcache_evict_eph_pageframe();
-
- if ((evictpage1 == NULL || evictpage2 == NULL) &&
- atomic_read(&zcache_outstanding_writeback_pages_atomic) >
- ZCACHE_MAX_OUTSTANDING_WRITEBACK_PAGES) {
- goto free_and_out;
- }
- if (znewpage1 == NULL && evictpage1 == NULL)
- newpage1 = alloc_page(GFP_KERNEL);
- if (znewpage2 == NULL && evictpage2 == NULL)
- newpage2 = alloc_page(GFP_KERNEL);
- if (newpage1 == NULL || newpage2 == NULL)
- goto free_and_out;
-
- /* ok, we have two pageframes pre-allocated, get a pair of zbuds */
- nzbuds = zbud_make_zombie_lru(&th[0], NULL, NULL, false);
- if (nzbuds == 0) {
- ret = -ENOENT;
- goto free_and_out;
- }
-
- /* process the first zbud */
- unswiz(th[0].oid, th[0].index, &type, &offset);
- page1 = (znewpage1 != NULL) ? znewpage1 :
- ((newpage1 != NULL) ? newpage1 : evictpage1);
- writeback_ret = zcache_frontswap_writeback_zpage(type, offset, page1);
- if (writeback_ret < 0) {
- ret = -ENOMEM;
- goto free_and_out;
- }
- if (evictpage1 != NULL)
- zcache_pageframes_freed =
- atomic_inc_return(&zcache_pageframes_freed_atomic);
- if (writeback_ret == 0) {
- /* zcache_get_swap_cache_page will free, don't double free */
- znewpage1 = NULL;
- newpage1 = NULL;
- evictpage1 = NULL;
- }
- if (nzbuds < 2)
- goto free_and_out;
-
- /* if there is a second zbud, process it */
- unswiz(th[1].oid, th[1].index, &type, &offset);
- page2 = (znewpage2 != NULL) ? znewpage2 :
- ((newpage2 != NULL) ? newpage2 : evictpage2);
- writeback_ret = zcache_frontswap_writeback_zpage(type, offset, page2);
- if (writeback_ret < 0) {
- ret = -ENOMEM;
- goto free_and_out;
- }
- if (evictpage2 != NULL)
- zcache_pageframes_freed =
- atomic_inc_return(&zcache_pageframes_freed_atomic);
- if (writeback_ret == 0) {
- znewpage2 = NULL;
- newpage2 = NULL;
- evictpage2 = NULL;
- }
-
-free_and_out:
- if (znewpage1 != NULL)
- page_cache_release(znewpage1);
- if (znewpage2 != NULL)
- page_cache_release(znewpage2);
- if (newpage1 != NULL)
- page_cache_release(newpage1);
- if (newpage2 != NULL)
- page_cache_release(newpage2);
- if (evictpage1 != NULL)
- zcache_free_page(evictpage1);
- if (evictpage2 != NULL)
- zcache_free_page(evictpage2);
- return ret;
-}
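/*
 * A simplified, per-page model of the page-acquisition strategy above (the
 * real function works on a pair of pages at once and applies the throttle
 * before its aggressive allocations). The callback parameters stand in for
 * alloc_page(ZCACHE_GFP_MASK), zcache_evict_eph_pageframe() and
 * alloc_page(GFP_KERNEL) respectively.
 */
#include <stddef.h>

static void *sketch_get_writeback_page(void *(*alloc_cheap)(void),
				       void *(*evict_ephemeral)(void),
				       void *(*alloc_aggressive)(void),
				       long outstanding, long limit)
{
	void *page = alloc_cheap();	/* non-aggressive attempt first */

	if (page)
		return page;
	page = evict_ephemeral();	/* reclaim one of our own clean pages */
	if (page)
		return page;
	if (outstanding > limit)	/* throttle: the swap device is behind */
		return NULL;
	return alloc_aggressive();	/* last resort, may apply pressure */
}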
-#endif /* CONFIG_ZCACHE_WRITEBACK */
-
-/*
- * When zcache is disabled ("frozen"), pools can be created and destroyed,
- * but all puts (and thus all other operations that require memory allocation)
- * must fail. If zcache is unfrozen, accepts puts, and is then frozen again,
- * data consistency requires that all puts issued while frozen be converted
- * into flushes.
- */
-static bool zcache_freeze;
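/*
 * A tiny model of why a put made while frozen must turn into a flush: if an
 * earlier copy of the same page is already cached, rejecting the new data
 * while keeping the old copy would let a later get return stale data.
 * zcache_put_page() below implements this; the single-slot "store" here is
 * purely illustrative.
 */
#include <stdbool.h>

static int sketch_stored_val;
static bool sketch_stored_valid;

static void sketch_put(int val, bool frozen)
{
	if (frozen) {
		sketch_stored_valid = false;	/* flush instead of storing */
		return;
	}
	sketch_stored_val = val;
	sketch_stored_valid = true;
}

/* Returns true and fills *val only when a non-stale copy exists. */
static bool sketch_get(int *val)
{
	if (!sketch_stored_valid)
		return false;
	*val = sketch_stored_val;
	return true;
}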
-
-/*
- * This zcache shrinker interface reduces the number of ephemeral pageframes
- * used by zcache to approximately the same as the total number of LRU_FILE
- * pageframes in use, and now also reduces the number of persistent pageframes
- * used by zcache to approximately the same as the total number of LRU_ANON
- * pageframes in use. FIXME POLICY: Probably the writeback should only occur
- * if the eviction doesn't free enough pages.
- */
-static int shrink_zcache_memory(struct shrinker *shrink,
- struct shrink_control *sc)
-{
- static bool in_progress;
- int ret = -1;
- int nr = sc->nr_to_scan;
- int nr_evict = 0;
- int nr_writeback = 0;
- struct page *page;
- int file_pageframes_inuse, anon_pageframes_inuse;
-
- if (nr <= 0)
- goto skip_evict;
-
- /* don't allow more than one eviction thread at a time */
- if (in_progress)
- goto skip_evict;
-
- in_progress = true;
-
- /* we are going to ignore nr, and target a different value */
- zcache_last_active_file_pageframes =
- global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
- zcache_last_inactive_file_pageframes =
- global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
- file_pageframes_inuse = zcache_last_active_file_pageframes +
- zcache_last_inactive_file_pageframes;
- if (zcache_eph_pageframes > file_pageframes_inuse)
- nr_evict = zcache_eph_pageframes - file_pageframes_inuse;
- else
- nr_evict = 0;
- while (nr_evict-- > 0) {
- page = zcache_evict_eph_pageframe();
- if (page == NULL)
- break;
- zcache_free_page(page);
- }
-
- zcache_last_active_anon_pageframes =
- global_page_state(NR_LRU_BASE + LRU_ACTIVE_ANON);
- zcache_last_inactive_anon_pageframes =
- global_page_state(NR_LRU_BASE + LRU_INACTIVE_ANON);
- anon_pageframes_inuse = zcache_last_active_anon_pageframes +
- zcache_last_inactive_anon_pageframes;
- if (zcache_pers_pageframes > anon_pageframes_inuse)
- nr_writeback = zcache_pers_pageframes - anon_pageframes_inuse;
- else
- nr_writeback = 0;
- while (nr_writeback-- > 0) {
-#ifdef CONFIG_ZCACHE_WRITEBACK
- int writeback_ret;
- writeback_ret = zcache_frontswap_writeback();
- if (writeback_ret == -ENOMEM)
-#endif
- break;
- }
- in_progress = false;
-
-skip_evict:
- /* resample: has changed, but maybe not all the way yet */
- zcache_last_active_file_pageframes =
- global_page_state(NR_LRU_BASE + LRU_ACTIVE_FILE);
- zcache_last_inactive_file_pageframes =
- global_page_state(NR_LRU_BASE + LRU_INACTIVE_FILE);
- ret = zcache_eph_pageframes - zcache_last_active_file_pageframes +
- zcache_last_inactive_file_pageframes;
- if (ret < 0)
- ret = 0;
- return ret;
-}
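/*
 * The eviction target computed above, reduced to its arithmetic: evict only
 * the ephemeral pageframes in excess of the file-LRU working set (and,
 * analogously, write back persistent frames in excess of the anon LRU).
 * For example, 1000 ephemeral pageframes against 800 file-LRU pages yields
 * a target of 200 evictions.
 */
static long sketch_excess(long zcache_frames, long lru_pages_inuse)
{
	long excess = zcache_frames - lru_pages_inuse;

	return excess > 0 ? excess : 0;
}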
-
-static struct shrinker zcache_shrinker = {
- .shrink = shrink_zcache_memory,
- .seeks = DEFAULT_SEEKS,
-};
-
-/*
- * zcache shims between cleancache/frontswap ops and tmem
- */
-
-/* FIXME rename these core routines to zcache_tmemput etc? */
-int zcache_put_page(int cli_id, int pool_id, struct tmem_oid *oidp,
- uint32_t index, void *page,
- unsigned int size, bool raw, int ephemeral)
-{
- struct tmem_pool *pool;
- struct tmem_handle th;
- int ret = -1;
- void *pampd = NULL;
-
- BUG_ON(!irqs_disabled());
- pool = zcache_get_pool_by_id(cli_id, pool_id);
- if (unlikely(pool == NULL))
- goto out;
- if (!zcache_freeze) {
- ret = 0;
- th.client_id = cli_id;
- th.pool_id = pool_id;
- th.oid = *oidp;
- th.index = index;
- pampd = zcache_pampd_create((char *)page, size, raw,
- ephemeral, &th);
- if (pampd == NULL) {
- ret = -ENOMEM;
- if (ephemeral)
- inc_zcache_failed_eph_puts();
- else
- inc_zcache_failed_pers_puts();
- } else {
- if (ramster_enabled)
- ramster_do_preload_flnode(pool);
- ret = tmem_put(pool, oidp, index, 0, pampd);
- if (ret < 0)
- BUG();
- }
- zcache_put_pool(pool);
- } else {
- inc_zcache_put_to_flush();
- if (ramster_enabled)
- ramster_do_preload_flnode(pool);
- if (atomic_read(&pool->obj_count) > 0)
- /* the put fails whether the flush succeeds or not */
- (void)tmem_flush_page(pool, oidp, index);
- zcache_put_pool(pool);
- }
-out:
- return ret;
-}
-
-int zcache_get_page(int cli_id, int pool_id, struct tmem_oid *oidp,
- uint32_t index, void *page,
- size_t *sizep, bool raw, int get_and_free)
-{
- struct tmem_pool *pool;
- int ret = -1;
- bool eph;
-
- if (!raw) {
- BUG_ON(irqs_disabled());
- BUG_ON(in_softirq());
- }
- pool = zcache_get_pool_by_id(cli_id, pool_id);
- eph = is_ephemeral(pool);
- if (likely(pool != NULL)) {
- if (atomic_read(&pool->obj_count) > 0)
- ret = tmem_get(pool, oidp, index, (char *)(page),
- sizep, raw, get_and_free);
- zcache_put_pool(pool);
- }
- WARN_ONCE((!is_ephemeral(pool) && (ret != 0)),
- "zcache_get fails on persistent pool, "
- "bad things are very likely to happen soon\n");
-#ifdef RAMSTER_TESTING
- if (ret != 0 && ret != -1 && !(ret == -EINVAL && is_ephemeral(pool)))
- pr_err("TESTING zcache_get tmem_get returns ret=%d\n", ret);
-#endif
- return ret;
-}
-
-int zcache_flush_page(int cli_id, int pool_id,
- struct tmem_oid *oidp, uint32_t index)
-{
- struct tmem_pool *pool;
- int ret = -1;
- unsigned long flags;
-
- local_irq_save(flags);
- inc_zcache_flush_total();
- pool = zcache_get_pool_by_id(cli_id, pool_id);
- if (ramster_enabled)
- ramster_do_preload_flnode(pool);
- if (likely(pool != NULL)) {
- if (atomic_read(&pool->obj_count) > 0)
- ret = tmem_flush_page(pool, oidp, index);
- zcache_put_pool(pool);
- }
- if (ret >= 0)
- inc_zcache_flush_found();
- local_irq_restore(flags);
- return ret;
-}
-
-int zcache_flush_object(int cli_id, int pool_id,
- struct tmem_oid *oidp)
-{
- struct tmem_pool *pool;
- int ret = -1;
- unsigned long flags;
-
- local_irq_save(flags);
- inc_zcache_flobj_total();
- pool = zcache_get_pool_by_id(cli_id, pool_id);
- if (ramster_enabled)
- ramster_do_preload_flnode(pool);
- if (likely(pool != NULL)) {
- if (atomic_read(&pool->obj_count) > 0)
- ret = tmem_flush_object(pool, oidp);
- zcache_put_pool(pool);
- }
- if (ret >= 0)
- inc_zcache_flobj_found();
- local_irq_restore(flags);
- return ret;
-}
-
-static int zcache_client_destroy_pool(int cli_id, int pool_id)
-{
- struct tmem_pool *pool = NULL;
- struct zcache_client *cli = NULL;
- int ret = -1;
-
- if (pool_id < 0)
- goto out;
- if (cli_id == LOCAL_CLIENT)
- cli = &zcache_host;
- else if ((unsigned int)cli_id < MAX_CLIENTS)
- cli = &zcache_clients[cli_id];
- if (cli == NULL)
- goto out;
- atomic_inc(&cli->refcount);
- pool = cli->tmem_pools[pool_id];
- if (pool == NULL)
- goto out;
- cli->tmem_pools[pool_id] = NULL;
- /* wait for pool activity on other cpus to quiesce */
- while (atomic_read(&pool->refcount) != 0)
- ;
- atomic_dec(&cli->refcount);
- local_bh_disable();
- ret = tmem_destroy_pool(pool);
- local_bh_enable();
- kfree(pool);
- if (cli_id == LOCAL_CLIENT)
- pr_info("%s: destroyed local pool id=%d\n", namestr, pool_id);
- else
- pr_info("%s: destroyed pool id=%d, client=%d\n",
- namestr, pool_id, cli_id);
-out:
- return ret;
-}
-
-int zcache_new_pool(uint16_t cli_id, uint32_t flags)
-{
- int poolid = -1;
- struct tmem_pool *pool;
- struct zcache_client *cli = NULL;
-
- if (cli_id == LOCAL_CLIENT)
- cli = &zcache_host;
- else if ((unsigned int)cli_id < MAX_CLIENTS)
- cli = &zcache_clients[cli_id];
- if (cli == NULL)
- goto out;
- atomic_inc(&cli->refcount);
- pool = kmalloc(sizeof(struct tmem_pool), GFP_ATOMIC);
- if (pool == NULL)
- goto out;
-
- for (poolid = 0; poolid < MAX_POOLS_PER_CLIENT; poolid++)
- if (cli->tmem_pools[poolid] == NULL)
- break;
- if (poolid >= MAX_POOLS_PER_CLIENT) {
- pr_info("%s: pool creation failed: max exceeded\n", namestr);
- kfree(pool);
- poolid = -1;
- goto out;
- }
- atomic_set(&pool->refcount, 0);
- pool->client = cli;
- pool->pool_id = poolid;
- tmem_new_pool(pool, flags);
- cli->tmem_pools[poolid] = pool;
- if (cli_id == LOCAL_CLIENT)
- pr_info("%s: created %s local tmem pool, id=%d\n", namestr,
- flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
- poolid);
- else
- pr_info("%s: created %s tmem pool, id=%d, client=%d\n", namestr,
- flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
- poolid, cli_id);
-out:
- if (cli != NULL)
- atomic_dec(&cli->refcount);
- return poolid;
-}
-
-static int zcache_local_new_pool(uint32_t flags)
-{
- return zcache_new_pool(LOCAL_CLIENT, flags);
-}
-
-int zcache_autocreate_pool(unsigned int cli_id, unsigned int pool_id, bool eph)
-{
- struct tmem_pool *pool;
- struct zcache_client *cli = NULL;
- uint32_t flags = eph ? 0 : TMEM_POOL_PERSIST;
- int ret = -1;
-
- BUG_ON(!ramster_enabled);
- if (cli_id == LOCAL_CLIENT)
- goto out;
- if (pool_id >= MAX_POOLS_PER_CLIENT)
- goto out;
- if (cli_id >= MAX_CLIENTS)
- goto out;
-
- cli = &zcache_clients[cli_id];
- if ((eph && disable_cleancache) || (!eph && disable_frontswap)) {
- pr_err("zcache_autocreate_pool: pool type disabled\n");
- goto out;
- }
- if (!cli->allocated) {
- if (zcache_new_client(cli_id)) {
- pr_err("zcache_autocreate_pool: can't create client\n");
- goto out;
- }
- cli = &zcache_clients[cli_id];
- }
- atomic_inc(&cli->refcount);
- pool = cli->tmem_pools[pool_id];
- if (pool != NULL) {
- if (pool->persistent && eph) {
- pr_err("zcache_autocreate_pool: type mismatch\n");
- goto out;
- }
- ret = 0;
- goto out;
- }
- pool = kmalloc(sizeof(struct tmem_pool), GFP_KERNEL);
- if (pool == NULL)
- goto out;
-
- atomic_set(&pool->refcount, 0);
- pool->client = cli;
- pool->pool_id = pool_id;
- tmem_new_pool(pool, flags);
- cli->tmem_pools[pool_id] = pool;
- pr_info("%s: AUTOcreated %s tmem poolid=%d, for remote client=%d\n",
- namestr, flags & TMEM_POOL_PERSIST ? "persistent" : "ephemeral",
- pool_id, cli_id);
- ret = 0;
-out:
- if (cli != NULL)
- atomic_dec(&cli->refcount);
- return ret;
-}
-
-/**********
- * Two kernel functionalities currently can be layered on top of tmem.
- * These are "cleancache" which is used as a second-chance cache for clean
- * page cache pages; and "frontswap" which is used for swap pages
- * to avoid writes to disk. A generic "shim" is provided here for each
- * to translate in-kernel semantics to zcache semantics.
- */
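/*
 * The cleancache shim below reinterprets the filesystem-supplied key as a
 * tmem object id and relies on the two types having identical size (the
 * init_fs paths BUG_ON exactly that). A standalone model of the idea,
 * assuming -- as that check implies -- that both occupy the same number of
 * bytes, sketched here as three 64-bit words each; the real layouts live
 * in cleancache.h and tmem.h.
 */
#include <stdint.h>
#include <string.h>

struct sketch_filekey { uint64_t key[3]; };
struct sketch_oid     { uint64_t oid[3]; };

/* Fails to compile if the two layouts ever diverge in size. */
typedef char sketch_key_oid_size_check
	[sizeof(struct sketch_filekey) == sizeof(struct sketch_oid) ? 1 : -1];

static struct sketch_oid sketch_key_to_oid(const struct sketch_filekey *key)
{
	struct sketch_oid oid;

	memcpy(&oid, key, sizeof(oid));	/* same bytes, different view */
	return oid;
}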
-
-static void zcache_cleancache_put_page(int pool_id,
- struct cleancache_filekey key,
- pgoff_t index, struct page *page)
-{
- u32 ind = (u32) index;
- struct tmem_oid oid = *(struct tmem_oid *)&key;
-
- if (!disable_cleancache_ignore_nonactive && !PageWasActive(page)) {
- inc_zcache_eph_nonactive_puts_ignored();
- return;
- }
- if (likely(ind == index))
- (void)zcache_put_page(LOCAL_CLIENT, pool_id, &oid, index,
- page, PAGE_SIZE, false, 1);
-}
-
-static int zcache_cleancache_get_page(int pool_id,
- struct cleancache_filekey key,
- pgoff_t index, struct page *page)
-{
- u32 ind = (u32) index;
- struct tmem_oid oid = *(struct tmem_oid *)&key;
- size_t size;
- int ret = -1;
-
- if (likely(ind == index)) {
- ret = zcache_get_page(LOCAL_CLIENT, pool_id, &oid, index,
- page, &size, false, 0);
- BUG_ON(ret >= 0 && size != PAGE_SIZE);
- if (ret == 0)
- SetPageWasActive(page);
- }
- return ret;
-}
-
-static void zcache_cleancache_flush_page(int pool_id,
- struct cleancache_filekey key,
- pgoff_t index)
-{
- u32 ind = (u32) index;
- struct tmem_oid oid = *(struct tmem_oid *)&key;
-
- if (likely(ind == index))
- (void)zcache_flush_page(LOCAL_CLIENT, pool_id, &oid, ind);
-}
-
-static void zcache_cleancache_flush_inode(int pool_id,
- struct cleancache_filekey key)
-{
- struct tmem_oid oid = *(struct tmem_oid *)&key;
-
- (void)zcache_flush_object(LOCAL_CLIENT, pool_id, &oid);
-}
-
-static void zcache_cleancache_flush_fs(int pool_id)
-{
- if (pool_id >= 0)
- (void)zcache_client_destroy_pool(LOCAL_CLIENT, pool_id);
-}
-
-static int zcache_cleancache_init_fs(size_t pagesize)
-{
- BUG_ON(sizeof(struct cleancache_filekey) !=
- sizeof(struct tmem_oid));
- BUG_ON(pagesize != PAGE_SIZE);
- return zcache_local_new_pool(0);
-}
-
-static int zcache_cleancache_init_shared_fs(char *uuid, size_t pagesize)
-{
- /* shared pools are unsupported and map to private */
- BUG_ON(sizeof(struct cleancache_filekey) !=
- sizeof(struct tmem_oid));
- BUG_ON(pagesize != PAGE_SIZE);
- return zcache_local_new_pool(0);
-}
-
-static struct cleancache_ops zcache_cleancache_ops = {
- .put_page = zcache_cleancache_put_page,
- .get_page = zcache_cleancache_get_page,
- .invalidate_page = zcache_cleancache_flush_page,
- .invalidate_inode = zcache_cleancache_flush_inode,
- .invalidate_fs = zcache_cleancache_flush_fs,
- .init_shared_fs = zcache_cleancache_init_shared_fs,
- .init_fs = zcache_cleancache_init_fs
-};
-
-struct cleancache_ops *zcache_cleancache_register_ops(void)
-{
- struct cleancache_ops *old_ops =
- cleancache_register_ops(&zcache_cleancache_ops);
-
- return old_ops;
-}
-
-/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
-static int zcache_frontswap_poolid __read_mostly = -1;
-
-/*
- * Swizzling increases objects per swaptype, increasing tmem concurrency
- * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
- * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
- * frontswap_get_page(), but has side-effects. Hence using 8.
- */
-#define SWIZ_BITS 8
-#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
-#define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
-#define iswiz(_ind) (_ind >> SWIZ_BITS)
-
-static inline struct tmem_oid oswiz(unsigned type, u32 ind)
-{
- struct tmem_oid oid = { .oid = { 0 } };
- oid.oid[0] = _oswiz(type, ind);
- return oid;
-}
-
-#ifdef CONFIG_ZCACHE_WRITEBACK
-static void unswiz(struct tmem_oid oid, u32 index,
- unsigned *type, pgoff_t *offset)
-{
- *type = (unsigned)(oid.oid[0] >> SWIZ_BITS);
- *offset = (pgoff_t)((index << SWIZ_BITS) |
- (oid.oid[0] & SWIZ_MASK));
-}
-#endif
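/*
 * A minimal user-space sketch of the oid swizzling above, assuming
 * SWIZ_BITS == 8: the swap type is packed into the high bits of oid[0],
 * the low 8 bits of the page offset into its low bits, and the remaining
 * offset bits become the tmem index, so the pair round-trips losslessly.
 * The SKETCH_* names mirror the macros above but are illustrative.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_SWIZ_BITS 8
#define SKETCH_SWIZ_MASK ((1u << SKETCH_SWIZ_BITS) - 1)

static void sketch_swiz(unsigned type, uint32_t offset,
			uint64_t *oid0, uint32_t *index)
{
	*oid0 = ((uint64_t)type << SKETCH_SWIZ_BITS) | (offset & SKETCH_SWIZ_MASK);
	*index = offset >> SKETCH_SWIZ_BITS;
}

static void sketch_unswiz(uint64_t oid0, uint32_t index,
			  unsigned *type, uint32_t *offset)
{
	*type = (unsigned)(oid0 >> SKETCH_SWIZ_BITS);
	*offset = (index << SKETCH_SWIZ_BITS) | (uint32_t)(oid0 & SKETCH_SWIZ_MASK);
}

int main(void)
{
	uint64_t oid0;
	uint32_t index, offset;
	unsigned type;

	sketch_swiz(3, 0x12345, &oid0, &index);
	sketch_unswiz(oid0, index, &type, &offset);
	assert(type == 3 && offset == 0x12345);
	printf("type=%u offset=0x%x round-trips\n", type, offset);
	return 0;
}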
-
-static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
- struct page *page)
-{
- u64 ind64 = (u64)offset;
- u32 ind = (u32)offset;
- struct tmem_oid oid = oswiz(type, ind);
- int ret = -1;
- unsigned long flags;
-
- BUG_ON(!PageLocked(page));
- if (!disable_frontswap_ignore_nonactive && !PageWasActive(page)) {
- inc_zcache_pers_nonactive_puts_ignored();
- ret = -ERANGE;
- goto out;
- }
- if (likely(ind64 == ind)) {
- local_irq_save(flags);
- ret = zcache_put_page(LOCAL_CLIENT, zcache_frontswap_poolid,
- &oid, iswiz(ind),
- page, PAGE_SIZE, false, 0);
- local_irq_restore(flags);
- }
-out:
- return ret;
-}
-
-/* returns 0 if the page was successfully gotten from frontswap, or -1 if
- * it was not present (should never happen!) */
-static int zcache_frontswap_get_page(unsigned type, pgoff_t offset,
- struct page *page)
-{
- u64 ind64 = (u64)offset;
- u32 ind = (u32)offset;
- struct tmem_oid oid = oswiz(type, ind);
- size_t size;
- int ret = -1, get_and_free;
-
- if (frontswap_has_exclusive_gets)
- get_and_free = 1;
- else
- get_and_free = -1;
- BUG_ON(!PageLocked(page));
- if (likely(ind64 == ind)) {
- ret = zcache_get_page(LOCAL_CLIENT, zcache_frontswap_poolid,
- &oid, iswiz(ind),
- page, &size, false, get_and_free);
- BUG_ON(ret >= 0 && size != PAGE_SIZE);
- }
- return ret;
-}
-
-/* flush a single page from frontswap */
-static void zcache_frontswap_flush_page(unsigned type, pgoff_t offset)
-{
- u64 ind64 = (u64)offset;
- u32 ind = (u32)offset;
- struct tmem_oid oid = oswiz(type, ind);
-
- if (likely(ind64 == ind))
- (void)zcache_flush_page(LOCAL_CLIENT, zcache_frontswap_poolid,
- &oid, iswiz(ind));
-}
-
-/* flush all pages from the passed swaptype */
-static void zcache_frontswap_flush_area(unsigned type)
-{
- struct tmem_oid oid;
- int ind;
-
- for (ind = SWIZ_MASK; ind >= 0; ind--) {
- oid = oswiz(type, ind);
- (void)zcache_flush_object(LOCAL_CLIENT,
- zcache_frontswap_poolid, &oid);
- }
-}
-
-static void zcache_frontswap_init(unsigned ignored)
-{
- /* a single tmem poolid is used for all frontswap "types" (swapfiles) */
- if (zcache_frontswap_poolid < 0)
- zcache_frontswap_poolid =
- zcache_local_new_pool(TMEM_POOL_PERSIST);
-}
-
-static struct frontswap_ops zcache_frontswap_ops = {
- .store = zcache_frontswap_put_page,
- .load = zcache_frontswap_get_page,
- .invalidate_page = zcache_frontswap_flush_page,
- .invalidate_area = zcache_frontswap_flush_area,
- .init = zcache_frontswap_init
-};
-
-struct frontswap_ops *zcache_frontswap_register_ops(void)
-{
- struct frontswap_ops *old_ops =
- frontswap_register_ops(&zcache_frontswap_ops);
-
- return old_ops;
-}
-
-/*
- * zcache initialization
- * NOTE FOR NOW zcache or ramster MUST BE PROVIDED AS A KERNEL BOOT PARAMETER
- * OR NOTHING HAPPENS!
- */
-
-#ifndef CONFIG_ZCACHE_MODULE
-static int __init enable_zcache(char *s)
-{
- zcache_enabled = true;
- return 1;
-}
-__setup("zcache", enable_zcache);
-
-static int __init enable_ramster(char *s)
-{
- zcache_enabled = true;
-#ifdef CONFIG_RAMSTER
- ramster_enabled = true;
-#endif
- return 1;
-}
-__setup("ramster", enable_ramster);
-
-/* allow independent dynamic disabling of cleancache and frontswap */
-
-static int __init no_cleancache(char *s)
-{
- disable_cleancache = true;
- return 1;
-}
-
-__setup("nocleancache", no_cleancache);
-
-static int __init no_frontswap(char *s)
-{
- disable_frontswap = true;
- return 1;
-}
-
-__setup("nofrontswap", no_frontswap);
-
-static int __init no_frontswap_exclusive_gets(char *s)
-{
- frontswap_has_exclusive_gets = false;
- return 1;
-}
-
-__setup("nofrontswapexclusivegets", no_frontswap_exclusive_gets);
-
-static int __init no_frontswap_ignore_nonactive(char *s)
-{
- disable_frontswap_ignore_nonactive = true;
- return 1;
-}
-
-__setup("nofrontswapignorenonactive", no_frontswap_ignore_nonactive);
-
-static int __init no_cleancache_ignore_nonactive(char *s)
-{
- disable_cleancache_ignore_nonactive = true;
- return 1;
-}
-
-__setup("nocleancacheignorenonactive", no_cleancache_ignore_nonactive);
-
-static int __init enable_zcache_compressor(char *s)
-{
- strlcpy(zcache_comp_name, s, sizeof(zcache_comp_name));
- zcache_enabled = true;
- return 1;
-}
-__setup("zcache=", enable_zcache_compressor);
-#endif
-
-
-static int zcache_comp_init(void)
-{
- int ret = 0;
-
- /* check crypto algorithm */
-#ifdef CONFIG_ZCACHE_MODULE
- ret = crypto_has_comp(zcache_comp_name, 0, 0);
- if (!ret) {
- ret = -1;
- goto out;
- }
-#else
- if (*zcache_comp_name != '\0') {
- ret = crypto_has_comp(zcache_comp_name, 0, 0);
- if (!ret)
- pr_info("zcache: %s not supported\n",
- zcache_comp_name);
- goto out;
- }
- if (!ret)
- strcpy(zcache_comp_name, "lzo");
- ret = crypto_has_comp(zcache_comp_name, 0, 0);
- if (!ret) {
- ret = 1;
- goto out;
- }
-#endif
- pr_info("zcache: using %s compressor\n", zcache_comp_name);
-
- /* alloc percpu transforms */
- ret = 0;
- zcache_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *);
- if (!zcache_comp_pcpu_tfms)
- ret = 1;
-out:
- return ret;
-}
-
-static int zcache_init(void)
-{
- int ret = 0;
-
-#ifdef CONFIG_ZCACHE_MODULE
- zcache_enabled = 1;
-#endif
- if (ramster_enabled) {
- namestr = "ramster";
- ramster_register_pamops(&zcache_pamops);
- }
- zcache_debugfs_init();
- if (zcache_enabled) {
- unsigned int cpu;
-
- tmem_register_hostops(&zcache_hostops);
- tmem_register_pamops(&zcache_pamops);
- ret = register_cpu_notifier(&zcache_cpu_notifier_block);
- if (ret) {
- pr_err("%s: can't register cpu notifier\n", namestr);
- goto out;
- }
- ret = zcache_comp_init();
- if (ret) {
- pr_err("%s: compressor initialization failed\n",
- namestr);
- goto out;
- }
- for_each_online_cpu(cpu) {
- void *pcpu = (void *)(long)cpu;
- zcache_cpu_notifier(&zcache_cpu_notifier_block,
- CPU_UP_PREPARE, pcpu);
- }
- }
- zcache_objnode_cache = kmem_cache_create("zcache_objnode",
- sizeof(struct tmem_objnode), 0, 0, NULL);
- zcache_obj_cache = kmem_cache_create("zcache_obj",
- sizeof(struct tmem_obj), 0, 0, NULL);
- ret = zcache_new_client(LOCAL_CLIENT);
- if (ret) {
- pr_err("%s: can't create client\n", namestr);
- goto out;
- }
- zbud_init();
- if (zcache_enabled && !disable_cleancache) {
- struct cleancache_ops *old_ops;
-
- register_shrinker(&zcache_shrinker);
- old_ops = zcache_cleancache_register_ops();
- pr_info("%s: cleancache enabled using kernel transcendent "
- "memory and compression buddies\n", namestr);
-#ifdef CONFIG_ZCACHE_DEBUG
- pr_info("%s: cleancache: ignorenonactive = %d\n",
- namestr, !disable_cleancache_ignore_nonactive);
-#endif
- if (old_ops != NULL)
- pr_warn("%s: cleancache_ops overridden\n", namestr);
- }
- if (zcache_enabled && !disable_frontswap) {
- struct frontswap_ops *old_ops;
-
- old_ops = zcache_frontswap_register_ops();
- if (frontswap_has_exclusive_gets)
- frontswap_tmem_exclusive_gets(true);
- pr_info("%s: frontswap enabled using kernel transcendent "
- "memory and compression buddies\n", namestr);
-#ifdef CONFIG_ZCACHE_DEBUG
- pr_info("%s: frontswap: excl gets = %d active only = %d\n",
- namestr, frontswap_has_exclusive_gets,
- !disable_frontswap_ignore_nonactive);
-#endif
- if (IS_ERR(old_ops) || old_ops) {
- if (IS_ERR(old_ops))
- return PTR_RET(old_ops);
- pr_warn("%s: frontswap_ops overridden\n", namestr);
- }
- }
- if (ramster_enabled)
- ramster_init(!disable_cleancache, !disable_frontswap,
- frontswap_has_exclusive_gets,
- !disable_frontswap_selfshrink);
-out:
- return ret;
-}
-
-#ifdef CONFIG_ZCACHE_MODULE
-#ifdef CONFIG_RAMSTER
-module_param(ramster_enabled, bool, S_IRUGO);
-module_param(disable_frontswap_selfshrink, int, S_IRUGO);
-#endif
-module_param(disable_cleancache, bool, S_IRUGO);
-module_param(disable_frontswap, bool, S_IRUGO);
-#ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS
-module_param(frontswap_has_exclusive_gets, bool, S_IRUGO);
-#endif
-module_param(disable_frontswap_ignore_nonactive, bool, S_IRUGO);
-module_param(zcache_comp_name, charp, S_IRUGO);
-module_init(zcache_init);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
-MODULE_DESCRIPTION("In-kernel compression of cleancache/frontswap pages");
-#else
-late_initcall(zcache_init);
-#endif
diff --git a/drivers/staging/zcache/zcache.h b/drivers/staging/zcache/zcache.h
deleted file mode 100644
index 849120095e7..00000000000
--- a/drivers/staging/zcache/zcache.h
+++ /dev/null
@@ -1,53 +0,0 @@
-
-/*
- * zcache.h
- *
- * Copyright (c) 2012, Dan Magenheimer, Oracle Corp.
- */
-
-#ifndef _ZCACHE_H_
-#define _ZCACHE_H_
-
-struct zcache_preload {
- struct tmem_obj *obj;
- struct tmem_objnode *objnodes[OBJNODE_TREE_MAX_PATH];
-};
-
-struct tmem_pool;
-
-#define MAX_POOLS_PER_CLIENT 16
-
-#define MAX_CLIENTS 16
-#define LOCAL_CLIENT ((uint16_t)-1)
-
-struct zcache_client {
- struct tmem_pool *tmem_pools[MAX_POOLS_PER_CLIENT];
- bool allocated;
- atomic_t refcount;
-};
-
-extern struct tmem_pool *zcache_get_pool_by_id(uint16_t cli_id,
- uint16_t poolid);
-extern void zcache_put_pool(struct tmem_pool *pool);
-
-extern int zcache_put_page(int, int, struct tmem_oid *,
- uint32_t, void *,
- unsigned int, bool, int);
-extern int zcache_get_page(int, int, struct tmem_oid *, uint32_t,
- void *, size_t *, bool, int);
-extern int zcache_flush_page(int, int, struct tmem_oid *, uint32_t);
-extern int zcache_flush_object(int, int, struct tmem_oid *);
-extern void zcache_decompress_to_page(char *, unsigned int, struct page *);
-
-#if defined(CONFIG_RAMSTER) || defined(CONFIG_RAMSTER_MODULE)
-extern void *zcache_pampd_create(char *, unsigned int, bool, int,
- struct tmem_handle *);
-int zcache_autocreate_pool(unsigned int cli_id, unsigned int pool_id, bool eph);
-#endif
-
-#define MAX_POOLS_PER_CLIENT 16
-
-#define MAX_CLIENTS 16
-#define LOCAL_CLIENT ((uint16_t)-1)
-
-#endif /* _ZCACHE_H_ */
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 82c7202fd5c..91d94b56443 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -169,7 +169,7 @@ static inline int is_partial_io(struct bio_vec *bvec)
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
u64 start, end, bound;
-
+
/* unaligned request */
if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
return 0;
@@ -418,14 +418,6 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
goto out;
}
- /*
- * System overwrites unused sectors. Free memory associated
- * with this sector now.
- */
- if (meta->table[index].handle ||
- zram_test_flag(meta, index, ZRAM_ZERO))
- zram_free_page(zram, index);
-
user_mem = kmap_atomic(page);
if (is_partial_io(bvec)) {
@@ -439,12 +431,23 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
if (page_zero_filled(uncmem)) {
kunmap_atomic(user_mem);
+ /* Free memory associated with this sector now. */
+ zram_free_page(zram, index);
+
zram->stats.pages_zero++;
zram_set_flag(meta, index, ZRAM_ZERO);
ret = 0;
goto out;
}
+ /*
+ * zram_slot_free_notify could miss a free, so let's
+ * double-check here.
+ */
+ if (unlikely(meta->table[index].handle ||
+ zram_test_flag(meta, index, ZRAM_ZERO)))
+ zram_free_page(zram, index);
+
ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
meta->compress_workmem);
@@ -486,6 +489,12 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
zs_unmap_object(meta->mem_pool, handle);
+ /*
+ * Free memory associated with this sector
+ * before overwriting unused sectors.
+ */
+ zram_free_page(zram, index);
+
meta->table[index].handle = handle;
meta->table[index].size = clen;
@@ -504,6 +513,20 @@ out:
return ret;
}
+static void handle_pending_slot_free(struct zram *zram)
+{
+ struct zram_slot_free *free_rq;
+
+ spin_lock(&zram->slot_free_lock);
+ while (zram->slot_free_rq) {
+ free_rq = zram->slot_free_rq;
+ zram->slot_free_rq = free_rq->next;
+ zram_free_page(zram, free_rq->index);
+ kfree(free_rq);
+ }
+ spin_unlock(&zram->slot_free_lock);
+}
+
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
int offset, struct bio *bio, int rw)
{
@@ -511,10 +534,12 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
if (rw == READ) {
down_read(&zram->lock);
+ handle_pending_slot_free(zram);
ret = zram_bvec_read(zram, bvec, index, offset, bio);
up_read(&zram->lock);
} else {
down_write(&zram->lock);
+ handle_pending_slot_free(zram);
ret = zram_bvec_write(zram, bvec, index, offset);
up_write(&zram->lock);
}
@@ -522,13 +547,18 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
return ret;
}
-static void zram_reset_device(struct zram *zram)
+static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
size_t index;
struct zram_meta *meta;
- if (!zram->init_done)
+ flush_work(&zram->free_work);
+
+ down_write(&zram->init_lock);
+ if (!zram->init_done) {
+ up_write(&zram->init_lock);
return;
+ }
meta = zram->meta;
zram->init_done = 0;
@@ -548,7 +578,9 @@ static void zram_reset_device(struct zram *zram)
memset(&zram->stats, 0, sizeof(zram->stats));
zram->disksize = 0;
- set_capacity(zram->disk, 0);
+ if (reset_capacity)
+ set_capacity(zram->disk, 0);
+ up_write(&zram->init_lock);
}
static void zram_init_device(struct zram *zram, struct zram_meta *meta)
@@ -631,7 +663,7 @@ static ssize_t reset_store(struct device *dev,
if (bdev)
fsync_bdev(bdev);
- zram_reset_device(zram);
+ zram_reset_device(zram, true);
return len;
}
@@ -716,16 +748,40 @@ error:
bio_io_error(bio);
}
+static void zram_slot_free(struct work_struct *work)
+{
+ struct zram *zram;
+
+ zram = container_of(work, struct zram, free_work);
+ down_write(&zram->lock);
+ handle_pending_slot_free(zram);
+ up_write(&zram->lock);
+}
+
+static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
+{
+ spin_lock(&zram->slot_free_lock);
+ free_rq->next = zram->slot_free_rq;
+ zram->slot_free_rq = free_rq;
+ spin_unlock(&zram->slot_free_lock);
+}
+
static void zram_slot_free_notify(struct block_device *bdev,
unsigned long index)
{
struct zram *zram;
+ struct zram_slot_free *free_rq;
zram = bdev->bd_disk->private_data;
- down_write(&zram->lock);
- zram_free_page(zram, index);
- up_write(&zram->lock);
atomic64_inc(&zram->stats.notify_free);
+
+ free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
+ if (!free_rq)
+ return;
+
+ free_rq->index = index;
+ add_slot_free(zram, free_rq);
+ schedule_work(&zram->free_work);
}
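/*
 * A user-space model of the deferral introduced above: the notify hook runs
 * in a context where taking zram->lock is not safe, so it only pushes a node
 * onto a cheap, spinlock-protected list and kicks a worker; the worker (and
 * the regular I/O path) later drains the list with the heavier lock held.
 * pthread primitives and the sketch_* names stand in for the kernel objects.
 */
#include <pthread.h>
#include <stdlib.h>

struct sketch_free_req {
	unsigned long index;
	struct sketch_free_req *next;
};

static struct sketch_free_req *sketch_free_list;
static pthread_mutex_t sketch_free_lock = PTHREAD_MUTEX_INITIALIZER;

/* Producer: cheap enough for a context that cannot take the big lock. */
static void sketch_queue_free(unsigned long index)
{
	struct sketch_free_req *req = malloc(sizeof(*req));

	if (!req)
		return;		/* best effort, like the GFP_ATOMIC path above */
	req->index = index;
	pthread_mutex_lock(&sketch_free_lock);
	req->next = sketch_free_list;
	sketch_free_list = req;
	pthread_mutex_unlock(&sketch_free_lock);
}

/* Consumer: runs with the big lock already held, frees every queued slot. */
static void sketch_drain_frees(void (*free_slot)(unsigned long))
{
	pthread_mutex_lock(&sketch_free_lock);
	while (sketch_free_list) {
		struct sketch_free_req *req = sketch_free_list;

		sketch_free_list = req->next;
		free_slot(req->index);
		free(req);
	}
	pthread_mutex_unlock(&sketch_free_lock);
}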
static const struct block_device_operations zram_devops = {
@@ -772,6 +828,10 @@ static int create_device(struct zram *zram, int device_id)
init_rwsem(&zram->lock);
init_rwsem(&zram->init_lock);
+ INIT_WORK(&zram->free_work, zram_slot_free);
+ spin_lock_init(&zram->slot_free_lock);
+ zram->slot_free_rq = NULL;
+
zram->queue = blk_alloc_queue(GFP_KERNEL);
if (!zram->queue) {
pr_err("Error allocating disk queue for device %d\n",
@@ -898,10 +958,12 @@ static void __exit zram_exit(void)
for (i = 0; i < num_devices; i++) {
zram = &zram_devices[i];
- get_disk(zram->disk);
destroy_device(zram);
- zram_reset_device(zram);
- put_disk(zram->disk);
+ /*
+ * Shouldn't access zram->disk after destroy_device
+ * because destroy_device already released zram->disk.
+ */
+ zram_reset_device(zram, false);
}
unregister_blkdev(zram_major, "zram");
@@ -919,3 +981,4 @@ MODULE_PARM_DESC(num_devices, "Number of zram devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");
+MODULE_ALIAS("devname:zram");
diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
index 9e57bfb29b4..97a3acf6ab7 100644
--- a/drivers/staging/zram/zram_drv.h
+++ b/drivers/staging/zram/zram_drv.h
@@ -94,11 +94,20 @@ struct zram_meta {
struct zs_pool *mem_pool;
};
+struct zram_slot_free {
+ unsigned long index;
+ struct zram_slot_free *next;
+};
+
struct zram {
struct zram_meta *meta;
struct rw_semaphore lock; /* protect compression buffers, table,
* 32bit stat counters against concurrent
* notifications, reads and writes */
+
+ struct work_struct free_work; /* handle pending free request */
+ struct zram_slot_free *slot_free_rq; /* list head of free request */
+
struct request_queue *queue;
struct gendisk *disk;
int init_done;
@@ -109,6 +118,7 @@ struct zram {
* we can store in a disk.
*/
u64 disksize; /* bytes */
+ spinlock_t slot_free_lock;
struct zram_stats stats;
};
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
index 4bb275b2d98..1a67537dbc5 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -423,7 +423,7 @@ static struct page *get_next_page(struct page *page)
if (is_last_page(page))
next = NULL;
else if (is_first_page(page))
- next = (struct page *)page->private;
+ next = (struct page *)page_private(page);
else
next = list_entry(page->lru.next, struct page, lru);
@@ -581,7 +581,7 @@ static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
first_page->inuse = 0;
}
if (i == 1)
- first_page->private = (unsigned long)page;
+ set_page_private(first_page, (unsigned long)page);
if (i >= 1)
page->first_page = first_page;
if (i >= 2)
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index f73da43cdf9..3a179302b90 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1086,7 +1086,6 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (cmd->reject_reason)
return 0;
- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
return 1;
}
/*
@@ -1124,14 +1123,10 @@ after_immediate_data:
*/
cmdsn_ret = iscsit_sequence_cmd(cmd->conn, cmd,
(unsigned char *)hdr, hdr->cmdsn);
- if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
+ if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
return -1;
- } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
- target_put_sess_cmd(conn->sess->se_sess, &cmd->se_cmd);
- return 0;
- }
- if (cmd->sense_reason) {
+ if (cmd->sense_reason || cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
int rc;
rc = iscsit_dump_data_payload(cmd->conn,
@@ -1527,6 +1522,10 @@ int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (hdr->itt == RESERVED_ITT && !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
pr_err("NOPOUT ITT is reserved, but Immediate Bit is"
" not set, protocol error.\n");
+ if (!cmd)
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
+
return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
(unsigned char *)hdr);
}
@@ -1536,6 +1535,10 @@ int iscsit_setup_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
" greater than MaxXmitDataSegmentLength: %u, protocol"
" error.\n", payload_length,
conn->conn_ops->MaxXmitDataSegmentLength);
+ if (!cmd)
+ return iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
+ (unsigned char *)hdr);
+
return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR,
(unsigned char *)hdr);
}
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 3402241be87..bc788c52b6c 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1163,12 +1163,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
spin_unlock_bh(&np->np_thread_lock);
complete(&np->np_restart_comp);
- if (ret == -ENODEV) {
- iscsit_put_transport(conn->conn_transport);
- kfree(conn);
- conn = NULL;
+ iscsit_put_transport(conn->conn_transport);
+ kfree(conn);
+ conn = NULL;
+ if (ret == -ENODEV)
goto out;
- }
/* Get another socket */
return 1;
}
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 4cb667d720a..9fabbf7214c 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -97,9 +97,12 @@ spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
buf[7] = 0x2; /* CmdQue=1 */
- snprintf(&buf[8], 8, "LIO-ORG");
- snprintf(&buf[16], 16, "%s", dev->t10_wwn.model);
- snprintf(&buf[32], 4, "%s", dev->t10_wwn.revision);
+ memcpy(&buf[8], "LIO-ORG ", 8);
+ memset(&buf[16], 0x20, 16);
+ memcpy(&buf[16], dev->t10_wwn.model,
+ min_t(size_t, strlen(dev->t10_wwn.model), 16));
+ memcpy(&buf[32], dev->t10_wwn.revision,
+ min_t(size_t, strlen(dev->t10_wwn.revision), 4));
buf[4] = 31; /* Set additional length to 31 */
return 0;
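/*
 * Why the hunk above switches from snprintf() to memset()+memcpy(): the
 * INQUIRY vendor/product/revision fields are fixed-width, left-aligned
 * ASCII padded with spaces and carry no NUL terminator, whereas snprintf()
 * always writes one and leaves the remainder unpadded. A small user-space
 * helper showing the padded-field idiom; the product and revision strings
 * below are example values only.
 */
#include <stdio.h>
#include <string.h>

/* Left-align src in a width-byte field, space-padded, no terminator. */
static void scsi_ascii_field(unsigned char *dst, size_t width, const char *src)
{
	size_t len = strlen(src);

	memset(dst, ' ', width);
	memcpy(dst, src, len < width ? len : width);
}

int main(void)
{
	unsigned char buf[36] = { 0 };

	scsi_ascii_field(&buf[8], 8, "LIO-ORG");	/* vendor id  */
	scsi_ascii_field(&buf[16], 16, "IBLOCK");	/* product id */
	scsi_ascii_field(&buf[32], 4, "4.0");		/* revision   */
	fwrite(&buf[8], 1, 28, stdout);
	putchar('\n');
	return 0;
}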
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7172d005d06..d8e49d79f8c 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2134,6 +2134,7 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
+ unsigned long flags;
int ret = 0;
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
@@ -2144,6 +2145,16 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
} else {
if (wait_for_tasks)
transport_wait_for_tasks(cmd);
+ /*
+ * Handle WRITE failure case where transport_generic_new_cmd()
+ * has already added se_cmd to state_list, but fabric has
+ * failed command before I/O submission.
+ */
+ if (cmd->state_active) {
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ target_remove_from_state_list(cmd);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ }
if (cmd->se_lun)
transport_lun_remove_cmd(cmd);
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 083710e0236..2b86f8e0fb5 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1785,8 +1785,6 @@ static int __exit amiga_serial_remove(struct platform_device *pdev)
free_irq(IRQ_AMIGA_TBE, state);
free_irq(IRQ_AMIGA_RBF, state);
- platform_set_drvdata(pdev, NULL);
-
return error;
}
diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
index eb255e807c0..9eba119bcdd 100644
--- a/drivers/tty/hvc/hvc_console.c
+++ b/drivers/tty/hvc/hvc_console.c
@@ -361,7 +361,12 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
tty->driver_data = NULL;
tty_port_put(&hp->port);
printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc);
- }
+ } else
+ /* We are ready... raise DTR/RTS */
+ if (C_BAUD(tty))
+ if (hp->ops->dtr_rts)
+ hp->ops->dtr_rts(hp, 1);
+
/* Force wakeup of the polling thread */
hvc_kick();
@@ -393,6 +398,10 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
/* We are done with the tty pointer now. */
tty_port_tty_set(&hp->port, NULL);
+ if (C_HUPCL(tty))
+ if (hp->ops->dtr_rts)
+ hp->ops->dtr_rts(hp, 0);
+
if (hp->ops->notifier_del)
hp->ops->notifier_del(hp, hp->data);
diff --git a/drivers/tty/hvc/hvc_console.h b/drivers/tty/hvc/hvc_console.h
index 674d23cb919..91310198082 100644
--- a/drivers/tty/hvc/hvc_console.h
+++ b/drivers/tty/hvc/hvc_console.h
@@ -75,6 +75,9 @@ struct hv_ops {
/* tiocmget/set implementation */
int (*tiocmget)(struct hvc_struct *hp);
int (*tiocmset)(struct hvc_struct *hp, unsigned int set, unsigned int clear);
+
+ /* Callbacks to handle tty ports */
+ void (*dtr_rts)(struct hvc_struct *hp, int raise);
};
/* Register a vterm and a slot index for use as a console (console_init) */
diff --git a/drivers/tty/hvc/hvc_iucv.c b/drivers/tty/hvc/hvc_iucv.c
index 9d47f50c275..fd17a9b804b 100644
--- a/drivers/tty/hvc/hvc_iucv.c
+++ b/drivers/tty/hvc/hvc_iucv.c
@@ -656,21 +656,64 @@ static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
}
/**
+ * hvc_iucv_dtr_rts() - HVC notifier for handling DTR/RTS
+ * @hp: Pointer the HVC device (struct hvc_struct)
+ * @raise: Non-zero to raise or zero to lower DTR/RTS lines
+ *
+ * This routine notifies the HVC back-end to raise or lower DTR/RTS
+ * lines. Raising DTR/RTS is ignored. Lowering DTR/RTS indicates that
+ * the IUCV connection should be dropped (similar to hanging up a modem).
+ */
+static void hvc_iucv_dtr_rts(struct hvc_struct *hp, int raise)
+{
+ struct hvc_iucv_private *priv;
+ struct iucv_path *path;
+
+ /* Raising the DTR/RTS is ignored as IUCV connections can be
+ * established at any time.
+ */
+ if (raise)
+ return;
+
+ priv = hvc_iucv_get_private(hp->vtermno);
+ if (!priv)
+ return;
+
+ /* Lowering the DTR/RTS lines disconnects an established IUCV
+ * connection.
+ */
+ flush_sndbuf_sync(priv);
+
+ spin_lock_bh(&priv->lock);
+ path = priv->path; /* save reference to IUCV path */
+ priv->path = NULL;
+ priv->iucv_state = IUCV_DISCONN;
+ spin_unlock_bh(&priv->lock);
+
+ /* Sever IUCV path outside of priv->lock due to lock ordering of:
+ * priv->lock <--> iucv_table_lock */
+ if (path) {
+ iucv_path_sever(path, NULL);
+ iucv_path_free(path);
+ }
+}
+
+/**
* hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
* @hp: Pointer to the HVC device (struct hvc_struct)
* @id: Additional data (originally passed to hvc_alloc):
* the index of a struct hvc_iucv_private instance.
*
* This routine notifies the HVC back-end that the last tty device fd has been
- * closed. The function calls hvc_iucv_cleanup() to clean up the struct
- * hvc_iucv_private instance.
+ * closed. The function cleans up tty resources. The clean-up of the IUCV
+ * connection is done in hvc_iucv_dtr_rts() and depends on the HUPCL termios
+ * control setting.
*
* Locking: struct hvc_iucv_private->lock
*/
static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
{
struct hvc_iucv_private *priv;
- struct iucv_path *path;
priv = hvc_iucv_get_private(id);
if (!priv)
@@ -679,17 +722,11 @@ static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
flush_sndbuf_sync(priv);
spin_lock_bh(&priv->lock);
- path = priv->path; /* save reference to IUCV path */
- priv->path = NULL;
- hvc_iucv_cleanup(priv);
+ destroy_tty_buffer_list(&priv->tty_outqueue);
+ destroy_tty_buffer_list(&priv->tty_inqueue);
+ priv->tty_state = TTY_CLOSED;
+ priv->sndbuf_len = 0;
spin_unlock_bh(&priv->lock);
-
- /* sever IUCV path outside of priv->lock due to lock ordering of:
- * priv->lock <--> iucv_table_lock */
- if (path) {
- iucv_path_sever(path, NULL);
- iucv_path_free(path);
- }
}
/**
@@ -931,6 +968,7 @@ static const struct hv_ops hvc_iucv_ops = {
.notifier_add = hvc_iucv_notifier_add,
.notifier_del = hvc_iucv_notifier_del,
.notifier_hangup = hvc_iucv_notifier_hangup,
+ .dtr_rts = hvc_iucv_dtr_rts,
};
/* Suspend / resume device operations */
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 682210d778b..e61c36cbb86 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -208,7 +208,7 @@ static int xen_hvm_console_init(void)
info = vtermno_to_xencons(HVC_COOKIE);
if (!info) {
- info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL | __GFP_ZERO);
+ info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
} else if (info->intf != NULL) {
@@ -257,7 +257,7 @@ static int xen_pv_console_init(void)
info = vtermno_to_xencons(HVC_COOKIE);
if (!info) {
- info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL | __GFP_ZERO);
+ info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
} else if (info->intf != NULL) {
@@ -284,7 +284,7 @@ static int xen_initial_domain_console_init(void)
info = vtermno_to_xencons(HVC_COOKIE);
if (!info) {
- info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL | __GFP_ZERO);
+ info = kzalloc(sizeof(struct xencons_info), GFP_KERNEL);
if (!info)
return -ENOMEM;
}
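
The three hunks above drop a redundant flag: kzalloc() already zeroes the allocation, being defined in terms of kmalloc() with __GFP_ZERO. A paraphrase of the slab helper (see include/linux/slab.h):

/* Passing __GFP_ZERO explicitly to kzalloc() is therefore redundant. */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}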
diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
index 3396eb9d57a..ac2767100df 100644
--- a/drivers/tty/hvc/hvsi_lib.c
+++ b/drivers/tty/hvc/hvsi_lib.c
@@ -341,8 +341,8 @@ void hvsilib_establish(struct hvsi_priv *pv)
pr_devel("HVSI@%x: ... waiting handshake\n", pv->termno);
- /* Try for up to 200s */
- for (timeout = 0; timeout < 20; timeout++) {
+ /* Try for up to 400ms */
+ for (timeout = 0; timeout < 40; timeout++) {
if (pv->established)
goto established;
if (!hvsi_get_packet(pv))
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 642239015b4..c0f76da5530 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -807,7 +807,7 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
int h = dlci->adaption - 1;
total_size = 0;
- while(1) {
+ while (1) {
len = kfifo_len(dlci->fifo);
if (len == 0)
return total_size;
@@ -827,8 +827,8 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci)
switch (dlci->adaption) {
case 1: /* Unstructured */
break;
- case 2: /* Unstructed with modem bits. Always one byte as we never
- send inline break data */
+ case 2: /* Unstructed with modem bits.
+ Always one byte as we never send inline break data */
*dp++ = gsm_encode_modem(dlci);
break;
}
@@ -968,7 +968,7 @@ static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
unsigned long flags;
int sweep;
- if (dlci->constipated)
+ if (dlci->constipated)
return;
spin_lock_irqsave(&dlci->gsm->tx_lock, flags);
@@ -981,7 +981,7 @@ static void gsm_dlci_data_kick(struct gsm_dlci *dlci)
gsm_dlci_data_output(dlci->gsm, dlci);
}
if (sweep)
- gsm_dlci_data_sweep(dlci->gsm);
+ gsm_dlci_data_sweep(dlci->gsm);
spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags);
}
@@ -1138,7 +1138,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
static void gsm_control_rls(struct gsm_mux *gsm, u8 *data, int clen)
{
struct tty_port *port;
- unsigned int addr = 0 ;
+ unsigned int addr = 0;
u8 bits;
int len = clen;
u8 *dp = data;
@@ -1740,10 +1740,11 @@ static void gsm_queue(struct gsm_mux *gsm)
if ((gsm->control & ~PF) == UI)
gsm->fcs = gsm_fcs_add_block(gsm->fcs, gsm->buf, gsm->len);
- if (gsm->encoding == 0){
- /* WARNING: gsm->received_fcs is used for gsm->encoding = 0 only.
- In this case it contain the last piece of data
- required to generate final CRC */
+ if (gsm->encoding == 0) {
+ /* WARNING: gsm->received_fcs is used for
+ gsm->encoding = 0 only.
+ In this case it contain the last piece of data
+ required to generate final CRC */
gsm->fcs = gsm_fcs_add(gsm->fcs, gsm->received_fcs);
}
if (gsm->fcs != GOOD_FCS) {
@@ -2904,9 +2905,11 @@ static int gsmtty_install(struct tty_driver *driver, struct tty_struct *tty)
gsm = gsm_mux[mux];
if (gsm->dead)
return -EL2HLT;
- /* If DLCI 0 is not yet fully open return an error. This is ok from a locking
- perspective as we don't have to worry about this if DLCI0 is lost */
- if (gsm->dlci[0] && gsm->dlci[0]->state != DLCI_OPEN)
+ /* If DLCI 0 is not yet fully open return an error.
+ This is ok from a locking
+ perspective as we don't have to worry about this
+ if DLCI0 is lost */
+ if (gsm->dlci[0] && gsm->dlci[0]->state != DLCI_OPEN)
return -EL2NSYNC;
dlci = gsm->dlci[line];
if (dlci == NULL) {
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 4bf0fc0843d..c9a9ddd1d0b 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -50,6 +50,7 @@
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
+#include <linux/vmalloc.h>
/* number of characters left in xmit buffer before select says we have room */
@@ -74,37 +75,81 @@
#define ECHO_OP_SET_CANON_COL 0x81
#define ECHO_OP_ERASE_TAB 0x82
+#define ECHO_COMMIT_WATERMARK 256
+#define ECHO_BLOCK 256
+#define ECHO_DISCARD_WATERMARK N_TTY_BUF_SIZE - (ECHO_BLOCK + 32)
+
+
+#undef N_TTY_TRACE
+#ifdef N_TTY_TRACE
+# define n_tty_trace(f, args...) trace_printk(f, ##args)
+#else
+# define n_tty_trace(f, args...)
+#endif
+
struct n_tty_data {
- unsigned int column;
+ /* producer-published */
+ size_t read_head;
+ size_t canon_head;
+ size_t echo_head;
+ size_t echo_commit;
+ DECLARE_BITMAP(char_map, 256);
+
+ /* private to n_tty_receive_overrun (single-threaded) */
unsigned long overrun_time;
int num_overrun;
+ /* non-atomic */
+ bool no_room;
+
+ /* must hold exclusive termios_rwsem to reset these */
unsigned char lnext:1, erasing:1, raw:1, real_raw:1, icanon:1;
- unsigned char echo_overrun:1;
- DECLARE_BITMAP(process_char_map, 256);
+ /* shared by producer and consumer */
+ char read_buf[N_TTY_BUF_SIZE];
DECLARE_BITMAP(read_flags, N_TTY_BUF_SIZE);
+ unsigned char echo_buf[N_TTY_BUF_SIZE];
- char *read_buf;
- int read_head;
- int read_tail;
- int read_cnt;
int minimum_to_wake;
- unsigned char *echo_buf;
- unsigned int echo_pos;
- unsigned int echo_cnt;
+ /* consumer-published */
+ size_t read_tail;
+ size_t line_start;
- int canon_data;
- unsigned long canon_head;
+ /* protected by output lock */
+ unsigned int column;
unsigned int canon_column;
+ size_t echo_tail;
struct mutex atomic_read_lock;
struct mutex output_lock;
- struct mutex echo_lock;
- raw_spinlock_t read_lock;
};
+static inline size_t read_cnt(struct n_tty_data *ldata)
+{
+ return ldata->read_head - ldata->read_tail;
+}
+
+static inline unsigned char read_buf(struct n_tty_data *ldata, size_t i)
+{
+ return ldata->read_buf[i & (N_TTY_BUF_SIZE - 1)];
+}
+
+static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i)
+{
+ return &ldata->read_buf[i & (N_TTY_BUF_SIZE - 1)];
+}
+
+static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i)
+{
+ return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
+}
+
+static inline unsigned char *echo_buf_addr(struct n_tty_data *ldata, size_t i)
+{
+ return &ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
+}
+
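+
These helpers rely on read_head/read_tail (and echo_head/echo_tail) being free-running size_t counters that are only masked when indexing: the difference of the counters is the fill level, and unsigned wraparound is harmless because N_TTY_BUF_SIZE is a power of two. A standalone illustration of the same scheme (plain C, not kernel code):

#include <stdio.h>
#include <stddef.h>

#define BUF_SIZE 8			/* power of two, like N_TTY_BUF_SIZE */

static unsigned char buf[BUF_SIZE];
static size_t head, tail;		/* free-running; only masked on access */

static void put(unsigned char c)	{ buf[head++ & (BUF_SIZE - 1)] = c; }
static unsigned char get(void)		{ return buf[tail++ & (BUF_SIZE - 1)]; }
static size_t count(void)		{ return head - tail; }

int main(void)
{
	for (unsigned char c = 'a'; c <= 'k'; c++)	/* offer 11 bytes */
		if (count() < BUF_SIZE)			/* only 8 fit */
			put(c);
	while (count())
		putchar(get());				/* prints "abcdefgh" */
	putchar('\n');
	return 0;
}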
static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
unsigned char __user *ptr)
{
@@ -114,33 +159,18 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
return put_user(x, ptr);
}
-/**
- * n_tty_set_room - receive space
- * @tty: terminal
- *
- * Updates tty->receive_room to reflect the currently available space
- * in the input buffer, and re-schedules the flip buffer work if space
- * just became available.
- *
- * Locks: Concurrent update is protected with read_lock
- */
-
-static int set_room(struct tty_struct *tty)
+static int receive_room(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
int left;
- int old_left;
- unsigned long flags;
-
- raw_spin_lock_irqsave(&ldata->read_lock, flags);
if (I_PARMRK(tty)) {
/* Multiply read_cnt by 3, since each byte might take up to
* three times as many spaces when PARMRK is set (depending on
* its flags, e.g. parity error). */
- left = N_TTY_BUF_SIZE - ldata->read_cnt * 3 - 1;
+ left = N_TTY_BUF_SIZE - read_cnt(ldata) * 3 - 1;
} else
- left = N_TTY_BUF_SIZE - ldata->read_cnt - 1;
+ left = N_TTY_BUF_SIZE - read_cnt(ldata) - 1;
/*
* If we are doing input canonicalization, and there are no
@@ -149,19 +179,31 @@ static int set_room(struct tty_struct *tty)
* characters will be beeped.
*/
if (left <= 0)
- left = ldata->icanon && !ldata->canon_data;
- old_left = tty->receive_room;
- tty->receive_room = left;
+ left = ldata->icanon && ldata->canon_head == ldata->read_tail;
- raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
-
- return left && !old_left;
+ return left;
}
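
The factor of three above is the PARMRK worst case: an error-flagged byte is queued as the three-byte sequence \377 \0 c, and a literal 0377 data byte has to be doubled so readers can tell it from the marker. A standalone restatement (plain C; assumes the error is actually being marked, i.e. IGNPAR is clear):

#include <stdio.h>

/* How many buffer bytes one received byte can occupy under PARMRK. */
static int parmrk_bytes(unsigned char c, int had_error, unsigned char *out)
{
	int n = 0;

	if (had_error) {		/* parity/framing error: \377 \0 c */
		out[n++] = 0377;
		out[n++] = 0;
		out[n++] = c;
	} else if (c == 0377) {		/* literal 0377 must be doubled */
		out[n++] = 0377;
		out[n++] = 0377;
	} else {
		out[n++] = c;		/* ordinary byte: unchanged */
	}
	return n;
}

int main(void)
{
	unsigned char out[3];

	printf("error byte: %d\n", parmrk_bytes('A', 1, out));	  /* 3 */
	printf("literal 0377: %d\n", parmrk_bytes(0377, 0, out)); /* 2 */
	printf("normal byte: %d\n", parmrk_bytes('A', 0, out));	  /* 1 */
	return 0;
}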
+/**
+ * n_tty_set_room - receive space
+ * @tty: terminal
+ *
+ * Re-schedules the flip buffer work if space just became available.
+ *
+ * Caller holds exclusive termios_rwsem
+ * or
+ * n_tty_read()/consumer path:
+ * holds non-exclusive termios_rwsem
+ */
+
static void n_tty_set_room(struct tty_struct *tty)
{
+ struct n_tty_data *ldata = tty->disc_data;
+
/* Did this open up the receive buffer? We may need to flip */
- if (set_room(tty)) {
+ if (unlikely(ldata->no_room) && receive_room(tty)) {
+ ldata->no_room = 0;
+
WARN_RATELIMIT(tty->port->itty == NULL,
"scheduling with invalid itty\n");
/* see if ldisc has been killed - if so, this means that
@@ -170,17 +212,93 @@ static void n_tty_set_room(struct tty_struct *tty)
*/
WARN_RATELIMIT(test_bit(TTY_LDISC_HALTED, &tty->flags),
"scheduling buffer work for halted ldisc\n");
- schedule_work(&tty->port->buf.work);
+ queue_work(system_unbound_wq, &tty->port->buf.work);
}
}
-static void put_tty_queue_nolock(unsigned char c, struct n_tty_data *ldata)
+static ssize_t chars_in_buffer(struct tty_struct *tty)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+ ssize_t n = 0;
+
+ if (!ldata->icanon)
+ n = read_cnt(ldata);
+ else
+ n = ldata->canon_head - ldata->read_tail;
+ return n;
+}
+
+/**
+ * n_tty_write_wakeup - asynchronous I/O notifier
+ * @tty: tty device
+ *
+ * Required for the ptys, serial driver etc. since processes
+ * that attach themselves to the master and rely on ASYNC
+ * IO must be woken up
+ */
+
+static void n_tty_write_wakeup(struct tty_struct *tty)
{
- if (ldata->read_cnt < N_TTY_BUF_SIZE) {
- ldata->read_buf[ldata->read_head] = c;
- ldata->read_head = (ldata->read_head + 1) & (N_TTY_BUF_SIZE-1);
- ldata->read_cnt++;
+ if (tty->fasync && test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags))
+ kill_fasync(&tty->fasync, SIGIO, POLL_OUT);
+}
+
+static void n_tty_check_throttle(struct tty_struct *tty)
+{
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY)
+ return;
+ /*
+ * Check the remaining room for the input canonicalization
+ * mode. We don't want to throttle the driver if we're in
+ * canonical mode and don't have a newline yet!
+ */
+ while (1) {
+ int throttled;
+ tty_set_flow_change(tty, TTY_THROTTLE_SAFE);
+ if (receive_room(tty) >= TTY_THRESHOLD_THROTTLE)
+ break;
+ throttled = tty_throttle_safe(tty);
+ if (!throttled)
+ break;
+ }
+ __tty_set_flow_change(tty, 0);
+}
+
+static void n_tty_check_unthrottle(struct tty_struct *tty)
+{
+ if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
+ tty->link->ldisc->ops->write_wakeup == n_tty_write_wakeup) {
+ if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE)
+ return;
+ if (!tty->count)
+ return;
+ n_tty_set_room(tty);
+ n_tty_write_wakeup(tty->link);
+ wake_up_interruptible_poll(&tty->link->write_wait, POLLOUT);
+ return;
+ }
+
+ /* If there is enough space in the read buffer now, let the
+ * low-level driver know. We use chars_in_buffer() to
+ * check the buffer, as it now knows about canonical mode.
+ * Otherwise, if the driver is throttled and the line is
+ * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
+ * we won't get any more characters.
+ */
+
+ while (1) {
+ int unthrottled;
+ tty_set_flow_change(tty, TTY_UNTHROTTLE_SAFE);
+ if (chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE)
+ break;
+ if (!tty->count)
+ break;
+ n_tty_set_room(tty);
+ unthrottled = tty_unthrottle_safe(tty);
+ if (!unthrottled)
+ break;
}
+ __tty_set_flow_change(tty, 0);
}
/**
@@ -188,21 +306,19 @@ static void put_tty_queue_nolock(unsigned char c, struct n_tty_data *ldata)
* @c: character
* @ldata: n_tty data
*
- * Add a character to the tty read_buf queue. This is done under the
- * read_lock to serialize character addition and also to protect us
- * against parallel reads or flushes
+ * Add a character to the tty read_buf queue.
+ *
+ * n_tty_receive_buf()/producer path:
+ * caller holds non-exclusive termios_rwsem
+ * modifies read_head
+ *
+ * read_head is only considered 'published' if canonical mode is
+ * not active.
*/
-static void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
+static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
{
- unsigned long flags;
- /*
- * The problem of stomping on the buffers ends here.
- * Why didn't anyone see this one coming? --AJK
- */
- raw_spin_lock_irqsave(&ldata->read_lock, flags);
- put_tty_queue_nolock(c, ldata);
- raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
+ *read_buf_addr(ldata, ldata->read_head++) = c;
}
/**
@@ -212,22 +328,17 @@ static void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
* Reset the read buffer counters and clear the flags.
* Called from n_tty_open() and n_tty_flush_buffer().
*
- * Locking: tty_read_lock for read fields.
+ * Locking: caller holds exclusive termios_rwsem
+ * (or locking is not required)
*/
static void reset_buffer_flags(struct n_tty_data *ldata)
{
- unsigned long flags;
+ ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
+ ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
+ ldata->line_start = 0;
- raw_spin_lock_irqsave(&ldata->read_lock, flags);
- ldata->read_head = ldata->read_tail = ldata->read_cnt = 0;
- raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
-
- mutex_lock(&ldata->echo_lock);
- ldata->echo_pos = ldata->echo_cnt = ldata->echo_overrun = 0;
- mutex_unlock(&ldata->echo_lock);
-
- ldata->canon_head = ldata->canon_data = ldata->erasing = 0;
+ ldata->erasing = 0;
bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
}
@@ -251,16 +362,21 @@ static void n_tty_packet_mode_flush(struct tty_struct *tty)
* buffer flushed (eg at hangup) or when the N_TTY line discipline
* internally has to clean the pending queue (for example some signals).
*
- * Locking: ctrl_lock, read_lock.
+ * Holds termios_rwsem to exclude producer/consumer while
+ * buffer indices are reset.
+ *
+ * Locking: ctrl_lock, exclusive termios_rwsem
*/
static void n_tty_flush_buffer(struct tty_struct *tty)
{
+ down_write(&tty->termios_rwsem);
reset_buffer_flags(tty->disc_data);
n_tty_set_room(tty);
if (tty->link)
n_tty_packet_mode_flush(tty);
+ up_write(&tty->termios_rwsem);
}
/**
@@ -270,24 +386,18 @@ static void n_tty_flush_buffer(struct tty_struct *tty)
* Report the number of characters buffered to be delivered to user
* at this instant in time.
*
- * Locking: read_lock
+ * Locking: exclusive termios_rwsem
*/
static ssize_t n_tty_chars_in_buffer(struct tty_struct *tty)
{
- struct n_tty_data *ldata = tty->disc_data;
- unsigned long flags;
- ssize_t n = 0;
+ ssize_t n;
- raw_spin_lock_irqsave(&ldata->read_lock, flags);
- if (!ldata->icanon) {
- n = ldata->read_cnt;
- } else if (ldata->canon_data) {
- n = (ldata->canon_head > ldata->read_tail) ?
- ldata->canon_head - ldata->read_tail :
- ldata->canon_head + (N_TTY_BUF_SIZE - ldata->read_tail);
- }
- raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
+ WARN_ONCE(1, "%s is deprecated and scheduled for removal.", __func__);
+
+ down_write(&tty->termios_rwsem);
+ n = chars_in_buffer(tty);
+ up_write(&tty->termios_rwsem);
return n;
}
@@ -532,33 +642,23 @@ break_out:
* are prioritized. Also, when control characters are echoed with a
* prefixed "^", the pair is treated atomically and thus not separated.
*
- * Locking: output_lock to protect column state and space left,
- * echo_lock to protect the echo buffer
+ * Locking: callers must hold output_lock
*/
-static void process_echoes(struct tty_struct *tty)
+static size_t __process_echoes(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
- int space, nr;
+ int space, old_space;
+ size_t tail;
unsigned char c;
- unsigned char *cp, *buf_end;
-
- if (!ldata->echo_cnt)
- return;
-
- mutex_lock(&ldata->output_lock);
- mutex_lock(&ldata->echo_lock);
- space = tty_write_room(tty);
+ old_space = space = tty_write_room(tty);
- buf_end = ldata->echo_buf + N_TTY_BUF_SIZE;
- cp = ldata->echo_buf + ldata->echo_pos;
- nr = ldata->echo_cnt;
- while (nr > 0) {
- c = *cp;
+ tail = ldata->echo_tail;
+ while (ldata->echo_commit != tail) {
+ c = echo_buf(ldata, tail);
if (c == ECHO_OP_START) {
unsigned char op;
- unsigned char *opp;
int no_space_left = 0;
/*
@@ -566,18 +666,13 @@ static void process_echoes(struct tty_struct *tty)
* operation, get the next byte, which is either the
* op code or a control character value.
*/
- opp = cp + 1;
- if (opp == buf_end)
- opp -= N_TTY_BUF_SIZE;
- op = *opp;
+ op = echo_buf(ldata, tail + 1);
switch (op) {
unsigned int num_chars, num_bs;
case ECHO_OP_ERASE_TAB:
- if (++opp == buf_end)
- opp -= N_TTY_BUF_SIZE;
- num_chars = *opp;
+ num_chars = echo_buf(ldata, tail + 2);
/*
* Determine how many columns to go back
@@ -603,21 +698,18 @@ static void process_echoes(struct tty_struct *tty)
if (ldata->column > 0)
ldata->column--;
}
- cp += 3;
- nr -= 3;
+ tail += 3;
break;
case ECHO_OP_SET_CANON_COL:
ldata->canon_column = ldata->column;
- cp += 2;
- nr -= 2;
+ tail += 2;
break;
case ECHO_OP_MOVE_BACK_COL:
if (ldata->column > 0)
ldata->column--;
- cp += 2;
- nr -= 2;
+ tail += 2;
break;
case ECHO_OP_START:
@@ -629,8 +721,7 @@ static void process_echoes(struct tty_struct *tty)
tty_put_char(tty, ECHO_OP_START);
ldata->column++;
space--;
- cp += 2;
- nr -= 2;
+ tail += 2;
break;
default:
@@ -651,8 +742,7 @@ static void process_echoes(struct tty_struct *tty)
tty_put_char(tty, op ^ 0100);
ldata->column += 2;
space -= 2;
- cp += 2;
- nr -= 2;
+ tail += 2;
}
if (no_space_left)
@@ -669,80 +759,92 @@ static void process_echoes(struct tty_struct *tty)
tty_put_char(tty, c);
space -= 1;
}
- cp += 1;
- nr -= 1;
+ tail += 1;
}
-
- /* When end of circular buffer reached, wrap around */
- if (cp >= buf_end)
- cp -= N_TTY_BUF_SIZE;
}
- if (nr == 0) {
- ldata->echo_pos = 0;
- ldata->echo_cnt = 0;
- ldata->echo_overrun = 0;
- } else {
- int num_processed = ldata->echo_cnt - nr;
- ldata->echo_pos += num_processed;
- ldata->echo_pos &= N_TTY_BUF_SIZE - 1;
- ldata->echo_cnt = nr;
- if (num_processed > 0)
- ldata->echo_overrun = 0;
+ /* If the echo buffer is nearly full (so that the possibility exists
+ * of echo overrun before the next commit), then discard enough
+ * data at the tail to prevent a subsequent overrun */
+ while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
+ if (echo_buf(ldata, tail) == ECHO_OP_START) {
+ if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB)
+ tail += 3;
+ else
+ tail += 2;
+ } else
+ tail++;
}
- mutex_unlock(&ldata->echo_lock);
+ ldata->echo_tail = tail;
+ return old_space - space;
+}
+
+static void commit_echoes(struct tty_struct *tty)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+ size_t nr, old, echoed;
+ size_t head;
+
+ head = ldata->echo_head;
+ old = ldata->echo_commit - ldata->echo_tail;
+
+ /* Process committed echoes if the accumulated # of bytes
+ * is over the threshold (and try again each time another
+ * block is accumulated) */
+ nr = head - ldata->echo_tail;
+ if (nr < ECHO_COMMIT_WATERMARK || (nr % ECHO_BLOCK > old % ECHO_BLOCK))
+ return;
+
+ mutex_lock(&ldata->output_lock);
+ ldata->echo_commit = head;
+ echoed = __process_echoes(tty);
+ mutex_unlock(&ldata->output_lock);
+
+ if (echoed && tty->ops->flush_chars)
+ tty->ops->flush_chars(tty);
+}
+
+static void process_echoes(struct tty_struct *tty)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+ size_t echoed;
+
+ if (!L_ECHO(tty) || ldata->echo_commit == ldata->echo_tail)
+ return;
+
+ mutex_lock(&ldata->output_lock);
+ echoed = __process_echoes(tty);
mutex_unlock(&ldata->output_lock);
- if (tty->ops->flush_chars)
+ if (echoed && tty->ops->flush_chars)
tty->ops->flush_chars(tty);
}
+static void flush_echoes(struct tty_struct *tty)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+
+ if (!L_ECHO(tty) || ldata->echo_commit == ldata->echo_head)
+ return;
+
+ mutex_lock(&ldata->output_lock);
+ ldata->echo_commit = ldata->echo_head;
+ __process_echoes(tty);
+ mutex_unlock(&ldata->output_lock);
+}
+
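+
commit_echoes() above deliberately batches echo output: nothing is written until at least ECHO_COMMIT_WATERMARK bytes are pending, and after that only when the pending count crosses another ECHO_BLOCK boundary relative to what was already committed. A standalone restatement of that test (illustrative only; pending corresponds to head - echo_tail and committed to echo_commit - echo_tail in the code above):

#include <stdbool.h>
#include <stddef.h>

#define ECHO_COMMIT_WATERMARK	256
#define ECHO_BLOCK		256

/* Mirror of the early-return test in commit_echoes(). */
static bool should_commit(size_t pending, size_t committed)
{
	if (pending < ECHO_COMMIT_WATERMARK)
		return false;			/* not enough accumulated yet */
	if (pending % ECHO_BLOCK > committed % ECHO_BLOCK)
		return false;			/* no new block boundary crossed */
	return true;
}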
/**
* add_echo_byte - add a byte to the echo buffer
* @c: unicode byte to echo
* @ldata: n_tty data
*
* Add a character or operation byte to the echo buffer.
- *
- * Should be called under the echo lock to protect the echo buffer.
*/
-static void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
+static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
{
- int new_byte_pos;
-
- if (ldata->echo_cnt == N_TTY_BUF_SIZE) {
- /* Circular buffer is already at capacity */
- new_byte_pos = ldata->echo_pos;
-
- /*
- * Since the buffer start position needs to be advanced,
- * be sure to step by a whole operation byte group.
- */
- if (ldata->echo_buf[ldata->echo_pos] == ECHO_OP_START) {
- if (ldata->echo_buf[(ldata->echo_pos + 1) &
- (N_TTY_BUF_SIZE - 1)] ==
- ECHO_OP_ERASE_TAB) {
- ldata->echo_pos += 3;
- ldata->echo_cnt -= 2;
- } else {
- ldata->echo_pos += 2;
- ldata->echo_cnt -= 1;
- }
- } else {
- ldata->echo_pos++;
- }
- ldata->echo_pos &= N_TTY_BUF_SIZE - 1;
-
- ldata->echo_overrun = 1;
- } else {
- new_byte_pos = ldata->echo_pos + ldata->echo_cnt;
- new_byte_pos &= N_TTY_BUF_SIZE - 1;
- ldata->echo_cnt++;
- }
-
- ldata->echo_buf[new_byte_pos] = c;
+ *echo_buf_addr(ldata, ldata->echo_head++) = c;
}
/**
@@ -750,16 +852,12 @@ static void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
* @ldata: n_tty data
*
* Add an operation to the echo buffer to move back one column.
- *
- * Locking: echo_lock to protect the echo buffer
*/
static void echo_move_back_col(struct n_tty_data *ldata)
{
- mutex_lock(&ldata->echo_lock);
add_echo_byte(ECHO_OP_START, ldata);
add_echo_byte(ECHO_OP_MOVE_BACK_COL, ldata);
- mutex_unlock(&ldata->echo_lock);
}
/**
@@ -768,16 +866,12 @@ static void echo_move_back_col(struct n_tty_data *ldata)
*
* Add an operation to the echo buffer to set the canon column
* to the current column.
- *
- * Locking: echo_lock to protect the echo buffer
*/
static void echo_set_canon_col(struct n_tty_data *ldata)
{
- mutex_lock(&ldata->echo_lock);
add_echo_byte(ECHO_OP_START, ldata);
add_echo_byte(ECHO_OP_SET_CANON_COL, ldata);
- mutex_unlock(&ldata->echo_lock);
}
/**
@@ -793,15 +887,11 @@ static void echo_set_canon_col(struct n_tty_data *ldata)
* of input. This information will be used later, along with
* canon column (if applicable), to go back the correct number
* of columns.
- *
- * Locking: echo_lock to protect the echo buffer
*/
static void echo_erase_tab(unsigned int num_chars, int after_tab,
struct n_tty_data *ldata)
{
- mutex_lock(&ldata->echo_lock);
-
add_echo_byte(ECHO_OP_START, ldata);
add_echo_byte(ECHO_OP_ERASE_TAB, ldata);
@@ -813,8 +903,6 @@ static void echo_erase_tab(unsigned int num_chars, int after_tab,
num_chars |= 0x80;
add_echo_byte(num_chars, ldata);
-
- mutex_unlock(&ldata->echo_lock);
}
/**
@@ -826,20 +914,16 @@ static void echo_erase_tab(unsigned int num_chars, int after_tab,
* L_ECHO(tty) is true. Called from the driver receive_buf path.
*
* This variant does not treat control characters specially.
- *
- * Locking: echo_lock to protect the echo buffer
*/
static void echo_char_raw(unsigned char c, struct n_tty_data *ldata)
{
- mutex_lock(&ldata->echo_lock);
if (c == ECHO_OP_START) {
add_echo_byte(ECHO_OP_START, ldata);
add_echo_byte(ECHO_OP_START, ldata);
} else {
add_echo_byte(c, ldata);
}
- mutex_unlock(&ldata->echo_lock);
}
/**
@@ -852,16 +936,12 @@ static void echo_char_raw(unsigned char c, struct n_tty_data *ldata)
*
* This variant tags control characters to be echoed as "^X"
* (where X is the letter representing the control char).
- *
- * Locking: echo_lock to protect the echo buffer
*/
static void echo_char(unsigned char c, struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
- mutex_lock(&ldata->echo_lock);
-
if (c == ECHO_OP_START) {
add_echo_byte(ECHO_OP_START, ldata);
add_echo_byte(ECHO_OP_START, ldata);
@@ -870,8 +950,6 @@ static void echo_char(unsigned char c, struct tty_struct *tty)
add_echo_byte(ECHO_OP_START, ldata);
add_echo_byte(c, ldata);
}
-
- mutex_unlock(&ldata->echo_lock);
}
/**
@@ -896,17 +974,22 @@ static inline void finish_erasing(struct n_tty_data *ldata)
* present in the stream from the driver layer. Handles the complexities
* of UTF-8 multibyte symbols.
*
- * Locking: read_lock for tty buffers
+ * n_tty_receive_buf()/producer path:
+ * caller holds non-exclusive termios_rwsem
+ * modifies read_head
+ *
+ * Modifying the read_head is not considered a publish in this context
+ * because canonical mode is active -- only canon_head publishes
*/
static void eraser(unsigned char c, struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
enum { ERASE, WERASE, KILL } kill_type;
- int head, seen_alnums, cnt;
- unsigned long flags;
+ size_t head;
+ size_t cnt;
+ int seen_alnums;
- /* FIXME: locking needed ? */
if (ldata->read_head == ldata->canon_head) {
/* process_output('\a', tty); */ /* what do you think? */
return;
@@ -917,19 +1000,11 @@ static void eraser(unsigned char c, struct tty_struct *tty)
kill_type = WERASE;
else {
if (!L_ECHO(tty)) {
- raw_spin_lock_irqsave(&ldata->read_lock, flags);
- ldata->read_cnt -= ((ldata->read_head - ldata->canon_head) &
- (N_TTY_BUF_SIZE - 1));
ldata->read_head = ldata->canon_head;
- raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
return;
}
if (!L_ECHOK(tty) || !L_ECHOKE(tty) || !L_ECHOE(tty)) {
- raw_spin_lock_irqsave(&ldata->read_lock, flags);
- ldata->read_cnt -= ((ldata->read_head - ldata->canon_head) &
- (N_TTY_BUF_SIZE - 1));
ldata->read_head = ldata->canon_head;
- raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
finish_erasing(ldata);
echo_char(KILL_CHAR(tty), tty);
/* Add a newline if ECHOK is on and ECHOKE is off. */
@@ -941,14 +1016,13 @@ static void eraser(unsigned char c, struct tty_struct *tty)
}
seen_alnums = 0;
- /* FIXME: Locking ?? */
while (ldata->read_head != ldata->canon_head) {
head = ldata->read_head;
/* erase a single possibly multibyte character */
do {
- head = (head - 1) & (N_TTY_BUF_SIZE-1);
- c = ldata->read_buf[head];
+ head--;
+ c = read_buf(ldata, head);
} while (is_continuation(c, tty) && head != ldata->canon_head);
/* do not partially erase */
@@ -962,11 +1036,8 @@ static void eraser(unsigned char c, struct tty_struct *tty)
else if (seen_alnums)
break;
}
- cnt = (ldata->read_head - head) & (N_TTY_BUF_SIZE-1);
- raw_spin_lock_irqsave(&ldata->read_lock, flags);
+ cnt = ldata->read_head - head;
ldata->read_head = head;
- ldata->read_cnt -= cnt;
- raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
if (L_ECHO(tty)) {
if (L_ECHOPRT(tty)) {
if (!ldata->erasing) {
@@ -976,9 +1047,8 @@ static void eraser(unsigned char c, struct tty_struct *tty)
/* if cnt > 1, output a multi-byte character */
echo_char(c, tty);
while (--cnt > 0) {
- head = (head+1) & (N_TTY_BUF_SIZE-1);
- echo_char_raw(ldata->read_buf[head],
- ldata);
+ head++;
+ echo_char_raw(read_buf(ldata, head), ldata);
echo_move_back_col(ldata);
}
} else if (kill_type == ERASE && !L_ECHOE(tty)) {
@@ -986,7 +1056,7 @@ static void eraser(unsigned char c, struct tty_struct *tty)
} else if (c == '\t') {
unsigned int num_chars = 0;
int after_tab = 0;
- unsigned long tail = ldata->read_head;
+ size_t tail = ldata->read_head;
/*
* Count the columns used for characters
@@ -996,8 +1066,8 @@ static void eraser(unsigned char c, struct tty_struct *tty)
* number of columns.
*/
while (tail != ldata->canon_head) {
- tail = (tail-1) & (N_TTY_BUF_SIZE-1);
- c = ldata->read_buf[tail];
+ tail--;
+ c = read_buf(ldata, tail);
if (c == '\t') {
after_tab = 1;
break;
@@ -1040,7 +1110,7 @@ static void eraser(unsigned char c, struct tty_struct *tty)
* Locking: ctrl_lock
*/
-static inline void isig(int sig, struct tty_struct *tty)
+static void isig(int sig, struct tty_struct *tty)
{
struct pid *tty_pgrp = tty_get_pgrp(tty);
if (tty_pgrp) {
@@ -1056,10 +1126,14 @@ static inline void isig(int sig, struct tty_struct *tty)
* An RS232 break event has been hit in the incoming bitstream. This
* can cause a variety of events depending upon the termios settings.
*
- * Called from the receive_buf path so single threaded.
+ * n_tty_receive_buf()/producer path:
+ * caller holds non-exclusive termios_rwsem
+ * publishes read_head via put_tty_queue()
+ *
+ * Note: may get exclusive termios_rwsem if flushing input buffer
*/
-static inline void n_tty_receive_break(struct tty_struct *tty)
+static void n_tty_receive_break(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1068,8 +1142,11 @@ static inline void n_tty_receive_break(struct tty_struct *tty)
if (I_BRKINT(tty)) {
isig(SIGINT, tty);
if (!L_NOFLSH(tty)) {
+ /* flushing needs exclusive termios_rwsem */
+ up_read(&tty->termios_rwsem);
n_tty_flush_buffer(tty);
tty_driver_flush_buffer(tty);
+ down_read(&tty->termios_rwsem);
}
return;
}
@@ -1094,7 +1171,7 @@ static inline void n_tty_receive_break(struct tty_struct *tty)
* private.
*/
-static inline void n_tty_receive_overrun(struct tty_struct *tty)
+static void n_tty_receive_overrun(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
char buf[64];
@@ -1116,10 +1193,13 @@ static inline void n_tty_receive_overrun(struct tty_struct *tty)
* @c: character
*
* Process a parity error and queue the right data to indicate
- * the error case if necessary. Locking as per n_tty_receive_buf.
+ * the error case if necessary.
+ *
+ * n_tty_receive_buf()/producer path:
+ * caller holds non-exclusive termios_rwsem
+ * publishes read_head via put_tty_queue()
*/
-static inline void n_tty_receive_parity_error(struct tty_struct *tty,
- unsigned char c)
+static void n_tty_receive_parity_error(struct tty_struct *tty, unsigned char c)
{
struct n_tty_data *ldata = tty->disc_data;
@@ -1136,6 +1216,26 @@ static inline void n_tty_receive_parity_error(struct tty_struct *tty,
wake_up_interruptible(&tty->read_wait);
}
+static void
+n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c)
+{
+ if (!L_NOFLSH(tty)) {
+ /* flushing needs exclusive termios_rwsem */
+ up_read(&tty->termios_rwsem);
+ n_tty_flush_buffer(tty);
+ tty_driver_flush_buffer(tty);
+ down_read(&tty->termios_rwsem);
+ }
+ if (I_IXON(tty))
+ start_tty(tty);
+ if (L_ECHO(tty)) {
+ echo_char(c, tty);
+ commit_echoes(tty);
+ }
+ isig(signal, tty);
+ return;
+}
+
/**
* n_tty_receive_char - perform processing
* @tty: terminal device
@@ -1144,117 +1244,54 @@ static inline void n_tty_receive_parity_error(struct tty_struct *tty,
* Process an individual character of input received from the driver.
* This is serialized with respect to itself by the rules for the
* driver above.
+ *
+ * n_tty_receive_buf()/producer path:
+ * caller holds non-exclusive termios_rwsem
+ * publishes canon_head if canonical mode is active
+ * otherwise, publishes read_head via put_tty_queue()
+ *
+ * Returns 1 if LNEXT was received, else returns 0
*/
-static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
+static int
+n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
{
struct n_tty_data *ldata = tty->disc_data;
- unsigned long flags;
int parmrk;
- if (ldata->raw) {
- put_tty_queue(c, ldata);
- return;
- }
-
- if (I_ISTRIP(tty))
- c &= 0x7f;
- if (I_IUCLC(tty) && L_IEXTEN(tty))
- c = tolower(c);
-
- if (L_EXTPROC(tty)) {
- put_tty_queue(c, ldata);
- return;
- }
-
- if (tty->stopped && !tty->flow_stopped && I_IXON(tty) &&
- I_IXANY(tty) && c != START_CHAR(tty) && c != STOP_CHAR(tty) &&
- c != INTR_CHAR(tty) && c != QUIT_CHAR(tty) && c != SUSP_CHAR(tty)) {
- start_tty(tty);
- process_echoes(tty);
- }
-
- if (tty->closing) {
- if (I_IXON(tty)) {
- if (c == START_CHAR(tty)) {
- start_tty(tty);
- process_echoes(tty);
- } else if (c == STOP_CHAR(tty))
- stop_tty(tty);
- }
- return;
- }
-
- /*
- * If the previous character was LNEXT, or we know that this
- * character is not one of the characters that we'll have to
- * handle specially, do shortcut processing to speed things
- * up.
- */
- if (!test_bit(c, ldata->process_char_map) || ldata->lnext) {
- ldata->lnext = 0;
- parmrk = (c == (unsigned char) '\377' && I_PARMRK(tty)) ? 1 : 0;
- if (ldata->read_cnt >= (N_TTY_BUF_SIZE - parmrk - 1)) {
- /* beep if no space */
- if (L_ECHO(tty))
- process_output('\a', tty);
- return;
- }
- if (L_ECHO(tty)) {
- finish_erasing(ldata);
- /* Record the column of first canon char. */
- if (ldata->canon_head == ldata->read_head)
- echo_set_canon_col(ldata);
- echo_char(c, tty);
- process_echoes(tty);
- }
- if (parmrk)
- put_tty_queue(c, ldata);
- put_tty_queue(c, ldata);
- return;
- }
-
if (I_IXON(tty)) {
if (c == START_CHAR(tty)) {
start_tty(tty);
- process_echoes(tty);
- return;
+ commit_echoes(tty);
+ return 0;
}
if (c == STOP_CHAR(tty)) {
stop_tty(tty);
- return;
+ return 0;
}
}
if (L_ISIG(tty)) {
- int signal;
- signal = SIGINT;
- if (c == INTR_CHAR(tty))
- goto send_signal;
- signal = SIGQUIT;
- if (c == QUIT_CHAR(tty))
- goto send_signal;
- signal = SIGTSTP;
- if (c == SUSP_CHAR(tty)) {
-send_signal:
- if (!L_NOFLSH(tty)) {
- n_tty_flush_buffer(tty);
- tty_driver_flush_buffer(tty);
- }
- if (I_IXON(tty))
- start_tty(tty);
- if (L_ECHO(tty)) {
- echo_char(c, tty);
- process_echoes(tty);
- }
- isig(signal, tty);
- return;
+ if (c == INTR_CHAR(tty)) {
+ n_tty_receive_signal_char(tty, SIGINT, c);
+ return 0;
+ } else if (c == QUIT_CHAR(tty)) {
+ n_tty_receive_signal_char(tty, SIGQUIT, c);
+ return 0;
+ } else if (c == SUSP_CHAR(tty)) {
+ n_tty_receive_signal_char(tty, SIGTSTP, c);
+ return 0;
}
}
+ if (tty->stopped && !tty->flow_stopped && I_IXON(tty) && I_IXANY(tty)) {
+ start_tty(tty);
+ process_echoes(tty);
+ }
+
if (c == '\r') {
if (I_IGNCR(tty))
- return;
+ return 0;
if (I_ICRNL(tty))
c = '\n';
} else if (c == '\n' && I_INLCR(tty))
@@ -1264,8 +1301,8 @@ send_signal:
if (c == ERASE_CHAR(tty) || c == KILL_CHAR(tty) ||
(c == WERASE_CHAR(tty) && L_IEXTEN(tty))) {
eraser(c, tty);
- process_echoes(tty);
- return;
+ commit_echoes(tty);
+ return 0;
}
if (c == LNEXT_CHAR(tty) && L_IEXTEN(tty)) {
ldata->lnext = 1;
@@ -1274,42 +1311,32 @@ send_signal:
if (L_ECHOCTL(tty)) {
echo_char_raw('^', ldata);
echo_char_raw('\b', ldata);
- process_echoes(tty);
+ commit_echoes(tty);
}
}
- return;
+ return 1;
}
- if (c == REPRINT_CHAR(tty) && L_ECHO(tty) &&
- L_IEXTEN(tty)) {
- unsigned long tail = ldata->canon_head;
+ if (c == REPRINT_CHAR(tty) && L_ECHO(tty) && L_IEXTEN(tty)) {
+ size_t tail = ldata->canon_head;
finish_erasing(ldata);
echo_char(c, tty);
echo_char_raw('\n', ldata);
while (tail != ldata->read_head) {
- echo_char(ldata->read_buf[tail], tty);
- tail = (tail+1) & (N_TTY_BUF_SIZE-1);
+ echo_char(read_buf(ldata, tail), tty);
+ tail++;
}
- process_echoes(tty);
- return;
+ commit_echoes(tty);
+ return 0;
}
if (c == '\n') {
- if (ldata->read_cnt >= N_TTY_BUF_SIZE) {
- if (L_ECHO(tty))
- process_output('\a', tty);
- return;
- }
if (L_ECHO(tty) || L_ECHONL(tty)) {
echo_char_raw('\n', ldata);
- process_echoes(tty);
+ commit_echoes(tty);
}
goto handle_newline;
}
if (c == EOF_CHAR(tty)) {
- if (ldata->read_cnt >= N_TTY_BUF_SIZE)
- return;
- if (ldata->canon_head != ldata->read_head)
- set_bit(TTY_PUSH, &tty->flags);
c = __DISABLED_CHAR;
goto handle_newline;
}
@@ -1317,11 +1344,6 @@ send_signal:
(c == EOL2_CHAR(tty) && L_IEXTEN(tty))) {
parmrk = (c == (unsigned char) '\377' && I_PARMRK(tty))
? 1 : 0;
- if (ldata->read_cnt >= (N_TTY_BUF_SIZE - parmrk)) {
- if (L_ECHO(tty))
- process_output('\a', tty);
- return;
- }
/*
* XXX are EOL_CHAR and EOL2_CHAR echoed?!?
*/
@@ -1330,7 +1352,7 @@ send_signal:
if (ldata->canon_head == ldata->read_head)
echo_set_canon_col(ldata);
echo_char(c, tty);
- process_echoes(tty);
+ commit_echoes(tty);
}
/*
* XXX does PARMRK doubling happen for
@@ -1340,26 +1362,17 @@ send_signal:
put_tty_queue(c, ldata);
handle_newline:
- raw_spin_lock_irqsave(&ldata->read_lock, flags);
- set_bit(ldata->read_head, ldata->read_flags);
- put_tty_queue_nolock(c, ldata);
+ set_bit(ldata->read_head & (N_TTY_BUF_SIZE - 1), ldata->read_flags);
+ put_tty_queue(c, ldata);
ldata->canon_head = ldata->read_head;
- ldata->canon_data++;
- raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
if (waitqueue_active(&tty->read_wait))
wake_up_interruptible(&tty->read_wait);
- return;
+ return 0;
}
}
parmrk = (c == (unsigned char) '\377' && I_PARMRK(tty)) ? 1 : 0;
- if (ldata->read_cnt >= (N_TTY_BUF_SIZE - parmrk - 1)) {
- /* beep if no space */
- if (L_ECHO(tty))
- process_output('\a', tty);
- return;
- }
if (L_ECHO(tty)) {
finish_erasing(ldata);
if (c == '\n')
@@ -1370,29 +1383,123 @@ handle_newline:
echo_set_canon_col(ldata);
echo_char(c, tty);
}
- process_echoes(tty);
+ commit_echoes(tty);
}
if (parmrk)
put_tty_queue(c, ldata);
put_tty_queue(c, ldata);
+ return 0;
}
+static inline void
+n_tty_receive_char_inline(struct tty_struct *tty, unsigned char c)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+ int parmrk;
-/**
- * n_tty_write_wakeup - asynchronous I/O notifier
- * @tty: tty device
- *
- * Required for the ptys, serial driver etc. since processes
- * that attach themselves to the master and rely on ASYNC
- * IO must be woken up
- */
+ if (tty->stopped && !tty->flow_stopped && I_IXON(tty) && I_IXANY(tty)) {
+ start_tty(tty);
+ process_echoes(tty);
+ }
+ if (L_ECHO(tty)) {
+ finish_erasing(ldata);
+ /* Record the column of first canon char. */
+ if (ldata->canon_head == ldata->read_head)
+ echo_set_canon_col(ldata);
+ echo_char(c, tty);
+ commit_echoes(tty);
+ }
+ parmrk = (c == (unsigned char) '\377' && I_PARMRK(tty)) ? 1 : 0;
+ if (parmrk)
+ put_tty_queue(c, ldata);
+ put_tty_queue(c, ldata);
+}
-static void n_tty_write_wakeup(struct tty_struct *tty)
+static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
{
- if (tty->fasync && test_and_clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags))
- kill_fasync(&tty->fasync, SIGIO, POLL_OUT);
+ n_tty_receive_char_inline(tty, c);
+}
+
+static inline void
+n_tty_receive_char_fast(struct tty_struct *tty, unsigned char c)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+
+ if (tty->stopped && !tty->flow_stopped && I_IXON(tty) && I_IXANY(tty)) {
+ start_tty(tty);
+ process_echoes(tty);
+ }
+ if (L_ECHO(tty)) {
+ finish_erasing(ldata);
+ /* Record the column of first canon char. */
+ if (ldata->canon_head == ldata->read_head)
+ echo_set_canon_col(ldata);
+ echo_char(c, tty);
+ commit_echoes(tty);
+ }
+ put_tty_queue(c, ldata);
+}
+
+static inline void
+n_tty_receive_char_closing(struct tty_struct *tty, unsigned char c)
+{
+ if (I_ISTRIP(tty))
+ c &= 0x7f;
+ if (I_IUCLC(tty) && L_IEXTEN(tty))
+ c = tolower(c);
+
+ if (I_IXON(tty)) {
+ if (c == STOP_CHAR(tty))
+ stop_tty(tty);
+ else if (c == START_CHAR(tty) ||
+ (tty->stopped && !tty->flow_stopped && I_IXANY(tty) &&
+ c != INTR_CHAR(tty) && c != QUIT_CHAR(tty) &&
+ c != SUSP_CHAR(tty))) {
+ start_tty(tty);
+ process_echoes(tty);
+ }
+ }
+}
+
+static void
+n_tty_receive_char_flagged(struct tty_struct *tty, unsigned char c, char flag)
+{
+ char buf[64];
+
+ switch (flag) {
+ case TTY_BREAK:
+ n_tty_receive_break(tty);
+ break;
+ case TTY_PARITY:
+ case TTY_FRAME:
+ n_tty_receive_parity_error(tty, c);
+ break;
+ case TTY_OVERRUN:
+ n_tty_receive_overrun(tty);
+ break;
+ default:
+ printk(KERN_ERR "%s: unknown flag %d\n",
+ tty_name(tty, buf), flag);
+ break;
+ }
+}
+
+static void
+n_tty_receive_char_lnext(struct tty_struct *tty, unsigned char c, char flag)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+
+ ldata->lnext = 0;
+ if (likely(flag == TTY_NORMAL)) {
+ if (I_ISTRIP(tty))
+ c &= 0x7f;
+ if (I_IUCLC(tty) && L_IEXTEN(tty))
+ c = tolower(c);
+ n_tty_receive_char(tty, c);
+ } else
+ n_tty_receive_char_flagged(tty, c, flag);
}
/**
@@ -1406,86 +1513,220 @@ static void n_tty_write_wakeup(struct tty_struct *tty)
* been received. This function must be called from soft contexts
* not from interrupt context. The driver is responsible for making
* calls one at a time and in order (or using flush_to_ldisc)
+ *
+ * n_tty_receive_buf()/producer path:
+ * claims non-exclusive termios_rwsem
+ * publishes read_head and canon_head
*/
-static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
- char *fp, int count)
+static void
+n_tty_receive_buf_real_raw(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
{
struct n_tty_data *ldata = tty->disc_data;
- const unsigned char *p;
- char *f, flags = TTY_NORMAL;
- int i;
- char buf[64];
- unsigned long cpuflags;
-
- if (ldata->real_raw) {
- raw_spin_lock_irqsave(&ldata->read_lock, cpuflags);
- i = min(N_TTY_BUF_SIZE - ldata->read_cnt,
- N_TTY_BUF_SIZE - ldata->read_head);
- i = min(count, i);
- memcpy(ldata->read_buf + ldata->read_head, cp, i);
- ldata->read_head = (ldata->read_head + i) & (N_TTY_BUF_SIZE-1);
- ldata->read_cnt += i;
- cp += i;
- count -= i;
-
- i = min(N_TTY_BUF_SIZE - ldata->read_cnt,
- N_TTY_BUF_SIZE - ldata->read_head);
- i = min(count, i);
- memcpy(ldata->read_buf + ldata->read_head, cp, i);
- ldata->read_head = (ldata->read_head + i) & (N_TTY_BUF_SIZE-1);
- ldata->read_cnt += i;
- raw_spin_unlock_irqrestore(&ldata->read_lock, cpuflags);
- } else {
- for (i = count, p = cp, f = fp; i; i--, p++) {
- if (f)
- flags = *f++;
- switch (flags) {
- case TTY_NORMAL:
- n_tty_receive_char(tty, *p);
- break;
- case TTY_BREAK:
- n_tty_receive_break(tty);
- break;
- case TTY_PARITY:
- case TTY_FRAME:
- n_tty_receive_parity_error(tty, *p);
- break;
- case TTY_OVERRUN:
- n_tty_receive_overrun(tty);
- break;
- default:
- printk(KERN_ERR "%s: unknown flag %d\n",
- tty_name(tty, buf), flags);
- break;
+ size_t n, head;
+
+ head = ldata->read_head & (N_TTY_BUF_SIZE - 1);
+ n = N_TTY_BUF_SIZE - max(read_cnt(ldata), head);
+ n = min_t(size_t, count, n);
+ memcpy(read_buf_addr(ldata, head), cp, n);
+ ldata->read_head += n;
+ cp += n;
+ count -= n;
+
+ head = ldata->read_head & (N_TTY_BUF_SIZE - 1);
+ n = N_TTY_BUF_SIZE - max(read_cnt(ldata), head);
+ n = min_t(size_t, count, n);
+ memcpy(read_buf_addr(ldata, head), cp, n);
+ ldata->read_head += n;
+}
+
+static void
+n_tty_receive_buf_raw(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+ char flag = TTY_NORMAL;
+
+ while (count--) {
+ if (fp)
+ flag = *fp++;
+ if (likely(flag == TTY_NORMAL))
+ put_tty_queue(*cp++, ldata);
+ else
+ n_tty_receive_char_flagged(tty, *cp++, flag);
+ }
+}
+
+static void
+n_tty_receive_buf_closing(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ char flag = TTY_NORMAL;
+
+ while (count--) {
+ if (fp)
+ flag = *fp++;
+ if (likely(flag == TTY_NORMAL))
+ n_tty_receive_char_closing(tty, *cp++);
+ else
+ n_tty_receive_char_flagged(tty, *cp++, flag);
+ }
+}
+
+static void
+n_tty_receive_buf_standard(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+ char flag = TTY_NORMAL;
+
+ while (count--) {
+ if (fp)
+ flag = *fp++;
+ if (likely(flag == TTY_NORMAL)) {
+ unsigned char c = *cp++;
+
+ if (I_ISTRIP(tty))
+ c &= 0x7f;
+ if (I_IUCLC(tty) && L_IEXTEN(tty))
+ c = tolower(c);
+ if (L_EXTPROC(tty)) {
+ put_tty_queue(c, ldata);
+ continue;
}
+ if (!test_bit(c, ldata->char_map))
+ n_tty_receive_char_inline(tty, c);
+ else if (n_tty_receive_char_special(tty, c) && count) {
+ if (fp)
+ flag = *fp++;
+ n_tty_receive_char_lnext(tty, *cp++, flag);
+ count--;
+ }
+ } else
+ n_tty_receive_char_flagged(tty, *cp++, flag);
+ }
+}
+
+static void
+n_tty_receive_buf_fast(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+ char flag = TTY_NORMAL;
+
+ while (count--) {
+ if (fp)
+ flag = *fp++;
+ if (likely(flag == TTY_NORMAL)) {
+ unsigned char c = *cp++;
+
+ if (!test_bit(c, ldata->char_map))
+ n_tty_receive_char_fast(tty, c);
+ else if (n_tty_receive_char_special(tty, c) && count) {
+ if (fp)
+ flag = *fp++;
+ n_tty_receive_char_lnext(tty, *cp++, flag);
+ count--;
+ }
+ } else
+ n_tty_receive_char_flagged(tty, *cp++, flag);
+ }
+}
+
+static void __receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+ bool preops = I_ISTRIP(tty) || (I_IUCLC(tty) && L_IEXTEN(tty));
+
+ if (ldata->real_raw)
+ n_tty_receive_buf_real_raw(tty, cp, fp, count);
+ else if (ldata->raw || (L_EXTPROC(tty) && !preops))
+ n_tty_receive_buf_raw(tty, cp, fp, count);
+ else if (tty->closing && !L_EXTPROC(tty))
+ n_tty_receive_buf_closing(tty, cp, fp, count);
+ else {
+ if (ldata->lnext) {
+ char flag = TTY_NORMAL;
+
+ if (fp)
+ flag = *fp++;
+ n_tty_receive_char_lnext(tty, *cp++, flag);
+ count--;
}
+
+ if (!preops && !I_PARMRK(tty))
+ n_tty_receive_buf_fast(tty, cp, fp, count);
+ else
+ n_tty_receive_buf_standard(tty, cp, fp, count);
+
+ flush_echoes(tty);
if (tty->ops->flush_chars)
tty->ops->flush_chars(tty);
}
- set_room(tty);
-
- if ((!ldata->icanon && (ldata->read_cnt >= ldata->minimum_to_wake)) ||
+ if ((!ldata->icanon && (read_cnt(ldata) >= ldata->minimum_to_wake)) ||
L_EXTPROC(tty)) {
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
if (waitqueue_active(&tty->read_wait))
wake_up_interruptible(&tty->read_wait);
}
+}
+
+static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ int room, n;
+
+ down_read(&tty->termios_rwsem);
- /*
- * Check the remaining room for the input canonicalization
- * mode. We don't want to throttle the driver if we're in
- * canonical mode and don't have a newline yet!
- */
while (1) {
- tty_set_flow_change(tty, TTY_THROTTLE_SAFE);
- if (tty->receive_room >= TTY_THRESHOLD_THROTTLE)
+ room = receive_room(tty);
+ n = min(count, room);
+ if (!n)
break;
- if (!tty_throttle_safe(tty))
+ __receive_buf(tty, cp, fp, n);
+ cp += n;
+ if (fp)
+ fp += n;
+ count -= n;
+ }
+
+ tty->receive_room = room;
+ n_tty_check_throttle(tty);
+ up_read(&tty->termios_rwsem);
+}
+
+static int n_tty_receive_buf2(struct tty_struct *tty, const unsigned char *cp,
+ char *fp, int count)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+ int room, n, rcvd = 0;
+
+ down_read(&tty->termios_rwsem);
+
+ while (1) {
+ room = receive_room(tty);
+ n = min(count, room);
+ if (!n) {
+ if (!room)
+ ldata->no_room = 1;
break;
+ }
+ __receive_buf(tty, cp, fp, n);
+ cp += n;
+ if (fp)
+ fp += n;
+ count -= n;
+ rcvd += n;
}
- __tty_set_flow_change(tty, 0);
+
+ tty->receive_room = room;
+ n_tty_check_throttle(tty);
+ up_read(&tty->termios_rwsem);
+
+ return rcvd;
}
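
n_tty_receive_buf2() is the variant that reports back how much it actually consumed and records, via no_room, that input had to be left behind so n_tty_set_room() can re-kick the flip-buffer work later. The overall shape, restated outside the kernel (illustrative only; the names are not from the patch):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct sink {
	unsigned char buf[256];
	size_t used;
	bool no_room;		/* set when input had to be left behind */
};

/* Take as much of data[] as fits, return how much was taken. */
static size_t sink_receive(struct sink *s, const unsigned char *data,
			   size_t count)
{
	size_t taken = 0;

	while (count) {
		size_t room = sizeof(s->buf) - s->used;
		size_t n = count < room ? count : room;

		if (!n) {
			s->no_room = true;	/* caller must retry later */
			break;
		}
		memcpy(s->buf + s->used, data + taken, n);
		s->used += n;
		taken += n;
		count -= n;
	}
	return taken;
}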
int is_ignored(int sig)
@@ -1505,7 +1746,7 @@ int is_ignored(int sig)
* guaranteed that this function will not be re-entered or in progress
* when the ldisc is closed.
*
- * Locking: Caller holds tty->termios_mutex
+ * Locking: Caller holds tty->termios_rwsem
*/
static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
@@ -1517,12 +1758,13 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
canon_change = (old->c_lflag ^ tty->termios.c_lflag) & ICANON;
if (canon_change) {
bitmap_zero(ldata->read_flags, N_TTY_BUF_SIZE);
+ ldata->line_start = 0;
ldata->canon_head = ldata->read_tail;
- ldata->canon_data = 0;
ldata->erasing = 0;
+ ldata->lnext = 0;
}
- if (canon_change && !L_ICANON(tty) && ldata->read_cnt)
+ if (canon_change && !L_ICANON(tty) && read_cnt(ldata))
wake_up_interruptible(&tty->read_wait);
ldata->icanon = (L_ICANON(tty) != 0);
@@ -1531,41 +1773,38 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
I_ICRNL(tty) || I_INLCR(tty) || L_ICANON(tty) ||
I_IXON(tty) || L_ISIG(tty) || L_ECHO(tty) ||
I_PARMRK(tty)) {
- bitmap_zero(ldata->process_char_map, 256);
+ bitmap_zero(ldata->char_map, 256);
if (I_IGNCR(tty) || I_ICRNL(tty))
- set_bit('\r', ldata->process_char_map);
+ set_bit('\r', ldata->char_map);
if (I_INLCR(tty))
- set_bit('\n', ldata->process_char_map);
+ set_bit('\n', ldata->char_map);
if (L_ICANON(tty)) {
- set_bit(ERASE_CHAR(tty), ldata->process_char_map);
- set_bit(KILL_CHAR(tty), ldata->process_char_map);
- set_bit(EOF_CHAR(tty), ldata->process_char_map);
- set_bit('\n', ldata->process_char_map);
- set_bit(EOL_CHAR(tty), ldata->process_char_map);
+ set_bit(ERASE_CHAR(tty), ldata->char_map);
+ set_bit(KILL_CHAR(tty), ldata->char_map);
+ set_bit(EOF_CHAR(tty), ldata->char_map);
+ set_bit('\n', ldata->char_map);
+ set_bit(EOL_CHAR(tty), ldata->char_map);
if (L_IEXTEN(tty)) {
- set_bit(WERASE_CHAR(tty),
- ldata->process_char_map);
- set_bit(LNEXT_CHAR(tty),
- ldata->process_char_map);
- set_bit(EOL2_CHAR(tty),
- ldata->process_char_map);
+ set_bit(WERASE_CHAR(tty), ldata->char_map);
+ set_bit(LNEXT_CHAR(tty), ldata->char_map);
+ set_bit(EOL2_CHAR(tty), ldata->char_map);
if (L_ECHO(tty))
set_bit(REPRINT_CHAR(tty),
- ldata->process_char_map);
+ ldata->char_map);
}
}
if (I_IXON(tty)) {
- set_bit(START_CHAR(tty), ldata->process_char_map);
- set_bit(STOP_CHAR(tty), ldata->process_char_map);
+ set_bit(START_CHAR(tty), ldata->char_map);
+ set_bit(STOP_CHAR(tty), ldata->char_map);
}
if (L_ISIG(tty)) {
- set_bit(INTR_CHAR(tty), ldata->process_char_map);
- set_bit(QUIT_CHAR(tty), ldata->process_char_map);
- set_bit(SUSP_CHAR(tty), ldata->process_char_map);
+ set_bit(INTR_CHAR(tty), ldata->char_map);
+ set_bit(QUIT_CHAR(tty), ldata->char_map);
+ set_bit(SUSP_CHAR(tty), ldata->char_map);
}
- clear_bit(__DISABLED_CHAR, ldata->process_char_map);
+ clear_bit(__DISABLED_CHAR, ldata->char_map);
ldata->raw = 0;
ldata->real_raw = 0;
} else {
@@ -1608,9 +1847,7 @@ static void n_tty_close(struct tty_struct *tty)
if (tty->link)
n_tty_packet_mode_flush(tty);
- kfree(ldata->read_buf);
- kfree(ldata->echo_buf);
- kfree(ldata);
+ vfree(ldata);
tty->disc_data = NULL;
}
@@ -1628,26 +1865,23 @@ static int n_tty_open(struct tty_struct *tty)
{
struct n_tty_data *ldata;
- ldata = kzalloc(sizeof(*ldata), GFP_KERNEL);
+ /* Currently a malloc failure here can panic */
+ ldata = vmalloc(sizeof(*ldata));
if (!ldata)
goto err;
ldata->overrun_time = jiffies;
mutex_init(&ldata->atomic_read_lock);
mutex_init(&ldata->output_lock);
- mutex_init(&ldata->echo_lock);
- raw_spin_lock_init(&ldata->read_lock);
-
- /* These are ugly. Currently a malloc failure here can panic */
- ldata->read_buf = kzalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
- ldata->echo_buf = kzalloc(N_TTY_BUF_SIZE, GFP_KERNEL);
- if (!ldata->read_buf || !ldata->echo_buf)
- goto err_free_bufs;
tty->disc_data = ldata;
reset_buffer_flags(tty->disc_data);
ldata->column = 0;
+ ldata->canon_column = 0;
ldata->minimum_to_wake = 1;
+ ldata->num_overrun = 0;
+ ldata->no_room = 0;
+ ldata->lnext = 0;
tty->closing = 0;
/* indicate buffer work may resume */
clear_bit(TTY_LDISC_HALTED, &tty->flags);
@@ -1655,10 +1889,6 @@ static int n_tty_open(struct tty_struct *tty)
tty_unthrottle(tty);
return 0;
-err_free_bufs:
- kfree(ldata->read_buf);
- kfree(ldata->echo_buf);
- kfree(ldata);
err:
return -ENOMEM;
}
@@ -1667,11 +1897,10 @@ static inline int input_available_p(struct tty_struct *tty, int amt)
{
struct n_tty_data *ldata = tty->disc_data;
- tty_flush_to_ldisc(tty);
if (ldata->icanon && !L_EXTPROC(tty)) {
- if (ldata->canon_data)
+ if (ldata->canon_head != ldata->read_tail)
return 1;
- } else if (ldata->read_cnt >= (amt ? amt : 1))
+ } else if (read_cnt(ldata) >= (amt ? amt : 1))
return 1;
return 0;
@@ -1692,6 +1921,9 @@ static inline int input_available_p(struct tty_struct *tty, int amt)
*
* Called under the ldata->atomic_read_lock sem
*
+ * n_tty_read()/consumer path:
+ * caller holds non-exclusive termios_rwsem
+ * read_tail published
*/
static int copy_from_read_buf(struct tty_struct *tty,
@@ -1702,34 +1934,114 @@ static int copy_from_read_buf(struct tty_struct *tty,
struct n_tty_data *ldata = tty->disc_data;
int retval;
size_t n;
- unsigned long flags;
bool is_eof;
+ size_t tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
retval = 0;
- raw_spin_lock_irqsave(&ldata->read_lock, flags);
- n = min(ldata->read_cnt, N_TTY_BUF_SIZE - ldata->read_tail);
+ n = min(read_cnt(ldata), N_TTY_BUF_SIZE - tail);
n = min(*nr, n);
- raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
if (n) {
- retval = copy_to_user(*b, &ldata->read_buf[ldata->read_tail], n);
+ retval = copy_to_user(*b, read_buf_addr(ldata, tail), n);
n -= retval;
- is_eof = n == 1 &&
- ldata->read_buf[ldata->read_tail] == EOF_CHAR(tty);
- tty_audit_add_data(tty, &ldata->read_buf[ldata->read_tail], n,
+ is_eof = n == 1 && read_buf(ldata, tail) == EOF_CHAR(tty);
+ tty_audit_add_data(tty, read_buf_addr(ldata, tail), n,
ldata->icanon);
- raw_spin_lock_irqsave(&ldata->read_lock, flags);
- ldata->read_tail = (ldata->read_tail + n) & (N_TTY_BUF_SIZE-1);
- ldata->read_cnt -= n;
+ ldata->read_tail += n;
/* Turn single EOF into zero-length read */
- if (L_EXTPROC(tty) && ldata->icanon && is_eof && !ldata->read_cnt)
+ if (L_EXTPROC(tty) && ldata->icanon && is_eof && !read_cnt(ldata))
n = 0;
- raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
*b += n;
*nr -= n;
}
return retval;
}
+/**
+ * canon_copy_from_read_buf - copy read data in canonical mode
+ * @tty: terminal device
+ * @b: user data
+ * @nr: size of data
+ *
+ * Helper function for n_tty_read. It is only called when ICANON is on;
+ * it copies one line of input up to and including the line-delimiting
+ * character into the user-space buffer.
+ *
+ * Called under the atomic_read_lock mutex
+ *
+ * n_tty_read()/consumer path:
+ * caller holds non-exclusive termios_rwsem
+ * read_tail published
+ */
+
+static int canon_copy_from_read_buf(struct tty_struct *tty,
+ unsigned char __user **b,
+ size_t *nr)
+{
+ struct n_tty_data *ldata = tty->disc_data;
+ size_t n, size, more, c;
+ size_t eol;
+ size_t tail;
+ int ret, found = 0;
+ bool eof_push = 0;
+
+ /* N.B. avoid overrun if nr == 0 */
+ n = min(*nr, read_cnt(ldata));
+ if (!n)
+ return 0;
+
+ tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
+ size = min_t(size_t, tail + n, N_TTY_BUF_SIZE);
+
+ n_tty_trace("%s: nr:%zu tail:%zu n:%zu size:%zu\n",
+ __func__, *nr, tail, n, size);
+
+ eol = find_next_bit(ldata->read_flags, size, tail);
+ more = n - (size - tail);
+ if (eol == N_TTY_BUF_SIZE && more) {
+ /* scan wrapped without finding set bit */
+ eol = find_next_bit(ldata->read_flags, more, 0);
+ if (eol != more)
+ found = 1;
+ } else if (eol != size)
+ found = 1;
+
+ size = N_TTY_BUF_SIZE - tail;
+ n = (found + eol + size) & (N_TTY_BUF_SIZE - 1);
+ c = n;
+
+ if (found && read_buf(ldata, eol) == __DISABLED_CHAR) {
+ n--;
+ eof_push = !n && ldata->read_tail != ldata->line_start;
+ }
+
+ n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu size:%zu more:%zu\n",
+ __func__, eol, found, n, c, size, more);
+
+ if (n > size) {
+ ret = copy_to_user(*b, read_buf_addr(ldata, tail), size);
+ if (ret)
+ return -EFAULT;
+ ret = copy_to_user(*b + size, ldata->read_buf, n - size);
+ } else
+ ret = copy_to_user(*b, read_buf_addr(ldata, tail), n);
+
+ if (ret)
+ return -EFAULT;
+ *b += n;
+ *nr -= n;
+
+ if (found)
+ clear_bit(eol, ldata->read_flags);
+ smp_mb__after_clear_bit();
+ ldata->read_tail += c;
+
+ if (found) {
+ ldata->line_start = ldata->read_tail;
+ tty_audit_push(tty);
+ }
+ return eof_push ? -EAGAIN : 0;
+}
+
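+
Both copy helpers have to cope with data that wraps past the end of the circular read_buf; canon_copy_from_read_buf() handles it with the n > size test above, issuing two copy_to_user() calls. The same idea in standalone form (plain C, with memcpy standing in for copy_to_user):

#include <string.h>
#include <stddef.h>

#define BUF_SIZE 8	/* power of two, like N_TTY_BUF_SIZE */

/* Copy n bytes starting at the (free-running) index tail out of a circular
 * buffer; a run that wraps past the end is copied in two pieces. */
static void copy_out(const unsigned char *ring, size_t tail, size_t n,
		     unsigned char *dst)
{
	size_t off = tail & (BUF_SIZE - 1);
	size_t first = BUF_SIZE - off;

	if (n > first) {			/* wraps: tail..end, then start */
		memcpy(dst, ring + off, first);
		memcpy(dst + first, ring, n - first);
	} else {
		memcpy(dst, ring + off, n);
	}
}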
extern ssize_t redirected_tty_write(struct file *, const char __user *,
size_t, loff_t *);
@@ -1787,6 +2099,10 @@ static int job_control(struct tty_struct *tty, struct file *file)
* a hangup. Always called in user context, may sleep.
*
* This code must be sure never to sleep through a hangup.
+ *
+ * n_tty_read()/consumer path:
+ * claims non-exclusive termios_rwsem
+ * publishes read_tail
*/
static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
@@ -1798,16 +2114,27 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
int c;
int minimum, time;
ssize_t retval = 0;
- ssize_t size;
long timeout;
unsigned long flags;
int packet;
-do_it_again:
c = job_control(tty, file);
if (c < 0)
return c;
+ /*
+ * Internal serialization of reads.
+ */
+ if (file->f_flags & O_NONBLOCK) {
+ if (!mutex_trylock(&ldata->atomic_read_lock))
+ return -EAGAIN;
+ } else {
+ if (mutex_lock_interruptible(&ldata->atomic_read_lock))
+ return -ERESTARTSYS;
+ }
+
+ down_read(&tty->termios_rwsem);
+
minimum = time = 0;
timeout = MAX_SCHEDULE_TIMEOUT;
if (!ldata->icanon) {
@@ -1825,16 +2152,6 @@ do_it_again:
}
}
- /*
- * Internal serialization of reads.
- */
- if (file->f_flags & O_NONBLOCK) {
- if (!mutex_trylock(&ldata->atomic_read_lock))
- return -EAGAIN;
- } else {
- if (mutex_lock_interruptible(&ldata->atomic_read_lock))
- return -ERESTARTSYS;
- }
packet = tty->packet;
add_wait_queue(&tty->read_wait, &wait);
@@ -1883,7 +2200,11 @@ do_it_again:
break;
}
n_tty_set_room(tty);
+ up_read(&tty->termios_rwsem);
+
timeout = schedule_timeout(timeout);
+
+ down_read(&tty->termios_rwsem);
continue;
}
__set_current_state(TASK_RUNNING);
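/*
 * Illustrative sketch (not part of the patch): the hunk above releases
 * termios_rwsem before sleeping in schedule_timeout() and re-acquires it
 * afterwards, so termios changes are not blocked by a sleeping reader.
 * A user-space analogue with a pthread rwlock; "state_changed" and the
 * short sleep stand in for the real wait condition.
 */
#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_rwlock_t rwsem = PTHREAD_RWLOCK_INITIALIZER;
static volatile bool state_changed;

static void consumer_wait(void)
{
	pthread_rwlock_rdlock(&rwsem);
	while (!state_changed) {
		/* Drop the lock across the sleep so writers can get in. */
		pthread_rwlock_unlock(&rwsem);
		usleep(1000);
		pthread_rwlock_rdlock(&rwsem);
		/* State may have changed while unlocked: loop re-checks. */
	}
	pthread_rwlock_unlock(&rwsem);
}

static void *producer(void *arg)
{
	(void)arg;
	pthread_rwlock_wrlock(&rwsem);
	state_changed = true;
	pthread_rwlock_unlock(&rwsem);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	consumer_wait();
	pthread_join(t, NULL);
	return 0;
}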
@@ -1899,45 +2220,11 @@ do_it_again:
}
if (ldata->icanon && !L_EXTPROC(tty)) {
- /* N.B. avoid overrun if nr == 0 */
- raw_spin_lock_irqsave(&ldata->read_lock, flags);
- while (nr && ldata->read_cnt) {
- int eol;
-
- eol = test_and_clear_bit(ldata->read_tail,
- ldata->read_flags);
- c = ldata->read_buf[ldata->read_tail];
- ldata->read_tail = ((ldata->read_tail+1) &
- (N_TTY_BUF_SIZE-1));
- ldata->read_cnt--;
- if (eol) {
- /* this test should be redundant:
- * we shouldn't be reading data if
- * canon_data is 0
- */
- if (--ldata->canon_data < 0)
- ldata->canon_data = 0;
- }
- raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
-
- if (!eol || (c != __DISABLED_CHAR)) {
- if (tty_put_user(tty, c, b++)) {
- retval = -EFAULT;
- b--;
- raw_spin_lock_irqsave(&ldata->read_lock, flags);
- break;
- }
- nr--;
- }
- if (eol) {
- tty_audit_push(tty);
- raw_spin_lock_irqsave(&ldata->read_lock, flags);
- break;
- }
- raw_spin_lock_irqsave(&ldata->read_lock, flags);
- }
- raw_spin_unlock_irqrestore(&ldata->read_lock, flags);
- if (retval)
+ retval = canon_copy_from_read_buf(tty, &b, &nr);
+ if (retval == -EAGAIN) {
+ retval = 0;
+ continue;
+ } else if (retval)
break;
} else {
int uncopied;
@@ -1951,24 +2238,7 @@ do_it_again:
}
}
- /* If there is enough space in the read buffer now, let the
- * low-level driver know. We use n_tty_chars_in_buffer() to
- * check the buffer, as it now knows about canonical mode.
- * Otherwise, if the driver is throttled and the line is
- * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
- * we won't get any more characters.
- */
- while (1) {
- tty_set_flow_change(tty, TTY_UNTHROTTLE_SAFE);
- if (n_tty_chars_in_buffer(tty) > TTY_THRESHOLD_UNTHROTTLE)
- break;
- if (!tty->count)
- break;
- n_tty_set_room(tty);
- if (!tty_unthrottle_safe(tty))
- break;
- }
- __tty_set_flow_change(tty, 0);
+ n_tty_check_unthrottle(tty);
if (b - buf >= minimum)
break;
@@ -1982,15 +2252,11 @@ do_it_again:
ldata->minimum_to_wake = minimum;
__set_current_state(TASK_RUNNING);
- size = b - buf;
- if (size) {
- retval = size;
- if (nr)
- clear_bit(TTY_PUSH, &tty->flags);
- } else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
- goto do_it_again;
+ if (b - buf)
+ retval = b - buf;
n_tty_set_room(tty);
+ up_read(&tty->termios_rwsem);
return retval;
}
@@ -2031,6 +2297,8 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
return retval;
}
+ down_read(&tty->termios_rwsem);
+
/* Write out any echoed characters that are still pending */
process_echoes(tty);
@@ -2084,13 +2352,18 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file,
retval = -EAGAIN;
break;
}
+ up_read(&tty->termios_rwsem);
+
schedule();
+
+ down_read(&tty->termios_rwsem);
}
break_out:
__set_current_state(TASK_RUNNING);
remove_wait_queue(&tty->write_wait, &wait);
if (b - buf != nr && tty->fasync)
set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+ up_read(&tty->termios_rwsem);
return (b - buf) ? b - buf : retval;
}
@@ -2139,19 +2412,19 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
static unsigned long inq_canon(struct n_tty_data *ldata)
{
- int nr, head, tail;
+ size_t nr, head, tail;
- if (!ldata->canon_data)
+ if (ldata->canon_head == ldata->read_tail)
return 0;
head = ldata->canon_head;
tail = ldata->read_tail;
- nr = (head - tail) & (N_TTY_BUF_SIZE-1);
+ nr = head - tail;
/* Skip EOF-chars.. */
while (head != tail) {
- if (test_bit(tail, ldata->read_flags) &&
- ldata->read_buf[tail] == __DISABLED_CHAR)
+ if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) &&
+ read_buf(ldata, tail) == __DISABLED_CHAR)
nr--;
- tail = (tail+1) & (N_TTY_BUF_SIZE-1);
+ tail++;
}
return nr;
}
@@ -2166,10 +2439,12 @@ static int n_tty_ioctl(struct tty_struct *tty, struct file *file,
case TIOCOUTQ:
return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
case TIOCINQ:
- /* FIXME: Locking */
- retval = ldata->read_cnt;
+ down_write(&tty->termios_rwsem);
if (L_ICANON(tty))
retval = inq_canon(ldata);
+ else
+ retval = read_cnt(ldata);
+ up_write(&tty->termios_rwsem);
return put_user(retval, (unsigned int __user *) arg);
default:
return n_tty_ioctl_helper(tty, file, cmd, arg);
@@ -2203,6 +2478,7 @@ struct tty_ldisc_ops tty_ldisc_N_TTY = {
.receive_buf = n_tty_receive_buf,
.write_wakeup = n_tty_write_wakeup,
.fasync = n_tty_fasync,
+ .receive_buf2 = n_tty_receive_buf2,
};
/**
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index abfd9908978..25c9bc78372 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -89,17 +89,13 @@ static void pty_unthrottle(struct tty_struct *tty)
* pty_space - report space left for writing
* @to: tty we are writing into
*
- * The tty buffers allow 64K but we sneak a peak and clip at 8K this
- * allows a lot of overspill room for echo and other fun messes to
- * be handled properly
+ * Limit the buffer space used by ptys to 8k.
*/
static int pty_space(struct tty_struct *to)
{
- int n = 8192 - to->port->buf.memory_used;
- if (n < 0)
- return 0;
- return n;
+ int n = tty_buffer_space_avail(to->port);
+ return min(n, 8192);
}
/**
@@ -125,10 +121,8 @@ static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c)
/* Stuff the data into the input queue of the other end */
c = tty_insert_flip_string(to->port, buf, c);
/* And shovel */
- if (c) {
+ if (c)
tty_flip_buffer_push(to->port);
- tty_wakeup(tty);
- }
}
return c;
}
@@ -287,7 +281,7 @@ static int pty_resize(struct tty_struct *tty, struct winsize *ws)
struct tty_struct *pty = tty->link;
/* For a PTY we need to lock the tty side */
- mutex_lock(&tty->termios_mutex);
+ mutex_lock(&tty->winsize_mutex);
if (!memcmp(ws, &tty->winsize, sizeof(*ws)))
goto done;
@@ -314,7 +308,7 @@ static int pty_resize(struct tty_struct *tty, struct winsize *ws)
tty->winsize = *ws;
pty->winsize = *ws; /* Never used so will go away soon */
done:
- mutex_unlock(&tty->termios_mutex);
+ mutex_unlock(&tty->winsize_mutex);
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 86c00b1c558..570df9d2a5d 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -3062,7 +3062,7 @@ void serial8250_resume_port(int line)
*/
static int serial8250_probe(struct platform_device *dev)
{
- struct plat_serial8250_port *p = dev->dev.platform_data;
+ struct plat_serial8250_port *p = dev_get_platdata(&dev->dev);
struct uart_8250_port uart;
int ret, i, irqflag = 0;
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 76a8daadff4..daf710f5c3f 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -57,11 +57,25 @@
struct dw8250_data {
int last_lcr;
+ int last_mcr;
int line;
struct clk *clk;
u8 usr_reg;
};
+static inline int dw8250_modify_msr(struct uart_port *p, int offset, int value)
+{
+ struct dw8250_data *d = p->private_data;
+
+ /* If reading MSR, report CTS asserted when auto-CTS/RTS enabled */
+ if (offset == UART_MSR && d->last_mcr & UART_MCR_AFE) {
+ value |= UART_MSR_CTS;
+ value &= ~UART_MSR_DCTS;
+ }
+
+ return value;
+}
+
static void dw8250_serial_out(struct uart_port *p, int offset, int value)
{
struct dw8250_data *d = p->private_data;
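/*
 * Illustrative sketch (not part of the patch): with automatic flow control
 * (MCR AFE) the DesignWare UART handles CTS in hardware, so dw8250_modify_msr()
 * reports CTS as permanently asserted and hides delta-CTS.  Stand-alone model
 * of that masking; the bit values below follow the usual 16550 register layout
 * (MSR CTS = 0x10, MSR DCTS = 0x01, MCR AFE = 0x20).
 */
#define MSR_DCTS 0x01
#define MSR_CTS  0x10
#define MCR_AFE  0x20

static unsigned int fake_cts_if_afe(unsigned int last_mcr, unsigned int msr)
{
	if (last_mcr & MCR_AFE) {
		msr |= MSR_CTS;    /* always report CTS asserted */
		msr &= ~MSR_DCTS;  /* and never a CTS change */
	}
	return msr;
}

int main(void)
{
	/* AFE on: raw MSR 0x01 (DCTS set, CTS low) is reported as 0x10. */
	return fake_cts_if_afe(MCR_AFE, MSR_DCTS) == MSR_CTS ? 0 : 1;
}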
@@ -69,15 +83,17 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value)
if (offset == UART_LCR)
d->last_lcr = value;
- offset <<= p->regshift;
- writeb(value, p->membase + offset);
+ if (offset == UART_MCR)
+ d->last_mcr = value;
+
+ writeb(value, p->membase + (offset << p->regshift));
}
static unsigned int dw8250_serial_in(struct uart_port *p, int offset)
{
- offset <<= p->regshift;
+ unsigned int value = readb(p->membase + (offset << p->regshift));
- return readb(p->membase + offset);
+ return dw8250_modify_msr(p, offset, value);
}
/* Read Back (rb) version to ensure register access ordering. */
@@ -94,15 +110,17 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
if (offset == UART_LCR)
d->last_lcr = value;
- offset <<= p->regshift;
- writel(value, p->membase + offset);
+ if (offset == UART_MCR)
+ d->last_mcr = value;
+
+ writel(value, p->membase + (offset << p->regshift));
}
static unsigned int dw8250_serial_in32(struct uart_port *p, int offset)
{
- offset <<= p->regshift;
+ unsigned int value = readl(p->membase + (offset << p->regshift));
- return readl(p->membase + offset);
+ return dw8250_modify_msr(p, offset, value);
}
static int dw8250_handle_irq(struct uart_port *p)
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index 721904f8efa..c100d6343d5 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -193,7 +193,8 @@ static int __init parse_options(struct early_serial8250_device *device,
if (options) {
options++;
device->baud = simple_strtoul(options, NULL, 0);
- length = min(strcspn(options, " "), sizeof(device->options));
+ length = min(strcspn(options, " ") + 1,
+ (size_t)(sizeof(device->options)));
strlcpy(device->options, options, length);
} else {
device->baud = probe_baud(port);
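/*
 * Illustrative sketch (not part of the patch): strlcpy() copies at most
 * size - 1 characters and always NUL-terminates, so keeping the first
 * strcspn(s, " ") characters of the option string needs a size of
 * strcspn(s, " ") + 1, as in the hunk above.  A local helper is used here
 * because strlcpy() is not part of ISO C.
 */
#include <stdio.h>
#include <string.h>

static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len < size - 1 ? len : size - 1;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	const char *options = "115200n8 keep_this_out";
	char kept[16];
	size_t length = strcspn(options, " ") + 1; /* "115200n8" plus NUL */

	if (length > sizeof(kept))
		length = sizeof(kept);
	my_strlcpy(kept, options, length);
	printf("%s\n", kept); /* prints "115200n8" */
	return 0;
}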
diff --git a/drivers/tty/serial/8250/8250_em.c b/drivers/tty/serial/8250/8250_em.c
index 916cc19fbbd..5f3bba12c15 100644
--- a/drivers/tty/serial/8250/8250_em.c
+++ b/drivers/tty/serial/8250/8250_em.c
@@ -95,25 +95,23 @@ static int serial8250_em_probe(struct platform_device *pdev)
struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
struct serial8250_em_priv *priv;
struct uart_8250_port up;
- int ret = -EINVAL;
+ int ret;
if (!regs || !irq) {
dev_err(&pdev->dev, "missing registers or irq\n");
- goto err0;
+ return -EINVAL;
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv) {
dev_err(&pdev->dev, "unable to allocate private data\n");
- ret = -ENOMEM;
- goto err0;
+ return -ENOMEM;
}
- priv->sclk = clk_get(&pdev->dev, "sclk");
+ priv->sclk = devm_clk_get(&pdev->dev, "sclk");
if (IS_ERR(priv->sclk)) {
dev_err(&pdev->dev, "unable to get clock\n");
- ret = PTR_ERR(priv->sclk);
- goto err1;
+ return PTR_ERR(priv->sclk);
}
memset(&up, 0, sizeof(up));
@@ -136,20 +134,13 @@ static int serial8250_em_probe(struct platform_device *pdev)
ret = serial8250_register_8250_port(&up);
if (ret < 0) {
dev_err(&pdev->dev, "unable to register 8250 port\n");
- goto err2;
+ clk_disable(priv->sclk);
+ return ret;
}
priv->line = ret;
platform_set_drvdata(pdev, priv);
return 0;
-
- err2:
- clk_disable(priv->sclk);
- clk_put(priv->sclk);
- err1:
- kfree(priv);
- err0:
- return ret;
}
static int serial8250_em_remove(struct platform_device *pdev)
@@ -158,8 +149,6 @@ static int serial8250_em_remove(struct platform_device *pdev)
serial8250_unregister_port(priv->line);
clk_disable(priv->sclk);
- clk_put(priv->sclk);
- kfree(priv);
return 0;
}
diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c
index bb91b4713eb..2e3ea1a70d7 100644
--- a/drivers/tty/serial/8250/8250_gsc.c
+++ b/drivers/tty/serial/8250/8250_gsc.c
@@ -31,9 +31,8 @@ static int __init serial_init_chip(struct parisc_device *dev)
int err;
#ifdef CONFIG_64BIT
- extern int iosapic_serial_irq(int cellnum);
if (!dev->irq && (dev->id.sversion == 0xad))
- dev->irq = iosapic_serial_irq(dev->mod_index-1);
+ dev->irq = iosapic_serial_irq(dev);
#endif
if (!dev->irq) {
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index c52948b368d..c810da7c7a8 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1565,6 +1565,7 @@ pci_wch_ch353_setup(struct serial_private *priv,
#define PCI_DEVICE_ID_COMMTECH_4228PCIE 0x0021
#define PCI_DEVICE_ID_COMMTECH_4222PCIE 0x0022
#define PCI_DEVICE_ID_BROADCOM_TRUMANAGE 0x160a
+#define PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800 0x818e
#define PCI_VENDOR_ID_SUNIX 0x1fd4
#define PCI_DEVICE_ID_SUNIX_1999 0x1999
@@ -1587,8 +1588,8 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
* ADDI-DATA GmbH communication cards <info@addi-data.com>
*/
{
- .vendor = PCI_VENDOR_ID_ADDIDATA_OLD,
- .device = PCI_DEVICE_ID_ADDIDATA_APCI7800,
+ .vendor = PCI_VENDOR_ID_AMCC,
+ .device = PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.setup = addidata_apci7800_setup,
@@ -4697,8 +4698,8 @@ static struct pci_device_id serial_pci_tbl[] = {
0,
pbn_b0_1_115200 },
- { PCI_VENDOR_ID_ADDIDATA_OLD,
- PCI_DEVICE_ID_ADDIDATA_APCI7800,
+ { PCI_VENDOR_ID_AMCC,
+ PCI_DEVICE_ID_AMCC_ADDIDATA_APCI7800,
PCI_ANY_ID,
PCI_ANY_ID,
0,
@@ -4797,6 +4798,12 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_VENDOR_ID_IBM, 0x0299,
0, 0, pbn_b0_bt_2_115200 },
+ /*
+ * Other NetMos 9835 devices are most likely handled by the
+ * parport_serial driver; check drivers/parport/parport_serial.c
+ * before adding them here.
+ */
+
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
0xA000, 0x1000,
0, 0, pbn_b0_1_115200 },
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index a1ba94d6488..f3b306efaa5 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -116,6 +116,8 @@ config SERIAL_8250_PCI
This builds standard PCI serial support. You may be able to
disable this feature if you only need legacy serial support.
Saves about 9K.
+ Note that serial ports on NetMos 9835 Multi-I/O cards are handled
+ by the parport_serial driver, enabled with CONFIG_PARPORT_SERIAL.
config SERIAL_8250_HP300
tristate
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 5e3d68917ff..cc4c8682b47 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -277,7 +277,7 @@ config SERIAL_TEGRA
select SERIAL_CORE
help
Support for the on-chip UARTs on the NVIDIA Tegra series SOCs
- providing /dev/ttyHS0, 1, 2, 3 and 4 (note, some machines may not
+ providing /dev/ttyTHS0, 1, 2, 3 and 4 (note, some machines may not
provide all of these ports, depending on how the serial ports
are enabled). This driver uses the APB DMA to achieve higher baud rates
and better performance.
@@ -291,13 +291,13 @@ config SERIAL_MAX3100
config SERIAL_MAX310X
bool "MAX310X support"
- depends on SPI
+ depends on SPI_MASTER
select SERIAL_CORE
- select REGMAP_SPI if SPI
+ select REGMAP_SPI if SPI_MASTER
default n
help
This selects support for an advanced UART from Maxim (Dallas).
- Supported ICs are MAX3107, MAX3108.
+ Supported ICs are MAX3107, MAX3108, MAX3109, MAX14830.
Each IC contains 128 words each of receive and transmit FIFO
that can be controlled through I2C or high-speed SPI.
@@ -1401,13 +1401,16 @@ config SERIAL_XILINX_PS_UART_CONSOLE
Enable a Xilinx PS UART port to be the system console.
config SERIAL_AR933X
- bool "AR933X serial port support"
- depends on SOC_AR933X
+ tristate "AR933X serial port support"
+ depends on HAVE_CLK && SOC_AR933X
select SERIAL_CORE
help
If you have an Atheros AR933X SOC based board and want to use the
built-in UART of the SoC, say Y to this option.
+ To compile this driver as a module, choose M here: the
+ module will be called ar933x_uart.
+
config SERIAL_AR933X_CONSOLE
bool "Console on AR933X serial port"
depends on SERIAL_AR933X=y
@@ -1424,8 +1427,8 @@ config SERIAL_AR933X_NR_UARTS
to support.
config SERIAL_EFM32_UART
- tristate "EFM32 UART/USART port."
- depends on ARCH_EFM32
+ tristate "EFM32 UART/USART port"
+ depends on ARM && (ARCH_EFM32 || COMPILE_TEST)
select SERIAL_CORE
help
This driver supports the USART and UART ports on
@@ -1497,6 +1500,22 @@ config SERIAL_FSL_LPUART_CONSOLE
If you have enabled the lpuart serial port on the Freescale SoCs,
you can make it the console by answering Y to this option.
+config SERIAL_ST_ASC
+ tristate "ST ASC serial port support"
+ select SERIAL_CORE
+ help
+ This driver is for the on-chip Asynchronous Serial Controller on
+ STMicroelectronics STi SoCs.
+ ASC is embedded in the ST COMMS IP block. It supports Rx & Tx functionality.
+ It supports all industry-standard baud rates.
+
+ If unsure, say N.
+
+config SERIAL_ST_ASC_CONSOLE
+ bool "Support for console on ST ASC"
+ depends on SERIAL_ST_ASC=y
+ select SERIAL_CORE_CONSOLE
+
endmenu
endif # TTY
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index cf650f0cd6e..47b679c547e 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -65,6 +65,7 @@ obj-$(CONFIG_SERIAL_KGDB_NMI) += kgdb_nmi.o
obj-$(CONFIG_SERIAL_KS8695) += serial_ks8695.o
obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o
obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o
+obj-$(CONFIG_SERIAL_ST_ASC) += st-asc.o
obj-$(CONFIG_KGDB_SERIAL_CONSOLE) += kgdboc.o
obj-$(CONFIG_SERIAL_QE) += ucc_uart.o
obj-$(CONFIG_SERIAL_TIMBERDALE) += timbuart.o
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c
index c6bdb943726..18e038fbdcd 100644
--- a/drivers/tty/serial/altera_jtaguart.c
+++ b/drivers/tty/serial/altera_jtaguart.c
@@ -139,7 +139,9 @@ static void altera_jtaguart_rx_chars(struct altera_jtaguart *pp)
uart_insert_char(port, 0, 0, ch, flag);
}
+ spin_unlock(&port->lock);
tty_flip_buffer_push(&port->state->port);
+ spin_lock(&port->lock);
}
static void altera_jtaguart_tx_chars(struct altera_jtaguart *pp)
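/*
 * Illustrative sketch (not part of the patch): several hunks in this series
 * release port->lock around tty_flip_buffer_push(), because pushing received
 * data may call back into code that takes the same lock (throttling,
 * wake-ups).  Minimal user-space model of the pattern: drop the lock before
 * invoking a callback that may re-enter and take it again.
 */
#include <pthread.h>

static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

static void callback_that_may_take_lock(void)
{
	pthread_mutex_lock(&port_lock);
	/* e.g. adjust flow-control state */
	pthread_mutex_unlock(&port_lock);
}

static void rx_chars(void)
{
	pthread_mutex_lock(&port_lock);
	/* ... pull characters out of the FIFO under the lock ... */
	pthread_mutex_unlock(&port_lock);   /* avoid self-deadlock */
	callback_that_may_take_lock();      /* "tty_flip_buffer_push" */
	pthread_mutex_lock(&port_lock);
	/* ... continue with the lock held, as the caller expects ... */
	pthread_mutex_unlock(&port_lock);
}

int main(void)
{
	rx_chars();
	return 0;
}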
@@ -408,7 +410,8 @@ static struct uart_driver altera_jtaguart_driver = {
static int altera_jtaguart_probe(struct platform_device *pdev)
{
- struct altera_jtaguart_platform_uart *platp = pdev->dev.platform_data;
+ struct altera_jtaguart_platform_uart *platp =
+ dev_get_platdata(&pdev->dev);
struct uart_port *port;
struct resource *res_irq, *res_mem;
int i = pdev->id;
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index 1d46966e2a6..6431472aeb1 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -231,7 +231,9 @@ static void altera_uart_rx_chars(struct altera_uart *pp)
flag);
}
+ spin_unlock(&port->lock);
tty_flip_buffer_push(&port->state->port);
+ spin_lock(&port->lock);
}
static void altera_uart_tx_chars(struct altera_uart *pp)
@@ -534,7 +536,7 @@ static int altera_uart_get_of_uartclk(struct platform_device *pdev,
static int altera_uart_probe(struct platform_device *pdev)
{
- struct altera_uart_platform_uart *platp = pdev->dev.platform_data;
+ struct altera_uart_platform_uart *platp = dev_get_platdata(&pdev->dev);
struct uart_port *port;
struct resource *res_mem;
struct resource *res_irq;
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index c3684051952..8b90f0b6dfd 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -721,7 +721,7 @@ static int pl010_probe(struct amba_device *dev, const struct amba_id *id)
uap->port.flags = UPF_BOOT_AUTOCONF;
uap->port.line = i;
uap->dev = dev;
- uap->data = dev->dev.platform_data;
+ uap->data = dev_get_platdata(&dev->dev);
amba_ports[i] = uap;
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 28b35ad9c6c..aaa22867e65 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -265,7 +265,7 @@ static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
static void pl011_dma_probe_initcall(struct device *dev, struct uart_amba_port *uap)
{
/* DMA is the sole user of the platform data right now */
- struct amba_pl011_data *plat = uap->port.dev->platform_data;
+ struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
struct dma_slave_config tx_conf = {
.dst_addr = uap->port.mapbase + UART01x_DR,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
@@ -677,6 +677,8 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
* Locking: called with port lock held and IRQs disabled.
*/
static void pl011_dma_flush_buffer(struct uart_port *port)
+__releases(&uap->port.lock)
+__acquires(&uap->port.lock)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
@@ -1198,6 +1200,8 @@ static void pl011_enable_ms(struct uart_port *port)
}
static void pl011_rx_chars(struct uart_amba_port *uap)
+__releases(&uap->port.lock)
+__acquires(&uap->port.lock)
{
pl011_fifo_to_tty(uap);
@@ -1497,10 +1501,10 @@ static int pl011_hwinit(struct uart_port *port)
uap->im = readw(uap->port.membase + UART011_IMSC);
writew(UART011_RTIM | UART011_RXIM, uap->port.membase + UART011_IMSC);
- if (uap->port.dev->platform_data) {
+ if (dev_get_platdata(uap->port.dev)) {
struct amba_pl011_data *plat;
- plat = uap->port.dev->platform_data;
+ plat = dev_get_platdata(uap->port.dev);
if (plat->init)
plat->init();
}
@@ -1645,10 +1649,10 @@ static void pl011_shutdown(struct uart_port *port)
/* Optionally let pins go into sleep states */
pinctrl_pm_select_sleep_state(port->dev);
- if (uap->port.dev->platform_data) {
+ if (dev_get_platdata(uap->port.dev)) {
struct amba_pl011_data *plat;
- plat = uap->port.dev->platform_data;
+ plat = dev_get_platdata(uap->port.dev);
if (plat->exit)
plat->exit();
}
@@ -2002,10 +2006,10 @@ static int __init pl011_console_setup(struct console *co, char *options)
if (ret)
return ret;
- if (uap->port.dev->platform_data) {
+ if (dev_get_platdata(uap->port.dev)) {
struct amba_pl011_data *plat;
- plat = uap->port.dev->platform_data;
+ plat = dev_get_platdata(uap->port.dev);
if (plat->init)
plat->init();
}
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index 6331464d910..de11ab8ffd9 100644
--- a/drivers/tty/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
@@ -125,7 +125,9 @@ static void apbuart_rx_chars(struct uart_port *port)
status = UART_GET_STATUS(port);
}
+ spin_unlock(&port->lock);
tty_flip_buffer_push(&port->state->port);
+ spin_lock(&port->lock);
}
static void apbuart_tx_chars(struct uart_port *port)
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 27f20c57abe..acd03af7cd5 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -17,6 +17,8 @@
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
@@ -24,11 +26,11 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/clk.h>
#include <asm/div64.h>
#include <asm/mach-ath79/ar933x_uart.h>
-#include <asm/mach-ath79/ar933x_uart_platform.h>
#define DRIVER_NAME "ar933x-uart"
@@ -47,8 +49,14 @@ struct ar933x_uart_port {
unsigned int ier; /* shadow Interrupt Enable Register */
unsigned int min_baud;
unsigned int max_baud;
+ struct clk *clk;
};
+static inline bool ar933x_uart_console_enabled(void)
+{
+ return config_enabled(CONFIG_SERIAL_AR933X_CONSOLE);
+}
+
static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
int offset)
{
@@ -322,7 +330,9 @@ static void ar933x_uart_rx_chars(struct ar933x_uart_port *up)
tty_insert_flip_char(port, ch, TTY_NORMAL);
} while (max_count-- > 0);
+ spin_unlock(&up->port.lock);
tty_flip_buffer_push(port);
+ spin_lock(&up->port.lock);
}
static void ar933x_uart_tx_chars(struct ar933x_uart_port *up)
@@ -497,8 +507,6 @@ static struct uart_ops ar933x_uart_ops = {
.verify_port = ar933x_uart_verify_port,
};
-#ifdef CONFIG_SERIAL_AR933X_CONSOLE
-
static struct ar933x_uart_port *
ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS];
@@ -597,80 +605,88 @@ static struct console ar933x_uart_console = {
static void ar933x_uart_add_console_port(struct ar933x_uart_port *up)
{
+ if (!ar933x_uart_console_enabled())
+ return;
+
ar933x_console_ports[up->port.line] = up;
}
-#define AR933X_SERIAL_CONSOLE (&ar933x_uart_console)
-
-#else
-
-static inline void ar933x_uart_add_console_port(struct ar933x_uart_port *up) {}
-
-#define AR933X_SERIAL_CONSOLE NULL
-
-#endif /* CONFIG_SERIAL_AR933X_CONSOLE */
-
static struct uart_driver ar933x_uart_driver = {
.owner = THIS_MODULE,
.driver_name = DRIVER_NAME,
.dev_name = "ttyATH",
.nr = CONFIG_SERIAL_AR933X_NR_UARTS,
- .cons = AR933X_SERIAL_CONSOLE,
+ .cons = NULL, /* filled in runtime */
};
static int ar933x_uart_probe(struct platform_device *pdev)
{
- struct ar933x_uart_platform_data *pdata;
struct ar933x_uart_port *up;
struct uart_port *port;
struct resource *mem_res;
struct resource *irq_res;
+ struct device_node *np;
unsigned int baud;
int id;
int ret;
- pdata = pdev->dev.platform_data;
- if (!pdata)
- return -EINVAL;
-
- id = pdev->id;
- if (id == -1)
- id = 0;
+ np = pdev->dev.of_node;
+ if (config_enabled(CONFIG_OF) && np) {
+ id = of_alias_get_id(np, "serial");
+ if (id < 0) {
+ dev_err(&pdev->dev, "unable to get alias id, err=%d\n",
+ id);
+ return id;
+ }
+ } else {
+ id = pdev->id;
+ if (id == -1)
+ id = 0;
+ }
if (id > CONFIG_SERIAL_AR933X_NR_UARTS)
return -EINVAL;
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem_res) {
- dev_err(&pdev->dev, "no MEM resource\n");
- return -EINVAL;
- }
-
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq_res) {
dev_err(&pdev->dev, "no IRQ resource\n");
return -EINVAL;
}
- up = kzalloc(sizeof(struct ar933x_uart_port), GFP_KERNEL);
+ up = devm_kzalloc(&pdev->dev, sizeof(struct ar933x_uart_port),
+ GFP_KERNEL);
if (!up)
return -ENOMEM;
+ up->clk = devm_clk_get(&pdev->dev, "uart");
+ if (IS_ERR(up->clk)) {
+ dev_err(&pdev->dev, "unable to get UART clock\n");
+ return PTR_ERR(up->clk);
+ }
+
port = &up->port;
- port->mapbase = mem_res->start;
- port->membase = ioremap(mem_res->start, AR933X_UART_REGS_SIZE);
- if (!port->membase) {
- ret = -ENOMEM;
- goto err_free_up;
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ port->membase = devm_ioremap_resource(&pdev->dev, mem_res);
+ if (IS_ERR(port->membase))
+ return PTR_ERR(port->membase);
+
+ ret = clk_prepare_enable(up->clk);
+ if (ret)
+ return ret;
+
+ port->uartclk = clk_get_rate(up->clk);
+ if (!port->uartclk) {
+ ret = -EINVAL;
+ goto err_disable_clk;
}
+ port->mapbase = mem_res->start;
port->line = id;
port->irq = irq_res->start;
port->dev = &pdev->dev;
port->type = PORT_AR933X;
port->iotype = UPIO_MEM32;
- port->uartclk = pdata->uartclk;
port->regshift = 2;
port->fifosize = AR933X_UART_FIFO_SIZE;
@@ -686,15 +702,13 @@ static int ar933x_uart_probe(struct platform_device *pdev)
ret = uart_add_one_port(&ar933x_uart_driver, &up->port);
if (ret)
- goto err_unmap;
+ goto err_disable_clk;
platform_set_drvdata(pdev, up);
return 0;
-err_unmap:
- iounmap(up->port.membase);
-err_free_up:
- kfree(up);
+err_disable_clk:
+ clk_disable_unprepare(up->clk);
return ret;
}
@@ -703,23 +717,30 @@ static int ar933x_uart_remove(struct platform_device *pdev)
struct ar933x_uart_port *up;
up = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
if (up) {
uart_remove_one_port(&ar933x_uart_driver, &up->port);
- iounmap(up->port.membase);
- kfree(up);
+ clk_disable_unprepare(up->clk);
}
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id ar933x_uart_of_ids[] = {
+ { .compatible = "qca,ar9330-uart" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ar933x_uart_of_ids);
+#endif
+
static struct platform_driver ar933x_uart_platform_driver = {
.probe = ar933x_uart_probe,
.remove = ar933x_uart_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(ar933x_uart_of_ids),
},
};
@@ -727,7 +748,9 @@ static int __init ar933x_uart_init(void)
{
int ret;
- ar933x_uart_driver.nr = CONFIG_SERIAL_AR933X_NR_UARTS;
+ if (ar933x_uart_console_enabled())
+ ar933x_uart_driver.cons = &ar933x_uart_console;
+
ret = uart_register_driver(&ar933x_uart_driver);
if (ret)
goto err_out;
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index cbf1d155b7b..569872f4c9b 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -209,9 +209,9 @@ static void arc_serial_start_tx(struct uart_port *port)
arc_serial_tx_chars(uart);
}
-static void arc_serial_rx_chars(struct arc_uart_port *uart)
+static void arc_serial_rx_chars(struct arc_uart_port *uart, unsigned int status)
{
- unsigned int status, ch, flg = 0;
+ unsigned int ch, flg = 0;
/*
* UART has 4 deep RX-FIFO. Driver's recognition of this fact
@@ -222,11 +222,11 @@ static void arc_serial_rx_chars(struct arc_uart_port *uart)
* before RX-EMPTY=0, implies some sort of buffering going on in the
* controller, which is indeed the Rx-FIFO.
*/
- while (!((status = UART_GET_STATUS(uart)) & RXEMPTY)) {
-
- ch = UART_GET_DATA(uart);
- uart->port.icount.rx++;
-
+ do {
+ /*
+ * This could be an Rx interrupt for an error (no data),
+ * so check for and clear the error condition first
+ */
if (unlikely(status & (RXOERR | RXFERR))) {
if (status & RXOERR) {
uart->port.icount.overrun++;
@@ -242,14 +242,19 @@ static void arc_serial_rx_chars(struct arc_uart_port *uart)
} else
flg = TTY_NORMAL;
- if (unlikely(uart_handle_sysrq_char(&uart->port, ch)))
- goto done;
+ if (status & RXEMPTY)
+ continue;
- uart_insert_char(&uart->port, status, RXOERR, ch, flg);
+ ch = UART_GET_DATA(uart);
+ uart->port.icount.rx++;
+
+ if (!(uart_handle_sysrq_char(&uart->port, ch)))
+ uart_insert_char(&uart->port, status, RXOERR, ch, flg);
-done:
+ spin_unlock(&uart->port.lock);
tty_flip_buffer_push(&uart->port.state->port);
- }
+ spin_lock(&uart->port.lock);
+ } while (!((status = UART_GET_STATUS(uart)) & RXEMPTY));
}
/*
@@ -292,11 +297,11 @@ static irqreturn_t arc_serial_isr(int irq, void *dev_id)
* notifications from the UART Controller.
* To demultiplex between the two, we check the relevant bits
*/
- if ((status & RXIENB) && !(status & RXEMPTY)) {
+ if (status & RXIENB) {
/* already in ISR, no need of xx_irqsave */
spin_lock(&uart->port.lock);
- arc_serial_rx_chars(uart);
+ arc_serial_rx_chars(uart, status);
spin_unlock(&uart->port.lock);
}
@@ -528,7 +533,7 @@ arc_uart_init_one(struct platform_device *pdev, int dev_id)
unsigned long *plat_data;
struct arc_uart_port *uart = &arc_uart_ports[dev_id];
- plat_data = ((unsigned long *)(pdev->dev.platform_data));
+ plat_data = (unsigned long *)dev_get_platdata(&pdev->dev);
if (!plat_data)
return -ENODEV;
@@ -773,6 +778,6 @@ module_init(arc_serial_init);
module_exit(arc_serial_exit);
MODULE_LICENSE("GPL");
-MODULE_ALIAS("plat-arcfpga/uart");
+MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Vineet Gupta");
MODULE_DESCRIPTION("ARC(Synopsys) On-Chip(fpga) serial driver");
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 691265faebb..d067285a2d2 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -39,8 +39,8 @@
#include <linux/atmel_pdc.h>
#include <linux/atmel_serial.h>
#include <linux/uaccess.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/platform_data/atmel.h>
+#include <linux/timer.h>
#include <asm/io.h>
#include <asm/ioctls.h>
@@ -98,6 +98,7 @@ static void atmel_stop_rx(struct uart_port *port);
#define UART_PUT_BRGR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_BRGR)
#define UART_PUT_RTOR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_RTOR)
#define UART_PUT_TTGR(port, v) __raw_writel(v, (port)->membase + ATMEL_US_TTGR)
+#define UART_GET_IP_NAME(port) __raw_readl((port)->membase + ATMEL_US_NAME)
/* PDC registers */
#define UART_PUT_PTCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_PTCR)
@@ -140,13 +141,25 @@ struct atmel_uart_port {
u32 backup_imr; /* IMR saved during suspend */
int break_active; /* break being received */
- short use_dma_rx; /* enable PDC receiver */
+ bool use_dma_rx; /* enable DMA receiver */
+ bool use_pdc_rx; /* enable PDC receiver */
short pdc_rx_idx; /* current PDC RX buffer */
struct atmel_dma_buffer pdc_rx[2]; /* PDC receiver */
- short use_dma_tx; /* enable PDC transmitter */
+ bool use_dma_tx; /* enable DMA transmitter */
+ bool use_pdc_tx; /* enable PDC transmitter */
struct atmel_dma_buffer pdc_tx; /* PDC transmitter */
+ spinlock_t lock_tx; /* port lock */
+ spinlock_t lock_rx; /* port lock */
+ struct dma_chan *chan_tx;
+ struct dma_chan *chan_rx;
+ struct dma_async_tx_descriptor *desc_tx;
+ struct dma_async_tx_descriptor *desc_rx;
+ dma_cookie_t cookie_tx;
+ dma_cookie_t cookie_rx;
+ struct scatterlist sg_tx;
+ struct scatterlist sg_rx;
struct tasklet_struct tasklet;
unsigned int irq_status;
unsigned int irq_status_prev;
@@ -155,6 +168,14 @@ struct atmel_uart_port {
struct serial_rs485 rs485; /* rs485 settings */
unsigned int tx_done_mask;
+ bool is_usart; /* usart or uart */
+ struct timer_list uart_timer; /* uart timer */
+ int (*prepare_rx)(struct uart_port *port);
+ int (*prepare_tx)(struct uart_port *port);
+ void (*schedule_rx)(struct uart_port *port);
+ void (*schedule_tx)(struct uart_port *port);
+ void (*release_rx)(struct uart_port *port);
+ void (*release_tx)(struct uart_port *port);
};
static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART];
@@ -181,31 +202,45 @@ to_atmel_uart_port(struct uart_port *uart)
}
#ifdef CONFIG_SERIAL_ATMEL_PDC
-static bool atmel_use_dma_rx(struct uart_port *port)
+static bool atmel_use_pdc_rx(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- return atmel_port->use_dma_rx;
+ return atmel_port->use_pdc_rx;
}
-static bool atmel_use_dma_tx(struct uart_port *port)
+static bool atmel_use_pdc_tx(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- return atmel_port->use_dma_tx;
+ return atmel_port->use_pdc_tx;
}
#else
-static bool atmel_use_dma_rx(struct uart_port *port)
+static bool atmel_use_pdc_rx(struct uart_port *port)
{
return false;
}
-static bool atmel_use_dma_tx(struct uart_port *port)
+static bool atmel_use_pdc_tx(struct uart_port *port)
{
return false;
}
#endif
+static bool atmel_use_dma_tx(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ return atmel_port->use_dma_tx;
+}
+
+static bool atmel_use_dma_rx(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ return atmel_port->use_dma_rx;
+}
+
/* Enable or disable the rs485 support */
void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
{
@@ -233,7 +268,7 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
mode |= ATMEL_US_USMODE_RS485;
} else {
dev_dbg(port->dev, "Setting UART to RS232\n");
- if (atmel_use_dma_tx(port))
+ if (atmel_use_pdc_tx(port))
atmel_port->tx_done_mask = ATMEL_US_ENDTX |
ATMEL_US_TXBUFE;
else
@@ -345,7 +380,7 @@ static void atmel_stop_tx(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- if (atmel_use_dma_tx(port)) {
+ if (atmel_use_pdc_tx(port)) {
/* disable PDC transmit */
UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS);
}
@@ -364,7 +399,7 @@ static void atmel_start_tx(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- if (atmel_use_dma_tx(port)) {
+ if (atmel_use_pdc_tx(port)) {
if (UART_GET_PTSR(port) & ATMEL_PDC_TXTEN)
/* The transmitter is already running. Yes, we
really need this.*/
@@ -390,7 +425,7 @@ static void atmel_start_rx(struct uart_port *port)
UART_PUT_CR(port, ATMEL_US_RXEN);
- if (atmel_use_dma_rx(port)) {
+ if (atmel_use_pdc_rx(port)) {
/* enable PDC controller */
UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
port->read_status_mask);
@@ -407,7 +442,7 @@ static void atmel_stop_rx(struct uart_port *port)
{
UART_PUT_CR(port, ATMEL_US_RXDIS);
- if (atmel_use_dma_rx(port)) {
+ if (atmel_use_pdc_rx(port)) {
/* disable PDC receive */
UART_PUT_PTCR(port, ATMEL_PDC_RXTDIS);
UART_PUT_IDR(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
@@ -564,6 +599,372 @@ static void atmel_tx_chars(struct uart_port *port)
UART_PUT_IER(port, atmel_port->tx_done_mask);
}
+static void atmel_complete_tx_dma(void *arg)
+{
+ struct atmel_uart_port *atmel_port = arg;
+ struct uart_port *port = &atmel_port->uart;
+ struct circ_buf *xmit = &port->state->xmit;
+ struct dma_chan *chan = atmel_port->chan_tx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ if (chan)
+ dmaengine_terminate_all(chan);
+ xmit->tail += sg_dma_len(&atmel_port->sg_tx);
+ xmit->tail &= UART_XMIT_SIZE - 1;
+
+ port->icount.tx += sg_dma_len(&atmel_port->sg_tx);
+
+ spin_lock_irq(&atmel_port->lock_tx);
+ async_tx_ack(atmel_port->desc_tx);
+ atmel_port->cookie_tx = -EINVAL;
+ atmel_port->desc_tx = NULL;
+ spin_unlock_irq(&atmel_port->lock_tx);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ /* Do we really need this? */
+ if (!uart_circ_empty(xmit))
+ tasklet_schedule(&atmel_port->tasklet);
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
+static void atmel_release_tx_dma(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct dma_chan *chan = atmel_port->chan_tx;
+
+ if (chan) {
+ dmaengine_terminate_all(chan);
+ dma_release_channel(chan);
+ dma_unmap_sg(port->dev, &atmel_port->sg_tx, 1,
+ DMA_MEM_TO_DEV);
+ }
+
+ atmel_port->desc_tx = NULL;
+ atmel_port->chan_tx = NULL;
+ atmel_port->cookie_tx = -EINVAL;
+}
+
+/*
+ * Called from tasklet with TXRDY interrupt disabled.
+ */
+static void atmel_tx_dma(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct circ_buf *xmit = &port->state->xmit;
+ struct dma_chan *chan = atmel_port->chan_tx;
+ struct dma_async_tx_descriptor *desc;
+ struct scatterlist *sg = &atmel_port->sg_tx;
+
+ /* Make sure we have an idle channel */
+ if (atmel_port->desc_tx != NULL)
+ return;
+
+ if (!uart_circ_empty(xmit) && !uart_tx_stopped(port)) {
+ /*
+ * DMA is idle now.
+ * Port xmit buffer is already mapped,
+ * and it is one page... Just adjust
+ * offsets and lengths. Since it is a circular buffer,
+ * we have to transmit till the end, and then the rest.
+ * Take the port lock to get a
+ * consistent xmit buffer state.
+ */
+ sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
+ sg_dma_address(sg) = (sg_dma_address(sg) &
+ ~(UART_XMIT_SIZE - 1))
+ + sg->offset;
+ sg_dma_len(sg) = CIRC_CNT_TO_END(xmit->head,
+ xmit->tail,
+ UART_XMIT_SIZE);
+ BUG_ON(!sg_dma_len(sg));
+
+ desc = dmaengine_prep_slave_sg(chan,
+ sg,
+ 1,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(port->dev, "Failed to send via dma!\n");
+ return;
+ }
+
+ dma_sync_sg_for_device(port->dev, sg, 1, DMA_MEM_TO_DEV);
+
+ atmel_port->desc_tx = desc;
+ desc->callback = atmel_complete_tx_dma;
+ desc->callback_param = atmel_port;
+ atmel_port->cookie_tx = dmaengine_submit(desc);
+
+ } else {
+ if (atmel_port->rs485.flags & SER_RS485_ENABLED) {
+ /* DMA done, stop TX, start RX for RS485 */
+ atmel_start_rx(port);
+ }
+ }
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+}
+
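/*
 * Illustrative sketch (not part of the patch): atmel_tx_dma() maps only the
 * contiguous run from xmit->tail up to the head or to the end of the circular
 * buffer, whichever comes first; a later pass picks up the wrapped remainder.
 * The helper below mirrors what CIRC_CNT_TO_END() in <linux/circ_buf.h>
 * computes for a power-of-two sized buffer.
 */
#include <assert.h>
#include <stddef.h>

#define XMIT_SIZE 4096 /* power of two, like UART_XMIT_SIZE */

static size_t cnt_to_end(size_t head, size_t tail, size_t size)
{
	size_t end = size - tail;               /* room until the wrap point */
	size_t n = (head + end) & (size - 1);   /* chars before head or wrap */

	return n < end ? n : end;
}

int main(void)
{
	/* No wrap: tail=100, head=200 gives one run of 100 bytes. */
	assert(cnt_to_end(200, 100, XMIT_SIZE) == 100);
	/* Wrapped: tail=4000, head=50 gives a first run of 96 bytes. */
	assert(cnt_to_end(50, 4000, XMIT_SIZE) == 96);
	return 0;
}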
+static int atmel_prepare_tx_dma(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ dma_cap_mask_t mask;
+ struct dma_slave_config config;
+ int ret, nent;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ atmel_port->chan_tx = dma_request_slave_channel(port->dev, "tx");
+ if (atmel_port->chan_tx == NULL)
+ goto chan_err;
+ dev_info(port->dev, "using %s for tx DMA transfers\n",
+ dma_chan_name(atmel_port->chan_tx));
+
+ spin_lock_init(&atmel_port->lock_tx);
+ sg_init_table(&atmel_port->sg_tx, 1);
+ /* UART circular tx buffer is an aligned page. */
+ BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK);
+ sg_set_page(&atmel_port->sg_tx,
+ virt_to_page(port->state->xmit.buf),
+ UART_XMIT_SIZE,
+ (int)port->state->xmit.buf & ~PAGE_MASK);
+ nent = dma_map_sg(port->dev,
+ &atmel_port->sg_tx,
+ 1,
+ DMA_MEM_TO_DEV);
+
+ if (!nent) {
+ dev_dbg(port->dev, "need to release resource of dma\n");
+ goto chan_err;
+ } else {
+ dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__,
+ sg_dma_len(&atmel_port->sg_tx),
+ port->state->xmit.buf,
+ sg_dma_address(&atmel_port->sg_tx));
+ }
+
+ /* Configure the slave DMA */
+ memset(&config, 0, sizeof(config));
+ config.direction = DMA_MEM_TO_DEV;
+ config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ config.dst_addr = port->mapbase + ATMEL_US_THR;
+
+ ret = dmaengine_device_control(atmel_port->chan_tx,
+ DMA_SLAVE_CONFIG,
+ (unsigned long)&config);
+ if (ret) {
+ dev_err(port->dev, "DMA tx slave configuration failed\n");
+ goto chan_err;
+ }
+
+ return 0;
+
+chan_err:
+ dev_err(port->dev, "TX channel not available, switch to pio\n");
+ atmel_port->use_dma_tx = 0;
+ if (atmel_port->chan_tx)
+ atmel_release_tx_dma(port);
+ return -EINVAL;
+}
+
+static void atmel_flip_buffer_rx_dma(struct uart_port *port,
+ char *buf, size_t count)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct tty_port *tport = &port->state->port;
+
+ dma_sync_sg_for_cpu(port->dev,
+ &atmel_port->sg_rx,
+ 1,
+ DMA_DEV_TO_MEM);
+
+ tty_insert_flip_string(tport, buf, count);
+
+ dma_sync_sg_for_device(port->dev,
+ &atmel_port->sg_rx,
+ 1,
+ DMA_DEV_TO_MEM);
+ /*
+ * Drop the lock here since it might end up calling
+ * uart_start(), which takes the lock.
+ */
+ spin_unlock(&port->lock);
+ tty_flip_buffer_push(tport);
+ spin_lock(&port->lock);
+}
+
+static void atmel_complete_rx_dma(void *arg)
+{
+ struct uart_port *port = arg;
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ tasklet_schedule(&atmel_port->tasklet);
+}
+
+static void atmel_release_rx_dma(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct dma_chan *chan = atmel_port->chan_rx;
+
+ if (chan) {
+ dmaengine_terminate_all(chan);
+ dma_release_channel(chan);
+ dma_unmap_sg(port->dev, &atmel_port->sg_rx, 1,
+ DMA_DEV_TO_MEM);
+ }
+
+ atmel_port->desc_rx = NULL;
+ atmel_port->chan_rx = NULL;
+ atmel_port->cookie_rx = -EINVAL;
+
+ if (!atmel_port->is_usart)
+ del_timer_sync(&atmel_port->uart_timer);
+}
+
+static void atmel_rx_from_dma(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct circ_buf *ring = &atmel_port->rx_ring;
+ struct dma_chan *chan = atmel_port->chan_rx;
+ struct dma_tx_state state;
+ enum dma_status dmastat;
+ size_t pending, count;
+
+
+ /* Reset the UART timeout early so that we don't miss one */
+ UART_PUT_CR(port, ATMEL_US_STTTO);
+ dmastat = dmaengine_tx_status(chan,
+ atmel_port->cookie_rx,
+ &state);
+ /* Restart a new tasklet if DMA status is error */
+ if (dmastat == DMA_ERROR) {
+ dev_dbg(port->dev, "Get residue error, restart tasklet\n");
+ UART_PUT_IER(port, ATMEL_US_TIMEOUT);
+ tasklet_schedule(&atmel_port->tasklet);
+ return;
+ }
+ /* current transfer size should be no larger than the dma buffer */
+ pending = sg_dma_len(&atmel_port->sg_rx) - state.residue;
+ BUG_ON(pending > sg_dma_len(&atmel_port->sg_rx));
+
+ /*
+ * This will take the chars we have so far;
+ * ring->head records how much has already been pushed, so only
+ * newly received bytes are inserted into the TTY layer.
+ */
+ if (pending > ring->head) {
+ count = pending - ring->head;
+
+ atmel_flip_buffer_rx_dma(port, ring->buf + ring->head, count);
+
+ ring->head += count;
+ if (ring->head == sg_dma_len(&atmel_port->sg_rx))
+ ring->head = 0;
+
+ port->icount.rx += count;
+ }
+
+ UART_PUT_IER(port, ATMEL_US_TIMEOUT);
+}
+
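/*
 * Illustrative sketch (not part of the patch): with a cyclic RX transfer the
 * amount written so far is the buffer length minus the residue reported by
 * the DMA engine; only the bytes beyond ring->head are new and get pushed,
 * and head wraps when it reaches the end of the buffer, mirroring the
 * arithmetic in atmel_rx_from_dma() above.
 */
#include <assert.h>
#include <stddef.h>

#define RX_RING_SIZE 1024

static size_t take_new_bytes(size_t *head, size_t residue)
{
	size_t pending = RX_RING_SIZE - residue; /* written by DMA so far */
	size_t count = 0;

	if (pending > *head) {
		count = pending - *head;  /* bytes not yet pushed to the tty */
		*head += count;
		if (*head == RX_RING_SIZE)
			*head = 0;
	}
	return count;
}

int main(void)
{
	size_t head = 0;

	assert(take_new_bytes(&head, RX_RING_SIZE - 100) == 100); /* 100 new */
	assert(take_new_bytes(&head, RX_RING_SIZE - 100) == 0);   /* no change */
	assert(take_new_bytes(&head, RX_RING_SIZE - 250) == 150); /* 150 more */
	return 0;
}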
+static int atmel_prepare_rx_dma(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct dma_async_tx_descriptor *desc;
+ dma_cap_mask_t mask;
+ struct dma_slave_config config;
+ struct circ_buf *ring;
+ int ret, nent;
+
+ ring = &atmel_port->rx_ring;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_CYCLIC, mask);
+
+ atmel_port->chan_rx = dma_request_slave_channel(port->dev, "rx");
+ if (atmel_port->chan_rx == NULL)
+ goto chan_err;
+ dev_info(port->dev, "using %s for rx DMA transfers\n",
+ dma_chan_name(atmel_port->chan_rx));
+
+ spin_lock_init(&atmel_port->lock_rx);
+ sg_init_table(&atmel_port->sg_rx, 1);
+ /* UART circular rx buffer is an aligned page. */
+ BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK);
+ sg_set_page(&atmel_port->sg_rx,
+ virt_to_page(ring->buf),
+ ATMEL_SERIAL_RINGSIZE,
+ (int)ring->buf & ~PAGE_MASK);
+ nent = dma_map_sg(port->dev,
+ &atmel_port->sg_rx,
+ 1,
+ DMA_DEV_TO_MEM);
+
+ if (!nent) {
+ dev_dbg(port->dev, "need to release resource of dma\n");
+ goto chan_err;
+ } else {
+ dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__,
+ sg_dma_len(&atmel_port->sg_rx),
+ ring->buf,
+ sg_dma_address(&atmel_port->sg_rx));
+ }
+
+ /* Configure the slave DMA */
+ memset(&config, 0, sizeof(config));
+ config.direction = DMA_DEV_TO_MEM;
+ config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ config.src_addr = port->mapbase + ATMEL_US_RHR;
+
+ ret = dmaengine_device_control(atmel_port->chan_rx,
+ DMA_SLAVE_CONFIG,
+ (unsigned long)&config);
+ if (ret) {
+ dev_err(port->dev, "DMA rx slave configuration failed\n");
+ goto chan_err;
+ }
+ /*
+ * Prepare a cyclic dma transfer split into two periods,
+ * each half the ring buffer size
+ */
+ desc = dmaengine_prep_dma_cyclic(atmel_port->chan_rx,
+ sg_dma_address(&atmel_port->sg_rx),
+ sg_dma_len(&atmel_port->sg_rx),
+ sg_dma_len(&atmel_port->sg_rx)/2,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ desc->callback = atmel_complete_rx_dma;
+ desc->callback_param = port;
+ atmel_port->desc_rx = desc;
+ atmel_port->cookie_rx = dmaengine_submit(desc);
+
+ return 0;
+
+chan_err:
+ dev_err(port->dev, "RX channel not available, switch to pio\n");
+ atmel_port->use_dma_rx = 0;
+ if (atmel_port->chan_rx)
+ atmel_release_rx_dma(port);
+ return -EINVAL;
+}
+
+static void atmel_uart_timer_callback(unsigned long data)
+{
+ struct uart_port *port = (void *)data;
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ tasklet_schedule(&atmel_port->tasklet);
+ mod_timer(&atmel_port->uart_timer, jiffies + uart_poll_timeout(port));
+}
+
/*
* receive interrupt handler.
*/
@@ -572,7 +973,7 @@ atmel_handle_receive(struct uart_port *port, unsigned int pending)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- if (atmel_use_dma_rx(port)) {
+ if (atmel_use_pdc_rx(port)) {
/*
* PDC receive. Just schedule the tasklet and let it
* figure out the details.
@@ -591,6 +992,13 @@ atmel_handle_receive(struct uart_port *port, unsigned int pending)
atmel_pdc_rxerr(port, pending);
}
+ if (atmel_use_dma_rx(port)) {
+ if (pending & ATMEL_US_TIMEOUT) {
+ UART_PUT_IDR(port, ATMEL_US_TIMEOUT);
+ tasklet_schedule(&atmel_port->tasklet);
+ }
+ }
+
/* Interrupt receive */
if (pending & ATMEL_US_RXRDY)
atmel_rx_chars(port);
@@ -658,10 +1066,21 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id)
return pass_counter ? IRQ_HANDLED : IRQ_NONE;
}
+static void atmel_release_tx_pdc(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
+
+ dma_unmap_single(port->dev,
+ pdc->dma_addr,
+ pdc->dma_size,
+ DMA_TO_DEVICE);
+}
+
/*
* Called from tasklet with ENDTX and TXBUFE interrupts disabled.
*/
-static void atmel_tx_dma(struct uart_port *port)
+static void atmel_tx_pdc(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct circ_buf *xmit = &port->state->xmit;
@@ -710,6 +1129,23 @@ static void atmel_tx_dma(struct uart_port *port)
uart_write_wakeup(port);
}
+static int atmel_prepare_tx_pdc(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
+ struct circ_buf *xmit = &port->state->xmit;
+
+ pdc->buf = xmit->buf;
+ pdc->dma_addr = dma_map_single(port->dev,
+ pdc->buf,
+ UART_XMIT_SIZE,
+ DMA_TO_DEVICE);
+ pdc->dma_size = UART_XMIT_SIZE;
+ pdc->ofs = 0;
+
+ return 0;
+}
+
static void atmel_rx_from_ring(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
@@ -778,7 +1214,26 @@ static void atmel_rx_from_ring(struct uart_port *port)
spin_lock(&port->lock);
}
-static void atmel_rx_from_dma(struct uart_port *port)
+static void atmel_release_rx_pdc(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
+
+ dma_unmap_single(port->dev,
+ pdc->dma_addr,
+ pdc->dma_size,
+ DMA_FROM_DEVICE);
+ kfree(pdc->buf);
+ }
+
+ if (!atmel_port->is_usart)
+ del_timer_sync(&atmel_port->uart_timer);
+}
+
+static void atmel_rx_from_pdc(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct tty_port *tport = &port->state->port;
@@ -855,6 +1310,45 @@ static void atmel_rx_from_dma(struct uart_port *port)
UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
}
+static int atmel_prepare_rx_pdc(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
+
+ pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
+ if (pdc->buf == NULL) {
+ if (i != 0) {
+ dma_unmap_single(port->dev,
+ atmel_port->pdc_rx[0].dma_addr,
+ PDC_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ kfree(atmel_port->pdc_rx[0].buf);
+ }
+ atmel_port->use_pdc_rx = 0;
+ return -ENOMEM;
+ }
+ pdc->dma_addr = dma_map_single(port->dev,
+ pdc->buf,
+ PDC_BUFFER_SIZE,
+ DMA_FROM_DEVICE);
+ pdc->dma_size = PDC_BUFFER_SIZE;
+ pdc->ofs = 0;
+ }
+
+ atmel_port->pdc_rx_idx = 0;
+
+ UART_PUT_RPR(port, atmel_port->pdc_rx[0].dma_addr);
+ UART_PUT_RCR(port, PDC_BUFFER_SIZE);
+
+ UART_PUT_RNPR(port, atmel_port->pdc_rx[1].dma_addr);
+ UART_PUT_RNCR(port, PDC_BUFFER_SIZE);
+
+ return 0;
+}
+
/*
* tasklet handling tty stuff outside the interrupt handler.
*/
@@ -868,10 +1362,7 @@ static void atmel_tasklet_func(unsigned long data)
/* The interrupt handler does not take the lock */
spin_lock(&port->lock);
- if (atmel_use_dma_tx(port))
- atmel_tx_dma(port);
- else
- atmel_tx_chars(port);
+ atmel_port->schedule_tx(port);
status = atmel_port->irq_status;
status_change = status ^ atmel_port->irq_status_prev;
@@ -893,19 +1384,152 @@ static void atmel_tasklet_func(unsigned long data)
atmel_port->irq_status_prev = status;
}
- if (atmel_use_dma_rx(port))
- atmel_rx_from_dma(port);
- else
- atmel_rx_from_ring(port);
+ atmel_port->schedule_rx(port);
spin_unlock(&port->lock);
}
+static int atmel_init_property(struct atmel_uart_port *atmel_port,
+ struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
+
+ if (np) {
+ /* DMA/PDC usage specification */
+ if (of_get_property(np, "atmel,use-dma-rx", NULL)) {
+ if (of_get_property(np, "dmas", NULL)) {
+ atmel_port->use_dma_rx = true;
+ atmel_port->use_pdc_rx = false;
+ } else {
+ atmel_port->use_dma_rx = false;
+ atmel_port->use_pdc_rx = true;
+ }
+ } else {
+ atmel_port->use_dma_rx = false;
+ atmel_port->use_pdc_rx = false;
+ }
+
+ if (of_get_property(np, "atmel,use-dma-tx", NULL)) {
+ if (of_get_property(np, "dmas", NULL)) {
+ atmel_port->use_dma_tx = true;
+ atmel_port->use_pdc_tx = false;
+ } else {
+ atmel_port->use_dma_tx = false;
+ atmel_port->use_pdc_tx = true;
+ }
+ } else {
+ atmel_port->use_dma_tx = false;
+ atmel_port->use_pdc_tx = false;
+ }
+
+ } else {
+ atmel_port->use_pdc_rx = pdata->use_dma_rx;
+ atmel_port->use_pdc_tx = pdata->use_dma_tx;
+ atmel_port->use_dma_rx = false;
+ atmel_port->use_dma_tx = false;
+ }
+
+ return 0;
+}
+
+static void atmel_init_rs485(struct atmel_uart_port *atmel_port,
+ struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
+
+ if (np) {
+ u32 rs485_delay[2];
+ /* rs485 properties */
+ if (of_property_read_u32_array(np, "rs485-rts-delay",
+ rs485_delay, 2) == 0) {
+ struct serial_rs485 *rs485conf = &atmel_port->rs485;
+
+ rs485conf->delay_rts_before_send = rs485_delay[0];
+ rs485conf->delay_rts_after_send = rs485_delay[1];
+ rs485conf->flags = 0;
+
+ if (of_get_property(np, "rs485-rx-during-tx", NULL))
+ rs485conf->flags |= SER_RS485_RX_DURING_TX;
+
+ if (of_get_property(np, "linux,rs485-enabled-at-boot-time",
+ NULL))
+ rs485conf->flags |= SER_RS485_ENABLED;
+ }
+ } else {
+ atmel_port->rs485 = pdata->rs485;
+ }
+
+}
+
+static void atmel_set_ops(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+
+ if (atmel_use_dma_rx(port)) {
+ atmel_port->prepare_rx = &atmel_prepare_rx_dma;
+ atmel_port->schedule_rx = &atmel_rx_from_dma;
+ atmel_port->release_rx = &atmel_release_rx_dma;
+ } else if (atmel_use_pdc_rx(port)) {
+ atmel_port->prepare_rx = &atmel_prepare_rx_pdc;
+ atmel_port->schedule_rx = &atmel_rx_from_pdc;
+ atmel_port->release_rx = &atmel_release_rx_pdc;
+ } else {
+ atmel_port->prepare_rx = NULL;
+ atmel_port->schedule_rx = &atmel_rx_from_ring;
+ atmel_port->release_rx = NULL;
+ }
+
+ if (atmel_use_dma_tx(port)) {
+ atmel_port->prepare_tx = &atmel_prepare_tx_dma;
+ atmel_port->schedule_tx = &atmel_tx_dma;
+ atmel_port->release_tx = &atmel_release_tx_dma;
+ } else if (atmel_use_pdc_tx(port)) {
+ atmel_port->prepare_tx = &atmel_prepare_tx_pdc;
+ atmel_port->schedule_tx = &atmel_tx_pdc;
+ atmel_port->release_tx = &atmel_release_tx_pdc;
+ } else {
+ atmel_port->prepare_tx = NULL;
+ atmel_port->schedule_tx = &atmel_tx_chars;
+ atmel_port->release_tx = NULL;
+ }
+}
+
+/*
+ * Read the IP name register to tell whether this is a USART or a UART
+ */
+static int atmel_get_ip_name(struct uart_port *port)
+{
+ struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
+ int name = UART_GET_IP_NAME(port);
+ int usart, uart;
+ /* "USAR" and "DBGU" in ASCII */
+ usart = 0x55534152;
+ uart = 0x44424755;
+
+ atmel_port->is_usart = false;
+
+ if (name == usart) {
+ dev_dbg(port->dev, "This is usart\n");
+ atmel_port->is_usart = true;
+ } else if (name == uart) {
+ dev_dbg(port->dev, "This is uart\n");
+ atmel_port->is_usart = false;
+ } else {
+ dev_err(port->dev, "Not supported ip name, set to uart\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
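/*
 * Illustrative sketch (not part of the patch): the magic numbers compared
 * against the IP name register above appear to be four ASCII characters
 * packed big-endian into a 32-bit word: 0x55534152 is "USAR" (a USART) and
 * 0x44424755 is "DBGU" (the debug-unit UART).  The packing can be checked
 * stand-alone:
 */
#include <assert.h>
#include <stdint.h>

static uint32_t pack_name(char a, char b, char c, char d)
{
	return ((uint32_t)a << 24) | ((uint32_t)b << 16) |
	       ((uint32_t)c << 8) | (uint32_t)d;
}

int main(void)
{
	assert(pack_name('U', 'S', 'A', 'R') == 0x55534152);
	assert(pack_name('D', 'B', 'G', 'U') == 0x44424755);
	return 0;
}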
/*
* Perform initialization and enable port for reception
*/
static int atmel_startup(struct uart_port *port)
{
+ struct platform_device *pdev = to_platform_device(port->dev);
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
struct tty_struct *tty = port->state->port.tty;
int retval;
@@ -930,53 +1554,19 @@ static int atmel_startup(struct uart_port *port)
/*
* Initialize DMA (if necessary)
*/
- if (atmel_use_dma_rx(port)) {
- int i;
-
- for (i = 0; i < 2; i++) {
- struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
-
- pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL);
- if (pdc->buf == NULL) {
- if (i != 0) {
- dma_unmap_single(port->dev,
- atmel_port->pdc_rx[0].dma_addr,
- PDC_BUFFER_SIZE,
- DMA_FROM_DEVICE);
- kfree(atmel_port->pdc_rx[0].buf);
- }
- free_irq(port->irq, port);
- return -ENOMEM;
- }
- pdc->dma_addr = dma_map_single(port->dev,
- pdc->buf,
- PDC_BUFFER_SIZE,
- DMA_FROM_DEVICE);
- pdc->dma_size = PDC_BUFFER_SIZE;
- pdc->ofs = 0;
- }
+ atmel_init_property(atmel_port, pdev);
- atmel_port->pdc_rx_idx = 0;
-
- UART_PUT_RPR(port, atmel_port->pdc_rx[0].dma_addr);
- UART_PUT_RCR(port, PDC_BUFFER_SIZE);
-
- UART_PUT_RNPR(port, atmel_port->pdc_rx[1].dma_addr);
- UART_PUT_RNCR(port, PDC_BUFFER_SIZE);
+ if (atmel_port->prepare_rx) {
+ retval = atmel_port->prepare_rx(port);
+ if (retval < 0)
+ atmel_set_ops(port);
}
- if (atmel_use_dma_tx(port)) {
- struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
- struct circ_buf *xmit = &port->state->xmit;
- pdc->buf = xmit->buf;
- pdc->dma_addr = dma_map_single(port->dev,
- pdc->buf,
- UART_XMIT_SIZE,
- DMA_TO_DEVICE);
- pdc->dma_size = UART_XMIT_SIZE;
- pdc->ofs = 0;
+ if (atmel_port->prepare_tx) {
+ retval = atmel_port->prepare_tx(port);
+ if (retval < 0)
+ atmel_set_ops(port);
}
-
/*
* If there is a specific "open" function (to register
* control line interrupts)
@@ -1000,14 +1590,38 @@ static int atmel_startup(struct uart_port *port)
/* enable xmit & rcvr */
UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN);
- if (atmel_use_dma_rx(port)) {
+ if (atmel_use_pdc_rx(port)) {
/* set UART timeout */
- UART_PUT_RTOR(port, PDC_RX_TIMEOUT);
- UART_PUT_CR(port, ATMEL_US_STTTO);
-
- UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
+ if (!atmel_port->is_usart) {
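+ /* plain uarts have no receiver timeout, so poll with a kernel timer */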
+ setup_timer(&atmel_port->uart_timer,
+ atmel_uart_timer_callback,
+ (unsigned long)port);
+ mod_timer(&atmel_port->uart_timer,
+ jiffies + uart_poll_timeout(port));
+ /* set USART timeout */
+ } else {
+ UART_PUT_RTOR(port, PDC_RX_TIMEOUT);
+ UART_PUT_CR(port, ATMEL_US_STTTO);
+
+ UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT);
+ }
/* enable PDC controller */
UART_PUT_PTCR(port, ATMEL_PDC_RXTEN);
+ } else if (atmel_use_dma_rx(port)) {
+ /* set UART timeout */
+ if (!atmel_port->is_usart) {
+ setup_timer(&atmel_port->uart_timer,
+ atmel_uart_timer_callback,
+ (unsigned long)port);
+ mod_timer(&atmel_port->uart_timer,
+ jiffies + uart_poll_timeout(port));
+ /* set USART timeout */
+ } else {
+ UART_PUT_RTOR(port, PDC_RX_TIMEOUT);
+ UART_PUT_CR(port, ATMEL_US_STTTO);
+
+ UART_PUT_IER(port, ATMEL_US_TIMEOUT);
+ }
} else {
/* enable receive only */
UART_PUT_IER(port, ATMEL_US_RXRDY);
@@ -1031,27 +1645,10 @@ static void atmel_shutdown(struct uart_port *port)
/*
* Shut-down the DMA.
*/
- if (atmel_use_dma_rx(port)) {
- int i;
-
- for (i = 0; i < 2; i++) {
- struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i];
-
- dma_unmap_single(port->dev,
- pdc->dma_addr,
- pdc->dma_size,
- DMA_FROM_DEVICE);
- kfree(pdc->buf);
- }
- }
- if (atmel_use_dma_tx(port)) {
- struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx;
-
- dma_unmap_single(port->dev,
- pdc->dma_addr,
- pdc->dma_size,
- DMA_TO_DEVICE);
- }
+ if (atmel_port->release_rx)
+ atmel_port->release_rx(port);
+ if (atmel_port->release_tx)
+ atmel_port->release_tx(port);
/*
* Disable all interrupts, port and break condition.
@@ -1080,7 +1677,7 @@ static void atmel_flush_buffer(struct uart_port *port)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
- if (atmel_use_dma_tx(port)) {
+ if (atmel_use_pdc_tx(port)) {
UART_PUT_TCR(port, 0);
atmel_port->pdc_tx.ofs = 0;
}
@@ -1193,7 +1790,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
if (termios->c_iflag & (BRKINT | PARMRK))
port->read_status_mask |= ATMEL_US_RXBRK;
- if (atmel_use_dma_rx(port))
+ if (atmel_use_pdc_rx(port))
/* need to enable error interrupts */
UART_PUT_IER(port, port->read_status_mask);
@@ -1423,38 +2020,6 @@ static struct uart_ops atmel_pops = {
#endif
};
-static void atmel_of_init_port(struct atmel_uart_port *atmel_port,
- struct device_node *np)
-{
- u32 rs485_delay[2];
-
- /* DMA/PDC usage specification */
- if (of_get_property(np, "atmel,use-dma-rx", NULL))
- atmel_port->use_dma_rx = 1;
- else
- atmel_port->use_dma_rx = 0;
- if (of_get_property(np, "atmel,use-dma-tx", NULL))
- atmel_port->use_dma_tx = 1;
- else
- atmel_port->use_dma_tx = 0;
-
- /* rs485 properties */
- if (of_property_read_u32_array(np, "rs485-rts-delay",
- rs485_delay, 2) == 0) {
- struct serial_rs485 *rs485conf = &atmel_port->rs485;
-
- rs485conf->delay_rts_before_send = rs485_delay[0];
- rs485conf->delay_rts_after_send = rs485_delay[1];
- rs485conf->flags = 0;
-
- if (of_get_property(np, "rs485-rx-during-tx", NULL))
- rs485conf->flags |= SER_RS485_RX_DURING_TX;
-
- if (of_get_property(np, "linux,rs485-enabled-at-boot-time", NULL))
- rs485conf->flags |= SER_RS485_ENABLED;
- }
-}
-
/*
* Configure the port from the platform device resource info.
*/
@@ -1463,15 +2028,12 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
{
int ret;
struct uart_port *port = &atmel_port->uart;
- struct atmel_uart_data *pdata = pdev->dev.platform_data;
+ struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
- if (pdev->dev.of_node) {
- atmel_of_init_port(atmel_port, pdev->dev.of_node);
- } else {
- atmel_port->use_dma_rx = pdata->use_dma_rx;
- atmel_port->use_dma_tx = pdata->use_dma_tx;
- atmel_port->rs485 = pdata->rs485;
- }
+ if (!atmel_init_property(atmel_port, pdev))
+ atmel_set_ops(port);
+
+ atmel_init_rs485(atmel_port, pdev);
port->iotype = UPIO_MEM;
port->flags = UPF_BOOT_AUTOCONF;
@@ -1516,7 +2078,7 @@ static int atmel_init_port(struct atmel_uart_port *atmel_port,
/* Use TXEMPTY for interrupt when rs485 else TXRDY or ENDTX|TXBUFE */
if (atmel_port->rs485.flags & SER_RS485_ENABLED)
atmel_port->tx_done_mask = ATMEL_US_TXEMPTY;
- else if (atmel_use_dma_tx(port)) {
+ else if (atmel_use_pdc_tx(port)) {
port->fifosize = PDC_BUFFER_SIZE;
atmel_port->tx_done_mask = ATMEL_US_ENDTX | ATMEL_US_TXBUFE;
} else {
@@ -1664,7 +2226,7 @@ static int __init atmel_console_init(void)
int ret;
if (atmel_default_console_device) {
struct atmel_uart_data *pdata =
- atmel_default_console_device->dev.platform_data;
+ dev_get_platdata(&atmel_default_console_device->dev);
int id = pdata->num;
struct atmel_uart_port *port = &atmel_ports[id];
@@ -1772,10 +2334,9 @@ static int atmel_serial_probe(struct platform_device *pdev)
{
struct atmel_uart_port *port;
struct device_node *np = pdev->dev.of_node;
- struct atmel_uart_data *pdata = pdev->dev.platform_data;
+ struct atmel_uart_data *pdata = dev_get_platdata(&pdev->dev);
void *data;
int ret = -ENODEV;
- struct pinctrl *pinctrl;
BUILD_BUG_ON(ATMEL_SERIAL_RINGSIZE & (ATMEL_SERIAL_RINGSIZE - 1));
@@ -1809,13 +2370,7 @@ static int atmel_serial_probe(struct platform_device *pdev)
if (ret)
goto err;
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl)) {
- ret = PTR_ERR(pinctrl);
- goto err;
- }
-
- if (!atmel_use_dma_rx(&port->uart)) {
+ if (!atmel_use_pdc_rx(&port->uart)) {
ret = -ENOMEM;
data = kmalloc(sizeof(struct atmel_uart_char)
* ATMEL_SERIAL_RINGSIZE, GFP_KERNEL);
@@ -1847,6 +2402,13 @@ static int atmel_serial_probe(struct platform_device *pdev)
UART_PUT_CR(&port->uart, ATMEL_US_RTSEN);
}
+ /*
+ * Determine whether the IP is a usart or a uart
+ */
+ ret = atmel_get_ip_name(&port->uart);
+ if (ret < 0)
+ goto err_add_port;
+
return 0;
err_add_port:
@@ -1868,7 +2430,6 @@ static int atmel_serial_remove(struct platform_device *pdev)
int ret = 0;
device_init_wakeup(&pdev->dev, 0);
- platform_set_drvdata(pdev, NULL);
ret = uart_remove_one_port(&atmel_uart, port);
diff --git a/drivers/tty/serial/bcm63xx_uart.c b/drivers/tty/serial/bcm63xx_uart.c
index 6fa2ae77fff..649d5129c4b 100644
--- a/drivers/tty/serial/bcm63xx_uart.c
+++ b/drivers/tty/serial/bcm63xx_uart.c
@@ -302,7 +302,9 @@ static void bcm_uart_do_rx(struct uart_port *port)
} while (--max_count);
+ spin_unlock(&port->lock);
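+ /* hand the received characters to the tty layer without holding the port lock */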
tty_flip_buffer_push(tty_port);
+ spin_lock(&port->lock);
}
/*
@@ -852,7 +854,6 @@ static int bcm_uart_remove(struct platform_device *pdev)
port = platform_get_drvdata(pdev);
uart_remove_one_port(&bcm_uart_driver, port);
- platform_set_drvdata(pdev, NULL);
/* mark port as free */
ports[pdev->id].membase = 0;
return 0;
diff --git a/drivers/tty/serial/bfin_sport_uart.c b/drivers/tty/serial/bfin_sport_uart.c
index 487c173b0f7..87636cc61a2 100644
--- a/drivers/tty/serial/bfin_sport_uart.c
+++ b/drivers/tty/serial/bfin_sport_uart.c
@@ -161,11 +161,12 @@ static irqreturn_t sport_uart_rx_irq(int irq, void *dev_id)
if (!uart_handle_sysrq_char(&up->port, ch))
tty_insert_flip_char(port, ch, TTY_NORMAL);
}
- /* XXX this won't deadlock with lowlat? */
- tty_flip_buffer_push(port);
spin_unlock(&up->port.lock);
+ /* XXX this won't deadlock with lowlat? */
+ tty_flip_buffer_push(port);
+
return IRQ_HANDLED;
}
@@ -766,7 +767,8 @@ static int sport_uart_probe(struct platform_device *pdev)
}
ret = peripheral_request_list(
- (unsigned short *)pdev->dev.platform_data, DRV_NAME);
+ (unsigned short *)dev_get_platdata(&pdev->dev),
+ DRV_NAME);
if (ret) {
dev_err(&pdev->dev,
"Fail to request SPORT peripherals\n");
@@ -843,7 +845,7 @@ out_error_unmap:
iounmap(sport->port.membase);
out_error_free_peripherals:
peripheral_free_list(
- (unsigned short *)pdev->dev.platform_data);
+ (unsigned short *)dev_get_platdata(&pdev->dev));
out_error_free_mem:
kfree(sport);
bfin_sport_uart_ports[pdev->id] = NULL;
@@ -863,7 +865,7 @@ static int sport_uart_remove(struct platform_device *pdev)
uart_remove_one_port(&sport_uart_reg, &sport->port);
iounmap(sport->port.membase);
peripheral_free_list(
- (unsigned short *)pdev->dev.platform_data);
+ (unsigned short *)dev_get_platdata(&pdev->dev));
kfree(sport);
bfin_sport_uart_ports[pdev->id] = NULL;
}
@@ -883,7 +885,7 @@ static struct platform_driver sport_uart_driver = {
};
#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
-static __initdata struct early_platform_driver early_sport_uart_driver = {
+static struct early_platform_driver early_sport_uart_driver __initdata = {
.class_str = CLASS_BFIN_SPORT_CONSOLE,
.pdrv = &sport_uart_driver,
.requested_id = EARLY_PLATFORM_ID_UNSET,
diff --git a/drivers/tty/serial/bfin_uart.c b/drivers/tty/serial/bfin_uart.c
index 26a3be7ced7..3c75e8e0402 100644
--- a/drivers/tty/serial/bfin_uart.c
+++ b/drivers/tty/serial/bfin_uart.c
@@ -41,10 +41,6 @@
# undef CONFIG_EARLY_PRINTK
#endif
-#ifdef CONFIG_SERIAL_BFIN_MODULE
-# undef CONFIG_EARLY_PRINTK
-#endif
-
/* UART name and device definitions */
#define BFIN_SERIAL_DEV_NAME "ttyBF"
#define BFIN_SERIAL_MAJOR 204
@@ -1180,7 +1176,7 @@ bfin_earlyprintk_console_write(struct console *co, const char *s, unsigned int c
* don't let the common infrastructure play with things. (see calls to setup
* & earlysetup in ./kernel/printk.c:register_console()
*/
-static struct __initdata console bfin_early_serial_console = {
+static struct console bfin_early_serial_console __initdata = {
.name = "early_BFuart",
.write = bfin_earlyprintk_console_write,
.device = uart_console_device,
@@ -1244,7 +1240,8 @@ static int bfin_serial_probe(struct platform_device *pdev)
*/
#endif
ret = peripheral_request_list(
- (unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
+ (unsigned short *)dev_get_platdata(&pdev->dev),
+ DRIVER_NAME);
if (ret) {
dev_err(&pdev->dev,
"fail to request bfin serial peripherals\n");
@@ -1362,7 +1359,7 @@ out_error_unmap:
iounmap(uart->port.membase);
out_error_free_peripherals:
peripheral_free_list(
- (unsigned short *)pdev->dev.platform_data);
+ (unsigned short *)dev_get_platdata(&pdev->dev));
out_error_free_mem:
kfree(uart);
bfin_serial_ports[pdev->id] = NULL;
@@ -1381,7 +1378,7 @@ static int bfin_serial_remove(struct platform_device *pdev)
uart_remove_one_port(&bfin_serial_reg, &uart->port);
iounmap(uart->port.membase);
peripheral_free_list(
- (unsigned short *)pdev->dev.platform_data);
+ (unsigned short *)dev_get_platdata(&pdev->dev));
kfree(uart);
bfin_serial_ports[pdev->id] = NULL;
}
@@ -1401,7 +1398,7 @@ static struct platform_driver bfin_serial_driver = {
};
#if defined(CONFIG_SERIAL_BFIN_CONSOLE)
-static __initdata struct early_platform_driver early_bfin_serial_driver = {
+static struct early_platform_driver early_bfin_serial_driver __initdata = {
.class_str = CLASS_BFIN_CONSOLE,
.pdrv = &bfin_serial_driver,
.requested_id = EARLY_PLATFORM_ID_UNSET,
@@ -1436,7 +1433,7 @@ static int bfin_earlyprintk_probe(struct platform_device *pdev)
}
ret = peripheral_request_list(
- (unsigned short *)pdev->dev.platform_data, DRIVER_NAME);
+ (unsigned short *)dev_get_platdata(&pdev->dev), DRIVER_NAME);
if (ret) {
dev_err(&pdev->dev,
"fail to request bfin serial peripherals\n");
@@ -1467,7 +1464,7 @@ static int bfin_earlyprintk_probe(struct platform_device *pdev)
out_error_free_peripherals:
peripheral_free_list(
- (unsigned short *)pdev->dev.platform_data);
+ (unsigned short *)dev_get_platdata(&pdev->dev));
return ret;
}
@@ -1480,7 +1477,7 @@ static struct platform_driver bfin_earlyprintk_driver = {
},
};
-static __initdata struct early_platform_driver early_bfin_earlyprintk_driver = {
+static struct early_platform_driver early_bfin_earlyprintk_driver __initdata = {
.class_str = CLASS_BFIN_EARLYPRINTK,
.pdrv = &bfin_earlyprintk_driver,
.requested_id = EARLY_PLATFORM_ID_UNSET,
diff --git a/drivers/tty/serial/clps711x.c b/drivers/tty/serial/clps711x.c
index bfb17968c8d..7e4e4088471 100644
--- a/drivers/tty/serial/clps711x.c
+++ b/drivers/tty/serial/clps711x.c
@@ -438,8 +438,7 @@ static int uart_clps711x_probe(struct platform_device *pdev)
s->uart_clk = devm_clk_get(&pdev->dev, "uart");
if (IS_ERR(s->uart_clk)) {
dev_err(&pdev->dev, "Can't get UART clocks\n");
- ret = PTR_ERR(s->uart_clk);
- goto err_out;
+ return PTR_ERR(s->uart_clk);
}
s->uart.owner = THIS_MODULE;
@@ -461,7 +460,7 @@ static int uart_clps711x_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "Registering UART driver failed\n");
devm_clk_put(&pdev->dev, s->uart_clk);
- goto err_out;
+ return ret;
}
for (i = 0; i < UART_CLPS711X_NR; i++) {
@@ -478,11 +477,6 @@ static int uart_clps711x_probe(struct platform_device *pdev)
}
return 0;
-
-err_out:
- platform_set_drvdata(pdev, NULL);
-
- return ret;
}
static int uart_clps711x_remove(struct platform_device *pdev)
@@ -495,7 +489,6 @@ static int uart_clps711x_remove(struct platform_device *pdev)
devm_clk_put(&pdev->dev, s->uart_clk);
uart_unregister_driver(&s->uart);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index f7672cae532..1a535f70dc4 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -1213,8 +1213,32 @@ static int cpm_uart_init_port(struct device_node *np,
goto out_pram;
}
- for (i = 0; i < NUM_GPIOS; i++)
- pinfo->gpios[i] = of_get_gpio(np, i);
+ for (i = 0; i < NUM_GPIOS; i++) {
+ int gpio;
+
+ pinfo->gpios[i] = -1;
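+ /* stays -1 (unused) unless the gpio below is successfully claimed */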
+
+ gpio = of_get_gpio(np, i);
+
+ if (gpio_is_valid(gpio)) {
+ ret = gpio_request(gpio, "cpm_uart");
+ if (ret) {
+ pr_err("can't request gpio #%d: %d\n", i, ret);
+ continue;
+ }
+ if (i == GPIO_RTS || i == GPIO_DTR)
+ ret = gpio_direction_output(gpio, 0);
+ else
+ ret = gpio_direction_input(gpio);
+ if (ret) {
+ pr_err("can't set direction for gpio #%d: %d\n",
+ i, ret);
+ gpio_free(gpio);
+ continue;
+ }
+ pinfo->gpios[i] = gpio;
+ }
+ }
#ifdef CONFIG_PPC_EARLY_DEBUG_CPM
udbg_putc = NULL;
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index 7d199c8e1a7..0eb5b5673ed 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -268,10 +268,10 @@ static irqreturn_t efm32_uart_rxirq(int irq, void *data)
handled = IRQ_HANDLED;
}
- tty_flip_buffer_push(tport);
-
spin_unlock(&port->lock);
+ tty_flip_buffer_push(tport);
+
return handled;
}
@@ -698,6 +698,7 @@ static int efm32_uart_probe(struct platform_device *pdev)
{
struct efm32_uart_port *efm_port;
struct resource *res;
+ unsigned int line;
int ret;
efm_port = kzalloc(sizeof(*efm_port), GFP_KERNEL);
@@ -750,18 +751,21 @@ static int efm32_uart_probe(struct platform_device *pdev)
if (pdata)
efm_port->pdata = *pdata;
- }
+ } else if (ret < 0)
+ goto err_probe_dt;
+
+ line = efm_port->port.line;
- if (efm_port->port.line >= 0 &&
- efm_port->port.line < ARRAY_SIZE(efm32_uart_ports))
- efm32_uart_ports[efm_port->port.line] = efm_port;
+ if (line >= 0 && line < ARRAY_SIZE(efm32_uart_ports))
+ efm32_uart_ports[line] = efm_port;
ret = uart_add_one_port(&efm32_uart_reg, &efm_port->port);
if (ret) {
dev_dbg(&pdev->dev, "failed to add port: %d\n", ret);
- if (pdev->id >= 0 && pdev->id < ARRAY_SIZE(efm32_uart_ports))
- efm32_uart_ports[pdev->id] = NULL;
+ if (line >= 0 && line < ARRAY_SIZE(efm32_uart_ports))
+ efm32_uart_ports[line] = NULL;
+err_probe_dt:
err_get_rxirq:
err_too_small:
err_get_base:
@@ -777,20 +781,19 @@ err_get_base:
static int efm32_uart_remove(struct platform_device *pdev)
{
struct efm32_uart_port *efm_port = platform_get_drvdata(pdev);
-
- platform_set_drvdata(pdev, NULL);
+ unsigned int line = efm_port->port.line;
uart_remove_one_port(&efm32_uart_reg, &efm_port->port);
- if (pdev->id >= 0 && pdev->id < ARRAY_SIZE(efm32_uart_ports))
- efm32_uart_ports[pdev->id] = NULL;
+ if (line >= 0 && line < ARRAY_SIZE(efm32_uart_ports))
+ efm32_uart_ports[line] = NULL;
kfree(efm_port);
return 0;
}
-static struct of_device_id efm32_uart_dt_ids[] = {
+static const struct of_device_id efm32_uart_dt_ids[] = {
{
.compatible = "efm32,uart",
}, {
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 263cfaabe9e..8978dc9a58b 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -342,8 +342,10 @@ static void lpuart_break_ctl(struct uart_port *port, int break_state)
static void lpuart_setup_watermark(struct lpuart_port *sport)
{
unsigned char val, cr2;
+ unsigned char cr2_saved;
cr2 = readb(sport->port.membase + UARTCR2);
+ cr2_saved = cr2;
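+ /* disable Rx/Tx and their interrupts while the FIFO watermarks are programmed */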
cr2 &= ~(UARTCR2_TIE | UARTCR2_TCIE | UARTCR2_TE |
UARTCR2_RIE | UARTCR2_RE);
writeb(cr2, sport->port.membase + UARTCR2);
@@ -366,6 +368,9 @@ static void lpuart_setup_watermark(struct lpuart_port *sport)
writeb(2, sport->port.membase + UARTTWFIFO);
writeb(1, sport->port.membase + UARTRWFIFO);
+
+ /* Restore cr2 */
+ writeb(cr2_saved, sport->port.membase + UARTCR2);
}
static int lpuart_startup(struct uart_port *port)
@@ -858,7 +863,7 @@ static int __init lpuart_serial_init(void)
if (ret)
uart_unregister_driver(&lpuart_reg);
- return 0;
+ return ret;
}
static void __exit lpuart_serial_exit(void)
diff --git a/drivers/tty/serial/icom.c b/drivers/tty/serial/icom.c
index 18ed5aebb16..d98e4334897 100644
--- a/drivers/tty/serial/icom.c
+++ b/drivers/tty/serial/icom.c
@@ -105,7 +105,7 @@ static const struct pci_device_id icom_pci_table[] = {
{}
};
-struct lookup_proc_table start_proc[4] = {
+static struct lookup_proc_table start_proc[4] = {
{NULL, ICOM_CONTROL_START_A},
{NULL, ICOM_CONTROL_START_B},
{NULL, ICOM_CONTROL_START_C},
@@ -113,14 +113,14 @@ struct lookup_proc_table start_proc[4] = {
};
-struct lookup_proc_table stop_proc[4] = {
+static struct lookup_proc_table stop_proc[4] = {
{NULL, ICOM_CONTROL_STOP_A},
{NULL, ICOM_CONTROL_STOP_B},
{NULL, ICOM_CONTROL_STOP_C},
{NULL, ICOM_CONTROL_STOP_D}
};
-struct lookup_int_table int_mask_tbl[4] = {
+static struct lookup_int_table int_mask_tbl[4] = {
{NULL, ICOM_INT_MASK_PRC_A},
{NULL, ICOM_INT_MASK_PRC_B},
{NULL, ICOM_INT_MASK_PRC_C},
@@ -297,25 +297,25 @@ static void stop_processor(struct icom_port *icom_port)
spin_lock_irqsave(&icom_lock, flags);
port = icom_port->port;
+ if (port >= ARRAY_SIZE(stop_proc)) {
+ dev_err(&icom_port->adapter->pci_dev->dev,
+ "Invalid port assignment\n");
+ goto unlock;
+ }
+
if (port == 0 || port == 1)
stop_proc[port].global_control_reg = &icom_port->global_reg->control;
else
stop_proc[port].global_control_reg = &icom_port->global_reg->control_2;
+ temp = readl(stop_proc[port].global_control_reg);
+ temp = (temp & ~start_proc[port].processor_id) | stop_proc[port].processor_id;
+ writel(temp, stop_proc[port].global_control_reg);
- if (port < 4) {
- temp = readl(stop_proc[port].global_control_reg);
- temp =
- (temp & ~start_proc[port].processor_id) | stop_proc[port].processor_id;
- writel(temp, stop_proc[port].global_control_reg);
-
- /* write flush */
- readl(stop_proc[port].global_control_reg);
- } else {
- dev_err(&icom_port->adapter->pci_dev->dev,
- "Invalid port assignment\n");
- }
+ /* write flush */
+ readl(stop_proc[port].global_control_reg);
+unlock:
spin_unlock_irqrestore(&icom_lock, flags);
}
@@ -328,23 +328,25 @@ static void start_processor(struct icom_port *icom_port)
spin_lock_irqsave(&icom_lock, flags);
port = icom_port->port;
+ if (port >= ARRAY_SIZE(start_proc)) {
+ dev_err(&icom_port->adapter->pci_dev->dev,
+ "Invalid port assignment\n");
+ goto unlock;
+ }
+
if (port == 0 || port == 1)
start_proc[port].global_control_reg = &icom_port->global_reg->control;
else
start_proc[port].global_control_reg = &icom_port->global_reg->control_2;
- if (port < 4) {
- temp = readl(start_proc[port].global_control_reg);
- temp =
- (temp & ~stop_proc[port].processor_id) | start_proc[port].processor_id;
- writel(temp, start_proc[port].global_control_reg);
- /* write flush */
- readl(start_proc[port].global_control_reg);
- } else {
- dev_err(&icom_port->adapter->pci_dev->dev,
- "Invalid port assignment\n");
- }
+ temp = readl(start_proc[port].global_control_reg);
+ temp = (temp & ~stop_proc[port].processor_id) | start_proc[port].processor_id;
+ writel(temp, start_proc[port].global_control_reg);
+
+ /* write flush */
+ readl(start_proc[port].global_control_reg);
+unlock:
spin_unlock_irqrestore(&icom_lock, flags);
}
@@ -557,6 +559,12 @@ static int startup(struct icom_port *icom_port)
*/
spin_lock_irqsave(&icom_lock, flags);
port = icom_port->port;
+ if (port >= ARRAY_SIZE(int_mask_tbl)) {
+ dev_err(&icom_port->adapter->pci_dev->dev,
+ "Invalid port assignment\n");
+ goto unlock;
+ }
+
if (port == 0 || port == 1)
int_mask_tbl[port].global_int_mask = &icom_port->global_reg->int_mask;
else
@@ -566,17 +574,14 @@ static int startup(struct icom_port *icom_port)
writew(0x00FF, icom_port->int_reg);
else
writew(0x3F00, icom_port->int_reg);
- if (port < 4) {
- temp = readl(int_mask_tbl[port].global_int_mask);
- writel(temp & ~int_mask_tbl[port].processor_id, int_mask_tbl[port].global_int_mask);
- /* write flush */
- readl(int_mask_tbl[port].global_int_mask);
- } else {
- dev_err(&icom_port->adapter->pci_dev->dev,
- "Invalid port assignment\n");
- }
+ temp = readl(int_mask_tbl[port].global_int_mask);
+ writel(temp & ~int_mask_tbl[port].processor_id, int_mask_tbl[port].global_int_mask);
+ /* write flush */
+ readl(int_mask_tbl[port].global_int_mask);
+
+unlock:
spin_unlock_irqrestore(&icom_lock, flags);
return 0;
}
@@ -595,21 +600,23 @@ static void shutdown(struct icom_port *icom_port)
* disable all interrupts
*/
port = icom_port->port;
+ if (port >= ARRAY_SIZE(int_mask_tbl)) {
+ dev_err(&icom_port->adapter->pci_dev->dev,
+ "Invalid port assignment\n");
+ goto unlock;
+ }
if (port == 0 || port == 1)
int_mask_tbl[port].global_int_mask = &icom_port->global_reg->int_mask;
else
int_mask_tbl[port].global_int_mask = &icom_port->global_reg->int_mask_2;
- if (port < 4) {
- temp = readl(int_mask_tbl[port].global_int_mask);
- writel(temp | int_mask_tbl[port].processor_id, int_mask_tbl[port].global_int_mask);
+ temp = readl(int_mask_tbl[port].global_int_mask);
+ writel(temp | int_mask_tbl[port].processor_id, int_mask_tbl[port].global_int_mask);
- /* write flush */
- readl(int_mask_tbl[port].global_int_mask);
- } else {
- dev_err(&icom_port->adapter->pci_dev->dev,
- "Invalid port assignment\n");
- }
+ /* write flush */
+ readl(int_mask_tbl[port].global_int_mask);
+
+unlock:
spin_unlock_irqrestore(&icom_lock, flags);
/*
@@ -834,7 +841,10 @@ ignore_char:
status = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].flags);
}
icom_port->next_rcv = rcv_buff;
+
+ spin_unlock(&icom_port->uart_port.lock);
tty_flip_buffer_push(port);
+ spin_lock(&icom_port->uart_port.lock);
}
static void process_interrupt(u16 port_int_reg,
@@ -1087,8 +1097,7 @@ static void icom_close(struct uart_port *port)
/* stop receiver */
cmdReg = readb(&ICOM_PORT->dram->CmdReg);
- writeb(cmdReg & (unsigned char) ~CMD_RCV_ENABLE,
- &ICOM_PORT->dram->CmdReg);
+ writeb(cmdReg & ~CMD_RCV_ENABLE, &ICOM_PORT->dram->CmdReg);
shutdown(ICOM_PORT);
@@ -1567,7 +1576,7 @@ static int icom_probe(struct pci_dev *dev,
icom_port->uart_port.type = PORT_ICOM;
icom_port->uart_port.iotype = UPIO_MEM;
icom_port->uart_port.membase =
- (char *) icom_adapter->base_addr_pci;
+ (unsigned char __iomem *)icom_adapter->base_addr_pci;
icom_port->uart_port.fifosize = 16;
icom_port->uart_port.ops = &icom_ops;
icom_port->uart_port.line =
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 8b1534c424a..af286e6713e 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1008,7 +1008,7 @@ static int ifx_spi_spi_probe(struct spi_device *spi)
return -ENODEV;
}
- pl_data = (struct ifx_modem_platform_data *)spi->dev.platform_data;
+ pl_data = (struct ifx_modem_platform_data *)dev_get_platdata(&spi->dev);
if (!pl_data) {
dev_err(&spi->dev, "missing platform data!");
return -ENODEV;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 415cec62073..a0ebbc9ce5c 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -47,11 +47,12 @@
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/io.h>
+#include <linux/dma-mapping.h>
#include <asm/irq.h>
#include <linux/platform_data/serial-imx.h>
+#include <linux/platform_data/dma-imx.h>
/* Register definitions */
#define URXD0 0x0 /* Receiver Register */
@@ -83,6 +84,7 @@
#define UCR1_ADBR (1<<14) /* Auto detect baud rate */
#define UCR1_TRDYEN (1<<13) /* Transmitter ready interrupt enable */
#define UCR1_IDEN (1<<12) /* Idle condition interrupt */
+#define UCR1_ICD_REG(x) (((x) & 3) << 10) /* idle condition detect */
#define UCR1_RRDYEN (1<<9) /* Recv ready interrupt enable */
#define UCR1_RDMAEN (1<<8) /* Recv ready DMA enable */
#define UCR1_IREN (1<<7) /* Infrared interface enable */
@@ -91,6 +93,7 @@
#define UCR1_SNDBRK (1<<4) /* Send break */
#define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */
#define IMX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, i.mx1 only */
+#define UCR1_ATDMAEN (1<<2) /* Aging DMA Timer Enable */
#define UCR1_DOZE (1<<1) /* Doze */
#define UCR1_UARTEN (1<<0) /* UART enabled */
#define UCR2_ESCI (1<<15) /* Escape seq interrupt enable */
@@ -126,6 +129,7 @@
#define UCR4_ENIRI (1<<8) /* Serial infrared interrupt enable */
#define UCR4_WKEN (1<<7) /* Wake interrupt enable */
#define UCR4_REF16 (1<<6) /* Ref freq 16 MHz */
+#define UCR4_IDDMAEN (1<<6) /* DMA IDLE Condition Detected */
#define UCR4_IRSC (1<<5) /* IR special case */
#define UCR4_TCEN (1<<3) /* Transmit complete interrupt enable */
#define UCR4_BKEN (1<<2) /* Break condition interrupt enable */
@@ -187,6 +191,7 @@
enum imx_uart_type {
IMX1_UART,
IMX21_UART,
+ IMX6Q_UART,
};
/* device type dependent stuff */
@@ -209,6 +214,19 @@ struct imx_port {
struct clk *clk_ipg;
struct clk *clk_per;
const struct imx_uart_data *devdata;
+
+ /* DMA fields */
+ unsigned int dma_is_inited:1;
+ unsigned int dma_is_enabled:1;
+ unsigned int dma_is_rxing:1;
+ unsigned int dma_is_txing:1;
+ struct dma_chan *dma_chan_rx, *dma_chan_tx;
+ struct scatterlist rx_sgl, tx_sgl[2];
+ void *rx_buf;
+ unsigned int rx_bytes, tx_bytes;
+ struct work_struct tsk_dma_rx, tsk_dma_tx;
+ unsigned int dma_tx_nents;
+ wait_queue_head_t dma_wait;
};
struct imx_port_ucrs {
@@ -232,6 +250,10 @@ static struct imx_uart_data imx_uart_devdata[] = {
.uts_reg = IMX21_UTS,
.devtype = IMX21_UART,
},
+ [IMX6Q_UART] = {
+ .uts_reg = IMX21_UTS,
+ .devtype = IMX6Q_UART,
+ },
};
static struct platform_device_id imx_uart_devtype[] = {
@@ -242,12 +264,16 @@ static struct platform_device_id imx_uart_devtype[] = {
.name = "imx21-uart",
.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX21_UART],
}, {
+ .name = "imx6q-uart",
+ .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX6Q_UART],
+ }, {
/* sentinel */
}
};
MODULE_DEVICE_TABLE(platform, imx_uart_devtype);
static struct of_device_id imx_uart_dt_ids[] = {
+ { .compatible = "fsl,imx6q-uart", .data = &imx_uart_devdata[IMX6Q_UART], },
{ .compatible = "fsl,imx1-uart", .data = &imx_uart_devdata[IMX1_UART], },
{ .compatible = "fsl,imx21-uart", .data = &imx_uart_devdata[IMX21_UART], },
{ /* sentinel */ }
@@ -269,6 +295,10 @@ static inline int is_imx21_uart(struct imx_port *sport)
return sport->devdata->devtype == IMX21_UART;
}
+static inline int is_imx6q_uart(struct imx_port *sport)
+{
+ return sport->devdata->devtype == IMX6Q_UART;
+}
/*
* Save and restore functions for UCR1, UCR2 and UCR3 registers
*/
@@ -387,6 +417,13 @@ static void imx_stop_tx(struct uart_port *port)
return;
}
+ /*
+ * We may be running on SMP: if the DMA TX path is still active on
+ * another CPU, let it finish before the transmitter is stopped.
+ */
+ if (sport->dma_is_enabled && sport->dma_is_txing)
+ return;
+
temp = readl(sport->port.membase + UCR1);
writel(temp & ~UCR1_TXMPTYEN, sport->port.membase + UCR1);
}
@@ -399,6 +436,13 @@ static void imx_stop_rx(struct uart_port *port)
struct imx_port *sport = (struct imx_port *)port;
unsigned long temp;
+ /*
+ * We may be running on SMP: if the DMA RX path is still active on
+ * another CPU, let it finish before the receiver is stopped.
+ */
+ if (sport->dma_is_enabled && sport->dma_is_rxing)
+ return;
+
temp = readl(sport->port.membase + UCR2);
writel(temp & ~UCR2_RXEN, sport->port.membase + UCR2);
}
@@ -434,6 +478,95 @@ static inline void imx_transmit_buffer(struct imx_port *sport)
imx_stop_tx(&sport->port);
}
+static void dma_tx_callback(void *data)
+{
+ struct imx_port *sport = data;
+ struct scatterlist *sgl = &sport->tx_sgl[0];
+ struct circ_buf *xmit = &sport->port.state->xmit;
+ unsigned long flags;
+
+ dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+
+ sport->dma_is_txing = 0;
+
+ /* update the stat */
+ spin_lock_irqsave(&sport->port.lock, flags);
+ xmit->tail = (xmit->tail + sport->tx_bytes) & (UART_XMIT_SIZE - 1);
+ sport->port.icount.tx += sport->tx_bytes;
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ dev_dbg(sport->port.dev, "we finish the TX DMA.\n");
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&sport->port);
+
+ if (waitqueue_active(&sport->dma_wait)) {
+ wake_up(&sport->dma_wait);
+ dev_dbg(sport->port.dev, "exit in %s.\n", __func__);
+ return;
+ }
+
+ schedule_work(&sport->tsk_dma_tx);
+}
+
+static void dma_tx_work(struct work_struct *w)
+{
+ struct imx_port *sport = container_of(w, struct imx_port, tsk_dma_tx);
+ struct circ_buf *xmit = &sport->port.state->xmit;
+ struct scatterlist *sgl = sport->tx_sgl;
+ struct dma_async_tx_descriptor *desc;
+ struct dma_chan *chan = sport->dma_chan_tx;
+ struct device *dev = sport->port.dev;
+ enum dma_status status;
+ unsigned long flags;
+ int ret;
+
+ status = chan->device->device_tx_status(chan, (dma_cookie_t)0, NULL);
+ if (DMA_IN_PROGRESS == status)
+ return;
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+ sport->tx_bytes = uart_circ_chars_pending(xmit);
+ if (sport->tx_bytes == 0) {
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+ return;
+ }
+
+ if (xmit->tail > xmit->head) {
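+ /* the circular buffer wraps: map tail..end and start..head separately */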
+ sport->dma_tx_nents = 2;
+ sg_init_table(sgl, 2);
+ sg_set_buf(sgl, xmit->buf + xmit->tail,
+ UART_XMIT_SIZE - xmit->tail);
+ sg_set_buf(sgl + 1, xmit->buf, xmit->head);
+ } else {
+ sport->dma_tx_nents = 1;
+ sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
+ }
+ spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
+ if (ret == 0) {
+ dev_err(dev, "DMA mapping error for TX.\n");
+ return;
+ }
+ desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "We cannot prepare for the TX slave dma!\n");
+ return;
+ }
+ desc->callback = dma_tx_callback;
+ desc->callback_param = sport;
+
+ dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n",
+ uart_circ_chars_pending(xmit));
+ /* fire it */
+ sport->dma_is_txing = 1;
+ dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
+ return;
+}
+
/*
* interrupts disabled on entry
*/
@@ -460,8 +593,10 @@ static void imx_start_tx(struct uart_port *port)
temp |= UCR4_OREN;
writel(temp, sport->port.membase + UCR4);
- temp = readl(sport->port.membase + UCR1);
- writel(temp | UCR1_TXMPTYEN, sport->port.membase + UCR1);
+ if (!sport->dma_is_enabled) {
+ temp = readl(sport->port.membase + UCR1);
+ writel(temp | UCR1_TXMPTYEN, sport->port.membase + UCR1);
+ }
if (USE_IRDA(sport)) {
temp = readl(sport->port.membase + UCR1);
@@ -473,6 +608,15 @@ static void imx_start_tx(struct uart_port *port)
writel(temp, sport->port.membase + UCR4);
}
+ if (sport->dma_is_enabled) {
+ /*
+ * We may in the interrupt context, so arise a work_struct to
+ * do the real job.
+ */
+ schedule_work(&sport->tsk_dma_tx);
+ return;
+ }
+
if (readl(sport->port.membase + uts_reg(sport)) & UTS_TXEMPTY)
imx_transmit_buffer(sport);
}
@@ -588,6 +732,28 @@ out:
return IRQ_HANDLED;
}
+/*
+ * If the RXFIFO holds data, start a DMA operation to receive it.
+ */
+static void imx_dma_rxint(struct imx_port *sport)
+{
+ unsigned long temp;
+
+ temp = readl(sport->port.membase + USR2);
+ if ((temp & USR2_RDR) && !sport->dma_is_rxing) {
+ sport->dma_is_rxing = 1;
+
+ /* disable the Receiver Ready Interrupt */
+ temp = readl(sport->port.membase + UCR1);
+ temp &= ~(UCR1_RRDYEN);
+ writel(temp, sport->port.membase + UCR1);
+
+ /* tell the DMA to receive the data. */
+ schedule_work(&sport->tsk_dma_rx);
+ }
+}
+
static irqreturn_t imx_int(int irq, void *dev_id)
{
struct imx_port *sport = dev_id;
@@ -596,8 +762,12 @@ static irqreturn_t imx_int(int irq, void *dev_id)
sts = readl(sport->port.membase + USR1);
- if (sts & USR1_RRDY)
- imx_rxint(irq, dev_id);
+ if (sts & USR1_RRDY) {
+ if (sport->dma_is_enabled)
+ imx_dma_rxint(sport);
+ else
+ imx_rxint(irq, dev_id);
+ }
if (sts & USR1_TRDY &&
readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN)
@@ -654,7 +824,8 @@ static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl)
temp = readl(sport->port.membase + UCR2) & ~UCR2_CTS;
if (mctrl & TIOCM_RTS)
- temp |= UCR2_CTS;
+ if (!sport->dma_is_enabled)
+ temp |= UCR2_CTS;
writel(temp, sport->port.membase + UCR2);
}
@@ -693,6 +864,226 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
return 0;
}
+#define RX_BUF_SIZE (PAGE_SIZE)
+static int start_rx_dma(struct imx_port *sport);
+static void dma_rx_work(struct work_struct *w)
+{
+ struct imx_port *sport = container_of(w, struct imx_port, tsk_dma_rx);
+ struct tty_port *port = &sport->port.state->port;
+
+ if (sport->rx_bytes) {
+ tty_insert_flip_string(port, sport->rx_buf, sport->rx_bytes);
+ tty_flip_buffer_push(port);
+ sport->rx_bytes = 0;
+ }
+
+ if (sport->dma_is_rxing)
+ start_rx_dma(sport);
+}
+
+static void imx_rx_dma_done(struct imx_port *sport)
+{
+ unsigned long temp;
+
+ /* Enable this interrupt when the RXFIFO is empty. */
+ temp = readl(sport->port.membase + UCR1);
+ temp |= UCR1_RRDYEN;
+ writel(temp, sport->port.membase + UCR1);
+
+ sport->dma_is_rxing = 0;
+
+ /* Is the shutdown waiting for us? */
+ if (waitqueue_active(&sport->dma_wait))
+ wake_up(&sport->dma_wait);
+}
+
+/*
+ * There are three kinds of RX DMA interrupts (e.g. on the MX6Q):
+ * [1] the RX DMA buffer is full.
+ * [2] the aging timer expires (after about 8 character times).
+ * [3] the Idle Condition Detect fires (enabled via UCR4_IDDMAEN).
+ *
+ * [2] triggers when characters have been sitting in the FIFO for a
+ * while, whereas [3] waits for up to 32 idle frames once the RX line
+ * is idle and the RxFIFO is empty.
+ */
+static void dma_rx_callback(void *data)
+{
+ struct imx_port *sport = data;
+ struct dma_chan *chan = sport->dma_chan_rx;
+ struct scatterlist *sgl = &sport->rx_sgl;
+ struct dma_tx_state state;
+ enum dma_status status;
+ unsigned int count;
+
+ /* unmap it first */
+ dma_unmap_sg(sport->port.dev, sgl, 1, DMA_FROM_DEVICE);
+
+ status = chan->device->device_tx_status(chan, (dma_cookie_t)0, &state);
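+ /* the reported residue is the unfilled part of the RX buffer */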
+ count = RX_BUF_SIZE - state.residue;
+ dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
+
+ if (count) {
+ sport->rx_bytes = count;
+ schedule_work(&sport->tsk_dma_rx);
+ } else
+ imx_rx_dma_done(sport);
+}
+
+static int start_rx_dma(struct imx_port *sport)
+{
+ struct scatterlist *sgl = &sport->rx_sgl;
+ struct dma_chan *chan = sport->dma_chan_rx;
+ struct device *dev = sport->port.dev;
+ struct dma_async_tx_descriptor *desc;
+ int ret;
+
+ sg_init_one(sgl, sport->rx_buf, RX_BUF_SIZE);
+ ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE);
+ if (ret == 0) {
+ dev_err(dev, "DMA mapping error for RX.\n");
+ return -EINVAL;
+ }
+ desc = dmaengine_prep_slave_sg(chan, sgl, 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!desc) {
+ dev_err(dev, "We cannot prepare for the RX slave dma!\n");
+ return -EINVAL;
+ }
+ desc->callback = dma_rx_callback;
+ desc->callback_param = sport;
+
+ dev_dbg(dev, "RX: prepare for the DMA.\n");
+ dmaengine_submit(desc);
+ dma_async_issue_pending(chan);
+ return 0;
+}
+
+static void imx_uart_dma_exit(struct imx_port *sport)
+{
+ if (sport->dma_chan_rx) {
+ dma_release_channel(sport->dma_chan_rx);
+ sport->dma_chan_rx = NULL;
+
+ kfree(sport->rx_buf);
+ sport->rx_buf = NULL;
+ }
+
+ if (sport->dma_chan_tx) {
+ dma_release_channel(sport->dma_chan_tx);
+ sport->dma_chan_tx = NULL;
+ }
+
+ sport->dma_is_inited = 0;
+}
+
+static int imx_uart_dma_init(struct imx_port *sport)
+{
+ struct dma_slave_config slave_config = {};
+ struct device *dev = sport->port.dev;
+ int ret;
+
+ /* Prepare for RX : */
+ sport->dma_chan_rx = dma_request_slave_channel(dev, "rx");
+ if (!sport->dma_chan_rx) {
+ dev_dbg(dev, "cannot get the RX DMA channel.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ slave_config.direction = DMA_DEV_TO_MEM;
+ slave_config.src_addr = sport->port.mapbase + URXD0;
+ slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.src_maxburst = RXTL;
+ ret = dmaengine_slave_config(sport->dma_chan_rx, &slave_config);
+ if (ret) {
+ dev_err(dev, "error in RX dma configuration.\n");
+ goto err;
+ }
+
+ sport->rx_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!sport->rx_buf) {
+ dev_err(dev, "cannot alloc DMA buffer.\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ sport->rx_bytes = 0;
+
+ /* Prepare for TX : */
+ sport->dma_chan_tx = dma_request_slave_channel(dev, "tx");
+ if (!sport->dma_chan_tx) {
+ dev_err(dev, "cannot get the TX DMA channel!\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ slave_config.direction = DMA_MEM_TO_DEV;
+ slave_config.dst_addr = sport->port.mapbase + URTX0;
+ slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.dst_maxburst = TXTL;
+ ret = dmaengine_slave_config(sport->dma_chan_tx, &slave_config);
+ if (ret) {
+ dev_err(dev, "error in TX dma configuration.\n");
+ goto err;
+ }
+
+ sport->dma_is_inited = 1;
+
+ return 0;
+err:
+ imx_uart_dma_exit(sport);
+ return ret;
+}
+
+static void imx_enable_dma(struct imx_port *sport)
+{
+ unsigned long temp;
+ struct tty_port *port = &sport->port.state->port;
+
+ port->low_latency = 1;
+ INIT_WORK(&sport->tsk_dma_tx, dma_tx_work);
+ INIT_WORK(&sport->tsk_dma_rx, dma_rx_work);
+ init_waitqueue_head(&sport->dma_wait);
+
+ /* set UCR1 */
+ temp = readl(sport->port.membase + UCR1);
+ temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN |
+ /* wait for 32 idle frames for IDDMA interrupt */
+ UCR1_ICD_REG(3);
+ writel(temp, sport->port.membase + UCR1);
+
+ /* set UCR4 */
+ temp = readl(sport->port.membase + UCR4);
+ temp |= UCR4_IDDMAEN;
+ writel(temp, sport->port.membase + UCR4);
+
+ sport->dma_is_enabled = 1;
+}
+
+static void imx_disable_dma(struct imx_port *sport)
+{
+ unsigned long temp;
+ struct tty_port *port = &sport->port.state->port;
+
+ /* clear UCR1 */
+ temp = readl(sport->port.membase + UCR1);
+ temp &= ~(UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN);
+ writel(temp, sport->port.membase + UCR1);
+
+ /* clear UCR2 */
+ temp = readl(sport->port.membase + UCR2);
+ temp &= ~(UCR2_CTSC | UCR2_CTS);
+ writel(temp, sport->port.membase + UCR2);
+
+ /* clear UCR4 */
+ temp = readl(sport->port.membase + UCR4);
+ temp &= ~UCR4_IDDMAEN;
+ writel(temp, sport->port.membase + UCR4);
+
+ sport->dma_is_enabled = 0;
+ port->low_latency = 0;
+}
+
/* half the RX buffer size */
#define CTSTL 16
@@ -702,15 +1093,13 @@ static int imx_startup(struct uart_port *port)
int retval;
unsigned long flags, temp;
- if (!uart_console(port)) {
- retval = clk_prepare_enable(sport->clk_per);
- if (retval)
- goto error_out1;
- retval = clk_prepare_enable(sport->clk_ipg);
- if (retval) {
- clk_disable_unprepare(sport->clk_per);
- goto error_out1;
- }
+ retval = clk_prepare_enable(sport->clk_per);
+ if (retval)
+ goto error_out1;
+ retval = clk_prepare_enable(sport->clk_ipg);
+ if (retval) {
+ clk_disable_unprepare(sport->clk_per);
+ goto error_out1;
}
imx_setup_ufcr(sport, 0);
@@ -803,7 +1192,7 @@ static int imx_startup(struct uart_port *port)
}
}
- if (is_imx21_uart(sport)) {
+ if (!is_imx1_uart(sport)) {
temp = readl(sport->port.membase + UCR3);
temp |= IMX21_UCR3_RXDMUXSEL;
writel(temp, sport->port.membase + UCR3);
@@ -833,7 +1222,7 @@ static int imx_startup(struct uart_port *port)
if (USE_IRDA(sport)) {
struct imxuart_platform_data *pdata;
- pdata = sport->port.dev->platform_data;
+ pdata = dev_get_platdata(sport->port.dev);
sport->irda_inv_rx = pdata->irda_inv_rx;
sport->irda_inv_tx = pdata->irda_inv_tx;
sport->trcv_delay = pdata->transceiver_delay;
@@ -859,6 +1248,15 @@ static void imx_shutdown(struct uart_port *port)
unsigned long temp;
unsigned long flags;
+ if (sport->dma_is_enabled) {
+ /* We have to wait for the DMA to finish. */
+ wait_event(sport->dma_wait,
+ !sport->dma_is_rxing && !sport->dma_is_txing);
+ imx_stop_rx(port);
+ imx_disable_dma(sport);
+ imx_uart_dma_exit(sport);
+ }
+
spin_lock_irqsave(&sport->port.lock, flags);
temp = readl(sport->port.membase + UCR2);
temp &= ~(UCR2_TXEN);
@@ -867,7 +1265,7 @@ static void imx_shutdown(struct uart_port *port)
if (USE_IRDA(sport)) {
struct imxuart_platform_data *pdata;
- pdata = sport->port.dev->platform_data;
+ pdata = dev_get_platdata(sport->port.dev);
if (pdata->irda_enable)
pdata->irda_enable(0);
}
@@ -901,10 +1299,8 @@ static void imx_shutdown(struct uart_port *port)
writel(temp, sport->port.membase + UCR1);
spin_unlock_irqrestore(&sport->port.lock, flags);
- if (!uart_console(&sport->port)) {
- clk_disable_unprepare(sport->clk_per);
- clk_disable_unprepare(sport->clk_ipg);
- }
+ clk_disable_unprepare(sport->clk_per);
+ clk_disable_unprepare(sport->clk_ipg);
}
static void
@@ -947,6 +1343,11 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
if (sport->have_rtscts) {
ucr2 &= ~UCR2_IRTS;
ucr2 |= UCR2_CTSC;
+
+ /* Can we enable the DMA support? */
+ if (is_imx6q_uart(sport) && !uart_console(port)
+ && !sport->dma_is_inited)
+ imx_uart_dma_init(sport);
} else {
termios->c_cflag &= ~CRTSCTS;
}
@@ -1020,6 +1421,11 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
*/
div = 1;
} else {
+ /* custom-baudrate handling */
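+ /* a 38400 request whose quot differs from the computed divisor
+ * means a custom divisor was set; recover the real baud from it */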
+ div = sport->port.uartclk / (baud * 16);
+ if (baud == 38400 && quot != div)
+ baud = sport->port.uartclk / (quot * 16);
+
div = sport->port.uartclk / (baud * 16);
if (div > 7)
div = 7;
@@ -1048,7 +1454,7 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
writel(num, sport->port.membase + UBIR);
writel(denom, sport->port.membase + UBMR);
- if (is_imx21_uart(sport))
+ if (!is_imx1_uart(sport))
writel(sport->port.uartclk / div / 1000,
sport->port.membase + IMX21_ONEMS);
@@ -1060,6 +1466,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
imx_enable_ms(&sport->port);
+ if (sport->dma_is_inited && !sport->dma_is_enabled)
+ imx_enable_dma(sport);
spin_unlock_irqrestore(&sport->port.lock, flags);
}
@@ -1251,6 +1659,16 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
unsigned int ucr1;
unsigned long flags = 0;
int locked = 1;
+ int retval;
+
+ retval = clk_enable(sport->clk_per);
+ if (retval)
+ return;
+ retval = clk_enable(sport->clk_ipg);
+ if (retval) {
+ clk_disable(sport->clk_per);
+ return;
+ }
if (sport->port.sysrq)
locked = 0;
@@ -1286,6 +1704,9 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
if (locked)
spin_unlock_irqrestore(&sport->port.lock, flags);
+
+ clk_disable(sport->clk_ipg);
+ clk_disable(sport->clk_per);
}
/*
@@ -1359,6 +1780,7 @@ imx_console_setup(struct console *co, char *options)
int bits = 8;
int parity = 'n';
int flow = 'n';
+ int retval;
/*
* Check whether an invalid uart number has been specified, and
@@ -1371,6 +1793,11 @@ imx_console_setup(struct console *co, char *options)
if (sport == NULL)
return -ENODEV;
+ /* For setting the registers, we only need to enable the ipg clock. */
+ retval = clk_prepare_enable(sport->clk_ipg);
+ if (retval)
+ goto error_console;
+
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
else
@@ -1378,7 +1805,20 @@ imx_console_setup(struct console *co, char *options)
imx_setup_ufcr(sport, 0);
- return uart_set_options(&sport->port, co, baud, parity, bits, flow);
+ retval = uart_set_options(&sport->port, co, baud, parity, bits, flow);
+
+ clk_disable(sport->clk_ipg);
+ if (retval) {
+ clk_unprepare(sport->clk_ipg);
+ goto error_console;
+ }
+
+ retval = clk_prepare(sport->clk_per);
+ if (retval)
+ clk_disable_unprepare(sport->clk_ipg);
+
+error_console:
+ return retval;
}
static struct uart_driver imx_reg;
@@ -1472,6 +1912,9 @@ static int serial_imx_probe_dt(struct imx_port *sport,
sport->devdata = of_id->data;
+ if (of_device_is_stdout_path(np))
+ add_preferred_console(imx_reg.cons->name, sport->port.line, 0);
+
return 0;
}
#else
@@ -1485,7 +1928,7 @@ static inline int serial_imx_probe_dt(struct imx_port *sport,
static void serial_imx_probe_pdata(struct imx_port *sport,
struct platform_device *pdev)
{
- struct imxuart_platform_data *pdata = pdev->dev.platform_data;
+ struct imxuart_platform_data *pdata = dev_get_platdata(&pdev->dev);
sport->port.line = pdev->id;
sport->devdata = (struct imx_uart_data *) pdev->id_entry->driver_data;
@@ -1507,7 +1950,6 @@ static int serial_imx_probe(struct platform_device *pdev)
void __iomem *base;
int ret = 0;
struct resource *res;
- struct pinctrl *pinctrl;
sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
if (!sport)
@@ -1543,13 +1985,6 @@ static int serial_imx_probe(struct platform_device *pdev)
sport->timer.function = imx_timeout;
sport->timer.data = (unsigned long)sport;
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl)) {
- ret = PTR_ERR(pinctrl);
- dev_err(&pdev->dev, "failed to get default pinctrl: %d\n", ret);
- return ret;
- }
-
sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(sport->clk_ipg)) {
ret = PTR_ERR(sport->clk_ipg);
@@ -1564,18 +1999,15 @@ static int serial_imx_probe(struct platform_device *pdev)
return ret;
}
- clk_prepare_enable(sport->clk_per);
- clk_prepare_enable(sport->clk_ipg);
-
sport->port.uartclk = clk_get_rate(sport->clk_per);
imx_ports[sport->port.line] = sport;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata && pdata->init) {
ret = pdata->init(pdev);
if (ret)
- goto clkput;
+ return ret;
}
ret = uart_add_one_port(&imx_reg, &sport->port);
@@ -1583,18 +2015,10 @@ static int serial_imx_probe(struct platform_device *pdev)
goto deinit;
platform_set_drvdata(pdev, sport);
- if (!uart_console(&sport->port)) {
- clk_disable_unprepare(sport->clk_per);
- clk_disable_unprepare(sport->clk_ipg);
- }
-
return 0;
deinit:
if (pdata && pdata->exit)
pdata->exit(pdev);
-clkput:
- clk_disable_unprepare(sport->clk_per);
- clk_disable_unprepare(sport->clk_ipg);
return ret;
}
@@ -1603,9 +2027,7 @@ static int serial_imx_remove(struct platform_device *pdev)
struct imxuart_platform_data *pdata;
struct imx_port *sport = platform_get_drvdata(pdev);
- pdata = pdev->dev.platform_data;
-
- platform_set_drvdata(pdev, NULL);
+ pdata = dev_get_platdata(&pdev->dev);
uart_remove_one_port(&imx_reg, &sport->port);
diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
index e2520abcb1c..1274499850f 100644
--- a/drivers/tty/serial/ioc4_serial.c
+++ b/drivers/tty/serial/ioc4_serial.c
@@ -297,7 +297,7 @@ struct ioc4_serial {
struct ioc4_uartregs uart_1;
struct ioc4_uartregs uart_2;
struct ioc4_uartregs uart_3;
-} ioc4_serial;
+};
/* UART clock speed */
#define IOC4_SER_XIN_CLK_66 66666667
@@ -2767,7 +2767,7 @@ ioc4_serial_core_attach(struct pci_dev *pdev, int port_type)
* called per card found from IOC4 master module.
* @idd: Master module data for this IOC4
*/
-int
+static int
ioc4_serial_attach_one(struct ioc4_driver_data *idd)
{
unsigned long tmp_addr1;
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 15733da757c..88d01e0bb0c 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -318,7 +318,7 @@ lqasc_startup(struct uart_port *port)
struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
int retval;
- if (ltq_port->clk)
+ if (!IS_ERR(ltq_port->clk))
clk_enable(ltq_port->clk);
port->uartclk = clk_get_rate(ltq_port->fpiclk);
@@ -386,7 +386,7 @@ lqasc_shutdown(struct uart_port *port)
port->membase + LTQ_ASC_RXFCON);
ltq_w32_mask(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU,
port->membase + LTQ_ASC_TXFCON);
- if (ltq_port->clk)
+ if (!IS_ERR(ltq_port->clk))
clk_disable(ltq_port->clk);
}
@@ -636,6 +636,9 @@ lqasc_console_setup(struct console *co, char *options)
port = &ltq_port->port;
+ if (!IS_ERR(ltq_port->clk))
+ clk_enable(ltq_port->clk);
+
port->uartclk = clk_get_rate(ltq_port->fpiclk);
if (options)
diff --git a/drivers/tty/serial/lpc32xx_hs.c b/drivers/tty/serial/lpc32xx_hs.c
index dffea6b2cd7..701644f0682 100644
--- a/drivers/tty/serial/lpc32xx_hs.c
+++ b/drivers/tty/serial/lpc32xx_hs.c
@@ -279,7 +279,10 @@ static void __serial_lpc32xx_rx(struct uart_port *port)
tmp = readl(LPC32XX_HSUART_FIFO(port->membase));
}
+
+ spin_unlock(&port->lock);
tty_flip_buffer_push(tport);
+ spin_lock(&port->lock);
}
static void __serial_lpc32xx_tx(struct uart_port *port)
@@ -351,10 +354,8 @@ static irqreturn_t serial_lpc32xx_interrupt(int irq, void *dev_id)
}
/* Data received? */
- if (status & (LPC32XX_HSU_RX_TIMEOUT_INT | LPC32XX_HSU_RX_TRIG_INT)) {
+ if (status & (LPC32XX_HSU_RX_TIMEOUT_INT | LPC32XX_HSU_RX_TRIG_INT))
__serial_lpc32xx_rx(port);
- tty_flip_buffer_push(tport);
- }
/* Transmit data request? */
if ((status & LPC32XX_HSU_TX_INT) && (!uart_tx_stopped(port))) {
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index bb1afa0922e..9cd9b4eba9f 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -368,7 +368,10 @@ static void receive_chars(struct uart_sio_port *up, int *status)
ignore_char:
*status = serial_in(up, UART_LSR);
} while ((*status & UART_LSR_DR) && (max_count-- > 0));
+
+ spin_unlock(&up->port.lock);
tty_flip_buffer_push(port);
+ spin_lock(&up->port.lock);
}
static void transmit_chars(struct uart_sio_port *up)
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index 35866d5872a..79f9a9eff54 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -779,7 +779,7 @@ static int max3100_probe(struct spi_device *spi)
max3100s[i]->irq = spi->irq;
spin_lock_init(&max3100s[i]->conf_lock);
spi_set_drvdata(spi, max3100s[i]);
- pdata = spi->dev.platform_data;
+ pdata = dev_get_platdata(&spi->dev);
max3100s[i]->crystal = pdata->crystal;
max3100s[i]->loopback = pdata->loopback;
max3100s[i]->poll_time = pdata->poll_time * HZ / 1000;
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 8941e641894..b2e707aa603 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -1,7 +1,7 @@
/*
- * Maxim (Dallas) MAX3107/8 serial driver
+ * Maxim (Dallas) MAX3107/8/9, MAX14830 serial driver
*
- * Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru>
+ * Copyright (C) 2012-2013 Alexander Shiyan <shc_work@mail.ru>
*
* Based on max3100.c, by Christian Pellegrin <chripell@evolware.org>
* Based on max3110.c, by Feng Tang <feng.tang@intel.com>
@@ -13,11 +13,10 @@
* (at your option) any later version.
*/
-/* TODO: MAX3109 support (Dual) */
-/* TODO: MAX14830 support (Quad) */
-
#include <linux/module.h>
+#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/bitops.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/tty.h>
@@ -25,8 +24,10 @@
#include <linux/regmap.h>
#include <linux/gpio.h>
#include <linux/spi/spi.h>
+
#include <linux/platform_data/max310x.h>
+#define MAX310X_NAME "max310x"
#define MAX310X_MAJOR 204
#define MAX310X_MINOR 209
@@ -37,7 +38,8 @@
#define MAX310X_IRQSTS_REG (0x02) /* IRQ status */
#define MAX310X_LSR_IRQEN_REG (0x03) /* LSR IRQ enable */
#define MAX310X_LSR_IRQSTS_REG (0x04) /* LSR IRQ status */
-#define MAX310X_SPCHR_IRQEN_REG (0x05) /* Special char IRQ enable */
+#define MAX310X_REG_05 (0x05)
+#define MAX310X_SPCHR_IRQEN_REG MAX310X_REG_05 /* Special char IRQ en */
#define MAX310X_SPCHR_IRQSTS_REG (0x06) /* Special char IRQ status */
#define MAX310X_STS_IRQEN_REG (0x07) /* Status IRQ enable */
#define MAX310X_STS_IRQSTS_REG (0x08) /* Status IRQ status */
@@ -63,8 +65,15 @@
#define MAX310X_BRGDIVLSB_REG (0x1c) /* Baud rate divisor LSB */
#define MAX310X_BRGDIVMSB_REG (0x1d) /* Baud rate divisor MSB */
#define MAX310X_CLKSRC_REG (0x1e) /* Clock source */
-/* Only present in MAX3107 */
-#define MAX3107_REVID_REG (0x1f) /* Revision identification */
+#define MAX310X_REG_1F (0x1f)
+
+#define MAX310X_REVID_REG MAX310X_REG_1F /* Revision ID */
+
+#define MAX310X_GLOBALIRQ_REG MAX310X_REG_1F /* Global IRQ (RO) */
+#define MAX310X_GLOBALCMD_REG MAX310X_REG_1F /* Global Command (WO) */
+
+/* Extended registers */
+#define MAX310X_REVID_EXTREG MAX310X_REG_05 /* Revision ID */
/* IRQ register bits */
#define MAX310X_IRQ_LSR_BIT (1 << 0) /* LSR interrupt */
@@ -246,58 +255,210 @@
#define MAX310X_CLKSRC_EXTCLK_BIT (1 << 4) /* External clock enable */
#define MAX310X_CLKSRC_CLK2RTS_BIT (1 << 7) /* Baud clk to RTS pin */
+/* Global commands */
+#define MAX310X_EXTREG_ENBL (0xce)
+#define MAX310X_EXTREG_DSBL (0xcd)
+
/* Misc definitions */
#define MAX310X_FIFO_SIZE (128)
+#define MAX310x_REV_MASK (0xfc)
/* MAX3107 specific */
#define MAX3107_REV_ID (0xa0)
-#define MAX3107_REV_MASK (0xfe)
-
-/* IRQ status bits definitions */
-#define MAX310X_IRQ_TX (MAX310X_IRQ_TXFIFO_BIT | \
- MAX310X_IRQ_TXEMPTY_BIT)
-#define MAX310X_IRQ_RX (MAX310X_IRQ_RXFIFO_BIT | \
- MAX310X_IRQ_RXEMPTY_BIT)
-
-/* Supported chip types */
-enum {
- MAX310X_TYPE_MAX3107 = 3107,
- MAX310X_TYPE_MAX3108 = 3108,
+
+/* MAX3109 specific */
+#define MAX3109_REV_ID (0xc0)
+
+/* MAX14830 specific */
+#define MAX14830_BRGCFG_CLKDIS_BIT (1 << 6) /* Clock Disable */
+#define MAX14830_REV_ID (0xb0)
+
+struct max310x_devtype {
+ char name[9];
+ int nr;
+ int (*detect)(struct device *);
+ void (*power)(struct uart_port *, int);
};
-struct max310x_port {
- struct uart_driver uart;
+struct max310x_one {
struct uart_port port;
+ struct work_struct tx_work;
+};
- const char *name;
- int uartclk;
-
- unsigned int nr_gpio;
+struct max310x_port {
+ struct uart_driver uart;
+ struct max310x_devtype *devtype;
+ struct regmap *regmap;
+ struct regmap_config regcfg;
+ struct mutex mutex;
+ struct max310x_pdata *pdata;
+ int gpio_used;
#ifdef CONFIG_GPIOLIB
struct gpio_chip gpio;
#endif
+ struct max310x_one p[0];
+};
- struct regmap *regmap;
- struct regmap_config regcfg;
+static u8 max310x_port_read(struct uart_port *port, u8 reg)
+{
+ struct max310x_port *s = dev_get_drvdata(port->dev);
+ unsigned int val = 0;
- struct workqueue_struct *wq;
- struct work_struct tx_work;
+ regmap_read(s->regmap, port->iobase + reg, &val);
- struct mutex max310x_mutex;
+ return val;
+}
- struct max310x_pdata *pdata;
+static void max310x_port_write(struct uart_port *port, u8 reg, u8 val)
+{
+ struct max310x_port *s = dev_get_drvdata(port->dev);
+
+ regmap_write(s->regmap, port->iobase + reg, val);
+}
+
+static void max310x_port_update(struct uart_port *port, u8 reg, u8 mask, u8 val)
+{
+ struct max310x_port *s = dev_get_drvdata(port->dev);
+
+ regmap_update_bits(s->regmap, port->iobase + reg, mask, val);
+}
+
+static int max3107_detect(struct device *dev)
+{
+ struct max310x_port *s = dev_get_drvdata(dev);
+ unsigned int val = 0;
+ int ret;
+
+ ret = regmap_read(s->regmap, MAX310X_REVID_REG, &val);
+ if (ret)
+ return ret;
+
+ if (((val & MAX310x_REV_MASK) != MAX3107_REV_ID)) {
+ dev_err(dev,
+ "%s ID 0x%02x does not match\n", s->devtype->name, val);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int max3108_detect(struct device *dev)
+{
+ struct max310x_port *s = dev_get_drvdata(dev);
+ unsigned int val = 0;
+ int ret;
+
+ /* The MAX3108 has no REV ID register; just check the default value
+ * of the clock-source register to make sure the chip responds.
+ */
+ ret = regmap_read(s->regmap, MAX310X_CLKSRC_REG, &val);
+ if (ret)
+ return ret;
+
+ if (val != (MAX310X_CLKSRC_EXTCLK_BIT | MAX310X_CLKSRC_PLLBYP_BIT)) {
+ dev_err(dev, "%s not present\n", s->devtype->name);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int max3109_detect(struct device *dev)
+{
+ struct max310x_port *s = dev_get_drvdata(dev);
+ unsigned int val = 0;
+ int ret;
+
+ ret = regmap_read(s->regmap, MAX310X_REVID_REG, &val);
+ if (ret)
+ return ret;
+
+ if (((val & MAX310x_REV_MASK) != MAX3109_REV_ID)) {
+ dev_err(dev,
+ "%s ID 0x%02x does not match\n", s->devtype->name, val);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void max310x_power(struct uart_port *port, int on)
+{
+ max310x_port_update(port, MAX310X_MODE1_REG,
+ MAX310X_MODE1_FORCESLEEP_BIT,
+ on ? 0 : MAX310X_MODE1_FORCESLEEP_BIT);
+ if (on)
+ msleep(50);
+}
+
+static int max14830_detect(struct device *dev)
+{
+ struct max310x_port *s = dev_get_drvdata(dev);
+ unsigned int val = 0;
+ int ret;
+
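+ /* the revision ID lives in an extended register; unlock it first */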
+ ret = regmap_write(s->regmap, MAX310X_GLOBALCMD_REG,
+ MAX310X_EXTREG_ENBL);
+ if (ret)
+ return ret;
+
+ regmap_read(s->regmap, MAX310X_REVID_EXTREG, &val);
+ regmap_write(s->regmap, MAX310X_GLOBALCMD_REG, MAX310X_EXTREG_DSBL);
+ if (((val & MAX310x_REV_MASK) != MAX14830_REV_ID)) {
+ dev_err(dev,
+ "%s ID 0x%02x does not match\n", s->devtype->name, val);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void max14830_power(struct uart_port *port, int on)
+{
+ max310x_port_update(port, MAX310X_BRGCFG_REG,
+ MAX14830_BRGCFG_CLKDIS_BIT,
+ on ? 0 : MAX14830_BRGCFG_CLKDIS_BIT);
+ if (on)
+ msleep(50);
+}
+
+static const struct max310x_devtype max3107_devtype = {
+ .name = "MAX3107",
+ .nr = 1,
+ .detect = max3107_detect,
+ .power = max310x_power,
+};
+
+static const struct max310x_devtype max3108_devtype = {
+ .name = "MAX3108",
+ .nr = 1,
+ .detect = max3108_detect,
+ .power = max310x_power,
+};
+
+static const struct max310x_devtype max3109_devtype = {
+ .name = "MAX3109",
+ .nr = 2,
+ .detect = max3109_detect,
+ .power = max310x_power,
+};
+
+static const struct max310x_devtype max14830_devtype = {
+ .name = "MAX14830",
+ .nr = 4,
+ .detect = max14830_detect,
+ .power = max14830_power,
};
-static bool max3107_8_reg_writeable(struct device *dev, unsigned int reg)
+static bool max310x_reg_writeable(struct device *dev, unsigned int reg)
{
- switch (reg) {
+ switch (reg & 0x1f) {
case MAX310X_IRQSTS_REG:
case MAX310X_LSR_IRQSTS_REG:
case MAX310X_SPCHR_IRQSTS_REG:
case MAX310X_STS_IRQSTS_REG:
case MAX310X_TXFIFOLVL_REG:
case MAX310X_RXFIFOLVL_REG:
- case MAX3107_REVID_REG: /* Only available on MAX3107 */
return false;
default:
break;
@@ -308,7 +469,7 @@ static bool max3107_8_reg_writeable(struct device *dev, unsigned int reg)
static bool max310x_reg_volatile(struct device *dev, unsigned int reg)
{
- switch (reg) {
+ switch (reg & 0x1f) {
case MAX310X_RHR_REG:
case MAX310X_IRQSTS_REG:
case MAX310X_LSR_IRQSTS_REG:
@@ -317,6 +478,9 @@ static bool max310x_reg_volatile(struct device *dev, unsigned int reg)
case MAX310X_TXFIFOLVL_REG:
case MAX310X_RXFIFOLVL_REG:
case MAX310X_GPIODATA_REG:
+ case MAX310X_BRGDIVLSB_REG:
+ case MAX310X_REG_05:
+ case MAX310X_REG_1F:
return true;
default:
break;
@@ -327,7 +491,7 @@ static bool max310x_reg_volatile(struct device *dev, unsigned int reg)
static bool max310x_reg_precious(struct device *dev, unsigned int reg)
{
- switch (reg) {
+ switch (reg & 0x1f) {
case MAX310X_RHR_REG:
case MAX310X_IRQSTS_REG:
case MAX310X_SPCHR_IRQSTS_REG:
@@ -340,42 +504,25 @@ static bool max310x_reg_precious(struct device *dev, unsigned int reg)
return false;
}
-static void max310x_set_baud(struct max310x_port *s, int baud)
+static void max310x_set_baud(struct uart_port *port, int baud)
{
- unsigned int mode = 0, div = s->uartclk / baud;
+ unsigned int mode = 0, div = port->uartclk / baud;
if (!(div / 16)) {
/* Mode x2 */
mode = MAX310X_BRGCFG_2XMODE_BIT;
- div = (s->uartclk * 2) / baud;
+ div = (port->uartclk * 2) / baud;
}
if (!(div / 16)) {
/* Mode x4 */
mode = MAX310X_BRGCFG_4XMODE_BIT;
- div = (s->uartclk * 4) / baud;
+ div = (port->uartclk * 4) / baud;
}
- regmap_write(s->regmap, MAX310X_BRGDIVMSB_REG,
- ((div / 16) >> 8) & 0xff);
- regmap_write(s->regmap, MAX310X_BRGDIVLSB_REG, (div / 16) & 0xff);
- regmap_write(s->regmap, MAX310X_BRGCFG_REG, (div % 16) | mode);
-}
-
-static void max310x_wait_pll(struct max310x_port *s)
-{
- int tryes = 1000;
-
- /* Wait for PLL only if crystal is used */
- if (!(s->pdata->driver_flags & MAX310X_EXT_CLK)) {
- unsigned int sts = 0;
-
- while (tryes--) {
- regmap_read(s->regmap, MAX310X_STS_IRQSTS_REG, &sts);
- if (sts & MAX310X_STS_CLKREADY_BIT)
- break;
- }
- }
+ max310x_port_write(port, MAX310X_BRGDIVMSB_REG, (div / 16) >> 8);
+ max310x_port_write(port, MAX310X_BRGDIVLSB_REG, div / 16);
+ max310x_port_write(port, MAX310X_BRGCFG_REG, (div % 16) | mode);
}
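
For reference, the divisor arithmetic in max310x_set_baud() can be checked in isolation. The sketch below reuses the same formula with local stand-in bit names (the real MAX310X_BRGCFG_*XMODE_BIT values are not reproduced here) and a hypothetical 3.6864 MHz reference clock:

    #include <stdio.h>

    /* Local stand-ins for the x2/x4 rate-mode bits; values are illustrative,
     * not taken from the datasheet. */
    #define BRGCFG_2XMODE (1 << 4)
    #define BRGCFG_4XMODE (1 << 5)

    static void show_baud_regs(unsigned int uartclk, unsigned int baud)
    {
        unsigned int mode = 0, div = uartclk / baud;

        if (!(div / 16)) {              /* integer divisor would be 0: x2 mode */
            mode = BRGCFG_2XMODE;
            div = (uartclk * 2) / baud;
        }
        if (!(div / 16)) {              /* still 0: x4 mode */
            mode = BRGCFG_4XMODE;
            div = (uartclk * 4) / baud;
        }

        printf("clk=%u baud=%u -> DIVMSB=0x%02x DIVLSB=0x%02x BRGCFG=0x%02x\n",
               uartclk, baud,
               ((div / 16) >> 8) & 0xff,    /* integer divisor, high byte */
               (div / 16) & 0xff,           /* integer divisor, low byte */
               (div % 16) | mode);          /* fractional part plus rate mode */
    }

    int main(void)
    {
        show_baud_regs(3686400, 9600);      /* hypothetical reference clock */
        show_baud_regs(3686400, 921600);    /* forces the x4 path */
        return 0;
    }
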
static int max310x_update_best_err(unsigned long f, long *besterr)
@@ -449,49 +596,49 @@ static int max310x_set_ref_clk(struct max310x_port *s)
regmap_write(s->regmap, MAX310X_CLKSRC_REG, clksrc);
- if (pllcfg)
- max310x_wait_pll(s);
-
- dev_dbg(s->port.dev, "Reference clock set to %lu Hz\n", bestfreq);
+ /* Wait for crystal */
+ if (pllcfg && !(s->pdata->driver_flags & MAX310X_EXT_CLK))
+ msleep(10);
return (int)bestfreq;
}
-static void max310x_handle_rx(struct max310x_port *s, unsigned int rxlen)
+static void max310x_handle_rx(struct uart_port *port, unsigned int rxlen)
{
- unsigned int sts = 0, ch = 0, flag;
+ unsigned int sts, ch, flag;
- if (unlikely(rxlen >= MAX310X_FIFO_SIZE)) {
- dev_warn(s->port.dev, "Possible RX FIFO overrun %d\n", rxlen);
+ if (unlikely(rxlen >= port->fifosize)) {
+ dev_warn_ratelimited(port->dev,
+ "Port %i: Possible RX FIFO overrun\n",
+ port->line);
+ port->icount.buf_overrun++;
/* Ensure sanity of RX level */
- rxlen = MAX310X_FIFO_SIZE;
+ rxlen = port->fifosize;
}
- dev_dbg(s->port.dev, "RX Len = %u\n", rxlen);
-
while (rxlen--) {
- regmap_read(s->regmap, MAX310X_RHR_REG, &ch);
- regmap_read(s->regmap, MAX310X_LSR_IRQSTS_REG, &sts);
+ ch = max310x_port_read(port, MAX310X_RHR_REG);
+ sts = max310x_port_read(port, MAX310X_LSR_IRQSTS_REG);
sts &= MAX310X_LSR_RXPAR_BIT | MAX310X_LSR_FRERR_BIT |
MAX310X_LSR_RXOVR_BIT | MAX310X_LSR_RXBRK_BIT;
- s->port.icount.rx++;
+ port->icount.rx++;
flag = TTY_NORMAL;
if (unlikely(sts)) {
if (sts & MAX310X_LSR_RXBRK_BIT) {
- s->port.icount.brk++;
- if (uart_handle_break(&s->port))
+ port->icount.brk++;
+ if (uart_handle_break(port))
continue;
} else if (sts & MAX310X_LSR_RXPAR_BIT)
- s->port.icount.parity++;
+ port->icount.parity++;
else if (sts & MAX310X_LSR_FRERR_BIT)
- s->port.icount.frame++;
+ port->icount.frame++;
else if (sts & MAX310X_LSR_RXOVR_BIT)
- s->port.icount.overrun++;
+ port->icount.overrun++;
- sts &= s->port.read_status_mask;
+ sts &= port->read_status_mask;
if (sts & MAX310X_LSR_RXBRK_BIT)
flag = TTY_BREAK;
else if (sts & MAX310X_LSR_RXPAR_BIT)
@@ -502,129 +649,129 @@ static void max310x_handle_rx(struct max310x_port *s, unsigned int rxlen)
flag = TTY_OVERRUN;
}
- if (uart_handle_sysrq_char(s->port, ch))
+ if (uart_handle_sysrq_char(port, ch))
continue;
- if (sts & s->port.ignore_status_mask)
+ if (sts & port->ignore_status_mask)
continue;
- uart_insert_char(&s->port, sts, MAX310X_LSR_RXOVR_BIT,
- ch, flag);
+ uart_insert_char(port, sts, MAX310X_LSR_RXOVR_BIT, ch, flag);
}
- tty_flip_buffer_push(&s->port.state->port);
+ tty_flip_buffer_push(&port->state->port);
}
-static void max310x_handle_tx(struct max310x_port *s)
+static void max310x_handle_tx(struct uart_port *port)
{
- struct circ_buf *xmit = &s->port.state->xmit;
- unsigned int txlen = 0, to_send;
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned int txlen, to_send;
- if (unlikely(s->port.x_char)) {
- regmap_write(s->regmap, MAX310X_THR_REG, s->port.x_char);
- s->port.icount.tx++;
- s->port.x_char = 0;
+ if (unlikely(port->x_char)) {
+ max310x_port_write(port, MAX310X_THR_REG, port->x_char);
+ port->icount.tx++;
+ port->x_char = 0;
return;
}
- if (uart_circ_empty(xmit) || uart_tx_stopped(&s->port))
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port))
return;
/* Get length of data pending in circular buffer */
to_send = uart_circ_chars_pending(xmit);
if (likely(to_send)) {
/* Limit to size of TX FIFO */
- regmap_read(s->regmap, MAX310X_TXFIFOLVL_REG, &txlen);
- txlen = MAX310X_FIFO_SIZE - txlen;
+ txlen = max310x_port_read(port, MAX310X_TXFIFOLVL_REG);
+ txlen = port->fifosize - txlen;
to_send = (to_send > txlen) ? txlen : to_send;
- dev_dbg(s->port.dev, "TX Len = %u\n", to_send);
-
/* Add data to send */
- s->port.icount.tx += to_send;
+ port->icount.tx += to_send;
while (to_send--) {
- regmap_write(s->regmap, MAX310X_THR_REG,
- xmit->buf[xmit->tail]);
+ max310x_port_write(port, MAX310X_THR_REG,
+ xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
};
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(&s->port);
+ uart_write_wakeup(port);
}
-static irqreturn_t max310x_ist(int irq, void *dev_id)
+static void max310x_port_irq(struct max310x_port *s, int portno)
{
- struct max310x_port *s = (struct max310x_port *)dev_id;
- unsigned int ists = 0, lsr = 0, rxlen = 0;
+ struct uart_port *port = &s->p[portno].port;
- mutex_lock(&s->max310x_mutex);
+ do {
+ unsigned int ists, lsr, rxlen;
- for (;;) {
/* Read IRQ status & RX FIFO level */
- regmap_read(s->regmap, MAX310X_IRQSTS_REG, &ists);
- regmap_read(s->regmap, MAX310X_LSR_IRQSTS_REG, &lsr);
- regmap_read(s->regmap, MAX310X_RXFIFOLVL_REG, &rxlen);
- if (!ists && !(lsr & MAX310X_LSR_RXTO_BIT) && !rxlen)
+ ists = max310x_port_read(port, MAX310X_IRQSTS_REG);
+ rxlen = max310x_port_read(port, MAX310X_RXFIFOLVL_REG);
+ if (!ists && !rxlen)
break;
- dev_dbg(s->port.dev, "IRQ status: 0x%02x\n", ists);
-
- if (rxlen)
- max310x_handle_rx(s, rxlen);
- if (ists & MAX310X_IRQ_TX)
- max310x_handle_tx(s);
- if (ists & MAX310X_IRQ_CTS_BIT)
- uart_handle_cts_change(&s->port,
+ if (ists & MAX310X_IRQ_CTS_BIT) {
+ lsr = max310x_port_read(port, MAX310X_LSR_IRQSTS_REG);
+ uart_handle_cts_change(port,
!!(lsr & MAX310X_LSR_CTS_BIT));
- }
+ }
+ if (rxlen)
+ max310x_handle_rx(port, rxlen);
+ if (ists & MAX310X_IRQ_TXEMPTY_BIT) {
+ mutex_lock(&s->mutex);
+ max310x_handle_tx(port);
+ mutex_unlock(&s->mutex);
+ }
+ } while (1);
+}
+
+static irqreturn_t max310x_ist(int irq, void *dev_id)
+{
+ struct max310x_port *s = (struct max310x_port *)dev_id;
- mutex_unlock(&s->max310x_mutex);
+ if (s->uart.nr > 1) {
+ do {
+ unsigned int val = ~0;
+
+ WARN_ON_ONCE(regmap_read(s->regmap,
+ MAX310X_GLOBALIRQ_REG, &val));
+ val = ((1 << s->uart.nr) - 1) & ~val;
+ if (!val)
+ break;
+ max310x_port_irq(s, fls(val) - 1);
+ } while (1);
+ } else
+ max310x_port_irq(s, 0);
return IRQ_HANDLED;
}
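
The fan-out above relies on the global IRQ register exposing one active-low bit per UART. A small stand-alone sketch of the bit manipulation, with my_fls() standing in for the kernel's fls() and a made-up register snapshot:

    #include <stdio.h>

    /* Stand-in for the kernel's fls(): 1-based index of the highest set bit,
     * 0 when no bit is set. */
    static int my_fls(unsigned int x)
    {
        int pos = 0;

        while (x) {
            x >>= 1;
            pos++;
        }
        return pos;
    }

    int main(void)
    {
        unsigned int nr = 4;            /* e.g. a four-port MAX14830 */
        unsigned int globalirq = 0xfa;  /* made-up snapshot: bits 0 and 2 low,
                                         * i.e. ports 0 and 2 are pending */
        unsigned int pending = ((1 << nr) - 1) & ~globalirq;

        while (pending) {
            int portno = my_fls(pending) - 1;

            printf("servicing port %d\n", portno);
            pending &= ~(1u << portno); /* the driver re-reads GLOBALIRQ
                                         * each iteration instead */
        }
        return 0;
    }
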
static void max310x_wq_proc(struct work_struct *ws)
{
- struct max310x_port *s = container_of(ws, struct max310x_port, tx_work);
+ struct max310x_one *one = container_of(ws, struct max310x_one, tx_work);
+ struct max310x_port *s = dev_get_drvdata(one->port.dev);
- mutex_lock(&s->max310x_mutex);
- max310x_handle_tx(s);
- mutex_unlock(&s->max310x_mutex);
+ mutex_lock(&s->mutex);
+ max310x_handle_tx(&one->port);
+ mutex_unlock(&s->mutex);
}
static void max310x_start_tx(struct uart_port *port)
{
- struct max310x_port *s = container_of(port, struct max310x_port, port);
-
- queue_work(s->wq, &s->tx_work);
-}
-
-static void max310x_stop_tx(struct uart_port *port)
-{
- /* Do nothing */
-}
+ struct max310x_one *one = container_of(port, struct max310x_one, port);
-static void max310x_stop_rx(struct uart_port *port)
-{
- /* Do nothing */
+ if (!work_pending(&one->tx_work))
+ schedule_work(&one->tx_work);
}
static unsigned int max310x_tx_empty(struct uart_port *port)
{
- unsigned int val = 0;
- struct max310x_port *s = container_of(port, struct max310x_port, port);
+ unsigned int lvl, sts;
- mutex_lock(&s->max310x_mutex);
- regmap_read(s->regmap, MAX310X_TXFIFOLVL_REG, &val);
- mutex_unlock(&s->max310x_mutex);
+ lvl = max310x_port_read(port, MAX310X_TXFIFOLVL_REG);
+ sts = max310x_port_read(port, MAX310X_IRQSTS_REG);
- return val ? 0 : TIOCSER_TEMT;
-}
-
-static void max310x_enable_ms(struct uart_port *port)
-{
- /* Modem status not supported */
+ return ((sts & MAX310X_IRQ_TXEMPTY_BIT) && !lvl) ? TIOCSER_TEMT : 0;
}
static unsigned int max310x_get_mctrl(struct uart_port *port)
@@ -644,28 +791,20 @@ static void max310x_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void max310x_break_ctl(struct uart_port *port, int break_state)
{
- struct max310x_port *s = container_of(port, struct max310x_port, port);
-
- mutex_lock(&s->max310x_mutex);
- regmap_update_bits(s->regmap, MAX310X_LCR_REG,
- MAX310X_LCR_TXBREAK_BIT,
- break_state ? MAX310X_LCR_TXBREAK_BIT : 0);
- mutex_unlock(&s->max310x_mutex);
+ max310x_port_update(port, MAX310X_LCR_REG,
+ MAX310X_LCR_TXBREAK_BIT,
+ break_state ? MAX310X_LCR_TXBREAK_BIT : 0);
}
static void max310x_set_termios(struct uart_port *port,
struct ktermios *termios,
struct ktermios *old)
{
- struct max310x_port *s = container_of(port, struct max310x_port, port);
unsigned int lcr, flow = 0;
int baud;
- mutex_lock(&s->max310x_mutex);
-
/* Mask termios capabilities we don't support */
termios->c_cflag &= ~CMSPAR;
- termios->c_iflag &= ~IXANY;
/* Word size */
switch (termios->c_cflag & CSIZE) {
@@ -696,7 +835,7 @@ static void max310x_set_termios(struct uart_port *port,
lcr |= MAX310X_LCR_STOPLEN_BIT; /* 2 stops */
/* Update LCR register */
- regmap_write(s->regmap, MAX310X_LCR_REG, lcr);
+ max310x_port_write(port, MAX310X_LCR_REG, lcr);
/* Set read status mask */
port->read_status_mask = MAX310X_LSR_RXOVR_BIT;
@@ -717,8 +856,8 @@ static void max310x_set_termios(struct uart_port *port,
MAX310X_LSR_RXBRK_BIT;
/* Configure flow control */
- regmap_write(s->regmap, MAX310X_XON1_REG, termios->c_cc[VSTART]);
- regmap_write(s->regmap, MAX310X_XOFF1_REG, termios->c_cc[VSTOP]);
+ max310x_port_write(port, MAX310X_XON1_REG, termios->c_cc[VSTART]);
+ max310x_port_write(port, MAX310X_XOFF1_REG, termios->c_cc[VSTOP]);
if (termios->c_cflag & CRTSCTS)
flow |= MAX310X_FLOWCTRL_AUTOCTS_BIT |
MAX310X_FLOWCTRL_AUTORTS_BIT;
@@ -728,7 +867,7 @@ static void max310x_set_termios(struct uart_port *port,
if (termios->c_iflag & IXOFF)
flow |= MAX310X_FLOWCTRL_SWFLOW1_BIT |
MAX310X_FLOWCTRL_SWFLOWEN_BIT;
- regmap_write(s->regmap, MAX310X_FLOWCTRL_REG, flow);
+ max310x_port_write(port, MAX310X_FLOWCTRL_REG, flow);
/* Get baud rate generator configuration */
baud = uart_get_baud_rate(port, termios, old,
@@ -736,36 +875,30 @@ static void max310x_set_termios(struct uart_port *port,
port->uartclk / 4);
/* Setup baudrate generator */
- max310x_set_baud(s, baud);
+ max310x_set_baud(port, baud);
/* Update timeout according to new baud rate */
uart_update_timeout(port, termios->c_cflag, baud);
-
- mutex_unlock(&s->max310x_mutex);
}
static int max310x_startup(struct uart_port *port)
{
unsigned int val, line = port->line;
- struct max310x_port *s = container_of(port, struct max310x_port, port);
-
- if (s->pdata->suspend)
- s->pdata->suspend(0);
+ struct max310x_port *s = dev_get_drvdata(port->dev);
- mutex_lock(&s->max310x_mutex);
+ s->devtype->power(port, 1);
/* Configure baud rate, 9600 as default */
- max310x_set_baud(s, 9600);
+ max310x_set_baud(port, 9600);
/* Configure LCR register, 8N1 mode by default */
- val = MAX310X_LCR_WORD_LEN_8;
- regmap_write(s->regmap, MAX310X_LCR_REG, val);
+ max310x_port_write(port, MAX310X_LCR_REG, MAX310X_LCR_WORD_LEN_8);
/* Configure MODE1 register */
- regmap_update_bits(s->regmap, MAX310X_MODE1_REG,
- MAX310X_MODE1_TRNSCVCTRL_BIT,
- (s->pdata->uart_flags[line] & MAX310X_AUTO_DIR_CTRL)
- ? MAX310X_MODE1_TRNSCVCTRL_BIT : 0);
+ max310x_port_update(port, MAX310X_MODE1_REG,
+ MAX310X_MODE1_TRNSCVCTRL_BIT,
+ (s->pdata->uart_flags[line] & MAX310X_AUTO_DIR_CTRL)
+ ? MAX310X_MODE1_TRNSCVCTRL_BIT : 0);
/* Configure MODE2 register */
val = MAX310X_MODE2_RXEMPTINV_BIT;
@@ -776,63 +909,40 @@ static int max310x_startup(struct uart_port *port)
/* Reset FIFOs */
val |= MAX310X_MODE2_FIFORST_BIT;
- regmap_write(s->regmap, MAX310X_MODE2_REG, val);
-
- /* Configure FIFO trigger level register */
- /* RX FIFO trigger for 16 words, TX FIFO trigger for 64 words */
- val = MAX310X_FIFOTRIGLVL_RX(16) | MAX310X_FIFOTRIGLVL_TX(64);
- regmap_write(s->regmap, MAX310X_FIFOTRIGLVL_REG, val);
+ max310x_port_write(port, MAX310X_MODE2_REG, val);
+ max310x_port_update(port, MAX310X_MODE2_REG,
+ MAX310X_MODE2_FIFORST_BIT, 0);
/* Configure flow control levels */
/* Flow control halt level 96, resume level 48 */
- val = MAX310X_FLOWLVL_RES(48) | MAX310X_FLOWLVL_HALT(96);
- regmap_write(s->regmap, MAX310X_FLOWLVL_REG, val);
-
- /* Clear timeout register */
- regmap_write(s->regmap, MAX310X_RXTO_REG, 0);
+ max310x_port_write(port, MAX310X_FLOWLVL_REG,
+ MAX310X_FLOWLVL_RES(48) | MAX310X_FLOWLVL_HALT(96));
- /* Configure LSR interrupt enable register */
- /* Enable RX timeout interrupt */
- val = MAX310X_LSR_RXTO_BIT;
- regmap_write(s->regmap, MAX310X_LSR_IRQEN_REG, val);
+ /* Clear IRQ status register */
+ max310x_port_read(port, MAX310X_IRQSTS_REG);
- /* Clear FIFO reset */
- regmap_update_bits(s->regmap, MAX310X_MODE2_REG,
- MAX310X_MODE2_FIFORST_BIT, 0);
-
- /* Clear IRQ status register by reading it */
- regmap_read(s->regmap, MAX310X_IRQSTS_REG, &val);
-
- /* Configure interrupt enable register */
- /* Enable CTS change interrupt */
- val = MAX310X_IRQ_CTS_BIT;
- /* Enable RX, TX interrupts */
- val |= MAX310X_IRQ_RX | MAX310X_IRQ_TX;
- regmap_write(s->regmap, MAX310X_IRQEN_REG, val);
-
- mutex_unlock(&s->max310x_mutex);
+ /* Enable RX, TX, CTS change interrupts */
+ val = MAX310X_IRQ_RXEMPTY_BIT | MAX310X_IRQ_TXEMPTY_BIT;
+ max310x_port_write(port, MAX310X_IRQEN_REG, val | MAX310X_IRQ_CTS_BIT);
return 0;
}
static void max310x_shutdown(struct uart_port *port)
{
- struct max310x_port *s = container_of(port, struct max310x_port, port);
+ struct max310x_port *s = dev_get_drvdata(port->dev);
/* Disable all interrupts */
- mutex_lock(&s->max310x_mutex);
- regmap_write(s->regmap, MAX310X_IRQEN_REG, 0);
- mutex_unlock(&s->max310x_mutex);
+ max310x_port_write(port, MAX310X_IRQEN_REG, 0);
- if (s->pdata->suspend)
- s->pdata->suspend(1);
+ s->devtype->power(port, 0);
}
static const char *max310x_type(struct uart_port *port)
{
- struct max310x_port *s = container_of(port, struct max310x_port, port);
+ struct max310x_port *s = dev_get_drvdata(port->dev);
- return (port->type == PORT_MAX310X) ? s->name : NULL;
+ return (port->type == PORT_MAX310X) ? s->devtype->name : NULL;
}
static int max310x_request_port(struct uart_port *port)
@@ -841,134 +951,99 @@ static int max310x_request_port(struct uart_port *port)
return 0;
}
-static void max310x_release_port(struct uart_port *port)
-{
- /* Do nothing */
-}
-
static void max310x_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE)
port->type = PORT_MAX310X;
}
-static int max310x_verify_port(struct uart_port *port, struct serial_struct *ser)
+static int max310x_verify_port(struct uart_port *port, struct serial_struct *s)
{
- if ((ser->type == PORT_UNKNOWN) || (ser->type == PORT_MAX310X))
- return 0;
- if (ser->irq == port->irq)
- return 0;
+ if ((s->type != PORT_UNKNOWN) && (s->type != PORT_MAX310X))
+ return -EINVAL;
+ if (s->irq != port->irq)
+ return -EINVAL;
- return -EINVAL;
+ return 0;
}
-static struct uart_ops max310x_ops = {
+static void max310x_null_void(struct uart_port *port)
+{
+ /* Do nothing */
+}
+
+static const struct uart_ops max310x_ops = {
.tx_empty = max310x_tx_empty,
.set_mctrl = max310x_set_mctrl,
.get_mctrl = max310x_get_mctrl,
- .stop_tx = max310x_stop_tx,
+ .stop_tx = max310x_null_void,
.start_tx = max310x_start_tx,
- .stop_rx = max310x_stop_rx,
- .enable_ms = max310x_enable_ms,
+ .stop_rx = max310x_null_void,
+ .enable_ms = max310x_null_void,
.break_ctl = max310x_break_ctl,
.startup = max310x_startup,
.shutdown = max310x_shutdown,
.set_termios = max310x_set_termios,
.type = max310x_type,
.request_port = max310x_request_port,
- .release_port = max310x_release_port,
+ .release_port = max310x_null_void,
.config_port = max310x_config_port,
.verify_port = max310x_verify_port,
};
-#ifdef CONFIG_PM_SLEEP
-
-static int max310x_suspend(struct device *dev)
+static int __maybe_unused max310x_suspend(struct device *dev)
{
- int ret;
struct max310x_port *s = dev_get_drvdata(dev);
+ int i;
- dev_dbg(dev, "Suspend\n");
-
- ret = uart_suspend_port(&s->uart, &s->port);
-
- mutex_lock(&s->max310x_mutex);
-
- /* Enable sleep mode */
- regmap_update_bits(s->regmap, MAX310X_MODE1_REG,
- MAX310X_MODE1_FORCESLEEP_BIT,
- MAX310X_MODE1_FORCESLEEP_BIT);
-
- mutex_unlock(&s->max310x_mutex);
-
- if (s->pdata->suspend)
- s->pdata->suspend(1);
+ for (i = 0; i < s->uart.nr; i++) {
+ uart_suspend_port(&s->uart, &s->p[i].port);
+ s->devtype->power(&s->p[i].port, 0);
+ }
- return ret;
+ return 0;
}
-static int max310x_resume(struct device *dev)
+static int __maybe_unused max310x_resume(struct device *dev)
{
struct max310x_port *s = dev_get_drvdata(dev);
+ int i;
- dev_dbg(dev, "Resume\n");
-
- if (s->pdata->suspend)
- s->pdata->suspend(0);
-
- mutex_lock(&s->max310x_mutex);
-
- /* Disable sleep mode */
- regmap_update_bits(s->regmap, MAX310X_MODE1_REG,
- MAX310X_MODE1_FORCESLEEP_BIT,
- 0);
-
- max310x_wait_pll(s);
-
- mutex_unlock(&s->max310x_mutex);
+ for (i = 0; i < s->uart.nr; i++) {
+ s->devtype->power(&s->p[i].port, 1);
+ uart_resume_port(&s->uart, &s->p[i].port);
+ }
- return uart_resume_port(&s->uart, &s->port);
+ return 0;
}
-static SIMPLE_DEV_PM_OPS(max310x_pm_ops, max310x_suspend, max310x_resume);
-#define MAX310X_PM_OPS (&max310x_pm_ops)
-
-#else
-#define MAX310X_PM_OPS NULL
-#endif
-
#ifdef CONFIG_GPIOLIB
static int max310x_gpio_get(struct gpio_chip *chip, unsigned offset)
{
- unsigned int val = 0;
+ unsigned int val;
struct max310x_port *s = container_of(chip, struct max310x_port, gpio);
+ struct uart_port *port = &s->p[offset / 4].port;
- mutex_lock(&s->max310x_mutex);
- regmap_read(s->regmap, MAX310X_GPIODATA_REG, &val);
- mutex_unlock(&s->max310x_mutex);
+ val = max310x_port_read(port, MAX310X_GPIODATA_REG);
- return !!((val >> 4) & (1 << offset));
+ return !!((val >> 4) & (1 << (offset % 4)));
}
static void max310x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct max310x_port *s = container_of(chip, struct max310x_port, gpio);
+ struct uart_port *port = &s->p[offset / 4].port;
- mutex_lock(&s->max310x_mutex);
- regmap_update_bits(s->regmap, MAX310X_GPIODATA_REG, 1 << offset, value ?
- 1 << offset : 0);
- mutex_unlock(&s->max310x_mutex);
+ max310x_port_update(port, MAX310X_GPIODATA_REG, 1 << (offset % 4),
+ value ? 1 << (offset % 4) : 0);
}
static int max310x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
struct max310x_port *s = container_of(chip, struct max310x_port, gpio);
+ struct uart_port *port = &s->p[offset / 4].port;
- mutex_lock(&s->max310x_mutex);
-
- regmap_update_bits(s->regmap, MAX310X_GPIOCFG_REG, 1 << offset, 0);
-
- mutex_unlock(&s->max310x_mutex);
+ max310x_port_update(port, MAX310X_GPIOCFG_REG, 1 << (offset % 4), 0);
return 0;
}
@@ -977,74 +1052,42 @@ static int max310x_gpio_direction_output(struct gpio_chip *chip,
unsigned offset, int value)
{
struct max310x_port *s = container_of(chip, struct max310x_port, gpio);
+ struct uart_port *port = &s->p[offset / 4].port;
- mutex_lock(&s->max310x_mutex);
-
- regmap_update_bits(s->regmap, MAX310X_GPIOCFG_REG, 1 << offset,
- 1 << offset);
- regmap_update_bits(s->regmap, MAX310X_GPIODATA_REG, 1 << offset, value ?
- 1 << offset : 0);
-
- mutex_unlock(&s->max310x_mutex);
+ max310x_port_update(port, MAX310X_GPIODATA_REG, 1 << (offset % 4),
+ value ? 1 << (offset % 4) : 0);
+ max310x_port_update(port, MAX310X_GPIOCFG_REG, 1 << (offset % 4),
+ 1 << (offset % 4));
return 0;
}
#endif
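
Each UART contributes four GPIOs, so the accessors above split a chip-relative offset into a port index (offset / 4) and a bit inside that port (offset % 4); as read by max310x_gpio_get(), the low nibble of GPIODATA drives the outputs and the high nibble reflects the pin state. A quick sketch of that mapping:

    #include <stdio.h>

    int main(void)
    {
        for (unsigned int offset = 0; offset < 8; offset++) {
            unsigned int portno = offset / 4;   /* which UART owns the pin */
            unsigned int bit = offset % 4;      /* bit within that UART */

            printf("gpio %u -> port %u, drive via GPIODATA bit %u, "
                   "read back from bit %u\n",
                   offset, portno, bit, bit + 4);
        }
        return 0;
    }
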
-/* Generic platform data */
-static struct max310x_pdata generic_plat_data = {
- .driver_flags = MAX310X_EXT_CLK,
- .uart_flags[0] = MAX310X_ECHO_SUPRESS,
- .frequency = 26000000,
-};
-
-static int max310x_probe(struct spi_device *spi)
+static int max310x_probe(struct device *dev, int is_spi,
+ struct max310x_devtype *devtype, int irq)
{
struct max310x_port *s;
- struct device *dev = &spi->dev;
- int chiptype = spi_get_device_id(spi)->driver_data;
- struct max310x_pdata *pdata = dev->platform_data;
- unsigned int val = 0;
- int ret;
+ struct max310x_pdata *pdata = dev_get_platdata(dev);
+ int i, ret, uartclk;
/* Check for IRQ */
- if (spi->irq <= 0) {
+ if (irq <= 0) {
dev_err(dev, "No IRQ specified\n");
return -ENOTSUPP;
}
+ if (!pdata) {
+ dev_err(dev, "No platform data supplied\n");
+ return -EINVAL;
+ }
+
/* Alloc port structure */
- s = devm_kzalloc(dev, sizeof(struct max310x_port), GFP_KERNEL);
+ s = devm_kzalloc(dev, sizeof(*s) +
+ sizeof(struct max310x_one) * devtype->nr, GFP_KERNEL);
if (!s) {
dev_err(dev, "Error allocating port structure\n");
return -ENOMEM;
}
- dev_set_drvdata(dev, s);
-
- if (!pdata) {
- dev_warn(dev, "No platform data supplied, using defaults\n");
- pdata = &generic_plat_data;
- }
- s->pdata = pdata;
-
- /* Individual chip settings */
- switch (chiptype) {
- case MAX310X_TYPE_MAX3107:
- s->name = "MAX3107";
- s->nr_gpio = 4;
- s->uart.nr = 1;
- s->regcfg.max_register = 0x1f;
- break;
- case MAX310X_TYPE_MAX3108:
- s->name = "MAX3108";
- s->nr_gpio = 4;
- s->uart.nr = 1;
- s->regcfg.max_register = 0x1e;
- break;
- default:
- dev_err(dev, "Unsupported chip type %i\n", chiptype);
- return -ENOTSUPP;
- }
/* Check input frequency */
if ((pdata->driver_flags & MAX310X_EXT_CLK) &&
@@ -1055,13 +1098,11 @@ static int max310x_probe(struct spi_device *spi)
((pdata->frequency < 1000000) || (pdata->frequency > 4000000)))
goto err_freq;
- mutex_init(&s->max310x_mutex);
+ s->pdata = pdata;
+ s->devtype = devtype;
+ dev_set_drvdata(dev, s);
- /* Setup SPI bus */
- spi->mode = SPI_MODE_0;
- spi->bits_per_word = 8;
- spi->max_speed_hz = 26000000;
- spi_setup(spi);
+ mutex_init(&s->mutex);
/* Setup regmap */
s->regcfg.reg_bits = 8;
@@ -1069,109 +1110,100 @@ static int max310x_probe(struct spi_device *spi)
s->regcfg.read_flag_mask = 0x00;
s->regcfg.write_flag_mask = 0x80;
s->regcfg.cache_type = REGCACHE_RBTREE;
- s->regcfg.writeable_reg = max3107_8_reg_writeable;
+ s->regcfg.writeable_reg = max310x_reg_writeable;
s->regcfg.volatile_reg = max310x_reg_volatile;
s->regcfg.precious_reg = max310x_reg_precious;
- s->regmap = devm_regmap_init_spi(spi, &s->regcfg);
+ s->regcfg.max_register = devtype->nr * 0x20 - 1;
+
+ if (IS_ENABLED(CONFIG_SPI_MASTER) && is_spi) {
+ struct spi_device *spi = to_spi_device(dev);
+
+ s->regmap = devm_regmap_init_spi(spi, &s->regcfg);
+ } else
+ return -ENOTSUPP;
+
if (IS_ERR(s->regmap)) {
- ret = PTR_ERR(s->regmap);
dev_err(dev, "Failed to initialize register map\n");
- goto err_out;
- }
-
- /* Reset chip & check SPI function */
- ret = regmap_write(s->regmap, MAX310X_MODE2_REG, MAX310X_MODE2_RST_BIT);
- if (ret) {
- dev_err(dev, "SPI transfer failed\n");
- goto err_out;
- }
- /* Clear chip reset */
- regmap_write(s->regmap, MAX310X_MODE2_REG, 0);
-
- switch (chiptype) {
- case MAX310X_TYPE_MAX3107:
- /* Check REV ID to ensure we are talking to what we expect */
- regmap_read(s->regmap, MAX3107_REVID_REG, &val);
- if (((val & MAX3107_REV_MASK) != MAX3107_REV_ID)) {
- dev_err(dev, "%s ID 0x%02x does not match\n",
- s->name, val);
- ret = -ENODEV;
- goto err_out;
- }
- break;
- case MAX310X_TYPE_MAX3108:
- /* MAX3108 have not REV ID register, we just check default value
- * from clocksource register to make sure everything works.
- */
- regmap_read(s->regmap, MAX310X_CLKSRC_REG, &val);
- if (val != (MAX310X_CLKSRC_EXTCLK_BIT |
- MAX310X_CLKSRC_PLLBYP_BIT)) {
- dev_err(dev, "%s not present\n", s->name);
- ret = -ENODEV;
- goto err_out;
- }
- break;
+ return PTR_ERR(s->regmap);
}
/* Board specific configure */
- if (pdata->init)
- pdata->init();
- if (pdata->suspend)
- pdata->suspend(0);
-
- /* Calculate referecne clock */
- s->uartclk = max310x_set_ref_clk(s);
-
- /* Disable all interrupts */
- regmap_write(s->regmap, MAX310X_IRQEN_REG, 0);
-
- /* Setup MODE1 register */
- val = MAX310X_MODE1_IRQSEL_BIT; /* Enable IRQ pin */
- if (pdata->driver_flags & MAX310X_AUTOSLEEP)
- val = MAX310X_MODE1_AUTOSLEEP_BIT;
- regmap_write(s->regmap, MAX310X_MODE1_REG, val);
-
- /* Setup interrupt */
- ret = devm_request_threaded_irq(dev, spi->irq, NULL, max310x_ist,
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- dev_name(dev), s);
- if (ret) {
- dev_err(dev, "Unable to reguest IRQ %i\n", spi->irq);
- goto err_out;
+ if (s->pdata->init)
+ s->pdata->init();
+
+ /* Check device to ensure we are talking to what we expect */
+ ret = devtype->detect(dev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < devtype->nr; i++) {
+ unsigned int offs = i << 5;
+
+ /* Reset port */
+ regmap_write(s->regmap, MAX310X_MODE2_REG + offs,
+ MAX310X_MODE2_RST_BIT);
+ /* Clear port reset */
+ regmap_write(s->regmap, MAX310X_MODE2_REG + offs, 0);
+
+ /* Wait for port startup */
+ do {
+ regmap_read(s->regmap,
+ MAX310X_BRGDIVLSB_REG + offs, &ret);
+ } while (ret != 0x01);
+
+ regmap_update_bits(s->regmap, MAX310X_MODE1_REG + offs,
+ MAX310X_MODE1_AUTOSLEEP_BIT,
+ MAX310X_MODE1_AUTOSLEEP_BIT);
}
+ uartclk = max310x_set_ref_clk(s);
+ dev_dbg(dev, "Reference clock set to %i Hz\n", uartclk);
+
/* Register UART driver */
s->uart.owner = THIS_MODULE;
- s->uart.driver_name = dev_name(dev);
s->uart.dev_name = "ttyMAX";
s->uart.major = MAX310X_MAJOR;
s->uart.minor = MAX310X_MINOR;
+ s->uart.nr = devtype->nr;
ret = uart_register_driver(&s->uart);
if (ret) {
dev_err(dev, "Registering UART driver failed\n");
- goto err_out;
+ return ret;
}
- /* Initialize workqueue for start TX */
- s->wq = create_freezable_workqueue(dev_name(dev));
- INIT_WORK(&s->tx_work, max310x_wq_proc);
-
- /* Initialize UART port data */
- s->port.line = 0;
- s->port.dev = dev;
- s->port.irq = spi->irq;
- s->port.type = PORT_MAX310X;
- s->port.fifosize = MAX310X_FIFO_SIZE;
- s->port.flags = UPF_SKIP_TEST | UPF_FIXED_TYPE;
- s->port.iotype = UPIO_PORT;
- s->port.membase = (void __iomem *)0xffffffff; /* Bogus value */
- s->port.uartclk = s->uartclk;
- s->port.ops = &max310x_ops;
- uart_add_one_port(&s->uart, &s->port);
+ for (i = 0; i < devtype->nr; i++) {
+ /* Initialize port data */
+ s->p[i].port.line = i;
+ s->p[i].port.dev = dev;
+ s->p[i].port.irq = irq;
+ s->p[i].port.type = PORT_MAX310X;
+ s->p[i].port.fifosize = MAX310X_FIFO_SIZE;
+ s->p[i].port.flags = UPF_SKIP_TEST | UPF_FIXED_TYPE |
+ UPF_LOW_LATENCY;
+ s->p[i].port.iotype = UPIO_PORT;
+ s->p[i].port.iobase = i * 0x20;
+ s->p[i].port.membase = (void __iomem *)~0;
+ s->p[i].port.uartclk = uartclk;
+ s->p[i].port.ops = &max310x_ops;
+ /* Disable all interrupts */
+ max310x_port_write(&s->p[i].port, MAX310X_IRQEN_REG, 0);
+ /* Clear IRQ status register */
+ max310x_port_read(&s->p[i].port, MAX310X_IRQSTS_REG);
+ /* Enable IRQ pin */
+ max310x_port_update(&s->p[i].port, MAX310X_MODE1_REG,
+ MAX310X_MODE1_IRQSEL_BIT,
+ MAX310X_MODE1_IRQSEL_BIT);
+ /* Initialize the work used to start TX */
+ INIT_WORK(&s->p[i].tx_work, max310x_wq_proc);
+ /* Register port */
+ uart_add_one_port(&s->uart, &s->p[i].port);
+ /* Go to suspend mode */
+ devtype->power(&s->p[i].port, 0);
+ }
#ifdef CONFIG_GPIOLIB
/* Setup GPIO controller */
- if (pdata->gpio_base) {
+ if (s->pdata->gpio_base) {
s->gpio.owner = THIS_MODULE;
s->gpio.dev = dev;
s->gpio.label = dev_name(dev);
@@ -1179,86 +1211,107 @@ static int max310x_probe(struct spi_device *spi)
s->gpio.get = max310x_gpio_get;
s->gpio.direction_output= max310x_gpio_direction_output;
s->gpio.set = max310x_gpio_set;
- s->gpio.base = pdata->gpio_base;
- s->gpio.ngpio = s->nr_gpio;
+ s->gpio.base = s->pdata->gpio_base;
+ s->gpio.ngpio = devtype->nr * 4;
s->gpio.can_sleep = 1;
- if (gpiochip_add(&s->gpio)) {
- /* Indicate that we should not call gpiochip_remove */
- s->gpio.base = 0;
- }
+ if (!gpiochip_add(&s->gpio))
+ s->gpio_used = 1;
} else
dev_info(dev, "GPIO support not enabled\n");
#endif
- /* Go to suspend mode */
- if (pdata->suspend)
- pdata->suspend(1);
+ /* Setup interrupt */
+ ret = devm_request_threaded_irq(dev, irq, NULL, max310x_ist,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(dev), s);
+ if (ret) {
+ dev_err(dev, "Unable to reguest IRQ %i\n", irq);
+#ifdef CONFIG_GPIOLIB
+ if (s->gpio_used)
+ WARN_ON(gpiochip_remove(&s->gpio));
+#endif
+ }
- return 0;
+ return ret;
err_freq:
dev_err(dev, "Frequency parameter incorrect\n");
- ret = -EINVAL;
-
-err_out:
- dev_set_drvdata(dev, NULL);
-
- return ret;
+ return -EINVAL;
}
-static int max310x_remove(struct spi_device *spi)
+static int max310x_remove(struct device *dev)
{
- struct device *dev = &spi->dev;
struct max310x_port *s = dev_get_drvdata(dev);
- int ret = 0;
-
- dev_dbg(dev, "Removing port\n");
-
- devm_free_irq(dev, s->port.irq, s);
-
- destroy_workqueue(s->wq);
+ int i, ret = 0;
- uart_remove_one_port(&s->uart, &s->port);
+ for (i = 0; i < s->uart.nr; i++) {
+ cancel_work_sync(&s->p[i].tx_work);
+ uart_remove_one_port(&s->uart, &s->p[i].port);
+ s->devtype->power(&s->p[i].port, 0);
+ }
uart_unregister_driver(&s->uart);
#ifdef CONFIG_GPIOLIB
- if (s->pdata->gpio_base) {
+ if (s->gpio_used)
ret = gpiochip_remove(&s->gpio);
- if (ret)
- dev_err(dev, "Failed to remove gpio chip: %d\n", ret);
- }
#endif
- dev_set_drvdata(dev, NULL);
-
- if (s->pdata->suspend)
- s->pdata->suspend(1);
if (s->pdata->exit)
s->pdata->exit();
return ret;
}
+#ifdef CONFIG_SPI_MASTER
+static int max310x_spi_probe(struct spi_device *spi)
+{
+ struct max310x_devtype *devtype =
+ (struct max310x_devtype *)spi_get_device_id(spi)->driver_data;
+ int ret;
+
+ /* Setup SPI bus */
+ spi->bits_per_word = 8;
+ spi->mode = spi->mode ? : SPI_MODE_0;
+ spi->max_speed_hz = spi->max_speed_hz ? : 26000000;
+ ret = spi_setup(spi);
+ if (ret) {
+ dev_err(&spi->dev, "SPI setup failed\n");
+ return ret;
+ }
+
+ return max310x_probe(&spi->dev, 1, devtype, spi->irq);
+}
+
+static int max310x_spi_remove(struct spi_device *spi)
+{
+ return max310x_remove(&spi->dev);
+}
+
+static SIMPLE_DEV_PM_OPS(max310x_pm_ops, max310x_suspend, max310x_resume);
+
static const struct spi_device_id max310x_id_table[] = {
- { "max3107", MAX310X_TYPE_MAX3107 },
- { "max3108", MAX310X_TYPE_MAX3108 },
+ { "max3107", (kernel_ulong_t)&max3107_devtype, },
+ { "max3108", (kernel_ulong_t)&max3108_devtype, },
+ { "max3109", (kernel_ulong_t)&max3109_devtype, },
+ { "max14830", (kernel_ulong_t)&max14830_devtype, },
{ }
};
MODULE_DEVICE_TABLE(spi, max310x_id_table);
-static struct spi_driver max310x_driver = {
+static struct spi_driver max310x_uart_driver = {
.driver = {
- .name = "max310x",
+ .name = MAX310X_NAME,
.owner = THIS_MODULE,
- .pm = MAX310X_PM_OPS,
+ .pm = &max310x_pm_ops,
},
- .probe = max310x_probe,
- .remove = max310x_remove,
+ .probe = max310x_spi_probe,
+ .remove = max310x_spi_remove,
.id_table = max310x_id_table,
};
-module_spi_driver(max310x_driver);
+module_spi_driver(max310x_uart_driver);
+#endif
-MODULE_LICENSE("GPL v2");
+MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
MODULE_DESCRIPTION("MAX310X serial driver");
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index 65be0c00c4b..0edfaf8cd26 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -24,6 +24,7 @@
#include <linux/serial_core.h>
#include <linux/io.h>
#include <linux/uaccess.h>
+#include <linux/platform_device.h>
#include <asm/coldfire.h>
#include <asm/mcfsim.h>
#include <asm/mcfuart.h>
@@ -324,7 +325,9 @@ static void mcf_rx_chars(struct mcf_uart *pp)
uart_insert_char(port, status, MCFUART_USR_RXOVERRUN, ch, flag);
}
+ spin_unlock(&port->lock);
tty_flip_buffer_push(&port->state->port);
+ spin_lock(&port->lock);
}
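
This hunk, and the matching changes to mfd.c, mpsc.c, msm_serial.c, netx-serial.c and nwpserial.c below, all drop the port lock around tty_flip_buffer_push(), since the push can re-enter code that takes the same non-recursive lock. A rough user-space analogue of the pattern, using pthreads instead of the kernel spinlock API and hypothetical function names:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t port_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stand-in for tty_flip_buffer_push(): in low-latency configurations it
     * may call back into code that takes the port lock again. */
    static void flush_to_consumer(void)
    {
        pthread_mutex_lock(&port_lock);     /* would self-deadlock if the caller
                                             * still held port_lock */
        puts("consumer ran");
        pthread_mutex_unlock(&port_lock);
    }

    static void rx_chars(void)
    {
        pthread_mutex_lock(&port_lock);
        /* ... drain the RX FIFO under the lock ... */

        pthread_mutex_unlock(&port_lock);   /* drop it around the push */
        flush_to_consumer();
        pthread_mutex_lock(&port_lock);

        /* ... continue under the lock ... */
        pthread_mutex_unlock(&port_lock);
    }

    int main(void)
    {
        rx_chars();
        return 0;
    }
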
/****************************************************************************/
@@ -644,7 +647,7 @@ static struct uart_driver mcf_driver = {
static int mcf_probe(struct platform_device *pdev)
{
- struct mcf_platform_uart *platp = pdev->dev.platform_data;
+ struct mcf_platform_uart *platp = dev_get_platdata(&pdev->dev);
struct uart_port *port;
int i;
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
index 4a82267af83..d3db042f649 100644
--- a/drivers/tty/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
@@ -386,7 +386,7 @@ static void serial_hsu_stop_tx(struct uart_port *port)
/* This is always called in spinlock protected mode, so
* modify timeout timer is safe here */
-void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts)
+void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts, unsigned long *flags)
{
struct hsu_dma_buffer *dbuf = &up->rxbuf;
struct hsu_dma_chan *chan = up->rxc;
@@ -438,7 +438,9 @@ void hsu_dma_rx(struct uart_hsu_port *up, u32 int_sts)
| (0x1 << 16)
| (0x1 << 24) /* timeout bit, see HSU Errata 1 */
);
+ spin_unlock_irqrestore(&up->port.lock, *flags);
tty_flip_buffer_push(tport);
+ spin_lock_irqsave(&up->port.lock, *flags);
chan_writel(chan, HSU_CH_CR, 0x3);
@@ -459,7 +461,8 @@ static void serial_hsu_stop_rx(struct uart_port *port)
}
}
-static inline void receive_chars(struct uart_hsu_port *up, int *status)
+static inline void receive_chars(struct uart_hsu_port *up, int *status,
+ unsigned long *flags)
{
unsigned int ch, flag;
unsigned int max_count = 256;
@@ -519,7 +522,10 @@ static inline void receive_chars(struct uart_hsu_port *up, int *status)
ignore_char:
*status = serial_in(up, UART_LSR);
} while ((*status & UART_LSR_DR) && max_count--);
+
+ spin_unlock_irqrestore(&up->port.lock, *flags);
tty_flip_buffer_push(&up->port.state->port);
+ spin_lock_irqsave(&up->port.lock, *flags);
}
static void transmit_chars(struct uart_hsu_port *up)
@@ -613,7 +619,7 @@ static irqreturn_t port_irq(int irq, void *dev_id)
lsr = serial_in(up, UART_LSR);
if (lsr & UART_LSR_DR)
- receive_chars(up, &lsr);
+ receive_chars(up, &lsr, &flags);
check_modem_status(up);
/* lsr will be renewed during the receive_chars */
@@ -643,7 +649,7 @@ static inline void dma_chan_irq(struct hsu_dma_chan *chan)
/* Rx channel */
if (chan->dirt == DMA_FROM_DEVICE)
- hsu_dma_rx(up, int_sts);
+ hsu_dma_rx(up, int_sts, &flags);
/* Tx channel */
if (chan->dirt == DMA_TO_DEVICE) {
diff --git a/drivers/tty/serial/mpsc.c b/drivers/tty/serial/mpsc.c
index bc24f493167..8d702677acc 100644
--- a/drivers/tty/serial/mpsc.c
+++ b/drivers/tty/serial/mpsc.c
@@ -934,7 +934,7 @@ static int serial_polled;
******************************************************************************
*/
-static int mpsc_rx_intr(struct mpsc_port_info *pi)
+static int mpsc_rx_intr(struct mpsc_port_info *pi, unsigned long *flags)
{
struct mpsc_rx_desc *rxre;
struct tty_port *port = &pi->port.state->port;
@@ -969,8 +969,11 @@ static int mpsc_rx_intr(struct mpsc_port_info *pi)
#endif
/* Following use of tty struct directly is deprecated */
if (tty_buffer_request_room(port, bytes_in) < bytes_in) {
- if (port->low_latency)
+ if (port->low_latency) {
+ spin_unlock_irqrestore(&pi->port.lock, *flags);
tty_flip_buffer_push(port);
+ spin_lock_irqsave(&pi->port.lock, *flags);
+ }
/*
* If this failed then we will throw away the bytes
* but must do so to clear interrupts.
@@ -1080,7 +1083,9 @@ next_frame:
if ((readl(pi->sdma_base + SDMA_SDCM) & SDMA_SDCM_ERD) == 0)
mpsc_start_rx(pi);
+ spin_unlock_irqrestore(&pi->port.lock, *flags);
tty_flip_buffer_push(port);
+ spin_lock_irqsave(&pi->port.lock, *flags);
return rc;
}
@@ -1222,7 +1227,7 @@ static irqreturn_t mpsc_sdma_intr(int irq, void *dev_id)
spin_lock_irqsave(&pi->port.lock, iflags);
mpsc_sdma_intr_ack(pi);
- if (mpsc_rx_intr(pi))
+ if (mpsc_rx_intr(pi, &iflags))
rc = IRQ_HANDLED;
if (mpsc_tx_intr(pi))
rc = IRQ_HANDLED;
@@ -1884,7 +1889,7 @@ static int mpsc_shared_drv_probe(struct platform_device *dev)
if (dev->id == 0) {
if (!(rc = mpsc_shared_map_regs(dev))) {
pdata = (struct mpsc_shared_pdata *)
- dev->dev.platform_data;
+ dev_get_platdata(&dev->dev);
mpsc_shared_regs.MPSC_MRR_m = pdata->mrr_val;
mpsc_shared_regs.MPSC_RCRR_m= pdata->rcrr_val;
@@ -2025,7 +2030,7 @@ static void mpsc_drv_get_platform_data(struct mpsc_port_info *pi,
{
struct mpsc_pdata *pdata;
- pdata = (struct mpsc_pdata *)pd->dev.platform_data;
+ pdata = (struct mpsc_pdata *)dev_get_platdata(&pd->dev);
pi->port.uartclk = pdata->brg_clk_freq;
pi->port.iotype = UPIO_MEM;
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index 9b6ef20420c..a67e7081f00 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -713,7 +713,7 @@ static void serial_m3110_enable_ms(struct uart_port *port)
{
}
-struct uart_ops serial_m3110_ops = {
+static struct uart_ops serial_m3110_ops = {
.tx_empty = serial_m3110_tx_empty,
.set_mctrl = serial_m3110_set_mctrl,
.get_mctrl = serial_m3110_get_mctrl,
@@ -844,7 +844,7 @@ static int serial_m3110_probe(struct spi_device *spi)
pmax = max;
/* Give membase a pseudo value to pass serial_core's check */
- max->port.membase = (void *)0xff110000;
+ max->port.membase = (unsigned char __iomem *)0xff110000;
uart_add_one_port(&serial_m3110_reg, &max->port);
return 0;
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 2c6cfb3cf03..b5d779cd3c2 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -45,16 +45,19 @@ struct msm_port {
struct clk *clk;
struct clk *pclk;
unsigned int imr;
- unsigned int *gsbi_base;
+ void __iomem *gsbi_base;
int is_uartdm;
unsigned int old_snap_state;
};
-static inline void wait_for_xmitr(struct uart_port *port, int bits)
+static inline void wait_for_xmitr(struct uart_port *port)
{
- if (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY))
- while ((msm_read(port, UART_ISR) & bits) != bits)
- cpu_relax();
+ while (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) {
+ if (msm_read(port, UART_ISR) & UART_ISR_TX_READY)
+ break;
+ udelay(1);
+ }
+ msm_write(port, UART_CR_CMD_RESET_TX_READY, UART_CR);
}
static void msm_stop_tx(struct uart_port *port)
@@ -137,7 +140,10 @@ static void handle_rx_dm(struct uart_port *port, unsigned int misr)
count -= 4;
}
+ spin_unlock(&port->lock);
tty_flip_buffer_push(tport);
+ spin_lock(&port->lock);
+
if (misr & (UART_IMR_RXSTALE))
msm_write(port, UART_CR_CMD_RESET_STALE_INT, UART_CR);
msm_write(port, 0xFFFFFF, UARTDM_DMRX);
@@ -189,52 +195,69 @@ static void handle_rx(struct uart_port *port)
tty_insert_flip_char(tport, c, flag);
}
+ spin_unlock(&port->lock);
tty_flip_buffer_push(tport);
+ spin_lock(&port->lock);
}
-static void reset_dm_count(struct uart_port *port)
+static void reset_dm_count(struct uart_port *port, int count)
{
- wait_for_xmitr(port, UART_ISR_TX_READY);
- msm_write(port, 1, UARTDM_NCF_TX);
+ wait_for_xmitr(port);
+ msm_write(port, count, UARTDM_NCF_TX);
+ msm_read(port, UARTDM_NCF_TX);
}
static void handle_tx(struct uart_port *port)
{
struct circ_buf *xmit = &port->state->xmit;
struct msm_port *msm_port = UART_TO_MSM(port);
- int sent_tx;
+ unsigned int tx_count, num_chars;
+ unsigned int tf_pointer = 0;
+
+ tx_count = uart_circ_chars_pending(xmit);
+ tx_count = min3(tx_count, (unsigned int)UART_XMIT_SIZE - xmit->tail,
+ port->fifosize);
if (port->x_char) {
if (msm_port->is_uartdm)
- reset_dm_count(port);
+ reset_dm_count(port, tx_count + 1);
msm_write(port, port->x_char,
msm_port->is_uartdm ? UARTDM_TF : UART_TF);
port->icount.tx++;
port->x_char = 0;
+ } else if (tx_count && msm_port->is_uartdm) {
+ reset_dm_count(port, tx_count);
}
- if (msm_port->is_uartdm)
- reset_dm_count(port);
+ while (tf_pointer < tx_count) {
+ int i;
+ char buf[4] = { 0 };
+ unsigned int *bf = (unsigned int *)&buf;
- while (msm_read(port, UART_SR) & UART_SR_TX_READY) {
- if (uart_circ_empty(xmit)) {
- /* disable tx interrupts */
- msm_port->imr &= ~UART_IMR_TXLEV;
- msm_write(port, msm_port->imr, UART_IMR);
+ if (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
break;
- }
- msm_write(port, xmit->buf[xmit->tail],
- msm_port->is_uartdm ? UARTDM_TF : UART_TF);
if (msm_port->is_uartdm)
- reset_dm_count(port);
+ num_chars = min(tx_count - tf_pointer,
+ (unsigned int)sizeof(buf));
+ else
+ num_chars = 1;
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- port->icount.tx++;
- sent_tx = 1;
+ for (i = 0; i < num_chars; i++) {
+ buf[i] = xmit->buf[xmit->tail + i];
+ port->icount.tx++;
+ }
+
+ msm_write(port, *bf, msm_port->is_uartdm ? UARTDM_TF : UART_TF);
+ xmit->tail = (xmit->tail + num_chars) & (UART_XMIT_SIZE - 1);
+ tf_pointer += num_chars;
}
+ /* disable tx interrupts if nothing more to send */
+ if (uart_circ_empty(xmit))
+ msm_stop_tx(port);
+
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
uart_write_wakeup(port);
}
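
The rewritten handle_tx() batches characters into 32-bit words for the UARTDM transmit FIFO: tx_count is clamped to the pending data, the distance to the end of the circular buffer and the FIFO size, and then up to four bytes at a time are packed and written as one word. A stand-alone sketch of just the packing step (memcpy is used here instead of the in-kernel pointer cast; the printed word follows host byte order, as the cast does):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *msg = "hello";
        size_t len = strlen(msg), off = 0;

        while (off < len) {
            char buf[4] = { 0 };
            uint32_t word;
            size_t n = len - off < 4 ? len - off : 4;   /* up to 4 chars per word */

            memcpy(buf, msg + off, n);
            memcpy(&word, buf, sizeof(word));
            printf("write 0x%08x to the TX FIFO (%zu chars)\n",
                   (unsigned int)word, n);
            off += n;
        }
        return 0;
    }
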
@@ -295,7 +318,7 @@ static void msm_reset(struct uart_port *port)
msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
}
-void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
+static void msm_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
unsigned int mr;
mr = msm_read(port, UART_MR1);
@@ -318,70 +341,60 @@ static void msm_break_ctl(struct uart_port *port, int break_ctl)
msm_write(port, UART_CR_CMD_STOP_BREAK, UART_CR);
}
+struct msm_baud_map {
+ u16 divisor;
+ u8 code;
+ u8 rxstale;
+};
+
+static const struct msm_baud_map *
+msm_find_best_baud(struct uart_port *port, unsigned int baud)
+{
+ unsigned int i, divisor;
+ const struct msm_baud_map *entry;
+ static const struct msm_baud_map table[] = {
+ { 1536, 0x00, 1 },
+ { 768, 0x11, 1 },
+ { 384, 0x22, 1 },
+ { 192, 0x33, 1 },
+ { 96, 0x44, 1 },
+ { 48, 0x55, 1 },
+ { 32, 0x66, 1 },
+ { 24, 0x77, 1 },
+ { 16, 0x88, 1 },
+ { 12, 0x99, 6 },
+ { 8, 0xaa, 6 },
+ { 6, 0xbb, 6 },
+ { 4, 0xcc, 6 },
+ { 3, 0xdd, 8 },
+ { 2, 0xee, 16 },
+ { 1, 0xff, 31 },
+ };
+
+ divisor = uart_get_divisor(port, baud);
+
+ for (i = 0, entry = table; i < ARRAY_SIZE(table); i++, entry++)
+ if (entry->divisor <= divisor)
+ break;
+
+ return entry; /* Default to smallest divider */
+}
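
The lookup replaces the old per-rate switch: uart_get_divisor() reduces to uartclk / (16 * baud), rounded, outside of the custom-divisor special case, and the first table entry whose divisor is not larger than that wins, falling back to the smallest divider. A user-space sketch of the same lookup against the table above, assuming the 1843200 Hz clock that the TCXO paths in msm_serial.h set:

    #include <stdio.h>

    struct baud_map { unsigned short divisor; unsigned char code, rxstale; };

    static const struct baud_map table[] = {
        { 1536, 0x00,  1 }, { 768, 0x11,  1 }, { 384, 0x22,  1 },
        {  192, 0x33,  1 }, {  96, 0x44,  1 }, {  48, 0x55,  1 },
        {   32, 0x66,  1 }, {  24, 0x77,  1 }, {  16, 0x88,  1 },
        {   12, 0x99,  6 }, {   8, 0xaa,  6 }, {   6, 0xbb,  6 },
        {    4, 0xcc,  6 }, {   3, 0xdd,  8 }, {   2, 0xee, 16 },
        {    1, 0xff, 31 },
    };
    #define TABLE_SIZE (sizeof(table) / sizeof(table[0]))

    int main(void)
    {
        unsigned int uartclk = 1843200;
        unsigned int bauds[] = { 9600, 115200 };

        for (unsigned int i = 0; i < 2; i++) {
            unsigned int divisor = uartclk / (16 * bauds[i]);
            const struct baud_map *e = &table[TABLE_SIZE - 1]; /* smallest divider */

            for (unsigned int j = 0; j < TABLE_SIZE; j++)
                if (table[j].divisor <= divisor) {
                    e = &table[j];
                    break;
                }
            printf("baud %u -> divisor %u, CSR code 0x%02x, rxstale %u\n",
                   bauds[i], divisor, e->code, e->rxstale);
        }
        return 0;
    }
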
+
static int msm_set_baud_rate(struct uart_port *port, unsigned int baud)
{
- unsigned int baud_code, rxstale, watermark;
+ unsigned int rxstale, watermark;
struct msm_port *msm_port = UART_TO_MSM(port);
+ const struct msm_baud_map *entry;
- switch (baud) {
- case 300:
- baud_code = UART_CSR_300;
- rxstale = 1;
- break;
- case 600:
- baud_code = UART_CSR_600;
- rxstale = 1;
- break;
- case 1200:
- baud_code = UART_CSR_1200;
- rxstale = 1;
- break;
- case 2400:
- baud_code = UART_CSR_2400;
- rxstale = 1;
- break;
- case 4800:
- baud_code = UART_CSR_4800;
- rxstale = 1;
- break;
- case 9600:
- baud_code = UART_CSR_9600;
- rxstale = 2;
- break;
- case 14400:
- baud_code = UART_CSR_14400;
- rxstale = 3;
- break;
- case 19200:
- baud_code = UART_CSR_19200;
- rxstale = 4;
- break;
- case 28800:
- baud_code = UART_CSR_28800;
- rxstale = 6;
- break;
- case 38400:
- baud_code = UART_CSR_38400;
- rxstale = 8;
- break;
- case 57600:
- baud_code = UART_CSR_57600;
- rxstale = 16;
- break;
- case 115200:
- default:
- baud_code = UART_CSR_115200;
- baud = 115200;
- rxstale = 31;
- break;
- }
+ entry = msm_find_best_baud(port, baud);
if (msm_port->is_uartdm)
msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
- msm_write(port, baud_code, UART_CSR);
+ msm_write(port, entry->code, UART_CSR);
/* RX stale watermark */
+ rxstale = entry->rxstale;
watermark = UART_IPR_STALE_LSB & rxstale;
watermark |= UART_IPR_RXSTALE_LAST;
watermark |= UART_IPR_STALE_TIMEOUT_MSB & (rxstale << 2);
@@ -409,8 +422,7 @@ static void msm_init_clock(struct uart_port *port)
struct msm_port *msm_port = UART_TO_MSM(port);
clk_prepare_enable(msm_port->clk);
- if (!IS_ERR(msm_port->pclk))
- clk_prepare_enable(msm_port->pclk);
+ clk_prepare_enable(msm_port->pclk);
msm_serial_set_mnd_regs(port);
}
@@ -589,12 +601,10 @@ static void msm_release_port(struct uart_port *port)
port->membase = NULL;
if (msm_port->gsbi_base) {
- iowrite32(GSBI_PROTOCOL_IDLE, msm_port->gsbi_base +
- GSBI_CONTROL);
-
- gsbi_resource = platform_get_resource(pdev,
- IORESOURCE_MEM, 1);
+ writel_relaxed(GSBI_PROTOCOL_IDLE,
+ msm_port->gsbi_base + GSBI_CONTROL);
+ gsbi_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (unlikely(!gsbi_resource))
return;
@@ -637,7 +647,7 @@ static int msm_request_port(struct uart_port *port)
if (!request_mem_region(gsbi_resource->start, size,
"msm_serial")) {
ret = -EBUSY;
- goto fail_release_port;
+ goto fail_release_port_membase;
}
msm_port->gsbi_base = ioremap(gsbi_resource->start, size);
@@ -651,6 +661,8 @@ static int msm_request_port(struct uart_port *port)
fail_release_gsbi:
release_mem_region(gsbi_resource->start, size);
+fail_release_port_membase:
+ iounmap(port->membase);
fail_release_port:
release_mem_region(port->mapbase, size);
return ret;
@@ -666,10 +678,9 @@ static void msm_config_port(struct uart_port *port, int flags)
if (ret)
return;
}
-
- if (msm_port->is_uartdm)
- iowrite32(GSBI_PROTOCOL_UART, msm_port->gsbi_base +
- GSBI_CONTROL);
+ if (msm_port->gsbi_base)
+ writel_relaxed(GSBI_PROTOCOL_UART,
+ msm_port->gsbi_base + GSBI_CONTROL);
}
static int msm_verify_port(struct uart_port *port, struct serial_struct *ser)
@@ -689,13 +700,11 @@ static void msm_power(struct uart_port *port, unsigned int state,
switch (state) {
case 0:
clk_prepare_enable(msm_port->clk);
- if (!IS_ERR(msm_port->pclk))
- clk_prepare_enable(msm_port->pclk);
+ clk_prepare_enable(msm_port->pclk);
break;
case 3:
clk_disable_unprepare(msm_port->clk);
- if (!IS_ERR(msm_port->pclk))
- clk_disable_unprepare(msm_port->pclk);
+ clk_disable_unprepare(msm_port->pclk);
break;
default:
printk(KERN_ERR "msm_serial: Unknown PM state %d\n", state);
@@ -760,32 +769,63 @@ static inline struct uart_port *get_port_from_line(unsigned int line)
}
#ifdef CONFIG_SERIAL_MSM_CONSOLE
-
-static void msm_console_putchar(struct uart_port *port, int c)
-{
- struct msm_port *msm_port = UART_TO_MSM(port);
-
- if (msm_port->is_uartdm)
- reset_dm_count(port);
-
- while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
- ;
- msm_write(port, c, msm_port->is_uartdm ? UARTDM_TF : UART_TF);
-}
-
static void msm_console_write(struct console *co, const char *s,
unsigned int count)
{
+ int i;
struct uart_port *port;
struct msm_port *msm_port;
+ int num_newlines = 0;
+ bool replaced = false;
BUG_ON(co->index < 0 || co->index >= UART_NR);
port = get_port_from_line(co->index);
msm_port = UART_TO_MSM(port);
+ /* Account for newlines that will get a carriage return added */
+ for (i = 0; i < count; i++)
+ if (s[i] == '\n')
+ num_newlines++;
+ count += num_newlines;
+
spin_lock(&port->lock);
- uart_console_write(port, s, count, msm_console_putchar);
+ if (msm_port->is_uartdm)
+ reset_dm_count(port, count);
+
+ i = 0;
+ while (i < count) {
+ int j;
+ unsigned int num_chars;
+ char buf[4] = { 0 };
+ unsigned int *bf = (unsigned int *)&buf;
+
+ if (msm_port->is_uartdm)
+ num_chars = min(count - i, (unsigned int)sizeof(buf));
+ else
+ num_chars = 1;
+
+ for (j = 0; j < num_chars; j++) {
+ char c = *s;
+
+ if (c == '\n' && !replaced) {
+ buf[j] = '\r';
+ j++;
+ replaced = true;
+ }
+ if (j < num_chars) {
+ buf[j] = c;
+ s++;
+ replaced = false;
+ }
+ }
+
+ while (!(msm_read(port, UART_SR) & UART_SR_TX_READY))
+ cpu_relax();
+
+ msm_write(port, *bf, msm_port->is_uartdm ? UARTDM_TF : UART_TF);
+ i += num_chars;
+ }
spin_unlock(&port->lock);
}
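
msm_console_write() now expands each '\n' to "\r\n" while packing the string into 32-bit FIFO words; the "replaced" flag carries a pending '\n' over to the next word when the inserted '\r' lands in the last byte of the current one. The same loop, reduced to a stand-alone program that prints the packed words as hex (the input string is an arbitrary example):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *s = "ok\ndone\n";
        unsigned int len = strlen(s), num_newlines = 0, count, i;
        int replaced = 0;

        for (i = 0; i < len; i++)           /* account for the CRs added below */
            if (s[i] == '\n')
                num_newlines++;
        count = len + num_newlines;

        i = 0;
        while (i < count) {
            unsigned int j, num_chars = count - i < 4 ? count - i : 4;
            char buf[4] = { 0 };

            for (j = 0; j < num_chars; j++) {
                char c = *s;

                if (c == '\n' && !replaced) {
                    buf[j] = '\r';          /* insert CR before the LF */
                    j++;
                    replaced = 1;           /* LF may spill into the next word */
                }
                if (j < num_chars) {
                    buf[j] = c;
                    s++;
                    replaced = 0;
                }
            }
            printf("word: %02x %02x %02x %02x\n",
                   (unsigned char)buf[0], (unsigned char)buf[1],
                   (unsigned char)buf[2], (unsigned char)buf[3]);
            i += num_chars;
        }
        return 0;
    }
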
@@ -859,6 +899,11 @@ static struct uart_driver msm_uart_driver = {
static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
+static const struct of_device_id msm_uartdm_table[] = {
+ { .compatible = "qcom,msm-uartdm" },
+ { }
+};
+
static int __init msm_serial_probe(struct platform_device *pdev)
{
struct msm_port *msm_port;
@@ -878,23 +923,17 @@ static int __init msm_serial_probe(struct platform_device *pdev)
port->dev = &pdev->dev;
msm_port = UART_TO_MSM(port);
- if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
+ if (of_match_device(msm_uartdm_table, &pdev->dev))
msm_port->is_uartdm = 1;
else
msm_port->is_uartdm = 0;
- if (msm_port->is_uartdm) {
- msm_port->clk = devm_clk_get(&pdev->dev, "gsbi_uart_clk");
- msm_port->pclk = devm_clk_get(&pdev->dev, "gsbi_pclk");
- } else {
- msm_port->clk = devm_clk_get(&pdev->dev, "uart_clk");
- msm_port->pclk = ERR_PTR(-ENOENT);
- }
-
+ msm_port->clk = devm_clk_get(&pdev->dev, "core");
if (IS_ERR(msm_port->clk))
return PTR_ERR(msm_port->clk);
if (msm_port->is_uartdm) {
+ msm_port->pclk = devm_clk_get(&pdev->dev, "iface");
if (IS_ERR(msm_port->pclk))
return PTR_ERR(msm_port->pclk);
@@ -931,6 +970,7 @@ static int msm_serial_remove(struct platform_device *pdev)
static struct of_device_id msm_match_table[] = {
{ .compatible = "qcom,msm-uart" },
+ { .compatible = "qcom,msm-uartdm" },
{}
};
diff --git a/drivers/tty/serial/msm_serial.h b/drivers/tty/serial/msm_serial.h
index e4acef5de77..469fda50ac6 100644
--- a/drivers/tty/serial/msm_serial.h
+++ b/drivers/tty/serial/msm_serial.h
@@ -38,19 +38,7 @@
#define UART_MR2_PARITY_MODE_SPACE 0x3
#define UART_MR2_PARITY_MODE 0x3
-#define UART_CSR 0x0008
-#define UART_CSR_115200 0xFF
-#define UART_CSR_57600 0xEE
-#define UART_CSR_38400 0xDD
-#define UART_CSR_28800 0xCC
-#define UART_CSR_19200 0xBB
-#define UART_CSR_14400 0xAA
-#define UART_CSR_9600 0x99
-#define UART_CSR_4800 0x77
-#define UART_CSR_2400 0x55
-#define UART_CSR_1200 0x44
-#define UART_CSR_600 0x33
-#define UART_CSR_300 0x22
+#define UART_CSR 0x0008
#define UART_TF 0x000C
#define UARTDM_TF 0x0070
@@ -71,6 +59,7 @@
#define UART_CR_CMD_RESET_RFR (14 << 4)
#define UART_CR_CMD_PROTECTION_EN (16 << 4)
#define UART_CR_CMD_STALE_EVENT_ENABLE (80 << 4)
+#define UART_CR_CMD_RESET_TX_READY (3 << 8)
#define UART_CR_TX_DISABLE (1 << 3)
#define UART_CR_TX_ENABLE (1 << 2)
#define UART_CR_RX_DISABLE (1 << 1)
@@ -151,6 +140,7 @@ static inline void msm_serial_set_mnd_regs_tcxo(struct uart_port *port)
msm_write(port, 0xF1, UART_NREG);
msm_write(port, 0x0F, UART_DREG);
msm_write(port, 0x1A, UART_MNDREG);
+ port->uartclk = 1843200;
}
/*
@@ -162,6 +152,7 @@ static inline void msm_serial_set_mnd_regs_tcxoby4(struct uart_port *port)
msm_write(port, 0xF6, UART_NREG);
msm_write(port, 0x0F, UART_DREG);
msm_write(port, 0x0A, UART_MNDREG);
+ port->uartclk = 1843200;
}
static inline
@@ -169,7 +160,7 @@ void msm_serial_set_mnd_regs_from_uartclk(struct uart_port *port)
{
if (port->uartclk == 19200000)
msm_serial_set_mnd_regs_tcxo(port);
- else
+ else if (port->uartclk == 4800000)
msm_serial_set_mnd_regs_tcxoby4(port);
}
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 4ca2f64861e..48e94961a9e 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -1618,7 +1618,7 @@ static int msm_hs_probe(struct platform_device *pdev)
struct msm_hs_port *msm_uport;
struct resource *resource;
const struct msm_serial_hs_platform_data *pdata =
- pdev->dev.platform_data;
+ dev_get_platdata(&pdev->dev);
if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
printk(KERN_ERR "Invalid plaform device ID = %d\n", pdev->id);
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 4f5f161896a..10e9d70b5c4 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -32,7 +32,6 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/of_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
@@ -134,10 +133,10 @@ enum mxs_auart_type {
struct mxs_auart_port {
struct uart_port port;
-#define MXS_AUART_DMA_CONFIG 0x1
#define MXS_AUART_DMA_ENABLED 0x2
#define MXS_AUART_DMA_TX_SYNC 2 /* bit 2 */
#define MXS_AUART_DMA_RX_READY 3 /* bit 3 */
+#define MXS_AUART_RTSCTS 4 /* bit 4 */
unsigned long flags;
unsigned int ctrl;
enum mxs_auart_type devtype;
@@ -640,7 +639,8 @@ static void mxs_auart_settermios(struct uart_port *u,
* we can only implement the DMA support for auart
* in mx28.
*/
- if (is_imx28_auart(s) && (s->flags & MXS_AUART_DMA_CONFIG)) {
+ if (is_imx28_auart(s)
+ && test_bit(MXS_AUART_RTSCTS, &s->flags)) {
if (!mxs_auart_dma_init(s))
/* enable DMA transfer */
ctrl2 |= AUART_CTRL2_TXDMAE | AUART_CTRL2_RXDMAE
@@ -678,11 +678,18 @@ static void mxs_auart_settermios(struct uart_port *u,
static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
{
- u32 istatus, istat;
+ u32 istat;
struct mxs_auart_port *s = context;
u32 stat = readl(s->port.membase + AUART_STAT);
- istatus = istat = readl(s->port.membase + AUART_INTR);
+ istat = readl(s->port.membase + AUART_INTR);
+
+ /* ack irq */
+ writel(istat & (AUART_INTR_RTIS
+ | AUART_INTR_TXIS
+ | AUART_INTR_RXIS
+ | AUART_INTR_CTSMIS),
+ s->port.membase + AUART_INTR_CLR);
if (istat & AUART_INTR_CTSMIS) {
uart_handle_cts_change(&s->port, stat & AUART_STAT_CTS);
@@ -702,12 +709,6 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
istat &= ~AUART_INTR_TXIS;
}
- writel(istatus & (AUART_INTR_RTIS
- | AUART_INTR_TXIS
- | AUART_INTR_RXIS
- | AUART_INTR_CTSMIS),
- s->port.membase + AUART_INTR_CLR);
-
return IRQ_HANDLED;
}
@@ -850,7 +851,7 @@ auart_console_write(struct console *co, const char *str, unsigned int count)
struct mxs_auart_port *s;
struct uart_port *port;
unsigned int old_ctrl0, old_ctrl2;
- unsigned int to = 1000;
+ unsigned int to = 20000;
if (co->index >= MXS_AUART_PORTS || co->index < 0)
return;
@@ -871,18 +872,23 @@ auart_console_write(struct console *co, const char *str, unsigned int count)
uart_console_write(port, str, count, mxs_auart_console_putchar);
- /*
- * Finally, wait for transmitter to become empty
- * and restore the TCR
- */
+ /* Finally, wait for transmitter to become empty ... */
while (readl(port->membase + AUART_STAT) & AUART_STAT_BUSY) {
+ udelay(1);
if (!to--)
break;
- udelay(1);
}
- writel(old_ctrl0, port->membase + AUART_CTRL0);
- writel(old_ctrl2, port->membase + AUART_CTRL2);
+ /*
+ * ... and restore the TCR if we waited long enough for the transmitter
+ * to be idle. This might keep the transmitter enabled although it is
+ * unused, but that is better than disabling it while it is still
+ * transmitting.
+ */
+ if (!(readl(port->membase + AUART_STAT) & AUART_STAT_BUSY)) {
+ writel(old_ctrl0, port->membase + AUART_CTRL0);
+ writel(old_ctrl2, port->membase + AUART_CTRL2);
+ }
clk_disable(s->clk);
}
@@ -1002,7 +1008,8 @@ static int serial_mxs_probe_dt(struct mxs_auart_port *s,
}
s->port.line = ret;
- s->flags |= MXS_AUART_DMA_CONFIG;
+ if (of_get_property(np, "fsl,uart-has-rtscts", NULL))
+ set_bit(MXS_AUART_RTSCTS, &s->flags);
return 0;
}
@@ -1015,7 +1022,6 @@ static int mxs_auart_probe(struct platform_device *pdev)
u32 version;
int ret = 0;
struct resource *r;
- struct pinctrl *pinctrl;
s = kzalloc(sizeof(struct mxs_auart_port), GFP_KERNEL);
if (!s) {
@@ -1029,12 +1035,6 @@ static int mxs_auart_probe(struct platform_device *pdev)
else if (ret < 0)
goto out_free;
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl)) {
- ret = PTR_ERR(pinctrl);
- goto out_free;
- }
-
if (of_id) {
pdev->id_entry = of_id->data;
s->devtype = pdev->id_entry->driver_data;
diff --git a/drivers/tty/serial/netx-serial.c b/drivers/tty/serial/netx-serial.c
index b9a40ed70be..0a4dd70d29e 100644
--- a/drivers/tty/serial/netx-serial.c
+++ b/drivers/tty/serial/netx-serial.c
@@ -196,7 +196,7 @@ static void netx_txint(struct uart_port *port)
uart_write_wakeup(port);
}
-static void netx_rxint(struct uart_port *port)
+static void netx_rxint(struct uart_port *port, unsigned long *flags)
{
unsigned char rx, flg, status;
@@ -236,7 +236,9 @@ static void netx_rxint(struct uart_port *port)
uart_insert_char(port, status, SR_OE, rx, flg);
}
+ spin_unlock_irqrestore(&port->lock, *flags);
tty_flip_buffer_push(&port->state->port);
+ spin_lock_irqsave(&port->lock, *flags);
}
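
The same transformation recurs in several hunks below (nwpserial, pnx8xxx, rp2, sa1100, samsung, serial-tegra, sirfsoc): the receive path is entered with port->lock held, so the lock is dropped around tty_flip_buffer_push() and retaken afterwards. A minimal sketch of the shape, with the character-insertion loop elided; the helper name is hypothetical:

#include <linux/serial_core.h>
#include <linux/tty_flip.h>

static void push_rx_chars(struct uart_port *port, unsigned long *flags)
{
	/* ... uart_insert_char() calls happen here under port->lock ... */

	spin_unlock_irqrestore(&port->lock, *flags);
	tty_flip_buffer_push(&port->state->port);
	spin_lock_irqsave(&port->lock, *flags);
}
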
static irqreturn_t netx_int(int irq, void *dev_id)
@@ -250,7 +252,7 @@ static irqreturn_t netx_int(int irq, void *dev_id)
status = readl(port->membase + UART_IIR) & IIR_MASK;
while (status) {
if (status & IIR_RIS)
- netx_rxint(port);
+ netx_rxint(port, &flags);
if (status & IIR_TIS)
netx_txint(port);
if (status & IIR_MIS) {
@@ -693,8 +695,6 @@ static int serial_netx_remove(struct platform_device *pdev)
{
struct netx_port *sport = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
-
if (sport)
uart_remove_one_port(&netx_reg, &sport->port);
diff --git a/drivers/tty/serial/nwpserial.c b/drivers/tty/serial/nwpserial.c
index 549c70a2a63..693bc6c2561 100644
--- a/drivers/tty/serial/nwpserial.c
+++ b/drivers/tty/serial/nwpserial.c
@@ -149,7 +149,10 @@ static irqreturn_t nwpserial_interrupt(int irq, void *dev_id)
tty_insert_flip_char(port, ch, TTY_NORMAL);
} while (dcr_read(up->dcr_host, UART_LSR) & UART_LSR_DR);
+ spin_unlock(&up->port.lock);
tty_flip_buffer_push(port);
+ spin_lock(&up->port.lock);
+
ret = IRQ_HANDLED;
/* clear interrupt */
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index b6d17287307..816d1a23f9d 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -40,9 +40,11 @@
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/gpio.h>
-#include <linux/pinctrl/consumer.h>
+#include <linux/of_gpio.h>
#include <linux/platform_data/serial-omap.h>
+#include <dt-bindings/gpio/gpio.h>
+
#define OMAP_MAX_HSUART_PORTS 6
#define UART_BUILD_REVISION(x, y) (((x) << 8) | (y))
@@ -52,6 +54,11 @@
#define OMAP_UART_REV_52 0x0502
#define OMAP_UART_REV_63 0x0603
+#define OMAP_UART_TX_WAKEUP_EN BIT(7)
+
+/* Feature flags */
+#define OMAP_UART_WER_HAS_TX_WAKEUP BIT(0)
+
#define UART_ERRATA_i202_MDR1_ACCESS BIT(0)
#define UART_ERRATA_i291_DMA_FORCEIDLE BIT(1)
@@ -137,6 +144,7 @@ struct uart_omap_port {
unsigned char dlh;
unsigned char mdr1;
unsigned char scr;
+ unsigned char wer;
int use_dma;
/*
@@ -151,16 +159,19 @@ struct uart_omap_port {
int context_loss_cnt;
u32 errata;
u8 wakeups_enabled;
+ u32 features;
int DTR_gpio;
int DTR_inverted;
int DTR_active;
+ struct serial_rs485 rs485;
+ int rts_gpio;
+
struct pm_qos_request pm_qos_request;
u32 latency;
u32 calc_latency;
struct work_struct qos_work;
- struct pinctrl *pins;
bool is_suspending;
};
@@ -195,7 +206,7 @@ static inline void serial_omap_clear_fifos(struct uart_omap_port *up)
static int serial_omap_get_context_loss_count(struct uart_omap_port *up)
{
- struct omap_uart_port_info *pdata = up->dev->platform_data;
+ struct omap_uart_port_info *pdata = dev_get_platdata(up->dev);
if (!pdata || !pdata->get_context_loss_count)
return -EINVAL;
@@ -205,7 +216,7 @@ static int serial_omap_get_context_loss_count(struct uart_omap_port *up)
static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable)
{
- struct omap_uart_port_info *pdata = up->dev->platform_data;
+ struct omap_uart_port_info *pdata = dev_get_platdata(up->dev);
if (!pdata || !pdata->enable_wakeup)
return;
@@ -272,13 +283,42 @@ static void serial_omap_enable_ms(struct uart_port *port)
static void serial_omap_stop_tx(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
+ struct circ_buf *xmit = &up->port.state->xmit;
+ int res;
pm_runtime_get_sync(up->dev);
+
+ /* handle rs485 */
+ if (up->rs485.flags & SER_RS485_ENABLED) {
+ /* do nothing if current tx not yet completed */
+ res = serial_in(up, UART_LSR) & UART_LSR_TEMT;
+ if (!res)
+ return;
+
+ /* if there's no more data to send, turn off rts */
+ if (uart_circ_empty(xmit)) {
+ /* if rts not already disabled */
+ res = (up->rs485.flags & SER_RS485_RTS_AFTER_SEND) ? 1 : 0;
+ if (gpio_get_value(up->rts_gpio) != res) {
+ if (up->rs485.delay_rts_after_send > 0) {
+ mdelay(up->rs485.delay_rts_after_send);
+ }
+ gpio_set_value(up->rts_gpio, res);
+ }
+ }
+ }
+
if (up->ier & UART_IER_THRI) {
up->ier &= ~UART_IER_THRI;
serial_out(up, UART_IER, up->ier);
}
+ if ((up->rs485.flags & SER_RS485_ENABLED) &&
+ !(up->rs485.flags & SER_RS485_RX_DURING_TX)) {
+ up->ier = UART_IER_RLSI | UART_IER_RDI;
+ serial_out(up, UART_IER, up->ier);
+ }
+
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
}
@@ -340,8 +380,26 @@ static inline void serial_omap_enable_ier_thri(struct uart_omap_port *up)
static void serial_omap_start_tx(struct uart_port *port)
{
struct uart_omap_port *up = to_uart_omap_port(port);
+ int res;
pm_runtime_get_sync(up->dev);
+
+ /* handle rs485 */
+ if (up->rs485.flags & SER_RS485_ENABLED) {
+ /* if rts not already enabled */
+ res = (up->rs485.flags & SER_RS485_RTS_ON_SEND) ? 1 : 0;
+ if (gpio_get_value(up->rts_gpio) != res) {
+ gpio_set_value(up->rts_gpio, res);
+ if (up->rs485.delay_rts_before_send > 0) {
+ mdelay(up->rs485.delay_rts_before_send);
+ }
+ }
+ }
+
+ if ((up->rs485.flags & SER_RS485_ENABLED) &&
+ !(up->rs485.flags & SER_RS485_RX_DURING_TX))
+ serial_omap_stop_rx(port);
+
serial_omap_enable_ier_thri(up);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
@@ -683,7 +741,11 @@ static int serial_omap_startup(struct uart_port *port)
serial_out(up, UART_IER, up->ier);
/* Enable module level wake up */
- serial_out(up, UART_OMAP_WER, OMAP_UART_WER_MOD_WKUP);
+ up->wer = OMAP_UART_WER_MOD_WKUP;
+ if (up->features & OMAP_UART_WER_HAS_TX_WAKEUP)
+ up->wer |= OMAP_UART_TX_WAKEUP_EN;
+
+ serial_out(up, UART_OMAP_WER, up->wer);
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
@@ -1254,6 +1316,76 @@ static inline void serial_omap_add_console_port(struct uart_omap_port *up)
#endif
+/* Enable or disable the rs485 support */
+static void
+serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
+{
+ struct uart_omap_port *up = to_uart_omap_port(port);
+ unsigned long flags;
+ unsigned int mode;
+ int val;
+
+ pm_runtime_get_sync(up->dev);
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /* Disable interrupts from this port */
+ mode = up->ier;
+ up->ier = 0;
+ serial_out(up, UART_IER, 0);
+
+ /* store new config */
+ up->rs485 = *rs485conf;
+
+ /*
+ * Just as a precaution, only allow rs485
+ * to be enabled if the gpio pin is valid
+ */
+ if (gpio_is_valid(up->rts_gpio)) {
+ /* enable / disable rts */
+ val = (up->rs485.flags & SER_RS485_ENABLED) ?
+ SER_RS485_RTS_AFTER_SEND : SER_RS485_RTS_ON_SEND;
+ val = (up->rs485.flags & val) ? 1 : 0;
+ gpio_set_value(up->rts_gpio, val);
+ } else
+ up->rs485.flags &= ~SER_RS485_ENABLED;
+
+ /* Enable interrupts */
+ up->ier = mode;
+ serial_out(up, UART_IER, up->ier);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ pm_runtime_mark_last_busy(up->dev);
+ pm_runtime_put_autosuspend(up->dev);
+}
+
+static int
+serial_omap_ioctl(struct uart_port *port, unsigned int cmd, unsigned long arg)
+{
+ struct serial_rs485 rs485conf;
+
+ switch (cmd) {
+ case TIOCSRS485:
+ if (copy_from_user(&rs485conf, (struct serial_rs485 *) arg,
+ sizeof(rs485conf)))
+ return -EFAULT;
+
+ serial_omap_config_rs485(port, &rs485conf);
+ break;
+
+ case TIOCGRS485:
+ if (copy_to_user((struct serial_rs485 *) arg,
+ &(to_uart_omap_port(port)->rs485),
+ sizeof(rs485conf)))
+ return -EFAULT;
+ break;
+
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return 0;
+}
+
+
static struct uart_ops serial_omap_pops = {
.tx_empty = serial_omap_tx_empty,
.set_mctrl = serial_omap_set_mctrl,
@@ -1275,6 +1407,7 @@ static struct uart_ops serial_omap_pops = {
.request_port = serial_omap_request_port,
.config_port = serial_omap_config_port,
.verify_port = serial_omap_verify_port,
+ .ioctl = serial_omap_ioctl,
#ifdef CONFIG_CONSOLE_POLL
.poll_put_char = serial_omap_poll_put_char,
.poll_get_char = serial_omap_poll_get_char,
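
The TIOCSRS485/TIOCGRS485 ioctls wired up above use the standard struct serial_rs485 interface from <linux/serial.h>. A hedged userspace example of driving it; the device node name is hypothetical:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_rs485 rs485;
	int fd = open("/dev/ttyO1", O_RDWR);	/* hypothetical port */

	if (fd < 0)
		return 1;

	memset(&rs485, 0, sizeof(rs485));
	rs485.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND;
	rs485.delay_rts_after_send = 1;		/* milliseconds */

	if (ioctl(fd, TIOCSRS485, &rs485) < 0)	/* apply the configuration */
		perror("TIOCSRS485");

	if (ioctl(fd, TIOCGRS485, &rs485) == 0)	/* read it back */
		printf("rs485 flags: 0x%x\n", rs485.flags);

	close(fd);
	return 0;
}
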
@@ -1334,7 +1467,7 @@ static void omap_serial_fill_features_erratas(struct uart_omap_port *up)
u32 mvr, scheme;
u16 revision, major, minor;
- mvr = serial_in(up, UART_OMAP_MVER);
+ mvr = readl(up->port.membase + (UART_OMAP_MVER << up->port.regshift));
/* Check revision register scheme */
scheme = mvr >> OMAP_UART_MVR_SCHEME_SHIFT;
@@ -1373,9 +1506,11 @@ static void omap_serial_fill_features_erratas(struct uart_omap_port *up)
case OMAP_UART_REV_52:
up->errata |= (UART_ERRATA_i202_MDR1_ACCESS |
UART_ERRATA_i291_DMA_FORCEIDLE);
+ up->features |= OMAP_UART_WER_HAS_TX_WAKEUP;
break;
case OMAP_UART_REV_63:
up->errata |= UART_ERRATA_i202_MDR1_ACCESS;
+ up->features |= OMAP_UART_WER_HAS_TX_WAKEUP;
break;
default:
break;
@@ -1395,15 +1530,64 @@ static struct omap_uart_port_info *of_get_uart_port_info(struct device *dev)
return omap_up_info;
}
+static int serial_omap_probe_rs485(struct uart_omap_port *up,
+ struct device_node *np)
+{
+ struct serial_rs485 *rs485conf = &up->rs485;
+ u32 rs485_delay[2];
+ enum of_gpio_flags flags;
+ int ret;
+
+ rs485conf->flags = 0;
+ up->rts_gpio = -EINVAL;
+
+ if (!np)
+ return 0;
+
+ if (of_property_read_bool(np, "rs485-rts-active-high"))
+ rs485conf->flags |= SER_RS485_RTS_ON_SEND;
+ else
+ rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
+
+ /* check for tx enable gpio */
+ up->rts_gpio = of_get_named_gpio_flags(np, "rts-gpio", 0, &flags);
+ if (gpio_is_valid(up->rts_gpio)) {
+ ret = gpio_request(up->rts_gpio, "omap-serial");
+ if (ret < 0)
+ return ret;
+ ret = gpio_direction_output(up->rts_gpio,
+ flags & SER_RS485_RTS_AFTER_SEND);
+ if (ret < 0)
+ return ret;
+ } else
+ up->rts_gpio = -EINVAL;
+
+ if (of_property_read_u32_array(np, "rs485-rts-delay",
+ rs485_delay, 2) == 0) {
+ rs485conf->delay_rts_before_send = rs485_delay[0];
+ rs485conf->delay_rts_after_send = rs485_delay[1];
+ }
+
+ if (of_property_read_bool(np, "rs485-rx-during-tx"))
+ rs485conf->flags |= SER_RS485_RX_DURING_TX;
+
+ if (of_property_read_bool(np, "linux,rs485-enabled-at-boot-time"))
+ rs485conf->flags |= SER_RS485_ENABLED;
+
+ return 0;
+}
+
static int serial_omap_probe(struct platform_device *pdev)
{
struct uart_omap_port *up;
struct resource *mem, *irq;
- struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data;
+ struct omap_uart_port_info *omap_up_info = dev_get_platdata(&pdev->dev);
int ret;
- if (pdev->dev.of_node)
+ if (pdev->dev.of_node) {
omap_up_info = of_get_uart_port_info(&pdev->dev);
+ pdev->dev.platform_data = omap_up_info;
+ }
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
@@ -1468,12 +1652,9 @@ static int serial_omap_probe(struct platform_device *pdev)
goto err_port_line;
}
- up->pins = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(up->pins)) {
- dev_warn(&pdev->dev, "did not get pins for uart%i error: %li\n",
- up->port.line, PTR_ERR(up->pins));
- up->pins = NULL;
- }
+ ret = serial_omap_probe_rs485(up, pdev->dev.of_node);
+ if (ret < 0)
+ goto err_rs485;
sprintf(up->name, "OMAP UART%d", up->port.line);
up->port.mapbase = mem->start;
@@ -1501,7 +1682,6 @@ static int serial_omap_probe(struct platform_device *pdev)
INIT_WORK(&up->qos_work, serial_omap_uart_qos_work);
platform_set_drvdata(pdev, up);
- pm_runtime_enable(&pdev->dev);
if (omap_up_info->autosuspend_timeout == 0)
omap_up_info->autosuspend_timeout = -1;
device_init_wakeup(up->dev, true);
@@ -1510,6 +1690,8 @@ static int serial_omap_probe(struct platform_device *pdev)
omap_up_info->autosuspend_timeout);
pm_runtime_irq_safe(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
pm_runtime_get_sync(&pdev->dev);
omap_serial_fill_features_erratas(up);
@@ -1529,6 +1711,7 @@ err_add_port:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
err_ioremap:
+err_rs485:
err_port_line:
dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n",
pdev->id, __func__, ret);
@@ -1609,6 +1792,7 @@ static void serial_omap_restore_context(struct uart_omap_port *up)
serial_omap_mdr1_errataset(up, up->mdr1);
else
serial_out(up, UART_OMAP_MDR1, up->mdr1);
+ serial_out(up, UART_OMAP_WER, up->wer);
}
static int serial_omap_runtime_suspend(struct device *dev)
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 572d48189de..52379e56a31 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -232,7 +232,7 @@ struct eg20t_port {
unsigned int iobase;
struct pci_dev *pdev;
int fifo_size;
- int uartclk;
+ unsigned int uartclk;
int start_tx;
int start_rx;
int tx_empty;
@@ -373,35 +373,62 @@ static const struct file_operations port_regs_ops = {
};
#endif /* CONFIG_DEBUG_FS */
+static struct dmi_system_id pch_uart_dmi_table[] = {
+ {
+ .ident = "CM-iTC",
+ {
+ DMI_MATCH(DMI_BOARD_NAME, "CM-iTC"),
+ },
+ (void *)CMITC_UARTCLK,
+ },
+ {
+ .ident = "FRI2",
+ {
+ DMI_MATCH(DMI_BIOS_VERSION, "FRI2"),
+ },
+ (void *)FRI2_64_UARTCLK,
+ },
+ {
+ .ident = "Fish River Island II",
+ {
+ DMI_MATCH(DMI_PRODUCT_NAME, "Fish River Island II"),
+ },
+ (void *)FRI2_48_UARTCLK,
+ },
+ {
+ .ident = "COMe-mTT",
+ {
+ DMI_MATCH(DMI_BOARD_NAME, "COMe-mTT"),
+ },
+ (void *)NTC1_UARTCLK,
+ },
+ {
+ .ident = "nanoETXexpress-TT",
+ {
+ DMI_MATCH(DMI_BOARD_NAME, "nanoETXexpress-TT"),
+ },
+ (void *)NTC1_UARTCLK,
+ },
+ {
+ .ident = "MinnowBoard",
+ {
+ DMI_MATCH(DMI_BOARD_NAME, "MinnowBoard"),
+ },
+ (void *)MINNOW_UARTCLK,
+ },
+};
+
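
The table above replaces the open-coded dmi_get_system_info()/strstr() chain removed further down. As a hypothetical illustration of how another board would be added (the entry and its name are invented; DMI_MATCH() and dmi_first_match() are the real helpers):

#include <linux/dmi.h>

static const struct dmi_system_id example_board_table[] = {
	{
		.ident = "Example-Board",	/* hypothetical board */
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "Example-Board"),
		},
		.driver_data = (void *)DEFAULT_UARTCLK,
	},
	{ }	/* empty terminator: dmi_first_match() stops at a NULL .ident */
};
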
/* Return UART clock, checking for board specific clocks. */
-static int pch_uart_get_uartclk(void)
+static unsigned int pch_uart_get_uartclk(void)
{
- const char *cmp;
+ const struct dmi_system_id *d;
if (user_uartclk)
return user_uartclk;
- cmp = dmi_get_system_info(DMI_BOARD_NAME);
- if (cmp && strstr(cmp, "CM-iTC"))
- return CMITC_UARTCLK;
-
- cmp = dmi_get_system_info(DMI_BIOS_VERSION);
- if (cmp && strnstr(cmp, "FRI2", 4))
- return FRI2_64_UARTCLK;
-
- cmp = dmi_get_system_info(DMI_PRODUCT_NAME);
- if (cmp && strstr(cmp, "Fish River Island II"))
- return FRI2_48_UARTCLK;
-
- /* Kontron COMe-mTT10 (nanoETXexpress-TT) */
- cmp = dmi_get_system_info(DMI_BOARD_NAME);
- if (cmp && (strstr(cmp, "COMe-mTT") ||
- strstr(cmp, "nanoETXexpress-TT")))
- return NTC1_UARTCLK;
-
- cmp = dmi_get_system_info(DMI_BOARD_NAME);
- if (cmp && strstr(cmp, "MinnowBoard"))
- return MINNOW_UARTCLK;
+ d = dmi_first_match(pch_uart_dmi_table);
+ if (d)
+ return (unsigned long)d->driver_data;
return DEFAULT_UARTCLK;
}
@@ -422,7 +449,7 @@ static void pch_uart_hal_disable_interrupt(struct eg20t_port *priv,
iowrite8(ier, priv->membase + UART_IER);
}
-static int pch_uart_hal_set_line(struct eg20t_port *priv, int baud,
+static int pch_uart_hal_set_line(struct eg20t_port *priv, unsigned int baud,
unsigned int parity, unsigned int bits,
unsigned int stb)
{
@@ -457,7 +484,7 @@ static int pch_uart_hal_set_line(struct eg20t_port *priv, int baud,
lcr |= bits;
lcr |= stb;
- dev_dbg(priv->port.dev, "%s:baud = %d, div = %04x, lcr = %02x (%lu)\n",
+ dev_dbg(priv->port.dev, "%s:baud = %u, div = %04x, lcr = %02x (%lu)\n",
__func__, baud, div, lcr, jiffies);
iowrite8(PCH_UART_LCR_DLAB, priv->membase + UART_LCR);
iowrite8(dll, priv->membase + PCH_UART_DLL);
@@ -1363,9 +1390,8 @@ static void pch_uart_shutdown(struct uart_port *port)
static void pch_uart_set_termios(struct uart_port *port,
struct ktermios *termios, struct ktermios *old)
{
- int baud;
int rtn;
- unsigned int parity, bits, stb;
+ unsigned int baud, parity, bits, stb;
struct eg20t_port *priv;
unsigned long flags;
@@ -1498,6 +1524,7 @@ static int pch_uart_verify_port(struct uart_port *port,
return 0;
}
+#if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_PCH_UART_CONSOLE)
/*
* Wait for transmitter & holding register to empty
*/
@@ -1528,6 +1555,7 @@ static void wait_for_xmitr(struct eg20t_port *up, int bits)
}
}
}
+#endif /* CONFIG_CONSOLE_POLL || CONFIG_SERIAL_PCH_UART_CONSOLE */
#ifdef CONFIG_CONSOLE_POLL
/*
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index b1785f58b6e..f87f1a0c8c6 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -1798,7 +1798,6 @@ static int __exit pmz_detach(struct platform_device *pdev)
uart_remove_one_port(&pmz_uart_reg, &uap->port);
- platform_set_drvdata(pdev, NULL);
uap->port.dev = NULL;
return 0;
diff --git a/drivers/tty/serial/pnx8xxx_uart.c b/drivers/tty/serial/pnx8xxx_uart.c
index 7e277a5384a..de6c05c6368 100644
--- a/drivers/tty/serial/pnx8xxx_uart.c
+++ b/drivers/tty/serial/pnx8xxx_uart.c
@@ -237,7 +237,10 @@ static void pnx8xxx_rx_chars(struct pnx8xxx_port *sport)
status = FIFO_TO_SM(serial_in(sport, PNX8XXX_FIFO)) |
ISTAT_TO_SM(serial_in(sport, PNX8XXX_ISTAT));
}
+
+ spin_unlock(&sport->port.lock);
tty_flip_buffer_push(&sport->port.state->port);
+ spin_lock(&sport->port.lock);
}
static void pnx8xxx_tx_chars(struct pnx8xxx_port *sport)
@@ -801,8 +804,6 @@ static int pnx8xxx_serial_remove(struct platform_device *pdev)
{
struct pnx8xxx_port *sport = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
-
if (sport)
uart_remove_one_port(&pnx8xxx_reg, &sport->port);
diff --git a/drivers/tty/serial/pxa.c b/drivers/tty/serial/pxa.c
index 05f504e0c27..f9f20f38376 100644
--- a/drivers/tty/serial/pxa.c
+++ b/drivers/tty/serial/pxa.c
@@ -332,31 +332,6 @@ static void serial_pxa_break_ctl(struct uart_port *port, int break_state)
spin_unlock_irqrestore(&up->port.lock, flags);
}
-#if 0
-static void serial_pxa_dma_init(struct pxa_uart *up)
-{
- up->rxdma =
- pxa_request_dma(up->name, DMA_PRIO_LOW, pxa_receive_dma, up);
- if (up->rxdma < 0)
- goto out;
- up->txdma =
- pxa_request_dma(up->name, DMA_PRIO_LOW, pxa_transmit_dma, up);
- if (up->txdma < 0)
- goto err_txdma;
- up->dmadesc = kmalloc(4 * sizeof(pxa_dma_desc), GFP_KERNEL);
- if (!up->dmadesc)
- goto err_alloc;
-
- /* ... */
-err_alloc:
- pxa_free_dma(up->txdma);
-err_rxdma:
- pxa_free_dma(up->rxdma);
-out:
- return;
-}
-#endif
-
static int serial_pxa_startup(struct uart_port *port)
{
struct uart_pxa_port *up = (struct uart_pxa_port *)port;
@@ -790,7 +765,7 @@ static struct console serial_pxa_console = {
#define PXA_CONSOLE NULL
#endif
-struct uart_ops serial_pxa_pops = {
+static struct uart_ops serial_pxa_pops = {
.tx_empty = serial_pxa_tx_empty,
.set_mctrl = serial_pxa_set_mctrl,
.get_mctrl = serial_pxa_get_mctrl,
@@ -945,8 +920,6 @@ static int serial_pxa_remove(struct platform_device *dev)
{
struct uart_pxa_port *sport = platform_get_drvdata(dev);
- platform_set_drvdata(dev, NULL);
-
uart_remove_one_port(&serial_pxa_reg, &sport->port);
clk_unprepare(sport->clk);
@@ -970,7 +943,7 @@ static struct platform_driver serial_pxa_driver = {
},
};
-int __init serial_pxa_init(void)
+static int __init serial_pxa_init(void)
{
int ret;
@@ -985,7 +958,7 @@ int __init serial_pxa_init(void)
return ret;
}
-void __exit serial_pxa_exit(void)
+static void __exit serial_pxa_exit(void)
{
platform_driver_unregister(&serial_pxa_driver);
uart_unregister_driver(&serial_pxa_reg);
diff --git a/drivers/tty/serial/rp2.c b/drivers/tty/serial/rp2.c
index a314a943f12..328d6deb6b0 100644
--- a/drivers/tty/serial/rp2.c
+++ b/drivers/tty/serial/rp2.c
@@ -427,7 +427,9 @@ static void rp2_rx_chars(struct rp2_uart_port *up)
up->port.icount.rx++;
}
+ spin_unlock(&up->port.lock);
tty_flip_buffer_push(port);
+ spin_lock(&up->port.lock);
}
static void rp2_tx_chars(struct rp2_uart_port *up)
diff --git a/drivers/tty/serial/sa1100.c b/drivers/tty/serial/sa1100.c
index af6b3e3ad24..ba25722a713 100644
--- a/drivers/tty/serial/sa1100.c
+++ b/drivers/tty/serial/sa1100.c
@@ -232,7 +232,10 @@ sa1100_rx_chars(struct sa1100_port *sport)
status = UTSR1_TO_SM(UART_GET_UTSR1(sport)) |
UTSR0_TO_SM(UART_GET_UTSR0(sport));
}
+
+ spin_unlock(&sport->port.lock);
tty_flip_buffer_push(&sport->port.state->port);
+ spin_lock(&sport->port.lock);
}
static void sa1100_tx_chars(struct sa1100_port *sport)
@@ -864,8 +867,6 @@ static int sa1100_serial_remove(struct platform_device *pdev)
{
struct sa1100_port *sport = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
-
if (sport)
uart_remove_one_port(&sa1100_reg, &sport->port);
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 376079b9bd7..f3dfa19a1cb 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -249,6 +249,8 @@ s3c24xx_serial_rx_chars(int irq, void *dev_id)
ufcon |= S3C2410_UFCON_RESETRX;
wr_regl(port, S3C2410_UFCON, ufcon);
rx_enabled(port) = 1;
+ spin_unlock_irqrestore(&port->lock,
+ flags);
goto out;
}
continue;
@@ -297,10 +299,11 @@ s3c24xx_serial_rx_chars(int irq, void *dev_id)
ignore_char:
continue;
}
+
+ spin_unlock_irqrestore(&port->lock, flags);
tty_flip_buffer_push(&port->state->port);
out:
- spin_unlock_irqrestore(&port->lock, flags);
return IRQ_HANDLED;
}
@@ -1250,8 +1253,8 @@ static int s3c24xx_serial_probe(struct platform_device *pdev)
ourport->baudclk = ERR_PTR(-EINVAL);
ourport->info = ourport->drv_data->info;
- ourport->cfg = (pdev->dev.platform_data) ?
- (struct s3c2410_uartcfg *)pdev->dev.platform_data :
+ ourport->cfg = (dev_get_platdata(&pdev->dev)) ?
+ (struct s3c2410_uartcfg *)dev_get_platdata(&pdev->dev) :
ourport->drv_data->def_cfg;
ourport->port.fifosize = (ourport->info->fifosize) ?
diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
index 00a499ecd38..aaa617a6c49 100644
--- a/drivers/tty/serial/samsung.h
+++ b/drivers/tty/serial/samsung.h
@@ -68,7 +68,8 @@ struct s3c24xx_uart_port {
/* register access controls */
#define portaddr(port, reg) ((port)->membase + (reg))
-#define portaddrl(port, reg) ((unsigned long *)((port)->membase + (reg)))
+#define portaddrl(port, reg) \
+ ((unsigned long *)(unsigned long)((port)->membase + (reg)))
#define rd_regb(port, reg) (__raw_readb(portaddr(port, reg)))
#define rd_regl(port, reg) (__raw_readl(portaddr(port, reg)))
diff --git a/drivers/tty/serial/sc26xx.c b/drivers/tty/serial/sc26xx.c
index 4b1434d53e9..887b4f77074 100644
--- a/drivers/tty/serial/sc26xx.c
+++ b/drivers/tty/serial/sc26xx.c
@@ -637,7 +637,7 @@ static int sc26xx_probe(struct platform_device *dev)
{
struct resource *res;
struct uart_sc26xx_port *up;
- unsigned int *sc26xx_data = dev->dev.platform_data;
+ unsigned int *sc26xx_data = dev_get_platdata(&dev->dev);
int err;
res = platform_get_resource(dev, IORESOURCE_MEM, 0);
diff --git a/drivers/tty/serial/sccnxp.c b/drivers/tty/serial/sccnxp.c
index c7730415541..49e9bbfe6ca 100644
--- a/drivers/tty/serial/sccnxp.c
+++ b/drivers/tty/serial/sccnxp.c
@@ -15,6 +15,7 @@
#define SUPPORT_SYSRQ
#endif
+#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/device.h>
@@ -94,16 +95,17 @@
#define MCTRL_IBIT(cfg, sig) ((((cfg) >> (sig)) & 0xf) - LINE_IP0)
#define MCTRL_OBIT(cfg, sig) ((((cfg) >> (sig)) & 0xf) - LINE_OP0)
-/* Supported chip types */
-enum {
- SCCNXP_TYPE_SC2681 = 2681,
- SCCNXP_TYPE_SC2691 = 2691,
- SCCNXP_TYPE_SC2692 = 2692,
- SCCNXP_TYPE_SC2891 = 2891,
- SCCNXP_TYPE_SC2892 = 2892,
- SCCNXP_TYPE_SC28202 = 28202,
- SCCNXP_TYPE_SC68681 = 68681,
- SCCNXP_TYPE_SC68692 = 68692,
+#define SCCNXP_HAVE_IO 0x00000001
+#define SCCNXP_HAVE_MR0 0x00000002
+
+struct sccnxp_chip {
+ const char *name;
+ unsigned int nr;
+ unsigned long freq_min;
+ unsigned long freq_std;
+ unsigned long freq_max;
+ unsigned int flags;
+ unsigned int fifosize;
};
struct sccnxp_port {
@@ -111,16 +113,10 @@ struct sccnxp_port {
struct uart_port port[SCCNXP_MAX_UARTS];
bool opened[SCCNXP_MAX_UARTS];
- const char *name;
int irq;
-
u8 imr;
- u8 addr_mask;
- int freq_std;
- int flags;
-#define SCCNXP_HAVE_IO 0x00000001
-#define SCCNXP_HAVE_MR0 0x00000002
+ struct sccnxp_chip *chip;
#ifdef CONFIG_SERIAL_SCCNXP_CONSOLE
struct console console;
@@ -136,29 +132,94 @@ struct sccnxp_port {
struct regulator *regulator;
};
-static inline u8 sccnxp_raw_read(void __iomem *base, u8 reg, u8 shift)
-{
- return readb(base + (reg << shift));
-}
+static const struct sccnxp_chip sc2681 = {
+ .name = "SC2681",
+ .nr = 2,
+ .freq_min = 1000000,
+ .freq_std = 3686400,
+ .freq_max = 4000000,
+ .flags = SCCNXP_HAVE_IO,
+ .fifosize = 3,
+};
-static inline void sccnxp_raw_write(void __iomem *base, u8 reg, u8 shift, u8 v)
-{
- writeb(v, base + (reg << shift));
-}
+static const struct sccnxp_chip sc2691 = {
+ .name = "SC2691",
+ .nr = 1,
+ .freq_min = 1000000,
+ .freq_std = 3686400,
+ .freq_max = 4000000,
+ .flags = 0,
+ .fifosize = 3,
+};
+
+static const struct sccnxp_chip sc2692 = {
+ .name = "SC2692",
+ .nr = 2,
+ .freq_min = 1000000,
+ .freq_std = 3686400,
+ .freq_max = 4000000,
+ .flags = SCCNXP_HAVE_IO,
+ .fifosize = 3,
+};
+
+static const struct sccnxp_chip sc2891 = {
+ .name = "SC2891",
+ .nr = 1,
+ .freq_min = 100000,
+ .freq_std = 3686400,
+ .freq_max = 8000000,
+ .flags = SCCNXP_HAVE_IO | SCCNXP_HAVE_MR0,
+ .fifosize = 16,
+};
+
+static const struct sccnxp_chip sc2892 = {
+ .name = "SC2892",
+ .nr = 2,
+ .freq_min = 100000,
+ .freq_std = 3686400,
+ .freq_max = 8000000,
+ .flags = SCCNXP_HAVE_IO | SCCNXP_HAVE_MR0,
+ .fifosize = 16,
+};
+
+static const struct sccnxp_chip sc28202 = {
+ .name = "SC28202",
+ .nr = 2,
+ .freq_min = 1000000,
+ .freq_std = 14745600,
+ .freq_max = 50000000,
+ .flags = SCCNXP_HAVE_IO | SCCNXP_HAVE_MR0,
+ .fifosize = 256,
+};
+
+static const struct sccnxp_chip sc68681 = {
+ .name = "SC68681",
+ .nr = 2,
+ .freq_min = 1000000,
+ .freq_std = 3686400,
+ .freq_max = 4000000,
+ .flags = SCCNXP_HAVE_IO,
+ .fifosize = 3,
+};
+
+static const struct sccnxp_chip sc68692 = {
+ .name = "SC68692",
+ .nr = 2,
+ .freq_min = 1000000,
+ .freq_std = 3686400,
+ .freq_max = 4000000,
+ .flags = SCCNXP_HAVE_IO,
+ .fifosize = 3,
+};
static inline u8 sccnxp_read(struct uart_port *port, u8 reg)
{
- struct sccnxp_port *s = dev_get_drvdata(port->dev);
-
- return sccnxp_raw_read(port->membase, reg & s->addr_mask,
- port->regshift);
+ return readb(port->membase + (reg << port->regshift));
}
static inline void sccnxp_write(struct uart_port *port, u8 reg, u8 v)
{
- struct sccnxp_port *s = dev_get_drvdata(port->dev);
-
- sccnxp_raw_write(port->membase, reg & s->addr_mask, port->regshift, v);
+ writeb(v, port->membase + (reg << port->regshift));
}
static inline u8 sccnxp_port_read(struct uart_port *port, u8 reg)
@@ -224,13 +285,14 @@ static int sccnxp_set_baud(struct uart_port *port, int baud)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
int div_std, tmp_baud, bestbaud = baud, besterr = -1;
+ struct sccnxp_chip *chip = s->chip;
u8 i, acr = 0, csr = 0, mr0 = 0;
/* Find best baud from table */
for (i = 0; baud_std[i].baud && besterr; i++) {
- if (baud_std[i].mr0 && !(s->flags & SCCNXP_HAVE_MR0))
+ if (baud_std[i].mr0 && !(chip->flags & SCCNXP_HAVE_MR0))
continue;
- div_std = DIV_ROUND_CLOSEST(s->freq_std, baud_std[i].baud);
+ div_std = DIV_ROUND_CLOSEST(chip->freq_std, baud_std[i].baud);
tmp_baud = DIV_ROUND_CLOSEST(port->uartclk, div_std);
if (!sccnxp_update_best_err(baud, tmp_baud, &besterr)) {
acr = baud_std[i].acr;
@@ -240,7 +302,7 @@ static int sccnxp_set_baud(struct uart_port *port, int baud)
}
}
- if (s->flags & SCCNXP_HAVE_MR0) {
+ if (chip->flags & SCCNXP_HAVE_MR0) {
/* Enable FIFO, set half level for TX */
mr0 |= MR0_FIFO | MR0_TXLVL;
/* Update MR0 */
@@ -363,7 +425,7 @@ static void sccnxp_handle_tx(struct uart_port *port)
sccnxp_disable_irq(port, IMR_TXRDY);
/* Set direction to input */
- if (s->flags & SCCNXP_HAVE_IO)
+ if (s->chip->flags & SCCNXP_HAVE_IO)
sccnxp_set_bit(port, DIR_OP, 0);
}
return;
@@ -437,7 +499,7 @@ static void sccnxp_start_tx(struct uart_port *port)
spin_lock_irqsave(&s->lock, flags);
/* Set direction to output */
- if (s->flags & SCCNXP_HAVE_IO)
+ if (s->chip->flags & SCCNXP_HAVE_IO)
sccnxp_set_bit(port, DIR_OP, 1);
sccnxp_enable_irq(port, IMR_TXRDY);
@@ -483,7 +545,7 @@ static void sccnxp_set_mctrl(struct uart_port *port, unsigned int mctrl)
struct sccnxp_port *s = dev_get_drvdata(port->dev);
unsigned long flags;
- if (!(s->flags & SCCNXP_HAVE_IO))
+ if (!(s->chip->flags & SCCNXP_HAVE_IO))
return;
spin_lock_irqsave(&s->lock, flags);
@@ -501,7 +563,7 @@ static unsigned int sccnxp_get_mctrl(struct uart_port *port)
struct sccnxp_port *s = dev_get_drvdata(port->dev);
unsigned int mctrl = TIOCM_DSR | TIOCM_CTS | TIOCM_CAR;
- if (!(s->flags & SCCNXP_HAVE_IO))
+ if (!(s->chip->flags & SCCNXP_HAVE_IO))
return mctrl;
spin_lock_irqsave(&s->lock, flags);
@@ -617,7 +679,7 @@ static void sccnxp_set_termios(struct uart_port *port,
/* Setup baudrate */
baud = uart_get_baud_rate(port, termios, old, 50,
- (s->flags & SCCNXP_HAVE_MR0) ?
+ (s->chip->flags & SCCNXP_HAVE_MR0) ?
230400 : 38400);
baud = sccnxp_set_baud(port, baud);
@@ -641,7 +703,7 @@ static int sccnxp_startup(struct uart_port *port)
spin_lock_irqsave(&s->lock, flags);
- if (s->flags & SCCNXP_HAVE_IO) {
+ if (s->chip->flags & SCCNXP_HAVE_IO) {
/* Outputs are controlled manually */
sccnxp_write(port, SCCNXP_OPCR_REG, 0);
}
@@ -681,7 +743,7 @@ static void sccnxp_shutdown(struct uart_port *port)
sccnxp_port_write(port, SCCNXP_CR_REG, CR_RX_DISABLE | CR_TX_DISABLE);
/* Leave direction to input */
- if (s->flags & SCCNXP_HAVE_IO)
+ if (s->chip->flags & SCCNXP_HAVE_IO)
sccnxp_set_bit(port, DIR_OP, 0);
spin_unlock_irqrestore(&s->lock, flags);
@@ -691,7 +753,7 @@ static const char *sccnxp_type(struct uart_port *port)
{
struct sccnxp_port *s = dev_get_drvdata(port->dev);
- return (port->type == PORT_SC26XX) ? s->name : NULL;
+ return (port->type == PORT_SC26XX) ? s->chip->name : NULL;
}
static void sccnxp_release_port(struct uart_port *port)
@@ -778,19 +840,31 @@ static int sccnxp_console_setup(struct console *co, char *options)
}
#endif
+static const struct platform_device_id sccnxp_id_table[] = {
+ { .name = "sc2681", .driver_data = (kernel_ulong_t)&sc2681, },
+ { .name = "sc2691", .driver_data = (kernel_ulong_t)&sc2691, },
+ { .name = "sc2692", .driver_data = (kernel_ulong_t)&sc2692, },
+ { .name = "sc2891", .driver_data = (kernel_ulong_t)&sc2891, },
+ { .name = "sc2892", .driver_data = (kernel_ulong_t)&sc2892, },
+ { .name = "sc28202", .driver_data = (kernel_ulong_t)&sc28202, },
+ { .name = "sc68681", .driver_data = (kernel_ulong_t)&sc68681, },
+ { .name = "sc68692", .driver_data = (kernel_ulong_t)&sc68692, },
+ { }
+};
+MODULE_DEVICE_TABLE(platform, sccnxp_id_table);
+
static int sccnxp_probe(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- int chiptype = pdev->id_entry->driver_data;
struct sccnxp_pdata *pdata = dev_get_platdata(&pdev->dev);
- int i, ret, fifosize, freq_min, freq_max;
+ int i, ret, uartclk;
struct sccnxp_port *s;
void __iomem *membase;
+ struct clk *clk;
- if (!res) {
- dev_err(&pdev->dev, "Missing memory resource data\n");
- return -EADDRNOTAVAIL;
- }
+ membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(membase))
+ return PTR_ERR(membase);
s = devm_kzalloc(&pdev->dev, sizeof(struct sccnxp_port), GFP_KERNEL);
if (!s) {
@@ -801,99 +875,38 @@ static int sccnxp_probe(struct platform_device *pdev)
spin_lock_init(&s->lock);
- /* Individual chip settings */
- switch (chiptype) {
- case SCCNXP_TYPE_SC2681:
- s->name = "SC2681";
- s->uart.nr = 2;
- s->freq_std = 3686400;
- s->addr_mask = 0x0f;
- s->flags = SCCNXP_HAVE_IO;
- fifosize = 3;
- freq_min = 1000000;
- freq_max = 4000000;
- break;
- case SCCNXP_TYPE_SC2691:
- s->name = "SC2691";
- s->uart.nr = 1;
- s->freq_std = 3686400;
- s->addr_mask = 0x07;
- s->flags = 0;
- fifosize = 3;
- freq_min = 1000000;
- freq_max = 4000000;
- break;
- case SCCNXP_TYPE_SC2692:
- s->name = "SC2692";
- s->uart.nr = 2;
- s->freq_std = 3686400;
- s->addr_mask = 0x0f;
- s->flags = SCCNXP_HAVE_IO;
- fifosize = 3;
- freq_min = 1000000;
- freq_max = 4000000;
- break;
- case SCCNXP_TYPE_SC2891:
- s->name = "SC2891";
- s->uart.nr = 1;
- s->freq_std = 3686400;
- s->addr_mask = 0x0f;
- s->flags = SCCNXP_HAVE_IO | SCCNXP_HAVE_MR0;
- fifosize = 16;
- freq_min = 100000;
- freq_max = 8000000;
- break;
- case SCCNXP_TYPE_SC2892:
- s->name = "SC2892";
- s->uart.nr = 2;
- s->freq_std = 3686400;
- s->addr_mask = 0x0f;
- s->flags = SCCNXP_HAVE_IO | SCCNXP_HAVE_MR0;
- fifosize = 16;
- freq_min = 100000;
- freq_max = 8000000;
- break;
- case SCCNXP_TYPE_SC28202:
- s->name = "SC28202";
- s->uart.nr = 2;
- s->freq_std = 14745600;
- s->addr_mask = 0x7f;
- s->flags = SCCNXP_HAVE_IO | SCCNXP_HAVE_MR0;
- fifosize = 256;
- freq_min = 1000000;
- freq_max = 50000000;
- break;
- case SCCNXP_TYPE_SC68681:
- s->name = "SC68681";
- s->uart.nr = 2;
- s->freq_std = 3686400;
- s->addr_mask = 0x0f;
- s->flags = SCCNXP_HAVE_IO;
- fifosize = 3;
- freq_min = 1000000;
- freq_max = 4000000;
- break;
- case SCCNXP_TYPE_SC68692:
- s->name = "SC68692";
- s->uart.nr = 2;
- s->freq_std = 3686400;
- s->addr_mask = 0x0f;
- s->flags = SCCNXP_HAVE_IO;
- fifosize = 3;
- freq_min = 1000000;
- freq_max = 4000000;
- break;
- default:
- dev_err(&pdev->dev, "Unsupported chip type %i\n", chiptype);
- ret = -ENOTSUPP;
+ s->chip = (struct sccnxp_chip *)pdev->id_entry->driver_data;
+
+ s->regulator = devm_regulator_get(&pdev->dev, "vcc");
+ if (!IS_ERR(s->regulator)) {
+ ret = regulator_enable(s->regulator);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to enable regulator: %i\n", ret);
+ return ret;
+ }
+ } else if (PTR_ERR(s->regulator) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_out;
+ }
+ dev_notice(&pdev->dev, "Using default clock frequency\n");
+ uartclk = s->chip->freq_std;
+ } else
+ uartclk = clk_get_rate(clk);
+
+ /* Check input frequency */
+ if ((uartclk < s->chip->freq_min) || (uartclk > s->chip->freq_max)) {
+ dev_err(&pdev->dev, "Frequency out of bounds\n");
+ ret = -EINVAL;
goto err_out;
}
- if (!pdata) {
- dev_warn(&pdev->dev,
- "No platform data supplied, using defaults\n");
- s->pdata.frequency = s->freq_std;
- } else
+ if (pdata)
memcpy(&s->pdata, pdata, sizeof(struct sccnxp_pdata));
if (s->pdata.poll_time_us) {
@@ -911,34 +924,11 @@ static int sccnxp_probe(struct platform_device *pdev)
}
}
- /* Check input frequency */
- if ((s->pdata.frequency < freq_min) ||
- (s->pdata.frequency > freq_max)) {
- dev_err(&pdev->dev, "Frequency out of bounds\n");
- ret = -EINVAL;
- goto err_out;
- }
-
- s->regulator = devm_regulator_get(&pdev->dev, "VCC");
- if (!IS_ERR(s->regulator)) {
- ret = regulator_enable(s->regulator);
- if (ret) {
- dev_err(&pdev->dev,
- "Failed to enable regulator: %i\n", ret);
- return ret;
- }
- }
-
- membase = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(membase)) {
- ret = PTR_ERR(membase);
- goto err_out;
- }
-
s->uart.owner = THIS_MODULE;
s->uart.dev_name = "ttySC";
s->uart.major = SCCNXP_MAJOR;
s->uart.minor = SCCNXP_MINOR;
+ s->uart.nr = s->chip->nr;
#ifdef CONFIG_SERIAL_SCCNXP_CONSOLE
s->uart.cons = &s->console;
s->uart.cons->device = uart_console_device;
@@ -960,17 +950,17 @@ static int sccnxp_probe(struct platform_device *pdev)
s->port[i].dev = &pdev->dev;
s->port[i].irq = s->irq;
s->port[i].type = PORT_SC26XX;
- s->port[i].fifosize = fifosize;
+ s->port[i].fifosize = s->chip->fifosize;
s->port[i].flags = UPF_SKIP_TEST | UPF_FIXED_TYPE;
s->port[i].iotype = UPIO_MEM;
s->port[i].mapbase = res->start;
s->port[i].membase = membase;
s->port[i].regshift = s->pdata.reg_shift;
- s->port[i].uartclk = s->pdata.frequency;
+ s->port[i].uartclk = uartclk;
s->port[i].ops = &sccnxp_ops;
uart_add_one_port(&s->uart, &s->port[i]);
/* Set direction to input */
- if (s->flags & SCCNXP_HAVE_IO)
+ if (s->chip->flags & SCCNXP_HAVE_IO)
sccnxp_set_bit(&s->port[i], DIR_OP, 0);
}
@@ -997,7 +987,8 @@ static int sccnxp_probe(struct platform_device *pdev)
}
err_out:
- platform_set_drvdata(pdev, NULL);
+ if (!IS_ERR(s->regulator))
+ return regulator_disable(s->regulator);
return ret;
}
@@ -1016,7 +1007,6 @@ static int sccnxp_remove(struct platform_device *pdev)
uart_remove_one_port(&s->uart, &s->port[i]);
uart_unregister_driver(&s->uart);
- platform_set_drvdata(pdev, NULL);
if (!IS_ERR(s->regulator))
return regulator_disable(s->regulator);
@@ -1024,19 +1014,6 @@ static int sccnxp_remove(struct platform_device *pdev)
return 0;
}
-static const struct platform_device_id sccnxp_id_table[] = {
- { "sc2681", SCCNXP_TYPE_SC2681 },
- { "sc2691", SCCNXP_TYPE_SC2691 },
- { "sc2692", SCCNXP_TYPE_SC2692 },
- { "sc2891", SCCNXP_TYPE_SC2891 },
- { "sc2892", SCCNXP_TYPE_SC2892 },
- { "sc28202", SCCNXP_TYPE_SC28202 },
- { "sc68681", SCCNXP_TYPE_SC68681 },
- { "sc68692", SCCNXP_TYPE_SC68692 },
- { },
-};
-MODULE_DEVICE_TABLE(platform, sccnxp_id_table);
-
static struct platform_driver sccnxp_uart_driver = {
.driver = {
.name = SCCNXP_NAME,
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index ee7c8123c37..d0d972f7e43 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -571,7 +571,9 @@ static void tegra_uart_rx_dma_complete(void *args)
tegra_uart_handle_rx_pio(tup, port);
if (tty) {
+ spin_unlock_irqrestore(&u->lock, flags);
tty_flip_buffer_push(port);
+ spin_lock_irqsave(&u->lock, flags);
tty_kref_put(tty);
}
tegra_uart_start_rx_dma(tup);
@@ -583,11 +585,13 @@ static void tegra_uart_rx_dma_complete(void *args)
spin_unlock_irqrestore(&u->lock, flags);
}
-static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
+static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup,
+ unsigned long *flags)
{
struct dma_tx_state state;
struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
struct tty_port *port = &tup->uport.state->port;
+ struct uart_port *u = &tup->uport;
int count;
/* Deactivate flow control to stop sender */
@@ -604,7 +608,9 @@ static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
tegra_uart_handle_rx_pio(tup, port);
if (tty) {
+ spin_unlock_irqrestore(&u->lock, *flags);
tty_flip_buffer_push(port);
+ spin_lock_irqsave(&u->lock, *flags);
tty_kref_put(tty);
}
tegra_uart_start_rx_dma(tup);
@@ -671,7 +677,7 @@ static irqreturn_t tegra_uart_isr(int irq, void *data)
iir = tegra_uart_read(tup, UART_IIR);
if (iir & UART_IIR_NO_INT) {
if (is_rx_int) {
- tegra_uart_handle_rx_dma(tup);
+ tegra_uart_handle_rx_dma(tup, &flags);
if (tup->rx_in_progress) {
ier = tup->ier_shadow;
ier |= (UART_IER_RLSI | UART_IER_RTOIE |
@@ -1206,7 +1212,7 @@ static struct uart_driver tegra_uart_driver = {
.owner = THIS_MODULE,
.driver_name = "tegra_hsuart",
.dev_name = "ttyTHS",
- .cons = 0,
+ .cons = NULL,
.nr = TEGRA_UART_MAXIMUM,
};
@@ -1237,13 +1243,13 @@ static int tegra_uart_parse_dt(struct platform_device *pdev,
return 0;
}
-struct tegra_uart_chip_data tegra20_uart_chip_data = {
+static struct tegra_uart_chip_data tegra20_uart_chip_data = {
.tx_fifo_full_status = false,
.allow_txfifo_reset_fifo_mode = true,
.support_clk_src_div = false,
};
-struct tegra_uart_chip_data tegra30_uart_chip_data = {
+static struct tegra_uart_chip_data tegra30_uart_chip_data = {
.tx_fifo_full_status = true,
.allow_txfifo_reset_fifo_mode = false,
.support_clk_src_div = true,
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 28cdd282913..0f02351c923 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -2095,12 +2095,12 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port)
break;
}
- printk(KERN_INFO "%s%s%s%d at %s (irq = %d) is a %s\n",
+ printk(KERN_INFO "%s%s%s%d at %s (irq = %d, base_baud = %d) is a %s\n",
port->dev ? dev_name(port->dev) : "",
port->dev ? ": " : "",
drv->dev_name,
drv->tty_driver->name_base + port->line,
- address, port->irq, uart_type(port));
+ address, port->irq, port->uartclk / 16, uart_type(port));
}
static void
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index fe48a0c2b4c..440a962412d 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -1097,7 +1097,7 @@ static void serial_txx9_unregister_port(int line)
*/
static int serial_txx9_probe(struct platform_device *dev)
{
- struct uart_port *p = dev->dev.platform_data;
+ struct uart_port *p = dev_get_platdata(&dev->dev);
struct uart_port port;
int ret, i;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 7477e0ea5cd..537750261aa 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2380,7 +2380,7 @@ static char early_serial_buf[32];
static int sci_probe_earlyprintk(struct platform_device *pdev)
{
- struct plat_sci_port *cfg = pdev->dev.platform_data;
+ struct plat_sci_port *cfg = dev_get_platdata(&pdev->dev);
if (early_serial_console.data)
return -EEXIST;
@@ -2469,7 +2469,7 @@ static int sci_probe_single(struct platform_device *dev,
static int sci_probe(struct platform_device *dev)
{
- struct plat_sci_port *p = dev->dev.platform_data;
+ struct plat_sci_port *p = dev_get_platdata(&dev->dev);
struct sci_port *sp = &sci_ports[dev->id];
int ret;
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index 1fd564b8194..61c1ad03db5 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -20,9 +20,13 @@
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/sirfsoc_dma.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
-#include <linux/pinctrl/consumer.h>
#include "sirfsoc_uart.h"
@@ -32,6 +36,9 @@ static unsigned int
sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count);
static struct uart_driver sirfsoc_uart_drv;
+static void sirfsoc_uart_tx_dma_complete_callback(void *param);
+static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port);
+static void sirfsoc_uart_rx_dma_complete_callback(void *param);
static const struct sirfsoc_baudrate_to_regv baudrate_to_regv[] = {
{4000000, 2359296},
{3500000, 1310721},
@@ -89,6 +96,13 @@ static struct sirfsoc_uart_port sirfsoc_uart_ports[SIRFSOC_UART_NR] = {
.line = 4,
},
},
+ [5] = {
+ .port = {
+ .iotype = UPIO_MEM,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 5,
+ },
+ },
};
static inline struct sirfsoc_uart_port *to_sirfport(struct uart_port *port)
@@ -99,21 +113,28 @@ static inline struct sirfsoc_uart_port *to_sirfport(struct uart_port *port)
static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port *port)
{
unsigned long reg;
- reg = rd_regl(port, SIRFUART_TX_FIFO_STATUS);
- if (reg & SIRFUART_FIFOEMPTY_MASK(port))
- return TIOCSER_TEMT;
- else
- return 0;
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
+ reg = rd_regl(port, ureg->sirfsoc_tx_fifo_status);
+
+ return (reg & ufifo_st->ff_empty(port->line)) ? TIOCSER_TEMT : 0;
}
static unsigned int sirfsoc_uart_get_mctrl(struct uart_port *port)
{
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- if (!(sirfport->ms_enabled)) {
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
goto cts_asserted;
- } else if (sirfport->hw_flow_ctrl) {
- if (!(rd_regl(port, SIRFUART_AFC_CTRL) &
- SIRFUART_CTS_IN_STATUS))
+ if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
+ if (!(rd_regl(port, ureg->sirfsoc_afc_ctrl) &
+ SIRFUART_AFC_CTS_STATUS))
+ goto cts_asserted;
+ else
+ goto cts_deasserted;
+ } else {
+ if (!gpio_get_value(sirfport->cts_gpio))
goto cts_asserted;
else
goto cts_deasserted;
@@ -127,89 +148,276 @@ cts_asserted:
static void sirfsoc_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
unsigned int assert = mctrl & TIOCM_RTS;
unsigned int val = assert ? SIRFUART_AFC_CTRL_RX_THD : 0x0;
unsigned int current_val;
- if (sirfport->hw_flow_ctrl) {
- current_val = rd_regl(port, SIRFUART_AFC_CTRL) & ~0xFF;
+
+ if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
+ return;
+ if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
+ current_val = rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0xFF;
val |= current_val;
- wr_regl(port, SIRFUART_AFC_CTRL, val);
+ wr_regl(port, ureg->sirfsoc_afc_ctrl, val);
+ } else {
+ if (!val)
+ gpio_set_value(sirfport->rts_gpio, 1);
+ else
+ gpio_set_value(sirfport->rts_gpio, 0);
}
}
static void sirfsoc_uart_stop_tx(struct uart_port *port)
{
- unsigned int regv;
- regv = rd_regl(port, SIRFUART_INT_EN);
- wr_regl(port, SIRFUART_INT_EN, regv & ~SIRFUART_TX_INT_EN);
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
+
+ if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
+ if (sirfport->tx_dma_state == TX_DMA_RUNNING) {
+ dmaengine_pause(sirfport->tx_dma_chan);
+ sirfport->tx_dma_state = TX_DMA_PAUSE;
+ } else {
+ if (!sirfport->is_marco)
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ rd_regl(port, ureg->sirfsoc_int_en_reg) &
+ ~uint_en->sirfsoc_txfifo_empty_en);
+ else
+ wr_regl(port, SIRFUART_INT_EN_CLR,
+ uint_en->sirfsoc_txfifo_empty_en);
+ }
+ } else {
+ if (!sirfport->is_marco)
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ rd_regl(port, ureg->sirfsoc_int_en_reg) &
+ ~uint_en->sirfsoc_txfifo_empty_en);
+ else
+ wr_regl(port, SIRFUART_INT_EN_CLR,
+ uint_en->sirfsoc_txfifo_empty_en);
+ }
}
-void sirfsoc_uart_start_tx(struct uart_port *port)
+static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port *sirfport)
+{
+ struct uart_port *port = &sirfport->port;
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned long tran_size;
+ unsigned long tran_start;
+ unsigned long pio_tx_size;
+
+ tran_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
+ tran_start = (unsigned long)(xmit->buf + xmit->tail);
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port) ||
+ !tran_size)
+ return;
+ if (sirfport->tx_dma_state == TX_DMA_PAUSE) {
+ dmaengine_resume(sirfport->tx_dma_chan);
+ return;
+ }
+ if (sirfport->tx_dma_state == TX_DMA_RUNNING)
+ return;
+ if (!sirfport->is_marco)
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ rd_regl(port, ureg->sirfsoc_int_en_reg)&
+ ~(uint_en->sirfsoc_txfifo_empty_en));
+ else
+ wr_regl(port, SIRFUART_INT_EN_CLR,
+ uint_en->sirfsoc_txfifo_empty_en);
+ /*
+ * DMA requires that both the buffer address and the buffer length
+ * be aligned to 4 bytes, so PIO is used as follows:
+ * 1. if the address is not 4-byte aligned, send the first 1~3 bytes
+ *    by PIO, then switch to DMA for the 4-byte-aligned remainder
+ * 2. if the length is not a multiple of 4 bytes, send the aligned
+ *    part by DMA first, then the remaining 1~3 bytes by PIO
+ */
+ if (tran_size < 4 || BYTES_TO_ALIGN(tran_start)) {
+ wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
+ wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
+ rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)|
+ SIRFUART_IO_MODE);
+ if (BYTES_TO_ALIGN(tran_start)) {
+ pio_tx_size = sirfsoc_uart_pio_tx_chars(sirfport,
+ BYTES_TO_ALIGN(tran_start));
+ tran_size -= pio_tx_size;
+ }
+ if (tran_size < 4)
+ sirfsoc_uart_pio_tx_chars(sirfport, tran_size);
+ if (!sirfport->is_marco)
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ rd_regl(port, ureg->sirfsoc_int_en_reg)|
+ uint_en->sirfsoc_txfifo_empty_en);
+ else
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ uint_en->sirfsoc_txfifo_empty_en);
+ wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
+ } else {
+ /* tx transfer mode switch into dma mode */
+ wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
+ wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
+ rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)&
+ ~SIRFUART_IO_MODE);
+ wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
+ tran_size &= ~(0x3);
+
+ sirfport->tx_dma_addr = dma_map_single(port->dev,
+ xmit->buf + xmit->tail,
+ tran_size, DMA_TO_DEVICE);
+ sirfport->tx_dma_desc = dmaengine_prep_slave_single(
+ sirfport->tx_dma_chan, sirfport->tx_dma_addr,
+ tran_size, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
+ if (!sirfport->tx_dma_desc) {
+ dev_err(port->dev, "DMA prep slave single fail\n");
+ return;
+ }
+ sirfport->tx_dma_desc->callback =
+ sirfsoc_uart_tx_dma_complete_callback;
+ sirfport->tx_dma_desc->callback_param = (void *)sirfport;
+ sirfport->transfer_size = tran_size;
+
+ dmaengine_submit(sirfport->tx_dma_desc);
+ dma_async_issue_pending(sirfport->tx_dma_chan);
+ sirfport->tx_dma_state = TX_DMA_RUNNING;
+ }
+}
+
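
A worked example of the alignment split described in the comment inside sirfsoc_uart_tx_with_dma() above, written as a stand-alone helper for illustration only (the helper and its names are hypothetical): for a buffer starting at an address ending in ...2 with 11 bytes pending, 2 bytes go out by PIO to reach a 4-byte boundary, 8 bytes go by DMA, and the final 1 byte is left for a later PIO pass.

static void split_tx(unsigned long start, unsigned long len,
		     unsigned long *pio_head, unsigned long *dma_len)
{
	*pio_head = (4 - (start & 3)) & 3;	/* bytes needed to reach alignment */
	if (*pio_head > len)
		*pio_head = len;

	*dma_len = (len - *pio_head) & ~0x3UL;	/* whole 4-byte words for DMA */
}
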
+static void sirfsoc_uart_start_tx(struct uart_port *port)
{
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- unsigned long regv;
- sirfsoc_uart_pio_tx_chars(sirfport, 1);
- wr_regl(port, SIRFUART_TX_FIFO_OP, SIRFUART_TX_FIFO_START);
- regv = rd_regl(port, SIRFUART_INT_EN);
- wr_regl(port, SIRFUART_INT_EN, regv | SIRFUART_TX_INT_EN);
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
+ if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
+ sirfsoc_uart_tx_with_dma(sirfport);
+ else {
+ sirfsoc_uart_pio_tx_chars(sirfport, 1);
+ wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
+ if (!sirfport->is_marco)
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ rd_regl(port, ureg->sirfsoc_int_en_reg)|
+ uint_en->sirfsoc_txfifo_empty_en);
+ else
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ uint_en->sirfsoc_txfifo_empty_en);
+ }
}
static void sirfsoc_uart_stop_rx(struct uart_port *port)
{
- unsigned long regv;
- wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
- regv = rd_regl(port, SIRFUART_INT_EN);
- wr_regl(port, SIRFUART_INT_EN, regv & ~SIRFUART_RX_IO_INT_EN);
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
+
+ wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
+ if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
+ if (!sirfport->is_marco)
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ rd_regl(port, ureg->sirfsoc_int_en_reg) &
+ ~(SIRFUART_RX_DMA_INT_EN(port, uint_en) |
+ uint_en->sirfsoc_rx_done_en));
+ else
+ wr_regl(port, SIRFUART_INT_EN_CLR,
+ SIRFUART_RX_DMA_INT_EN(port, uint_en)|
+ uint_en->sirfsoc_rx_done_en);
+ dmaengine_terminate_all(sirfport->rx_dma_chan);
+ } else {
+ if (!sirfport->is_marco)
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ rd_regl(port, ureg->sirfsoc_int_en_reg)&
+ ~(SIRFUART_RX_IO_INT_EN(port, uint_en)));
+ else
+ wr_regl(port, SIRFUART_INT_EN_CLR,
+ SIRFUART_RX_IO_INT_EN(port, uint_en));
+ }
}
static void sirfsoc_uart_disable_ms(struct uart_port *port)
{
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- unsigned long reg;
- sirfport->ms_enabled = 0;
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
+
if (!sirfport->hw_flow_ctrl)
return;
- reg = rd_regl(port, SIRFUART_AFC_CTRL);
- wr_regl(port, SIRFUART_AFC_CTRL, reg & ~0x3FF);
- reg = rd_regl(port, SIRFUART_INT_EN);
- wr_regl(port, SIRFUART_INT_EN, reg & ~SIRFUART_CTS_INT_EN);
+ sirfport->ms_enabled = false;
+ if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
+ wr_regl(port, ureg->sirfsoc_afc_ctrl,
+ rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0x3FF);
+ if (!sirfport->is_marco)
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ rd_regl(port, ureg->sirfsoc_int_en_reg)&
+ ~uint_en->sirfsoc_cts_en);
+ else
+ wr_regl(port, SIRFUART_INT_EN_CLR,
+ uint_en->sirfsoc_cts_en);
+ } else
+ disable_irq(gpio_to_irq(sirfport->cts_gpio));
+}
+
+static irqreturn_t sirfsoc_uart_usp_cts_handler(int irq, void *dev_id)
+{
+ struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
+ struct uart_port *port = &sirfport->port;
+ if (gpio_is_valid(sirfport->cts_gpio) && sirfport->ms_enabled)
+ uart_handle_cts_change(port,
+ !gpio_get_value(sirfport->cts_gpio));
+ return IRQ_HANDLED;
}
static void sirfsoc_uart_enable_ms(struct uart_port *port)
{
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- unsigned long reg;
- unsigned long flg;
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
+
if (!sirfport->hw_flow_ctrl)
return;
- flg = SIRFUART_AFC_RX_EN | SIRFUART_AFC_TX_EN;
- reg = rd_regl(port, SIRFUART_AFC_CTRL);
- wr_regl(port, SIRFUART_AFC_CTRL, reg | flg);
- reg = rd_regl(port, SIRFUART_INT_EN);
- wr_regl(port, SIRFUART_INT_EN, reg | SIRFUART_CTS_INT_EN);
- uart_handle_cts_change(port,
- !(rd_regl(port, SIRFUART_AFC_CTRL) & SIRFUART_CTS_IN_STATUS));
- sirfport->ms_enabled = 1;
+ sirfport->ms_enabled = true;
+ if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
+ wr_regl(port, ureg->sirfsoc_afc_ctrl,
+ rd_regl(port, ureg->sirfsoc_afc_ctrl) |
+ SIRFUART_AFC_TX_EN | SIRFUART_AFC_RX_EN);
+ if (!sirfport->is_marco)
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ rd_regl(port, ureg->sirfsoc_int_en_reg)
+ | uint_en->sirfsoc_cts_en);
+ else
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ uint_en->sirfsoc_cts_en);
+ } else
+ enable_irq(gpio_to_irq(sirfport->cts_gpio));
}
static void sirfsoc_uart_break_ctl(struct uart_port *port, int break_state)
{
- unsigned long ulcon = rd_regl(port, SIRFUART_LINE_CTRL);
- if (break_state)
- ulcon |= SIRFUART_SET_BREAK;
- else
- ulcon &= ~SIRFUART_SET_BREAK;
- wr_regl(port, SIRFUART_LINE_CTRL, ulcon);
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
+ unsigned long ulcon = rd_regl(port, ureg->sirfsoc_line_ctrl);
+ if (break_state)
+ ulcon |= SIRFUART_SET_BREAK;
+ else
+ ulcon &= ~SIRFUART_SET_BREAK;
+ wr_regl(port, ureg->sirfsoc_line_ctrl, ulcon);
+ }
}
static unsigned int
sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
unsigned int ch, rx_count = 0;
-
- while (!(rd_regl(port, SIRFUART_RX_FIFO_STATUS) &
- SIRFUART_FIFOEMPTY_MASK(port))) {
- ch = rd_regl(port, SIRFUART_RX_FIFO_DATA) | SIRFUART_DUMMY_READ;
+ struct tty_struct *tty;
+ tty = tty_port_tty_get(&port->state->port);
+ if (!tty)
+ return -ENODEV;
+ while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
+ ufifo_st->ff_empty(port->line))) {
+ ch = rd_regl(port, ureg->sirfsoc_rx_fifo_data) |
+ SIRFUART_DUMMY_READ;
if (unlikely(uart_handle_sysrq_char(port, ch)))
continue;
uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
@@ -218,8 +426,12 @@ sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
break;
}
+ sirfport->rx_io_count += rx_count;
port->icount.rx += rx_count;
+
+ spin_unlock(&port->lock);
tty_flip_buffer_push(&port->state->port);
+ spin_lock(&port->lock);
return rx_count;
}
@@ -228,13 +440,16 @@ static unsigned int
sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count)
{
struct uart_port *port = &sirfport->port;
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
struct circ_buf *xmit = &port->state->xmit;
unsigned int num_tx = 0;
while (!uart_circ_empty(xmit) &&
- !(rd_regl(port, SIRFUART_TX_FIFO_STATUS) &
- SIRFUART_FIFOFULL_MASK(port)) &&
+ !(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
+ ufifo_st->ff_full(port->line)) &&
count--) {
- wr_regl(port, SIRFUART_TX_FIFO_DATA, xmit->buf[xmit->tail]);
+ wr_regl(port, ureg->sirfsoc_tx_fifo_data,
+ xmit->buf[xmit->tail]);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
port->icount.tx++;
num_tx++;
@@ -244,6 +459,166 @@ sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count)
return num_tx;
}
+static void sirfsoc_uart_tx_dma_complete_callback(void *param)
+{
+ struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
+ struct uart_port *port = &sirfport->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned long flags;
+
+ xmit->tail = (xmit->tail + sirfport->transfer_size) &
+ (UART_XMIT_SIZE - 1);
+ port->icount.tx += sirfport->transfer_size;
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+ if (sirfport->tx_dma_addr)
+ dma_unmap_single(port->dev, sirfport->tx_dma_addr,
+ sirfport->transfer_size, DMA_TO_DEVICE);
+ spin_lock_irqsave(&sirfport->tx_lock, flags);
+ sirfport->tx_dma_state = TX_DMA_IDLE;
+ sirfsoc_uart_tx_with_dma(sirfport);
+ spin_unlock_irqrestore(&sirfport->tx_lock, flags);
+}
+
+static void sirfsoc_uart_insert_rx_buf_to_tty(
+ struct sirfsoc_uart_port *sirfport, int count)
+{
+ struct uart_port *port = &sirfport->port;
+ struct tty_port *tport = &port->state->port;
+ int inserted;
+
+ inserted = tty_insert_flip_string(tport,
+ sirfport->rx_dma_items[sirfport->rx_completed].xmit.buf, count);
+ port->icount.rx += inserted;
+ tty_flip_buffer_push(tport);
+}
+
+static void sirfsoc_rx_submit_one_dma_desc(struct uart_port *port, int index)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+
+ sirfport->rx_dma_items[index].xmit.tail =
+ sirfport->rx_dma_items[index].xmit.head = 0;
+ sirfport->rx_dma_items[index].desc =
+ dmaengine_prep_slave_single(sirfport->rx_dma_chan,
+ sirfport->rx_dma_items[index].dma_addr, SIRFSOC_RX_DMA_BUF_SIZE,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
+ if (!sirfport->rx_dma_items[index].desc) {
+		dev_err(port->dev, "DMA prep slave single failed\n");
+ return;
+ }
+ sirfport->rx_dma_items[index].desc->callback =
+ sirfsoc_uart_rx_dma_complete_callback;
+ sirfport->rx_dma_items[index].desc->callback_param = sirfport;
+ sirfport->rx_dma_items[index].cookie =
+ dmaengine_submit(sirfport->rx_dma_items[index].desc);
+ dma_async_issue_pending(sirfport->rx_dma_chan);
+}
+
+static void sirfsoc_rx_tmo_process_tl(unsigned long param)
+{
+ struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
+ struct uart_port *port = &sirfport->port;
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
+ struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
+ unsigned int count;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sirfport->rx_lock, flags);
+ while (sirfport->rx_completed != sirfport->rx_issued) {
+ sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
+ SIRFSOC_RX_DMA_BUF_SIZE);
+ sirfsoc_rx_submit_one_dma_desc(port, sirfport->rx_completed++);
+ sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
+ }
+ count = CIRC_CNT(sirfport->rx_dma_items[sirfport->rx_issued].xmit.head,
+ sirfport->rx_dma_items[sirfport->rx_issued].xmit.tail,
+ SIRFSOC_RX_DMA_BUF_SIZE);
+ if (count > 0)
+ sirfsoc_uart_insert_rx_buf_to_tty(sirfport, count);
+ wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
+ rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
+ SIRFUART_IO_MODE);
+ sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
+ spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+ if (sirfport->rx_io_count == 4) {
+ spin_lock_irqsave(&sirfport->rx_lock, flags);
+ sirfport->rx_io_count = 0;
+ wr_regl(port, ureg->sirfsoc_int_st_reg,
+ uint_st->sirfsoc_rx_done);
+ if (!sirfport->is_marco)
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ rd_regl(port, ureg->sirfsoc_int_en_reg) &
+ ~(uint_en->sirfsoc_rx_done_en));
+ else
+ wr_regl(port, SIRFUART_INT_EN_CLR,
+ uint_en->sirfsoc_rx_done_en);
+ spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+
+ sirfsoc_uart_start_next_rx_dma(port);
+ } else {
+ spin_lock_irqsave(&sirfport->rx_lock, flags);
+ wr_regl(port, ureg->sirfsoc_int_st_reg,
+ uint_st->sirfsoc_rx_done);
+ if (!sirfport->is_marco)
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ rd_regl(port, ureg->sirfsoc_int_en_reg) |
+ (uint_en->sirfsoc_rx_done_en));
+ else
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ uint_en->sirfsoc_rx_done_en);
+ spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+ }
+}
+
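+/*
+ * RX timeout handling (interrupt context): record the DMA residue of the
+ * in-flight descriptor, stop the transfer, mask further timeout interrupts
+ * and defer draining of the loop buffers to the rx_tmo_process tasklet.
+ */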
+static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port *sirfport)
+{
+ struct uart_port *port = &sirfport->port;
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
+ struct dma_tx_state tx_state;
+ spin_lock(&sirfport->rx_lock);
+
+ dmaengine_tx_status(sirfport->rx_dma_chan,
+ sirfport->rx_dma_items[sirfport->rx_issued].cookie, &tx_state);
+ dmaengine_terminate_all(sirfport->rx_dma_chan);
+ sirfport->rx_dma_items[sirfport->rx_issued].xmit.head =
+ SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
+ if (!sirfport->is_marco)
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ rd_regl(port, ureg->sirfsoc_int_en_reg) &
+ ~(uint_en->sirfsoc_rx_timeout_en));
+ else
+ wr_regl(port, SIRFUART_INT_EN_CLR,
+ uint_en->sirfsoc_rx_timeout_en);
+ spin_unlock(&sirfport->rx_lock);
+ tasklet_schedule(&sirfport->rx_tmo_process_tasklet);
+}
+
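+/*
+ * RX done handling (interrupt context): read the remaining bytes out of the
+ * FIFO in PIO mode; once four bytes have been collected, mask the RX done
+ * interrupt and switch reception back to DMA.
+ */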
+static void sirfsoc_uart_handle_rx_done(struct sirfsoc_uart_port *sirfport)
+{
+ struct uart_port *port = &sirfport->port;
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
+ struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
+
+ sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
+ if (sirfport->rx_io_count == 4) {
+ sirfport->rx_io_count = 0;
+ if (!sirfport->is_marco)
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ rd_regl(port, ureg->sirfsoc_int_en_reg) &
+ ~(uint_en->sirfsoc_rx_done_en));
+ else
+ wr_regl(port, SIRFUART_INT_EN_CLR,
+ uint_en->sirfsoc_rx_done_en);
+ wr_regl(port, ureg->sirfsoc_int_st_reg,
+ uint_st->sirfsoc_rx_timeout);
+ sirfsoc_uart_start_next_rx_dma(port);
+ }
+}
+
static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
{
unsigned long intr_status;
@@ -251,79 +626,191 @@ static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
unsigned long flag = TTY_NORMAL;
struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
struct uart_port *port = &sirfport->port;
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
+ struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
+ struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
struct uart_state *state = port->state;
struct circ_buf *xmit = &port->state->xmit;
spin_lock(&port->lock);
- intr_status = rd_regl(port, SIRFUART_INT_STATUS);
- wr_regl(port, SIRFUART_INT_STATUS, intr_status);
- intr_status &= rd_regl(port, SIRFUART_INT_EN);
- if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT))) {
- if (intr_status & SIRFUART_RXD_BREAK) {
+ intr_status = rd_regl(port, ureg->sirfsoc_int_st_reg);
+ wr_regl(port, ureg->sirfsoc_int_st_reg, intr_status);
+ intr_status &= rd_regl(port, ureg->sirfsoc_int_en_reg);
+ if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT(port, uint_st)))) {
+ if (intr_status & uint_st->sirfsoc_rxd_brk) {
+ port->icount.brk++;
if (uart_handle_break(port))
goto recv_char;
- uart_insert_char(port, intr_status,
- SIRFUART_RX_OFLOW, 0, TTY_BREAK);
- spin_unlock(&port->lock);
- return IRQ_HANDLED;
}
- if (intr_status & SIRFUART_RX_OFLOW)
+ if (intr_status & uint_st->sirfsoc_rx_oflow)
port->icount.overrun++;
- if (intr_status & SIRFUART_FRM_ERR) {
+ if (intr_status & uint_st->sirfsoc_frm_err) {
port->icount.frame++;
flag = TTY_FRAME;
}
- if (intr_status & SIRFUART_PARITY_ERR)
+ if (intr_status & uint_st->sirfsoc_parity_err)
flag = TTY_PARITY;
- wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_RESET);
- wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
- wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_START);
+ wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
+ wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
+ wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
intr_status &= port->read_status_mask;
uart_insert_char(port, intr_status,
- SIRFUART_RX_OFLOW_INT, 0, flag);
+ uint_en->sirfsoc_rx_oflow_en, 0, flag);
+ tty_flip_buffer_push(&state->port);
}
recv_char:
- if (intr_status & SIRFUART_CTS_INT_EN) {
- cts_status = !(rd_regl(port, SIRFUART_AFC_CTRL) &
- SIRFUART_CTS_IN_STATUS);
- if (cts_status != 0) {
- uart_handle_cts_change(port, 1);
- } else {
- uart_handle_cts_change(port, 0);
- wake_up_interruptible(&state->port.delta_msr_wait);
- }
+ if ((sirfport->uart_reg->uart_type == SIRF_REAL_UART) &&
+ (intr_status & SIRFUART_CTS_INT_ST(uint_st)) &&
+ !sirfport->tx_dma_state) {
+ cts_status = rd_regl(port, ureg->sirfsoc_afc_ctrl) &
+ SIRFUART_AFC_CTS_STATUS;
+ if (cts_status != 0)
+ cts_status = 0;
+ else
+ cts_status = 1;
+ uart_handle_cts_change(port, cts_status);
+ wake_up_interruptible(&state->port.delta_msr_wait);
}
- if (intr_status & SIRFUART_RX_IO_INT_EN)
- sirfsoc_uart_pio_rx_chars(port, SIRFSOC_UART_IO_RX_MAX_CNT);
- if (intr_status & SIRFUART_TX_INT_EN) {
- if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
- spin_unlock(&port->lock);
- return IRQ_HANDLED;
- } else {
- sirfsoc_uart_pio_tx_chars(sirfport,
+ if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
+ if (intr_status & uint_st->sirfsoc_rx_timeout)
+ sirfsoc_uart_handle_rx_tmo(sirfport);
+ if (intr_status & uint_st->sirfsoc_rx_done)
+ sirfsoc_uart_handle_rx_done(sirfport);
+ } else {
+ if (intr_status & SIRFUART_RX_IO_INT_ST(uint_st))
+ sirfsoc_uart_pio_rx_chars(port,
+ SIRFSOC_UART_IO_RX_MAX_CNT);
+ }
+ if (intr_status & uint_st->sirfsoc_txfifo_empty) {
+ if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
+ sirfsoc_uart_tx_with_dma(sirfport);
+ else {
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ spin_unlock(&port->lock);
+ return IRQ_HANDLED;
+ } else {
+ sirfsoc_uart_pio_tx_chars(sirfport,
SIRFSOC_UART_IO_TX_REASONABLE_CNT);
- if ((uart_circ_empty(xmit)) &&
- (rd_regl(port, SIRFUART_TX_FIFO_STATUS) &
- SIRFUART_FIFOEMPTY_MASK(port)))
- sirfsoc_uart_stop_tx(port);
+ if ((uart_circ_empty(xmit)) &&
+ (rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
+ ufifo_st->ff_empty(port->line)))
+ sirfsoc_uart_stop_tx(port);
+ }
}
}
spin_unlock(&port->lock);
return IRQ_HANDLED;
}
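+/*
+ * Tasklet half of RX DMA completion: push every completed loop buffer to the
+ * tty layer and immediately resubmit it to the DMA engine.
+ */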
+static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param)
+{
+ struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
+ struct uart_port *port = &sirfport->port;
+ unsigned long flags;
+ spin_lock_irqsave(&sirfport->rx_lock, flags);
+ while (sirfport->rx_completed != sirfport->rx_issued) {
+ sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
+ SIRFSOC_RX_DMA_BUF_SIZE);
+ sirfsoc_rx_submit_one_dma_desc(port, sirfport->rx_completed++);
+ sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
+ }
+ spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+}
+
+static void sirfsoc_uart_rx_dma_complete_callback(void *param)
+{
+ struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
+ spin_lock(&sirfport->rx_lock);
+ sirfport->rx_issued++;
+ sirfport->rx_issued %= SIRFSOC_RX_LOOP_BUF_CNT;
+ spin_unlock(&sirfport->rx_lock);
+ tasklet_schedule(&sirfport->rx_dma_complete_tasklet);
+}
+
+/* submit rx dma task into dmaengine */
+static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
+ unsigned long flags;
+ int i;
+ spin_lock_irqsave(&sirfport->rx_lock, flags);
+ sirfport->rx_io_count = 0;
+ wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
+ rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
+ ~SIRFUART_IO_MODE);
+ spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+ for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
+ sirfsoc_rx_submit_one_dma_desc(port, i);
+ sirfport->rx_completed = sirfport->rx_issued = 0;
+ spin_lock_irqsave(&sirfport->rx_lock, flags);
+ if (!sirfport->is_marco)
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ rd_regl(port, ureg->sirfsoc_int_en_reg) |
+ SIRFUART_RX_DMA_INT_EN(port, uint_en));
+ else
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ SIRFUART_RX_DMA_INT_EN(port, uint_en));
+ spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+}
+
static void sirfsoc_uart_start_rx(struct uart_port *port)
{
- unsigned long regv;
- regv = rd_regl(port, SIRFUART_INT_EN);
- wr_regl(port, SIRFUART_INT_EN, regv | SIRFUART_RX_IO_INT_EN);
- wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_RESET);
- wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
- wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_START);
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
+
+ sirfport->rx_io_count = 0;
+ wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
+ wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
+ wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
+ if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
+ sirfsoc_uart_start_next_rx_dma(port);
+ else {
+ if (!sirfport->is_marco)
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ rd_regl(port, ureg->sirfsoc_int_en_reg) |
+ SIRFUART_RX_IO_INT_EN(port, uint_en));
+ else
+ wr_regl(port, ureg->sirfsoc_int_en_reg,
+ SIRFUART_RX_IO_INT_EN(port, uint_en));
+ }
+}
+
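+/*
+ * Scan the allowed sample-divisor range for the io-clock divisor / sample
+ * divisor pair whose resulting rate is closest to the requested one; the
+ * chosen sample divisor is returned through *sample_reg, the io-clock
+ * divisor is the return value.
+ */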
+static unsigned int
+sirfsoc_usp_calc_sample_div(unsigned long set_rate,
+ unsigned long ioclk_rate, unsigned long *sample_reg)
+{
+ unsigned long min_delta = ~0UL;
+ unsigned short sample_div;
+ unsigned long ioclk_div = 0;
+ unsigned long temp_delta;
+
+ for (sample_div = SIRF_MIN_SAMPLE_DIV;
+ sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
+ temp_delta = ioclk_rate -
+ (ioclk_rate + (set_rate * sample_div) / 2)
+ / (set_rate * sample_div) * set_rate * sample_div;
+
+ temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
+ if (temp_delta < min_delta) {
+ ioclk_div = (2 * ioclk_rate /
+ (set_rate * sample_div) + 1) / 2 - 1;
+ if (ioclk_div > SIRF_IOCLK_DIV_MAX)
+ continue;
+ min_delta = temp_delta;
+ *sample_reg = sample_div;
+ if (!temp_delta)
+ break;
+ }
+ }
+ return ioclk_div;
}
static unsigned int
-sirfsoc_calc_sample_div(unsigned long baud_rate,
- unsigned long ioclk_rate, unsigned long *setted_baud)
+sirfsoc_uart_calc_sample_div(unsigned long baud_rate,
+ unsigned long ioclk_rate, unsigned long *set_baud)
{
unsigned long min_delta = ~0UL;
unsigned short sample_div;
@@ -346,7 +833,7 @@ sirfsoc_calc_sample_div(unsigned long baud_rate,
regv = regv & (~SIRF_SAMPLE_DIV_MASK);
regv = regv | (sample_div << SIRF_SAMPLE_DIV_SHIFT);
min_delta = temp_delta;
- *setted_baud = baud_tmp;
+ *set_baud = baud_tmp;
}
}
return regv;
@@ -357,63 +844,93 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
struct ktermios *old)
{
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
unsigned long config_reg = 0;
unsigned long baud_rate;
- unsigned long setted_baud;
+ unsigned long set_baud;
unsigned long flags;
unsigned long ic;
unsigned int clk_div_reg = 0;
- unsigned long temp_reg_val;
+ unsigned long txfifo_op_reg, ioclk_rate;
unsigned long rx_time_out;
int threshold_div;
- int temp;
+ u32 data_bit_len, stop_bit_len, len_val;
+ unsigned long sample_div_reg = 0xf;
+ ioclk_rate = port->uartclk;
switch (termios->c_cflag & CSIZE) {
default:
case CS8:
+ data_bit_len = 8;
config_reg |= SIRFUART_DATA_BIT_LEN_8;
break;
case CS7:
+ data_bit_len = 7;
config_reg |= SIRFUART_DATA_BIT_LEN_7;
break;
case CS6:
+ data_bit_len = 6;
config_reg |= SIRFUART_DATA_BIT_LEN_6;
break;
case CS5:
+ data_bit_len = 5;
config_reg |= SIRFUART_DATA_BIT_LEN_5;
break;
}
- if (termios->c_cflag & CSTOPB)
+ if (termios->c_cflag & CSTOPB) {
config_reg |= SIRFUART_STOP_BIT_LEN_2;
- baud_rate = uart_get_baud_rate(port, termios, old, 0, 4000000);
+ stop_bit_len = 2;
+ } else
+ stop_bit_len = 1;
+
spin_lock_irqsave(&port->lock, flags);
- port->read_status_mask = SIRFUART_RX_OFLOW_INT;
+ port->read_status_mask = uint_en->sirfsoc_rx_oflow_en;
port->ignore_status_mask = 0;
- /* read flags */
- if (termios->c_iflag & INPCK)
- port->read_status_mask |=
- SIRFUART_FRM_ERR_INT | SIRFUART_PARITY_ERR_INT;
+ if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= uint_en->sirfsoc_frm_err_en |
+ uint_en->sirfsoc_parity_err_en;
+ } else {
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= uint_en->sirfsoc_frm_err_en;
+ }
if (termios->c_iflag & (BRKINT | PARMRK))
- port->read_status_mask |= SIRFUART_RXD_BREAK_INT;
- /* ignore flags */
- if (termios->c_iflag & IGNPAR)
+ port->read_status_mask |= uint_en->sirfsoc_rxd_brk_en;
+ if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |=
+ uint_en->sirfsoc_frm_err_en |
+ uint_en->sirfsoc_parity_err_en;
+ if (termios->c_cflag & PARENB) {
+ if (termios->c_cflag & CMSPAR) {
+ if (termios->c_cflag & PARODD)
+ config_reg |= SIRFUART_STICK_BIT_MARK;
+ else
+ config_reg |= SIRFUART_STICK_BIT_SPACE;
+ } else if (termios->c_cflag & PARODD) {
+ config_reg |= SIRFUART_STICK_BIT_ODD;
+ } else {
+ config_reg |= SIRFUART_STICK_BIT_EVEN;
+ }
+ }
+ } else {
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |=
+ uint_en->sirfsoc_frm_err_en;
+ if (termios->c_cflag & PARENB)
+ dev_warn(port->dev,
+				"USP-UART does not support parity error\n");
+ }
+ if (termios->c_iflag & IGNBRK) {
port->ignore_status_mask |=
- SIRFUART_FRM_ERR_INT | SIRFUART_PARITY_ERR_INT;
+ uint_en->sirfsoc_rxd_brk_en;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |=
+ uint_en->sirfsoc_rx_oflow_en;
+ }
if ((termios->c_cflag & CREAD) == 0)
port->ignore_status_mask |= SIRFUART_DUMMY_READ;
- /* enable parity if PARENB is set*/
- if (termios->c_cflag & PARENB) {
- if (termios->c_cflag & CMSPAR) {
- if (termios->c_cflag & PARODD)
- config_reg |= SIRFUART_STICK_BIT_MARK;
- else
- config_reg |= SIRFUART_STICK_BIT_SPACE;
- } else if (termios->c_cflag & PARODD) {
- config_reg |= SIRFUART_STICK_BIT_ODD;
- } else {
- config_reg |= SIRFUART_STICK_BIT_EVEN;
- }
- }
/* Hardware Flow Control Settings */
if (UART_ENABLE_MS(port, termios->c_cflag)) {
if (!sirfport->ms_enabled)
@@ -422,75 +939,184 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
if (sirfport->ms_enabled)
sirfsoc_uart_disable_ms(port);
}
-
- if (port->uartclk == 150000000) {
- /* common rate: fast calculation */
+ baud_rate = uart_get_baud_rate(port, termios, old, 0, 4000000);
+ if (ioclk_rate == 150000000) {
for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++)
if (baud_rate == baudrate_to_regv[ic].baud_rate)
clk_div_reg = baudrate_to_regv[ic].reg_val;
}
-
- setted_baud = baud_rate;
- /* arbitary rate setting */
- if (unlikely(clk_div_reg == 0))
- clk_div_reg = sirfsoc_calc_sample_div(baud_rate, port->uartclk,
- &setted_baud);
- wr_regl(port, SIRFUART_DIVISOR, clk_div_reg);
-
+ set_baud = baud_rate;
+ if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
+ if (unlikely(clk_div_reg == 0))
+ clk_div_reg = sirfsoc_uart_calc_sample_div(baud_rate,
+ ioclk_rate, &set_baud);
+ wr_regl(port, ureg->sirfsoc_divisor, clk_div_reg);
+ } else {
+ clk_div_reg = sirfsoc_usp_calc_sample_div(baud_rate,
+ ioclk_rate, &sample_div_reg);
+ sample_div_reg--;
+ set_baud = ((ioclk_rate / (clk_div_reg+1) - 1) /
+ (sample_div_reg + 1));
+ /* setting usp mode 2 */
+ len_val = ((1 << SIRFSOC_USP_MODE2_RXD_DELAY_OFFSET) |
+ (1 << SIRFSOC_USP_MODE2_TXD_DELAY_OFFSET));
+ len_val |= ((clk_div_reg & SIRFSOC_USP_MODE2_CLK_DIVISOR_MASK)
+ << SIRFSOC_USP_MODE2_CLK_DIVISOR_OFFSET);
+ wr_regl(port, ureg->sirfsoc_mode2, len_val);
+ }
if (tty_termios_baud_rate(termios))
- tty_termios_encode_baud_rate(termios, setted_baud, setted_baud);
-
- /* set receive timeout */
- rx_time_out = SIRFSOC_UART_RX_TIMEOUT(baud_rate, 20000);
- rx_time_out = (rx_time_out > 0xFFFF) ? 0xFFFF : rx_time_out;
- config_reg |= SIRFUART_RECV_TIMEOUT(rx_time_out);
- temp_reg_val = rd_regl(port, SIRFUART_TX_FIFO_OP);
- wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
- wr_regl(port, SIRFUART_TX_FIFO_OP,
- temp_reg_val & ~SIRFUART_TX_FIFO_START);
- wr_regl(port, SIRFUART_TX_DMA_IO_CTRL, SIRFUART_TX_MODE_IO);
- wr_regl(port, SIRFUART_RX_DMA_IO_CTRL, SIRFUART_RX_MODE_IO);
- wr_regl(port, SIRFUART_LINE_CTRL, config_reg);
-
+ tty_termios_encode_baud_rate(termios, set_baud, set_baud);
+ /* set receive timeout && data bits len */
+ rx_time_out = SIRFSOC_UART_RX_TIMEOUT(set_baud, 20000);
+ rx_time_out = SIRFUART_RECV_TIMEOUT_VALUE(rx_time_out);
+ txfifo_op_reg = rd_regl(port, ureg->sirfsoc_tx_fifo_op);
+ wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_STOP);
+ wr_regl(port, ureg->sirfsoc_tx_fifo_op,
+ (txfifo_op_reg & ~SIRFUART_FIFO_START));
+ if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
+ config_reg |= SIRFUART_RECV_TIMEOUT(port, rx_time_out);
+ wr_regl(port, ureg->sirfsoc_line_ctrl, config_reg);
+ } else {
+ /*tx frame ctrl*/
+ len_val = (data_bit_len - 1) << SIRFSOC_USP_TX_DATA_LEN_OFFSET;
+ len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
+ SIRFSOC_USP_TX_FRAME_LEN_OFFSET;
+ len_val |= ((data_bit_len - 1) <<
+ SIRFSOC_USP_TX_SHIFTER_LEN_OFFSET);
+ len_val |= (((clk_div_reg & 0xc00) >> 10) <<
+ SIRFSOC_USP_TX_CLK_DIVISOR_OFFSET);
+ wr_regl(port, ureg->sirfsoc_tx_frame_ctrl, len_val);
+ /*rx frame ctrl*/
+ len_val = (data_bit_len - 1) << SIRFSOC_USP_RX_DATA_LEN_OFFSET;
+ len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
+ SIRFSOC_USP_RX_FRAME_LEN_OFFSET;
+ len_val |= (data_bit_len - 1) <<
+ SIRFSOC_USP_RX_SHIFTER_LEN_OFFSET;
+ len_val |= (((clk_div_reg & 0xf000) >> 12) <<
+ SIRFSOC_USP_RX_CLK_DIVISOR_OFFSET);
+ wr_regl(port, ureg->sirfsoc_rx_frame_ctrl, len_val);
+ /*async param*/
+ wr_regl(port, ureg->sirfsoc_async_param_reg,
+ (SIRFUART_RECV_TIMEOUT(port, rx_time_out)) |
+ (sample_div_reg & SIRFSOC_USP_ASYNC_DIV2_MASK) <<
+ SIRFSOC_USP_ASYNC_DIV2_OFFSET);
+ }
+ if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
+ wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_DMA_MODE);
+ else
+ wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_IO_MODE);
+ if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
+ wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_DMA_MODE);
+ else
+ wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_IO_MODE);
/* Reset Rx/Tx FIFO Threshold level for proper baudrate */
- if (baud_rate < 1000000)
+ if (set_baud < 1000000)
threshold_div = 1;
else
threshold_div = 2;
- temp = port->line == 1 ? 16 : 64;
- wr_regl(port, SIRFUART_TX_FIFO_CTRL, temp / threshold_div);
- wr_regl(port, SIRFUART_RX_FIFO_CTRL, temp / threshold_div);
- temp_reg_val |= SIRFUART_TX_FIFO_START;
- wr_regl(port, SIRFUART_TX_FIFO_OP, temp_reg_val);
- uart_update_timeout(port, termios->c_cflag, baud_rate);
+ wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl,
+ SIRFUART_FIFO_THD(port) / threshold_div);
+ wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl,
+ SIRFUART_FIFO_THD(port) / threshold_div);
+ txfifo_op_reg |= SIRFUART_FIFO_START;
+ wr_regl(port, ureg->sirfsoc_tx_fifo_op, txfifo_op_reg);
+ uart_update_timeout(port, termios->c_cflag, set_baud);
sirfsoc_uart_start_rx(port);
- wr_regl(port, SIRFUART_TX_RX_EN, SIRFUART_TX_EN | SIRFUART_RX_EN);
+ wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_TX_EN | SIRFUART_RX_EN);
spin_unlock_irqrestore(&port->lock, flags);
}
-static void startup_uart_controller(struct uart_port *port)
+static unsigned int sirfsoc_uart_init_tx_dma(struct uart_port *port)
{
- unsigned long temp_regv;
- int temp;
- temp_regv = rd_regl(port, SIRFUART_TX_DMA_IO_CTRL);
- wr_regl(port, SIRFUART_TX_DMA_IO_CTRL, temp_regv | SIRFUART_TX_MODE_IO);
- temp_regv = rd_regl(port, SIRFUART_RX_DMA_IO_CTRL);
- wr_regl(port, SIRFUART_RX_DMA_IO_CTRL, temp_regv | SIRFUART_RX_MODE_IO);
- wr_regl(port, SIRFUART_TX_DMA_IO_LEN, 0);
- wr_regl(port, SIRFUART_RX_DMA_IO_LEN, 0);
- wr_regl(port, SIRFUART_TX_RX_EN, SIRFUART_RX_EN | SIRFUART_TX_EN);
- wr_regl(port, SIRFUART_TX_FIFO_OP, SIRFUART_TX_FIFO_RESET);
- wr_regl(port, SIRFUART_TX_FIFO_OP, 0);
- wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_RESET);
- wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
- temp = port->line == 1 ? 16 : 64;
- wr_regl(port, SIRFUART_TX_FIFO_CTRL, temp);
- wr_regl(port, SIRFUART_RX_FIFO_CTRL, temp);
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ dma_cap_mask_t dma_mask;
+ struct dma_slave_config tx_slv_cfg = {
+ .dst_maxburst = 2,
+ };
+
+ dma_cap_zero(dma_mask);
+ dma_cap_set(DMA_SLAVE, dma_mask);
+ sirfport->tx_dma_chan = dma_request_channel(dma_mask,
+ (dma_filter_fn)sirfsoc_dma_filter_id,
+ (void *)sirfport->tx_dma_no);
+ if (!sirfport->tx_dma_chan) {
+		dev_err(port->dev, "UART request of DMA channel %d failed\n",
+ sirfport->tx_dma_no);
+ return -EPROBE_DEFER;
+ }
+ dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);
+
+ return 0;
+}
+
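+/*
+ * Request the RX DMA channel and allocate one coherent loop buffer per
+ * descriptor slot; on error, whatever was acquired so far is released again.
+ */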
+static unsigned int sirfsoc_uart_init_rx_dma(struct uart_port *port)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ dma_cap_mask_t dma_mask;
+ int ret;
+ int i, j;
+ struct dma_slave_config slv_cfg = {
+ .src_maxburst = 2,
+ };
+
+ dma_cap_zero(dma_mask);
+ dma_cap_set(DMA_SLAVE, dma_mask);
+ sirfport->rx_dma_chan = dma_request_channel(dma_mask,
+ (dma_filter_fn)sirfsoc_dma_filter_id,
+ (void *)sirfport->rx_dma_no);
+ if (!sirfport->rx_dma_chan) {
+		dev_err(port->dev, "UART request of DMA channel %d failed\n",
+ sirfport->rx_dma_no);
+ ret = -EPROBE_DEFER;
+ goto request_err;
+ }
+ for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++) {
+ sirfport->rx_dma_items[i].xmit.buf =
+ dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
+ &sirfport->rx_dma_items[i].dma_addr, GFP_KERNEL);
+ if (!sirfport->rx_dma_items[i].xmit.buf) {
+			dev_err(port->dev, "UART RX DMA buffer allocation failed\n");
+ ret = -ENOMEM;
+ goto alloc_coherent_err;
+ }
+ sirfport->rx_dma_items[i].xmit.head =
+ sirfport->rx_dma_items[i].xmit.tail = 0;
+ }
+ dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);
+
+ return 0;
+alloc_coherent_err:
+ for (j = 0; j < i; j++)
+ dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
+ sirfport->rx_dma_items[j].xmit.buf,
+ sirfport->rx_dma_items[j].dma_addr);
+ dma_release_channel(sirfport->rx_dma_chan);
+request_err:
+ return ret;
+}
+
+static void sirfsoc_uart_uninit_tx_dma(struct sirfsoc_uart_port *sirfport)
+{
+ dmaengine_terminate_all(sirfport->tx_dma_chan);
+ dma_release_channel(sirfport->tx_dma_chan);
+}
+
+static void sirfsoc_uart_uninit_rx_dma(struct sirfsoc_uart_port *sirfport)
+{
+ int i;
+ struct uart_port *port = &sirfport->port;
+ dmaengine_terminate_all(sirfport->rx_dma_chan);
+ dma_release_channel(sirfport->rx_dma_chan);
+ for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
+ dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
+ sirfport->rx_dma_items[i].xmit.buf,
+ sirfport->rx_dma_items[i].dma_addr);
}
static int sirfsoc_uart_startup(struct uart_port *port)
{
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
unsigned int index = port->line;
int ret;
set_irq_flags(port->irq, IRQF_VALID | IRQF_NOAUTOEN);
@@ -504,8 +1130,64 @@ static int sirfsoc_uart_startup(struct uart_port *port)
index, port->irq);
goto irq_err;
}
- startup_uart_controller(port);
+
+ /* initial hardware settings */
+ wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
+ rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl) |
+ SIRFUART_IO_MODE);
+ wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
+ rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
+ SIRFUART_IO_MODE);
+ wr_regl(port, ureg->sirfsoc_tx_dma_io_len, 0);
+ wr_regl(port, ureg->sirfsoc_rx_dma_io_len, 0);
+ wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_RX_EN | SIRFUART_TX_EN);
+ if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
+ wr_regl(port, ureg->sirfsoc_mode1,
+ SIRFSOC_USP_ENDIAN_CTRL_LSBF |
+ SIRFSOC_USP_EN);
+ wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_RESET);
+ wr_regl(port, ureg->sirfsoc_tx_fifo_op, 0);
+ wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
+ wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
+ wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, SIRFUART_FIFO_THD(port));
+ wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port));
+
+ if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
+ ret = sirfsoc_uart_init_rx_dma(port);
+ if (ret)
+ goto init_rx_err;
+ wr_regl(port, ureg->sirfsoc_rx_fifo_level_chk,
+ SIRFUART_RX_FIFO_CHK_SC(port->line, 0x4) |
+ SIRFUART_RX_FIFO_CHK_LC(port->line, 0xe) |
+ SIRFUART_RX_FIFO_CHK_HC(port->line, 0x1b));
+ }
+ if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
+ sirfsoc_uart_init_tx_dma(port);
+ sirfport->tx_dma_state = TX_DMA_IDLE;
+ wr_regl(port, ureg->sirfsoc_tx_fifo_level_chk,
+ SIRFUART_TX_FIFO_CHK_SC(port->line, 0x1b) |
+ SIRFUART_TX_FIFO_CHK_LC(port->line, 0xe) |
+ SIRFUART_TX_FIFO_CHK_HC(port->line, 0x4));
+ }
+ sirfport->ms_enabled = false;
+ if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
+ sirfport->hw_flow_ctrl) {
+ set_irq_flags(gpio_to_irq(sirfport->cts_gpio),
+ IRQF_VALID | IRQF_NOAUTOEN);
+ ret = request_irq(gpio_to_irq(sirfport->cts_gpio),
+ sirfsoc_uart_usp_cts_handler, IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING, "usp_cts_irq", sirfport);
+ if (ret != 0) {
+			dev_err(port->dev, "UART-USP: request of cts gpio irq failed\n");
+ goto init_rx_err;
+ }
+ }
+
enable_irq(port->irq);
+
+ return 0;
+init_rx_err:
+ free_irq(port->irq, sirfport);
irq_err:
return ret;
}
@@ -513,11 +1195,25 @@ irq_err:
static void sirfsoc_uart_shutdown(struct uart_port *port)
{
struct sirfsoc_uart_port *sirfport = to_sirfport(port);
- wr_regl(port, SIRFUART_INT_EN, 0);
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ if (!sirfport->is_marco)
+ wr_regl(port, ureg->sirfsoc_int_en_reg, 0);
+ else
+ wr_regl(port, SIRFUART_INT_EN_CLR, ~0UL);
+
free_irq(port->irq, sirfport);
- if (sirfport->ms_enabled) {
+ if (sirfport->ms_enabled)
sirfsoc_uart_disable_ms(port);
- sirfport->ms_enabled = 0;
+ if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
+ sirfport->hw_flow_ctrl) {
+ gpio_set_value(sirfport->rts_gpio, 1);
+ free_irq(gpio_to_irq(sirfport->cts_gpio), sirfport);
+ }
+ if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
+ sirfsoc_uart_uninit_rx_dma(sirfport);
+ if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
+ sirfsoc_uart_uninit_tx_dma(sirfport);
+ sirfport->tx_dma_state = TX_DMA_IDLE;
}
}
@@ -528,9 +1224,11 @@ static const char *sirfsoc_uart_type(struct uart_port *port)
static int sirfsoc_uart_request_port(struct uart_port *port)
{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ struct sirfsoc_uart_param *uart_param = &sirfport->uart_reg->uart_param;
void *ret;
ret = request_mem_region(port->mapbase,
- SIRFUART_MAP_SIZE, SIRFUART_PORT_NAME);
+ SIRFUART_MAP_SIZE, uart_param->port_name);
return ret ? 0 : -EBUSY;
}
@@ -566,32 +1264,45 @@ static struct uart_ops sirfsoc_uart_ops = {
};
#ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
-static int __init sirfsoc_uart_console_setup(struct console *co, char *options)
+static int __init
+sirfsoc_uart_console_setup(struct console *co, char *options)
{
unsigned int baud = 115200;
unsigned int bits = 8;
unsigned int parity = 'n';
unsigned int flow = 'n';
struct uart_port *port = &sirfsoc_uart_ports[co->index].port;
-
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
if (co->index < 0 || co->index >= SIRFSOC_UART_NR)
return -EINVAL;
if (!port->mapbase)
return -ENODEV;
+ /* enable usp in mode1 register */
+ if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
+ wr_regl(port, ureg->sirfsoc_mode1, SIRFSOC_USP_EN |
+ SIRFSOC_USP_ENDIAN_CTRL_LSBF);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
port->cons = co;
+
+ /* default console tx/rx transfer using io mode */
+ sirfport->rx_dma_no = UNVALID_DMA_CHAN;
+ sirfport->tx_dma_no = UNVALID_DMA_CHAN;
return uart_set_options(port, co, baud, parity, bits, flow);
}
static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch)
{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
+ struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
while (rd_regl(port,
- SIRFUART_TX_FIFO_STATUS) & SIRFUART_FIFOFULL_MASK(port))
+ ureg->sirfsoc_tx_fifo_status) & ufifo_st->ff_full(port->line))
cpu_relax();
- wr_regb(port, SIRFUART_TX_FIFO_DATA, ch);
+ wr_regb(port, ureg->sirfsoc_tx_fifo_data, ch);
}
static void sirfsoc_uart_console_write(struct console *co, const char *s,
@@ -633,27 +1344,99 @@ static struct uart_driver sirfsoc_uart_drv = {
#endif
};
-int sirfsoc_uart_probe(struct platform_device *pdev)
+static struct of_device_id sirfsoc_uart_ids[] = {
+ { .compatible = "sirf,prima2-uart", .data = &sirfsoc_uart,},
+ { .compatible = "sirf,marco-uart", .data = &sirfsoc_uart},
+ { .compatible = "sirf,prima2-usp-uart", .data = &sirfsoc_usp},
+ {}
+};
+MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids);
+
+static int sirfsoc_uart_probe(struct platform_device *pdev)
{
struct sirfsoc_uart_port *sirfport;
struct uart_port *port;
struct resource *res;
int ret;
+ const struct of_device_id *match;
+ match = of_match_node(sirfsoc_uart_ids, pdev->dev.of_node);
if (of_property_read_u32(pdev->dev.of_node, "cell-index", &pdev->id)) {
dev_err(&pdev->dev,
"Unable to find cell-index in uart node.\n");
ret = -EFAULT;
goto err;
}
-
+ if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart"))
+ pdev->id += ((struct sirfsoc_uart_register *)
+ match->data)->uart_param.register_uart_nr;
sirfport = &sirfsoc_uart_ports[pdev->id];
port = &sirfport->port;
port->dev = &pdev->dev;
port->private_data = sirfport;
+ sirfport->uart_reg = (struct sirfsoc_uart_register *)match->data;
+
+ sirfport->hw_flow_ctrl = of_property_read_bool(pdev->dev.of_node,
+ "sirf,uart-has-rtscts");
+ if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart")) {
+ sirfport->uart_reg->uart_type = SIRF_REAL_UART;
+ if (of_property_read_u32(pdev->dev.of_node,
+ "sirf,uart-dma-rx-channel",
+ &sirfport->rx_dma_no))
+ sirfport->rx_dma_no = UNVALID_DMA_CHAN;
+ if (of_property_read_u32(pdev->dev.of_node,
+ "sirf,uart-dma-tx-channel",
+ &sirfport->tx_dma_no))
+ sirfport->tx_dma_no = UNVALID_DMA_CHAN;
+ }
+ if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart")) {
+ sirfport->uart_reg->uart_type = SIRF_USP_UART;
+ if (of_property_read_u32(pdev->dev.of_node,
+ "sirf,usp-dma-rx-channel",
+ &sirfport->rx_dma_no))
+ sirfport->rx_dma_no = UNVALID_DMA_CHAN;
+ if (of_property_read_u32(pdev->dev.of_node,
+ "sirf,usp-dma-tx-channel",
+ &sirfport->tx_dma_no))
+ sirfport->tx_dma_no = UNVALID_DMA_CHAN;
+ if (!sirfport->hw_flow_ctrl)
+ goto usp_no_flow_control;
+ if (of_find_property(pdev->dev.of_node, "cts-gpios", NULL))
+ sirfport->cts_gpio = of_get_named_gpio(
+ pdev->dev.of_node, "cts-gpios", 0);
+ else
+ sirfport->cts_gpio = -1;
+ if (of_find_property(pdev->dev.of_node, "rts-gpios", NULL))
+ sirfport->rts_gpio = of_get_named_gpio(
+ pdev->dev.of_node, "rts-gpios", 0);
+ else
+ sirfport->rts_gpio = -1;
- if (of_find_property(pdev->dev.of_node, "hw_flow_ctrl", NULL))
- sirfport->hw_flow_ctrl = 1;
+ if ((!gpio_is_valid(sirfport->cts_gpio) ||
+ !gpio_is_valid(sirfport->rts_gpio))) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev,
+				"USP flow control requires both cts and rts gpios\n");
+ goto err;
+ }
+ ret = devm_gpio_request(&pdev->dev, sirfport->cts_gpio,
+ "usp-cts-gpio");
+ if (ret) {
+			dev_err(&pdev->dev, "Unable to request cts gpio\n");
+ goto err;
+ }
+ gpio_direction_input(sirfport->cts_gpio);
+ ret = devm_gpio_request(&pdev->dev, sirfport->rts_gpio,
+ "usp-rts-gpio");
+ if (ret) {
+			dev_err(&pdev->dev, "Unable to request rts gpio\n");
+ goto err;
+ }
+ gpio_direction_output(sirfport->rts_gpio, 1);
+ }
+usp_no_flow_control:
+ if (of_device_is_compatible(pdev->dev.of_node, "sirf,marco-uart"))
+ sirfport->is_marco = true;
if (of_property_read_u32(pdev->dev.of_node,
"fifosize",
@@ -670,6 +1453,12 @@ int sirfsoc_uart_probe(struct platform_device *pdev)
ret = -EFAULT;
goto err;
}
+ spin_lock_init(&sirfport->rx_lock);
+ spin_lock_init(&sirfport->tx_lock);
+ tasklet_init(&sirfport->rx_dma_complete_tasklet,
+ sirfsoc_uart_rx_dma_complete_tl, (unsigned long)sirfport);
+ tasklet_init(&sirfport->rx_tmo_process_tasklet,
+ sirfsoc_rx_tmo_process_tl, (unsigned long)sirfport);
port->mapbase = res->start;
port->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
if (!port->membase) {
@@ -685,18 +1474,10 @@ int sirfsoc_uart_probe(struct platform_device *pdev)
}
port->irq = res->start;
- if (sirfport->hw_flow_ctrl) {
- sirfport->p = pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(sirfport->p)) {
- ret = PTR_ERR(sirfport->p);
- goto err;
- }
- }
-
sirfport->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(sirfport->clk)) {
ret = PTR_ERR(sirfport->clk);
- goto clk_err;
+ goto err;
}
clk_prepare_enable(sirfport->clk);
port->uartclk = clk_get_rate(sirfport->clk);
@@ -716,10 +1497,6 @@ int sirfsoc_uart_probe(struct platform_device *pdev)
port_err:
clk_disable_unprepare(sirfport->clk);
clk_put(sirfport->clk);
-clk_err:
- platform_set_drvdata(pdev, NULL);
- if (sirfport->hw_flow_ctrl)
- pinctrl_put(sirfport->p);
err:
return ret;
}
@@ -728,9 +1505,6 @@ static int sirfsoc_uart_remove(struct platform_device *pdev)
{
struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
struct uart_port *port = &sirfport->port;
- platform_set_drvdata(pdev, NULL);
- if (sirfport->hw_flow_ctrl)
- pinctrl_put(sirfport->p);
clk_disable_unprepare(sirfport->clk);
clk_put(sirfport->clk);
uart_remove_one_port(&sirfsoc_uart_drv, port);
@@ -754,13 +1528,6 @@ static int sirfsoc_uart_resume(struct platform_device *pdev)
return 0;
}
-static struct of_device_id sirfsoc_uart_ids[] = {
- { .compatible = "sirf,prima2-uart", },
- { .compatible = "sirf,marco-uart", },
- {}
-};
-MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids);
-
static struct platform_driver sirfsoc_uart_driver = {
.probe = sirfsoc_uart_probe,
.remove = sirfsoc_uart_remove,
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
index 85328ba0c4e..fb8d0a00260 100644
--- a/drivers/tty/serial/sirfsoc_uart.h
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -6,31 +6,260 @@
* Licensed under GPLv2 or later.
*/
#include <linux/bitops.h>
+struct sirfsoc_uart_param {
+ const char *uart_name;
+ const char *port_name;
+ u32 uart_nr;
+ u32 register_uart_nr;
+};
+
+struct sirfsoc_register {
+ /* hardware uart specific */
+ u32 sirfsoc_line_ctrl;
+ u32 sirfsoc_divisor;
+ /* uart - usp common */
+ u32 sirfsoc_tx_rx_en;
+ u32 sirfsoc_int_en_reg;
+ u32 sirfsoc_int_st_reg;
+ u32 sirfsoc_tx_dma_io_ctrl;
+ u32 sirfsoc_tx_dma_io_len;
+ u32 sirfsoc_tx_fifo_ctrl;
+ u32 sirfsoc_tx_fifo_level_chk;
+ u32 sirfsoc_tx_fifo_op;
+ u32 sirfsoc_tx_fifo_status;
+ u32 sirfsoc_tx_fifo_data;
+ u32 sirfsoc_rx_dma_io_ctrl;
+ u32 sirfsoc_rx_dma_io_len;
+ u32 sirfsoc_rx_fifo_ctrl;
+ u32 sirfsoc_rx_fifo_level_chk;
+ u32 sirfsoc_rx_fifo_op;
+ u32 sirfsoc_rx_fifo_status;
+ u32 sirfsoc_rx_fifo_data;
+ u32 sirfsoc_afc_ctrl;
+ u32 sirfsoc_swh_dma_io;
+ /* hardware usp specific */
+ u32 sirfsoc_mode1;
+ u32 sirfsoc_mode2;
+ u32 sirfsoc_tx_frame_ctrl;
+ u32 sirfsoc_rx_frame_ctrl;
+ u32 sirfsoc_async_param_reg;
+};
+
+typedef u32 (*fifo_full_mask)(int line);
+typedef u32 (*fifo_empty_mask)(int line);
+
+struct sirfsoc_fifo_status {
+ fifo_full_mask ff_full;
+ fifo_empty_mask ff_empty;
+};
-/* UART Register Offset Define */
-#define SIRFUART_LINE_CTRL 0x0040
-#define SIRFUART_TX_RX_EN 0x004c
-#define SIRFUART_DIVISOR 0x0050
-#define SIRFUART_INT_EN 0x0054
-#define SIRFUART_INT_STATUS 0x0058
-#define SIRFUART_TX_DMA_IO_CTRL 0x0100
-#define SIRFUART_TX_DMA_IO_LEN 0x0104
-#define SIRFUART_TX_FIFO_CTRL 0x0108
-#define SIRFUART_TX_FIFO_LEVEL_CHK 0x010C
-#define SIRFUART_TX_FIFO_OP 0x0110
-#define SIRFUART_TX_FIFO_STATUS 0x0114
-#define SIRFUART_TX_FIFO_DATA 0x0118
-#define SIRFUART_RX_DMA_IO_CTRL 0x0120
-#define SIRFUART_RX_DMA_IO_LEN 0x0124
-#define SIRFUART_RX_FIFO_CTRL 0x0128
-#define SIRFUART_RX_FIFO_LEVEL_CHK 0x012C
-#define SIRFUART_RX_FIFO_OP 0x0130
-#define SIRFUART_RX_FIFO_STATUS 0x0134
-#define SIRFUART_RX_FIFO_DATA 0x0138
-#define SIRFUART_AFC_CTRL 0x0140
-#define SIRFUART_SWH_DMA_IO 0x0148
-
-/* UART Line Control Register */
+struct sirfsoc_int_en {
+ u32 sirfsoc_rx_done_en;
+ u32 sirfsoc_tx_done_en;
+ u32 sirfsoc_rx_oflow_en;
+ u32 sirfsoc_tx_allout_en;
+ u32 sirfsoc_rx_io_dma_en;
+ u32 sirfsoc_tx_io_dma_en;
+ u32 sirfsoc_rxfifo_full_en;
+ u32 sirfsoc_txfifo_empty_en;
+ u32 sirfsoc_rxfifo_thd_en;
+ u32 sirfsoc_txfifo_thd_en;
+ u32 sirfsoc_frm_err_en;
+ u32 sirfsoc_rxd_brk_en;
+ u32 sirfsoc_rx_timeout_en;
+ u32 sirfsoc_parity_err_en;
+ u32 sirfsoc_cts_en;
+ u32 sirfsoc_rts_en;
+};
+
+struct sirfsoc_int_status {
+ u32 sirfsoc_rx_done;
+ u32 sirfsoc_tx_done;
+ u32 sirfsoc_rx_oflow;
+ u32 sirfsoc_tx_allout;
+ u32 sirfsoc_rx_io_dma;
+ u32 sirfsoc_tx_io_dma;
+ u32 sirfsoc_rxfifo_full;
+ u32 sirfsoc_txfifo_empty;
+ u32 sirfsoc_rxfifo_thd;
+ u32 sirfsoc_txfifo_thd;
+ u32 sirfsoc_frm_err;
+ u32 sirfsoc_rxd_brk;
+ u32 sirfsoc_rx_timeout;
+ u32 sirfsoc_parity_err;
+ u32 sirfsoc_cts;
+ u32 sirfsoc_rts;
+};
+
+enum sirfsoc_uart_type {
+ SIRF_REAL_UART,
+ SIRF_USP_UART,
+};
+
+struct sirfsoc_uart_register {
+ struct sirfsoc_register uart_reg;
+ struct sirfsoc_int_en uart_int_en;
+ struct sirfsoc_int_status uart_int_st;
+ struct sirfsoc_fifo_status fifo_status;
+ struct sirfsoc_uart_param uart_param;
+ enum sirfsoc_uart_type uart_type;
+};
+
+u32 usp_ff_full(int line)
+{
+ return 0x80;
+}
+u32 usp_ff_empty(int line)
+{
+ return 0x100;
+}
+u32 uart_ff_full(int line)
+{
+ return (line == 1) ? (0x20) : (0x80);
+}
+u32 uart_ff_empty(int line)
+{
+ return (line == 1) ? (0x40) : (0x100);
+}
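+/*
+ * Per-variant register layout tables: sirfsoc_usp describes the USP block
+ * reused as a UART, sirfsoc_uart the dedicated UART block. The probe routine
+ * selects one of them through the compatible string's match data.
+ */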
+struct sirfsoc_uart_register sirfsoc_usp = {
+ .uart_reg = {
+ .sirfsoc_mode1 = 0x0000,
+ .sirfsoc_mode2 = 0x0004,
+ .sirfsoc_tx_frame_ctrl = 0x0008,
+ .sirfsoc_rx_frame_ctrl = 0x000c,
+ .sirfsoc_tx_rx_en = 0x0010,
+ .sirfsoc_int_en_reg = 0x0014,
+ .sirfsoc_int_st_reg = 0x0018,
+ .sirfsoc_async_param_reg = 0x0024,
+ .sirfsoc_tx_dma_io_ctrl = 0x0100,
+ .sirfsoc_tx_dma_io_len = 0x0104,
+ .sirfsoc_tx_fifo_ctrl = 0x0108,
+ .sirfsoc_tx_fifo_level_chk = 0x010c,
+ .sirfsoc_tx_fifo_op = 0x0110,
+ .sirfsoc_tx_fifo_status = 0x0114,
+ .sirfsoc_tx_fifo_data = 0x0118,
+ .sirfsoc_rx_dma_io_ctrl = 0x0120,
+ .sirfsoc_rx_dma_io_len = 0x0124,
+ .sirfsoc_rx_fifo_ctrl = 0x0128,
+ .sirfsoc_rx_fifo_level_chk = 0x012c,
+ .sirfsoc_rx_fifo_op = 0x0130,
+ .sirfsoc_rx_fifo_status = 0x0134,
+ .sirfsoc_rx_fifo_data = 0x0138,
+ },
+ .uart_int_en = {
+ .sirfsoc_rx_done_en = BIT(0),
+ .sirfsoc_tx_done_en = BIT(1),
+ .sirfsoc_rx_oflow_en = BIT(2),
+ .sirfsoc_tx_allout_en = BIT(3),
+ .sirfsoc_rx_io_dma_en = BIT(4),
+ .sirfsoc_tx_io_dma_en = BIT(5),
+ .sirfsoc_rxfifo_full_en = BIT(6),
+ .sirfsoc_txfifo_empty_en = BIT(7),
+ .sirfsoc_rxfifo_thd_en = BIT(8),
+ .sirfsoc_txfifo_thd_en = BIT(9),
+ .sirfsoc_frm_err_en = BIT(10),
+ .sirfsoc_rx_timeout_en = BIT(11),
+ .sirfsoc_rxd_brk_en = BIT(15),
+ },
+ .uart_int_st = {
+ .sirfsoc_rx_done = BIT(0),
+ .sirfsoc_tx_done = BIT(1),
+ .sirfsoc_rx_oflow = BIT(2),
+ .sirfsoc_tx_allout = BIT(3),
+ .sirfsoc_rx_io_dma = BIT(4),
+ .sirfsoc_tx_io_dma = BIT(5),
+ .sirfsoc_rxfifo_full = BIT(6),
+ .sirfsoc_txfifo_empty = BIT(7),
+ .sirfsoc_rxfifo_thd = BIT(8),
+ .sirfsoc_txfifo_thd = BIT(9),
+ .sirfsoc_frm_err = BIT(10),
+ .sirfsoc_rx_timeout = BIT(11),
+ .sirfsoc_rxd_brk = BIT(15),
+ },
+ .fifo_status = {
+ .ff_full = usp_ff_full,
+ .ff_empty = usp_ff_empty,
+ },
+ .uart_param = {
+ .uart_name = "ttySiRF",
+ .port_name = "sirfsoc-uart",
+ .uart_nr = 2,
+ .register_uart_nr = 3,
+ },
+};
+
+struct sirfsoc_uart_register sirfsoc_uart = {
+ .uart_reg = {
+ .sirfsoc_line_ctrl = 0x0040,
+ .sirfsoc_tx_rx_en = 0x004c,
+ .sirfsoc_divisor = 0x0050,
+ .sirfsoc_int_en_reg = 0x0054,
+ .sirfsoc_int_st_reg = 0x0058,
+ .sirfsoc_tx_dma_io_ctrl = 0x0100,
+ .sirfsoc_tx_dma_io_len = 0x0104,
+ .sirfsoc_tx_fifo_ctrl = 0x0108,
+ .sirfsoc_tx_fifo_level_chk = 0x010c,
+ .sirfsoc_tx_fifo_op = 0x0110,
+ .sirfsoc_tx_fifo_status = 0x0114,
+ .sirfsoc_tx_fifo_data = 0x0118,
+ .sirfsoc_rx_dma_io_ctrl = 0x0120,
+ .sirfsoc_rx_dma_io_len = 0x0124,
+ .sirfsoc_rx_fifo_ctrl = 0x0128,
+ .sirfsoc_rx_fifo_level_chk = 0x012c,
+ .sirfsoc_rx_fifo_op = 0x0130,
+ .sirfsoc_rx_fifo_status = 0x0134,
+ .sirfsoc_rx_fifo_data = 0x0138,
+ .sirfsoc_afc_ctrl = 0x0140,
+ .sirfsoc_swh_dma_io = 0x0148,
+ },
+ .uart_int_en = {
+ .sirfsoc_rx_done_en = BIT(0),
+ .sirfsoc_tx_done_en = BIT(1),
+ .sirfsoc_rx_oflow_en = BIT(2),
+ .sirfsoc_tx_allout_en = BIT(3),
+ .sirfsoc_rx_io_dma_en = BIT(4),
+ .sirfsoc_tx_io_dma_en = BIT(5),
+ .sirfsoc_rxfifo_full_en = BIT(6),
+ .sirfsoc_txfifo_empty_en = BIT(7),
+ .sirfsoc_rxfifo_thd_en = BIT(8),
+ .sirfsoc_txfifo_thd_en = BIT(9),
+ .sirfsoc_frm_err_en = BIT(10),
+ .sirfsoc_rxd_brk_en = BIT(11),
+ .sirfsoc_rx_timeout_en = BIT(12),
+ .sirfsoc_parity_err_en = BIT(13),
+ .sirfsoc_cts_en = BIT(14),
+ .sirfsoc_rts_en = BIT(15),
+ },
+ .uart_int_st = {
+ .sirfsoc_rx_done = BIT(0),
+ .sirfsoc_tx_done = BIT(1),
+ .sirfsoc_rx_oflow = BIT(2),
+ .sirfsoc_tx_allout = BIT(3),
+ .sirfsoc_rx_io_dma = BIT(4),
+ .sirfsoc_tx_io_dma = BIT(5),
+ .sirfsoc_rxfifo_full = BIT(6),
+ .sirfsoc_txfifo_empty = BIT(7),
+ .sirfsoc_rxfifo_thd = BIT(8),
+ .sirfsoc_txfifo_thd = BIT(9),
+ .sirfsoc_frm_err = BIT(10),
+ .sirfsoc_rxd_brk = BIT(11),
+ .sirfsoc_rx_timeout = BIT(12),
+ .sirfsoc_parity_err = BIT(13),
+ .sirfsoc_cts = BIT(14),
+ .sirfsoc_rts = BIT(15),
+ },
+ .fifo_status = {
+ .ff_full = uart_ff_full,
+ .ff_empty = uart_ff_empty,
+ },
+ .uart_param = {
+ .uart_name = "ttySiRF",
+ .port_name = "sirfsoc_uart",
+ .uart_nr = 3,
+ .register_uart_nr = 0,
+ },
+};
+/* uart io ctrl */
#define SIRFUART_DATA_BIT_LEN_MASK 0x3
#define SIRFUART_DATA_BIT_LEN_5 BIT(0)
#define SIRFUART_DATA_BIT_LEN_6 1
@@ -50,96 +279,93 @@
#define SIRFUART_LOOP_BACK BIT(7)
#define SIRFUART_PARITY_MASK (7 << 3)
#define SIRFUART_DUMMY_READ BIT(16)
-
-#define SIRFSOC_UART_RX_TIMEOUT(br, to) (((br) * (((to) + 999) / 1000)) / 1000)
-#define SIRFUART_RECV_TIMEOUT_MASK (0xFFFF << 16)
-#define SIRFUART_RECV_TIMEOUT(x) (((x) & 0xFFFF) << 16)
-
-/* UART Auto Flow Control */
-#define SIRFUART_AFC_RX_THD_MASK 0x000000FF
+#define SIRFUART_AFC_CTRL_RX_THD 0x70
#define SIRFUART_AFC_RX_EN BIT(8)
#define SIRFUART_AFC_TX_EN BIT(9)
-#define SIRFUART_CTS_CTRL BIT(10)
-#define SIRFUART_RTS_CTRL BIT(11)
-#define SIRFUART_CTS_IN_STATUS BIT(12)
-#define SIRFUART_RTS_OUT_STATUS BIT(13)
-
-/* UART Interrupt Enable Register */
-#define SIRFUART_RX_DONE_INT BIT(0)
-#define SIRFUART_TX_DONE_INT BIT(1)
-#define SIRFUART_RX_OFLOW_INT BIT(2)
-#define SIRFUART_TX_ALLOUT_INT BIT(3)
-#define SIRFUART_RX_IO_DMA_INT BIT(4)
-#define SIRFUART_TX_IO_DMA_INT BIT(5)
-#define SIRFUART_RXFIFO_FULL_INT BIT(6)
-#define SIRFUART_TXFIFO_EMPTY_INT BIT(7)
-#define SIRFUART_RXFIFO_THD_INT BIT(8)
-#define SIRFUART_TXFIFO_THD_INT BIT(9)
-#define SIRFUART_FRM_ERR_INT BIT(10)
-#define SIRFUART_RXD_BREAK_INT BIT(11)
-#define SIRFUART_RX_TIMEOUT_INT BIT(12)
-#define SIRFUART_PARITY_ERR_INT BIT(13)
-#define SIRFUART_CTS_INT_EN BIT(14)
-#define SIRFUART_RTS_INT_EN BIT(15)
-
-/* UART Interrupt Status Register */
-#define SIRFUART_RX_DONE BIT(0)
-#define SIRFUART_TX_DONE BIT(1)
-#define SIRFUART_RX_OFLOW BIT(2)
-#define SIRFUART_TX_ALL_EMPTY BIT(3)
-#define SIRFUART_DMA_IO_RX_DONE BIT(4)
-#define SIRFUART_DMA_IO_TX_DONE BIT(5)
-#define SIRFUART_RXFIFO_FULL BIT(6)
-#define SIRFUART_TXFIFO_EMPTY BIT(7)
-#define SIRFUART_RXFIFO_THD_REACH BIT(8)
-#define SIRFUART_TXFIFO_THD_REACH BIT(9)
-#define SIRFUART_FRM_ERR BIT(10)
-#define SIRFUART_RXD_BREAK BIT(11)
-#define SIRFUART_RX_TIMEOUT BIT(12)
-#define SIRFUART_PARITY_ERR BIT(13)
-#define SIRFUART_CTS_CHANGE BIT(14)
-#define SIRFUART_RTS_CHANGE BIT(15)
-#define SIRFUART_PLUG_IN BIT(16)
-
-#define SIRFUART_ERR_INT_STAT \
- (SIRFUART_RX_OFLOW | \
- SIRFUART_FRM_ERR | \
- SIRFUART_RXD_BREAK | \
- SIRFUART_PARITY_ERR)
-#define SIRFUART_ERR_INT_EN \
- (SIRFUART_RX_OFLOW_INT | \
- SIRFUART_FRM_ERR_INT | \
- SIRFUART_RXD_BREAK_INT | \
- SIRFUART_PARITY_ERR_INT)
-#define SIRFUART_TX_INT_EN SIRFUART_TXFIFO_EMPTY_INT
-#define SIRFUART_RX_IO_INT_EN \
- (SIRFUART_RX_TIMEOUT_INT | \
- SIRFUART_RXFIFO_THD_INT | \
- SIRFUART_RXFIFO_FULL_INT | \
- SIRFUART_ERR_INT_EN)
-
+#define SIRFUART_AFC_CTS_CTRL BIT(10)
+#define SIRFUART_AFC_RTS_CTRL BIT(11)
+#define SIRFUART_AFC_CTS_STATUS BIT(12)
+#define SIRFUART_AFC_RTS_STATUS BIT(13)
/* UART FIFO Register */
-#define SIRFUART_TX_FIFO_STOP 0x0
-#define SIRFUART_TX_FIFO_RESET 0x1
-#define SIRFUART_TX_FIFO_START 0x2
-#define SIRFUART_RX_FIFO_STOP 0x0
-#define SIRFUART_RX_FIFO_RESET 0x1
-#define SIRFUART_RX_FIFO_START 0x2
-#define SIRFUART_TX_MODE_DMA 0
-#define SIRFUART_TX_MODE_IO 1
-#define SIRFUART_RX_MODE_DMA 0
-#define SIRFUART_RX_MODE_IO 1
-
-#define SIRFUART_RX_EN 0x1
-#define SIRFUART_TX_EN 0x2
+#define SIRFUART_FIFO_STOP 0x0
+#define SIRFUART_FIFO_RESET BIT(0)
+#define SIRFUART_FIFO_START BIT(1)
+#define SIRFUART_RX_EN BIT(0)
+#define SIRFUART_TX_EN BIT(1)
+
+#define SIRFUART_IO_MODE BIT(0)
+#define SIRFUART_DMA_MODE 0x0
+
+/* SiRFmarco specific: interrupt-enable clear register */
+#define SIRFUART_INT_EN_CLR 0x0060
+/* Baud Rate Calculation */
+#define SIRF_MIN_SAMPLE_DIV 0xf
+#define SIRF_MAX_SAMPLE_DIV 0x3f
+#define SIRF_IOCLK_DIV_MAX 0xffff
+#define SIRF_SAMPLE_DIV_SHIFT 16
+#define SIRF_IOCLK_DIV_MASK 0xffff
+#define SIRF_SAMPLE_DIV_MASK 0x3f0000
+#define SIRF_BAUD_RATE_SUPPORT_NR 18
+
+/* USP SPEC */
+#define SIRFSOC_USP_ENDIAN_CTRL_LSBF BIT(4)
+#define SIRFSOC_USP_EN BIT(5)
+#define SIRFSOC_USP_MODE2_RXD_DELAY_OFFSET 0
+#define SIRFSOC_USP_MODE2_TXD_DELAY_OFFSET 8
+#define SIRFSOC_USP_MODE2_CLK_DIVISOR_MASK 0x3ff
+#define SIRFSOC_USP_MODE2_CLK_DIVISOR_OFFSET 21
+#define SIRFSOC_USP_TX_DATA_LEN_OFFSET 0
+#define SIRFSOC_USP_TX_SYNC_LEN_OFFSET 8
+#define SIRFSOC_USP_TX_FRAME_LEN_OFFSET 16
+#define SIRFSOC_USP_TX_SHIFTER_LEN_OFFSET 24
+#define SIRFSOC_USP_TX_CLK_DIVISOR_OFFSET 30
+#define SIRFSOC_USP_RX_DATA_LEN_OFFSET 0
+#define SIRFSOC_USP_RX_FRAME_LEN_OFFSET 8
+#define SIRFSOC_USP_RX_SHIFTER_LEN_OFFSET 16
+#define SIRFSOC_USP_RX_CLK_DIVISOR_OFFSET 24
+#define SIRFSOC_USP_ASYNC_DIV2_MASK 0x3f
+#define SIRFSOC_USP_ASYNC_DIV2_OFFSET 16
+
+/* USP-UART Common */
+#define SIRFSOC_UART_RX_TIMEOUT(br, to) (((br) * (((to) + 999) / 1000)) / 1000)
+#define SIRFUART_RECV_TIMEOUT_VALUE(x) \
+ (((x) > 0xFFFF) ? 0xFFFF : ((x) & 0xFFFF))
+#define SIRFUART_RECV_TIMEOUT(port, x) \
+ (((port)->line > 2) ? (x & 0xFFFF) : ((x) & 0xFFFF) << 16)
+
+#define SIRFUART_FIFO_THD(port) ((port->line) == 1 ? 16 : 64)
+#define SIRFUART_ERR_INT_STAT(port, uint_st)			\
+ (uint_st->sirfsoc_rx_oflow | \
+ uint_st->sirfsoc_frm_err | \
+ uint_st->sirfsoc_rxd_brk | \
+ ((port->line > 2) ? 0 : uint_st->sirfsoc_parity_err))
+#define SIRFUART_RX_IO_INT_EN(port, uint_en) \
+ (uint_en->sirfsoc_rx_timeout_en |\
+ uint_en->sirfsoc_rxfifo_thd_en |\
+ uint_en->sirfsoc_rxfifo_full_en |\
+ uint_en->sirfsoc_frm_err_en |\
+ uint_en->sirfsoc_rx_oflow_en |\
+ uint_en->sirfsoc_rxd_brk_en |\
+ ((port->line > 2) ? 0 : uint_en->sirfsoc_parity_err_en))
+#define SIRFUART_RX_IO_INT_ST(uint_st) \
+ (uint_st->sirfsoc_rx_timeout |\
+ uint_st->sirfsoc_rxfifo_thd |\
+ uint_st->sirfsoc_rxfifo_full)
+#define SIRFUART_CTS_INT_ST(uint_st) (uint_st->sirfsoc_cts)
+#define SIRFUART_RX_DMA_INT_EN(port, uint_en) \
+ (uint_en->sirfsoc_rx_timeout_en |\
+ uint_en->sirfsoc_frm_err_en |\
+ uint_en->sirfsoc_rx_oflow_en |\
+ uint_en->sirfsoc_rxd_brk_en |\
+ ((port->line > 2) ? 0 : uint_en->sirfsoc_parity_err_en))
/* Generic Definitions */
#define SIRFSOC_UART_NAME "ttySiRF"
#define SIRFSOC_UART_MAJOR 0
#define SIRFSOC_UART_MINOR 0
#define SIRFUART_PORT_NAME "sirfsoc-uart"
#define SIRFUART_MAP_SIZE 0x200
-#define SIRFSOC_UART_NR 5
+#define SIRFSOC_UART_NR 6
#define SIRFSOC_PORT_TYPE 0xa5
/* Baud Rate Calculation */
@@ -151,19 +377,80 @@
#define SIRF_SAMPLE_DIV_MASK 0x3f0000
#define SIRF_BAUD_RATE_SUPPORT_NR 18
+/* Uart Common Use Macro*/
+#define SIRFSOC_RX_DMA_BUF_SIZE 256
+#define BYTES_TO_ALIGN(dma_addr) ((unsigned long)(dma_addr) & 0x3)
+#define LOOP_DMA_BUFA_FILL 1
+#define LOOP_DMA_BUFB_FILL 2
+#define TX_TRAN_PIO 1
+#define TX_TRAN_DMA 2
+/* Uart Fifo Level Chk */
+#define SIRFUART_TX_FIFO_SC_OFFSET 0
+#define SIRFUART_TX_FIFO_LC_OFFSET 10
+#define SIRFUART_TX_FIFO_HC_OFFSET 20
+#define SIRFUART_TX_FIFO_CHK_SC(line, value) ((((line) == 1) ? (value & 0x3) :\
+ (value & 0x1f)) << SIRFUART_TX_FIFO_SC_OFFSET)
+#define SIRFUART_TX_FIFO_CHK_LC(line, value) ((((line) == 1) ? (value & 0x3) :\
+ (value & 0x1f)) << SIRFUART_TX_FIFO_LC_OFFSET)
+#define SIRFUART_TX_FIFO_CHK_HC(line, value) ((((line) == 1) ? (value & 0x3) :\
+ (value & 0x1f)) << SIRFUART_TX_FIFO_HC_OFFSET)
+
+#define SIRFUART_RX_FIFO_CHK_SC SIRFUART_TX_FIFO_CHK_SC
+#define SIRFUART_RX_FIFO_CHK_LC SIRFUART_TX_FIFO_CHK_LC
+#define SIRFUART_RX_FIFO_CHK_HC SIRFUART_TX_FIFO_CHK_HC
+/* Indicate how many RX loop buffers are used */
+#define SIRFSOC_RX_LOOP_BUF_CNT 2
+
+/* Indicate whether a DMA channel number is valid */
+#define IS_DMA_CHAN_VALID(x) ((x) != -1)
+#define UNVALID_DMA_CHAN -1
/* For Fast Baud Rate Calculation */
struct sirfsoc_baudrate_to_regv {
unsigned int baud_rate;
unsigned int reg_val;
};
+enum sirfsoc_tx_state {
+ TX_DMA_IDLE,
+ TX_DMA_RUNNING,
+ TX_DMA_PAUSE,
+};
+
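+/*
+ * One RX loop buffer: a coherent DMA buffer wrapped in a circ_buf together
+ * with the cookie and descriptor of the transfer currently targeting it.
+ */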
+struct sirfsoc_loop_buffer {
+ struct circ_buf xmit;
+ dma_cookie_t cookie;
+ struct dma_async_tx_descriptor *desc;
+ dma_addr_t dma_addr;
+};
+
struct sirfsoc_uart_port {
- unsigned char hw_flow_ctrl;
- unsigned char ms_enabled;
+ bool hw_flow_ctrl;
+ bool ms_enabled;
struct uart_port port;
- struct pinctrl *p;
struct clk *clk;
+ /* for SiRFmarco, there are SET/CLR for UART_INT_EN */
+ bool is_marco;
+ struct sirfsoc_uart_register *uart_reg;
+ int rx_dma_no;
+ int tx_dma_no;
+ struct dma_chan *rx_dma_chan;
+ struct dma_chan *tx_dma_chan;
+ dma_addr_t tx_dma_addr;
+ struct dma_async_tx_descriptor *tx_dma_desc;
+ spinlock_t rx_lock;
+ spinlock_t tx_lock;
+ struct tasklet_struct rx_dma_complete_tasklet;
+ struct tasklet_struct rx_tmo_process_tasklet;
+ unsigned int rx_io_count;
+ unsigned long transfer_size;
+ enum sirfsoc_tx_state tx_dma_state;
+ unsigned int cts_gpio;
+ unsigned int rts_gpio;
+
+ struct sirfsoc_loop_buffer rx_dma_items[SIRFSOC_RX_LOOP_BUF_CNT];
+ int rx_completed;
+ int rx_issued;
};
/* Hardware Flow Control */
diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c
new file mode 100644
index 00000000000..21e6e84c0df
--- /dev/null
+++ b/drivers/tty/serial/st-asc.c
@@ -0,0 +1,932 @@
+/*
+ * st-asc.c: ST Asynchronous serial controller (ASC) driver
+ *
+ * Copyright (C) 2003-2013 STMicroelectronics (R&D) Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#if defined(CONFIG_SERIAL_ST_ASC_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/module.h>
+#include <linux/serial.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/serial_core.h>
+#include <linux/clk.h>
+
+#define DRIVER_NAME "st-asc"
+#define ASC_SERIAL_NAME "ttyAS"
+#define ASC_FIFO_SIZE 16
+#define ASC_MAX_PORTS 8
+
+struct asc_port {
+ struct uart_port port;
+ struct clk *clk;
+ unsigned int hw_flow_control:1;
+ unsigned int force_m1:1;
+};
+
+static struct asc_port asc_ports[ASC_MAX_PORTS];
+static struct uart_driver asc_uart_driver;
+
+/*---- UART Register definitions ------------------------------*/
+
+/* Register offsets */
+
+#define ASC_BAUDRATE 0x00
+#define ASC_TXBUF 0x04
+#define ASC_RXBUF 0x08
+#define ASC_CTL 0x0C
+#define ASC_INTEN 0x10
+#define ASC_STA 0x14
+#define ASC_GUARDTIME 0x18
+#define ASC_TIMEOUT 0x1C
+#define ASC_TXRESET 0x20
+#define ASC_RXRESET 0x24
+#define ASC_RETRIES 0x28
+
+/* ASC_RXBUF */
+#define ASC_RXBUF_PE 0x100
+#define ASC_RXBUF_FE 0x200
+/*
+ * Some of the status bits come from the higher bits of the received character
+ * and some come from the status register. Combine both into a single status
+ * word using dummy bits.
+ */
+#define ASC_RXBUF_DUMMY_RX 0x10000
+#define ASC_RXBUF_DUMMY_BE 0x20000
+#define ASC_RXBUF_DUMMY_OE 0x40000
+
+/* ASC_CTL */
+
+#define ASC_CTL_MODE_MSK 0x0007
+#define ASC_CTL_MODE_8BIT 0x0001
+#define ASC_CTL_MODE_7BIT_PAR 0x0003
+#define ASC_CTL_MODE_9BIT 0x0004
+#define ASC_CTL_MODE_8BIT_WKUP 0x0005
+#define ASC_CTL_MODE_8BIT_PAR 0x0007
+#define ASC_CTL_STOP_MSK 0x0018
+#define ASC_CTL_STOP_HALFBIT 0x0000
+#define ASC_CTL_STOP_1BIT 0x0008
+#define ASC_CTL_STOP_1_HALFBIT 0x0010
+#define ASC_CTL_STOP_2BIT 0x0018
+#define ASC_CTL_PARITYODD 0x0020
+#define ASC_CTL_LOOPBACK 0x0040
+#define ASC_CTL_RUN 0x0080
+#define ASC_CTL_RXENABLE 0x0100
+#define ASC_CTL_SCENABLE 0x0200
+#define ASC_CTL_FIFOENABLE 0x0400
+#define ASC_CTL_CTSENABLE 0x0800
+#define ASC_CTL_BAUDMODE 0x1000
+
+/* ASC_GUARDTIME */
+
+#define ASC_GUARDTIME_MSK 0x00FF
+
+/* ASC_INTEN */
+
+#define ASC_INTEN_RBE 0x0001
+#define ASC_INTEN_TE 0x0002
+#define ASC_INTEN_THE 0x0004
+#define ASC_INTEN_PE 0x0008
+#define ASC_INTEN_FE 0x0010
+#define ASC_INTEN_OE 0x0020
+#define ASC_INTEN_TNE 0x0040
+#define ASC_INTEN_TOI 0x0080
+#define ASC_INTEN_RHF 0x0100
+
+/* ASC_RETRIES */
+
+#define ASC_RETRIES_MSK 0x00FF
+
+/* ASC_RXBUF */
+
+#define ASC_RXBUF_MSK 0x03FF
+
+/* ASC_STA */
+
+#define ASC_STA_RBF 0x0001
+#define ASC_STA_TE 0x0002
+#define ASC_STA_THE 0x0004
+#define ASC_STA_PE 0x0008
+#define ASC_STA_FE 0x0010
+#define ASC_STA_OE 0x0020
+#define ASC_STA_TNE 0x0040
+#define ASC_STA_TOI 0x0080
+#define ASC_STA_RHF 0x0100
+#define ASC_STA_TF 0x0200
+#define ASC_STA_NKD 0x0400
+
+/* ASC_TIMEOUT */
+
+#define ASC_TIMEOUT_MSK 0x00FF
+
+/* ASC_TXBUF */
+
+#define ASC_TXBUF_MSK 0x01FF
+
+/*---- Inline function definitions ---------------------------*/
+
+static inline struct asc_port *to_asc_port(struct uart_port *port)
+{
+ return container_of(port, struct asc_port, port);
+}
+
+static inline u32 asc_in(struct uart_port *port, u32 offset)
+{
+ return readl(port->membase + offset);
+}
+
+static inline void asc_out(struct uart_port *port, u32 offset, u32 value)
+{
+ writel(value, port->membase + offset);
+}
+
+/*
+ * Some simple utility functions to enable and disable interrupts.
+ * Note that these need to be called with interrupts disabled.
+ */
+static inline void asc_disable_tx_interrupts(struct uart_port *port)
+{
+ u32 intenable = asc_in(port, ASC_INTEN) & ~ASC_INTEN_THE;
+ asc_out(port, ASC_INTEN, intenable);
+ (void)asc_in(port, ASC_INTEN); /* Defeat bus write posting */
+}
+
+static inline void asc_enable_tx_interrupts(struct uart_port *port)
+{
+ u32 intenable = asc_in(port, ASC_INTEN) | ASC_INTEN_THE;
+ asc_out(port, ASC_INTEN, intenable);
+}
+
+static inline void asc_disable_rx_interrupts(struct uart_port *port)
+{
+ u32 intenable = asc_in(port, ASC_INTEN) & ~ASC_INTEN_RBE;
+ asc_out(port, ASC_INTEN, intenable);
+ (void)asc_in(port, ASC_INTEN); /* Defeat bus write posting */
+}
+
+static inline void asc_enable_rx_interrupts(struct uart_port *port)
+{
+ u32 intenable = asc_in(port, ASC_INTEN) | ASC_INTEN_RBE;
+ asc_out(port, ASC_INTEN, intenable);
+}
+
+static inline u32 asc_txfifo_is_empty(struct uart_port *port)
+{
+ return asc_in(port, ASC_STA) & ASC_STA_TE;
+}
+
+static inline int asc_txfifo_is_full(struct uart_port *port)
+{
+ return asc_in(port, ASC_STA) & ASC_STA_TF;
+}
+
+static inline const char *asc_port_name(struct uart_port *port)
+{
+ return to_platform_device(port->dev)->name;
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * This section contains code to support the use of the ASC as a
+ * generic serial port.
+ */
+
+static inline unsigned asc_hw_txroom(struct uart_port *port)
+{
+ u32 status = asc_in(port, ASC_STA);
+
+ if (status & ASC_STA_THE)
+ return port->fifosize / 2;
+ else if (!(status & ASC_STA_TF))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * Start transmitting chars.
+ * This is called from both interrupt and task level.
+ * Either way interrupts are disabled.
+ */
+static void asc_transmit_chars(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+ int txroom;
+ unsigned char c;
+
+ txroom = asc_hw_txroom(port);
+
+ if ((txroom != 0) && port->x_char) {
+ c = port->x_char;
+ port->x_char = 0;
+ asc_out(port, ASC_TXBUF, c);
+ port->icount.tx++;
+ txroom = asc_hw_txroom(port);
+ }
+
+ if (uart_tx_stopped(port)) {
+ /*
+ * We should try and stop the hardware here, but I
+ * don't think the ASC has any way to do that.
+ */
+ asc_disable_tx_interrupts(port);
+ return;
+ }
+
+ if (uart_circ_empty(xmit)) {
+ asc_disable_tx_interrupts(port);
+ return;
+ }
+
+ if (txroom == 0)
+ return;
+
+ do {
+ c = xmit->buf[xmit->tail];
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ asc_out(port, ASC_TXBUF, c);
+ port->icount.tx++;
+ txroom--;
+ } while ((txroom > 0) && (!uart_circ_empty(xmit)));
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+ if (uart_circ_empty(xmit))
+ asc_disable_tx_interrupts(port);
+}
+
+static void asc_receive_chars(struct uart_port *port)
+{
+ struct tty_port *tport = &port->state->port;
+ unsigned long status;
+ unsigned long c = 0;
+ char flag;
+
+ if (port->irq_wake)
+ pm_wakeup_event(tport->tty->dev, 0);
+
+ while ((status = asc_in(port, ASC_STA)) & ASC_STA_RBF) {
+ c = asc_in(port, ASC_RXBUF) | ASC_RXBUF_DUMMY_RX;
+ flag = TTY_NORMAL;
+ port->icount.rx++;
+
+ if ((c & (ASC_RXBUF_FE | ASC_RXBUF_PE)) ||
+ status & ASC_STA_OE) {
+
+ if (c & ASC_RXBUF_FE) {
+ if (c == ASC_RXBUF_FE) {
+ port->icount.brk++;
+ if (uart_handle_break(port))
+ continue;
+ c |= ASC_RXBUF_DUMMY_BE;
+ } else {
+ port->icount.frame++;
+ }
+ } else if (c & ASC_RXBUF_PE) {
+ port->icount.parity++;
+ }
+ /*
+ * Reading any data from the RX FIFO clears the
+ * overflow error condition.
+ */
+ if (status & ASC_STA_OE) {
+ port->icount.overrun++;
+ c |= ASC_RXBUF_DUMMY_OE;
+ }
+
+ c &= port->read_status_mask;
+
+ if (c & ASC_RXBUF_DUMMY_BE)
+ flag = TTY_BREAK;
+ else if (c & ASC_RXBUF_PE)
+ flag = TTY_PARITY;
+ else if (c & ASC_RXBUF_FE)
+ flag = TTY_FRAME;
+ }
+
+ if (uart_handle_sysrq_char(port, c))
+ continue;
+
+ uart_insert_char(port, c, ASC_RXBUF_DUMMY_OE, c & 0xff, flag);
+ }
+
+ /* Tell the rest of the system the news. New characters! */
+ tty_flip_buffer_push(tport);
+}
+
+static irqreturn_t asc_interrupt(int irq, void *ptr)
+{
+ struct uart_port *port = ptr;
+ u32 status;
+
+ spin_lock(&port->lock);
+
+ status = asc_in(port, ASC_STA);
+
+ if (status & ASC_STA_RBF) {
+ /* Receive FIFO not empty */
+ asc_receive_chars(port);
+ }
+
+ if ((status & ASC_STA_THE) &&
+ (asc_in(port, ASC_INTEN) & ASC_INTEN_THE)) {
+ /* Transmitter FIFO at least half empty */
+ asc_transmit_chars(port);
+ }
+
+ spin_unlock(&port->lock);
+
+ return IRQ_HANDLED;
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * UART Functions
+ */
+
+static unsigned int asc_tx_empty(struct uart_port *port)
+{
+ return asc_txfifo_is_empty(port) ? TIOCSER_TEMT : 0;
+}
+
+static void asc_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ /*
+ * This routine is used for setting the DTR, DCD and CTS/RTS signals.
+ * We use the ASC's hardware for CTS/RTS, so nothing is needed for those.
+ * Some boards have DTR and DCD implemented using PIO pins;
+ * code to drive them should be hooked in here.
+ */
+}
+
+static unsigned int asc_get_mctrl(struct uart_port *port)
+{
+ /*
+ * This routine is used for getting the state of the DTR, DCD, DSR, RI,
+ * and CTS/RTS
+ */
+ return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
+}
+
+/* There are probably characters waiting to be transmitted. */
+static void asc_start_tx(struct uart_port *port)
+{
+ struct circ_buf *xmit = &port->state->xmit;
+
+ if (!uart_circ_empty(xmit))
+ asc_enable_tx_interrupts(port);
+}
+
+/* Transmit stop */
+static void asc_stop_tx(struct uart_port *port)
+{
+ asc_disable_tx_interrupts(port);
+}
+
+/* Receive stop */
+static void asc_stop_rx(struct uart_port *port)
+{
+ asc_disable_rx_interrupts(port);
+}
+
+/* Force modem status interrupts on */
+static void asc_enable_ms(struct uart_port *port)
+{
+ /* Nothing here yet .. */
+}
+
+/* Handle breaks - ignored by us */
+static void asc_break_ctl(struct uart_port *port, int break_state)
+{
+ /* Nothing here yet .. */
+}
+
+/*
+ * Enable port for reception.
+ */
+static int asc_startup(struct uart_port *port)
+{
+ if (request_irq(port->irq, asc_interrupt, IRQF_NO_SUSPEND,
+ asc_port_name(port), port)) {
+ dev_err(port->dev, "cannot allocate irq.\n");
+ return -ENODEV;
+ }
+
+ asc_transmit_chars(port);
+ asc_enable_rx_interrupts(port);
+
+ return 0;
+}
+
+static void asc_shutdown(struct uart_port *port)
+{
+ asc_disable_tx_interrupts(port);
+ asc_disable_rx_interrupts(port);
+ free_irq(port->irq, port);
+}
+
+static void asc_pm(struct uart_port *port, unsigned int state,
+ unsigned int oldstate)
+{
+ struct asc_port *ascport = to_asc_port(port);
+ unsigned long flags = 0;
+ u32 ctl;
+
+ switch (state) {
+ case UART_PM_STATE_ON:
+ clk_prepare_enable(ascport->clk);
+ break;
+ case UART_PM_STATE_OFF:
+ /*
+ * Disable the ASC baud rate generator, which is as close as
+ * we can come to turning it off. Note this is not called with
+ * the port spinlock held.
+ */
+ spin_lock_irqsave(&port->lock, flags);
+ ctl = asc_in(port, ASC_CTL) & ~ASC_CTL_RUN;
+ asc_out(port, ASC_CTL, ctl);
+ spin_unlock_irqrestore(&port->lock, flags);
+ clk_disable_unprepare(ascport->clk);
+ break;
+ }
+}
+
+static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct asc_port *ascport = to_asc_port(port);
+ unsigned int baud;
+ u32 ctrl_val;
+ tcflag_t cflag;
+ unsigned long flags;
+
+ /* Update termios to reflect hardware capabilities */
+ termios->c_cflag &= ~(CMSPAR |
+ (ascport->hw_flow_control ? 0 : CRTSCTS));
+
+ port->uartclk = clk_get_rate(ascport->clk);
+
+ baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
+ cflag = termios->c_cflag;
+
+ spin_lock_irqsave(&port->lock, flags);
+
+ /* read control register */
+ ctrl_val = asc_in(port, ASC_CTL);
+
+ /* stop serial port and reset value */
+ asc_out(port, ASC_CTL, (ctrl_val & ~ASC_CTL_RUN));
+ ctrl_val = ASC_CTL_RXENABLE | ASC_CTL_FIFOENABLE;
+
+ /* reset fifo rx & tx */
+ asc_out(port, ASC_TXRESET, 1);
+ asc_out(port, ASC_RXRESET, 1);
+
+ /* set character length */
+ if ((cflag & CSIZE) == CS7) {
+ ctrl_val |= ASC_CTL_MODE_7BIT_PAR;
+ } else {
+ ctrl_val |= (cflag & PARENB) ? ASC_CTL_MODE_8BIT_PAR :
+ ASC_CTL_MODE_8BIT;
+ }
+
+ /* set stop bit */
+ ctrl_val |= (cflag & CSTOPB) ? ASC_CTL_STOP_2BIT : ASC_CTL_STOP_1BIT;
+
+ /* odd parity */
+ if (cflag & PARODD)
+ ctrl_val |= ASC_CTL_PARITYODD;
+
+ /* hardware flow control */
+ if ((cflag & CRTSCTS))
+ ctrl_val |= ASC_CTL_CTSENABLE;
+
+ if ((baud < 19200) && !ascport->force_m1) {
+ asc_out(port, ASC_BAUDRATE, (port->uartclk / (16 * baud)));
+ } else {
+ /*
+ * MODE 1: recommended for high bit rates (above 19.2K)
+ *
+ * baudrate * 16 * 2^16
+ * ASCBaudRate = ------------------------
+ * inputclock
+ *
+ * However to keep the maths inside 32bits we divide top and
+ * bottom by 64. The +1 is to avoid a divide by zero if the
+ * input clock rate is something unexpected.
+ */
+ u32 counter = (baud * 16384) / ((port->uartclk / 64) + 1);
+ asc_out(port, ASC_BAUDRATE, counter);
+ ctrl_val |= ASC_CTL_BAUDMODE;
+ }
+
+ uart_update_timeout(port, cflag, baud);
+
+ ascport->port.read_status_mask = ASC_RXBUF_DUMMY_OE;
+ if (termios->c_iflag & INPCK)
+ ascport->port.read_status_mask |= ASC_RXBUF_FE | ASC_RXBUF_PE;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ ascport->port.read_status_mask |= ASC_RXBUF_DUMMY_BE;
+
+ /*
+ * Characters to ignore
+ */
+ ascport->port.ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ ascport->port.ignore_status_mask |= ASC_RXBUF_FE | ASC_RXBUF_PE;
+ if (termios->c_iflag & IGNBRK) {
+ ascport->port.ignore_status_mask |= ASC_RXBUF_DUMMY_BE;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ ascport->port.ignore_status_mask |= ASC_RXBUF_DUMMY_OE;
+ }
+
+ /*
+ * Ignore all characters if CREAD is not set.
+ */
+ if (!(termios->c_cflag & CREAD))
+ ascport->port.ignore_status_mask |= ASC_RXBUF_DUMMY_RX;
+
+ /* Set the timeout */
+ asc_out(port, ASC_TIMEOUT, 20);
+
+ /* write final value and enable port */
+ asc_out(port, ASC_CTL, (ctrl_val | ASC_CTL_RUN));
+
+ spin_unlock_irqrestore(&port->lock, flags);
+}
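
As a rough check of the MODE 1 arithmetic in asc_set_termios() above, the following stand-alone sketch reproduces the divisor computation; the baud rate and input clock values below are assumed for illustration and are not taken from the patch:

/*
 * Illustrative only: the MODE 1 divisor is (baud * 16 * 2^16) / uartclk,
 * with numerator and denominator both divided by 64 so the intermediate
 * value stays inside 32 bits; the +1 guards against a zero clock rate.
 */
#include <stdio.h>

int main(void)
{
	unsigned int baud = 115200;		/* assumed example baud rate */
	unsigned int uartclk = 100000000;	/* assumed example input clock, Hz */
	unsigned int counter = (baud * 16384) / ((uartclk / 64) + 1);

	printf("ASC_BAUDRATE counter = %u\n", counter);	/* ~1207 with these values */
	return 0;
}
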
+
+static const char *asc_type(struct uart_port *port)
+{
+ return (port->type == PORT_ASC) ? DRIVER_NAME : NULL;
+}
+
+static void asc_release_port(struct uart_port *port)
+{
+}
+
+static int asc_request_port(struct uart_port *port)
+{
+ return 0;
+}
+
+/*
+ * Called when the port is opened and the UPF_BOOT_AUTOCONF flag is set.
+ * Set the type field if successful.
+ */
+static void asc_config_port(struct uart_port *port, int flags)
+{
+ if ((flags & UART_CONFIG_TYPE))
+ port->type = PORT_ASC;
+}
+
+static int
+asc_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+ /* No user changeable parameters */
+ return -EINVAL;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+/*
+ * Console polling routines for writing and reading from the uart while
+ * in an interrupt or debug context (i.e. kgdb).
+ */
+
+static int asc_get_poll_char(struct uart_port *port)
+{
+ if (!(asc_in(port, ASC_STA) & ASC_STA_RBF))
+ return NO_POLL_CHAR;
+
+ return asc_in(port, ASC_RXBUF);
+}
+
+static void asc_put_poll_char(struct uart_port *port, unsigned char c)
+{
+ while (asc_txfifo_is_full(port))
+ cpu_relax();
+ asc_out(port, ASC_TXBUF, c);
+}
+
+#endif /* CONFIG_CONSOLE_POLL */
+
+/*---------------------------------------------------------------------*/
+
+static struct uart_ops asc_uart_ops = {
+ .tx_empty = asc_tx_empty,
+ .set_mctrl = asc_set_mctrl,
+ .get_mctrl = asc_get_mctrl,
+ .start_tx = asc_start_tx,
+ .stop_tx = asc_stop_tx,
+ .stop_rx = asc_stop_rx,
+ .enable_ms = asc_enable_ms,
+ .break_ctl = asc_break_ctl,
+ .startup = asc_startup,
+ .shutdown = asc_shutdown,
+ .set_termios = asc_set_termios,
+ .type = asc_type,
+ .release_port = asc_release_port,
+ .request_port = asc_request_port,
+ .config_port = asc_config_port,
+ .verify_port = asc_verify_port,
+ .pm = asc_pm,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = asc_get_poll_char,
+ .poll_put_char = asc_put_poll_char,
+#endif /* CONFIG_CONSOLE_POLL */
+};
+
+static int asc_init_port(struct asc_port *ascport,
+ struct platform_device *pdev)
+{
+ struct uart_port *port = &ascport->port;
+ struct resource *res;
+
+ port->iotype = UPIO_MEM;
+ port->flags = UPF_BOOT_AUTOCONF;
+ port->ops = &asc_uart_ops;
+ port->fifosize = ASC_FIFO_SIZE;
+ port->dev = &pdev->dev;
+ port->irq = platform_get_irq(pdev, 0);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ port->membase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(port->membase))
+ return PTR_ERR(port->membase);
+ port->mapbase = res->start;
+
+ spin_lock_init(&port->lock);
+
+ ascport->clk = devm_clk_get(&pdev->dev, NULL);
+
+ if (WARN_ON(IS_ERR(ascport->clk)))
+ return -EINVAL;
+ /* ensure that clk rate is correct by enabling the clk */
+ clk_prepare_enable(ascport->clk);
+ ascport->port.uartclk = clk_get_rate(ascport->clk);
+ WARN_ON(ascport->port.uartclk == 0);
+ clk_disable_unprepare(ascport->clk);
+
+ return 0;
+}
+
+static struct asc_port *asc_of_get_asc_port(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int id;
+
+ if (!np)
+ return NULL;
+
+ id = of_alias_get_id(np, ASC_SERIAL_NAME);
+
+ if (id < 0)
+ id = 0;
+
+ if (WARN_ON(id >= ASC_MAX_PORTS))
+ return NULL;
+
+ asc_ports[id].hw_flow_control = of_property_read_bool(np,
+ "st,hw-flow-control");
+ asc_ports[id].force_m1 = of_property_read_bool(np, "st,force_m1");
+ asc_ports[id].port.line = id;
+ return &asc_ports[id];
+}
+
+#ifdef CONFIG_OF
+static struct of_device_id asc_match[] = {
+ { .compatible = "st,asc", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, asc_match);
+#endif
+
+static int asc_serial_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct asc_port *ascport;
+
+ ascport = asc_of_get_asc_port(pdev);
+ if (!ascport)
+ return -ENODEV;
+
+ ret = asc_init_port(ascport, pdev);
+ if (ret)
+ return ret;
+
+ ret = uart_add_one_port(&asc_uart_driver, &ascport->port);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, &ascport->port);
+
+ return 0;
+}
+
+static int asc_serial_remove(struct platform_device *pdev)
+{
+ struct uart_port *port = platform_get_drvdata(pdev);
+
+ return uart_remove_one_port(&asc_uart_driver, port);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int asc_serial_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct uart_port *port = platform_get_drvdata(pdev);
+
+ return uart_suspend_port(&asc_uart_driver, port);
+}
+
+static int asc_serial_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct uart_port *port = platform_get_drvdata(pdev);
+
+ return uart_resume_port(&asc_uart_driver, port);
+}
+
+#endif /* CONFIG_PM_SLEEP */
+
+/*----------------------------------------------------------------------*/
+
+#ifdef CONFIG_SERIAL_ST_ASC_CONSOLE
+static void asc_console_putchar(struct uart_port *port, int ch)
+{
+ unsigned int timeout = 1000000;
+
+	/* Wait for up to 1 second in case flow control is stopping us. */
+ while (--timeout && asc_txfifo_is_full(port))
+ udelay(1);
+
+ asc_out(port, ASC_TXBUF, ch);
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ */
+
+static void asc_console_write(struct console *co, const char *s, unsigned count)
+{
+ struct uart_port *port = &asc_ports[co->index].port;
+ unsigned long flags;
+ unsigned long timeout = 1000000;
+ int locked = 1;
+ u32 intenable;
+
+ local_irq_save(flags);
+ if (port->sysrq)
+ locked = 0; /* asc_interrupt has already claimed the lock */
+ else if (oops_in_progress)
+ locked = spin_trylock(&port->lock);
+ else
+ spin_lock(&port->lock);
+
+ /*
+	 * Disable the ASC's interrupt sources so we don't get the IRQ line
+	 * bouncing up and down while local interrupts are disabled.
+ */
+ intenable = asc_in(port, ASC_INTEN);
+ asc_out(port, ASC_INTEN, 0);
+ (void)asc_in(port, ASC_INTEN); /* Defeat bus write posting */
+
+ uart_console_write(port, s, count, asc_console_putchar);
+
+ while (--timeout && !asc_txfifo_is_empty(port))
+ udelay(1);
+
+ asc_out(port, ASC_INTEN, intenable);
+
+ if (locked)
+ spin_unlock(&port->lock);
+ local_irq_restore(flags);
+}
+
+static int asc_console_setup(struct console *co, char *options)
+{
+ struct asc_port *ascport;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (co->index >= ASC_MAX_PORTS)
+ return -ENODEV;
+
+ ascport = &asc_ports[co->index];
+
+ /*
+ * This driver does not support early console initialization
+ * (use ARM early printk support instead), so we only expect
+ * this to be called during the uart port registration when the
+ * driver gets probed and the port should be mapped at that point.
+ */
+ BUG_ON(ascport->port.mapbase == 0 || ascport->port.membase == NULL);
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(&ascport->port, co, baud, parity, bits, flow);
+}
+
+static struct console asc_console = {
+ .name = ASC_SERIAL_NAME,
+ .device = uart_console_device,
+ .write = asc_console_write,
+ .setup = asc_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &asc_uart_driver,
+};
+
+#define ASC_SERIAL_CONSOLE (&asc_console)
+
+#else
+#define ASC_SERIAL_CONSOLE NULL
+#endif /* CONFIG_SERIAL_ST_ASC_CONSOLE */
+
+static struct uart_driver asc_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = DRIVER_NAME,
+ .dev_name = ASC_SERIAL_NAME,
+ .major = 0,
+ .minor = 0,
+ .nr = ASC_MAX_PORTS,
+ .cons = ASC_SERIAL_CONSOLE,
+};
+
+static const struct dev_pm_ops asc_serial_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(asc_serial_suspend, asc_serial_resume)
+};
+
+static struct platform_driver asc_serial_driver = {
+ .probe = asc_serial_probe,
+ .remove = asc_serial_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &asc_serial_pm_ops,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(asc_match),
+ },
+};
+
+static int __init asc_init(void)
+{
+ int ret;
+ static char banner[] __initdata =
+ KERN_INFO "STMicroelectronics ASC driver initialized\n";
+
+ printk(banner);
+
+ ret = uart_register_driver(&asc_uart_driver);
+ if (ret)
+ return ret;
+
+ ret = platform_driver_register(&asc_serial_driver);
+ if (ret)
+ uart_unregister_driver(&asc_uart_driver);
+
+ return ret;
+}
+
+static void __exit asc_exit(void)
+{
+ platform_driver_unregister(&asc_serial_driver);
+ uart_unregister_driver(&asc_uart_driver);
+}
+
+module_init(asc_init);
+module_exit(asc_exit);
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_AUTHOR("STMicroelectronics (R&D) Limited");
+MODULE_DESCRIPTION("STMicroelectronics ASC serial port driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index 6818410a2be..f87097acd8a 100644
--- a/drivers/tty/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
@@ -162,7 +162,7 @@ static void timbuart_handle_tx_port(struct uart_port *port, u32 isr, u32 *ier)
dev_dbg(port->dev, "%s - leaving\n", __func__);
}
-void timbuart_handle_rx_port(struct uart_port *port, u32 isr, u32 *ier)
+static void timbuart_handle_rx_port(struct uart_port *port, u32 isr, u32 *ier)
{
if (isr & RXFLAGS) {
/* Some RX status is set */
@@ -184,7 +184,7 @@ void timbuart_handle_rx_port(struct uart_port *port, u32 isr, u32 *ier)
dev_dbg(port->dev, "%s - leaving\n", __func__);
}
-void timbuart_tasklet(unsigned long arg)
+static void timbuart_tasklet(unsigned long arg)
{
struct timbuart_port *uart = (struct timbuart_port *)arg;
u32 isr, ier = 0;
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
index f655997f44a..a63c14bc9a2 100644
--- a/drivers/tty/serial/vr41xx_siu.c
+++ b/drivers/tty/serial/vr41xx_siu.c
@@ -705,7 +705,7 @@ static int siu_init_ports(struct platform_device *pdev)
{
struct uart_port *port;
struct resource *res;
- int *type = pdev->dev.platform_data;
+ int *type = dev_get_platdata(&pdev->dev);
int i;
if (!type)
diff --git a/drivers/tty/serial/vt8500_serial.c b/drivers/tty/serial/vt8500_serial.c
index 48af43de346..93b697a0de6 100644
--- a/drivers/tty/serial/vt8500_serial.c
+++ b/drivers/tty/serial/vt8500_serial.c
@@ -170,7 +170,9 @@ static void handle_rx(struct uart_port *port)
tty_insert_flip_char(tport, c, flag);
}
+ spin_unlock(&port->lock);
tty_flip_buffer_push(tport);
+ spin_lock(&port->lock);
}
static void handle_tx(struct uart_port *port)
@@ -630,7 +632,6 @@ static int vt8500_serial_remove(struct platform_device *pdev)
{
struct vt8500_port *vt8500_port = platform_get_drvdata(pdev);
- platform_set_drvdata(pdev, NULL);
clk_disable_unprepare(vt8500_port->clk);
uart_remove_one_port(&vt8500_uart_driver, &vt8500_port->uart);
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index 8eaf1ab8add..e1ce141bad5 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -577,22 +577,22 @@ struct mgsl_struct {
#define SICR_RXC_ACTIVE BIT15
#define SICR_RXC_INACTIVE BIT14
-#define SICR_RXC (BIT15+BIT14)
+#define SICR_RXC (BIT15|BIT14)
#define SICR_TXC_ACTIVE BIT13
#define SICR_TXC_INACTIVE BIT12
-#define SICR_TXC (BIT13+BIT12)
+#define SICR_TXC (BIT13|BIT12)
#define SICR_RI_ACTIVE BIT11
#define SICR_RI_INACTIVE BIT10
-#define SICR_RI (BIT11+BIT10)
+#define SICR_RI (BIT11|BIT10)
#define SICR_DSR_ACTIVE BIT9
#define SICR_DSR_INACTIVE BIT8
-#define SICR_DSR (BIT9+BIT8)
+#define SICR_DSR (BIT9|BIT8)
#define SICR_DCD_ACTIVE BIT7
#define SICR_DCD_INACTIVE BIT6
-#define SICR_DCD (BIT7+BIT6)
+#define SICR_DCD (BIT7|BIT6)
#define SICR_CTS_ACTIVE BIT5
#define SICR_CTS_INACTIVE BIT4
-#define SICR_CTS (BIT5+BIT4)
+#define SICR_CTS (BIT5|BIT4)
#define SICR_RCC_UNDERFLOW BIT3
#define SICR_DPLL_NO_SYNC BIT2
#define SICR_BRG1_ZERO BIT1
@@ -1161,7 +1161,7 @@ static void mgsl_isr_receive_status( struct mgsl_struct *info )
{
u16 status = usc_InReg( info, RCSR );
- if ( debug_level >= DEBUG_LEVEL_ISR )
+ if ( debug_level >= DEBUG_LEVEL_ISR )
printk("%s(%d):mgsl_isr_receive_status status=%04X\n",
__FILE__,__LINE__,status);
@@ -1181,7 +1181,7 @@ static void mgsl_isr_receive_status( struct mgsl_struct *info )
(usc_InReg(info, RICR) & ~RXSTATUS_ABORT_RECEIVED));
}
- if (status & (RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED)) {
+ if (status & (RXSTATUS_EXITED_HUNT | RXSTATUS_IDLE_RECEIVED)) {
if (status & RXSTATUS_EXITED_HUNT)
info->icount.exithunt++;
if (status & RXSTATUS_IDLE_RECEIVED)
@@ -1463,21 +1463,21 @@ static void mgsl_isr_receive_data( struct mgsl_struct *info )
/* get the status of the received byte */
status = usc_InReg(info, RCSR);
- if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
- RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) )
+ if ( status & (RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR |
+ RXSTATUS_OVERRUN | RXSTATUS_BREAK_RECEIVED) )
usc_UnlatchRxstatusBits(info,RXSTATUS_ALL);
icount->rx++;
flag = 0;
- if ( status & (RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR +
- RXSTATUS_OVERRUN + RXSTATUS_BREAK_RECEIVED) ) {
- printk("rxerr=%04X\n",status);
+ if ( status & (RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR |
+ RXSTATUS_OVERRUN | RXSTATUS_BREAK_RECEIVED) ) {
+ printk("rxerr=%04X\n",status);
/* update error statistics */
if ( status & RXSTATUS_BREAK_RECEIVED ) {
- status &= ~(RXSTATUS_FRAMING_ERROR + RXSTATUS_PARITY_ERROR);
+ status &= ~(RXSTATUS_FRAMING_ERROR | RXSTATUS_PARITY_ERROR);
icount->brk++;
- } else if (status & RXSTATUS_PARITY_ERROR)
+ } else if (status & RXSTATUS_PARITY_ERROR)
icount->parity++;
else if (status & RXSTATUS_FRAMING_ERROR)
icount->frame++;
@@ -1488,7 +1488,7 @@ static void mgsl_isr_receive_data( struct mgsl_struct *info )
icount->overrun++;
}
- /* discard char if tty control flags say so */
+ /* discard char if tty control flags say so */
if (status & info->ignore_status_mask)
continue;
@@ -1545,8 +1545,8 @@ static void mgsl_isr_misc( struct mgsl_struct *info )
usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
usc_DmaCmd(info, DmaCmd_ResetRxChannel);
usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
- usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
- usc_DisableInterrupts(info, RECEIVE_DATA + RECEIVE_STATUS);
+ usc_ClearIrqPendingBits(info, RECEIVE_DATA | RECEIVE_STATUS);
+ usc_DisableInterrupts(info, RECEIVE_DATA | RECEIVE_STATUS);
/* schedule BH handler to restart receiver */
info->pending_bh |= BH_RECEIVE;
@@ -1595,7 +1595,7 @@ static void mgsl_isr_receive_dma( struct mgsl_struct *info )
u16 status;
/* clear interrupt pending and IUS bit for Rx DMA IRQ */
- usc_OutDmaReg( info, CDIR, BIT9+BIT1 );
+ usc_OutDmaReg( info, CDIR, BIT9 | BIT1 );
/* Read the receive DMA status to identify interrupt type. */
/* This also clears the status bits. */
@@ -1639,7 +1639,7 @@ static void mgsl_isr_transmit_dma( struct mgsl_struct *info )
u16 status;
/* clear interrupt pending and IUS bit for Tx DMA IRQ */
- usc_OutDmaReg(info, CDIR, BIT8+BIT0 );
+ usc_OutDmaReg(info, CDIR, BIT8 | BIT0 );
/* Read the transmit DMA status to identify interrupt type. */
/* This also clears the status bits. */
@@ -1832,8 +1832,8 @@ static void shutdown(struct mgsl_struct * info)
usc_DisableMasterIrqBit(info);
usc_stop_receiver(info);
usc_stop_transmitter(info);
- usc_DisableInterrupts(info,RECEIVE_DATA + RECEIVE_STATUS +
- TRANSMIT_DATA + TRANSMIT_STATUS + IO_PIN + MISC );
+ usc_DisableInterrupts(info,RECEIVE_DATA | RECEIVE_STATUS |
+ TRANSMIT_DATA | TRANSMIT_STATUS | IO_PIN | MISC );
usc_DisableDmaInterrupts(info,DICR_MASTER + DICR_TRANSMIT + DICR_RECEIVE);
/* Disable DMAEN (Port 7, Bit 14) */
@@ -1886,7 +1886,7 @@ static void mgsl_program_hw(struct mgsl_struct *info)
info->ri_chkcount = 0;
info->dsr_chkcount = 0;
- usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
+ usc_EnableStatusIrqs(info,SICR_CTS+SICR_DSR+SICR_DCD+SICR_RI);
usc_EnableInterrupts(info, IO_PIN);
usc_get_serial_signals(info);
@@ -2773,7 +2773,7 @@ static int mgsl_wait_event(struct mgsl_struct * info, int __user * mask_ptr)
if (!waitqueue_active(&info->event_wait_q)) {
/* disable enable exit hunt mode/idle rcvd IRQs */
usc_OutReg(info, RICR, usc_InReg(info,RICR) &
- ~(RXSTATUS_EXITED_HUNT + RXSTATUS_IDLE_RECEIVED));
+ ~(RXSTATUS_EXITED_HUNT | RXSTATUS_IDLE_RECEIVED));
}
spin_unlock_irqrestore(&info->irq_spinlock,flags);
}
@@ -3092,7 +3092,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
__FILE__,__LINE__, info->device_name, info->port.count);
- if (tty_port_close_start(&info->port, tty, filp) == 0)
+ if (tty_port_close_start(&info->port, tty, filp) == 0)
goto cleanup;
mutex_lock(&info->port.mutex);
@@ -4297,7 +4297,7 @@ static struct mgsl_struct* mgsl_allocate_device(void)
spin_lock_init(&info->irq_spinlock);
spin_lock_init(&info->netlock);
memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS));
- info->idle_mode = HDLC_TXIDLE_FLAGS;
+ info->idle_mode = HDLC_TXIDLE_FLAGS;
info->num_tx_dma_buffers = 1;
info->num_tx_holding_buffers = 0;
}
@@ -4722,7 +4722,7 @@ static void usc_set_sdlc_mode( struct mgsl_struct *info )
else if ( info->params.flags & HDLC_FLAG_UNDERRUN_FLAG )
RegValue |= BIT15;
else if ( info->params.flags & HDLC_FLAG_UNDERRUN_CRC )
- RegValue |= BIT15 + BIT14;
+ RegValue |= BIT15 | BIT14;
}
if ( info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE )
@@ -4763,11 +4763,11 @@ static void usc_set_sdlc_mode( struct mgsl_struct *info )
switch ( info->params.encoding ) {
case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
- case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
+ case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 | BIT13; break;
case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
- case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
- case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
- case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
+ case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 | BIT13; break;
+ case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14; break;
+ case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14 | BIT13; break;
}
if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
@@ -4838,15 +4838,15 @@ static void usc_set_sdlc_mode( struct mgsl_struct *info )
switch ( info->params.encoding ) {
case HDLC_ENCODING_NRZB: RegValue |= BIT13; break;
case HDLC_ENCODING_NRZI_MARK: RegValue |= BIT14; break;
- case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 + BIT13; break;
+ case HDLC_ENCODING_NRZI_SPACE: RegValue |= BIT14 | BIT13; break;
case HDLC_ENCODING_BIPHASE_MARK: RegValue |= BIT15; break;
- case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 + BIT13; break;
- case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14; break;
- case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 + BIT14 + BIT13; break;
+ case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT15 | BIT13; break;
+ case HDLC_ENCODING_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14; break;
+ case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT15 | BIT14 | BIT13; break;
}
if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_16_CCITT )
- RegValue |= BIT9 + BIT8;
+ RegValue |= BIT9 | BIT8;
else if ( (info->params.crc_type & HDLC_CRC_MASK) == HDLC_CRC_32_CCITT )
RegValue |= ( BIT12 | BIT10 | BIT9 | BIT8);
@@ -4957,7 +4957,7 @@ static void usc_set_sdlc_mode( struct mgsl_struct *info )
RegValue = 0x0000;
- if ( info->params.flags & (HDLC_FLAG_RXC_DPLL + HDLC_FLAG_TXC_DPLL) ) {
+ if ( info->params.flags & (HDLC_FLAG_RXC_DPLL | HDLC_FLAG_TXC_DPLL) ) {
u32 XtalSpeed;
u32 DpllDivisor;
u16 Tc;
@@ -5019,7 +5019,7 @@ static void usc_set_sdlc_mode( struct mgsl_struct *info )
case HDLC_ENCODING_BIPHASE_MARK:
case HDLC_ENCODING_BIPHASE_SPACE: RegValue |= BIT9; break;
case HDLC_ENCODING_BIPHASE_LEVEL:
- case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 + BIT8; break;
+ case HDLC_ENCODING_DIFF_BIPHASE_LEVEL: RegValue |= BIT9 | BIT8; break;
}
}
@@ -5056,8 +5056,8 @@ static void usc_set_sdlc_mode( struct mgsl_struct *info )
/* enable Master Interrupt Enable bit (MIE) */
usc_EnableMasterIrqBit( info );
- usc_ClearIrqPendingBits( info, RECEIVE_STATUS + RECEIVE_DATA +
- TRANSMIT_STATUS + TRANSMIT_DATA + MISC);
+ usc_ClearIrqPendingBits( info, RECEIVE_STATUS | RECEIVE_DATA |
+ TRANSMIT_STATUS | TRANSMIT_DATA | MISC);
/* arm RCC underflow interrupt */
usc_OutReg(info, SICR, (u16)(usc_InReg(info,SICR) | BIT3));
@@ -5175,14 +5175,14 @@ static void usc_set_sdlc_mode( struct mgsl_struct *info )
switch ( info->params.preamble_length ) {
case HDLC_PREAMBLE_LENGTH_16BITS: RegValue |= BIT10; break;
case HDLC_PREAMBLE_LENGTH_32BITS: RegValue |= BIT11; break;
- case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 + BIT10; break;
+ case HDLC_PREAMBLE_LENGTH_64BITS: RegValue |= BIT11 | BIT10; break;
}
switch ( info->params.preamble ) {
- case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 + BIT12; break;
+ case HDLC_PREAMBLE_PATTERN_FLAGS: RegValue |= BIT8 | BIT12; break;
case HDLC_PREAMBLE_PATTERN_ONES: RegValue |= BIT8; break;
case HDLC_PREAMBLE_PATTERN_10: RegValue |= BIT9; break;
- case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 + BIT8; break;
+ case HDLC_PREAMBLE_PATTERN_01: RegValue |= BIT9 | BIT8; break;
}
usc_OutReg( info, CCR, RegValue );
@@ -5221,7 +5221,7 @@ static void usc_enable_loopback(struct mgsl_struct *info, int enable)
{
if (enable) {
/* blank external TXD output */
- usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7+BIT6));
+ usc_OutReg(info,IOCR,usc_InReg(info,IOCR) | (BIT7 | BIT6));
/* Clock mode Control Register (CMCR)
*
@@ -5260,7 +5260,7 @@ static void usc_enable_loopback(struct mgsl_struct *info, int enable)
outw( 0x0300, info->io_base + CCAR );
} else {
/* enable external TXD output */
- usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7+BIT6));
+ usc_OutReg(info,IOCR,usc_InReg(info,IOCR) & ~(BIT7 | BIT6));
/* clear Internal Data loopback mode */
info->loopback_bits = 0;
@@ -5447,13 +5447,13 @@ static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
- usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
+ usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
usc_EnableInterrupts( info, RECEIVE_STATUS );
/* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
/* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
- usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
+ usc_OutDmaReg( info, RDIAR, BIT3 | BIT2 );
usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
usc_DmaCmd( info, DmaCmd_InitRxChannel );
if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
@@ -5488,8 +5488,8 @@ static void usc_stop_receiver( struct mgsl_struct *info )
usc_DmaCmd( info, DmaCmd_ResetRxChannel );
usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
- usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
- usc_DisableInterrupts( info, RECEIVE_DATA + RECEIVE_STATUS );
+ usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
+ usc_DisableInterrupts( info, RECEIVE_DATA | RECEIVE_STATUS );
usc_EnableReceiver(info,DISABLE_UNCONDITIONAL);
@@ -5536,13 +5536,13 @@ static void usc_start_receiver( struct mgsl_struct *info )
usc_OutDmaReg( info, NRARU, (u16)(phys_addr >> 16) );
usc_UnlatchRxstatusBits( info, RXSTATUS_ALL );
- usc_ClearIrqPendingBits( info, RECEIVE_DATA + RECEIVE_STATUS );
+ usc_ClearIrqPendingBits( info, RECEIVE_DATA | RECEIVE_STATUS );
usc_EnableInterrupts( info, RECEIVE_STATUS );
/* 1. Arm End of Buffer (EOB) Receive DMA Interrupt (BIT2 of RDIAR) */
/* 2. Enable Receive DMA Interrupts (BIT1 of DICR) */
- usc_OutDmaReg( info, RDIAR, BIT3 + BIT2 );
+ usc_OutDmaReg( info, RDIAR, BIT3 | BIT2 );
usc_OutDmaReg( info, DICR, (u16)(usc_InDmaReg(info,DICR) | BIT1) );
usc_DmaCmd( info, DmaCmd_InitRxChannel );
if ( info->params.flags & HDLC_FLAG_AUTO_DCD )
@@ -5551,7 +5551,7 @@ static void usc_start_receiver( struct mgsl_struct *info )
usc_EnableReceiver(info,ENABLE_UNCONDITIONAL);
} else {
usc_UnlatchRxstatusBits(info, RXSTATUS_ALL);
- usc_ClearIrqPendingBits(info, RECEIVE_DATA + RECEIVE_STATUS);
+ usc_ClearIrqPendingBits(info, RECEIVE_DATA | RECEIVE_STATUS);
usc_EnableInterrupts(info, RECEIVE_DATA);
usc_RTCmd( info, RTCmd_PurgeRxFifo );
@@ -5925,7 +5925,7 @@ static void usc_set_async_mode( struct mgsl_struct *info )
RegValue = 0;
if ( info->params.data_bits != 8 )
- RegValue |= BIT4+BIT3+BIT2;
+ RegValue |= BIT4 | BIT3 | BIT2;
if ( info->params.parity != ASYNC_PARITY_NONE ) {
RegValue |= BIT5;
@@ -5982,7 +5982,7 @@ static void usc_set_async_mode( struct mgsl_struct *info )
RegValue = 0;
if ( info->params.data_bits != 8 )
- RegValue |= BIT4+BIT3+BIT2;
+ RegValue |= BIT4 | BIT3 | BIT2;
if ( info->params.parity != ASYNC_PARITY_NONE ) {
RegValue |= BIT5;
@@ -6129,7 +6129,7 @@ static void usc_loopback_frame( struct mgsl_struct *info )
/* WAIT FOR RECEIVE COMPLETE */
for (i=0 ; i<1000 ; i++)
- if (usc_InReg( info, RCSR ) & (BIT8 + BIT4 + BIT3 + BIT1))
+ if (usc_InReg( info, RCSR ) & (BIT8 | BIT4 | BIT3 | BIT1))
break;
/* clear Internal Data loopback mode */
@@ -6579,8 +6579,8 @@ static bool mgsl_get_rx_frame(struct mgsl_struct *info)
status = info->rx_buffer_list[EndIndex].status;
- if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
- RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
+ if ( status & (RXSTATUS_SHORT_FRAME | RXSTATUS_OVERRUN |
+ RXSTATUS_CRC_ERROR | RXSTATUS_ABORT) ) {
if ( status & RXSTATUS_SHORT_FRAME )
info->icount.rxshort++;
else if ( status & RXSTATUS_ABORT )
@@ -6762,8 +6762,8 @@ static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info)
status = info->rx_buffer_list[CurrentIndex].status;
- if ( status & (RXSTATUS_SHORT_FRAME + RXSTATUS_OVERRUN +
- RXSTATUS_CRC_ERROR + RXSTATUS_ABORT) ) {
+ if ( status & (RXSTATUS_SHORT_FRAME | RXSTATUS_OVERRUN |
+ RXSTATUS_CRC_ERROR | RXSTATUS_ABORT) ) {
if ( status & RXSTATUS_SHORT_FRAME )
info->icount.rxshort++;
else if ( status & RXSTATUS_ABORT )
@@ -6899,7 +6899,7 @@ static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
/* set CMR:13 to start transmit when
* next GoAhead (abort) is received
*/
- info->cmr_value |= BIT13;
+ info->cmr_value |= BIT13;
}
/* begin loading the frame in the next available tx dma
@@ -7278,7 +7278,7 @@ static bool mgsl_dma_test( struct mgsl_struct *info )
spin_unlock_irqrestore(&info->irq_spinlock,flags);
-
+
/******************************/
/* WAIT FOR TRANSMIT COMPLETE */
/******************************/
@@ -7292,7 +7292,7 @@ static bool mgsl_dma_test( struct mgsl_struct *info )
status = usc_InReg( info, TCSR );
spin_unlock_irqrestore(&info->irq_spinlock,flags);
- while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
+ while ( !(status & (BIT6 | BIT5 | BIT4 | BIT2 | BIT1)) ) {
if (time_after(jiffies, EndTime)) {
rc = false;
break;
@@ -7307,7 +7307,7 @@ static bool mgsl_dma_test( struct mgsl_struct *info )
if ( rc ){
/* CHECK FOR TRANSMIT ERRORS */
- if ( status & (BIT5 + BIT1) )
+ if ( status & (BIT5 | BIT1) )
rc = false;
}
@@ -7333,7 +7333,7 @@ static bool mgsl_dma_test( struct mgsl_struct *info )
/* CHECK FOR RECEIVE ERRORS */
status = info->rx_buffer_list[0].status;
- if ( status & (BIT8 + BIT3 + BIT1) ) {
+ if ( status & (BIT8 | BIT3 | BIT1) ) {
/* receive error has occurred */
rc = false;
} else {
@@ -7605,7 +7605,7 @@ static void usc_loopmode_send_done( struct mgsl_struct * info )
{
info->loopmode_send_done_requested = false;
/* clear CMR:13 to 0 to start echoing RxData to TxData */
- info->cmr_value &= ~BIT13;
+ info->cmr_value &= ~BIT13;
usc_OutReg(info, CMR, info->cmr_value);
}
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index ff171384ea5..dc6e96996ea 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -3478,7 +3478,7 @@ static int alloc_buf_list(SLMP_INFO *info)
for ( i = 0; i < info->rx_buf_count; i++ ) {
/* calculate and store physical address of this buffer entry */
info->rx_buf_list_ex[i].phys_entry =
- info->buffer_list_phys + (i * sizeof(SCABUFSIZE));
+ info->buffer_list_phys + (i * SCABUFSIZE);
/* calculate and store physical address of */
/* next entry in circular list of entries */
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 9121c1f7aee..c043136fbe5 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -18,31 +18,118 @@
#include <linux/module.h>
#include <linux/ratelimit.h>
+
+#define MIN_TTYB_SIZE 256
+#define TTYB_ALIGN_MASK 255
+
+/*
+ * Byte threshold to limit memory consumption for flip buffers.
+ * The actual memory limit is > 2x this amount.
+ */
+#define TTYB_MEM_LIMIT 65536
+
+/*
+ * We default to dicing tty buffer allocations to this many characters
+ * in order to avoid multiple page allocations.  The size of struct
+ * tty_buffer itself is known, but it must also be taken into account
+ * that the buffer is 256-byte aligned.  See tty_buffer_alloc() for the
+ * allocation logic this must match.
+ */
+
+#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
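
To see what this sizing rule works out to, here is a small stand-alone sketch of the same expression; the page size and header size below are assumed example values, not figures taken from any particular kernel build:

/*
 * Illustrative only: half of what remains of a page after the tty_buffer
 * header (the buffer holds data plus flags, hence the /2), rounded down
 * to a 256-byte multiple.
 */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;	/* assumed PAGE_SIZE */
	unsigned long header = 64;	/* assumed sizeof(struct tty_buffer) */
	unsigned long tty_buffer_page = ((page_size - header) / 2) & ~0xFFUL;

	printf("TTY_BUFFER_PAGE = %lu\n", tty_buffer_page);	/* 1792 here */
	return 0;
}
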
+
+
+/**
+ * tty_buffer_lock_exclusive - gain exclusive access to buffer
+ * tty_buffer_unlock_exclusive - release exclusive access
+ *
+ * @port - tty_port owning the flip buffer
+ *
+ * Guarantees safe use of the line discipline's receive_buf() method by
+ * excluding the buffer work and any pending flush from using the flip
+ * buffer. Data can continue to be added concurrently to the flip buffer
+ * from the driver side.
+ *
+ * On release, the buffer work is restarted if there is data in the
+ * flip buffer
+ */
+
+void tty_buffer_lock_exclusive(struct tty_port *port)
+{
+ struct tty_bufhead *buf = &port->buf;
+
+ atomic_inc(&buf->priority);
+ mutex_lock(&buf->lock);
+}
+
+void tty_buffer_unlock_exclusive(struct tty_port *port)
+{
+ struct tty_bufhead *buf = &port->buf;
+ int restart;
+
+ restart = buf->head->commit != buf->head->read;
+
+ atomic_dec(&buf->priority);
+ mutex_unlock(&buf->lock);
+ if (restart)
+ queue_work(system_unbound_wq, &buf->work);
+}
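
The priority-counter-plus-mutex handoff implemented above can be sketched in user space roughly as follows (pthreads and C11 atomics, hypothetical names, illustrative only): the exclusive user raises the counter before taking the lock, and the consumer checks it inside its loop so the lock is released promptly.

/*
 * Illustrative only: 'priority' asks the buffer consumer to back off so an
 * exclusive user (flush, ldisc) can take the mutex without waiting for a
 * whole consumer pass to finish.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int priority;
static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;

static void consumer_pass(void)
{
	pthread_mutex_lock(&buf_lock);
	for (int i = 0; i < 1000; i++) {
		if (atomic_load(&priority))	/* someone wants exclusive access */
			break;
		/* ... process one buffer ... */
	}
	pthread_mutex_unlock(&buf_lock);
}

static void exclusive_user(void)
{
	atomic_fetch_add(&priority, 1);	/* ask the consumer to stop early */
	pthread_mutex_lock(&buf_lock);
	/* ... flush or otherwise own the buffers ... */
	atomic_fetch_sub(&priority, 1);
	pthread_mutex_unlock(&buf_lock);
}

int main(void)
{
	consumer_pass();
	exclusive_user();
	printf("done\n");
	return 0;
}
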
+
+/**
+ * tty_buffer_space_avail - return unused buffer space
+ * @port - tty_port owning the flip buffer
+ *
+ * Returns the # of bytes which can be written by the driver without
+ * reaching the buffer limit.
+ *
+ * Note: this does not guarantee that memory is available to write
+ * the returned # of bytes (use tty_prepare_flip_string_xxx() to
+ * pre-allocate if memory guarantee is required).
+ */
+
+int tty_buffer_space_avail(struct tty_port *port)
+{
+ int space = TTYB_MEM_LIMIT - atomic_read(&port->buf.memory_used);
+ return max(space, 0);
+}
+
+static void tty_buffer_reset(struct tty_buffer *p, size_t size)
+{
+ p->used = 0;
+ p->size = size;
+ p->next = NULL;
+ p->commit = 0;
+ p->read = 0;
+}
+
/**
* tty_buffer_free_all - free buffers used by a tty
* @tty: tty to free from
*
* Remove all the buffers pending on a tty whether queued with data
* or in the free ring. Must be called when the tty is no longer in use
- *
- * Locking: none
*/
void tty_buffer_free_all(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
- struct tty_buffer *thead;
+ struct tty_buffer *p, *next;
+ struct llist_node *llist;
- while ((thead = buf->head) != NULL) {
- buf->head = thead->next;
- kfree(thead);
- }
- while ((thead = buf->free) != NULL) {
- buf->free = thead->next;
- kfree(thead);
+ while ((p = buf->head) != NULL) {
+ buf->head = p->next;
+ if (p->size > 0)
+ kfree(p);
}
- buf->tail = NULL;
- buf->memory_used = 0;
+ llist = llist_del_all(&buf->free);
+ llist_for_each_entry_safe(p, next, llist, free)
+ kfree(p);
+
+ tty_buffer_reset(&buf->sentinel, 0);
+ buf->head = &buf->sentinel;
+ buf->tail = &buf->sentinel;
+
+ atomic_set(&buf->memory_used, 0);
}
/**
@@ -51,29 +138,39 @@ void tty_buffer_free_all(struct tty_port *port)
* @size: desired size (characters)
*
* Allocate a new tty buffer to hold the desired number of characters.
+ * We round our buffers off in 256 character chunks to get better
+ * allocation behaviour.
* Return NULL if out of memory or the allocation would exceed the
* per device queue
- *
- * Locking: Caller must hold tty->buf.lock
*/
static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
{
+ struct llist_node *free;
struct tty_buffer *p;
- if (port->buf.memory_used + size > 65536)
+ /* Round the buffer size out */
+ size = __ALIGN_MASK(size, TTYB_ALIGN_MASK);
+
+ if (size <= MIN_TTYB_SIZE) {
+ free = llist_del_first(&port->buf.free);
+ if (free) {
+ p = llist_entry(free, struct tty_buffer, free);
+ goto found;
+ }
+ }
+
+ /* Should possibly check if this fails for the largest buffer we
+ have queued and recycle that ? */
+ if (atomic_read(&port->buf.memory_used) > TTYB_MEM_LIMIT)
return NULL;
p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
if (p == NULL)
return NULL;
- p->used = 0;
- p->size = size;
- p->next = NULL;
- p->commit = 0;
- p->read = 0;
- p->char_buf_ptr = (char *)(p->data);
- p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
- port->buf.memory_used += size;
+
+found:
+ tty_buffer_reset(p, size);
+ atomic_add(size, &port->buf.memory_used);
return p;
}
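
The rounding performed by __ALIGN_MASK(size, TTYB_ALIGN_MASK) above pads each request up to the next 256-byte boundary; a stand-alone sketch of that rounding, with example sizes, is:

/*
 * Illustrative only: round a requested size up to a multiple of 256,
 * the same idea as the kernel's __ALIGN_MASK(size, TTYB_ALIGN_MASK).
 */
#include <stdio.h>

static unsigned long align_mask(unsigned long x, unsigned long mask)
{
	return (x + mask) & ~mask;
}

int main(void)
{
	unsigned long sizes[] = { 1, 100, 256, 300, 4000 };

	for (int i = 0; i < 5; i++)
		printf("%lu -> %lu\n", sizes[i], align_mask(sizes[i], 255UL));
	/* prints 256, 256, 256, 512 and 4096 */
	return 0;
}
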
@@ -84,8 +181,6 @@ static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
*
* Free a tty buffer, or add it to the free list according to our
* internal strategy
- *
- * Locking: Caller must hold tty->buf.lock
*/
static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
@@ -93,41 +188,12 @@ static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
struct tty_bufhead *buf = &port->buf;
/* Dumb strategy for now - should keep some stats */
- buf->memory_used -= b->size;
- WARN_ON(buf->memory_used < 0);
+ WARN_ON(atomic_sub_return(b->size, &buf->memory_used) < 0);
- if (b->size >= 512)
+ if (b->size > MIN_TTYB_SIZE)
kfree(b);
- else {
- b->next = buf->free;
- buf->free = b;
- }
-}
-
-/**
- * __tty_buffer_flush - flush full tty buffers
- * @tty: tty to flush
- *
- * flush all the buffers containing receive data. Caller must
- * hold the buffer lock and must have ensured no parallel flush to
- * ldisc is running.
- *
- * Locking: Caller must hold tty->buf.lock
- */
-
-static void __tty_buffer_flush(struct tty_port *port)
-{
- struct tty_bufhead *buf = &port->buf;
- struct tty_buffer *thead;
-
- if (unlikely(buf->head == NULL))
- return;
- while ((thead = buf->head->next) != NULL) {
- tty_buffer_free(port, buf->head);
- buf->head = thead;
- }
- WARN_ON(buf->head != buf->tail);
- buf->head->read = buf->head->commit;
+ else if (b->size > 0)
+ llist_add(&b->free, &buf->free);
}
/**
@@ -138,65 +204,28 @@ static void __tty_buffer_flush(struct tty_port *port)
* being processed by flush_to_ldisc then we defer the processing
* to that function
*
- * Locking: none
+ * Locking: takes buffer lock to ensure single-threaded flip buffer
+ * 'consumer'
*/
void tty_buffer_flush(struct tty_struct *tty)
{
struct tty_port *port = tty->port;
struct tty_bufhead *buf = &port->buf;
- unsigned long flags;
-
- spin_lock_irqsave(&buf->lock, flags);
-
- /* If the data is being pushed to the tty layer then we can't
- process it here. Instead set a flag and the flush_to_ldisc
- path will process the flush request before it exits */
- if (test_bit(TTYP_FLUSHING, &port->iflags)) {
- set_bit(TTYP_FLUSHPENDING, &port->iflags);
- spin_unlock_irqrestore(&buf->lock, flags);
- wait_event(tty->read_wait,
- test_bit(TTYP_FLUSHPENDING, &port->iflags) == 0);
- return;
- } else
- __tty_buffer_flush(port);
- spin_unlock_irqrestore(&buf->lock, flags);
-}
+ struct tty_buffer *next;
-/**
- * tty_buffer_find - find a free tty buffer
- * @tty: tty owning the buffer
- * @size: characters wanted
- *
- * Locate an existing suitable tty buffer or if we are lacking one then
- * allocate a new one. We round our buffers off in 256 character chunks
- * to get better allocation behaviour.
- *
- * Locking: Caller must hold tty->buf.lock
- */
+ atomic_inc(&buf->priority);
-static struct tty_buffer *tty_buffer_find(struct tty_port *port, size_t size)
-{
- struct tty_buffer **tbh = &port->buf.free;
- while ((*tbh) != NULL) {
- struct tty_buffer *t = *tbh;
- if (t->size >= size) {
- *tbh = t->next;
- t->next = NULL;
- t->used = 0;
- t->commit = 0;
- t->read = 0;
- port->buf.memory_used += t->size;
- return t;
- }
- tbh = &((*tbh)->next);
+ mutex_lock(&buf->lock);
+ while ((next = buf->head->next) != NULL) {
+ tty_buffer_free(port, buf->head);
+ buf->head = next;
}
- /* Round the buffer size out */
- size = (size + 0xFF) & ~0xFF;
- return tty_buffer_alloc(port, size);
- /* Should possibly check if this fails for the largest buffer we
- have queued and recycle that ? */
+ buf->head->read = buf->head->commit;
+ atomic_dec(&buf->priority);
+ mutex_unlock(&buf->lock);
}
+
/**
* tty_buffer_request_room - grow tty buffer if needed
* @tty: tty structure
@@ -204,38 +233,26 @@ static struct tty_buffer *tty_buffer_find(struct tty_port *port, size_t size)
*
* Make at least size bytes of linear space available for the tty
* buffer. If we fail return the size we managed to find.
- *
- * Locking: Takes port->buf.lock
*/
int tty_buffer_request_room(struct tty_port *port, size_t size)
{
struct tty_bufhead *buf = &port->buf;
struct tty_buffer *b, *n;
int left;
- unsigned long flags;
- spin_lock_irqsave(&buf->lock, flags);
- /* OPTIMISATION: We could keep a per tty "zero" sized buffer to
- remove this conditional if its worth it. This would be invisible
- to the callers */
+
b = buf->tail;
- if (b != NULL)
- left = b->size - b->used;
- else
- left = 0;
+ left = b->size - b->used;
if (left < size) {
/* This is the slow path - looking for new buffers to use */
- if ((n = tty_buffer_find(port, size)) != NULL) {
- if (b != NULL) {
- b->next = n;
- b->commit = b->used;
- } else
- buf->head = n;
+ if ((n = tty_buffer_alloc(port, size)) != NULL) {
buf->tail = n;
+ b->commit = b->used;
+ smp_mb();
+ b->next = n;
} else
size = left;
}
- spin_unlock_irqrestore(&buf->lock, flags);
return size;
}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
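
The commit-then-link ordering introduced above (set b->commit, barrier, then b->next) is a publish pattern for the producer/consumer handoff; a rough C11 sketch of the same idea, with hypothetical names and release/acquire in place of the kernel's smp_mb(), is:

/*
 * Illustrative only: finish (commit) the old tail buffer before publishing
 * the pointer to the new one, so a consumer that sees 'next' also sees the
 * committed data.
 */
#include <stdatomic.h>
#include <stddef.h>

struct buf {
	int used;
	int commit;
	_Atomic(struct buf *) next;
};

static void publish(struct buf *old, struct buf *new_buf)
{
	old->commit = old->used;
	/* release: 'commit' must be visible before 'next' can be observed */
	atomic_store_explicit(&old->next, new_buf, memory_order_release);
}

static struct buf *advance(struct buf *head)
{
	/* acquire: pairs with the release store in publish() */
	return atomic_load_explicit(&head->next, memory_order_acquire);
}

int main(void)
{
	struct buf a = { .used = 5 };
	struct buf b = { 0 };

	publish(&a, &b);
	return advance(&a) == &b ? 0 : 1;
}
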
@@ -249,8 +266,6 @@ EXPORT_SYMBOL_GPL(tty_buffer_request_room);
*
* Queue a series of bytes to the tty buffering. All the characters
* passed are marked with the supplied flag. Returns the number added.
- *
- * Locking: Called functions may take port->buf.lock
*/
int tty_insert_flip_string_fixed_flag(struct tty_port *port,
@@ -261,12 +276,10 @@ int tty_insert_flip_string_fixed_flag(struct tty_port *port,
int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
int space = tty_buffer_request_room(port, goal);
struct tty_buffer *tb = port->buf.tail;
- /* If there is no space then tb may be NULL */
- if (unlikely(space == 0)) {
+ if (unlikely(space == 0))
break;
- }
- memcpy(tb->char_buf_ptr + tb->used, chars, space);
- memset(tb->flag_buf_ptr + tb->used, flag, space);
+ memcpy(char_buf_ptr(tb, tb->used), chars, space);
+ memset(flag_buf_ptr(tb, tb->used), flag, space);
tb->used += space;
copied += space;
chars += space;
@@ -287,8 +300,6 @@ EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);
* Queue a series of bytes to the tty buffering. For each character
* the flags array indicates the status of the character. Returns the
* number added.
- *
- * Locking: Called functions may take port->buf.lock
*/
int tty_insert_flip_string_flags(struct tty_port *port,
@@ -299,12 +310,10 @@ int tty_insert_flip_string_flags(struct tty_port *port,
int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
int space = tty_buffer_request_room(port, goal);
struct tty_buffer *tb = port->buf.tail;
- /* If there is no space then tb may be NULL */
- if (unlikely(space == 0)) {
+ if (unlikely(space == 0))
break;
- }
- memcpy(tb->char_buf_ptr + tb->used, chars, space);
- memcpy(tb->flag_buf_ptr + tb->used, flags, space);
+ memcpy(char_buf_ptr(tb, tb->used), chars, space);
+ memcpy(flag_buf_ptr(tb, tb->used), flags, space);
tb->used += space;
copied += space;
chars += space;
@@ -325,20 +334,14 @@ EXPORT_SYMBOL(tty_insert_flip_string_flags);
* processing by the line discipline.
* Note that this function can only be used when the low_latency flag
* is unset. Otherwise the workqueue won't be flushed.
- *
- * Locking: Takes port->buf.lock
*/
void tty_schedule_flip(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
- unsigned long flags;
WARN_ON(port->low_latency);
- spin_lock_irqsave(&buf->lock, flags);
- if (buf->tail != NULL)
- buf->tail->commit = buf->tail->used;
- spin_unlock_irqrestore(&buf->lock, flags);
+ buf->tail->commit = buf->tail->used;
schedule_work(&buf->work);
}
EXPORT_SYMBOL(tty_schedule_flip);
@@ -354,8 +357,6 @@ EXPORT_SYMBOL(tty_schedule_flip);
* accounted for as ready for normal characters. This is used for drivers
* that need their own block copy routines into the buffer. There is no
* guarantee the buffer is a DMA target!
- *
- * Locking: May call functions taking port->buf.lock
*/
int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
@@ -364,8 +365,8 @@ int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
int space = tty_buffer_request_room(port, size);
if (likely(space)) {
struct tty_buffer *tb = port->buf.tail;
- *chars = tb->char_buf_ptr + tb->used;
- memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
+ *chars = char_buf_ptr(tb, tb->used);
+ memset(flag_buf_ptr(tb, tb->used), TTY_NORMAL, space);
tb->used += space;
}
return space;
@@ -384,8 +385,6 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string);
* accounted for as ready for characters. This is used for drivers
* that need their own block copy routines into the buffer. There is no
* guarantee the buffer is a DMA target!
- *
- * Locking: May call functions taking port->buf.lock
*/
int tty_prepare_flip_string_flags(struct tty_port *port,
@@ -394,8 +393,8 @@ int tty_prepare_flip_string_flags(struct tty_port *port,
int space = tty_buffer_request_room(port, size);
if (likely(space)) {
struct tty_buffer *tb = port->buf.tail;
- *chars = tb->char_buf_ptr + tb->used;
- *flags = tb->flag_buf_ptr + tb->used;
+ *chars = char_buf_ptr(tb, tb->used);
+ *flags = flag_buf_ptr(tb, tb->used);
tb->used += space;
}
return space;
@@ -403,6 +402,23 @@ int tty_prepare_flip_string_flags(struct tty_port *port,
EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
+static int
+receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count)
+{
+ struct tty_ldisc *disc = tty->ldisc;
+ unsigned char *p = char_buf_ptr(head, head->read);
+ char *f = flag_buf_ptr(head, head->read);
+
+ if (disc->ops->receive_buf2)
+ count = disc->ops->receive_buf2(tty, p, f, count);
+ else {
+ count = min_t(int, count, tty->receive_room);
+ if (count)
+ disc->ops->receive_buf(tty, p, f, count);
+ }
+ head->read += count;
+ return count;
+}
/**
* flush_to_ldisc
@@ -411,9 +427,10 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags);
* This routine is called out of the software interrupt to flush data
* from the buffer chain to the line discipline.
*
- * Locking: holds tty->buf.lock to guard buffer list. Drops the lock
- * while invoking the line discipline receive_buf method. The
- * receive_buf method is single threaded for each tty instance.
+ * The receive_buf method is single threaded for each tty instance.
+ *
+ * Locking: takes buffer lock to ensure single-threaded flip buffer
+ * 'consumer'
*/
static void flush_to_ldisc(struct work_struct *work)
@@ -421,7 +438,6 @@ static void flush_to_ldisc(struct work_struct *work)
struct tty_port *port = container_of(work, struct tty_port, buf.work);
struct tty_bufhead *buf = &port->buf;
struct tty_struct *tty;
- unsigned long flags;
struct tty_ldisc *disc;
tty = port->itty;
@@ -429,52 +445,34 @@ static void flush_to_ldisc(struct work_struct *work)
return;
disc = tty_ldisc_ref(tty);
- if (disc == NULL) /* !TTY_LDISC */
+ if (disc == NULL)
return;
- spin_lock_irqsave(&buf->lock, flags);
-
- if (!test_and_set_bit(TTYP_FLUSHING, &port->iflags)) {
- struct tty_buffer *head;
- while ((head = buf->head) != NULL) {
- int count;
- char *char_buf;
- unsigned char *flag_buf;
-
- count = head->commit - head->read;
- if (!count) {
- if (head->next == NULL)
- break;
- buf->head = head->next;
- tty_buffer_free(port, head);
- continue;
- }
- if (!tty->receive_room)
- break;
- if (count > tty->receive_room)
- count = tty->receive_room;
- char_buf = head->char_buf_ptr + head->read;
- flag_buf = head->flag_buf_ptr + head->read;
- head->read += count;
- spin_unlock_irqrestore(&buf->lock, flags);
- disc->ops->receive_buf(tty, char_buf,
- flag_buf, count);
- spin_lock_irqsave(&buf->lock, flags);
- /* Ldisc or user is trying to flush the buffers.
- We may have a deferred request to flush the
- input buffer, if so pull the chain under the lock
- and empty the queue */
- if (test_bit(TTYP_FLUSHPENDING, &port->iflags)) {
- __tty_buffer_flush(port);
- clear_bit(TTYP_FLUSHPENDING, &port->iflags);
- wake_up(&tty->read_wait);
+ mutex_lock(&buf->lock);
+
+ while (1) {
+ struct tty_buffer *head = buf->head;
+ int count;
+
+ /* Ldisc or user is trying to gain exclusive access */
+ if (atomic_read(&buf->priority))
+ break;
+
+ count = head->commit - head->read;
+ if (!count) {
+ if (head->next == NULL)
break;
- }
+ buf->head = head->next;
+ tty_buffer_free(port, head);
+ continue;
}
- clear_bit(TTYP_FLUSHING, &port->iflags);
+
+ count = receive_buf(tty, head, count);
+ if (!count)
+ break;
}
- spin_unlock_irqrestore(&buf->lock, flags);
+ mutex_unlock(&buf->lock);
tty_ldisc_deref(disc);
}
@@ -503,19 +501,13 @@ void tty_flush_to_ldisc(struct tty_struct *tty)
*
* In the event of the queue being busy for flipping the work will be
* held off and retried later.
- *
- * Locking: tty buffer lock. Driver locks in low latency mode.
*/
void tty_flip_buffer_push(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
- unsigned long flags;
- spin_lock_irqsave(&buf->lock, flags);
- if (buf->tail != NULL)
- buf->tail->commit = buf->tail->used;
- spin_unlock_irqrestore(&buf->lock, flags);
+ buf->tail->commit = buf->tail->used;
if (port->low_latency)
flush_to_ldisc(&buf->work);
@@ -530,19 +522,18 @@ EXPORT_SYMBOL(tty_flip_buffer_push);
*
* Set up the initial state of the buffer management for a tty device.
* Must be called before the other tty buffer functions are used.
- *
- * Locking: none
*/
void tty_buffer_init(struct tty_port *port)
{
struct tty_bufhead *buf = &port->buf;
- spin_lock_init(&buf->lock);
- buf->head = NULL;
- buf->tail = NULL;
- buf->free = NULL;
- buf->memory_used = 0;
+ mutex_init(&buf->lock);
+ tty_buffer_reset(&buf->sentinel, 0);
+ buf->head = &buf->sentinel;
+ buf->tail = &buf->sentinel;
+ init_llist_head(&buf->free);
+ atomic_set(&buf->memory_used, 0);
+ atomic_set(&buf->priority, 0);
INIT_WORK(&buf->work, flush_to_ldisc);
}
-
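
For orientation, a minimal producer-side sketch (not part of this patch) of how a serial driver feeds the reworked flip buffers; the mutex and atomic 'priority' machinery introduced above only serializes the consumer path in flush_to_ldisc() against exclusive users of the buffer:

#include <linux/tty_flip.h>

/* Hypothetical helper: copy received bytes into the port's flip buffer and
 * kick the flush worker.  Both helpers exist in this tree;
 * tty_flip_buffer_push() is the lock-free version shown above. */
static void example_rx(struct tty_port *port, const unsigned char *data,
                       size_t len)
{
        tty_insert_flip_string(port, data, len);  /* fills buf->tail */
        tty_flip_buffer_push(port);               /* commits and schedules flush_to_ldisc() */
}
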
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 366af832794..a9355ce1c6d 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -603,8 +603,8 @@ static int tty_signal_session_leader(struct tty_struct *tty, int exit_session)
* BTM
* redirect lock for undoing redirection
* file list lock for manipulating list of ttys
- * tty_ldisc_lock from called functions
- * termios_mutex resetting termios data
+ * tty_ldiscs_lock from called functions
+ * termios_rwsem resetting termios data
* tasklist_lock to walk task list for hangup event
* ->siglock to protect ->signal/->sighand
*/
@@ -629,6 +629,11 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
tty_lock(tty);
+ if (test_bit(TTY_HUPPED, &tty->flags)) {
+ tty_unlock(tty);
+ return;
+ }
+
/* some functions below drop BTM, so we need this bit */
set_bit(TTY_HUPPING, &tty->flags);
@@ -664,7 +669,6 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
spin_lock_irq(&tty->ctrl_lock);
clear_bit(TTY_THROTTLED, &tty->flags);
- clear_bit(TTY_PUSH, &tty->flags);
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
put_pid(tty->session);
put_pid(tty->pgrp);
@@ -1388,8 +1392,7 @@ static int tty_reopen(struct tty_struct *tty)
struct tty_driver *driver = tty->driver;
if (test_bit(TTY_CLOSING, &tty->flags) ||
- test_bit(TTY_HUPPING, &tty->flags) ||
- test_bit(TTY_LDISC_CHANGING, &tty->flags))
+ test_bit(TTY_HUPPING, &tty->flags))
return -EIO;
if (driver->type == TTY_DRIVER_TYPE_PTY &&
@@ -1405,7 +1408,7 @@ static int tty_reopen(struct tty_struct *tty)
}
tty->count++;
- WARN_ON(!test_bit(TTY_LDISC, &tty->flags));
+ WARN_ON(!tty->ldisc);
return 0;
}
@@ -2202,7 +2205,7 @@ static int tty_fasync(int fd, struct file *filp, int on)
* FIXME: does not honour flow control ??
*
* Locking:
- * Called functions take tty_ldisc_lock
+ * Called functions take tty_ldiscs_lock
* current->signal->tty check is safe without locks
*
* FIXME: may race normal receive processing
@@ -2231,7 +2234,7 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
*
* Copies the kernel idea of the window size into the user buffer.
*
- * Locking: tty->termios_mutex is taken to ensure the winsize data
+ * Locking: tty->winsize_mutex is taken to ensure the winsize data
* is consistent.
*/
@@ -2239,9 +2242,9 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg)
{
int err;
- mutex_lock(&tty->termios_mutex);
+ mutex_lock(&tty->winsize_mutex);
err = copy_to_user(arg, &tty->winsize, sizeof(*arg));
- mutex_unlock(&tty->termios_mutex);
+ mutex_unlock(&tty->winsize_mutex);
return err ? -EFAULT: 0;
}
@@ -2262,7 +2265,7 @@ int tty_do_resize(struct tty_struct *tty, struct winsize *ws)
unsigned long flags;
/* Lock the tty */
- mutex_lock(&tty->termios_mutex);
+ mutex_lock(&tty->winsize_mutex);
if (!memcmp(ws, &tty->winsize, sizeof(*ws)))
goto done;
/* Get the PID values and reference them so we can
@@ -2277,7 +2280,7 @@ int tty_do_resize(struct tty_struct *tty, struct winsize *ws)
tty->winsize = *ws;
done:
- mutex_unlock(&tty->termios_mutex);
+ mutex_unlock(&tty->winsize_mutex);
return 0;
}
EXPORT_SYMBOL(tty_do_resize);
@@ -3016,8 +3019,10 @@ void initialize_tty_struct(struct tty_struct *tty,
tty->session = NULL;
tty->pgrp = NULL;
mutex_init(&tty->legacy_mutex);
- mutex_init(&tty->termios_mutex);
- mutex_init(&tty->ldisc_mutex);
+ mutex_init(&tty->throttle_mutex);
+ init_rwsem(&tty->termios_rwsem);
+ mutex_init(&tty->winsize_mutex);
+ init_ldsem(&tty->ldisc_sem);
init_waitqueue_head(&tty->write_wait);
init_waitqueue_head(&tty->read_wait);
INIT_WORK(&tty->hangup_work, do_tty_hangup);
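
The initialization above replaces the single termios_mutex with a finer-grained set of locks. A minimal sketch (assuming the new field names, not taken from this patch) of a shared-reader snapshot under termios_rwsem, in the spirit of copy_termios() in tty_ioctl.c below:

#include <linux/tty.h>

static speed_t example_output_speed(struct tty_struct *tty)
{
        speed_t ospeed;

        down_read(&tty->termios_rwsem);         /* readers may run concurrently */
        ospeed = tty->termios.c_ospeed;
        up_read(&tty->termios_rwsem);
        return ospeed;
}
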
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 3500d411414..03ba081c577 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -94,20 +94,20 @@ EXPORT_SYMBOL(tty_driver_flush_buffer);
* @tty: terminal
*
* Indicate that a tty should stop transmitting data down the stack.
- * Takes the termios mutex to protect against parallel throttle/unthrottle
+ * Takes the termios rwsem to protect against parallel throttle/unthrottle
* and also to ensure the driver can consistently reference its own
* termios data at this point when implementing software flow control.
*/
void tty_throttle(struct tty_struct *tty)
{
- mutex_lock(&tty->termios_mutex);
+ down_write(&tty->termios_rwsem);
/* check TTY_THROTTLED first so it indicates our state */
if (!test_and_set_bit(TTY_THROTTLED, &tty->flags) &&
tty->ops->throttle)
tty->ops->throttle(tty);
tty->flow_change = 0;
- mutex_unlock(&tty->termios_mutex);
+ up_write(&tty->termios_rwsem);
}
EXPORT_SYMBOL(tty_throttle);
@@ -116,7 +116,7 @@ EXPORT_SYMBOL(tty_throttle);
* @tty: terminal
*
* Indicate that a tty may continue transmitting data down the stack.
- * Takes the termios mutex to protect against parallel throttle/unthrottle
+ * Takes the termios rwsem to protect against parallel throttle/unthrottle
* and also to ensure the driver can consistently reference its own
* termios data at this point when implementing software flow control.
*
@@ -126,12 +126,12 @@ EXPORT_SYMBOL(tty_throttle);
void tty_unthrottle(struct tty_struct *tty)
{
- mutex_lock(&tty->termios_mutex);
+ down_write(&tty->termios_rwsem);
if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) &&
tty->ops->unthrottle)
tty->ops->unthrottle(tty);
tty->flow_change = 0;
- mutex_unlock(&tty->termios_mutex);
+ up_write(&tty->termios_rwsem);
}
EXPORT_SYMBOL(tty_unthrottle);
@@ -151,7 +151,7 @@ int tty_throttle_safe(struct tty_struct *tty)
{
int ret = 0;
- mutex_lock(&tty->termios_mutex);
+ mutex_lock(&tty->throttle_mutex);
if (!test_bit(TTY_THROTTLED, &tty->flags)) {
if (tty->flow_change != TTY_THROTTLE_SAFE)
ret = 1;
@@ -161,7 +161,7 @@ int tty_throttle_safe(struct tty_struct *tty)
tty->ops->throttle(tty);
}
}
- mutex_unlock(&tty->termios_mutex);
+ mutex_unlock(&tty->throttle_mutex);
return ret;
}
@@ -182,7 +182,7 @@ int tty_unthrottle_safe(struct tty_struct *tty)
{
int ret = 0;
- mutex_lock(&tty->termios_mutex);
+ mutex_lock(&tty->throttle_mutex);
if (test_bit(TTY_THROTTLED, &tty->flags)) {
if (tty->flow_change != TTY_UNTHROTTLE_SAFE)
ret = 1;
@@ -192,7 +192,7 @@ int tty_unthrottle_safe(struct tty_struct *tty)
tty->ops->unthrottle(tty);
}
}
- mutex_unlock(&tty->termios_mutex);
+ mutex_unlock(&tty->throttle_mutex);
return ret;
}
@@ -468,7 +468,7 @@ EXPORT_SYMBOL_GPL(tty_termios_encode_baud_rate);
* @obad: output baud rate
*
* Update the current termios data for the tty with the new speed
- * settings. The caller must hold the termios_mutex for the tty in
+ * settings. The caller must hold the termios_rwsem for the tty in
* question.
*/
@@ -528,7 +528,7 @@ EXPORT_SYMBOL(tty_termios_hw_change);
* is a bit of layering violation here with n_tty in terms of the
* internal knowledge of this function.
*
- * Locking: termios_mutex
+ * Locking: termios_rwsem
*/
int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
@@ -544,7 +544,7 @@ int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
/* FIXME: we need to decide on some locking/ordering semantics
for the set_termios notification eventually */
- mutex_lock(&tty->termios_mutex);
+ down_write(&tty->termios_rwsem);
old_termios = tty->termios;
tty->termios = *new_termios;
unset_locked_termios(&tty->termios, &old_termios, &tty->termios_locked);
@@ -586,7 +586,7 @@ int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
(ld->ops->set_termios)(tty, &old_termios);
tty_ldisc_deref(ld);
}
- mutex_unlock(&tty->termios_mutex);
+ up_write(&tty->termios_rwsem);
return 0;
}
EXPORT_SYMBOL_GPL(tty_set_termios);
@@ -601,7 +601,7 @@ EXPORT_SYMBOL_GPL(tty_set_termios);
* functions before using tty_set_termios to do the actual changes.
*
* Locking:
- * Called functions take ldisc and termios_mutex locks
+ * Called functions take ldisc and termios_rwsem locks
*/
static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
@@ -613,9 +613,9 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
if (retval)
return retval;
- mutex_lock(&tty->termios_mutex);
+ down_read(&tty->termios_rwsem);
tmp_termios = tty->termios;
- mutex_unlock(&tty->termios_mutex);
+ up_read(&tty->termios_rwsem);
if (opt & TERMIOS_TERMIO) {
if (user_termio_to_kernel_termios(&tmp_termios,
@@ -667,16 +667,16 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
static void copy_termios(struct tty_struct *tty, struct ktermios *kterm)
{
- mutex_lock(&tty->termios_mutex);
+ down_read(&tty->termios_rwsem);
*kterm = tty->termios;
- mutex_unlock(&tty->termios_mutex);
+ up_read(&tty->termios_rwsem);
}
static void copy_termios_locked(struct tty_struct *tty, struct ktermios *kterm)
{
- mutex_lock(&tty->termios_mutex);
+ down_read(&tty->termios_rwsem);
*kterm = tty->termios_locked;
- mutex_unlock(&tty->termios_mutex);
+ up_read(&tty->termios_rwsem);
}
static int get_termio(struct tty_struct *tty, struct termio __user *termio)
@@ -723,10 +723,10 @@ static int set_termiox(struct tty_struct *tty, void __user *arg, int opt)
return -ERESTARTSYS;
}
- mutex_lock(&tty->termios_mutex);
+ down_write(&tty->termios_rwsem);
if (tty->ops->set_termiox)
tty->ops->set_termiox(tty, &tnew);
- mutex_unlock(&tty->termios_mutex);
+ up_write(&tty->termios_rwsem);
return 0;
}
@@ -761,13 +761,13 @@ static int get_sgttyb(struct tty_struct *tty, struct sgttyb __user *sgttyb)
{
struct sgttyb tmp;
- mutex_lock(&tty->termios_mutex);
+ down_read(&tty->termios_rwsem);
tmp.sg_ispeed = tty->termios.c_ispeed;
tmp.sg_ospeed = tty->termios.c_ospeed;
tmp.sg_erase = tty->termios.c_cc[VERASE];
tmp.sg_kill = tty->termios.c_cc[VKILL];
tmp.sg_flags = get_sgflags(tty);
- mutex_unlock(&tty->termios_mutex);
+ up_read(&tty->termios_rwsem);
return copy_to_user(sgttyb, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
@@ -806,7 +806,7 @@ static void set_sgflags(struct ktermios *termios, int flags)
* Updates a terminal from the legacy BSD style terminal information
* structure.
*
- * Locking: termios_mutex
+ * Locking: termios_rwsem
*/
static int set_sgttyb(struct tty_struct *tty, struct sgttyb __user *sgttyb)
@@ -822,7 +822,7 @@ static int set_sgttyb(struct tty_struct *tty, struct sgttyb __user *sgttyb)
if (copy_from_user(&tmp, sgttyb, sizeof(tmp)))
return -EFAULT;
- mutex_lock(&tty->termios_mutex);
+ down_write(&tty->termios_rwsem);
termios = tty->termios;
termios.c_cc[VERASE] = tmp.sg_erase;
termios.c_cc[VKILL] = tmp.sg_kill;
@@ -832,7 +832,7 @@ static int set_sgttyb(struct tty_struct *tty, struct sgttyb __user *sgttyb)
tty_termios_encode_baud_rate(&termios, termios.c_ispeed,
termios.c_ospeed);
#endif
- mutex_unlock(&tty->termios_mutex);
+ up_write(&tty->termios_rwsem);
tty_set_termios(tty, &termios);
return 0;
}
@@ -843,14 +843,14 @@ static int get_tchars(struct tty_struct *tty, struct tchars __user *tchars)
{
struct tchars tmp;
- mutex_lock(&tty->termios_mutex);
+ down_read(&tty->termios_rwsem);
tmp.t_intrc = tty->termios.c_cc[VINTR];
tmp.t_quitc = tty->termios.c_cc[VQUIT];
tmp.t_startc = tty->termios.c_cc[VSTART];
tmp.t_stopc = tty->termios.c_cc[VSTOP];
tmp.t_eofc = tty->termios.c_cc[VEOF];
tmp.t_brkc = tty->termios.c_cc[VEOL2]; /* what is brkc anyway? */
- mutex_unlock(&tty->termios_mutex);
+ up_read(&tty->termios_rwsem);
return copy_to_user(tchars, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
@@ -860,14 +860,14 @@ static int set_tchars(struct tty_struct *tty, struct tchars __user *tchars)
if (copy_from_user(&tmp, tchars, sizeof(tmp)))
return -EFAULT;
- mutex_lock(&tty->termios_mutex);
+ down_write(&tty->termios_rwsem);
tty->termios.c_cc[VINTR] = tmp.t_intrc;
tty->termios.c_cc[VQUIT] = tmp.t_quitc;
tty->termios.c_cc[VSTART] = tmp.t_startc;
tty->termios.c_cc[VSTOP] = tmp.t_stopc;
tty->termios.c_cc[VEOF] = tmp.t_eofc;
tty->termios.c_cc[VEOL2] = tmp.t_brkc; /* what is brkc anyway? */
- mutex_unlock(&tty->termios_mutex);
+ up_write(&tty->termios_rwsem);
return 0;
}
#endif
@@ -877,7 +877,7 @@ static int get_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars)
{
struct ltchars tmp;
- mutex_lock(&tty->termios_mutex);
+ down_read(&tty->termios_rwsem);
tmp.t_suspc = tty->termios.c_cc[VSUSP];
/* what is dsuspc anyway? */
tmp.t_dsuspc = tty->termios.c_cc[VSUSP];
@@ -886,7 +886,7 @@ static int get_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars)
tmp.t_flushc = tty->termios.c_cc[VEOL2];
tmp.t_werasc = tty->termios.c_cc[VWERASE];
tmp.t_lnextc = tty->termios.c_cc[VLNEXT];
- mutex_unlock(&tty->termios_mutex);
+ up_read(&tty->termios_rwsem);
return copy_to_user(ltchars, &tmp, sizeof(tmp)) ? -EFAULT : 0;
}
@@ -897,7 +897,7 @@ static int set_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars)
if (copy_from_user(&tmp, ltchars, sizeof(tmp)))
return -EFAULT;
- mutex_lock(&tty->termios_mutex);
+ down_write(&tty->termios_rwsem);
tty->termios.c_cc[VSUSP] = tmp.t_suspc;
/* what is dsuspc anyway? */
tty->termios.c_cc[VEOL2] = tmp.t_dsuspc;
@@ -906,7 +906,7 @@ static int set_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars)
tty->termios.c_cc[VEOL2] = tmp.t_flushc;
tty->termios.c_cc[VWERASE] = tmp.t_werasc;
tty->termios.c_cc[VLNEXT] = tmp.t_lnextc;
- mutex_unlock(&tty->termios_mutex);
+ up_write(&tty->termios_rwsem);
return 0;
}
#endif
@@ -946,7 +946,7 @@ static int send_prio_char(struct tty_struct *tty, char ch)
* @arg: enable/disable CLOCAL
*
* Perform a change to the CLOCAL state and call into the driver
- * layer to make it visible. All done with the termios mutex
+ * layer to make it visible. All done with the termios rwsem
*/
static int tty_change_softcar(struct tty_struct *tty, int arg)
@@ -955,7 +955,7 @@ static int tty_change_softcar(struct tty_struct *tty, int arg)
int bit = arg ? CLOCAL : 0;
struct ktermios old;
- mutex_lock(&tty->termios_mutex);
+ down_write(&tty->termios_rwsem);
old = tty->termios;
tty->termios.c_cflag &= ~CLOCAL;
tty->termios.c_cflag |= bit;
@@ -963,7 +963,7 @@ static int tty_change_softcar(struct tty_struct *tty, int arg)
tty->ops->set_termios(tty, &old);
if ((tty->termios.c_cflag & CLOCAL) != bit)
ret = -EINVAL;
- mutex_unlock(&tty->termios_mutex);
+ up_write(&tty->termios_rwsem);
return ret;
}
@@ -1066,9 +1066,9 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
if (user_termios_to_kernel_termios(&kterm,
(struct termios __user *) arg))
return -EFAULT;
- mutex_lock(&real_tty->termios_mutex);
+ down_write(&real_tty->termios_rwsem);
real_tty->termios_locked = kterm;
- mutex_unlock(&real_tty->termios_mutex);
+ up_write(&real_tty->termios_rwsem);
return 0;
#else
case TIOCGLCKTRMIOS:
@@ -1083,9 +1083,9 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
if (user_termios_to_kernel_termios_1(&kterm,
(struct termios __user *) arg))
return -EFAULT;
- mutex_lock(&real_tty->termios_mutex);
+ down_write(&real_tty->termios_rwsem);
real_tty->termios_locked = kterm;
- mutex_unlock(&real_tty->termios_mutex);
+ up_write(&real_tty->termios_rwsem);
return ret;
#endif
#ifdef TCGETX
@@ -1093,9 +1093,9 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
struct termiox ktermx;
if (real_tty->termiox == NULL)
return -EINVAL;
- mutex_lock(&real_tty->termios_mutex);
+ down_read(&real_tty->termios_rwsem);
memcpy(&ktermx, real_tty->termiox, sizeof(struct termiox));
- mutex_unlock(&real_tty->termios_mutex);
+ up_read(&real_tty->termios_rwsem);
if (copy_to_user(p, &ktermx, sizeof(struct termiox)))
ret = -EFAULT;
return ret;
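
The conversion above consistently uses down_read() on paths that only inspect termios and down_write() on paths that modify it. A hypothetical writer, sketched for illustration only:

#include <linux/tty.h>

static void example_set_cread(struct tty_struct *tty, bool on)
{
        down_write(&tty->termios_rwsem);        /* exclusive: termios is being modified */
        if (on)
                tty->termios.c_cflag |= CREAD;
        else
                tty->termios.c_cflag &= ~CREAD;
        up_write(&tty->termios_rwsem);
}
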
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 1afe192bef6..6458e11e8e9 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -31,14 +31,20 @@
#define tty_ldisc_debug(tty, f, args...)
#endif
+/* lockdep nested classes for tty->ldisc_sem */
+enum {
+ LDISC_SEM_NORMAL,
+ LDISC_SEM_OTHER,
+};
+
+
/*
* This guards the refcounted line discipline lists. The lock
* must be taken with irqs off because there are hangup path
* callers who will do ldisc lookups and cannot sleep.
*/
-static DEFINE_RAW_SPINLOCK(tty_ldisc_lock);
-static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
+static DEFINE_RAW_SPINLOCK(tty_ldiscs_lock);
/* Line disc dispatch table */
static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS];
@@ -52,7 +58,7 @@ static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS];
* from this point onwards.
*
* Locking:
- * takes tty_ldisc_lock to guard against ldisc races
+ * takes tty_ldiscs_lock to guard against ldisc races
*/
int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
@@ -63,11 +69,11 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
if (disc < N_TTY || disc >= NR_LDISCS)
return -EINVAL;
- raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
+ raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
tty_ldiscs[disc] = new_ldisc;
new_ldisc->num = disc;
new_ldisc->refcount = 0;
- raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
return ret;
}
@@ -82,7 +88,7 @@ EXPORT_SYMBOL(tty_register_ldisc);
* currently in use.
*
* Locking:
- * takes tty_ldisc_lock to guard against ldisc races
+ * takes tty_ldiscs_lock to guard against ldisc races
*/
int tty_unregister_ldisc(int disc)
@@ -93,12 +99,12 @@ int tty_unregister_ldisc(int disc)
if (disc < N_TTY || disc >= NR_LDISCS)
return -EINVAL;
- raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
+ raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
if (tty_ldiscs[disc]->refcount)
ret = -EBUSY;
else
tty_ldiscs[disc] = NULL;
- raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
return ret;
}
@@ -109,7 +115,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
unsigned long flags;
struct tty_ldisc_ops *ldops, *ret;
- raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
+ raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
ret = ERR_PTR(-EINVAL);
ldops = tty_ldiscs[disc];
if (ldops) {
@@ -119,7 +125,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
ret = ldops;
}
}
- raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
return ret;
}
@@ -127,10 +133,10 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
{
unsigned long flags;
- raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
+ raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
ldops->refcount--;
module_put(ldops->owner);
- raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+ raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
}
/**
@@ -143,10 +149,10 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
* available
*
* Locking:
- * takes tty_ldisc_lock to guard against ldisc races
+ * takes tty_ldiscs_lock to guard against ldisc races
*/
-static struct tty_ldisc *tty_ldisc_get(int disc)
+static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
{
struct tty_ldisc *ld;
struct tty_ldisc_ops *ldops;
@@ -173,8 +179,7 @@ static struct tty_ldisc *tty_ldisc_get(int disc)
}
ld->ops = ldops;
- atomic_set(&ld->users, 1);
- init_waitqueue_head(&ld->wq_idle);
+ ld->tty = tty;
return ld;
}
@@ -186,20 +191,11 @@ static struct tty_ldisc *tty_ldisc_get(int disc)
*/
static inline void tty_ldisc_put(struct tty_ldisc *ld)
{
- unsigned long flags;
-
if (WARN_ON_ONCE(!ld))
return;
- raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
-
- /* unreleased reader reference(s) will cause this WARN */
- WARN_ON(!atomic_dec_and_test(&ld->users));
-
- ld->ops->refcount--;
- module_put(ld->ops->owner);
+ put_ldops(ld->ops);
kfree(ld);
- raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
}
static void *tty_ldiscs_seq_start(struct seq_file *m, loff_t *pos)
@@ -251,34 +247,6 @@ const struct file_operations tty_ldiscs_proc_fops = {
};
/**
- * tty_ldisc_try - internal helper
- * @tty: the tty
- *
- * Make a single attempt to grab and bump the refcount on
- * the tty ldisc. Return 0 on failure or 1 on success. This is
- * used to implement both the waiting and non waiting versions
- * of tty_ldisc_ref
- *
- * Locking: takes tty_ldisc_lock
- */
-
-static struct tty_ldisc *tty_ldisc_try(struct tty_struct *tty)
-{
- unsigned long flags;
- struct tty_ldisc *ld;
-
- /* FIXME: this allows reference acquire after TTY_LDISC is cleared */
- raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
- ld = NULL;
- if (test_bit(TTY_LDISC, &tty->flags) && tty->ldisc) {
- ld = tty->ldisc;
- atomic_inc(&ld->users);
- }
- raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
- return ld;
-}
-
-/**
* tty_ldisc_ref_wait - wait for the tty ldisc
* @tty: tty device
*
@@ -291,16 +259,15 @@ static struct tty_ldisc *tty_ldisc_try(struct tty_struct *tty)
* against a discipline change, such as an existing ldisc reference
* (which we check for)
*
- * Locking: call functions take tty_ldisc_lock
+ * Note: only callable from a file_operations routine (which
+ * guarantees tty->ldisc != NULL when the lock is acquired).
*/
struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty)
{
- struct tty_ldisc *ld;
-
- /* wait_event is a macro */
- wait_event(tty_ldisc_wait, (ld = tty_ldisc_try(tty)) != NULL);
- return ld;
+ ldsem_down_read(&tty->ldisc_sem, MAX_SCHEDULE_TIMEOUT);
+ WARN_ON(!tty->ldisc);
+ return tty->ldisc;
}
EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
@@ -311,13 +278,18 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait);
* Dereference the line discipline for the terminal and take a
* reference to it. If the line discipline is in flux then
* return NULL. Can be called from IRQ and timer functions.
- *
- * Locking: called functions take tty_ldisc_lock
*/
struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty)
{
- return tty_ldisc_try(tty);
+ struct tty_ldisc *ld = NULL;
+
+ if (ldsem_down_read_trylock(&tty->ldisc_sem)) {
+ ld = tty->ldisc;
+ if (!ld)
+ ldsem_up_read(&tty->ldisc_sem);
+ }
+ return ld;
}
EXPORT_SYMBOL_GPL(tty_ldisc_ref);
@@ -327,48 +299,91 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref);
*
* Undoes the effect of tty_ldisc_ref or tty_ldisc_ref_wait. May
* be called in IRQ context.
- *
- * Locking: takes tty_ldisc_lock
*/
void tty_ldisc_deref(struct tty_ldisc *ld)
{
- unsigned long flags;
+ ldsem_up_read(&ld->tty->ldisc_sem);
+}
+EXPORT_SYMBOL_GPL(tty_ldisc_deref);
- if (WARN_ON_ONCE(!ld))
- return;
- raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
- /*
- * WARNs if one-too-many reader references were released
- * - the last reference must be released with tty_ldisc_put
- */
- WARN_ON(atomic_dec_and_test(&ld->users));
- raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
+static inline int __lockfunc
+tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
+{
+ return ldsem_down_write(&tty->ldisc_sem, timeout);
+}
- if (waitqueue_active(&ld->wq_idle))
- wake_up(&ld->wq_idle);
+static inline int __lockfunc
+tty_ldisc_lock_nested(struct tty_struct *tty, unsigned long timeout)
+{
+ return ldsem_down_write_nested(&tty->ldisc_sem,
+ LDISC_SEM_OTHER, timeout);
}
-EXPORT_SYMBOL_GPL(tty_ldisc_deref);
-/**
- * tty_ldisc_enable - allow ldisc use
- * @tty: terminal to activate ldisc on
- *
- * Set the TTY_LDISC flag when the line discipline can be called
- * again. Do necessary wakeups for existing sleepers. Clear the LDISC
- * changing flag to indicate any ldisc change is now over.
- *
- * Note: nobody should set the TTY_LDISC bit except via this function.
- * Clearing directly is allowed.
- */
+static inline void tty_ldisc_unlock(struct tty_struct *tty)
+{
+ return ldsem_up_write(&tty->ldisc_sem);
+}
-static void tty_ldisc_enable(struct tty_struct *tty)
+static int __lockfunc
+tty_ldisc_lock_pair_timeout(struct tty_struct *tty, struct tty_struct *tty2,
+ unsigned long timeout)
+{
+ int ret;
+
+ if (tty < tty2) {
+ ret = tty_ldisc_lock(tty, timeout);
+ if (ret) {
+ ret = tty_ldisc_lock_nested(tty2, timeout);
+ if (!ret)
+ tty_ldisc_unlock(tty);
+ }
+ } else {
+ /* if this is possible, it has lots of implications */
+ WARN_ON_ONCE(tty == tty2);
+ if (tty2 && tty != tty2) {
+ ret = tty_ldisc_lock(tty2, timeout);
+ if (ret) {
+ ret = tty_ldisc_lock_nested(tty, timeout);
+ if (!ret)
+ tty_ldisc_unlock(tty2);
+ }
+ } else
+ ret = tty_ldisc_lock(tty, timeout);
+ }
+
+ if (!ret)
+ return -EBUSY;
+
+ set_bit(TTY_LDISC_HALTED, &tty->flags);
+ if (tty2)
+ set_bit(TTY_LDISC_HALTED, &tty2->flags);
+ return 0;
+}
+
+static void __lockfunc
+tty_ldisc_lock_pair(struct tty_struct *tty, struct tty_struct *tty2)
+{
+ tty_ldisc_lock_pair_timeout(tty, tty2, MAX_SCHEDULE_TIMEOUT);
+}
+
+static void __lockfunc tty_ldisc_unlock_pair(struct tty_struct *tty,
+ struct tty_struct *tty2)
+{
+ tty_ldisc_unlock(tty);
+ if (tty2)
+ tty_ldisc_unlock(tty2);
+}
+
+static void __lockfunc tty_ldisc_enable_pair(struct tty_struct *tty,
+ struct tty_struct *tty2)
{
clear_bit(TTY_LDISC_HALTED, &tty->flags);
- set_bit(TTY_LDISC, &tty->flags);
- clear_bit(TTY_LDISC_CHANGING, &tty->flags);
- wake_up(&tty_ldisc_wait);
+ if (tty2)
+ clear_bit(TTY_LDISC_HALTED, &tty2->flags);
+
+ tty_ldisc_unlock_pair(tty, tty2);
}
/**
@@ -400,14 +415,14 @@ EXPORT_SYMBOL_GPL(tty_ldisc_flush);
* they are not on hot paths so a little discipline won't do
* any harm.
*
- * Locking: takes termios_mutex
+ * Locking: takes termios_rwsem
*/
static void tty_set_termios_ldisc(struct tty_struct *tty, int num)
{
- mutex_lock(&tty->termios_mutex);
+ down_write(&tty->termios_rwsem);
tty->termios.c_line = num;
- mutex_unlock(&tty->termios_mutex);
+ up_write(&tty->termios_rwsem);
}
/**
@@ -468,14 +483,14 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
int r;
/* There is an outstanding reference here so this is safe */
- old = tty_ldisc_get(old->ops->num);
+ old = tty_ldisc_get(tty, old->ops->num);
WARN_ON(IS_ERR(old));
tty->ldisc = old;
tty_set_termios_ldisc(tty, old->ops->num);
if (tty_ldisc_open(tty, old) < 0) {
tty_ldisc_put(old);
/* This driver is always present */
- new_ldisc = tty_ldisc_get(N_TTY);
+ new_ldisc = tty_ldisc_get(tty, N_TTY);
if (IS_ERR(new_ldisc))
panic("n_tty: get");
tty->ldisc = new_ldisc;
@@ -489,101 +504,6 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
}
/**
- * tty_ldisc_wait_idle - wait for the ldisc to become idle
- * @tty: tty to wait for
- * @timeout: for how long to wait at most
- *
- * Wait for the line discipline to become idle. The discipline must
- * have been halted for this to guarantee it remains idle.
- */
-static int tty_ldisc_wait_idle(struct tty_struct *tty, long timeout)
-{
- long ret;
- ret = wait_event_timeout(tty->ldisc->wq_idle,
- atomic_read(&tty->ldisc->users) == 1, timeout);
- return ret > 0 ? 0 : -EBUSY;
-}
-
-/**
- * tty_ldisc_halt - shut down the line discipline
- * @tty: tty device
- * @o_tty: paired pty device (can be NULL)
- * @timeout: # of jiffies to wait for ldisc refs to be released
- *
- * Shut down the line discipline and work queue for this tty device and
- * its paired pty (if exists). Clearing the TTY_LDISC flag ensures
- * no further references can be obtained, while waiting for existing
- * references to be released ensures no more data is fed to the ldisc.
- *
- * You need to do a 'flush_scheduled_work()' (outside the ldisc_mutex)
- * in order to make sure any currently executing ldisc work is also
- * flushed.
- */
-
-static int tty_ldisc_halt(struct tty_struct *tty, struct tty_struct *o_tty,
- long timeout)
-{
- int retval;
-
- clear_bit(TTY_LDISC, &tty->flags);
- if (o_tty)
- clear_bit(TTY_LDISC, &o_tty->flags);
-
- retval = tty_ldisc_wait_idle(tty, timeout);
- if (!retval && o_tty)
- retval = tty_ldisc_wait_idle(o_tty, timeout);
- if (retval)
- return retval;
-
- set_bit(TTY_LDISC_HALTED, &tty->flags);
- if (o_tty)
- set_bit(TTY_LDISC_HALTED, &o_tty->flags);
-
- return 0;
-}
-
-/**
- * tty_ldisc_hangup_halt - halt the line discipline for hangup
- * @tty: tty being hung up
- *
- * Shut down the line discipline and work queue for the tty device
- * being hungup. Clear the TTY_LDISC flag to ensure no further
- * references can be obtained and wait for remaining references to be
- * released to ensure no more data is fed to this ldisc.
- * Caller must hold legacy and ->ldisc_mutex.
- *
- * NB: tty_set_ldisc() is prevented from changing the ldisc concurrently
- * with this function by checking the TTY_HUPPING flag.
- */
-static bool tty_ldisc_hangup_halt(struct tty_struct *tty)
-{
- char cur_n[TASK_COMM_LEN], tty_n[64];
- long timeout = 3 * HZ;
-
- clear_bit(TTY_LDISC, &tty->flags);
-
- if (tty->ldisc) { /* Not yet closed */
- tty_unlock(tty);
-
- while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) {
- timeout = MAX_SCHEDULE_TIMEOUT;
- printk_ratelimited(KERN_WARNING
- "%s: waiting (%s) for %s took too long, but we keep waiting...\n",
- __func__, get_task_comm(cur_n, current),
- tty_name(tty, tty_n));
- }
-
- set_bit(TTY_LDISC_HALTED, &tty->flags);
-
- /* must reacquire both locks and preserve lock order */
- mutex_unlock(&tty->ldisc_mutex);
- tty_lock(tty);
- mutex_lock(&tty->ldisc_mutex);
- }
- return !!tty->ldisc;
-}
-
-/**
* tty_set_ldisc - set line discipline
* @tty: the terminal to set
* @ldisc: the line discipline
@@ -592,110 +512,49 @@ static bool tty_ldisc_hangup_halt(struct tty_struct *tty)
* context. The ldisc change logic has to protect itself against any
* overlapping ldisc change (including on the other end of pty pairs),
* the close of one side of a tty/pty pair, and eventually hangup.
- *
- * Locking: takes tty_ldisc_lock, termios_mutex
*/
int tty_set_ldisc(struct tty_struct *tty, int ldisc)
{
int retval;
- struct tty_ldisc *o_ldisc, *new_ldisc;
- struct tty_struct *o_tty;
+ struct tty_ldisc *old_ldisc, *new_ldisc;
+ struct tty_struct *o_tty = tty->link;
- new_ldisc = tty_ldisc_get(ldisc);
+ new_ldisc = tty_ldisc_get(tty, ldisc);
if (IS_ERR(new_ldisc))
return PTR_ERR(new_ldisc);
- tty_lock(tty);
- /*
- * We need to look at the tty locking here for pty/tty pairs
- * when both sides try to change in parallel.
- */
-
- o_tty = tty->link; /* o_tty is the pty side or NULL */
-
+ retval = tty_ldisc_lock_pair_timeout(tty, o_tty, 5 * HZ);
+ if (retval) {
+ tty_ldisc_put(new_ldisc);
+ return retval;
+ }
/*
* Check the no-op case
*/
if (tty->ldisc->ops->num == ldisc) {
- tty_unlock(tty);
+ tty_ldisc_enable_pair(tty, o_tty);
tty_ldisc_put(new_ldisc);
return 0;
}
- mutex_lock(&tty->ldisc_mutex);
-
- /*
- * We could be midstream of another ldisc change which has
- * dropped the lock during processing. If so we need to wait.
- */
-
- while (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
- mutex_unlock(&tty->ldisc_mutex);
- tty_unlock(tty);
- wait_event(tty_ldisc_wait,
- test_bit(TTY_LDISC_CHANGING, &tty->flags) == 0);
- tty_lock(tty);
- mutex_lock(&tty->ldisc_mutex);
- }
-
- set_bit(TTY_LDISC_CHANGING, &tty->flags);
-
- /*
- * No more input please, we are switching. The new ldisc
- * will update this value in the ldisc open function
- */
-
- tty->receive_room = 0;
-
- o_ldisc = tty->ldisc;
-
- tty_unlock(tty);
- /*
- * Make sure we don't change while someone holds a
- * reference to the line discipline. The TTY_LDISC bit
- * prevents anyone taking a reference once it is clear.
- * We need the lock to avoid racing reference takers.
- *
- * We must clear the TTY_LDISC bit here to avoid a livelock
- * with a userspace app continually trying to use the tty in
- * parallel to the change and re-referencing the tty.
- */
-
- retval = tty_ldisc_halt(tty, o_tty, 5 * HZ);
-
- /*
- * Wait for hangup to complete, if pending.
- * We must drop the mutex here in case a hangup is also in process.
- */
-
- mutex_unlock(&tty->ldisc_mutex);
-
- flush_work(&tty->hangup_work);
-
+ old_ldisc = tty->ldisc;
tty_lock(tty);
- mutex_lock(&tty->ldisc_mutex);
-
- /* handle wait idle failure locked */
- if (retval) {
- tty_ldisc_put(new_ldisc);
- goto enable;
- }
- if (test_bit(TTY_HUPPING, &tty->flags)) {
+ if (test_bit(TTY_HUPPING, &tty->flags) ||
+ test_bit(TTY_HUPPED, &tty->flags)) {
/* We were raced by the hangup method. It will have stomped
the ldisc data and closed the ldisc down */
- clear_bit(TTY_LDISC_CHANGING, &tty->flags);
- mutex_unlock(&tty->ldisc_mutex);
+ tty_ldisc_enable_pair(tty, o_tty);
tty_ldisc_put(new_ldisc);
tty_unlock(tty);
return -EIO;
}
- /* Shutdown the current discipline. */
- tty_ldisc_close(tty, o_ldisc);
+ /* Shutdown the old discipline. */
+ tty_ldisc_close(tty, old_ldisc);
/* Now set up the new line discipline. */
tty->ldisc = new_ldisc;
@@ -705,26 +564,24 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
if (retval < 0) {
/* Back to the old one or N_TTY if we can't */
tty_ldisc_put(new_ldisc);
- tty_ldisc_restore(tty, o_ldisc);
+ tty_ldisc_restore(tty, old_ldisc);
}
- /* At this point we hold a reference to the new ldisc and a
- a reference to the old ldisc. If we ended up flipping back
- to the existing ldisc we have two references to it */
-
- if (tty->ldisc->ops->num != o_ldisc->ops->num && tty->ops->set_ldisc)
+ if (tty->ldisc->ops->num != old_ldisc->ops->num && tty->ops->set_ldisc)
tty->ops->set_ldisc(tty);
- tty_ldisc_put(o_ldisc);
+ /* At this point we hold a reference to the new ldisc and a
+ reference to the old ldisc, or we hold two references to
+ the old ldisc (if it was restored as part of error cleanup
+ above). In either case, releasing a single reference from
+ the old ldisc is correct. */
+
+ tty_ldisc_put(old_ldisc);
-enable:
/*
* Allow ldisc referencing to occur again
*/
-
- tty_ldisc_enable(tty);
- if (o_tty)
- tty_ldisc_enable(o_tty);
+ tty_ldisc_enable_pair(tty, o_tty);
/* Restart the work queue in case no characters kick it off. Safe if
already running */
@@ -732,7 +589,6 @@ enable:
if (o_tty)
schedule_work(&o_tty->port->buf.work);
- mutex_unlock(&tty->ldisc_mutex);
tty_unlock(tty);
return retval;
}
@@ -746,11 +602,11 @@ enable:
static void tty_reset_termios(struct tty_struct *tty)
{
- mutex_lock(&tty->termios_mutex);
+ down_write(&tty->termios_rwsem);
tty->termios = tty->driver->init_termios;
tty->termios.c_ispeed = tty_termios_input_baud_rate(&tty->termios);
tty->termios.c_ospeed = tty_termios_baud_rate(&tty->termios);
- mutex_unlock(&tty->termios_mutex);
+ up_write(&tty->termios_rwsem);
}
@@ -765,7 +621,7 @@ static void tty_reset_termios(struct tty_struct *tty)
static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
{
- struct tty_ldisc *ld = tty_ldisc_get(ldisc);
+ struct tty_ldisc *ld = tty_ldisc_get(tty, ldisc);
if (IS_ERR(ld))
return -1;
@@ -804,14 +660,8 @@ void tty_ldisc_hangup(struct tty_struct *tty)
tty_ldisc_debug(tty, "closing ldisc: %p\n", tty->ldisc);
- /*
- * FIXME! What are the locking issues here? This may me overdoing
- * things... This question is especially important now that we've
- * removed the irqlock.
- */
ld = tty_ldisc_ref(tty);
if (ld != NULL) {
- /* We may have no line discipline at this point */
if (ld->ops->flush_buffer)
ld->ops->flush_buffer(tty);
tty_driver_flush_buffer(tty);
@@ -822,21 +672,22 @@ void tty_ldisc_hangup(struct tty_struct *tty)
ld->ops->hangup(tty);
tty_ldisc_deref(ld);
}
- /*
- * FIXME: Once we trust the LDISC code better we can wait here for
- * ldisc completion and fix the driver call race
- */
+
wake_up_interruptible_poll(&tty->write_wait, POLLOUT);
wake_up_interruptible_poll(&tty->read_wait, POLLIN);
+
+ tty_unlock(tty);
+
/*
* Shutdown the current line discipline, and reset it to
* N_TTY if need be.
*
* Avoid racing set_ldisc or tty_ldisc_release
*/
- mutex_lock(&tty->ldisc_mutex);
+ tty_ldisc_lock_pair(tty, tty->link);
+ tty_lock(tty);
- if (tty_ldisc_hangup_halt(tty)) {
+ if (tty->ldisc) {
/* At this point we have a halted ldisc; we want to close it and
reopen a new ldisc. We could defer the reopen to the next
@@ -855,9 +706,8 @@ void tty_ldisc_hangup(struct tty_struct *tty)
BUG_ON(tty_ldisc_reinit(tty, N_TTY));
WARN_ON(tty_ldisc_open(tty, tty->ldisc));
}
- tty_ldisc_enable(tty);
}
- mutex_unlock(&tty->ldisc_mutex);
+ tty_ldisc_enable_pair(tty, tty->link);
if (reset)
tty_reset_termios(tty);
@@ -889,15 +739,12 @@ int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
tty_ldisc_close(tty, ld);
return retval;
}
- tty_ldisc_enable(o_tty);
}
- tty_ldisc_enable(tty);
return 0;
}
static void tty_ldisc_kill(struct tty_struct *tty)
{
- mutex_lock(&tty->ldisc_mutex);
/*
* Now kill off the ldisc
*/
@@ -908,7 +755,6 @@ static void tty_ldisc_kill(struct tty_struct *tty)
/* Ensure the next open requests the N_TTY ldisc */
tty_set_termios_ldisc(tty, N_TTY);
- mutex_unlock(&tty->ldisc_mutex);
}
/**
@@ -930,15 +776,16 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
tty_ldisc_debug(tty, "closing ldisc: %p\n", tty->ldisc);
- tty_ldisc_halt(tty, o_tty, MAX_SCHEDULE_TIMEOUT);
-
+ tty_ldisc_lock_pair(tty, o_tty);
tty_lock_pair(tty, o_tty);
- /* This will need doing differently if we need to lock */
+
tty_ldisc_kill(tty);
if (o_tty)
tty_ldisc_kill(o_tty);
tty_unlock_pair(tty, o_tty);
+ tty_ldisc_unlock_pair(tty, o_tty);
+
/* And the memory resources remaining (buffers, termios) will be
disposed of when the kref hits zero */
@@ -955,7 +802,7 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
void tty_ldisc_init(struct tty_struct *tty)
{
- struct tty_ldisc *ld = tty_ldisc_get(N_TTY);
+ struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY);
if (IS_ERR(ld))
panic("n_tty: init_tty");
tty->ldisc = ld;
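
With the wait-queue/refcount scheme replaced by ldisc_sem, an ldisc reference is simply a held read lock. A minimal caller sketch (not from this patch) of the non-blocking variant; this is essentially the pattern tty_wakeup() uses:

#include <linux/tty.h>

static void example_write_wakeup(struct tty_struct *tty)
{
        struct tty_ldisc *ld = tty_ldisc_ref(tty);      /* read-trylock on ldisc_sem */

        if (!ld)
                return;                 /* ldisc halted or mid-change; just skip */
        if (ld->ops->write_wakeup)
                ld->ops->write_wakeup(tty);
        tty_ldisc_deref(ld);            /* releases the read lock */
}
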
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 121aeb9393e..f597e88a705 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -256,10 +256,9 @@ void tty_port_tty_hangup(struct tty_port *port, bool check_clocal)
{
struct tty_struct *tty = tty_port_tty_get(port);
- if (tty && (!check_clocal || !C_CLOCAL(tty))) {
+ if (tty && (!check_clocal || !C_CLOCAL(tty)))
tty_hangup(tty);
- tty_kref_put(tty);
- }
+ tty_kref_put(tty);
}
EXPORT_SYMBOL_GPL(tty_port_tty_hangup);
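
The hunk above moves tty_kref_put() out of the conditional so the reference taken by tty_port_tty_get() is dropped even when no hangup is sent; tty_kref_put() accepts NULL. A hypothetical caller following the same pattern:

#include <linux/tty.h>

static void example_carrier_lost(struct tty_port *port)
{
        struct tty_struct *tty = tty_port_tty_get(port);        /* kref'd tty or NULL */

        if (tty)
                tty_hangup(tty);
        tty_kref_put(tty);              /* must run on every path; NULL is fine */
}
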
diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
index a9af1b9ae16..d0e3a449770 100644
--- a/drivers/tty/vt/keyboard.c
+++ b/drivers/tty/vt/keyboard.c
@@ -132,12 +132,6 @@ static int shift_state = 0;
static unsigned char ledstate = 0xff; /* undefined */
static unsigned char ledioctl;
-static struct ledptr {
- unsigned int *addr;
- unsigned int mask;
- unsigned char valid:1;
-} ledptrs[3];
-
/*
* Notifier list for console keyboard events
*/
@@ -994,24 +988,11 @@ void setledstate(struct kbd_struct *kbd, unsigned int led)
static inline unsigned char getleds(void)
{
struct kbd_struct *kbd = kbd_table + fg_console;
- unsigned char leds;
- int i;
if (kbd->ledmode == LED_SHOW_IOCTL)
return ledioctl;
- leds = kbd->ledflagstate;
-
- if (kbd->ledmode == LED_SHOW_MEM) {
- for (i = 0; i < 3; i++)
- if (ledptrs[i].valid) {
- if (*ledptrs[i].addr & ledptrs[i].mask)
- leds |= (1 << i);
- else
- leds &= ~(1 << i);
- }
- }
- return leds;
+ return kbd->ledflagstate;
}
static int kbd_update_leds_helper(struct input_handle *handle, void *data)
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 60b7b692605..ea27804d87a 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -24,6 +24,7 @@
#include <linux/selection.h>
#include <linux/tiocl.h>
#include <linux/console.h>
+#include <linux/tty_flip.h>
/* Don't take this from <ctype.h>: 011-015 on the screen aren't spaces */
#define isspace(c) ((c) == ' ')
@@ -346,8 +347,8 @@ int paste_selection(struct tty_struct *tty)
console_unlock();
ld = tty_ldisc_ref_wait(tty);
+ tty_buffer_lock_exclusive(&vc->port);
- /* FIXME: this is completely unsafe */
add_wait_queue(&vc->paste_wait, &wait);
while (sel_buffer && sel_buffer_lth > pasted) {
set_current_state(TASK_INTERRUPTIBLE);
@@ -356,13 +357,14 @@ int paste_selection(struct tty_struct *tty)
continue;
}
count = sel_buffer_lth - pasted;
- count = min(count, tty->receive_room);
- ld->ops->receive_buf(tty, sel_buffer + pasted, NULL, count);
+ count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL,
+ count);
pasted += count;
}
remove_wait_queue(&vc->paste_wait, &wait);
__set_current_state(TASK_RUNNING);
+ tty_buffer_unlock_exclusive(&vc->port);
tty_ldisc_deref(ld);
return 0;
}
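
paste_selection() now takes exclusive ownership of the flip buffer instead of calling receive_buf() unprotected: tty_buffer_lock_exclusive() raises buf->priority so flush_to_ldisc() backs off, and tty_ldisc_receive_buf() prefers receive_buf2() and otherwise clamps to receive_room. A condensed sketch of that pattern (names as introduced in this patch):

#include <linux/tty.h>
#include <linux/tty_flip.h>

static void example_inject(struct tty_port *port, struct tty_ldisc *ld,
                           unsigned char *buf, int count)
{
        tty_buffer_lock_exclusive(port);        /* flush_to_ldisc() yields to us */
        tty_ldisc_receive_buf(ld, buf, NULL, count);
        tty_buffer_unlock_exclusive(port);      /* restarts any deferred buffer work */
}
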
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index c677829baa8..9a8e8c5a0c7 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -828,7 +828,7 @@ static inline int resize_screen(struct vc_data *vc, int width, int height,
* If the caller passes a tty structure then update the termios winsize
* information and perform any necessary signal handling.
*
- * Caller must hold the console semaphore. Takes the termios mutex and
+ * Caller must hold the console semaphore. Takes the termios rwsem and
* ctrl_lock of the tty IFF a tty is passed.
*/
@@ -972,7 +972,7 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int rows)
* the actual work.
*
* Takes the console sem and the called methods then take the tty
- * termios_mutex and the tty ctrl_lock in that order.
+ * termios_rwsem and the tty ctrl_lock in that order.
*/
static int vt_resize(struct tty_struct *tty, struct winsize *ws)
{
@@ -2809,8 +2809,10 @@ static void con_shutdown(struct tty_struct *tty)
console_unlock();
}
+static int default_color = 7; /* white */
static int default_italic_color = 2; // green (ASCII)
static int default_underline_color = 3; // cyan (ASCII)
+module_param_named(color, default_color, int, S_IRUGO | S_IWUSR);
module_param_named(italic, default_italic_color, int, S_IRUGO | S_IWUSR);
module_param_named(underline, default_underline_color, int, S_IRUGO | S_IWUSR);
@@ -2832,7 +2834,7 @@ static void vc_init(struct vc_data *vc, unsigned int rows,
vc->vc_palette[k++] = default_grn[j] ;
vc->vc_palette[k++] = default_blu[j] ;
}
- vc->vc_def_color = 0x07; /* white */
+ vc->vc_def_color = default_color;
vc->vc_ulcolor = default_underline_color;
vc->vc_itcolor = default_italic_color;
vc->vc_halfcolor = 0x08; /* grey */
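
The new parameter mirrors the existing italic/underline ones, so the default foreground colour becomes tunable without rebuilding; with a built-in console this would presumably be set on the kernel command line, e.g. vt.color=0x02 for green on black, analogous to the documented vt.italic= and vt.underline= parameters.
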
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index 5295be0342c..5a90914d856 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -1,5 +1,6 @@
menuconfig UIO
tristate "Userspace I/O drivers"
+ depends on MMU
help
Enable this to allow the userspace driver core code to be
built. This code allows userspace programs easy access to
@@ -23,13 +24,6 @@ config UIO_CIF
To compile this driver as a module, choose M here: the module
will be called uio_cif.
-config UIO_PDRV
- tristate "Userspace I/O platform driver"
- help
- Generic platform driver for Userspace I/O devices.
-
- If you don't know what to do here, say N.
-
config UIO_PDRV_GENIRQ
tristate "Userspace I/O platform driver with generic IRQ handling"
help
@@ -128,4 +122,17 @@ config UIO_PRUSS
To compile this driver as a module, choose M here: the module
will be called uio_pruss.
+config UIO_MF624
+ tristate "Humusoft MF624 DAQ PCI card driver"
+ depends on PCI
+ help
+ Userspace I/O interface for the Humusoft MF624 PCI card.
+ A sample userspace application using this driver is available
+ (among other MF624 related information and software components)
+ for download in a git repository:
+
+ git clone git://rtime.felk.cvut.cz/mf6xx.git
+
+ If you compile this as a module, it will be called uio_mf624.
+
endif
diff --git a/drivers/uio/Makefile b/drivers/uio/Makefile
index b354c539507..d3218bde3ae 100644
--- a/drivers/uio/Makefile
+++ b/drivers/uio/Makefile
@@ -1,6 +1,5 @@
obj-$(CONFIG_UIO) += uio.o
obj-$(CONFIG_UIO_CIF) += uio_cif.o
-obj-$(CONFIG_UIO_PDRV) += uio_pdrv.o
obj-$(CONFIG_UIO_PDRV_GENIRQ) += uio_pdrv_genirq.o
obj-$(CONFIG_UIO_DMEM_GENIRQ) += uio_dmem_genirq.o
obj-$(CONFIG_UIO_AEC) += uio_aec.o
@@ -8,3 +7,4 @@ obj-$(CONFIG_UIO_SERCOS3) += uio_sercos3.o
obj-$(CONFIG_UIO_PCI_GENERIC) += uio_pci_generic.o
obj-$(CONFIG_UIO_NETX) += uio_netx.o
obj-$(CONFIG_UIO_PRUSS) += uio_pruss.o
+obj-$(CONFIG_UIO_MF624) += uio_mf624.o
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 3b96f18593b..ba475632c5f 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -35,7 +35,6 @@ struct uio_device {
atomic_t event;
struct fasync_struct *async_queue;
wait_queue_head_t wait;
- int vma_count;
struct uio_info *info;
struct kobject *map_dir;
struct kobject *portio_dir;
@@ -224,38 +223,42 @@ static struct kobj_type portio_attr_type = {
.default_attrs = portio_attrs,
};
-static ssize_t show_name(struct device *dev,
+static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uio_device *idev = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", idev->info->name);
}
+static DEVICE_ATTR_RO(name);
-static ssize_t show_version(struct device *dev,
+static ssize_t version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uio_device *idev = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", idev->info->version);
}
+static DEVICE_ATTR_RO(version);
-static ssize_t show_event(struct device *dev,
+static ssize_t event_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uio_device *idev = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
}
+static DEVICE_ATTR_RO(event);
-static struct device_attribute uio_class_attributes[] = {
- __ATTR(name, S_IRUGO, show_name, NULL),
- __ATTR(version, S_IRUGO, show_version, NULL),
- __ATTR(event, S_IRUGO, show_event, NULL),
- {}
+static struct attribute *uio_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_version.attr,
+ &dev_attr_event.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(uio);
/* UIO class infrastructure */
static struct class uio_class = {
.name = "uio",
- .dev_attrs = uio_class_attributes,
+ .dev_groups = uio_groups,
};
/*
@@ -593,18 +596,6 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
return -1;
}
-static void uio_vma_open(struct vm_area_struct *vma)
-{
- struct uio_device *idev = vma->vm_private_data;
- idev->vma_count++;
-}
-
-static void uio_vma_close(struct vm_area_struct *vma)
-{
- struct uio_device *idev = vma->vm_private_data;
- idev->vma_count--;
-}
-
static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct uio_device *idev = vma->vm_private_data;
@@ -630,12 +621,23 @@ static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return 0;
}
-static const struct vm_operations_struct uio_vm_ops = {
- .open = uio_vma_open,
- .close = uio_vma_close,
+static const struct vm_operations_struct uio_logical_vm_ops = {
.fault = uio_vma_fault,
};
+static int uio_mmap_logical(struct vm_area_struct *vma)
+{
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_ops = &uio_logical_vm_ops;
+ return 0;
+}
+
+static const struct vm_operations_struct uio_physical_vm_ops = {
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+ .access = generic_access_phys,
+#endif
+};
+
static int uio_mmap_physical(struct vm_area_struct *vma)
{
struct uio_device *idev = vma->vm_private_data;
@@ -643,6 +645,8 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
if (mi < 0)
return -EINVAL;
+ vma->vm_ops = &uio_physical_vm_ops;
+
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
return remap_pfn_range(vma,
@@ -652,14 +656,6 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
vma->vm_page_prot);
}
-static int uio_mmap_logical(struct vm_area_struct *vma)
-{
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_ops = &uio_vm_ops;
- uio_vma_open(vma);
- return 0;
-}
-
static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
{
struct uio_listener *listener = filep->private_data;
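
The sysfs conversion above leans on naming conventions baked into the new macros, which is why the callbacks had to be renamed to name_show()/version_show()/event_show(). Approximately what the macros generate (simplified from <linux/device.h> and <linux/sysfs.h>):

/* DEVICE_ATTR_RO(name) is roughly: */
static struct device_attribute dev_attr_name =
        __ATTR(name, S_IRUGO, name_show, NULL);

/* ATTRIBUTE_GROUPS(uio) is roughly: */
static const struct attribute_group uio_group = {
        .attrs = uio_attrs,
};
static const struct attribute_group *uio_groups[] = {
        &uio_group,
        NULL,
};
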
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index 125d0e5a688..1270f3b2613 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -146,7 +146,7 @@ static int uio_dmem_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
static int uio_dmem_genirq_probe(struct platform_device *pdev)
{
- struct uio_dmem_genirq_pdata *pdata = pdev->dev.platform_data;
+ struct uio_dmem_genirq_pdata *pdata = dev_get_platdata(&pdev->dev);
struct uio_info *uioinfo = &pdata->uioinfo;
struct uio_dmem_genirq_platdata *priv;
struct uio_mem *uiomem;
diff --git a/drivers/uio/uio_mf624.c b/drivers/uio/uio_mf624.c
new file mode 100644
index 00000000000..a1768b2f449
--- /dev/null
+++ b/drivers/uio/uio_mf624.c
@@ -0,0 +1,247 @@
+/*
+ * UIO driver for the Humusoft MF624 DAQ card.
+ * Copyright (C) 2011 Rostislav Lisovy <lisovy@gmail.com>,
+ * Czech Technical University in Prague
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/uio_driver.h>
+
+#define PCI_VENDOR_ID_HUMUSOFT 0x186c
+#define PCI_DEVICE_ID_MF624 0x0624
+#define PCI_SUBVENDOR_ID_HUMUSOFT 0x186c
+#define PCI_SUBDEVICE_DEVICE 0x0624
+
+/* BAR0 Interrupt control/status register */
+#define INTCSR 0x4C
+#define INTCSR_ADINT_ENABLE (1 << 0)
+#define INTCSR_CTR4INT_ENABLE (1 << 3)
+#define INTCSR_PCIINT_ENABLE (1 << 6)
+#define INTCSR_ADINT_STATUS (1 << 2)
+#define INTCSR_CTR4INT_STATUS (1 << 5)
+
+enum mf624_interrupt_source {ADC, CTR4, ALL};
+
+void mf624_disable_interrupt(enum mf624_interrupt_source source,
+ struct uio_info *info)
+{
+ void __iomem *INTCSR_reg = info->mem[0].internal_addr + INTCSR;
+
+ switch (source) {
+ case ADC:
+ iowrite32(ioread32(INTCSR_reg)
+ & ~(INTCSR_ADINT_ENABLE | INTCSR_PCIINT_ENABLE),
+ INTCSR_reg);
+ break;
+
+ case CTR4:
+ iowrite32(ioread32(INTCSR_reg)
+ & ~(INTCSR_CTR4INT_ENABLE | INTCSR_PCIINT_ENABLE),
+ INTCSR_reg);
+ break;
+
+ case ALL:
+ default:
+ iowrite32(ioread32(INTCSR_reg)
+ & ~(INTCSR_ADINT_ENABLE | INTCSR_CTR4INT_ENABLE
+ | INTCSR_PCIINT_ENABLE),
+ INTCSR_reg);
+ break;
+ }
+}
+
+void mf624_enable_interrupt(enum mf624_interrupt_source source,
+ struct uio_info *info)
+{
+ void __iomem *INTCSR_reg = info->mem[0].internal_addr + INTCSR;
+
+ switch (source) {
+ case ADC:
+ iowrite32(ioread32(INTCSR_reg)
+ | INTCSR_ADINT_ENABLE | INTCSR_PCIINT_ENABLE,
+ INTCSR_reg);
+ break;
+
+ case CTR4:
+ iowrite32(ioread32(INTCSR_reg)
+ | INTCSR_CTR4INT_ENABLE | INTCSR_PCIINT_ENABLE,
+ INTCSR_reg);
+ break;
+
+ case ALL:
+ default:
+ iowrite32(ioread32(INTCSR_reg)
+ | INTCSR_ADINT_ENABLE | INTCSR_CTR4INT_ENABLE
+ | INTCSR_PCIINT_ENABLE,
+ INTCSR_reg);
+ break;
+ }
+}
+
+static irqreturn_t mf624_irq_handler(int irq, struct uio_info *info)
+{
+ void __iomem *INTCSR_reg = info->mem[0].internal_addr + INTCSR;
+
+ if ((ioread32(INTCSR_reg) & INTCSR_ADINT_ENABLE)
+ && (ioread32(INTCSR_reg) & INTCSR_ADINT_STATUS)) {
+ mf624_disable_interrupt(ADC, info);
+ return IRQ_HANDLED;
+ }
+
+ if ((ioread32(INTCSR_reg) & INTCSR_CTR4INT_ENABLE)
+ && (ioread32(INTCSR_reg) & INTCSR_CTR4INT_STATUS)) {
+ mf624_disable_interrupt(CTR4, info);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int mf624_irqcontrol(struct uio_info *info, s32 irq_on)
+{
+ if (irq_on == 0)
+ mf624_disable_interrupt(ALL, info);
+ else if (irq_on == 1)
+ mf624_enable_interrupt(ALL, info);
+
+ return 0;
+}
+
+static int mf624_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct uio_info *info;
+
+ info = kzalloc(sizeof(struct uio_info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ if (pci_enable_device(dev))
+ goto out_free;
+
+ if (pci_request_regions(dev, "mf624"))
+ goto out_disable;
+
+ info->name = "mf624";
+ info->version = "0.0.1";
+
+ /* Note: Datasheet says device uses BAR0, BAR1, BAR2 -- do not trust it */
+
+ /* BAR0 */
+ info->mem[0].name = "PCI chipset, interrupts, status "
+ "bits, special functions";
+ info->mem[0].addr = pci_resource_start(dev, 0);
+ if (!info->mem[0].addr)
+ goto out_release;
+ info->mem[0].size = pci_resource_len(dev, 0);
+ info->mem[0].memtype = UIO_MEM_PHYS;
+ info->mem[0].internal_addr = pci_ioremap_bar(dev, 0);
+ if (!info->mem[0].internal_addr)
+ goto out_release;
+
+ /* BAR2 */
+ info->mem[1].name = "ADC, DAC, DIO";
+ info->mem[1].addr = pci_resource_start(dev, 2);
+ if (!info->mem[1].addr)
+ goto out_unmap0;
+ info->mem[1].size = pci_resource_len(dev, 2);
+ info->mem[1].memtype = UIO_MEM_PHYS;
+ info->mem[1].internal_addr = pci_ioremap_bar(dev, 2);
+ if (!info->mem[1].internal_addr)
+ goto out_unmap0;
+
+ /* BAR4 */
+ info->mem[2].name = "Counter/timer chip";
+ info->mem[2].addr = pci_resource_start(dev, 4);
+ if (!info->mem[2].addr)
+ goto out_unmap1;
+ info->mem[2].size = pci_resource_len(dev, 4);
+ info->mem[2].memtype = UIO_MEM_PHYS;
+ info->mem[2].internal_addr = pci_ioremap_bar(dev, 4);
+ if (!info->mem[2].internal_addr)
+ goto out_unmap1;
+
+ info->irq = dev->irq;
+ info->irq_flags = IRQF_SHARED;
+ info->handler = mf624_irq_handler;
+
+ info->irqcontrol = mf624_irqcontrol;
+
+ if (uio_register_device(&dev->dev, info))
+ goto out_unmap2;
+
+ pci_set_drvdata(dev, info);
+
+ return 0;
+
+out_unmap2:
+ iounmap(info->mem[2].internal_addr);
+out_unmap1:
+ iounmap(info->mem[1].internal_addr);
+out_unmap0:
+ iounmap(info->mem[0].internal_addr);
+
+out_release:
+ pci_release_regions(dev);
+
+out_disable:
+ pci_disable_device(dev);
+
+out_free:
+ kfree(info);
+ return -ENODEV;
+}
+
+static void mf624_pci_remove(struct pci_dev *dev)
+{
+ struct uio_info *info = pci_get_drvdata(dev);
+
+ mf624_disable_interrupt(ALL, info);
+
+ uio_unregister_device(info);
+ pci_release_regions(dev);
+ pci_disable_device(dev);
+ pci_set_drvdata(dev, NULL);
+
+ iounmap(info->mem[0].internal_addr);
+ iounmap(info->mem[1].internal_addr);
+ iounmap(info->mem[2].internal_addr);
+
+ kfree(info);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(mf624_pci_id) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_HUMUSOFT, PCI_DEVICE_ID_MF624) },
+ { 0, }
+};
+
+static struct pci_driver mf624_pci_driver = {
+ .name = "mf624",
+ .id_table = mf624_pci_id,
+ .probe = mf624_pci_probe,
+ .remove = mf624_pci_remove,
+};
+MODULE_DEVICE_TABLE(pci, mf624_pci_id);
+
+module_pci_driver(mf624_pci_driver);
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Rostislav Lisovy <lisovy@gmail.com>");
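
A hypothetical user-space counterpart (not part of this patch) showing how the standard UIO interface drives this card: read() on the UIO node blocks until mf624_irq_handler() signals an event, and writing a 1 reaches mf624_irqcontrol() to re-enable interrupts. /dev/uio0 is an assumption; the actual index depends on probe order.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        int32_t count, enable = 1;
        int fd = open("/dev/uio0", O_RDWR);

        if (fd < 0)
                return 1;
        if (read(fd, &count, sizeof(count)) == sizeof(count))  /* blocks until an IRQ */
                printf("interrupt event count: %d\n", count);
        if (write(fd, &enable, sizeof(enable)) != sizeof(enable))  /* -> mf624_irqcontrol(info, 1) */
                perror("uio irqcontrol");
        close(fd);
        return 0;
}
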
diff --git a/drivers/uio/uio_pdrv.c b/drivers/uio/uio_pdrv.c
deleted file mode 100644
index 39be9e06170..00000000000
--- a/drivers/uio/uio_pdrv.c
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * drivers/uio/uio_pdrv.c
- *
- * Copyright (C) 2008 by Digi International Inc.
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-#include <linux/platform_device.h>
-#include <linux/uio_driver.h>
-#include <linux/stringify.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-#define DRIVER_NAME "uio_pdrv"
-
-struct uio_platdata {
- struct uio_info *uioinfo;
-};
-
-static int uio_pdrv_probe(struct platform_device *pdev)
-{
- struct uio_info *uioinfo = pdev->dev.platform_data;
- struct uio_platdata *pdata;
- struct uio_mem *uiomem;
- int ret = -ENODEV;
- int i;
-
- if (!uioinfo || !uioinfo->name || !uioinfo->version) {
- dev_dbg(&pdev->dev, "%s: err_uioinfo\n", __func__);
- goto err_uioinfo;
- }
-
- pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- ret = -ENOMEM;
- dev_dbg(&pdev->dev, "%s: err_alloc_pdata\n", __func__);
- goto err_alloc_pdata;
- }
-
- pdata->uioinfo = uioinfo;
-
- uiomem = &uioinfo->mem[0];
-
- for (i = 0; i < pdev->num_resources; ++i) {
- struct resource *r = &pdev->resource[i];
-
- if (r->flags != IORESOURCE_MEM)
- continue;
-
- if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) {
- dev_warn(&pdev->dev, "device has more than "
- __stringify(MAX_UIO_MAPS)
- " I/O memory resources.\n");
- break;
- }
-
- uiomem->memtype = UIO_MEM_PHYS;
- uiomem->addr = r->start;
- uiomem->size = resource_size(r);
- uiomem->name = r->name;
- ++uiomem;
- }
-
- while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) {
- uiomem->size = 0;
- ++uiomem;
- }
-
- pdata->uioinfo->priv = pdata;
-
- ret = uio_register_device(&pdev->dev, pdata->uioinfo);
-
- if (ret) {
- kfree(pdata);
-err_alloc_pdata:
-err_uioinfo:
- return ret;
- }
-
- platform_set_drvdata(pdev, pdata);
-
- return 0;
-}
-
-static int uio_pdrv_remove(struct platform_device *pdev)
-{
- struct uio_platdata *pdata = platform_get_drvdata(pdev);
-
- uio_unregister_device(pdata->uioinfo);
-
- kfree(pdata);
-
- return 0;
-}
-
-static struct platform_driver uio_pdrv = {
- .probe = uio_pdrv_probe,
- .remove = uio_pdrv_remove,
- .driver = {
- .name = DRIVER_NAME,
- .owner = THIS_MODULE,
- },
-};
-
-module_platform_driver(uio_pdrv);
-
-MODULE_AUTHOR("Uwe Kleine-Koenig");
-MODULE_DESCRIPTION("Userspace I/O platform driver");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index 4eb8eaf71be..90ff17a0202 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -104,7 +104,7 @@ static int uio_pdrv_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on)
static int uio_pdrv_genirq_probe(struct platform_device *pdev)
{
- struct uio_info *uioinfo = pdev->dev.platform_data;
+ struct uio_info *uioinfo = dev_get_platdata(&pdev->dev);
struct uio_pdrv_genirq_platdata *priv;
struct uio_mem *uiomem;
int ret = -EINVAL;
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index 21f7a72301e..f519da9034b 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -121,7 +121,7 @@ static int pruss_probe(struct platform_device *dev)
struct uio_pruss_dev *gdev;
struct resource *regs_prussio;
int ret = -ENODEV, cnt = 0, len;
- struct uio_pruss_pdata *pdata = dev->dev.platform_data;
+ struct uio_pruss_pdata *pdata = dev_get_platdata(&dev->dev);
gdev = kzalloc(sizeof(struct uio_pruss_dev), GFP_KERNEL);
if (!gdev)
@@ -224,7 +224,6 @@ static int pruss_remove(struct platform_device *dev)
struct uio_pruss_dev *gdev = platform_get_drvdata(dev);
pruss_cleanup(dev, gdev);
- platform_set_drvdata(dev, NULL);
return 0;
}
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 73f62caa860..2642b8a11e0 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -6,9 +6,26 @@
config USB_ARCH_HAS_OHCI
bool
+config USB_OHCI_BIG_ENDIAN_DESC
+ bool
+
+config USB_OHCI_BIG_ENDIAN_MMIO
+ bool
+
+config USB_OHCI_LITTLE_ENDIAN
+ bool
+ default n if STB03xxx || PPC_MPC52xx
+ default y
+
config USB_ARCH_HAS_EHCI
bool
+config USB_EHCI_BIG_ENDIAN_MMIO
+ bool
+
+config USB_EHCI_BIG_ENDIAN_DESC
+ bool
+
config USB_ARCH_HAS_XHCI
bool
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 238c5d47cad..70d7c5b92c3 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_USB_ISP1760_HCD) += host/
obj-$(CONFIG_USB_IMX21_HCD) += host/
obj-$(CONFIG_USB_FSL_MPH_DR_OF) += host/
obj-$(CONFIG_USB_FUSBH200_HCD) += host/
+obj-$(CONFIG_USB_FOTG210_HCD) += host/
obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
@@ -45,7 +46,7 @@ obj-$(CONFIG_USB_MICROTEK) += image/
obj-$(CONFIG_USB_SERIAL) += serial/
obj-$(CONFIG_USB) += misc/
-obj-$(CONFIG_USB_PHY) += phy/
+obj-$(CONFIG_USB_SUPPORT) += phy/
obj-$(CONFIG_EARLY_PRINTK_DBGP) += early/
obj-$(CONFIG_USB_ATM) += atm/
diff --git a/drivers/usb/atm/Makefile b/drivers/usb/atm/Makefile
index a5d792ec3ad..ac278946b06 100644
--- a/drivers/usb/atm/Makefile
+++ b/drivers/usb/atm/Makefile
@@ -1,9 +1,6 @@
#
# Makefile for USB ATM/xDSL drivers
#
-
-ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
-
obj-$(CONFIG_USB_CXACRU) += cxacru.o
obj-$(CONFIG_USB_SPEEDTOUCH) += speedtch.o
obj-$(CONFIG_USB_UEAGLEATM) += ueagle-atm.o
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 807627b36cc..69461d65397 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -888,7 +888,7 @@ static int speedtch_bind(struct usbatm_data *usbatm,
usb_fill_int_urb(instance->int_urb, usb_dev,
usb_rcvintpipe(usb_dev, ENDPOINT_INT),
instance->int_data, sizeof(instance->int_data),
- speedtch_handle_int, instance, 50);
+ speedtch_handle_int, instance, 16);
else
usb_dbg(usbatm, "%s: no memory for interrupt urb!\n", __func__);
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index 5e0d33a7da5..25a7bfcf666 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -311,8 +311,6 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
int vci = ((source[1] & 0x0f) << 12) | (source[2] << 4) | (source[3] >> 4);
u8 pti = ((source[3] & 0xe) >> 1);
- vdbg(&instance->usb_intf->dev, "%s: vpi %hd, vci %d, pti %d", __func__, vpi, vci, pti);
-
if ((vci != instance->cached_vci) || (vpi != instance->cached_vpi)) {
instance->cached_vpi = vpi;
instance->cached_vci = vci;
@@ -344,7 +342,6 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
__func__, sarb->len, vcc);
/* discard cells already received */
skb_trim(sarb, 0);
- UDSL_ASSERT(instance, sarb->tail + ATM_CELL_PAYLOAD <= sarb->end);
}
memcpy(skb_tail_pointer(sarb), source + ATM_CELL_HEADER, ATM_CELL_PAYLOAD);
@@ -437,8 +434,6 @@ static void usbatm_extract_cells(struct usbatm_data *instance,
unsigned char *cell_buf = instance->cell_buf;
unsigned int space_left = stride - buf_usage;
- UDSL_ASSERT(instance, buf_usage <= stride);
-
if (avail_data >= space_left) {
/* add new data and process cell */
memcpy(cell_buf + buf_usage, source, space_left);
@@ -479,10 +474,6 @@ static unsigned int usbatm_write_cells(struct usbatm_data *instance,
unsigned int bytes_written;
unsigned int stride = instance->tx_channel.stride;
- vdbg(&instance->usb_intf->dev, "%s: skb->len=%d, avail_space=%u",
- __func__, skb->len, avail_space);
- UDSL_ASSERT(instance, !(avail_space % stride));
-
for (bytes_written = 0; bytes_written < avail_space && ctrl->len;
bytes_written += stride, target += stride) {
unsigned int data_len = min_t(unsigned int, skb->len, ATM_CELL_PAYLOAD);
@@ -553,8 +544,6 @@ static void usbatm_rx_process(unsigned long data)
if (!urb->iso_frame_desc[i].status) {
unsigned int actual_length = urb->iso_frame_desc[i].actual_length;
- UDSL_ASSERT(instance, actual_length <= packet_size);
-
if (!merge_length)
merge_start = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset;
merge_length += actual_length;
@@ -645,7 +634,6 @@ static void usbatm_cancel_send(struct usbatm_data *instance,
{
struct sk_buff *skb, *n;
- atm_dbg(instance, "%s entered\n", __func__);
spin_lock_irq(&instance->sndqueue.lock);
skb_queue_walk_safe(&instance->sndqueue, skb, n) {
if (UDSL_SKB(skb)->atm.vcc == vcc) {
@@ -663,7 +651,6 @@ static void usbatm_cancel_send(struct usbatm_data *instance,
usbatm_pop(vcc, skb);
}
tasklet_enable(&instance->tx_channel.tasklet);
- atm_dbg(instance, "%s done\n", __func__);
}
static int usbatm_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
@@ -674,16 +661,13 @@ static int usbatm_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
/* racy disconnection check - fine */
if (!instance || instance->disconnected) {
-#ifdef DEBUG
+#ifdef VERBOSE_DEBUG
printk_ratelimited(KERN_DEBUG "%s: %s!\n", __func__, instance ? "disconnected" : "NULL instance");
#endif
err = -ENODEV;
goto fail;
}
- vdbg(&instance->usb_intf->dev, "%s called (skb 0x%p, len %u)", __func__,
- skb, skb->len);
-
if (vcc->qos.aal != ATM_AAL5) {
atm_rldbg(instance, "%s: unsupported ATM type %d!\n", __func__, vcc->qos.aal);
err = -EINVAL;
@@ -723,8 +707,6 @@ static void usbatm_destroy_instance(struct kref *kref)
{
struct usbatm_data *instance = container_of(kref, struct usbatm_data, refcount);
- usb_dbg(instance, "%s\n", __func__);
-
tasklet_kill(&instance->rx_channel.tasklet);
tasklet_kill(&instance->tx_channel.tasklet);
usb_put_dev(instance->usb_dev);
@@ -733,15 +715,11 @@ static void usbatm_destroy_instance(struct kref *kref)
static void usbatm_get_instance(struct usbatm_data *instance)
{
- usb_dbg(instance, "%s\n", __func__);
-
kref_get(&instance->refcount);
}
static void usbatm_put_instance(struct usbatm_data *instance)
{
- usb_dbg(instance, "%s\n", __func__);
-
kref_put(&instance->refcount, usbatm_destroy_instance);
}
@@ -757,7 +735,6 @@ static void usbatm_atm_dev_close(struct atm_dev *atm_dev)
if (!instance)
return;
- usb_dbg(instance, "%s\n", __func__);
atm_dev->dev_data = NULL; /* catch bugs */
usbatm_put_instance(instance); /* taken in usbatm_atm_init */
}
@@ -813,8 +790,6 @@ static int usbatm_atm_open(struct atm_vcc *vcc)
if (!instance)
return -ENODEV;
- atm_dbg(instance, "%s: vpi %hd, vci %d\n", __func__, vpi, vci);
-
/* only support AAL5 */
if ((vcc->qos.aal != ATM_AAL5)) {
atm_warn(instance, "%s: unsupported ATM type %d!\n", __func__, vcc->qos.aal);
@@ -891,11 +866,6 @@ static void usbatm_atm_close(struct atm_vcc *vcc)
if (!instance || !vcc_data)
return;
- atm_dbg(instance, "%s entered\n", __func__);
-
- atm_dbg(instance, "%s: deallocating vcc 0x%p with vpi %d vci %d\n",
- __func__, vcc_data, vcc_data->vpi, vcc_data->vci);
-
usbatm_cancel_send(instance, vcc);
mutex_lock(&instance->serialize); /* vs self, usbatm_atm_open, usbatm_usb_disconnect */
@@ -922,8 +892,6 @@ static void usbatm_atm_close(struct atm_vcc *vcc)
clear_bit(ATM_VF_ADDR, &vcc->flags);
mutex_unlock(&instance->serialize);
-
- atm_dbg(instance, "%s successful\n", __func__);
}
static int usbatm_atm_ioctl(struct atm_dev *atm_dev, unsigned int cmd,
@@ -1060,12 +1028,6 @@ int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
int i, length;
unsigned int maxpacket, num_packets;
- dev_dbg(dev, "%s: trying driver %s with vendor=%04x, product=%04x, ifnum %2d\n",
- __func__, driver->driver_name,
- le16_to_cpu(usb_dev->descriptor.idVendor),
- le16_to_cpu(usb_dev->descriptor.idProduct),
- intf->altsetting->desc.bInterfaceNumber);
-
/* instance init */
instance = kzalloc(sizeof(*instance) + sizeof(struct urb *) * (num_rcv_urbs + num_snd_urbs), GFP_KERNEL);
if (!instance) {
@@ -1158,14 +1120,13 @@ int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
instance->rx_channel.buf_size = num_packets * maxpacket;
instance->rx_channel.packet_size = maxpacket;
-#ifdef DEBUG
for (i = 0; i < 2; i++) {
struct usbatm_channel *channel = i ?
&instance->tx_channel : &instance->rx_channel;
- dev_dbg(dev, "%s: using %d byte buffer for %s channel 0x%p\n", __func__, channel->buf_size, i ? "tx" : "rx", channel);
+ dev_dbg(dev, "%s: using %d byte buffer for %s channel 0x%p\n",
+ __func__, channel->buf_size, i ? "tx" : "rx", channel);
}
-#endif
/* initialize urbs */
@@ -1176,8 +1137,6 @@ int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
struct urb *urb;
unsigned int iso_packets = usb_pipeisoc(channel->endpoint) ? channel->buf_size / channel->packet_size : 0;
- UDSL_ASSERT(instance, !usb_pipeisoc(channel->endpoint) || usb_pipein(channel->endpoint));
-
urb = usb_alloc_urb(iso_packets, GFP_KERNEL);
if (!urb) {
dev_err(dev, "%s: no memory for urb %d!\n", __func__, i);
@@ -1266,8 +1225,6 @@ void usbatm_usb_disconnect(struct usb_interface *intf)
struct usbatm_vcc_data *vcc_data;
int i;
- dev_dbg(dev, "%s entered\n", __func__);
-
if (!instance) {
dev_dbg(dev, "%s: NULL instance!\n", __func__);
return;
diff --git a/drivers/usb/atm/usbatm.h b/drivers/usb/atm/usbatm.h
index 5fc48940521..5651231a743 100644
--- a/drivers/usb/atm/usbatm.h
+++ b/drivers/usb/atm/usbatm.h
@@ -39,31 +39,14 @@
#define VERBOSE_DEBUG
*/
-#ifdef DEBUG
-#define UDSL_ASSERT(instance, x) BUG_ON(!(x))
-#else
-#define UDSL_ASSERT(instance, x) \
- do { \
- if (!(x)) \
- dev_warn(&(instance)->usb_intf->dev, \
- "failed assertion '%s' at line %d", \
- __stringify(x), __LINE__); \
- } while (0)
-#endif
-
#define usb_err(instance, format, arg...) \
dev_err(&(instance)->usb_intf->dev , format , ## arg)
#define usb_info(instance, format, arg...) \
dev_info(&(instance)->usb_intf->dev , format , ## arg)
#define usb_warn(instance, format, arg...) \
dev_warn(&(instance)->usb_intf->dev , format , ## arg)
-#ifdef DEBUG
-#define usb_dbg(instance, format, arg...) \
- dev_printk(KERN_DEBUG , &(instance)->usb_intf->dev , format , ## arg)
-#else
#define usb_dbg(instance, format, arg...) \
- do {} while (0)
-#endif
+ dev_dbg(&(instance)->usb_intf->dev , format , ## arg)
/* FIXME: move to dev_* once ATM is driver model aware */
#define atm_printk(level, instance, format, arg...) \
@@ -76,18 +59,12 @@
atm_printk(KERN_INFO, instance , format , ## arg)
#define atm_warn(instance, format, arg...) \
atm_printk(KERN_WARNING, instance , format , ## arg)
-#ifdef DEBUG
-#define atm_dbg(instance, format, arg...) \
- atm_printk(KERN_DEBUG, instance , format , ## arg)
-#define atm_rldbg(instance, format, arg...) \
+#define atm_dbg(instance, format, arg...) \
+ dynamic_pr_debug("ATM dev %d: " format , \
+ (instance)->atm_dev->number , ## arg)
+#define atm_rldbg(instance, format, arg...) \
if (printk_ratelimit()) \
- atm_printk(KERN_DEBUG, instance , format , ## arg)
-#else
-#define atm_dbg(instance, format, arg...) \
- do {} while (0)
-#define atm_rldbg(instance, format, arg...) \
- do {} while (0)
-#endif
+ atm_dbg(instance , format , ## arg)
/* flags, set by mini-driver in bind() */
diff --git a/drivers/usb/c67x00/c67x00-drv.c b/drivers/usb/c67x00/c67x00-drv.c
index fe815ecd557..8db3380c332 100644
--- a/drivers/usb/c67x00/c67x00-drv.c
+++ b/drivers/usb/c67x00/c67x00-drv.c
@@ -131,7 +131,7 @@ static int c67x00_drv_probe(struct platform_device *pdev)
if (!res2)
return -ENODEV;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata)
return -ENODEV;
@@ -154,7 +154,7 @@ static int c67x00_drv_probe(struct platform_device *pdev)
spin_lock_init(&c67x00->hpi.lock);
c67x00->hpi.regstep = pdata->hpi_regstep;
- c67x00->pdata = pdev->dev.platform_data;
+ c67x00->pdata = dev_get_platdata(&pdev->dev);
c67x00->pdev = pdev;
c67x00_ll_init(c67x00);
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index eb2aa2e5a84..4a851e15e58 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -1,6 +1,6 @@
config USB_CHIPIDEA
tristate "ChipIdea Highspeed Dual Role Controller"
- depends on USB || USB_GADGET
+ depends on (USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)
help
Say Y here if your system has a dual role high speed USB
controller based on ChipIdea silicon IP. Currently, only the
@@ -12,15 +12,14 @@ if USB_CHIPIDEA
config USB_CHIPIDEA_UDC
bool "ChipIdea device controller"
- depends on USB_GADGET=y || USB_CHIPIDEA=m
+ depends on USB_GADGET
help
Say Y here to enable device controller functionality of the
ChipIdea driver.
config USB_CHIPIDEA_HOST
bool "ChipIdea host controller"
- depends on USB=y
- depends on USB_EHCI_HCD=y || USB_CHIPIDEA=m
+ depends on USB_EHCI_HCD
select USB_EHCI_ROOT_HUB_TT
help
Say Y here to enable host controller functionality of the
diff --git a/drivers/usb/chipidea/Makefile b/drivers/usb/chipidea/Makefile
index 6cf5f68dedd..a99d980454a 100644
--- a/drivers/usb/chipidea/Makefile
+++ b/drivers/usb/chipidea/Makefile
@@ -2,7 +2,7 @@ ccflags-$(CONFIG_USB_CHIPIDEA_DEBUG) := -DDEBUG
obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc.o
-ci_hdrc-y := core.o
+ci_hdrc-y := core.o otg.o
ci_hdrc-$(CONFIG_USB_CHIPIDEA_UDC) += udc.o
ci_hdrc-$(CONFIG_USB_CHIPIDEA_HOST) += host.o
ci_hdrc-$(CONFIG_USB_CHIPIDEA_DEBUG) += debug.o
diff --git a/drivers/usb/chipidea/bits.h b/drivers/usb/chipidea/bits.h
index aefa0261220..464584c6cca 100644
--- a/drivers/usb/chipidea/bits.h
+++ b/drivers/usb/chipidea/bits.h
@@ -50,7 +50,7 @@
#define PORTSC_PTC (0x0FUL << 16)
/* PTS and PTW for non lpm version only */
#define PORTSC_PTS(d) \
- ((((d) & 0x3) << 30) | (((d) & 0x4) ? BIT(25) : 0))
+ (u32)((((d) & 0x3) << 30) | (((d) & 0x4) ? BIT(25) : 0))
#define PORTSC_PTW BIT(28)
#define PORTSC_STS BIT(29)
@@ -59,7 +59,7 @@
#define DEVLC_PSPD_HS (0x02UL << 25)
#define DEVLC_PTW BIT(27)
#define DEVLC_STS BIT(28)
-#define DEVLC_PTS(d) (((d) & 0x7) << 29)
+#define DEVLC_PTS(d) (u32)(((d) & 0x7) << 29)
/* Encoding for DEVLC_PTS and PORTSC_PTS */
#define PTS_UTMI 0
@@ -79,11 +79,21 @@
#define OTGSC_ASVIS BIT(18)
#define OTGSC_BSVIS BIT(19)
#define OTGSC_BSEIS BIT(20)
+#define OTGSC_1MSIS BIT(21)
+#define OTGSC_DPIS BIT(22)
#define OTGSC_IDIE BIT(24)
#define OTGSC_AVVIE BIT(25)
#define OTGSC_ASVIE BIT(26)
#define OTGSC_BSVIE BIT(27)
#define OTGSC_BSEIE BIT(28)
+#define OTGSC_1MSIE BIT(29)
+#define OTGSC_DPIE BIT(30)
+#define OTGSC_INT_EN_BITS (OTGSC_IDIE | OTGSC_AVVIE | OTGSC_ASVIE \
+ | OTGSC_BSVIE | OTGSC_BSEIE | OTGSC_1MSIE \
+ | OTGSC_DPIE)
+#define OTGSC_INT_STATUS_BITS (OTGSC_IDIS | OTGSC_AVVIS | OTGSC_ASVIS \
+ | OTGSC_BSVIS | OTGSC_BSEIS | OTGSC_1MSIS \
+ | OTGSC_DPIS)
/* USBMODE */
#define USBMODE_CM (0x03UL << 0)
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 33cb29f36e0..1c94fc5257f 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -132,6 +132,9 @@ struct hw_bank {
* @transceiver: pointer to USB PHY, if any
* @hcd: pointer to usb_hcd for ehci host driver
* @debugfs: root dentry for this controller in debugfs
+ * @id_event: indicates there is an ID event, handled in ci_otg_work
+ * @b_sess_valid_event: indicates there is a vbus event, handled
+ * in ci_otg_work
*/
struct ci_hdrc {
struct device *dev;
@@ -168,6 +171,8 @@ struct ci_hdrc {
struct usb_phy *transceiver;
struct usb_hcd *hcd;
struct dentry *debugfs;
+ bool id_event;
+ bool b_sess_valid_event;
};
static inline struct ci_role_driver *ci_role(struct ci_hdrc *ci)
@@ -303,4 +308,7 @@ int hw_port_test_set(struct ci_hdrc *ci, u8 mode);
u8 hw_port_test_get(struct ci_hdrc *ci);
+int hw_wait_reg(struct ci_hdrc *ci, enum ci_hw_regs reg, u32 mask,
+ u32 value, unsigned int timeout_ms);
+
#endif /* __DRIVERS_USB_CHIPIDEA_CI_H */
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 14362c00db3..74d998d9b45 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -19,70 +19,56 @@
#include <linux/dma-mapping.h>
#include <linux/usb/chipidea.h>
#include <linux/clk.h>
-#include <linux/regulator/consumer.h>
#include "ci.h"
#include "ci_hdrc_imx.h"
-#define pdev_to_phy(pdev) \
- ((struct usb_phy *)platform_get_drvdata(pdev))
-
struct ci_hdrc_imx_data {
struct usb_phy *phy;
struct platform_device *ci_pdev;
struct clk *clk;
- struct regulator *reg_vbus;
+ struct imx_usbmisc_data *usbmisc_data;
};
-static const struct usbmisc_ops *usbmisc_ops;
-
/* Common functions shared by usbmisc drivers */
-int usbmisc_set_ops(const struct usbmisc_ops *ops)
-{
- if (usbmisc_ops)
- return -EBUSY;
-
- usbmisc_ops = ops;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(usbmisc_set_ops);
-
-void usbmisc_unset_ops(const struct usbmisc_ops *ops)
-{
- usbmisc_ops = NULL;
-}
-EXPORT_SYMBOL_GPL(usbmisc_unset_ops);
-
-int usbmisc_get_init_data(struct device *dev, struct usbmisc_usb_device *usbdev)
+static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
{
struct device_node *np = dev->of_node;
struct of_phandle_args args;
+ struct imx_usbmisc_data *data;
int ret;
- usbdev->dev = dev;
+ /*
+ * In case the fsl,usbmisc property is not present, this device doesn't
+ * need usbmisc. Return NULL (which is not an error here).
+ */
+ if (!of_get_property(np, "fsl,usbmisc", NULL))
+ return NULL;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
ret = of_parse_phandle_with_args(np, "fsl,usbmisc", "#index-cells",
0, &args);
if (ret) {
dev_err(dev, "Failed to parse property fsl,usbmisc, errno %d\n",
ret);
- memset(usbdev, 0, sizeof(*usbdev));
- return ret;
+ return ERR_PTR(ret);
}
- usbdev->index = args.args[0];
+
+ data->index = args.args[0];
of_node_put(args.np);
if (of_find_property(np, "disable-over-current", NULL))
- usbdev->disable_oc = 1;
+ data->disable_oc = 1;
if (of_find_property(np, "external-vbus-divider", NULL))
- usbdev->evdo = 1;
+ data->evdo = 1;
- return 0;
+ return data;
}
-EXPORT_SYMBOL_GPL(usbmisc_get_init_data);
/* End of common functions shared by usbmisc drivers*/
@@ -93,27 +79,19 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
.name = "ci_hdrc_imx",
.capoffset = DEF_CAPOFFSET,
.flags = CI_HDRC_REQUIRE_TRANSCEIVER |
- CI_HDRC_PULLUP_ON_VBUS |
CI_HDRC_DISABLE_STREAMING,
};
- struct resource *res;
int ret;
- if (of_find_property(pdev->dev.of_node, "fsl,usbmisc", NULL)
- && !usbmisc_ops)
- return -EPROBE_DEFER;
-
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data) {
dev_err(&pdev->dev, "Failed to allocate ci_hdrc-imx data!\n");
return -ENOMEM;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Can't get device resources!\n");
- return -ENOENT;
- }
+ data->usbmisc_data = usbmisc_get_init_data(&pdev->dev);
+ if (IS_ERR(data->usbmisc_data))
+ return PTR_ERR(data->usbmisc_data);
data->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(data->clk)) {
@@ -141,20 +119,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
goto err_clk;
}
- /* we only support host now, so enable vbus here */
- data->reg_vbus = devm_regulator_get(&pdev->dev, "vbus");
- if (!IS_ERR(data->reg_vbus)) {
- ret = regulator_enable(data->reg_vbus);
- if (ret) {
- dev_err(&pdev->dev,
- "Failed to enable vbus regulator, err=%d\n",
- ret);
- goto err_clk;
- }
- } else {
- data->reg_vbus = NULL;
- }
-
pdata.phy = data->phy;
if (!pdev->dev.dma_mask)
@@ -162,12 +126,12 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
if (!pdev->dev.coherent_dma_mask)
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- if (usbmisc_ops && usbmisc_ops->init) {
- ret = usbmisc_ops->init(&pdev->dev);
+ if (data->usbmisc_data) {
+ ret = imx_usbmisc_init(data->usbmisc_data);
if (ret) {
- dev_err(&pdev->dev,
- "usbmisc init failed, ret=%d\n", ret);
- goto err;
+ dev_err(&pdev->dev, "usbmisc init failed, ret=%d\n",
+ ret);
+ goto err_clk;
}
}
@@ -179,14 +143,14 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Can't register ci_hdrc platform device, err=%d\n",
ret);
- goto err;
+ goto err_clk;
}
- if (usbmisc_ops && usbmisc_ops->post) {
- ret = usbmisc_ops->post(&pdev->dev);
+ if (data->usbmisc_data) {
+ ret = imx_usbmisc_init_post(data->usbmisc_data);
if (ret) {
- dev_err(&pdev->dev,
- "usbmisc post failed, ret=%d\n", ret);
+ dev_err(&pdev->dev, "usbmisc post failed, ret=%d\n",
+ ret);
goto disable_device;
}
}
@@ -200,9 +164,6 @@ static int ci_hdrc_imx_probe(struct platform_device *pdev)
disable_device:
ci_hdrc_remove_device(data->ci_pdev);
-err:
- if (data->reg_vbus)
- regulator_disable(data->reg_vbus);
err_clk:
clk_disable_unprepare(data->clk);
return ret;
@@ -215,13 +176,8 @@ static int ci_hdrc_imx_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
ci_hdrc_remove_device(data->ci_pdev);
- if (data->reg_vbus)
- regulator_disable(data->reg_vbus);
-
- if (data->phy) {
+ if (data->phy)
usb_phy_shutdown(data->phy);
- module_put(data->phy->dev->driver->owner);
- }
clk_disable_unprepare(data->clk);
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h
index 550bfa45762..c7271590dd0 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.h
+++ b/drivers/usb/chipidea/ci_hdrc_imx.h
@@ -9,23 +9,12 @@
* http://www.gnu.org/copyleft/gpl.html
*/
-/* Used to set SoC specific callbacks */
-struct usbmisc_ops {
- /* It's called once when probe a usb device */
- int (*init)(struct device *dev);
- /* It's called once after adding a usb device */
- int (*post)(struct device *dev);
-};
-
-struct usbmisc_usb_device {
- struct device *dev; /* usb controller device */
+struct imx_usbmisc_data {
int index;
unsigned int disable_oc:1; /* over current detect disabled */
unsigned int evdo:1; /* set external vbus divider option */
};
-int usbmisc_set_ops(const struct usbmisc_ops *ops);
-void usbmisc_unset_ops(const struct usbmisc_ops *ops);
-int
-usbmisc_get_init_data(struct device *dev, struct usbmisc_usb_device *usbdev);
+int imx_usbmisc_init(struct imx_usbmisc_data *);
+int imx_usbmisc_init_post(struct imx_usbmisc_data *);
diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c
index fb657ef50a9..2d51d852b47 100644
--- a/drivers/usb/chipidea/ci_hdrc_msm.c
+++ b/drivers/usb/chipidea/ci_hdrc_msm.c
@@ -49,7 +49,6 @@ static struct ci_hdrc_platform_data ci_hdrc_msm_platdata = {
.name = "ci_hdrc_msm",
.flags = CI_HDRC_REGS_SHARED |
CI_HDRC_REQUIRE_TRANSCEIVER |
- CI_HDRC_PULLUP_ON_VBUS |
CI_HDRC_DISABLE_STREAMING,
.notify_event = ci_hdrc_msm_notify_event,
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index a5df24c578f..94626409559 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -65,12 +65,14 @@
#include <linux/usb/chipidea.h>
#include <linux/usb/of.h>
#include <linux/phy.h>
+#include <linux/regulator/consumer.h>
#include "ci.h"
#include "udc.h"
#include "bits.h"
#include "host.h"
#include "debug.h"
+#include "otg.h"
/* Controller register map */
static uintptr_t ci_regs_nolpm[] = {
@@ -197,6 +199,12 @@ static int hw_device_init(struct ci_hdrc *ci, void __iomem *base)
if (ci->hw_ep_max > ENDPT_MAX)
return -ENODEV;
+ /* Disable all interrupt bits */
+ hw_write(ci, OP_USBINTR, 0xffffffff, 0);
+
+ /* Clear all interrupt status bits */
+ hw_write(ci, OP_USBSTS, 0xffffffff, 0xffffffff);
+
dev_dbg(ci->dev, "ChipIdea HDRC found, lpm: %d; cap: %p op: %p\n",
ci->hw_bank.lpm, ci->hw_bank.cap, ci->hw_bank.op);
@@ -264,8 +272,6 @@ int hw_device_reset(struct ci_hdrc *ci, u32 mode)
while (hw_read(ci, OP_USBCMD, USBCMD_RST))
udelay(10); /* not RTOS friendly */
- hw_phymode_configure(ci);
-
if (ci->platdata->notify_event)
ci->platdata->notify_event(ci,
CI_HDRC_CONTROLLER_RESET_EVENT);
@@ -289,37 +295,35 @@ int hw_device_reset(struct ci_hdrc *ci, u32 mode)
}
/**
- * ci_otg_role - pick role based on ID pin state
+ * hw_wait_reg: wait for a register value
+ *
+ * Sometimes we need to wait for a register value before going on.
+ * E.g. when switching to device mode, the vbus value should drop below
+ * OTGSC_BSV before connecting to the host.
+ *
* @ci: the controller
+ * @reg: register index
+ * @mask: mask bits
+ * @value: the bit value to wait for
+ * @timeout_ms: timeout in milliseconds
+ *
+ * This function returns an error code if it times out.
*/
-static enum ci_role ci_otg_role(struct ci_hdrc *ci)
-{
- u32 sts = hw_read(ci, OP_OTGSC, ~0);
- enum ci_role role = sts & OTGSC_ID
- ? CI_ROLE_GADGET
- : CI_ROLE_HOST;
-
- return role;
-}
-
-/**
- * ci_role_work - perform role changing based on ID pin
- * @work: work struct
- */
-static void ci_role_work(struct work_struct *work)
+int hw_wait_reg(struct ci_hdrc *ci, enum ci_hw_regs reg, u32 mask,
+ u32 value, unsigned int timeout_ms)
{
- struct ci_hdrc *ci = container_of(work, struct ci_hdrc, work);
- enum ci_role role = ci_otg_role(ci);
-
- if (role != ci->role) {
- dev_dbg(ci->dev, "switching from %s to %s\n",
- ci_role(ci)->name, ci->roles[role]->name);
-
- ci_role_stop(ci);
- ci_role_start(ci, role);
+ unsigned long elapse = jiffies + msecs_to_jiffies(timeout_ms);
+
+ while (hw_read(ci, reg, mask) != value) {
+ if (time_after(jiffies, elapse)) {
+ dev_err(ci->dev, "timeout waiting for %08x in %d\n",
+ mask, reg);
+ return -ETIMEDOUT;
+ }
+ msleep(20);
}
- enable_irq(ci->irq);
+ return 0;
}
static irqreturn_t ci_irq(int irq, void *data)
@@ -331,19 +335,55 @@ static irqreturn_t ci_irq(int irq, void *data)
if (ci->is_otg)
otgsc = hw_read(ci, OP_OTGSC, ~0);
- if (ci->role != CI_ROLE_END)
- ret = ci_role(ci)->irq(ci);
+ /*
+ * Handle the ID change interrupt; it indicates a device/host role
+ * switch.
+ */
+ if (ci->is_otg && (otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS)) {
+ ci->id_event = true;
+ ci_clear_otg_interrupt(ci, OTGSC_IDIS);
+ disable_irq_nosync(ci->irq);
+ queue_work(ci->wq, &ci->work);
+ return IRQ_HANDLED;
+ }
- if (ci->is_otg && (otgsc & OTGSC_IDIS)) {
- hw_write(ci, OP_OTGSC, OTGSC_IDIS, OTGSC_IDIS);
+ /*
+ * Handle the vbus change interrupt; it indicates device connection
+ * and disconnection events.
+ */
+ if (ci->is_otg && (otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS)) {
+ ci->b_sess_valid_event = true;
+ ci_clear_otg_interrupt(ci, OTGSC_BSVIS);
disable_irq_nosync(ci->irq);
queue_work(ci->wq, &ci->work);
- ret = IRQ_HANDLED;
+ return IRQ_HANDLED;
}
+ /* Handle device/host interrupt */
+ if (ci->role != CI_ROLE_END)
+ ret = ci_role(ci)->irq(ci);
+
return ret;
}
+static int ci_get_platdata(struct device *dev,
+ struct ci_hdrc_platform_data *platdata)
+{
+ /* Get the vbus regulator */
+ platdata->reg_vbus = devm_regulator_get(dev, "vbus");
+ if (PTR_ERR(platdata->reg_vbus) == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else if (PTR_ERR(platdata->reg_vbus) == -ENODEV) {
+ platdata->reg_vbus = NULL; /* no vbus regulator is needed */
+ } else if (IS_ERR(platdata->reg_vbus)) {
+ dev_err(dev, "Getting regulator error: %ld\n",
+ PTR_ERR(platdata->reg_vbus));
+ return PTR_ERR(platdata->reg_vbus);
+ }
+
+ return 0;
+}
+
static DEFINE_IDA(ci_ida);
struct platform_device *ci_hdrc_add_device(struct device *dev,
@@ -353,6 +393,10 @@ struct platform_device *ci_hdrc_add_device(struct device *dev,
struct platform_device *pdev;
int id, ret;
+ ret = ci_get_platdata(dev, platdata);
+ if (ret)
+ return ERR_PTR(ret);
+
id = ida_simple_get(&ci_ida, 0, 0, GFP_KERNEL);
if (id < 0)
return ERR_PTR(id);
@@ -398,6 +442,29 @@ void ci_hdrc_remove_device(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(ci_hdrc_remove_device);
+static inline void ci_role_destroy(struct ci_hdrc *ci)
+{
+ ci_hdrc_gadget_destroy(ci);
+ ci_hdrc_host_destroy(ci);
+ if (ci->is_otg)
+ ci_hdrc_otg_destroy(ci);
+}
+
+static void ci_get_otg_capable(struct ci_hdrc *ci)
+{
+ if (ci->platdata->flags & CI_HDRC_DUAL_ROLE_NOT_OTG)
+ ci->is_otg = false;
+ else
+ ci->is_otg = (hw_read(ci, CAP_DCCPARAMS,
+ DCCPARAMS_DC | DCCPARAMS_HC)
+ == (DCCPARAMS_DC | DCCPARAMS_HC));
+ if (ci->is_otg) {
+ dev_dbg(ci->dev, "It is an OTG capable controller\n");
+ ci_disable_otg_interrupt(ci, OTGSC_INT_EN_BITS);
+ ci_clear_otg_interrupt(ci, OTGSC_INT_STATUS_BITS);
+ }
+}
+
static int ci_hdrc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -406,15 +473,13 @@ static int ci_hdrc_probe(struct platform_device *pdev)
void __iomem *base;
int ret;
enum usb_dr_mode dr_mode;
+ struct device_node *of_node = dev->of_node ?: dev->parent->of_node;
if (!dev->platform_data) {
dev_err(dev, "platform data missing\n");
return -ENODEV;
}
- if (!dev->of_node && dev->parent)
- dev->of_node = dev->parent->of_node;
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
@@ -447,18 +512,15 @@ static int ci_hdrc_probe(struct platform_device *pdev)
return -ENODEV;
}
- INIT_WORK(&ci->work, ci_role_work);
- ci->wq = create_singlethread_workqueue("ci_otg");
- if (!ci->wq) {
- dev_err(dev, "can't create workqueue\n");
- return -ENODEV;
- }
+ ci_get_otg_capable(ci);
if (!ci->platdata->phy_mode)
- ci->platdata->phy_mode = of_usb_get_phy_mode(dev->of_node);
+ ci->platdata->phy_mode = of_usb_get_phy_mode(of_node);
+
+ hw_phymode_configure(ci);
if (!ci->platdata->dr_mode)
- ci->platdata->dr_mode = of_usb_get_dr_mode(dev->of_node);
+ ci->platdata->dr_mode = of_usb_get_dr_mode(of_node);
if (ci->platdata->dr_mode == USB_DR_MODE_UNKNOWN)
ci->platdata->dr_mode = USB_DR_MODE_OTG;
@@ -479,15 +541,34 @@ static int ci_hdrc_probe(struct platform_device *pdev)
if (!ci->roles[CI_ROLE_HOST] && !ci->roles[CI_ROLE_GADGET]) {
dev_err(dev, "no supported roles\n");
- ret = -ENODEV;
- goto rm_wq;
+ return -ENODEV;
+ }
+
+ if (ci->is_otg) {
+ ret = ci_hdrc_otg_init(ci);
+ if (ret) {
+ dev_err(dev, "init otg fails, ret = %d\n", ret);
+ goto stop;
+ }
}
if (ci->roles[CI_ROLE_HOST] && ci->roles[CI_ROLE_GADGET]) {
- ci->is_otg = true;
- /* ID pin needs 1ms debouce time, we delay 2ms for safe */
- mdelay(2);
- ci->role = ci_otg_role(ci);
+ if (ci->is_otg) {
+ /*
+ * The ID pin needs 1 ms of debounce time,
+ * so we delay 2 ms to be safe.
+ */
+ mdelay(2);
+ ci->role = ci_otg_role(ci);
+ ci_enable_otg_interrupt(ci, OTGSC_IDIE);
+ } else {
+ /*
+ * If the controller is not OTG capable but supports role
+ * switching, the default role is gadget, and the user can
+ * switch it through debugfs.
+ */
+ ci->role = CI_ROLE_GADGET;
+ }
} else {
ci->role = ci->roles[CI_ROLE_HOST]
? CI_ROLE_HOST
@@ -497,8 +578,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
ret = ci_role_start(ci, ci->role);
if (ret) {
dev_err(dev, "can't start %s role\n", ci_role(ci)->name);
- ret = -ENODEV;
- goto rm_wq;
+ goto stop;
}
platform_set_drvdata(pdev, ci);
@@ -507,19 +587,13 @@ static int ci_hdrc_probe(struct platform_device *pdev)
if (ret)
goto stop;
- if (ci->is_otg)
- hw_write(ci, OP_OTGSC, OTGSC_IDIE, OTGSC_IDIE);
-
ret = dbg_create_files(ci);
if (!ret)
return 0;
free_irq(ci->irq, ci);
stop:
- ci_role_stop(ci);
-rm_wq:
- flush_workqueue(ci->wq);
- destroy_workqueue(ci->wq);
+ ci_role_destroy(ci);
return ret;
}
@@ -529,10 +603,8 @@ static int ci_hdrc_remove(struct platform_device *pdev)
struct ci_hdrc *ci = platform_get_drvdata(pdev);
dbg_remove_files(ci);
- flush_workqueue(ci->wq);
- destroy_workqueue(ci->wq);
free_irq(ci->irq, ci);
- ci_role_stop(ci);
+ ci_role_destroy(ci);
return 0;
}
@@ -548,7 +620,6 @@ static struct platform_driver ci_hdrc_driver = {
module_platform_driver(ci_hdrc_driver);
MODULE_ALIAS("platform:ci_hdrc");
-MODULE_ALIAS("platform:ci13xxx");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("David Lopo <dlopo@chipidea.mips.com>");
MODULE_DESCRIPTION("ChipIdea HDRC Driver");
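A minimal usage sketch of the hw_wait_reg() helper added above, assuming a struct ci_hdrc *ci in scope; it mirrors the caller added in otg.c later in this series, where CI_VBUS_STABLE_TIMEOUT_MS is 5000 ms. The helper name ci_example_wait_vbus_low is hypothetical.

/* Sketch: poll OTGSC until the B-session-valid bit drops, warn on timeout. */
static void ci_example_wait_vbus_low(struct ci_hdrc *ci)
{
	if (hw_wait_reg(ci, OP_OTGSC, OTGSC_BSV, 0, 5000))
		dev_warn(ci->dev, "vbus did not drop below BSV in 5 s\n");
}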
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 40d0fda4f66..6f96795dd20 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -24,6 +24,7 @@
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/usb/chipidea.h>
+#include <linux/regulator/consumer.h>
#include "../host/ehci.h"
@@ -63,10 +64,21 @@ static int host_start(struct ci_hdrc *ci)
ehci = hcd_to_ehci(hcd);
ehci->caps = ci->hw_bank.cap;
ehci->has_hostpc = ci->hw_bank.lpm;
+ ehci->has_tdi_phy_lpm = ci->hw_bank.lpm;
+
+ if (ci->platdata->reg_vbus) {
+ ret = regulator_enable(ci->platdata->reg_vbus);
+ if (ret) {
+ dev_err(ci->dev,
+ "Failed to enable vbus regulator, ret=%d\n",
+ ret);
+ goto put_hcd;
+ }
+ }
ret = usb_add_hcd(hcd, 0, 0);
if (ret)
- usb_put_hcd(hcd);
+ goto disable_reg;
else
ci->hcd = hcd;
@@ -74,6 +86,14 @@ static int host_start(struct ci_hdrc *ci)
hw_write(ci, OP_USBMODE, USBMODE_CI_SDIS, USBMODE_CI_SDIS);
return ret;
+
+disable_reg:
+ regulator_disable(ci->platdata->reg_vbus);
+
+put_hcd:
+ usb_put_hcd(hcd);
+
+ return ret;
}
static void host_stop(struct ci_hdrc *ci)
@@ -82,6 +102,15 @@ static void host_stop(struct ci_hdrc *ci)
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
+ if (ci->platdata->reg_vbus)
+ regulator_disable(ci->platdata->reg_vbus);
+}
+
+
+void ci_hdrc_host_destroy(struct ci_hdrc *ci)
+{
+ if (ci->role == CI_ROLE_HOST)
+ host_stop(ci);
}
int ci_hdrc_host_init(struct ci_hdrc *ci)
diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h
index 058875c1533..5707bf379bf 100644
--- a/drivers/usb/chipidea/host.h
+++ b/drivers/usb/chipidea/host.h
@@ -4,6 +4,7 @@
#ifdef CONFIG_USB_CHIPIDEA_HOST
int ci_hdrc_host_init(struct ci_hdrc *ci);
+void ci_hdrc_host_destroy(struct ci_hdrc *ci);
#else
@@ -12,6 +13,11 @@ static inline int ci_hdrc_host_init(struct ci_hdrc *ci)
return -ENXIO;
}
+static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci)
+{
+
+}
+
#endif
#endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
new file mode 100644
index 00000000000..39bd7ec8bf7
--- /dev/null
+++ b/drivers/usb/chipidea/otg.c
@@ -0,0 +1,120 @@
+/*
+ * otg.c - ChipIdea USB IP core OTG driver
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * Author: Peter Chen
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This file mainly handles the OTGSC register; it may include OTG
+ * operations in the future.
+ */
+
+#include <linux/usb/otg.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/chipidea.h>
+
+#include "ci.h"
+#include "bits.h"
+#include "otg.h"
+
+/**
+ * ci_otg_role - pick role based on ID pin state
+ * @ci: the controller
+ */
+enum ci_role ci_otg_role(struct ci_hdrc *ci)
+{
+ u32 sts = hw_read(ci, OP_OTGSC, ~0);
+ enum ci_role role = sts & OTGSC_ID
+ ? CI_ROLE_GADGET
+ : CI_ROLE_HOST;
+
+ return role;
+}
+
+void ci_handle_vbus_change(struct ci_hdrc *ci)
+{
+ u32 otgsc;
+
+ if (!ci->is_otg)
+ return;
+
+ otgsc = hw_read(ci, OP_OTGSC, ~0);
+
+ if (otgsc & OTGSC_BSV)
+ usb_gadget_vbus_connect(&ci->gadget);
+ else
+ usb_gadget_vbus_disconnect(&ci->gadget);
+}
+
+#define CI_VBUS_STABLE_TIMEOUT_MS 5000
+static void ci_handle_id_switch(struct ci_hdrc *ci)
+{
+ enum ci_role role = ci_otg_role(ci);
+
+ if (role != ci->role) {
+ dev_dbg(ci->dev, "switching from %s to %s\n",
+ ci_role(ci)->name, ci->roles[role]->name);
+
+ ci_role_stop(ci);
+ /* wait for vbus to drop below OTGSC_BSV */
+ hw_wait_reg(ci, OP_OTGSC, OTGSC_BSV, 0,
+ CI_VBUS_STABLE_TIMEOUT_MS);
+ ci_role_start(ci, role);
+ }
+}
+/**
+ * ci_otg_work - perform otg (vbus/id) event handling
+ * @work: work struct
+ */
+static void ci_otg_work(struct work_struct *work)
+{
+ struct ci_hdrc *ci = container_of(work, struct ci_hdrc, work);
+
+ if (ci->id_event) {
+ ci->id_event = false;
+ ci_handle_id_switch(ci);
+ } else if (ci->b_sess_valid_event) {
+ ci->b_sess_valid_event = false;
+ ci_handle_vbus_change(ci);
+ } else
+ dev_err(ci->dev, "unexpected event occurs at %s\n", __func__);
+
+ enable_irq(ci->irq);
+}
+
+
+/**
+ * ci_hdrc_otg_init - initialize otg struct
+ * @ci: the controller
+ */
+int ci_hdrc_otg_init(struct ci_hdrc *ci)
+{
+ INIT_WORK(&ci->work, ci_otg_work);
+ ci->wq = create_singlethread_workqueue("ci_otg");
+ if (!ci->wq) {
+ dev_err(ci->dev, "can't create workqueue\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * ci_hdrc_otg_destroy - destroy otg struct
+ * @ci: the controller
+ */
+void ci_hdrc_otg_destroy(struct ci_hdrc *ci)
+{
+ if (ci->wq) {
+ flush_workqueue(ci->wq);
+ destroy_workqueue(ci->wq);
+ }
+ ci_disable_otg_interrupt(ci, OTGSC_INT_EN_BITS);
+ ci_clear_otg_interrupt(ci, OTGSC_INT_STATUS_BITS);
+}
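To make the new event path easier to follow, here is a call-flow sketch restating (not extending) the code in ci_irq() above and ci_otg_work() in this file:

/*
 * Call-flow sketch (restates existing code, adds no new API):
 *
 *   ci_irq()                               -- core.c
 *     OTGSC_IDIS or OTGSC_BSVIS pending
 *       -> set ci->id_event / ci->b_sess_valid_event
 *       -> ci_clear_otg_interrupt(), disable_irq_nosync()
 *       -> queue_work(ci->wq, &ci->work)
 *
 *   ci_otg_work()                          -- otg.c
 *     id event   -> ci_handle_id_switch(): ci_role_stop(),
 *                   hw_wait_reg(OTGSC_BSV low), ci_role_start()
 *     vbus event -> ci_handle_vbus_change(): usb_gadget_vbus_connect() or
 *                   usb_gadget_vbus_disconnect()
 *     finally    -> enable_irq(ci->irq)
 */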
diff --git a/drivers/usb/chipidea/otg.h b/drivers/usb/chipidea/otg.h
new file mode 100644
index 00000000000..2d9f090733b
--- /dev/null
+++ b/drivers/usb/chipidea/otg.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * Author: Peter Chen
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DRIVERS_USB_CHIPIDEA_OTG_H
+#define __DRIVERS_USB_CHIPIDEA_OTG_H
+
+static inline void ci_clear_otg_interrupt(struct ci_hdrc *ci, u32 bits)
+{
+ /* Only clear the requested bits */
+ hw_write(ci, OP_OTGSC, OTGSC_INT_STATUS_BITS, bits);
+}
+
+static inline void ci_enable_otg_interrupt(struct ci_hdrc *ci, u32 bits)
+{
+ hw_write(ci, OP_OTGSC, bits, bits);
+}
+
+static inline void ci_disable_otg_interrupt(struct ci_hdrc *ci, u32 bits)
+{
+ hw_write(ci, OP_OTGSC, bits, 0);
+}
+
+int ci_hdrc_otg_init(struct ci_hdrc *ci);
+void ci_hdrc_otg_destroy(struct ci_hdrc *ci);
+enum ci_role ci_otg_role(struct ci_hdrc *ci);
+void ci_handle_vbus_change(struct ci_hdrc *ci);
+
+#endif /* __DRIVERS_USB_CHIPIDEA_OTG_H */
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index e475fcda1d6..6b4c2f2eb94 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -27,6 +27,7 @@
#include "udc.h"
#include "bits.h"
#include "debug.h"
+#include "otg.h"
/* control endpoint description */
static const struct usb_endpoint_descriptor
@@ -84,8 +85,10 @@ static int hw_device_state(struct ci_hdrc *ci, u32 dma)
/* interrupt, error, port change, reset, sleep/suspend */
hw_write(ci, OP_USBINTR, ~0,
USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI);
+ hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
} else {
hw_write(ci, OP_USBINTR, ~0, 0);
+ hw_write(ci, OP_USBCMD, USBCMD_RS, 0);
}
return 0;
}
@@ -1445,9 +1448,6 @@ static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
unsigned long flags;
int gadget_ready = 0;
- if (!(ci->platdata->flags & CI_HDRC_PULLUP_ON_VBUS))
- return -EOPNOTSUPP;
-
spin_lock_irqsave(&ci->lock, flags);
ci->vbus_active = is_active;
if (ci->driver)
@@ -1459,6 +1459,7 @@ static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
pm_runtime_get_sync(&_gadget->dev);
hw_device_reset(ci, USBMODE_CM_DC);
hw_device_state(ci, ci->ep0out->qh.dma);
+ dev_dbg(ci->dev, "Connected to host\n");
} else {
hw_device_state(ci, 0);
if (ci->platdata->notify_event)
@@ -1466,6 +1467,7 @@ static int ci_udc_vbus_session(struct usb_gadget *_gadget, int is_active)
CI_HDRC_CONTROLLER_STOPPED_EVENT);
_gadget_stop_activity(&ci->gadget);
pm_runtime_put_sync(&_gadget->dev);
+ dev_dbg(ci->dev, "Disconnected from host\n");
}
}
@@ -1509,6 +1511,9 @@ static int ci_udc_pullup(struct usb_gadget *_gadget, int is_on)
{
struct ci_hdrc *ci = container_of(_gadget, struct ci_hdrc, gadget);
+ if (!ci->vbus_active)
+ return -EOPNOTSUPP;
+
if (is_on)
hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS);
else
@@ -1630,14 +1635,11 @@ static int ci_udc_start(struct usb_gadget *gadget,
ci->driver = driver;
pm_runtime_get_sync(&ci->gadget.dev);
- if (ci->platdata->flags & CI_HDRC_PULLUP_ON_VBUS) {
- if (ci->vbus_active) {
- if (ci->platdata->flags & CI_HDRC_REGS_SHARED)
- hw_device_reset(ci, USBMODE_CM_DC);
- } else {
- pm_runtime_put_sync(&ci->gadget.dev);
- goto done;
- }
+ if (ci->vbus_active) {
+ hw_device_reset(ci, USBMODE_CM_DC);
+ } else {
+ pm_runtime_put_sync(&ci->gadget.dev);
+ goto done;
}
retval = hw_device_state(ci, ci->ep0out->qh.dma);
@@ -1660,8 +1662,7 @@ static int ci_udc_stop(struct usb_gadget *gadget,
spin_lock_irqsave(&ci->lock, flags);
- if (!(ci->platdata->flags & CI_HDRC_PULLUP_ON_VBUS) ||
- ci->vbus_active) {
+ if (ci->vbus_active) {
hw_device_state(ci, 0);
if (ci->platdata->notify_event)
ci->platdata->notify_event(ci,
@@ -1796,16 +1797,15 @@ static int udc_start(struct ci_hdrc *ci)
}
}
- if (!(ci->platdata->flags & CI_HDRC_REGS_SHARED)) {
- retval = hw_device_reset(ci, USBMODE_CM_DC);
- if (retval)
- goto put_transceiver;
- }
-
if (ci->transceiver) {
retval = otg_set_peripheral(ci->transceiver->otg,
&ci->gadget);
- if (retval)
+ /*
+ * If all USB functions are implemented with chipidea drivers,
+ * there is no need to call the above API; likewise, if only the
+ * gadget function is used, calling it has no effect.
+ */
+ if (retval && retval != -ENOTSUPP)
goto put_transceiver;
}
@@ -1816,6 +1816,9 @@ static int udc_start(struct ci_hdrc *ci)
pm_runtime_no_callbacks(&ci->gadget.dev);
pm_runtime_enable(&ci->gadget.dev);
+ /* Update ci->vbus_active */
+ ci_handle_vbus_change(ci);
+
return retval;
remove_trans:
@@ -1839,13 +1842,13 @@ free_qh_pool:
}
/**
- * udc_remove: parent remove must call this to remove UDC
+ * ci_hdrc_gadget_destroy: parent remove must call this to remove UDC
*
* No interrupts active, the IRQ has been released
*/
-static void udc_stop(struct ci_hdrc *ci)
+void ci_hdrc_gadget_destroy(struct ci_hdrc *ci)
{
- if (ci == NULL)
+ if (!ci->roles[CI_ROLE_GADGET])
return;
usb_del_gadget_udc(&ci->gadget);
@@ -1860,15 +1863,32 @@ static void udc_stop(struct ci_hdrc *ci)
if (ci->global_phy)
usb_put_phy(ci->transceiver);
}
- /* my kobject is dynamic, I swear! */
- memset(&ci->gadget, 0, sizeof(ci->gadget));
+}
+
+static int udc_id_switch_for_device(struct ci_hdrc *ci)
+{
+ if (ci->is_otg) {
+ ci_clear_otg_interrupt(ci, OTGSC_BSVIS);
+ ci_enable_otg_interrupt(ci, OTGSC_BSVIE);
+ }
+
+ return 0;
+}
+
+static void udc_id_switch_for_host(struct ci_hdrc *ci)
+{
+ if (ci->is_otg) {
+ /* the host doesn't care about the B_SESSION_VALID event */
+ ci_clear_otg_interrupt(ci, OTGSC_BSVIS);
+ ci_disable_otg_interrupt(ci, OTGSC_BSVIE);
+ }
}
/**
* ci_hdrc_gadget_init - initialize device related bits
* ci: the controller
*
- * This function enables the gadget role, if the device is "device capable".
+ * This function initializes the gadget, if the device is "device capable".
*/
int ci_hdrc_gadget_init(struct ci_hdrc *ci)
{
@@ -1881,11 +1901,11 @@ int ci_hdrc_gadget_init(struct ci_hdrc *ci)
if (!rdrv)
return -ENOMEM;
- rdrv->start = udc_start;
- rdrv->stop = udc_stop;
+ rdrv->start = udc_id_switch_for_device;
+ rdrv->stop = udc_id_switch_for_host;
rdrv->irq = udc_irq;
rdrv->name = "gadget";
ci->roles[CI_ROLE_GADGET] = rdrv;
- return 0;
+ return udc_start(ci);
}
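For clarity, the new split of responsibilities in the gadget role, restated from the changes above:

/*
 * Sketch of the gadget-role lifecycle after this patch (no new API):
 *
 *   ci_hdrc_gadget_init()      -> udc_start()              once, at probe
 *   role start ("gadget")      -> udc_id_switch_for_device(): enable BSVIE
 *   role stop                  -> udc_id_switch_for_host():   disable BSVIE
 *   ci_hdrc_gadget_destroy()   -> usb_del_gadget_udc() etc.  once, at remove
 *                                  (called from ci_role_destroy() in core.c)
 */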
diff --git a/drivers/usb/chipidea/udc.h b/drivers/usb/chipidea/udc.h
index 455ac216922..e66df0020bd 100644
--- a/drivers/usb/chipidea/udc.h
+++ b/drivers/usb/chipidea/udc.h
@@ -84,6 +84,7 @@ struct ci_hw_req {
#ifdef CONFIG_USB_CHIPIDEA_UDC
int ci_hdrc_gadget_init(struct ci_hdrc *ci);
+void ci_hdrc_gadget_destroy(struct ci_hdrc *ci);
#else
@@ -92,6 +93,11 @@ static inline int ci_hdrc_gadget_init(struct ci_hdrc *ci)
return -ENXIO;
}
+static inline void ci_hdrc_gadget_destroy(struct ci_hdrc *ci)
+{
+
+}
+
#endif
#endif /* __DRIVERS_USB_CHIPIDEA_UDC_H */
diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c
index ac5a4615520..8a1094b1182 100644
--- a/drivers/usb/chipidea/usbmisc_imx.c
+++ b/drivers/usb/chipidea/usbmisc_imx.c
@@ -18,8 +18,6 @@
#include "ci_hdrc_imx.h"
-#define USB_DEV_MAX 4
-
#define MX25_USB_PHY_CTRL_OFFSET 0x08
#define MX25_BM_EXTERNAL_VBUS_DIVIDER BIT(23)
@@ -32,51 +30,34 @@
#define MX6_BM_OVER_CUR_DIS BIT(7)
+struct usbmisc_ops {
+ /* It's called once when probing a usb device */
+ int (*init)(struct imx_usbmisc_data *data);
+ /* It's called once after adding a usb device */
+ int (*post)(struct imx_usbmisc_data *data);
+};
+
struct imx_usbmisc {
void __iomem *base;
spinlock_t lock;
struct clk *clk;
- struct usbmisc_usb_device usbdev[USB_DEV_MAX];
const struct usbmisc_ops *ops;
};
static struct imx_usbmisc *usbmisc;
-static struct usbmisc_usb_device *get_usbdev(struct device *dev)
-{
- int i, ret;
-
- for (i = 0; i < USB_DEV_MAX; i++) {
- if (usbmisc->usbdev[i].dev == dev)
- return &usbmisc->usbdev[i];
- else if (!usbmisc->usbdev[i].dev)
- break;
- }
-
- if (i >= USB_DEV_MAX)
- return ERR_PTR(-EBUSY);
-
- ret = usbmisc_get_init_data(dev, &usbmisc->usbdev[i]);
- if (ret)
- return ERR_PTR(ret);
-
- return &usbmisc->usbdev[i];
-}
-
-static int usbmisc_imx25_post(struct device *dev)
+static int usbmisc_imx25_post(struct imx_usbmisc_data *data)
{
- struct usbmisc_usb_device *usbdev;
void __iomem *reg;
unsigned long flags;
u32 val;
- usbdev = get_usbdev(dev);
- if (IS_ERR(usbdev))
- return PTR_ERR(usbdev);
+ if (data->index > 2)
+ return -EINVAL;
reg = usbmisc->base + MX25_USB_PHY_CTRL_OFFSET;
- if (usbdev->evdo) {
+ if (data->evdo) {
spin_lock_irqsave(&usbmisc->lock, flags);
val = readl(reg);
writel(val | MX25_BM_EXTERNAL_VBUS_DIVIDER, reg);
@@ -87,20 +68,18 @@ static int usbmisc_imx25_post(struct device *dev)
return 0;
}
-static int usbmisc_imx53_init(struct device *dev)
+static int usbmisc_imx53_init(struct imx_usbmisc_data *data)
{
- struct usbmisc_usb_device *usbdev;
void __iomem *reg = NULL;
unsigned long flags;
u32 val = 0;
- usbdev = get_usbdev(dev);
- if (IS_ERR(usbdev))
- return PTR_ERR(usbdev);
+ if (data->index > 3)
+ return -EINVAL;
- if (usbdev->disable_oc) {
+ if (data->disable_oc) {
spin_lock_irqsave(&usbmisc->lock, flags);
- switch (usbdev->index) {
+ switch (data->index) {
case 0:
reg = usbmisc->base + MX53_USB_OTG_PHY_CTRL_0_OFFSET;
val = readl(reg) | MX53_BM_OVER_CUR_DIS_OTG;
@@ -126,22 +105,19 @@ static int usbmisc_imx53_init(struct device *dev)
return 0;
}
-static int usbmisc_imx6q_init(struct device *dev)
+static int usbmisc_imx6q_init(struct imx_usbmisc_data *data)
{
-
- struct usbmisc_usb_device *usbdev;
unsigned long flags;
u32 reg;
- usbdev = get_usbdev(dev);
- if (IS_ERR(usbdev))
- return PTR_ERR(usbdev);
+ if (data->index > 3)
+ return -EINVAL;
- if (usbdev->disable_oc) {
+ if (data->disable_oc) {
spin_lock_irqsave(&usbmisc->lock, flags);
- reg = readl(usbmisc->base + usbdev->index * 4);
+ reg = readl(usbmisc->base + data->index * 4);
writel(reg | MX6_BM_OVER_CUR_DIS,
- usbmisc->base + usbdev->index * 4);
+ usbmisc->base + data->index * 4);
spin_unlock_irqrestore(&usbmisc->lock, flags);
}
@@ -160,6 +136,26 @@ static const struct usbmisc_ops imx6q_usbmisc_ops = {
.init = usbmisc_imx6q_init,
};
+int imx_usbmisc_init(struct imx_usbmisc_data *data)
+{
+ if (!usbmisc)
+ return -EPROBE_DEFER;
+ if (!usbmisc->ops->init)
+ return 0;
+ return usbmisc->ops->init(data);
+}
+EXPORT_SYMBOL_GPL(imx_usbmisc_init);
+
+int imx_usbmisc_init_post(struct imx_usbmisc_data *data)
+{
+ if (!usbmisc)
+ return -EPROBE_DEFER;
+ if (!usbmisc->ops->post)
+ return 0;
+ return usbmisc->ops->post(data);
+}
+EXPORT_SYMBOL_GPL(imx_usbmisc_init_post);
+
static const struct of_device_id usbmisc_imx_dt_ids[] = {
{
.compatible = "fsl,imx25-usbmisc",
@@ -216,19 +212,12 @@ static int usbmisc_imx_probe(struct platform_device *pdev)
of_match_device(usbmisc_imx_dt_ids, &pdev->dev);
data->ops = (const struct usbmisc_ops *)tmp_dev->data;
usbmisc = data;
- ret = usbmisc_set_ops(data->ops);
- if (ret) {
- usbmisc = NULL;
- clk_disable_unprepare(data->clk);
- return ret;
- }
return 0;
}
static int usbmisc_imx_remove(struct platform_device *pdev)
{
- usbmisc_unset_ops(usbmisc->ops);
clk_disable_unprepare(usbmisc->clk);
usbmisc = NULL;
return 0;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 9f49bfe4c6f..3e7560f004f 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1295,7 +1295,7 @@ skip_countries:
usb_rcvintpipe(usb_dev, epctrl->bEndpointAddress),
acm->ctrl_buffer, ctrlsize, acm_ctrl_irq, acm,
/* works around buggy devices */
- epctrl->bInterval ? epctrl->bInterval : 0xff);
+ epctrl->bInterval ? epctrl->bInterval : 16);
acm->ctrlurb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
acm->ctrlurb->transfer_dma = acm->ctrl_dma;
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 8a230f0ef77..d3318a0df8e 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -209,6 +209,7 @@ skip_error:
static void wdm_int_callback(struct urb *urb)
{
int rv = 0;
+ int responding;
int status = urb->status;
struct wdm_device *desc;
struct usb_cdc_notification *dr;
@@ -262,8 +263,8 @@ static void wdm_int_callback(struct urb *urb)
spin_lock(&desc->iuspin);
clear_bit(WDM_READ, &desc->flags);
- set_bit(WDM_RESPONDING, &desc->flags);
- if (!test_bit(WDM_DISCONNECTING, &desc->flags)
+ responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
+ if (!responding && !test_bit(WDM_DISCONNECTING, &desc->flags)
&& !test_bit(WDM_SUSPENDING, &desc->flags)) {
rv = usb_submit_urb(desc->response, GFP_ATOMIC);
dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d",
@@ -685,16 +686,20 @@ static void wdm_rxwork(struct work_struct *work)
{
struct wdm_device *desc = container_of(work, struct wdm_device, rxwork);
unsigned long flags;
- int rv;
+ int rv = 0;
+ int responding;
spin_lock_irqsave(&desc->iuspin, flags);
if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
spin_unlock_irqrestore(&desc->iuspin, flags);
} else {
+ responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irqrestore(&desc->iuspin, flags);
- rv = usb_submit_urb(desc->response, GFP_KERNEL);
+ if (!responding)
+ rv = usb_submit_urb(desc->response, GFP_KERNEL);
if (rv < 0 && rv != -EPERM) {
spin_lock_irqsave(&desc->iuspin, flags);
+ clear_bit(WDM_RESPONDING, &desc->flags);
if (!test_bit(WDM_DISCONNECTING, &desc->flags))
schedule_work(&desc->rxwork);
spin_unlock_irqrestore(&desc->iuspin, flags);
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 609dbc2f715..09de131ee0c 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -19,6 +19,8 @@
* http://www.gnu.org/copyleft/gpl.html.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -119,7 +121,6 @@ static void usbtmc_delete(struct kref *kref)
struct usbtmc_device_data *data = to_usbtmc_data(kref);
usb_put_dev(data->usb_dev);
- kfree(data);
}
static int usbtmc_open(struct inode *inode, struct file *filp)
@@ -130,10 +131,8 @@ static int usbtmc_open(struct inode *inode, struct file *filp)
intf = usb_find_interface(&usbtmc_driver, iminor(inode));
if (!intf) {
- printk(KERN_ERR KBUILD_MODNAME
- ": can not find device for minor %d", iminor(inode));
- retval = -ENODEV;
- goto exit;
+ pr_err("can not find device for minor %d", iminor(inode));
+ return -ENODEV;
}
data = usb_get_intfdata(intf);
@@ -142,7 +141,6 @@ static int usbtmc_open(struct inode *inode, struct file *filp)
/* Store pointer in file structure's private data field */
filp->private_data = data;
-exit:
return retval;
}
@@ -394,12 +392,12 @@ static int send_request_dev_dep_msg_in(struct usbtmc_device_data *data, size_t t
*/
buffer[0] = 2;
buffer[1] = data->bTag;
- buffer[2] = ~(data->bTag);
+ buffer[2] = ~data->bTag;
buffer[3] = 0; /* Reserved */
- buffer[4] = (transfer_size) & 255;
- buffer[5] = ((transfer_size) >> 8) & 255;
- buffer[6] = ((transfer_size) >> 16) & 255;
- buffer[7] = ((transfer_size) >> 24) & 255;
+ buffer[4] = transfer_size >> 0;
+ buffer[5] = transfer_size >> 8;
+ buffer[6] = transfer_size >> 16;
+ buffer[7] = transfer_size >> 24;
buffer[8] = data->TermCharEnabled * 2;
/* Use term character? */
buffer[9] = data->TermChar;
@@ -418,7 +416,7 @@ static int send_request_dev_dep_msg_in(struct usbtmc_device_data *data, size_t t
/* Increment bTag -- and increment again if zero */
data->bTag++;
if (!data->bTag)
- (data->bTag)++;
+ data->bTag++;
if (retval < 0) {
dev_err(&data->intf->dev, "usb_bulk_msg in send_request_dev_dep_msg_in() returned %d\n", retval);
@@ -473,7 +471,7 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
done = 0;
while (remaining > 0) {
- if (!(data->rigol_quirk)) {
+ if (!data->rigol_quirk) {
dev_dbg(dev, "usb_bulk_msg_in: remaining(%zu), count(%zu)\n", remaining, count);
if (remaining > USBTMC_SIZE_IOBUFFER - USBTMC_HEADER_SIZE - 3)
@@ -510,7 +508,7 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
}
/* Parse header in first packet */
- if ((done == 0) || (!(data->rigol_quirk))) {
+ if ((done == 0) || !data->rigol_quirk) {
/* Sanity checks for the header */
if (actual < USBTMC_HEADER_SIZE) {
dev_err(dev, "Device sent too small first packet: %u < %u\n", actual, USBTMC_HEADER_SIZE);
@@ -554,14 +552,14 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
if (remaining > n_characters)
remaining = n_characters;
/* Remove padding if it exists */
- if (actual > remaining)
+ if (actual > remaining)
actual = remaining;
}
else {
if (this_part > n_characters)
this_part = n_characters;
/* Remove padding if it exists */
- if (actual > this_part)
+ if (actual > this_part)
actual = this_part;
}
@@ -570,7 +568,7 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
remaining -= actual;
/* Terminate if end-of-message bit received from device */
- if ((buffer[8] & 0x01) && (actual >= n_characters))
+ if ((buffer[8] & 0x01) && (actual >= n_characters))
remaining = 0;
dev_dbg(dev, "Bulk-IN header: remaining(%zu), buf(%p), buffer(%p) done(%zu)\n", remaining,buf,buffer,done);
@@ -585,7 +583,7 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
done += actual;
}
else {
- if (actual > remaining)
+ if (actual > remaining)
actual = remaining;
remaining -= actual;
@@ -651,12 +649,12 @@ static ssize_t usbtmc_write(struct file *filp, const char __user *buf,
/* Setup IO buffer for DEV_DEP_MSG_OUT message */
buffer[0] = 1;
buffer[1] = data->bTag;
- buffer[2] = ~(data->bTag);
+ buffer[2] = ~data->bTag;
buffer[3] = 0; /* Reserved */
- buffer[4] = this_part & 255;
- buffer[5] = (this_part >> 8) & 255;
- buffer[6] = (this_part >> 16) & 255;
- buffer[7] = (this_part >> 24) & 255;
+ buffer[4] = this_part >> 0;
+ buffer[5] = this_part >> 8;
+ buffer[6] = this_part >> 16;
+ buffer[7] = this_part >> 24;
/* buffer[8] is set above... */
buffer[9] = 0; /* Reserved */
buffer[10] = 0; /* Reserved */
@@ -901,7 +899,7 @@ err_out:
}
#define capability_attribute(name) \
-static ssize_t show_##name(struct device *dev, \
+static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
@@ -909,7 +907,7 @@ static ssize_t show_##name(struct device *dev, \
\
return sprintf(buf, "%d\n", data->capabilities.name); \
} \
-static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
+static DEVICE_ATTR_RO(name)
capability_attribute(interface_capabilities);
capability_attribute(device_capabilities);
@@ -928,7 +926,7 @@ static struct attribute_group capability_attr_grp = {
.attrs = capability_attrs,
};
-static ssize_t show_TermChar(struct device *dev,
+static ssize_t TermChar_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_interface *intf = to_usb_interface(dev);
@@ -937,7 +935,7 @@ static ssize_t show_TermChar(struct device *dev,
return sprintf(buf, "%c\n", data->TermChar);
}
-static ssize_t store_TermChar(struct device *dev,
+static ssize_t TermChar_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
@@ -949,10 +947,10 @@ static ssize_t store_TermChar(struct device *dev,
data->TermChar = buf[0];
return count;
}
-static DEVICE_ATTR(TermChar, S_IRUGO, show_TermChar, store_TermChar);
+static DEVICE_ATTR_RW(TermChar);
#define data_attribute(name) \
-static ssize_t show_##name(struct device *dev, \
+static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
@@ -960,7 +958,7 @@ static ssize_t show_##name(struct device *dev, \
\
return sprintf(buf, "%d\n", data->name); \
} \
-static ssize_t store_##name(struct device *dev, \
+static ssize_t name##_store(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
@@ -978,7 +976,7 @@ static ssize_t store_##name(struct device *dev, \
else \
return count; \
} \
-static DEVICE_ATTR(name, S_IRUGO, show_##name, store_##name)
+static DEVICE_ATTR_RW(name)
data_attribute(TermCharEnabled);
data_attribute(auto_abort);
@@ -1102,7 +1100,7 @@ static int usbtmc_probe(struct usb_interface *intf,
dev_dbg(&intf->dev, "%s called\n", __func__);
- data = kmalloc(sizeof(struct usbtmc_device_data), GFP_KERNEL);
+ data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL);
if (!data) {
dev_err(&intf->dev, "Unable to allocate kernel memory\n");
return -ENOMEM;
@@ -1119,11 +1117,11 @@ static int usbtmc_probe(struct usb_interface *intf,
/* Determine if it is a Rigol or not */
data->rigol_quirk = 0;
dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n",
- data->usb_dev->descriptor.idVendor,
- data->usb_dev->descriptor.idProduct);
+ le16_to_cpu(data->usb_dev->descriptor.idVendor),
+ le16_to_cpu(data->usb_dev->descriptor.idProduct));
for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) {
- if ((usbtmc_id_quirk[n].idVendor == data->usb_dev->descriptor.idVendor) &&
- (usbtmc_id_quirk[n].idProduct == data->usb_dev->descriptor.idProduct)) {
+ if ((usbtmc_id_quirk[n].idVendor == le16_to_cpu(data->usb_dev->descriptor.idVendor)) &&
+ (usbtmc_id_quirk[n].idProduct == le16_to_cpu(data->usb_dev->descriptor.idProduct))) {
dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n");
data->rigol_quirk = 1;
break;
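
The sysfs conversions in this file (and repeated below in driver.c, endpoint.c and hcd.c) all rely on the same rule: DEVICE_ATTR_RO()/DEVICE_ATTR_RW() derive the callback names from the attribute name, so the handlers must be called name_show()/name_store(). A minimal sketch of the pattern, using a hypothetical attribute "foo" that is not part of this patch:

/* Hypothetical example, not from this patch. */
static ssize_t foo_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	/* DEVICE_ATTR_RO(foo) below expects exactly this callback name */
	return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR_RO(foo);	/* declares dev_attr_foo with mode 0444 */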
diff --git a/drivers/usb/core/buffer.c b/drivers/usb/core/buffer.c
index b0585e623ba..23559746be9 100644
--- a/drivers/usb/core/buffer.c
+++ b/drivers/usb/core/buffer.c
@@ -43,10 +43,11 @@ static const size_t pool_max[HCD_BUFFER_POOLS] = {
*
* Call this as part of initializing a host controller that uses the dma
* memory allocators. It initializes some pools of dma-coherent memory that
- * will be shared by all drivers using that controller, or returns a negative
- * errno value on error.
+ * will be shared by all drivers using that controller.
*
* Call hcd_buffer_destroy() to clean up after using those pools.
+ *
+ * Return: 0 if successful. A negative errno value otherwise.
*/
int hcd_buffer_create(struct usb_hcd *hcd)
{
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 7199adccf44..a6b2cabe793 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -424,7 +424,8 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
if (config->desc.bDescriptorType != USB_DT_CONFIG ||
- config->desc.bLength < USB_DT_CONFIG_SIZE) {
+ config->desc.bLength < USB_DT_CONFIG_SIZE ||
+ config->desc.bLength > size) {
dev_err(ddev, "invalid descriptor for config index %d: "
"type = 0x%X, length = %d\n", cfgidx,
config->desc.bDescriptorType, config->desc.bLength);
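
The added "bLength > size" test rejects descriptors that claim more bytes than the remaining buffer actually holds. Written as a stand-alone helper, the same sanity check might look like this (an illustrative sketch, not code from this patch; struct usb_descriptor_header comes from <linux/usb/ch9.h>):

/* Sketch: accept a descriptor only if its declared length is sane
 * and fits inside the bytes still available in the buffer. */
static bool desc_length_ok(const struct usb_descriptor_header *h,
			   unsigned int remaining)
{
	return h->bLength >= sizeof(*h) && h->bLength <= remaining;
}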
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 05986507b58..737e3c19967 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -725,15 +725,15 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype,
/*
* check for the special corner case 'get_device_id' in the printer
- * class specification, where wIndex is (interface << 8 | altsetting)
- * instead of just interface
+ * class specification, which we always want to allow as it is used
+ * to query things like ink level, etc.
*/
if (requesttype == 0xa1 && request == 0) {
alt_setting = usb_find_alt_setting(ps->dev->actconfig,
index >> 8, index & 0xff);
if (alt_setting
&& alt_setting->desc.bInterfaceClass == USB_CLASS_PRINTER)
- index >>= 8;
+ return 0;
}
index &= 0xff;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 7609ac4aed1..f7841d44fed 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -94,32 +94,27 @@ ssize_t usb_show_dynids(struct usb_dynids *dynids, char *buf)
}
EXPORT_SYMBOL_GPL(usb_show_dynids);
-static ssize_t show_dynids(struct device_driver *driver, char *buf)
+static ssize_t new_id_show(struct device_driver *driver, char *buf)
{
struct usb_driver *usb_drv = to_usb_driver(driver);
return usb_show_dynids(&usb_drv->dynids, buf);
}
-static ssize_t store_new_id(struct device_driver *driver,
+static ssize_t new_id_store(struct device_driver *driver,
const char *buf, size_t count)
{
struct usb_driver *usb_drv = to_usb_driver(driver);
return usb_store_new_id(&usb_drv->dynids, driver, buf, count);
}
-static DRIVER_ATTR(new_id, S_IRUGO | S_IWUSR, show_dynids, store_new_id);
+static DRIVER_ATTR_RW(new_id);
-/**
- * store_remove_id - remove a USB device ID from this driver
- * @driver: target device driver
- * @buf: buffer for scanning device ID data
- * @count: input size
- *
- * Removes a dynamic usb device ID from this driver.
+/*
+ * Remove a USB device ID from this driver
*/
-static ssize_t
-store_remove_id(struct device_driver *driver, const char *buf, size_t count)
+static ssize_t remove_id_store(struct device_driver *driver, const char *buf,
+ size_t count)
{
struct usb_dynid *dynid, *n;
struct usb_driver *usb_driver = to_usb_driver(driver);
@@ -144,7 +139,12 @@ store_remove_id(struct device_driver *driver, const char *buf, size_t count)
spin_unlock(&usb_driver->dynids.lock);
return count;
}
-static DRIVER_ATTR(remove_id, S_IRUGO | S_IWUSR, show_dynids, store_remove_id);
+
+static ssize_t remove_id_show(struct device_driver *driver, char *buf)
+{
+ return new_id_show(driver, buf);
+}
+static DRIVER_ATTR_RW(remove_id);
static int usb_create_newid_files(struct usb_driver *usb_drv)
{
@@ -457,6 +457,8 @@ static int usb_unbind_interface(struct device *dev)
* Callers must own the device lock, so driver probe() entries don't need
* extra locking, but other call contexts may need to explicitly claim that
* lock.
+ *
+ * Return: 0 on success.
*/
int usb_driver_claim_interface(struct usb_driver *driver,
struct usb_interface *iface, void *priv)
@@ -658,6 +660,8 @@ EXPORT_SYMBOL_GPL(usb_match_one_id);
* These device tables are exported with MODULE_DEVICE_TABLE, through
* modutils, to support the driver loading functionality of USB hotplugging.
*
+ * Return: The first matching usb_device_id, or %NULL.
+ *
* What Matches:
*
* The "match_flags" element in a usb_device_id controls which
@@ -823,7 +827,8 @@ static int usb_uevent(struct device *dev, struct kobj_uevent_env *env)
* Registers a USB device driver with the USB core. The list of
* unattached devices will be rescanned whenever a new driver is
* added, allowing the new driver to attach to any recognized devices.
- * Returns a negative error code on failure and 0 on success.
+ *
+ * Return: A negative error code on failure and 0 on success.
*/
int usb_register_device_driver(struct usb_device_driver *new_udriver,
struct module *owner)
@@ -879,7 +884,8 @@ EXPORT_SYMBOL_GPL(usb_deregister_device_driver);
* Registers a USB interface driver with the USB core. The list of
* unattached interfaces will be rescanned whenever a new driver is
* added, allowing the new driver to attach to any recognized interfaces.
- * Returns a negative error code on failure and 0 on success.
+ *
+ * Return: A negative error code on failure and 0 on success.
*
* NOTE: if you want your driver to use the USB major number, you must call
* usb_register_dev() to enable that functionality. This function no longer
@@ -1213,6 +1219,8 @@ done:
* unpredictable times.
*
* This routine can run only in process context.
+ *
+ * Return: 0 if the suspend succeeded.
*/
static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
{
@@ -1294,6 +1302,8 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
* unpredictable times.
*
* This routine can run only in process context.
+ *
+ * Return: 0 on success.
*/
static int usb_resume_both(struct usb_device *udev, pm_message_t msg)
{
@@ -1491,6 +1501,8 @@ void usb_autosuspend_device(struct usb_device *udev)
* The caller must hold @udev's device lock.
*
* This routine can run only in process context.
+ *
+ * Return: 0 on success. A negative error code otherwise.
*/
int usb_autoresume_device(struct usb_device *udev)
{
@@ -1600,6 +1612,8 @@ EXPORT_SYMBOL_GPL(usb_autopm_put_interface_no_suspend);
* However if the autoresume fails then the counter is re-decremented.
*
* This routine can run only in process context.
+ *
+ * Return: 0 on success.
*/
int usb_autopm_get_interface(struct usb_interface *intf)
{
@@ -1633,6 +1647,8 @@ EXPORT_SYMBOL_GPL(usb_autopm_get_interface);
* resumed.
*
* This routine can run in atomic context.
+ *
+ * Return: 0 on success. A negative error code otherwise.
*/
int usb_autopm_get_interface_async(struct usb_interface *intf)
{
diff --git a/drivers/usb/core/endpoint.c b/drivers/usb/core/endpoint.c
index 68cc6532e74..39a24021fe4 100644
--- a/drivers/usb/core/endpoint.c
+++ b/drivers/usb/core/endpoint.c
@@ -12,7 +12,6 @@
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
-#include <linux/idr.h>
#include <linux/usb.h>
#include "usb.h"
@@ -33,31 +32,31 @@ struct ep_attribute {
container_of(_attr, struct ep_attribute, attr)
#define usb_ep_attr(field, format_string) \
-static ssize_t show_ep_##field(struct device *dev, \
+static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct ep_device *ep = to_ep_device(dev); \
return sprintf(buf, format_string, ep->desc->field); \
} \
-static DEVICE_ATTR(field, S_IRUGO, show_ep_##field, NULL);
+static DEVICE_ATTR_RO(field)
-usb_ep_attr(bLength, "%02x\n")
-usb_ep_attr(bEndpointAddress, "%02x\n")
-usb_ep_attr(bmAttributes, "%02x\n")
-usb_ep_attr(bInterval, "%02x\n")
+usb_ep_attr(bLength, "%02x\n");
+usb_ep_attr(bEndpointAddress, "%02x\n");
+usb_ep_attr(bmAttributes, "%02x\n");
+usb_ep_attr(bInterval, "%02x\n");
-static ssize_t show_ep_wMaxPacketSize(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t wMaxPacketSize_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct ep_device *ep = to_ep_device(dev);
return sprintf(buf, "%04x\n",
usb_endpoint_maxp(ep->desc) & 0x07ff);
}
-static DEVICE_ATTR(wMaxPacketSize, S_IRUGO, show_ep_wMaxPacketSize, NULL);
+static DEVICE_ATTR_RO(wMaxPacketSize);
-static ssize_t show_ep_type(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct ep_device *ep = to_ep_device(dev);
char *type = "unknown";
@@ -78,10 +77,10 @@ static ssize_t show_ep_type(struct device *dev, struct device_attribute *attr,
}
return sprintf(buf, "%s\n", type);
}
-static DEVICE_ATTR(type, S_IRUGO, show_ep_type, NULL);
+static DEVICE_ATTR_RO(type);
-static ssize_t show_ep_interval(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t interval_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct ep_device *ep = to_ep_device(dev);
char unit;
@@ -124,10 +123,10 @@ static ssize_t show_ep_interval(struct device *dev,
return sprintf(buf, "%d%cs\n", interval, unit);
}
-static DEVICE_ATTR(interval, S_IRUGO, show_ep_interval, NULL);
+static DEVICE_ATTR_RO(interval);
-static ssize_t show_ep_direction(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t direction_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct ep_device *ep = to_ep_device(dev);
char *direction;
@@ -140,7 +139,7 @@ static ssize_t show_ep_direction(struct device *dev,
direction = "out";
return sprintf(buf, "%s\n", direction);
}
-static DEVICE_ATTR(direction, S_IRUGO, show_ep_direction, NULL);
+static DEVICE_ATTR_RO(direction);
static struct attribute *ep_dev_attrs[] = {
&dev_attr_bLength.attr,
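
The wMaxPacketSize attribute masks with 0x07ff because, for high-speed high-bandwidth endpoints, bits 12:11 of wMaxPacketSize encode the number of additional transactions per microframe; only bits 10:0 are the packet size itself. A sketch of splitting the two fields (illustrative, not part of the patch):

/* Sketch: split wMaxPacketSize into packet size and transactions
 * per microframe (USB 2.0 spec, table 9-13). */
static void decode_maxp(u16 wMaxPacketSize, u16 *size, u8 *per_uframe)
{
	*size = wMaxPacketSize & 0x07ff;			/* bits 10:0 */
	*per_uframe = ((wMaxPacketSize >> 11) & 0x3) + 1;	/* bits 12:11 */
}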
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 6a4c40766f0..7421888087a 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -153,7 +153,7 @@ void usb_major_cleanup(void)
* usb_deregister_dev() must be called when the driver is done with
* the minor numbers given out by this function.
*
- * Returns -EINVAL if something bad happens with trying to register a
+ * Return: -EINVAL if something bad happens with trying to register a
* device, and 0 on success.
*/
int usb_register_dev(struct usb_interface *intf,
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index caeb8d6d39f..b9d3c43e385 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -171,6 +171,8 @@ static void ehci_wait_for_companions(struct pci_dev *pdev, struct usb_hcd *hcd,
* through the hotplug entry's driver_data.
*
* Store this function in the HCD's struct pci_driver as probe().
+ *
+ * Return: 0 if successful.
*/
int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 014dc996b4f..d6a8d23f047 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -378,9 +378,10 @@ MODULE_PARM_DESC(authorized_default,
* @buf: Buffer for USB string descriptor (header + UTF-16LE)
* @len: Length (in bytes; may be odd) of descriptor buffer.
*
- * The return value is the number of bytes filled in: 2 + 2*strlen(s) or
- * buflen, whichever is less.
+ * Return: The number of bytes filled in: 2 + 2*strlen(s) or @len,
+ * whichever is less.
*
+ * Note:
* USB String descriptors can contain at most 126 characters; input
* strings longer than that are truncated.
*/
@@ -416,7 +417,8 @@ ascii2desc(char const *s, u8 *buf, unsigned len)
*
* Produces either a manufacturer, product or serial number string for the
* virtual root hub device.
- * Returns the number of bytes filled in: the length of the descriptor or
+ *
+ * Return: The number of bytes filled in: the length of the descriptor or
* of the provided buffer, whichever is less.
*/
static unsigned
@@ -464,17 +466,13 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
struct usb_ctrlrequest *cmd;
u16 typeReq, wValue, wIndex, wLength;
u8 *ubuf = urb->transfer_buffer;
- /*
- * tbuf should be as big as the BOS descriptor and
- * the USB hub descriptor.
- */
- u8 tbuf[USB_DT_BOS_SIZE + USB_DT_USB_SS_CAP_SIZE]
- __attribute__((aligned(4)));
- const u8 *bufp = tbuf;
unsigned len = 0;
int status;
u8 patch_wakeup = 0;
u8 patch_protocol = 0;
+ u16 tbuf_size;
+ u8 *tbuf = NULL;
+ const u8 *bufp;
might_sleep();
@@ -494,6 +492,18 @@ static int rh_call_control (struct usb_hcd *hcd, struct urb *urb)
if (wLength > urb->transfer_buffer_length)
goto error;
+ /*
+ * tbuf should be at least as big as the
+ * USB hub descriptor.
+ */
+ tbuf_size = max_t(u16, sizeof(struct usb_hub_descriptor), wLength);
+ tbuf = kzalloc(tbuf_size, GFP_KERNEL);
+ if (!tbuf)
+ return -ENOMEM;
+
+ bufp = tbuf;
+
+
urb->actual_length = 0;
switch (typeReq) {
@@ -691,18 +701,12 @@ error:
bDeviceProtocol = USB_HUB_PR_HS_SINGLE_TT;
}
+ kfree(tbuf);
+
/* any errors get returned through the urb completion */
spin_lock_irq(&hcd_root_hub_lock);
usb_hcd_unlink_urb_from_ep(hcd, urb);
-
- /* This peculiar use of spinlocks echoes what real HC drivers do.
- * Avoiding calls to local_irq_disable/enable makes the code
- * RT-friendly.
- */
- spin_unlock(&hcd_root_hub_lock);
usb_hcd_giveback_urb(hcd, urb, status);
- spin_lock(&hcd_root_hub_lock);
-
spin_unlock_irq(&hcd_root_hub_lock);
return 0;
}
@@ -742,9 +746,7 @@ void usb_hcd_poll_rh_status(struct usb_hcd *hcd)
memcpy(urb->transfer_buffer, buffer, length);
usb_hcd_unlink_urb_from_ep(hcd, urb);
- spin_unlock(&hcd_root_hub_lock);
usb_hcd_giveback_urb(hcd, urb, 0);
- spin_lock(&hcd_root_hub_lock);
} else {
length = 0;
set_bit(HCD_FLAG_POLL_PENDING, &hcd->flags);
@@ -834,10 +836,7 @@ static int usb_rh_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
if (urb == hcd->status_urb) {
hcd->status_urb = NULL;
usb_hcd_unlink_urb_from_ep(hcd, urb);
-
- spin_unlock(&hcd_root_hub_lock);
usb_hcd_giveback_urb(hcd, urb, status);
- spin_lock(&hcd_root_hub_lock);
}
}
done:
@@ -850,9 +849,8 @@ static int usb_rh_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
/*
* Show & store the current value of authorized_default
*/
-static ssize_t usb_host_authorized_default_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t authorized_default_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct usb_device *rh_usb_dev = to_usb_device(dev);
struct usb_bus *usb_bus = rh_usb_dev->bus;
@@ -864,9 +862,9 @@ static ssize_t usb_host_authorized_default_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%u\n", usb_hcd->authorized_default);
}
-static ssize_t usb_host_authorized_default_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
+static ssize_t authorized_default_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
{
ssize_t result;
unsigned val;
@@ -886,11 +884,7 @@ static ssize_t usb_host_authorized_default_store(struct device *dev,
result = -EINVAL;
return result;
}
-
-static DEVICE_ATTR(authorized_default, 0644,
- usb_host_authorized_default_show,
- usb_host_authorized_default_store);
-
+static DEVICE_ATTR_RW(authorized_default);
/* Group all the USB bus attributes */
static struct attribute *usb_bus_attrs[] = {
@@ -938,6 +932,8 @@ static void usb_bus_init (struct usb_bus *bus)
*
* Assigns a bus number, and links the controller into usbcore data
* structures so that it can be seen by scanning the bus list.
+ *
+ * Return: 0 if successful. A negative error code otherwise.
*/
static int usb_register_bus(struct usb_bus *bus)
{
@@ -1002,6 +998,8 @@ static void usb_deregister_bus (struct usb_bus *bus)
* the device properly in the device tree and then calls usb_new_device()
* to register the usb device. It also assigns the root hub's USB address
* (always 1).
+ *
+ * Return: 0 if successful. A negative error code otherwise.
*/
static int register_root_hub(struct usb_hcd *hcd)
{
@@ -1108,7 +1106,9 @@ EXPORT_SYMBOL_GPL(usb_hcd_end_port_resume);
* @isoc: true for isochronous transactions, false for interrupt ones
* @bytecount: how many bytes in the transaction.
*
- * Returns approximate bus time in nanoseconds for a periodic transaction.
+ * Return: Approximate bus time in nanoseconds for a periodic transaction.
+ *
+ * Note:
* See USB 2.0 spec section 5.11.3; only periodic transfers need to be
* scheduled in software, this function is only used for such scheduling.
*/
@@ -1166,7 +1166,7 @@ EXPORT_SYMBOL_GPL(usb_calc_bus_time);
* be disabled. The actions carried out here are required for URB
* submission, as well as for endpoint shutdown and for usb_kill_urb.
*
- * Returns 0 for no error, otherwise a negative error code (in which case
+ * Return: 0 for no error, otherwise a negative error code (in which case
* the enqueue() method must fail). If no error occurs but enqueue() fails
* anyway, it must call usb_hcd_unlink_urb_from_ep() before releasing
* the private spinlock and returning.
@@ -1221,7 +1221,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_link_urb_to_ep);
* be disabled. The actions carried out here are required for making
* sure than an unlink is valid.
*
- * Returns 0 for no error, otherwise a negative error code (in which case
+ * Return: 0 for no error, otherwise a negative error code (in which case
* the dequeue() method must fail). The possible error codes are:
*
* -EIDRM: @urb was not submitted or has already completed.
@@ -1648,6 +1648,72 @@ int usb_hcd_unlink_urb (struct urb *urb, int status)
/*-------------------------------------------------------------------------*/
+static void __usb_hcd_giveback_urb(struct urb *urb)
+{
+ struct usb_hcd *hcd = bus_to_hcd(urb->dev->bus);
+ int status = urb->unlinked;
+ unsigned long flags;
+
+ urb->hcpriv = NULL;
+ if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) &&
+ urb->actual_length < urb->transfer_buffer_length &&
+ !status))
+ status = -EREMOTEIO;
+
+ unmap_urb_for_dma(hcd, urb);
+ usbmon_urb_complete(&hcd->self, urb, status);
+ usb_unanchor_urb(urb);
+
+ /* pass ownership to the completion handler */
+ urb->status = status;
+
+ /*
+ * We disable local IRQs here to avoid a possible deadlock:
+ * drivers may call spin_lock() to take a lock that might also
+ * be acquired in a hard interrupt handler.
+ *
+ * The local_irq_save()/local_irq_restore() around complete()
+ * can be removed once current USB drivers have been cleaned up
+ * and none of them can trigger that deadlock when complete()
+ * runs in a tasklet.
+ */
+ local_irq_save(flags);
+ urb->complete(urb);
+ local_irq_restore(flags);
+
+ atomic_dec(&urb->use_count);
+ if (unlikely(atomic_read(&urb->reject)))
+ wake_up(&usb_kill_urb_queue);
+ usb_put_urb(urb);
+}
+
+static void usb_giveback_urb_bh(unsigned long param)
+{
+ struct giveback_urb_bh *bh = (struct giveback_urb_bh *)param;
+ struct list_head local_list;
+
+ spin_lock_irq(&bh->lock);
+ bh->running = true;
+ restart:
+ list_replace_init(&bh->head, &local_list);
+ spin_unlock_irq(&bh->lock);
+
+ while (!list_empty(&local_list)) {
+ struct urb *urb;
+
+ urb = list_entry(local_list.next, struct urb, urb_list);
+ list_del_init(&urb->urb_list);
+ __usb_hcd_giveback_urb(urb);
+ }
+
+ /* check if there are new URBs to giveback */
+ spin_lock_irq(&bh->lock);
+ if (!list_empty(&bh->head))
+ goto restart;
+ bh->running = false;
+ spin_unlock_irq(&bh->lock);
+}
+
/**
* usb_hcd_giveback_urb - return URB from HCD to device driver
* @hcd: host controller returning the URB
@@ -1667,25 +1733,37 @@ int usb_hcd_unlink_urb (struct urb *urb, int status)
*/
void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, int status)
{
- urb->hcpriv = NULL;
- if (unlikely(urb->unlinked))
- status = urb->unlinked;
- else if (unlikely((urb->transfer_flags & URB_SHORT_NOT_OK) &&
- urb->actual_length < urb->transfer_buffer_length &&
- !status))
- status = -EREMOTEIO;
+ struct giveback_urb_bh *bh;
+ bool running, high_prio_bh;
- unmap_urb_for_dma(hcd, urb);
- usbmon_urb_complete(&hcd->self, urb, status);
- usb_unanchor_urb(urb);
+ /* pass status to tasklet via unlinked */
+ if (likely(!urb->unlinked))
+ urb->unlinked = status;
- /* pass ownership to the completion handler */
- urb->status = status;
- urb->complete (urb);
- atomic_dec (&urb->use_count);
- if (unlikely(atomic_read(&urb->reject)))
- wake_up (&usb_kill_urb_queue);
- usb_put_urb (urb);
+ if (!hcd_giveback_urb_in_bh(hcd) && !is_root_hub(urb->dev)) {
+ __usb_hcd_giveback_urb(urb);
+ return;
+ }
+
+ if (usb_pipeisoc(urb->pipe) || usb_pipeint(urb->pipe)) {
+ bh = &hcd->high_prio_bh;
+ high_prio_bh = true;
+ } else {
+ bh = &hcd->low_prio_bh;
+ high_prio_bh = false;
+ }
+
+ spin_lock(&bh->lock);
+ list_add_tail(&urb->urb_list, &bh->head);
+ running = bh->running;
+ spin_unlock(&bh->lock);
+
+ if (running)
+ ;
+ else if (high_prio_bh)
+ tasklet_hi_schedule(&bh->bh);
+ else
+ tasklet_schedule(&bh->bh);
}
EXPORT_SYMBOL_GPL(usb_hcd_giveback_urb);
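
The new giveback path defers completion out of hard-IRQ context: the HCD only queues the URB on a per-priority list and schedules a tasklet, and the tasklet drains the list with interrupts enabled. The queuing side of that pattern, reduced to a generic sketch with hypothetical names (kernel types from <linux/spinlock.h>, <linux/list.h> and <linux/interrupt.h>):

struct deferred_bh {
	spinlock_t lock;
	struct list_head head;
	struct tasklet_struct bh;
	bool running;
};

/* Called from (hard) interrupt context: only queue and kick the tasklet. */
static void deferred_queue(struct deferred_bh *bh, struct list_head *item)
{
	bool running;

	spin_lock(&bh->lock);
	list_add_tail(item, &bh->head);
	running = bh->running;
	spin_unlock(&bh->lock);

	/* If the tasklet is already draining the list, it will pick this
	 * item up on its restart pass; otherwise schedule it now. */
	if (!running)
		tasklet_schedule(&bh->bh);
}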
@@ -1784,7 +1862,7 @@ rescan:
* pass in the current alternate interface setting in cur_alt,
* and pass in the new alternate interface setting in new_alt.
*
- * Returns an error if the requested bandwidth change exceeds the
+ * Return: An error if the requested bandwidth change exceeds the
* bus bandwidth or host controller internal resources.
*/
int usb_hcd_alloc_bandwidth(struct usb_device *udev,
@@ -1954,9 +2032,12 @@ void usb_hcd_reset_endpoint(struct usb_device *udev,
* @num_streams: number of streams to allocate.
* @mem_flags: flags hcd should use to allocate memory.
*
- * Sets up a group of bulk endpoints to have num_streams stream IDs available.
+ * Sets up a group of bulk endpoints to have @num_streams stream IDs available.
* Drivers may queue multiple transfers to different stream IDs, which may
* complete in a different order than they were queued.
+ *
+ * Return: On success, the number of allocated streams. On failure, a negative
+ * error code.
*/
int usb_alloc_streams(struct usb_interface *interface,
struct usb_host_endpoint **eps, unsigned int num_eps,
@@ -2201,6 +2282,8 @@ EXPORT_SYMBOL_GPL(usb_hcd_resume_root_hub);
* khubd identifying and possibly configuring the device.
* This is needed by OTG controller drivers, where it helps meet
* HNP protocol timing requirements for starting a port reset.
+ *
+ * Return: 0 if successful.
*/
int usb_bus_start_enum(struct usb_bus *bus, unsigned port_num)
{
@@ -2235,6 +2318,8 @@ EXPORT_SYMBOL_GPL(usb_bus_start_enum);
*
* If the controller isn't HALTed, calls the driver's irq handler.
* Checks whether the controller is now dead.
+ *
+ * Return: %IRQ_HANDLED if the IRQ was handled. %IRQ_NONE otherwise.
*/
irqreturn_t usb_hcd_irq (int irq, void *__hcd)
{
@@ -2307,6 +2392,14 @@ EXPORT_SYMBOL_GPL (usb_hc_died);
/*-------------------------------------------------------------------------*/
+static void init_giveback_urb_bh(struct giveback_urb_bh *bh)
+{
+
+ spin_lock_init(&bh->lock);
+ INIT_LIST_HEAD(&bh->head);
+ tasklet_init(&bh->bh, usb_giveback_urb_bh, (unsigned long)bh);
+}
+
/**
* usb_create_shared_hcd - create and initialize an HCD structure
* @driver: HC driver that will use this hcd
@@ -2320,7 +2413,8 @@ EXPORT_SYMBOL_GPL (usb_hc_died);
* HC driver's private data. Initialize the generic members of the
* hcd structure.
*
- * If memory is unavailable, returns NULL.
+ * Return: On success, a pointer to the created and initialized HCD structure.
+ * On failure (e.g. if memory is unavailable), %NULL.
*/
struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
struct device *dev, const char *bus_name,
@@ -2384,7 +2478,8 @@ EXPORT_SYMBOL_GPL(usb_create_shared_hcd);
* HC driver's private data. Initialize the generic members of the
* hcd structure.
*
- * If memory is unavailable, returns NULL.
+ * Return: On success, a pointer to the created and initialized HCD
+ * structure. On failure (e.g. if memory is unavailable), %NULL.
*/
struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
struct device *dev, const char *bus_name)
@@ -2563,7 +2658,7 @@ int usb_add_hcd(struct usb_hcd *hcd,
* should already have been reset (and boot firmware kicked off etc).
*/
if (hcd->driver->reset && (retval = hcd->driver->reset(hcd)) < 0) {
- dev_err(hcd->self.controller, "can't setup\n");
+ dev_err(hcd->self.controller, "can't setup: %d\n", retval);
goto err_hcd_driver_setup;
}
hcd->rh_pollable = 1;
@@ -2573,6 +2668,10 @@ int usb_add_hcd(struct usb_hcd *hcd,
&& device_can_wakeup(&hcd->self.root_hub->dev))
dev_dbg(hcd->self.controller, "supports USB remote wakeup\n");
+ /* initialize tasklets */
+ init_giveback_urb_bh(&hcd->high_prio_bh);
+ init_giveback_urb_bh(&hcd->low_prio_bh);
+
/* enable irqs just before we start the controller,
* if the BIOS provides legacy PCI irqs.
*/
@@ -2681,6 +2780,16 @@ void usb_remove_hcd(struct usb_hcd *hcd)
usb_disconnect(&rhdev); /* Sets rhdev to NULL */
mutex_unlock(&usb_bus_list_lock);
+ /*
+ * tasklet_kill() isn't needed here because:
+ * - a driver's disconnect(), called from usb_disconnect(), is
+ * expected to make sure its URBs are completed during the
+ * disconnect() callback
+ *
+ * - it is too late to run complete() here, since the driver
+ * may already have been removed
+ */
+
/* Prevent any more root-hub status calls from the timer.
* The HCD might still restart the timer (if a port status change
* interrupt occurs), but usb_hcd_poll_rh_status() won't invoke
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 4191db32f12..dde4c83516a 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -451,7 +451,7 @@ static void led_work (struct work_struct *work)
if (hdev->state != USB_STATE_CONFIGURED || hub->quiescing)
return;
- for (i = 0; i < hub->descriptor->bNbrPorts; i++) {
+ for (i = 0; i < hdev->maxchild; i++) {
unsigned selector, mode;
/* 30%-50% duty cycle */
@@ -500,7 +500,7 @@ static void led_work (struct work_struct *work)
}
if (!changed && blinkenlights) {
cursor++;
- cursor %= hub->descriptor->bNbrPorts;
+ cursor %= hdev->maxchild;
set_port_led(hub, cursor + 1, HUB_LED_GREEN);
hub->indicator[cursor] = INDICATOR_CYCLE;
changed++;
@@ -668,6 +668,15 @@ resubmit:
static inline int
hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt)
{
+ /* Need to clear both directions for control ep */
+ if (((devinfo >> 11) & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_CONTROL) {
+ int status = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
+ HUB_CLEAR_TT_BUFFER, USB_RT_PORT,
+ devinfo ^ 0x8000, tt, NULL, 0, 1000);
+ if (status)
+ return status;
+ }
return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo,
tt, NULL, 0, 1000);
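
In the Clear_TT_Buffer request, wValue (devinfo here) packs the endpoint number, device address, endpoint type and, in bit 15, the transfer direction. A control endpoint serves both directions, which is why the hunk above issues the request twice with that bit toggled. A one-line illustration of the toggle (a sketch, assuming the bit-15 layout from the USB 2.0 spec, section 11.24.2.3):

/* Sketch: flip only the direction bit of a Clear_TT_Buffer wValue. */
static inline u16 tt_devinfo_other_direction(u16 devinfo)
{
	return devinfo ^ 0x8000;	/* bit 15 = IN/OUT direction */
}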
@@ -725,6 +734,8 @@ static void hub_tt_work(struct work_struct *work)
*
* call this function to control port's power via setting or
* clearing the port's PORT_POWER feature.
+ *
+ * Return: 0 if successful. A negative error code otherwise.
*/
int usb_hub_set_port_power(struct usb_device *hdev, struct usb_hub *hub,
int port1, bool set)
@@ -753,6 +764,8 @@ int usb_hub_set_port_power(struct usb_device *hdev, struct usb_hub *hub,
*
* It may not be possible for that hub to handle additional full (or low)
* speed transactions until that state is fully cleared out.
+ *
+ * Return: 0 if successful. A negative error code otherwise.
*/
int usb_hub_clear_tt_buffer(struct urb *urb)
{
@@ -817,7 +830,7 @@ static unsigned hub_power_on(struct usb_hub *hub, bool do_delay)
else
dev_dbg(hub->intfdev, "trying to enable port power on "
"non-switchable hub\n");
- for (port1 = 1; port1 <= hub->descriptor->bNbrPorts; port1++)
+ for (port1 = 1; port1 <= hub->hdev->maxchild; port1++)
if (hub->ports[port1 - 1]->power_is_on)
set_port_feature(hub->hdev, port1, USB_PORT_FEAT_POWER);
else
@@ -955,6 +968,8 @@ static void hub_port_logical_disconnect(struct usb_hub *hub, int port1)
* see that the device has been disconnected. When the device is
* physically unplugged and something is plugged in, the events will
* be received and processed normally.
+ *
+ * Return: 0 if successful. A negative error code otherwise.
*/
int usb_remove_device(struct usb_device *udev)
{
@@ -1455,11 +1470,10 @@ static int hub_configure(struct usb_hub *hub,
* and battery-powered root hubs (may provide just 8 mA).
*/
ret = usb_get_status(hdev, USB_RECIP_DEVICE, 0, &hubstatus);
- if (ret < 2) {
+ if (ret) {
message = "can't get hub status";
goto fail;
}
- le16_to_cpus(&hubstatus);
hcd = bus_to_hcd(hdev->bus);
if (hdev == hdev->bus->root_hub) {
if (hcd->power_budget > 0)
@@ -1548,10 +1562,15 @@ static int hub_configure(struct usb_hub *hub,
if (hub->has_indicators && blinkenlights)
hub->indicator [0] = INDICATOR_CYCLE;
- for (i = 0; i < hdev->maxchild; i++)
- if (usb_hub_create_port_device(hub, i + 1) < 0)
+ for (i = 0; i < hdev->maxchild; i++) {
+ ret = usb_hub_create_port_device(hub, i + 1);
+ if (ret < 0) {
dev_err(hub->intfdev,
"couldn't create port%d device.\n", i + 1);
+ hdev->maxchild = i;
+ goto fail_keep_maxchild;
+ }
+ }
usb_hub_adjust_deviceremovable(hdev, hub->descriptor);
@@ -1559,6 +1578,8 @@ static int hub_configure(struct usb_hub *hub,
return 0;
fail:
+ hdev->maxchild = 0;
+fail_keep_maxchild:
dev_err (hub_dev, "config failed, %s (err %d)\n",
message, ret);
/* hub_disconnect() frees urb and descriptor */
@@ -2107,6 +2128,8 @@ static inline void announce_device(struct usb_device *udev) { }
* @udev: newly addressed device (in ADDRESS state)
*
* Finish enumeration for On-The-Go devices
+ *
+ * Return: 0 if successful. A negative error code otherwise.
*/
static int usb_enumerate_device_otg(struct usb_device *udev)
{
@@ -2189,6 +2212,8 @@ fail:
* If the device is WUSB and not authorized, we don't attempt to read
* the string descriptors, as they will be errored out by the device
* until it has been authorized.
+ *
+ * Return: 0 if successful. A negative error code otherwise.
*/
static int usb_enumerate_device(struct usb_device *udev)
{
@@ -2269,13 +2294,14 @@ static void set_usb_port_removable(struct usb_device *udev)
* udev has already been installed, but udev is not yet visible through
* sysfs or other filesystem code.
*
- * It will return if the device is configured properly or not. Zero if
- * the interface was registered with the driver core; else a negative
- * errno value.
- *
* This call is synchronous, and may not be used in an interrupt context.
*
* Only the hub driver or root-hub registrar should ever call this.
+ *
+ * Return: 0 if the device was configured properly and its interface was
+ * registered with the driver core; else a negative errno value.
+ *
*/
int usb_new_device(struct usb_device *udev)
{
@@ -2383,6 +2409,8 @@ fail:
*
* We share a lock (that we have) with device_del(), so we need to
* defer its call.
+ *
+ * Return: 0.
*/
int usb_deauthorize_device(struct usb_device *usb_dev)
{
@@ -2829,25 +2857,65 @@ void usb_enable_ltm(struct usb_device *udev)
}
EXPORT_SYMBOL_GPL(usb_enable_ltm);
-#ifdef CONFIG_PM
/*
- * usb_disable_function_remotewakeup - disable usb3.0
- * device's function remote wakeup
+ * usb_enable_remote_wakeup - enable remote wakeup for a device
* @udev: target device
*
- * Assume there's only one function on the USB 3.0
- * device and disable remote wake for the first
- * interface. FIXME if the interface association
- * descriptor shows there's more than one function.
+ * For USB-2 devices: Set the device's remote wakeup feature.
+ *
+ * For USB-3 devices: Assume there's only one function on the device and
+ * enable remote wake for the first interface. FIXME if the interface
+ * association descriptor shows there's more than one function.
*/
-static int usb_disable_function_remotewakeup(struct usb_device *udev)
+static int usb_enable_remote_wakeup(struct usb_device *udev)
{
- return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ if (udev->speed < USB_SPEED_SUPER)
+ return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, USB_RECIP_DEVICE,
+ USB_DEVICE_REMOTE_WAKEUP, 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ else
+ return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ USB_REQ_SET_FEATURE, USB_RECIP_INTERFACE,
+ USB_INTRF_FUNC_SUSPEND,
+ USB_INTRF_FUNC_SUSPEND_RW |
+ USB_INTRF_FUNC_SUSPEND_LP,
+ NULL, 0, USB_CTRL_SET_TIMEOUT);
+}
+
+/*
+ * usb_disable_remote_wakeup - disable remote wakeup for a device
+ * @udev: target device
+ *
+ * For USB-2 devices: Clear the device's remote wakeup feature.
+ *
+ * For USB-3 devices: Assume there's only one function on the device and
+ * disable remote wake for the first interface. FIXME if the interface
+ * association descriptor shows there's more than one function.
+ */
+static int usb_disable_remote_wakeup(struct usb_device *udev)
+{
+ if (udev->speed < USB_SPEED_SUPER)
+ return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ USB_REQ_CLEAR_FEATURE, USB_RECIP_DEVICE,
+ USB_DEVICE_REMOTE_WAKEUP, 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ else
+ return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
USB_REQ_CLEAR_FEATURE, USB_RECIP_INTERFACE,
USB_INTRF_FUNC_SUSPEND, 0, NULL, 0,
USB_CTRL_SET_TIMEOUT);
}
+/* Count of wakeup-enabled devices at or below udev */
+static unsigned wakeup_enabled_descendants(struct usb_device *udev)
+{
+ struct usb_hub *hub = usb_hub_to_struct_hub(udev);
+
+ return udev->do_remote_wakeup +
+ (hub ? hub->wakeup_enabled_descendants : 0);
+}
+
/*
* usb_port_suspend - suspend a usb device's upstream port
* @udev: device that's no longer in active use, not a root hub
@@ -2888,8 +2956,8 @@ static int usb_disable_function_remotewakeup(struct usb_device *udev)
* Linux (2.6) currently has NO mechanisms to initiate that: no khubd
* timer, no SRP, no requests through sysfs.
*
- * If Runtime PM isn't enabled or used, non-SuperSpeed devices really get
- * suspended only when their bus goes into global suspend (i.e., the root
+ * If Runtime PM isn't enabled or used, non-SuperSpeed devices may not get
+ * suspended until their bus goes into global suspend (i.e., the root
* hub is suspended). Nevertheless, we change @udev->state to
* USB_STATE_SUSPENDED as this is the device's "logical" state. The actual
* upstream port setting is stored in @udev->port_is_suspended.
@@ -2900,7 +2968,6 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
{
struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
struct usb_port *port_dev = hub->ports[udev->portnum - 1];
- enum pm_qos_flags_status pm_qos_stat;
int port1 = udev->portnum;
int status;
bool really_suspend = true;
@@ -2912,33 +2979,13 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
* we don't explicitly enable it here.
*/
if (udev->do_remote_wakeup) {
- if (!hub_is_superspeed(hub->hdev)) {
- status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- USB_REQ_SET_FEATURE, USB_RECIP_DEVICE,
- USB_DEVICE_REMOTE_WAKEUP, 0,
- NULL, 0,
- USB_CTRL_SET_TIMEOUT);
- } else {
- /* Assume there's only one function on the USB 3.0
- * device and enable remote wake for the first
- * interface. FIXME if the interface association
- * descriptor shows there's more than one function.
- */
- status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- USB_REQ_SET_FEATURE,
- USB_RECIP_INTERFACE,
- USB_INTRF_FUNC_SUSPEND,
- USB_INTRF_FUNC_SUSPEND_RW |
- USB_INTRF_FUNC_SUSPEND_LP,
- NULL, 0,
- USB_CTRL_SET_TIMEOUT);
- }
+ status = usb_enable_remote_wakeup(udev);
if (status) {
dev_dbg(&udev->dev, "won't remote wakeup, status %d\n",
status);
/* bail if autosuspend is requested */
if (PMSG_IS_AUTO(msg))
- return status;
+ goto err_wakeup;
}
}
@@ -2947,28 +2994,36 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
usb_set_usb2_hardware_lpm(udev, 0);
if (usb_disable_ltm(udev)) {
- dev_err(&udev->dev, "%s Failed to disable LTM before suspend\n.",
- __func__);
- return -ENOMEM;
+ dev_err(&udev->dev, "Failed to disable LTM before suspend.\n");
+ status = -ENOMEM;
+ if (PMSG_IS_AUTO(msg))
+ goto err_ltm;
}
if (usb_unlocked_disable_lpm(udev)) {
- dev_err(&udev->dev, "%s Failed to disable LPM before suspend\n.",
- __func__);
- return -ENOMEM;
+ dev_err(&udev->dev, "Failed to disable LPM before suspend.\n");
+ status = -ENOMEM;
+ if (PMSG_IS_AUTO(msg))
+ goto err_lpm3;
}
/* see 7.1.7.6 */
if (hub_is_superspeed(hub->hdev))
status = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_U3);
- else if (PMSG_IS_AUTO(msg))
- status = set_port_feature(hub->hdev, port1,
- USB_PORT_FEAT_SUSPEND);
+
/*
* For system suspend, we do not need to enable the suspend feature
* on individual USB-2 ports. The devices will automatically go
* into suspend a few ms after the root hub stops sending packets.
* The USB 2.0 spec calls this "global suspend".
+ *
+ * However, many USB hubs have a bug: They don't relay wakeup requests
+ * from a downstream port if the port's suspend feature isn't on.
+ * Therefore we will turn on the suspend feature if udev or any of its
+ * descendants is enabled for remote wakeup.
*/
+ else if (PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0)
+ status = set_port_feature(hub->hdev, port1,
+ USB_PORT_FEAT_SUSPEND);
else {
really_suspend = false;
status = 0;
@@ -2976,54 +3031,37 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
if (status) {
dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
port1, status);
- /* paranoia: "should not happen" */
- if (udev->do_remote_wakeup) {
- if (!hub_is_superspeed(hub->hdev)) {
- (void) usb_control_msg(udev,
- usb_sndctrlpipe(udev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_RECIP_DEVICE,
- USB_DEVICE_REMOTE_WAKEUP, 0,
- NULL, 0,
- USB_CTRL_SET_TIMEOUT);
- } else
- (void) usb_disable_function_remotewakeup(udev);
-
- }
+ /* Try to enable USB3 LPM and LTM again */
+ usb_unlocked_enable_lpm(udev);
+ err_lpm3:
+ usb_enable_ltm(udev);
+ err_ltm:
/* Try to enable USB2 hardware LPM again */
if (udev->usb2_hw_lpm_capable == 1)
usb_set_usb2_hardware_lpm(udev, 1);
- /* Try to enable USB3 LTM and LPM again */
- usb_enable_ltm(udev);
- usb_unlocked_enable_lpm(udev);
+ if (udev->do_remote_wakeup)
+ (void) usb_disable_remote_wakeup(udev);
+ err_wakeup:
/* System sleep transitions should never fail */
if (!PMSG_IS_AUTO(msg))
status = 0;
} else {
- /* device has up to 10 msec to fully suspend */
dev_dbg(&udev->dev, "usb %ssuspend, wakeup %d\n",
(PMSG_IS_AUTO(msg) ? "auto-" : ""),
udev->do_remote_wakeup);
- usb_set_device_state(udev, USB_STATE_SUSPENDED);
if (really_suspend) {
udev->port_is_suspended = 1;
+
+ /* device has up to 10 msec to fully suspend */
msleep(10);
}
+ usb_set_device_state(udev, USB_STATE_SUSPENDED);
}
- /*
- * Check whether current status meets the requirement of
- * usb port power off mechanism
- */
- pm_qos_stat = dev_pm_qos_flags(&port_dev->dev,
- PM_QOS_FLAG_NO_POWER_OFF);
- if (!udev->do_remote_wakeup
- && pm_qos_stat != PM_QOS_FLAGS_ALL
- && udev->persist_enabled
- && !status) {
+ if (status == 0 && !udev->do_remote_wakeup && udev->persist_enabled) {
pm_runtime_put_sync(&port_dev->dev);
port_dev->did_runtime_put = true;
}
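
With the hub wakeup quirk described in the comment above, the decision to set a USB-2 port's suspend feature no longer depends only on autosuspend: it also depends on whether remote wakeup is enabled anywhere at or below the device. Reduced to a predicate, the logic looks roughly like this (an illustrative sketch reusing wakeup_enabled_descendants() defined earlier in this patch):

/* Sketch: should the PORT_SUSPEND feature be set on this USB-2 port? */
static bool need_port_suspend_feature(struct usb_device *udev,
				      pm_message_t msg)
{
	/* Runtime suspend always sets it; system suspend only does so
	 * when some device at or below udev may issue a remote wakeup. */
	return PMSG_IS_AUTO(msg) || wakeup_enabled_descendants(udev) > 0;
}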
@@ -3077,8 +3115,6 @@ static int finish_port_resume(struct usb_device *udev)
if (status == 0) {
devstatus = 0;
status = usb_get_status(udev, USB_RECIP_DEVICE, 0, &devstatus);
- if (status >= 0)
- status = (status > 0 ? 0 : -ENODEV);
/* If a normal resume failed, try doing a reset-resume */
if (status && !udev->reset_resume && udev->persist_enabled) {
@@ -3098,24 +3134,15 @@ static int finish_port_resume(struct usb_device *udev)
* udev->reset_resume
*/
} else if (udev->actconfig && !udev->reset_resume) {
- if (!hub_is_superspeed(udev->parent)) {
- le16_to_cpus(&devstatus);
+ if (udev->speed < USB_SPEED_SUPER) {
if (devstatus & (1 << USB_DEVICE_REMOTE_WAKEUP))
- status = usb_control_msg(udev,
- usb_sndctrlpipe(udev, 0),
- USB_REQ_CLEAR_FEATURE,
- USB_RECIP_DEVICE,
- USB_DEVICE_REMOTE_WAKEUP, 0,
- NULL, 0,
- USB_CTRL_SET_TIMEOUT);
+ status = usb_disable_remote_wakeup(udev);
} else {
status = usb_get_status(udev, USB_RECIP_INTERFACE, 0,
&devstatus);
- le16_to_cpus(&devstatus);
if (!status && devstatus & (USB_INTRF_STAT_FUNC_RW_CAP
| USB_INTRF_STAT_FUNC_RW))
- status =
- usb_disable_function_remotewakeup(udev);
+ status = usb_disable_remote_wakeup(udev);
}
if (status)
@@ -3249,8 +3276,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
return status;
}
-#endif /* CONFIG_PM */
-
#ifdef CONFIG_PM_RUNTIME
/* caller has locked udev */
@@ -3293,7 +3318,11 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
unsigned port1;
int status;
- /* Warn if children aren't already suspended */
+ /*
+ * Warn if children aren't already suspended.
+ * Also, add up the number of wakeup-enabled descendants.
+ */
+ hub->wakeup_enabled_descendants = 0;
for (port1 = 1; port1 <= hdev->maxchild; port1++) {
struct usb_device *udev;
@@ -3303,6 +3332,9 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
if (PMSG_IS_AUTO(msg))
return -EBUSY;
}
+ if (udev)
+ hub->wakeup_enabled_descendants +=
+ wakeup_enabled_descendants(udev);
}
if (hdev->do_remote_wakeup && hub->quirk_check_port_auto_suspend) {
@@ -3811,7 +3843,8 @@ EXPORT_SYMBOL_GPL(usb_disable_ltm);
void usb_enable_ltm(struct usb_device *udev) { }
EXPORT_SYMBOL_GPL(usb_enable_ltm);
-#endif
+
+#endif /* CONFIG_PM */
/* USB 2.0 spec, 7.1.7.3 / fig 7-29:
@@ -4451,11 +4484,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
status = usb_get_status(udev, USB_RECIP_DEVICE, 0,
&devstat);
- if (status < 2) {
+ if (status) {
dev_dbg(&udev->dev, "get status %d ?\n", status);
goto loop_disable;
}
- le16_to_cpus(&devstat);
if ((devstat & (1 << USB_DEVICE_SELF_POWERED)) == 0) {
dev_err(&udev->dev,
"can't connect bus-powered hub "
@@ -4616,9 +4648,7 @@ static void hub_events(void)
hub_dev = hub->intfdev;
intf = to_usb_interface(hub_dev);
dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n",
- hdev->state, hub->descriptor
- ? hub->descriptor->bNbrPorts
- : 0,
+ hdev->state, hdev->maxchild,
/* NOTE: expects max 15 ports... */
(u16) hub->change_bits[0],
(u16) hub->event_bits[0]);
@@ -4663,7 +4693,7 @@ static void hub_events(void)
}
/* deal with port status changes */
- for (i = 1; i <= hub->descriptor->bNbrPorts; i++) {
+ for (i = 1; i <= hdev->maxchild; i++) {
if (test_bit(i, hub->busy_bits))
continue;
connect_change = test_bit(i, hub->change_bits);
@@ -4766,7 +4796,8 @@ static void hub_events(void)
hub->ports[i - 1]->child;
dev_dbg(hub_dev, "warm reset port %d\n", i);
- if (!udev) {
+ if (!udev || !(portstatus &
+ USB_PORT_STAT_CONNECTION)) {
status = hub_port_reset(hub, i,
NULL, HUB_BH_RESET_TIME,
true);
@@ -4776,8 +4807,8 @@ static void hub_events(void)
usb_lock_device(udev);
status = usb_reset_device(udev);
usb_unlock_device(udev);
+ connect_change = 0;
}
- connect_change = 0;
}
if (connect_change)
@@ -4913,7 +4944,8 @@ void usb_hub_cleanup(void)
} /* usb_hub_cleanup() */
static int descriptors_changed(struct usb_device *udev,
- struct usb_device_descriptor *old_device_descriptor)
+ struct usb_device_descriptor *old_device_descriptor,
+ struct usb_host_bos *old_bos)
{
int changed = 0;
unsigned index;
@@ -4927,6 +4959,16 @@ static int descriptors_changed(struct usb_device *udev,
sizeof(*old_device_descriptor)) != 0)
return 1;
+ if ((old_bos && !udev->bos) || (!old_bos && udev->bos))
+ return 1;
+ if (udev->bos) {
+ len = le16_to_cpu(udev->bos->desc->wTotalLength);
+ if (len != le16_to_cpu(old_bos->desc->wTotalLength))
+ return 1;
+ if (memcmp(udev->bos->desc, old_bos->desc, len))
+ return 1;
+ }
+
/* Since the idVendor, idProduct, and bcdDevice values in the
* device descriptor haven't changed, we will assume the
* Manufacturer and Product strings haven't changed either.
@@ -5002,10 +5044,11 @@ static int descriptors_changed(struct usb_device *udev,
* re-connected. All drivers will be unbound, and the device will be
* re-enumerated and probed all over again.
*
- * Returns 0 if the reset succeeded, -ENODEV if the device has been
+ * Return: 0 if the reset succeeded, -ENODEV if the device has been
* flagged for logical disconnection, or some other negative error code
* if the reset wasn't even attempted.
*
+ * Note:
* The caller must own the device lock. For example, it's safe to use
* this from a driver probe() routine after downloading new firmware.
* For calls that might not occur during probe(), drivers should lock
@@ -5022,6 +5065,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
struct usb_hub *parent_hub;
struct usb_hcd *hcd = bus_to_hcd(udev->bus);
struct usb_device_descriptor descriptor = udev->descriptor;
+ struct usb_host_bos *bos;
int i, ret = 0;
int port1 = udev->portnum;
@@ -5039,6 +5083,9 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
}
parent_hub = usb_hub_to_struct_hub(parent_hdev);
+ bos = udev->bos;
+ udev->bos = NULL;
+
/* Disable LPM and LTM while we reset the device and reinstall the alt
* settings. Device-initiated LPM settings, and system exit latency
* settings are cleared when the device is reset, so we have to set
@@ -5072,7 +5119,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
goto re_enumerate;
/* Device might have changed firmware (DFU or similar) */
- if (descriptors_changed(udev, &descriptor)) {
+ if (descriptors_changed(udev, &descriptor, bos)) {
dev_info(&udev->dev, "device firmware changed\n");
udev->descriptor = descriptor; /* for disconnect() calls */
goto re_enumerate;
@@ -5145,11 +5192,15 @@ done:
/* Now that the alt settings are re-installed, enable LTM and LPM. */
usb_unlocked_enable_lpm(udev);
usb_enable_ltm(udev);
+ usb_release_bos_descriptor(udev);
+ udev->bos = bos;
return 0;
re_enumerate:
/* LPM state doesn't matter when we're about to destroy the device. */
hub_port_logical_disconnect(parent_hub, port1);
+ usb_release_bos_descriptor(udev);
+ udev->bos = bos;
return -ENODEV;
}
@@ -5161,8 +5212,9 @@ re_enumerate:
* method), performs the port reset, and then lets the drivers know that
* the reset is over (using their post_reset method).
*
- * Return value is the same as for usb_reset_and_verify_device().
+ * Return: The same as for usb_reset_and_verify_device().
*
+ * Note:
* The caller must own the device lock. For example, it's safe to use
* this from a driver probe() routine after downloading new firmware.
* For calls that might not occur during probe(), drivers should lock
@@ -5300,7 +5352,7 @@ EXPORT_SYMBOL_GPL(usb_queue_reset_device);
* USB drivers call this function to get hub's child device
* pointer.
*
- * Return NULL if input param is invalid and
+ * Return: %NULL if input param is invalid and
* child's usb_device pointer if non-NULL.
*/
struct usb_device *usb_hub_find_child(struct usb_device *hdev,
@@ -5334,8 +5386,8 @@ void usb_set_hub_port_connect_type(struct usb_device *hdev, int port1,
* @hdev: USB device belonging to the usb hub
* @port1: port num of the port
*
- * Return connect type of the port and if input params are
- * invalid, return USB_PORT_CONNECT_TYPE_UNKNOWN.
+ * Return: The connect type of the port if successful, or
+ * USB_PORT_CONNECT_TYPE_UNKNOWN if the input parameters are invalid.
*/
enum usb_port_connect_type
usb_get_hub_port_connect_type(struct usb_device *hdev, int port1)
@@ -5395,8 +5447,8 @@ void usb_hub_adjust_deviceremovable(struct usb_device *hdev,
* @hdev: USB device belonging to the usb hub
* @port1: port num of the port
*
- * Return port's acpi handle if successful, NULL if params are
- * invaild.
+ * Return: Port's acpi handle if successful, %NULL if params are
+ * invalid.
*/
acpi_handle usb_get_hub_port_acpi_handle(struct usb_device *hdev,
int port1)
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index 6508e02b3da..4e4790dea34 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -59,6 +59,9 @@ struct usb_hub {
struct usb_tt tt; /* Transaction Translator */
unsigned mA_per_port; /* current for each child */
+#ifdef CONFIG_PM
+ unsigned wakeup_enabled_descendants;
+#endif
unsigned limited_power:1;
unsigned quiescing:1;
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index e7ee1e45166..82927e1ed27 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -119,15 +119,15 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
* This function sends a simple control message to a specified endpoint and
* waits for the message to complete, or timeout.
*
- * If successful, it returns the number of bytes transferred, otherwise a
- * negative error number.
- *
* Don't use this function from within an interrupt context, like a bottom half
* handler. If you need an asynchronous message, or need to send a message
* from within interrupt context, use usb_submit_urb().
* If a thread in your driver uses this call, make sure your disconnect()
* method can wait for it to complete. Since you don't have a handle on the
* URB used, you can't cancel the request.
+ *
+ * Return: If successful, the number of bytes transferred. Otherwise, a negative
+ * error number.
*/
int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
__u8 requesttype, __u16 value, __u16 index, void *data,
@@ -170,15 +170,16 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
* This function sends a simple interrupt message to a specified endpoint and
* waits for the message to complete, or timeout.
*
- * If successful, it returns 0, otherwise a negative error number. The number
- * of actual bytes transferred will be stored in the actual_length paramater.
- *
* Don't use this function from within an interrupt context, like a bottom half
* handler. If you need an asynchronous message, or need to send a message
* from within interrupt context, use usb_submit_urb() If a thread in your
* driver uses this call, make sure your disconnect() method can wait for it to
* complete. Since you don't have a handle on the URB used, you can't cancel
* the request.
+ *
+ * Return:
+ * If successful, 0. Otherwise a negative error number. The number of actual
+ * bytes transferred will be stored in the @actual_length parameter.
*/
int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
void *data, int len, int *actual_length, int timeout)
@@ -203,9 +204,6 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
* This function sends a simple bulk message to a specified endpoint
* and waits for the message to complete, or timeout.
*
- * If successful, it returns 0, otherwise a negative error number. The number
- * of actual bytes transferred will be stored in the actual_length paramater.
- *
* Don't use this function from within an interrupt context, like a bottom half
* handler. If you need an asynchronous message, or need to send a message
* from within interrupt context, use usb_submit_urb() If a thread in your
@@ -217,6 +215,11 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
* users are forced to abuse this routine by using it to submit URBs for
* interrupt endpoints. We will take the liberty of creating an interrupt URB
* (with the default interval) if the target is an interrupt endpoint.
+ *
+ * Return:
+ * If successful, 0. Otherwise a negative error number. The number of actual
+ * bytes transferred will be stored in the @actual_length parameter.
+ *
*/
int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
void *data, int len, int *actual_length, int timeout)
@@ -341,9 +344,9 @@ static void sg_complete(struct urb *urb)
* send every byte identified in the list.
* @mem_flags: SLAB_* flags affecting memory allocations in this call
*
- * Returns zero for success, else a negative errno value. This initializes a
- * scatter/gather request, allocating resources such as I/O mappings and urb
- * memory (except maybe memory used by USB controller drivers).
+ * This initializes a scatter/gather request, allocating resources such as
+ * I/O mappings and urb memory (except maybe memory used by USB controller
+ * drivers).
*
* The request must be issued using usb_sg_wait(), which waits for the I/O to
* complete (or to be canceled) and then cleans up all resources allocated by
@@ -351,6 +354,8 @@ static void sg_complete(struct urb *urb)
*
* The request may be canceled with usb_sg_cancel(), either before or after
* usb_sg_wait() is called.
+ *
+ * Return: Zero for success, else a negative errno value.
*/
int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
unsigned pipe, unsigned period, struct scatterlist *sg,
@@ -623,7 +628,7 @@ EXPORT_SYMBOL_GPL(usb_sg_cancel);
*
* This call is synchronous, and may not be used in an interrupt context.
*
- * Returns the number of bytes received on success, or else the status code
+ * Return: The number of bytes received on success, or else the status code
* returned by the underlying usb_control_msg() call.
*/
int usb_get_descriptor(struct usb_device *dev, unsigned char type,
@@ -671,7 +676,7 @@ EXPORT_SYMBOL_GPL(usb_get_descriptor);
*
* This call is synchronous, and may not be used in an interrupt context.
*
- * Returns the number of bytes received on success, or else the status code
+ * Return: The number of bytes received on success, or else the status code
* returned by the underlying usb_control_msg() call.
*/
static int usb_get_string(struct usb_device *dev, unsigned short langid,
@@ -805,7 +810,7 @@ static int usb_get_langid(struct usb_device *dev, unsigned char *tbuf)
*
* This call is synchronous, and may not be used in an interrupt context.
*
- * Returns length of the string (>= 0) or usb_control_msg status (< 0).
+ * Return: length of the string (>= 0) or usb_control_msg status (< 0).
*/
int usb_string(struct usb_device *dev, int index, char *buf, size_t size)
{
@@ -853,8 +858,8 @@ EXPORT_SYMBOL_GPL(usb_string);
* @udev: the device whose string descriptor is being read
* @index: the descriptor index
*
- * Returns a pointer to a kmalloc'ed buffer containing the descriptor string,
- * or NULL if the index is 0 or the string could not be read.
+ * Return: A pointer to a kmalloc'ed buffer containing the descriptor string,
+ * or %NULL if the index is 0 or the string could not be read.
*/
char *usb_cache_string(struct usb_device *udev, int index)
{
@@ -894,7 +899,7 @@ char *usb_cache_string(struct usb_device *udev, int index)
*
* This call is synchronous, and may not be used in an interrupt context.
*
- * Returns the number of bytes received on success, or else the status code
+ * Return: The number of bytes received on success, or else the status code
* returned by the underlying usb_control_msg() call.
*/
int usb_get_device_descriptor(struct usb_device *dev, unsigned int size)
@@ -934,13 +939,13 @@ int usb_get_device_descriptor(struct usb_device *dev, unsigned int size)
*
* This call is synchronous, and may not be used in an interrupt context.
*
- * Returns the number of bytes received on success, or else the status code
- * returned by the underlying usb_control_msg() call.
+ * Returns 0 and the status value in *@data (in host byte order) on success,
+ * or else the status code from the underlying usb_control_msg() call.
*/
int usb_get_status(struct usb_device *dev, int type, int target, void *data)
{
int ret;
- u16 *status = kmalloc(sizeof(*status), GFP_KERNEL);
+ __le16 *status = kmalloc(sizeof(*status), GFP_KERNEL);
if (!status)
return -ENOMEM;
@@ -949,7 +954,12 @@ int usb_get_status(struct usb_device *dev, int type, int target, void *data)
USB_REQ_GET_STATUS, USB_DIR_IN | type, 0, target, status,
sizeof(*status), USB_CTRL_GET_TIMEOUT);
- *(u16 *)data = *status;
+ if (ret == 2) {
+ *(u16 *) data = le16_to_cpu(*status);
+ ret = 0;
+ } else if (ret >= 0) {
+ ret = -EIO;
+ }
kfree(status);
return ret;
}
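
A sketch of how a caller sees the new convention, zero on success with the status already in host byte order (not part of this patch; the self-powered check is only an illustration):

	#include <linux/usb.h>

	static int my_self_powered(struct usb_device *udev)
	{
		u16 status;
		int ret;

		ret = usb_get_status(udev, USB_RECIP_DEVICE, 0, &status);
		if (ret)	/* negative errno; short reads now map to -EIO */
			return ret;

		/* no le16_to_cpu() needed by the caller any more */
		return !!(status & (1 << USB_DEVICE_SELF_POWERED));
	}
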
@@ -975,7 +985,7 @@ EXPORT_SYMBOL_GPL(usb_get_status);
*
* This call is synchronous, and may not be used in an interrupt context.
*
- * Returns zero on success, or else the status code returned by the
+ * Return: Zero on success, or else the status code returned by the
* underlying usb_control_msg() call.
*/
int usb_clear_halt(struct usb_device *dev, int pipe)
@@ -1272,7 +1282,7 @@ void usb_enable_interface(struct usb_device *dev,
* endpoints in that interface; all such urbs must first be completed
* (perhaps forced by unlinking).
*
- * Returns zero on success, or else the status code returned by the
+ * Return: Zero on success, or else the status code returned by the
* underlying usb_control_msg() call.
*/
int usb_set_interface(struct usb_device *dev, int interface, int alternate)
@@ -1426,7 +1436,7 @@ EXPORT_SYMBOL_GPL(usb_set_interface);
*
* The caller must own the device lock.
*
- * Returns zero on success, else a negative error code.
+ * Return: Zero on success, else a negative error code.
*/
int usb_reset_configuration(struct usb_device *dev)
{
@@ -1968,7 +1978,7 @@ static void cancel_async_set_config(struct usb_device *udev)
* routine gets around the normal restrictions by using a work thread to
* submit the change-config request.
*
- * Returns 0 if the request was successfully queued, error code otherwise.
+ * Return: 0 if the request was successfully queued, error code otherwise.
* The caller has no way to know whether the queued request will eventually
* succeed.
*/
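
A brief sketch of calling the asynchronous helper documented above from a context where a synchronous configuration change would deadlock (the configuration value 2 is purely illustrative):

	#include <linux/usb.h>

	static void switch_config_async(struct usb_device *udev)
	{
		int ret;

		/* Queue the change; 0 only means the request was queued. */
		ret = usb_driver_set_configuration(udev, 2);
		if (ret)
			dev_err(&udev->dev, "queuing config change failed: %d\n", ret);
	}
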
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index d6b0fadf53e..51542f85239 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -23,8 +23,8 @@
static const struct attribute_group *port_dev_group[];
-static ssize_t show_port_connect_type(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t connect_type_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct usb_port *port_dev = to_usb_port(dev);
char *result;
@@ -46,8 +46,7 @@ static ssize_t show_port_connect_type(struct device *dev,
return sprintf(buf, "%s\n", result);
}
-static DEVICE_ATTR(connect_type, S_IRUGO, show_port_connect_type,
- NULL);
+static DEVICE_ATTR_RO(connect_type);
static struct attribute *port_dev_attrs[] = {
&dev_attr_connect_type.attr,
@@ -89,22 +88,19 @@ static int usb_port_runtime_resume(struct device *dev)
retval = usb_hub_set_port_power(hdev, hub, port1, true);
if (port_dev->child && !retval) {
/*
- * Wait for usb hub port to be reconnected in order to make
- * the resume procedure successful.
+ * Attempt to wait for usb hub port to be reconnected in order
+ * to make the resume procedure successful. The device may have
+ * disconnected while the port was powered off, so ignore the
+ * return status.
*/
retval = hub_port_debounce_be_connected(hub, port1);
- if (retval < 0) {
+ if (retval < 0)
dev_dbg(&port_dev->dev, "can't get reconnection after setting port power on, status %d\n",
retval);
- goto out;
- }
usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_ENABLE);
-
- /* Set return value to 0 if debounce successful */
retval = 0;
}
-out:
clear_bit(port1, hub->busy_bits);
usb_autopm_put_interface(intf);
return retval;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index a6359889507..5b44cd47da5 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -78,6 +78,12 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x04d8, 0x000c), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
+ /* CarrolTouch 4000U */
+ { USB_DEVICE(0x04e7, 0x0009), .driver_info = USB_QUIRK_RESET_RESUME },
+
+ /* CarrolTouch 4500U */
+ { USB_DEVICE(0x04e7, 0x0030), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* Samsung Android phone modem - ID conflict with SPH-I500 */
{ USB_DEVICE(0x04e8, 0x6601), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index d9284b998bd..6d2c8edb1ff 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -18,8 +18,8 @@
/* Active configuration fields */
#define usb_actconfig_show(field, format_string) \
-static ssize_t show_##field(struct device *dev, \
- struct device_attribute *attr, char *buf) \
+static ssize_t field##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
struct usb_device *udev; \
struct usb_host_config *actconfig; \
@@ -35,12 +35,12 @@ static ssize_t show_##field(struct device *dev, \
#define usb_actconfig_attr(field, format_string) \
usb_actconfig_show(field, format_string) \
- static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL);
+ static DEVICE_ATTR_RO(field)
-usb_actconfig_attr(bNumInterfaces, "%2d\n")
-usb_actconfig_attr(bmAttributes, "%2x\n")
+usb_actconfig_attr(bNumInterfaces, "%2d\n");
+usb_actconfig_attr(bmAttributes, "%2x\n");
-static ssize_t show_bMaxPower(struct device *dev,
+static ssize_t bMaxPower_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *udev;
@@ -52,9 +52,9 @@ static ssize_t show_bMaxPower(struct device *dev,
return 0;
return sprintf(buf, "%dmA\n", usb_get_max_power(udev, actconfig));
}
-static DEVICE_ATTR(bMaxPower, S_IRUGO, show_bMaxPower, NULL);
+static DEVICE_ATTR_RO(bMaxPower);
-static ssize_t show_configuration_string(struct device *dev,
+static ssize_t configuration_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_device *udev;
@@ -66,14 +66,14 @@ static ssize_t show_configuration_string(struct device *dev,
return 0;
return sprintf(buf, "%s\n", actconfig->string);
}
-static DEVICE_ATTR(configuration, S_IRUGO, show_configuration_string, NULL);
+static DEVICE_ATTR_RO(configuration);
/* configuration value is always present, and r/w */
usb_actconfig_show(bConfigurationValue, "%u\n");
-static ssize_t
-set_bConfigurationValue(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t bConfigurationValue_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
int config, value;
@@ -85,13 +85,12 @@ set_bConfigurationValue(struct device *dev, struct device_attribute *attr,
usb_unlock_device(udev);
return (value < 0) ? value : count;
}
-
static DEVICE_ATTR_IGNORE_LOCKDEP(bConfigurationValue, S_IRUGO | S_IWUSR,
- show_bConfigurationValue, set_bConfigurationValue);
+ bConfigurationValue_show, bConfigurationValue_store);
/* String fields */
#define usb_string_attr(name) \
-static ssize_t show_##name(struct device *dev, \
+static ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct usb_device *udev; \
@@ -103,14 +102,14 @@ static ssize_t show_##name(struct device *dev, \
usb_unlock_device(udev); \
return retval; \
} \
-static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL);
+static DEVICE_ATTR_RO(name)
usb_string_attr(product);
usb_string_attr(manufacturer);
usb_string_attr(serial);
-static ssize_t
-show_speed(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct usb_device *udev;
char *speed;
@@ -139,40 +138,40 @@ show_speed(struct device *dev, struct device_attribute *attr, char *buf)
}
return sprintf(buf, "%s\n", speed);
}
-static DEVICE_ATTR(speed, S_IRUGO, show_speed, NULL);
+static DEVICE_ATTR_RO(speed);
-static ssize_t
-show_busnum(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t busnum_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sprintf(buf, "%d\n", udev->bus->busnum);
}
-static DEVICE_ATTR(busnum, S_IRUGO, show_busnum, NULL);
+static DEVICE_ATTR_RO(busnum);
-static ssize_t
-show_devnum(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t devnum_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sprintf(buf, "%d\n", udev->devnum);
}
-static DEVICE_ATTR(devnum, S_IRUGO, show_devnum, NULL);
+static DEVICE_ATTR_RO(devnum);
-static ssize_t
-show_devpath(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t devpath_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sprintf(buf, "%s\n", udev->devpath);
}
-static DEVICE_ATTR(devpath, S_IRUGO, show_devpath, NULL);
+static DEVICE_ATTR_RO(devpath);
-static ssize_t
-show_version(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t version_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct usb_device *udev;
u16 bcdUSB;
@@ -181,30 +180,30 @@ show_version(struct device *dev, struct device_attribute *attr, char *buf)
bcdUSB = le16_to_cpu(udev->descriptor.bcdUSB);
return sprintf(buf, "%2x.%02x\n", bcdUSB >> 8, bcdUSB & 0xff);
}
-static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
+static DEVICE_ATTR_RO(version);
-static ssize_t
-show_maxchild(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t maxchild_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sprintf(buf, "%d\n", udev->maxchild);
}
-static DEVICE_ATTR(maxchild, S_IRUGO, show_maxchild, NULL);
+static DEVICE_ATTR_RO(maxchild);
-static ssize_t
-show_quirks(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t quirks_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sprintf(buf, "0x%x\n", udev->quirks);
}
-static DEVICE_ATTR(quirks, S_IRUGO, show_quirks, NULL);
+static DEVICE_ATTR_RO(quirks);
-static ssize_t
-show_avoid_reset_quirk(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t avoid_reset_quirk_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct usb_device *udev;
@@ -212,9 +211,9 @@ show_avoid_reset_quirk(struct device *dev, struct device_attribute *attr, char *
return sprintf(buf, "%d\n", !!(udev->quirks & USB_QUIRK_RESET));
}
-static ssize_t
-set_avoid_reset_quirk(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t avoid_reset_quirk_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
int val;
@@ -229,22 +228,20 @@ set_avoid_reset_quirk(struct device *dev, struct device_attribute *attr,
usb_unlock_device(udev);
return count;
}
+static DEVICE_ATTR_RW(avoid_reset_quirk);
-static DEVICE_ATTR(avoid_reset_quirk, S_IRUGO | S_IWUSR,
- show_avoid_reset_quirk, set_avoid_reset_quirk);
-
-static ssize_t
-show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct usb_device *udev;
udev = to_usb_device(dev);
return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
}
-static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
+static DEVICE_ATTR_RO(urbnum);
-static ssize_t
-show_removable(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct usb_device *udev;
char *state;
@@ -264,30 +261,29 @@ show_removable(struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%s\n", state);
}
-static DEVICE_ATTR(removable, S_IRUGO, show_removable, NULL);
+static DEVICE_ATTR_RO(removable);
-static ssize_t
-show_ltm_capable(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t ltm_capable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
if (usb_device_supports_ltm(to_usb_device(dev)))
return sprintf(buf, "%s\n", "yes");
return sprintf(buf, "%s\n", "no");
}
-static DEVICE_ATTR(ltm_capable, S_IRUGO, show_ltm_capable, NULL);
+static DEVICE_ATTR_RO(ltm_capable);
#ifdef CONFIG_PM
-static ssize_t
-show_persist(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t persist_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct usb_device *udev = to_usb_device(dev);
return sprintf(buf, "%d\n", udev->persist_enabled);
}
-static ssize_t
-set_persist(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t persist_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
int value;
@@ -304,8 +300,7 @@ set_persist(struct device *dev, struct device_attribute *attr,
usb_unlock_device(udev);
return count;
}
-
-static DEVICE_ATTR(persist, S_IRUGO | S_IWUSR, show_persist, set_persist);
+static DEVICE_ATTR_RW(persist);
static int add_persist_attributes(struct device *dev)
{
@@ -340,17 +335,15 @@ static void remove_persist_attributes(struct device *dev)
#ifdef CONFIG_PM_RUNTIME
-static ssize_t
-show_connected_duration(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t connected_duration_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct usb_device *udev = to_usb_device(dev);
return sprintf(buf, "%u\n",
jiffies_to_msecs(jiffies - udev->connect_time));
}
-
-static DEVICE_ATTR(connected_duration, S_IRUGO, show_connected_duration, NULL);
+static DEVICE_ATTR_RO(connected_duration);
/*
* If the device is resumed, the last time the device was suspended has
@@ -359,9 +352,8 @@ static DEVICE_ATTR(connected_duration, S_IRUGO, show_connected_duration, NULL);
*
* If the device is suspended, the active_duration is up-to-date.
*/
-static ssize_t
-show_active_duration(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t active_duration_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct usb_device *udev = to_usb_device(dev);
int duration;
@@ -372,18 +364,17 @@ show_active_duration(struct device *dev, struct device_attribute *attr,
duration = jiffies_to_msecs(udev->active_duration);
return sprintf(buf, "%u\n", duration);
}
+static DEVICE_ATTR_RO(active_duration);
-static DEVICE_ATTR(active_duration, S_IRUGO, show_active_duration, NULL);
-
-static ssize_t
-show_autosuspend(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t autosuspend_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", dev->power.autosuspend_delay / 1000);
}
-static ssize_t
-set_autosuspend(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t autosuspend_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
int value;
@@ -394,9 +385,7 @@ set_autosuspend(struct device *dev, struct device_attribute *attr,
pm_runtime_set_autosuspend_delay(dev, value * 1000);
return count;
}
-
-static DEVICE_ATTR(autosuspend, S_IRUGO | S_IWUSR,
- show_autosuspend, set_autosuspend);
+static DEVICE_ATTR_RW(autosuspend);
static const char on_string[] = "on";
static const char auto_string[] = "auto";
@@ -411,8 +400,8 @@ static void warn_level(void) {
}
}
-static ssize_t
-show_level(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t level_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct usb_device *udev = to_usb_device(dev);
const char *p = auto_string;
@@ -423,9 +412,8 @@ show_level(struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%s\n", p);
}
-static ssize_t
-set_level(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t level_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
int len = count;
@@ -453,12 +441,10 @@ set_level(struct device *dev, struct device_attribute *attr,
usb_unlock_device(udev);
return rc;
}
+static DEVICE_ATTR_RW(level);
-static DEVICE_ATTR(level, S_IRUGO | S_IWUSR, show_level, set_level);
-
-static ssize_t
-show_usb2_hardware_lpm(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t usb2_hardware_lpm_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct usb_device *udev = to_usb_device(dev);
const char *p;
@@ -471,9 +457,9 @@ show_usb2_hardware_lpm(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%s\n", p);
}
-static ssize_t
-set_usb2_hardware_lpm(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t usb2_hardware_lpm_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
bool value;
@@ -493,21 +479,19 @@ set_usb2_hardware_lpm(struct device *dev, struct device_attribute *attr,
return ret;
}
+static DEVICE_ATTR_RW(usb2_hardware_lpm);
-static DEVICE_ATTR(usb2_hardware_lpm, S_IRUGO | S_IWUSR, show_usb2_hardware_lpm,
- set_usb2_hardware_lpm);
-
-static ssize_t
-show_usb2_lpm_l1_timeout(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t usb2_lpm_l1_timeout_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct usb_device *udev = to_usb_device(dev);
return sprintf(buf, "%d\n", udev->l1_params.timeout);
}
-static ssize_t
-set_usb2_lpm_l1_timeout(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t usb2_lpm_l1_timeout_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
u16 timeout;
@@ -519,21 +503,18 @@ set_usb2_lpm_l1_timeout(struct device *dev, struct device_attribute *attr,
return count;
}
+static DEVICE_ATTR_RW(usb2_lpm_l1_timeout);
-static DEVICE_ATTR(usb2_lpm_l1_timeout, S_IRUGO | S_IWUSR,
- show_usb2_lpm_l1_timeout, set_usb2_lpm_l1_timeout);
-
-static ssize_t
-show_usb2_lpm_besl(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t usb2_lpm_besl_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct usb_device *udev = to_usb_device(dev);
return sprintf(buf, "%d\n", udev->l1_params.besl);
}
-static ssize_t
-set_usb2_lpm_besl(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t usb2_lpm_besl_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
u8 besl;
@@ -545,9 +526,7 @@ set_usb2_lpm_besl(struct device *dev, struct device_attribute *attr,
return count;
}
-
-static DEVICE_ATTR(usb2_lpm_besl, S_IRUGO | S_IWUSR,
- show_usb2_lpm_besl, set_usb2_lpm_besl);
+static DEVICE_ATTR_RW(usb2_lpm_besl);
static struct attribute *usb2_hardware_lpm_attr[] = {
&dev_attr_usb2_hardware_lpm.attr,
@@ -604,7 +583,7 @@ static void remove_power_attributes(struct device *dev)
/* Descriptor fields */
#define usb_descriptor_attr_le16(field, format_string) \
static ssize_t \
-show_##field(struct device *dev, struct device_attribute *attr, \
+field##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct usb_device *udev; \
@@ -613,15 +592,15 @@ show_##field(struct device *dev, struct device_attribute *attr, \
return sprintf(buf, format_string, \
le16_to_cpu(udev->descriptor.field)); \
} \
-static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL);
+static DEVICE_ATTR_RO(field)
-usb_descriptor_attr_le16(idVendor, "%04x\n")
-usb_descriptor_attr_le16(idProduct, "%04x\n")
-usb_descriptor_attr_le16(bcdDevice, "%04x\n")
+usb_descriptor_attr_le16(idVendor, "%04x\n");
+usb_descriptor_attr_le16(idProduct, "%04x\n");
+usb_descriptor_attr_le16(bcdDevice, "%04x\n");
#define usb_descriptor_attr(field, format_string) \
static ssize_t \
-show_##field(struct device *dev, struct device_attribute *attr, \
+field##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct usb_device *udev; \
@@ -629,34 +608,31 @@ show_##field(struct device *dev, struct device_attribute *attr, \
udev = to_usb_device(dev); \
return sprintf(buf, format_string, udev->descriptor.field); \
} \
-static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL);
-
-usb_descriptor_attr(bDeviceClass, "%02x\n")
-usb_descriptor_attr(bDeviceSubClass, "%02x\n")
-usb_descriptor_attr(bDeviceProtocol, "%02x\n")
-usb_descriptor_attr(bNumConfigurations, "%d\n")
-usb_descriptor_attr(bMaxPacketSize0, "%d\n")
+static DEVICE_ATTR_RO(field)
+usb_descriptor_attr(bDeviceClass, "%02x\n");
+usb_descriptor_attr(bDeviceSubClass, "%02x\n");
+usb_descriptor_attr(bDeviceProtocol, "%02x\n");
+usb_descriptor_attr(bNumConfigurations, "%d\n");
+usb_descriptor_attr(bMaxPacketSize0, "%d\n");
/* show if the device is authorized (1) or not (0) */
-static ssize_t usb_dev_authorized_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t authorized_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct usb_device *usb_dev = to_usb_device(dev);
return snprintf(buf, PAGE_SIZE, "%u\n", usb_dev->authorized);
}
-
/*
* Authorize a device to be used in the system
*
* Writing a 0 deauthorizes the device, writing a 1 authorizes it.
*/
-static ssize_t usb_dev_authorized_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
+static ssize_t authorized_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t size)
{
ssize_t result;
struct usb_device *usb_dev = to_usb_device(dev);
@@ -670,14 +646,12 @@ static ssize_t usb_dev_authorized_store(struct device *dev,
result = usb_authorize_device(usb_dev);
return result < 0? result : size;
}
-
-static DEVICE_ATTR_IGNORE_LOCKDEP(authorized, 0644,
- usb_dev_authorized_show, usb_dev_authorized_store);
+static DEVICE_ATTR_IGNORE_LOCKDEP(authorized, S_IRUGO | S_IWUSR,
+ authorized_show, authorized_store);
/* "Safely remove a device" */
-static ssize_t usb_remove_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
int rc = 0;
@@ -694,7 +668,7 @@ static ssize_t usb_remove_store(struct device *dev,
usb_unlock_device(udev);
return rc;
}
-static DEVICE_ATTR_IGNORE_LOCKDEP(remove, 0200, NULL, usb_remove_store);
+static DEVICE_ATTR_IGNORE_LOCKDEP(remove, S_IWUSR, NULL, remove_store);
static struct attribute *dev_attrs[] = {
@@ -853,7 +827,7 @@ void usb_remove_sysfs_dev_files(struct usb_device *udev)
/* Interface Accociation Descriptor fields */
#define usb_intf_assoc_attr(field, format_string) \
static ssize_t \
-show_iad_##field(struct device *dev, struct device_attribute *attr, \
+iad_##field##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
@@ -861,18 +835,18 @@ show_iad_##field(struct device *dev, struct device_attribute *attr, \
return sprintf(buf, format_string, \
intf->intf_assoc->field); \
} \
-static DEVICE_ATTR(iad_##field, S_IRUGO, show_iad_##field, NULL);
+static DEVICE_ATTR_RO(iad_##field)
-usb_intf_assoc_attr(bFirstInterface, "%02x\n")
-usb_intf_assoc_attr(bInterfaceCount, "%02d\n")
-usb_intf_assoc_attr(bFunctionClass, "%02x\n")
-usb_intf_assoc_attr(bFunctionSubClass, "%02x\n")
-usb_intf_assoc_attr(bFunctionProtocol, "%02x\n")
+usb_intf_assoc_attr(bFirstInterface, "%02x\n");
+usb_intf_assoc_attr(bInterfaceCount, "%02d\n");
+usb_intf_assoc_attr(bFunctionClass, "%02x\n");
+usb_intf_assoc_attr(bFunctionSubClass, "%02x\n");
+usb_intf_assoc_attr(bFunctionProtocol, "%02x\n");
/* Interface fields */
#define usb_intf_attr(field, format_string) \
static ssize_t \
-show_##field(struct device *dev, struct device_attribute *attr, \
+field##_show(struct device *dev, struct device_attribute *attr, \
char *buf) \
{ \
struct usb_interface *intf = to_usb_interface(dev); \
@@ -880,17 +854,17 @@ show_##field(struct device *dev, struct device_attribute *attr, \
return sprintf(buf, format_string, \
intf->cur_altsetting->desc.field); \
} \
-static DEVICE_ATTR(field, S_IRUGO, show_##field, NULL);
+static DEVICE_ATTR_RO(field)
-usb_intf_attr(bInterfaceNumber, "%02x\n")
-usb_intf_attr(bAlternateSetting, "%2d\n")
-usb_intf_attr(bNumEndpoints, "%02x\n")
-usb_intf_attr(bInterfaceClass, "%02x\n")
-usb_intf_attr(bInterfaceSubClass, "%02x\n")
-usb_intf_attr(bInterfaceProtocol, "%02x\n")
+usb_intf_attr(bInterfaceNumber, "%02x\n");
+usb_intf_attr(bAlternateSetting, "%2d\n");
+usb_intf_attr(bNumEndpoints, "%02x\n");
+usb_intf_attr(bInterfaceClass, "%02x\n");
+usb_intf_attr(bInterfaceSubClass, "%02x\n");
+usb_intf_attr(bInterfaceProtocol, "%02x\n");
-static ssize_t show_interface_string(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t interface_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct usb_interface *intf;
char *string;
@@ -903,10 +877,10 @@ static ssize_t show_interface_string(struct device *dev,
return 0;
return sprintf(buf, "%s\n", string);
}
-static DEVICE_ATTR(interface, S_IRUGO, show_interface_string, NULL);
+static DEVICE_ATTR_RO(interface);
-static ssize_t show_modalias(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct usb_interface *intf;
struct usb_device *udev;
@@ -929,10 +903,11 @@ static ssize_t show_modalias(struct device *dev,
alt->desc.bInterfaceProtocol,
alt->desc.bInterfaceNumber);
}
-static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL);
+static DEVICE_ATTR_RO(modalias);
-static ssize_t show_supports_autosuspend(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t supports_autosuspend_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct usb_interface *intf;
struct usb_device *udev;
@@ -952,7 +927,7 @@ static ssize_t show_supports_autosuspend(struct device *dev,
return ret;
}
-static DEVICE_ATTR(supports_autosuspend, S_IRUGO, show_supports_autosuspend, NULL);
+static DEVICE_ATTR_RO(supports_autosuspend);
static struct attribute *intf_attrs[] = {
&dev_attr_bInterfaceNumber.attr,
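
The sysfs renames above exist so that the DEVICE_ATTR_RO()/DEVICE_ATTR_RW() helpers, which glue an attribute to functions named <attr>_show and <attr>_store, can replace the open-coded DEVICE_ATTR() lines. A hedged sketch with an invented attribute name:

	#include <linux/device.h>

	/* Hypothetical read-only attribute named "example". */
	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 42);	/* placeholder value */
	}
	static DEVICE_ATTR_RO(example);	/* declares dev_attr_example with mode S_IRUGO */
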
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 16927fa88fb..c12bc790a6a 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -7,6 +7,7 @@
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/usb/hcd.h>
+#include <linux/scatterlist.h>
#define to_urb(d) container_of(d, struct urb, kref)
@@ -54,12 +55,12 @@ EXPORT_SYMBOL_GPL(usb_init_urb);
* Creates an urb for the USB driver to use, initializes a few internal
* structures, incrementes the usage counter, and returns a pointer to it.
*
- * If no memory is available, NULL is returned.
- *
* If the driver want to use this urb for interrupt, control, or bulk
* endpoints, pass '0' as the number of iso packets.
*
* The driver must call usb_free_urb() when it is finished with the urb.
+ *
+ * Return: A pointer to the new urb, or %NULL if no memory is available.
*/
struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags)
{
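
For orientation, a sketch of the allocate/fill/submit cycle these comments describe (not from the patch; the endpoint number and completion handler are hypothetical):

	#include <linux/usb.h>

	static void my_complete(struct urb *urb)
	{
		/* urb->status and urb->actual_length are valid here */
		usb_free_urb(urb);
	}

	static int queue_bulk_out(struct usb_device *udev, void *buf, int len)
	{
		struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);	/* 0 iso packets */
		int ret;

		if (!urb)
			return -ENOMEM;	/* NULL: no memory available */
		usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 0x01),
				  buf, len, my_complete, NULL);
		ret = usb_submit_urb(urb, GFP_KERNEL);	/* 0 or negative errno */
		if (ret)
			usb_free_urb(urb);
		return ret;
	}
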
@@ -102,7 +103,7 @@ EXPORT_SYMBOL_GPL(usb_free_urb);
* host controller driver. This allows proper reference counting to happen
* for urbs.
*
- * A pointer to the urb with the incremented reference counter is returned.
+ * Return: A pointer to the urb with the incremented reference counter.
*/
struct urb *usb_get_urb(struct urb *urb)
{
@@ -199,13 +200,12 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb);
* the particular kind of transfer, although they will not initialize
* any transfer flags.
*
- * Successful submissions return 0; otherwise this routine returns a
- * negative error number. If the submission is successful, the complete()
- * callback from the URB will be called exactly once, when the USB core and
- * Host Controller Driver (HCD) are finished with the URB. When the completion
- * function is called, control of the URB is returned to the device
- * driver which issued the request. The completion handler may then
- * immediately free or reuse that URB.
+ * If the submission is successful, the complete() callback from the URB
+ * will be called exactly once, when the USB core and Host Controller Driver
+ * (HCD) are finished with the URB. When the completion function is called,
+ * control of the URB is returned to the device driver which issued the
+ * request. The completion handler may then immediately free or reuse that
+ * URB.
*
* With few exceptions, USB device drivers should never access URB fields
* provided by usbcore or the HCD until its complete() is called.
@@ -240,6 +240,9 @@ EXPORT_SYMBOL_GPL(usb_unanchor_urb);
* that are standardized in the USB 2.0 specification. For bulk
* endpoints, a synchronous usb_bulk_msg() call is available.
*
+ * Return:
+ * 0 on successful submissions. A negative error number otherwise.
+ *
* Request Queuing:
*
* URBs may be submitted to endpoints before previous ones complete, to
@@ -413,6 +416,14 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
urb->iso_frame_desc[n].status = -EXDEV;
urb->iso_frame_desc[n].actual_length = 0;
}
+ } else if (urb->num_sgs && !urb->dev->bus->no_sg_constraint &&
+ dev->speed != USB_SPEED_WIRELESS) {
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(urb->sg, sg, urb->num_sgs - 1, i)
+ if (sg->length % max)
+ return -EINVAL;
}
/* the I/O buffer must be mapped/unmapped, except when length=0 */
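
The new check above rejects scatter-gather URBs in which any element but the last is not a multiple of the endpoint's maximum packet size (on host controllers without native sg support). A sketch of a compliant list, assuming a 512-byte bulk maxpacket and caller-provided buffers:

	#include <linux/scatterlist.h>
	#include <linux/usb.h>

	static void fill_sg_list(struct urb *urb, struct scatterlist sg[3],
				 void *a, void *b, void *tail)
	{
		sg_init_table(sg, 3);
		sg_set_buf(&sg[0], a, 4096);	/* multiple of 512 */
		sg_set_buf(&sg[1], b, 4096);	/* multiple of 512 */
		sg_set_buf(&sg[2], tail, 100);	/* only the last entry may be short */

		urb->sg = sg;
		urb->num_sgs = 3;
		urb->transfer_buffer_length = 4096 + 4096 + 100;
	}
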
@@ -564,6 +575,9 @@ EXPORT_SYMBOL_GPL(usb_submit_urb);
* particular, when a driver calls this routine, it must insure that the
* completion handler cannot deallocate the URB.
*
+ * Return: -EINPROGRESS on success. See description for other values on
+ * failure.
+ *
* Unlinking and Endpoint Queues:
*
* [The behaviors and guarantees described below do not apply to virtual
@@ -838,6 +852,8 @@ EXPORT_SYMBOL_GPL(usb_unlink_anchored_urbs);
*
* Call this is you want to be sure all an anchor's
* URBs have finished
+ *
+ * Return: Non-zero if the anchor became unused. Zero on timeout.
*/
int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
unsigned int timeout)
@@ -851,8 +867,11 @@ EXPORT_SYMBOL_GPL(usb_wait_anchor_empty_timeout);
* usb_get_from_anchor - get an anchor's oldest urb
* @anchor: the anchor whose urb you want
*
- * this will take the oldest urb from an anchor,
+ * This will take the oldest urb from an anchor,
* unanchor and return it
+ *
+ * Return: The oldest urb from @anchor, or %NULL if @anchor has no
+ * urbs associated with it.
*/
struct urb *usb_get_from_anchor(struct usb_anchor *anchor)
{
@@ -901,7 +920,7 @@ EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs);
* usb_anchor_empty - is an anchor empty
* @anchor: the anchor you want to query
*
- * returns 1 if the anchor has no urbs associated with it
+ * Return: 1 if the anchor has no urbs associated with it.
*/
int usb_anchor_empty(struct usb_anchor *anchor)
{
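
A sketch of the anchor calls whose return values are documented in this file, assuming a driver-private structure that anchors every submitted URB:

	#include <linux/usb.h>

	struct my_data {
		struct usb_anchor submitted;	/* init_usb_anchor() in probe() */
	};

	static void my_drain(struct my_data *priv)
	{
		/* non-zero return: anchor emptied before the 1000 ms timeout */
		if (!usb_wait_anchor_empty_timeout(&priv->submitted, 1000))
			usb_kill_anchored_urbs(&priv->submitted);
	}
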
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 7dad603dde4..0a6ee2e70b2 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -68,6 +68,8 @@ MODULE_PARM_DESC(autosuspend, "default autosuspend delay");
* @alt_num: alternate interface setting number to search for.
*
* Search the configuration's interface cache for the given alt setting.
+ *
+ * Return: The alternate setting, if found. %NULL otherwise.
*/
struct usb_host_interface *usb_find_alt_setting(
struct usb_host_config *config,
@@ -103,8 +105,7 @@ EXPORT_SYMBOL_GPL(usb_find_alt_setting);
* @ifnum: the desired interface
*
* This walks the device descriptor for the currently active configuration
- * and returns a pointer to the interface with that particular interface
- * number, or null.
+ * to find the interface object with the particular interface number.
*
* Note that configuration descriptors are not required to assign interface
* numbers sequentially, so that it would be incorrect to assume that
@@ -115,6 +116,9 @@ EXPORT_SYMBOL_GPL(usb_find_alt_setting);
*
* Don't call this function unless you are bound to one of the interfaces
* on this device or you have locked the device!
+ *
+ * Return: A pointer to the interface that has @ifnum as interface number,
+ * if found. %NULL otherwise.
*/
struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev,
unsigned ifnum)
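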
@@ -139,8 +143,7 @@ EXPORT_SYMBOL_GPL(usb_ifnum_to_if);
* @altnum: the desired alternate setting number
*
* This searches the altsetting array of the specified interface for
- * an entry with the correct bAlternateSetting value and returns a pointer
- * to that entry, or null.
+ * an entry with the correct bAlternateSetting value.
*
* Note that altsettings need not be stored sequentially by number, so
* it would be incorrect to assume that the first altsetting entry in
@@ -149,6 +152,9 @@ EXPORT_SYMBOL_GPL(usb_ifnum_to_if);
*
* Don't call this function unless you are bound to the intf interface
* or you have locked the device!
+ *
+ * Return: A pointer to the entry of the altsetting array of @intf that
+ * has @altnum as the alternate setting number. %NULL if not found.
*/
struct usb_host_interface *usb_altnum_to_altsetting(
const struct usb_interface *intf,
@@ -191,6 +197,8 @@ static int __find_interface(struct device *dev, void *data)
* This walks the bus device list and returns a pointer to the interface
* with the matching minor and driver. Note, this only works for devices
* that share the USB major number.
+ *
+ * Return: A pointer to the interface with the matching minor and driver,
*/
struct usb_interface *usb_find_interface(struct usb_driver *drv, int minor)
{
@@ -390,6 +398,9 @@ static unsigned usb_bus_is_wusb(struct usb_bus *bus)
* controllers) should ever call this.
*
* This call may not be used in a non-sleeping context.
+ *
+ * Return: On success, a pointer to the allocated usb device. %NULL on
+ * failure.
*/
struct usb_device *usb_alloc_dev(struct usb_device *parent,
struct usb_bus *bus, unsigned port1)
@@ -501,7 +512,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
* their probe() methods, when they bind to an interface, and release
* them by calling usb_put_dev(), in their disconnect() methods.
*
- * A pointer to the device with the incremented reference counter is returned.
+ * Return: A pointer to the device with the incremented reference counter.
*/
struct usb_device *usb_get_dev(struct usb_device *dev)
{
@@ -535,8 +546,7 @@ EXPORT_SYMBOL_GPL(usb_put_dev);
* their probe() methods, when they bind to an interface, and release
* them by calling usb_put_intf(), in their disconnect() methods.
*
- * A pointer to the interface with the incremented reference counter is
- * returned.
+ * Return: A pointer to the interface with the incremented reference counter.
*/
struct usb_interface *usb_get_intf(struct usb_interface *intf)
{
@@ -589,7 +599,7 @@ EXPORT_SYMBOL_GPL(usb_put_intf);
* disconnect; in some drivers (such as usb-storage) the disconnect()
* or suspend() method will block waiting for a device reset to complete.
*
- * Returns a negative error code for failure, otherwise 0.
+ * Return: A negative error code for failure, otherwise 0.
*/
int usb_lock_device_for_reset(struct usb_device *udev,
const struct usb_interface *iface)
@@ -628,14 +638,15 @@ EXPORT_SYMBOL_GPL(usb_lock_device_for_reset);
* usb_get_current_frame_number - return current bus frame number
* @dev: the device whose bus is being queried
*
- * Returns the current frame number for the USB host controller
- * used with the given USB device. This can be used when scheduling
+ * Return: The current frame number for the USB host controller used
+ * with the given USB device. This can be used when scheduling
* isochronous requests.
*
- * Note that different kinds of host controller have different
- * "scheduling horizons". While one type might support scheduling only
- * 32 frames into the future, others could support scheduling up to
- * 1024 frames into the future.
+ * Note: Different kinds of host controller have different "scheduling
+ * horizons". While one type might support scheduling only 32 frames
+ * into the future, others could support scheduling up to 1024 frames
+ * into the future.
+ *
*/
int usb_get_current_frame_number(struct usb_device *dev)
{
@@ -685,11 +696,12 @@ EXPORT_SYMBOL_GPL(__usb_get_extra_descriptor);
* @mem_flags: affect whether allocation may block
* @dma: used to return DMA address of buffer
*
- * Return value is either null (indicating no buffer could be allocated), or
- * the cpu-space pointer to a buffer that may be used to perform DMA to the
+ * Return: Either %NULL (indicating no buffer could be allocated), or the
+ * cpu-space pointer to a buffer that may be used to perform DMA to the
* specified device. Such cpu-space buffers are returned along with the DMA
* address (through the pointer provided).
*
+ * Note:
* These buffers are used with URB_NO_xxx_DMA_MAP set in urb->transfer_flags
* to avoid behaviors like using "DMA bounce buffers", or thrashing IOMMU
* hardware during URB completion/resubmit. The implementation varies between
@@ -735,17 +747,18 @@ EXPORT_SYMBOL_GPL(usb_free_coherent);
* usb_buffer_map - create DMA mapping(s) for an urb
* @urb: urb whose transfer_buffer/setup_packet will be mapped
*
- * Return value is either null (indicating no buffer could be mapped), or
- * the parameter. URB_NO_TRANSFER_DMA_MAP is
- * added to urb->transfer_flags if the operation succeeds. If the device
- * is connected to this system through a non-DMA controller, this operation
- * always succeeds.
+ * URB_NO_TRANSFER_DMA_MAP is added to urb->transfer_flags if the operation
+ * succeeds. If the device is connected to this system through a non-DMA
+ * controller, this operation always succeeds.
*
* This call would normally be used for an urb which is reused, perhaps
* as the target of a large periodic transfer, with usb_buffer_dmasync()
* calls to synchronize memory and dma state.
*
* Reverse the effect of this call with usb_buffer_unmap().
+ *
+ * Return: Either %NULL (indicating no buffer could be mapped), or @urb.
+ *
*/
#if 0
struct urb *usb_buffer_map(struct urb *urb)
@@ -850,9 +863,10 @@ EXPORT_SYMBOL_GPL(usb_buffer_unmap);
* @sg: the scatterlist to map
* @nents: the number of entries in the scatterlist
*
- * Return value is either < 0 (indicating no buffers could be mapped), or
- * the number of DMA mapping array entries in the scatterlist.
+ * Return: Either < 0 (indicating no buffers could be mapped), or the
+ * number of DMA mapping array entries in the scatterlist.
*
+ * Note:
* The caller is responsible for placing the resulting DMA addresses from
* the scatterlist into URB transfer buffer pointers, and for setting the
* URB_NO_TRANSFER_DMA_MAP transfer flag in each of those URBs.
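
A hedged sketch of the coherent-buffer helpers described above; the 64-byte size is arbitrary and the URB is assumed to be managed elsewhere:

	#include <linux/usb.h>

	static int attach_dma_buffer(struct usb_device *udev, struct urb *urb)
	{
		dma_addr_t dma;
		void *buf = usb_alloc_coherent(udev, 64, GFP_KERNEL, &dma);

		if (!buf)
			return -ENOMEM;	/* NULL: no buffer could be allocated */

		urb->transfer_buffer = buf;
		urb->transfer_dma = dma;
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		return 0;
		/* release later with usb_free_coherent(udev, 64, buf, dma); */
	}
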
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 757aa18027d..f969ea266ac 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -1,6 +1,7 @@
config USB_DWC3
tristate "DesignWare USB3 DRD Core Support"
- depends on (USB || USB_GADGET) && GENERIC_HARDIRQS
+ depends on (USB || USB_GADGET) && GENERIC_HARDIRQS && HAS_DMA
+ depends on EXTCON
select USB_XHCI_PLATFORM if USB_SUPPORT && USB_XHCI_HCD
help
Say Y or M here if your system has a Dual Role SuperSpeed
@@ -40,6 +41,38 @@ config USB_DWC3_DUAL_ROLE
endchoice
+comment "Platform Glue Driver Support"
+
+config USB_DWC3_OMAP
+ tristate "Texas Instruments OMAP5 and similar Platforms"
+ depends on EXTCON
+ default USB_DWC3
+ help
+ Some platforms from Texas Instruments like OMAP5, DRA7xxx and
+ AM437x use this IP for USB2/3 functionality.
+
+ Say 'Y' or 'M' here if you have one such device
+
+config USB_DWC3_EXYNOS
+ tristate "Samsung Exynos Platform"
+ default USB_DWC3
+ help
+ Recent Exynos5 SoCs ship with one DesignWare Core USB3 IP inside,
+ say 'Y' or 'M' if you have one such device.
+
+config USB_DWC3_PCI
+ tristate "PCIe-based Platforms"
+ depends on PCI
+ default USB_DWC3
+ help
+	  If you're using the DesignWare Core IP on a PCIe-based platform, please say
+ 'Y' or 'M' here.
+
+ One such PCIe-based platform is Synopsys' PCIe HAPS model of
+ this IP.
+
+comment "Debugging features"
+
config USB_DWC3_DEBUG
bool "Enable Debugging Messages"
help
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index 0c7ac92582b..dd1760145c4 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -27,15 +27,8 @@ endif
# the entire driver (with all its glue layers) on several architectures
# and make sure it compiles fine. This will also help with allmodconfig
# and allyesconfig builds.
-#
-# The only exception is the PCI glue layer, but that's only because
-# PCI doesn't provide nops if CONFIG_PCI isn't enabled.
##
-obj-$(CONFIG_USB_DWC3) += dwc3-omap.o
-obj-$(CONFIG_USB_DWC3) += dwc3-exynos.o
-
-ifneq ($(CONFIG_PCI),)
- obj-$(CONFIG_USB_DWC3) += dwc3-pci.o
-endif
-
+obj-$(CONFIG_USB_DWC3_OMAP) += dwc3-omap.o
+obj-$(CONFIG_USB_DWC3_EXYNOS) += dwc3-exynos.o
+obj-$(CONFIG_USB_DWC3_PCI) += dwc3-pci.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index c35d49d39b7..474162e9d01 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -6,34 +6,17 @@
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
*
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
@@ -50,20 +33,18 @@
#include <linux/dma-mapping.h>
#include <linux/of.h>
-#include <linux/usb/otg.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/of.h>
+#include <linux/usb/otg.h>
+#include "platform_data.h"
#include "core.h"
#include "gadget.h"
#include "io.h"
#include "debug.h"
-static char *maximum_speed = "super";
-module_param(maximum_speed, charp, 0);
-MODULE_PARM_DESC(maximum_speed, "Maximum supported speed.");
-
/* -------------------------------------------------------------------------- */
void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
@@ -236,7 +217,7 @@ static int dwc3_event_buffers_setup(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n),
upper_32_bits(evt->dma));
dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n),
- evt->length & 0xffff);
+ DWC3_GEVNTSIZ_SIZE(evt->length));
dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(n), 0);
}
@@ -255,7 +236,8 @@ static void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(n), 0);
dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n), 0);
- dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n), 0);
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(n), DWC3_GEVNTSIZ_INTMASK
+ | DWC3_GEVNTSIZ_SIZE(0));
dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(n), 0);
}
}
@@ -367,18 +349,17 @@ static void dwc3_core_exit(struct dwc3 *dwc)
static int dwc3_probe(struct platform_device *pdev)
{
- struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct dwc3_platform_data *pdata = dev_get_platdata(dev);
+ struct device_node *node = dev->of_node;
struct resource *res;
struct dwc3 *dwc;
- struct device *dev = &pdev->dev;
int ret = -ENOMEM;
void __iomem *regs;
void *mem;
- u8 mode;
-
mem = devm_kzalloc(dev, sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL);
if (!mem) {
dev_err(dev, "not enough memory\n");
@@ -402,38 +383,32 @@ static int dwc3_probe(struct platform_device *pdev)
dev_err(dev, "missing memory resource\n");
return -ENODEV;
}
- dwc->xhci_resources[0].start = res->start;
- dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
- DWC3_XHCI_REGS_END;
- dwc->xhci_resources[0].flags = res->flags;
- dwc->xhci_resources[0].name = res->name;
-
- /*
- * Request memory region but exclude xHCI regs,
- * since it will be requested by the xhci-plat driver.
- */
- res = devm_request_mem_region(dev, res->start + DWC3_GLOBALS_REGS_START,
- resource_size(res) - DWC3_GLOBALS_REGS_START,
- dev_name(dev));
- if (!res) {
- dev_err(dev, "can't request mem region\n");
- return -ENOMEM;
- }
-
- regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
- if (!regs) {
- dev_err(dev, "ioremap failed\n");
- return -ENOMEM;
- }
if (node) {
+ dwc->maximum_speed = of_usb_get_maximum_speed(node);
+
dwc->usb2_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0);
dwc->usb3_phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 1);
+
+ dwc->needs_fifo_resize = of_property_read_bool(node, "tx-fifo-resize");
+ dwc->dr_mode = of_usb_get_dr_mode(node);
+ } else if (pdata) {
+ dwc->maximum_speed = pdata->maximum_speed;
+
+ dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
+ dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
+
+ dwc->needs_fifo_resize = pdata->tx_fifo_resize;
+ dwc->dr_mode = pdata->dr_mode;
} else {
dwc->usb2_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
dwc->usb3_phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB3);
}
+ /* default to superspeed if no maximum_speed passed */
+ if (dwc->maximum_speed == USB_SPEED_UNKNOWN)
+ dwc->maximum_speed = USB_SPEED_SUPER;
+
if (IS_ERR(dwc->usb2_phy)) {
ret = PTR_ERR(dwc->usb2_phy);
@@ -450,7 +425,7 @@ static int dwc3_probe(struct platform_device *pdev)
}
if (IS_ERR(dwc->usb3_phy)) {
- ret = PTR_ERR(dwc->usb2_phy);
+ ret = PTR_ERR(dwc->usb3_phy);
/*
* if -ENXIO is returned, it means PHY layer wasn't
@@ -464,6 +439,22 @@ static int dwc3_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
}
+ dwc->xhci_resources[0].start = res->start;
+ dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
+ DWC3_XHCI_REGS_END;
+ dwc->xhci_resources[0].flags = res->flags;
+ dwc->xhci_resources[0].name = res->name;
+
+ res->start += DWC3_GLOBALS_REGS_START;
+
+ /*
+ * Request memory region but exclude xHCI regs,
+ * since it will be requested by the xhci-plat driver.
+ */
+ regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
usb_phy_set_suspend(dwc->usb2_phy, 0);
usb_phy_set_suspend(dwc->usb3_phy, 0);
@@ -478,19 +469,6 @@ static int dwc3_probe(struct platform_device *pdev)
dev->dma_parms = dev->parent->dma_parms;
dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
- if (!strncmp("super", maximum_speed, 5))
- dwc->maximum_speed = DWC3_DCFG_SUPERSPEED;
- else if (!strncmp("high", maximum_speed, 4))
- dwc->maximum_speed = DWC3_DCFG_HIGHSPEED;
- else if (!strncmp("full", maximum_speed, 4))
- dwc->maximum_speed = DWC3_DCFG_FULLSPEED1;
- else if (!strncmp("low", maximum_speed, 3))
- dwc->maximum_speed = DWC3_DCFG_LOWSPEED;
- else
- dwc->maximum_speed = DWC3_DCFG_SUPERSPEED;
-
- dwc->needs_fifo_resize = of_property_read_bool(node, "tx-fifo-resize");
-
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
pm_runtime_forbid(dev);
@@ -517,14 +495,15 @@ static int dwc3_probe(struct platform_device *pdev)
}
if (IS_ENABLED(CONFIG_USB_DWC3_HOST))
- mode = DWC3_MODE_HOST;
+ dwc->dr_mode = USB_DR_MODE_HOST;
else if (IS_ENABLED(CONFIG_USB_DWC3_GADGET))
- mode = DWC3_MODE_DEVICE;
- else
- mode = DWC3_MODE_DRD;
+ dwc->dr_mode = USB_DR_MODE_PERIPHERAL;
+
+ if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
+ dwc->dr_mode = USB_DR_MODE_OTG;
- switch (mode) {
- case DWC3_MODE_DEVICE:
+ switch (dwc->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
ret = dwc3_gadget_init(dwc);
if (ret) {
@@ -532,7 +511,7 @@ static int dwc3_probe(struct platform_device *pdev)
goto err2;
}
break;
- case DWC3_MODE_HOST:
+ case USB_DR_MODE_HOST:
dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
ret = dwc3_host_init(dwc);
if (ret) {
@@ -540,7 +519,7 @@ static int dwc3_probe(struct platform_device *pdev)
goto err2;
}
break;
- case DWC3_MODE_DRD:
+ case USB_DR_MODE_OTG:
dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
ret = dwc3_host_init(dwc);
if (ret) {
@@ -555,10 +534,9 @@ static int dwc3_probe(struct platform_device *pdev)
}
break;
default:
- dev_err(dev, "Unsupported mode of operation %d\n", mode);
+ dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
goto err2;
}
- dwc->mode = mode;
ret = dwc3_debugfs_init(dwc);
if (ret) {
@@ -571,14 +549,14 @@ static int dwc3_probe(struct platform_device *pdev)
return 0;
err3:
- switch (mode) {
- case DWC3_MODE_DEVICE:
+ switch (dwc->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
dwc3_gadget_exit(dwc);
break;
- case DWC3_MODE_HOST:
+ case USB_DR_MODE_HOST:
dwc3_host_exit(dwc);
break;
- case DWC3_MODE_DRD:
+ case USB_DR_MODE_OTG:
dwc3_host_exit(dwc);
dwc3_gadget_exit(dwc);
break;
@@ -611,14 +589,14 @@ static int dwc3_remove(struct platform_device *pdev)
dwc3_debugfs_exit(dwc);
- switch (dwc->mode) {
- case DWC3_MODE_DEVICE:
+ switch (dwc->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
dwc3_gadget_exit(dwc);
break;
- case DWC3_MODE_HOST:
+ case USB_DR_MODE_HOST:
dwc3_host_exit(dwc);
break;
- case DWC3_MODE_DRD:
+ case USB_DR_MODE_OTG:
dwc3_host_exit(dwc);
dwc3_gadget_exit(dwc);
break;
@@ -642,12 +620,12 @@ static int dwc3_prepare(struct device *dev)
spin_lock_irqsave(&dwc->lock, flags);
- switch (dwc->mode) {
- case DWC3_MODE_DEVICE:
- case DWC3_MODE_DRD:
+ switch (dwc->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ case USB_DR_MODE_OTG:
dwc3_gadget_prepare(dwc);
/* FALLTHROUGH */
- case DWC3_MODE_HOST:
+ case USB_DR_MODE_HOST:
default:
dwc3_event_buffers_cleanup(dwc);
break;
@@ -665,12 +643,12 @@ static void dwc3_complete(struct device *dev)
spin_lock_irqsave(&dwc->lock, flags);
- switch (dwc->mode) {
- case DWC3_MODE_DEVICE:
- case DWC3_MODE_DRD:
+ switch (dwc->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ case USB_DR_MODE_OTG:
dwc3_gadget_complete(dwc);
/* FALLTHROUGH */
- case DWC3_MODE_HOST:
+ case USB_DR_MODE_HOST:
default:
dwc3_event_buffers_setup(dwc);
break;
@@ -686,12 +664,12 @@ static int dwc3_suspend(struct device *dev)
spin_lock_irqsave(&dwc->lock, flags);
- switch (dwc->mode) {
- case DWC3_MODE_DEVICE:
- case DWC3_MODE_DRD:
+ switch (dwc->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ case USB_DR_MODE_OTG:
dwc3_gadget_suspend(dwc);
/* FALLTHROUGH */
- case DWC3_MODE_HOST:
+ case USB_DR_MODE_HOST:
default:
/* do nothing */
break;
@@ -719,12 +697,12 @@ static int dwc3_resume(struct device *dev)
dwc3_writel(dwc->regs, DWC3_GCTL, dwc->gctl);
- switch (dwc->mode) {
- case DWC3_MODE_DEVICE:
- case DWC3_MODE_DRD:
+ switch (dwc->dr_mode) {
+ case USB_DR_MODE_PERIPHERAL:
+ case USB_DR_MODE_OTG:
dwc3_gadget_resume(dwc);
/* FALLTHROUGH */
- case DWC3_MODE_HOST:
+ case USB_DR_MODE_HOST:
default:
/* do nothing */
break;
@@ -754,6 +732,9 @@ static const struct dev_pm_ops dwc3_dev_pm_ops = {
#ifdef CONFIG_OF
static const struct of_device_id of_dwc3_match[] = {
{
+ .compatible = "snps,dwc3"
+ },
+ {
.compatible = "synopsys,dwc3"
},
{ },
@@ -775,5 +756,5 @@ module_platform_driver(dwc3_driver);
MODULE_ALIAS("platform:dwc3");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
-MODULE_LICENSE("Dual BSD/GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index b69d322e3ca..f8af8d44af8 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -6,34 +6,14 @@
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
*
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#ifndef __DRIVERS_USB_DWC3_CORE_H
@@ -49,6 +29,7 @@
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/otg.h>
/* Global constants */
#define DWC3_EP0_BOUNCE_SIZE 512
@@ -194,6 +175,10 @@
#define DWC3_GTXFIFOSIZ_TXFDEF(n) ((n) & 0xffff)
#define DWC3_GTXFIFOSIZ_TXFSTADDR(n) ((n) & 0xffff0000)
+/* Global Event Size Registers */
+#define DWC3_GEVNTSIZ_INTMASK (1 << 31)
+#define DWC3_GEVNTSIZ_SIZE(n) ((n) & 0xffff)
+
/* Global HWPARAMS1 Register */
#define DWC3_GHWPARAMS1_EN_PWROPT(n) (((n) & (3 << 24)) >> 24)
#define DWC3_GHWPARAMS1_EN_PWROPT_NO 0
@@ -207,7 +192,6 @@
#define DWC3_MAX_HIBER_SCRATCHBUFS 15
/* Device Configuration Register */
-#define DWC3_DCFG_LPM_CAP (1 << 22)
#define DWC3_DCFG_DEVADDR(addr) ((addr) << 3)
#define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f)
@@ -367,7 +351,6 @@ struct dwc3_trb;
/**
* struct dwc3_event_buffer - Software event buffer representation
- * @list: a list of event buffers
* @buf: _THE_ buffer
* @length: size of this buffer
* @lpos: event offset
@@ -415,7 +398,7 @@ struct dwc3_event_buffer {
* @number: endpoint number (1 - 15)
* @type: set to bmAttributes & USB_ENDPOINT_XFERTYPE_MASK
* @resource_index: Resource transfer index
- * @interval: the intervall on which the ISOC transfer is started
+ * @interval: the interval on which the ISOC transfer is started
* @name: a human readable name e.g. ep1out-bulk
* @direction: true for TX, false for RX
* @stream_capable: true when streams are enabled
@@ -566,11 +549,6 @@ struct dwc3_hwparams {
/* HWPARAMS0 */
#define DWC3_MODE(n) ((n) & 0x7)
-#define DWC3_MODE_DEVICE 0
-#define DWC3_MODE_HOST 1
-#define DWC3_MODE_DRD 2
-#define DWC3_MODE_HUB 3
-
#define DWC3_MDWIDTH(n) (((n) & 0xff00) >> 8)
/* HWPARAMS1 */
@@ -632,7 +610,7 @@ struct dwc3_scratchpad_array {
* @u1u2: only used on revisions <1.83a for workaround
* @maximum_speed: maximum speed requested (mainly for testing purposes)
* @revision: revision register contents
- * @mode: mode of operation
+ * @dr_mode: requested mode of operation
* @usb2_phy: pointer to USB2 PHY
* @usb3_phy: pointer to USB3 PHY
* @dcfg: saved contents of DCFG register
@@ -690,6 +668,8 @@ struct dwc3 {
void __iomem *regs;
size_t regs_size;
+ enum usb_dr_mode dr_mode;
+
/* used for suspend/resume */
u32 dcfg;
u32 gctl;
@@ -698,7 +678,6 @@ struct dwc3 {
u32 u1u2;
u32 maximum_speed;
u32 revision;
- u32 mode;
#define DWC3_REVISION_173A 0x5533173a
#define DWC3_REVISION_175A 0x5533175a
@@ -759,8 +738,8 @@ struct dwc3 {
struct dwc3_event_type {
u32 is_devspec:1;
- u32 type:6;
- u32 reserved8_31:25;
+ u32 type:7;
+ u32 reserved8_31:24;
} __packed;
#define DWC3_DEPEVT_XFERCOMPLETE 0x01
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 5894ee8222a..fceb39dc4bb 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -6,34 +6,14 @@
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
*
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#include "core.h"
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 9e9f122162f..9ac37fe1b6a 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -6,34 +6,14 @@
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
*
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 8ce9d7fd6cf..2f2e88a3a11 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -6,10 +6,14 @@
*
* Author: Anton Tikhomirov <av.tikhomirov@samsung.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#include <linux/module.h>
@@ -20,7 +24,7 @@
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/usb/otg.h>
-#include <linux/usb/nop-usb-xceiv.h>
+#include <linux/usb/usb_phy_gen_xceiv.h>
#include <linux/of.h>
#include <linux/of_platform.h>
@@ -34,13 +38,13 @@ struct dwc3_exynos {
static int dwc3_exynos_register_phys(struct dwc3_exynos *exynos)
{
- struct nop_usb_xceiv_platform_data pdata;
+ struct usb_phy_gen_xceiv_platform_data pdata;
struct platform_device *pdev;
int ret;
memset(&pdata, 0x00, sizeof(pdata));
- pdev = platform_device_alloc("nop_usb_xceiv", PLATFORM_DEVID_AUTO);
+ pdev = platform_device_alloc("usb_phy_gen_xceiv", PLATFORM_DEVID_AUTO);
if (!pdev)
return -ENOMEM;
@@ -51,7 +55,7 @@ static int dwc3_exynos_register_phys(struct dwc3_exynos *exynos)
if (ret)
goto err1;
- pdev = platform_device_alloc("nop_usb_xceiv", PLATFORM_DEVID_AUTO);
+ pdev = platform_device_alloc("usb_phy_gen_xceiv", PLATFORM_DEVID_AUTO);
if (!pdev) {
ret = -ENOMEM;
goto err1;
@@ -228,5 +232,5 @@ module_platform_driver(dwc3_exynos_driver);
MODULE_ALIAS("platform:exynos-dwc3");
MODULE_AUTHOR("Anton Tikhomirov <av.tikhomirov@samsung.com>");
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare USB3 EXYNOS Glue Layer");
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 077f110bd74..7f7ea62e961 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -6,34 +6,14 @@
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
*
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#include <linux/module.h>
@@ -43,13 +23,15 @@
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/platform_data/dwc3-omap.h>
-#include <linux/usb/dwc3-omap.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_platform.h>
+#include <linux/extcon.h>
+#include <linux/extcon/of_extcon.h>
+#include <linux/regulator/consumer.h>
#include <linux/usb/otg.h>
@@ -155,9 +137,21 @@ struct dwc3_omap {
u32 revision;
u32 dma_status:1;
+
+ struct extcon_specific_cable_nb extcon_vbus_dev;
+ struct extcon_specific_cable_nb extcon_id_dev;
+ struct notifier_block vbus_nb;
+ struct notifier_block id_nb;
+
+ struct regulator *vbus_reg;
};
-static struct dwc3_omap *_omap;
+enum omap_dwc3_vbus_id_status {
+ OMAP_DWC3_ID_FLOAT,
+ OMAP_DWC3_ID_GROUND,
+ OMAP_DWC3_VBUS_OFF,
+ OMAP_DWC3_VBUS_VALID,
+};
static inline u32 dwc3_omap_readl(void __iomem *base, u32 offset)
{
@@ -221,18 +215,24 @@ static void dwc3_omap_write_irq0_set(struct dwc3_omap *omap, u32 value)
omap->irq0_offset, value);
}
-int dwc3_omap_mailbox(enum omap_dwc3_vbus_id_status status)
+static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
+ enum omap_dwc3_vbus_id_status status)
{
- u32 val;
- struct dwc3_omap *omap = _omap;
-
- if (!omap)
- return -EPROBE_DEFER;
+ int ret;
+ u32 val;
switch (status) {
case OMAP_DWC3_ID_GROUND:
dev_dbg(omap->dev, "ID GND\n");
+ if (omap->vbus_reg) {
+ ret = regulator_enable(omap->vbus_reg);
+ if (ret) {
+ dev_dbg(omap->dev, "regulator enable failed\n");
+ return;
+ }
+ }
+
val = dwc3_omap_read_utmi_status(omap);
val &= ~(USBOTGSS_UTMI_OTG_STATUS_IDDIG
| USBOTGSS_UTMI_OTG_STATUS_VBUSVALID
@@ -255,6 +255,9 @@ int dwc3_omap_mailbox(enum omap_dwc3_vbus_id_status status)
break;
case OMAP_DWC3_ID_FLOAT:
+ if (omap->vbus_reg)
+ regulator_disable(omap->vbus_reg);
+
case OMAP_DWC3_VBUS_OFF:
dev_dbg(omap->dev, "VBUS Disconnect\n");
@@ -268,12 +271,9 @@ int dwc3_omap_mailbox(enum omap_dwc3_vbus_id_status status)
break;
default:
- dev_dbg(omap->dev, "ID float\n");
+ dev_dbg(omap->dev, "invalid state\n");
}
-
- return 0;
}
-EXPORT_SYMBOL_GPL(dwc3_omap_mailbox);
static irqreturn_t dwc3_omap_interrupt(int irq, void *_omap)
{
@@ -366,6 +366,32 @@ static void dwc3_omap_disable_irqs(struct dwc3_omap *omap)
static u64 dwc3_omap_dma_mask = DMA_BIT_MASK(32);
+static int dwc3_omap_id_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct dwc3_omap *omap = container_of(nb, struct dwc3_omap, id_nb);
+
+ if (event)
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
+ else
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
+
+ return NOTIFY_DONE;
+}
+
+static int dwc3_omap_vbus_notifier(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct dwc3_omap *omap = container_of(nb, struct dwc3_omap, vbus_nb);
+
+ if (event)
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
+ else
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
+
+ return NOTIFY_DONE;
+}
+
static int dwc3_omap_probe(struct platform_device *pdev)
{
struct device_node *node = pdev->dev.of_node;
@@ -373,6 +399,8 @@ static int dwc3_omap_probe(struct platform_device *pdev)
struct dwc3_omap *omap;
struct resource *res;
struct device *dev = &pdev->dev;
+ struct extcon_dev *edev;
+ struct regulator *vbus_reg = NULL;
int ret = -ENOMEM;
int irq;
@@ -409,10 +437,16 @@ static int dwc3_omap_probe(struct platform_device *pdev)
return -EINVAL;
}
- base = devm_ioremap_nocache(dev, res->start, resource_size(res));
- if (!base) {
- dev_err(dev, "ioremap failed\n");
- return -ENOMEM;
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ if (of_property_read_bool(node, "vbus-supply")) {
+ vbus_reg = devm_regulator_get(dev, "vbus");
+ if (IS_ERR(vbus_reg)) {
+ dev_err(dev, "vbus init failed\n");
+ return PTR_ERR(vbus_reg);
+ }
}
spin_lock_init(&omap->lock);
@@ -420,14 +454,9 @@ static int dwc3_omap_probe(struct platform_device *pdev)
omap->dev = dev;
omap->irq = irq;
omap->base = base;
+ omap->vbus_reg = vbus_reg;
dev->dma_mask = &dwc3_omap_dma_mask;
- /*
- * REVISIT if we ever have two instances of the wrapper, we will be
- * in big trouble
- */
- _omap = omap;
-
pm_runtime_enable(dev);
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
@@ -502,14 +531,46 @@ static int dwc3_omap_probe(struct platform_device *pdev)
dwc3_omap_enable_irqs(omap);
+ if (of_property_read_bool(node, "extcon")) {
+ edev = of_extcon_get_extcon_dev(dev, 0);
+ if (IS_ERR(edev)) {
+ dev_vdbg(dev, "couldn't get extcon device\n");
+ ret = PTR_ERR(edev);
+ goto err2;
+ }
+
+ omap->vbus_nb.notifier_call = dwc3_omap_vbus_notifier;
+ ret = extcon_register_interest(&omap->extcon_vbus_dev,
+ edev->name, "USB", &omap->vbus_nb);
+ if (ret < 0)
+ dev_vdbg(dev, "failed to register notifier for USB\n");
+ omap->id_nb.notifier_call = dwc3_omap_id_notifier;
+ ret = extcon_register_interest(&omap->extcon_id_dev, edev->name,
+ "USB-HOST", &omap->id_nb);
+ if (ret < 0)
+ dev_vdbg(dev,
+ "failed to register notifier for USB-HOST\n");
+
+ if (extcon_get_cable_state(edev, "USB") == true)
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
+ if (extcon_get_cable_state(edev, "USB-HOST") == true)
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
+ }
+
ret = of_platform_populate(node, NULL, NULL, dev);
if (ret) {
dev_err(&pdev->dev, "failed to create dwc3 core\n");
- goto err2;
+ goto err3;
}
return 0;
+err3:
+ if (omap->extcon_vbus_dev.edev)
+ extcon_unregister_interest(&omap->extcon_vbus_dev);
+ if (omap->extcon_id_dev.edev)
+ extcon_unregister_interest(&omap->extcon_id_dev);
+
err2:
dwc3_omap_disable_irqs(omap);
@@ -526,6 +587,10 @@ static int dwc3_omap_remove(struct platform_device *pdev)
{
struct dwc3_omap *omap = platform_get_drvdata(pdev);
+ if (omap->extcon_vbus_dev.edev)
+ extcon_unregister_interest(&omap->extcon_vbus_dev);
+ if (omap->extcon_id_dev.edev)
+ extcon_unregister_interest(&omap->extcon_id_dev);
dwc3_omap_disable_irqs(omap);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@@ -610,5 +675,5 @@ module_platform_driver(dwc3_omap_driver);
MODULE_ALIAS("platform:omap-dwc3");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
-MODULE_LICENSE("Dual BSD/GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare USB3 OMAP Glue Layer");
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index ed07ec04a96..9b138129e85 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -6,34 +6,14 @@
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
*
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#include <linux/kernel.h>
@@ -43,7 +23,7 @@
#include <linux/platform_device.h>
#include <linux/usb/otg.h>
-#include <linux/usb/nop-usb-xceiv.h>
+#include <linux/usb/usb_phy_gen_xceiv.h>
/* FIXME define these in <linux/pci_ids.h> */
#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
@@ -58,13 +38,13 @@ struct dwc3_pci {
static int dwc3_pci_register_phys(struct dwc3_pci *glue)
{
- struct nop_usb_xceiv_platform_data pdata;
+ struct usb_phy_gen_xceiv_platform_data pdata;
struct platform_device *pdev;
int ret;
memset(&pdata, 0x00, sizeof(pdata));
- pdev = platform_device_alloc("nop_usb_xceiv", 0);
+ pdev = platform_device_alloc("usb_phy_gen_xceiv", 0);
if (!pdev)
return -ENOMEM;
@@ -75,7 +55,7 @@ static int dwc3_pci_register_phys(struct dwc3_pci *glue)
if (ret)
goto err1;
- pdev = platform_device_alloc("nop_usb_xceiv", 1);
+ pdev = platform_device_alloc("usb_phy_gen_xceiv", 1);
if (!pdev) {
ret = -ENOMEM;
goto err1;
@@ -211,7 +191,7 @@ static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = {
};
MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int dwc3_pci_suspend(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
@@ -236,28 +216,24 @@ static int dwc3_pci_resume(struct device *dev)
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
static const struct dev_pm_ops dwc3_pci_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(dwc3_pci_suspend, dwc3_pci_resume)
};
-#define DEV_PM_OPS (&dwc3_pci_dev_pm_ops)
-#else
-#define DEV_PM_OPS NULL
-#endif /* CONFIG_PM */
-
static struct pci_driver dwc3_pci_driver = {
.name = "dwc3-pci",
.id_table = dwc3_pci_id_table,
.probe = dwc3_pci_probe,
.remove = dwc3_pci_remove,
.driver = {
- .pm = DEV_PM_OPS,
+ .pm = &dwc3_pci_dev_pm_ops,
},
};
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
-MODULE_LICENSE("Dual BSD/GPL");
+MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DesignWare USB3 PCI Glue Layer");
module_pci_driver(dwc3_pci_driver);
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 5acbb948b70..7fa93f4bc50 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -6,34 +6,14 @@
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
*
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#include <linux/kernel.h>
@@ -168,6 +148,7 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
direction = !dwc->ep0_expect_in;
dwc->delayed_status = false;
+ usb_gadget_set_state(&dwc->gadget, USB_STATE_CONFIGURED);
if (dwc->ep0state == EP0_STATUS_PHASE)
__dwc3_ep0_do_control_status(dwc, dwc->eps[direction]);
@@ -553,8 +534,16 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
ret = dwc3_ep0_delegate_req(dwc, ctrl);
/* if the cfg matches and the cfg is non zero */
if (cfg && (!ret || (ret == USB_GADGET_DELAYED_STATUS))) {
- usb_gadget_set_state(&dwc->gadget,
- USB_STATE_CONFIGURED);
+
+ /*
+ * only change state if set_config has already
+ * been processed. If gadget driver returns
+ * USB_GADGET_DELAYED_STATUS, we will wait
+ * to change the state on the next usb_ep_queue()
+ */
+ if (ret == 0)
+ usb_gadget_set_state(&dwc->gadget,
+ USB_STATE_CONFIGURED);
/*
* Enable transition to U1/U2 state when
@@ -571,7 +560,7 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
case USB_STATE_CONFIGURED:
ret = dwc3_ep0_delegate_req(dwc, ctrl);
- if (!cfg)
+ if (!cfg && !ret)
usb_gadget_set_state(&dwc->gadget,
USB_STATE_ADDRESS);
break;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index b5e5b35df49..f168eaebdef 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -6,34 +6,14 @@
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
*
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#include <linux/kernel.h>
@@ -520,6 +500,8 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
u32 reg;
int ret = -ENOMEM;
+ dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
+
if (!(dep->flags & DWC3_EP_ENABLED)) {
ret = dwc3_gadget_start_config(dwc, dep);
if (ret)
@@ -676,8 +658,6 @@ static int dwc3_gadget_ep_enable(struct usb_ep *ep,
dev_err(dwc->dev, "invalid endpoint transfer type\n");
}
- dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
-
spin_lock_irqsave(&dwc->lock, flags);
ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false);
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1508,6 +1488,15 @@ static int dwc3_gadget_start(struct usb_gadget *g,
int irq;
u32 reg;
+ irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+ ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
+ IRQF_SHARED, "dwc3", dwc);
+ if (ret) {
+ dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
+ irq, ret);
+ goto err0;
+ }
+
spin_lock_irqsave(&dwc->lock, flags);
if (dwc->gadget_driver) {
@@ -1515,7 +1504,7 @@ static int dwc3_gadget_start(struct usb_gadget *g,
dwc->gadget.name,
dwc->gadget_driver->driver.name);
ret = -EBUSY;
- goto err0;
+ goto err1;
}
dwc->gadget_driver = driver;
@@ -1536,10 +1525,25 @@ static int dwc3_gadget_start(struct usb_gadget *g,
* STAR#9000525659: Clock Domain Crossing on DCTL in
* USB 2.0 Mode
*/
- if (dwc->revision < DWC3_REVISION_220A)
+ if (dwc->revision < DWC3_REVISION_220A) {
reg |= DWC3_DCFG_SUPERSPEED;
- else
- reg |= dwc->maximum_speed;
+ } else {
+ switch (dwc->maximum_speed) {
+ case USB_SPEED_LOW:
+ reg |= DWC3_DSTS_LOWSPEED;
+ break;
+ case USB_SPEED_FULL:
+ reg |= DWC3_DSTS_FULLSPEED1;
+ break;
+ case USB_SPEED_HIGH:
+ reg |= DWC3_DSTS_HIGHSPEED;
+ break;
+ case USB_SPEED_SUPER: /* FALLTHROUGH */
+ case USB_SPEED_UNKNOWN: /* FALLTHROUGH */
+ default:
+ reg |= DWC3_DSTS_SUPERSPEED;
+ }
+ }
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
dwc->start_config_issued = false;
@@ -1551,41 +1555,38 @@ static int dwc3_gadget_start(struct usb_gadget *g,
ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
- goto err0;
+ goto err2;
}
dep = dwc->eps[1];
ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
- goto err1;
+ goto err3;
}
/* begin to receive SETUP packets */
dwc->ep0state = EP0_SETUP_PHASE;
dwc3_ep0_out_start(dwc);
- irq = platform_get_irq(to_platform_device(dwc->dev), 0);
- ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
- IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
- if (ret) {
- dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
- irq, ret);
- goto err1;
- }
-
dwc3_gadget_enable_irq(dwc);
spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
-err1:
+err3:
__dwc3_gadget_ep_disable(dwc->eps[0]);
-err0:
+err2:
+ dwc->gadget_driver = NULL;
+
+err1:
spin_unlock_irqrestore(&dwc->lock, flags);
+ free_irq(irq, dwc);
+
+err0:
return ret;
}
@@ -1599,9 +1600,6 @@ static int dwc3_gadget_stop(struct usb_gadget *g,
spin_lock_irqsave(&dwc->lock, flags);
dwc3_gadget_disable_irq(dwc);
- irq = platform_get_irq(to_platform_device(dwc->dev), 0);
- free_irq(irq, dwc);
-
__dwc3_gadget_ep_disable(dwc->eps[0]);
__dwc3_gadget_ep_disable(dwc->eps[1]);
@@ -1609,6 +1607,9 @@ static int dwc3_gadget_stop(struct usb_gadget *g,
spin_unlock_irqrestore(&dwc->lock, flags);
+ irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+ free_irq(irq, dwc);
+
return 0;
}
@@ -1641,13 +1642,15 @@ static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
dep->dwc = dwc;
dep->number = epnum;
+ dep->direction = !!direction;
dwc->eps[epnum] = dep;
snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
(epnum & 1) ? "in" : "out");
dep->endpoint.name = dep->name;
- dep->direction = (epnum & 1);
+
+ dev_vdbg(dwc->dev, "initializing %s\n", dep->name);
if (epnum == 0 || epnum == 1) {
dep->endpoint.maxpacket = 512;
@@ -2104,34 +2107,6 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
dwc->setup_packet_pending = false;
}
-static void dwc3_gadget_usb3_phy_suspend(struct dwc3 *dwc, int suspend)
-{
- u32 reg;
-
- reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
-
- if (suspend)
- reg |= DWC3_GUSB3PIPECTL_SUSPHY;
- else
- reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
-
- dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
-}
-
-static void dwc3_gadget_usb2_phy_suspend(struct dwc3 *dwc, int suspend)
-{
- u32 reg;
-
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
-
- if (suspend)
- reg |= DWC3_GUSB2PHYCFG_SUSPHY;
- else
- reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
-
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
-}
-
static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
{
u32 reg;
@@ -2172,13 +2147,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
/* after reset -> Default State */
usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT);
- /* Recent versions support automatic phy suspend and don't need this */
- if (dwc->revision < DWC3_REVISION_194A) {
- /* Resume PHYs */
- dwc3_gadget_usb2_phy_suspend(dwc, false);
- dwc3_gadget_usb3_phy_suspend(dwc, false);
- }
-
if (dwc->gadget.speed != USB_SPEED_UNKNOWN)
dwc3_disconnect_gadget(dwc);
@@ -2222,20 +2190,6 @@ static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
}
-static void dwc3_gadget_phy_suspend(struct dwc3 *dwc, u8 speed)
-{
- switch (speed) {
- case USB_SPEED_SUPER:
- dwc3_gadget_usb2_phy_suspend(dwc, true);
- break;
- case USB_SPEED_HIGH:
- case USB_SPEED_FULL:
- case USB_SPEED_LOW:
- dwc3_gadget_usb3_phy_suspend(dwc, true);
- break;
- }
-}
-
static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
{
struct dwc3_ep *dep;
@@ -2311,12 +2265,6 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
}
- /* Recent versions support automatic phy suspend and don't need this */
- if (dwc->revision < DWC3_REVISION_194A) {
- /* Suspend unneeded PHY */
- dwc3_gadget_phy_suspend(dwc, dwc->gadget.speed);
- }
-
dep = dwc->eps[0];
ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true);
if (ret) {
@@ -2494,61 +2442,75 @@ static void dwc3_process_event_entry(struct dwc3 *dwc,
}
}
-static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
+static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
{
- struct dwc3 *dwc = _dwc;
- unsigned long flags;
+ struct dwc3_event_buffer *evt;
irqreturn_t ret = IRQ_NONE;
- int i;
+ int left;
+ u32 reg;
- spin_lock_irqsave(&dwc->lock, flags);
+ evt = dwc->ev_buffs[buf];
+ left = evt->count;
- for (i = 0; i < dwc->num_event_buffers; i++) {
- struct dwc3_event_buffer *evt;
- int left;
+ if (!(evt->flags & DWC3_EVENT_PENDING))
+ return IRQ_NONE;
- evt = dwc->ev_buffs[i];
- left = evt->count;
+ while (left > 0) {
+ union dwc3_event event;
- if (!(evt->flags & DWC3_EVENT_PENDING))
- continue;
+ event.raw = *(u32 *) (evt->buf + evt->lpos);
- while (left > 0) {
- union dwc3_event event;
+ dwc3_process_event_entry(dwc, &event);
- event.raw = *(u32 *) (evt->buf + evt->lpos);
+ /*
+ * FIXME we wrap around correctly to the next entry as
+ * almost all entries are 4 bytes in size. There is one
+ * entry which has 12 bytes which is a regular entry
+ * followed by 8 bytes data. ATM I don't know how
+ * things are organized if we get next to a
+ * boundary so I worry about that once we try to handle
+ * that.
+ */
+ evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
+ left -= 4;
- dwc3_process_event_entry(dwc, &event);
+ dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
+ }
- /*
- * FIXME we wrap around correctly to the next entry as
- * almost all entries are 4 bytes in size. There is one
- * entry which has 12 bytes which is a regular entry
- * followed by 8 bytes data. ATM I don't know how
- * things are organized if we get next to the a
- * boundary so I worry about that once we try to handle
- * that.
- */
- evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
- left -= 4;
+ evt->count = 0;
+ evt->flags &= ~DWC3_EVENT_PENDING;
+ ret = IRQ_HANDLED;
- dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(i), 4);
- }
+ /* Unmask interrupt */
+ reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
+ reg &= ~DWC3_GEVNTSIZ_INTMASK;
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
- evt->count = 0;
- evt->flags &= ~DWC3_EVENT_PENDING;
- ret = IRQ_HANDLED;
- }
+ return ret;
+}
+
+static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
+{
+ struct dwc3 *dwc = _dwc;
+ unsigned long flags;
+ irqreturn_t ret = IRQ_NONE;
+ int i;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+
+ for (i = 0; i < dwc->num_event_buffers; i++)
+ ret |= dwc3_process_event_buf(dwc, i);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
}
-static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
+static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
{
struct dwc3_event_buffer *evt;
u32 count;
+ u32 reg;
evt = dwc->ev_buffs[buf];
@@ -2560,6 +2522,11 @@ static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
evt->count = count;
evt->flags |= DWC3_EVENT_PENDING;
+ /* Mask interrupt */
+ reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
+ reg |= DWC3_GEVNTSIZ_INTMASK;
+ dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
+
return IRQ_WAKE_THREAD;
}
@@ -2574,7 +2541,7 @@ static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
for (i = 0; i < dwc->num_event_buffers; i++) {
irqreturn_t status;
- status = dwc3_process_event_buf(dwc, i);
+ status = dwc3_check_event_buf(dwc, i);
if (status == IRQ_WAKE_THREAD)
ret = status;
}
@@ -2592,7 +2559,6 @@ static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
*/
int dwc3_gadget_init(struct dwc3 *dwc)
{
- u32 reg;
int ret;
dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
@@ -2642,16 +2608,6 @@ int dwc3_gadget_init(struct dwc3 *dwc)
if (ret)
goto err4;
- reg = dwc3_readl(dwc->regs, DWC3_DCFG);
- reg |= DWC3_DCFG_LPM_CAP;
- dwc3_writel(dwc->regs, DWC3_DCFG, reg);
-
- /* Enable USB2 LPM and automatic phy suspend only on recent versions */
- if (dwc->revision >= DWC3_REVISION_194A) {
- dwc3_gadget_usb2_phy_suspend(dwc, false);
- dwc3_gadget_usb3_phy_suspend(dwc, false);
- }
-
ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
if (ret) {
dev_err(dwc->dev, "failed to register udc\n");
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 99e6d724882..febe1aa7b71 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -6,34 +6,14 @@
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
*
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#ifndef __DRIVERS_USB_DWC3_GADGET_H
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index 0fa1846eda4..32db328cc76 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -5,34 +5,14 @@
*
* Authors: Felipe Balbi <balbi@ti.com>,
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
*
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#include <linux/platform_device.h>
diff --git a/drivers/usb/dwc3/io.h b/drivers/usb/dwc3/io.h
index a50f76b9d19..d94441c14d8 100644
--- a/drivers/usb/dwc3/io.h
+++ b/drivers/usb/dwc3/io.h
@@ -6,34 +6,14 @@
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The names of the above-listed copyright holders may not be used
- * to endorse or promote products derived from this software without
- * specific prior written permission.
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
*
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2, as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
- * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
*/
#ifndef __DRIVERS_USB_DWC3_IO_H
diff --git a/drivers/usb/dwc3/platform_data.h b/drivers/usb/dwc3/platform_data.h
new file mode 100644
index 00000000000..7db34f00b89
--- /dev/null
+++ b/drivers/usb/dwc3/platform_data.h
@@ -0,0 +1,27 @@
+/**
+ * platform_data.h - USB DWC3 Platform Data Support
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Felipe Balbi <balbi@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/otg.h>
+
+struct dwc3_platform_data {
+ enum usb_device_speed maximum_speed;
+ enum usb_dr_mode dr_mode;
+ bool tx_fifo_resize;
+};
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 62f6802f6e0..30e2dd8a1f2 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -144,7 +144,6 @@ config USB_AT91
config USB_LPC32XX
tristate "LPC32XX USB Peripheral Controller"
depends on ARCH_LPC32XX
- depends on USB_PHY
select USB_ISP1301
help
This option selects the USB device controller in the LPC32xx SoC.
@@ -188,11 +187,12 @@ config USB_FSL_USB2
config USB_FUSB300
tristate "Faraday FUSB300 USB Peripheral Controller"
- depends on !PHYS_ADDR_T_64BIT
+ depends on !PHYS_ADDR_T_64BIT && HAS_DMA
help
Faraday usb device controller FUSB300 driver
config USB_FOTG210_UDC
+ depends on HAS_DMA
tristate "Faraday FOTG210 USB Peripheral Controller"
help
Faraday USB2.0 OTG controller which can be configured as
@@ -205,7 +205,6 @@ config USB_FOTG210_UDC
config USB_OMAP
tristate "OMAP USB Device Controller"
depends on ARCH_OMAP1
- depends on USB_PHY
select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3 || MACH_OMAP_H4_OTG
help
Many Texas Instruments OMAP processors have flexible full
@@ -245,6 +244,7 @@ config USB_PXA25X_SMALL
config USB_R8A66597
tristate "Renesas R8A66597 USB Peripheral Controller"
+ depends on HAS_DMA
help
R8A66597 is a discrete USB host and peripheral controller chip that
supports both full and high speed USB 2.0 data transfers.
@@ -286,21 +286,6 @@ config USB_S3C_HSOTG
The Samsung S3C64XX USB2.0 high-speed gadget controller
integrated into the S3C64XX series SoC.
-config USB_IMX
- tristate "Freescale i.MX1 USB Peripheral Controller"
- depends on ARCH_MXC
- depends on BROKEN
- help
- Freescale's i.MX1 includes an integrated full speed
- USB 1.1 device controller.
-
- It has Six fixed-function endpoints, as well as endpoint
- zero (for control transfers).
-
- Say "y" to link the driver statically, or "m" to build a
- dynamically linked module called "imx_udc" and force all
- gadget drivers to also be dynamically linked.
-
config USB_S3C2410
tristate "S3C2410 USB Device Controller"
depends on ARCH_S3C24XX
@@ -328,13 +313,14 @@ config USB_S3C_HSUDC
config USB_MV_UDC
tristate "Marvell USB2.0 Device Controller"
- depends on GENERIC_HARDIRQS
+ depends on GENERIC_HARDIRQS && HAS_DMA
help
Marvell Socs (including PXA and MMP series) include a high speed
USB2.0 OTG controller, which can be configured as high speed or
full speed USB peripheral.
config USB_MV_U3D
+ depends on HAS_DMA
tristate "MARVELL PXA2128 USB 3.0 controller"
help
MARVELL PXA2128 Processor series include a super speed USB3.0 device
@@ -400,7 +386,7 @@ config USB_NET2272
config USB_NET2272_DMA
boolean "Support external DMA controller"
- depends on USB_NET2272
+ depends on USB_NET2272 && HAS_DMA
help
The NET2272 part can optionally support an external DMA
controller, but your board has to have support in the
@@ -570,7 +556,7 @@ config USB_CONFIGFS
specified simply by creating appropriate directories in configfs.
Associating functions with configurations is done by creating
appropriate symbolic links.
- For more information see Documentation/usb/gadget-configfs.txt.
+ For more information see Documentation/usb/gadget_configfs.txt.
config USB_CONFIGFS_SERIAL
boolean "Generic serial bulk in/out"
@@ -639,6 +625,7 @@ config USB_CONFIGFS_RNDIS
depends on USB_CONFIGFS
depends on NET
select USB_U_ETHER
+ select USB_U_RNDIS
select USB_F_RNDIS
help
Microsoft Windows XP bundles the "Remote NDIS" (RNDIS) protocol,
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index bad08e66f36..386db9daf1d 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_USB_NET2280) += net2280.o
obj-$(CONFIG_USB_AMD5536UDC) += amd5536udc.o
obj-$(CONFIG_USB_PXA25X) += pxa25x_udc.o
obj-$(CONFIG_USB_PXA27X) += pxa27x_udc.o
-obj-$(CONFIG_USB_IMX) += imx_udc.o
obj-$(CONFIG_USB_GOKU) += goku_udc.o
obj-$(CONFIG_USB_OMAP) += omap_udc.o
obj-$(CONFIG_USB_S3C2410) += s3c2410_udc.o
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index f52dcfe8f54..a9a4346c83a 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -1122,7 +1122,7 @@ udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
goto finished;
}
if (ep->dma) {
- retval = prep_dma(ep, req, gfp);
+ retval = prep_dma(ep, req, GFP_ATOMIC);
if (retval != 0)
goto finished;
/* write desc pointer to enable DMA */
@@ -1190,7 +1190,7 @@ udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
* for PPB modes, because of chain creation reasons
*/
if (ep->in) {
- retval = prep_dma(ep, req, gfp);
+ retval = prep_dma(ep, req, GFP_ATOMIC);
if (retval != 0)
goto finished;
}
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 073b938f913..4cc4fd6d147 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -870,8 +870,13 @@ static void clk_on(struct at91_udc *udc)
if (udc->clocked)
return;
udc->clocked = 1;
- clk_enable(udc->iclk);
- clk_enable(udc->fclk);
+
+ if (IS_ENABLED(CONFIG_COMMON_CLK)) {
+ clk_set_rate(udc->uclk, 48000000);
+ clk_prepare_enable(udc->uclk);
+ }
+ clk_prepare_enable(udc->iclk);
+ clk_prepare_enable(udc->fclk);
}
static void clk_off(struct at91_udc *udc)
@@ -880,8 +885,10 @@ static void clk_off(struct at91_udc *udc)
return;
udc->clocked = 0;
udc->gadget.speed = USB_SPEED_UNKNOWN;
- clk_disable(udc->fclk);
- clk_disable(udc->iclk);
+ clk_disable_unprepare(udc->fclk);
+ clk_disable_unprepare(udc->iclk);
+ if (IS_ENABLED(CONFIG_COMMON_CLK))
+ clk_disable_unprepare(udc->uclk);
}
/*
@@ -1697,7 +1704,7 @@ static int at91udc_probe(struct platform_device *pdev)
int retval;
struct resource *res;
- if (!dev->platform_data && !pdev->dev.of_node) {
+ if (!dev_get_platdata(dev) && !pdev->dev.of_node) {
/* small (so we copy it) but critical! */
DBG("missing platform_data\n");
return -ENODEV;
@@ -1725,10 +1732,10 @@ static int at91udc_probe(struct platform_device *pdev)
/* init software state */
udc = &controller;
udc->gadget.dev.parent = dev;
- if (pdev->dev.of_node)
+ if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node)
at91udc_of_init(udc, pdev->dev.of_node);
else
- memcpy(&udc->board, dev->platform_data,
+ memcpy(&udc->board, dev_get_platdata(dev),
sizeof(struct at91_udc_data));
udc->pdev = pdev;
udc->enabled = 0;
@@ -1774,20 +1781,24 @@ static int at91udc_probe(struct platform_device *pdev)
/* get interface and function clocks */
udc->iclk = clk_get(dev, "udc_clk");
udc->fclk = clk_get(dev, "udpck");
- if (IS_ERR(udc->iclk) || IS_ERR(udc->fclk)) {
+ if (IS_ENABLED(CONFIG_COMMON_CLK))
+ udc->uclk = clk_get(dev, "usb_clk");
+ if (IS_ERR(udc->iclk) || IS_ERR(udc->fclk) ||
+ (IS_ENABLED(CONFIG_COMMON_CLK) && IS_ERR(udc->uclk))) {
DBG("clocks missing\n");
retval = -ENODEV;
- /* NOTE: we "know" here that refcounts on these are NOPs */
goto fail1;
}
/* don't do anything until we have both gadget driver and VBUS */
- clk_enable(udc->iclk);
+ retval = clk_prepare_enable(udc->iclk);
+ if (retval)
+ goto fail1;
at91_udp_write(udc, AT91_UDP_TXVC, AT91_UDP_TXVC_TXVDIS);
at91_udp_write(udc, AT91_UDP_IDR, 0xffffffff);
/* Clear all pending interrupts - UDP may be used by bootloader. */
at91_udp_write(udc, AT91_UDP_ICR, 0xffffffff);
- clk_disable(udc->iclk);
+ clk_disable_unprepare(udc->iclk);
/* request UDC and maybe VBUS irqs */
udc->udp_irq = platform_get_irq(pdev, 0);
@@ -1849,6 +1860,12 @@ fail3:
fail2:
free_irq(udc->udp_irq, udc);
fail1:
+ if (IS_ENABLED(CONFIG_COMMON_CLK) && !IS_ERR(udc->uclk))
+ clk_put(udc->uclk);
+ if (!IS_ERR(udc->fclk))
+ clk_put(udc->fclk);
+ if (!IS_ERR(udc->iclk))
+ clk_put(udc->iclk);
iounmap(udc->udp_baseaddr);
fail0a:
if (cpu_is_at91rm9200())
@@ -1892,6 +1909,8 @@ static int __exit at91udc_remove(struct platform_device *pdev)
clk_put(udc->iclk);
clk_put(udc->fclk);
+ if (IS_ENABLED(CONFIG_COMMON_CLK))
+ clk_put(udc->uclk);
return 0;
}
diff --git a/drivers/usb/gadget/at91_udc.h b/drivers/usb/gadget/at91_udc.h
index e647d1c2ada..01752466338 100644
--- a/drivers/usb/gadget/at91_udc.h
+++ b/drivers/usb/gadget/at91_udc.h
@@ -126,7 +126,7 @@ struct at91_udc {
unsigned active_suspend:1;
u8 addr;
struct at91_udc_data board;
- struct clk *iclk, *fclk;
+ struct clk *iclk, *fclk, *uclk;
struct platform_device *pdev;
struct proc_dir_entry *pde;
void __iomem *udp_baseaddr;
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 1d9722203ca..2cb52e0438d 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -1772,6 +1772,7 @@ out:
static int atmel_usba_start(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
+ int ret;
struct usba_udc *udc = container_of(gadget, struct usba_udc, gadget);
unsigned long flags;
@@ -1781,8 +1782,14 @@ static int atmel_usba_start(struct usb_gadget *gadget,
udc->driver = driver;
spin_unlock_irqrestore(&udc->lock, flags);
- clk_enable(udc->pclk);
- clk_enable(udc->hclk);
+ ret = clk_prepare_enable(udc->pclk);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(udc->hclk);
+ if (ret) {
+ clk_disable_unprepare(udc->pclk);
+ return ret;
+ }
DBG(DBG_GADGET, "registered driver `%s'\n", driver->driver.name);
@@ -1822,8 +1829,8 @@ static int atmel_usba_stop(struct usb_gadget *gadget,
udc->driver = NULL;
- clk_disable(udc->hclk);
- clk_disable(udc->pclk);
+ clk_disable_unprepare(udc->hclk);
+ clk_disable_unprepare(udc->pclk);
DBG(DBG_GADGET, "unregistered driver `%s'\n", driver->driver.name);
@@ -1922,7 +1929,7 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
static struct usba_ep * usba_udc_pdata(struct platform_device *pdev,
struct usba_udc *udc)
{
- struct usba_platform_data *pdata = pdev->dev.platform_data;
+ struct usba_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct usba_ep *eps;
int i;
@@ -2022,10 +2029,14 @@ static int __init usba_udc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, udc);
/* Make sure we start from a clean slate */
- clk_enable(pclk);
+ ret = clk_prepare_enable(pclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable pclk, aborting.\n");
+ goto err_clk_enable;
+ }
toggle_bias(0);
usba_writel(udc, CTRL, USBA_DISABLE_MASK);
- clk_disable(pclk);
+ clk_disable_unprepare(pclk);
if (pdev->dev.of_node)
udc->usba_ep = atmel_udc_of_init(pdev, udc);
@@ -2081,6 +2092,7 @@ err_add_udc:
free_irq(irq, udc);
err_request_irq:
err_alloc_ep:
+err_clk_enable:
iounmap(udc->fifo);
err_map_fifo:
iounmap(udc->regs);
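clk_prepare_enable() can fail, so the usba probe hunk above now checks its return value and bails out through a new err_clk_enable label instead of continuing with the clock off. A fragmentary sketch of the goto-unwind shape this slots into (labels and variables are illustrative, not the driver's exact code):

    ret = clk_prepare_enable(pclk);
    if (ret) {
        dev_err(&pdev->dev, "Unable to enable pclk, aborting.\n");
        goto err_clk_enable;
    }
    /* ... register writes that need pclk ... */
    clk_disable_unprepare(pclk);
    return 0;

    err_clk_enable:
        iounmap(fifo);        /* undo earlier probe steps in reverse order */
        return ret;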
diff --git a/drivers/usb/gadget/bcm63xx_udc.c b/drivers/usb/gadget/bcm63xx_udc.c
index fd24cb4540a..c58fcf1ebe4 100644
--- a/drivers/usb/gadget/bcm63xx_udc.c
+++ b/drivers/usb/gadget/bcm63xx_udc.c
@@ -2313,7 +2313,7 @@ static void bcm63xx_udc_cleanup_debugfs(struct bcm63xx_udc *udc)
static int bcm63xx_udc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct bcm63xx_usbd_platform_data *pd = dev->platform_data;
+ struct bcm63xx_usbd_platform_data *pd = dev_get_platdata(dev);
struct bcm63xx_udc *udc;
struct resource *res;
int rc = -ENOMEM, i, irq;
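Several hunks in this series replace open-coded dev->platform_data dereferences with the dev_get_platdata() accessor. A minimal sketch of the idiom, with a hypothetical platform-data type and probe function:

    #include <linux/platform_device.h>

    struct example_pdata { int vbus_gpio; };    /* illustrative only */

    static int example_probe(struct platform_device *pdev)
    {
        struct example_pdata *pd = dev_get_platdata(&pdev->dev);

        if (!pd)            /* no board data was supplied */
            return -ENODEV;
        return 0;
    }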
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 55f4df60f32..d4f0f330575 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1497,17 +1497,15 @@ void composite_disconnect(struct usb_gadget *gadget)
/*-------------------------------------------------------------------------*/
-static ssize_t composite_show_suspended(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t suspended_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct usb_gadget *gadget = dev_to_usb_gadget(dev);
struct usb_composite_dev *cdev = get_gadget_data(gadget);
return sprintf(buf, "%d\n", cdev->suspended);
}
-
-static DEVICE_ATTR(suspended, 0444, composite_show_suspended, NULL);
+static DEVICE_ATTR_RO(suspended);
static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver)
{
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 80e7f75a56c..8f0d6141e5e 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -859,8 +859,10 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
list_for_each_entry_safe(f, tmp, &cfg->func_list, list) {
list_del(&f->list);
ret = usb_add_function(c, f);
- if (ret)
+ if (ret) {
+ list_add(&f->list, &cfg->func_list);
goto err_purge_funcs;
+ }
}
usb_ep_autoconfig_reset(cdev->gadget);
}
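The configfs hunk above puts a function back on cfg->func_list when usb_add_function() fails; the cleanup taken at err_purge_funcs walks that list to release the functions, so an entry that had already been list_del()'d would otherwise be leaked. A hedged sketch of the idiom (the error label is illustrative):

    list_for_each_entry_safe(f, tmp, &cfg->func_list, list) {
        list_del(&f->list);
        ret = usb_add_function(c, f);
        if (ret) {
            /* re-queue it so the cleanup path can still find and free it */
            list_add(&f->list, &cfg->func_list);
            goto err_purge_funcs;
        }
    }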
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index c588e8e486e..06ecd08fd57 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -868,7 +868,7 @@ static const struct usb_gadget_ops dummy_ops = {
/*-------------------------------------------------------------------------*/
/* "function" sysfs attribute */
-static ssize_t show_function(struct device *dev, struct device_attribute *attr,
+static ssize_t function_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct dummy *dum = gadget_dev_to_dummy(dev);
@@ -877,7 +877,7 @@ static ssize_t show_function(struct device *dev, struct device_attribute *attr,
return 0;
return scnprintf(buf, PAGE_SIZE, "%s\n", dum->driver->function);
}
-static DEVICE_ATTR(function, S_IRUGO, show_function, NULL);
+static DEVICE_ATTR_RO(function);
/*-------------------------------------------------------------------------*/
@@ -2290,7 +2290,7 @@ static inline ssize_t show_urb(char *buf, size_t size, struct urb *urb)
urb->actual_length, urb->transfer_buffer_length);
}
-static ssize_t show_urbs(struct device *dev, struct device_attribute *attr,
+static ssize_t urbs_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
@@ -2311,7 +2311,7 @@ static ssize_t show_urbs(struct device *dev, struct device_attribute *attr,
return size;
}
-static DEVICE_ATTR(urbs, S_IRUGO, show_urbs, NULL);
+static DEVICE_ATTR_RO(urbs);
static int dummy_start_ss(struct dummy_hcd *dum_hcd)
{
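DEVICE_ATTR_RO(name) expands to a read-only (0444) attribute wired to a callback named name_show(), which is why the composite.c and dummy_hcd.c hunks rename their show routines before switching to the helper; DEVICE_ATTR_RW() likewise expects name_show() and name_store(). A small sketch with an invented attribute:

    static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
                char *buf)
    {
        return sprintf(buf, "%d\n", 42);    /* illustrative value */
    }
    static DEVICE_ATTR_RO(foo);            /* creates dev_attr_foo, mode 0444 */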
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index f48712ffe26..c1c113ef950 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -449,14 +449,20 @@ fail:
static int __exit eth_unbind(struct usb_composite_dev *cdev)
{
- if (has_rndis())
+ if (has_rndis()) {
+ usb_put_function(f_rndis);
usb_put_function_instance(fi_rndis);
- if (use_eem)
+ }
+ if (use_eem) {
+ usb_put_function(f_eem);
usb_put_function_instance(fi_eem);
- else if (can_support_ecm(cdev->gadget))
+ } else if (can_support_ecm(cdev->gadget)) {
+ usb_put_function(f_ecm);
usb_put_function_instance(fi_ecm);
- else
+ } else {
+ usb_put_function(f_geth);
usb_put_function_instance(fi_geth);
+ }
return 0;
}
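The ether.c hunk above pairs each usb_put_function_instance() in eth_unbind() with a usb_put_function() for the function object taken at bind time; without it, the struct usb_function obtained via usb_get_function() is never released. A hedged sketch of the get/put pairing, reusing the fi_ecm/f_ecm names from the driver:

    /* bind side */
    f_ecm = usb_get_function(fi_ecm);
    if (IS_ERR(f_ecm))
        return PTR_ERR(f_ecm);

    /* unbind side: drop the function first, then its instance */
    usb_put_function(f_ecm);
    usb_put_function_instance(fi_ecm);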
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index 4b7e33e5d9c..ab1065afbbd 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -285,6 +285,7 @@ static struct usb_string acm_string_defs[] = {
[ACM_CTRL_IDX].s = "CDC Abstract Control Model (ACM)",
[ACM_DATA_IDX].s = "CDC ACM Data",
[ACM_IAD_IDX ].s = "CDC Serial",
+ { } /* end of list */
};
static struct usb_gadget_strings acm_string_table = {
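The f_acm.c hunk adds an empty element to acm_string_defs because the generic string handling iterates a usb_string array until it reaches an entry whose .s is NULL; without the terminator the walk runs past the end of the table. A minimal sketch of a properly terminated table (names invented):

    static struct usb_string example_strings[] = {
        [0].s = "Example Interface",
        { }                /* zeroed entry terminates the list */
    };

    static struct usb_gadget_strings example_table = {
        .language = 0x0409,        /* en-US */
        .strings  = example_strings,
    };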
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index 5d3561ea1c1..edab45da374 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -959,8 +959,11 @@ static struct usb_function_instance *ecm_alloc_inst(void)
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = ecm_free_inst;
opts->net = gether_setup_default();
- if (IS_ERR(opts->net))
- return ERR_PTR(PTR_ERR(opts->net));
+ if (IS_ERR(opts->net)) {
+ struct net_device *net = opts->net;
+ kfree(opts);
+ return ERR_CAST(net);
+ }
config_group_init_type_name(&opts->func_inst.group, "", &ecm_func_type);
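The same fix repeats in f_ecm, f_eem, f_ncm, f_phonet, f_rndis and f_subset below: when gether_setup_default() fails, the freshly allocated opts structure used to be leaked, and ERR_PTR(PTR_ERR(...)) is more simply written as ERR_CAST(). The error pointer is copied into a local before kfree(opts) so the code never reads opts->net from freed memory. A condensed sketch of the pattern:

    opts->net = gether_setup_default();
    if (IS_ERR(opts->net)) {
        struct net_device *net = opts->net;    /* save before freeing opts */

        kfree(opts);                /* fix the allocation leak */
        return ERR_CAST(net);        /* keep the encoded errno */
    }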
diff --git a/drivers/usb/gadget/f_eem.c b/drivers/usb/gadget/f_eem.c
index 90ee8022e8d..d00392d879d 100644
--- a/drivers/usb/gadget/f_eem.c
+++ b/drivers/usb/gadget/f_eem.c
@@ -593,8 +593,11 @@ static struct usb_function_instance *eem_alloc_inst(void)
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = eem_free_inst;
opts->net = gether_setup_default();
- if (IS_ERR(opts->net))
- return ERR_CAST(opts->net);
+ if (IS_ERR(opts->net)) {
+ struct net_device *net = opts->net;
+ kfree(opts);
+ return ERR_CAST(net);
+ }
config_group_init_type_name(&opts->func_inst.group, "", &eem_func_type);
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index f394f295d63..1a66c5baa0d 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1417,8 +1417,8 @@ static void functionfs_unbind(struct ffs_data *ffs)
usb_ep_free_request(ffs->gadget->ep0, ffs->ep0req);
ffs->ep0req = NULL;
ffs->gadget = NULL;
- ffs_data_put(ffs);
clear_bit(FFS_FL_BOUND, &ffs->flags);
+ ffs_data_put(ffs);
}
}
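Reordering clear_bit() ahead of ffs_data_put() in the f_fs.c hunk matters because ffs_data_put() may drop the last reference and free the ffs object; touching ffs->flags afterwards would be a use-after-free. The safe shape, with the reasoning spelled out:

    clear_bit(FFS_FL_BOUND, &ffs->flags);    /* last access to the object... */
    ffs_data_put(ffs);                       /* ...then drop the reference that may free it */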
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 56f1fd1cba2..313b835eedf 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -2578,14 +2578,12 @@ static int fsg_main_thread(void *common_)
/*************************** DEVICE ATTRIBUTES ***************************/
-static DEVICE_ATTR(ro, 0644, fsg_show_ro, fsg_store_ro);
-static DEVICE_ATTR(nofua, 0644, fsg_show_nofua, fsg_store_nofua);
-static DEVICE_ATTR(file, 0644, fsg_show_file, fsg_store_file);
+static DEVICE_ATTR_RW(ro);
+static DEVICE_ATTR_RW(nofua);
+static DEVICE_ATTR_RW(file);
-static struct device_attribute dev_attr_ro_cdrom =
- __ATTR(ro, 0444, fsg_show_ro, NULL);
-static struct device_attribute dev_attr_file_nonremovable =
- __ATTR(file, 0444, fsg_show_file, NULL);
+static struct device_attribute dev_attr_ro_cdrom = __ATTR_RO(ro);
+static struct device_attribute dev_attr_file_nonremovable = __ATTR_RO(file);
/****************************** FSG COMMON ******************************/
@@ -3043,12 +3041,12 @@ fsg_config_from_params(struct fsg_config *cfg,
lun->filename =
params->file_count > i && params->file[i][0]
? params->file[i]
- : 0;
+ : NULL;
}
/* Let MSF use defaults */
- cfg->vendor_name = 0;
- cfg->product_name = 0;
+ cfg->vendor_name = NULL;
+ cfg->product_name = NULL;
cfg->ops = NULL;
cfg->private_data = NULL;
diff --git a/drivers/usb/gadget/f_ncm.c b/drivers/usb/gadget/f_ncm.c
index 952177f7eb9..1c28fe13328 100644
--- a/drivers/usb/gadget/f_ncm.c
+++ b/drivers/usb/gadget/f_ncm.c
@@ -1350,8 +1350,11 @@ static struct usb_function_instance *ncm_alloc_inst(void)
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = ncm_free_inst;
opts->net = gether_setup_default();
- if (IS_ERR(opts->net))
- return ERR_PTR(PTR_ERR(opts->net));
+ if (IS_ERR(opts->net)) {
+ struct net_device *net = opts->net;
+ kfree(opts);
+ return ERR_CAST(net);
+ }
config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type);
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 7944fb0efe3..eb3aa817a66 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -488,7 +488,6 @@ static int pn_bind(struct usb_configuration *c, struct usb_function *f)
struct usb_ep *ep;
int status, i;
-#ifndef USBF_PHONET_INCLUDED
struct f_phonet_opts *phonet_opts;
phonet_opts = container_of(f->fi, struct f_phonet_opts, func_inst);
@@ -507,7 +506,6 @@ static int pn_bind(struct usb_configuration *c, struct usb_function *f)
return status;
phonet_opts->bound = true;
}
-#endif
/* Reserve interface IDs */
status = usb_interface_id(c, f);
@@ -656,8 +654,11 @@ static struct usb_function_instance *phonet_alloc_inst(void)
opts->func_inst.free_func_inst = phonet_free_inst;
opts->net = gphonet_setup_default();
- if (IS_ERR(opts->net))
- return ERR_PTR(PTR_ERR(opts->net));
+ if (IS_ERR(opts->net)) {
+ struct net_device *net = opts->net;
+ kfree(opts);
+ return ERR_CAST(net);
+ }
config_group_init_type_name(&opts->func_inst.group, "",
&phonet_func_type);
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 191df35ae69..717ed7f9563 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -963,8 +963,11 @@ static struct usb_function_instance *rndis_alloc_inst(void)
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = rndis_free_inst;
opts->net = gether_setup_default();
- if (IS_ERR(opts->net))
- return ERR_CAST(opts->net);
+ if (IS_ERR(opts->net)) {
+ struct net_device *net = opts->net;
+ kfree(opts);
+ return ERR_CAST(net);
+ }
config_group_init_type_name(&opts->func_inst.group, "",
&rndis_func_type);
diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
index 5601e1d96c4..7c8674fa7e8 100644
--- a/drivers/usb/gadget/f_subset.c
+++ b/drivers/usb/gadget/f_subset.c
@@ -505,8 +505,11 @@ static struct usb_function_instance *geth_alloc_inst(void)
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = geth_free_inst;
opts->net = gether_setup_default();
- if (IS_ERR(opts->net))
- return ERR_CAST(opts->net);
+ if (IS_ERR(opts->net)) {
+ struct net_device *net = opts->net;
+ kfree(opts);
+ return ERR_CAST(net);
+ }
config_group_init_type_name(&opts->func_inst.group, "",
&gether_func_type);
diff --git a/drivers/usb/gadget/f_uac1.c b/drivers/usb/gadget/f_uac1.c
index fa8ea4ea00c..2b4c82d84bf 100644
--- a/drivers/usb/gadget/f_uac1.c
+++ b/drivers/usb/gadget/f_uac1.c
@@ -695,7 +695,7 @@ static int generic_get_cmd(struct usb_audio_control *con, u8 cmd)
}
/* Todo: add more control selecotor dynamically */
-int __init control_selector_init(struct f_audio *audio)
+static int __init control_selector_init(struct f_audio *audio)
{
INIT_LIST_HEAD(&audio->cs);
list_add(&feature_unit.list, &audio->cs);
@@ -719,7 +719,7 @@ int __init control_selector_init(struct f_audio *audio)
*
* Returns zero on success, else negative errno.
*/
-int __init audio_bind_config(struct usb_configuration *c)
+static int __init audio_bind_config(struct usb_configuration *c)
{
struct f_audio *audio;
int status;
diff --git a/drivers/usb/gadget/fotg210-udc.c b/drivers/usb/gadget/fotg210-udc.c
index cce5535b1dc..32db2eee2d8 100644
--- a/drivers/usb/gadget/fotg210-udc.c
+++ b/drivers/usb/gadget/fotg210-udc.c
@@ -1074,9 +1074,9 @@ static struct usb_gadget_ops fotg210_gadget_ops = {
.udc_stop = fotg210_udc_stop,
};
-static int __exit fotg210_udc_remove(struct platform_device *pdev)
+static int fotg210_udc_remove(struct platform_device *pdev)
{
- struct fotg210_udc *fotg210 = dev_get_drvdata(&pdev->dev);
+ struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
usb_del_gadget_udc(&fotg210->gadget);
iounmap(fotg210->reg);
@@ -1088,7 +1088,7 @@ static int __exit fotg210_udc_remove(struct platform_device *pdev)
return 0;
}
-static int __init fotg210_udc_probe(struct platform_device *pdev)
+static int fotg210_udc_probe(struct platform_device *pdev)
{
struct resource *res, *ires;
struct fotg210_udc *fotg210 = NULL;
@@ -1134,7 +1134,7 @@ static int __init fotg210_udc_probe(struct platform_device *pdev)
spin_lock_init(&fotg210->lock);
- dev_set_drvdata(&pdev->dev, fotg210);
+ platform_set_drvdata(pdev, fotg210);
fotg210->gadget.ops = &fotg210_gadget_ops;
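The fotg210 hunks switch from dev_set_drvdata()/dev_get_drvdata() on &pdev->dev to the platform_set_drvdata()/platform_get_drvdata() wrappers, which take the platform_device directly, and drop __init/__exit from probe/remove so the callbacks cannot be discarded while the driver core may still invoke them. A small sketch of the drvdata pairing (types and names are illustrative):

    struct example_priv { int id; };

    static int example_probe(struct platform_device *pdev)
    {
        struct example_priv *priv;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
            return -ENOMEM;
        platform_set_drvdata(pdev, priv);    /* stash private data on the device */
        return 0;
    }

    static int example_remove(struct platform_device *pdev)
    {
        struct example_priv *priv = platform_get_drvdata(pdev);

        dev_dbg(&pdev->dev, "removing instance %d\n", priv->id);
        return 0;
    }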
diff --git a/drivers/usb/gadget/fsl_mxc_udc.c b/drivers/usb/gadget/fsl_mxc_udc.c
index d3bd7b095ba..9b140fc4d3b 100644
--- a/drivers/usb/gadget/fsl_mxc_udc.c
+++ b/drivers/usb/gadget/fsl_mxc_udc.c
@@ -33,7 +33,7 @@ int fsl_udc_clk_init(struct platform_device *pdev)
unsigned long freq;
int ret;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
mxc_ipg_clk = devm_clk_get(&pdev->dev, "ipg");
if (IS_ERR(mxc_ipg_clk)) {
@@ -80,7 +80,7 @@ eclkrate:
int fsl_udc_clk_finalize(struct platform_device *pdev)
{
- struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
int ret = 0;
/* workaround ENGcm09152 for i.MX35 */
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index a766a4ca1cb..36ac7cfba91 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -2248,7 +2248,7 @@ static int __init struct_udc_setup(struct fsl_udc *udc,
struct fsl_usb2_platform_data *pdata;
size_t size;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
udc->phy_mode = pdata->phy_mode;
udc->eps = kzalloc(sizeof(struct fsl_ep) * udc->max_ep, GFP_KERNEL);
@@ -2343,7 +2343,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
return -ENOMEM;
}
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
udc_controller->pdata = pdata;
spin_lock_init(&udc_controller->lock);
udc_controller->stopped = 1;
@@ -2524,7 +2524,7 @@ err_kfree:
static int __exit fsl_udc_remove(struct platform_device *pdev)
{
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
DECLARE_COMPLETION(done);
diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c
index c83f3e16532..f1dd6daabe2 100644
--- a/drivers/usb/gadget/fusb300_udc.c
+++ b/drivers/usb/gadget/fusb300_udc.c
@@ -557,7 +557,7 @@ static void fusb300_set_cxdone(struct fusb300 *fusb300)
}
/* read data from cx fifo */
-void fusb300_rdcxf(struct fusb300 *fusb300,
+static void fusb300_rdcxf(struct fusb300 *fusb300,
u8 *buffer, u32 length)
{
int i = 0;
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 52dd6cc6c0a..c64deb9e3d6 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -772,7 +772,7 @@ goku_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
} /* else pio or dma irq handler advances the queue. */
- if (likely(req != 0))
+ if (likely(req != NULL))
list_add_tail(&req->queue, &ep->queue);
if (likely(!list_empty(&ep->queue))
diff --git a/drivers/usb/gadget/hid.c b/drivers/usb/gadget/hid.c
index c36260ea8bf..778613eb37a 100644
--- a/drivers/usb/gadget/hid.c
+++ b/drivers/usb/gadget/hid.c
@@ -185,7 +185,7 @@ static int __exit hid_unbind(struct usb_composite_dev *cdev)
static int __init hidg_plat_driver_probe(struct platform_device *pdev)
{
- struct hidg_func_descriptor *func = pdev->dev.platform_data;
+ struct hidg_func_descriptor *func = dev_get_platdata(&pdev->dev);
struct hidg_func_node *entry;
if (!func) {
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
deleted file mode 100644
index 9b2d24e4c95..00000000000
--- a/drivers/usb/gadget/imx_udc.c
+++ /dev/null
@@ -1,1544 +0,0 @@
-/*
- * driver/usb/gadget/imx_udc.c
- *
- * Copyright (C) 2005 Mike Lee <eemike@gmail.com>
- * Copyright (C) 2008 Darius Augulis <augulis.darius@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/device.h>
-#include <linux/dma-mapping.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/timer.h>
-#include <linux/slab.h>
-#include <linux/prefetch.h>
-
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-
-#include <linux/platform_data/usb-imx_udc.h>
-#include <mach/hardware.h>
-
-#include "imx_udc.h"
-
-static const char driver_name[] = "imx_udc";
-static const char ep0name[] = "ep0";
-
-void ep0_chg_stat(const char *label, struct imx_udc_struct *imx_usb,
- enum ep0_state stat);
-
-/*******************************************************************************
- * IMX UDC hardware related functions
- *******************************************************************************
- */
-
-void imx_udc_enable(struct imx_udc_struct *imx_usb)
-{
- int temp = __raw_readl(imx_usb->base + USB_CTRL);
- __raw_writel(temp | CTRL_FE_ENA | CTRL_AFE_ENA,
- imx_usb->base + USB_CTRL);
- imx_usb->gadget.speed = USB_SPEED_FULL;
-}
-
-void imx_udc_disable(struct imx_udc_struct *imx_usb)
-{
- int temp = __raw_readl(imx_usb->base + USB_CTRL);
-
- __raw_writel(temp & ~(CTRL_FE_ENA | CTRL_AFE_ENA),
- imx_usb->base + USB_CTRL);
-
- ep0_chg_stat(__func__, imx_usb, EP0_IDLE);
- imx_usb->gadget.speed = USB_SPEED_UNKNOWN;
-}
-
-void imx_udc_reset(struct imx_udc_struct *imx_usb)
-{
- int temp = __raw_readl(imx_usb->base + USB_ENAB);
-
- /* set RST bit */
- __raw_writel(temp | ENAB_RST, imx_usb->base + USB_ENAB);
-
- /* wait RST bit to clear */
- do {} while (__raw_readl(imx_usb->base + USB_ENAB) & ENAB_RST);
-
- /* wait CFG bit to assert */
- do {} while (!(__raw_readl(imx_usb->base + USB_DADR) & DADR_CFG));
-
- /* udc module is now ready */
-}
-
-void imx_udc_config(struct imx_udc_struct *imx_usb)
-{
- u8 ep_conf[5];
- u8 i, j, cfg;
- struct imx_ep_struct *imx_ep;
-
- /* wait CFG bit to assert */
- do {} while (!(__raw_readl(imx_usb->base + USB_DADR) & DADR_CFG));
-
- /* Download the endpoint buffer for endpoint 0. */
- for (j = 0; j < 5; j++) {
- i = (j == 2 ? imx_usb->imx_ep[0].fifosize : 0x00);
- __raw_writeb(i, imx_usb->base + USB_DDAT);
- do {} while (__raw_readl(imx_usb->base + USB_DADR) & DADR_BSY);
- }
-
- /* Download the endpoint buffers for endpoints 1-5.
- * We specify two configurations, one interface
- */
- for (cfg = 1; cfg < 3; cfg++) {
- for (i = 1; i < IMX_USB_NB_EP; i++) {
- imx_ep = &imx_usb->imx_ep[i];
- /* EP no | Config no */
- ep_conf[0] = (i << 4) | (cfg << 2);
- /* Type | Direction */
- ep_conf[1] = (imx_ep->bmAttributes << 3) |
- (EP_DIR(imx_ep) << 2);
- /* Max packet size */
- ep_conf[2] = imx_ep->fifosize;
- /* TRXTYP */
- ep_conf[3] = 0xC0;
- /* FIFO no */
- ep_conf[4] = i;
-
- D_INI(imx_usb->dev,
- "<%s> ep%d_conf[%d]:"
- "[%02x-%02x-%02x-%02x-%02x]\n",
- __func__, i, cfg,
- ep_conf[0], ep_conf[1], ep_conf[2],
- ep_conf[3], ep_conf[4]);
-
- for (j = 0; j < 5; j++) {
- __raw_writeb(ep_conf[j],
- imx_usb->base + USB_DDAT);
- do {} while (__raw_readl(imx_usb->base
- + USB_DADR)
- & DADR_BSY);
- }
- }
- }
-
- /* wait CFG bit to clear */
- do {} while (__raw_readl(imx_usb->base + USB_DADR) & DADR_CFG);
-}
-
-void imx_udc_init_irq(struct imx_udc_struct *imx_usb)
-{
- int i;
-
- /* Mask and clear all irqs */
- __raw_writel(0xFFFFFFFF, imx_usb->base + USB_MASK);
- __raw_writel(0xFFFFFFFF, imx_usb->base + USB_INTR);
- for (i = 0; i < IMX_USB_NB_EP; i++) {
- __raw_writel(0x1FF, imx_usb->base + USB_EP_MASK(i));
- __raw_writel(0x1FF, imx_usb->base + USB_EP_INTR(i));
- }
-
- /* Enable USB irqs */
- __raw_writel(INTR_MSOF | INTR_FRAME_MATCH, imx_usb->base + USB_MASK);
-
- /* Enable EP0 irqs */
- __raw_writel(0x1FF & ~(EPINTR_DEVREQ | EPINTR_MDEVREQ | EPINTR_EOT
- | EPINTR_EOF | EPINTR_FIFO_EMPTY | EPINTR_FIFO_FULL),
- imx_usb->base + USB_EP_MASK(0));
-}
-
-void imx_udc_init_ep(struct imx_udc_struct *imx_usb)
-{
- int i, max, temp;
- struct imx_ep_struct *imx_ep;
- for (i = 0; i < IMX_USB_NB_EP; i++) {
- imx_ep = &imx_usb->imx_ep[i];
- switch (imx_ep->fifosize) {
- case 8:
- max = 0;
- break;
- case 16:
- max = 1;
- break;
- case 32:
- max = 2;
- break;
- case 64:
- max = 3;
- break;
- default:
- max = 1;
- break;
- }
- temp = (EP_DIR(imx_ep) << 7) | (max << 5)
- | (imx_ep->bmAttributes << 3);
- __raw_writel(temp, imx_usb->base + USB_EP_STAT(i));
- __raw_writel(temp | EPSTAT_FLUSH,
- imx_usb->base + USB_EP_STAT(i));
- D_INI(imx_usb->dev, "<%s> ep%d_stat %08x\n", __func__, i,
- __raw_readl(imx_usb->base + USB_EP_STAT(i)));
- }
-}
-
-void imx_udc_init_fifo(struct imx_udc_struct *imx_usb)
-{
- int i, temp;
- struct imx_ep_struct *imx_ep;
- for (i = 0; i < IMX_USB_NB_EP; i++) {
- imx_ep = &imx_usb->imx_ep[i];
-
- /* Fifo control */
- temp = EP_DIR(imx_ep) ? 0x0B000000 : 0x0F000000;
- __raw_writel(temp, imx_usb->base + USB_EP_FCTRL(i));
- D_INI(imx_usb->dev, "<%s> ep%d_fctrl %08x\n", __func__, i,
- __raw_readl(imx_usb->base + USB_EP_FCTRL(i)));
-
- /* Fifo alarm */
- temp = (i ? imx_ep->fifosize / 2 : 0);
- __raw_writel(temp, imx_usb->base + USB_EP_FALRM(i));
- D_INI(imx_usb->dev, "<%s> ep%d_falrm %08x\n", __func__, i,
- __raw_readl(imx_usb->base + USB_EP_FALRM(i)));
- }
-}
-
-static void imx_udc_init(struct imx_udc_struct *imx_usb)
-{
- /* Reset UDC */
- imx_udc_reset(imx_usb);
-
- /* Download config to enpoint buffer */
- imx_udc_config(imx_usb);
-
- /* Setup interrups */
- imx_udc_init_irq(imx_usb);
-
- /* Setup endpoints */
- imx_udc_init_ep(imx_usb);
-
- /* Setup fifos */
- imx_udc_init_fifo(imx_usb);
-}
-
-void imx_ep_irq_enable(struct imx_ep_struct *imx_ep)
-{
-
- int i = EP_NO(imx_ep);
-
- __raw_writel(0x1FF, imx_ep->imx_usb->base + USB_EP_MASK(i));
- __raw_writel(0x1FF, imx_ep->imx_usb->base + USB_EP_INTR(i));
- __raw_writel(0x1FF & ~(EPINTR_EOT | EPINTR_EOF),
- imx_ep->imx_usb->base + USB_EP_MASK(i));
-}
-
-void imx_ep_irq_disable(struct imx_ep_struct *imx_ep)
-{
-
- int i = EP_NO(imx_ep);
-
- __raw_writel(0x1FF, imx_ep->imx_usb->base + USB_EP_MASK(i));
- __raw_writel(0x1FF, imx_ep->imx_usb->base + USB_EP_INTR(i));
-}
-
-int imx_ep_empty(struct imx_ep_struct *imx_ep)
-{
- struct imx_udc_struct *imx_usb = imx_ep->imx_usb;
-
- return __raw_readl(imx_usb->base + USB_EP_FSTAT(EP_NO(imx_ep)))
- & FSTAT_EMPTY;
-}
-
-unsigned imx_fifo_bcount(struct imx_ep_struct *imx_ep)
-{
- struct imx_udc_struct *imx_usb = imx_ep->imx_usb;
-
- return (__raw_readl(imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)))
- & EPSTAT_BCOUNT) >> 16;
-}
-
-void imx_flush(struct imx_ep_struct *imx_ep)
-{
- struct imx_udc_struct *imx_usb = imx_ep->imx_usb;
-
- int temp = __raw_readl(imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)));
- __raw_writel(temp | EPSTAT_FLUSH,
- imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)));
-}
-
-void imx_ep_stall(struct imx_ep_struct *imx_ep)
-{
- struct imx_udc_struct *imx_usb = imx_ep->imx_usb;
- int temp, i;
-
- D_ERR(imx_usb->dev,
- "<%s> Forced stall on %s\n", __func__, imx_ep->ep.name);
-
- imx_flush(imx_ep);
-
- /* Special care for ep0 */
- if (!EP_NO(imx_ep)) {
- temp = __raw_readl(imx_usb->base + USB_CTRL);
- __raw_writel(temp | CTRL_CMDOVER | CTRL_CMDERROR,
- imx_usb->base + USB_CTRL);
- do { } while (__raw_readl(imx_usb->base + USB_CTRL)
- & CTRL_CMDOVER);
- temp = __raw_readl(imx_usb->base + USB_CTRL);
- __raw_writel(temp & ~CTRL_CMDERROR, imx_usb->base + USB_CTRL);
- }
- else {
- temp = __raw_readl(imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)));
- __raw_writel(temp | EPSTAT_STALL,
- imx_usb->base + USB_EP_STAT(EP_NO(imx_ep)));
-
- for (i = 0; i < 100; i ++) {
- temp = __raw_readl(imx_usb->base
- + USB_EP_STAT(EP_NO(imx_ep)));
- if (!(temp & EPSTAT_STALL))
- break;
- udelay(20);
- }
- if (i == 100)
- D_ERR(imx_usb->dev, "<%s> Non finished stall on %s\n",
- __func__, imx_ep->ep.name);
- }
-}
-
-static int imx_udc_get_frame(struct usb_gadget *_gadget)
-{
- struct imx_udc_struct *imx_usb = container_of(_gadget,
- struct imx_udc_struct, gadget);
-
- return __raw_readl(imx_usb->base + USB_FRAME) & 0x7FF;
-}
-
-static int imx_udc_wakeup(struct usb_gadget *_gadget)
-{
- return 0;
-}
-
-/*******************************************************************************
- * USB request control functions
- *******************************************************************************
- */
-
-static void ep_add_request(struct imx_ep_struct *imx_ep,
- struct imx_request *req)
-{
- if (unlikely(!req))
- return;
-
- req->in_use = 1;
- list_add_tail(&req->queue, &imx_ep->queue);
-}
-
-static void ep_del_request(struct imx_ep_struct *imx_ep,
- struct imx_request *req)
-{
- if (unlikely(!req))
- return;
-
- list_del_init(&req->queue);
- req->in_use = 0;
-}
-
-static void done(struct imx_ep_struct *imx_ep,
- struct imx_request *req, int status)
-{
- ep_del_request(imx_ep, req);
-
- if (likely(req->req.status == -EINPROGRESS))
- req->req.status = status;
- else
- status = req->req.status;
-
- if (status && status != -ESHUTDOWN)
- D_ERR(imx_ep->imx_usb->dev,
- "<%s> complete %s req %p stat %d len %u/%u\n", __func__,
- imx_ep->ep.name, &req->req, status,
- req->req.actual, req->req.length);
-
- req->req.complete(&imx_ep->ep, &req->req);
-}
-
-static void nuke(struct imx_ep_struct *imx_ep, int status)
-{
- struct imx_request *req;
-
- while (!list_empty(&imx_ep->queue)) {
- req = list_entry(imx_ep->queue.next, struct imx_request, queue);
- done(imx_ep, req, status);
- }
-}
-
-/*******************************************************************************
- * Data tansfer over USB functions
- *******************************************************************************
- */
-static int read_packet(struct imx_ep_struct *imx_ep, struct imx_request *req)
-{
- u8 *buf;
- int bytes_ep, bufferspace, count, i;
-
- bytes_ep = imx_fifo_bcount(imx_ep);
- bufferspace = req->req.length - req->req.actual;
-
- buf = req->req.buf + req->req.actual;
- prefetchw(buf);
-
- if (unlikely(imx_ep_empty(imx_ep)))
- count = 0; /* zlp */
- else
- count = min(bytes_ep, bufferspace);
-
- for (i = count; i > 0; i--)
- *buf++ = __raw_readb(imx_ep->imx_usb->base
- + USB_EP_FDAT0(EP_NO(imx_ep)));
- req->req.actual += count;
-
- return count;
-}
-
-static int write_packet(struct imx_ep_struct *imx_ep, struct imx_request *req)
-{
- u8 *buf;
- int length, count, temp;
-
- if (unlikely(__raw_readl(imx_ep->imx_usb->base +
- USB_EP_STAT(EP_NO(imx_ep))) & EPSTAT_ZLPS)) {
- D_TRX(imx_ep->imx_usb->dev, "<%s> zlp still queued in EP %s\n",
- __func__, imx_ep->ep.name);
- return -1;
- }
-
- buf = req->req.buf + req->req.actual;
- prefetch(buf);
-
- length = min(req->req.length - req->req.actual, (u32)imx_ep->fifosize);
-
- if (imx_fifo_bcount(imx_ep) + length > imx_ep->fifosize) {
- D_TRX(imx_ep->imx_usb->dev, "<%s> packet overfill %s fifo\n",
- __func__, imx_ep->ep.name);
- return -1;
- }
-
- req->req.actual += length;
- count = length;
-
- if (!count && req->req.zero) { /* zlp */
- temp = __raw_readl(imx_ep->imx_usb->base
- + USB_EP_STAT(EP_NO(imx_ep)));
- __raw_writel(temp | EPSTAT_ZLPS, imx_ep->imx_usb->base
- + USB_EP_STAT(EP_NO(imx_ep)));
- D_TRX(imx_ep->imx_usb->dev, "<%s> zero packet\n", __func__);
- return 0;
- }
-
- while (count--) {
- if (count == 0) { /* last byte */
- temp = __raw_readl(imx_ep->imx_usb->base
- + USB_EP_FCTRL(EP_NO(imx_ep)));
- __raw_writel(temp | FCTRL_WFR, imx_ep->imx_usb->base
- + USB_EP_FCTRL(EP_NO(imx_ep)));
- }
- __raw_writeb(*buf++,
- imx_ep->imx_usb->base + USB_EP_FDAT0(EP_NO(imx_ep)));
- }
-
- return length;
-}
-
-static int read_fifo(struct imx_ep_struct *imx_ep, struct imx_request *req)
-{
- int bytes = 0,
- count,
- completed = 0;
-
- while (__raw_readl(imx_ep->imx_usb->base + USB_EP_FSTAT(EP_NO(imx_ep)))
- & FSTAT_FR) {
- count = read_packet(imx_ep, req);
- bytes += count;
-
- completed = (count != imx_ep->fifosize);
- if (completed || req->req.actual == req->req.length) {
- completed = 1;
- break;
- }
- }
-
- if (completed || !req->req.length) {
- done(imx_ep, req, 0);
- D_REQ(imx_ep->imx_usb->dev, "<%s> %s req<%p> %s\n",
- __func__, imx_ep->ep.name, req,
- completed ? "completed" : "not completed");
- if (!EP_NO(imx_ep))
- ep0_chg_stat(__func__, imx_ep->imx_usb, EP0_IDLE);
- }
-
- D_TRX(imx_ep->imx_usb->dev, "<%s> bytes read: %d\n", __func__, bytes);
-
- return completed;
-}
-
-static int write_fifo(struct imx_ep_struct *imx_ep, struct imx_request *req)
-{
- int bytes = 0,
- count,
- completed = 0;
-
- while (!completed) {
- count = write_packet(imx_ep, req);
- if (count < 0)
- break; /* busy */
- bytes += count;
-
- /* last packet "must be" short (or a zlp) */
- completed = (count != imx_ep->fifosize);
-
- if (unlikely(completed)) {
- done(imx_ep, req, 0);
- D_REQ(imx_ep->imx_usb->dev, "<%s> %s req<%p> %s\n",
- __func__, imx_ep->ep.name, req,
- completed ? "completed" : "not completed");
- if (!EP_NO(imx_ep))
- ep0_chg_stat(__func__,
- imx_ep->imx_usb, EP0_IDLE);
- }
- }
-
- D_TRX(imx_ep->imx_usb->dev, "<%s> bytes sent: %d\n", __func__, bytes);
-
- return completed;
-}
-
-/*******************************************************************************
- * Endpoint handlers
- *******************************************************************************
- */
-static int handle_ep(struct imx_ep_struct *imx_ep)
-{
- struct imx_request *req;
- int completed = 0;
-
- do {
- if (!list_empty(&imx_ep->queue))
- req = list_entry(imx_ep->queue.next,
- struct imx_request, queue);
- else {
- D_REQ(imx_ep->imx_usb->dev, "<%s> no request on %s\n",
- __func__, imx_ep->ep.name);
- return 0;
- }
-
- if (EP_DIR(imx_ep)) /* to host */
- completed = write_fifo(imx_ep, req);
- else /* to device */
- completed = read_fifo(imx_ep, req);
-
- dump_ep_stat(__func__, imx_ep);
-
- } while (completed);
-
- return 0;
-}
-
-static int handle_ep0(struct imx_ep_struct *imx_ep)
-{
- struct imx_request *req = NULL;
- int ret = 0;
-
- if (!list_empty(&imx_ep->queue)) {
- req = list_entry(imx_ep->queue.next, struct imx_request, queue);
-
- switch (imx_ep->imx_usb->ep0state) {
-
- case EP0_IN_DATA_PHASE: /* GET_DESCRIPTOR */
- write_fifo(imx_ep, req);
- break;
- case EP0_OUT_DATA_PHASE: /* SET_DESCRIPTOR */
- read_fifo(imx_ep, req);
- break;
- default:
- D_EP0(imx_ep->imx_usb->dev,
- "<%s> ep0 i/o, odd state %d\n",
- __func__, imx_ep->imx_usb->ep0state);
- ep_del_request(imx_ep, req);
- ret = -EL2HLT;
- break;
- }
- }
-
- else
- D_ERR(imx_ep->imx_usb->dev, "<%s> no request on %s\n",
- __func__, imx_ep->ep.name);
-
- return ret;
-}
-
-static void handle_ep0_devreq(struct imx_udc_struct *imx_usb)
-{
- struct imx_ep_struct *imx_ep = &imx_usb->imx_ep[0];
- union {
- struct usb_ctrlrequest r;
- u8 raw[8];
- u32 word[2];
- } u;
- int temp, i;
-
- nuke(imx_ep, -EPROTO);
-
- /* read SETUP packet */
- for (i = 0; i < 2; i++) {
- if (imx_ep_empty(imx_ep)) {
- D_ERR(imx_usb->dev,
- "<%s> no setup packet received\n", __func__);
- goto stall;
- }
- u.word[i] = __raw_readl(imx_usb->base
- + USB_EP_FDAT(EP_NO(imx_ep)));
- }
-
- temp = imx_ep_empty(imx_ep);
- while (!imx_ep_empty(imx_ep)) {
- i = __raw_readl(imx_usb->base + USB_EP_FDAT(EP_NO(imx_ep)));
- D_ERR(imx_usb->dev,
- "<%s> wrong to have extra bytes for setup : 0x%08x\n",
- __func__, i);
- }
- if (!temp)
- goto stall;
-
- le16_to_cpus(&u.r.wValue);
- le16_to_cpus(&u.r.wIndex);
- le16_to_cpus(&u.r.wLength);
-
- D_REQ(imx_usb->dev, "<%s> SETUP %02x.%02x v%04x i%04x l%04x\n",
- __func__, u.r.bRequestType, u.r.bRequest,
- u.r.wValue, u.r.wIndex, u.r.wLength);
-
- if (imx_usb->set_config) {
- /* NACK the host by using CMDOVER */
- temp = __raw_readl(imx_usb->base + USB_CTRL);
- __raw_writel(temp | CTRL_CMDOVER, imx_usb->base + USB_CTRL);
-
- D_ERR(imx_usb->dev,
- "<%s> set config req is pending, NACK the host\n",
- __func__);
- return;
- }
-
- if (u.r.bRequestType & USB_DIR_IN)
- ep0_chg_stat(__func__, imx_usb, EP0_IN_DATA_PHASE);
- else
- ep0_chg_stat(__func__, imx_usb, EP0_OUT_DATA_PHASE);
-
- i = imx_usb->driver->setup(&imx_usb->gadget, &u.r);
- if (i < 0) {
- D_ERR(imx_usb->dev, "<%s> device setup error %d\n",
- __func__, i);
- goto stall;
- }
-
- return;
-stall:
- D_ERR(imx_usb->dev, "<%s> protocol STALL\n", __func__);
- imx_ep_stall(imx_ep);
- ep0_chg_stat(__func__, imx_usb, EP0_STALL);
- return;
-}
-
-/*******************************************************************************
- * USB gadget callback functions
- *******************************************************************************
- */
-
-static int imx_ep_enable(struct usb_ep *usb_ep,
- const struct usb_endpoint_descriptor *desc)
-{
- struct imx_ep_struct *imx_ep = container_of(usb_ep,
- struct imx_ep_struct, ep);
- struct imx_udc_struct *imx_usb = imx_ep->imx_usb;
- unsigned long flags;
-
- if (!usb_ep
- || !desc
- || !EP_NO(imx_ep)
- || desc->bDescriptorType != USB_DT_ENDPOINT
- || imx_ep->bEndpointAddress != desc->bEndpointAddress) {
- D_ERR(imx_usb->dev,
- "<%s> bad ep or descriptor\n", __func__);
- return -EINVAL;
- }
-
- if (imx_ep->bmAttributes != desc->bmAttributes) {
- D_ERR(imx_usb->dev,
- "<%s> %s type mismatch\n", __func__, usb_ep->name);
- return -EINVAL;
- }
-
- if (imx_ep->fifosize < usb_endpoint_maxp(desc)) {
- D_ERR(imx_usb->dev,
- "<%s> bad %s maxpacket\n", __func__, usb_ep->name);
- return -ERANGE;
- }
-
- if (!imx_usb->driver || imx_usb->gadget.speed == USB_SPEED_UNKNOWN) {
- D_ERR(imx_usb->dev, "<%s> bogus device state\n", __func__);
- return -ESHUTDOWN;
- }
-
- local_irq_save(flags);
-
- imx_ep->stopped = 0;
- imx_flush(imx_ep);
- imx_ep_irq_enable(imx_ep);
-
- local_irq_restore(flags);
-
- D_EPX(imx_usb->dev, "<%s> ENABLED %s\n", __func__, usb_ep->name);
- return 0;
-}
-
-static int imx_ep_disable(struct usb_ep *usb_ep)
-{
- struct imx_ep_struct *imx_ep = container_of(usb_ep,
- struct imx_ep_struct, ep);
- unsigned long flags;
-
- if (!usb_ep || !EP_NO(imx_ep) || !list_empty(&imx_ep->queue)) {
- D_ERR(imx_ep->imx_usb->dev, "<%s> %s can not be disabled\n",
- __func__, usb_ep ? imx_ep->ep.name : NULL);
- return -EINVAL;
- }
-
- local_irq_save(flags);
-
- imx_ep->stopped = 1;
- nuke(imx_ep, -ESHUTDOWN);
- imx_flush(imx_ep);
- imx_ep_irq_disable(imx_ep);
-
- local_irq_restore(flags);
-
- D_EPX(imx_ep->imx_usb->dev,
- "<%s> DISABLED %s\n", __func__, usb_ep->name);
- return 0;
-}
-
-static struct usb_request *imx_ep_alloc_request
- (struct usb_ep *usb_ep, gfp_t gfp_flags)
-{
- struct imx_request *req;
-
- if (!usb_ep)
- return NULL;
-
- req = kzalloc(sizeof *req, gfp_flags);
- if (!req)
- return NULL;
-
- INIT_LIST_HEAD(&req->queue);
- req->in_use = 0;
-
- return &req->req;
-}
-
-static void imx_ep_free_request
- (struct usb_ep *usb_ep, struct usb_request *usb_req)
-{
- struct imx_request *req;
-
- req = container_of(usb_req, struct imx_request, req);
- WARN_ON(!list_empty(&req->queue));
- kfree(req);
-}
-
-static int imx_ep_queue
- (struct usb_ep *usb_ep, struct usb_request *usb_req, gfp_t gfp_flags)
-{
- struct imx_ep_struct *imx_ep;
- struct imx_udc_struct *imx_usb;
- struct imx_request *req;
- unsigned long flags;
- int ret = 0;
-
- imx_ep = container_of(usb_ep, struct imx_ep_struct, ep);
- imx_usb = imx_ep->imx_usb;
- req = container_of(usb_req, struct imx_request, req);
-
- /*
- Special care on IMX udc.
- Ignore enqueue when after set configuration from the
- host. This assume all gadget drivers reply set
- configuration with the next ep0 req enqueue.
- */
- if (imx_usb->set_config && !EP_NO(imx_ep)) {
- imx_usb->set_config = 0;
- D_ERR(imx_usb->dev,
- "<%s> gadget reply set config\n", __func__);
- return 0;
- }
-
- if (unlikely(!usb_req || !req || !usb_req->complete || !usb_req->buf)) {
- D_ERR(imx_usb->dev, "<%s> bad params\n", __func__);
- return -EINVAL;
- }
-
- if (unlikely(!usb_ep || !imx_ep)) {
- D_ERR(imx_usb->dev, "<%s> bad ep\n", __func__);
- return -EINVAL;
- }
-
- if (!imx_usb->driver || imx_usb->gadget.speed == USB_SPEED_UNKNOWN) {
- D_ERR(imx_usb->dev, "<%s> bogus device state\n", __func__);
- return -ESHUTDOWN;
- }
-
- /* Debug */
- D_REQ(imx_usb->dev, "<%s> ep%d %s request for [%d] bytes\n",
- __func__, EP_NO(imx_ep),
- ((!EP_NO(imx_ep) && imx_ep->imx_usb->ep0state
- == EP0_IN_DATA_PHASE)
- || (EP_NO(imx_ep) && EP_DIR(imx_ep)))
- ? "IN" : "OUT", usb_req->length);
- dump_req(__func__, imx_ep, usb_req);
-
- if (imx_ep->stopped) {
- usb_req->status = -ESHUTDOWN;
- return -ESHUTDOWN;
- }
-
- if (req->in_use) {
- D_ERR(imx_usb->dev,
- "<%s> refusing to queue req %p (already queued)\n",
- __func__, req);
- return 0;
- }
-
- local_irq_save(flags);
-
- usb_req->status = -EINPROGRESS;
- usb_req->actual = 0;
-
- ep_add_request(imx_ep, req);
-
- if (!EP_NO(imx_ep))
- ret = handle_ep0(imx_ep);
- else
- ret = handle_ep(imx_ep);
-
- local_irq_restore(flags);
- return ret;
-}
-
-static int imx_ep_dequeue(struct usb_ep *usb_ep, struct usb_request *usb_req)
-{
-
- struct imx_ep_struct *imx_ep = container_of
- (usb_ep, struct imx_ep_struct, ep);
- struct imx_request *req;
- unsigned long flags;
-
- if (unlikely(!usb_ep || !EP_NO(imx_ep))) {
- D_ERR(imx_ep->imx_usb->dev, "<%s> bad ep\n", __func__);
- return -EINVAL;
- }
-
- local_irq_save(flags);
-
- /* make sure it's actually queued on this endpoint */
- list_for_each_entry(req, &imx_ep->queue, queue) {
- if (&req->req == usb_req)
- break;
- }
- if (&req->req != usb_req) {
- local_irq_restore(flags);
- return -EINVAL;
- }
-
- done(imx_ep, req, -ECONNRESET);
-
- local_irq_restore(flags);
- return 0;
-}
-
-static int imx_ep_set_halt(struct usb_ep *usb_ep, int value)
-{
- struct imx_ep_struct *imx_ep = container_of
- (usb_ep, struct imx_ep_struct, ep);
- unsigned long flags;
-
- if (unlikely(!usb_ep || !EP_NO(imx_ep))) {
- D_ERR(imx_ep->imx_usb->dev, "<%s> bad ep\n", __func__);
- return -EINVAL;
- }
-
- local_irq_save(flags);
-
- if ((imx_ep->bEndpointAddress & USB_DIR_IN)
- && !list_empty(&imx_ep->queue)) {
- local_irq_restore(flags);
- return -EAGAIN;
- }
-
- imx_ep_stall(imx_ep);
-
- local_irq_restore(flags);
-
- D_EPX(imx_ep->imx_usb->dev, "<%s> %s halt\n", __func__, usb_ep->name);
- return 0;
-}
-
-static int imx_ep_fifo_status(struct usb_ep *usb_ep)
-{
- struct imx_ep_struct *imx_ep = container_of
- (usb_ep, struct imx_ep_struct, ep);
-
- if (!usb_ep) {
- D_ERR(imx_ep->imx_usb->dev, "<%s> bad ep\n", __func__);
- return -ENODEV;
- }
-
- if (imx_ep->imx_usb->gadget.speed == USB_SPEED_UNKNOWN)
- return 0;
- else
- return imx_fifo_bcount(imx_ep);
-}
-
-static void imx_ep_fifo_flush(struct usb_ep *usb_ep)
-{
- struct imx_ep_struct *imx_ep = container_of
- (usb_ep, struct imx_ep_struct, ep);
- unsigned long flags;
-
- local_irq_save(flags);
-
- if (!usb_ep || !EP_NO(imx_ep) || !list_empty(&imx_ep->queue)) {
- D_ERR(imx_ep->imx_usb->dev, "<%s> bad ep\n", __func__);
- local_irq_restore(flags);
- return;
- }
-
- /* toggle and halt bits stay unchanged */
- imx_flush(imx_ep);
-
- local_irq_restore(flags);
-}
-
-static struct usb_ep_ops imx_ep_ops = {
- .enable = imx_ep_enable,
- .disable = imx_ep_disable,
-
- .alloc_request = imx_ep_alloc_request,
- .free_request = imx_ep_free_request,
-
- .queue = imx_ep_queue,
- .dequeue = imx_ep_dequeue,
-
- .set_halt = imx_ep_set_halt,
- .fifo_status = imx_ep_fifo_status,
- .fifo_flush = imx_ep_fifo_flush,
-};
-
-/*******************************************************************************
- * USB endpoint control functions
- *******************************************************************************
- */
-
-void ep0_chg_stat(const char *label,
- struct imx_udc_struct *imx_usb, enum ep0_state stat)
-{
- D_EP0(imx_usb->dev, "<%s> from %15s to %15s\n",
- label, state_name[imx_usb->ep0state], state_name[stat]);
-
- if (imx_usb->ep0state == stat)
- return;
-
- imx_usb->ep0state = stat;
-}
-
-static void usb_init_data(struct imx_udc_struct *imx_usb)
-{
- struct imx_ep_struct *imx_ep;
- u8 i;
-
- /* device/ep0 records init */
- INIT_LIST_HEAD(&imx_usb->gadget.ep_list);
- INIT_LIST_HEAD(&imx_usb->gadget.ep0->ep_list);
- ep0_chg_stat(__func__, imx_usb, EP0_IDLE);
-
- /* basic endpoint records init */
- for (i = 0; i < IMX_USB_NB_EP; i++) {
- imx_ep = &imx_usb->imx_ep[i];
-
- if (i) {
- list_add_tail(&imx_ep->ep.ep_list,
- &imx_usb->gadget.ep_list);
- imx_ep->stopped = 1;
- } else
- imx_ep->stopped = 0;
-
- INIT_LIST_HEAD(&imx_ep->queue);
- }
-}
-
-static void udc_stop_activity(struct imx_udc_struct *imx_usb,
- struct usb_gadget_driver *driver)
-{
- struct imx_ep_struct *imx_ep;
- int i;
-
- if (imx_usb->gadget.speed == USB_SPEED_UNKNOWN)
- driver = NULL;
-
- /* prevent new request submissions, kill any outstanding requests */
- for (i = 1; i < IMX_USB_NB_EP; i++) {
- imx_ep = &imx_usb->imx_ep[i];
- imx_flush(imx_ep);
- imx_ep->stopped = 1;
- imx_ep_irq_disable(imx_ep);
- nuke(imx_ep, -ESHUTDOWN);
- }
-
- imx_usb->cfg = 0;
- imx_usb->intf = 0;
- imx_usb->alt = 0;
-
- if (driver)
- driver->disconnect(&imx_usb->gadget);
-}
-
-/*******************************************************************************
- * Interrupt handlers
- *******************************************************************************
- */
-
-/*
- * Called when timer expires.
- * Timer is started when CFG_CHG is received.
- */
-static void handle_config(unsigned long data)
-{
- struct imx_udc_struct *imx_usb = (void *)data;
- struct usb_ctrlrequest u;
- int temp, cfg, intf, alt;
-
- local_irq_disable();
-
- temp = __raw_readl(imx_usb->base + USB_STAT);
- cfg = (temp & STAT_CFG) >> 5;
- intf = (temp & STAT_INTF) >> 3;
- alt = temp & STAT_ALTSET;
-
- D_REQ(imx_usb->dev,
- "<%s> orig config C=%d, I=%d, A=%d / "
- "req config C=%d, I=%d, A=%d\n",
- __func__, imx_usb->cfg, imx_usb->intf, imx_usb->alt,
- cfg, intf, alt);
-
- if (cfg == 1 || cfg == 2) {
-
- if (imx_usb->cfg != cfg) {
- u.bRequest = USB_REQ_SET_CONFIGURATION;
- u.bRequestType = USB_DIR_OUT |
- USB_TYPE_STANDARD |
- USB_RECIP_DEVICE;
- u.wValue = cfg;
- u.wIndex = 0;
- u.wLength = 0;
- imx_usb->cfg = cfg;
- imx_usb->driver->setup(&imx_usb->gadget, &u);
-
- }
- if (imx_usb->intf != intf || imx_usb->alt != alt) {
- u.bRequest = USB_REQ_SET_INTERFACE;
- u.bRequestType = USB_DIR_OUT |
- USB_TYPE_STANDARD |
- USB_RECIP_INTERFACE;
- u.wValue = alt;
- u.wIndex = intf;
- u.wLength = 0;
- imx_usb->intf = intf;
- imx_usb->alt = alt;
- imx_usb->driver->setup(&imx_usb->gadget, &u);
- }
- }
-
- imx_usb->set_config = 0;
-
- local_irq_enable();
-}
-
-static irqreturn_t imx_udc_irq(int irq, void *dev)
-{
- struct imx_udc_struct *imx_usb = dev;
- int intr = __raw_readl(imx_usb->base + USB_INTR);
- int temp;
-
- if (intr & (INTR_WAKEUP | INTR_SUSPEND | INTR_RESUME | INTR_RESET_START
- | INTR_RESET_STOP | INTR_CFG_CHG)) {
- dump_intr(__func__, intr, imx_usb->dev);
- dump_usb_stat(__func__, imx_usb);
- }
-
- if (!imx_usb->driver)
- goto end_irq;
-
- if (intr & INTR_SOF) {
- /* Copy from Freescale BSP.
- We must enable SOF intr and set CMDOVER.
- Datasheet don't specifiy this action, but it
- is done in Freescale BSP, so just copy it.
- */
- if (imx_usb->ep0state == EP0_IDLE) {
- temp = __raw_readl(imx_usb->base + USB_CTRL);
- __raw_writel(temp | CTRL_CMDOVER,
- imx_usb->base + USB_CTRL);
- }
- }
-
- if (intr & INTR_CFG_CHG) {
- /* A workaround of serious IMX UDC bug.
- Handling of CFG_CHG should be delayed for some time, because
- IMX does not NACK the host when CFG_CHG interrupt is pending.
- There is no time to handle current CFG_CHG
- if next CFG_CHG or SETUP packed is send immediately.
- We have to clear CFG_CHG, start the timer and
- NACK the host by setting CTRL_CMDOVER
- if it sends any SETUP packet.
- When timer expires, handler is called to handle configuration
- changes. While CFG_CHG is not handled (set_config=1),
- we must NACK the host to every SETUP packed.
- This delay prevents from going out of sync with host.
- */
- __raw_writel(INTR_CFG_CHG, imx_usb->base + USB_INTR);
- imx_usb->set_config = 1;
- mod_timer(&imx_usb->timer, jiffies + 5);
- goto end_irq;
- }
-
- if (intr & INTR_WAKEUP) {
- if (imx_usb->gadget.speed == USB_SPEED_UNKNOWN
- && imx_usb->driver && imx_usb->driver->resume)
- imx_usb->driver->resume(&imx_usb->gadget);
- imx_usb->set_config = 0;
- del_timer(&imx_usb->timer);
- imx_usb->gadget.speed = USB_SPEED_FULL;
- }
-
- if (intr & INTR_SUSPEND) {
- if (imx_usb->gadget.speed != USB_SPEED_UNKNOWN
- && imx_usb->driver && imx_usb->driver->suspend)
- imx_usb->driver->suspend(&imx_usb->gadget);
- imx_usb->set_config = 0;
- del_timer(&imx_usb->timer);
- imx_usb->gadget.speed = USB_SPEED_UNKNOWN;
- }
-
- if (intr & INTR_RESET_START) {
- __raw_writel(intr, imx_usb->base + USB_INTR);
- udc_stop_activity(imx_usb, imx_usb->driver);
- imx_usb->set_config = 0;
- del_timer(&imx_usb->timer);
- imx_usb->gadget.speed = USB_SPEED_UNKNOWN;
- }
-
- if (intr & INTR_RESET_STOP)
- imx_usb->gadget.speed = USB_SPEED_FULL;
-
-end_irq:
- __raw_writel(intr, imx_usb->base + USB_INTR);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t imx_udc_ctrl_irq(int irq, void *dev)
-{
- struct imx_udc_struct *imx_usb = dev;
- struct imx_ep_struct *imx_ep = &imx_usb->imx_ep[0];
- int intr = __raw_readl(imx_usb->base + USB_EP_INTR(0));
-
- dump_ep_intr(__func__, 0, intr, imx_usb->dev);
-
- if (!imx_usb->driver) {
- __raw_writel(intr, imx_usb->base + USB_EP_INTR(0));
- return IRQ_HANDLED;
- }
-
- /* DEVREQ has highest priority */
- if (intr & (EPINTR_DEVREQ | EPINTR_MDEVREQ))
- handle_ep0_devreq(imx_usb);
- /* Seem i.MX is missing EOF interrupt sometimes.
- * Therefore we don't monitor EOF.
- * We call handle_ep0() only if a request is queued for ep0.
- */
- else if (!list_empty(&imx_ep->queue))
- handle_ep0(imx_ep);
-
- __raw_writel(intr, imx_usb->base + USB_EP_INTR(0));
-
- return IRQ_HANDLED;
-}
-
-#ifndef MX1_INT_USBD0
-#define MX1_INT_USBD0 MX1_USBD_INT0
-#endif
-
-static irqreturn_t imx_udc_bulk_irq(int irq, void *dev)
-{
- struct imx_udc_struct *imx_usb = dev;
- struct imx_ep_struct *imx_ep = &imx_usb->imx_ep[irq - MX1_INT_USBD0];
- int intr = __raw_readl(imx_usb->base + USB_EP_INTR(EP_NO(imx_ep)));
-
- dump_ep_intr(__func__, irq - MX1_INT_USBD0, intr, imx_usb->dev);
-
- if (!imx_usb->driver) {
- __raw_writel(intr, imx_usb->base + USB_EP_INTR(EP_NO(imx_ep)));
- return IRQ_HANDLED;
- }
-
- handle_ep(imx_ep);
-
- __raw_writel(intr, imx_usb->base + USB_EP_INTR(EP_NO(imx_ep)));
-
- return IRQ_HANDLED;
-}
-
-irq_handler_t intr_handler(int i)
-{
- switch (i) {
- case 0:
- return imx_udc_ctrl_irq;
- case 1:
- case 2:
- case 3:
- case 4:
- case 5:
- return imx_udc_bulk_irq;
- default:
- return imx_udc_irq;
- }
-}
-
-/*******************************************************************************
- * Static defined IMX UDC structure
- *******************************************************************************
- */
-
-static int imx_udc_start(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver);
-static int imx_udc_stop(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver);
-static const struct usb_gadget_ops imx_udc_ops = {
- .get_frame = imx_udc_get_frame,
- .wakeup = imx_udc_wakeup,
- .udc_start = imx_udc_start,
- .udc_stop = imx_udc_stop,
-};
-
-static struct imx_udc_struct controller = {
- .gadget = {
- .ops = &imx_udc_ops,
- .ep0 = &controller.imx_ep[0].ep,
- .name = driver_name,
- .dev = {
- .init_name = "gadget",
- },
- },
-
- .imx_ep[0] = {
- .ep = {
- .name = ep0name,
- .ops = &imx_ep_ops,
- .maxpacket = 32,
- },
- .imx_usb = &controller,
- .fifosize = 32,
- .bEndpointAddress = 0,
- .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
- },
- .imx_ep[1] = {
- .ep = {
- .name = "ep1in-bulk",
- .ops = &imx_ep_ops,
- .maxpacket = 64,
- },
- .imx_usb = &controller,
- .fifosize = 64,
- .bEndpointAddress = USB_DIR_IN | 1,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- },
- .imx_ep[2] = {
- .ep = {
- .name = "ep2out-bulk",
- .ops = &imx_ep_ops,
- .maxpacket = 64,
- },
- .imx_usb = &controller,
- .fifosize = 64,
- .bEndpointAddress = USB_DIR_OUT | 2,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- },
- .imx_ep[3] = {
- .ep = {
- .name = "ep3out-bulk",
- .ops = &imx_ep_ops,
- .maxpacket = 32,
- },
- .imx_usb = &controller,
- .fifosize = 32,
- .bEndpointAddress = USB_DIR_OUT | 3,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- },
- .imx_ep[4] = {
- .ep = {
- .name = "ep4in-int",
- .ops = &imx_ep_ops,
- .maxpacket = 32,
- },
- .imx_usb = &controller,
- .fifosize = 32,
- .bEndpointAddress = USB_DIR_IN | 4,
- .bmAttributes = USB_ENDPOINT_XFER_INT,
- },
- .imx_ep[5] = {
- .ep = {
- .name = "ep5out-int",
- .ops = &imx_ep_ops,
- .maxpacket = 32,
- },
- .imx_usb = &controller,
- .fifosize = 32,
- .bEndpointAddress = USB_DIR_OUT | 5,
- .bmAttributes = USB_ENDPOINT_XFER_INT,
- },
-};
-
-/*******************************************************************************
- * USB gadget driver functions
- *******************************************************************************
- */
-static int imx_udc_start(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
-{
- struct imx_udc_struct *imx_usb;
-
- imx_usb = container_of(gadget, struct imx_udc_struct, gadget);
- /* first hook up the driver ... */
- imx_usb->driver = driver;
-
- D_INI(imx_usb->dev, "<%s> registered gadget driver '%s'\n",
- __func__, driver->driver.name);
-
- imx_udc_enable(imx_usb);
-
- return 0;
-}
-
-static int imx_udc_stop(struct usb_gadget *gadget,
- struct usb_gadget_driver *driver)
-{
- struct imx_udc_struct *imx_usb = container_of(gadget,
- struct imx_udc_struct, gadget);
-
- udc_stop_activity(imx_usb, driver);
- imx_udc_disable(imx_usb);
- del_timer(&imx_usb->timer);
-
- imx_usb->driver = NULL;
-
- D_INI(imx_usb->dev, "<%s> unregistered gadget driver '%s'\n",
- __func__, driver->driver.name);
-
- return 0;
-}
-
-/*******************************************************************************
- * Module functions
- *******************************************************************************
- */
-
-static int __init imx_udc_probe(struct platform_device *pdev)
-{
- struct imx_udc_struct *imx_usb = &controller;
- struct resource *res;
- struct imxusb_platform_data *pdata;
- struct clk *clk;
- void __iomem *base;
- int ret = 0;
- int i;
- resource_size_t res_size;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "can't get device resources\n");
- return -ENODEV;
- }
-
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- dev_err(&pdev->dev, "driver needs platform data\n");
- return -ENODEV;
- }
-
- res_size = resource_size(res);
- if (!request_mem_region(res->start, res_size, res->name)) {
- dev_err(&pdev->dev, "can't allocate %d bytes at %d address\n",
- res_size, res->start);
- return -ENOMEM;
- }
-
- if (pdata->init) {
- ret = pdata->init(&pdev->dev);
- if (ret)
- goto fail0;
- }
-
- base = ioremap(res->start, res_size);
- if (!base) {
- dev_err(&pdev->dev, "ioremap failed\n");
- ret = -EIO;
- goto fail1;
- }
-
- clk = clk_get(NULL, "usbd_clk");
- if (IS_ERR(clk)) {
- ret = PTR_ERR(clk);
- dev_err(&pdev->dev, "can't get USB clock\n");
- goto fail2;
- }
- clk_prepare_enable(clk);
-
- if (clk_get_rate(clk) != 48000000) {
- D_INI(&pdev->dev,
- "Bad USB clock (%d Hz), changing to 48000000 Hz\n",
- (int)clk_get_rate(clk));
- if (clk_set_rate(clk, 48000000)) {
- dev_err(&pdev->dev,
- "Unable to set correct USB clock (48MHz)\n");
- ret = -EIO;
- goto fail3;
- }
- }
-
- for (i = 0; i < IMX_USB_NB_EP + 1; i++) {
- imx_usb->usbd_int[i] = platform_get_irq(pdev, i);
- if (imx_usb->usbd_int[i] < 0) {
- dev_err(&pdev->dev, "can't get irq number\n");
- ret = -ENODEV;
- goto fail3;
- }
- }
-
- for (i = 0; i < IMX_USB_NB_EP + 1; i++) {
- ret = request_irq(imx_usb->usbd_int[i], intr_handler(i),
- 0, driver_name, imx_usb);
- if (ret) {
- dev_err(&pdev->dev, "can't get irq %i, err %d\n",
- imx_usb->usbd_int[i], ret);
- for (--i; i >= 0; i--)
- free_irq(imx_usb->usbd_int[i], imx_usb);
- goto fail3;
- }
- }
-
- imx_usb->res = res;
- imx_usb->base = base;
- imx_usb->clk = clk;
- imx_usb->dev = &pdev->dev;
-
- platform_set_drvdata(pdev, imx_usb);
-
- usb_init_data(imx_usb);
- imx_udc_init(imx_usb);
-
- init_timer(&imx_usb->timer);
- imx_usb->timer.function = handle_config;
- imx_usb->timer.data = (unsigned long)imx_usb;
-
- ret = usb_add_gadget_udc(&pdev->dev, &imx_usb->gadget);
- if (ret)
- goto fail4;
-
- return 0;
-fail4:
- for (i = 0; i < IMX_USB_NB_EP + 1; i++)
- free_irq(imx_usb->usbd_int[i], imx_usb);
-fail3:
- clk_put(clk);
- clk_disable_unprepare(clk);
-fail2:
- iounmap(base);
-fail1:
- if (pdata->exit)
- pdata->exit(&pdev->dev);
-fail0:
- release_mem_region(res->start, res_size);
- return ret;
-}
-
-static int __exit imx_udc_remove(struct platform_device *pdev)
-{
- struct imx_udc_struct *imx_usb = platform_get_drvdata(pdev);
- struct imxusb_platform_data *pdata = pdev->dev.platform_data;
- int i;
-
- usb_del_gadget_udc(&imx_usb->gadget);
- imx_udc_disable(imx_usb);
- del_timer(&imx_usb->timer);
-
- for (i = 0; i < IMX_USB_NB_EP + 1; i++)
- free_irq(imx_usb->usbd_int[i], imx_usb);
-
- clk_put(imx_usb->clk);
- clk_disable_unprepare(imx_usb->clk);
- iounmap(imx_usb->base);
-
- release_mem_region(imx_usb->res->start, resource_size(imx_usb->res));
-
- if (pdata->exit)
- pdata->exit(&pdev->dev);
-
- return 0;
-}
-
-/*----------------------------------------------------------------------------*/
-
-#ifdef CONFIG_PM
-#define imx_udc_suspend NULL
-#define imx_udc_resume NULL
-#else
-#define imx_udc_suspend NULL
-#define imx_udc_resume NULL
-#endif
-
-/*----------------------------------------------------------------------------*/
-
-static struct platform_driver udc_driver = {
- .driver = {
- .name = driver_name,
- .owner = THIS_MODULE,
- },
- .remove = __exit_p(imx_udc_remove),
- .suspend = imx_udc_suspend,
- .resume = imx_udc_resume,
-};
-
-module_platform_driver_probe(udc_driver, imx_udc_probe);
-
-MODULE_DESCRIPTION("IMX USB Device Controller driver");
-MODULE_AUTHOR("Darius Augulis <augulis.darius@gmail.com>");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx_udc");
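The removed i.MX1 driver registered itself with module_platform_driver_probe(), a helper intended for devices that cannot hotplug: it registers through platform_driver_probe(), so the probe routine may stay in the __init section and be discarded after boot. A hedged sketch of that registration style (names invented):

    static struct platform_driver example_driver = {
        .driver = {
            .name = "example-udc",
        },
        .remove = example_remove,
    };

    /* probe is passed separately and may be __init; no .probe in the struct */
    module_platform_driver_probe(example_driver, example_probe);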
diff --git a/drivers/usb/gadget/imx_udc.h b/drivers/usb/gadget/imx_udc.h
deleted file mode 100644
index d118fb77784..00000000000
--- a/drivers/usb/gadget/imx_udc.h
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * Copyright (C) 2005 Mike Lee(eemike@gmail.com)
- *
- * This udc driver is now under testing and code is based on pxa2xx_udc.h
- * Please use it with your own risk!
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __LINUX_USB_GADGET_IMX_H
-#define __LINUX_USB_GADGET_IMX_H
-
-#include <linux/types.h>
-
-/* Helper macros */
-#define EP_NO(ep) ((ep->bEndpointAddress) & ~USB_DIR_IN) /* IN:1, OUT:0 */
-#define EP_DIR(ep) ((ep->bEndpointAddress) & USB_DIR_IN ? 1 : 0)
-#define IMX_USB_NB_EP 6
-
-/* Driver structures */
-struct imx_request {
- struct usb_request req;
- struct list_head queue;
- unsigned int in_use;
-};
-
-enum ep0_state {
- EP0_IDLE,
- EP0_IN_DATA_PHASE,
- EP0_OUT_DATA_PHASE,
- EP0_CONFIG,
- EP0_STALL,
-};
-
-struct imx_ep_struct {
- struct usb_ep ep;
- struct imx_udc_struct *imx_usb;
- struct list_head queue;
- unsigned char stopped;
- unsigned char fifosize;
- unsigned char bEndpointAddress;
- unsigned char bmAttributes;
-};
-
-struct imx_udc_struct {
- struct usb_gadget gadget;
- struct usb_gadget_driver *driver;
- struct device *dev;
- struct imx_ep_struct imx_ep[IMX_USB_NB_EP];
- struct clk *clk;
- struct timer_list timer;
- enum ep0_state ep0state;
- struct resource *res;
- void __iomem *base;
- unsigned char set_config;
- int cfg,
- intf,
- alt,
- usbd_int[7];
-};
-
-/* USB registers */
-#define USB_FRAME (0x00) /* USB frame */
-#define USB_SPEC (0x04) /* USB Spec */
-#define USB_STAT (0x08) /* USB Status */
-#define USB_CTRL (0x0C) /* USB Control */
-#define USB_DADR (0x10) /* USB Desc RAM addr */
-#define USB_DDAT (0x14) /* USB Desc RAM/EP buffer data */
-#define USB_INTR (0x18) /* USB interrupt */
-#define USB_MASK (0x1C) /* USB Mask */
-#define USB_ENAB (0x24) /* USB Enable */
-#define USB_EP_STAT(x) (0x30 + (x*0x30)) /* USB status/control */
-#define USB_EP_INTR(x) (0x34 + (x*0x30)) /* USB interrupt */
-#define USB_EP_MASK(x) (0x38 + (x*0x30)) /* USB mask */
-#define USB_EP_FDAT(x) (0x3C + (x*0x30)) /* USB FIFO data */
-#define USB_EP_FDAT0(x) (0x3C + (x*0x30)) /* USB FIFO data */
-#define USB_EP_FDAT1(x) (0x3D + (x*0x30)) /* USB FIFO data */
-#define USB_EP_FDAT2(x) (0x3E + (x*0x30)) /* USB FIFO data */
-#define USB_EP_FDAT3(x) (0x3F + (x*0x30)) /* USB FIFO data */
-#define USB_EP_FSTAT(x) (0x40 + (x*0x30)) /* USB FIFO status */
-#define USB_EP_FCTRL(x) (0x44 + (x*0x30)) /* USB FIFO control */
-#define USB_EP_LRFP(x) (0x48 + (x*0x30)) /* USB last rd f. pointer */
-#define USB_EP_LWFP(x) (0x4C + (x*0x30)) /* USB last wr f. pointer */
-#define USB_EP_FALRM(x) (0x50 + (x*0x30)) /* USB FIFO alarm */
-#define USB_EP_FRDP(x) (0x54 + (x*0x30)) /* USB FIFO read pointer */
-#define USB_EP_FWRP(x) (0x58 + (x*0x30)) /* USB FIFO write pointer */
-/* USB Control Register Bit Fields.*/
-#define CTRL_CMDOVER (1<<6) /* UDC status */
-#define CTRL_CMDERROR (1<<5) /* UDC status */
-#define CTRL_FE_ENA (1<<3) /* Enable Font End logic */
-#define CTRL_UDC_RST (1<<2) /* UDC reset */
-#define CTRL_AFE_ENA (1<<1) /* Analog Font end enable */
-#define CTRL_RESUME (1<<0) /* UDC resume */
-/* USB Status Register Bit Fields.*/
-#define STAT_RST (1<<8)
-#define STAT_SUSP (1<<7)
-#define STAT_CFG (3<<5)
-#define STAT_INTF (3<<3)
-#define STAT_ALTSET (7<<0)
-/* USB Interrupt Status/Mask Registers Bit fields */
-#define INTR_WAKEUP (1<<31) /* Wake up Interrupt */
-#define INTR_MSOF (1<<7) /* Missed Start of Frame */
-#define INTR_SOF (1<<6) /* Start of Frame */
-#define INTR_RESET_STOP (1<<5) /* Reset Signaling stop */
-#define INTR_RESET_START (1<<4) /* Reset Signaling start */
-#define INTR_RESUME (1<<3) /* Suspend to resume */
-#define INTR_SUSPEND (1<<2) /* Active to suspend */
-#define INTR_FRAME_MATCH (1<<1) /* Frame matched */
-#define INTR_CFG_CHG (1<<0) /* Configuration change occurred */
-/* USB Enable Register Bit Fields.*/
-#define ENAB_RST (1<<31) /* Reset USB modules */
-#define ENAB_ENAB (1<<30) /* Enable USB modules*/
-#define ENAB_SUSPEND (1<<29) /* Suspend USB modules */
-#define ENAB_ENDIAN (1<<28) /* Endian of USB modules */
-#define ENAB_PWRMD (1<<0) /* Power mode of USB modules */
-/* USB Descriptor Ram Address Register bit fields */
-#define DADR_CFG (1<<31) /* Configuration */
-#define DADR_BSY (1<<30) /* Busy status */
-#define DADR_DADR (0x1FF) /* Descriptor Ram Address */
-/* USB Descriptor RAM/Endpoint Buffer Data Register bit fields */
-#define DDAT_DDAT (0xFF) /* Descriptor Endpoint Buffer */
-/* USB Endpoint Status Register bit fields */
-#define EPSTAT_BCOUNT (0x7F<<16) /* Endpoint FIFO byte count */
-#define EPSTAT_SIP (1<<8) /* Endpoint setup in progress */
-#define EPSTAT_DIR (1<<7) /* Endpoint transfer direction */
-#define EPSTAT_MAX (3<<5) /* Endpoint Max packet size */
-#define EPSTAT_TYP (3<<3) /* Endpoint type */
-#define EPSTAT_ZLPS (1<<2) /* Send zero length packet */
-#define EPSTAT_FLUSH (1<<1) /* Endpoint FIFO Flush */
-#define EPSTAT_STALL (1<<0) /* Force stall */
-/* USB Endpoint FIFO Status Register bit fields */
-#define FSTAT_FRAME_STAT (0xF<<24) /* Frame status bit [0-3] */
-#define FSTAT_ERR (1<<22) /* FIFO error */
-#define FSTAT_UF (1<<21) /* FIFO underflow */
-#define FSTAT_OF (1<<20) /* FIFO overflow */
-#define FSTAT_FR (1<<19) /* FIFO frame ready */
-#define FSTAT_FULL (1<<18) /* FIFO full */
-#define FSTAT_ALRM (1<<17) /* FIFO alarm */
-#define FSTAT_EMPTY (1<<16) /* FIFO empty */
-/* USB Endpoint FIFO Control Register bit fields */
-#define FCTRL_WFR (1<<29) /* Write frame end */
-/* USB Endpoint Interrupt Status Regsiter bit fields */
-#define EPINTR_FIFO_FULL (1<<8) /* fifo full */
-#define EPINTR_FIFO_EMPTY (1<<7) /* fifo empty */
-#define EPINTR_FIFO_ERROR (1<<6) /* fifo error */
-#define EPINTR_FIFO_HIGH (1<<5) /* fifo high */
-#define EPINTR_FIFO_LOW (1<<4) /* fifo low */
-#define EPINTR_MDEVREQ (1<<3) /* multi Device request */
-#define EPINTR_EOT (1<<2) /* fifo end of transfer */
-#define EPINTR_DEVREQ (1<<1) /* Device request */
-#define EPINTR_EOF (1<<0) /* fifo end of frame */
-
-/* Debug macros */
-#ifdef DEBUG
-
-/* #define DEBUG_REQ */
-/* #define DEBUG_TRX */
-/* #define DEBUG_INIT */
-/* #define DEBUG_EP0 */
-/* #define DEBUG_EPX */
-/* #define DEBUG_IRQ */
-/* #define DEBUG_EPIRQ */
-/* #define DEBUG_DUMP */
-/* #define DEBUG_ERR */
-
-#ifdef DEBUG_REQ
- #define D_REQ(dev, args...) dev_dbg(dev, ## args)
-#else
- #define D_REQ(dev, args...) do {} while (0)
-#endif /* DEBUG_REQ */
-
-#ifdef DEBUG_TRX
- #define D_TRX(dev, args...) dev_dbg(dev, ## args)
-#else
- #define D_TRX(dev, args...) do {} while (0)
-#endif /* DEBUG_TRX */
-
-#ifdef DEBUG_INIT
- #define D_INI(dev, args...) dev_dbg(dev, ## args)
-#else
- #define D_INI(dev, args...) do {} while (0)
-#endif /* DEBUG_INIT */
-
-#ifdef DEBUG_EP0
- static const char *state_name[] = {
- "EP0_IDLE",
- "EP0_IN_DATA_PHASE",
- "EP0_OUT_DATA_PHASE",
- "EP0_CONFIG",
- "EP0_STALL"
- };
- #define D_EP0(dev, args...) dev_dbg(dev, ## args)
-#else
- #define D_EP0(dev, args...) do {} while (0)
-#endif /* DEBUG_EP0 */
-
-#ifdef DEBUG_EPX
- #define D_EPX(dev, args...) dev_dbg(dev, ## args)
-#else
- #define D_EPX(dev, args...) do {} while (0)
-#endif /* DEBUG_EP0 */
-
-#ifdef DEBUG_IRQ
- static void dump_intr(const char *label, int irqreg, struct device *dev)
- {
- dev_dbg(dev, "<%s> USB_INTR=[%s%s%s%s%s%s%s%s%s]\n", label,
- (irqreg & INTR_WAKEUP) ? " wake" : "",
- (irqreg & INTR_MSOF) ? " msof" : "",
- (irqreg & INTR_SOF) ? " sof" : "",
- (irqreg & INTR_RESUME) ? " resume" : "",
- (irqreg & INTR_SUSPEND) ? " suspend" : "",
- (irqreg & INTR_RESET_STOP) ? " noreset" : "",
- (irqreg & INTR_RESET_START) ? " reset" : "",
- (irqreg & INTR_FRAME_MATCH) ? " fmatch" : "",
- (irqreg & INTR_CFG_CHG) ? " config" : "");
- }
-#else
- #define dump_intr(x, y, z) do {} while (0)
-#endif /* DEBUG_IRQ */
-
-#ifdef DEBUG_EPIRQ
- static void dump_ep_intr(const char *label, int nr, int irqreg,
- struct device *dev)
- {
- dev_dbg(dev, "<%s> EP%d_INTR=[%s%s%s%s%s%s%s%s%s]\n", label, nr,
- (irqreg & EPINTR_FIFO_FULL) ? " full" : "",
- (irqreg & EPINTR_FIFO_EMPTY) ? " fempty" : "",
- (irqreg & EPINTR_FIFO_ERROR) ? " ferr" : "",
- (irqreg & EPINTR_FIFO_HIGH) ? " fhigh" : "",
- (irqreg & EPINTR_FIFO_LOW) ? " flow" : "",
- (irqreg & EPINTR_MDEVREQ) ? " mreq" : "",
- (irqreg & EPINTR_EOF) ? " eof" : "",
- (irqreg & EPINTR_DEVREQ) ? " devreq" : "",
- (irqreg & EPINTR_EOT) ? " eot" : "");
- }
-#else
- #define dump_ep_intr(x, y, z, i) do {} while (0)
-#endif /* DEBUG_IRQ */
-
-#ifdef DEBUG_DUMP
- static void dump_usb_stat(const char *label,
- struct imx_udc_struct *imx_usb)
- {
- int temp = __raw_readl(imx_usb->base + USB_STAT);
-
- dev_dbg(imx_usb->dev,
- "<%s> USB_STAT=[%s%s CFG=%d, INTF=%d, ALTR=%d]\n", label,
- (temp & STAT_RST) ? " reset" : "",
- (temp & STAT_SUSP) ? " suspend" : "",
- (temp & STAT_CFG) >> 5,
- (temp & STAT_INTF) >> 3,
- (temp & STAT_ALTSET));
- }
-
- static void dump_ep_stat(const char *label,
- struct imx_ep_struct *imx_ep)
- {
- int temp = __raw_readl(imx_ep->imx_usb->base
- + USB_EP_INTR(EP_NO(imx_ep)));
-
- dev_dbg(imx_ep->imx_usb->dev,
- "<%s> EP%d_INTR=[%s%s%s%s%s%s%s%s%s]\n",
- label, EP_NO(imx_ep),
- (temp & EPINTR_FIFO_FULL) ? " full" : "",
- (temp & EPINTR_FIFO_EMPTY) ? " fempty" : "",
- (temp & EPINTR_FIFO_ERROR) ? " ferr" : "",
- (temp & EPINTR_FIFO_HIGH) ? " fhigh" : "",
- (temp & EPINTR_FIFO_LOW) ? " flow" : "",
- (temp & EPINTR_MDEVREQ) ? " mreq" : "",
- (temp & EPINTR_EOF) ? " eof" : "",
- (temp & EPINTR_DEVREQ) ? " devreq" : "",
- (temp & EPINTR_EOT) ? " eot" : "");
-
- temp = __raw_readl(imx_ep->imx_usb->base
- + USB_EP_STAT(EP_NO(imx_ep)));
-
- dev_dbg(imx_ep->imx_usb->dev,
- "<%s> EP%d_STAT=[%s%s bcount=%d]\n",
- label, EP_NO(imx_ep),
- (temp & EPSTAT_SIP) ? " sip" : "",
- (temp & EPSTAT_STALL) ? " stall" : "",
- (temp & EPSTAT_BCOUNT) >> 16);
-
- temp = __raw_readl(imx_ep->imx_usb->base
- + USB_EP_FSTAT(EP_NO(imx_ep)));
-
- dev_dbg(imx_ep->imx_usb->dev,
- "<%s> EP%d_FSTAT=[%s%s%s%s%s%s%s]\n",
- label, EP_NO(imx_ep),
- (temp & FSTAT_ERR) ? " ferr" : "",
- (temp & FSTAT_UF) ? " funder" : "",
- (temp & FSTAT_OF) ? " fover" : "",
- (temp & FSTAT_FR) ? " fready" : "",
- (temp & FSTAT_FULL) ? " ffull" : "",
- (temp & FSTAT_ALRM) ? " falarm" : "",
- (temp & FSTAT_EMPTY) ? " fempty" : "");
- }
-
- static void dump_req(const char *label, struct imx_ep_struct *imx_ep,
- struct usb_request *req)
- {
- int i;
-
- if (!req || !req->buf) {
- dev_dbg(imx_ep->imx_usb->dev,
- "<%s> req or req buf is free\n", label);
- return;
- }
-
- if ((!EP_NO(imx_ep) && imx_ep->imx_usb->ep0state
- == EP0_IN_DATA_PHASE)
- || (EP_NO(imx_ep) && EP_DIR(imx_ep))) {
-
- dev_dbg(imx_ep->imx_usb->dev,
- "<%s> request dump <", label);
- for (i = 0; i < req->length; i++)
- printk("%02x-", *((u8 *)req->buf + i));
- printk(">\n");
- }
- }
-
-#else
- #define dump_ep_stat(x, y) do {} while (0)
- #define dump_usb_stat(x, y) do {} while (0)
- #define dump_req(x, y, z) do {} while (0)
-#endif /* DEBUG_DUMP */
-
-#ifdef DEBUG_ERR
- #define D_ERR(dev, args...) dev_dbg(dev, ## args)
-#else
- #define D_ERR(dev, args...) do {} while (0)
-#endif
-
-#else
- #define D_REQ(dev, args...) do {} while (0)
- #define D_TRX(dev, args...) do {} while (0)
- #define D_INI(dev, args...) do {} while (0)
- #define D_EP0(dev, args...) do {} while (0)
- #define D_EPX(dev, args...) do {} while (0)
- #define dump_ep_intr(x, y, z, i) do {} while (0)
- #define dump_intr(x, y, z) do {} while (0)
- #define dump_ep_stat(x, y) do {} while (0)
- #define dump_usb_stat(x, y) do {} while (0)
- #define dump_req(x, y, z) do {} while (0)
- #define D_ERR(dev, args...) do {} while (0)
-#endif /* DEBUG */
-
-#endif /* __LINUX_USB_GADGET_IMX_H */
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 570c005062a..465ef8e2cc9 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1270,10 +1270,6 @@ dev_release (struct inode *inode, struct file *fd)
dev->buf = NULL;
put_dev (dev);
- /* other endpoints were all decoupled from this device */
- spin_lock_irq(&dev->lock);
- dev->state = STATE_DEV_DISABLED;
- spin_unlock_irq(&dev->lock);
return 0;
}
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 46ba9838c3a..d5f050d30ed 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1584,7 +1584,7 @@ static int __init m66592_probe(struct platform_device *pdev)
goto clean_up;
}
- if (pdev->dev.platform_data == NULL) {
+ if (dev_get_platdata(&pdev->dev) == NULL) {
dev_err(&pdev->dev, "no platform data\n");
ret = -ENODEV;
goto clean_up;
@@ -1598,7 +1598,7 @@ static int __init m66592_probe(struct platform_device *pdev)
goto clean_up;
}
- m66592->pdata = pdev->dev.platform_data;
+ m66592->pdata = dev_get_platdata(&pdev->dev);
m66592->irq_trigger = ires->flags & IRQF_TRIGGER_MASK;
spin_lock_init(&m66592->lock);
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index 032b96a51ce..2a1ebefd8f9 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -160,10 +160,8 @@ static __init int rndis_do_config(struct usb_configuration *c)
return ret;
f_acm_rndis = usb_get_function(fi_acm);
- if (IS_ERR(f_acm_rndis)) {
- ret = PTR_ERR(f_acm_rndis);
- goto err_func_acm;
- }
+ if (IS_ERR(f_acm_rndis))
+ return PTR_ERR(f_acm_rndis);
ret = usb_add_function(c, f_acm_rndis);
if (ret)
@@ -178,7 +176,6 @@ err_fsg:
usb_remove_function(c, f_acm_rndis);
err_conf:
usb_put_function(f_acm_rndis);
-err_func_acm:
return ret;
}
@@ -226,7 +223,7 @@ static __init int cdc_do_config(struct usb_configuration *c)
/* implicit port_num is zero */
f_acm_multi = usb_get_function(fi_acm);
if (IS_ERR(f_acm_multi))
- goto err_func_acm;
+ return PTR_ERR(f_acm_multi);
ret = usb_add_function(c, f_acm_multi);
if (ret)
@@ -241,7 +238,6 @@ err_fsg:
usb_remove_function(c, f_acm_multi);
err_conf:
usb_put_function(f_acm_multi);
-err_func_acm:
return ret;
}
diff --git a/drivers/usb/gadget/mv_u3d_core.c b/drivers/usb/gadget/mv_u3d_core.c
index 07fdb3eaf48..bbb6e98c438 100644
--- a/drivers/usb/gadget/mv_u3d_core.c
+++ b/drivers/usb/gadget/mv_u3d_core.c
@@ -1109,7 +1109,7 @@ static int mv_u3d_controller_reset(struct mv_u3d *u3d)
static int mv_u3d_enable(struct mv_u3d *u3d)
{
- struct mv_usb_platform_data *pdata = u3d->dev->platform_data;
+ struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
int retval;
if (u3d->active)
@@ -1138,7 +1138,7 @@ static int mv_u3d_enable(struct mv_u3d *u3d)
static void mv_u3d_disable(struct mv_u3d *u3d)
{
- struct mv_usb_platform_data *pdata = u3d->dev->platform_data;
+ struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
if (u3d->clock_gating && u3d->active) {
dev_dbg(u3d->dev, "disable u3d\n");
if (pdata->phy_deinit)
@@ -1246,7 +1246,7 @@ static int mv_u3d_start(struct usb_gadget *g,
struct usb_gadget_driver *driver)
{
struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget);
- struct mv_usb_platform_data *pdata = u3d->dev->platform_data;
+ struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
unsigned long flags;
if (u3d->driver)
@@ -1277,7 +1277,7 @@ static int mv_u3d_stop(struct usb_gadget *g,
struct usb_gadget_driver *driver)
{
struct mv_u3d *u3d = container_of(g, struct mv_u3d, gadget);
- struct mv_usb_platform_data *pdata = u3d->dev->platform_data;
+ struct mv_usb_platform_data *pdata = dev_get_platdata(u3d->dev);
unsigned long flags;
u3d->vbus_valid_detect = 0;
@@ -1776,7 +1776,7 @@ static int mv_u3d_remove(struct platform_device *dev)
kfree(u3d->eps);
if (u3d->irq)
- free_irq(u3d->irq, &dev->dev);
+ free_irq(u3d->irq, u3d);
if (u3d->cap_regs)
iounmap(u3d->cap_regs);
@@ -1794,12 +1794,12 @@ static int mv_u3d_remove(struct platform_device *dev)
static int mv_u3d_probe(struct platform_device *dev)
{
struct mv_u3d *u3d = NULL;
- struct mv_usb_platform_data *pdata = dev->dev.platform_data;
+ struct mv_usb_platform_data *pdata = dev_get_platdata(&dev->dev);
int retval = 0;
struct resource *r;
size_t size;
- if (!dev->dev.platform_data) {
+ if (!dev_get_platdata(&dev->dev)) {
dev_err(&dev->dev, "missing platform_data\n");
retval = -ENODEV;
goto err_pdata;
@@ -1974,7 +1974,7 @@ static int mv_u3d_probe(struct platform_device *dev)
return 0;
err_unregister:
- free_irq(u3d->irq, &dev->dev);
+ free_irq(u3d->irq, u3d);
err_request_irq:
err_get_irq:
kfree(u3d->status_req);
diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c
index c2a57023e46..104cdbea635 100644
--- a/drivers/usb/gadget/mv_udc_core.c
+++ b/drivers/usb/gadget/mv_udc_core.c
@@ -2100,7 +2100,7 @@ static int mv_udc_remove(struct platform_device *pdev)
static int mv_udc_probe(struct platform_device *pdev)
{
- struct mv_usb_platform_data *pdata = pdev->dev.platform_data;
+ struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct mv_udc *udc;
int retval = 0;
struct resource *r;
@@ -2118,7 +2118,7 @@ static int mv_udc_probe(struct platform_device *pdev)
}
udc->done = &release_done;
- udc->pdata = pdev->dev.platform_data;
+ udc->pdata = dev_get_platdata(&pdev->dev);
spin_lock_init(&udc->lock);
udc->dev = pdev;
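
The hunks above (m66592, mv_u3d, mv_udc and the later drivers) replace open-coded reads of pdev->dev.platform_data with the dev_get_platdata() accessor. A minimal sketch of that accessor in a probe routine; foo_pdata and foo_probe are made-up names for illustration only:

#include <linux/device.h>
#include <linux/platform_device.h>

/* made-up names: foo_pdata / foo_probe are illustrative, not from the patch */
struct foo_pdata {
	int irq_trigger;
};

static int foo_probe(struct platform_device *pdev)
{
	/* dev_get_platdata() is the accessor for dev->platform_data */
	struct foo_pdata *pdata = dev_get_platdata(&pdev->dev);

	if (!pdata) {
		dev_err(&pdev->dev, "no platform data\n");
		return -ENODEV;
	}
	return 0;
}
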
diff --git a/drivers/usb/gadget/net2272.c b/drivers/usb/gadget/net2272.c
index f1e50a3e322..bf2bb39f35a 100644
--- a/drivers/usb/gadget/net2272.c
+++ b/drivers/usb/gadget/net2272.c
@@ -1184,7 +1184,7 @@ static const struct usb_gadget_ops net2272_ops = {
/*---------------------------------------------------------------------------*/
static ssize_t
-net2272_show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
+registers_show(struct device *_dev, struct device_attribute *attr, char *buf)
{
struct net2272 *dev;
char *next;
@@ -1308,7 +1308,7 @@ net2272_show_registers(struct device *_dev, struct device_attribute *attr, char
return PAGE_SIZE - size;
}
-static DEVICE_ATTR(registers, S_IRUGO, net2272_show_registers, NULL);
+static DEVICE_ATTR_RO(registers);
/*---------------------------------------------------------------------------*/
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index fbd006ab31d..0781bff7001 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -1424,8 +1424,8 @@ static const struct usb_gadget_ops net2280_ops = {
*/
/* "function" sysfs attribute */
-static ssize_t
-show_function (struct device *_dev, struct device_attribute *attr, char *buf)
+static ssize_t function_show(struct device *_dev, struct device_attribute *attr,
+ char *buf)
{
struct net2280 *dev = dev_get_drvdata (_dev);
@@ -1435,10 +1435,10 @@ show_function (struct device *_dev, struct device_attribute *attr, char *buf)
return 0;
return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
}
-static DEVICE_ATTR (function, S_IRUGO, show_function, NULL);
+static DEVICE_ATTR_RO(function);
-static ssize_t net2280_show_registers(struct device *_dev,
- struct device_attribute *attr, char *buf)
+static ssize_t registers_show(struct device *_dev,
+ struct device_attribute *attr, char *buf)
{
struct net2280 *dev;
char *next;
@@ -1590,10 +1590,10 @@ static ssize_t net2280_show_registers(struct device *_dev,
return PAGE_SIZE - size;
}
-static DEVICE_ATTR(registers, S_IRUGO, net2280_show_registers, NULL);
+static DEVICE_ATTR_RO(registers);
-static ssize_t
-show_queues (struct device *_dev, struct device_attribute *attr, char *buf)
+static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
+ char *buf)
{
struct net2280 *dev;
char *next;
@@ -1690,7 +1690,7 @@ done:
spin_unlock_irqrestore (&dev->lock, flags);
return PAGE_SIZE - size;
}
-static DEVICE_ATTR (queues, S_IRUGO, show_queues, NULL);
+static DEVICE_ATTR_RO(queues);
#else
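
The net2272 and net2280 hunks convert DEVICE_ATTR(name, S_IRUGO, show_fn, NULL) into DEVICE_ATTR_RO(name), which only works once the show routine is renamed to name##_show. A minimal sketch of the convention, using a made-up "registers" attribute that prints a placeholder value:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

/* "registers" is a stand-in attribute; the printed value is a placeholder */
static ssize_t registers_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "0x%08x\n", 0u);
}
/* expands to dev_attr_registers with .show = registers_show and mode 0444 */
static DEVICE_ATTR_RO(registers);
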
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index b8ed74a823c..83957cc225d 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -2734,7 +2734,7 @@ static int omap_udc_probe(struct platform_device *pdev)
int hmc;
struct usb_phy *xceiv = NULL;
const char *type = NULL;
- struct omap_usb_config *config = pdev->dev.platform_data;
+ struct omap_usb_config *config = dev_get_platdata(&pdev->dev);
struct clk *dc_clk = NULL;
struct clk *hhc_clk = NULL;
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index 95c531d5aa4..cc9207473db 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -2117,7 +2117,7 @@ static int __init pxa25x_udc_probe(struct platform_device *pdev)
/* other non-static parts of init */
dev->dev = &pdev->dev;
- dev->mach = pdev->dev.platform_data;
+ dev->mach = dev_get_platdata(&pdev->dev);
dev->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 41cea9566ac..3c97da7760d 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -2422,7 +2422,7 @@ static int pxa_udc_probe(struct platform_device *pdev)
return udc->irq;
udc->dev = &pdev->dev;
- udc->mach = pdev->dev.platform_data;
+ udc->mach = dev_get_platdata(&pdev->dev);
udc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
gpio = udc->mach->gpio_pullup;
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index c6af649f324..68be48d3340 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1910,7 +1910,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
spin_lock_init(&r8a66597->lock);
platform_set_drvdata(pdev, r8a66597);
- r8a66597->pdata = pdev->dev.platform_data;
+ r8a66597->pdata = dev_get_platdata(&pdev->dev);
r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
r8a66597->gadget.ops = &r8a66597_gadget_ops;
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c
index 3e3ea720303..9575085ded8 100644
--- a/drivers/usb/gadget/rndis.c
+++ b/drivers/usb/gadget/rndis.c
@@ -1142,7 +1142,7 @@ static struct proc_dir_entry *rndis_connect_state [RNDIS_MAX_CONFIGS];
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
-int rndis_init(void)
+static int rndis_init(void)
{
u8 i;
@@ -1176,7 +1176,7 @@ int rndis_init(void)
}
module_init(rndis_init);
-void rndis_exit(void)
+static void rndis_exit(void)
{
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
u8 i;
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index af22f24046b..d69b36a99db 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
+#include <linux/of_platform.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
@@ -3450,7 +3451,7 @@ static void s3c_hsotg_delete_debug(struct s3c_hsotg *hsotg)
static int s3c_hsotg_probe(struct platform_device *pdev)
{
- struct s3c_hsotg_plat *plat = pdev->dev.platform_data;
+ struct s3c_hsotg_plat *plat = dev_get_platdata(&pdev->dev);
struct usb_phy *phy;
struct device *dev = &pdev->dev;
struct s3c_hsotg_ep *eps;
@@ -3469,7 +3470,7 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);
if (IS_ERR(phy)) {
/* Fallback for pdata */
- plat = pdev->dev.platform_data;
+ plat = dev_get_platdata(&pdev->dev);
if (!plat) {
dev_err(&pdev->dev, "no platform data or transceiver defined\n");
return -EPROBE_DEFER;
@@ -3648,10 +3649,19 @@ static int s3c_hsotg_remove(struct platform_device *pdev)
#define s3c_hsotg_resume NULL
#endif
+#ifdef CONFIG_OF
+static const struct of_device_id s3c_hsotg_of_ids[] = {
+ { .compatible = "samsung,s3c6400-hsotg", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, s3c_hsotg_of_ids);
+#endif
+
static struct platform_driver s3c_hsotg_driver = {
.driver = {
.name = "s3c-hsotg",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(s3c_hsotg_of_ids),
},
.probe = s3c_hsotg_probe,
.remove = s3c_hsotg_remove,
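
The s3c-hsotg hunk above adds device-tree matching: an of_device_id table guarded by CONFIG_OF, exported with MODULE_DEVICE_TABLE(), and wired up through of_match_ptr(). A minimal sketch of the same pattern for a hypothetical driver; "vendor,foo-udc" and the foo_* names are made up:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#ifdef CONFIG_OF
static const struct of_device_id foo_of_ids[] = {
	{ .compatible = "vendor,foo-udc", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_of_ids);
#endif

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo-udc",
		.owner = THIS_MODULE,
		/* of_match_ptr() evaluates to NULL when CONFIG_OF is disabled */
		.of_match_table = of_match_ptr(foo_of_ids),
	},
};
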
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index b1f0771fbd3..1a1a41498db 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -1262,7 +1262,7 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct resource *res;
struct s3c_hsudc *hsudc;
- struct s3c24xx_hsudc_platdata *pd = pdev->dev.platform_data;
+ struct s3c24xx_hsudc_platdata *pd = dev_get_platdata(&pdev->dev);
int ret, i;
hsudc = devm_kzalloc(&pdev->dev, sizeof(struct s3c_hsudc) +
@@ -1275,7 +1275,7 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
hsudc->dev = dev;
- hsudc->pd = pdev->dev.platform_data;
+ hsudc->pd = dev_get_platdata(&pdev->dev);
hsudc->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 09c4f70c93c..c72d810e6b3 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1809,7 +1809,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
}
spin_lock_init(&udc->lock);
- udc_info = pdev->dev.platform_data;
+ udc_info = dev_get_platdata(&pdev->dev);
rsrc_start = S3C2410_PA_USBDEV;
rsrc_len = S3C24XX_SZ_USBDEV;
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index dbce3a9074e..08a1a3210a2 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -172,7 +172,7 @@ MODULE_PARM_DESC(num_buffers, "Number of pipeline buffers");
*/
#define fsg_num_buffers CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS
-#endif /* CONFIG_USB_DEBUG */
+#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
/* check if fsg_num_buffers is within a valid range */
static inline int fsg_num_buffers_validate(void)
@@ -547,8 +547,8 @@ static void store_cdrom_address(u8 *dest, int msf, u32 addr)
/*-------------------------------------------------------------------------*/
-static ssize_t fsg_show_ro(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t ro_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct fsg_lun *curlun = fsg_lun_from_dev(dev);
@@ -557,16 +557,16 @@ static ssize_t fsg_show_ro(struct device *dev, struct device_attribute *attr,
: curlun->initially_ro);
}
-static ssize_t fsg_show_nofua(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t nofua_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct fsg_lun *curlun = fsg_lun_from_dev(dev);
return sprintf(buf, "%u\n", curlun->nofua);
}
-static ssize_t fsg_show_file(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t file_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct fsg_lun *curlun = fsg_lun_from_dev(dev);
struct rw_semaphore *filesem = dev_get_drvdata(dev);
@@ -593,8 +593,8 @@ static ssize_t fsg_show_file(struct device *dev, struct device_attribute *attr,
}
-static ssize_t fsg_store_ro(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t ro_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
ssize_t rc;
struct fsg_lun *curlun = fsg_lun_from_dev(dev);
@@ -623,9 +623,8 @@ static ssize_t fsg_store_ro(struct device *dev, struct device_attribute *attr,
return rc;
}
-static ssize_t fsg_store_nofua(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t nofua_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct fsg_lun *curlun = fsg_lun_from_dev(dev);
unsigned nofua;
@@ -644,8 +643,8 @@ static ssize_t fsg_store_nofua(struct device *dev,
return count;
}
-static ssize_t fsg_store_file(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t file_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct fsg_lun *curlun = fsg_lun_from_dev(dev);
struct rw_semaphore *filesem = dev_get_drvdata(dev);
diff --git a/drivers/usb/gadget/u_uac1.c b/drivers/usb/gadget/u_uac1.c
index c7d460f4339..7a55fea4343 100644
--- a/drivers/usb/gadget/u_uac1.c
+++ b/drivers/usb/gadget/u_uac1.c
@@ -191,7 +191,7 @@ try_again:
frames = bytes_to_frames(runtime, count);
old_fs = get_fs();
set_fs(KERNEL_DS);
- result = snd_pcm_lib_write(snd->substream, buf, frames);
+ result = snd_pcm_lib_write(snd->substream, (void __user *)buf, frames);
if (result != frames) {
ERROR(card, "Playback error: %d\n", (int)result);
set_fs(old_fs);
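
The u_uac1.c change only adds a (void __user *) cast: the call already runs inside a get_fs()/set_fs(KERNEL_DS) window, so the address-limit check accepts the kernel buffer, and the cast silences the sparse address-space warning. A rough sketch of that pattern; some_user_facing_write() is a hypothetical callee standing in for any API that expects a __user pointer (snd_pcm_lib_write() in the hunk above):

#include <linux/types.h>
#include <linux/uaccess.h>

/* hypothetical callee that takes a __user buffer */
extern ssize_t some_user_facing_write(void __user *buf, size_t len);

static ssize_t write_from_kernel_buf(void *kbuf, size_t len)
{
	mm_segment_t old_fs = get_fs();
	ssize_t ret;

	set_fs(KERNEL_DS);		/* accept kernel addresses for this call */
	ret = some_user_facing_write((void __user *)kbuf, len);
	set_fs(old_fs);			/* restore the previous limit */
	return ret;
}
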
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index ffd8fa54110..59891b1c48f 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -23,6 +23,7 @@
#include <linux/list.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
+#include <linux/workqueue.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
@@ -50,6 +51,8 @@ static DEFINE_MUTEX(udc_lock);
/* ------------------------------------------------------------------------- */
+#ifdef CONFIG_HAS_DMA
+
int usb_gadget_map_request(struct usb_gadget *gadget,
struct usb_request *req, int is_in)
{
@@ -99,13 +102,22 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget,
}
EXPORT_SYMBOL_GPL(usb_gadget_unmap_request);
+#endif /* CONFIG_HAS_DMA */
+
/* ------------------------------------------------------------------------- */
+static void usb_gadget_state_work(struct work_struct *work)
+{
+ struct usb_gadget *gadget = work_to_gadget(work);
+
+ sysfs_notify(&gadget->dev.kobj, NULL, "state");
+}
+
void usb_gadget_set_state(struct usb_gadget *gadget,
enum usb_device_state state)
{
gadget->state = state;
- sysfs_notify(&gadget->dev.kobj, NULL, "status");
+ schedule_work(&gadget->work);
}
EXPORT_SYMBOL_GPL(usb_gadget_set_state);
@@ -192,11 +204,14 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget,
goto err1;
dev_set_name(&gadget->dev, "gadget");
+ INIT_WORK(&gadget->work, usb_gadget_state_work);
gadget->dev.parent = parent;
+#ifdef CONFIG_HAS_DMA
dma_set_coherent_mask(&gadget->dev, parent->coherent_dma_mask);
gadget->dev.dma_parms = parent->dma_parms;
gadget->dev.dma_mask = parent->dma_mask;
+#endif
if (release)
gadget->dev.release = release;
@@ -309,6 +324,7 @@ found:
usb_gadget_remove_driver(udc);
kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE);
+ flush_work(&gadget->work);
device_unregister(&udc->dev);
device_unregister(&gadget->dev);
}
@@ -454,31 +470,31 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
}
static DEVICE_ATTR(soft_connect, S_IWUSR, NULL, usb_udc_softconn_store);
-static ssize_t usb_gadget_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
struct usb_gadget *gadget = udc->gadget;
return sprintf(buf, "%s\n", usb_state_string(gadget->state));
}
-static DEVICE_ATTR(state, S_IRUGO, usb_gadget_state_show, NULL);
+static DEVICE_ATTR_RO(state);
#define USB_UDC_SPEED_ATTR(name, param) \
-ssize_t usb_udc_##param##_show(struct device *dev, \
+ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct usb_udc *udc = container_of(dev, struct usb_udc, dev); \
return snprintf(buf, PAGE_SIZE, "%s\n", \
usb_speed_string(udc->gadget->param)); \
} \
-static DEVICE_ATTR(name, S_IRUGO, usb_udc_##param##_show, NULL)
+static DEVICE_ATTR_RO(name)
static USB_UDC_SPEED_ATTR(current_speed, speed);
static USB_UDC_SPEED_ATTR(maximum_speed, max_speed);
#define USB_UDC_ATTR(name) \
-ssize_t usb_udc_##name##_show(struct device *dev, \
+ssize_t name##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct usb_udc *udc = container_of(dev, struct usb_udc, dev); \
@@ -486,7 +502,7 @@ ssize_t usb_udc_##name##_show(struct device *dev, \
\
return snprintf(buf, PAGE_SIZE, "%d\n", gadget->name); \
} \
-static DEVICE_ATTR(name, S_IRUGO, usb_udc_##name##_show, NULL)
+static DEVICE_ATTR_RO(name)
static USB_UDC_ATTR(is_otg);
static USB_UDC_ATTR(is_a_peripheral);
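
The udc-core.c hunk defers the sysfs notification to a work item so usb_gadget_set_state() can be called from atomic context, and in passing notifies the correct attribute ("state" rather than "status"); the work is initialised when the gadget is added and flushed before the device is unregistered. A minimal sketch of the deferral pattern with made-up foo_* names (the real code hangs the work off struct usb_gadget):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>

struct foo_gadget {
	struct device dev;
	struct work_struct work;
};

static void foo_state_work(struct work_struct *work)
{
	struct foo_gadget *g = container_of(work, struct foo_gadget, work);

	sysfs_notify(&g->dev.kobj, NULL, "state");	/* runs in process context */
}

static void foo_set_state(struct foo_gadget *g)
{
	schedule_work(&g->work);	/* safe from interrupt/atomic context */
}

/* setup:    INIT_WORK(&g->work, foo_state_work);
 * teardown: flush_work(&g->work) before the device is unregistered.
 */
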
diff --git a/drivers/usb/gadget/uvc_queue.c b/drivers/usb/gadget/uvc_queue.c
index e6170478ea9..0bb5d50075d 100644
--- a/drivers/usb/gadget/uvc_queue.c
+++ b/drivers/usb/gadget/uvc_queue.c
@@ -193,12 +193,16 @@ static int uvc_queue_buffer(struct uvc_video_queue *queue,
mutex_lock(&queue->mutex);
ret = vb2_qbuf(&queue->queue, buf);
+ if (ret < 0)
+ goto done;
+
spin_lock_irqsave(&queue->irqlock, flags);
ret = (queue->flags & UVC_QUEUE_PAUSED) != 0;
queue->flags &= ~UVC_QUEUE_PAUSED;
spin_unlock_irqrestore(&queue->irqlock, flags);
- mutex_unlock(&queue->mutex);
+done:
+ mutex_unlock(&queue->mutex);
return ret;
}
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 4263d011392..5be0326aae3 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -29,15 +29,6 @@ if USB_XHCI_HCD
config USB_XHCI_PLATFORM
tristate
-config USB_XHCI_HCD_DEBUGGING
- bool "Debugging for the xHCI host controller"
- ---help---
- Say 'Y' to turn on debugging for the xHCI host controller driver.
- This will spew debugging output, even in interrupt context.
- This should only be used for debugging xHCI driver bugs.
-
- If unsure, say N.
-
endif # USB_XHCI_HCD
config USB_EHCI_HCD
@@ -113,12 +104,6 @@ config USB_EHCI_HCD_PMC_MSP
Enables support for the onchip USB controller on the PMC_MSP7100 Family SoC's.
If unsure, say N.
-config USB_EHCI_BIG_ENDIAN_MMIO
- bool
-
-config USB_EHCI_BIG_ENDIAN_DESC
- bool
-
config XPS_USB_HCD_XILINX
bool "Use Xilinx usb host EHCI controller core"
depends on (PPC32 || MICROBLAZE)
@@ -148,13 +133,11 @@ config USB_EHCI_MXC
config USB_EHCI_HCD_OMAP
tristate "EHCI support for OMAP3 and later chips"
depends on ARCH_OMAP
+ select NOP_USB_XCEIV
default y
---help---
Enables support for the on-chip EHCI controller on
OMAP3 and later chips.
- If your system uses a PHY on the USB port, you will need to
- enable USB_PHY and the appropriate PHY driver as well. Most
- boards need the NOP_USB_XCEIV PHY driver.
config USB_EHCI_HCD_ORION
tristate "Support for Marvell EBU on-chip EHCI USB controller"
@@ -186,7 +169,6 @@ config USB_EHCI_HCD_AT91
config USB_EHCI_MSM
tristate "Support for Qualcomm QSD/MSM on-chip EHCI USB controller"
depends on ARCH_MSM
- depends on USB_PHY
select USB_EHCI_ROOT_HUB_TT
select USB_MSM_OTG
---help---
@@ -354,6 +336,18 @@ config USB_FUSBH200_HCD
To compile this driver as a module, choose M here: the
module will be called fusbh200-hcd.
+config USB_FOTG210_HCD
+ tristate "FOTG210 HCD support"
+ depends on USB
+ default n
+ ---help---
+ Faraday FOTG210 is an OTG controller which can be configured as a
+ USB 2.0 host. It is designed to meet the USB 2.0 EHCI specification
+ with minor modifications.
+
+ To compile this driver as a module, choose M here: the
+ module will be called fotg210-hcd.
+
config USB_OHCI_HCD
tristate "OHCI HCD (USB 1.1) support"
select ISP1301_OMAP if MACH_OMAP_H2 || MACH_OMAP_H3
@@ -497,20 +491,6 @@ config USB_OCTEON_OHCI
controller. It is needed for low-speed USB 1.0 device
support. All CN6XXX based chips with USB are supported.
-
-config USB_OHCI_BIG_ENDIAN_DESC
- bool
- default n
-
-config USB_OHCI_BIG_ENDIAN_MMIO
- bool
- default n
-
-config USB_OHCI_LITTLE_ENDIAN
- bool
- default n if STB03xxx || PPC_MPC52xx
- default y
-
endif # USB_OHCI_HCD
config USB_UHCI_HCD
@@ -710,3 +690,20 @@ config USB_HCD_SSB
for ehci and ohci.
If unsure, say N.
+
+config USB_HCD_TEST_MODE
+ bool "HCD test mode support"
+ ---help---
+ Say 'Y' to enable additional software test modes that may be
+ supported by the host controller drivers.
+
+ One such test mode is the Embedded High-speed Host Electrical Test
+ (EHSET) for EHCI host controller hardware, specifically the "Single
+ Step Set Feature" test. Typically this will be enabled for On-the-Go
+ or embedded hosts that need to undergo USB-IF compliance testing with
+ the aid of special testing hardware. In the future, this may expand
+ to include other tests that require support from an HCD driver.
+
+ This option is of interest only to developers who need to validate
+ their USB hardware designs. It is not needed for normal use. If
+ unsure, say N.
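
The new EHSET handling in ehci-hub.c (further down in this patch) accepts test selector 0x06 through the standard SetPortFeature(PORT_TEST) hub request. A rough user-space sketch of how compliance tooling might trigger it through usbfs; the device path, the port number, and addressing the root hub this way are assumptions, not part of the patch:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/usbdevice_fs.h>

int main(void)
{
	/* assumed: EHCI root hub at this path, device under test on port 1 */
	int fd = open("/dev/bus/usb/001/001", O_RDWR);
	struct usbdevfs_ctrltransfer ctrl = {
		.bRequestType = 0x23,		/* out, class, other (hub port) */
		.bRequest     = 0x03,		/* SET_FEATURE */
		.wValue       = 21,		/* PORT_TEST */
		.wIndex       = (0x06 << 8) | 1, /* selector 6 = single-step set feature, port 1 */
		.wLength      = 0,
		.timeout      = 1000,
		.data         = NULL,
	};

	if (fd < 0 || ioctl(fd, USBDEVFS_CONTROL, &ctrl) < 0)
		perror("EHSET SetPortFeature");
	return 0;
}
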
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index bea71127b15..50b0041c09a 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -4,6 +4,9 @@
ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
+# tell define_trace.h where to find the xhci trace header
+CFLAGS_xhci-trace.o := -I$(src)
+
isp1760-y := isp1760-hcd.o isp1760-if.o
fhci-y := fhci-hcd.o fhci-hub.o fhci-q.o
@@ -13,6 +16,7 @@ fhci-$(CONFIG_FHCI_DEBUG) += fhci-dbg.o
xhci-hcd-y := xhci.o xhci-mem.o
xhci-hcd-y += xhci-ring.o xhci-hub.o xhci-dbg.o
+xhci-hcd-y += xhci-trace.o
xhci-hcd-$(CONFIG_PCI) += xhci-pci.o
ifneq ($(CONFIG_USB_XHCI_PLATFORM), )
@@ -58,3 +62,4 @@ obj-$(CONFIG_USB_OCTEON2_COMMON) += octeon2-common.o
obj-$(CONFIG_USB_HCD_BCMA) += bcma-hcd.o
obj-$(CONFIG_USB_HCD_SSB) += ssb-hcd.o
obj-$(CONFIG_USB_FUSBH200_HCD) += fusbh200-hcd.o
+obj-$(CONFIG_USB_FOTG210_HCD) += fotg210-hcd.o
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 5429d2645bb..aa5b603f393 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -18,7 +18,7 @@
/* this file is part of ehci-hcd.c */
-#ifdef DEBUG
+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
/* check the values in the HCSPARAMS register
* (host controller _Structural_ parameters)
@@ -62,7 +62,7 @@ static inline void dbg_hcs_params (struct ehci_hcd *ehci, char *label) {}
#endif
-#ifdef DEBUG
+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
/* check the values in the HCCPARAMS register
* (host controller _Capability_ parameters)
@@ -101,7 +101,7 @@ static inline void dbg_hcc_params (struct ehci_hcd *ehci, char *label) {}
#endif
-#ifdef DEBUG
+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
static void __maybe_unused
dbg_qtd (const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd)
@@ -301,7 +301,7 @@ static inline int __maybe_unused
dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status)
{ return 0; }
-#endif /* DEBUG */
+#endif /* DEBUG || CONFIG_DYNAMIC_DEBUG */
/* functions have the "wrong" filename when they're output... */
#define dbg_status(ehci, label, status) { \
@@ -336,7 +336,6 @@ static inline void remove_debug_files (struct ehci_hcd *bus) { }
static int debug_async_open(struct inode *, struct file *);
static int debug_periodic_open(struct inode *, struct file *);
static int debug_registers_open(struct inode *, struct file *);
-static int debug_async_open(struct inode *, struct file *);
static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
static int debug_close(struct inode *, struct file *);
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index bd831ec06dc..947b009009f 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -57,7 +57,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
pr_debug("initializing FSL-SOC USB Controller\n");
/* Need platform data for setup */
- pdata = (struct fsl_usb2_platform_data *)pdev->dev.platform_data;
+ pdata = (struct fsl_usb2_platform_data *)dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev,
"No platform data for %s.\n", dev_name(&pdev->dev));
@@ -190,7 +190,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
static void usb_hcd_fsl_remove(struct usb_hcd *hcd,
struct platform_device *pdev)
{
- struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
if (!IS_ERR_OR_NULL(hcd->phy)) {
otg_set_host(hcd->phy->otg, NULL);
@@ -218,7 +218,7 @@ static int ehci_fsl_setup_phy(struct usb_hcd *hcd,
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
void __iomem *non_ehci = hcd->regs;
struct device *dev = hcd->self.controller;
- struct fsl_usb2_platform_data *pdata = dev->platform_data;
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
if (pdata->controller_ver < 0) {
dev_warn(hcd->self.controller, "Could not get controller version\n");
@@ -291,7 +291,7 @@ static int ehci_fsl_usb_setup(struct ehci_hcd *ehci)
struct fsl_usb2_platform_data *pdata;
void __iomem *non_ehci = hcd->regs;
- pdata = hcd->self.controller->platform_data;
+ pdata = dev_get_platdata(hcd->self.controller);
if (pdata->have_sysif_regs) {
/*
@@ -363,7 +363,7 @@ static int ehci_fsl_setup(struct usb_hcd *hcd)
struct device *dev;
dev = hcd->self.controller;
- pdata = hcd->self.controller->platform_data;
+ pdata = dev_get_platdata(hcd->self.controller);
ehci->big_endian_desc = pdata->big_endian_desc;
ehci->big_endian_mmio = pdata->big_endian_mmio;
@@ -415,10 +415,10 @@ static int ehci_fsl_mpc512x_drv_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- struct fsl_usb2_platform_data *pdata = dev->platform_data;
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
u32 tmp;
-#ifdef DEBUG
+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
u32 mode = ehci_readl(ehci, hcd->regs + FSL_SOC_USB_USBMODE);
mode &= USBMODE_CM_MASK;
tmp = ehci_readl(ehci, hcd->regs + 0x140); /* usbcmd */
@@ -484,7 +484,7 @@ static int ehci_fsl_mpc512x_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- struct fsl_usb2_platform_data *pdata = dev->platform_data;
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(dev);
u32 tmp;
dev_dbg(dev, "suspend=%d already_suspended=%d\n",
@@ -669,7 +669,7 @@ static const struct hc_driver ehci_fsl_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_USB2 | HCD_MEMORY,
+ .flags = HCD_USB2 | HCD_MEMORY | HCD_BH,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-grlib.c b/drivers/usb/host/ehci-grlib.c
index a77bd8dc33f..b52a66ce92e 100644
--- a/drivers/usb/host/ehci-grlib.c
+++ b/drivers/usb/host/ehci-grlib.c
@@ -43,7 +43,7 @@ static const struct hc_driver ehci_grlib_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
@@ -167,15 +167,6 @@ static int ehci_hcd_grlib_remove(struct platform_device *op)
}
-static void ehci_hcd_grlib_shutdown(struct platform_device *op)
-{
- struct usb_hcd *hcd = platform_get_drvdata(op);
-
- if (hcd->driver->shutdown)
- hcd->driver->shutdown(hcd);
-}
-
-
static const struct of_device_id ehci_hcd_grlib_of_match[] = {
{
.name = "GAISLER_EHCI",
@@ -191,7 +182,7 @@ MODULE_DEVICE_TABLE(of, ehci_hcd_grlib_of_match);
static struct platform_driver ehci_grlib_driver = {
.probe = ehci_hcd_grlib_probe,
.remove = ehci_hcd_grlib_remove,
- .shutdown = ehci_hcd_grlib_shutdown,
+ .shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "grlib-ehci",
.owner = THIS_MODULE,
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 7abf1ce3a67..5d6022f30eb 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -440,14 +440,6 @@ static void ehci_stop (struct usb_hcd *hcd)
if (ehci->amd_pll_fix == 1)
usb_amd_dev_put();
-#ifdef EHCI_STATS
- ehci_dbg(ehci, "irq normal %ld err %ld iaa %ld (lost %ld)\n",
- ehci->stats.normal, ehci->stats.error, ehci->stats.iaa,
- ehci->stats.lost_iaa);
- ehci_dbg (ehci, "complete %ld unlink %ld\n",
- ehci->stats.complete, ehci->stats.unlink);
-#endif
-
dbg_status (ehci, "ehci_stop completed",
ehci_readl(ehci, &ehci->regs->status));
}
@@ -487,6 +479,7 @@ static int ehci_init(struct usb_hcd *hcd)
ehci->periodic_size = DEFAULT_I_TDPS;
INIT_LIST_HEAD(&ehci->async_unlink);
INIT_LIST_HEAD(&ehci->async_idle);
+ INIT_LIST_HEAD(&ehci->intr_unlink_wait);
INIT_LIST_HEAD(&ehci->intr_unlink);
INIT_LIST_HEAD(&ehci->intr_qh_list);
INIT_LIST_HEAD(&ehci->cached_itd_list);
@@ -942,7 +935,7 @@ ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
unsigned long flags;
- struct ehci_qh *qh, *tmp;
+ struct ehci_qh *qh;
/* ASSERT: any requests/urbs are being unlinked */
/* ASSERT: nobody can be submitting urbs for this any more */
@@ -972,17 +965,13 @@ rescan:
qh->qh_state = QH_STATE_IDLE;
switch (qh->qh_state) {
case QH_STATE_LINKED:
- case QH_STATE_COMPLETING:
- for (tmp = ehci->async->qh_next.qh;
- tmp && tmp != qh;
- tmp = tmp->qh_next.qh)
- continue;
- /* periodic qh self-unlinks on empty, and a COMPLETING qh
- * may already be unlinked.
- */
- if (tmp)
+ WARN_ON(!list_empty(&qh->qtd_list));
+ if (usb_endpoint_type(&ep->desc) != USB_ENDPOINT_XFER_INT)
start_unlink_async(ehci, qh);
+ else
+ start_unlink_intr(ehci, qh);
/* FALL THROUGH */
+ case QH_STATE_COMPLETING: /* already in unlinking */
case QH_STATE_UNLINK: /* wait for hw to finish? */
case QH_STATE_UNLINK_WAIT:
idle_timeout:
@@ -1169,7 +1158,7 @@ static const struct hc_driver ehci_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
@@ -1303,7 +1292,7 @@ static int __init ehci_hcd_init(void)
sizeof(struct ehci_qh), sizeof(struct ehci_qtd),
sizeof(struct ehci_itd), sizeof(struct ehci_sitd));
-#ifdef DEBUG
+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root);
if (!ehci_debug_root) {
retval = -ENOENT;
@@ -1352,7 +1341,7 @@ clean2:
platform_driver_unregister(&PLATFORM_DRIVER);
clean0:
#endif
-#ifdef DEBUG
+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
debugfs_remove(ehci_debug_root);
ehci_debug_root = NULL;
err_debug:
@@ -1376,7 +1365,7 @@ static void __exit ehci_hcd_cleanup(void)
#ifdef PS3_SYSTEM_BUS_DRIVER
ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER);
#endif
-#ifdef DEBUG
+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
debugfs_remove(ehci_debug_root);
#endif
clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 2b702772d04..835fc0844a6 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -183,7 +183,7 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
spin_lock_irq(&ehci->lock);
/* clear phy low-power mode before changing wakeup flags */
- if (ehci->has_hostpc) {
+ if (ehci->has_tdi_phy_lpm) {
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
@@ -211,13 +211,11 @@ static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
else
t2 |= PORT_WKOC_E | PORT_WKCONN_E;
}
- ehci_vdbg(ehci, "port %d, %08x -> %08x\n",
- port + 1, t1, t2);
ehci_writel(ehci, t2, reg);
}
/* enter phy low-power mode again */
- if (ehci->has_hostpc) {
+ if (ehci->has_tdi_phy_lpm) {
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
@@ -302,14 +300,12 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
}
if (t1 != t2) {
- ehci_vdbg (ehci, "port %d, %08x -> %08x\n",
- port + 1, t1, t2);
ehci_writel(ehci, t2, reg);
changed = 1;
}
}
- if (changed && ehci->has_hostpc) {
+ if (changed && ehci->has_tdi_phy_lpm) {
spin_unlock_irq(&ehci->lock);
msleep(5); /* 5 ms for HCD to enter low-power mode */
spin_lock_irq(&ehci->lock);
@@ -345,6 +341,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
end_unlink_async(ehci);
unlink_empty_async_suspended(ehci);
+ ehci_handle_start_intr_unlinks(ehci);
ehci_handle_intr_unlinks(ehci);
end_free_itds(ehci);
@@ -435,7 +432,7 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
goto shutdown;
/* clear phy low-power mode before resume */
- if (ehci->bus_suspended && ehci->has_hostpc) {
+ if (ehci->bus_suspended && ehci->has_tdi_phy_lpm) {
i = HCS_N_PORTS(ehci->hcs_params);
while (i--) {
if (test_bit(i, &ehci->bus_suspended)) {
@@ -482,7 +479,6 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
if (test_bit(i, &resume_needed)) {
temp &= ~(PORT_RWC_BITS | PORT_SUSPEND | PORT_RESUME);
ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
- ehci_vdbg (ehci, "resumed port %d\n", i + 1);
}
}
@@ -711,6 +707,145 @@ ehci_hub_descriptor (
}
/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_USB_HCD_TEST_MODE
+
+#define EHSET_TEST_SINGLE_STEP_SET_FEATURE 0x06
+
+static void usb_ehset_completion(struct urb *urb)
+{
+ struct completion *done = urb->context;
+
+ complete(done);
+}
+static int submit_single_step_set_feature(
+ struct usb_hcd *hcd,
+ struct urb *urb,
+ int is_setup
+);
+
+/*
+ * Allocate and initialize a control URB. This request will be used by the
+ * EHSET SINGLE_STEP_SET_FEATURE test in which the DATA and STATUS stages
+ * of the GetDescriptor request are sent 15 seconds after the SETUP stage.
+ * Return NULL if failed.
+ */
+static struct urb *request_single_step_set_feature_urb(
+ struct usb_device *udev,
+ void *dr,
+ void *buf,
+ struct completion *done
+) {
+ struct urb *urb;
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
+ struct usb_host_endpoint *ep;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ return NULL;
+
+ urb->pipe = usb_rcvctrlpipe(udev, 0);
+ ep = (usb_pipein(urb->pipe) ? udev->ep_in : udev->ep_out)
+ [usb_pipeendpoint(urb->pipe)];
+ if (!ep) {
+ usb_free_urb(urb);
+ return NULL;
+ }
+
+ urb->ep = ep;
+ urb->dev = udev;
+ urb->setup_packet = (void *)dr;
+ urb->transfer_buffer = buf;
+ urb->transfer_buffer_length = USB_DT_DEVICE_SIZE;
+ urb->complete = usb_ehset_completion;
+ urb->status = -EINPROGRESS;
+ urb->actual_length = 0;
+ urb->transfer_flags = URB_DIR_IN;
+ usb_get_urb(urb);
+ atomic_inc(&urb->use_count);
+ atomic_inc(&urb->dev->urbnum);
+ urb->setup_dma = dma_map_single(
+ hcd->self.controller,
+ urb->setup_packet,
+ sizeof(struct usb_ctrlrequest),
+ DMA_TO_DEVICE);
+ urb->transfer_dma = dma_map_single(
+ hcd->self.controller,
+ urb->transfer_buffer,
+ urb->transfer_buffer_length,
+ DMA_FROM_DEVICE);
+ urb->context = done;
+ return urb;
+}
+
+static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
+{
+ int retval = -ENOMEM;
+ struct usb_ctrlrequest *dr;
+ struct urb *urb;
+ struct usb_device *udev;
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct usb_device_descriptor *buf;
+ DECLARE_COMPLETION_ONSTACK(done);
+
+ /* Obtain udev of the rhub's child port */
+ udev = usb_hub_find_child(hcd->self.root_hub, port);
+ if (!udev) {
+ ehci_err(ehci, "No device attached to the RootHub\n");
+ return -ENODEV;
+ }
+ buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+ if (!dr) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ /* Fill Setup packet for GetDescriptor */
+ dr->bRequestType = USB_DIR_IN;
+ dr->bRequest = USB_REQ_GET_DESCRIPTOR;
+ dr->wValue = cpu_to_le16(USB_DT_DEVICE << 8);
+ dr->wIndex = 0;
+ dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE);
+ urb = request_single_step_set_feature_urb(udev, dr, buf, &done);
+ if (!urb)
+ goto cleanup;
+
+ /* Submit just the SETUP stage */
+ retval = submit_single_step_set_feature(hcd, urb, 1);
+ if (retval)
+ goto out1;
+ if (!wait_for_completion_timeout(&done, msecs_to_jiffies(2000))) {
+ usb_kill_urb(urb);
+ retval = -ETIMEDOUT;
+ ehci_err(ehci, "%s SETUP stage timed out on ep0\n", __func__);
+ goto out1;
+ }
+ msleep(15 * 1000);
+
+ /* Complete remaining DATA and STATUS stages using the same URB */
+ urb->status = -EINPROGRESS;
+ usb_get_urb(urb);
+ atomic_inc(&urb->use_count);
+ atomic_inc(&urb->dev->urbnum);
+ retval = submit_single_step_set_feature(hcd, urb, 0);
+ if (!retval && !wait_for_completion_timeout(&done,
+ msecs_to_jiffies(2000))) {
+ usb_kill_urb(urb);
+ retval = -ETIMEDOUT;
+ ehci_err(ehci, "%s IN stage timed out on ep0\n", __func__);
+ }
+out1:
+ usb_free_urb(urb);
+cleanup:
+ kfree(dr);
+ kfree(buf);
+ return retval;
+}
+#endif /* CONFIG_USB_HCD_TEST_MODE */
+/*-------------------------------------------------------------------------*/
static int ehci_hub_control (
struct usb_hcd *hcd,
@@ -788,7 +923,7 @@ static int ehci_hub_control (
goto error;
/* clear phy low-power mode before resume */
- if (ehci->has_hostpc) {
+ if (ehci->has_tdi_phy_lpm) {
temp1 = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, temp1 & ~HOSTPC_PHCD,
hostpc_reg);
@@ -801,6 +936,8 @@ static int ehci_hub_control (
ehci_writel(ehci, temp | PORT_RESUME, status_reg);
ehci->reset_done[wIndex] = jiffies
+ msecs_to_jiffies(20);
+ set_bit(wIndex, &ehci->resuming_ports);
+ usb_hcd_start_port_resume(&hcd->self, wIndex);
break;
case USB_PORT_FEAT_C_SUSPEND:
clear_bit(wIndex, &ehci->port_c_suspend);
@@ -865,52 +1002,49 @@ static int ehci_hub_control (
}
}
- /* whoever resumes must GetPortStatus to complete it!! */
- if (temp & PORT_RESUME) {
+ /* no reset or resume pending */
+ if (!ehci->reset_done[wIndex]) {
/* Remote Wakeup received? */
- if (!ehci->reset_done[wIndex]) {
+ if (temp & PORT_RESUME) {
/* resume signaling for 20 msec */
ehci->reset_done[wIndex] = jiffies
+ msecs_to_jiffies(20);
usb_hcd_start_port_resume(&hcd->self, wIndex);
+ set_bit(wIndex, &ehci->resuming_ports);
/* check the port again */
mod_timer(&ehci_to_hcd(ehci)->rh_timer,
ehci->reset_done[wIndex]);
}
- /* resume completed? */
- else if (time_after_eq(jiffies,
- ehci->reset_done[wIndex])) {
- clear_bit(wIndex, &ehci->suspended_ports);
- set_bit(wIndex, &ehci->port_c_suspend);
- ehci->reset_done[wIndex] = 0;
- usb_hcd_end_port_resume(&hcd->self, wIndex);
-
- /* stop resume signaling */
- temp &= ~(PORT_RWC_BITS |
- PORT_SUSPEND | PORT_RESUME);
- ehci_writel(ehci, temp, status_reg);
- clear_bit(wIndex, &ehci->resuming_ports);
- retval = ehci_handshake(ehci, status_reg,
- PORT_RESUME, 0, 2000 /* 2msec */);
- if (retval != 0) {
- ehci_err(ehci,
- "port %d resume error %d\n",
+ /* reset or resume not yet complete */
+ } else if (!time_after_eq(jiffies, ehci->reset_done[wIndex])) {
+ ; /* wait until it is complete */
+
+ /* resume completed */
+ } else if (test_bit(wIndex, &ehci->resuming_ports)) {
+ clear_bit(wIndex, &ehci->suspended_ports);
+ set_bit(wIndex, &ehci->port_c_suspend);
+ ehci->reset_done[wIndex] = 0;
+ usb_hcd_end_port_resume(&hcd->self, wIndex);
+
+ /* stop resume signaling */
+ temp &= ~(PORT_RWC_BITS | PORT_SUSPEND | PORT_RESUME);
+ ehci_writel(ehci, temp, status_reg);
+ clear_bit(wIndex, &ehci->resuming_ports);
+ retval = ehci_handshake(ehci, status_reg,
+ PORT_RESUME, 0, 2000 /* 2msec */);
+ if (retval != 0) {
+ ehci_err(ehci, "port %d resume error %d\n",
wIndex + 1, retval);
- goto error;
- }
- temp = ehci_readl(ehci, status_reg);
+ goto error;
}
- }
+ temp = ehci_readl(ehci, status_reg);
/* whoever resets must GetPortStatus to complete it!! */
- if ((temp & PORT_RESET)
- && time_after_eq(jiffies,
- ehci->reset_done[wIndex])) {
+ } else {
status |= USB_PORT_STAT_C_RESET << 16;
ehci->reset_done [wIndex] = 0;
- clear_bit(wIndex, &ehci->resuming_ports);
/* force reset to complete */
ehci_writel(ehci, temp & ~(PORT_RWC_BITS | PORT_RESET),
@@ -931,11 +1065,6 @@ static int ehci_hub_control (
ehci_readl(ehci, status_reg));
}
- if (!(temp & (PORT_RESUME|PORT_RESET))) {
- ehci->reset_done[wIndex] = 0;
- clear_bit(wIndex, &ehci->resuming_ports);
- }
-
/* transfer dedicated ports to the companion hc */
if ((temp & PORT_CONNECT) &&
test_bit(wIndex, &ehci->companion_ports)) {
@@ -1031,12 +1160,12 @@ static int ehci_hub_control (
/* After above check the port must be connected.
* Set appropriate bit thus could put phy into low power
- * mode if we have hostpc feature
+ * mode if we have tdi_phy_lpm feature
*/
temp &= ~PORT_WKCONN_E;
temp |= PORT_WKDISC_E | PORT_WKOC_E;
ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
- if (ehci->has_hostpc) {
+ if (ehci->has_tdi_phy_lpm) {
spin_unlock_irqrestore(&ehci->lock, flags);
msleep(5);/* 5ms for HCD enter low pwr mode */
spin_lock_irqsave(&ehci->lock, flags);
@@ -1056,7 +1185,7 @@ static int ehci_hub_control (
status_reg);
break;
case USB_PORT_FEAT_RESET:
- if (temp & PORT_RESUME)
+ if (temp & (PORT_SUSPEND|PORT_RESUME))
goto error;
/* line status bits may report this as low speed,
* which can be fine if this root hub has a
@@ -1070,7 +1199,6 @@ static int ehci_hub_control (
wIndex + 1);
temp |= PORT_OWNER;
} else {
- ehci_vdbg (ehci, "port %d reset\n", wIndex + 1);
temp |= PORT_RESET;
temp &= ~PORT_PE;
@@ -1091,6 +1219,15 @@ static int ehci_hub_control (
* about the EHCI-specific stuff.
*/
case USB_PORT_FEAT_TEST:
+#ifdef CONFIG_USB_HCD_TEST_MODE
+ if (selector == EHSET_TEST_SINGLE_STEP_SET_FEATURE) {
+ spin_unlock_irqrestore(&ehci->lock, flags);
+ retval = ehset_single_step_set_feature(hcd,
+ wIndex);
+ spin_lock_irqsave(&ehci->lock, flags);
+ break;
+ }
+#endif
if (!selector || selector > 5)
goto error;
spin_unlock_irqrestore(&ehci->lock, flags);
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index ef2c3a1eca4..52a77734a22 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -93,6 +93,7 @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
qh->qh_dma = dma;
// INIT_LIST_HEAD (&qh->qh_list);
INIT_LIST_HEAD (&qh->qtd_list);
+ INIT_LIST_HEAD(&qh->unlink_node);
/* dummy td enables safe urb queuing */
qh->dummy = ehci_qtd_alloc (ehci, flags);
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index 915c2db96dc..417c10da945 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -96,7 +96,7 @@ static const struct hc_driver mv_ehci_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
@@ -131,7 +131,7 @@ static const struct hc_driver mv_ehci_hc_driver = {
static int mv_ehci_probe(struct platform_device *pdev)
{
- struct mv_usb_platform_data *pdata = pdev->dev.platform_data;
+ struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
struct ehci_hcd_mv *ehci_mv;
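The pdev->dev.platform_data dereferences here and in the drivers below are switched to the dev_get_platdata() accessor. A minimal sketch of what that accessor amounts to, written from memory of linux/device.h rather than taken from this patch:

/* Sketch only, not part of this patch: the accessor is a thin wrapper. */
static inline void *dev_get_platdata(const struct device *dev)
{
	return dev->platform_data;
}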
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index e4c34ac386c..0528dc4526c 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -49,7 +49,7 @@ static const struct ehci_driver_overrides ehci_mxc_overrides __initconst = {
static int ehci_mxc_drv_probe(struct platform_device *pdev)
{
- struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
+ struct mxc_usbh_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct usb_hcd *hcd;
struct resource *res;
int irq, ret;
@@ -174,7 +174,7 @@ err_alloc:
static int ehci_mxc_drv_remove(struct platform_device *pdev)
{
- struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
+ struct mxc_usbh_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct ehci_mxc_priv *priv = (struct ehci_mxc_priv *) ehci->priv;
@@ -184,7 +184,7 @@ static int ehci_mxc_drv_remove(struct platform_device *pdev)
if (pdata && pdata->exit)
pdata->exit(pdev);
- if (pdata->otg)
+ if (pdata && pdata->otg)
usb_phy_shutdown(pdata->otg);
clk_disable_unprepare(priv->usbclk);
@@ -197,20 +197,12 @@ static int ehci_mxc_drv_remove(struct platform_device *pdev)
return 0;
}
-static void ehci_mxc_drv_shutdown(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- if (hcd->driver->shutdown)
- hcd->driver->shutdown(hcd);
-}
-
MODULE_ALIAS("platform:mxc-ehci");
static struct platform_driver ehci_mxc_driver = {
.probe = ehci_mxc_drv_probe,
.remove = ehci_mxc_drv_remove,
- .shutdown = ehci_mxc_drv_shutdown,
+ .shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "mxc-ehci",
},
diff --git a/drivers/usb/host/ehci-octeon.c b/drivers/usb/host/ehci-octeon.c
index 45cc0015841..ab0397e4d8f 100644
--- a/drivers/usb/host/ehci-octeon.c
+++ b/drivers/usb/host/ehci-octeon.c
@@ -51,7 +51,7 @@ static const struct hc_driver ehci_octeon_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 9bd7dfe3315..78b01fa475b 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -100,7 +100,7 @@ static const struct ehci_driver_overrides ehci_omap_overrides __initdata = {
static int ehci_hcd_omap_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct usbhs_omap_platform_data *pdata = dev->platform_data;
+ struct usbhs_omap_platform_data *pdata = dev_get_platdata(dev);
struct resource *res;
struct usb_hcd *hcd;
void __iomem *regs;
@@ -119,7 +119,7 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
/* For DT boot, get platform data from parent. i.e. usbhshost */
if (dev->of_node) {
- pdata = dev->parent->platform_data;
+ pdata = dev_get_platdata(dev->parent);
dev->platform_data = pdata;
}
@@ -278,14 +278,6 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
return 0;
}
-static void ehci_hcd_omap_shutdown(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev);
-
- if (hcd->driver->shutdown)
- hcd->driver->shutdown(hcd);
-}
-
static const struct of_device_id omap_ehci_dt_ids[] = {
{ .compatible = "ti,ehci-omap" },
{ }
@@ -296,7 +288,7 @@ MODULE_DEVICE_TABLE(of, omap_ehci_dt_ids);
static struct platform_driver ehci_hcd_omap_driver = {
.probe = ehci_hcd_omap_probe,
.remove = ehci_hcd_omap_remove,
- .shutdown = ehci_hcd_omap_shutdown,
+ .shutdown = usb_hcd_platform_shutdown,
/*.suspend = ehci_hcd_omap_suspend, */
/*.resume = ehci_hcd_omap_resume, */
.driver = {
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 1a450aa13eb..d1dfb9db5b4 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -139,7 +139,7 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
static int ehci_orion_drv_probe(struct platform_device *pdev)
{
- struct orion_ehci_data *pd = pdev->dev.platform_data;
+ struct orion_ehci_data *pd = dev_get_platdata(&pdev->dev);
const struct mbus_dram_target_info *dram;
struct resource *res;
struct usb_hcd *hcd;
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 595d210655b..6bd299e61f5 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -315,53 +315,11 @@ done:
* Also they depend on separate root hub suspend/resume.
*/
-static bool usb_is_intel_switchable_ehci(struct pci_dev *pdev)
-{
- return pdev->class == PCI_CLASS_SERIAL_USB_EHCI &&
- pdev->vendor == PCI_VENDOR_ID_INTEL &&
- (pdev->device == 0x1E26 ||
- pdev->device == 0x8C2D ||
- pdev->device == 0x8C26 ||
- pdev->device == 0x9C26);
-}
-
-static void ehci_enable_xhci_companion(void)
-{
- struct pci_dev *companion = NULL;
-
- /* The xHCI and EHCI controllers are not on the same PCI slot */
- for_each_pci_dev(companion) {
- if (!usb_is_intel_switchable_xhci(companion))
- continue;
- usb_enable_xhci_ports(companion);
- return;
- }
-}
-
static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- /* The BIOS on systems with the Intel Panther Point chipset may or may
- * not support xHCI natively. That means that during system resume, it
- * may switch the ports back to EHCI so that users can use their
- * keyboard to select a kernel from GRUB after resume from hibernate.
- *
- * The BIOS is supposed to remember whether the OS had xHCI ports
- * enabled before resume, and switch the ports back to xHCI when the
- * BIOS/OS semaphore is written, but we all know we can't trust BIOS
- * writers.
- *
- * Unconditionally switch the ports back to xHCI after a system resume.
- * We can't tell whether the EHCI or xHCI controller will be resumed
- * first, so we have to do the port switchover in both drivers. Writing
- * a '1' to the port switchover registers should have no effect if the
- * port was already switched over.
- */
- if (usb_is_intel_switchable_ehci(pdev))
- ehci_enable_xhci_companion();
-
if (ehci_resume(hcd, hibernated) != 0)
(void) ehci_pci_reinit(ehci, pdev);
return 0;
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index 5196d728517..f6b790ca8cf 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -39,7 +39,7 @@ static const char hcd_name[] = "ehci-platform";
static int ehci_platform_reset(struct usb_hcd *hcd)
{
struct platform_device *pdev = to_platform_device(hcd->self.controller);
- struct usb_ehci_pdata *pdata = pdev->dev.platform_data;
+ struct usb_ehci_pdata *pdata = dev_get_platdata(&pdev->dev);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
int retval;
@@ -87,14 +87,14 @@ static int ehci_platform_probe(struct platform_device *dev)
* use reasonable defaults so platforms don't have to provide these.
* with DT probing on ARM, none of these are set.
*/
- if (!dev->dev.platform_data)
+ if (!dev_get_platdata(&dev->dev))
dev->dev.platform_data = &ehci_platform_defaults;
if (!dev->dev.dma_mask)
dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
if (!dev->dev.coherent_dma_mask)
dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- pdata = dev->dev.platform_data;
+ pdata = dev_get_platdata(&dev->dev);
irq = platform_get_irq(dev, 0);
if (irq < 0) {
@@ -148,7 +148,7 @@ err_power:
static int ehci_platform_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
- struct usb_ehci_pdata *pdata = dev->dev.platform_data;
+ struct usb_ehci_pdata *pdata = dev_get_platdata(&dev->dev);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
@@ -167,7 +167,7 @@ static int ehci_platform_remove(struct platform_device *dev)
static int ehci_platform_suspend(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct usb_ehci_pdata *pdata = dev->platform_data;
+ struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
struct platform_device *pdev =
container_of(dev, struct platform_device, dev);
bool do_wakeup = device_may_wakeup(dev);
@@ -184,7 +184,7 @@ static int ehci_platform_suspend(struct device *dev)
static int ehci_platform_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct usb_ehci_pdata *pdata = dev->platform_data;
+ struct usb_ehci_pdata *pdata = dev_get_platdata(dev);
struct platform_device *pdev =
container_of(dev, struct platform_device, dev);
diff --git a/drivers/usb/host/ehci-pmcmsp.c b/drivers/usb/host/ehci-pmcmsp.c
index 601e208bd78..893b707f000 100644
--- a/drivers/usb/host/ehci-pmcmsp.c
+++ b/drivers/usb/host/ehci-pmcmsp.c
@@ -286,7 +286,7 @@ static const struct hc_driver ehci_msp_hc_driver = {
#else
.irq = ehci_irq,
#endif
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 86da09c0f8d..6cc5567bf9c 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -28,7 +28,7 @@ static const struct hc_driver ehci_ppc_of_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
@@ -215,15 +215,6 @@ static int ehci_hcd_ppc_of_remove(struct platform_device *op)
}
-static void ehci_hcd_ppc_of_shutdown(struct platform_device *op)
-{
- struct usb_hcd *hcd = platform_get_drvdata(op);
-
- if (hcd->driver->shutdown)
- hcd->driver->shutdown(hcd);
-}
-
-
static const struct of_device_id ehci_hcd_ppc_of_match[] = {
{
.compatible = "usb-ehci",
@@ -236,7 +227,7 @@ MODULE_DEVICE_TABLE(of, ehci_hcd_ppc_of_match);
static struct platform_driver ehci_hcd_ppc_of_driver = {
.probe = ehci_hcd_ppc_of_probe,
.remove = ehci_hcd_ppc_of_remove,
- .shutdown = ehci_hcd_ppc_of_shutdown,
+ .shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ppc-of-ehci",
.owner = THIS_MODULE,
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index fd983771b02..8188542ba17 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -71,7 +71,7 @@ static const struct hc_driver ps3_ehci_hc_driver = {
.product_desc = "PS3 EHCI Host Controller",
.hcd_priv_size = sizeof(struct ehci_hcd),
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
.reset = ps3_ehci_hc_reset,
.start = ehci_run,
.stop = ehci_stop,
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index d34b399b78e..e321804c347 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -168,13 +168,13 @@ static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh,
* Note: this routine is never called for Isochronous transfers.
*/
if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
-#ifdef DEBUG
+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
struct usb_device *tt = urb->dev->tt->hub;
dev_dbg(&tt->dev,
"clear tt buffer port %d, a%d ep%d t%08x\n",
urb->dev->ttport, urb->dev->devnum,
usb_pipeendpoint(urb->pipe), token);
-#endif /* DEBUG */
+#endif /* DEBUG || CONFIG_DYNAMIC_DEBUG */
if (!ehci_is_TDI(ehci)
|| urb->dev->tt->hub !=
ehci_to_hcd(ehci)->self.root_hub) {
@@ -240,13 +240,6 @@ static int qtd_copy_status (
} else { /* unknown */
status = -EPROTO;
}
-
- ehci_vdbg (ehci,
- "dev%d ep%d%s qtd token %08x --> status %d\n",
- usb_pipedevice (urb->pipe),
- usb_pipeendpoint (urb->pipe),
- usb_pipein (urb->pipe) ? "in" : "out",
- token, status);
}
return status;
@@ -254,8 +247,6 @@ static int qtd_copy_status (
static void
ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status)
-__releases(ehci->lock)
-__acquires(ehci->lock)
{
if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
/* ... update hc-wide periodic stats */
@@ -281,11 +272,8 @@ __acquires(ehci->lock)
urb->actual_length, urb->transfer_buffer_length);
#endif
- /* complete() can reenter this HCD */
usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb);
- spin_unlock (&ehci->lock);
usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status);
- spin_lock (&ehci->lock);
}
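Dropping the unlock/lock pair around usb_hcd_giveback_urb() goes together with the HCD_BH flag added to the platform glue drivers in this patch: completion handlers now run from a tasklet rather than from the hard-IRQ path, so complete() can no longer re-enter the HCD while ehci->lock is held. A simplified sketch of the idea (illustrative only, assuming <linux/usb/hcd.h>; not the exact usbcore code path):

/* Illustrative only -- the gist of what HCD_BH advertises. */
static bool giveback_in_bh(struct usb_hcd *hcd)
{
	/* drivers setting HCD_BH get their URBs completed from a tasklet,
	 * so the HCD's spinlock is never held across complete() */
	return hcd->driver->flags & HCD_BH;
}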
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
@@ -1144,6 +1132,109 @@ submit_async (
}
/*-------------------------------------------------------------------------*/
+#ifdef CONFIG_USB_HCD_TEST_MODE
+/*
+ * This function creates the qtds and submits them for the
+ * SINGLE_STEP_SET_FEATURE Test.
+ * This is done in two parts: first SETUP req for GetDesc is sent then
+ * 15 seconds later, the IN stage for GetDesc starts to req data from dev
+ *
+ * is_setup : i/p arguement decides which of the two stage needs to be
+ * performed; TRUE - SETUP and FALSE - IN+STATUS
+ * Returns 0 if success
+ */
+static int submit_single_step_set_feature(
+ struct usb_hcd *hcd,
+ struct urb *urb,
+ int is_setup
+) {
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct list_head qtd_list;
+ struct list_head *head;
+
+ struct ehci_qtd *qtd, *qtd_prev;
+ dma_addr_t buf;
+ int len, maxpacket;
+ u32 token;
+
+ INIT_LIST_HEAD(&qtd_list);
+ head = &qtd_list;
+
+ /* URBs map to sequences of QTDs: one logical transaction */
+ qtd = ehci_qtd_alloc(ehci, GFP_KERNEL);
+ if (unlikely(!qtd))
+ return -1;
+ list_add_tail(&qtd->qtd_list, head);
+ qtd->urb = urb;
+
+ token = QTD_STS_ACTIVE;
+ token |= (EHCI_TUNE_CERR << 10);
+
+ len = urb->transfer_buffer_length;
+ /*
+ * Check if the request is to perform just the SETUP stage (getDesc)
+	 * as in the SINGLE_STEP_SET_FEATURE test; the DATA stage (IN)
+	 * happens 15 secs after the setup
+ */
+ if (is_setup) {
+ /* SETUP pid */
+ qtd_fill(ehci, qtd, urb->setup_dma,
+ sizeof(struct usb_ctrlrequest),
+ token | (2 /* "setup" */ << 8), 8);
+
+ submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);
+		return 0; /* Return now; we shall come back after 15 seconds */
+ }
+
+ /*
+	 * IN: data transfer stage: set up the buffer and start the IN
+	 * transaction phase for the GetDesc SETUP that was sent 15 seconds back
+	 */
+	token ^= QTD_TOGGLE;	/* We need to start IN with a DATA1 PID sequence */
+ buf = urb->transfer_dma;
+
+	token |= (1 /* "in" */ << 8);	/* This is the IN stage */
+
+ maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, 0));
+
+ qtd_fill(ehci, qtd, buf, len, token, maxpacket);
+
+ /*
+	 * Our IN phase shall always be a short read; so keep the queue running
+	 * and let it advance to the next qtd, which is the zero-length OUT status
+ */
+ qtd->hw_alt_next = EHCI_LIST_END(ehci);
+
+ /* STATUS stage for GetDesc control request */
+ token ^= 0x0100; /* "in" <--> "out" */
+ token |= QTD_TOGGLE; /* force DATA1 */
+
+ qtd_prev = qtd;
+ qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC);
+ if (unlikely(!qtd))
+ goto cleanup;
+ qtd->urb = urb;
+ qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
+ list_add_tail(&qtd->qtd_list, head);
+
+	/* don't fill any data in such packets */
+ qtd_fill(ehci, qtd, 0, 0, token, 0);
+
+ /* by default, enable interrupt on urb completion */
+ if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT)))
+ qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
+
+ submit_async(ehci, urb, &qtd_list, GFP_KERNEL);
+
+ return 0;
+
+cleanup:
+ qtd_list_free(ehci, urb, head);
+ return -1;
+}
+#endif /* CONFIG_USB_HCD_TEST_MODE */
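For orientation, the two stages described above are driven from the EHSET_TEST_SINGLE_STEP_SET_FEATURE hook added to ehci_hub_control() earlier in this patch. A hedged sketch of the call sequence; only submit_single_step_set_feature() is taken from this patch, the wrapper name is invented for illustration:

/* Illustrative wrapper, not part of this patch. */
static int single_step_set_feature_demo(struct usb_hcd *hcd, struct urb *urb)
{
	int ret;

	ret = submit_single_step_set_feature(hcd, urb, 1);	/* SETUP stage only */
	if (ret)
		return ret;
	msleep(15 * 1000);	/* the test mandates a 15 second gap */
	return submit_single_step_set_feature(hcd, urb, 0);	/* IN + STATUS stages */
}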
+
+/*-------------------------------------------------------------------------*/
static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
diff --git a/drivers/usb/host/ehci-s5p.c b/drivers/usb/host/ehci-s5p.c
index 7cc26e621aa..7c3de95c705 100644
--- a/drivers/usb/host/ehci-s5p.c
+++ b/drivers/usb/host/ehci-s5p.c
@@ -75,7 +75,7 @@ static void s5p_setup_vbus_gpio(struct platform_device *pdev)
static int s5p_ehci_probe(struct platform_device *pdev)
{
- struct s5p_ehci_platdata *pdata = pdev->dev.platform_data;
+ struct s5p_ehci_platdata *pdata = dev_get_platdata(&pdev->dev);
struct s5p_ehci_hcd *s5p_ehci;
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
@@ -220,14 +220,6 @@ static int s5p_ehci_remove(struct platform_device *pdev)
return 0;
}
-static void s5p_ehci_shutdown(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- if (hcd->driver->shutdown)
- hcd->driver->shutdown(hcd);
-}
-
#ifdef CONFIG_PM
static int s5p_ehci_suspend(struct device *dev)
{
@@ -297,7 +289,7 @@ MODULE_DEVICE_TABLE(of, exynos_ehci_match);
static struct platform_driver s5p_ehci_driver = {
.probe = s5p_ehci_probe,
.remove = s5p_ehci_remove,
- .shutdown = s5p_ehci_shutdown,
+ .shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "s5p-ehci",
.owner = THIS_MODULE,
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index f80d0330d54..85dd24ed97a 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -169,7 +169,7 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
break;
}
}
-#ifdef DEBUG
+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
if (usecs > ehci->uframe_periodic_max)
ehci_err (ehci, "uframe %d sched overrun: %d usecs\n",
frame * 8 + uframe, usecs);
@@ -327,17 +327,8 @@ static int tt_available (
periodic_tt_usecs (ehci, dev, frame, tt_usecs);
- ehci_vdbg(ehci, "tt frame %d check %d usecs start uframe %d in"
- " schedule %d/%d/%d/%d/%d/%d/%d/%d\n",
- frame, usecs, uframe,
- tt_usecs[0], tt_usecs[1], tt_usecs[2], tt_usecs[3],
- tt_usecs[4], tt_usecs[5], tt_usecs[6], tt_usecs[7]);
-
- if (max_tt_usecs[uframe] <= tt_usecs[uframe]) {
- ehci_vdbg(ehci, "frame %d uframe %d fully scheduled\n",
- frame, uframe);
+ if (max_tt_usecs[uframe] <= tt_usecs[uframe])
return 0;
- }
/* special case for isoc transfers larger than 125us:
* the first and each subsequent fully used uframe
@@ -348,13 +339,8 @@ static int tt_available (
int ufs = (usecs / 125);
int i;
for (i = uframe; i < (uframe + ufs) && i < 8; i++)
- if (0 < tt_usecs[i]) {
- ehci_vdbg(ehci,
- "multi-uframe xfer can't fit "
- "in frame %d uframe %d\n",
- frame, i);
+ if (0 < tt_usecs[i])
return 0;
- }
}
tt_usecs[uframe] += usecs;
@@ -362,12 +348,8 @@ static int tt_available (
carryover_tt_bandwidth(tt_usecs);
/* fail if the carryover pushed bw past the last uframe's limit */
- if (max_tt_usecs[7] < tt_usecs[7]) {
- ehci_vdbg(ehci,
- "tt unavailable usecs %d frame %d uframe %d\n",
- usecs, frame, uframe);
+ if (max_tt_usecs[7] < tt_usecs[7])
return 0;
- }
}
return 1;
@@ -601,12 +583,29 @@ static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
list_del(&qh->intr_node);
}
+static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+ if (qh->qh_state != QH_STATE_LINKED ||
+ list_empty(&qh->unlink_node))
+ return;
+
+ list_del_init(&qh->unlink_node);
+
+ /*
+	 * TODO: disable the EHCI_HRTIMER_START_UNLINK_INTR event here
+	 * to avoid an unnecessary CPU wakeup
+ */
+}
+
static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
/* If the QH isn't linked then there's nothing we can do. */
if (qh->qh_state != QH_STATE_LINKED)
return;
+ /* if the qh is waiting for unlink, cancel it now */
+ cancel_unlink_wait_intr(ehci, qh);
+
qh_unlink_periodic (ehci, qh);
/* Make sure the unlinks are visible before starting the timer */
@@ -632,6 +631,27 @@ static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
}
}
+/*
+ * It is common for only one intr URB to be scheduled on one qh, and
+ * given that complete() is run in tasklet context, introduce a small
+ * delay to avoid unlinking the qh too early.
+ */
+static void start_unlink_intr_wait(struct ehci_hcd *ehci,
+ struct ehci_qh *qh)
+{
+ qh->unlink_cycle = ehci->intr_unlink_wait_cycle;
+
+ /* New entries go at the end of the intr_unlink_wait list */
+ list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait);
+
+ if (ehci->rh_state < EHCI_RH_RUNNING)
+ ehci_handle_start_intr_unlinks(ehci);
+ else if (ehci->intr_unlink_wait.next == &qh->unlink_node) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
+ ++ehci->intr_unlink_wait_cycle;
+ }
+}
+
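Putting the new pieces together (all names below come from this patch; the 5 ms figure is the EHCI_HRTIMER_START_UNLINK_INTR delay added to ehci-timer.c later in this diff):

/*
 * Timeline sketch of the deferred interrupt-QH unlink:
 *
 *   t = 0      last qtd completes and qtd_list becomes empty;
 *              scan_intr() calls start_unlink_intr_wait(ehci, qh)
 *   t < 5 ms   a new interrupt URB arrives;
 *              intr_submit() calls cancel_unlink_wait_intr(), qh stays linked
 *   t = 5 ms   nothing new arrived;
 *              ehci_handle_start_intr_unlinks() calls start_unlink_intr()
 */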
static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
{
struct ehci_qh_hw *hw = qh->hw;
@@ -889,6 +909,9 @@ static int intr_submit (
if (qh->qh_state == QH_STATE_IDLE) {
qh_refresh(ehci, qh);
qh_link_periodic(ehci, qh);
+ } else {
+ /* cancel unlink wait for the qh */
+ cancel_unlink_wait_intr(ehci, qh);
}
/* ... update usbfs periodic stats */
@@ -924,9 +947,11 @@ static void scan_intr(struct ehci_hcd *ehci)
* in qh_unlink_periodic().
*/
temp = qh_completions(ehci, qh);
- if (unlikely(temp || (list_empty(&qh->qtd_list) &&
- qh->qh_state == QH_STATE_LINKED)))
+ if (unlikely(temp))
start_unlink_intr(ehci, qh);
+ else if (unlikely(list_empty(&qh->qtd_list) &&
+ qh->qh_state == QH_STATE_LINKED))
+ start_unlink_intr_wait(ehci, qh);
}
}
}
@@ -1391,21 +1416,20 @@ iso_stream_schedule (
/* Behind the scheduling threshold? */
if (unlikely(start < next)) {
+ unsigned now2 = (now - base) & (mod - 1);
/* USB_ISO_ASAP: Round up to the first available slot */
if (urb->transfer_flags & URB_ISO_ASAP)
start += (next - start + period - 1) & -period;
/*
- * Not ASAP: Use the next slot in the stream. If
- * the entire URB falls before the threshold, fail.
+ * Not ASAP: Use the next slot in the stream,
+ * no matter what.
*/
- else if (start + span - period < next) {
- ehci_dbg(ehci, "iso urb late %p (%u+%u < %u)\n",
+ else if (start + span - period < now2) {
+ ehci_dbg(ehci, "iso underrun %p (%u+%u < %u)\n",
urb, start + base,
- span - period, next + base);
- status = -EXDEV;
- goto fail;
+ span - period, now2 + base);
}
}
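The URB_ISO_ASAP branch above rounds the gap up to a whole number of periods; since period is a power of two, (delta + period - 1) & -period is the usual round-up trick. A small worked example with made-up numbers:

/* Round-up illustration (values are made up; period is a power of two). */
unsigned int delta = 13, period = 8;
unsigned int rounded = (delta + period - 1) & -period;	/* (13 + 7) & ~7 = 16 */
/* so start advances by two full periods and stays period-aligned */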
@@ -1574,16 +1598,9 @@ static void itd_link_urb(
next_uframe = stream->next_uframe & (mod - 1);
- if (unlikely (list_empty(&stream->td_list))) {
+ if (unlikely (list_empty(&stream->td_list)))
ehci_to_hcd(ehci)->self.bandwidth_allocated
+= stream->bandwidth;
- ehci_vdbg (ehci,
- "schedule devp %s ep%d%s-iso period %d start %d.%d\n",
- urb->dev->devpath, stream->bEndpointAddress & 0x0f,
- (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
- urb->interval,
- next_uframe >> 3, next_uframe & 0x7);
- }
if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
if (ehci->amd_pll_fix == 1)
@@ -1718,14 +1735,9 @@ static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd)
usb_amd_quirk_pll_enable();
}
- if (unlikely(list_is_singular(&stream->td_list))) {
+ if (unlikely(list_is_singular(&stream->td_list)))
ehci_to_hcd(ehci)->self.bandwidth_allocated
-= stream->bandwidth;
- ehci_vdbg (ehci,
- "deschedule devp %s ep%d%s-iso\n",
- dev->devpath, stream->bEndpointAddress & 0x0f,
- (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
- }
done:
itd->urb = NULL;
@@ -1983,17 +1995,10 @@ static void sitd_link_urb(
next_uframe = stream->next_uframe;
- if (list_empty(&stream->td_list)) {
+ if (list_empty(&stream->td_list))
/* usbfs ignores TT bandwidth */
ehci_to_hcd(ehci)->self.bandwidth_allocated
+= stream->bandwidth;
- ehci_vdbg (ehci,
- "sched devp %s ep%d%s-iso [%d] %dms/%04x\n",
- urb->dev->devpath, stream->bEndpointAddress & 0x0f,
- (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
- (next_uframe >> 3) & (ehci->periodic_size - 1),
- stream->interval, hc32_to_cpu(ehci, stream->splits));
- }
if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) {
if (ehci->amd_pll_fix == 1)
@@ -2107,14 +2112,9 @@ static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd)
usb_amd_quirk_pll_enable();
}
- if (list_is_singular(&stream->td_list)) {
+ if (list_is_singular(&stream->td_list))
ehci_to_hcd(ehci)->self.bandwidth_allocated
-= stream->bandwidth;
- ehci_vdbg (ehci,
- "deschedule devp %s ep%d%s-iso\n",
- dev->devpath, stream->bEndpointAddress & 0x0f,
- (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
- }
done:
sitd->urb = NULL;
diff --git a/drivers/usb/host/ehci-sead3.c b/drivers/usb/host/ehci-sead3.c
index b2de52d3961..8a734498079 100644
--- a/drivers/usb/host/ehci-sead3.c
+++ b/drivers/usb/host/ehci-sead3.c
@@ -55,7 +55,7 @@ const struct hc_driver ehci_sead3_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index c4c0ee92a39..dc899eb2b86 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -36,7 +36,7 @@ static const struct hc_driver ehci_sh_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_USB2 | HCD_MEMORY,
+ .flags = HCD_USB2 | HCD_MEMORY | HCD_BH,
/*
* basic lifecycle operations
@@ -104,7 +104,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
goto fail_create_hcd;
}
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
/* initialize hcd */
hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev,
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 6ee7ef79b4f..78fa76da332 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -25,9 +25,9 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
-#include <linux/platform_data/tegra_usb.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/usb/ehci_def.h>
@@ -51,6 +51,10 @@
static struct hc_driver __read_mostly tegra_ehci_hc_driver;
+struct tegra_ehci_soc_config {
+ bool has_hostpc;
+};
+
static int (*orig_hub_control)(struct usb_hcd *hcd,
u16 typeReq, u16 wValue, u16 wIndex,
char *buf, u16 wLength);
@@ -58,7 +62,6 @@ static int (*orig_hub_control)(struct usb_hcd *hcd,
struct tegra_ehci_hcd {
struct tegra_usb_phy *phy;
struct clk *clk;
- struct usb_phy *transceiver;
int port_resuming;
bool needs_double_reset;
enum tegra_usb_phy_port_speed port_speed;
@@ -322,50 +325,38 @@ static void tegra_ehci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
free_dma_aligned_buffer(urb);
}
-static int setup_vbus_gpio(struct platform_device *pdev,
- struct tegra_ehci_platform_data *pdata)
-{
- int err = 0;
- int gpio;
-
- gpio = pdata->vbus_gpio;
- if (!gpio_is_valid(gpio))
- gpio = of_get_named_gpio(pdev->dev.of_node,
- "nvidia,vbus-gpio", 0);
- if (!gpio_is_valid(gpio))
- return 0;
+static const struct tegra_ehci_soc_config tegra30_soc_config = {
+ .has_hostpc = true,
+};
- err = gpio_request(gpio, "vbus_gpio");
- if (err) {
- dev_err(&pdev->dev, "can't request vbus gpio %d", gpio);
- return err;
- }
- err = gpio_direction_output(gpio, 1);
- if (err) {
- dev_err(&pdev->dev, "can't enable vbus\n");
- return err;
- }
+static const struct tegra_ehci_soc_config tegra20_soc_config = {
+ .has_hostpc = false,
+};
- return err;
-}
+static struct of_device_id tegra_ehci_of_match[] = {
+ { .compatible = "nvidia,tegra30-ehci", .data = &tegra30_soc_config },
+ { .compatible = "nvidia,tegra20-ehci", .data = &tegra20_soc_config },
+ { },
+};
static int tegra_ehci_probe(struct platform_device *pdev)
{
+ const struct of_device_id *match;
+ const struct tegra_ehci_soc_config *soc_config;
struct resource *res;
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
struct tegra_ehci_hcd *tegra;
- struct tegra_ehci_platform_data *pdata;
int err = 0;
int irq;
- struct device_node *np_phy;
struct usb_phy *u_phy;
- pdata = pdev->dev.platform_data;
- if (!pdata) {
- dev_err(&pdev->dev, "Platform data missing\n");
- return -EINVAL;
+ match = of_match_device(tegra_ehci_of_match, &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
}
+ soc_config = match->data;
/* Right now device-tree probed devices don't get dma_mask set.
* Since shared usb code relies on it, set it here for now.
@@ -376,14 +367,11 @@ static int tegra_ehci_probe(struct platform_device *pdev)
if (!pdev->dev.coherent_dma_mask)
pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- setup_vbus_gpio(pdev, pdata);
-
hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev,
dev_name(&pdev->dev));
if (!hcd) {
dev_err(&pdev->dev, "Unable to create HCD\n");
- err = -ENOMEM;
- goto cleanup_vbus_gpio;
+ return -ENOMEM;
}
platform_set_drvdata(pdev, hcd);
ehci = hcd_to_ehci(hcd);
@@ -406,13 +394,7 @@ static int tegra_ehci_probe(struct platform_device *pdev)
udelay(1);
tegra_periph_reset_deassert(tegra->clk);
- np_phy = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0);
- if (!np_phy) {
- err = -ENODEV;
- goto cleanup_clk_en;
- }
-
- u_phy = tegra_usb_get_phy(np_phy);
+ u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
if (IS_ERR(u_phy)) {
err = PTR_ERR(u_phy);
goto cleanup_clk_en;
@@ -437,6 +419,7 @@ static int tegra_ehci_probe(struct platform_device *pdev)
goto cleanup_clk_en;
}
ehci->caps = hcd->regs + 0x100;
+ ehci->has_hostpc = soc_config->has_hostpc;
err = usb_phy_init(hcd->phy);
if (err) {
@@ -466,26 +449,18 @@ static int tegra_ehci_probe(struct platform_device *pdev)
goto cleanup_phy;
}
- if (pdata->operating_mode == TEGRA_USB_OTG) {
- tegra->transceiver =
- devm_usb_get_phy(&pdev->dev, USB_PHY_TYPE_USB2);
- if (!IS_ERR(tegra->transceiver))
- otg_set_host(tegra->transceiver->otg, &hcd->self);
- } else {
- tegra->transceiver = ERR_PTR(-ENODEV);
- }
+ otg_set_host(u_phy->otg, &hcd->self);
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err) {
dev_err(&pdev->dev, "Failed to add USB HCD\n");
- goto cleanup_transceiver;
+ goto cleanup_otg_set_host;
}
return err;
-cleanup_transceiver:
- if (!IS_ERR(tegra->transceiver))
- otg_set_host(tegra->transceiver->otg, NULL);
+cleanup_otg_set_host:
+ otg_set_host(u_phy->otg, NULL);
cleanup_phy:
usb_phy_shutdown(hcd->phy);
cleanup_clk_en:
@@ -494,8 +469,6 @@ cleanup_clk_get:
clk_put(tegra->clk);
cleanup_hcd_create:
usb_put_hcd(hcd);
-cleanup_vbus_gpio:
- /* FIXME: Undo setup_vbus_gpio() here */
return err;
}
@@ -505,8 +478,7 @@ static int tegra_ehci_remove(struct platform_device *pdev)
struct tegra_ehci_hcd *tegra =
(struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
- if (!IS_ERR(tegra->transceiver))
- otg_set_host(tegra->transceiver->otg, NULL);
+ otg_set_host(hcd->phy->otg, NULL);
usb_phy_shutdown(hcd->phy);
usb_remove_hcd(hcd);
@@ -525,11 +497,6 @@ static void tegra_ehci_hcd_shutdown(struct platform_device *pdev)
hcd->driver->shutdown(hcd);
}
-static struct of_device_id tegra_ehci_of_match[] = {
- { .compatible = "nvidia,tegra20-ehci", },
- { },
-};
-
static struct platform_driver tegra_ehci_driver = {
.probe = tegra_ehci_probe,
.remove = tegra_ehci_remove,
diff --git a/drivers/usb/host/ehci-tilegx.c b/drivers/usb/host/ehci-tilegx.c
index d72b2929c03..67026ffbf9a 100644
--- a/drivers/usb/host/ehci-tilegx.c
+++ b/drivers/usb/host/ehci-tilegx.c
@@ -61,7 +61,7 @@ static const struct hc_driver ehci_tilegx_hc_driver = {
* Generic hardware linkage.
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* Basic lifecycle operations.
@@ -101,7 +101,7 @@ static int ehci_hcd_tilegx_drv_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
- struct tilegx_usb_platform_data *pdata = pdev->dev.platform_data;
+ struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
pte_t pte = { 0 };
int my_cpu = smp_processor_id();
int ret;
@@ -186,7 +186,7 @@ err_hcd:
static int ehci_hcd_tilegx_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct tilegx_usb_platform_data *pdata = pdev->dev.platform_data;
+ struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
diff --git a/drivers/usb/host/ehci-timer.c b/drivers/usb/host/ehci-timer.c
index 11e5b32f73e..424ac5d8371 100644
--- a/drivers/usb/host/ehci-timer.c
+++ b/drivers/usb/host/ehci-timer.c
@@ -72,6 +72,7 @@ static unsigned event_delays_ns[] = {
1 * NSEC_PER_MSEC, /* EHCI_HRTIMER_POLL_DEAD */
1125 * NSEC_PER_USEC, /* EHCI_HRTIMER_UNLINK_INTR */
2 * NSEC_PER_MSEC, /* EHCI_HRTIMER_FREE_ITDS */
+ 5 * NSEC_PER_MSEC, /* EHCI_HRTIMER_START_UNLINK_INTR */
6 * NSEC_PER_MSEC, /* EHCI_HRTIMER_ASYNC_UNLINKS */
10 * NSEC_PER_MSEC, /* EHCI_HRTIMER_IAA_WATCHDOG */
10 * NSEC_PER_MSEC, /* EHCI_HRTIMER_DISABLE_PERIODIC */
@@ -215,6 +216,36 @@ static void ehci_handle_controller_death(struct ehci_hcd *ehci)
/* Not in process context, so don't try to reset the controller */
}
+/* start to unlink interrupt QHs */
+static void ehci_handle_start_intr_unlinks(struct ehci_hcd *ehci)
+{
+ bool stopped = (ehci->rh_state < EHCI_RH_RUNNING);
+
+ /*
+	 * Process all the QHs on the intr_unlink_wait list that were added
+ * before the current unlink cycle began. The list is in
+ * temporal order, so stop when we reach the first entry in the
+ * current cycle. But if the root hub isn't running then
+ * process all the QHs on the list.
+ */
+ while (!list_empty(&ehci->intr_unlink_wait)) {
+ struct ehci_qh *qh;
+
+ qh = list_first_entry(&ehci->intr_unlink_wait,
+ struct ehci_qh, unlink_node);
+ if (!stopped && (qh->unlink_cycle ==
+ ehci->intr_unlink_wait_cycle))
+ break;
+ list_del_init(&qh->unlink_node);
+ start_unlink_intr(ehci, qh);
+ }
+
+ /* Handle remaining entries later */
+ if (!list_empty(&ehci->intr_unlink_wait)) {
+ ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true);
+ ++ehci->intr_unlink_wait_cycle;
+ }
+}
/* Handle unlinked interrupt QHs once they are gone from the hardware */
static void ehci_handle_intr_unlinks(struct ehci_hcd *ehci)
@@ -236,7 +267,7 @@ static void ehci_handle_intr_unlinks(struct ehci_hcd *ehci)
unlink_node);
if (!stopped && qh->unlink_cycle == ehci->intr_unlink_cycle)
break;
- list_del(&qh->unlink_node);
+ list_del_init(&qh->unlink_node);
end_unlink_intr(ehci, qh);
}
@@ -363,6 +394,7 @@ static void (*event_handlers[])(struct ehci_hcd *) = {
ehci_handle_controller_death, /* EHCI_HRTIMER_POLL_DEAD */
ehci_handle_intr_unlinks, /* EHCI_HRTIMER_UNLINK_INTR */
end_free_itds, /* EHCI_HRTIMER_FREE_ITDS */
+ ehci_handle_start_intr_unlinks, /* EHCI_HRTIMER_START_UNLINK_INTR */
unlink_empty_async, /* EHCI_HRTIMER_ASYNC_UNLINKS */
ehci_iaa_watchdog, /* EHCI_HRTIMER_IAA_WATCHDOG */
ehci_disable_PSE, /* EHCI_HRTIMER_DISABLE_PERIODIC */
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
index 59e0e24c753..1c370dfbee0 100644
--- a/drivers/usb/host/ehci-w90x900.c
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -108,7 +108,7 @@ static const struct hc_driver ehci_w90x900_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_USB2|HCD_MEMORY,
+ .flags = HCD_USB2|HCD_MEMORY|HCD_BH,
/*
* basic lifecycle operations
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 35c7f90384a..95979f9f438 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -79,7 +79,7 @@ static const struct hc_driver ehci_xilinx_of_hc_driver = {
* generic hardware linkage
*/
.irq = ehci_irq,
- .flags = HCD_MEMORY | HCD_USB2,
+ .flags = HCD_MEMORY | HCD_USB2 | HCD_BH,
/*
* basic lifecycle operations
@@ -220,21 +220,6 @@ static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
return 0;
}
-/**
- * ehci_hcd_xilinx_of_shutdown - shutdown the hcd
- * @op: pointer to platform_device structure that is to be removed
- *
- * Properly shutdown the hcd, call driver's shutdown routine.
- */
-static void ehci_hcd_xilinx_of_shutdown(struct platform_device *op)
-{
- struct usb_hcd *hcd = platform_get_drvdata(op);
-
- if (hcd->driver->shutdown)
- hcd->driver->shutdown(hcd);
-}
-
-
static const struct of_device_id ehci_hcd_xilinx_of_match[] = {
{.compatible = "xlnx,xps-usb-host-1.00.a",},
{},
@@ -244,7 +229,7 @@ MODULE_DEVICE_TABLE(of, ehci_hcd_xilinx_of_match);
static struct platform_driver ehci_hcd_xilinx_of_driver = {
.probe = ehci_hcd_xilinx_of_probe,
.remove = ehci_hcd_xilinx_of_remove,
- .shutdown = ehci_hcd_xilinx_of_shutdown,
+ .shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "xilinx-of-ehci",
.owner = THIS_MODULE,
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 64f9a08e959..291db7d09f2 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -38,7 +38,7 @@ typedef __u16 __bitwise __hc16;
#endif
/* statistics can be kept for tuning/monitoring */
-#ifdef DEBUG
+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
#define EHCI_STATS
#endif
@@ -88,6 +88,7 @@ enum ehci_hrtimer_event {
EHCI_HRTIMER_POLL_DEAD, /* Wait for dead controller to stop */
EHCI_HRTIMER_UNLINK_INTR, /* Wait for interrupt QH unlink */
EHCI_HRTIMER_FREE_ITDS, /* Wait for unused iTDs and siTDs */
+ EHCI_HRTIMER_START_UNLINK_INTR, /* Unlink empty interrupt QHs */
EHCI_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */
EHCI_HRTIMER_IAA_WATCHDOG, /* Handle lost IAA interrupts */
EHCI_HRTIMER_DISABLE_PERIODIC, /* Wait to disable periodic sched */
@@ -143,7 +144,9 @@ struct ehci_hcd { /* one per controller */
unsigned i_thresh; /* uframes HC might cache */
union ehci_shadow *pshadow; /* mirror hw periodic table */
+ struct list_head intr_unlink_wait;
struct list_head intr_unlink;
+ unsigned intr_unlink_wait_cycle;
unsigned intr_unlink_cycle;
unsigned now_frame; /* frame from HC hardware */
unsigned last_iso_frame; /* last frame scanned for iso */
@@ -210,6 +213,7 @@ struct ehci_hcd { /* one per controller */
#define OHCI_HCCTRL_LEN 0x4
__hc32 *ohci_hcctrl_reg;
unsigned has_hostpc:1;
+ unsigned has_tdi_phy_lpm:1;
unsigned has_ppcd:1; /* support per-port change bits */
u8 sbrn; /* packed release number */
@@ -222,7 +226,7 @@ struct ehci_hcd { /* one per controller */
#endif
/* debug files */
-#ifdef DEBUG
+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
struct dentry *debug_dir;
#endif
@@ -778,15 +782,10 @@ static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x)
#define ehci_warn(ehci, fmt, args...) \
dev_warn(ehci_to_hcd(ehci)->self.controller , fmt , ## args)
-#ifdef VERBOSE_DEBUG
-# define ehci_vdbg ehci_dbg
-#else
- static inline void ehci_vdbg(struct ehci_hcd *ehci, ...) {}
-#endif
-#ifndef DEBUG
+#if !defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
#define STUB_DEBUG_FILES
-#endif /* DEBUG */
+#endif /* !DEBUG && !CONFIG_DYNAMIC_DEBUG */
/*-------------------------------------------------------------------------*/
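The DEBUG-only guards in ehci-q.c, ehci-sched.c and ehci.h above all grow a CONFIG_DYNAMIC_DEBUG test because, with dynamic debug enabled, ehci_dbg()/dev_dbg() calls are compiled in and can be switched on at runtime even though DEBUG is not defined, so the code and data feeding them must stay in the build. The pattern, written out once using names from the hunks above:

#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
	struct dentry *debug_dir;	/* debugfs support stays available */
#endif

#if !defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG)
#define STUB_DEBUG_FILES		/* debug files compiled out entirely */
#endif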
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
new file mode 100644
index 00000000000..fce13bcc4a3
--- /dev/null
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -0,0 +1,6049 @@
+/*
+ * Faraday FOTG210 EHCI-like driver
+ *
+ * Copyright (c) 2013 Faraday Technology Corporation
+ *
+ * Author: Yuan-Hsin Chen <yhchen@faraday-tech.com>
+ * Feng-Hsin Chiang <john453@faraday-tech.com>
+ * Po-Yu Chuang <ratbert.chuang@gmail.com>
+ *
+ * Most of code borrowed from the Linux-3.7 EHCI driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/hrtimer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+
+/*-------------------------------------------------------------------------*/
+#define DRIVER_AUTHOR "Yuan-Hsin Chen"
+#define DRIVER_DESC "FOTG210 Host Controller (EHCI) Driver"
+
+static const char hcd_name[] = "fotg210_hcd";
+
+#undef VERBOSE_DEBUG
+#undef FOTG210_URB_TRACE
+
+#ifdef DEBUG
+#define FOTG210_STATS
+#endif
+
+/* magic numbers that can affect system performance */
+#define FOTG210_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
+#define FOTG210_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
+#define FOTG210_TUNE_RL_TT 0
+#define FOTG210_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
+#define FOTG210_TUNE_MULT_TT 1
+/*
+ * Some drivers think it's safe to schedule isochronous transfers more than
+ * 256 ms into the future (partly as a result of an old bug in the scheduling
+ * code). In an attempt to avoid trouble, we will use a minimum scheduling
+ * length of 512 frames instead of 256.
+ */
+#define FOTG210_TUNE_FLS 1 /* (medium) 512-frame schedule */
+
+/* Initial IRQ latency: faster than hw default */
+static int log2_irq_thresh; /* 0 to 6 */
+module_param(log2_irq_thresh, int, S_IRUGO);
+MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
+
+/* initial park setting: slower than hw default */
+static unsigned park;
+module_param(park, uint, S_IRUGO);
+MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");
+
+/* for link power management(LPM) feature */
+static unsigned int hird;
+module_param(hird, int, S_IRUGO);
+MODULE_PARM_DESC(hird, "host initiated resume duration, +1 for each 75us");
+
+#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
+
+#include "fotg210.h"
+
+/*-------------------------------------------------------------------------*/
+
+#define fotg210_dbg(fotg210, fmt, args...) \
+ dev_dbg(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args)
+#define fotg210_err(fotg210, fmt, args...) \
+ dev_err(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args)
+#define fotg210_info(fotg210, fmt, args...) \
+ dev_info(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args)
+#define fotg210_warn(fotg210, fmt, args...) \
+ dev_warn(fotg210_to_hcd(fotg210)->self.controller , fmt , ## args)
+
+#ifdef VERBOSE_DEBUG
+# define fotg210_vdbg fotg210_dbg
+#else
+ static inline void fotg210_vdbg(struct fotg210_hcd *fotg210, ...) {}
+#endif
+
+#ifdef DEBUG
+
+/* check the values in the HCSPARAMS register
+ * (host controller _Structural_ parameters)
+ * see EHCI spec, Table 2-4 for each value
+ */
+static void dbg_hcs_params(struct fotg210_hcd *fotg210, char *label)
+{
+ u32 params = fotg210_readl(fotg210, &fotg210->caps->hcs_params);
+
+ fotg210_dbg(fotg210,
+ "%s hcs_params 0x%x ports=%d\n",
+ label, params,
+ HCS_N_PORTS(params)
+ );
+}
+#else
+
+static inline void dbg_hcs_params(struct fotg210_hcd *fotg210, char *label) {}
+
+#endif
+
+#ifdef DEBUG
+
+/* check the values in the HCCPARAMS register
+ * (host controller _Capability_ parameters)
+ * see EHCI Spec, Table 2-5 for each value
+ */
+static void dbg_hcc_params(struct fotg210_hcd *fotg210, char *label)
+{
+ u32 params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
+
+ fotg210_dbg(fotg210,
+ "%s hcc_params %04x uframes %s%s\n",
+ label,
+ params,
+ HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024",
+ HCC_CANPARK(params) ? " park" : "");
+}
+#else
+
+static inline void dbg_hcc_params(struct fotg210_hcd *fotg210, char *label) {}
+
+#endif
+
+#ifdef DEBUG
+
+static void __maybe_unused
+dbg_qtd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd)
+{
+ fotg210_dbg(fotg210, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd,
+ hc32_to_cpup(fotg210, &qtd->hw_next),
+ hc32_to_cpup(fotg210, &qtd->hw_alt_next),
+ hc32_to_cpup(fotg210, &qtd->hw_token),
+ hc32_to_cpup(fotg210, &qtd->hw_buf[0]));
+ if (qtd->hw_buf[1])
+ fotg210_dbg(fotg210, " p1=%08x p2=%08x p3=%08x p4=%08x\n",
+ hc32_to_cpup(fotg210, &qtd->hw_buf[1]),
+ hc32_to_cpup(fotg210, &qtd->hw_buf[2]),
+ hc32_to_cpup(fotg210, &qtd->hw_buf[3]),
+ hc32_to_cpup(fotg210, &qtd->hw_buf[4]));
+}
+
+static void __maybe_unused
+dbg_qh(const char *label, struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ struct fotg210_qh_hw *hw = qh->hw;
+
+ fotg210_dbg(fotg210, "%s qh %p n%08x info %x %x qtd %x\n", label,
+ qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current);
+ dbg_qtd("overlay", fotg210, (struct fotg210_qtd *) &hw->hw_qtd_next);
+}
+
+static void __maybe_unused
+dbg_itd(const char *label, struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
+{
+ fotg210_dbg(fotg210, "%s[%d] itd %p, next %08x, urb %p\n",
+ label, itd->frame, itd, hc32_to_cpu(fotg210, itd->hw_next),
+ itd->urb);
+ fotg210_dbg(fotg210,
+ " trans: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ hc32_to_cpu(fotg210, itd->hw_transaction[0]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[1]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[2]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[3]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[4]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[5]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[6]),
+ hc32_to_cpu(fotg210, itd->hw_transaction[7]));
+ fotg210_dbg(fotg210,
+ " buf: %08x %08x %08x %08x %08x %08x %08x\n",
+ hc32_to_cpu(fotg210, itd->hw_bufp[0]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[1]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[2]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[3]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[4]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[5]),
+ hc32_to_cpu(fotg210, itd->hw_bufp[6]));
+ fotg210_dbg(fotg210, " index: %d %d %d %d %d %d %d %d\n",
+ itd->index[0], itd->index[1], itd->index[2],
+ itd->index[3], itd->index[4], itd->index[5],
+ itd->index[6], itd->index[7]);
+}
+
+static int __maybe_unused
+dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
+{
+ return scnprintf(buf, len,
+ "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
+ label, label[0] ? " " : "", status,
+ (status & STS_ASS) ? " Async" : "",
+ (status & STS_PSS) ? " Periodic" : "",
+ (status & STS_RECL) ? " Recl" : "",
+ (status & STS_HALT) ? " Halt" : "",
+ (status & STS_IAA) ? " IAA" : "",
+ (status & STS_FATAL) ? " FATAL" : "",
+ (status & STS_FLR) ? " FLR" : "",
+ (status & STS_PCD) ? " PCD" : "",
+ (status & STS_ERR) ? " ERR" : "",
+ (status & STS_INT) ? " INT" : ""
+ );
+}
+
+static int __maybe_unused
+dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
+{
+ return scnprintf(buf, len,
+ "%s%sintrenable %02x%s%s%s%s%s%s",
+ label, label[0] ? " " : "", enable,
+ (enable & STS_IAA) ? " IAA" : "",
+ (enable & STS_FATAL) ? " FATAL" : "",
+ (enable & STS_FLR) ? " FLR" : "",
+ (enable & STS_PCD) ? " PCD" : "",
+ (enable & STS_ERR) ? " ERR" : "",
+ (enable & STS_INT) ? " INT" : ""
+ );
+}
+
+static const char *const fls_strings[] = { "1024", "512", "256", "??" };
+
+static int
+dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
+{
+ return scnprintf(buf, len,
+ "%s%scommand %07x %s=%d ithresh=%d%s%s%s "
+ "period=%s%s %s",
+ label, label[0] ? " " : "", command,
+ (command & CMD_PARK) ? " park" : "(park)",
+ CMD_PARK_CNT(command),
+ (command >> 16) & 0x3f,
+ (command & CMD_IAAD) ? " IAAD" : "",
+ (command & CMD_ASE) ? " Async" : "",
+ (command & CMD_PSE) ? " Periodic" : "",
+ fls_strings[(command >> 2) & 0x3],
+ (command & CMD_RESET) ? " Reset" : "",
+ (command & CMD_RUN) ? "RUN" : "HALT"
+ );
+}
+
+static int
+dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
+{
+ char *sig;
+
+ /* signaling state */
+ switch (status & (3 << 10)) {
+ case 0 << 10:
+ sig = "se0";
+ break;
+ case 1 << 10:
+ sig = "k";
+ break; /* low speed */
+ case 2 << 10:
+ sig = "j";
+ break;
+ default:
+ sig = "?";
+ break;
+ }
+
+ return scnprintf(buf, len,
+ "%s%sport:%d status %06x %d "
+ "sig=%s%s%s%s%s%s%s%s",
+ label, label[0] ? " " : "", port, status,
+ status>>25,/*device address */
+ sig,
+ (status & PORT_RESET) ? " RESET" : "",
+ (status & PORT_SUSPEND) ? " SUSPEND" : "",
+ (status & PORT_RESUME) ? " RESUME" : "",
+ (status & PORT_PEC) ? " PEC" : "",
+ (status & PORT_PE) ? " PE" : "",
+ (status & PORT_CSC) ? " CSC" : "",
+ (status & PORT_CONNECT) ? " CONNECT" : "");
+}
+
+#else
+static inline void __maybe_unused
+dbg_qh(char *label, struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{}
+
+static inline int __maybe_unused
+dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
+{ return 0; }
+
+static inline int __maybe_unused
+dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
+{ return 0; }
+
+static inline int __maybe_unused
+dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
+{ return 0; }
+
+static inline int __maybe_unused
+dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
+{ return 0; }
+
+#endif /* DEBUG */
+
+/* functions have the "wrong" filename when they're output... */
+#define dbg_status(fotg210, label, status) { \
+ char _buf[80]; \
+ dbg_status_buf(_buf, sizeof(_buf), label, status); \
+ fotg210_dbg(fotg210, "%s\n", _buf); \
+}
+
+#define dbg_cmd(fotg210, label, command) { \
+ char _buf[80]; \
+ dbg_command_buf(_buf, sizeof(_buf), label, command); \
+ fotg210_dbg(fotg210, "%s\n", _buf); \
+}
+
+#define dbg_port(fotg210, label, port, status) { \
+ char _buf[80]; \
+ dbg_port_buf(_buf, sizeof(_buf), label, port, status); \
+ fotg210_dbg(fotg210, "%s\n", _buf); \
+}
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef STUB_DEBUG_FILES
+
+static inline void create_debug_files(struct fotg210_hcd *bus) { }
+static inline void remove_debug_files(struct fotg210_hcd *bus) { }
+
+#else
+
+/* troubleshooting help: expose state in debugfs */
+
+static int debug_async_open(struct inode *, struct file *);
+static int debug_periodic_open(struct inode *, struct file *);
+static int debug_registers_open(struct inode *, struct file *);
+static int debug_async_open(struct inode *, struct file *);
+
+static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*);
+static int debug_close(struct inode *, struct file *);
+
+static const struct file_operations debug_async_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_async_open,
+ .read = debug_output,
+ .release = debug_close,
+ .llseek = default_llseek,
+};
+static const struct file_operations debug_periodic_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_periodic_open,
+ .read = debug_output,
+ .release = debug_close,
+ .llseek = default_llseek,
+};
+static const struct file_operations debug_registers_fops = {
+ .owner = THIS_MODULE,
+ .open = debug_registers_open,
+ .read = debug_output,
+ .release = debug_close,
+ .llseek = default_llseek,
+};
+
+static struct dentry *fotg210_debug_root;
+
+struct debug_buffer {
+ ssize_t (*fill_func)(struct debug_buffer *); /* fill method */
+ struct usb_bus *bus;
+ struct mutex mutex; /* protect filling of buffer */
+ size_t count; /* number of characters filled into buffer */
+ char *output_buf;
+ size_t alloc_size;
+};
+
+#define speed_char(info1)({ char tmp; \
+ switch (info1 & (3 << 12)) { \
+ case QH_FULL_SPEED: \
+ tmp = 'f'; break; \
+ case QH_LOW_SPEED: \
+ tmp = 'l'; break; \
+ case QH_HIGH_SPEED: \
+ tmp = 'h'; break; \
+ default: \
+ tmp = '?'; break; \
+ }; tmp; })
+
+static inline char token_mark(struct fotg210_hcd *fotg210, __hc32 token)
+{
+ __u32 v = hc32_to_cpu(fotg210, token);
+
+ if (v & QTD_STS_ACTIVE)
+ return '*';
+ if (v & QTD_STS_HALT)
+ return '-';
+ if (!IS_SHORT_READ(v))
+ return ' ';
+ /* tries to advance through hw_alt_next */
+ return '/';
+}
+
+static void qh_lines(
+ struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh,
+ char **nextp,
+ unsigned *sizep
+)
+{
+ u32 scratch;
+ u32 hw_curr;
+ struct fotg210_qtd *td;
+ unsigned temp;
+ unsigned size = *sizep;
+ char *next = *nextp;
+ char mark;
+ __le32 list_end = FOTG210_LIST_END(fotg210);
+ struct fotg210_qh_hw *hw = qh->hw;
+
+ if (hw->hw_qtd_next == list_end) /* NEC does this */
+ mark = '@';
+ else
+ mark = token_mark(fotg210, hw->hw_token);
+ if (mark == '/') { /* qh_alt_next controls qh advance? */
+ if ((hw->hw_alt_next & QTD_MASK(fotg210))
+ == fotg210->async->hw->hw_alt_next)
+ mark = '#'; /* blocked */
+ else if (hw->hw_alt_next == list_end)
+ mark = '.'; /* use hw_qtd_next */
+ /* else alt_next points to some other qtd */
+ }
+ scratch = hc32_to_cpup(fotg210, &hw->hw_info1);
+ hw_curr = (mark == '*') ? hc32_to_cpup(fotg210, &hw->hw_current) : 0;
+ temp = scnprintf(next, size,
+ "qh/%p dev%d %cs ep%d %08x %08x(%08x%c %s nak%d)",
+ qh, scratch & 0x007f,
+ speed_char(scratch),
+ (scratch >> 8) & 0x000f,
+ scratch, hc32_to_cpup(fotg210, &hw->hw_info2),
+ hc32_to_cpup(fotg210, &hw->hw_token), mark,
+ (cpu_to_hc32(fotg210, QTD_TOGGLE) & hw->hw_token)
+ ? "data1" : "data0",
+ (hc32_to_cpup(fotg210, &hw->hw_alt_next) >> 1) & 0x0f);
+ size -= temp;
+ next += temp;
+
+ /* hc may be modifying the list as we read it ... */
+ list_for_each_entry(td, &qh->qtd_list, qtd_list) {
+ scratch = hc32_to_cpup(fotg210, &td->hw_token);
+ mark = ' ';
+ if (hw_curr == td->qtd_dma)
+ mark = '*';
+ else if (hw->hw_qtd_next == cpu_to_hc32(fotg210, td->qtd_dma))
+ mark = '+';
+ else if (QTD_LENGTH(scratch)) {
+ if (td->hw_alt_next == fotg210->async->hw->hw_alt_next)
+ mark = '#';
+ else if (td->hw_alt_next != list_end)
+ mark = '/';
+ }
+ temp = snprintf(next, size,
+ "\n\t%p%c%s len=%d %08x urb %p",
+ td, mark, ({ char *tmp;
+ switch ((scratch>>8)&0x03) {
+ case 0:
+ tmp = "out";
+ break;
+ case 1:
+ tmp = "in";
+ break;
+ case 2:
+ tmp = "setup";
+ break;
+ default:
+ tmp = "?";
+ break;
+ } tmp; }),
+ (scratch >> 16) & 0x7fff,
+ scratch,
+ td->urb);
+ if (size < temp)
+ temp = size;
+ size -= temp;
+ next += temp;
+ if (temp == size)
+ goto done;
+ }
+
+ temp = snprintf(next, size, "\n");
+ if (size < temp)
+ temp = size;
+ size -= temp;
+ next += temp;
+
+done:
+ *sizep = size;
+ *nextp = next;
+}
+
+static ssize_t fill_async_buffer(struct debug_buffer *buf)
+{
+ struct usb_hcd *hcd;
+ struct fotg210_hcd *fotg210;
+ unsigned long flags;
+ unsigned temp, size;
+ char *next;
+ struct fotg210_qh *qh;
+
+ hcd = bus_to_hcd(buf->bus);
+ fotg210 = hcd_to_fotg210(hcd);
+ next = buf->output_buf;
+ size = buf->alloc_size;
+
+ *next = 0;
+
+ /* dumps a snapshot of the async schedule.
+ * usually empty except for long-term bulk reads, or head.
+ * one QH per line, and TDs we know about
+ */
+ spin_lock_irqsave(&fotg210->lock, flags);
+ for (qh = fotg210->async->qh_next.qh; size > 0 && qh;
+ qh = qh->qh_next.qh)
+ qh_lines(fotg210, qh, &next, &size);
+ if (fotg210->async_unlink && size > 0) {
+ temp = scnprintf(next, size, "\nunlink =\n");
+ size -= temp;
+ next += temp;
+
+ for (qh = fotg210->async_unlink; size > 0 && qh;
+ qh = qh->unlink_next)
+ qh_lines(fotg210, qh, &next, &size);
+ }
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+
+ return strlen(buf->output_buf);
+}
+
+#define DBG_SCHED_LIMIT 64
+static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
+{
+ struct usb_hcd *hcd;
+ struct fotg210_hcd *fotg210;
+ unsigned long flags;
+ union fotg210_shadow p, *seen;
+ unsigned temp, size, seen_count;
+ char *next;
+ unsigned i;
+ __hc32 tag;
+
+ seen = kmalloc(DBG_SCHED_LIMIT * sizeof(*seen), GFP_ATOMIC);
+ if (!seen)
+ return 0;
+ seen_count = 0;
+
+ hcd = bus_to_hcd(buf->bus);
+ fotg210 = hcd_to_fotg210(hcd);
+ next = buf->output_buf;
+ size = buf->alloc_size;
+
+ temp = scnprintf(next, size, "size = %d\n", fotg210->periodic_size);
+ size -= temp;
+ next += temp;
+
+ /* dump a snapshot of the periodic schedule.
+ * iso changes, interrupt usually doesn't.
+ */
+ spin_lock_irqsave(&fotg210->lock, flags);
+ for (i = 0; i < fotg210->periodic_size; i++) {
+ p = fotg210->pshadow[i];
+ if (likely(!p.ptr))
+ continue;
+ tag = Q_NEXT_TYPE(fotg210, fotg210->periodic[i]);
+
+ temp = scnprintf(next, size, "%4d: ", i);
+ size -= temp;
+ next += temp;
+
+ do {
+ struct fotg210_qh_hw *hw;
+
+ switch (hc32_to_cpu(fotg210, tag)) {
+ case Q_TYPE_QH:
+ hw = p.qh->hw;
+ temp = scnprintf(next, size, " qh%d-%04x/%p",
+ p.qh->period,
+ hc32_to_cpup(fotg210,
+ &hw->hw_info2)
+ /* uframe masks */
+ & (QH_CMASK | QH_SMASK),
+ p.qh);
+ size -= temp;
+ next += temp;
+ /* don't repeat what follows this qh */
+ for (temp = 0; temp < seen_count; temp++) {
+ if (seen[temp].ptr != p.ptr)
+ continue;
+ if (p.qh->qh_next.ptr) {
+ temp = scnprintf(next, size,
+ " ...");
+ size -= temp;
+ next += temp;
+ }
+ break;
+ }
+ /* show more info the first time around */
+ if (temp == seen_count) {
+ u32 scratch = hc32_to_cpup(fotg210,
+ &hw->hw_info1);
+ struct fotg210_qtd *qtd;
+ char *type = "";
+
+ /* count tds, get ep direction */
+ temp = 0;
+ list_for_each_entry(qtd,
+ &p.qh->qtd_list,
+ qtd_list) {
+ temp++;
+ switch (0x03 & (hc32_to_cpu(
+ fotg210,
+ qtd->hw_token) >> 8)) {
+ case 0:
+ type = "out";
+ continue;
+ case 1:
+ type = "in";
+ continue;
+ }
+ }
+
+ temp = scnprintf(next, size,
+ "(%c%d ep%d%s "
+ "[%d/%d] q%d p%d)",
+ speed_char(scratch),
+ scratch & 0x007f,
+ (scratch >> 8) & 0x000f, type,
+ p.qh->usecs, p.qh->c_usecs,
+ temp,
+ 0x7ff & (scratch >> 16));
+
+ if (seen_count < DBG_SCHED_LIMIT)
+ seen[seen_count++].qh = p.qh;
+ } else
+ temp = 0;
+ tag = Q_NEXT_TYPE(fotg210, hw->hw_next);
+ p = p.qh->qh_next;
+ break;
+ case Q_TYPE_FSTN:
+ temp = scnprintf(next, size,
+ " fstn-%8x/%p", p.fstn->hw_prev,
+ p.fstn);
+ tag = Q_NEXT_TYPE(fotg210, p.fstn->hw_next);
+ p = p.fstn->fstn_next;
+ break;
+ case Q_TYPE_ITD:
+ temp = scnprintf(next, size,
+ " itd/%p", p.itd);
+ tag = Q_NEXT_TYPE(fotg210, p.itd->hw_next);
+ p = p.itd->itd_next;
+ break;
+ }
+ size -= temp;
+ next += temp;
+ } while (p.ptr);
+
+ temp = scnprintf(next, size, "\n");
+ size -= temp;
+ next += temp;
+ }
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ kfree(seen);
+
+ return buf->alloc_size - size;
+}
+#undef DBG_SCHED_LIMIT
+
+static const char *rh_state_string(struct fotg210_hcd *fotg210)
+{
+ switch (fotg210->rh_state) {
+ case FOTG210_RH_HALTED:
+ return "halted";
+ case FOTG210_RH_SUSPENDED:
+ return "suspended";
+ case FOTG210_RH_RUNNING:
+ return "running";
+ case FOTG210_RH_STOPPING:
+ return "stopping";
+ }
+ return "?";
+}
+
+static ssize_t fill_registers_buffer(struct debug_buffer *buf)
+{
+ struct usb_hcd *hcd;
+ struct fotg210_hcd *fotg210;
+ unsigned long flags;
+ unsigned temp, size, i;
+ char *next, scratch[80];
+ static const char fmt[] = "%*s\n";
+ static const char label[] = "";
+
+ hcd = bus_to_hcd(buf->bus);
+ fotg210 = hcd_to_fotg210(hcd);
+ next = buf->output_buf;
+ size = buf->alloc_size;
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ if (!HCD_HW_ACCESSIBLE(hcd)) {
+ size = scnprintf(next, size,
+ "bus %s, device %s\n"
+ "%s\n"
+ "SUSPENDED(no register access)\n",
+ hcd->self.controller->bus->name,
+ dev_name(hcd->self.controller),
+ hcd->product_desc);
+ goto done;
+ }
+
+ /* Capability Registers */
+ i = HC_VERSION(fotg210, fotg210_readl(fotg210,
+ &fotg210->caps->hc_capbase));
+ temp = scnprintf(next, size,
+ "bus %s, device %s\n"
+ "%s\n"
+ "EHCI %x.%02x, rh state %s\n",
+ hcd->self.controller->bus->name,
+ dev_name(hcd->self.controller),
+ hcd->product_desc,
+ i >> 8, i & 0x0ff, rh_state_string(fotg210));
+ size -= temp;
+ next += temp;
+
+ /* FIXME interpret both types of params */
+ i = fotg210_readl(fotg210, &fotg210->caps->hcs_params);
+ temp = scnprintf(next, size, "structural params 0x%08x\n", i);
+ size -= temp;
+ next += temp;
+
+ i = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
+ temp = scnprintf(next, size, "capability params 0x%08x\n", i);
+ size -= temp;
+ next += temp;
+
+ /* Operational Registers */
+ temp = dbg_status_buf(scratch, sizeof(scratch), label,
+ fotg210_readl(fotg210, &fotg210->regs->status));
+ temp = scnprintf(next, size, fmt, temp, scratch);
+ size -= temp;
+ next += temp;
+
+ temp = dbg_command_buf(scratch, sizeof(scratch), label,
+ fotg210_readl(fotg210, &fotg210->regs->command));
+ temp = scnprintf(next, size, fmt, temp, scratch);
+ size -= temp;
+ next += temp;
+
+ temp = dbg_intr_buf(scratch, sizeof(scratch), label,
+ fotg210_readl(fotg210, &fotg210->regs->intr_enable));
+ temp = scnprintf(next, size, fmt, temp, scratch);
+ size -= temp;
+ next += temp;
+
+ temp = scnprintf(next, size, "uframe %04x\n",
+ fotg210_read_frame_index(fotg210));
+ size -= temp;
+ next += temp;
+
+ if (fotg210->async_unlink) {
+ temp = scnprintf(next, size, "async unlink qh %p\n",
+ fotg210->async_unlink);
+ size -= temp;
+ next += temp;
+ }
+
+#ifdef FOTG210_STATS
+ temp = scnprintf(next, size,
+ "irq normal %ld err %ld iaa %ld(lost %ld)\n",
+ fotg210->stats.normal, fotg210->stats.error, fotg210->stats.iaa,
+ fotg210->stats.lost_iaa);
+ size -= temp;
+ next += temp;
+
+ temp = scnprintf(next, size, "complete %ld unlink %ld\n",
+ fotg210->stats.complete, fotg210->stats.unlink);
+ size -= temp;
+ next += temp;
+#endif
+
+done:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+
+ return buf->alloc_size - size;
+}
+
+static struct debug_buffer *alloc_buffer(struct usb_bus *bus,
+ ssize_t (*fill_func)(struct debug_buffer *))
+{
+ struct debug_buffer *buf;
+
+ buf = kzalloc(sizeof(struct debug_buffer), GFP_KERNEL);
+
+ if (buf) {
+ buf->bus = bus;
+ buf->fill_func = fill_func;
+ mutex_init(&buf->mutex);
+ buf->alloc_size = PAGE_SIZE;
+ }
+
+ return buf;
+}
+
+static int fill_buffer(struct debug_buffer *buf)
+{
+ int ret = 0;
+
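+ /* the output buffer is allocated lazily, on first use */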
+ if (!buf->output_buf)
+ buf->output_buf = vmalloc(buf->alloc_size);
+
+ if (!buf->output_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = buf->fill_func(buf);
+
+ if (ret >= 0) {
+ buf->count = ret;
+ ret = 0;
+ }
+
+out:
+ return ret;
+}
+
+static ssize_t debug_output(struct file *file, char __user *user_buf,
+ size_t len, loff_t *offset)
+{
+ struct debug_buffer *buf = file->private_data;
+ int ret = 0;
+
+ mutex_lock(&buf->mutex);
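+ /* generate the output on the first read; later reads reuse the same buffer */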
+ if (buf->count == 0) {
+ ret = fill_buffer(buf);
+ if (ret != 0) {
+ mutex_unlock(&buf->mutex);
+ goto out;
+ }
+ }
+ mutex_unlock(&buf->mutex);
+
+ ret = simple_read_from_buffer(user_buf, len, offset,
+ buf->output_buf, buf->count);
+
+out:
+ return ret;
+
+}
+
+static int debug_close(struct inode *inode, struct file *file)
+{
+ struct debug_buffer *buf = file->private_data;
+
+ if (buf) {
+ vfree(buf->output_buf);
+ kfree(buf);
+ }
+
+ return 0;
+}
+static int debug_async_open(struct inode *inode, struct file *file)
+{
+ file->private_data = alloc_buffer(inode->i_private, fill_async_buffer);
+
+ return file->private_data ? 0 : -ENOMEM;
+}
+
+static int debug_periodic_open(struct inode *inode, struct file *file)
+{
+ struct debug_buffer *buf;
+ buf = alloc_buffer(inode->i_private, fill_periodic_buffer);
+ if (!buf)
+ return -ENOMEM;
+
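+ /* periodic dumps can exceed one page; 64-bit builds print wider pointers, so give them extra room */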
+ buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8)*PAGE_SIZE;
+ file->private_data = buf;
+ return 0;
+}
+
+static int debug_registers_open(struct inode *inode, struct file *file)
+{
+ file->private_data = alloc_buffer(inode->i_private,
+ fill_registers_buffer);
+
+ return file->private_data ? 0 : -ENOMEM;
+}
+
+static inline void create_debug_files(struct fotg210_hcd *fotg210)
+{
+ struct usb_bus *bus = &fotg210_to_hcd(fotg210)->self;
+
+ fotg210->debug_dir = debugfs_create_dir(bus->bus_name,
+ fotg210_debug_root);
+ if (!fotg210->debug_dir)
+ return;
+
+ if (!debugfs_create_file("async", S_IRUGO, fotg210->debug_dir, bus,
+ &debug_async_fops))
+ goto file_error;
+
+ if (!debugfs_create_file("periodic", S_IRUGO, fotg210->debug_dir, bus,
+ &debug_periodic_fops))
+ goto file_error;
+
+ if (!debugfs_create_file("registers", S_IRUGO, fotg210->debug_dir, bus,
+ &debug_registers_fops))
+ goto file_error;
+
+ return;
+
+file_error:
+ debugfs_remove_recursive(fotg210->debug_dir);
+}
+
+static inline void remove_debug_files(struct fotg210_hcd *fotg210)
+{
+ debugfs_remove_recursive(fotg210->debug_dir);
+}
+
+#endif /* STUB_DEBUG_FILES */
+/*-------------------------------------------------------------------------*/
+
+/*
+ * handshake - spin reading hc until handshake completes or fails
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @usec: timeout in microseconds
+ *
+ * Returns negative errno, or zero on success
+ *
+ * Success happens when the "mask" bits have the specified value (hardware
+ * handshake done). There are two failure modes: the "usec" timeout expires
+ * (major hardware flakeout), or the register reads as all-ones (hardware
+ * removed).
+ *
+ * That last failure should only happen in cases like physical cardbus eject
+ * before driver shutdown. But it also seems to be caused by bugs in cardbus
+ * bridge shutdown: shutting down the bridge before the devices using it.
+ */
+static int handshake(struct fotg210_hcd *fotg210, void __iomem *ptr,
+ u32 mask, u32 done, int usec)
+{
+ u32 result;
+
+ do {
+ result = fotg210_readl(fotg210, ptr);
+ if (result == ~(u32)0) /* card removed */
+ return -ENODEV;
+ result &= mask;
+ if (result == done)
+ return 0;
+ udelay(1);
+ usec--;
+ } while (usec > 0);
+ return -ETIMEDOUT;
+}
+
+/*
+ * Force HC to halt state from unknown (EHCI spec section 2.3).
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static int fotg210_halt(struct fotg210_hcd *fotg210)
+{
+ u32 temp;
+
+ spin_lock_irq(&fotg210->lock);
+
+ /* disable any irqs left enabled by previous code */
+ fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
+
+ /*
+ * This routine gets called during probe before fotg210->command
+ * has been initialized, so we can't rely on its value.
+ */
+ fotg210->command &= ~CMD_RUN;
+ temp = fotg210_readl(fotg210, &fotg210->regs->command);
+ temp &= ~(CMD_RUN | CMD_IAAD);
+ fotg210_writel(fotg210, temp, &fotg210->regs->command);
+
+ spin_unlock_irq(&fotg210->lock);
+ synchronize_irq(fotg210_to_hcd(fotg210)->irq);
+
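+ /* wait up to 16 microframes (16 * 125 usec) for STS_HALT to assert */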
+ return handshake(fotg210, &fotg210->regs->status,
+ STS_HALT, STS_HALT, 16 * 125);
+}
+
+/*
+ * Reset a non-running (STS_HALT == 1) controller.
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static int fotg210_reset(struct fotg210_hcd *fotg210)
+{
+ int retval;
+ u32 command = fotg210_readl(fotg210, &fotg210->regs->command);
+
+ /* If the EHCI debug controller is active, special care must be
+ * taken before and after a host controller reset */
+ if (fotg210->debug && !dbgp_reset_prep(fotg210_to_hcd(fotg210)))
+ fotg210->debug = NULL;
+
+ command |= CMD_RESET;
+ dbg_cmd(fotg210, "reset", command);
+ fotg210_writel(fotg210, command, &fotg210->regs->command);
+ fotg210->rh_state = FOTG210_RH_HALTED;
+ fotg210->next_statechange = jiffies;
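+ /* the HC clears CMD_RESET when the reset completes; allow up to 250 ms */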
+ retval = handshake(fotg210, &fotg210->regs->command,
+ CMD_RESET, 0, 250 * 1000);
+
+ if (retval)
+ return retval;
+
+ if (fotg210->debug)
+ dbgp_external_startup(fotg210_to_hcd(fotg210));
+
+ fotg210->port_c_suspend = fotg210->suspended_ports =
+ fotg210->resuming_ports = 0;
+ return retval;
+}
+
+/*
+ * Idle the controller (turn off the schedules).
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static void fotg210_quiesce(struct fotg210_hcd *fotg210)
+{
+ u32 temp;
+
+ if (fotg210->rh_state != FOTG210_RH_RUNNING)
+ return;
+
+ /* wait for any schedule enables/disables to take effect */
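+ /* CMD_ASE/CMD_PSE line up with STS_ASS/STS_PSS when shifted left by 10 bits */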
+ temp = (fotg210->command << 10) & (STS_ASS | STS_PSS);
+ handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, temp,
+ 16 * 125);
+
+ /* then disable anything that's still active */
+ spin_lock_irq(&fotg210->lock);
+ fotg210->command &= ~(CMD_ASE | CMD_PSE);
+ fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
+ spin_unlock_irq(&fotg210->lock);
+
+ /* hardware can take 16 microframes to turn off ... */
+ handshake(fotg210, &fotg210->regs->status, STS_ASS | STS_PSS, 0,
+ 16 * 125);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void end_unlink_async(struct fotg210_hcd *fotg210);
+static void unlink_empty_async(struct fotg210_hcd *fotg210);
+static void fotg210_work(struct fotg210_hcd *fotg210);
+static void start_unlink_intr(struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh);
+static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
+
+/*-------------------------------------------------------------------------*/
+
+/* Set a bit in the USBCMD register */
+static void fotg210_set_command_bit(struct fotg210_hcd *fotg210, u32 bit)
+{
+ fotg210->command |= bit;
+ fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
+
+ /* unblock posted write */
+ fotg210_readl(fotg210, &fotg210->regs->command);
+}
+
+/* Clear a bit in the USBCMD register */
+static void fotg210_clear_command_bit(struct fotg210_hcd *fotg210, u32 bit)
+{
+ fotg210->command &= ~bit;
+ fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
+
+ /* unblock posted write */
+ fotg210_readl(fotg210, &fotg210->regs->command);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI timer support... Now using hrtimers.
+ *
+ * Lots of different events are triggered from fotg210->hrtimer. Whenever
+ * the timer routine runs, it checks each possible event; events that are
+ * currently enabled and whose expiration time has passed get handled.
+ * The set of enabled events is stored as a collection of bitflags in
+ * fotg210->enabled_hrtimer_events, and they are numbered in order of
+ * increasing delay values (ranging between 1 ms and 100 ms).
+ *
+ * Rather than implementing a sorted list or tree of all pending events,
+ * we keep track only of the lowest-numbered pending event, in
+ * fotg210->next_hrtimer_event. Whenever fotg210->hrtimer gets restarted, its
+ * expiration time is set to the timeout value for this event.
+ *
+ * As a result, events might not get handled right away; the actual delay
+ * could be anywhere up to twice the requested delay. This doesn't
+ * matter, because none of the events are especially time-critical. The
+ * ones that matter most all have a delay of 1 ms, so they will be
+ * handled after 2 ms at most, which is okay. In addition to this, we
+ * allow for an expiration range of 1 ms.
+ */
+
+/*
+ * Delay lengths for the hrtimer event types.
+ * Keep this list sorted by delay length, in the same order as
+ * the event types indexed by enum fotg210_hrtimer_event in fotg210.h.
+ */
+static unsigned event_delays_ns[] = {
+ 1 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_POLL_ASS */
+ 1 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_POLL_PSS */
+ 1 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_POLL_DEAD */
+ 1125 * NSEC_PER_USEC, /* FOTG210_HRTIMER_UNLINK_INTR */
+ 2 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_FREE_ITDS */
+ 6 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_ASYNC_UNLINKS */
+ 10 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_IAA_WATCHDOG */
+ 10 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_DISABLE_PERIODIC */
+ 15 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_DISABLE_ASYNC */
+ 100 * NSEC_PER_MSEC, /* FOTG210_HRTIMER_IO_WATCHDOG */
+};
+
+/* Enable a pending hrtimer event */
+static void fotg210_enable_event(struct fotg210_hcd *fotg210, unsigned event,
+ bool resched)
+{
+ ktime_t *timeout = &fotg210->hr_timeouts[event];
+
+ if (resched)
+ *timeout = ktime_add(ktime_get(),
+ ktime_set(0, event_delays_ns[event]));
+ fotg210->enabled_hrtimer_events |= (1 << event);
+
+ /* Track only the lowest-numbered pending event */
+ if (event < fotg210->next_hrtimer_event) {
+ fotg210->next_hrtimer_event = event;
+ hrtimer_start_range_ns(&fotg210->hrtimer, *timeout,
+ NSEC_PER_MSEC, HRTIMER_MODE_ABS);
+ }
+}
+
+
+/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */
+static void fotg210_poll_ASS(struct fotg210_hcd *fotg210)
+{
+ unsigned actual, want;
+
+ /* Don't enable anything if the controller isn't running (e.g., died) */
+ if (fotg210->rh_state != FOTG210_RH_RUNNING)
+ return;
+
+ want = (fotg210->command & CMD_ASE) ? STS_ASS : 0;
+ actual = fotg210_readl(fotg210, &fotg210->regs->status) & STS_ASS;
+
+ if (want != actual) {
+
+ /* Poll again later, but give up after about 20 ms */
+ if (fotg210->ASS_poll_count++ < 20) {
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_ASS,
+ true);
+ return;
+ }
+ fotg210_dbg(fotg210, "Waited too long for the async schedule status (%x/%x), giving up\n",
+ want, actual);
+ }
+ fotg210->ASS_poll_count = 0;
+
+ /* The status is up-to-date; restart or stop the schedule as needed */
+ if (want == 0) { /* Stopped */
+ if (fotg210->async_count > 0)
+ fotg210_set_command_bit(fotg210, CMD_ASE);
+
+ } else { /* Running */
+ if (fotg210->async_count == 0) {
+
+ /* Turn off the schedule after a while */
+ fotg210_enable_event(fotg210,
+ FOTG210_HRTIMER_DISABLE_ASYNC,
+ true);
+ }
+ }
+}
+
+/* Turn off the async schedule after a brief delay */
+static void fotg210_disable_ASE(struct fotg210_hcd *fotg210)
+{
+ fotg210_clear_command_bit(fotg210, CMD_ASE);
+}
+
+
+/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */
+static void fotg210_poll_PSS(struct fotg210_hcd *fotg210)
+{
+ unsigned actual, want;
+
+ /* Don't do anything if the controller isn't running (e.g., died) */
+ if (fotg210->rh_state != FOTG210_RH_RUNNING)
+ return;
+
+ want = (fotg210->command & CMD_PSE) ? STS_PSS : 0;
+ actual = fotg210_readl(fotg210, &fotg210->regs->status) & STS_PSS;
+
+ if (want != actual) {
+
+ /* Poll again later, but give up after about 20 ms */
+ if (fotg210->PSS_poll_count++ < 20) {
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_POLL_PSS,
+ true);
+ return;
+ }
+ fotg210_dbg(fotg210, "Waited too long for the periodic schedule status (%x/%x), giving up\n",
+ want, actual);
+ }
+ fotg210->PSS_poll_count = 0;
+
+ /* The status is up-to-date; restart or stop the schedule as needed */
+ if (want == 0) { /* Stopped */
+ if (fotg210->periodic_count > 0)
+ fotg210_set_command_bit(fotg210, CMD_PSE);
+
+ } else { /* Running */
+ if (fotg210->periodic_count == 0) {
+
+ /* Turn off the schedule after a while */
+ fotg210_enable_event(fotg210,
+ FOTG210_HRTIMER_DISABLE_PERIODIC,
+ true);
+ }
+ }
+}
+
+/* Turn off the periodic schedule after a brief delay */
+static void fotg210_disable_PSE(struct fotg210_hcd *fotg210)
+{
+ fotg210_clear_command_bit(fotg210, CMD_PSE);
+}
+
+
+/* Poll the STS_HALT status bit; see when a dead controller stops */
+static void fotg210_handle_controller_death(struct fotg210_hcd *fotg210)
+{
+ if (!(fotg210_readl(fotg210, &fotg210->regs->status) & STS_HALT)) {
+
+ /* Give up after a few milliseconds */
+ if (fotg210->died_poll_count++ < 5) {
+ /* Try again later */
+ fotg210_enable_event(fotg210,
+ FOTG210_HRTIMER_POLL_DEAD, true);
+ return;
+ }
+ fotg210_warn(fotg210, "Waited too long for the controller to stop, giving up\n");
+ }
+
+ /* Clean up the mess */
+ fotg210->rh_state = FOTG210_RH_HALTED;
+ fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
+ fotg210_work(fotg210);
+ end_unlink_async(fotg210);
+
+ /* Not in process context, so don't try to reset the controller */
+}
+
+
+/* Handle unlinked interrupt QHs once they are gone from the hardware */
+static void fotg210_handle_intr_unlinks(struct fotg210_hcd *fotg210)
+{
+ bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING);
+
+ /*
+ * Process all the QHs on the intr_unlink list that were added
+ * before the current unlink cycle began. The list is in
+ * temporal order, so stop when we reach the first entry in the
+ * current cycle. But if the root hub isn't running then
+ * process all the QHs on the list.
+ */
+ fotg210->intr_unlinking = true;
+ while (fotg210->intr_unlink) {
+ struct fotg210_qh *qh = fotg210->intr_unlink;
+
+ if (!stopped && qh->unlink_cycle == fotg210->intr_unlink_cycle)
+ break;
+ fotg210->intr_unlink = qh->unlink_next;
+ qh->unlink_next = NULL;
+ end_unlink_intr(fotg210, qh);
+ }
+
+ /* Handle remaining entries later */
+ if (fotg210->intr_unlink) {
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR,
+ true);
+ ++fotg210->intr_unlink_cycle;
+ }
+ fotg210->intr_unlinking = false;
+}
+
+
+/* Start another free-iTDs/siTDs cycle */
+static void start_free_itds(struct fotg210_hcd *fotg210)
+{
+ if (!(fotg210->enabled_hrtimer_events &
+ BIT(FOTG210_HRTIMER_FREE_ITDS))) {
+ fotg210->last_itd_to_free = list_entry(
+ fotg210->cached_itd_list.prev,
+ struct fotg210_itd, itd_list);
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_FREE_ITDS, true);
+ }
+}
+
+/* Wait for controller to stop using old iTDs and siTDs */
+static void end_free_itds(struct fotg210_hcd *fotg210)
+{
+ struct fotg210_itd *itd, *n;
+
+ if (fotg210->rh_state < FOTG210_RH_RUNNING)
+ fotg210->last_itd_to_free = NULL;
+
+ list_for_each_entry_safe(itd, n, &fotg210->cached_itd_list, itd_list) {
+ list_del(&itd->itd_list);
+ dma_pool_free(fotg210->itd_pool, itd, itd->itd_dma);
+ if (itd == fotg210->last_itd_to_free)
+ break;
+ }
+
+ if (!list_empty(&fotg210->cached_itd_list))
+ start_free_itds(fotg210);
+}
+
+
+/* Handle lost (or very late) IAA interrupts */
+static void fotg210_iaa_watchdog(struct fotg210_hcd *fotg210)
+{
+ if (fotg210->rh_state != FOTG210_RH_RUNNING)
+ return;
+
+ /*
+ * Lost IAA irqs wedge things badly; seen first with a vt8235.
+ * So we need this watchdog, but must protect it against both
+ * (a) SMP races against real IAA firing and retriggering, and
+ * (b) clean HC shutdown, when IAA watchdog was pending.
+ */
+ if (fotg210->async_iaa) {
+ u32 cmd, status;
+
+ /* If we get here, IAA is *REALLY* late. It's barely
+ * conceivable that the system is so busy that CMD_IAAD
+ * is still legitimately set, so let's be sure it's
+ * clear before we read STS_IAA. (The HC should clear
+ * CMD_IAAD when it sets STS_IAA.)
+ */
+ cmd = fotg210_readl(fotg210, &fotg210->regs->command);
+
+ /*
+ * If IAA is set here it either legitimately triggered
+ * after the watchdog timer expired (_way_ late, so we'll
+ * still count it as lost) ... or a silicon erratum:
+ * - VIA seems to set IAA without triggering the IRQ;
+ * - IAAD potentially cleared without setting IAA.
+ */
+ status = fotg210_readl(fotg210, &fotg210->regs->status);
+ if ((status & STS_IAA) || !(cmd & CMD_IAAD)) {
+ COUNT(fotg210->stats.lost_iaa);
+ fotg210_writel(fotg210, STS_IAA,
+ &fotg210->regs->status);
+ }
+
+ fotg210_vdbg(fotg210, "IAA watchdog: status %x cmd %x\n",
+ status, cmd);
+ end_unlink_async(fotg210);
+ }
+}
+
+
+/* Enable the I/O watchdog, if appropriate */
+static void turn_on_io_watchdog(struct fotg210_hcd *fotg210)
+{
+ /* Not needed if the controller isn't running or it's already enabled */
+ if (fotg210->rh_state != FOTG210_RH_RUNNING ||
+ (fotg210->enabled_hrtimer_events &
+ BIT(FOTG210_HRTIMER_IO_WATCHDOG)))
+ return;
+
+ /*
+ * Isochronous transfers always need the watchdog.
+ * For other sorts we use it only if the flag is set.
+ */
+ if (fotg210->isoc_count > 0 || (fotg210->need_io_watchdog &&
+ fotg210->async_count + fotg210->intr_count > 0))
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_IO_WATCHDOG,
+ true);
+}
+
+
+/*
+ * Handler functions for the hrtimer event types.
+ * Keep this array in the same order as the event types indexed by
+ * enum fotg210_hrtimer_event in fotg210.h.
+ */
+static void (*event_handlers[])(struct fotg210_hcd *) = {
+ fotg210_poll_ASS, /* FOTG210_HRTIMER_POLL_ASS */
+ fotg210_poll_PSS, /* FOTG210_HRTIMER_POLL_PSS */
+ fotg210_handle_controller_death, /* FOTG210_HRTIMER_POLL_DEAD */
+ fotg210_handle_intr_unlinks, /* FOTG210_HRTIMER_UNLINK_INTR */
+ end_free_itds, /* FOTG210_HRTIMER_FREE_ITDS */
+ unlink_empty_async, /* FOTG210_HRTIMER_ASYNC_UNLINKS */
+ fotg210_iaa_watchdog, /* FOTG210_HRTIMER_IAA_WATCHDOG */
+ fotg210_disable_PSE, /* FOTG210_HRTIMER_DISABLE_PERIODIC */
+ fotg210_disable_ASE, /* FOTG210_HRTIMER_DISABLE_ASYNC */
+ fotg210_work, /* FOTG210_HRTIMER_IO_WATCHDOG */
+};
+
+static enum hrtimer_restart fotg210_hrtimer_func(struct hrtimer *t)
+{
+ struct fotg210_hcd *fotg210 =
+ container_of(t, struct fotg210_hcd, hrtimer);
+ ktime_t now;
+ unsigned long events;
+ unsigned long flags;
+ unsigned e;
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ events = fotg210->enabled_hrtimer_events;
+ fotg210->enabled_hrtimer_events = 0;
+ fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT;
+
+ /*
+ * Check each pending event. If its time has expired, handle
+ * the event; otherwise re-enable it.
+ */
+ now = ktime_get();
+ for_each_set_bit(e, &events, FOTG210_HRTIMER_NUM_EVENTS) {
+ if (now.tv64 >= fotg210->hr_timeouts[e].tv64)
+ event_handlers[e](fotg210);
+ else
+ fotg210_enable_event(fotg210, e, false);
+ }
+
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ return HRTIMER_NORESTART;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define fotg210_bus_suspend NULL
+#define fotg210_bus_resume NULL
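+/* root-hub suspend/resume is not supported here, so these methods stay NULL */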
+
+/*-------------------------------------------------------------------------*/
+
+static int check_reset_complete(
+ struct fotg210_hcd *fotg210,
+ int index,
+ u32 __iomem *status_reg,
+ int port_status
+) {
+ if (!(port_status & PORT_CONNECT))
+ return port_status;
+
+ /* if reset finished and it's still not enabled -- handoff */
+ if (!(port_status & PORT_PE)) {
+ /* with integrated TT, there's nobody to hand it to! */
+ fotg210_dbg(fotg210,
+ "Failed to enable port %d on root hub TT\n",
+ index+1);
+ return port_status;
+ } else {
+ fotg210_dbg(fotg210, "port %d reset complete, port enabled\n",
+ index + 1);
+ }
+
+ return port_status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+
+/* build "status change" packet (one or two bytes) from HC registers */
+
+static int
+fotg210_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ u32 temp, status;
+ u32 mask;
+ int retval = 1;
+ unsigned long flags;
+
+ /* init status to no-changes */
+ buf[0] = 0;
+
+ /* Inform the core about resumes-in-progress by returning
+ * a non-zero value even if there are no status changes.
+ */
+ status = fotg210->resuming_ports;
+
+ mask = PORT_CSC | PORT_PEC;
+ /* PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND */
+
+ /* no hub change reports (bit 0) for now (power, ...) */
+
+ /* port N changes (bit N)? */
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ temp = fotg210_readl(fotg210, &fotg210->regs->port_status);
+
+ /*
+ * Return status information even for ports with OWNER set.
+ * Otherwise khubd wouldn't see the disconnect event when a
+ * high-speed device is switched over to the companion
+ * controller by the user.
+ */
+
+ if ((temp & mask) != 0 || test_bit(0, &fotg210->port_c_suspend)
+ || (fotg210->reset_done[0] && time_after_eq(
+ jiffies, fotg210->reset_done[0]))) {
+ buf[0] |= 1 << 1;
+ status = STS_PCD;
+ }
+ /* FIXME autosuspend idle root hubs */
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ return status ? retval : 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void
+fotg210_hub_descriptor(
+ struct fotg210_hcd *fotg210,
+ struct usb_hub_descriptor *desc
+) {
+ int ports = HCS_N_PORTS(fotg210->hcs_params);
+ u16 temp;
+
+ desc->bDescriptorType = 0x29;
+ desc->bPwrOn2PwrGood = 10; /* EHCI 1.0, 2.3.9 says 20ms max */
+ desc->bHubContrCurrent = 0;
+
+ desc->bNbrPorts = ports;
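+ /* bytes per bitmap: one bit per port plus reserved bit 0, rounded up */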
+ temp = 1 + (ports / 8);
+ desc->bDescLength = 7 + 2 * temp;
+
+ /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */
+ memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
+ memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);
+
+ temp = 0x0008; /* per-port overcurrent reporting */
+ temp |= 0x0002; /* no power switching */
+ desc->wHubCharacteristics = cpu_to_le16(temp);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int fotg210_hub_control(
+ struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue,
+ u16 wIndex,
+ char *buf,
+ u16 wLength
+) {
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ int ports = HCS_N_PORTS(fotg210->hcs_params);
+ u32 __iomem *status_reg = &fotg210->regs->port_status;
+ u32 temp, temp1, status;
+ unsigned long flags;
+ int retval = 0;
+ unsigned selector;
+
+ /*
+ * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR.
+ * HCS_INDICATOR may say we can change LEDs to off/amber/green.
+ * (track current state ourselves) ... blink for diagnostics,
+ * power, "this is the one", etc. EHCI spec supports this.
+ */
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+ switch (typeReq) {
+ case ClearHubFeature:
+ switch (wValue) {
+ case C_HUB_LOCAL_POWER:
+ case C_HUB_OVER_CURRENT:
+ /* no hub-wide feature/status flags */
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case ClearPortFeature:
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ temp = fotg210_readl(fotg210, status_reg);
+ temp &= ~PORT_RWC_BITS;
+
+ /*
+ * Even if OWNER is set, so the port is owned by the
+ * companion controller, khubd needs to be able to clear
+ * the port-change status bits (especially
+ * USB_PORT_STAT_C_CONNECTION).
+ */
+
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ fotg210_writel(fotg210, temp & ~PORT_PE, status_reg);
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ fotg210_writel(fotg210, temp | PORT_PEC, status_reg);
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ if (temp & PORT_RESET)
+ goto error;
+ if (!(temp & PORT_SUSPEND))
+ break;
+ if ((temp & PORT_PE) == 0)
+ goto error;
+
+ /* resume signaling for 20 msec */
+ fotg210_writel(fotg210, temp | PORT_RESUME, status_reg);
+ fotg210->reset_done[wIndex] = jiffies
+ + msecs_to_jiffies(20);
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ clear_bit(wIndex, &fotg210->port_c_suspend);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ fotg210_writel(fotg210, temp | PORT_CSC, status_reg);
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ fotg210_writel(fotg210, temp | OTGISR_OVC,
+ &fotg210->regs->otgisr);
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ /* GetPortStatus clears reset */
+ break;
+ default:
+ goto error;
+ }
+ fotg210_readl(fotg210, &fotg210->regs->command);
+ break;
+ case GetHubDescriptor:
+ fotg210_hub_descriptor(fotg210, (struct usb_hub_descriptor *)
+ buf);
+ break;
+ case GetHubStatus:
+ /* no hub-wide feature/status flags */
+ memset(buf, 0, 4);
+ /*cpu_to_le32s ((u32 *) buf); */
+ break;
+ case GetPortStatus:
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ status = 0;
+ temp = fotg210_readl(fotg210, status_reg);
+
+ /* wPortChange bits */
+ if (temp & PORT_CSC)
+ status |= USB_PORT_STAT_C_CONNECTION << 16;
+ if (temp & PORT_PEC)
+ status |= USB_PORT_STAT_C_ENABLE << 16;
+
+ temp1 = fotg210_readl(fotg210, &fotg210->regs->otgisr);
+ if (temp1 & OTGISR_OVC)
+ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+
+ /* whoever resumes must GetPortStatus to complete it!! */
+ if (temp & PORT_RESUME) {
+
+ /* Remote Wakeup received? */
+ if (!fotg210->reset_done[wIndex]) {
+ /* resume signaling for 20 msec */
+ fotg210->reset_done[wIndex] = jiffies
+ + msecs_to_jiffies(20);
+ /* check the port again */
+ mod_timer(&fotg210_to_hcd(fotg210)->rh_timer,
+ fotg210->reset_done[wIndex]);
+ }
+
+ /* resume completed? */
+ else if (time_after_eq(jiffies,
+ fotg210->reset_done[wIndex])) {
+ clear_bit(wIndex, &fotg210->suspended_ports);
+ set_bit(wIndex, &fotg210->port_c_suspend);
+ fotg210->reset_done[wIndex] = 0;
+
+ /* stop resume signaling */
+ temp = fotg210_readl(fotg210, status_reg);
+ fotg210_writel(fotg210,
+ temp & ~(PORT_RWC_BITS | PORT_RESUME),
+ status_reg);
+ clear_bit(wIndex, &fotg210->resuming_ports);
+ retval = handshake(fotg210, status_reg,
+ PORT_RESUME, 0, 2000 /* 2msec */);
+ if (retval != 0) {
+ fotg210_err(fotg210,
+ "port %d resume error %d\n",
+ wIndex + 1, retval);
+ goto error;
+ }
+ temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
+ }
+ }
+
+ /* whoever resets must GetPortStatus to complete it!! */
+ if ((temp & PORT_RESET)
+ && time_after_eq(jiffies,
+ fotg210->reset_done[wIndex])) {
+ status |= USB_PORT_STAT_C_RESET << 16;
+ fotg210->reset_done[wIndex] = 0;
+ clear_bit(wIndex, &fotg210->resuming_ports);
+
+ /* force reset to complete */
+ fotg210_writel(fotg210,
+ temp & ~(PORT_RWC_BITS | PORT_RESET),
+ status_reg);
+ /* REVISIT: some hardware needs 550+ usec to clear
+ * this bit; seems too long to spin routinely...
+ */
+ retval = handshake(fotg210, status_reg,
+ PORT_RESET, 0, 1000);
+ if (retval != 0) {
+ fotg210_err(fotg210, "port %d reset error %d\n",
+ wIndex + 1, retval);
+ goto error;
+ }
+
+ /* see what we found out */
+ temp = check_reset_complete(fotg210, wIndex, status_reg,
+ fotg210_readl(fotg210, status_reg));
+ }
+
+ if (!(temp & (PORT_RESUME|PORT_RESET))) {
+ fotg210->reset_done[wIndex] = 0;
+ clear_bit(wIndex, &fotg210->resuming_ports);
+ }
+
+ /* transfer dedicated ports to the companion hc */
+ if ((temp & PORT_CONNECT) &&
+ test_bit(wIndex, &fotg210->companion_ports)) {
+ temp &= ~PORT_RWC_BITS;
+ fotg210_writel(fotg210, temp, status_reg);
+ fotg210_dbg(fotg210, "port %d --> companion\n",
+ wIndex + 1);
+ temp = fotg210_readl(fotg210, status_reg);
+ }
+
+ /*
+ * Even if OWNER is set, there's no harm letting khubd
+ * see the wPortStatus values (they should all be 0 except
+ * for PORT_POWER anyway).
+ */
+
+ if (temp & PORT_CONNECT) {
+ status |= USB_PORT_STAT_CONNECTION;
+ status |= fotg210_port_speed(fotg210, temp);
+ }
+ if (temp & PORT_PE)
+ status |= USB_PORT_STAT_ENABLE;
+
+ /* maybe the port was unsuspended without our knowledge */
+ if (temp & (PORT_SUSPEND|PORT_RESUME)) {
+ status |= USB_PORT_STAT_SUSPEND;
+ } else if (test_bit(wIndex, &fotg210->suspended_ports)) {
+ clear_bit(wIndex, &fotg210->suspended_ports);
+ clear_bit(wIndex, &fotg210->resuming_ports);
+ fotg210->reset_done[wIndex] = 0;
+ if (temp & PORT_PE)
+ set_bit(wIndex, &fotg210->port_c_suspend);
+ }
+
+ temp1 = fotg210_readl(fotg210, &fotg210->regs->otgisr);
+ if (temp1 & OTGISR_OVC)
+ status |= USB_PORT_STAT_OVERCURRENT;
+ if (temp & PORT_RESET)
+ status |= USB_PORT_STAT_RESET;
+ if (test_bit(wIndex, &fotg210->port_c_suspend))
+ status |= USB_PORT_STAT_C_SUSPEND << 16;
+
+#ifndef VERBOSE_DEBUG
+ if (status & ~0xffff) /* only if wPortChange is interesting */
+#endif
+ dbg_port(fotg210, "GetStatus", wIndex + 1, temp);
+ put_unaligned_le32(status, buf);
+ break;
+ case SetHubFeature:
+ switch (wValue) {
+ case C_HUB_LOCAL_POWER:
+ case C_HUB_OVER_CURRENT:
+ /* no hub-wide feature/status flags */
+ break;
+ default:
+ goto error;
+ }
+ break;
+ case SetPortFeature:
+ selector = wIndex >> 8;
+ wIndex &= 0xff;
+
+ if (!wIndex || wIndex > ports)
+ goto error;
+ wIndex--;
+ temp = fotg210_readl(fotg210, status_reg);
+ temp &= ~PORT_RWC_BITS;
+ switch (wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ if ((temp & PORT_PE) == 0
+ || (temp & PORT_RESET) != 0)
+ goto error;
+
+ /* After the above check the port must be connected.
+ * Setting the appropriate bit could put the phy into
+ * low-power mode if we have the hostpc feature.
+ */
+ fotg210_writel(fotg210, temp | PORT_SUSPEND,
+ status_reg);
+ set_bit(wIndex, &fotg210->suspended_ports);
+ break;
+ case USB_PORT_FEAT_RESET:
+ if (temp & PORT_RESUME)
+ goto error;
+ /* line status bits may report this as low speed,
+ * which can be fine if this root hub has a
+ * transaction translator built in.
+ */
+ fotg210_vdbg(fotg210, "port %d reset\n", wIndex + 1);
+ temp |= PORT_RESET;
+ temp &= ~PORT_PE;
+
+ /*
+ * caller must wait, then call GetPortStatus
+ * usb 2.0 spec says 50 ms resets on root
+ */
+ fotg210->reset_done[wIndex] = jiffies
+ + msecs_to_jiffies(50);
+ fotg210_writel(fotg210, temp, status_reg);
+ break;
+
+ /* For downstream facing ports (these): one hub port is put
+ * into test mode according to USB2 11.24.2.13, then the hub
+ * must be reset (which for root hub now means rmmod+modprobe,
+ * or else system reboot). See EHCI 2.3.9 and 4.14 for info
+ * about the EHCI-specific stuff.
+ */
+ case USB_PORT_FEAT_TEST:
+ if (!selector || selector > 5)
+ goto error;
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ fotg210_quiesce(fotg210);
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ /* Put all enabled ports into suspend */
+ temp = fotg210_readl(fotg210, status_reg) &
+ ~PORT_RWC_BITS;
+ if (temp & PORT_PE)
+ fotg210_writel(fotg210, temp | PORT_SUSPEND,
+ status_reg);
+
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ fotg210_halt(fotg210);
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ temp = fotg210_readl(fotg210, status_reg);
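+ /* selector << 16 lands in the port test control field (EHCI PORTSC bits 19:16) */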
+ temp |= selector << 16;
+ fotg210_writel(fotg210, temp, status_reg);
+ break;
+
+ default:
+ goto error;
+ }
+ fotg210_readl(fotg210, &fotg210->regs->command);
+ break;
+
+ default:
+error:
+ /* "stall" on error */
+ retval = -EPIPE;
+ }
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ return retval;
+}
+
+static void __maybe_unused fotg210_relinquish_port(struct usb_hcd *hcd,
+ int portnum)
+{
+ return;
+}
+
+static int __maybe_unused fotg210_port_handed_over(struct usb_hcd *hcd,
+ int portnum)
+{
+ return 0;
+}
+/*-------------------------------------------------------------------------*/
+/*
+ * There are basically three types of memory:
+ * - data used only by the HCD ... kmalloc is fine
+ * - async and periodic schedules, shared by HC and HCD ... these
+ * need to use dma_pool or dma_alloc_coherent
+ * - driver buffers, read/written by HC ... single shot DMA mapped
+ *
+ * There's also "register" data (e.g. PCI or SOC), which is memory mapped.
+ * No memory seen by this driver is pageable.
+ */
+
+/*-------------------------------------------------------------------------*/
+
+/* Allocate the key transfer structures from the previously allocated pool */
+
+static inline void fotg210_qtd_init(struct fotg210_hcd *fotg210,
+ struct fotg210_qtd *qtd, dma_addr_t dma)
+{
+ memset(qtd, 0, sizeof(*qtd));
+ qtd->qtd_dma = dma;
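+ /* not Active yet: the HC won't execute this qtd until it is filled in and activated */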
+ qtd->hw_token = cpu_to_hc32(fotg210, QTD_STS_HALT);
+ qtd->hw_next = FOTG210_LIST_END(fotg210);
+ qtd->hw_alt_next = FOTG210_LIST_END(fotg210);
+ INIT_LIST_HEAD(&qtd->qtd_list);
+}
+
+static struct fotg210_qtd *fotg210_qtd_alloc(struct fotg210_hcd *fotg210,
+ gfp_t flags)
+{
+ struct fotg210_qtd *qtd;
+ dma_addr_t dma;
+
+ qtd = dma_pool_alloc(fotg210->qtd_pool, flags, &dma);
+ if (qtd != NULL)
+ fotg210_qtd_init(fotg210, qtd, dma);
+
+ return qtd;
+}
+
+static inline void fotg210_qtd_free(struct fotg210_hcd *fotg210,
+ struct fotg210_qtd *qtd)
+{
+ dma_pool_free(fotg210->qtd_pool, qtd, qtd->qtd_dma);
+}
+
+
+static void qh_destroy(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ /* clean qtds first, and know this is not linked */
+ if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
+ fotg210_dbg(fotg210, "unused qh not empty!\n");
+ BUG();
+ }
+ if (qh->dummy)
+ fotg210_qtd_free(fotg210, qh->dummy);
+ dma_pool_free(fotg210->qh_pool, qh->hw, qh->qh_dma);
+ kfree(qh);
+}
+
+static struct fotg210_qh *fotg210_qh_alloc(struct fotg210_hcd *fotg210,
+ gfp_t flags)
+{
+ struct fotg210_qh *qh;
+ dma_addr_t dma;
+
+ qh = kzalloc(sizeof(*qh), GFP_ATOMIC);
+ if (!qh)
+ goto done;
+ qh->hw = (struct fotg210_qh_hw *)
+ dma_pool_alloc(fotg210->qh_pool, flags, &dma);
+ if (!qh->hw)
+ goto fail;
+ memset(qh->hw, 0, sizeof(*qh->hw));
+ qh->qh_dma = dma;
+ INIT_LIST_HEAD(&qh->qtd_list);
+
+ /* dummy td enables safe urb queuing */
+ qh->dummy = fotg210_qtd_alloc(fotg210, flags);
+ if (qh->dummy == NULL) {
+ fotg210_dbg(fotg210, "no dummy td\n");
+ goto fail1;
+ }
+done:
+ return qh;
+fail1:
+ dma_pool_free(fotg210->qh_pool, qh->hw, qh->qh_dma);
+fail:
+ kfree(qh);
+ return NULL;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* The queue heads and transfer descriptors are managed from pools tied
+ * to each of the "per device" structures.
+ * This is the initialisation and cleanup code.
+ */
+
+static void fotg210_mem_cleanup(struct fotg210_hcd *fotg210)
+{
+ if (fotg210->async)
+ qh_destroy(fotg210, fotg210->async);
+ fotg210->async = NULL;
+
+ if (fotg210->dummy)
+ qh_destroy(fotg210, fotg210->dummy);
+ fotg210->dummy = NULL;
+
+ /* DMA consistent memory and pools */
+ if (fotg210->qtd_pool)
+ dma_pool_destroy(fotg210->qtd_pool);
+ fotg210->qtd_pool = NULL;
+
+ if (fotg210->qh_pool) {
+ dma_pool_destroy(fotg210->qh_pool);
+ fotg210->qh_pool = NULL;
+ }
+
+ if (fotg210->itd_pool)
+ dma_pool_destroy(fotg210->itd_pool);
+ fotg210->itd_pool = NULL;
+
+ if (fotg210->periodic)
+ dma_free_coherent(fotg210_to_hcd(fotg210)->self.controller,
+ fotg210->periodic_size * sizeof(u32),
+ fotg210->periodic, fotg210->periodic_dma);
+ fotg210->periodic = NULL;
+
+ /* shadow periodic table */
+ kfree(fotg210->pshadow);
+ fotg210->pshadow = NULL;
+}
+
+/* remember to add cleanup code (above) if you add anything here */
+static int fotg210_mem_init(struct fotg210_hcd *fotg210, gfp_t flags)
+{
+ int i;
+
+ /* QTDs for control/bulk/intr transfers */
+ fotg210->qtd_pool = dma_pool_create("fotg210_qtd",
+ fotg210_to_hcd(fotg210)->self.controller,
+ sizeof(struct fotg210_qtd),
+ 32 /* byte alignment (for hw parts) */,
+ 4096 /* can't cross 4K */);
+ if (!fotg210->qtd_pool)
+ goto fail;
+
+ /* QHs for control/bulk/intr transfers */
+ fotg210->qh_pool = dma_pool_create("fotg210_qh",
+ fotg210_to_hcd(fotg210)->self.controller,
+ sizeof(struct fotg210_qh_hw),
+ 32 /* byte alignment (for hw parts) */,
+ 4096 /* can't cross 4K */);
+ if (!fotg210->qh_pool)
+ goto fail;
+
+ fotg210->async = fotg210_qh_alloc(fotg210, flags);
+ if (!fotg210->async)
+ goto fail;
+
+ /* ITD for high speed ISO transfers */
+ fotg210->itd_pool = dma_pool_create("fotg210_itd",
+ fotg210_to_hcd(fotg210)->self.controller,
+ sizeof(struct fotg210_itd),
+ 64 /* byte alignment (for hw parts) */,
+ 4096 /* can't cross 4K */);
+ if (!fotg210->itd_pool)
+ goto fail;
+
+ /* Hardware periodic table */
+ fotg210->periodic = (__le32 *)
+ dma_alloc_coherent(fotg210_to_hcd(fotg210)->self.controller,
+ fotg210->periodic_size * sizeof(__le32),
+ &fotg210->periodic_dma, 0);
+ if (fotg210->periodic == NULL)
+ goto fail;
+
+ for (i = 0; i < fotg210->periodic_size; i++)
+ fotg210->periodic[i] = FOTG210_LIST_END(fotg210);
+
+ /* software shadow of hardware table */
+ fotg210->pshadow = kcalloc(fotg210->periodic_size, sizeof(void *),
+ flags);
+ if (fotg210->pshadow != NULL)
+ return 0;
+
+fail:
+ fotg210_dbg(fotg210, "couldn't init memory\n");
+ fotg210_mem_cleanup(fotg210);
+ return -ENOMEM;
+}
+/*-------------------------------------------------------------------------*/
+/*
+ * EHCI hardware queue manipulation ... the core. QH/QTD manipulation.
+ *
+ * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd"
+ * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned
+ * buffers needed for the larger number). We use one QH per endpoint, queue
+ * multiple urbs (all three types) per endpoint. URBs may need several qtds.
+ *
+ * ISO traffic uses "ISO TD" (itd) records, and (along with
+ * interrupts) needs careful scheduling. Performance improvements can be
+ * an ongoing challenge. That's in "ehci-sched.c".
+ *
+ * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs,
+ * or otherwise through transaction translators (TTs) in USB 2.0 hubs using
+ * (b) special fields in qh entries or (c) split iso entries. TTs will
+ * buffer low/full speed data so the host collects it at high speed.
+ */
+
+/*-------------------------------------------------------------------------*/
+
+/* fill a qtd, returning how much of the buffer we were able to queue up */
+
+static int
+qtd_fill(struct fotg210_hcd *fotg210, struct fotg210_qtd *qtd, dma_addr_t buf,
+ size_t len, int token, int maxpacket)
+{
+ int i, count;
+ u64 addr = buf;
+
+ /* one buffer entry per 4K ... first might be short or unaligned */
+ qtd->hw_buf[0] = cpu_to_hc32(fotg210, (u32)addr);
+ qtd->hw_buf_hi[0] = cpu_to_hc32(fotg210, (u32)(addr >> 32));
+ count = 0x1000 - (buf & 0x0fff); /* rest of that page */
+ if (likely(len < count)) /* ... iff needed */
+ count = len;
+ else {
+ buf += 0x1000;
+ buf &= ~0x0fff;
+
+ /* per-qtd limit: from 16K to 20K (best alignment) */
+ for (i = 1; count < len && i < 5; i++) {
+ addr = buf;
+ qtd->hw_buf[i] = cpu_to_hc32(fotg210, (u32)addr);
+ qtd->hw_buf_hi[i] = cpu_to_hc32(fotg210,
+ (u32)(addr >> 32));
+ buf += 0x1000;
+ if ((count + 0x1000) < len)
+ count += 0x1000;
+ else
+ count = len;
+ }
+
+ /* short packets may only terminate transfers */
+ if (count != len)
+ count -= (count % maxpacket);
+ }
+ qtd->hw_token = cpu_to_hc32(fotg210, (count << 16) | token);
+ qtd->length = count;
+
+ return count;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline void
+qh_update(struct fotg210_hcd *fotg210, struct fotg210_qh *qh,
+ struct fotg210_qtd *qtd)
+{
+ struct fotg210_qh_hw *hw = qh->hw;
+
+ /* writes to an active overlay are unsafe */
+ BUG_ON(qh->qh_state != QH_STATE_IDLE);
+
+ hw->hw_qtd_next = QTD_NEXT(fotg210, qtd->qtd_dma);
+ hw->hw_alt_next = FOTG210_LIST_END(fotg210);
+
+ /* Except for control endpoints, we make hardware maintain data
+ * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
+ * and set the pseudo-toggle in udev. Only usb_clear_halt() will
+ * ever clear it.
+ */
+ if (!(hw->hw_info1 & cpu_to_hc32(fotg210, QH_TOGGLE_CTL))) {
+ unsigned is_out, epnum;
+
+ is_out = qh->is_out;
+ epnum = (hc32_to_cpup(fotg210, &hw->hw_info1) >> 8) & 0x0f;
+ if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
+ hw->hw_token &= ~cpu_to_hc32(fotg210, QTD_TOGGLE);
+ usb_settoggle(qh->dev, epnum, is_out, 1);
+ }
+ }
+
+ hw->hw_token &= cpu_to_hc32(fotg210, QTD_TOGGLE | QTD_STS_PING);
+}
+
+/* if it weren't for a common silicon quirk (writing the dummy into the qh
+ * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault
+ * recovery (including urb dequeue) would need software changes to a QH...
+ */
+static void
+qh_refresh(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ struct fotg210_qtd *qtd;
+
+ if (list_empty(&qh->qtd_list))
+ qtd = qh->dummy;
+ else {
+ qtd = list_entry(qh->qtd_list.next,
+ struct fotg210_qtd, qtd_list);
+ /*
+ * first qtd may already be partially processed.
+ * If we come here during unlink, the QH overlay region
+ * might have reference to the just unlinked qtd. The
+ * qtd is updated in qh_completions(). Update the QH
+ * overlay here.
+ */
+ if (cpu_to_hc32(fotg210, qtd->qtd_dma) == qh->hw->hw_current) {
+ qh->hw->hw_qtd_next = qtd->hw_next;
+ qtd = NULL;
+ }
+ }
+
+ if (qtd)
+ qh_update(fotg210, qh, qtd);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
+
+static void fotg210_clear_tt_buffer_complete(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ struct fotg210_qh *qh = ep->hcpriv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+ qh->clearing_tt = 0;
+ if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list)
+ && fotg210->rh_state == FOTG210_RH_RUNNING)
+ qh_link_async(fotg210, qh);
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+}
+
+static void fotg210_clear_tt_buffer(struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh,
+ struct urb *urb, u32 token)
+{
+
+ /* If an async split transaction gets an error or is unlinked,
+ * the TT buffer may be left in an indeterminate state. We
+ * have to clear the TT buffer.
+ *
+ * Note: this routine is never called for Isochronous transfers.
+ */
+ if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) {
+#ifdef DEBUG
+ struct usb_device *tt = urb->dev->tt->hub;
+ dev_dbg(&tt->dev,
+ "clear tt buffer port %d, a%d ep%d t%08x\n",
+ urb->dev->ttport, urb->dev->devnum,
+ usb_pipeendpoint(urb->pipe), token);
+#endif /* DEBUG */
+ if (urb->dev->tt->hub !=
+ fotg210_to_hcd(fotg210)->self.root_hub) {
+ if (usb_hub_clear_tt_buffer(urb) == 0)
+ qh->clearing_tt = 1;
+ }
+ }
+}
+
+static int qtd_copy_status(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ size_t length,
+ u32 token
+)
+{
+ int status = -EINPROGRESS;
+
+ /* count IN/OUT bytes, not SETUP (even short packets) */
+ if (likely(QTD_PID(token) != 2))
+ urb->actual_length += length - QTD_LENGTH(token);
+
+ /* don't modify error codes */
+ if (unlikely(urb->unlinked))
+ return status;
+
+ /* force cleanup after short read; not always an error */
+ if (unlikely(IS_SHORT_READ(token)))
+ status = -EREMOTEIO;
+
+ /* serious "can't proceed" faults reported by the hardware */
+ if (token & QTD_STS_HALT) {
+ if (token & QTD_STS_BABBLE) {
+ /* FIXME "must" disable babbling device's port too */
+ status = -EOVERFLOW;
+ /* CERR nonzero + halt --> stall */
+ } else if (QTD_CERR(token)) {
+ status = -EPIPE;
+
+ /* In theory, more than one of the following bits can be set
+ * since they are sticky and the transaction is retried.
+ * Which to test first is rather arbitrary.
+ */
+ } else if (token & QTD_STS_MMF) {
+ /* fs/ls interrupt xfer missed the complete-split */
+ status = -EPROTO;
+ } else if (token & QTD_STS_DBE) {
+ status = (QTD_PID(token) == 1) /* IN ? */
+ ? -ENOSR /* hc couldn't read data */
+ : -ECOMM; /* hc couldn't write data */
+ } else if (token & QTD_STS_XACT) {
+ /* timeout, bad CRC, wrong PID, etc */
+ fotg210_dbg(fotg210, "devpath %s ep%d%s 3strikes\n",
+ urb->dev->devpath,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out");
+ status = -EPROTO;
+ } else { /* unknown */
+ status = -EPROTO;
+ }
+
+ fotg210_vdbg(fotg210,
+ "dev%d ep%d%s qtd token %08x --> status %d\n",
+ usb_pipedevice(urb->pipe),
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ token, status);
+ }
+
+ return status;
+}
+
+static void
+fotg210_urb_done(struct fotg210_hcd *fotg210, struct urb *urb, int status)
+__releases(fotg210->lock)
+__acquires(fotg210->lock)
+{
+ if (likely(urb->hcpriv != NULL)) {
+ struct fotg210_qh *qh = (struct fotg210_qh *) urb->hcpriv;
+
+ /* S-mask in a QH means it's an interrupt urb */
+ if ((qh->hw->hw_info2 & cpu_to_hc32(fotg210, QH_SMASK)) != 0) {
+
+ /* ... update hc-wide periodic stats (for usbfs) */
+ fotg210_to_hcd(fotg210)->self.bandwidth_int_reqs--;
+ }
+ }
+
+ if (unlikely(urb->unlinked)) {
+ COUNT(fotg210->stats.unlink);
+ } else {
+ /* report non-error and short read status as zero */
+ if (status == -EINPROGRESS || status == -EREMOTEIO)
+ status = 0;
+ COUNT(fotg210->stats.complete);
+ }
+
+#ifdef FOTG210_URB_TRACE
+ fotg210_dbg(fotg210,
+ "%s %s urb %p ep%d%s status %d len %d/%d\n",
+ __func__, urb->dev->devpath, urb,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ status,
+ urb->actual_length, urb->transfer_buffer_length);
+#endif
+
+ /* complete() can reenter this HCD */
+ usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
+ spin_unlock(&fotg210->lock);
+ usb_hcd_giveback_urb(fotg210_to_hcd(fotg210), urb, status);
+ spin_lock(&fotg210->lock);
+}
+
+static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh);
+
+/*
+ * Process and free completed qtds for a qh, returning URBs to drivers.
+ * Chases up to qh->hw_current. Returns number of completions called,
+ * indicating how much "real" work we did.
+ */
+static unsigned
+qh_completions(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ struct fotg210_qtd *last, *end = qh->dummy;
+ struct list_head *entry, *tmp;
+ int last_status;
+ int stopped;
+ unsigned count = 0;
+ u8 state;
+ struct fotg210_qh_hw *hw = qh->hw;
+
+ if (unlikely(list_empty(&qh->qtd_list)))
+ return count;
+
+ /* completions (or tasks on other cpus) must never clobber HALT
+ * till we've gone through and cleaned everything up, even when
+ * they add urbs to this qh's queue or mark them for unlinking.
+ *
+ * NOTE: unlinking expects to be done in queue order.
+ *
+ * It's a bug for qh->qh_state to be anything other than
+ * QH_STATE_IDLE, unless our caller is scan_async() or
+ * scan_intr().
+ */
+ state = qh->qh_state;
+ qh->qh_state = QH_STATE_COMPLETING;
+ stopped = (state == QH_STATE_IDLE);
+
+ rescan:
+ last = NULL;
+ last_status = -EINPROGRESS;
+ qh->needs_rescan = 0;
+
+ /* remove de-activated QTDs from front of queue.
+ * after faults (including short reads), cleanup this urb
+ * then let the queue advance.
+ * if queue is stopped, handles unlinks.
+ */
+ list_for_each_safe(entry, tmp, &qh->qtd_list) {
+ struct fotg210_qtd *qtd;
+ struct urb *urb;
+ u32 token = 0;
+
+ qtd = list_entry(entry, struct fotg210_qtd, qtd_list);
+ urb = qtd->urb;
+
+ /* clean up any state from previous QTD ...*/
+ if (last) {
+ if (likely(last->urb != urb)) {
+ fotg210_urb_done(fotg210, last->urb,
+ last_status);
+ count++;
+ last_status = -EINPROGRESS;
+ }
+ fotg210_qtd_free(fotg210, last);
+ last = NULL;
+ }
+
+ /* ignore urbs submitted during completions we reported */
+ if (qtd == end)
+ break;
+
+ /* hardware copies qtd out of qh overlay */
+ rmb();
+ token = hc32_to_cpu(fotg210, qtd->hw_token);
+
+ /* always clean up qtds the hc de-activated */
+ retry_xacterr:
+ if ((token & QTD_STS_ACTIVE) == 0) {
+
+ /* Report Data Buffer Error: non-fatal but useful */
+ if (token & QTD_STS_DBE)
+ fotg210_dbg(fotg210,
+ "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ urb,
+ usb_endpoint_num(&urb->ep->desc),
+ usb_endpoint_dir_in(&urb->ep->desc)
+ ? "in" : "out",
+ urb->transfer_buffer_length,
+ qtd,
+ qh);
+
+ /* on STALL, error, and short reads this urb must
+ * complete and all its qtds must be recycled.
+ */
+ if ((token & QTD_STS_HALT) != 0) {
+
+ /* retry transaction errors until we
+ * reach the software xacterr limit
+ */
+ if ((token & QTD_STS_XACT) &&
+ QTD_CERR(token) == 0 &&
+ ++qh->xacterrs < QH_XACTERR_MAX &&
+ !urb->unlinked) {
+ fotg210_dbg(fotg210,
+ "detected XactErr len %zu/%zu retry %d\n",
+ qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs);
+
+ /* reset the token in the qtd and the
+ * qh overlay (which still contains
+ * the qtd) so that we pick up from
+ * where we left off
+ */
+ token &= ~QTD_STS_HALT;
+ token |= QTD_STS_ACTIVE |
+ (FOTG210_TUNE_CERR << 10);
+ qtd->hw_token = cpu_to_hc32(fotg210,
+ token);
+ wmb();
+ hw->hw_token = cpu_to_hc32(fotg210,
+ token);
+ goto retry_xacterr;
+ }
+ stopped = 1;
+
+ /* magic dummy for some short reads; qh won't advance.
+ * that silicon quirk can kick in with this dummy too.
+ *
+ * other short reads won't stop the queue, including
+ * control transfers (status stage handles that) or
+ * most other single-qtd reads ... the queue stops if
+ * URB_SHORT_NOT_OK was set so the driver submitting
+ * the urbs could clean it up.
+ */
+ } else if (IS_SHORT_READ(token)
+ && !(qtd->hw_alt_next
+ & FOTG210_LIST_END(fotg210))) {
+ stopped = 1;
+ }
+
+ /* stop scanning when we reach qtds the hc is using */
+ } else if (likely(!stopped
+ && fotg210->rh_state >= FOTG210_RH_RUNNING)) {
+ break;
+
+ /* scan the whole queue for unlinks whenever it stops */
+ } else {
+ stopped = 1;
+
+ /* cancel everything if we halt, suspend, etc */
+ if (fotg210->rh_state < FOTG210_RH_RUNNING)
+ last_status = -ESHUTDOWN;
+
+ /* this qtd is active; skip it unless a previous qtd
+ * for its urb faulted, or its urb was canceled.
+ */
+ else if (last_status == -EINPROGRESS && !urb->unlinked)
+ continue;
+
+ /* qh unlinked; token in overlay may be most current */
+ if (state == QH_STATE_IDLE
+ && cpu_to_hc32(fotg210, qtd->qtd_dma)
+ == hw->hw_current) {
+ token = hc32_to_cpu(fotg210, hw->hw_token);
+
+ /* An unlink may leave an incomplete
+ * async transaction in the TT buffer.
+ * We have to clear it.
+ */
+ fotg210_clear_tt_buffer(fotg210, qh, urb,
+ token);
+ }
+ }
+
+ /* unless we already know the urb's status, collect qtd status
+ * and update count of bytes transferred. in common short read
+ * cases with only one data qtd (including control transfers),
+ * queue processing won't halt. but with two or more qtds (for
+ * example, with a 32 KB transfer), when the first qtd gets a
+ * short read the second must be removed by hand.
+ */
+ if (last_status == -EINPROGRESS) {
+ last_status = qtd_copy_status(fotg210, urb,
+ qtd->length, token);
+ if (last_status == -EREMOTEIO
+ && (qtd->hw_alt_next
+ & FOTG210_LIST_END(fotg210)))
+ last_status = -EINPROGRESS;
+
+ /* As part of low/full-speed endpoint-halt processing
+ * we must clear the TT buffer (11.17.5).
+ */
+ if (unlikely(last_status != -EINPROGRESS &&
+ last_status != -EREMOTEIO)) {
+ /* The TT's in some hubs malfunction when they
+ * receive this request following a STALL (they
+ * stop sending isochronous packets). Since a
+ * STALL can't leave the TT buffer in a busy
+ * state (if you believe Figures 11-48 - 11-51
+ * in the USB 2.0 spec), we won't clear the TT
+ * buffer in this case. Strictly speaking this
+ * is a violation of the spec.
+ */
+ if (last_status != -EPIPE)
+ fotg210_clear_tt_buffer(fotg210, qh,
+ urb, token);
+ }
+ }
+
+ /* if we're removing something not at the queue head,
+ * patch the hardware queue pointer.
+ */
+ if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
+ last = list_entry(qtd->qtd_list.prev,
+ struct fotg210_qtd, qtd_list);
+ last->hw_next = qtd->hw_next;
+ }
+
+ /* remove qtd; it's recycled after possible urb completion */
+ list_del(&qtd->qtd_list);
+ last = qtd;
+
+ /* reinit the xacterr counter for the next qtd */
+ qh->xacterrs = 0;
+ }
+
+ /* last urb's completion might still need calling */
+ if (likely(last != NULL)) {
+ fotg210_urb_done(fotg210, last->urb, last_status);
+ count++;
+ fotg210_qtd_free(fotg210, last);
+ }
+
+ /* Do we need to rescan for URBs dequeued during a giveback? */
+ if (unlikely(qh->needs_rescan)) {
+ /* If the QH is already unlinked, do the rescan now. */
+ if (state == QH_STATE_IDLE)
+ goto rescan;
+
+ /* Otherwise we have to wait until the QH is fully unlinked.
+ * Our caller will start an unlink if qh->needs_rescan is
+ * set. But if an unlink has already started, nothing needs
+ * to be done.
+ */
+ if (state != QH_STATE_LINKED)
+ qh->needs_rescan = 0;
+ }
+
+ /* restore original state; caller must unlink or relink */
+ qh->qh_state = state;
+
+ /* be sure the hardware's done with the qh before refreshing
+ * it after fault cleanup, or recovering from silicon wrongly
+ * overlaying the dummy qtd (which reduces DMA chatter).
+ */
+ if (stopped != 0 || hw->hw_qtd_next == FOTG210_LIST_END(fotg210)) {
+ switch (state) {
+ case QH_STATE_IDLE:
+ qh_refresh(fotg210, qh);
+ break;
+ case QH_STATE_LINKED:
+ /* We won't refresh a QH that's linked (after the HC
+ * stopped the queue). That avoids a race:
+ * - HC reads first part of QH;
+ * - CPU updates that first part and the token;
+ * - HC reads rest of that QH, including token
+ * Result: HC gets an inconsistent image, and then
+ * DMAs to/from the wrong memory (corrupting it).
+ *
+ * That should be rare for interrupt transfers,
+ * except maybe high bandwidth ...
+ */
+
+ /* Tell the caller to start an unlink */
+ qh->needs_rescan = 1;
+ break;
+ /* otherwise, unlink already started */
+ }
+ }
+
+ return count;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */
+#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
+/* ... and packet size, for any kind of endpoint descriptor */
+#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
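+/*
+ * Worked example (values are illustrative only, not from any particular
+ * device): a high-bandwidth endpoint reporting wMaxPacketSize 0x1400
+ * decodes to hb_mult() == 3 and max_packet() == 1024, i.e. up to three
+ * 1024-byte transactions per microframe.
+ */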
+
+/*
+ * reverse of qh_urb_transaction: free a list of TDs.
+ * used for cleanup after errors, before HC sees an URB's TDs.
+ */
+static void qtd_list_free(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ struct list_head *qtd_list
+) {
+ struct list_head *entry, *temp;
+
+ list_for_each_safe(entry, temp, qtd_list) {
+ struct fotg210_qtd *qtd;
+
+ qtd = list_entry(entry, struct fotg210_qtd, qtd_list);
+ list_del(&qtd->qtd_list);
+ fotg210_qtd_free(fotg210, qtd);
+ }
+}
+
+/*
+ * create a list of filled qtds for this URB; won't link into qh.
+ */
+static struct list_head *
+qh_urb_transaction(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ struct list_head *head,
+ gfp_t flags
+) {
+ struct fotg210_qtd *qtd, *qtd_prev;
+ dma_addr_t buf;
+ int len, this_sg_len, maxpacket;
+ int is_input;
+ u32 token;
+ int i;
+ struct scatterlist *sg;
+
+ /*
+ * URBs map to sequences of QTDs: one logical transaction
+ */
+ qtd = fotg210_qtd_alloc(fotg210, flags);
+ if (unlikely(!qtd))
+ return NULL;
+ list_add_tail(&qtd->qtd_list, head);
+ qtd->urb = urb;
+
+ token = QTD_STS_ACTIVE;
+ token |= (FOTG210_TUNE_CERR << 10);
+ /* for split transactions, SplitXState initialized to zero */
+
+ len = urb->transfer_buffer_length;
+ is_input = usb_pipein(urb->pipe);
+ if (usb_pipecontrol(urb->pipe)) {
+ /* SETUP pid */
+ qtd_fill(fotg210, qtd, urb->setup_dma,
+ sizeof(struct usb_ctrlrequest),
+ token | (2 /* "setup" */ << 8), 8);
+
+ /* ... and always at least one more pid */
+ token ^= QTD_TOGGLE;
+ qtd_prev = qtd;
+ qtd = fotg210_qtd_alloc(fotg210, flags);
+ if (unlikely(!qtd))
+ goto cleanup;
+ qtd->urb = urb;
+ qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
+ list_add_tail(&qtd->qtd_list, head);
+
+ /* for zero length DATA stages, STATUS is always IN */
+ if (len == 0)
+ token |= (1 /* "in" */ << 8);
+ }
+
+ /*
+ * data transfer stage: buffer setup
+ */
+ i = urb->num_mapped_sgs;
+ if (len > 0 && i > 0) {
+ sg = urb->sg;
+ buf = sg_dma_address(sg);
+
+ /* urb->transfer_buffer_length may be smaller than the
+ * size of the scatterlist (or vice versa)
+ */
+ this_sg_len = min_t(int, sg_dma_len(sg), len);
+ } else {
+ sg = NULL;
+ buf = urb->transfer_dma;
+ this_sg_len = len;
+ }
+
+ if (is_input)
+ token |= (1 /* "in" */ << 8);
+ /* else it's already initted to "out" pid (0 << 8) */
+
+ maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
+
+ /*
+ * buffer gets wrapped in one or more qtds;
+ * last one may be "short" (including zero len)
+ * and may serve as a control status ack
+ */
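+ /*
+ * Rough sizing sketch (assuming the EHCI-style five-page qTD layout
+ * used by qtd_fill() earlier in this file): each qtd carries roughly
+ * 16-20 KB depending on buffer alignment, so e.g. a 32 KB bulk
+ * transfer needs at least two qtds.
+ */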
+ for (;;) {
+ int this_qtd_len;
+
+ this_qtd_len = qtd_fill(fotg210, qtd, buf, this_sg_len, token,
+ maxpacket);
+ this_sg_len -= this_qtd_len;
+ len -= this_qtd_len;
+ buf += this_qtd_len;
+
+ /*
+ * short reads advance to a "magic" dummy instead of the next
+ * qtd ... that forces the queue to stop, for manual cleanup.
+ * (this will usually be overridden later.)
+ */
+ if (is_input)
+ qtd->hw_alt_next = fotg210->async->hw->hw_alt_next;
+
+ /* qh makes control packets use qtd toggle; maybe switch it */
+ if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
+ token ^= QTD_TOGGLE;
+
+ if (likely(this_sg_len <= 0)) {
+ if (--i <= 0 || len <= 0)
+ break;
+ sg = sg_next(sg);
+ buf = sg_dma_address(sg);
+ this_sg_len = min_t(int, sg_dma_len(sg), len);
+ }
+
+ qtd_prev = qtd;
+ qtd = fotg210_qtd_alloc(fotg210, flags);
+ if (unlikely(!qtd))
+ goto cleanup;
+ qtd->urb = urb;
+ qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
+ list_add_tail(&qtd->qtd_list, head);
+ }
+
+ /*
+ * unless the caller requires manual cleanup after short reads,
+ * have the alt_next mechanism keep the queue running after the
+ * last data qtd (the only one, for control and most other cases).
+ */
+ if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
+ || usb_pipecontrol(urb->pipe)))
+ qtd->hw_alt_next = FOTG210_LIST_END(fotg210);
+
+ /*
+ * control requests may need a terminating data "status" ack;
+ * other OUT ones may need a terminating short packet
+ * (zero length).
+ */
+ if (likely(urb->transfer_buffer_length != 0)) {
+ int one_more = 0;
+
+ if (usb_pipecontrol(urb->pipe)) {
+ one_more = 1;
+ token ^= 0x0100; /* "in" <--> "out" */
+ token |= QTD_TOGGLE; /* force DATA1 */
+ } else if (usb_pipeout(urb->pipe)
+ && (urb->transfer_flags & URB_ZERO_PACKET)
+ && !(urb->transfer_buffer_length % maxpacket)) {
+ one_more = 1;
+ }
+ if (one_more) {
+ qtd_prev = qtd;
+ qtd = fotg210_qtd_alloc(fotg210, flags);
+ if (unlikely(!qtd))
+ goto cleanup;
+ qtd->urb = urb;
+ qtd_prev->hw_next = QTD_NEXT(fotg210, qtd->qtd_dma);
+ list_add_tail(&qtd->qtd_list, head);
+
+ /* never any data in such packets */
+ qtd_fill(fotg210, qtd, 0, 0, token, 0);
+ }
+ }
+
+ /* by default, enable interrupt on urb completion */
+ if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT)))
+ qtd->hw_token |= cpu_to_hc32(fotg210, QTD_IOC);
+ return head;
+
+cleanup:
+ qtd_list_free(fotg210, urb, head);
+ return NULL;
+}
+
+/*-------------------------------------------------------------------------*/
+/*
+ * Would be best to create all qh's from config descriptors,
+ * when each interface/altsetting is established. Unlink
+ * any previous qh and cancel its urbs first; endpoints are
+ * implicitly reset then (data toggle too).
+ * That'd mean updating how usbcore talks to HCDs. (2.7?)
+ */
+
+
+/*
+ * Each QH holds a qtd list; a QH is used for everything except iso.
+ *
+ * For interrupt urbs, the scheduler must set the microframe scheduling
+ * mask(s) each time the QH gets scheduled. For highspeed, that's
+ * just one microframe in the s-mask. For split interrupt transactions
+ * there are additional complications: c-mask, maybe FSTNs.
+ */
+static struct fotg210_qh *
+qh_make(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ gfp_t flags
+) {
+ struct fotg210_qh *qh = fotg210_qh_alloc(fotg210, flags);
+ u32 info1 = 0, info2 = 0;
+ int is_input, type;
+ int maxp = 0;
+ struct usb_tt *tt = urb->dev->tt;
+ struct fotg210_qh_hw *hw;
+
+ if (!qh)
+ return qh;
+
+ /*
+ * init endpoint/device data for this QH
+ */
+ info1 |= usb_pipeendpoint(urb->pipe) << 8;
+ info1 |= usb_pipedevice(urb->pipe) << 0;
+
+ is_input = usb_pipein(urb->pipe);
+ type = usb_pipetype(urb->pipe);
+ maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
+
+ /* 1024 byte maxpacket is a hardware ceiling. High bandwidth
+ * acts like up to 3KB, but is built from smaller packets.
+ */
+ if (max_packet(maxp) > 1024) {
+ fotg210_dbg(fotg210, "bogus qh maxpacket %d\n",
+ max_packet(maxp));
+ goto done;
+ }
+
+ /* Compute interrupt scheduling parameters just once, and save.
+ * - allowing for high bandwidth, how many nsec/uframe are used?
+ * - split transactions need a second CSPLIT uframe; same question
+ * - splits also need a schedule gap (for full/low speed I/O)
+ * - qh has a polling interval
+ *
+ * For control/bulk requests, the HC or TT handles these.
+ */
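+ /*
+ * Illustrative case (values not taken from the code): a high-speed
+ * interrupt endpoint with bInterval 4 arrives with urb->interval == 8
+ * microframes, so qh->period below becomes 8 >> 3 == 1 frame.
+ */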
+ if (type == PIPE_INTERRUPT) {
+ qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
+ is_input, 0,
+ hb_mult(maxp) * max_packet(maxp)));
+ qh->start = NO_FRAME;
+
+ if (urb->dev->speed == USB_SPEED_HIGH) {
+ qh->c_usecs = 0;
+ qh->gap_uf = 0;
+
+ qh->period = urb->interval >> 3;
+ if (qh->period == 0 && urb->interval != 1) {
+ /* NOTE interval 2 or 4 uframes could work.
+ * But interval 1 scheduling is simpler, and
+ * includes high bandwidth.
+ */
+ urb->interval = 1;
+ } else if (qh->period > fotg210->periodic_size) {
+ qh->period = fotg210->periodic_size;
+ urb->interval = qh->period << 3;
+ }
+ } else {
+ int think_time;
+
+ /* gap is f(FS/LS transfer times) */
+ qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
+ is_input, 0, maxp) / (125 * 1000);
+
+ /* FIXME this just approximates SPLIT/CSPLIT times */
+ if (is_input) { /* SPLIT, gap, CSPLIT+DATA */
+ qh->c_usecs = qh->usecs + HS_USECS(0);
+ qh->usecs = HS_USECS(1);
+ } else { /* SPLIT+DATA, gap, CSPLIT */
+ qh->usecs += HS_USECS(1);
+ qh->c_usecs = HS_USECS(0);
+ }
+
+ think_time = tt ? tt->think_time : 0;
+ qh->tt_usecs = NS_TO_US(think_time +
+ usb_calc_bus_time(urb->dev->speed,
+ is_input, 0, max_packet(maxp)));
+ qh->period = urb->interval;
+ if (qh->period > fotg210->periodic_size) {
+ qh->period = fotg210->periodic_size;
+ urb->interval = qh->period;
+ }
+ }
+ }
+
+ /* support for tt scheduling, and access to toggles */
+ qh->dev = urb->dev;
+
+ /* using TT? */
+ switch (urb->dev->speed) {
+ case USB_SPEED_LOW:
+ info1 |= QH_LOW_SPEED;
+ /* FALL THROUGH */
+
+ case USB_SPEED_FULL:
+ /* EPS 0 means "full" */
+ if (type != PIPE_INTERRUPT)
+ info1 |= (FOTG210_TUNE_RL_TT << 28);
+ if (type == PIPE_CONTROL) {
+ info1 |= QH_CONTROL_EP; /* for TT */
+ info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
+ }
+ info1 |= maxp << 16;
+
+ info2 |= (FOTG210_TUNE_MULT_TT << 30);
+
+ /* Some Freescale processors have an erratum in which the
+ * port number in the queue head is 0..N-1 instead of 1..N.
+ */
+ if (fotg210_has_fsl_portno_bug(fotg210))
+ info2 |= (urb->dev->ttport-1) << 23;
+ else
+ info2 |= urb->dev->ttport << 23;
+
+ /* set the address of the TT; for TDI's integrated
+ * root hub tt, leave it zeroed.
+ */
+ if (tt && tt->hub != fotg210_to_hcd(fotg210)->self.root_hub)
+ info2 |= tt->hub->devnum << 16;
+
+ /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
+
+ break;
+
+ case USB_SPEED_HIGH: /* no TT involved */
+ info1 |= QH_HIGH_SPEED;
+ if (type == PIPE_CONTROL) {
+ info1 |= (FOTG210_TUNE_RL_HS << 28);
+ info1 |= 64 << 16; /* usb2 fixed maxpacket */
+ info1 |= QH_TOGGLE_CTL; /* toggle from qtd */
+ info2 |= (FOTG210_TUNE_MULT_HS << 30);
+ } else if (type == PIPE_BULK) {
+ info1 |= (FOTG210_TUNE_RL_HS << 28);
+ /* The USB spec says that high speed bulk endpoints
+ * always use 512 byte maxpacket. But some device
+ * vendors decided to ignore that, and MSFT is happy
+ * to help them do so. So now people expect to use
+ * such nonconformant devices with Linux too; sigh.
+ */
+ info1 |= max_packet(maxp) << 16;
+ info2 |= (FOTG210_TUNE_MULT_HS << 30);
+ } else { /* PIPE_INTERRUPT */
+ info1 |= max_packet(maxp) << 16;
+ info2 |= hb_mult(maxp) << 30;
+ }
+ break;
+ default:
+ fotg210_dbg(fotg210, "bogus dev %p speed %d\n", urb->dev,
+ urb->dev->speed);
+done:
+ qh_destroy(fotg210, qh);
+ return NULL;
+ }
+
+ /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
+
+ /* init as live, toggle clear, advance to dummy */
+ qh->qh_state = QH_STATE_IDLE;
+ hw = qh->hw;
+ hw->hw_info1 = cpu_to_hc32(fotg210, info1);
+ hw->hw_info2 = cpu_to_hc32(fotg210, info2);
+ qh->is_out = !is_input;
+ usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1);
+ qh_refresh(fotg210, qh);
+ return qh;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void enable_async(struct fotg210_hcd *fotg210)
+{
+ if (fotg210->async_count++)
+ return;
+
+ /* Stop waiting to turn off the async schedule */
+ fotg210->enabled_hrtimer_events &= ~BIT(FOTG210_HRTIMER_DISABLE_ASYNC);
+
+ /* Don't start the schedule until ASS is 0 */
+ fotg210_poll_ASS(fotg210);
+ turn_on_io_watchdog(fotg210);
+}
+
+static void disable_async(struct fotg210_hcd *fotg210)
+{
+ if (--fotg210->async_count)
+ return;
+
+ /* The async schedule and async_unlink list are supposed to be empty */
+ WARN_ON(fotg210->async->qh_next.qh || fotg210->async_unlink);
+
+ /* Don't turn off the schedule until ASS is 1 */
+ fotg210_poll_ASS(fotg210);
+}
+
+/* move qh (and its qtds) onto async queue; maybe enable queue. */
+
+static void qh_link_async(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ __hc32 dma = QH_NEXT(fotg210, qh->qh_dma);
+ struct fotg210_qh *head;
+
+ /* Don't link a QH if there's a Clear-TT-Buffer pending */
+ if (unlikely(qh->clearing_tt))
+ return;
+
+ WARN_ON(qh->qh_state != QH_STATE_IDLE);
+
+ /* clear halt and/or toggle; and maybe recover from silicon quirk */
+ qh_refresh(fotg210, qh);
+
+ /* splice right after start */
+ head = fotg210->async;
+ qh->qh_next = head->qh_next;
+ qh->hw->hw_next = head->hw->hw_next;
+ wmb();
+
+ head->qh_next.qh = qh;
+ head->hw->hw_next = dma;
+
+ qh->xacterrs = 0;
+ qh->qh_state = QH_STATE_LINKED;
+ /* qtd completions reported later by interrupt */
+
+ enable_async(fotg210);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * For control/bulk/interrupt, return QH with these TDs appended.
+ * Allocates and initializes the QH if necessary.
+ * Returns null if it can't allocate a QH it needs to.
+ * If the QH has TDs (urbs) already, that's great.
+ */
+static struct fotg210_qh *qh_append_tds(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ struct list_head *qtd_list,
+ int epnum,
+ void **ptr
+)
+{
+ struct fotg210_qh *qh = NULL;
+ __hc32 qh_addr_mask = cpu_to_hc32(fotg210, 0x7f);
+
+ qh = (struct fotg210_qh *) *ptr;
+ if (unlikely(qh == NULL)) {
+ /* can't sleep here, we have fotg210->lock... */
+ qh = qh_make(fotg210, urb, GFP_ATOMIC);
+ *ptr = qh;
+ }
+ if (likely(qh != NULL)) {
+ struct fotg210_qtd *qtd;
+
+ if (unlikely(list_empty(qtd_list)))
+ qtd = NULL;
+ else
+ qtd = list_entry(qtd_list->next, struct fotg210_qtd,
+ qtd_list);
+
+ /* control qh may need patching ... */
+ if (unlikely(epnum == 0)) {
+ /* usb_reset_device() briefly reverts to address 0 */
+ if (usb_pipedevice(urb->pipe) == 0)
+ qh->hw->hw_info1 &= ~qh_addr_mask;
+ }
+
+ /* just one way to queue requests: swap with the dummy qtd.
+ * only hc or qh_refresh() ever modify the overlay.
+ */
+ if (likely(qtd != NULL)) {
+ struct fotg210_qtd *dummy;
+ dma_addr_t dma;
+ __hc32 token;
+
+ /* to avoid racing the HC, use the dummy td instead of
+ * the first td of our list (becomes new dummy). both
+ * tds stay deactivated until we're done, when the
+ * HC is allowed to fetch the old dummy (4.10.2).
+ */
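+ /*
+ * In other words: the old dummy takes over the contents of the
+ * first new qtd (so any overlay the HC holds stays valid), while
+ * that first qtd is re-initialized below to serve as the new
+ * dummy that the hardware queue ends with.
+ */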
+ token = qtd->hw_token;
+ qtd->hw_token = HALT_BIT(fotg210);
+
+ dummy = qh->dummy;
+
+ dma = dummy->qtd_dma;
+ *dummy = *qtd;
+ dummy->qtd_dma = dma;
+
+ list_del(&qtd->qtd_list);
+ list_add(&dummy->qtd_list, qtd_list);
+ list_splice_tail(qtd_list, &qh->qtd_list);
+
+ fotg210_qtd_init(fotg210, qtd, qtd->qtd_dma);
+ qh->dummy = qtd;
+
+ /* hc must see the new dummy at list end */
+ dma = qtd->qtd_dma;
+ qtd = list_entry(qh->qtd_list.prev,
+ struct fotg210_qtd, qtd_list);
+ qtd->hw_next = QTD_NEXT(fotg210, dma);
+
+ /* let the hc process these next qtds */
+ wmb();
+ dummy->hw_token = token;
+
+ urb->hcpriv = qh;
+ }
+ }
+ return qh;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int
+submit_async(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ struct list_head *qtd_list,
+ gfp_t mem_flags
+) {
+ int epnum;
+ unsigned long flags;
+ struct fotg210_qh *qh = NULL;
+ int rc;
+
+ epnum = urb->ep->desc.bEndpointAddress;
+
+#ifdef FOTG210_URB_TRACE
+ {
+ struct fotg210_qtd *qtd;
+ qtd = list_entry(qtd_list->next, struct fotg210_qtd, qtd_list);
+ fotg210_dbg(fotg210,
+ "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ __func__, urb->dev->devpath, urb,
+ epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
+ urb->transfer_buffer_length,
+ qtd, urb->ep->hcpriv);
+ }
+#endif
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+ if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) {
+ rc = -ESHUTDOWN;
+ goto done;
+ }
+ rc = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb);
+ if (unlikely(rc))
+ goto done;
+
+ qh = qh_append_tds(fotg210, urb, qtd_list, epnum, &urb->ep->hcpriv);
+ if (unlikely(qh == NULL)) {
+ usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ /* Control/bulk operations through TTs don't need scheduling,
+ * the HC and TT handle it when the TT has a buffer ready.
+ */
+ if (likely(qh->qh_state == QH_STATE_IDLE))
+ qh_link_async(fotg210, qh);
+ done:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ if (unlikely(qh == NULL))
+ qtd_list_free(fotg210, urb, qtd_list);
+ return rc;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void single_unlink_async(struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh)
+{
+ struct fotg210_qh *prev;
+
+ /* Add to the end of the list of QHs waiting for the next IAAD */
+ qh->qh_state = QH_STATE_UNLINK;
+ if (fotg210->async_unlink)
+ fotg210->async_unlink_last->unlink_next = qh;
+ else
+ fotg210->async_unlink = qh;
+ fotg210->async_unlink_last = qh;
+
+ /* Unlink it from the schedule */
+ prev = fotg210->async;
+ while (prev->qh_next.qh != qh)
+ prev = prev->qh_next.qh;
+
+ prev->hw->hw_next = qh->hw->hw_next;
+ prev->qh_next = qh->qh_next;
+ if (fotg210->qh_scan_next == qh)
+ fotg210->qh_scan_next = qh->qh_next.qh;
+}
+
+static void start_iaa_cycle(struct fotg210_hcd *fotg210, bool nested)
+{
+ /*
+ * Do nothing if an IAA cycle is already running or
+ * if one will be started shortly.
+ */
+ if (fotg210->async_iaa || fotg210->async_unlinking)
+ return;
+
+ /* Do all the waiting QHs at once */
+ fotg210->async_iaa = fotg210->async_unlink;
+ fotg210->async_unlink = NULL;
+
+ /* If the controller isn't running, we don't have to wait for it */
+ if (unlikely(fotg210->rh_state < FOTG210_RH_RUNNING)) {
+ if (!nested) /* Avoid recursion */
+ end_unlink_async(fotg210);
+
+ /* Otherwise start a new IAA cycle */
+ } else if (likely(fotg210->rh_state == FOTG210_RH_RUNNING)) {
+ /* Make sure the unlinks are all visible to the hardware */
+ wmb();
+
+ fotg210_writel(fotg210, fotg210->command | CMD_IAAD,
+ &fotg210->regs->command);
+ fotg210_readl(fotg210, &fotg210->regs->command);
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_IAA_WATCHDOG,
+ true);
+ }
+}
+
+/* the async qh for the qtds being unlinked is now gone from the HC */
+
+static void end_unlink_async(struct fotg210_hcd *fotg210)
+{
+ struct fotg210_qh *qh;
+
+ /* Process the idle QHs */
+ restart:
+ fotg210->async_unlinking = true;
+ while (fotg210->async_iaa) {
+ qh = fotg210->async_iaa;
+ fotg210->async_iaa = qh->unlink_next;
+ qh->unlink_next = NULL;
+
+ qh->qh_state = QH_STATE_IDLE;
+ qh->qh_next.qh = NULL;
+
+ qh_completions(fotg210, qh);
+ if (!list_empty(&qh->qtd_list) &&
+ fotg210->rh_state == FOTG210_RH_RUNNING)
+ qh_link_async(fotg210, qh);
+ disable_async(fotg210);
+ }
+ fotg210->async_unlinking = false;
+
+ /* Start a new IAA cycle if any QHs are waiting for it */
+ if (fotg210->async_unlink) {
+ start_iaa_cycle(fotg210, true);
+ if (unlikely(fotg210->rh_state < FOTG210_RH_RUNNING))
+ goto restart;
+ }
+}
+
+static void unlink_empty_async(struct fotg210_hcd *fotg210)
+{
+ struct fotg210_qh *qh, *next;
+ bool stopped = (fotg210->rh_state < FOTG210_RH_RUNNING);
+ bool check_unlinks_later = false;
+
+ /* Unlink all the async QHs that have been empty for a timer cycle */
+ next = fotg210->async->qh_next.qh;
+ while (next) {
+ qh = next;
+ next = qh->qh_next.qh;
+
+ if (list_empty(&qh->qtd_list) &&
+ qh->qh_state == QH_STATE_LINKED) {
+ if (!stopped && qh->unlink_cycle ==
+ fotg210->async_unlink_cycle)
+ check_unlinks_later = true;
+ else
+ single_unlink_async(fotg210, qh);
+ }
+ }
+
+ /* Start a new IAA cycle if any QHs are waiting for it */
+ if (fotg210->async_unlink)
+ start_iaa_cycle(fotg210, false);
+
+ /* QHs that haven't been empty for long enough will be handled later */
+ if (check_unlinks_later) {
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_ASYNC_UNLINKS,
+ true);
+ ++fotg210->async_unlink_cycle;
+ }
+}
+
+/* makes sure the async qh will become idle */
+/* caller must own fotg210->lock */
+
+static void start_unlink_async(struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh)
+{
+ /*
+ * If the QH isn't linked then there's nothing we can do
+ * unless we were called during a giveback, in which case
+ * qh_completions() has to deal with it.
+ */
+ if (qh->qh_state != QH_STATE_LINKED) {
+ if (qh->qh_state == QH_STATE_COMPLETING)
+ qh->needs_rescan = 1;
+ return;
+ }
+
+ single_unlink_async(fotg210, qh);
+ start_iaa_cycle(fotg210, false);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void scan_async(struct fotg210_hcd *fotg210)
+{
+ struct fotg210_qh *qh;
+ bool check_unlinks_later = false;
+
+ fotg210->qh_scan_next = fotg210->async->qh_next.qh;
+ while (fotg210->qh_scan_next) {
+ qh = fotg210->qh_scan_next;
+ fotg210->qh_scan_next = qh->qh_next.qh;
+ rescan:
+ /* clean any finished work for this qh */
+ if (!list_empty(&qh->qtd_list)) {
+ int temp;
+
+ /*
+ * Unlinks could happen here; completion reporting
+ * drops the lock. That's why fotg210->qh_scan_next
+ * always holds the next qh to scan; if the next qh
+ * gets unlinked then fotg210->qh_scan_next is adjusted
+ * in single_unlink_async().
+ */
+ temp = qh_completions(fotg210, qh);
+ if (qh->needs_rescan) {
+ start_unlink_async(fotg210, qh);
+ } else if (list_empty(&qh->qtd_list)
+ && qh->qh_state == QH_STATE_LINKED) {
+ qh->unlink_cycle = fotg210->async_unlink_cycle;
+ check_unlinks_later = true;
+ } else if (temp != 0)
+ goto rescan;
+ }
+ }
+
+ /*
+ * Unlink empty entries, reducing DMA usage as well
+ * as HCD schedule-scanning costs. Delay for any qh
+ * we just scanned, since it's not unusual for it to
+ * become busy again shortly.
+ */
+ if (check_unlinks_later && fotg210->rh_state == FOTG210_RH_RUNNING &&
+ !(fotg210->enabled_hrtimer_events &
+ BIT(FOTG210_HRTIMER_ASYNC_UNLINKS))) {
+ fotg210_enable_event(fotg210,
+ FOTG210_HRTIMER_ASYNC_UNLINKS, true);
+ ++fotg210->async_unlink_cycle;
+ }
+}
+/*-------------------------------------------------------------------------*/
+/*
+ * EHCI scheduled transaction support: interrupt, iso, split iso
+ * These are called "periodic" transactions in the EHCI spec.
+ *
+ * Note that for interrupt transfers, the QH/QTD manipulation is shared
+ * with the "asynchronous" transaction support (control/bulk transfers).
+ * The only real difference is in how interrupt transfers are scheduled.
+ *
+ * For ISO, we make an "iso_stream" head to serve the same role as a QH.
+ * It keeps track of every ITD (or SITD) that's linked, and holds enough
+ * pre-calculated schedule data to make appending to the queue be quick.
+ */
+
+static int fotg210_get_frame(struct usb_hcd *hcd);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * periodic_next_shadow - return "next" pointer on shadow list
+ * @periodic: host pointer to qh/itd
+ * @tag: hardware tag for type of this record
+ */
+static union fotg210_shadow *
+periodic_next_shadow(struct fotg210_hcd *fotg210,
+ union fotg210_shadow *periodic, __hc32 tag)
+{
+ switch (hc32_to_cpu(fotg210, tag)) {
+ case Q_TYPE_QH:
+ return &periodic->qh->qh_next;
+ case Q_TYPE_FSTN:
+ return &periodic->fstn->fstn_next;
+ default:
+ return &periodic->itd->itd_next;
+ }
+}
+
+static __hc32 *
+shadow_next_periodic(struct fotg210_hcd *fotg210,
+ union fotg210_shadow *periodic, __hc32 tag)
+{
+ switch (hc32_to_cpu(fotg210, tag)) {
+ /* our fotg210_shadow.qh is actually software part */
+ case Q_TYPE_QH:
+ return &periodic->qh->hw->hw_next;
+ /* others are hw parts */
+ default:
+ return periodic->hw_next;
+ }
+}
+
+/* caller must hold fotg210->lock */
+static void periodic_unlink(struct fotg210_hcd *fotg210, unsigned frame,
+ void *ptr)
+{
+ union fotg210_shadow *prev_p = &fotg210->pshadow[frame];
+ __hc32 *hw_p = &fotg210->periodic[frame];
+ union fotg210_shadow here = *prev_p;
+
+ /* find predecessor of "ptr"; hw and shadow lists are in sync */
+ while (here.ptr && here.ptr != ptr) {
+ prev_p = periodic_next_shadow(fotg210, prev_p,
+ Q_NEXT_TYPE(fotg210, *hw_p));
+ hw_p = shadow_next_periodic(fotg210, &here,
+ Q_NEXT_TYPE(fotg210, *hw_p));
+ here = *prev_p;
+ }
+ /* an interrupt entry (at list end) could have been shared */
+ if (!here.ptr)
+ return;
+
+ /* update shadow and hardware lists ... the old "next" pointers
+ * from ptr may still be in use, the caller updates them.
+ */
+ *prev_p = *periodic_next_shadow(fotg210, &here,
+ Q_NEXT_TYPE(fotg210, *hw_p));
+
+ *hw_p = *shadow_next_periodic(fotg210, &here,
+ Q_NEXT_TYPE(fotg210, *hw_p));
+}
+
+/* how many of the uframe's 125 usecs are allocated? */
+static unsigned short
+periodic_usecs(struct fotg210_hcd *fotg210, unsigned frame, unsigned uframe)
+{
+ __hc32 *hw_p = &fotg210->periodic[frame];
+ union fotg210_shadow *q = &fotg210->pshadow[frame];
+ unsigned usecs = 0;
+ struct fotg210_qh_hw *hw;
+
+ while (q->ptr) {
+ switch (hc32_to_cpu(fotg210, Q_NEXT_TYPE(fotg210, *hw_p))) {
+ case Q_TYPE_QH:
+ hw = q->qh->hw;
+ /* is it in the S-mask? */
+ if (hw->hw_info2 & cpu_to_hc32(fotg210, 1 << uframe))
+ usecs += q->qh->usecs;
+ /* ... or C-mask? */
+ if (hw->hw_info2 & cpu_to_hc32(fotg210,
+ 1 << (8 + uframe)))
+ usecs += q->qh->c_usecs;
+ hw_p = &hw->hw_next;
+ q = &q->qh->qh_next;
+ break;
+ /* case Q_TYPE_FSTN: */
+ default:
+ /* for "save place" FSTNs, count the relevant INTR
+ * bandwidth from the previous frame
+ */
+ if (q->fstn->hw_prev != FOTG210_LIST_END(fotg210))
+ fotg210_dbg(fotg210, "ignoring FSTN cost ...\n");
+
+ hw_p = &q->fstn->hw_next;
+ q = &q->fstn->fstn_next;
+ break;
+ case Q_TYPE_ITD:
+ if (q->itd->hw_transaction[uframe])
+ usecs += q->itd->stream->usecs;
+ hw_p = &q->itd->hw_next;
+ q = &q->itd->itd_next;
+ break;
+ }
+ }
+#ifdef DEBUG
+ if (usecs > fotg210->uframe_periodic_max)
+ fotg210_err(fotg210, "uframe %d sched overrun: %d usecs\n",
+ frame * 8 + uframe, usecs);
+#endif
+ return usecs;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int same_tt(struct usb_device *dev1, struct usb_device *dev2)
+{
+ if (!dev1->tt || !dev2->tt)
+ return 0;
+ if (dev1->tt != dev2->tt)
+ return 0;
+ if (dev1->tt->multi)
+ return dev1->ttport == dev2->ttport;
+ else
+ return 1;
+}
+
+/* return true iff the device's transaction translator is available
+ * for a periodic transfer starting at the specified frame, using
+ * all the uframes in the mask.
+ */
+static int tt_no_collision(
+ struct fotg210_hcd *fotg210,
+ unsigned period,
+ struct usb_device *dev,
+ unsigned frame,
+ u32 uf_mask
+)
+{
+ if (period == 0) /* error */
+ return 0;
+
+ /* note bandwidth wastage: split never follows csplit
+ * (different dev or endpoint) until the next uframe.
+ * calling convention doesn't make that distinction.
+ */
+ for (; frame < fotg210->periodic_size; frame += period) {
+ union fotg210_shadow here;
+ __hc32 type;
+ struct fotg210_qh_hw *hw;
+
+ here = fotg210->pshadow[frame];
+ type = Q_NEXT_TYPE(fotg210, fotg210->periodic[frame]);
+ while (here.ptr) {
+ switch (hc32_to_cpu(fotg210, type)) {
+ case Q_TYPE_ITD:
+ type = Q_NEXT_TYPE(fotg210, here.itd->hw_next);
+ here = here.itd->itd_next;
+ continue;
+ case Q_TYPE_QH:
+ hw = here.qh->hw;
+ if (same_tt(dev, here.qh->dev)) {
+ u32 mask;
+
+ mask = hc32_to_cpu(fotg210,
+ hw->hw_info2);
+ /* "knows" no gap is needed */
+ mask |= mask >> 8;
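+ /*
+ * The fold above ORs the C-mask (bits 8..15 of hw_info2)
+ * down onto the S-mask bits, so the single compare below
+ * catches collisions in both SSPLIT and CSPLIT uframes.
+ */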
+ if (mask & uf_mask)
+ break;
+ }
+ type = Q_NEXT_TYPE(fotg210, hw->hw_next);
+ here = here.qh->qh_next;
+ continue;
+ /* case Q_TYPE_FSTN: */
+ default:
+ fotg210_dbg(fotg210,
+ "periodic frame %d bogus type %d\n",
+ frame, type);
+ }
+
+ /* collision or error */
+ return 0;
+ }
+ }
+
+ /* no collision */
+ return 1;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void enable_periodic(struct fotg210_hcd *fotg210)
+{
+ if (fotg210->periodic_count++)
+ return;
+
+ /* Stop waiting to turn off the periodic schedule */
+ fotg210->enabled_hrtimer_events &=
+ ~BIT(FOTG210_HRTIMER_DISABLE_PERIODIC);
+
+ /* Don't start the schedule until PSS is 0 */
+ fotg210_poll_PSS(fotg210);
+ turn_on_io_watchdog(fotg210);
+}
+
+static void disable_periodic(struct fotg210_hcd *fotg210)
+{
+ if (--fotg210->periodic_count)
+ return;
+
+ /* Don't turn off the schedule until PSS is 1 */
+ fotg210_poll_PSS(fotg210);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* periodic schedule slots have iso tds (normal or split) first, then a
+ * sparse tree for active interrupt transfers.
+ *
+ * this just links in a qh; caller guarantees uframe masks are set right.
+ * no FSTN support (yet; fotg210 0.96+)
+ */
+static void qh_link_periodic(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ unsigned i;
+ unsigned period = qh->period;
+
+ dev_dbg(&qh->dev->dev,
+ "link qh%d-%04x/%p start %d [%d/%d us]\n",
+ period, hc32_to_cpup(fotg210, &qh->hw->hw_info2)
+ & (QH_CMASK | QH_SMASK),
+ qh, qh->start, qh->usecs, qh->c_usecs);
+
+ /* high bandwidth, or otherwise every microframe */
+ if (period == 0)
+ period = 1;
+
+ for (i = qh->start; i < fotg210->periodic_size; i += period) {
+ union fotg210_shadow *prev = &fotg210->pshadow[i];
+ __hc32 *hw_p = &fotg210->periodic[i];
+ union fotg210_shadow here = *prev;
+ __hc32 type = 0;
+
+ /* skip the iso nodes at list head */
+ while (here.ptr) {
+ type = Q_NEXT_TYPE(fotg210, *hw_p);
+ if (type == cpu_to_hc32(fotg210, Q_TYPE_QH))
+ break;
+ prev = periodic_next_shadow(fotg210, prev, type);
+ hw_p = shadow_next_periodic(fotg210, &here, type);
+ here = *prev;
+ }
+
+ /* sorting each branch by period (slow-->fast)
+ * enables sharing interior tree nodes
+ */
+ while (here.ptr && qh != here.qh) {
+ if (qh->period > here.qh->period)
+ break;
+ prev = &here.qh->qh_next;
+ hw_p = &here.qh->hw->hw_next;
+ here = *prev;
+ }
+ /* link in this qh, unless some earlier pass did that */
+ if (qh != here.qh) {
+ qh->qh_next = here;
+ if (here.qh)
+ qh->hw->hw_next = *hw_p;
+ wmb();
+ prev->qh = qh;
+ *hw_p = QH_NEXT(fotg210, qh->qh_dma);
+ }
+ }
+ qh->qh_state = QH_STATE_LINKED;
+ qh->xacterrs = 0;
+
+ /* update per-qh bandwidth for usbfs */
+ fotg210_to_hcd(fotg210)->self.bandwidth_allocated += qh->period
+ ? ((qh->usecs + qh->c_usecs) / qh->period)
+ : (qh->usecs * 8);
+
+ list_add(&qh->intr_node, &fotg210->intr_qh_list);
+
+ /* maybe enable periodic schedule processing */
+ ++fotg210->intr_count;
+ enable_periodic(fotg210);
+}
+
+static void qh_unlink_periodic(struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh)
+{
+ unsigned i;
+ unsigned period;
+
+ /*
+ * If qh is for a low/full-speed device, simply unlinking it
+ * could interfere with an ongoing split transaction. To unlink
+ * it safely would require setting the QH_INACTIVATE bit and
+ * waiting at least one frame, as described in EHCI 4.12.2.5.
+ *
+ * We won't bother with any of this. Instead, we assume that the
+ * only reason for unlinking an interrupt QH while the current URB
+ * is still active is to dequeue all the URBs (flush the whole
+ * endpoint queue).
+ *
+ * If rebalancing the periodic schedule is ever implemented, this
+ * approach will no longer be valid.
+ */
+
+ /* high bandwidth, or otherwise part of every microframe */
+ period = qh->period;
+ if (!period)
+ period = 1;
+
+ for (i = qh->start; i < fotg210->periodic_size; i += period)
+ periodic_unlink(fotg210, i, qh);
+
+ /* update per-qh bandwidth for usbfs */
+ fotg210_to_hcd(fotg210)->self.bandwidth_allocated -= qh->period
+ ? ((qh->usecs + qh->c_usecs) / qh->period)
+ : (qh->usecs * 8);
+
+ dev_dbg(&qh->dev->dev,
+ "unlink qh%d-%04x/%p start %d [%d/%d us]\n",
+ qh->period,
+ hc32_to_cpup(fotg210, &qh->hw->hw_info2) &
+ (QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs, qh->c_usecs);
+
+ /* qh->qh_next still "live" to HC */
+ qh->qh_state = QH_STATE_UNLINK;
+ qh->qh_next.ptr = NULL;
+
+ if (fotg210->qh_scan_next == qh)
+ fotg210->qh_scan_next = list_entry(qh->intr_node.next,
+ struct fotg210_qh, intr_node);
+ list_del(&qh->intr_node);
+}
+
+static void start_unlink_intr(struct fotg210_hcd *fotg210,
+ struct fotg210_qh *qh)
+{
+ /* If the QH isn't linked then there's nothing we can do
+ * unless we were called during a giveback, in which case
+ * qh_completions() has to deal with it.
+ */
+ if (qh->qh_state != QH_STATE_LINKED) {
+ if (qh->qh_state == QH_STATE_COMPLETING)
+ qh->needs_rescan = 1;
+ return;
+ }
+
+ qh_unlink_periodic(fotg210, qh);
+
+ /* Make sure the unlinks are visible before starting the timer */
+ wmb();
+
+ /*
+ * The EHCI spec doesn't say how long it takes the controller to
+ * stop accessing an unlinked interrupt QH. The timer delay is
+ * 9 uframes; presumably that will be long enough.
+ */
+ qh->unlink_cycle = fotg210->intr_unlink_cycle;
+
+ /* New entries go at the end of the intr_unlink list */
+ if (fotg210->intr_unlink)
+ fotg210->intr_unlink_last->unlink_next = qh;
+ else
+ fotg210->intr_unlink = qh;
+ fotg210->intr_unlink_last = qh;
+
+ if (fotg210->intr_unlinking)
+ ; /* Avoid recursive calls */
+ else if (fotg210->rh_state < FOTG210_RH_RUNNING)
+ fotg210_handle_intr_unlinks(fotg210);
+ else if (fotg210->intr_unlink == qh) {
+ fotg210_enable_event(fotg210, FOTG210_HRTIMER_UNLINK_INTR,
+ true);
+ ++fotg210->intr_unlink_cycle;
+ }
+}
+
+static void end_unlink_intr(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ struct fotg210_qh_hw *hw = qh->hw;
+ int rc;
+
+ qh->qh_state = QH_STATE_IDLE;
+ hw->hw_next = FOTG210_LIST_END(fotg210);
+
+ qh_completions(fotg210, qh);
+
+ /* reschedule QH iff another request is queued */
+ if (!list_empty(&qh->qtd_list) &&
+ fotg210->rh_state == FOTG210_RH_RUNNING) {
+ rc = qh_schedule(fotg210, qh);
+
+ /* An error here likely indicates handshake failure
+ * or no space left in the schedule. Neither fault
+ * should happen often ...
+ *
+ * FIXME kill the now-dysfunctional queued urbs
+ */
+ if (rc != 0)
+ fotg210_err(fotg210, "can't reschedule qh %p, err %d\n",
+ qh, rc);
+ }
+
+ /* maybe turn off periodic schedule */
+ --fotg210->intr_count;
+ disable_periodic(fotg210);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int check_period(
+ struct fotg210_hcd *fotg210,
+ unsigned frame,
+ unsigned uframe,
+ unsigned period,
+ unsigned usecs
+) {
+ int claimed;
+
+ /* complete split running into next frame?
+ * given FSTN support, we could sometimes check...
+ */
+ if (uframe >= 8)
+ return 0;
+
+ /* convert "usecs we need" to "max already claimed" */
+ usecs = fotg210->uframe_periodic_max - usecs;
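+ /*
+ * Example with illustrative numbers: if uframe_periodic_max is
+ * 100 us and this transfer needs 30 us, any uframe already
+ * carrying more than 70 us of claims is rejected below.
+ */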
+
+ /* we "know" 2 and 4 uframe intervals were rejected; so
+ * for period 0, check _every_ microframe in the schedule.
+ */
+ if (unlikely(period == 0)) {
+ do {
+ for (uframe = 0; uframe < 7; uframe++) {
+ claimed = periodic_usecs(fotg210, frame,
+ uframe);
+ if (claimed > usecs)
+ return 0;
+ }
+ } while ((frame += 1) < fotg210->periodic_size);
+
+ /* just check the specified uframe, at that period */
+ } else {
+ do {
+ claimed = periodic_usecs(fotg210, frame, uframe);
+ if (claimed > usecs)
+ return 0;
+ } while ((frame += period) < fotg210->periodic_size);
+ }
+
+ /* success! */
+ return 1;
+}
+
+static int check_intr_schedule(
+ struct fotg210_hcd *fotg210,
+ unsigned frame,
+ unsigned uframe,
+ const struct fotg210_qh *qh,
+ __hc32 *c_maskp
+)
+{
+ int retval = -ENOSPC;
+ u8 mask = 0;
+
+ if (qh->c_usecs && uframe >= 6) /* FSTN territory? */
+ goto done;
+
+ if (!check_period(fotg210, frame, uframe, qh->period, qh->usecs))
+ goto done;
+ if (!qh->c_usecs) {
+ retval = 0;
+ *c_maskp = 0;
+ goto done;
+ }
+
+ /* Make sure this tt's buffer is also available for CSPLITs.
+ * We pessimize a bit; probably the typical full speed case
+ * doesn't need the second CSPLIT.
+ *
+ * NOTE: both SPLIT and CSPLIT could be checked in just
+ * one smart pass...
+ */
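+ /*
+ * Worked example (illustrative values): with uframe == 1 and
+ * qh->gap_uf == 2, the C-mask covers uframes 3 and 4 (0x18 before
+ * the shift into bits 8..15 of hw_info2), and adding the SSPLIT
+ * uframe gives the 0x1a collision mask passed to tt_no_collision().
+ */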
+ mask = 0x03 << (uframe + qh->gap_uf);
+ *c_maskp = cpu_to_hc32(fotg210, mask << 8);
+
+ mask |= 1 << uframe;
+ if (tt_no_collision(fotg210, qh->period, qh->dev, frame, mask)) {
+ if (!check_period(fotg210, frame, uframe + qh->gap_uf + 1,
+ qh->period, qh->c_usecs))
+ goto done;
+ if (!check_period(fotg210, frame, uframe + qh->gap_uf,
+ qh->period, qh->c_usecs))
+ goto done;
+ retval = 0;
+ }
+done:
+ return retval;
+}
+
+/* "first fit" scheduling policy used the first time through,
+ * or when the previous schedule slot can't be re-used.
+ */
+static int qh_schedule(struct fotg210_hcd *fotg210, struct fotg210_qh *qh)
+{
+ int status;
+ unsigned uframe;
+ __hc32 c_mask;
+ unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
+ struct fotg210_qh_hw *hw = qh->hw;
+
+ qh_refresh(fotg210, qh);
+ hw->hw_next = FOTG210_LIST_END(fotg210);
+ frame = qh->start;
+
+ /* reuse the previous schedule slots, if we can */
+ if (frame < qh->period) {
+ uframe = ffs(hc32_to_cpup(fotg210, &hw->hw_info2) & QH_SMASK);
+ status = check_intr_schedule(fotg210, frame, --uframe,
+ qh, &c_mask);
+ } else {
+ uframe = 0;
+ c_mask = 0;
+ status = -ENOSPC;
+ }
+
+ /* else scan the schedule to find a group of slots such that all
+ * uframes have enough periodic bandwidth available.
+ */
+ if (status) {
+ /* "normal" case, uframing flexible except with splits */
+ if (qh->period) {
+ int i;
+
+ for (i = qh->period; status && i > 0; --i) {
+ frame = ++fotg210->random_frame % qh->period;
+ for (uframe = 0; uframe < 8; uframe++) {
+ status = check_intr_schedule(fotg210,
+ frame, uframe, qh,
+ &c_mask);
+ if (status == 0)
+ break;
+ }
+ }
+
+ /* qh->period == 0 means every uframe */
+ } else {
+ frame = 0;
+ status = check_intr_schedule(fotg210, 0, 0, qh,
+ &c_mask);
+ }
+ if (status)
+ goto done;
+ qh->start = frame;
+
+ /* reset S-frame and (maybe) C-frame masks */
+ hw->hw_info2 &= cpu_to_hc32(fotg210, ~(QH_CMASK | QH_SMASK));
+ hw->hw_info2 |= qh->period
+ ? cpu_to_hc32(fotg210, 1 << uframe)
+ : cpu_to_hc32(fotg210, QH_SMASK);
+ hw->hw_info2 |= c_mask;
+ } else
+ fotg210_dbg(fotg210, "reused qh %p schedule\n", qh);
+
+ /* stuff into the periodic schedule */
+ qh_link_periodic(fotg210, qh);
+done:
+ return status;
+}
+
+static int intr_submit(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ struct list_head *qtd_list,
+ gfp_t mem_flags
+) {
+ unsigned epnum;
+ unsigned long flags;
+ struct fotg210_qh *qh;
+ int status;
+ struct list_head empty;
+
+ /* get endpoint and transfer/schedule data */
+ epnum = urb->ep->desc.bEndpointAddress;
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) {
+ status = -ESHUTDOWN;
+ goto done_not_linked;
+ }
+ status = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb);
+ if (unlikely(status))
+ goto done_not_linked;
+
+ /* get qh and force any scheduling errors */
+ INIT_LIST_HEAD(&empty);
+ qh = qh_append_tds(fotg210, urb, &empty, epnum, &urb->ep->hcpriv);
+ if (qh == NULL) {
+ status = -ENOMEM;
+ goto done;
+ }
+ if (qh->qh_state == QH_STATE_IDLE) {
+ status = qh_schedule(fotg210, qh);
+ if (status)
+ goto done;
+ }
+
+ /* then queue the urb's tds to the qh */
+ qh = qh_append_tds(fotg210, urb, qtd_list, epnum, &urb->ep->hcpriv);
+ BUG_ON(qh == NULL);
+
+ /* ... update usbfs periodic stats */
+ fotg210_to_hcd(fotg210)->self.bandwidth_int_reqs++;
+
+done:
+ if (unlikely(status))
+ usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
+done_not_linked:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ if (status)
+ qtd_list_free(fotg210, urb, qtd_list);
+
+ return status;
+}
+
+static void scan_intr(struct fotg210_hcd *fotg210)
+{
+ struct fotg210_qh *qh;
+
+ list_for_each_entry_safe(qh, fotg210->qh_scan_next,
+ &fotg210->intr_qh_list, intr_node) {
+ rescan:
+ /* clean any finished work for this qh */
+ if (!list_empty(&qh->qtd_list)) {
+ int temp;
+
+ /*
+ * Unlinks could happen here; completion reporting
+ * drops the lock. That's why fotg210->qh_scan_next
+ * always holds the next qh to scan; if the next qh
+ * gets unlinked then fotg210->qh_scan_next is adjusted
+ * in qh_unlink_periodic().
+ */
+ temp = qh_completions(fotg210, qh);
+ if (unlikely(qh->needs_rescan ||
+ (list_empty(&qh->qtd_list) &&
+ qh->qh_state == QH_STATE_LINKED)))
+ start_unlink_intr(fotg210, qh);
+ else if (temp != 0)
+ goto rescan;
+ }
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* fotg210_iso_stream ops work with both ITD and SITD */
+
+static struct fotg210_iso_stream *
+iso_stream_alloc(gfp_t mem_flags)
+{
+ struct fotg210_iso_stream *stream;
+
+ stream = kzalloc(sizeof(*stream), mem_flags);
+ if (likely(stream != NULL)) {
+ INIT_LIST_HEAD(&stream->td_list);
+ INIT_LIST_HEAD(&stream->free_list);
+ stream->next_uframe = -1;
+ }
+ return stream;
+}
+
+static void
+iso_stream_init(
+ struct fotg210_hcd *fotg210,
+ struct fotg210_iso_stream *stream,
+ struct usb_device *dev,
+ int pipe,
+ unsigned interval
+)
+{
+ u32 buf1;
+ unsigned epnum, maxp;
+ int is_input;
+ long bandwidth;
+ unsigned multi;
+
+ /*
+ * this might be a "high bandwidth" highspeed endpoint,
+ * as encoded in the ep descriptor's wMaxPacketSize field
+ */
+ epnum = usb_pipeendpoint(pipe);
+ is_input = usb_pipein(pipe) ? USB_DIR_IN : 0;
+ maxp = usb_maxpacket(dev, pipe, !is_input);
+ if (is_input)
+ buf1 = (1 << 11);
+ else
+ buf1 = 0;
+
+ maxp = max_packet(maxp);
+ multi = hb_mult(maxp);
+ buf1 |= maxp;
+ maxp *= multi;
+
+ stream->buf0 = cpu_to_hc32(fotg210, (epnum << 8) | dev->devnum);
+ stream->buf1 = cpu_to_hc32(fotg210, buf1);
+ stream->buf2 = cpu_to_hc32(fotg210, multi);
+
+ /* usbfs wants to report the average usecs per frame tied up
+ * when transfers on this endpoint are scheduled ...
+ */
+ if (dev->speed == USB_SPEED_FULL) {
+ interval <<= 3;
+ stream->usecs = NS_TO_US(usb_calc_bus_time(dev->speed,
+ is_input, 1, maxp));
+ stream->usecs /= 8;
+ } else {
+ stream->highspeed = 1;
+ stream->usecs = HS_USECS_ISO(maxp);
+ }
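+ /*
+ * Illustrative arithmetic for the average below: an endpoint costing
+ * 40 us per transfer at an interval of 8 uframes averages
+ * 40 * 8 / 8 = 40 us per frame.
+ */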
+ bandwidth = stream->usecs * 8;
+ bandwidth /= interval;
+
+ stream->bandwidth = bandwidth;
+ stream->udev = dev;
+ stream->bEndpointAddress = is_input | epnum;
+ stream->interval = interval;
+ stream->maxp = maxp;
+}
+
+static struct fotg210_iso_stream *
+iso_stream_find(struct fotg210_hcd *fotg210, struct urb *urb)
+{
+ unsigned epnum;
+ struct fotg210_iso_stream *stream;
+ struct usb_host_endpoint *ep;
+ unsigned long flags;
+
+ epnum = usb_pipeendpoint(urb->pipe);
+ if (usb_pipein(urb->pipe))
+ ep = urb->dev->ep_in[epnum];
+ else
+ ep = urb->dev->ep_out[epnum];
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+ stream = ep->hcpriv;
+
+ if (unlikely(stream == NULL)) {
+ stream = iso_stream_alloc(GFP_ATOMIC);
+ if (likely(stream != NULL)) {
+ ep->hcpriv = stream;
+ stream->ep = ep;
+ iso_stream_init(fotg210, stream, urb->dev, urb->pipe,
+ urb->interval);
+ }
+
+ /* if dev->ep[epnum] is a QH, hw is set */
+ } else if (unlikely(stream->hw != NULL)) {
+ fotg210_dbg(fotg210, "dev %s ep%d%s, not iso??\n",
+ urb->dev->devpath, epnum,
+ usb_pipein(urb->pipe) ? "in" : "out");
+ stream = NULL;
+ }
+
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ return stream;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* fotg210_iso_sched ops can be ITD-only or SITD-only */
+
+static struct fotg210_iso_sched *
+iso_sched_alloc(unsigned packets, gfp_t mem_flags)
+{
+ struct fotg210_iso_sched *iso_sched;
+ int size = sizeof(*iso_sched);
+
+ size += packets * sizeof(struct fotg210_iso_packet);
+ iso_sched = kzalloc(size, mem_flags);
+ if (likely(iso_sched != NULL))
+ INIT_LIST_HEAD(&iso_sched->td_list);
+
+ return iso_sched;
+}
+
+static inline void
+itd_sched_init(
+ struct fotg210_hcd *fotg210,
+ struct fotg210_iso_sched *iso_sched,
+ struct fotg210_iso_stream *stream,
+ struct urb *urb
+)
+{
+ unsigned i;
+ dma_addr_t dma = urb->transfer_dma;
+
+ /* how many uframes are needed for these transfers */
+ iso_sched->span = urb->number_of_packets * stream->interval;
+
+ /* figure out per-uframe itd fields that we'll need later
+ * when we fit new itds into the schedule.
+ */
+ for (i = 0; i < urb->number_of_packets; i++) {
+ struct fotg210_iso_packet *uframe = &iso_sched->packet[i];
+ unsigned length;
+ dma_addr_t buf;
+ u32 trans;
+
+ length = urb->iso_frame_desc[i].length;
+ buf = dma + urb->iso_frame_desc[i].offset;
+
+ trans = FOTG210_ISOC_ACTIVE;
+ trans |= buf & 0x0fff;
+ if (unlikely(((i + 1) == urb->number_of_packets))
+ && !(urb->transfer_flags & URB_NO_INTERRUPT))
+ trans |= FOTG210_ITD_IOC;
+ trans |= length << 16;
+ uframe->transaction = cpu_to_hc32(fotg210, trans);
+
+ /* might need to cross a buffer page within a uframe */
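+ /*
+ * (For example, a 1 KB packet whose buffer starts 512 bytes
+ * before a 4 KB boundary spills into the next page, so
+ * itd_patch() must program a second buffer-pointer slot;
+ * the numbers are illustrative.)
+ */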
+ uframe->bufp = (buf & ~(u64)0x0fff);
+ buf += length;
+ if (unlikely((uframe->bufp != (buf & ~(u64)0x0fff))))
+ uframe->cross = 1;
+ }
+}
+
+static void
+iso_sched_free(
+ struct fotg210_iso_stream *stream,
+ struct fotg210_iso_sched *iso_sched
+)
+{
+ if (!iso_sched)
+ return;
+ /* caller must hold fotg210->lock! */
+ list_splice(&iso_sched->td_list, &stream->free_list);
+ kfree(iso_sched);
+}
+
+static int
+itd_urb_transaction(
+ struct fotg210_iso_stream *stream,
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ gfp_t mem_flags
+)
+{
+ struct fotg210_itd *itd;
+ dma_addr_t itd_dma;
+ int i;
+ unsigned num_itds;
+ struct fotg210_iso_sched *sched;
+ unsigned long flags;
+
+ sched = iso_sched_alloc(urb->number_of_packets, mem_flags);
+ if (unlikely(sched == NULL))
+ return -ENOMEM;
+
+ itd_sched_init(fotg210, sched, stream, urb);
+
+ if (urb->interval < 8)
+ num_itds = 1 + (sched->span + 7) / 8;
+ else
+ num_itds = urb->number_of_packets;
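+ /*
+ * For example (illustrative): 16 packets at interval 1 span 16
+ * uframes and need 1 + (16 + 7) / 8 = 3 iTDs; at interval 8 or
+ * more, each packet lands in its own frame, one iTD per packet.
+ */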
+
+ /* allocate/init ITDs */
+ spin_lock_irqsave(&fotg210->lock, flags);
+ for (i = 0; i < num_itds; i++) {
+
+ /*
+ * Use iTDs from the free list, but not iTDs that may
+ * still be in use by the hardware.
+ */
+ if (likely(!list_empty(&stream->free_list))) {
+ itd = list_first_entry(&stream->free_list,
+ struct fotg210_itd, itd_list);
+ if (itd->frame == fotg210->now_frame)
+ goto alloc_itd;
+ list_del(&itd->itd_list);
+ itd_dma = itd->itd_dma;
+ } else {
+ alloc_itd:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ itd = dma_pool_alloc(fotg210->itd_pool, mem_flags,
+ &itd_dma);
+ spin_lock_irqsave(&fotg210->lock, flags);
+ if (!itd) {
+ iso_sched_free(stream, sched);
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ return -ENOMEM;
+ }
+ }
+
+ memset(itd, 0, sizeof(*itd));
+ itd->itd_dma = itd_dma;
+ list_add(&itd->itd_list, &sched->td_list);
+ }
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+
+ /* temporarily store schedule info in hcpriv */
+ urb->hcpriv = sched;
+ urb->error_count = 0;
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline int
+itd_slot_ok(
+ struct fotg210_hcd *fotg210,
+ u32 mod,
+ u32 uframe,
+ u8 usecs,
+ u32 period
+)
+{
+ uframe %= period;
+ do {
+ /* can't commit more than uframe_periodic_max usec */
+ if (periodic_usecs(fotg210, uframe >> 3, uframe & 0x7)
+ > (fotg210->uframe_periodic_max - usecs))
+ return 0;
+
+ /* we know urb->interval is 2^N uframes */
+ uframe += period;
+ } while (uframe < mod);
+ return 1;
+}
+
+/*
+ * This scheduler plans almost as far into the future as it has actual
+ * periodic schedule slots. (Affected by TUNE_FLS, which defaults to
+ * "as small as possible" to be cache-friendlier.) That limits the size
+ * of the transfers you can stream reliably; avoid more than 64 msec per urb.
+ * Also avoid queue depths of less than fotg210's worst irq latency (affected
+ * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter,
+ * and other factors); or more than about 230 msec total (for portability,
+ * given FOTG210_TUNE_FLS and the slop). Or, write a smarter scheduler!
+ */
+
+#define SCHEDULE_SLOP 80 /* microframes */
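+/*
+ * Rough numbers behind the comment above (illustrative, assuming
+ * FOTG210_TUNE_FLS selects a 256-frame periodic list): the schedule spans
+ * 256 frames * 8 uframes = 2048 uframes, i.e. 256 ms, and twice the
+ * 80-uframe slop costs another 20 ms, which is roughly where the
+ * "about 230 msec total" guidance comes from.
+ */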
+
+static int
+iso_stream_schedule(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ struct fotg210_iso_stream *stream
+)
+{
+ u32 now, next, start, period, span;
+ int status;
+ unsigned mod = fotg210->periodic_size << 3;
+ struct fotg210_iso_sched *sched = urb->hcpriv;
+
+ period = urb->interval;
+ span = sched->span;
+
+ if (span > mod - SCHEDULE_SLOP) {
+ fotg210_dbg(fotg210, "iso request %p too long\n", urb);
+ status = -EFBIG;
+ goto fail;
+ }
+
+ now = fotg210_read_frame_index(fotg210) & (mod - 1);
+
+ /* Typical case: reuse current schedule, stream is still active.
+ * Hopefully there are no gaps from the host falling behind
+ * (irq delays etc), but if there are we'll take the next
+ * slot in the schedule, implicitly assuming URB_ISO_ASAP.
+ */
+ if (likely(!list_empty(&stream->td_list))) {
+ u32 excess;
+
+ /* For high speed devices, allow scheduling within the
+ * isochronous scheduling threshold. For full speed devices
+ * and Intel PCI-based controllers, don't (workaround for
+ * Intel ICH9 bug).
+ */
+ if (!stream->highspeed && fotg210->fs_i_thresh)
+ next = now + fotg210->i_thresh;
+ else
+ next = now;
+
+ /* Fell behind (by up to twice the slop amount)?
+ * We decide based on the time of the last currently-scheduled
+ * slot, not the time of the next available slot.
+ */
+ excess = (stream->next_uframe - period - next) & (mod - 1);
+ if (excess >= mod - 2 * SCHEDULE_SLOP)
+ start = next + excess - mod + period *
+ DIV_ROUND_UP(mod - excess, period);
+ else
+ start = next + excess + period;
+ if (start - now >= mod) {
+ fotg210_dbg(fotg210, "request %p would overflow (%d+%d >= %d)\n",
+ urb, start - now - period, period,
+ mod);
+ status = -EFBIG;
+ goto fail;
+ }
+ }
+
+ /* need to schedule; when's the next (u)frame we could start?
+ * this is bigger than fotg210->i_thresh allows; scheduling itself
+ * isn't free, the slop should handle reasonably slow cpus. it
+ * can also help high bandwidth if the dma and irq loads don't
+ * jump until after the queue is primed.
+ */
+ else {
+ int done = 0;
+ start = SCHEDULE_SLOP + (now & ~0x07);
+
+ /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
+
+ /* find a uframe slot with enough bandwidth.
+ * Early uframes are more precious because full-speed
+ * iso IN transfers can't use late uframes,
+ * and therefore they should be allocated last.
+ */
+ next = start;
+ start += period;
+ do {
+ start--;
+ /* check schedule: enough space? */
+ if (itd_slot_ok(fotg210, mod, start,
+ stream->usecs, period))
+ done = 1;
+ } while (start > next && !done);
+
+ /* no room in the schedule */
+ if (!done) {
+ fotg210_dbg(fotg210, "iso resched full %p (now %d max %d)\n",
+ urb, now, now + mod);
+ status = -ENOSPC;
+ goto fail;
+ }
+ }
+
+ /* Tried to schedule too far into the future? */
+ if (unlikely(start - now + span - period
+ >= mod - 2 * SCHEDULE_SLOP)) {
+ fotg210_dbg(fotg210, "request %p would overflow (%d+%d >= %d)\n",
+ urb, start - now, span - period,
+ mod - 2 * SCHEDULE_SLOP);
+ status = -EFBIG;
+ goto fail;
+ }
+
+ stream->next_uframe = start & (mod - 1);
+
+ /* report high speed start in uframes; full speed, in frames */
+ urb->start_frame = stream->next_uframe;
+ if (!stream->highspeed)
+ urb->start_frame >>= 3;
+
+ /* Make sure scan_isoc() sees these */
+ if (fotg210->isoc_count == 0)
+ fotg210->next_frame = now >> 3;
+ return 0;
+
+ fail:
+ iso_sched_free(stream, sched);
+ urb->hcpriv = NULL;
+ return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static inline void
+itd_init(struct fotg210_hcd *fotg210, struct fotg210_iso_stream *stream,
+ struct fotg210_itd *itd)
+{
+ int i;
+
+ /* it's been recently zeroed */
+ itd->hw_next = FOTG210_LIST_END(fotg210);
+ itd->hw_bufp[0] = stream->buf0;
+ itd->hw_bufp[1] = stream->buf1;
+ itd->hw_bufp[2] = stream->buf2;
+
+ for (i = 0; i < 8; i++)
+ itd->index[i] = -1;
+
+ /* All other fields are filled when scheduling */
+}
+
+static inline void
+itd_patch(
+ struct fotg210_hcd *fotg210,
+ struct fotg210_itd *itd,
+ struct fotg210_iso_sched *iso_sched,
+ unsigned index,
+ u16 uframe
+)
+{
+ struct fotg210_iso_packet *uf = &iso_sched->packet[index];
+ unsigned pg = itd->pg;
+
+ uframe &= 0x07;
+ itd->index[uframe] = index;
+
+ itd->hw_transaction[uframe] = uf->transaction;
+ itd->hw_transaction[uframe] |= cpu_to_hc32(fotg210, pg << 12);
+ itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, uf->bufp & ~(u32)0);
+ itd->hw_bufp_hi[pg] |= cpu_to_hc32(fotg210, (u32)(uf->bufp >> 32));
+
+ /* iso_frame_desc[].offset must be strictly increasing */
+ if (unlikely(uf->cross)) {
+ u64 bufp = uf->bufp + 4096;
+
+ itd->pg = ++pg;
+ itd->hw_bufp[pg] |= cpu_to_hc32(fotg210, bufp & ~(u32)0);
+ itd->hw_bufp_hi[pg] |= cpu_to_hc32(fotg210, (u32)(bufp >> 32));
+ }
+}
+
+static inline void
+itd_link(struct fotg210_hcd *fotg210, unsigned frame, struct fotg210_itd *itd)
+{
+ union fotg210_shadow *prev = &fotg210->pshadow[frame];
+ __hc32 *hw_p = &fotg210->periodic[frame];
+ union fotg210_shadow here = *prev;
+ __hc32 type = 0;
+
+ /* skip any iso nodes which might belong to previous microframes */
+ while (here.ptr) {
+ type = Q_NEXT_TYPE(fotg210, *hw_p);
+ if (type == cpu_to_hc32(fotg210, Q_TYPE_QH))
+ break;
+ prev = periodic_next_shadow(fotg210, prev, type);
+ hw_p = shadow_next_periodic(fotg210, &here, type);
+ here = *prev;
+ }
+
+ itd->itd_next = here;
+ itd->hw_next = *hw_p;
+ prev->itd = itd;
+ itd->frame = frame;
+ wmb();
+ *hw_p = cpu_to_hc32(fotg210, itd->itd_dma | Q_TYPE_ITD);
+}
+
+/* fit urb's itds into the selected schedule slot; activate as needed */
+static void itd_link_urb(
+ struct fotg210_hcd *fotg210,
+ struct urb *urb,
+ unsigned mod,
+ struct fotg210_iso_stream *stream
+)
+{
+ int packet;
+ unsigned next_uframe, uframe, frame;
+ struct fotg210_iso_sched *iso_sched = urb->hcpriv;
+ struct fotg210_itd *itd;
+
+ next_uframe = stream->next_uframe & (mod - 1);
+
+ if (unlikely(list_empty(&stream->td_list))) {
+ fotg210_to_hcd(fotg210)->self.bandwidth_allocated
+ += stream->bandwidth;
+ fotg210_vdbg(fotg210,
+ "schedule devp %s ep%d%s-iso period %d start %d.%d\n",
+ urb->dev->devpath, stream->bEndpointAddress & 0x0f,
+ (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
+ urb->interval,
+ next_uframe >> 3, next_uframe & 0x7);
+ }
+
+ /* fill iTDs uframe by uframe */
+ for (packet = 0, itd = NULL; packet < urb->number_of_packets;) {
+ if (itd == NULL) {
+ /* ASSERT: we have all necessary itds */
+
+ /* ASSERT: no itds for this endpoint in this uframe */
+
+ itd = list_entry(iso_sched->td_list.next,
+ struct fotg210_itd, itd_list);
+ list_move_tail(&itd->itd_list, &stream->td_list);
+ itd->stream = stream;
+ itd->urb = urb;
+ itd_init(fotg210, stream, itd);
+ }
+
+ uframe = next_uframe & 0x07;
+ frame = next_uframe >> 3;
+
+ itd_patch(fotg210, itd, iso_sched, packet, uframe);
+
+ next_uframe += stream->interval;
+ next_uframe &= mod - 1;
+ packet++;
+
+ /* link completed itds into the schedule */
+ if (((next_uframe >> 3) != frame)
+ || packet == urb->number_of_packets) {
+ itd_link(fotg210, frame & (fotg210->periodic_size - 1),
+ itd);
+ itd = NULL;
+ }
+ }
+ stream->next_uframe = next_uframe;
+
+ /* don't need that schedule data any more */
+ iso_sched_free(stream, iso_sched);
+ urb->hcpriv = NULL;
+
+ ++fotg210->isoc_count;
+ enable_periodic(fotg210);
+}
+
+#define ISO_ERRS (FOTG210_ISOC_BUF_ERR | FOTG210_ISOC_BABBLE |\
+ FOTG210_ISOC_XACTERR)
+
+/* Process and recycle a completed ITD. Return true iff its urb completed,
+ * and hence its completion callback probably added things to the hardware
+ * schedule.
+ *
+ * Note that we carefully avoid recycling this descriptor until after any
+ * completion callback runs, so that it won't be reused quickly. That is,
+ * assuming (a) no more than two urbs per frame on this endpoint, and also
+ * (b) only this endpoint's completions submit URBs. It seems some silicon
+ * corrupts things if you reuse completed descriptors very quickly...
+ */
+static bool itd_complete(struct fotg210_hcd *fotg210, struct fotg210_itd *itd)
+{
+ struct urb *urb = itd->urb;
+ struct usb_iso_packet_descriptor *desc;
+ u32 t;
+ unsigned uframe;
+ int urb_index = -1;
+ struct fotg210_iso_stream *stream = itd->stream;
+ struct usb_device *dev;
+ bool retval = false;
+
+ /* for each uframe with a packet */
+ for (uframe = 0; uframe < 8; uframe++) {
+ if (likely(itd->index[uframe] == -1))
+ continue;
+ urb_index = itd->index[uframe];
+ desc = &urb->iso_frame_desc[urb_index];
+
+ t = hc32_to_cpup(fotg210, &itd->hw_transaction[uframe]);
+ itd->hw_transaction[uframe] = 0;
+
+ /* report transfer status */
+ if (unlikely(t & ISO_ERRS)) {
+ urb->error_count++;
+ if (t & FOTG210_ISOC_BUF_ERR)
+ desc->status = usb_pipein(urb->pipe)
+ ? -ENOSR /* hc couldn't read */
+ : -ECOMM; /* hc couldn't write */
+ else if (t & FOTG210_ISOC_BABBLE)
+ desc->status = -EOVERFLOW;
+ else /* (t & FOTG210_ISOC_XACTERR) */
+ desc->status = -EPROTO;
+
+ /* HC need not update length with this error */
+ if (!(t & FOTG210_ISOC_BABBLE)) {
+ desc->actual_length =
+ fotg210_itdlen(urb, desc, t);
+ urb->actual_length += desc->actual_length;
+ }
+ } else if (likely((t & FOTG210_ISOC_ACTIVE) == 0)) {
+ desc->status = 0;
+ desc->actual_length = fotg210_itdlen(urb, desc, t);
+ urb->actual_length += desc->actual_length;
+ } else {
+ /* URB was too late */
+ desc->status = -EXDEV;
+ }
+ }
+
+ /* handle completion now? */
+ if (likely((urb_index + 1) != urb->number_of_packets))
+ goto done;
+
+ /* ASSERT: it's really the last itd for this urb
+ list_for_each_entry (itd, &stream->td_list, itd_list)
+ BUG_ON (itd->urb == urb);
+ */
+
+ /* give urb back to the driver; completion often (re)submits */
+ dev = urb->dev;
+ fotg210_urb_done(fotg210, urb, 0);
+ retval = true;
+ urb = NULL;
+
+ --fotg210->isoc_count;
+ disable_periodic(fotg210);
+
+ if (unlikely(list_is_singular(&stream->td_list))) {
+ fotg210_to_hcd(fotg210)->self.bandwidth_allocated
+ -= stream->bandwidth;
+ fotg210_vdbg(fotg210,
+ "deschedule devp %s ep%d%s-iso\n",
+ dev->devpath, stream->bEndpointAddress & 0x0f,
+ (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
+ }
+
+done:
+ itd->urb = NULL;
+
+ /* Add to the end of the free list for later reuse */
+ list_move_tail(&itd->itd_list, &stream->free_list);
+
+ /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */
+ if (list_empty(&stream->td_list)) {
+ list_splice_tail_init(&stream->free_list,
+ &fotg210->cached_itd_list);
+ start_free_itds(fotg210);
+ }
+
+ return retval;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int itd_submit(struct fotg210_hcd *fotg210, struct urb *urb,
+ gfp_t mem_flags)
+{
+ int status = -EINVAL;
+ unsigned long flags;
+ struct fotg210_iso_stream *stream;
+
+ /* Get iso_stream head */
+ stream = iso_stream_find(fotg210, urb);
+ if (unlikely(stream == NULL)) {
+ fotg210_dbg(fotg210, "can't get iso stream\n");
+ return -ENOMEM;
+ }
+ if (unlikely(urb->interval != stream->interval &&
+ fotg210_port_speed(fotg210, 0) ==
+ USB_PORT_STAT_HIGH_SPEED)) {
+ fotg210_dbg(fotg210, "can't change iso interval %d --> %d\n",
+ stream->interval, urb->interval);
+ goto done;
+ }
+
+#ifdef FOTG210_URB_TRACE
+ fotg210_dbg(fotg210,
+ "%s %s urb %p ep%d%s len %d, %d pkts %d uframes[%p]\n",
+ __func__, urb->dev->devpath, urb,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipein(urb->pipe) ? "in" : "out",
+ urb->transfer_buffer_length,
+ urb->number_of_packets, urb->interval,
+ stream);
+#endif
+
+ /* allocate ITDs w/o locking anything */
+ status = itd_urb_transaction(stream, fotg210, urb, mem_flags);
+ if (unlikely(status < 0)) {
+ fotg210_dbg(fotg210, "can't init itds\n");
+ goto done;
+ }
+
+ /* schedule ... need to lock */
+ spin_lock_irqsave(&fotg210->lock, flags);
+ if (unlikely(!HCD_HW_ACCESSIBLE(fotg210_to_hcd(fotg210)))) {
+ status = -ESHUTDOWN;
+ goto done_not_linked;
+ }
+ status = usb_hcd_link_urb_to_ep(fotg210_to_hcd(fotg210), urb);
+ if (unlikely(status))
+ goto done_not_linked;
+ status = iso_stream_schedule(fotg210, urb, stream);
+ if (likely(status == 0))
+ itd_link_urb(fotg210, urb, fotg210->periodic_size << 3, stream);
+ else
+ usb_hcd_unlink_urb_from_ep(fotg210_to_hcd(fotg210), urb);
+ done_not_linked:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ done:
+ return status;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void scan_isoc(struct fotg210_hcd *fotg210)
+{
+ unsigned uf, now_frame, frame;
+ unsigned fmask = fotg210->periodic_size - 1;
+ bool modified, live;
+
+ /*
+ * When running, scan from the last scan point up to "now";
+ * otherwise clean up by scanning everything that's left.
+ * Touches as few pages as possible: cache-friendly.
+ */
+ if (fotg210->rh_state >= FOTG210_RH_RUNNING) {
+ uf = fotg210_read_frame_index(fotg210);
+ now_frame = (uf >> 3) & fmask;
+ live = true;
+ } else {
+ now_frame = (fotg210->next_frame - 1) & fmask;
+ live = false;
+ }
+ fotg210->now_frame = now_frame;
+
+ frame = fotg210->next_frame;
+ for (;;) {
+ union fotg210_shadow q, *q_p;
+ __hc32 type, *hw_p;
+
+restart:
+ /* scan each element in frame's queue for completions */
+ q_p = &fotg210->pshadow[frame];
+ hw_p = &fotg210->periodic[frame];
+ q.ptr = q_p->ptr;
+ type = Q_NEXT_TYPE(fotg210, *hw_p);
+ modified = false;
+
+ while (q.ptr != NULL) {
+ switch (hc32_to_cpu(fotg210, type)) {
+ case Q_TYPE_ITD:
+ /* If this ITD is still active, leave it for
+ * later processing ... check the next entry.
+ * No need to check for activity unless the
+ * frame is current.
+ */
+ if (frame == now_frame && live) {
+ rmb();
+ for (uf = 0; uf < 8; uf++) {
+ if (q.itd->hw_transaction[uf] &
+ ITD_ACTIVE(fotg210))
+ break;
+ }
+ if (uf < 8) {
+ q_p = &q.itd->itd_next;
+ hw_p = &q.itd->hw_next;
+ type = Q_NEXT_TYPE(fotg210,
+ q.itd->hw_next);
+ q = *q_p;
+ break;
+ }
+ }
+
+ /* Take finished ITDs out of the schedule
+ * and process them: recycle, maybe report
+ * URB completion. HC won't cache the
+ * pointer for much longer, if at all.
+ */
+ *q_p = q.itd->itd_next;
+ *hw_p = q.itd->hw_next;
+ type = Q_NEXT_TYPE(fotg210, q.itd->hw_next);
+ wmb();
+ modified = itd_complete(fotg210, q.itd);
+ q = *q_p;
+ break;
+ default:
+ fotg210_dbg(fotg210, "corrupt type %d frame %d shadow %p\n",
+ type, frame, q.ptr);
+ /* FALL THROUGH */
+ case Q_TYPE_QH:
+ case Q_TYPE_FSTN:
+ /* End of the iTDs and siTDs */
+ q.ptr = NULL;
+ break;
+ }
+
+ /* assume completion callbacks modify the queue */
+ if (unlikely(modified && fotg210->isoc_count > 0))
+ goto restart;
+ }
+
+ /* Stop when we have reached the current frame */
+ if (frame == now_frame)
+ break;
+ frame = (frame + 1) & fmask;
+ }
+ fotg210->next_frame = now_frame;
+}
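+
+/*
+ * Example (illustrative sketch, unused helper): one frame's shadow list
+ * can be walked with the same type-tag convention scan_isoc() relies on.
+ * iTDs are linked ahead of the QHs in a frame (see itd_link()), so counting
+ * them just follows itd_next until the tag changes or the list ends.
+ */
+static inline unsigned fotg210_example_count_itds(struct fotg210_hcd *fotg210,
+ unsigned frame)
+{
+ union fotg210_shadow here = fotg210->pshadow[frame];
+ __hc32 *hw_p = &fotg210->periodic[frame];
+ unsigned count = 0;
+
+ while (here.ptr) {
+ if (Q_NEXT_TYPE(fotg210, *hw_p) !=
+ cpu_to_hc32(fotg210, Q_TYPE_ITD))
+ break;
+ count++;
+ hw_p = &here.itd->hw_next;
+ here = here.itd->itd_next;
+ }
+ return count;
+}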
+/*-------------------------------------------------------------------------*/
+/*
+ * Display / Set uframe_periodic_max
+ */
+static ssize_t show_uframe_periodic_max(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fotg210_hcd *fotg210;
+ int n;
+
+ fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev)));
+ n = scnprintf(buf, PAGE_SIZE, "%d\n", fotg210->uframe_periodic_max);
+ return n;
+}
+
+
+static ssize_t store_uframe_periodic_max(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fotg210_hcd *fotg210;
+ unsigned uframe_periodic_max;
+ unsigned frame, uframe;
+ unsigned short allocated_max;
+ unsigned long flags;
+ ssize_t ret;
+
+ fotg210 = hcd_to_fotg210(bus_to_hcd(dev_get_drvdata(dev)));
+ if (kstrtouint(buf, 0, &uframe_periodic_max) < 0)
+ return -EINVAL;
+
+ if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) {
+ fotg210_info(fotg210, "rejecting invalid request for uframe_periodic_max=%u\n",
+ uframe_periodic_max);
+ return -EINVAL;
+ }
+
+ ret = -EINVAL;
+
+ /*
+ * lock, so that our checking does not race with possible periodic
+ * bandwidth allocation through submitting new urbs.
+ */
+ spin_lock_irqsave(&fotg210->lock, flags);
+
+ /*
+ * for a request to decrease max periodic bandwidth, we have to check
+ * every microframe in the schedule to see whether the decrease is
+ * possible.
+ */
+ if (uframe_periodic_max < fotg210->uframe_periodic_max) {
+ allocated_max = 0;
+
+ for (frame = 0; frame < fotg210->periodic_size; ++frame)
+ for (uframe = 0; uframe < 7; ++uframe)
+ allocated_max = max(allocated_max,
+ periodic_usecs(fotg210, frame, uframe));
+
+ if (allocated_max > uframe_periodic_max) {
+ fotg210_info(fotg210,
+ "cannot decrease uframe_periodic_max becase "
+ "periodic bandwidth is already allocated "
+ "(%u > %u)\n",
+ allocated_max, uframe_periodic_max);
+ goto out_unlock;
+ }
+ }
+
+ /* increasing is always ok */
+
+ fotg210_info(fotg210, "setting max periodic bandwidth to %u%% (== %u usec/uframe)\n",
+ 100 * uframe_periodic_max/125, uframe_periodic_max);
+
+ if (uframe_periodic_max != 100)
+ fotg210_warn(fotg210, "max periodic bandwidth set is non-standard\n");
+
+ fotg210->uframe_periodic_max = uframe_periodic_max;
+ ret = count;
+
+out_unlock:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ return ret;
+}
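+
+/*
+ * Example (illustrative sketch, unused helper): uframe_periodic_max counts
+ * usec out of a 125 usec microframe, so the driver default of 100 is the
+ * standard 80% ceiling reported by the message above.
+ */
+static inline unsigned fotg210_example_uframe_percent(unsigned max_usecs)
+{
+ return 100 * max_usecs / 125; /* e.g. 100 usec -> 80 percent */
+}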
+
+static DEVICE_ATTR(uframe_periodic_max, 0644, show_uframe_periodic_max,
+ store_uframe_periodic_max);
+
+static inline int create_sysfs_files(struct fotg210_hcd *fotg210)
+{
+ struct device *controller = fotg210_to_hcd(fotg210)->self.controller;
+
+ return device_create_file(controller, &dev_attr_uframe_periodic_max);
+}
+
+static inline void remove_sysfs_files(struct fotg210_hcd *fotg210)
+{
+ struct device *controller = fotg210_to_hcd(fotg210)->self.controller;
+
+ device_remove_file(controller, &dev_attr_uframe_periodic_max);
+}
+/*-------------------------------------------------------------------------*/
+
+/* On some systems, leaving remote wakeup enabled prevents system shutdown.
+ * The firmware seems to think that powering off is a wakeup event!
+ * This routine turns off remote wakeup and everything else, on all ports.
+ */
+static void fotg210_turn_off_all_ports(struct fotg210_hcd *fotg210)
+{
+ u32 __iomem *status_reg = &fotg210->regs->port_status;
+
+ fotg210_writel(fotg210, PORT_RWC_BITS, status_reg);
+}
+
+/*
+ * Halt HC, turn off all ports, and let the BIOS use the companion controllers.
+ * Must be called with interrupts enabled and the lock not held.
+ */
+static void fotg210_silence_controller(struct fotg210_hcd *fotg210)
+{
+ fotg210_halt(fotg210);
+
+ spin_lock_irq(&fotg210->lock);
+ fotg210->rh_state = FOTG210_RH_HALTED;
+ fotg210_turn_off_all_ports(fotg210);
+ spin_unlock_irq(&fotg210->lock);
+}
+
+/* fotg210_shutdown kicks in for silicon on any bus (not just pci, etc).
+ * This forcibly disables dma and IRQs, helping kexec and other cases
+ * where the next system software may expect a clean state.
+ */
+static void fotg210_shutdown(struct usb_hcd *hcd)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+
+ spin_lock_irq(&fotg210->lock);
+ fotg210->shutdown = true;
+ fotg210->rh_state = FOTG210_RH_STOPPING;
+ fotg210->enabled_hrtimer_events = 0;
+ spin_unlock_irq(&fotg210->lock);
+
+ fotg210_silence_controller(fotg210);
+
+ hrtimer_cancel(&fotg210->hrtimer);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * fotg210_work is called from some interrupts, timers, and so on.
+ * it calls driver completion functions, after dropping fotg210->lock.
+ */
+static void fotg210_work(struct fotg210_hcd *fotg210)
+{
+ /* another CPU may drop fotg210->lock during a schedule scan while
+ * it reports urb completions. this flag guards against bogus
+ * attempts at re-entrant schedule scanning.
+ */
+ if (fotg210->scanning) {
+ fotg210->need_rescan = true;
+ return;
+ }
+ fotg210->scanning = true;
+
+ rescan:
+ fotg210->need_rescan = false;
+ if (fotg210->async_count)
+ scan_async(fotg210);
+ if (fotg210->intr_count > 0)
+ scan_intr(fotg210);
+ if (fotg210->isoc_count > 0)
+ scan_isoc(fotg210);
+ if (fotg210->need_rescan)
+ goto rescan;
+ fotg210->scanning = false;
+
+ /* the IO watchdog guards against hardware or driver bugs that
+ * misplace IRQs, and should let us run completely without IRQs.
+ * such lossage has been observed on both VT6202 and VT8235.
+ */
+ turn_on_io_watchdog(fotg210);
+}
+
+/*
+ * Called when the fotg210_hcd module is removed.
+ */
+static void fotg210_stop(struct usb_hcd *hcd)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+
+ fotg210_dbg(fotg210, "stop\n");
+
+ /* no more interrupts ... */
+
+ spin_lock_irq(&fotg210->lock);
+ fotg210->enabled_hrtimer_events = 0;
+ spin_unlock_irq(&fotg210->lock);
+
+ fotg210_quiesce(fotg210);
+ fotg210_silence_controller(fotg210);
+ fotg210_reset(fotg210);
+
+ hrtimer_cancel(&fotg210->hrtimer);
+ remove_sysfs_files(fotg210);
+ remove_debug_files(fotg210);
+
+ /* root hub is shut down separately (first, when possible) */
+ spin_lock_irq(&fotg210->lock);
+ end_free_itds(fotg210);
+ spin_unlock_irq(&fotg210->lock);
+ fotg210_mem_cleanup(fotg210);
+
+#ifdef FOTG210_STATS
+ fotg210_dbg(fotg210, "irq normal %ld err %ld iaa %ld (lost %ld)\n",
+ fotg210->stats.normal, fotg210->stats.error, fotg210->stats.iaa,
+ fotg210->stats.lost_iaa);
+ fotg210_dbg(fotg210, "complete %ld unlink %ld\n",
+ fotg210->stats.complete, fotg210->stats.unlink);
+#endif
+
+ dbg_status(fotg210, "fotg210_stop completed",
+ fotg210_readl(fotg210, &fotg210->regs->status));
+}
+
+/* one-time init, only for memory state */
+static int hcd_fotg210_init(struct usb_hcd *hcd)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ u32 temp;
+ int retval;
+ u32 hcc_params;
+ struct fotg210_qh_hw *hw;
+
+ spin_lock_init(&fotg210->lock);
+
+ /*
+ * keep the io watchdog on by default; good HCDs can turn it off later
+ */
+ fotg210->need_io_watchdog = 1;
+
+ hrtimer_init(&fotg210->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ fotg210->hrtimer.function = fotg210_hrtimer_func;
+ fotg210->next_hrtimer_event = FOTG210_HRTIMER_NO_EVENT;
+
+ hcc_params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
+
+ /*
+ * by default set standard 80% (== 100 usec/uframe) max periodic
+ * bandwidth as required by USB 2.0
+ */
+ fotg210->uframe_periodic_max = 100;
+
+ /*
+ * hw default: 1K periodic list heads, one per frame.
+ * periodic_size can shrink by USBCMD update if hcc_params allows.
+ */
+ fotg210->periodic_size = DEFAULT_I_TDPS;
+ INIT_LIST_HEAD(&fotg210->intr_qh_list);
+ INIT_LIST_HEAD(&fotg210->cached_itd_list);
+
+ if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+ /* periodic schedule size can be smaller than default */
+ switch (FOTG210_TUNE_FLS) {
+ case 0:
+ fotg210->periodic_size = 1024;
+ break;
+ case 1:
+ fotg210->periodic_size = 512;
+ break;
+ case 2:
+ fotg210->periodic_size = 256;
+ break;
+ default:
+ BUG();
+ }
+ }
+ retval = fotg210_mem_init(fotg210, GFP_KERNEL);
+ if (retval < 0)
+ return retval;
+
+ /* controllers may cache some of the periodic schedule ... */
+ fotg210->i_thresh = 2;
+
+ /*
+ * dedicate a qh for the async ring head, since we couldn't unlink
+ * a 'real' qh without stopping the async schedule [4.8]. use it
+ * as the 'reclamation list head' too.
+ * its dummy is used in hw_alt_next of many tds, to prevent the qh
+ * from automatically advancing to the next td after short reads.
+ */
+ fotg210->async->qh_next.qh = NULL;
+ hw = fotg210->async->hw;
+ hw->hw_next = QH_NEXT(fotg210, fotg210->async->qh_dma);
+ hw->hw_info1 = cpu_to_hc32(fotg210, QH_HEAD);
+ hw->hw_token = cpu_to_hc32(fotg210, QTD_STS_HALT);
+ hw->hw_qtd_next = FOTG210_LIST_END(fotg210);
+ fotg210->async->qh_state = QH_STATE_LINKED;
+ hw->hw_alt_next = QTD_NEXT(fotg210, fotg210->async->dummy->qtd_dma);
+
+ /* clear interrupt enables, set irq latency */
+ if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
+ log2_irq_thresh = 0;
+ temp = 1 << (16 + log2_irq_thresh);
+ if (HCC_CANPARK(hcc_params)) {
+ /* HW default park == 3, on hardware that supports it (like
+ * NVidia and ALI silicon), maximizes throughput on the async
+ * schedule by avoiding QH fetches between transfers.
+ *
+ * With fast usb storage devices and NForce2, "park" seems to
+ * cause problems: throughput reduction (!), data errors...
+ */
+ if (park) {
+ park = min_t(unsigned, park, 3);
+ temp |= CMD_PARK;
+ temp |= park << 8;
+ }
+ fotg210_dbg(fotg210, "park %d\n", park);
+ }
+ if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
+ /* periodic schedule size can be smaller than default */
+ temp &= ~(3 << 2);
+ temp |= (FOTG210_TUNE_FLS << 2);
+ }
+ fotg210->command = temp;
+
+ /* Accept arbitrarily long scatter-gather lists */
+ if (!(hcd->driver->flags & HCD_LOCAL_MEM))
+ hcd->self.sg_tablesize = ~0;
+ return 0;
+}
+
+/* start HC running; it's halted, hcd_fotg210_init() has been run (once) */
+static int fotg210_run(struct usb_hcd *hcd)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ u32 temp;
+ u32 hcc_params;
+
+ hcd->uses_new_polling = 1;
+
+ /* EHCI spec section 4.1 */
+
+ fotg210_writel(fotg210, fotg210->periodic_dma,
+ &fotg210->regs->frame_list);
+ fotg210_writel(fotg210, (u32)fotg210->async->qh_dma,
+ &fotg210->regs->async_next);
+
+ /*
+ * hcc_params controls whether fotg210->regs->segment must (!!!)
+ * be used; it constrains QH/ITD/SITD and QTD locations.
+ * pci_pool consistent memory always uses segment zero.
+ * streaming mappings for I/O buffers, like pci_map_single(),
+ * can return segments above 4GB, if the device allows.
+ *
+ * NOTE: the dma mask is visible through dma_supported(), so
+ * drivers can pass this info along ... like NETIF_F_HIGHDMA,
+ * Scsi_Host.highmem_io, and so forth. It's readonly to all
+ * host side drivers though.
+ */
+ hcc_params = fotg210_readl(fotg210, &fotg210->caps->hcc_params);
+
+ /*
+ * Philips, Intel, and maybe others need CMD_RUN before the
+ * root hub will detect new devices (why?); NEC doesn't
+ */
+ fotg210->command &= ~(CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET);
+ fotg210->command |= CMD_RUN;
+ fotg210_writel(fotg210, fotg210->command, &fotg210->regs->command);
+ dbg_cmd(fotg210, "init", fotg210->command);
+
+ /*
+ * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
+ * are explicitly handed to companion controller(s), so no TT is
+ * involved with the root hub. (Except where one is integrated,
+ * and there's no companion controller unless maybe for USB OTG.)
+ *
+ * Turning on the CF flag will transfer ownership of all ports
+ * from the companions to the EHCI controller. If any of the
+ * companions are in the middle of a port reset at the time, it
+ * could cause trouble. Write-locking ehci_cf_port_reset_rwsem
+ * guarantees that no resets are in progress. After we set CF,
+ * a short delay lets the hardware catch up; new resets shouldn't
+ * be started before the port switching actions could complete.
+ */
+ down_write(&ehci_cf_port_reset_rwsem);
+ fotg210->rh_state = FOTG210_RH_RUNNING;
+ /* unblock posted writes */
+ fotg210_readl(fotg210, &fotg210->regs->command);
+ msleep(5);
+ up_write(&ehci_cf_port_reset_rwsem);
+ fotg210->last_periodic_enable = ktime_get_real();
+
+ temp = HC_VERSION(fotg210,
+ fotg210_readl(fotg210, &fotg210->caps->hc_capbase));
+ fotg210_info(fotg210,
+ "USB %x.%x started, EHCI %x.%02x\n",
+ ((fotg210->sbrn & 0xf0)>>4), (fotg210->sbrn & 0x0f),
+ temp >> 8, temp & 0xff);
+
+ fotg210_writel(fotg210, INTR_MASK,
+ &fotg210->regs->intr_enable); /* Turn On Interrupts */
+
+ /* GRR this is run-once init(), being done every time the HC starts.
+ * So long as they're part of class devices, we can't do it in init()
+ * since the class device isn't created that early.
+ */
+ create_debug_files(fotg210);
+ create_sysfs_files(fotg210);
+
+ return 0;
+}
+
+static int fotg210_setup(struct usb_hcd *hcd)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ int retval;
+
+ fotg210->regs = (void __iomem *)fotg210->caps +
+ HC_LENGTH(fotg210,
+ fotg210_readl(fotg210, &fotg210->caps->hc_capbase));
+ dbg_hcs_params(fotg210, "reset");
+ dbg_hcc_params(fotg210, "reset");
+
+ /* cache this readonly data; minimize chip reads */
+ fotg210->hcs_params = fotg210_readl(fotg210,
+ &fotg210->caps->hcs_params);
+
+ fotg210->sbrn = HCD_USB2;
+
+ /* data structure init */
+ retval = hcd_fotg210_init(hcd);
+ if (retval)
+ return retval;
+
+ retval = fotg210_halt(fotg210);
+ if (retval)
+ return retval;
+
+ fotg210_reset(fotg210);
+
+ return 0;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static irqreturn_t fotg210_irq(struct usb_hcd *hcd)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ u32 status, masked_status, pcd_status = 0, cmd;
+ int bh;
+
+ spin_lock(&fotg210->lock);
+
+ status = fotg210_readl(fotg210, &fotg210->regs->status);
+
+ /* e.g. cardbus physical eject */
+ if (status == ~(u32) 0) {
+ fotg210_dbg(fotg210, "device removed\n");
+ goto dead;
+ }
+
+ /*
+ * We don't use STS_FLR, but some controllers don't like it to
+ * remain on, so mask it out along with the other status bits.
+ */
+ masked_status = status & (INTR_MASK | STS_FLR);
+
+ /* Shared IRQ? */
+ if (!masked_status ||
+ unlikely(fotg210->rh_state == FOTG210_RH_HALTED)) {
+ spin_unlock(&fotg210->lock);
+ return IRQ_NONE;
+ }
+
+ /* clear (just) interrupts */
+ fotg210_writel(fotg210, masked_status, &fotg210->regs->status);
+ cmd = fotg210_readl(fotg210, &fotg210->regs->command);
+ bh = 0;
+
+#ifdef VERBOSE_DEBUG
+ /* unrequested/ignored: Frame List Rollover */
+ dbg_status(fotg210, "irq", status);
+#endif
+
+ /* INT, ERR, and IAA interrupt rates can be throttled */
+
+ /* normal [4.15.1.2] or error [4.15.1.1] completion */
+ if (likely((status & (STS_INT|STS_ERR)) != 0)) {
+ if (likely((status & STS_ERR) == 0))
+ COUNT(fotg210->stats.normal);
+ else
+ COUNT(fotg210->stats.error);
+ bh = 1;
+ }
+
+ /* complete the unlinking of some qh [4.15.2.3] */
+ if (status & STS_IAA) {
+
+ /* Turn off the IAA watchdog */
+ fotg210->enabled_hrtimer_events &=
+ ~BIT(FOTG210_HRTIMER_IAA_WATCHDOG);
+
+ /*
+ * Mild optimization: Allow another IAAD to reset the
+ * hrtimer, if one occurs before the next expiration.
+ * In theory we could always cancel the hrtimer, but
+ * tests show that about half the time it will be reset
+ * for some other event anyway.
+ */
+ if (fotg210->next_hrtimer_event == FOTG210_HRTIMER_IAA_WATCHDOG)
+ ++fotg210->next_hrtimer_event;
+
+ /* guard against (alleged) silicon errata */
+ if (cmd & CMD_IAAD)
+ fotg210_dbg(fotg210, "IAA with IAAD still set?\n");
+ if (fotg210->async_iaa) {
+ COUNT(fotg210->stats.iaa);
+ end_unlink_async(fotg210);
+ } else
+ fotg210_dbg(fotg210, "IAA with nothing unlinked?\n");
+ }
+
+ /* remote wakeup [4.3.1] */
+ if (status & STS_PCD) {
+ int pstatus;
+ u32 __iomem *status_reg = &fotg210->regs->port_status;
+
+ /* kick root hub later */
+ pcd_status = status;
+
+ /* resume root hub? */
+ if (fotg210->rh_state == FOTG210_RH_SUSPENDED)
+ usb_hcd_resume_root_hub(hcd);
+
+ pstatus = fotg210_readl(fotg210, status_reg);
+
+ if (test_bit(0, &fotg210->suspended_ports) &&
+ ((pstatus & PORT_RESUME) ||
+ !(pstatus & PORT_SUSPEND)) &&
+ (pstatus & PORT_PE) &&
+ fotg210->reset_done[0] == 0) {
+
+ /* start 20 msec resume signaling from this port,
+ * and make khubd collect PORT_STAT_C_SUSPEND to
+ * stop that signaling. Use 5 ms extra for safety,
+ * like usb_port_resume() does.
+ */
+ fotg210->reset_done[0] = jiffies + msecs_to_jiffies(25);
+ set_bit(0, &fotg210->resuming_ports);
+ fotg210_dbg(fotg210, "port 1 remote wakeup\n");
+ mod_timer(&hcd->rh_timer, fotg210->reset_done[0]);
+ }
+ }
+
+ /* PCI errors [4.15.2.4] */
+ if (unlikely((status & STS_FATAL) != 0)) {
+ fotg210_err(fotg210, "fatal error\n");
+ dbg_cmd(fotg210, "fatal", cmd);
+ dbg_status(fotg210, "fatal", status);
+dead:
+ usb_hc_died(hcd);
+
+ /* Don't let the controller do anything more */
+ fotg210->shutdown = true;
+ fotg210->rh_state = FOTG210_RH_STOPPING;
+ fotg210->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE);
+ fotg210_writel(fotg210, fotg210->command,
+ &fotg210->regs->command);
+ fotg210_writel(fotg210, 0, &fotg210->regs->intr_enable);
+ fotg210_handle_controller_death(fotg210);
+
+ /* Handle completions when the controller stops */
+ bh = 0;
+ }
+
+ if (bh)
+ fotg210_work(fotg210);
+ spin_unlock(&fotg210->lock);
+ if (pcd_status)
+ usb_hcd_poll_rh_status(hcd);
+ return IRQ_HANDLED;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * non-error returns are a promise to giveback() the urb later
+ * we drop ownership so next owner (or urb unlink) can get it
+ *
+ * urb + dev is in hcd.self.controller.urb_list
+ * we're queueing TDs onto software and hardware lists
+ *
+ * hcd-specific init for hcpriv hasn't been done yet
+ *
+ * NOTE: control, bulk, and interrupt share the same code to append TDs
+ * to a (possibly active) QH, and the same QH scanning code.
+ */
+static int fotg210_urb_enqueue(
+ struct usb_hcd *hcd,
+ struct urb *urb,
+ gfp_t mem_flags
+) {
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ struct list_head qtd_list;
+
+ INIT_LIST_HEAD(&qtd_list);
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ /* qh_completions() code doesn't handle all the fault cases
+ * in multi-TD control transfers. Even 1KB is rare anyway.
+ */
+ if (urb->transfer_buffer_length > (16 * 1024))
+ return -EMSGSIZE;
+ /* FALLTHROUGH */
+ /* case PIPE_BULK: */
+ default:
+ if (!qh_urb_transaction(fotg210, urb, &qtd_list, mem_flags))
+ return -ENOMEM;
+ return submit_async(fotg210, urb, &qtd_list, mem_flags);
+
+ case PIPE_INTERRUPT:
+ if (!qh_urb_transaction(fotg210, urb, &qtd_list, mem_flags))
+ return -ENOMEM;
+ return intr_submit(fotg210, urb, &qtd_list, mem_flags);
+
+ case PIPE_ISOCHRONOUS:
+ return itd_submit(fotg210, urb, mem_flags);
+ }
+}
+
+/* remove from hardware lists
+ * completions normally happen asynchronously
+ */
+
+static int fotg210_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ struct fotg210_qh *qh;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+ rc = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (rc)
+ goto done;
+
+ switch (usb_pipetype(urb->pipe)) {
+ /* case PIPE_CONTROL: */
+ /* case PIPE_BULK:*/
+ default:
+ qh = (struct fotg210_qh *) urb->hcpriv;
+ if (!qh)
+ break;
+ switch (qh->qh_state) {
+ case QH_STATE_LINKED:
+ case QH_STATE_COMPLETING:
+ start_unlink_async(fotg210, qh);
+ break;
+ case QH_STATE_UNLINK:
+ case QH_STATE_UNLINK_WAIT:
+ /* already started */
+ break;
+ case QH_STATE_IDLE:
+ /* QH might be waiting for a Clear-TT-Buffer */
+ qh_completions(fotg210, qh);
+ break;
+ }
+ break;
+
+ case PIPE_INTERRUPT:
+ qh = (struct fotg210_qh *) urb->hcpriv;
+ if (!qh)
+ break;
+ switch (qh->qh_state) {
+ case QH_STATE_LINKED:
+ case QH_STATE_COMPLETING:
+ start_unlink_intr(fotg210, qh);
+ break;
+ case QH_STATE_IDLE:
+ qh_completions(fotg210, qh);
+ break;
+ default:
+ fotg210_dbg(fotg210, "bogus qh %p state %d\n",
+ qh, qh->qh_state);
+ goto done;
+ }
+ break;
+
+ case PIPE_ISOCHRONOUS:
+ /* itd... */
+
+ /* wait till next completion, do it then. */
+ /* completion irqs can wait up to 1024 msec, */
+ break;
+ }
+done:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ return rc;
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* bulk qh holds the data toggle */
+
+static void
+fotg210_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ unsigned long flags;
+ struct fotg210_qh *qh, *tmp;
+
+ /* ASSERT: any requests/urbs are being unlinked */
+ /* ASSERT: nobody can be submitting urbs for this any more */
+
+rescan:
+ spin_lock_irqsave(&fotg210->lock, flags);
+ qh = ep->hcpriv;
+ if (!qh)
+ goto done;
+
+ /* endpoints can be iso streams. for now, we don't
+ * accelerate iso completions ... so spin a while.
+ */
+ if (qh->hw == NULL) {
+ struct fotg210_iso_stream *stream = ep->hcpriv;
+
+ if (!list_empty(&stream->td_list))
+ goto idle_timeout;
+
+ /* BUG_ON(!list_empty(&stream->free_list)); */
+ kfree(stream);
+ goto done;
+ }
+
+ if (fotg210->rh_state < FOTG210_RH_RUNNING)
+ qh->qh_state = QH_STATE_IDLE;
+ switch (qh->qh_state) {
+ case QH_STATE_LINKED:
+ case QH_STATE_COMPLETING:
+ for (tmp = fotg210->async->qh_next.qh;
+ tmp && tmp != qh;
+ tmp = tmp->qh_next.qh)
+ continue;
+ /* periodic qh self-unlinks on empty, and a COMPLETING qh
+ * may already be unlinked.
+ */
+ if (tmp)
+ start_unlink_async(fotg210, qh);
+ /* FALL THROUGH */
+ case QH_STATE_UNLINK: /* wait for hw to finish? */
+ case QH_STATE_UNLINK_WAIT:
+idle_timeout:
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+ schedule_timeout_uninterruptible(1);
+ goto rescan;
+ case QH_STATE_IDLE: /* fully unlinked */
+ if (qh->clearing_tt)
+ goto idle_timeout;
+ if (list_empty(&qh->qtd_list)) {
+ qh_destroy(fotg210, qh);
+ break;
+ }
+ /* else FALL THROUGH */
+ default:
+ /* caller was supposed to have unlinked any requests;
+ * that's not our job. just leak this memory.
+ */
+ fotg210_err(fotg210, "qh %p (#%02x) state %d%s\n",
+ qh, ep->desc.bEndpointAddress, qh->qh_state,
+ list_empty(&qh->qtd_list) ? "" : "(has tds)");
+ break;
+ }
+ done:
+ ep->hcpriv = NULL;
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+}
+
+static void
+fotg210_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ struct fotg210_qh *qh;
+ int eptype = usb_endpoint_type(&ep->desc);
+ int epnum = usb_endpoint_num(&ep->desc);
+ int is_out = usb_endpoint_dir_out(&ep->desc);
+ unsigned long flags;
+
+ if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT)
+ return;
+
+ spin_lock_irqsave(&fotg210->lock, flags);
+ qh = ep->hcpriv;
+
+ /* For Bulk and Interrupt endpoints we maintain the toggle state
+ * in the hardware; the toggle bits in udev aren't used at all.
+ * When an endpoint is reset by usb_clear_halt() we must reset
+ * the toggle bit in the QH.
+ */
+ if (qh) {
+ usb_settoggle(qh->dev, epnum, is_out, 0);
+ if (!list_empty(&qh->qtd_list)) {
+ WARN_ONCE(1, "clear_halt for a busy endpoint\n");
+ } else if (qh->qh_state == QH_STATE_LINKED ||
+ qh->qh_state == QH_STATE_COMPLETING) {
+
+ /* The toggle value in the QH can't be updated
+ * while the QH is active. Unlink it now;
+ * re-linking will call qh_refresh().
+ */
+ if (eptype == USB_ENDPOINT_XFER_BULK)
+ start_unlink_async(fotg210, qh);
+ else
+ start_unlink_intr(fotg210, qh);
+ }
+ }
+ spin_unlock_irqrestore(&fotg210->lock, flags);
+}
+
+static int fotg210_get_frame(struct usb_hcd *hcd)
+{
+ struct fotg210_hcd *fotg210 = hcd_to_fotg210(hcd);
+ return (fotg210_read_frame_index(fotg210) >> 3) %
+ fotg210->periodic_size;
+}
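+
+/*
+ * Example (illustrative sketch, unused helper): FRINDEX counts microframes,
+ * so the frame number and the microframe within it are one shift and one
+ * mask apart; fotg210_get_frame() and scan_isoc() perform the same split.
+ */
+static inline void fotg210_example_split_frindex(unsigned frindex,
+ unsigned *frame, unsigned *uframe)
+{
+ *frame = frindex >> 3; /* 8 microframes per frame */
+ *uframe = frindex & 0x7;
+}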
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * The FOTG210 is an OTG controller whose registers (and irq) are shared
+ * between the host, gadget and OTG functions, so in order to facilitate
+ * role switching we cannot give this host driver exclusive access to them.
+ */
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_LICENSE("GPL");
+
+static const struct hc_driver fotg210_fotg210_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Faraday USB2.0 Host Controller",
+ .hcd_priv_size = sizeof(struct fotg210_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = fotg210_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = hcd_fotg210_init,
+ .start = fotg210_run,
+ .stop = fotg210_stop,
+ .shutdown = fotg210_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = fotg210_urb_enqueue,
+ .urb_dequeue = fotg210_urb_dequeue,
+ .endpoint_disable = fotg210_endpoint_disable,
+ .endpoint_reset = fotg210_endpoint_reset,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = fotg210_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = fotg210_hub_status_data,
+ .hub_control = fotg210_hub_control,
+ .bus_suspend = fotg210_bus_suspend,
+ .bus_resume = fotg210_bus_resume,
+
+ .relinquish_port = fotg210_relinquish_port,
+ .port_handed_over = fotg210_port_handed_over,
+
+ .clear_tt_buffer_complete = fotg210_clear_tt_buffer_complete,
+};
+
+static void fotg210_init(struct fotg210_hcd *fotg210)
+{
+ u32 value;
+
+ iowrite32(GMIR_MDEV_INT | GMIR_MOTG_INT | GMIR_INT_POLARITY,
+ &fotg210->regs->gmir);
+
+ value = ioread32(&fotg210->regs->otgcsr);
+ value &= ~OTGCSR_A_BUS_DROP;
+ value |= OTGCSR_A_BUS_REQ;
+ iowrite32(value, &fotg210->regs->otgcsr);
+}
+
+/**
+ * fotg210_hcd_probe - initialize Faraday FOTG210 HCDs
+ * @pdev: platform device of the FOTG210 host controller
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it.
+ */
+static int fotg210_hcd_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct usb_hcd *hcd;
+ struct resource *res;
+ int irq;
+ int retval = -ENODEV;
+ struct fotg210_hcd *fotg210;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ pdev->dev.power.power_state = PMSG_ON;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev,
+ "Found HC with no IRQ. Check %s setup!\n",
+ dev_name(dev));
+ return -ENODEV;
+ }
+
+ irq = res->start;
+
+ hcd = usb_create_hcd(&fotg210_fotg210_hc_driver, dev,
+ dev_name(dev));
+ if (!hcd) {
+ dev_err(dev, "failed to create hcd with err %d\n", retval);
+ retval = -ENOMEM;
+ goto fail_create_hcd;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(dev,
+ "Found HC with no register addr. Check %s setup!\n",
+ dev_name(dev));
+ retval = -ENODEV;
+ goto fail_request_resource;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->has_tt = 1;
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
+ fotg210_fotg210_hc_driver.description)) {
+ dev_dbg(dev, "controller already in use\n");
+ retval = -EBUSY;
+ goto fail_request_resource;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res) {
+ dev_err(dev,
+ "Found HC with no register addr. Check %s setup!\n",
+ dev_name(dev));
+ retval = -ENODEV;
+ goto fail_ioremap; /* release the requested mem region too */
+ }
+
+ hcd->regs = ioremap_nocache(res->start, resource_size(res));
+ if (hcd->regs == NULL) {
+ dev_dbg(dev, "error mapping memory\n");
+ retval = -EFAULT;
+ goto fail_ioremap;
+ }
+
+ fotg210 = hcd_to_fotg210(hcd);
+
+ fotg210->caps = hcd->regs;
+
+ retval = fotg210_setup(hcd);
+ if (retval)
+ goto fail_add_hcd;
+
+ fotg210_init(fotg210);
+
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (retval) {
+ dev_err(dev, "failed to add hcd with err %d\n", retval);
+ goto fail_add_hcd;
+ }
+
+ return retval;
+
+fail_add_hcd:
+ iounmap(hcd->regs);
+fail_ioremap:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+fail_request_resource:
+ usb_put_hcd(hcd);
+fail_create_hcd:
+ dev_err(dev, "init %s fail, %d\n", dev_name(dev), retval);
+ return retval;
+}
+
+/**
+ * fotg210_hcd_remove - shutdown processing for FOTG210 HCDs
+ * @pdev: platform device of the USB Host Controller being removed
+ *
+ */
+static int fotg210_hcd_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ if (!hcd)
+ return 0;
+
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+static struct platform_driver fotg210_hcd_driver = {
+ .driver = {
+ .name = "fotg210-hcd",
+ },
+ .probe = fotg210_hcd_probe,
+ .remove = fotg210_hcd_remove,
+};
+
+static int __init fotg210_hcd_init(void)
+{
+ int retval = 0;
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ pr_info("%s: " DRIVER_DESC "\n", hcd_name);
+ set_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+ if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) ||
+ test_bit(USB_OHCI_LOADED, &usb_hcds_loaded))
+ pr_warn("Warning! fotg210_hcd should always be loaded before uhci_hcd and ohci_hcd, not after\n");
+
+ pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd\n",
+ hcd_name,
+ sizeof(struct fotg210_qh), sizeof(struct fotg210_qtd),
+ sizeof(struct fotg210_itd));
+
+#ifdef DEBUG
+ fotg210_debug_root = debugfs_create_dir("fotg210", usb_debug_root);
+ if (!fotg210_debug_root) {
+ retval = -ENOENT;
+ goto err_debug;
+ }
+#endif
+
+ retval = platform_driver_register(&fotg210_hcd_driver);
+ if (retval < 0)
+ goto clean;
+ return retval;
+
+clean:
+#ifdef DEBUG
+ debugfs_remove(fotg210_debug_root);
+ fotg210_debug_root = NULL;
+err_debug:
+#endif
+ clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+ return retval;
+}
+module_init(fotg210_hcd_init);
+
+static void __exit fotg210_hcd_cleanup(void)
+{
+ platform_driver_unregister(&fotg210_hcd_driver);
+#ifdef DEBUG
+ debugfs_remove(fotg210_debug_root);
+#endif
+ clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded);
+}
+module_exit(fotg210_hcd_cleanup);
diff --git a/drivers/usb/host/fotg210.h b/drivers/usb/host/fotg210.h
new file mode 100644
index 00000000000..8920f9d3256
--- /dev/null
+++ b/drivers/usb/host/fotg210.h
@@ -0,0 +1,750 @@
+#ifndef __LINUX_FOTG210_H
+#define __LINUX_FOTG210_H
+
+/* definitions used for the EHCI driver */
+
+/*
+ * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to
+ * __leXX (normally) or __beXX (given FOTG210_BIG_ENDIAN_DESC), depending on
+ * the host controller implementation.
+ *
+ * To facilitate the strongest possible byte-order checking from "sparse"
+ * and so on, we use __leXX unless that's not practical.
+ */
+#define __hc32 __le32
+#define __hc16 __le16
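+
+/*
+ * Example (illustrative sketch, unused helper): with the __le32 mapping
+ * above, storing a value into a word shared with the controller is a plain
+ * little-endian store; the cpu_to_hc32()/hc32_to_cpu() helpers at the end
+ * of this header reduce to exactly this.
+ */
+static inline void fotg210_example_set_hc32(__hc32 *word, u32 val)
+{
+ *word = cpu_to_le32(val);
+}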
+
+/* statistics can be kept for tuning/monitoring */
+struct fotg210_stats {
+ /* irq usage */
+ unsigned long normal;
+ unsigned long error;
+ unsigned long iaa;
+ unsigned long lost_iaa;
+
+ /* termination of urbs from core */
+ unsigned long complete;
+ unsigned long unlink;
+};
+
+/* fotg210_hcd->lock guards shared data against other CPUs:
+ * fotg210_hcd: async, unlink, periodic (and shadow), ...
+ * usb_host_endpoint: hcpriv
+ * fotg210_qh: qh_next, qtd_list
+ * fotg210_qtd: qtd_list
+ *
+ * Also, hold this lock when talking to HC registers or
+ * when updating hw_* fields in shared qh/qtd/... structures.
+ */
+
+#define FOTG210_MAX_ROOT_PORTS 1 /* see HCS_N_PORTS */
+
+/*
+ * fotg210_rh_state values of FOTG210_RH_RUNNING or above mean that the
+ * controller may be doing DMA. Lower values mean there's no DMA.
+ */
+enum fotg210_rh_state {
+ FOTG210_RH_HALTED,
+ FOTG210_RH_SUSPENDED,
+ FOTG210_RH_RUNNING,
+ FOTG210_RH_STOPPING
+};
+
+/*
+ * Timer events, ordered by increasing delay length.
+ * Always update event_delays_ns[] and event_handlers[] (defined in
+ * fotg210-hcd.c) in parallel with this list.
+ */
+enum fotg210_hrtimer_event {
+ FOTG210_HRTIMER_POLL_ASS, /* Poll for async schedule off */
+ FOTG210_HRTIMER_POLL_PSS, /* Poll for periodic schedule off */
+ FOTG210_HRTIMER_POLL_DEAD, /* Wait for dead controller to stop */
+ FOTG210_HRTIMER_UNLINK_INTR, /* Wait for interrupt QH unlink */
+ FOTG210_HRTIMER_FREE_ITDS, /* Wait for unused iTDs and siTDs */
+ FOTG210_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */
+ FOTG210_HRTIMER_IAA_WATCHDOG, /* Handle lost IAA interrupts */
+ FOTG210_HRTIMER_DISABLE_PERIODIC, /* Wait to disable periodic sched */
+ FOTG210_HRTIMER_DISABLE_ASYNC, /* Wait to disable async sched */
+ FOTG210_HRTIMER_IO_WATCHDOG, /* Check for missing IRQs */
+ FOTG210_HRTIMER_NUM_EVENTS /* Must come last */
+};
+#define FOTG210_HRTIMER_NO_EVENT 99
+
+struct fotg210_hcd { /* one per controller */
+ /* timing support */
+ enum fotg210_hrtimer_event next_hrtimer_event;
+ unsigned enabled_hrtimer_events;
+ ktime_t hr_timeouts[FOTG210_HRTIMER_NUM_EVENTS];
+ struct hrtimer hrtimer;
+
+ int PSS_poll_count;
+ int ASS_poll_count;
+ int died_poll_count;
+
+ /* glue to the platform bus and HCD framework */
+ struct fotg210_caps __iomem *caps;
+ struct fotg210_regs __iomem *regs;
+ struct fotg210_dbg_port __iomem *debug;
+
+ __u32 hcs_params; /* cached register copy */
+ spinlock_t lock;
+ enum fotg210_rh_state rh_state;
+
+ /* general schedule support */
+ bool scanning:1;
+ bool need_rescan:1;
+ bool intr_unlinking:1;
+ bool async_unlinking:1;
+ bool shutdown:1;
+ struct fotg210_qh *qh_scan_next;
+
+ /* async schedule support */
+ struct fotg210_qh *async;
+ struct fotg210_qh *dummy; /* For AMD quirk use */
+ struct fotg210_qh *async_unlink;
+ struct fotg210_qh *async_unlink_last;
+ struct fotg210_qh *async_iaa;
+ unsigned async_unlink_cycle;
+ unsigned async_count; /* async activity count */
+
+ /* periodic schedule support */
+#define DEFAULT_I_TDPS 1024 /* some HCs can do less */
+ unsigned periodic_size;
+ __hc32 *periodic; /* hw periodic table */
+ dma_addr_t periodic_dma;
+ struct list_head intr_qh_list;
+ unsigned i_thresh; /* uframes HC might cache */
+
+ union fotg210_shadow *pshadow; /* mirror hw periodic table */
+ struct fotg210_qh *intr_unlink;
+ struct fotg210_qh *intr_unlink_last;
+ unsigned intr_unlink_cycle;
+ unsigned now_frame; /* frame from HC hardware */
+ unsigned next_frame; /* scan periodic, start here */
+ unsigned intr_count; /* intr activity count */
+ unsigned isoc_count; /* isoc activity count */
+ unsigned periodic_count; /* periodic activity count */
+ /* max periodic time per uframe */
+ unsigned uframe_periodic_max;
+
+
+ /* list of itds completed while now_frame was still active */
+ struct list_head cached_itd_list;
+ struct fotg210_itd *last_itd_to_free;
+
+ /* per root hub port */
+ unsigned long reset_done[FOTG210_MAX_ROOT_PORTS];
+
+ /* bit vectors (one bit per port) */
+ unsigned long bus_suspended; /* which ports were
+ already suspended at the start of a bus suspend */
+ unsigned long companion_ports; /* which ports are
+ dedicated to the companion controller */
+ unsigned long owned_ports; /* which ports are
+ owned by the companion during a bus suspend */
+ unsigned long port_c_suspend; /* which ports have
+ the change-suspend feature turned on */
+ unsigned long suspended_ports; /* which ports are
+ suspended */
+ unsigned long resuming_ports; /* which ports have
+ started to resume */
+
+ /* per-HC memory pools (could be per-bus, but ...) */
+ struct dma_pool *qh_pool; /* qh per active urb */
+ struct dma_pool *qtd_pool; /* one or more per qh */
+ struct dma_pool *itd_pool; /* itd per iso urb */
+
+ unsigned random_frame;
+ unsigned long next_statechange;
+ ktime_t last_periodic_enable;
+ u32 command;
+
+ /* SILICON QUIRKS */
+ unsigned need_io_watchdog:1;
+ unsigned fs_i_thresh:1; /* Intel iso scheduling */
+
+ u8 sbrn; /* packed release number */
+
+ /* irq statistics */
+#ifdef FOTG210_STATS
+ struct fotg210_stats stats;
+# define COUNT(x) ((x)++)
+#else
+# define COUNT(x)
+#endif
+
+ /* debug files */
+#ifdef DEBUG
+ struct dentry *debug_dir;
+#endif
+};
+
+/* convert between an HCD pointer and the corresponding FOTG210_HCD */
+static inline struct fotg210_hcd *hcd_to_fotg210(struct usb_hcd *hcd)
+{
+ return (struct fotg210_hcd *)(hcd->hcd_priv);
+}
+static inline struct usb_hcd *fotg210_to_hcd(struct fotg210_hcd *fotg210)
+{
+ return container_of((void *) fotg210, struct usb_hcd, hcd_priv);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* EHCI register interface, corresponds to EHCI Revision 0.95 specification */
+
+/* Section 2.2 Host Controller Capability Registers */
+struct fotg210_caps {
+ /* these fields are specified as 8 and 16 bit registers,
+ * but some hosts can't perform 8 or 16 bit PCI accesses.
+ * some hosts treat caplength and hciversion as parts of a 32-bit
+ * register, others treat them as two separate registers, this
+ * affects the memory map for big endian controllers.
+ */
+ u32 hc_capbase;
+#define HC_LENGTH(fotg210, p) (0x00ff&((p) >> /* bits 7:0 / offset 00h */ \
+ (fotg210_big_endian_capbase(fotg210) ? 24 : 0)))
+#define HC_VERSION(fotg210, p) (0xffff&((p) >> /* bits 31:16 / offset 02h */ \
+ (fotg210_big_endian_capbase(fotg210) ? 0 : 16)))
+ u32 hcs_params; /* HCSPARAMS - offset 0x4 */
+#define HCS_N_PORTS(p) (((p)>>0)&0xf) /* bits 3:0, ports on HC */
+
+ u32 hcc_params; /* HCCPARAMS - offset 0x8 */
+#define HCC_CANPARK(p) ((p)&(1 << 2)) /* true: can park on async qh */
+#define HCC_PGM_FRAMELISTLEN(p) ((p)&(1 << 1)) /* true: periodic_size changes*/
+ u8 portroute[8]; /* nibbles for routing - offset 0xC */
+};
+
+
+/* Section 2.3 Host Controller Operational Registers */
+struct fotg210_regs {
+
+ /* USBCMD: offset 0x00 */
+ u32 command;
+
+/* EHCI 1.1 addendum */
+/* 23:16 is r/w intr rate, in microframes; default "8" == 1/msec */
+#define CMD_PARK (1<<11) /* enable "park" on async qh */
+#define CMD_PARK_CNT(c) (((c)>>8)&3) /* how many transfers to park for */
+#define CMD_IAAD (1<<6) /* "doorbell" interrupt async advance */
+#define CMD_ASE (1<<5) /* async schedule enable */
+#define CMD_PSE (1<<4) /* periodic schedule enable */
+/* 3:2 is periodic frame list size */
+#define CMD_RESET (1<<1) /* reset HC not bus */
+#define CMD_RUN (1<<0) /* start/stop HC */
+
+ /* USBSTS: offset 0x04 */
+ u32 status;
+#define STS_ASS (1<<15) /* Async Schedule Status */
+#define STS_PSS (1<<14) /* Periodic Schedule Status */
+#define STS_RECL (1<<13) /* Reclamation */
+#define STS_HALT (1<<12) /* Not running (any reason) */
+/* some bits reserved */
+ /* these STS_* flags are also intr_enable bits (USBINTR) */
+#define STS_IAA (1<<5) /* Interrupted on async advance */
+#define STS_FATAL (1<<4) /* such as some PCI access errors */
+#define STS_FLR (1<<3) /* frame list rolled over */
+#define STS_PCD (1<<2) /* port change detect */
+#define STS_ERR (1<<1) /* "error" completion (overflow, ...) */
+#define STS_INT (1<<0) /* "normal" completion (short, ...) */
+
+ /* USBINTR: offset 0x08 */
+ u32 intr_enable;
+
+ /* FRINDEX: offset 0x0C */
+ u32 frame_index; /* current microframe number */
+ /* CTRLDSSEGMENT: offset 0x10 */
+ u32 segment; /* address bits 63:32 if needed */
+ /* PERIODICLISTBASE: offset 0x14 */
+ u32 frame_list; /* points to periodic list */
+ /* ASYNCLISTADDR: offset 0x18 */
+ u32 async_next; /* address of next async queue head */
+
+ u32 reserved1;
+ /* PORTSC: offset 0x20 */
+ u32 port_status;
+/* 31:23 reserved */
+#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */
+#define PORT_RESET (1<<8) /* reset port */
+#define PORT_SUSPEND (1<<7) /* suspend port */
+#define PORT_RESUME (1<<6) /* resume it */
+#define PORT_PEC (1<<3) /* port enable change */
+#define PORT_PE (1<<2) /* port enable */
+#define PORT_CSC (1<<1) /* connect status change */
+#define PORT_CONNECT (1<<0) /* device connected */
+#define PORT_RWC_BITS (PORT_CSC | PORT_PEC)
+ u32 reserved2[19];
+
+ /* OTGCSR: offset 0x70 */
+ u32 otgcsr;
+#define OTGCSR_HOST_SPD_TYP (3 << 22)
+#define OTGCSR_A_BUS_DROP (1 << 5)
+#define OTGCSR_A_BUS_REQ (1 << 4)
+
+ /* OTGISR: offset 0x74 */
+ u32 otgisr;
+#define OTGISR_OVC (1 << 10)
+
+ u32 reserved3[15];
+
+ /* GMIR: offset 0xB4 */
+ u32 gmir;
+#define GMIR_INT_POLARITY (1 << 3) /*Active High*/
+#define GMIR_MHC_INT (1 << 2)
+#define GMIR_MOTG_INT (1 << 1)
+#define GMIR_MDEV_INT (1 << 0)
+};
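+
+/*
+ * Example (illustrative sketch, unused helper): PORT_CSC and PORT_PEC are
+ * write-one-to-clear, so a read-modify-write of PORTSC must mask them out
+ * unless the intent is to acknowledge those change bits, as
+ * fotg210_turn_off_all_ports() does by writing PORT_RWC_BITS alone.
+ */
+static inline u32 fotg210_example_portsc_rmw(u32 portsc, u32 set_bits)
+{
+ return (portsc & ~PORT_RWC_BITS) | set_bits;
+}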
+
+/* Appendix C, Debug port ... intended for use with special "debug devices"
+ * that can help if there's no serial console. (nonstandard enumeration.)
+ */
+struct fotg210_dbg_port {
+ u32 control;
+#define DBGP_OWNER (1<<30)
+#define DBGP_ENABLED (1<<28)
+#define DBGP_DONE (1<<16)
+#define DBGP_INUSE (1<<10)
+#define DBGP_ERRCODE(x) (((x)>>7)&0x07)
+# define DBGP_ERR_BAD 1
+# define DBGP_ERR_SIGNAL 2
+#define DBGP_ERROR (1<<6)
+#define DBGP_GO (1<<5)
+#define DBGP_OUT (1<<4)
+#define DBGP_LEN(x) (((x)>>0)&0x0f)
+ u32 pids;
+#define DBGP_PID_GET(x) (((x)>>16)&0xff)
+#define DBGP_PID_SET(data, tok) (((data)<<8)|(tok))
+ u32 data03;
+ u32 data47;
+ u32 address;
+#define DBGP_EPADDR(dev, ep) (((dev)<<8)|(ep))
+};
+
+#ifdef CONFIG_EARLY_PRINTK_DBGP
+#include <linux/init.h>
+extern int __init early_dbgp_init(char *s);
+extern struct console early_dbgp_console;
+#endif /* CONFIG_EARLY_PRINTK_DBGP */
+
+struct usb_hcd;
+
+static inline int xen_dbgp_reset_prep(struct usb_hcd *hcd)
+{
+ return 1; /* Shouldn't this be 0? */
+}
+
+static inline int xen_dbgp_external_startup(struct usb_hcd *hcd)
+{
+ return -1;
+}
+
+#ifdef CONFIG_EARLY_PRINTK_DBGP
+/* Call backs from fotg210 host driver to fotg210 debug driver */
+extern int dbgp_external_startup(struct usb_hcd *);
+extern int dbgp_reset_prep(struct usb_hcd *hcd);
+#else
+static inline int dbgp_reset_prep(struct usb_hcd *hcd)
+{
+ return xen_dbgp_reset_prep(hcd);
+}
+static inline int dbgp_external_startup(struct usb_hcd *hcd)
+{
+ return xen_dbgp_external_startup(hcd);
+}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#define QTD_NEXT(fotg210, dma) cpu_to_hc32(fotg210, (u32)dma)
+
+/*
+ * EHCI Specification 0.95 Section 3.5
+ * QTD: describe data transfer components (buffer, direction, ...)
+ * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
+ *
+ * These are associated only with "QH" (Queue Head) structures,
+ * used with control, bulk, and interrupt transfers.
+ */
+struct fotg210_qtd {
+ /* first part defined by EHCI spec */
+ __hc32 hw_next; /* see EHCI 3.5.1 */
+ __hc32 hw_alt_next; /* see EHCI 3.5.2 */
+ __hc32 hw_token; /* see EHCI 3.5.3 */
+#define QTD_TOGGLE (1 << 31) /* data toggle */
+#define QTD_LENGTH(tok) (((tok)>>16) & 0x7fff)
+#define QTD_IOC (1 << 15) /* interrupt on complete */
+#define QTD_CERR(tok) (((tok)>>10) & 0x3)
+#define QTD_PID(tok) (((tok)>>8) & 0x3)
+#define QTD_STS_ACTIVE (1 << 7) /* HC may execute this */
+#define QTD_STS_HALT (1 << 6) /* halted on error */
+#define QTD_STS_DBE (1 << 5) /* data buffer error (in HC) */
+#define QTD_STS_BABBLE (1 << 4) /* device was babbling (qtd halted) */
+#define QTD_STS_XACT (1 << 3) /* device gave illegal response */
+#define QTD_STS_MMF (1 << 2) /* incomplete split transaction */
+#define QTD_STS_STS (1 << 1) /* split transaction state */
+#define QTD_STS_PING (1 << 0) /* issue PING? */
+
+#define ACTIVE_BIT(fotg210) cpu_to_hc32(fotg210, QTD_STS_ACTIVE)
+#define HALT_BIT(fotg210) cpu_to_hc32(fotg210, QTD_STS_HALT)
+#define STATUS_BIT(fotg210) cpu_to_hc32(fotg210, QTD_STS_STS)
+
+ __hc32 hw_buf[5]; /* see EHCI 3.5.4 */
+ __hc32 hw_buf_hi[5]; /* Appendix B */
+
+ /* the rest is HCD-private */
+ dma_addr_t qtd_dma; /* qtd address */
+ struct list_head qtd_list; /* sw qtd list */
+ struct urb *urb; /* qtd's urb */
+ size_t length; /* length of buffer */
+} __aligned(32);
+
+/* mask NakCnt+T in qh->hw_alt_next */
+#define QTD_MASK(fotg210) cpu_to_hc32(fotg210, ~0x1f)
+
+#define IS_SHORT_READ(token) (QTD_LENGTH(token) != 0 && QTD_PID(token) == 1)
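+
+/*
+ * Example (illustrative sketch, unused helper): decoding a qTD token once
+ * it is in CPU byte order.  A short read is an IN transaction (QTD_PID == 1)
+ * that finished with residual length, which is what IS_SHORT_READ() tests.
+ */
+static inline int fotg210_example_clean_short_in(u32 token)
+{
+ if (token & (QTD_STS_ACTIVE | QTD_STS_HALT))
+ return 0; /* still running, or halted on error */
+ return IS_SHORT_READ(token);
+}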
+
+/*-------------------------------------------------------------------------*/
+
+/* type tag from {qh,itd,fstn}->hw_next */
+#define Q_NEXT_TYPE(fotg210, dma) ((dma) & cpu_to_hc32(fotg210, 3 << 1))
+
+/*
+ * Now the following defines are not converted using the
+ * cpu_to_le32() macro anymore, since we have to support
+ * "dynamic" switching between be and le support, so that the driver
+ * can be used on one system with SoC EHCI controller using big-endian
+ * descriptors as well as a normal little-endian PCI EHCI controller.
+ */
+/* values for that type tag */
+#define Q_TYPE_ITD (0 << 1)
+#define Q_TYPE_QH (1 << 1)
+#define Q_TYPE_SITD (2 << 1)
+#define Q_TYPE_FSTN (3 << 1)
+
+/* next async queue entry, or pointer to interrupt/periodic QH */
+#define QH_NEXT(fotg210, dma) \
+ (cpu_to_hc32(fotg210, (((u32)dma)&~0x01f)|Q_TYPE_QH))
+
+/* for periodic/async schedules and qtd lists, mark end of list */
+#define FOTG210_LIST_END(fotg210) \
+ cpu_to_hc32(fotg210, 1) /* "null pointer" to hw */
+
+/*
+ * Entries in periodic shadow table are pointers to one of four kinds
+ * of data structure. That's dictated by the hardware; a type tag is
+ * encoded in the low bits of the hardware's periodic schedule. Use
+ * Q_NEXT_TYPE to get the tag.
+ *
+ * For entries in the async schedule, the type tag always says "qh".
+ */
+union fotg210_shadow {
+ struct fotg210_qh *qh; /* Q_TYPE_QH */
+ struct fotg210_itd *itd; /* Q_TYPE_ITD */
+ struct fotg210_fstn *fstn; /* Q_TYPE_FSTN */
+ __hc32 *hw_next; /* (all types) */
+ void *ptr;
+};
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.95 Section 3.6
+ * QH: describes control/bulk/interrupt endpoints
+ * See Fig 3-7 "Queue Head Structure Layout".
+ *
+ * These appear in both the async and (for interrupt) periodic schedules.
+ */
+
+/* first part defined by EHCI spec */
+struct fotg210_qh_hw {
+ __hc32 hw_next; /* see EHCI 3.6.1 */
+ __hc32 hw_info1; /* see EHCI 3.6.2 */
+#define QH_CONTROL_EP (1 << 27) /* FS/LS control endpoint */
+#define QH_HEAD (1 << 15) /* Head of async reclamation list */
+#define QH_TOGGLE_CTL (1 << 14) /* Data toggle control */
+#define QH_HIGH_SPEED (2 << 12) /* Endpoint speed */
+#define QH_LOW_SPEED (1 << 12)
+#define QH_FULL_SPEED (0 << 12)
+#define QH_INACTIVATE (1 << 7) /* Inactivate on next transaction */
+ __hc32 hw_info2; /* see EHCI 3.6.2 */
+#define QH_SMASK 0x000000ff
+#define QH_CMASK 0x0000ff00
+#define QH_HUBADDR 0x007f0000
+#define QH_HUBPORT 0x3f800000
+#define QH_MULT 0xc0000000
+ __hc32 hw_current; /* qtd list - see EHCI 3.6.4 */
+
+ /* qtd overlay (hardware parts of a struct fotg210_qtd) */
+ __hc32 hw_qtd_next;
+ __hc32 hw_alt_next;
+ __hc32 hw_token;
+ __hc32 hw_buf[5];
+ __hc32 hw_buf_hi[5];
+} __aligned(32);
+
+struct fotg210_qh {
+ struct fotg210_qh_hw *hw; /* Must come first */
+ /* the rest is HCD-private */
+ dma_addr_t qh_dma; /* address of qh */
+ union fotg210_shadow qh_next; /* ptr to qh; or periodic */
+ struct list_head qtd_list; /* sw qtd list */
+ struct list_head intr_node; /* list of intr QHs */
+ struct fotg210_qtd *dummy;
+ struct fotg210_qh *unlink_next; /* next on unlink list */
+
+ unsigned unlink_cycle;
+
+ u8 needs_rescan; /* Dequeue during giveback */
+ u8 qh_state;
+#define QH_STATE_LINKED 1 /* HC sees this */
+#define QH_STATE_UNLINK 2 /* HC may still see this */
+#define QH_STATE_IDLE 3 /* HC doesn't see this */
+#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on unlink q */
+#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */
+
+ u8 xacterrs; /* XactErr retry counter */
+#define QH_XACTERR_MAX 32 /* XactErr retry limit */
+
+ /* periodic schedule info */
+ u8 usecs; /* intr bandwidth */
+ u8 gap_uf; /* uframes split/csplit gap */
+ u8 c_usecs; /* ... split completion bw */
+ u16 tt_usecs; /* tt downstream bandwidth */
+ unsigned short period; /* polling interval */
+ unsigned short start; /* where polling starts */
+#define NO_FRAME ((unsigned short)~0) /* pick new start */
+
+ struct usb_device *dev; /* access to TT */
+ unsigned is_out:1; /* bulk or intr OUT */
+ unsigned clearing_tt:1; /* Clear-TT-Buf in progress */
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* description of one iso transaction (up to 3 KB data if highspeed) */
+struct fotg210_iso_packet {
+ /* These will be copied to iTD when scheduling */
+ u64 bufp; /* itd->hw_bufp{,_hi}[pg] |= */
+ __hc32 transaction; /* itd->hw_transaction[i] |= */
+ u8 cross; /* buf crosses pages */
+ /* for full speed OUT splits */
+ u32 buf1;
+};
+
+/* temporary schedule data for packets from iso urbs (both speeds)
+ * each packet is one logical usb transaction to the device (not TT),
+ * beginning at stream->next_uframe
+ */
+struct fotg210_iso_sched {
+ struct list_head td_list;
+ unsigned span;
+ struct fotg210_iso_packet packet[0];
+};
+
+/*
+ * fotg210_iso_stream - groups all (s)itds for this endpoint.
+ * acts like a qh would, if EHCI had them for ISO.
+ */
+struct fotg210_iso_stream {
+ /* first field matches fotg210_qh, but is NULL */
+ struct fotg210_qh_hw *hw;
+
+ u8 bEndpointAddress;
+ u8 highspeed;
+ struct list_head td_list; /* queued itds */
+ struct list_head free_list; /* list of unused itds */
+ struct usb_device *udev;
+ struct usb_host_endpoint *ep;
+
+ /* output of (re)scheduling */
+ int next_uframe;
+ __hc32 splits;
+
+ /* the rest is derived from the endpoint descriptor,
+ * trusting urb->interval == f(epdesc->bInterval) and
+ * including the extra info for hw_bufp[0..2]
+ */
+ u8 usecs, c_usecs;
+ u16 interval;
+ u16 tt_usecs;
+ u16 maxp;
+ u16 raw_mask;
+ unsigned bandwidth;
+
+ /* This is used to initialize iTD's hw_bufp fields */
+ __hc32 buf0;
+ __hc32 buf1;
+ __hc32 buf2;
+
+ /* this is used to initialize sITD's tt info */
+ __hc32 address;
+};
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.95 Section 3.3
+ * Fig 3-4 "Isochronous Transaction Descriptor (iTD)"
+ *
+ * Schedule records for high speed iso xfers
+ */
+struct fotg210_itd {
+ /* first part defined by EHCI spec */
+ __hc32 hw_next; /* see EHCI 3.3.1 */
+ __hc32 hw_transaction[8]; /* see EHCI 3.3.2 */
+#define FOTG210_ISOC_ACTIVE (1<<31) /* activate transfer this slot */
+#define FOTG210_ISOC_BUF_ERR (1<<30) /* Data buffer error */
+#define FOTG210_ISOC_BABBLE (1<<29) /* babble detected */
+#define FOTG210_ISOC_XACTERR (1<<28) /* XactErr - transaction error */
+#define FOTG210_ITD_LENGTH(tok) (((tok)>>16) & 0x0fff)
+#define FOTG210_ITD_IOC (1 << 15) /* interrupt on complete */
+
+#define ITD_ACTIVE(fotg210) cpu_to_hc32(fotg210, FOTG210_ISOC_ACTIVE)
+
+ __hc32 hw_bufp[7]; /* see EHCI 3.3.3 */
+ __hc32 hw_bufp_hi[7]; /* Appendix B */
+
+ /* the rest is HCD-private */
+ dma_addr_t itd_dma; /* for this itd */
+ union fotg210_shadow itd_next; /* ptr to periodic q entry */
+
+ struct urb *urb;
+ struct fotg210_iso_stream *stream; /* endpoint's queue */
+ struct list_head itd_list; /* list of stream's itds */
+
+ /* any/all hw_transactions here may be used by that urb */
+ unsigned frame; /* where scheduled */
+ unsigned pg;
+ unsigned index[8]; /* in urb->iso_frame_desc */
+} __aligned(32);
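
Each hw_transaction word packs per-microframe status bits and a transfer length into one 32-bit token, laid out by the defines above. A standalone decode of one token using the same masks (the driver itself goes through hc32_to_cpu() first):

#include <stdint.h>
#include <stdio.h>

#define ISOC_ACTIVE     (1u << 31)
#define ISOC_BUF_ERR    (1u << 30)
#define ISOC_BABBLE     (1u << 29)
#define ISOC_XACTERR    (1u << 28)
#define ITD_LENGTH(tok) (((tok) >> 16) & 0x0fff)

int main(void)
{
	uint32_t token = (1u << 31) | (512u << 16);  /* active, 512 bytes */

	printf("active=%d buf_err=%d babble=%d xacterr=%d len=%u\n",
	       !!(token & ISOC_ACTIVE), !!(token & ISOC_BUF_ERR),
	       !!(token & ISOC_BABBLE), !!(token & ISOC_XACTERR),
	       (unsigned)ITD_LENGTH(token));
	return 0;
}
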
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI Specification 0.96 Section 3.7
+ * Periodic Frame Span Traversal Node (FSTN)
+ *
+ * Manages split interrupt transactions (using TT) that span frame boundaries
+ * into uframes 0/1; see 4.12.2.2. In those uframes, a "save place" FSTN
+ * makes the HC jump (back) to a QH to scan for fs/ls QH completions until
+ * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work.
+ */
+struct fotg210_fstn {
+ __hc32 hw_next; /* any periodic q entry */
+ __hc32 hw_prev; /* qh or FOTG210_LIST_END */
+
+ /* the rest is HCD-private */
+ dma_addr_t fstn_dma;
+ union fotg210_shadow fstn_next; /* ptr to periodic q entry */
+} __aligned(32);
+
+/*-------------------------------------------------------------------------*/
+
+/* Prepare the PORTSC wakeup flags during controller suspend/resume */
+
+#define fotg210_prepare_ports_for_controller_suspend(fotg210, do_wakeup) \
+ fotg210_adjust_port_wakeup_flags(fotg210, true, do_wakeup);
+
+#define fotg210_prepare_ports_for_controller_resume(fotg210) \
+ fotg210_adjust_port_wakeup_flags(fotg210, false, false);
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Some EHCI controllers have a Transaction Translator built into the
+ * root hub. This is a non-standard feature. Each controller will need
+ * to add code to the following inline functions, and call them as
+ * needed (mostly in root hub code).
+ */
+
+static inline unsigned int
+fotg210_get_speed(struct fotg210_hcd *fotg210, unsigned int portsc)
+{
+ return (readl(&fotg210->regs->otgcsr)
+ & OTGCSR_HOST_SPD_TYP) >> 22;
+}
+
+/* Returns the speed of a device attached to a port on the root hub. */
+static inline unsigned int
+fotg210_port_speed(struct fotg210_hcd *fotg210, unsigned int portsc)
+{
+ switch (fotg210_get_speed(fotg210, portsc)) {
+ case 0:
+ return 0;
+ case 1:
+ return USB_PORT_STAT_LOW_SPEED;
+ case 2:
+ default:
+ return USB_PORT_STAT_HIGH_SPEED;
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+
+#define fotg210_has_fsl_portno_bug(e) (0)
+
+/*
+ * While most USB host controllers implement their registers in
+ * little-endian format, a minority (e.g. the celleb companion chip)
+ * implements them in big-endian format.
+ *
+ * This attempts to support either format at compile time without a
+ * runtime penalty, or both formats with the additional overhead
+ * of checking a flag bit.
+ *
+ */
+
+#define fotg210_big_endian_mmio(e) 0
+#define fotg210_big_endian_capbase(e) 0
+
+static inline unsigned int fotg210_readl(const struct fotg210_hcd *fotg210,
+ __u32 __iomem *regs)
+{
+ return readl(regs);
+}
+
+static inline void fotg210_writel(const struct fotg210_hcd *fotg210,
+ const unsigned int val, __u32 __iomem *regs)
+{
+ writel(val, regs);
+}
+
+/* cpu to fotg210 */
+static inline __hc32 cpu_to_hc32(const struct fotg210_hcd *fotg210, const u32 x)
+{
+ return cpu_to_le32(x);
+}
+
+/* fotg210 to cpu */
+static inline u32 hc32_to_cpu(const struct fotg210_hcd *fotg210, const __hc32 x)
+{
+ return le32_to_cpu(x);
+}
+
+static inline u32 hc32_to_cpup(const struct fotg210_hcd *fotg210,
+ const __hc32 *x)
+{
+ return le32_to_cpup(x);
+}
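
Because fotg210_big_endian_mmio() and fotg210_big_endian_capbase() are hard-wired to 0, the cpu_to_hc32()/hc32_to_cpu() wrappers above collapse to plain little-endian conversions, which are no-ops on a little-endian CPU. A standalone illustration using the glibc <endian.h> equivalents of cpu_to_le32()/le32_to_cpu():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cpu_val = 0x12345678;
	uint32_t wire    = htole32(cpu_val);  /* what cpu_to_hc32() produces   */
	uint32_t back    = le32toh(wire);     /* what hc32_to_cpu() gives back */

	printf("cpu=0x%08x wire(le)=0x%08x back=0x%08x\n",
	       (unsigned)cpu_val, (unsigned)wire, (unsigned)back);
	return 0;
}
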
+
+/*-------------------------------------------------------------------------*/
+
+static inline unsigned fotg210_read_frame_index(struct fotg210_hcd *fotg210)
+{
+ return fotg210_readl(fotg210, &fotg210->regs->frame_index);
+}
+
+#define fotg210_itdlen(urb, desc, t) ({ \
+ usb_pipein((urb)->pipe) ? \
+ (desc)->length - FOTG210_ITD_LENGTH(t) : \
+ FOTG210_ITD_LENGTH(t); \
+})
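
fotg210_itdlen() treats the token's length field differently per direction: for IN it is read as a residual (bytes left untransferred, subtracted from the requested length), for OUT as the transferred byte count itself. A standalone sketch of that accounting, mirroring the macro:

#include <stdint.h>
#include <stdio.h>

#define ITD_LENGTH(tok) (((tok) >> 16) & 0x0fff)

/* Mirrors fotg210_itdlen(): IN tokens carry a residual, OUT tokens a count. */
static unsigned itd_actual(int is_in, unsigned requested, uint32_t token)
{
	return is_in ? requested - ITD_LENGTH(token) : ITD_LENGTH(token);
}

int main(void)
{
	/* IN: asked for 1024 bytes, token reports 24 bytes left over */
	printf("IN  actual = %u\n", itd_actual(1, 1024, 24u << 16));
	/* OUT: token reports 512 bytes handed to the wire */
	printf("OUT actual = %u\n", itd_actual(0, 512, 512u << 16));
	return 0;
}
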
+/*-------------------------------------------------------------------------*/
+
+#ifndef DEBUG
+#define STUB_DEBUG_FILES
+#endif /* DEBUG */
+
+/*-------------------------------------------------------------------------*/
+
+#endif /* __LINUX_FOTG210_H */
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 11e0b79ff9d..cfbff716182 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -258,7 +258,7 @@ static int fsl_usb2_mph_dr_of_remove(struct platform_device *ofdev)
int fsl_usb2_mpc5121_init(struct platform_device *pdev)
{
- struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct clk *clk;
char clk_name[10];
int base, clk_num;
@@ -298,7 +298,7 @@ int fsl_usb2_mpc5121_init(struct platform_device *pdev)
static void fsl_usb2_mpc5121_exit(struct platform_device *pdev)
{
- struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
pdata->regs = NULL;
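
The fsl-mph-dr-of hunks above are the first of many identical conversions in this patch: open-coded pdev->dev.platform_data reads become dev_get_platdata() calls. The accessor is a thin wrapper around the same field, so behaviour is unchanged. A hedged sketch of the usual probe-side pattern; the driver and pdata names are invented for illustration:

#include <linux/device.h>
#include <linux/platform_device.h>

/* hypothetical platform data type, for illustration only */
struct foo_pdata {
	int bus_width;
};

static int foo_probe(struct platform_device *pdev)
{
	/* equivalent to pdev->dev.platform_data, but via the accessor */
	struct foo_pdata *pdata = dev_get_platdata(&pdev->dev);

	if (!pdata) {
		dev_err(&pdev->dev, "no platform data\n");
		return -ENODEV;
	}

	dev_info(&pdev->dev, "bus width %d\n", pdata->bus_width);
	return 0;
}
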
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index 483990c716a..5b86ffb88f1 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -161,6 +161,13 @@ static int hwahc_op_start(struct usb_hcd *usb_hcd)
usb_hcd->uses_new_polling = 1;
set_bit(HCD_FLAG_POLL_RH, &usb_hcd->flags);
usb_hcd->state = HC_STATE_RUNNING;
+
+ /*
+ * prevent USB core from suspending the root hub since
+ * bus_suspend and bus_resume are not yet supported.
+ */
+ pm_runtime_get_noresume(&usb_hcd->self.root_hub->dev);
+
result = 0;
out:
mutex_unlock(&wusbhc->mutex);
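
The hwa-hc hunk pins the root hub's runtime-PM usage count so the USB core cannot runtime-suspend it while bus_suspend/bus_resume are unsupported; pm_runtime_get_noresume() only increments the counter and triggers no callbacks. The natural counterpart when the HCD stops would be pm_runtime_put_noidle(), which this hunk does not show, so treat the pairing below as a sketch rather than a description of the driver:

#include <linux/pm_runtime.h>
#include <linux/usb/hcd.h>

/* keep the root hub awake: usage count +1, no callbacks invoked */
static void pin_root_hub(struct usb_hcd *hcd)
{
	pm_runtime_get_noresume(&hcd->self.root_hub->dev);
}

/* release it again (e.g. from a stop path), still without callbacks */
static void unpin_root_hub(struct usb_hcd *hcd)
{
	pm_runtime_put_noidle(&hcd->self.root_hub->dev);
}
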
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 03dc4d9cbec..60a5de505ca 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -1860,7 +1860,7 @@ static int imx21_probe(struct platform_device *pdev)
imx21 = hcd_to_imx21(hcd);
imx21->hcd = hcd;
imx21->dev = &pdev->dev;
- imx21->pdata = pdev->dev.platform_data;
+ imx21->pdata = dev_get_platdata(&pdev->dev);
if (!imx21->pdata)
imx21->pdata = &default_pdata;
diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
index b64e661618b..c7d0f8f231b 100644
--- a/drivers/usb/host/isp116x-hcd.c
+++ b/drivers/usb/host/isp116x-hcd.c
@@ -1626,7 +1626,7 @@ static int isp116x_probe(struct platform_device *pdev)
isp116x->addr_reg = addr_reg;
spin_lock_init(&isp116x->lock);
INIT_LIST_HEAD(&isp116x->async);
- isp116x->board = pdev->dev.platform_data;
+ isp116x->board = dev_get_platdata(&pdev->dev);
if (!isp116x->board) {
ERR("Platform data structure not initialized\n");
diff --git a/drivers/usb/host/isp116x.h b/drivers/usb/host/isp116x.h
index 9a2c400e609..dd34b7a3396 100644
--- a/drivers/usb/host/isp116x.h
+++ b/drivers/usb/host/isp116x.h
@@ -325,11 +325,7 @@ struct isp116x_ep {
/*-------------------------------------------------------------------------*/
-#ifdef DEBUG
-#define DBG(stuff...) printk(KERN_DEBUG "116x: " stuff)
-#else
-#define DBG(stuff...) do{}while(0)
-#endif
+#define DBG(stuff...) pr_debug("116x: " stuff)
#ifdef VERBOSE
# define VDBG DBG
@@ -358,15 +354,8 @@ struct isp116x_ep {
#define isp116x_check_platform_delay(h) 0
#endif
-#if defined(DEBUG)
-#define IRQ_TEST() BUG_ON(!irqs_disabled())
-#else
-#define IRQ_TEST() do{}while(0)
-#endif
-
static inline void isp116x_write_addr(struct isp116x *isp116x, unsigned reg)
{
- IRQ_TEST();
writew(reg & 0xff, isp116x->addr_reg);
isp116x_delay(isp116x, 300);
}
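
The new DBG() above relies on pr_debug(), which compiles to nothing unless DEBUG is defined or CONFIG_DYNAMIC_DEBUG is enabled (in which case individual call sites can be switched on at runtime through the dynamic-debug control file). An alternative to baking the "116x: " prefix into the macro is the usual pr_fmt convention, defined before the first include; a sketch:

/* must come before the first #include so every pr_*() picks it up */
#define pr_fmt(fmt) "116x: " fmt

#include <linux/kernel.h>
#include <linux/printk.h>

static void example(int reg)
{
	/* compiled out unless DEBUG / dynamic debug enables it */
	pr_debug("writing register %02x\n", reg);
}
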
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index b04e8ece4d3..6f29abad681 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -37,11 +37,7 @@
* recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz.
*/
-#ifdef CONFIG_USB_DEBUG
-# define ISP1362_DEBUG
-#else
-# undef ISP1362_DEBUG
-#endif
+#undef ISP1362_DEBUG
/*
* The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and
@@ -82,6 +78,8 @@
#include <linux/io.h>
#include <linux/bitmap.h>
#include <linux/prefetch.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
@@ -92,7 +90,6 @@ static int dbg_level;
module_param(dbg_level, int, 0644);
#else
module_param(dbg_level, int, 0);
-#define STUB_DEBUG_FILE
#endif
#include "../core/usb.h"
@@ -350,8 +347,6 @@ static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep
struct ptd *ptd = &ep->ptd;
int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;
- _BUG_ON(ep->ptd_offset < 0);
-
prefetch(ptd);
isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
if (len)
@@ -1575,12 +1570,12 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
DBG(0, "ClearHubFeature: ");
switch (wValue) {
case C_HUB_OVER_CURRENT:
- _DBG(0, "C_HUB_OVER_CURRENT\n");
+ DBG(0, "C_HUB_OVER_CURRENT\n");
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC);
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
case C_HUB_LOCAL_POWER:
- _DBG(0, "C_HUB_LOCAL_POWER\n");
+ DBG(0, "C_HUB_LOCAL_POWER\n");
break;
default:
goto error;
@@ -1591,7 +1586,7 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
switch (wValue) {
case C_HUB_OVER_CURRENT:
case C_HUB_LOCAL_POWER:
- _DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
+ DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n");
break;
default:
goto error;
@@ -1622,36 +1617,36 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
switch (wValue) {
case USB_PORT_FEAT_ENABLE:
- _DBG(0, "USB_PORT_FEAT_ENABLE\n");
+ DBG(0, "USB_PORT_FEAT_ENABLE\n");
tmp = RH_PS_CCS;
break;
case USB_PORT_FEAT_C_ENABLE:
- _DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
+ DBG(0, "USB_PORT_FEAT_C_ENABLE\n");
tmp = RH_PS_PESC;
break;
case USB_PORT_FEAT_SUSPEND:
- _DBG(0, "USB_PORT_FEAT_SUSPEND\n");
+ DBG(0, "USB_PORT_FEAT_SUSPEND\n");
tmp = RH_PS_POCI;
break;
case USB_PORT_FEAT_C_SUSPEND:
- _DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
+ DBG(0, "USB_PORT_FEAT_C_SUSPEND\n");
tmp = RH_PS_PSSC;
break;
case USB_PORT_FEAT_POWER:
- _DBG(0, "USB_PORT_FEAT_POWER\n");
+ DBG(0, "USB_PORT_FEAT_POWER\n");
tmp = RH_PS_LSDA;
break;
case USB_PORT_FEAT_C_CONNECTION:
- _DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
+ DBG(0, "USB_PORT_FEAT_C_CONNECTION\n");
tmp = RH_PS_CSC;
break;
case USB_PORT_FEAT_C_OVER_CURRENT:
- _DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
+ DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n");
tmp = RH_PS_OCIC;
break;
case USB_PORT_FEAT_C_RESET:
- _DBG(0, "USB_PORT_FEAT_C_RESET\n");
+ DBG(0, "USB_PORT_FEAT_C_RESET\n");
tmp = RH_PS_PRSC;
break;
default:
@@ -1671,7 +1666,7 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
wIndex--;
switch (wValue) {
case USB_PORT_FEAT_SUSPEND:
- _DBG(0, "USB_PORT_FEAT_SUSPEND\n");
+ DBG(0, "USB_PORT_FEAT_SUSPEND\n");
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS);
isp1362_hcd->rhport[wIndex] =
@@ -1679,7 +1674,7 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
break;
case USB_PORT_FEAT_POWER:
- _DBG(0, "USB_PORT_FEAT_POWER\n");
+ DBG(0, "USB_PORT_FEAT_POWER\n");
spin_lock_irqsave(&isp1362_hcd->lock, flags);
isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS);
isp1362_hcd->rhport[wIndex] =
@@ -1687,7 +1682,7 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
spin_unlock_irqrestore(&isp1362_hcd->lock, flags);
break;
case USB_PORT_FEAT_RESET:
- _DBG(0, "USB_PORT_FEAT_RESET\n");
+ DBG(0, "USB_PORT_FEAT_RESET\n");
spin_lock_irqsave(&isp1362_hcd->lock, flags);
t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH);
@@ -1721,7 +1716,7 @@ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
default:
error:
/* "protocol stall" on error */
- _DBG(0, "PROTOCOL STALL\n");
+ DBG(0, "PROTOCOL STALL\n");
retval = -EPIPE;
}
@@ -1913,20 +1908,6 @@ static int isp1362_bus_resume(struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
-#ifdef STUB_DEBUG_FILE
-
-static inline void create_debug_file(struct isp1362_hcd *isp1362_hcd)
-{
-}
-static inline void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
-{
-}
-
-#else
-
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-
static void dump_irq(struct seq_file *s, char *label, u16 mask)
{
seq_printf(s, "%-15s %04x%s%s%s%s%s%s\n", label, mask,
@@ -2069,7 +2050,7 @@ static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd)
isp1362_read_reg16(isp1362_hcd, HCATLDTCTO));
}
-static int proc_isp1362_show(struct seq_file *s, void *unused)
+static int isp1362_show(struct seq_file *s, void *unused)
{
struct isp1362_hcd *isp1362_hcd = s->private;
struct isp1362_ep *ep;
@@ -2173,41 +2154,31 @@ static int proc_isp1362_show(struct seq_file *s, void *unused)
return 0;
}
-static int proc_isp1362_open(struct inode *inode, struct file *file)
+static int isp1362_open(struct inode *inode, struct file *file)
{
- return single_open(file, proc_isp1362_show, PDE_DATA(inode));
+	return single_open(file, isp1362_show, inode->i_private);
}
-static const struct file_operations proc_ops = {
- .open = proc_isp1362_open,
+static const struct file_operations debug_ops = {
+ .open = isp1362_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/* expect just one isp1362_hcd per system */
-static const char proc_filename[] = "driver/isp1362";
-
static void create_debug_file(struct isp1362_hcd *isp1362_hcd)
{
- struct proc_dir_entry *pde;
-
- pde = proc_create_data(proc_filename, 0, NULL, &proc_ops, isp1362_hcd);
- if (pde == NULL) {
- pr_warning("%s: Failed to create debug file '%s'\n", __func__, proc_filename);
- return;
- }
- isp1362_hcd->pde = pde;
+ isp1362_hcd->debug_file = debugfs_create_file("isp1362", S_IRUGO,
+ usb_debug_root,
+ isp1362_hcd, &debug_ops);
}
static void remove_debug_file(struct isp1362_hcd *isp1362_hcd)
{
- if (isp1362_hcd->pde)
- remove_proc_entry(proc_filename, NULL);
+ debugfs_remove(isp1362_hcd->debug_file);
}
-#endif
-
/*-------------------------------------------------------------------------*/
static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd)
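
The procfs file above becomes a debugfs file. debugfs_create_file(name, mode, parent, data, fops) stores the data pointer in the new inode's i_private, which is why the open routine should pass inode->i_private (not the inode itself) to single_open(); the sl811 conversion later in this patch does exactly that. A hedged sketch of the minimal seq_file-on-debugfs pattern with invented names:

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int foo_show(struct seq_file *s, void *unused)
{
	void *priv = s->private;          /* whatever was passed as 'data' */

	seq_printf(s, "private data at %p\n", priv);
	return 0;
}

static int foo_open(struct inode *inode, struct file *file)
{
	/* debugfs stored 'data' in i_private at create time */
	return single_open(file, foo_show, inode->i_private);
}

static const struct file_operations foo_debug_ops = {
	.open    = foo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

/* usage sketch:
 *	dentry = debugfs_create_file("foo", S_IRUGO, usb_debug_root,
 *				     priv, &foo_debug_ops);
 */
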
@@ -2754,7 +2725,7 @@ static int isp1362_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&isp1362_hcd->periodic);
INIT_LIST_HEAD(&isp1362_hcd->isoc);
INIT_LIST_HEAD(&isp1362_hcd->remove_list);
- isp1362_hcd->board = pdev->dev.platform_data;
+ isp1362_hcd->board = dev_get_platdata(&pdev->dev);
#if USE_PLATFORM_DELAY
if (!isp1362_hcd->board->delay) {
dev_err(hcd->self.controller, "No platform delay function given\n");
diff --git a/drivers/usb/host/isp1362.h b/drivers/usb/host/isp1362.h
index 0f97820e65b..3b0b4847c3a 100644
--- a/drivers/usb/host/isp1362.h
+++ b/drivers/usb/host/isp1362.h
@@ -76,14 +76,14 @@ static inline void delayed_insw(unsigned int addr, void *buf, int len)
#define ISP1362_REG_WRITE_OFFSET 0x80
-#ifdef ISP1362_DEBUG
-typedef const unsigned int isp1362_reg_t;
-
#define REG_WIDTH_16 0x000
#define REG_WIDTH_32 0x100
#define REG_WIDTH_MASK 0x100
#define REG_NO_MASK 0x0ff
+#ifdef ISP1362_DEBUG
+typedef const unsigned int isp1362_reg_t;
+
#define REG_ACCESS_R 0x200
#define REG_ACCESS_W 0x400
#define REG_ACCESS_RW 0x600
@@ -91,9 +91,6 @@ typedef const unsigned int isp1362_reg_t;
#define ISP1362_REG_NO(r) ((r) & REG_NO_MASK)
-#define _BUG_ON(x) BUG_ON(x)
-#define _WARN_ON(x) WARN_ON(x)
-
#define ISP1362_REG(name, addr, width, rw) \
static isp1362_reg_t ISP1362_REG_##name = ((addr) | (width) | (rw))
@@ -102,8 +99,6 @@ static isp1362_reg_t ISP1362_REG_##name = ((addr) | (width) | (rw))
#else
typedef const unsigned char isp1362_reg_t;
#define ISP1362_REG_NO(r) (r)
-#define _BUG_ON(x) do {} while (0)
-#define _WARN_ON(x) do {} while (0)
#define ISP1362_REG(name, addr, width, rw) \
static isp1362_reg_t ISP1362_REG_##name = addr
@@ -485,7 +480,7 @@ struct isp1362_hcd {
struct isp1362_platform_data *board;
- struct proc_dir_entry *pde;
+ struct dentry *debug_file;
unsigned long stat1, stat2, stat4, stat8, stat16;
/* HC registers */
@@ -587,21 +582,11 @@ static inline struct usb_hcd *isp1362_hcd_to_hcd(struct isp1362_hcd *isp1362_hcd
* ISP1362 HW Interface
*/
-#ifdef ISP1362_DEBUG
#define DBG(level, fmt...) \
do { \
if (dbg_level > level) \
pr_debug(fmt); \
} while (0)
-#define _DBG(level, fmt...) \
- do { \
- if (dbg_level > level) \
- printk(fmt); \
- } while (0)
-#else
-#define DBG(fmt...) do {} while (0)
-#define _DBG DBG
-#endif
#ifdef VERBOSE
# define VDBG(fmt...) DBG(3, fmt)
@@ -645,9 +630,7 @@ static inline struct usb_hcd *isp1362_hcd_to_hcd(struct isp1362_hcd *isp1362_hcd
*/
static void isp1362_write_addr(struct isp1362_hcd *isp1362_hcd, isp1362_reg_t reg)
{
- /*_BUG_ON((reg & ISP1362_REG_WRITE_OFFSET) && !(reg & REG_ACCESS_W));*/
REG_ACCESS_TEST(reg);
- _BUG_ON(!irqs_disabled());
DUMMY_DELAY_ACCESS;
writew(ISP1362_REG_NO(reg), isp1362_hcd->addr_reg);
DUMMY_DELAY_ACCESS;
@@ -656,7 +639,6 @@ static void isp1362_write_addr(struct isp1362_hcd *isp1362_hcd, isp1362_reg_t re
static void isp1362_write_data16(struct isp1362_hcd *isp1362_hcd, u16 val)
{
- _BUG_ON(!irqs_disabled());
DUMMY_DELAY_ACCESS;
writew(val, isp1362_hcd->data_reg);
}
@@ -665,7 +647,6 @@ static u16 isp1362_read_data16(struct isp1362_hcd *isp1362_hcd)
{
u16 val;
- _BUG_ON(!irqs_disabled());
DUMMY_DELAY_ACCESS;
val = readw(isp1362_hcd->data_reg);
@@ -674,7 +655,6 @@ static u16 isp1362_read_data16(struct isp1362_hcd *isp1362_hcd)
static void isp1362_write_data32(struct isp1362_hcd *isp1362_hcd, u32 val)
{
- _BUG_ON(!irqs_disabled());
#if USE_32BIT
DUMMY_DELAY_ACCESS;
writel(val, isp1362_hcd->data_reg);
@@ -690,7 +670,6 @@ static u32 isp1362_read_data32(struct isp1362_hcd *isp1362_hcd)
{
u32 val;
- _BUG_ON(!irqs_disabled());
#if USE_32BIT
DUMMY_DELAY_ACCESS;
val = readl(isp1362_hcd->data_reg);
@@ -713,8 +692,6 @@ static void isp1362_read_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 le
if (!len)
return;
- _BUG_ON(!irqs_disabled());
-
RDBG("%s: Reading %d byte from fifo to mem @ %p\n", __func__, len, buf);
#if USE_32BIT
if (len >= 4) {
@@ -760,8 +737,6 @@ static void isp1362_write_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 l
return;
}
- _BUG_ON(!irqs_disabled());
-
RDBG("%s: Writing %d byte to fifo from memory @%p\n", __func__, len, buf);
#if USE_32BIT
if (len >= 4) {
@@ -854,7 +829,6 @@ static void isp1362_write_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 l
isp1362_write_reg32(d, r, __v & ~m); \
}
-#ifdef ISP1362_DEBUG
#define isp1362_show_reg(d, r) { \
if ((ISP1362_REG_##r & REG_WIDTH_MASK) == REG_WIDTH_32) \
DBG(0, "%-12s[%02x]: %08x\n", #r, \
@@ -863,9 +837,6 @@ static void isp1362_write_fifo(struct isp1362_hcd *isp1362_hcd, void *buf, u16 l
DBG(0, "%-12s[%02x]: %04x\n", #r, \
ISP1362_REG_NO(ISP1362_REG_##r), isp1362_read_reg16(d, r)); \
}
-#else
-#define isp1362_show_reg(d, r) do {} while (0)
-#endif
static void __attribute__((__unused__)) isp1362_show_regs(struct isp1362_hcd *isp1362_hcd)
{
@@ -923,10 +894,6 @@ static void __attribute__((__unused__)) isp1362_show_regs(struct isp1362_hcd *is
static void isp1362_write_diraddr(struct isp1362_hcd *isp1362_hcd, u16 offset, u16 len)
{
- _BUG_ON(offset & 1);
- _BUG_ON(offset >= ISP1362_BUF_SIZE);
- _BUG_ON(len > ISP1362_BUF_SIZE);
- _BUG_ON(offset + len > ISP1362_BUF_SIZE);
len = (len + 1) & ~1;
isp1362_clr_mask16(isp1362_hcd, HCDMACFG, HCDMACFG_CTR_ENABLE);
@@ -936,42 +903,32 @@ static void isp1362_write_diraddr(struct isp1362_hcd *isp1362_hcd, u16 offset, u
static void isp1362_read_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16 offset, int len)
{
- _BUG_ON(offset & 1);
-
isp1362_write_diraddr(isp1362_hcd, offset, len);
DBG(3, "%s: Reading %d byte from buffer @%04x to memory @ %p\n",
__func__, len, offset, buf);
isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
- _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
isp1362_write_addr(isp1362_hcd, ISP1362_REG_HCDIRDATA);
isp1362_read_fifo(isp1362_hcd, buf, len);
- _WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
- _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
}
static void isp1362_write_buffer(struct isp1362_hcd *isp1362_hcd, void *buf, u16 offset, int len)
{
- _BUG_ON(offset & 1);
-
isp1362_write_diraddr(isp1362_hcd, offset, len);
DBG(3, "%s: Writing %d byte to buffer @%04x from memory @ %p\n",
__func__, len, offset, buf);
isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
- _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
isp1362_write_addr(isp1362_hcd, ISP1362_REG_HCDIRDATA | ISP1362_REG_WRITE_OFFSET);
isp1362_write_fifo(isp1362_hcd, buf, len);
- _WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_EOT);
- _WARN_ON((isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_EOT));
}
static void __attribute__((unused)) dump_data(char *buf, int len)
@@ -1002,7 +959,7 @@ static void __attribute__((unused)) dump_data(char *buf, int len)
}
}
-#if defined(ISP1362_DEBUG) && defined(PTD_TRACE)
+#if defined(PTD_TRACE)
static void dump_ptd(struct ptd *ptd)
{
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 3df49b169b5..df931e9ba5b 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -351,7 +351,7 @@ static int isp1760_plat_probe(struct platform_device *pdev)
struct resource *mem_res;
struct resource *irq_res;
resource_size_t mem_size;
- struct isp1760_platform_data *priv = pdev->dev.platform_data;
+ struct isp1760_platform_data *priv = dev_get_platdata(&pdev->dev);
unsigned int devflags = 0;
unsigned long irqflags = IRQF_SHARED;
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 9677f683120..caa3764a340 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -31,8 +31,8 @@
#define at91_for_each_port(index) \
for ((index) = 0; (index) < AT91_MAX_USBH_PORTS; (index)++)
-/* interface and function clocks; sometimes also an AHB clock */
-static struct clk *iclk, *fclk, *hclk;
+/* interface, function and usb clocks; sometimes also an AHB clock */
+static struct clk *iclk, *fclk, *uclk, *hclk;
static int clocked;
extern int usb_disabled(void);
@@ -41,6 +41,10 @@ extern int usb_disabled(void);
static void at91_start_clock(void)
{
+ if (IS_ENABLED(CONFIG_COMMON_CLK)) {
+ clk_set_rate(uclk, 48000000);
+ clk_prepare_enable(uclk);
+ }
clk_prepare_enable(hclk);
clk_prepare_enable(iclk);
clk_prepare_enable(fclk);
@@ -52,6 +56,8 @@ static void at91_stop_clock(void)
clk_disable_unprepare(fclk);
clk_disable_unprepare(iclk);
clk_disable_unprepare(hclk);
+ if (IS_ENABLED(CONFIG_COMMON_CLK))
+ clk_disable_unprepare(uclk);
clocked = 0;
}
@@ -162,6 +168,14 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
retval = PTR_ERR(hclk);
goto err5;
}
+ if (IS_ENABLED(CONFIG_COMMON_CLK)) {
+ uclk = clk_get(&pdev->dev, "usb_clk");
+ if (IS_ERR(uclk)) {
+ dev_err(&pdev->dev, "failed to get uclk\n");
+ retval = PTR_ERR(uclk);
+ goto err6;
+ }
+ }
at91_start_hc(pdev);
ohci_hcd_init(hcd_to_ohci(hcd));
@@ -173,6 +187,9 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
/* Error handling */
at91_stop_hc(pdev);
+ if (IS_ENABLED(CONFIG_COMMON_CLK))
+ clk_put(uclk);
+ err6:
clk_put(hclk);
err5:
clk_put(fclk);
@@ -212,12 +229,12 @@ static void usb_hcd_at91_remove(struct usb_hcd *hcd,
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
+ if (IS_ENABLED(CONFIG_COMMON_CLK))
+ clk_put(uclk);
clk_put(hclk);
clk_put(fclk);
clk_put(iclk);
fclk = iclk = hclk = NULL;
-
- dev_set_drvdata(&pdev->dev, NULL);
}
/*-------------------------------------------------------------------------*/
@@ -225,7 +242,7 @@ static void usb_hcd_at91_remove(struct usb_hcd *hcd,
static int
ohci_at91_reset (struct usb_hcd *hcd)
{
- struct at91_usbh_data *board = hcd->self.controller->platform_data;
+ struct at91_usbh_data *board = dev_get_platdata(hcd->self.controller);
struct ohci_hcd *ohci = hcd_to_ohci (hcd);
int ret;
@@ -280,7 +297,7 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
*/
static int ohci_at91_hub_status_data(struct usb_hcd *hcd, char *buf)
{
- struct at91_usbh_data *pdata = hcd->self.controller->platform_data;
+ struct at91_usbh_data *pdata = dev_get_platdata(hcd->self.controller);
int length = ohci_hub_status_data(hcd, buf);
int port;
@@ -301,7 +318,7 @@ static int ohci_at91_hub_status_data(struct usb_hcd *hcd, char *buf)
static int ohci_at91_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
- struct at91_usbh_data *pdata = hcd->self.controller->platform_data;
+ struct at91_usbh_data *pdata = dev_get_platdata(hcd->self.controller);
struct usb_hub_descriptor *desc;
int ret = -EINVAL;
u32 *data = (u32 *)buf;
@@ -461,7 +478,7 @@ static const struct hc_driver ohci_at91_hc_driver = {
static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
{
struct platform_device *pdev = data;
- struct at91_usbh_data *pdata = pdev->dev.platform_data;
+ struct at91_usbh_data *pdata = dev_get_platdata(&pdev->dev);
int val, gpio, port;
/* From the GPIO notifying the over-current situation, find
@@ -567,7 +584,7 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
if (ret)
return ret;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata) {
at91_for_each_port(i) {
@@ -643,7 +660,7 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
static int ohci_hcd_at91_drv_remove(struct platform_device *pdev)
{
- struct at91_usbh_data *pdata = pdev->dev.platform_data;
+ struct at91_usbh_data *pdata = dev_get_platdata(&pdev->dev);
int i;
if (pdata) {
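
With the common clock framework, the ohci-at91 glue above gains a fourth clock, "usb_clk", which has to be set to 48 MHz before the OHCI block runs; the start path sets the rate and prepares/enables it, and the stop path disables it last. A condensed sketch of just that sequencing, guarded the same way (the clock name and rate are taken from the hunks above):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static struct clk *uclk;

static int at91_usb_clk_on(struct platform_device *pdev)
{
	if (!IS_ENABLED(CONFIG_COMMON_CLK))
		return 0;

	uclk = clk_get(&pdev->dev, "usb_clk");
	if (IS_ERR(uclk))
		return PTR_ERR(uclk);

	clk_set_rate(uclk, 48000000);      /* OHCI needs a 48 MHz reference */
	return clk_prepare_enable(uclk);
}

static void at91_usb_clk_off(void)
{
	if (!IS_ENABLED(CONFIG_COMMON_CLK))
		return;

	clk_disable_unprepare(uclk);
	clk_put(uclk);
}
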
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
index 6aaa9c9c8eb..9be59f11e05 100644
--- a/drivers/usb/host/ohci-da8xx.c
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -85,7 +85,7 @@ static void ohci_da8xx_ocic_handler(struct da8xx_ohci_root_hub *hub,
static int ohci_da8xx_init(struct usb_hcd *hcd)
{
struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev->platform_data;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
int result;
u32 rh_a;
@@ -171,7 +171,7 @@ static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
struct device *dev = hcd->self.controller;
- struct da8xx_ohci_root_hub *hub = dev->platform_data;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(dev);
int temp;
switch (typeReq) {
@@ -292,7 +292,7 @@ static const struct hc_driver ohci_da8xx_hc_driver = {
static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
struct platform_device *pdev)
{
- struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(&pdev->dev);
struct usb_hcd *hcd;
struct resource *mem;
int error, irq;
@@ -380,7 +380,7 @@ err0:
static inline void
usb_hcd_da8xx_remove(struct usb_hcd *hcd, struct platform_device *pdev)
{
- struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data;
+ struct da8xx_ohci_root_hub *hub = dev_get_platdata(&pdev->dev);
hub->ocic_notify(NULL);
usb_remove_hcd(hcd);
diff --git a/drivers/usb/host/ohci-ep93xx.c b/drivers/usb/host/ohci-ep93xx.c
index 8704e9fa5a8..84a20d5223b 100644
--- a/drivers/usb/host/ohci-ep93xx.c
+++ b/drivers/usb/host/ohci-ep93xx.c
@@ -30,83 +30,6 @@
static struct clk *usb_host_clock;
-static void ep93xx_start_hc(struct device *dev)
-{
- clk_enable(usb_host_clock);
-}
-
-static void ep93xx_stop_hc(struct device *dev)
-{
- clk_disable(usb_host_clock);
-}
-
-static int usb_hcd_ep93xx_probe(const struct hc_driver *driver,
- struct platform_device *pdev)
-{
- int retval;
- struct usb_hcd *hcd;
-
- if (pdev->resource[1].flags != IORESOURCE_IRQ) {
- dev_dbg(&pdev->dev, "resource[1] is not IORESOURCE_IRQ\n");
- return -ENOMEM;
- }
-
- hcd = usb_create_hcd(driver, &pdev->dev, "ep93xx");
- if (hcd == NULL)
- return -ENOMEM;
-
- hcd->rsrc_start = pdev->resource[0].start;
- hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;
- if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- usb_put_hcd(hcd);
- retval = -EBUSY;
- goto err1;
- }
-
- hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
- if (hcd->regs == NULL) {
- dev_dbg(&pdev->dev, "ioremap failed\n");
- retval = -ENOMEM;
- goto err2;
- }
-
- usb_host_clock = clk_get(&pdev->dev, NULL);
- if (IS_ERR(usb_host_clock)) {
- dev_dbg(&pdev->dev, "clk_get failed\n");
- retval = PTR_ERR(usb_host_clock);
- goto err3;
- }
-
- ep93xx_start_hc(&pdev->dev);
-
- ohci_hcd_init(hcd_to_ohci(hcd));
-
- retval = usb_add_hcd(hcd, pdev->resource[1].start, 0);
- if (retval == 0)
- return retval;
-
- ep93xx_stop_hc(&pdev->dev);
-err3:
- iounmap(hcd->regs);
-err2:
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
-err1:
- usb_put_hcd(hcd);
-
- return retval;
-}
-
-static void usb_hcd_ep93xx_remove(struct usb_hcd *hcd,
- struct platform_device *pdev)
-{
- usb_remove_hcd(hcd);
- ep93xx_stop_hc(&pdev->dev);
- clk_put(usb_host_clock);
- iounmap(hcd->regs);
- release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
- usb_put_hcd(hcd);
-}
-
static int ohci_ep93xx_start(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
@@ -147,15 +70,57 @@ static struct hc_driver ohci_ep93xx_hc_driver = {
.start_port_reset = ohci_start_port_reset,
};
-extern int usb_disabled(void);
-
static int ohci_hcd_ep93xx_drv_probe(struct platform_device *pdev)
{
+ struct usb_hcd *hcd;
+ struct resource *res;
+ int irq;
int ret;
- ret = -ENODEV;
- if (!usb_disabled())
- ret = usb_hcd_ep93xx_probe(&ohci_ep93xx_hc_driver, pdev);
+ if (usb_disabled())
+ return -ENODEV;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENXIO;
+
+ hcd = usb_create_hcd(&ohci_ep93xx_hc_driver, &pdev->dev, "ep93xx");
+ if (!hcd)
+ return -ENOMEM;
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+
+ hcd->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hcd->regs)) {
+ ret = PTR_ERR(hcd->regs);
+ goto err_put_hcd;
+ }
+
+ usb_host_clock = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(usb_host_clock)) {
+ ret = PTR_ERR(usb_host_clock);
+ goto err_put_hcd;
+ }
+
+ clk_enable(usb_host_clock);
+
+ ohci_hcd_init(hcd_to_ohci(hcd));
+
+ ret = usb_add_hcd(hcd, irq, 0);
+ if (ret)
+ goto err_clk_disable;
+
+ return 0;
+
+err_clk_disable:
+ clk_disable(usb_host_clock);
+err_put_hcd:
+ usb_put_hcd(hcd);
return ret;
}
@@ -164,7 +129,9 @@ static int ohci_hcd_ep93xx_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
- usb_hcd_ep93xx_remove(hcd, pdev);
+ usb_remove_hcd(hcd);
+ clk_disable(usb_host_clock);
+ usb_put_hcd(hcd);
return 0;
}
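
The rewritten ep93xx probe drops the hand-rolled request_mem_region()/ioremap()/clk_get() error unwinding in favour of devm_ioremap_resource() and devm_clk_get(), whose resources are released automatically when the device is unbound, which is why the remove path above no longer calls iounmap(), release_mem_region() or clk_put(). A stripped-down sketch of that acquisition order with generic names (not the driver's actual code):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_acquire(struct platform_device *pdev,
		       void __iomem **regs, struct clk **clk)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* requests the region and maps it; both undone on driver detach */
	*regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(*regs))
		return PTR_ERR(*regs);

	/* no clk_put() needed later */
	*clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(*clk))
		return PTR_ERR(*clk);

	return clk_prepare_enable(*clk);
}
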
@@ -179,7 +146,7 @@ static int ohci_hcd_ep93xx_drv_suspend(struct platform_device *pdev, pm_message_
msleep(5);
ohci->next_statechange = jiffies;
- ep93xx_stop_hc(&pdev->dev);
+ clk_disable(usb_host_clock);
return 0;
}
@@ -192,7 +159,7 @@ static int ohci_hcd_ep93xx_drv_resume(struct platform_device *pdev)
msleep(5);
ohci->next_statechange = jiffies;
- ep93xx_start_hc(&pdev->dev);
+ clk_enable(usb_host_clock);
ohci_resume(hcd, false);
return 0;
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index b0b542c14e3..dc6ee9adacf 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -100,7 +100,7 @@ static const struct hc_driver exynos_ohci_hc_driver = {
static int exynos_ohci_probe(struct platform_device *pdev)
{
- struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data;
+ struct exynos4_ohci_platdata *pdata = dev_get_platdata(&pdev->dev);
struct exynos_ohci_hcd *exynos_ohci;
struct usb_hcd *hcd;
struct ohci_hcd *ohci;
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index a9d3437da22..8f6b695af6a 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -938,8 +938,8 @@ static void ohci_stop (struct usb_hcd *hcd)
if (quirk_nec(ohci))
flush_work(&ohci->nec_work);
- ohci_usb_reset (ohci);
ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
+ ohci_usb_reset(ohci);
free_irq(hcd->irq, hcd);
hcd->irq = 0;
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index 8747fa6a51b..31d3a12eb48 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -191,7 +191,7 @@ static void start_hnp(struct ohci_hcd *ohci)
static int ohci_omap_init(struct usb_hcd *hcd)
{
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
- struct omap_usb_config *config = hcd->self.controller->platform_data;
+ struct omap_usb_config *config = dev_get_platdata(hcd->self.controller);
int need_transceiver = (config->otg != 0);
int ret;
@@ -427,7 +427,7 @@ ohci_omap_start (struct usb_hcd *hcd)
if (!host_enabled)
return 0;
- config = hcd->self.controller->platform_data;
+ config = dev_get_platdata(hcd->self.controller);
if (config->otg || config->rwc) {
ohci->hc_control = OHCI_CTRL_RWC;
writel(OHCI_CTRL_RWC, &ohci->regs->control);
diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
index 8f713571a0b..a09af26f69e 100644
--- a/drivers/usb/host/ohci-omap3.c
+++ b/drivers/usb/host/ohci-omap3.c
@@ -231,14 +231,6 @@ static int ohci_hcd_omap3_remove(struct platform_device *pdev)
return 0;
}
-static void ohci_hcd_omap3_shutdown(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev);
-
- if (hcd->driver->shutdown)
- hcd->driver->shutdown(hcd);
-}
-
static const struct of_device_id omap_ohci_dt_ids[] = {
{ .compatible = "ti,ohci-omap3" },
{ }
@@ -249,7 +241,7 @@ MODULE_DEVICE_TABLE(of, omap_ohci_dt_ids);
static struct platform_driver ohci_hcd_omap3_driver = {
.probe = ohci_hcd_omap3_probe,
.remove = ohci_hcd_omap3_remove,
- .shutdown = ohci_hcd_omap3_shutdown,
+ .shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ohci-omap3",
.of_match_table = omap_ohci_dt_ids,
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 08613e24189..ec337c2bd5e 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -289,7 +289,7 @@ static struct pci_driver ohci_pci_driver = {
.remove = usb_hcd_pci_remove,
.shutdown = usb_hcd_pci_shutdown,
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
.driver = {
.pm = &usb_hcd_pci_pm_ops
},
@@ -304,6 +304,13 @@ static int __init ohci_pci_init(void)
pr_info("%s: " DRIVER_DESC "\n", hcd_name);
ohci_init_driver(&ohci_pci_hc_driver, &pci_overrides);
+
+#ifdef CONFIG_PM
+ /* Entries for the PCI suspend/resume callbacks are special */
+ ohci_pci_hc_driver.pci_suspend = ohci_suspend;
+ ohci_pci_hc_driver.pci_resume = ohci_resume;
+#endif
+
return pci_register_driver(&ohci_pci_driver);
}
module_init(ohci_pci_init);
diff --git a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c
index bc30475c3a2..a4c6410f0ed 100644
--- a/drivers/usb/host/ohci-platform.c
+++ b/drivers/usb/host/ohci-platform.c
@@ -33,7 +33,7 @@ static const char hcd_name[] = "ohci-platform";
static int ohci_platform_reset(struct usb_hcd *hcd)
{
struct platform_device *pdev = to_platform_device(hcd->self.controller);
- struct usb_ohci_pdata *pdata = pdev->dev.platform_data;
+ struct usb_ohci_pdata *pdata = dev_get_platdata(&pdev->dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
if (pdata->big_endian_desc)
@@ -59,7 +59,7 @@ static int ohci_platform_probe(struct platform_device *dev)
{
struct usb_hcd *hcd;
struct resource *res_mem;
- struct usb_ohci_pdata *pdata = dev->dev.platform_data;
+ struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
int irq;
int err = -ENOMEM;
@@ -124,7 +124,7 @@ err_power:
static int ohci_platform_remove(struct platform_device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(dev);
- struct usb_ohci_pdata *pdata = dev->dev.platform_data;
+ struct usb_ohci_pdata *pdata = dev_get_platdata(&dev->dev);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
@@ -139,7 +139,7 @@ static int ohci_platform_remove(struct platform_device *dev)
static int ohci_platform_suspend(struct device *dev)
{
- struct usb_ohci_pdata *pdata = dev->platform_data;
+ struct usb_ohci_pdata *pdata = dev_get_platdata(dev);
struct platform_device *pdev =
container_of(dev, struct platform_device, dev);
@@ -152,7 +152,7 @@ static int ohci_platform_suspend(struct device *dev)
static int ohci_platform_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
- struct usb_ohci_pdata *pdata = dev->platform_data;
+ struct usb_ohci_pdata *pdata = dev_get_platdata(dev);
struct platform_device *pdev =
container_of(dev, struct platform_device, dev);
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 8294e2fcc2f..75f5a1e2f01 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -200,15 +200,6 @@ static int ohci_hcd_ppc_of_remove(struct platform_device *op)
return 0;
}
-static void ohci_hcd_ppc_of_shutdown(struct platform_device *op)
-{
- struct usb_hcd *hcd = platform_get_drvdata(op);
-
- if (hcd->driver->shutdown)
- hcd->driver->shutdown(hcd);
-}
-
-
static const struct of_device_id ohci_hcd_ppc_of_match[] = {
#ifdef CONFIG_USB_OHCI_HCD_PPC_OF_BE
{
@@ -243,7 +234,7 @@ MODULE_DEVICE_TABLE(of, ohci_hcd_ppc_of_match);
static struct platform_driver ohci_hcd_ppc_of_driver = {
.probe = ohci_hcd_ppc_of_probe,
.remove = ohci_hcd_ppc_of_remove,
- .shutdown = ohci_hcd_ppc_of_shutdown,
+ .shutdown = usb_hcd_platform_shutdown,
.driver = {
.name = "ppc-of-ohci",
.owner = THIS_MODULE,
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 3a9c01d8b79..93371a235e8 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -219,7 +219,7 @@ static int pxa27x_start_hc(struct pxa27x_ohci *ohci, struct device *dev)
struct pxaohci_platform_data *inf;
uint32_t uhchr;
- inf = dev->platform_data;
+ inf = dev_get_platdata(dev);
clk_prepare_enable(ohci->clk);
@@ -256,7 +256,7 @@ static void pxa27x_stop_hc(struct pxa27x_ohci *ohci, struct device *dev)
struct pxaohci_platform_data *inf;
uint32_t uhccoms;
- inf = dev->platform_data;
+ inf = dev_get_platdata(dev);
if (cpu_is_pxa3xx())
pxa3xx_u2d_stop_hc(&ohci_to_hcd(&ohci->ohci)->self);
@@ -364,7 +364,7 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
if (retval)
return retval;
- inf = pdev->dev.platform_data;
+ inf = dev_get_platdata(&pdev->dev);
if (!inf)
return -ENODEV;
@@ -577,7 +577,7 @@ static int ohci_hcd_pxa27x_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd);
- struct pxaohci_platform_data *inf = dev->platform_data;
+ struct pxaohci_platform_data *inf = dev_get_platdata(dev);
int status;
if (time_before(jiffies, ohci->ohci.next_statechange))
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index e125770b893..4919afa4125 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -38,12 +38,12 @@ static void s3c2410_hcd_oc(struct s3c2410_hcd_info *info, int port_oc);
static struct s3c2410_hcd_info *to_s3c2410_info(struct usb_hcd *hcd)
{
- return hcd->self.controller->platform_data;
+ return dev_get_platdata(hcd->self.controller);
}
static void s3c2410_start_hc(struct platform_device *dev, struct usb_hcd *hcd)
{
- struct s3c2410_hcd_info *info = dev->dev.platform_data;
+ struct s3c2410_hcd_info *info = dev_get_platdata(&dev->dev);
dev_dbg(&dev->dev, "s3c2410_start_hc:\n");
@@ -63,7 +63,7 @@ static void s3c2410_start_hc(struct platform_device *dev, struct usb_hcd *hcd)
static void s3c2410_stop_hc(struct platform_device *dev)
{
- struct s3c2410_hcd_info *info = dev->dev.platform_data;
+ struct s3c2410_hcd_info *info = dev_get_platdata(&dev->dev);
dev_dbg(&dev->dev, "s3c2410_stop_hc:\n");
@@ -339,10 +339,11 @@ static int usb_hcd_s3c2410_probe(const struct hc_driver *driver,
struct platform_device *dev)
{
struct usb_hcd *hcd = NULL;
+ struct s3c2410_hcd_info *info = dev_get_platdata(&dev->dev);
int retval;
- s3c2410_usb_set_power(dev->dev.platform_data, 1, 1);
- s3c2410_usb_set_power(dev->dev.platform_data, 2, 1);
+ s3c2410_usb_set_power(info, 1, 1);
+ s3c2410_usb_set_power(info, 2, 1);
hcd = usb_create_hcd(driver, &dev->dev, "s3c24xx");
if (hcd == NULL)
diff --git a/drivers/usb/host/ohci-tilegx.c b/drivers/usb/host/ohci-tilegx.c
index 197d514fe0d..22540ab71f5 100644
--- a/drivers/usb/host/ohci-tilegx.c
+++ b/drivers/usb/host/ohci-tilegx.c
@@ -95,7 +95,7 @@ static const struct hc_driver ohci_tilegx_hc_driver = {
static int ohci_hcd_tilegx_drv_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
- struct tilegx_usb_platform_data *pdata = pdev->dev.platform_data;
+ struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
pte_t pte = { 0 };
int my_cpu = smp_processor_id();
int ret;
@@ -175,7 +175,7 @@ err_hcd:
static int ohci_hcd_tilegx_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct tilegx_usb_platform_data* pdata = pdev->dev.platform_data;
+ struct tilegx_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
usb_remove_hcd(hcd);
usb_put_hcd(hcd);
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index b9848e4d3d4..2c76ef1320e 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -735,32 +735,6 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done,
return -ETIMEDOUT;
}
-#define PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI 0x8C31
-#define PCI_DEVICE_ID_INTEL_LYNX_POINT_LP_XHCI 0x9C31
-
-bool usb_is_intel_ppt_switchable_xhci(struct pci_dev *pdev)
-{
- return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
- pdev->vendor == PCI_VENDOR_ID_INTEL &&
- pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI;
-}
-
-/* The Intel Lynx Point chipset also has switchable ports. */
-bool usb_is_intel_lpt_switchable_xhci(struct pci_dev *pdev)
-{
- return pdev->class == PCI_CLASS_SERIAL_USB_XHCI &&
- pdev->vendor == PCI_VENDOR_ID_INTEL &&
- (pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_LP_XHCI);
-}
-
-bool usb_is_intel_switchable_xhci(struct pci_dev *pdev)
-{
- return usb_is_intel_ppt_switchable_xhci(pdev) ||
- usb_is_intel_lpt_switchable_xhci(pdev);
-}
-EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci);
-
/*
* Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that
* share some number of ports. These ports can be switched between either
@@ -779,9 +753,23 @@ EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci);
* terminations before switching the USB 2.0 wires over, so that USB 3.0
* devices connect at SuperSpeed, rather than at USB 2.0 speeds.
*/
-void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
+void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev)
{
u32 ports_available;
+ bool ehci_found = false;
+ struct pci_dev *companion = NULL;
+
+ /* make sure an intel EHCI controller exists */
+ for_each_pci_dev(companion) {
+ if (companion->class == PCI_CLASS_SERIAL_USB_EHCI &&
+ companion->vendor == PCI_VENDOR_ID_INTEL) {
+ ehci_found = true;
+ break;
+ }
+ }
+
+ if (!ehci_found)
+ return;
/* Don't switchover the ports if the user hasn't compiled the xHCI
* driver. Otherwise they will see "dead" USB ports that don't power
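
The companion-controller scan above uses for_each_pci_dev(), which is built on pci_get_device() and therefore holds a reference on the device it returns; breaking out of the loop keeps that reference. The quirk runs once at boot so it hardly matters here, but the reference-tidy variant would look like this sketch:

#include <linux/pci.h>
#include <linux/pci_ids.h>

static bool intel_ehci_present(void)
{
	struct pci_dev *pdev = NULL;
	bool found = false;

	for_each_pci_dev(pdev) {
		if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI &&
		    pdev->vendor == PCI_VENDOR_ID_INTEL) {
			found = true;
			pci_dev_put(pdev);   /* drop the iterator's reference */
			break;
		}
	}
	return found;
}
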
@@ -840,7 +828,7 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
dev_dbg(&xhci_pdev->dev, "USB 2.0 ports that are now switched over "
"to xHCI: 0x%x\n", ports_available);
}
-EXPORT_SYMBOL_GPL(usb_enable_xhci_ports);
+EXPORT_SYMBOL_GPL(usb_enable_intel_xhci_ports);
void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
{
@@ -921,8 +909,8 @@ static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
hc_init:
- if (usb_is_intel_switchable_xhci(pdev))
- usb_enable_xhci_ports(pdev);
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+ usb_enable_intel_xhci_ports(pdev);
op_reg_base = base + XHCI_HC_LENGTH(readl(base));
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index 4b8a2092432..ed6700d00fe 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -8,11 +8,11 @@ int usb_amd_find_chipset_info(void);
void usb_amd_dev_put(void);
void usb_amd_quirk_pll_disable(void);
void usb_amd_quirk_pll_enable(void);
-bool usb_is_intel_switchable_xhci(struct pci_dev *pdev);
-void usb_enable_xhci_ports(struct pci_dev *xhci_pdev);
+void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
void sb800_prefetch(struct device *dev, int on);
#else
+struct pci_dev;
static inline void usb_amd_quirk_pll_disable(void) {}
static inline void usb_amd_quirk_pll_enable(void) {}
static inline void usb_amd_dev_put(void) {}
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index a6fd8f5371d..2ad004ae747 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -2393,7 +2393,7 @@ static const struct dev_pm_ops r8a66597_dev_pm_ops = {
static int r8a66597_remove(struct platform_device *pdev)
{
- struct r8a66597 *r8a66597 = dev_get_drvdata(&pdev->dev);
+ struct r8a66597 *r8a66597 = platform_get_drvdata(pdev);
struct usb_hcd *hcd = r8a66597_to_hcd(r8a66597);
del_timer_sync(&r8a66597->rh_timer);
@@ -2466,8 +2466,8 @@ static int r8a66597_probe(struct platform_device *pdev)
}
r8a66597 = hcd_to_r8a66597(hcd);
memset(r8a66597, 0, sizeof(struct r8a66597));
- dev_set_drvdata(&pdev->dev, r8a66597);
- r8a66597->pdata = pdev->dev.platform_data;
+ platform_set_drvdata(pdev, r8a66597);
+ r8a66597->pdata = dev_get_platdata(&pdev->dev);
r8a66597->irq_sense_low = irq_trigger == IRQF_TRIGGER_LOW;
if (r8a66597->pdata->on_chip) {
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index b2ec7fe758d..5477bf5df21 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -48,6 +48,8 @@
#include <linux/usb/hcd.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
#include <asm/io.h>
#include <asm/irq.h>
@@ -63,11 +65,6 @@ MODULE_ALIAS("platform:sl811-hcd");
#define DRIVER_VERSION "19 May 2005"
-
-#ifndef DEBUG
-# define STUB_DEBUG_FILE
-#endif
-
/* for now, use only one transfer register bank */
#undef USE_B
@@ -100,7 +97,8 @@ static void port_power(struct sl811 *sl811, int is_on)
if (sl811->board && sl811->board->port_power) {
/* switch VBUS, at 500mA unless hub power budget gets set */
- DBG("power %s\n", is_on ? "on" : "off");
+ dev_dbg(hcd->self.controller, "power %s\n",
+ is_on ? "on" : "off");
sl811->board->port_power(hcd->self.controller, is_on);
}
@@ -282,7 +280,7 @@ static inline void sofirq_on(struct sl811 *sl811)
{
if (sl811->irq_enable & SL11H_INTMASK_SOFINTR)
return;
- VDBG("sof irq on\n");
+ dev_dbg(sl811_to_hcd(sl811)->self.controller, "sof irq on\n");
sl811->irq_enable |= SL11H_INTMASK_SOFINTR;
}
@@ -290,7 +288,7 @@ static inline void sofirq_off(struct sl811 *sl811)
{
if (!(sl811->irq_enable & SL11H_INTMASK_SOFINTR))
return;
- VDBG("sof irq off\n");
+ dev_dbg(sl811_to_hcd(sl811)->self.controller, "sof irq off\n");
sl811->irq_enable &= ~SL11H_INTMASK_SOFINTR;
}
@@ -338,7 +336,8 @@ static struct sl811h_ep *start(struct sl811 *sl811, u8 bank)
}
if (unlikely(list_empty(&ep->hep->urb_list))) {
- DBG("empty %p queue?\n", ep);
+ dev_dbg(sl811_to_hcd(sl811)->self.controller,
+ "empty %p queue?\n", ep);
return NULL;
}
@@ -391,7 +390,8 @@ static struct sl811h_ep *start(struct sl811 *sl811, u8 bank)
status_packet(sl811, ep, urb, bank, control);
break;
default:
- DBG("bad ep%p pid %02x\n", ep, ep->nextpid);
+ dev_dbg(sl811_to_hcd(sl811)->self.controller,
+ "bad ep%p pid %02x\n", ep, ep->nextpid);
ep = NULL;
}
return ep;
@@ -447,7 +447,8 @@ static void finish_request(
}
/* periodic deschedule */
- DBG("deschedule qh%d/%p branch %d\n", ep->period, ep, ep->branch);
+ dev_dbg(sl811_to_hcd(sl811)->self.controller,
+ "deschedule qh%d/%p branch %d\n", ep->period, ep, ep->branch);
for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
struct sl811h_ep *temp;
struct sl811h_ep **prev = &sl811->periodic[i];
@@ -593,7 +594,8 @@ static inline u8 checkdone(struct sl811 *sl811)
ctl = sl811_read(sl811, SL811_EP_A(SL11H_HOSTCTLREG));
if (ctl & SL11H_HCTLMASK_ARM)
sl811_write(sl811, SL811_EP_A(SL11H_HOSTCTLREG), 0);
- DBG("%s DONE_A: ctrl %02x sts %02x\n",
+ dev_dbg(sl811_to_hcd(sl811)->self.controller,
+ "%s DONE_A: ctrl %02x sts %02x\n",
(ctl & SL11H_HCTLMASK_ARM) ? "timeout" : "lost",
ctl,
sl811_read(sl811, SL811_EP_A(SL11H_PKTSTATREG)));
@@ -604,7 +606,8 @@ static inline u8 checkdone(struct sl811 *sl811)
ctl = sl811_read(sl811, SL811_EP_B(SL11H_HOSTCTLREG));
if (ctl & SL11H_HCTLMASK_ARM)
sl811_write(sl811, SL811_EP_B(SL11H_HOSTCTLREG), 0);
- DBG("%s DONE_B: ctrl %02x sts %02x\n",
+ dev_dbg(sl811_to_hcd(sl811)->self.controller,
+ "%s DONE_B: ctrl %02x sts %02x\n",
(ctl & SL11H_HCTLMASK_ARM) ? "timeout" : "lost",
ctl,
sl811_read(sl811, SL811_EP_B(SL11H_PKTSTATREG)));
@@ -665,7 +668,7 @@ retry:
* this one has nothing scheduled.
*/
if (sl811->next_periodic) {
- // ERR("overrun to slot %d\n", index);
+ // dev_err(hcd->self.controller, "overrun to slot %d\n", index);
sl811->stat_overrun++;
}
if (sl811->periodic[index])
@@ -723,7 +726,7 @@ retry:
} else if (irqstat & SL11H_INTMASK_RD) {
if (sl811->port1 & USB_PORT_STAT_SUSPEND) {
- DBG("wakeup\n");
+ dev_dbg(hcd->self.controller, "wakeup\n");
sl811->port1 |= USB_PORT_STAT_C_SUSPEND << 16;
sl811->stat_wake++;
} else
@@ -852,8 +855,9 @@ static int sl811h_urb_enqueue(
if (ep->maxpacket > H_MAXPACKET) {
/* iso packets up to 240 bytes could work... */
- DBG("dev %d ep%d maxpacket %d\n",
- udev->devnum, epnum, ep->maxpacket);
+ dev_dbg(hcd->self.controller,
+ "dev %d ep%d maxpacket %d\n", udev->devnum,
+ epnum, ep->maxpacket);
retval = -EINVAL;
kfree(ep);
goto fail;
@@ -917,7 +921,8 @@ static int sl811h_urb_enqueue(
* to share the faster parts of the tree without needing
* dummy/placeholder nodes
*/
- DBG("schedule qh%d/%p branch %d\n", ep->period, ep, ep->branch);
+ dev_dbg(hcd->self.controller, "schedule qh%d/%p branch %d\n",
+ ep->period, ep, ep->branch);
for (i = ep->branch; i < PERIODIC_SIZE; i += ep->period) {
struct sl811h_ep **prev = &sl811->periodic[i];
struct sl811h_ep *here = *prev;
@@ -976,7 +981,8 @@ static int sl811h_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
} else if (sl811->active_a == ep) {
if (time_before_eq(sl811->jiffies_a, jiffies)) {
/* happens a lot with lowspeed?? */
- DBG("giveup on DONE_A: ctrl %02x sts %02x\n",
+ dev_dbg(hcd->self.controller,
+ "giveup on DONE_A: ctrl %02x sts %02x\n",
sl811_read(sl811,
SL811_EP_A(SL11H_HOSTCTLREG)),
sl811_read(sl811,
@@ -990,7 +996,8 @@ static int sl811h_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
} else if (sl811->active_b == ep) {
if (time_before_eq(sl811->jiffies_a, jiffies)) {
/* happens a lot with lowspeed?? */
- DBG("giveup on DONE_B: ctrl %02x sts %02x\n",
+ dev_dbg(hcd->self.controller,
+ "giveup on DONE_B: ctrl %02x sts %02x\n",
sl811_read(sl811,
SL811_EP_B(SL11H_HOSTCTLREG)),
sl811_read(sl811,
@@ -1008,7 +1015,8 @@ static int sl811h_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
if (urb)
finish_request(sl811, ep, urb, 0);
else
- VDBG("dequeue, urb %p active %s; wait4irq\n", urb,
+ dev_dbg(sl811_to_hcd(sl811)->self.controller,
+ "dequeue, urb %p active %s; wait4irq\n", urb,
(sl811->active_a == ep) ? "A" : "B");
} else
retval = -EINVAL;
@@ -1029,7 +1037,7 @@ sl811h_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep)
if (!list_empty(&hep->urb_list))
msleep(3);
if (!list_empty(&hep->urb_list))
- WARNING("ep %p not empty?\n", ep);
+ dev_warn(hcd->self.controller, "ep %p not empty?\n", ep);
kfree(ep);
hep->hcpriv = NULL;
@@ -1132,7 +1140,7 @@ sl811h_timer(unsigned long _sl811)
switch (signaling) {
case SL11H_CTL1MASK_SE0:
- DBG("end reset\n");
+ dev_dbg(sl811_to_hcd(sl811)->self.controller, "end reset\n");
sl811->port1 = (USB_PORT_STAT_C_RESET << 16)
| USB_PORT_STAT_POWER;
sl811->ctrl1 = 0;
@@ -1141,11 +1149,12 @@ sl811h_timer(unsigned long _sl811)
irqstat &= ~SL11H_INTMASK_RD;
break;
case SL11H_CTL1MASK_K:
- DBG("end resume\n");
+ dev_dbg(sl811_to_hcd(sl811)->self.controller, "end resume\n");
sl811->port1 &= ~USB_PORT_STAT_SUSPEND;
break;
default:
- DBG("odd timer signaling: %02x\n", signaling);
+ dev_dbg(sl811_to_hcd(sl811)->self.controller,
+ "odd timer signaling: %02x\n", signaling);
break;
}
sl811_write(sl811, SL11H_IRQ_STATUS, irqstat);
@@ -1243,7 +1252,7 @@ sl811h_hub_control(
break;
/* 20 msec of resume/K signaling, other irqs blocked */
- DBG("start resume...\n");
+ dev_dbg(hcd->self.controller, "start resume...\n");
sl811->irq_enable = 0;
sl811_write(sl811, SL11H_IRQ_ENABLE,
sl811->irq_enable);
@@ -1281,7 +1290,8 @@ sl811h_hub_control(
#ifndef VERBOSE
if (*(u16*)(buf+2)) /* only if wPortChange is interesting */
#endif
- DBG("GetPortStatus %08x\n", sl811->port1);
+ dev_dbg(hcd->self.controller, "GetPortStatus %08x\n",
+ sl811->port1);
break;
case SetPortFeature:
if (wIndex != 1 || wLength != 0)
@@ -1293,7 +1303,7 @@ sl811h_hub_control(
if (!(sl811->port1 & USB_PORT_STAT_ENABLE))
goto error;
- DBG("suspend...\n");
+			dev_dbg(hcd->self.controller, "suspend...\n");
sl811->ctrl1 &= ~SL11H_CTL1MASK_SOF_ENA;
sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
break;
@@ -1338,7 +1348,7 @@ static int
sl811h_bus_suspend(struct usb_hcd *hcd)
{
// SOFs off
- DBG("%s\n", __func__);
+ dev_dbg(hcd->self.controller, "%s\n", __func__);
return 0;
}
@@ -1346,7 +1356,7 @@ static int
sl811h_bus_resume(struct usb_hcd *hcd)
{
// SOFs on
- DBG("%s\n", __func__);
+ dev_dbg(hcd->self.controller, "%s\n", __func__);
return 0;
}
@@ -1360,16 +1370,6 @@ sl811h_bus_resume(struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
-#ifdef STUB_DEBUG_FILE
-
-static inline void create_debug_file(struct sl811 *sl811) { }
-static inline void remove_debug_file(struct sl811 *sl811) { }
-
-#else
-
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-
static void dump_irq(struct seq_file *s, char *label, u8 mask)
{
seq_printf(s, "%s %02x%s%s%s%s%s%s\n", label, mask,
@@ -1381,7 +1381,7 @@ static void dump_irq(struct seq_file *s, char *label, u8 mask)
(mask & SL11H_INTMASK_DP) ? " dp" : "");
}
-static int proc_sl811h_show(struct seq_file *s, void *unused)
+static int sl811h_show(struct seq_file *s, void *unused)
{
struct sl811 *sl811 = s->private;
struct sl811h_ep *ep;
@@ -1492,34 +1492,31 @@ static int proc_sl811h_show(struct seq_file *s, void *unused)
return 0;
}
-static int proc_sl811h_open(struct inode *inode, struct file *file)
+static int sl811h_open(struct inode *inode, struct file *file)
{
- return single_open(file, proc_sl811h_show, PDE_DATA(inode));
+ return single_open(file, sl811h_show, inode->i_private);
}
-static const struct file_operations proc_ops = {
- .open = proc_sl811h_open,
+static const struct file_operations debug_ops = {
+ .open = sl811h_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/* expect just one sl811 per system */
-static const char proc_filename[] = "driver/sl811h";
-
static void create_debug_file(struct sl811 *sl811)
{
- sl811->pde = proc_create_data(proc_filename, 0, NULL, &proc_ops, sl811);
+ sl811->debug_file = debugfs_create_file("sl811h", S_IRUGO,
+ usb_debug_root, sl811,
+ &debug_ops);
}
static void remove_debug_file(struct sl811 *sl811)
{
- if (sl811->pde)
- remove_proc_entry(proc_filename, NULL);
+ debugfs_remove(sl811->debug_file);
}
-#endif
-
/*-------------------------------------------------------------------------*/
static void
@@ -1648,7 +1645,7 @@ sl811h_probe(struct platform_device *dev)
/* refuse to confuse usbcore */
if (dev->dev.dma_mask) {
- DBG("no we won't dma\n");
+ dev_dbg(&dev->dev, "no we won't dma\n");
return -EINVAL;
}
@@ -1694,7 +1691,7 @@ sl811h_probe(struct platform_device *dev)
spin_lock_init(&sl811->lock);
INIT_LIST_HEAD(&sl811->async);
- sl811->board = dev->dev.platform_data;
+ sl811->board = dev_get_platdata(&dev->dev);
init_timer(&sl811->timer);
sl811->timer.function = sl811h_timer;
sl811->timer.data = (unsigned long) sl811;
@@ -1716,7 +1713,7 @@ sl811h_probe(struct platform_device *dev)
break;
default:
/* reject case 0, SL11S is less functional */
- DBG("chiprev %02x\n", tmp);
+ dev_dbg(&dev->dev, "chiprev %02x\n", tmp);
retval = -ENXIO;
goto err6;
}
@@ -1747,7 +1744,7 @@ sl811h_probe(struct platform_device *dev)
if (!ioaddr)
iounmap(addr_reg);
err2:
- DBG("init error, %d\n", retval);
+ dev_dbg(&dev->dev, "init error, %d\n", retval);
return retval;
}
diff --git a/drivers/usb/host/sl811.h b/drivers/usb/host/sl811.h
index b6b8c1f233d..1e23ef49bec 100644
--- a/drivers/usb/host/sl811.h
+++ b/drivers/usb/host/sl811.h
@@ -122,7 +122,7 @@ struct sl811 {
void __iomem *addr_reg;
void __iomem *data_reg;
struct sl811_platform_data *board;
- struct proc_dir_entry *pde;
+ struct dentry *debug_file;
unsigned long stat_insrmv;
unsigned long stat_wake;
@@ -242,25 +242,8 @@ sl811_read_buf(struct sl811 *sl811, int addr, void *buf, size_t count)
/*-------------------------------------------------------------------------*/
-#ifdef DEBUG
-#define DBG(stuff...) printk(KERN_DEBUG "sl811: " stuff)
-#else
-#define DBG(stuff...) do{}while(0)
-#endif
-
-#ifdef VERBOSE
-# define VDBG DBG
-#else
-# define VDBG(stuff...) do{}while(0)
-#endif
-
#ifdef PACKET_TRACE
-# define PACKET VDBG
+# define PACKET(stuff...) pr_debug("sl811: " stuff)
#else
# define PACKET(stuff...) do{}while(0)
#endif
-
-#define ERR(stuff...) printk(KERN_ERR "sl811: " stuff)
-#define WARNING(stuff...) printk(KERN_WARNING "sl811: " stuff)
-#define INFO(stuff...) printk(KERN_INFO "sl811: " stuff)
-
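With the driver-private DBG/VDBG/ERR/WARNING/INFO macros gone, sl811 message sites use the generic device logging helpers directly. A minimal sketch, not part of this patch, assuming an sl811 context where hcd and retval are already in scope; "init error" mirrors the probe path converted above, and the dev_err() line is only illustrative of how the removed ERR() macro would be replaced:
/* dev_dbg() takes over for DBG(); dev_err() is the natural stand-in for ERR() */
dev_dbg(hcd->self.controller, "%s\n", __func__);
if (retval)
	dev_err(hcd->self.controller, "init error, %d\n", retval);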
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 5c124bf5d01..e402beb5a06 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -1809,9 +1809,9 @@ static int u132_hcd_start(struct usb_hcd *hcd)
struct platform_device *pdev =
to_platform_device(hcd->self.controller);
u16 vendor = ((struct u132_platform_data *)
- (pdev->dev.platform_data))->vendor;
+ dev_get_platdata(&pdev->dev))->vendor;
u16 device = ((struct u132_platform_data *)
- (pdev->dev.platform_data))->device;
+ dev_get_platdata(&pdev->dev))->device;
mutex_lock(&u132->sw_lock);
msleep(10);
if (vendor == PCI_VENDOR_ID_AMD && device == 0x740c) {
@@ -3034,7 +3034,7 @@ static void u132_initialise(struct u132 *u132, struct platform_device *pdev)
int addrs = MAX_U132_ADDRS;
int udevs = MAX_U132_UDEVS;
int endps = MAX_U132_ENDPS;
- u132->board = pdev->dev.platform_data;
+ u132->board = dev_get_platdata(&pdev->dev);
u132->platform_dev = pdev;
u132->power = 0;
u132->reset = 0;
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 5d5e58fdecc..73503a81ee8 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -580,3 +580,17 @@ void xhci_dbg_ctx(struct xhci_hcd *xhci,
xhci_dbg_slot_ctx(xhci, ctx);
xhci_dbg_ep_ctx(xhci, ctx, last_ep);
}
+
+void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ xhci_dbg(xhci, "%pV\n", &vaf);
+ trace(&vaf);
+ va_end(args);
+}
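The helper above formats the message once into a struct va_format and hands the same buffer to both xhci_dbg() and the tracepoint supplied by the caller. A minimal sketch of a call site, assuming an initialized struct xhci_hcd *xhci; this is the same pattern the later hunks in this series switch to:
/* trace_xhci_dbg_init is one of the message tracepoints declared in
 * xhci-trace.h; the format string is expanded only once. */
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
	       "Allocated command ring at %p", xhci->cmd_ring);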
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index 8d7a1324e2f..9fe3225e6c6 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -71,7 +71,7 @@
/* USB 2.0 xHCI 1.0 hardware LMP capability - section 7.2.2.1.3.2 */
#define XHCI_HLC (1 << 19)
-#define XHCI_BLC (1 << 19)
+#define XHCI_BLC (1 << 20)
/* command register values to disable interrupts and halt the HC */
/* start/stop HC execution - do not write unless HC is halted*/
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 1d3545943c5..fae697ed0b7 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -24,6 +24,7 @@
#include <asm/unaligned.h>
#include "xhci.h"
+#include "xhci-trace.h"
#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
#define PORT_RWC_BITS (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
@@ -461,8 +462,15 @@ void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
}
}
+/* Updates Link Status for USB 2.1 port */
+static void xhci_hub_report_usb2_link_state(u32 *status, u32 status_reg)
+{
+ if ((status_reg & PORT_PLS_MASK) == XDEV_U2)
+ *status |= USB_PORT_STAT_L1;
+}
+
/* Updates Link Status for super Speed port */
-static void xhci_hub_report_link_state(u32 *status, u32 status_reg)
+static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg)
{
u32 pls = status_reg & PORT_PLS_MASK;
@@ -528,12 +536,128 @@ void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex)
xhci->port_status_u0 |= 1 << wIndex;
if (xhci->port_status_u0 == all_ports_seen_u0) {
del_timer_sync(&xhci->comp_mode_recovery_timer);
- xhci_dbg(xhci, "All USB3 ports have entered U0 already!\n");
- xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "All USB3 ports have entered U0 already!");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Compliance Mode Recovery Timer Deleted.");
}
}
}
+/*
+ * Converts a raw xHCI port status into the format that external USB 2.0 or USB
+ * 3.0 hubs use.
+ *
+ * Possible side effects:
+ * - Mark a port as being done with device resume,
+ * and ring the endpoint doorbells.
+ * - Stop the Synopsys redriver Compliance Mode polling.
+ */
+static u32 xhci_get_port_status(struct usb_hcd *hcd,
+ struct xhci_bus_state *bus_state,
+ __le32 __iomem **port_array,
+ u16 wIndex, u32 raw_port_status)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ u32 status = 0;
+ int slot_id;
+
+ /* wPortChange bits */
+ if (raw_port_status & PORT_CSC)
+ status |= USB_PORT_STAT_C_CONNECTION << 16;
+ if (raw_port_status & PORT_PEC)
+ status |= USB_PORT_STAT_C_ENABLE << 16;
+ if ((raw_port_status & PORT_OCC))
+ status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+ if ((raw_port_status & PORT_RC))
+ status |= USB_PORT_STAT_C_RESET << 16;
+ /* USB3.0 only */
+ if (hcd->speed == HCD_USB3) {
+ if ((raw_port_status & PORT_PLC))
+ status |= USB_PORT_STAT_C_LINK_STATE << 16;
+ if ((raw_port_status & PORT_WRC))
+ status |= USB_PORT_STAT_C_BH_RESET << 16;
+ }
+
+ if (hcd->speed != HCD_USB3) {
+ if ((raw_port_status & PORT_PLS_MASK) == XDEV_U3
+ && (raw_port_status & PORT_POWER))
+ status |= USB_PORT_STAT_SUSPEND;
+ }
+ if ((raw_port_status & PORT_PLS_MASK) == XDEV_RESUME &&
+ !DEV_SUPERSPEED(raw_port_status)) {
+ if ((raw_port_status & PORT_RESET) ||
+ !(raw_port_status & PORT_PE))
+ return 0xffffffff;
+ if (time_after_eq(jiffies,
+ bus_state->resume_done[wIndex])) {
+ xhci_dbg(xhci, "Resume USB2 port %d\n",
+ wIndex + 1);
+ bus_state->resume_done[wIndex] = 0;
+ clear_bit(wIndex, &bus_state->resuming_ports);
+ xhci_set_link_state(xhci, port_array, wIndex,
+ XDEV_U0);
+ xhci_dbg(xhci, "set port %d resume\n",
+ wIndex + 1);
+ slot_id = xhci_find_slot_id_by_port(hcd, xhci,
+ wIndex + 1);
+ if (!slot_id) {
+ xhci_dbg(xhci, "slot_id is zero\n");
+ return 0xffffffff;
+ }
+ xhci_ring_device(xhci, slot_id);
+ bus_state->port_c_suspend |= 1 << wIndex;
+ bus_state->suspended_ports &= ~(1 << wIndex);
+ } else {
+ /*
+ * The resume has been signaling for less than
+ * 20ms. Report the port status as SUSPEND,
+ * let the usbcore check port status again
+ * and clear resume signaling later.
+ */
+ status |= USB_PORT_STAT_SUSPEND;
+ }
+ }
+ if ((raw_port_status & PORT_PLS_MASK) == XDEV_U0
+ && (raw_port_status & PORT_POWER)
+ && (bus_state->suspended_ports & (1 << wIndex))) {
+ bus_state->suspended_ports &= ~(1 << wIndex);
+ if (hcd->speed != HCD_USB3)
+ bus_state->port_c_suspend |= 1 << wIndex;
+ }
+ if (raw_port_status & PORT_CONNECT) {
+ status |= USB_PORT_STAT_CONNECTION;
+ status |= xhci_port_speed(raw_port_status);
+ }
+ if (raw_port_status & PORT_PE)
+ status |= USB_PORT_STAT_ENABLE;
+ if (raw_port_status & PORT_OC)
+ status |= USB_PORT_STAT_OVERCURRENT;
+ if (raw_port_status & PORT_RESET)
+ status |= USB_PORT_STAT_RESET;
+ if (raw_port_status & PORT_POWER) {
+ if (hcd->speed == HCD_USB3)
+ status |= USB_SS_PORT_STAT_POWER;
+ else
+ status |= USB_PORT_STAT_POWER;
+ }
+ /* Update Port Link State */
+ if (hcd->speed == HCD_USB3) {
+ xhci_hub_report_usb3_link_state(&status, raw_port_status);
+ /*
+ * Verify whether all USB3 ports have entered U0 already and,
+ * if so, delete the Compliance Mode Recovery timer.
+ */
+ xhci_del_comp_mod_timer(xhci, raw_port_status, wIndex);
+ } else {
+ xhci_hub_report_usb2_link_state(&status, raw_port_status);
+ }
+ if (bus_state->port_c_suspend & (1 << wIndex))
+ status |= 1 << USB_PORT_FEAT_C_SUSPEND;
+
+ return status;
+}
+
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
@@ -598,104 +722,20 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
if (!wIndex || wIndex > max_ports)
goto error;
wIndex--;
- status = 0;
temp = xhci_readl(xhci, port_array[wIndex]);
if (temp == 0xffffffff) {
retval = -ENODEV;
break;
}
- xhci_dbg(xhci, "get port status, actual port %d status = 0x%x\n", wIndex, temp);
-
- /* wPortChange bits */
- if (temp & PORT_CSC)
- status |= USB_PORT_STAT_C_CONNECTION << 16;
- if (temp & PORT_PEC)
- status |= USB_PORT_STAT_C_ENABLE << 16;
- if ((temp & PORT_OCC))
- status |= USB_PORT_STAT_C_OVERCURRENT << 16;
- if ((temp & PORT_RC))
- status |= USB_PORT_STAT_C_RESET << 16;
- /* USB3.0 only */
- if (hcd->speed == HCD_USB3) {
- if ((temp & PORT_PLC))
- status |= USB_PORT_STAT_C_LINK_STATE << 16;
- if ((temp & PORT_WRC))
- status |= USB_PORT_STAT_C_BH_RESET << 16;
- }
+ status = xhci_get_port_status(hcd, bus_state, port_array,
+ wIndex, temp);
+ if (status == 0xffffffff)
+ goto error;
- if (hcd->speed != HCD_USB3) {
- if ((temp & PORT_PLS_MASK) == XDEV_U3
- && (temp & PORT_POWER))
- status |= USB_PORT_STAT_SUSPEND;
- }
- if ((temp & PORT_PLS_MASK) == XDEV_RESUME &&
- !DEV_SUPERSPEED(temp)) {
- if ((temp & PORT_RESET) || !(temp & PORT_PE))
- goto error;
- if (time_after_eq(jiffies,
- bus_state->resume_done[wIndex])) {
- xhci_dbg(xhci, "Resume USB2 port %d\n",
- wIndex + 1);
- bus_state->resume_done[wIndex] = 0;
- clear_bit(wIndex, &bus_state->resuming_ports);
- xhci_set_link_state(xhci, port_array, wIndex,
- XDEV_U0);
- xhci_dbg(xhci, "set port %d resume\n",
- wIndex + 1);
- slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- wIndex + 1);
- if (!slot_id) {
- xhci_dbg(xhci, "slot_id is zero\n");
- goto error;
- }
- xhci_ring_device(xhci, slot_id);
- bus_state->port_c_suspend |= 1 << wIndex;
- bus_state->suspended_ports &= ~(1 << wIndex);
- } else {
- /*
- * The resume has been signaling for less than
- * 20ms. Report the port status as SUSPEND,
- * let the usbcore check port status again
- * and clear resume signaling later.
- */
- status |= USB_PORT_STAT_SUSPEND;
- }
- }
- if ((temp & PORT_PLS_MASK) == XDEV_U0
- && (temp & PORT_POWER)
- && (bus_state->suspended_ports & (1 << wIndex))) {
- bus_state->suspended_ports &= ~(1 << wIndex);
- if (hcd->speed != HCD_USB3)
- bus_state->port_c_suspend |= 1 << wIndex;
- }
- if (temp & PORT_CONNECT) {
- status |= USB_PORT_STAT_CONNECTION;
- status |= xhci_port_speed(temp);
- }
- if (temp & PORT_PE)
- status |= USB_PORT_STAT_ENABLE;
- if (temp & PORT_OC)
- status |= USB_PORT_STAT_OVERCURRENT;
- if (temp & PORT_RESET)
- status |= USB_PORT_STAT_RESET;
- if (temp & PORT_POWER) {
- if (hcd->speed == HCD_USB3)
- status |= USB_SS_PORT_STAT_POWER;
- else
- status |= USB_PORT_STAT_POWER;
- }
- /* Update Port Link State for super speed ports*/
- if (hcd->speed == HCD_USB3) {
- xhci_hub_report_link_state(&status, temp);
- /*
- * Verify if all USB3 Ports Have entered U0 already.
- * Delete Compliance Mode Timer if so.
- */
- xhci_del_comp_mod_timer(xhci, temp, wIndex);
- }
- if (bus_state->port_c_suspend & (1 << wIndex))
- status |= 1 << USB_PORT_FEAT_C_SUSPEND;
+ xhci_dbg(xhci, "get port status, actual port %d status = 0x%x\n",
+ wIndex, temp);
xhci_dbg(xhci, "Get port status returned 0x%x\n", status);
+
put_unaligned(cpu_to_le32(status), (__le32 *) buf);
break;
case SetPortFeature:
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index df6978abd7e..53b972c2a09 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -24,8 +24,10 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
+#include <linux/dma-mapping.h>
#include "xhci.h"
+#include "xhci-trace.h"
/*
* Allocates a generic ring segment from the ring pool, sets the dma address,
@@ -347,7 +349,8 @@ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
return -ENOMEM;
xhci_link_rings(xhci, ring, first, last, num_segs);
- xhci_dbg(xhci, "ring expansion succeed, now has %d segments\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
+ "ring expansion succeed, now has %d segments",
ring->num_segs);
return 0;
@@ -481,17 +484,6 @@ struct xhci_ring *xhci_dma_to_transfer_ring(
return ep->ring;
}
-/* Only use this when you know stream_info is valid */
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-static struct xhci_ring *dma_to_stream_ring(
- struct xhci_stream_info *stream_info,
- u64 address)
-{
- return radix_tree_lookup(&stream_info->trb_address_map,
- address >> TRB_SEGMENT_SHIFT);
-}
-#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */
-
struct xhci_ring *xhci_stream_id_to_ring(
struct xhci_virt_device *dev,
unsigned int ep_index,
@@ -509,58 +501,6 @@ struct xhci_ring *xhci_stream_id_to_ring(
return ep->stream_info->stream_rings[stream_id];
}
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-static int xhci_test_radix_tree(struct xhci_hcd *xhci,
- unsigned int num_streams,
- struct xhci_stream_info *stream_info)
-{
- u32 cur_stream;
- struct xhci_ring *cur_ring;
- u64 addr;
-
- for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
- struct xhci_ring *mapped_ring;
- int trb_size = sizeof(union xhci_trb);
-
- cur_ring = stream_info->stream_rings[cur_stream];
- for (addr = cur_ring->first_seg->dma;
- addr < cur_ring->first_seg->dma + TRB_SEGMENT_SIZE;
- addr += trb_size) {
- mapped_ring = dma_to_stream_ring(stream_info, addr);
- if (cur_ring != mapped_ring) {
- xhci_warn(xhci, "WARN: DMA address 0x%08llx "
- "didn't map to stream ID %u; "
- "mapped to ring %p\n",
- (unsigned long long) addr,
- cur_stream,
- mapped_ring);
- return -EINVAL;
- }
- }
- /* One TRB after the end of the ring segment shouldn't return a
- * pointer to the current ring (although it may be a part of a
- * different ring).
- */
- mapped_ring = dma_to_stream_ring(stream_info, addr);
- if (mapped_ring != cur_ring) {
- /* One TRB before should also fail */
- addr = cur_ring->first_seg->dma - trb_size;
- mapped_ring = dma_to_stream_ring(stream_info, addr);
- }
- if (mapped_ring == cur_ring) {
- xhci_warn(xhci, "WARN: Bad DMA address 0x%08llx "
- "mapped to valid stream ID %u; "
- "mapped ring = %p\n",
- (unsigned long long) addr,
- cur_stream,
- mapped_ring);
- return -EINVAL;
- }
- }
- return 0;
-}
-#endif /* CONFIG_USB_XHCI_HCD_DEBUGGING */
-
/*
* Change an endpoint's internal structure so it supports stream IDs. The
* number of requested streams includes stream 0, which cannot be used by device
@@ -687,13 +627,6 @@ struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
* was any other way, the host controller would assume the ring is
* "empty" and wait forever for data to be queued to that stream ID).
*/
-#if XHCI_DEBUG
- /* Do a little test on the radix tree to make sure it returns the
- * correct values.
- */
- if (xhci_test_radix_tree(xhci, num_streams, stream_info))
- goto cleanup_rings;
-#endif
return stream_info;
@@ -731,7 +664,8 @@ void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
* fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
*/
max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
- xhci_dbg(xhci, "Setting number of stream ctx array entries to %u\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+ "Setting number of stream ctx array entries to %u",
1 << (max_primary_streams + 1));
ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
@@ -1613,7 +1547,8 @@ static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
struct device *dev = xhci_to_hcd(xhci)->self.controller;
int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2);
- xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Allocating %d scratchpad buffers", num_sp);
if (!num_sp)
return 0;
@@ -1770,11 +1705,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
dma_free_coherent(&pdev->dev, size,
xhci->erst.entries, xhci->erst.erst_dma_addr);
xhci->erst.entries = NULL;
- xhci_dbg(xhci, "Freed ERST\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed ERST");
if (xhci->event_ring)
xhci_ring_free(xhci, xhci->event_ring);
xhci->event_ring = NULL;
- xhci_dbg(xhci, "Freed event ring\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed event ring");
if (xhci->lpm_command)
xhci_free_command(xhci, xhci->lpm_command);
@@ -1782,7 +1717,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
if (xhci->cmd_ring)
xhci_ring_free(xhci, xhci->cmd_ring);
xhci->cmd_ring = NULL;
- xhci_dbg(xhci, "Freed command ring\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed command ring");
list_for_each_entry_safe(cur_cd, next_cd,
&xhci->cancel_cmd_list, cancel_cmd_list) {
list_del(&cur_cd->cancel_cmd_list);
@@ -1795,22 +1730,24 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
if (xhci->segment_pool)
dma_pool_destroy(xhci->segment_pool);
xhci->segment_pool = NULL;
- xhci_dbg(xhci, "Freed segment pool\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed segment pool");
if (xhci->device_pool)
dma_pool_destroy(xhci->device_pool);
xhci->device_pool = NULL;
- xhci_dbg(xhci, "Freed device context pool\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed device context pool");
if (xhci->small_streams_pool)
dma_pool_destroy(xhci->small_streams_pool);
xhci->small_streams_pool = NULL;
- xhci_dbg(xhci, "Freed small stream array pool\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Freed small stream array pool");
if (xhci->medium_streams_pool)
dma_pool_destroy(xhci->medium_streams_pool);
xhci->medium_streams_pool = NULL;
- xhci_dbg(xhci, "Freed medium stream array pool\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Freed medium stream array pool");
if (xhci->dcbaa)
dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
@@ -2036,8 +1973,9 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
* there might be more events to service.
*/
temp &= ~ERST_EHB;
- xhci_dbg(xhci, "// Write event ring dequeue pointer, "
- "preserving EHB bit\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Write event ring dequeue pointer, "
+ "preserving EHB bit");
xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
&xhci->ir_set->erst_dequeue);
}
@@ -2060,8 +1998,9 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
temp = xhci_readl(xhci, addr + 2);
port_offset = XHCI_EXT_PORT_OFF(temp);
port_count = XHCI_EXT_PORT_COUNT(temp);
- xhci_dbg(xhci, "Ext Cap %p, port offset = %u, "
- "count = %u, revision = 0x%x\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Ext Cap %p, port offset = %u, "
+ "count = %u, revision = 0x%x",
addr, port_offset, port_count, major_revision);
/* Port count includes the current port offset */
if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
@@ -2075,15 +2014,18 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
/* Check the host's USB2 LPM capability */
if ((xhci->hci_version == 0x96) && (major_revision != 0x03) &&
(temp & XHCI_L1C)) {
- xhci_dbg(xhci, "xHCI 0.96: support USB2 software lpm\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "xHCI 0.96: support USB2 software lpm");
xhci->sw_lpm_support = 1;
}
if ((xhci->hci_version >= 0x100) && (major_revision != 0x03)) {
- xhci_dbg(xhci, "xHCI 1.0: support USB2 software lpm\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "xHCI 1.0: support USB2 software lpm");
xhci->sw_lpm_support = 1;
if (temp & XHCI_HLC) {
- xhci_dbg(xhci, "xHCI 1.0: support USB2 hardware lpm\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "xHCI 1.0: support USB2 hardware lpm");
xhci->hw_lpm_support = 1;
}
}
@@ -2207,18 +2149,21 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
xhci_warn(xhci, "No ports on the roothubs?\n");
return -ENODEV;
}
- xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Found %u USB 2.0 ports and %u USB 3.0 ports.",
xhci->num_usb2_ports, xhci->num_usb3_ports);
/* Place limits on the number of roothub ports so that the hub
* descriptors aren't longer than the USB core will allocate.
*/
if (xhci->num_usb3_ports > 15) {
- xhci_dbg(xhci, "Limiting USB 3.0 roothub ports to 15.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Limiting USB 3.0 roothub ports to 15.");
xhci->num_usb3_ports = 15;
}
if (xhci->num_usb2_ports > USB_MAXCHILDREN) {
- xhci_dbg(xhci, "Limiting USB 2.0 roothub ports to %u.\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Limiting USB 2.0 roothub ports to %u.",
USB_MAXCHILDREN);
xhci->num_usb2_ports = USB_MAXCHILDREN;
}
@@ -2243,8 +2188,9 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
xhci->usb2_ports[port_index] =
&xhci->op_regs->port_status_base +
NUM_PORT_REGS*i;
- xhci_dbg(xhci, "USB 2.0 port at index %u, "
- "addr = %p\n", i,
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "USB 2.0 port at index %u, "
+ "addr = %p", i,
xhci->usb2_ports[port_index]);
port_index++;
if (port_index == xhci->num_usb2_ports)
@@ -2263,8 +2209,9 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
xhci->usb3_ports[port_index] =
&xhci->op_regs->port_status_base +
NUM_PORT_REGS*i;
- xhci_dbg(xhci, "USB 3.0 port at index %u, "
- "addr = %p\n", i,
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "USB 3.0 port at index %u, "
+ "addr = %p", i,
xhci->usb3_ports[port_index]);
port_index++;
if (port_index == xhci->num_usb3_ports)
@@ -2288,32 +2235,35 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
INIT_LIST_HEAD(&xhci->cancel_cmd_list);
page_size = xhci_readl(xhci, &xhci->op_regs->page_size);
- xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Supported page size register = 0x%x", page_size);
for (i = 0; i < 16; i++) {
if ((0x1 & page_size) != 0)
break;
page_size = page_size >> 1;
}
if (i < 16)
- xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Supported page size of %iK", (1 << (i+12)) / 1024);
else
xhci_warn(xhci, "WARN: no supported page size\n");
/* Use 4K pages, since that's common and the minimum the HC supports */
xhci->page_shift = 12;
xhci->page_size = 1 << xhci->page_shift;
- xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "HCD page size set to %iK", xhci->page_size / 1024);
/*
* Program the Number of Device Slots Enabled field in the CONFIG
* register with the max value of slots the HC can handle.
*/
val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1));
- xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n",
- (unsigned int) val);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// xHC can handle at most %d device slots.", val);
val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
val |= (val2 & ~HCS_SLOTS_MASK);
- xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n",
- (unsigned int) val);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Setting Max device slots reg = 0x%x.", val);
xhci_writel(xhci, val, &xhci->op_regs->config_reg);
/*
@@ -2326,7 +2276,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
goto fail;
memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa));
xhci->dcbaa->dma = dma;
- xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Device context base array address = 0x%llx (DMA), %p (virt)",
(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
@@ -2365,8 +2316,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, flags);
if (!xhci->cmd_ring)
goto fail;
- xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring);
- xhci_dbg(xhci, "First segment DMA is 0x%llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Allocated command ring at %p", xhci->cmd_ring);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "First segment DMA is 0x%llx",
(unsigned long long)xhci->cmd_ring->first_seg->dma);
/* Set the address in the Command Ring Control register */
@@ -2374,7 +2326,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
xhci->cmd_ring->cycle_state;
- xhci_dbg(xhci, "// Setting command ring address to 0x%x\n", val);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Setting command ring address to 0x%x", val);
xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
xhci_dbg_cmd_ptrs(xhci);
@@ -2390,8 +2343,9 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
val = xhci_readl(xhci, &xhci->cap_regs->db_off);
val &= DBOFF_MASK;
- xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
- " from cap regs base addr\n", val);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Doorbell array is located at offset 0x%x"
+ " from cap regs base addr", val);
xhci->dba = (void __iomem *) xhci->cap_regs + val;
xhci_dbg_regs(xhci);
xhci_print_run_regs(xhci);
@@ -2402,7 +2356,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
* Event ring setup: Allocate a normal ring, but also setup
* the event ring segment table (ERST). Section 4.9.3.
*/
- xhci_dbg(xhci, "// Allocating event ring\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Allocating event ring");
xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1, TYPE_EVENT,
flags);
if (!xhci->event_ring)
@@ -2415,13 +2369,15 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
GFP_KERNEL);
if (!xhci->erst.entries)
goto fail;
- xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Allocated event ring segment table at 0x%llx",
(unsigned long long)dma);
memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
xhci->erst.num_entries = ERST_NUM_SEGS;
xhci->erst.erst_dma_addr = dma;
- xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx",
xhci->erst.num_entries,
xhci->erst.entries,
(unsigned long long)xhci->erst.erst_dma_addr);
@@ -2439,13 +2395,16 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
val = xhci_readl(xhci, &xhci->ir_set->erst_size);
val &= ERST_SIZE_MASK;
val |= ERST_NUM_SEGS;
- xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Write ERST size = %i to ir_set 0 (some bits preserved)",
val);
xhci_writel(xhci, val, &xhci->ir_set->erst_size);
- xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Set ERST entries to point to event ring.");
/* set the segment table base address */
- xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Set ERST base address for ir_set 0 = 0x%llx",
(unsigned long long)xhci->erst.erst_dma_addr);
val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
val_64 &= ERST_PTR_MASK;
@@ -2454,7 +2413,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
/* Set the event ring dequeue address */
xhci_set_hc_event_deq(xhci);
- xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Wrote ERST address to ir_set 0.");
xhci_print_ir_set(xhci, 0);
/*
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index cc24e39b97d..c2d495057eb 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include "xhci.h"
+#include "xhci-trace.h"
/* Device for a quirk */
#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
@@ -64,16 +65,18 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
pdev->revision == 0x0) {
xhci->quirks |= XHCI_RESET_EP_QUIRK;
- xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure"
- " endpoint cmd after reset endpoint\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "QUIRK: Fresco Logic xHC needs configure"
+ " endpoint cmd after reset endpoint");
}
/* Fresco Logic confirms: all revisions of this chip do not
* support MSI, even though some of them claim to in their PCI
* capabilities.
*/
xhci->quirks |= XHCI_BROKEN_MSI;
- xhci_dbg(xhci, "QUIRK: Fresco Logic revision %u "
- "has broken MSI implementation\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "QUIRK: Fresco Logic revision %u "
+ "has broken MSI implementation",
pdev->revision);
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
}
@@ -93,7 +96,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
- xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
xhci->limit_active_eps = 64;
xhci->quirks |= XHCI_SW_BW_CHECKING;
@@ -111,7 +113,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
- xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "QUIRK: Resetting on resume");
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
}
if (pdev->vendor == PCI_VENDOR_ID_VIA)
@@ -250,13 +253,15 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
* writers.
*
* Unconditionally switch the ports back to xHCI after a system resume.
- * We can't tell whether the EHCI or xHCI controller will be resumed
- * first, so we have to do the port switchover in both drivers. Writing
- * a '1' to the port switchover registers should have no effect if the
- * port was already switched over.
+ * It should not matter whether the EHCI or xHCI controller is
+ * resumed first. It's enough to do the switchover in xHCI because
+ * USB core won't notice anything as the hub driver doesn't start
+ * running again until after all the devices (including both EHCI and
+ * xHCI host controllers) have been resumed.
*/
- if (usb_is_intel_switchable_xhci(pdev))
- usb_enable_xhci_ports(pdev);
+
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL)
+ usb_enable_intel_xhci_ports(pdev);
retval = xhci_resume(xhci, hibernated);
return retval;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 51e22bf8950..d9c169f470d 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -14,6 +14,8 @@
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/dma-mapping.h>
#include "xhci.h"
@@ -24,7 +26,7 @@ static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
* here that the generic code does not try to make a pci_dev from our
* dev struct in order to setup MSI
*/
- xhci->quirks |= XHCI_BROKEN_MSI;
+ xhci->quirks |= XHCI_PLAT;
}
/* called during probe() after chip reset completes */
@@ -104,6 +106,15 @@ static int xhci_plat_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
+ /* Initialize dma_mask and coherent_dma_mask to 32-bits */
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret)
+ return ret;
+ if (!pdev->dev.dma_mask)
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+ else
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd)
return -ENOMEM;
@@ -186,11 +197,46 @@ static int xhci_plat_remove(struct platform_device *dev)
return 0;
}
+#ifdef CONFIG_PM
+static int xhci_plat_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ return xhci_suspend(xhci);
+}
+
+static int xhci_plat_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ return xhci_resume(xhci, 0);
+}
+
+static const struct dev_pm_ops xhci_plat_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume)
+};
+#define DEV_PM_OPS (&xhci_plat_pm_ops)
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_OF
+static const struct of_device_id usb_xhci_of_match[] = {
+ { .compatible = "xhci-platform" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, usb_xhci_of_match);
+#endif
+
static struct platform_driver usb_xhci_driver = {
.probe = xhci_plat_probe,
.remove = xhci_plat_remove,
.driver = {
.name = "xhci-hcd",
+ .pm = DEV_PM_OPS,
+ .of_match_table = of_match_ptr(usb_xhci_of_match),
},
};
MODULE_ALIAS("platform:xhci-hcd");
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 1e57eafa691..411da1fc7ae 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -67,6 +67,7 @@
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include "xhci.h"
+#include "xhci-trace.h"
static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
@@ -434,7 +435,7 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
/* A ring has pending URBs if its TD list is not empty */
if (!(ep->ep_state & EP_HAS_STREAMS)) {
- if (!(list_empty(&ep->ring->td_list)))
+ if (ep->ring && !(list_empty(&ep->ring->td_list)))
xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0);
return;
}
@@ -555,7 +556,8 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
return;
}
state->new_cycle_state = 0;
- xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Finding segment containing stopped TRB.");
state->new_deq_seg = find_trb_seg(cur_td->start_seg,
dev->eps[ep_index].stopped_trb,
&state->new_cycle_state);
@@ -565,12 +567,14 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
}
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
- xhci_dbg(xhci, "Finding endpoint context\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Finding endpoint context");
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
state->new_cycle_state = 0x1 & le64_to_cpu(ep_ctx->deq);
state->new_deq_ptr = cur_td->last_trb;
- xhci_dbg(xhci, "Finding segment containing last TRB in TD.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Finding segment containing last TRB in TD.");
state->new_deq_seg = find_trb_seg(state->new_deq_seg,
state->new_deq_ptr,
&state->new_cycle_state);
@@ -597,13 +601,16 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
if (ep_ring->first_seg == ep_ring->first_seg->next &&
state->new_deq_ptr < dev->eps[ep_index].stopped_trb)
state->new_cycle_state ^= 0x1;
- xhci_dbg(xhci, "Cycle state = 0x%x\n", state->new_cycle_state);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Cycle state = 0x%x", state->new_cycle_state);
/* Don't update the ring cycle state for the producer (us). */
- xhci_dbg(xhci, "New dequeue segment = %p (virtual)\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "New dequeue segment = %p (virtual)",
state->new_deq_seg);
addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
- xhci_dbg(xhci, "New dequeue pointer = 0x%llx (DMA)\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "New dequeue pointer = 0x%llx (DMA)",
(unsigned long long) addr);
}
@@ -631,9 +638,11 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
if (flip_cycle)
cur_trb->generic.field[3] ^=
cpu_to_le32(TRB_CYCLE);
- xhci_dbg(xhci, "Cancel (unchain) link TRB\n");
- xhci_dbg(xhci, "Address = %p (0x%llx dma); "
- "in seg %p (0x%llx dma)\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Cancel (unchain) link TRB");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Address = %p (0x%llx dma); "
+ "in seg %p (0x%llx dma)",
cur_trb,
(unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
cur_seg,
@@ -651,7 +660,8 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
cpu_to_le32(TRB_CYCLE);
cur_trb->generic.field[3] |= cpu_to_le32(
TRB_TYPE(TRB_TR_NOOP));
- xhci_dbg(xhci, "TRB to noop at offset 0x%llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "TRB to noop at offset 0x%llx",
(unsigned long long)
xhci_trb_virt_to_dma(cur_seg, cur_trb));
}
@@ -672,8 +682,9 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
{
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
- xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
- "new deq ptr = %p (0x%llx dma), new cycle = %u\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
+ "new deq ptr = %p (0x%llx dma), new cycle = %u",
deq_state->new_deq_seg,
(unsigned long long)deq_state->new_deq_seg->dma,
deq_state->new_deq_ptr,
@@ -793,7 +804,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
*/
list_for_each(entry, &ep->cancelled_td_list) {
cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
- xhci_dbg(xhci, "Removing canceled TD starting at 0x%llx (dma).\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Removing canceled TD starting at 0x%llx (dma).",
(unsigned long long)xhci_trb_virt_to_dma(
cur_td->start_seg, cur_td->first_trb));
ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
@@ -913,14 +925,16 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
ep->stop_cmds_pending--;
if (xhci->xhc_state & XHCI_STATE_DYING) {
- xhci_dbg(xhci, "Stop EP timer ran, but another timer marked "
- "xHCI as DYING, exiting.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Stop EP timer ran, but another timer marked "
+ "xHCI as DYING, exiting.");
spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
- xhci_dbg(xhci, "Stop EP timer ran, but no command pending, "
- "exiting.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Stop EP timer ran, but no command pending, "
+ "exiting.");
spin_unlock_irqrestore(&xhci->lock, flags);
return;
}
@@ -962,8 +976,9 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
ring = temp_ep->ring;
if (!ring)
continue;
- xhci_dbg(xhci, "Killing URBs for slot ID %u, "
- "ep index %u\n", i, j);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Killing URBs for slot ID %u, "
+ "ep index %u", i, j);
while (!list_empty(&ring->td_list)) {
cur_td = list_first_entry(&ring->td_list,
struct xhci_td,
@@ -986,9 +1001,11 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
}
}
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_dbg(xhci, "Calling usb_hc_died()\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Calling usb_hc_died()");
usb_hc_died(xhci_to_hcd(xhci)->primary_hcd);
- xhci_dbg(xhci, "xHCI host controller is dead.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "xHCI host controller is dead.");
}
@@ -1092,7 +1109,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
ep_state &= EP_STATE_MASK;
slot_state = le32_to_cpu(slot_ctx->dev_state);
slot_state = GET_SLOT_STATE(slot_state);
- xhci_dbg(xhci, "Slot state = %u, EP state = %u\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Slot state = %u, EP state = %u",
slot_state, ep_state);
break;
case COMP_EBADSLT:
@@ -1112,7 +1130,8 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
* cancelling URBs, which might not be an error...
*/
} else {
- xhci_dbg(xhci, "Successful Set TR Deq Ptr cmd, deq = @%08llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Successful Set TR Deq Ptr cmd, deq = @%08llx",
le64_to_cpu(ep_ctx->deq));
if (xhci_trb_virt_to_dma(dev->eps[ep_index].queued_deq_seg,
dev->eps[ep_index].queued_deq_ptr) ==
@@ -1150,7 +1169,8 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
/* This command will only fail if the endpoint wasn't halted,
* but we don't care.
*/
- xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+ "Ignoring reset ep completion code of %u",
GET_COMP_CODE(le32_to_cpu(event->status)));
/* HW with the reset endpoint quirk needs to have a configure endpoint
@@ -1158,7 +1178,8 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
* because the HW can't handle two commands being queued in a row.
*/
if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
- xhci_dbg(xhci, "Queueing configure endpoint command\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Queueing configure endpoint command");
xhci_queue_configure_endpoint(xhci,
xhci->devs[slot_id]->in_ctx->dma, slot_id,
false);
@@ -1377,6 +1398,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
return;
}
+ trace_xhci_cmd_completion(&xhci->cmd_ring->dequeue->generic,
+ (struct xhci_generic_trb *) event);
+
if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) ||
(GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) {
/* If the return value is 0, we think the trb pointed by
@@ -1444,8 +1468,9 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
if (!(ep_state & EP_HALTED))
goto bandwidth_change;
- xhci_dbg(xhci, "Completed config ep cmd - "
- "last ep index = %d, state = %d\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Completed config ep cmd - "
+ "last ep index = %d, state = %d",
ep_index, ep_state);
/* Clear internal halted state and restart ring(s) */
xhci->devs[slot_id]->eps[ep_index].ep_state &=
@@ -1454,7 +1479,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
break;
}
bandwidth_change:
- xhci_dbg(xhci, "Completed config ep cmd\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+ "Completed config ep cmd");
xhci->devs[slot_id]->cmd_status =
GET_COMP_CODE(le32_to_cpu(event->status));
complete(&xhci->devs[slot_id]->cmd_completion);
@@ -1497,7 +1523,8 @@ bandwidth_change:
xhci->error_bitmask |= 1 << 6;
break;
}
- xhci_dbg(xhci, "NEC firmware version %2x.%02x\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "NEC firmware version %2x.%02x",
NEC_FW_MAJOR(le32_to_cpu(event->status)),
NEC_FW_MINOR(le32_to_cpu(event->status)));
break;
@@ -2877,8 +2904,8 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
return -ENOMEM;
}
- xhci_dbg(xhci, "ERROR no room on ep ring, "
- "try ring expansion\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_ring_expansion,
+ "ERROR no room on ep ring, try ring expansion");
num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
if (xhci_ring_expansion(xhci, ep_ring, num_trbs_needed,
mem_flags)) {
@@ -3060,14 +3087,10 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
* to set the polling interval (once the API is added).
*/
if (xhci_interval != ep_interval) {
- if (printk_ratelimit())
- dev_dbg(&urb->dev->dev, "Driver uses different interval"
- " (%d microframe%s) than xHCI "
- "(%d microframe%s)\n",
- ep_interval,
- ep_interval == 1 ? "" : "s",
- xhci_interval,
- xhci_interval == 1 ? "" : "s");
+ dev_dbg_ratelimited(&urb->dev->dev,
+ "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
+ ep_interval, ep_interval == 1 ? "" : "s",
+ xhci_interval, xhci_interval == 1 ? "" : "s");
urb->interval = xhci_interval;
/* Convert back to frames for LS/FS devices */
if (urb->dev->speed == USB_SPEED_LOW ||
@@ -3849,14 +3872,10 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
* to set the polling interval (once the API is added).
*/
if (xhci_interval != ep_interval) {
- if (printk_ratelimit())
- dev_dbg(&urb->dev->dev, "Driver uses different interval"
- " (%d microframe%s) than xHCI "
- "(%d microframe%s)\n",
- ep_interval,
- ep_interval == 1 ? "" : "s",
- xhci_interval,
- xhci_interval == 1 ? "" : "s");
+ dev_dbg_ratelimited(&urb->dev->dev,
+ "Driver uses different interval (%d microframe%s) than xHCI (%d microframe%s)\n",
+ ep_interval, ep_interval == 1 ? "" : "s",
+ xhci_interval, xhci_interval == 1 ? "" : "s");
urb->interval = xhci_interval;
/* Convert back to frames for LS/FS devices */
if (urb->dev->speed == USB_SPEED_LOW ||
diff --git a/arch/arm/kernel/signal.h b/drivers/usb/host/xhci-trace.c
index 5ff067b7c75..7cf30c83dcf 100644
--- a/arch/arm/kernel/signal.h
+++ b/drivers/usb/host/xhci-trace.c
@@ -1,12 +1,15 @@
/*
- * linux/arch/arm/kernel/signal.h
+ * xHCI host controller driver
*
- * Copyright (C) 2005-2009 Russell King.
+ * Copyright (C) 2013 Xenia Ragiadakou
+ *
+ * Author: Xenia Ragiadakou
+ * Email : burzalodowa@gmail.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
-extern const unsigned long sigreturn_codes[7];
+#define CREATE_TRACE_POINTS
+#include "xhci-trace.h"
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
new file mode 100644
index 00000000000..20364cc8d2f
--- /dev/null
+++ b/drivers/usb/host/xhci-trace.h
@@ -0,0 +1,151 @@
+/*
+ * xHCI host controller driver
+ *
+ * Copyright (C) 2013 Xenia Ragiadakou
+ *
+ * Author: Xenia Ragiadakou
+ * Email : burzalodowa@gmail.com
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM xhci-hcd
+
+#if !defined(__XHCI_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __XHCI_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "xhci.h"
+
+#define XHCI_MSG_MAX 500
+
+DECLARE_EVENT_CLASS(xhci_log_msg,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf),
+ TP_STRUCT__entry(__dynamic_array(char, msg, XHCI_MSG_MAX)),
+ TP_fast_assign(
+ vsnprintf(__get_str(msg), XHCI_MSG_MAX, vaf->fmt, *vaf->va);
+ ),
+ TP_printk("%s", __get_str(msg))
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_address,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_context_change,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_quirks,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_reset_ep,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_cancel_urb,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_init,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DEFINE_EVENT(xhci_log_msg, xhci_dbg_ring_expansion,
+ TP_PROTO(struct va_format *vaf),
+ TP_ARGS(vaf)
+);
+
+DECLARE_EVENT_CLASS(xhci_log_ctx,
+ TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
+ unsigned int ep_num),
+ TP_ARGS(xhci, ctx, ep_num),
+ TP_STRUCT__entry(
+ __field(int, ctx_64)
+ __field(unsigned, ctx_type)
+ __field(dma_addr_t, ctx_dma)
+ __field(u8 *, ctx_va)
+ __field(unsigned, ctx_ep_num)
+ __field(int, slot_id)
+ __dynamic_array(u32, ctx_data,
+ ((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 8) *
+ ((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1))
+ ),
+ TP_fast_assign(
+ struct usb_device *udev;
+
+ udev = to_usb_device(xhci_to_hcd(xhci)->self.controller);
+ __entry->ctx_64 = HCC_64BYTE_CONTEXT(xhci->hcc_params);
+ __entry->ctx_type = ctx->type;
+ __entry->ctx_dma = ctx->dma;
+ __entry->ctx_va = ctx->bytes;
+ __entry->slot_id = udev->slot_id;
+ __entry->ctx_ep_num = ep_num;
+ memcpy(__get_dynamic_array(ctx_data), ctx->bytes,
+ ((HCC_64BYTE_CONTEXT(xhci->hcc_params) + 1) * 32) *
+ ((ctx->type == XHCI_CTX_TYPE_INPUT) + ep_num + 1));
+ ),
+ TP_printk("\nctx_64=%d, ctx_type=%u, ctx_dma=@%llx, ctx_va=@%p",
+ __entry->ctx_64, __entry->ctx_type,
+ (unsigned long long) __entry->ctx_dma, __entry->ctx_va
+ )
+);
+
+DEFINE_EVENT(xhci_log_ctx, xhci_address_ctx,
+ TP_PROTO(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
+ unsigned int ep_num),
+ TP_ARGS(xhci, ctx, ep_num)
+);
+
+DECLARE_EVENT_CLASS(xhci_log_event,
+ TP_PROTO(void *trb_va, struct xhci_generic_trb *ev),
+ TP_ARGS(trb_va, ev),
+ TP_STRUCT__entry(
+ __field(void *, va)
+ __field(u64, dma)
+ __field(u32, status)
+ __field(u32, flags)
+ __dynamic_array(__le32, trb, 4)
+ ),
+ TP_fast_assign(
+ __entry->va = trb_va;
+ __entry->dma = le64_to_cpu(((u64)ev->field[1]) << 32 |
+ ev->field[0]);
+ __entry->status = le32_to_cpu(ev->field[2]);
+ __entry->flags = le32_to_cpu(ev->field[3]);
+ memcpy(__get_dynamic_array(trb), trb_va,
+ sizeof(struct xhci_generic_trb));
+ ),
+ TP_printk("\ntrb_dma=@%llx, trb_va=@%p, status=%08x, flags=%08x",
+ (unsigned long long) __entry->dma, __entry->va,
+ __entry->status, __entry->flags
+ )
+);
+
+DEFINE_EVENT(xhci_log_event, xhci_cmd_completion,
+ TP_PROTO(void *trb_va, struct xhci_generic_trb *ev),
+ TP_ARGS(trb_va, ev)
+);
+
+#endif /* __XHCI_TRACE_H */
+
+/* this part must be outside header guard */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE xhci-trace
+
+#include <trace/define_trace.h>
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2c49f00260c..49b6edb84a7 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -27,8 +27,10 @@
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/dmi.h>
+#include <linux/dma-mapping.h>
#include "xhci.h"
+#include "xhci-trace.h"
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
@@ -100,7 +102,7 @@ void xhci_quiesce(struct xhci_hcd *xhci)
int xhci_halt(struct xhci_hcd *xhci)
{
int ret;
- xhci_dbg(xhci, "// Halt the HC\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Halt the HC");
xhci_quiesce(xhci);
ret = xhci_handshake(xhci, &xhci->op_regs->status,
@@ -124,7 +126,7 @@ static int xhci_start(struct xhci_hcd *xhci)
temp = xhci_readl(xhci, &xhci->op_regs->command);
temp |= (CMD_RUN);
- xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Turn on HC, cmd = 0x%x.",
temp);
xhci_writel(xhci, temp, &xhci->op_regs->command);
@@ -162,7 +164,7 @@ int xhci_reset(struct xhci_hcd *xhci)
return 0;
}
- xhci_dbg(xhci, "// Reset the HC\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "// Reset the HC");
command = xhci_readl(xhci, &xhci->op_regs->command);
command |= CMD_RESET;
xhci_writel(xhci, command, &xhci->op_regs->command);
@@ -172,7 +174,8 @@ int xhci_reset(struct xhci_hcd *xhci)
if (ret)
return ret;
- xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Wait for controller to be ready for doorbell rings");
/*
* xHCI cannot write to any doorbells or operational registers other
* than status until the "Controller Not Ready" flag is cleared.
@@ -214,14 +217,16 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
ret = pci_enable_msi(pdev);
if (ret) {
- xhci_dbg(xhci, "failed to allocate MSI entry\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "failed to allocate MSI entry");
return ret;
}
ret = request_irq(pdev->irq, xhci_msi_irq,
0, "xhci_hcd", xhci_to_hcd(xhci));
if (ret) {
- xhci_dbg(xhci, "disable MSI interrupt\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "disable MSI interrupt");
pci_disable_msi(pdev);
}
@@ -284,7 +289,8 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
if (ret) {
- xhci_dbg(xhci, "Failed to enable MSI-X\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Failed to enable MSI-X");
goto free_entries;
}
@@ -300,7 +306,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
return ret;
disable_msix:
- xhci_dbg(xhci, "disable MSI-X interrupt\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "disable MSI-X interrupt");
xhci_free_irq(xhci);
pci_disable_msix(pdev);
free_entries:
@@ -329,7 +335,7 @@ static void xhci_cleanup_msix(struct xhci_hcd *xhci)
return;
}
-static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
+static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
{
int i;
@@ -342,9 +348,14 @@ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci)
static int xhci_try_enable_msi(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
+ struct pci_dev *pdev;
int ret;
+ /* The xhci platform device has set up IRQs through usb_add_hcd. */
+ if (xhci->quirks & XHCI_PLAT)
+ return 0;
+
+ pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
/*
* Some Fresco Logic host controllers advertise MSI, but fail to
* generate interrupts. Don't even try to enable MSI.
@@ -417,9 +428,11 @@ static void compliance_mode_recovery(unsigned long arg)
* Compliance Mode Detected. Letting USB Core
* handle the Warm Reset
*/
- xhci_dbg(xhci, "Compliance mode detected->port %d\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Compliance mode detected->port %d",
i + 1);
- xhci_dbg(xhci, "Attempting compliance mode recovery\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Attempting compliance mode recovery");
hcd = xhci->shared_hcd;
if (hcd->state == HC_STATE_SUSPENDED)
@@ -457,7 +470,8 @@ static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
set_timer_slack(&xhci->comp_mode_recovery_timer,
msecs_to_jiffies(COMP_MODE_RCVRY_MSECS));
add_timer(&xhci->comp_mode_recovery_timer);
- xhci_dbg(xhci, "Compliance mode recovery timer initialized\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Compliance mode recovery timer initialized");
}
/*
@@ -505,16 +519,18 @@ int xhci_init(struct usb_hcd *hcd)
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
int retval = 0;
- xhci_dbg(xhci, "xhci_init\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_init");
spin_lock_init(&xhci->lock);
if (xhci->hci_version == 0x95 && link_quirk) {
- xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "QUIRK: Not clearing Link TRB chain bits.");
xhci->quirks |= XHCI_LINK_TRB_QUIRK;
} else {
- xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "xHCI doesn't need link TRB QUIRK");
}
retval = xhci_mem_init(xhci, GFP_KERNEL);
- xhci_dbg(xhci, "Finished xhci_init\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_init");
/* Initializing Compliance Mode Recovery Data If Needed */
if (xhci_compliance_mode_recovery_timer_quirk_check()) {
@@ -528,57 +544,6 @@ int xhci_init(struct usb_hcd *hcd)
/*-------------------------------------------------------------------------*/
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-static void xhci_event_ring_work(unsigned long arg)
-{
- unsigned long flags;
- int temp;
- u64 temp_64;
- struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
- int i, j;
-
- xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);
-
- spin_lock_irqsave(&xhci->lock, flags);
- temp = xhci_readl(xhci, &xhci->op_regs->status);
- xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
- if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
- (xhci->xhc_state & XHCI_STATE_HALTED)) {
- xhci_dbg(xhci, "HW died, polling stopped.\n");
- spin_unlock_irqrestore(&xhci->lock, flags);
- return;
- }
-
- temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
- xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
- xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
- xhci->error_bitmask = 0;
- xhci_dbg(xhci, "Event ring:\n");
- xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
- xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
- temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
- temp_64 &= ~ERST_PTR_MASK;
- xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
- xhci_dbg(xhci, "Command ring:\n");
- xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
- xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
- xhci_dbg_cmd_ptrs(xhci);
- for (i = 0; i < MAX_HC_SLOTS; ++i) {
- if (!xhci->devs[i])
- continue;
- for (j = 0; j < 31; ++j) {
- xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
- }
- }
- spin_unlock_irqrestore(&xhci->lock, flags);
-
- if (!xhci->zombie)
- mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
- else
- xhci_dbg(xhci, "Quit polling the event ring.\n");
-}
-#endif
-
static int xhci_run_finished(struct xhci_hcd *xhci)
{
if (xhci_start(xhci)) {
@@ -591,7 +556,8 @@ static int xhci_run_finished(struct xhci_hcd *xhci)
if (xhci->quirks & XHCI_NEC_HOST)
xhci_ring_cmd_db(xhci);
- xhci_dbg(xhci, "Finished xhci_run for USB3 roothub\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Finished xhci_run for USB3 roothub");
return 0;
}
@@ -622,23 +588,12 @@ int xhci_run(struct usb_hcd *hcd)
if (!usb_hcd_is_primary_hcd(hcd))
return xhci_run_finished(xhci);
- xhci_dbg(xhci, "xhci_run\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "xhci_run");
ret = xhci_try_enable_msi(hcd);
if (ret)
return ret;
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
- init_timer(&xhci->event_ring_timer);
- xhci->event_ring_timer.data = (unsigned long) xhci;
- xhci->event_ring_timer.function = xhci_event_ring_work;
- /* Poll the event ring */
- xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
- xhci->zombie = 0;
- xhci_dbg(xhci, "Setting event ring polling timer\n");
- add_timer(&xhci->event_ring_timer);
-#endif
-
xhci_dbg(xhci, "Command ring memory map follows:\n");
xhci_debug_ring(xhci, xhci->cmd_ring);
xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
@@ -651,9 +606,11 @@ int xhci_run(struct usb_hcd *hcd)
xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
temp_64 &= ~ERST_PTR_MASK;
- xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (long unsigned int) temp_64);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "ERST deq = 64'h%0lx", (long unsigned int) temp_64);
- xhci_dbg(xhci, "// Set the interrupt modulation register\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Set the interrupt modulation register");
temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
temp &= ~ER_IRQ_INTERVAL_MASK;
temp |= (u32) 160;
@@ -662,12 +619,13 @@ int xhci_run(struct usb_hcd *hcd)
/* Set the HCD state before we enable the irqs */
temp = xhci_readl(xhci, &xhci->op_regs->command);
temp |= (CMD_EIE);
- xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
- temp);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Enable interrupts, cmd = 0x%x.", temp);
xhci_writel(xhci, temp, &xhci->op_regs->command);
temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
- xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Enabling event ring interrupter %p by writing 0x%x to irq_pending",
xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
xhci_writel(xhci, ER_IRQ_ENABLE(temp),
&xhci->ir_set->irq_pending);
@@ -677,7 +635,8 @@ int xhci_run(struct usb_hcd *hcd)
xhci_queue_vendor_command(xhci, 0, 0, 0,
TRB_TYPE(TRB_NEC_GET_FW));
- xhci_dbg(xhci, "Finished xhci_run for USB2 roothub\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "Finished xhci_run for USB2 roothub");
return 0;
}
@@ -725,24 +684,20 @@ void xhci_stop(struct usb_hcd *hcd)
xhci_cleanup_msix(xhci);
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
- /* Tell the event ring poll function not to reschedule */
- xhci->zombie = 1;
- del_timer_sync(&xhci->event_ring_timer);
-#endif
-
/* Deleting Compliance Mode Recovery Timer */
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
(!(xhci_all_ports_seen_u0(xhci)))) {
del_timer_sync(&xhci->comp_mode_recovery_timer);
- xhci_dbg(xhci, "%s: compliance mode recovery timer deleted\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "%s: compliance mode recovery timer deleted",
__func__);
}
if (xhci->quirks & XHCI_AMD_PLL_FIX)
usb_amd_dev_put();
- xhci_dbg(xhci, "// Disabling event ring interrupts\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Disabling event ring interrupts");
temp = xhci_readl(xhci, &xhci->op_regs->status);
xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
@@ -750,10 +705,11 @@ void xhci_stop(struct usb_hcd *hcd)
&xhci->ir_set->irq_pending);
xhci_print_ir_set(xhci, 0);
- xhci_dbg(xhci, "cleaning up memory\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
xhci_mem_cleanup(xhci);
- xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
- xhci_readl(xhci, &xhci->op_regs->status));
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "xhci_stop completed - status = %x",
+ xhci_readl(xhci, &xhci->op_regs->status));
}
/*
@@ -778,8 +734,9 @@ void xhci_shutdown(struct usb_hcd *hcd)
xhci_cleanup_msix(xhci);
- xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
- xhci_readl(xhci, &xhci->op_regs->status));
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "xhci_shutdown completed - status = %x",
+ xhci_readl(xhci, &xhci->op_regs->status));
}
#ifdef CONFIG_PM
@@ -820,7 +777,8 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
xhci->cmd_ring->dequeue) &
(u64) ~CMD_RING_RSVD_BITS) |
xhci->cmd_ring->cycle_state;
- xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+ "// Setting command ring address to 0x%llx",
(long unsigned long) val_64);
xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
@@ -933,7 +891,8 @@ int xhci_suspend(struct xhci_hcd *xhci)
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
(!(xhci_all_ports_seen_u0(xhci)))) {
del_timer_sync(&xhci->comp_mode_recovery_timer);
- xhci_dbg(xhci, "%s: compliance mode recovery timer deleted\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "%s: compliance mode recovery timer deleted",
__func__);
}
@@ -998,7 +957,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) &&
!(xhci_all_ports_seen_u0(xhci))) {
del_timer_sync(&xhci->comp_mode_recovery_timer);
- xhci_dbg(xhci, "Compliance Mode Recovery Timer deleted!\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Compliance Mode Recovery Timer deleted!");
}
/* Let the USB core know _both_ roothubs lost power. */
@@ -1011,12 +971,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
spin_unlock_irq(&xhci->lock);
xhci_cleanup_msix(xhci);
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
- /* Tell the event ring poll function not to reschedule */
- xhci->zombie = 1;
- del_timer_sync(&xhci->event_ring_timer);
-#endif
-
xhci_dbg(xhci, "// Disabling event ring interrupts\n");
temp = xhci_readl(xhci, &xhci->op_regs->status);
xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
@@ -1170,35 +1124,33 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
struct xhci_virt_device *virt_dev;
if (!hcd || (check_ep && !ep) || !udev) {
- printk(KERN_DEBUG "xHCI %s called with invalid args\n",
- func);
+ pr_debug("xHCI %s called with invalid args\n", func);
return -EINVAL;
}
if (!udev->parent) {
- printk(KERN_DEBUG "xHCI %s called for root hub\n",
- func);
+ pr_debug("xHCI %s called for root hub\n", func);
return 0;
}
xhci = hcd_to_xhci(hcd);
- if (xhci->xhc_state & XHCI_STATE_HALTED)
- return -ENODEV;
-
if (check_virt_dev) {
if (!udev->slot_id || !xhci->devs[udev->slot_id]) {
- printk(KERN_DEBUG "xHCI %s called with unaddressed "
- "device\n", func);
+ xhci_dbg(xhci, "xHCI %s called with unaddressed device\n",
+ func);
return -EINVAL;
}
virt_dev = xhci->devs[udev->slot_id];
if (virt_dev->udev != udev) {
- printk(KERN_DEBUG "xHCI %s called with udev and "
+ xhci_dbg(xhci, "xHCI %s called with udev and "
"virt_dev does not match\n", func);
return -EINVAL;
}
}
+ if (xhci->xhc_state & XHCI_STATE_HALTED)
+ return -ENODEV;
+
return 1;
}
@@ -1228,12 +1180,16 @@ static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
hw_max_packet_size = MAX_PACKET_DECODED(le32_to_cpu(ep_ctx->ep_info2));
max_packet_size = usb_endpoint_maxp(&urb->dev->ep0.desc);
if (hw_max_packet_size != max_packet_size) {
- xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
- xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+ "Max Packet Size for ep 0 changed.");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+ "Max packet size in usb_device = %d",
max_packet_size);
- xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+ "Max packet size in xHCI HW = %d",
hw_max_packet_size);
- xhci_dbg(xhci, "Issuing evaluate context command.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+ "Issuing evaluate context command.");
/* Set up the input context flags for the command */
/* FIXME: This won't work if a non-default control endpoint
@@ -1498,7 +1454,8 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
goto done;
temp = xhci_readl(xhci, &xhci->op_regs->status);
if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_HALTED)) {
- xhci_dbg(xhci, "HW died, freeing TD.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "HW died, freeing TD.");
urb_priv = urb->hcpriv;
for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
td = urb_priv->td[i];
@@ -1516,8 +1473,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
}
if ((xhci->xhc_state & XHCI_STATE_DYING) ||
(xhci->xhc_state & XHCI_STATE_HALTED)) {
- xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
- "non-responsive xHCI host.\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Ep 0x%x: URB %p to be canceled on "
+ "non-responsive xHCI host.",
urb->ep->desc.bEndpointAddress, urb);
/* Let the stop endpoint command watchdog timer (which set this
* state) finish cleaning up the endpoint TD lists. We must
@@ -1538,8 +1496,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
urb_priv = urb->hcpriv;
i = urb_priv->td_cnt;
if (i < urb_priv->length)
- xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, "
- "starting at offset 0x%llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
+ "Cancel URB %p, dev %s, ep 0x%x, "
+ "starting at offset 0x%llx",
urb, urb->dev->devpath,
urb->ep->desc.bEndpointAddress,
(unsigned long long) xhci_trb_virt_to_dma(
@@ -1851,7 +1810,8 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
ret = -ENODEV;
break;
case COMP_SUCCESS:
- dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+ "Successful Endpoint Configure command");
ret = 0;
break;
default:
@@ -1897,7 +1857,8 @@ static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
ret = -EINVAL;
break;
case COMP_SUCCESS:
- dev_dbg(&udev->dev, "Successful evaluate context command\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+ "Successful evaluate context command");
ret = 0;
break;
default:
@@ -1963,14 +1924,16 @@ static int xhci_reserve_host_resources(struct xhci_hcd *xhci,
added_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
if (xhci->num_active_eps + added_eps > xhci->limit_active_eps) {
- xhci_dbg(xhci, "Not enough ep ctxs: "
- "%u active, need to add %u, limit is %u.\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Not enough ep ctxs: "
+ "%u active, need to add %u, limit is %u.",
xhci->num_active_eps, added_eps,
xhci->limit_active_eps);
return -ENOMEM;
}
xhci->num_active_eps += added_eps;
- xhci_dbg(xhci, "Adding %u ep ctxs, %u now active.\n", added_eps,
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Adding %u ep ctxs, %u now active.", added_eps,
xhci->num_active_eps);
return 0;
}
@@ -1988,7 +1951,8 @@ static void xhci_free_host_resources(struct xhci_hcd *xhci,
num_failed_eps = xhci_count_num_new_endpoints(xhci, ctrl_ctx);
xhci->num_active_eps -= num_failed_eps;
- xhci_dbg(xhci, "Removing %u failed ep ctxs, %u now active.\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Removing %u failed ep ctxs, %u now active.",
num_failed_eps,
xhci->num_active_eps);
}
@@ -2007,7 +1971,8 @@ static void xhci_finish_resource_reservation(struct xhci_hcd *xhci,
num_dropped_eps = xhci_count_num_dropped_endpoints(xhci, ctrl_ctx);
xhci->num_active_eps -= num_dropped_eps;
if (num_dropped_eps)
- xhci_dbg(xhci, "Removing %u dropped ep ctxs, %u now active.\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Removing %u dropped ep ctxs, %u now active.",
num_dropped_eps,
xhci->num_active_eps);
}
@@ -2168,18 +2133,21 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
* that the HS bus has enough bandwidth if we are activating a new TT.
*/
if (virt_dev->tt_info) {
- xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Recalculating BW for rootport %u",
virt_dev->real_port);
if (xhci_check_tt_bw_table(xhci, virt_dev, old_active_eps)) {
xhci_warn(xhci, "Not enough bandwidth on HS bus for "
"newly activated TT.\n");
return -ENOMEM;
}
- xhci_dbg(xhci, "Recalculating BW for TT slot %u port %u\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Recalculating BW for TT slot %u port %u",
virt_dev->tt_info->slot_id,
virt_dev->tt_info->ttport);
} else {
- xhci_dbg(xhci, "Recalculating BW for rootport %u\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Recalculating BW for rootport %u",
virt_dev->real_port);
}
@@ -2287,8 +2255,9 @@ static int xhci_check_bw_table(struct xhci_hcd *xhci,
xhci->rh_bw[port_index].num_active_tts;
}
- xhci_dbg(xhci, "Final bandwidth: %u, Limit: %u, Reserved: %u, "
- "Available: %u " "percent\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Final bandwidth: %u, Limit: %u, Reserved: %u, "
+ "Available: %u " "percent",
bw_used, max_bandwidth, bw_reserved,
(max_bandwidth - bw_used - bw_reserved) * 100 /
max_bandwidth);
@@ -2658,7 +2627,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
if ((xhci->quirks & XHCI_EP_LIMIT_QUIRK))
xhci_free_host_resources(xhci, ctrl_ctx);
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+ "FIXME allocate a new ring segment");
return -ENOMEM;
}
xhci_ring_cmd_db(xhci);
@@ -2871,7 +2841,8 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
struct xhci_dequeue_state deq_state;
struct xhci_virt_ep *ep;
- xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+ "Cleaning up stalled endpoint ring");
ep = &xhci->devs[udev->slot_id]->eps[ep_index];
/* We need to move the HW's dequeue pointer past this TD,
* or it will attempt to resend it on the next doorbell ring.
@@ -2884,7 +2855,8 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
* issue a configure endpoint command later.
*/
if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
- xhci_dbg(xhci, "Queueing new dequeue state\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+ "Queueing new dequeue state");
xhci_queue_new_dequeue_state(xhci, udev->slot_id,
ep_index, ep->stopped_stream, &deq_state);
} else {
@@ -2893,8 +2865,9 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
* XXX: No idea how this hardware will react when stream rings
* are enabled.
*/
- xhci_dbg(xhci, "Setting up input context for "
- "configure endpoint command\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Setting up input context for "
+ "configure endpoint command");
xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
ep_index, &deq_state);
}
@@ -2926,16 +2899,19 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
ep_index = xhci_get_endpoint_index(&ep->desc);
virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
if (!virt_ep->stopped_td) {
- xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
- ep->desc.bEndpointAddress);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+ "Endpoint 0x%x not halted, refusing to reset.",
+ ep->desc.bEndpointAddress);
return;
}
if (usb_endpoint_xfer_control(&ep->desc)) {
- xhci_dbg(xhci, "Control endpoint stall already handled.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+ "Control endpoint stall already handled.");
return;
}
- xhci_dbg(xhci, "Queueing reset endpoint command\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
+ "Queueing reset endpoint command");
spin_lock_irqsave(&xhci->lock, flags);
ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
/*
@@ -3075,8 +3051,8 @@ static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
/* Are streams already being freed for the endpoint? */
if (ep_state & EP_GETTING_NO_STREAMS) {
xhci_warn(xhci, "WARN Can't disable streams for "
- "endpoint 0x%x\n, "
- "streams are being disabled already.",
+ "endpoint 0x%x, "
+ "streams are being disabled already\n",
eps[i]->desc.bEndpointAddress);
return 0;
}
@@ -3084,8 +3060,8 @@ static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
if (!(ep_state & EP_HAS_STREAMS) &&
!(ep_state & EP_GETTING_STREAMS)) {
xhci_warn(xhci, "WARN Can't disable streams for "
- "endpoint 0x%x\n, "
- "streams are already disabled!",
+ "endpoint 0x%x, "
+ "streams are already disabled!\n",
eps[i]->desc.bEndpointAddress);
xhci_warn(xhci, "WARN xhci_free_streams() called "
"with non-streams endpoint\n");
@@ -3373,8 +3349,9 @@ void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
}
xhci->num_active_eps -= num_dropped_eps;
if (num_dropped_eps)
- xhci_dbg(xhci, "Dropped %u ep ctxs, flags = 0x%x, "
- "%u now active.\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Dropped %u ep ctxs, flags = 0x%x, "
+ "%u now active.",
num_dropped_eps, drop_flags,
xhci->num_active_eps);
}
@@ -3508,10 +3485,10 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
switch (ret) {
case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
case COMP_CTX_STATE: /* 0.96 completion code for same thing */
- xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
+ xhci_dbg(xhci, "Can't reset device (slot ID %u) in %s state\n",
slot_id,
xhci_get_slot_state(xhci, virt_dev->out_ctx));
- xhci_info(xhci, "Not freeing device rings.\n");
+ xhci_dbg(xhci, "Not freeing device rings.\n");
/* Don't treat this as an error. May change my mind later. */
ret = 0;
goto command_cleanup;
@@ -3584,6 +3561,16 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
u32 state;
int i, ret;
+#ifndef CONFIG_USB_DEFAULT_PERSIST
+ /*
+ * We called pm_runtime_get_noresume when the device was attached.
+ * Decrement the counter here to allow controller to runtime suspend
+ * Decrement the counter here to allow the controller to runtime suspend
+ */
+ if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ pm_runtime_put_noidle(hcd->self.controller);
+#endif
+
ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
/* If the host is halted due to driver unload, we still need to free the
* device.
@@ -3636,13 +3623,15 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
{
if (xhci->num_active_eps + 1 > xhci->limit_active_eps) {
- xhci_dbg(xhci, "Not enough ep ctxs: "
- "%u active, need to add 1, limit is %u.\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Not enough ep ctxs: "
+ "%u active, need to add 1, limit is %u.",
xhci->num_active_eps, xhci->limit_active_eps);
return -ENOMEM;
}
xhci->num_active_eps += 1;
- xhci_dbg(xhci, "Adding 1 ep ctx, %u now active.\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
+ "Adding 1 ep ctx, %u now active.",
xhci->num_active_eps);
return 0;
}
@@ -3707,6 +3696,16 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
goto disable_slot;
}
udev->slot_id = xhci->slot_id;
+
+#ifndef CONFIG_USB_DEFAULT_PERSIST
+ /*
+ * If resetting upon resume, we can't put the controller into runtime
+ * suspend if there is a device attached.
+ */
+ if (xhci->quirks & XHCI_RESET_ON_RESUME)
+ pm_runtime_get_noresume(hcd->self.controller);
+#endif
+
/* Is this a LS or FS device under a HS hub? */
/* Hub or peripheral? */
return 1;
@@ -3742,7 +3741,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
union xhci_trb *cmd_trb;
if (!udev->slot_id) {
- xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+ "Bad Slot ID %d", udev->slot_id);
return -EINVAL;
}
@@ -3781,6 +3781,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
+ trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
+ slot_ctx->dev_info >> 27);
spin_lock_irqsave(&xhci->lock, flags);
cmd_trb = xhci->cmd_ring->dequeue;
@@ -3788,7 +3790,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
udev->slot_id);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
- xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+ "FIXME: allocate a command ring segment");
return ret;
}
xhci_ring_cmd_db(xhci);
@@ -3828,13 +3831,15 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
ret = -ENODEV;
break;
case COMP_SUCCESS:
- xhci_dbg(xhci, "Successful Address Device command\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+ "Successful Address Device command");
break;
default:
xhci_err(xhci, "ERROR: unexpected command completion "
"code 0x%x.\n", virt_dev->cmd_status);
xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
+ trace_xhci_address_ctx(xhci, virt_dev->out_ctx, 1);
ret = -EINVAL;
break;
}
@@ -3842,16 +3847,21 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
return ret;
}
temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
- xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
- xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
- udev->slot_id,
- &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
- (unsigned long long)
- le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
- xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
+ xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+ "Op regs DCBAA ptr = %#016llx", temp_64);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+ "Slot ID %d dcbaa entry @%p = %#016llx",
+ udev->slot_id,
+ &xhci->dcbaa->dev_context_ptrs[udev->slot_id],
+ (unsigned long long)
+ le64_to_cpu(xhci->dcbaa->dev_context_ptrs[udev->slot_id]));
+ xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+ "Output Context DMA address = %#08llx",
(unsigned long long)virt_dev->out_ctx->dma);
xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
+ trace_xhci_address_ctx(xhci, virt_dev->in_ctx,
+ slot_ctx->dev_info >> 27);
xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
/*
@@ -3859,6 +3869,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
* address given back to us by the HC.
*/
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+ trace_xhci_address_ctx(xhci, virt_dev->out_ctx,
+ slot_ctx->dev_info >> 27);
/* Use kernel assigned address for devices; store xHC assigned
* address locally. */
virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK)
@@ -3867,7 +3879,8 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
ctrl_ctx->add_flags = 0;
ctrl_ctx->drop_flags = 0;
- xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);
+ xhci_dbg_trace(xhci, trace_xhci_dbg_address,
+ "Internal device address = %d", virt_dev->address);
return 0;
}
@@ -3898,7 +3911,7 @@ int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1)
* Issue an Evaluate Context command to change the Maximum Exit Latency in the
* slot context. If that succeeds, store the new MEL in the xhci_virt_device.
*/
-static int xhci_change_max_exit_latency(struct xhci_hcd *xhci,
+static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci,
struct usb_device *udev, u16 max_exit_latency)
{
struct xhci_virt_device *virt_dev;
@@ -3933,7 +3946,8 @@ static int xhci_change_max_exit_latency(struct xhci_hcd *xhci,
slot_ctx->dev_info2 &= cpu_to_le32(~((u32) MAX_EXIT));
slot_ctx->dev_info2 |= cpu_to_le32(max_exit_latency);
- xhci_dbg(xhci, "Set up evaluate context for LPM MEL change.\n");
+ xhci_dbg_trace(xhci, trace_xhci_dbg_context_change,
+ "Set up evaluate context for LPM MEL change.");
xhci_dbg(xhci, "Slot %u Input Context:\n", udev->slot_id);
xhci_dbg_ctx(xhci, command->in_ctx, 0);
@@ -4353,7 +4367,7 @@ static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
state_name, sel);
else
dev_dbg(&udev->dev, "Device-initiated %s disabled "
- "due to long PEL %llu\n ms",
+ "due to long PEL %llu ms\n",
state_name, pel);
return USB3_LPM_DISABLED;
}
@@ -4837,10 +4851,13 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
struct xhci_hcd *xhci;
struct device *dev = hcd->self.controller;
int retval;
- u32 temp;
/* Accept arbitrarily long scatter-gather lists */
hcd->self.sg_tablesize = ~0;
+
+ /* support building packets from discontinuous buffers */
+ hcd->self.no_sg_constraint = 1;
+
/* XHCI controllers don't stop the ep queue on short packets :| */
hcd->self.no_stop_on_short = 1;
@@ -4865,14 +4882,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
/* xHCI private pointer was set in xhci_pci_probe for the second
* registered roothub.
*/
- xhci = hcd_to_xhci(hcd);
- temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
- if (HCC_64BIT_ADDR(temp)) {
- xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
- dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
- } else {
- dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
- }
return 0;
}
@@ -4892,6 +4901,13 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
get_quirks(dev, xhci);
+ /* xHCI controllers that follow the xHCI 1.0 spec may give a spurious
+ * success event after a short transfer. This quirk ignores such
+ * spurious events.
+ */
+ if (xhci->hci_version > 0x96)
+ xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
+
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
if (retval)
@@ -4904,12 +4920,12 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
goto error;
xhci_dbg(xhci, "Reset complete\n");
- temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
- if (HCC_64BIT_ADDR(temp)) {
+ /* Set dma_mask and coherent_dma_mask to 64-bits,
+ * if xHC supports 64-bit addressing */
+ if (HCC_64BIT_ADDR(xhci->hcc_params) &&
+ !dma_set_mask(dev, DMA_BIT_MASK(64))) {
xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
- dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
- } else {
- dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
+ dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
}
xhci_dbg(xhci, "Calling HCD init\n");
@@ -4934,12 +4950,12 @@ static int __init xhci_hcd_init(void)
retval = xhci_register_pci();
if (retval < 0) {
- printk(KERN_DEBUG "Problem registering PCI driver.");
+ pr_debug("Problem registering PCI driver.\n");
return retval;
}
retval = xhci_register_plat();
if (retval < 0) {
- printk(KERN_DEBUG "Problem registering platform driver.");
+ pr_debug("Problem registering platform driver.\n");
goto unreg_pci;
}
/*
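
The xhci.c hunks above route each debug message through xhci_dbg_trace() plus a per-topic trace callback (trace_xhci_dbg_init, trace_xhci_dbg_quirks, trace_xhci_dbg_address, trace_xhci_dbg_context_change, trace_xhci_dbg_cancel_urb, trace_xhci_dbg_reset_ep). The trace events themselves live in an xhci trace header that is not part of this excerpt; as a rough sketch only, and assuming the usual tracepoint boilerplate plus an XHCI_MSG_MAX buffer bound (both assumptions, not taken from this diff), one event class over a struct va_format argument with one DEFINE_EVENT per topic could look like:

/* Sketch only: the real header, its include guards and TRACE_SYSTEM setup
 * are not shown in this diff. */
DECLARE_EVENT_CLASS(xhci_log_msg,
	TP_PROTO(struct va_format *vaf),
	TP_ARGS(vaf),
	TP_STRUCT__entry(__dynamic_array(char, msg, XHCI_MSG_MAX)),
	TP_fast_assign(
		vsnprintf(__get_str(msg), XHCI_MSG_MAX, vaf->fmt, *vaf->va);
	),
	TP_printk("%s", __get_str(msg))
);

DEFINE_EVENT(xhci_log_msg, xhci_dbg_init,
	TP_PROTO(struct va_format *vaf), TP_ARGS(vaf)
);
DEFINE_EVENT(xhci_log_msg, xhci_dbg_quirks,
	TP_PROTO(struct va_format *vaf), TP_ARGS(vaf)
);
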
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index c338741a675..46aa1489414 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1490,11 +1490,6 @@ struct xhci_hcd {
struct dma_pool *small_streams_pool;
struct dma_pool *medium_streams_pool;
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
- /* Poll the rings - for debugging */
- struct timer_list event_ring_timer;
- int zombie;
-#endif
/* Host controller watchdog timer structures */
unsigned int xhc_state;
@@ -1542,6 +1537,7 @@ struct xhci_hcd {
#define XHCI_SPURIOUS_REBOOT (1 << 13)
#define XHCI_COMP_MODE_QUIRK (1 << 14)
#define XHCI_AVOID_BEI (1 << 15)
+#define XHCI_PLAT (1 << 16)
unsigned int num_active_eps;
unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
@@ -1579,16 +1575,8 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
return xhci->main_hcd;
}
-#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
-#define XHCI_DEBUG 1
-#else
-#define XHCI_DEBUG 0
-#endif
-
#define xhci_dbg(xhci, fmt, args...) \
- do { if (XHCI_DEBUG) dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
-#define xhci_info(xhci, fmt, args...) \
- do { if (XHCI_DEBUG) dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
+ dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
#define xhci_err(xhci, fmt, args...) \
dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
#define xhci_warn(xhci, fmt, args...) \
@@ -1660,6 +1648,8 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_virt_ep *ep);
+void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
+ const char *fmt, ...);
/* xHCI memory management */
void xhci_mem_cleanup(struct xhci_hcd *xhci);
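
The xhci.h hunk above only declares xhci_dbg_trace(); its body is added elsewhere in the series. A minimal sketch of such a helper, formatting the message once and handing it to both dev_dbg() (via the existing xhci_dbg() macro) and the caller-supplied trace callback; this is a sketch under those assumptions, not the definitive implementation:

void xhci_dbg_trace(struct xhci_hcd *xhci, void (*trace)(struct va_format *),
		    const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	xhci_dbg(xhci, "%pV\n", &vaf);	/* one dev_dbg() line */
	trace(&vaf);			/* and one trace event */
	va_end(args);
}
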
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index a51e7d6afda..e2b21c1d9c4 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -200,6 +200,19 @@ config USB_TEST
See <http://www.linux-usb.org/usbtest/> for more information,
including sample test device firmware and "how to use it".
+config USB_EHSET_TEST_FIXTURE
+ tristate "USB EHSET Test Fixture driver"
+ help
+ Say Y here if you want to support the special test fixture device
+ used for the USB-IF Embedded Host High-Speed Electrical Test procedure.
+
+ When the test fixture is connected, it can enumerate as one of several
+ VID/PID pairs. This driver then initiates a corresponding test mode on
+ the downstream port to which the test fixture is attached.
+
+ See <http://www.usb.org/developers/onthego/EHSET_v1.01.pdf> for more
+ information.
+
config USB_ISIGHTFW
tristate "iSight firmware loading support"
select FW_LOADER
@@ -233,5 +246,6 @@ config USB_EZUSB_FX2
config USB_HSIC_USB3503
tristate "USB3503 HSIC to USB20 Driver"
depends on I2C
+ select REGMAP
help
This option enables support for the SMSC USB3503 HSIC to USB 2.0 driver.
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 3e1bd70b06e..e748fd5dbe9 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -2,9 +2,6 @@
# Makefile for the rest of the USB drivers
# (the ones that don't fit into any other categories)
#
-
-ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
-
obj-$(CONFIG_USB_ADUTUX) += adutux.o
obj-$(CONFIG_USB_APPLEDISPLAY) += appledisplay.o
obj-$(CONFIG_USB_CYPRESS_CY7C63) += cypress_cy7c63.o
@@ -22,6 +19,7 @@ obj-$(CONFIG_USB_LED) += usbled.o
obj-$(CONFIG_USB_LEGOTOWER) += legousbtower.o
obj-$(CONFIG_USB_RIO500) += rio500.o
obj-$(CONFIG_USB_TEST) += usbtest.o
+obj-$(CONFIG_USB_EHSET_TEST_FIXTURE) += ehset.o
obj-$(CONFIG_USB_TRANCEVIBRATOR) += trancevibrator.o
obj-$(CONFIG_USB_USS720) += uss720.o
obj-$(CONFIG_USB_SEVSEG) += usbsevseg.o
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index eb3c8c142fa..3eaa83f0508 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -18,6 +18,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -27,30 +29,11 @@
#include <linux/mutex.h>
#include <linux/uaccess.h>
-#ifdef CONFIG_USB_DEBUG
-static int debug = 5;
-#else
-static int debug = 1;
-#endif
-
-/* Use our own dbg macro */
-#undef dbg
-#define dbg(lvl, format, arg...) \
-do { \
- if (debug >= lvl) \
- printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \
-} while (0)
-
-
/* Version Information */
#define DRIVER_VERSION "v0.0.13"
#define DRIVER_AUTHOR "John Homppi"
#define DRIVER_DESC "adutux (see www.ontrak.net)"
-/* Module parameters */
-module_param(debug, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug enabled or not");
-
/* Define these values to match your device */
#define ADU_VENDOR_ID 0x0a07
#define ADU_PRODUCT_ID 0x0064
@@ -124,19 +107,11 @@ static DEFINE_MUTEX(adutux_mutex);
static struct usb_driver adu_driver;
-static void adu_debug_data(int level, const char *function, int size,
- const unsigned char *data)
+static inline void adu_debug_data(struct device *dev, const char *function,
+ int size, const unsigned char *data)
{
- int i;
-
- if (debug < level)
- return;
-
- printk(KERN_DEBUG "%s: %s - length = %d, data = ",
- __FILE__, function, size);
- for (i = 0; i < size; ++i)
- printk("%.2x ", data[i]);
- printk("\n");
+ dev_dbg(dev, "%s - length = %d, data = %*ph\n",
+ function, size, size, data);
}
/**
@@ -147,12 +122,8 @@ static void adu_abort_transfers(struct adu_device *dev)
{
unsigned long flags;
- dbg(2, " %s : enter", __func__);
-
- if (dev->udev == NULL) {
- dbg(1, " %s : udev is null", __func__);
- goto exit;
- }
+ if (dev->udev == NULL)
+ return;
/* shutdown transfer */
@@ -170,15 +141,10 @@ static void adu_abort_transfers(struct adu_device *dev)
usb_kill_urb(dev->interrupt_out_urb);
} else
spin_unlock_irqrestore(&dev->buflock, flags);
-
-exit:
- dbg(2, " %s : leave", __func__);
}
static void adu_delete(struct adu_device *dev)
{
- dbg(2, "%s enter", __func__);
-
/* free data structures */
usb_free_urb(dev->interrupt_in_urb);
usb_free_urb(dev->interrupt_out_urb);
@@ -187,8 +153,6 @@ static void adu_delete(struct adu_device *dev)
kfree(dev->interrupt_in_buffer);
kfree(dev->interrupt_out_buffer);
kfree(dev);
-
- dbg(2, "%s : leave", __func__);
}
static void adu_interrupt_in_callback(struct urb *urb)
@@ -196,17 +160,17 @@ static void adu_interrupt_in_callback(struct urb *urb)
struct adu_device *dev = urb->context;
int status = urb->status;
- dbg(4, " %s : enter, status %d", __func__, status);
- adu_debug_data(5, __func__, urb->actual_length,
- urb->transfer_buffer);
+ adu_debug_data(&dev->udev->dev, __func__,
+ urb->actual_length, urb->transfer_buffer);
spin_lock(&dev->buflock);
if (status != 0) {
if ((status != -ENOENT) && (status != -ECONNRESET) &&
(status != -ESHUTDOWN)) {
- dbg(1, " %s : nonzero status received: %d",
- __func__, status);
+ dev_dbg(&dev->udev->dev,
+ "%s : nonzero status received: %d\n",
+ __func__, status);
}
goto exit;
}
@@ -220,10 +184,11 @@ static void adu_interrupt_in_callback(struct urb *urb)
dev->interrupt_in_buffer, urb->actual_length);
dev->read_buffer_length += urb->actual_length;
- dbg(2, " %s reading %d ", __func__,
- urb->actual_length);
+ dev_dbg(&dev->udev->dev, "%s reading %d\n", __func__,
+ urb->actual_length);
} else {
- dbg(1, " %s : read_buffer overflow", __func__);
+ dev_dbg(&dev->udev->dev, "%s : read_buffer overflow\n",
+ __func__);
}
}
@@ -232,9 +197,6 @@ exit:
spin_unlock(&dev->buflock);
/* always wake up so we recover from errors */
wake_up_interruptible(&dev->read_wait);
- adu_debug_data(5, __func__, urb->actual_length,
- urb->transfer_buffer);
- dbg(4, " %s : leave, status %d", __func__, status);
}
static void adu_interrupt_out_callback(struct urb *urb)
@@ -242,27 +204,23 @@ static void adu_interrupt_out_callback(struct urb *urb)
struct adu_device *dev = urb->context;
int status = urb->status;
- dbg(4, " %s : enter, status %d", __func__, status);
- adu_debug_data(5, __func__, urb->actual_length, urb->transfer_buffer);
+ adu_debug_data(&dev->udev->dev, __func__,
+ urb->actual_length, urb->transfer_buffer);
if (status != 0) {
if ((status != -ENOENT) &&
(status != -ECONNRESET)) {
- dbg(1, " %s :nonzero status received: %d",
- __func__, status);
+ dev_dbg(&dev->udev->dev,
+ "%s :nonzero status received: %d\n", __func__,
+ status);
}
- goto exit;
+ return;
}
spin_lock(&dev->buflock);
dev->out_urb_finished = 1;
wake_up(&dev->write_wait);
spin_unlock(&dev->buflock);
-exit:
-
- adu_debug_data(5, __func__, urb->actual_length,
- urb->transfer_buffer);
- dbg(4, " %s : leave, status %d", __func__, status);
}
static int adu_open(struct inode *inode, struct file *file)
@@ -272,20 +230,16 @@ static int adu_open(struct inode *inode, struct file *file)
int subminor;
int retval;
- dbg(2, "%s : enter", __func__);
-
subminor = iminor(inode);
retval = mutex_lock_interruptible(&adutux_mutex);
- if (retval) {
- dbg(2, "%s : mutex lock failed", __func__);
+ if (retval)
goto exit_no_lock;
- }
interface = usb_find_interface(&adu_driver, subminor);
if (!interface) {
- printk(KERN_ERR "adutux: %s - error, can't find device for "
- "minor %d\n", __func__, subminor);
+ pr_err("%s - error, can't find device for minor %d\n",
+ __func__, subminor);
retval = -ENODEV;
goto exit_no_device;
}
@@ -303,7 +257,8 @@ static int adu_open(struct inode *inode, struct file *file)
}
++dev->open_count;
- dbg(2, "%s : open count %d", __func__, dev->open_count);
+ dev_dbg(&dev->udev->dev, "%s: open count %d\n", __func__,
+ dev->open_count);
/* save device in the file's private structure */
file->private_data = dev;
@@ -333,23 +288,19 @@ static int adu_open(struct inode *inode, struct file *file)
exit_no_device:
mutex_unlock(&adutux_mutex);
exit_no_lock:
- dbg(2, "%s : leave, return value %d ", __func__, retval);
return retval;
}
static void adu_release_internal(struct adu_device *dev)
{
- dbg(2, " %s : enter", __func__);
-
/* decrement our usage count for the device */
--dev->open_count;
- dbg(2, " %s : open count %d", __func__, dev->open_count);
+ dev_dbg(&dev->udev->dev, "%s : open count %d\n", __func__,
+ dev->open_count);
if (dev->open_count <= 0) {
adu_abort_transfers(dev);
dev->open_count = 0;
}
-
- dbg(2, " %s : leave", __func__);
}
static int adu_release(struct inode *inode, struct file *file)
@@ -357,17 +308,13 @@ static int adu_release(struct inode *inode, struct file *file)
struct adu_device *dev;
int retval = 0;
- dbg(2, " %s : enter", __func__);
-
if (file == NULL) {
- dbg(1, " %s : file is NULL", __func__);
retval = -ENODEV;
goto exit;
}
dev = file->private_data;
if (dev == NULL) {
- dbg(1, " %s : object is NULL", __func__);
retval = -ENODEV;
goto exit;
}
@@ -375,7 +322,7 @@ static int adu_release(struct inode *inode, struct file *file)
mutex_lock(&adutux_mutex); /* not interruptible */
if (dev->open_count <= 0) {
- dbg(1, " %s : device not opened", __func__);
+ dev_dbg(&dev->udev->dev, "%s : device not opened\n", __func__);
retval = -ENODEV;
goto unlock;
}
@@ -389,7 +336,6 @@ static int adu_release(struct inode *inode, struct file *file)
unlock:
mutex_unlock(&adutux_mutex);
exit:
- dbg(2, " %s : leave, return value %d", __func__, retval);
return retval;
}
@@ -406,35 +352,32 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
unsigned long flags;
DECLARE_WAITQUEUE(wait, current);
- dbg(2, " %s : enter, count = %Zd, file=%p", __func__, count, file);
-
dev = file->private_data;
- dbg(2, " %s : dev=%p", __func__, dev);
-
if (mutex_lock_interruptible(&dev->mtx))
return -ERESTARTSYS;
/* verify that the device wasn't unplugged */
if (dev->udev == NULL) {
retval = -ENODEV;
- printk(KERN_ERR "adutux: No device or device unplugged %d\n",
- retval);
+ pr_err("No device or device unplugged %d\n", retval);
goto exit;
}
/* verify that some data was requested */
if (count == 0) {
- dbg(1, " %s : read request of 0 bytes", __func__);
+ dev_dbg(&dev->udev->dev, "%s : read request of 0 bytes\n",
+ __func__);
goto exit;
}
timeout = COMMAND_TIMEOUT;
- dbg(2, " %s : about to start looping", __func__);
+ dev_dbg(&dev->udev->dev, "%s : about to start looping\n", __func__);
while (bytes_to_read) {
int data_in_secondary = dev->secondary_tail - dev->secondary_head;
- dbg(2, " %s : while, data_in_secondary=%d, status=%d",
- __func__, data_in_secondary,
- dev->interrupt_in_urb->status);
+ dev_dbg(&dev->udev->dev,
+ "%s : while, data_in_secondary=%d, status=%d\n",
+ __func__, data_in_secondary,
+ dev->interrupt_in_urb->status);
if (data_in_secondary) {
/* drain secondary buffer */
@@ -457,8 +400,9 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
if (dev->read_buffer_length) {
/* we secure access to the primary */
char *tmp;
- dbg(2, " %s : swap, read_buffer_length = %d",
- __func__, dev->read_buffer_length);
+ dev_dbg(&dev->udev->dev,
+ "%s : swap, read_buffer_length = %d\n",
+ __func__, dev->read_buffer_length);
tmp = dev->read_buffer_secondary;
dev->read_buffer_secondary = dev->read_buffer_primary;
dev->read_buffer_primary = tmp;
@@ -473,10 +417,14 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
if (!dev->read_urb_finished) {
/* somebody is doing IO */
spin_unlock_irqrestore(&dev->buflock, flags);
- dbg(2, " %s : submitted already", __func__);
+ dev_dbg(&dev->udev->dev,
+ "%s : submitted already\n",
+ __func__);
} else {
/* we must initiate input */
- dbg(2, " %s : initiate input", __func__);
+ dev_dbg(&dev->udev->dev,
+ "%s : initiate input\n",
+ __func__);
dev->read_urb_finished = 0;
spin_unlock_irqrestore(&dev->buflock, flags);
@@ -494,7 +442,9 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
if (retval == -ENOMEM) {
retval = bytes_read ? bytes_read : -ENOMEM;
}
- dbg(2, " %s : submit failed", __func__);
+ dev_dbg(&dev->udev->dev,
+ "%s : submit failed\n",
+ __func__);
goto exit;
}
}
@@ -513,13 +463,16 @@ static ssize_t adu_read(struct file *file, __user char *buffer, size_t count,
remove_wait_queue(&dev->read_wait, &wait);
if (timeout <= 0) {
- dbg(2, " %s : timeout", __func__);
+ dev_dbg(&dev->udev->dev,
+ "%s : timeout\n", __func__);
retval = bytes_read ? bytes_read : -ETIMEDOUT;
goto exit;
}
if (signal_pending(current)) {
- dbg(2, " %s : signal pending", __func__);
+ dev_dbg(&dev->udev->dev,
+ "%s : signal pending\n",
+ __func__);
retval = bytes_read ? bytes_read : -EINTR;
goto exit;
}
@@ -552,7 +505,6 @@ exit:
/* unlock the device */
mutex_unlock(&dev->mtx);
- dbg(2, " %s : leave, return value %d", __func__, retval);
return retval;
}
@@ -567,8 +519,6 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
unsigned long flags;
int retval;
- dbg(2, " %s : enter, count = %Zd", __func__, count);
-
dev = file->private_data;
retval = mutex_lock_interruptible(&dev->mtx);
@@ -578,14 +528,14 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
/* verify that the device wasn't unplugged */
if (dev->udev == NULL) {
retval = -ENODEV;
- printk(KERN_ERR "adutux: No device or device unplugged %d\n",
- retval);
+ pr_err("No device or device unplugged %d\n", retval);
goto exit;
}
/* verify that we actually have some data to write */
if (count == 0) {
- dbg(1, " %s : write request of 0 bytes", __func__);
+ dev_dbg(&dev->udev->dev, "%s : write request of 0 bytes\n",
+ __func__);
goto exit;
}
@@ -598,13 +548,15 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
mutex_unlock(&dev->mtx);
if (signal_pending(current)) {
- dbg(1, " %s : interrupted", __func__);
+ dev_dbg(&dev->udev->dev, "%s : interrupted\n",
+ __func__);
set_current_state(TASK_RUNNING);
retval = -EINTR;
goto exit_onqueue;
}
if (schedule_timeout(COMMAND_TIMEOUT) == 0) {
- dbg(1, "%s - command timed out.", __func__);
+ dev_dbg(&dev->udev->dev,
+ "%s - command timed out.\n", __func__);
retval = -ETIMEDOUT;
goto exit_onqueue;
}
@@ -615,18 +567,22 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
goto exit_nolock;
}
- dbg(4, " %s : in progress, count = %Zd", __func__, count);
+ dev_dbg(&dev->udev->dev,
+ "%s : in progress, count = %Zd\n",
+ __func__, count);
} else {
spin_unlock_irqrestore(&dev->buflock, flags);
set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->write_wait, &waita);
- dbg(4, " %s : sending, count = %Zd", __func__, count);
+ dev_dbg(&dev->udev->dev, "%s : sending, count = %Zd\n",
+ __func__, count);
/* write the data into interrupt_out_buffer from userspace */
buffer_size = usb_endpoint_maxp(dev->interrupt_out_endpoint);
bytes_to_write = count > buffer_size ? buffer_size : count;
- dbg(4, " %s : buffer_size = %Zd, count = %Zd, bytes_to_write = %Zd",
- __func__, buffer_size, count, bytes_to_write);
+ dev_dbg(&dev->udev->dev,
+ "%s : buffer_size = %Zd, count = %Zd, bytes_to_write = %Zd\n",
+ __func__, buffer_size, count, bytes_to_write);
if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write) != 0) {
retval = -EFAULT;
@@ -665,7 +621,6 @@ static ssize_t adu_write(struct file *file, const __user char *buffer,
exit:
mutex_unlock(&dev->mtx);
exit_nolock:
- dbg(2, " %s : leave, return value %d", __func__, retval);
return retval;
exit_onqueue:
@@ -711,8 +666,6 @@ static int adu_probe(struct usb_interface *interface,
int out_end_size;
int i;
- dbg(2, " %s : enter", __func__);
-
if (udev == NULL) {
dev_err(&interface->dev, "udev is NULL.\n");
goto exit;
@@ -812,7 +765,7 @@ static int adu_probe(struct usb_interface *interface,
dev_err(&interface->dev, "Could not retrieve serial number\n");
goto error;
}
- dbg(2, " %s : serial_number=%s", __func__, dev->serial_number);
+ dev_dbg(&interface->dev, "serial_number=%s\n", dev->serial_number);
/* we can register the device now, as it is ready */
usb_set_intfdata(interface, dev);
@@ -830,11 +783,9 @@ static int adu_probe(struct usb_interface *interface,
/* let the user know what node this device is now attached to */
dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n",
- udev->descriptor.idProduct, dev->serial_number,
+ le16_to_cpu(udev->descriptor.idProduct), dev->serial_number,
(dev->minor - ADU_MINOR_BASE));
exit:
- dbg(2, " %s : leave, return value %p (dev)", __func__, dev);
-
return retval;
error:
@@ -852,8 +803,6 @@ static void adu_disconnect(struct usb_interface *interface)
struct adu_device *dev;
int minor;
- dbg(2, " %s : enter", __func__);
-
dev = usb_get_intfdata(interface);
mutex_lock(&dev->mtx); /* not interruptible */
@@ -866,7 +815,8 @@ static void adu_disconnect(struct usb_interface *interface)
usb_set_intfdata(interface, NULL);
/* if the device is not opened, then we clean up right now */
- dbg(2, " %s : open count %d", __func__, dev->open_count);
+ dev_dbg(&dev->udev->dev, "%s : open count %d\n",
+ __func__, dev->open_count);
if (!dev->open_count)
adu_delete(dev);
@@ -874,8 +824,6 @@ static void adu_disconnect(struct usb_interface *interface)
dev_info(&interface->dev, "ADU device adutux%d now disconnected\n",
(minor - ADU_MINOR_BASE));
-
- dbg(2, " %s : leave", __func__);
}
/* usb specific object needed to register this driver with the usb subsystem */
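
The adu_debug_data() rewrite above collapses a hand-rolled hex-dump loop into a single dev_dbg() call using the %*ph extension, where the field width supplies the byte count and the printk core caps the dump at 64 bytes. A standalone illustration of the same pattern; the helper name here is illustrative, not part of the patch:

/* Illustrative only: hex-dump a short buffer with the %*ph specifier. */
static inline void demo_debug_data(struct device *dev, const char *function,
				   int size, const unsigned char *data)
{
	dev_dbg(dev, "%s - length = %d, data = %*ph\n",
		function, size, size, data);
}
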
diff --git a/drivers/usb/misc/ehset.c b/drivers/usb/misc/ehset.c
new file mode 100644
index 00000000000..c31b4a33e6b
--- /dev/null
+++ b/drivers/usb/misc/ehset.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+#include <linux/usb/ch11.h>
+
+#define TEST_SE0_NAK_PID 0x0101
+#define TEST_J_PID 0x0102
+#define TEST_K_PID 0x0103
+#define TEST_PACKET_PID 0x0104
+#define TEST_HS_HOST_PORT_SUSPEND_RESUME 0x0106
+#define TEST_SINGLE_STEP_GET_DEV_DESC 0x0107
+#define TEST_SINGLE_STEP_SET_FEATURE 0x0108
+
+static int ehset_probe(struct usb_interface *intf,
+ const struct usb_device_id *id)
+{
+ int ret = -EINVAL;
+ struct usb_device *dev = interface_to_usbdev(intf);
+ struct usb_device *hub_udev = dev->parent;
+ struct usb_device_descriptor *buf;
+ u8 portnum = dev->portnum;
+ u16 test_pid = le16_to_cpu(dev->descriptor.idProduct);
+
+ switch (test_pid) {
+ case TEST_SE0_NAK_PID:
+ ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
+ USB_REQ_SET_FEATURE, USB_RT_PORT,
+ USB_PORT_FEAT_TEST,
+ (TEST_SE0_NAK << 8) | portnum,
+ NULL, 0, 1000);
+ break;
+ case TEST_J_PID:
+ ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
+ USB_REQ_SET_FEATURE, USB_RT_PORT,
+ USB_PORT_FEAT_TEST,
+ (TEST_J << 8) | portnum,
+ NULL, 0, 1000);
+ break;
+ case TEST_K_PID:
+ ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
+ USB_REQ_SET_FEATURE, USB_RT_PORT,
+ USB_PORT_FEAT_TEST,
+ (TEST_K << 8) | portnum,
+ NULL, 0, 1000);
+ break;
+ case TEST_PACKET_PID:
+ ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
+ USB_REQ_SET_FEATURE, USB_RT_PORT,
+ USB_PORT_FEAT_TEST,
+ (TEST_PACKET << 8) | portnum,
+ NULL, 0, 1000);
+ break;
+ case TEST_HS_HOST_PORT_SUSPEND_RESUME:
+ /* Test: wait for 15secs -> suspend -> 15secs delay -> resume */
+ msleep(15 * 1000);
+ ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
+ USB_REQ_SET_FEATURE, USB_RT_PORT,
+ USB_PORT_FEAT_SUSPEND, portnum,
+ NULL, 0, 1000);
+ if (ret < 0)
+ break;
+
+ msleep(15 * 1000);
+ ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
+ USB_REQ_CLEAR_FEATURE, USB_RT_PORT,
+ USB_PORT_FEAT_SUSPEND, portnum,
+ NULL, 0, 1000);
+ break;
+ case TEST_SINGLE_STEP_GET_DEV_DESC:
+ /* Test: wait for 15secs -> GetDescriptor request */
+ msleep(15 * 1000);
+ buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ USB_REQ_GET_DESCRIPTOR, USB_DIR_IN,
+ USB_DT_DEVICE << 8, 0,
+ buf, USB_DT_DEVICE_SIZE,
+ USB_CTRL_GET_TIMEOUT);
+ kfree(buf);
+ break;
+ case TEST_SINGLE_STEP_SET_FEATURE:
+ /*
+ * GetDescriptor SETUP request -> 15secs delay -> IN & STATUS
+ *
+ * Note, this test is only supported on root hubs since the
+ * SetPortFeature handling can only be done inside the HCD's
+ * hub_control callback function.
+ */
+ if (hub_udev != dev->bus->root_hub) {
+ dev_err(&intf->dev, "SINGLE_STEP_SET_FEATURE test only supported on root hub\n");
+ break;
+ }
+
+ ret = usb_control_msg(hub_udev, usb_sndctrlpipe(hub_udev, 0),
+ USB_REQ_SET_FEATURE, USB_RT_PORT,
+ USB_PORT_FEAT_TEST,
+ (6 << 8) | portnum,
+ NULL, 0, 60 * 1000);
+
+ break;
+ default:
+ dev_err(&intf->dev, "%s: unsupported PID: 0x%x\n",
+ __func__, test_pid);
+ }
+
+ return (ret < 0) ? ret : 0;
+}
+
+static void ehset_disconnect(struct usb_interface *intf)
+{
+}
+
+static const struct usb_device_id ehset_id_table[] = {
+ { USB_DEVICE(0x1a0a, TEST_SE0_NAK_PID) },
+ { USB_DEVICE(0x1a0a, TEST_J_PID) },
+ { USB_DEVICE(0x1a0a, TEST_K_PID) },
+ { USB_DEVICE(0x1a0a, TEST_PACKET_PID) },
+ { USB_DEVICE(0x1a0a, TEST_HS_HOST_PORT_SUSPEND_RESUME) },
+ { USB_DEVICE(0x1a0a, TEST_SINGLE_STEP_GET_DEV_DESC) },
+ { USB_DEVICE(0x1a0a, TEST_SINGLE_STEP_SET_FEATURE) },
+ { } /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(usb, ehset_id_table);
+
+static struct usb_driver ehset_driver = {
+ .name = "usb_ehset_test",
+ .probe = ehset_probe,
+ .disconnect = ehset_disconnect,
+ .id_table = ehset_id_table,
+};
+
+module_usb_driver(ehset_driver);
+
+MODULE_DESCRIPTION("USB Driver for EHSET Test Fixture");
+MODULE_LICENSE("GPL v2");
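
ehset_probe() above puts the test selector in the high byte of wIndex and the port number in the low byte, which is how the hub-class SetPortFeature(PORT_TEST) request encodes its target. A small helper expressing that encoding, shown only as an illustration and not part of the patch:

/* Illustration: build wIndex for SetPortFeature(PORT_TEST).
 * High byte = test selector (TEST_J, TEST_K, ...), low byte = port number. */
static inline u16 ehset_test_windex(u8 test_selector, u8 portnum)
{
	return (test_selector << 8) | portnum;
}
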
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index ac762299eaa..b1d59532ac2 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -129,19 +129,6 @@ MODULE_DESCRIPTION("LD USB Driver");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("LD USB Devices");
-#ifdef CONFIG_USB_DEBUG
- static int debug = 1;
-#else
- static int debug = 0;
-#endif
-
-/* Use our own dbg macro */
-#define dbg_info(dev, format, arg...) do { if (debug) dev_info(dev , format , ## arg); } while (0)
-
-/* Module parameters */
-module_param(debug, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug enabled or not");
-
/* All interrupt in transfers are collected in a ring buffer to
* avoid racing conditions and get better performance of the driver.
*/
@@ -256,8 +243,9 @@ static void ld_usb_interrupt_in_callback(struct urb *urb)
status == -ESHUTDOWN) {
goto exit;
} else {
- dbg_info(&dev->intf->dev, "%s: nonzero status received: %d\n",
- __func__, status);
+ dev_dbg(&dev->intf->dev,
+ "%s: nonzero status received: %d\n", __func__,
+ status);
spin_lock(&dev->rbsl);
goto resubmit; /* maybe we can recover */
}
@@ -272,8 +260,8 @@ static void ld_usb_interrupt_in_callback(struct urb *urb)
*actual_buffer = urb->actual_length;
memcpy(actual_buffer+1, dev->interrupt_in_buffer, urb->actual_length);
dev->ring_head = next_ring_head;
- dbg_info(&dev->intf->dev, "%s: received %d bytes\n",
- __func__, urb->actual_length);
+ dev_dbg(&dev->intf->dev, "%s: received %d bytes\n",
+ __func__, urb->actual_length);
} else {
dev_warn(&dev->intf->dev,
"Ring buffer overflow, %d bytes dropped\n",
@@ -310,9 +298,9 @@ static void ld_usb_interrupt_out_callback(struct urb *urb)
if (status && !(status == -ENOENT ||
status == -ECONNRESET ||
status == -ESHUTDOWN))
- dbg_info(&dev->intf->dev,
- "%s - nonzero write interrupt status received: %d\n",
- __func__, status);
+ dev_dbg(&dev->intf->dev,
+ "%s - nonzero write interrupt status received: %d\n",
+ __func__, status);
dev->interrupt_out_busy = 0;
wake_up_interruptible(&dev->write_wait);
@@ -585,7 +573,8 @@ static ssize_t ld_usb_write(struct file *file, const char __user *buffer,
bytes_to_write = min(count, write_buffer_size*dev->interrupt_out_endpoint_size);
if (bytes_to_write < count)
dev_warn(&dev->intf->dev, "Write buffer overflow, %zd bytes dropped\n",count-bytes_to_write);
- dbg_info(&dev->intf->dev, "%s: count = %zd, bytes_to_write = %zd\n", __func__, count, bytes_to_write);
+ dev_dbg(&dev->intf->dev, "%s: count = %zd, bytes_to_write = %zd\n",
+ __func__, count, bytes_to_write);
if (copy_from_user(dev->interrupt_out_buffer, buffer, bytes_to_write)) {
retval = -EFAULT;
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 80894791c02..eb37c954205 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -75,6 +75,8 @@
* - move reset into open to clean out spurious data
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -87,28 +89,11 @@
#include <linux/poll.h>
-#ifdef CONFIG_USB_DEBUG
- static int debug = 4;
-#else
- static int debug = 0;
-#endif
-
-/* Use our own dbg macro */
-#undef dbg
-#define dbg(lvl, format, arg...) \
-do { \
- if (debug >= lvl) \
- printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \
-} while (0)
-
/* Version Information */
#define DRIVER_VERSION "v0.96"
#define DRIVER_AUTHOR "Juergen Stuber <starblue@sourceforge.net>"
#define DRIVER_DESC "LEGO USB Tower Driver"
-/* Module parameters */
-module_param(debug, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug enabled or not");
/* The defaults are chosen to work with the latest versions of leJOS and NQC.
*/
@@ -298,18 +283,12 @@ static struct usb_driver tower_driver = {
/**
* lego_usb_tower_debug_data
*/
-static inline void lego_usb_tower_debug_data (int level, const char *function, int size, const unsigned char *data)
+static inline void lego_usb_tower_debug_data(struct device *dev,
+ const char *function, int size,
+ const unsigned char *data)
{
- int i;
-
- if (debug < level)
- return;
-
- printk (KERN_DEBUG "%s: %s - length = %d, data = ", __FILE__, function, size);
- for (i = 0; i < size; ++i) {
- printk ("%.2x ", data[i]);
- }
- printk ("\n");
+ dev_dbg(dev, "%s - length = %d, data = %*ph\n",
+ function, size, size, data);
}
@@ -318,8 +297,6 @@ static inline void lego_usb_tower_debug_data (int level, const char *function, i
*/
static inline void tower_delete (struct lego_usb_tower *dev)
{
- dbg(2, "%s: enter", __func__);
-
tower_abort_transfers (dev);
/* free data structures */
@@ -329,8 +306,6 @@ static inline void tower_delete (struct lego_usb_tower *dev)
kfree (dev->interrupt_in_buffer);
kfree (dev->interrupt_out_buffer);
kfree (dev);
-
- dbg(2, "%s: leave", __func__);
}
@@ -346,16 +321,13 @@ static int tower_open (struct inode *inode, struct file *file)
struct tower_reset_reply reset_reply;
int result;
- dbg(2, "%s: enter", __func__);
-
nonseekable_open(inode, file);
subminor = iminor(inode);
interface = usb_find_interface (&tower_driver, subminor);
if (!interface) {
- printk(KERN_ERR "%s - error, can't find device for minor %d\n",
- __func__, subminor);
+ pr_err("error, can't find device for minor %d\n", subminor);
retval = -ENODEV;
goto exit;
}
@@ -435,8 +407,6 @@ unlock_exit:
mutex_unlock(&dev->lock);
exit:
- dbg(2, "%s: leave, return value %d ", __func__, retval);
-
return retval;
}
@@ -448,12 +418,9 @@ static int tower_release (struct inode *inode, struct file *file)
struct lego_usb_tower *dev;
int retval = 0;
- dbg(2, "%s: enter", __func__);
-
dev = file->private_data;
if (dev == NULL) {
- dbg(1, "%s: object is NULL", __func__);
retval = -ENODEV;
goto exit_nolock;
}
@@ -465,7 +432,8 @@ static int tower_release (struct inode *inode, struct file *file)
}
if (dev->open_count != 1) {
- dbg(1, "%s: device not opened exactly once", __func__);
+ dev_dbg(&dev->udev->dev, "%s: device not opened exactly once\n",
+ __func__);
retval = -ENODEV;
goto unlock_exit;
}
@@ -491,7 +459,6 @@ unlock_exit:
exit:
mutex_unlock(&open_disc_mutex);
exit_nolock:
- dbg(2, "%s: leave, return value %d", __func__, retval);
return retval;
}
@@ -502,12 +469,8 @@ exit_nolock:
*/
static void tower_abort_transfers (struct lego_usb_tower *dev)
{
- dbg(2, "%s: enter", __func__);
-
- if (dev == NULL) {
- dbg(1, "%s: dev is null", __func__);
- goto exit;
- }
+ if (dev == NULL)
+ return;
/* shutdown transfer */
if (dev->interrupt_in_running) {
@@ -518,9 +481,6 @@ static void tower_abort_transfers (struct lego_usb_tower *dev)
}
if (dev->interrupt_out_busy && dev->udev)
usb_kill_urb(dev->interrupt_out_urb);
-
-exit:
- dbg(2, "%s: leave", __func__);
}
@@ -553,8 +513,6 @@ static unsigned int tower_poll (struct file *file, poll_table *wait)
struct lego_usb_tower *dev;
unsigned int mask = 0;
- dbg(2, "%s: enter", __func__);
-
dev = file->private_data;
if (!dev->udev)
@@ -571,8 +529,6 @@ static unsigned int tower_poll (struct file *file, poll_table *wait)
mask |= POLLOUT | POLLWRNORM;
}
- dbg(2, "%s: leave, mask = %d", __func__, mask);
-
return mask;
}
@@ -597,8 +553,6 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
int retval = 0;
unsigned long timeout = 0;
- dbg(2, "%s: enter, count = %Zd", __func__, count);
-
dev = file->private_data;
/* lock this object */
@@ -610,13 +564,13 @@ static ssize_t tower_read (struct file *file, char __user *buffer, size_t count,
/* verify that the device wasn't unplugged */
if (dev->udev == NULL) {
retval = -ENODEV;
- printk(KERN_ERR "legousbtower: No device or device unplugged %d\n", retval);
+ pr_err("No device or device unplugged %d\n", retval);
goto unlock_exit;
}
/* verify that we actually have some data to read */
if (count == 0) {
- dbg(1, "%s: read request of 0 bytes", __func__);
+ dev_dbg(&dev->udev->dev, "read request of 0 bytes\n");
goto unlock_exit;
}
@@ -672,7 +626,6 @@ unlock_exit:
mutex_unlock(&dev->lock);
exit:
- dbg(2, "%s: leave, return value %d", __func__, retval);
return retval;
}
@@ -686,8 +639,6 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
size_t bytes_to_write;
int retval = 0;
- dbg(2, "%s: enter, count = %Zd", __func__, count);
-
dev = file->private_data;
/* lock this object */
@@ -699,13 +650,13 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
/* verify that the device wasn't unplugged */
if (dev->udev == NULL) {
retval = -ENODEV;
- printk(KERN_ERR "legousbtower: No device or device unplugged %d\n", retval);
+ pr_err("No device or device unplugged %d\n", retval);
goto unlock_exit;
}
/* verify that we actually have some data to write */
if (count == 0) {
- dbg(1, "%s: write request of 0 bytes", __func__);
+ dev_dbg(&dev->udev->dev, "write request of 0 bytes\n");
goto unlock_exit;
}
@@ -723,7 +674,8 @@ static ssize_t tower_write (struct file *file, const char __user *buffer, size_t
/* write the data into interrupt_out_buffer from userspace */
bytes_to_write = min_t(int, count, write_buffer_size);
- dbg(4, "%s: count = %Zd, bytes_to_write = %Zd", __func__, count, bytes_to_write);
+ dev_dbg(&dev->udev->dev, "%s: count = %Zd, bytes_to_write = %Zd\n",
+ __func__, count, bytes_to_write);
if (copy_from_user (dev->interrupt_out_buffer, buffer, bytes_to_write)) {
retval = -EFAULT;
@@ -757,8 +709,6 @@ unlock_exit:
mutex_unlock(&dev->lock);
exit:
- dbg(2, "%s: leave, return value %d", __func__, retval);
-
return retval;
}
@@ -772,9 +722,8 @@ static void tower_interrupt_in_callback (struct urb *urb)
int status = urb->status;
int retval;
- dbg(4, "%s: enter, status %d", __func__, status);
-
- lego_usb_tower_debug_data(5, __func__, urb->actual_length, urb->transfer_buffer);
+ lego_usb_tower_debug_data(&dev->udev->dev, __func__,
+ urb->actual_length, urb->transfer_buffer);
if (status) {
if (status == -ENOENT ||
@@ -782,7 +731,9 @@ static void tower_interrupt_in_callback (struct urb *urb)
status == -ESHUTDOWN) {
goto exit;
} else {
- dbg(1, "%s: nonzero status received: %d", __func__, status);
+ dev_dbg(&dev->udev->dev,
+ "%s: nonzero status received: %d\n", __func__,
+ status);
goto resubmit; /* maybe we can recover */
}
}
@@ -795,9 +746,11 @@ static void tower_interrupt_in_callback (struct urb *urb)
urb->actual_length);
dev->read_buffer_length += urb->actual_length;
dev->read_last_arrival = jiffies;
- dbg(3, "%s: received %d bytes", __func__, urb->actual_length);
+ dev_dbg(&dev->udev->dev, "%s: received %d bytes\n",
+ __func__, urb->actual_length);
} else {
- printk(KERN_WARNING "%s: read_buffer overflow, %d bytes dropped", __func__, urb->actual_length);
+ pr_warn("read_buffer overflow, %d bytes dropped\n",
+ urb->actual_length);
}
spin_unlock (&dev->read_buffer_lock);
}
@@ -815,9 +768,6 @@ resubmit:
exit:
dev->interrupt_in_done = 1;
wake_up_interruptible (&dev->read_wait);
-
- lego_usb_tower_debug_data(5, __func__, urb->actual_length, urb->transfer_buffer);
- dbg(4, "%s: leave, status %d", __func__, status);
}
@@ -829,22 +779,20 @@ static void tower_interrupt_out_callback (struct urb *urb)
struct lego_usb_tower *dev = urb->context;
int status = urb->status;
- dbg(4, "%s: enter, status %d", __func__, status);
- lego_usb_tower_debug_data(5, __func__, urb->actual_length, urb->transfer_buffer);
+ lego_usb_tower_debug_data(&dev->udev->dev, __func__,
+ urb->actual_length, urb->transfer_buffer);
/* sync/async unlink faults aren't errors */
if (status && !(status == -ENOENT ||
status == -ECONNRESET ||
status == -ESHUTDOWN)) {
- dbg(1, "%s - nonzero write bulk status received: %d",
- __func__, status);
+ dev_dbg(&dev->udev->dev,
+ "%s: nonzero write bulk status received: %d\n", __func__,
+ status);
}
dev->interrupt_out_busy = 0;
wake_up_interruptible(&dev->write_wait);
-
- lego_usb_tower_debug_data(5, __func__, urb->actual_length, urb->transfer_buffer);
- dbg(4, "%s: leave, status %d", __func__, status);
}
@@ -866,8 +814,6 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
int retval = -ENOMEM;
int result;
- dbg(2, "%s: enter", __func__);
-
/* allocate memory for our device state and initialize it */
dev = kmalloc (sizeof(struct lego_usb_tower), GFP_KERNEL);
@@ -993,8 +939,6 @@ static int tower_probe (struct usb_interface *interface, const struct usb_device
exit:
- dbg(2, "%s: leave, return value 0x%.8lx (dev)", __func__, (long) dev);
-
return retval;
error:
@@ -1013,8 +957,6 @@ static void tower_disconnect (struct usb_interface *interface)
struct lego_usb_tower *dev;
int minor;
- dbg(2, "%s: enter", __func__);
-
dev = usb_get_intfdata (interface);
mutex_lock(&open_disc_mutex);
usb_set_intfdata (interface, NULL);
@@ -1041,8 +983,6 @@ static void tower_disconnect (struct usb_interface *interface)
dev_info(&interface->dev, "LEGO USB Tower #%d now disconnected\n",
(minor - LEGO_USB_TOWER_MINOR_BASE));
-
- dbg(2, "%s: leave", __func__);
}
module_usb_driver(tower_driver);
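
The legousbtower hunks above drop the driver's private dbg() macro and byte-dump helper in favour of the core dev_dbg() facility and the %*ph printk extension, which hex-dumps a small buffer without a manual loop. A minimal sketch of that usage follows; the caller and buffer names are hypothetical, not taken from the patch:

#include <linux/device.h>
#include <linux/types.h>

/* Illustrative only: print 'len' bytes of 'buf' as space-separated hex.
 * %*ph takes the length as an int argument and is meant for small buffers. */
static void example_dump(struct device *dev, const u8 *buf, int len)
{
	dev_dbg(dev, "%s - length = %d, data = %*ph\n", __func__, len, len, buf);
}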
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index c21386ec5d3..de98906f786 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3247,6 +3247,7 @@ static const struct usb_device_id sisusb_table[] = {
{ USB_DEVICE(0x0711, 0x0903) },
{ USB_DEVICE(0x0711, 0x0918) },
{ USB_DEVICE(0x0711, 0x0920) },
+ { USB_DEVICE(0x0711, 0x0950) },
{ USB_DEVICE(0x182d, 0x021c) },
{ USB_DEVICE(0x182d, 0x0269) },
{ }
diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c
index c3578393dde..a31641e18d1 100644
--- a/drivers/usb/misc/usb3503.c
+++ b/drivers/usb/misc/usb3503.c
@@ -26,6 +26,7 @@
#include <linux/of_gpio.h>
#include <linux/platform_device.h>
#include <linux/platform_data/usb3503.h>
+#include <linux/regmap.h>
#define USB3503_VIDL 0x00
#define USB3503_VIDM 0x01
@@ -50,60 +51,25 @@
#define USB3503_CFGP 0xee
#define USB3503_CLKSUSP (1 << 7)
+#define USB3503_RESET 0xff
+
struct usb3503 {
enum usb3503_mode mode;
- struct i2c_client *client;
+ struct regmap *regmap;
+ struct device *dev;
u8 port_off_mask;
int gpio_intn;
int gpio_reset;
int gpio_connect;
};
-static int usb3503_write_register(struct i2c_client *client,
- char reg, char data)
-{
- return i2c_smbus_write_byte_data(client, reg, data);
-}
-
-static int usb3503_read_register(struct i2c_client *client, char reg)
-{
- return i2c_smbus_read_byte_data(client, reg);
-}
-
-static int usb3503_set_bits(struct i2c_client *client, char reg, char req)
+static int usb3503_reset(struct usb3503 *hub, int state)
{
- int err;
-
- err = usb3503_read_register(client, reg);
- if (err < 0)
- return err;
-
- err = usb3503_write_register(client, reg, err | req);
- if (err < 0)
- return err;
-
- return 0;
-}
-
-static int usb3503_clear_bits(struct i2c_client *client, char reg, char req)
-{
- int err;
-
- err = usb3503_read_register(client, reg);
- if (err < 0)
- return err;
-
- err = usb3503_write_register(client, reg, err & ~req);
- if (err < 0)
- return err;
-
- return 0;
-}
+ if (!state && gpio_is_valid(hub->gpio_connect))
+ gpio_set_value_cansleep(hub->gpio_connect, 0);
-static int usb3503_reset(int gpio_reset, int state)
-{
- if (gpio_is_valid(gpio_reset))
- gpio_set_value(gpio_reset, state);
+ if (gpio_is_valid(hub->gpio_reset))
+ gpio_set_value_cansleep(hub->gpio_reset, state);
/* Wait T_HUBINIT == 4ms for hub logic to stabilize */
if (state)
@@ -112,90 +78,105 @@ static int usb3503_reset(int gpio_reset, int state)
return 0;
}
-static int usb3503_switch_mode(struct usb3503 *hub, enum usb3503_mode mode)
+static int usb3503_connect(struct usb3503 *hub)
{
- struct i2c_client *i2c = hub->client;
- int err = 0;
+ struct device *dev = hub->dev;
+ int err;
- switch (mode) {
- case USB3503_MODE_HUB:
- usb3503_reset(hub->gpio_reset, 1);
+ usb3503_reset(hub, 1);
+ if (hub->regmap) {
/* SP_ILOCK: set connect_n, config_n for config */
- err = usb3503_write_register(i2c, USB3503_SP_ILOCK,
- (USB3503_SPILOCK_CONNECT
+ err = regmap_write(hub->regmap, USB3503_SP_ILOCK,
+ (USB3503_SPILOCK_CONNECT
| USB3503_SPILOCK_CONFIG));
if (err < 0) {
- dev_err(&i2c->dev, "SP_ILOCK failed (%d)\n", err);
- goto err_hubmode;
+ dev_err(dev, "SP_ILOCK failed (%d)\n", err);
+ return err;
}
/* PDS : Disable For Self Powered Operation */
if (hub->port_off_mask) {
- err = usb3503_set_bits(i2c, USB3503_PDS,
+ err = regmap_update_bits(hub->regmap, USB3503_PDS,
+ hub->port_off_mask,
hub->port_off_mask);
if (err < 0) {
- dev_err(&i2c->dev, "PDS failed (%d)\n", err);
- goto err_hubmode;
+ dev_err(dev, "PDS failed (%d)\n", err);
+ return err;
}
}
	/* CFG1 : SELF_BUS_PWR -> Self-Powered operation */
- err = usb3503_set_bits(i2c, USB3503_CFG1, USB3503_SELF_BUS_PWR);
+ err = regmap_update_bits(hub->regmap, USB3503_CFG1,
+ USB3503_SELF_BUS_PWR,
+ USB3503_SELF_BUS_PWR);
if (err < 0) {
- dev_err(&i2c->dev, "CFG1 failed (%d)\n", err);
- goto err_hubmode;
+ dev_err(dev, "CFG1 failed (%d)\n", err);
+ return err;
}
/* SP_LOCK: clear connect_n, config_n for hub connect */
- err = usb3503_clear_bits(i2c, USB3503_SP_ILOCK,
- (USB3503_SPILOCK_CONNECT
- | USB3503_SPILOCK_CONFIG));
+ err = regmap_update_bits(hub->regmap, USB3503_SP_ILOCK,
+ (USB3503_SPILOCK_CONNECT
+ | USB3503_SPILOCK_CONFIG), 0);
if (err < 0) {
- dev_err(&i2c->dev, "SP_ILOCK failed (%d)\n", err);
- goto err_hubmode;
+ dev_err(dev, "SP_ILOCK failed (%d)\n", err);
+ return err;
}
+ }
- hub->mode = mode;
- dev_info(&i2c->dev, "switched to HUB mode\n");
+ if (gpio_is_valid(hub->gpio_connect))
+ gpio_set_value_cansleep(hub->gpio_connect, 1);
+
+ hub->mode = USB3503_MODE_HUB;
+ dev_info(dev, "switched to HUB mode\n");
+
+ return 0;
+}
+
+static int usb3503_switch_mode(struct usb3503 *hub, enum usb3503_mode mode)
+{
+ struct device *dev = hub->dev;
+ int err = 0;
+
+ switch (mode) {
+ case USB3503_MODE_HUB:
+ err = usb3503_connect(hub);
break;
case USB3503_MODE_STANDBY:
- usb3503_reset(hub->gpio_reset, 0);
+ usb3503_reset(hub, 0);
hub->mode = mode;
- dev_info(&i2c->dev, "switched to STANDBY mode\n");
+ dev_info(dev, "switched to STANDBY mode\n");
break;
default:
- dev_err(&i2c->dev, "unknown mode is request\n");
+ dev_err(dev, "unknown mode is requested\n");
err = -EINVAL;
break;
}
-err_hubmode:
return err;
}
-static int usb3503_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
+static const struct regmap_config usb3503_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = USB3503_RESET,
+};
+
+static int usb3503_probe(struct usb3503 *hub)
{
- struct usb3503_platform_data *pdata = i2c->dev.platform_data;
- struct device_node *np = i2c->dev.of_node;
- struct usb3503 *hub;
- int err = -ENOMEM;
- u32 mode = USB3503_MODE_UNKNOWN;
+ struct device *dev = hub->dev;
+ struct usb3503_platform_data *pdata = dev_get_platdata(dev);
+ struct device_node *np = dev->of_node;
+ int err;
+ u32 mode = USB3503_MODE_HUB;
const u32 *property;
int len;
- hub = kzalloc(sizeof(struct usb3503), GFP_KERNEL);
- if (!hub) {
- dev_err(&i2c->dev, "private data alloc fail\n");
- return err;
- }
-
- i2c_set_clientdata(i2c, hub);
- hub->client = i2c;
-
if (pdata) {
hub->port_off_mask = pdata->port_off_mask;
hub->gpio_intn = pdata->gpio_intn;
@@ -215,10 +196,10 @@ static int usb3503_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
}
}
- hub->gpio_intn = of_get_named_gpio(np, "connect-gpios", 0);
+ hub->gpio_intn = of_get_named_gpio(np, "intn-gpios", 0);
if (hub->gpio_intn == -EPROBE_DEFER)
return -EPROBE_DEFER;
- hub->gpio_connect = of_get_named_gpio(np, "intn-gpios", 0);
+ hub->gpio_connect = of_get_named_gpio(np, "connect-gpios", 0);
if (hub->gpio_connect == -EPROBE_DEFER)
return -EPROBE_DEFER;
hub->gpio_reset = of_get_named_gpio(np, "reset-gpios", 0);
@@ -228,72 +209,86 @@ static int usb3503_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
hub->mode = mode;
}
+ if (hub->port_off_mask && !hub->regmap)
+ dev_err(dev, "Ports disabled with no control interface\n");
+
if (gpio_is_valid(hub->gpio_intn)) {
- err = gpio_request_one(hub->gpio_intn,
+ err = devm_gpio_request_one(dev, hub->gpio_intn,
GPIOF_OUT_INIT_HIGH, "usb3503 intn");
if (err) {
- dev_err(&i2c->dev,
- "unable to request GPIO %d as connect pin (%d)\n",
- hub->gpio_intn, err);
- goto err_out;
+ dev_err(dev,
+				"unable to request GPIO %d as intn pin (%d)\n",
+ hub->gpio_intn, err);
+ return err;
}
}
if (gpio_is_valid(hub->gpio_connect)) {
- err = gpio_request_one(hub->gpio_connect,
- GPIOF_OUT_INIT_HIGH, "usb3503 connect");
+ err = devm_gpio_request_one(dev, hub->gpio_connect,
+ GPIOF_OUT_INIT_LOW, "usb3503 connect");
if (err) {
- dev_err(&i2c->dev,
- "unable to request GPIO %d as connect pin (%d)\n",
- hub->gpio_connect, err);
- goto err_gpio_connect;
+ dev_err(dev,
+ "unable to request GPIO %d as connect pin (%d)\n",
+ hub->gpio_connect, err);
+ return err;
}
}
if (gpio_is_valid(hub->gpio_reset)) {
- err = gpio_request_one(hub->gpio_reset,
+ err = devm_gpio_request_one(dev, hub->gpio_reset,
GPIOF_OUT_INIT_LOW, "usb3503 reset");
if (err) {
- dev_err(&i2c->dev,
- "unable to request GPIO %d as reset pin (%d)\n",
- hub->gpio_reset, err);
- goto err_gpio_reset;
+ dev_err(dev,
+ "unable to request GPIO %d as reset pin (%d)\n",
+ hub->gpio_reset, err);
+ return err;
}
}
usb3503_switch_mode(hub, hub->mode);
- dev_info(&i2c->dev, "%s: probed on %s mode\n", __func__,
+ dev_info(dev, "%s: probed in %s mode\n", __func__,
(hub->mode == USB3503_MODE_HUB) ? "hub" : "standby");
return 0;
+}
-err_gpio_reset:
- if (gpio_is_valid(hub->gpio_connect))
- gpio_free(hub->gpio_connect);
-err_gpio_connect:
- if (gpio_is_valid(hub->gpio_intn))
- gpio_free(hub->gpio_intn);
-err_out:
- kfree(hub);
+static int usb3503_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct usb3503 *hub;
+ int err;
- return err;
+ hub = devm_kzalloc(&i2c->dev, sizeof(struct usb3503), GFP_KERNEL);
+ if (!hub) {
+ dev_err(&i2c->dev, "private data alloc fail\n");
+ return -ENOMEM;
+ }
+
+ i2c_set_clientdata(i2c, hub);
+ hub->regmap = devm_regmap_init_i2c(i2c, &usb3503_regmap_config);
+ if (IS_ERR(hub->regmap)) {
+ err = PTR_ERR(hub->regmap);
+ dev_err(&i2c->dev, "Failed to initialise regmap: %d\n", err);
+ return err;
+ }
+ hub->dev = &i2c->dev;
+
+ return usb3503_probe(hub);
}
-static int usb3503_remove(struct i2c_client *i2c)
+static int usb3503_platform_probe(struct platform_device *pdev)
{
- struct usb3503 *hub = i2c_get_clientdata(i2c);
-
- if (gpio_is_valid(hub->gpio_intn))
- gpio_free(hub->gpio_intn);
- if (gpio_is_valid(hub->gpio_connect))
- gpio_free(hub->gpio_connect);
- if (gpio_is_valid(hub->gpio_reset))
- gpio_free(hub->gpio_reset);
+ struct usb3503 *hub;
- kfree(hub);
+ hub = devm_kzalloc(&pdev->dev, sizeof(struct usb3503), GFP_KERNEL);
+ if (!hub) {
+ dev_err(&pdev->dev, "private data alloc fail\n");
+ return -ENOMEM;
+ }
+ hub->dev = &pdev->dev;
- return 0;
+ return usb3503_probe(hub);
}
static const struct i2c_device_id usb3503_id[] = {
@@ -305,22 +300,53 @@ MODULE_DEVICE_TABLE(i2c, usb3503_id);
#ifdef CONFIG_OF
static const struct of_device_id usb3503_of_match[] = {
{ .compatible = "smsc,usb3503", },
+ { .compatible = "smsc,usb3503a", },
{},
};
MODULE_DEVICE_TABLE(of, usb3503_of_match);
#endif
-static struct i2c_driver usb3503_driver = {
+static struct i2c_driver usb3503_i2c_driver = {
.driver = {
.name = USB3503_I2C_NAME,
.of_match_table = of_match_ptr(usb3503_of_match),
},
- .probe = usb3503_probe,
- .remove = usb3503_remove,
+ .probe = usb3503_i2c_probe,
.id_table = usb3503_id,
};
-module_i2c_driver(usb3503_driver);
+static struct platform_driver usb3503_platform_driver = {
+ .driver = {
+ .name = USB3503_I2C_NAME,
+ .of_match_table = of_match_ptr(usb3503_of_match),
+ .owner = THIS_MODULE,
+ },
+ .probe = usb3503_platform_probe,
+};
+
+static int __init usb3503_init(void)
+{
+ int err;
+
+ err = i2c_register_driver(THIS_MODULE, &usb3503_i2c_driver);
+ if (err != 0)
+ pr_err("usb3503: Failed to register I2C driver: %d\n", err);
+
+ err = platform_driver_register(&usb3503_platform_driver);
+ if (err != 0)
+ pr_err("usb3503: Failed to register platform driver: %d\n",
+ err);
+
+ return 0;
+}
+module_init(usb3503_init);
+
+static void __exit usb3503_exit(void)
+{
+ platform_driver_unregister(&usb3503_platform_driver);
+ i2c_del_driver(&usb3503_i2c_driver);
+}
+module_exit(usb3503_exit);
MODULE_AUTHOR("Dongjin Kim <tobetter@gmail.com>");
MODULE_DESCRIPTION("USB3503 USB HUB driver");
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 8b4ca1cb450..aa28ac8c760 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -747,9 +747,9 @@ static int ch9_postconfig(struct usbtest_dev *dev)
/* [9.4.5] get_status always works */
retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
- if (retval != 2) {
+ if (retval) {
dev_err(&iface->dev, "get dev status --> %d\n", retval);
- return (retval < 0) ? retval : -EDOM;
+ return retval;
}
/* FIXME configuration.bmAttributes says if we could try to set/clear
@@ -758,9 +758,9 @@ static int ch9_postconfig(struct usbtest_dev *dev)
retval = usb_get_status(udev, USB_RECIP_INTERFACE,
iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
- if (retval != 2) {
+ if (retval) {
dev_err(&iface->dev, "get interface status --> %d\n", retval);
- return (retval < 0) ? retval : -EDOM;
+ return retval;
}
/* FIXME get status for each endpoint in the interface */
@@ -1351,7 +1351,6 @@ static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
ep, retval);
return retval;
}
- le16_to_cpus(&status);
if (status != 1) {
ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
return -EINVAL;
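
The usbtest hunks above track a reworked usb_get_status() which, as these call sites suggest, returns 0 on success instead of the transferred length and hands back the status word already converted to CPU byte order, so callers drop both the "!= 2" check and le16_to_cpus(). A hedged sketch of the resulting caller pattern, with hypothetical names:

#include <linux/usb.h>
#include <linux/usb/ch9.h>

/* Illustrative only: query device status under the assumed new contract. */
static int example_check_self_powered(struct usb_device *udev)
{
	u16 status;	/* filled in CPU byte order by the core */
	int ret;

	ret = usb_get_status(udev, USB_RECIP_DEVICE, 0, &status);
	if (ret)	/* 0 on success, negative errno on failure */
		return ret;

	return !!(status & (1 << USB_DEVICE_SELF_POWERED));	/* bit 0 */
}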
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index e129cf66122..40ef40affe8 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -75,7 +75,7 @@ struct uss720_async_request {
struct list_head asynclist;
struct completion compl;
struct urb *urb;
- struct usb_ctrlrequest dr;
+ struct usb_ctrlrequest *dr;
__u8 reg[7];
};
@@ -98,6 +98,7 @@ static void destroy_async(struct kref *kref)
if (likely(rq->urb))
usb_free_urb(rq->urb);
+ kfree(rq->dr);
spin_lock_irqsave(&priv->asynclock, flags);
list_del_init(&rq->asynclist);
spin_unlock_irqrestore(&priv->asynclock, flags);
@@ -120,7 +121,7 @@ static void async_complete(struct urb *urb)
if (status) {
dev_err(&urb->dev->dev, "async_complete: urb error %d\n",
status);
- } else if (rq->dr.bRequest == 3) {
+ } else if (rq->dr->bRequest == 3) {
memcpy(priv->reg, rq->reg, sizeof(priv->reg));
#if 0
dev_dbg(&priv->usbdev->dev,
@@ -152,7 +153,7 @@ static struct uss720_async_request *submit_async_request(struct parport_uss720_p
usbdev = priv->usbdev;
if (!usbdev)
return NULL;
- rq = kmalloc(sizeof(struct uss720_async_request), mem_flags);
+ rq = kzalloc(sizeof(struct uss720_async_request), mem_flags);
if (!rq) {
dev_err(&usbdev->dev, "submit_async_request out of memory\n");
return NULL;
@@ -168,13 +169,18 @@ static struct uss720_async_request *submit_async_request(struct parport_uss720_p
dev_err(&usbdev->dev, "submit_async_request out of memory\n");
return NULL;
}
- rq->dr.bRequestType = requesttype;
- rq->dr.bRequest = request;
- rq->dr.wValue = cpu_to_le16(value);
- rq->dr.wIndex = cpu_to_le16(index);
- rq->dr.wLength = cpu_to_le16((request == 3) ? sizeof(rq->reg) : 0);
+ rq->dr = kmalloc(sizeof(*rq->dr), mem_flags);
+ if (!rq->dr) {
+ kref_put(&rq->ref_count, destroy_async);
+ return NULL;
+ }
+ rq->dr->bRequestType = requesttype;
+ rq->dr->bRequest = request;
+ rq->dr->wValue = cpu_to_le16(value);
+ rq->dr->wIndex = cpu_to_le16(index);
+ rq->dr->wLength = cpu_to_le16((request == 3) ? sizeof(rq->reg) : 0);
usb_fill_control_urb(rq->urb, usbdev, (requesttype & 0x80) ? usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0),
- (unsigned char *)&rq->dr,
+ (unsigned char *)rq->dr,
(request == 3) ? rq->reg : NULL, (request == 3) ? sizeof(rq->reg) : 0, async_complete, rq);
/* rq->urb->transfer_flags |= URB_ASYNC_UNLINK; */
spin_lock_irqsave(&priv->asynclock, flags);
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 797e3fd4551..c64ee09a7c0 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -83,6 +83,8 @@ config USB_MUSB_AM35X
config USB_MUSB_DSPS
tristate "TI DSPS platforms"
+ select USB_MUSB_AM335X_CHILD
+ depends on OF_IRQ
config USB_MUSB_BLACKFIN
tristate "Blackfin"
@@ -93,6 +95,9 @@ config USB_MUSB_UX500
endchoice
+config USB_MUSB_AM335X_CHILD
+ tristate
+
choice
prompt 'MUSB DMA mode'
default MUSB_PIO_ONLY if ARCH_MULTIPLATFORM
@@ -125,6 +130,10 @@ config USB_TI_CPPI_DMA
help
Enable DMA transfers when TI CPPI DMA is available.
+config USB_TI_CPPI41_DMA
+ bool 'TI CPPI 4.1 (AM335x)'
+ depends on ARCH_OMAP
+
config USB_TUSB_OMAP_DMA
bool 'TUSB 6010'
depends on USB_MUSB_TUSB6010
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index 2b82ed7c85c..c5ea5c6dc16 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -20,6 +20,9 @@ obj-$(CONFIG_USB_MUSB_DA8XX) += da8xx.o
obj-$(CONFIG_USB_MUSB_BLACKFIN) += blackfin.o
obj-$(CONFIG_USB_MUSB_UX500) += ux500.o
+
+obj-$(CONFIG_USB_MUSB_AM335X_CHILD) += musb_am335x.o
+
# the kconfig must guarantee that only one of the
# possible I/O schemes will be enabled at a time ...
# PIO only, or DMA (several potential schemes).
@@ -29,3 +32,4 @@ musb_hdrc-$(CONFIG_USB_INVENTRA_DMA) += musbhsdma.o
musb_hdrc-$(CONFIG_USB_TI_CPPI_DMA) += cppi_dma.o
musb_hdrc-$(CONFIG_USB_TUSB_OMAP_DMA) += tusb6010_omap.o
musb_hdrc-$(CONFIG_USB_UX500_DMA) += ux500_dma.o
+musb_hdrc-$(CONFIG_USB_TI_CPPI41_DMA) += musb_cppi41.o
diff --git a/drivers/usb/musb/am35x.c b/drivers/usb/musb/am35x.c
index 2231850c062..5c310c66421 100644
--- a/drivers/usb/musb/am35x.c
+++ b/drivers/usb/musb/am35x.c
@@ -33,7 +33,7 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
-#include <linux/usb/nop-usb-xceiv.h>
+#include <linux/usb/usb_phy_gen_xceiv.h>
#include <linux/platform_data/usb-omap.h>
#include "musb_core.h"
@@ -218,7 +218,7 @@ static irqreturn_t am35x_musb_interrupt(int irq, void *hci)
struct musb *musb = hci;
void __iomem *reg_base = musb->ctrl_base;
struct device *dev = musb->controller;
- struct musb_hdrc_platform_data *plat = dev->platform_data;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
struct omap_musb_board_data *data = plat->board_data;
struct usb_otg *otg = musb->xceiv->otg;
unsigned long flags;
@@ -335,7 +335,7 @@ eoi:
static int am35x_musb_set_mode(struct musb *musb, u8 musb_mode)
{
struct device *dev = musb->controller;
- struct musb_hdrc_platform_data *plat = dev->platform_data;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
struct omap_musb_board_data *data = plat->board_data;
int retval = 0;
@@ -350,7 +350,7 @@ static int am35x_musb_set_mode(struct musb *musb, u8 musb_mode)
static int am35x_musb_init(struct musb *musb)
{
struct device *dev = musb->controller;
- struct musb_hdrc_platform_data *plat = dev->platform_data;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
struct omap_musb_board_data *data = plat->board_data;
void __iomem *reg_base = musb->ctrl_base;
u32 rev;
@@ -394,7 +394,7 @@ static int am35x_musb_init(struct musb *musb)
static int am35x_musb_exit(struct musb *musb)
{
struct device *dev = musb->controller;
- struct musb_hdrc_platform_data *plat = dev->platform_data;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
struct omap_musb_board_data *data = plat->board_data;
del_timer_sync(&otg_workaround);
@@ -456,7 +456,7 @@ static u64 am35x_dmamask = DMA_BIT_MASK(32);
static int am35x_probe(struct platform_device *pdev)
{
- struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct platform_device *musb;
struct am35x_glue *glue;
@@ -577,7 +577,7 @@ static int am35x_remove(struct platform_device *pdev)
static int am35x_suspend(struct device *dev)
{
struct am35x_glue *glue = dev_get_drvdata(dev);
- struct musb_hdrc_platform_data *plat = dev->platform_data;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
struct omap_musb_board_data *data = plat->board_data;
/* Shutdown the on-chip PHY and its PLL. */
@@ -593,7 +593,7 @@ static int am35x_suspend(struct device *dev)
static int am35x_resume(struct device *dev)
{
struct am35x_glue *glue = dev_get_drvdata(dev);
- struct musb_hdrc_platform_data *plat = dev->platform_data;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
struct omap_musb_board_data *data = plat->board_data;
int ret;
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 6ba8439bd5a..72e2056b608 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -19,7 +19,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
-#include <linux/usb/nop-usb-xceiv.h>
+#include <linux/usb/usb_phy_gen_xceiv.h>
#include <asm/cacheflush.h>
@@ -451,7 +451,7 @@ static u64 bfin_dmamask = DMA_BIT_MASK(32);
static int bfin_probe(struct platform_device *pdev)
{
struct resource musb_resources[2];
- struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct platform_device *musb;
struct bfin_glue *glue;
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 9db211ee15b..904fb85d85a 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -150,14 +150,11 @@ static void cppi_pool_free(struct cppi_channel *c)
c->last_processed = NULL;
}
-static int cppi_controller_start(struct dma_controller *c)
+static void cppi_controller_start(struct cppi *controller)
{
- struct cppi *controller;
void __iomem *tibase;
int i;
- controller = container_of(c, struct cppi, controller);
-
/* do whatever is necessary to start controller */
for (i = 0; i < ARRAY_SIZE(controller->tx); i++) {
controller->tx[i].transmit = true;
@@ -212,8 +209,6 @@ static int cppi_controller_start(struct dma_controller *c)
/* disable RNDIS mode, also host rx RNDIS autorequest */
musb_writel(tibase, DAVINCI_RNDIS_REG, 0);
musb_writel(tibase, DAVINCI_AUTOREQ_REG, 0);
-
- return 0;
}
/*
@@ -222,14 +217,12 @@ static int cppi_controller_start(struct dma_controller *c)
* De-Init the DMA controller as necessary.
*/
-static int cppi_controller_stop(struct dma_controller *c)
+static void cppi_controller_stop(struct cppi *controller)
{
- struct cppi *controller;
void __iomem *tibase;
int i;
struct musb *musb;
- controller = container_of(c, struct cppi, controller);
musb = controller->musb;
tibase = controller->tibase;
@@ -255,8 +248,6 @@ static int cppi_controller_stop(struct dma_controller *c)
/*disable tx/rx cppi */
musb_writel(tibase, DAVINCI_TXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
musb_writel(tibase, DAVINCI_RXCPPI_CTRL_REG, DAVINCI_DMA_CTRL_DISABLE);
-
- return 0;
}
/* While dma channel is allocated, we only want the core irqs active
@@ -1321,8 +1312,6 @@ struct dma_controller *dma_controller_create(struct musb *musb, void __iomem *mr
controller->tibase = mregs - DAVINCI_BASE_OFFSET;
controller->musb = musb;
- controller->controller.start = cppi_controller_start;
- controller->controller.stop = cppi_controller_stop;
controller->controller.channel_alloc = cppi_channel_allocate;
controller->controller.channel_release = cppi_channel_release;
controller->controller.channel_program = cppi_channel_program;
@@ -1351,6 +1340,7 @@ struct dma_controller *dma_controller_create(struct musb *musb, void __iomem *mr
controller->irq = irq;
}
+ cppi_controller_start(controller);
return &controller->controller;
}
@@ -1363,6 +1353,8 @@ void dma_controller_destroy(struct dma_controller *c)
cppi = container_of(c, struct cppi, controller);
+ cppi_controller_stop(cppi);
+
if (cppi->irq)
free_irq(cppi->irq, cppi->musb);
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c
index 0da6f648a9f..d9ddf4122f3 100644
--- a/drivers/usb/musb/da8xx.c
+++ b/drivers/usb/musb/da8xx.c
@@ -33,7 +33,7 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
-#include <linux/usb/nop-usb-xceiv.h>
+#include <linux/usb/usb_phy_gen_xceiv.h>
#include <mach/da8xx.h>
#include <linux/platform_data/usb-davinci.h>
@@ -477,7 +477,7 @@ static u64 da8xx_dmamask = DMA_BIT_MASK(32);
static int da8xx_probe(struct platform_device *pdev)
{
struct resource musb_resources[2];
- struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct platform_device *musb;
struct da8xx_glue *glue;
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index f8aeaf2e2cd..ed0834e2b72 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -33,7 +33,7 @@
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
-#include <linux/usb/nop-usb-xceiv.h>
+#include <linux/usb/usb_phy_gen_xceiv.h>
#include <mach/cputype.h>
#include <mach/hardware.h>
@@ -510,7 +510,7 @@ static u64 davinci_dmamask = DMA_BIT_MASK(32);
static int davinci_probe(struct platform_device *pdev)
{
struct resource musb_resources[2];
- struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct platform_device *musb;
struct davinci_glue *glue;
struct clk *clk;
diff --git a/drivers/usb/musb/musb_am335x.c b/drivers/usb/musb/musb_am335x.c
new file mode 100644
index 00000000000..41ac5b5b57c
--- /dev/null
+++ b/drivers/usb/musb/musb_am335x.c
@@ -0,0 +1,55 @@
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+static int am335x_child_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (ret)
+ goto err;
+
+ return 0;
+err:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int of_remove_populated_child(struct device *dev, void *d)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+
+ of_device_unregister(pdev);
+ return 0;
+}
+
+static int am335x_child_remove(struct platform_device *pdev)
+{
+ device_for_each_child(&pdev->dev, NULL, of_remove_populated_child);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id am335x_child_of_match[] = {
+ { .compatible = "ti,am33xx-usb" },
+ { },
+};
+MODULE_DEVICE_TABLE(of, am335x_child_of_match);
+
+static struct platform_driver am335x_child_driver = {
+ .probe = am335x_child_probe,
+ .remove = am335x_child_remove,
+ .driver = {
+ .name = "am335x-usb-childs",
+ .of_match_table = of_match_ptr(am335x_child_of_match),
+ },
+};
+
+module_platform_driver(am335x_child_driver);
+MODULE_DESCRIPTION("AM33xx child devices");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 29a24ced674..18e877ffe7b 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -99,7 +99,6 @@
#include <linux/prefetch.h>
#include <linux/platform_device.h>
#include <linux/io.h>
-#include <linux/idr.h>
#include <linux/dma-mapping.h>
#include "musb_core.h"
@@ -1764,12 +1763,8 @@ static void musb_free(struct musb *musb)
disable_irq_wake(musb->nIrq);
free_irq(musb->nIrq, musb);
}
- if (is_dma_capable() && musb->dma_controller) {
- struct dma_controller *c = musb->dma_controller;
-
- (void) c->stop(c);
- dma_controller_destroy(c);
- }
+ if (musb->dma_controller)
+ dma_controller_destroy(musb->dma_controller);
musb_host_free(musb);
}
@@ -1787,7 +1782,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
{
int status;
struct musb *musb;
- struct musb_hdrc_platform_data *plat = dev->platform_data;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
/* The driver might handle more features than the board; OK.
* Fail when the board needs a feature that's not enabled.
@@ -1844,19 +1839,8 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
pm_runtime_get_sync(musb->controller);
-#ifndef CONFIG_MUSB_PIO_ONLY
- if (use_dma && dev->dma_mask) {
- struct dma_controller *c;
-
- c = dma_controller_create(musb, musb->mregs);
- musb->dma_controller = c;
- if (c)
- (void) c->start(c);
- }
-#endif
- /* ideally this would be abstracted in platform setup */
- if (!is_dma_capable() || !musb->dma_controller)
- dev->dma_mask = NULL;
+ if (use_dma && dev->dma_mask)
+ musb->dma_controller = dma_controller_create(musb, musb->mregs);
/* be sure interrupts are disabled before connecting ISR */
musb_platform_disable(musb);
@@ -1944,6 +1928,8 @@ fail4:
musb_gadget_cleanup(musb);
fail3:
+ if (musb->dma_controller)
+ dma_controller_destroy(musb->dma_controller);
pm_runtime_put_sync(musb->controller);
fail2:
@@ -2002,9 +1988,6 @@ static int musb_remove(struct platform_device *pdev)
musb_free(musb);
device_init_wakeup(dev, 0);
-#ifndef CONFIG_MUSB_PIO_ONLY
- dma_set_mask(dev, *dev->parent->dma_mask);
-#endif
return 0;
}
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 7d341c387ea..65f3917b4fc 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -83,11 +83,6 @@ enum {
MUSB_PORT_MODE_DUAL_ROLE,
};
-#ifdef CONFIG_PROC_FS
-#include <linux/fs.h>
-#define MUSB_CONFIG_PROC_FS
-#endif
-
/****************************** CONSTANTS ********************************/
#ifndef MUSB_C_NUM_EPS
@@ -425,9 +420,6 @@ struct musb {
struct musb_hdrc_config *config;
-#ifdef MUSB_CONFIG_PROC_FS
- struct proc_dir_entry *proc_entry;
-#endif
int xceiv_old_state;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_root;
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
new file mode 100644
index 00000000000..ae959746f77
--- /dev/null
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -0,0 +1,557 @@
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/sizes.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+
+#include "musb_core.h"
+
+#define RNDIS_REG(x) (0x80 + ((x - 1) * 4))
+
+#define EP_MODE_AUTOREG_NONE 0
+#define EP_MODE_AUTOREG_ALL_NEOP 1
+#define EP_MODE_AUTOREG_ALWAYS 3
+
+#define EP_MODE_DMA_TRANSPARENT 0
+#define EP_MODE_DMA_RNDIS 1
+#define EP_MODE_DMA_GEN_RNDIS 3
+
+#define USB_CTRL_TX_MODE 0x70
+#define USB_CTRL_RX_MODE 0x74
+#define USB_CTRL_AUTOREQ 0xd0
+#define USB_TDOWN 0xd8
+
+struct cppi41_dma_channel {
+ struct dma_channel channel;
+ struct cppi41_dma_controller *controller;
+ struct musb_hw_ep *hw_ep;
+ struct dma_chan *dc;
+ dma_cookie_t cookie;
+ u8 port_num;
+ u8 is_tx;
+ u8 is_allocated;
+ u8 usb_toggle;
+
+ dma_addr_t buf_addr;
+ u32 total_len;
+ u32 prog_len;
+ u32 transferred;
+ u32 packet_sz;
+};
+
+#define MUSB_DMA_NUM_CHANNELS 15
+
+struct cppi41_dma_controller {
+ struct dma_controller controller;
+ struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS];
+ struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS];
+ struct musb *musb;
+ u32 rx_mode;
+ u32 tx_mode;
+ u32 auto_req;
+};
+
+static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
+{
+ u16 csr;
+ u8 toggle;
+
+ if (cppi41_channel->is_tx)
+ return;
+ if (!is_host_active(cppi41_channel->controller->musb))
+ return;
+
+ csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
+ toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;
+
+ cppi41_channel->usb_toggle = toggle;
+}
+
+static void update_rx_toggle(struct cppi41_dma_channel *cppi41_channel)
+{
+ u16 csr;
+ u8 toggle;
+
+ if (cppi41_channel->is_tx)
+ return;
+ if (!is_host_active(cppi41_channel->controller->musb))
+ return;
+
+ csr = musb_readw(cppi41_channel->hw_ep->regs, MUSB_RXCSR);
+ toggle = csr & MUSB_RXCSR_H_DATATOGGLE ? 1 : 0;
+
+ /*
+	 * AM335x Advisory 1.0.13: Due to an internal synchronisation error, the
+	 * data toggle may reset from DATA1 to DATA0 while receiving data from
+	 * more than one endpoint.
+ */
+ if (!toggle && toggle == cppi41_channel->usb_toggle) {
+ csr |= MUSB_RXCSR_H_DATATOGGLE | MUSB_RXCSR_H_WR_DATATOGGLE;
+ musb_writew(cppi41_channel->hw_ep->regs, MUSB_RXCSR, csr);
+ dev_dbg(cppi41_channel->controller->musb->controller,
+ "Restoring DATA1 toggle.\n");
+ }
+
+ cppi41_channel->usb_toggle = toggle;
+}
+
+static void cppi41_dma_callback(void *private_data)
+{
+ struct dma_channel *channel = private_data;
+ struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep;
+ struct musb *musb = hw_ep->musb;
+ unsigned long flags;
+ struct dma_tx_state txstate;
+ u32 transferred;
+
+ spin_lock_irqsave(&musb->lock, flags);
+
+ dmaengine_tx_status(cppi41_channel->dc, cppi41_channel->cookie,
+ &txstate);
+ transferred = cppi41_channel->prog_len - txstate.residue;
+ cppi41_channel->transferred += transferred;
+
+ dev_dbg(musb->controller, "DMA transfer done on hw_ep=%d bytes=%d/%d\n",
+ hw_ep->epnum, cppi41_channel->transferred,
+ cppi41_channel->total_len);
+
+ update_rx_toggle(cppi41_channel);
+
+ if (cppi41_channel->transferred == cppi41_channel->total_len ||
+ transferred < cppi41_channel->packet_sz) {
+
+ /* done, complete */
+ cppi41_channel->channel.actual_len =
+ cppi41_channel->transferred;
+ cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
+ musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx);
+ } else {
+ /* next iteration, reload */
+ struct dma_chan *dc = cppi41_channel->dc;
+ struct dma_async_tx_descriptor *dma_desc;
+ enum dma_transfer_direction direction;
+ u16 csr;
+ u32 remain_bytes;
+ void __iomem *epio = cppi41_channel->hw_ep->regs;
+
+ cppi41_channel->buf_addr += cppi41_channel->packet_sz;
+
+ remain_bytes = cppi41_channel->total_len;
+ remain_bytes -= cppi41_channel->transferred;
+ remain_bytes = min(remain_bytes, cppi41_channel->packet_sz);
+ cppi41_channel->prog_len = remain_bytes;
+
+ direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV
+ : DMA_DEV_TO_MEM;
+ dma_desc = dmaengine_prep_slave_single(dc,
+ cppi41_channel->buf_addr,
+ remain_bytes,
+ direction,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (WARN_ON(!dma_desc)) {
+ spin_unlock_irqrestore(&musb->lock, flags);
+ return;
+ }
+
+ dma_desc->callback = cppi41_dma_callback;
+ dma_desc->callback_param = channel;
+ cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
+ dma_async_issue_pending(dc);
+
+ if (!cppi41_channel->is_tx) {
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr |= MUSB_RXCSR_H_REQPKT;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+ }
+ spin_unlock_irqrestore(&musb->lock, flags);
+}
+
+static u32 update_ep_mode(unsigned ep, unsigned mode, u32 old)
+{
+ unsigned shift;
+
+ shift = (ep - 1) * 2;
+ old &= ~(3 << shift);
+ old |= mode << shift;
+ return old;
+}
+
+static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel,
+ unsigned mode)
+{
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
+ u32 port;
+ u32 new_mode;
+ u32 old_mode;
+
+ if (cppi41_channel->is_tx)
+ old_mode = controller->tx_mode;
+ else
+ old_mode = controller->rx_mode;
+ port = cppi41_channel->port_num;
+ new_mode = update_ep_mode(port, mode, old_mode);
+
+ if (new_mode == old_mode)
+ return;
+ if (cppi41_channel->is_tx) {
+ controller->tx_mode = new_mode;
+ musb_writel(controller->musb->ctrl_base, USB_CTRL_TX_MODE,
+ new_mode);
+ } else {
+ controller->rx_mode = new_mode;
+ musb_writel(controller->musb->ctrl_base, USB_CTRL_RX_MODE,
+ new_mode);
+ }
+}
+
+static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel,
+ unsigned mode)
+{
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
+ u32 port;
+ u32 new_mode;
+ u32 old_mode;
+
+ old_mode = controller->auto_req;
+ port = cppi41_channel->port_num;
+ new_mode = update_ep_mode(port, mode, old_mode);
+
+ if (new_mode == old_mode)
+ return;
+ controller->auto_req = new_mode;
+ musb_writel(controller->musb->ctrl_base, USB_CTRL_AUTOREQ, new_mode);
+}
+
+static bool cppi41_configure_channel(struct dma_channel *channel,
+ u16 packet_sz, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ struct dma_chan *dc = cppi41_channel->dc;
+ struct dma_async_tx_descriptor *dma_desc;
+ enum dma_transfer_direction direction;
+ struct musb *musb = cppi41_channel->controller->musb;
+ unsigned use_gen_rndis = 0;
+
+ dev_dbg(musb->controller,
+ "configure ep%d/%x packet_sz=%d, mode=%d, dma_addr=0x%llx, len=%d is_tx=%d\n",
+ cppi41_channel->port_num, RNDIS_REG(cppi41_channel->port_num),
+ packet_sz, mode, (unsigned long long) dma_addr,
+ len, cppi41_channel->is_tx);
+
+ cppi41_channel->buf_addr = dma_addr;
+ cppi41_channel->total_len = len;
+ cppi41_channel->transferred = 0;
+ cppi41_channel->packet_sz = packet_sz;
+
+ /*
+	 * Due to AM335x Advisory 1.0.13 we are not allowed to transfer more
+	 * than the max packet size at a time.
+ */
+ if (cppi41_channel->is_tx)
+ use_gen_rndis = 1;
+
+ if (use_gen_rndis) {
+ /* RNDIS mode */
+ if (len > packet_sz) {
+ musb_writel(musb->ctrl_base,
+ RNDIS_REG(cppi41_channel->port_num), len);
+ /* gen rndis */
+ cppi41_set_dma_mode(cppi41_channel,
+ EP_MODE_DMA_GEN_RNDIS);
+
+ /* auto req */
+ cppi41_set_autoreq_mode(cppi41_channel,
+ EP_MODE_AUTOREG_ALL_NEOP);
+ } else {
+ musb_writel(musb->ctrl_base,
+ RNDIS_REG(cppi41_channel->port_num), 0);
+ cppi41_set_dma_mode(cppi41_channel,
+ EP_MODE_DMA_TRANSPARENT);
+ cppi41_set_autoreq_mode(cppi41_channel,
+ EP_MODE_AUTOREG_NONE);
+ }
+ } else {
+ /* fallback mode */
+ cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT);
+ cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREG_NONE);
+ len = min_t(u32, packet_sz, len);
+ }
+ cppi41_channel->prog_len = len;
+ direction = cppi41_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
+ dma_desc = dmaengine_prep_slave_single(dc, dma_addr, len, direction,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!dma_desc)
+ return false;
+
+ dma_desc->callback = cppi41_dma_callback;
+ dma_desc->callback_param = channel;
+ cppi41_channel->cookie = dma_desc->tx_submit(dma_desc);
+
+ save_rx_toggle(cppi41_channel);
+ dma_async_issue_pending(dc);
+ return true;
+}
+
+static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c,
+ struct musb_hw_ep *hw_ep, u8 is_tx)
+{
+ struct cppi41_dma_controller *controller = container_of(c,
+ struct cppi41_dma_controller, controller);
+ struct cppi41_dma_channel *cppi41_channel = NULL;
+ u8 ch_num = hw_ep->epnum - 1;
+
+ if (ch_num >= MUSB_DMA_NUM_CHANNELS)
+ return NULL;
+
+ if (is_tx)
+ cppi41_channel = &controller->tx_channel[ch_num];
+ else
+ cppi41_channel = &controller->rx_channel[ch_num];
+
+ if (!cppi41_channel->dc)
+ return NULL;
+
+ if (cppi41_channel->is_allocated)
+ return NULL;
+
+ cppi41_channel->hw_ep = hw_ep;
+ cppi41_channel->is_allocated = 1;
+
+ return &cppi41_channel->channel;
+}
+
+static void cppi41_dma_channel_release(struct dma_channel *channel)
+{
+ struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+
+ if (cppi41_channel->is_allocated) {
+ cppi41_channel->is_allocated = 0;
+ channel->status = MUSB_DMA_STATUS_FREE;
+ channel->actual_len = 0;
+ }
+}
+
+static int cppi41_dma_channel_program(struct dma_channel *channel,
+ u16 packet_sz, u8 mode,
+ dma_addr_t dma_addr, u32 len)
+{
+ int ret;
+
+ BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
+ channel->status == MUSB_DMA_STATUS_BUSY);
+
+ channel->status = MUSB_DMA_STATUS_BUSY;
+ channel->actual_len = 0;
+ ret = cppi41_configure_channel(channel, packet_sz, mode, dma_addr, len);
+ if (!ret)
+ channel->status = MUSB_DMA_STATUS_FREE;
+
+ return ret;
+}
+
+static int cppi41_is_compatible(struct dma_channel *channel, u16 maxpacket,
+ void *buf, u32 length)
+{
+ struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
+ struct musb *musb = controller->musb;
+
+ if (is_host_active(musb)) {
+ WARN_ON(1);
+ return 1;
+ }
+ if (cppi41_channel->is_tx)
+ return 1;
+ /* AM335x Advisory 1.0.13. No workaround for device RX mode */
+ return 0;
+}
+
+static int cppi41_dma_channel_abort(struct dma_channel *channel)
+{
+ struct cppi41_dma_channel *cppi41_channel = channel->private_data;
+ struct cppi41_dma_controller *controller = cppi41_channel->controller;
+ struct musb *musb = controller->musb;
+ void __iomem *epio = cppi41_channel->hw_ep->regs;
+ int tdbit;
+ int ret;
+ unsigned is_tx;
+ u16 csr;
+
+ is_tx = cppi41_channel->is_tx;
+ dev_dbg(musb->controller, "abort channel=%d, is_tx=%d\n",
+ cppi41_channel->port_num, is_tx);
+
+ if (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)
+ return 0;
+
+ if (is_tx) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ csr &= ~MUSB_TXCSR_DMAENAB;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ } else {
+ csr = musb_readw(epio, MUSB_RXCSR);
+ csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
+ musb_writew(epio, MUSB_RXCSR, csr);
+
+ csr = musb_readw(epio, MUSB_RXCSR);
+ if (csr & MUSB_RXCSR_RXPKTRDY) {
+ csr |= MUSB_RXCSR_FLUSHFIFO;
+ musb_writew(epio, MUSB_RXCSR, csr);
+ musb_writew(epio, MUSB_RXCSR, csr);
+ }
+ }
+
+ tdbit = 1 << cppi41_channel->port_num;
+ if (is_tx)
+ tdbit <<= 16;
+
+ do {
+ musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
+ ret = dmaengine_terminate_all(cppi41_channel->dc);
+ } while (ret == -EAGAIN);
+
+ musb_writel(musb->ctrl_base, USB_TDOWN, tdbit);
+
+ if (is_tx) {
+ csr = musb_readw(epio, MUSB_TXCSR);
+ if (csr & MUSB_TXCSR_TXPKTRDY) {
+ csr |= MUSB_TXCSR_FLUSHFIFO;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ }
+ }
+
+ cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE;
+ return 0;
+}
+
+static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl)
+{
+ struct dma_chan *dc;
+ int i;
+
+ for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) {
+ dc = ctrl->tx_channel[i].dc;
+ if (dc)
+ dma_release_channel(dc);
+ dc = ctrl->rx_channel[i].dc;
+ if (dc)
+ dma_release_channel(dc);
+ }
+}
+
+static void cppi41_dma_controller_stop(struct cppi41_dma_controller *controller)
+{
+ cppi41_release_all_dma_chans(controller);
+}
+
+static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
+{
+ struct musb *musb = controller->musb;
+ struct device *dev = musb->controller;
+ struct device_node *np = dev->of_node;
+ struct cppi41_dma_channel *cppi41_channel;
+ int count;
+ int i;
+ int ret;
+
+ count = of_property_count_strings(np, "dma-names");
+ if (count < 0)
+ return count;
+
+ for (i = 0; i < count; i++) {
+ struct dma_chan *dc;
+ struct dma_channel *musb_dma;
+ const char *str;
+ unsigned is_tx;
+ unsigned int port;
+
+ ret = of_property_read_string_index(np, "dma-names", i, &str);
+ if (ret)
+ goto err;
+ if (!strncmp(str, "tx", 2))
+ is_tx = 1;
+ else if (!strncmp(str, "rx", 2))
+ is_tx = 0;
+ else {
+ dev_err(dev, "Wrong dmatype %s\n", str);
+ goto err;
+ }
+ ret = kstrtouint(str + 2, 0, &port);
+ if (ret)
+ goto err;
+
+ if (port > MUSB_DMA_NUM_CHANNELS || !port)
+ goto err;
+ if (is_tx)
+ cppi41_channel = &controller->tx_channel[port - 1];
+ else
+ cppi41_channel = &controller->rx_channel[port - 1];
+
+ cppi41_channel->controller = controller;
+ cppi41_channel->port_num = port;
+ cppi41_channel->is_tx = is_tx;
+
+ musb_dma = &cppi41_channel->channel;
+ musb_dma->private_data = cppi41_channel;
+ musb_dma->status = MUSB_DMA_STATUS_FREE;
+ musb_dma->max_len = SZ_4M;
+
+ dc = dma_request_slave_channel(dev, str);
+ if (!dc) {
+			dev_err(dev, "Failed to request %s.\n", str);
+ goto err;
+ }
+ cppi41_channel->dc = dc;
+ }
+ return 0;
+err:
+ cppi41_release_all_dma_chans(controller);
+ return -EINVAL;
+}
+
+void dma_controller_destroy(struct dma_controller *c)
+{
+ struct cppi41_dma_controller *controller = container_of(c,
+ struct cppi41_dma_controller, controller);
+
+ cppi41_dma_controller_stop(controller);
+ kfree(controller);
+}
+
+struct dma_controller *dma_controller_create(struct musb *musb,
+ void __iomem *base)
+{
+ struct cppi41_dma_controller *controller;
+ int ret;
+
+ if (!musb->controller->of_node) {
+ dev_err(musb->controller, "Need DT for the DMA engine.\n");
+ return NULL;
+ }
+
+ controller = kzalloc(sizeof(*controller), GFP_KERNEL);
+ if (!controller)
+ goto kzalloc_fail;
+
+ controller->musb = musb;
+
+ controller->controller.channel_alloc = cppi41_dma_channel_allocate;
+ controller->controller.channel_release = cppi41_dma_channel_release;
+ controller->controller.channel_program = cppi41_dma_channel_program;
+ controller->controller.channel_abort = cppi41_dma_channel_abort;
+ controller->controller.is_compatible = cppi41_is_compatible;
+
+ ret = cppi41_dma_controller_start(controller);
+ if (ret)
+ goto plat_get_fail;
+ return &controller->controller;
+
+plat_get_fail:
+ kfree(controller);
+kzalloc_fail:
+ return NULL;
+}
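
In the new musb_cppi41.c above, update_ep_mode() packs one 2-bit mode per endpoint into the shared TX/RX mode registers. A small hedged worked example of that packing for endpoint 3; the register value and helper name are illustrative only:

/* Endpoint 3: shift = (3 - 1) * 2 = 4, so its mode occupies bits [5:4]. */
static u32 example_set_ep3_gen_rndis(u32 old)
{
	unsigned int shift = (3 - 1) * 2;

	old &= ~(3 << shift);			/* clear the old 2-bit field */
	old |= EP_MODE_DMA_GEN_RNDIS << shift;	/* write the new mode (3)    */
	return old;
}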
diff --git a/drivers/usb/musb/musb_dma.h b/drivers/usb/musb/musb_dma.h
index 1b6b827b769..1345a4ff041 100644
--- a/drivers/usb/musb/musb_dma.h
+++ b/drivers/usb/musb/musb_dma.h
@@ -62,13 +62,13 @@ struct musb_hw_ep;
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
-#ifndef CONFIG_MUSB_PIO_ONLY
-#define is_dma_capable() (1)
-#else
+#ifdef CONFIG_MUSB_PIO_ONLY
#define is_dma_capable() (0)
+#else
+#define is_dma_capable() (1)
#endif
-#ifdef CONFIG_USB_TI_CPPI_DMA
+#if defined(CONFIG_USB_TI_CPPI_DMA) || defined(CONFIG_USB_TI_CPPI41_DMA)
#define is_cppi_enabled() 1
#else
#define is_cppi_enabled() 0
@@ -159,8 +159,6 @@ dma_channel_status(struct dma_channel *c)
* Controllers manage dma channels.
*/
struct dma_controller {
- int (*start)(struct dma_controller *);
- int (*stop)(struct dma_controller *);
struct dma_channel *(*channel_alloc)(struct dma_controller *,
struct musb_hw_ep *, u8 is_tx);
void (*channel_release)(struct dma_channel *);
@@ -177,9 +175,20 @@ struct dma_controller {
/* called after channel_program(), may indicate a fault */
extern void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit);
+#ifdef CONFIG_MUSB_PIO_ONLY
+static inline struct dma_controller *dma_controller_create(struct musb *m,
+ void __iomem *io)
+{
+ return NULL;
+}
+
+static inline void dma_controller_destroy(struct dma_controller *d) { }
+
+#else
extern struct dma_controller *dma_controller_create(struct musb *, void __iomem *);
extern void dma_controller_destroy(struct dma_controller *);
+#endif
#endif /* __MUSB_DMA_H__ */
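
The musb_dma.h hunk above uses a common kernel idiom: when the feature is compiled out (CONFIG_MUSB_PIO_ONLY), the header supplies static inline stubs so callers such as musb_core.c need no #ifdefs of their own. A generic hedged sketch of the same idiom with made-up names:

#ifdef CONFIG_EXAMPLE_FEATURE
int example_feature_init(struct device *dev);
void example_feature_exit(struct device *dev);
#else
/* Feature compiled out: callers still link and the calls become no-ops. */
static inline int example_feature_init(struct device *dev)
{
	return 0;
}
static inline void example_feature_exit(struct device *dev) { }
#endif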
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 5233804d66b..4047cbb91ba 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -36,19 +36,19 @@
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/module.h>
-#include <linux/usb/nop-usb-xceiv.h>
+#include <linux/usb/usb_phy_gen_xceiv.h>
#include <linux/platform_data/usb-omap.h>
#include <linux/sizes.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/usb/of.h>
#include "musb_core.h"
-#ifdef CONFIG_OF
static const struct of_device_id musb_dsps_of_match[];
-#endif
/**
* avoid using musb_readx()/musb_writex() as glue layer should not be
@@ -75,7 +75,6 @@ struct dsps_musb_wrapper {
u16 revision;
u16 control;
u16 status;
- u16 eoi;
u16 epintr_set;
u16 epintr_clear;
u16 epintr_status;
@@ -108,10 +107,7 @@ struct dsps_musb_wrapper {
/* bit positions for mode */
unsigned iddig:5;
/* miscellaneous stuff */
- u32 musb_core_offset;
u8 poll_seconds;
- /* number of musb instances */
- u8 instances;
};
/**
@@ -119,53 +115,12 @@ struct dsps_musb_wrapper {
*/
struct dsps_glue {
struct device *dev;
- struct platform_device *musb[2]; /* child musb pdev */
+ struct platform_device *musb; /* child musb pdev */
const struct dsps_musb_wrapper *wrp; /* wrapper register offsets */
- struct timer_list timer[2]; /* otg_workaround timer */
- unsigned long last_timer[2]; /* last timer data for each instance */
- u32 __iomem *usb_ctrl[2];
+ struct timer_list timer; /* otg_workaround timer */
+ unsigned long last_timer; /* last timer data for each instance */
};
-#define DSPS_AM33XX_CONTROL_MODULE_PHYS_0 0x44e10620
-#define DSPS_AM33XX_CONTROL_MODULE_PHYS_1 0x44e10628
-
-static const resource_size_t dsps_control_module_phys[] = {
- DSPS_AM33XX_CONTROL_MODULE_PHYS_0,
- DSPS_AM33XX_CONTROL_MODULE_PHYS_1,
-};
-
-#define USBPHY_CM_PWRDN (1 << 0)
-#define USBPHY_OTG_PWRDN (1 << 1)
-#define USBPHY_OTGVDET_EN (1 << 19)
-#define USBPHY_OTGSESSEND_EN (1 << 20)
-
-/**
- * musb_dsps_phy_control - phy on/off
- * @glue: struct dsps_glue *
- * @id: musb instance
- * @on: flag for phy to be switched on or off
- *
- * This is to enable the PHY using usb_ctrl register in system control
- * module space.
- *
- * XXX: This function will be removed once we have a seperate driver for
- * control module
- */
-static void musb_dsps_phy_control(struct dsps_glue *glue, u8 id, u8 on)
-{
- u32 usbphycfg;
-
- usbphycfg = readl(glue->usb_ctrl[id]);
-
- if (on) {
- usbphycfg &= ~(USBPHY_CM_PWRDN | USBPHY_OTG_PWRDN);
- usbphycfg |= USBPHY_OTGVDET_EN | USBPHY_OTGSESSEND_EN;
- } else {
- usbphycfg |= USBPHY_CM_PWRDN | USBPHY_OTG_PWRDN;
- }
-
- writel(usbphycfg, glue->usb_ctrl[id]);
-}
/**
* dsps_musb_enable - enable interrupts
*/
@@ -205,7 +160,6 @@ static void dsps_musb_disable(struct musb *musb)
dsps_writel(reg_base, wrp->epintr_clear,
wrp->txep_bitmap | wrp->rxep_bitmap);
dsps_writeb(musb->mregs, MUSB_DEVCTL, 0);
- dsps_writel(reg_base, wrp->eoi, 0);
}
static void otg_timer(unsigned long _musb)
@@ -213,7 +167,6 @@ static void otg_timer(unsigned long _musb)
struct musb *musb = (void *)_musb;
void __iomem *mregs = musb->mregs;
struct device *dev = musb->controller;
- struct platform_device *pdev = to_platform_device(dev);
struct dsps_glue *glue = dev_get_drvdata(dev->parent);
const struct dsps_musb_wrapper *wrp = glue->wrp;
u8 devctl;
@@ -250,7 +203,7 @@ static void otg_timer(unsigned long _musb)
case OTG_STATE_B_IDLE:
devctl = dsps_readb(mregs, MUSB_DEVCTL);
if (devctl & MUSB_DEVCTL_BDEVICE)
- mod_timer(&glue->timer[pdev->id],
+ mod_timer(&glue->timer,
jiffies + wrp->poll_seconds * HZ);
else
musb->xceiv->state = OTG_STATE_A_IDLE;
@@ -264,7 +217,6 @@ static void otg_timer(unsigned long _musb)
static void dsps_musb_try_idle(struct musb *musb, unsigned long timeout)
{
struct device *dev = musb->controller;
- struct platform_device *pdev = to_platform_device(dev);
struct dsps_glue *glue = dev_get_drvdata(dev->parent);
if (timeout == 0)
@@ -275,23 +227,25 @@ static void dsps_musb_try_idle(struct musb *musb, unsigned long timeout)
musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
dev_dbg(musb->controller, "%s active, deleting timer\n",
usb_otg_state_string(musb->xceiv->state));
- del_timer(&glue->timer[pdev->id]);
- glue->last_timer[pdev->id] = jiffies;
+ del_timer(&glue->timer);
+ glue->last_timer = jiffies;
return;
}
+ if (musb->port_mode == MUSB_PORT_MODE_HOST)
+ return;
- if (time_after(glue->last_timer[pdev->id], timeout) &&
- timer_pending(&glue->timer[pdev->id])) {
+ if (time_after(glue->last_timer, timeout) &&
+ timer_pending(&glue->timer)) {
dev_dbg(musb->controller,
"Longer idle timer already pending, ignoring...\n");
return;
}
- glue->last_timer[pdev->id] = timeout;
+ glue->last_timer = timeout;
dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
usb_otg_state_string(musb->xceiv->state),
jiffies_to_msecs(timeout - jiffies));
- mod_timer(&glue->timer[pdev->id], timeout);
+ mod_timer(&glue->timer, timeout);
}
static irqreturn_t dsps_interrupt(int irq, void *hci)
@@ -299,7 +253,6 @@ static irqreturn_t dsps_interrupt(int irq, void *hci)
struct musb *musb = hci;
void __iomem *reg_base = musb->ctrl_base;
struct device *dev = musb->controller;
- struct platform_device *pdev = to_platform_device(dev);
struct dsps_glue *glue = dev_get_drvdata(dev->parent);
const struct dsps_musb_wrapper *wrp = glue->wrp;
unsigned long flags;
@@ -319,7 +272,7 @@ static irqreturn_t dsps_interrupt(int irq, void *hci)
/* Get usb core interrupts */
usbintr = dsps_readl(reg_base, wrp->coreintr_status);
if (!usbintr && !epintr)
- goto eoi;
+ goto out;
musb->int_usb = (usbintr & wrp->usb_bitmap) >> wrp->usb_shift;
if (usbintr)
@@ -359,15 +312,14 @@ static irqreturn_t dsps_interrupt(int irq, void *hci)
*/
musb->int_usb &= ~MUSB_INTR_VBUSERROR;
musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
- mod_timer(&glue->timer[pdev->id],
+ mod_timer(&glue->timer,
jiffies + wrp->poll_seconds * HZ);
WARNING("VBUS error workaround (delay coming)\n");
} else if (drvvbus) {
- musb->is_active = 1;
MUSB_HST_MODE(musb);
musb->xceiv->otg->default_a = 1;
musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
- del_timer(&glue->timer[pdev->id]);
+ del_timer(&glue->timer);
} else {
musb->is_active = 0;
MUSB_DEV_MODE(musb);
@@ -387,16 +339,10 @@ static irqreturn_t dsps_interrupt(int irq, void *hci)
if (musb->int_tx || musb->int_rx || musb->int_usb)
ret |= musb_interrupt(musb);
- eoi:
- /* EOI needs to be written for the IRQ to be re-asserted. */
- if (ret == IRQ_HANDLED || epintr || usbintr)
- dsps_writel(reg_base, wrp->eoi, 1);
-
/* Poll for ID change */
if (musb->xceiv->state == OTG_STATE_B_IDLE)
- mod_timer(&glue->timer[pdev->id],
- jiffies + wrp->poll_seconds * HZ);
-
+ mod_timer(&glue->timer, jiffies + wrp->poll_seconds * HZ);
+out:
spin_unlock_irqrestore(&musb->lock, flags);
return ret;
@@ -405,37 +351,38 @@ static irqreturn_t dsps_interrupt(int irq, void *hci)
static int dsps_musb_init(struct musb *musb)
{
struct device *dev = musb->controller;
- struct platform_device *pdev = to_platform_device(dev);
struct dsps_glue *glue = dev_get_drvdata(dev->parent);
+ struct platform_device *parent = to_platform_device(dev->parent);
const struct dsps_musb_wrapper *wrp = glue->wrp;
- void __iomem *reg_base = musb->ctrl_base;
+ void __iomem *reg_base;
+ struct resource *r;
u32 rev, val;
- int status;
- /* mentor core register starts at offset of 0x400 from musb base */
- musb->mregs += wrp->musb_core_offset;
+ r = platform_get_resource_byname(parent, IORESOURCE_MEM, "control");
+ if (!r)
+ return -EINVAL;
+
+ reg_base = devm_ioremap_resource(dev, r);
+ if (IS_ERR(reg_base))
+ return PTR_ERR(reg_base);
+ musb->ctrl_base = reg_base;
/* NOP driver needs change if supporting dual instance */
- usb_nop_xceiv_register();
- musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
- if (IS_ERR_OR_NULL(musb->xceiv))
- return -EPROBE_DEFER;
+ musb->xceiv = devm_usb_get_phy_by_phandle(dev, "phys", 0);
+ if (IS_ERR(musb->xceiv))
+ return PTR_ERR(musb->xceiv);
/* Returns zero if e.g. not clocked */
rev = dsps_readl(reg_base, wrp->revision);
- if (!rev) {
- status = -ENODEV;
- goto err0;
- }
+ if (!rev)
+ return -ENODEV;
- setup_timer(&glue->timer[pdev->id], otg_timer, (unsigned long) musb);
+ usb_phy_init(musb->xceiv);
+ setup_timer(&glue->timer, otg_timer, (unsigned long) musb);
/* Reset the musb */
dsps_writel(reg_base, wrp->control, (1 << wrp->reset));
- /* Start the on-chip PHY and its PLL. */
- musb_dsps_phy_control(glue, pdev->id, 1);
-
musb->isr = dsps_interrupt;
/* reset the otgdisable bit, needed for host mode to work */
@@ -443,31 +390,17 @@ static int dsps_musb_init(struct musb *musb)
val &= ~(1 << wrp->otg_disable);
dsps_writel(musb->ctrl_base, wrp->phy_utmi, val);
- /* clear level interrupt */
- dsps_writel(reg_base, wrp->eoi, 0);
-
return 0;
-err0:
- usb_put_phy(musb->xceiv);
- usb_nop_xceiv_unregister();
- return status;
}
static int dsps_musb_exit(struct musb *musb)
{
struct device *dev = musb->controller;
- struct platform_device *pdev = to_platform_device(dev);
struct dsps_glue *glue = dev_get_drvdata(dev->parent);
- del_timer_sync(&glue->timer[pdev->id]);
-
- /* Shutdown the on-chip PHY and its PLL. */
- musb_dsps_phy_control(glue, pdev->id, 0);
-
- /* NOP driver needs change if supporting dual instance */
- usb_put_phy(musb->xceiv);
- usb_nop_xceiv_unregister();
+ del_timer_sync(&glue->timer);
+ usb_phy_shutdown(musb->xceiv);
return 0;
}
@@ -483,116 +416,115 @@ static struct musb_platform_ops dsps_ops = {
static u64 musb_dmamask = DMA_BIT_MASK(32);
-static int dsps_create_musb_pdev(struct dsps_glue *glue, u8 id)
+static int get_int_prop(struct device_node *dn, const char *s)
{
- struct device *dev = glue->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct musb_hdrc_platform_data *pdata = dev->platform_data;
- struct device_node *np = pdev->dev.of_node;
- struct musb_hdrc_config *config;
- struct platform_device *musb;
- struct resource *res;
- struct resource resources[2];
- char res_name[11];
int ret;
+ u32 val;
- resources[0].start = dsps_control_module_phys[id];
- resources[0].end = resources[0].start + SZ_4 - 1;
- resources[0].flags = IORESOURCE_MEM;
+ ret = of_property_read_u32(dn, s, &val);
+ if (ret)
+ return 0;
+ return val;
+}
- glue->usb_ctrl[id] = devm_ioremap_resource(&pdev->dev, resources);
- if (IS_ERR(glue->usb_ctrl[id])) {
- ret = PTR_ERR(glue->usb_ctrl[id]);
- goto err0;
- }
+static int get_musb_port_mode(struct device *dev)
+{
+ enum usb_dr_mode mode;
+
+ mode = of_usb_get_dr_mode(dev->of_node);
+ switch (mode) {
+ case USB_DR_MODE_HOST:
+ return MUSB_PORT_MODE_HOST;
+
+ case USB_DR_MODE_PERIPHERAL:
+ return MUSB_PORT_MODE_GADGET;
+
+ case USB_DR_MODE_UNKNOWN:
+ case USB_DR_MODE_OTG:
+ default:
+ return MUSB_PORT_MODE_DUAL_ROLE;
+ }
+}
+
+static int dsps_create_musb_pdev(struct dsps_glue *glue,
+ struct platform_device *parent)
+{
+ struct musb_hdrc_platform_data pdata;
+ struct resource resources[2];
+ struct resource *res;
+ struct device *dev = &parent->dev;
+ struct musb_hdrc_config *config;
+ struct platform_device *musb;
+ struct device_node *dn = parent->dev.of_node;
+ int ret;
- /* first resource is for usbss, so start index from 1 */
- res = platform_get_resource(pdev, IORESOURCE_MEM, id + 1);
+ memset(resources, 0, sizeof(resources));
+ res = platform_get_resource_byname(parent, IORESOURCE_MEM, "mc");
if (!res) {
- dev_err(dev, "failed to get memory for instance %d\n", id);
- ret = -ENODEV;
- goto err0;
+ dev_err(dev, "failed to get memory.\n");
+ return -EINVAL;
}
- res->parent = NULL;
resources[0] = *res;
- /* first resource is for usbss, so start index from 1 */
- res = platform_get_resource(pdev, IORESOURCE_IRQ, id + 1);
+ res = platform_get_resource_byname(parent, IORESOURCE_IRQ, "mc");
if (!res) {
- dev_err(dev, "failed to get irq for instance %d\n", id);
- ret = -ENODEV;
- goto err0;
+ dev_err(dev, "failed to get irq.\n");
+ return -EINVAL;
}
- res->parent = NULL;
resources[1] = *res;
- resources[1].name = "mc";
/* allocate the child platform device */
musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
if (!musb) {
dev_err(dev, "failed to allocate musb device\n");
- ret = -ENOMEM;
- goto err0;
+ return -ENOMEM;
}
musb->dev.parent = dev;
musb->dev.dma_mask = &musb_dmamask;
musb->dev.coherent_dma_mask = musb_dmamask;
+ musb->dev.of_node = of_node_get(dn);
- glue->musb[id] = musb;
+ glue->musb = musb;
- ret = platform_device_add_resources(musb, resources, 2);
+ ret = platform_device_add_resources(musb, resources,
+ ARRAY_SIZE(resources));
if (ret) {
dev_err(dev, "failed to add resources\n");
- goto err2;
+ goto err;
}
- if (np) {
- pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
- if (!pdata) {
- dev_err(&pdev->dev,
- "failed to allocate musb platform data\n");
- ret = -ENOMEM;
- goto err2;
- }
-
- config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL);
- if (!config) {
- dev_err(&pdev->dev,
- "failed to allocate musb hdrc config\n");
- ret = -ENOMEM;
- goto err2;
- }
-
- of_property_read_u32(np, "num-eps", (u32 *)&config->num_eps);
- of_property_read_u32(np, "ram-bits", (u32 *)&config->ram_bits);
- snprintf(res_name, sizeof(res_name), "port%d-mode", id);
- of_property_read_u32(np, res_name, (u32 *)&pdata->mode);
- of_property_read_u32(np, "power", (u32 *)&pdata->power);
- config->multipoint = of_property_read_bool(np, "multipoint");
-
- pdata->config = config;
+ config = devm_kzalloc(&parent->dev, sizeof(*config), GFP_KERNEL);
+ if (!config) {
+ dev_err(dev, "failed to allocate musb hdrc config\n");
+ ret = -ENOMEM;
+ goto err;
}
+ pdata.config = config;
+ pdata.platform_ops = &dsps_ops;
- pdata->platform_ops = &dsps_ops;
+ config->num_eps = get_int_prop(dn, "mentor,num-eps");
+ config->ram_bits = get_int_prop(dn, "mentor,ram-bits");
+ pdata.mode = get_musb_port_mode(dev);
+ /* DT keeps this entry in mA, musb expects it as per USB spec */
+ pdata.power = get_int_prop(dn, "mentor,power") / 2;
+ config->multipoint = of_property_read_bool(dn, "mentor,multipoint");
- ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
+ ret = platform_device_add_data(musb, &pdata, sizeof(pdata));
if (ret) {
dev_err(dev, "failed to add platform_data\n");
- goto err2;
+ goto err;
}
ret = platform_device_add(musb);
if (ret) {
dev_err(dev, "failed to register musb device\n");
- goto err2;
+ goto err;
}
-
return 0;
-err2:
+err:
platform_device_put(musb);
-err0:
return ret;
}
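get_int_prop() above folds the "property missing" case into a default of zero.
A hedged variant with an explicit caller-supplied default, for cases where zero
is itself a meaningful value (the helper name is illustrative only):

#include <linux/of.h>

static u32 read_u32_or_default(struct device_node *dn, const char *prop,
			       u32 def)
{
	u32 val;

	if (of_property_read_u32(dn, prop, &val))
		return def;	/* property absent or malformed */

	return val;
}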
@@ -601,14 +533,12 @@ static int dsps_probe(struct platform_device *pdev)
const struct of_device_id *match;
const struct dsps_musb_wrapper *wrp;
struct dsps_glue *glue;
- struct resource *iomem;
- int ret, i;
+ int ret;
match = of_match_node(musb_dsps_of_match, pdev->dev.of_node);
if (!match) {
dev_err(&pdev->dev, "fail to get matching of_match struct\n");
- ret = -EINVAL;
- goto err0;
+ return -EINVAL;
}
wrp = match->data;
@@ -616,29 +546,13 @@ static int dsps_probe(struct platform_device *pdev)
glue = kzalloc(sizeof(*glue), GFP_KERNEL);
if (!glue) {
dev_err(&pdev->dev, "unable to allocate glue memory\n");
- ret = -ENOMEM;
- goto err0;
- }
-
- /* get memory resource */
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iomem) {
- dev_err(&pdev->dev, "failed to get usbss mem resourse\n");
- ret = -ENODEV;
- goto err1;
+ return -ENOMEM;
}
glue->dev = &pdev->dev;
+ glue->wrp = wrp;
- glue->wrp = kmemdup(wrp, sizeof(*wrp), GFP_KERNEL);
- if (!glue->wrp) {
- dev_err(&pdev->dev, "failed to duplicate wrapper struct memory\n");
- ret = -ENOMEM;
- goto err1;
- }
platform_set_drvdata(pdev, glue);
-
- /* enable the usbss clocks */
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
@@ -647,17 +561,9 @@ static int dsps_probe(struct platform_device *pdev)
goto err2;
}
- /* create the child platform device for all instances of musb */
- for (i = 0; i < wrp->instances ; i++) {
- ret = dsps_create_musb_pdev(glue, i);
- if (ret != 0) {
- dev_err(&pdev->dev, "failed to create child pdev\n");
- /* release resources of previously created instances */
- for (i--; i >= 0 ; i--)
- platform_device_unregister(glue->musb[i]);
- goto err3;
- }
- }
+ ret = dsps_create_musb_pdev(glue, pdev);
+ if (ret)
+ goto err3;
return 0;
@@ -665,65 +571,27 @@ err3:
pm_runtime_put(&pdev->dev);
err2:
pm_runtime_disable(&pdev->dev);
- kfree(glue->wrp);
-err1:
kfree(glue);
-err0:
return ret;
}
+
static int dsps_remove(struct platform_device *pdev)
{
struct dsps_glue *glue = platform_get_drvdata(pdev);
- const struct dsps_musb_wrapper *wrp = glue->wrp;
- int i;
- /* delete the child platform device */
- for (i = 0; i < wrp->instances ; i++)
- platform_device_unregister(glue->musb[i]);
+ platform_device_unregister(glue->musb);
/* disable usbss clocks */
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- kfree(glue->wrp);
kfree(glue);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int dsps_suspend(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev->parent);
- struct dsps_glue *glue = platform_get_drvdata(pdev);
- const struct dsps_musb_wrapper *wrp = glue->wrp;
- int i;
-
- for (i = 0; i < wrp->instances; i++)
- musb_dsps_phy_control(glue, i, 0);
-
- return 0;
-}
-
-static int dsps_resume(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev->parent);
- struct dsps_glue *glue = platform_get_drvdata(pdev);
- const struct dsps_musb_wrapper *wrp = glue->wrp;
- int i;
-
- for (i = 0; i < wrp->instances; i++)
- musb_dsps_phy_control(glue, i, 1);
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(dsps_pm_ops, dsps_suspend, dsps_resume);
-
-static const struct dsps_musb_wrapper ti81xx_driver_data = {
+static const struct dsps_musb_wrapper am33xx_driver_data = {
.revision = 0x00,
.control = 0x14,
.status = 0x18,
- .eoi = 0x24,
.epintr_set = 0x38,
.epintr_clear = 0x40,
.epintr_status = 0x30,
@@ -745,38 +613,23 @@ static const struct dsps_musb_wrapper ti81xx_driver_data = {
.rxep_shift = 16,
.rxep_mask = 0xfffe,
.rxep_bitmap = (0xfffe << 16),
- .musb_core_offset = 0x400,
.poll_seconds = 2,
- .instances = 1,
-};
-
-static const struct platform_device_id musb_dsps_id_table[] = {
- {
- .name = "musb-ti81xx",
- .driver_data = (kernel_ulong_t) &ti81xx_driver_data,
- },
- { }, /* Terminating Entry */
};
-MODULE_DEVICE_TABLE(platform, musb_dsps_id_table);
-#ifdef CONFIG_OF
static const struct of_device_id musb_dsps_of_match[] = {
{ .compatible = "ti,musb-am33xx",
- .data = (void *) &ti81xx_driver_data, },
+ .data = (void *) &am33xx_driver_data, },
{ },
};
MODULE_DEVICE_TABLE(of, musb_dsps_of_match);
-#endif
static struct platform_driver dsps_usbss_driver = {
.probe = dsps_probe,
.remove = dsps_remove,
.driver = {
.name = "musb-dsps",
- .pm = &dsps_pm_ops,
.of_match_table = of_match_ptr(musb_dsps_of_match),
},
- .id_table = musb_dsps_id_table,
};
MODULE_DESCRIPTION("TI DSPS MUSB Glue Layer");
@@ -784,14 +637,4 @@ MODULE_AUTHOR("Ravi B <ravibabu@ti.com>");
MODULE_AUTHOR("Ajay Kumar Gupta <ajay.gupta@ti.com>");
MODULE_LICENSE("GPL v2");
-static int __init dsps_init(void)
-{
- return platform_driver_register(&dsps_usbss_driver);
-}
-subsys_initcall(dsps_init);
-
-static void __exit dsps_exit(void)
-{
- platform_driver_unregister(&dsps_usbss_driver);
-}
-module_exit(dsps_exit);
+module_platform_driver(dsps_usbss_driver);
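A quick aside on the last line: module_platform_driver() generates the same
register/unregister boilerplate that the removed dsps_init()/dsps_exit() pair
provided. Roughly, paraphrasing the helper macro rather than quoting it
verbatim, it expands to the equivalent of:

static int __init dsps_usbss_driver_init(void)
{
	return platform_driver_register(&dsps_usbss_driver);
}
module_init(dsps_usbss_driver_init);

static void __exit dsps_usbss_driver_exit(void)
{
	platform_driver_unregister(&dsps_usbss_driver);
}
module_exit(dsps_usbss_driver_exit);

One behavioural nuance: the removed code registered at subsys_initcall time,
whereas module_init() maps to device_initcall() when the driver is built in,
so probing now happens slightly later in boot.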
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 0414bc19d00..9a08679d204 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -76,13 +76,21 @@ static inline void map_dma_buffer(struct musb_request *request,
return;
if (request->request.dma == DMA_ADDR_INVALID) {
- request->request.dma = dma_map_single(
+ dma_addr_t dma_addr;
+ int ret;
+
+ dma_addr = dma_map_single(
musb->controller,
request->request.buf,
request->request.length,
request->tx
? DMA_TO_DEVICE
: DMA_FROM_DEVICE);
+ ret = dma_mapping_error(musb->controller, dma_addr);
+ if (ret)
+ return;
+
+ request->request.dma = dma_addr;
request->map_state = MUSB_MAPPED;
} else {
dma_sync_single_for_device(musb->controller,
@@ -357,47 +365,49 @@ static void txstate(struct musb *musb, struct musb_request *req)
}
}
-#elif defined(CONFIG_USB_TI_CPPI_DMA)
- /* program endpoint CSR first, then setup DMA */
- csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
- csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
- MUSB_TXCSR_MODE;
- musb_writew(epio, MUSB_TXCSR,
- (MUSB_TXCSR_P_WZC_BITS & ~MUSB_TXCSR_P_UNDERRUN)
- | csr);
-
- /* ensure writebuffer is empty */
- csr = musb_readw(epio, MUSB_TXCSR);
-
- /* NOTE host side sets DMAENAB later than this; both are
- * OK since the transfer dma glue (between CPPI and Mentor
- * fifos) just tells CPPI it could start. Data only moves
- * to the USB TX fifo when both fifos are ready.
- */
-
- /* "mode" is irrelevant here; handle terminating ZLPs like
- * PIO does, since the hardware RNDIS mode seems unreliable
- * except for the last-packet-is-already-short case.
- */
- use_dma = use_dma && c->channel_program(
- musb_ep->dma, musb_ep->packet_sz,
- 0,
- request->dma + request->actual,
- request_size);
- if (!use_dma) {
- c->channel_release(musb_ep->dma);
- musb_ep->dma = NULL;
- csr &= ~MUSB_TXCSR_DMAENAB;
- musb_writew(epio, MUSB_TXCSR, csr);
- /* invariant: prequest->buf is non-null */
- }
-#elif defined(CONFIG_USB_TUSB_OMAP_DMA)
- use_dma = use_dma && c->channel_program(
- musb_ep->dma, musb_ep->packet_sz,
- request->zero,
- request->dma + request->actual,
- request_size);
#endif
+ if (is_cppi_enabled()) {
+ /* program endpoint CSR first, then setup DMA */
+ csr &= ~(MUSB_TXCSR_P_UNDERRUN | MUSB_TXCSR_TXPKTRDY);
+ csr |= MUSB_TXCSR_DMAENAB | MUSB_TXCSR_DMAMODE |
+ MUSB_TXCSR_MODE;
+ musb_writew(epio, MUSB_TXCSR, (MUSB_TXCSR_P_WZC_BITS &
+ ~MUSB_TXCSR_P_UNDERRUN) | csr);
+
+ /* ensure writebuffer is empty */
+ csr = musb_readw(epio, MUSB_TXCSR);
+
+ /*
+ * NOTE host side sets DMAENAB later than this; both are
+ * OK since the transfer dma glue (between CPPI and
+ * Mentor fifos) just tells CPPI it could start. Data
+ * only moves to the USB TX fifo when both fifos are
+ * ready.
+ */
+ /*
+ * "mode" is irrelevant here; handle terminating ZLPs
+ * like PIO does, since the hardware RNDIS mode seems
+ * unreliable except for the
+ * last-packet-is-already-short case.
+ */
+ use_dma = use_dma && c->channel_program(
+ musb_ep->dma, musb_ep->packet_sz,
+ 0,
+ request->dma + request->actual,
+ request_size);
+ if (!use_dma) {
+ c->channel_release(musb_ep->dma);
+ musb_ep->dma = NULL;
+ csr &= ~MUSB_TXCSR_DMAENAB;
+ musb_writew(epio, MUSB_TXCSR, csr);
+ /* invariant: request->buf is non-null */
+ }
+ } else if (tusb_dma_omap())
+ use_dma = use_dma && c->channel_program(
+ musb_ep->dma, musb_ep->packet_sz,
+ request->zero,
+ request->dma + request->actual,
+ request_size);
}
#endif
@@ -1266,7 +1276,8 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
dev_dbg(musb->controller, "req %p queued to %s while ep %s\n",
req, ep->name, "disabled");
status = -ESHUTDOWN;
- goto cleanup;
+ unmap_dma_buffer(request, musb);
+ goto unlock;
}
/* add request to the list */
@@ -1276,7 +1287,7 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
musb_ep_restart(musb, request);
-cleanup:
+unlock:
spin_unlock_irqrestore(&musb->lock, lockflags);
return status;
}
@@ -1801,6 +1812,8 @@ err:
void musb_gadget_cleanup(struct musb *musb)
{
+ if (musb->port_mode == MUSB_PORT_MODE_HOST)
+ return;
usb_del_gadget_udc(&musb->g);
}
@@ -1926,7 +1939,8 @@ static int musb_gadget_stop(struct usb_gadget *g,
stop_activity(musb, driver);
otg_set_peripheral(musb->xceiv->otg, NULL);
- dev_dbg(musb->controller, "unregistering driver %s\n", driver->function);
+ dev_dbg(musb->controller, "unregistering driver %s\n",
+ driver ? driver->function : "(removed)");
musb->is_active = 0;
musb->gadget_driver = NULL;
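The first musb_gadget.c hunk above adds the check that a streaming DMA mapping
actually succeeded before the address is stored. A minimal sketch of that
pattern, with a hypothetical helper name (DMA_ADDR_INVALID mirrors the "not
mapped" sentinel the musb driver already uses):

#include <linux/device.h>
#include <linux/dma-mapping.h>

#define DMA_ADDR_INVALID	(~(dma_addr_t)0)	/* musb's sentinel */

static dma_addr_t map_one_buffer(struct device *dev, void *buf, size_t len,
				 enum dma_data_direction dir)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, dir);

	/* A failed mapping must never be handed to hardware or unmapped. */
	if (dma_mapping_error(dev, addr))
		return DMA_ADDR_INVALID;	/* caller falls back to PIO */

	return addr;
}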
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index a9695f5a92f..9a2b8c85f19 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2628,6 +2628,8 @@ int musb_host_alloc(struct musb *musb)
void musb_host_cleanup(struct musb *musb)
{
+ if (musb->port_mode == MUSB_PORT_MODE_GADGET)
+ return;
usb_remove_hcd(musb->hcd);
musb->hcd = NULL;
}
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 3d1fd52a15a..e8e9f9aab20 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -37,18 +37,10 @@
#include "musb_core.h"
#include "musbhsdma.h"
-static int dma_controller_start(struct dma_controller *c)
-{
- /* nothing to do */
- return 0;
-}
-
static void dma_channel_release(struct dma_channel *channel);
-static int dma_controller_stop(struct dma_controller *c)
+static void dma_controller_stop(struct musb_dma_controller *controller)
{
- struct musb_dma_controller *controller = container_of(c,
- struct musb_dma_controller, controller);
struct musb *musb = controller->private_data;
struct dma_channel *channel;
u8 bit;
@@ -67,8 +59,6 @@ static int dma_controller_stop(struct dma_controller *c)
}
}
}
-
- return 0;
}
static struct dma_channel *dma_channel_allocate(struct dma_controller *c,
@@ -371,8 +361,7 @@ void dma_controller_destroy(struct dma_controller *c)
struct musb_dma_controller *controller = container_of(c,
struct musb_dma_controller, controller);
- if (!controller)
- return;
+ dma_controller_stop(controller);
if (controller->irq)
free_irq(controller->irq, c);
@@ -400,8 +389,6 @@ struct dma_controller *dma_controller_create(struct musb *musb, void __iomem *ba
controller->private_data = musb;
controller->base = base;
- controller->controller.start = dma_controller_start;
- controller->controller.stop = dma_controller_stop;
controller->controller.channel_alloc = dma_channel_allocate;
controller->controller.channel_release = dma_channel_release;
controller->controller.channel_program = dma_channel_program;
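With the start/stop callbacks gone, dma_controller_destroy() above recovers the
driver-private structure straight from the generic handle via container_of().
A stand-alone sketch of that embed-and-recover idiom (the struct and function
names here are illustrative, not taken from the driver):

#include <linux/kernel.h>

struct generic_ctrl {
	int (*channel_program)(void);		/* ops seen by core code */
};

struct private_ctrl {
	struct generic_ctrl ctrl;		/* embedded generic part */
	void *regs;				/* driver-private state */
};

static struct private_ctrl *to_private(struct generic_ctrl *c)
{
	/* Valid because 'ctrl' is a member of struct private_ctrl. */
	return container_of(c, struct private_ctrl, ctrl);
}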
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 6708a3b78ad..59d2245db1c 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -255,7 +255,7 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
{
struct musb *musb = glue_to_musb(glue);
struct device *dev = musb->controller;
- struct musb_hdrc_platform_data *pdata = dev->platform_data;
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
struct omap_musb_board_data *data = pdata->board_data;
struct usb_otg *otg = musb->xceiv->otg;
@@ -341,7 +341,7 @@ static int omap2430_musb_init(struct musb *musb)
int status = 0;
struct device *dev = musb->controller;
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
- struct musb_hdrc_platform_data *plat = dev->platform_data;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
struct omap_musb_board_data *data = plat->board_data;
/* We require some kind of external transceiver, hooked
@@ -412,7 +412,7 @@ static void omap2430_musb_enable(struct musb *musb)
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
struct device *dev = musb->controller;
struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
- struct musb_hdrc_platform_data *pdata = dev->platform_data;
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
struct omap_musb_board_data *data = pdata->board_data;
switch (glue->status) {
@@ -481,8 +481,8 @@ static u64 omap2430_dmamask = DMA_BIT_MASK(32);
static int omap2430_probe(struct platform_device *pdev)
{
- struct resource musb_resources[2];
- struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
+ struct resource musb_resources[3];
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct omap_musb_board_data *data;
struct platform_device *musb;
struct omap2430_glue *glue;
@@ -581,6 +581,11 @@ static int omap2430_probe(struct platform_device *pdev)
musb_resources[1].end = pdev->resource[1].end;
musb_resources[1].flags = pdev->resource[1].flags;
+ musb_resources[2].name = pdev->resource[2].name;
+ musb_resources[2].start = pdev->resource[2].start;
+ musb_resources[2].end = pdev->resource[2].end;
+ musb_resources[2].flags = pdev->resource[2].flags;
+
ret = platform_device_add_resources(musb, musb_resources,
ARRAY_SIZE(musb_resources));
if (ret) {
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 2c06a8969a9..b3b3ed72388 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -25,7 +25,7 @@
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
-#include <linux/usb/nop-usb-xceiv.h>
+#include <linux/usb/usb_phy_gen_xceiv.h>
#include "musb_core.h"
@@ -1156,8 +1156,8 @@ static u64 tusb_dmamask = DMA_BIT_MASK(32);
static int tusb_probe(struct platform_device *pdev)
{
- struct resource musb_resources[2];
- struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
+ struct resource musb_resources[3];
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct platform_device *musb;
struct tusb6010_glue *glue;
@@ -1199,6 +1199,11 @@ static int tusb_probe(struct platform_device *pdev)
musb_resources[1].end = pdev->resource[1].end;
musb_resources[1].flags = pdev->resource[1].flags;
+ musb_resources[2].name = pdev->resource[2].name;
+ musb_resources[2].start = pdev->resource[2].start;
+ musb_resources[2].end = pdev->resource[2].end;
+ musb_resources[2].flags = pdev->resource[2].flags;
+
ret = platform_device_add_resources(musb, musb_resources,
ARRAY_SIZE(musb_resources));
if (ret) {
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index 98df17c984a..b8794eb81e9 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -66,28 +66,6 @@ struct tusb_omap_dma {
unsigned multichannel:1;
};
-static int tusb_omap_dma_start(struct dma_controller *c)
-{
- struct tusb_omap_dma *tusb_dma;
-
- tusb_dma = container_of(c, struct tusb_omap_dma, controller);
-
- /* dev_dbg(musb->controller, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */
-
- return 0;
-}
-
-static int tusb_omap_dma_stop(struct dma_controller *c)
-{
- struct tusb_omap_dma *tusb_dma;
-
- tusb_dma = container_of(c, struct tusb_omap_dma, controller);
-
- /* dev_dbg(musb->controller, "ep%i ch: %i\n", chdat->epnum, chdat->ch); */
-
- return 0;
-}
-
/*
* Allocate dmareq0 to the current channel unless it's already taken
*/
@@ -695,8 +673,6 @@ struct dma_controller *dma_controller_create(struct musb *musb, void __iomem *ba
tusb_dma->dmareq = -1;
tusb_dma->sync_dev = -1;
- tusb_dma->controller.start = tusb_omap_dma_start;
- tusb_dma->controller.stop = tusb_omap_dma_stop;
tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate;
tusb_dma->controller.channel_release = tusb_omap_dma_release;
tusb_dma->controller.channel_program = tusb_omap_dma_program;
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index fce71b60593..59256b12f74 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -227,7 +227,7 @@ ux500_of_probe(struct platform_device *pdev, struct device_node *np)
static int ux500_probe(struct platform_device *pdev)
{
struct resource musb_resources[2];
- struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
+ struct musb_hdrc_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *np = pdev->dev.of_node;
struct platform_device *musb;
struct ux500_glue *glue;
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index bfb7a65d83c..3700e971325 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -254,10 +254,8 @@ static int ux500_dma_channel_abort(struct dma_channel *channel)
return 0;
}
-static int ux500_dma_controller_stop(struct dma_controller *c)
+static void ux500_dma_controller_stop(struct ux500_dma_controller *controller)
{
- struct ux500_dma_controller *controller = container_of(c,
- struct ux500_dma_controller, controller);
struct ux500_dma_channel *ux500_channel;
struct dma_channel *channel;
u8 ch_num;
@@ -281,18 +279,14 @@ static int ux500_dma_controller_stop(struct dma_controller *c)
if (ux500_channel->dma_chan)
dma_release_channel(ux500_channel->dma_chan);
}
-
- return 0;
}
-static int ux500_dma_controller_start(struct dma_controller *c)
+static int ux500_dma_controller_start(struct ux500_dma_controller *controller)
{
- struct ux500_dma_controller *controller = container_of(c,
- struct ux500_dma_controller, controller);
struct ux500_dma_channel *ux500_channel = NULL;
struct musb *musb = controller->private_data;
struct device *dev = musb->controller;
- struct musb_hdrc_platform_data *plat = dev->platform_data;
+ struct musb_hdrc_platform_data *plat = dev_get_platdata(dev);
struct ux500_musb_board_data *data;
struct dma_channel *dma_channel = NULL;
char **chan_names;
@@ -339,7 +333,9 @@ static int ux500_dma_controller_start(struct dma_controller *c)
if (!ux500_channel->dma_chan)
ux500_channel->dma_chan =
dma_request_channel(mask,
- data->dma_filter,
+ data ?
+ data->dma_filter :
+ NULL,
param_array[ch_num]);
if (!ux500_channel->dma_chan) {
@@ -347,7 +343,7 @@ static int ux500_dma_controller_start(struct dma_controller *c)
dir, ch_num);
/* Release already allocated channels */
- ux500_dma_controller_stop(c);
+ ux500_dma_controller_stop(controller);
return -EBUSY;
}
@@ -369,6 +365,7 @@ void dma_controller_destroy(struct dma_controller *c)
struct ux500_dma_controller *controller = container_of(c,
struct ux500_dma_controller, controller);
+ ux500_dma_controller_stop(controller);
kfree(controller);
}
@@ -378,6 +375,7 @@ struct dma_controller *dma_controller_create(struct musb *musb,
struct ux500_dma_controller *controller;
struct platform_device *pdev = to_platform_device(musb->controller);
struct resource *iomem;
+ int ret;
controller = kzalloc(sizeof(*controller), GFP_KERNEL);
if (!controller)
@@ -394,14 +392,15 @@ struct dma_controller *dma_controller_create(struct musb *musb,
controller->phy_base = (dma_addr_t) iomem->start;
- controller->controller.start = ux500_dma_controller_start;
- controller->controller.stop = ux500_dma_controller_stop;
controller->controller.channel_alloc = ux500_dma_channel_allocate;
controller->controller.channel_release = ux500_dma_channel_release;
controller->controller.channel_program = ux500_dma_channel_program;
controller->controller.channel_abort = ux500_dma_channel_abort;
controller->controller.is_compatible = ux500_dma_is_compatible;
+ ret = ux500_dma_controller_start(controller);
+ if (ret)
+ goto plat_get_fail;
return &controller->controller;
plat_get_fail:
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 3622fff8b79..d5589f9c60a 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -1,22 +1,10 @@
#
# Physical Layer USB driver configuration
#
-menuconfig USB_PHY
- bool "USB Physical Layer drivers"
- help
- Most USB controllers have the physical layer signalling part
- (commonly called a PHY) built in. However, dual-role devices
- (a.k.a. USB on-the-go) which support being USB master or slave
- with the same connector often use an external PHY.
-
- The drivers in this submenu add support for such PHY devices.
- They are not needed for standard master-only (or the vast
- majority of slave-only) USB interfaces.
+menu "USB Physical Layer drivers"
- If you're not sure if this applies to you, it probably doesn't;
- say N here.
-
-if USB_PHY
+config USB_PHY
+ def_bool n
#
# USB Transceiver Drivers
@@ -24,6 +12,7 @@ if USB_PHY
config AB8500_USB
tristate "AB8500 USB Transceiver Driver"
depends on AB8500_CORE
+ select USB_PHY
help
Enable this to support the USB OTG transceiver in AB8500 chip.
This transceiver supports high and full speed devices plus,
@@ -33,12 +22,14 @@ config FSL_USB2_OTG
bool "Freescale USB OTG Transceiver Driver"
depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME
select USB_OTG
+ select USB_PHY
help
Enable this to support Freescale USB OTG transceiver.
config ISP1301_OMAP
tristate "Philips ISP1301 with OMAP OTG"
depends on I2C && ARCH_OMAP_OTG
+ select USB_PHY
help
If you say yes here you get support for the Philips ISP1301
USB-On-The-Go transceiver working with the OMAP OTG controller.
@@ -52,12 +43,14 @@ config ISP1301_OMAP
config MV_U3D_PHY
bool "Marvell USB 3.0 PHY controller Driver"
depends on CPU_MMP3
+ select USB_PHY
help
Enable this to support Marvell USB 3.0 phy controller for Marvell
SoC.
config NOP_USB_XCEIV
tristate "NOP USB Transceiver Driver"
+ select USB_PHY
help
This driver is to be used by all the usb transceiver which are either
built-in with usb ip or which are autonomous and doesn't require any
@@ -65,6 +58,7 @@ config NOP_USB_XCEIV
config OMAP_CONTROL_USB
tristate "OMAP CONTROL USB Driver"
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
help
Enable this to add support for the USB part present in the control
module. This driver has API to power on the USB2 PHY and to write to
@@ -76,6 +70,7 @@ config OMAP_USB2
tristate "OMAP USB2 PHY Driver"
depends on ARCH_OMAP2PLUS
select OMAP_CONTROL_USB
+ select USB_PHY
help
Enable this to support the transceiver that is part of SOC. This
driver takes care of all the PHY functionality apart from comparator.
@@ -84,13 +79,27 @@ config OMAP_USB2
config OMAP_USB3
tristate "OMAP USB3 PHY Driver"
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
select OMAP_CONTROL_USB
+ select USB_PHY
help
Enable this to support the USB3 PHY that is part of SOC. This
driver takes care of all the PHY functionality apart from comparator.
This driver interacts with the "OMAP Control USB Driver" to power
on/off the PHY.
+config AM335X_CONTROL_USB
+ tristate
+
+config AM335X_PHY_USB
+ tristate "AM335x USB PHY Driver"
+ select USB_PHY
+ select AM335X_CONTROL_USB
+ select NOP_USB_XCEIV
+ help
+ This driver provides PHY support for the USB PHY that is part of the
+ AM335x SoC.
+
config SAMSUNG_USBPHY
tristate
help
@@ -101,6 +110,7 @@ config SAMSUNG_USBPHY
config SAMSUNG_USB2PHY
tristate "Samsung USB 2.0 PHY controller Driver"
select SAMSUNG_USBPHY
+ select USB_PHY
help
Enable this to support Samsung USB 2.0 (High Speed) PHY controller
driver for Samsung SoCs.
@@ -108,6 +118,7 @@ config SAMSUNG_USB2PHY
config SAMSUNG_USB3PHY
tristate "Samsung USB 3.0 PHY controller Driver"
select SAMSUNG_USBPHY
+ select USB_PHY
help
Enable this to support Samsung USB 3.0 (Super Speed) phy controller
for samsung SoCs.
@@ -115,6 +126,7 @@ config SAMSUNG_USB3PHY
config TWL4030_USB
tristate "TWL4030 USB Transceiver Driver"
depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
+ select USB_PHY
help
Enable this to support the USB OTG transceiver on TWL4030
family chips (including the TWL5030 and TPS659x0 devices).
@@ -135,6 +147,7 @@ config TWL6030_USB
config USB_GPIO_VBUS
tristate "GPIO based peripheral-only VBUS sensing 'transceiver'"
depends on GPIOLIB
+ select USB_PHY
help
Provides simple GPIO VBUS sensing for controllers with an
internal transceiver via the usb_phy interface, and
@@ -145,6 +158,7 @@ config USB_ISP1301
tristate "NXP ISP1301 USB transceiver support"
depends on USB || USB_GADGET
depends on I2C
+ select USB_PHY
help
Say Y here to add support for the NXP ISP1301 USB transceiver driver.
This chip is typically used as USB transceiver for USB host, gadget
@@ -156,6 +170,7 @@ config USB_ISP1301
config USB_MSM_OTG
tristate "OTG support for Qualcomm on-chip USB controller"
depends on (USB || USB_GADGET) && ARCH_MSM
+ select USB_PHY
help
Enable this to support the USB OTG transceiver on MSM chips. It
handles PHY initialization, clock management, and workarounds
@@ -169,6 +184,7 @@ config USB_MV_OTG
tristate "Marvell USB OTG support"
depends on USB_EHCI_MV && USB_MV_UDC && PM_RUNTIME
select USB_OTG
+ select USB_PHY
help
Say Y here if you want to build Marvell USB OTG transceiver
driver in kernel (including PXA and MMP series). This driver
@@ -180,6 +196,7 @@ config USB_MXS_PHY
tristate "Freescale MXS USB PHY support"
depends on ARCH_MXC || ARCH_MXS
select STMP_DEVICE
+ select USB_PHY
help
Enable this to support the Freescale MXS USB PHY.
@@ -188,6 +205,7 @@ config USB_MXS_PHY
config USB_RCAR_PHY
tristate "Renesas R-Car USB PHY support"
depends on USB || USB_GADGET
+ select USB_PHY
help
Say Y here to add support for the Renesas R-Car USB common PHY driver.
This chip is typically used as USB PHY for USB host, gadget.
@@ -210,4 +228,4 @@ config USB_ULPI_VIEWPORT
Provides read/write operations to the ULPI phy register set for
controllers with a viewport register (e.g. Chipidea/ARC controllers).
-endif # USB_PHY
+endmenu
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index 070eca3af18..2135e85f46e 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -1,9 +1,6 @@
#
# Makefile for physical layer USB drivers
#
-
-ccflags-$(CONFIG_USB_DEBUG) := -DDEBUG
-
obj-$(CONFIG_USB_PHY) += phy.o
obj-$(CONFIG_OF) += of.o
@@ -14,8 +11,10 @@ phy-fsl-usb2-objs := phy-fsl-usb.o phy-fsm-usb.o
obj-$(CONFIG_FSL_USB2_OTG) += phy-fsl-usb2.o
obj-$(CONFIG_ISP1301_OMAP) += phy-isp1301-omap.o
obj-$(CONFIG_MV_U3D_PHY) += phy-mv-u3d-usb.o
-obj-$(CONFIG_NOP_USB_XCEIV) += phy-nop.o
+obj-$(CONFIG_NOP_USB_XCEIV) += phy-generic.o
obj-$(CONFIG_OMAP_CONTROL_USB) += phy-omap-control.o
+obj-$(CONFIG_AM335X_CONTROL_USB) += phy-am335x-control.o
+obj-$(CONFIG_AM335X_PHY_USB) += phy-am335x.o
obj-$(CONFIG_OMAP_USB2) += phy-omap-usb2.o
obj-$(CONFIG_OMAP_USB3) += phy-omap-usb3.o
obj-$(CONFIG_SAMSUNG_USBPHY) += phy-samsung-usb.o
diff --git a/drivers/usb/phy/am35x-phy-control.h b/drivers/usb/phy/am35x-phy-control.h
new file mode 100644
index 00000000000..b96594d1962
--- /dev/null
+++ b/drivers/usb/phy/am35x-phy-control.h
@@ -0,0 +1,21 @@
+#ifndef _AM335x_PHY_CONTROL_H_
+#define _AM335x_PHY_CONTROL_H_
+
+struct phy_control {
+ void (*phy_power)(struct phy_control *phy_ctrl, u32 id, bool on);
+ void (*phy_wkup)(struct phy_control *phy_ctrl, u32 id, bool on);
+};
+
+static inline void phy_ctrl_power(struct phy_control *phy_ctrl, u32 id, bool on)
+{
+ phy_ctrl->phy_power(phy_ctrl, id, on);
+}
+
+static inline void phy_ctrl_wkup(struct phy_control *phy_ctrl, u32 id, bool on)
+{
+ phy_ctrl->phy_wkup(phy_ctrl, id, on);
+}
+
+struct phy_control *am335x_get_phy_control(struct device *dev);
+
+#endif
diff --git a/drivers/usb/phy/phy-am335x-control.c b/drivers/usb/phy/phy-am335x-control.c
new file mode 100644
index 00000000000..22cf07d62e4
--- /dev/null
+++ b/drivers/usb/phy/phy-am335x-control.c
@@ -0,0 +1,137 @@
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/io.h>
+
+struct phy_control {
+ void (*phy_power)(struct phy_control *phy_ctrl, u32 id, bool on);
+ void (*phy_wkup)(struct phy_control *phy_ctrl, u32 id, bool on);
+};
+
+struct am335x_control_usb {
+ struct device *dev;
+ void __iomem *phy_reg;
+ void __iomem *wkup;
+ spinlock_t lock;
+ struct phy_control phy_ctrl;
+};
+
+#define AM335X_USB0_CTRL 0x0
+#define AM335X_USB1_CTRL 0x8
+#define AM335x_USB_WKUP 0x0
+
+#define USBPHY_CM_PWRDN (1 << 0)
+#define USBPHY_OTG_PWRDN (1 << 1)
+#define USBPHY_OTGVDET_EN (1 << 19)
+#define USBPHY_OTGSESSEND_EN (1 << 20)
+
+static void am335x_phy_power(struct phy_control *phy_ctrl, u32 id, bool on)
+{
+ struct am335x_control_usb *usb_ctrl;
+ u32 val;
+ u32 reg;
+
+ usb_ctrl = container_of(phy_ctrl, struct am335x_control_usb, phy_ctrl);
+
+ switch (id) {
+ case 0:
+ reg = AM335X_USB0_CTRL;
+ break;
+ case 1:
+ reg = AM335X_USB1_CTRL;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+
+ val = readl(usb_ctrl->phy_reg + reg);
+ if (on) {
+ val &= ~(USBPHY_CM_PWRDN | USBPHY_OTG_PWRDN);
+ val |= USBPHY_OTGVDET_EN | USBPHY_OTGSESSEND_EN;
+ } else {
+ val |= USBPHY_CM_PWRDN | USBPHY_OTG_PWRDN;
+ }
+
+ writel(val, usb_ctrl->phy_reg + reg);
+}
+
+static const struct phy_control ctrl_am335x = {
+ .phy_power = am335x_phy_power,
+};
+
+static const struct of_device_id omap_control_usb_id_table[] = {
+ { .compatible = "ti,am335x-usb-ctrl-module", .data = &ctrl_am335x },
+ {}
+};
+MODULE_DEVICE_TABLE(of, omap_control_usb_id_table);
+
+static struct platform_driver am335x_control_driver;
+static int match(struct device *dev, void *data)
+{
+ struct device_node *node = (struct device_node *)data;
+ return dev->of_node == node &&
+ dev->driver == &am335x_control_driver.driver;
+}
+
+struct phy_control *am335x_get_phy_control(struct device *dev)
+{
+ struct device_node *node;
+ struct am335x_control_usb *ctrl_usb;
+
+ node = of_parse_phandle(dev->of_node, "ti,ctrl_mod", 0);
+ if (!node)
+ return NULL;
+
+ dev = bus_find_device(&platform_bus_type, NULL, node, match);
+ if (!dev)
+ return NULL;
+
+ ctrl_usb = dev_get_drvdata(dev);
+ if (!ctrl_usb)
+ return NULL;
+ return &ctrl_usb->phy_ctrl;
+}
+EXPORT_SYMBOL_GPL(am335x_get_phy_control);
+
+static int am335x_control_usb_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct am335x_control_usb *ctrl_usb;
+ const struct of_device_id *of_id;
+ const struct phy_control *phy_ctrl;
+
+ of_id = of_match_node(omap_control_usb_id_table, pdev->dev.of_node);
+ if (!of_id)
+ return -EINVAL;
+
+ phy_ctrl = of_id->data;
+
+ ctrl_usb = devm_kzalloc(&pdev->dev, sizeof(*ctrl_usb), GFP_KERNEL);
+ if (!ctrl_usb) {
+ dev_err(&pdev->dev, "unable to alloc memory for control usb\n");
+ return -ENOMEM;
+ }
+
+ ctrl_usb->dev = &pdev->dev;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_ctrl");
+ ctrl_usb->phy_reg = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ctrl_usb->phy_reg))
+ return PTR_ERR(ctrl_usb->phy_reg);
+ spin_lock_init(&ctrl_usb->lock);
+ ctrl_usb->phy_ctrl = *phy_ctrl;
+
+ dev_set_drvdata(ctrl_usb->dev, ctrl_usb);
+ return 0;
+}
+
+static struct platform_driver am335x_control_driver = {
+ .probe = am335x_control_usb_probe,
+ .driver = {
+ .name = "am335x-control-usb",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(omap_control_usb_id_table),
+ },
+};
+
+module_platform_driver(am335x_control_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
new file mode 100644
index 00000000000..c4d614d1f17
--- /dev/null
+++ b/drivers/usb/phy/phy-am335x.c
@@ -0,0 +1,99 @@
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/usb_phy_gen_xceiv.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "am35x-phy-control.h"
+#include "phy-generic.h"
+
+struct am335x_phy {
+ struct usb_phy_gen_xceiv usb_phy_gen;
+ struct phy_control *phy_ctrl;
+ int id;
+};
+
+static int am335x_init(struct usb_phy *phy)
+{
+ struct am335x_phy *am_phy = dev_get_drvdata(phy->dev);
+
+ phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, true);
+ return 0;
+}
+
+static void am335x_shutdown(struct usb_phy *phy)
+{
+ struct am335x_phy *am_phy = dev_get_drvdata(phy->dev);
+
+ phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, false);
+}
+
+static int am335x_phy_probe(struct platform_device *pdev)
+{
+ struct am335x_phy *am_phy;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ am_phy = devm_kzalloc(dev, sizeof(*am_phy), GFP_KERNEL);
+ if (!am_phy)
+ return -ENOMEM;
+
+ am_phy->phy_ctrl = am335x_get_phy_control(dev);
+ if (!am_phy->phy_ctrl)
+ return -EPROBE_DEFER;
+ am_phy->id = of_alias_get_id(pdev->dev.of_node, "phy");
+ if (am_phy->id < 0) {
+ dev_err(&pdev->dev, "Missing PHY id: %d\n", am_phy->id);
+ return am_phy->id;
+ }
+
+ ret = usb_phy_gen_create_phy(dev, &am_phy->usb_phy_gen,
+ USB_PHY_TYPE_USB2, 0, false, false);
+ if (ret)
+ return ret;
+
+ ret = usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
+ if (ret)
+ goto err_add;
+ am_phy->usb_phy_gen.phy.init = am335x_init;
+ am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown;
+
+ platform_set_drvdata(pdev, am_phy);
+ return 0;
+
+err_add:
+ usb_phy_gen_cleanup_phy(&am_phy->usb_phy_gen);
+ return ret;
+}
+
+static int am335x_phy_remove(struct platform_device *pdev)
+{
+ struct am335x_phy *am_phy = platform_get_drvdata(pdev);
+
+ usb_remove_phy(&am_phy->usb_phy_gen.phy);
+ return 0;
+}
+
+static const struct of_device_id am335x_phy_ids[] = {
+ { .compatible = "ti,am335x-usb-phy" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, am335x_phy_ids);
+
+static struct platform_driver am335x_phy_driver = {
+ .probe = am335x_phy_probe,
+ .remove = am335x_phy_remove,
+ .driver = {
+ .name = "am335x-phy-driver",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(am335x_phy_ids),
+ },
+};
+
+module_platform_driver(am335x_phy_driver);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index e771bafb9f1..fa7c9f9628b 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -611,7 +611,7 @@ static int fsl_otg_set_peripheral(struct usb_otg *otg,
otg_dev->fsm.b_bus_req = 1;
/* start the gadget right away if the ID pin says Mini-B */
- DBG("ID pin=%d\n", otg_dev->fsm.id);
+ pr_debug("ID pin=%d\n", otg_dev->fsm.id);
if (otg_dev->fsm.id == 1) {
fsl_otg_start_host(&otg_dev->fsm, 0);
otg_drv_vbus(&otg_dev->fsm, 0);
@@ -684,7 +684,7 @@ static int fsl_otg_start_hnp(struct usb_otg *otg)
if (otg_dev != fsl_otg_dev)
return -ENODEV;
- DBG("start_hnp...n");
+ pr_debug("start_hnp...\n");
/* clear a_bus_req to enter a_suspend state */
otg_dev->fsm.a_bus_req = 0;
@@ -834,7 +834,7 @@ int usb_otg_start(struct platform_device *pdev)
int status;
struct resource *res;
u32 temp;
- struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
p_otg = container_of(otg_trans, struct fsl_otg, phy);
fsm = &p_otg->fsm;
@@ -941,7 +941,7 @@ int usb_otg_start(struct platform_device *pdev)
p_otg->fsm.id = 0;
}
- DBG("initial ID pin=%d\n", p_otg->fsm.id);
+ pr_debug("initial ID pin=%d\n", p_otg->fsm.id);
/* enable OTG ID pin interrupt */
temp = fsl_readl(&p_otg->dr_mem_map->otgsc);
@@ -1105,7 +1105,7 @@ static int fsl_otg_probe(struct platform_device *pdev)
{
int ret;
- if (!pdev->dev.platform_data)
+ if (!dev_get_platdata(&pdev->dev))
return -ENODEV;
/* configure the OTG */
@@ -1137,7 +1137,7 @@ static int fsl_otg_probe(struct platform_device *pdev)
static int fsl_otg_remove(struct platform_device *pdev)
{
- struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
+ struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
usb_remove_phy(&fsl_otg_dev->phy);
free_irq(fsl_otg_dev->irq, fsl_otg_dev);
diff --git a/drivers/usb/phy/phy-fsl-usb.h b/drivers/usb/phy/phy-fsl-usb.h
index ca266280895..e1859b8ef56 100644
--- a/drivers/usb/phy/phy-fsl-usb.h
+++ b/drivers/usb/phy/phy-fsl-usb.h
@@ -15,7 +15,7 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include "otg_fsm.h"
+#include "phy-fsm-usb.h"
#include <linux/usb/otg.h>
#include <linux/ioctl.h>
diff --git a/drivers/usb/phy/phy-fsm-usb.c b/drivers/usb/phy/phy-fsm-usb.c
index c520b3548e7..7f4596606e1 100644
--- a/drivers/usb/phy/phy-fsm-usb.c
+++ b/drivers/usb/phy/phy-fsm-usb.c
@@ -29,7 +29,7 @@
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
-#include "phy-otg-fsm.h"
+#include "phy-fsm-usb.h"
/* Change USB protocol when there is a protocol change */
static int otg_set_protocol(struct otg_fsm *fsm, int protocol)
diff --git a/drivers/usb/phy/phy-fsm-usb.h b/drivers/usb/phy/phy-fsm-usb.h
index c30a2e1d9e4..fbe586206f3 100644
--- a/drivers/usb/phy/phy-fsm-usb.h
+++ b/drivers/usb/phy/phy-fsm-usb.h
@@ -15,18 +15,11 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#undef DEBUG
#undef VERBOSE
-#ifdef DEBUG
-#define DBG(fmt, args...) printk(KERN_DEBUG "[%s] " fmt , \
- __func__, ## args)
-#else
-#define DBG(fmt, args...) do {} while (0)
-#endif
-
#ifdef VERBOSE
-#define VDBG DBG
+#define VDBG(fmt, args...) pr_debug("[%s] " fmt , \
+ __func__, ## args)
#else
#define VDBG(stuff...) do {} while (0)
#endif
diff --git a/drivers/usb/phy/phy-nop.c b/drivers/usb/phy/phy-generic.c
index 55445e5d72e..efe59f3f7fd 100644
--- a/drivers/usb/phy/phy-nop.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -30,19 +30,13 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/usb/otg.h>
-#include <linux/usb/nop-usb-xceiv.h>
+#include <linux/usb/usb_phy_gen_xceiv.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include <linux/of.h>
-struct nop_usb_xceiv {
- struct usb_phy phy;
- struct device *dev;
- struct clk *clk;
- struct regulator *vcc;
- struct regulator *reset;
-};
+#include "phy-generic.h"
static struct platform_device *pd;
@@ -50,9 +44,9 @@ void usb_nop_xceiv_register(void)
{
if (pd)
return;
- pd = platform_device_register_simple("nop_usb_xceiv", -1, NULL, 0);
+ pd = platform_device_register_simple("usb_phy_gen_xceiv", -1, NULL, 0);
if (!pd) {
- printk(KERN_ERR "Unable to register usb nop transceiver\n");
+ pr_err("Unable to register generic usb transceiver\n");
return;
}
}
@@ -70,9 +64,9 @@ static int nop_set_suspend(struct usb_phy *x, int suspend)
return 0;
}
-static int nop_init(struct usb_phy *phy)
+int usb_gen_phy_init(struct usb_phy *phy)
{
- struct nop_usb_xceiv *nop = dev_get_drvdata(phy->dev);
+ struct usb_phy_gen_xceiv *nop = dev_get_drvdata(phy->dev);
if (!IS_ERR(nop->vcc)) {
if (regulator_enable(nop->vcc))
@@ -90,10 +84,11 @@ static int nop_init(struct usb_phy *phy)
return 0;
}
+EXPORT_SYMBOL_GPL(usb_gen_phy_init);
-static void nop_shutdown(struct usb_phy *phy)
+void usb_gen_phy_shutdown(struct usb_phy *phy)
{
- struct nop_usb_xceiv *nop = dev_get_drvdata(phy->dev);
+ struct usb_phy_gen_xceiv *nop = dev_get_drvdata(phy->dev);
if (!IS_ERR(nop->reset)) {
/* Assert RESET */
@@ -109,6 +104,7 @@ static void nop_shutdown(struct usb_phy *phy)
dev_err(phy->dev, "Failed to disable power\n");
}
}
+EXPORT_SYMBOL_GPL(usb_gen_phy_shutdown);
static int nop_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget)
{
@@ -139,52 +135,27 @@ static int nop_set_host(struct usb_otg *otg, struct usb_bus *host)
return 0;
}
-static int nop_usb_xceiv_probe(struct platform_device *pdev)
+int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop,
+ enum usb_phy_type type, u32 clk_rate, bool needs_vcc,
+ bool needs_reset)
{
- struct device *dev = &pdev->dev;
- struct nop_usb_xceiv_platform_data *pdata = pdev->dev.platform_data;
- struct nop_usb_xceiv *nop;
- enum usb_phy_type type = USB_PHY_TYPE_USB2;
int err;
- u32 clk_rate = 0;
- bool needs_vcc = false;
- bool needs_reset = false;
-
- nop = devm_kzalloc(&pdev->dev, sizeof(*nop), GFP_KERNEL);
- if (!nop)
- return -ENOMEM;
- nop->phy.otg = devm_kzalloc(&pdev->dev, sizeof(*nop->phy.otg),
- GFP_KERNEL);
+ nop->phy.otg = devm_kzalloc(dev, sizeof(*nop->phy.otg),
+ GFP_KERNEL);
if (!nop->phy.otg)
return -ENOMEM;
- if (dev->of_node) {
- struct device_node *node = dev->of_node;
-
- if (of_property_read_u32(node, "clock-frequency", &clk_rate))
- clk_rate = 0;
-
- needs_vcc = of_property_read_bool(node, "vcc-supply");
- needs_reset = of_property_read_bool(node, "reset-supply");
-
- } else if (pdata) {
- type = pdata->type;
- clk_rate = pdata->clk_rate;
- needs_vcc = pdata->needs_vcc;
- needs_reset = pdata->needs_reset;
- }
-
- nop->clk = devm_clk_get(&pdev->dev, "main_clk");
+ nop->clk = devm_clk_get(dev, "main_clk");
if (IS_ERR(nop->clk)) {
- dev_dbg(&pdev->dev, "Can't get phy clock: %ld\n",
+ dev_dbg(dev, "Can't get phy clock: %ld\n",
PTR_ERR(nop->clk));
}
if (!IS_ERR(nop->clk) && clk_rate) {
err = clk_set_rate(nop->clk, clk_rate);
if (err) {
- dev_err(&pdev->dev, "Error setting clock rate\n");
+ dev_err(dev, "Error setting clock rate\n");
return err;
}
}
@@ -192,33 +163,31 @@ static int nop_usb_xceiv_probe(struct platform_device *pdev)
if (!IS_ERR(nop->clk)) {
err = clk_prepare(nop->clk);
if (err) {
- dev_err(&pdev->dev, "Error preparing clock\n");
+ dev_err(dev, "Error preparing clock\n");
return err;
}
}
- nop->vcc = devm_regulator_get(&pdev->dev, "vcc");
+ nop->vcc = devm_regulator_get(dev, "vcc");
if (IS_ERR(nop->vcc)) {
- dev_dbg(&pdev->dev, "Error getting vcc regulator: %ld\n",
+ dev_dbg(dev, "Error getting vcc regulator: %ld\n",
PTR_ERR(nop->vcc));
if (needs_vcc)
return -EPROBE_DEFER;
}
- nop->reset = devm_regulator_get(&pdev->dev, "reset");
+ nop->reset = devm_regulator_get(dev, "reset");
if (IS_ERR(nop->reset)) {
- dev_dbg(&pdev->dev, "Error getting reset regulator: %ld\n",
+ dev_dbg(dev, "Error getting reset regulator: %ld\n",
PTR_ERR(nop->reset));
if (needs_reset)
return -EPROBE_DEFER;
}
- nop->dev = &pdev->dev;
+ nop->dev = dev;
nop->phy.dev = nop->dev;
nop->phy.label = "nop-xceiv";
nop->phy.set_suspend = nop_set_suspend;
- nop->phy.init = nop_init;
- nop->phy.shutdown = nop_shutdown;
nop->phy.state = OTG_STATE_UNDEFINED;
nop->phy.type = type;
@@ -226,6 +195,59 @@ static int nop_usb_xceiv_probe(struct platform_device *pdev)
nop->phy.otg->set_host = nop_set_host;
nop->phy.otg->set_peripheral = nop_set_peripheral;
+ ATOMIC_INIT_NOTIFIER_HEAD(&nop->phy.notifier);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(usb_phy_gen_create_phy);
+
+void usb_phy_gen_cleanup_phy(struct usb_phy_gen_xceiv *nop)
+{
+ if (!IS_ERR(nop->clk))
+ clk_unprepare(nop->clk);
+}
+EXPORT_SYMBOL_GPL(usb_phy_gen_cleanup_phy);
+
+static int usb_phy_gen_xceiv_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct usb_phy_gen_xceiv_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
+ struct usb_phy_gen_xceiv *nop;
+ enum usb_phy_type type = USB_PHY_TYPE_USB2;
+ int err;
+ u32 clk_rate = 0;
+ bool needs_vcc = false;
+ bool needs_reset = false;
+
+ if (dev->of_node) {
+ struct device_node *node = dev->of_node;
+
+ if (of_property_read_u32(node, "clock-frequency", &clk_rate))
+ clk_rate = 0;
+
+ needs_vcc = of_property_read_bool(node, "vcc-supply");
+ needs_reset = of_property_read_bool(node, "reset-supply");
+
+ } else if (pdata) {
+ type = pdata->type;
+ clk_rate = pdata->clk_rate;
+ needs_vcc = pdata->needs_vcc;
+ needs_reset = pdata->needs_reset;
+ }
+
+ nop = devm_kzalloc(dev, sizeof(*nop), GFP_KERNEL);
+ if (!nop)
+ return -ENOMEM;
+
+ err = usb_phy_gen_create_phy(dev, nop, type, clk_rate, needs_vcc,
+ needs_reset);
+ if (err)
+ return err;
+
+ nop->phy.init = usb_gen_phy_init;
+ nop->phy.shutdown = usb_gen_phy_shutdown;
+
err = usb_add_phy_dev(&nop->phy);
if (err) {
dev_err(&pdev->dev, "can't register transceiver, err: %d\n",
@@ -235,23 +257,18 @@ static int nop_usb_xceiv_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, nop);
- ATOMIC_INIT_NOTIFIER_HEAD(&nop->phy.notifier);
-
return 0;
err_add:
- if (!IS_ERR(nop->clk))
- clk_unprepare(nop->clk);
+ usb_phy_gen_cleanup_phy(nop);
return err;
}
-static int nop_usb_xceiv_remove(struct platform_device *pdev)
+static int usb_phy_gen_xceiv_remove(struct platform_device *pdev)
{
- struct nop_usb_xceiv *nop = platform_get_drvdata(pdev);
-
- if (!IS_ERR(nop->clk))
- clk_unprepare(nop->clk);
+ struct usb_phy_gen_xceiv *nop = platform_get_drvdata(pdev);
+ usb_phy_gen_cleanup_phy(nop);
usb_remove_phy(&nop->phy);
return 0;
@@ -264,29 +281,29 @@ static const struct of_device_id nop_xceiv_dt_ids[] = {
MODULE_DEVICE_TABLE(of, nop_xceiv_dt_ids);
-static struct platform_driver nop_usb_xceiv_driver = {
- .probe = nop_usb_xceiv_probe,
- .remove = nop_usb_xceiv_remove,
+static struct platform_driver usb_phy_gen_xceiv_driver = {
+ .probe = usb_phy_gen_xceiv_probe,
+ .remove = usb_phy_gen_xceiv_remove,
.driver = {
- .name = "nop_usb_xceiv",
+ .name = "usb_phy_gen_xceiv",
.owner = THIS_MODULE,
.of_match_table = nop_xceiv_dt_ids,
},
};
-static int __init nop_usb_xceiv_init(void)
+static int __init usb_phy_gen_xceiv_init(void)
{
- return platform_driver_register(&nop_usb_xceiv_driver);
+ return platform_driver_register(&usb_phy_gen_xceiv_driver);
}
-subsys_initcall(nop_usb_xceiv_init);
+subsys_initcall(usb_phy_gen_xceiv_init);
-static void __exit nop_usb_xceiv_exit(void)
+static void __exit usb_phy_gen_xceiv_exit(void)
{
- platform_driver_unregister(&nop_usb_xceiv_driver);
+ platform_driver_unregister(&usb_phy_gen_xceiv_driver);
}
-module_exit(nop_usb_xceiv_exit);
+module_exit(usb_phy_gen_xceiv_exit);
-MODULE_ALIAS("platform:nop_usb_xceiv");
+MODULE_ALIAS("platform:usb_phy_gen_xceiv");
MODULE_AUTHOR("Texas Instruments Inc");
MODULE_DESCRIPTION("NOP USB Transceiver driver");
MODULE_LICENSE("GPL");
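The refactor above turns the old nop transceiver probe into reusable helpers
(usb_phy_gen_create_phy() and friends) precisely so wrapper PHY drivers can
embed struct usb_phy_gen_xceiv and supply their own init/shutdown hooks, as
phy-am335x.c earlier in this series does. A trimmed, hypothetical consumer to
show the intended call sequence (driver and struct names are made up):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb/otg.h>
#include <linux/usb/usb_phy_gen_xceiv.h>

#include "phy-generic.h"

struct my_wrapper_phy {
	struct usb_phy_gen_xceiv nop;	/* generic PHY state, embedded */
};

static int my_wrapper_probe(struct platform_device *pdev)
{
	struct my_wrapper_phy *p;
	int ret;

	p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = usb_phy_gen_create_phy(&pdev->dev, &p->nop,
				     USB_PHY_TYPE_USB2, 0, false, false);
	if (ret)
		return ret;

	/* Reuse the generic hooks, or wrap them as phy-am335x.c does. */
	p->nop.phy.init = usb_gen_phy_init;
	p->nop.phy.shutdown = usb_gen_phy_shutdown;

	ret = usb_add_phy_dev(&p->nop.phy);
	if (ret) {
		usb_phy_gen_cleanup_phy(&p->nop);
		return ret;
	}

	platform_set_drvdata(pdev, p);
	return 0;
}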
diff --git a/drivers/usb/phy/phy-generic.h b/drivers/usb/phy/phy-generic.h
new file mode 100644
index 00000000000..61687d5a965
--- /dev/null
+++ b/drivers/usb/phy/phy-generic.h
@@ -0,0 +1,20 @@
+#ifndef _PHY_GENERIC_H_
+#define _PHY_GENERIC_H_
+
+struct usb_phy_gen_xceiv {
+ struct usb_phy phy;
+ struct device *dev;
+ struct clk *clk;
+ struct regulator *vcc;
+ struct regulator *reset;
+};
+
+int usb_gen_phy_init(struct usb_phy *phy);
+void usb_gen_phy_shutdown(struct usb_phy *phy);
+
+int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_gen_xceiv *nop,
+ enum usb_phy_type type, u32 clk_rate, bool needs_vcc,
+ bool needs_reset);
+void usb_phy_gen_cleanup_phy(struct usb_phy_gen_xceiv *nop);
+
+#endif
diff --git a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c
index 8443335c2ea..b2f29c9aebb 100644
--- a/drivers/usb/phy/phy-gpio-vbus-usb.c
+++ b/drivers/usb/phy/phy-gpio-vbus-usb.c
@@ -101,7 +101,7 @@ static void gpio_vbus_work(struct work_struct *work)
{
struct gpio_vbus_data *gpio_vbus =
container_of(work, struct gpio_vbus_data, work.work);
- struct gpio_vbus_mach_info *pdata = gpio_vbus->dev->platform_data;
+ struct gpio_vbus_mach_info *pdata = dev_get_platdata(gpio_vbus->dev);
int gpio, status, vbus;
if (!gpio_vbus->phy.otg->gadget)
@@ -155,7 +155,7 @@ static void gpio_vbus_work(struct work_struct *work)
static irqreturn_t gpio_vbus_irq(int irq, void *data)
{
struct platform_device *pdev = data;
- struct gpio_vbus_mach_info *pdata = pdev->dev.platform_data;
+ struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev);
struct usb_otg *otg = gpio_vbus->phy.otg;
@@ -182,7 +182,7 @@ static int gpio_vbus_set_peripheral(struct usb_otg *otg,
gpio_vbus = container_of(otg->phy, struct gpio_vbus_data, phy);
pdev = to_platform_device(gpio_vbus->dev);
- pdata = gpio_vbus->dev->platform_data;
+ pdata = dev_get_platdata(gpio_vbus->dev);
gpio = pdata->gpio_pullup;
if (!gadget) {
@@ -243,7 +243,7 @@ static int gpio_vbus_set_suspend(struct usb_phy *phy, int suspend)
static int __init gpio_vbus_probe(struct platform_device *pdev)
{
- struct gpio_vbus_mach_info *pdata = pdev->dev.platform_data;
+ struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
struct gpio_vbus_data *gpio_vbus;
struct resource *res;
int err, gpio, irq;
@@ -352,7 +352,7 @@ err_gpio:
static int __exit gpio_vbus_remove(struct platform_device *pdev)
{
struct gpio_vbus_data *gpio_vbus = platform_get_drvdata(pdev);
- struct gpio_vbus_mach_info *pdata = pdev->dev.platform_data;
+ struct gpio_vbus_mach_info *pdata = dev_get_platdata(&pdev->dev);
int gpio = pdata->gpio_vbus;
device_init_wakeup(&pdev->dev, 0);
diff --git a/drivers/usb/phy/phy-isp1301-omap.c b/drivers/usb/phy/phy-isp1301-omap.c
index ae481afcb3e..d3a5160e4cc 100644
--- a/drivers/usb/phy/phy-isp1301-omap.c
+++ b/drivers/usb/phy/phy-isp1301-omap.c
@@ -40,9 +40,7 @@
#include <mach/usb.h>
-#ifndef DEBUG
-#undef VERBOSE
-#endif
+#undef VERBOSE
#define DRIVER_VERSION "24 August 2004"
@@ -387,7 +385,6 @@ static void b_idle(struct isp1301 *isp, const char *tag)
static void
dump_regs(struct isp1301 *isp, const char *label)
{
-#ifdef DEBUG
u8 ctrl = isp1301_get_u8(isp, ISP1301_OTG_CONTROL_1);
u8 status = isp1301_get_u8(isp, ISP1301_OTG_STATUS);
u8 src = isp1301_get_u8(isp, ISP1301_INTERRUPT_SOURCE);
@@ -396,7 +393,6 @@ dump_regs(struct isp1301 *isp, const char *label)
omap_readl(OTG_CTRL), label, state_name(isp),
ctrl, status, src);
/* mode control and irq enables don't change much */
-#endif
}
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index d08f33435e9..e9d4cd960ec 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -1419,7 +1419,7 @@ static int __init msm_otg_probe(struct platform_device *pdev)
struct usb_phy *phy;
dev_info(&pdev->dev, "msm_otg probe\n");
- if (!pdev->dev.platform_data) {
+ if (!dev_get_platdata(&pdev->dev)) {
dev_err(&pdev->dev, "No platform data given. Bailing out\n");
return -ENODEV;
}
@@ -1436,7 +1436,7 @@ static int __init msm_otg_probe(struct platform_device *pdev)
return -ENOMEM;
}
- motg->pdata = pdev->dev.platform_data;
+ motg->pdata = dev_get_platdata(&pdev->dev);
phy = &motg->phy;
phy->dev = &pdev->dev;
diff --git a/drivers/usb/phy/phy-mv-u3d-usb.c b/drivers/usb/phy/phy-mv-u3d-usb.c
index 1568ea63e33..d317903022b 100644
--- a/drivers/usb/phy/phy-mv-u3d-usb.c
+++ b/drivers/usb/phy/phy-mv-u3d-usb.c
@@ -82,7 +82,7 @@ static void mv_u3d_phy_write(void __iomem *base, u32 reg, u32 value)
writel_relaxed(value, data);
}
-void mv_u3d_phy_shutdown(struct usb_phy *phy)
+static void mv_u3d_phy_shutdown(struct usb_phy *phy)
{
struct mv_u3d_phy *mv_u3d_phy;
void __iomem *base;
@@ -271,7 +271,7 @@ static int mv_u3d_phy_probe(struct platform_device *pdev)
void __iomem *phy_base;
int ret;
- pdata = pdev->dev.platform_data;
+ pdata = dev_get_platdata(&pdev->dev);
if (!pdata) {
dev_err(&pdev->dev, "%s: no platform data defined\n", __func__);
return -EINVAL;
diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c
index 4a6b03c7387..98f6ac6a78e 100644
--- a/drivers/usb/phy/phy-mv-usb.c
+++ b/drivers/usb/phy/phy-mv-usb.c
@@ -653,7 +653,7 @@ static struct attribute_group inputs_attr_group = {
.attrs = inputs_attrs,
};
-int mv_otg_remove(struct platform_device *pdev)
+static int mv_otg_remove(struct platform_device *pdev)
{
struct mv_otg *mvotg = platform_get_drvdata(pdev);
@@ -673,7 +673,7 @@ int mv_otg_remove(struct platform_device *pdev)
static int mv_otg_probe(struct platform_device *pdev)
{
- struct mv_usb_platform_data *pdata = pdev->dev.platform_data;
+ struct mv_usb_platform_data *pdata = dev_get_platdata(&pdev->dev);
struct mv_otg *mvotg;
struct usb_otg *otg;
struct resource *r;
@@ -893,7 +893,7 @@ static int mv_otg_resume(struct platform_device *pdev)
static struct platform_driver mv_otg_driver = {
.probe = mv_otg_probe,
- .remove = __exit_p(mv_otg_remove),
+ .remove = mv_otg_remove,
.driver = {
.owner = THIS_MODULE,
.name = driver_name,
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index bd601c537c8..fdd33b44dbd 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -41,11 +41,14 @@ struct mxs_phy {
#define to_mxs_phy(p) container_of((p), struct mxs_phy, phy)
-static void mxs_phy_hw_init(struct mxs_phy *mxs_phy)
+static int mxs_phy_hw_init(struct mxs_phy *mxs_phy)
{
+ int ret;
void __iomem *base = mxs_phy->phy.io_priv;
- stmp_reset_block(base + HW_USBPHY_CTRL);
+ ret = stmp_reset_block(base + HW_USBPHY_CTRL);
+ if (ret)
+ return ret;
/* Power up the PHY */
writel(0, base + HW_USBPHY_PWD);
@@ -54,6 +57,8 @@ static void mxs_phy_hw_init(struct mxs_phy *mxs_phy)
writel(BM_USBPHY_CTRL_ENUTMILEVEL2 |
BM_USBPHY_CTRL_ENUTMILEVEL3,
base + HW_USBPHY_CTRL_SET);
+
+ return 0;
}
static int mxs_phy_init(struct usb_phy *phy)
@@ -61,9 +66,7 @@ static int mxs_phy_init(struct usb_phy *phy)
struct mxs_phy *mxs_phy = to_mxs_phy(phy);
clk_prepare_enable(mxs_phy->clk);
- mxs_phy_hw_init(mxs_phy);
-
- return 0;
+ return mxs_phy_hw_init(mxs_phy);
}
static void mxs_phy_shutdown(struct usb_phy *phy)
diff --git a/drivers/usb/phy/phy-omap-control.c b/drivers/usb/phy/phy-omap-control.c
index 1419ceda975..a4dda8e1256 100644
--- a/drivers/usb/phy/phy-omap-control.c
+++ b/drivers/usb/phy/phy-omap-control.c
@@ -197,7 +197,8 @@ static int omap_control_usb_probe(struct platform_device *pdev)
{
struct resource *res;
struct device_node *np = pdev->dev.of_node;
- struct omap_control_usb_platform_data *pdata = pdev->dev.platform_data;
+ struct omap_control_usb_platform_data *pdata =
+ dev_get_platdata(&pdev->dev);
control_usb = devm_kzalloc(&pdev->dev, sizeof(*control_usb),
GFP_KERNEL);
diff --git a/drivers/usb/phy/phy-omap-usb2.c b/drivers/usb/phy/phy-omap-usb2.c
index 844ab68f08d..d266861d24f 100644
--- a/drivers/usb/phy/phy-omap-usb2.c
+++ b/drivers/usb/phy/phy-omap-usb2.c
@@ -98,8 +98,8 @@ static int omap_usb_set_peripheral(struct usb_otg *otg,
static int omap_usb2_suspend(struct usb_phy *x, int suspend)
{
- u32 ret;
struct omap_usb *phy = phy_to_omapusb(x);
+ int ret;
if (suspend && !phy->is_suspended) {
omap_control_usb_phy_power(phy->control_dev, 0);
@@ -108,8 +108,7 @@ static int omap_usb2_suspend(struct usb_phy *x, int suspend)
} else if (!suspend && phy->is_suspended) {
ret = pm_runtime_get_sync(phy->dev);
if (ret < 0) {
- dev_err(phy->dev, "get_sync failed with err %d\n",
- ret);
+ dev_err(phy->dev, "get_sync failed with err %d\n", ret);
return ret;
}
omap_control_usb_phy_power(phy->control_dev, 1);
@@ -209,9 +208,9 @@ static int omap_usb2_runtime_suspend(struct device *dev)
static int omap_usb2_runtime_resume(struct device *dev)
{
- u32 ret = 0;
struct platform_device *pdev = to_platform_device(dev);
struct omap_usb *phy = platform_get_drvdata(pdev);
+ int ret;
ret = clk_enable(phy->wkupclk);
if (ret < 0) {
diff --git a/drivers/usb/phy/phy-omap-usb3.c b/drivers/usb/phy/phy-omap-usb3.c
index efe6e1464f4..fc15694d303 100644
--- a/drivers/usb/phy/phy-omap-usb3.c
+++ b/drivers/usb/phy/phy-omap-usb3.c
@@ -27,7 +27,6 @@
#include <linux/delay.h>
#include <linux/usb/omap_control_usb.h>
-#define NUM_SYS_CLKS 6
#define PLL_STATUS 0x00000004
#define PLL_GO 0x00000008
#define PLL_CONFIGURATION1 0x0000000C
@@ -57,26 +56,32 @@
*/
# define PLL_IDLE_TIME 100;
-enum sys_clk_rate {
- CLK_RATE_UNDEFINED = -1,
- CLK_RATE_12MHZ,
- CLK_RATE_16MHZ,
- CLK_RATE_19MHZ,
- CLK_RATE_20MHZ,
- CLK_RATE_26MHZ,
- CLK_RATE_38MHZ
+struct usb_dpll_map {
+ unsigned long rate;
+ struct usb_dpll_params params;
};
-static struct usb_dpll_params omap_usb3_dpll_params[NUM_SYS_CLKS] = {
- {1250, 5, 4, 20, 0}, /* 12 MHz */
- {3125, 20, 4, 20, 0}, /* 16.8 MHz */
- {1172, 8, 4, 20, 65537}, /* 19.2 MHz */
- {1250, 12, 4, 20, 0}, /* 26 MHz */
- {3125, 47, 4, 20, 92843}, /* 38.4 MHz */
- {1000, 7, 4, 10, 0}, /* 20 MHz */
-
+static struct usb_dpll_map dpll_map[] = {
+ {12000000, {1250, 5, 4, 20, 0} }, /* 12 MHz */
+ {16800000, {3125, 20, 4, 20, 0} }, /* 16.8 MHz */
+ {19200000, {1172, 8, 4, 20, 65537} }, /* 19.2 MHz */
+ {20000000, {1000, 7, 4, 10, 0} }, /* 20 MHz */
+ {26000000, {1250, 12, 4, 20, 0} }, /* 26 MHz */
+ {38400000, {3125, 47, 4, 20, 92843} }, /* 38.4 MHz */
};
+static struct usb_dpll_params *omap_usb3_get_dpll_params(unsigned long rate)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpll_map); i++) {
+ if (rate == dpll_map[i].rate)
+ return &dpll_map[i].params;
+ }
+
+ return NULL;
+}
+
static int omap_usb3_suspend(struct usb_phy *x, int suspend)
{
struct omap_usb *phy = phy_to_omapusb(x);
@@ -116,26 +121,6 @@ static int omap_usb3_suspend(struct usb_phy *x, int suspend)
return 0;
}
-static inline enum sys_clk_rate __get_sys_clk_index(unsigned long rate)
-{
- switch (rate) {
- case 12000000:
- return CLK_RATE_12MHZ;
- case 16800000:
- return CLK_RATE_16MHZ;
- case 19200000:
- return CLK_RATE_19MHZ;
- case 20000000:
- return CLK_RATE_20MHZ;
- case 26000000:
- return CLK_RATE_26MHZ;
- case 38400000:
- return CLK_RATE_38MHZ;
- default:
- return CLK_RATE_UNDEFINED;
- }
-}
-
static void omap_usb_dpll_relock(struct omap_usb *phy)
{
u32 val;
@@ -155,39 +140,39 @@ static int omap_usb_dpll_lock(struct omap_usb *phy)
{
u32 val;
unsigned long rate;
- enum sys_clk_rate clk_index;
-
- rate = clk_get_rate(phy->sys_clk);
- clk_index = __get_sys_clk_index(rate);
+ struct usb_dpll_params *dpll_params;
- if (clk_index == CLK_RATE_UNDEFINED) {
- pr_err("dpll cannot be locked for sys clk freq:%luHz\n", rate);
+ rate = clk_get_rate(phy->sys_clk);
+ dpll_params = omap_usb3_get_dpll_params(rate);
+ if (!dpll_params) {
+ dev_err(phy->dev,
+ "No DPLL configuration for %lu Hz SYS CLK\n", rate);
return -EINVAL;
}
val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION1);
val &= ~PLL_REGN_MASK;
- val |= omap_usb3_dpll_params[clk_index].n << PLL_REGN_SHIFT;
+ val |= dpll_params->n << PLL_REGN_SHIFT;
omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION1, val);
val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
val &= ~PLL_SELFREQDCO_MASK;
- val |= omap_usb3_dpll_params[clk_index].freq << PLL_SELFREQDCO_SHIFT;
+ val |= dpll_params->freq << PLL_SELFREQDCO_SHIFT;
omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION1);
val &= ~PLL_REGM_MASK;
- val |= omap_usb3_dpll_params[clk_index].m << PLL_REGM_SHIFT;
+ val |= dpll_params->m << PLL_REGM_SHIFT;
omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION1, val);
val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION4);
val &= ~PLL_REGM_F_MASK;
- val |= omap_usb3_dpll_params[clk_index].mf << PLL_REGM_F_SHIFT;
+ val |= dpll_params->mf << PLL_REGM_F_SHIFT;
omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION4, val);
val = omap_usb_readl(phy->pll_ctrl_base, PLL_CONFIGURATION3);
val &= ~PLL_SD_MASK;
- val |= omap_usb3_dpll_params[clk_index].sd << PLL_SD_SHIFT;
+ val |= dpll_params->sd << PLL_SD_SHIFT;
omap_usb_writel(phy->pll_ctrl_base, PLL_CONFIGURATION3, val);
omap_usb_dpll_relock(phy);
@@ -198,8 +183,12 @@ static int omap_usb_dpll_lock(struct omap_usb *phy)
static int omap_usb3_init(struct usb_phy *x)
{
struct omap_usb *phy = phy_to_omapusb(x);
+ int ret;
+
+ ret = omap_usb_dpll_lock(phy);
+ if (ret)
+ return ret;
- omap_usb_dpll_lock(phy);
omap_control_usb3_phy_power(phy->control_dev, 1);
return 0;
diff --git a/drivers/usb/phy/phy-rcar-usb.c b/drivers/usb/phy/phy-rcar-usb.c
index ae909408958..33265a5b2cd 100644
--- a/drivers/usb/phy/phy-rcar-usb.c
+++ b/drivers/usb/phy/phy-rcar-usb.c
@@ -83,7 +83,7 @@ static int rcar_usb_phy_init(struct usb_phy *phy)
{
struct rcar_usb_phy_priv *priv = usb_phy_to_priv(phy);
struct device *dev = phy->dev;
- struct rcar_phy_platform_data *pdata = dev->platform_data;
+ struct rcar_phy_platform_data *pdata = dev_get_platdata(dev);
void __iomem *reg0 = priv->reg0;
void __iomem *reg1 = priv->reg1;
static const u8 ovcn_act[] = { OVC0_ACT, OVC1_ACT, OVC2_ACT };
@@ -184,17 +184,12 @@ static int rcar_usb_phy_probe(struct platform_device *pdev)
void __iomem *reg0, *reg1 = NULL;
int ret;
- if (!pdev->dev.platform_data) {
+ if (!dev_get_platdata(&pdev->dev)) {
dev_err(dev, "No platform data\n");
return -EINVAL;
}
res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res0) {
- dev_err(dev, "Not enough platform resources\n");
- return -EINVAL;
- }
-
reg0 = devm_ioremap_resource(dev, res0);
if (IS_ERR(reg0))
return PTR_ERR(reg0);
diff --git a/drivers/usb/phy/phy-samsung-usb2.c b/drivers/usb/phy/phy-samsung-usb2.c
index 1011c16ade7..ff70e4b19b9 100644
--- a/drivers/usb/phy/phy-samsung-usb2.c
+++ b/drivers/usb/phy/phy-samsung-usb2.c
@@ -359,7 +359,7 @@ static int samsung_usb2phy_probe(struct platform_device *pdev)
{
struct samsung_usbphy *sphy;
struct usb_otg *otg;
- struct samsung_usbphy_data *pdata = pdev->dev.platform_data;
+ struct samsung_usbphy_data *pdata = dev_get_platdata(&pdev->dev);
const struct samsung_usbphy_drvdata *drv_data;
struct device *dev = &pdev->dev;
struct resource *phy_mem;
@@ -388,7 +388,7 @@ static int samsung_usb2phy_probe(struct platform_device *pdev)
clk = devm_clk_get(dev, "otg");
if (IS_ERR(clk)) {
- dev_err(dev, "Failed to get otg clock\n");
+ dev_err(dev, "Failed to get usbhost/otg clock\n");
return PTR_ERR(clk);
}
diff --git a/drivers/usb/phy/phy-samsung-usb3.c b/drivers/usb/phy/phy-samsung-usb3.c
index 300e0cf5e31..c6eb22213de 100644
--- a/drivers/usb/phy/phy-samsung-usb3.c
+++ b/drivers/usb/phy/phy-samsung-usb3.c
@@ -231,7 +231,7 @@ static void samsung_usb3phy_shutdown(struct usb_phy *phy)
static int samsung_usb3phy_probe(struct platform_device *pdev)
{
struct samsung_usbphy *sphy;
- struct samsung_usbphy_data *pdata = pdev->dev.platform_data;
+ struct samsung_usbphy_data *pdata = dev_get_platdata(&pdev->dev);
struct device *dev = &pdev->dev;
struct resource *phy_mem;
void __iomem *phy_base;
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index cec0855ed24..e9cb1cb8abc 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -28,20 +28,28 @@
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/usb/otg.h>
#include <linux/usb/ulpi.h>
+#include <linux/usb/of.h>
#include <asm/mach-types.h>
#include <linux/usb/ehci_def.h>
#include <linux/usb/tegra_usb_phy.h>
+#include <linux/regulator/consumer.h>
#define ULPI_VIEWPORT 0x170
-/* PORTSC registers */
+/* PORTSC PTS/PHCD bits, Tegra20 only */
#define TEGRA_USB_PORTSC1 0x184
#define TEGRA_USB_PORTSC1_PTS(x) (((x) & 0x3) << 30)
#define TEGRA_USB_PORTSC1_PHCD (1 << 23)
+/* HOSTPC1 PTS/PHCD bits, Tegra30 and above */
+#define TEGRA_USB_HOSTPC1_DEVLC 0x1b4
+#define TEGRA_USB_HOSTPC1_DEVLC_PTS(x) (((x) & 0x7) << 29)
+#define TEGRA_USB_HOSTPC1_DEVLC_PHCD (1 << 22)
+
/* Bits of PORTSC1, which will get cleared by writing 1 into them */
#define TEGRA_PORTSC1_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC)
@@ -84,16 +92,22 @@
#define UTMIP_XCVR_CFG0 0x808
#define UTMIP_XCVR_SETUP(x) (((x) & 0xf) << 0)
+#define UTMIP_XCVR_SETUP_MSB(x) ((((x) & 0x70) >> 4) << 22)
#define UTMIP_XCVR_LSRSLEW(x) (((x) & 0x3) << 8)
#define UTMIP_XCVR_LSFSLEW(x) (((x) & 0x3) << 10)
#define UTMIP_FORCE_PD_POWERDOWN (1 << 14)
#define UTMIP_FORCE_PD2_POWERDOWN (1 << 16)
#define UTMIP_FORCE_PDZI_POWERDOWN (1 << 18)
-#define UTMIP_XCVR_HSSLEW_MSB(x) (((x) & 0x7f) << 25)
+#define UTMIP_XCVR_LSBIAS_SEL (1 << 21)
+#define UTMIP_XCVR_HSSLEW(x) (((x) & 0x3) << 4)
+#define UTMIP_XCVR_HSSLEW_MSB(x) ((((x) & 0x1fc) >> 2) << 25)
#define UTMIP_BIAS_CFG0 0x80c
#define UTMIP_OTGPD (1 << 11)
#define UTMIP_BIASPD (1 << 10)
+#define UTMIP_HSSQUELCH_LEVEL(x) (((x) & 0x3) << 0)
+#define UTMIP_HSDISCON_LEVEL(x) (((x) & 0x3) << 2)
+#define UTMIP_HSDISCON_LEVEL_MSB(x) ((((x) & 0x4) >> 2) << 24)
#define UTMIP_HSRX_CFG0 0x810
#define UTMIP_ELASTIC_LIMIT(x) (((x) & 0x1f) << 10)
@@ -137,6 +151,12 @@
#define UTMIP_BIAS_CFG1 0x83c
#define UTMIP_BIAS_PDTRK_COUNT(x) (((x) & 0x1f) << 3)
+/* For Tegra30 and above only, the address is different in Tegra20 */
+#define USB_USBMODE 0x1f8
+#define USB_USBMODE_MASK (3 << 0)
+#define USB_USBMODE_HOST (3 << 0)
+#define USB_USBMODE_DEVICE (2 << 0)
+
static DEFINE_SPINLOCK(utmip_pad_lock);
static int utmip_pad_count;
@@ -184,36 +204,22 @@ static const struct tegra_xtal_freq tegra_freq_table[] = {
},
};
-static struct tegra_utmip_config utmip_default[] = {
- [0] = {
- .hssync_start_delay = 9,
- .idle_wait_delay = 17,
- .elastic_limit = 16,
- .term_range_adj = 6,
- .xcvr_setup = 9,
- .xcvr_lsfslew = 1,
- .xcvr_lsrslew = 1,
- },
- [2] = {
- .hssync_start_delay = 9,
- .idle_wait_delay = 17,
- .elastic_limit = 16,
- .term_range_adj = 6,
- .xcvr_setup = 9,
- .xcvr_lsfslew = 2,
- .xcvr_lsrslew = 2,
- },
-};
-
static void set_pts(struct tegra_usb_phy *phy, u8 pts_val)
{
void __iomem *base = phy->regs;
unsigned long val;
- val = readl(base + TEGRA_USB_PORTSC1) & ~TEGRA_PORTSC1_RWC_BITS;
- val &= ~TEGRA_USB_PORTSC1_PTS(3);
- val |= TEGRA_USB_PORTSC1_PTS(pts_val & 3);
- writel(val, base + TEGRA_USB_PORTSC1);
+ if (phy->soc_config->has_hostpc) {
+ val = readl(base + TEGRA_USB_HOSTPC1_DEVLC);
+ val &= ~TEGRA_USB_HOSTPC1_DEVLC_PTS(~0);
+ val |= TEGRA_USB_HOSTPC1_DEVLC_PTS(pts_val);
+ writel(val, base + TEGRA_USB_HOSTPC1_DEVLC);
+ } else {
+ val = readl(base + TEGRA_USB_PORTSC1) & ~TEGRA_PORTSC1_RWC_BITS;
+ val &= ~TEGRA_USB_PORTSC1_PTS(~0);
+ val |= TEGRA_USB_PORTSC1_PTS(pts_val);
+ writel(val, base + TEGRA_USB_PORTSC1);
+ }
}
static void set_phcd(struct tegra_usb_phy *phy, bool enable)
@@ -221,17 +227,26 @@ static void set_phcd(struct tegra_usb_phy *phy, bool enable)
void __iomem *base = phy->regs;
unsigned long val;
- val = readl(base + TEGRA_USB_PORTSC1) & ~TEGRA_PORTSC1_RWC_BITS;
- if (enable)
- val |= TEGRA_USB_PORTSC1_PHCD;
- else
- val &= ~TEGRA_USB_PORTSC1_PHCD;
- writel(val, base + TEGRA_USB_PORTSC1);
+ if (phy->soc_config->has_hostpc) {
+ val = readl(base + TEGRA_USB_HOSTPC1_DEVLC);
+ if (enable)
+ val |= TEGRA_USB_HOSTPC1_DEVLC_PHCD;
+ else
+ val &= ~TEGRA_USB_HOSTPC1_DEVLC_PHCD;
+ writel(val, base + TEGRA_USB_HOSTPC1_DEVLC);
+ } else {
+ val = readl(base + TEGRA_USB_PORTSC1) & ~PORT_RWC_BITS;
+ if (enable)
+ val |= TEGRA_USB_PORTSC1_PHCD;
+ else
+ val &= ~TEGRA_USB_PORTSC1_PHCD;
+ writel(val, base + TEGRA_USB_PORTSC1);
+ }
}
static int utmip_pad_open(struct tegra_usb_phy *phy)
{
- phy->pad_clk = devm_clk_get(phy->dev, "utmi-pads");
+ phy->pad_clk = devm_clk_get(phy->u_phy.dev, "utmi-pads");
if (IS_ERR(phy->pad_clk)) {
pr_err("%s: can't get utmip pad clock\n", __func__);
return PTR_ERR(phy->pad_clk);
@@ -244,6 +259,7 @@ static void utmip_pad_power_on(struct tegra_usb_phy *phy)
{
unsigned long val, flags;
void __iomem *base = phy->pad_regs;
+ struct tegra_utmip_config *config = phy->config;
clk_prepare_enable(phy->pad_clk);
@@ -252,6 +268,16 @@ static void utmip_pad_power_on(struct tegra_usb_phy *phy)
if (utmip_pad_count++ == 0) {
val = readl(base + UTMIP_BIAS_CFG0);
val &= ~(UTMIP_OTGPD | UTMIP_BIASPD);
+
+ if (phy->soc_config->requires_extra_tuning_parameters) {
+ val &= ~(UTMIP_HSSQUELCH_LEVEL(~0) |
+ UTMIP_HSDISCON_LEVEL(~0) |
+ UTMIP_HSDISCON_LEVEL_MSB(~0));
+
+ val |= UTMIP_HSSQUELCH_LEVEL(config->hssquelch_level);
+ val |= UTMIP_HSDISCON_LEVEL(config->hsdiscon_level);
+ val |= UTMIP_HSDISCON_LEVEL_MSB(config->hsdiscon_level);
+ }
writel(val, base + UTMIP_BIAS_CFG0);
}
@@ -361,7 +387,7 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
}
val = readl(base + UTMIP_TX_CFG0);
- val &= ~UTMIP_FS_PREABMLE_J;
+ val |= UTMIP_FS_PREABMLE_J;
writel(val, base + UTMIP_TX_CFG0);
val = readl(base + UTMIP_HSRX_CFG0);
@@ -384,34 +410,56 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
val &= ~UTMIP_SUSPEND_EXIT_ON_EDGE;
writel(val, base + UTMIP_MISC_CFG0);
- val = readl(base + UTMIP_MISC_CFG1);
- val &= ~(UTMIP_PLL_ACTIVE_DLY_COUNT(~0) | UTMIP_PLLU_STABLE_COUNT(~0));
- val |= UTMIP_PLL_ACTIVE_DLY_COUNT(phy->freq->active_delay) |
- UTMIP_PLLU_STABLE_COUNT(phy->freq->stable_count);
- writel(val, base + UTMIP_MISC_CFG1);
-
- val = readl(base + UTMIP_PLL_CFG1);
- val &= ~(UTMIP_XTAL_FREQ_COUNT(~0) | UTMIP_PLLU_ENABLE_DLY_COUNT(~0));
- val |= UTMIP_XTAL_FREQ_COUNT(phy->freq->xtal_freq_count) |
- UTMIP_PLLU_ENABLE_DLY_COUNT(phy->freq->enable_delay);
- writel(val, base + UTMIP_PLL_CFG1);
+ if (!phy->soc_config->utmi_pll_config_in_car_module) {
+ val = readl(base + UTMIP_MISC_CFG1);
+ val &= ~(UTMIP_PLL_ACTIVE_DLY_COUNT(~0) |
+ UTMIP_PLLU_STABLE_COUNT(~0));
+ val |= UTMIP_PLL_ACTIVE_DLY_COUNT(phy->freq->active_delay) |
+ UTMIP_PLLU_STABLE_COUNT(phy->freq->stable_count);
+ writel(val, base + UTMIP_MISC_CFG1);
+
+ val = readl(base + UTMIP_PLL_CFG1);
+ val &= ~(UTMIP_XTAL_FREQ_COUNT(~0) |
+ UTMIP_PLLU_ENABLE_DLY_COUNT(~0));
+ val |= UTMIP_XTAL_FREQ_COUNT(phy->freq->xtal_freq_count) |
+ UTMIP_PLLU_ENABLE_DLY_COUNT(phy->freq->enable_delay);
+ writel(val, base + UTMIP_PLL_CFG1);
+ }
- if (phy->mode == TEGRA_USB_PHY_MODE_DEVICE) {
+ if (phy->mode == USB_DR_MODE_PERIPHERAL) {
val = readl(base + USB_SUSP_CTRL);
val &= ~(USB_WAKE_ON_CNNT_EN_DEV | USB_WAKE_ON_DISCON_EN_DEV);
writel(val, base + USB_SUSP_CTRL);
+
+ val = readl(base + UTMIP_BAT_CHRG_CFG0);
+ val &= ~UTMIP_PD_CHRG;
+ writel(val, base + UTMIP_BAT_CHRG_CFG0);
+ } else {
+ val = readl(base + UTMIP_BAT_CHRG_CFG0);
+ val |= UTMIP_PD_CHRG;
+ writel(val, base + UTMIP_BAT_CHRG_CFG0);
}
utmip_pad_power_on(phy);
val = readl(base + UTMIP_XCVR_CFG0);
val &= ~(UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
- UTMIP_FORCE_PDZI_POWERDOWN | UTMIP_XCVR_SETUP(~0) |
- UTMIP_XCVR_LSFSLEW(~0) | UTMIP_XCVR_LSRSLEW(~0) |
- UTMIP_XCVR_HSSLEW_MSB(~0));
- val |= UTMIP_XCVR_SETUP(config->xcvr_setup);
+ UTMIP_FORCE_PDZI_POWERDOWN | UTMIP_XCVR_LSBIAS_SEL |
+ UTMIP_XCVR_SETUP(~0) | UTMIP_XCVR_SETUP_MSB(~0) |
+ UTMIP_XCVR_LSFSLEW(~0) | UTMIP_XCVR_LSRSLEW(~0));
+
+ if (!config->xcvr_setup_use_fuses) {
+ val |= UTMIP_XCVR_SETUP(config->xcvr_setup);
+ val |= UTMIP_XCVR_SETUP_MSB(config->xcvr_setup);
+ }
val |= UTMIP_XCVR_LSFSLEW(config->xcvr_lsfslew);
val |= UTMIP_XCVR_LSRSLEW(config->xcvr_lsrslew);
+
+ if (phy->soc_config->requires_extra_tuning_parameters) {
+ val &= ~(UTMIP_XCVR_HSSLEW(~0) | UTMIP_XCVR_HSSLEW_MSB(~0));
+ val |= UTMIP_XCVR_HSSLEW(config->xcvr_hsslew);
+ val |= UTMIP_XCVR_HSSLEW_MSB(config->xcvr_hsslew);
+ }
writel(val, base + UTMIP_XCVR_CFG0);
val = readl(base + UTMIP_XCVR_CFG1);
@@ -420,23 +468,19 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
val |= UTMIP_XCVR_TERM_RANGE_ADJ(config->term_range_adj);
writel(val, base + UTMIP_XCVR_CFG1);
- val = readl(base + UTMIP_BAT_CHRG_CFG0);
- val &= ~UTMIP_PD_CHRG;
- writel(val, base + UTMIP_BAT_CHRG_CFG0);
-
val = readl(base + UTMIP_BIAS_CFG1);
val &= ~UTMIP_BIAS_PDTRK_COUNT(~0);
val |= UTMIP_BIAS_PDTRK_COUNT(0x5);
writel(val, base + UTMIP_BIAS_CFG1);
- if (phy->is_legacy_phy) {
- val = readl(base + UTMIP_SPARE_CFG0);
- if (phy->mode == TEGRA_USB_PHY_MODE_DEVICE)
- val &= ~FUSE_SETUP_SEL;
- else
- val |= FUSE_SETUP_SEL;
- writel(val, base + UTMIP_SPARE_CFG0);
- } else {
+ val = readl(base + UTMIP_SPARE_CFG0);
+ if (config->xcvr_setup_use_fuses)
+ val |= FUSE_SETUP_SEL;
+ else
+ val &= ~FUSE_SETUP_SEL;
+ writel(val, base + UTMIP_SPARE_CFG0);
+
+ if (!phy->is_legacy_phy) {
val = readl(base + USB_SUSP_CTRL);
val |= UTMIP_PHY_ENABLE;
writel(val, base + USB_SUSP_CTRL);
@@ -459,6 +503,16 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
utmi_phy_clk_enable(phy);
+ if (phy->soc_config->requires_usbmode_setup) {
+ val = readl(base + USB_USBMODE);
+ val &= ~USB_USBMODE_MASK;
+ if (phy->mode == USB_DR_MODE_HOST)
+ val |= USB_USBMODE_HOST;
+ else
+ val |= USB_USBMODE_DEVICE;
+ writel(val, base + USB_USBMODE);
+ }
+
if (!phy->is_legacy_phy)
set_pts(phy, 0);
@@ -472,7 +526,7 @@ static int utmi_phy_power_off(struct tegra_usb_phy *phy)
utmi_phy_clk_disable(phy);
- if (phy->mode == TEGRA_USB_PHY_MODE_DEVICE) {
+ if (phy->mode == USB_DR_MODE_PERIPHERAL) {
val = readl(base + USB_SUSP_CTRL);
val &= ~USB_WAKEUP_DEBOUNCE_COUNT(~0);
val |= USB_WAKE_ON_CNNT_EN_DEV | USB_WAKEUP_DEBOUNCE_COUNT(5);
@@ -560,13 +614,15 @@ static int ulpi_phy_power_on(struct tegra_usb_phy *phy)
ret = gpio_direction_output(phy->reset_gpio, 0);
if (ret < 0) {
- dev_err(phy->dev, "gpio %d not set to 0\n", phy->reset_gpio);
+ dev_err(phy->u_phy.dev, "gpio %d not set to 0\n",
+ phy->reset_gpio);
return ret;
}
msleep(5);
ret = gpio_direction_output(phy->reset_gpio, 1);
if (ret < 0) {
- dev_err(phy->dev, "gpio %d not set to 1\n", phy->reset_gpio);
+ dev_err(phy->u_phy.dev, "gpio %d not set to 1\n",
+ phy->reset_gpio);
return ret;
}
@@ -634,6 +690,9 @@ static void tegra_usb_phy_close(struct usb_phy *x)
{
struct tegra_usb_phy *phy = container_of(x, struct tegra_usb_phy, u_phy);
+ if (!IS_ERR(phy->vbus))
+ regulator_disable(phy->vbus);
+
clk_disable_unprepare(phy->pll_u);
}
@@ -666,29 +725,30 @@ static int ulpi_open(struct tegra_usb_phy *phy)
{
int err;
- phy->clk = devm_clk_get(phy->dev, "ulpi-link");
+ phy->clk = devm_clk_get(phy->u_phy.dev, "ulpi-link");
if (IS_ERR(phy->clk)) {
pr_err("%s: can't get ulpi clock\n", __func__);
return PTR_ERR(phy->clk);
}
- err = devm_gpio_request(phy->dev, phy->reset_gpio, "ulpi_phy_reset_b");
+ err = devm_gpio_request(phy->u_phy.dev, phy->reset_gpio,
+ "ulpi_phy_reset_b");
if (err < 0) {
- dev_err(phy->dev, "request failed for gpio: %d\n",
+ dev_err(phy->u_phy.dev, "request failed for gpio: %d\n",
phy->reset_gpio);
return err;
}
err = gpio_direction_output(phy->reset_gpio, 0);
if (err < 0) {
- dev_err(phy->dev, "gpio %d direction not set to output\n",
+ dev_err(phy->u_phy.dev, "gpio %d direction not set to output\n",
phy->reset_gpio);
return err;
}
phy->ulpi = otg_ulpi_create(&ulpi_viewport_access_ops, 0);
if (!phy->ulpi) {
- dev_err(phy->dev, "otg_ulpi_create returned NULL\n");
+ dev_err(phy->u_phy.dev, "otg_ulpi_create returned NULL\n");
err = -ENOMEM;
return err;
}
@@ -703,14 +763,7 @@ static int tegra_usb_phy_init(struct tegra_usb_phy *phy)
int i;
int err;
- if (!phy->is_ulpi_phy) {
- if (phy->is_legacy_phy)
- phy->config = &utmip_default[0];
- else
- phy->config = &utmip_default[2];
- }
-
- phy->pll_u = devm_clk_get(phy->dev, "pll_u");
+ phy->pll_u = devm_clk_get(phy->u_phy.dev, "pll_u");
if (IS_ERR(phy->pll_u)) {
pr_err("Can't get pll_u clock\n");
return PTR_ERR(phy->pll_u);
@@ -733,6 +786,16 @@ static int tegra_usb_phy_init(struct tegra_usb_phy *phy)
goto fail;
}
+ if (!IS_ERR(phy->vbus)) {
+ err = regulator_enable(phy->vbus);
+ if (err) {
+ dev_err(phy->u_phy.dev,
+ "failed to enable usb vbus regulator: %d\n",
+ err);
+ goto fail;
+ }
+ }
+
if (phy->is_ulpi_phy)
err = ulpi_open(phy);
else
@@ -784,11 +847,138 @@ void tegra_ehci_phy_restore_end(struct usb_phy *x)
}
EXPORT_SYMBOL_GPL(tegra_ehci_phy_restore_end);
+static int read_utmi_param(struct platform_device *pdev, const char *param,
+ u8 *dest)
+{
+ u32 value;
+ int err = of_property_read_u32(pdev->dev.of_node, param, &value);
+ if (err < 0) {
+ dev_err(&pdev->dev, "Failed to read USB UTMI parameter %s: %d\n",
+ param, err);
+ return err;
+ }
+ *dest = (u8)value;
+ return 0;
+}
+
+static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+ int err;
+ struct tegra_utmip_config *config;
+
+ tegra_phy->is_ulpi_phy = false;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get UTMI Pad regs\n");
+ return -ENXIO;
+ }
+
+ tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!tegra_phy->pad_regs) {
+ dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n");
+ return -ENOMEM;
+ }
+
+ tegra_phy->config = devm_kzalloc(&pdev->dev,
+ sizeof(*tegra_phy->config), GFP_KERNEL);
+ if (!tegra_phy->config) {
+ dev_err(&pdev->dev,
+ "unable to allocate memory for USB UTMIP config\n");
+ return -ENOMEM;
+ }
+
+ config = tegra_phy->config;
+
+ err = read_utmi_param(pdev, "nvidia,hssync-start-delay",
+ &config->hssync_start_delay);
+ if (err < 0)
+ return err;
+
+ err = read_utmi_param(pdev, "nvidia,elastic-limit",
+ &config->elastic_limit);
+ if (err < 0)
+ return err;
+
+ err = read_utmi_param(pdev, "nvidia,idle-wait-delay",
+ &config->idle_wait_delay);
+ if (err < 0)
+ return err;
+
+ err = read_utmi_param(pdev, "nvidia,term-range-adj",
+ &config->term_range_adj);
+ if (err < 0)
+ return err;
+
+ err = read_utmi_param(pdev, "nvidia,xcvr-lsfslew",
+ &config->xcvr_lsfslew);
+ if (err < 0)
+ return err;
+
+ err = read_utmi_param(pdev, "nvidia,xcvr-lsrslew",
+ &config->xcvr_lsrslew);
+ if (err < 0)
+ return err;
+
+ if (tegra_phy->soc_config->requires_extra_tuning_parameters) {
+ err = read_utmi_param(pdev, "nvidia,xcvr-hsslew",
+ &config->xcvr_hsslew);
+ if (err < 0)
+ return err;
+
+ err = read_utmi_param(pdev, "nvidia,hssquelch-level",
+ &config->hssquelch_level);
+ if (err < 0)
+ return err;
+
+ err = read_utmi_param(pdev, "nvidia,hsdiscon-level",
+ &config->hsdiscon_level);
+ if (err < 0)
+ return err;
+ }
+
+ config->xcvr_setup_use_fuses = of_property_read_bool(
+ pdev->dev.of_node, "nvidia,xcvr-setup-use-fuses");
+
+ if (!config->xcvr_setup_use_fuses) {
+ err = read_utmi_param(pdev, "nvidia,xcvr-setup",
+ &config->xcvr_setup);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static const struct tegra_phy_soc_config tegra20_soc_config = {
+ .utmi_pll_config_in_car_module = false,
+ .has_hostpc = false,
+ .requires_usbmode_setup = false,
+ .requires_extra_tuning_parameters = false,
+};
+
+static const struct tegra_phy_soc_config tegra30_soc_config = {
+ .utmi_pll_config_in_car_module = true,
+ .has_hostpc = true,
+ .requires_usbmode_setup = true,
+ .requires_extra_tuning_parameters = true,
+};
+
+static struct of_device_id tegra_usb_phy_id_table[] = {
+ { .compatible = "nvidia,tegra30-usb-phy", .data = &tegra30_soc_config },
+ { .compatible = "nvidia,tegra20-usb-phy", .data = &tegra20_soc_config },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_usb_phy_id_table);
+
static int tegra_usb_phy_probe(struct platform_device *pdev)
{
+ const struct of_device_id *match;
struct resource *res;
struct tegra_usb_phy *tegra_phy = NULL;
struct device_node *np = pdev->dev.of_node;
+ enum usb_phy_interface phy_type;
int err;
tegra_phy = devm_kzalloc(&pdev->dev, sizeof(*tegra_phy), GFP_KERNEL);
@@ -797,6 +987,13 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ match = of_match_device(tegra_usb_phy_id_table, &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+ tegra_phy->soc_config = match->data;
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "Failed to get I/O memory\n");
@@ -813,23 +1010,15 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
tegra_phy->is_legacy_phy =
of_property_read_bool(np, "nvidia,has-legacy-mode");
- err = of_property_match_string(np, "phy_type", "ulpi");
- if (err < 0) {
- tegra_phy->is_ulpi_phy = false;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- if (!res) {
- dev_err(&pdev->dev, "Failed to get UTMI Pad regs\n");
- return -ENXIO;
- }
+ phy_type = of_usb_get_phy_mode(np);
+ switch (phy_type) {
+ case USBPHY_INTERFACE_MODE_UTMI:
+ err = utmi_phy_probe(tegra_phy, pdev);
+ if (err < 0)
+ return err;
+ break;
- tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start,
- resource_size(res));
- if (!tegra_phy->regs) {
- dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n");
- return -ENOMEM;
- }
- } else {
+ case USBPHY_INTERFACE_MODE_ULPI:
tegra_phy->is_ulpi_phy = true;
tegra_phy->reset_gpio =
@@ -839,19 +1028,35 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
tegra_phy->reset_gpio);
return tegra_phy->reset_gpio;
}
+ tegra_phy->config = NULL;
+ break;
+
+ default:
+ dev_err(&pdev->dev, "phy_type is invalid or unsupported\n");
+ return -EINVAL;
}
- err = of_property_match_string(np, "dr_mode", "otg");
- if (err < 0) {
- err = of_property_match_string(np, "dr_mode", "peripheral");
- if (err < 0)
- tegra_phy->mode = TEGRA_USB_PHY_MODE_HOST;
- else
- tegra_phy->mode = TEGRA_USB_PHY_MODE_DEVICE;
- } else
- tegra_phy->mode = TEGRA_USB_PHY_MODE_OTG;
+ if (of_find_property(np, "dr_mode", NULL))
+ tegra_phy->mode = of_usb_get_dr_mode(np);
+ else
+ tegra_phy->mode = USB_DR_MODE_HOST;
- tegra_phy->dev = &pdev->dev;
+ if (tegra_phy->mode == USB_DR_MODE_UNKNOWN) {
+ dev_err(&pdev->dev, "dr_mode is invalid\n");
+ return -EINVAL;
+ }
+
+ /* On some boards, the VBUS regulator doesn't need to be controlled */
+ if (of_find_property(np, "vbus-supply", NULL)) {
+ tegra_phy->vbus = devm_regulator_get(&pdev->dev, "vbus");
+ if (IS_ERR(tegra_phy->vbus))
+ return PTR_ERR(tegra_phy->vbus);
+ } else {
+ dev_notice(&pdev->dev, "no vbus regulator");
+ tegra_phy->vbus = ERR_PTR(-ENODEV);
+ }
+
+ tegra_phy->u_phy.dev = &pdev->dev;
err = tegra_usb_phy_init(tegra_phy);
if (err < 0)
return err;
@@ -859,18 +1064,29 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
tegra_phy->u_phy.shutdown = tegra_usb_phy_close;
tegra_phy->u_phy.set_suspend = tegra_usb_phy_suspend;
- dev_set_drvdata(&pdev->dev, tegra_phy);
+ platform_set_drvdata(pdev, tegra_phy);
+
+ err = usb_add_phy_dev(&tegra_phy->u_phy);
+ if (err < 0) {
+ tegra_usb_phy_close(&tegra_phy->u_phy);
+ return err;
+ }
+
return 0;
}
-static struct of_device_id tegra_usb_phy_id_table[] = {
- { .compatible = "nvidia,tegra20-usb-phy", },
- { },
-};
-MODULE_DEVICE_TABLE(of, tegra_usb_phy_id_table);
+static int tegra_usb_phy_remove(struct platform_device *pdev)
+{
+ struct tegra_usb_phy *tegra_phy = platform_get_drvdata(pdev);
+
+ usb_remove_phy(&tegra_phy->u_phy);
+
+ return 0;
+}
static struct platform_driver tegra_usb_phy_driver = {
.probe = tegra_usb_phy_probe,
+ .remove = tegra_usb_phy_remove,
.driver = {
.name = "tegra-phy",
.owner = THIS_MODULE,
@@ -879,29 +1095,5 @@ static struct platform_driver tegra_usb_phy_driver = {
};
module_platform_driver(tegra_usb_phy_driver);
-static int tegra_usb_phy_match(struct device *dev, void *data)
-{
- struct tegra_usb_phy *tegra_phy = dev_get_drvdata(dev);
- struct device_node *dn = data;
-
- return (tegra_phy->dev->of_node == dn) ? 1 : 0;
-}
-
-struct usb_phy *tegra_usb_get_phy(struct device_node *dn)
-{
- struct device *dev;
- struct tegra_usb_phy *tegra_phy;
-
- dev = driver_find_device(&tegra_usb_phy_driver.driver, NULL, dn,
- tegra_usb_phy_match);
- if (!dev)
- return ERR_PTR(-EPROBE_DEFER);
-
- tegra_phy = dev_get_drvdata(dev);
-
- return &tegra_phy->u_phy;
-}
-EXPORT_SYMBOL_GPL(tegra_usb_get_phy);
-
MODULE_DESCRIPTION("Tegra USB PHY driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/phy/phy-twl4030-usb.c b/drivers/usb/phy/phy-twl4030-usb.c
index 8f78d2d4072..90730c8762b 100644
--- a/drivers/usb/phy/phy-twl4030-usb.c
+++ b/drivers/usb/phy/phy-twl4030-usb.c
@@ -648,7 +648,7 @@ static int twl4030_set_host(struct usb_otg *otg, struct usb_bus *host)
static int twl4030_usb_probe(struct platform_device *pdev)
{
- struct twl4030_usb_data *pdata = pdev->dev.platform_data;
+ struct twl4030_usb_data *pdata = dev_get_platdata(&pdev->dev);
struct twl4030_usb *twl;
int status, err;
struct usb_otg *otg;
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index 1753bd367e0..16dbc938267 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -324,7 +324,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
int status, err;
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
- struct twl4030_usb_data *pdata = dev->platform_data;
+ struct twl4030_usb_data *pdata = dev_get_platdata(dev);
twl = devm_kzalloc(dev, sizeof *twl, GFP_KERNEL);
if (!twl)
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index cfd205036ab..17267b0a2e9 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -416,7 +416,7 @@ static int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev)
*/
static int usbhs_probe(struct platform_device *pdev)
{
- struct renesas_usbhs_platform_info *info = pdev->dev.platform_data;
+ struct renesas_usbhs_platform_info *info = dev_get_platdata(&pdev->dev);
struct renesas_usbhs_driver_callback *dfunc;
struct usbhs_priv *priv;
struct resource *res, *irq_res;
@@ -499,7 +499,7 @@ static int usbhs_probe(struct platform_device *pdev)
goto probe_end_fifo_exit;
/* dev_set_drvdata should be called after usbhs_mod_init */
- dev_set_drvdata(&pdev->dev, priv);
+ platform_set_drvdata(pdev, priv);
/*
* deviece reset here because
@@ -558,7 +558,7 @@ probe_end_pipe_exit:
static int usbhs_remove(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
- struct renesas_usbhs_platform_info *info = pdev->dev.platform_data;
+ struct renesas_usbhs_platform_info *info = dev_get_platdata(&pdev->dev);
struct renesas_usbhs_driver_callback *dfunc = &info->driver_callback;
dev_dbg(&pdev->dev, "usb remove\n");
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index ed4949faa70..3385aeb5a36 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -77,9 +77,9 @@ struct usbhsg_recip_handle {
struct usbhsg_gpriv, mod)
#define __usbhsg_for_each_uep(start, pos, g, i) \
- for (i = start, pos = (g)->uep + i; \
- i < (g)->uep_size; \
- i++, pos = (g)->uep + i)
+ for ((i) = start; \
+ ((i) < (g)->uep_size) && ((pos) = (g)->uep + (i)); \
+ (i)++)
#define usbhsg_for_each_uep(pos, gpriv, i) \
__usbhsg_for_each_uep(1, pos, gpriv, i)
@@ -855,10 +855,6 @@ static int usbhsg_gadget_stop(struct usb_gadget *gadget,
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
- if (!driver ||
- !driver->unbind)
- return -EINVAL;
-
usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD);
gpriv->driver = NULL;
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index b86815421c8..e40f565004d 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -111,9 +111,9 @@ static const char usbhsh_hcd_name[] = "renesas_usbhs host";
container_of(usbhs_mod_get(priv, USBHS_HOST), struct usbhsh_hpriv, mod)
#define __usbhsh_for_each_udev(start, pos, h, i) \
- for (i = start, pos = (h)->udev + i; \
- i < USBHSH_DEVICE_MAX; \
- i++, pos = (h)->udev + i)
+ for ((i) = start; \
+ ((i) < USBHSH_DEVICE_MAX) && ((pos) = (h)->udev + (i)); \
+ (i)++)
#define usbhsh_for_each_udev(pos, hpriv, i) \
__usbhsh_for_each_udev(1, pos, hpriv, i)
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index b476fde955b..3e534987983 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -54,9 +54,9 @@ struct usbhs_pipe_info {
* pipe list
*/
#define __usbhs_for_each_pipe(start, pos, info, i) \
- for (i = start, pos = (info)->pipe + i; \
- i < (info)->size; \
- i++, pos = (info)->pipe + i)
+ for ((i) = start; \
+ ((i) < (info)->size) && ((pos) = (info)->pipe + (i)); \
+ (i)++)
#define usbhs_for_each_pipe(pos, priv, i) \
__usbhs_for_each_pipe(1, pos, &((priv)->pipe_info), i)
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 8c3a42ea910..c454bfa22a1 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -51,6 +51,24 @@ config USB_SERIAL_GENERIC
support" be compiled as a module for this driver to be used
properly.
+config USB_SERIAL_SIMPLE
+ tristate "USB Serial Simple Driver"
+ help
+ Say Y here to use the USB serial "simple" driver. It handles
+ a wide range of very simple devices in a single driver.
+ Specifically, it supports:
+ - Suunto ANT+ USB device
+ - Fundamental Software dongle
+ - HP4x calculators
+ - a number of Motorola phones
+ - Siemens USB/MPI adapter
+ - ViVOtech ViVOpay USB device
+ - Infineon Modem Flashloader USB interface
+ - ZIO Motherboard USB serial interface
+
+ To compile this driver as a module, choose M here: the module
+ will be called usb-serial-simple.
+
config USB_SERIAL_AIRCABLE
tristate "USB AIRcable Bluetooth Dongle Driver"
help
@@ -158,14 +176,6 @@ config USB_SERIAL_FTDI_SIO
To compile this driver as a module, choose M here: the
module will be called ftdi_sio.
-config USB_SERIAL_FUNSOFT
- tristate "USB Fundamental Software Dongle Driver"
- ---help---
- Say Y here if you want to use the Fundamental Software dongle.
-
- To compile this driver as a module, choose M here: the
- module will be called funsoft.
-
config USB_SERIAL_VISOR
tristate "USB Handspring Visor / Palm m50x / Sony Clie Driver"
help
@@ -462,15 +472,6 @@ config USB_SERIAL_MOS7840
To compile this driver as a module, choose M here: the
module will be called mos7840. If unsure, choose N.
-config USB_SERIAL_MOTOROLA
- tristate "USB Motorola Phone modem driver"
- ---help---
- Say Y here if you want to use a Motorola phone with a USB
- connector as a modem link.
-
- To compile this driver as a module, choose M here: the
- module will be called moto_modem. If unsure, choose N.
-
config USB_SERIAL_NAVMAN
tristate "USB Navman GPS device"
help
@@ -525,14 +526,6 @@ config USB_SERIAL_SPCP8X5
To compile this driver as a module, choose M here: the
module will be called spcp8x5.
-config USB_SERIAL_HP4X
- tristate "USB HP4x Calculators support"
- help
- Say Y here if you want to use an Hewlett-Packard 4x Calculator.
-
- To compile this driver as a module, choose M here: the
- module will be called hp4x.
-
config USB_SERIAL_SAFE
tristate "USB Safe Serial (Encapsulated) Driver"
@@ -540,14 +533,6 @@ config USB_SERIAL_SAFE_PADDED
bool "USB Secure Encapsulated Driver - Padded"
depends on USB_SERIAL_SAFE
-config USB_SERIAL_SIEMENS_MPI
- tristate "USB Siemens MPI driver"
- help
- Say M here if you want to use a Siemens USB/MPI adapter.
-
- To compile this driver as a module, choose M here: the
- module will be called siemens_mpi.
-
config USB_SERIAL_SIERRAWIRELESS
tristate "USB Sierra Wireless Driver"
help
@@ -639,14 +624,6 @@ config USB_SERIAL_OPTICON
To compile this driver as a module, choose M here: the
module will be called opticon.
-config USB_SERIAL_VIVOPAY_SERIAL
- tristate "USB ViVOpay serial interface driver"
- help
- Say Y here if you want to use a ViVOtech ViVOpay USB device.
-
- To compile this driver as a module, choose M here: the
- module will be called vivopay-serial.
-
config USB_SERIAL_XSENS_MT
tristate "Xsens motion tracker serial interface driver"
help
@@ -659,14 +636,6 @@ config USB_SERIAL_XSENS_MT
To compile this driver as a module, choose M here: the
module will be called xsens_mt.
-config USB_SERIAL_ZIO
- tristate "ZIO Motherboard USB serial interface driver"
- help
- Say Y here if you want to use ZIO Motherboard.
-
- To compile this driver as a module, choose M here: the
- module will be called zio.
-
config USB_SERIAL_WISHBONE
tristate "USB-Wishbone adapter interface driver"
help
@@ -710,16 +679,6 @@ config USB_SERIAL_QT2
To compile this driver as a module, choose M here: the
module will be called quatech-serial.
-config USB_SERIAL_FLASHLOADER
- tristate "Infineon Modem Flashloader USB interface driver"
- help
- Say Y here if you want to download Infineon Modem
- via USB Flashloader serial driver.
-
- To compile this driver as a module, choose M here: the
- module will be called flashloader.
-
-
config USB_SERIAL_DEBUG
tristate "USB Debugging Device"
help
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index f7130114488..42670f0b5bc 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -24,9 +24,7 @@ obj-$(CONFIG_USB_SERIAL_EDGEPORT_TI) += io_ti.o
obj-$(CONFIG_USB_SERIAL_EMPEG) += empeg.o
obj-$(CONFIG_USB_SERIAL_F81232) += f81232.o
obj-$(CONFIG_USB_SERIAL_FTDI_SIO) += ftdi_sio.o
-obj-$(CONFIG_USB_SERIAL_FUNSOFT) += funsoft.o
obj-$(CONFIG_USB_SERIAL_GARMIN) += garmin_gps.o
-obj-$(CONFIG_USB_SERIAL_HP4X) += hp4x.o
obj-$(CONFIG_USB_SERIAL_IPAQ) += ipaq.o
obj-$(CONFIG_USB_SERIAL_IPW) += ipw.o
obj-$(CONFIG_USB_SERIAL_IR) += ir-usb.o
@@ -39,7 +37,6 @@ obj-$(CONFIG_USB_SERIAL_MCT_U232) += mct_u232.o
obj-$(CONFIG_USB_SERIAL_METRO) += metro-usb.o
obj-$(CONFIG_USB_SERIAL_MOS7720) += mos7720.o
obj-$(CONFIG_USB_SERIAL_MOS7840) += mos7840.o
-obj-$(CONFIG_USB_SERIAL_MOTOROLA) += moto_modem.o
obj-$(CONFIG_USB_SERIAL_NAVMAN) += navman.o
obj-$(CONFIG_USB_SERIAL_OMNINET) += omninet.o
obj-$(CONFIG_USB_SERIAL_OPTICON) += opticon.o
@@ -50,8 +47,8 @@ obj-$(CONFIG_USB_SERIAL_QCAUX) += qcaux.o
obj-$(CONFIG_USB_SERIAL_QUALCOMM) += qcserial.o
obj-$(CONFIG_USB_SERIAL_QT2) += quatech2.o
obj-$(CONFIG_USB_SERIAL_SAFE) += safe_serial.o
-obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI) += siemens_mpi.o
obj-$(CONFIG_USB_SERIAL_SIERRAWIRELESS) += sierra.o
+obj-$(CONFIG_USB_SERIAL_SIMPLE) += usb-serial-simple.o
obj-$(CONFIG_USB_SERIAL_SPCP8X5) += spcp8x5.o
obj-$(CONFIG_USB_SERIAL_SSU100) += ssu100.o
obj-$(CONFIG_USB_SERIAL_SYMBOL) += symbolserial.o
@@ -61,8 +58,5 @@ obj-$(CONFIG_USB_SERIAL_VISOR) += visor.o
obj-$(CONFIG_USB_SERIAL_WISHBONE) += wishbone-serial.o
obj-$(CONFIG_USB_SERIAL_WHITEHEAT) += whiteheat.o
obj-$(CONFIG_USB_SERIAL_XIRCOM) += keyspan_pda.o
-obj-$(CONFIG_USB_SERIAL_VIVOPAY_SERIAL) += vivopay-serial.o
obj-$(CONFIG_USB_SERIAL_XSENS_MT) += xsens_mt.o
-obj-$(CONFIG_USB_SERIAL_ZIO) += zio.o
obj-$(CONFIG_USB_SERIAL_ZTE) += zte_ev.o
-obj-$(CONFIG_USB_SERIAL_FLASHLOADER) += flashloader.o
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
index f053b302a00..6335490d576 100644
--- a/drivers/usb/serial/bus.c
+++ b/drivers/usb/serial/bus.c
@@ -38,15 +38,14 @@ static int usb_serial_device_match(struct device *dev,
return 0;
}
-static ssize_t show_port_number(struct device *dev,
+static ssize_t port_number_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
return sprintf(buf, "%d\n", port->port_number);
}
-
-static DEVICE_ATTR(port_number, S_IRUGO, show_port_number, NULL);
+static DEVICE_ATTR_RO(port_number);
static int usb_serial_device_probe(struct device *dev)
{
@@ -122,7 +121,7 @@ static int usb_serial_device_remove(struct device *dev)
return retval;
}
-static ssize_t store_new_id(struct device_driver *driver,
+static ssize_t new_id_store(struct device_driver *driver,
const char *buf, size_t count)
{
struct usb_serial_driver *usb_drv = to_usb_serial_driver(driver);
@@ -135,17 +134,19 @@ static ssize_t store_new_id(struct device_driver *driver,
return retval;
}
-static ssize_t show_dynids(struct device_driver *driver, char *buf)
+static ssize_t new_id_show(struct device_driver *driver, char *buf)
{
struct usb_serial_driver *usb_drv = to_usb_serial_driver(driver);
return usb_show_dynids(&usb_drv->dynids, buf);
}
+static DRIVER_ATTR_RW(new_id);
-static struct driver_attribute drv_attrs[] = {
- __ATTR(new_id, S_IRUGO | S_IWUSR, show_dynids, store_new_id),
- __ATTR_NULL,
+static struct attribute *usb_serial_drv_attrs[] = {
+ &driver_attr_new_id.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(usb_serial_drv);
static void free_dynids(struct usb_serial_driver *drv)
{
@@ -164,7 +165,7 @@ struct bus_type usb_serial_bus_type = {
.match = usb_serial_device_match,
.probe = usb_serial_device_probe,
.remove = usb_serial_device_remove,
- .drv_attrs = drv_attrs,
+ .drv_groups = usb_serial_drv_groups,
};
int usb_serial_bus_register(struct usb_serial_driver *driver)
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index afb50eab204..c69bb50d466 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -151,11 +151,7 @@ static int usb_console_setup(struct console *co, char *options)
/* only call the device specific open if this
* is the first time the port is opened */
- if (serial->type->open)
- retval = serial->type->open(NULL, port);
- else
- retval = usb_serial_generic_open(NULL, port);
-
+ retval = serial->type->open(NULL, port);
if (retval) {
dev_err(&port->dev, "could not open USB console port\n");
goto fail;
@@ -210,10 +206,10 @@ static void usb_console_write(struct console *co,
if (count == 0)
return;
- pr_debug("%s - minor %d, %d byte(s)\n", __func__, port->minor, count);
+ dev_dbg(&port->dev, "%s - %d byte(s)\n", __func__, count);
if (!port->port.console) {
- pr_debug("%s - port not opened\n", __func__);
+ dev_dbg(&port->dev, "%s - port not opened\n", __func__);
return;
}
@@ -230,21 +226,14 @@ static void usb_console_write(struct console *co,
}
/* pass on to the driver specific version of this function if
it is available */
- if (serial->type->write)
- retval = serial->type->write(NULL, port, buf, i);
- else
- retval = usb_serial_generic_write(NULL, port, buf, i);
- pr_debug("%s - return value : %d\n", __func__, retval);
+ retval = serial->type->write(NULL, port, buf, i);
+ dev_dbg(&port->dev, "%s - write: %d\n", __func__, retval);
if (lf) {
/* append CR after LF */
unsigned char cr = 13;
- if (serial->type->write)
- retval = serial->type->write(NULL,
- port, &cr, 1);
- else
- retval = usb_serial_generic_write(NULL,
- port, &cr, 1);
- pr_debug("%s - return value : %d\n", __func__, retval);
+ retval = serial->type->write(NULL, port, &cr, 1);
+ dev_dbg(&port->dev, "%s - write cr: %d\n",
+ __func__, retval);
}
buf += i;
count -= i;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index d6ef2f8da37..6987b535aa9 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -53,6 +53,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
+ { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
@@ -118,6 +119,8 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
{ USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
+ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
+ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
@@ -148,6 +151,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
+ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
{ USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
{ USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
@@ -666,9 +670,6 @@ static void cp210x_set_termios(struct tty_struct *tty,
unsigned int bits;
unsigned int modem_ctl[4];
- if (!tty)
- return;
-
cflag = tty->termios.c_cflag;
old_cflag = old_termios->c_cflag;
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index e948dc02795..558605d646f 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -495,6 +495,8 @@ static int cypress_generic_port_probe(struct usb_serial_port *port)
}
usb_set_serial_port_data(port, priv);
+ port->port.drain_delay = 256;
+
return 0;
}
@@ -625,7 +627,7 @@ static int cypress_open(struct tty_struct *tty, struct usb_serial_port *port)
__func__, result);
cypress_set_dead(port);
}
- port->port.drain_delay = 256;
+
return result;
} /* cypress_open */
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 75e85cbf9e8..639a18fb67e 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -207,7 +207,6 @@ static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port)
return result;
}
- port->port.drain_delay = 256;
return 0;
}
@@ -322,6 +321,8 @@ static int f81232_port_probe(struct usb_serial_port *port)
usb_set_serial_port_data(port, priv);
+ port->port.drain_delay = 256;
+
return 0;
}
diff --git a/drivers/usb/serial/flashloader.c b/drivers/usb/serial/flashloader.c
deleted file mode 100644
index e6f5c10e891..00000000000
--- a/drivers/usb/serial/flashloader.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Infineon Flashloader driver
- *
- * Copyright (C) 2013 Wei Shuai <cpuwolf@gmail.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/tty.h>
-#include <linux/module.h>
-#include <linux/usb.h>
-#include <linux/usb/serial.h>
-#include <linux/uaccess.h>
-
-static const struct usb_device_id id_table[] = {
- { USB_DEVICE(0x8087, 0x0716) },
- { },
-};
-MODULE_DEVICE_TABLE(usb, id_table);
-
-static struct usb_serial_driver flashloader_device = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "flashloader",
- },
- .id_table = id_table,
- .num_ports = 1,
-};
-
-static struct usb_serial_driver * const serial_drivers[] = {
- &flashloader_device, NULL
-};
-
-module_usb_serial_driver(serial_drivers, id_table);
-MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 7260ec66034..c45f9c0a1b3 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -51,8 +51,6 @@
#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr, Johan Hovold <jhovold@gmail.com>"
#define DRIVER_DESC "USB FTDI Serial Converters Driver"
-static __u16 vendor = FTDI_VID;
-static __u16 product;
struct ftdi_private {
enum ftdi_chip_type chip_type;
@@ -144,8 +142,8 @@ static struct ftdi_sio_quirk ftdi_8u2232c_quirk = {
/*
- * Device ID not listed? Test via module params product/vendor or
- * /sys/bus/usb/ftdi_sio/new_id, then send patch/report!
+ * Device ID not listed? Test it using
+ * /sys/bus/usb-serial/drivers/ftdi_sio/new_id and send a patch or report.
*/
static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) },
@@ -735,9 +733,34 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
- { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
- { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
- { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57B_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29A_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29B_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29F_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62B_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S01_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29C_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_81B_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_82B_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5D_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K4Y_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5G_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S05_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_60_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_61_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63B_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_64_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_65_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92D_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_W5R_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_A5R_PID) },
+ { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_PW1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
@@ -881,7 +904,6 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) },
/* Crucible Devices */
{ USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
- { }, /* Optional parameter entry */
{ } /* Terminating entry */
};
@@ -905,9 +927,6 @@ static const char *ftdi_chip_name[] = {
#define FTDI_STATUS_B1_MASK (FTDI_RS_BI)
/* End TIOCMIWAIT */
-#define FTDI_IMPL_ASYNC_FLAGS = (ASYNC_SPD_HI | ASYNC_SPD_VHI \
- | ASYNC_SPD_CUST | ASYNC_SPD_SHI | ASYNC_SPD_WARP)
-
/* function prototypes for a FTDI serial converter */
static int ftdi_sio_probe(struct usb_serial *serial,
const struct usb_device_id *id);
@@ -974,10 +993,6 @@ static struct usb_serial_driver * const serial_drivers[] = {
#define WDR_TIMEOUT 5000 /* default urb timeout */
#define WDR_SHORT_TIMEOUT 1000 /* shorter urb timeout */
-/* High and low are for DTR, RTS etc etc */
-#define HIGH 1
-#define LOW 0
-
/*
* ***************************************************************************
* Utility functions
@@ -1546,8 +1561,8 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
* ***************************************************************************
*/
-static ssize_t show_latency_timer(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t latency_timer_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct ftdi_private *priv = usb_get_serial_port_data(port);
@@ -1557,11 +1572,10 @@ static ssize_t show_latency_timer(struct device *dev,
return sprintf(buf, "%i\n", priv->latency);
}
-
/* Write a new value of the latency timer, in units of milliseconds. */
-static ssize_t store_latency_timer(struct device *dev,
- struct device_attribute *attr, const char *valbuf,
- size_t count)
+static ssize_t latency_timer_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *valbuf, size_t count)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
struct ftdi_private *priv = usb_get_serial_port_data(port);
@@ -1574,6 +1588,7 @@ static ssize_t store_latency_timer(struct device *dev,
return -EIO;
return count;
}
+static DEVICE_ATTR_RW(latency_timer);
/* Write an event character directly to the FTDI register. The ASCII
value is in the low 8 bits, with the enable bit in the 9th bit. */
@@ -1601,9 +1616,6 @@ static ssize_t store_event_char(struct device *dev,
return count;
}
-
-static DEVICE_ATTR(latency_timer, S_IWUSR | S_IRUGO, show_latency_timer,
- store_latency_timer);
static DEVICE_ATTR(event_char, S_IWUSR, NULL, store_event_char);
static int create_sysfs_attrs(struct usb_serial_port *port)
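A minimal sketch of the DEVICE_ATTR_RW() convention the hunk above switches to (illustrative only, not part of this patch; the attribute name "example" is hypothetical): the macro expects <name>_show()/<name>_store() callbacks and declares dev_attr_<name> with the same S_IWUSR | S_IRUGO (0644) mode the old explicit DEVICE_ATTR() line used.

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", 42);        /* hypothetical value */
}

static ssize_t example_store(struct device *dev,
                             struct device_attribute *attr,
                             const char *buf, size_t count)
{
        /* parse buf and apply the new value here */
        return count;
}
static DEVICE_ATTR_RW(example);         /* => dev_attr_example, mode 0644 */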
@@ -1842,7 +1854,6 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
{
- struct ktermios dummy;
struct usb_device *dev = port->serial->dev;
struct ftdi_private *priv = usb_get_serial_port_data(port);
@@ -1858,10 +1869,8 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
This is same behaviour as serial.c/rs_open() - Kuba */
/* ftdi_set_termios will send usb control messages */
- if (tty) {
- memset(&dummy, 0, sizeof(dummy));
- ftdi_set_termios(tty, port, &dummy);
- }
+ if (tty)
+ ftdi_set_termios(tty, port, NULL);
return usb_serial_generic_open(tty, port);
}
@@ -2190,7 +2199,7 @@ no_data_parity_stop_changes:
dev_err(ddev, "%s urb failed to set baudrate\n", __func__);
mutex_unlock(&priv->cfg_lock);
/* Ensure RTS and DTR are raised when baudrate changed from 0 */
- if (!old_termios || (old_termios->c_cflag & CBAUD) == B0)
+ if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
}
@@ -2380,38 +2389,11 @@ static int ftdi_ioctl(struct tty_struct *tty,
return -ENOIOCTLCMD;
}
-static int __init ftdi_init(void)
-{
- if (vendor > 0 && product > 0) {
- /* Add user specified VID/PID to reserved element of table. */
- int i;
- for (i = 0; id_table_combined[i].idVendor; i++)
- ;
- id_table_combined[i].match_flags = USB_DEVICE_ID_MATCH_DEVICE;
- id_table_combined[i].idVendor = vendor;
- id_table_combined[i].idProduct = product;
- }
- return usb_serial_register_drivers(serial_drivers, KBUILD_MODNAME, id_table_combined);
-}
-
-static void __exit ftdi_exit(void)
-{
- usb_serial_deregister_drivers(serial_drivers);
-}
-
-
-module_init(ftdi_init);
-module_exit(ftdi_exit);
+module_usb_serial_driver(serial_drivers, id_table_combined);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-module_param(vendor, ushort, 0);
-MODULE_PARM_DESC(vendor, "User specified vendor ID (default="
- __MODULE_STRING(FTDI_VID)")");
-module_param(product, ushort, 0);
-MODULE_PARM_DESC(product, "User specified product ID");
-
module_param(ndi_latency_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ndi_latency_timer, "NDI device latency timer override");
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 6dd79253205..1b8af461b52 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -815,11 +815,35 @@
/*
* RT Systems programming cables for various ham radios
*/
-#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
-#define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */
-#define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */
-#define RTSYSTEMS_RTS01_PID 0x9e57 /* USB-RTS01 Radio Cable */
-
+#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
+#define RTSYSTEMS_USB_S03_PID 0x9001 /* RTS-03 USB to Serial Adapter */
+#define RTSYSTEMS_USB_59_PID 0x9e50 /* USB-59 USB to 8 pin plug */
+#define RTSYSTEMS_USB_57A_PID 0x9e51 /* USB-57A USB to 4pin 3.5mm plug */
+#define RTSYSTEMS_USB_57B_PID 0x9e52 /* USB-57B USB to extended 4pin 3.5mm plug */
+#define RTSYSTEMS_USB_29A_PID 0x9e53 /* USB-29A USB to 3.5mm stereo plug */
+#define RTSYSTEMS_USB_29B_PID 0x9e54 /* USB-29B USB to 6 pin mini din */
+#define RTSYSTEMS_USB_29F_PID 0x9e55 /* USB-29F USB to 6 pin modular plug */
+#define RTSYSTEMS_USB_62B_PID 0x9e56 /* USB-62B USB to 8 pin mini din plug*/
+#define RTSYSTEMS_USB_S01_PID 0x9e57 /* USB-RTS01 USB to 3.5 mm stereo plug*/
+#define RTSYSTEMS_USB_63_PID 0x9e58 /* USB-63 USB to 9 pin female*/
+#define RTSYSTEMS_USB_29C_PID 0x9e59 /* USB-29C USB to 4 pin modular plug*/
+#define RTSYSTEMS_USB_81B_PID 0x9e5A /* USB-81 USB to 8 pin mini din plug*/
+#define RTSYSTEMS_USB_82B_PID 0x9e5B /* USB-82 USB to 2.5 mm stereo plug*/
+#define RTSYSTEMS_USB_K5D_PID 0x9e5C /* USB-K5D USB to 8 pin modular plug*/
+#define RTSYSTEMS_USB_K4Y_PID 0x9e5D /* USB-K4Y USB to 2.5/3.5 mm plugs*/
+#define RTSYSTEMS_USB_K5G_PID 0x9e5E /* USB-K5G USB to 8 pin modular plug*/
+#define RTSYSTEMS_USB_S05_PID 0x9e5F /* USB-RTS05 USB to 2.5 mm stereo plug*/
+#define RTSYSTEMS_USB_60_PID 0x9e60 /* USB-60 USB to 6 pin din*/
+#define RTSYSTEMS_USB_61_PID 0x9e61 /* USB-61 USB to 6 pin mini din*/
+#define RTSYSTEMS_USB_62_PID 0x9e62 /* USB-62 USB to 8 pin mini din*/
+#define RTSYSTEMS_USB_63B_PID 0x9e63 /* USB-63 USB to 9 pin female*/
+#define RTSYSTEMS_USB_64_PID 0x9e64 /* USB-64 USB to 9 pin male*/
+#define RTSYSTEMS_USB_65_PID 0x9e65 /* USB-65 USB to 9 pin female null modem*/
+#define RTSYSTEMS_USB_92_PID 0x9e66 /* USB-92 USB to 12 pin plug*/
+#define RTSYSTEMS_USB_92D_PID 0x9e67 /* USB-92D USB to 12 pin plug data*/
+#define RTSYSTEMS_USB_W5R_PID 0x9e68 /* USB-W5R USB to 8 pin modular plug*/
+#define RTSYSTEMS_USB_A5R_PID 0x9e69 /* USB-A5R USB to 8 pin modular plug*/
+#define RTSYSTEMS_USB_PW1_PID 0x9e6A /* USB-PW1 USB to 8 pin modular plug*/
/*
* Physik Instrumente
diff --git a/drivers/usb/serial/funsoft.c b/drivers/usb/serial/funsoft.c
deleted file mode 100644
index 9362f8fd238..00000000000
--- a/drivers/usb/serial/funsoft.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Funsoft Serial USB driver
- *
- * Copyright (C) 2006 Greg Kroah-Hartman <gregkh@suse.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/tty.h>
-#include <linux/module.h>
-#include <linux/usb.h>
-#include <linux/usb/serial.h>
-#include <linux/uaccess.h>
-
-static const struct usb_device_id id_table[] = {
- { USB_DEVICE(0x1404, 0xcddc) },
- { },
-};
-MODULE_DEVICE_TABLE(usb, id_table);
-
-static struct usb_serial_driver funsoft_device = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "funsoft",
- },
- .id_table = id_table,
- .num_ports = 1,
-};
-
-static struct usb_serial_driver * const serial_drivers[] = {
- &funsoft_device, NULL
-};
-
-module_usb_serial_driver(serial_drivers, id_table);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index ba45170c78e..1f31e6b4c25 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -460,12 +460,7 @@ static bool usb_serial_generic_msr_changed(struct tty_struct *tty,
/*
* Use tty-port initialised flag to detect all hangups including the
* one generated at USB-device disconnect.
- *
- * FIXME: Remove hupping check once tty_port_hangup calls shutdown
- * (which clears the initialised flag) before wake up.
*/
- if (test_bit(TTY_HUPPING, &tty->flags))
- return true;
if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags))
return true;
@@ -496,12 +491,8 @@ int usb_serial_generic_tiocmiwait(struct tty_struct *tty, unsigned long arg)
ret = wait_event_interruptible(port->port.delta_msr_wait,
usb_serial_generic_msr_changed(tty, arg, &cnow));
- if (!ret) {
- if (test_bit(TTY_HUPPING, &tty->flags))
- ret = -EIO;
- if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags))
- ret = -EIO;
- }
+ if (!ret && !test_bit(ASYNCB_INITIALIZED, &port->port.flags))
+ ret = -EIO;
return ret;
}
diff --git a/drivers/usb/serial/hp4x.c b/drivers/usb/serial/hp4x.c
deleted file mode 100644
index 2cba60d90c7..00000000000
--- a/drivers/usb/serial/hp4x.c
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * HP4x Calculators Serial USB driver
- *
- * Copyright (C) 2005 Arthur Huillet (ahuillet@users.sf.net)
- * Copyright (C) 2001-2005 Greg Kroah-Hartman (greg@kroah.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * See Documentation/usb/usb-serial.txt for more information on using this
- * driver
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/tty.h>
-#include <linux/module.h>
-#include <linux/usb.h>
-#include <linux/usb/serial.h>
-
-#define DRIVER_DESC "HP4x (48/49) Generic Serial driver"
-
-#define HP_VENDOR_ID 0x03f0
-#define HP49GP_PRODUCT_ID 0x0121
-
-static const struct usb_device_id id_table[] = {
- { USB_DEVICE(HP_VENDOR_ID, HP49GP_PRODUCT_ID) },
- { } /* Terminating entry */
-};
-
-MODULE_DEVICE_TABLE(usb, id_table);
-
-static struct usb_serial_driver hp49gp_device = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "hp4X",
- },
- .id_table = id_table,
- .num_ports = 1,
-};
-
-static struct usb_serial_driver * const serial_drivers[] = {
- &hp49gp_device, NULL
-};
-
-module_usb_serial_driver(serial_drivers, id_table);
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index dc2803b5eb0..c91481d74a1 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -56,9 +56,7 @@
#define MAX_NAME_LEN 64
-#define CHASE_TIMEOUT (5*HZ) /* 5 seconds */
#define OPEN_TIMEOUT (5*HZ) /* 5 seconds */
-#define COMMAND_TIMEOUT (5*HZ) /* 5 seconds */
/* receive port state */
enum RXSTATE {
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 60054e72b75..b7187bf3246 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -64,8 +64,6 @@
#define EDGE_CLOSING_WAIT 4000 /* in .01 sec */
-#define EDGE_OUT_BUF_SIZE 1024
-
/* Product information read from the Edgeport */
struct product_info {
@@ -93,7 +91,6 @@ struct edgeport_port {
spinlock_t ep_lock;
int ep_read_urb_state;
int ep_write_urb_in_use;
- struct kfifo write_fifo;
};
struct edgeport_serial {
@@ -1732,22 +1729,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
return -ENODEV;
port_number = port->port_number;
- switch (port_number) {
- case 0:
- edge_port->uart_base = UMPMEM_BASE_UART1;
- edge_port->dma_address = UMPD_OEDB1_ADDRESS;
- break;
- case 1:
- edge_port->uart_base = UMPMEM_BASE_UART2;
- edge_port->dma_address = UMPD_OEDB2_ADDRESS;
- break;
- default:
- dev_err(&port->dev, "Unknown port number!!!\n");
- return -ENODEV;
- }
-
- dev_dbg(&port->dev, "%s - port_number = %d, uart_base = %04x, dma_address = %04x\n",
- __func__, port_number, edge_port->uart_base, edge_port->dma_address);
dev = port->serial->dev;
@@ -1872,8 +1853,6 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
++edge_serial->num_ports_open;
- port->port.drain_delay = 1;
-
goto release_es_lock;
unlink_int_urb:
@@ -1905,7 +1884,7 @@ static void edge_close(struct usb_serial_port *port)
usb_kill_urb(port->write_urb);
edge_port->ep_write_urb_in_use = 0;
spin_lock_irqsave(&edge_port->ep_lock, flags);
- kfifo_reset_out(&edge_port->write_fifo);
+ kfifo_reset_out(&port->write_fifo);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
dev_dbg(&port->dev, "%s - send umpc_close_port\n", __func__);
@@ -1939,7 +1918,7 @@ static int edge_write(struct tty_struct *tty, struct usb_serial_port *port,
if (edge_port->close_pending == 1)
return -ENODEV;
- count = kfifo_in_locked(&edge_port->write_fifo, data, count,
+ count = kfifo_in_locked(&port->write_fifo, data, count,
&edge_port->ep_lock);
edge_send(port, tty);
@@ -1959,7 +1938,7 @@ static void edge_send(struct usb_serial_port *port, struct tty_struct *tty)
return;
}
- count = kfifo_out(&edge_port->write_fifo,
+ count = kfifo_out(&port->write_fifo,
port->write_urb->transfer_buffer,
port->bulk_out_size);
@@ -2007,7 +1986,7 @@ static int edge_write_room(struct tty_struct *tty)
return 0;
spin_lock_irqsave(&edge_port->ep_lock, flags);
- room = kfifo_avail(&edge_port->write_fifo);
+ room = kfifo_avail(&port->write_fifo);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
dev_dbg(&port->dev, "%s - returns %d\n", __func__, room);
@@ -2024,7 +2003,7 @@ static int edge_chars_in_buffer(struct tty_struct *tty)
return 0;
spin_lock_irqsave(&edge_port->ep_lock, flags);
- chars = kfifo_len(&edge_port->write_fifo);
+ chars = kfifo_len(&port->write_fifo);
spin_unlock_irqrestore(&edge_port->ep_lock, flags);
dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
@@ -2451,30 +2430,45 @@ static int edge_port_probe(struct usb_serial_port *port)
if (!edge_port)
return -ENOMEM;
- ret = kfifo_alloc(&edge_port->write_fifo, EDGE_OUT_BUF_SIZE,
- GFP_KERNEL);
- if (ret) {
- kfree(edge_port);
- return -ENOMEM;
- }
-
spin_lock_init(&edge_port->ep_lock);
edge_port->port = port;
edge_port->edge_serial = usb_get_serial_data(port->serial);
edge_port->bUartMode = default_uart_mode;
+ switch (port->port_number) {
+ case 0:
+ edge_port->uart_base = UMPMEM_BASE_UART1;
+ edge_port->dma_address = UMPD_OEDB1_ADDRESS;
+ break;
+ case 1:
+ edge_port->uart_base = UMPMEM_BASE_UART2;
+ edge_port->dma_address = UMPD_OEDB2_ADDRESS;
+ break;
+ default:
+ dev_err(&port->dev, "unknown port number\n");
+ ret = -ENODEV;
+ goto err;
+ }
+
+ dev_dbg(&port->dev,
+ "%s - port_number = %d, uart_base = %04x, dma_address = %04x\n",
+ __func__, port->port_number, edge_port->uart_base,
+ edge_port->dma_address);
+
usb_set_serial_port_data(port, edge_port);
ret = edge_create_sysfs_attrs(port);
- if (ret) {
- kfifo_free(&edge_port->write_fifo);
- kfree(edge_port);
- return ret;
- }
+ if (ret)
+ goto err;
port->port.closing_wait = msecs_to_jiffies(closing_wait * 10);
+ port->port.drain_delay = 1;
return 0;
+err:
+ kfree(edge_port);
+
+ return ret;
}
static int edge_port_remove(struct usb_serial_port *port)
@@ -2483,7 +2477,6 @@ static int edge_port_remove(struct usb_serial_port *port)
edge_port = usb_get_serial_port_data(port);
edge_remove_sysfs_attrs(port);
- kfifo_free(&edge_port->write_fifo);
kfree(edge_port);
return 0;
@@ -2491,7 +2484,7 @@ static int edge_port_remove(struct usb_serial_port *port)
/* Sysfs Attributes */
-static ssize_t show_uart_mode(struct device *dev,
+static ssize_t uart_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
@@ -2500,7 +2493,7 @@ static ssize_t show_uart_mode(struct device *dev,
return sprintf(buf, "%d\n", edge_port->bUartMode);
}
-static ssize_t store_uart_mode(struct device *dev,
+static ssize_t uart_mode_store(struct device *dev,
struct device_attribute *attr, const char *valbuf, size_t count)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
@@ -2516,9 +2509,7 @@ static ssize_t store_uart_mode(struct device *dev,
return count;
}
-
-static DEVICE_ATTR(uart_mode, S_IWUSR | S_IRUGO, show_uart_mode,
- store_uart_mode);
+static DEVICE_ATTR_RW(uart_mode);
static int edge_create_sysfs_attrs(struct usb_serial_port *port)
{
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 790673e5faa..57c439a24b5 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -1130,7 +1130,7 @@ static int iuu_vcc_set(struct usb_serial_port *port, unsigned int vcc)
* Sysfs Attributes
*/
-static ssize_t show_vcc_mode(struct device *dev,
+static ssize_t vcc_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
@@ -1139,7 +1139,7 @@ static ssize_t show_vcc_mode(struct device *dev,
return sprintf(buf, "%d\n", priv->vcc);
}
-static ssize_t store_vcc_mode(struct device *dev,
+static ssize_t vcc_mode_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct usb_serial_port *port = to_usb_serial_port(dev);
@@ -1163,9 +1163,7 @@ static ssize_t store_vcc_mode(struct device *dev,
fail_store_vcc_mode:
return count;
}
-
-static DEVICE_ATTR(vcc_mode, S_IRUSR | S_IWUSR, show_vcc_mode,
- store_vcc_mode);
+static DEVICE_ATTR_RW(vcc_mode);
static int iuu_create_sysfs_attrs(struct usb_serial_port *port)
{
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 5a979729f8e..d6960aebe24 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -50,23 +50,27 @@
#define INSTAT_BUFLEN 32
#define GLOCONT_BUFLEN 64
#define INDAT49W_BUFLEN 512
+#define IN_BUFLEN 64
+#define OUT_BUFLEN 64
+#define INACK_BUFLEN 1
+#define OUTCONT_BUFLEN 64
/* Per device and per port private data */
struct keyspan_serial_private {
const struct keyspan_device_details *device_details;
struct urb *instat_urb;
- char instat_buf[INSTAT_BUFLEN];
+ char *instat_buf;
/* added to support 49wg, where data from all 4 ports comes in
on 1 EP and high-speed supported */
struct urb *indat_urb;
- char indat_buf[INDAT49W_BUFLEN];
+ char *indat_buf;
/* XXX this one probably will need a lock */
struct urb *glocont_urb;
- char glocont_buf[GLOCONT_BUFLEN];
- char ctrl_buf[8]; /* for EP0 control message */
+ char *glocont_buf;
+ char *ctrl_buf; /* for EP0 control message */
};
struct keyspan_port_private {
@@ -81,18 +85,18 @@ struct keyspan_port_private {
/* Input endpoints and buffer for this port */
struct urb *in_urbs[2];
- char in_buffer[2][64];
+ char *in_buffer[2];
/* Output endpoints and buffer for this port */
struct urb *out_urbs[2];
- char out_buffer[2][64];
+ char *out_buffer[2];
/* Input ack endpoint */
struct urb *inack_urb;
- char inack_buffer[1];
+ char *inack_buffer;
/* Output control endpoint */
struct urb *outcont_urb;
- char outcont_buffer[64];
+ char *outcont_buffer;
/* Settings for the port */
int baud;
@@ -2303,7 +2307,7 @@ static int keyspan_startup(struct usb_serial *serial)
if (d_details == NULL) {
dev_err(&serial->dev->dev, "%s - unknown product id %x\n",
__func__, le16_to_cpu(serial->dev->descriptor.idProduct));
- return 1;
+ return -ENODEV;
}
/* Setup private data for serial driver */
@@ -2313,6 +2317,22 @@ static int keyspan_startup(struct usb_serial *serial)
return -ENOMEM;
}
+ s_priv->instat_buf = kzalloc(INSTAT_BUFLEN, GFP_KERNEL);
+ if (!s_priv->instat_buf)
+ goto err_instat_buf;
+
+ s_priv->indat_buf = kzalloc(INDAT49W_BUFLEN, GFP_KERNEL);
+ if (!s_priv->indat_buf)
+ goto err_indat_buf;
+
+ s_priv->glocont_buf = kzalloc(GLOCONT_BUFLEN, GFP_KERNEL);
+ if (!s_priv->glocont_buf)
+ goto err_glocont_buf;
+
+ s_priv->ctrl_buf = kzalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
+ if (!s_priv->ctrl_buf)
+ goto err_ctrl_buf;
+
s_priv->device_details = d_details;
usb_set_serial_data(serial, s_priv);
@@ -2330,6 +2350,17 @@ static int keyspan_startup(struct usb_serial *serial)
}
return 0;
+
+err_ctrl_buf:
+ kfree(s_priv->glocont_buf);
+err_glocont_buf:
+ kfree(s_priv->indat_buf);
+err_indat_buf:
+ kfree(s_priv->instat_buf);
+err_instat_buf:
+ kfree(s_priv);
+
+ return -ENOMEM;
}
static void keyspan_disconnect(struct usb_serial *serial)
@@ -2353,6 +2384,11 @@ static void keyspan_release(struct usb_serial *serial)
usb_free_urb(s_priv->indat_urb);
usb_free_urb(s_priv->glocont_urb);
+ kfree(s_priv->ctrl_buf);
+ kfree(s_priv->glocont_buf);
+ kfree(s_priv->indat_buf);
+ kfree(s_priv->instat_buf);
+
kfree(s_priv);
}
@@ -2374,6 +2410,26 @@ static int keyspan_port_probe(struct usb_serial_port *port)
if (!p_priv)
return -ENOMEM;
+ for (i = 0; i < ARRAY_SIZE(p_priv->in_buffer); ++i) {
+ p_priv->in_buffer[i] = kzalloc(IN_BUFLEN, GFP_KERNEL);
+ if (!p_priv->in_buffer[i])
+ goto err_in_buffer;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(p_priv->out_buffer); ++i) {
+ p_priv->out_buffer[i] = kzalloc(OUT_BUFLEN, GFP_KERNEL);
+ if (!p_priv->out_buffer[i])
+ goto err_out_buffer;
+ }
+
+ p_priv->inack_buffer = kzalloc(INACK_BUFLEN, GFP_KERNEL);
+ if (!p_priv->inack_buffer)
+ goto err_inack_buffer;
+
+ p_priv->outcont_buffer = kzalloc(OUTCONT_BUFLEN, GFP_KERNEL);
+ if (!p_priv->outcont_buffer)
+ goto err_outcont_buffer;
+
p_priv->device_details = d_details;
/* Setup values for the various callback routines */
@@ -2386,7 +2442,8 @@ static int keyspan_port_probe(struct usb_serial_port *port)
for (i = 0; i <= d_details->indat_endp_flip; ++i, ++endp) {
p_priv->in_urbs[i] = keyspan_setup_urb(serial, endp,
USB_DIR_IN, port,
- p_priv->in_buffer[i], 64,
+ p_priv->in_buffer[i],
+ IN_BUFLEN,
cback->indat_callback);
}
/* outdat endpoints also have flip */
@@ -2394,25 +2451,41 @@ static int keyspan_port_probe(struct usb_serial_port *port)
for (i = 0; i <= d_details->outdat_endp_flip; ++i, ++endp) {
p_priv->out_urbs[i] = keyspan_setup_urb(serial, endp,
USB_DIR_OUT, port,
- p_priv->out_buffer[i], 64,
+ p_priv->out_buffer[i],
+ OUT_BUFLEN,
cback->outdat_callback);
}
/* inack endpoint */
p_priv->inack_urb = keyspan_setup_urb(serial,
d_details->inack_endpoints[port_num],
USB_DIR_IN, port,
- p_priv->inack_buffer, 1,
+ p_priv->inack_buffer,
+ INACK_BUFLEN,
cback->inack_callback);
/* outcont endpoint */
p_priv->outcont_urb = keyspan_setup_urb(serial,
d_details->outcont_endpoints[port_num],
USB_DIR_OUT, port,
- p_priv->outcont_buffer, 64,
+ p_priv->outcont_buffer,
+ OUTCONT_BUFLEN,
cback->outcont_callback);
usb_set_serial_port_data(port, p_priv);
return 0;
+
+err_outcont_buffer:
+ kfree(p_priv->inack_buffer);
+err_inack_buffer:
+ for (i = 0; i < ARRAY_SIZE(p_priv->out_buffer); ++i)
+ kfree(p_priv->out_buffer[i]);
+err_out_buffer:
+ for (i = 0; i < ARRAY_SIZE(p_priv->in_buffer); ++i)
+ kfree(p_priv->in_buffer[i]);
+err_in_buffer:
+ kfree(p_priv);
+
+ return -ENOMEM;
}
static int keyspan_port_remove(struct usb_serial_port *port)
@@ -2436,6 +2509,13 @@ static int keyspan_port_remove(struct usb_serial_port *port)
usb_free_urb(p_priv->out_urbs[i]);
}
+ kfree(p_priv->outcont_buffer);
+ kfree(p_priv->inack_buffer);
+ for (i = 0; i < ARRAY_SIZE(p_priv->out_buffer); ++i)
+ kfree(p_priv->out_buffer[i]);
+ for (i = 0; i < ARRAY_SIZE(p_priv->in_buffer); ++i)
+ kfree(p_priv->in_buffer[i]);
+
kfree(p_priv);
return 0;
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 51da424327b..84657e07dc5 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -90,6 +90,7 @@ struct urbtracker {
struct list_head urblist_entry;
struct kref ref_count;
struct urb *urb;
+ struct usb_ctrlrequest *setup;
};
enum mos7715_pp_modes {
@@ -271,6 +272,7 @@ static void destroy_urbtracker(struct kref *kref)
struct mos7715_parport *mos_parport = urbtrack->mos_parport;
usb_free_urb(urbtrack->urb);
+ kfree(urbtrack->setup);
kfree(urbtrack);
kref_put(&mos_parport->ref_count, destroy_mos_parport);
}
@@ -355,7 +357,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
struct urbtracker *urbtrack;
int ret_val;
unsigned long flags;
- struct usb_ctrlrequest setup;
struct usb_serial *serial = mos_parport->serial;
struct usb_device *usbdev = serial->dev;
@@ -373,14 +374,20 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
kfree(urbtrack);
return -ENOMEM;
}
- setup.bRequestType = (__u8)0x40;
- setup.bRequest = (__u8)0x0e;
- setup.wValue = get_reg_value(reg, dummy);
- setup.wIndex = get_reg_index(reg);
- setup.wLength = 0;
+ urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_ATOMIC);
+ if (!urbtrack->setup) {
+ usb_free_urb(urbtrack->urb);
+ kfree(urbtrack);
+ return -ENOMEM;
+ }
+ urbtrack->setup->bRequestType = (__u8)0x40;
+ urbtrack->setup->bRequest = (__u8)0x0e;
+ urbtrack->setup->wValue = cpu_to_le16(get_reg_value(reg, dummy));
+ urbtrack->setup->wIndex = cpu_to_le16(get_reg_index(reg));
+ urbtrack->setup->wLength = 0;
usb_fill_control_urb(urbtrack->urb, usbdev,
usb_sndctrlpipe(usbdev, 0),
- (unsigned char *)&setup,
+ (unsigned char *)urbtrack->setup,
NULL, 0, async_complete, urbtrack);
kref_init(&urbtrack->ref_count);
INIT_LIST_HEAD(&urbtrack->urblist_entry);
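The setup packet moves off the stack here because usb_fill_control_urb() only stores a pointer to it and the URB completes asynchronously, long after write_parport_reg_nonblock() has returned; a stack-allocated usb_ctrlrequest would go out of scope (and is not suitable for DMA) before the transfer finishes, which is also why it is now freed together with the urbtracker in destroy_urbtracker(). The cpu_to_le16() conversions on wValue/wIndex appear to be an endianness fix folded into the same change.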
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 0a818b23850..fdf953539c6 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -183,7 +183,10 @@
#define LED_ON_MS 500
#define LED_OFF_MS 500
-static int device_type;
+enum mos7840_flag {
+ MOS7840_FLAG_CTRL_BUSY,
+ MOS7840_FLAG_LED_BUSY,
+};
static const struct usb_device_id id_table[] = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
@@ -218,7 +221,6 @@ struct moschip_port {
__u8 shadowMCR; /* last MCR value received */
char open;
char open_ports;
- wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */
struct usb_serial_port *port; /* loop back to the owner of this object */
/* Offsets */
@@ -238,9 +240,12 @@ struct moschip_port {
/* For device(s) with LED indicator */
bool has_led;
- bool led_flag;
struct timer_list led_timer1; /* Timer for LED on */
struct timer_list led_timer2; /* Timer for LED off */
+ struct urb *led_urb;
+ struct usb_ctrlrequest *led_dr;
+
+ unsigned long flags;
};
/*
@@ -460,10 +465,10 @@ static void mos7840_control_callback(struct urb *urb)
case -ESHUTDOWN:
/* this urb is terminated, clean up */
dev_dbg(dev, "%s - urb shutting down with status: %d\n", __func__, status);
- return;
+ goto out;
default:
dev_dbg(dev, "%s - nonzero urb status received: %d\n", __func__, status);
- return;
+ goto out;
}
dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
@@ -476,6 +481,8 @@ static void mos7840_control_callback(struct urb *urb)
mos7840_handle_new_msr(mos7840_port, regval);
else if (mos7840_port->MsrLsr == 1)
mos7840_handle_new_lsr(mos7840_port, regval);
+out:
+ clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mos7840_port->flags);
}
static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
@@ -486,6 +493,9 @@ static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
unsigned char *buffer = mcs->ctrl_buf;
int ret;
+ if (test_and_set_bit_lock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags))
+ return -EBUSY;
+
dr->bRequestType = MCS_RD_RTYPE;
dr->bRequest = MCS_RDREQ;
dr->wValue = cpu_to_le16(Wval); /* 0 */
@@ -497,6 +507,9 @@ static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg,
mos7840_control_callback, mcs);
mcs->control_urb->transfer_buffer_length = 2;
ret = usb_submit_urb(mcs->control_urb, GFP_ATOMIC);
+ if (ret)
+ clear_bit_unlock(MOS7840_FLAG_CTRL_BUSY, &mcs->flags);
+
return ret;
}
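The new MOS7840_FLAG_CTRL_BUSY bit gives mos7840_get_reg() a simple claim/release discipline for the shared control URB. A generic sketch of that pattern, assuming the completion handler is the normal release path (names below are illustrative, not from the driver):

        if (test_and_set_bit_lock(MY_BUSY_BIT, &flags))
                return -EBUSY;          /* a request is already in flight */

        ret = usb_submit_urb(urb, GFP_ATOMIC);
        if (ret)
                clear_bit_unlock(MY_BUSY_BIT, &flags);  /* undo claim on failure */
        /* on success, the completion callback calls clear_bit_unlock() */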
@@ -523,7 +536,7 @@ static void mos7840_set_led_async(struct moschip_port *mcs, __u16 wval,
__u16 reg)
{
struct usb_device *dev = mcs->port->serial->dev;
- struct usb_ctrlrequest *dr = mcs->dr;
+ struct usb_ctrlrequest *dr = mcs->led_dr;
dr->bRequestType = MCS_WR_RTYPE;
dr->bRequest = MCS_WRREQ;
@@ -531,10 +544,10 @@ static void mos7840_set_led_async(struct moschip_port *mcs, __u16 wval,
dr->wIndex = cpu_to_le16(reg);
dr->wLength = cpu_to_le16(0);
- usb_fill_control_urb(mcs->control_urb, dev, usb_sndctrlpipe(dev, 0),
+ usb_fill_control_urb(mcs->led_urb, dev, usb_sndctrlpipe(dev, 0),
(unsigned char *)dr, NULL, 0, mos7840_set_led_callback, NULL);
- usb_submit_urb(mcs->control_urb, GFP_ATOMIC);
+ usb_submit_urb(mcs->led_urb, GFP_ATOMIC);
}
static void mos7840_set_led_sync(struct usb_serial_port *port, __u16 reg,
@@ -560,7 +573,19 @@ static void mos7840_led_flag_off(unsigned long arg)
{
struct moschip_port *mcs = (struct moschip_port *) arg;
- mcs->led_flag = false;
+ clear_bit_unlock(MOS7840_FLAG_LED_BUSY, &mcs->flags);
+}
+
+static void mos7840_led_activity(struct usb_serial_port *port)
+{
+ struct moschip_port *mos7840_port = usb_get_serial_port_data(port);
+
+ if (test_and_set_bit_lock(MOS7840_FLAG_LED_BUSY, &mos7840_port->flags))
+ return;
+
+ mos7840_set_led_async(mos7840_port, 0x0301, MODEM_CONTROL_REGISTER);
+ mod_timer(&mos7840_port->led_timer1,
+ jiffies + msecs_to_jiffies(LED_ON_MS));
}
/*****************************************************************************
@@ -758,14 +783,8 @@ static void mos7840_bulk_in_callback(struct urb *urb)
return;
}
- /* Turn on LED */
- if (mos7840_port->has_led && !mos7840_port->led_flag) {
- mos7840_port->led_flag = true;
- mos7840_set_led_async(mos7840_port, 0x0301,
- MODEM_CONTROL_REGISTER);
- mod_timer(&mos7840_port->led_timer1,
- jiffies + msecs_to_jiffies(LED_ON_MS));
- }
+ if (mos7840_port->has_led)
+ mos7840_led_activity(port);
mos7840_port->read_urb_busy = true;
retval = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
@@ -816,18 +835,6 @@ static void mos7840_bulk_out_data_callback(struct urb *urb)
/************************************************************************/
/* D R I V E R T T Y I N T E R F A C E F U N C T I O N S */
/************************************************************************/
-#ifdef MCSSerialProbe
-static int mos7840_serial_probe(struct usb_serial *serial,
- const struct usb_device_id *id)
-{
-
- /*need to implement the mode_reg reading and updating\
- structures usb_serial_ device_type\
- (i.e num_ports, num_bulkin,bulkout etc) */
- /* Also we can update the changes attach */
- return 1;
-}
-#endif
/*****************************************************************************
* mos7840_open
@@ -905,20 +912,20 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
status = mos7840_get_reg_sync(port, mos7840_port->SpRegOffset, &Data);
if (status < 0) {
dev_dbg(&port->dev, "Reading Spreg failed\n");
- return -1;
+ goto err;
}
Data |= 0x80;
status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
if (status < 0) {
dev_dbg(&port->dev, "writing Spreg failed\n");
- return -1;
+ goto err;
}
Data &= ~0x80;
status = mos7840_set_reg_sync(port, mos7840_port->SpRegOffset, Data);
if (status < 0) {
dev_dbg(&port->dev, "writing Spreg failed\n");
- return -1;
+ goto err;
}
/* End of block to be checked */
@@ -927,7 +934,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
&Data);
if (status < 0) {
dev_dbg(&port->dev, "Reading Controlreg failed\n");
- return -1;
+ goto err;
}
Data |= 0x08; /* Driver done bit */
Data |= 0x20; /* rx_disable */
@@ -935,7 +942,7 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
mos7840_port->ControlRegOffset, Data);
if (status < 0) {
dev_dbg(&port->dev, "writing Controlreg failed\n");
- return -1;
+ goto err;
}
/* do register settings here */
/* Set all regs to the device default values. */
@@ -946,21 +953,21 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
status = mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "disabling interrupts failed\n");
- return -1;
+ goto err;
}
/* Set FIFO_CONTROL_REGISTER to the default value */
Data = 0x00;
status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n");
- return -1;
+ goto err;
}
Data = 0xcf;
status = mos7840_set_uart_reg(port, FIFO_CONTROL_REGISTER, Data);
if (status < 0) {
dev_dbg(&port->dev, "Writing FIFO_CONTROL_REGISTER failed\n");
- return -1;
+ goto err;
}
Data = 0x03;
@@ -1092,9 +1099,6 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
mos7840_port->read_urb_busy = false;
}
- /* initialize our wait queues */
- init_waitqueue_head(&mos7840_port->wait_chase);
-
/* initialize our port settings */
/* Must set to enable ints! */
mos7840_port->shadowMCR = MCR_MASTER_IE;
@@ -1103,6 +1107,15 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port)
/* mos7840_change_port_settings(mos7840_port,old_termios); */
return 0;
+err:
+ for (j = 0; j < NUM_URBS; ++j) {
+ urb = mos7840_port->write_urb_pool[j];
+ if (!urb)
+ continue;
+ kfree(urb->transfer_buffer);
+ usb_free_urb(urb);
+ }
+ return status;
}
/*****************************************************************************
@@ -1211,47 +1224,6 @@ static void mos7840_close(struct usb_serial_port *port)
mos7840_port->open = 0;
}
-/************************************************************************
- *
- * mos7840_block_until_chase_response
- *
- * This function will block the close until one of the following:
- * 1. Response to our Chase comes from mos7840
- * 2. A timeout of 10 seconds without activity has expired
- * (1K of mos7840 data @ 2400 baud ==> 4 sec to empty)
- *
- ************************************************************************/
-
-static void mos7840_block_until_chase_response(struct tty_struct *tty,
- struct moschip_port *mos7840_port)
-{
- int timeout = msecs_to_jiffies(1000);
- int wait = 10;
- int count;
-
- while (1) {
- count = mos7840_chars_in_buffer(tty);
-
- /* Check for Buffer status */
- if (count <= 0)
- return;
-
- /* Block the thread for a while */
- interruptible_sleep_on_timeout(&mos7840_port->wait_chase,
- timeout);
- /* No activity.. count down section */
- wait--;
- if (wait == 0) {
- dev_dbg(&mos7840_port->port->dev, "%s - TIMEOUT\n", __func__);
- return;
- } else {
- /* Reset timeout value back to seconds */
- wait = 10;
- }
- }
-
-}
-
/*****************************************************************************
* mos7840_break
* this function sends a break to the port
@@ -1275,9 +1247,6 @@ static void mos7840_break(struct tty_struct *tty, int break_state)
if (mos7840_port == NULL)
return;
- /* flush and block until tx is empty */
- mos7840_block_until_chase_response(tty, mos7840_port);
-
if (break_state == -1)
data = mos7840_port->shadowLCR | LCR_SET_BREAK;
else
@@ -1445,13 +1414,8 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port,
data1 = urb->transfer_buffer;
dev_dbg(&port->dev, "bulkout endpoint is %d\n", port->bulk_out_endpointAddress);
- /* Turn on LED */
- if (mos7840_port->has_led && !mos7840_port->led_flag) {
- mos7840_port->led_flag = true;
- mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0301);
- mod_timer(&mos7840_port->led_timer1,
- jiffies + msecs_to_jiffies(LED_ON_MS));
- }
+ if (mos7840_port->has_led)
+ mos7840_led_activity(port);
/* send it down the pipe */
status = usb_submit_urb(urb, GFP_ATOMIC);
@@ -2178,38 +2142,48 @@ static int mos7810_check(struct usb_serial *serial)
return 0;
}
-static int mos7840_calc_num_ports(struct usb_serial *serial)
+static int mos7840_probe(struct usb_serial *serial,
+ const struct usb_device_id *id)
{
- __u16 data = 0x00;
+ u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
u8 *buf;
- int mos7840_num_ports;
+ int device_type;
+
+ if (product == MOSCHIP_DEVICE_ID_7810 ||
+ product == MOSCHIP_DEVICE_ID_7820) {
+ device_type = product;
+ goto out;
+ }
buf = kzalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
- if (buf) {
- usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+ if (!buf)
+ return -ENOMEM;
+
+ usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, buf,
VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT);
- data = *buf;
- kfree(buf);
- }
- if (serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7810 ||
- serial->dev->descriptor.idProduct == MOSCHIP_DEVICE_ID_7820) {
- device_type = serial->dev->descriptor.idProduct;
- } else {
- /* For a MCS7840 device GPIO0 must be set to 1 */
- if ((data & 0x01) == 1)
- device_type = MOSCHIP_DEVICE_ID_7840;
- else if (mos7810_check(serial))
- device_type = MOSCHIP_DEVICE_ID_7810;
- else
- device_type = MOSCHIP_DEVICE_ID_7820;
- }
+ /* For a MCS7840 device GPIO0 must be set to 1 */
+ if (buf[0] & 0x01)
+ device_type = MOSCHIP_DEVICE_ID_7840;
+ else if (mos7810_check(serial))
+ device_type = MOSCHIP_DEVICE_ID_7810;
+ else
+ device_type = MOSCHIP_DEVICE_ID_7820;
+
+ kfree(buf);
+out:
+ usb_set_serial_data(serial, (void *)(unsigned long)device_type);
+
+ return 0;
+}
+
+static int mos7840_calc_num_ports(struct usb_serial *serial)
+{
+ int device_type = (unsigned long)usb_get_serial_data(serial);
+ int mos7840_num_ports;
mos7840_num_ports = (device_type >> 4) & 0x000F;
- serial->num_bulk_in = mos7840_num_ports;
- serial->num_bulk_out = mos7840_num_ports;
- serial->num_ports = mos7840_num_ports;
return mos7840_num_ports;
}
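mos7840_calc_num_ports() now just decodes the device type that mos7840_probe() stashed in the serial data pointer, taking the port count from the second nibble of the product ID. A standalone check of that arithmetic (plain C, own example; the assertions simply restate the MosChip IDs this driver handles):

#include <assert.h>

int main(void)
{
        /* (device_type >> 4) & 0x000F, as in mos7840_calc_num_ports() */
        assert(((0x7840 >> 4) & 0x000F) == 4);  /* MCS7840: 4 ports */
        assert(((0x7820 >> 4) & 0x000F) == 2);  /* MCS7820: 2 ports */
        assert(((0x7810 >> 4) & 0x000F) == 1);  /* MCS7810: 1 port  */
        return 0;
}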
@@ -2217,6 +2191,7 @@ static int mos7840_calc_num_ports(struct usb_serial *serial)
static int mos7840_port_probe(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
+ int device_type = (unsigned long)usb_get_serial_data(serial);
struct moschip_port *mos7840_port;
int status;
int pnum;
@@ -2392,6 +2367,14 @@ static int mos7840_port_probe(struct usb_serial_port *port)
if (device_type == MOSCHIP_DEVICE_ID_7810) {
mos7840_port->has_led = true;
+ mos7840_port->led_urb = usb_alloc_urb(0, GFP_KERNEL);
+ mos7840_port->led_dr = kmalloc(sizeof(*mos7840_port->led_dr),
+ GFP_KERNEL);
+ if (!mos7840_port->led_urb || !mos7840_port->led_dr) {
+ status = -ENOMEM;
+ goto error;
+ }
+
init_timer(&mos7840_port->led_timer1);
mos7840_port->led_timer1.function = mos7840_led_off;
mos7840_port->led_timer1.expires =
@@ -2404,8 +2387,6 @@ static int mos7840_port_probe(struct usb_serial_port *port)
jiffies + msecs_to_jiffies(LED_OFF_MS);
mos7840_port->led_timer2.data = (unsigned long)mos7840_port;
- mos7840_port->led_flag = false;
-
/* Turn off LED */
mos7840_set_led_sync(port, MODEM_CONTROL_REGISTER, 0x0300);
}
@@ -2427,6 +2408,8 @@ out:
}
return 0;
error:
+ kfree(mos7840_port->led_dr);
+ usb_free_urb(mos7840_port->led_urb);
kfree(mos7840_port->dr);
kfree(mos7840_port->ctrl_buf);
usb_free_urb(mos7840_port->control_urb);
@@ -2447,6 +2430,10 @@ static int mos7840_port_remove(struct usb_serial_port *port)
del_timer_sync(&mos7840_port->led_timer1);
del_timer_sync(&mos7840_port->led_timer2);
+
+ usb_kill_urb(mos7840_port->led_urb);
+ usb_free_urb(mos7840_port->led_urb);
+ kfree(mos7840_port->led_dr);
}
usb_kill_urb(mos7840_port->control_urb);
usb_free_urb(mos7840_port->control_urb);
@@ -2473,9 +2460,7 @@ static struct usb_serial_driver moschip7840_4port_device = {
.throttle = mos7840_throttle,
.unthrottle = mos7840_unthrottle,
.calc_num_ports = mos7840_calc_num_ports,
-#ifdef MCSSerialProbe
- .probe = mos7840_serial_probe,
-#endif
+ .probe = mos7840_probe,
.ioctl = mos7840_ioctl,
.set_termios = mos7840_set_termios,
.break_ctl = mos7840_break,
diff --git a/drivers/usb/serial/moto_modem.c b/drivers/usb/serial/moto_modem.c
deleted file mode 100644
index c5ff6c7795a..00000000000
--- a/drivers/usb/serial/moto_modem.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Motorola USB Phone driver
- *
- * Copyright (C) 2008 Greg Kroah-Hartman <greg@kroah.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * {sigh}
- * Motorola should be using the CDC ACM USB spec, but instead
- * they try to just "do their own thing"... This driver should handle a
- * few phones in which a basic "dumb serial connection" is needed to be
- * able to get a connection through to them.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/tty.h>
-#include <linux/module.h>
-#include <linux/usb.h>
-#include <linux/usb/serial.h>
-
-static const struct usb_device_id id_table[] = {
- { USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */
- { USB_DEVICE(0x0c44, 0x0022) }, /* unknown Mororola phone */
- { USB_DEVICE(0x22b8, 0x2a64) }, /* Motorola KRZR K1m */
- { USB_DEVICE(0x22b8, 0x2c84) }, /* Motorola VE240 phone */
- { USB_DEVICE(0x22b8, 0x2c64) }, /* Motorola V950 phone */
- { },
-};
-MODULE_DEVICE_TABLE(usb, id_table);
-
-static struct usb_serial_driver moto_device = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "moto-modem",
- },
- .id_table = id_table,
- .num_ports = 1,
-};
-
-static struct usb_serial_driver * const serial_drivers[] = {
- &moto_device, NULL
-};
-
-module_usb_serial_driver(serial_drivers, id_table);
-MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 5dd857de05b..1cf6f125f5f 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -341,17 +341,12 @@ static void option_instat_callback(struct urb *urb);
#define OLIVETTI_VENDOR_ID 0x0b3c
#define OLIVETTI_PRODUCT_OLICARD100 0xc000
#define OLIVETTI_PRODUCT_OLICARD145 0xc003
+#define OLIVETTI_PRODUCT_OLICARD200 0xc005
/* Celot products */
#define CELOT_VENDOR_ID 0x211f
#define CELOT_PRODUCT_CT680M 0x6801
-/* ONDA Communication vendor id */
-#define ONDA_VENDOR_ID 0x1ee8
-
-/* ONDA MT825UP HSDPA 14.2 modem */
-#define ONDA_MT825UP 0x000b
-
/* Samsung products */
#define SAMSUNG_VENDOR_ID 0x04e8
#define SAMSUNG_PRODUCT_GT_B3730 0x6889
@@ -444,7 +439,8 @@ static void option_instat_callback(struct urb *urb);
/* Hyundai Petatel Inc. products */
#define PETATEL_VENDOR_ID 0x1ff4
-#define PETATEL_PRODUCT_NP10T 0x600e
+#define PETATEL_PRODUCT_NP10T_600A 0x600a
+#define PETATEL_PRODUCT_NP10T_600E 0x600e
/* TP-LINK Incorporated products */
#define TPLINK_VENDOR_ID 0x2357
@@ -782,6 +778,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
@@ -817,7 +814,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
@@ -1256,8 +1254,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
+ { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) },
{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
- { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */
{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) },
@@ -1329,9 +1327,12 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) },
{ USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) },
- { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) },
+ { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) },
+ { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */
@@ -1339,6 +1340,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index 7e3e0782e51..a2080ac7b7e 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -343,6 +343,8 @@ static int oti6858_port_probe(struct usb_serial_port *port)
usb_set_serial_port_data(port, priv);
+ port->port.drain_delay = 256; /* FIXME: check the FIFO length */
+
return 0;
}
@@ -411,9 +413,6 @@ static void oti6858_set_termios(struct tty_struct *tty,
__le16 divisor;
int br;
- if (!tty)
- return;
-
cflag = tty->termios.c_cflag;
spin_lock_irqsave(&priv->lock, flags);
@@ -509,7 +508,6 @@ static void oti6858_set_termios(struct tty_struct *tty,
static int oti6858_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct oti6858_private *priv = usb_get_serial_port_data(port);
- struct ktermios tmp_termios;
struct usb_serial *serial = port->serial;
struct oti6858_control_pkt *buf;
unsigned long flags;
@@ -560,8 +558,8 @@ static int oti6858_open(struct tty_struct *tty, struct usb_serial_port *port)
/* setup termios */
if (tty)
- oti6858_set_termios(tty, port, &tmp_termios);
- port->port.drain_delay = 256; /* FIXME: check the FIFO length */
+ oti6858_set_termios(tty, port, NULL);
+
return 0;
}
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index cb6bbed374f..e7a84f0f517 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -4,6 +4,11 @@
* Copyright (C) 2001-2007 Greg Kroah-Hartman (greg@kroah.com)
* Copyright (C) 2003 IBM Corp.
*
+ * Copyright (C) 2009, 2013 Frank Schäfer <fschaefer.oss@googlemail.com>
+ * - fixes, improvements and documentation for the baud rate encoding methods
+ * Copyright (C) 2013 Reinhard Max <max@suse.de>
+ * - fixes and improvements for the divisor based baud rate encoding method
+ *
* Original driver for 2.2.x by anonymous
*
* This program is free software; you can redistribute it and/or
@@ -29,6 +34,7 @@
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
+#include <asm/unaligned.h>
#include "pl2303.h"
/*
@@ -128,10 +134,17 @@ MODULE_DEVICE_TABLE(usb, id_table);
enum pl2303_type {
- type_0, /* don't know the difference between type 0 and */
- type_1, /* type 1, until someone from prolific tells us... */
- HX, /* HX version of the pl2303 chip */
+ type_0, /* H version ? */
+ type_1, /* H version ? */
+ HX_TA, /* HX(A) / X(A) / TA version */ /* TODO: improve */
+ HXD_EA_RA_SA, /* HXD / EA / RA / SA version */ /* TODO: improve */
+ TB, /* TB version */
};
+/*
+ * NOTE: don't know the difference between type 0 and type 1,
+ * until someone from Prolific tells us...
+ * TODO: distinguish between X/HX, TA and HXD, EA, RA, SA variants
+ */
struct pl2303_serial_private {
enum pl2303_type type;
@@ -171,6 +184,7 @@ static int pl2303_startup(struct usb_serial *serial)
{
struct pl2303_serial_private *spriv;
enum pl2303_type type = type_0;
+ char *type_str = "unknown (treating as type_0)";
unsigned char *buf;
spriv = kzalloc(sizeof(*spriv), GFP_KERNEL);
@@ -183,15 +197,38 @@ static int pl2303_startup(struct usb_serial *serial)
return -ENOMEM;
}
- if (serial->dev->descriptor.bDeviceClass == 0x02)
+ if (serial->dev->descriptor.bDeviceClass == 0x02) {
type = type_0;
- else if (serial->dev->descriptor.bMaxPacketSize0 == 0x40)
- type = HX;
- else if (serial->dev->descriptor.bDeviceClass == 0x00)
- type = type_1;
- else if (serial->dev->descriptor.bDeviceClass == 0xFF)
+ type_str = "type_0";
+ } else if (serial->dev->descriptor.bMaxPacketSize0 == 0x40) {
+ /*
+ * NOTE: The bcdDevice version is the only difference between
+ * the device descriptors of the X/HX, HXD, EA, RA, SA, TA, TB
+ */
+ if (le16_to_cpu(serial->dev->descriptor.bcdDevice) == 0x300) {
+ type = HX_TA;
+ type_str = "X/HX/TA";
+ } else if (le16_to_cpu(serial->dev->descriptor.bcdDevice)
+ == 0x400) {
+ type = HXD_EA_RA_SA;
+ type_str = "HXD/EA/RA/SA";
+ } else if (le16_to_cpu(serial->dev->descriptor.bcdDevice)
+ == 0x500) {
+ type = TB;
+ type_str = "TB";
+ } else {
+ dev_info(&serial->interface->dev,
+ "unknown/unsupported device type\n");
+ kfree(spriv);
+ kfree(buf);
+ return -ENODEV;
+ }
+ } else if (serial->dev->descriptor.bDeviceClass == 0x00
+ || serial->dev->descriptor.bDeviceClass == 0xFF) {
type = type_1;
- dev_dbg(&serial->interface->dev, "device type: %d\n", type);
+ type_str = "type_1";
+ }
+ dev_dbg(&serial->interface->dev, "device type: %s\n", type_str);
spriv->type = type;
usb_set_serial_data(serial, spriv);
@@ -206,10 +243,10 @@ static int pl2303_startup(struct usb_serial *serial)
pl2303_vendor_read(0x8383, 0, serial, buf);
pl2303_vendor_write(0, 1, serial);
pl2303_vendor_write(1, 0, serial);
- if (type == HX)
- pl2303_vendor_write(2, 0x44, serial);
- else
+ if (type == type_0 || type == type_1)
pl2303_vendor_write(2, 0x24, serial);
+ else
+ pl2303_vendor_write(2, 0x44, serial);
kfree(buf);
return 0;
@@ -235,6 +272,8 @@ static int pl2303_port_probe(struct usb_serial_port *port)
usb_set_serial_port_data(port, priv);
+ port->port.drain_delay = 256;
+
return 0;
}
@@ -261,6 +300,175 @@ static int pl2303_set_control_lines(struct usb_serial_port *port, u8 value)
return retval;
}
+static int pl2303_baudrate_encode_direct(int baud, enum pl2303_type type,
+ u8 buf[4])
+{
+ /*
+ * NOTE: Only the values defined in baud_sup are supported !
+ * => if unsupported values are set, the PL2303 seems to
+ * use 9600 baud (at least my PL2303X always does)
+ */
+ const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600,
+ 4800, 7200, 9600, 14400, 19200, 28800, 38400,
+ 57600, 115200, 230400, 460800, 614400, 921600,
+ 1228800, 2457600, 3000000, 6000000, 12000000 };
+ /*
+ * NOTE: With the exception of type_0/1 devices, the following
+ * additional baud rates are supported (tested with HX rev. 3A only):
+ * 110*, 56000*, 128000, 134400, 161280, 201600, 256000*, 268800,
+ * 403200, 806400. (*: not HX)
+ *
+ * Maximum values: HXD, TB: 12000000; HX, TA: 6000000;
+ * type_0+1: 1228800; RA: 921600; SA: 115200
+ *
+ * As long as we are not using this encoding method for anything other
+ * than the type_0+1 and HX chips, there is no point in complicating
+ * the code to support them.
+ */
+ int i;
+
+ /* Set baudrate to nearest supported value */
+ for (i = 0; i < ARRAY_SIZE(baud_sup); ++i) {
+ if (baud_sup[i] > baud)
+ break;
+ }
+ if (i == ARRAY_SIZE(baud_sup))
+ baud = baud_sup[i - 1];
+ else if (i > 0 && (baud_sup[i] - baud) > (baud - baud_sup[i - 1]))
+ baud = baud_sup[i - 1];
+ else
+ baud = baud_sup[i];
+ /* Respect the chip type specific baud rate limits */
+ /*
+ * FIXME: as long as we don't know how to distinguish between the
+ * HXD, EA, RA, and SA chip variants, allow the max. value of 12M.
+ */
+ if (type == HX_TA)
+ baud = min_t(int, baud, 6000000);
+ else if (type == type_0 || type == type_1)
+ baud = min_t(int, baud, 1228800);
+ /* Direct (standard) baud rate encoding method */
+ put_unaligned_le32(baud, buf);
+
+ return baud;
+}
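For example (own arithmetic, not from the patch): a request for 250000 baud stops the loop above at baud_sup[i] = 460800, and since 460800 - 250000 = 210800 is larger than 250000 - 230400 = 19600 the function falls back to 230400, clamps it against the per-type limit, and writes it little-endian into buf[0..3] with put_unaligned_le32().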
+
+static int pl2303_baudrate_encode_divisor(int baud, enum pl2303_type type,
+ u8 buf[4])
+{
+ /*
+ * Divisor based baud rate encoding method
+ *
+ * NOTE: it's not clear if the type_0/1 chips support this method
+ *
+ * divisor = 12MHz * 32 / baudrate = 2^A * B
+ *
+ * with
+ *
+ * A = buf[1] & 0x0e
+ * B = buf[0] + (buf[1] & 0x01) << 8
+ *
+ * Special cases:
+ * => 8 < B < 16: device does not seem to work properly
+ * => B <= 8: device uses the max. value B = 512 instead
+ */
+ unsigned int A, B;
+
+ /*
+ * NOTE: The Windows driver allows maximum baud rates of 110% of the
+ * specified maximum value.
+ * Quick tests with early (2004) HX (rev. A) chips suggest that even
+ * higher baud rates (up to the maximum of 24M baud!) are working fine,
+ * but that should really be tested carefully in "real life" scenarios
+ * before removing the upper limit completely.
+ * Baud rates smaller than the specified 75 baud are definitely working
+ * fine.
+ */
+ if (type == type_0 || type == type_1)
+ baud = min_t(int, baud, 1228800 * 1.1);
+ else if (type == HX_TA)
+ baud = min_t(int, baud, 6000000 * 1.1);
+ else if (type == HXD_EA_RA_SA)
+ /* HXD, EA: 12Mbps; RA: 1Mbps; SA: 115200 bps */
+ /*
+ * FIXME: as long as we don't know how to distinguish between
+ * these chip variants, allow the max. of these values
+ */
+ baud = min_t(int, baud, 12000000 * 1.1);
+ else if (type == TB)
+ baud = min_t(int, baud, 12000000 * 1.1);
+ /* Determine factors A and B */
+ A = 0;
+ B = 12000000 * 32 / baud; /* 12MHz */
+ B <<= 1; /* Add one bit for rounding */
+ while (B > (512 << 1) && A <= 14) {
+ A += 2;
+ B >>= 2;
+ }
+ if (A > 14) { /* max. divisor = min. baudrate reached */
+ A = 14;
+ B = 512;
+ /* => ~45.78 baud */
+ } else {
+ B = (B + 1) >> 1; /* Round the last bit */
+ }
+ /* Handle special cases */
+ if (B == 512)
+ B = 0; /* also: 1 to 8 */
+ else if (B < 16)
+ /*
+ * NOTE: With the current algorithm this happens
+ * only for A=0 and means that the min. divisor
+ * (respectively: the max. baudrate) is reached.
+ */
+ B = 16; /* => 24 MBaud */
+ /* Encode the baud rate */
+ buf[3] = 0x80; /* Select divisor encoding method */
+ buf[2] = 0;
+ buf[1] = (A & 0x0e); /* A */
+ buf[1] |= ((B & 0x100) >> 8); /* MSB of B */
+ buf[0] = B & 0xff; /* 8 LSBs of B */
+ /* Calculate the actual/resulting baud rate */
+ if (B <= 8)
+ B = 512;
+ baud = 12000000 * 32 / ((1 << A) * B);
+
+ return baud;
+}
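A worked example of the divisor method (own arithmetic, not part of the patch) for a requested 115200 baud on an HXD-class chip: B = 12000000 * 32 / 115200 = 3333; after the rounding shift, two loop iterations leave A = 4 and B = 416, which rounds to B = 208. That encodes as buf[3] = 0x80, buf[2] = 0, buf[1] = 0x04, buf[0] = 0xd0, and the reported rate becomes 12000000 * 32 / (2^4 * 208) = 115384 baud, roughly 0.16% high, consistent with the "max. 0.2%" remark in pl2303_encode_baudrate() below.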
+
+static void pl2303_encode_baudrate(struct tty_struct *tty,
+ struct usb_serial_port *port,
+ enum pl2303_type type,
+ u8 buf[4])
+{
+ int baud;
+
+ baud = tty_get_baud_rate(tty);
+ dev_dbg(&port->dev, "baud requested = %d\n", baud);
+ if (!baud)
+ return;
+ /*
+ * There are two methods for setting/encoding the baud rate
+ * 1) Direct method: encodes the baud rate value directly
+ * => supported by all chip types
+ * 2) Divisor based method: encodes a divisor to a base value (12MHz*32)
+ * => supported by HX chips (and likely not by type_0/1 chips)
+ *
+ * NOTE: Although the divisor based baud rate encoding method is much
+ * more flexible, some of the standard baud rate values cannot be
+ * realized exactly. But the difference is very small (max. 0.2%) and
+ * the device likely uses the same baud rate generator for both methods
+ * so that there is likely no difference.
+ */
+ if (type == type_0 || type == type_1)
+ baud = pl2303_baudrate_encode_direct(baud, type, buf);
+ else
+ baud = pl2303_baudrate_encode_divisor(baud, type, buf);
+ /* Save resulting baud rate */
+ tty_encode_baud_rate(tty, baud, baud);
+ dev_dbg(&port->dev, "baud set = %d\n", baud);
+}
+
static void pl2303_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
@@ -268,27 +476,18 @@ static void pl2303_set_termios(struct tty_struct *tty,
struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
struct pl2303_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
- unsigned int cflag;
unsigned char *buf;
- int baud;
int i;
u8 control;
- const int baud_sup[] = { 75, 150, 300, 600, 1200, 1800, 2400, 3600,
- 4800, 7200, 9600, 14400, 19200, 28800, 38400,
- 57600, 115200, 230400, 460800, 500000, 614400,
- 921600, 1228800, 2457600, 3000000, 6000000 };
- int baud_floor, baud_ceil;
- int k;
-
- /* The PL2303 is reported to lose bytes if you change
- serial settings even to the same values as before. Thus
- we actually need to filter in this specific case */
+ /*
+ * The PL2303 is reported to lose bytes if you change serial settings
+ * even to the same values as before. Thus we actually need to filter
+ * in this specific case.
+ */
if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
return;
- cflag = tty->termios.c_cflag;
-
buf = kzalloc(7, GFP_KERNEL);
if (!buf) {
dev_err(&port->dev, "%s - out of memory.\n", __func__);
@@ -303,8 +502,8 @@ static void pl2303_set_termios(struct tty_struct *tty,
0, 0, buf, 7, 100);
dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf);
- if (cflag & CSIZE) {
- switch (cflag & CSIZE) {
+ if (C_CSIZE(tty)) {
+ switch (C_CSIZE(tty)) {
case CS5:
buf[6] = 5;
break;
@@ -317,73 +516,22 @@ static void pl2303_set_termios(struct tty_struct *tty,
default:
case CS8:
buf[6] = 8;
- break;
}
dev_dbg(&port->dev, "data bits = %d\n", buf[6]);
}
- /* For reference buf[0]:buf[3] baud rate value */
- /* NOTE: Only the values defined in baud_sup are supported !
- * => if unsupported values are set, the PL2303 seems to use
- * 9600 baud (at least my PL2303X always does)
- */
- baud = tty_get_baud_rate(tty);
- dev_dbg(&port->dev, "baud requested = %d\n", baud);
- if (baud) {
- /* Set baudrate to nearest supported value */
- for (k=0; k<ARRAY_SIZE(baud_sup); k++) {
- if (baud_sup[k] / baud) {
- baud_ceil = baud_sup[k];
- if (k==0) {
- baud = baud_ceil;
- } else {
- baud_floor = baud_sup[k-1];
- if ((baud_ceil % baud)
- > (baud % baud_floor))
- baud = baud_floor;
- else
- baud = baud_ceil;
- }
- break;
- }
- }
- if (baud > 1228800) {
- /* type_0, type_1 only support up to 1228800 baud */
- if (spriv->type != HX)
- baud = 1228800;
- else if (baud > 6000000)
- baud = 6000000;
- }
- dev_dbg(&port->dev, "baud set = %d\n", baud);
- if (baud <= 115200) {
- buf[0] = baud & 0xff;
- buf[1] = (baud >> 8) & 0xff;
- buf[2] = (baud >> 16) & 0xff;
- buf[3] = (baud >> 24) & 0xff;
- } else {
- /* apparently the formula for higher speeds is:
- * baudrate = 12M * 32 / (2^buf[1]) / buf[0]
- */
- unsigned tmp = 12*1000*1000*32 / baud;
- buf[3] = 0x80;
- buf[2] = 0;
- buf[1] = (tmp >= 256);
- while (tmp >= 256) {
- tmp >>= 2;
- buf[1] <<= 1;
- }
- buf[0] = tmp;
- }
- }
+ /* For reference: buf[0]:buf[3] baud rate value */
+ pl2303_encode_baudrate(tty, port, spriv->type, buf);
/* For reference buf[4]=0 is 1 stop bits */
/* For reference buf[4]=1 is 1.5 stop bits */
/* For reference buf[4]=2 is 2 stop bits */
- if (cflag & CSTOPB) {
- /* NOTE: Comply with "real" UARTs / RS232:
+ if (C_CSTOPB(tty)) {
+ /*
+ * NOTE: Comply with "real" UARTs / RS232:
* use 1.5 instead of 2 stop bits with 5 data bits
*/
- if ((cflag & CSIZE) == CS5) {
+ if (C_CSIZE(tty) == CS5) {
buf[4] = 1;
dev_dbg(&port->dev, "stop bits = 1.5\n");
} else {
@@ -395,14 +543,14 @@ static void pl2303_set_termios(struct tty_struct *tty,
dev_dbg(&port->dev, "stop bits = 1\n");
}
- if (cflag & PARENB) {
+ if (C_PARENB(tty)) {
/* For reference buf[5]=0 is none parity */
/* For reference buf[5]=1 is odd parity */
/* For reference buf[5]=2 is even parity */
/* For reference buf[5]=3 is mark parity */
/* For reference buf[5]=4 is space parity */
- if (cflag & PARODD) {
- if (cflag & CMSPAR) {
+ if (C_PARODD(tty)) {
+ if (tty->termios.c_cflag & CMSPAR) {
buf[5] = 3;
dev_dbg(&port->dev, "parity = mark\n");
} else {
@@ -410,7 +558,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
dev_dbg(&port->dev, "parity = odd\n");
}
} else {
- if (cflag & CMSPAR) {
+ if (tty->termios.c_cflag & CMSPAR) {
buf[5] = 4;
dev_dbg(&port->dev, "parity = space\n");
} else {
@@ -431,7 +579,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
/* change control lines if we are switching to or from B0 */
spin_lock_irqsave(&priv->lock, flags);
control = priv->line_control;
- if ((cflag & CBAUD) == B0)
+ if (C_BAUD(tty) == B0)
priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
@@ -443,26 +591,21 @@ static void pl2303_set_termios(struct tty_struct *tty,
spin_unlock_irqrestore(&priv->lock, flags);
}
- buf[0] = buf[1] = buf[2] = buf[3] = buf[4] = buf[5] = buf[6] = 0;
-
+ memset(buf, 0, 7);
i = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
GET_LINE_REQUEST, GET_LINE_REQUEST_TYPE,
0, 0, buf, 7, 100);
dev_dbg(&port->dev, "0xa1:0x21:0:0 %d - %7ph\n", i, buf);
- if (cflag & CRTSCTS) {
- if (spriv->type == HX)
- pl2303_vendor_write(0x0, 0x61, serial);
- else
+ if (C_CRTSCTS(tty)) {
+ if (spriv->type == type_0 || spriv->type == type_1)
pl2303_vendor_write(0x0, 0x41, serial);
+ else
+ pl2303_vendor_write(0x0, 0x61, serial);
} else {
pl2303_vendor_write(0x0, 0x0, serial);
}
- /* Save resulting baud rate */
- if (baud)
- tty_encode_baud_rate(tty, baud, baud);
-
kfree(buf);
}
@@ -495,7 +638,7 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
int result;
- if (spriv->type != HX) {
+ if (spriv->type == type_0 || spriv->type == type_1) {
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
} else {
@@ -521,7 +664,6 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
return result;
}
- port->port.drain_delay = 256;
return 0;
}
@@ -789,8 +931,10 @@ static void pl2303_process_read_urb(struct urb *urb)
tty_flag = TTY_PARITY;
else if (line_status & UART_FRAME_ERROR)
tty_flag = TTY_FRAME;
- dev_dbg(&port->dev, "%s - tty_flag = %d\n", __func__, tty_flag);
+ if (tty_flag != TTY_NORMAL)
+ dev_dbg(&port->dev, "%s - tty_flag = %d\n", __func__,
+ tty_flag);
/* overrun is special, not associated with a char */
if (line_status & UART_OVERRUN_ERROR)
tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index d99743290fc..a24d59ae403 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -62,6 +62,7 @@
#define MAX_BAUD_RATE 921600
#define DEFAULT_BAUD_RATE 9600
+#define QT2_READ_BUFFER_SIZE 512 /* size of read buffer */
#define QT2_WRITE_BUFFER_SIZE 512 /* size of write buffer */
#define QT2_WRITE_CONTROL_SIZE 5 /* control bytes used for a write */
@@ -112,7 +113,7 @@ struct qt2_serial_private {
unsigned char current_port; /* current port for incoming data */
struct urb *read_urb; /* shared among all ports */
- char read_buffer[512];
+ char *read_buffer;
};
struct qt2_port_private {
@@ -121,7 +122,7 @@ struct qt2_port_private {
spinlock_t urb_lock;
bool urb_in_use;
struct urb *write_urb;
- char write_buffer[QT2_WRITE_BUFFER_SIZE];
+ char *write_buffer;
spinlock_t lock;
u8 shadowLSR;
@@ -142,6 +143,7 @@ static void qt2_release(struct usb_serial *serial)
serial_priv = usb_get_serial_data(serial);
usb_free_urb(serial_priv->read_urb);
+ kfree(serial_priv->read_buffer);
kfree(serial_priv);
}
@@ -683,7 +685,7 @@ static int qt2_setup_urbs(struct usb_serial *serial)
usb_rcvbulkpipe(serial->dev,
port0->bulk_in_endpointAddress),
serial_priv->read_buffer,
- sizeof(serial_priv->read_buffer),
+ QT2_READ_BUFFER_SIZE,
qt2_read_bulk_callback, serial);
status = usb_submit_urb(serial_priv->read_urb, GFP_KERNEL);
@@ -718,6 +720,12 @@ static int qt2_attach(struct usb_serial *serial)
return -ENOMEM;
}
+ serial_priv->read_buffer = kmalloc(QT2_READ_BUFFER_SIZE, GFP_KERNEL);
+ if (!serial_priv->read_buffer) {
+ status = -ENOMEM;
+ goto err_buf;
+ }
+
usb_set_serial_data(serial, serial_priv);
status = qt2_setup_urbs(serial);
@@ -727,6 +735,8 @@ static int qt2_attach(struct usb_serial *serial)
return 0;
attach_failed:
+ kfree(serial_priv->read_buffer);
+err_buf:
kfree(serial_priv);
return status;
}
@@ -745,21 +755,29 @@ static int qt2_port_probe(struct usb_serial_port *port)
spin_lock_init(&port_priv->urb_lock);
port_priv->port = port;
+ port_priv->write_buffer = kmalloc(QT2_WRITE_BUFFER_SIZE, GFP_KERNEL);
+ if (!port_priv->write_buffer)
+ goto err_buf;
+
port_priv->write_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!port_priv->write_urb) {
- kfree(port_priv);
- return -ENOMEM;
- }
+ if (!port_priv->write_urb)
+ goto err_urb;
+
bEndpointAddress = serial->port[0]->bulk_out_endpointAddress;
usb_fill_bulk_urb(port_priv->write_urb, serial->dev,
usb_sndbulkpipe(serial->dev, bEndpointAddress),
port_priv->write_buffer,
- sizeof(port_priv->write_buffer),
+ QT2_WRITE_BUFFER_SIZE,
qt2_write_bulk_callback, port);
usb_set_serial_port_data(port, port_priv);
return 0;
+err_urb:
+ kfree(port_priv->write_buffer);
+err_buf:
+ kfree(port_priv);
+ return -ENOMEM;
}
static int qt2_port_remove(struct usb_serial_port *port)
@@ -768,6 +786,7 @@ static int qt2_port_remove(struct usb_serial_port *port)
port_priv = usb_get_serial_port_data(port);
usb_free_urb(port_priv->write_urb);
+ kfree(port_priv->write_buffer);
kfree(port_priv);
return 0;
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index 21cd7bf2a8c..ba895989d8c 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -92,13 +92,6 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
-static __u16 vendor; /* no default */
-static __u16 product; /* no default */
-module_param(vendor, ushort, 0);
-MODULE_PARM_DESC(vendor, "User specified USB idVendor (required)");
-module_param(product, ushort, 0);
-MODULE_PARM_DESC(product, "User specified USB idProduct (required)");
-
module_param(safe, bool, 0);
MODULE_PARM_DESC(safe, "Turn Safe Encapsulation On/Off");
@@ -140,8 +133,6 @@ static struct usb_device_id id_table[] = {
{MY_USB_DEVICE(0x4dd, 0x8003, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, /* Collie */
{MY_USB_DEVICE(0x4dd, 0x8004, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, /* Collie */
{MY_USB_DEVICE(0x5f9, 0xffff, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)}, /* Sharp tmp */
- /* extra null entry for module vendor/produc parameters */
- {MY_USB_DEVICE(0, 0, CDC_DEVICE_CLASS, LINEO_INTERFACE_CLASS, LINEO_INTERFACE_SUBCLASS_SAFESERIAL)},
{} /* terminating entry */
};
@@ -272,7 +263,19 @@ static int safe_prepare_write_buffer(struct usb_serial_port *port,
static int safe_startup(struct usb_serial *serial)
{
- switch (serial->interface->cur_altsetting->desc.bInterfaceProtocol) {
+ struct usb_interface_descriptor *desc;
+
+ if (serial->dev->descriptor.bDeviceClass != CDC_DEVICE_CLASS)
+ return -ENODEV;
+
+ desc = &serial->interface->cur_altsetting->desc;
+
+ if (desc->bInterfaceClass != LINEO_INTERFACE_CLASS)
+ return -ENODEV;
+ if (desc->bInterfaceSubClass != LINEO_INTERFACE_SUBCLASS_SAFESERIAL)
+ return -ENODEV;
+
+ switch (desc->bInterfaceProtocol) {
case LINEO_SAFESERIAL_CRC:
break;
case LINEO_SAFESERIAL_CRC_PADDED:
@@ -300,30 +303,4 @@ static struct usb_serial_driver * const serial_drivers[] = {
&safe_device, NULL
};
-static int __init safe_init(void)
-{
- int i;
-
- /* if we have vendor / product parameters patch them into id list */
- if (vendor || product) {
- pr_info("vendor: %x product: %x\n", vendor, product);
-
- for (i = 0; i < ARRAY_SIZE(id_table); i++) {
- if (!id_table[i].idVendor && !id_table[i].idProduct) {
- id_table[i].idVendor = vendor;
- id_table[i].idProduct = product;
- break;
- }
- }
- }
-
- return usb_serial_register_drivers(serial_drivers, KBUILD_MODNAME, id_table);
-}
-
-static void __exit safe_exit(void)
-{
- usb_serial_deregister_drivers(serial_drivers);
-}
-
-module_init(safe_init);
-module_exit(safe_exit);
+module_usb_serial_driver(serial_drivers, id_table);
diff --git a/drivers/usb/serial/siemens_mpi.c b/drivers/usb/serial/siemens_mpi.c
deleted file mode 100644
index a76b1ae54a2..00000000000
--- a/drivers/usb/serial/siemens_mpi.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Siemens USB-MPI Serial USB driver
- *
- * Copyright (C) 2005 Thomas Hergenhahn <thomas.hergenhahn@suse.de>
- * Copyright (C) 2005,2008 Greg Kroah-Hartman <gregkh@suse.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/tty.h>
-#include <linux/module.h>
-#include <linux/usb.h>
-#include <linux/usb/serial.h>
-
-#define DRIVER_AUTHOR "Thomas Hergenhahn@web.de http://libnodave.sf.net"
-#define DRIVER_DESC "Driver for Siemens USB/MPI adapter"
-
-
-static const struct usb_device_id id_table[] = {
- /* Vendor and product id for 6ES7-972-0CB20-0XA0 */
- { USB_DEVICE(0x908, 0x0004) },
- { },
-};
-MODULE_DEVICE_TABLE(usb, id_table);
-
-static struct usb_serial_driver siemens_usb_mpi_device = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "siemens_mpi",
- },
- .id_table = id_table,
- .num_ports = 1,
-};
-
-static struct usb_serial_driver * const serial_drivers[] = {
- &siemens_usb_mpi_device, NULL
-};
-
-module_usb_serial_driver(serial_drivers, id_table);
-
-MODULE_AUTHOR(DRIVER_AUTHOR);
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index ddf6c47137d..4abac28b599 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -169,6 +169,8 @@ static int spcp8x5_port_probe(struct usb_serial_port *port)
usb_set_serial_port_data(port, priv);
+ port->port.drain_delay = 256;
+
return 0;
}
@@ -411,8 +413,6 @@ static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
if (tty)
spcp8x5_set_termios(tty, port, NULL);
- port->port.drain_delay = 256;
-
return usb_serial_generic_open(tty, port);
}
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 7182bb774b7..760b78560f6 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -45,8 +45,6 @@
#define TI_FIRMWARE_BUF_SIZE 16284
-#define TI_WRITE_BUF_SIZE 1024
-
#define TI_TRANSFER_TIMEOUT 2
#define TI_DEFAULT_CLOSING_WAIT 4000 /* in .01 secs */
@@ -71,13 +69,11 @@ struct ti_port {
__u8 tp_uart_mode; /* 232 or 485 modes */
unsigned int tp_uart_base_addr;
int tp_flags;
- wait_queue_head_t tp_write_wait;
struct ti_device *tp_tdev;
struct usb_serial_port *tp_port;
spinlock_t tp_lock;
int tp_read_urb_state;
int tp_write_urb_in_use;
- struct kfifo write_fifo;
};
struct ti_device {
@@ -145,20 +141,9 @@ static int ti_download_firmware(struct ti_device *tdev);
/* module parameters */
static int closing_wait = TI_DEFAULT_CLOSING_WAIT;
-static ushort vendor_3410[TI_EXTRA_VID_PID_COUNT];
-static unsigned int vendor_3410_count;
-static ushort product_3410[TI_EXTRA_VID_PID_COUNT];
-static unsigned int product_3410_count;
-static ushort vendor_5052[TI_EXTRA_VID_PID_COUNT];
-static unsigned int vendor_5052_count;
-static ushort product_5052[TI_EXTRA_VID_PID_COUNT];
-static unsigned int product_5052_count;
/* supported devices */
-/* the array dimension is the number of default entries plus */
-/* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */
-/* null entry */
-static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = {
+static struct usb_device_id ti_id_table_3410[] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
@@ -175,16 +160,18 @@ static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) },
{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
+ { } /* terminator */
};
-static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = {
+static struct usb_device_id ti_id_table_5052[] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_BOOT_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5152_BOOT_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_EEPROM_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
+ { } /* terminator */
};
-static struct usb_device_id ti_id_table_combined[19+2*TI_EXTRA_VID_PID_COUNT+1] = {
+static struct usb_device_id ti_id_table_combined[] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
@@ -204,7 +191,7 @@ static struct usb_device_id ti_id_table_combined[19+2*TI_EXTRA_VID_PID_COUNT+1]
{ USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
- { }
+ { } /* terminator */
};
static struct usb_serial_driver ti_1port_device = {
@@ -293,61 +280,12 @@ module_param(closing_wait, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(closing_wait,
"Maximum wait for data to drain in close, in .01 secs, default is 4000");
-module_param_array(vendor_3410, ushort, &vendor_3410_count, S_IRUGO);
-MODULE_PARM_DESC(vendor_3410,
- "Vendor ids for 3410 based devices, 1-5 short integers");
-module_param_array(product_3410, ushort, &product_3410_count, S_IRUGO);
-MODULE_PARM_DESC(product_3410,
- "Product ids for 3410 based devices, 1-5 short integers");
-module_param_array(vendor_5052, ushort, &vendor_5052_count, S_IRUGO);
-MODULE_PARM_DESC(vendor_5052,
- "Vendor ids for 5052 based devices, 1-5 short integers");
-module_param_array(product_5052, ushort, &product_5052_count, S_IRUGO);
-MODULE_PARM_DESC(product_5052,
- "Product ids for 5052 based devices, 1-5 short integers");
-
MODULE_DEVICE_TABLE(usb, ti_id_table_combined);
+module_usb_serial_driver(serial_drivers, ti_id_table_combined);
/* Functions */
-static int __init ti_init(void)
-{
- int i, j, c;
-
- /* insert extra vendor and product ids */
- c = ARRAY_SIZE(ti_id_table_combined) - 2 * TI_EXTRA_VID_PID_COUNT - 1;
- j = ARRAY_SIZE(ti_id_table_3410) - TI_EXTRA_VID_PID_COUNT - 1;
- for (i = 0; i < min(vendor_3410_count, product_3410_count); i++, j++, c++) {
- ti_id_table_3410[j].idVendor = vendor_3410[i];
- ti_id_table_3410[j].idProduct = product_3410[i];
- ti_id_table_3410[j].match_flags = USB_DEVICE_ID_MATCH_DEVICE;
- ti_id_table_combined[c].idVendor = vendor_3410[i];
- ti_id_table_combined[c].idProduct = product_3410[i];
- ti_id_table_combined[c].match_flags = USB_DEVICE_ID_MATCH_DEVICE;
- }
- j = ARRAY_SIZE(ti_id_table_5052) - TI_EXTRA_VID_PID_COUNT - 1;
- for (i = 0; i < min(vendor_5052_count, product_5052_count); i++, j++, c++) {
- ti_id_table_5052[j].idVendor = vendor_5052[i];
- ti_id_table_5052[j].idProduct = product_5052[i];
- ti_id_table_5052[j].match_flags = USB_DEVICE_ID_MATCH_DEVICE;
- ti_id_table_combined[c].idVendor = vendor_5052[i];
- ti_id_table_combined[c].idProduct = product_5052[i];
- ti_id_table_combined[c].match_flags = USB_DEVICE_ID_MATCH_DEVICE;
- }
-
- return usb_serial_register_drivers(serial_drivers, KBUILD_MODNAME, ti_id_table_combined);
-}
-
-static void __exit ti_exit(void)
-{
- usb_serial_deregister_drivers(serial_drivers);
-}
-
-module_init(ti_init);
-module_exit(ti_exit);
-
-
static int ti_startup(struct usb_serial *serial)
{
struct ti_device *tdev;
@@ -371,7 +309,7 @@ static int ti_startup(struct usb_serial *serial)
usb_set_serial_data(serial, tdev);
/* determine device type */
- if (usb_match_id(serial->interface, ti_id_table_3410))
+ if (serial->type == &ti_1port_device)
tdev->td_is_3410 = 1;
dev_dbg(&dev->dev, "%s - device type is %s\n", __func__,
tdev->td_is_3410 ? "3410" : "5052");
@@ -430,17 +368,14 @@ static int ti_port_probe(struct usb_serial_port *port)
else
tport->tp_uart_base_addr = TI_UART2_BASE_ADDR;
port->port.closing_wait = msecs_to_jiffies(10 * closing_wait);
- init_waitqueue_head(&tport->tp_write_wait);
- if (kfifo_alloc(&tport->write_fifo, TI_WRITE_BUF_SIZE, GFP_KERNEL)) {
- kfree(tport);
- return -ENOMEM;
- }
tport->tp_port = port;
tport->tp_tdev = usb_get_serial_data(port->serial);
tport->tp_uart_mode = 0; /* default is RS232 */
usb_set_serial_port_data(port, tport);
+ port->port.drain_delay = 3;
+
return 0;
}
@@ -449,7 +384,6 @@ static int ti_port_remove(struct usb_serial_port *port)
struct ti_port *tport;
tport = usb_get_serial_port_data(port);
- kfifo_free(&tport->write_fifo);
kfree(tport);
return 0;
@@ -582,8 +516,6 @@ static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
tport->tp_is_open = 1;
++tdev->td_open_port_count;
- port->port.drain_delay = 3;
-
goto release_lock;
unlink_int_urb:
@@ -616,7 +548,7 @@ static void ti_close(struct usb_serial_port *port)
usb_kill_urb(port->write_urb);
tport->tp_write_urb_in_use = 0;
spin_lock_irqsave(&tport->tp_lock, flags);
- kfifo_reset_out(&tport->write_fifo);
+ kfifo_reset_out(&port->write_fifo);
spin_unlock_irqrestore(&tport->tp_lock, flags);
port_number = port->port_number;
@@ -655,7 +587,7 @@ static int ti_write(struct tty_struct *tty, struct usb_serial_port *port,
if (tport == NULL || !tport->tp_is_open)
return -ENODEV;
- count = kfifo_in_locked(&tport->write_fifo, data, count,
+ count = kfifo_in_locked(&port->write_fifo, data, count,
&tport->tp_lock);
ti_send(tport);
@@ -674,7 +606,7 @@ static int ti_write_room(struct tty_struct *tty)
return 0;
spin_lock_irqsave(&tport->tp_lock, flags);
- room = kfifo_avail(&tport->write_fifo);
+ room = kfifo_avail(&port->write_fifo);
spin_unlock_irqrestore(&tport->tp_lock, flags);
dev_dbg(&port->dev, "%s - returns %d\n", __func__, room);
@@ -693,7 +625,7 @@ static int ti_chars_in_buffer(struct tty_struct *tty)
return 0;
spin_lock_irqsave(&tport->tp_lock, flags);
- chars = kfifo_len(&tport->write_fifo);
+ chars = kfifo_len(&port->write_fifo);
spin_unlock_irqrestore(&tport->tp_lock, flags);
dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
@@ -1090,13 +1022,11 @@ static void ti_bulk_in_callback(struct urb *urb)
case -ESHUTDOWN:
dev_dbg(dev, "%s - urb shutting down, %d\n", __func__, status);
tport->tp_tdev->td_urb_error = 1;
- wake_up_interruptible(&tport->tp_write_wait);
return;
default:
dev_err(dev, "%s - nonzero urb status, %d\n",
__func__, status);
tport->tp_tdev->td_urb_error = 1;
- wake_up_interruptible(&tport->tp_write_wait);
}
if (status == -EPIPE)
@@ -1152,13 +1082,11 @@ static void ti_bulk_out_callback(struct urb *urb)
case -ESHUTDOWN:
dev_dbg(&port->dev, "%s - urb shutting down, %d\n", __func__, status);
tport->tp_tdev->td_urb_error = 1;
- wake_up_interruptible(&tport->tp_write_wait);
return;
default:
dev_err_console(port, "%s - nonzero urb status, %d\n",
__func__, status);
tport->tp_tdev->td_urb_error = 1;
- wake_up_interruptible(&tport->tp_write_wait);
}
/* send any buffered data */
@@ -1197,7 +1125,7 @@ static void ti_send(struct ti_port *tport)
if (tport->tp_write_urb_in_use)
goto unlock;
- count = kfifo_out(&tport->write_fifo,
+ count = kfifo_out(&port->write_fifo,
port->write_urb->transfer_buffer,
port->bulk_out_size);
@@ -1232,7 +1160,6 @@ static void ti_send(struct ti_port *tport)
/* more room in the buffer for new writes, wakeup */
tty_port_tty_wakeup(&port->port);
- wake_up_interruptible(&tport->tp_write_wait);
return;
unlock:
spin_unlock_irqrestore(&tport->tp_lock, flags);
@@ -1312,7 +1239,7 @@ static int ti_get_serial_info(struct ti_port *tport,
ret_serial.line = port->minor;
ret_serial.port = port->port_number;
ret_serial.flags = tport->tp_flags;
- ret_serial.xmit_fifo_size = TI_WRITE_BUF_SIZE;
+ ret_serial.xmit_fifo_size = kfifo_size(&port->write_fifo);
ret_serial.baud_base = tport->tp_tdev->td_is_3410 ? 921600 : 460800;
ret_serial.closing_wait = cwait;
@@ -1536,14 +1463,15 @@ static int ti_download_firmware(struct ti_device *tdev)
char buf[32];
/* try ID specific firmware first, then try generic firmware */
- sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor,
- dev->descriptor.idProduct);
+ sprintf(buf, "ti_usb-v%04x-p%04x.fw",
+ le16_to_cpu(dev->descriptor.idVendor),
+ le16_to_cpu(dev->descriptor.idProduct));
status = request_firmware(&fw_p, buf, &dev->dev);
if (status != 0) {
buf[0] = '\0';
- if (dev->descriptor.idVendor == MTS_VENDOR_ID) {
- switch (dev->descriptor.idProduct) {
+ if (le16_to_cpu(dev->descriptor.idVendor) == MTS_VENDOR_ID) {
+ switch (le16_to_cpu(dev->descriptor.idProduct)) {
case MTS_CDMA_PRODUCT_ID:
strcpy(buf, "mts_cdma.fw");
break;
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
new file mode 100644
index 00000000000..52eb91f2eb2
--- /dev/null
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -0,0 +1,110 @@
+/*
+ * USB Serial "Simple" driver
+ *
+ * Copyright (C) 2001-2006,2008,2013 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2005 Arthur Huillet (ahuillet@users.sf.net)
+ * Copyright (C) 2005 Thomas Hergenhahn <thomas.hergenhahn@suse.de>
+ * Copyright (C) 2009 Outpost Embedded, LLC
+ * Copyright (C) 2010 Zilogic Systems <code@zilogic.com>
+ * Copyright (C) 2013 Wei Shuai <cpuwolf@gmail.com>
+ * Copyright (C) 2013 Linux Foundation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+
+#define DEVICE(vendor, IDS) \
+static const struct usb_device_id vendor##_id_table[] = { \
+ IDS(), \
+ { }, \
+}; \
+static struct usb_serial_driver vendor##_device = { \
+ .driver = { \
+ .owner = THIS_MODULE, \
+ .name = #vendor, \
+ }, \
+ .id_table = vendor##_id_table, \
+ .num_ports = 1, \
+};
+
+
+/* ZIO Motherboard USB driver */
+#define ZIO_IDS() \
+ { USB_DEVICE(0x1CBE, 0x0103) }
+DEVICE(zio, ZIO_IDS);
+
+/* Funsoft Serial USB driver */
+#define FUNSOFT_IDS() \
+ { USB_DEVICE(0x1404, 0xcddc) }
+DEVICE(funsoft, FUNSOFT_IDS);
+
+/* Infineon Flashloader driver */
+#define FLASHLOADER_IDS() \
+ { USB_DEVICE(0x8087, 0x0716) }
+DEVICE(flashloader, FLASHLOADER_IDS);
+
+/* ViVOpay USB Serial Driver */
+#define VIVOPAY_IDS() \
+ { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */
+DEVICE(vivopay, VIVOPAY_IDS);
+
+/* Motorola USB Phone driver */
+#define MOTO_IDS() \
+ { USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */ \
+ { USB_DEVICE(0x0c44, 0x0022) }, /* unknown Motorola phone */ \
+ { USB_DEVICE(0x22b8, 0x2a64) }, /* Motorola KRZR K1m */ \
+ { USB_DEVICE(0x22b8, 0x2c84) }, /* Motorola VE240 phone */ \
+ { USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */
+DEVICE(moto_modem, MOTO_IDS);
+
+/* HP4x (48/49) Generic Serial driver */
+#define HP4X_IDS() \
+ { USB_DEVICE(0x03f0, 0x0121) }
+DEVICE(hp4x, HP4X_IDS);
+
+/* Suunto ANT+ USB Driver */
+#define SUUNTO_IDS() \
+ { USB_DEVICE(0x0fcf, 0x1008) }
+DEVICE(suunto, SUUNTO_IDS);
+
+/* Siemens USB/MPI adapter */
+#define SIEMENS_IDS() \
+ { USB_DEVICE(0x908, 0x0004) }
+DEVICE(siemens_mpi, SIEMENS_IDS);
+
+/* All of the above structures mushed into two lists */
+static struct usb_serial_driver * const serial_drivers[] = {
+ &zio_device,
+ &funsoft_device,
+ &flashloader_device,
+ &vivopay_device,
+ &moto_modem_device,
+ &hp4x_device,
+ &suunto_device,
+ &siemens_mpi_device,
+ NULL
+};
+
+static const struct usb_device_id id_table[] = {
+ ZIO_IDS(),
+ FUNSOFT_IDS(),
+ FLASHLOADER_IDS(),
+ VIVOPAY_IDS(),
+ MOTO_IDS(),
+ HP4X_IDS(),
+ SUUNTO_IDS(),
+ SIEMENS_IDS(),
+ { },
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+module_usb_serial_driver(serial_drivers, id_table);
+MODULE_LICENSE("GPL");
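
For reference, a single DEVICE() invocation in the new file above expands to a one-port driver plus its id table; the expansion below is written out by hand from the macro definition and is illustrative only, not additional driver code.

	/* Hand-expanded form of DEVICE(zio, ZIO_IDS) from the macro above. */
	static const struct usb_device_id zio_id_table[] = {
		{ USB_DEVICE(0x1CBE, 0x0103) },
		{ },
	};
	static struct usb_serial_driver zio_device = {
		.driver = {
			.owner	= THIS_MODULE,
			.name	= "zio",
		},
		.id_table	= zio_id_table,
		.num_ports	= 1,
	};
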
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index cb27fcb2fc9..6091bd5a1f4 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -681,20 +681,10 @@ static int serial_port_carrier_raised(struct tty_port *port)
static void serial_port_dtr_rts(struct tty_port *port, int on)
{
struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
- struct usb_serial *serial = p->serial;
- struct usb_serial_driver *drv = serial->type;
+ struct usb_serial_driver *drv = p->serial->type;
- if (!drv->dtr_rts)
- return;
- /*
- * Work-around bug in the tty-layer which can result in dtr_rts
- * being called after a disconnect (and tty_unregister_device
- * has returned). Remove once bug has been squashed.
- */
- mutex_lock(&serial->disc_mutex);
- if (!serial->disconnected)
+ if (drv->dtr_rts)
drv->dtr_rts(p, on);
- mutex_unlock(&serial->disc_mutex);
}
static const struct tty_port_operations serial_port_ops = {
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index 8257d30c407..85365784040 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -291,18 +291,18 @@ static void usb_wwan_indat_callback(struct urb *urb)
tty_flip_buffer_push(&port->port);
} else
dev_dbg(dev, "%s: empty read urb received\n", __func__);
-
- /* Resubmit urb so we continue receiving */
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err) {
- if (err != -EPERM) {
- dev_err(dev, "%s: resubmit read urb failed. (%d)\n", __func__, err);
- /* busy also in error unless we are killed */
- usb_mark_last_busy(port->serial->dev);
- }
- } else {
+ }
+ /* Resubmit urb so we continue receiving */
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err) {
+ if (err != -EPERM) {
+ dev_err(dev, "%s: resubmit read urb failed. (%d)\n",
+ __func__, err);
+ /* busy also in error unless we are killed */
usb_mark_last_busy(port->serial->dev);
}
+ } else {
+ usb_mark_last_busy(port->serial->dev);
}
}
diff --git a/drivers/usb/serial/vivopay-serial.c b/drivers/usb/serial/vivopay-serial.c
deleted file mode 100644
index 6299526ff8c..00000000000
--- a/drivers/usb/serial/vivopay-serial.c
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2001-2005 Greg Kroah-Hartman (greg@kroah.com)
- * Copyright (C) 2009 Outpost Embedded, LLC
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/tty.h>
-#include <linux/module.h>
-#include <linux/usb.h>
-#include <linux/usb/serial.h>
-
-#define DRIVER_DESC "ViVOpay USB Serial Driver"
-
-#define VIVOPAY_VENDOR_ID 0x1d5f
-
-
-static struct usb_device_id id_table [] = {
- /* ViVOpay 8800 */
- { USB_DEVICE(VIVOPAY_VENDOR_ID, 0x1004) },
- { },
-};
-
-MODULE_DEVICE_TABLE(usb, id_table);
-
-static struct usb_serial_driver vivopay_serial_device = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "vivopay-serial",
- },
- .id_table = id_table,
- .num_ports = 1,
-};
-
-static struct usb_serial_driver * const serial_drivers[] = {
- &vivopay_serial_device, NULL
-};
-
-module_usb_serial_driver(serial_drivers, id_table);
-
-MODULE_AUTHOR("Forest Bond <forest.bond@outpostembedded.com>");
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/zio.c b/drivers/usb/serial/zio.c
deleted file mode 100644
index c043aa84a0b..00000000000
--- a/drivers/usb/serial/zio.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * ZIO Motherboard USB driver
- *
- * Copyright (C) 2010 Zilogic Systems <code@zilogic.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/tty.h>
-#include <linux/module.h>
-#include <linux/usb.h>
-#include <linux/usb/serial.h>
-#include <linux/uaccess.h>
-
-static const struct usb_device_id id_table[] = {
- { USB_DEVICE(0x1CBE, 0x0103) },
- { },
-};
-MODULE_DEVICE_TABLE(usb, id_table);
-
-static struct usb_serial_driver zio_device = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "zio",
- },
- .id_table = id_table,
- .num_ports = 1,
-};
-
-static struct usb_serial_driver * const serial_drivers[] = {
- &zio_device, NULL
-};
-
-module_usb_serial_driver(serial_drivers, id_table);
-MODULE_LICENSE("GPL");
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 92b05d95ec5..94d75edef77 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -496,7 +496,7 @@ US_DO_ALL_FLAGS
***********************************************************************/
/* Output routine for the sysfs max_sectors file */
-static ssize_t show_max_sectors(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t max_sectors_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
@@ -504,7 +504,7 @@ static ssize_t show_max_sectors(struct device *dev, struct device_attribute *att
}
/* Input routine for the sysfs max_sectors file */
-static ssize_t store_max_sectors(struct device *dev, struct device_attribute *attr, const char *buf,
+static ssize_t max_sectors_store(struct device *dev, struct device_attribute *attr, const char *buf,
size_t count)
{
struct scsi_device *sdev = to_scsi_device(dev);
@@ -514,16 +514,14 @@ static ssize_t store_max_sectors(struct device *dev, struct device_attribute *at
blk_queue_max_hw_sectors(sdev->request_queue, ms);
return count;
}
- return -EINVAL;
+ return -EINVAL;
}
-
-static DEVICE_ATTR(max_sectors, S_IRUGO | S_IWUSR, show_max_sectors,
- store_max_sectors);
+static DEVICE_ATTR_RW(max_sectors);
static struct device_attribute *sysfs_device_attr_list[] = {
- &dev_attr_max_sectors,
- NULL,
- };
+ &dev_attr_max_sectors,
+ NULL,
+};
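
The DEVICE_ATTR_RW(max_sectors) helper wires the attribute to callbacks named <attr>_show() and <attr>_store(), which is why the two functions are renamed in this hunk. As a rough sketch of what the macro declares (assumed from the generic definition, not copied from this patch):

	/* Approximately what DEVICE_ATTR_RW(max_sectors) declares: */
	static struct device_attribute dev_attr_max_sectors = {
		.attr	= { .name = "max_sectors", .mode = S_IRUGO | S_IWUSR },
		.show	= max_sectors_show,
		.store	= max_sectors_store,
	};
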
/*
* this defines our host template, with which we'll allocate hosts
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 1799335288b..c015f2c1672 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -665,6 +665,13 @@ UNUSUAL_DEV( 0x054c, 0x016a, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_INQUIRY ),
+/* Submitted by Ren Bigcren <bigcren.ren@sonymobile.com> */
+UNUSUAL_DEV( 0x054c, 0x02a5, 0x0100, 0x0100,
+ "Sony Corp.",
+ "MicroVault Flash Drive",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_READ_CAPACITY_16 ),
+
/* floppy reports multiple luns */
UNUSUAL_DEV( 0x055d, 0x2020, 0x0000, 0x0210,
"SAMSUNG",
diff --git a/drivers/usb/usb-common.c b/drivers/usb/usb-common.c
index 675384dabfe..d771870a819 100644
--- a/drivers/usb/usb-common.c
+++ b/drivers/usb/usb-common.c
@@ -43,20 +43,20 @@ const char *usb_otg_state_string(enum usb_otg_state state)
}
EXPORT_SYMBOL_GPL(usb_otg_state_string);
+static const char *const speed_names[] = {
+ [USB_SPEED_UNKNOWN] = "UNKNOWN",
+ [USB_SPEED_LOW] = "low-speed",
+ [USB_SPEED_FULL] = "full-speed",
+ [USB_SPEED_HIGH] = "high-speed",
+ [USB_SPEED_WIRELESS] = "wireless",
+ [USB_SPEED_SUPER] = "super-speed",
+};
+
const char *usb_speed_string(enum usb_device_speed speed)
{
- static const char *const names[] = {
- [USB_SPEED_UNKNOWN] = "UNKNOWN",
- [USB_SPEED_LOW] = "low-speed",
- [USB_SPEED_FULL] = "full-speed",
- [USB_SPEED_HIGH] = "high-speed",
- [USB_SPEED_WIRELESS] = "wireless",
- [USB_SPEED_SUPER] = "super-speed",
- };
-
- if (speed < 0 || speed >= ARRAY_SIZE(names))
+ if (speed < 0 || speed >= ARRAY_SIZE(speed_names))
speed = USB_SPEED_UNKNOWN;
- return names[speed];
+ return speed_names[speed];
}
EXPORT_SYMBOL_GPL(usb_speed_string);
@@ -112,6 +112,33 @@ enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np)
return USB_DR_MODE_UNKNOWN;
}
EXPORT_SYMBOL_GPL(of_usb_get_dr_mode);
+
+/**
+ * of_usb_get_maximum_speed - Get maximum requested speed for a given USB
+ * controller.
+ * @np: Pointer to the given device_node
+ *
+ * The function gets the maximum speed string from property "maximum-speed",
+ * and returns the corresponding enum usb_device_speed.
+ */
+enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np)
+{
+ const char *maximum_speed;
+ int err;
+ int i;
+
+ err = of_property_read_string(np, "maximum-speed", &maximum_speed);
+ if (err < 0)
+ return USB_SPEED_UNKNOWN;
+
+ for (i = 0; i < ARRAY_SIZE(speed_names); i++)
+ if (strcmp(maximum_speed, speed_names[i]) == 0)
+ return i;
+
+ return USB_SPEED_UNKNOWN;
+}
+EXPORT_SYMBOL_GPL(of_usb_get_maximum_speed);
+
#endif
MODULE_LICENSE("GPL");
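
As a usage illustration for the new of_usb_get_maximum_speed() helper (the node, property value, and example_probe() name below are made up for the example; only the helper and the speed_names[] strings come from the patch):

	/* Device tree fragment (illustrative):
	 *
	 *	usb@12000000 {
	 *		maximum-speed = "high-speed";
	 *	};
	 */
	static void example_probe(struct device_node *np)
	{
		enum usb_device_speed max_speed = of_usb_get_maximum_speed(np);

		if (max_speed == USB_SPEED_UNKNOWN)
			pr_debug("no/unrecognized maximum-speed property\n");
		/* "high-speed" matches speed_names[USB_SPEED_HIGH] above */
	}
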
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index 7ed3b039dbe..ff97652343a 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -325,9 +325,8 @@ retry:
rv = skel_do_read_io(dev, count);
if (rv < 0)
goto exit;
- else if (!(file->f_flags & O_NONBLOCK))
+ else
goto retry;
- rv = -EAGAIN;
}
exit:
mutex_unlock(&dev->io_mutex);
diff --git a/drivers/usb/wusbcore/rh.c b/drivers/usb/wusbcore/rh.c
index bdb0cc3046b..fe8bc777ab8 100644
--- a/drivers/usb/wusbcore/rh.c
+++ b/drivers/usb/wusbcore/rh.c
@@ -141,18 +141,26 @@ static int wusbhc_rh_port_reset(struct wusbhc *wusbhc, u8 port_idx)
int wusbhc_rh_status_data(struct usb_hcd *usb_hcd, char *_buf)
{
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
- size_t cnt, size;
- unsigned long *buf = (unsigned long *) _buf;
+ size_t cnt, size, bits_set = 0;
/* WE DON'T LOCK, see comment */
- size = wusbhc->ports_max + 1 /* hub bit */;
- size = (size + 8 - 1) / 8; /* round to bytes */
- for (cnt = 0; cnt < wusbhc->ports_max; cnt++)
- if (wusb_port_by_idx(wusbhc, cnt)->change)
- set_bit(cnt + 1, buf);
- else
- clear_bit(cnt + 1, buf);
- return size;
+ /* round up to bytes. Hub bit is bit 0 so add 1. */
+ size = DIV_ROUND_UP(wusbhc->ports_max + 1, 8);
+
+ /* clear the output buffer. */
+ memset(_buf, 0, size);
+ /* set the bit for each changed port. */
+ for (cnt = 0; cnt < wusbhc->ports_max; cnt++) {
+
+ if (wusb_port_by_idx(wusbhc, cnt)->change) {
+ const int bitpos = cnt+1;
+
+ _buf[bitpos/8] |= (1 << (bitpos % 8));
+ bits_set++;
+ }
+ }
+
+ return bits_set ? size : 0;
}
EXPORT_SYMBOL_GPL(wusbhc_rh_status_data);
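
A quick worked example of the bitmap produced above (numbers chosen for illustration): with ports_max = 3, size = DIV_ROUND_UP(3 + 1, 8) = 1 byte; if only port index 1 has a pending change, bitpos = 2 and the function returns 1 with _buf[0] = 0x04 (bit 0 stays reserved for the hub itself); with no changed ports it now returns 0 so the hub core can skip the status stage entirely.
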
diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
index d6bea3e0b54..cf250c21e94 100644
--- a/drivers/usb/wusbcore/wa-hc.h
+++ b/drivers/usb/wusbcore/wa-hc.h
@@ -91,6 +91,7 @@
struct wusbhc;
struct wahc;
extern void wa_urb_enqueue_run(struct work_struct *ws);
+extern void wa_process_errored_transfers_run(struct work_struct *ws);
/**
* RPipe instance
@@ -190,8 +191,14 @@ struct wahc {
struct list_head xfer_list;
struct list_head xfer_delayed_list;
+ struct list_head xfer_errored_list;
+ /*
+ * lock for the above xfer lists. Can be taken while a xfer->lock is
+ * held but not in the reverse order.
+ */
spinlock_t xfer_list_lock;
- struct work_struct xfer_work;
+ struct work_struct xfer_enqueue_work;
+ struct work_struct xfer_error_work;
atomic_t xfer_id_count;
};
@@ -244,8 +251,10 @@ static inline void wa_init(struct wahc *wa)
edc_init(&wa->dti_edc);
INIT_LIST_HEAD(&wa->xfer_list);
INIT_LIST_HEAD(&wa->xfer_delayed_list);
+ INIT_LIST_HEAD(&wa->xfer_errored_list);
spin_lock_init(&wa->xfer_list_lock);
- INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
+ INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
+ INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
atomic_set(&wa->xfer_id_count, 1);
}
@@ -269,6 +278,8 @@ static inline void rpipe_put(struct wa_rpipe *rpipe)
}
extern void rpipe_ep_disable(struct wahc *, struct usb_host_endpoint *);
+extern void rpipe_clear_feature_stalled(struct wahc *,
+ struct usb_host_endpoint *);
extern int wa_rpipes_create(struct wahc *);
extern void wa_rpipes_destroy(struct wahc *);
static inline void rpipe_avail_dec(struct wa_rpipe *rpipe)
diff --git a/drivers/usb/wusbcore/wa-rpipe.c b/drivers/usb/wusbcore/wa-rpipe.c
index 9a595c1ed86..fd4f1ce6256 100644
--- a/drivers/usb/wusbcore/wa-rpipe.c
+++ b/drivers/usb/wusbcore/wa-rpipe.c
@@ -527,3 +527,24 @@ void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep)
mutex_unlock(&wa->rpipe_mutex);
}
EXPORT_SYMBOL_GPL(rpipe_ep_disable);
+
+/* Clear the stalled status of an RPIPE. */
+void rpipe_clear_feature_stalled(struct wahc *wa, struct usb_host_endpoint *ep)
+{
+ struct wa_rpipe *rpipe;
+
+ mutex_lock(&wa->rpipe_mutex);
+ rpipe = ep->hcpriv;
+ if (rpipe != NULL) {
+ u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex);
+
+ usb_control_msg(
+ wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0),
+ USB_REQ_CLEAR_FEATURE,
+ USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE,
+ RPIPE_STALL, index, NULL, 0, 1000);
+ }
+ mutex_unlock(&wa->rpipe_mutex);
+}
+EXPORT_SYMBOL_GPL(rpipe_clear_feature_stalled);
+
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index 16968c89949..6ad02f57c36 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -125,10 +125,13 @@ struct wa_seg {
u8 xfer_extra[]; /* xtra space for xfer_hdr_ctl */
};
-static void wa_seg_init(struct wa_seg *seg)
+static inline void wa_seg_init(struct wa_seg *seg)
{
- /* usb_init_urb() repeats a lot of work, so we do it here */
- kref_init(&seg->urb.kref);
+ usb_init_urb(&seg->urb);
+
+ /* set the remaining memory to 0. */
+ memset(((void *)seg) + sizeof(seg->urb), 0,
+ sizeof(*seg) - sizeof(seg->urb));
}
/*
@@ -166,8 +169,8 @@ static inline void wa_xfer_init(struct wa_xfer *xfer)
/*
* Destroy a transfer structure
*
- * Note that the xfer->seg[index] thingies follow the URB life cycle,
- * so we need to put them, not free them.
+ * Note that freeing xfer->seg[cnt]->urb will free the containing
+ * xfer->seg[cnt] memory that was allocated by __wa_xfer_setup_segs.
*/
static void wa_xfer_destroy(struct kref *_xfer)
{
@@ -175,9 +178,8 @@ static void wa_xfer_destroy(struct kref *_xfer)
if (xfer->seg) {
unsigned cnt;
for (cnt = 0; cnt < xfer->segs; cnt++) {
- if (xfer->is_inbound)
- usb_put_urb(xfer->seg[cnt]->dto_urb);
- usb_put_urb(&xfer->seg[cnt]->urb);
+ usb_free_urb(xfer->seg[cnt]->dto_urb);
+ usb_free_urb(&xfer->seg[cnt]->urb);
}
}
kfree(xfer);
@@ -732,9 +734,9 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
buf_itr = 0;
buf_size = xfer->urb->transfer_buffer_length;
for (cnt = 0; cnt < xfer->segs; cnt++) {
- seg = xfer->seg[cnt] = kzalloc(alloc_size, GFP_ATOMIC);
+ seg = xfer->seg[cnt] = kmalloc(alloc_size, GFP_ATOMIC);
if (seg == NULL)
- goto error_seg_kzalloc;
+ goto error_seg_kmalloc;
wa_seg_init(seg);
seg->xfer = xfer;
seg->index = cnt;
@@ -804,15 +806,17 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
return 0;
error_sg_alloc:
- kfree(seg->dto_urb);
+ usb_free_urb(xfer->seg[cnt]->dto_urb);
error_dto_alloc:
kfree(xfer->seg[cnt]);
cnt--;
-error_seg_kzalloc:
+error_seg_kmalloc:
 /* use the fact that cnt is left at where it failed */
for (; cnt >= 0; cnt--) {
- if (xfer->seg[cnt] && xfer->is_inbound == 0)
+ if (xfer->seg[cnt] && xfer->is_inbound == 0) {
usb_free_urb(xfer->seg[cnt]->dto_urb);
+ kfree(xfer->seg[cnt]->dto_urb->sg);
+ }
kfree(xfer->seg[cnt]);
}
error_segs_kzalloc:
@@ -928,7 +932,7 @@ static void wa_xfer_delayed_run(struct wa_rpipe *rpipe)
spin_lock_irqsave(&rpipe->seg_lock, flags);
while (atomic_read(&rpipe->segs_available) > 0
&& !list_empty(&rpipe->seg_list)) {
- seg = list_entry(rpipe->seg_list.next, struct wa_seg,
+ seg = list_first_entry(&(rpipe->seg_list), struct wa_seg,
list_node);
list_del(&seg->list_node);
xfer = seg->xfer;
@@ -1093,34 +1097,82 @@ error_xfer_submit:
*
* We need to be careful here, as dequeue() could be called in the
* middle. That's why we do the whole thing under the
- * wa->xfer_list_lock. If dequeue() jumps in, it first locks urb->lock
+ * wa->xfer_list_lock. If dequeue() jumps in, it first locks xfer->lock
* and then checks the list -- so as we would be acquiring in inverse
- * order, we just drop the lock once we have the xfer and reacquire it
- * later.
+ * order, we move the delayed list to a separate list while locked and then
+ * submit them without the list lock held.
*/
void wa_urb_enqueue_run(struct work_struct *ws)
{
- struct wahc *wa = container_of(ws, struct wahc, xfer_work);
+ struct wahc *wa = container_of(ws, struct wahc, xfer_enqueue_work);
struct wa_xfer *xfer, *next;
struct urb *urb;
+ LIST_HEAD(tmp_list);
+ /* Create a copy of the wa->xfer_delayed_list while holding the lock */
spin_lock_irq(&wa->xfer_list_lock);
- list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list,
- list_node) {
+ list_cut_position(&tmp_list, &wa->xfer_delayed_list,
+ wa->xfer_delayed_list.prev);
+ spin_unlock_irq(&wa->xfer_list_lock);
+
+ /*
+ * enqueue from temp list without list lock held since wa_urb_enqueue_b
+ * can take xfer->lock as well as lock mutexes.
+ */
+ list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
list_del_init(&xfer->list_node);
- spin_unlock_irq(&wa->xfer_list_lock);
urb = xfer->urb;
wa_urb_enqueue_b(xfer);
usb_put_urb(urb); /* taken when queuing */
-
- spin_lock_irq(&wa->xfer_list_lock);
}
- spin_unlock_irq(&wa->xfer_list_lock);
}
EXPORT_SYMBOL_GPL(wa_urb_enqueue_run);
/*
+ * Process the errored transfers on the Wire Adapter outside of interrupt.
+ */
+void wa_process_errored_transfers_run(struct work_struct *ws)
+{
+ struct wahc *wa = container_of(ws, struct wahc, xfer_error_work);
+ struct wa_xfer *xfer, *next;
+ LIST_HEAD(tmp_list);
+
+ pr_info("%s: Run delayed STALL processing.\n", __func__);
+
+ /* Create a copy of the wa->xfer_errored_list while holding the lock */
+ spin_lock_irq(&wa->xfer_list_lock);
+ list_cut_position(&tmp_list, &wa->xfer_errored_list,
+ wa->xfer_errored_list.prev);
+ spin_unlock_irq(&wa->xfer_list_lock);
+
+ /*
+ * run rpipe_clear_feature_stalled from temp list without list lock
+ * held.
+ */
+ list_for_each_entry_safe(xfer, next, &tmp_list, list_node) {
+ struct usb_host_endpoint *ep;
+ unsigned long flags;
+ struct wa_rpipe *rpipe;
+
+ spin_lock_irqsave(&xfer->lock, flags);
+ ep = xfer->ep;
+ rpipe = ep->hcpriv;
+ spin_unlock_irqrestore(&xfer->lock, flags);
+
+ /* clear RPIPE feature stalled without holding a lock. */
+ rpipe_clear_feature_stalled(wa, ep);
+
+ /* complete the xfer. This removes it from the tmp list. */
+ wa_xfer_completion(xfer);
+
+ /* check for work. */
+ wa_xfer_delayed_run(rpipe);
+ }
+}
+EXPORT_SYMBOL_GPL(wa_process_errored_transfers_run);
+
+/*
* Submit a transfer to the Wire Adapter in a delayed way
*
* The process of enqueuing involves possible sleeps() [see
@@ -1175,7 +1227,7 @@ int wa_urb_enqueue(struct wahc *wa, struct usb_host_endpoint *ep,
spin_lock_irqsave(&wa->xfer_list_lock, my_flags);
list_add_tail(&xfer->list_node, &wa->xfer_delayed_list);
spin_unlock_irqrestore(&wa->xfer_list_lock, my_flags);
- queue_work(wusbd, &wa->xfer_work);
+ queue_work(wusbd, &wa->xfer_enqueue_work);
} else {
wa_urb_enqueue_b(xfer);
}
@@ -1217,7 +1269,8 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
xfer = urb->hcpriv;
if (xfer == NULL) {
- /* NOthing setup yet enqueue will see urb->status !=
+ /*
+ * Nothing setup yet; enqueue will see urb->status !=
* -EINPROGRESS (by hcd layer) and bail out with
* error, no need to do completion
*/
@@ -1226,6 +1279,12 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb)
}
spin_lock_irqsave(&xfer->lock, flags);
rpipe = xfer->ep->hcpriv;
+ if (rpipe == NULL) {
+ pr_debug("%s: xfer id 0x%08X has no RPIPE. %s",
+ __func__, wa_xfer_id(xfer),
+ "Probably already aborted.\n" );
+ goto out_unlock;
+ }
/* Check the delayed list -> if there, release and complete */
spin_lock_irqsave(&wa->xfer_list_lock, flags2);
if (!list_empty(&xfer->list_node) && xfer->seg == NULL)
@@ -1355,7 +1414,7 @@ static int wa_xfer_status_to_errno(u8 status)
*
* inbound transfers: need to schedule a DTI read
*
- * FIXME: this functio needs to be broken up in parts
+ * FIXME: this function needs to be broken up in parts
*/
static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer)
{
@@ -1477,17 +1536,37 @@ error_submit_buf_in:
seg->result = result;
kfree(wa->buf_in_urb->sg);
error_sg_alloc:
+ __wa_xfer_abort(xfer);
error_complete:
seg->status = WA_SEG_ERROR;
xfer->segs_done++;
rpipe_ready = rpipe_avail_inc(rpipe);
- __wa_xfer_abort(xfer);
done = __wa_xfer_is_done(xfer);
- spin_unlock_irqrestore(&xfer->lock, flags);
- if (done)
- wa_xfer_completion(xfer);
- if (rpipe_ready)
- wa_xfer_delayed_run(rpipe);
+ /*
+ * queue work item to clear STALL for control endpoints.
+ * Otherwise, let endpoint_reset take care of it.
+ */
+ if (((usb_status & 0x3f) == WA_XFER_STATUS_HALTED) &&
+ usb_endpoint_xfer_control(&xfer->ep->desc) &&
+ done) {
+
+ dev_info(dev, "Control EP stall. Queue delayed work.\n");
+ spin_lock_irq(&wa->xfer_list_lock);
+ /* remove xfer from xfer_list. */
+ list_del(&xfer->list_node);
+ /* add xfer to xfer_errored_list. */
+ list_add_tail(&xfer->list_node, &wa->xfer_errored_list);
+ spin_unlock_irq(&wa->xfer_list_lock);
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ queue_work(wusbd, &wa->xfer_error_work);
+ } else {
+ spin_unlock_irqrestore(&xfer->lock, flags);
+ if (done)
+ wa_xfer_completion(xfer);
+ if (rpipe_ready)
+ wa_xfer_delayed_run(rpipe);
+ }
+
return;
error_bad_seg:
@@ -1644,8 +1723,7 @@ static void wa_xfer_result_cb(struct urb *urb)
break;
}
usb_status = xfer_result->bTransferStatus & 0x3f;
- if (usb_status == WA_XFER_STATUS_ABORTED
- || usb_status == WA_XFER_STATUS_NOT_FOUND)
+ if (usb_status == WA_XFER_STATUS_NOT_FOUND)
/* taken care of already */
break;
xfer_id = xfer_result->dwTransferID;
diff --git a/drivers/uwb/drp-ie.c b/drivers/uwb/drp-ie.c
index 520673109a7..b7d4f6b75ee 100644
--- a/drivers/uwb/drp-ie.c
+++ b/drivers/uwb/drp-ie.c
@@ -27,7 +27,7 @@
/*
* Return the reason code for a reservations's DRP IE.
*/
-int uwb_rsv_reason_code(struct uwb_rsv *rsv)
+static int uwb_rsv_reason_code(struct uwb_rsv *rsv)
{
static const int reason_codes[] = {
[UWB_RSV_STATE_O_INITIATED] = UWB_DRP_REASON_ACCEPTED,
@@ -55,7 +55,7 @@ int uwb_rsv_reason_code(struct uwb_rsv *rsv)
/*
* Return the reason code for a reservations's companion DRP IE .
*/
-int uwb_rsv_companion_reason_code(struct uwb_rsv *rsv)
+static int uwb_rsv_companion_reason_code(struct uwb_rsv *rsv)
{
static const int companion_reason_codes[] = {
[UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED,
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 0621abef9b4..0257f35cfb9 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -611,7 +611,16 @@ static
int hwarc_reset(struct uwb_rc *uwb_rc)
{
struct hwarc *hwarc = uwb_rc->priv;
- return usb_reset_device(hwarc->usb_dev);
+ int result;
+
+ /* device lock must be held when calling usb_reset_device. */
+ result = usb_lock_device_for_reset(hwarc->usb_dev, NULL);
+ if (result >= 0) {
+ result = usb_reset_device(hwarc->usb_dev);
+ usb_unlock_device(hwarc->usb_dev);
+ }
+
+ return result;
}
/**
@@ -709,8 +718,10 @@ static int hwarc_neep_init(struct uwb_rc *rc)
error_neep_submit:
usb_free_urb(hwarc->neep_urb);
+ hwarc->neep_urb = NULL;
error_urb_alloc:
free_page((unsigned long)hwarc->rd_buffer);
+ hwarc->rd_buffer = NULL;
error_rd_buffer:
return -ENOMEM;
}
@@ -723,7 +734,10 @@ static void hwarc_neep_release(struct uwb_rc *rc)
usb_kill_urb(hwarc->neep_urb);
usb_free_urb(hwarc->neep_urb);
+ hwarc->neep_urb = NULL;
+
free_page((unsigned long)hwarc->rd_buffer);
+ hwarc->rd_buffer = NULL;
}
/**
diff --git a/drivers/uwb/pal.c b/drivers/uwb/pal.c
index 690577d2a35..c1304b8d498 100644
--- a/drivers/uwb/pal.c
+++ b/drivers/uwb/pal.c
@@ -68,8 +68,40 @@ int uwb_pal_register(struct uwb_pal *pal)
}
EXPORT_SYMBOL_GPL(uwb_pal_register);
+static int find_rc(struct device *dev, const void *data)
+{
+ const struct uwb_rc *target_rc = data;
+ struct uwb_rc *rc = dev_get_drvdata(dev);
+
+ if (rc == NULL) {
+ WARN_ON(1);
+ return 0;
+ }
+ if (rc == target_rc) {
+ if (rc->ready == 0)
+ return 0;
+ else
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * Given a radio controller descriptor, see if it is registered.
+ *
+ * @returns false if the rc does not exist or is quiescing; true otherwise.
+ */
+static bool uwb_rc_class_device_exists(struct uwb_rc *target_rc)
+{
+ struct device *dev;
+
+ dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc);
+
+ return (dev != NULL);
+}
+
/**
- * uwb_pal_register - unregister a UWB PAL
+ * uwb_pal_unregister - unregister a UWB PAL
* @pal: the PAL
*/
void uwb_pal_unregister(struct uwb_pal *pal)
@@ -85,7 +117,11 @@ void uwb_pal_unregister(struct uwb_pal *pal)
debugfs_remove(pal->debugfs_dir);
if (pal->device) {
- sysfs_remove_link(&rc->uwb_dev.dev.kobj, pal->name);
+ /* remove link to the PAL in the UWB device's directory. */
+ if (uwb_rc_class_device_exists(rc))
+ sysfs_remove_link(&rc->uwb_dev.dev.kobj, pal->name);
+
+ /* remove link to uwb_rc in the PAL device's directory. */
sysfs_remove_link(&pal->device->kobj, "uwb_rc");
}
}
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index c5179e269df..cef6002acbd 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -137,8 +137,27 @@ static void vfio_pci_disable(struct vfio_pci_device *vdev)
*/
pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
- if (vdev->reset_works)
- __pci_reset_function(pdev);
+ /*
+ * Careful, device_lock may already be held. This is the case if
+ * a driver unbind is blocked. Try to get the locks ourselves to
+ * prevent a deadlock.
+ */
+ if (vdev->reset_works) {
+ bool reset_done = false;
+
+ if (pci_cfg_access_trylock(pdev)) {
+ if (device_trylock(&pdev->dev)) {
+ __pci_reset_function_locked(pdev);
+ reset_done = true;
+ device_unlock(&pdev->dev);
+ }
+ pci_cfg_access_unlock(pdev);
+ }
+
+ if (!reset_done)
+ pr_warn("%s: Unable to acquire locks for reset of %s\n",
+ __func__, dev_name(&pdev->dev));
+ }
pci_restore_state(pdev);
}
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index c488da5db7c..842f4507883 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -494,27 +494,6 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
return 0;
}
-static int vfio_group_nb_del_dev(struct vfio_group *group, struct device *dev)
-{
- struct vfio_device *device;
-
- /*
- * Expect to fall out here. If a device was in use, it would
- * have been bound to a vfio sub-driver, which would have blocked
- * in .remove at vfio_del_group_dev. Sanity check that we no
- * longer track the device, so it's safe to remove.
- */
- device = vfio_group_get_device(group, dev);
- if (likely(!device))
- return 0;
-
- WARN("Device %s removed from live group %d!\n", dev_name(dev),
- iommu_group_id(group->iommu_group));
-
- vfio_device_put(device);
- return 0;
-}
-
static int vfio_group_nb_verify(struct vfio_group *group, struct device *dev)
{
/* We don't care what happens when the group isn't in use */
@@ -531,13 +510,11 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
struct device *dev = data;
/*
- * Need to go through a group_lock lookup to get a reference or
- * we risk racing a group being removed. Leave a WARN_ON for
- * debuging, but if the group no longer exists, a spurious notify
- * is harmless.
+ * Need to go through a group_lock lookup to get a reference or we
+ * risk racing a group being removed. Ignore spurious notifies.
*/
group = vfio_group_try_get(group);
- if (WARN_ON(!group))
+ if (!group)
return NOTIFY_OK;
switch (action) {
@@ -545,7 +522,13 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
vfio_group_nb_add_dev(group, dev);
break;
case IOMMU_GROUP_NOTIFY_DEL_DEVICE:
- vfio_group_nb_del_dev(group, dev);
+ /*
+ * Nothing to do here. If the device is in use, then the
+ * vfio sub-driver should block the remove callback until
+ * it is unused. If the device is unused or attached to a
+ * stub driver, then it should be released and we don't
+ * care that it will be going away.
+ */
break;
case IOMMU_GROUP_NOTIFY_BIND_DRIVER:
pr_debug("%s: Device %s, group %d binding to driver\n",
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 969a85960e9..831eb4fd197 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -276,12 +276,12 @@ static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
* of used idx. Once lower device DMA done contiguously, we will signal KVM
* guest used idx.
*/
-static int vhost_zerocopy_signal_used(struct vhost_net *net,
- struct vhost_virtqueue *vq)
+static void vhost_zerocopy_signal_used(struct vhost_net *net,
+ struct vhost_virtqueue *vq)
{
struct vhost_net_virtqueue *nvq =
container_of(vq, struct vhost_net_virtqueue, vq);
- int i;
+ int i, add;
int j = 0;
for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
@@ -289,15 +289,17 @@ static int vhost_zerocopy_signal_used(struct vhost_net *net,
vhost_net_tx_err(net);
if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
- vhost_add_used_and_signal(vq->dev, vq,
- vq->heads[i].id, 0);
++j;
} else
break;
}
- if (j)
- nvq->done_idx = i;
- return j;
+ while (j) {
+ add = min(UIO_MAXIOV - nvq->done_idx, j);
+ vhost_add_used_and_signal_n(vq->dev, vq,
+ &vq->heads[nvq->done_idx], add);
+ nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
+ j -= add;
+ }
}
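
To see why the signalling loop above is split on UIO_MAXIOV (1024): suppose nvq->done_idx = 1022 and j = 5 heads have completed. The first pass signals add = min(1024 - 1022, 5) = 2 entries (heads[1022..1023]) and wraps done_idx to 0; the second pass signals the remaining 3 entries (heads[0..2]) and leaves done_idx = 3, so vhost_add_used_and_signal_n() is never handed a range that crosses the end of the heads array.
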
static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
@@ -306,6 +308,11 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
struct vhost_virtqueue *vq = ubufs->vq;
int cnt = atomic_read(&ubufs->kref.refcount);
+ /* set len to mark this desc buffers done DMA */
+ vq->heads[ubuf->desc].len = success ?
+ VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
+ vhost_net_ubuf_put(ubufs);
+
/*
* Trigger polling thread if guest stopped submitting new buffers:
* in this case, the refcount after decrement will eventually reach 1
@@ -316,10 +323,6 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
*/
if (cnt <= 2 || !(cnt % 16))
vhost_poll_queue(&vq->poll);
- /* set len to mark this desc buffers done DMA */
- vq->heads[ubuf->desc].len = success ?
- VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
- vhost_net_ubuf_put(ubufs);
}
/* Expects to be always run from workqueue - which acts as
@@ -360,6 +363,13 @@ static void handle_tx(struct vhost_net *net)
if (zcopy)
vhost_zerocopy_signal_used(net, vq);
+ /* If more outstanding DMAs, queue the work.
+ * Handle upend_idx wrap around
+ */
+ if (unlikely((nvq->upend_idx + vq->num - VHOST_MAX_PEND)
+ % UIO_MAXIOV == nvq->done_idx))
+ break;
+
head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
ARRAY_SIZE(vq->iov),
&out, &in,
@@ -369,17 +379,6 @@ static void handle_tx(struct vhost_net *net)
break;
/* Nothing new? Wait for eventfd to tell us they refilled. */
if (head == vq->num) {
- int num_pends;
-
- /* If more outstanding DMAs, queue the work.
- * Handle upend_idx wrap around
- */
- num_pends = likely(nvq->upend_idx >= nvq->done_idx) ?
- (nvq->upend_idx - nvq->done_idx) :
- (nvq->upend_idx + UIO_MAXIOV -
- nvq->done_idx);
- if (unlikely(num_pends > VHOST_MAX_PEND))
- break;
if (unlikely(vhost_enable_notify(&net->dev, vq))) {
vhost_disable_notify(&net->dev, vq);
continue;
@@ -402,43 +401,36 @@ static void handle_tx(struct vhost_net *net)
iov_length(nvq->hdr, s), hdr_size);
break;
}
- zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
- nvq->upend_idx != nvq->done_idx);
+
+ zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
+ && (nvq->upend_idx + 1) % UIO_MAXIOV !=
+ nvq->done_idx
+ && vhost_net_tx_select_zcopy(net);
/* use msg_control to pass vhost zerocopy ubuf info to skb */
if (zcopy_used) {
+ struct ubuf_info *ubuf;
+ ubuf = nvq->ubuf_info + nvq->upend_idx;
+
vq->heads[nvq->upend_idx].id = head;
- if (!vhost_net_tx_select_zcopy(net) ||
- len < VHOST_GOODCOPY_LEN) {
- /* copy don't need to wait for DMA done */
- vq->heads[nvq->upend_idx].len =
- VHOST_DMA_DONE_LEN;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- ubufs = NULL;
- } else {
- struct ubuf_info *ubuf;
- ubuf = nvq->ubuf_info + nvq->upend_idx;
-
- vq->heads[nvq->upend_idx].len =
- VHOST_DMA_IN_PROGRESS;
- ubuf->callback = vhost_zerocopy_callback;
- ubuf->ctx = nvq->ubufs;
- ubuf->desc = nvq->upend_idx;
- msg.msg_control = ubuf;
- msg.msg_controllen = sizeof(ubuf);
- ubufs = nvq->ubufs;
- kref_get(&ubufs->kref);
- }
+ vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
+ ubuf->callback = vhost_zerocopy_callback;
+ ubuf->ctx = nvq->ubufs;
+ ubuf->desc = nvq->upend_idx;
+ msg.msg_control = ubuf;
+ msg.msg_controllen = sizeof(ubuf);
+ ubufs = nvq->ubufs;
+ kref_get(&ubufs->kref);
nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
- } else
+ } else {
msg.msg_control = NULL;
+ ubufs = NULL;
+ }
/* TODO: Check specific error and bomb out unless ENOBUFS? */
err = sock->ops->sendmsg(NULL, sock, &msg, len);
if (unlikely(err < 0)) {
if (zcopy_used) {
- if (ubufs)
- vhost_net_ubuf_put(ubufs);
+ vhost_net_ubuf_put(ubufs);
nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
% UIO_MAXIOV;
}
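
The vhost-net hunks above replace per-descriptor vhost_add_used_and_signal() calls with batched updates: completed zerocopy entries are flushed with vhost_add_used_and_signal_n() in at most two contiguous chunks, because done_idx can wrap around the UIO_MAXIOV-sized heads array. A stand-alone sketch of just that wrap-around batching arithmetic (RING_SIZE and the printf stand in for UIO_MAXIOV and the real flush call):

/* Compile with: cc -o batch batch.c */
#include <stdio.h>

#define RING_SIZE 1024          /* stands in for UIO_MAXIOV */

static int min_int(int a, int b) { return a < b ? a : b; }

/* Flush `pending` completed entries starting at *done_idx, splitting the
 * run into contiguous chunks so it never crosses the end of the array. */
static void flush_batched(int *done_idx, int pending)
{
        while (pending) {
                int add = min_int(RING_SIZE - *done_idx, pending);

                /* in the driver this is vhost_add_used_and_signal_n() */
                printf("flush %d entries at index %d\n", add, *done_idx);

                *done_idx = (*done_idx + add) % RING_SIZE;
                pending -= add;
        }
}

int main(void)
{
        int done_idx = 1020;

        flush_batched(&done_idx, 10);   /* splits into 4 + 6 across the wrap */
        printf("done_idx is now %d\n", done_idx);
        return 0;
}
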
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e58cf0001ce..9a9502a4aa5 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -13,7 +13,7 @@
#include <linux/eventfd.h>
#include <linux/vhost.h>
-#include <linux/socket.h> /* memcpy_fromiovec */
+#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
@@ -1332,48 +1332,9 @@ EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
* want to notify the guest, using eventfd. */
int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
{
- struct vring_used_elem __user *used;
+ struct vring_used_elem heads = { head, len };
- /* The virtqueue contains a ring of used buffers. Get a pointer to the
- * next entry in that used ring. */
- used = &vq->used->ring[vq->last_used_idx % vq->num];
- if (__put_user(head, &used->id)) {
- vq_err(vq, "Failed to write used id");
- return -EFAULT;
- }
- if (__put_user(len, &used->len)) {
- vq_err(vq, "Failed to write used len");
- return -EFAULT;
- }
- /* Make sure buffer is written before we update index. */
- smp_wmb();
- if (__put_user(vq->last_used_idx + 1, &vq->used->idx)) {
- vq_err(vq, "Failed to increment used idx");
- return -EFAULT;
- }
- if (unlikely(vq->log_used)) {
- /* Make sure data is seen before log. */
- smp_wmb();
- /* Log used ring entry write. */
- log_write(vq->log_base,
- vq->log_addr +
- ((void __user *)used - (void __user *)vq->used),
- sizeof *used);
- /* Log used index update. */
- log_write(vq->log_base,
- vq->log_addr + offsetof(struct vring_used, idx),
- sizeof vq->used->idx);
- if (vq->log_ctx)
- eventfd_signal(vq->log_ctx, 1);
- }
- vq->last_used_idx++;
- /* If the driver never bothers to signal in a very long while,
- * used index might wrap around. If that happens, invalidate
- * signalled_used index we stored. TODO: make sure driver
- * signals at least once in 2^16 and remove this. */
- if (unlikely(vq->last_used_idx == vq->signalled_used))
- vq->signalled_used_valid = false;
- return 0;
+ return vhost_add_used_n(vq, &heads, 1);
}
EXPORT_SYMBOL_GPL(vhost_add_used);
@@ -1387,7 +1348,16 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
start = vq->last_used_idx % vq->num;
used = vq->used->ring + start;
- if (__copy_to_user(used, heads, count * sizeof *used)) {
+ if (count == 1) {
+ if (__put_user(heads[0].id, &used->id)) {
+ vq_err(vq, "Failed to write used id");
+ return -EFAULT;
+ }
+ if (__put_user(heads[0].len, &used->len)) {
+ vq_err(vq, "Failed to write used len");
+ return -EFAULT;
+ }
+ } else if (__copy_to_user(used, heads, count * sizeof *used)) {
vq_err(vq, "Failed to write used");
return -EFAULT;
}
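
The vhost core hunk above makes vhost_add_used() a thin wrapper: it builds a one-element vring_used_elem on the stack and hands it to vhost_add_used_n(), while __vhost_add_used_n() keeps a __put_user() fast path for the single-element case. A small stand-alone sketch of the "a scalar update is just a batch of one" refactor, with illustrative types and names rather than the vhost ones:

#include <stdio.h>

struct used_elem { unsigned int id; unsigned int len; };

/* batched path: in the driver this is __vhost_add_used_n(), which also
 * keeps a __put_user() fast path for count == 1 */
static int add_used_n(const struct used_elem *heads, unsigned int count)
{
        for (unsigned int i = 0; i < count; i++)
                printf("used: id=%u len=%u\n", heads[i].id, heads[i].len);
        return 0;
}

/* the scalar helper is now just a batch of one, so the ring-update logic
 * lives in exactly one place */
static int add_used(unsigned int head, unsigned int len)
{
        struct used_elem heads = { head, len };

        return add_used_n(&heads, 1);
}

int main(void)
{
        return add_used(3, 64);
}
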
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 4cf1e1dd562..84b685f7ab6 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2100,13 +2100,6 @@ config GPM1040A0_320X240
bool "Giantplus Technology GPM1040A0 320x240 Color TFT LCD"
depends on FB_NUC900
-config FB_NUC900_DEBUG
- bool "NUC900 lcd debug messages"
- depends on FB_NUC900
- help
- Turn on debugging messages. Note that you can set/unset at run time
- through sysfs
-
config FB_SM501
tristate "Silicon Motion SM501 framebuffer support"
depends on FB && MFD_SM501
@@ -2228,15 +2221,17 @@ config FB_SH7760
panels <= 320 pixel horizontal resolution.
config FB_DA8XX
- tristate "DA8xx/OMAP-L1xx Framebuffer support"
- depends on FB && ARCH_DAVINCI_DA8XX
+ tristate "DA8xx/OMAP-L1xx/AM335x Framebuffer support"
+ depends on FB && (ARCH_DAVINCI_DA8XX || SOC_AM33XX)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select FB_CFB_REV_PIXELS_IN_BYTE
+ select FB_MODE_HELPERS
+ select VIDEOMODE_HELPERS
---help---
This is the frame buffer device driver for the TI LCD controller
- found on DA8xx/OMAP-L1xx SoCs.
+ found on DA8xx/OMAP-L1xx/AM335x SoCs.
If unsure, say N.
config FB_VIRTUAL
@@ -2457,7 +2452,7 @@ config FB_HYPERV
config FB_SIMPLE
bool "Simple framebuffer support"
- depends on (FB = y) && OF
+ depends on (FB = y)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -2469,8 +2464,7 @@ config FB_SIMPLE
pre-allocated frame buffer surface.
Configuration re: surface address, size, and format must be provided
- through device tree, or potentially plain old platform data in the
- future.
+ through device tree, or plain old platform data.
source "drivers/video/omap/Kconfig"
source "drivers/video/omap2/Kconfig"
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index effdb373b8d..088511a58a2 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -902,14 +902,14 @@ static int __init atmel_lcdfb_init_fbinfo(struct atmel_lcdfb_info *sinfo)
static void atmel_lcdfb_start_clock(struct atmel_lcdfb_info *sinfo)
{
- clk_enable(sinfo->bus_clk);
- clk_enable(sinfo->lcdc_clk);
+ clk_prepare_enable(sinfo->bus_clk);
+ clk_prepare_enable(sinfo->lcdc_clk);
}
static void atmel_lcdfb_stop_clock(struct atmel_lcdfb_info *sinfo)
{
- clk_disable(sinfo->bus_clk);
- clk_disable(sinfo->lcdc_clk);
+ clk_disable_unprepare(sinfo->bus_clk);
+ clk_disable_unprepare(sinfo->lcdc_clk);
}
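
The atmel_lcdfb change above adopts the common clock framework convention: a clock must be prepared before it can be enabled, and clk_prepare_enable()/clk_disable_unprepare() do both steps in one call. A minimal kernel-style sketch of the pattern (the surrounding driver code is illustrative, the clk calls are the real API):

#include <linux/clk.h>

static int example_start(struct clk *clk)
{
        int ret;

        ret = clk_prepare_enable(clk);  /* may sleep; not for atomic context */
        if (ret)
                return ret;

        /* ... program the peripheral ... */
        return 0;
}

static void example_stop(struct clk *clk)
{
        /* balance both the enable and the prepare */
        clk_disable_unprepare(clk);
}
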
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index a89c15de9f4..9b0f12c5c28 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -435,8 +435,8 @@ static int correct_chipset(struct atyfb_par *par)
const char *name;
int i;
- for (i = ARRAY_SIZE(aty_chips); i > 0; i--)
- if (par->pci_id == aty_chips[i - 1].pci_id)
+ for (i = (int)ARRAY_SIZE(aty_chips) - 1; i >= 0; i--)
+ if (par->pci_id == aty_chips[i].pci_id)
break;
if (i < 0)
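
The atyfb fix above is a loop-bound correction: the old loop counted i from ARRAY_SIZE(aty_chips) down to 1 and matched on aty_chips[i - 1], so i could never become negative and the "if (i < 0)" not-found check after it was dead. The rewritten loop walks i from ARRAY_SIZE - 1 down to 0 and falls out at -1 when nothing matches. A stand-alone sketch of the corrected reverse search (table contents are made up):

#include <stdio.h>

#define ARRAY_SIZE(a) ((int)(sizeof(a) / sizeof((a)[0])))

static const int ids[] = { 0x4742, 0x4744, 0x4749 };

static int find_id(int wanted)
{
        int i;

        for (i = ARRAY_SIZE(ids) - 1; i >= 0; i--)
                if (ids[i] == wanted)
                        break;

        return i;       /* index of the match, or -1 if not found */
}

int main(void)
{
        printf("%d %d\n", find_id(0x4744), find_id(0x9999));   /* prints: 1 -1 */
        return 0;
}
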
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 3fccb6d3c8c..94a403a9717 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -103,16 +103,16 @@ static void backlight_generate_event(struct backlight_device *bd,
sysfs_notify(&bd->dev.kobj, NULL, "actual_brightness");
}
-static ssize_t backlight_show_power(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t bl_power_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct backlight_device *bd = to_backlight_device(dev);
return sprintf(buf, "%d\n", bd->props.power);
}
-static ssize_t backlight_store_power(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t bl_power_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
int rc;
struct backlight_device *bd = to_backlight_device(dev);
@@ -136,8 +136,9 @@ static ssize_t backlight_store_power(struct device *dev,
return rc;
}
+static DEVICE_ATTR_RW(bl_power);
-static ssize_t backlight_show_brightness(struct device *dev,
+static ssize_t brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct backlight_device *bd = to_backlight_device(dev);
@@ -145,7 +146,7 @@ static ssize_t backlight_show_brightness(struct device *dev,
return sprintf(buf, "%d\n", bd->props.brightness);
}
-static ssize_t backlight_store_brightness(struct device *dev,
+static ssize_t brightness_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int rc;
@@ -175,24 +176,27 @@ static ssize_t backlight_store_brightness(struct device *dev,
return rc;
}
+static DEVICE_ATTR_RW(brightness);
-static ssize_t backlight_show_type(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct backlight_device *bd = to_backlight_device(dev);
return sprintf(buf, "%s\n", backlight_types[bd->props.type]);
}
+static DEVICE_ATTR_RO(type);
-static ssize_t backlight_show_max_brightness(struct device *dev,
+static ssize_t max_brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct backlight_device *bd = to_backlight_device(dev);
return sprintf(buf, "%d\n", bd->props.max_brightness);
}
+static DEVICE_ATTR_RO(max_brightness);
-static ssize_t backlight_show_actual_brightness(struct device *dev,
+static ssize_t actual_brightness_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int rc = -ENXIO;
@@ -205,6 +209,7 @@ static ssize_t backlight_show_actual_brightness(struct device *dev,
return rc;
}
+static DEVICE_ATTR_RO(actual_brightness);
static struct class *backlight_class;
@@ -247,16 +252,15 @@ static void bl_device_release(struct device *dev)
kfree(bd);
}
-static struct device_attribute bl_device_attributes[] = {
- __ATTR(bl_power, 0644, backlight_show_power, backlight_store_power),
- __ATTR(brightness, 0644, backlight_show_brightness,
- backlight_store_brightness),
- __ATTR(actual_brightness, 0444, backlight_show_actual_brightness,
- NULL),
- __ATTR(max_brightness, 0444, backlight_show_max_brightness, NULL),
- __ATTR(type, 0444, backlight_show_type, NULL),
- __ATTR_NULL,
+static struct attribute *bl_device_attrs[] = {
+ &dev_attr_bl_power.attr,
+ &dev_attr_brightness.attr,
+ &dev_attr_actual_brightness.attr,
+ &dev_attr_max_brightness.attr,
+ &dev_attr_type.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(bl_device);
/**
* backlight_force_update - tell the backlight subsystem that hardware state
@@ -493,7 +497,7 @@ static int __init backlight_class_init(void)
return PTR_ERR(backlight_class);
}
- backlight_class->dev_attrs = bl_device_attributes;
+ backlight_class->dev_groups = bl_device_groups;
backlight_class->pm = &backlight_class_dev_pm_ops;
return 0;
}
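
The backlight class conversion above moves from a device_attribute array to attribute groups: DEVICE_ATTR_RW(name)/DEVICE_ATTR_RO(name) derive everything from the attribute name (they expect name_show()/name_store() and emit dev_attr_name), and ATTRIBUTE_GROUPS(bl_device) generates the bl_device_groups table that is assigned to class->dev_groups. A minimal sketch of the pattern with an illustrative "foo" attribute and "example" group name:

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        return sprintf(buf, "%d\n", 42);
}

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count)
{
        /* parse and apply the value here */
        return count;
}
static DEVICE_ATTR_RW(foo);             /* declares dev_attr_foo */

static struct attribute *example_attrs[] = {
        &dev_attr_foo.attr,
        NULL,
};
ATTRIBUTE_GROUPS(example);              /* emits example_groups */

/* then, at class registration time:
 *      example_class->dev_groups = example_groups;
 */
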
diff --git a/drivers/video/backlight/hx8357.c b/drivers/video/backlight/hx8357.c
index a0482b567bf..c7af8c45ab8 100644
--- a/drivers/video/backlight/hx8357.c
+++ b/drivers/video/backlight/hx8357.c
@@ -71,11 +71,24 @@
#define HX8357_SET_POWER_NORMAL 0xd2
#define HX8357_SET_PANEL_RELATED 0xe9
+#define HX8369_SET_DISPLAY_BRIGHTNESS 0x51
+#define HX8369_WRITE_CABC_DISPLAY_VALUE 0x53
+#define HX8369_WRITE_CABC_BRIGHT_CTRL 0x55
+#define HX8369_WRITE_CABC_MIN_BRIGHTNESS 0x5e
+#define HX8369_SET_POWER 0xb1
+#define HX8369_SET_DISPLAY_MODE 0xb2
+#define HX8369_SET_DISPLAY_WAVEFORM_CYC 0xb4
+#define HX8369_SET_VCOM 0xb6
+#define HX8369_SET_EXTENSION_COMMAND 0xb9
+#define HX8369_SET_GIP 0xd5
+#define HX8369_SET_GAMMA_CURVE_RELATED 0xe0
+
struct hx8357_data {
unsigned im_pins[HX8357_NUM_IM_PINS];
unsigned reset;
struct spi_device *spi;
int state;
+ bool use_im_pins;
};
static u8 hx8357_seq_power[] = {
@@ -143,6 +156,61 @@ static u8 hx8357_seq_display_mode[] = {
HX8357_SET_DISPLAY_MODE_RGB_INTERFACE,
};
+static u8 hx8369_seq_write_CABC_min_brightness[] = {
+ HX8369_WRITE_CABC_MIN_BRIGHTNESS, 0x00,
+};
+
+static u8 hx8369_seq_write_CABC_control[] = {
+ HX8369_WRITE_CABC_DISPLAY_VALUE, 0x24,
+};
+
+static u8 hx8369_seq_set_display_brightness[] = {
+ HX8369_SET_DISPLAY_BRIGHTNESS, 0xFF,
+};
+
+static u8 hx8369_seq_write_CABC_control_setting[] = {
+ HX8369_WRITE_CABC_BRIGHT_CTRL, 0x02,
+};
+
+static u8 hx8369_seq_extension_command[] = {
+ HX8369_SET_EXTENSION_COMMAND, 0xff, 0x83, 0x69,
+};
+
+static u8 hx8369_seq_display_related[] = {
+ HX8369_SET_DISPLAY_MODE, 0x00, 0x2b, 0x03, 0x03, 0x70, 0x00,
+ 0xff, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, 0x00, 0x01,
+};
+
+static u8 hx8369_seq_panel_waveform_cycle[] = {
+ HX8369_SET_DISPLAY_WAVEFORM_CYC, 0x0a, 0x1d, 0x80, 0x06, 0x02,
+};
+
+static u8 hx8369_seq_set_address_mode[] = {
+ HX8357_SET_ADDRESS_MODE, 0x00,
+};
+
+static u8 hx8369_seq_vcom[] = {
+ HX8369_SET_VCOM, 0x3e, 0x3e,
+};
+
+static u8 hx8369_seq_gip[] = {
+ HX8369_SET_GIP, 0x00, 0x01, 0x03, 0x25, 0x01, 0x02, 0x28, 0x70,
+ 0x11, 0x13, 0x00, 0x00, 0x40, 0x26, 0x51, 0x37, 0x00, 0x00, 0x71,
+ 0x35, 0x60, 0x24, 0x07, 0x0f, 0x04, 0x04,
+};
+
+static u8 hx8369_seq_power[] = {
+ HX8369_SET_POWER, 0x01, 0x00, 0x34, 0x03, 0x00, 0x11, 0x11, 0x32,
+ 0x2f, 0x3f, 0x3f, 0x01, 0x3a, 0x01, 0xe6, 0xe6, 0xe6, 0xe6, 0xe6,
+};
+
+static u8 hx8369_seq_gamma_curve_related[] = {
+ HX8369_SET_GAMMA_CURVE_RELATED, 0x00, 0x0d, 0x19, 0x2f, 0x3b, 0x3d,
+ 0x2e, 0x4a, 0x08, 0x0e, 0x0f, 0x14, 0x16, 0x14, 0x14, 0x14, 0x1e,
+ 0x00, 0x0d, 0x19, 0x2f, 0x3b, 0x3d, 0x2e, 0x4a, 0x08, 0x0e, 0x0f,
+ 0x14, 0x16, 0x14, 0x14, 0x14, 0x1e,
+};
+
static int hx8357_spi_write_then_read(struct lcd_device *lcdev,
u8 *txbuf, u16 txlen,
u8 *rxbuf, u16 rxlen)
@@ -219,6 +287,10 @@ static int hx8357_enter_standby(struct lcd_device *lcdev)
if (ret < 0)
return ret;
+ /*
+ * The controller needs 120ms after entering sleep mode before it can
+ * accept the command to leave sleep mode
+ */
msleep(120);
return 0;
@@ -232,6 +304,10 @@ static int hx8357_exit_standby(struct lcd_device *lcdev)
if (ret < 0)
return ret;
+ /*
+ * The controller needs 120ms after exiting sleep mode before it can
+ * accept the command to enter sleep mode again
+ */
msleep(120);
ret = hx8357_spi_write_byte(lcdev, HX8357_SET_DISPLAY_ON);
@@ -241,18 +317,9 @@ static int hx8357_exit_standby(struct lcd_device *lcdev)
return 0;
}
-static int hx8357_lcd_init(struct lcd_device *lcdev)
+static void hx8357_lcd_reset(struct lcd_device *lcdev)
{
struct hx8357_data *lcd = lcd_get_data(lcdev);
- int ret;
-
- /*
- * Set the interface selection pins to SPI mode, with three
- * wires
- */
- gpio_set_value_cansleep(lcd->im_pins[0], 1);
- gpio_set_value_cansleep(lcd->im_pins[1], 0);
- gpio_set_value_cansleep(lcd->im_pins[2], 1);
/* Reset the screen */
gpio_set_value(lcd->reset, 1);
@@ -260,7 +327,25 @@ static int hx8357_lcd_init(struct lcd_device *lcdev)
gpio_set_value(lcd->reset, 0);
usleep_range(10000, 12000);
gpio_set_value(lcd->reset, 1);
+
+ /* The controller needs 120ms to recover from reset */
msleep(120);
+}
+
+static int hx8357_lcd_init(struct lcd_device *lcdev)
+{
+ struct hx8357_data *lcd = lcd_get_data(lcdev);
+ int ret;
+
+ /*
+ * Set the interface selection pins to SPI mode, with three
+ * wires
+ */
+ if (lcd->use_im_pins) {
+ gpio_set_value_cansleep(lcd->im_pins[0], 1);
+ gpio_set_value_cansleep(lcd->im_pins[1], 0);
+ gpio_set_value_cansleep(lcd->im_pins[2], 1);
+ }
ret = hx8357_spi_write_array(lcdev, hx8357_seq_power,
ARRAY_SIZE(hx8357_seq_power));
@@ -341,6 +426,9 @@ static int hx8357_lcd_init(struct lcd_device *lcdev)
if (ret < 0)
return ret;
+ /*
+ * The controller needs 120ms to fully recover from exiting sleep mode
+ */
msleep(120);
ret = hx8357_spi_write_byte(lcdev, HX8357_SET_DISPLAY_ON);
@@ -356,6 +444,96 @@ static int hx8357_lcd_init(struct lcd_device *lcdev)
return 0;
}
+static int hx8369_lcd_init(struct lcd_device *lcdev)
+{
+ int ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8369_seq_extension_command,
+ ARRAY_SIZE(hx8369_seq_extension_command));
+ if (ret < 0)
+ return ret;
+ usleep_range(10000, 12000);
+
+ ret = hx8357_spi_write_array(lcdev, hx8369_seq_display_related,
+ ARRAY_SIZE(hx8369_seq_display_related));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8369_seq_panel_waveform_cycle,
+ ARRAY_SIZE(hx8369_seq_panel_waveform_cycle));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8369_seq_set_address_mode,
+ ARRAY_SIZE(hx8369_seq_set_address_mode));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8369_seq_vcom,
+ ARRAY_SIZE(hx8369_seq_vcom));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8369_seq_gip,
+ ARRAY_SIZE(hx8369_seq_gip));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev, hx8369_seq_power,
+ ARRAY_SIZE(hx8369_seq_power));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_byte(lcdev, HX8357_EXIT_SLEEP_MODE);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The controller needs 120ms to fully recover from exiting sleep mode
+ */
+ msleep(120);
+
+ ret = hx8357_spi_write_array(lcdev, hx8369_seq_gamma_curve_related,
+ ARRAY_SIZE(hx8369_seq_gamma_curve_related));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_byte(lcdev, HX8357_EXIT_SLEEP_MODE);
+ if (ret < 0)
+ return ret;
+ usleep_range(1000, 1200);
+
+ ret = hx8357_spi_write_array(lcdev, hx8369_seq_write_CABC_control,
+ ARRAY_SIZE(hx8369_seq_write_CABC_control));
+ if (ret < 0)
+ return ret;
+ usleep_range(10000, 12000);
+
+ ret = hx8357_spi_write_array(lcdev,
+ hx8369_seq_write_CABC_control_setting,
+ ARRAY_SIZE(hx8369_seq_write_CABC_control_setting));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_array(lcdev,
+ hx8369_seq_write_CABC_min_brightness,
+ ARRAY_SIZE(hx8369_seq_write_CABC_min_brightness));
+ if (ret < 0)
+ return ret;
+ usleep_range(10000, 12000);
+
+ ret = hx8357_spi_write_array(lcdev, hx8369_seq_set_display_brightness,
+ ARRAY_SIZE(hx8369_seq_set_display_brightness));
+ if (ret < 0)
+ return ret;
+
+ ret = hx8357_spi_write_byte(lcdev, HX8357_SET_DISPLAY_ON);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
#define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL)
static int hx8357_set_power(struct lcd_device *lcdev, int power)
@@ -388,10 +566,24 @@ static struct lcd_ops hx8357_ops = {
.get_power = hx8357_get_power,
};
+static const struct of_device_id hx8357_dt_ids[] = {
+ {
+ .compatible = "himax,hx8357",
+ .data = hx8357_lcd_init,
+ },
+ {
+ .compatible = "himax,hx8369",
+ .data = hx8369_lcd_init,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hx8357_dt_ids);
+
static int hx8357_probe(struct spi_device *spi)
{
struct lcd_device *lcdev;
struct hx8357_data *lcd;
+ const struct of_device_id *match;
int i, ret;
lcd = devm_kzalloc(&spi->dev, sizeof(*lcd), GFP_KERNEL);
@@ -408,6 +600,10 @@ static int hx8357_probe(struct spi_device *spi)
lcd->spi = spi;
+ match = of_match_device(hx8357_dt_ids, &spi->dev);
+ if (!match || !match->data)
+ return -EINVAL;
+
lcd->reset = of_get_named_gpio(spi->dev.of_node, "gpios-reset", 0);
if (!gpio_is_valid(lcd->reset)) {
dev_err(&spi->dev, "Missing dt property: gpios-reset\n");
@@ -424,25 +620,32 @@ static int hx8357_probe(struct spi_device *spi)
return -EINVAL;
}
- for (i = 0; i < HX8357_NUM_IM_PINS; i++) {
- lcd->im_pins[i] = of_get_named_gpio(spi->dev.of_node,
- "im-gpios", i);
- if (lcd->im_pins[i] == -EPROBE_DEFER) {
- dev_info(&spi->dev, "GPIO requested is not here yet, deferring the probe\n");
- return -EPROBE_DEFER;
- }
- if (!gpio_is_valid(lcd->im_pins[i])) {
- dev_err(&spi->dev, "Missing dt property: im-gpios\n");
- return -EINVAL;
- }
-
- ret = devm_gpio_request_one(&spi->dev, lcd->im_pins[i],
- GPIOF_OUT_INIT_LOW, "im_pins");
- if (ret) {
- dev_err(&spi->dev, "failed to request gpio %d: %d\n",
- lcd->im_pins[i], ret);
- return -EINVAL;
+ if (of_find_property(spi->dev.of_node, "im-gpios", NULL)) {
+ lcd->use_im_pins = 1;
+
+ for (i = 0; i < HX8357_NUM_IM_PINS; i++) {
+ lcd->im_pins[i] = of_get_named_gpio(spi->dev.of_node,
+ "im-gpios", i);
+ if (lcd->im_pins[i] == -EPROBE_DEFER) {
+ dev_info(&spi->dev, "GPIO requested is not here yet, deferring the probe\n");
+ return -EPROBE_DEFER;
+ }
+ if (!gpio_is_valid(lcd->im_pins[i])) {
+ dev_err(&spi->dev, "Missing dt property: im-gpios\n");
+ return -EINVAL;
+ }
+
+ ret = devm_gpio_request_one(&spi->dev, lcd->im_pins[i],
+ GPIOF_OUT_INIT_LOW,
+ "im_pins");
+ if (ret) {
+ dev_err(&spi->dev, "failed to request gpio %d: %d\n",
+ lcd->im_pins[i], ret);
+ return -EINVAL;
+ }
}
+ } else {
+ lcd->use_im_pins = 0;
}
lcdev = lcd_device_register("mxsfb", &spi->dev, lcd, &hx8357_ops);
@@ -452,7 +655,9 @@ static int hx8357_probe(struct spi_device *spi)
}
spi_set_drvdata(spi, lcdev);
- ret = hx8357_lcd_init(lcdev);
+ hx8357_lcd_reset(lcdev);
+
+ ret = ((int (*)(struct lcd_device *))match->data)(lcdev);
if (ret) {
dev_err(&spi->dev, "Couldn't initialize panel\n");
goto init_error;
@@ -475,12 +680,6 @@ static int hx8357_remove(struct spi_device *spi)
return 0;
}
-static const struct of_device_id hx8357_dt_ids[] = {
- { .compatible = "himax,hx8357" },
- {},
-};
-MODULE_DEVICE_TABLE(of, hx8357_dt_ids);
-
static struct spi_driver hx8357_driver = {
.probe = hx8357_probe,
.remove = hx8357_remove,
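
The hx8357 probe above now keys the per-panel init routine off the of_device_id .data field: each compatible entry carries a function pointer, and probe looks the entry up with of_match_device() after the common reset and calls it. A hedged sketch of that per-compatible dispatch pattern; the panel type, variants and names are illustrative, only of_match_device() and the .data convention are the real API.

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_device.h>

struct panel;                           /* illustrative, opaque here */
typedef int (*panel_init_fn)(struct panel *);

static int variant_a_init(struct panel *p) { return 0; }
static int variant_b_init(struct panel *p) { return 0; }

static const struct of_device_id panel_dt_ids[] = {
        { .compatible = "vendor,variant-a", .data = variant_a_init },
        { .compatible = "vendor,variant-b", .data = variant_b_init },
        { }
};

static int panel_probe(struct device *dev, struct panel *p)
{
        const struct of_device_id *match = of_match_device(panel_dt_ids, dev);

        if (!match || !match->data)
                return -EINVAL;

        /* dispatch to the init routine matching the compatible string */
        return ((panel_init_fn)match->data)(p);
}
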
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 41964a71a03..93cf15efc71 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -89,7 +89,7 @@ static inline void lcd_unregister_fb(struct lcd_device *ld)
}
#endif /* CONFIG_FB */
-static ssize_t lcd_show_power(struct device *dev, struct device_attribute *attr,
+static ssize_t lcd_power_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
int rc;
@@ -105,7 +105,7 @@ static ssize_t lcd_show_power(struct device *dev, struct device_attribute *attr,
return rc;
}
-static ssize_t lcd_store_power(struct device *dev,
+static ssize_t lcd_power_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int rc;
@@ -128,8 +128,9 @@ static ssize_t lcd_store_power(struct device *dev,
return rc;
}
+static DEVICE_ATTR_RW(lcd_power);
-static ssize_t lcd_show_contrast(struct device *dev,
+static ssize_t contrast_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int rc = -ENXIO;
@@ -143,7 +144,7 @@ static ssize_t lcd_show_contrast(struct device *dev,
return rc;
}
-static ssize_t lcd_store_contrast(struct device *dev,
+static ssize_t contrast_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int rc;
@@ -166,14 +167,16 @@ static ssize_t lcd_store_contrast(struct device *dev,
return rc;
}
+static DEVICE_ATTR_RW(contrast);
-static ssize_t lcd_show_max_contrast(struct device *dev,
+static ssize_t max_contrast_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lcd_device *ld = to_lcd_device(dev);
return sprintf(buf, "%d\n", ld->props.max_contrast);
}
+static DEVICE_ATTR_RO(max_contrast);
static struct class *lcd_class;
@@ -183,12 +186,13 @@ static void lcd_device_release(struct device *dev)
kfree(ld);
}
-static struct device_attribute lcd_device_attributes[] = {
- __ATTR(lcd_power, 0644, lcd_show_power, lcd_store_power),
- __ATTR(contrast, 0644, lcd_show_contrast, lcd_store_contrast),
- __ATTR(max_contrast, 0444, lcd_show_max_contrast, NULL),
- __ATTR_NULL,
+static struct attribute *lcd_device_attrs[] = {
+ &dev_attr_lcd_power.attr,
+ &dev_attr_contrast.attr,
+ &dev_attr_max_contrast.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(lcd_device);
/**
* lcd_device_register - register a new object of lcd_device class.
@@ -344,7 +348,7 @@ static int __init lcd_class_init(void)
return PTR_ERR(lcd_class);
}
- lcd_class->dev_attrs = lcd_device_attributes;
+ lcd_class->dev_groups = lcd_device_groups;
return 0;
}
diff --git a/drivers/video/backlight/lp855x_bl.c b/drivers/video/backlight/lp855x_bl.c
index a0e1e02bdc2..c0b41f13bd4 100644
--- a/drivers/video/backlight/lp855x_bl.c
+++ b/drivers/video/backlight/lp855x_bl.c
@@ -246,7 +246,7 @@ static int lp855x_bl_update_status(struct backlight_device *bl)
{
struct lp855x *lp = bl_get_data(bl);
- if (bl->props.state & BL_CORE_SUSPENDED)
+ if (bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
bl->props.brightness = 0;
if (lp->mode == PWM_BASED) {
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index 5ca11b066b7..886e797f75f 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -101,33 +101,37 @@ static const struct backlight_ops max8925_backlight_ops = {
.get_brightness = max8925_backlight_get_brightness,
};
-#ifdef CONFIG_OF
-static int max8925_backlight_dt_init(struct platform_device *pdev,
- struct max8925_backlight_pdata *pdata)
+static void max8925_backlight_dt_init(struct platform_device *pdev)
{
struct device_node *nproot = pdev->dev.parent->of_node, *np;
- int dual_string;
+ struct max8925_backlight_pdata *pdata;
+ u32 val;
+
+ if (!nproot || !IS_ENABLED(CONFIG_OF))
+ return;
+
+ pdata = devm_kzalloc(&pdev->dev,
+ sizeof(struct max8925_backlight_pdata),
+ GFP_KERNEL);
+ if (!pdata)
+ return;
- if (!nproot)
- return -ENODEV;
np = of_find_node_by_name(nproot, "backlight");
if (!np) {
dev_err(&pdev->dev, "failed to find backlight node\n");
- return -ENODEV;
+ return;
}
- of_property_read_u32(np, "maxim,max8925-dual-string", &dual_string);
- pdata->dual_string = dual_string;
- return 0;
+ if (!of_property_read_u32(np, "maxim,max8925-dual-string", &val))
+ pdata->dual_string = val;
+
+ pdev->dev.platform_data = pdata;
}
-#else
-#define max8925_backlight_dt_init(x, y) (-1)
-#endif
static int max8925_backlight_probe(struct platform_device *pdev)
{
struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
- struct max8925_backlight_pdata *pdata = pdev->dev.platform_data;
+ struct max8925_backlight_pdata *pdata;
struct max8925_backlight_data *data;
struct backlight_device *bl;
struct backlight_properties props;
@@ -170,13 +174,10 @@ static int max8925_backlight_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, bl);
value = 0;
- if (pdev->dev.parent->of_node && !pdata) {
- pdata = devm_kzalloc(&pdev->dev,
- sizeof(struct max8925_backlight_pdata),
- GFP_KERNEL);
- max8925_backlight_dt_init(pdev, pdata);
- }
+ if (!pdev->dev.platform_data)
+ max8925_backlight_dt_init(pdev);
+ pdata = pdev->dev.platform_data;
if (pdata) {
if (pdata->lxw_scl)
value |= (1 << 7);
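
The max8925 hunks above also replace the #ifdef CONFIG_OF stub with an IS_ENABLED(CONFIG_OF) check: the DT-parsing function is always compiled (so it keeps being type-checked), allocates its own pdata and publishes it via pdev->dev.platform_data, and the compiler drops the OF-only body when CONFIG_OF=n. A minimal sketch of the IS_ENABLED pattern, with an illustrative function name:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/of.h>

static void example_dt_init(struct device *dev)
{
        if (!IS_ENABLED(CONFIG_OF) || !dev->of_node)
                return;

        /* ... read properties with of_property_read_u32() etc. and
         * build platform data for the rest of the probe path ... */
}
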
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 8c30603e0a8..846caab75a4 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -92,7 +92,8 @@ config DUMMY_CONSOLE_ROWS
config FRAMEBUFFER_CONSOLE
tristate "Framebuffer Console support"
- depends on FB
+ depends on FB && !UML
+ select VT_HW_CONSOLE_BINDING
select CRC32
select FONT_SUPPORT
help
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index 0810939936f..e030e17a83f 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -131,29 +131,28 @@
#define WSI_TIMEOUT 50
#define PALETTE_SIZE 256
-#define LEFT_MARGIN 64
-#define RIGHT_MARGIN 64
-#define UPPER_MARGIN 32
-#define LOWER_MARGIN 32
+
+#define CLK_MIN_DIV 2
+#define CLK_MAX_DIV 255
static void __iomem *da8xx_fb_reg_base;
-static struct resource *lcdc_regs;
static unsigned int lcd_revision;
static irq_handler_t lcdc_irq_handler;
static wait_queue_head_t frame_done_wq;
static int frame_done_flag;
-static inline unsigned int lcdc_read(unsigned int addr)
+static unsigned int lcdc_read(unsigned int addr)
{
return (unsigned int)__raw_readl(da8xx_fb_reg_base + (addr));
}
-static inline void lcdc_write(unsigned int val, unsigned int addr)
+static void lcdc_write(unsigned int val, unsigned int addr)
{
__raw_writel(val, da8xx_fb_reg_base + (addr));
}
struct da8xx_fb_par {
+ struct device *dev;
resource_size_t p_palette_base;
unsigned char *v_palette_base;
dma_addr_t vram_phys;
@@ -164,7 +163,6 @@ struct da8xx_fb_par {
struct clk *lcdc_clk;
int irq;
unsigned int palette_sz;
- unsigned int pxl_clk;
int blank;
wait_queue_head_t vsync_wait;
int vsync_flag;
@@ -178,29 +176,15 @@ struct da8xx_fb_par {
unsigned int which_dma_channel_done;
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
- unsigned int lcd_fck_rate;
#endif
+ unsigned int lcdc_clk_rate;
void (*panel_power_ctrl)(int);
u32 pseudo_palette[16];
+ struct fb_videomode mode;
+ struct lcd_ctrl_config cfg;
};
-/* Variable Screen Information */
-static struct fb_var_screeninfo da8xx_fb_var = {
- .xoffset = 0,
- .yoffset = 0,
- .transp = {0, 0, 0},
- .nonstd = 0,
- .activate = 0,
- .height = -1,
- .width = -1,
- .accel_flags = 0,
- .left_margin = LEFT_MARGIN,
- .right_margin = RIGHT_MARGIN,
- .upper_margin = UPPER_MARGIN,
- .lower_margin = LOWER_MARGIN,
- .sync = 0,
- .vmode = FB_VMODE_NONINTERLACED
-};
+static struct fb_var_screeninfo da8xx_fb_var;
static struct fb_fix_screeninfo da8xx_fb_fix = {
.id = "DA8xx FB Drv",
@@ -219,7 +203,7 @@ static struct fb_videomode known_lcd_panels[] = {
.name = "Sharp_LCD035Q3DG01",
.xres = 320,
.yres = 240,
- .pixclock = 4608000,
+ .pixclock = KHZ2PICOS(4607),
.left_margin = 6,
.right_margin = 8,
.upper_margin = 2,
@@ -234,7 +218,7 @@ static struct fb_videomode known_lcd_panels[] = {
.name = "Sharp_LK043T1DG01",
.xres = 480,
.yres = 272,
- .pixclock = 7833600,
+ .pixclock = KHZ2PICOS(7833),
.left_margin = 2,
.right_margin = 2,
.upper_margin = 2,
@@ -249,7 +233,7 @@ static struct fb_videomode known_lcd_panels[] = {
.name = "SP10Q010",
.xres = 320,
.yres = 240,
- .pixclock = 7833600,
+ .pixclock = KHZ2PICOS(7833),
.left_margin = 10,
.right_margin = 10,
.upper_margin = 10,
@@ -261,8 +245,13 @@ static struct fb_videomode known_lcd_panels[] = {
},
};
+static bool da8xx_fb_is_raster_enabled(void)
+{
+ return !!(lcdc_read(LCD_RASTER_CTRL_REG) & LCD_RASTER_ENABLE);
+}
+
/* Enable the Raster Engine of the LCD Controller */
-static inline void lcd_enable_raster(void)
+static void lcd_enable_raster(void)
{
u32 reg;
@@ -284,7 +273,7 @@ static inline void lcd_enable_raster(void)
}
/* Disable the Raster Engine of the LCD Controller */
-static inline void lcd_disable_raster(bool wait_for_frame_done)
+static void lcd_disable_raster(enum da8xx_frame_complete wait_for_frame_done)
{
u32 reg;
int ret;
@@ -296,7 +285,8 @@ static inline void lcd_disable_raster(bool wait_for_frame_done)
/* return if already disabled */
return;
- if ((wait_for_frame_done == true) && (lcd_revision == LCD_VERSION_2)) {
+ if ((wait_for_frame_done == DA8XX_FRAME_WAIT) &&
+ (lcd_revision == LCD_VERSION_2)) {
frame_done_flag = 0;
ret = wait_event_interruptible_timeout(frame_done_wq,
frame_done_flag != 0,
@@ -331,7 +321,7 @@ static void lcd_blit(int load_mode, struct da8xx_fb_par *par)
reg_int = lcdc_read(LCD_INT_ENABLE_SET_REG) |
LCD_V2_END_OF_FRAME0_INT_ENA |
LCD_V2_END_OF_FRAME1_INT_ENA |
- LCD_FRAME_DONE;
+ LCD_FRAME_DONE | LCD_SYNC_LOST;
lcdc_write(reg_int, LCD_INT_ENABLE_SET_REG);
}
reg_dma |= LCD_DUAL_FRAME_BUFFER_ENABLE;
@@ -417,10 +407,25 @@ static void lcd_cfg_horizontal_sync(int back_porch, int pulse_width,
u32 reg;
reg = lcdc_read(LCD_RASTER_TIMING_0_REG) & 0xf;
- reg |= ((back_porch & 0xff) << 24)
- | ((front_porch & 0xff) << 16)
- | ((pulse_width & 0x3f) << 10);
+ reg |= (((back_porch-1) & 0xff) << 24)
+ | (((front_porch-1) & 0xff) << 16)
+ | (((pulse_width-1) & 0x3f) << 10);
lcdc_write(reg, LCD_RASTER_TIMING_0_REG);
+
+ /*
+ * LCDC Version 2 adds some extra bits that increase the allowable
+ * size of the horizontal timing registers.
+ * Remember that the registers use 0 to represent 1, so all values
+ * written to the registers need to be decremented by 1
+ */
+ if (lcd_revision == LCD_VERSION_2) {
+ /* Mask off the bits we want to change */
+ reg = lcdc_read(LCD_RASTER_TIMING_2_REG) & ~0x780000ff;
+ reg |= ((front_porch-1) & 0x300) >> 8;
+ reg |= ((back_porch-1) & 0x300) >> 4;
+ reg |= ((pulse_width-1) & 0x3c0) << 21;
+ lcdc_write(reg, LCD_RASTER_TIMING_2_REG);
+ }
}
static void lcd_cfg_vertical_sync(int back_porch, int pulse_width,
@@ -431,7 +436,7 @@ static void lcd_cfg_vertical_sync(int back_porch, int pulse_width,
reg = lcdc_read(LCD_RASTER_TIMING_1_REG) & 0x3ff;
reg |= ((back_porch & 0xff) << 24)
| ((front_porch & 0xff) << 16)
- | ((pulse_width & 0x3f) << 10);
+ | (((pulse_width-1) & 0x3f) << 10);
lcdc_write(reg, LCD_RASTER_TIMING_1_REG);
}
@@ -488,12 +493,12 @@ static int lcd_cfg_display(const struct lcd_ctrl_config *cfg,
else
reg &= ~LCD_SYNC_EDGE;
- if (panel->sync & FB_SYNC_HOR_HIGH_ACT)
+ if ((panel->sync & FB_SYNC_HOR_HIGH_ACT) == 0)
reg |= LCD_INVERT_LINE_CLOCK;
else
reg &= ~LCD_INVERT_LINE_CLOCK;
- if (panel->sync & FB_SYNC_VERT_HIGH_ACT)
+ if ((panel->sync & FB_SYNC_VERT_HIGH_ACT) == 0)
reg |= LCD_INVERT_FRAME_CLOCK;
else
reg &= ~LCD_INVERT_FRAME_CLOCK;
@@ -565,10 +570,11 @@ static int lcd_cfg_frame_buffer(struct da8xx_fb_par *par, u32 width, u32 height,
break;
case 24:
reg |= LCD_V2_TFT_24BPP_MODE;
+ break;
case 32:
+ reg |= LCD_V2_TFT_24BPP_MODE;
reg |= LCD_V2_TFT_24BPP_UNPACK;
break;
-
case 8:
par->palette_sz = 256 * 2;
break;
@@ -681,11 +687,8 @@ static int fb_setcolreg(unsigned regno, unsigned red, unsigned green,
}
#undef CNVT_TOHW
-static void lcd_reset(struct da8xx_fb_par *par)
+static void da8xx_fb_lcd_reset(void)
{
- /* Disable the Raster if previously Enabled */
- lcd_disable_raster(false);
-
/* DMA has to be disabled */
lcdc_write(0, LCD_DMA_CTRL_REG);
lcdc_write(0, LCD_RASTER_CTRL_REG);
@@ -698,21 +701,76 @@ static void lcd_reset(struct da8xx_fb_par *par)
}
}
-static void lcd_calc_clk_divider(struct da8xx_fb_par *par)
+static int da8xx_fb_config_clk_divider(struct da8xx_fb_par *par,
+ unsigned lcdc_clk_div,
+ unsigned lcdc_clk_rate)
{
- unsigned int lcd_clk, div;
+ int ret;
- lcd_clk = clk_get_rate(par->lcdc_clk);
- div = lcd_clk / par->pxl_clk;
+ if (par->lcdc_clk_rate != lcdc_clk_rate) {
+ ret = clk_set_rate(par->lcdc_clk, lcdc_clk_rate);
+ if (IS_ERR_VALUE(ret)) {
+ dev_err(par->dev,
+ "unable to set clock rate at %u\n",
+ lcdc_clk_rate);
+ return ret;
+ }
+ par->lcdc_clk_rate = clk_get_rate(par->lcdc_clk);
+ }
/* Configure the LCD clock divisor. */
- lcdc_write(LCD_CLK_DIVISOR(div) |
+ lcdc_write(LCD_CLK_DIVISOR(lcdc_clk_div) |
(LCD_RASTER_MODE & 0x1), LCD_CTRL_REG);
if (lcd_revision == LCD_VERSION_2)
lcdc_write(LCD_V2_DMA_CLK_EN | LCD_V2_LIDD_CLK_EN |
LCD_V2_CORE_CLK_EN, LCD_CLK_ENABLE_REG);
+ return 0;
+}
+
+static unsigned int da8xx_fb_calc_clk_divider(struct da8xx_fb_par *par,
+ unsigned pixclock,
+ unsigned *lcdc_clk_rate)
+{
+ unsigned lcdc_clk_div;
+
+ pixclock = PICOS2KHZ(pixclock) * 1000;
+
+ *lcdc_clk_rate = par->lcdc_clk_rate;
+
+ if (pixclock < (*lcdc_clk_rate / CLK_MAX_DIV)) {
+ *lcdc_clk_rate = clk_round_rate(par->lcdc_clk,
+ pixclock * CLK_MAX_DIV);
+ lcdc_clk_div = CLK_MAX_DIV;
+ } else if (pixclock > (*lcdc_clk_rate / CLK_MIN_DIV)) {
+ *lcdc_clk_rate = clk_round_rate(par->lcdc_clk,
+ pixclock * CLK_MIN_DIV);
+ lcdc_clk_div = CLK_MIN_DIV;
+ } else {
+ lcdc_clk_div = *lcdc_clk_rate / pixclock;
+ }
+
+ return lcdc_clk_div;
+}
+
+static int da8xx_fb_calc_config_clk_divider(struct da8xx_fb_par *par,
+ struct fb_videomode *mode)
+{
+ unsigned lcdc_clk_rate;
+ unsigned lcdc_clk_div = da8xx_fb_calc_clk_divider(par, mode->pixclock,
+ &lcdc_clk_rate);
+
+ return da8xx_fb_config_clk_divider(par, lcdc_clk_div, lcdc_clk_rate);
+}
+
+static unsigned da8xx_fb_round_clk(struct da8xx_fb_par *par,
+ unsigned pixclock)
+{
+ unsigned lcdc_clk_div, lcdc_clk_rate;
+
+ lcdc_clk_div = da8xx_fb_calc_clk_divider(par, pixclock, &lcdc_clk_rate);
+ return KHZ2PICOS(lcdc_clk_rate / (1000 * lcdc_clk_div));
}
static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
@@ -721,10 +779,11 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
u32 bpp;
int ret = 0;
- lcd_reset(par);
-
- /* Calculate the divider */
- lcd_calc_clk_divider(par);
+ ret = da8xx_fb_calc_config_clk_divider(par, panel);
+ if (IS_ERR_VALUE(ret)) {
+ dev_err(par->dev, "unable to configure clock\n");
+ return ret;
+ }
if (panel->sync & FB_SYNC_CLK_INVERT)
lcdc_write((lcdc_read(LCD_RASTER_TIMING_2_REG) |
@@ -739,10 +798,10 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
return ret;
/* Configure the vertical and horizontal sync properties. */
- lcd_cfg_vertical_sync(panel->lower_margin, panel->vsync_len,
- panel->upper_margin);
- lcd_cfg_horizontal_sync(panel->right_margin, panel->hsync_len,
- panel->left_margin);
+ lcd_cfg_vertical_sync(panel->upper_margin, panel->vsync_len,
+ panel->lower_margin);
+ lcd_cfg_horizontal_sync(panel->left_margin, panel->hsync_len,
+ panel->right_margin);
/* Configure for display */
ret = lcd_cfg_display(cfg, panel);
@@ -773,7 +832,7 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg)
u32 stat = lcdc_read(LCD_MASKED_STAT_REG);
if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) {
- lcd_disable_raster(false);
+ lcd_disable_raster(DA8XX_FRAME_NOWAIT);
lcdc_write(stat, LCD_MASKED_STAT_REG);
lcd_enable_raster();
} else if (stat & LCD_PL_LOAD_DONE) {
@@ -783,7 +842,7 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg)
* interrupt via the following write to the status register. If
* this is done after then one gets multiple PL done interrupts.
*/
- lcd_disable_raster(false);
+ lcd_disable_raster(DA8XX_FRAME_NOWAIT);
lcdc_write(stat, LCD_MASKED_STAT_REG);
@@ -836,7 +895,7 @@ static irqreturn_t lcdc_irq_handler_rev01(int irq, void *arg)
u32 reg_ras;
if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) {
- lcd_disable_raster(false);
+ lcd_disable_raster(DA8XX_FRAME_NOWAIT);
lcdc_write(stat, LCD_STAT_REG);
lcd_enable_raster();
} else if (stat & LCD_PL_LOAD_DONE) {
@@ -846,7 +905,7 @@ static irqreturn_t lcdc_irq_handler_rev01(int irq, void *arg)
* interrupt via the following write to the status register. If
* this is done after then one gets multiple PL done interrupts.
*/
- lcd_disable_raster(false);
+ lcd_disable_raster(DA8XX_FRAME_NOWAIT);
lcdc_write(stat, LCD_STAT_REG);
@@ -888,6 +947,9 @@ static int fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
int err = 0;
+ struct da8xx_fb_par *par = info->par;
+ int bpp = var->bits_per_pixel >> 3;
+ unsigned long line_size = var->xres_virtual * bpp;
if (var->bits_per_pixel > 16 && lcd_revision == LCD_VERSION_1)
return -EINVAL;
@@ -955,6 +1017,23 @@ static int fb_check_var(struct fb_var_screeninfo *var,
var->green.msb_right = 0;
var->blue.msb_right = 0;
var->transp.msb_right = 0;
+
+ if (line_size * var->yres_virtual > par->vram_size)
+ var->yres_virtual = par->vram_size / line_size;
+
+ if (var->yres > var->yres_virtual)
+ var->yres = var->yres_virtual;
+
+ if (var->xres > var->xres_virtual)
+ var->xres = var->xres_virtual;
+
+ if (var->xres + var->xoffset > var->xres_virtual)
+ var->xoffset = var->xres_virtual - var->xres;
+ if (var->yres + var->yoffset > var->yres_virtual)
+ var->yoffset = var->yres_virtual - var->yres;
+
+ var->pixclock = da8xx_fb_round_clk(par, var->pixclock);
+
return err;
}
@@ -966,10 +1045,10 @@ static int lcd_da8xx_cpufreq_transition(struct notifier_block *nb,
par = container_of(nb, struct da8xx_fb_par, freq_transition);
if (val == CPUFREQ_POSTCHANGE) {
- if (par->lcd_fck_rate != clk_get_rate(par->lcdc_clk)) {
- par->lcd_fck_rate = clk_get_rate(par->lcdc_clk);
- lcd_disable_raster(true);
- lcd_calc_clk_divider(par);
+ if (par->lcdc_clk_rate != clk_get_rate(par->lcdc_clk)) {
+ par->lcdc_clk_rate = clk_get_rate(par->lcdc_clk);
+ lcd_disable_raster(DA8XX_FRAME_WAIT);
+ da8xx_fb_calc_config_clk_divider(par, &par->mode);
if (par->blank == FB_BLANK_UNBLANK)
lcd_enable_raster();
}
@@ -978,7 +1057,7 @@ static int lcd_da8xx_cpufreq_transition(struct notifier_block *nb,
return 0;
}
-static inline int lcd_da8xx_cpufreq_register(struct da8xx_fb_par *par)
+static int lcd_da8xx_cpufreq_register(struct da8xx_fb_par *par)
{
par->freq_transition.notifier_call = lcd_da8xx_cpufreq_transition;
@@ -986,7 +1065,7 @@ static inline int lcd_da8xx_cpufreq_register(struct da8xx_fb_par *par)
CPUFREQ_TRANSITION_NOTIFIER);
}
-static inline void lcd_da8xx_cpufreq_deregister(struct da8xx_fb_par *par)
+static void lcd_da8xx_cpufreq_deregister(struct da8xx_fb_par *par)
{
cpufreq_unregister_notifier(&par->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
@@ -1006,7 +1085,7 @@ static int fb_remove(struct platform_device *dev)
if (par->panel_power_ctrl)
par->panel_power_ctrl(0);
- lcd_disable_raster(true);
+ lcd_disable_raster(DA8XX_FRAME_WAIT);
lcdc_write(0, LCD_RASTER_CTRL_REG);
/* disable DMA */
@@ -1018,12 +1097,9 @@ static int fb_remove(struct platform_device *dev)
par->p_palette_base);
dma_free_coherent(NULL, par->vram_size, par->vram_virt,
par->vram_phys);
- free_irq(par->irq, par);
pm_runtime_put_sync(&dev->dev);
pm_runtime_disable(&dev->dev);
framebuffer_release(info);
- iounmap(da8xx_fb_reg_base);
- release_mem_region(lcdc_regs->start, resource_size(lcdc_regs));
}
return 0;
@@ -1122,7 +1198,7 @@ static int cfb_blank(int blank, struct fb_info *info)
if (par->panel_power_ctrl)
par->panel_power_ctrl(0);
- lcd_disable_raster(true);
+ lcd_disable_raster(DA8XX_FRAME_WAIT);
break;
default:
ret = -EINVAL;
@@ -1183,9 +1259,50 @@ static int da8xx_pan_display(struct fb_var_screeninfo *var,
return ret;
}
+static int da8xxfb_set_par(struct fb_info *info)
+{
+ struct da8xx_fb_par *par = info->par;
+ int ret;
+ bool raster = da8xx_fb_is_raster_enabled();
+
+ if (raster)
+ lcd_disable_raster(DA8XX_FRAME_WAIT);
+
+ fb_var_to_videomode(&par->mode, &info->var);
+
+ par->cfg.bpp = info->var.bits_per_pixel;
+
+ info->fix.visual = (par->cfg.bpp <= 8) ?
+ FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR;
+ info->fix.line_length = (par->mode.xres * par->cfg.bpp) / 8;
+
+ ret = lcd_init(par, &par->cfg, &par->mode);
+ if (ret < 0) {
+ dev_err(par->dev, "lcd init failed\n");
+ return ret;
+ }
+
+ par->dma_start = info->fix.smem_start +
+ info->var.yoffset * info->fix.line_length +
+ info->var.xoffset * info->var.bits_per_pixel / 8;
+ par->dma_end = par->dma_start +
+ info->var.yres * info->fix.line_length - 1;
+
+ lcdc_write(par->dma_start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG);
+ lcdc_write(par->dma_end, LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG);
+ lcdc_write(par->dma_start, LCD_DMA_FRM_BUF_BASE_ADDR_1_REG);
+ lcdc_write(par->dma_end, LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG);
+
+ if (raster)
+ lcd_enable_raster();
+
+ return 0;
+}
+
static struct fb_ops da8xx_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = fb_check_var,
+ .fb_set_par = da8xxfb_set_par,
.fb_setcolreg = fb_setcolreg,
.fb_pan_display = da8xx_pan_display,
.fb_ioctl = fb_ioctl,
@@ -1195,33 +1312,38 @@ static struct fb_ops da8xx_fb_ops = {
.fb_blank = cfb_blank,
};
-/* Calculate and return pixel clock period in pico seconds */
-static unsigned int da8xxfb_pixel_clk_period(struct da8xx_fb_par *par)
+static struct fb_videomode *da8xx_fb_get_videomode(struct platform_device *dev)
{
- unsigned int lcd_clk, div;
- unsigned int configured_pix_clk;
- unsigned long long pix_clk_period_picosec = 1000000000000ULL;
+ struct da8xx_lcdc_platform_data *fb_pdata = dev->dev.platform_data;
+ struct fb_videomode *lcdc_info;
+ int i;
- lcd_clk = clk_get_rate(par->lcdc_clk);
- div = lcd_clk / par->pxl_clk;
- configured_pix_clk = (lcd_clk / div);
+ for (i = 0, lcdc_info = known_lcd_panels;
+ i < ARRAY_SIZE(known_lcd_panels); i++, lcdc_info++) {
+ if (strcmp(fb_pdata->type, lcdc_info->name) == 0)
+ break;
+ }
- do_div(pix_clk_period_picosec, configured_pix_clk);
+ if (i == ARRAY_SIZE(known_lcd_panels)) {
+ dev_err(&dev->dev, "no panel found\n");
+ return NULL;
+ }
+ dev_info(&dev->dev, "found %s panel\n", lcdc_info->name);
- return pix_clk_period_picosec;
+ return lcdc_info;
}
static int fb_probe(struct platform_device *device)
{
struct da8xx_lcdc_platform_data *fb_pdata =
device->dev.platform_data;
+ static struct resource *lcdc_regs;
struct lcd_ctrl_config *lcd_cfg;
struct fb_videomode *lcdc_info;
struct fb_info *da8xx_fb_info;
- struct clk *fb_clk = NULL;
struct da8xx_fb_par *par;
- resource_size_t len;
- int ret, i;
+ struct clk *tmp_lcdc_clk;
+ int ret;
unsigned long ulcm;
if (fb_pdata == NULL) {
@@ -1229,30 +1351,19 @@ static int fb_probe(struct platform_device *device)
return -ENOENT;
}
- lcdc_regs = platform_get_resource(device, IORESOURCE_MEM, 0);
- if (!lcdc_regs) {
- dev_err(&device->dev,
- "Can not get memory resource for LCD controller\n");
- return -ENOENT;
- }
-
- len = resource_size(lcdc_regs);
+ lcdc_info = da8xx_fb_get_videomode(device);
+ if (lcdc_info == NULL)
+ return -ENODEV;
- lcdc_regs = request_mem_region(lcdc_regs->start, len, lcdc_regs->name);
- if (!lcdc_regs)
- return -EBUSY;
-
- da8xx_fb_reg_base = ioremap(lcdc_regs->start, len);
- if (!da8xx_fb_reg_base) {
- ret = -EBUSY;
- goto err_request_mem;
- }
+ lcdc_regs = platform_get_resource(device, IORESOURCE_MEM, 0);
+ da8xx_fb_reg_base = devm_ioremap_resource(&device->dev, lcdc_regs);
+ if (IS_ERR(da8xx_fb_reg_base))
+ return PTR_ERR(da8xx_fb_reg_base);
- fb_clk = clk_get(&device->dev, "fck");
- if (IS_ERR(fb_clk)) {
+ tmp_lcdc_clk = devm_clk_get(&device->dev, "fck");
+ if (IS_ERR(tmp_lcdc_clk)) {
dev_err(&device->dev, "Can not get device clock\n");
- ret = -ENODEV;
- goto err_ioremap;
+ return PTR_ERR(tmp_lcdc_clk);
}
pm_runtime_enable(&device->dev);
@@ -1275,22 +1386,12 @@ static int fb_probe(struct platform_device *device)
break;
}
- for (i = 0, lcdc_info = known_lcd_panels;
- i < ARRAY_SIZE(known_lcd_panels);
- i++, lcdc_info++) {
- if (strcmp(fb_pdata->type, lcdc_info->name) == 0)
- break;
- }
+ lcd_cfg = (struct lcd_ctrl_config *)fb_pdata->controller_data;
- if (i == ARRAY_SIZE(known_lcd_panels)) {
- dev_err(&device->dev, "GLCD: No valid panel found\n");
- ret = -ENODEV;
+ if (!lcd_cfg) {
+ ret = -EINVAL;
goto err_pm_runtime_disable;
- } else
- dev_info(&device->dev, "GLCD: Found %s panel\n",
- fb_pdata->type);
-
- lcd_cfg = (struct lcd_ctrl_config *)fb_pdata->controller_data;
+ }
da8xx_fb_info = framebuffer_alloc(sizeof(struct da8xx_fb_par),
&device->dev);
@@ -1301,21 +1402,18 @@ static int fb_probe(struct platform_device *device)
}
par = da8xx_fb_info->par;
- par->lcdc_clk = fb_clk;
-#ifdef CONFIG_CPU_FREQ
- par->lcd_fck_rate = clk_get_rate(fb_clk);
-#endif
- par->pxl_clk = lcdc_info->pixclock;
+ par->dev = &device->dev;
+ par->lcdc_clk = tmp_lcdc_clk;
+ par->lcdc_clk_rate = clk_get_rate(par->lcdc_clk);
if (fb_pdata->panel_power_ctrl) {
par->panel_power_ctrl = fb_pdata->panel_power_ctrl;
par->panel_power_ctrl(1);
}
- if (lcd_init(par, lcd_cfg, lcdc_info) < 0) {
- dev_err(&device->dev, "lcd_init failed\n");
- ret = -EFAULT;
- goto err_release_fb;
- }
+ fb_videomode_to_var(&da8xx_fb_var, lcdc_info);
+ par->cfg = *lcd_cfg;
+
+ da8xx_fb_lcd_reset();
/* allocate frame buffer */
par->vram_size = lcdc_info->xres * lcdc_info->yres * lcd_cfg->bpp;
@@ -1363,27 +1461,10 @@ static int fb_probe(struct platform_device *device)
goto err_release_pl_mem;
}
- /* Initialize par */
- da8xx_fb_info->var.bits_per_pixel = lcd_cfg->bpp;
-
- da8xx_fb_var.xres = lcdc_info->xres;
- da8xx_fb_var.xres_virtual = lcdc_info->xres;
-
- da8xx_fb_var.yres = lcdc_info->yres;
- da8xx_fb_var.yres_virtual = lcdc_info->yres * LCD_NUM_BUFFERS;
-
da8xx_fb_var.grayscale =
lcd_cfg->panel_shade == MONOCHROME ? 1 : 0;
da8xx_fb_var.bits_per_pixel = lcd_cfg->bpp;
- da8xx_fb_var.hsync_len = lcdc_info->hsync_len;
- da8xx_fb_var.vsync_len = lcdc_info->vsync_len;
- da8xx_fb_var.right_margin = lcdc_info->right_margin;
- da8xx_fb_var.left_margin = lcdc_info->left_margin;
- da8xx_fb_var.lower_margin = lcdc_info->lower_margin;
- da8xx_fb_var.upper_margin = lcdc_info->upper_margin;
- da8xx_fb_var.pixclock = da8xxfb_pixel_clk_period(par);
-
/* Initialize fbinfo */
da8xx_fb_info->flags = FBINFO_FLAG_DEFAULT;
da8xx_fb_info->fix = da8xx_fb_fix;
@@ -1433,8 +1514,8 @@ static int fb_probe(struct platform_device *device)
lcdc_irq_handler = lcdc_irq_handler_rev02;
}
- ret = request_irq(par->irq, lcdc_irq_handler, 0,
- DRIVER_NAME, par);
+ ret = devm_request_irq(&device->dev, par->irq, lcdc_irq_handler, 0,
+ DRIVER_NAME, par);
if (ret)
goto irq_freq;
return 0;
@@ -1463,12 +1544,6 @@ err_pm_runtime_disable:
pm_runtime_put_sync(&device->dev);
pm_runtime_disable(&device->dev);
-err_ioremap:
- iounmap(da8xx_fb_reg_base);
-
-err_request_mem:
- release_mem_region(lcdc_regs->start, len);
-
return ret;
}
@@ -1546,7 +1621,7 @@ static int fb_suspend(struct platform_device *dev, pm_message_t state)
par->panel_power_ctrl(0);
fb_set_suspend(info, 1);
- lcd_disable_raster(true);
+ lcd_disable_raster(DA8XX_FRAME_WAIT);
lcd_context_save();
pm_runtime_put_sync(&dev->dev);
console_unlock();
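
The da8xx-fb rework above switches the panel tables to the fbdev convention: fb_videomode.pixclock is a period in picoseconds, so raw Hz values become KHZ2PICOS() and the divider code converts back with PICOS2KHZ() before choosing a divider between CLK_MIN_DIV and CLK_MAX_DIV (the driver additionally re-rounds the functional clock with clk_round_rate() when the pixel clock falls outside that range). A stand-alone sketch of just the conversion and clamping, with an assumed 120 MHz functional clock; the macros match the definitions in linux/fb.h:

#include <stdio.h>

#define KHZ2PICOS(khz)  (1000000000UL / (khz))
#define PICOS2KHZ(ps)   (1000000000UL / (ps))

#define CLK_MIN_DIV 2
#define CLK_MAX_DIV 255

static unsigned int calc_div(unsigned long lcdc_clk_hz, unsigned long pixclock_ps)
{
        unsigned long pixclock_hz = PICOS2KHZ(pixclock_ps) * 1000;
        unsigned long div = lcdc_clk_hz / pixclock_hz;

        /* the hardware divider only supports 2..255 */
        if (div < CLK_MIN_DIV)
                div = CLK_MIN_DIV;
        if (div > CLK_MAX_DIV)
                div = CLK_MAX_DIV;
        return (unsigned int)div;
}

int main(void)
{
        unsigned long ps = KHZ2PICOS(7833);     /* Sharp LK043T1DG01 panel */

        printf("pixclock = %lu ps, divider from 120 MHz = %u\n",
               ps, calc_div(120000000UL, ps));
        return 0;
}
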
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index 50fe668c617..7f9ff75d0db 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -15,6 +15,7 @@
#include <linux/dmi.h>
#include <linux/pci.h>
#include <video/vga.h>
+#include <asm/sysfb.h>
static bool request_mem_succeeded = false;
@@ -38,223 +39,6 @@ static struct fb_fix_screeninfo efifb_fix = {
.visual = FB_VISUAL_TRUECOLOR,
};
-enum {
- M_I17, /* 17-Inch iMac */
- M_I20, /* 20-Inch iMac */
- M_I20_SR, /* 20-Inch iMac (Santa Rosa) */
- M_I24, /* 24-Inch iMac */
- M_I24_8_1, /* 24-Inch iMac, 8,1th gen */
- M_I24_10_1, /* 24-Inch iMac, 10,1th gen */
- M_I27_11_1, /* 27-Inch iMac, 11,1th gen */
- M_MINI, /* Mac Mini */
- M_MINI_3_1, /* Mac Mini, 3,1th gen */
- M_MINI_4_1, /* Mac Mini, 4,1th gen */
- M_MB, /* MacBook */
- M_MB_2, /* MacBook, 2nd rev. */
- M_MB_3, /* MacBook, 3rd rev. */
- M_MB_5_1, /* MacBook, 5th rev. */
- M_MB_6_1, /* MacBook, 6th rev. */
- M_MB_7_1, /* MacBook, 7th rev. */
- M_MB_SR, /* MacBook, 2nd gen, (Santa Rosa) */
- M_MBA, /* MacBook Air */
- M_MBA_3, /* Macbook Air, 3rd rev */
- M_MBP, /* MacBook Pro */
- M_MBP_2, /* MacBook Pro 2nd gen */
- M_MBP_2_2, /* MacBook Pro 2,2nd gen */
- M_MBP_SR, /* MacBook Pro (Santa Rosa) */
- M_MBP_4, /* MacBook Pro, 4th gen */
- M_MBP_5_1, /* MacBook Pro, 5,1th gen */
- M_MBP_5_2, /* MacBook Pro, 5,2th gen */
- M_MBP_5_3, /* MacBook Pro, 5,3rd gen */
- M_MBP_6_1, /* MacBook Pro, 6,1th gen */
- M_MBP_6_2, /* MacBook Pro, 6,2th gen */
- M_MBP_7_1, /* MacBook Pro, 7,1th gen */
- M_MBP_8_2, /* MacBook Pro, 8,2nd gen */
- M_UNKNOWN /* placeholder */
-};
-
-#define OVERRIDE_NONE 0x0
-#define OVERRIDE_BASE 0x1
-#define OVERRIDE_STRIDE 0x2
-#define OVERRIDE_HEIGHT 0x4
-#define OVERRIDE_WIDTH 0x8
-
-static struct efifb_dmi_info {
- char *optname;
- unsigned long base;
- int stride;
- int width;
- int height;
- int flags;
-} dmi_list[] __initdata = {
- [M_I17] = { "i17", 0x80010000, 1472 * 4, 1440, 900, OVERRIDE_NONE },
- [M_I20] = { "i20", 0x80010000, 1728 * 4, 1680, 1050, OVERRIDE_NONE }, /* guess */
- [M_I20_SR] = { "imac7", 0x40010000, 1728 * 4, 1680, 1050, OVERRIDE_NONE },
- [M_I24] = { "i24", 0x80010000, 2048 * 4, 1920, 1200, OVERRIDE_NONE }, /* guess */
- [M_I24_8_1] = { "imac8", 0xc0060000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
- [M_I24_10_1] = { "imac10", 0xc0010000, 2048 * 4, 1920, 1080, OVERRIDE_NONE },
- [M_I27_11_1] = { "imac11", 0xc0010000, 2560 * 4, 2560, 1440, OVERRIDE_NONE },
- [M_MINI]= { "mini", 0x80000000, 2048 * 4, 1024, 768, OVERRIDE_NONE },
- [M_MINI_3_1] = { "mini31", 0x40010000, 1024 * 4, 1024, 768, OVERRIDE_NONE },
- [M_MINI_4_1] = { "mini41", 0xc0010000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
- [M_MB] = { "macbook", 0x80000000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
- [M_MB_5_1] = { "macbook51", 0x80010000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
- [M_MB_6_1] = { "macbook61", 0x80010000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
- [M_MB_7_1] = { "macbook71", 0x80010000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
- [M_MBA] = { "mba", 0x80000000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
- /* 11" Macbook Air 3,1 passes the wrong stride */
- [M_MBA_3] = { "mba3", 0, 2048 * 4, 0, 0, OVERRIDE_STRIDE },
- [M_MBP] = { "mbp", 0x80010000, 1472 * 4, 1440, 900, OVERRIDE_NONE },
- [M_MBP_2] = { "mbp2", 0, 0, 0, 0, OVERRIDE_NONE }, /* placeholder */
- [M_MBP_2_2] = { "mbp22", 0x80010000, 1472 * 4, 1440, 900, OVERRIDE_NONE },
- [M_MBP_SR] = { "mbp3", 0x80030000, 2048 * 4, 1440, 900, OVERRIDE_NONE },
- [M_MBP_4] = { "mbp4", 0xc0060000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
- [M_MBP_5_1] = { "mbp51", 0xc0010000, 2048 * 4, 1440, 900, OVERRIDE_NONE },
- [M_MBP_5_2] = { "mbp52", 0xc0010000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
- [M_MBP_5_3] = { "mbp53", 0xd0010000, 2048 * 4, 1440, 900, OVERRIDE_NONE },
- [M_MBP_6_1] = { "mbp61", 0x90030000, 2048 * 4, 1920, 1200, OVERRIDE_NONE },
- [M_MBP_6_2] = { "mbp62", 0x90030000, 2048 * 4, 1680, 1050, OVERRIDE_NONE },
- [M_MBP_7_1] = { "mbp71", 0xc0010000, 2048 * 4, 1280, 800, OVERRIDE_NONE },
- [M_MBP_8_2] = { "mbp82", 0x90010000, 1472 * 4, 1440, 900, OVERRIDE_NONE },
- [M_UNKNOWN] = { NULL, 0, 0, 0, 0, OVERRIDE_NONE }
-};
-
-static int set_system(const struct dmi_system_id *id);
-
-#define EFIFB_DMI_SYSTEM_ID(vendor, name, enumid) \
- { set_system, name, { \
- DMI_MATCH(DMI_BIOS_VENDOR, vendor), \
- DMI_MATCH(DMI_PRODUCT_NAME, name) }, \
- &dmi_list[enumid] }
-
-static const struct dmi_system_id dmi_system_table[] __initconst = {
- EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac4,1", M_I17),
- /* At least one of these two will be right; maybe both? */
- EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac5,1", M_I20),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac5,1", M_I20),
- /* At least one of these two will be right; maybe both? */
- EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "iMac6,1", M_I24),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac6,1", M_I24),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac7,1", M_I20_SR),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac8,1", M_I24_8_1),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac10,1", M_I24_10_1),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "iMac11,1", M_I27_11_1),
- EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "Macmini1,1", M_MINI),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini3,1", M_MINI_3_1),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "Macmini4,1", M_MINI_4_1),
- EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook1,1", M_MB),
- /* At least one of these two will be right; maybe both? */
- EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook2,1", M_MB),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook2,1", M_MB),
- /* At least one of these two will be right; maybe both? */
- EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBook3,1", M_MB),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook3,1", M_MB),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook4,1", M_MB),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook5,1", M_MB_5_1),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook6,1", M_MB_6_1),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBook7,1", M_MB_7_1),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir1,1", M_MBA),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookAir3,1", M_MBA_3),
- EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro1,1", M_MBP),
- EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,1", M_MBP_2),
- EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro2,2", M_MBP_2_2),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro2,1", M_MBP_2),
- EFIFB_DMI_SYSTEM_ID("Apple Computer, Inc.", "MacBookPro3,1", M_MBP_SR),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro3,1", M_MBP_SR),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro4,1", M_MBP_4),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,1", M_MBP_5_1),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,2", M_MBP_5_2),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro5,3", M_MBP_5_3),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,1", M_MBP_6_1),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro6,2", M_MBP_6_2),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro7,1", M_MBP_7_1),
- EFIFB_DMI_SYSTEM_ID("Apple Inc.", "MacBookPro8,2", M_MBP_8_2),
- {},
-};
-
-#define choose_value(dmivalue, fwvalue, field, flags) ({ \
- typeof(fwvalue) _ret_ = fwvalue; \
- if ((flags) & (field)) \
- _ret_ = dmivalue; \
- else if ((fwvalue) == 0) \
- _ret_ = dmivalue; \
- _ret_; \
- })
-
-static int set_system(const struct dmi_system_id *id)
-{
- struct efifb_dmi_info *info = id->driver_data;
-
- if (info->base == 0 && info->height == 0 && info->width == 0
- && info->stride == 0)
- return 0;
-
- /* Trust the bootloader over the DMI tables */
- if (screen_info.lfb_base == 0) {
-#if defined(CONFIG_PCI)
- struct pci_dev *dev = NULL;
- int found_bar = 0;
-#endif
- if (info->base) {
- screen_info.lfb_base = choose_value(info->base,
- screen_info.lfb_base, OVERRIDE_BASE,
- info->flags);
-
-#if defined(CONFIG_PCI)
- /* make sure that the address in the table is actually
- * on a VGA device's PCI BAR */
-
- for_each_pci_dev(dev) {
- int i;
- if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
- continue;
- for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
- resource_size_t start, end;
-
- start = pci_resource_start(dev, i);
- if (start == 0)
- break;
- end = pci_resource_end(dev, i);
- if (screen_info.lfb_base >= start &&
- screen_info.lfb_base < end) {
- found_bar = 1;
- }
- }
- }
- if (!found_bar)
- screen_info.lfb_base = 0;
-#endif
- }
- }
- if (screen_info.lfb_base) {
- screen_info.lfb_linelength = choose_value(info->stride,
- screen_info.lfb_linelength, OVERRIDE_STRIDE,
- info->flags);
- screen_info.lfb_width = choose_value(info->width,
- screen_info.lfb_width, OVERRIDE_WIDTH,
- info->flags);
- screen_info.lfb_height = choose_value(info->height,
- screen_info.lfb_height, OVERRIDE_HEIGHT,
- info->flags);
- if (screen_info.orig_video_isVGA == 0)
- screen_info.orig_video_isVGA = VIDEO_TYPE_EFI;
- } else {
- screen_info.lfb_linelength = 0;
- screen_info.lfb_width = 0;
- screen_info.lfb_height = 0;
- screen_info.orig_video_isVGA = 0;
- return 0;
- }
-
- printk(KERN_INFO "efifb: dmi detected %s - framebuffer at 0x%08x "
- "(%dx%d, stride %d)\n", id->ident,
- screen_info.lfb_base, screen_info.lfb_width,
- screen_info.lfb_height, screen_info.lfb_linelength);
-
-
- return 1;
-}
-
static int efifb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
@@ -288,6 +72,7 @@ static void efifb_destroy(struct fb_info *info)
if (request_mem_succeeded)
release_mem_region(info->apertures->ranges[0].base,
info->apertures->ranges[0].size);
+ fb_dealloc_cmap(&info->cmap);
framebuffer_release(info);
}
@@ -312,7 +97,7 @@ void vga_set_default_device(struct pci_dev *pdev)
default_vga = pdev;
}
-static int __init efifb_setup(char *options)
+static int efifb_setup(char *options)
{
char *this_opt;
int i;
@@ -323,12 +108,12 @@ static int __init efifb_setup(char *options)
if (!*this_opt) continue;
for (i = 0; i < M_UNKNOWN; i++) {
- if (!strcmp(this_opt, dmi_list[i].optname) &&
- dmi_list[i].base != 0) {
- screen_info.lfb_base = dmi_list[i].base;
- screen_info.lfb_linelength = dmi_list[i].stride;
- screen_info.lfb_width = dmi_list[i].width;
- screen_info.lfb_height = dmi_list[i].height;
+ if (!strcmp(this_opt, efifb_dmi_list[i].optname) &&
+ efifb_dmi_list[i].base != 0) {
+ screen_info.lfb_base = efifb_dmi_list[i].base;
+ screen_info.lfb_linelength = efifb_dmi_list[i].stride;
+ screen_info.lfb_width = efifb_dmi_list[i].width;
+ screen_info.lfb_height = efifb_dmi_list[i].height;
}
}
if (!strncmp(this_opt, "base:", 5))
@@ -369,13 +154,28 @@ static int __init efifb_setup(char *options)
return 0;
}
-static int __init efifb_probe(struct platform_device *dev)
+static int efifb_probe(struct platform_device *dev)
{
struct fb_info *info;
int err;
unsigned int size_vmode;
unsigned int size_remap;
unsigned int size_total;
+ char *option = NULL;
+
+ if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
+ return -ENODEV;
+
+ if (fb_get_options("efifb", &option))
+ return -ENODEV;
+ efifb_setup(option);
+
+ /* We don't get linelength from UGA Draw Protocol, only from
+ * EFI Graphics Protocol. So if it's not in DMI, and it's not
+ * passed in from the user, we really can't use the framebuffer.
+ */
+ if (!screen_info.lfb_linelength)
+ return -ENODEV;
if (!screen_info.lfb_depth)
screen_info.lfb_depth = 32;
@@ -539,55 +339,12 @@ err_release_mem:
}
static struct platform_driver efifb_driver = {
- .driver = {
- .name = "efifb",
+ .driver = {
+ .name = "efi-framebuffer",
+ .owner = THIS_MODULE,
},
+ .probe = efifb_probe,
};
-static struct platform_device efifb_device = {
- .name = "efifb",
-};
-
-static int __init efifb_init(void)
-{
- int ret;
- char *option = NULL;
-
- if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
- !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
- dmi_check_system(dmi_system_table);
-
- if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
- return -ENODEV;
-
- if (fb_get_options("efifb", &option))
- return -ENODEV;
- efifb_setup(option);
-
- /* We don't get linelength from UGA Draw Protocol, only from
- * EFI Graphics Protocol. So if it's not in DMI, and it's not
- * passed in from the user, we really can't use the framebuffer.
- */
- if (!screen_info.lfb_linelength)
- return -ENODEV;
-
- ret = platform_device_register(&efifb_device);
- if (ret)
- return ret;
-
- /*
- * This is not just an optimization. We will interfere
- * with a real driver if we get reprobed, so don't allow
- * it.
- */
- ret = platform_driver_probe(&efifb_driver, efifb_probe);
- if (ret) {
- platform_device_unregister(&efifb_device);
- return ret;
- }
-
- return ret;
-}
-module_init(efifb_init);
-
+module_platform_driver(efifb_driver);
MODULE_LICENSE("GPL");
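The efifb changes above drop the driver's private efifb_init()/platform_device_register() bootstrapping, move the screen_info checks and the efifb_setup() call into efifb_probe(), and rely on module_platform_driver() for the module glue; the "efi-framebuffer" platform device is presumably registered elsewhere by the platform's EFI setup code rather than by the driver itself. For illustration only (not part of this patch), a minimal platform driver reduced with the same helper could look like the sketch below; the demofb_* names are hypothetical.

#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical example driver, for illustration only. */
static int demofb_probe(struct platform_device *pdev)
{
        dev_info(&pdev->dev, "probed\n");
        return 0;
}

static struct platform_driver demofb_driver = {
        .driver = {
                .name   = "demofb",
                .owner  = THIS_MODULE,
        },
        .probe  = demofb_probe,
};

/*
 * module_platform_driver() generates the module_init()/module_exit()
 * wrappers that call platform_driver_register()/unregister(), replacing
 * the hand-rolled efifb_init()-style boilerplate removed above.
 */
module_platform_driver(demofb_driver);

MODULE_LICENSE("GPL");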
diff --git a/drivers/video/exynos/exynos_mipi_dsi_lowlevel.c b/drivers/video/exynos/exynos_mipi_dsi_lowlevel.c
index 15c5abd408d..c148d06540c 100644
--- a/drivers/video/exynos/exynos_mipi_dsi_lowlevel.c
+++ b/drivers/video/exynos/exynos_mipi_dsi_lowlevel.c
@@ -27,6 +27,7 @@
#include <video/exynos_mipi_dsim.h>
#include "exynos_mipi_dsi_regs.h"
+#include "exynos_mipi_dsi_lowlevel.h"
void exynos_mipi_dsi_func_reset(struct mipi_dsim_device *dsim)
{
diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
index 5c3960da755..f89245b8ba8 100644
--- a/drivers/video/fbcmap.c
+++ b/drivers/video/fbcmap.c
@@ -285,13 +285,8 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
rc = -ENODEV;
goto out;
}
- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
- !info->fbops->fb_setcmap)) {
- rc = -EINVAL;
- goto out1;
- }
+
rc = fb_set_cmap(&umap, info);
-out1:
unlock_fb_info(info);
out:
fb_dealloc_cmap(&umap);
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 36e1fe21b9b..dacaf74256a 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -43,8 +43,12 @@
#define FBPIXMAPSIZE (1024 * 8)
static DEFINE_MUTEX(registration_lock);
+
struct fb_info *registered_fb[FB_MAX] __read_mostly;
+EXPORT_SYMBOL(registered_fb);
+
int num_registered_fb __read_mostly;
+EXPORT_SYMBOL(num_registered_fb);
static struct fb_info *get_fb_info(unsigned int idx)
{
@@ -182,6 +186,7 @@ char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size
return addr;
}
+EXPORT_SYMBOL(fb_get_buffer_offset);
#ifdef CONFIG_LOGO
@@ -669,6 +674,7 @@ int fb_show_logo(struct fb_info *info, int rotate)
int fb_prepare_logo(struct fb_info *info, int rotate) { return 0; }
int fb_show_logo(struct fb_info *info, int rotate) { return 0; }
#endif /* CONFIG_LOGO */
+EXPORT_SYMBOL(fb_show_logo);
static void *fb_seq_start(struct seq_file *m, loff_t *pos)
{
@@ -909,6 +915,7 @@ fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var)
info->var.vmode &= ~FB_VMODE_YWRAP;
return 0;
}
+EXPORT_SYMBOL(fb_pan_display);
static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var,
u32 activate)
@@ -1042,6 +1049,7 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
done:
return ret;
}
+EXPORT_SYMBOL(fb_set_var);
int
fb_blank(struct fb_info *info, int blank)
@@ -1073,6 +1081,7 @@ fb_blank(struct fb_info *info, int blank)
return ret;
}
+EXPORT_SYMBOL(fb_blank);
static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
@@ -1745,6 +1754,7 @@ register_framebuffer(struct fb_info *fb_info)
return ret;
}
+EXPORT_SYMBOL(register_framebuffer);
/**
* unregister_framebuffer - releases a frame buffer device
@@ -1773,6 +1783,7 @@ unregister_framebuffer(struct fb_info *fb_info)
return ret;
}
+EXPORT_SYMBOL(unregister_framebuffer);
/**
* fb_set_suspend - low level driver signals suspend
@@ -1796,6 +1807,7 @@ void fb_set_suspend(struct fb_info *info, int state)
fb_notifier_call_chain(FB_EVENT_RESUME, &event);
}
}
+EXPORT_SYMBOL(fb_set_suspend);
/**
* fbmem_init - init frame buffer subsystem
@@ -1912,6 +1924,7 @@ int fb_get_options(const char *name, char **option)
return retval;
}
+EXPORT_SYMBOL(fb_get_options);
#ifndef MODULE
/**
@@ -1959,20 +1972,4 @@ static int __init video_setup(char *options)
__setup("video=", video_setup);
#endif
- /*
- * Visible symbols for modules
- */
-
-EXPORT_SYMBOL(register_framebuffer);
-EXPORT_SYMBOL(unregister_framebuffer);
-EXPORT_SYMBOL(num_registered_fb);
-EXPORT_SYMBOL(registered_fb);
-EXPORT_SYMBOL(fb_show_logo);
-EXPORT_SYMBOL(fb_set_var);
-EXPORT_SYMBOL(fb_blank);
-EXPORT_SYMBOL(fb_pan_display);
-EXPORT_SYMBOL(fb_get_buffer_offset);
-EXPORT_SYMBOL(fb_set_suspend);
-EXPORT_SYMBOL(fb_get_options);
-
MODULE_LICENSE("GPL");
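The fbmem.c hunks above move each EXPORT_SYMBOL() annotation next to the function or variable it exports instead of keeping them in a block at the end of the file, which matches the usual kernel convention and makes the export status visible at the definition site. A generic sketch of the pattern (foo_counter and foo_do_work are made-up names):

#include <linux/export.h>

int foo_counter;                        /* hypothetical exported variable */
EXPORT_SYMBOL(foo_counter);             /* export sits right after the definition */

int foo_do_work(int arg)                /* hypothetical exported function */
{
        return arg + foo_counter;
}
EXPORT_SYMBOL(foo_do_work);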
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 40178338b61..9e758a8f890 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -22,6 +22,7 @@
*/
#include <linux/bitops.h>
+#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/hdmi.h>
@@ -52,7 +53,7 @@ int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
frame->type = HDMI_INFOFRAME_TYPE_AVI;
frame->version = 2;
- frame->length = 13;
+ frame->length = HDMI_AVI_INFOFRAME_SIZE;
return 0;
}
@@ -83,7 +84,7 @@ ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
if (size < length)
return -ENOSPC;
- memset(buffer, 0, length);
+ memset(buffer, 0, size);
ptr[0] = frame->type;
ptr[1] = frame->version;
@@ -95,13 +96,18 @@ ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
ptr[0] = ((frame->colorspace & 0x3) << 5) | (frame->scan_mode & 0x3);
- if (frame->active_info_valid)
+ /*
+ * Data byte 1, bit 4 has to be set if we provide the active format
+ * aspect ratio
+ */
+ if (frame->active_aspect & 0xf)
ptr[0] |= BIT(4);
- if (frame->horizontal_bar_valid)
+ /* Bit 3 and 2 indicate if we transmit horizontal/vertical bar data */
+ if (frame->top_bar || frame->bottom_bar)
ptr[0] |= BIT(3);
- if (frame->vertical_bar_valid)
+ if (frame->left_bar || frame->right_bar)
ptr[0] |= BIT(2);
ptr[1] = ((frame->colorimetry & 0x3) << 6) |
@@ -151,7 +157,7 @@ int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
frame->type = HDMI_INFOFRAME_TYPE_SPD;
frame->version = 1;
- frame->length = 25;
+ frame->length = HDMI_SPD_INFOFRAME_SIZE;
strncpy(frame->vendor, vendor, sizeof(frame->vendor));
strncpy(frame->product, product, sizeof(frame->product));
@@ -185,7 +191,7 @@ ssize_t hdmi_spd_infoframe_pack(struct hdmi_spd_infoframe *frame, void *buffer,
if (size < length)
return -ENOSPC;
- memset(buffer, 0, length);
+ memset(buffer, 0, size);
ptr[0] = frame->type;
ptr[1] = frame->version;
@@ -218,7 +224,7 @@ int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame)
frame->type = HDMI_INFOFRAME_TYPE_AUDIO;
frame->version = 1;
- frame->length = 10;
+ frame->length = HDMI_AUDIO_INFOFRAME_SIZE;
return 0;
}
@@ -250,7 +256,7 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
if (size < length)
return -ENOSPC;
- memset(buffer, 0, length);
+ memset(buffer, 0, size);
if (frame->channels >= 2)
channels = frame->channels - 1;
@@ -282,9 +288,33 @@ ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
/**
- * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary
- * buffer
+ * hdmi_vendor_infoframe_init() - initialize an HDMI vendor infoframe
* @frame: HDMI vendor infoframe
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+
+ frame->type = HDMI_INFOFRAME_TYPE_VENDOR;
+ frame->version = 1;
+
+ frame->oui = HDMI_IEEE_OUI;
+
+ /*
+ * 0 is a valid value for s3d_struct, so we use a special "not set"
+ * value
+ */
+ frame->s3d_struct = HDMI_3D_STRUCTURE_INVALID;
+
+ return 0;
+}
+EXPORT_SYMBOL(hdmi_vendor_infoframe_init);
+
+/**
+ * hdmi_vendor_infoframe_pack() - write a HDMI vendor infoframe to binary buffer
+ * @frame: HDMI infoframe
* @buffer: destination buffer
* @size: size of buffer
*
@@ -297,27 +327,110 @@ EXPORT_SYMBOL(hdmi_audio_infoframe_pack);
* error code on failure.
*/
ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
- void *buffer, size_t size)
+ void *buffer, size_t size)
{
u8 *ptr = buffer;
size_t length;
+ /* empty info frame */
+ if (frame->vic == 0 && frame->s3d_struct == HDMI_3D_STRUCTURE_INVALID)
+ return -EINVAL;
+
+ /* only one of those can be supplied */
+ if (frame->vic != 0 && frame->s3d_struct != HDMI_3D_STRUCTURE_INVALID)
+ return -EINVAL;
+
+ /* for side by side (half) we also need to provide 3D_Ext_Data */
+ if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
+ frame->length = 6;
+ else
+ frame->length = 5;
+
length = HDMI_INFOFRAME_HEADER_SIZE + frame->length;
if (size < length)
return -ENOSPC;
- memset(buffer, 0, length);
+ memset(buffer, 0, size);
ptr[0] = frame->type;
ptr[1] = frame->version;
ptr[2] = frame->length;
ptr[3] = 0; /* checksum */
- memcpy(&ptr[HDMI_INFOFRAME_HEADER_SIZE], frame->data, frame->length);
+ /* HDMI OUI */
+ ptr[4] = 0x03;
+ ptr[5] = 0x0c;
+ ptr[6] = 0x00;
+
+ if (frame->vic) {
+ ptr[7] = 0x1 << 5; /* video format */
+ ptr[8] = frame->vic;
+ } else {
+ ptr[7] = 0x2 << 5; /* video format */
+ ptr[8] = (frame->s3d_struct & 0xf) << 4;
+ if (frame->s3d_struct >= HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF)
+ ptr[9] = (frame->s3d_ext_data & 0xf) << 4;
+ }
hdmi_infoframe_checksum(buffer, length);
return length;
}
EXPORT_SYMBOL(hdmi_vendor_infoframe_pack);
+
+/*
+ * hdmi_vendor_any_infoframe_pack() - write a vendor infoframe to binary buffer
+ */
+static ssize_t
+hdmi_vendor_any_infoframe_pack(union hdmi_vendor_any_infoframe *frame,
+ void *buffer, size_t size)
+{
+ /* we only know about HDMI vendor infoframes */
+ if (frame->any.oui != HDMI_IEEE_OUI)
+ return -EINVAL;
+
+ return hdmi_vendor_infoframe_pack(&frame->hdmi, buffer, size);
+}
+
+/**
+ * hdmi_infoframe_pack() - write a HDMI infoframe to binary buffer
+ * @frame: HDMI infoframe
+ * @buffer: destination buffer
+ * @size: size of buffer
+ *
+ * Packs the information contained in the @frame structure into a binary
+ * representation that can be written into the corresponding controller
+ * registers. Also computes the checksum as required by section 5.3.5 of
+ * the HDMI 1.4 specification.
+ *
+ * Returns the number of bytes packed into the binary buffer or a negative
+ * error code on failure.
+ */
+ssize_t
+hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size)
+{
+ ssize_t length;
+
+ switch (frame->any.type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
+ length = hdmi_avi_infoframe_pack(&frame->avi, buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_SPD:
+ length = hdmi_spd_infoframe_pack(&frame->spd, buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_AUDIO:
+ length = hdmi_audio_infoframe_pack(&frame->audio, buffer, size);
+ break;
+ case HDMI_INFOFRAME_TYPE_VENDOR:
+ length = hdmi_vendor_any_infoframe_pack(&frame->vendor,
+ buffer, size);
+ break;
+ default:
+ WARN(1, "Bad infoframe type %d\n", frame->any.type);
+ length = -EINVAL;
+ }
+
+ return length;
+}
+EXPORT_SYMBOL(hdmi_infoframe_pack);
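The hdmi.c additions give the vendor infoframe a real init helper, pack the HDMI IEEE OUI and the VIC/3D payload bytes explicitly, and add a generic hdmi_infoframe_pack() that switches on frame->any.type and dispatches to the per-type pack routines. As a usage sketch only (the buffer size and VIC value are arbitrary, error handling trimmed), a caller working through the union API might look like this:

#include <linux/hdmi.h>

static ssize_t pack_vendor_frame_example(void *buf, size_t len)
{
        union hdmi_infoframe frame;

        /* Initialize the HDMI vendor-specific member of the union. */
        hdmi_vendor_infoframe_init(&frame.vendor.hdmi);
        frame.vendor.hdmi.vic = 1;      /* arbitrary HDMI VIC, for illustration */

        /*
         * hdmi_infoframe_pack() switches on frame.any.type; for the vendor
         * case it checks the IEEE OUI in hdmi_vendor_any_infoframe_pack()
         * before delegating to hdmi_vendor_infoframe_pack().
         */
        return hdmi_infoframe_pack(&frame, buf, len);
}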
diff --git a/drivers/video/hyperv_fb.c b/drivers/video/hyperv_fb.c
index d4d2c5fe248..8ac99b87c07 100644
--- a/drivers/video/hyperv_fb.c
+++ b/drivers/video/hyperv_fb.c
@@ -825,5 +825,4 @@ module_init(hvfb_drv_init);
module_exit(hvfb_drv_exit);
MODULE_LICENSE("GPL");
-MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V Synthetic Video Frame Buffer Driver");
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 401a56e250b..24565291165 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -2029,10 +2029,9 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
return -1;
}
- minfo = kmalloc(sizeof(*minfo), GFP_KERNEL);
+ minfo = kzalloc(sizeof(*minfo), GFP_KERNEL);
if (!minfo)
return -1;
- memset(minfo, 0, sizeof(*minfo));
minfo->pcidev = pdev;
minfo->dead = 0;
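The matroxfb hunk above is the usual kmalloc()+memset() to kzalloc() cleanup: kzalloc() hands back already-zeroed memory, so the separate memset() is redundant. A minimal sketch (struct demo is hypothetical):

#include <linux/slab.h>

struct demo { int a; void *p; };        /* hypothetical structure */

static struct demo *demo_alloc(void)
{
        /*
         * Equivalent to kmalloc(sizeof(struct demo), GFP_KERNEL) followed
         * by memset(d, 0, sizeof(*d)), in a single call.
         */
        return kzalloc(sizeof(struct demo), GFP_KERNEL);
}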
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index 3ba37713b1f..d250ed0f806 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -46,7 +46,6 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
-#include <linux/pinctrl/consumer.h>
#include <linux/fb.h>
#include <linux/regulator/consumer.h>
#include <video/of_display_timing.h>
@@ -239,24 +238,6 @@ static const struct fb_bitfield def_rgb565[] = {
}
};
-static const struct fb_bitfield def_rgb666[] = {
- [RED] = {
- .offset = 16,
- .length = 6,
- },
- [GREEN] = {
- .offset = 8,
- .length = 6,
- },
- [BLUE] = {
- .offset = 0,
- .length = 6,
- },
- [TRANSP] = { /* no support for transparency */
- .length = 0,
- }
-};
-
static const struct fb_bitfield def_rgb888[] = {
[RED] = {
.offset = 16,
@@ -309,9 +290,6 @@ static int mxsfb_check_var(struct fb_var_screeninfo *var,
break;
case STMLCDIF_16BIT:
case STMLCDIF_18BIT:
- /* 24 bit to 18 bit mapping */
- rgb = def_rgb666;
- break;
case STMLCDIF_24BIT:
/* real 24 bit */
rgb = def_rgb888;
@@ -453,11 +431,6 @@ static int mxsfb_set_par(struct fb_info *fb_info)
return -EINVAL;
case STMLCDIF_16BIT:
case STMLCDIF_18BIT:
- /* 24 bit to 18 bit mapping */
- ctrl |= CTRL_DF24; /* ignore the upper 2 bits in
- * each colour component
- */
- break;
case STMLCDIF_24BIT:
/* real 24 bit */
break;
@@ -877,18 +850,11 @@ static int mxsfb_probe(struct platform_device *pdev)
struct mxsfb_info *host;
struct fb_info *fb_info;
struct fb_modelist *modelist;
- struct pinctrl *pinctrl;
int ret;
if (of_id)
pdev->id_entry = of_id->data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Cannot get memory IO resource\n");
- return -ENODEV;
- }
-
fb_info = framebuffer_alloc(sizeof(struct mxsfb_info), &pdev->dev);
if (!fb_info) {
dev_err(&pdev->dev, "Failed to allocate fbdev\n");
@@ -897,6 +863,7 @@ static int mxsfb_probe(struct platform_device *pdev)
host = to_imxfb_host(fb_info);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
host->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(host->base)) {
ret = PTR_ERR(host->base);
@@ -908,12 +875,6 @@ static int mxsfb_probe(struct platform_device *pdev)
host->devdata = &mxsfb_devdata[pdev->id_entry->driver_data];
- pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
- if (IS_ERR(pinctrl)) {
- ret = PTR_ERR(pinctrl);
- goto fb_release;
- }
-
host->clk = devm_clk_get(&host->pdev->dev, NULL);
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
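In the mxsfb probe path above, the explicit NULL check on platform_get_resource() goes away because devm_ioremap_resource() validates the resource itself (returning an ERR_PTR and printing its own diagnostic), and the devm_pinctrl_get_select_default() call is dropped since default pin states are now requested by the driver core. A sketch of the resulting idiom, with made-up demo_* names:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        /* No NULL check needed here; devm_ioremap_resource() does it. */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        return 0;
}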
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c
index 8c527e5b293..796e5112cee 100644
--- a/drivers/video/nuc900fb.c
+++ b/drivers/video/nuc900fb.c
@@ -587,8 +587,7 @@ static int nuc900fb_probe(struct platform_device *pdev)
fbinfo->flags = FBINFO_FLAG_DEFAULT;
fbinfo->pseudo_palette = &fbi->pseudo_pal;
- ret = request_irq(irq, nuc900fb_irqhandler, 0,
- pdev->name, fbinfo);
+ ret = request_irq(irq, nuc900fb_irqhandler, 0, pdev->name, fbi);
if (ret) {
dev_err(&pdev->dev, "cannot register irq handler %d -err %d\n",
irq, ret);
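The nuc900fb change above passes the driver-private fbi pointer as the request_irq() dev_id instead of the fb_info, presumably so the cookie matches what the interrupt handler casts dev_id back to. A generic sketch of why the dev_id must be consistent end to end (demo_* names are hypothetical):

#include <linux/interrupt.h>

struct demo_priv { int irq_count; };            /* hypothetical per-device state */

static irqreturn_t demo_irq(int irq, void *dev_id)
{
        /* dev_id is exactly the pointer given to request_irq() below. */
        struct demo_priv *priv = dev_id;

        priv->irq_count++;
        return IRQ_HANDLED;
}

static int demo_setup_irq(int irq, struct demo_priv *priv)
{
        /*
         * Pass the pointer the handler expects, and hand the same pointer
         * to free_irq(irq, priv) on teardown.
         */
        return request_irq(irq, demo_irq, 0, "demo", priv);
}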
diff --git a/drivers/video/omap2/Kconfig b/drivers/video/omap2/Kconfig
index 56cad0f5386..63b23f87081 100644
--- a/drivers/video/omap2/Kconfig
+++ b/drivers/video/omap2/Kconfig
@@ -5,7 +5,6 @@ if ARCH_OMAP2PLUS
source "drivers/video/omap2/dss/Kconfig"
source "drivers/video/omap2/omapfb/Kconfig"
-source "drivers/video/omap2/displays/Kconfig"
source "drivers/video/omap2/displays-new/Kconfig"
endif
diff --git a/drivers/video/omap2/Makefile b/drivers/video/omap2/Makefile
index 86873c2fbb2..bf8127df8c7 100644
--- a/drivers/video/omap2/Makefile
+++ b/drivers/video/omap2/Makefile
@@ -1,6 +1,5 @@
obj-$(CONFIG_OMAP2_VRFB) += vrfb.o
obj-$(CONFIG_OMAP2_DSS) += dss/
-obj-y += displays/
obj-y += displays-new/
obj-$(CONFIG_FB_OMAP2) += omapfb/
diff --git a/drivers/video/omap2/displays-new/connector-analog-tv.c b/drivers/video/omap2/displays-new/connector-analog-tv.c
index 5338f362293..1b60698f141 100644
--- a/drivers/video/omap2/displays-new/connector-analog-tv.c
+++ b/drivers/video/omap2/displays-new/connector-analog-tv.c
@@ -28,6 +28,20 @@ struct panel_drv_data {
bool invert_polarity;
};
+static const struct omap_video_timings tvc_pal_timings = {
+ .x_res = 720,
+ .y_res = 574,
+ .pixel_clock = 13500,
+ .hsw = 64,
+ .hfp = 12,
+ .hbp = 68,
+ .vsw = 5,
+ .vfp = 5,
+ .vbp = 41,
+
+ .interlace = true,
+};
+
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
static int tvc_connect(struct omap_dss_device *dssdev)
@@ -212,14 +226,14 @@ static int tvc_probe(struct platform_device *pdev)
return -ENODEV;
}
- ddata->timings = omap_dss_pal_timings;
+ ddata->timings = tvc_pal_timings;
dssdev = &ddata->dssdev;
dssdev->driver = &tvc_driver;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_VENC;
dssdev->owner = THIS_MODULE;
- dssdev->panel.timings = omap_dss_pal_timings;
+ dssdev->panel.timings = tvc_pal_timings;
r = omapdss_register_display(dssdev);
if (r) {
diff --git a/drivers/video/omap2/displays-new/encoder-tfp410.c b/drivers/video/omap2/displays-new/encoder-tfp410.c
index a04f65856d6..4a291e756be 100644
--- a/drivers/video/omap2/displays-new/encoder-tfp410.c
+++ b/drivers/video/omap2/displays-new/encoder-tfp410.c
@@ -43,8 +43,8 @@ static int tfp410_connect(struct omap_dss_device *dssdev,
if (r)
return r;
- dst->output = dssdev;
- dssdev->device = dst;
+ dst->src = dssdev;
+ dssdev->dst = dst;
return 0;
}
@@ -59,12 +59,12 @@ static void tfp410_disconnect(struct omap_dss_device *dssdev,
if (!omapdss_device_is_connected(dssdev))
return;
- WARN_ON(dst != dssdev->device);
- if (dst != dssdev->device)
+ WARN_ON(dst != dssdev->dst);
+ if (dst != dssdev->dst)
return;
- dst->output = NULL;
- dssdev->device = NULL;
+ dst->src = NULL;
+ dssdev->dst = NULL;
in->ops.dpi->disconnect(in, &ddata->dssdev);
}
@@ -244,7 +244,7 @@ static int __exit tfp410_remove(struct platform_device *pdev)
WARN_ON(omapdss_device_is_connected(dssdev));
if (omapdss_device_is_connected(dssdev))
- tfp410_disconnect(dssdev, dssdev->device);
+ tfp410_disconnect(dssdev, dssdev->dst);
omap_dss_put_device(in);
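The encoder-tfp410 hunks (and the tpd12s015 ones below) only rename the omap_dss_device link fields from output/device to src/dst; the connect/disconnect discipline stays the same: connect links both ends of the pair, disconnect sanity-checks the pairing with WARN_ON() and then clears both ends. A stripped-down sketch of that pattern using a hypothetical struct node:

#include <linux/bug.h>
#include <linux/kernel.h>

struct node {
        struct node *src;       /* upstream end of the link, if any */
        struct node *dst;       /* downstream end of the link, if any */
};

static void node_connect(struct node *src, struct node *dst)
{
        dst->src = src;
        src->dst = dst;
}

static void node_disconnect(struct node *src, struct node *dst)
{
        WARN_ON(dst != src->dst);       /* must be the pair connect() set up */
        if (dst != src->dst)
                return;

        dst->src = NULL;
        src->dst = NULL;
}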
diff --git a/drivers/video/omap2/displays-new/encoder-tpd12s015.c b/drivers/video/omap2/displays-new/encoder-tpd12s015.c
index ce0e010026c..798ef200b05 100644
--- a/drivers/video/omap2/displays-new/encoder-tpd12s015.c
+++ b/drivers/video/omap2/displays-new/encoder-tpd12s015.c
@@ -66,8 +66,8 @@ static int tpd_connect(struct omap_dss_device *dssdev,
if (r)
return r;
- dst->output = dssdev;
- dssdev->device = dst;
+ dst->src = dssdev;
+ dssdev->dst = dst;
INIT_COMPLETION(ddata->hpd_completion);
@@ -95,15 +95,15 @@ static void tpd_disconnect(struct omap_dss_device *dssdev,
struct panel_drv_data *ddata = to_panel_data(dssdev);
struct omap_dss_device *in = ddata->in;
- WARN_ON(dst != dssdev->device);
+ WARN_ON(dst != dssdev->dst);
- if (dst != dssdev->device)
+ if (dst != dssdev->dst)
return;
gpio_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0);
- dst->output = NULL;
- dssdev->device = NULL;
+ dst->src = NULL;
+ dssdev->dst = NULL;
in->ops.hdmi->disconnect(in, &ddata->dssdev);
}
@@ -372,7 +372,7 @@ static int __exit tpd_remove(struct platform_device *pdev)
WARN_ON(omapdss_device_is_connected(dssdev));
if (omapdss_device_is_connected(dssdev))
- tpd_disconnect(dssdev, dssdev->device);
+ tpd_disconnect(dssdev, dssdev->dst);
omap_dss_put_device(in);
diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig
deleted file mode 100644
index e80ac1c7956..00000000000
--- a/drivers/video/omap2/displays/Kconfig
+++ /dev/null
@@ -1,75 +0,0 @@
-menu "OMAP2/3 Display Device Drivers (old device model)"
- depends on OMAP2_DSS
-
-config PANEL_GENERIC_DPI
- tristate "Generic DPI Panel"
- depends on OMAP2_DSS_DPI
- help
- Generic DPI panel driver.
- Supports DVI output for Beagle and OMAP3 SDP.
- Supports LCD Panel used in TI SDP3430 and EVM boards,
- OMAP3517 EVM boards and CM-T35.
-
-config PANEL_TFP410
- tristate "TFP410 DPI-to-DVI chip"
- depends on OMAP2_DSS_DPI && I2C
- help
- Driver for TFP410 DPI-to-DVI chip. The driver uses i2c to read EDID
- information from the monitor.
-
-config PANEL_LGPHILIPS_LB035Q02
- tristate "LG.Philips LB035Q02 LCD Panel"
- depends on OMAP2_DSS_DPI && SPI
- help
- LCD Panel used on the Gumstix Overo Palo35
-
-config PANEL_SHARP_LS037V7DW01
- tristate "Sharp LS037V7DW01 LCD Panel"
- depends on OMAP2_DSS_DPI
- depends on BACKLIGHT_CLASS_DEVICE
- help
- LCD Panel used in TI's SDP3430 and EVM boards
-
-config PANEL_NEC_NL8048HL11_01B
- tristate "NEC NL8048HL11-01B Panel"
- depends on OMAP2_DSS_DPI
- depends on SPI
- depends on BACKLIGHT_CLASS_DEVICE
- help
- This NEC NL8048HL11-01B panel is TFT LCD
- used in the Zoom2/3/3630 sdp boards.
-
-config PANEL_PICODLP
- tristate "TI PICO DLP mini-projector"
- depends on OMAP2_DSS_DPI && I2C
- help
- A mini-projector used in TI's SDP4430 and EVM boards
- For more info please visit http://www.dlp.com/projector/
-
-config PANEL_TAAL
- tristate "Taal DSI Panel"
- depends on OMAP2_DSS_DSI
- depends on BACKLIGHT_CLASS_DEVICE
- help
- Taal DSI command mode panel from TPO.
-
-config PANEL_TPO_TD043MTEA1
- tristate "TPO TD043MTEA1 LCD Panel"
- depends on OMAP2_DSS_DPI && SPI
- help
- LCD Panel used in OMAP3 Pandora
-
-config PANEL_ACX565AKM
- tristate "ACX565AKM Panel"
- depends on OMAP2_DSS_SDI && SPI
- depends on BACKLIGHT_CLASS_DEVICE
- help
- This is the LCD panel used on Nokia N900
-
-config PANEL_N8X0
- tristate "N8X0 Panel"
- depends on OMAP2_DSS_RFBI && SPI
- depends on BACKLIGHT_CLASS_DEVICE
- help
- This is the LCD panel used on Nokia N8x0
-endmenu
diff --git a/drivers/video/omap2/displays/Makefile b/drivers/video/omap2/displays/Makefile
deleted file mode 100644
index 58a5176b07b..00000000000
--- a/drivers/video/omap2/displays/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-obj-$(CONFIG_PANEL_GENERIC_DPI) += panel-generic-dpi.o
-obj-$(CONFIG_PANEL_TFP410) += panel-tfp410.o
-obj-$(CONFIG_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
-obj-$(CONFIG_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
-obj-$(CONFIG_PANEL_NEC_NL8048HL11_01B) += panel-nec-nl8048hl11-01b.o
-
-obj-$(CONFIG_PANEL_TAAL) += panel-taal.o
-obj-$(CONFIG_PANEL_PICODLP) += panel-picodlp.o
-obj-$(CONFIG_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
-obj-$(CONFIG_PANEL_ACX565AKM) += panel-acx565akm.o
-obj-$(CONFIG_PANEL_N8X0) += panel-n8x0.o
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
deleted file mode 100644
index 3fd100fc853..00000000000
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ /dev/null
@@ -1,798 +0,0 @@
-/*
- * Support for ACX565AKM LCD Panel used on Nokia N900
- *
- * Copyright (C) 2010 Nokia Corporation
- *
- * Original Driver Author: Imre Deak <imre.deak@nokia.com>
- * Based on panel-generic.c by Tomi Valkeinen <tomi.valkeinen@nokia.com>
- * Adapted to new DSS2 framework: Roger Quadros <roger.quadros@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/spi/spi.h>
-#include <linux/jiffies.h>
-#include <linux/sched.h>
-#include <linux/backlight.h>
-#include <linux/fb.h>
-#include <linux/gpio.h>
-
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
-
-#define MIPID_CMD_READ_DISP_ID 0x04
-#define MIPID_CMD_READ_RED 0x06
-#define MIPID_CMD_READ_GREEN 0x07
-#define MIPID_CMD_READ_BLUE 0x08
-#define MIPID_CMD_READ_DISP_STATUS 0x09
-#define MIPID_CMD_RDDSDR 0x0F
-#define MIPID_CMD_SLEEP_IN 0x10
-#define MIPID_CMD_SLEEP_OUT 0x11
-#define MIPID_CMD_DISP_OFF 0x28
-#define MIPID_CMD_DISP_ON 0x29
-#define MIPID_CMD_WRITE_DISP_BRIGHTNESS 0x51
-#define MIPID_CMD_READ_DISP_BRIGHTNESS 0x52
-#define MIPID_CMD_WRITE_CTRL_DISP 0x53
-
-#define CTRL_DISP_BRIGHTNESS_CTRL_ON (1 << 5)
-#define CTRL_DISP_AMBIENT_LIGHT_CTRL_ON (1 << 4)
-#define CTRL_DISP_BACKLIGHT_ON (1 << 2)
-#define CTRL_DISP_AUTO_BRIGHTNESS_ON (1 << 1)
-
-#define MIPID_CMD_READ_CTRL_DISP 0x54
-#define MIPID_CMD_WRITE_CABC 0x55
-#define MIPID_CMD_READ_CABC 0x56
-
-#define MIPID_VER_LPH8923 3
-#define MIPID_VER_LS041Y3 4
-#define MIPID_VER_L4F00311 8
-#define MIPID_VER_ACX565AKM 9
-
-struct acx565akm_device {
- char *name;
- int enabled;
- int model;
- int revision;
- u8 display_id[3];
- unsigned has_bc:1;
- unsigned has_cabc:1;
- unsigned cabc_mode;
- unsigned long hw_guard_end; /* next value of jiffies
- when we can issue the
- next sleep in/out command */
- unsigned long hw_guard_wait; /* max guard time in jiffies */
-
- struct spi_device *spi;
- struct mutex mutex;
-
- struct omap_dss_device *dssdev;
- struct backlight_device *bl_dev;
-};
-
-static struct acx565akm_device acx_dev;
-static int acx565akm_bl_update_status(struct backlight_device *dev);
-
-/*--------------------MIPID interface-----------------------------*/
-
-static void acx565akm_transfer(struct acx565akm_device *md, int cmd,
- const u8 *wbuf, int wlen, u8 *rbuf, int rlen)
-{
- struct spi_message m;
- struct spi_transfer *x, xfer[5];
- int r;
-
- BUG_ON(md->spi == NULL);
-
- spi_message_init(&m);
-
- memset(xfer, 0, sizeof(xfer));
- x = &xfer[0];
-
- cmd &= 0xff;
- x->tx_buf = &cmd;
- x->bits_per_word = 9;
- x->len = 2;
-
- if (rlen > 1 && wlen == 0) {
- /*
- * Between the command and the response data there is a
- * dummy clock cycle. Add an extra bit after the command
- * word to account for this.
- */
- x->bits_per_word = 10;
- cmd <<= 1;
- }
- spi_message_add_tail(x, &m);
-
- if (wlen) {
- x++;
- x->tx_buf = wbuf;
- x->len = wlen;
- x->bits_per_word = 9;
- spi_message_add_tail(x, &m);
- }
-
- if (rlen) {
- x++;
- x->rx_buf = rbuf;
- x->len = rlen;
- spi_message_add_tail(x, &m);
- }
-
- r = spi_sync(md->spi, &m);
- if (r < 0)
- dev_dbg(&md->spi->dev, "spi_sync %d\n", r);
-}
-
-static inline void acx565akm_cmd(struct acx565akm_device *md, int cmd)
-{
- acx565akm_transfer(md, cmd, NULL, 0, NULL, 0);
-}
-
-static inline void acx565akm_write(struct acx565akm_device *md,
- int reg, const u8 *buf, int len)
-{
- acx565akm_transfer(md, reg, buf, len, NULL, 0);
-}
-
-static inline void acx565akm_read(struct acx565akm_device *md,
- int reg, u8 *buf, int len)
-{
- acx565akm_transfer(md, reg, NULL, 0, buf, len);
-}
-
-static void hw_guard_start(struct acx565akm_device *md, int guard_msec)
-{
- md->hw_guard_wait = msecs_to_jiffies(guard_msec);
- md->hw_guard_end = jiffies + md->hw_guard_wait;
-}
-
-static void hw_guard_wait(struct acx565akm_device *md)
-{
- unsigned long wait = md->hw_guard_end - jiffies;
-
- if ((long)wait > 0 && wait <= md->hw_guard_wait) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(wait);
- }
-}
-
-/*----------------------MIPID wrappers----------------------------*/
-
-static void set_sleep_mode(struct acx565akm_device *md, int on)
-{
- int cmd;
-
- if (on)
- cmd = MIPID_CMD_SLEEP_IN;
- else
- cmd = MIPID_CMD_SLEEP_OUT;
- /*
- * We have to keep 120msec between sleep in/out commands.
- * (8.2.15, 8.2.16).
- */
- hw_guard_wait(md);
- acx565akm_cmd(md, cmd);
- hw_guard_start(md, 120);
-}
-
-static void set_display_state(struct acx565akm_device *md, int enabled)
-{
- int cmd = enabled ? MIPID_CMD_DISP_ON : MIPID_CMD_DISP_OFF;
-
- acx565akm_cmd(md, cmd);
-}
-
-static int panel_enabled(struct acx565akm_device *md)
-{
- u32 disp_status;
- int enabled;
-
- acx565akm_read(md, MIPID_CMD_READ_DISP_STATUS, (u8 *)&disp_status, 4);
- disp_status = __be32_to_cpu(disp_status);
- enabled = (disp_status & (1 << 17)) && (disp_status & (1 << 10));
- dev_dbg(&md->spi->dev,
- "LCD panel %senabled by bootloader (status 0x%04x)\n",
- enabled ? "" : "not ", disp_status);
- return enabled;
-}
-
-static int panel_detect(struct acx565akm_device *md)
-{
- acx565akm_read(md, MIPID_CMD_READ_DISP_ID, md->display_id, 3);
- dev_dbg(&md->spi->dev, "MIPI display ID: %02x%02x%02x\n",
- md->display_id[0], md->display_id[1], md->display_id[2]);
-
- switch (md->display_id[0]) {
- case 0x10:
- md->model = MIPID_VER_ACX565AKM;
- md->name = "acx565akm";
- md->has_bc = 1;
- md->has_cabc = 1;
- break;
- case 0x29:
- md->model = MIPID_VER_L4F00311;
- md->name = "l4f00311";
- break;
- case 0x45:
- md->model = MIPID_VER_LPH8923;
- md->name = "lph8923";
- break;
- case 0x83:
- md->model = MIPID_VER_LS041Y3;
- md->name = "ls041y3";
- break;
- default:
- md->name = "unknown";
- dev_err(&md->spi->dev, "invalid display ID\n");
- return -ENODEV;
- }
-
- md->revision = md->display_id[1];
-
- dev_info(&md->spi->dev, "omapfb: %s rev %02x LCD detected\n",
- md->name, md->revision);
-
- return 0;
-}
-
-/*----------------------Backlight Control-------------------------*/
-
-static void enable_backlight_ctrl(struct acx565akm_device *md, int enable)
-{
- u16 ctrl;
-
- acx565akm_read(md, MIPID_CMD_READ_CTRL_DISP, (u8 *)&ctrl, 1);
- if (enable) {
- ctrl |= CTRL_DISP_BRIGHTNESS_CTRL_ON |
- CTRL_DISP_BACKLIGHT_ON;
- } else {
- ctrl &= ~(CTRL_DISP_BRIGHTNESS_CTRL_ON |
- CTRL_DISP_BACKLIGHT_ON);
- }
-
- ctrl |= 1 << 8;
- acx565akm_write(md, MIPID_CMD_WRITE_CTRL_DISP, (u8 *)&ctrl, 2);
-}
-
-static void set_cabc_mode(struct acx565akm_device *md, unsigned mode)
-{
- u16 cabc_ctrl;
-
- md->cabc_mode = mode;
- if (!md->enabled)
- return;
- cabc_ctrl = 0;
- acx565akm_read(md, MIPID_CMD_READ_CABC, (u8 *)&cabc_ctrl, 1);
- cabc_ctrl &= ~3;
- cabc_ctrl |= (1 << 8) | (mode & 3);
- acx565akm_write(md, MIPID_CMD_WRITE_CABC, (u8 *)&cabc_ctrl, 2);
-}
-
-static unsigned get_cabc_mode(struct acx565akm_device *md)
-{
- return md->cabc_mode;
-}
-
-static unsigned get_hw_cabc_mode(struct acx565akm_device *md)
-{
- u8 cabc_ctrl;
-
- acx565akm_read(md, MIPID_CMD_READ_CABC, &cabc_ctrl, 1);
- return cabc_ctrl & 3;
-}
-
-static void acx565akm_set_brightness(struct acx565akm_device *md, int level)
-{
- int bv;
-
- bv = level | (1 << 8);
- acx565akm_write(md, MIPID_CMD_WRITE_DISP_BRIGHTNESS, (u8 *)&bv, 2);
-
- if (level)
- enable_backlight_ctrl(md, 1);
- else
- enable_backlight_ctrl(md, 0);
-}
-
-static int acx565akm_get_actual_brightness(struct acx565akm_device *md)
-{
- u8 bv;
-
- acx565akm_read(md, MIPID_CMD_READ_DISP_BRIGHTNESS, &bv, 1);
-
- return bv;
-}
-
-
-static int acx565akm_bl_update_status(struct backlight_device *dev)
-{
- struct acx565akm_device *md = dev_get_drvdata(&dev->dev);
- int r;
- int level;
-
- dev_dbg(&md->spi->dev, "%s\n", __func__);
-
- mutex_lock(&md->mutex);
-
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK)
- level = dev->props.brightness;
- else
- level = 0;
-
- r = 0;
- if (md->has_bc)
- acx565akm_set_brightness(md, level);
- else
- r = -ENODEV;
-
- mutex_unlock(&md->mutex);
-
- return r;
-}
-
-static int acx565akm_bl_get_intensity(struct backlight_device *dev)
-{
- struct acx565akm_device *md = dev_get_drvdata(&dev->dev);
-
- dev_dbg(&dev->dev, "%s\n", __func__);
-
- if (!md->has_bc)
- return -ENODEV;
-
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK) {
- if (md->has_bc)
- return acx565akm_get_actual_brightness(md);
- else
- return dev->props.brightness;
- }
-
- return 0;
-}
-
-static const struct backlight_ops acx565akm_bl_ops = {
- .get_brightness = acx565akm_bl_get_intensity,
- .update_status = acx565akm_bl_update_status,
-};
-
-/*--------------------Auto Brightness control via Sysfs---------------------*/
-
-static const char *cabc_modes[] = {
- "off", /* always used when CABC is not supported */
- "ui",
- "still-image",
- "moving-image",
-};
-
-static ssize_t show_cabc_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct acx565akm_device *md = dev_get_drvdata(dev);
- const char *mode_str;
- int mode;
- int len;
-
- if (!md->has_cabc)
- mode = 0;
- else
- mode = get_cabc_mode(md);
- mode_str = "unknown";
- if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes))
- mode_str = cabc_modes[mode];
- len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str);
-
- return len < PAGE_SIZE - 1 ? len : PAGE_SIZE - 1;
-}
-
-static ssize_t store_cabc_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct acx565akm_device *md = dev_get_drvdata(dev);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) {
- const char *mode_str = cabc_modes[i];
- int cmp_len = strlen(mode_str);
-
- if (count > 0 && buf[count - 1] == '\n')
- count--;
- if (count != cmp_len)
- continue;
-
- if (strncmp(buf, mode_str, cmp_len) == 0)
- break;
- }
-
- if (i == ARRAY_SIZE(cabc_modes))
- return -EINVAL;
-
- if (!md->has_cabc && i != 0)
- return -EINVAL;
-
- mutex_lock(&md->mutex);
- set_cabc_mode(md, i);
- mutex_unlock(&md->mutex);
-
- return count;
-}
-
-static ssize_t show_cabc_available_modes(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct acx565akm_device *md = dev_get_drvdata(dev);
- int len;
- int i;
-
- if (!md->has_cabc)
- return snprintf(buf, PAGE_SIZE, "%s\n", cabc_modes[0]);
-
- for (i = 0, len = 0;
- len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++)
- len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s",
- i ? " " : "", cabc_modes[i],
- i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : "");
-
- return len < PAGE_SIZE ? len : PAGE_SIZE - 1;
-}
-
-static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR,
- show_cabc_mode, store_cabc_mode);
-static DEVICE_ATTR(cabc_available_modes, S_IRUGO,
- show_cabc_available_modes, NULL);
-
-static struct attribute *bldev_attrs[] = {
- &dev_attr_cabc_mode.attr,
- &dev_attr_cabc_available_modes.attr,
- NULL,
-};
-
-static struct attribute_group bldev_attr_group = {
- .attrs = bldev_attrs,
-};
-
-
-/*---------------------------ACX Panel----------------------------*/
-
-static int acx_get_recommended_bpp(struct omap_dss_device *dssdev)
-{
- return 16;
-}
-
-static struct omap_video_timings acx_panel_timings = {
- .x_res = 800,
- .y_res = 480,
- .pixel_clock = 24000,
- .hfp = 28,
- .hsw = 4,
- .hbp = 24,
- .vfp = 3,
- .vsw = 3,
- .vbp = 4,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
-
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
-};
-
-static struct panel_acx565akm_data *get_panel_data(struct omap_dss_device *dssdev)
-{
- return (struct panel_acx565akm_data *) dssdev->data;
-}
-
-static int acx_panel_probe(struct omap_dss_device *dssdev)
-{
- int r;
- struct acx565akm_device *md = &acx_dev;
- struct panel_acx565akm_data *panel_data = get_panel_data(dssdev);
- struct backlight_device *bldev;
- int max_brightness, brightness;
- struct backlight_properties props;
-
- dev_dbg(dssdev->dev, "%s\n", __func__);
-
- if (!panel_data)
- return -EINVAL;
-
- /* FIXME AC bias ? */
- dssdev->panel.timings = acx_panel_timings;
-
- if (gpio_is_valid(panel_data->reset_gpio)) {
- r = devm_gpio_request_one(dssdev->dev, panel_data->reset_gpio,
- GPIOF_OUT_INIT_LOW, "lcd reset");
- if (r)
- return r;
- }
-
- if (gpio_is_valid(panel_data->reset_gpio))
- gpio_set_value(panel_data->reset_gpio, 1);
-
- /*
- * After reset we have to wait 5 msec before the first
- * command can be sent.
- */
- msleep(5);
-
- md->enabled = panel_enabled(md);
-
- r = panel_detect(md);
- if (r) {
- dev_err(dssdev->dev, "%s panel detect error\n", __func__);
- if (!md->enabled && gpio_is_valid(panel_data->reset_gpio))
- gpio_set_value(panel_data->reset_gpio, 0);
-
- return r;
- }
-
- mutex_lock(&acx_dev.mutex);
- acx_dev.dssdev = dssdev;
- mutex_unlock(&acx_dev.mutex);
-
- if (!md->enabled) {
- if (gpio_is_valid(panel_data->reset_gpio))
- gpio_set_value(panel_data->reset_gpio, 0);
- }
-
- /*------- Backlight control --------*/
-
- memset(&props, 0, sizeof(props));
- props.fb_blank = FB_BLANK_UNBLANK;
- props.power = FB_BLANK_UNBLANK;
- props.type = BACKLIGHT_RAW;
-
- bldev = backlight_device_register("acx565akm", &md->spi->dev,
- md, &acx565akm_bl_ops, &props);
- md->bl_dev = bldev;
- if (md->has_cabc) {
- r = sysfs_create_group(&bldev->dev.kobj, &bldev_attr_group);
- if (r) {
- dev_err(&bldev->dev,
- "%s failed to create sysfs files\n", __func__);
- backlight_device_unregister(bldev);
- return r;
- }
- md->cabc_mode = get_hw_cabc_mode(md);
- }
-
- max_brightness = 255;
-
- if (md->has_bc)
- brightness = acx565akm_get_actual_brightness(md);
- else
- brightness = 0;
-
- bldev->props.max_brightness = max_brightness;
- bldev->props.brightness = brightness;
-
- acx565akm_bl_update_status(bldev);
- return 0;
-}
-
-static void acx_panel_remove(struct omap_dss_device *dssdev)
-{
- struct acx565akm_device *md = &acx_dev;
-
- dev_dbg(dssdev->dev, "%s\n", __func__);
- sysfs_remove_group(&md->bl_dev->dev.kobj, &bldev_attr_group);
- backlight_device_unregister(md->bl_dev);
- mutex_lock(&acx_dev.mutex);
- acx_dev.dssdev = NULL;
- mutex_unlock(&acx_dev.mutex);
-}
-
-static int acx_panel_power_on(struct omap_dss_device *dssdev)
-{
- struct acx565akm_device *md = &acx_dev;
- struct panel_acx565akm_data *panel_data = get_panel_data(dssdev);
- int r;
-
- dev_dbg(dssdev->dev, "%s\n", __func__);
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- return 0;
-
- mutex_lock(&md->mutex);
-
- omapdss_sdi_set_timings(dssdev, &dssdev->panel.timings);
- omapdss_sdi_set_datapairs(dssdev, dssdev->phy.sdi.datapairs);
-
- r = omapdss_sdi_display_enable(dssdev);
- if (r) {
- pr_err("%s sdi enable failed\n", __func__);
- goto fail_unlock;
- }
-
- /*FIXME tweak me */
- msleep(50);
-
- if (gpio_is_valid(panel_data->reset_gpio))
- gpio_set_value(panel_data->reset_gpio, 1);
-
- if (md->enabled) {
- dev_dbg(&md->spi->dev, "panel already enabled\n");
- mutex_unlock(&md->mutex);
- return 0;
- }
-
- /*
- * We have to meet all the following delay requirements:
- * 1. tRW: reset pulse width 10usec (7.12.1)
- * 2. tRT: reset cancel time 5msec (7.12.1)
- * 3. Providing PCLK,HS,VS signals for 2 frames = ~50msec worst
- * case (7.6.2)
- * 4. 120msec before the sleep out command (7.12.1)
- */
- msleep(120);
-
- set_sleep_mode(md, 0);
- md->enabled = 1;
-
- /* 5msec between sleep out and the next command. (8.2.16) */
- msleep(5);
- set_display_state(md, 1);
- set_cabc_mode(md, md->cabc_mode);
-
- mutex_unlock(&md->mutex);
-
- return acx565akm_bl_update_status(md->bl_dev);
-
-fail_unlock:
- mutex_unlock(&md->mutex);
- return r;
-}
-
-static void acx_panel_power_off(struct omap_dss_device *dssdev)
-{
- struct acx565akm_device *md = &acx_dev;
- struct panel_acx565akm_data *panel_data = get_panel_data(dssdev);
-
- dev_dbg(dssdev->dev, "%s\n", __func__);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return;
-
- mutex_lock(&md->mutex);
-
- if (!md->enabled) {
- mutex_unlock(&md->mutex);
- return;
- }
- set_display_state(md, 0);
- set_sleep_mode(md, 1);
- md->enabled = 0;
- /*
- * We have to provide PCLK,HS,VS signals for 2 frames (worst case
- * ~50msec) after sending the sleep in command and asserting the
- * reset signal. We probably could assert the reset w/o the delay
- * but we still delay to avoid possible artifacts. (7.6.1)
- */
- msleep(50);
-
- if (gpio_is_valid(panel_data->reset_gpio))
- gpio_set_value(panel_data->reset_gpio, 0);
-
- /* FIXME need to tweak this delay */
- msleep(100);
-
- omapdss_sdi_display_disable(dssdev);
-
- mutex_unlock(&md->mutex);
-}
-
-static int acx_panel_enable(struct omap_dss_device *dssdev)
-{
- int r;
-
- dev_dbg(dssdev->dev, "%s\n", __func__);
- r = acx_panel_power_on(dssdev);
-
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
- return 0;
-}
-
-static void acx_panel_disable(struct omap_dss_device *dssdev)
-{
- dev_dbg(dssdev->dev, "%s\n", __func__);
- acx_panel_power_off(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static void acx_panel_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- omapdss_sdi_set_timings(dssdev, timings);
-
- dssdev->panel.timings = *timings;
-}
-
-static int acx_panel_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- return 0;
-}
-
-
-static struct omap_dss_driver acx_panel_driver = {
- .probe = acx_panel_probe,
- .remove = acx_panel_remove,
-
- .enable = acx_panel_enable,
- .disable = acx_panel_disable,
-
- .set_timings = acx_panel_set_timings,
- .check_timings = acx_panel_check_timings,
-
- .get_recommended_bpp = acx_get_recommended_bpp,
-
- .driver = {
- .name = "panel-acx565akm",
- .owner = THIS_MODULE,
- },
-};
-
-/*--------------------SPI probe-------------------------*/
-
-static int acx565akm_spi_probe(struct spi_device *spi)
-{
- struct acx565akm_device *md = &acx_dev;
-
- dev_dbg(&spi->dev, "%s\n", __func__);
-
- spi->mode = SPI_MODE_3;
- md->spi = spi;
- mutex_init(&md->mutex);
- dev_set_drvdata(&spi->dev, md);
-
- omap_dss_register_driver(&acx_panel_driver);
-
- return 0;
-}
-
-static int acx565akm_spi_remove(struct spi_device *spi)
-{
- struct acx565akm_device *md = dev_get_drvdata(&spi->dev);
-
- dev_dbg(&md->spi->dev, "%s\n", __func__);
- omap_dss_unregister_driver(&acx_panel_driver);
-
- return 0;
-}
-
-static struct spi_driver acx565akm_spi_driver = {
- .driver = {
- .name = "acx565akm",
- .owner = THIS_MODULE,
- },
- .probe = acx565akm_spi_probe,
- .remove = acx565akm_spi_remove,
-};
-
-module_spi_driver(acx565akm_spi_driver);
-
-MODULE_AUTHOR("Nokia Corporation");
-MODULE_DESCRIPTION("acx565akm LCD Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
deleted file mode 100644
index bebebd45847..00000000000
--- a/drivers/video/omap2/displays/panel-generic-dpi.c
+++ /dev/null
@@ -1,744 +0,0 @@
-/*
- * Generic DPI Panels support
- *
- * Copyright (C) 2010 Canonical Ltd.
- * Author: Bryan Wu <bryan.wu@canonical.com>
- *
- * LCD panel driver for Sharp LQ043T1DG01
- *
- * Copyright (C) 2009 Texas Instruments Inc
- * Author: Vaibhav Hiremath <hvaibhav@ti.com>
- *
- * LCD panel driver for Toppoly TDO35S
- *
- * Copyright (C) 2009 CompuLab, Ltd.
- * Author: Mike Rapoport <mike@compulab.co.il>
- *
- * Copyright (C) 2008 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <video/omapdss.h>
-
-#include <video/omap-panel-data.h>
-
-struct panel_config {
- struct omap_video_timings timings;
-
- int power_on_delay;
- int power_off_delay;
-
- /*
- * Used to match device to panel configuration
- * when use generic panel driver
- */
- const char *name;
-};
-
-/* Panel configurations */
-static struct panel_config generic_dpi_panels[] = {
- /* Sharp LQ043T1DG01 */
- {
- {
- .x_res = 480,
- .y_res = 272,
-
- .pixel_clock = 9000,
-
- .hsw = 42,
- .hfp = 3,
- .hbp = 2,
-
- .vsw = 11,
- .vfp = 3,
- .vbp = 2,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_LOW,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- },
- .power_on_delay = 50,
- .power_off_delay = 100,
- .name = "sharp_lq",
- },
-
- /* Sharp LS037V7DW01 */
- {
- {
- .x_res = 480,
- .y_res = 640,
-
- .pixel_clock = 19200,
-
- .hsw = 2,
- .hfp = 1,
- .hbp = 28,
-
- .vsw = 1,
- .vfp = 1,
- .vbp = 1,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- },
- .power_on_delay = 50,
- .power_off_delay = 100,
- .name = "sharp_ls",
- },
-
- /* Toppoly TDO35S */
- {
- {
- .x_res = 480,
- .y_res = 640,
-
- .pixel_clock = 26000,
-
- .hfp = 104,
- .hsw = 8,
- .hbp = 8,
-
- .vfp = 4,
- .vsw = 2,
- .vbp = 2,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
- },
- .power_on_delay = 0,
- .power_off_delay = 0,
- .name = "toppoly_tdo35s",
- },
-
- /* Samsung LTE430WQ-F0C */
- {
- {
- .x_res = 480,
- .y_res = 272,
-
- .pixel_clock = 9200,
-
- .hfp = 8,
- .hsw = 41,
- .hbp = 45 - 41,
-
- .vfp = 4,
- .vsw = 10,
- .vbp = 12 - 10,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- },
- .power_on_delay = 0,
- .power_off_delay = 0,
- .name = "samsung_lte430wq_f0c",
- },
-
- /* Seiko 70WVW1TZ3Z3 */
- {
- {
- .x_res = 800,
- .y_res = 480,
-
- .pixel_clock = 33000,
-
- .hsw = 128,
- .hfp = 10,
- .hbp = 10,
-
- .vsw = 2,
- .vfp = 4,
- .vbp = 11,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- },
- .power_on_delay = 0,
- .power_off_delay = 0,
- .name = "seiko_70wvw1tz3",
- },
-
- /* Powertip PH480272T */
- {
- {
- .x_res = 480,
- .y_res = 272,
-
- .pixel_clock = 9000,
-
- .hsw = 40,
- .hfp = 2,
- .hbp = 2,
-
- .vsw = 10,
- .vfp = 2,
- .vbp = 2,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_LOW,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- },
- .power_on_delay = 0,
- .power_off_delay = 0,
- .name = "powertip_ph480272t",
- },
-
- /* Innolux AT070TN83 */
- {
- {
- .x_res = 800,
- .y_res = 480,
-
- .pixel_clock = 40000,
-
- .hsw = 48,
- .hfp = 1,
- .hbp = 1,
-
- .vsw = 3,
- .vfp = 12,
- .vbp = 25,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- },
- .power_on_delay = 0,
- .power_off_delay = 0,
- .name = "innolux_at070tn83",
- },
-
- /* NEC NL2432DR22-11B */
- {
- {
- .x_res = 240,
- .y_res = 320,
-
- .pixel_clock = 5400,
-
- .hsw = 3,
- .hfp = 3,
- .hbp = 39,
-
- .vsw = 1,
- .vfp = 2,
- .vbp = 7,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- },
- .name = "nec_nl2432dr22-11b",
- },
-
- /* Unknown panel used in OMAP H4 */
- {
- {
- .x_res = 240,
- .y_res = 320,
-
- .pixel_clock = 6250,
-
- .hsw = 15,
- .hfp = 15,
- .hbp = 60,
-
- .vsw = 1,
- .vfp = 1,
- .vbp = 1,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- },
- .name = "h4",
- },
-
- /* FocalTech ETM070003DH6 */
- {
- {
- .x_res = 800,
- .y_res = 480,
-
- .pixel_clock = 28000,
-
- .hsw = 48,
- .hfp = 40,
- .hbp = 40,
-
- .vsw = 3,
- .vfp = 13,
- .vbp = 29,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- },
- .name = "focaltech_etm070003dh6",
- },
-
- /* Microtips Technologies - UMSH-8173MD */
- {
- {
- .x_res = 800,
- .y_res = 480,
-
- .pixel_clock = 34560,
-
- .hsw = 13,
- .hfp = 101,
- .hbp = 101,
-
- .vsw = 23,
- .vfp = 1,
- .vbp = 1,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- },
- .power_on_delay = 0,
- .power_off_delay = 0,
- .name = "microtips_umsh_8173md",
- },
-
- /* OrtusTech COM43H4M10XTC */
- {
- {
- .x_res = 480,
- .y_res = 272,
-
- .pixel_clock = 8000,
-
- .hsw = 41,
- .hfp = 8,
- .hbp = 4,
-
- .vsw = 10,
- .vfp = 4,
- .vbp = 2,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- },
- .name = "ortustech_com43h4m10xtc",
- },
-
- /* Innolux AT080TN52 */
- {
- {
- .x_res = 800,
- .y_res = 600,
-
- .pixel_clock = 41142,
-
- .hsw = 20,
- .hfp = 210,
- .hbp = 46,
-
- .vsw = 10,
- .vfp = 12,
- .vbp = 23,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_LOW,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- },
- .name = "innolux_at080tn52",
- },
-
- /* Mitsubishi AA084SB01 */
- {
- {
- .x_res = 800,
- .y_res = 600,
- .pixel_clock = 40000,
-
- .hsw = 1,
- .hfp = 254,
- .hbp = 1,
-
- .vsw = 1,
- .vfp = 26,
- .vbp = 1,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- },
- .name = "mitsubishi_aa084sb01",
- },
- /* EDT ET0500G0DH6 */
- {
- {
- .x_res = 800,
- .y_res = 480,
- .pixel_clock = 33260,
-
- .hsw = 128,
- .hfp = 216,
- .hbp = 40,
-
- .vsw = 2,
- .vfp = 35,
- .vbp = 10,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- },
- .name = "edt_et0500g0dh6",
- },
-
- /* Prime-View PD050VL1 */
- {
- {
- .x_res = 640,
- .y_res = 480,
-
- .pixel_clock = 25000,
-
- .hsw = 96,
- .hfp = 18,
- .hbp = 46,
-
- .vsw = 2,
- .vfp = 10,
- .vbp = 33,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- },
- .name = "primeview_pd050vl1",
- },
-
- /* Prime-View PM070WL4 */
- {
- {
- .x_res = 800,
- .y_res = 480,
-
- .pixel_clock = 32000,
-
- .hsw = 128,
- .hfp = 42,
- .hbp = 86,
-
- .vsw = 2,
- .vfp = 10,
- .vbp = 33,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- },
- .name = "primeview_pm070wl4",
- },
-
- /* Prime-View PD104SLF */
- {
- {
- .x_res = 800,
- .y_res = 600,
-
- .pixel_clock = 40000,
-
- .hsw = 128,
- .hfp = 42,
- .hbp = 86,
-
- .vsw = 4,
- .vfp = 1,
- .vbp = 23,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
- },
- .name = "primeview_pd104slf",
- },
-};
-
-struct panel_drv_data {
-
- struct omap_dss_device *dssdev;
-
- struct panel_config *panel_config;
-
- struct mutex lock;
-};
-
-static inline struct panel_generic_dpi_data
-*get_panel_data(const struct omap_dss_device *dssdev)
-{
- return (struct panel_generic_dpi_data *) dssdev->data;
-}
-
-static int generic_dpi_panel_power_on(struct omap_dss_device *dssdev)
-{
- int r, i;
- struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
- struct panel_drv_data *drv_data = dev_get_drvdata(dssdev->dev);
- struct panel_config *panel_config = drv_data->panel_config;
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- return 0;
-
- omapdss_dpi_set_timings(dssdev, &dssdev->panel.timings);
- omapdss_dpi_set_data_lines(dssdev, dssdev->phy.dpi.data_lines);
-
- r = omapdss_dpi_display_enable(dssdev);
- if (r)
- goto err0;
-
-	/* wait a couple of vsyncs before enabling the LCD */
- if (panel_config->power_on_delay)
- msleep(panel_config->power_on_delay);
-
- for (i = 0; i < panel_data->num_gpios; ++i) {
- gpio_set_value_cansleep(panel_data->gpios[i],
- panel_data->gpio_invert[i] ? 0 : 1);
- }
-
- return 0;
-
-err0:
- return r;
-}
-
-static void generic_dpi_panel_power_off(struct omap_dss_device *dssdev)
-{
- struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
- struct panel_drv_data *drv_data = dev_get_drvdata(dssdev->dev);
- struct panel_config *panel_config = drv_data->panel_config;
- int i;
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return;
-
- for (i = panel_data->num_gpios - 1; i >= 0; --i) {
- gpio_set_value_cansleep(panel_data->gpios[i],
- panel_data->gpio_invert[i] ? 1 : 0);
- }
-
-	/* wait a couple of vsyncs after disabling the LCD */
- if (panel_config->power_off_delay)
- msleep(panel_config->power_off_delay);
-
- omapdss_dpi_display_disable(dssdev);
-}
-
-static int generic_dpi_panel_probe(struct omap_dss_device *dssdev)
-{
- struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
- struct panel_config *panel_config = NULL;
- struct panel_drv_data *drv_data = NULL;
- int i, r;
-
- dev_dbg(dssdev->dev, "probe\n");
-
- if (!panel_data || !panel_data->name)
- return -EINVAL;
-
- for (i = 0; i < ARRAY_SIZE(generic_dpi_panels); i++) {
- if (strcmp(panel_data->name, generic_dpi_panels[i].name) == 0) {
- panel_config = &generic_dpi_panels[i];
- break;
- }
- }
-
- if (!panel_config)
- return -EINVAL;
-
- for (i = 0; i < panel_data->num_gpios; ++i) {
- r = devm_gpio_request_one(dssdev->dev, panel_data->gpios[i],
- panel_data->gpio_invert[i] ?
- GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
- "panel gpio");
- if (r)
- return r;
- }
-
- dssdev->panel.timings = panel_config->timings;
-
- drv_data = devm_kzalloc(dssdev->dev, sizeof(*drv_data), GFP_KERNEL);
- if (!drv_data)
- return -ENOMEM;
-
- drv_data->dssdev = dssdev;
- drv_data->panel_config = panel_config;
-
- mutex_init(&drv_data->lock);
-
- dev_set_drvdata(dssdev->dev, drv_data);
-
- return 0;
-}
-
-static void __exit generic_dpi_panel_remove(struct omap_dss_device *dssdev)
-{
- dev_dbg(dssdev->dev, "remove\n");
-
- dev_set_drvdata(dssdev->dev, NULL);
-}
-
-static int generic_dpi_panel_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *drv_data = dev_get_drvdata(dssdev->dev);
- int r;
-
- mutex_lock(&drv_data->lock);
-
- r = generic_dpi_panel_power_on(dssdev);
- if (r)
- goto err;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-err:
- mutex_unlock(&drv_data->lock);
-
- return r;
-}
-
-static void generic_dpi_panel_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *drv_data = dev_get_drvdata(dssdev->dev);
-
- mutex_lock(&drv_data->lock);
-
- generic_dpi_panel_power_off(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-
- mutex_unlock(&drv_data->lock);
-}
-
-static void generic_dpi_panel_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- struct panel_drv_data *drv_data = dev_get_drvdata(dssdev->dev);
-
- mutex_lock(&drv_data->lock);
-
- omapdss_dpi_set_timings(dssdev, timings);
-
- dssdev->panel.timings = *timings;
-
- mutex_unlock(&drv_data->lock);
-}
-
-static void generic_dpi_panel_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- struct panel_drv_data *drv_data = dev_get_drvdata(dssdev->dev);
-
- mutex_lock(&drv_data->lock);
-
- *timings = dssdev->panel.timings;
-
- mutex_unlock(&drv_data->lock);
-}
-
-static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- struct panel_drv_data *drv_data = dev_get_drvdata(dssdev->dev);
- int r;
-
- mutex_lock(&drv_data->lock);
-
- r = dpi_check_timings(dssdev, timings);
-
- mutex_unlock(&drv_data->lock);
-
- return r;
-}
-
-static struct omap_dss_driver dpi_driver = {
- .probe = generic_dpi_panel_probe,
- .remove = __exit_p(generic_dpi_panel_remove),
-
- .enable = generic_dpi_panel_enable,
- .disable = generic_dpi_panel_disable,
-
- .set_timings = generic_dpi_panel_set_timings,
- .get_timings = generic_dpi_panel_get_timings,
- .check_timings = generic_dpi_panel_check_timings,
-
- .driver = {
- .name = "generic_dpi_panel",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init generic_dpi_panel_drv_init(void)
-{
- return omap_dss_register_driver(&dpi_driver);
-}
-
-static void __exit generic_dpi_panel_drv_exit(void)
-{
- omap_dss_unregister_driver(&dpi_driver);
-}
-
-module_init(generic_dpi_panel_drv_init);
-module_exit(generic_dpi_panel_drv_exit);
-MODULE_LICENSE("GPL");
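Annotation (not part of the patch): the generic DPI driver removed above matches a board-supplied panel name against its generic_dpi_panels[] table and toggles the enable GPIOs listed in the platform data. A minimal board-side sketch of that contract, assuming only the panel_generic_dpi_data fields the driver dereferences (name, num_gpios, gpios[], gpio_invert[]); the GPIO number and the dssdev wiring are hypothetical:

        static struct panel_generic_dpi_data board_panel_data = {
                .name        = "innolux_at080tn52",     /* must match a table entry above */
                .num_gpios   = 1,
                .gpios       = { 170 },                 /* hypothetical panel-enable GPIO */
                .gpio_invert = { false },               /* driven high to enable */
        };

        /* a board file would then point dssdev->data at this structure and set the
         * driver name to "generic_dpi_panel" so generic_dpi_panel_probe() finds it */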
diff --git a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
deleted file mode 100644
index 6c51430ddb3..00000000000
--- a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * LCD panel driver for LG.Philips LB035Q02
- *
- * Author: Steve Sakoman <steve@sakoman.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/spi/spi.h>
-#include <linux/mutex.h>
-#include <linux/gpio.h>
-
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
-
-struct lb035q02_data {
- struct mutex lock;
-};
-
-static struct omap_video_timings lb035q02_timings = {
- .x_res = 320,
- .y_res = 240,
-
- .pixel_clock = 6500,
-
- .hsw = 2,
- .hfp = 20,
- .hbp = 68,
-
- .vsw = 2,
- .vfp = 4,
- .vbp = 18,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
-};
-
-static inline struct panel_generic_dpi_data
-*get_panel_data(const struct omap_dss_device *dssdev)
-{
- return (struct panel_generic_dpi_data *) dssdev->data;
-}
-
-static int lb035q02_panel_power_on(struct omap_dss_device *dssdev)
-{
- struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
- int r, i;
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- return 0;
-
- omapdss_dpi_set_timings(dssdev, &dssdev->panel.timings);
- omapdss_dpi_set_data_lines(dssdev, dssdev->phy.dpi.data_lines);
-
- r = omapdss_dpi_display_enable(dssdev);
- if (r)
- goto err0;
-
- for (i = 0; i < panel_data->num_gpios; ++i) {
- gpio_set_value_cansleep(panel_data->gpios[i],
- panel_data->gpio_invert[i] ? 0 : 1);
- }
-
- return 0;
-
-err0:
- return r;
-}
-
-static void lb035q02_panel_power_off(struct omap_dss_device *dssdev)
-{
- struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
- int i;
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return;
-
- for (i = panel_data->num_gpios - 1; i >= 0; --i) {
- gpio_set_value_cansleep(panel_data->gpios[i],
- panel_data->gpio_invert[i] ? 1 : 0);
- }
-
- omapdss_dpi_display_disable(dssdev);
-}
-
-static int lb035q02_panel_probe(struct omap_dss_device *dssdev)
-{
- struct panel_generic_dpi_data *panel_data = get_panel_data(dssdev);
- struct lb035q02_data *ld;
- int r, i;
-
- if (!panel_data)
- return -EINVAL;
-
- dssdev->panel.timings = lb035q02_timings;
-
- ld = devm_kzalloc(dssdev->dev, sizeof(*ld), GFP_KERNEL);
- if (!ld)
- return -ENOMEM;
-
- for (i = 0; i < panel_data->num_gpios; ++i) {
- r = devm_gpio_request_one(dssdev->dev, panel_data->gpios[i],
- panel_data->gpio_invert[i] ?
- GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
- "panel gpio");
- if (r)
- return r;
- }
-
- mutex_init(&ld->lock);
- dev_set_drvdata(dssdev->dev, ld);
-
- return 0;
-}
-
-static void lb035q02_panel_remove(struct omap_dss_device *dssdev)
-{
-}
-
-static int lb035q02_panel_enable(struct omap_dss_device *dssdev)
-{
- struct lb035q02_data *ld = dev_get_drvdata(dssdev->dev);
- int r;
-
- mutex_lock(&ld->lock);
-
- r = lb035q02_panel_power_on(dssdev);
- if (r)
- goto err;
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- mutex_unlock(&ld->lock);
- return 0;
-err:
- mutex_unlock(&ld->lock);
- return r;
-}
-
-static void lb035q02_panel_disable(struct omap_dss_device *dssdev)
-{
- struct lb035q02_data *ld = dev_get_drvdata(dssdev->dev);
-
- mutex_lock(&ld->lock);
-
- lb035q02_panel_power_off(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-
- mutex_unlock(&ld->lock);
-}
-
-static struct omap_dss_driver lb035q02_driver = {
- .probe = lb035q02_panel_probe,
- .remove = lb035q02_panel_remove,
-
- .enable = lb035q02_panel_enable,
- .disable = lb035q02_panel_disable,
-
- .driver = {
- .name = "lgphilips_lb035q02_panel",
- .owner = THIS_MODULE,
- },
-};
-
-static int lb035q02_write_reg(struct spi_device *spi, u8 reg, u16 val)
-{
- struct spi_message msg;
- struct spi_transfer index_xfer = {
- .len = 3,
- .cs_change = 1,
- };
- struct spi_transfer value_xfer = {
- .len = 3,
- };
- u8 buffer[16];
-
- spi_message_init(&msg);
-
- /* register index */
- buffer[0] = 0x70;
- buffer[1] = 0x00;
- buffer[2] = reg & 0x7f;
- index_xfer.tx_buf = buffer;
- spi_message_add_tail(&index_xfer, &msg);
-
- /* register value */
- buffer[4] = 0x72;
- buffer[5] = val >> 8;
- buffer[6] = val;
- value_xfer.tx_buf = buffer + 4;
- spi_message_add_tail(&value_xfer, &msg);
-
- return spi_sync(spi, &msg);
-}
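Annotation (not part of the patch): a worked example of the framing built above, treating 0x70/0x72 as the panel's index/value write markers exactly as lb035q02_write_reg() uses them. Writing register 0x01 with value 0x6300 clocks out { 0x70, 0x00, 0x01 } with a chip-select toggle, then { 0x72, 0x63, 0x00 }:

        /* illustration only: same byte layout as lb035q02_write_reg() builds */
        static void lb035q02_frame(u8 reg, u16 val, u8 idx[3], u8 dat[3])
        {
                idx[0] = 0x70;          /* index write marker */
                idx[1] = 0x00;
                idx[2] = reg & 0x7f;    /* 7-bit register address */

                dat[0] = 0x72;          /* value write marker */
                dat[1] = val >> 8;      /* value, MSB first */
                dat[2] = val & 0xff;
        }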
-
-static void init_lb035q02_panel(struct spi_device *spi)
-{
- /* Init sequence from page 28 of the lb035q02 spec */
- lb035q02_write_reg(spi, 0x01, 0x6300);
- lb035q02_write_reg(spi, 0x02, 0x0200);
- lb035q02_write_reg(spi, 0x03, 0x0177);
- lb035q02_write_reg(spi, 0x04, 0x04c7);
- lb035q02_write_reg(spi, 0x05, 0xffc0);
- lb035q02_write_reg(spi, 0x06, 0xe806);
- lb035q02_write_reg(spi, 0x0a, 0x4008);
- lb035q02_write_reg(spi, 0x0b, 0x0000);
- lb035q02_write_reg(spi, 0x0d, 0x0030);
- lb035q02_write_reg(spi, 0x0e, 0x2800);
- lb035q02_write_reg(spi, 0x0f, 0x0000);
- lb035q02_write_reg(spi, 0x16, 0x9f80);
- lb035q02_write_reg(spi, 0x17, 0x0a0f);
- lb035q02_write_reg(spi, 0x1e, 0x00c1);
- lb035q02_write_reg(spi, 0x30, 0x0300);
- lb035q02_write_reg(spi, 0x31, 0x0007);
- lb035q02_write_reg(spi, 0x32, 0x0000);
- lb035q02_write_reg(spi, 0x33, 0x0000);
- lb035q02_write_reg(spi, 0x34, 0x0707);
- lb035q02_write_reg(spi, 0x35, 0x0004);
- lb035q02_write_reg(spi, 0x36, 0x0302);
- lb035q02_write_reg(spi, 0x37, 0x0202);
- lb035q02_write_reg(spi, 0x3a, 0x0a0d);
- lb035q02_write_reg(spi, 0x3b, 0x0806);
-}
-
-static int lb035q02_panel_spi_probe(struct spi_device *spi)
-{
- init_lb035q02_panel(spi);
- return omap_dss_register_driver(&lb035q02_driver);
-}
-
-static int lb035q02_panel_spi_remove(struct spi_device *spi)
-{
- omap_dss_unregister_driver(&lb035q02_driver);
- return 0;
-}
-
-static struct spi_driver lb035q02_spi_driver = {
- .driver = {
- .name = "lgphilips_lb035q02_panel-spi",
- .owner = THIS_MODULE,
- },
- .probe = lb035q02_panel_spi_probe,
- .remove = lb035q02_panel_spi_remove,
-};
-
-module_spi_driver(lb035q02_spi_driver);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-n8x0.c b/drivers/video/omap2/displays/panel-n8x0.c
deleted file mode 100644
index 1d525fc84db..00000000000
--- a/drivers/video/omap2/displays/panel-n8x0.c
+++ /dev/null
@@ -1,616 +0,0 @@
-/* #define DEBUG */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/spi/spi.h>
-#include <linux/fb.h>
-
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
-
-#define BLIZZARD_REV_CODE 0x00
-#define BLIZZARD_CONFIG 0x02
-#define BLIZZARD_PLL_DIV 0x04
-#define BLIZZARD_PLL_LOCK_RANGE 0x06
-#define BLIZZARD_PLL_CLOCK_SYNTH_0 0x08
-#define BLIZZARD_PLL_CLOCK_SYNTH_1 0x0a
-#define BLIZZARD_PLL_MODE 0x0c
-#define BLIZZARD_CLK_SRC 0x0e
-#define BLIZZARD_MEM_BANK0_ACTIVATE 0x10
-#define BLIZZARD_MEM_BANK0_STATUS 0x14
-#define BLIZZARD_PANEL_CONFIGURATION 0x28
-#define BLIZZARD_HDISP 0x2a
-#define BLIZZARD_HNDP 0x2c
-#define BLIZZARD_VDISP0 0x2e
-#define BLIZZARD_VDISP1 0x30
-#define BLIZZARD_VNDP 0x32
-#define BLIZZARD_HSW 0x34
-#define BLIZZARD_VSW 0x38
-#define BLIZZARD_DISPLAY_MODE 0x68
-#define BLIZZARD_INPUT_WIN_X_START_0 0x6c
-#define BLIZZARD_DATA_SOURCE_SELECT 0x8e
-#define BLIZZARD_DISP_MEM_DATA_PORT 0x90
-#define BLIZZARD_DISP_MEM_READ_ADDR0 0x92
-#define BLIZZARD_POWER_SAVE 0xE6
-#define BLIZZARD_NDISP_CTRL_STATUS 0xE8
-
-/* Data source select */
-/* For S1D13745 */
-#define BLIZZARD_SRC_WRITE_LCD_BACKGROUND 0x00
-#define BLIZZARD_SRC_WRITE_LCD_DESTRUCTIVE 0x01
-#define BLIZZARD_SRC_WRITE_OVERLAY_ENABLE 0x04
-#define BLIZZARD_SRC_DISABLE_OVERLAY 0x05
-/* For S1D13744 */
-#define BLIZZARD_SRC_WRITE_LCD 0x00
-#define BLIZZARD_SRC_BLT_LCD 0x06
-
-#define BLIZZARD_COLOR_RGB565 0x01
-#define BLIZZARD_COLOR_YUV420 0x09
-
-#define BLIZZARD_VERSION_S1D13745 0x01 /* Hailstorm */
-#define BLIZZARD_VERSION_S1D13744 0x02 /* Blizzard */
-
-#define MIPID_CMD_READ_DISP_ID 0x04
-#define MIPID_CMD_READ_RED 0x06
-#define MIPID_CMD_READ_GREEN 0x07
-#define MIPID_CMD_READ_BLUE 0x08
-#define MIPID_CMD_READ_DISP_STATUS 0x09
-#define MIPID_CMD_RDDSDR 0x0F
-#define MIPID_CMD_SLEEP_IN 0x10
-#define MIPID_CMD_SLEEP_OUT 0x11
-#define MIPID_CMD_DISP_OFF 0x28
-#define MIPID_CMD_DISP_ON 0x29
-
-static struct panel_drv_data {
- struct mutex lock;
-
- struct omap_dss_device *dssdev;
- struct spi_device *spidev;
-
- int blizzard_ver;
-} s_drv_data;
-
-
-static inline
-struct panel_n8x0_data *get_board_data(const struct omap_dss_device *dssdev)
-{
- return dssdev->data;
-}
-
-static inline
-struct panel_drv_data *get_drv_data(const struct omap_dss_device *dssdev)
-{
- return &s_drv_data;
-}
-
-
-static inline void blizzard_cmd(u8 cmd)
-{
- omap_rfbi_write_command(&cmd, 1);
-}
-
-static inline void blizzard_write(u8 cmd, const u8 *buf, int len)
-{
- omap_rfbi_write_command(&cmd, 1);
- omap_rfbi_write_data(buf, len);
-}
-
-static inline void blizzard_read(u8 cmd, u8 *buf, int len)
-{
- omap_rfbi_write_command(&cmd, 1);
- omap_rfbi_read_data(buf, len);
-}
-
-static u8 blizzard_read_reg(u8 cmd)
-{
- u8 data;
- blizzard_read(cmd, &data, 1);
- return data;
-}
-
-static void blizzard_ctrl_setup_update(struct omap_dss_device *dssdev,
- int x, int y, int w, int h)
-{
- struct panel_drv_data *ddata = get_drv_data(dssdev);
- u8 tmp[18];
- int x_end, y_end;
-
- x_end = x + w - 1;
- y_end = y + h - 1;
-
- tmp[0] = x;
- tmp[1] = x >> 8;
- tmp[2] = y;
- tmp[3] = y >> 8;
- tmp[4] = x_end;
- tmp[5] = x_end >> 8;
- tmp[6] = y_end;
- tmp[7] = y_end >> 8;
-
- /* scaling? */
- tmp[8] = x;
- tmp[9] = x >> 8;
- tmp[10] = y;
- tmp[11] = y >> 8;
- tmp[12] = x_end;
- tmp[13] = x_end >> 8;
- tmp[14] = y_end;
- tmp[15] = y_end >> 8;
-
- tmp[16] = BLIZZARD_COLOR_RGB565;
-
- if (ddata->blizzard_ver == BLIZZARD_VERSION_S1D13745)
- tmp[17] = BLIZZARD_SRC_WRITE_LCD_BACKGROUND;
- else
- tmp[17] = ddata->blizzard_ver == BLIZZARD_VERSION_S1D13744 ?
- BLIZZARD_SRC_WRITE_LCD :
- BLIZZARD_SRC_WRITE_LCD_DESTRUCTIVE;
-
- omapdss_rfbi_set_pixel_size(dssdev, 16);
- omapdss_rfbi_set_data_lines(dssdev, 8);
-
- omap_rfbi_configure(dssdev);
-
- blizzard_write(BLIZZARD_INPUT_WIN_X_START_0, tmp, 18);
-
- omapdss_rfbi_set_pixel_size(dssdev, 16);
- omapdss_rfbi_set_data_lines(dssdev, 16);
-
- omap_rfbi_configure(dssdev);
-}
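Annotation (not part of the patch), making the 18-byte descriptor above concrete:

        /*
         * Worked example: a full-screen 800x480 update (x = 0, y = 0, w = 800,
         * h = 480) gives x_end = 799, y_end = 479, so
         *   tmp[0..7]  = { 0x00, 0x00, 0x00, 0x00, 0x1f, 0x03, 0xdf, 0x01 }  (input window, LE)
         *   tmp[8..15] = the same window again (output), tmp[16] = BLIZZARD_COLOR_RGB565,
         *   tmp[17]    = the destination-select code chosen from the chip revision.
         * The block is written to BLIZZARD_INPUT_WIN_X_START_0 over an 8-bit RFBI
         * bus, then the driver switches back to 16 data lines for the pixel data.
         */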
-
-static void mipid_transfer(struct spi_device *spi, int cmd, const u8 *wbuf,
- int wlen, u8 *rbuf, int rlen)
-{
- struct spi_message m;
- struct spi_transfer *x, xfer[4];
- u16 w;
- int r;
-
- spi_message_init(&m);
-
- memset(xfer, 0, sizeof(xfer));
- x = &xfer[0];
-
- cmd &= 0xff;
- x->tx_buf = &cmd;
- x->bits_per_word = 9;
- x->len = 2;
- spi_message_add_tail(x, &m);
-
- if (wlen) {
- x++;
- x->tx_buf = wbuf;
- x->len = wlen;
- x->bits_per_word = 9;
- spi_message_add_tail(x, &m);
- }
-
- if (rlen) {
- x++;
- x->rx_buf = &w;
- x->len = 1;
- spi_message_add_tail(x, &m);
-
- if (rlen > 1) {
- /* Arrange for the extra clock before the first
- * data bit.
- */
- x->bits_per_word = 9;
- x->len = 2;
-
- x++;
- x->rx_buf = &rbuf[1];
- x->len = rlen - 1;
- spi_message_add_tail(x, &m);
- }
- }
-
- r = spi_sync(spi, &m);
- if (r < 0)
- dev_dbg(&spi->dev, "spi_sync %d\n", r);
-
- if (rlen)
- rbuf[0] = w & 0xff;
-}
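Annotation (not part of the patch; my reading of the SPI core conventions, not stated in the original source):

        /*
         * With bits_per_word = 9 each SPI word occupies two bytes of the transfer
         * buffer, so a single command word uses len = 2.  On reads, the first
         * response word is also widened to 9 bits purely to generate the extra
         * clock cycle the panel needs before its first data bit, as the comment
         * in mipid_transfer() notes; only the low 8 bits are kept afterwards
         * (rbuf[0] = w & 0xff).
         */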
-
-static inline void mipid_cmd(struct spi_device *spi, int cmd)
-{
- mipid_transfer(spi, cmd, NULL, 0, NULL, 0);
-}
-
-static inline void mipid_write(struct spi_device *spi,
- int reg, const u8 *buf, int len)
-{
- mipid_transfer(spi, reg, buf, len, NULL, 0);
-}
-
-static inline void mipid_read(struct spi_device *spi,
- int reg, u8 *buf, int len)
-{
- mipid_transfer(spi, reg, NULL, 0, buf, len);
-}
-
-static void set_data_lines(struct spi_device *spi, int data_lines)
-{
- u16 par;
-
- switch (data_lines) {
- case 16:
- par = 0x150;
- break;
- case 18:
- par = 0x160;
- break;
- case 24:
- par = 0x170;
- break;
- }
-
- mipid_write(spi, 0x3a, (u8 *)&par, 2);
-}
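Annotation (not part of the patch): the switch above leaves par uninitialized for any width other than 16, 18 or 24 (harmless in practice, since the only caller passes 24). A defensive sketch, not the original driver code:

        static int set_data_lines_checked(struct spi_device *spi, int data_lines)
        {
                u16 par;

                switch (data_lines) {
                case 16: par = 0x150; break;
                case 18: par = 0x160; break;
                case 24: par = 0x170; break;
                default: return -EINVAL;        /* unsupported bus width */
                }

                mipid_write(spi, 0x3a, (u8 *)&par, 2);
                return 0;
        }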
-
-static void send_init_string(struct spi_device *spi)
-{
- u16 initpar[] = { 0x0102, 0x0100, 0x0100 };
- mipid_write(spi, 0xc2, (u8 *)initpar, sizeof(initpar));
-}
-
-static void send_display_on(struct spi_device *spi)
-{
- mipid_cmd(spi, MIPID_CMD_DISP_ON);
-}
-
-static void send_display_off(struct spi_device *spi)
-{
- mipid_cmd(spi, MIPID_CMD_DISP_OFF);
-}
-
-static void send_sleep_out(struct spi_device *spi)
-{
- mipid_cmd(spi, MIPID_CMD_SLEEP_OUT);
- msleep(120);
-}
-
-static void send_sleep_in(struct spi_device *spi)
-{
- mipid_cmd(spi, MIPID_CMD_SLEEP_IN);
- msleep(50);
-}
-
-static int n8x0_panel_power_on(struct omap_dss_device *dssdev)
-{
- int r;
- struct panel_n8x0_data *bdata = get_board_data(dssdev);
- struct panel_drv_data *ddata = get_drv_data(dssdev);
- struct spi_device *spi = ddata->spidev;
- u8 rev, conf;
- u8 display_id[3];
- const char *panel_name;
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- return 0;
-
- gpio_direction_output(bdata->ctrl_pwrdown, 1);
-
- omapdss_rfbi_set_size(dssdev, dssdev->panel.timings.x_res,
- dssdev->panel.timings.y_res);
- omapdss_rfbi_set_pixel_size(dssdev, dssdev->ctrl.pixel_size);
- omapdss_rfbi_set_data_lines(dssdev, dssdev->phy.rfbi.data_lines);
- omapdss_rfbi_set_interface_timings(dssdev, &dssdev->ctrl.rfbi_timings);
-
- r = omapdss_rfbi_display_enable(dssdev);
- if (r)
- goto err_rfbi_en;
-
- rev = blizzard_read_reg(BLIZZARD_REV_CODE);
- conf = blizzard_read_reg(BLIZZARD_CONFIG);
-
- switch (rev & 0xfc) {
- case 0x9c:
- ddata->blizzard_ver = BLIZZARD_VERSION_S1D13744;
- dev_info(dssdev->dev, "s1d13744 LCD controller rev %d "
- "initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);
- break;
- case 0xa4:
- ddata->blizzard_ver = BLIZZARD_VERSION_S1D13745;
- dev_info(dssdev->dev, "s1d13745 LCD controller rev %d "
- "initialized (CNF pins %x)\n", rev & 0x03, conf & 0x07);
- break;
- default:
- dev_err(dssdev->dev, "invalid s1d1374x revision %02x\n", rev);
- r = -ENODEV;
- goto err_inv_chip;
- }
-
- /* panel */
-
- gpio_direction_output(bdata->panel_reset, 1);
-
- mipid_read(spi, MIPID_CMD_READ_DISP_ID, display_id, 3);
- dev_dbg(&spi->dev, "MIPI display ID: %02x%02x%02x\n",
- display_id[0], display_id[1], display_id[2]);
-
- switch (display_id[0]) {
- case 0x45:
- panel_name = "lph8923";
- break;
- case 0x83:
- panel_name = "ls041y3";
- break;
- default:
- dev_err(dssdev->dev, "invalid display ID 0x%x\n",
- display_id[0]);
- r = -ENODEV;
- goto err_inv_panel;
- }
-
- dev_info(dssdev->dev, "%s rev %02x LCD detected\n",
- panel_name, display_id[1]);
-
- send_sleep_out(spi);
- send_init_string(spi);
- set_data_lines(spi, 24);
- send_display_on(spi);
-
- return 0;
-
-err_inv_panel:
- /*
- * HACK: we should turn off the panel here, but there is some problem
- * with the initialization sequence, and we fail to init the panel if we
- * have turned it off
- */
- /* gpio_direction_output(bdata->panel_reset, 0); */
-err_inv_chip:
- omapdss_rfbi_display_disable(dssdev);
-err_rfbi_en:
- gpio_direction_output(bdata->ctrl_pwrdown, 0);
- return r;
-}
-
-static void n8x0_panel_power_off(struct omap_dss_device *dssdev)
-{
- struct panel_n8x0_data *bdata = get_board_data(dssdev);
- struct panel_drv_data *ddata = get_drv_data(dssdev);
- struct spi_device *spi = ddata->spidev;
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return;
-
- send_display_off(spi);
- send_sleep_in(spi);
-
- /*
- * HACK: we should turn off the panel here, but there is some problem
- * with the initialization sequence, and we fail to init the panel if we
- * have turned it off
- */
- /* gpio_direction_output(bdata->panel_reset, 0); */
- gpio_direction_output(bdata->ctrl_pwrdown, 0);
- omapdss_rfbi_display_disable(dssdev);
-}
-
-static const struct rfbi_timings n8x0_panel_timings = {
- .cs_on_time = 0,
-
- .we_on_time = 9000,
- .we_off_time = 18000,
- .we_cycle_time = 36000,
-
- .re_on_time = 9000,
- .re_off_time = 27000,
- .re_cycle_time = 36000,
-
- .access_time = 27000,
- .cs_off_time = 36000,
-
- .cs_pulse_width = 0,
-};
-
-static int n8x0_panel_probe(struct omap_dss_device *dssdev)
-{
- struct panel_n8x0_data *bdata = get_board_data(dssdev);
- struct panel_drv_data *ddata;
- int r;
-
- dev_dbg(dssdev->dev, "probe\n");
-
- if (!bdata)
- return -EINVAL;
-
- s_drv_data.dssdev = dssdev;
-
- ddata = &s_drv_data;
-
- mutex_init(&ddata->lock);
-
- dssdev->panel.timings.x_res = 800;
- dssdev->panel.timings.y_res = 480;
- dssdev->ctrl.pixel_size = 16;
- dssdev->ctrl.rfbi_timings = n8x0_panel_timings;
- dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
-
- if (gpio_is_valid(bdata->panel_reset)) {
- r = devm_gpio_request_one(dssdev->dev, bdata->panel_reset,
- GPIOF_OUT_INIT_LOW, "PANEL RESET");
- if (r)
- return r;
- }
-
- if (gpio_is_valid(bdata->ctrl_pwrdown)) {
- r = devm_gpio_request_one(dssdev->dev, bdata->ctrl_pwrdown,
- GPIOF_OUT_INIT_LOW, "PANEL PWRDOWN");
- if (r)
- return r;
- }
-
- return 0;
-}
-
-static void n8x0_panel_remove(struct omap_dss_device *dssdev)
-{
- dev_dbg(dssdev->dev, "remove\n");
-
- dev_set_drvdata(dssdev->dev, NULL);
-}
-
-static int n8x0_panel_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = get_drv_data(dssdev);
- int r;
-
- dev_dbg(dssdev->dev, "enable\n");
-
- mutex_lock(&ddata->lock);
-
- rfbi_bus_lock();
-
- r = n8x0_panel_power_on(dssdev);
-
- rfbi_bus_unlock();
-
- if (r) {
- mutex_unlock(&ddata->lock);
- return r;
- }
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- mutex_unlock(&ddata->lock);
-
- return 0;
-}
-
-static void n8x0_panel_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = get_drv_data(dssdev);
-
- dev_dbg(dssdev->dev, "disable\n");
-
- mutex_lock(&ddata->lock);
-
- rfbi_bus_lock();
-
- n8x0_panel_power_off(dssdev);
-
- rfbi_bus_unlock();
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-
- mutex_unlock(&ddata->lock);
-}
-
-static void n8x0_panel_get_resolution(struct omap_dss_device *dssdev,
- u16 *xres, u16 *yres)
-{
- *xres = dssdev->panel.timings.x_res;
- *yres = dssdev->panel.timings.y_res;
-}
-
-static void update_done(void *data)
-{
- rfbi_bus_unlock();
-}
-
-static int n8x0_panel_update(struct omap_dss_device *dssdev,
- u16 x, u16 y, u16 w, u16 h)
-{
- struct panel_drv_data *ddata = get_drv_data(dssdev);
- u16 dw, dh;
-
- dev_dbg(dssdev->dev, "update\n");
-
- dw = dssdev->panel.timings.x_res;
- dh = dssdev->panel.timings.y_res;
-
- if (x != 0 || y != 0 || w != dw || h != dh) {
- dev_err(dssdev->dev, "invalid update region %d, %d, %d, %d\n",
- x, y, w, h);
- return -EINVAL;
- }
-
- mutex_lock(&ddata->lock);
- rfbi_bus_lock();
-
- blizzard_ctrl_setup_update(dssdev, x, y, w, h);
-
- omap_rfbi_update(dssdev, update_done, NULL);
-
- mutex_unlock(&ddata->lock);
-
- return 0;
-}
-
-static int n8x0_panel_sync(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = get_drv_data(dssdev);
-
- dev_dbg(dssdev->dev, "sync\n");
-
- mutex_lock(&ddata->lock);
- rfbi_bus_lock();
- rfbi_bus_unlock();
- mutex_unlock(&ddata->lock);
-
- return 0;
-}
-
-static struct omap_dss_driver n8x0_panel_driver = {
- .probe = n8x0_panel_probe,
- .remove = n8x0_panel_remove,
-
- .enable = n8x0_panel_enable,
- .disable = n8x0_panel_disable,
-
- .update = n8x0_panel_update,
- .sync = n8x0_panel_sync,
-
- .get_resolution = n8x0_panel_get_resolution,
- .get_recommended_bpp = omapdss_default_get_recommended_bpp,
-
- .driver = {
- .name = "n8x0_panel",
- .owner = THIS_MODULE,
- },
-};
-
-/* PANEL */
-
-static int mipid_spi_probe(struct spi_device *spi)
-{
- int r;
-
- dev_dbg(&spi->dev, "mipid_spi_probe\n");
-
- spi->mode = SPI_MODE_0;
-
- s_drv_data.spidev = spi;
-
- r = omap_dss_register_driver(&n8x0_panel_driver);
- if (r)
- pr_err("n8x0_panel: dss driver registration failed\n");
-
- return r;
-}
-
-static int mipid_spi_remove(struct spi_device *spi)
-{
- dev_dbg(&spi->dev, "mipid_spi_remove\n");
- omap_dss_unregister_driver(&n8x0_panel_driver);
- return 0;
-}
-
-static struct spi_driver mipid_spi_driver = {
- .driver = {
- .name = "lcd_mipid",
- .owner = THIS_MODULE,
- },
- .probe = mipid_spi_probe,
- .remove = mipid_spi_remove,
-};
-module_spi_driver(mipid_spi_driver);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
deleted file mode 100644
index 6b9f7925e91..00000000000
--- a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Support for NEC-nl8048hl11-01b panel driver
- *
- * Copyright (C) 2010 Texas Instruments Inc.
- * Author: Erik Gilling <konkers@android.com>
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/spi/spi.h>
-#include <linux/fb.h>
-#include <linux/gpio.h>
-
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
-
-#define LCD_XRES 800
-#define LCD_YRES 480
-/*
- * NEC PIX Clock Ratings
- * MIN:21.8MHz TYP:23.8MHz MAX:25.7MHz
- */
-#define LCD_PIXEL_CLOCK 23800
-
-static const struct {
- unsigned char addr;
- unsigned char dat;
-} nec_8048_init_seq[] = {
- { 3, 0x01 }, { 0, 0x00 }, { 1, 0x01 }, { 4, 0x00 }, { 5, 0x14 },
- { 6, 0x24 }, { 16, 0xD7 }, { 17, 0x00 }, { 18, 0x00 }, { 19, 0x55 },
- { 20, 0x01 }, { 21, 0x70 }, { 22, 0x1E }, { 23, 0x25 }, { 24, 0x25 },
- { 25, 0x02 }, { 26, 0x02 }, { 27, 0xA0 }, { 32, 0x2F }, { 33, 0x0F },
- { 34, 0x0F }, { 35, 0x0F }, { 36, 0x0F }, { 37, 0x0F }, { 38, 0x0F },
- { 39, 0x00 }, { 40, 0x02 }, { 41, 0x02 }, { 42, 0x02 }, { 43, 0x0F },
- { 44, 0x0F }, { 45, 0x0F }, { 46, 0x0F }, { 47, 0x0F }, { 48, 0x0F },
- { 49, 0x0F }, { 50, 0x00 }, { 51, 0x02 }, { 52, 0x02 }, { 53, 0x02 },
- { 80, 0x0C }, { 83, 0x42 }, { 84, 0x42 }, { 85, 0x41 }, { 86, 0x14 },
- { 89, 0x88 }, { 90, 0x01 }, { 91, 0x00 }, { 92, 0x02 }, { 93, 0x0C },
- { 94, 0x1C }, { 95, 0x27 }, { 98, 0x49 }, { 99, 0x27 }, { 102, 0x76 },
- { 103, 0x27 }, { 112, 0x01 }, { 113, 0x0E }, { 114, 0x02 },
- { 115, 0x0C }, { 118, 0x0C }, { 121, 0x30 }, { 130, 0x00 },
- { 131, 0x00 }, { 132, 0xFC }, { 134, 0x00 }, { 136, 0x00 },
- { 138, 0x00 }, { 139, 0x00 }, { 140, 0x00 }, { 141, 0xFC },
- { 143, 0x00 }, { 145, 0x00 }, { 147, 0x00 }, { 148, 0x00 },
- { 149, 0x00 }, { 150, 0xFC }, { 152, 0x00 }, { 154, 0x00 },
- { 156, 0x00 }, { 157, 0x00 }, { 2, 0x00 },
-};
-
-/*
- * NEC NL8048HL11-01B Manual
- * defines HFP, HSW, HBP, VFP, VSW, VBP as shown below
- */
-
-static struct omap_video_timings nec_8048_panel_timings = {
- /* 800 x 480 @ 60 Hz Reduced blanking VESA CVT 0.31M3-R */
- .x_res = LCD_XRES,
- .y_res = LCD_YRES,
- .pixel_clock = LCD_PIXEL_CLOCK,
- .hfp = 6,
- .hsw = 1,
- .hbp = 4,
- .vfp = 3,
- .vsw = 1,
- .vbp = 4,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
-};
-
-static inline struct panel_nec_nl8048_data
-*get_panel_data(const struct omap_dss_device *dssdev)
-{
- return (struct panel_nec_nl8048_data *) dssdev->data;
-}
-
-static int nec_8048_panel_probe(struct omap_dss_device *dssdev)
-{
- struct panel_nec_nl8048_data *pd = get_panel_data(dssdev);
- int r;
-
- if (!pd)
- return -EINVAL;
-
- dssdev->panel.timings = nec_8048_panel_timings;
-
- if (gpio_is_valid(pd->qvga_gpio)) {
- r = devm_gpio_request_one(dssdev->dev, pd->qvga_gpio,
- GPIOF_OUT_INIT_HIGH, "lcd QVGA");
- if (r)
- return r;
- }
-
- if (gpio_is_valid(pd->res_gpio)) {
- r = devm_gpio_request_one(dssdev->dev, pd->res_gpio,
- GPIOF_OUT_INIT_LOW, "lcd RES");
- if (r)
- return r;
- }
-
- return 0;
-}
-
-static void nec_8048_panel_remove(struct omap_dss_device *dssdev)
-{
-}
-
-static int nec_8048_panel_power_on(struct omap_dss_device *dssdev)
-{
- struct panel_nec_nl8048_data *pd = get_panel_data(dssdev);
- int r;
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- return 0;
-
- omapdss_dpi_set_timings(dssdev, &dssdev->panel.timings);
- omapdss_dpi_set_data_lines(dssdev, dssdev->phy.dpi.data_lines);
-
- r = omapdss_dpi_display_enable(dssdev);
- if (r)
- goto err0;
-
- if (gpio_is_valid(pd->res_gpio))
- gpio_set_value_cansleep(pd->res_gpio, 1);
-
- return 0;
-
-err0:
- return r;
-}
-
-static void nec_8048_panel_power_off(struct omap_dss_device *dssdev)
-{
- struct panel_nec_nl8048_data *pd = get_panel_data(dssdev);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return;
-
- if (gpio_is_valid(pd->res_gpio))
- gpio_set_value_cansleep(pd->res_gpio, 0);
-
- omapdss_dpi_display_disable(dssdev);
-}
-
-static int nec_8048_panel_enable(struct omap_dss_device *dssdev)
-{
- int r;
-
- r = nec_8048_panel_power_on(dssdev);
- if (r)
- return r;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
-}
-
-static void nec_8048_panel_disable(struct omap_dss_device *dssdev)
-{
- nec_8048_panel_power_off(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static int nec_8048_recommended_bpp(struct omap_dss_device *dssdev)
-{
- return 16;
-}
-
-static struct omap_dss_driver nec_8048_driver = {
- .probe = nec_8048_panel_probe,
- .remove = nec_8048_panel_remove,
- .enable = nec_8048_panel_enable,
- .disable = nec_8048_panel_disable,
- .get_recommended_bpp = nec_8048_recommended_bpp,
-
- .driver = {
- .name = "NEC_8048_panel",
- .owner = THIS_MODULE,
- },
-};
-
-static int nec_8048_spi_send(struct spi_device *spi, unsigned char reg_addr,
- unsigned char reg_data)
-{
- int ret = 0;
- unsigned int cmd = 0, data = 0;
-
- cmd = 0x0000 | reg_addr; /* register address write */
-	data = 0x0100 | reg_data; /* register data write */
- data = (cmd << 16) | data;
-
- ret = spi_write(spi, (unsigned char *)&data, 4);
- if (ret)
- pr_err("error in spi_write %x\n", data);
-
- return ret;
-}
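Annotation (not part of the patch): the word layout produced above, shown as a small standalone helper (illustration only):

        static u32 nec_8048_pack_word(unsigned char reg_addr, unsigned char reg_data)
        {
                u32 cmd  = 0x0000 | reg_addr;   /* register address write */
                u32 data = 0x0100 | reg_data;   /* register data write */

                /* e.g. the first init entry { 3, 0x01 } packs to 0x00030101 */
                return (cmd << 16) | data;
        }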
-
-static int init_nec_8048_wvga_lcd(struct spi_device *spi)
-{
- unsigned int i;
- /* Initialization Sequence */
- /* nec_8048_spi_send(spi, REG, VAL) */
- for (i = 0; i < (ARRAY_SIZE(nec_8048_init_seq) - 1); i++)
- nec_8048_spi_send(spi, nec_8048_init_seq[i].addr,
- nec_8048_init_seq[i].dat);
- udelay(20);
- nec_8048_spi_send(spi, nec_8048_init_seq[i].addr,
- nec_8048_init_seq[i].dat);
- return 0;
-}
-
-static int nec_8048_spi_probe(struct spi_device *spi)
-{
- spi->mode = SPI_MODE_0;
- spi->bits_per_word = 32;
- spi_setup(spi);
-
- init_nec_8048_wvga_lcd(spi);
-
- return omap_dss_register_driver(&nec_8048_driver);
-}
-
-static int nec_8048_spi_remove(struct spi_device *spi)
-{
- omap_dss_unregister_driver(&nec_8048_driver);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-
-static int nec_8048_spi_suspend(struct device *dev)
-{
- struct spi_device *spi = to_spi_device(dev);
-
- nec_8048_spi_send(spi, 2, 0x01);
- mdelay(40);
-
- return 0;
-}
-
-static int nec_8048_spi_resume(struct device *dev)
-{
- struct spi_device *spi = to_spi_device(dev);
-
- /* reinitialize the panel */
- spi_setup(spi);
- nec_8048_spi_send(spi, 2, 0x00);
- init_nec_8048_wvga_lcd(spi);
-
- return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(nec_8048_spi_pm_ops, nec_8048_spi_suspend,
- nec_8048_spi_resume);
-#define NEC_8048_SPI_PM_OPS (&nec_8048_spi_pm_ops)
-#else
-#define NEC_8048_SPI_PM_OPS NULL
-#endif
-
-static struct spi_driver nec_8048_spi_driver = {
- .probe = nec_8048_spi_probe,
- .remove = nec_8048_spi_remove,
- .driver = {
- .name = "nec_8048_spi",
- .owner = THIS_MODULE,
- .pm = NEC_8048_SPI_PM_OPS,
- },
-};
-
-module_spi_driver(nec_8048_spi_driver);
-
-MODULE_AUTHOR("Erik Gilling <konkers@android.com>");
-MODULE_DESCRIPTION("NEC-nl8048hl11-01b Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-picodlp.c b/drivers/video/omap2/displays/panel-picodlp.c
deleted file mode 100644
index 153e9bea0f6..00000000000
--- a/drivers/video/omap2/displays/panel-picodlp.c
+++ /dev/null
@@ -1,559 +0,0 @@
-/*
- * picodlp panel driver
- * picodlp_i2c_driver: i2c_client driver
- *
- * Copyright (C) 2009-2011 Texas Instruments
- * Author: Mythri P K <mythripk@ti.com>
- * Mayuresh Janorkar <mayur@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/input.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/firmware.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-#include <linux/i2c.h>
-#include <linux/delay.h>
-#include <linux/gpio.h>
-
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
-
-#include "panel-picodlp.h"
-
-struct picodlp_data {
- struct mutex lock;
- struct i2c_client *picodlp_i2c_client;
-};
-
-static struct i2c_board_info picodlp_i2c_board_info = {
- I2C_BOARD_INFO("picodlp_i2c_driver", 0x1b),
-};
-
-struct picodlp_i2c_data {
- struct mutex xfer_lock;
-};
-
-static struct i2c_device_id picodlp_i2c_id[] = {
- { "picodlp_i2c_driver", 0 },
- { }
-};
-
-struct picodlp_i2c_command {
- u8 reg;
- u32 value;
-};
-
-static struct omap_video_timings pico_ls_timings = {
- .x_res = 864,
- .y_res = 480,
- .hsw = 7,
- .hfp = 11,
- .hbp = 7,
-
- .pixel_clock = 19200,
-
- .vsw = 2,
- .vfp = 3,
- .vbp = 14,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
-};
-
-static inline struct picodlp_panel_data
- *get_panel_data(const struct omap_dss_device *dssdev)
-{
- return (struct picodlp_panel_data *) dssdev->data;
-}
-
-static u32 picodlp_i2c_read(struct i2c_client *client, u8 reg)
-{
- u8 read_cmd[] = {READ_REG_SELECT, reg}, data[4];
- struct picodlp_i2c_data *picodlp_i2c_data = i2c_get_clientdata(client);
- struct i2c_msg msg[2];
-
- mutex_lock(&picodlp_i2c_data->xfer_lock);
-
- msg[0].addr = client->addr;
- msg[0].flags = 0;
- msg[0].len = 2;
- msg[0].buf = read_cmd;
-
- msg[1].addr = client->addr;
- msg[1].flags = I2C_M_RD;
- msg[1].len = 4;
- msg[1].buf = data;
-
- i2c_transfer(client->adapter, msg, 2);
- mutex_unlock(&picodlp_i2c_data->xfer_lock);
- return (data[3] | (data[2] << 8) | (data[1] << 16) | (data[0] << 24));
-}
-
-static int picodlp_i2c_write_block(struct i2c_client *client,
- u8 *data, int len)
-{
- struct i2c_msg msg;
- int i, r, msg_count = 1;
-
- struct picodlp_i2c_data *picodlp_i2c_data = i2c_get_clientdata(client);
-
- if (len < 1 || len > 32) {
- dev_err(&client->dev,
-			"invalid picodlp_i2c_write_block len %d\n", len);
- return -EIO;
- }
- mutex_lock(&picodlp_i2c_data->xfer_lock);
-
- msg.addr = client->addr;
- msg.flags = 0;
- msg.len = len;
- msg.buf = data;
- r = i2c_transfer(client->adapter, &msg, msg_count);
- mutex_unlock(&picodlp_i2c_data->xfer_lock);
-
- /*
- * i2c_transfer returns:
- * number of messages sent in case of success
- * a negative error number in case of failure
- */
- if (r != msg_count)
- goto err;
-
- /* In case of success */
- for (i = 0; i < len; i++)
- dev_dbg(&client->dev,
- "addr %x bw 0x%02x[%d]: 0x%02x\n",
- client->addr, data[0] + i, i, data[i]);
-
- return 0;
-err:
- dev_err(&client->dev, "picodlp_i2c_write error\n");
- return r;
-}
-
-static int picodlp_i2c_write(struct i2c_client *client, u8 reg, u32 value)
-{
- u8 data[5];
- int i;
-
- data[0] = reg;
- for (i = 1; i < 5; i++)
- data[i] = (value >> (32 - (i) * 8)) & 0xFF;
-
- return picodlp_i2c_write_block(client, data, 5);
-}
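Annotation (not part of the patch): the five bytes built above are the register address followed by the 32-bit value most-significant byte first, matching the byte order picodlp_i2c_read() reassembles. A standalone sketch of the same layout (illustration only):

        static void picodlp_pack(u8 reg, u32 value, u8 out[5])
        {
                out[0] = reg;           /* register address */
                out[1] = value >> 24;   /* 32-bit value, MSB first */
                out[2] = value >> 16;
                out[3] = value >> 8;
                out[4] = value;         /* e.g. { SEQ_VECTOR, 0x100 } -> 83 00 00 01 00 */
        }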
-
-static int picodlp_i2c_write_array(struct i2c_client *client,
- const struct picodlp_i2c_command commands[],
- int count)
-{
- int i, r = 0;
- for (i = 0; i < count; i++) {
- r = picodlp_i2c_write(client, commands[i].reg,
- commands[i].value);
- if (r)
- return r;
- }
- return r;
-}
-
-static int picodlp_wait_for_dma_done(struct i2c_client *client)
-{
- u8 trial = 100;
-
- do {
- msleep(1);
- if (!trial--)
- return -ETIMEDOUT;
- } while (picodlp_i2c_read(client, MAIN_STATUS) & DMA_STATUS);
-
- return 0;
-}
-
-/**
- * picodlp_i2c_init() - i2c initialization routine
- * @client: i2c client used for communication
- *
- * Return: 0 on success, or a negative error code on failure.
- */
-static int picodlp_i2c_init(struct i2c_client *client)
-{
- int r;
- static const struct picodlp_i2c_command init_cmd_set1[] = {
- {SOFT_RESET, 1},
- {DMD_PARK_TRIGGER, 1},
- {MISC_REG, 5},
- {SEQ_CONTROL, 0},
- {SEQ_VECTOR, 0x100},
- {DMD_BLOCK_COUNT, 7},
- {DMD_VCC_CONTROL, 0x109},
- {DMD_PARK_PULSE_COUNT, 0xA},
- {DMD_PARK_PULSE_WIDTH, 0xB},
- {DMD_PARK_DELAY, 0x2ED},
- {DMD_SHADOW_ENABLE, 0},
- {FLASH_OPCODE, 0xB},
- {FLASH_DUMMY_BYTES, 1},
- {FLASH_ADDR_BYTES, 3},
- {PBC_CONTROL, 0},
- {FLASH_START_ADDR, CMT_LUT_0_START_ADDR},
- {FLASH_READ_BYTES, CMT_LUT_0_SIZE},
- {CMT_SPLASH_LUT_START_ADDR, 0},
- {CMT_SPLASH_LUT_DEST_SELECT, CMT_LUT_ALL},
- {PBC_CONTROL, 1},
- };
-
- static const struct picodlp_i2c_command init_cmd_set2[] = {
- {PBC_CONTROL, 0},
- {CMT_SPLASH_LUT_DEST_SELECT, 0},
- {PBC_CONTROL, 0},
- {FLASH_START_ADDR, SEQUENCE_0_START_ADDR},
- {FLASH_READ_BYTES, SEQUENCE_0_SIZE},
- {SEQ_RESET_LUT_START_ADDR, 0},
- {SEQ_RESET_LUT_DEST_SELECT, SEQ_SEQ_LUT},
- {PBC_CONTROL, 1},
- };
-
- static const struct picodlp_i2c_command init_cmd_set3[] = {
- {PBC_CONTROL, 0},
- {SEQ_RESET_LUT_DEST_SELECT, 0},
- {PBC_CONTROL, 0},
- {FLASH_START_ADDR, DRC_TABLE_0_START_ADDR},
- {FLASH_READ_BYTES, DRC_TABLE_0_SIZE},
- {SEQ_RESET_LUT_START_ADDR, 0},
- {SEQ_RESET_LUT_DEST_SELECT, SEQ_DRC_LUT_ALL},
- {PBC_CONTROL, 1},
- };
-
- static const struct picodlp_i2c_command init_cmd_set4[] = {
- {PBC_CONTROL, 0},
- {SEQ_RESET_LUT_DEST_SELECT, 0},
- {SDC_ENABLE, 1},
- {AGC_CTRL, 7},
- {CCA_C1A, 0x100},
- {CCA_C1B, 0x0},
- {CCA_C1C, 0x0},
- {CCA_C2A, 0x0},
- {CCA_C2B, 0x100},
- {CCA_C2C, 0x0},
- {CCA_C3A, 0x0},
- {CCA_C3B, 0x0},
- {CCA_C3C, 0x100},
- {CCA_C7A, 0x100},
- {CCA_C7B, 0x100},
- {CCA_C7C, 0x100},
- {CCA_ENABLE, 1},
- {CPU_IF_MODE, 1},
- {SHORT_FLIP, 1},
- {CURTAIN_CONTROL, 0},
- {DMD_PARK_TRIGGER, 0},
- {R_DRIVE_CURRENT, 0x298},
- {G_DRIVE_CURRENT, 0x298},
- {B_DRIVE_CURRENT, 0x298},
- {RGB_DRIVER_ENABLE, 7},
- {SEQ_CONTROL, 0},
- {ACTGEN_CONTROL, 0x10},
- {SEQUENCE_MODE, SEQ_LOCK},
- {DATA_FORMAT, RGB888},
- {INPUT_RESOLUTION, WVGA_864_LANDSCAPE},
- {INPUT_SOURCE, PARALLEL_RGB},
- {CPU_IF_SYNC_METHOD, 1},
- {SEQ_CONTROL, 1}
- };
-
- r = picodlp_i2c_write_array(client, init_cmd_set1,
- ARRAY_SIZE(init_cmd_set1));
- if (r)
- return r;
-
- r = picodlp_wait_for_dma_done(client);
- if (r)
- return r;
-
- r = picodlp_i2c_write_array(client, init_cmd_set2,
- ARRAY_SIZE(init_cmd_set2));
- if (r)
- return r;
-
- r = picodlp_wait_for_dma_done(client);
- if (r)
- return r;
-
- r = picodlp_i2c_write_array(client, init_cmd_set3,
- ARRAY_SIZE(init_cmd_set3));
- if (r)
- return r;
-
- r = picodlp_wait_for_dma_done(client);
- if (r)
- return r;
-
- r = picodlp_i2c_write_array(client, init_cmd_set4,
- ARRAY_SIZE(init_cmd_set4));
- if (r)
- return r;
-
- return 0;
-}
-
-static int picodlp_i2c_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct picodlp_i2c_data *picodlp_i2c_data;
-
- picodlp_i2c_data = kzalloc(sizeof(struct picodlp_i2c_data), GFP_KERNEL);
-
- if (!picodlp_i2c_data)
- return -ENOMEM;
-
- mutex_init(&picodlp_i2c_data->xfer_lock);
- i2c_set_clientdata(client, picodlp_i2c_data);
-
- return 0;
-}
-
-static int picodlp_i2c_remove(struct i2c_client *client)
-{
- struct picodlp_i2c_data *picodlp_i2c_data =
- i2c_get_clientdata(client);
- kfree(picodlp_i2c_data);
- return 0;
-}
-
-static struct i2c_driver picodlp_i2c_driver = {
- .driver = {
- .name = "picodlp_i2c_driver",
- },
- .probe = picodlp_i2c_probe,
- .remove = picodlp_i2c_remove,
- .id_table = picodlp_i2c_id,
-};
-
-static int picodlp_panel_power_on(struct omap_dss_device *dssdev)
-{
- int r, trial = 100;
- struct picodlp_data *picod = dev_get_drvdata(dssdev->dev);
- struct picodlp_panel_data *picodlp_pdata = get_panel_data(dssdev);
-
- gpio_set_value(picodlp_pdata->pwrgood_gpio, 0);
- msleep(1);
- gpio_set_value(picodlp_pdata->pwrgood_gpio, 1);
-
- while (!gpio_get_value(picodlp_pdata->emu_done_gpio)) {
- if (!trial--) {
-			dev_err(dssdev->dev, "emu_done signal not going high\n");
- return -ETIMEDOUT;
- }
- msleep(5);
- }
- /*
-	 * As per the dpp2600 programming guide, we must sleep for 1000 ms after
-	 * the emu_done signal goes high; only then can i2c commands be sent
-	 * successfully to the dpp2600.
- */
- msleep(1000);
-
- omapdss_dpi_set_timings(dssdev, &dssdev->panel.timings);
- omapdss_dpi_set_data_lines(dssdev, dssdev->phy.dpi.data_lines);
-
- r = omapdss_dpi_display_enable(dssdev);
- if (r) {
- dev_err(dssdev->dev, "failed to enable DPI\n");
- goto err1;
- }
-
- r = picodlp_i2c_init(picod->picodlp_i2c_client);
- if (r)
- goto err;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return r;
-err:
- omapdss_dpi_display_disable(dssdev);
-err1:
- return r;
-}
-
-static void picodlp_panel_power_off(struct omap_dss_device *dssdev)
-{
- struct picodlp_panel_data *picodlp_pdata = get_panel_data(dssdev);
-
- omapdss_dpi_display_disable(dssdev);
-
- gpio_set_value(picodlp_pdata->emu_done_gpio, 0);
- gpio_set_value(picodlp_pdata->pwrgood_gpio, 0);
-}
-
-static int picodlp_panel_probe(struct omap_dss_device *dssdev)
-{
- struct picodlp_data *picod;
- struct picodlp_panel_data *picodlp_pdata = get_panel_data(dssdev);
- struct i2c_adapter *adapter;
- struct i2c_client *picodlp_i2c_client;
- int r, picodlp_adapter_id;
-
- dssdev->panel.timings = pico_ls_timings;
-
- if (!picodlp_pdata)
- return -EINVAL;
-
- picod = devm_kzalloc(dssdev->dev, sizeof(*picod), GFP_KERNEL);
- if (!picod)
- return -ENOMEM;
-
- mutex_init(&picod->lock);
-
- picodlp_adapter_id = picodlp_pdata->picodlp_adapter_id;
-
- adapter = i2c_get_adapter(picodlp_adapter_id);
- if (!adapter) {
- dev_err(dssdev->dev, "can't get i2c adapter\n");
- return -ENODEV;
- }
-
- picodlp_i2c_client = i2c_new_device(adapter, &picodlp_i2c_board_info);
- if (!picodlp_i2c_client) {
-		dev_err(dssdev->dev,
-			"can't add i2c device: picodlp_i2c_client is NULL\n");
- return -ENODEV;
- }
-
- picod->picodlp_i2c_client = picodlp_i2c_client;
-
- dev_set_drvdata(dssdev->dev, picod);
-
- if (gpio_is_valid(picodlp_pdata->emu_done_gpio)) {
- r = devm_gpio_request_one(dssdev->dev,
- picodlp_pdata->emu_done_gpio,
- GPIOF_IN, "DLP EMU DONE");
- if (r)
- return r;
- }
-
- if (gpio_is_valid(picodlp_pdata->pwrgood_gpio)) {
- r = devm_gpio_request_one(dssdev->dev,
- picodlp_pdata->pwrgood_gpio,
- GPIOF_OUT_INIT_LOW, "DLP PWRGOOD");
- if (r)
- return r;
- }
-
- return 0;
-}
-
-static void picodlp_panel_remove(struct omap_dss_device *dssdev)
-{
- struct picodlp_data *picod = dev_get_drvdata(dssdev->dev);
-
- i2c_unregister_device(picod->picodlp_i2c_client);
- dev_set_drvdata(dssdev->dev, NULL);
- dev_dbg(dssdev->dev, "removing picodlp panel\n");
-}
-
-static int picodlp_panel_enable(struct omap_dss_device *dssdev)
-{
- struct picodlp_data *picod = dev_get_drvdata(dssdev->dev);
- int r;
-
- dev_dbg(dssdev->dev, "enabling picodlp panel\n");
-
- mutex_lock(&picod->lock);
- if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
- mutex_unlock(&picod->lock);
- return -EINVAL;
- }
-
- r = picodlp_panel_power_on(dssdev);
- mutex_unlock(&picod->lock);
-
- return r;
-}
-
-static void picodlp_panel_disable(struct omap_dss_device *dssdev)
-{
- struct picodlp_data *picod = dev_get_drvdata(dssdev->dev);
-
- mutex_lock(&picod->lock);
- /* Turn off DLP Power */
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- picodlp_panel_power_off(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
- mutex_unlock(&picod->lock);
-
- dev_dbg(dssdev->dev, "disabling picodlp panel\n");
-}
-
-static void picodlp_get_resolution(struct omap_dss_device *dssdev,
- u16 *xres, u16 *yres)
-{
- *xres = dssdev->panel.timings.x_res;
- *yres = dssdev->panel.timings.y_res;
-}
-
-static struct omap_dss_driver picodlp_driver = {
- .probe = picodlp_panel_probe,
- .remove = picodlp_panel_remove,
-
- .enable = picodlp_panel_enable,
- .disable = picodlp_panel_disable,
-
- .get_resolution = picodlp_get_resolution,
-
- .driver = {
- .name = "picodlp_panel",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init picodlp_init(void)
-{
- int r = 0;
-
- r = i2c_add_driver(&picodlp_i2c_driver);
- if (r) {
-		printk(KERN_WARNING "picodlp_i2c_driver registration failed\n");
- return r;
- }
-
- r = omap_dss_register_driver(&picodlp_driver);
- if (r)
- i2c_del_driver(&picodlp_i2c_driver);
-
- return r;
-}
-
-static void __exit picodlp_exit(void)
-{
- i2c_del_driver(&picodlp_i2c_driver);
- omap_dss_unregister_driver(&picodlp_driver);
-}
-
-module_init(picodlp_init);
-module_exit(picodlp_exit);
-
-MODULE_AUTHOR("Mythri P K <mythripk@ti.com>");
-MODULE_DESCRIPTION("picodlp driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-picodlp.h b/drivers/video/omap2/displays/panel-picodlp.h
deleted file mode 100644
index a34b431a726..00000000000
--- a/drivers/video/omap2/displays/panel-picodlp.h
+++ /dev/null
@@ -1,288 +0,0 @@
-/*
- * Header file required by picodlp panel driver
- *
- * Copyright (C) 2009-2011 Texas Instruments
- * Author: Mythri P K <mythripk@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
-*/
-
-#ifndef __OMAP2_DISPLAY_PANEL_PICODLP_H
-#define __OMAP2_DISPLAY_PANEL_PICODLP_H
-
-/* Commands used for configuring picodlp panel */
-
-#define MAIN_STATUS 0x03
-#define PBC_CONTROL 0x08
-#define INPUT_SOURCE 0x0B
-#define INPUT_RESOLUTION 0x0C
-#define DATA_FORMAT 0x0D
-#define IMG_ROTATION 0x0E
-#define LONG_FLIP 0x0F
-#define SHORT_FLIP 0x10
-#define TEST_PAT_SELECT 0x11
-#define R_DRIVE_CURRENT 0x12
-#define G_DRIVE_CURRENT 0x13
-#define B_DRIVE_CURRENT 0x14
-#define READ_REG_SELECT 0x15
-#define RGB_DRIVER_ENABLE 0x16
-
-#define CPU_IF_MODE 0x18
-#define FRAME_RATE 0x19
-#define CPU_IF_SYNC_METHOD 0x1A
-#define CPU_IF_SOF 0x1B
-#define CPU_IF_EOF 0x1C
-#define CPU_IF_SLEEP 0x1D
-
-#define SEQUENCE_MODE 0x1E
-#define SOFT_RESET 0x1F
-#define FRONT_END_RESET 0x21
-#define AUTO_PWR_ENABLE 0x22
-
-#define VSYNC_LINE_DELAY 0x23
-#define CPU_PI_HORIZ_START 0x24
-#define CPU_PI_VERT_START 0x25
-#define CPU_PI_HORIZ_WIDTH 0x26
-#define CPU_PI_VERT_HEIGHT 0x27
-
-#define PIXEL_MASK_CROP 0x28
-#define CROP_FIRST_LINE 0x29
-#define CROP_LAST_LINE 0x2A
-#define CROP_FIRST_PIXEL 0x2B
-#define CROP_LAST_PIXEL 0x2C
-#define DMD_PARK_TRIGGER 0x2D
-
-#define MISC_REG 0x30
-
-/* AGC registers */
-#define AGC_CTRL 0x50
-#define AGC_CLIPPED_PIXS 0x55
-#define AGC_BRIGHT_PIXS 0x56
-#define AGC_BG_PIXS 0x57
-#define AGC_SAFETY_MARGIN 0x17
-
-/* Color Coordinate Adjustment registers */
-#define CCA_ENABLE 0x5E
-#define CCA_C1A 0x5F
-#define CCA_C1B 0x60
-#define CCA_C1C 0x61
-#define CCA_C2A 0x62
-#define CCA_C2B 0x63
-#define CCA_C2C 0x64
-#define CCA_C3A 0x65
-#define CCA_C3B 0x66
-#define CCA_C3C 0x67
-#define CCA_C7A 0x71
-#define CCA_C7B 0x72
-#define CCA_C7C 0x73
-
-/*
- * The DLP Pico Processor 2600 comes with flash; DMA operations from flash
- * are used to access the Look Up Tables defined below.
- */
-#define DMA_STATUS 0x100
-#define FLASH_ADDR_BYTES 0x74
-#define FLASH_DUMMY_BYTES 0x75
-#define FLASH_WRITE_BYTES 0x76
-#define FLASH_READ_BYTES 0x77
-#define FLASH_OPCODE 0x78
-#define FLASH_START_ADDR 0x79
-#define FLASH_DUMMY2 0x7A
-#define FLASH_WRITE_DATA 0x7B
-
-#define TEMPORAL_DITH_DISABLE 0x7E
-#define SEQ_CONTROL 0x82
-#define SEQ_VECTOR 0x83
-
-/* DMD is Digital Micromirror Device */
-#define DMD_BLOCK_COUNT 0x84
-#define DMD_VCC_CONTROL 0x86
-#define DMD_PARK_PULSE_COUNT 0x87
-#define DMD_PARK_PULSE_WIDTH 0x88
-#define DMD_PARK_DELAY 0x89
-#define DMD_SHADOW_ENABLE 0x8E
-#define SEQ_STATUS 0x8F
-#define FLASH_CLOCK_CONTROL 0x98
-#define DMD_PARK 0x2D
-
-#define SDRAM_BIST_ENABLE 0x46
-#define DDR_DRIVER_STRENGTH 0x9A
-#define SDC_ENABLE 0x9D
-#define SDC_BUFF_SWAP_DISABLE 0xA3
-#define CURTAIN_CONTROL 0xA6
-#define DDR_BUS_SWAP_ENABLE 0xA7
-#define DMD_TRC_ENABLE 0xA8
-#define DMD_BUS_SWAP_ENABLE 0xA9
-
-#define ACTGEN_ENABLE 0xAE
-#define ACTGEN_CONTROL 0xAF
-#define ACTGEN_HORIZ_BP 0xB0
-#define ACTGEN_VERT_BP 0xB1
-
-/* Look Up Table access */
-#define CMT_SPLASH_LUT_START_ADDR 0xFA
-#define CMT_SPLASH_LUT_DEST_SELECT 0xFB
-#define CMT_SPLASH_LUT_DATA 0xFC
-#define SEQ_RESET_LUT_START_ADDR 0xFD
-#define SEQ_RESET_LUT_DEST_SELECT 0xFE
-#define SEQ_RESET_LUT_DATA 0xFF
-
-/* Input source definitions */
-#define PARALLEL_RGB 0
-#define INT_TEST_PATTERN 1
-#define SPLASH_SCREEN 2
-#define CPU_INTF 3
-#define BT656 4
-
-/* Standard input resolution definitions */
-#define QWVGA_LANDSCAPE 3 /* (427h*240v) */
-#define WVGA_864_LANDSCAPE 21 /* (864h*480v) */
-#define WVGA_DMD_OPTICAL_TEST 35 /* (608h*684v) */
-
-/* Standard data format definitions */
-#define RGB565 0
-#define RGB666 1
-#define RGB888 2
-
-/* Test Pattern definitions */
-#define TPG_CHECKERBOARD 0
-#define TPG_BLACK 1
-#define TPG_WHITE 2
-#define TPG_RED 3
-#define TPG_BLUE 4
-#define TPG_GREEN 5
-#define TPG_VLINES_BLACK 6
-#define TPG_HLINES_BLACK 7
-#define TPG_VLINES_ALT 8
-#define TPG_HLINES_ALT 9
-#define TPG_DIAG_LINES 10
-#define TPG_GREYRAMP_VERT 11
-#define TPG_GREYRAMP_HORIZ 12
-#define TPG_ANSI_CHECKERBOARD 13
-
-/* sequence mode definitions */
-#define SEQ_FREE_RUN 0
-#define SEQ_LOCK 1
-
-/* curtain color definitions */
-#define CURTAIN_BLACK 0
-#define CURTAIN_RED 1
-#define CURTAIN_GREEN 2
-#define CURTAIN_BLUE 3
-#define CURTAIN_YELLOW 4
-#define CURTAIN_MAGENTA 5
-#define CURTAIN_CYAN 6
-#define CURTAIN_WHITE 7
-
-/* LUT definitions */
-#define CMT_LUT_NONE 0
-#define CMT_LUT_GREEN 1
-#define CMT_LUT_RED 2
-#define CMT_LUT_BLUE 3
-#define CMT_LUT_ALL 4
-#define SPLASH_LUT 5
-
-#define SEQ_LUT_NONE 0
-#define SEQ_DRC_LUT_0 1
-#define SEQ_DRC_LUT_1 2
-#define SEQ_DRC_LUT_2 3
-#define SEQ_DRC_LUT_3 4
-#define SEQ_SEQ_LUT 5
-#define SEQ_DRC_LUT_ALL 6
-#define WPC_PROGRAM_LUT 7
-
-#define BITSTREAM_START_ADDR 0x00000000
-#define BITSTREAM_SIZE 0x00040000
-
-#define WPC_FW_0_START_ADDR 0x00040000
-#define WPC_FW_0_SIZE 0x00000ce8
-
-#define SEQUENCE_0_START_ADDR 0x00044000
-#define SEQUENCE_0_SIZE 0x00001000
-
-#define SEQUENCE_1_START_ADDR 0x00045000
-#define SEQUENCE_1_SIZE 0x00000d10
-
-#define SEQUENCE_2_START_ADDR 0x00046000
-#define SEQUENCE_2_SIZE 0x00000d10
-
-#define SEQUENCE_3_START_ADDR 0x00047000
-#define SEQUENCE_3_SIZE 0x00000d10
-
-#define SEQUENCE_4_START_ADDR 0x00048000
-#define SEQUENCE_4_SIZE 0x00000d10
-
-#define SEQUENCE_5_START_ADDR 0x00049000
-#define SEQUENCE_5_SIZE 0x00000d10
-
-#define SEQUENCE_6_START_ADDR 0x0004a000
-#define SEQUENCE_6_SIZE 0x00000d10
-
-#define CMT_LUT_0_START_ADDR 0x0004b200
-#define CMT_LUT_0_SIZE 0x00000600
-
-#define CMT_LUT_1_START_ADDR 0x0004b800
-#define CMT_LUT_1_SIZE 0x00000600
-
-#define CMT_LUT_2_START_ADDR 0x0004be00
-#define CMT_LUT_2_SIZE 0x00000600
-
-#define CMT_LUT_3_START_ADDR 0x0004c400
-#define CMT_LUT_3_SIZE 0x00000600
-
-#define CMT_LUT_4_START_ADDR 0x0004ca00
-#define CMT_LUT_4_SIZE 0x00000600
-
-#define CMT_LUT_5_START_ADDR 0x0004d000
-#define CMT_LUT_5_SIZE 0x00000600
-
-#define CMT_LUT_6_START_ADDR 0x0004d600
-#define CMT_LUT_6_SIZE 0x00000600
-
-#define DRC_TABLE_0_START_ADDR 0x0004dc00
-#define DRC_TABLE_0_SIZE 0x00000100
-
-#define SPLASH_0_START_ADDR 0x0004dd00
-#define SPLASH_0_SIZE 0x00032280
-
-#define SEQUENCE_7_START_ADDR 0x00080000
-#define SEQUENCE_7_SIZE 0x00000d10
-
-#define SEQUENCE_8_START_ADDR 0x00081800
-#define SEQUENCE_8_SIZE 0x00000d10
-
-#define SEQUENCE_9_START_ADDR 0x00083000
-#define SEQUENCE_9_SIZE 0x00000d10
-
-#define CMT_LUT_7_START_ADDR 0x0008e000
-#define CMT_LUT_7_SIZE 0x00000600
-
-#define CMT_LUT_8_START_ADDR 0x0008e800
-#define CMT_LUT_8_SIZE 0x00000600
-
-#define CMT_LUT_9_START_ADDR 0x0008f000
-#define CMT_LUT_9_SIZE 0x00000600
-
-#define SPLASH_1_START_ADDR 0x0009a000
-#define SPLASH_1_SIZE 0x00032280
-
-#define SPLASH_2_START_ADDR 0x000cd000
-#define SPLASH_2_SIZE 0x00032280
-
-#define SPLASH_3_START_ADDR 0x00100000
-#define SPLASH_3_SIZE 0x00032280
-
-#define OPT_SPLASH_0_START_ADDR 0x00134000
-#define OPT_SPLASH_0_SIZE 0x000cb100
-
-#endif
diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
deleted file mode 100644
index 78f0a677975..00000000000
--- a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * LCD panel driver for Sharp LS037V7DW01
- *
- * Copyright (C) 2008 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/fb.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
-
-static struct omap_video_timings sharp_ls_timings = {
- .x_res = 480,
- .y_res = 640,
-
- .pixel_clock = 19200,
-
- .hsw = 2,
- .hfp = 1,
- .hbp = 28,
-
- .vsw = 1,
- .vfp = 1,
- .vbp = 1,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
-};
-
-static inline struct panel_sharp_ls037v7dw01_data
-*get_panel_data(const struct omap_dss_device *dssdev)
-{
- return (struct panel_sharp_ls037v7dw01_data *) dssdev->data;
-}
-
-static int sharp_ls_panel_probe(struct omap_dss_device *dssdev)
-{
- struct panel_sharp_ls037v7dw01_data *pd = get_panel_data(dssdev);
- int r;
-
- if (!pd)
- return -EINVAL;
-
- dssdev->panel.timings = sharp_ls_timings;
-
- if (gpio_is_valid(pd->mo_gpio)) {
- r = devm_gpio_request_one(dssdev->dev, pd->mo_gpio,
- GPIOF_OUT_INIT_LOW, "lcd MO");
- if (r)
- return r;
- }
-
- if (gpio_is_valid(pd->lr_gpio)) {
- r = devm_gpio_request_one(dssdev->dev, pd->lr_gpio,
- GPIOF_OUT_INIT_HIGH, "lcd LR");
- if (r)
- return r;
- }
-
- if (gpio_is_valid(pd->ud_gpio)) {
- r = devm_gpio_request_one(dssdev->dev, pd->ud_gpio,
- GPIOF_OUT_INIT_HIGH, "lcd UD");
- if (r)
- return r;
- }
-
- if (gpio_is_valid(pd->resb_gpio)) {
- r = devm_gpio_request_one(dssdev->dev, pd->resb_gpio,
- GPIOF_OUT_INIT_LOW, "lcd RESB");
- if (r)
- return r;
- }
-
- if (gpio_is_valid(pd->ini_gpio)) {
- r = devm_gpio_request_one(dssdev->dev, pd->ini_gpio,
- GPIOF_OUT_INIT_LOW, "lcd INI");
- if (r)
- return r;
- }
-
- return 0;
-}
-
-static void __exit sharp_ls_panel_remove(struct omap_dss_device *dssdev)
-{
-}
-
-static int sharp_ls_power_on(struct omap_dss_device *dssdev)
-{
- struct panel_sharp_ls037v7dw01_data *pd = get_panel_data(dssdev);
- int r = 0;
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- return 0;
-
- omapdss_dpi_set_timings(dssdev, &dssdev->panel.timings);
- omapdss_dpi_set_data_lines(dssdev, dssdev->phy.dpi.data_lines);
-
- r = omapdss_dpi_display_enable(dssdev);
- if (r)
- goto err0;
-
- /* wait couple of vsyncs until enabling the LCD */
- msleep(50);
-
- if (gpio_is_valid(pd->resb_gpio))
- gpio_set_value_cansleep(pd->resb_gpio, 1);
-
- if (gpio_is_valid(pd->ini_gpio))
- gpio_set_value_cansleep(pd->ini_gpio, 1);
-
- return 0;
-err0:
- return r;
-}
-
-static void sharp_ls_power_off(struct omap_dss_device *dssdev)
-{
- struct panel_sharp_ls037v7dw01_data *pd = get_panel_data(dssdev);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return;
-
- if (gpio_is_valid(pd->ini_gpio))
- gpio_set_value_cansleep(pd->ini_gpio, 0);
-
- if (gpio_is_valid(pd->resb_gpio))
- gpio_set_value_cansleep(pd->resb_gpio, 0);
-
- /* wait at least 5 vsyncs after disabling the LCD */
-
- msleep(100);
-
- omapdss_dpi_display_disable(dssdev);
-}
-
-static int sharp_ls_panel_enable(struct omap_dss_device *dssdev)
-{
- int r;
- r = sharp_ls_power_on(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
- return r;
-}
-
-static void sharp_ls_panel_disable(struct omap_dss_device *dssdev)
-{
- sharp_ls_power_off(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static struct omap_dss_driver sharp_ls_driver = {
- .probe = sharp_ls_panel_probe,
- .remove = __exit_p(sharp_ls_panel_remove),
-
- .enable = sharp_ls_panel_enable,
- .disable = sharp_ls_panel_disable,
-
- .driver = {
- .name = "sharp_ls_panel",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init sharp_ls_panel_drv_init(void)
-{
- return omap_dss_register_driver(&sharp_ls_driver);
-}
-
-static void __exit sharp_ls_panel_drv_exit(void)
-{
- omap_dss_unregister_driver(&sharp_ls_driver);
-}
-
-module_init(sharp_ls_panel_drv_init);
-module_exit(sharp_ls_panel_drv_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
deleted file mode 100644
index 54a07da8587..00000000000
--- a/drivers/video/omap2/displays/panel-taal.c
+++ /dev/null
@@ -1,1551 +0,0 @@
-/*
- * Taal DSI command mode panel
- *
- * Copyright (C) 2009 Nokia Corporation
- * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-/*#define DEBUG*/
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/jiffies.h>
-#include <linux/sched.h>
-#include <linux/backlight.h>
-#include <linux/fb.h>
-#include <linux/interrupt.h>
-#include <linux/gpio.h>
-#include <linux/workqueue.h>
-#include <linux/slab.h>
-#include <linux/mutex.h>
-
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
-#include <video/mipi_display.h>
-
-/* DSI Virtual channel. Hardcoded for now. */
-#define TCH 0
-
-#define DCS_READ_NUM_ERRORS 0x05
-#define DCS_BRIGHTNESS 0x51
-#define DCS_CTRL_DISPLAY 0x53
-#define DCS_WRITE_CABC 0x55
-#define DCS_READ_CABC 0x56
-#define DCS_GET_ID1 0xda
-#define DCS_GET_ID2 0xdb
-#define DCS_GET_ID3 0xdc
-
-static irqreturn_t taal_te_isr(int irq, void *data);
-static void taal_te_timeout_work_callback(struct work_struct *work);
-static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable);
-
-static int taal_panel_reset(struct omap_dss_device *dssdev);
-
-struct taal_data {
- struct mutex lock;
-
- struct backlight_device *bldev;
-
- unsigned long hw_guard_end; /* next value of jiffies when we can
- * issue the next sleep in/out command
- */
- unsigned long hw_guard_wait; /* max guard time in jiffies */
-
- struct omap_dss_device *dssdev;
-
- /* panel HW configuration from DT or platform data */
- int reset_gpio;
- int ext_te_gpio;
-
- bool use_dsi_backlight;
-
- struct omap_dsi_pin_config pin_config;
-
- /* runtime variables */
- bool enabled;
-
- bool te_enabled;
-
- atomic_t do_update;
- int channel;
-
- struct delayed_work te_timeout_work;
-
- bool cabc_broken;
- unsigned cabc_mode;
-
- bool intro_printed;
-
- struct workqueue_struct *workqueue;
-
- struct delayed_work esd_work;
- unsigned esd_interval;
-
- bool ulps_enabled;
- unsigned ulps_timeout;
- struct delayed_work ulps_work;
-};
-
-static void taal_esd_work(struct work_struct *work);
-static void taal_ulps_work(struct work_struct *work);
-
-static void hw_guard_start(struct taal_data *td, int guard_msec)
-{
- td->hw_guard_wait = msecs_to_jiffies(guard_msec);
- td->hw_guard_end = jiffies + td->hw_guard_wait;
-}
-
-static void hw_guard_wait(struct taal_data *td)
-{
- unsigned long wait = td->hw_guard_end - jiffies;
-
- if ((long)wait > 0 && wait <= td->hw_guard_wait) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(wait);
- }
-}
-
-static int taal_dcs_read_1(struct taal_data *td, u8 dcs_cmd, u8 *data)
-{
- int r;
- u8 buf[1];
-
- r = dsi_vc_dcs_read(td->dssdev, td->channel, dcs_cmd, buf, 1);
-
- if (r < 0)
- return r;
-
- *data = buf[0];
-
- return 0;
-}
-
-static int taal_dcs_write_0(struct taal_data *td, u8 dcs_cmd)
-{
- return dsi_vc_dcs_write(td->dssdev, td->channel, &dcs_cmd, 1);
-}
-
-static int taal_dcs_write_1(struct taal_data *td, u8 dcs_cmd, u8 param)
-{
- u8 buf[2];
- buf[0] = dcs_cmd;
- buf[1] = param;
- return dsi_vc_dcs_write(td->dssdev, td->channel, buf, 2);
-}
-
-static int taal_sleep_in(struct taal_data *td)
-
-{
- u8 cmd;
- int r;
-
- hw_guard_wait(td);
-
- cmd = MIPI_DCS_ENTER_SLEEP_MODE;
- r = dsi_vc_dcs_write_nosync(td->dssdev, td->channel, &cmd, 1);
- if (r)
- return r;
-
- hw_guard_start(td, 120);
-
- msleep(5);
-
- return 0;
-}
-
-static int taal_sleep_out(struct taal_data *td)
-{
- int r;
-
- hw_guard_wait(td);
-
- r = taal_dcs_write_0(td, MIPI_DCS_EXIT_SLEEP_MODE);
- if (r)
- return r;
-
- hw_guard_start(td, 120);
-
- msleep(5);
-
- return 0;
-}
-
-static int taal_get_id(struct taal_data *td, u8 *id1, u8 *id2, u8 *id3)
-{
- int r;
-
- r = taal_dcs_read_1(td, DCS_GET_ID1, id1);
- if (r)
- return r;
- r = taal_dcs_read_1(td, DCS_GET_ID2, id2);
- if (r)
- return r;
- r = taal_dcs_read_1(td, DCS_GET_ID3, id3);
- if (r)
- return r;
-
- return 0;
-}
-
-static int taal_set_update_window(struct taal_data *td,
- u16 x, u16 y, u16 w, u16 h)
-{
- int r;
- u16 x1 = x;
- u16 x2 = x + w - 1;
- u16 y1 = y;
- u16 y2 = y + h - 1;
-
- u8 buf[5];
- buf[0] = MIPI_DCS_SET_COLUMN_ADDRESS;
- buf[1] = (x1 >> 8) & 0xff;
- buf[2] = (x1 >> 0) & 0xff;
- buf[3] = (x2 >> 8) & 0xff;
- buf[4] = (x2 >> 0) & 0xff;
-
- r = dsi_vc_dcs_write_nosync(td->dssdev, td->channel, buf, sizeof(buf));
- if (r)
- return r;
-
- buf[0] = MIPI_DCS_SET_PAGE_ADDRESS;
- buf[1] = (y1 >> 8) & 0xff;
- buf[2] = (y1 >> 0) & 0xff;
- buf[3] = (y2 >> 8) & 0xff;
- buf[4] = (y2 >> 0) & 0xff;
-
- r = dsi_vc_dcs_write_nosync(td->dssdev, td->channel, buf, sizeof(buf));
- if (r)
- return r;
-
- dsi_vc_send_bta_sync(td->dssdev, td->channel);
-
- return r;
-}
-
-static void taal_queue_esd_work(struct omap_dss_device *dssdev)
-{
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
-
- if (td->esd_interval > 0)
- queue_delayed_work(td->workqueue, &td->esd_work,
- msecs_to_jiffies(td->esd_interval));
-}
-
-static void taal_cancel_esd_work(struct omap_dss_device *dssdev)
-{
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
-
- cancel_delayed_work(&td->esd_work);
-}
-
-static void taal_queue_ulps_work(struct omap_dss_device *dssdev)
-{
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
-
- if (td->ulps_timeout > 0)
- queue_delayed_work(td->workqueue, &td->ulps_work,
- msecs_to_jiffies(td->ulps_timeout));
-}
-
-static void taal_cancel_ulps_work(struct omap_dss_device *dssdev)
-{
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
-
- cancel_delayed_work(&td->ulps_work);
-}
-
-static int taal_enter_ulps(struct omap_dss_device *dssdev)
-{
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- int r;
-
- if (td->ulps_enabled)
- return 0;
-
- taal_cancel_ulps_work(dssdev);
-
- r = _taal_enable_te(dssdev, false);
- if (r)
- goto err;
-
- if (gpio_is_valid(td->ext_te_gpio))
- disable_irq(gpio_to_irq(td->ext_te_gpio));
-
- omapdss_dsi_display_disable(dssdev, false, true);
-
- td->ulps_enabled = true;
-
- return 0;
-
-err:
- dev_err(dssdev->dev, "enter ULPS failed");
- taal_panel_reset(dssdev);
-
- td->ulps_enabled = false;
-
- taal_queue_ulps_work(dssdev);
-
- return r;
-}
-
-static int taal_exit_ulps(struct omap_dss_device *dssdev)
-{
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- int r;
-
- if (!td->ulps_enabled)
- return 0;
-
- r = omapdss_dsi_display_enable(dssdev);
- if (r) {
- dev_err(dssdev->dev, "failed to enable DSI\n");
- goto err1;
- }
-
- omapdss_dsi_vc_enable_hs(dssdev, td->channel, true);
-
- r = _taal_enable_te(dssdev, true);
- if (r) {
- dev_err(dssdev->dev, "failed to re-enable TE");
- goto err2;
- }
-
- if (gpio_is_valid(td->ext_te_gpio))
- enable_irq(gpio_to_irq(td->ext_te_gpio));
-
- taal_queue_ulps_work(dssdev);
-
- td->ulps_enabled = false;
-
- return 0;
-
-err2:
- dev_err(dssdev->dev, "failed to exit ULPS");
-
- r = taal_panel_reset(dssdev);
- if (!r) {
- if (gpio_is_valid(td->ext_te_gpio))
- enable_irq(gpio_to_irq(td->ext_te_gpio));
- td->ulps_enabled = false;
- }
-err1:
- taal_queue_ulps_work(dssdev);
-
- return r;
-}
-
-static int taal_wake_up(struct omap_dss_device *dssdev)
-{
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
-
- if (td->ulps_enabled)
- return taal_exit_ulps(dssdev);
-
- taal_cancel_ulps_work(dssdev);
- taal_queue_ulps_work(dssdev);
- return 0;
-}
-
-static int taal_bl_update_status(struct backlight_device *dev)
-{
- struct omap_dss_device *dssdev = dev_get_drvdata(&dev->dev);
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- int r;
- int level;
-
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK)
- level = dev->props.brightness;
- else
- level = 0;
-
- dev_dbg(dssdev->dev, "update brightness to %d\n", level);
-
- mutex_lock(&td->lock);
-
- if (td->enabled) {
- dsi_bus_lock(dssdev);
-
- r = taal_wake_up(dssdev);
- if (!r)
- r = taal_dcs_write_1(td, DCS_BRIGHTNESS, level);
-
- dsi_bus_unlock(dssdev);
- } else {
- r = 0;
- }
-
- mutex_unlock(&td->lock);
-
- return r;
-}
-
-static int taal_bl_get_intensity(struct backlight_device *dev)
-{
- if (dev->props.fb_blank == FB_BLANK_UNBLANK &&
- dev->props.power == FB_BLANK_UNBLANK)
- return dev->props.brightness;
-
- return 0;
-}
-
-static const struct backlight_ops taal_bl_ops = {
- .get_brightness = taal_bl_get_intensity,
- .update_status = taal_bl_update_status,
-};
-
-static void taal_get_resolution(struct omap_dss_device *dssdev,
- u16 *xres, u16 *yres)
-{
- *xres = dssdev->panel.timings.x_res;
- *yres = dssdev->panel.timings.y_res;
-}
-
-static ssize_t taal_num_errors_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- u8 errors = 0;
- int r;
-
- mutex_lock(&td->lock);
-
- if (td->enabled) {
- dsi_bus_lock(dssdev);
-
- r = taal_wake_up(dssdev);
- if (!r)
- r = taal_dcs_read_1(td, DCS_READ_NUM_ERRORS, &errors);
-
- dsi_bus_unlock(dssdev);
- } else {
- r = -ENODEV;
- }
-
- mutex_unlock(&td->lock);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%d\n", errors);
-}
-
-static ssize_t taal_hw_revision_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- u8 id1, id2, id3;
- int r;
-
- mutex_lock(&td->lock);
-
- if (td->enabled) {
- dsi_bus_lock(dssdev);
-
- r = taal_wake_up(dssdev);
- if (!r)
- r = taal_get_id(td, &id1, &id2, &id3);
-
- dsi_bus_unlock(dssdev);
- } else {
- r = -ENODEV;
- }
-
- mutex_unlock(&td->lock);
-
- if (r)
- return r;
-
- return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3);
-}
-
-static const char *cabc_modes[] = {
- "off", /* used also always when CABC is not supported */
- "ui",
- "still-image",
- "moving-image",
-};
-
-static ssize_t show_cabc_mode(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- const char *mode_str;
- int mode;
- int len;
-
- mode = td->cabc_mode;
-
- mode_str = "unknown";
- if (mode >= 0 && mode < ARRAY_SIZE(cabc_modes))
- mode_str = cabc_modes[mode];
- len = snprintf(buf, PAGE_SIZE, "%s\n", mode_str);
-
- return len < PAGE_SIZE - 1 ? len : PAGE_SIZE - 1;
-}
-
-static ssize_t store_cabc_mode(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- int i;
- int r;
-
- for (i = 0; i < ARRAY_SIZE(cabc_modes); i++) {
- if (sysfs_streq(cabc_modes[i], buf))
- break;
- }
-
- if (i == ARRAY_SIZE(cabc_modes))
- return -EINVAL;
-
- mutex_lock(&td->lock);
-
- if (td->enabled) {
- dsi_bus_lock(dssdev);
-
- if (!td->cabc_broken) {
- r = taal_wake_up(dssdev);
- if (r)
- goto err;
-
- r = taal_dcs_write_1(td, DCS_WRITE_CABC, i);
- if (r)
- goto err;
- }
-
- dsi_bus_unlock(dssdev);
- }
-
- td->cabc_mode = i;
-
- mutex_unlock(&td->lock);
-
- return count;
-err:
- dsi_bus_unlock(dssdev);
- mutex_unlock(&td->lock);
- return r;
-}
-
-static ssize_t show_cabc_available_modes(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int len;
- int i;
-
- for (i = 0, len = 0;
- len < PAGE_SIZE && i < ARRAY_SIZE(cabc_modes); i++)
- len += snprintf(&buf[len], PAGE_SIZE - len, "%s%s%s",
- i ? " " : "", cabc_modes[i],
- i == ARRAY_SIZE(cabc_modes) - 1 ? "\n" : "");
-
- return len < PAGE_SIZE ? len : PAGE_SIZE - 1;
-}
-
-static ssize_t taal_store_esd_interval(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
-
- unsigned long t;
- int r;
-
- r = kstrtoul(buf, 10, &t);
- if (r)
- return r;
-
- mutex_lock(&td->lock);
- taal_cancel_esd_work(dssdev);
- td->esd_interval = t;
- if (td->enabled)
- taal_queue_esd_work(dssdev);
- mutex_unlock(&td->lock);
-
- return count;
-}
-
-static ssize_t taal_show_esd_interval(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- unsigned t;
-
- mutex_lock(&td->lock);
- t = td->esd_interval;
- mutex_unlock(&td->lock);
-
- return snprintf(buf, PAGE_SIZE, "%u\n", t);
-}
-
-static ssize_t taal_store_ulps(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- unsigned long t;
- int r;
-
- r = kstrtoul(buf, 10, &t);
- if (r)
- return r;
-
- mutex_lock(&td->lock);
-
- if (td->enabled) {
- dsi_bus_lock(dssdev);
-
- if (t)
- r = taal_enter_ulps(dssdev);
- else
- r = taal_wake_up(dssdev);
-
- dsi_bus_unlock(dssdev);
- }
-
- mutex_unlock(&td->lock);
-
- if (r)
- return r;
-
- return count;
-}
-
-static ssize_t taal_show_ulps(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- unsigned t;
-
- mutex_lock(&td->lock);
- t = td->ulps_enabled;
- mutex_unlock(&td->lock);
-
- return snprintf(buf, PAGE_SIZE, "%u\n", t);
-}
-
-static ssize_t taal_store_ulps_timeout(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- unsigned long t;
- int r;
-
- r = kstrtoul(buf, 10, &t);
- if (r)
- return r;
-
- mutex_lock(&td->lock);
- td->ulps_timeout = t;
-
- if (td->enabled) {
- /* taal_wake_up will restart the timer */
- dsi_bus_lock(dssdev);
- r = taal_wake_up(dssdev);
- dsi_bus_unlock(dssdev);
- }
-
- mutex_unlock(&td->lock);
-
- if (r)
- return r;
-
- return count;
-}
-
-static ssize_t taal_show_ulps_timeout(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- unsigned t;
-
- mutex_lock(&td->lock);
- t = td->ulps_timeout;
- mutex_unlock(&td->lock);
-
- return snprintf(buf, PAGE_SIZE, "%u\n", t);
-}
-
-static DEVICE_ATTR(num_dsi_errors, S_IRUGO, taal_num_errors_show, NULL);
-static DEVICE_ATTR(hw_revision, S_IRUGO, taal_hw_revision_show, NULL);
-static DEVICE_ATTR(cabc_mode, S_IRUGO | S_IWUSR,
- show_cabc_mode, store_cabc_mode);
-static DEVICE_ATTR(cabc_available_modes, S_IRUGO,
- show_cabc_available_modes, NULL);
-static DEVICE_ATTR(esd_interval, S_IRUGO | S_IWUSR,
- taal_show_esd_interval, taal_store_esd_interval);
-static DEVICE_ATTR(ulps, S_IRUGO | S_IWUSR,
- taal_show_ulps, taal_store_ulps);
-static DEVICE_ATTR(ulps_timeout, S_IRUGO | S_IWUSR,
- taal_show_ulps_timeout, taal_store_ulps_timeout);
-
-static struct attribute *taal_attrs[] = {
- &dev_attr_num_dsi_errors.attr,
- &dev_attr_hw_revision.attr,
- &dev_attr_cabc_mode.attr,
- &dev_attr_cabc_available_modes.attr,
- &dev_attr_esd_interval.attr,
- &dev_attr_ulps.attr,
- &dev_attr_ulps_timeout.attr,
- NULL,
-};
-
-static struct attribute_group taal_attr_group = {
- .attrs = taal_attrs,
-};
-
-static void taal_hw_reset(struct omap_dss_device *dssdev)
-{
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
-
- if (!gpio_is_valid(td->reset_gpio))
- return;
-
- gpio_set_value(td->reset_gpio, 1);
- udelay(10);
- /* reset the panel */
- gpio_set_value(td->reset_gpio, 0);
- /* assert reset */
- udelay(10);
- gpio_set_value(td->reset_gpio, 1);
- /* wait after releasing reset */
- msleep(5);
-}
-
-static void taal_probe_pdata(struct taal_data *td,
- const struct nokia_dsi_panel_data *pdata)
-{
- td->reset_gpio = pdata->reset_gpio;
-
- if (pdata->use_ext_te)
- td->ext_te_gpio = pdata->ext_te_gpio;
- else
- td->ext_te_gpio = -1;
-
- td->esd_interval = pdata->esd_interval;
- td->ulps_timeout = pdata->ulps_timeout;
-
- td->use_dsi_backlight = pdata->use_dsi_backlight;
-
- td->pin_config = pdata->pin_config;
-}
-
-static int taal_probe(struct omap_dss_device *dssdev)
-{
- struct backlight_properties props;
- struct taal_data *td;
- struct backlight_device *bldev = NULL;
- int r;
-
- dev_dbg(dssdev->dev, "probe\n");
-
- td = devm_kzalloc(dssdev->dev, sizeof(*td), GFP_KERNEL);
- if (!td)
- return -ENOMEM;
-
- dev_set_drvdata(dssdev->dev, td);
- td->dssdev = dssdev;
-
- if (dssdev->data) {
- const struct nokia_dsi_panel_data *pdata = dssdev->data;
-
- taal_probe_pdata(td, pdata);
- } else {
- return -ENODEV;
- }
-
- dssdev->panel.timings.x_res = 864;
- dssdev->panel.timings.y_res = 480;
- dssdev->panel.timings.pixel_clock = DIV_ROUND_UP(864 * 480 * 60, 1000);
- dssdev->panel.dsi_pix_fmt = OMAP_DSS_DSI_FMT_RGB888;
- dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
- OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
-
- mutex_init(&td->lock);
-
- atomic_set(&td->do_update, 0);
-
- if (gpio_is_valid(td->reset_gpio)) {
- r = devm_gpio_request_one(dssdev->dev, td->reset_gpio,
- GPIOF_OUT_INIT_LOW, "taal rst");
- if (r) {
- dev_err(dssdev->dev, "failed to request reset gpio\n");
- return r;
- }
- }
-
- if (gpio_is_valid(td->ext_te_gpio)) {
- r = devm_gpio_request_one(dssdev->dev, td->ext_te_gpio,
- GPIOF_IN, "taal irq");
- if (r) {
- dev_err(dssdev->dev, "GPIO request failed\n");
- return r;
- }
-
- r = devm_request_irq(dssdev->dev, gpio_to_irq(td->ext_te_gpio),
- taal_te_isr,
- IRQF_TRIGGER_RISING,
- "taal vsync", dssdev);
-
- if (r) {
- dev_err(dssdev->dev, "IRQ request failed\n");
- return r;
- }
-
- INIT_DEFERRABLE_WORK(&td->te_timeout_work,
- taal_te_timeout_work_callback);
-
- dev_dbg(dssdev->dev, "Using GPIO TE\n");
- }
-
- td->workqueue = create_singlethread_workqueue("taal_esd");
- if (td->workqueue == NULL) {
- dev_err(dssdev->dev, "can't create ESD workqueue\n");
- return -ENOMEM;
- }
- INIT_DEFERRABLE_WORK(&td->esd_work, taal_esd_work);
- INIT_DELAYED_WORK(&td->ulps_work, taal_ulps_work);
-
- taal_hw_reset(dssdev);
-
- if (td->use_dsi_backlight) {
- memset(&props, 0, sizeof(struct backlight_properties));
- props.max_brightness = 255;
-
- props.type = BACKLIGHT_RAW;
- bldev = backlight_device_register(dev_name(dssdev->dev),
- dssdev->dev, dssdev, &taal_bl_ops, &props);
- if (IS_ERR(bldev)) {
- r = PTR_ERR(bldev);
- goto err_bl;
- }
-
- td->bldev = bldev;
-
- bldev->props.fb_blank = FB_BLANK_UNBLANK;
- bldev->props.power = FB_BLANK_UNBLANK;
- bldev->props.brightness = 255;
-
- taal_bl_update_status(bldev);
- }
-
- r = omap_dsi_request_vc(dssdev, &td->channel);
- if (r) {
- dev_err(dssdev->dev, "failed to get virtual channel\n");
- goto err_req_vc;
- }
-
- r = omap_dsi_set_vc_id(dssdev, td->channel, TCH);
- if (r) {
- dev_err(dssdev->dev, "failed to set VC_ID\n");
- goto err_vc_id;
- }
-
- r = sysfs_create_group(&dssdev->dev->kobj, &taal_attr_group);
- if (r) {
- dev_err(dssdev->dev, "failed to create sysfs files\n");
- goto err_vc_id;
- }
-
- return 0;
-
-err_vc_id:
- omap_dsi_release_vc(dssdev, td->channel);
-err_req_vc:
- if (bldev != NULL)
- backlight_device_unregister(bldev);
-err_bl:
- destroy_workqueue(td->workqueue);
- return r;
-}
-
-static void __exit taal_remove(struct omap_dss_device *dssdev)
-{
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- struct backlight_device *bldev;
-
- dev_dbg(dssdev->dev, "remove\n");
-
- sysfs_remove_group(&dssdev->dev->kobj, &taal_attr_group);
- omap_dsi_release_vc(dssdev, td->channel);
-
- bldev = td->bldev;
- if (bldev != NULL) {
- bldev->props.power = FB_BLANK_POWERDOWN;
- taal_bl_update_status(bldev);
- backlight_device_unregister(bldev);
- }
-
- taal_cancel_ulps_work(dssdev);
- taal_cancel_esd_work(dssdev);
- destroy_workqueue(td->workqueue);
-
- /* reset, to be sure that the panel is in a valid state */
- taal_hw_reset(dssdev);
-}
-
-static int taal_power_on(struct omap_dss_device *dssdev)
-{
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- u8 id1, id2, id3;
- int r;
- struct omap_dss_dsi_config dsi_config = {
- .mode = OMAP_DSS_DSI_CMD_MODE,
- .pixel_format = OMAP_DSS_DSI_FMT_RGB888,
- .timings = &dssdev->panel.timings,
- .hs_clk_min = 150000000,
- .hs_clk_max = 300000000,
- .lp_clk_min = 7000000,
- .lp_clk_max = 10000000,
- };
-
- r = omapdss_dsi_configure_pins(dssdev, &td->pin_config);
- if (r) {
- dev_err(dssdev->dev, "failed to configure DSI pins\n");
- goto err0;
- };
-
- r = omapdss_dsi_set_config(dssdev, &dsi_config);
- if (r) {
- dev_err(dssdev->dev, "failed to configure DSI\n");
- goto err0;
- }
-
- r = omapdss_dsi_display_enable(dssdev);
- if (r) {
- dev_err(dssdev->dev, "failed to enable DSI\n");
- goto err0;
- }
-
- taal_hw_reset(dssdev);
-
- omapdss_dsi_vc_enable_hs(dssdev, td->channel, false);
-
- r = taal_sleep_out(td);
- if (r)
- goto err;
-
- r = taal_get_id(td, &id1, &id2, &id3);
- if (r)
- goto err;
-
- /* on early Taal revisions CABC is broken */
- if (id2 == 0x00 || id2 == 0xff || id2 == 0x81)
- td->cabc_broken = true;
-
- r = taal_dcs_write_1(td, DCS_BRIGHTNESS, 0xff);
- if (r)
- goto err;
-
- r = taal_dcs_write_1(td, DCS_CTRL_DISPLAY,
- (1<<2) | (1<<5)); /* BL | BCTRL */
- if (r)
- goto err;
-
- r = taal_dcs_write_1(td, MIPI_DCS_SET_PIXEL_FORMAT,
- MIPI_DCS_PIXEL_FMT_24BIT);
- if (r)
- goto err;
-
- if (!td->cabc_broken) {
- r = taal_dcs_write_1(td, DCS_WRITE_CABC, td->cabc_mode);
- if (r)
- goto err;
- }
-
- r = taal_dcs_write_0(td, MIPI_DCS_SET_DISPLAY_ON);
- if (r)
- goto err;
-
- r = _taal_enable_te(dssdev, td->te_enabled);
- if (r)
- goto err;
-
- r = dsi_enable_video_output(dssdev, td->channel);
- if (r)
- goto err;
-
- td->enabled = 1;
-
- if (!td->intro_printed) {
- dev_info(dssdev->dev, "panel revision %02x.%02x.%02x\n",
- id1, id2, id3);
- if (td->cabc_broken)
- dev_info(dssdev->dev,
- "old Taal version, CABC disabled\n");
- td->intro_printed = true;
- }
-
- omapdss_dsi_vc_enable_hs(dssdev, td->channel, true);
-
- return 0;
-err:
- dev_err(dssdev->dev, "error while enabling panel, issuing HW reset\n");
-
- taal_hw_reset(dssdev);
-
- omapdss_dsi_display_disable(dssdev, true, false);
-err0:
- return r;
-}
-
-static void taal_power_off(struct omap_dss_device *dssdev)
-{
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- int r;
-
- dsi_disable_video_output(dssdev, td->channel);
-
- r = taal_dcs_write_0(td, MIPI_DCS_SET_DISPLAY_OFF);
- if (!r)
- r = taal_sleep_in(td);
-
- if (r) {
- dev_err(dssdev->dev,
- "error disabling panel, issuing HW reset\n");
- taal_hw_reset(dssdev);
- }
-
- omapdss_dsi_display_disable(dssdev, true, false);
-
- td->enabled = 0;
-}
-
-static int taal_panel_reset(struct omap_dss_device *dssdev)
-{
- dev_err(dssdev->dev, "performing LCD reset\n");
-
- taal_power_off(dssdev);
- taal_hw_reset(dssdev);
- return taal_power_on(dssdev);
-}
-
-static int taal_enable(struct omap_dss_device *dssdev)
-{
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- int r;
-
- dev_dbg(dssdev->dev, "enable\n");
-
- mutex_lock(&td->lock);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
- r = -EINVAL;
- goto err;
- }
-
- dsi_bus_lock(dssdev);
-
- r = taal_power_on(dssdev);
-
- dsi_bus_unlock(dssdev);
-
- if (r)
- goto err;
-
- taal_queue_esd_work(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- mutex_unlock(&td->lock);
-
- return 0;
-err:
- dev_dbg(dssdev->dev, "enable failed\n");
- mutex_unlock(&td->lock);
- return r;
-}
-
-static void taal_disable(struct omap_dss_device *dssdev)
-{
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
-
- dev_dbg(dssdev->dev, "disable\n");
-
- mutex_lock(&td->lock);
-
- taal_cancel_ulps_work(dssdev);
- taal_cancel_esd_work(dssdev);
-
- dsi_bus_lock(dssdev);
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
- int r;
-
- r = taal_wake_up(dssdev);
- if (!r)
- taal_power_off(dssdev);
- }
-
- dsi_bus_unlock(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-
- mutex_unlock(&td->lock);
-}
-
-static void taal_framedone_cb(int err, void *data)
-{
- struct omap_dss_device *dssdev = data;
- dev_dbg(dssdev->dev, "framedone, err %d\n", err);
- dsi_bus_unlock(dssdev);
-}
-
-static irqreturn_t taal_te_isr(int irq, void *data)
-{
- struct omap_dss_device *dssdev = data;
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- int old;
- int r;
-
- old = atomic_cmpxchg(&td->do_update, 1, 0);
-
- if (old) {
- cancel_delayed_work(&td->te_timeout_work);
-
- r = omap_dsi_update(dssdev, td->channel, taal_framedone_cb,
- dssdev);
- if (r)
- goto err;
- }
-
- return IRQ_HANDLED;
-err:
- dev_err(dssdev->dev, "start update failed\n");
- dsi_bus_unlock(dssdev);
- return IRQ_HANDLED;
-}
-
-static void taal_te_timeout_work_callback(struct work_struct *work)
-{
- struct taal_data *td = container_of(work, struct taal_data,
- te_timeout_work.work);
- struct omap_dss_device *dssdev = td->dssdev;
-
- dev_err(dssdev->dev, "TE not received for 250ms!\n");
-
- atomic_set(&td->do_update, 0);
- dsi_bus_unlock(dssdev);
-}
-
-static int taal_update(struct omap_dss_device *dssdev,
- u16 x, u16 y, u16 w, u16 h)
-{
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- int r;
-
- dev_dbg(dssdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
-
- mutex_lock(&td->lock);
- dsi_bus_lock(dssdev);
-
- r = taal_wake_up(dssdev);
- if (r)
- goto err;
-
- if (!td->enabled) {
- r = 0;
- goto err;
- }
-
- /* XXX no need to send this every frame, but dsi break if not done */
- r = taal_set_update_window(td, 0, 0,
- dssdev->panel.timings.x_res,
- dssdev->panel.timings.y_res);
- if (r)
- goto err;
-
- if (td->te_enabled && gpio_is_valid(td->ext_te_gpio)) {
- schedule_delayed_work(&td->te_timeout_work,
- msecs_to_jiffies(250));
- atomic_set(&td->do_update, 1);
- } else {
- r = omap_dsi_update(dssdev, td->channel, taal_framedone_cb,
- dssdev);
- if (r)
- goto err;
- }
-
- /* note: no bus_unlock here. unlock is in framedone_cb */
- mutex_unlock(&td->lock);
- return 0;
-err:
- dsi_bus_unlock(dssdev);
- mutex_unlock(&td->lock);
- return r;
-}
-
-static int taal_sync(struct omap_dss_device *dssdev)
-{
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
-
- dev_dbg(dssdev->dev, "sync\n");
-
- mutex_lock(&td->lock);
- dsi_bus_lock(dssdev);
- dsi_bus_unlock(dssdev);
- mutex_unlock(&td->lock);
-
- dev_dbg(dssdev->dev, "sync done\n");
-
- return 0;
-}
-
-static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable)
-{
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- int r;
-
- if (enable)
- r = taal_dcs_write_1(td, MIPI_DCS_SET_TEAR_ON, 0);
- else
- r = taal_dcs_write_0(td, MIPI_DCS_SET_TEAR_OFF);
-
- if (!gpio_is_valid(td->ext_te_gpio))
- omapdss_dsi_enable_te(dssdev, enable);
-
- /* possible panel bug */
- msleep(100);
-
- return r;
-}
-
-static int taal_enable_te(struct omap_dss_device *dssdev, bool enable)
-{
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- int r;
-
- mutex_lock(&td->lock);
-
- if (td->te_enabled == enable)
- goto end;
-
- dsi_bus_lock(dssdev);
-
- if (td->enabled) {
- r = taal_wake_up(dssdev);
- if (r)
- goto err;
-
- r = _taal_enable_te(dssdev, enable);
- if (r)
- goto err;
- }
-
- td->te_enabled = enable;
-
- dsi_bus_unlock(dssdev);
-end:
- mutex_unlock(&td->lock);
-
- return 0;
-err:
- dsi_bus_unlock(dssdev);
- mutex_unlock(&td->lock);
-
- return r;
-}
-
-static int taal_get_te(struct omap_dss_device *dssdev)
-{
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- int r;
-
- mutex_lock(&td->lock);
- r = td->te_enabled;
- mutex_unlock(&td->lock);
-
- return r;
-}
-
-static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
-{
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
- u8 id1, id2, id3;
- int r;
-
- mutex_lock(&td->lock);
-
- if (!td->enabled) {
- r = -ENODEV;
- goto err1;
- }
-
- dsi_bus_lock(dssdev);
-
- r = taal_wake_up(dssdev);
- if (r)
- goto err2;
-
- r = taal_dcs_read_1(td, DCS_GET_ID1, &id1);
- if (r)
- goto err2;
- r = taal_dcs_read_1(td, DCS_GET_ID2, &id2);
- if (r)
- goto err2;
- r = taal_dcs_read_1(td, DCS_GET_ID3, &id3);
- if (r)
- goto err2;
-
- dsi_bus_unlock(dssdev);
- mutex_unlock(&td->lock);
- return 0;
-err2:
- dsi_bus_unlock(dssdev);
-err1:
- mutex_unlock(&td->lock);
- return r;
-}
-
-static int taal_memory_read(struct omap_dss_device *dssdev,
- void *buf, size_t size,
- u16 x, u16 y, u16 w, u16 h)
-{
- int r;
- int first = 1;
- int plen;
- unsigned buf_used = 0;
- struct taal_data *td = dev_get_drvdata(dssdev->dev);
-
- if (size < w * h * 3)
- return -ENOMEM;
-
- mutex_lock(&td->lock);
-
- if (!td->enabled) {
- r = -ENODEV;
- goto err1;
- }
-
- size = min(w * h * 3,
- dssdev->panel.timings.x_res *
- dssdev->panel.timings.y_res * 3);
-
- dsi_bus_lock(dssdev);
-
- r = taal_wake_up(dssdev);
- if (r)
- goto err2;
-
- /* plen 1 or 2 goes into short packet. until checksum error is fixed,
- * use short packets. plen 32 works, but bigger packets seem to cause
- * an error. */
- if (size % 2)
- plen = 1;
- else
- plen = 2;
-
- taal_set_update_window(td, x, y, w, h);
-
- r = dsi_vc_set_max_rx_packet_size(dssdev, td->channel, plen);
- if (r)
- goto err2;
-
- while (buf_used < size) {
- u8 dcs_cmd = first ? 0x2e : 0x3e;
- first = 0;
-
- r = dsi_vc_dcs_read(dssdev, td->channel, dcs_cmd,
- buf + buf_used, size - buf_used);
-
- if (r < 0) {
- dev_err(dssdev->dev, "read error\n");
- goto err3;
- }
-
- buf_used += r;
-
- if (r < plen) {
- dev_err(dssdev->dev, "short read\n");
- break;
- }
-
- if (signal_pending(current)) {
- dev_err(dssdev->dev, "signal pending, "
- "aborting memory read\n");
- r = -ERESTARTSYS;
- goto err3;
- }
- }
-
- r = buf_used;
-
-err3:
- dsi_vc_set_max_rx_packet_size(dssdev, td->channel, 1);
-err2:
- dsi_bus_unlock(dssdev);
-err1:
- mutex_unlock(&td->lock);
- return r;
-}
-
-static void taal_ulps_work(struct work_struct *work)
-{
- struct taal_data *td = container_of(work, struct taal_data,
- ulps_work.work);
- struct omap_dss_device *dssdev = td->dssdev;
-
- mutex_lock(&td->lock);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE || !td->enabled) {
- mutex_unlock(&td->lock);
- return;
- }
-
- dsi_bus_lock(dssdev);
-
- taal_enter_ulps(dssdev);
-
- dsi_bus_unlock(dssdev);
- mutex_unlock(&td->lock);
-}
-
-static void taal_esd_work(struct work_struct *work)
-{
- struct taal_data *td = container_of(work, struct taal_data,
- esd_work.work);
- struct omap_dss_device *dssdev = td->dssdev;
- u8 state1, state2;
- int r;
-
- mutex_lock(&td->lock);
-
- if (!td->enabled) {
- mutex_unlock(&td->lock);
- return;
- }
-
- dsi_bus_lock(dssdev);
-
- r = taal_wake_up(dssdev);
- if (r) {
- dev_err(dssdev->dev, "failed to exit ULPS\n");
- goto err;
- }
-
- r = taal_dcs_read_1(td, MIPI_DCS_GET_DIAGNOSTIC_RESULT, &state1);
- if (r) {
- dev_err(dssdev->dev, "failed to read Taal status\n");
- goto err;
- }
-
- /* Run self diagnostics */
- r = taal_sleep_out(td);
- if (r) {
- dev_err(dssdev->dev, "failed to run Taal self-diagnostics\n");
- goto err;
- }
-
- r = taal_dcs_read_1(td, MIPI_DCS_GET_DIAGNOSTIC_RESULT, &state2);
- if (r) {
- dev_err(dssdev->dev, "failed to read Taal status\n");
- goto err;
- }
-
- /* Each sleep out command will trigger a self diagnostic and flip
- * Bit6 if the test passes.
- */
- if (!((state1 ^ state2) & (1 << 6))) {
- dev_err(dssdev->dev, "LCD self diagnostics failed\n");
- goto err;
- }
- /* Self-diagnostics result is also shown on TE GPIO line. We need
- * to re-enable TE after self diagnostics */
- if (td->te_enabled && gpio_is_valid(td->ext_te_gpio)) {
- r = taal_dcs_write_1(td, MIPI_DCS_SET_TEAR_ON, 0);
- if (r)
- goto err;
- }
-
- dsi_bus_unlock(dssdev);
-
- taal_queue_esd_work(dssdev);
-
- mutex_unlock(&td->lock);
- return;
-err:
- dev_err(dssdev->dev, "performing LCD reset\n");
-
- taal_panel_reset(dssdev);
-
- dsi_bus_unlock(dssdev);
-
- taal_queue_esd_work(dssdev);
-
- mutex_unlock(&td->lock);
-}
-
-static struct omap_dss_driver taal_driver = {
- .probe = taal_probe,
- .remove = __exit_p(taal_remove),
-
- .enable = taal_enable,
- .disable = taal_disable,
-
- .update = taal_update,
- .sync = taal_sync,
-
- .get_resolution = taal_get_resolution,
- .get_recommended_bpp = omapdss_default_get_recommended_bpp,
-
- .enable_te = taal_enable_te,
- .get_te = taal_get_te,
-
- .run_test = taal_run_test,
- .memory_read = taal_memory_read,
-
- .driver = {
- .name = "taal",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init taal_init(void)
-{
- omap_dss_register_driver(&taal_driver);
-
- return 0;
-}
-
-static void __exit taal_exit(void)
-{
- omap_dss_unregister_driver(&taal_driver);
-}
-
-module_init(taal_init);
-module_exit(taal_exit);
-
-MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>");
-MODULE_DESCRIPTION("Taal Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-tfp410.c b/drivers/video/omap2/displays/panel-tfp410.c
deleted file mode 100644
index 1fdfb158a2a..00000000000
--- a/drivers/video/omap2/displays/panel-tfp410.c
+++ /dev/null
@@ -1,353 +0,0 @@
-/*
- * TFP410 DPI-to-DVI chip
- *
- * Copyright (C) 2011 Texas Instruments Inc
- * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <video/omapdss.h>
-#include <linux/i2c.h>
-#include <linux/gpio.h>
-#include <drm/drm_edid.h>
-
-#include <video/omap-panel-data.h>
-
-static const struct omap_video_timings tfp410_default_timings = {
- .x_res = 640,
- .y_res = 480,
-
- .pixel_clock = 23500,
-
- .hfp = 48,
- .hsw = 32,
- .hbp = 80,
-
- .vfp = 3,
- .vsw = 4,
- .vbp = 7,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
-};
-
-struct panel_drv_data {
- struct omap_dss_device *dssdev;
-
- struct mutex lock;
-
- int pd_gpio;
-
- struct i2c_adapter *i2c_adapter;
-};
-
-static int tfp410_power_on(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
- int r;
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- return 0;
-
- omapdss_dpi_set_timings(dssdev, &dssdev->panel.timings);
- omapdss_dpi_set_data_lines(dssdev, dssdev->phy.dpi.data_lines);
-
- r = omapdss_dpi_display_enable(dssdev);
- if (r)
- goto err0;
-
- if (gpio_is_valid(ddata->pd_gpio))
- gpio_set_value_cansleep(ddata->pd_gpio, 1);
-
- return 0;
-err0:
- return r;
-}
-
-static void tfp410_power_off(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return;
-
- if (gpio_is_valid(ddata->pd_gpio))
- gpio_set_value_cansleep(ddata->pd_gpio, 0);
-
- omapdss_dpi_display_disable(dssdev);
-}
-
-static int tfp410_probe(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata;
- int r;
- int i2c_bus_num;
-
- ddata = devm_kzalloc(dssdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- dssdev->panel.timings = tfp410_default_timings;
-
- ddata->dssdev = dssdev;
- mutex_init(&ddata->lock);
-
- if (dssdev->data) {
- struct tfp410_platform_data *pdata = dssdev->data;
-
- ddata->pd_gpio = pdata->power_down_gpio;
- i2c_bus_num = pdata->i2c_bus_num;
- } else {
- ddata->pd_gpio = -1;
- i2c_bus_num = -1;
- }
-
- if (gpio_is_valid(ddata->pd_gpio)) {
- r = devm_gpio_request_one(dssdev->dev, ddata->pd_gpio,
- GPIOF_OUT_INIT_LOW, "tfp410 pd");
- if (r) {
- dev_err(dssdev->dev, "Failed to request PD GPIO %d\n",
- ddata->pd_gpio);
- return r;
- }
- }
-
- if (i2c_bus_num != -1) {
- struct i2c_adapter *adapter;
-
- adapter = i2c_get_adapter(i2c_bus_num);
- if (!adapter) {
- dev_err(dssdev->dev, "Failed to get I2C adapter, bus %d\n",
- i2c_bus_num);
- return -EPROBE_DEFER;
- }
-
- ddata->i2c_adapter = adapter;
- }
-
- dev_set_drvdata(dssdev->dev, ddata);
-
- return 0;
-}
-
-static void __exit tfp410_remove(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
-
- mutex_lock(&ddata->lock);
-
- if (ddata->i2c_adapter)
- i2c_put_adapter(ddata->i2c_adapter);
-
- dev_set_drvdata(dssdev->dev, NULL);
-
- mutex_unlock(&ddata->lock);
-}
-
-static int tfp410_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
- int r;
-
- mutex_lock(&ddata->lock);
-
- r = tfp410_power_on(dssdev);
- if (r == 0)
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- mutex_unlock(&ddata->lock);
-
- return r;
-}
-
-static void tfp410_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
-
- mutex_lock(&ddata->lock);
-
- tfp410_power_off(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-
- mutex_unlock(&ddata->lock);
-}
-
-static void tfp410_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
-
- mutex_lock(&ddata->lock);
- omapdss_dpi_set_timings(dssdev, timings);
- dssdev->panel.timings = *timings;
- mutex_unlock(&ddata->lock);
-}
-
-static void tfp410_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
-
- mutex_lock(&ddata->lock);
- *timings = dssdev->panel.timings;
- mutex_unlock(&ddata->lock);
-}
-
-static int tfp410_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
- int r;
-
- mutex_lock(&ddata->lock);
- r = dpi_check_timings(dssdev, timings);
- mutex_unlock(&ddata->lock);
-
- return r;
-}
-
-
-static int tfp410_ddc_read(struct i2c_adapter *adapter,
- unsigned char *buf, u16 count, u8 offset)
-{
- int r, retries;
-
- for (retries = 3; retries > 0; retries--) {
- struct i2c_msg msgs[] = {
- {
- .addr = DDC_ADDR,
- .flags = 0,
- .len = 1,
- .buf = &offset,
- }, {
- .addr = DDC_ADDR,
- .flags = I2C_M_RD,
- .len = count,
- .buf = buf,
- }
- };
-
- r = i2c_transfer(adapter, msgs, 2);
- if (r == 2)
- return 0;
-
- if (r != -EAGAIN)
- break;
- }
-
- return r < 0 ? r : -EIO;
-}
-
-static int tfp410_read_edid(struct omap_dss_device *dssdev,
- u8 *edid, int len)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
- int r, l, bytes_read;
-
- mutex_lock(&ddata->lock);
-
- if (!ddata->i2c_adapter) {
- r = -ENODEV;
- goto err;
- }
-
- l = min(EDID_LENGTH, len);
- r = tfp410_ddc_read(ddata->i2c_adapter, edid, l, 0);
- if (r)
- goto err;
-
- bytes_read = l;
-
- /* if there are extensions, read second block */
- if (len > EDID_LENGTH && edid[0x7e] > 0) {
- l = min(EDID_LENGTH, len - EDID_LENGTH);
-
- r = tfp410_ddc_read(ddata->i2c_adapter, edid + EDID_LENGTH,
- l, EDID_LENGTH);
- if (r)
- goto err;
-
- bytes_read += l;
- }
-
- mutex_unlock(&ddata->lock);
-
- return bytes_read;
-
-err:
- mutex_unlock(&ddata->lock);
- return r;
-}
-
-static bool tfp410_detect(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
- unsigned char out;
- int r;
-
- mutex_lock(&ddata->lock);
-
- if (!ddata->i2c_adapter)
- goto out;
-
- r = tfp410_ddc_read(ddata->i2c_adapter, &out, 1, 0);
-
- mutex_unlock(&ddata->lock);
-
- return r == 0;
-
-out:
- mutex_unlock(&ddata->lock);
- return true;
-}
-
-static struct omap_dss_driver tfp410_driver = {
- .probe = tfp410_probe,
- .remove = __exit_p(tfp410_remove),
-
- .enable = tfp410_enable,
- .disable = tfp410_disable,
-
- .set_timings = tfp410_set_timings,
- .get_timings = tfp410_get_timings,
- .check_timings = tfp410_check_timings,
-
- .read_edid = tfp410_read_edid,
- .detect = tfp410_detect,
-
- .driver = {
- .name = "tfp410",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init tfp410_init(void)
-{
- return omap_dss_register_driver(&tfp410_driver);
-}
-
-static void __exit tfp410_exit(void)
-{
- omap_dss_unregister_driver(&tfp410_driver);
-}
-
-module_init(tfp410_init);
-module_exit(tfp410_exit);
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
deleted file mode 100644
index 7729b6fa6f9..00000000000
--- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
+++ /dev/null
@@ -1,596 +0,0 @@
-/*
- * LCD panel driver for TPO TD043MTEA1
- *
- * Author: Gražvydas Ignotas <notasas@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/spi/spi.h>
-#include <linux/regulator/consumer.h>
-#include <linux/gpio.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
-
-#define TPO_R02_MODE(x) ((x) & 7)
-#define TPO_R02_MODE_800x480 7
-#define TPO_R02_NCLK_RISING BIT(3)
-#define TPO_R02_HSYNC_HIGH BIT(4)
-#define TPO_R02_VSYNC_HIGH BIT(5)
-
-#define TPO_R03_NSTANDBY BIT(0)
-#define TPO_R03_EN_CP_CLK BIT(1)
-#define TPO_R03_EN_VGL_PUMP BIT(2)
-#define TPO_R03_EN_PWM BIT(3)
-#define TPO_R03_DRIVING_CAP_100 BIT(4)
-#define TPO_R03_EN_PRE_CHARGE BIT(6)
-#define TPO_R03_SOFTWARE_CTL BIT(7)
-
-#define TPO_R04_NFLIP_H BIT(0)
-#define TPO_R04_NFLIP_V BIT(1)
-#define TPO_R04_CP_CLK_FREQ_1H BIT(2)
-#define TPO_R04_VGL_FREQ_1H BIT(4)
-
-#define TPO_R03_VAL_NORMAL (TPO_R03_NSTANDBY | TPO_R03_EN_CP_CLK | \
- TPO_R03_EN_VGL_PUMP | TPO_R03_EN_PWM | \
- TPO_R03_DRIVING_CAP_100 | TPO_R03_EN_PRE_CHARGE | \
- TPO_R03_SOFTWARE_CTL)
-
-#define TPO_R03_VAL_STANDBY (TPO_R03_DRIVING_CAP_100 | \
- TPO_R03_EN_PRE_CHARGE | TPO_R03_SOFTWARE_CTL)
-
-static const u16 tpo_td043_def_gamma[12] = {
- 105, 315, 381, 431, 490, 537, 579, 686, 780, 837, 880, 1023
-};
-
-struct tpo_td043_device {
- struct spi_device *spi;
- struct regulator *vcc_reg;
- int nreset_gpio;
- u16 gamma[12];
- u32 mode;
- u32 hmirror:1;
- u32 vmirror:1;
- u32 powered_on:1;
- u32 spi_suspended:1;
- u32 power_on_resume:1;
-};
-
-/* used to pass spi_device from SPI to DSS portion of the driver */
-static struct tpo_td043_device *g_tpo_td043;
-
-static int tpo_td043_write(struct spi_device *spi, u8 addr, u8 data)
-{
- struct spi_message m;
- struct spi_transfer xfer;
- u16 w;
- int r;
-
- spi_message_init(&m);
-
- memset(&xfer, 0, sizeof(xfer));
-
- w = ((u16)addr << 10) | (1 << 8) | data;
- xfer.tx_buf = &w;
- xfer.bits_per_word = 16;
- xfer.len = 2;
- spi_message_add_tail(&xfer, &m);
-
- r = spi_sync(spi, &m);
- if (r < 0)
- dev_warn(&spi->dev, "failed to write to LCD reg (%d)\n", r);
- return r;
-}
-
-static void tpo_td043_write_gamma(struct spi_device *spi, u16 gamma[12])
-{
- u8 i, val;
-
- /* gamma bits [9:8] */
- for (val = i = 0; i < 4; i++)
- val |= (gamma[i] & 0x300) >> ((i + 1) * 2);
- tpo_td043_write(spi, 0x11, val);
-
- for (val = i = 0; i < 4; i++)
- val |= (gamma[i+4] & 0x300) >> ((i + 1) * 2);
- tpo_td043_write(spi, 0x12, val);
-
- for (val = i = 0; i < 4; i++)
- val |= (gamma[i+8] & 0x300) >> ((i + 1) * 2);
- tpo_td043_write(spi, 0x13, val);
-
- /* gamma bits [7:0] */
- for (val = i = 0; i < 12; i++)
- tpo_td043_write(spi, 0x14 + i, gamma[i] & 0xff);
-}
-
-static int tpo_td043_write_mirror(struct spi_device *spi, bool h, bool v)
-{
- u8 reg4 = TPO_R04_NFLIP_H | TPO_R04_NFLIP_V | \
- TPO_R04_CP_CLK_FREQ_1H | TPO_R04_VGL_FREQ_1H;
- if (h)
- reg4 &= ~TPO_R04_NFLIP_H;
- if (v)
- reg4 &= ~TPO_R04_NFLIP_V;
-
- return tpo_td043_write(spi, 4, reg4);
-}
-
-static int tpo_td043_set_hmirror(struct omap_dss_device *dssdev, bool enable)
-{
- struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dssdev->dev);
-
- tpo_td043->hmirror = enable;
- return tpo_td043_write_mirror(tpo_td043->spi, tpo_td043->hmirror,
- tpo_td043->vmirror);
-}
-
-static bool tpo_td043_get_hmirror(struct omap_dss_device *dssdev)
-{
- struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dssdev->dev);
-
- return tpo_td043->hmirror;
-}
-
-static ssize_t tpo_td043_vmirror_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", tpo_td043->vmirror);
-}
-
-static ssize_t tpo_td043_vmirror_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev);
- int val;
- int ret;
-
- ret = kstrtoint(buf, 0, &val);
- if (ret < 0)
- return ret;
-
- val = !!val;
-
- ret = tpo_td043_write_mirror(tpo_td043->spi, tpo_td043->hmirror, val);
- if (ret < 0)
- return ret;
-
- tpo_td043->vmirror = val;
-
- return count;
-}
-
-static ssize_t tpo_td043_mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev);
-
- return snprintf(buf, PAGE_SIZE, "%d\n", tpo_td043->mode);
-}
-
-static ssize_t tpo_td043_mode_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev);
- long val;
- int ret;
-
- ret = kstrtol(buf, 0, &val);
- if (ret != 0 || val & ~7)
- return -EINVAL;
-
- tpo_td043->mode = val;
-
- val |= TPO_R02_NCLK_RISING;
- tpo_td043_write(tpo_td043->spi, 2, val);
-
- return count;
-}
-
-static ssize_t tpo_td043_gamma_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev);
- ssize_t len = 0;
- int ret;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tpo_td043->gamma); i++) {
- ret = snprintf(buf + len, PAGE_SIZE - len, "%u ",
- tpo_td043->gamma[i]);
- if (ret < 0)
- return ret;
- len += ret;
- }
- buf[len - 1] = '\n';
-
- return len;
-}
-
-static ssize_t tpo_td043_gamma_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev);
- unsigned int g[12];
- int ret;
- int i;
-
- ret = sscanf(buf, "%u %u %u %u %u %u %u %u %u %u %u %u",
- &g[0], &g[1], &g[2], &g[3], &g[4], &g[5],
- &g[6], &g[7], &g[8], &g[9], &g[10], &g[11]);
-
- if (ret != 12)
- return -EINVAL;
-
- for (i = 0; i < 12; i++)
- tpo_td043->gamma[i] = g[i];
-
- tpo_td043_write_gamma(tpo_td043->spi, tpo_td043->gamma);
-
- return count;
-}
-
-static DEVICE_ATTR(vmirror, S_IRUGO | S_IWUSR,
- tpo_td043_vmirror_show, tpo_td043_vmirror_store);
-static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
- tpo_td043_mode_show, tpo_td043_mode_store);
-static DEVICE_ATTR(gamma, S_IRUGO | S_IWUSR,
- tpo_td043_gamma_show, tpo_td043_gamma_store);
-
-static struct attribute *tpo_td043_attrs[] = {
- &dev_attr_vmirror.attr,
- &dev_attr_mode.attr,
- &dev_attr_gamma.attr,
- NULL,
-};
-
-static struct attribute_group tpo_td043_attr_group = {
- .attrs = tpo_td043_attrs,
-};
-
-static const struct omap_video_timings tpo_td043_timings = {
- .x_res = 800,
- .y_res = 480,
-
- .pixel_clock = 36000,
-
- .hsw = 1,
- .hfp = 68,
- .hbp = 214,
-
- .vsw = 1,
- .vfp = 39,
- .vbp = 34,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
- .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
- .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES,
-};
-
-static inline struct panel_tpo_td043_data
-*get_panel_data(const struct omap_dss_device *dssdev)
-{
- return (struct panel_tpo_td043_data *) dssdev->data;
-}
-
-static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043)
-{
- int r;
-
- if (tpo_td043->powered_on)
- return 0;
-
- r = regulator_enable(tpo_td043->vcc_reg);
- if (r != 0)
- return r;
-
- /* wait for panel to stabilize */
- msleep(160);
-
- if (gpio_is_valid(tpo_td043->nreset_gpio))
- gpio_set_value(tpo_td043->nreset_gpio, 1);
-
- tpo_td043_write(tpo_td043->spi, 2,
- TPO_R02_MODE(tpo_td043->mode) | TPO_R02_NCLK_RISING);
- tpo_td043_write(tpo_td043->spi, 3, TPO_R03_VAL_NORMAL);
- tpo_td043_write(tpo_td043->spi, 0x20, 0xf0);
- tpo_td043_write(tpo_td043->spi, 0x21, 0xf0);
- tpo_td043_write_mirror(tpo_td043->spi, tpo_td043->hmirror,
- tpo_td043->vmirror);
- tpo_td043_write_gamma(tpo_td043->spi, tpo_td043->gamma);
-
- tpo_td043->powered_on = 1;
- return 0;
-}
-
-static void tpo_td043_power_off(struct tpo_td043_device *tpo_td043)
-{
- if (!tpo_td043->powered_on)
- return;
-
- tpo_td043_write(tpo_td043->spi, 3,
- TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM);
-
- if (gpio_is_valid(tpo_td043->nreset_gpio))
- gpio_set_value(tpo_td043->nreset_gpio, 0);
-
- /* wait for at least 2 vsyncs before cutting off power */
- msleep(50);
-
- tpo_td043_write(tpo_td043->spi, 3, TPO_R03_VAL_STANDBY);
-
- regulator_disable(tpo_td043->vcc_reg);
-
- tpo_td043->powered_on = 0;
-}
-
-static int tpo_td043_enable_dss(struct omap_dss_device *dssdev)
-{
- struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dssdev->dev);
- int r;
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- return 0;
-
- omapdss_dpi_set_timings(dssdev, &dssdev->panel.timings);
- omapdss_dpi_set_data_lines(dssdev, dssdev->phy.dpi.data_lines);
-
- r = omapdss_dpi_display_enable(dssdev);
- if (r)
- goto err0;
-
- /*
- * If we are resuming from system suspend, SPI clocks might not be
- * enabled yet, so we'll program the LCD from SPI PM resume callback.
- */
- if (!tpo_td043->spi_suspended) {
- r = tpo_td043_power_on(tpo_td043);
- if (r)
- goto err1;
- }
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
-err1:
- omapdss_dpi_display_disable(dssdev);
-err0:
- return r;
-}
-
-static void tpo_td043_disable_dss(struct omap_dss_device *dssdev)
-{
- struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dssdev->dev);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return;
-
- omapdss_dpi_display_disable(dssdev);
-
- if (!tpo_td043->spi_suspended)
- tpo_td043_power_off(tpo_td043);
-}
-
-static int tpo_td043_enable(struct omap_dss_device *dssdev)
-{
- dev_dbg(dssdev->dev, "enable\n");
-
- return tpo_td043_enable_dss(dssdev);
-}
-
-static void tpo_td043_disable(struct omap_dss_device *dssdev)
-{
- dev_dbg(dssdev->dev, "disable\n");
-
- tpo_td043_disable_dss(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-
-static int tpo_td043_probe(struct omap_dss_device *dssdev)
-{
- struct tpo_td043_device *tpo_td043 = g_tpo_td043;
- struct panel_tpo_td043_data *pdata = get_panel_data(dssdev);
- int ret = 0;
-
- dev_dbg(dssdev->dev, "probe\n");
-
- if (tpo_td043 == NULL) {
- dev_err(dssdev->dev, "missing tpo_td043_device\n");
- return -ENODEV;
- }
-
- if (!pdata)
- return -EINVAL;
-
- tpo_td043->nreset_gpio = pdata->nreset_gpio;
-
- dssdev->panel.timings = tpo_td043_timings;
- dssdev->ctrl.pixel_size = 24;
-
- tpo_td043->mode = TPO_R02_MODE_800x480;
- memcpy(tpo_td043->gamma, tpo_td043_def_gamma, sizeof(tpo_td043->gamma));
-
- tpo_td043->vcc_reg = regulator_get(dssdev->dev, "vcc");
- if (IS_ERR(tpo_td043->vcc_reg)) {
- dev_err(dssdev->dev, "failed to get LCD VCC regulator\n");
- ret = PTR_ERR(tpo_td043->vcc_reg);
- goto fail_regulator;
- }
-
- if (gpio_is_valid(tpo_td043->nreset_gpio)) {
- ret = devm_gpio_request_one(dssdev->dev,
- tpo_td043->nreset_gpio, GPIOF_OUT_INIT_LOW,
- "lcd reset");
- if (ret < 0) {
- dev_err(dssdev->dev, "couldn't request reset GPIO\n");
- goto fail_gpio_req;
- }
- }
-
- ret = sysfs_create_group(&dssdev->dev->kobj, &tpo_td043_attr_group);
- if (ret)
- dev_warn(dssdev->dev, "failed to create sysfs files\n");
-
- dev_set_drvdata(dssdev->dev, tpo_td043);
-
- return 0;
-
-fail_gpio_req:
- regulator_put(tpo_td043->vcc_reg);
-fail_regulator:
- kfree(tpo_td043);
- return ret;
-}
-
-static void tpo_td043_remove(struct omap_dss_device *dssdev)
-{
- struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dssdev->dev);
-
- dev_dbg(dssdev->dev, "remove\n");
-
- sysfs_remove_group(&dssdev->dev->kobj, &tpo_td043_attr_group);
- regulator_put(tpo_td043->vcc_reg);
-}
-
-static void tpo_td043_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- omapdss_dpi_set_timings(dssdev, timings);
-
- dssdev->panel.timings = *timings;
-}
-
-static int tpo_td043_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- return dpi_check_timings(dssdev, timings);
-}
-
-static struct omap_dss_driver tpo_td043_driver = {
- .probe = tpo_td043_probe,
- .remove = tpo_td043_remove,
-
- .enable = tpo_td043_enable,
- .disable = tpo_td043_disable,
- .set_mirror = tpo_td043_set_hmirror,
- .get_mirror = tpo_td043_get_hmirror,
-
- .set_timings = tpo_td043_set_timings,
- .check_timings = tpo_td043_check_timings,
-
- .driver = {
- .name = "tpo_td043mtea1_panel",
- .owner = THIS_MODULE,
- },
-};
-
-static int tpo_td043_spi_probe(struct spi_device *spi)
-{
- struct omap_dss_device *dssdev = spi->dev.platform_data;
- struct tpo_td043_device *tpo_td043;
- int ret;
-
- if (dssdev == NULL) {
- dev_err(&spi->dev, "missing dssdev\n");
- return -ENODEV;
- }
-
- if (g_tpo_td043 != NULL)
- return -EBUSY;
-
- spi->bits_per_word = 16;
- spi->mode = SPI_MODE_0;
-
- ret = spi_setup(spi);
- if (ret < 0) {
- dev_err(&spi->dev, "spi_setup failed: %d\n", ret);
- return ret;
- }
-
- tpo_td043 = kzalloc(sizeof(*tpo_td043), GFP_KERNEL);
- if (tpo_td043 == NULL)
- return -ENOMEM;
-
- tpo_td043->spi = spi;
- dev_set_drvdata(&spi->dev, tpo_td043);
- g_tpo_td043 = tpo_td043;
-
- omap_dss_register_driver(&tpo_td043_driver);
-
- return 0;
-}
-
-static int tpo_td043_spi_remove(struct spi_device *spi)
-{
- struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&spi->dev);
-
- omap_dss_unregister_driver(&tpo_td043_driver);
- kfree(tpo_td043);
- g_tpo_td043 = NULL;
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int tpo_td043_spi_suspend(struct device *dev)
-{
- struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev);
-
- dev_dbg(dev, "tpo_td043_spi_suspend, tpo %p\n", tpo_td043);
-
- tpo_td043->power_on_resume = tpo_td043->powered_on;
- tpo_td043_power_off(tpo_td043);
- tpo_td043->spi_suspended = 1;
-
- return 0;
-}
-
-static int tpo_td043_spi_resume(struct device *dev)
-{
- struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev);
- int ret;
-
- dev_dbg(dev, "tpo_td043_spi_resume\n");
-
- if (tpo_td043->power_on_resume) {
- ret = tpo_td043_power_on(tpo_td043);
- if (ret)
- return ret;
- }
- tpo_td043->spi_suspended = 0;
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(tpo_td043_spi_pm,
- tpo_td043_spi_suspend, tpo_td043_spi_resume);
-
-static struct spi_driver tpo_td043_spi_driver = {
- .driver = {
- .name = "tpo_td043mtea1_panel_spi",
- .owner = THIS_MODULE,
- .pm = &tpo_td043_spi_pm,
- },
- .probe = tpo_td043_spi_probe,
- .remove = tpo_td043_spi_remove,
-};
-
-module_spi_driver(tpo_td043_spi_driver);
-
-MODULE_AUTHOR("Gražvydas Ignotas <notasas@gmail.com>");
-MODULE_DESCRIPTION("TPO TD043MTEA1 LCD Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 8f70a8300b8..dde4281663b 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -42,6 +42,7 @@ config OMAP2_DSS_DPI
config OMAP2_DSS_RFBI
bool "RFBI support"
+ depends on BROKEN
default n
help
MIPI DBI support (RFBI, Remote Framebuffer Interface, in Texas
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
index 61949ff7940..94832eb06a3 100644
--- a/drivers/video/omap2/dss/Makefile
+++ b/drivers/video/omap2/dss/Makefile
@@ -7,9 +7,8 @@ omapdss-y += manager.o manager-sysfs.o overlay.o overlay-sysfs.o apply.o \
dispc-compat.o display-sysfs.o
omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
-omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o venc_panel.o
+omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o
omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o
-omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi.o \
- hdmi_panel.o ti_hdmi_4xxx_ip.o
+omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi.o ti_hdmi_4xxx_ip.o
ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG
diff --git a/drivers/video/omap2/dss/apply.c b/drivers/video/omap2/dss/apply.c
index d6212d63cfb..60758dbefd7 100644
--- a/drivers/video/omap2/dss/apply.c
+++ b/drivers/video/omap2/dss/apply.c
@@ -428,8 +428,8 @@ static struct omap_dss_device *dss_mgr_get_device(struct omap_overlay_manager *m
if (dssdev == NULL)
return NULL;
- while (dssdev->device)
- dssdev = dssdev->device;
+ while (dssdev->dst)
+ dssdev = dssdev->dst;
if (dssdev->driver)
return dssdev;
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 1aeb274e30f..60d3958d04f 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -43,9 +43,6 @@
static struct {
struct platform_device *pdev;
- struct regulator *vdds_dsi_reg;
- struct regulator *vdds_sdi_reg;
-
const char *default_display_name;
} core;
@@ -79,36 +76,6 @@ struct platform_device *dss_get_core_pdev(void)
return core.pdev;
}
-/* REGULATORS */
-
-struct regulator *dss_get_vdds_dsi(void)
-{
- struct regulator *reg;
-
- if (core.vdds_dsi_reg != NULL)
- return core.vdds_dsi_reg;
-
- reg = devm_regulator_get(&core.pdev->dev, "vdds_dsi");
- if (!IS_ERR(reg))
- core.vdds_dsi_reg = reg;
-
- return reg;
-}
-
-struct regulator *dss_get_vdds_sdi(void)
-{
- struct regulator *reg;
-
- if (core.vdds_sdi_reg != NULL)
- return core.vdds_sdi_reg;
-
- reg = devm_regulator_get(&core.pdev->dev, "vdds_sdi");
- if (!IS_ERR(reg))
- core.vdds_sdi_reg = reg;
-
- return reg;
-}
-
int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask)
{
struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
@@ -189,7 +156,7 @@ int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir,
write, &dss_debug_fops);
- return PTR_RET(d);
+ return PTR_ERR_OR_ZERO(d);
}
#else /* CONFIG_OMAP2_DSS_DEBUGFS */
static inline int dss_initialize_debugfs(void)
@@ -281,235 +248,6 @@ static struct platform_driver omap_dss_driver = {
},
};
-/* BUS */
-static int dss_bus_match(struct device *dev, struct device_driver *driver)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
-
- DSSDBG("bus_match. dev %s/%s, drv %s\n",
- dev_name(dev), dssdev->driver_name, driver->name);
-
- return strcmp(dssdev->driver_name, driver->name) == 0;
-}
-
-static struct bus_type dss_bus_type = {
- .name = "omapdss",
- .match = dss_bus_match,
-};
-
-static void dss_bus_release(struct device *dev)
-{
- DSSDBG("bus_release\n");
-}
-
-static struct device dss_bus = {
- .release = dss_bus_release,
-};
-
-struct bus_type *dss_get_bus(void)
-{
- return &dss_bus_type;
-}
-
-/* DRIVER */
-static int dss_driver_probe(struct device *dev)
-{
- int r;
- struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver);
- struct omap_dss_device *dssdev = to_dss_device(dev);
-
- DSSDBG("driver_probe: dev %s/%s, drv %s\n",
- dev_name(dev), dssdev->driver_name,
- dssdrv->driver.name);
-
- r = dssdrv->probe(dssdev);
-
- if (r) {
- DSSERR("driver probe failed: %d\n", r);
- return r;
- }
-
- DSSDBG("probe done for device %s\n", dev_name(dev));
-
- dssdev->driver = dssdrv;
-
- return 0;
-}
-
-static int dss_driver_remove(struct device *dev)
-{
- struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver);
- struct omap_dss_device *dssdev = to_dss_device(dev);
-
- DSSDBG("driver_remove: dev %s/%s\n", dev_name(dev),
- dssdev->driver_name);
-
- dssdrv->remove(dssdev);
-
- dssdev->driver = NULL;
-
- return 0;
-}
-
-static int omapdss_default_connect(struct omap_dss_device *dssdev)
-{
- struct omap_dss_device *out;
- struct omap_overlay_manager *mgr;
- int r;
-
- out = dssdev->output;
-
- if (out == NULL)
- return -ENODEV;
-
- mgr = omap_dss_get_overlay_manager(out->dispc_channel);
- if (!mgr)
- return -ENODEV;
-
- r = dss_mgr_connect(mgr, out);
- if (r)
- return r;
-
- return 0;
-}
-
-static void omapdss_default_disconnect(struct omap_dss_device *dssdev)
-{
- struct omap_dss_device *out;
- struct omap_overlay_manager *mgr;
-
- out = dssdev->output;
-
- if (out == NULL)
- return;
-
- mgr = out->manager;
-
- if (mgr == NULL)
- return;
-
- dss_mgr_disconnect(mgr, out);
-}
-
-int omap_dss_register_driver(struct omap_dss_driver *dssdriver)
-{
- dssdriver->driver.bus = &dss_bus_type;
- dssdriver->driver.probe = dss_driver_probe;
- dssdriver->driver.remove = dss_driver_remove;
-
- if (dssdriver->get_resolution == NULL)
- dssdriver->get_resolution = omapdss_default_get_resolution;
- if (dssdriver->get_recommended_bpp == NULL)
- dssdriver->get_recommended_bpp =
- omapdss_default_get_recommended_bpp;
- if (dssdriver->get_timings == NULL)
- dssdriver->get_timings = omapdss_default_get_timings;
- if (dssdriver->connect == NULL)
- dssdriver->connect = omapdss_default_connect;
- if (dssdriver->disconnect == NULL)
- dssdriver->disconnect = omapdss_default_disconnect;
-
- return driver_register(&dssdriver->driver);
-}
-EXPORT_SYMBOL(omap_dss_register_driver);
-
-void omap_dss_unregister_driver(struct omap_dss_driver *dssdriver)
-{
- driver_unregister(&dssdriver->driver);
-}
-EXPORT_SYMBOL(omap_dss_unregister_driver);
-
-/* DEVICE */
-
-static void omap_dss_dev_release(struct device *dev)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- kfree(dssdev);
-}
-
-static int disp_num_counter;
-
-struct omap_dss_device *dss_alloc_and_init_device(struct device *parent)
-{
- struct omap_dss_device *dssdev;
-
- dssdev = kzalloc(sizeof(*dssdev), GFP_KERNEL);
- if (!dssdev)
- return NULL;
-
- dssdev->old_dev.bus = &dss_bus_type;
- dssdev->old_dev.parent = parent;
- dssdev->old_dev.release = omap_dss_dev_release;
- dev_set_name(&dssdev->old_dev, "display%d", disp_num_counter++);
-
- device_initialize(&dssdev->old_dev);
-
- return dssdev;
-}
-
-int dss_add_device(struct omap_dss_device *dssdev)
-{
- dssdev->dev = &dssdev->old_dev;
-
- omapdss_register_display(dssdev);
- return device_add(&dssdev->old_dev);
-}
-
-void dss_put_device(struct omap_dss_device *dssdev)
-{
- put_device(&dssdev->old_dev);
-}
-
-void dss_unregister_device(struct omap_dss_device *dssdev)
-{
- device_unregister(&dssdev->old_dev);
- omapdss_unregister_display(dssdev);
-}
-
-static int dss_unregister_dss_dev(struct device *dev, void *data)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- dss_unregister_device(dssdev);
- return 0;
-}
-
-void dss_unregister_child_devices(struct device *parent)
-{
- device_for_each_child(parent, NULL, dss_unregister_dss_dev);
-}
-
-void dss_copy_device_pdata(struct omap_dss_device *dst,
- const struct omap_dss_device *src)
-{
- u8 *d = (u8 *)dst;
- u8 *s = (u8 *)src;
- size_t dsize = sizeof(struct device);
-
- memcpy(d + dsize, s + dsize, sizeof(struct omap_dss_device) - dsize);
-}
-
-/* BUS */
-static int __init omap_dss_bus_register(void)
-{
- int r;
-
- r = bus_register(&dss_bus_type);
- if (r) {
- DSSERR("bus register failed\n");
- return r;
- }
-
- dev_set_name(&dss_bus, "omapdss");
- r = device_register(&dss_bus);
- if (r) {
- DSSERR("bus driver register failed\n");
- bus_unregister(&dss_bus_type);
- return r;
- }
-
- return 0;
-}
-
/* INIT */
static int (*dss_output_drv_reg_funcs[])(void) __initdata = {
#ifdef CONFIG_OMAP2_DSS_DSI
@@ -555,7 +293,7 @@ static void (*dss_output_drv_unreg_funcs[])(void) __exitdata = {
static bool dss_output_drv_loaded[ARRAY_SIZE(dss_output_drv_reg_funcs)];
-static int __init omap_dss_register_drivers(void)
+static int __init omap_dss_init(void)
{
int r;
int i;
@@ -586,6 +324,8 @@ static int __init omap_dss_register_drivers(void)
dss_output_drv_loaded[i] = true;
}
+ dss_initialized = true;
+
return 0;
err_dispc:
@@ -596,7 +336,7 @@ err_dss:
return r;
}
-static void __exit omap_dss_unregister_drivers(void)
+static void __exit omap_dss_exit(void)
{
int i;
@@ -611,64 +351,8 @@ static void __exit omap_dss_unregister_drivers(void)
platform_driver_unregister(&omap_dss_driver);
}
-#ifdef CONFIG_OMAP2_DSS_MODULE
-static void omap_dss_bus_unregister(void)
-{
- device_unregister(&dss_bus);
-
- bus_unregister(&dss_bus_type);
-}
-
-static int __init omap_dss_init(void)
-{
- int r;
-
- r = omap_dss_bus_register();
- if (r)
- return r;
-
- r = omap_dss_register_drivers();
- if (r) {
- omap_dss_bus_unregister();
- return r;
- }
-
- dss_initialized = true;
-
- return 0;
-}
-
-static void __exit omap_dss_exit(void)
-{
- omap_dss_unregister_drivers();
-
- omap_dss_bus_unregister();
-}
-
module_init(omap_dss_init);
module_exit(omap_dss_exit);
-#else
-static int __init omap_dss_init(void)
-{
- return omap_dss_bus_register();
-}
-
-static int __init omap_dss_init2(void)
-{
- int r;
-
- r = omap_dss_register_drivers();
- if (r)
- return r;
-
- dss_initialized = true;
-
- return 0;
-}
-
-core_initcall(omap_dss_init);
-device_initcall(omap_dss_init2);
-#endif
MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>");
MODULE_DESCRIPTION("OMAP2/3 Display Subsystem");
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index a6b331ef776..bd48cde5356 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -345,7 +345,7 @@ static void dpi_config_lcd_manager(struct omap_overlay_manager *mgr)
dss_mgr_set_lcd_config(mgr, &dpi.mgr_config);
}
-int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
+static int dpi_display_enable(struct omap_dss_device *dssdev)
{
struct omap_dss_device *out = &dpi.output;
int r;
@@ -423,9 +423,8 @@ err_no_reg:
mutex_unlock(&dpi.lock);
return r;
}
-EXPORT_SYMBOL(omapdss_dpi_display_enable);
-void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
+static void dpi_display_disable(struct omap_dss_device *dssdev)
{
struct omap_overlay_manager *mgr = dpi.output.manager;
@@ -446,9 +445,8 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
mutex_unlock(&dpi.lock);
}
-EXPORT_SYMBOL(omapdss_dpi_display_disable);
-void omapdss_dpi_set_timings(struct omap_dss_device *dssdev,
+static void dpi_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
DSSDBG("dpi_set_timings\n");
@@ -459,7 +457,6 @@ void omapdss_dpi_set_timings(struct omap_dss_device *dssdev,
mutex_unlock(&dpi.lock);
}
-EXPORT_SYMBOL(omapdss_dpi_set_timings);
static void dpi_get_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
@@ -471,7 +468,7 @@ static void dpi_get_timings(struct omap_dss_device *dssdev,
mutex_unlock(&dpi.lock);
}
-int dpi_check_timings(struct omap_dss_device *dssdev,
+static int dpi_check_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
struct omap_overlay_manager *mgr = dpi.output.manager;
@@ -510,9 +507,8 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
return 0;
}
-EXPORT_SYMBOL(dpi_check_timings);
-void omapdss_dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
+static void dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
{
mutex_lock(&dpi.lock);
@@ -520,7 +516,6 @@ void omapdss_dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
mutex_unlock(&dpi.lock);
}
-EXPORT_SYMBOL(omapdss_dpi_set_data_lines);
static int dpi_verify_dsi_pll(struct platform_device *dsidev)
{
@@ -554,14 +549,10 @@ static int dpi_init_regulator(void)
if (dpi.vdds_dsi_reg)
return 0;
- vdds_dsi = dss_get_vdds_dsi();
-
+ vdds_dsi = devm_regulator_get(&dpi.pdev->dev, "vdds_dsi");
if (IS_ERR(vdds_dsi)) {
- vdds_dsi = devm_regulator_get(&dpi.pdev->dev, "vdds_dsi");
- if (IS_ERR(vdds_dsi)) {
- DSSERR("can't get VDDS_DSI regulator\n");
- return PTR_ERR(vdds_dsi);
- }
+ DSSERR("can't get VDDS_DSI regulator\n");
+ return PTR_ERR(vdds_dsi);
}
dpi.vdds_dsi_reg = vdds_dsi;
@@ -618,76 +609,6 @@ static enum omap_channel dpi_get_channel(void)
}
}
-static struct omap_dss_device *dpi_find_dssdev(struct platform_device *pdev)
-{
- struct omap_dss_board_info *pdata = pdev->dev.platform_data;
- const char *def_disp_name = omapdss_get_default_display_name();
- struct omap_dss_device *def_dssdev;
- int i;
-
- def_dssdev = NULL;
-
- for (i = 0; i < pdata->num_devices; ++i) {
- struct omap_dss_device *dssdev = pdata->devices[i];
-
- if (dssdev->type != OMAP_DISPLAY_TYPE_DPI)
- continue;
-
- if (def_dssdev == NULL)
- def_dssdev = dssdev;
-
- if (def_disp_name != NULL &&
- strcmp(dssdev->name, def_disp_name) == 0) {
- def_dssdev = dssdev;
- break;
- }
- }
-
- return def_dssdev;
-}
-
-static int dpi_probe_pdata(struct platform_device *dpidev)
-{
- struct omap_dss_device *plat_dssdev;
- struct omap_dss_device *dssdev;
- int r;
-
- plat_dssdev = dpi_find_dssdev(dpidev);
-
- if (!plat_dssdev)
- return 0;
-
- r = dpi_init_regulator();
- if (r)
- return r;
-
- dpi_init_pll();
-
- dssdev = dss_alloc_and_init_device(&dpidev->dev);
- if (!dssdev)
- return -ENOMEM;
-
- dss_copy_device_pdata(dssdev, plat_dssdev);
-
- r = omapdss_output_set_device(&dpi.output, dssdev);
- if (r) {
- DSSERR("failed to connect output to new device: %s\n",
- dssdev->name);
- dss_put_device(dssdev);
- return r;
- }
-
- r = dss_add_device(dssdev);
- if (r) {
- DSSERR("device %s register failed: %d\n", dssdev->name, r);
- omapdss_output_unset_device(&dpi.output);
- dss_put_device(dssdev);
- return r;
- }
-
- return 0;
-}
-
static int dpi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
@@ -722,9 +643,9 @@ static int dpi_connect(struct omap_dss_device *dssdev,
static void dpi_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- WARN_ON(dst != dssdev->device);
+ WARN_ON(dst != dssdev->dst);
- if (dst != dssdev->device)
+ if (dst != dssdev->dst)
return;
omapdss_output_unset_device(dssdev);
@@ -737,14 +658,14 @@ static const struct omapdss_dpi_ops dpi_ops = {
.connect = dpi_connect,
.disconnect = dpi_disconnect,
- .enable = omapdss_dpi_display_enable,
- .disable = omapdss_dpi_display_disable,
+ .enable = dpi_display_enable,
+ .disable = dpi_display_disable,
.check_timings = dpi_check_timings,
- .set_timings = omapdss_dpi_set_timings,
+ .set_timings = dpi_set_timings,
.get_timings = dpi_get_timings,
- .set_data_lines = omapdss_dpi_set_data_lines,
+ .set_data_lines = dpi_set_data_lines,
};
static void dpi_init_output(struct platform_device *pdev)
@@ -771,31 +692,17 @@ static void __exit dpi_uninit_output(struct platform_device *pdev)
static int omap_dpi_probe(struct platform_device *pdev)
{
- int r;
-
dpi.pdev = pdev;
mutex_init(&dpi.lock);
dpi_init_output(pdev);
- if (pdev->dev.platform_data) {
- r = dpi_probe_pdata(pdev);
- if (r)
- goto err_probe;
- }
-
return 0;
-
-err_probe:
- dpi_uninit_output(pdev);
- return r;
}
static int __exit omap_dpi_remove(struct platform_device *pdev)
{
- dss_unregister_child_devices(&pdev->dev);
-
dpi_uninit_output(pdev);
return 0;
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 99a043b08f0..a598b581228 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -205,6 +205,8 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
static void dsi_display_uninit_dispc(struct platform_device *dsidev,
struct omap_overlay_manager *mgr);
+static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel);
+
#define DSI_MAX_NR_ISRS 2
#define DSI_MAX_NR_LANES 5
@@ -383,16 +385,7 @@ static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dside
static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev)
{
- /* HACK: dssdev can be either the panel device, when using old API, or
- * the dsi device itself, when using the new API. So we solve this for
- * now by checking the dssdev->id. This will be removed when the old API
- * is removed.
- */
- if (dssdev->id == OMAP_DSS_OUTPUT_DSI1 ||
- dssdev->id == OMAP_DSS_OUTPUT_DSI2)
- return to_platform_device(dssdev->dev);
-
- return to_platform_device(dssdev->output->dev);
+ return to_platform_device(dssdev->dev);
}
struct platform_device *dsi_get_dsidev_from_id(int module)
@@ -432,23 +425,21 @@ static inline u32 dsi_read_reg(struct platform_device *dsidev,
return __raw_readl(dsi->base + idx.idx);
}
-void dsi_bus_lock(struct omap_dss_device *dssdev)
+static void dsi_bus_lock(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
down(&dsi->bus_lock);
}
-EXPORT_SYMBOL(dsi_bus_lock);
-void dsi_bus_unlock(struct omap_dss_device *dssdev)
+static void dsi_bus_unlock(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
up(&dsi->bus_lock);
}
-EXPORT_SYMBOL(dsi_bus_unlock);
static bool dsi_bus_is_locked(struct platform_device *dsidev)
{
@@ -2713,7 +2704,7 @@ static int dsi_vc_config_source(struct platform_device *dsidev, int channel,
return 0;
}
-void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
+static void dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
bool enable)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -2737,7 +2728,6 @@ void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
if (dsi->vm_timings.ddr_clk_always_on && enable)
dsi_vc_send_null(dssdev, channel);
}
-EXPORT_SYMBOL(omapdss_dsi_vc_enable_hs);
static void dsi_vc_flush_long_data(struct platform_device *dsidev, int channel)
{
@@ -2842,7 +2832,7 @@ static int dsi_vc_send_bta(struct platform_device *dsidev, int channel)
return 0;
}
-int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
+static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
DECLARE_COMPLETION_ONSTACK(completion);
@@ -2885,7 +2875,6 @@ err1:
err0:
return r;
}
-EXPORT_SYMBOL(dsi_vc_send_bta_sync);
static inline void dsi_vc_write_long_header(struct platform_device *dsidev,
int channel, u8 data_type, u16 len, u8 ecc)
@@ -3011,14 +3000,13 @@ static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
return 0;
}
-int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel)
+static int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
return dsi_vc_send_long(dsidev, channel, MIPI_DSI_NULL_PACKET, NULL,
0, 0);
}
-EXPORT_SYMBOL(dsi_vc_send_null);
static int dsi_vc_write_nosync_common(struct platform_device *dsidev,
int channel, u8 *data, int len, enum dss_dsi_content_type type)
@@ -3050,7 +3038,7 @@ static int dsi_vc_write_nosync_common(struct platform_device *dsidev,
return r;
}
-int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
+static int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
u8 *data, int len)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -3058,9 +3046,8 @@ int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
return dsi_vc_write_nosync_common(dsidev, channel, data, len,
DSS_DSI_CONTENT_DCS);
}
-EXPORT_SYMBOL(dsi_vc_dcs_write_nosync);
-int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel,
+static int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel,
u8 *data, int len)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -3068,7 +3055,6 @@ int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel,
return dsi_vc_write_nosync_common(dsidev, channel, data, len,
DSS_DSI_CONTENT_GENERIC);
}
-EXPORT_SYMBOL(dsi_vc_generic_write_nosync);
static int dsi_vc_write_common(struct omap_dss_device *dssdev, int channel,
u8 *data, int len, enum dss_dsi_content_type type)
@@ -3099,60 +3085,19 @@ err:
return r;
}
-int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
+static int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
int len)
{
return dsi_vc_write_common(dssdev, channel, data, len,
DSS_DSI_CONTENT_DCS);
}
-EXPORT_SYMBOL(dsi_vc_dcs_write);
-int dsi_vc_generic_write(struct omap_dss_device *dssdev, int channel, u8 *data,
+static int dsi_vc_generic_write(struct omap_dss_device *dssdev, int channel, u8 *data,
int len)
{
return dsi_vc_write_common(dssdev, channel, data, len,
DSS_DSI_CONTENT_GENERIC);
}
-EXPORT_SYMBOL(dsi_vc_generic_write);
-
-int dsi_vc_dcs_write_0(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd)
-{
- return dsi_vc_dcs_write(dssdev, channel, &dcs_cmd, 1);
-}
-EXPORT_SYMBOL(dsi_vc_dcs_write_0);
-
-int dsi_vc_generic_write_0(struct omap_dss_device *dssdev, int channel)
-{
- return dsi_vc_generic_write(dssdev, channel, NULL, 0);
-}
-EXPORT_SYMBOL(dsi_vc_generic_write_0);
-
-int dsi_vc_dcs_write_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
- u8 param)
-{
- u8 buf[2];
- buf[0] = dcs_cmd;
- buf[1] = param;
- return dsi_vc_dcs_write(dssdev, channel, buf, 2);
-}
-EXPORT_SYMBOL(dsi_vc_dcs_write_1);
-
-int dsi_vc_generic_write_1(struct omap_dss_device *dssdev, int channel,
- u8 param)
-{
- return dsi_vc_generic_write(dssdev, channel, &param, 1);
-}
-EXPORT_SYMBOL(dsi_vc_generic_write_1);
-
-int dsi_vc_generic_write_2(struct omap_dss_device *dssdev, int channel,
- u8 param1, u8 param2)
-{
- u8 buf[2];
- buf[0] = param1;
- buf[1] = param2;
- return dsi_vc_generic_write(dssdev, channel, buf, 2);
-}
-EXPORT_SYMBOL(dsi_vc_generic_write_2);
static int dsi_vc_dcs_send_read_request(struct platform_device *dsidev,
int channel, u8 dcs_cmd)
@@ -3319,7 +3264,7 @@ err:
return r;
}
-int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
+static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
u8 *buf, int buflen)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -3348,7 +3293,6 @@ err:
DSSERR("dsi_vc_dcs_read(ch %d, cmd 0x%02x) failed\n", channel, dcs_cmd);
return r;
}
-EXPORT_SYMBOL(dsi_vc_dcs_read);
static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
u8 *reqdata, int reqlen, u8 *buf, int buflen)
@@ -3377,56 +3321,7 @@ static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel,
return 0;
}
-int dsi_vc_generic_read_0(struct omap_dss_device *dssdev, int channel, u8 *buf,
- int buflen)
-{
- int r;
-
- r = dsi_vc_generic_read(dssdev, channel, NULL, 0, buf, buflen);
- if (r) {
- DSSERR("dsi_vc_generic_read_0(ch %d) failed\n", channel);
- return r;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(dsi_vc_generic_read_0);
-
-int dsi_vc_generic_read_1(struct omap_dss_device *dssdev, int channel, u8 param,
- u8 *buf, int buflen)
-{
- int r;
-
- r = dsi_vc_generic_read(dssdev, channel, &param, 1, buf, buflen);
- if (r) {
- DSSERR("dsi_vc_generic_read_1(ch %d) failed\n", channel);
- return r;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(dsi_vc_generic_read_1);
-
-int dsi_vc_generic_read_2(struct omap_dss_device *dssdev, int channel,
- u8 param1, u8 param2, u8 *buf, int buflen)
-{
- int r;
- u8 reqdata[2];
-
- reqdata[0] = param1;
- reqdata[1] = param2;
-
- r = dsi_vc_generic_read(dssdev, channel, reqdata, 2, buf, buflen);
- if (r) {
- DSSERR("dsi_vc_generic_read_2(ch %d) failed\n", channel);
- return r;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(dsi_vc_generic_read_2);
-
-int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
+static int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
u16 len)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -3434,7 +3329,6 @@ int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
return dsi_vc_send_short(dsidev, channel,
MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, len, 0);
}
-EXPORT_SYMBOL(dsi_vc_set_max_rx_packet_size);
static int dsi_enter_ulps(struct platform_device *dsidev)
{
@@ -4068,7 +3962,7 @@ static void dsi_proto_timings(struct platform_device *dsidev)
}
}
-int omapdss_dsi_configure_pins(struct omap_dss_device *dssdev,
+static int dsi_configure_pins(struct omap_dss_device *dssdev,
const struct omap_dsi_pin_config *pin_cfg)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -4134,9 +4028,8 @@ int omapdss_dsi_configure_pins(struct omap_dss_device *dssdev,
return 0;
}
-EXPORT_SYMBOL(omapdss_dsi_configure_pins);
-int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
+static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -4206,9 +4099,8 @@ err_pix_fmt:
err_init_dispc:
return r;
}
-EXPORT_SYMBOL(dsi_enable_video_output);
-void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
+static void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -4229,7 +4121,6 @@ void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
dsi_display_uninit_dispc(dsidev, mgr);
}
-EXPORT_SYMBOL(dsi_disable_video_output);
static void dsi_update_screen_dispc(struct platform_device *dsidev)
{
@@ -4369,7 +4260,7 @@ static void dsi_framedone_irq_callback(void *data)
dsi_handle_framedone(dsidev, 0);
}
-int omap_dsi_update(struct omap_dss_device *dssdev, int channel,
+static int dsi_update(struct omap_dss_device *dssdev, int channel,
void (*callback)(int, void *), void *data)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -4394,7 +4285,6 @@ int omap_dsi_update(struct omap_dss_device *dssdev, int channel,
return 0;
}
-EXPORT_SYMBOL(omap_dsi_update);
/* Display funcs */
@@ -4589,7 +4479,7 @@ static void dsi_display_uninit_dsi(struct platform_device *dsidev,
dsi_pll_uninit(dsidev, disconnect_lanes);
}
-int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
+static int dsi_display_enable(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -4625,9 +4515,8 @@ err_get_dsi:
DSSDBG("dsi_display_enable FAILED\n");
return r;
}
-EXPORT_SYMBOL(omapdss_dsi_display_enable);
-void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
+static void dsi_display_disable(struct omap_dss_device *dssdev,
bool disconnect_lanes, bool enter_ulps)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -4651,9 +4540,8 @@ void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
mutex_unlock(&dsi->lock);
}
-EXPORT_SYMBOL(omapdss_dsi_display_disable);
-int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
+static int dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -4661,7 +4549,6 @@ int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
dsi->te_enabled = enable;
return 0;
}
-EXPORT_SYMBOL(omapdss_dsi_enable_te);
#ifdef PRINT_VERBOSE_VM_TIMINGS
static void print_dsi_vm(const char *str,
@@ -5136,7 +5023,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
dsi_vm_calc_pll_cb, ctx);
}
-int omapdss_dsi_set_config(struct omap_dss_device *dssdev,
+static int dsi_set_config(struct omap_dss_device *dssdev,
const struct omap_dss_dsi_config *config)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -5184,7 +5071,6 @@ err:
return r;
}
-EXPORT_SYMBOL(omapdss_dsi_set_config);
/*
* Return a hardcoded channel for the DSI output. This should work for
@@ -5235,7 +5121,7 @@ static enum omap_channel dsi_get_channel(int module_id)
}
}
-int omap_dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
+static int dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -5252,9 +5138,8 @@ int omap_dsi_request_vc(struct omap_dss_device *dssdev, int *channel)
DSSERR("cannot get VC for display %s", dssdev->name);
return -ENOSPC;
}
-EXPORT_SYMBOL(omap_dsi_request_vc);
-int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
+static int dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -5279,9 +5164,8 @@ int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id)
return 0;
}
-EXPORT_SYMBOL(omap_dsi_set_vc_id);
-void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel)
+static void dsi_release_vc(struct omap_dss_device *dssdev, int channel)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -5292,7 +5176,6 @@ void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel)
dsi->vc[channel].vc_id = 0;
}
}
-EXPORT_SYMBOL(omap_dsi_release_vc);
void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev)
{
@@ -5348,79 +5231,6 @@ static int dsi_get_clocks(struct platform_device *dsidev)
return 0;
}
-static struct omap_dss_device *dsi_find_dssdev(struct platform_device *pdev)
-{
- struct omap_dss_board_info *pdata = pdev->dev.platform_data;
- struct dsi_data *dsi = dsi_get_dsidrv_data(pdev);
- const char *def_disp_name = omapdss_get_default_display_name();
- struct omap_dss_device *def_dssdev;
- int i;
-
- def_dssdev = NULL;
-
- for (i = 0; i < pdata->num_devices; ++i) {
- struct omap_dss_device *dssdev = pdata->devices[i];
-
- if (dssdev->type != OMAP_DISPLAY_TYPE_DSI)
- continue;
-
- if (dssdev->phy.dsi.module != dsi->module_id)
- continue;
-
- if (def_dssdev == NULL)
- def_dssdev = dssdev;
-
- if (def_disp_name != NULL &&
- strcmp(dssdev->name, def_disp_name) == 0) {
- def_dssdev = dssdev;
- break;
- }
- }
-
- return def_dssdev;
-}
-
-static int dsi_probe_pdata(struct platform_device *dsidev)
-{
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- struct omap_dss_device *plat_dssdev;
- struct omap_dss_device *dssdev;
- int r;
-
- plat_dssdev = dsi_find_dssdev(dsidev);
-
- if (!plat_dssdev)
- return 0;
-
- r = dsi_regulator_init(dsidev);
- if (r)
- return r;
-
- dssdev = dss_alloc_and_init_device(&dsidev->dev);
- if (!dssdev)
- return -ENOMEM;
-
- dss_copy_device_pdata(dssdev, plat_dssdev);
-
- r = omapdss_output_set_device(&dsi->output, dssdev);
- if (r) {
- DSSERR("failed to connect output to new device: %s\n",
- dssdev->name);
- dss_put_device(dssdev);
- return r;
- }
-
- r = dss_add_device(dssdev);
- if (r) {
- DSSERR("device %s register failed: %d\n", dssdev->name, r);
- omapdss_output_unset_device(&dsi->output);
- dss_put_device(dssdev);
- return r;
- }
-
- return 0;
-}
-
static int dsi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
@@ -5454,9 +5264,9 @@ static int dsi_connect(struct omap_dss_device *dssdev,
static void dsi_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- WARN_ON(dst != dssdev->device);
+ WARN_ON(dst != dssdev->dst);
- if (dst != dssdev->device)
+ if (dst != dssdev->dst)
return;
omapdss_output_unset_device(dssdev);
@@ -5472,24 +5282,24 @@ static const struct omapdss_dsi_ops dsi_ops = {
.bus_lock = dsi_bus_lock,
.bus_unlock = dsi_bus_unlock,
- .enable = omapdss_dsi_display_enable,
- .disable = omapdss_dsi_display_disable,
+ .enable = dsi_display_enable,
+ .disable = dsi_display_disable,
- .enable_hs = omapdss_dsi_vc_enable_hs,
+ .enable_hs = dsi_vc_enable_hs,
- .configure_pins = omapdss_dsi_configure_pins,
- .set_config = omapdss_dsi_set_config,
+ .configure_pins = dsi_configure_pins,
+ .set_config = dsi_set_config,
.enable_video_output = dsi_enable_video_output,
.disable_video_output = dsi_disable_video_output,
- .update = omap_dsi_update,
+ .update = dsi_update,
- .enable_te = omapdss_dsi_enable_te,
+ .enable_te = dsi_enable_te,
- .request_vc = omap_dsi_request_vc,
- .set_vc_id = omap_dsi_set_vc_id,
- .release_vc = omap_dsi_release_vc,
+ .request_vc = dsi_request_vc,
+ .set_vc_id = dsi_set_vc_id,
+ .release_vc = dsi_release_vc,
.dcs_write = dsi_vc_dcs_write,
.dcs_write_nosync = dsi_vc_dcs_write_nosync,
@@ -5627,12 +5437,6 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
dsi_init_output(dsidev);
- if (dsidev->dev.platform_data) {
- r = dsi_probe_pdata(dsidev);
- if (r)
- goto err_probe;
- }
-
dsi_runtime_put(dsidev);
if (dsi->module_id == 0)
@@ -5648,9 +5452,6 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
#endif
return 0;
-err_probe:
- dsi_runtime_put(dsidev);
- dsi_uninit_output(dsidev);
err_runtime_get:
pm_runtime_disable(&dsidev->dev);
return r;
@@ -5662,8 +5463,6 @@ static int __exit omap_dsihw_remove(struct platform_device *dsidev)
WARN_ON(dsi->scp_clk_refcount > 0);
- dss_unregister_child_devices(&dsidev->dev);
-
dsi_uninit_output(dsidev);
pm_runtime_disable(&dsidev->dev);
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 50a2362ef8f..e172531d196 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -163,22 +163,11 @@ struct platform_device;
/* core */
struct platform_device *dss_get_core_pdev(void);
-struct bus_type *dss_get_bus(void);
-struct regulator *dss_get_vdds_dsi(void);
-struct regulator *dss_get_vdds_sdi(void);
int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask);
void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask);
int dss_set_min_bus_tput(struct device *dev, unsigned long tput);
int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *));
-struct omap_dss_device *dss_alloc_and_init_device(struct device *parent);
-int dss_add_device(struct omap_dss_device *dssdev);
-void dss_unregister_device(struct omap_dss_device *dssdev);
-void dss_unregister_child_devices(struct device *parent);
-void dss_put_device(struct omap_dss_device *dssdev);
-void dss_copy_device_pdata(struct omap_dss_device *dst,
- const struct omap_dss_device *src);
-
/* display */
int dss_suspend_all_devices(void);
int dss_resume_all_devices(void);
@@ -436,44 +425,10 @@ int dispc_wb_setup(const struct omap_dss_writeback_info *wi,
/* VENC */
int venc_init_platform_driver(void) __init;
void venc_uninit_platform_driver(void) __exit;
-int omapdss_venc_display_enable(struct omap_dss_device *dssdev);
-void omapdss_venc_display_disable(struct omap_dss_device *dssdev);
-void omapdss_venc_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
-int omapdss_venc_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
-u32 omapdss_venc_get_wss(struct omap_dss_device *dssdev);
-int omapdss_venc_set_wss(struct omap_dss_device *dssdev, u32 wss);
-void omapdss_venc_set_type(struct omap_dss_device *dssdev,
- enum omap_dss_venc_type type);
-void omapdss_venc_invert_vid_out_polarity(struct omap_dss_device *dssdev,
- bool invert_polarity);
-int venc_panel_init(void);
-void venc_panel_exit(void);
/* HDMI */
int hdmi_init_platform_driver(void) __init;
void hdmi_uninit_platform_driver(void) __exit;
-int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev);
-void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev);
-int omapdss_hdmi_core_enable(struct omap_dss_device *dssdev);
-void omapdss_hdmi_core_disable(struct omap_dss_device *dssdev);
-void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
-int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
-int omapdss_hdmi_read_edid(u8 *buf, int len);
-bool omapdss_hdmi_detect(void);
-int hdmi_panel_init(void);
-void hdmi_panel_exit(void);
-#ifdef CONFIG_OMAP4_DSS_HDMI_AUDIO
-int hdmi_audio_enable(void);
-void hdmi_audio_disable(void);
-int hdmi_audio_start(void);
-void hdmi_audio_stop(void);
-bool hdmi_mode_has_audio(void);
-int hdmi_audio_config(struct omap_dss_audio *audio);
-#endif
/* RFBI */
int rfbi_init_platform_driver(void) __init;
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index 44a885b9282..82a96407499 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -66,10 +66,6 @@ static struct {
struct clk *sys_clk;
struct regulator *vdda_hdmi_dac_reg;
- int ct_cp_hpd_gpio;
- int ls_oe_gpio;
- int hpd_gpio;
-
bool core_enabled;
struct omap_dss_device output;
@@ -353,40 +349,6 @@ static int hdmi_init_regulator(void)
return 0;
}
-static int hdmi_init_display(struct omap_dss_device *dssdev)
-{
- int r;
-
- struct gpio gpios[] = {
- { hdmi.ct_cp_hpd_gpio, GPIOF_OUT_INIT_LOW, "hdmi_ct_cp_hpd" },
- { hdmi.ls_oe_gpio, GPIOF_OUT_INIT_LOW, "hdmi_ls_oe" },
- { hdmi.hpd_gpio, GPIOF_DIR_IN, "hdmi_hpd" },
- };
-
- DSSDBG("init_display\n");
-
- dss_init_hdmi_ip_ops(&hdmi.ip_data, omapdss_get_version());
-
- r = hdmi_init_regulator();
- if (r)
- return r;
-
- r = gpio_request_array(gpios, ARRAY_SIZE(gpios));
- if (r)
- return r;
-
- return 0;
-}
-
-static void hdmi_uninit_display(struct omap_dss_device *dssdev)
-{
- DSSDBG("uninit_display\n");
-
- gpio_free(hdmi.ct_cp_hpd_gpio);
- gpio_free(hdmi.ls_oe_gpio);
- gpio_free(hdmi.hpd_gpio);
-}
-
static const struct hdmi_config *hdmi_find_timing(
const struct hdmi_config *timings_arr,
int len)
@@ -517,17 +479,9 @@ static int hdmi_power_on_core(struct omap_dss_device *dssdev)
{
int r;
- if (gpio_is_valid(hdmi.ct_cp_hpd_gpio))
- gpio_set_value(hdmi.ct_cp_hpd_gpio, 1);
- if (gpio_is_valid(hdmi.ls_oe_gpio))
- gpio_set_value(hdmi.ls_oe_gpio, 1);
-
- /* wait 300us after CT_CP_HPD for the 5V power output to reach 90% */
- udelay(300);
-
r = regulator_enable(hdmi.vdda_hdmi_dac_reg);
if (r)
- goto err_vdac_enable;
+ return r;
r = hdmi_runtime_get();
if (r)
@@ -542,11 +496,7 @@ static int hdmi_power_on_core(struct omap_dss_device *dssdev)
err_runtime_get:
regulator_disable(hdmi.vdda_hdmi_dac_reg);
-err_vdac_enable:
- if (gpio_is_valid(hdmi.ct_cp_hpd_gpio))
- gpio_set_value(hdmi.ct_cp_hpd_gpio, 0);
- if (gpio_is_valid(hdmi.ls_oe_gpio))
- gpio_set_value(hdmi.ls_oe_gpio, 0);
+
return r;
}
@@ -556,10 +506,6 @@ static void hdmi_power_off_core(struct omap_dss_device *dssdev)
hdmi_runtime_put();
regulator_disable(hdmi.vdda_hdmi_dac_reg);
- if (gpio_is_valid(hdmi.ct_cp_hpd_gpio))
- gpio_set_value(hdmi.ct_cp_hpd_gpio, 0);
- if (gpio_is_valid(hdmi.ls_oe_gpio))
- gpio_set_value(hdmi.ls_oe_gpio, 0);
}
static int hdmi_power_on_full(struct omap_dss_device *dssdev)
@@ -640,7 +586,7 @@ static void hdmi_power_off_full(struct omap_dss_device *dssdev)
hdmi_power_off_core(dssdev);
}
-int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev,
+static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
struct hdmi_cm cm;
@@ -654,7 +600,7 @@ int omapdss_hdmi_display_check_timing(struct omap_dss_device *dssdev,
}
-void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev,
+static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
struct hdmi_cm cm;
@@ -666,15 +612,16 @@ void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev,
hdmi.ip_data.cfg.cm = cm;
t = hdmi_get_timings();
- if (t != NULL)
+ if (t != NULL) {
hdmi.ip_data.cfg = *t;
- dispc_set_tv_pclk(t->timings.pixel_clock * 1000);
+ dispc_set_tv_pclk(t->timings.pixel_clock * 1000);
+ }
mutex_unlock(&hdmi.lock);
}
-static void omapdss_hdmi_display_get_timings(struct omap_dss_device *dssdev,
+static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
const struct hdmi_config *cfg;
@@ -704,7 +651,7 @@ static void hdmi_dump_regs(struct seq_file *s)
mutex_unlock(&hdmi.lock);
}
-int omapdss_hdmi_read_edid(u8 *buf, int len)
+static int read_edid(u8 *buf, int len)
{
int r;
@@ -721,24 +668,7 @@ int omapdss_hdmi_read_edid(u8 *buf, int len)
return r;
}
-bool omapdss_hdmi_detect(void)
-{
- int r;
-
- mutex_lock(&hdmi.lock);
-
- r = hdmi_runtime_get();
- BUG_ON(r);
-
- r = gpio_get_value(hdmi.hpd_gpio);
-
- hdmi_runtime_put();
- mutex_unlock(&hdmi.lock);
-
- return r == 1;
-}
-
-int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev)
+static int hdmi_display_enable(struct omap_dss_device *dssdev)
{
struct omap_dss_device *out = &hdmi.output;
int r = 0;
@@ -767,7 +697,7 @@ err0:
return r;
}
-void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev)
+static void hdmi_display_disable(struct omap_dss_device *dssdev)
{
DSSDBG("Enter hdmi_display_disable\n");
@@ -778,7 +708,7 @@ void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev)
mutex_unlock(&hdmi.lock);
}
-int omapdss_hdmi_core_enable(struct omap_dss_device *dssdev)
+static int hdmi_core_enable(struct omap_dss_device *dssdev)
{
int r = 0;
@@ -800,7 +730,7 @@ err0:
return r;
}
-void omapdss_hdmi_core_disable(struct omap_dss_device *dssdev)
+static void hdmi_core_disable(struct omap_dss_device *dssdev)
{
DSSDBG("Enter omapdss_hdmi_core_disable\n");
@@ -927,35 +857,7 @@ int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts)
return 0;
}
-int hdmi_audio_enable(void)
-{
- DSSDBG("audio_enable\n");
-
- return hdmi.ip_data.ops->audio_enable(&hdmi.ip_data);
-}
-
-void hdmi_audio_disable(void)
-{
- DSSDBG("audio_disable\n");
-
- hdmi.ip_data.ops->audio_disable(&hdmi.ip_data);
-}
-
-int hdmi_audio_start(void)
-{
- DSSDBG("audio_start\n");
-
- return hdmi.ip_data.ops->audio_start(&hdmi.ip_data);
-}
-
-void hdmi_audio_stop(void)
-{
- DSSDBG("audio_stop\n");
-
- hdmi.ip_data.ops->audio_stop(&hdmi.ip_data);
-}
-
-bool hdmi_mode_has_audio(void)
+static bool hdmi_mode_has_audio(void)
{
if (hdmi.ip_data.cfg.cm.mode == HDMI_HDMI)
return true;
@@ -963,92 +865,8 @@ bool hdmi_mode_has_audio(void)
return false;
}
-int hdmi_audio_config(struct omap_dss_audio *audio)
-{
- return hdmi.ip_data.ops->audio_config(&hdmi.ip_data, audio);
-}
-
#endif
-static struct omap_dss_device *hdmi_find_dssdev(struct platform_device *pdev)
-{
- struct omap_dss_board_info *pdata = pdev->dev.platform_data;
- const char *def_disp_name = omapdss_get_default_display_name();
- struct omap_dss_device *def_dssdev;
- int i;
-
- def_dssdev = NULL;
-
- for (i = 0; i < pdata->num_devices; ++i) {
- struct omap_dss_device *dssdev = pdata->devices[i];
-
- if (dssdev->type != OMAP_DISPLAY_TYPE_HDMI)
- continue;
-
- if (def_dssdev == NULL)
- def_dssdev = dssdev;
-
- if (def_disp_name != NULL &&
- strcmp(dssdev->name, def_disp_name) == 0) {
- def_dssdev = dssdev;
- break;
- }
- }
-
- return def_dssdev;
-}
-
-static int hdmi_probe_pdata(struct platform_device *pdev)
-{
- struct omap_dss_device *plat_dssdev;
- struct omap_dss_device *dssdev;
- struct omap_dss_hdmi_data *priv;
- int r;
-
- plat_dssdev = hdmi_find_dssdev(pdev);
-
- if (!plat_dssdev)
- return 0;
-
- dssdev = dss_alloc_and_init_device(&pdev->dev);
- if (!dssdev)
- return -ENOMEM;
-
- dss_copy_device_pdata(dssdev, plat_dssdev);
-
- priv = dssdev->data;
-
- hdmi.ct_cp_hpd_gpio = priv->ct_cp_hpd_gpio;
- hdmi.ls_oe_gpio = priv->ls_oe_gpio;
- hdmi.hpd_gpio = priv->hpd_gpio;
-
- r = hdmi_init_display(dssdev);
- if (r) {
- DSSERR("device %s init failed: %d\n", dssdev->name, r);
- dss_put_device(dssdev);
- return r;
- }
-
- r = omapdss_output_set_device(&hdmi.output, dssdev);
- if (r) {
- DSSERR("failed to connect output to new device: %s\n",
- dssdev->name);
- dss_put_device(dssdev);
- return r;
- }
-
- r = dss_add_device(dssdev);
- if (r) {
- DSSERR("device %s register failed: %d\n", dssdev->name, r);
- omapdss_output_unset_device(&hdmi.output);
- hdmi_uninit_display(dssdev);
- dss_put_device(dssdev);
- return r;
- }
-
- return 0;
-}
-
static int hdmi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
@@ -1083,9 +901,9 @@ static int hdmi_connect(struct omap_dss_device *dssdev,
static void hdmi_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- WARN_ON(dst != dssdev->device);
+ WARN_ON(dst != dssdev->dst);
- if (dst != dssdev->device)
+ if (dst != dssdev->dst)
return;
omapdss_output_unset_device(dssdev);
@@ -1103,21 +921,21 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev,
need_enable = hdmi.core_enabled == false;
if (need_enable) {
- r = omapdss_hdmi_core_enable(dssdev);
+ r = hdmi_core_enable(dssdev);
if (r)
return r;
}
- r = omapdss_hdmi_read_edid(edid, len);
+ r = read_edid(edid, len);
if (need_enable)
- omapdss_hdmi_core_disable(dssdev);
+ hdmi_core_disable(dssdev);
return r;
}
#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
-static int omapdss_hdmi_audio_enable(struct omap_dss_device *dssdev)
+static int hdmi_audio_enable(struct omap_dss_device *dssdev)
{
int r;
@@ -1128,7 +946,8 @@ static int omapdss_hdmi_audio_enable(struct omap_dss_device *dssdev)
goto err;
}
- r = hdmi_audio_enable();
+
+ r = hdmi.ip_data.ops->audio_enable(&hdmi.ip_data);
if (r)
goto err;
@@ -1140,22 +959,22 @@ err:
return r;
}
-static void omapdss_hdmi_audio_disable(struct omap_dss_device *dssdev)
+static void hdmi_audio_disable(struct omap_dss_device *dssdev)
{
- hdmi_audio_disable();
+ hdmi.ip_data.ops->audio_disable(&hdmi.ip_data);
}
-static int omapdss_hdmi_audio_start(struct omap_dss_device *dssdev)
+static int hdmi_audio_start(struct omap_dss_device *dssdev)
{
- return hdmi_audio_start();
+ return hdmi.ip_data.ops->audio_start(&hdmi.ip_data);
}
-static void omapdss_hdmi_audio_stop(struct omap_dss_device *dssdev)
+static void hdmi_audio_stop(struct omap_dss_device *dssdev)
{
- hdmi_audio_stop();
+ hdmi.ip_data.ops->audio_stop(&hdmi.ip_data);
}
-static bool omapdss_hdmi_audio_supported(struct omap_dss_device *dssdev)
+static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
{
bool r;
@@ -1167,7 +986,7 @@ static bool omapdss_hdmi_audio_supported(struct omap_dss_device *dssdev)
return r;
}
-static int omapdss_hdmi_audio_config(struct omap_dss_device *dssdev,
+static int hdmi_audio_config(struct omap_dss_device *dssdev,
struct omap_dss_audio *audio)
{
int r;
@@ -1179,7 +998,7 @@ static int omapdss_hdmi_audio_config(struct omap_dss_device *dssdev,
goto err;
}
- r = hdmi_audio_config(audio);
+ r = hdmi.ip_data.ops->audio_config(&hdmi.ip_data, audio);
if (r)
goto err;
@@ -1191,30 +1010,30 @@ err:
return r;
}
#else
-static int omapdss_hdmi_audio_enable(struct omap_dss_device *dssdev)
+static int hdmi_audio_enable(struct omap_dss_device *dssdev)
{
return -EPERM;
}
-static void omapdss_hdmi_audio_disable(struct omap_dss_device *dssdev)
+static void hdmi_audio_disable(struct omap_dss_device *dssdev)
{
}
-static int omapdss_hdmi_audio_start(struct omap_dss_device *dssdev)
+static int hdmi_audio_start(struct omap_dss_device *dssdev)
{
return -EPERM;
}
-static void omapdss_hdmi_audio_stop(struct omap_dss_device *dssdev)
+static void hdmi_audio_stop(struct omap_dss_device *dssdev)
{
}
-static bool omapdss_hdmi_audio_supported(struct omap_dss_device *dssdev)
+static bool hdmi_audio_supported(struct omap_dss_device *dssdev)
{
return false;
}
-static int omapdss_hdmi_audio_config(struct omap_dss_device *dssdev,
+static int hdmi_audio_config(struct omap_dss_device *dssdev,
struct omap_dss_audio *audio)
{
return -EPERM;
@@ -1225,21 +1044,21 @@ static const struct omapdss_hdmi_ops hdmi_ops = {
.connect = hdmi_connect,
.disconnect = hdmi_disconnect,
- .enable = omapdss_hdmi_display_enable,
- .disable = omapdss_hdmi_display_disable,
+ .enable = hdmi_display_enable,
+ .disable = hdmi_display_disable,
- .check_timings = omapdss_hdmi_display_check_timing,
- .set_timings = omapdss_hdmi_display_set_timing,
- .get_timings = omapdss_hdmi_display_get_timings,
+ .check_timings = hdmi_display_check_timing,
+ .set_timings = hdmi_display_set_timing,
+ .get_timings = hdmi_display_get_timings,
.read_edid = hdmi_read_edid,
- .audio_enable = omapdss_hdmi_audio_enable,
- .audio_disable = omapdss_hdmi_audio_disable,
- .audio_start = omapdss_hdmi_audio_start,
- .audio_stop = omapdss_hdmi_audio_stop,
- .audio_supported = omapdss_hdmi_audio_supported,
- .audio_config = omapdss_hdmi_audio_config,
+ .audio_enable = hdmi_audio_enable,
+ .audio_disable = hdmi_audio_disable,
+ .audio_start = hdmi_audio_start,
+ .audio_stop = hdmi_audio_stop,
+ .audio_supported = hdmi_audio_supported,
+ .audio_config = hdmi_audio_config,
};
static void hdmi_init_output(struct platform_device *pdev)
@@ -1301,50 +1120,15 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
hdmi.ip_data.pll_offset = HDMI_PLLCTRL;
hdmi.ip_data.phy_offset = HDMI_PHY;
- hdmi.ct_cp_hpd_gpio = -1;
- hdmi.ls_oe_gpio = -1;
- hdmi.hpd_gpio = -1;
-
hdmi_init_output(pdev);
- r = hdmi_panel_init();
- if (r) {
- DSSERR("can't init panel\n");
- return r;
- }
-
dss_debugfs_create_file("hdmi", hdmi_dump_regs);
- if (pdev->dev.platform_data) {
- r = hdmi_probe_pdata(pdev);
- if (r)
- goto err_probe;
- }
-
- return 0;
-
-err_probe:
- hdmi_panel_exit();
- hdmi_uninit_output(pdev);
- pm_runtime_disable(&pdev->dev);
- return r;
-}
-
-static int __exit hdmi_remove_child(struct device *dev, void *data)
-{
- struct omap_dss_device *dssdev = to_dss_device(dev);
- hdmi_uninit_display(dssdev);
return 0;
}
static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
{
- device_for_each_child(&pdev->dev, NULL, hdmi_remove_child);
-
- dss_unregister_child_devices(&pdev->dev);
-
- hdmi_panel_exit();
-
hdmi_uninit_output(pdev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/video/omap2/dss/hdmi_panel.c b/drivers/video/omap2/dss/hdmi_panel.c
deleted file mode 100644
index dfb8eda81b6..00000000000
--- a/drivers/video/omap2/dss/hdmi_panel.c
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- * hdmi_panel.c
- *
- * HDMI library support functions for TI OMAP4 processors.
- *
- * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com/
- * Authors: Mythri P k <mythripk@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/mutex.h>
-#include <linux/module.h>
-#include <video/omapdss.h>
-#include <linux/slab.h>
-
-#include "dss.h"
-
-static struct {
- /* This protects the panel ops, mainly when accessing the HDMI IP. */
- struct mutex lock;
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
- /* This protects the audio ops, specifically. */
- spinlock_t audio_lock;
-#endif
-} hdmi;
-
-
-static int hdmi_panel_probe(struct omap_dss_device *dssdev)
-{
- /* Initialize default timings to VGA in DVI mode */
- const struct omap_video_timings default_timings = {
- .x_res = 640,
- .y_res = 480,
- .pixel_clock = 25175,
- .hsw = 96,
- .hfp = 16,
- .hbp = 48,
- .vsw = 2,
- .vfp = 11,
- .vbp = 31,
-
- .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
- .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
-
- .interlace = false,
- };
-
- DSSDBG("ENTER hdmi_panel_probe\n");
-
- dssdev->panel.timings = default_timings;
-
- DSSDBG("hdmi_panel_probe x_res= %d y_res = %d\n",
- dssdev->panel.timings.x_res,
- dssdev->panel.timings.y_res);
-
- omapdss_hdmi_display_set_timing(dssdev, &dssdev->panel.timings);
-
- return 0;
-}
-
-static void hdmi_panel_remove(struct omap_dss_device *dssdev)
-{
-
-}
-
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
-static int hdmi_panel_audio_enable(struct omap_dss_device *dssdev)
-{
- unsigned long flags;
- int r;
-
- mutex_lock(&hdmi.lock);
- spin_lock_irqsave(&hdmi.audio_lock, flags);
-
- /* enable audio only if the display is active and supports audio */
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE ||
- !hdmi_mode_has_audio()) {
- DSSERR("audio not supported or display is off\n");
- r = -EPERM;
- goto err;
- }
-
- r = hdmi_audio_enable();
-
- if (!r)
- dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
-
-err:
- spin_unlock_irqrestore(&hdmi.audio_lock, flags);
- mutex_unlock(&hdmi.lock);
- return r;
-}
-
-static void hdmi_panel_audio_disable(struct omap_dss_device *dssdev)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&hdmi.audio_lock, flags);
-
- hdmi_audio_disable();
-
- dssdev->audio_state = OMAP_DSS_AUDIO_DISABLED;
-
- spin_unlock_irqrestore(&hdmi.audio_lock, flags);
-}
-
-static int hdmi_panel_audio_start(struct omap_dss_device *dssdev)
-{
- unsigned long flags;
- int r;
-
- spin_lock_irqsave(&hdmi.audio_lock, flags);
- /*
-	 * No need to check the panel state. It was checked when transitioning
-	 * to AUDIO_ENABLED.
- */
- if (dssdev->audio_state != OMAP_DSS_AUDIO_ENABLED) {
- DSSERR("audio start from invalid state\n");
- r = -EPERM;
- goto err;
- }
-
- r = hdmi_audio_start();
-
- if (!r)
- dssdev->audio_state = OMAP_DSS_AUDIO_PLAYING;
-
-err:
- spin_unlock_irqrestore(&hdmi.audio_lock, flags);
- return r;
-}
-
-static void hdmi_panel_audio_stop(struct omap_dss_device *dssdev)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&hdmi.audio_lock, flags);
-
- hdmi_audio_stop();
- dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
-
- spin_unlock_irqrestore(&hdmi.audio_lock, flags);
-}
-
-static bool hdmi_panel_audio_supported(struct omap_dss_device *dssdev)
-{
- bool r = false;
-
- mutex_lock(&hdmi.lock);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- goto err;
-
- if (!hdmi_mode_has_audio())
- goto err;
-
- r = true;
-err:
- mutex_unlock(&hdmi.lock);
- return r;
-}
-
-static int hdmi_panel_audio_config(struct omap_dss_device *dssdev,
- struct omap_dss_audio *audio)
-{
- unsigned long flags;
- int r;
-
- mutex_lock(&hdmi.lock);
- spin_lock_irqsave(&hdmi.audio_lock, flags);
-
- /* config audio only if the display is active and supports audio */
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE ||
- !hdmi_mode_has_audio()) {
- DSSERR("audio not supported or display is off\n");
- r = -EPERM;
- goto err;
- }
-
- r = hdmi_audio_config(audio);
-
- if (!r)
- dssdev->audio_state = OMAP_DSS_AUDIO_CONFIGURED;
-
-err:
- spin_unlock_irqrestore(&hdmi.audio_lock, flags);
- mutex_unlock(&hdmi.lock);
- return r;
-}
-
-#else
-static int hdmi_panel_audio_enable(struct omap_dss_device *dssdev)
-{
- return -EPERM;
-}
-
-static void hdmi_panel_audio_disable(struct omap_dss_device *dssdev)
-{
-}
-
-static int hdmi_panel_audio_start(struct omap_dss_device *dssdev)
-{
- return -EPERM;
-}
-
-static void hdmi_panel_audio_stop(struct omap_dss_device *dssdev)
-{
-}
-
-static bool hdmi_panel_audio_supported(struct omap_dss_device *dssdev)
-{
- return false;
-}
-
-static int hdmi_panel_audio_config(struct omap_dss_device *dssdev,
- struct omap_dss_audio *audio)
-{
- return -EPERM;
-}
-#endif
-
-static int hdmi_panel_enable(struct omap_dss_device *dssdev)
-{
- int r = 0;
- DSSDBG("ENTER hdmi_panel_enable\n");
-
- mutex_lock(&hdmi.lock);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
- r = -EINVAL;
- goto err;
- }
-
- omapdss_hdmi_display_set_timing(dssdev, &dssdev->panel.timings);
-
- r = omapdss_hdmi_display_enable(dssdev);
- if (r) {
- DSSERR("failed to power on\n");
- goto err;
- }
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
-err:
- mutex_unlock(&hdmi.lock);
-
- return r;
-}
-
-static void hdmi_panel_disable(struct omap_dss_device *dssdev)
-{
- mutex_lock(&hdmi.lock);
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
- /*
- * TODO: notify audio users that the display was disabled. For
- * now, disable audio locally to not break our audio state
- * machine.
- */
- hdmi_panel_audio_disable(dssdev);
- omapdss_hdmi_display_disable(dssdev);
- }
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-
- mutex_unlock(&hdmi.lock);
-}
-
-static void hdmi_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- mutex_lock(&hdmi.lock);
-
- *timings = dssdev->panel.timings;
-
- mutex_unlock(&hdmi.lock);
-}
-
-static void hdmi_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- DSSDBG("hdmi_set_timings\n");
-
- mutex_lock(&hdmi.lock);
-
- /*
- * TODO: notify audio users that there was a timings change. For
- * now, disable audio locally to not break our audio state machine.
- */
- hdmi_panel_audio_disable(dssdev);
-
- omapdss_hdmi_display_set_timing(dssdev, timings);
- dssdev->panel.timings = *timings;
-
- mutex_unlock(&hdmi.lock);
-}
-
-static int hdmi_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- int r = 0;
-
- DSSDBG("hdmi_check_timings\n");
-
- mutex_lock(&hdmi.lock);
-
- r = omapdss_hdmi_display_check_timing(dssdev, timings);
-
- mutex_unlock(&hdmi.lock);
- return r;
-}
-
-static int hdmi_read_edid(struct omap_dss_device *dssdev, u8 *buf, int len)
-{
- int r;
- bool need_enable;
-
- mutex_lock(&hdmi.lock);
-
- need_enable = dssdev->state == OMAP_DSS_DISPLAY_DISABLED;
-
- if (need_enable) {
- r = omapdss_hdmi_core_enable(dssdev);
- if (r)
- goto err;
- }
-
- r = omapdss_hdmi_read_edid(buf, len);
-
- if (need_enable)
- omapdss_hdmi_core_disable(dssdev);
-err:
- mutex_unlock(&hdmi.lock);
-
- return r;
-}
-
-static bool hdmi_detect(struct omap_dss_device *dssdev)
-{
- int r;
- bool need_enable;
-
- mutex_lock(&hdmi.lock);
-
- need_enable = dssdev->state == OMAP_DSS_DISPLAY_DISABLED;
-
- if (need_enable) {
- r = omapdss_hdmi_core_enable(dssdev);
- if (r)
- goto err;
- }
-
- r = omapdss_hdmi_detect();
-
- if (need_enable)
- omapdss_hdmi_core_disable(dssdev);
-err:
- mutex_unlock(&hdmi.lock);
-
- return r;
-}
-
-static struct omap_dss_driver hdmi_driver = {
- .probe = hdmi_panel_probe,
- .remove = hdmi_panel_remove,
- .enable = hdmi_panel_enable,
- .disable = hdmi_panel_disable,
- .get_timings = hdmi_get_timings,
- .set_timings = hdmi_set_timings,
- .check_timings = hdmi_check_timings,
- .read_edid = hdmi_read_edid,
- .detect = hdmi_detect,
- .audio_enable = hdmi_panel_audio_enable,
- .audio_disable = hdmi_panel_audio_disable,
- .audio_start = hdmi_panel_audio_start,
- .audio_stop = hdmi_panel_audio_stop,
- .audio_supported = hdmi_panel_audio_supported,
- .audio_config = hdmi_panel_audio_config,
- .driver = {
- .name = "hdmi_panel",
- .owner = THIS_MODULE,
- },
-};
-
-int hdmi_panel_init(void)
-{
- mutex_init(&hdmi.lock);
-
-#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
- spin_lock_init(&hdmi.audio_lock);
-#endif
-
- return omap_dss_register_driver(&hdmi_driver);
-}
-
-void hdmi_panel_exit(void)
-{
- omap_dss_unregister_driver(&hdmi_driver);
-
-}
diff --git a/drivers/video/omap2/dss/manager-sysfs.c b/drivers/video/omap2/dss/manager-sysfs.c
index de7e7b5b1b7..37b59fe28dc 100644
--- a/drivers/video/omap2/dss/manager-sysfs.c
+++ b/drivers/video/omap2/dss/manager-sysfs.c
@@ -285,9 +285,10 @@ static ssize_t manager_alpha_blending_enabled_show(
{
struct omap_overlay_manager_info info;
- mgr->get_manager_info(mgr, &info);
+ if (!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER))
+ return -ENODEV;
- WARN_ON(!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER));
+ mgr->get_manager_info(mgr, &info);
return snprintf(buf, PAGE_SIZE, "%d\n",
info.partial_alpha_enabled);
@@ -301,7 +302,8 @@ static ssize_t manager_alpha_blending_enabled_store(
bool enable;
int r;
- WARN_ON(!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER));
+ if (!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER))
+ return -ENODEV;
r = strtobool(buf, &enable);
if (r)
diff --git a/drivers/video/omap2/dss/output.c b/drivers/video/omap2/dss/output.c
index 3f5c0a758b3..2ab3afa615e 100644
--- a/drivers/video/omap2/dss/output.c
+++ b/drivers/video/omap2/dss/output.c
@@ -34,9 +34,9 @@ int omapdss_output_set_device(struct omap_dss_device *out,
mutex_lock(&output_lock);
- if (out->device) {
+ if (out->dst) {
DSSERR("output already has device %s connected to it\n",
- out->device->name);
+ out->dst->name);
r = -EINVAL;
goto err;
}
@@ -47,8 +47,8 @@ int omapdss_output_set_device(struct omap_dss_device *out,
goto err;
}
- out->device = dssdev;
- dssdev->output = out;
+ out->dst = dssdev;
+ dssdev->src = out;
mutex_unlock(&output_lock);
@@ -66,21 +66,21 @@ int omapdss_output_unset_device(struct omap_dss_device *out)
mutex_lock(&output_lock);
- if (!out->device) {
+ if (!out->dst) {
DSSERR("output doesn't have a device connected to it\n");
r = -EINVAL;
goto err;
}
- if (out->device->state != OMAP_DSS_DISPLAY_DISABLED) {
+ if (out->dst->state != OMAP_DSS_DISPLAY_DISABLED) {
DSSERR("device %s is not disabled, cannot unset device\n",
- out->device->name);
+ out->dst->name);
r = -EINVAL;
goto err;
}
- out->device->output = NULL;
- out->device = NULL;
+ out->dst->src = NULL;
+ out->dst = NULL;
mutex_unlock(&output_lock);
@@ -146,8 +146,8 @@ EXPORT_SYMBOL(omap_dss_find_output_by_node);
struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev)
{
- while (dssdev->output)
- dssdev = dssdev->output;
+ while (dssdev->src)
+ dssdev = dssdev->src;
if (dssdev->id != 0)
return omap_dss_get_device(dssdev);
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index fdfe6e6f25d..c8a81a2b879 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -151,19 +151,17 @@ static void rfbi_runtime_put(void)
WARN_ON(r < 0 && r != -ENOSYS);
}
-void rfbi_bus_lock(void)
+static void rfbi_bus_lock(void)
{
down(&rfbi.bus_lock);
}
-EXPORT_SYMBOL(rfbi_bus_lock);
-void rfbi_bus_unlock(void)
+static void rfbi_bus_unlock(void)
{
up(&rfbi.bus_lock);
}
-EXPORT_SYMBOL(rfbi_bus_unlock);
-void omap_rfbi_write_command(const void *buf, u32 len)
+static void rfbi_write_command(const void *buf, u32 len)
{
switch (rfbi.parallelmode) {
case OMAP_DSS_RFBI_PARALLELMODE_8:
@@ -189,9 +187,8 @@ void omap_rfbi_write_command(const void *buf, u32 len)
BUG();
}
}
-EXPORT_SYMBOL(omap_rfbi_write_command);
-void omap_rfbi_read_data(void *buf, u32 len)
+static void rfbi_read_data(void *buf, u32 len)
{
switch (rfbi.parallelmode) {
case OMAP_DSS_RFBI_PARALLELMODE_8:
@@ -221,9 +218,8 @@ void omap_rfbi_read_data(void *buf, u32 len)
BUG();
}
}
-EXPORT_SYMBOL(omap_rfbi_read_data);
-void omap_rfbi_write_data(const void *buf, u32 len)
+static void rfbi_write_data(const void *buf, u32 len)
{
switch (rfbi.parallelmode) {
case OMAP_DSS_RFBI_PARALLELMODE_8:
@@ -250,9 +246,8 @@ void omap_rfbi_write_data(const void *buf, u32 len)
}
}
-EXPORT_SYMBOL(omap_rfbi_write_data);
-void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width,
+static void rfbi_write_pixels(const void __iomem *buf, int scr_width,
u16 x, u16 y,
u16 w, u16 h)
{
@@ -305,7 +300,6 @@ void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width,
BUG();
}
}
-EXPORT_SYMBOL(omap_rfbi_write_pixels);
static int rfbi_transfer_area(struct omap_dss_device *dssdev,
void (*callback)(void *data), void *data)
@@ -574,7 +568,7 @@ static int rfbi_convert_timings(struct rfbi_timings *t)
}
/* xxx FIX module selection missing */
-int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode,
+static int rfbi_setup_te(enum omap_rfbi_te_mode mode,
unsigned hs_pulse_time, unsigned vs_pulse_time,
int hs_pol_inv, int vs_pol_inv, int extif_div)
{
@@ -613,10 +607,9 @@ int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode,
return 0;
}
-EXPORT_SYMBOL(omap_rfbi_setup_te);
/* xxx FIX module selection missing */
-int omap_rfbi_enable_te(bool enable, unsigned line)
+static int rfbi_enable_te(bool enable, unsigned line)
{
u32 l;
@@ -636,9 +629,8 @@ int omap_rfbi_enable_te(bool enable, unsigned line)
return 0;
}
-EXPORT_SYMBOL(omap_rfbi_enable_te);
-static int rfbi_configure(int rfbi_module, int bpp, int lines)
+static int rfbi_configure_bus(int rfbi_module, int bpp, int lines)
{
u32 l;
int cycle1 = 0, cycle2 = 0, cycle3 = 0;
@@ -770,45 +762,39 @@ static int rfbi_configure(int rfbi_module, int bpp, int lines)
return 0;
}
-int omap_rfbi_configure(struct omap_dss_device *dssdev)
+static int rfbi_configure(struct omap_dss_device *dssdev)
{
- return rfbi_configure(dssdev->phy.rfbi.channel, rfbi.pixel_size,
+ return rfbi_configure_bus(dssdev->phy.rfbi.channel, rfbi.pixel_size,
rfbi.data_lines);
}
-EXPORT_SYMBOL(omap_rfbi_configure);
-int omap_rfbi_update(struct omap_dss_device *dssdev, void (*callback)(void *),
+static int rfbi_update(struct omap_dss_device *dssdev, void (*callback)(void *),
void *data)
{
return rfbi_transfer_area(dssdev, callback, data);
}
-EXPORT_SYMBOL(omap_rfbi_update);
-void omapdss_rfbi_set_size(struct omap_dss_device *dssdev, u16 w, u16 h)
+static void rfbi_set_size(struct omap_dss_device *dssdev, u16 w, u16 h)
{
rfbi.timings.x_res = w;
rfbi.timings.y_res = h;
}
-EXPORT_SYMBOL(omapdss_rfbi_set_size);
-void omapdss_rfbi_set_pixel_size(struct omap_dss_device *dssdev, int pixel_size)
+static void rfbi_set_pixel_size(struct omap_dss_device *dssdev, int pixel_size)
{
rfbi.pixel_size = pixel_size;
}
-EXPORT_SYMBOL(omapdss_rfbi_set_pixel_size);
-void omapdss_rfbi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
+static void rfbi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
{
rfbi.data_lines = data_lines;
}
-EXPORT_SYMBOL(omapdss_rfbi_set_data_lines);
-void omapdss_rfbi_set_interface_timings(struct omap_dss_device *dssdev,
+static void rfbi_set_interface_timings(struct omap_dss_device *dssdev,
struct rfbi_timings *timings)
{
rfbi.intf_timings = *timings;
}
-EXPORT_SYMBOL(omapdss_rfbi_set_interface_timings);
static void rfbi_dump_regs(struct seq_file *s)
{
@@ -888,7 +874,7 @@ static void rfbi_config_lcd_manager(struct omap_dss_device *dssdev)
dss_mgr_set_timings(mgr, &rfbi.timings);
}
-int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
+static int rfbi_display_enable(struct omap_dss_device *dssdev)
{
struct omap_dss_device *out = &rfbi.output;
int r;
@@ -911,7 +897,7 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
rfbi_config_lcd_manager(dssdev);
- rfbi_configure(dssdev->phy.rfbi.channel, rfbi.pixel_size,
+ rfbi_configure_bus(dssdev->phy.rfbi.channel, rfbi.pixel_size,
rfbi.data_lines);
rfbi_set_timings(dssdev->phy.rfbi.channel, &rfbi.intf_timings);
@@ -921,9 +907,8 @@ err1:
rfbi_runtime_put();
return r;
}
-EXPORT_SYMBOL(omapdss_rfbi_display_enable);
-void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev)
+static void rfbi_display_disable(struct omap_dss_device *dssdev)
{
struct omap_dss_device *out = &rfbi.output;
@@ -932,7 +917,6 @@ void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev)
rfbi_runtime_put();
}
-EXPORT_SYMBOL(omapdss_rfbi_display_disable);
static int rfbi_init_display(struct omap_dss_device *dssdev)
{
@@ -940,77 +924,6 @@ static int rfbi_init_display(struct omap_dss_device *dssdev)
return 0;
}
-static struct omap_dss_device *rfbi_find_dssdev(struct platform_device *pdev)
-{
- struct omap_dss_board_info *pdata = pdev->dev.platform_data;
- const char *def_disp_name = omapdss_get_default_display_name();
- struct omap_dss_device *def_dssdev;
- int i;
-
- def_dssdev = NULL;
-
- for (i = 0; i < pdata->num_devices; ++i) {
- struct omap_dss_device *dssdev = pdata->devices[i];
-
- if (dssdev->type != OMAP_DISPLAY_TYPE_DBI)
- continue;
-
- if (def_dssdev == NULL)
- def_dssdev = dssdev;
-
- if (def_disp_name != NULL &&
- strcmp(dssdev->name, def_disp_name) == 0) {
- def_dssdev = dssdev;
- break;
- }
- }
-
- return def_dssdev;
-}
-
-static int rfbi_probe_pdata(struct platform_device *rfbidev)
-{
- struct omap_dss_device *plat_dssdev;
- struct omap_dss_device *dssdev;
- int r;
-
- plat_dssdev = rfbi_find_dssdev(rfbidev);
-
- if (!plat_dssdev)
- return 0;
-
- dssdev = dss_alloc_and_init_device(&rfbidev->dev);
- if (!dssdev)
- return -ENOMEM;
-
- dss_copy_device_pdata(dssdev, plat_dssdev);
-
- r = rfbi_init_display(dssdev);
- if (r) {
- DSSERR("device %s init failed: %d\n", dssdev->name, r);
- dss_put_device(dssdev);
- return r;
- }
-
- r = omapdss_output_set_device(&rfbi.output, dssdev);
- if (r) {
- DSSERR("failed to connect output to new device: %s\n",
- dssdev->name);
- dss_put_device(dssdev);
- return r;
- }
-
- r = dss_add_device(dssdev);
- if (r) {
- DSSERR("device %s register failed: %d\n", dssdev->name, r);
- omapdss_output_unset_device(&rfbi.output);
- dss_put_device(dssdev);
- return r;
- }
-
- return 0;
-}
-
static void rfbi_init_output(struct platform_device *pdev)
{
struct omap_dss_device *out = &rfbi.output;
@@ -1085,16 +998,8 @@ static int omap_rfbihw_probe(struct platform_device *pdev)
rfbi_init_output(pdev);
- if (pdev->dev.platform_data) {
- r = rfbi_probe_pdata(pdev);
- if (r)
- goto err_probe;
- }
-
return 0;
-err_probe:
- rfbi_uninit_output(pdev);
err_runtime_get:
pm_runtime_disable(&pdev->dev);
return r;
@@ -1102,8 +1007,6 @@ err_runtime_get:
static int __exit omap_rfbihw_remove(struct platform_device *pdev)
{
- dss_unregister_child_devices(&pdev->dev);
-
rfbi_uninit_output(pdev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 856af2e8976..ccc569ae7cc 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -124,7 +124,7 @@ static void sdi_config_lcd_manager(struct omap_dss_device *dssdev)
dss_mgr_set_lcd_config(mgr, &sdi.mgr_config);
}
-int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
+static int sdi_display_enable(struct omap_dss_device *dssdev)
{
struct omap_dss_device *out = &sdi.output;
struct omap_video_timings *t = &sdi.timings;
@@ -211,9 +211,8 @@ err_get_dispc:
err_reg_enable:
return r;
}
-EXPORT_SYMBOL(omapdss_sdi_display_enable);
-void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
+static void sdi_display_disable(struct omap_dss_device *dssdev)
{
struct omap_overlay_manager *mgr = sdi.output.manager;
@@ -225,14 +224,12 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
regulator_disable(sdi.vdds_sdi_reg);
}
-EXPORT_SYMBOL(omapdss_sdi_display_disable);
-void omapdss_sdi_set_timings(struct omap_dss_device *dssdev,
+static void sdi_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
sdi.timings = *timings;
}
-EXPORT_SYMBOL(omapdss_sdi_set_timings);
static void sdi_get_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
@@ -254,11 +251,10 @@ static int sdi_check_timings(struct omap_dss_device *dssdev,
return 0;
}
-void omapdss_sdi_set_datapairs(struct omap_dss_device *dssdev, int datapairs)
+static void sdi_set_datapairs(struct omap_dss_device *dssdev, int datapairs)
{
sdi.datapairs = datapairs;
}
-EXPORT_SYMBOL(omapdss_sdi_set_datapairs);
static int sdi_init_regulator(void)
{
@@ -267,14 +263,10 @@ static int sdi_init_regulator(void)
if (sdi.vdds_sdi_reg)
return 0;
- vdds_sdi = dss_get_vdds_sdi();
-
+ vdds_sdi = devm_regulator_get(&sdi.pdev->dev, "vdds_sdi");
if (IS_ERR(vdds_sdi)) {
- vdds_sdi = devm_regulator_get(&sdi.pdev->dev, "vdds_sdi");
- if (IS_ERR(vdds_sdi)) {
- DSSERR("can't get VDDS_SDI regulator\n");
- return PTR_ERR(vdds_sdi);
- }
+ DSSERR("can't get VDDS_SDI regulator\n");
+ return PTR_ERR(vdds_sdi);
}
sdi.vdds_sdi_reg = vdds_sdi;
@@ -282,77 +274,6 @@ static int sdi_init_regulator(void)
return 0;
}
-static struct omap_dss_device *sdi_find_dssdev(struct platform_device *pdev)
-{
- struct omap_dss_board_info *pdata = pdev->dev.platform_data;
- const char *def_disp_name = omapdss_get_default_display_name();
- struct omap_dss_device *def_dssdev;
- int i;
-
- def_dssdev = NULL;
-
- for (i = 0; i < pdata->num_devices; ++i) {
- struct omap_dss_device *dssdev = pdata->devices[i];
-
- if (dssdev->type != OMAP_DISPLAY_TYPE_SDI)
- continue;
-
- if (def_dssdev == NULL)
- def_dssdev = dssdev;
-
- if (def_disp_name != NULL &&
- strcmp(dssdev->name, def_disp_name) == 0) {
- def_dssdev = dssdev;
- break;
- }
- }
-
- return def_dssdev;
-}
-
-static int sdi_probe_pdata(struct platform_device *sdidev)
-{
- struct omap_dss_device *plat_dssdev;
- struct omap_dss_device *dssdev;
- int r;
-
- plat_dssdev = sdi_find_dssdev(sdidev);
-
- if (!plat_dssdev)
- return 0;
-
- dssdev = dss_alloc_and_init_device(&sdidev->dev);
- if (!dssdev)
- return -ENOMEM;
-
- dss_copy_device_pdata(dssdev, plat_dssdev);
-
- r = sdi_init_regulator();
- if (r) {
- DSSERR("device %s init failed: %d\n", dssdev->name, r);
- dss_put_device(dssdev);
- return r;
- }
-
- r = omapdss_output_set_device(&sdi.output, dssdev);
- if (r) {
- DSSERR("failed to connect output to new device: %s\n",
- dssdev->name);
- dss_put_device(dssdev);
- return r;
- }
-
- r = dss_add_device(dssdev);
- if (r) {
- DSSERR("device %s register failed: %d\n", dssdev->name, r);
- omapdss_output_unset_device(&sdi.output);
- dss_put_device(dssdev);
- return r;
- }
-
- return 0;
-}
-
static int sdi_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
@@ -385,9 +306,9 @@ static int sdi_connect(struct omap_dss_device *dssdev,
static void sdi_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- WARN_ON(dst != dssdev->device);
+ WARN_ON(dst != dssdev->dst);
- if (dst != dssdev->device)
+ if (dst != dssdev->dst)
return;
omapdss_output_unset_device(dssdev);
@@ -400,14 +321,14 @@ static const struct omapdss_sdi_ops sdi_ops = {
.connect = sdi_connect,
.disconnect = sdi_disconnect,
- .enable = omapdss_sdi_display_enable,
- .disable = omapdss_sdi_display_disable,
+ .enable = sdi_display_enable,
+ .disable = sdi_display_disable,
.check_timings = sdi_check_timings,
- .set_timings = omapdss_sdi_set_timings,
+ .set_timings = sdi_set_timings,
.get_timings = sdi_get_timings,
- .set_datapairs = omapdss_sdi_set_datapairs,
+ .set_datapairs = sdi_set_datapairs,
};
static void sdi_init_output(struct platform_device *pdev)
@@ -434,29 +355,15 @@ static void __exit sdi_uninit_output(struct platform_device *pdev)
static int omap_sdi_probe(struct platform_device *pdev)
{
- int r;
-
sdi.pdev = pdev;
sdi_init_output(pdev);
- if (pdev->dev.platform_data) {
- r = sdi_probe_pdata(pdev);
- if (r)
- goto err_probe;
- }
-
return 0;
-
-err_probe:
- sdi_uninit_output(pdev);
- return r;
}
static int __exit omap_sdi_remove(struct platform_device *pdev)
{
- dss_unregister_child_devices(&pdev->dev);
-
sdi_uninit_output(pdev);
return 0;
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
index e242ed85cb0..3dfe00956a4 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
+++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
@@ -779,16 +779,14 @@ void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data)
struct omap_video_timings video_timing;
struct hdmi_video_format video_format;
/* HDMI core */
- struct hdmi_core_infoframe_avi avi_cfg = ip_data->avi_cfg;
+ struct hdmi_core_infoframe_avi *avi_cfg = &ip_data->avi_cfg;
struct hdmi_core_video_config v_core_cfg;
struct hdmi_core_packet_enable_repeat repeat_cfg;
struct hdmi_config *cfg = &ip_data->cfg;
hdmi_wp_init(&video_timing, &video_format);
- hdmi_core_init(&v_core_cfg,
- &avi_cfg,
- &repeat_cfg);
+ hdmi_core_init(&v_core_cfg, avi_cfg, &repeat_cfg);
hdmi_wp_video_init_format(&video_format, &video_timing, cfg);
@@ -822,24 +820,24 @@ void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data)
* configure packet
* info frame video see doc CEA861-D page 65
*/
- avi_cfg.db1_format = HDMI_INFOFRAME_AVI_DB1Y_RGB;
- avi_cfg.db1_active_info =
- HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF;
- avi_cfg.db1_bar_info_dv = HDMI_INFOFRAME_AVI_DB1B_NO;
- avi_cfg.db1_scan_info = HDMI_INFOFRAME_AVI_DB1S_0;
- avi_cfg.db2_colorimetry = HDMI_INFOFRAME_AVI_DB2C_NO;
- avi_cfg.db2_aspect_ratio = HDMI_INFOFRAME_AVI_DB2M_NO;
- avi_cfg.db2_active_fmt_ar = HDMI_INFOFRAME_AVI_DB2R_SAME;
- avi_cfg.db3_itc = HDMI_INFOFRAME_AVI_DB3ITC_NO;
- avi_cfg.db3_ec = HDMI_INFOFRAME_AVI_DB3EC_XVYUV601;
- avi_cfg.db3_q_range = HDMI_INFOFRAME_AVI_DB3Q_DEFAULT;
- avi_cfg.db3_nup_scaling = HDMI_INFOFRAME_AVI_DB3SC_NO;
- avi_cfg.db4_videocode = cfg->cm.code;
- avi_cfg.db5_pixel_repeat = HDMI_INFOFRAME_AVI_DB5PR_NO;
- avi_cfg.db6_7_line_eoftop = 0;
- avi_cfg.db8_9_line_sofbottom = 0;
- avi_cfg.db10_11_pixel_eofleft = 0;
- avi_cfg.db12_13_pixel_sofright = 0;
+ avi_cfg->db1_format = HDMI_INFOFRAME_AVI_DB1Y_RGB;
+ avi_cfg->db1_active_info =
+ HDMI_INFOFRAME_AVI_DB1A_ACTIVE_FORMAT_OFF;
+ avi_cfg->db1_bar_info_dv = HDMI_INFOFRAME_AVI_DB1B_NO;
+ avi_cfg->db1_scan_info = HDMI_INFOFRAME_AVI_DB1S_0;
+ avi_cfg->db2_colorimetry = HDMI_INFOFRAME_AVI_DB2C_NO;
+ avi_cfg->db2_aspect_ratio = HDMI_INFOFRAME_AVI_DB2M_NO;
+ avi_cfg->db2_active_fmt_ar = HDMI_INFOFRAME_AVI_DB2R_SAME;
+ avi_cfg->db3_itc = HDMI_INFOFRAME_AVI_DB3ITC_NO;
+ avi_cfg->db3_ec = HDMI_INFOFRAME_AVI_DB3EC_XVYUV601;
+ avi_cfg->db3_q_range = HDMI_INFOFRAME_AVI_DB3Q_DEFAULT;
+ avi_cfg->db3_nup_scaling = HDMI_INFOFRAME_AVI_DB3SC_NO;
+ avi_cfg->db4_videocode = cfg->cm.code;
+ avi_cfg->db5_pixel_repeat = HDMI_INFOFRAME_AVI_DB5PR_NO;
+ avi_cfg->db6_7_line_eoftop = 0;
+ avi_cfg->db8_9_line_sofbottom = 0;
+ avi_cfg->db10_11_pixel_eofleft = 0;
+ avi_cfg->db12_13_pixel_sofright = 0;
hdmi_core_aux_infoframe_avi_config(ip_data);
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 496a106fe82..5f88ac47b7f 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -492,7 +492,7 @@ static void venc_power_off(struct omap_dss_device *dssdev)
venc_runtime_put();
}
-int omapdss_venc_display_enable(struct omap_dss_device *dssdev)
+static int venc_display_enable(struct omap_dss_device *dssdev)
{
struct omap_dss_device *out = &venc.output;
int r;
@@ -521,7 +521,7 @@ err0:
return r;
}
-void omapdss_venc_display_disable(struct omap_dss_device *dssdev)
+static void venc_display_disable(struct omap_dss_device *dssdev)
{
DSSDBG("venc_display_disable\n");
@@ -532,7 +532,7 @@ void omapdss_venc_display_disable(struct omap_dss_device *dssdev)
mutex_unlock(&venc.venc_lock);
}
-void omapdss_venc_set_timings(struct omap_dss_device *dssdev,
+static void venc_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
DSSDBG("venc_set_timings\n");
@@ -550,7 +550,7 @@ void omapdss_venc_set_timings(struct omap_dss_device *dssdev,
mutex_unlock(&venc.venc_lock);
}
-int omapdss_venc_check_timings(struct omap_dss_device *dssdev,
+static int venc_check_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
DSSDBG("venc_check_timings\n");
@@ -574,13 +574,13 @@ static void venc_get_timings(struct omap_dss_device *dssdev,
mutex_unlock(&venc.venc_lock);
}
-u32 omapdss_venc_get_wss(struct omap_dss_device *dssdev)
+static u32 venc_get_wss(struct omap_dss_device *dssdev)
{
/* Invert due to VENC_L21_WC_CTL:INV=1 */
return (venc.wss_data >> 8) ^ 0xfffff;
}
-int omapdss_venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
+static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
{
const struct venc_config *config;
int r;
@@ -609,7 +609,7 @@ err:
return r;
}
-void omapdss_venc_set_type(struct omap_dss_device *dssdev,
+static void venc_set_type(struct omap_dss_device *dssdev,
enum omap_dss_venc_type type)
{
mutex_lock(&venc.venc_lock);
@@ -619,7 +619,7 @@ void omapdss_venc_set_type(struct omap_dss_device *dssdev,
mutex_unlock(&venc.venc_lock);
}
-void omapdss_venc_invert_vid_out_polarity(struct omap_dss_device *dssdev,
+static void venc_invert_vid_out_polarity(struct omap_dss_device *dssdev,
bool invert_polarity)
{
mutex_lock(&venc.venc_lock);
@@ -721,74 +721,6 @@ static int venc_get_clocks(struct platform_device *pdev)
return 0;
}
-static struct omap_dss_device *venc_find_dssdev(struct platform_device *pdev)
-{
- struct omap_dss_board_info *pdata = pdev->dev.platform_data;
- const char *def_disp_name = omapdss_get_default_display_name();
- struct omap_dss_device *def_dssdev;
- int i;
-
- def_dssdev = NULL;
-
- for (i = 0; i < pdata->num_devices; ++i) {
- struct omap_dss_device *dssdev = pdata->devices[i];
-
- if (dssdev->type != OMAP_DISPLAY_TYPE_VENC)
- continue;
-
- if (def_dssdev == NULL)
- def_dssdev = dssdev;
-
- if (def_disp_name != NULL &&
- strcmp(dssdev->name, def_disp_name) == 0) {
- def_dssdev = dssdev;
- break;
- }
- }
-
- return def_dssdev;
-}
-
-static int venc_probe_pdata(struct platform_device *vencdev)
-{
- struct omap_dss_device *plat_dssdev;
- struct omap_dss_device *dssdev;
- int r;
-
- plat_dssdev = venc_find_dssdev(vencdev);
-
- if (!plat_dssdev)
- return 0;
-
- r = venc_init_regulator();
- if (r)
- return r;
-
- dssdev = dss_alloc_and_init_device(&vencdev->dev);
- if (!dssdev)
- return -ENOMEM;
-
- dss_copy_device_pdata(dssdev, plat_dssdev);
-
- r = omapdss_output_set_device(&venc.output, dssdev);
- if (r) {
- DSSERR("failed to connect output to new device: %s\n",
- dssdev->name);
- dss_put_device(dssdev);
- return r;
- }
-
- r = dss_add_device(dssdev);
- if (r) {
- DSSERR("device %s register failed: %d\n", dssdev->name, r);
- omapdss_output_unset_device(&venc.output);
- dss_put_device(dssdev);
- return r;
- }
-
- return 0;
-}
-
static int venc_connect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
@@ -821,9 +753,9 @@ static int venc_connect(struct omap_dss_device *dssdev,
static void venc_disconnect(struct omap_dss_device *dssdev,
struct omap_dss_device *dst)
{
- WARN_ON(dst != dssdev->device);
+ WARN_ON(dst != dssdev->dst);
- if (dst != dssdev->device)
+ if (dst != dssdev->dst)
return;
omapdss_output_unset_device(dssdev);
@@ -836,18 +768,18 @@ static const struct omapdss_atv_ops venc_ops = {
.connect = venc_connect,
.disconnect = venc_disconnect,
- .enable = omapdss_venc_display_enable,
- .disable = omapdss_venc_display_disable,
+ .enable = venc_display_enable,
+ .disable = venc_display_disable,
- .check_timings = omapdss_venc_check_timings,
- .set_timings = omapdss_venc_set_timings,
+ .check_timings = venc_check_timings,
+ .set_timings = venc_set_timings,
.get_timings = venc_get_timings,
- .set_type = omapdss_venc_set_type,
- .invert_vid_out_polarity = omapdss_venc_invert_vid_out_polarity,
+ .set_type = venc_set_type,
+ .invert_vid_out_polarity = venc_invert_vid_out_polarity,
- .set_wss = omapdss_venc_set_wss,
- .get_wss = omapdss_venc_get_wss,
+ .set_wss = venc_set_wss,
+ .get_wss = venc_get_wss,
};
static void venc_init_output(struct platform_device *pdev)
@@ -913,26 +845,12 @@ static int omap_venchw_probe(struct platform_device *pdev)
venc_runtime_put();
- r = venc_panel_init();
- if (r)
- goto err_panel_init;
-
dss_debugfs_create_file("venc", venc_dump_regs);
venc_init_output(pdev);
- if (pdev->dev.platform_data) {
- r = venc_probe_pdata(pdev);
- if (r)
- goto err_probe;
- }
-
return 0;
-err_probe:
- venc_panel_exit();
- venc_uninit_output(pdev);
-err_panel_init:
err_runtime_get:
pm_runtime_disable(&pdev->dev);
return r;
@@ -940,10 +858,6 @@ err_runtime_get:
static int __exit omap_venchw_remove(struct platform_device *pdev)
{
- dss_unregister_child_devices(&pdev->dev);
-
- venc_panel_exit();
-
venc_uninit_output(pdev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/video/output.c b/drivers/video/output.c
index 6285b971845..1446c49fe6a 100644
--- a/drivers/video/output.c
+++ b/drivers/video/output.c
@@ -32,8 +32,8 @@ MODULE_DESCRIPTION("Display Output Switcher Lowlevel Control Abstraction");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luming Yu <luming.yu@intel.com>");
-static ssize_t video_output_show_state(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
ssize_t ret_size = 0;
struct output_device *od = to_output_device(dev);
@@ -42,9 +42,8 @@ static ssize_t video_output_show_state(struct device *dev,
return ret_size;
}
-static ssize_t video_output_store_state(struct device *dev,
- struct device_attribute *attr,
- const char *buf,size_t count)
+static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ const char *buf,size_t count)
{
char *endp;
struct output_device *od = to_output_device(dev);
@@ -62,6 +61,7 @@ static ssize_t video_output_store_state(struct device *dev,
}
return count;
}
+static DEVICE_ATTR_RW(state);
static void video_output_release(struct device *dev)
{
@@ -69,16 +69,16 @@ static void video_output_release(struct device *dev)
kfree(od);
}
-static struct device_attribute video_output_attributes[] = {
- __ATTR(state, 0644, video_output_show_state, video_output_store_state),
- __ATTR_NULL,
+static struct attribute *video_output_attrs[] = {
+ &dev_attr_state.attr,
+ NULL,
};
-
+ATTRIBUTE_GROUPS(video_output);
static struct class video_output_class = {
.name = "video_output",
.dev_release = video_output_release,
- .dev_attrs = video_output_attributes,
+ .dev_groups = video_output_groups,
};
struct output_device *video_output_register(const char *name,
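The output.c hunk above switches from the old per-class dev_attrs array to attribute groups: DEVICE_ATTR_RW(state) picks up functions named state_show()/state_store(), and ATTRIBUTE_GROUPS(video_output) generates the video_output_groups array that .dev_groups consumes. A minimal stand-alone sketch of the same pattern, with a hypothetical "demo" prefix and not part of the patch:

/*
 * Illustrative sketch of the attribute-group conversion above.
 * The "demo" names are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	/* report a fixed value; a real driver would read device state */
	return sprintf(buf, "%d\n", 1);
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	/* a real driver would parse buf and program the hardware */
	return count;
}

/* DEVICE_ATTR_RW(state) expects the state_show()/state_store() names above */
static DEVICE_ATTR_RW(state);

static struct attribute *demo_attrs[] = {
	&dev_attr_state.attr,
	NULL,
};
/* generates demo_group and demo_groups[], suitable for .dev_groups */
ATTRIBUTE_GROUPS(demo);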
diff --git a/drivers/video/sgivwfb.c b/drivers/video/sgivwfb.c
index b2a8912f643..a9ac3ce2d0e 100644
--- a/drivers/video/sgivwfb.c
+++ b/drivers/video/sgivwfb.c
@@ -713,7 +713,7 @@ static int sgivwfb_mmap(struct fb_info *info,
r = vm_iomap_memory(vma, sgivwfb_mem_phys, sgivwfb_mem_size);
printk(KERN_DEBUG "sgivwfb: mmap framebuffer P(%lx)->V(%lx)\n",
- offset, vma->vm_start);
+ sgivwfb_mem_phys + (vma->vm_pgoff << PAGE_SHIFT), vma->vm_start);
return r;
}
diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c
index a8c6c43a465..1265b25f9f9 100644
--- a/drivers/video/sh7760fb.c
+++ b/drivers/video/sh7760fb.c
@@ -567,7 +567,7 @@ static int sh7760fb_remove(struct platform_device *dev)
fb_dealloc_cmap(&info->cmap);
sh7760fb_free_mem(info);
if (par->irq >= 0)
- free_irq(par->irq, par);
+ free_irq(par->irq, &par->vsync);
iounmap(par->base);
release_mem_region(par->ioarea->start, resource_size(par->ioarea));
framebuffer_release(info);
diff --git a/drivers/video/simplefb.c b/drivers/video/simplefb.c
index e2e9e3e61b7..8d781061305 100644
--- a/drivers/video/simplefb.c
+++ b/drivers/video/simplefb.c
@@ -24,6 +24,7 @@
#include <linux/fb.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
static struct fb_fix_screeninfo simplefb_fix = {
@@ -73,18 +74,7 @@ static struct fb_ops simplefb_ops = {
.fb_imageblit = cfb_imageblit,
};
-struct simplefb_format {
- const char *name;
- u32 bits_per_pixel;
- struct fb_bitfield red;
- struct fb_bitfield green;
- struct fb_bitfield blue;
- struct fb_bitfield transp;
-};
-
-static struct simplefb_format simplefb_formats[] = {
- { "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0} },
-};
+static struct simplefb_format simplefb_formats[] = SIMPLEFB_FORMATS;
struct simplefb_params {
u32 width;
@@ -139,6 +129,33 @@ static int simplefb_parse_dt(struct platform_device *pdev,
return 0;
}
+static int simplefb_parse_pd(struct platform_device *pdev,
+ struct simplefb_params *params)
+{
+ struct simplefb_platform_data *pd = pdev->dev.platform_data;
+ int i;
+
+ params->width = pd->width;
+ params->height = pd->height;
+ params->stride = pd->stride;
+
+ params->format = NULL;
+ for (i = 0; i < ARRAY_SIZE(simplefb_formats); i++) {
+ if (strcmp(pd->format, simplefb_formats[i].name))
+ continue;
+
+ params->format = &simplefb_formats[i];
+ break;
+ }
+
+ if (!params->format) {
+ dev_err(&pdev->dev, "Invalid format value\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int simplefb_probe(struct platform_device *pdev)
{
int ret;
@@ -149,7 +166,12 @@ static int simplefb_probe(struct platform_device *pdev)
if (fb_get_options("simplefb", NULL))
return -ENODEV;
- ret = simplefb_parse_dt(pdev, &params);
+ ret = -ENODEV;
+ if (pdev->dev.platform_data)
+ ret = simplefb_parse_pd(pdev, &params);
+ else if (pdev->dev.of_node)
+ ret = simplefb_parse_dt(pdev, &params);
+
if (ret)
return ret;
@@ -180,8 +202,16 @@ static int simplefb_probe(struct platform_device *pdev)
info->var.blue = params.format->blue;
info->var.transp = params.format->transp;
+ info->apertures = alloc_apertures(1);
+ if (!info->apertures) {
+ framebuffer_release(info);
+ return -ENOMEM;
+ }
+ info->apertures->ranges[0].base = info->fix.smem_start;
+ info->apertures->ranges[0].size = info->fix.smem_len;
+
info->fbops = &simplefb_ops;
- info->flags = FBINFO_DEFAULT;
+ info->flags = FBINFO_DEFAULT | FBINFO_MISC_FIRMWARE;
info->screen_base = devm_ioremap(&pdev->dev, info->fix.smem_start,
info->fix.smem_len);
if (!info->screen_base) {
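The simplefb changes above add a platform-data path (simplefb_parse_pd()) alongside the existing device-tree path. A board file could feed it roughly as sketched below; struct simplefb_platform_data and SIMPLEFB_FORMATS come from the new <linux/platform_data/simplefb.h> header, while the framebuffer address, resolution and stride here are made-up example values, not taken from the patch:

/*
 * Illustrative sketch only: registering a "simple-framebuffer"
 * platform device so the simplefb_parse_pd() path is taken.
 */
#include <linux/platform_device.h>
#include <linux/platform_data/simplefb.h>
#include <linux/ioport.h>

static struct simplefb_platform_data demo_simplefb_pdata = {
	.width	= 800,
	.height	= 480,
	.stride	= 800 * 2,
	.format	= "r5g6b5",	/* must match a SIMPLEFB_FORMATS entry */
};

static struct resource demo_simplefb_res = {
	.start	= 0x9f000000,	/* hypothetical framebuffer address */
	.end	= 0x9f000000 + 800 * 480 * 2 - 1,
	.flags	= IORESOURCE_MEM,
};

static struct platform_device demo_simplefb_device = {
	.name		= "simple-framebuffer",
	.id		= 0,
	.dev = {
		.platform_data = &demo_simplefb_pdata,
	},
	.resource	= &demo_simplefb_res,
	.num_resources	= 1,
};

/* board code would call platform_device_register(&demo_simplefb_device) */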
diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
index 501b3406c6d..bd83233ec22 100644
--- a/drivers/video/vesafb.c
+++ b/drivers/video/vesafb.c
@@ -29,7 +29,7 @@
/* --------------------------------------------------------------------- */
-static struct fb_var_screeninfo vesafb_defined __initdata = {
+static struct fb_var_screeninfo vesafb_defined = {
.activate = FB_ACTIVATE_NOW,
.height = -1,
.width = -1,
@@ -40,7 +40,7 @@ static struct fb_var_screeninfo vesafb_defined __initdata = {
.vmode = FB_VMODE_NONINTERLACED,
};
-static struct fb_fix_screeninfo vesafb_fix __initdata = {
+static struct fb_fix_screeninfo vesafb_fix = {
.id = "VESA VGA",
.type = FB_TYPE_PACKED_PIXELS,
.accel = FB_ACCEL_NONE,
@@ -48,8 +48,8 @@ static struct fb_fix_screeninfo vesafb_fix __initdata = {
static int inverse __read_mostly;
static int mtrr __read_mostly; /* disable mtrr */
-static int vram_remap __initdata; /* Set amount of memory to be used */
-static int vram_total __initdata; /* Set total amount of memory */
+static int vram_remap; /* Set amount of memory to be used */
+static int vram_total; /* Set total amount of memory */
static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
static void (*pmi_start)(void) __read_mostly;
@@ -192,7 +192,7 @@ static struct fb_ops vesafb_ops = {
.fb_imageblit = cfb_imageblit,
};
-static int __init vesafb_setup(char *options)
+static int vesafb_setup(char *options)
{
char *this_opt;
@@ -226,13 +226,18 @@ static int __init vesafb_setup(char *options)
return 0;
}
-static int __init vesafb_probe(struct platform_device *dev)
+static int vesafb_probe(struct platform_device *dev)
{
struct fb_info *info;
int i, err;
unsigned int size_vmode;
unsigned int size_remap;
unsigned int size_total;
+ char *option = NULL;
+
+ /* ignore error return of fb_get_options */
+ fb_get_options("vesafb", &option);
+ vesafb_setup(option);
if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
return -ENODEV;
@@ -496,40 +501,12 @@ err:
}
static struct platform_driver vesafb_driver = {
- .driver = {
- .name = "vesafb",
+ .driver = {
+ .name = "vesa-framebuffer",
+ .owner = THIS_MODULE,
},
+ .probe = vesafb_probe,
};
-static struct platform_device *vesafb_device;
-
-static int __init vesafb_init(void)
-{
- int ret;
- char *option = NULL;
-
- /* ignore error return of fb_get_options */
- fb_get_options("vesafb", &option);
- vesafb_setup(option);
-
- vesafb_device = platform_device_alloc("vesafb", 0);
- if (!vesafb_device)
- return -ENOMEM;
-
- ret = platform_device_add(vesafb_device);
- if (!ret) {
- ret = platform_driver_probe(&vesafb_driver, vesafb_probe);
- if (ret)
- platform_device_del(vesafb_device);
- }
-
- if (ret) {
- platform_device_put(vesafb_device);
- vesafb_device = NULL;
- }
-
- return ret;
-}
-module_init(vesafb_init);
-
+module_platform_driver(vesafb_driver);
MODULE_LICENSE("GPL");
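The vesafb hunk replaces the hand-rolled vesafb_init()/platform_device_alloc() bootstrap with module_platform_driver() and moves the fb_get_options() handling into the probe path. As a rough sketch (not part of the patch), the macro boils down to registering and unregistering the driver from module init/exit:

/*
 * Illustrative sketch: roughly what module_platform_driver(vesafb_driver)
 * expands to.
 */
static int __init vesafb_driver_init(void)
{
	return platform_driver_register(&vesafb_driver);
}
module_init(vesafb_driver_init);

static void __exit vesafb_driver_exit(void)
{
	platform_driver_unregister(&vesafb_driver);
}
module_exit(vesafb_driver_exit);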
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index 830ded45fd4..2827333703d 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -1265,7 +1265,6 @@ static void vga16fb_imageblit(struct fb_info *info, const struct fb_image *image
static void vga16fb_destroy(struct fb_info *info)
{
- struct platform_device *dev = container_of(info->device, struct platform_device, dev);
iounmap(info->screen_base);
fb_dealloc_cmap(&info->cmap);
/* XXX unshare VGA regions */
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c
index f3d4a69e1e4..84c664ea8eb 100644
--- a/drivers/video/xilinxfb.c
+++ b/drivers/video/xilinxfb.c
@@ -259,12 +259,12 @@ static int xilinxfb_assign(struct platform_device *pdev,
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- drvdata->regs_phys = res->start;
- drvdata->regs = devm_request_and_ioremap(&pdev->dev, res);
- if (!drvdata->regs) {
- rc = -EADDRNOTAVAIL;
+ drvdata->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(drvdata->regs)) {
+ rc = PTR_ERR(drvdata->regs);
goto err_region;
}
+ drvdata->regs_phys = res->start;
}
/* Allocate the framebuffer memory */
@@ -341,8 +341,8 @@ static int xilinxfb_assign(struct platform_device *pdev,
if (drvdata->flags & BUS_ACCESS_FLAG) {
/* Put a banner in the log (for DEBUG) */
- dev_dbg(dev, "regs: phys=%x, virt=%p\n", drvdata->regs_phys,
- drvdata->regs);
+ dev_dbg(dev, "regs: phys=%pa, virt=%p\n",
+ &drvdata->regs_phys, drvdata->regs);
}
/* Put a banner in the log (for DEBUG) */
dev_dbg(dev, "fb: phys=%llx, virt=%p, size=%x\n",
diff --git a/drivers/vme/boards/vme_vmivme7805.c b/drivers/vme/boards/vme_vmivme7805.c
index dd22b5072e2..cf74aee2cef 100644
--- a/drivers/vme/boards/vme_vmivme7805.c
+++ b/drivers/vme/boards/vme_vmivme7805.c
@@ -23,7 +23,7 @@ static int vmic_probe(struct pci_dev *, const struct pci_device_id *);
static void vmic_remove(struct pci_dev *);
/** Base address to access FPGA register */
-static void *vmic_base;
+static void __iomem *vmic_base;
static const char driver_name[] = "vmivme_7805";
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index 64bfea31442..f8448573d03 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -243,6 +243,8 @@ static int ca91cx42_irq_init(struct vme_bridge *ca91cx42_bridge)
static void ca91cx42_irq_exit(struct ca91cx42_driver *bridge,
struct pci_dev *pdev)
{
+ struct vme_bridge *ca91cx42_bridge;
+
/* Disable interrupts from PCI to VME */
iowrite32(0, bridge->base + VINT_EN);
@@ -251,7 +253,9 @@ static void ca91cx42_irq_exit(struct ca91cx42_driver *bridge,
/* Clear Any Pending PCI Interrupts */
iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);
- free_irq(pdev->irq, pdev);
+ ca91cx42_bridge = container_of((void *)bridge, struct vme_bridge,
+ driver_priv);
+ free_irq(pdev->irq, ca91cx42_bridge);
}
static int ca91cx42_iack_received(struct ca91cx42_driver *bridge, int level)
@@ -856,7 +860,7 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
void *buf, size_t count, loff_t offset)
{
ssize_t retval;
- void *addr = image->kern_base + offset;
+ void __iomem *addr = image->kern_base + offset;
unsigned int done = 0;
unsigned int count32;
@@ -916,7 +920,7 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
void *buf, size_t count, loff_t offset)
{
ssize_t retval;
- void *addr = image->kern_base + offset;
+ void __iomem *addr = image->kern_base + offset;
unsigned int done = 0;
unsigned int count32;
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index 94c892f27be..9cf88337e4e 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -1267,7 +1267,7 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
u32 aspace, cycle, dwidth;
struct vme_bus_error *vme_err = NULL;
struct vme_bridge *tsi148_bridge;
- void *addr = image->kern_base + offset;
+ void __iomem *addr = image->kern_base + offset;
unsigned int done = 0;
unsigned int count32;
@@ -1348,7 +1348,7 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
int retval = 0, enabled;
unsigned long long vme_base, size;
u32 aspace, cycle, dwidth;
- void *addr = image->kern_base + offset;
+ void __iomem *addr = image->kern_base + offset;
unsigned int done = 0;
unsigned int count32;
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c
index cb8a8e5d957..7dfa0e11688 100644
--- a/drivers/w1/slaves/w1_ds2408.c
+++ b/drivers/w1/slaves/w1_ds2408.c
@@ -72,10 +72,9 @@ static int _read_reg(struct w1_slave *sl, u8 address, unsigned char* buf)
return 1;
}
-static ssize_t w1_f29_read_state(
- struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t state_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off,
+ size_t count)
{
dev_dbg(&kobj_to_w1_slave(kobj)->dev,
"Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
@@ -85,10 +84,9 @@ static ssize_t w1_f29_read_state(
return _read_reg(kobj_to_w1_slave(kobj), W1_F29_REG_LOGIG_STATE, buf);
}
-static ssize_t w1_f29_read_output(
- struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t output_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
dev_dbg(&kobj_to_w1_slave(kobj)->dev,
"Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
@@ -99,10 +97,9 @@ static ssize_t w1_f29_read_output(
W1_F29_REG_OUTPUT_LATCH_STATE, buf);
}
-static ssize_t w1_f29_read_activity(
- struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t activity_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
dev_dbg(&kobj_to_w1_slave(kobj)->dev,
"Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
@@ -113,10 +110,9 @@ static ssize_t w1_f29_read_activity(
W1_F29_REG_ACTIVITY_LATCH_STATE, buf);
}
-static ssize_t w1_f29_read_cond_search_mask(
- struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t cond_search_mask_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
dev_dbg(&kobj_to_w1_slave(kobj)->dev,
"Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p",
@@ -127,10 +123,10 @@ static ssize_t w1_f29_read_cond_search_mask(
W1_F29_REG_COND_SEARCH_SELECT_MASK, buf);
}
-static ssize_t w1_f29_read_cond_search_polarity(
- struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t cond_search_polarity_read(struct file *filp,
+ struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buf, loff_t off, size_t count)
{
if (count != 1 || off != 0)
return -EFAULT;
@@ -138,10 +134,9 @@ static ssize_t w1_f29_read_cond_search_polarity(
W1_F29_REG_COND_SEARCH_POL_SELECT, buf);
}
-static ssize_t w1_f29_read_status_control(
- struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t status_control_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
if (count != 1 || off != 0)
return -EFAULT;
@@ -149,13 +144,9 @@ static ssize_t w1_f29_read_status_control(
W1_F29_REG_CONTROL_AND_STATUS, buf);
}
-
-
-
-static ssize_t w1_f29_write_output(
- struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t output_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
u8 w1_buf[3];
@@ -224,10 +215,9 @@ error:
/**
* Writing to the activity file resets the activity latches.
*/
-static ssize_t w1_f29_write_activity(
- struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t activity_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
unsigned int retries = W1_F29_RETRIES;
@@ -255,13 +245,9 @@ error:
return -EIO;
}
-static ssize_t w1_f29_write_status_control(
- struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf,
- loff_t off,
- size_t count)
+static ssize_t status_control_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
u8 w1_buf[4];
@@ -330,91 +316,35 @@ out:
return res;
}
-static struct bin_attribute w1_f29_sysfs_bin_files[] = {
- {
- .attr = {
- .name = "state",
- .mode = S_IRUGO,
- },
- .size = 1,
- .read = w1_f29_read_state,
- },
- {
- .attr = {
- .name = "output",
- .mode = S_IRUGO | S_IWUSR | S_IWGRP,
- },
- .size = 1,
- .read = w1_f29_read_output,
- .write = w1_f29_write_output,
- },
- {
- .attr = {
- .name = "activity",
- .mode = S_IRUGO,
- },
- .size = 1,
- .read = w1_f29_read_activity,
- .write = w1_f29_write_activity,
- },
- {
- .attr = {
- .name = "cond_search_mask",
- .mode = S_IRUGO,
- },
- .size = 1,
- .read = w1_f29_read_cond_search_mask,
- },
- {
- .attr = {
- .name = "cond_search_polarity",
- .mode = S_IRUGO,
- },
- .size = 1,
- .read = w1_f29_read_cond_search_polarity,
- },
- {
- .attr = {
- .name = "status_control",
- .mode = S_IRUGO | S_IWUSR | S_IWGRP,
- },
- .size = 1,
- .read = w1_f29_read_status_control,
- .write = w1_f29_write_status_control,
- }
+static BIN_ATTR_RO(state, 1);
+static BIN_ATTR_RW(output, 1);
+static BIN_ATTR_RW(activity, 1);
+static BIN_ATTR_RO(cond_search_mask, 1);
+static BIN_ATTR_RO(cond_search_polarity, 1);
+static BIN_ATTR_RW(status_control, 1);
+
+static struct bin_attribute *w1_f29_bin_attrs[] = {
+ &bin_attr_state,
+ &bin_attr_output,
+ &bin_attr_activity,
+ &bin_attr_cond_search_mask,
+ &bin_attr_cond_search_polarity,
+ &bin_attr_status_control,
+ NULL,
};
-static int w1_f29_add_slave(struct w1_slave *sl)
-{
- int err = 0;
- int i;
-
- err = w1_f29_disable_test_mode(sl);
- if (err)
- return err;
-
- for (i = 0; i < ARRAY_SIZE(w1_f29_sysfs_bin_files) && !err; ++i)
- err = sysfs_create_bin_file(
- &sl->dev.kobj,
- &(w1_f29_sysfs_bin_files[i]));
- if (err)
- while (--i >= 0)
- sysfs_remove_bin_file(&sl->dev.kobj,
- &(w1_f29_sysfs_bin_files[i]));
- return err;
-}
+static const struct attribute_group w1_f29_group = {
+ .bin_attrs = w1_f29_bin_attrs,
+};
-static void w1_f29_remove_slave(struct w1_slave *sl)
-{
- int i;
- for (i = ARRAY_SIZE(w1_f29_sysfs_bin_files) - 1; i >= 0; --i)
- sysfs_remove_bin_file(&sl->dev.kobj,
- &(w1_f29_sysfs_bin_files[i]));
-}
+static const struct attribute_group *w1_f29_groups[] = {
+ &w1_f29_group,
+ NULL,
+};
static struct w1_family_ops w1_f29_fops = {
- .add_slave = w1_f29_add_slave,
- .remove_slave = w1_f29_remove_slave,
+ .add_slave = w1_f29_disable_test_mode,
+ .groups = w1_f29_groups,
};
static struct w1_family w1_family_29 = {
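The w1_ds2408 conversion above drops the open-coded bin_attribute array and the sysfs_create_bin_file() loops in favour of the BIN_ATTR_*() helpers plus an attribute group referenced from w1_family_ops.groups, so the w1 core creates and removes the files itself. The helpers rely on a naming convention; a minimal sketch with a hypothetical "demo" attribute, not part of the patch:

/*
 * Illustrative sketch of the BIN_ATTR_*() naming convention used in
 * the conversion above.  "demo" is a hypothetical attribute name.
 */
#include <linux/sysfs.h>

static ssize_t demo_read(struct file *filp, struct kobject *kobj,
			 struct bin_attribute *bin_attr, char *buf,
			 loff_t off, size_t count)
{
	/* a real driver would copy device state into buf here */
	return 0;
}

static ssize_t demo_write(struct file *filp, struct kobject *kobj,
			  struct bin_attribute *bin_attr, char *buf,
			  loff_t off, size_t count)
{
	/* a real driver would push buf out to the device here */
	return count;
}

/* defines bin_attr_demo, wired to demo_read()/demo_write() above,
 * with a 1-byte size and the conventional read/write permissions */
static BIN_ATTR_RW(demo, 1);

static struct bin_attribute *demo_bin_attrs[] = {
	&bin_attr_demo,
	NULL,
};

static const struct attribute_group demo_group = {
	.bin_attrs = demo_bin_attrs,
};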
diff --git a/drivers/w1/slaves/w1_ds2413.c b/drivers/w1/slaves/w1_ds2413.c
index 85937773a96..ee28fc1ff39 100644
--- a/drivers/w1/slaves/w1_ds2413.c
+++ b/drivers/w1/slaves/w1_ds2413.c
@@ -30,10 +30,9 @@ MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS2413));
#define W1_F3A_FUNC_PIO_ACCESS_WRITE 0x5A
#define W1_F3A_SUCCESS_CONFIRM_BYTE 0xAA
-static ssize_t w1_f3a_read_state(
- struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t state_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off,
+ size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
dev_dbg(&sl->dev,
@@ -66,10 +65,11 @@ static ssize_t w1_f3a_read_state(
return 1;
}
-static ssize_t w1_f3a_write_output(
- struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static BIN_ATTR_RO(state, 1);
+
+static ssize_t output_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
u8 w1_buf[3];
@@ -110,53 +110,25 @@ error:
return -EIO;
}
-#define NB_SYSFS_BIN_FILES 2
-static struct bin_attribute w1_f3a_sysfs_bin_files[NB_SYSFS_BIN_FILES] = {
- {
- .attr = {
- .name = "state",
- .mode = S_IRUGO,
- },
- .size = 1,
- .read = w1_f3a_read_state,
- },
- {
- .attr = {
- .name = "output",
- .mode = S_IRUGO | S_IWUSR | S_IWGRP,
- },
- .size = 1,
- .write = w1_f3a_write_output,
- }
+static BIN_ATTR(output, S_IRUGO | S_IWUSR | S_IWGRP, NULL, output_write, 1);
+
+static struct bin_attribute *w1_f3a_bin_attrs[] = {
+ &bin_attr_state,
+ &bin_attr_output,
+ NULL,
};
-static int w1_f3a_add_slave(struct w1_slave *sl)
-{
- int err = 0;
- int i;
-
- for (i = 0; i < NB_SYSFS_BIN_FILES && !err; ++i)
- err = sysfs_create_bin_file(
- &sl->dev.kobj,
- &(w1_f3a_sysfs_bin_files[i]));
- if (err)
- while (--i >= 0)
- sysfs_remove_bin_file(&sl->dev.kobj,
- &(w1_f3a_sysfs_bin_files[i]));
- return err;
-}
+static const struct attribute_group w1_f3a_group = {
+ .bin_attrs = w1_f3a_bin_attrs,
+};
-static void w1_f3a_remove_slave(struct w1_slave *sl)
-{
- int i;
- for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i)
- sysfs_remove_bin_file(&sl->dev.kobj,
- &(w1_f3a_sysfs_bin_files[i]));
-}
+static const struct attribute_group *w1_f3a_groups[] = {
+ &w1_f3a_group,
+ NULL,
+};
static struct w1_family_ops w1_f3a_fops = {
- .add_slave = w1_f3a_add_slave,
- .remove_slave = w1_f3a_remove_slave,
+ .groups = w1_f3a_groups,
};
static struct w1_family w1_family_3a = {
diff --git a/drivers/w1/slaves/w1_ds2423.c b/drivers/w1/slaves/w1_ds2423.c
index 7f86aec7408..7e41b7d91fb 100644
--- a/drivers/w1/slaves/w1_ds2423.c
+++ b/drivers/w1/slaves/w1_ds2423.c
@@ -40,14 +40,8 @@
#define COUNTER_COUNT 4
#define READ_BYTE_COUNT 42
-static ssize_t w1_counter_read(struct device *device,
- struct device_attribute *attr, char *buf);
-
-static struct device_attribute w1_counter_attr =
- __ATTR(w1_slave, S_IRUGO, w1_counter_read, NULL);
-
-static ssize_t w1_counter_read(struct device *device,
- struct device_attribute *attr, char *out_buf)
+static ssize_t w1_slave_show(struct device *device,
+ struct device_attribute *attr, char *out_buf)
{
struct w1_slave *sl = dev_to_w1_slave(device);
struct w1_master *dev = sl->master;
@@ -128,19 +122,16 @@ static ssize_t w1_counter_read(struct device *device,
return PAGE_SIZE - c;
}
-static int w1_f1d_add_slave(struct w1_slave *sl)
-{
- return device_create_file(&sl->dev, &w1_counter_attr);
-}
+static DEVICE_ATTR_RO(w1_slave);
-static void w1_f1d_remove_slave(struct w1_slave *sl)
-{
- device_remove_file(&sl->dev, &w1_counter_attr);
-}
+static struct attribute *w1_f1d_attrs[] = {
+ &dev_attr_w1_slave.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(w1_f1d);
static struct w1_family_ops w1_f1d_fops = {
- .add_slave = w1_f1d_add_slave,
- .remove_slave = w1_f1d_remove_slave,
+ .groups = w1_f1d_groups,
};
static struct w1_family w1_family_1d = {
diff --git a/drivers/w1/slaves/w1_ds2431.c b/drivers/w1/slaves/w1_ds2431.c
index cef8605e43e..9c4ff9d28ad 100644
--- a/drivers/w1/slaves/w1_ds2431.c
+++ b/drivers/w1/slaves/w1_ds2431.c
@@ -96,9 +96,9 @@ static int w1_f2d_readblock(struct w1_slave *sl, int off, int count, char *buf)
return -1;
}
-static ssize_t w1_f2d_read_bin(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int todo = count;
@@ -202,9 +202,9 @@ retry:
return 0;
}
-static ssize_t w1_f2d_write_bin(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int addr, len;
@@ -264,29 +264,24 @@ out_up:
return count;
}
-static struct bin_attribute w1_f2d_bin_attr = {
- .attr = {
- .name = "eeprom",
- .mode = S_IRUGO | S_IWUSR,
- },
- .size = W1_F2D_EEPROM_SIZE,
- .read = w1_f2d_read_bin,
- .write = w1_f2d_write_bin,
+static BIN_ATTR_RW(eeprom, W1_F2D_EEPROM_SIZE);
+
+static struct bin_attribute *w1_f2d_bin_attrs[] = {
+ &bin_attr_eeprom,
+ NULL,
};
-static int w1_f2d_add_slave(struct w1_slave *sl)
-{
- return sysfs_create_bin_file(&sl->dev.kobj, &w1_f2d_bin_attr);
-}
+static const struct attribute_group w1_f2d_group = {
+ .bin_attrs = w1_f2d_bin_attrs,
+};
-static void w1_f2d_remove_slave(struct w1_slave *sl)
-{
- sysfs_remove_bin_file(&sl->dev.kobj, &w1_f2d_bin_attr);
-}
+static const struct attribute_group *w1_f2d_groups[] = {
+ &w1_f2d_group,
+ NULL,
+};
static struct w1_family_ops w1_f2d_fops = {
- .add_slave = w1_f2d_add_slave,
- .remove_slave = w1_f2d_remove_slave,
+ .groups = w1_f2d_groups,
};
static struct w1_family w1_family_2d = {
diff --git a/drivers/w1/slaves/w1_ds2433.c b/drivers/w1/slaves/w1_ds2433.c
index 10cc1b6176e..72319a968a9 100644
--- a/drivers/w1/slaves/w1_ds2433.c
+++ b/drivers/w1/slaves/w1_ds2433.c
@@ -93,9 +93,9 @@ static int w1_f23_refresh_block(struct w1_slave *sl, struct w1_f23_data *data,
}
#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
-static ssize_t w1_f23_read_bin(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
#ifdef CONFIG_W1_SLAVE_DS2433_CRC
@@ -207,9 +207,9 @@ static int w1_f23_write(struct w1_slave *sl, int addr, int len, const u8 *data)
return 0;
}
-static ssize_t w1_f23_write_bin(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
int addr, len, idx;
@@ -257,19 +257,24 @@ out_up:
return count;
}
-static struct bin_attribute w1_f23_bin_attr = {
- .attr = {
- .name = "eeprom",
- .mode = S_IRUGO | S_IWUSR,
- },
- .size = W1_EEPROM_SIZE,
- .read = w1_f23_read_bin,
- .write = w1_f23_write_bin,
+static BIN_ATTR_RW(eeprom, W1_EEPROM_SIZE);
+
+static struct bin_attribute *w1_f23_bin_attributes[] = {
+ &bin_attr_eeprom,
+ NULL,
+};
+
+static const struct attribute_group w1_f23_group = {
+ .bin_attrs = w1_f23_bin_attributes,
+};
+
+static const struct attribute_group *w1_f23_groups[] = {
+ &w1_f23_group,
+ NULL,
};
static int w1_f23_add_slave(struct w1_slave *sl)
{
- int err;
#ifdef CONFIG_W1_SLAVE_DS2433_CRC
struct w1_f23_data *data;
@@ -279,15 +284,7 @@ static int w1_f23_add_slave(struct w1_slave *sl)
sl->family_data = data;
#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
-
- err = sysfs_create_bin_file(&sl->dev.kobj, &w1_f23_bin_attr);
-
-#ifdef CONFIG_W1_SLAVE_DS2433_CRC
- if (err)
- kfree(data);
-#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
-
- return err;
+ return 0;
}
static void w1_f23_remove_slave(struct w1_slave *sl)
@@ -296,12 +293,12 @@ static void w1_f23_remove_slave(struct w1_slave *sl)
kfree(sl->family_data);
sl->family_data = NULL;
#endif /* CONFIG_W1_SLAVE_DS2433_CRC */
- sysfs_remove_bin_file(&sl->dev.kobj, &w1_f23_bin_attr);
}
static struct w1_family_ops w1_f23_fops = {
.add_slave = w1_f23_add_slave,
.remove_slave = w1_f23_remove_slave,
+ .groups = w1_f23_groups,
};
static struct w1_family w1_family_23 = {
diff --git a/drivers/w1/slaves/w1_ds2760.c b/drivers/w1/slaves/w1_ds2760.c
index 93719d25d84..65f90dccd60 100644
--- a/drivers/w1/slaves/w1_ds2760.c
+++ b/drivers/w1/slaves/w1_ds2760.c
@@ -97,21 +97,28 @@ int w1_ds2760_recall_eeprom(struct device *dev, int addr)
return w1_ds2760_eeprom_cmd(dev, addr, W1_DS2760_RECALL_DATA);
}
-static ssize_t w1_ds2760_read_bin(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
return w1_ds2760_read(dev, buf, off, count);
}
-static struct bin_attribute w1_ds2760_bin_attr = {
- .attr = {
- .name = "w1_slave",
- .mode = S_IRUGO,
- },
- .size = DS2760_DATA_SIZE,
- .read = w1_ds2760_read_bin,
+static BIN_ATTR_RO(w1_slave, DS2760_DATA_SIZE);
+
+static struct bin_attribute *w1_ds2760_bin_attrs[] = {
+ &bin_attr_w1_slave,
+ NULL,
+};
+
+static const struct attribute_group w1_ds2760_group = {
+ .bin_attrs = w1_ds2760_bin_attrs,
+};
+
+static const struct attribute_group *w1_ds2760_groups[] = {
+ &w1_ds2760_group,
+ NULL,
};
static DEFINE_IDA(bat_ida);
@@ -139,16 +146,10 @@ static int w1_ds2760_add_slave(struct w1_slave *sl)
if (ret)
goto pdev_add_failed;
- ret = sysfs_create_bin_file(&sl->dev.kobj, &w1_ds2760_bin_attr);
- if (ret)
- goto bin_attr_failed;
-
dev_set_drvdata(&sl->dev, pdev);
goto success;
-bin_attr_failed:
- platform_device_del(pdev);
pdev_add_failed:
platform_device_put(pdev);
pdev_alloc_failed:
@@ -165,12 +166,12 @@ static void w1_ds2760_remove_slave(struct w1_slave *sl)
platform_device_unregister(pdev);
ida_simple_remove(&bat_ida, id);
- sysfs_remove_bin_file(&sl->dev.kobj, &w1_ds2760_bin_attr);
}
static struct w1_family_ops w1_ds2760_fops = {
.add_slave = w1_ds2760_add_slave,
.remove_slave = w1_ds2760_remove_slave,
+ .groups = w1_ds2760_groups,
};
static struct w1_family w1_ds2760_family = {
diff --git a/drivers/w1/slaves/w1_ds2780.c b/drivers/w1/slaves/w1_ds2780.c
index 0cd7a27b5d6..50e85f7929d 100644
--- a/drivers/w1/slaves/w1_ds2780.c
+++ b/drivers/w1/slaves/w1_ds2780.c
@@ -89,22 +89,28 @@ int w1_ds2780_eeprom_cmd(struct device *dev, int addr, int cmd)
}
EXPORT_SYMBOL(w1_ds2780_eeprom_cmd);
-static ssize_t w1_ds2780_read_bin(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
return w1_ds2780_io(dev, buf, off, count, 0);
}
-static struct bin_attribute w1_ds2780_bin_attr = {
- .attr = {
- .name = "w1_slave",
- .mode = S_IRUGO,
- },
- .size = DS2780_DATA_SIZE,
- .read = w1_ds2780_read_bin,
+static BIN_ATTR_RO(w1_slave, DS2780_DATA_SIZE);
+
+static struct bin_attribute *w1_ds2780_bin_attrs[] = {
+ &bin_attr_w1_slave,
+ NULL,
+};
+
+static const struct attribute_group w1_ds2780_group = {
+ .bin_attrs = w1_ds2780_bin_attrs,
+};
+
+static const struct attribute_group *w1_ds2780_groups[] = {
+ &w1_ds2780_group,
+ NULL,
};
static DEFINE_IDA(bat_ida);
@@ -132,16 +138,10 @@ static int w1_ds2780_add_slave(struct w1_slave *sl)
if (ret)
goto pdev_add_failed;
- ret = sysfs_create_bin_file(&sl->dev.kobj, &w1_ds2780_bin_attr);
- if (ret)
- goto bin_attr_failed;
-
dev_set_drvdata(&sl->dev, pdev);
return 0;
-bin_attr_failed:
- platform_device_del(pdev);
pdev_add_failed:
platform_device_put(pdev);
pdev_alloc_failed:
@@ -157,12 +157,12 @@ static void w1_ds2780_remove_slave(struct w1_slave *sl)
platform_device_unregister(pdev);
ida_simple_remove(&bat_ida, id);
- sysfs_remove_bin_file(&sl->dev.kobj, &w1_ds2780_bin_attr);
}
static struct w1_family_ops w1_ds2780_fops = {
.add_slave = w1_ds2780_add_slave,
.remove_slave = w1_ds2780_remove_slave,
+ .groups = w1_ds2780_groups,
};
static struct w1_family w1_ds2780_family = {
diff --git a/drivers/w1/slaves/w1_ds2781.c b/drivers/w1/slaves/w1_ds2781.c
index 1aba8e41ad4..1eb98fb1688 100644
--- a/drivers/w1/slaves/w1_ds2781.c
+++ b/drivers/w1/slaves/w1_ds2781.c
@@ -87,22 +87,28 @@ int w1_ds2781_eeprom_cmd(struct device *dev, int addr, int cmd)
}
EXPORT_SYMBOL(w1_ds2781_eeprom_cmd);
-static ssize_t w1_ds2781_read_bin(struct file *filp,
- struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t w1_slave_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct device *dev = container_of(kobj, struct device, kobj);
return w1_ds2781_io(dev, buf, off, count, 0);
}
-static struct bin_attribute w1_ds2781_bin_attr = {
- .attr = {
- .name = "w1_slave",
- .mode = S_IRUGO,
- },
- .size = DS2781_DATA_SIZE,
- .read = w1_ds2781_read_bin,
+static BIN_ATTR_RO(w1_slave, DS2781_DATA_SIZE);
+
+static struct bin_attribute *w1_ds2781_bin_attrs[] = {
+ &bin_attr_w1_slave,
+ NULL,
+};
+
+static const struct attribute_group w1_ds2781_group = {
+ .bin_attrs = w1_ds2781_bin_attrs,
+};
+
+static const struct attribute_group *w1_ds2781_groups[] = {
+ &w1_ds2781_group,
+ NULL,
};
static DEFINE_IDA(bat_ida);
@@ -130,16 +136,10 @@ static int w1_ds2781_add_slave(struct w1_slave *sl)
if (ret)
goto pdev_add_failed;
- ret = sysfs_create_bin_file(&sl->dev.kobj, &w1_ds2781_bin_attr);
- if (ret)
- goto bin_attr_failed;
-
dev_set_drvdata(&sl->dev, pdev);
return 0;
-bin_attr_failed:
- platform_device_del(pdev);
pdev_add_failed:
platform_device_put(pdev);
pdev_alloc_failed:
@@ -155,12 +155,12 @@ static void w1_ds2781_remove_slave(struct w1_slave *sl)
platform_device_unregister(pdev);
ida_simple_remove(&bat_ida, id);
- sysfs_remove_bin_file(&sl->dev.kobj, &w1_ds2781_bin_attr);
}
static struct w1_family_ops w1_ds2781_fops = {
.add_slave = w1_ds2781_add_slave,
.remove_slave = w1_ds2781_remove_slave,
+ .groups = w1_ds2781_groups,
};
static struct w1_family w1_ds2781_family = {
diff --git a/drivers/w1/slaves/w1_ds28e04.c b/drivers/w1/slaves/w1_ds28e04.c
index cd30a6d95ea..365d6dff21d 100644
--- a/drivers/w1/slaves/w1_ds28e04.c
+++ b/drivers/w1/slaves/w1_ds28e04.c
@@ -118,9 +118,9 @@ static int w1_f1C_read(struct w1_slave *sl, int addr, int len, char *data)
return w1_read_block(sl->master, data, len);
}
-static ssize_t w1_f1C_read_bin(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t eeprom_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
struct w1_f1C_data *data = sl->family_data;
@@ -226,9 +226,9 @@ static int w1_f1C_write(struct w1_slave *sl, int addr, int len, const u8 *data)
return 0;
}
-static ssize_t w1_f1C_write_bin(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t eeprom_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf,
+ loff_t off, size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -280,9 +280,11 @@ out_up:
return count;
}
-static ssize_t w1_f1C_read_pio(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static BIN_ATTR_RW(eeprom, W1_EEPROM_SIZE);
+
+static ssize_t pio_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off,
+ size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -299,9 +301,9 @@ static ssize_t w1_f1C_read_pio(struct file *filp, struct kobject *kobj,
return ret;
}
-static ssize_t w1_f1C_write_pio(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t pio_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off,
+ size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -339,8 +341,10 @@ static ssize_t w1_f1C_write_pio(struct file *filp, struct kobject *kobj,
return count;
}
-static ssize_t w1_f1C_show_crccheck(struct device *dev,
- struct device_attribute *attr, char *buf)
+static BIN_ATTR_RW(pio, 1);
+
+static ssize_t crccheck_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
if (put_user(w1_enable_crccheck + 0x30, buf))
return -EFAULT;
@@ -348,9 +352,8 @@ static ssize_t w1_f1C_show_crccheck(struct device *dev,
return sizeof(w1_enable_crccheck);
}
-static ssize_t w1_f1C_store_crccheck(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t crccheck_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
char val;
@@ -371,35 +374,31 @@ static ssize_t w1_f1C_store_crccheck(struct device *dev,
return sizeof(w1_enable_crccheck);
}
-#define NB_SYSFS_BIN_FILES 2
-static struct bin_attribute w1_f1C_bin_attr[NB_SYSFS_BIN_FILES] = {
- {
- .attr = {
- .name = "eeprom",
- .mode = S_IRUGO | S_IWUSR,
- },
- .size = W1_EEPROM_SIZE,
- .read = w1_f1C_read_bin,
- .write = w1_f1C_write_bin,
- },
- {
- .attr = {
- .name = "pio",
- .mode = S_IRUGO | S_IWUSR,
- },
- .size = 1,
- .read = w1_f1C_read_pio,
- .write = w1_f1C_write_pio,
- }
+static DEVICE_ATTR_RW(crccheck);
+
+static struct attribute *w1_f1C_attrs[] = {
+ &dev_attr_crccheck.attr,
+ NULL,
};
-static DEVICE_ATTR(crccheck, S_IWUSR | S_IRUGO,
- w1_f1C_show_crccheck, w1_f1C_store_crccheck);
+static struct bin_attribute *w1_f1C_bin_attrs[] = {
+ &bin_attr_eeprom,
+ &bin_attr_pio,
+ NULL,
+};
+
+static const struct attribute_group w1_f1C_group = {
+ .attrs = w1_f1C_attrs,
+ .bin_attrs = w1_f1C_bin_attrs,
+};
+
+static const struct attribute_group *w1_f1C_groups[] = {
+ &w1_f1C_group,
+ NULL,
+};
static int w1_f1C_add_slave(struct w1_slave *sl)
{
- int err = 0;
- int i;
struct w1_f1C_data *data = NULL;
if (w1_enable_crccheck) {
@@ -409,46 +408,19 @@ static int w1_f1C_add_slave(struct w1_slave *sl)
sl->family_data = data;
}
- /* create binary sysfs attributes */
- for (i = 0; i < NB_SYSFS_BIN_FILES && !err; ++i)
- err = sysfs_create_bin_file(
- &sl->dev.kobj, &(w1_f1C_bin_attr[i]));
-
- if (!err) {
- /* create device attributes */
- err = device_create_file(&sl->dev, &dev_attr_crccheck);
- }
-
- if (err) {
- /* remove binary sysfs attributes */
- for (i = 0; i < NB_SYSFS_BIN_FILES; ++i)
- sysfs_remove_bin_file(
- &sl->dev.kobj, &(w1_f1C_bin_attr[i]));
-
- kfree(data);
- }
-
- return err;
+ return 0;
}
static void w1_f1C_remove_slave(struct w1_slave *sl)
{
- int i;
-
kfree(sl->family_data);
sl->family_data = NULL;
-
- /* remove device attributes */
- device_remove_file(&sl->dev, &dev_attr_crccheck);
-
- /* remove binary sysfs attributes */
- for (i = 0; i < NB_SYSFS_BIN_FILES; ++i)
- sysfs_remove_bin_file(&sl->dev.kobj, &(w1_f1C_bin_attr[i]));
}
static struct w1_family_ops w1_f1C_fops = {
.add_slave = w1_f1C_add_slave,
.remove_slave = w1_f1C_remove_slave,
+ .groups = w1_f1C_groups,
};
static struct w1_family w1_family_1C = {
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index 8978360bd38..8b5ff33f72c 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -59,25 +59,19 @@ static int w1_strong_pullup = 1;
module_param_named(strong_pullup, w1_strong_pullup, int, 0);
-static ssize_t w1_therm_read(struct device *device,
+static ssize_t w1_slave_show(struct device *device,
struct device_attribute *attr, char *buf);
-static struct device_attribute w1_therm_attr =
- __ATTR(w1_slave, S_IRUGO, w1_therm_read, NULL);
+static DEVICE_ATTR_RO(w1_slave);
-static int w1_therm_add_slave(struct w1_slave *sl)
-{
- return device_create_file(&sl->dev, &w1_therm_attr);
-}
-
-static void w1_therm_remove_slave(struct w1_slave *sl)
-{
- device_remove_file(&sl->dev, &w1_therm_attr);
-}
+static struct attribute *w1_therm_attrs[] = {
+ &dev_attr_w1_slave.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(w1_therm);
static struct w1_family_ops w1_therm_fops = {
- .add_slave = w1_therm_add_slave,
- .remove_slave = w1_therm_remove_slave,
+ .groups = w1_therm_groups,
};
static struct w1_family w1_therm_family_DS18S20 = {
@@ -178,7 +172,7 @@ static inline int w1_convert_temp(u8 rom[9], u8 fid)
}
-static ssize_t w1_therm_read(struct device *device,
+static ssize_t w1_slave_show(struct device *device,
struct device_attribute *attr, char *buf)
{
struct w1_slave *sl = dev_to_w1_slave(device);
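For plain (non-binary) attributes, the same cleanup uses DEVICE_ATTR_RO() together with the ATTRIBUTE_GROUPS() helper, as in w1_therm above. A small self-contained sketch with a hypothetical read-only attribute named "demo" shows roughly what the macro pair provides; none of these names come from the patch.

/* Sketch of the DEVICE_ATTR_RO()/ATTRIBUTE_GROUPS() pairing; "demo" is a
 * placeholder attribute, not part of the patch. */
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "42\n");
}
static DEVICE_ATTR_RO(demo);	/* builds dev_attr_demo around demo_show() */

static struct attribute *demo_attrs[] = {
	&dev_attr_demo.attr,
	NULL,
};
/*
 * ATTRIBUTE_GROUPS(demo) is roughly equivalent to:
 *
 *	static const struct attribute_group demo_group = {
 *		.attrs = demo_attrs,
 *	};
 *	static const struct attribute_group *demo_groups[] = {
 *		&demo_group,
 *		NULL,
 *	};
 */
ATTRIBUTE_GROUPS(demo);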
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 0459df843c5..22013ca2119 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -96,14 +96,15 @@ static void w1_slave_release(struct device *dev)
complete(&sl->released);
}
-static ssize_t w1_slave_read_name(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct w1_slave *sl = dev_to_w1_slave(dev);
return sprintf(buf, "%s\n", sl->name);
}
+static DEVICE_ATTR_RO(name);
-static ssize_t w1_slave_read_id(struct device *dev,
+static ssize_t id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct w1_slave *sl = dev_to_w1_slave(dev);
@@ -112,17 +113,20 @@ static ssize_t w1_slave_read_id(struct device *dev,
memcpy(buf, (u8 *)&sl->reg_num, count);
return count;
}
+static DEVICE_ATTR_RO(id);
-static struct device_attribute w1_slave_attr_name =
- __ATTR(name, S_IRUGO, w1_slave_read_name, NULL);
-static struct device_attribute w1_slave_attr_id =
- __ATTR(id, S_IRUGO, w1_slave_read_id, NULL);
+static struct attribute *w1_slave_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_id.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(w1_slave);
/* Default family */
-static ssize_t w1_default_write(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t rw_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off,
+ size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -139,9 +143,9 @@ out_up:
return count;
}
-static ssize_t w1_default_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *bin_attr,
- char *buf, loff_t off, size_t count)
+static ssize_t rw_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off,
+ size_t count)
{
struct w1_slave *sl = kobj_to_w1_slave(kobj);
@@ -151,29 +155,24 @@ static ssize_t w1_default_read(struct file *filp, struct kobject *kobj,
return count;
}
-static struct bin_attribute w1_default_attr = {
- .attr = {
- .name = "rw",
- .mode = S_IRUGO | S_IWUSR,
- },
- .size = PAGE_SIZE,
- .read = w1_default_read,
- .write = w1_default_write,
+static BIN_ATTR_RW(rw, PAGE_SIZE);
+
+static struct bin_attribute *w1_slave_bin_attrs[] = {
+ &bin_attr_rw,
+ NULL,
};
-static int w1_default_add_slave(struct w1_slave *sl)
-{
- return sysfs_create_bin_file(&sl->dev.kobj, &w1_default_attr);
-}
+static const struct attribute_group w1_slave_default_group = {
+ .bin_attrs = w1_slave_bin_attrs,
+};
-static void w1_default_remove_slave(struct w1_slave *sl)
-{
- sysfs_remove_bin_file(&sl->dev.kobj, &w1_default_attr);
-}
+static const struct attribute_group *w1_slave_default_groups[] = {
+ &w1_slave_default_group,
+ NULL,
+};
static struct w1_family_ops w1_default_fops = {
- .add_slave = w1_default_add_slave,
- .remove_slave = w1_default_remove_slave,
+ .groups = w1_slave_default_groups,
};
static struct w1_family w1_default_family = {
@@ -587,6 +586,66 @@ end:
return err;
}
+/*
+ * Handle sysfs file creation and removal here, before userspace is told that
+ * the device is added / removed from the system
+ */
+static int w1_bus_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct device *dev = data;
+ struct w1_slave *sl;
+ struct w1_family_ops *fops;
+ int err;
+
+ /*
+ * Only care about slave devices at the moment. Yes, we should use a
+ * separate "type" for this, but for now, look at the release function
+ * to know which type it is...
+ */
+ if (dev->release != w1_slave_release)
+ return 0;
+
+ sl = dev_to_w1_slave(dev);
+ fops = sl->family->fops;
+
+ switch (action) {
+ case BUS_NOTIFY_ADD_DEVICE:
+ /* if the family driver needs to initialize something... */
+ if (fops->add_slave) {
+ err = fops->add_slave(sl);
+ if (err < 0) {
+ dev_err(&sl->dev,
+ "add_slave() call failed. err=%d\n",
+ err);
+ return err;
+ }
+ }
+ if (fops->groups) {
+ err = sysfs_create_groups(&sl->dev.kobj, fops->groups);
+ if (err) {
+ dev_err(&sl->dev,
+ "sysfs group creation failed. err=%d\n",
+ err);
+ return err;
+ }
+ }
+
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+ if (fops->remove_slave)
+ sl->family->fops->remove_slave(sl);
+ if (fops->groups)
+ sysfs_remove_groups(&sl->dev.kobj, fops->groups);
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block w1_bus_nb = {
+ .notifier_call = w1_bus_notify,
+};
+
static int __w1_attach_slave_device(struct w1_slave *sl)
{
int err;
@@ -595,6 +654,7 @@ static int __w1_attach_slave_device(struct w1_slave *sl)
sl->dev.driver = &w1_slave_driver;
sl->dev.bus = &w1_bus_type;
sl->dev.release = &w1_slave_release;
+ sl->dev.groups = w1_slave_groups;
dev_set_name(&sl->dev, "%02x-%012llx",
(unsigned int) sl->reg_num.family,
@@ -615,44 +675,13 @@ static int __w1_attach_slave_device(struct w1_slave *sl)
return err;
}
- /* Create "name" entry */
- err = device_create_file(&sl->dev, &w1_slave_attr_name);
- if (err < 0) {
- dev_err(&sl->dev,
- "sysfs file creation for [%s] failed. err=%d\n",
- dev_name(&sl->dev), err);
- goto out_unreg;
- }
-
- /* Create "id" entry */
- err = device_create_file(&sl->dev, &w1_slave_attr_id);
- if (err < 0) {
- dev_err(&sl->dev,
- "sysfs file creation for [%s] failed. err=%d\n",
- dev_name(&sl->dev), err);
- goto out_rem1;
- }
- /* if the family driver needs to initialize something... */
- if (sl->family->fops && sl->family->fops->add_slave &&
- ((err = sl->family->fops->add_slave(sl)) < 0)) {
- dev_err(&sl->dev,
- "sysfs file creation for [%s] failed. err=%d\n",
- dev_name(&sl->dev), err);
- goto out_rem2;
- }
+ dev_set_uevent_suppress(&sl->dev, false);
+ kobject_uevent(&sl->dev.kobj, KOBJ_ADD);
list_add_tail(&sl->w1_slave_entry, &sl->master->slist);
return 0;
-
-out_rem2:
- device_remove_file(&sl->dev, &w1_slave_attr_id);
-out_rem1:
- device_remove_file(&sl->dev, &w1_slave_attr_name);
-out_unreg:
- device_unregister(&sl->dev);
- return err;
}
static int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
@@ -723,16 +752,11 @@ void w1_slave_detach(struct w1_slave *sl)
list_del(&sl->w1_slave_entry);
- if (sl->family->fops && sl->family->fops->remove_slave)
- sl->family->fops->remove_slave(sl);
-
memset(&msg, 0, sizeof(msg));
memcpy(msg.id.id, &sl->reg_num, sizeof(msg.id));
msg.type = W1_SLAVE_REMOVE;
w1_netlink_send(sl->master, &msg);
- device_remove_file(&sl->dev, &w1_slave_attr_id);
- device_remove_file(&sl->dev, &w1_slave_attr_name);
device_unregister(&sl->dev);
wait_for_completion(&sl->released);
@@ -1017,6 +1041,10 @@ static int __init w1_init(void)
goto err_out_exit_init;
}
+ retval = bus_register_notifier(&w1_bus_type, &w1_bus_nb);
+ if (retval)
+ goto err_out_bus_unregister;
+
retval = driver_register(&w1_master_driver);
if (retval) {
printk(KERN_ERR
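The key structural change in w1.c above is that per-family sysfs files are now created from a bus notifier, so the attribute files already exist when userspace is told about a new slave and are removed again before it learns of the removal. A condensed sketch of that notifier pattern follows; demo_bus_type and demo_dev_groups are hypothetical stand-ins, not symbols from this patch.

/* Condensed sketch of the bus-notifier pattern used by w1 above; all
 * demo_* symbols are hypothetical. */
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/sysfs.h>

extern struct bus_type demo_bus_type;
extern const struct attribute_group *demo_dev_groups[];

static int demo_bus_notify(struct notifier_block *nb, unsigned long action,
			   void *data)
{
	struct device *dev = data;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		/* called from device_add(), before the KOBJ_ADD uevent */
		if (sysfs_create_groups(&dev->kobj, demo_dev_groups))
			dev_warn(dev, "sysfs group creation failed\n");
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		/* device is on its way out; tear the files down */
		sysfs_remove_groups(&dev->kobj, demo_dev_groups);
		break;
	}
	return 0;
}

static struct notifier_block demo_bus_nb = {
	.notifier_call = demo_bus_notify,
};

static int __init demo_notifier_init(void)
{
	return bus_register_notifier(&demo_bus_type, &demo_bus_nb);
}
module_init(demo_notifier_init);
MODULE_LICENSE("GPL");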
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index 625dd08f775..4ad0e81b640 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -52,6 +52,7 @@ struct w1_family_ops
{
int (* add_slave)(struct w1_slave *);
void (* remove_slave)(struct w1_slave *);
+ const struct attribute_group **groups;
};
struct w1_family
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 9e02d60a364..23eae5cb69c 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -145,7 +145,7 @@ config SWIOTLB_XEN
config XEN_TMEM
tristate
- depends on !ARM
+ depends on !ARM && !ARM64
default m if (CLEANCACHE || FRONTSWAP)
help
Shim to interface in-kernel Transcendent Memory hooks
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index eabd0ee1c2b..14fe79d8634 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -1,9 +1,8 @@
-ifneq ($(CONFIG_ARM),y)
-obj-y += manage.o
+ifeq ($(filter y, $(CONFIG_ARM) $(CONFIG_ARM64)),)
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
endif
obj-$(CONFIG_X86) += fallback.o
-obj-y += grant-table.o features.o events.o balloon.o
+obj-y += grant-table.o features.o events.o balloon.o manage.o
obj-y += xenbus/
nostackp := $(call cc-option, -fno-stack-protector)
diff --git a/drivers/xen/acpi.c b/drivers/xen/acpi.c
index 119d42a2bf5..90307c0b630 100644
--- a/drivers/xen/acpi.c
+++ b/drivers/xen/acpi.c
@@ -35,28 +35,43 @@
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
-int xen_acpi_notify_hypervisor_state(u8 sleep_state,
- u32 pm1a_cnt, u32 pm1b_cnt)
+static int xen_acpi_notify_hypervisor_state(u8 sleep_state,
+ u32 val_a, u32 val_b,
+ bool extended)
{
+ unsigned int bits = extended ? 8 : 16;
+
struct xen_platform_op op = {
.cmd = XENPF_enter_acpi_sleep,
.interface_version = XENPF_INTERFACE_VERSION,
- .u = {
- .enter_acpi_sleep = {
- .pm1a_cnt_val = (u16)pm1a_cnt,
- .pm1b_cnt_val = (u16)pm1b_cnt,
- .sleep_state = sleep_state,
- },
+ .u.enter_acpi_sleep = {
+ .val_a = (u16)val_a,
+ .val_b = (u16)val_b,
+ .sleep_state = sleep_state,
+ .flags = extended ? XENPF_ACPI_SLEEP_EXTENDED : 0,
},
};
- if ((pm1a_cnt & 0xffff0000) || (pm1b_cnt & 0xffff0000)) {
- WARN(1, "Using more than 16bits of PM1A/B 0x%x/0x%x!"
- "Email xen-devel@lists.xensource.com Thank you.\n", \
- pm1a_cnt, pm1b_cnt);
+ if (WARN((val_a & (~0 << bits)) || (val_b & (~0 << bits)),
+ "Using more than %u bits of sleep control values %#x/%#x!"
+ "Email xen-devel@lists.xen.org - Thank you.\n", \
+ bits, val_a, val_b))
return -1;
- }
HYPERVISOR_dom0_op(&op);
return 1;
}
+
+int xen_acpi_notify_hypervisor_sleep(u8 sleep_state,
+ u32 pm1a_cnt, u32 pm1b_cnt)
+{
+ return xen_acpi_notify_hypervisor_state(sleep_state, pm1a_cnt,
+ pm1b_cnt, false);
+}
+
+int xen_acpi_notify_hypervisor_extended_sleep(u8 sleep_state,
+ u32 val_a, u32 val_b)
+{
+ return xen_acpi_notify_hypervisor_state(sleep_state, val_a,
+ val_b, true);
+}
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 2a2ef97697b..3101cf6daf5 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -38,6 +38,7 @@
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
@@ -52,6 +53,7 @@
#include <linux/notifier.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
+#include <linux/percpu-defs.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
@@ -90,6 +92,8 @@ EXPORT_SYMBOL_GPL(balloon_stats);
/* We increase/decrease in batches which fit in a page */
static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
+static DEFINE_PER_CPU(struct page *, balloon_scratch_page);
+
/* List of ballooned pages, threaded through the mem_map array. */
static LIST_HEAD(ballooned_pages);
@@ -412,7 +416,8 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
if (xen_pv_domain() && !PageHighMem(page)) {
ret = HYPERVISOR_update_va_mapping(
(unsigned long)__va(pfn << PAGE_SHIFT),
- __pte_ma(0), 0);
+ pfn_pte(page_to_pfn(__get_cpu_var(balloon_scratch_page)),
+ PAGE_KERNEL_RO), 0);
BUG_ON(ret);
}
#endif
@@ -425,7 +430,13 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
/* No more mappings: invalidate P2M and add to balloon. */
for (i = 0; i < nr_pages; i++) {
pfn = mfn_to_pfn(frame_list[i]);
- __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ unsigned long p;
+ struct page *pg;
+ pg = __get_cpu_var(balloon_scratch_page);
+ p = page_to_pfn(pg);
+ __set_phys_to_machine(pfn, pfn_to_mfn(p));
+ }
balloon_append(pfn_to_page(pfn));
}
@@ -480,6 +491,18 @@ static void balloon_process(struct work_struct *work)
mutex_unlock(&balloon_mutex);
}
+struct page *get_balloon_scratch_page(void)
+{
+ struct page *ret = get_cpu_var(balloon_scratch_page);
+ BUG_ON(ret == NULL);
+ return ret;
+}
+
+void put_balloon_scratch_page(void)
+{
+ put_cpu_var(balloon_scratch_page);
+}
+
/* Resets the Xen limit, sets new target, and kicks off processing. */
void balloon_set_new_target(unsigned long target)
{
@@ -573,13 +596,47 @@ static void __init balloon_add_region(unsigned long start_pfn,
}
}
+static int __cpuinit balloon_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ int cpu = (long)hcpu;
+ switch (action) {
+ case CPU_UP_PREPARE:
+ if (per_cpu(balloon_scratch_page, cpu) != NULL)
+ break;
+ per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
+ if (per_cpu(balloon_scratch_page, cpu) == NULL) {
+ pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+ return NOTIFY_BAD;
+ }
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block balloon_cpu_notifier __cpuinitdata = {
+ .notifier_call = balloon_cpu_notify,
+};
+
static int __init balloon_init(void)
{
- int i;
+ int i, cpu;
if (!xen_domain())
return -ENODEV;
+ for_each_online_cpu(cpu)
+ {
+ per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
+ if (per_cpu(balloon_scratch_page, cpu) == NULL) {
+ pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+ return -ENOMEM;
+ }
+ }
+ register_cpu_notifier(&balloon_cpu_notifier);
+
pr_info("Initialising balloon driver\n");
balloon_stats.current_pages = xen_pv_domain()
@@ -616,4 +673,15 @@ static int __init balloon_init(void)
subsys_initcall(balloon_init);
+static int __init balloon_clear(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ per_cpu(balloon_scratch_page, cpu) = NULL;
+
+ return 0;
+}
+early_initcall(balloon_clear);
+
MODULE_LICENSE("GPL");
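The balloon change keeps one scratch page per CPU and has to cover both CPUs that are online at init time and CPUs hotplugged later, hence the CPU notifier. Below is a trimmed sketch of that allocation pattern with a hypothetical demo_scratch_page variable, using the pre-cpuhp notifier API of this kernel generation; cleanup on partial failure is deliberately omitted, as it is in the patch.

/* Per-CPU page allocation with hotplug coverage; demo_* names are
 * illustrative, error unwinding is intentionally omitted. */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/percpu-defs.h>

static DEFINE_PER_CPU(struct page *, demo_scratch_page);

static int demo_cpu_notify(struct notifier_block *self, unsigned long action,
			   void *hcpu)
{
	int cpu = (long)hcpu;

	/* allocate lazily for CPUs that come up after init */
	if (action == CPU_UP_PREPARE && !per_cpu(demo_scratch_page, cpu)) {
		per_cpu(demo_scratch_page, cpu) = alloc_page(GFP_KERNEL);
		if (!per_cpu(demo_scratch_page, cpu))
			return NOTIFY_BAD;	/* veto the CPU bring-up */
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_cpu_nb = {
	.notifier_call = demo_cpu_notify,
};

static int __init demo_scratch_init(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		per_cpu(demo_scratch_page, cpu) = alloc_page(GFP_KERNEL);
		if (!per_cpu(demo_scratch_page, cpu))
			return -ENOMEM;
	}
	register_cpu_notifier(&demo_cpu_nb);
	return 0;
}
module_init(demo_scratch_init);
MODULE_LICENSE("GPL");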
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index a58ac435a9a..4035e833ea2 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -56,6 +56,7 @@
#include <xen/interface/hvm/params.h>
#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
+#include <xen/interface/vcpu.h>
#include <asm/hw_irq.h>
/*
@@ -348,7 +349,7 @@ static void init_evtchn_cpu_bindings(void)
for_each_possible_cpu(i)
memset(per_cpu(cpu_evtchn_mask, i),
- (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
+ (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8);
}
static inline void clear_evtchn(int port)
@@ -1212,7 +1213,17 @@ EXPORT_SYMBOL_GPL(evtchn_put);
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
- int irq = per_cpu(ipi_to_irq, cpu)[vector];
+ int irq;
+
+#ifdef CONFIG_X86
+ if (unlikely(vector == XEN_NMI_VECTOR)) {
+ int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL);
+ if (rc < 0)
+ printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
+ return;
+ }
+#endif
+ irq = per_cpu(ipi_to_irq, cpu)[vector];
BUG_ON(irq < 0);
notify_remote_via_irq(irq);
}
@@ -1379,14 +1390,21 @@ static void __xen_evtchn_do_upcall(void)
pending_bits = active_evtchns(cpu, s, word_idx);
bit_idx = 0; /* usually scan entire word from start */
+ /*
+ * We scan the starting word in two parts.
+ *
+ * 1st time: start in the middle, scanning the
+ * upper bits.
+ *
+ * 2nd time: scan the whole word (not just the
+ * parts skipped in the first pass) -- if an
+ * event in the previously scanned bits is
+ * pending again it would just be scanned on
+ * the next loop anyway.
+ */
if (word_idx == start_word_idx) {
- /* We scan the starting word in two parts */
if (i == 0)
- /* 1st time: start in the middle */
bit_idx = start_bit_idx;
- else
- /* 2nd time: mask bits done already */
- bit_idx &= (1UL << start_bit_idx) - 1;
}
do {
@@ -1493,8 +1511,10 @@ void rebind_evtchn_irq(int evtchn, int irq)
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
+ struct shared_info *s = HYPERVISOR_shared_info;
struct evtchn_bind_vcpu bind_vcpu;
int evtchn = evtchn_from_irq(irq);
+ int masked;
if (!VALID_EVTCHN(evtchn))
return -1;
@@ -1511,6 +1531,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
bind_vcpu.vcpu = tcpu;
/*
+ * Mask the event while changing the VCPU binding to prevent
+ * it being delivered on an unexpected VCPU.
+ */
+ masked = sync_test_and_set_bit(evtchn, BM(s->evtchn_mask));
+
+ /*
* If this fails, it usually just indicates that we're dealing with a
* virq or IPI channel, which don't actually need to be rebound. Ignore
* it, but don't do the xenlinux-level rebind in that case.
@@ -1518,6 +1544,9 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
bind_evtchn_to_cpu(evtchn, tcpu);
+ if (!masked)
+ unmask_evtchn(evtchn);
+
return 0;
}
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 8feecf01d55..8b3a69a06c3 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -57,6 +57,7 @@
struct per_user_data {
struct mutex bind_mutex; /* serialize bind/unbind operations */
+ struct rb_root evtchns;
/* Notification ring, accessed via /dev/xen/evtchn. */
#define EVTCHN_RING_SIZE (PAGE_SIZE / sizeof(evtchn_port_t))
@@ -64,6 +65,7 @@ struct per_user_data {
evtchn_port_t *ring;
unsigned int ring_cons, ring_prod, ring_overflow;
struct mutex ring_cons_mutex; /* protect against concurrent readers */
+ spinlock_t ring_prod_lock; /* protect against concurrent interrupts */

/* Processes wait on this queue when ring is empty. */
wait_queue_head_t evtchn_wait;
@@ -71,54 +73,79 @@ struct per_user_data {
const char *name;
};
-/*
- * Who's bound to each port? This is logically an array of struct
- * per_user_data *, but we encode the current enabled-state in bit 0.
- */
-static unsigned long *port_user;
-static DEFINE_SPINLOCK(port_user_lock); /* protects port_user[] and ring_prod */
+struct user_evtchn {
+ struct rb_node node;
+ struct per_user_data *user;
+ unsigned port;
+ bool enabled;
+};
-static inline struct per_user_data *get_port_user(unsigned port)
+static int add_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
{
- return (struct per_user_data *)(port_user[port] & ~1);
-}
+ struct rb_node **new = &(u->evtchns.rb_node), *parent = NULL;
-static inline void set_port_user(unsigned port, struct per_user_data *u)
-{
- port_user[port] = (unsigned long)u;
+ while (*new) {
+ struct user_evtchn *this;
+
+ this = container_of(*new, struct user_evtchn, node);
+
+ parent = *new;
+ if (this->port < evtchn->port)
+ new = &((*new)->rb_left);
+ else if (this->port > evtchn->port)
+ new = &((*new)->rb_right);
+ else
+ return -EEXIST;
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&evtchn->node, parent, new);
+ rb_insert_color(&evtchn->node, &u->evtchns);
+
+ return 0;
}
-static inline bool get_port_enabled(unsigned port)
+static void del_evtchn(struct per_user_data *u, struct user_evtchn *evtchn)
{
- return port_user[port] & 1;
+ rb_erase(&evtchn->node, &u->evtchns);
+ kfree(evtchn);
}
-static inline void set_port_enabled(unsigned port, bool enabled)
+static struct user_evtchn *find_evtchn(struct per_user_data *u, unsigned port)
{
- if (enabled)
- port_user[port] |= 1;
- else
- port_user[port] &= ~1;
+ struct rb_node *node = u->evtchns.rb_node;
+
+ while (node) {
+ struct user_evtchn *evtchn;
+
+ evtchn = container_of(node, struct user_evtchn, node);
+
+ if (evtchn->port < port)
+ node = node->rb_left;
+ else if (evtchn->port > port)
+ node = node->rb_right;
+ else
+ return evtchn;
+ }
+ return NULL;
}
static irqreturn_t evtchn_interrupt(int irq, void *data)
{
- unsigned int port = (unsigned long)data;
- struct per_user_data *u;
+ struct user_evtchn *evtchn = data;
+ struct per_user_data *u = evtchn->user;
- spin_lock(&port_user_lock);
-
- u = get_port_user(port);
-
- WARN(!get_port_enabled(port),
+ WARN(!evtchn->enabled,
"Interrupt for port %d, but apparently not enabled; per-user %p\n",
- port, u);
+ evtchn->port, u);
disable_irq_nosync(irq);
- set_port_enabled(port, false);
+ evtchn->enabled = false;
+
+ spin_lock(&u->ring_prod_lock);
if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
- u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
+ u->ring[EVTCHN_RING_MASK(u->ring_prod)] = evtchn->port;
wmb(); /* Ensure ring contents visible */
if (u->ring_cons == u->ring_prod++) {
wake_up_interruptible(&u->evtchn_wait);
@@ -128,7 +155,7 @@ static irqreturn_t evtchn_interrupt(int irq, void *data)
} else
u->ring_overflow = 1;
- spin_unlock(&port_user_lock);
+ spin_unlock(&u->ring_prod_lock);
return IRQ_HANDLED;
}
@@ -229,20 +256,20 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf,
if (copy_from_user(kbuf, buf, count) != 0)
goto out;
- spin_lock_irq(&port_user_lock);
+ mutex_lock(&u->bind_mutex);
for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
unsigned port = kbuf[i];
+ struct user_evtchn *evtchn;
- if (port < NR_EVENT_CHANNELS &&
- get_port_user(port) == u &&
- !get_port_enabled(port)) {
- set_port_enabled(port, true);
+ evtchn = find_evtchn(u, port);
+ if (evtchn && !evtchn->enabled) {
+ evtchn->enabled = true;
enable_irq(irq_from_evtchn(port));
}
}
- spin_unlock_irq(&port_user_lock);
+ mutex_unlock(&u->bind_mutex);
rc = count;
@@ -253,6 +280,8 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf,
static int evtchn_bind_to_user(struct per_user_data *u, int port)
{
+ struct user_evtchn *evtchn;
+ struct evtchn_close close;
int rc = 0;
/*
@@ -263,35 +292,46 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
* interrupt handler yet, and our caller has already
* serialized bind operations.)
*/
- BUG_ON(get_port_user(port) != NULL);
- set_port_user(port, u);
- set_port_enabled(port, true); /* start enabled */
+
+ evtchn = kzalloc(sizeof(*evtchn), GFP_KERNEL);
+ if (!evtchn)
+ return -ENOMEM;
+
+ evtchn->user = u;
+ evtchn->port = port;
+ evtchn->enabled = true; /* start enabled */
+
+ rc = add_evtchn(u, evtchn);
+ if (rc < 0)
+ goto err;
rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
- u->name, (void *)(unsigned long)port);
- if (rc >= 0)
- rc = evtchn_make_refcounted(port);
- else {
- /* bind failed, should close the port now */
- struct evtchn_close close;
- close.port = port;
- if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
- BUG();
- set_port_user(port, NULL);
- }
+ u->name, evtchn);
+ if (rc < 0)
+ goto err;
+
+ rc = evtchn_make_refcounted(port);
+ return rc;
+err:
+ /* bind failed, should close the port now */
+ close.port = port;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
+ BUG();
+ del_evtchn(u, evtchn);
return rc;
}
-static void evtchn_unbind_from_user(struct per_user_data *u, int port)
+static void evtchn_unbind_from_user(struct per_user_data *u,
+ struct user_evtchn *evtchn)
{
- int irq = irq_from_evtchn(port);
+ int irq = irq_from_evtchn(evtchn->port);
BUG_ON(irq < 0);
- unbind_from_irqhandler(irq, (void *)(unsigned long)port);
+ unbind_from_irqhandler(irq, evtchn);
- set_port_user(port, NULL);
+ del_evtchn(u, evtchn);
}
static long evtchn_ioctl(struct file *file,
@@ -370,6 +410,7 @@ static long evtchn_ioctl(struct file *file,
case IOCTL_EVTCHN_UNBIND: {
struct ioctl_evtchn_unbind unbind;
+ struct user_evtchn *evtchn;
rc = -EFAULT;
if (copy_from_user(&unbind, uarg, sizeof(unbind)))
@@ -379,36 +420,28 @@ static long evtchn_ioctl(struct file *file,
if (unbind.port >= NR_EVENT_CHANNELS)
break;
- spin_lock_irq(&port_user_lock);
-
rc = -ENOTCONN;
- if (get_port_user(unbind.port) != u) {
- spin_unlock_irq(&port_user_lock);
+ evtchn = find_evtchn(u, unbind.port);
+ if (!evtchn)
break;
- }
disable_irq(irq_from_evtchn(unbind.port));
-
- spin_unlock_irq(&port_user_lock);
-
- evtchn_unbind_from_user(u, unbind.port);
-
+ evtchn_unbind_from_user(u, evtchn);
rc = 0;
break;
}
case IOCTL_EVTCHN_NOTIFY: {
struct ioctl_evtchn_notify notify;
+ struct user_evtchn *evtchn;
rc = -EFAULT;
if (copy_from_user(&notify, uarg, sizeof(notify)))
break;
- if (notify.port >= NR_EVENT_CHANNELS) {
- rc = -EINVAL;
- } else if (get_port_user(notify.port) != u) {
- rc = -ENOTCONN;
- } else {
+ rc = -ENOTCONN;
+ evtchn = find_evtchn(u, notify.port);
+ if (evtchn) {
notify_remote_via_evtchn(notify.port);
rc = 0;
}
@@ -418,9 +451,9 @@ static long evtchn_ioctl(struct file *file,
case IOCTL_EVTCHN_RESET: {
/* Initialise the ring to empty. Clear errors. */
mutex_lock(&u->ring_cons_mutex);
- spin_lock_irq(&port_user_lock);
+ spin_lock_irq(&u->ring_prod_lock);
u->ring_cons = u->ring_prod = u->ring_overflow = 0;
- spin_unlock_irq(&port_user_lock);
+ spin_unlock_irq(&u->ring_prod_lock);
mutex_unlock(&u->ring_cons_mutex);
rc = 0;
break;
@@ -479,6 +512,7 @@ static int evtchn_open(struct inode *inode, struct file *filp)
mutex_init(&u->bind_mutex);
mutex_init(&u->ring_cons_mutex);
+ spin_lock_init(&u->ring_prod_lock);
filp->private_data = u;
@@ -487,29 +521,18 @@ static int evtchn_open(struct inode *inode, struct file *filp)
static int evtchn_release(struct inode *inode, struct file *filp)
{
- int i;
struct per_user_data *u = filp->private_data;
+ struct rb_node *node;
- spin_lock_irq(&port_user_lock);
-
- free_page((unsigned long)u->ring);
-
- for (i = 0; i < NR_EVENT_CHANNELS; i++) {
- if (get_port_user(i) != u)
- continue;
-
- disable_irq(irq_from_evtchn(i));
- }
-
- spin_unlock_irq(&port_user_lock);
-
- for (i = 0; i < NR_EVENT_CHANNELS; i++) {
- if (get_port_user(i) != u)
- continue;
+ while ((node = u->evtchns.rb_node)) {
+ struct user_evtchn *evtchn;
- evtchn_unbind_from_user(get_port_user(i), i);
+ evtchn = rb_entry(node, struct user_evtchn, node);
+ disable_irq(irq_from_evtchn(evtchn->port));
+ evtchn_unbind_from_user(u, evtchn);
}
+ free_page((unsigned long)u->ring);
kfree(u->name);
kfree(u);
@@ -540,12 +563,6 @@ static int __init evtchn_init(void)
if (!xen_domain())
return -ENODEV;
- port_user = kcalloc(NR_EVENT_CHANNELS, sizeof(*port_user), GFP_KERNEL);
- if (port_user == NULL)
- return -ENOMEM;
-
- spin_lock_init(&port_user_lock);
-
/* Create '/dev/xen/evtchn'. */
err = misc_register(&evtchn_miscdev);
if (err != 0) {
@@ -560,9 +577,6 @@ static int __init evtchn_init(void)
static void __exit evtchn_cleanup(void)
{
- kfree(port_user);
- port_user = NULL;
-
misc_deregister(&evtchn_miscdev);
}
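The evtchn driver now tracks bindings in a per-user red-black tree keyed by port instead of the global port_user[] array, which is what lets the global spinlock go away. A stand-alone sketch of the insert/lookup idiom with the kernel rbtree API follows; the demo_* names are placeholders, and the comparison direction only has to match between insert and lookup, as it does in the patch.

/* Stand-alone sketch of the rbtree bookkeeping that replaces the global
 * port_user[] array above; demo_* names are placeholders. */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/rbtree.h>

struct demo_evtchn {
	struct rb_node node;
	unsigned port;
};

static int demo_add(struct rb_root *root, struct demo_evtchn *evtchn)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	while (*new) {
		struct demo_evtchn *this =
			container_of(*new, struct demo_evtchn, node);

		parent = *new;
		if (this->port < evtchn->port)
			new = &(*new)->rb_left;
		else if (this->port > evtchn->port)
			new = &(*new)->rb_right;
		else
			return -EEXIST;	/* port already bound */
	}

	/* add the new node and rebalance */
	rb_link_node(&evtchn->node, parent, new);
	rb_insert_color(&evtchn->node, root);
	return 0;
}

static struct demo_evtchn *demo_find(struct rb_root *root, unsigned port)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct demo_evtchn *evtchn =
			container_of(node, struct demo_evtchn, node);

		/* descend with the same comparison used on insert */
		if (evtchn->port < port)
			node = node->rb_left;
		else if (evtchn->port > port)
			node = node->rb_right;
		else
			return evtchn;
	}
	return NULL;
}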
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index eab5427c75f..e41c79c986e 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -272,19 +272,12 @@ static int map_grant_pages(struct grant_map *map)
* with find_grant_ptes.
*/
for (i = 0; i < map->count; i++) {
- unsigned level;
unsigned long address = (unsigned long)
pfn_to_kaddr(page_to_pfn(map->pages[i]));
- pte_t *ptep;
- u64 pte_maddr = 0;
BUG_ON(PageHighMem(map->pages[i]));
- ptep = lookup_address(address, &level);
- pte_maddr = arbitrary_virt_to_machine(ptep).maddr;
- gnttab_set_map_op(&map->kmap_ops[i], pte_maddr,
- map->flags |
- GNTMAP_host_map |
- GNTMAP_contains_pte,
+ gnttab_set_map_op(&map->kmap_ops[i], address,
+ map->flags | GNTMAP_host_map,
map->grants[i].ref,
map->grants[i].domid);
}
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 04cdeb8e371..c4d2298893b 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -730,9 +730,18 @@ void gnttab_request_free_callback(struct gnttab_free_callback *callback,
void (*fn)(void *), void *arg, u16 count)
{
unsigned long flags;
+ struct gnttab_free_callback *cb;
+
spin_lock_irqsave(&gnttab_list_lock, flags);
- if (callback->next)
- goto out;
+
+ /* Check if the callback is already on the list */
+ cb = gnttab_free_callback_list;
+ while (cb) {
+ if (cb == callback)
+ goto out;
+ cb = cb->next;
+ }
+
callback->fn = fn;
callback->arg = arg;
callback->count = count;
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index f8e5dd701ec..8e74590fa1b 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -43,9 +43,10 @@ MODULE_LICENSE("GPL");
#define PRIV_VMA_LOCKED ((void *)1)
-#ifndef HAVE_ARCH_PRIVCMD_MMAP
-static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
-#endif
+static int privcmd_vma_range_is_mapped(
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ unsigned long nr_pages);
static long privcmd_ioctl_hypercall(void __user *udata)
{
@@ -225,9 +226,9 @@ static long privcmd_ioctl_mmap(void __user *udata)
vma = find_vma(mm, msg->va);
rc = -EINVAL;
- if (!vma || (msg->va != vma->vm_start) ||
- !privcmd_enforce_singleshot_mapping(vma))
+ if (!vma || (msg->va != vma->vm_start) || vma->vm_private_data)
goto out_up;
+ vma->vm_private_data = PRIV_VMA_LOCKED;
}
state.va = vma->vm_start;
@@ -358,7 +359,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
kfree(pages);
return -ENOMEM;
}
- BUG_ON(vma->vm_private_data != PRIV_VMA_LOCKED);
+ BUG_ON(vma->vm_private_data != NULL);
vma->vm_private_data = pages;
return 0;
@@ -421,19 +422,43 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
vma = find_vma(mm, m.addr);
if (!vma ||
- vma->vm_ops != &privcmd_vm_ops ||
- (m.addr != vma->vm_start) ||
- ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
- !privcmd_enforce_singleshot_mapping(vma)) {
- up_write(&mm->mmap_sem);
+ vma->vm_ops != &privcmd_vm_ops) {
ret = -EINVAL;
- goto out;
+ goto out_unlock;
}
- if (xen_feature(XENFEAT_auto_translated_physmap)) {
- ret = alloc_empty_pages(vma, m.num);
- if (ret < 0) {
- up_write(&mm->mmap_sem);
- goto out;
+
+ /*
+ * Caller must either:
+ *
+ * Map the whole VMA range, which will also allocate all the
+ * pages required for the auto_translated_physmap case.
+ *
+ * Or
+ *
+ * Map unmapped holes left from a previous map attempt (e.g.,
+ * because those foreign frames were previously paged out).
+ */
+ if (vma->vm_private_data == NULL) {
+ if (m.addr != vma->vm_start ||
+ m.addr + (nr_pages << PAGE_SHIFT) != vma->vm_end) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (xen_feature(XENFEAT_auto_translated_physmap)) {
+ ret = alloc_empty_pages(vma, m.num);
+ if (ret < 0)
+ goto out_unlock;
+ } else
+ vma->vm_private_data = PRIV_VMA_LOCKED;
+ } else {
+ if (m.addr < vma->vm_start ||
+ m.addr + (nr_pages << PAGE_SHIFT) > vma->vm_end) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ if (privcmd_vma_range_is_mapped(vma, m.addr, nr_pages)) {
+ ret = -EINVAL;
+ goto out_unlock;
}
}
@@ -466,8 +491,11 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
out:
free_page_list(&pagelist);
-
return ret;
+
+out_unlock:
+ up_write(&mm->mmap_sem);
+ goto out;
}
static long privcmd_ioctl(struct file *file,
@@ -540,9 +568,24 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
-static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
+/*
+ * For MMAPBATCH*. This allows asserting the singleshot mapping
+ * on a per pfn/pte basis. Mapping calls that fail with ENOENT
+ * can then be retried until they succeed.
+ */
+static int is_mapped_fn(pte_t *pte, struct page *pmd_page,
+ unsigned long addr, void *data)
+{
+ return pte_none(*pte) ? 0 : -EBUSY;
+}
+
+static int privcmd_vma_range_is_mapped(
+ struct vm_area_struct *vma,
+ unsigned long addr,
+ unsigned long nr_pages)
{
- return !cmpxchg(&vma->vm_private_data, NULL, PRIV_VMA_LOCKED);
+ return apply_to_page_range(vma->vm_mm, addr, nr_pages << PAGE_SHIFT,
+ is_mapped_fn, NULL) != 0;
}
const struct file_operations xen_privcmd_fops = {
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index aadffcf7db9..1b2277c311d 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -506,13 +506,13 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
to do proper error handling. */
xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
attrs);
- sgl[0].dma_length = 0;
+ sg_dma_len(sgl) = 0;
return DMA_ERROR_CODE;
}
sg->dma_address = xen_phys_to_bus(map);
} else
sg->dma_address = dev_addr;
- sg->dma_length = sg->length;
+ sg_dma_len(sg) = sg->length;
}
return nelems;
}
@@ -533,7 +533,7 @@ xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i)
- xen_unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
+ xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);
@@ -555,7 +555,7 @@ xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
for_each_sg(sgl, sg, nelems, i)
xen_swiotlb_sync_single(hwdev, sg->dma_address,
- sg->dma_length, dir, target);
+ sg_dma_len(sg), dir, target);
}
void
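The swiotlb-xen hunks switch from poking sg->dma_length directly to the sg_dma_len() accessor, which resolves to the separate dma_length field only on architectures that set CONFIG_NEED_SG_DMA_LENGTH and to sg->length elsewhere. A tiny illustrative helper, not taken from the patch:

/* sg_dma_len() works as an lvalue on every architecture, so drivers never
 * need to know whether a separate dma_length field exists. */
#include <linux/scatterlist.h>
#include <linux/types.h>

static void demo_fill_sg_entry(struct scatterlist *sg, dma_addr_t addr,
			       unsigned int len)
{
	sg->dma_address = addr;		/* bus address handed to the device */
	sg_dma_len(sg) = len;		/* portable accessor for the DMA length */
}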
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 02817a85f87..21e18c18c7a 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -265,8 +265,10 @@ static ssize_t store_selfballooning(struct device *dev,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 10, &tmp);
- if (err || ((tmp != 0) && (tmp != 1)))
+ err = kstrtoul(buf, 10, &tmp);
+ if (err)
+ return err;
+ if ((tmp != 0) && (tmp != 1))
return -EINVAL;
xen_selfballooning_enabled = !!tmp;
@@ -292,8 +294,10 @@ static ssize_t store_selfballoon_interval(struct device *dev,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 10, &val);
- if (err || val == 0)
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+ if (val == 0)
return -EINVAL;
selfballoon_interval = val;
return count;
@@ -314,8 +318,10 @@ static ssize_t store_selfballoon_downhys(struct device *dev,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 10, &val);
- if (err || val == 0)
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+ if (val == 0)
return -EINVAL;
selfballoon_downhysteresis = val;
return count;
@@ -337,8 +343,10 @@ static ssize_t store_selfballoon_uphys(struct device *dev,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 10, &val);
- if (err || val == 0)
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+ if (val == 0)
return -EINVAL;
selfballoon_uphysteresis = val;
return count;
@@ -360,8 +368,10 @@ static ssize_t store_selfballoon_min_usable_mb(struct device *dev,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 10, &val);
- if (err || val == 0)
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+ if (val == 0)
return -EINVAL;
selfballoon_min_usable_mb = val;
return count;
@@ -384,8 +394,10 @@ static ssize_t store_selfballoon_reserved_mb(struct device *dev,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 10, &val);
- if (err || val == 0)
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+ if (val == 0)
return -EINVAL;
selfballoon_reserved_mb = val;
return count;
@@ -410,8 +422,10 @@ static ssize_t store_frontswap_selfshrinking(struct device *dev,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 10, &tmp);
- if (err || ((tmp != 0) && (tmp != 1)))
+ err = kstrtoul(buf, 10, &tmp);
+ if (err)
+ return err;
+ if ((tmp != 0) && (tmp != 1))
return -EINVAL;
frontswap_selfshrinking = !!tmp;
if (!was_enabled && !xen_selfballooning_enabled &&
@@ -437,8 +451,10 @@ static ssize_t store_frontswap_inertia(struct device *dev,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 10, &val);
- if (err || val == 0)
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+ if (val == 0)
return -EINVAL;
frontswap_inertia = val;
frontswap_inertia_counter = val;
@@ -460,8 +476,10 @@ static ssize_t store_frontswap_hysteresis(struct device *dev,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 10, &val);
- if (err || val == 0)
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+ if (val == 0)
return -EINVAL;
frontswap_hysteresis = val;
return count;
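All of the selfballoon store handlers above move from strict_strtoul() to kstrtoul() and now return the parse error itself instead of folding it into -EINVAL. A sketch of one complete handler in that style, with a hypothetical demo_threshold knob that is not part of the patch:

/* Sketch of the kstrtoul() store-handler pattern; demo_threshold and the
 * attribute name are placeholders. */
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/stat.h>

static unsigned long demo_threshold = 1;

static ssize_t demo_threshold_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", demo_threshold);
}

static ssize_t demo_threshold_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	unsigned long val;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* kstrtoul() reports bad input explicitly, so its error code is
	 * propagated instead of being collapsed into -EINVAL */
	err = kstrtoul(buf, 10, &val);
	if (err)
		return err;
	if (val == 0)
		return -EINVAL;

	demo_threshold = val;
	return count;
}

static DEVICE_ATTR(demo_threshold, S_IRUGO | S_IWUSR,
		   demo_threshold_show, demo_threshold_store);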
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 6ed8a9df447..34b20bfa4e8 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -115,7 +115,6 @@ static int xenbus_frontend_dev_resume(struct device *dev)
return -EFAULT;
}
- INIT_WORK(&xdev->work, xenbus_frontend_delayed_resume);
queue_work(xenbus_frontend_wq, &xdev->work);
return 0;
@@ -124,6 +123,16 @@ static int xenbus_frontend_dev_resume(struct device *dev)
return xenbus_dev_resume(dev);
}
+static int xenbus_frontend_dev_probe(struct device *dev)
+{
+ if (xen_store_domain_type == XS_LOCAL) {
+ struct xenbus_device *xdev = to_xenbus_device(dev);
+ INIT_WORK(&xdev->work, xenbus_frontend_delayed_resume);
+ }
+
+ return xenbus_dev_probe(dev);
+}
+
static const struct dev_pm_ops xenbus_pm_ops = {
.suspend = xenbus_dev_suspend,
.resume = xenbus_frontend_dev_resume,
@@ -142,7 +151,7 @@ static struct xen_bus_type xenbus_frontend = {
.name = "xen",
.match = xenbus_match,
.uevent = xenbus_uevent_frontend,
- .probe = xenbus_dev_probe,
+ .probe = xenbus_frontend_dev_probe,
.remove = xenbus_dev_remove,
.shutdown = xenbus_dev_shutdown,
.dev_attrs = xenbus_dev_attrs,
@@ -474,7 +483,11 @@ static int __init xenbus_probe_frontend_init(void)
register_xenstore_notifier(&xenstore_notifier);
- xenbus_frontend_wq = create_workqueue("xenbus_frontend");
+ if (xen_store_domain_type == XS_LOCAL) {
+ xenbus_frontend_wq = create_workqueue("xenbus_frontend");
+ if (!xenbus_frontend_wq)
+ pr_warn("create xenbus frontend workqueue failed, S3 resume is likely to fail\n");
+ }
return 0;
}
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 5e376bb9341..8defc6b3f9a 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -40,7 +40,7 @@ struct inode *bfs_iget(struct super_block *sb, unsigned long ino)
int block, off;
inode = iget_locked(sb, ino);
- if (IS_ERR(inode))
+ if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
diff --git a/fs/bio.c b/fs/bio.c
index 94bbc04dba7..b3b20ed9510 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -1045,12 +1045,22 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
int bio_uncopy_user(struct bio *bio)
{
struct bio_map_data *bmd = bio->bi_private;
- int ret = 0;
+ struct bio_vec *bvec;
+ int ret = 0, i;
- if (!bio_flagged(bio, BIO_NULL_MAPPED))
- ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
- bmd->nr_sgvecs, bio_data_dir(bio) == READ,
- 0, bmd->is_our_pages);
+ if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
+ /*
+ * if we're in a workqueue, the request is orphaned, so
+ * don't copy into a random user address space, just free.
+ */
+ if (current->mm)
+ ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs,
+ bmd->nr_sgvecs, bio_data_dir(bio) == READ,
+ 0, bmd->is_our_pages);
+ else if (bmd->is_our_pages)
+ bio_for_each_segment_all(bvec, bio, i)
+ __free_page(bvec->bv_page);
+ }
bio_free_map_data(bmd);
bio_put(bio);
return ret;
@@ -1946,7 +1956,7 @@ int bio_associate_current(struct bio *bio)
/* associate blkcg if exists */
rcu_read_lock();
- css = task_subsys_state(current, blkio_subsys_id);
+ css = task_css(current, blkio_subsys_id);
if (css && css_tryget(css))
bio->bi_css = css;
rcu_read_unlock();
diff --git a/fs/block_dev.c b/fs/block_dev.c
index c7bda5cd3da..1173a4ee083 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1519,7 +1519,7 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov,
blk_start_plug(&plug);
ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
- if (ret > 0 || ret == -EIOCBQUEUED) {
+ if (ret > 0) {
ssize_t err;
err = generic_write_sync(file, pos, ret);
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index eaf133384a8..8bc5e8ccb09 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -36,16 +36,23 @@ static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
u64 extent_item_pos,
struct extent_inode_elem **eie)
{
- u64 data_offset;
- u64 data_len;
+ u64 offset = 0;
struct extent_inode_elem *e;
- data_offset = btrfs_file_extent_offset(eb, fi);
- data_len = btrfs_file_extent_num_bytes(eb, fi);
+ if (!btrfs_file_extent_compression(eb, fi) &&
+ !btrfs_file_extent_encryption(eb, fi) &&
+ !btrfs_file_extent_other_encoding(eb, fi)) {
+ u64 data_offset;
+ u64 data_len;
- if (extent_item_pos < data_offset ||
- extent_item_pos >= data_offset + data_len)
- return 1;
+ data_offset = btrfs_file_extent_offset(eb, fi);
+ data_len = btrfs_file_extent_num_bytes(eb, fi);
+
+ if (extent_item_pos < data_offset ||
+ extent_item_pos >= data_offset + data_len)
+ return 1;
+ offset = extent_item_pos - data_offset;
+ }
e = kmalloc(sizeof(*e), GFP_NOFS);
if (!e)
@@ -53,7 +60,7 @@ static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
e->next = *eie;
e->inum = key->objectid;
- e->offset = key->offset + (extent_item_pos - data_offset);
+ e->offset = key->offset + offset;
*eie = e;
return 0;
@@ -189,7 +196,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
struct extent_buffer *eb;
struct btrfs_key key;
struct btrfs_file_extent_item *fi;
- struct extent_inode_elem *eie = NULL;
+ struct extent_inode_elem *eie = NULL, *old = NULL;
u64 disk_byte;
if (level != 0) {
@@ -223,6 +230,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
if (disk_byte == wanted_disk_byte) {
eie = NULL;
+ old = NULL;
if (extent_item_pos) {
ret = check_extent_in_eb(&key, eb, fi,
*extent_item_pos,
@@ -230,18 +238,20 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
if (ret < 0)
break;
}
- if (!ret) {
- ret = ulist_add(parents, eb->start,
- (uintptr_t)eie, GFP_NOFS);
- if (ret < 0)
- break;
- if (!extent_item_pos) {
- ret = btrfs_next_old_leaf(root, path,
- time_seq);
- continue;
- }
+ if (ret > 0)
+ goto next;
+ ret = ulist_add_merge(parents, eb->start,
+ (uintptr_t)eie,
+ (u64 *)&old, GFP_NOFS);
+ if (ret < 0)
+ break;
+ if (!ret && extent_item_pos) {
+ while (old->next)
+ old = old->next;
+ old->next = eie;
}
}
+next:
ret = btrfs_next_old_item(root, path, time_seq);
}
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 5bf4c39e2ad..ed504607d8e 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -1271,7 +1271,6 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
BUG_ON(!eb_rewin);
}
- extent_buffer_get(eb_rewin);
btrfs_tree_read_unlock(eb);
free_extent_buffer(eb);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 4253ad580e3..5f8f3341c09 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -747,7 +747,7 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
WARN_ON(atomic_xchg(
&fs_info->mutually_exclusive_operation_running, 1));
task = kthread_run(btrfs_dev_replace_kthread, fs_info, "btrfs-devrepl");
- return PTR_RET(task);
+ return PTR_ERR_OR_ZERO(task);
}
static int btrfs_dev_replace_kthread(void *data)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 583d98bd065..fe443fece85 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4048,7 +4048,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
}
while (!end) {
- u64 offset_in_extent;
+ u64 offset_in_extent = 0;
/* break if the extent we found is outside the range */
if (em->start >= max || extent_map_end(em) < off)
@@ -4064,9 +4064,12 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
/*
* record the offset from the start of the extent
- * for adjusting the disk offset below
+ * for adjusting the disk offset below. Only do this if the
+ * extent isn't compressed since our in ram offset may be past
+ * what we have actually allocated on disk.
*/
- offset_in_extent = em_start - em->start;
+ if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
+ offset_in_extent = em_start - em->start;
em_end = extent_map_end(em);
em_len = em_end - em_start;
emflags = em->flags;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a005fe2c072..4d2eb641714 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -596,20 +596,29 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
if (no_splits)
goto next;
- if (em->block_start < EXTENT_MAP_LAST_BYTE &&
- em->start < start) {
+ if (em->start < start) {
split->start = em->start;
split->len = start - em->start;
- split->orig_start = em->orig_start;
- split->block_start = em->block_start;
- if (compressed)
- split->block_len = em->block_len;
- else
- split->block_len = split->len;
- split->ram_bytes = em->ram_bytes;
- split->orig_block_len = max(split->block_len,
- em->orig_block_len);
+ if (em->block_start < EXTENT_MAP_LAST_BYTE) {
+ split->orig_start = em->orig_start;
+ split->block_start = em->block_start;
+
+ if (compressed)
+ split->block_len = em->block_len;
+ else
+ split->block_len = split->len;
+ split->orig_block_len = max(split->block_len,
+ em->orig_block_len);
+ split->ram_bytes = em->ram_bytes;
+ } else {
+ split->orig_start = split->start;
+ split->block_len = 0;
+ split->block_start = em->block_start;
+ split->orig_block_len = 0;
+ split->ram_bytes = split->len;
+ }
+
split->generation = gen;
split->bdev = em->bdev;
split->flags = flags;
@@ -620,8 +629,7 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
split = split2;
split2 = NULL;
}
- if (em->block_start < EXTENT_MAP_LAST_BYTE &&
- testend && em->start + em->len > start + len) {
+ if (testend && em->start + em->len > start + len) {
u64 diff = start + len - em->start;
split->start = start + len;
@@ -630,18 +638,28 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
split->flags = flags;
split->compress_type = em->compress_type;
split->generation = gen;
- split->orig_block_len = max(em->block_len,
+
+ if (em->block_start < EXTENT_MAP_LAST_BYTE) {
+ split->orig_block_len = max(em->block_len,
em->orig_block_len);
- split->ram_bytes = em->ram_bytes;
- if (compressed) {
- split->block_len = em->block_len;
- split->block_start = em->block_start;
- split->orig_start = em->orig_start;
+ split->ram_bytes = em->ram_bytes;
+ if (compressed) {
+ split->block_len = em->block_len;
+ split->block_start = em->block_start;
+ split->orig_start = em->orig_start;
+ } else {
+ split->block_len = split->len;
+ split->block_start = em->block_start
+ + diff;
+ split->orig_start = em->orig_start;
+ }
} else {
- split->block_len = split->len;
- split->block_start = em->block_start + diff;
- split->orig_start = em->orig_start;
+ split->ram_bytes = split->len;
+ split->orig_start = split->start;
+ split->block_len = 0;
+ split->block_start = em->block_start;
+ split->orig_block_len = 0;
}
ret = add_extent_mapping(em_tree, split, modified);
@@ -1709,7 +1727,7 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
*/
BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
BTRFS_I(inode)->last_sub_trans = root->log_transid;
- if (num_written > 0 || num_written == -EIOCBQUEUED) {
+ if (num_written > 0) {
err = generic_write_sync(file, pos, num_written);
if (err < 0 && num_written > 0)
num_written = err;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 6d1b93c8aaf..7bdc83d04d5 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2166,16 +2166,23 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
continue;
- extent_offset = btrfs_file_extent_offset(leaf, extent);
- if (key.offset - extent_offset != offset)
+ /*
+ * 'offset' refers to the exact key.offset,
+ * NOT the 'offset' field in btrfs_extent_data_ref, i.e.
+ * (key.offset - extent_offset).
+ */
+ if (key.offset != offset)
continue;
+ extent_offset = btrfs_file_extent_offset(leaf, extent);
num_bytes = btrfs_file_extent_num_bytes(leaf, extent);
+
if (extent_offset >= old->extent_offset + old->offset +
old->len || extent_offset + num_bytes <=
old->extent_offset + old->offset)
continue;
+ ret = 0;
break;
}
@@ -2187,7 +2194,7 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
backref->root_id = root_id;
backref->inum = inum;
- backref->file_pos = offset + extent_offset;
+ backref->file_pos = offset;
backref->num_bytes = num_bytes;
backref->extent_offset = extent_offset;
backref->generation = btrfs_file_extent_generation(leaf, extent);
@@ -2210,7 +2217,8 @@ static noinline bool record_extent_backrefs(struct btrfs_path *path,
new->path = path;
list_for_each_entry_safe(old, tmp, &new->head, list) {
- ret = iterate_inodes_from_logical(old->bytenr, fs_info,
+ ret = iterate_inodes_from_logical(old->bytenr +
+ old->extent_offset, fs_info,
path, record_one_backref,
old);
BUG_ON(ret < 0 && ret != -ENOENT);
@@ -3166,7 +3174,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
found_key.type = BTRFS_INODE_ITEM_KEY;
found_key.offset = 0;
inode = btrfs_iget(root->fs_info->sb, &found_key, root, NULL);
- ret = PTR_RET(inode);
+ ret = PTR_ERR_OR_ZERO(inode);
if (ret && ret != -ESTALE)
goto out;
@@ -4391,9 +4399,6 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
int mask = attr->ia_valid;
int ret;
- if (newsize == oldsize)
- return 0;
-
/*
* The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
* special case where we need to update the times despite not having
@@ -5165,14 +5170,31 @@ next:
}
/* Reached end of directory/root. Bump pos past the last item. */
- if (key_type == BTRFS_DIR_INDEX_KEY)
- /*
- * 32-bit glibc will use getdents64, but then strtol -
- * so the last number we can serve is this.
- */
- ctx->pos = 0x7fffffff;
- else
- ctx->pos++;
+ ctx->pos++;
+
+ /*
+ * Stop new entries from being returned after we return the last
+ * entry.
+ *
+ * New directory entries are assigned a strictly increasing
+ * offset. This means that new entries created during readdir
+ * are *guaranteed* to be seen in the future by that readdir.
+ * This has broken buggy programs which operate on names as
+ * they're returned by readdir. Until we re-use freed offsets
+ * we have this hack to stop new entries from being returned
+ * under the assumption that they'll never reach this huge
+ * offset.
+ *
+ * This is being careful not to overflow 32bit loff_t unless the
+ * last entry requires it because doing so has broken 32bit apps
+ * in the past.
+ */
+ if (key_type == BTRFS_DIR_INDEX_KEY) {
+ if (ctx->pos >= INT_MAX)
+ ctx->pos = LLONG_MAX;
+ else
+ ctx->pos = INT_MAX;
+ }
nopos:
ret = 0;
err:
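
The readdir hunk above first bumps ctx->pos past the last returned entry and then clamps it for directory-index keys: a position still below INT_MAX is pushed to INT_MAX so entries created after this readdir (which always receive larger offsets) are not returned, and only a position that already needs more than 32 bits is pushed to LLONG_MAX, to avoid gratuitously overflowing a 32-bit loff_t. A compact sketch of just that clamping decision, using hypothetical local names:

#include <stdio.h>
#include <limits.h>

typedef long long loff_t_like;

/* Mirror the clamping applied to ctx->pos for BTRFS_DIR_INDEX_KEY. */
static loff_t_like clamp_dir_pos(loff_t_like pos)
{
	if (pos >= INT_MAX)
		return LLONG_MAX;	/* only overflow 32-bit loff_t when forced to */
	return INT_MAX;			/* hide entries created after this readdir */
}

int main(void)
{
	printf("%lld\n", clamp_dir_pos(42));                        /* 2147483647 */
	printf("%lld\n", clamp_dir_pos((loff_t_like)INT_MAX + 1));  /* LLONG_MAX */
	return 0;
}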
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index d58cce77fc6..af1931a5960 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -983,12 +983,12 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
* a dirty root struct and adds it into the list of dead roots that need to
* be deleted
*/
-int btrfs_add_dead_root(struct btrfs_root *root)
+void btrfs_add_dead_root(struct btrfs_root *root)
{
spin_lock(&root->fs_info->trans_lock);
- list_add_tail(&root->root_list, &root->fs_info->dead_roots);
+ if (list_empty(&root->root_list))
+ list_add_tail(&root->root_list, &root->fs_info->dead_roots);
spin_unlock(&root->fs_info->trans_lock);
- return 0;
}
/*
@@ -1925,7 +1925,7 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
}
root = list_first_entry(&fs_info->dead_roots,
struct btrfs_root, root_list);
- list_del(&root->root_list);
+ list_del_init(&root->root_list);
spin_unlock(&fs_info->trans_lock);
pr_debug("btrfs: cleaner removing %llu\n",
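
With list_del_init() in the cleaner and a list_empty() check before list_add_tail(), btrfs_add_dead_root() becomes safe to call more than once for the same root: a root already queued is left alone, and a node the cleaner has removed is reinitialized so it can be queued again. A small userspace rendering of that idempotent pattern with a hand-rolled circular list (not the kernel's <linux/list.h>):

#include <stdio.h>

struct list_node {
	struct list_node *prev, *next;
};

static void list_init(struct list_node *n)        { n->prev = n->next = n; }
static int  list_empty(const struct list_node *n) { return n->next == n; }

static void list_add_tail(struct list_node *n, struct list_node *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void list_del_init(struct list_node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

/* Queue a root only if it is not already on the dead list. */
static void add_dead_root(struct list_node *root, struct list_node *dead)
{
	if (list_empty(root))
		list_add_tail(root, dead);
}

int main(void)
{
	struct list_node dead, root;

	list_init(&dead);
	list_init(&root);

	add_dead_root(&root, &dead);
	add_dead_root(&root, &dead);	/* second call is a no-op */
	printf("queued: %s\n", list_empty(&dead) ? "no" : "yes");

	list_del_init(&root);		/* cleaner picks it up */
	printf("queued: %s\n", list_empty(&dead) ? "no" : "yes");
	return 0;
}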
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 005b0375d18..defbc426989 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -143,7 +143,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
-int btrfs_add_dead_root(struct btrfs_root *root);
+void btrfs_add_dead_root(struct btrfs_root *root);
int btrfs_defrag_root(struct btrfs_root *root);
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root);
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 2c679149363..ff60d8978ae 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3746,8 +3746,9 @@ next_slot:
}
log_extents:
+ btrfs_release_path(path);
+ btrfs_release_path(dst_path);
if (fast_search) {
- btrfs_release_path(dst_path);
ret = btrfs_log_changed_extents(trans, root, inode, dst_path);
if (ret) {
err = ret;
@@ -3764,8 +3765,6 @@ log_extents:
}
if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
- btrfs_release_path(path);
- btrfs_release_path(dst_path);
ret = log_directory_changes(trans, root, inode, path, dst_path);
if (ret) {
err = ret;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 78b871753cb..67a08538184 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3302,7 +3302,7 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
}
tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
- return PTR_RET(tsk);
+ return PTR_ERR_OR_ZERO(tsk);
}
int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 45e57cc3820..fc6f4f3a1a9 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -43,17 +43,18 @@ cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server)
server->secmech.md5 = crypto_alloc_shash("md5", 0, 0);
if (IS_ERR(server->secmech.md5)) {
cifs_dbg(VFS, "could not allocate crypto md5\n");
- return PTR_ERR(server->secmech.md5);
+ rc = PTR_ERR(server->secmech.md5);
+ server->secmech.md5 = NULL;
+ return rc;
}
size = sizeof(struct shash_desc) +
crypto_shash_descsize(server->secmech.md5);
server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL);
if (!server->secmech.sdescmd5) {
- rc = -ENOMEM;
crypto_free_shash(server->secmech.md5);
server->secmech.md5 = NULL;
- return rc;
+ return -ENOMEM;
}
server->secmech.sdescmd5->shash.tfm = server->secmech.md5;
server->secmech.sdescmd5->shash.flags = 0x0;
@@ -421,7 +422,7 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
if (blobptr + attrsize > blobend)
break;
if (type == NTLMSSP_AV_NB_DOMAIN_NAME) {
- if (!attrsize)
+ if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN)
break;
if (!ses->domainName) {
ses->domainName =
@@ -591,6 +592,7 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server)
{
+ int rc;
unsigned int size;
/* check if already allocated */
@@ -600,7 +602,9 @@ static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server)
server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
if (IS_ERR(server->secmech.hmacmd5)) {
cifs_dbg(VFS, "could not allocate crypto hmacmd5\n");
- return PTR_ERR(server->secmech.hmacmd5);
+ rc = PTR_ERR(server->secmech.hmacmd5);
+ server->secmech.hmacmd5 = NULL;
+ return rc;
}
size = sizeof(struct shash_desc) +
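
This cifsencrypt.c hunk, and the smb2transport.c hunks later in the series, all apply the same rule: when crypto_alloc_shash() fails it returns an ERR_PTR, and that value must not be left in server->secmech.*, otherwise cleanup paths that only test the field for non-NULL would treat the error cookie as a live handle. The field is therefore reset to NULL before the error is propagated. A condensed, self-contained sketch of the pattern with a hypothetical alloc_shash_or_err() in place of the crypto API:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO 4095
#define IS_ERR(p)  ((unsigned long)(p) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(p) ((long)(p))

struct secmech {
	void *md5;	/* stands in for a crypto_shash handle */
};

/* Hypothetical allocator that reports failure as an error pointer. */
static void *alloc_shash_or_err(int fail)
{
	return fail ? (void *)(long)-ENOMEM : malloc(16);
}

static int setup_md5(struct secmech *s, int fail)
{
	int rc;

	s->md5 = alloc_shash_or_err(fail);
	if (IS_ERR(s->md5)) {
		rc = (int)PTR_ERR(s->md5);
		s->md5 = NULL;	/* never leave an ERR_PTR behind for cleanup code */
		return rc;
	}
	return 0;
}

int main(void)
{
	struct secmech s = { 0 };

	printf("rc=%d md5=%p\n", setup_md5(&s, 1), s.md5);
	free(s.md5);	/* safe: the failed path left NULL, not an ERR_PTR */
	return 0;
}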
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 4bdd547dbf6..85ea98d139f 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -147,18 +147,17 @@ cifs_read_super(struct super_block *sb)
goto out_no_root;
}
+ if (cifs_sb_master_tcon(cifs_sb)->nocase)
+ sb->s_d_op = &cifs_ci_dentry_ops;
+ else
+ sb->s_d_op = &cifs_dentry_ops;
+
sb->s_root = d_make_root(inode);
if (!sb->s_root) {
rc = -ENOMEM;
goto out_no_root;
}
- /* do that *after* d_make_root() - we want NULL ->d_op for root here */
- if (cifs_sb_master_tcon(cifs_sb)->nocase)
- sb->s_d_op = &cifs_ci_dentry_ops;
- else
- sb->s_d_op = &cifs_dentry_ops;
-
#ifdef CONFIG_CIFS_NFSD_EXPORT
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
cifs_dbg(FYI, "export ops supported\n");
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 1fdc3704105..52ca861ed35 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -44,6 +44,7 @@
#define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1)
#define MAX_SERVER_SIZE 15
#define MAX_SHARE_SIZE 80
+#define CIFS_MAX_DOMAINNAME_LEN 256 /* max domain name length */
#define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */
#define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */
@@ -369,6 +370,9 @@ struct smb_version_operations {
void (*generate_signingkey)(struct TCP_Server_Info *server);
int (*calc_signature)(struct smb_rqst *rqst,
struct TCP_Server_Info *server);
+ int (*query_mf_symlink)(const unsigned char *path, char *pbuf,
+ unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
+ unsigned int xid);
};
struct smb_version_values {
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index f7e584d047e..b29a012bed3 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -497,5 +497,7 @@ void cifs_writev_complete(struct work_struct *work);
struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages,
work_func_t complete);
void cifs_writedata_release(struct kref *refcount);
-
+int open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
+ unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
+ unsigned int xid);
#endif /* _CIFSPROTO_H */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index fa68813396b..d67c550c498 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1675,7 +1675,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
if (string == NULL)
goto out_nomem;
- if (strnlen(string, 256) == 256) {
+ if (strnlen(string, CIFS_MAX_DOMAINNAME_LEN)
+ == CIFS_MAX_DOMAINNAME_LEN) {
printk(KERN_WARNING "CIFS: domain name too"
" long\n");
goto cifs_parse_mount_err;
@@ -2276,8 +2277,8 @@ cifs_put_smb_ses(struct cifs_ses *ses)
#ifdef CONFIG_KEYS
-/* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */
-#define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1)
+/* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */
+#define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1)
/* Populate username and pw fields from keyring if possible */
static int
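
The connect.c and sess.c hunks replace the bare 256 with CIFS_MAX_DOMAINNAME_LEN and reject a domain string that fills the whole buffer, since strnlen() returning the limit means the name was not NUL-terminated within it. A tiny standalone check in the same spirit:

#include <stdio.h>
#include <string.h>

#define CIFS_MAX_DOMAINNAME_LEN 256	/* max domain name length */

/* Return 0 if the domain name fits (terminator included), -1 otherwise. */
static int domain_name_ok(const char *name)
{
	return strnlen(name, CIFS_MAX_DOMAINNAME_LEN) == CIFS_MAX_DOMAINNAME_LEN ? -1 : 0;
}

int main(void)
{
	char too_long[CIFS_MAX_DOMAINNAME_LEN + 1];

	memset(too_long, 'a', sizeof(too_long) - 1);
	too_long[sizeof(too_long) - 1] = '\0';

	printf("%d\n", domain_name_ok("EXAMPLE"));	/* 0  */
	printf("%d\n", domain_name_ok(too_long));	/* -1 */
	return 0;
}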
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 1e57f36ea1b..9d0dd952ad7 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -647,6 +647,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
oflags, &oplock, &cfile->fid.netfid, xid);
if (rc == 0) {
cifs_dbg(FYI, "posix reopen succeeded\n");
+ oparms.reconnect = true;
goto reopen_success;
}
/*
@@ -2552,7 +2553,7 @@ cifs_writev(struct kiocb *iocb, const struct iovec *iov,
mutex_unlock(&inode->i_mutex);
}
- if (rc > 0 || rc == -EIOCBQUEUED) {
+ if (rc > 0) {
ssize_t err;
err = generic_write_sync(file, pos, rc);
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index b83c3f5646b..562044f700e 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -305,67 +305,89 @@ CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr)
}
int
-CIFSCheckMFSymlink(struct cifs_fattr *fattr,
- const unsigned char *path,
- struct cifs_sb_info *cifs_sb, unsigned int xid)
+open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
+ unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb,
+ unsigned int xid)
{
int rc;
int oplock = 0;
__u16 netfid = 0;
struct tcon_link *tlink;
- struct cifs_tcon *pTcon;
+ struct cifs_tcon *ptcon;
struct cifs_io_parms io_parms;
- u8 *buf;
- char *pbuf;
- unsigned int bytes_read = 0;
int buf_type = CIFS_NO_BUFFER;
- unsigned int link_len = 0;
FILE_ALL_INFO file_info;
- if (!CIFSCouldBeMFSymlink(fattr))
- /* it's not a symlink */
- return 0;
-
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return PTR_ERR(tlink);
- pTcon = tlink_tcon(tlink);
+ ptcon = tlink_tcon(tlink);
- rc = CIFSSMBOpen(xid, pTcon, path, FILE_OPEN, GENERIC_READ,
+ rc = CIFSSMBOpen(xid, ptcon, path, FILE_OPEN, GENERIC_READ,
CREATE_NOT_DIR, &netfid, &oplock, &file_info,
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- if (rc != 0)
- goto out;
+ if (rc != 0) {
+ cifs_put_tlink(tlink);
+ return rc;
+ }
if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) {
- CIFSSMBClose(xid, pTcon, netfid);
+ CIFSSMBClose(xid, ptcon, netfid);
+ cifs_put_tlink(tlink);
/* it's not a symlink */
- goto out;
+ return rc;
}
- buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
- if (!buf) {
- rc = -ENOMEM;
- goto out;
- }
- pbuf = buf;
io_parms.netfid = netfid;
io_parms.pid = current->tgid;
- io_parms.tcon = pTcon;
+ io_parms.tcon = ptcon;
io_parms.offset = 0;
io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE;
- rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type);
- CIFSSMBClose(xid, pTcon, netfid);
- if (rc != 0) {
- kfree(buf);
+ rc = CIFSSMBRead(xid, &io_parms, pbytes_read, &pbuf, &buf_type);
+ CIFSSMBClose(xid, ptcon, netfid);
+ cifs_put_tlink(tlink);
+ return rc;
+}
+
+
+int
+CIFSCheckMFSymlink(struct cifs_fattr *fattr,
+ const unsigned char *path,
+ struct cifs_sb_info *cifs_sb, unsigned int xid)
+{
+ int rc = 0;
+ u8 *buf = NULL;
+ unsigned int link_len = 0;
+ unsigned int bytes_read = 0;
+ struct cifs_tcon *ptcon;
+
+ if (!CIFSCouldBeMFSymlink(fattr))
+ /* it's not a symlink */
+ return 0;
+
+ buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
+ if (!buf) {
+ rc = -ENOMEM;
goto out;
}
+ ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb));
+ if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink))
+ rc = ptcon->ses->server->ops->query_mf_symlink(path, buf,
+ &bytes_read, cifs_sb, xid);
+ else
+ goto out;
+
+ if (rc != 0)
+ goto out;
+
+ if (bytes_read == 0) /* not a symlink */
+ goto out;
+
rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, NULL);
- kfree(buf);
if (rc == -EINVAL) {
/* it's not a symlink */
rc = 0;
@@ -381,7 +403,7 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr,
fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO;
fattr->cf_dtype = DT_LNK;
out:
- cifs_put_tlink(tlink);
+ kfree(buf);
return rc;
}
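
The link.c rework above splits the open/read/close sequence into open_query_close_cifs_symlink() and routes it through the new query_mf_symlink operation in struct smb_version_operations (wired up for SMB1 in smb1ops.c below), so CIFSCheckMFSymlink() only dispatches through the protocol-specific table. A stripped-down userspace sketch of that ops-table dispatch, using hypothetical names rather than anything from the real headers:

#include <stdio.h>

struct fs_ops {
	/* protocol-specific way to read a possible symlink file */
	int (*query_mf_symlink)(const char *path, char *buf,
				unsigned int *bytes_read);
};

static int smb1_query_mf_symlink(const char *path, char *buf,
				 unsigned int *bytes_read)
{
	/* pretend we opened, read and closed the file on the wire */
	snprintf(buf, 64, "target-of-%s", path);
	*bytes_read = 16;
	return 0;
}

static const struct fs_ops smb1_ops = {
	.query_mf_symlink = smb1_query_mf_symlink,
};

static int check_symlink(const struct fs_ops *ops, const char *path)
{
	char buf[64];
	unsigned int n = 0;

	if (!ops->query_mf_symlink)	/* protocol without support: not a symlink */
		return 0;
	return ops->query_mf_symlink(path, buf, &n);
}

int main(void)
{
	printf("rc=%d\n", check_symlink(&smb1_ops, "a/b"));
	return 0;
}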
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index ab877846939..69d2c826a23 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -111,6 +111,14 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
return;
}
+ /*
+ * If we know that the inode will need to be revalidated immediately,
+ * then don't create a new dentry for it. We'll end up doing an on
+ * the wire call either way and this spares us an invalidation.
+ */
+ if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL)
+ return;
+
dentry = d_alloc(parent, name);
if (!dentry)
return;
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 79358e341fd..08dd37bb23a 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -197,7 +197,7 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
bytes_ret = 0;
} else
bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName,
- 256, nls_cp);
+ CIFS_MAX_DOMAINNAME_LEN, nls_cp);
bcc_ptr += 2 * bytes_ret;
bcc_ptr += 2; /* account for null terminator */
@@ -255,8 +255,8 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
/* copy domain */
if (ses->domainName != NULL) {
- strncpy(bcc_ptr, ses->domainName, 256);
- bcc_ptr += strnlen(ses->domainName, 256);
+ strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+ bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
} /* else we will send a null domain name
so the server will default to its own domain */
*bcc_ptr = 0;
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 6457690731a..60943978aec 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -944,6 +944,7 @@ struct smb_version_operations smb1_operations = {
.mand_lock = cifs_mand_lock,
.mand_unlock_range = cifs_unlock_range,
.push_mand_locks = cifs_push_mandatory_locks,
+ .query_mf_symlink = open_query_close_cifs_symlink,
};
struct smb_version_values smb1_values = {
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 301b191270b..4f2300d020c 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -42,6 +42,7 @@
static int
smb2_crypto_shash_allocate(struct TCP_Server_Info *server)
{
+ int rc;
unsigned int size;
if (server->secmech.sdeschmacsha256 != NULL)
@@ -50,7 +51,9 @@ smb2_crypto_shash_allocate(struct TCP_Server_Info *server)
server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0);
if (IS_ERR(server->secmech.hmacsha256)) {
cifs_dbg(VFS, "could not allocate crypto hmacsha256\n");
- return PTR_ERR(server->secmech.hmacsha256);
+ rc = PTR_ERR(server->secmech.hmacsha256);
+ server->secmech.hmacsha256 = NULL;
+ return rc;
}
size = sizeof(struct shash_desc) +
@@ -87,7 +90,9 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
server->secmech.sdeschmacsha256 = NULL;
crypto_free_shash(server->secmech.hmacsha256);
server->secmech.hmacsha256 = NULL;
- return PTR_ERR(server->secmech.cmacaes);
+ rc = PTR_ERR(server->secmech.cmacaes);
+ server->secmech.cmacaes = NULL;
+ return rc;
}
size = sizeof(struct shash_desc) +
diff --git a/fs/dcache.c b/fs/dcache.c
index 87bdb5329c3..5aa53bc056b 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -229,7 +229,7 @@ static void __d_free(struct rcu_head *head)
*/
static void d_free(struct dentry *dentry)
{
- BUG_ON(dentry->d_count);
+ BUG_ON(dentry->d_lockref.count);
this_cpu_dec(nr_dentry);
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
@@ -467,12 +467,12 @@ relock:
}
if (ref)
- dentry->d_count--;
+ dentry->d_lockref.count--;
/*
* inform the fs via d_prune that this dentry is about to be
* unhashed and destroyed.
*/
- if (dentry->d_flags & DCACHE_OP_PRUNE)
+ if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
dentry->d_op->d_prune(dentry);
dentry_lru_del(dentry);
@@ -513,15 +513,10 @@ void dput(struct dentry *dentry)
return;
repeat:
- if (dentry->d_count == 1)
+ if (dentry->d_lockref.count == 1)
might_sleep();
- spin_lock(&dentry->d_lock);
- BUG_ON(!dentry->d_count);
- if (dentry->d_count > 1) {
- dentry->d_count--;
- spin_unlock(&dentry->d_lock);
+ if (lockref_put_or_lock(&dentry->d_lockref))
return;
- }
if (dentry->d_flags & DCACHE_OP_DELETE) {
if (dentry->d_op->d_delete(dentry))
@@ -535,7 +530,7 @@ repeat:
dentry->d_flags |= DCACHE_REFERENCED;
dentry_lru_add(dentry);
- dentry->d_count--;
+ dentry->d_lockref.count--;
spin_unlock(&dentry->d_lock);
return;
@@ -590,7 +585,7 @@ int d_invalidate(struct dentry * dentry)
* We also need to leave mountpoints alone,
* directory or not.
*/
- if (dentry->d_count > 1 && dentry->d_inode) {
+ if (dentry->d_lockref.count > 1 && dentry->d_inode) {
if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
spin_unlock(&dentry->d_lock);
return -EBUSY;
@@ -606,20 +601,33 @@ EXPORT_SYMBOL(d_invalidate);
/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
- dentry->d_count++;
+ dentry->d_lockref.count++;
}
static inline void __dget(struct dentry *dentry)
{
- spin_lock(&dentry->d_lock);
- __dget_dlock(dentry);
- spin_unlock(&dentry->d_lock);
+ lockref_get(&dentry->d_lockref);
}
struct dentry *dget_parent(struct dentry *dentry)
{
+ int gotref;
struct dentry *ret;
+ /*
+ * Do optimistic parent lookup without any
+ * locking.
+ */
+ rcu_read_lock();
+ ret = ACCESS_ONCE(dentry->d_parent);
+ gotref = lockref_get_not_zero(&ret->d_lockref);
+ rcu_read_unlock();
+ if (likely(gotref)) {
+ if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
+ return ret;
+ dput(ret);
+ }
+
repeat:
/*
* Don't need rcu_dereference because we re-check it was correct under
@@ -634,8 +642,8 @@ repeat:
goto repeat;
}
rcu_read_unlock();
- BUG_ON(!ret->d_count);
- ret->d_count++;
+ BUG_ON(!ret->d_lockref.count);
+ ret->d_lockref.count++;
spin_unlock(&ret->d_lock);
return ret;
}
@@ -718,7 +726,15 @@ restart:
spin_lock(&inode->i_lock);
hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
spin_lock(&dentry->d_lock);
- if (!dentry->d_count) {
+ if (!dentry->d_lockref.count) {
+ /*
+ * inform the fs via d_prune that this dentry
+ * is about to be unhashed and destroyed.
+ */
+ if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
+ !d_unhashed(dentry))
+ dentry->d_op->d_prune(dentry);
+
__dget_dlock(dentry);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
@@ -763,12 +779,8 @@ static void try_prune_one_dentry(struct dentry *dentry)
/* Prune ancestors. */
dentry = parent;
while (dentry) {
- spin_lock(&dentry->d_lock);
- if (dentry->d_count > 1) {
- dentry->d_count--;
- spin_unlock(&dentry->d_lock);
+ if (lockref_put_or_lock(&dentry->d_lockref))
return;
- }
dentry = dentry_kill(dentry, 1);
}
}
@@ -793,7 +805,7 @@ static void shrink_dentry_list(struct list_head *list)
* the LRU because of laziness during lookup. Do not free
* it - just keep it off the LRU list.
*/
- if (dentry->d_count) {
+ if (dentry->d_lockref.count) {
dentry_lru_del(dentry);
spin_unlock(&dentry->d_lock);
continue;
@@ -907,13 +919,14 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
* inform the fs that this dentry is about to be
* unhashed and destroyed.
*/
- if (dentry->d_flags & DCACHE_OP_PRUNE)
+ if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
+ !d_unhashed(dentry))
dentry->d_op->d_prune(dentry);
dentry_lru_del(dentry);
__d_shrink(dentry);
- if (dentry->d_count != 0) {
+ if (dentry->d_lockref.count != 0) {
printk(KERN_ERR
"BUG: Dentry %p{i=%lx,n=%s}"
" still in use (%d)"
@@ -922,7 +935,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
dentry->d_inode ?
dentry->d_inode->i_ino : 0UL,
dentry->d_name.name,
- dentry->d_count,
+ dentry->d_lockref.count,
dentry->d_sb->s_type->name,
dentry->d_sb->s_id);
BUG();
@@ -933,7 +946,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
list_del(&dentry->d_u.d_child);
} else {
parent = dentry->d_parent;
- parent->d_count--;
+ parent->d_lockref.count--;
list_del(&dentry->d_u.d_child);
}
@@ -981,7 +994,7 @@ void shrink_dcache_for_umount(struct super_block *sb)
dentry = sb->s_root;
sb->s_root = NULL;
- dentry->d_count--;
+ dentry->d_lockref.count--;
shrink_dcache_for_umount_subtree(dentry);
while (!hlist_bl_empty(&sb->s_anon)) {
@@ -1147,7 +1160,7 @@ resume:
* loop in shrink_dcache_parent() might not make any progress
* and loop forever.
*/
- if (dentry->d_count) {
+ if (dentry->d_lockref.count) {
dentry_lru_del(dentry);
} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
dentry_lru_move_list(dentry, dispose);
@@ -1269,7 +1282,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
smp_wmb();
dentry->d_name.name = dname;
- dentry->d_count = 1;
+ dentry->d_lockref.count = 1;
dentry->d_flags = 0;
spin_lock_init(&dentry->d_lock);
seqcount_init(&dentry->d_seq);
@@ -1782,7 +1795,7 @@ static noinline enum slow_d_compare slow_dentry_cmp(
* without taking d_lock and checking d_seq sequence count against @seq
* returned here.
*
- * A refcount may be taken on the found dentry with the __d_rcu_to_refcount
+ * A refcount may be taken on the found dentry with the d_rcu_to_refcount
* function.
*
* Alternatively, __d_lookup_rcu may be called again to look up the child of
@@ -1970,7 +1983,7 @@ struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
goto next;
}
- dentry->d_count++;
+ dentry->d_lockref.count++;
found = dentry;
spin_unlock(&dentry->d_lock);
break;
@@ -2069,7 +2082,7 @@ again:
spin_lock(&dentry->d_lock);
inode = dentry->d_inode;
isdir = S_ISDIR(inode->i_mode);
- if (dentry->d_count == 1) {
+ if (dentry->d_lockref.count == 1) {
if (!spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
cpu_relax();
@@ -2724,6 +2737,17 @@ char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
return memcpy(buffer, temp, sz);
}
+char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
+{
+ char *end = buffer + buflen;
+ /* these dentries are never renamed, so d_lock is not needed */
+ if (prepend(&end, &buflen, " (deleted)", 11) ||
+ prepend_name(&end, &buflen, &dentry->d_name) ||
+ prepend(&end, &buflen, "/", 1))
+ end = ERR_PTR(-ENAMETOOLONG);
+ return end;
+}
+
/*
* Write full pathname from the root of the filesystem into the buffer.
*/
@@ -2937,7 +2961,7 @@ resume:
}
if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
dentry->d_flags |= DCACHE_GENOCIDE;
- dentry->d_count--;
+ dentry->d_lockref.count--;
}
spin_unlock(&dentry->d_lock);
}
@@ -2945,7 +2969,7 @@ resume:
struct dentry *child = this_parent;
if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
this_parent->d_flags |= DCACHE_GENOCIDE;
- this_parent->d_count--;
+ this_parent->d_lockref.count--;
}
this_parent = try_to_ascend(this_parent, locked, seq);
if (!this_parent)
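
The dcache changes above fold the bare d_count into d_lockref, so the common dput() path can use lockref_put_or_lock(): drop a reference without holding d_lock for the whole operation when the count stays positive, and only end up with the lock held when this put may be the last reference. A much-simplified userspace model of that "decrement or lock" contract, using a pthread mutex where the kernel uses a spinlock plus cmpxchg fast paths (build with -pthread):

#include <pthread.h>
#include <stdio.h>

struct lockref_like {
	pthread_mutex_t lock;
	int count;
};

/*
 * Return 1 after decrementing the count if it was greater than 1 (the lock
 * is released again).  Return 0 with the lock still held when the caller is
 * about to drop the final reference and must finish the teardown itself.
 */
static int put_or_lock(struct lockref_like *ref)
{
	pthread_mutex_lock(&ref->lock);
	if (ref->count > 1) {
		ref->count--;
		pthread_mutex_unlock(&ref->lock);
		return 1;
	}
	return 0;	/* count == 1: caller still holds ref->lock */
}

int main(void)
{
	struct lockref_like ref = { PTHREAD_MUTEX_INITIALIZER, 2 };

	if (put_or_lock(&ref))
		printf("fast path, count now %d\n", ref.count);

	if (!put_or_lock(&ref)) {
		printf("last reference, tearing down under the lock\n");
		ref.count = 0;
		pthread_mutex_unlock(&ref.lock);
	}
	return 0;
}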
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 4888cb3fdef..c7c83ff0f75 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -533,8 +533,7 @@ EXPORT_SYMBOL_GPL(debugfs_remove);
*/
void debugfs_remove_recursive(struct dentry *dentry)
{
- struct dentry *child;
- struct dentry *parent;
+ struct dentry *child, *next, *parent;
if (IS_ERR_OR_NULL(dentry))
return;
@@ -544,61 +543,37 @@ void debugfs_remove_recursive(struct dentry *dentry)
return;
parent = dentry;
+ down:
mutex_lock(&parent->d_inode->i_mutex);
+ list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) {
+ if (!debugfs_positive(child))
+ continue;
- while (1) {
- /*
- * When all dentries under "parent" has been removed,
- * walk up the tree until we reach our starting point.
- */
- if (list_empty(&parent->d_subdirs)) {
- mutex_unlock(&parent->d_inode->i_mutex);
- if (parent == dentry)
- break;
- parent = parent->d_parent;
- mutex_lock(&parent->d_inode->i_mutex);
- }
- child = list_entry(parent->d_subdirs.next, struct dentry,
- d_u.d_child);
- next_sibling:
-
- /*
- * If "child" isn't empty, walk down the tree and
- * remove all its descendants first.
- */
+ /* perhaps simple_empty(child) makes more sense */
if (!list_empty(&child->d_subdirs)) {
mutex_unlock(&parent->d_inode->i_mutex);
parent = child;
- mutex_lock(&parent->d_inode->i_mutex);
- continue;
+ goto down;
}
- __debugfs_remove(child, parent);
- if (parent->d_subdirs.next == &child->d_u.d_child) {
- /*
- * Try the next sibling.
- */
- if (child->d_u.d_child.next != &parent->d_subdirs) {
- child = list_entry(child->d_u.d_child.next,
- struct dentry,
- d_u.d_child);
- goto next_sibling;
- }
-
- /*
- * Avoid infinite loop if we fail to remove
- * one dentry.
- */
- mutex_unlock(&parent->d_inode->i_mutex);
- break;
- }
- simple_release_fs(&debugfs_mount, &debugfs_mount_count);
+ up:
+ if (!__debugfs_remove(child, parent))
+ simple_release_fs(&debugfs_mount, &debugfs_mount_count);
}
- parent = dentry->d_parent;
+ mutex_unlock(&parent->d_inode->i_mutex);
+ child = parent;
+ parent = parent->d_parent;
mutex_lock(&parent->d_inode->i_mutex);
- __debugfs_remove(dentry, parent);
+
+ if (child != dentry) {
+ next = list_entry(child->d_u.d_child.next, struct dentry,
+ d_u.d_child);
+ goto up;
+ }
+
+ if (!__debugfs_remove(child, parent))
+ simple_release_fs(&debugfs_mount, &debugfs_mount_count);
mutex_unlock(&parent->d_inode->i_mutex);
- simple_release_fs(&debugfs_mount, &debugfs_mount_count);
}
EXPORT_SYMBOL_GPL(debugfs_remove_recursive);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 7ab90f5081e..1782023bd68 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -127,6 +127,7 @@ struct dio {
spinlock_t bio_lock; /* protects BIO fields below */
int page_errors; /* errno from get_user_pages() */
int is_async; /* is IO async ? */
+ bool defer_completion; /* defer AIO completion to workqueue? */
int io_error; /* IO error in completion path */
unsigned long refcount; /* direct_io_worker() and bios */
struct bio *bio_list; /* singly linked via bi_private */
@@ -141,7 +142,10 @@ struct dio {
* allocation time. Don't add new fields after pages[] unless you
* wish that they not be zeroed.
*/
- struct page *pages[DIO_PAGES]; /* page buffer */
+ union {
+ struct page *pages[DIO_PAGES]; /* page buffer */
+ struct work_struct complete_work;/* deferred AIO completion */
+ };
} ____cacheline_aligned_in_smp;
static struct kmem_cache *dio_cache __read_mostly;
@@ -221,16 +225,16 @@ static inline struct page *dio_get_page(struct dio *dio,
* dio_complete() - called when all DIO BIO I/O has been completed
* @offset: the byte offset in the file of the completed operation
*
- * This releases locks as dictated by the locking type, lets interested parties
- * know that a DIO operation has completed, and calculates the resulting return
- * code for the operation.
+ * This drops i_dio_count, lets interested parties know that a DIO operation
+ * has completed, and calculates the resulting return code for the operation.
*
* It lets the filesystem know if it registered an interest earlier via
* get_block. Pass the private field of the map buffer_head so that
* filesystems can use it to hold additional state between get_block calls and
* dio_complete.
*/
-static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is_async)
+static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret,
+ bool is_async)
{
ssize_t transferred = 0;
@@ -258,19 +262,36 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, bool is
if (ret == 0)
ret = transferred;
- if (dio->end_io && dio->result) {
- dio->end_io(dio->iocb, offset, transferred,
- dio->private, ret, is_async);
- } else {
- inode_dio_done(dio->inode);
- if (is_async)
- aio_complete(dio->iocb, ret, 0);
+ if (dio->end_io && dio->result)
+ dio->end_io(dio->iocb, offset, transferred, dio->private);
+
+ inode_dio_done(dio->inode);
+ if (is_async) {
+ if (dio->rw & WRITE) {
+ int err;
+
+ err = generic_write_sync(dio->iocb->ki_filp, offset,
+ transferred);
+ if (err < 0 && ret > 0)
+ ret = err;
+ }
+
+ aio_complete(dio->iocb, ret, 0);
}
+ kmem_cache_free(dio_cache, dio);
return ret;
}
+static void dio_aio_complete_work(struct work_struct *work)
+{
+ struct dio *dio = container_of(work, struct dio, complete_work);
+
+ dio_complete(dio, dio->iocb->ki_pos, 0, true);
+}
+
static int dio_bio_complete(struct dio *dio, struct bio *bio);
+
/*
* Asynchronous IO callback.
*/
@@ -290,8 +311,13 @@ static void dio_bio_end_aio(struct bio *bio, int error)
spin_unlock_irqrestore(&dio->bio_lock, flags);
if (remaining == 0) {
- dio_complete(dio, dio->iocb->ki_pos, 0, true);
- kmem_cache_free(dio_cache, dio);
+ if (dio->result && dio->defer_completion) {
+ INIT_WORK(&dio->complete_work, dio_aio_complete_work);
+ queue_work(dio->inode->i_sb->s_dio_done_wq,
+ &dio->complete_work);
+ } else {
+ dio_complete(dio, dio->iocb->ki_pos, 0, true);
+ }
}
}
@@ -511,6 +537,41 @@ static inline int dio_bio_reap(struct dio *dio, struct dio_submit *sdio)
}
/*
+ * Create workqueue for deferred direct IO completions. We allocate the
+ * workqueue when it's first needed. This avoids creating the workqueue for
+ * filesystems that don't need it and also allows us to create the workqueue
+ * late enough so that we can include s_id in the name of the workqueue.
+ */
+static int sb_init_dio_done_wq(struct super_block *sb)
+{
+ struct workqueue_struct *wq = alloc_workqueue("dio/%s",
+ WQ_MEM_RECLAIM, 0,
+ sb->s_id);
+ if (!wq)
+ return -ENOMEM;
+ /*
+ * This has to be atomic as more DIOs can race to create the workqueue
+ */
+ cmpxchg(&sb->s_dio_done_wq, NULL, wq);
+ /* Someone created workqueue before us? Free ours... */
+ if (wq != sb->s_dio_done_wq)
+ destroy_workqueue(wq);
+ return 0;
+}
+
+static int dio_set_defer_completion(struct dio *dio)
+{
+ struct super_block *sb = dio->inode->i_sb;
+
+ if (dio->defer_completion)
+ return 0;
+ dio->defer_completion = true;
+ if (!sb->s_dio_done_wq)
+ return sb_init_dio_done_wq(sb);
+ return 0;
+}
+
+/*
* Call into the fs to map some more disk blocks. We record the current number
* of available blocks at sdio->blocks_available. These are in units of the
* fs blocksize, (1 << inode->i_blkbits).
@@ -581,6 +642,9 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
/* Store for completion */
dio->private = map_bh->b_private;
+
+ if (ret == 0 && buffer_defer_completion(map_bh))
+ ret = dio_set_defer_completion(dio);
}
return ret;
}
@@ -1129,11 +1193,6 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
}
/*
- * Will be decremented at I/O completion time.
- */
- atomic_inc(&inode->i_dio_count);
-
- /*
* For file extending writes updating i_size before data
* writeouts complete can expose uninitialized blocks. So
* even for AIO, we need to wait for i/o to complete before
@@ -1141,11 +1200,33 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
*/
dio->is_async = !is_sync_kiocb(iocb) && !((rw & WRITE) &&
(end > i_size_read(inode)));
-
- retval = 0;
-
dio->inode = inode;
dio->rw = rw;
+
+ /*
+ * For AIO O_(D)SYNC writes we need to defer completions to a workqueue
+ * so that we can call ->fsync.
+ */
+ if (dio->is_async && (rw & WRITE) &&
+ ((iocb->ki_filp->f_flags & O_DSYNC) ||
+ IS_SYNC(iocb->ki_filp->f_mapping->host))) {
+ retval = dio_set_defer_completion(dio);
+ if (retval) {
+ /*
+ * We grab i_mutex only for reads so we don't have
+ * to release it here
+ */
+ kmem_cache_free(dio_cache, dio);
+ goto out;
+ }
+ }
+
+ /*
+ * Will be decremented at I/O completion time.
+ */
+ atomic_inc(&inode->i_dio_count);
+
+ retval = 0;
sdio.blkbits = blkbits;
sdio.blkfactor = i_blkbits - blkbits;
sdio.block_in_file = offset >> blkbits;
@@ -1269,7 +1350,6 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
if (drop_refcount(dio) == 0) {
retval = dio_complete(dio, offset, retval, false);
- kmem_cache_free(dio_cache, dio);
} else
BUG_ON(retval != -EIOCBQUEUED);
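
Two ideas drive the direct-io.c changes above: AIO O_(D)SYNC writes defer their completion (and the generic_write_sync() call) to a per-superblock workqueue, and that workqueue is created lazily with cmpxchg() so racing DIOs install exactly one instance and the losers free theirs. A userspace approximation of the install-once race using C11 atomics in place of cmpxchg(), with hypothetical make_wq()/free_wq() stand-ins for alloc_workqueue()/destroy_workqueue():

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct wq { int id; };

static struct wq *make_wq(void)        { return calloc(1, sizeof(struct wq)); }
static void       free_wq(struct wq *w) { free(w); }

static _Atomic(struct wq *) sb_dio_done_wq;

/* Create the shared workqueue on first use; losers of the race free theirs. */
static int init_dio_done_wq(void)
{
	struct wq *wq = make_wq();
	struct wq *expected = NULL;

	if (!wq)
		return -1;
	if (!atomic_compare_exchange_strong(&sb_dio_done_wq, &expected, wq))
		free_wq(wq);	/* someone else installed one first */
	return 0;
}

int main(void)
{
	init_dio_done_wq();
	init_dio_done_wq();	/* second call keeps the existing queue */
	printf("wq installed: %p\n", (void *)atomic_load(&sb_dio_done_wq));
	return 0;
}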
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 27a6ba9aaee..0e90f0c91b9 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -267,10 +267,7 @@ void dlm_callback_work(struct work_struct *work)
int dlm_callback_start(struct dlm_ls *ls)
{
ls->ls_callback_wq = alloc_workqueue("dlm_callback",
- WQ_UNBOUND |
- WQ_MEM_RECLAIM |
- WQ_NON_REENTRANT,
- 0);
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
if (!ls->ls_callback_wq) {
log_print("can't start dlm_callback workqueue");
return -ENOMEM;
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 911649a47dd..142e21655ee 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -493,7 +493,6 @@ static ssize_t device_write(struct file *file, const char __user *buf,
{
struct dlm_user_proc *proc = file->private_data;
struct dlm_write_request *kbuf;
- sigset_t tmpsig, allsigs;
int error;
#ifdef CONFIG_COMPAT
@@ -557,9 +556,6 @@ static ssize_t device_write(struct file *file, const char __user *buf,
goto out_free;
}
- sigfillset(&allsigs);
- sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
-
error = -EINVAL;
switch (kbuf->cmd)
@@ -567,7 +563,7 @@ static ssize_t device_write(struct file *file, const char __user *buf,
case DLM_USER_LOCK:
if (!proc) {
log_print("no locking on control device");
- goto out_sig;
+ goto out_free;
}
error = device_user_lock(proc, &kbuf->i.lock);
break;
@@ -575,7 +571,7 @@ static ssize_t device_write(struct file *file, const char __user *buf,
case DLM_USER_UNLOCK:
if (!proc) {
log_print("no locking on control device");
- goto out_sig;
+ goto out_free;
}
error = device_user_unlock(proc, &kbuf->i.lock);
break;
@@ -583,7 +579,7 @@ static ssize_t device_write(struct file *file, const char __user *buf,
case DLM_USER_DEADLOCK:
if (!proc) {
log_print("no locking on control device");
- goto out_sig;
+ goto out_free;
}
error = device_user_deadlock(proc, &kbuf->i.lock);
break;
@@ -591,7 +587,7 @@ static ssize_t device_write(struct file *file, const char __user *buf,
case DLM_USER_CREATE_LOCKSPACE:
if (proc) {
log_print("create/remove only on control device");
- goto out_sig;
+ goto out_free;
}
error = device_create_lockspace(&kbuf->i.lspace);
break;
@@ -599,7 +595,7 @@ static ssize_t device_write(struct file *file, const char __user *buf,
case DLM_USER_REMOVE_LOCKSPACE:
if (proc) {
log_print("create/remove only on control device");
- goto out_sig;
+ goto out_free;
}
error = device_remove_lockspace(&kbuf->i.lspace);
break;
@@ -607,7 +603,7 @@ static ssize_t device_write(struct file *file, const char __user *buf,
case DLM_USER_PURGE:
if (!proc) {
log_print("no locking on control device");
- goto out_sig;
+ goto out_free;
}
error = device_user_purge(proc, &kbuf->i.purge);
break;
@@ -617,8 +613,6 @@ static ssize_t device_write(struct file *file, const char __user *buf,
kbuf->cmd);
}
- out_sig:
- sigprocmask(SIG_SETMASK, &tmpsig, NULL);
out_free:
kfree(kbuf);
return error;
@@ -659,15 +653,11 @@ static int device_close(struct inode *inode, struct file *file)
{
struct dlm_user_proc *proc = file->private_data;
struct dlm_ls *ls;
- sigset_t tmpsig, allsigs;
ls = dlm_find_lockspace_local(proc->lockspace);
if (!ls)
return -ENOENT;
- sigfillset(&allsigs);
- sigprocmask(SIG_BLOCK, &allsigs, &tmpsig);
-
set_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags);
dlm_clear_proc_locks(ls, proc);
@@ -685,9 +675,6 @@ static int device_close(struct inode *inode, struct file *file)
/* FIXME: AUTOFREE: if this ls is no longer used do
device_remove_lockspace() */
- sigprocmask(SIG_SETMASK, &tmpsig, NULL);
- recalc_sigpending();
-
return 0;
}
diff --git a/fs/efs/inode.c b/fs/efs/inode.c
index f3913eb2c47..d15ccf20f1b 100644
--- a/fs/efs/inode.c
+++ b/fs/efs/inode.c
@@ -57,7 +57,7 @@ struct inode *efs_iget(struct super_block *super, unsigned long ino)
struct inode *inode;
inode = iget_locked(super, ino);
- if (IS_ERR(inode))
+ if (!inode)
return ERR_PTR(-ENOMEM);
if (!(inode->i_state & I_NEW))
return inode;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 9ad17b15b45..293f86741dd 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1792,7 +1792,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
{
int error;
int did_lock_epmutex = 0;
- struct file *file, *tfile;
+ struct fd f, tf;
struct eventpoll *ep;
struct epitem *epi;
struct epoll_event epds;
@@ -1802,20 +1802,19 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
copy_from_user(&epds, event, sizeof(struct epoll_event)))
goto error_return;
- /* Get the "struct file *" for the eventpoll file */
error = -EBADF;
- file = fget(epfd);
- if (!file)
+ f = fdget(epfd);
+ if (!f.file)
goto error_return;
/* Get the "struct file *" for the target file */
- tfile = fget(fd);
- if (!tfile)
+ tf = fdget(fd);
+ if (!tf.file)
goto error_fput;
/* The target file descriptor must support poll */
error = -EPERM;
- if (!tfile->f_op || !tfile->f_op->poll)
+ if (!tf.file->f_op || !tf.file->f_op->poll)
goto error_tgt_fput;
/* Check if EPOLLWAKEUP is allowed */
@@ -1828,14 +1827,14 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
* adding an epoll file descriptor inside itself.
*/
error = -EINVAL;
- if (file == tfile || !is_file_epoll(file))
+ if (f.file == tf.file || !is_file_epoll(f.file))
goto error_tgt_fput;
/*
* At this point it is safe to assume that the "private_data" contains
* our own data structure.
*/
- ep = file->private_data;
+ ep = f.file->private_data;
/*
* When we insert an epoll file descriptor, inside another epoll file
@@ -1854,14 +1853,14 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
did_lock_epmutex = 1;
}
if (op == EPOLL_CTL_ADD) {
- if (is_file_epoll(tfile)) {
+ if (is_file_epoll(tf.file)) {
error = -ELOOP;
- if (ep_loop_check(ep, tfile) != 0) {
+ if (ep_loop_check(ep, tf.file) != 0) {
clear_tfile_check_list();
goto error_tgt_fput;
}
} else
- list_add(&tfile->f_tfile_llink, &tfile_check_list);
+ list_add(&tf.file->f_tfile_llink, &tfile_check_list);
}
mutex_lock_nested(&ep->mtx, 0);
@@ -1871,14 +1870,14 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
* above, we can be sure to be able to use the item looked up by
* ep_find() till we release the mutex.
*/
- epi = ep_find(ep, tfile, fd);
+ epi = ep_find(ep, tf.file, fd);
error = -EINVAL;
switch (op) {
case EPOLL_CTL_ADD:
if (!epi) {
epds.events |= POLLERR | POLLHUP;
- error = ep_insert(ep, &epds, tfile, fd);
+ error = ep_insert(ep, &epds, tf.file, fd);
} else
error = -EEXIST;
clear_tfile_check_list();
@@ -1903,9 +1902,9 @@ error_tgt_fput:
if (did_lock_epmutex)
mutex_unlock(&epmutex);
- fput(tfile);
+ fdput(tf);
error_fput:
- fput(file);
+ fdput(f);
error_return:
return error;
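
The eventpoll hunks convert epoll_ctl() from fget()/fput() to the lighter fdget()/fdput() pair while keeping the user-visible checks: the target descriptor must support poll, an epoll fd cannot be added to itself, and adding one epoll fd inside another is rejected with -ELOOP when it would create a cycle. A small userspace program exercising the self-add and loop checks (error handling trimmed for brevity):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void)
{
	int ep1 = epoll_create1(0);
	int ep2 = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN };

	/* An epoll fd cannot watch itself. */
	if (epoll_ctl(ep1, EPOLL_CTL_ADD, ep1, &ev) < 0)
		printf("self add: %s\n", strerror(errno));	/* EINVAL */

	/* Nesting is allowed until it would form a cycle. */
	epoll_ctl(ep1, EPOLL_CTL_ADD, ep2, &ev);
	if (epoll_ctl(ep2, EPOLL_CTL_ADD, ep1, &ev) < 0)
		printf("loop add: %s\n", strerror(errno));	/* ELOOP */

	close(ep1);
	close(ep2);
	return 0;
}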
diff --git a/fs/exec.c b/fs/exec.c
index 9c73def8764..fd774c7cb48 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -608,7 +608,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
return -ENOMEM;
lru_add_drain();
- tlb_gather_mmu(&tlb, mm, 0);
+ tlb_gather_mmu(&tlb, mm, old_start, old_end);
if (new_end > old_start) {
/*
* when the old and new regions overlap clear from new_end.
@@ -625,7 +625,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
free_pgd_range(&tlb, old_start, old_end, new_end,
vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
}
- tlb_finish_mmu(&tlb, new_end, old_end);
+ tlb_finish_mmu(&tlb, old_start, old_end);
/*
* Shrink the vma to just the new range. Always succeeds.
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index f522425aaa2..bafdd48eefd 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -41,7 +41,7 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
/**
* Check if the given dir-inode refers to an htree-indexed directory
- * (or a directory which chould potentially get coverted to use htree
+ * (or a directory which could potentially get converted to use htree
* indexing).
*
* Return 1 if it is a dx dir, 0 if not
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index c47f1475072..c50c7619037 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -27,6 +27,7 @@
#include <linux/seq_file.h>
#include <linux/log2.h>
#include <linux/cleancache.h>
+#include <linux/namei.h>
#include <asm/uaccess.h>
@@ -819,6 +820,7 @@ enum {
Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh,
Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev,
+ Opt_journal_path,
Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
Opt_data_err_abort, Opt_data_err_ignore,
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
@@ -860,6 +862,7 @@ static const match_table_t tokens = {
{Opt_journal_update, "journal=update"},
{Opt_journal_inum, "journal=%u"},
{Opt_journal_dev, "journal_dev=%u"},
+ {Opt_journal_path, "journal_path=%s"},
{Opt_abort, "abort"},
{Opt_data_journal, "data=journal"},
{Opt_data_ordered, "data=ordered"},
@@ -975,6 +978,11 @@ static int parse_options (char *options, struct super_block *sb,
int option;
kuid_t uid;
kgid_t gid;
+ char *journal_path;
+ struct inode *journal_inode;
+ struct path path;
+ int error;
+
#ifdef CONFIG_QUOTA
int qfmt;
#endif
@@ -1129,6 +1137,41 @@ static int parse_options (char *options, struct super_block *sb,
return 0;
*journal_devnum = option;
break;
+ case Opt_journal_path:
+ if (is_remount) {
+ ext3_msg(sb, KERN_ERR, "error: cannot specify "
+ "journal on remount");
+ return 0;
+ }
+
+ journal_path = match_strdup(&args[0]);
+ if (!journal_path) {
+ ext3_msg(sb, KERN_ERR, "error: could not dup "
+ "journal device string");
+ return 0;
+ }
+
+ error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
+ if (error) {
+ ext3_msg(sb, KERN_ERR, "error: could not find "
+ "journal device path: error %d", error);
+ kfree(journal_path);
+ return 0;
+ }
+
+ journal_inode = path.dentry->d_inode;
+ if (!S_ISBLK(journal_inode->i_mode)) {
+ ext3_msg(sb, KERN_ERR, "error: journal path %s "
+ "is not a block device", journal_path);
+ path_put(&path);
+ kfree(journal_path);
+ return 0;
+ }
+
+ *journal_devnum = new_encode_dev(journal_inode->i_rdev);
+ path_put(&path);
+ kfree(journal_path);
+ break;
case Opt_noload:
set_opt (sbi->s_mount_opt, NOLOAD);
break;
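
The new ext3 journal_path= option resolves a filesystem path with kern_path(), insists the result is a block device, and converts its i_rdev into the journal device number via new_encode_dev(). The same resolution can be sketched from userspace with stat(2), where reading st_rdev roughly corresponds to the kernel's device-number encoding (a simplification, and "/dev/sda1" below is only a placeholder path):

#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

/* Resolve a path and report the block device number behind it, if any. */
static int journal_devnum_from_path(const char *path, dev_t *devnum)
{
	struct stat st;

	if (stat(path, &st) != 0) {
		perror(path);
		return -1;
	}
	if (!S_ISBLK(st.st_mode)) {
		fprintf(stderr, "%s is not a block device\n", path);
		return -1;
	}
	*devnum = st.st_rdev;
	return 0;
}

int main(void)
{
	dev_t dev;

	if (journal_devnum_from_path("/dev/sda1", &dev) == 0)
		printf("journal device %u:%u\n", major(dev), minor(dev));
	return 0;
}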
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index ddd715e42a5..dc5d572ebd6 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -184,6 +184,7 @@ void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_fsblk_t start, tmp;
int flex_bg = 0;
+ struct ext4_group_info *grp;
J_ASSERT_BH(bh, buffer_locked(bh));
@@ -191,11 +192,9 @@ void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
* essentially implementing a per-group read-only flag. */
if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
ext4_error(sb, "Checksum bad for group %u", block_group);
- ext4_free_group_clusters_set(sb, gdp, 0);
- ext4_free_inodes_set(sb, gdp, 0);
- ext4_itable_unused_set(sb, gdp, 0);
- memset(bh->b_data, 0xff, sb->s_blocksize);
- ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
+ grp = ext4_get_group_info(sb, block_group);
+ set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
+ set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
return;
}
memset(bh->b_data, 0, sb->s_blocksize);
@@ -305,7 +304,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
*/
static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
struct ext4_group_desc *desc,
- unsigned int block_group,
+ ext4_group_t block_group,
struct buffer_head *bh)
{
ext4_grpblk_t offset;
@@ -352,10 +351,11 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
void ext4_validate_block_bitmap(struct super_block *sb,
struct ext4_group_desc *desc,
- unsigned int block_group,
+ ext4_group_t block_group,
struct buffer_head *bh)
{
ext4_fsblk_t blk;
+ struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);
if (buffer_verified(bh))
return;
@@ -366,12 +366,14 @@ void ext4_validate_block_bitmap(struct super_block *sb,
ext4_unlock_group(sb, block_group);
ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
block_group, blk);
+ set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
return;
}
if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
desc, bh))) {
ext4_unlock_group(sb, block_group);
ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
+ set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
return;
}
set_buffer_verified(bh);
@@ -445,7 +447,10 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
return bh;
verify:
ext4_validate_block_bitmap(sb, desc, block_group, bh);
- return bh;
+ if (buffer_verified(bh))
+ return bh;
+ put_bh(bh);
+ return NULL;
}
/* Returns 0 on success, 1 on error */
@@ -469,7 +474,8 @@ int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
clear_buffer_new(bh);
/* Panic or remount fs read-only if block bitmap is invalid */
ext4_validate_block_bitmap(sb, desc, block_group, bh);
- return 0;
+ /* ...but check for error just in case errors=continue. */
+ return !buffer_verified(bh);
}
struct buffer_head *
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 3c7d288ae94..680bb338891 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -33,7 +33,7 @@ static int ext4_dx_readdir(struct file *, struct dir_context *);
/**
* Check if the given dir-inode refers to an htree-indexed directory
- * (or a directory which chould potentially get coverted to use htree
+ * (or a directory which could potentially get converted to use htree
* indexing).
*
* Return 1 if it is a dx dir, 0 if not
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index b577e45425b..af815ea9d7c 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -180,7 +180,6 @@ struct ext4_map_blocks {
* Flags for ext4_io_end->flags
*/
#define EXT4_IO_END_UNWRITTEN 0x0001
-#define EXT4_IO_END_DIRECT 0x0002
/*
* For converting uninitialized extents on a work queue. 'handle' is used for
@@ -196,8 +195,6 @@ typedef struct ext4_io_end {
unsigned int flag; /* unwritten or not */
loff_t offset; /* offset in the file */
ssize_t size; /* size of the extent */
- struct kiocb *iocb; /* iocb struct for AIO */
- int result; /* error value for AIO */
atomic_t count; /* reference counter */
} ext4_io_end_t;
@@ -561,6 +558,18 @@ enum {
#define EXT4_GET_BLOCKS_NO_PUT_HOLE 0x0200
/*
+ * The bit position of these flags must not overlap with any of the
+ * EXT4_GET_BLOCKS_*. They are used by ext4_ext_find_extent(),
+ * read_extent_tree_block(), ext4_split_extent_at(),
+ * ext4_ext_insert_extent(), and ext4_ext_create_new_leaf().
+ * EXT4_EX_NOCACHE is used to indicate that we shouldn't be
+ * caching the extents when reading from the extent tree while a
+ * truncate or punch hole operation is in progress.
+ */
+#define EXT4_EX_NOCACHE 0x0400
+#define EXT4_EX_FORCE_CACHE 0x0800
+
+/*
* Flags used by ext4_free_blocks
*/
#define EXT4_FREE_BLOCKS_METADATA 0x0001
@@ -569,6 +578,7 @@ enum {
#define EXT4_FREE_BLOCKS_NO_QUOT_UPDATE 0x0008
#define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER 0x0010
#define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER 0x0020
+#define EXT4_FREE_BLOCKS_RESERVE 0x0040
/*
* ioctl commands
@@ -590,6 +600,7 @@ enum {
#define EXT4_IOC_MOVE_EXT _IOWR('f', 15, struct move_extent)
#define EXT4_IOC_RESIZE_FS _IOW('f', 16, __u64)
#define EXT4_IOC_SWAP_BOOT _IO('f', 17)
+#define EXT4_IOC_PRECACHE_EXTENTS _IO('f', 18)
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
@@ -900,11 +911,9 @@ struct ext4_inode_info {
* Completed IOs that need unwritten extents handling and don't have
* transaction reserved
*/
- struct list_head i_unrsv_conversion_list;
atomic_t i_ioend_count; /* Number of outstanding io_end structs */
atomic_t i_unwritten; /* Nr. of inflight conversions pending */
struct work_struct i_rsv_conversion_work;
- struct work_struct i_unrsv_conversion_work;
spinlock_t i_block_reservation_lock;
@@ -1276,8 +1285,6 @@ struct ext4_sb_info {
struct flex_groups *s_flex_groups;
ext4_group_t s_flex_groups_allocated;
- /* workqueue for unreserved extent convertions (dio) */
- struct workqueue_struct *unrsv_conversion_wq;
/* workqueue for reserved extent conversions (buffered io) */
struct workqueue_struct *rsv_conversion_wq;
@@ -1340,9 +1347,6 @@ static inline void ext4_set_io_unwritten_flag(struct inode *inode,
struct ext4_io_end *io_end)
{
if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
- /* Writeback has to have coversion transaction reserved */
- WARN_ON(EXT4_SB(inode->i_sb)->s_journal && !io_end->handle &&
- !(io_end->flag & EXT4_IO_END_DIRECT));
io_end->flag |= EXT4_IO_END_UNWRITTEN;
atomic_inc(&EXT4_I(inode)->i_unwritten);
}
@@ -1375,6 +1379,7 @@ enum {
nolocking */
EXT4_STATE_MAY_INLINE_DATA, /* may have in-inode data */
EXT4_STATE_ORDERED_MODE, /* data=ordered mode */
+ EXT4_STATE_EXT_PRECACHED, /* extents have been precached */
};
#define EXT4_INODE_BIT_FNS(name, field, offset) \
@@ -1915,7 +1920,7 @@ extern ext4_group_t ext4_get_group_number(struct super_block *sb,
extern void ext4_validate_block_bitmap(struct super_block *sb,
struct ext4_group_desc *desc,
- unsigned int block_group,
+ ext4_group_t block_group,
struct buffer_head *bh);
extern unsigned int ext4_block_group(struct super_block *sb,
ext4_fsblk_t blocknr);
@@ -2086,6 +2091,7 @@ extern int ext4_sync_inode(handle_t *, struct inode *);
extern void ext4_dirty_inode(struct inode *, int);
extern int ext4_change_inode_journal_flag(struct inode *, int);
extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *);
+extern int ext4_inode_attach_jinode(struct inode *inode);
extern int ext4_can_truncate(struct inode *inode);
extern void ext4_truncate(struct inode *);
extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length);
@@ -2416,16 +2422,32 @@ do { \
#define EXT4_FREECLUSTERS_WATERMARK 0
#endif
+/* Update i_disksize. Requires i_mutex to avoid races with truncate */
static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
{
- /*
- * XXX: replace with spinlock if seen contended -bzzz
- */
+ WARN_ON_ONCE(S_ISREG(inode->i_mode) &&
+ !mutex_is_locked(&inode->i_mutex));
down_write(&EXT4_I(inode)->i_data_sem);
if (newsize > EXT4_I(inode)->i_disksize)
EXT4_I(inode)->i_disksize = newsize;
up_write(&EXT4_I(inode)->i_data_sem);
- return ;
+}
+
+/*
+ * Update i_disksize after writeback has been started. Races with truncate
+ * are avoided by checking i_size under i_data_sem.
+ */
+static inline void ext4_wb_update_i_disksize(struct inode *inode, loff_t newsize)
+{
+ loff_t i_size;
+
+ down_write(&EXT4_I(inode)->i_data_sem);
+ i_size = i_size_read(inode);
+ if (newsize > i_size)
+ newsize = i_size;
+ if (newsize > EXT4_I(inode)->i_disksize)
+ EXT4_I(inode)->i_disksize = newsize;
+ up_write(&EXT4_I(inode)->i_data_sem);
}
struct ext4_group_info {
@@ -2448,9 +2470,15 @@ struct ext4_group_info {
#define EXT4_GROUP_INFO_NEED_INIT_BIT 0
#define EXT4_GROUP_INFO_WAS_TRIMMED_BIT 1
+#define EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT 2
+#define EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT 3
#define EXT4_MB_GRP_NEED_INIT(grp) \
(test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
+#define EXT4_MB_GRP_BBITMAP_CORRUPT(grp) \
+ (test_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &((grp)->bb_state)))
+#define EXT4_MB_GRP_IBITMAP_CORRUPT(grp) \
+ (test_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &((grp)->bb_state)))
#define EXT4_MB_GRP_WAS_TRIMMED(grp) \
(test_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state)))
@@ -2654,6 +2682,12 @@ extern int ext4_check_blockref(const char *, unsigned int,
struct ext4_ext_path;
struct ext4_extent;
+/*
+ * Maximum number of logical blocks in a file; ext4_extent's ee_block is
+ * __le32.
+ */
+#define EXT_MAX_BLOCKS 0xffffffff
+
extern int ext4_ext_tree_init(handle_t *handle, struct inode *);
extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
extern int ext4_ext_index_trans_blocks(struct inode *inode, int extents);
@@ -2683,7 +2717,8 @@ extern int ext4_ext_insert_extent(handle_t *, struct inode *,
struct ext4_ext_path *,
struct ext4_extent *, int);
extern struct ext4_ext_path *ext4_ext_find_extent(struct inode *, ext4_lblk_t,
- struct ext4_ext_path *);
+ struct ext4_ext_path *,
+ int flags);
extern void ext4_ext_drop_refs(struct ext4_ext_path *);
extern int ext4_ext_check_inode(struct inode *inode);
extern int ext4_find_delalloc_range(struct inode *inode,
@@ -2692,7 +2727,7 @@ extern int ext4_find_delalloc_range(struct inode *inode,
extern int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk);
extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len);
-
+extern int ext4_ext_precache(struct inode *inode);
/* move_extent.c */
extern void ext4_double_down_write_data_sem(struct inode *first,
@@ -2715,7 +2750,6 @@ extern void ext4_put_io_end_defer(ext4_io_end_t *io_end);
extern void ext4_io_submit_init(struct ext4_io_submit *io,
struct writeback_control *wbc);
extern void ext4_end_io_rsv_work(struct work_struct *work);
-extern void ext4_end_io_unrsv_work(struct work_struct *work);
extern void ext4_io_submit(struct ext4_io_submit *io);
extern int ext4_bio_write_page(struct ext4_io_submit *io,
struct page *page,
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 51bc821ade9..5074fe23f19 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -134,12 +134,6 @@ struct ext4_ext_path {
*/
/*
- * Maximum number of logical blocks in a file; ext4_extent's ee_block is
- * __le32.
- */
-#define EXT_MAX_BLOCKS 0xffffffff
-
-/*
* EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an
* initialized extent. This is 2^15 and not (2^16 - 1), since we use the
* MSB of ee_len field in the extent datastructure to signify if this
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 72a3600aedb..17ac112ab10 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -255,10 +255,10 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
set_buffer_prio(bh);
if (ext4_handle_valid(handle)) {
err = jbd2_journal_dirty_metadata(handle, bh);
- if (err) {
- /* Errors can only happen if there is a bug */
- handle->h_err = err;
- __ext4_journal_stop(where, line, handle);
+ /* Errors can only happen if there is a bug */
+ if (WARN_ON_ONCE(err)) {
+ ext4_journal_abort_handle(where, line, __func__, bh,
+ handle, err);
}
} else {
if (inode)
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index 2877258d949..81cfefa9dc0 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -197,7 +197,7 @@ static inline void ext4_journal_callback_add(handle_t *handle,
* ext4_journal_callback_del: delete a registered callback
* @handle: active journal transaction handle on which callback was registered
* @jce: registered journal callback entry to unregister
- * Return true if object was sucessfully removed
+ * Return true if object was successfully removed
*/
static inline bool ext4_journal_callback_try_del(handle_t *handle,
struct ext4_journal_cb_entry *jce)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index a61873808f7..54d52afcdb1 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -407,7 +407,7 @@ static int ext4_valid_extent_entries(struct inode *inode,
static int __ext4_ext_check(const char *function, unsigned int line,
struct inode *inode, struct ext4_extent_header *eh,
- int depth)
+ int depth, ext4_fsblk_t pblk)
{
const char *error_msg;
int max = 0;
@@ -447,42 +447,149 @@ static int __ext4_ext_check(const char *function, unsigned int line,
corrupted:
ext4_error_inode(inode, function, line, 0,
- "bad header/extent: %s - magic %x, "
- "entries %u, max %u(%u), depth %u(%u)",
- error_msg, le16_to_cpu(eh->eh_magic),
- le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
- max, le16_to_cpu(eh->eh_depth), depth);
-
+ "pblk %llu bad header/extent: %s - magic %x, "
+ "entries %u, max %u(%u), depth %u(%u)",
+ (unsigned long long) pblk, error_msg,
+ le16_to_cpu(eh->eh_magic),
+ le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
+ max, le16_to_cpu(eh->eh_depth), depth);
return -EIO;
}
-#define ext4_ext_check(inode, eh, depth) \
- __ext4_ext_check(__func__, __LINE__, inode, eh, depth)
+#define ext4_ext_check(inode, eh, depth, pblk) \
+ __ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))
int ext4_ext_check_inode(struct inode *inode)
{
- return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
+ return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode), 0);
}
-static int __ext4_ext_check_block(const char *function, unsigned int line,
- struct inode *inode,
- struct ext4_extent_header *eh,
- int depth,
- struct buffer_head *bh)
+static struct buffer_head *
+__read_extent_tree_block(const char *function, unsigned int line,
+ struct inode *inode, ext4_fsblk_t pblk, int depth,
+ int flags)
{
- int ret;
+ struct buffer_head *bh;
+ int err;
- if (buffer_verified(bh))
- return 0;
- ret = ext4_ext_check(inode, eh, depth);
- if (ret)
- return ret;
+ bh = sb_getblk(inode->i_sb, pblk);
+ if (unlikely(!bh))
+ return ERR_PTR(-ENOMEM);
+
+ if (!bh_uptodate_or_lock(bh)) {
+ trace_ext4_ext_load_extent(inode, pblk, _RET_IP_);
+ err = bh_submit_read(bh);
+ if (err < 0)
+ goto errout;
+ }
+ if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE))
+ return bh;
+ err = __ext4_ext_check(function, line, inode,
+ ext_block_hdr(bh), depth, pblk);
+ if (err)
+ goto errout;
set_buffer_verified(bh);
- return ret;
+ /*
+ * If this is a leaf block, cache all of its entries
+ */
+ if (!(flags & EXT4_EX_NOCACHE) && depth == 0) {
+ struct ext4_extent_header *eh = ext_block_hdr(bh);
+ struct ext4_extent *ex = EXT_FIRST_EXTENT(eh);
+ ext4_lblk_t prev = 0;
+ int i;
+
+ for (i = le16_to_cpu(eh->eh_entries); i > 0; i--, ex++) {
+ unsigned int status = EXTENT_STATUS_WRITTEN;
+ ext4_lblk_t lblk = le32_to_cpu(ex->ee_block);
+ int len = ext4_ext_get_actual_len(ex);
+
+ if (prev && (prev != lblk))
+ ext4_es_cache_extent(inode, prev,
+ lblk - prev, ~0,
+ EXTENT_STATUS_HOLE);
+
+ if (ext4_ext_is_uninitialized(ex))
+ status = EXTENT_STATUS_UNWRITTEN;
+ ext4_es_cache_extent(inode, lblk, len,
+ ext4_ext_pblock(ex), status);
+ prev = lblk + len;
+ }
+ }
+ return bh;
+errout:
+ put_bh(bh);
+ return ERR_PTR(err);
+
}
-#define ext4_ext_check_block(inode, eh, depth, bh) \
- __ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh)
+#define read_extent_tree_block(inode, pblk, depth, flags) \
+ __read_extent_tree_block(__func__, __LINE__, (inode), (pblk), \
+ (depth), (flags))
+
+/*
+ * This function is called to cache a file's extent information in the
+ * extent status tree
+ */
+int ext4_ext_precache(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct ext4_ext_path *path = NULL;
+ struct buffer_head *bh;
+ int i = 0, depth, ret = 0;
+
+ if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ return 0; /* not an extent-mapped inode */
+
+ down_read(&ei->i_data_sem);
+ depth = ext_depth(inode);
+
+ path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
+ GFP_NOFS);
+ if (path == NULL) {
+ up_read(&ei->i_data_sem);
+ return -ENOMEM;
+ }
+
+ /* Don't cache anything if there are no external extent blocks */
+ if (depth == 0)
+ goto out;
+ path[0].p_hdr = ext_inode_hdr(inode);
+ ret = ext4_ext_check(inode, path[0].p_hdr, depth, 0);
+ if (ret)
+ goto out;
+ path[0].p_idx = EXT_FIRST_INDEX(path[0].p_hdr);
+ while (i >= 0) {
+ /*
+ * If this is a leaf block or we've reached the end of
+ * the index block, go up
+ */
+ if ((i == depth) ||
+ path[i].p_idx > EXT_LAST_INDEX(path[i].p_hdr)) {
+ brelse(path[i].p_bh);
+ path[i].p_bh = NULL;
+ i--;
+ continue;
+ }
+ bh = read_extent_tree_block(inode,
+ ext4_idx_pblock(path[i].p_idx++),
+ depth - i - 1,
+ EXT4_EX_FORCE_CACHE);
+ if (IS_ERR(bh)) {
+ ret = PTR_ERR(bh);
+ break;
+ }
+ i++;
+ path[i].p_bh = bh;
+ path[i].p_hdr = ext_block_hdr(bh);
+ path[i].p_idx = EXT_FIRST_INDEX(path[i].p_hdr);
+ }
+ ext4_set_inode_state(inode, EXT4_STATE_EXT_PRECACHED);
+out:
+ up_read(&ei->i_data_sem);
+ ext4_ext_drop_refs(path);
+ kfree(path);
+ return ret;
+}
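The precache walk above visits every extent tree block iteratively, keeping one cursor per level in the path array instead of recursing. A rough stand-alone illustration of that walk pattern over a small in-memory n-ary tree follows; all demo_* names are invented and the tree here only stands in for the on-disk index/leaf structure:

#include <stdio.h>

#define MAX_DEPTH 8     /* depth passed to demo_walk() must be < MAX_DEPTH */
#define FANOUT    3

struct demo_node {
        int nr_children;
        struct demo_node *child[FANOUT];
        int value;              /* meaningful for leaves only */
};

/* Visit every leaf using one cursor per level, like path[i].p_idx above. */
static void demo_walk(struct demo_node *root, int depth)
{
        struct demo_node *node[MAX_DEPTH];
        int idx[MAX_DEPTH];
        int i = 0;

        node[0] = root;
        idx[0] = 0;
        while (i >= 0) {
                /* Leaf level, or this index block is exhausted: go back up. */
                if (i == depth || idx[i] >= node[i]->nr_children) {
                        if (i == depth)
                                printf("leaf value %d\n", node[i]->value);
                        i--;
                        continue;
                }
                /* Descend into the next child and start at its first entry. */
                node[i + 1] = node[i]->child[idx[i]++];
                idx[i + 1] = 0;
                i++;
        }
}

int main(void)
{
        struct demo_node leaf0 = { .nr_children = 0, .value = 10 };
        struct demo_node leaf1 = { .nr_children = 0, .value = 20 };
        struct demo_node root = { .nr_children = 2,
                                  .child = { &leaf0, &leaf1 } };

        demo_walk(&root, 1);    /* depth 1: root is an index, children are leaves */
        return 0;
}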
#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
@@ -716,7 +823,7 @@ int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
- struct ext4_ext_path *path)
+ struct ext4_ext_path *path, int flags)
{
struct ext4_extent_header *eh;
struct buffer_head *bh;
@@ -748,20 +855,13 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
path[ppos].p_depth = i;
path[ppos].p_ext = NULL;
- bh = sb_getblk(inode->i_sb, path[ppos].p_block);
- if (unlikely(!bh)) {
- ret = -ENOMEM;
+ bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
+ flags);
+ if (IS_ERR(bh)) {
+ ret = PTR_ERR(bh);
goto err;
}
- if (!bh_uptodate_or_lock(bh)) {
- trace_ext4_ext_load_extent(inode, block,
- path[ppos].p_block);
- ret = bh_submit_read(bh);
- if (ret < 0) {
- put_bh(bh);
- goto err;
- }
- }
+
eh = ext_block_hdr(bh);
ppos++;
if (unlikely(ppos > depth)) {
@@ -773,11 +873,6 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
}
path[ppos].p_bh = bh;
path[ppos].p_hdr = eh;
- i--;
-
- ret = ext4_ext_check_block(inode, eh, i, bh);
- if (ret < 0)
- goto err;
}
path[ppos].p_depth = i;
@@ -1198,7 +1293,8 @@ out:
* if no free index is found, then it requests in-depth growing.
*/
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
- unsigned int flags,
+ unsigned int mb_flags,
+ unsigned int gb_flags,
struct ext4_ext_path *path,
struct ext4_extent *newext)
{
@@ -1220,7 +1316,7 @@ repeat:
if (EXT_HAS_FREE_INDEX(curp)) {
/* if we found index with free entry, then use that
* entry: create all needed subtree and add new leaf */
- err = ext4_ext_split(handle, inode, flags, path, newext, i);
+ err = ext4_ext_split(handle, inode, mb_flags, path, newext, i);
if (err)
goto out;
@@ -1228,12 +1324,12 @@ repeat:
ext4_ext_drop_refs(path);
path = ext4_ext_find_extent(inode,
(ext4_lblk_t)le32_to_cpu(newext->ee_block),
- path);
+ path, gb_flags);
if (IS_ERR(path))
err = PTR_ERR(path);
} else {
/* tree is full, time to grow in depth */
- err = ext4_ext_grow_indepth(handle, inode, flags, newext);
+ err = ext4_ext_grow_indepth(handle, inode, mb_flags, newext);
if (err)
goto out;
@@ -1241,7 +1337,7 @@ repeat:
ext4_ext_drop_refs(path);
path = ext4_ext_find_extent(inode,
(ext4_lblk_t)le32_to_cpu(newext->ee_block),
- path);
+ path, gb_flags);
if (IS_ERR(path)) {
err = PTR_ERR(path);
goto out;
@@ -1412,29 +1508,21 @@ got_index:
ix++;
block = ext4_idx_pblock(ix);
while (++depth < path->p_depth) {
- bh = sb_bread(inode->i_sb, block);
- if (bh == NULL)
- return -EIO;
- eh = ext_block_hdr(bh);
/* subtract from p_depth to get proper eh_depth */
- if (ext4_ext_check_block(inode, eh,
- path->p_depth - depth, bh)) {
- put_bh(bh);
- return -EIO;
- }
+ bh = read_extent_tree_block(inode, block,
+ path->p_depth - depth, 0);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
+ eh = ext_block_hdr(bh);
ix = EXT_FIRST_INDEX(eh);
block = ext4_idx_pblock(ix);
put_bh(bh);
}
- bh = sb_bread(inode->i_sb, block);
- if (bh == NULL)
- return -EIO;
+ bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
eh = ext_block_hdr(bh);
- if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) {
- put_bh(bh);
- return -EIO;
- }
ex = EXT_FIRST_EXTENT(eh);
found_extent:
*logical = le32_to_cpu(ex->ee_block);
@@ -1705,7 +1793,8 @@ static void ext4_ext_try_to_merge_up(handle_t *handle,
brelse(path[1].p_bh);
ext4_free_blocks(handle, inode, NULL, blk, 1,
- EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
+ EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET |
+ EXT4_FREE_BLOCKS_RESERVE);
}
/*
@@ -1793,7 +1882,7 @@ out:
*/
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
struct ext4_ext_path *path,
- struct ext4_extent *newext, int flag)
+ struct ext4_extent *newext, int gb_flags)
{
struct ext4_extent_header *eh;
struct ext4_extent *ex, *fex;
@@ -1802,7 +1891,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
int depth, len, err;
ext4_lblk_t next;
unsigned uninitialized = 0;
- int flags = 0;
+ int mb_flags = 0;
if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
@@ -1817,7 +1906,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
}
/* try to insert block into found extent and return */
- if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)) {
+ if (ex && !(gb_flags & EXT4_GET_BLOCKS_PRE_IO)) {
/*
* Try to see whether we should rather test the extent on
@@ -1920,7 +2009,7 @@ prepend:
if (next != EXT_MAX_BLOCKS) {
ext_debug("next leaf block - %u\n", next);
BUG_ON(npath != NULL);
- npath = ext4_ext_find_extent(inode, next, NULL);
+ npath = ext4_ext_find_extent(inode, next, NULL, 0);
if (IS_ERR(npath))
return PTR_ERR(npath);
BUG_ON(npath->p_depth != path->p_depth);
@@ -1939,9 +2028,10 @@ prepend:
* There is no free space in the found leaf.
* We're gonna add a new leaf in the tree.
*/
- if (flag & EXT4_GET_BLOCKS_METADATA_NOFAIL)
- flags = EXT4_MB_USE_RESERVED;
- err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
+ if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
+ mb_flags = EXT4_MB_USE_RESERVED;
+ err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
+ path, newext);
if (err)
goto cleanup;
depth = ext_depth(inode);
@@ -2007,7 +2097,7 @@ has_space:
merge:
/* try to merge extents */
- if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
+ if (!(gb_flags & EXT4_GET_BLOCKS_PRE_IO))
ext4_ext_try_to_merge(handle, inode, path, nearex);
@@ -2050,7 +2140,7 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
path = NULL;
}
- path = ext4_ext_find_extent(inode, block, path);
+ path = ext4_ext_find_extent(inode, block, path, 0);
if (IS_ERR(path)) {
up_read(&EXT4_I(inode)->i_data_sem);
err = PTR_ERR(path);
@@ -2195,8 +2285,8 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
ext4_lblk_t block)
{
int depth = ext_depth(inode);
- unsigned long len;
- ext4_lblk_t lblock;
+ unsigned long len = 0;
+ ext4_lblk_t lblock = 0;
struct ext4_extent *ex;
ex = path[depth].p_ext;
@@ -2233,7 +2323,6 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
ext4_es_insert_extent(inode, lblock, len, ~0,
EXTENT_STATUS_HOLE);
} else {
- lblock = len = 0;
BUG();
}
@@ -2712,7 +2801,7 @@ again:
ext4_lblk_t ee_block;
/* find extent for this block */
- path = ext4_ext_find_extent(inode, end, NULL);
+ path = ext4_ext_find_extent(inode, end, NULL, EXT4_EX_NOCACHE);
if (IS_ERR(path)) {
ext4_journal_stop(handle);
return PTR_ERR(path);
@@ -2754,6 +2843,7 @@ again:
*/
err = ext4_split_extent_at(handle, inode, path,
end + 1, split_flag,
+ EXT4_EX_NOCACHE |
EXT4_GET_BLOCKS_PRE_IO |
EXT4_GET_BLOCKS_METADATA_NOFAIL);
@@ -2782,7 +2872,7 @@ again:
path[0].p_hdr = ext_inode_hdr(inode);
i = 0;
- if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
+ if (ext4_ext_check(inode, path[0].p_hdr, depth, 0)) {
err = -EIO;
goto out;
}
@@ -2829,10 +2919,12 @@ again:
ext_debug("move to level %d (block %llu)\n",
i + 1, ext4_idx_pblock(path[i].p_idx));
memset(path + i + 1, 0, sizeof(*path));
- bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
- if (!bh) {
+ bh = read_extent_tree_block(inode,
+ ext4_idx_pblock(path[i].p_idx), depth - i - 1,
+ EXT4_EX_NOCACHE);
+ if (IS_ERR(bh)) {
/* should we reset i_size? */
- err = -EIO;
+ err = PTR_ERR(bh);
break;
}
/* Yield here to deal with large extent trees.
@@ -2842,11 +2934,6 @@ again:
err = -EIO;
break;
}
- if (ext4_ext_check_block(inode, ext_block_hdr(bh),
- depth - i - 1, bh)) {
- err = -EIO;
- break;
- }
path[i + 1].p_bh = bh;
/* save actual number of indexes since this
@@ -2961,6 +3048,23 @@ void ext4_ext_release(struct super_block *sb)
#endif
}
+static int ext4_zeroout_es(struct inode *inode, struct ext4_extent *ex)
+{
+ ext4_lblk_t ee_block;
+ ext4_fsblk_t ee_pblock;
+ unsigned int ee_len;
+
+ ee_block = le32_to_cpu(ex->ee_block);
+ ee_len = ext4_ext_get_actual_len(ex);
+ ee_pblock = ext4_ext_pblock(ex);
+
+ if (ee_len == 0)
+ return 0;
+
+ return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
+ EXTENT_STATUS_WRITTEN);
+}
+
/* FIXME!! we need to try to merge to left or right after zero-out */
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
@@ -3113,7 +3217,7 @@ static int ext4_split_extent_at(handle_t *handle,
goto fix_extent_len;
/* update extent status tree */
- err = ext4_es_zeroout(inode, &zero_ex);
+ err = ext4_zeroout_es(inode, &zero_ex);
goto out;
} else if (err)
@@ -3133,7 +3237,7 @@ fix_extent_len:
* ext4_split_extents() splits an extent and mark extent which is covered
* by @map as split_flags indicates
*
- * It may result in splitting the extent into multiple extents (upto three)
+ * It may result in splitting the extent into multiple extents (up to three)
* There are three possibilities:
* a> There is no split required
* b> Splits in two extents: Split is happening at either end of the extent
@@ -3181,7 +3285,7 @@ static int ext4_split_extent(handle_t *handle,
* result in split of original leaf or extent zeroout.
*/
ext4_ext_drop_refs(path);
- path = ext4_ext_find_extent(inode, map->m_lblk, path);
+ path = ext4_ext_find_extent(inode, map->m_lblk, path, 0);
if (IS_ERR(path))
return PTR_ERR(path);
depth = ext_depth(inode);
@@ -3464,7 +3568,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
out:
/* If we have gotten a failure, don't zero out status tree */
if (!err)
- err = ext4_es_zeroout(inode, &zero_ex);
+ err = ext4_zeroout_es(inode, &zero_ex);
return err ? err : allocated;
}
@@ -3565,7 +3669,7 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
if (err < 0)
goto out;
ext4_ext_drop_refs(path);
- path = ext4_ext_find_extent(inode, map->m_lblk, path);
+ path = ext4_ext_find_extent(inode, map->m_lblk, path, 0);
if (IS_ERR(path)) {
err = PTR_ERR(path);
goto out;
@@ -4052,7 +4156,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
/* find extent for this block */
- path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
+ path = ext4_ext_find_extent(inode, map->m_lblk, NULL, 0);
if (IS_ERR(path)) {
err = PTR_ERR(path);
path = NULL;
@@ -4412,7 +4516,7 @@ void ext4_ext_truncate(handle_t *handle, struct inode *inode)
retry:
err = ext4_es_remove_extent(inode, last_block,
EXT_MAX_BLOCKS - last_block);
- if (err == ENOMEM) {
+ if (err == -ENOMEM) {
cond_resched();
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto retry;
@@ -4744,6 +4848,12 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
return error;
}
+ if (fieinfo->fi_flags & FIEMAP_FLAG_CACHE) {
+ error = ext4_ext_precache(inode);
+ if (error)
+ return error;
+ }
+
/* fallback to generic here if not in extents fmt */
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
return generic_block_fiemap(inode, fieinfo, start, len,
@@ -4771,6 +4881,6 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
error = ext4_fill_fiemap_extents(inode, start_blk,
len_blks, fieinfo);
}
-
+ ext4_es_lru_add(inode);
return error;
}
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 91cb110da1b..2d1bdbe78c0 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -13,7 +13,6 @@
#include <linux/list_sort.h>
#include "ext4.h"
#include "extents_status.h"
-#include "ext4_extents.h"
#include <trace/events/ext4.h>
@@ -263,7 +262,7 @@ void ext4_es_find_delayed_extent_range(struct inode *inode,
if (tree->cache_es) {
es1 = tree->cache_es;
if (in_range(lblk, es1->es_lblk, es1->es_len)) {
- es_debug("%u cached by [%u/%u) %llu %llx\n",
+ es_debug("%u cached by [%u/%u) %llu %x\n",
lblk, es1->es_lblk, es1->es_len,
ext4_es_pblock(es1), ext4_es_status(es1));
goto out;
@@ -409,6 +408,8 @@ ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
}
#ifdef ES_AGGRESSIVE_TEST
+#include "ext4_extents.h" /* Needed when ES_AGGRESSIVE_TEST is defined */
+
static void ext4_es_insert_extent_ext_check(struct inode *inode,
struct extent_status *es)
{
@@ -419,7 +420,7 @@ static void ext4_es_insert_extent_ext_check(struct inode *inode,
unsigned short ee_len;
int depth, ee_status, es_status;
- path = ext4_ext_find_extent(inode, es->es_lblk, NULL);
+ path = ext4_ext_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE);
if (IS_ERR(path))
return;
@@ -641,13 +642,13 @@ out:
*/
int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len, ext4_fsblk_t pblk,
- unsigned long long status)
+ unsigned int status)
{
struct extent_status newes;
ext4_lblk_t end = lblk + len - 1;
int err = 0;
- es_debug("add [%u/%u) %llu %llx to extent status tree of inode %lu\n",
+ es_debug("add [%u/%u) %llu %x to extent status tree of inode %lu\n",
lblk, len, pblk, status, inode->i_ino);
if (!len)
@@ -684,6 +685,38 @@ error:
}
/*
+ * ext4_es_cache_extent() inserts information into the extent status
+ * tree if and only if there isn't information about the range in
+ * question already.
+ */
+void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len, ext4_fsblk_t pblk,
+ unsigned int status)
+{
+ struct extent_status *es;
+ struct extent_status newes;
+ ext4_lblk_t end = lblk + len - 1;
+
+ newes.es_lblk = lblk;
+ newes.es_len = len;
+ ext4_es_store_pblock(&newes, pblk);
+ ext4_es_store_status(&newes, status);
+ trace_ext4_es_cache_extent(inode, &newes);
+
+ if (!len)
+ return;
+
+ BUG_ON(end < lblk);
+
+ write_lock(&EXT4_I(inode)->i_es_lock);
+
+ es = __es_tree_search(&EXT4_I(inode)->i_es_tree.root, lblk);
+ if (!es || es->es_lblk > end)
+ __es_insert_extent(inode, &newes);
+ write_unlock(&EXT4_I(inode)->i_es_lock);
+}
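ext4_es_cache_extent() only inserts when nothing in the tree already covers the requested range. A simplified illustration of that insert-if-absent rule, using a flat array in place of the rbtree; the demo_* names are invented:

#include <stdio.h>

struct demo_es {
        unsigned int lblk;      /* first logical block */
        unsigned int len;       /* number of blocks */
};

/* Return 1 if some cached range overlaps [lblk, end]. */
static int demo_overlaps(const struct demo_es *tab, int n,
                         unsigned int lblk, unsigned int end)
{
        for (int i = 0; i < n; i++)
                if (tab[i].lblk <= end && tab[i].lblk + tab[i].len - 1 >= lblk)
                        return 1;
        return 0;
}

/* Cache [lblk, lblk+len) only if no existing entry overlaps it. */
static int demo_cache_extent(struct demo_es *tab, int *n, int cap,
                             unsigned int lblk, unsigned int len)
{
        if (!len || *n >= cap)
                return 0;
        if (demo_overlaps(tab, *n, lblk, lblk + len - 1))
                return 0;       /* already (partially) cached: leave it alone */
        tab[(*n)++] = (struct demo_es){ .lblk = lblk, .len = len };
        return 1;
}

int main(void)
{
        struct demo_es cache[8];
        int n = 0;

        printf("%d\n", demo_cache_extent(cache, &n, 8, 100, 10));      /* 1 */
        printf("%d\n", demo_cache_extent(cache, &n, 8, 105, 10));      /* 0: overlap */
        return 0;
}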
+
+/*
* ext4_es_lookup_extent() looks up an extent in extent status tree.
*
* ext4_es_lookup_extent is called by ext4_map_blocks/ext4_da_map_blocks.
@@ -871,23 +904,6 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
return err;
}
-int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex)
-{
- ext4_lblk_t ee_block;
- ext4_fsblk_t ee_pblock;
- unsigned int ee_len;
-
- ee_block = le32_to_cpu(ex->ee_block);
- ee_len = ext4_ext_get_actual_len(ex);
- ee_pblock = ext4_ext_pblock(ex);
-
- if (ee_len == 0)
- return 0;
-
- return ext4_es_insert_extent(inode, ee_block, ee_len, ee_pblock,
- EXTENT_STATUS_WRITTEN);
-}
-
static int ext4_inode_touch_time_cmp(void *priv, struct list_head *a,
struct list_head *b)
{
@@ -895,6 +911,12 @@ static int ext4_inode_touch_time_cmp(void *priv, struct list_head *a,
eia = list_entry(a, struct ext4_inode_info, i_es_lru);
eib = list_entry(b, struct ext4_inode_info, i_es_lru);
+ if (ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) &&
+ !ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED))
+ return 1;
+ if (!ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) &&
+ ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED))
+ return -1;
if (eia->i_touch_when == eib->i_touch_when)
return 0;
if (time_after(eia->i_touch_when, eib->i_touch_when))
@@ -908,21 +930,13 @@ static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
{
struct ext4_inode_info *ei;
struct list_head *cur, *tmp;
- LIST_HEAD(skiped);
+ LIST_HEAD(skipped);
int ret, nr_shrunk = 0;
+ int retried = 0, skip_precached = 1, nr_skipped = 0;
spin_lock(&sbi->s_es_lru_lock);
- /*
- * If the inode that is at the head of LRU list is newer than
- * last_sorted time, that means that we need to sort this list.
- */
- ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info, i_es_lru);
- if (sbi->s_es_last_sorted < ei->i_touch_when) {
- list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp);
- sbi->s_es_last_sorted = jiffies;
- }
-
+retry:
list_for_each_safe(cur, tmp, &sbi->s_es_lru) {
/*
* If we have already reclaimed all extents from extent
@@ -933,9 +947,16 @@ static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
ei = list_entry(cur, struct ext4_inode_info, i_es_lru);
- /* Skip the inode that is newer than the last_sorted time */
- if (sbi->s_es_last_sorted < ei->i_touch_when) {
- list_move_tail(cur, &skiped);
+ /*
+ * Skip the inode that is newer than the last_sorted
+ * time. Normally we try hard to avoid shrinking
+ * precached inodes, but we will as a last resort.
+ */
+ if ((sbi->s_es_last_sorted < ei->i_touch_when) ||
+ (skip_precached && ext4_test_inode_state(&ei->vfs_inode,
+ EXT4_STATE_EXT_PRECACHED))) {
+ nr_skipped++;
+ list_move_tail(cur, &skipped);
continue;
}
@@ -955,11 +976,33 @@ static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
}
/* Move the newer inodes into the tail of the LRU list. */
- list_splice_tail(&skiped, &sbi->s_es_lru);
+ list_splice_tail(&skipped, &sbi->s_es_lru);
+ INIT_LIST_HEAD(&skipped);
+
+ /*
+ * If we skipped any inodes, and we weren't able to make any
+ * forward progress, sort the list and try again.
+ */
+ if ((nr_shrunk == 0) && nr_skipped && !retried) {
+ retried++;
+ list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp);
+ sbi->s_es_last_sorted = jiffies;
+ ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info,
+ i_es_lru);
+ /*
+ * If there are no non-precached inodes left on the
+ * list, start releasing precached extents.
+ */
+ if (ext4_test_inode_state(&ei->vfs_inode,
+ EXT4_STATE_EXT_PRECACHED))
+ skip_precached = 0;
+ goto retry;
+ }
+
spin_unlock(&sbi->s_es_lru_lock);
if (locked_ei && nr_shrunk == 0)
- nr_shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan);
+ nr_shrunk = __es_try_to_reclaim_extents(locked_ei, nr_to_scan);
return nr_shrunk;
}
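The reworked shrinker makes one pass that skips precached inodes and, only if that pass reclaims nothing, sorts the LRU and retries. A compact sketch of that skip-then-retry control flow; it is simplified in that the retry here always drops the precached exemption, whereas the code above only does so when the head of the re-sorted list is itself precached, and the demo_* names are invented:

#include <stdio.h>

struct demo_item {
        int precached;
        int reclaimable;
};

/* Try to reclaim from up to n items, skipping precached ones at first. */
static int demo_shrink(struct demo_item *items, int n)
{
        int skip_precached = 1, retried = 0;
        int nr_shrunk = 0, nr_skipped;

retry:
        nr_skipped = 0;
        for (int i = 0; i < n; i++) {
                if (skip_precached && items[i].precached) {
                        nr_skipped++;
                        continue;
                }
                if (items[i].reclaimable) {
                        items[i].reclaimable = 0;
                        nr_shrunk++;
                }
        }
        /* Nothing freed but something skipped: retry, now touching precached. */
        if (nr_shrunk == 0 && nr_skipped && !retried) {
                retried = 1;
                skip_precached = 0;
                goto retry;
        }
        return nr_shrunk;
}

int main(void)
{
        struct demo_item items[] = {
                { .precached = 1, .reclaimable = 1 },
                { .precached = 1, .reclaimable = 1 },
        };

        printf("reclaimed %d\n", demo_shrink(items, 2));        /* 2, via retry */
        return 0;
}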
@@ -1034,10 +1077,16 @@ static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei,
struct rb_node *node;
struct extent_status *es;
int nr_shrunk = 0;
+ static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
+ DEFAULT_RATELIMIT_BURST);
if (ei->i_es_lru_nr == 0)
return 0;
+ if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
+ __ratelimit(&_rs))
+ ext4_warning(inode->i_sb, "forced shrink of precached extents");
+
node = rb_first(&tree->root);
while (node != NULL) {
es = rb_entry(node, struct extent_status, rb_node);
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
index e936730cc5b..167f4ab8ecc 100644
--- a/fs/ext4/extents_status.h
+++ b/fs/ext4/extents_status.h
@@ -29,16 +29,26 @@
/*
* These flags live in the high bits of extent_status.es_pblk
*/
-#define EXTENT_STATUS_WRITTEN (1ULL << 63)
-#define EXTENT_STATUS_UNWRITTEN (1ULL << 62)
-#define EXTENT_STATUS_DELAYED (1ULL << 61)
-#define EXTENT_STATUS_HOLE (1ULL << 60)
+#define ES_SHIFT 60
+
+#define EXTENT_STATUS_WRITTEN (1 << 3)
+#define EXTENT_STATUS_UNWRITTEN (1 << 2)
+#define EXTENT_STATUS_DELAYED (1 << 1)
+#define EXTENT_STATUS_HOLE (1 << 0)
#define EXTENT_STATUS_FLAGS (EXTENT_STATUS_WRITTEN | \
EXTENT_STATUS_UNWRITTEN | \
EXTENT_STATUS_DELAYED | \
EXTENT_STATUS_HOLE)
+#define ES_WRITTEN (1ULL << 63)
+#define ES_UNWRITTEN (1ULL << 62)
+#define ES_DELAYED (1ULL << 61)
+#define ES_HOLE (1ULL << 60)
+
+#define ES_MASK (ES_WRITTEN | ES_UNWRITTEN | \
+ ES_DELAYED | ES_HOLE)
+
struct ext4_sb_info;
struct ext4_extent;
@@ -60,7 +70,10 @@ extern void ext4_es_init_tree(struct ext4_es_tree *tree);
extern int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len, ext4_fsblk_t pblk,
- unsigned long long status);
+ unsigned int status);
+extern void ext4_es_cache_extent(struct inode *inode, ext4_lblk_t lblk,
+ ext4_lblk_t len, ext4_fsblk_t pblk,
+ unsigned int status);
extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
ext4_lblk_t len);
extern void ext4_es_find_delayed_extent_range(struct inode *inode,
@@ -68,36 +81,35 @@ extern void ext4_es_find_delayed_extent_range(struct inode *inode,
struct extent_status *es);
extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
struct extent_status *es);
-extern int ext4_es_zeroout(struct inode *inode, struct ext4_extent *ex);
static inline int ext4_es_is_written(struct extent_status *es)
{
- return (es->es_pblk & EXTENT_STATUS_WRITTEN) != 0;
+ return (es->es_pblk & ES_WRITTEN) != 0;
}
static inline int ext4_es_is_unwritten(struct extent_status *es)
{
- return (es->es_pblk & EXTENT_STATUS_UNWRITTEN) != 0;
+ return (es->es_pblk & ES_UNWRITTEN) != 0;
}
static inline int ext4_es_is_delayed(struct extent_status *es)
{
- return (es->es_pblk & EXTENT_STATUS_DELAYED) != 0;
+ return (es->es_pblk & ES_DELAYED) != 0;
}
static inline int ext4_es_is_hole(struct extent_status *es)
{
- return (es->es_pblk & EXTENT_STATUS_HOLE) != 0;
+ return (es->es_pblk & ES_HOLE) != 0;
}
-static inline ext4_fsblk_t ext4_es_status(struct extent_status *es)
+static inline unsigned int ext4_es_status(struct extent_status *es)
{
- return (es->es_pblk & EXTENT_STATUS_FLAGS);
+ return es->es_pblk >> ES_SHIFT;
}
static inline ext4_fsblk_t ext4_es_pblock(struct extent_status *es)
{
- return (es->es_pblk & ~EXTENT_STATUS_FLAGS);
+ return es->es_pblk & ~ES_MASK;
}
static inline void ext4_es_store_pblock(struct extent_status *es,
@@ -105,19 +117,16 @@ static inline void ext4_es_store_pblock(struct extent_status *es,
{
ext4_fsblk_t block;
- block = (pb & ~EXTENT_STATUS_FLAGS) |
- (es->es_pblk & EXTENT_STATUS_FLAGS);
+ block = (pb & ~ES_MASK) | (es->es_pblk & ES_MASK);
es->es_pblk = block;
}
static inline void ext4_es_store_status(struct extent_status *es,
- unsigned long long status)
+ unsigned int status)
{
- ext4_fsblk_t block;
-
- block = (status & EXTENT_STATUS_FLAGS) |
- (es->es_pblk & ~EXTENT_STATUS_FLAGS);
- es->es_pblk = block;
+ es->es_pblk = (((ext4_fsblk_t)
+ (status & EXTENT_STATUS_FLAGS) << ES_SHIFT) |
+ (es->es_pblk & ~ES_MASK));
}
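The helpers above pack the four status bits into the top of the 64-bit es_pblk value (ES_SHIFT is 60) while the public EXTENT_STATUS_* constants stay small. A stand-alone sketch of the same packing arithmetic, using the shift and mask values from the hunk above and invented demo_* names:

#include <stdint.h>
#include <stdio.h>

#define DEMO_ES_SHIFT           60
#define DEMO_STATUS_WRITTEN     (1 << 3)
#define DEMO_STATUS_UNWRITTEN   (1 << 2)
#define DEMO_STATUS_DELAYED     (1 << 1)
#define DEMO_STATUS_HOLE        (1 << 0)
#define DEMO_STATUS_FLAGS       0xf
#define DEMO_ES_MASK            (0xfULL << DEMO_ES_SHIFT)

/* Store a physical block number in the low bits, keep the status bits. */
static uint64_t demo_store_pblock(uint64_t es_pblk, uint64_t pb)
{
        return (pb & ~DEMO_ES_MASK) | (es_pblk & DEMO_ES_MASK);
}

/* Store the small status value in the top four bits, keep the block. */
static uint64_t demo_store_status(uint64_t es_pblk, unsigned int status)
{
        return ((uint64_t)(status & DEMO_STATUS_FLAGS) << DEMO_ES_SHIFT) |
               (es_pblk & ~DEMO_ES_MASK);
}

static unsigned int demo_status(uint64_t es_pblk)
{
        return (unsigned int)(es_pblk >> DEMO_ES_SHIFT);
}

static uint64_t demo_pblock(uint64_t es_pblk)
{
        return es_pblk & ~DEMO_ES_MASK;
}

int main(void)
{
        uint64_t v = 0;

        v = demo_store_pblock(v, 123456);
        v = demo_store_status(v, DEMO_STATUS_WRITTEN | DEMO_STATUS_DELAYED);
        printf("pblock %llu status %#x\n",
               (unsigned long long)demo_pblock(v), demo_status(v));
        return 0;
}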
extern void ext4_es_register_shrinker(struct ext4_sb_info *sbi);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 6f4cc567c38..3da21945ff1 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -149,7 +149,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
mutex_unlock(&inode->i_mutex);
- if (ret > 0 || ret == -EIOCBQUEUED) {
+ if (ret > 0) {
ssize_t err;
err = generic_write_sync(file, pos, ret);
@@ -219,7 +219,6 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
{
struct super_block *sb = inode->i_sb;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- struct ext4_inode_info *ei = EXT4_I(inode);
struct vfsmount *mnt = filp->f_path.mnt;
struct path path;
char buf[64], *cp;
@@ -259,22 +258,10 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
* Set up the jbd2_inode if we are opening the inode for
* writing and the journal is present
*/
- if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) {
- struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL);
-
- spin_lock(&inode->i_lock);
- if (!ei->jinode) {
- if (!jinode) {
- spin_unlock(&inode->i_lock);
- return -ENOMEM;
- }
- ei->jinode = jinode;
- jbd2_journal_init_jbd_inode(ei->jinode, inode);
- jinode = NULL;
- }
- spin_unlock(&inode->i_lock);
- if (unlikely(jinode != NULL))
- jbd2_free_inode(jinode);
+ if (filp->f_mode & FMODE_WRITE) {
+ int ret = ext4_inode_attach_jinode(inode);
+ if (ret < 0)
+ return ret;
}
return dquot_file_open(inode, filp);
}
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index f03598c6ffd..137193ff389 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -70,18 +70,16 @@ static unsigned ext4_init_inode_bitmap(struct super_block *sb,
ext4_group_t block_group,
struct ext4_group_desc *gdp)
{
+ struct ext4_group_info *grp;
J_ASSERT_BH(bh, buffer_locked(bh));
/* If checksum is bad mark all blocks and inodes use to prevent
* allocation, essentially implementing a per-group read-only flag. */
if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
ext4_error(sb, "Checksum bad for group %u", block_group);
- ext4_free_group_clusters_set(sb, gdp, 0);
- ext4_free_inodes_set(sb, gdp, 0);
- ext4_itable_unused_set(sb, gdp, 0);
- memset(bh->b_data, 0xff, sb->s_blocksize);
- ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
- EXT4_INODES_PER_GROUP(sb) / 8);
+ grp = ext4_get_group_info(sb, block_group);
+ set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
+ set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
return 0;
}
@@ -117,6 +115,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
struct ext4_group_desc *desc;
struct buffer_head *bh = NULL;
ext4_fsblk_t bitmap_blk;
+ struct ext4_group_info *grp;
desc = ext4_get_group_desc(sb, block_group, NULL);
if (!desc)
@@ -185,6 +184,8 @@ verify:
put_bh(bh);
ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
"inode_bitmap = %llu", block_group, bitmap_blk);
+ grp = ext4_get_group_info(sb, block_group);
+ set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
return NULL;
}
ext4_unlock_group(sb, block_group);
@@ -221,6 +222,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
struct ext4_super_block *es;
struct ext4_sb_info *sbi;
int fatal = 0, err, count, cleared;
+ struct ext4_group_info *grp;
if (!sb) {
printk(KERN_ERR "EXT4-fs: %s:%d: inode on "
@@ -266,7 +268,9 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
bitmap_bh = ext4_read_inode_bitmap(sb, block_group);
- if (!bitmap_bh)
+ /* Don't bother if the inode bitmap is corrupt. */
+ grp = ext4_get_group_info(sb, block_group);
+ if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) || !bitmap_bh)
goto error_return;
BUFFER_TRACE(bitmap_bh, "get_write_access");
@@ -315,8 +319,10 @@ out:
err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
if (!fatal)
fatal = err;
- } else
+ } else {
ext4_error(sb, "bit already cleared for inode %lu", ino);
+ set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
+ }
error_return:
brelse(bitmap_bh);
@@ -625,6 +631,51 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
}
/*
+ * In no journal mode, if an inode has recently been deleted, we want
+ * to avoid reusing it until we're reasonably sure the inode table
+ * block has been written back to disk. (Yes, these values are
+ * somewhat arbitrary...)
+ */
+#define RECENTCY_MIN 5
+#define RECENTCY_DIRTY 30
+
+static int recently_deleted(struct super_block *sb, ext4_group_t group, int ino)
+{
+ struct ext4_group_desc *gdp;
+ struct ext4_inode *raw_inode;
+ struct buffer_head *bh;
+ unsigned long dtime, now;
+ int inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
+ int offset, ret = 0, recentcy = RECENTCY_MIN;
+
+ gdp = ext4_get_group_desc(sb, group, NULL);
+ if (unlikely(!gdp))
+ return 0;
+
+ bh = sb_getblk(sb, ext4_inode_table(sb, gdp) +
+ (ino / inodes_per_block));
+ if (unlikely(!bh) || !buffer_uptodate(bh))
+ /*
+ * If the block is not in the buffer cache, then it
+ * must have been written out.
+ */
+ goto out;
+
+ offset = (ino % inodes_per_block) * EXT4_INODE_SIZE(sb);
+ raw_inode = (struct ext4_inode *) (bh->b_data + offset);
+ dtime = le32_to_cpu(raw_inode->i_dtime);
+ now = get_seconds();
+ if (buffer_dirty(bh))
+ recentcy += RECENTCY_DIRTY;
+
+ if (dtime && (dtime < now) && (now < dtime + recentcy))
+ ret = 1;
+out:
+ brelse(bh);
+ return ret;
+}
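The predicate above boils down to a small time-window check around i_dtime, stretched when the inode table block is still dirty. Restated as stand-alone C with invented demo_* names:

#include <stdio.h>
#include <time.h>

#define DEMO_RECENTCY_MIN       5       /* seconds */
#define DEMO_RECENTCY_DIRTY     30      /* extra seconds if the block is dirty */

/* Return 1 if an inode deleted at 'dtime' is still too fresh to reuse. */
static int demo_recently_deleted(unsigned long dtime, unsigned long now,
                                 int block_dirty)
{
        int recentcy = DEMO_RECENTCY_MIN;

        if (block_dirty)
                recentcy += DEMO_RECENTCY_DIRTY;
        return dtime && dtime < now && now < dtime + recentcy;
}

int main(void)
{
        unsigned long now = (unsigned long)time(NULL);

        printf("%d\n", demo_recently_deleted(now - 2, now, 0));         /* 1 */
        printf("%d\n", demo_recently_deleted(now - 60, now, 1));        /* 0 */
        return 0;
}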
+
+/*
* There are two policies for allocating an inode. If the new inode is
* a directory, then a forward search is made for a block group with both
* free space and a low directory-to-inode ratio; if that fails, then of
@@ -652,6 +703,7 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
struct inode *ret;
ext4_group_t i;
ext4_group_t flex_group;
+ struct ext4_group_info *grp;
/* Cannot create files in a deleted directory */
if (!dir || !dir->i_nlink)
@@ -725,25 +777,39 @@ got_group:
continue;
}
+ grp = ext4_get_group_info(sb, group);
+ /* Skip groups with already-known suspicious inode tables */
+ if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+ if (++group == ngroups)
+ group = 0;
+ continue;
+ }
+
brelse(inode_bitmap_bh);
inode_bitmap_bh = ext4_read_inode_bitmap(sb, group);
- if (!inode_bitmap_bh)
- goto out;
+ /* Skip groups with suspicious inode tables */
+ if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp) || !inode_bitmap_bh) {
+ if (++group == ngroups)
+ group = 0;
+ continue;
+ }
repeat_in_this_group:
ino = ext4_find_next_zero_bit((unsigned long *)
inode_bitmap_bh->b_data,
EXT4_INODES_PER_GROUP(sb), ino);
- if (ino >= EXT4_INODES_PER_GROUP(sb)) {
- if (++group == ngroups)
- group = 0;
- continue;
- }
+ if (ino >= EXT4_INODES_PER_GROUP(sb))
+ goto next_group;
if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) {
ext4_error(sb, "reserved inode found cleared - "
"inode=%lu", ino + 1);
continue;
}
+ if ((EXT4_SB(sb)->s_journal == NULL) &&
+ recently_deleted(sb, group, ino)) {
+ ino++;
+ goto next_inode;
+ }
if (!handle) {
BUG_ON(nblocks <= 0);
handle = __ext4_journal_start_sb(dir->i_sb, line_no,
@@ -767,8 +833,12 @@ repeat_in_this_group:
ino++; /* the inode bitmap is zero-based */
if (!ret2)
goto got; /* we grabbed the inode! */
+next_inode:
if (ino < EXT4_INODES_PER_GROUP(sb))
goto repeat_in_this_group;
+next_group:
+ if (++group == ngroups)
+ group = 0;
}
err = -ENOSPC;
goto out;
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 87b30cd357e..594009f5f52 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -23,7 +23,6 @@
#include <linux/aio.h>
#include "ext4_jbd2.h"
#include "truncate.h"
-#include "ext4_extents.h" /* Needed for EXT_MAX_BLOCKS */
#include <trace/events/ext4.h>
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index ba33c67d6e4..c79fd7dabe7 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -553,16 +553,15 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
}
if (retval > 0) {
int ret;
- unsigned long long status;
+ unsigned int status;
-#ifdef ES_AGGRESSIVE_TEST
- if (retval != map->m_len) {
- printk("ES len assertion failed for inode: %lu "
- "retval %d != map->m_len %d "
- "in %s (lookup)\n", inode->i_ino, retval,
- map->m_len, __func__);
+ if (unlikely(retval != map->m_len)) {
+ ext4_warning(inode->i_sb,
+ "ES len assertion failed for inode "
+ "%lu: retval %d != map->m_len %d",
+ inode->i_ino, retval, map->m_len);
+ WARN_ON(1);
}
-#endif
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
@@ -654,16 +653,15 @@ found:
if (retval > 0) {
int ret;
- unsigned long long status;
+ unsigned int status;
-#ifdef ES_AGGRESSIVE_TEST
- if (retval != map->m_len) {
- printk("ES len assertion failed for inode: %lu "
- "retval %d != map->m_len %d "
- "in %s (allocation)\n", inode->i_ino, retval,
- map->m_len, __func__);
+ if (unlikely(retval != map->m_len)) {
+ ext4_warning(inode->i_sb,
+ "ES len assertion failed for inode "
+ "%lu: retval %d != map->m_len %d",
+ inode->i_ino, retval, map->m_len);
+ WARN_ON(1);
}
-#endif
/*
* If the extent has been zeroed out, we don't need to update
@@ -729,8 +727,12 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
ret = ext4_map_blocks(handle, inode, &map, flags);
if (ret > 0) {
+ ext4_io_end_t *io_end = ext4_inode_aio(inode);
+
map_bh(bh, inode->i_sb, map.m_pblk);
bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
+ if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN)
+ set_buffer_defer_completion(bh);
bh->b_size = inode->i_sb->s_blocksize * map.m_len;
ret = 0;
}
@@ -971,7 +973,8 @@ retry_journal:
ext4_journal_stop(handle);
goto retry_grab;
}
- wait_on_page_writeback(page);
+ /* In case writeback began while the page was unlocked */
+ wait_for_stable_page(page);
if (ext4_should_dioread_nolock(inode))
ret = __block_write_begin(page, pos, len, ext4_get_block_write);
@@ -1635,16 +1638,15 @@ add_delayed:
set_buffer_delay(bh);
} else if (retval > 0) {
int ret;
- unsigned long long status;
+ unsigned int status;
-#ifdef ES_AGGRESSIVE_TEST
- if (retval != map->m_len) {
- printk("ES len assertion failed for inode: %lu "
- "retval %d != map->m_len %d "
- "in %s (lookup)\n", inode->i_ino, retval,
- map->m_len, __func__);
+ if (unlikely(retval != map->m_len)) {
+ ext4_warning(inode->i_sb,
+ "ES len assertion failed for inode "
+ "%lu: retval %d != map->m_len %d",
+ inode->i_ino, retval, map->m_len);
+ WARN_ON(1);
}
-#endif
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
@@ -1893,12 +1895,32 @@ static int ext4_writepage(struct page *page,
return ret;
}
+static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
+{
+ int len;
+ loff_t size = i_size_read(mpd->inode);
+ int err;
+
+ BUG_ON(page->index != mpd->first_page);
+ if (page->index == size >> PAGE_CACHE_SHIFT)
+ len = size & ~PAGE_CACHE_MASK;
+ else
+ len = PAGE_CACHE_SIZE;
+ clear_page_dirty_for_io(page);
+ err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
+ if (!err)
+ mpd->wbc->nr_to_write--;
+ mpd->first_page++;
+
+ return err;
+}
+
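mpage_submit_page() trims the write length on the page containing EOF; the arithmetic is a shift and mask against the page size. A stand-alone sketch assuming a 4 KiB page, with invented demo_* names:

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)
#define DEMO_PAGE_MASK  (~(DEMO_PAGE_SIZE - 1))

/* How many bytes of page 'index' should be written for a file of 'size'? */
static unsigned long demo_page_write_len(unsigned long index, long long size)
{
        if (index == (unsigned long)(size >> DEMO_PAGE_SHIFT))
                return (unsigned long)size & ~DEMO_PAGE_MASK;
        return DEMO_PAGE_SIZE;
}

int main(void)
{
        printf("%lu\n", demo_page_write_len(0, 10000));  /* 4096: full page */
        printf("%lu\n", demo_page_write_len(2, 10000));  /* 1808: EOF page  */
        return 0;
}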
#define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))
/*
* mballoc gives us at most this number of blocks...
* XXX: That seems to be only a limitation of ext4_mb_normalize_request().
- * The rest of mballoc seems to handle chunks upto full group size.
+ * The rest of mballoc seems to handle chunks up to full group size.
*/
#define MAX_WRITEPAGES_EXTENT_LEN 2048
@@ -1907,82 +1929,94 @@ static int ext4_writepage(struct page *page,
*
* @mpd - extent of blocks
* @lblk - logical number of the block in the file
- * @b_state - b_state of the buffer head added
+ * @bh - buffer head we want to add to the extent
*
- * the function is used to collect contig. blocks in same state
+ * The function is used to collect contig. blocks in the same state. If the
+ * buffer doesn't require mapping for writeback and we haven't started the
+ * extent of buffers to map yet, the function returns 'true' immediately - the
+ * caller can write the buffer right away. Otherwise the function returns true
+ * if the block has been added to the extent, false if the block couldn't be
+ * added.
*/
-static int mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
- unsigned long b_state)
+static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
+ struct buffer_head *bh)
{
struct ext4_map_blocks *map = &mpd->map;
- /* Don't go larger than mballoc is willing to allocate */
- if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
- return 0;
+ /* Buffer that doesn't need mapping for writeback? */
+ if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
+ (!buffer_delay(bh) && !buffer_unwritten(bh))) {
+ /* So far no extent to map => we write the buffer right away */
+ if (map->m_len == 0)
+ return true;
+ return false;
+ }
/* First block in the extent? */
if (map->m_len == 0) {
map->m_lblk = lblk;
map->m_len = 1;
- map->m_flags = b_state & BH_FLAGS;
- return 1;
+ map->m_flags = bh->b_state & BH_FLAGS;
+ return true;
}
+ /* Don't go larger than mballoc is willing to allocate */
+ if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
+ return false;
+
/* Can we merge the block to our big extent? */
if (lblk == map->m_lblk + map->m_len &&
- (b_state & BH_FLAGS) == map->m_flags) {
+ (bh->b_state & BH_FLAGS) == map->m_flags) {
map->m_len++;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
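The helper above accumulates blocks into a single mapping request only while they are logically contiguous, share the same state bits, and stay within what mballoc will allocate. A stand-alone sketch of just that acceptance rule (the buffer-state handling is omitted); the demo_* names are invented:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_MAX_EXTENT_LEN     2048    /* mirrors MAX_WRITEPAGES_EXTENT_LEN */

struct demo_map {
        unsigned int lblk;      /* first logical block of the extent */
        unsigned int len;       /* number of blocks accumulated so far */
        unsigned int flags;     /* state bits shared by every block */
};

/* Try to add block 'lblk' with state 'flags'; return false if it can't join. */
static bool demo_add_block(struct demo_map *map, unsigned int lblk,
                           unsigned int flags)
{
        if (map->len == 0) {                    /* first block starts the extent */
                map->lblk = lblk;
                map->len = 1;
                map->flags = flags;
                return true;
        }
        if (map->len >= DEMO_MAX_EXTENT_LEN)    /* allocator won't go larger */
                return false;
        if (lblk == map->lblk + map->len && flags == map->flags) {
                map->len++;                     /* contiguous and same state */
                return true;
        }
        return false;
}

int main(void)
{
        struct demo_map map = { 0 };

        printf("%d %d %d\n", demo_add_block(&map, 10, 1),
               demo_add_block(&map, 11, 1), demo_add_block(&map, 13, 1));
        return 0;
}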
-static bool add_page_bufs_to_extent(struct mpage_da_data *mpd,
- struct buffer_head *head,
- struct buffer_head *bh,
- ext4_lblk_t lblk)
+/*
+ * mpage_process_page_bufs - submit page buffers for IO or add them to extent
+ *
+ * @mpd - extent of blocks for mapping
+ * @head - the first buffer in the page
+ * @bh - buffer we should start processing from
+ * @lblk - logical number of the block in the file corresponding to @bh
+ *
+ * Walk through page buffers from @bh up to @head (exclusive) and either submit

+ * the page for IO if all buffers in this page were mapped and there's no
+ * accumulated extent of buffers to map or add buffers in the page to the
+ * extent of buffers to map. The function returns 1 if the caller can continue
+ * by processing the next page, 0 if it should stop adding buffers to the
+ * extent to map because we cannot extend it anymore. It can also return value
+ * < 0 in case of error during IO submission.
+ */
+static int mpage_process_page_bufs(struct mpage_da_data *mpd,
+ struct buffer_head *head,
+ struct buffer_head *bh,
+ ext4_lblk_t lblk)
{
struct inode *inode = mpd->inode;
+ int err;
ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
>> inode->i_blkbits;
do {
BUG_ON(buffer_locked(bh));
- if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
- (!buffer_delay(bh) && !buffer_unwritten(bh)) ||
- lblk >= blocks) {
+ if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
/* Found extent to map? */
if (mpd->map.m_len)
- return false;
- if (lblk >= blocks)
- return true;
- continue;
+ return 0;
+ /* Everything mapped so far and we hit EOF */
+ break;
}
- if (!mpage_add_bh_to_extent(mpd, lblk, bh->b_state))
- return false;
} while (lblk++, (bh = bh->b_this_page) != head);
- return true;
-}
-
-static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
-{
- int len;
- loff_t size = i_size_read(mpd->inode);
- int err;
-
- BUG_ON(page->index != mpd->first_page);
- if (page->index == size >> PAGE_CACHE_SHIFT)
- len = size & ~PAGE_CACHE_MASK;
- else
- len = PAGE_CACHE_SIZE;
- clear_page_dirty_for_io(page);
- err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
- if (!err)
- mpd->wbc->nr_to_write--;
- mpd->first_page++;
-
- return err;
+ /* So far everything mapped? Submit the page for IO. */
+ if (mpd->map.m_len == 0) {
+ err = mpage_submit_page(mpd, head->b_page);
+ if (err < 0)
+ return err;
+ }
+ return lblk < blocks;
}
/*
@@ -2006,8 +2040,6 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
struct inode *inode = mpd->inode;
struct buffer_head *head, *bh;
int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits;
- ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
- >> inode->i_blkbits;
pgoff_t start, end;
ext4_lblk_t lblk;
sector_t pblock;
@@ -2029,7 +2061,7 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
if (page->index > end)
break;
- /* Upto 'end' pages must be contiguous */
+ /* Up to 'end' pages must be contiguous */
BUG_ON(page->index != start);
bh = head = page_buffers(page);
do {
@@ -2042,18 +2074,26 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
*/
mpd->map.m_len = 0;
mpd->map.m_flags = 0;
- add_page_bufs_to_extent(mpd, head, bh,
- lblk);
+ /*
+ * FIXME: If dioread_nolock supports
+ * blocksize < pagesize, we need to make
+ * sure we add size mapped so far to
+ * io_end->size as the following call
+ * can submit the page for IO.
+ */
+ err = mpage_process_page_bufs(mpd, head,
+ bh, lblk);
pagevec_release(&pvec);
- return 0;
+ if (err > 0)
+ err = 0;
+ return err;
}
if (buffer_delay(bh)) {
clear_buffer_delay(bh);
bh->b_blocknr = pblock++;
}
clear_buffer_unwritten(bh);
- } while (++lblk < blocks &&
- (bh = bh->b_this_page) != head);
+ } while (lblk++, (bh = bh->b_this_page) != head);
/*
* FIXME: This is going to break if dioread_nolock
@@ -2202,12 +2242,10 @@ static int mpage_map_and_submit_extent(handle_t *handle,
/* Update on-disk size after IO is submitted */
disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT;
- if (disksize > i_size_read(inode))
- disksize = i_size_read(inode);
if (disksize > EXT4_I(inode)->i_disksize) {
int err2;
- ext4_update_i_disksize(inode, disksize);
+ ext4_wb_update_i_disksize(inode, disksize);
err2 = ext4_mark_inode_dirty(handle, inode);
if (err2)
ext4_error(inode->i_sb,
@@ -2222,7 +2260,7 @@ static int mpage_map_and_submit_extent(handle_t *handle,
/*
* Calculate the total number of credits to reserve for one writepages
* iteration. This is called from ext4_writepages(). We map an extent of
- * upto MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
+ * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
* the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
* bpp - 1 blocks in bpp different extents.
*/
@@ -2322,14 +2360,10 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
lblk = ((ext4_lblk_t)page->index) <<
(PAGE_CACHE_SHIFT - blkbits);
head = page_buffers(page);
- if (!add_page_bufs_to_extent(mpd, head, head, lblk))
+ err = mpage_process_page_bufs(mpd, head, head, lblk);
+ if (err <= 0)
goto out;
- /* So far everything mapped? Submit the page for IO. */
- if (mpd->map.m_len == 0) {
- err = mpage_submit_page(mpd, page);
- if (err < 0)
- goto out;
- }
+ err = 0;
/*
* Accumulated enough dirty pages? This doesn't apply
@@ -2413,7 +2447,7 @@ static int ext4_writepages(struct address_space *mapping,
if (ext4_should_dioread_nolock(inode)) {
/*
- * We may need to convert upto one extent per block in
+ * We may need to convert up to one extent per block in
* the page and we may dirty the inode.
*/
rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits);
@@ -2649,7 +2683,7 @@ retry_journal:
goto retry_grab;
}
/* In case writeback began while the page was unlocked */
- wait_on_page_writeback(page);
+ wait_for_stable_page(page);
ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
if (ret < 0) {
@@ -2994,19 +3028,13 @@ static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
}
static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
- ssize_t size, void *private, int ret,
- bool is_async)
+ ssize_t size, void *private)
{
- struct inode *inode = file_inode(iocb->ki_filp);
ext4_io_end_t *io_end = iocb->private;
/* if not async direct IO just return */
- if (!io_end) {
- inode_dio_done(inode);
- if (is_async)
- aio_complete(iocb, ret, 0);
+ if (!io_end)
return;
- }
ext_debug("ext4_end_io_dio(): io_end 0x%p "
"for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
@@ -3016,11 +3044,7 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
iocb->private = NULL;
io_end->offset = offset;
io_end->size = size;
- if (is_async) {
- io_end->iocb = iocb;
- io_end->result = ret;
- }
- ext4_put_io_end_defer(io_end);
+ ext4_put_io_end(io_end);
}
/*
@@ -3105,7 +3129,6 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
ret = -ENOMEM;
goto retake_lock;
}
- io_end->flag |= EXT4_IO_END_DIRECT;
/*
* Grab reference for DIO. Will be dropped in ext4_end_io_dio()
*/
@@ -3150,13 +3173,6 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
if (ret <= 0 && ret != -EIOCBQUEUED && iocb->private) {
WARN_ON(iocb->private != io_end);
WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
- WARN_ON(io_end->iocb);
- /*
- * Generic code already did inode_dio_done() so we
- * have to clear EXT4_IO_END_DIRECT to not do it for
- * the second time.
- */
- io_end->flag = 0;
ext4_put_io_end(io_end);
iocb->private = NULL;
}
@@ -3536,6 +3552,18 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
offset;
}
+ if (offset & (sb->s_blocksize - 1) ||
+ (offset + length) & (sb->s_blocksize - 1)) {
+ /*
+ * Attach jinode to inode for jbd2 if we do any zeroing of
+ * partial block
+ */
+ ret = ext4_inode_attach_jinode(inode);
+ if (ret < 0)
+ goto out_mutex;
+
+ }
+
first_block_offset = round_up(offset, sb->s_blocksize);
last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
@@ -3604,6 +3632,31 @@ out_mutex:
return ret;
}
+int ext4_inode_attach_jinode(struct inode *inode)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ struct jbd2_inode *jinode;
+
+ if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
+ return 0;
+
+ jinode = jbd2_alloc_inode(GFP_KERNEL);
+ spin_lock(&inode->i_lock);
+ if (!ei->jinode) {
+ if (!jinode) {
+ spin_unlock(&inode->i_lock);
+ return -ENOMEM;
+ }
+ ei->jinode = jinode;
+ jbd2_journal_init_jbd_inode(ei->jinode, inode);
+ jinode = NULL;
+ }
+ spin_unlock(&inode->i_lock);
+ if (unlikely(jinode != NULL))
+ jbd2_free_inode(jinode);
+ return 0;
+}
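ext4_inode_attach_jinode() follows the usual allocate-before-lock pattern: allocate speculatively, take the lock, install only if still unset, and free the allocation if another caller won the race. A generic pthread rendering of the same pattern with invented demo_* names:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_obj { int payload; };

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_obj *demo_attached;

/* Attach an object exactly once; safe to call from several threads. */
static int demo_attach(void)
{
        struct demo_obj *obj;

        if (demo_attached)              /* unlocked fast path, as above */
                return 0;

        obj = malloc(sizeof(*obj));     /* allocate outside the lock */
        pthread_mutex_lock(&demo_lock);
        if (!demo_attached) {
                if (!obj) {
                        pthread_mutex_unlock(&demo_lock);
                        return -1;
                }
                obj->payload = 42;
                demo_attached = obj;
                obj = NULL;             /* ownership transferred */
        }
        pthread_mutex_unlock(&demo_lock);
        free(obj);                      /* lost the race, or never needed */
        return 0;
}

int main(void)
{
        demo_attach();
        printf("payload %d\n", demo_attached->payload);
        return 0;
}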
+
/*
* ext4_truncate()
*
@@ -3664,6 +3717,12 @@ void ext4_truncate(struct inode *inode)
return;
}
+ /* If we zero-out tail of the page, we have to create jinode for jbd2 */
+ if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
+ if (ext4_inode_attach_jinode(inode) < 0)
+ return;
+ }
+
if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
credits = ext4_writepage_trans_blocks(inode);
else
@@ -4526,7 +4585,9 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
ext4_journal_stop(handle);
}
- if (attr->ia_valid & ATTR_SIZE) {
+ if (attr->ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
+ handle_t *handle;
+ loff_t oldsize = inode->i_size;
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
@@ -4534,73 +4595,69 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
if (attr->ia_size > sbi->s_bitmap_maxbytes)
return -EFBIG;
}
- }
-
- if (S_ISREG(inode->i_mode) &&
- attr->ia_valid & ATTR_SIZE &&
- (attr->ia_size < inode->i_size)) {
- handle_t *handle;
-
- handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
- if (IS_ERR(handle)) {
- error = PTR_ERR(handle);
- goto err_out;
- }
- if (ext4_handle_valid(handle)) {
- error = ext4_orphan_add(handle, inode);
- orphan = 1;
- }
- EXT4_I(inode)->i_disksize = attr->ia_size;
- rc = ext4_mark_inode_dirty(handle, inode);
- if (!error)
- error = rc;
- ext4_journal_stop(handle);
-
- if (ext4_should_order_data(inode)) {
- error = ext4_begin_ordered_truncate(inode,
+ if (S_ISREG(inode->i_mode) &&
+ (attr->ia_size < inode->i_size)) {
+ if (ext4_should_order_data(inode)) {
+ error = ext4_begin_ordered_truncate(inode,
attr->ia_size);
- if (error) {
- /* Do as much error cleanup as possible */
- handle = ext4_journal_start(inode,
- EXT4_HT_INODE, 3);
- if (IS_ERR(handle)) {
- ext4_orphan_del(NULL, inode);
+ if (error)
goto err_out;
- }
- ext4_orphan_del(handle, inode);
- orphan = 0;
- ext4_journal_stop(handle);
+ }
+ handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
+ if (IS_ERR(handle)) {
+ error = PTR_ERR(handle);
goto err_out;
}
- }
- }
-
- if (attr->ia_valid & ATTR_SIZE) {
- if (attr->ia_size != inode->i_size) {
- loff_t oldsize = inode->i_size;
-
- i_size_write(inode, attr->ia_size);
- /*
- * Blocks are going to be removed from the inode. Wait
- * for dio in flight. Temporarily disable
- * dioread_nolock to prevent livelock.
- */
- if (orphan) {
- if (!ext4_should_journal_data(inode)) {
- ext4_inode_block_unlocked_dio(inode);
- inode_dio_wait(inode);
- ext4_inode_resume_unlocked_dio(inode);
- } else
- ext4_wait_for_tail_page_commit(inode);
+ if (ext4_handle_valid(handle)) {
+ error = ext4_orphan_add(handle, inode);
+ orphan = 1;
}
+ down_write(&EXT4_I(inode)->i_data_sem);
+ EXT4_I(inode)->i_disksize = attr->ia_size;
+ rc = ext4_mark_inode_dirty(handle, inode);
+ if (!error)
+ error = rc;
/*
- * Truncate pagecache after we've waited for commit
- * in data=journal mode to make pages freeable.
+ * We have to update i_size under i_data_sem together
+ * with i_disksize to avoid races with writeback code
+ * running ext4_wb_update_i_disksize().
*/
- truncate_pagecache(inode, oldsize, inode->i_size);
+ if (!error)
+ i_size_write(inode, attr->ia_size);
+ up_write(&EXT4_I(inode)->i_data_sem);
+ ext4_journal_stop(handle);
+ if (error) {
+ ext4_orphan_del(NULL, inode);
+ goto err_out;
+ }
+ } else
+ i_size_write(inode, attr->ia_size);
+
+ /*
+ * Blocks are going to be removed from the inode. Wait
+ * for dio in flight. Temporarily disable
+ * dioread_nolock to prevent livelock.
+ */
+ if (orphan) {
+ if (!ext4_should_journal_data(inode)) {
+ ext4_inode_block_unlocked_dio(inode);
+ inode_dio_wait(inode);
+ ext4_inode_resume_unlocked_dio(inode);
+ } else
+ ext4_wait_for_tail_page_commit(inode);
}
- ext4_truncate(inode);
+ /*
+ * Truncate pagecache after we've waited for commit
+ * in data=journal mode to make pages freeable.
+ */
+ truncate_pagecache(inode, oldsize, inode->i_size);
}
+ /*
+ * We want to call ext4_truncate() even if attr->ia_size ==
+ * inode->i_size for cases like truncation of fallocated space
+ */
+ if (attr->ia_valid & ATTR_SIZE)
+ ext4_truncate(inode);
if (!rc) {
setattr_copy(inode, attr);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 9491ac0590f..a569d335f80 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -17,7 +17,6 @@
#include <asm/uaccess.h>
#include "ext4_jbd2.h"
#include "ext4.h"
-#include "ext4_extents.h"
#define MAX_32_NUM ((((unsigned long long) 1) << 32) - 1)
@@ -77,8 +76,10 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data));
memswap(&ei1->i_flags, &ei2->i_flags, sizeof(ei1->i_flags));
memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize));
- memswap(&ei1->i_es_tree, &ei2->i_es_tree, sizeof(ei1->i_es_tree));
- memswap(&ei1->i_es_lru_nr, &ei2->i_es_lru_nr, sizeof(ei1->i_es_lru_nr));
+ ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
+ ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
+ ext4_es_lru_del(inode1);
+ ext4_es_lru_del(inode2);
isize = i_size_read(inode1);
i_size_write(inode1, i_size_read(inode2));
@@ -622,6 +623,8 @@ resizefs_out:
return 0;
}
+ case EXT4_IOC_PRECACHE_EXTENTS:
+ return ext4_ext_precache(inode);
default:
return -ENOTTY;
@@ -686,6 +689,7 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case EXT4_IOC_MOVE_EXT:
case FITRIM:
case EXT4_IOC_RESIZE_FS:
+ case EXT4_IOC_PRECACHE_EXTENTS:
break;
default:
return -ENOIOCTLCMD;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 4bbbf13bd74..a41e3ba8cfa 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -751,13 +751,15 @@ void ext4_mb_generate_buddy(struct super_block *sb,
if (free != grp->bb_free) {
ext4_grp_locked_error(sb, group, 0, 0,
- "%u clusters in bitmap, %u in gd",
+ "%u clusters in bitmap, %u in gd; "
+ "block bitmap corrupt.",
free, grp->bb_free);
/*
- * If we intent to continue, we consider group descritor
+ * If we intend to continue, we consider group descriptor
* corrupt and update bb_free using bitmap value
*/
grp->bb_free = free;
+ set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
}
mb_set_largest_free_order(sb, grp);
@@ -1398,6 +1400,10 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
BUG_ON(last >= (sb->s_blocksize << 3));
assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
+ /* Don't bother if the block group is corrupt. */
+ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
+ return;
+
mb_check_buddy(e4b);
mb_free_blocks_double(inode, e4b, first, count);
@@ -1423,7 +1429,11 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
inode ? inode->i_ino : 0,
blocknr,
"freeing already freed block "
- "(bit %u)", block);
+ "(bit %u); block bitmap corrupt.",
+ block);
+ /* Mark the block group as corrupt. */
+ set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
+ &e4b->bd_info->bb_state);
mb_regenerate_buddy(e4b);
goto done;
}
@@ -1790,6 +1800,11 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
if (err)
return err;
+ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) {
+ ext4_mb_unload_buddy(e4b);
+ return 0;
+ }
+
ext4_lock_group(ac->ac_sb, group);
max = mb_find_extent(e4b, ac->ac_g_ex.fe_start,
ac->ac_g_ex.fe_len, &ex);
@@ -1987,6 +2002,9 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
if (cr <= 2 && free < ac->ac_g_ex.fe_len)
return 0;
+ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
+ return 0;
+
/* We only do this if the grp has never been initialized */
if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
int ret = ext4_mb_init_group(ac->ac_sb, group);
@@ -4585,6 +4603,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
struct buffer_head *gd_bh;
ext4_group_t block_group;
struct ext4_sb_info *sbi;
+ struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_buddy e4b;
unsigned int count_clusters;
int err = 0;
@@ -4673,6 +4692,10 @@ do_more:
overflow = 0;
ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
+ if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
+ ext4_get_group_info(sb, block_group))))
+ return;
+
/*
* Check to see if we are freeing blocks across a group
* boundary.
@@ -4784,7 +4807,6 @@ do_more:
ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
ext4_group_desc_csum_set(sb, block_group, gdp);
ext4_unlock_group(sb, block_group);
- percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
@@ -4792,10 +4814,23 @@ do_more:
&sbi->s_flex_groups[flex_group].free_clusters);
}
- ext4_mb_unload_buddy(&e4b);
-
- if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
+ if (flags & EXT4_FREE_BLOCKS_RESERVE && ei->i_reserved_data_blocks) {
+ percpu_counter_add(&sbi->s_dirtyclusters_counter,
+ count_clusters);
+ spin_lock(&ei->i_block_reservation_lock);
+ if (flags & EXT4_FREE_BLOCKS_METADATA)
+ ei->i_reserved_meta_blocks += count_clusters;
+ else
+ ei->i_reserved_data_blocks += count_clusters;
+ spin_unlock(&ei->i_block_reservation_lock);
+ if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
+ dquot_reclaim_block(inode,
+ EXT4_C2B(sbi, count_clusters));
+ } else if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
+ percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
+
+ ext4_mb_unload_buddy(&e4b);
/* We dirtied the bitmap block */
BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
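All of the mballoc hunks above follow one defensive pattern: when the on-disk block bitmap disagrees with the group descriptor, a sticky EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT is set on the group and every later allocation or free path skips that group instead of trusting its metadata. The following standalone sketch models that pattern in plain C; the struct, flag value and numbers are illustrative, not ext4's.

#include <stdio.h>
#include <stdbool.h>

/* Standalone illustration of the corrupt-group pattern above; not ext4 code. */
#define GRP_BBITMAP_CORRUPT (1u << 0)   /* sticky "don't trust this group" flag */

struct group_info {
	unsigned long state;
	unsigned int bb_free;               /* free blocks per the group descriptor */
};

/* Called when a consistency check fails: remember it and stop using the group. */
static void mark_group_corrupt(struct group_info *grp, unsigned int free_in_bitmap)
{
	fprintf(stderr, "%u blocks in bitmap, %u in descriptor; marking corrupt\n",
		free_in_bitmap, grp->bb_free);
	grp->bb_free = free_in_bitmap;      /* prefer the bitmap if we keep going */
	grp->state |= GRP_BBITMAP_CORRUPT;
}

/* Every allocator/free path checks the flag first, as the hunks above do. */
static bool group_usable(const struct group_info *grp)
{
	return !(grp->state & GRP_BBITMAP_CORRUPT);
}

int main(void)
{
	struct group_info grp = { .state = 0, .bb_free = 128 };

	if (!group_usable(&grp))
		return 1;
	mark_group_corrupt(&grp, 120);      /* bitmap said 120, descriptor said 128 */
	printf("usable after corruption: %s\n", group_usable(&grp) ? "yes" : "no");
	return 0;
}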
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 49e8bdff916..2ae73a80c19 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -39,7 +39,7 @@ static int finish_range(handle_t *handle, struct inode *inode,
newext.ee_block = cpu_to_le32(lb->first_block);
newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1);
ext4_ext_store_pblock(&newext, lb->first_pblock);
- path = ext4_ext_find_extent(inode, lb->first_block, NULL);
+ path = ext4_ext_find_extent(inode, lb->first_block, NULL, 0);
if (IS_ERR(path)) {
retval = PTR_ERR(path);
@@ -494,7 +494,7 @@ int ext4_ext_migrate(struct inode *inode)
* superblock modification.
*
* For the tmp_inode we already have committed the
- * trascation that created the inode. Later as and
+ * transaction that created the inode. Later as and
 * when we add extents we extend the journal
*/
/*
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index e86dddbd829..7fa4d855dbd 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -37,7 +37,7 @@ get_ext_path(struct inode *inode, ext4_lblk_t lblock,
int ret = 0;
struct ext4_ext_path *path;
- path = ext4_ext_find_extent(inode, lblock, *orig_path);
+ path = ext4_ext_find_extent(inode, lblock, *orig_path, EXT4_EX_NOCACHE);
if (IS_ERR(path))
ret = PTR_ERR(path);
else if (path[ext_depth(inode)].p_ext == NULL)
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 35f55a0dbc4..1bec5a5c1e4 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -3005,15 +3005,19 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
/*
* Anybody can rename anything with this: the permission checks are left to the
* higher-level routines.
+ *
+ * n.b. old_{dentry,inode} refers to the source dentry/inode
+ * while new_{dentry,inode} refers to the destination dentry/inode
+ * This comes from rename(const char *oldpath, const char *newpath)
*/
static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
- handle_t *handle;
+ handle_t *handle = NULL;
struct inode *old_inode, *new_inode;
struct buffer_head *old_bh, *new_bh, *dir_bh;
struct ext4_dir_entry_2 *old_de, *new_de;
- int retval, force_da_alloc = 0;
+ int retval;
int inlined = 0, new_inlined = 0;
struct ext4_dir_entry_2 *parent_de;
@@ -3026,14 +3030,6 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
* in separate transaction */
if (new_dentry->d_inode)
dquot_initialize(new_dentry->d_inode);
- handle = ext4_journal_start(old_dir, EXT4_HT_DIR,
- (2 * EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
- EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2));
- if (IS_ERR(handle))
- return PTR_ERR(handle);
-
- if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
- ext4_handle_sync(handle);
old_bh = ext4_find_entry(old_dir, &old_dentry->d_name, &old_de, NULL);
/*
@@ -3056,6 +3052,18 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
new_bh = NULL;
}
}
+ if (new_inode && !test_opt(new_dir->i_sb, NO_AUTO_DA_ALLOC))
+ ext4_alloc_da_blocks(old_inode);
+
+ handle = ext4_journal_start(old_dir, EXT4_HT_DIR,
+ (2 * EXT4_DATA_TRANS_BLOCKS(old_dir->i_sb) +
+ EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2));
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+
+ if (IS_DIRSYNC(old_dir) || IS_DIRSYNC(new_dir))
+ ext4_handle_sync(handle);
+
if (S_ISDIR(old_inode->i_mode)) {
if (new_inode) {
retval = -ENOTEMPTY;
@@ -3186,8 +3194,6 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
ext4_mark_inode_dirty(handle, new_inode);
if (!new_inode->i_nlink)
ext4_orphan_add(handle, new_inode);
- if (!test_opt(new_dir->i_sb, NO_AUTO_DA_ALLOC))
- force_da_alloc = 1;
}
retval = 0;
@@ -3195,9 +3201,8 @@ end_rename:
brelse(dir_bh);
brelse(old_bh);
brelse(new_bh);
- ext4_journal_stop(handle);
- if (retval == 0 && force_da_alloc)
- ext4_alloc_da_blocks(old_inode);
+ if (handle)
+ ext4_journal_stop(handle);
return retval;
}
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 6625d210fb4..d7d0c7b46ed 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -123,10 +123,6 @@ static void ext4_release_io_end(ext4_io_end_t *io_end)
ext4_finish_bio(bio);
bio_put(bio);
}
- if (io_end->flag & EXT4_IO_END_DIRECT)
- inode_dio_done(io_end->inode);
- if (io_end->iocb)
- aio_complete(io_end->iocb, io_end->result, 0);
kmem_cache_free(io_end_cachep, io_end);
}
@@ -204,19 +200,14 @@ static void ext4_add_complete_io(ext4_io_end_t *io_end)
struct workqueue_struct *wq;
unsigned long flags;
- BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
+ /* Only reserved conversions from writeback should enter here */
+ WARN_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
+ WARN_ON(!io_end->handle);
spin_lock_irqsave(&ei->i_completed_io_lock, flags);
- if (io_end->handle) {
- wq = EXT4_SB(io_end->inode->i_sb)->rsv_conversion_wq;
- if (list_empty(&ei->i_rsv_conversion_list))
- queue_work(wq, &ei->i_rsv_conversion_work);
- list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
- } else {
- wq = EXT4_SB(io_end->inode->i_sb)->unrsv_conversion_wq;
- if (list_empty(&ei->i_unrsv_conversion_list))
- queue_work(wq, &ei->i_unrsv_conversion_work);
- list_add_tail(&io_end->list, &ei->i_unrsv_conversion_list);
- }
+ wq = EXT4_SB(io_end->inode->i_sb)->rsv_conversion_wq;
+ if (list_empty(&ei->i_rsv_conversion_list))
+ queue_work(wq, &ei->i_rsv_conversion_work);
+ list_add_tail(&io_end->list, &ei->i_rsv_conversion_list);
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}
@@ -256,13 +247,6 @@ void ext4_end_io_rsv_work(struct work_struct *work)
ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_rsv_conversion_list);
}
-void ext4_end_io_unrsv_work(struct work_struct *work)
-{
- struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
- i_unrsv_conversion_work);
- ext4_do_flush_completed_IO(&ei->vfs_inode, &ei->i_unrsv_conversion_list);
-}
-
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index ffdfe385b02..2c2e6cbc6be 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -762,9 +762,7 @@ static void ext4_put_super(struct super_block *sb)
ext4_unregister_li_request(sb);
dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
- flush_workqueue(sbi->unrsv_conversion_wq);
flush_workqueue(sbi->rsv_conversion_wq);
- destroy_workqueue(sbi->unrsv_conversion_wq);
destroy_workqueue(sbi->rsv_conversion_wq);
if (sbi->s_journal) {
@@ -875,14 +873,12 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
#endif
ei->jinode = NULL;
INIT_LIST_HEAD(&ei->i_rsv_conversion_list);
- INIT_LIST_HEAD(&ei->i_unrsv_conversion_list);
spin_lock_init(&ei->i_completed_io_lock);
ei->i_sync_tid = 0;
ei->i_datasync_tid = 0;
atomic_set(&ei->i_ioend_count, 0);
atomic_set(&ei->i_unwritten, 0);
INIT_WORK(&ei->i_rsv_conversion_work, ext4_end_io_rsv_work);
- INIT_WORK(&ei->i_unrsv_conversion_work, ext4_end_io_unrsv_work);
return &ei->vfs_inode;
}
@@ -1134,8 +1130,8 @@ enum {
Opt_nouid32, Opt_debug, Opt_removed,
Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload,
- Opt_commit, Opt_min_batch_time, Opt_max_batch_time,
- Opt_journal_dev, Opt_journal_checksum, Opt_journal_async_commit,
+ Opt_commit, Opt_min_batch_time, Opt_max_batch_time, Opt_journal_dev,
+ Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit,
Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
Opt_data_err_abort, Opt_data_err_ignore,
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
@@ -1179,6 +1175,7 @@ static const match_table_t tokens = {
{Opt_min_batch_time, "min_batch_time=%u"},
{Opt_max_batch_time, "max_batch_time=%u"},
{Opt_journal_dev, "journal_dev=%u"},
+ {Opt_journal_path, "journal_path=%s"},
{Opt_journal_checksum, "journal_checksum"},
{Opt_journal_async_commit, "journal_async_commit"},
{Opt_abort, "abort"},
@@ -1338,6 +1335,7 @@ static int clear_qf_name(struct super_block *sb, int qtype)
#define MOPT_NO_EXT2 0x0100
#define MOPT_NO_EXT3 0x0200
#define MOPT_EXT4_ONLY (MOPT_NO_EXT2 | MOPT_NO_EXT3)
+#define MOPT_STRING 0x0400
static const struct mount_opts {
int token;
@@ -1359,7 +1357,7 @@ static const struct mount_opts {
{Opt_delalloc, EXT4_MOUNT_DELALLOC,
MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
- MOPT_EXT4_ONLY | MOPT_CLEAR | MOPT_EXPLICIT},
+ MOPT_EXT4_ONLY | MOPT_CLEAR},
{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
MOPT_EXT4_ONLY | MOPT_SET},
{Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
@@ -1387,6 +1385,7 @@ static const struct mount_opts {
{Opt_resuid, 0, MOPT_GTE0},
{Opt_resgid, 0, MOPT_GTE0},
{Opt_journal_dev, 0, MOPT_GTE0},
+ {Opt_journal_path, 0, MOPT_STRING},
{Opt_journal_ioprio, 0, MOPT_GTE0},
{Opt_data_journal, EXT4_MOUNT_JOURNAL_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
{Opt_data_ordered, EXT4_MOUNT_ORDERED_DATA, MOPT_NO_EXT2 | MOPT_DATAJ},
@@ -1480,7 +1479,7 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
return -1;
}
- if (args->from && match_int(args, &arg))
+ if (args->from && !(m->flags & MOPT_STRING) && match_int(args, &arg))
return -1;
if (args->from && (m->flags & MOPT_GTE0) && (arg < 0))
return -1;
@@ -1544,6 +1543,44 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
return -1;
}
*journal_devnum = arg;
+ } else if (token == Opt_journal_path) {
+ char *journal_path;
+ struct inode *journal_inode;
+ struct path path;
+ int error;
+
+ if (is_remount) {
+ ext4_msg(sb, KERN_ERR,
+ "Cannot specify journal on remount");
+ return -1;
+ }
+ journal_path = match_strdup(&args[0]);
+ if (!journal_path) {
+ ext4_msg(sb, KERN_ERR, "error: could not dup "
+ "journal device string");
+ return -1;
+ }
+
+ error = kern_path(journal_path, LOOKUP_FOLLOW, &path);
+ if (error) {
+ ext4_msg(sb, KERN_ERR, "error: could not find "
+ "journal device path: error %d", error);
+ kfree(journal_path);
+ return -1;
+ }
+
+ journal_inode = path.dentry->d_inode;
+ if (!S_ISBLK(journal_inode->i_mode)) {
+ ext4_msg(sb, KERN_ERR, "error: journal path %s "
+ "is not a block device", journal_path);
+ path_put(&path);
+ kfree(journal_path);
+ return -1;
+ }
+
+ *journal_devnum = new_encode_dev(journal_inode->i_rdev);
+ path_put(&path);
+ kfree(journal_path);
} else if (token == Opt_journal_ioprio) {
if (arg > 7) {
ext4_msg(sb, KERN_ERR, "Invalid journal IO priority"
@@ -3483,7 +3520,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
if (test_opt(sb, DIOREAD_NOLOCK)) {
ext4_msg(sb, KERN_ERR, "can't mount with "
- "both data=journal and delalloc");
+ "both data=journal and dioread_nolock");
goto failed_mount;
}
if (test_opt(sb, DELALLOC))
@@ -3954,14 +3991,6 @@ no_journal:
goto failed_mount4;
}
- EXT4_SB(sb)->unrsv_conversion_wq =
- alloc_workqueue("ext4-unrsv-conversion", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
- if (!EXT4_SB(sb)->unrsv_conversion_wq) {
- printk(KERN_ERR "EXT4-fs: failed to create workqueue\n");
- ret = -ENOMEM;
- goto failed_mount4;
- }
-
/*
* The jbd2_journal_load will have done any necessary log recovery,
* so we can safely mount the rest of the filesystem now.
@@ -4115,8 +4144,6 @@ failed_mount4:
ext4_msg(sb, KERN_ERR, "mount failed");
if (EXT4_SB(sb)->rsv_conversion_wq)
destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
- if (EXT4_SB(sb)->unrsv_conversion_wq)
- destroy_workqueue(EXT4_SB(sb)->unrsv_conversion_wq);
failed_mount_wq:
if (sbi->s_journal) {
jbd2_journal_destroy(sbi->s_journal);
@@ -4564,7 +4591,6 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
trace_ext4_sync_fs(sb, wait);
flush_workqueue(sbi->rsv_conversion_wq);
- flush_workqueue(sbi->unrsv_conversion_wq);
/*
* Writeback quota in non-journalled quota case - journalled quota has
* no dirty dquots
@@ -4600,7 +4626,6 @@ static int ext4_sync_fs_nojournal(struct super_block *sb, int wait)
trace_ext4_sync_fs(sb, wait);
flush_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
- flush_workqueue(EXT4_SB(sb)->unrsv_conversion_wq);
dquot_writeback_dquots(sb, -1);
if (wait && test_opt(sb, BARRIER))
ret = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
@@ -4727,6 +4752,21 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
goto restore_opts;
}
+ if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
+ if (test_opt2(sb, EXPLICIT_DELALLOC)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "both data=journal and delalloc");
+ err = -EINVAL;
+ goto restore_opts;
+ }
+ if (test_opt(sb, DIOREAD_NOLOCK)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "both data=journal and dioread_nolock");
+ err = -EINVAL;
+ goto restore_opts;
+ }
+ }
+
if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
ext4_abort(sb, "Abort forced by user");
@@ -5481,6 +5521,7 @@ static void __exit ext4_exit_fs(void)
kset_unregister(ext4_kset);
ext4_exit_system_zone();
ext4_exit_pageio();
+ ext4_exit_es();
}
MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
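The new Opt_journal_path branch above boils down to: duplicate the option string, resolve the path, require that it names a block device, and encode its device number just as journal_dev=<num> would. A rough userspace analogue of that validation, with stat(2) standing in for kern_path()/d_inode; everything below is an illustration, not ext4 code:

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

/* Userspace stand-in for the journal_path validation above; not ext4 code. */
static dev_t journal_devnum_from_path(const char *journal_path)
{
	struct stat st;

	if (stat(journal_path, &st) != 0) {
		perror("could not find journal device path");
		exit(EXIT_FAILURE);
	}
	if (!S_ISBLK(st.st_mode)) {
		fprintf(stderr, "journal path %s is not a block device\n",
			journal_path);
		exit(EXIT_FAILURE);
	}
	return st.st_rdev;                  /* what the kernel encodes from i_rdev */
}

int main(int argc, char **argv)
{
	dev_t dev;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <journal-block-device>\n", argv[0]);
		return EXIT_FAILURE;
	}
	dev = journal_devnum_from_path(argv[1]);
	printf("journal_dev=%u:%u\n", major(dev), minor(dev));
	return 0;
}

With the option in place, an administrator can pass the external journal by name (journal_path=/dev/...) instead of computing the encoded device number for journal_dev= by hand.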
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 66a6b85a51d..bb312201ca9 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -182,7 +182,7 @@ const struct address_space_operations f2fs_meta_aops = {
.set_page_dirty = f2fs_set_meta_page_dirty,
};
-int check_orphan_space(struct f2fs_sb_info *sbi)
+int acquire_orphan_inode(struct f2fs_sb_info *sbi)
{
unsigned int max_orphans;
int err = 0;
@@ -197,10 +197,19 @@ int check_orphan_space(struct f2fs_sb_info *sbi)
mutex_lock(&sbi->orphan_inode_mutex);
if (sbi->n_orphans >= max_orphans)
err = -ENOSPC;
+ else
+ sbi->n_orphans++;
mutex_unlock(&sbi->orphan_inode_mutex);
return err;
}
+void release_orphan_inode(struct f2fs_sb_info *sbi)
+{
+ mutex_lock(&sbi->orphan_inode_mutex);
+ sbi->n_orphans--;
+ mutex_unlock(&sbi->orphan_inode_mutex);
+}
+
void add_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
struct list_head *head, *this;
@@ -229,21 +238,18 @@ retry:
list_add(&new->list, this->prev);
else
list_add_tail(&new->list, head);
-
- sbi->n_orphans++;
out:
mutex_unlock(&sbi->orphan_inode_mutex);
}
void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
- struct list_head *this, *next, *head;
+ struct list_head *head;
struct orphan_inode_entry *orphan;
mutex_lock(&sbi->orphan_inode_mutex);
head = &sbi->orphan_inode_list;
- list_for_each_safe(this, next, head) {
- orphan = list_entry(this, struct orphan_inode_entry, list);
+ list_for_each_entry(orphan, head, list) {
if (orphan->ino == ino) {
list_del(&orphan->list);
kmem_cache_free(orphan_entry_slab, orphan);
@@ -373,7 +379,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
if (!f2fs_crc_valid(crc, cp_block, crc_offset))
goto invalid_cp1;
- pre_version = le64_to_cpu(cp_block->checkpoint_ver);
+ pre_version = cur_cp_version(cp_block);
/* Read the 2nd cp block in this CP pack */
cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
@@ -388,7 +394,7 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
if (!f2fs_crc_valid(crc, cp_block, crc_offset))
goto invalid_cp2;
- cur_version = le64_to_cpu(cp_block->checkpoint_ver);
+ cur_version = cur_cp_version(cp_block);
if (cur_version == pre_version) {
*version = cur_version;
@@ -793,7 +799,7 @@ void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount)
* Increase the version number so that
* SIT entries and seg summaries are written at correct place
*/
- ckpt_ver = le64_to_cpu(ckpt->checkpoint_ver);
+ ckpt_ver = cur_cp_version(ckpt);
ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);
/* write cached NAT/SIT entries to NAT/SIT area */
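The checkpoint.c hunks above turn check_orphan_space() into a reserve-then-commit scheme: acquire_orphan_inode() claims a slot (bumping n_orphans under the mutex) before the operation starts, and the caller either consumes it through add_orphan_inode() or hands it back through release_orphan_inode(), so a mid-operation failure can no longer race the counter or run out of slots halfway through. A toy standalone sketch of that discipline; the pthread mutex, the limit and the names are stand-ins, not f2fs code:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

/* Toy version of the reserve/commit/release scheme the hunks above switch to. */
static pthread_mutex_t orphan_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int n_orphans;
static unsigned int max_orphans = 4;    /* illustrative limit */

static int acquire_orphan_slot(void)    /* reserve a slot up front */
{
	int err = 0;

	pthread_mutex_lock(&orphan_mutex);
	if (n_orphans >= max_orphans)
		err = -ENOSPC;
	else
		n_orphans++;
	pthread_mutex_unlock(&orphan_mutex);
	return err;
}

static void release_orphan_slot(void)   /* give it back if it was never used */
{
	pthread_mutex_lock(&orphan_mutex);
	n_orphans--;
	pthread_mutex_unlock(&orphan_mutex);
}

int main(void)
{
	if (acquire_orphan_slot())
		return 1;                   /* fail early, before touching metadata */
	/* ... operation proceeds; if the inode ends up still linked ... */
	release_orphan_slot();              /* matches the unlink/rename callers */
	printf("orphans reserved: %u\n", n_orphans);
	return 0;
}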
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 035f9a345cd..941f9b9ca3a 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -37,9 +37,9 @@ static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
struct page *node_page = dn->node_page;
unsigned int ofs_in_node = dn->ofs_in_node;
- wait_on_page_writeback(node_page);
+ f2fs_wait_on_page_writeback(node_page, NODE, false);
- rn = (struct f2fs_node *)page_address(node_page);
+ rn = F2FS_NODE(node_page);
/* Get physical address of data block */
addr_array = blkaddr_in_node(rn);
@@ -117,7 +117,8 @@ void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
block_t start_blkaddr, end_blkaddr;
BUG_ON(blk_addr == NEW_ADDR);
- fofs = start_bidx_of_node(ofs_of_node(dn->node_page)) + dn->ofs_in_node;
+ fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
+ dn->ofs_in_node;
/* Update the page address in the parent node */
__set_data_blkaddr(dn, blk_addr);
@@ -176,7 +177,6 @@ void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
end_update:
write_unlock(&fi->ext.ext_lock);
sync_inode_page(dn);
- return;
}
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
@@ -260,8 +260,17 @@ repeat:
if (PageUptodate(page))
return page;
- BUG_ON(dn.data_blkaddr == NEW_ADDR);
- BUG_ON(dn.data_blkaddr == NULL_ADDR);
+ /*
+ * A new dentry page may have been allocated but could not be written,
+ * since its new inode page couldn't be allocated due to -ENOSPC.
+ * In such a case, its blkaddr can remain NEW_ADDR;
+ * see f2fs_add_link -> get_new_data_page -> init_inode_metadata.
+ */
+ if (dn.data_blkaddr == NEW_ADDR) {
+ zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ SetPageUptodate(page);
+ return page;
+ }
err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
if (err)
@@ -365,7 +374,6 @@ static void read_end_io(struct bio *bio, int err)
}
unlock_page(page);
} while (bvec >= bio->bi_io_vec);
- kfree(bio->bi_private);
bio_put(bio);
}
@@ -391,7 +399,6 @@ int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
bio->bi_end_io = read_end_io;
if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
- kfree(bio->bi_private);
bio_put(bio);
up_read(&sbi->bio_sem);
f2fs_put_page(page, 1);
@@ -442,7 +449,7 @@ static int get_data_block_ro(struct inode *inode, sector_t iblock,
unsigned int end_offset;
end_offset = IS_INODE(dn.node_page) ?
- ADDRS_PER_INODE :
+ ADDRS_PER_INODE(F2FS_I(inode)) :
ADDRS_PER_BLOCK;
clear_buffer_new(bh_result);
@@ -636,9 +643,6 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping,
int err = 0;
int ilock;
- /* for nobh_write_end */
- *fsdata = NULL;
-
f2fs_balance_fs(sbi);
repeat:
page = grab_cache_page_write_begin(mapping, index, flags);
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 0d6c6aafb23..a84b0a8e685 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -29,7 +29,7 @@ static DEFINE_MUTEX(f2fs_stat_mutex);
static void update_general_status(struct f2fs_sb_info *sbi)
{
- struct f2fs_stat_info *si = sbi->stat_info;
+ struct f2fs_stat_info *si = F2FS_STAT(sbi);
int i;
/* valid check of the segment numbers */
@@ -83,7 +83,7 @@ static void update_general_status(struct f2fs_sb_info *sbi)
*/
static void update_sit_info(struct f2fs_sb_info *sbi)
{
- struct f2fs_stat_info *si = sbi->stat_info;
+ struct f2fs_stat_info *si = F2FS_STAT(sbi);
unsigned int blks_per_sec, hblks_per_sec, total_vblocks, bimodal, dist;
struct sit_info *sit_i = SIT_I(sbi);
unsigned int segno, vblocks;
@@ -118,7 +118,7 @@ static void update_sit_info(struct f2fs_sb_info *sbi)
*/
static void update_mem_info(struct f2fs_sb_info *sbi)
{
- struct f2fs_stat_info *si = sbi->stat_info;
+ struct f2fs_stat_info *si = F2FS_STAT(sbi);
unsigned npages;
if (si->base_mem)
@@ -253,21 +253,21 @@ static int stat_show(struct seq_file *s, void *v)
si->nats, NM_WOUT_THRESHOLD);
seq_printf(s, " - SITs: %5d\n - free_nids: %5d\n",
si->sits, si->fnids);
- seq_printf(s, "\nDistribution of User Blocks:");
- seq_printf(s, " [ valid | invalid | free ]\n");
- seq_printf(s, " [");
+ seq_puts(s, "\nDistribution of User Blocks:");
+ seq_puts(s, " [ valid | invalid | free ]\n");
+ seq_puts(s, " [");
for (j = 0; j < si->util_valid; j++)
- seq_printf(s, "-");
- seq_printf(s, "|");
+ seq_putc(s, '-');
+ seq_putc(s, '|');
for (j = 0; j < si->util_invalid; j++)
- seq_printf(s, "-");
- seq_printf(s, "|");
+ seq_putc(s, '-');
+ seq_putc(s, '|');
for (j = 0; j < si->util_free; j++)
- seq_printf(s, "-");
- seq_printf(s, "]\n\n");
+ seq_putc(s, '-');
+ seq_puts(s, "]\n\n");
seq_printf(s, "SSR: %u blocks in %u segments\n",
si->block_count[SSR], si->segment_count[SSR]);
seq_printf(s, "LFS: %u blocks in %u segments\n",
@@ -305,11 +305,10 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
struct f2fs_stat_info *si;
- sbi->stat_info = kzalloc(sizeof(struct f2fs_stat_info), GFP_KERNEL);
- if (!sbi->stat_info)
+ si = kzalloc(sizeof(struct f2fs_stat_info), GFP_KERNEL);
+ if (!si)
return -ENOMEM;
- si = sbi->stat_info;
si->all_area_segs = le32_to_cpu(raw_super->segment_count);
si->sit_area_segs = le32_to_cpu(raw_super->segment_count_sit);
si->nat_area_segs = le32_to_cpu(raw_super->segment_count_nat);
@@ -319,6 +318,7 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
si->main_area_zones = si->main_area_sections /
le32_to_cpu(raw_super->secs_per_zone);
si->sbi = sbi;
+ sbi->stat_info = si;
mutex_lock(&f2fs_stat_mutex);
list_add_tail(&si->stat_list, &f2fs_stat_list);
@@ -329,13 +329,13 @@ int f2fs_build_stats(struct f2fs_sb_info *sbi)
void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
{
- struct f2fs_stat_info *si = sbi->stat_info;
+ struct f2fs_stat_info *si = F2FS_STAT(sbi);
mutex_lock(&f2fs_stat_mutex);
list_del(&si->stat_list);
mutex_unlock(&f2fs_stat_mutex);
- kfree(sbi->stat_info);
+ kfree(si);
}
void __init f2fs_create_root_stats(void)
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 62f0d5977c6..384c6daf9a8 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -270,12 +270,27 @@ static void init_dent_inode(const struct qstr *name, struct page *ipage)
struct f2fs_node *rn;
/* copy name info. to this inode page */
- rn = (struct f2fs_node *)page_address(ipage);
+ rn = F2FS_NODE(ipage);
rn->i.i_namelen = cpu_to_le32(name->len);
memcpy(rn->i.i_name, name->name, name->len);
set_page_dirty(ipage);
}
+int update_dent_inode(struct inode *inode, const struct qstr *name)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ struct page *page;
+
+ page = get_node_page(sbi, inode->i_ino);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ init_dent_inode(name, page);
+ f2fs_put_page(page, 1);
+
+ return 0;
+}
+
static int make_empty_dir(struct inode *inode,
struct inode *parent, struct page *page)
{
@@ -557,6 +572,8 @@ void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page,
if (inode->i_nlink == 0)
add_orphan_inode(sbi, inode->i_ino);
+ else
+ release_orphan_inode(sbi);
}
if (bit_pos == NR_DENTRY_IN_BLOCK) {
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 467d42d65c4..608f0df5b91 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -17,6 +17,7 @@
#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/magic.h>
+#include <linux/kobject.h>
/*
* For mount options
@@ -28,6 +29,7 @@
#define F2FS_MOUNT_XATTR_USER 0x00000010
#define F2FS_MOUNT_POSIX_ACL 0x00000020
#define F2FS_MOUNT_DISABLE_EXT_IDENTIFY 0x00000040
+#define F2FS_MOUNT_INLINE_XATTR 0x00000080
#define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option)
#define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option)
@@ -134,11 +136,13 @@ static inline int update_sits_in_cursum(struct f2fs_summary_block *rs, int i)
/*
* For INODE and NODE manager
*/
-#define XATTR_NODE_OFFSET (-1) /*
- * store xattrs to one node block per
- * file keeping -1 as its node offset to
- * distinguish from index node blocks.
- */
+/*
+ * XATTR_NODE_OFFSET stores xattrs in one node block per file, keeping -1
+ * as its node offset to distinguish it from index node blocks.
+ * Some bits, however, are used to mark the node block.
+ */
+#define XATTR_NODE_OFFSET ((((unsigned int)-1) << OFFSET_BIT_SHIFT) \
+ >> OFFSET_BIT_SHIFT)
enum {
ALLOC_NODE, /* allocate a new node page if needed */
LOOKUP_NODE, /* look up a node without readahead */
@@ -178,6 +182,7 @@ struct f2fs_inode_info {
f2fs_hash_t chash; /* hash value of given file name */
unsigned int clevel; /* maximum level of given file name */
nid_t i_xattr_nid; /* node id that contains xattrs */
+ unsigned long long xattr_ver; /* cp version of xattr modification */
struct extent_info ext; /* in-memory extent cache entry */
};
@@ -296,15 +301,6 @@ struct f2fs_sm_info {
};
/*
- * For directory operation
- */
-#define NODE_DIR1_BLOCK (ADDRS_PER_INODE + 1)
-#define NODE_DIR2_BLOCK (ADDRS_PER_INODE + 2)
-#define NODE_IND1_BLOCK (ADDRS_PER_INODE + 3)
-#define NODE_IND2_BLOCK (ADDRS_PER_INODE + 4)
-#define NODE_DIND_BLOCK (ADDRS_PER_INODE + 5)
-
-/*
* For superblock
*/
/*
@@ -350,6 +346,7 @@ enum page_type {
struct f2fs_sb_info {
struct super_block *sb; /* pointer to VFS super block */
+ struct proc_dir_entry *s_proc; /* proc entry */
struct buffer_head *raw_super_buf; /* buffer head of raw sb */
struct f2fs_super_block *raw_super; /* raw super block pointer */
int s_dirty; /* dirty flag for checkpoint */
@@ -429,6 +426,10 @@ struct f2fs_sb_info {
#endif
unsigned int last_victim[2]; /* last victim segment # */
spinlock_t stat_lock; /* lock for stat operations */
+
+ /* For sysfs support */
+ struct kobject s_kobj;
+ struct completion s_kobj_unregister;
};
/*
@@ -454,6 +455,11 @@ static inline struct f2fs_checkpoint *F2FS_CKPT(struct f2fs_sb_info *sbi)
return (struct f2fs_checkpoint *)(sbi->ckpt);
}
+static inline struct f2fs_node *F2FS_NODE(struct page *page)
+{
+ return (struct f2fs_node *)page_address(page);
+}
+
static inline struct f2fs_nm_info *NM_I(struct f2fs_sb_info *sbi)
{
return (struct f2fs_nm_info *)(sbi->nm_info);
@@ -489,6 +495,11 @@ static inline void F2FS_RESET_SB_DIRT(struct f2fs_sb_info *sbi)
sbi->s_dirty = 0;
}
+static inline unsigned long long cur_cp_version(struct f2fs_checkpoint *cp)
+{
+ return le64_to_cpu(cp->checkpoint_ver);
+}
+
static inline bool is_set_ckpt_flags(struct f2fs_checkpoint *cp, unsigned int f)
{
unsigned int ckpt_flags = le32_to_cpu(cp->ckpt_flags);
@@ -677,7 +688,7 @@ static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi)
{
block_t start_addr;
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
- unsigned long long ckpt_version = le64_to_cpu(ckpt->checkpoint_ver);
+ unsigned long long ckpt_version = cur_cp_version(ckpt);
start_addr = le32_to_cpu(F2FS_RAW_SUPER(sbi)->cp_blkaddr);
@@ -812,7 +823,7 @@ static inline struct kmem_cache *f2fs_kmem_cache_create(const char *name,
static inline bool IS_INODE(struct page *page)
{
- struct f2fs_node *p = (struct f2fs_node *)page_address(page);
+ struct f2fs_node *p = F2FS_NODE(page);
return RAW_IS_INODE(p);
}
@@ -826,7 +837,7 @@ static inline block_t datablock_addr(struct page *node_page,
{
struct f2fs_node *raw_node;
__le32 *addr_array;
- raw_node = (struct f2fs_node *)page_address(node_page);
+ raw_node = F2FS_NODE(node_page);
addr_array = blkaddr_in_node(raw_node);
return le32_to_cpu(addr_array[offset]);
}
@@ -873,6 +884,7 @@ enum {
FI_NO_ALLOC, /* should not allocate any blocks */
FI_UPDATE_DIR, /* should update inode block for consistency */
FI_DELAY_IPUT, /* used for the recovery */
+ FI_INLINE_XATTR, /* used for inline xattr */
};
static inline void set_inode_flag(struct f2fs_inode_info *fi, int flag)
@@ -905,6 +917,45 @@ static inline int cond_clear_inode_flag(struct f2fs_inode_info *fi, int flag)
return 0;
}
+static inline void get_inline_info(struct f2fs_inode_info *fi,
+ struct f2fs_inode *ri)
+{
+ if (ri->i_inline & F2FS_INLINE_XATTR)
+ set_inode_flag(fi, FI_INLINE_XATTR);
+}
+
+static inline void set_raw_inline(struct f2fs_inode_info *fi,
+ struct f2fs_inode *ri)
+{
+ ri->i_inline = 0;
+
+ if (is_inode_flag_set(fi, FI_INLINE_XATTR))
+ ri->i_inline |= F2FS_INLINE_XATTR;
+}
+
+static inline unsigned int addrs_per_inode(struct f2fs_inode_info *fi)
+{
+ if (is_inode_flag_set(fi, FI_INLINE_XATTR))
+ return DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS;
+ return DEF_ADDRS_PER_INODE;
+}
+
+static inline void *inline_xattr_addr(struct page *page)
+{
+ struct f2fs_inode *ri;
+ ri = (struct f2fs_inode *)page_address(page);
+ return (void *)&(ri->i_addr[DEF_ADDRS_PER_INODE -
+ F2FS_INLINE_XATTR_ADDRS]);
+}
+
+static inline int inline_xattr_size(struct inode *inode)
+{
+ if (is_inode_flag_set(F2FS_I(inode), FI_INLINE_XATTR))
+ return F2FS_INLINE_XATTR_ADDRS << 2;
+ else
+ return 0;
+}
+
static inline int f2fs_readonly(struct super_block *sb)
{
return sb->s_flags & MS_RDONLY;
@@ -947,6 +998,7 @@ struct f2fs_dir_entry *f2fs_parent_dir(struct inode *, struct page **);
ino_t f2fs_inode_by_name(struct inode *, struct qstr *);
void f2fs_set_link(struct inode *, struct f2fs_dir_entry *,
struct page *, struct inode *);
+int update_dent_inode(struct inode *, const struct qstr *);
int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *);
void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *);
int f2fs_make_empty(struct inode *, struct inode *);
@@ -980,6 +1032,7 @@ int is_checkpointed_node(struct f2fs_sb_info *, nid_t);
void get_node_info(struct f2fs_sb_info *, nid_t, struct node_info *);
int get_dnode_of_data(struct dnode_of_data *, pgoff_t, int);
int truncate_inode_blocks(struct inode *, pgoff_t);
+int truncate_xattr_node(struct inode *, struct page *);
int remove_inode_page(struct inode *);
struct page *new_inode_page(struct inode *, const struct qstr *);
struct page *new_node_page(struct dnode_of_data *, unsigned int, struct page *);
@@ -1012,7 +1065,8 @@ int npages_for_summary_flush(struct f2fs_sb_info *);
void allocate_new_segments(struct f2fs_sb_info *);
struct page *get_sum_page(struct f2fs_sb_info *, unsigned int);
struct bio *f2fs_bio_alloc(struct block_device *, int);
-void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool sync);
+void f2fs_submit_bio(struct f2fs_sb_info *, enum page_type, bool);
+void f2fs_wait_on_page_writeback(struct page *, enum page_type, bool);
void write_meta_page(struct f2fs_sb_info *, struct page *);
void write_node_page(struct f2fs_sb_info *, struct page *, unsigned int,
block_t, block_t *);
@@ -1037,7 +1091,8 @@ void destroy_segment_manager(struct f2fs_sb_info *);
struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t);
struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t);
long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long);
-int check_orphan_space(struct f2fs_sb_info *);
+int acquire_orphan_inode(struct f2fs_sb_info *);
+void release_orphan_inode(struct f2fs_sb_info *);
void add_orphan_inode(struct f2fs_sb_info *, nid_t);
void remove_orphan_inode(struct f2fs_sb_info *, nid_t);
int recover_orphan_inodes(struct f2fs_sb_info *);
@@ -1068,7 +1123,7 @@ int do_write_data_page(struct page *);
*/
int start_gc_thread(struct f2fs_sb_info *);
void stop_gc_thread(struct f2fs_sb_info *);
-block_t start_bidx_of_node(unsigned int);
+block_t start_bidx_of_node(unsigned int, struct f2fs_inode_info *);
int f2fs_gc(struct f2fs_sb_info *);
void build_gc_manager(struct f2fs_sb_info *);
int __init create_gc_caches(void);
@@ -1112,11 +1167,16 @@ struct f2fs_stat_info {
unsigned base_mem, cache_mem;
};
+static inline struct f2fs_stat_info *F2FS_STAT(struct f2fs_sb_info *sbi)
+{
+ return (struct f2fs_stat_info*)sbi->stat_info;
+}
+
#define stat_inc_call_count(si) ((si)->call_count++)
#define stat_inc_seg_count(sbi, type) \
do { \
- struct f2fs_stat_info *si = sbi->stat_info; \
+ struct f2fs_stat_info *si = F2FS_STAT(sbi); \
(si)->tot_segs++; \
if (type == SUM_TYPE_DATA) \
si->data_segs++; \
@@ -1129,14 +1189,14 @@ struct f2fs_stat_info {
#define stat_inc_data_blk_count(sbi, blks) \
do { \
- struct f2fs_stat_info *si = sbi->stat_info; \
+ struct f2fs_stat_info *si = F2FS_STAT(sbi); \
stat_inc_tot_blk_count(si, blks); \
si->data_blks += (blks); \
} while (0)
#define stat_inc_node_blk_count(sbi, blks) \
do { \
- struct f2fs_stat_info *si = sbi->stat_info; \
+ struct f2fs_stat_info *si = F2FS_STAT(sbi); \
stat_inc_tot_blk_count(si, blks); \
si->node_blks += (blks); \
} while (0)
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index d2d2b7dbdcc..02c906971cc 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -112,11 +112,13 @@ static int get_parent_ino(struct inode *inode, nid_t *pino)
if (!dentry)
return 0;
- inode = igrab(dentry->d_parent->d_inode);
- dput(dentry);
+ if (update_dent_inode(inode, &dentry->d_name)) {
+ dput(dentry);
+ return 0;
+ }
- *pino = inode->i_ino;
- iput(inode);
+ *pino = parent_ino(dentry);
+ dput(dentry);
return 1;
}
@@ -147,9 +149,10 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
mutex_lock(&inode->i_mutex);
- if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
- goto out;
-
+ /*
+ * Both fdatasync() and fsync() can be recovered from a
+ * sudden power-off.
+ */
if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1)
need_cp = true;
else if (file_wrong_pino(inode))
@@ -158,10 +161,14 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
need_cp = true;
else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino))
need_cp = true;
+ else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi)))
+ need_cp = true;
if (need_cp) {
nid_t pino;
+ F2FS_I(inode)->xattr_ver = 0;
+
/* all the dirty node pages should be flushed for POR */
ret = f2fs_sync_fs(inode->i_sb, 1);
if (file_wrong_pino(inode) && inode->i_nlink == 1 &&
@@ -205,7 +212,7 @@ int truncate_data_blocks_range(struct dnode_of_data *dn, int count)
struct f2fs_node *raw_node;
__le32 *addr;
- raw_node = page_address(dn->node_page);
+ raw_node = F2FS_NODE(dn->node_page);
addr = blkaddr_in_node(raw_node) + ofs;
for ( ; count > 0; count--, addr++, dn->ofs_in_node++) {
@@ -283,7 +290,7 @@ static int truncate_blocks(struct inode *inode, u64 from)
}
if (IS_INODE(dn.node_page))
- count = ADDRS_PER_INODE;
+ count = ADDRS_PER_INODE(F2FS_I(inode));
else
count = ADDRS_PER_BLOCK;
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 35f9b1a196a..2f157e88368 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -29,10 +29,11 @@ static struct kmem_cache *winode_slab;
static int gc_thread_func(void *data)
{
struct f2fs_sb_info *sbi = data;
+ struct f2fs_gc_kthread *gc_th = sbi->gc_thread;
wait_queue_head_t *wq = &sbi->gc_thread->gc_wait_queue_head;
long wait_ms;
- wait_ms = GC_THREAD_MIN_SLEEP_TIME;
+ wait_ms = gc_th->min_sleep_time;
do {
if (try_to_freeze())
@@ -45,7 +46,7 @@ static int gc_thread_func(void *data)
break;
if (sbi->sb->s_writers.frozen >= SB_FREEZE_WRITE) {
- wait_ms = GC_THREAD_MAX_SLEEP_TIME;
+ wait_ms = increase_sleep_time(gc_th, wait_ms);
continue;
}
@@ -66,15 +67,15 @@ static int gc_thread_func(void *data)
continue;
if (!is_idle(sbi)) {
- wait_ms = increase_sleep_time(wait_ms);
+ wait_ms = increase_sleep_time(gc_th, wait_ms);
mutex_unlock(&sbi->gc_mutex);
continue;
}
if (has_enough_invalid_blocks(sbi))
- wait_ms = decrease_sleep_time(wait_ms);
+ wait_ms = decrease_sleep_time(gc_th, wait_ms);
else
- wait_ms = increase_sleep_time(wait_ms);
+ wait_ms = increase_sleep_time(gc_th, wait_ms);
#ifdef CONFIG_F2FS_STAT_FS
sbi->bg_gc++;
@@ -82,7 +83,7 @@ static int gc_thread_func(void *data)
/* if return value is not zero, no victim was selected */
if (f2fs_gc(sbi))
- wait_ms = GC_THREAD_NOGC_SLEEP_TIME;
+ wait_ms = gc_th->no_gc_sleep_time;
} while (!kthread_should_stop());
return 0;
}
@@ -101,6 +102,12 @@ int start_gc_thread(struct f2fs_sb_info *sbi)
goto out;
}
+ gc_th->min_sleep_time = DEF_GC_THREAD_MIN_SLEEP_TIME;
+ gc_th->max_sleep_time = DEF_GC_THREAD_MAX_SLEEP_TIME;
+ gc_th->no_gc_sleep_time = DEF_GC_THREAD_NOGC_SLEEP_TIME;
+
+ gc_th->gc_idle = 0;
+
sbi->gc_thread = gc_th;
init_waitqueue_head(&sbi->gc_thread->gc_wait_queue_head);
sbi->gc_thread->f2fs_gc_task = kthread_run(gc_thread_func, sbi,
@@ -125,9 +132,17 @@ void stop_gc_thread(struct f2fs_sb_info *sbi)
sbi->gc_thread = NULL;
}
-static int select_gc_type(int gc_type)
+static int select_gc_type(struct f2fs_gc_kthread *gc_th, int gc_type)
{
- return (gc_type == BG_GC) ? GC_CB : GC_GREEDY;
+ int gc_mode = (gc_type == BG_GC) ? GC_CB : GC_GREEDY;
+
+ if (gc_th && gc_th->gc_idle) {
+ if (gc_th->gc_idle == 1)
+ gc_mode = GC_CB;
+ else if (gc_th->gc_idle == 2)
+ gc_mode = GC_GREEDY;
+ }
+ return gc_mode;
}
static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
@@ -138,12 +153,18 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
if (p->alloc_mode == SSR) {
p->gc_mode = GC_GREEDY;
p->dirty_segmap = dirty_i->dirty_segmap[type];
+ p->max_search = dirty_i->nr_dirty[type];
p->ofs_unit = 1;
} else {
- p->gc_mode = select_gc_type(gc_type);
+ p->gc_mode = select_gc_type(sbi->gc_thread, gc_type);
p->dirty_segmap = dirty_i->dirty_segmap[DIRTY];
+ p->max_search = dirty_i->nr_dirty[DIRTY];
p->ofs_unit = sbi->segs_per_sec;
}
+
+ if (p->max_search > MAX_VICTIM_SEARCH)
+ p->max_search = MAX_VICTIM_SEARCH;
+
p->offset = sbi->last_victim[p->gc_mode];
}
@@ -290,7 +311,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
if (cost == max_cost)
continue;
- if (nsearched++ >= MAX_VICTIM_SEARCH) {
+ if (nsearched++ >= p.max_search) {
sbi->last_victim[p.gc_mode] = segno;
break;
}
@@ -407,8 +428,7 @@ next_step:
/* set page dirty and write it */
if (gc_type == FG_GC) {
- f2fs_submit_bio(sbi, NODE, true);
- wait_on_page_writeback(node_page);
+ f2fs_wait_on_page_writeback(node_page, NODE, true);
set_page_dirty(node_page);
} else {
if (!PageWriteback(node_page))
@@ -447,7 +467,7 @@ next_step:
* as indirect or double indirect node blocks, are given, it must be a caller's
* bug.
*/
-block_t start_bidx_of_node(unsigned int node_ofs)
+block_t start_bidx_of_node(unsigned int node_ofs, struct f2fs_inode_info *fi)
{
unsigned int indirect_blks = 2 * NIDS_PER_BLOCK + 4;
unsigned int bidx;
@@ -464,7 +484,7 @@ block_t start_bidx_of_node(unsigned int node_ofs)
int dec = (node_ofs - indirect_blks - 3) / (NIDS_PER_BLOCK + 1);
bidx = node_ofs - 5 - dec;
}
- return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE;
+ return bidx * ADDRS_PER_BLOCK + ADDRS_PER_INODE(fi);
}
static int check_dnode(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
@@ -508,10 +528,7 @@ static void move_data_page(struct inode *inode, struct page *page, int gc_type)
} else {
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
- if (PageWriteback(page)) {
- f2fs_submit_bio(sbi, DATA, true);
- wait_on_page_writeback(page);
- }
+ f2fs_wait_on_page_writeback(page, DATA, true);
if (clear_page_dirty_for_io(page) &&
S_ISDIR(inode->i_mode)) {
@@ -575,7 +592,6 @@ next_step:
continue;
}
- start_bidx = start_bidx_of_node(nofs);
ofs_in_node = le16_to_cpu(entry->ofs_in_node);
if (phase == 2) {
@@ -583,6 +599,8 @@ next_step:
if (IS_ERR(inode))
continue;
+ start_bidx = start_bidx_of_node(nofs, F2FS_I(inode));
+
data_page = find_data_page(inode,
start_bidx + ofs_in_node, false);
if (IS_ERR(data_page))
@@ -593,6 +611,8 @@ next_step:
} else {
inode = find_gc_inode(dni.ino, ilist);
if (inode) {
+ start_bidx = start_bidx_of_node(nofs,
+ F2FS_I(inode));
data_page = get_lock_data_page(inode,
start_bidx + ofs_in_node);
if (IS_ERR(data_page))
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h
index 2c6a6bd0832..507056d2220 100644
--- a/fs/f2fs/gc.h
+++ b/fs/f2fs/gc.h
@@ -13,18 +13,26 @@
* whether IO subsystem is idle
* or not
*/
-#define GC_THREAD_MIN_SLEEP_TIME 30000 /* milliseconds */
-#define GC_THREAD_MAX_SLEEP_TIME 60000
-#define GC_THREAD_NOGC_SLEEP_TIME 300000 /* wait 5 min */
+#define DEF_GC_THREAD_MIN_SLEEP_TIME 30000 /* milliseconds */
+#define DEF_GC_THREAD_MAX_SLEEP_TIME 60000
+#define DEF_GC_THREAD_NOGC_SLEEP_TIME 300000 /* wait 5 min */
#define LIMIT_INVALID_BLOCK 40 /* percentage over total user space */
#define LIMIT_FREE_BLOCK 40 /* percentage over invalid + free space */
/* Search max. number of dirty segments to select a victim segment */
-#define MAX_VICTIM_SEARCH 20
+#define MAX_VICTIM_SEARCH 4096 /* covers 8GB */
struct f2fs_gc_kthread {
struct task_struct *f2fs_gc_task;
wait_queue_head_t gc_wait_queue_head;
+
+ /* for gc sleep time */
+ unsigned int min_sleep_time;
+ unsigned int max_sleep_time;
+ unsigned int no_gc_sleep_time;
+
+ /* for changing gc mode */
+ unsigned int gc_idle;
};
struct inode_entry {
@@ -56,25 +64,25 @@ static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi)
return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100;
}
-static inline long increase_sleep_time(long wait)
+static inline long increase_sleep_time(struct f2fs_gc_kthread *gc_th, long wait)
{
- if (wait == GC_THREAD_NOGC_SLEEP_TIME)
+ if (wait == gc_th->no_gc_sleep_time)
return wait;
- wait += GC_THREAD_MIN_SLEEP_TIME;
- if (wait > GC_THREAD_MAX_SLEEP_TIME)
- wait = GC_THREAD_MAX_SLEEP_TIME;
+ wait += gc_th->min_sleep_time;
+ if (wait > gc_th->max_sleep_time)
+ wait = gc_th->max_sleep_time;
return wait;
}
-static inline long decrease_sleep_time(long wait)
+static inline long decrease_sleep_time(struct f2fs_gc_kthread *gc_th, long wait)
{
- if (wait == GC_THREAD_NOGC_SLEEP_TIME)
- wait = GC_THREAD_MAX_SLEEP_TIME;
+ if (wait == gc_th->no_gc_sleep_time)
+ wait = gc_th->max_sleep_time;
- wait -= GC_THREAD_MIN_SLEEP_TIME;
- if (wait <= GC_THREAD_MIN_SLEEP_TIME)
- wait = GC_THREAD_MIN_SLEEP_TIME;
+ wait -= gc_th->min_sleep_time;
+ if (wait <= gc_th->min_sleep_time)
+ wait = gc_th->min_sleep_time;
return wait;
}
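The gc.h changes above only lift the old GC_THREAD_* constants into per-instance tunables; the back-off policy itself is unchanged: sleep longer while the system is busy or frozen, shorter while many invalid blocks are waiting, and fall back to the long no-GC interval when a pass selects no victim. A standalone sketch of that policy using the default values shown above, purely for illustration:

#include <stdio.h>

/*
 * Minimal userspace sketch of the adaptive GC back-off made tunable above.
 * The constants mirror the DEF_GC_THREAD_* defaults; everything else here
 * is illustrative, not f2fs code.
 */
struct gc_tunables {
	unsigned int min_sleep_time;   /* 30000 ms */
	unsigned int max_sleep_time;   /* 60000 ms */
	unsigned int no_gc_sleep_time; /* 300000 ms */
};

static long increase_sleep(const struct gc_tunables *t, long wait)
{
	if (wait == t->no_gc_sleep_time)
		return wait;               /* already idle: stay there */
	wait += t->min_sleep_time;         /* back off by one step */
	if (wait > t->max_sleep_time)
		wait = t->max_sleep_time;
	return wait;
}

static long decrease_sleep(const struct gc_tunables *t, long wait)
{
	if (wait == t->no_gc_sleep_time)
		wait = t->max_sleep_time;  /* leave the idle state first */
	wait -= t->min_sleep_time;         /* become more aggressive */
	if (wait <= t->min_sleep_time)
		wait = t->min_sleep_time;
	return wait;
}

int main(void)
{
	struct gc_tunables t = { 30000, 60000, 300000 };
	long wait = t.min_sleep_time;

	wait = increase_sleep(&t, wait);   /* 60000: system was busy */
	wait = decrease_sleep(&t, wait);   /* 30000: many invalid blocks */
	printf("final wait: %ld ms\n", wait);
	return 0;
}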
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 2b2d45d19e3..9339cd29204 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -56,7 +56,7 @@ static int do_read_inode(struct inode *inode)
if (IS_ERR(node_page))
return PTR_ERR(node_page);
- rn = page_address(node_page);
+ rn = F2FS_NODE(node_page);
ri = &(rn->i);
inode->i_mode = le16_to_cpu(ri->i_mode);
@@ -85,6 +85,7 @@ static int do_read_inode(struct inode *inode)
fi->i_advise = ri->i_advise;
fi->i_pino = le32_to_cpu(ri->i_pino);
get_extent_info(&fi->ext, ri->i_ext);
+ get_inline_info(fi, ri);
f2fs_put_page(node_page, 1);
return 0;
}
@@ -151,9 +152,9 @@ void update_inode(struct inode *inode, struct page *node_page)
struct f2fs_node *rn;
struct f2fs_inode *ri;
- wait_on_page_writeback(node_page);
+ f2fs_wait_on_page_writeback(node_page, NODE, false);
- rn = page_address(node_page);
+ rn = F2FS_NODE(node_page);
ri = &(rn->i);
ri->i_mode = cpu_to_le16(inode->i_mode);
@@ -164,6 +165,7 @@ void update_inode(struct inode *inode, struct page *node_page)
ri->i_size = cpu_to_le64(i_size_read(inode));
ri->i_blocks = cpu_to_le64(inode->i_blocks);
set_raw_extent(&F2FS_I(inode)->ext, &ri->i_ext);
+ set_raw_inline(F2FS_I(inode), ri);
ri->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
ri->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
@@ -221,9 +223,6 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
if (!is_inode_flag_set(F2FS_I(inode), FI_DIRTY_INODE))
return 0;
- if (wbc)
- f2fs_balance_fs(sbi);
-
/*
* We need to lock here to prevent from producing dirty node pages
 * during the urgent cleaning time when running out of free sections.
@@ -231,6 +230,10 @@ int f2fs_write_inode(struct inode *inode, struct writeback_control *wbc)
ilock = mutex_lock_op(sbi);
ret = update_inode_page(inode);
mutex_unlock_op(sbi, ilock);
+
+ if (wbc)
+ f2fs_balance_fs(sbi);
+
return ret;
}
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 64c07169df0..2a5359c990f 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -83,21 +83,11 @@ static int is_multimedia_file(const unsigned char *s, const char *sub)
{
size_t slen = strlen(s);
size_t sublen = strlen(sub);
- int ret;
if (sublen > slen)
return 0;
- ret = memcmp(s + slen - sublen, sub, sublen);
- if (ret) { /* compare upper case */
- int i;
- char upper_sub[8];
- for (i = 0; i < sublen && i < sizeof(upper_sub); i++)
- upper_sub[i] = toupper(sub[i]);
- return !memcmp(s + slen - sublen, upper_sub, sublen);
- }
-
- return !ret;
+ return !strncasecmp(s + slen - sublen, sub, sublen);
}
/*
@@ -239,7 +229,7 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
if (!de)
goto fail;
- err = check_orphan_space(sbi);
+ err = acquire_orphan_inode(sbi);
if (err) {
kunmap(page);
f2fs_put_page(page, 0);
@@ -393,7 +383,7 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *old_inode = old_dentry->d_inode;
struct inode *new_inode = new_dentry->d_inode;
struct page *old_dir_page;
- struct page *old_page;
+ struct page *old_page, *new_page;
struct f2fs_dir_entry *old_dir_entry = NULL;
struct f2fs_dir_entry *old_entry;
struct f2fs_dir_entry *new_entry;
@@ -415,7 +405,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
ilock = mutex_lock_op(sbi);
if (new_inode) {
- struct page *new_page;
err = -ENOTEMPTY;
if (old_dir_entry && !f2fs_empty_dir(new_inode))
@@ -427,14 +416,28 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (!new_entry)
goto out_dir;
+ err = acquire_orphan_inode(sbi);
+ if (err)
+ goto put_out_dir;
+
+ if (update_dent_inode(old_inode, &new_dentry->d_name)) {
+ release_orphan_inode(sbi);
+ goto put_out_dir;
+ }
+
f2fs_set_link(new_dir, new_entry, new_page, old_inode);
new_inode->i_ctime = CURRENT_TIME;
if (old_dir_entry)
drop_nlink(new_inode);
drop_nlink(new_inode);
+
if (!new_inode->i_nlink)
add_orphan_inode(sbi, new_inode->i_ino);
+ else
+ release_orphan_inode(sbi);
+
+ update_inode_page(old_inode);
update_inode_page(new_inode);
} else {
err = f2fs_add_link(new_dentry, old_inode);
@@ -467,6 +470,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
mutex_unlock_op(sbi, ilock);
return 0;
+put_out_dir:
+ f2fs_put_page(new_page, 1);
out_dir:
if (old_dir_entry) {
kunmap(old_dir_page);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index b418aee0957..51ef2789443 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -315,9 +315,10 @@ cache:
* The maximum depth is four.
* Offset[0] will have raw inode offset.
*/
-static int get_node_path(long block, int offset[4], unsigned int noffset[4])
+static int get_node_path(struct f2fs_inode_info *fi, long block,
+ int offset[4], unsigned int noffset[4])
{
- const long direct_index = ADDRS_PER_INODE;
+ const long direct_index = ADDRS_PER_INODE(fi);
const long direct_blks = ADDRS_PER_BLOCK;
const long dptrs_per_blk = NIDS_PER_BLOCK;
const long indirect_blks = ADDRS_PER_BLOCK * NIDS_PER_BLOCK;
@@ -405,7 +406,7 @@ int get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode)
int level, i;
int err = 0;
- level = get_node_path(index, offset, noffset);
+ level = get_node_path(F2FS_I(dn->inode), index, offset, noffset);
nids[0] = dn->inode->i_ino;
npage[0] = dn->inode_page;
@@ -565,7 +566,7 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs,
return PTR_ERR(page);
}
- rn = (struct f2fs_node *)page_address(page);
+ rn = F2FS_NODE(page);
if (depth < 3) {
for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) {
child_nid = le32_to_cpu(rn->in.nid[i]);
@@ -687,7 +688,7 @@ int truncate_inode_blocks(struct inode *inode, pgoff_t from)
trace_f2fs_truncate_inode_blocks_enter(inode, from);
- level = get_node_path(from, offset, noffset);
+ level = get_node_path(F2FS_I(inode), from, offset, noffset);
restart:
page = get_node_page(sbi, inode->i_ino);
if (IS_ERR(page)) {
@@ -698,7 +699,7 @@ restart:
set_new_dnode(&dn, inode, page, NULL, 0);
unlock_page(page);
- rn = page_address(page);
+ rn = F2FS_NODE(page);
switch (level) {
case 0:
case 1:
@@ -771,6 +772,33 @@ fail:
return err > 0 ? 0 : err;
}
+int truncate_xattr_node(struct inode *inode, struct page *page)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ nid_t nid = F2FS_I(inode)->i_xattr_nid;
+ struct dnode_of_data dn;
+ struct page *npage;
+
+ if (!nid)
+ return 0;
+
+ npage = get_node_page(sbi, nid);
+ if (IS_ERR(npage))
+ return PTR_ERR(npage);
+
+ F2FS_I(inode)->i_xattr_nid = 0;
+
+ /* need to do checkpoint during fsync */
+ F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));
+
+ set_new_dnode(&dn, inode, page, npage, nid);
+
+ if (page)
+ dn.inode_page_locked = 1;
+ truncate_node(&dn);
+ return 0;
+}
+
/*
* Caller should grab and release a mutex by calling mutex_lock_op() and
* mutex_unlock_op().
@@ -781,22 +809,16 @@ int remove_inode_page(struct inode *inode)
struct page *page;
nid_t ino = inode->i_ino;
struct dnode_of_data dn;
+ int err;
page = get_node_page(sbi, ino);
if (IS_ERR(page))
return PTR_ERR(page);
- if (F2FS_I(inode)->i_xattr_nid) {
- nid_t nid = F2FS_I(inode)->i_xattr_nid;
- struct page *npage = get_node_page(sbi, nid);
-
- if (IS_ERR(npage))
- return PTR_ERR(npage);
-
- F2FS_I(inode)->i_xattr_nid = 0;
- set_new_dnode(&dn, inode, page, npage, nid);
- dn.inode_page_locked = 1;
- truncate_node(&dn);
+ err = truncate_xattr_node(inode, page);
+ if (err) {
+ f2fs_put_page(page, 1);
+ return err;
}
/* 0 is possible, after f2fs_new_inode() is failed */
@@ -833,29 +855,32 @@ struct page *new_node_page(struct dnode_of_data *dn,
if (!page)
return ERR_PTR(-ENOMEM);
- get_node_info(sbi, dn->nid, &old_ni);
+ if (!inc_valid_node_count(sbi, dn->inode, 1)) {
+ err = -ENOSPC;
+ goto fail;
+ }
- SetPageUptodate(page);
- fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
+ get_node_info(sbi, dn->nid, &old_ni);
/* Reinitialize old_ni with new node page */
BUG_ON(old_ni.blk_addr != NULL_ADDR);
new_ni = old_ni;
new_ni.ino = dn->inode->i_ino;
-
- if (!inc_valid_node_count(sbi, dn->inode, 1)) {
- err = -ENOSPC;
- goto fail;
- }
set_node_addr(sbi, &new_ni, NEW_ADDR);
+
+ fill_node_footer(page, dn->nid, dn->inode->i_ino, ofs, true);
set_cold_node(dn->inode, page);
+ SetPageUptodate(page);
+ set_page_dirty(page);
+
+ if (ofs == XATTR_NODE_OFFSET)
+ F2FS_I(dn->inode)->i_xattr_nid = dn->nid;
dn->node_page = page;
if (ipage)
update_inode(dn->inode, ipage);
else
sync_inode_page(dn);
- set_page_dirty(page);
if (ofs == 0)
inc_valid_inode_count(sbi);
@@ -916,7 +941,6 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid)
f2fs_put_page(apage, 0);
else if (err == LOCKED_PAGE)
f2fs_put_page(apage, 1);
- return;
}
struct page *get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid)
@@ -1167,9 +1191,9 @@ static int f2fs_write_node_page(struct page *page,
/*
* It is very important to gather dirty pages and write at once, so that we can
* submit a big bio without interfering other data writes.
- * Be default, 512 pages (2MB), a segment size, is quite reasonable.
+ * By default, 512 pages (2MB) * 3 node types, is more reasonable.
*/
-#define COLLECT_DIRTY_NODES 512
+#define COLLECT_DIRTY_NODES 1536
static int f2fs_write_node_pages(struct address_space *mapping,
struct writeback_control *wbc)
{
@@ -1187,9 +1211,10 @@ static int f2fs_write_node_pages(struct address_space *mapping,
return 0;
/* if mounting is failed, skip writing node pages */
- wbc->nr_to_write = max_hw_blocks(sbi);
+ wbc->nr_to_write = 3 * max_hw_blocks(sbi);
sync_node_pages(sbi, 0, wbc);
- wbc->nr_to_write = nr_to_write - (max_hw_blocks(sbi) - wbc->nr_to_write);
+ wbc->nr_to_write = nr_to_write - (3 * max_hw_blocks(sbi) -
+ wbc->nr_to_write);
return 0;
}
@@ -1444,6 +1469,9 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid)
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i;
+ if (!nid)
+ return;
+
spin_lock(&nm_i->free_nid_list_lock);
i = __lookup_free_nid_list(nid, &nm_i->free_nid_list);
BUG_ON(!i || i->state != NID_ALLOC);
@@ -1484,8 +1512,8 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
SetPageUptodate(ipage);
fill_node_footer(ipage, ino, ino, 0, true);
- src = (struct f2fs_node *)page_address(page);
- dst = (struct f2fs_node *)page_address(ipage);
+ src = F2FS_NODE(page);
+ dst = F2FS_NODE(ipage);
memcpy(dst, src, (unsigned long)&src->i.i_ext - (unsigned long)&src->i);
dst->i.i_size = 0;
@@ -1515,8 +1543,8 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
/* alloc temporal page for read node */
page = alloc_page(GFP_NOFS | __GFP_ZERO);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ if (!page)
+ return -ENOMEM;
lock_page(page);
/* scan the node segment */
@@ -1535,7 +1563,7 @@ int restore_node_summary(struct f2fs_sb_info *sbi,
goto out;
lock_page(page);
- rn = (struct f2fs_node *)page_address(page);
+ rn = F2FS_NODE(page);
sum_entry->nid = rn->footer.nid;
sum_entry->version = 0;
sum_entry->ofs_in_node = 0;
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index c65fb4f4230..3496bb3e15d 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -155,8 +155,7 @@ static inline void set_to_next_nat(struct f2fs_nm_info *nm_i, nid_t start_nid)
static inline void fill_node_footer(struct page *page, nid_t nid,
nid_t ino, unsigned int ofs, bool reset)
{
- void *kaddr = page_address(page);
- struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ struct f2fs_node *rn = F2FS_NODE(page);
if (reset)
memset(rn, 0, sizeof(*rn));
rn->footer.nid = cpu_to_le32(nid);
@@ -166,10 +165,8 @@ static inline void fill_node_footer(struct page *page, nid_t nid,
static inline void copy_node_footer(struct page *dst, struct page *src)
{
- void *src_addr = page_address(src);
- void *dst_addr = page_address(dst);
- struct f2fs_node *src_rn = (struct f2fs_node *)src_addr;
- struct f2fs_node *dst_rn = (struct f2fs_node *)dst_addr;
+ struct f2fs_node *src_rn = F2FS_NODE(src);
+ struct f2fs_node *dst_rn = F2FS_NODE(dst);
memcpy(&dst_rn->footer, &src_rn->footer, sizeof(struct node_footer));
}
@@ -177,45 +174,40 @@ static inline void fill_node_footer_blkaddr(struct page *page, block_t blkaddr)
{
struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
- void *kaddr = page_address(page);
- struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ struct f2fs_node *rn = F2FS_NODE(page);
+
rn->footer.cp_ver = ckpt->checkpoint_ver;
rn->footer.next_blkaddr = cpu_to_le32(blkaddr);
}
static inline nid_t ino_of_node(struct page *node_page)
{
- void *kaddr = page_address(node_page);
- struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ struct f2fs_node *rn = F2FS_NODE(node_page);
return le32_to_cpu(rn->footer.ino);
}
static inline nid_t nid_of_node(struct page *node_page)
{
- void *kaddr = page_address(node_page);
- struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ struct f2fs_node *rn = F2FS_NODE(node_page);
return le32_to_cpu(rn->footer.nid);
}
static inline unsigned int ofs_of_node(struct page *node_page)
{
- void *kaddr = page_address(node_page);
- struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ struct f2fs_node *rn = F2FS_NODE(node_page);
unsigned flag = le32_to_cpu(rn->footer.flag);
return flag >> OFFSET_BIT_SHIFT;
}
static inline unsigned long long cpver_of_node(struct page *node_page)
{
- void *kaddr = page_address(node_page);
- struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ struct f2fs_node *rn = F2FS_NODE(node_page);
return le64_to_cpu(rn->footer.cp_ver);
}
static inline block_t next_blkaddr_of_node(struct page *node_page)
{
- void *kaddr = page_address(node_page);
- struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ struct f2fs_node *rn = F2FS_NODE(node_page);
return le32_to_cpu(rn->footer.next_blkaddr);
}
@@ -237,6 +229,10 @@ static inline block_t next_blkaddr_of_node(struct page *node_page)
static inline bool IS_DNODE(struct page *node_page)
{
unsigned int ofs = ofs_of_node(node_page);
+
+ if (ofs == XATTR_NODE_OFFSET)
+ return false;
+
if (ofs == 3 || ofs == 4 + NIDS_PER_BLOCK ||
ofs == 5 + 2 * NIDS_PER_BLOCK)
return false;
@@ -250,7 +246,7 @@ static inline bool IS_DNODE(struct page *node_page)
static inline void set_nid(struct page *p, int off, nid_t nid, bool i)
{
- struct f2fs_node *rn = (struct f2fs_node *)page_address(p);
+ struct f2fs_node *rn = F2FS_NODE(p);
wait_on_page_writeback(p);
@@ -263,7 +259,8 @@ static inline void set_nid(struct page *p, int off, nid_t nid, bool i)
static inline nid_t get_nid(struct page *p, int off, bool i)
{
- struct f2fs_node *rn = (struct f2fs_node *)page_address(p);
+ struct f2fs_node *rn = F2FS_NODE(p);
+
if (i)
return le32_to_cpu(rn->i.i_nid[off - NODE_DIR1_BLOCK]);
return le32_to_cpu(rn->in.nid[off]);
@@ -314,8 +311,7 @@ static inline void clear_cold_data(struct page *page)
static inline int is_node(struct page *page, int type)
{
- void *kaddr = page_address(page);
- struct f2fs_node *rn = (struct f2fs_node *)kaddr;
+ struct f2fs_node *rn = F2FS_NODE(page);
return le32_to_cpu(rn->footer.flag) & (1 << type);
}
@@ -325,7 +321,7 @@ static inline int is_node(struct page *page, int type)
static inline void set_cold_node(struct inode *inode, struct page *page)
{
- struct f2fs_node *rn = (struct f2fs_node *)page_address(page);
+ struct f2fs_node *rn = F2FS_NODE(page);
unsigned int flag = le32_to_cpu(rn->footer.flag);
if (S_ISDIR(inode->i_mode))
@@ -337,7 +333,7 @@ static inline void set_cold_node(struct inode *inode, struct page *page)
static inline void set_mark(struct page *page, int mark, int type)
{
- struct f2fs_node *rn = (struct f2fs_node *)page_address(page);
+ struct f2fs_node *rn = F2FS_NODE(page);
unsigned int flag = le32_to_cpu(rn->footer.flag);
if (mark)
flag |= (0x1 << type);
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index d56d951c225..51ef5eec33d 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -40,8 +40,7 @@ static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
static int recover_dentry(struct page *ipage, struct inode *inode)
{
- void *kaddr = page_address(ipage);
- struct f2fs_node *raw_node = (struct f2fs_node *)kaddr;
+ struct f2fs_node *raw_node = F2FS_NODE(ipage);
struct f2fs_inode *raw_inode = &(raw_node->i);
nid_t pino = le32_to_cpu(raw_inode->i_pino);
struct f2fs_dir_entry *de;
@@ -93,8 +92,7 @@ out:
static int recover_inode(struct inode *inode, struct page *node_page)
{
- void *kaddr = page_address(node_page);
- struct f2fs_node *raw_node = (struct f2fs_node *)kaddr;
+ struct f2fs_node *raw_node = F2FS_NODE(node_page);
struct f2fs_inode *raw_inode = &(raw_node->i);
if (!IS_INODE(node_page))
@@ -119,7 +117,7 @@ static int recover_inode(struct inode *inode, struct page *node_page)
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
- unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
+ unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
struct curseg_info *curseg;
struct page *page;
block_t blkaddr;
@@ -131,8 +129,8 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
/* read node page */
page = alloc_page(GFP_F2FS_ZERO);
- if (IS_ERR(page))
- return PTR_ERR(page);
+ if (!page)
+ return -ENOMEM;
lock_page(page);
while (1) {
@@ -215,6 +213,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
void *kaddr;
struct inode *inode;
struct page *node_page;
+ unsigned int offset;
block_t bidx;
int i;
@@ -259,8 +258,8 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
node_page = get_node_page(sbi, nid);
if (IS_ERR(node_page))
return PTR_ERR(node_page);
- bidx = start_bidx_of_node(ofs_of_node(node_page)) +
- le16_to_cpu(sum.ofs_in_node);
+
+ offset = ofs_of_node(node_page);
ino = ino_of_node(node_page);
f2fs_put_page(node_page, 1);
@@ -269,6 +268,9 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
if (IS_ERR(inode))
return PTR_ERR(inode);
+ bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
+ le16_to_cpu(sum.ofs_in_node);
+
truncate_hole(inode, bidx, bidx + 1);
iput(inode);
return 0;
@@ -277,6 +279,7 @@ static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
struct page *page, block_t blkaddr)
{
+ struct f2fs_inode_info *fi = F2FS_I(inode);
unsigned int start, end;
struct dnode_of_data dn;
struct f2fs_summary sum;
@@ -284,9 +287,9 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
int err = 0, recovered = 0;
int ilock;
- start = start_bidx_of_node(ofs_of_node(page));
+ start = start_bidx_of_node(ofs_of_node(page), fi);
if (IS_INODE(page))
- end = start + ADDRS_PER_INODE;
+ end = start + ADDRS_PER_INODE(fi);
else
end = start + ADDRS_PER_BLOCK;
@@ -357,7 +360,7 @@ err:
static int recover_data(struct f2fs_sb_info *sbi,
struct list_head *head, int type)
{
- unsigned long long cp_ver = le64_to_cpu(sbi->ckpt->checkpoint_ver);
+ unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
struct curseg_info *curseg;
struct page *page;
int err = 0;
@@ -369,7 +372,7 @@ static int recover_data(struct f2fs_sb_info *sbi,
/* read node page */
page = alloc_page(GFP_NOFS | __GFP_ZERO);
- if (IS_ERR(page))
+ if (!page)
return -ENOMEM;
lock_page(page);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index a86d125a988..09af9c7b0f5 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -117,7 +117,6 @@ static void locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno)
}
mutex_unlock(&dirty_i->seglist_lock);
- return;
}
/*
@@ -261,7 +260,6 @@ static void __add_sum_entry(struct f2fs_sb_info *sbi, int type,
void *addr = curseg->sum_blk;
addr += curseg->next_blkoff * sizeof(struct f2fs_summary);
memcpy(addr, sum, sizeof(struct f2fs_summary));
- return;
}
/*
@@ -542,12 +540,9 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
- if (force) {
+ if (force)
new_curseg(sbi, type, true);
- goto out;
- }
-
- if (type == CURSEG_WARM_NODE)
+ else if (type == CURSEG_WARM_NODE)
new_curseg(sbi, type, false);
else if (curseg->alloc_type == LFS && is_next_segment_free(sbi, type))
new_curseg(sbi, type, false);
@@ -555,11 +550,9 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
change_curseg(sbi, type, true);
else
new_curseg(sbi, type, false);
-out:
#ifdef CONFIG_F2FS_STAT_FS
sbi->segment_count[curseg->alloc_type]++;
#endif
- return;
}
void allocate_new_segments(struct f2fs_sb_info *sbi)
@@ -611,18 +604,12 @@ static void f2fs_end_io_write(struct bio *bio, int err)
struct bio *f2fs_bio_alloc(struct block_device *bdev, int npages)
{
struct bio *bio;
- struct bio_private *priv;
-retry:
- priv = kmalloc(sizeof(struct bio_private), GFP_NOFS);
- if (!priv) {
- cond_resched();
- goto retry;
- }
/* No failure on bio allocation */
bio = bio_alloc(GFP_NOIO, npages);
bio->bi_bdev = bdev;
- bio->bi_private = priv;
+ bio->bi_private = NULL;
+
return bio;
}
@@ -681,8 +668,17 @@ static void submit_write_page(struct f2fs_sb_info *sbi, struct page *page,
do_submit_bio(sbi, type, false);
alloc_new:
if (sbi->bio[type] == NULL) {
+ struct bio_private *priv;
+retry:
+ priv = kmalloc(sizeof(struct bio_private), GFP_NOFS);
+ if (!priv) {
+ cond_resched();
+ goto retry;
+ }
+
sbi->bio[type] = f2fs_bio_alloc(bdev, max_hw_blocks(sbi));
sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
+ sbi->bio[type]->bi_private = priv;
/*
* The end_io will be assigned at the submission phase.
* Until then, let bio_add_page() merge consecutive IOs as much
@@ -702,6 +698,16 @@ alloc_new:
trace_f2fs_submit_write_page(page, blk_addr, type);
}
+void f2fs_wait_on_page_writeback(struct page *page,
+ enum page_type type, bool sync)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(page->mapping->host->i_sb);
+ if (PageWriteback(page)) {
+ f2fs_submit_bio(sbi, type, sync);
+ wait_on_page_writeback(page);
+ }
+}
+
static bool __has_curseg_space(struct f2fs_sb_info *sbi, int type)
{
struct curseg_info *curseg = CURSEG_I(sbi, type);
@@ -1179,7 +1185,6 @@ void write_node_summaries(struct f2fs_sb_info *sbi, block_t start_blk)
{
if (is_set_ckpt_flags(F2FS_CKPT(sbi), CP_UMOUNT_FLAG))
write_normal_summaries(sbi, start_blk, CURSEG_HOT_NODE);
- return;
}
int lookup_journal_in_cursum(struct f2fs_summary_block *sum, int type,
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 062424a0e4c..bdd10eab8c4 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -142,6 +142,7 @@ struct victim_sel_policy {
int alloc_mode; /* LFS or SSR */
int gc_mode; /* GC_CB or GC_GREEDY */
unsigned long *dirty_segmap; /* dirty segment bitmap */
+ unsigned int max_search; /* maximum # of segments to search */
unsigned int offset; /* last scanned bitmap offset */
unsigned int ofs_unit; /* bitmap search unit */
unsigned int min_cost; /* minimum cost */
@@ -453,7 +454,8 @@ static inline int reserved_sections(struct f2fs_sb_info *sbi)
static inline bool need_SSR(struct f2fs_sb_info *sbi)
{
- return (free_sections(sbi) < overprovision_sections(sbi));
+ return ((prefree_segments(sbi) / sbi->segs_per_sec)
+ + free_sections(sbi) < overprovision_sections(sbi));
}
static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
@@ -470,7 +472,7 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
static inline int utilization(struct f2fs_sb_info *sbi)
{
- return div_u64(valid_user_blocks(sbi) * 100, sbi->user_block_count);
+ return div_u64((u64)valid_user_blocks(sbi) * 100, sbi->user_block_count);
}
/*
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 75c7dc363e9..13d0a0fe49d 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -18,20 +18,25 @@
#include <linux/parser.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/exportfs.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
+#include <linux/sysfs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "xattr.h"
+#include "gc.h"
#define CREATE_TRACE_POINTS
#include <trace/events/f2fs.h>
+static struct proc_dir_entry *f2fs_proc_root;
static struct kmem_cache *f2fs_inode_cachep;
+static struct kset *f2fs_kset;
enum {
Opt_gc_background,
@@ -42,6 +47,7 @@ enum {
Opt_noacl,
Opt_active_logs,
Opt_disable_ext_identify,
+ Opt_inline_xattr,
Opt_err,
};
@@ -54,9 +60,117 @@ static match_table_t f2fs_tokens = {
{Opt_noacl, "noacl"},
{Opt_active_logs, "active_logs=%u"},
{Opt_disable_ext_identify, "disable_ext_identify"},
+ {Opt_inline_xattr, "inline_xattr"},
{Opt_err, NULL},
};
+/* Sysfs support for f2fs */
+struct f2fs_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
+ ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
+ const char *, size_t);
+ int offset;
+};
+
+static ssize_t f2fs_sbi_show(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi, char *buf)
+{
+ struct f2fs_gc_kthread *gc_kth = sbi->gc_thread;
+ unsigned int *ui;
+
+ if (!gc_kth)
+ return -EINVAL;
+
+ ui = (unsigned int *)(((char *)gc_kth) + a->offset);
+
+ return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
+}
+
+static ssize_t f2fs_sbi_store(struct f2fs_attr *a,
+ struct f2fs_sb_info *sbi,
+ const char *buf, size_t count)
+{
+ struct f2fs_gc_kthread *gc_kth = sbi->gc_thread;
+ unsigned long t;
+ unsigned int *ui;
+ ssize_t ret;
+
+ if (!gc_kth)
+ return -EINVAL;
+
+ ui = (unsigned int *)(((char *)gc_kth) + a->offset);
+
+ ret = kstrtoul(skip_spaces(buf), 0, &t);
+ if (ret < 0)
+ return ret;
+ *ui = t;
+ return count;
+}
+
+static ssize_t f2fs_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+ s_kobj);
+ struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
+
+ return a->show ? a->show(a, sbi, buf) : 0;
+}
+
+static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t len)
+{
+ struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+ s_kobj);
+ struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);
+
+ return a->store ? a->store(a, sbi, buf, len) : 0;
+}
+
+static void f2fs_sb_release(struct kobject *kobj)
+{
+ struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
+ s_kobj);
+ complete(&sbi->s_kobj_unregister);
+}
+
+#define F2FS_ATTR_OFFSET(_name, _mode, _show, _store, _elname) \
+static struct f2fs_attr f2fs_attr_##_name = { \
+ .attr = {.name = __stringify(_name), .mode = _mode }, \
+ .show = _show, \
+ .store = _store, \
+ .offset = offsetof(struct f2fs_gc_kthread, _elname), \
+}
+
+#define F2FS_RW_ATTR(name, elname) \
+ F2FS_ATTR_OFFSET(name, 0644, f2fs_sbi_show, f2fs_sbi_store, elname)
+
+F2FS_RW_ATTR(gc_min_sleep_time, min_sleep_time);
+F2FS_RW_ATTR(gc_max_sleep_time, max_sleep_time);
+F2FS_RW_ATTR(gc_no_gc_sleep_time, no_gc_sleep_time);
+F2FS_RW_ATTR(gc_idle, gc_idle);
+
+#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
+static struct attribute *f2fs_attrs[] = {
+ ATTR_LIST(gc_min_sleep_time),
+ ATTR_LIST(gc_max_sleep_time),
+ ATTR_LIST(gc_no_gc_sleep_time),
+ ATTR_LIST(gc_idle),
+ NULL,
+};
+
+static const struct sysfs_ops f2fs_attr_ops = {
+ .show = f2fs_attr_show,
+ .store = f2fs_attr_store,
+};
+
+static struct kobj_type f2fs_ktype = {
+ .default_attrs = f2fs_attrs,
+ .sysfs_ops = &f2fs_attr_ops,
+ .release = f2fs_sb_release,
+};
+
void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...)
{
struct va_format vaf;
@@ -126,11 +240,18 @@ static int parse_options(struct super_block *sb, char *options)
case Opt_nouser_xattr:
clear_opt(sbi, XATTR_USER);
break;
+ case Opt_inline_xattr:
+ set_opt(sbi, INLINE_XATTR);
+ break;
#else
case Opt_nouser_xattr:
f2fs_msg(sb, KERN_INFO,
"nouser_xattr options not supported");
break;
+ case Opt_inline_xattr:
+ f2fs_msg(sb, KERN_INFO,
+ "inline_xattr options not supported");
+ break;
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
case Opt_noacl:
@@ -180,6 +301,9 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb)
set_inode_flag(fi, FI_NEW_INODE);
+ if (test_opt(F2FS_SB(sb), INLINE_XATTR))
+ set_inode_flag(fi, FI_INLINE_XATTR);
+
return &fi->vfs_inode;
}
@@ -205,7 +329,6 @@ static int f2fs_drop_inode(struct inode *inode)
static void f2fs_dirty_inode(struct inode *inode, int flags)
{
set_inode_flag(F2FS_I(inode), FI_DIRTY_INODE);
- return;
}
static void f2fs_i_callback(struct rcu_head *head)
@@ -223,6 +346,12 @@ static void f2fs_put_super(struct super_block *sb)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ if (sbi->s_proc) {
+ remove_proc_entry("segment_info", sbi->s_proc);
+ remove_proc_entry(sb->s_id, f2fs_proc_root);
+ }
+ kobject_del(&sbi->s_kobj);
+
f2fs_destroy_stats(sbi);
stop_gc_thread(sbi);
@@ -236,6 +365,8 @@ static void f2fs_put_super(struct super_block *sb)
destroy_segment_manager(sbi);
kfree(sbi->ckpt);
+ kobject_put(&sbi->s_kobj);
+ wait_for_completion(&sbi->s_kobj_unregister);
sb->s_fs_info = NULL;
brelse(sbi->raw_super_buf);
@@ -325,6 +456,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, ",user_xattr");
else
seq_puts(seq, ",nouser_xattr");
+ if (test_opt(sbi, INLINE_XATTR))
+ seq_puts(seq, ",inline_xattr");
#endif
#ifdef CONFIG_F2FS_FS_POSIX_ACL
if (test_opt(sbi, POSIX_ACL))
@@ -340,6 +473,36 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
return 0;
}
+static int segment_info_seq_show(struct seq_file *seq, void *offset)
+{
+ struct super_block *sb = seq->private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ unsigned int total_segs = le32_to_cpu(sbi->raw_super->segment_count_main);
+ int i;
+
+ for (i = 0; i < total_segs; i++) {
+ seq_printf(seq, "%u", get_valid_blocks(sbi, i, 1));
+ if (i != 0 && (i % 10) == 0)
+ seq_puts(seq, "\n");
+ else
+ seq_puts(seq, " ");
+ }
+ return 0;
+}
+
+static int segment_info_open_fs(struct inode *inode, struct file *file)
+{
+ return single_open(file, segment_info_seq_show, PDE_DATA(inode));
+}
+
+static const struct file_operations f2fs_seq_segment_info_fops = {
+ .owner = THIS_MODULE,
+ .open = segment_info_open_fs,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int f2fs_remount(struct super_block *sb, int *flags, char *data)
{
struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -455,7 +618,7 @@ static const struct export_operations f2fs_export_ops = {
static loff_t max_file_size(unsigned bits)
{
- loff_t result = ADDRS_PER_INODE;
+ loff_t result = (DEF_ADDRS_PER_INODE - F2FS_INLINE_XATTR_ADDRS);
loff_t leaf_count = ADDRS_PER_BLOCK;
/* two direct node blocks */
@@ -766,6 +929,13 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
if (err)
goto fail;
+ if (f2fs_proc_root)
+ sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root);
+
+ if (sbi->s_proc)
+ proc_create_data("segment_info", S_IRUGO, sbi->s_proc,
+ &f2fs_seq_segment_info_fops, sb);
+
if (test_opt(sbi, DISCARD)) {
struct request_queue *q = bdev_get_queue(sb->s_bdev);
if (!blk_queue_discard(q))
@@ -774,6 +944,13 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
"the device does not support discard");
}
+ sbi->s_kobj.kset = f2fs_kset;
+ init_completion(&sbi->s_kobj_unregister);
+ err = kobject_init_and_add(&sbi->s_kobj, &f2fs_ktype, NULL,
+ "%s", sb->s_id);
+ if (err)
+ goto fail;
+
return 0;
fail:
stop_gc_thread(sbi);
@@ -841,29 +1018,49 @@ static int __init init_f2fs_fs(void)
goto fail;
err = create_node_manager_caches();
if (err)
- goto fail;
+ goto free_inodecache;
err = create_gc_caches();
if (err)
- goto fail;
+ goto free_node_manager_caches;
err = create_checkpoint_caches();
if (err)
- goto fail;
+ goto free_gc_caches;
+ f2fs_kset = kset_create_and_add("f2fs", NULL, fs_kobj);
+ if (!f2fs_kset) {
+ err = -ENOMEM;
+ goto free_checkpoint_caches;
+ }
err = register_filesystem(&f2fs_fs_type);
if (err)
- goto fail;
+ goto free_kset;
f2fs_create_root_stats();
+ f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
+ return 0;
+
+free_kset:
+ kset_unregister(f2fs_kset);
+free_checkpoint_caches:
+ destroy_checkpoint_caches();
+free_gc_caches:
+ destroy_gc_caches();
+free_node_manager_caches:
+ destroy_node_manager_caches();
+free_inodecache:
+ destroy_inodecache();
fail:
return err;
}
static void __exit exit_f2fs_fs(void)
{
+ remove_proc_entry("fs/f2fs", NULL);
f2fs_destroy_root_stats();
unregister_filesystem(&f2fs_fs_type);
destroy_checkpoint_caches();
destroy_gc_caches();
destroy_node_manager_caches();
destroy_inodecache();
+ kset_unregister(f2fs_kset);
}
module_init(init_f2fs_fs)
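
The super.c hunks above wire the garbage-collector knobs (gc_min_sleep_time, gc_max_sleep_time, gc_no_gc_sleep_time, gc_idle) into a per-superblock kobject under the new "f2fs" kset, so they show up as /sys/fs/f2fs/<sb->s_id>/<name>. A minimal userspace sketch of driving those files follows; the device name "sda1" is only an assumed example.

/* Sketch: poke the new per-superblock GC tunables under /sys/fs/f2fs/<dev>/.
 * The device name "sda1" is an assumption; substitute the sb->s_id of a
 * mounted f2fs volume. */
#include <stdio.h>
#include <stdlib.h>

static long read_tunable(const char *dev, const char *name)
{
	char path[256], buf[32];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/fs/f2fs/%s/%s", dev, name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	return strtol(buf, NULL, 0);
}

static int write_tunable(const char *dev, const char *name, long val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/fs/f2fs/%s/%s", dev, name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fprintf(f, "%ld\n", val);
	return fclose(f);
}

int main(void)
{
	const char *dev = "sda1";	/* assumed device name */

	printf("gc_min_sleep_time = %ld\n", read_tunable(dev, "gc_min_sleep_time"));
	/* raise the idle GC sleep time; f2fs_sbi_store() parses the value with kstrtoul() */
	if (write_tunable(dev, "gc_no_gc_sleep_time", 300000))
		perror("write gc_no_gc_sleep_time");
	return 0;
}

Writes land in f2fs_sbi_store(), which stores the parsed value straight into the matching f2fs_gc_kthread field via the offset recorded by F2FS_ATTR_OFFSET().
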
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 3ab07ecd86c..1ac8a5f6e38 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -246,40 +246,170 @@ static inline const struct xattr_handler *f2fs_xattr_handler(int name_index)
return handler;
}
+static struct f2fs_xattr_entry *__find_xattr(void *base_addr, int name_index,
+ size_t name_len, const char *name)
+{
+ struct f2fs_xattr_entry *entry;
+
+ list_for_each_xattr(entry, base_addr) {
+ if (entry->e_name_index != name_index)
+ continue;
+ if (entry->e_name_len != name_len)
+ continue;
+ if (!memcmp(entry->e_name, name, name_len))
+ break;
+ }
+ return entry;
+}
+
+static void *read_all_xattrs(struct inode *inode, struct page *ipage)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ struct f2fs_xattr_header *header;
+ size_t size = PAGE_SIZE, inline_size = 0;
+ void *txattr_addr;
+
+ inline_size = inline_xattr_size(inode);
+
+ txattr_addr = kzalloc(inline_size + size, GFP_KERNEL);
+ if (!txattr_addr)
+ return NULL;
+
+ /* read from inline xattr */
+ if (inline_size) {
+ struct page *page = NULL;
+ void *inline_addr;
+
+ if (ipage) {
+ inline_addr = inline_xattr_addr(ipage);
+ } else {
+ page = get_node_page(sbi, inode->i_ino);
+ if (IS_ERR(page))
+ goto fail;
+ inline_addr = inline_xattr_addr(page);
+ }
+ memcpy(txattr_addr, inline_addr, inline_size);
+ f2fs_put_page(page, 1);
+ }
+
+ /* read from xattr node block */
+ if (F2FS_I(inode)->i_xattr_nid) {
+ struct page *xpage;
+ void *xattr_addr;
+
+ /* The inode already has an extended attribute block. */
+ xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
+ if (IS_ERR(xpage))
+ goto fail;
+
+ xattr_addr = page_address(xpage);
+ memcpy(txattr_addr + inline_size, xattr_addr, PAGE_SIZE);
+ f2fs_put_page(xpage, 1);
+ }
+
+ header = XATTR_HDR(txattr_addr);
+
+ /* xattrs have never been allocated */
+ if (le32_to_cpu(header->h_magic) != F2FS_XATTR_MAGIC) {
+ header->h_magic = cpu_to_le32(F2FS_XATTR_MAGIC);
+ header->h_refcount = cpu_to_le32(1);
+ }
+ return txattr_addr;
+fail:
+ kzfree(txattr_addr);
+ return NULL;
+}
+
+static inline int write_all_xattrs(struct inode *inode, __u32 hsize,
+ void *txattr_addr, struct page *ipage)
+{
+ struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
+ size_t inline_size = 0;
+ void *xattr_addr;
+ struct page *xpage;
+ nid_t new_nid = 0;
+ int err;
+
+ inline_size = inline_xattr_size(inode);
+
+ if (hsize > inline_size && !F2FS_I(inode)->i_xattr_nid)
+ if (!alloc_nid(sbi, &new_nid))
+ return -ENOSPC;
+
+ /* write to inline xattr */
+ if (inline_size) {
+ struct page *page = NULL;
+ void *inline_addr;
+
+ if (ipage) {
+ inline_addr = inline_xattr_addr(ipage);
+ } else {
+ page = get_node_page(sbi, inode->i_ino);
+ if (IS_ERR(page)) {
+ alloc_nid_failed(sbi, new_nid);
+ return PTR_ERR(page);
+ }
+ inline_addr = inline_xattr_addr(page);
+ }
+ memcpy(inline_addr, txattr_addr, inline_size);
+ f2fs_put_page(page, 1);
+
+ /* no need to use xattr node block */
+ if (hsize <= inline_size) {
+ err = truncate_xattr_node(inode, ipage);
+ alloc_nid_failed(sbi, new_nid);
+ return err;
+ }
+ }
+
+ /* write to xattr node block */
+ if (F2FS_I(inode)->i_xattr_nid) {
+ xpage = get_node_page(sbi, F2FS_I(inode)->i_xattr_nid);
+ if (IS_ERR(xpage)) {
+ alloc_nid_failed(sbi, new_nid);
+ return PTR_ERR(xpage);
+ }
+ BUG_ON(new_nid);
+ } else {
+ struct dnode_of_data dn;
+ set_new_dnode(&dn, inode, NULL, NULL, new_nid);
+ xpage = new_node_page(&dn, XATTR_NODE_OFFSET, ipage);
+ if (IS_ERR(xpage)) {
+ alloc_nid_failed(sbi, new_nid);
+ return PTR_ERR(xpage);
+ }
+ alloc_nid_done(sbi, new_nid);
+ }
+
+ xattr_addr = page_address(xpage);
+ memcpy(xattr_addr, txattr_addr + inline_size, PAGE_SIZE -
+ sizeof(struct node_footer));
+ set_page_dirty(xpage);
+ f2fs_put_page(xpage, 1);
+
+ /* need to checkpoint during fsync */
+ F2FS_I(inode)->xattr_ver = cur_cp_version(F2FS_CKPT(sbi));
+ return 0;
+}
+
int f2fs_getxattr(struct inode *inode, int name_index, const char *name,
void *buffer, size_t buffer_size)
{
- struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
- struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_xattr_entry *entry;
- struct page *page;
void *base_addr;
- int error = 0, found = 0;
+ int error = 0;
size_t value_len, name_len;
if (name == NULL)
return -EINVAL;
name_len = strlen(name);
- if (!fi->i_xattr_nid)
- return -ENODATA;
+ base_addr = read_all_xattrs(inode, NULL);
+ if (!base_addr)
+ return -ENOMEM;
- page = get_node_page(sbi, fi->i_xattr_nid);
- if (IS_ERR(page))
- return PTR_ERR(page);
- base_addr = page_address(page);
-
- list_for_each_xattr(entry, base_addr) {
- if (entry->e_name_index != name_index)
- continue;
- if (entry->e_name_len != name_len)
- continue;
- if (!memcmp(entry->e_name, name, name_len)) {
- found = 1;
- break;
- }
- }
- if (!found) {
+ entry = __find_xattr(base_addr, name_index, name_len, name);
+ if (IS_XATTR_LAST_ENTRY(entry)) {
error = -ENODATA;
goto cleanup;
}
@@ -298,28 +428,21 @@ int f2fs_getxattr(struct inode *inode, int name_index, const char *name,
error = value_len;
cleanup:
- f2fs_put_page(page, 1);
+ kzfree(base_addr);
return error;
}
ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
struct inode *inode = dentry->d_inode;
- struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
- struct f2fs_inode_info *fi = F2FS_I(inode);
struct f2fs_xattr_entry *entry;
- struct page *page;
void *base_addr;
int error = 0;
size_t rest = buffer_size;
- if (!fi->i_xattr_nid)
- return 0;
-
- page = get_node_page(sbi, fi->i_xattr_nid);
- if (IS_ERR(page))
- return PTR_ERR(page);
- base_addr = page_address(page);
+ base_addr = read_all_xattrs(inode, NULL);
+ if (!base_addr)
+ return -ENOMEM;
list_for_each_xattr(entry, base_addr) {
const struct xattr_handler *handler =
@@ -342,7 +465,7 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
}
error = buffer_size - rest;
cleanup:
- f2fs_put_page(page, 1);
+ kzfree(base_addr);
return error;
}
@@ -351,14 +474,13 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
{
struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
struct f2fs_inode_info *fi = F2FS_I(inode);
- struct f2fs_xattr_header *header = NULL;
struct f2fs_xattr_entry *here, *last;
- struct page *page;
void *base_addr;
- int error, found, free, newsize;
+ int found, newsize;
size_t name_len;
- char *pval;
int ilock;
+ __u32 new_hsize;
+ int error = -ENOMEM;
if (name == NULL)
return -EINVAL;
@@ -368,67 +490,21 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
name_len = strlen(name);
- if (name_len > F2FS_NAME_LEN || value_len > MAX_VALUE_LEN)
+ if (name_len > F2FS_NAME_LEN || value_len > MAX_VALUE_LEN(inode))
return -ERANGE;
f2fs_balance_fs(sbi);
ilock = mutex_lock_op(sbi);
- if (!fi->i_xattr_nid) {
- /* Allocate new attribute block */
- struct dnode_of_data dn;
-
- if (!alloc_nid(sbi, &fi->i_xattr_nid)) {
- error = -ENOSPC;
- goto exit;
- }
- set_new_dnode(&dn, inode, NULL, NULL, fi->i_xattr_nid);
- mark_inode_dirty(inode);
-
- page = new_node_page(&dn, XATTR_NODE_OFFSET, ipage);
- if (IS_ERR(page)) {
- alloc_nid_failed(sbi, fi->i_xattr_nid);
- fi->i_xattr_nid = 0;
- error = PTR_ERR(page);
- goto exit;
- }
-
- alloc_nid_done(sbi, fi->i_xattr_nid);
- base_addr = page_address(page);
- header = XATTR_HDR(base_addr);
- header->h_magic = cpu_to_le32(F2FS_XATTR_MAGIC);
- header->h_refcount = cpu_to_le32(1);
- } else {
- /* The inode already has an extended attribute block. */
- page = get_node_page(sbi, fi->i_xattr_nid);
- if (IS_ERR(page)) {
- error = PTR_ERR(page);
- goto exit;
- }
-
- base_addr = page_address(page);
- header = XATTR_HDR(base_addr);
- }
-
- if (le32_to_cpu(header->h_magic) != F2FS_XATTR_MAGIC) {
- error = -EIO;
- goto cleanup;
- }
+ base_addr = read_all_xattrs(inode, ipage);
+ if (!base_addr)
+ goto exit;
/* find entry with wanted name. */
- found = 0;
- list_for_each_xattr(here, base_addr) {
- if (here->e_name_index != name_index)
- continue;
- if (here->e_name_len != name_len)
- continue;
- if (!memcmp(here->e_name, name, name_len)) {
- found = 1;
- break;
- }
- }
+ here = __find_xattr(base_addr, name_index, name_len, name);
+ found = IS_XATTR_LAST_ENTRY(here) ? 0 : 1;
last = here;
while (!IS_XATTR_LAST_ENTRY(last))
@@ -439,22 +515,25 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
/* 1. Check space */
if (value) {
- /* If value is NULL, it is remove operation.
+ int free;
+ /*
+ * If value is NULL, it is a remove operation.
* In case of an update operation, we calculate free.
*/
- free = MIN_OFFSET - ((char *)last - (char *)header);
+ free = MIN_OFFSET(inode) - ((char *)last - (char *)base_addr);
if (found)
free = free - ENTRY_SIZE(here);
if (free < newsize) {
error = -ENOSPC;
- goto cleanup;
+ goto exit;
}
}
/* 2. Remove old entry */
if (found) {
- /* If entry is found, remove old entry.
+ /*
+ * If entry is found, remove old entry.
* If not found, remove operation is not needed.
*/
struct f2fs_xattr_entry *next = XATTR_NEXT_ENTRY(here);
@@ -465,10 +544,15 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
memset(last, 0, oldsize);
}
+ new_hsize = (char *)last - (char *)base_addr;
+
/* 3. Write new entry */
if (value) {
- /* Before we come here, old entry is removed.
- * We just write new entry. */
+ char *pval;
+ /*
+ * Before we come here, old entry is removed.
+ * We just write new entry.
+ */
memset(last, 0, newsize);
last->e_name_index = name_index;
last->e_name_len = name_len;
@@ -476,26 +560,25 @@ int f2fs_setxattr(struct inode *inode, int name_index, const char *name,
pval = last->e_name + name_len;
memcpy(pval, value, value_len);
last->e_value_size = cpu_to_le16(value_len);
+ new_hsize += newsize;
}
- set_page_dirty(page);
- f2fs_put_page(page, 1);
+ error = write_all_xattrs(inode, new_hsize, base_addr, ipage);
+ if (error)
+ goto exit;
if (is_inode_flag_set(fi, FI_ACL_MODE)) {
inode->i_mode = fi->i_acl_mode;
inode->i_ctime = CURRENT_TIME;
clear_inode_flag(fi, FI_ACL_MODE);
}
+
if (ipage)
update_inode(inode, ipage);
else
update_inode_page(inode);
- mutex_unlock_op(sbi, ilock);
-
- return 0;
-cleanup:
- f2fs_put_page(page, 1);
exit:
mutex_unlock_op(sbi, ilock);
+ kzfree(base_addr);
return error;
}
diff --git a/fs/f2fs/xattr.h b/fs/f2fs/xattr.h
index 3c0817bef25..02a08fb88a1 100644
--- a/fs/f2fs/xattr.h
+++ b/fs/f2fs/xattr.h
@@ -51,7 +51,7 @@ struct f2fs_xattr_entry {
#define XATTR_HDR(ptr) ((struct f2fs_xattr_header *)(ptr))
#define XATTR_ENTRY(ptr) ((struct f2fs_xattr_entry *)(ptr))
-#define XATTR_FIRST_ENTRY(ptr) (XATTR_ENTRY(XATTR_HDR(ptr)+1))
+#define XATTR_FIRST_ENTRY(ptr) (XATTR_ENTRY(XATTR_HDR(ptr) + 1))
#define XATTR_ROUND (3)
#define XATTR_ALIGN(size) ((size + XATTR_ROUND) & ~XATTR_ROUND)
@@ -69,17 +69,16 @@ struct f2fs_xattr_entry {
!IS_XATTR_LAST_ENTRY(entry);\
entry = XATTR_NEXT_ENTRY(entry))
+#define MIN_OFFSET(i) XATTR_ALIGN(inline_xattr_size(i) + PAGE_SIZE - \
+ sizeof(struct node_footer) - sizeof(__u32))
-#define MIN_OFFSET XATTR_ALIGN(PAGE_SIZE - \
- sizeof(struct node_footer) - \
- sizeof(__u32))
-
-#define MAX_VALUE_LEN (MIN_OFFSET - sizeof(struct f2fs_xattr_header) - \
- sizeof(struct f2fs_xattr_entry))
+#define MAX_VALUE_LEN(i) (MIN_OFFSET(i) - \
+ sizeof(struct f2fs_xattr_header) - \
+ sizeof(struct f2fs_xattr_entry))
/*
* On-disk structure of f2fs_xattr
- * We use only 1 block for xattr.
+ * We use the inline xattr space + 1 block for xattrs.
*
* +--------------------+
* | f2fs_xattr_header |
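
With the inline_xattr option, the attribute area an inode can use becomes its inline slots plus one xattr node block, which is why MIN_OFFSET and MAX_VALUE_LEN now take the inode as a parameter. The sketch below mirrors that arithmetic in userspace; every size constant in it (page size, node footer, header, entry, inline area) is an assumed illustration value, not the on-disk ABI.

/* Sketch of the new per-inode xattr capacity arithmetic from xattr.h.
 * All sizes below are assumptions chosen for illustration only. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE		4096u	/* assumed block size */
#define NODE_FOOTER_SIZE	24u	/* assumed sizeof(struct node_footer) */
#define XATTR_HDR_SIZE		24u	/* assumed sizeof(struct f2fs_xattr_header) */
#define XATTR_ENTRY_SIZE	4u	/* assumed sizeof(struct f2fs_xattr_entry) */

#define XATTR_ROUND		3u
#define XATTR_ALIGN(size)	(((size) + XATTR_ROUND) & ~XATTR_ROUND)

/* mirrors MIN_OFFSET(i): inline area + one xattr block, minus the node
 * footer and a trailing __u32 */
static uint32_t min_offset(uint32_t inline_size)
{
	return XATTR_ALIGN(inline_size + PAGE_SIZE - NODE_FOOTER_SIZE
			   - (uint32_t)sizeof(uint32_t));
}

/* mirrors MAX_VALUE_LEN(i) */
static uint32_t max_value_len(uint32_t inline_size)
{
	return min_offset(inline_size) - XATTR_HDR_SIZE - XATTR_ENTRY_SIZE;
}

int main(void)
{
	uint32_t inline_size = 200;	/* assumed inline_xattr_size(inode) */

	printf("without inline xattrs: max value %u bytes\n", max_value_len(0));
	printf("with %u inline bytes:  max value %u bytes\n",
	       inline_size, max_value_len(inline_size));
	return 0;
}

Anything beyond the inline area is spilled by write_all_xattrs() into the xattr node block allocated through new_node_page(&dn, XATTR_NODE_OFFSET, ipage).
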
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 6599222536e..65343c3741f 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -730,14 +730,14 @@ static int __init fcntl_init(void)
* Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
* is defined as O_NONBLOCK on some platforms and not on others.
*/
- BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+ BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
O_RDONLY | O_WRONLY | O_RDWR |
O_CREAT | O_EXCL | O_NOCTTY |
O_TRUNC | O_APPEND | /* O_NONBLOCK | */
__O_SYNC | O_DSYNC | FASYNC |
O_DIRECT | O_LARGEFILE | O_DIRECTORY |
O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
- __FMODE_EXEC | O_PATH
+ __FMODE_EXEC | O_PATH | __O_TMPFILE
));
fasync_cache = kmem_cache_create("fasync_cache",
diff --git a/fs/file_table.c b/fs/file_table.c
index b44e4c55978..322cd37626c 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -385,6 +385,10 @@ static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
*/
void file_sb_list_add(struct file *file, struct super_block *sb)
{
+ if (likely(!(file->f_mode & FMODE_WRITE)))
+ return;
+ if (!S_ISREG(file_inode(file)->i_mode))
+ return;
lg_local_lock(&files_lglock);
__file_sb_list_add(file, sb);
lg_local_unlock(&files_lglock);
@@ -450,8 +454,6 @@ void mark_files_ro(struct super_block *sb)
lg_global_lock(&files_lglock);
do_file_list_for_each_entry(sb, f) {
- if (!S_ISREG(file_inode(f)->i_mode))
- continue;
if (!file_count(f))
continue;
if (!(f->f_mode & FMODE_WRITE))
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index aef34b1e635..adbfd66b380 100644
--- a/fs/fuse/cuse.c
+++ b/fs/fuse/cuse.c
@@ -568,6 +568,7 @@ static ssize_t cuse_class_waiting_show(struct device *dev,
return sprintf(buf, "%d\n", atomic_read(&cc->fc.num_waiting));
}
+static DEVICE_ATTR(waiting, S_IFREG | 0400, cuse_class_waiting_show, NULL);
static ssize_t cuse_class_abort_store(struct device *dev,
struct device_attribute *attr,
@@ -578,12 +579,14 @@ static ssize_t cuse_class_abort_store(struct device *dev,
fuse_abort_conn(&cc->fc);
return count;
}
+static DEVICE_ATTR(abort, S_IFREG | 0200, NULL, cuse_class_abort_store);
-static struct device_attribute cuse_class_dev_attrs[] = {
- __ATTR(waiting, S_IFREG | 0400, cuse_class_waiting_show, NULL),
- __ATTR(abort, S_IFREG | 0200, NULL, cuse_class_abort_store),
- { }
+static struct attribute *cuse_class_dev_attrs[] = {
+ &dev_attr_waiting.attr,
+ &dev_attr_abort.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(cuse_class_dev);
static struct miscdevice cuse_miscdev = {
.minor = MISC_DYNAMIC_MINOR,
@@ -609,7 +612,7 @@ static int __init cuse_init(void)
if (IS_ERR(cuse_class))
return PTR_ERR(cuse_class);
- cuse_class->dev_attrs = cuse_class_dev_attrs;
+ cuse_class->dev_groups = cuse_class_dev_groups;
rc = misc_register(&cuse_miscdev);
if (rc) {
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 9435384562a..544a809819c 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1838,14 +1838,14 @@ int __init gfs2_glock_init(void)
glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
WQ_HIGHPRI | WQ_FREEZABLE, 0);
- if (IS_ERR(glock_workqueue))
- return PTR_ERR(glock_workqueue);
+ if (!glock_workqueue)
+ return -ENOMEM;
gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
WQ_MEM_RECLAIM | WQ_FREEZABLE,
0);
- if (IS_ERR(gfs2_delete_workqueue)) {
+ if (!gfs2_delete_workqueue) {
destroy_workqueue(glock_workqueue);
- return PTR_ERR(gfs2_delete_workqueue);
+ return -ENOMEM;
}
register_shrinker(&glock_shrinker);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 5f2e5224c51..e2e0a90396e 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -47,7 +47,8 @@ static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
* None of the buffers should be dirty, locked, or pinned.
*/
-static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
+static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
+ unsigned int nr_revokes)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
struct list_head *head = &gl->gl_ail_list;
@@ -57,7 +58,9 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
gfs2_log_lock(sdp);
spin_lock(&sdp->sd_ail_lock);
- list_for_each_entry_safe(bd, tmp, head, bd_ail_gl_list) {
+ list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
+ if (nr_revokes == 0)
+ break;
bh = bd->bd_bh;
if (bh->b_state & b_state) {
if (fsync)
@@ -65,6 +68,7 @@ static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
gfs2_ail_error(gl, bh);
}
gfs2_trans_add_revoke(sdp, bd);
+ nr_revokes--;
}
GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
spin_unlock(&sdp->sd_ail_lock);
@@ -91,7 +95,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
WARN_ON_ONCE(current->journal_info);
current->journal_info = &tr;
- __gfs2_ail_flush(gl, 0);
+ __gfs2_ail_flush(gl, 0, tr.tr_revokes);
gfs2_trans_end(sdp);
gfs2_log_flush(sdp, NULL);
@@ -101,15 +105,19 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
unsigned int revokes = atomic_read(&gl->gl_ail_count);
+ unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
int ret;
if (!revokes)
return;
- ret = gfs2_trans_begin(sdp, 0, revokes);
+ while (revokes > max_revokes)
+ max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
+
+ ret = gfs2_trans_begin(sdp, 0, max_revokes);
if (ret)
return;
- __gfs2_ail_flush(gl, fsync);
+ __gfs2_ail_flush(gl, fsync, max_revokes);
gfs2_trans_end(sdp);
gfs2_log_flush(sdp, NULL);
}
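
The glops.c hunk above stops passing the raw AIL count to gfs2_trans_begin() and instead sizes the transaction by how many u64 revoke records fit in one log descriptor block plus however many continuation blocks are needed, then caps __gfs2_ail_flush() at that budget. A rough userspace sketch of that sizing follows; the two header sizes are assumptions used only for illustration.

/* Sketch of the revoke sizing used in gfs2_ail_flush(): one log descriptor
 * block holds the first batch of revoke records, and each continuation
 * block (carrying only a meta header) holds the rest.  The header sizes
 * below are assumptions. */
#include <stdio.h>

#define LOG_DESC_HDR	72u	/* assumed sizeof(struct gfs2_log_descriptor) */
#define META_HDR	24u	/* assumed sizeof(struct gfs2_meta_header) */

static unsigned int revoke_budget(unsigned int bsize, unsigned int revokes)
{
	unsigned int max_revokes = (bsize - LOG_DESC_HDR) / sizeof(unsigned long long);

	/* grow the budget one continuation block at a time, as the hunk does */
	while (revokes > max_revokes)
		max_revokes += (bsize - META_HDR) / sizeof(unsigned long long);
	return max_revokes;
}

int main(void)
{
	unsigned int bsize = 4096;	/* assumed sb_bsize */

	printf("ail count 100  -> budget %u\n", revoke_budget(bsize, 100));
	printf("ail count 2000 -> budget %u\n", revoke_budget(bsize, 2000));
	return 0;
}

gfs2_ail_flush() still reads gl_ail_count first, so the budget only grows past one descriptor block when the AIL actually holds more revokes than that.
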
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index bbb2715171c..64915eeae5a 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -594,7 +594,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
}
gfs2_glock_dq_uninit(ghs);
if (IS_ERR(d))
- return PTR_RET(d);
+ return PTR_ERR(d);
return error;
} else if (error != -ENOENT) {
goto fail_gunlock;
@@ -1750,6 +1750,10 @@ static ssize_t gfs2_getxattr(struct dentry *dentry, const char *name,
struct gfs2_holder gh;
int ret;
+ /* For selinux during lookup */
+ if (gfs2_glock_is_locked_by_me(ip->i_gl))
+ return generic_getxattr(dentry, name, data, size);
+
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
ret = gfs2_glock_nq(&gh);
if (ret == 0) {
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index e04d0e09ee7..7b0f5043cf2 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -155,7 +155,7 @@ static int __init init_gfs2_fs(void)
goto fail_wq;
gfs2_control_wq = alloc_workqueue("gfs2_control",
- WQ_NON_REENTRANT | WQ_UNBOUND | WQ_FREEZABLE, 0);
+ WQ_UNBOUND | WQ_FREEZABLE, 0);
if (!gfs2_control_wq)
goto fail_recovery;
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index a3f868ae3fd..d19b30ababf 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -463,6 +463,14 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb,
return inode;
}
+/*
+ * Hugetlbfs is not reclaimable; therefore its i_mmap_mutex will never
+ * be taken from reclaim -- unlike regular filesystems. This needs an
+ * annotation because huge_pmd_share() does an allocation under
+ * i_mmap_mutex.
+ */
+struct lock_class_key hugetlbfs_i_mmap_mutex_key;
+
static struct inode *hugetlbfs_get_inode(struct super_block *sb,
struct inode *dir,
umode_t mode, dev_t dev)
@@ -474,6 +482,8 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb,
struct hugetlbfs_inode_info *info;
inode->i_ino = get_next_ino();
inode_init_owner(inode, dir, mode);
+ lockdep_set_class(&inode->i_mapping->i_mmap_mutex,
+ &hugetlbfs_i_mmap_mutex_key);
inode->i_mapping->a_ops = &hugetlbfs_aops;
inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -916,14 +926,8 @@ static int get_hstate_idx(int page_size_log)
return h - hstates;
}
-static char *hugetlb_dname(struct dentry *dentry, char *buffer, int buflen)
-{
- return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
- dentry->d_name.name);
-}
-
static struct dentry_operations anon_ops = {
- .d_dname = hugetlb_dname
+ .d_dname = simple_dname
};
/*
diff --git a/fs/inode.c b/fs/inode.c
index d6dfb09c828..93a0625b46e 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1525,7 +1525,7 @@ static int update_time(struct inode *inode, struct timespec *time, int flags)
* This function automatically handles read only file systems and media,
* as well as the "noatime" flag and inode specific "noatime" markers.
*/
-void touch_atime(struct path *path)
+void touch_atime(const struct path *path)
{
struct vfsmount *mnt = path->mnt;
struct inode *inode = path->dentry->d_inode;
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index c348d6d8862..e5d408a7ea4 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -117,8 +117,8 @@ static void destroy_inodecache(void)
static int isofs_remount(struct super_block *sb, int *flags, char *data)
{
- /* we probably want a lot more here */
- *flags |= MS_RDONLY;
+ if (!(*flags & MS_RDONLY))
+ return -EROFS;
return 0;
}
@@ -763,15 +763,6 @@ root_found:
*/
s->s_maxbytes = 0x80000000000LL;
- /*
- * The CDROM is read-only, has no nodes (devices) on it, and since
- * all of the files appear to be owned by root, we really do not want
- * to allow suid. (suid or devices will not show up unless we have
- * Rock Ridge extensions)
- */
-
- s->s_flags |= MS_RDONLY /* | MS_NODEV | MS_NOSUID */;
-
/* Set this for reference. It's not currently used except on write
which we don't have .. */
@@ -1530,6 +1521,9 @@ struct inode *isofs_iget(struct super_block *sb,
static struct dentry *isofs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
+ /* We don't support read-write mounts */
+ if (!(flags & MS_RDONLY))
+ return ERR_PTR(-EACCES);
return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
}
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index 11bb11f48b3..bb217dcb41a 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -340,13 +340,13 @@ void journal_commit_transaction(journal_t *journal)
J_ASSERT(journal->j_committing_transaction == NULL);
commit_transaction = journal->j_running_transaction;
- J_ASSERT(commit_transaction->t_state == T_RUNNING);
trace_jbd_start_commit(journal, commit_transaction);
jbd_debug(1, "JBD: starting commit of transaction %d\n",
commit_transaction->t_tid);
spin_lock(&journal->j_state_lock);
+ J_ASSERT(commit_transaction->t_state == T_RUNNING);
commit_transaction->t_state = T_LOCKED;
trace_jbd_commit_locking(journal, commit_transaction);
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 6510d635572..2d04f9afafd 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -90,6 +90,24 @@ static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
static void __journal_abort_soft (journal_t *journal, int errno);
static const char *journal_dev_name(journal_t *journal, char *buffer);
+#ifdef CONFIG_JBD_DEBUG
+void __jbd_debug(int level, const char *file, const char *func,
+ unsigned int line, const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ if (level > journal_enable_debug)
+ return;
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ printk(KERN_DEBUG "%s: (%s, %u): %pV\n", file, func, line, &vaf);
+ va_end(args);
+}
+EXPORT_SYMBOL(__jbd_debug);
+#endif
+
/*
* Helper function used to manage commit timeouts
*/
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 559bec1a37b..cf2fc059406 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -343,14 +343,14 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
struct page *page = bh->b_page;
__u8 *addr;
__u32 csum32;
+ __be32 seq;
if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
return;
- sequence = cpu_to_be32(sequence);
+ seq = cpu_to_be32(sequence);
addr = kmap_atomic(page);
- csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
- sizeof(sequence));
+ csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
csum32 = jbd2_chksum(j, csum32, addr + offset_in_page(bh->b_data),
bh->b_size);
kunmap_atomic(addr);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 02c7ad9d7a4..52032647dd4 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -130,9 +130,10 @@ int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb)
return sb->s_checksum_type == JBD2_CRC32C_CHKSUM;
}
-static __u32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb)
+static __be32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb)
{
- __u32 csum, old_csum;
+ __u32 csum;
+ __be32 old_csum;
old_csum = sb->s_checksum;
sb->s_checksum = 0;
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index d4851464b57..3929c50428b 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -178,7 +178,8 @@ static int jbd2_descr_block_csum_verify(journal_t *j,
void *buf)
{
struct jbd2_journal_block_tail *tail;
- __u32 provided, calculated;
+ __be32 provided;
+ __u32 calculated;
if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
return 1;
@@ -190,8 +191,7 @@ static int jbd2_descr_block_csum_verify(journal_t *j,
calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
tail->t_checksum = provided;
- provided = be32_to_cpu(provided);
- return provided == calculated;
+ return provided == cpu_to_be32(calculated);
}
/*
@@ -381,7 +381,8 @@ static int calc_chksums(journal_t *journal, struct buffer_head *bh,
static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
{
struct commit_header *h;
- __u32 provided, calculated;
+ __be32 provided;
+ __u32 calculated;
if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
return 1;
@@ -392,21 +393,20 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
h->h_chksum[0] = provided;
- provided = be32_to_cpu(provided);
- return provided == calculated;
+ return provided == cpu_to_be32(calculated);
}
static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
void *buf, __u32 sequence)
{
__u32 csum32;
+ __be32 seq;
if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
return 1;
- sequence = cpu_to_be32(sequence);
- csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
- sizeof(sequence));
+ seq = cpu_to_be32(sequence);
+ csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq));
csum32 = jbd2_chksum(j, csum32, buf, j->j_blocksize);
return tag->t_checksum == cpu_to_be16(csum32);
@@ -808,7 +808,8 @@ static int jbd2_revoke_block_csum_verify(journal_t *j,
void *buf)
{
struct jbd2_journal_revoke_tail *tail;
- __u32 provided, calculated;
+ __be32 provided;
+ __u32 calculated;
if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
return 1;
@@ -820,8 +821,7 @@ static int jbd2_revoke_block_csum_verify(journal_t *j,
calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
tail->r_checksum = provided;
- provided = be32_to_cpu(provided);
- return provided == calculated;
+ return provided == cpu_to_be32(calculated);
}
/* Scan a revoke record, marking all blocks mentioned as revoked. */
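
A pattern running through the jbd2 hunks: the on-disk checksum stays in a __be32, and the freshly computed CPU-order value is converted with cpu_to_be32() before the comparison, instead of byte-swapping the disk value into a plainly typed variable. The userspace sketch below shows the two comparison styles side by side; htonl()/ntohl() stand in for cpu_to_be32()/be32_to_cpu(), and the toy xor_csum() stands in for jbd2_chksum().

/* Sketch of the endian-safe comparison pattern used in the jbd2 hunks:
 * keep the on-disk (big-endian) checksum as-is and convert the freshly
 * computed CPU-order value before comparing. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t xor_csum(uint32_t seed, const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t csum = seed;

	while (len--)
		csum = ((csum << 1) | (csum >> 31)) ^ *p++;
	return csum;
}

int main(void)
{
	uint8_t block[32] = "jbd2 descriptor block payload";
	uint32_t calculated = xor_csum(0, block, sizeof(block));
	uint32_t provided_be = htonl(calculated);	/* what would sit on disk */

	/* old style: byte-swap the disk value, then compare in CPU order */
	int ok_old = ntohl(provided_be) == calculated;
	/* new style: convert the computed value, compare in disk order */
	int ok_new = provided_be == htonl(calculated);

	printf("cpu-order compare: %s, disk-order compare: %s\n",
	       ok_old ? "match" : "mismatch", ok_new ? "match" : "mismatch");
	return 0;
}

Both comparisons agree, of course; the point of the kernel change is keeping the __be32/__u32 types straight rather than recycling a CPU-order variable for a big-endian value, which is what the new __be32 seq/provided locals provide.
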
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c
index 8743ba9c674..984c2bbf4f6 100644
--- a/fs/jfs/jfs_dtree.c
+++ b/fs/jfs/jfs_dtree.c
@@ -3047,6 +3047,14 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
dir_index = (u32) ctx->pos;
+ /*
+ * NFSv4 reserves cookies 1 and 2 for . and .. so the value
+ * we return to the vfs is one greater than the one we use
+ * internally.
+ */
+ if (dir_index)
+ dir_index--;
+
if (dir_index > 1) {
struct dir_table_slot dirtab_slot;
@@ -3086,7 +3094,7 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
if (p->header.flag & BT_INTERNAL) {
jfs_err("jfs_readdir: bad index table");
DT_PUTPAGE(mp);
- ctx->pos = -1;
+ ctx->pos = DIREND;
return 0;
}
} else {
@@ -3094,14 +3102,14 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
/*
* self "."
*/
- ctx->pos = 0;
+ ctx->pos = 1;
if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR))
return 0;
}
/*
* parent ".."
*/
- ctx->pos = 1;
+ ctx->pos = 2;
if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR))
return 0;
@@ -3122,22 +3130,23 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
/*
* Legacy filesystem - OS/2 & Linux JFS < 0.3.6
*
- * pn = index = 0: First entry "."
- * pn = 0; index = 1: Second entry ".."
+ * pn = 0; index = 1: First entry "."
+ * pn = 0; index = 2: Second entry ".."
* pn > 0: Real entries, pn=1 -> leftmost page
* pn = index = -1: No more entries
*/
dtpos = ctx->pos;
- if (dtpos == 0) {
+ if (dtpos < 2) {
/* build "." entry */
+ ctx->pos = 1;
if (!dir_emit(ctx, ".", 1, ip->i_ino, DT_DIR))
return 0;
- dtoffset->index = 1;
+ dtoffset->index = 2;
ctx->pos = dtpos;
}
if (dtoffset->pn == 0) {
- if (dtoffset->index == 1) {
+ if (dtoffset->index == 2) {
/* build ".." entry */
if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR))
return 0;
@@ -3228,6 +3237,12 @@ int jfs_readdir(struct file *file, struct dir_context *ctx)
}
jfs_dirent->position = unique_pos++;
}
+ /*
+ * We add 1 to the index because we may
+ * use a value of 2 internally, and NFSv4
+ * doesn't like that.
+ */
+ jfs_dirent->position++;
} else {
jfs_dirent->position = dtpos;
len = min(d_namleft, DTLHDRDATALEN_LEGACY);
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index 01bfe766275..41e491b8e5d 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -64,12 +64,17 @@ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init)
nlm_init->protocol, nlm_version,
nlm_init->hostname, nlm_init->noresvport,
nlm_init->net);
- if (host == NULL) {
- lockd_down(nlm_init->net);
- return ERR_PTR(-ENOLCK);
- }
+ if (host == NULL)
+ goto out_nohost;
+ if (host->h_rpcclnt == NULL && nlm_bind_host(host) == NULL)
+ goto out_nobind;
return host;
+out_nobind:
+ nlmclnt_release_host(host);
+out_nohost:
+ lockd_down(nlm_init->net);
+ return ERR_PTR(-ENOLCK);
}
EXPORT_SYMBOL_GPL(nlmclnt_init);
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
index 9760ecb9b60..acd39471634 100644
--- a/fs/lockd/clntproc.c
+++ b/fs/lockd/clntproc.c
@@ -125,14 +125,15 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl)
{
struct nlm_args *argp = &req->a_args;
struct nlm_lock *lock = &argp->lock;
+ char *nodename = req->a_host->h_rpcclnt->cl_nodename;
nlmclnt_next_cookie(&argp->cookie);
memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh));
- lock->caller = utsname()->nodename;
+ lock->caller = nodename;
lock->oh.data = req->a_owner;
lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
(unsigned int)fl->fl_u.nfs_fl.owner->pid,
- utsname()->nodename);
+ nodename);
lock->svid = fl->fl_u.nfs_fl.owner->pid;
lock->fl.fl_start = fl->fl_start;
lock->fl.fl_end = fl->fl_end;
diff --git a/fs/namei.c b/fs/namei.c
index 8b61d103a8a..f415c6683a8 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -494,6 +494,50 @@ static inline void unlock_rcu_walk(void)
br_read_unlock(&vfsmount_lock);
}
+/*
+ * When we move over from the RCU domain to properly refcounted
+ * long-lived dentries, we need to check the sequence numbers
+ * we got before lookup very carefully.
+ *
+ * We cannot blindly increment a dentry refcount - even if it
+ * is not locked - if it is zero, because it may have gone
+ * through the final d_kill() logic already.
+ *
+ * So for a zero refcount, we need to get the spinlock (which is
+ * safe even for a dead dentry because the de-allocation is
+ * RCU-delayed), and check the sequence count under the lock.
+ *
+ * Once we have checked the sequence count, we know it is live,
+ * and since we hold the spinlock it cannot die from under us.
+ *
+ * In contrast, if the reference count wasn't zero, we can just
+ * increment the lockref without having to take the spinlock.
+ * Even if the sequence number ends up being stale, we haven't
+ * gone through the final dput() and killed the dentry yet.
+ */
+static inline int d_rcu_to_refcount(struct dentry *dentry, seqcount_t *validate, unsigned seq)
+{
+ int gotref;
+
+ gotref = lockref_get_or_lock(&dentry->d_lockref);
+
+ /* Does the sequence number still match? */
+ if (read_seqcount_retry(validate, seq)) {
+ if (gotref)
+ dput(dentry);
+ else
+ spin_unlock(&dentry->d_lock);
+ return -ECHILD;
+ }
+
+ /* Get the ref now, if we couldn't get it originally */
+ if (!gotref) {
+ dentry->d_lockref.count++;
+ spin_unlock(&dentry->d_lock);
+ }
+ return 0;
+}
+
/**
* unlazy_walk - try to switch to ref-walk mode.
* @nd: nameidata pathwalk data
@@ -518,29 +562,28 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
nd->root.dentry != fs->root.dentry)
goto err_root;
}
- spin_lock(&parent->d_lock);
+
+ /*
+ * For a negative lookup, the lookup sequence point is the parent's
+ * sequence point, and it only needs to revalidate the parent dentry.
+ *
+ * For a positive lookup, we need to move both the parent and the
+ * dentry from the RCU domain to be properly refcounted. And the
+ * sequence number in the dentry validates *both* dentry counters,
+ * since we checked the sequence number of the parent after we got
+ * the child sequence number. So we know the parent must still
+ * be valid if the child sequence number is still valid.
+ */
if (!dentry) {
- if (!__d_rcu_to_refcount(parent, nd->seq))
- goto err_parent;
+ if (d_rcu_to_refcount(parent, &parent->d_seq, nd->seq) < 0)
+ goto err_root;
BUG_ON(nd->inode != parent->d_inode);
} else {
- if (dentry->d_parent != parent)
+ if (d_rcu_to_refcount(dentry, &dentry->d_seq, nd->seq) < 0)
+ goto err_root;
+ if (d_rcu_to_refcount(parent, &dentry->d_seq, nd->seq) < 0)
goto err_parent;
- spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
- if (!__d_rcu_to_refcount(dentry, nd->seq))
- goto err_child;
- /*
- * If the sequence check on the child dentry passed, then
- * the child has not been removed from its parent. This
- * means the parent dentry must be valid and able to take
- * a reference at this point.
- */
- BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent);
- BUG_ON(!parent->d_count);
- parent->d_count++;
- spin_unlock(&dentry->d_lock);
}
- spin_unlock(&parent->d_lock);
if (want_root) {
path_get(&nd->root);
spin_unlock(&fs->lock);
@@ -551,10 +594,8 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
nd->flags &= ~LOOKUP_RCU;
return 0;
-err_child:
- spin_unlock(&dentry->d_lock);
err_parent:
- spin_unlock(&parent->d_lock);
+ dput(dentry);
err_root:
if (want_root)
spin_unlock(&fs->lock);
@@ -585,14 +626,11 @@ static int complete_walk(struct nameidata *nd)
nd->flags &= ~LOOKUP_RCU;
if (!(nd->flags & LOOKUP_ROOT))
nd->root.mnt = NULL;
- spin_lock(&dentry->d_lock);
- if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) {
- spin_unlock(&dentry->d_lock);
+
+ if (d_rcu_to_refcount(dentry, &dentry->d_seq, nd->seq) < 0) {
unlock_rcu_walk();
return -ECHILD;
}
- BUG_ON(nd->inode != dentry->d_inode);
- spin_unlock(&dentry->d_lock);
mntget(nd->path.mnt);
unlock_rcu_walk();
}
@@ -2184,6 +2222,188 @@ user_path_parent(int dfd, const char __user *path, struct nameidata *nd,
return s;
}
+/**
+ * umount_lookup_last - look up last component for umount
+ * @nd: pathwalk nameidata - currently pointing at parent directory of "last"
+ * @path: pointer to container for result
+ *
+ * This is a special lookup_last function just for umount. In this case, we
+ * need to resolve the path without doing any revalidation.
+ *
+ * The nameidata should be the result of doing a LOOKUP_PARENT pathwalk. Since
+ * mountpoints are always pinned in the dcache, their ancestors are too. Thus,
+ * in almost all cases, this lookup will be served out of the dcache. The only
+ * cases where it won't are if nd->last refers to a symlink or the path is
+ * bogus and it doesn't exist.
+ *
+ * Returns:
+ * -error: if there was an error during lookup. This includes -ENOENT if the
+ * lookup found a negative dentry. The nd->path reference will also be
+ * put in this case.
+ *
+ * 0: if we successfully resolved nd->path and found it not to be a
+ * symlink that needs to be followed. "path" will also be populated.
+ * The nd->path reference will also be put.
+ *
+ * 1: if we successfully resolved nd->last and found it to be a symlink
+ * that needs to be followed. "path" will be populated with the path
+ * to the link, and nd->path will *not* be put.
+ */
+static int
+umount_lookup_last(struct nameidata *nd, struct path *path)
+{
+ int error = 0;
+ struct dentry *dentry;
+ struct dentry *dir = nd->path.dentry;
+
+ if (unlikely(nd->flags & LOOKUP_RCU)) {
+ WARN_ON_ONCE(1);
+ error = -ECHILD;
+ goto error_check;
+ }
+
+ nd->flags &= ~LOOKUP_PARENT;
+
+ if (unlikely(nd->last_type != LAST_NORM)) {
+ error = handle_dots(nd, nd->last_type);
+ if (!error)
+ dentry = dget(nd->path.dentry);
+ goto error_check;
+ }
+
+ mutex_lock(&dir->d_inode->i_mutex);
+ dentry = d_lookup(dir, &nd->last);
+ if (!dentry) {
+ /*
+ * No cached dentry. Mounted dentries are pinned in the cache,
+ * so that means that this dentry is probably a symlink or the
+ * path doesn't actually point to a mounted dentry.
+ */
+ dentry = d_alloc(dir, &nd->last);
+ if (!dentry) {
+ error = -ENOMEM;
+ } else {
+ dentry = lookup_real(dir->d_inode, dentry, nd->flags);
+ if (IS_ERR(dentry))
+ error = PTR_ERR(dentry);
+ }
+ }
+ mutex_unlock(&dir->d_inode->i_mutex);
+
+error_check:
+ if (!error) {
+ if (!dentry->d_inode) {
+ error = -ENOENT;
+ dput(dentry);
+ } else {
+ path->dentry = dentry;
+ path->mnt = mntget(nd->path.mnt);
+ if (should_follow_link(dentry->d_inode,
+ nd->flags & LOOKUP_FOLLOW))
+ return 1;
+ follow_mount(path);
+ }
+ }
+ terminate_walk(nd);
+ return error;
+}
+
+/**
+ * path_umountat - look up a path to be umounted
+ * @dfd: directory file descriptor to start walk from
+ * @name: full pathname to walk
+ * @flags: lookup flags
+ * @path: pointer to container for result
+ *
+ * Look up the given name, but don't attempt to revalidate the last component.
+ * Returns 0 and "path" will be valid on success; returns error otherwise.
+ */
+static int
+path_umountat(int dfd, const char *name, struct path *path, unsigned int flags)
+{
+ struct file *base = NULL;
+ struct nameidata nd;
+ int err;
+
+ err = path_init(dfd, name, flags | LOOKUP_PARENT, &nd, &base);
+ if (unlikely(err))
+ return err;
+
+ current->total_link_count = 0;
+ err = link_path_walk(name, &nd);
+ if (err)
+ goto out;
+
+ /* If we're in rcuwalk, drop out of it to handle last component */
+ if (nd.flags & LOOKUP_RCU) {
+ err = unlazy_walk(&nd, NULL);
+ if (err) {
+ terminate_walk(&nd);
+ goto out;
+ }
+ }
+
+ err = umount_lookup_last(&nd, path);
+ while (err > 0) {
+ void *cookie;
+ struct path link = *path;
+ err = may_follow_link(&link, &nd);
+ if (unlikely(err))
+ break;
+ nd.flags |= LOOKUP_PARENT;
+ err = follow_link(&link, &nd, &cookie);
+ if (err)
+ break;
+ err = umount_lookup_last(&nd, path);
+ put_link(&nd, &link, cookie);
+ }
+out:
+ if (base)
+ fput(base);
+
+ if (nd.root.mnt && !(nd.flags & LOOKUP_ROOT))
+ path_put(&nd.root);
+
+ return err;
+}
+
+/**
+ * user_path_umountat - lookup a path from userland in order to umount it
+ * @dfd: directory file descriptor
+ * @name: pathname from userland
+ * @flags: lookup flags
+ * @path: pointer to container to hold result
+ *
+ * A umount is a special case for path walking. We're not actually interested
+ * in the inode in this situation, and ESTALE errors can be a problem. We
+ * simply want to track down the dentry and vfsmount attached at the mountpoint
+ * and avoid revalidating the last component.
+ *
+ * Returns 0 and populates "path" on success.
+ */
+int
+user_path_umountat(int dfd, const char __user *name, unsigned int flags,
+ struct path *path)
+{
+ struct filename *s = getname(name);
+ int error;
+
+ if (IS_ERR(s))
+ return PTR_ERR(s);
+
+ error = path_umountat(dfd, s->name, path, flags | LOOKUP_RCU);
+ if (unlikely(error == -ECHILD))
+ error = path_umountat(dfd, s->name, path, flags);
+ if (unlikely(error == -ESTALE))
+ error = path_umountat(dfd, s->name, path, flags | LOOKUP_REVAL);
+
+ if (likely(!error))
+ audit_inode(s, path->dentry, 0);
+
+ putname(s);
+ return error;
+}
+
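/*
 * Illustrative sketch, not part of the patch: the intended call pattern for
 * user_path_umountat(), mirroring the SYSCALL_DEFINE2(umount) hunk in
 * fs/namespace.c further below. example_umount_lookup() is a hypothetical
 * wrapper; the flag handling is taken from that hunk.
 */
static int example_umount_lookup(const char __user *name, int flags,
                                 struct path *path)
{
        unsigned int lookup_flags = 0;
        int retval;

        if (!(flags & UMOUNT_NOFOLLOW))
                lookup_flags |= LOOKUP_FOLLOW;

        /* resolves the mountpoint without revalidating the last component */
        retval = user_path_umountat(AT_FDCWD, name, lookup_flags, path);
        if (retval)
                return retval;

        /* ... operate on path->mnt / path->dentry, then drop the reference */
        path_put(path);
        return 0;
}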
/*
* It's inline, so penalty for filesystems that don't use sticky bit is
* minimal.
@@ -3327,7 +3547,7 @@ void dentry_unhash(struct dentry *dentry)
{
shrink_dcache_parent(dentry);
spin_lock(&dentry->d_lock);
- if (dentry->d_count == 1)
+ if (dentry->d_lockref.count == 1)
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
}
diff --git a/fs/namespace.c b/fs/namespace.c
index 7b1ca9ba0b0..ad8ea9bc251 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1318,7 +1318,7 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
if (!(flags & UMOUNT_NOFOLLOW))
lookup_flags |= LOOKUP_FOLLOW;
- retval = user_path_at(AT_FDCWD, name, lookup_flags, &path);
+ retval = user_path_umountat(AT_FDCWD, name, lookup_flags, &path);
if (retval)
goto out;
mnt = real_mount(path.mnt);
@@ -1429,7 +1429,7 @@ struct vfsmount *collect_mounts(struct path *path)
CL_COPY_ALL | CL_PRIVATE);
namespace_unlock();
if (IS_ERR(tree))
- return NULL;
+ return ERR_CAST(tree);
return &tree->mnt;
}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index af6e806044d..941246f2b43 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -463,7 +463,6 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
unlock_new_inode(inode);
} else
nfs_refresh_inode(inode, fattr);
- nfs_setsecurity(inode, fattr, label);
dprintk("NFS: nfs_fhget(%s/%Ld fh_crc=0x%08x ct=%d)\n",
inode->i_sb->s_id,
(long long)NFS_FILEID(inode),
@@ -963,9 +962,15 @@ EXPORT_SYMBOL_GPL(nfs_revalidate_inode);
static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping)
{
struct nfs_inode *nfsi = NFS_I(inode);
-
+ int ret;
+
if (mapping->nrpages != 0) {
- int ret = invalidate_inode_pages2(mapping);
+ if (S_ISREG(inode->i_mode)) {
+ ret = nfs_sync_mapping(mapping);
+ if (ret < 0)
+ return ret;
+ }
+ ret = invalidate_inode_pages2(mapping);
if (ret < 0)
return ret;
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index cf11799297c..108a774095f 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3071,15 +3071,13 @@ struct rpc_clnt *
nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name,
struct nfs_fh *fhandle, struct nfs_fattr *fattr)
{
+ struct rpc_clnt *client = NFS_CLIENT(dir);
int status;
- struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir));
status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL);
- if (status < 0) {
- rpc_shutdown_client(client);
+ if (status < 0)
return ERR_PTR(status);
- }
- return client;
+ return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client;
}
static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry)
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 71fdc0dfa0d..f6db66d8f64 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2478,6 +2478,10 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server,
if (server->flags & NFS_MOUNT_NOAC)
sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+ if (mount_info->cloned != NULL && mount_info->cloned->sb != NULL)
+ if (mount_info->cloned->sb->s_flags & MS_SYNCHRONOUS)
+ sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+
/* Get a superblock - note that we may end up sharing one that already exists */
s = sget(nfs_mod->nfs_fs, compare_super, nfs_set_super, flags, &sb_mntdata);
if (IS_ERR(s)) {
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 0d4c410e458..419572f33b7 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1524,7 +1524,7 @@ static inline u32 nfsd4_write_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
static inline u32 nfsd4_exchange_id_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
{
return (op_encode_hdr_size + 2 + 1 + /* eir_clientid, eir_sequenceid */\
- 1 + 1 + 0 + /* eir_flags, spr_how, SP4_NONE (for now) */\
+ 1 + 1 + 2 + /* eir_flags, spr_how, spo_must_enforce & _allow */\
2 + /*eir_server_owner.so_minor_id */\
/* eir_server_owner.so_major_id<> */\
XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 +\
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 280acef6f0d..43f42290e5d 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1264,6 +1264,8 @@ static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
struct svc_cred *cr = &rqstp->rq_cred;
u32 service;
+ if (!cr->cr_gss_mech)
+ return false;
service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
return service == RPC_GSS_SVC_INTEGRITY ||
service == RPC_GSS_SVC_PRIVACY;
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 0c0f3ea90de..d9454fe5653 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1816,10 +1816,7 @@ static __be32 nfsd4_encode_fs_location4(struct nfsd4_fs_location *location,
static __be32 nfsd4_encode_path(const struct path *root,
const struct path *path, __be32 **pp, int *buflen)
{
- struct path cur = {
- .mnt = path->mnt,
- .dentry = path->dentry,
- };
+ struct path cur = *path;
__be32 *p = *pp;
struct dentry **components = NULL;
unsigned int ncomponents = 0;
@@ -1859,14 +1856,19 @@ static __be32 nfsd4_encode_path(const struct path *root,
while (ncomponents) {
struct dentry *dentry = components[ncomponents - 1];
- unsigned int len = dentry->d_name.len;
+ unsigned int len;
+ spin_lock(&dentry->d_lock);
+ len = dentry->d_name.len;
*buflen -= 4 + (XDR_QUADLEN(len) << 2);
- if (*buflen < 0)
+ if (*buflen < 0) {
+ spin_unlock(&dentry->d_lock);
goto out_free;
+ }
WRITE32(len);
WRITEMEM(dentry->d_name.name, len);
dprintk("/%s", dentry->d_name.name);
+ spin_unlock(&dentry->d_lock);
dput(dentry);
ncomponents--;
}
@@ -3360,7 +3362,8 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
8 /* eir_clientid */ +
4 /* eir_sequenceid */ +
4 /* eir_flags */ +
- 4 /* spr_how (SP4_NONE) */ +
+ 4 /* spr_how */ +
+ 8 /* spo_must_enforce, spo_must_allow */ +
8 /* so_minor_id */ +
4 /* so_major_id.len */ +
(XDR_QUADLEN(major_id_sz) * 4) +
@@ -3372,8 +3375,6 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
WRITE32(exid->seqid);
WRITE32(exid->flags);
- /* state_protect4_r. Currently only support SP4_NONE */
- BUG_ON(exid->spa_how != SP4_NONE);
WRITE32(exid->spa_how);
switch (exid->spa_how) {
case SP4_NONE:
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 8ff6a0019b0..c827acb0e94 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -830,9 +830,10 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type,
flags = O_WRONLY|O_LARGEFILE;
}
*filp = dentry_open(&path, flags, current_cred());
- if (IS_ERR(*filp))
+ if (IS_ERR(*filp)) {
host_err = PTR_ERR(*filp);
- else {
+ *filp = NULL;
+ } else {
host_err = ima_file_check(*filp, may_flags);
if (may_flags & NFSD_MAY_64BIT_COOKIE)
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index dc9a913784a..2d8be51f90d 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -345,8 +345,7 @@ static void nilfs_end_bio_write(struct bio *bio, int err)
if (err == -EOPNOTSUPP) {
set_bit(BIO_EOPNOTSUPP, &bio->bi_flags);
- bio_put(bio);
- /* to be detected by submit_seg_bio() */
+ /* to be detected by nilfs_segbuf_submit_bio() */
}
if (!uptodate)
@@ -377,12 +376,12 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
bio->bi_private = segbuf;
bio_get(bio);
submit_bio(mode, bio);
+ segbuf->sb_nbio++;
if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
bio_put(bio);
err = -EOPNOTSUPP;
goto failed;
}
- segbuf->sb_nbio++;
bio_put(bio);
wi->bio = NULL;
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index af3ba0478cd..7ac2a122ca1 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -994,23 +994,16 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
return ret;
}
-static int nilfs_tree_was_touched(struct dentry *root_dentry)
-{
- return d_count(root_dentry) > 1;
-}
-
/**
- * nilfs_try_to_shrink_tree() - try to shrink dentries of a checkpoint
+ * nilfs_tree_is_busy() - check whether the dentry tree of a checkpoint is busy
* @root_dentry: root dentry of the tree to be shrunk
*
* This function returns true if the tree was in-use.
*/
-static int nilfs_try_to_shrink_tree(struct dentry *root_dentry)
+static bool nilfs_tree_is_busy(struct dentry *root_dentry)
{
- if (have_submounts(root_dentry))
- return true;
shrink_dcache_parent(root_dentry);
- return nilfs_tree_was_touched(root_dentry);
+ return d_count(root_dentry) > 1;
}
int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno)
@@ -1034,8 +1027,7 @@ int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno)
if (inode) {
dentry = d_find_alias(inode);
if (dentry) {
- if (nilfs_tree_was_touched(dentry))
- ret = nilfs_try_to_shrink_tree(dentry);
+ ret = nilfs_tree_is_busy(dentry);
dput(dentry);
}
iput(inode);
@@ -1331,11 +1323,8 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
s->s_flags |= MS_ACTIVE;
} else if (!sd.cno) {
- int busy = false;
-
- if (nilfs_tree_was_touched(s->s_root)) {
- busy = nilfs_try_to_shrink_tree(s->s_root);
- if (busy && (flags ^ s->s_flags) & MS_RDONLY) {
+ if (nilfs_tree_is_busy(s->s_root)) {
+ if ((flags ^ s->s_flags) & MS_RDONLY) {
printk(KERN_ERR "NILFS: the device already "
"has a %s mount.\n",
(s->s_flags & MS_RDONLY) ?
@@ -1343,8 +1332,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags,
err = -EBUSY;
goto failed_super;
}
- }
- if (!busy) {
+ } else {
/*
* Try remount to setup mount states if the current
* tree is not mounted and only snapshots use this sb.
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 79736a28d84..94417a85ce6 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -565,9 +565,7 @@ bail:
static void ocfs2_dio_end_io(struct kiocb *iocb,
loff_t offset,
ssize_t bytes,
- void *private,
- int ret,
- bool is_async)
+ void *private)
{
struct inode *inode = file_inode(iocb->ki_filp);
int level;
@@ -592,10 +590,6 @@ static void ocfs2_dio_end_io(struct kiocb *iocb,
level = ocfs2_iocb_rw_locked_level(iocb);
ocfs2_rw_unlock(inode, level);
-
- inode_dio_done(inode);
- if (is_async)
- aio_complete(iocb, ret, 0);
}
/*
@@ -1757,7 +1751,7 @@ try_again:
goto out;
} else if (ret == 1) {
clusters_need = wc->w_clen;
- ret = ocfs2_refcount_cow(inode, filp, di_bh,
+ ret = ocfs2_refcount_cow(inode, di_bh,
wc->w_cpos, wc->w_clen, UINT_MAX);
if (ret) {
mlog_errno(ret);
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index eb760d8acd5..30544ce8e9f 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -2153,11 +2153,9 @@ int ocfs2_empty_dir(struct inode *inode)
{
int ret;
struct ocfs2_empty_dir_priv priv = {
- .ctx.actor = ocfs2_empty_dir_filldir
+ .ctx.actor = ocfs2_empty_dir_filldir,
};
- memset(&priv, 0, sizeof(priv));
-
if (ocfs2_dir_indexed(inode)) {
ret = ocfs2_empty_dir_dx(inode, &priv);
if (ret)
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 41000f223ca..3261d71319e 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -370,7 +370,7 @@ static int ocfs2_cow_file_pos(struct inode *inode,
if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
goto out;
- return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1);
+ return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1);
out:
return status;
@@ -899,7 +899,7 @@ static int ocfs2_zero_extend_get_range(struct inode *inode,
zero_clusters = last_cpos - zero_cpos;
if (needs_cow) {
- rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos,
+ rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
zero_clusters, UINT_MAX);
if (rc) {
mlog_errno(rc);
@@ -2078,7 +2078,7 @@ static int ocfs2_prepare_inode_for_refcount(struct inode *inode,
*meta_level = 1;
- ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX);
+ ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
if (ret)
mlog_errno(ret);
out:
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 96f9ac237e8..0a992737dca 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -537,7 +537,7 @@ static inline int ocfs2_calc_extend_credits(struct super_block *sb,
extent_blocks = 1 + 1 + le16_to_cpu(root_el->l_tree_depth);
return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks +
- ocfs2_quota_trans_credits(sb) + bits_wanted;
+ ocfs2_quota_trans_credits(sb);
}
static inline int ocfs2_calc_symlink_credits(struct super_block *sb)
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index f1fc172175b..452068b4574 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -69,7 +69,7 @@ static int __ocfs2_move_extent(handle_t *handle,
u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci);
u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos);
- ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos,
+ ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos,
p_cpos, new_p_cpos, len);
if (ret) {
mlog_errno(ret);
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 998b17eda09..a70d604593b 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -49,7 +49,6 @@
struct ocfs2_cow_context {
struct inode *inode;
- struct file *file;
u32 cow_start;
u32 cow_len;
struct ocfs2_extent_tree data_et;
@@ -66,7 +65,7 @@ struct ocfs2_cow_context {
u32 *num_clusters,
unsigned int *extent_flags);
int (*cow_duplicate_clusters)(handle_t *handle,
- struct file *file,
+ struct inode *inode,
u32 cpos, u32 old_cluster,
u32 new_cluster, u32 new_len);
};
@@ -2922,14 +2921,12 @@ static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
}
int ocfs2_duplicate_clusters_by_page(handle_t *handle,
- struct file *file,
+ struct inode *inode,
u32 cpos, u32 old_cluster,
u32 new_cluster, u32 new_len)
{
int ret = 0, partial;
- struct inode *inode = file_inode(file);
- struct ocfs2_caching_info *ci = INODE_CACHE(inode);
- struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
+ struct super_block *sb = inode->i_sb;
u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
struct page *page;
pgoff_t page_index;
@@ -2965,6 +2962,11 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
to = map_end & (PAGE_CACHE_SIZE - 1);
page = find_or_create_page(mapping, page_index, GFP_NOFS);
+ if (!page) {
+ ret = -ENOMEM;
+ mlog_errno(ret);
+ break;
+ }
/*
 * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, this page
@@ -2973,13 +2975,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
BUG_ON(PageDirty(page));
- if (PageReadahead(page)) {
- page_cache_async_readahead(mapping,
- &file->f_ra, file,
- page, page_index,
- readahead_pages);
- }
-
if (!PageUptodate(page)) {
ret = block_read_full_page(page, ocfs2_get_block);
if (ret) {
@@ -2999,7 +2994,8 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
}
}
- ocfs2_map_and_dirty_page(inode, handle, from, to,
+ ocfs2_map_and_dirty_page(inode,
+ handle, from, to,
page, 0, &new_block);
mark_page_accessed(page);
unlock:
@@ -3015,12 +3011,11 @@ unlock:
}
int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
- struct file *file,
+ struct inode *inode,
u32 cpos, u32 old_cluster,
u32 new_cluster, u32 new_len)
{
int ret = 0;
- struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
struct ocfs2_caching_info *ci = INODE_CACHE(inode);
int i, blocks = ocfs2_clusters_to_blocks(sb, new_len);
@@ -3145,7 +3140,7 @@ static int ocfs2_replace_clusters(handle_t *handle,
 /* If the old clusters are unwritten, no need to duplicate. */
if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
- ret = context->cow_duplicate_clusters(handle, context->file,
+ ret = context->cow_duplicate_clusters(handle, context->inode,
cpos, old, new, len);
if (ret) {
mlog_errno(ret);
@@ -3423,35 +3418,12 @@ static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
return ret;
}
-static void ocfs2_readahead_for_cow(struct inode *inode,
- struct file *file,
- u32 start, u32 len)
-{
- struct address_space *mapping;
- pgoff_t index;
- unsigned long num_pages;
- int cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits;
-
- if (!file)
- return;
-
- mapping = file->f_mapping;
- num_pages = (len << cs_bits) >> PAGE_CACHE_SHIFT;
- if (!num_pages)
- num_pages = 1;
-
- index = ((loff_t)start << cs_bits) >> PAGE_CACHE_SHIFT;
- page_cache_sync_readahead(mapping, &file->f_ra, file,
- index, num_pages);
-}
-
/*
* Starting at cpos, try to CoW write_len clusters. Don't CoW
* past max_cpos. This will stop when it runs into a hole or an
* unrefcounted extent.
*/
static int ocfs2_refcount_cow_hunk(struct inode *inode,
- struct file *file,
struct buffer_head *di_bh,
u32 cpos, u32 write_len, u32 max_cpos)
{
@@ -3480,8 +3452,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
BUG_ON(cow_len == 0);
- ocfs2_readahead_for_cow(inode, file, cow_start, cow_len);
-
context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
if (!context) {
ret = -ENOMEM;
@@ -3503,7 +3473,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode,
context->ref_root_bh = ref_root_bh;
context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
context->get_clusters = ocfs2_di_get_clusters;
- context->file = file;
ocfs2_init_dinode_extent_tree(&context->data_et,
INODE_CACHE(inode), di_bh);
@@ -3532,7 +3501,6 @@ out:
* clusters between cpos and cpos+write_len are safe to modify.
*/
int ocfs2_refcount_cow(struct inode *inode,
- struct file *file,
struct buffer_head *di_bh,
u32 cpos, u32 write_len, u32 max_cpos)
{
@@ -3552,7 +3520,7 @@ int ocfs2_refcount_cow(struct inode *inode,
num_clusters = write_len;
if (ext_flags & OCFS2_EXT_REFCOUNTED) {
- ret = ocfs2_refcount_cow_hunk(inode, file, di_bh, cpos,
+ ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
num_clusters, max_cpos);
if (ret) {
mlog_errno(ret);
diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h
index 7754608c83a..6422bbcdb52 100644
--- a/fs/ocfs2/refcounttree.h
+++ b/fs/ocfs2/refcounttree.h
@@ -53,7 +53,7 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
int *credits,
int *ref_blocks);
int ocfs2_refcount_cow(struct inode *inode,
- struct file *filep, struct buffer_head *di_bh,
+ struct buffer_head *di_bh,
u32 cpos, u32 write_len, u32 max_cpos);
typedef int (ocfs2_post_refcount_func)(struct inode *inode,
@@ -85,11 +85,11 @@ int ocfs2_refcount_cow_xattr(struct inode *inode,
u32 cpos, u32 write_len,
struct ocfs2_post_refcount *post);
int ocfs2_duplicate_clusters_by_page(handle_t *handle,
- struct file *file,
+ struct inode *inode,
u32 cpos, u32 old_cluster,
u32 new_cluster, u32 new_len);
int ocfs2_duplicate_clusters_by_jbd(handle_t *handle,
- struct file *file,
+ struct inode *inode,
u32 cpos, u32 old_cluster,
u32 new_cluster, u32 new_len);
int ocfs2_cow_sync_writeback(struct super_block *sb,
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 854d80955bf..121da2dc3be 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1022,7 +1022,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
struct inode *inode = NULL;
struct ocfs2_super *osb = NULL;
struct buffer_head *bh = NULL;
- char nodestr[8];
+ char nodestr[12];
struct ocfs2_blockcheck_stats stats;
trace_ocfs2_fill_super(sb, data, silent);
diff --git a/fs/open.c b/fs/open.c
index d53e2989508..8070825b285 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -485,14 +485,13 @@ out_unlock:
SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode)
{
- struct file * file;
+ struct fd f = fdget(fd);
int err = -EBADF;
- file = fget(fd);
- if (file) {
- audit_inode(NULL, file->f_path.dentry, 0);
- err = chmod_common(&file->f_path, mode);
- fput(file);
+ if (f.file) {
+ audit_inode(NULL, f.file->f_path.dentry, 0);
+ err = chmod_common(&f.file->f_path, mode);
+ fdput(f);
}
return err;
}
@@ -823,7 +822,7 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o
int lookup_flags = 0;
int acc_mode;
- if (flags & O_CREAT)
+ if (flags & (O_CREAT | __O_TMPFILE))
op->mode = (mode & S_IALLUGO) | S_IFREG;
else
op->mode = 0;
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 75f2890abbd..0ff80f9b930 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -230,8 +230,6 @@ static int proc_readfd_common(struct file *file, struct dir_context *ctx,
if (!dir_emit_dots(file, ctx))
goto out;
- if (!dir_emit_dots(file, ctx))
- goto out;
files = get_files_struct(p);
if (!files)
goto out;
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 94441a40733..737e15615b0 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -271,7 +271,7 @@ int proc_readdir_de(struct proc_dir_entry *de, struct file *file,
de = next;
} while (de);
spin_unlock(&proc_subdir_lock);
- return 0;
+ return 1;
}
int proc_readdir(struct file *file, struct dir_context *ctx)
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 073aea60cf8..9f8ef9b7674 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -285,6 +285,20 @@ static int proc_reg_mmap(struct file *file, struct vm_area_struct *vma)
return rv;
}
+static unsigned long proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct proc_dir_entry *pde = PDE(file_inode(file));
+ int rv = -EIO;
+ unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+ if (use_pde(pde)) {
+ get_unmapped_area = pde->proc_fops->get_unmapped_area;
+ if (get_unmapped_area)
+ rv = get_unmapped_area(file, orig_addr, len, pgoff, flags);
+ unuse_pde(pde);
+ }
+ return rv;
+}
+
static int proc_reg_open(struct inode *inode, struct file *file)
{
struct proc_dir_entry *pde = PDE(inode);
@@ -356,6 +370,7 @@ static const struct file_operations proc_reg_file_ops = {
.compat_ioctl = proc_reg_compat_ioctl,
#endif
.mmap = proc_reg_mmap,
+ .get_unmapped_area = proc_reg_get_unmapped_area,
.open = proc_reg_open,
.release = proc_reg_release,
};
@@ -368,6 +383,7 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
.poll = proc_reg_poll,
.unlocked_ioctl = proc_reg_unlocked_ioctl,
.mmap = proc_reg_mmap,
+ .get_unmapped_area = proc_reg_get_unmapped_area,
.open = proc_reg_open,
.release = proc_reg_release,
};
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 229e366598d..e0a790da726 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -205,7 +205,9 @@ static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentr
static int proc_root_readdir(struct file *file, struct dir_context *ctx)
{
if (ctx->pos < FIRST_PROCESS_ENTRY) {
- proc_readdir(file, ctx);
+ int error = proc_readdir(file, ctx);
+ if (unlikely(error <= 0))
+ return error;
ctx->pos = FIRST_PROCESS_ENTRY;
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index dbf61f6174f..107d026f5d6 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -730,8 +730,16 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma,
* of how soft-dirty works.
*/
pte_t ptent = *pte;
- ptent = pte_wrprotect(ptent);
- ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
+
+ if (pte_present(ptent)) {
+ ptent = pte_wrprotect(ptent);
+ ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
+ } else if (is_swap_pte(ptent)) {
+ ptent = pte_swp_clear_soft_dirty(ptent);
+ } else if (pte_file(ptent)) {
+ ptent = pte_file_clear_soft_dirty(ptent);
+ }
+
set_pte_at(vma->vm_mm, addr, pte, ptent);
#endif
}
@@ -752,14 +760,15 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
for (; addr != end; pte++, addr += PAGE_SIZE) {
ptent = *pte;
- if (!pte_present(ptent))
- continue;
if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
clear_soft_dirty(vma, addr, pte);
continue;
}
+ if (!pte_present(ptent))
+ continue;
+
page = vm_normal_page(vma, addr, ptent);
if (!page)
continue;
@@ -859,7 +868,7 @@ typedef struct {
} pagemap_entry_t;
struct pagemapread {
- int pos, len;
+ int pos, len; /* units: PM_ENTRY_BYTES, not bytes */
pagemap_entry_t *buffer;
bool v2;
};
@@ -867,7 +876,7 @@ struct pagemapread {
#define PAGEMAP_WALK_SIZE (PMD_SIZE)
#define PAGEMAP_WALK_MASK (PMD_MASK)
-#define PM_ENTRY_BYTES sizeof(u64)
+#define PM_ENTRY_BYTES sizeof(pagemap_entry_t)
#define PM_STATUS_BITS 3
#define PM_STATUS_OFFSET (64 - PM_STATUS_BITS)
#define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET)
@@ -930,8 +939,10 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm,
flags = PM_PRESENT;
page = vm_normal_page(vma, addr, pte);
} else if (is_swap_pte(pte)) {
- swp_entry_t entry = pte_to_swp_entry(pte);
-
+ swp_entry_t entry;
+ if (pte_swp_soft_dirty(pte))
+ flags2 |= __PM_SOFT_DIRTY;
+ entry = pte_to_swp_entry(pte);
frame = swp_type(entry) |
(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
flags = PM_SWAP;
@@ -1116,8 +1127,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
goto out_task;
pm.v2 = soft_dirty_cleared;
- pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
- pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
+ pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
+ pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY);
ret = -ENOMEM;
if (!pm.buffer)
goto out_task;
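/*
 * Worked example for the sizing above (illustrative, assuming x86-64 with
 * 4 KiB pages): PAGEMAP_WALK_SIZE is PMD_SIZE = 2 MiB, so
 * pm.len = 2 MiB >> 12 = 512 entries, and the buffer is
 * 512 * sizeof(pagemap_entry_t) = 512 * 8 = 4096 bytes per walk chunk.
 */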
diff --git a/fs/pstore/Kconfig b/fs/pstore/Kconfig
index ca71db69da0..983d9510bec 100644
--- a/fs/pstore/Kconfig
+++ b/fs/pstore/Kconfig
@@ -1,6 +1,8 @@
config PSTORE
bool "Persistent store support"
default n
+ select ZLIB_DEFLATE
+ select ZLIB_INFLATE
help
This option enables generic access to platform level
persistent storage via "pstore" filesystem that can
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 71bf5f4ae84..12823845d32 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -275,8 +275,8 @@ int pstore_is_mounted(void)
* Set the mtime & ctime to the date that this record was originally stored.
*/
int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
- char *data, size_t size, struct timespec time,
- struct pstore_info *psi)
+ char *data, bool compressed, size_t size,
+ struct timespec time, struct pstore_info *psi)
{
struct dentry *root = pstore_sb->s_root;
struct dentry *dentry;
@@ -315,7 +315,8 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
switch (type) {
case PSTORE_TYPE_DMESG:
- sprintf(name, "dmesg-%s-%lld", psname, id);
+ sprintf(name, "dmesg-%s-%lld%s", psname, id,
+ compressed ? ".enc.z" : "");
break;
case PSTORE_TYPE_CONSOLE:
sprintf(name, "console-%s", psname);
@@ -345,9 +346,8 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
mutex_lock(&root->d_inode->i_mutex);
- rc = -ENOSPC;
dentry = d_alloc_name(root, name);
- if (IS_ERR(dentry))
+ if (!dentry)
goto fail_lockedalloc;
memcpy(private->data, data, size);
diff --git a/fs/pstore/internal.h b/fs/pstore/internal.h
index 937d820f273..3b3d305277c 100644
--- a/fs/pstore/internal.h
+++ b/fs/pstore/internal.h
@@ -50,8 +50,9 @@ extern struct pstore_info *psinfo;
extern void pstore_set_kmsg_bytes(int);
extern void pstore_get_records(int);
extern int pstore_mkfile(enum pstore_type_id, char *psname, u64 id,
- int count, char *data, size_t size,
- struct timespec time, struct pstore_info *psi);
+ int count, char *data, bool compressed,
+ size_t size, struct timespec time,
+ struct pstore_info *psi);
extern int pstore_is_mounted(void);
#endif
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 422962ae9fc..4ffb7ab5e39 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -26,6 +26,7 @@
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pstore.h>
+#include <linux/zlib.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
@@ -65,6 +66,15 @@ struct pstore_info *psinfo;
static char *backend;
+/* Compression parameters */
+#define COMPR_LEVEL 6
+#define WINDOW_BITS 12
+#define MEM_LEVEL 4
+static struct z_stream_s stream;
+
+static char *big_oops_buf;
+static size_t big_oops_buf_sz;
+
/* How much of the console log to snapshot */
static unsigned long kmsg_bytes = 10240;
@@ -117,6 +127,121 @@ bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
}
EXPORT_SYMBOL_GPL(pstore_cannot_block_path);
+/* Derived from logfs_compress() */
+static int pstore_compress(const void *in, void *out, size_t inlen,
+ size_t outlen)
+{
+ int err, ret;
+
+ ret = -EIO;
+ err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
+ MEM_LEVEL, Z_DEFAULT_STRATEGY);
+ if (err != Z_OK)
+ goto error;
+
+ stream.next_in = in;
+ stream.avail_in = inlen;
+ stream.total_in = 0;
+ stream.next_out = out;
+ stream.avail_out = outlen;
+ stream.total_out = 0;
+
+ err = zlib_deflate(&stream, Z_FINISH);
+ if (err != Z_STREAM_END)
+ goto error;
+
+ err = zlib_deflateEnd(&stream);
+ if (err != Z_OK)
+ goto error;
+
+ if (stream.total_out >= stream.total_in)
+ goto error;
+
+ ret = stream.total_out;
+error:
+ return ret;
+}
+
+/* Derived from logfs_uncompress */
+static int pstore_decompress(void *in, void *out, size_t inlen, size_t outlen)
+{
+ int err, ret;
+
+ ret = -EIO;
+ err = zlib_inflateInit(&stream);
+ if (err != Z_OK)
+ goto error;
+
+ stream.next_in = in;
+ stream.avail_in = inlen;
+ stream.total_in = 0;
+ stream.next_out = out;
+ stream.avail_out = outlen;
+ stream.total_out = 0;
+
+ err = zlib_inflate(&stream, Z_FINISH);
+ if (err != Z_STREAM_END)
+ goto error;
+
+ err = zlib_inflateEnd(&stream);
+ if (err != Z_OK)
+ goto error;
+
+ ret = stream.total_out;
+error:
+ return ret;
+}
+
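/*
 * Illustrative sketch, not part of the patch: a round trip through the two
 * helpers above. It assumes allocate_buf_for_compression() has already set
 * up stream.workspace; example_roundtrip() and the local buffer sizes are
 * made up for the example. Note that 'stream' is a single static z_stream,
 * so the helpers are not safe to call concurrently.
 */
static int example_roundtrip(char *text, size_t len)
{
        char packed[1024];
        char unpacked[4096];
        int zipped_len, unzipped_len;

        zipped_len = pstore_compress(text, packed, len, sizeof(packed));
        if (zipped_len < 0)
                return zipped_len; /* -EIO: deflate failed or output did not shrink */

        unzipped_len = pstore_decompress(packed, unpacked, zipped_len,
                                         sizeof(unpacked));
        if (unzipped_len < 0)
                return unzipped_len;

        /* unpacked[0..unzipped_len) should now match text[0..len) */
        return 0;
}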
+static void allocate_buf_for_compression(void)
+{
+ size_t size;
+
+ big_oops_buf_sz = (psinfo->bufsize * 100) / 45;
+ big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
+ if (big_oops_buf) {
+ size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL),
+ zlib_inflate_workspacesize());
+ stream.workspace = kmalloc(size, GFP_KERNEL);
+ if (!stream.workspace) {
+ pr_err("pstore: No memory for compression workspace; "
+ "skipping compression\n");
+ kfree(big_oops_buf);
+ big_oops_buf = NULL;
+ }
+ } else {
+ pr_err("No memory for uncompressed data; "
+ "skipping compression\n");
+ stream.workspace = NULL;
+ }
+
+}
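/*
 * Worked example for the sizing above (illustrative numbers, not from the
 * patch): with psinfo->bufsize = 4096, big_oops_buf_sz = 4096 * 100 / 45
 * = 9102 with integer division, i.e. the uncompressed capture buffer is a
 * little over twice the backend record size, on the assumption that the
 * dump compresses to roughly 45% of its size or less.
 */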
+
+/*
+ * Called when compression fails. The printk buffer has already been
+ * fetched for compression; fetching it again after a failure would
+ * advance the printk iterator and return stale contents. Instead,
+ * copy the most recent messages from big_oops_buf into psinfo->buf.
+ */
+static size_t copy_kmsg_to_buffer(int hsize, size_t len)
+{
+ size_t total_len;
+ size_t diff;
+
+ total_len = hsize + len;
+
+ if (total_len > psinfo->bufsize) {
+ diff = total_len - psinfo->bufsize + hsize;
+ memcpy(psinfo->buf, big_oops_buf, hsize);
+ memcpy(psinfo->buf + hsize, big_oops_buf + diff,
+ psinfo->bufsize - hsize);
+ total_len = psinfo->bufsize;
+ } else
+ memcpy(psinfo->buf, big_oops_buf, total_len);
+
+ return total_len;
+}
+
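/*
 * Worked example for the truncation above (illustrative numbers, not from
 * the patch): with psinfo->bufsize = 4096, hsize = 20 and len = 8000,
 * total_len = 8020 > 4096, so diff = 8020 - 4096 + 20 = 3944. The 20-byte
 * header is copied first, followed by the last 4096 - 20 = 4076 bytes of
 * big_oops_buf (the most recent messages), and total_len is clamped to
 * psinfo->bufsize = 4096.
 */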
/*
* callback from kmsg_dump. (s2,l2) has the most recently
* written bytes, older bytes are in (s1,l1). Save as much
@@ -148,22 +273,56 @@ static void pstore_dump(struct kmsg_dumper *dumper,
char *dst;
unsigned long size;
int hsize;
+ int zipped_len = -1;
size_t len;
+ bool compressed;
+ size_t total_len;
- dst = psinfo->buf;
- hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part);
- size = psinfo->bufsize - hsize;
- dst += hsize;
+ if (big_oops_buf) {
+ dst = big_oops_buf;
+ hsize = sprintf(dst, "%s#%d Part%d\n", why,
+ oopscount, part);
+ size = big_oops_buf_sz - hsize;
- if (!kmsg_dump_get_buffer(dumper, true, dst, size, &len))
- break;
+ if (!kmsg_dump_get_buffer(dumper, true, dst + hsize,
+ size, &len))
+ break;
+
+ zipped_len = pstore_compress(dst, psinfo->buf,
+ hsize + len, psinfo->bufsize);
+
+ if (zipped_len > 0) {
+ compressed = true;
+ total_len = zipped_len;
+ } else {
+ pr_err("pstore: compression failed for Part %d"
+ " returned %d\n", part, zipped_len);
+ pr_err("pstore: Capture uncompressed"
+ " oops/panic report of Part %d\n", part);
+ compressed = false;
+ total_len = copy_kmsg_to_buffer(hsize, len);
+ }
+ } else {
+ dst = psinfo->buf;
+ hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount,
+ part);
+ size = psinfo->bufsize - hsize;
+ dst += hsize;
+
+ if (!kmsg_dump_get_buffer(dumper, true, dst,
+ size, &len))
+ break;
+
+ compressed = false;
+ total_len = hsize + len;
+ }
ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part,
- oopscount, hsize, hsize + len, psinfo);
+ oopscount, compressed, total_len, psinfo);
if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted())
pstore_new_entry = 1;
- total += hsize + len;
+ total += total_len;
part++;
}
if (pstore_cannot_block_path(reason)) {
@@ -221,10 +380,10 @@ static void pstore_register_console(void) {}
static int pstore_write_compat(enum pstore_type_id type,
enum kmsg_dump_reason reason,
u64 *id, unsigned int part, int count,
- size_t hsize, size_t size,
+ bool compressed, size_t size,
struct pstore_info *psi)
{
- return psi->write_buf(type, reason, id, part, psinfo->buf, hsize,
+ return psi->write_buf(type, reason, id, part, psinfo->buf, compressed,
size, psi);
}
@@ -261,6 +420,8 @@ int pstore_register(struct pstore_info *psi)
return -EINVAL;
}
+ allocate_buf_for_compression();
+
if (pstore_is_mounted())
pstore_get_records(0);
@@ -297,6 +458,8 @@ void pstore_get_records(int quiet)
enum pstore_type_id type;
struct timespec time;
int failed = 0, rc;
+ bool compressed;
+ int unzipped_len = -1;
if (!psi)
return;
@@ -305,11 +468,32 @@ void pstore_get_records(int quiet)
if (psi->open && psi->open(psi))
goto out;
- while ((size = psi->read(&id, &type, &count, &time, &buf, psi)) > 0) {
+ while ((size = psi->read(&id, &type, &count, &time, &buf, &compressed,
+ psi)) > 0) {
+ if (compressed && (type == PSTORE_TYPE_DMESG)) {
+ if (big_oops_buf)
+ unzipped_len = pstore_decompress(buf,
+ big_oops_buf, size,
+ big_oops_buf_sz);
+
+ if (unzipped_len > 0) {
+ buf = big_oops_buf;
+ size = unzipped_len;
+ compressed = false;
+ } else {
+ pr_err("pstore: decompression failed;"
+ "returned %d\n", unzipped_len);
+ compressed = true;
+ }
+ }
rc = pstore_mkfile(type, psi->name, id, count, buf,
- (size_t)size, time, psi);
- kfree(buf);
- buf = NULL;
+ compressed, (size_t)size, time, psi);
+ if (unzipped_len < 0) {
+ /* Free buffer other than big oops */
+ kfree(buf);
+ buf = NULL;
+ } else
+ unzipped_len = -1;
if (rc && (rc != -EEXIST || !quiet))
failed++;
}
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index a6119f9469e..fa8cef2cca3 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -131,9 +131,31 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], uint *c, uint max,
return prz;
}
+static void ramoops_read_kmsg_hdr(char *buffer, struct timespec *time,
+ bool *compressed)
+{
+ char data_type;
+
+ if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lu.%lu-%c\n",
+ &time->tv_sec, &time->tv_nsec, &data_type) == 3) {
+ if (data_type == 'C')
+ *compressed = true;
+ else
+ *compressed = false;
+ } else if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lu.%lu\n",
+ &time->tv_sec, &time->tv_nsec) == 2) {
+ *compressed = false;
+ } else {
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ *compressed = false;
+ }
+}
+
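/*
 * Worked example of the header formats parsed above (illustrative values,
 * and assuming RAMOOPS_KERNMSG_HDR expands to the "====" prefix defined
 * elsewhere in ram.c):
 *
 *   "====1378245663.123456-C\n"  -> timestamp filled in, *compressed = true
 *   "====1378245663.123456-D\n"  -> timestamp filled in, *compressed = false
 *   "====1378245663.123456\n"    -> old header, *compressed = false
 *   anything else                -> time zeroed, *compressed = false
 */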
static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
int *count, struct timespec *time,
- char **buf, struct pstore_info *psi)
+ char **buf, bool *compressed,
+ struct pstore_info *psi)
{
ssize_t size;
ssize_t ecc_notice_size;
@@ -152,10 +174,6 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
if (!prz)
return 0;
- /* TODO(kees): Bogus time for the moment. */
- time->tv_sec = 0;
- time->tv_nsec = 0;
-
size = persistent_ram_old_size(prz);
/* ECC correction notice */
@@ -166,12 +184,14 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
return -ENOMEM;
memcpy(*buf, persistent_ram_old(prz), size);
+ ramoops_read_kmsg_hdr(*buf, time, compressed);
persistent_ram_ecc_string(prz, *buf + size, ecc_notice_size + 1);
return size + ecc_notice_size;
}
-static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz)
+static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz,
+ bool compressed)
{
char *hdr;
struct timespec timestamp;
@@ -182,8 +202,9 @@ static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz)
timestamp.tv_sec = 0;
timestamp.tv_nsec = 0;
}
- hdr = kasprintf(GFP_ATOMIC, RAMOOPS_KERNMSG_HDR "%lu.%lu\n",
- (long)timestamp.tv_sec, (long)(timestamp.tv_nsec / 1000));
+ hdr = kasprintf(GFP_ATOMIC, RAMOOPS_KERNMSG_HDR "%lu.%lu-%c\n",
+ (long)timestamp.tv_sec, (long)(timestamp.tv_nsec / 1000),
+ compressed ? 'C' : 'D');
WARN_ON_ONCE(!hdr);
len = hdr ? strlen(hdr) : 0;
persistent_ram_write(prz, hdr, len);
@@ -196,7 +217,7 @@ static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
enum kmsg_dump_reason reason,
u64 *id, unsigned int part,
const char *buf,
- size_t hsize, size_t size,
+ bool compressed, size_t size,
struct pstore_info *psi)
{
struct ramoops_context *cxt = psi->data;
@@ -242,7 +263,7 @@ static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
prz = cxt->przs[cxt->dump_write_cnt];
- hlen = ramoops_write_kmsg_hdr(prz);
+ hlen = ramoops_write_kmsg_hdr(prz, compressed);
if (size + hlen > prz->buffer_size)
size = prz->buffer_size - hlen;
persistent_ram_write(prz, buf, size);
@@ -400,11 +421,11 @@ static int ramoops_probe(struct platform_device *pdev)
goto fail_out;
}
- if (!is_power_of_2(pdata->record_size))
+ if (pdata->record_size && !is_power_of_2(pdata->record_size))
pdata->record_size = rounddown_pow_of_two(pdata->record_size);
- if (!is_power_of_2(pdata->console_size))
+ if (pdata->console_size && !is_power_of_2(pdata->console_size))
pdata->console_size = rounddown_pow_of_two(pdata->console_size);
- if (!is_power_of_2(pdata->ftrace_size))
+ if (pdata->ftrace_size && !is_power_of_2(pdata->ftrace_size))
pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
cxt->dump_read_cnt = 0;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index fbad622841f..9a702e19353 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1094,6 +1094,14 @@ static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
dquot->dq_dqb.dqb_rsvspace -= number;
}
+static void dquot_reclaim_reserved_space(struct dquot *dquot, qsize_t number)
+{
+ if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
+ number = dquot->dq_dqb.dqb_curspace;
+ dquot->dq_dqb.dqb_rsvspace += number;
+ dquot->dq_dqb.dqb_curspace -= number;
+}
+
static inline
void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
{
@@ -1528,6 +1536,15 @@ void inode_claim_rsv_space(struct inode *inode, qsize_t number)
}
EXPORT_SYMBOL(inode_claim_rsv_space);
+void inode_reclaim_rsv_space(struct inode *inode, qsize_t number)
+{
+ spin_lock(&inode->i_lock);
+ *inode_reserved_space(inode) += number;
+ __inode_sub_bytes(inode, number);
+ spin_unlock(&inode->i_lock);
+}
+EXPORT_SYMBOL(inode_reclaim_rsv_space);
+
void inode_sub_rsv_space(struct inode *inode, qsize_t number)
{
spin_lock(&inode->i_lock);
@@ -1702,6 +1719,35 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
EXPORT_SYMBOL(dquot_claim_space_nodirty);
/*
+ * Convert allocated space back to in-memory reserved quotas
+ */
+void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
+{
+ int cnt;
+
+ if (!dquot_active(inode)) {
+ inode_reclaim_rsv_space(inode, number);
+ return;
+ }
+
+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ spin_lock(&dq_data_lock);
+ /* Claim reserved quotas to allocated quotas */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (inode->i_dquot[cnt])
+ dquot_reclaim_reserved_space(inode->i_dquot[cnt],
+ number);
+ }
+ /* Update inode bytes */
+ inode_reclaim_rsv_space(inode, number);
+ spin_unlock(&dq_data_lock);
+ mark_all_dquot_dirty(inode->i_dquot);
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ return;
+}
+EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
+
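/*
 * Illustrative sketch, not part of the patch: a filesystem that converted
 * reserved space to allocated space and later finds it used fewer bytes
 * could hand the excess back with the new helper. example_return_unused()
 * and 'unused_bytes' are made up for the example; as with the other
 * *_nodirty helpers, the caller is expected to mark the inode dirty itself.
 */
static void example_return_unused(struct inode *inode, qsize_t unused_bytes)
{
        dquot_reclaim_space_nodirty(inode, unused_bytes);
        mark_inode_dirty(inode);
}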
+/*
* This operation can block, but only after everything is updated
*/
void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index a98b7740a0f..dc9a6829f7c 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -423,8 +423,11 @@ static void _reiserfs_free_block(struct reiserfs_transaction_handle *th,
set_sb_free_blocks(rs, sb_free_blocks(rs) + 1);
journal_mark_dirty(th, s, sbh);
- if (for_unformatted)
+ if (for_unformatted) {
+ int depth = reiserfs_write_unlock_nested(s);
dquot_free_block_nodirty(inode, 1);
+ reiserfs_write_lock_nested(s, depth);
+ }
}
void reiserfs_free_block(struct reiserfs_transaction_handle *th,
@@ -1128,6 +1131,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
b_blocknr_t finish = SB_BLOCK_COUNT(s) - 1;
int passno = 0;
int nr_allocated = 0;
+ int depth;
determine_prealloc_size(hint);
if (!hint->formatted_node) {
@@ -1137,10 +1141,13 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
"reiserquota: allocating %d blocks id=%u",
amount_needed, hint->inode->i_uid);
#endif
+ depth = reiserfs_write_unlock_nested(s);
quota_ret =
dquot_alloc_block_nodirty(hint->inode, amount_needed);
- if (quota_ret) /* Quota exceeded? */
+ if (quota_ret) { /* Quota exceeded? */
+ reiserfs_write_lock_nested(s, depth);
return QUOTA_EXCEEDED;
+ }
if (hint->preallocate && hint->prealloc_size) {
#ifdef REISERQUOTA_DEBUG
reiserfs_debug(s, REISERFS_DEBUG_CODE,
@@ -1153,6 +1160,7 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
hint->preallocate = hint->prealloc_size = 0;
}
/* for unformatted nodes, force large allocations */
+ reiserfs_write_lock_nested(s, depth);
}
do {
@@ -1181,9 +1189,11 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
hint->inode->i_uid);
#endif
/* Free not allocated blocks */
+ depth = reiserfs_write_unlock_nested(s);
dquot_free_block_nodirty(hint->inode,
amount_needed + hint->prealloc_size -
nr_allocated);
+ reiserfs_write_lock_nested(s, depth);
}
while (nr_allocated--)
reiserfs_free_block(hint->th, hint->inode,
@@ -1214,10 +1224,13 @@ static inline int blocknrs_and_prealloc_arrays_from_search_start
REISERFS_I(hint->inode)->i_prealloc_count,
hint->inode->i_uid);
#endif
+
+ depth = reiserfs_write_unlock_nested(s);
dquot_free_block_nodirty(hint->inode, amount_needed +
hint->prealloc_size - nr_allocated -
REISERFS_I(hint->inode)->
i_prealloc_count);
+ reiserfs_write_lock_nested(s, depth);
}
return CARRY_ON;
@@ -1340,10 +1353,11 @@ struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
"reading failed", __func__, block);
else {
if (buffer_locked(bh)) {
+ int depth;
PROC_INFO_INC(sb, scan_bitmap.wait);
- reiserfs_write_unlock(sb);
+ depth = reiserfs_write_unlock_nested(sb);
__wait_on_buffer(bh);
- reiserfs_write_lock(sb);
+ reiserfs_write_lock_nested(sb, depth);
}
BUG_ON(!buffer_uptodate(bh));
BUG_ON(atomic_read(&bh->b_count) == 0);
diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
index 03e4ca5624d..1fd2051109a 100644
--- a/fs/reiserfs/dir.c
+++ b/fs/reiserfs/dir.c
@@ -71,6 +71,7 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
char small_buf[32]; /* avoid kmalloc if we can */
struct reiserfs_dir_entry de;
int ret = 0;
+ int depth;
reiserfs_write_lock(inode->i_sb);
@@ -181,17 +182,17 @@ int reiserfs_readdir_inode(struct inode *inode, struct dir_context *ctx)
* Since filldir might sleep, we can release
* the write lock here for other waiters
*/
- reiserfs_write_unlock(inode->i_sb);
+ depth = reiserfs_write_unlock_nested(inode->i_sb);
if (!dir_emit
(ctx, local_buf, d_reclen, d_ino,
DT_UNKNOWN)) {
- reiserfs_write_lock(inode->i_sb);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
if (local_buf != small_buf) {
kfree(local_buf);
}
goto end;
}
- reiserfs_write_lock(inode->i_sb);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
if (local_buf != small_buf) {
kfree(local_buf);
}
diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c
index 430e0658704..dc4d4153031 100644
--- a/fs/reiserfs/fix_node.c
+++ b/fs/reiserfs/fix_node.c
@@ -1022,9 +1022,9 @@ static int get_far_parent(struct tree_balance *tb,
if (buffer_locked(*pcom_father)) {
/* Release the write lock while the buffer is busy */
- reiserfs_write_unlock(tb->tb_sb);
+ int depth = reiserfs_write_unlock_nested(tb->tb_sb);
__wait_on_buffer(*pcom_father);
- reiserfs_write_lock(tb->tb_sb);
+ reiserfs_write_lock_nested(tb->tb_sb, depth);
if (FILESYSTEM_CHANGED_TB(tb)) {
brelse(*pcom_father);
return REPEAT_SEARCH;
@@ -1929,9 +1929,9 @@ static int get_direct_parent(struct tree_balance *tb, int h)
return REPEAT_SEARCH;
if (buffer_locked(bh)) {
- reiserfs_write_unlock(tb->tb_sb);
+ int depth = reiserfs_write_unlock_nested(tb->tb_sb);
__wait_on_buffer(bh);
- reiserfs_write_lock(tb->tb_sb);
+ reiserfs_write_lock_nested(tb->tb_sb, depth);
if (FILESYSTEM_CHANGED_TB(tb))
return REPEAT_SEARCH;
}
@@ -1952,6 +1952,7 @@ static int get_neighbors(struct tree_balance *tb, int h)
unsigned long son_number;
struct super_block *sb = tb->tb_sb;
struct buffer_head *bh;
+ int depth;
PROC_INFO_INC(sb, get_neighbors[h]);
@@ -1969,9 +1970,9 @@ static int get_neighbors(struct tree_balance *tb, int h)
tb->FL[h]) ? tb->lkey[h] : B_NR_ITEMS(tb->
FL[h]);
son_number = B_N_CHILD_NUM(tb->FL[h], child_position);
- reiserfs_write_unlock(sb);
+ depth = reiserfs_write_unlock_nested(tb->tb_sb);
bh = sb_bread(sb, son_number);
- reiserfs_write_lock(sb);
+ reiserfs_write_lock_nested(tb->tb_sb, depth);
if (!bh)
return IO_ERROR;
if (FILESYSTEM_CHANGED_TB(tb)) {
@@ -2009,9 +2010,9 @@ static int get_neighbors(struct tree_balance *tb, int h)
child_position =
(bh == tb->FR[h]) ? tb->rkey[h] + 1 : 0;
son_number = B_N_CHILD_NUM(tb->FR[h], child_position);
- reiserfs_write_unlock(sb);
+ depth = reiserfs_write_unlock_nested(tb->tb_sb);
bh = sb_bread(sb, son_number);
- reiserfs_write_lock(sb);
+ reiserfs_write_lock_nested(tb->tb_sb, depth);
if (!bh)
return IO_ERROR;
if (FILESYSTEM_CHANGED_TB(tb)) {
@@ -2272,6 +2273,7 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
}
if (locked) {
+ int depth;
#ifdef CONFIG_REISERFS_CHECK
repeat_counter++;
if ((repeat_counter % 10000) == 0) {
@@ -2286,9 +2288,9 @@ static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
REPEAT_SEARCH : CARRY_ON;
}
#endif
- reiserfs_write_unlock(tb->tb_sb);
+ depth = reiserfs_write_unlock_nested(tb->tb_sb);
__wait_on_buffer(locked);
- reiserfs_write_lock(tb->tb_sb);
+ reiserfs_write_lock_nested(tb->tb_sb, depth);
if (FILESYSTEM_CHANGED_TB(tb))
return REPEAT_SEARCH;
}
@@ -2359,9 +2361,9 @@ int fix_nodes(int op_mode, struct tree_balance *tb,
/* if it possible in indirect_to_direct conversion */
if (buffer_locked(tbS0)) {
- reiserfs_write_unlock(tb->tb_sb);
+ int depth = reiserfs_write_unlock_nested(tb->tb_sb);
__wait_on_buffer(tbS0);
- reiserfs_write_lock(tb->tb_sb);
+ reiserfs_write_lock_nested(tb->tb_sb, depth);
if (FILESYSTEM_CHANGED_TB(tb))
return REPEAT_SEARCH;
}
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 0048cc16a6a..ad62bdbb451 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -30,7 +30,6 @@ void reiserfs_evict_inode(struct inode *inode)
JOURNAL_PER_BALANCE_CNT * 2 +
2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb);
struct reiserfs_transaction_handle th;
- int depth;
int err;
if (!inode->i_nlink && !is_bad_inode(inode))
@@ -40,12 +39,13 @@ void reiserfs_evict_inode(struct inode *inode)
if (inode->i_nlink)
goto no_delete;
- depth = reiserfs_write_lock_once(inode->i_sb);
-
 /* The = 0 happens when we abort creating a new inode for some reason like lack of space. */
if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) { /* also handles bad_inode case */
+
reiserfs_delete_xattrs(inode);
+ reiserfs_write_lock(inode->i_sb);
+
if (journal_begin(&th, inode->i_sb, jbegin_count))
goto out;
reiserfs_update_inode_transaction(inode);
@@ -57,8 +57,11 @@ void reiserfs_evict_inode(struct inode *inode)
/* Do quota update inside a transaction for journaled quotas. We must do that
* after delete_object so that quota updates go into the same transaction as
* stat data deletion */
- if (!err)
+ if (!err) {
+ int depth = reiserfs_write_unlock_nested(inode->i_sb);
dquot_free_inode(inode);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
+ }
if (journal_end(&th, inode->i_sb, jbegin_count))
goto out;
@@ -72,12 +75,12 @@ void reiserfs_evict_inode(struct inode *inode)
/* all items of file are deleted, so we can remove "save" link */
remove_save_link(inode, 0 /* not truncate */ ); /* we can't do anything
* about an error here */
+out:
+ reiserfs_write_unlock(inode->i_sb);
} else {
/* no object items are in the tree */
;
}
- out:
- reiserfs_write_unlock_once(inode->i_sb, depth);
clear_inode(inode); /* note this must go after the journal_end to prevent deadlock */
dquot_drop(inode);
inode->i_blocks = 0;
@@ -610,7 +613,6 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
__le32 *item;
int done;
int fs_gen;
- int lock_depth;
struct reiserfs_transaction_handle *th = NULL;
/* space reserved in transaction batch:
. 3 balancings in direct->indirect conversion
@@ -626,11 +628,11 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
loff_t new_offset =
(((loff_t) block) << inode->i_sb->s_blocksize_bits) + 1;
- lock_depth = reiserfs_write_lock_once(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
version = get_inode_item_key_version(inode);
if (!file_capable(inode, block)) {
- reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ reiserfs_write_unlock(inode->i_sb);
return -EFBIG;
}
@@ -642,7 +644,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
/* find number of block-th logical block of the file */
ret = _get_block_create_0(inode, block, bh_result,
create | GET_BLOCK_READ_DIRECT);
- reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ reiserfs_write_unlock(inode->i_sb);
return ret;
}
/*
@@ -760,7 +762,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
if (!dangle && th)
retval = reiserfs_end_persistent_transaction(th);
- reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ reiserfs_write_unlock(inode->i_sb);
/* the item was found, so new blocks were not added to the file
** there is no need to make sure the inode is updated with this
@@ -1011,11 +1013,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
* long time. reschedule if needed and also release the write
* lock for others.
*/
- if (need_resched()) {
- reiserfs_write_unlock_once(inode->i_sb, lock_depth);
- schedule();
- lock_depth = reiserfs_write_lock_once(inode->i_sb);
- }
+ reiserfs_cond_resched(inode->i_sb);
retval = search_for_position_by_key(inode->i_sb, &key, &path);
if (retval == IO_ERROR) {
@@ -1050,7 +1048,7 @@ int reiserfs_get_block(struct inode *inode, sector_t block,
retval = err;
}
- reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ reiserfs_write_unlock(inode->i_sb);
reiserfs_check_path(&path);
return retval;
}
@@ -1509,14 +1507,15 @@ struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key)
{
struct inode *inode;
struct reiserfs_iget_args args;
+ int depth;
args.objectid = key->on_disk_key.k_objectid;
args.dirid = key->on_disk_key.k_dir_id;
- reiserfs_write_unlock(s);
+ depth = reiserfs_write_unlock_nested(s);
inode = iget5_locked(s, key->on_disk_key.k_objectid,
reiserfs_find_actor, reiserfs_init_locked_inode,
(void *)(&args));
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
if (!inode)
return ERR_PTR(-ENOMEM);
@@ -1772,7 +1771,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
struct inode *inode,
struct reiserfs_security_handle *security)
{
- struct super_block *sb;
+ struct super_block *sb = dir->i_sb;
struct reiserfs_iget_args args;
INITIALIZE_PATH(path_to_key);
struct cpu_key key;
@@ -1780,12 +1779,13 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
struct stat_data sd;
int retval;
int err;
+ int depth;
BUG_ON(!th->t_trans_id);
- reiserfs_write_unlock(inode->i_sb);
+ depth = reiserfs_write_unlock_nested(sb);
err = dquot_alloc_inode(inode);
- reiserfs_write_lock(inode->i_sb);
+ reiserfs_write_lock_nested(sb, depth);
if (err)
goto out_end_trans;
if (!dir->i_nlink) {
@@ -1793,8 +1793,6 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
goto out_bad_inode;
}
- sb = dir->i_sb;
-
/* item head of new item */
ih.ih_key.k_dir_id = reiserfs_choose_packing(dir);
ih.ih_key.k_objectid = cpu_to_le32(reiserfs_get_unused_objectid(th));
@@ -1812,10 +1810,10 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
memcpy(INODE_PKEY(inode), &(ih.ih_key), KEY_SIZE);
args.dirid = le32_to_cpu(ih.ih_key.k_dir_id);
- reiserfs_write_unlock(inode->i_sb);
+ depth = reiserfs_write_unlock_nested(inode->i_sb);
err = insert_inode_locked4(inode, args.objectid,
reiserfs_find_actor, &args);
- reiserfs_write_lock(inode->i_sb);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
if (err) {
err = -EINVAL;
goto out_bad_inode;
@@ -1941,7 +1939,9 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
}
if (reiserfs_posixacl(inode->i_sb)) {
+ reiserfs_write_unlock(inode->i_sb);
retval = reiserfs_inherit_default_acl(th, dir, dentry, inode);
+ reiserfs_write_lock(inode->i_sb);
if (retval) {
err = retval;
reiserfs_check_path(&path_to_key);
@@ -1956,7 +1956,9 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
inode->i_flags |= S_PRIVATE;
if (security->name) {
+ reiserfs_write_unlock(inode->i_sb);
retval = reiserfs_security_write(th, inode, security);
+ reiserfs_write_lock(inode->i_sb);
if (retval) {
err = retval;
reiserfs_check_path(&path_to_key);
@@ -1982,14 +1984,16 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
INODE_PKEY(inode)->k_objectid = 0;
/* Quota change must be inside a transaction for journaling */
+ depth = reiserfs_write_unlock_nested(inode->i_sb);
dquot_free_inode(inode);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
out_end_trans:
journal_end(th, th->t_super, th->t_blocks_allocated);
- reiserfs_write_unlock(inode->i_sb);
/* Drop can be outside and it needs more credits so it's better to have it outside */
+ depth = reiserfs_write_unlock_nested(inode->i_sb);
dquot_drop(inode);
- reiserfs_write_lock(inode->i_sb);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
inode->i_flags |= S_NOQUOTA;
make_bad_inode(inode);
@@ -2103,9 +2107,8 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
int error;
struct buffer_head *bh = NULL;
int err2;
- int lock_depth;
- lock_depth = reiserfs_write_lock_once(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
if (inode->i_size > 0) {
error = grab_tail_page(inode, &page, &bh);
@@ -2174,7 +2177,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
page_cache_release(page);
}
- reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ reiserfs_write_unlock(inode->i_sb);
return 0;
out:
@@ -2183,7 +2186,7 @@ int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
page_cache_release(page);
}
- reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ reiserfs_write_unlock(inode->i_sb);
return error;
}
@@ -2648,10 +2651,11 @@ int __reiserfs_write_begin(struct page *page, unsigned from, unsigned len)
struct inode *inode = page->mapping->host;
int ret;
int old_ref = 0;
+ int depth;
- reiserfs_write_unlock(inode->i_sb);
+ depth = reiserfs_write_unlock_nested(inode->i_sb);
reiserfs_wait_on_write_block(inode->i_sb);
- reiserfs_write_lock(inode->i_sb);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
fix_tail_page_for_writing(page);
if (reiserfs_transaction_running(inode->i_sb)) {
@@ -2708,7 +2712,6 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
int update_sd = 0;
struct reiserfs_transaction_handle *th;
unsigned start;
- int lock_depth = 0;
bool locked = false;
if ((unsigned long)fsdata & AOP_FLAG_CONT_EXPAND)
@@ -2737,7 +2740,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
*/
if (pos + copied > inode->i_size) {
struct reiserfs_transaction_handle myth;
- lock_depth = reiserfs_write_lock_once(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
locked = true;
/* If the file have grown beyond the border where it
can have a tail, unmark it as needing a tail
@@ -2768,7 +2771,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
}
if (th) {
if (!locked) {
- lock_depth = reiserfs_write_lock_once(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
locked = true;
}
if (!update_sd)
@@ -2780,7 +2783,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
out:
if (locked)
- reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ reiserfs_write_unlock(inode->i_sb);
unlock_page(page);
page_cache_release(page);
@@ -2790,7 +2793,7 @@ static int reiserfs_write_end(struct file *file, struct address_space *mapping,
return ret == 0 ? copied : ret;
journal_error:
- reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ reiserfs_write_unlock(inode->i_sb);
locked = false;
if (th) {
if (!update_sd)
@@ -2808,10 +2811,11 @@ int reiserfs_commit_write(struct file *f, struct page *page,
int ret = 0;
int update_sd = 0;
struct reiserfs_transaction_handle *th = NULL;
+ int depth;
- reiserfs_write_unlock(inode->i_sb);
+ depth = reiserfs_write_unlock_nested(inode->i_sb);
reiserfs_wait_on_write_block(inode->i_sb);
- reiserfs_write_lock(inode->i_sb);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
if (reiserfs_transaction_running(inode->i_sb)) {
th = current->journal_info;
@@ -3110,7 +3114,6 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
unsigned int ia_valid;
- int depth;
int error;
error = inode_change_ok(inode, attr);
@@ -3122,13 +3125,14 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
if (is_quota_modification(inode, attr))
dquot_initialize(inode);
- depth = reiserfs_write_lock_once(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
if (attr->ia_valid & ATTR_SIZE) {
/* version 2 items will be caught by the s_maxbytes check
** done for us in vmtruncate
*/
if (get_inode_item_key_version(inode) == KEY_FORMAT_3_5 &&
attr->ia_size > MAX_NON_LFS) {
+ reiserfs_write_unlock(inode->i_sb);
error = -EFBIG;
goto out;
}
@@ -3150,8 +3154,10 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
if (err)
error = err;
}
- if (error)
+ if (error) {
+ reiserfs_write_unlock(inode->i_sb);
goto out;
+ }
/*
* file size is changed, ctime and mtime are
* to be updated
@@ -3159,6 +3165,7 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
attr->ia_valid |= (ATTR_MTIME | ATTR_CTIME);
}
}
+ reiserfs_write_unlock(inode->i_sb);
if ((((attr->ia_valid & ATTR_UID) && (from_kuid(&init_user_ns, attr->ia_uid) & ~0xffff)) ||
((attr->ia_valid & ATTR_GID) && (from_kgid(&init_user_ns, attr->ia_gid) & ~0xffff))) &&
@@ -3183,14 +3190,16 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
return error;
/* (user+group)*(old+new) structure - we count quota info and , inode write (sb, inode) */
+ reiserfs_write_lock(inode->i_sb);
error = journal_begin(&th, inode->i_sb, jbegin_count);
+ reiserfs_write_unlock(inode->i_sb);
if (error)
goto out;
- reiserfs_write_unlock_once(inode->i_sb, depth);
error = dquot_transfer(inode, attr);
- depth = reiserfs_write_lock_once(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
if (error) {
journal_end(&th, inode->i_sb, jbegin_count);
+ reiserfs_write_unlock(inode->i_sb);
goto out;
}
@@ -3202,17 +3211,11 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
inode->i_gid = attr->ia_gid;
mark_inode_dirty(inode);
error = journal_end(&th, inode->i_sb, jbegin_count);
+ reiserfs_write_unlock(inode->i_sb);
if (error)
goto out;
}
- /*
- * Relax the lock here, as it might truncate the
- * inode pages and wait for inode pages locks.
- * To release such page lock, the owner needs the
- * reiserfs lock
- */
- reiserfs_write_unlock_once(inode->i_sb, depth);
if ((attr->ia_valid & ATTR_SIZE) &&
attr->ia_size != i_size_read(inode)) {
error = inode_newsize_ok(inode, attr->ia_size);
@@ -3226,16 +3229,13 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
setattr_copy(inode, attr);
mark_inode_dirty(inode);
}
- depth = reiserfs_write_lock_once(inode->i_sb);
if (!error && reiserfs_posixacl(inode->i_sb)) {
if (attr->ia_valid & ATTR_MODE)
error = reiserfs_acl_chmod(inode);
}
- out:
- reiserfs_write_unlock_once(inode->i_sb, depth);
-
+out:
return error;
}
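The open-coded need_resched()/unlock/schedule/relock sequence in reiserfs_get_block() collapses into reiserfs_cond_resched(); its nested-lock-aware definition is added in the fs/reiserfs/reiserfs.h hunk further down. For readers following the diff top to bottom, the helper effectively does the following (same code as the later hunk, reproduced here for reference):

static inline void reiserfs_cond_resched(struct super_block *s)
{
	if (need_resched()) {
		int depth;

		depth = reiserfs_write_unlock_nested(s);
		schedule();
		reiserfs_write_lock_nested(s, depth);
	}
}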
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index 15cb5fe6b42..946ccbf5b5a 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -167,7 +167,6 @@ int reiserfs_commit_write(struct file *f, struct page *page,
int reiserfs_unpack(struct inode *inode, struct file *filp)
{
int retval = 0;
- int depth;
int index;
struct page *page;
struct address_space *mapping;
@@ -183,11 +182,11 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
return 0;
}
- depth = reiserfs_write_lock_once(inode->i_sb);
-
/* we need to make sure nobody is changing the file size beneath us */
reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
+
write_from = inode->i_size & (blocksize - 1);
/* if we are on a block boundary, we are already unpacked. */
if (write_from == 0) {
@@ -221,6 +220,6 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
out:
mutex_unlock(&inode->i_mutex);
- reiserfs_write_unlock_once(inode->i_sb, depth);
+ reiserfs_write_unlock(inode->i_sb);
return retval;
}
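The reiserfs_unpack() change also straightens out the lock ordering: i_mutex is taken first and the per-superblock write lock second, so the function no longer needs to carry a saved lock depth. A short sketch of the resulting ordering (error handling elided, not a verbatim copy of the function):

	/* i_mutex first, then the write lock */
	reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
	reiserfs_write_lock(inode->i_sb);
	/* ... unpack work ... */
	mutex_unlock(&inode->i_mutex);
	reiserfs_write_unlock(inode->i_sb);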
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 742fdd4c209..73feacc49b2 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -947,9 +947,11 @@ static int reiserfs_async_progress_wait(struct super_block *s)
struct reiserfs_journal *j = SB_JOURNAL(s);
if (atomic_read(&j->j_async_throttle)) {
- reiserfs_write_unlock(s);
+ int depth;
+
+ depth = reiserfs_write_unlock_nested(s);
congestion_wait(BLK_RW_ASYNC, HZ / 10);
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
}
return 0;
@@ -972,6 +974,7 @@ static int flush_commit_list(struct super_block *s,
struct reiserfs_journal *journal = SB_JOURNAL(s);
int retval = 0;
int write_len;
+ int depth;
reiserfs_check_lock_depth(s, "flush_commit_list");
@@ -1018,12 +1021,12 @@ static int flush_commit_list(struct super_block *s,
* We might sleep in numerous places inside
* write_ordered_buffers. Relax the write lock.
*/
- reiserfs_write_unlock(s);
+ depth = reiserfs_write_unlock_nested(s);
ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
journal, jl, &jl->j_bh_list);
if (ret < 0 && retval == 0)
retval = ret;
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
}
BUG_ON(!list_empty(&jl->j_bh_list));
/*
@@ -1043,9 +1046,9 @@ static int flush_commit_list(struct super_block *s,
tbh = journal_find_get_block(s, bn);
if (tbh) {
if (buffer_dirty(tbh)) {
- reiserfs_write_unlock(s);
+ depth = reiserfs_write_unlock_nested(s);
ll_rw_block(WRITE, 1, &tbh);
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
}
put_bh(tbh) ;
}
@@ -1057,17 +1060,17 @@ static int flush_commit_list(struct super_block *s,
(jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
tbh = journal_find_get_block(s, bn);
- reiserfs_write_unlock(s);
- wait_on_buffer(tbh);
- reiserfs_write_lock(s);
+ depth = reiserfs_write_unlock_nested(s);
+ __wait_on_buffer(tbh);
+ reiserfs_write_lock_nested(s, depth);
// since we're using ll_rw_blk above, it might have skipped over
// a locked buffer. Double check here
//
/* redundant, sync_dirty_buffer() checks */
if (buffer_dirty(tbh)) {
- reiserfs_write_unlock(s);
+ depth = reiserfs_write_unlock_nested(s);
sync_dirty_buffer(tbh);
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
}
if (unlikely(!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
@@ -1091,12 +1094,12 @@ static int flush_commit_list(struct super_block *s,
if (buffer_dirty(jl->j_commit_bh))
BUG();
mark_buffer_dirty(jl->j_commit_bh) ;
- reiserfs_write_unlock(s);
+ depth = reiserfs_write_unlock_nested(s);
if (reiserfs_barrier_flush(s))
__sync_dirty_buffer(jl->j_commit_bh, WRITE_FLUSH_FUA);
else
sync_dirty_buffer(jl->j_commit_bh);
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
}
/* If there was a write error in the journal - we can't commit this
@@ -1228,15 +1231,16 @@ static int _update_journal_header_block(struct super_block *sb,
{
struct reiserfs_journal_header *jh;
struct reiserfs_journal *journal = SB_JOURNAL(sb);
+ int depth;
if (reiserfs_is_journal_aborted(journal))
return -EIO;
if (trans_id >= journal->j_last_flush_trans_id) {
if (buffer_locked((journal->j_header_bh))) {
- reiserfs_write_unlock(sb);
- wait_on_buffer((journal->j_header_bh));
- reiserfs_write_lock(sb);
+ depth = reiserfs_write_unlock_nested(sb);
+ __wait_on_buffer(journal->j_header_bh);
+ reiserfs_write_lock_nested(sb, depth);
if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
#ifdef CONFIG_REISERFS_CHECK
reiserfs_warning(sb, "journal-699",
@@ -1254,14 +1258,14 @@ static int _update_journal_header_block(struct super_block *sb,
jh->j_mount_id = cpu_to_le32(journal->j_mount_id);
set_buffer_dirty(journal->j_header_bh);
- reiserfs_write_unlock(sb);
+ depth = reiserfs_write_unlock_nested(sb);
if (reiserfs_barrier_flush(sb))
__sync_dirty_buffer(journal->j_header_bh, WRITE_FLUSH_FUA);
else
sync_dirty_buffer(journal->j_header_bh);
- reiserfs_write_lock(sb);
+ reiserfs_write_lock_nested(sb, depth);
if (!buffer_uptodate(journal->j_header_bh)) {
reiserfs_warning(sb, "journal-837",
"IO error during journal replay");
@@ -1341,6 +1345,7 @@ static int flush_journal_list(struct super_block *s,
unsigned long j_len_saved = jl->j_len;
struct reiserfs_journal *journal = SB_JOURNAL(s);
int err = 0;
+ int depth;
BUG_ON(j_len_saved <= 0);
@@ -1495,9 +1500,9 @@ static int flush_journal_list(struct super_block *s,
"cn->bh is NULL");
}
- reiserfs_write_unlock(s);
- wait_on_buffer(cn->bh);
- reiserfs_write_lock(s);
+ depth = reiserfs_write_unlock_nested(s);
+ __wait_on_buffer(cn->bh);
+ reiserfs_write_lock_nested(s, depth);
if (!cn->bh) {
reiserfs_panic(s, "journal-1012",
@@ -1974,6 +1979,7 @@ static int journal_compare_desc_commit(struct super_block *sb,
/* returns 0 if it did not find a description block
** returns -1 if it found a corrupt commit block
** returns 1 if both desc and commit were valid
+** NOTE: only called during fs mount
*/
static int journal_transaction_is_valid(struct super_block *sb,
struct buffer_head *d_bh,
@@ -2073,8 +2079,9 @@ static void brelse_array(struct buffer_head **heads, int num)
/*
** given the start, and values for the oldest acceptable transactions,
-** this either reads in a replays a transaction, or returns because the transaction
-** is invalid, or too old.
+** this either reads in and replays a transaction, or returns because the
+** transaction is invalid, or too old.
+** NOTE: only called during fs mount
*/
static int journal_read_transaction(struct super_block *sb,
unsigned long cur_dblock,
@@ -2208,10 +2215,7 @@ static int journal_read_transaction(struct super_block *sb,
ll_rw_block(READ, get_desc_trans_len(desc), log_blocks);
for (i = 0; i < get_desc_trans_len(desc); i++) {
- reiserfs_write_unlock(sb);
wait_on_buffer(log_blocks[i]);
- reiserfs_write_lock(sb);
-
if (!buffer_uptodate(log_blocks[i])) {
reiserfs_warning(sb, "journal-1212",
"REPLAY FAILURE fsck required! "
@@ -2318,12 +2322,13 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
/*
** read and replay the log
-** on a clean unmount, the journal header's next unflushed pointer will be to an invalid
-** transaction. This tests that before finding all the transactions in the log, which makes normal mount times fast.
-**
-** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
-**
+** on a clean unmount, the journal header's next unflushed pointer will
+** point to an invalid transaction. This tests that before finding all the
+** transactions in the log, which makes normal mount times fast.
+** After a crash, this starts with the next unflushed transaction, and
+** replays until it finds one too old, or invalid.
** On exit, it sets things up so the first transaction will work correctly.
+** NOTE: only called during fs mount
*/
static int journal_read(struct super_block *sb)
{
@@ -2501,14 +2506,18 @@ static int journal_read(struct super_block *sb)
"replayed %d transactions in %lu seconds\n",
replay_count, get_seconds() - start);
}
+ /* needed to satisfy the locking in _update_journal_header_block */
+ reiserfs_write_lock(sb);
if (!bdev_read_only(sb->s_bdev) &&
_update_journal_header_block(sb, journal->j_start,
journal->j_last_flush_trans_id)) {
+ reiserfs_write_unlock(sb);
/* replay failed, caller must call free_journal_ram and abort
** the mount
*/
return -1;
}
+ reiserfs_write_unlock(sb);
return 0;
}
@@ -2828,13 +2837,7 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
goto free_and_return;
}
- /*
- * Journal_read needs to be inspected in order to push down
- * the lock further inside (or even remove it).
- */
- reiserfs_write_lock(sb);
ret = journal_read(sb);
- reiserfs_write_unlock(sb);
if (ret < 0) {
reiserfs_warning(sb, "reiserfs-2006",
"Replay Failure, unable to mount");
@@ -2923,9 +2926,9 @@ static void queue_log_writer(struct super_block *s)
add_wait_queue(&journal->j_join_wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
if (test_bit(J_WRITERS_QUEUED, &journal->j_state)) {
- reiserfs_write_unlock(s);
+ int depth = reiserfs_write_unlock_nested(s);
schedule();
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
}
__set_current_state(TASK_RUNNING);
remove_wait_queue(&journal->j_join_wait, &wait);
@@ -2943,9 +2946,12 @@ static void let_transaction_grow(struct super_block *sb, unsigned int trans_id)
struct reiserfs_journal *journal = SB_JOURNAL(sb);
unsigned long bcount = journal->j_bcount;
while (1) {
- reiserfs_write_unlock(sb);
+ int depth;
+
+ depth = reiserfs_write_unlock_nested(sb);
schedule_timeout_uninterruptible(1);
- reiserfs_write_lock(sb);
+ reiserfs_write_lock_nested(sb, depth);
+
journal->j_current_jl->j_state |= LIST_COMMIT_PENDING;
while ((atomic_read(&journal->j_wcount) > 0 ||
atomic_read(&journal->j_jlock)) &&
@@ -2976,6 +2982,7 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
struct reiserfs_transaction_handle myth;
int sched_count = 0;
int retval;
+ int depth;
reiserfs_check_lock_depth(sb, "journal_begin");
BUG_ON(nblocks > journal->j_trans_max);
@@ -2996,9 +3003,9 @@ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
unlock_journal(sb);
- reiserfs_write_unlock(sb);
+ depth = reiserfs_write_unlock_nested(sb);
reiserfs_wait_on_write_block(sb);
- reiserfs_write_lock(sb);
+ reiserfs_write_lock_nested(sb, depth);
PROC_INFO_INC(sb, journal.journal_relock_writers);
goto relock;
}
@@ -3821,6 +3828,7 @@ void reiserfs_restore_prepared_buffer(struct super_block *sb,
if (test_clear_buffer_journal_restore_dirty(bh) &&
buffer_journal_dirty(bh)) {
struct reiserfs_journal_cnode *cn;
+ reiserfs_write_lock(sb);
cn = get_journal_hash_dev(sb,
journal->j_list_hash_table,
bh->b_blocknr);
@@ -3828,6 +3836,7 @@ void reiserfs_restore_prepared_buffer(struct super_block *sb,
set_buffer_journal_test(bh);
mark_buffer_dirty(bh);
}
+ reiserfs_write_unlock(sb);
}
clear_buffer_journal_prepared(bh);
}
@@ -3911,6 +3920,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
unsigned long jindex;
unsigned int commit_trans_id;
int trans_half;
+ int depth;
BUG_ON(th->t_refcount > 1);
BUG_ON(!th->t_trans_id);
@@ -4116,9 +4126,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
next = cn->next;
free_cnode(sb, cn);
cn = next;
- reiserfs_write_unlock(sb);
- cond_resched();
- reiserfs_write_lock(sb);
+ reiserfs_cond_resched(sb);
}
/* we are done with both the c_bh and d_bh, but
@@ -4165,10 +4173,10 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
* is lost.
*/
if (!list_empty(&jl->j_tail_bh_list)) {
- reiserfs_write_unlock(sb);
+ depth = reiserfs_write_unlock_nested(sb);
write_ordered_buffers(&journal->j_dirty_buffers_lock,
journal, jl, &jl->j_tail_bh_list);
- reiserfs_write_lock(sb);
+ reiserfs_write_lock_nested(sb, depth);
}
BUG_ON(!list_empty(&jl->j_tail_bh_list));
mutex_unlock(&jl->j_commit_mutex);
diff --git a/fs/reiserfs/lock.c b/fs/reiserfs/lock.c
index d735bc8470e..045b83ef9fd 100644
--- a/fs/reiserfs/lock.c
+++ b/fs/reiserfs/lock.c
@@ -48,30 +48,35 @@ void reiserfs_write_unlock(struct super_block *s)
}
}
-/*
- * If we already own the lock, just exit and don't increase the depth.
- * Useful when we don't want to lock more than once.
- *
- * We always return the lock_depth we had before calling
- * this function.
- */
-int reiserfs_write_lock_once(struct super_block *s)
+int __must_check reiserfs_write_unlock_nested(struct super_block *s)
{
struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
+ int depth;
- if (sb_i->lock_owner != current) {
- mutex_lock(&sb_i->lock);
- sb_i->lock_owner = current;
- return sb_i->lock_depth++;
- }
+ /* this can happen when the lock isn't always held */
+ if (sb_i->lock_owner != current)
+ return -1;
+
+ depth = sb_i->lock_depth;
+
+ sb_i->lock_depth = -1;
+ sb_i->lock_owner = NULL;
+ mutex_unlock(&sb_i->lock);
- return sb_i->lock_depth;
+ return depth;
}
-void reiserfs_write_unlock_once(struct super_block *s, int lock_depth)
+void reiserfs_write_lock_nested(struct super_block *s, int depth)
{
- if (lock_depth == -1)
- reiserfs_write_unlock(s);
+ struct reiserfs_sb_info *sb_i = REISERFS_SB(s);
+
+ /* this can happen when the lock isn't always held */
+ if (depth == -1)
+ return;
+
+ mutex_lock(&sb_i->lock);
+ sb_i->lock_owner = current;
+ sb_i->lock_depth = depth;
}
/*
@@ -82,9 +87,7 @@ void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
{
struct reiserfs_sb_info *sb_i = REISERFS_SB(sb);
- if (sb_i->lock_depth < 0)
- reiserfs_panic(sb, "%s called without kernel lock held %d",
- caller);
+ WARN_ON(sb_i->lock_depth < 0);
}
#ifdef CONFIG_REISERFS_CHECK
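Unlike the old lock_once/unlock_once pair, the nested variants let a caller release the write lock completely around a blocking operation and then restore the exact recursion depth it held before, degrading to no-ops when the task does not own the lock (depth == -1). A minimal caller-side sketch of the pattern used throughout this series; do_blocking_work() is illustrative and not part of the patch:

static void relax_write_lock_example(struct super_block *sb)
{
	int depth;

	/* returns -1 if this task does not currently own the lock */
	depth = reiserfs_write_unlock_nested(sb);

	do_blocking_work();	/* may sleep; the lock is fully dropped here */

	/* no-op for depth == -1, otherwise re-acquires the lock and
	 * restores the previous recursion depth */
	reiserfs_write_lock_nested(sb, depth);
}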
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index 8567fb84760..dc5236f6de1 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -325,7 +325,6 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
int retval;
- int lock_depth;
struct inode *inode = NULL;
struct reiserfs_dir_entry de;
INITIALIZE_PATH(path_to_entry);
@@ -333,12 +332,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
if (REISERFS_MAX_NAME(dir->i_sb->s_blocksize) < dentry->d_name.len)
return ERR_PTR(-ENAMETOOLONG);
- /*
- * Might be called with or without the write lock, must be careful
- * to not recursively hold it in case we want to release the lock
- * before rescheduling.
- */
- lock_depth = reiserfs_write_lock_once(dir->i_sb);
+ reiserfs_write_lock(dir->i_sb);
de.de_gen_number_bit_string = NULL;
retval =
@@ -349,7 +343,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
inode = reiserfs_iget(dir->i_sb,
(struct cpu_key *)&(de.de_dir_id));
if (!inode || IS_ERR(inode)) {
- reiserfs_write_unlock_once(dir->i_sb, lock_depth);
+ reiserfs_write_unlock(dir->i_sb);
return ERR_PTR(-EACCES);
}
@@ -358,7 +352,7 @@ static struct dentry *reiserfs_lookup(struct inode *dir, struct dentry *dentry,
if (IS_PRIVATE(dir))
inode->i_flags |= S_PRIVATE;
}
- reiserfs_write_unlock_once(dir->i_sb, lock_depth);
+ reiserfs_write_unlock(dir->i_sb);
if (retval == IO_ERROR) {
return ERR_PTR(-EIO);
}
@@ -727,7 +721,6 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
struct inode *inode;
struct reiserfs_transaction_handle th;
struct reiserfs_security_handle security;
- int lock_depth;
/* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
int jbegin_count =
JOURNAL_PER_BALANCE_CNT * 3 +
@@ -753,7 +746,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
return retval;
}
jbegin_count += retval;
- lock_depth = reiserfs_write_lock_once(dir->i_sb);
+ reiserfs_write_lock(dir->i_sb);
retval = journal_begin(&th, dir->i_sb, jbegin_count);
if (retval) {
@@ -804,7 +797,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
d_instantiate(dentry, inode);
retval = journal_end(&th, dir->i_sb, jbegin_count);
out_failed:
- reiserfs_write_unlock_once(dir->i_sb, lock_depth);
+ reiserfs_write_unlock(dir->i_sb);
return retval;
}
@@ -920,7 +913,6 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
struct reiserfs_transaction_handle th;
int jbegin_count;
unsigned long savelink;
- int depth;
dquot_initialize(dir);
@@ -934,7 +926,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
JOURNAL_PER_BALANCE_CNT * 2 + 2 +
4 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb);
- depth = reiserfs_write_lock_once(dir->i_sb);
+ reiserfs_write_lock(dir->i_sb);
retval = journal_begin(&th, dir->i_sb, jbegin_count);
if (retval)
goto out_unlink;
@@ -995,7 +987,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
retval = journal_end(&th, dir->i_sb, jbegin_count);
reiserfs_check_path(&path);
- reiserfs_write_unlock_once(dir->i_sb, depth);
+ reiserfs_write_unlock(dir->i_sb);
return retval;
end_unlink:
@@ -1005,7 +997,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
if (err)
retval = err;
out_unlink:
- reiserfs_write_unlock_once(dir->i_sb, depth);
+ reiserfs_write_unlock(dir->i_sb);
return retval;
}
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index c0b1112ab7e..54944d5a4a6 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -358,12 +358,13 @@ void __reiserfs_panic(struct super_block *sb, const char *id,
dump_stack();
#endif
if (sb)
- panic(KERN_WARNING "REISERFS panic (device %s): %s%s%s: %s\n",
+ printk(KERN_WARNING "REISERFS panic (device %s): %s%s%s: %s\n",
sb->s_id, id ? id : "", id ? " " : "",
function, error_buf);
else
- panic(KERN_WARNING "REISERFS panic: %s%s%s: %s\n",
+ printk(KERN_WARNING "REISERFS panic: %s%s%s: %s\n",
id ? id : "", id ? " " : "", function, error_buf);
+ BUG();
}
void __reiserfs_error(struct super_block *sb, const char *id,
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index 33532f79b4f..a958444a75f 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -19,12 +19,13 @@
/*
* LOCKING:
*
- * We rely on new Alexander Viro's super-block locking.
+ * These guys are evicted from procfs as the very first step in ->kill_sb().
*
*/
-static int show_version(struct seq_file *m, struct super_block *sb)
+static int show_version(struct seq_file *m, void *unused)
{
+ struct super_block *sb = m->private;
char *format;
if (REISERFS_SB(sb)->s_properties & (1 << REISERFS_3_6)) {
@@ -66,8 +67,9 @@ static int show_version(struct seq_file *m, struct super_block *sb)
#define DJP( x ) le32_to_cpu( jp -> x )
#define JF( x ) ( r -> s_journal -> x )
-static int show_super(struct seq_file *m, struct super_block *sb)
+static int show_super(struct seq_file *m, void *unused)
{
+ struct super_block *sb = m->private;
struct reiserfs_sb_info *r = REISERFS_SB(sb);
seq_printf(m, "state: \t%s\n"
@@ -128,8 +130,9 @@ static int show_super(struct seq_file *m, struct super_block *sb)
return 0;
}
-static int show_per_level(struct seq_file *m, struct super_block *sb)
+static int show_per_level(struct seq_file *m, void *unused)
{
+ struct super_block *sb = m->private;
struct reiserfs_sb_info *r = REISERFS_SB(sb);
int level;
@@ -186,8 +189,9 @@ static int show_per_level(struct seq_file *m, struct super_block *sb)
return 0;
}
-static int show_bitmap(struct seq_file *m, struct super_block *sb)
+static int show_bitmap(struct seq_file *m, void *unused)
{
+ struct super_block *sb = m->private;
struct reiserfs_sb_info *r = REISERFS_SB(sb);
seq_printf(m, "free_block: %lu\n"
@@ -218,8 +222,9 @@ static int show_bitmap(struct seq_file *m, struct super_block *sb)
return 0;
}
-static int show_on_disk_super(struct seq_file *m, struct super_block *sb)
+static int show_on_disk_super(struct seq_file *m, void *unused)
{
+ struct super_block *sb = m->private;
struct reiserfs_sb_info *sb_info = REISERFS_SB(sb);
struct reiserfs_super_block *rs = sb_info->s_rs;
int hash_code = DFL(s_hash_function_code);
@@ -261,8 +266,9 @@ static int show_on_disk_super(struct seq_file *m, struct super_block *sb)
return 0;
}
-static int show_oidmap(struct seq_file *m, struct super_block *sb)
+static int show_oidmap(struct seq_file *m, void *unused)
{
+ struct super_block *sb = m->private;
struct reiserfs_sb_info *sb_info = REISERFS_SB(sb);
struct reiserfs_super_block *rs = sb_info->s_rs;
unsigned int mapsize = le16_to_cpu(rs->s_v1.s_oid_cursize);
@@ -291,8 +297,9 @@ static int show_oidmap(struct seq_file *m, struct super_block *sb)
return 0;
}
-static int show_journal(struct seq_file *m, struct super_block *sb)
+static int show_journal(struct seq_file *m, void *unused)
{
+ struct super_block *sb = m->private;
struct reiserfs_sb_info *r = REISERFS_SB(sb);
struct reiserfs_super_block *rs = r->s_rs;
struct journal_params *jp = &rs->s_v1.s_journal;
@@ -383,92 +390,24 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
return 0;
}
-/* iterator */
-static int test_sb(struct super_block *sb, void *data)
-{
- return data == sb;
-}
-
-static int set_sb(struct super_block *sb, void *data)
-{
- return -ENOENT;
-}
-
-struct reiserfs_seq_private {
- struct super_block *sb;
- int (*show) (struct seq_file *, struct super_block *);
-};
-
-static void *r_start(struct seq_file *m, loff_t * pos)
-{
- struct reiserfs_seq_private *priv = m->private;
- loff_t l = *pos;
-
- if (l)
- return NULL;
-
- if (IS_ERR(sget(&reiserfs_fs_type, test_sb, set_sb, 0, priv->sb)))
- return NULL;
-
- up_write(&priv->sb->s_umount);
- return priv->sb;
-}
-
-static void *r_next(struct seq_file *m, void *v, loff_t * pos)
-{
- ++*pos;
- if (v)
- deactivate_super(v);
- return NULL;
-}
-
-static void r_stop(struct seq_file *m, void *v)
-{
- if (v)
- deactivate_super(v);
-}
-
-static int r_show(struct seq_file *m, void *v)
-{
- struct reiserfs_seq_private *priv = m->private;
- return priv->show(m, v);
-}
-
-static const struct seq_operations r_ops = {
- .start = r_start,
- .next = r_next,
- .stop = r_stop,
- .show = r_show,
-};
-
static int r_open(struct inode *inode, struct file *file)
{
- struct reiserfs_seq_private *priv;
- int ret = seq_open_private(file, &r_ops,
- sizeof(struct reiserfs_seq_private));
-
- if (!ret) {
- struct seq_file *m = file->private_data;
- priv = m->private;
- priv->sb = proc_get_parent_data(inode);
- priv->show = PDE_DATA(inode);
- }
- return ret;
+ return single_open(file, PDE_DATA(inode),
+ proc_get_parent_data(inode));
}
static const struct file_operations r_file_operations = {
.open = r_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release_private,
- .owner = THIS_MODULE,
+ .release = single_release,
};
static struct proc_dir_entry *proc_info_root = NULL;
static const char proc_info_root_name[] = "fs/reiserfs";
static void add_file(struct super_block *sb, char *name,
- int (*func) (struct seq_file *, struct super_block *))
+ int (*func) (struct seq_file *, void *))
{
proc_create_data(name, 0, REISERFS_SB(sb)->procdir,
&r_file_operations, func);
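With single-record files there is no real iteration to do, so the hand-rolled seq_operations/sget() machinery is replaced by single_open(): PDE_DATA() supplies the show callback registered by add_file() and proc_get_parent_data() supplies the super_block, which single_open() stores in m->private. A generic sketch of the same pattern; my_show, my_open and struct my_data are illustrative, not part of the patch:

static int my_show(struct seq_file *m, void *unused)
{
	struct my_data *d = m->private;	/* third argument of single_open() */

	seq_printf(m, "value: %d\n", d->value);
	return 0;
}

static int my_open(struct inode *inode, struct file *file)
{
	return single_open(file, my_show, PDE_DATA(inode));
}

static const struct file_operations my_fops = {
	.open		= my_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};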
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index 3df5ce6c724..f8adaee537c 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -630,8 +630,8 @@ static inline int __reiserfs_is_journal_aborted(struct reiserfs_journal
*/
void reiserfs_write_lock(struct super_block *s);
void reiserfs_write_unlock(struct super_block *s);
-int reiserfs_write_lock_once(struct super_block *s);
-void reiserfs_write_unlock_once(struct super_block *s, int lock_depth);
+int __must_check reiserfs_write_unlock_nested(struct super_block *s);
+void reiserfs_write_lock_nested(struct super_block *s, int depth);
#ifdef CONFIG_REISERFS_CHECK
void reiserfs_lock_check_recursive(struct super_block *s);
@@ -667,31 +667,33 @@ static inline void reiserfs_lock_check_recursive(struct super_block *s) { }
* - The inode mutex
*/
static inline void reiserfs_mutex_lock_safe(struct mutex *m,
- struct super_block *s)
+ struct super_block *s)
{
- reiserfs_lock_check_recursive(s);
- reiserfs_write_unlock(s);
+ int depth;
+
+ depth = reiserfs_write_unlock_nested(s);
mutex_lock(m);
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
}
static inline void
reiserfs_mutex_lock_nested_safe(struct mutex *m, unsigned int subclass,
- struct super_block *s)
+ struct super_block *s)
{
- reiserfs_lock_check_recursive(s);
- reiserfs_write_unlock(s);
+ int depth;
+
+ depth = reiserfs_write_unlock_nested(s);
mutex_lock_nested(m, subclass);
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
}
static inline void
reiserfs_down_read_safe(struct rw_semaphore *sem, struct super_block *s)
{
- reiserfs_lock_check_recursive(s);
- reiserfs_write_unlock(s);
- down_read(sem);
- reiserfs_write_lock(s);
+ int depth;
+ depth = reiserfs_write_unlock_nested(s);
+ down_read(sem);
+ reiserfs_write_lock_nested(s, depth);
}
/*
@@ -701,9 +703,11 @@ reiserfs_down_read_safe(struct rw_semaphore *sem, struct super_block *s)
static inline void reiserfs_cond_resched(struct super_block *s)
{
if (need_resched()) {
- reiserfs_write_unlock(s);
+ int depth;
+
+ depth = reiserfs_write_unlock_nested(s);
schedule();
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
}
}
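Because the *_safe helpers are now built on the nested lock API, they can be called whether or not the current task holds the write lock; the depth bookkeeping makes the drop/retake transparent to the caller. A hedged usage sketch, where m and s stand for whatever mutex and super_block the caller works with:

	/* safe regardless of whether the write lock is currently held */
	reiserfs_mutex_lock_safe(m, s);
	/* ... m is held; the write lock, if it was held before, is held
	 * again at its previous depth ... */
	mutex_unlock(m);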
diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c
index 3ce02cff5e9..a4ef5cd606e 100644
--- a/fs/reiserfs/resize.c
+++ b/fs/reiserfs/resize.c
@@ -34,6 +34,7 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
unsigned long int block_count, free_blocks;
int i;
int copy_size;
+ int depth;
sb = SB_DISK_SUPER_BLOCK(s);
@@ -43,7 +44,9 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
}
/* check the device size */
+ depth = reiserfs_write_unlock_nested(s);
bh = sb_bread(s, block_count_new - 1);
+ reiserfs_write_lock_nested(s, depth);
if (!bh) {
printk("reiserfs_resize: can\'t read last block\n");
return -EINVAL;
@@ -125,9 +128,12 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
* transaction begins, and the new bitmaps don't matter if the
* transaction fails. */
for (i = bmap_nr; i < bmap_nr_new; i++) {
+ int depth;
/* don't use read_bitmap_block since it will cache
* the uninitialized bitmap */
+ depth = reiserfs_write_unlock_nested(s);
bh = sb_bread(s, i * s->s_blocksize * 8);
+ reiserfs_write_lock_nested(s, depth);
if (!bh) {
vfree(bitmap);
return -EIO;
@@ -138,9 +144,9 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
- reiserfs_write_unlock(s);
+ depth = reiserfs_write_unlock_nested(s);
sync_dirty_buffer(bh);
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(s, depth);
// update bitmap_info stuff
bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
brelse(bh);
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 2f40a4c70a4..b14706a05d5 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -524,14 +524,14 @@ static int is_tree_node(struct buffer_head *bh, int level)
* the caller (search_by_key) will perform other schedule-unsafe
* operations just after calling this function.
*
- * @return true if we have unlocked
+ * @return depth of lock to be restored after read completes
*/
-static bool search_by_key_reada(struct super_block *s,
+static int search_by_key_reada(struct super_block *s,
struct buffer_head **bh,
b_blocknr_t *b, int num)
{
int i, j;
- bool unlocked = false;
+ int depth = -1;
for (i = 0; i < num; i++) {
bh[i] = sb_getblk(s, b[i]);
@@ -549,15 +549,13 @@ static bool search_by_key_reada(struct super_block *s,
* you have to make sure the prepared bit isn't set on this buffer
*/
if (!buffer_uptodate(bh[j])) {
- if (!unlocked) {
- reiserfs_write_unlock(s);
- unlocked = true;
- }
+ if (depth == -1)
+ depth = reiserfs_write_unlock_nested(s);
ll_rw_block(READA, 1, bh + j);
}
brelse(bh[j]);
}
- return unlocked;
+ return depth;
}
/**************************************************************************
@@ -645,26 +643,26 @@ int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to s
have a pointer to it. */
if ((bh = last_element->pe_buffer =
sb_getblk(sb, block_number))) {
- bool unlocked = false;
- if (!buffer_uptodate(bh) && reada_count > 1)
- /* may unlock the write lock */
- unlocked = search_by_key_reada(sb, reada_bh,
- reada_blocks, reada_count);
/*
- * If we haven't already unlocked the write lock,
- * then we need to do that here before reading
- * the current block
+ * We'll need to drop the lock if we encounter any
+ * buffers that need to be read. If all of them are
+ * already up to date, we don't need to drop the lock.
*/
- if (!buffer_uptodate(bh) && !unlocked) {
- reiserfs_write_unlock(sb);
- unlocked = true;
- }
+ int depth = -1;
+
+ if (!buffer_uptodate(bh) && reada_count > 1)
+ depth = search_by_key_reada(sb, reada_bh,
+ reada_blocks, reada_count);
+
+ if (!buffer_uptodate(bh) && depth == -1)
+ depth = reiserfs_write_unlock_nested(sb);
+
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
- if (unlocked)
- reiserfs_write_lock(sb);
+ if (depth != -1)
+ reiserfs_write_lock_nested(sb, depth);
if (!buffer_uptodate(bh))
goto io_error;
} else {
@@ -1059,9 +1057,7 @@ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, st
reiserfs_free_block(th, inode, block, 1);
}
- reiserfs_write_unlock(sb);
- cond_resched();
- reiserfs_write_lock(sb);
+ reiserfs_cond_resched(sb);
if (item_moved (&s_ih, path)) {
need_re_search = 1;
@@ -1190,6 +1186,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
struct item_head *q_ih;
int quota_cut_bytes;
int ret_value, del_size, removed;
+ int depth;
#ifdef CONFIG_REISERFS_CHECK
char mode;
@@ -1299,7 +1296,9 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
"reiserquota delete_item(): freeing %u, id=%u type=%c",
quota_cut_bytes, inode->i_uid, head2type(&s_ih));
#endif
+ depth = reiserfs_write_unlock_nested(inode->i_sb);
dquot_free_space_nodirty(inode, quota_cut_bytes);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
/* Return deleted body length */
return ret_value;
@@ -1325,6 +1324,7 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
struct inode *inode, struct reiserfs_key *key)
{
+ struct super_block *sb = th->t_super;
struct tree_balance tb;
INITIALIZE_PATH(path);
int item_len = 0;
@@ -1377,14 +1377,17 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
if (retval == CARRY_ON) {
do_balance(&tb, NULL, NULL, M_DELETE);
if (inode) { /* Should we count quota for item? (we don't count quotas for save-links) */
+ int depth;
#ifdef REISERQUOTA_DEBUG
reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
"reiserquota delete_solid_item(): freeing %u id=%u type=%c",
quota_cut_bytes, inode->i_uid,
key2type(key));
#endif
+ depth = reiserfs_write_unlock_nested(sb);
dquot_free_space_nodirty(inode,
quota_cut_bytes);
+ reiserfs_write_lock_nested(sb, depth);
}
break;
}
@@ -1561,6 +1564,7 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
int retval2 = -1;
int quota_cut_bytes;
loff_t tail_pos = 0;
+ int depth;
BUG_ON(!th->t_trans_id);
@@ -1733,7 +1737,9 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
"reiserquota cut_from_item(): freeing %u id=%u type=%c",
quota_cut_bytes, inode->i_uid, '?');
#endif
+ depth = reiserfs_write_unlock_nested(sb);
dquot_free_space_nodirty(inode, quota_cut_bytes);
+ reiserfs_write_lock_nested(sb, depth);
return ret_value;
}
@@ -1953,9 +1959,11 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
const char *body, /* Pointer to the bytes to paste. */
int pasted_size)
{ /* Size of pasted bytes. */
+ struct super_block *sb = inode->i_sb;
struct tree_balance s_paste_balance;
int retval;
int fs_gen;
+ int depth;
BUG_ON(!th->t_trans_id);
@@ -1968,9 +1976,9 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
key2type(&(key->on_disk_key)));
#endif
- reiserfs_write_unlock(inode->i_sb);
+ depth = reiserfs_write_unlock_nested(sb);
retval = dquot_alloc_space_nodirty(inode, pasted_size);
- reiserfs_write_lock(inode->i_sb);
+ reiserfs_write_lock_nested(sb, depth);
if (retval) {
pathrelse(search_path);
return retval;
@@ -2027,7 +2035,9 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
pasted_size, inode->i_uid,
key2type(&(key->on_disk_key)));
#endif
+ depth = reiserfs_write_unlock_nested(sb);
dquot_free_space_nodirty(inode, pasted_size);
+ reiserfs_write_lock_nested(sb, depth);
return retval;
}
@@ -2050,6 +2060,7 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
BUG_ON(!th->t_trans_id);
if (inode) { /* Do we count quotas for item? */
+ int depth;
fs_gen = get_generation(inode->i_sb);
quota_bytes = ih_item_len(ih);
@@ -2063,11 +2074,11 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
"reiserquota insert_item(): allocating %u id=%u type=%c",
quota_bytes, inode->i_uid, head2type(ih));
#endif
- reiserfs_write_unlock(inode->i_sb);
/* We can't dirty inode here. It would be immediately written but
* appropriate stat item isn't inserted yet... */
+ depth = reiserfs_write_unlock_nested(inode->i_sb);
retval = dquot_alloc_space_nodirty(inode, quota_bytes);
- reiserfs_write_lock(inode->i_sb);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
if (retval) {
pathrelse(path);
return retval;
@@ -2118,7 +2129,10 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
"reiserquota insert_item(): freeing %u id=%u type=%c",
quota_bytes, inode->i_uid, head2type(ih));
#endif
- if (inode)
+ if (inode) {
+ int depth = reiserfs_write_unlock_nested(inode->i_sb);
dquot_free_space_nodirty(inode, quota_bytes);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
+ }
return retval;
}
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index f8a23c3078f..3ead145dadc 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -243,6 +243,7 @@ static int finish_unfinished(struct super_block *s)
done = 0;
REISERFS_SB(s)->s_is_unlinked_ok = 1;
while (!retval) {
+ int depth;
retval = search_item(s, &max_cpu_key, &path);
if (retval != ITEM_NOT_FOUND) {
reiserfs_error(s, "vs-2140",
@@ -298,9 +299,9 @@ static int finish_unfinished(struct super_block *s)
retval = remove_save_link_only(s, &save_link_key, 0);
continue;
}
- reiserfs_write_unlock(s);
+ depth = reiserfs_write_unlock_nested(inode->i_sb);
dquot_initialize(inode);
- reiserfs_write_lock(s);
+ reiserfs_write_lock_nested(inode->i_sb, depth);
if (truncate && S_ISDIR(inode->i_mode)) {
/* We got a truncate request for a dir which is impossible.
@@ -356,10 +357,12 @@ static int finish_unfinished(struct super_block *s)
#ifdef CONFIG_QUOTA
/* Turn quotas off */
+ reiserfs_write_unlock(s);
for (i = 0; i < MAXQUOTAS; i++) {
if (sb_dqopt(s)->files[i] && quota_enabled[i])
dquot_quota_off(s, i);
}
+ reiserfs_write_lock(s);
if (ms_active_set)
/* Restore the flag back */
s->s_flags &= ~MS_ACTIVE;
@@ -499,6 +502,7 @@ int remove_save_link(struct inode *inode, int truncate)
static void reiserfs_kill_sb(struct super_block *s)
{
if (REISERFS_SB(s)) {
+ reiserfs_proc_info_done(s);
/*
* Force any pending inode evictions to occur now. Any
* inodes to be removed that have extended attributes
@@ -554,8 +558,6 @@ static void reiserfs_put_super(struct super_block *s)
REISERFS_SB(s)->reserved_blocks);
}
- reiserfs_proc_info_done(s);
-
reiserfs_write_unlock(s);
mutex_destroy(&REISERFS_SB(s)->lock);
kfree(s->s_fs_info);
@@ -624,7 +626,6 @@ static void reiserfs_dirty_inode(struct inode *inode, int flags)
struct reiserfs_transaction_handle th;
int err = 0;
- int lock_depth;
if (inode->i_sb->s_flags & MS_RDONLY) {
reiserfs_warning(inode->i_sb, "clm-6006",
@@ -632,7 +633,7 @@ static void reiserfs_dirty_inode(struct inode *inode, int flags)
inode->i_ino);
return;
}
- lock_depth = reiserfs_write_lock_once(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
/* this is really only used for atime updates, so they don't have
** to be included in O_SYNC or fsync
@@ -645,7 +646,7 @@ static void reiserfs_dirty_inode(struct inode *inode, int flags)
journal_end(&th, inode->i_sb, 1);
out:
- reiserfs_write_unlock_once(inode->i_sb, lock_depth);
+ reiserfs_write_unlock(inode->i_sb);
}
static int reiserfs_show_options(struct seq_file *seq, struct dentry *root)
@@ -1335,7 +1336,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
kfree(qf_names[i]);
#endif
err = -EINVAL;
- goto out_unlock;
+ goto out_err_unlock;
}
#ifdef CONFIG_QUOTA
handle_quota_files(s, qf_names, &qfmt);
@@ -1379,35 +1380,32 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
if (blocks) {
err = reiserfs_resize(s, blocks);
if (err != 0)
- goto out_unlock;
+ goto out_err_unlock;
}
if (*mount_flags & MS_RDONLY) {
+ reiserfs_write_unlock(s);
reiserfs_xattr_init(s, *mount_flags);
/* remount read-only */
if (s->s_flags & MS_RDONLY)
/* it is read-only already */
- goto out_ok;
+ goto out_ok_unlocked;
- /*
- * Drop write lock. Quota will retake it when needed and lock
- * ordering requires calling dquot_suspend() without it.
- */
- reiserfs_write_unlock(s);
err = dquot_suspend(s, -1);
if (err < 0)
goto out_err;
- reiserfs_write_lock(s);
/* try to remount file system with read-only permissions */
if (sb_umount_state(rs) == REISERFS_VALID_FS
|| REISERFS_SB(s)->s_mount_state != REISERFS_VALID_FS) {
- goto out_ok;
+ goto out_ok_unlocked;
}
+ reiserfs_write_lock(s);
+
err = journal_begin(&th, s, 10);
if (err)
- goto out_unlock;
+ goto out_err_unlock;
/* Mounting a rw partition read-only. */
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
@@ -1416,13 +1414,14 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
} else {
/* remount read-write */
if (!(s->s_flags & MS_RDONLY)) {
+ reiserfs_write_unlock(s);
reiserfs_xattr_init(s, *mount_flags);
- goto out_ok; /* We are read-write already */
+ goto out_ok_unlocked; /* We are read-write already */
}
if (reiserfs_is_journal_aborted(journal)) {
err = journal->j_errno;
- goto out_unlock;
+ goto out_err_unlock;
}
handle_data_mode(s, mount_options);
@@ -1431,7 +1430,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
s->s_flags &= ~MS_RDONLY; /* now it is safe to call journal_begin */
err = journal_begin(&th, s, 10);
if (err)
- goto out_unlock;
+ goto out_err_unlock;
/* Mount a partition which is read-only, read-write */
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
@@ -1448,26 +1447,22 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
SB_JOURNAL(s)->j_must_wait = 1;
err = journal_end(&th, s, 10);
if (err)
- goto out_unlock;
+ goto out_err_unlock;
+ reiserfs_write_unlock(s);
if (!(*mount_flags & MS_RDONLY)) {
- /*
- * Drop write lock. Quota will retake it when needed and lock
- * ordering requires calling dquot_resume() without it.
- */
- reiserfs_write_unlock(s);
dquot_resume(s, -1);
reiserfs_write_lock(s);
finish_unfinished(s);
+ reiserfs_write_unlock(s);
reiserfs_xattr_init(s, *mount_flags);
}
-out_ok:
+out_ok_unlocked:
replace_mount_options(s, new_opts);
- reiserfs_write_unlock(s);
return 0;
-out_unlock:
+out_err_unlock:
reiserfs_write_unlock(s);
out_err:
kfree(new_opts);
@@ -2014,12 +2009,14 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
goto error;
}
+ reiserfs_write_unlock(s);
if ((errval = reiserfs_lookup_privroot(s)) ||
(errval = reiserfs_xattr_init(s, s->s_flags))) {
dput(s->s_root);
s->s_root = NULL;
- goto error;
+ goto error_unlocked;
}
+ reiserfs_write_lock(s);
/* look for files which were to be removed in previous session */
finish_unfinished(s);
@@ -2028,12 +2025,14 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
reiserfs_info(s, "using 3.5.x disk format\n");
}
+ reiserfs_write_unlock(s);
if ((errval = reiserfs_lookup_privroot(s)) ||
(errval = reiserfs_xattr_init(s, s->s_flags))) {
dput(s->s_root);
s->s_root = NULL;
- goto error;
+ goto error_unlocked;
}
+ reiserfs_write_lock(s);
}
// mark hash in super block: it could be unset. overwrite should be ok
set_sb_hash_function_code(rs, function2code(sbi->s_hash_function));
@@ -2101,6 +2100,7 @@ static int reiserfs_write_dquot(struct dquot *dquot)
{
struct reiserfs_transaction_handle th;
int ret, err;
+ int depth;
reiserfs_write_lock(dquot->dq_sb);
ret =
@@ -2108,9 +2108,9 @@ static int reiserfs_write_dquot(struct dquot *dquot)
REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
if (ret)
goto out;
- reiserfs_write_unlock(dquot->dq_sb);
+ depth = reiserfs_write_unlock_nested(dquot->dq_sb);
ret = dquot_commit(dquot);
- reiserfs_write_lock(dquot->dq_sb);
+ reiserfs_write_lock_nested(dquot->dq_sb, depth);
err =
journal_end(&th, dquot->dq_sb,
REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
@@ -2125,6 +2125,7 @@ static int reiserfs_acquire_dquot(struct dquot *dquot)
{
struct reiserfs_transaction_handle th;
int ret, err;
+ int depth;
reiserfs_write_lock(dquot->dq_sb);
ret =
@@ -2132,9 +2133,9 @@ static int reiserfs_acquire_dquot(struct dquot *dquot)
REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
if (ret)
goto out;
- reiserfs_write_unlock(dquot->dq_sb);
+ depth = reiserfs_write_unlock_nested(dquot->dq_sb);
ret = dquot_acquire(dquot);
- reiserfs_write_lock(dquot->dq_sb);
+ reiserfs_write_lock_nested(dquot->dq_sb, depth);
err =
journal_end(&th, dquot->dq_sb,
REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
@@ -2187,15 +2188,16 @@ static int reiserfs_write_info(struct super_block *sb, int type)
{
struct reiserfs_transaction_handle th;
int ret, err;
+ int depth;
/* Data block + inode block */
reiserfs_write_lock(sb);
ret = journal_begin(&th, sb, 2);
if (ret)
goto out;
- reiserfs_write_unlock(sb);
+ depth = reiserfs_write_unlock_nested(sb);
ret = dquot_commit_info(sb, type);
- reiserfs_write_lock(sb);
+ reiserfs_write_lock_nested(sb, depth);
err = journal_end(&th, sb, 2);
if (!ret && err)
ret = err;
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index c69cdd749f0..8a9e2dcfe00 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -81,8 +81,7 @@ static int xattr_unlink(struct inode *dir, struct dentry *dentry)
int error;
BUG_ON(!mutex_is_locked(&dir->i_mutex));
- reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex,
- I_MUTEX_CHILD, dir->i_sb);
+ mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
error = dir->i_op->unlink(dir, dentry);
mutex_unlock(&dentry->d_inode->i_mutex);
@@ -96,8 +95,7 @@ static int xattr_rmdir(struct inode *dir, struct dentry *dentry)
int error;
BUG_ON(!mutex_is_locked(&dir->i_mutex));
- reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex,
- I_MUTEX_CHILD, dir->i_sb);
+ mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
error = dir->i_op->rmdir(dir, dentry);
if (!error)
dentry->d_inode->i_flags |= S_DEAD;
@@ -232,22 +230,17 @@ static int reiserfs_for_each_xattr(struct inode *inode,
if (IS_PRIVATE(inode) || get_inode_sd_version(inode) == STAT_DATA_V1)
return 0;
- reiserfs_write_unlock(inode->i_sb);
dir = open_xa_dir(inode, XATTR_REPLACE);
if (IS_ERR(dir)) {
err = PTR_ERR(dir);
- reiserfs_write_lock(inode->i_sb);
goto out;
} else if (!dir->d_inode) {
err = 0;
- reiserfs_write_lock(inode->i_sb);
goto out_dir;
}
mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR);
- reiserfs_write_lock(inode->i_sb);
-
buf.xadir = dir;
while (1) {
err = reiserfs_readdir_inode(dir->d_inode, &buf.ctx);
@@ -281,14 +274,17 @@ static int reiserfs_for_each_xattr(struct inode *inode,
int blocks = JOURNAL_PER_BALANCE_CNT * 2 + 2 +
4 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb);
struct reiserfs_transaction_handle th;
+ reiserfs_write_lock(inode->i_sb);
err = journal_begin(&th, inode->i_sb, blocks);
+ reiserfs_write_unlock(inode->i_sb);
if (!err) {
int jerror;
- reiserfs_mutex_lock_nested_safe(
- &dir->d_parent->d_inode->i_mutex,
- I_MUTEX_XATTR, inode->i_sb);
+ mutex_lock_nested(&dir->d_parent->d_inode->i_mutex,
+ I_MUTEX_XATTR);
err = action(dir, data);
+ reiserfs_write_lock(inode->i_sb);
jerror = journal_end(&th, inode->i_sb, blocks);
+ reiserfs_write_unlock(inode->i_sb);
mutex_unlock(&dir->d_parent->d_inode->i_mutex);
err = jerror ?: err;
}
@@ -455,9 +451,7 @@ static int lookup_and_delete_xattr(struct inode *inode, const char *name)
}
if (dentry->d_inode) {
- reiserfs_write_lock(inode->i_sb);
err = xattr_unlink(xadir->d_inode, dentry);
- reiserfs_write_unlock(inode->i_sb);
update_ctime(inode);
}
@@ -491,24 +485,17 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
if (get_inode_sd_version(inode) == STAT_DATA_V1)
return -EOPNOTSUPP;
- reiserfs_write_unlock(inode->i_sb);
-
if (!buffer) {
err = lookup_and_delete_xattr(inode, name);
- reiserfs_write_lock(inode->i_sb);
return err;
}
dentry = xattr_lookup(inode, name, flags);
- if (IS_ERR(dentry)) {
- reiserfs_write_lock(inode->i_sb);
+ if (IS_ERR(dentry))
return PTR_ERR(dentry);
- }
down_write(&REISERFS_I(inode)->i_xattr_sem);
- reiserfs_write_lock(inode->i_sb);
-
xahash = xattr_hash(buffer, buffer_size);
while (buffer_pos < buffer_size || buffer_pos == 0) {
size_t chunk;
@@ -538,6 +525,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
rxh->h_hash = cpu_to_le32(xahash);
}
+ reiserfs_write_lock(inode->i_sb);
err = __reiserfs_write_begin(page, page_offset, chunk + skip);
if (!err) {
if (buffer)
@@ -546,6 +534,7 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
page_offset + chunk +
skip);
}
+ reiserfs_write_unlock(inode->i_sb);
unlock_page(page);
reiserfs_put_page(page);
buffer_pos += chunk;
@@ -563,10 +552,8 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
.ia_valid = ATTR_SIZE | ATTR_CTIME,
};
- reiserfs_write_unlock(inode->i_sb);
mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR);
inode_dio_wait(dentry->d_inode);
- reiserfs_write_lock(inode->i_sb);
err = reiserfs_setattr(dentry, &newattrs);
mutex_unlock(&dentry->d_inode->i_mutex);
@@ -592,18 +579,19 @@ int reiserfs_xattr_set(struct inode *inode, const char *name,
reiserfs_write_lock(inode->i_sb);
error = journal_begin(&th, inode->i_sb, jbegin_count);
+ reiserfs_write_unlock(inode->i_sb);
if (error) {
- reiserfs_write_unlock(inode->i_sb);
return error;
}
error = reiserfs_xattr_set_handle(&th, inode, name,
buffer, buffer_size, flags);
+ reiserfs_write_lock(inode->i_sb);
error2 = journal_end(&th, inode->i_sb, jbegin_count);
+ reiserfs_write_unlock(inode->i_sb);
if (error == 0)
error = error2;
- reiserfs_write_unlock(inode->i_sb);
return error;
}
@@ -968,7 +956,7 @@ int reiserfs_lookup_privroot(struct super_block *s)
int err = 0;
/* If we don't have the privroot located yet - go find it */
- reiserfs_mutex_lock_safe(&s->s_root->d_inode->i_mutex, s);
+ mutex_lock(&s->s_root->d_inode->i_mutex);
dentry = lookup_one_len(PRIVROOT_NAME, s->s_root,
strlen(PRIVROOT_NAME));
if (!IS_ERR(dentry)) {
@@ -996,14 +984,14 @@ int reiserfs_xattr_init(struct super_block *s, int mount_flags)
goto error;
if (!privroot->d_inode && !(mount_flags & MS_RDONLY)) {
- reiserfs_mutex_lock_safe(&s->s_root->d_inode->i_mutex, s);
+ mutex_lock(&s->s_root->d_inode->i_mutex);
err = create_privroot(REISERFS_SB(s)->priv_root);
mutex_unlock(&s->s_root->d_inode->i_mutex);
}
if (privroot->d_inode) {
s->s_xattr = reiserfs_xattr_handlers;
- reiserfs_mutex_lock_safe(&privroot->d_inode->i_mutex, s);
+ mutex_lock(&privroot->d_inode->i_mutex);
if (!REISERFS_SB(s)->xattr_root) {
struct dentry *dentry;
dentry = lookup_one_len(XAROOT_NAME, privroot,
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 6c8767fdfc6..06c04f73da6 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -49,13 +49,15 @@ posix_acl_set(struct dentry *dentry, const char *name, const void *value,
reiserfs_write_lock(inode->i_sb);
error = journal_begin(&th, inode->i_sb, jcreate_blocks);
+ reiserfs_write_unlock(inode->i_sb);
if (error == 0) {
error = reiserfs_set_acl(&th, inode, type, acl);
+ reiserfs_write_lock(inode->i_sb);
error2 = journal_end(&th, inode->i_sb, jcreate_blocks);
+ reiserfs_write_unlock(inode->i_sb);
if (error2)
error = error2;
}
- reiserfs_write_unlock(inode->i_sb);
release_and_out:
posix_acl_release(acl);
@@ -435,12 +437,14 @@ int reiserfs_cache_default_acl(struct inode *inode)
return nblocks;
}
+/*
+ * Called under i_mutex
+ */
int reiserfs_acl_chmod(struct inode *inode)
{
struct reiserfs_transaction_handle th;
struct posix_acl *acl;
size_t size;
- int depth;
int error;
if (IS_PRIVATE(inode))
@@ -454,9 +458,7 @@ int reiserfs_acl_chmod(struct inode *inode)
return 0;
}
- reiserfs_write_unlock(inode->i_sb);
acl = reiserfs_get_acl(inode, ACL_TYPE_ACCESS);
- reiserfs_write_lock(inode->i_sb);
if (!acl)
return 0;
if (IS_ERR(acl))
@@ -466,16 +468,18 @@ int reiserfs_acl_chmod(struct inode *inode)
return error;
size = reiserfs_xattr_nblocks(inode, reiserfs_acl_size(acl->a_count));
- depth = reiserfs_write_lock_once(inode->i_sb);
+ reiserfs_write_lock(inode->i_sb);
error = journal_begin(&th, inode->i_sb, size * 2);
+ reiserfs_write_unlock(inode->i_sb);
if (!error) {
int error2;
error = reiserfs_set_acl(&th, inode, ACL_TYPE_ACCESS, acl);
+ reiserfs_write_lock(inode->i_sb);
error2 = journal_end(&th, inode->i_sb, size * 2);
+ reiserfs_write_unlock(inode->i_sb);
if (error2)
error = error2;
}
- reiserfs_write_unlock_once(inode->i_sb, depth);
posix_acl_release(acl);
return error;
}
diff --git a/fs/stat.c b/fs/stat.c
index 04ce1ac20d2..d0ea7ef75e2 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -447,9 +447,8 @@ void inode_add_bytes(struct inode *inode, loff_t bytes)
EXPORT_SYMBOL(inode_add_bytes);
-void inode_sub_bytes(struct inode *inode, loff_t bytes)
+void __inode_sub_bytes(struct inode *inode, loff_t bytes)
{
- spin_lock(&inode->i_lock);
inode->i_blocks -= bytes >> 9;
bytes &= 511;
if (inode->i_bytes < bytes) {
@@ -457,6 +456,14 @@ void inode_sub_bytes(struct inode *inode, loff_t bytes)
inode->i_bytes += 512;
}
inode->i_bytes -= bytes;
+}
+
+EXPORT_SYMBOL(__inode_sub_bytes);
+
+void inode_sub_bytes(struct inode *inode, loff_t bytes)
+{
+ spin_lock(&inode->i_lock);
+ __inode_sub_bytes(inode, bytes);
spin_unlock(&inode->i_lock);
}
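__inode_sub_bytes() is the body of inode_sub_bytes() minus the locking, exported so that callers which already hold inode->i_lock (or want to batch several byte-count updates under a single acquisition) can avoid a redundant lock/unlock. A minimal illustrative caller, not taken from this patch:

	spin_lock(&inode->i_lock);
	__inode_sub_bytes(inode, freed_bytes);	/* i_lock already held */
	/* ... other updates protected by i_lock ... */
	spin_unlock(&inode->i_lock);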
diff --git a/fs/super.c b/fs/super.c
index 68307c02922..5536a95186e 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -152,15 +152,9 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
static const struct super_operations default_op;
if (s) {
- if (security_sb_alloc(s)) {
- /*
- * We cannot call security_sb_free() without
- * security_sb_alloc() succeeding. So bail out manually
- */
- kfree(s);
- s = NULL;
- goto out;
- }
+ if (security_sb_alloc(s))
+ goto out_free_sb;
+
#ifdef CONFIG_SMP
s->s_files = alloc_percpu(struct list_head);
if (!s->s_files)
@@ -228,6 +222,7 @@ err_out:
free_percpu(s->s_files);
#endif
destroy_sb_writers(s);
+out_free_sb:
kfree(s);
s = NULL;
goto out;
@@ -414,6 +409,11 @@ void generic_shutdown_super(struct super_block *sb)
evict_inodes(sb);
+ if (sb->s_dio_done_wq) {
+ destroy_workqueue(sb->s_dio_done_wq);
+ sb->s_dio_done_wq = NULL;
+ }
+
if (sop->put_super)
sop->put_super(sb);
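The alloc_super() change above replaces an open-coded kfree()-and-bail with a shared out_free_sb label, the usual goto-based unwind pattern for multi-step allocation. A generic sketch of that pattern under invented names, not taken from this patch:

struct example_ctx {
	void *a;
	void *b;
};

static struct example_ctx *example_ctx_alloc(gfp_t gfp)
{
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), gfp);

	if (!ctx)
		return NULL;

	ctx->a = kzalloc(64, gfp);
	if (!ctx->a)
		goto out_free_ctx;

	ctx->b = kzalloc(64, gfp);
	if (!ctx->b)
		goto out_free_a;

	return ctx;

out_free_a:
	kfree(ctx->a);
out_free_ctx:
	kfree(ctx);
	return NULL;
}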
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index 15c68f9489a..c590cabd57b 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -22,8 +22,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mm.h>
-
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "sysfs.h"
@@ -391,7 +390,7 @@ out_unlock:
return rc;
}
-static int open(struct inode * inode, struct file * file)
+static int open(struct inode *inode, struct file *file)
{
struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
struct bin_attribute *attr = attr_sd->s_bin_attr.bin_attr;
@@ -435,7 +434,7 @@ static int open(struct inode * inode, struct file * file)
return error;
}
-static int release(struct inode * inode, struct file * file)
+static int release(struct inode *inode, struct file *file)
{
struct bin_buffer *bb = file->private_data;
@@ -481,7 +480,6 @@ void unmap_bin_file(struct sysfs_dirent *attr_sd)
* @kobj: object.
* @attr: attribute descriptor.
*/
-
int sysfs_create_bin_file(struct kobject *kobj,
const struct bin_attribute *attr)
{
@@ -489,19 +487,16 @@ int sysfs_create_bin_file(struct kobject *kobj,
return sysfs_add_file(kobj->sd, &attr->attr, SYSFS_KOBJ_BIN_ATTR);
}
-
+EXPORT_SYMBOL_GPL(sysfs_create_bin_file);
/**
* sysfs_remove_bin_file - remove binary file for object.
* @kobj: object.
* @attr: attribute descriptor.
*/
-
void sysfs_remove_bin_file(struct kobject *kobj,
const struct bin_attribute *attr)
{
sysfs_hash_and_remove(kobj->sd, NULL, attr->attr.name);
}
-
-EXPORT_SYMBOL_GPL(sysfs_create_bin_file);
EXPORT_SYMBOL_GPL(sysfs_remove_bin_file);
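Besides the pointer-style cleanups, the bin.c hunks move each EXPORT_SYMBOL_GPL() so it sits directly below the function it exports instead of being collected at the bottom of the file; the same change is applied to file.c, group.c and symlink.c later in this diff. Schematically, with an invented function:

int example_create_thing(struct kobject *kobj)
{
	return 0;
}
EXPORT_SYMBOL_GPL(example_create_thing);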
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index e068e744dbd..99ec5b40e97 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -46,7 +46,7 @@ static unsigned int sysfs_name_hash(const void *ns, const char *name)
unsigned int len = strlen(name);
while (len--)
hash = partial_name_hash(*name++, hash);
- hash = ( end_name_hash(hash) ^ hash_ptr( (void *)ns, 31 ) );
+ hash = (end_name_hash(hash) ^ hash_ptr((void *)ns, 31));
hash &= 0x7fffffffU;
/* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */
if (hash < 1)
@@ -258,7 +258,7 @@ static void sysfs_free_ino(unsigned int ino)
spin_unlock(&sysfs_ino_lock);
}
-void release_sysfs_dirent(struct sysfs_dirent * sd)
+void release_sysfs_dirent(struct sysfs_dirent *sd)
{
struct sysfs_dirent *parent_sd;
@@ -451,7 +451,7 @@ int __sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
if (!!sysfs_ns_type(acxt->parent_sd) != !!sd->s_ns) {
WARN(1, KERN_WARNING "sysfs: ns %s in '%s' for '%s'\n",
- sysfs_ns_type(acxt->parent_sd)? "required": "invalid",
+ sysfs_ns_type(acxt->parent_sd) ? "required" : "invalid",
acxt->parent_sd->s_name, sd->s_name);
return -EINVAL;
}
@@ -619,7 +619,7 @@ struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
if (!!sysfs_ns_type(parent_sd) != !!ns) {
WARN(1, KERN_WARNING "sysfs: ns %s in '%s' for '%s'\n",
- sysfs_ns_type(parent_sd)? "required": "invalid",
+ sysfs_ns_type(parent_sd) ? "required" : "invalid",
parent_sd->s_name, name);
return NULL;
}
@@ -674,7 +674,7 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
enum kobj_ns_type type, const void *ns, const char *name,
struct sysfs_dirent **p_sd)
{
- umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
+ umode_t mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
struct sysfs_addrm_cxt acxt;
struct sysfs_dirent *sd;
int rc;
@@ -735,9 +735,9 @@ static enum kobj_ns_type sysfs_read_ns_type(struct kobject *kobj)
/**
* sysfs_create_dir - create a directory for an object.
- * @kobj: object we're creating directory for.
+ * @kobj: object we're creating directory for.
*/
-int sysfs_create_dir(struct kobject * kobj)
+int sysfs_create_dir(struct kobject *kobj)
{
enum kobj_ns_type type;
struct sysfs_dirent *parent_sd, *sd;
@@ -764,8 +764,8 @@ int sysfs_create_dir(struct kobject * kobj)
return error;
}
-static struct dentry * sysfs_lookup(struct inode *dir, struct dentry *dentry,
- unsigned int flags)
+static struct dentry *sysfs_lookup(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
{
struct dentry *ret = NULL;
struct dentry *parent = dentry->d_parent;
@@ -857,7 +857,7 @@ static void __sysfs_remove_dir(struct sysfs_dirent *dir_sd)
* what used to be sysfs_rmdir() below, instead of calling separately.
*/
-void sysfs_remove_dir(struct kobject * kobj)
+void sysfs_remove_dir(struct kobject *kobj)
{
struct sysfs_dirent *sd = kobj->sd;
@@ -896,7 +896,9 @@ int sysfs_rename(struct sysfs_dirent *sd,
sd->s_name = new_name;
}
- /* Move to the appropriate place in the appropriate directories rbtree. */
+ /*
+ * Move to the appropriate place in the appropriate directories rbtree.
+ */
sysfs_unlink_sibling(sd);
sysfs_get(new_parent_sd);
sysfs_put(sd->s_parent);
@@ -988,20 +990,21 @@ static struct sysfs_dirent *sysfs_dir_next_pos(const void *ns,
struct sysfs_dirent *parent_sd, ino_t ino, struct sysfs_dirent *pos)
{
pos = sysfs_dir_pos(ns, parent_sd, ino, pos);
- if (pos) do {
- struct rb_node *node = rb_next(&pos->s_rb);
- if (!node)
- pos = NULL;
- else
- pos = to_sysfs_dirent(node);
- } while (pos && pos->s_ns != ns);
+ if (pos)
+ do {
+ struct rb_node *node = rb_next(&pos->s_rb);
+ if (!node)
+ pos = NULL;
+ else
+ pos = to_sysfs_dirent(node);
+ } while (pos && pos->s_ns != ns);
return pos;
}
static int sysfs_readdir(struct file *file, struct dir_context *ctx)
{
struct dentry *dentry = file->f_path.dentry;
- struct sysfs_dirent * parent_sd = dentry->d_fsdata;
+ struct sysfs_dirent *parent_sd = dentry->d_fsdata;
struct sysfs_dirent *pos = file->private_data;
enum kobj_ns_type type;
const void *ns;
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index d2bb7ed8fa7..15ef5eb1366 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -20,7 +20,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/limits.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "sysfs.h"
@@ -45,8 +45,8 @@ struct sysfs_open_dirent {
struct sysfs_buffer {
size_t count;
loff_t pos;
- char * page;
- const struct sysfs_ops * ops;
+ char *page;
+ const struct sysfs_ops *ops;
struct mutex mutex;
int needs_read_fill;
int event;
@@ -59,16 +59,16 @@ struct sysfs_buffer {
* @buffer: data buffer for file.
*
* Allocate @buffer->page, if it hasn't been already, then call the
- * kobject's show() method to fill the buffer with this attribute's
- * data.
+ * kobject's show() method to fill the buffer with this attribute's
+ * data.
* This is called only once, on the file's first read unless an error
* is returned.
*/
-static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer)
+static int fill_read_buffer(struct dentry *dentry, struct sysfs_buffer *buffer)
{
struct sysfs_dirent *attr_sd = dentry->d_fsdata;
struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
- const struct sysfs_ops * ops = buffer->ops;
+ const struct sysfs_ops *ops = buffer->ops;
int ret = 0;
ssize_t count;
@@ -106,7 +106,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
}
/**
- * sysfs_read_file - read an attribute.
+ * sysfs_read_file - read an attribute.
* @file: file pointer.
* @buf: buffer to fill.
* @count: number of bytes to read.
@@ -127,12 +127,12 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
static ssize_t
sysfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
- struct sysfs_buffer * buffer = file->private_data;
+ struct sysfs_buffer *buffer = file->private_data;
ssize_t retval = 0;
mutex_lock(&buffer->mutex);
if (buffer->needs_read_fill || *ppos == 0) {
- retval = fill_read_buffer(file->f_path.dentry,buffer);
+ retval = fill_read_buffer(file->f_path.dentry, buffer);
if (retval)
goto out;
}
@@ -154,9 +154,8 @@ out:
* Allocate @buffer->page if it hasn't been already, then
* copy the user-supplied buffer into it.
*/
-
-static int
-fill_write_buffer(struct sysfs_buffer * buffer, const char __user * buf, size_t count)
+static int fill_write_buffer(struct sysfs_buffer *buffer,
+ const char __user *buf, size_t count)
{
int error;
@@ -167,7 +166,7 @@ fill_write_buffer(struct sysfs_buffer * buffer, const char __user * buf, size_t
if (count >= PAGE_SIZE)
count = PAGE_SIZE - 1;
- error = copy_from_user(buffer->page,buf,count);
+ error = copy_from_user(buffer->page, buf, count);
buffer->needs_read_fill = 1;
/* if buf is assumed to contain a string, terminate it by \0,
so e.g. sscanf() can scan the string easily */
@@ -183,16 +182,15 @@ fill_write_buffer(struct sysfs_buffer * buffer, const char __user * buf, size_t
* @count: number of bytes
*
* Get the correct pointers for the kobject and the attribute we're
- * dealing with, then call the store() method for the attribute,
+ * dealing with, then call the store() method for the attribute,
* passing the buffer that we acquired in fill_write_buffer().
*/
-
-static int
-flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t count)
+static int flush_write_buffer(struct dentry *dentry,
+ struct sysfs_buffer *buffer, size_t count)
{
struct sysfs_dirent *attr_sd = dentry->d_fsdata;
struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
- const struct sysfs_ops * ops = buffer->ops;
+ const struct sysfs_ops *ops = buffer->ops;
int rc;
/* need attr_sd for attr and ops, its parent for kobj */
@@ -219,15 +217,14 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
* then push it to the kobject in flush_write_buffer().
* There is no easy way for us to know if userspace is only doing a partial
* write, so we don't support them. We expect the entire buffer to come
- * on the first write.
+ * on the first write.
* Hint: if you're writing a value, first read the file, modify only the
- * the value you're changing, then write entire buffer back.
+ * value you're changing, then write the entire buffer back.
*/
-
-static ssize_t
-sysfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+static ssize_t sysfs_write_file(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
{
- struct sysfs_buffer * buffer = file->private_data;
+ struct sysfs_buffer *buffer = file->private_data;
ssize_t len;
mutex_lock(&buffer->mutex);
@@ -339,13 +336,14 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
if (kobj->ktype && kobj->ktype->sysfs_ops)
ops = kobj->ktype->sysfs_ops;
else {
- WARN(1, KERN_ERR "missing sysfs attribute operations for "
- "kobject: %s\n", kobject_name(kobj));
+ WARN(1, KERN_ERR
+ "missing sysfs attribute operations for kobject: %s\n",
+ kobject_name(kobj));
goto err_out;
}
/* File needs write support.
- * The inode's perms must say it's ok,
+ * The inode's perms must say it's ok,
* and we must have a store method.
*/
if (file->f_mode & FMODE_WRITE) {
@@ -420,7 +418,7 @@ static int sysfs_release(struct inode *inode, struct file *filp)
*/
static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
{
- struct sysfs_buffer * buffer = filp->private_data;
+ struct sysfs_buffer *buffer = filp->private_data;
struct sysfs_dirent *attr_sd = filp->f_path.dentry->d_fsdata;
struct sysfs_open_dirent *od = attr_sd->s_attr.open;
@@ -518,8 +516,9 @@ static int sysfs_attr_ns(struct kobject *kobj, const struct attribute *attr,
ns = ops->namespace(kobj, attr);
out:
if (err) {
- WARN(1, KERN_ERR "missing sysfs namespace attribute operation for "
- "kobject: %s\n", kobject_name(kobj));
+ WARN(1, KERN_ERR
+ "missing sysfs namespace attribute operation for kobject: %s\n",
+ kobject_name(kobj));
}
*pns = ns;
return err;
@@ -566,17 +565,17 @@ int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
/**
* sysfs_create_file - create an attribute file for an object.
- * @kobj: object we're creating for.
+ * @kobj: object we're creating for.
* @attr: attribute descriptor.
*/
-
-int sysfs_create_file(struct kobject * kobj, const struct attribute * attr)
+int sysfs_create_file(struct kobject *kobj, const struct attribute *attr)
{
BUG_ON(!kobj || !kobj->sd || !attr);
return sysfs_add_file(kobj->sd, attr, SYSFS_KOBJ_ATTR);
}
+EXPORT_SYMBOL_GPL(sysfs_create_file);
int sysfs_create_files(struct kobject *kobj, const struct attribute **ptr)
{
@@ -590,6 +589,7 @@ int sysfs_create_files(struct kobject *kobj, const struct attribute **ptr)
sysfs_remove_file(kobj, ptr[i]);
return err;
}
+EXPORT_SYMBOL_GPL(sysfs_create_files);
/**
* sysfs_add_file_to_group - add an attribute file to a pre-existing group.
@@ -654,7 +654,6 @@ int sysfs_chmod_file(struct kobject *kobj, const struct attribute *attr,
}
EXPORT_SYMBOL_GPL(sysfs_chmod_file);
-
/**
* sysfs_remove_file - remove an object attribute.
* @kobj: object we're acting for.
@@ -662,8 +661,7 @@ EXPORT_SYMBOL_GPL(sysfs_chmod_file);
*
* Hash the attribute name and kill the victim.
*/
-
-void sysfs_remove_file(struct kobject * kobj, const struct attribute * attr)
+void sysfs_remove_file(struct kobject *kobj, const struct attribute *attr)
{
const void *ns;
@@ -672,13 +670,15 @@ void sysfs_remove_file(struct kobject * kobj, const struct attribute * attr)
sysfs_hash_and_remove(kobj->sd, ns, attr->name);
}
+EXPORT_SYMBOL_GPL(sysfs_remove_file);
-void sysfs_remove_files(struct kobject * kobj, const struct attribute **ptr)
+void sysfs_remove_files(struct kobject *kobj, const struct attribute **ptr)
{
int i;
for (i = 0; ptr[i]; i++)
sysfs_remove_file(kobj, ptr[i]);
}
+EXPORT_SYMBOL_GPL(sysfs_remove_files);
/**
* sysfs_remove_file_from_group - remove an attribute file from a group.
@@ -793,9 +793,3 @@ int sysfs_schedule_callback(struct kobject *kobj, void (*func)(void *),
return 0;
}
EXPORT_SYMBOL_GPL(sysfs_schedule_callback);
-
-
-EXPORT_SYMBOL_GPL(sysfs_create_file);
-EXPORT_SYMBOL_GPL(sysfs_remove_file);
-EXPORT_SYMBOL_GPL(sysfs_remove_files);
-EXPORT_SYMBOL_GPL(sysfs_create_files);
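fill_read_buffer() and flush_write_buffer() above ultimately dispatch to the show()/store() callbacks registered through sysfs_ops. For a plain kobject those callbacks are usually supplied through a kobj_attribute; the sketch below is a minimal, hypothetical attribute of the kind these paths end up calling, with the name and backing variable invented.

static int example_value;

static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", example_value);
}

static ssize_t example_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	int ret = kstrtoint(buf, 10, &example_value);

	return ret ? ret : count;
}

static struct kobj_attribute example_attr =
	__ATTR(example, 0644, example_show, example_store);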
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index 09a1a25cd14..5f92cd2f61c 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -3,8 +3,10 @@
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
+ * Copyright (c) 2013 Greg Kroah-Hartman
+ * Copyright (c) 2013 The Linux Foundation
*
- * This file is released undert the GPL v2.
+ * This file is released under the GPL v2.
*
*/
@@ -19,8 +21,8 @@
static void remove_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
const struct attribute_group *grp)
{
- struct attribute *const* attr;
- struct bin_attribute *const* bin_attr;
+ struct attribute *const *attr;
+ struct bin_attribute *const *bin_attr;
if (grp->attrs)
for (attr = grp->attrs; *attr; attr++)
@@ -33,8 +35,8 @@ static void remove_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
static int create_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
const struct attribute_group *grp, int update)
{
- struct attribute *const* attr;
- struct bin_attribute *const* bin_attr;
+ struct attribute *const *attr;
+ struct bin_attribute *const *bin_attr;
int error = 0, i;
if (grp->attrs) {
@@ -129,6 +131,41 @@ int sysfs_create_group(struct kobject *kobj,
{
return internal_create_group(kobj, 0, grp);
}
+EXPORT_SYMBOL_GPL(sysfs_create_group);
+
+/**
+ * sysfs_create_groups - given a directory kobject, create a bunch of attribute groups
+ * @kobj: The kobject to create the group on
+ * @groups: The attribute groups to create, NULL terminated
+ *
+ * This function creates a bunch of attribute groups. If an error occurs when
+ * creating a group, all previously created groups will be removed, unwinding
+ * everything back to the original state when this function was called.
+ * It will explicitly warn and error if any of the attribute files being
+ * created already exist.
+ *
+ * Returns 0 on success or error code from sysfs_create_group on error.
+ */
+int sysfs_create_groups(struct kobject *kobj,
+ const struct attribute_group **groups)
+{
+ int error = 0;
+ int i;
+
+ if (!groups)
+ return 0;
+
+ for (i = 0; groups[i]; i++) {
+ error = sysfs_create_group(kobj, groups[i]);
+ if (error) {
+ while (--i >= 0)
+ sysfs_remove_group(kobj, groups[i]);
+ break;
+ }
+ }
+ return error;
+}
+EXPORT_SYMBOL_GPL(sysfs_create_groups);
/**
* sysfs_update_group - given a directory kobject, update an attribute group
@@ -152,11 +189,18 @@ int sysfs_update_group(struct kobject *kobj,
{
return internal_create_group(kobj, 1, grp);
}
+EXPORT_SYMBOL_GPL(sysfs_update_group);
-
-
-void sysfs_remove_group(struct kobject * kobj,
- const struct attribute_group * grp)
+/**
+ * sysfs_remove_group: remove a group from a kobject
+ * @kobj: kobject to remove the group from
+ * @grp: group to remove
+ *
+ * This function removes a group of attributes from a kobject. The attributes
+ * must previously have been created for this group, otherwise removal will fail.
+ */
+void sysfs_remove_group(struct kobject *kobj,
+ const struct attribute_group *grp)
{
struct sysfs_dirent *dir_sd = kobj->sd;
struct sysfs_dirent *sd;
@@ -164,8 +208,9 @@ void sysfs_remove_group(struct kobject * kobj,
if (grp->name) {
sd = sysfs_get_dirent(dir_sd, NULL, grp->name);
if (!sd) {
- WARN(!sd, KERN_WARNING "sysfs group %p not found for "
- "kobject '%s'\n", grp, kobject_name(kobj));
+ WARN(!sd, KERN_WARNING
+ "sysfs group %p not found for kobject '%s'\n",
+ grp, kobject_name(kobj));
return;
}
} else
@@ -177,6 +222,27 @@ void sysfs_remove_group(struct kobject * kobj,
sysfs_put(sd);
}
+EXPORT_SYMBOL_GPL(sysfs_remove_group);
+
+/**
+ * sysfs_remove_groups - remove a list of groups
+ *
+ * @kobj: The kobject for the groups to be removed from
+ * @groups: NULL terminated list of groups to be removed
+ *
+ * If groups is not NULL, remove the specified groups from the kobject.
+ */
+void sysfs_remove_groups(struct kobject *kobj,
+ const struct attribute_group **groups)
+{
+ int i;
+
+ if (!groups)
+ return;
+ for (i = 0; groups[i]; i++)
+ sysfs_remove_group(kobj, groups[i]);
+}
+EXPORT_SYMBOL_GPL(sysfs_remove_groups);
/**
* sysfs_merge_group - merge files into a pre-existing attribute group.
@@ -273,7 +339,3 @@ void sysfs_remove_link_from_group(struct kobject *kobj, const char *group_name,
}
}
EXPORT_SYMBOL_GPL(sysfs_remove_link_from_group);
-
-EXPORT_SYMBOL_GPL(sysfs_create_group);
-EXPORT_SYMBOL_GPL(sysfs_update_group);
-EXPORT_SYMBOL_GPL(sysfs_remove_group);
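The new sysfs_create_groups()/sysfs_remove_groups() helpers above take a NULL-terminated array of groups and unwind on failure, so callers no longer need to write that loop themselves. A hypothetical caller, with all names invented, might look like this:

static struct attribute *example_attrs[] = {
	&example_attr.attr,	/* a kobj_attribute such as the earlier sketch */
	NULL,
};

static const struct attribute_group example_group = {
	.attrs = example_attrs,
};

static const struct attribute_group *example_groups[] = {
	&example_group,
	NULL,
};

static int example_register(struct kobject *kobj)
{
	return sysfs_create_groups(kobj, example_groups);
}

static void example_unregister(struct kobject *kobj)
{
	sysfs_remove_groups(kobj, example_groups);
}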
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 3e2837a633e..963f910c803 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -10,7 +10,7 @@
* Please see Documentation/filesystems/sysfs.txt for more information.
*/
-#undef DEBUG
+#undef DEBUG
#include <linux/pagemap.h>
#include <linux/namei.h>
@@ -36,7 +36,7 @@ static struct backing_dev_info sysfs_backing_dev_info = {
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
-static const struct inode_operations sysfs_inode_operations ={
+static const struct inode_operations sysfs_inode_operations = {
.permission = sysfs_permission,
.setattr = sysfs_setattr,
.getattr = sysfs_getattr,
@@ -67,7 +67,7 @@ static struct sysfs_inode_attrs *sysfs_init_inode_attrs(struct sysfs_dirent *sd)
return attrs;
}
-int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr * iattr)
+int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr *iattr)
{
struct sysfs_inode_attrs *sd_attrs;
struct iattr *iattrs;
@@ -128,7 +128,8 @@ out:
return error;
}
-static int sysfs_sd_setsecdata(struct sysfs_dirent *sd, void **secdata, u32 *secdata_len)
+static int sysfs_sd_setsecdata(struct sysfs_dirent *sd, void **secdata,
+ u32 *secdata_len)
{
struct sysfs_inode_attrs *iattrs;
void *old_secdata;
@@ -186,13 +187,13 @@ out:
return error;
}
-static inline void set_default_inode_attr(struct inode * inode, umode_t mode)
+static inline void set_default_inode_attr(struct inode *inode, umode_t mode)
{
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
}
-static inline void set_inode_attr(struct inode * inode, struct iattr * iattr)
+static inline void set_inode_attr(struct inode *inode, struct iattr *iattr)
{
inode->i_uid = iattr->ia_uid;
inode->i_gid = iattr->ia_gid;
@@ -220,7 +221,8 @@ static void sysfs_refresh_inode(struct sysfs_dirent *sd, struct inode *inode)
set_nlink(inode, sd->s_dir.subdirs + 2);
}
-int sysfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
+int sysfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat)
{
struct sysfs_dirent *sd = dentry->d_fsdata;
struct inode *inode = dentry->d_inode;
@@ -285,7 +287,7 @@ static void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode)
* RETURNS:
* Pointer to allocated inode on success, NULL on failure.
*/
-struct inode * sysfs_get_inode(struct super_block *sb, struct sysfs_dirent *sd)
+struct inode *sysfs_get_inode(struct super_block *sb, struct sysfs_dirent *sd)
{
struct inode *inode;
@@ -312,7 +314,8 @@ void sysfs_evict_inode(struct inode *inode)
sysfs_put(sd);
}
-int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const void *ns, const char *name)
+int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const void *ns,
+ const char *name)
{
struct sysfs_addrm_cxt acxt;
struct sysfs_dirent *sd;
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index afd83273e6c..fd7ce7a39f9 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -64,7 +64,7 @@ static int sysfs_fill_super(struct super_block *sb, void *data, int silent)
/* instantiate and link root dentry */
root = d_make_root(inode);
if (!root) {
- pr_debug("%s: could not get root dentry!\n",__func__);
+ pr_debug("%s: could not get root dentry!\n", __func__);
return -ENOMEM;
}
root->d_fsdata = &sysfs_root;
diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
index 8c940df97a5..2dd4507d9ed 100644
--- a/fs/sysfs/symlink.c
+++ b/fs/sysfs/symlink.c
@@ -125,6 +125,7 @@ int sysfs_create_link(struct kobject *kobj, struct kobject *target,
{
return sysfs_do_create_link(kobj, target, name, 1);
}
+EXPORT_SYMBOL_GPL(sysfs_create_link);
/**
* sysfs_create_link_nowarn - create symlink between two objects.
@@ -166,8 +167,7 @@ void sysfs_delete_link(struct kobject *kobj, struct kobject *targ,
* @kobj: object we're acting for.
* @name: name of the symlink to remove.
*/
-
-void sysfs_remove_link(struct kobject * kobj, const char * name)
+void sysfs_remove_link(struct kobject *kobj, const char *name)
{
struct sysfs_dirent *parent_sd = NULL;
@@ -178,6 +178,7 @@ void sysfs_remove_link(struct kobject * kobj, const char * name)
sysfs_hash_and_remove(parent_sd, NULL, name);
}
+EXPORT_SYMBOL_GPL(sysfs_remove_link);
/**
* sysfs_rename_link - rename symlink in object's directory.
@@ -223,6 +224,7 @@ out:
sysfs_put(sd);
return result;
}
+EXPORT_SYMBOL_GPL(sysfs_rename_link);
static int sysfs_get_target_path(struct sysfs_dirent *parent_sd,
struct sysfs_dirent *target_sd, char *path)
@@ -276,7 +278,7 @@ static int sysfs_get_target_path(struct sysfs_dirent *parent_sd,
return 0;
}
-static int sysfs_getlink(struct dentry *dentry, char * path)
+static int sysfs_getlink(struct dentry *dentry, char *path)
{
struct sysfs_dirent *sd = dentry->d_fsdata;
struct sysfs_dirent *parent_sd = sd->s_parent;
@@ -295,7 +297,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
int error = -ENOMEM;
unsigned long page = get_zeroed_page(GFP_KERNEL);
if (page) {
- error = sysfs_getlink(dentry, (char *) page);
+ error = sysfs_getlink(dentry, (char *) page);
if (error < 0)
free_page((unsigned long)page);
}
@@ -303,7 +305,8 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
return NULL;
}
-static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
+static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd,
+ void *cookie)
{
char *page = nd_get_link(nd);
if (!IS_ERR(page))
@@ -319,8 +322,3 @@ const struct inode_operations sysfs_symlink_inode_operations = {
.getattr = sysfs_getattr,
.permission = sysfs_permission,
};
-
-
-EXPORT_SYMBOL_GPL(sysfs_create_link);
-EXPORT_SYMBOL_GPL(sysfs_remove_link);
-EXPORT_SYMBOL_GPL(sysfs_rename_link);
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index d1e4043eb0c..b6deca3e301 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -78,7 +78,7 @@ struct sysfs_dirent {
};
unsigned short s_flags;
- umode_t s_mode;
+ umode_t s_mode;
unsigned int s_ino;
struct sysfs_inode_attrs *s_iattr;
};
@@ -123,9 +123,9 @@ do { \
key = &attr->skey; \
\
lockdep_init_map(&sd->dep_map, "s_active", key, 0); \
-} while(0)
+} while (0)
#else
-#define sysfs_dirent_init_lockdep(sd) do {} while(0)
+#define sysfs_dirent_init_lockdep(sd) do {} while (0)
#endif
/*
@@ -186,8 +186,8 @@ int sysfs_create_subdir(struct kobject *kobj, const char *name,
struct sysfs_dirent **p_sd);
void sysfs_remove_subdir(struct sysfs_dirent *sd);
-int sysfs_rename(struct sysfs_dirent *sd,
- struct sysfs_dirent *new_parent_sd, const void *ns, const char *new_name);
+int sysfs_rename(struct sysfs_dirent *sd, struct sysfs_dirent *new_parent_sd,
+ const void *ns, const char *new_name);
static inline struct sysfs_dirent *__sysfs_get(struct sysfs_dirent *sd)
{
@@ -214,10 +214,12 @@ void sysfs_evict_inode(struct inode *inode);
int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr *iattr);
int sysfs_permission(struct inode *inode, int mask);
int sysfs_setattr(struct dentry *dentry, struct iattr *iattr);
-int sysfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
+int sysfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat);
int sysfs_setxattr(struct dentry *dentry, const char *name, const void *value,
- size_t size, int flags);
-int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const void *ns, const char *name);
+ size_t size, int flags);
+int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const void *ns,
+ const char *name);
int sysfs_inode_init(void);
/*
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 9ac4057a86c..839a2bad7f4 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -630,6 +630,12 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
struct udf_sb_info *sbi = UDF_SB(sb);
int error = 0;
+ if (sbi->s_lvid_bh) {
+ int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
+ if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
+ return -EACCES;
+ }
+
uopt.flags = sbi->s_flags;
uopt.uid = sbi->s_uid;
uopt.gid = sbi->s_gid;
@@ -649,12 +655,6 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
sbi->s_dmode = uopt.dmode;
write_unlock(&sbi->s_cred_lock);
- if (sbi->s_lvid_bh) {
- int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
- if (write_rev > UDF_MAX_WRITE_VERSION)
- *flags |= MS_RDONLY;
- }
-
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
goto out_unlock;
@@ -843,27 +843,38 @@ static int udf_find_fileset(struct super_block *sb,
return 1;
}
+/*
+ * Load primary Volume Descriptor Sequence
+ *
+ * Return <0 on error, 0 on success. -EAGAIN has the special meaning that the
+ * next sequence should be tried.
+ */
static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
{
struct primaryVolDesc *pvoldesc;
struct ustr *instr, *outstr;
struct buffer_head *bh;
uint16_t ident;
- int ret = 1;
+ int ret = -ENOMEM;
instr = kmalloc(sizeof(struct ustr), GFP_NOFS);
if (!instr)
- return 1;
+ return -ENOMEM;
outstr = kmalloc(sizeof(struct ustr), GFP_NOFS);
if (!outstr)
goto out1;
bh = udf_read_tagged(sb, block, block, &ident);
- if (!bh)
+ if (!bh) {
+ ret = -EAGAIN;
goto out2;
+ }
- BUG_ON(ident != TAG_IDENT_PVD);
+ if (ident != TAG_IDENT_PVD) {
+ ret = -EIO;
+ goto out_bh;
+ }
pvoldesc = (struct primaryVolDesc *)bh->b_data;
@@ -889,8 +900,9 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
if (udf_CS0toUTF8(outstr, instr))
udf_debug("volSetIdent[] = '%s'\n", outstr->u_name);
- brelse(bh);
ret = 0;
+out_bh:
+ brelse(bh);
out2:
kfree(outstr);
out1:
@@ -947,7 +959,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
if (mdata->s_mirror_fe == NULL) {
udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n");
- goto error_exit;
+ return -EIO;
}
}
@@ -964,23 +976,18 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
addr.logicalBlockNum, addr.partitionReferenceNum);
mdata->s_bitmap_fe = udf_iget(sb, &addr);
-
if (mdata->s_bitmap_fe == NULL) {
if (sb->s_flags & MS_RDONLY)
udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
else {
udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
- goto error_exit;
+ return -EIO;
}
}
}
udf_debug("udf_load_metadata_files Ok\n");
-
return 0;
-
-error_exit:
- return 1;
}
static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
@@ -1069,7 +1076,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
if (!map->s_uspace.s_table) {
udf_debug("cannot load unallocSpaceTable (part %d)\n",
p_index);
- return 1;
+ return -EIO;
}
map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
udf_debug("unallocSpaceTable (part %d) @ %ld\n",
@@ -1079,7 +1086,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
if (phd->unallocSpaceBitmap.extLength) {
struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
if (!bitmap)
- return 1;
+ return -ENOMEM;
map->s_uspace.s_bitmap = bitmap;
bitmap->s_extPosition = le32_to_cpu(
phd->unallocSpaceBitmap.extPosition);
@@ -1102,7 +1109,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
if (!map->s_fspace.s_table) {
udf_debug("cannot load freedSpaceTable (part %d)\n",
p_index);
- return 1;
+ return -EIO;
}
map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
@@ -1113,7 +1120,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
if (phd->freedSpaceBitmap.extLength) {
struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
if (!bitmap)
- return 1;
+ return -ENOMEM;
map->s_fspace.s_bitmap = bitmap;
bitmap->s_extPosition = le32_to_cpu(
phd->freedSpaceBitmap.extPosition);
@@ -1165,7 +1172,7 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
}
if (!sbi->s_vat_inode)
- return 1;
+ return -EIO;
if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
map->s_type_specific.s_virtual.s_start_offset = 0;
@@ -1177,7 +1184,7 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
pos = udf_block_map(sbi->s_vat_inode, 0);
bh = sb_bread(sb, pos);
if (!bh)
- return 1;
+ return -EIO;
vat20 = (struct virtualAllocationTable20 *)bh->b_data;
} else {
vat20 = (struct virtualAllocationTable20 *)
@@ -1195,6 +1202,12 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
return 0;
}
+/*
+ * Load partition descriptor block
+ *
+ * Returns <0 on error, 0 on success, -EAGAIN is special - try next descriptor
+ * sequence.
+ */
static int udf_load_partdesc(struct super_block *sb, sector_t block)
{
struct buffer_head *bh;
@@ -1204,13 +1217,15 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
int i, type1_idx;
uint16_t partitionNumber;
uint16_t ident;
- int ret = 0;
+ int ret;
bh = udf_read_tagged(sb, block, block, &ident);
if (!bh)
- return 1;
- if (ident != TAG_IDENT_PD)
+ return -EAGAIN;
+ if (ident != TAG_IDENT_PD) {
+ ret = 0;
goto out_bh;
+ }
p = (struct partitionDesc *)bh->b_data;
partitionNumber = le16_to_cpu(p->partitionNumber);
@@ -1229,10 +1244,13 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
if (i >= sbi->s_partitions) {
udf_debug("Partition (%d) not found in partition map\n",
partitionNumber);
+ ret = 0;
goto out_bh;
}
ret = udf_fill_partdesc_info(sb, p, i);
+ if (ret < 0)
+ goto out_bh;
/*
* Now rescan for VIRTUAL or METADATA partitions when SPARABLE and
@@ -1249,32 +1267,37 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
break;
}
- if (i >= sbi->s_partitions)
+ if (i >= sbi->s_partitions) {
+ ret = 0;
goto out_bh;
+ }
ret = udf_fill_partdesc_info(sb, p, i);
- if (ret)
+ if (ret < 0)
goto out_bh;
if (map->s_partition_type == UDF_METADATA_MAP25) {
ret = udf_load_metadata_files(sb, i);
- if (ret) {
+ if (ret < 0) {
udf_err(sb, "error loading MetaData partition map %d\n",
i);
goto out_bh;
}
} else {
- ret = udf_load_vat(sb, i, type1_idx);
- if (ret)
- goto out_bh;
/*
- * Mark filesystem read-only if we have a partition with
- * virtual map since we don't handle writing to it (we
- * overwrite blocks instead of relocating them).
+ * If we have a partition with virtual map, we don't handle
+ * writing to it (we overwrite blocks instead of relocating
+ * them).
*/
- sb->s_flags |= MS_RDONLY;
- pr_notice("Filesystem marked read-only because writing to pseudooverwrite partition is not implemented\n");
+ if (!(sb->s_flags & MS_RDONLY)) {
+ ret = -EACCES;
+ goto out_bh;
+ }
+ ret = udf_load_vat(sb, i, type1_idx);
+ if (ret < 0)
+ goto out_bh;
}
+ ret = 0;
out_bh:
/* In case loading failed, we handle cleanup in udf_fill_super */
brelse(bh);
@@ -1340,11 +1363,11 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
uint16_t ident;
struct buffer_head *bh;
unsigned int table_len;
- int ret = 0;
+ int ret;
bh = udf_read_tagged(sb, block, block, &ident);
if (!bh)
- return 1;
+ return -EAGAIN;
BUG_ON(ident != TAG_IDENT_LVD);
lvd = (struct logicalVolDesc *)bh->b_data;
table_len = le32_to_cpu(lvd->mapTableLength);
@@ -1352,7 +1375,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
udf_err(sb, "error loading logical volume descriptor: "
"Partition table too long (%u > %lu)\n", table_len,
sb->s_blocksize - sizeof(*lvd));
- ret = 1;
+ ret = -EIO;
goto out_bh;
}
@@ -1396,11 +1419,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
} else if (!strncmp(upm2->partIdent.ident,
UDF_ID_SPARABLE,
strlen(UDF_ID_SPARABLE))) {
- if (udf_load_sparable_map(sb, map,
- (struct sparablePartitionMap *)gpm) < 0) {
- ret = 1;
+ ret = udf_load_sparable_map(sb, map,
+ (struct sparablePartitionMap *)gpm);
+ if (ret < 0)
goto out_bh;
- }
} else if (!strncmp(upm2->partIdent.ident,
UDF_ID_METADATA,
strlen(UDF_ID_METADATA))) {
@@ -1465,7 +1487,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
}
if (lvd->integritySeqExt.extLength)
udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
-
+ ret = 0;
out_bh:
brelse(bh);
return ret;
@@ -1503,22 +1525,18 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
}
/*
- * udf_process_sequence
- *
- * PURPOSE
- * Process a main/reserve volume descriptor sequence.
- *
- * PRE-CONDITIONS
- * sb Pointer to _locked_ superblock.
- * block First block of first extent of the sequence.
- * lastblock Lastblock of first extent of the sequence.
+ * Process a main/reserve volume descriptor sequence.
+ * @block First block of first extent of the sequence.
+ * @lastblock Last block of first extent of the sequence.
+ * @fileset Where we store the extent containing the root fileset
*
- * HISTORY
- * July 1, 1997 - Andrew E. Mileski
- * Written, tested, and released.
+ * Returns <0 on error, 0 on success. -EAGAIN is special - try next descriptor
+ * sequence
*/
-static noinline int udf_process_sequence(struct super_block *sb, long block,
- long lastblock, struct kernel_lb_addr *fileset)
+static noinline int udf_process_sequence(
+ struct super_block *sb,
+ sector_t block, sector_t lastblock,
+ struct kernel_lb_addr *fileset)
{
struct buffer_head *bh = NULL;
struct udf_vds_record vds[VDS_POS_LENGTH];
@@ -1529,6 +1547,7 @@ static noinline int udf_process_sequence(struct super_block *sb, long block,
uint32_t vdsn;
uint16_t ident;
long next_s = 0, next_e = 0;
+ int ret;
memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
@@ -1543,7 +1562,7 @@ static noinline int udf_process_sequence(struct super_block *sb, long block,
udf_err(sb,
"Block %llu of volume descriptor sequence is corrupted or we could not read it\n",
(unsigned long long)block);
- return 1;
+ return -EAGAIN;
}
/* Process each descriptor (ISO 13346 3/8.3-8.4) */
@@ -1616,14 +1635,19 @@ static noinline int udf_process_sequence(struct super_block *sb, long block,
*/
if (!vds[VDS_POS_PRIMARY_VOL_DESC].block) {
udf_err(sb, "Primary Volume Descriptor not found!\n");
- return 1;
+ return -EAGAIN;
+ }
+ ret = udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block);
+ if (ret < 0)
+ return ret;
+
+ if (vds[VDS_POS_LOGICAL_VOL_DESC].block) {
+ ret = udf_load_logicalvol(sb,
+ vds[VDS_POS_LOGICAL_VOL_DESC].block,
+ fileset);
+ if (ret < 0)
+ return ret;
}
- if (udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block))
- return 1;
-
- if (vds[VDS_POS_LOGICAL_VOL_DESC].block && udf_load_logicalvol(sb,
- vds[VDS_POS_LOGICAL_VOL_DESC].block, fileset))
- return 1;
if (vds[VDS_POS_PARTITION_DESC].block) {
/*
@@ -1632,19 +1656,27 @@ static noinline int udf_process_sequence(struct super_block *sb, long block,
*/
for (block = vds[VDS_POS_PARTITION_DESC].block;
block < vds[VDS_POS_TERMINATING_DESC].block;
- block++)
- if (udf_load_partdesc(sb, block))
- return 1;
+ block++) {
+ ret = udf_load_partdesc(sb, block);
+ if (ret < 0)
+ return ret;
+ }
}
return 0;
}
+/*
+ * Load Volume Descriptor Sequence described by anchor in bh
+ *
+ * Returns <0 on error, 0 on success
+ */
static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
struct kernel_lb_addr *fileset)
{
struct anchorVolDescPtr *anchor;
- long main_s, main_e, reserve_s, reserve_e;
+ sector_t main_s, main_e, reserve_s, reserve_e;
+ int ret;
anchor = (struct anchorVolDescPtr *)bh->b_data;
@@ -1662,18 +1694,26 @@ static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
/* Process the main & reserve sequences */
/* responsible for finding the PartitionDesc(s) */
- if (!udf_process_sequence(sb, main_s, main_e, fileset))
- return 1;
- udf_sb_free_partitions(sb);
- if (!udf_process_sequence(sb, reserve_s, reserve_e, fileset))
- return 1;
+ ret = udf_process_sequence(sb, main_s, main_e, fileset);
+ if (ret != -EAGAIN)
+ return ret;
udf_sb_free_partitions(sb);
- return 0;
+ ret = udf_process_sequence(sb, reserve_s, reserve_e, fileset);
+ if (ret < 0) {
+ udf_sb_free_partitions(sb);
+ /* No sequence was OK, return -EIO */
+ if (ret == -EAGAIN)
+ ret = -EIO;
+ }
+ return ret;
}
/*
* Check whether there is an anchor block in the given block and
* load Volume Descriptor Sequence if so.
+ *
+ * Returns <0 on error, 0 on success, -EAGAIN is special - try next anchor
+ * block
*/
static int udf_check_anchor_block(struct super_block *sb, sector_t block,
struct kernel_lb_addr *fileset)
@@ -1685,33 +1725,40 @@ static int udf_check_anchor_block(struct super_block *sb, sector_t block,
if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) &&
udf_fixed_to_variable(block) >=
sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits)
- return 0;
+ return -EAGAIN;
bh = udf_read_tagged(sb, block, block, &ident);
if (!bh)
- return 0;
+ return -EAGAIN;
if (ident != TAG_IDENT_AVDP) {
brelse(bh);
- return 0;
+ return -EAGAIN;
}
ret = udf_load_sequence(sb, bh, fileset);
brelse(bh);
return ret;
}
-/* Search for an anchor volume descriptor pointer */
-static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock,
- struct kernel_lb_addr *fileset)
+/*
+ * Search for an anchor volume descriptor pointer.
+ *
+ * Returns < 0 on error, 0 on success. -EAGAIN is special - try next set
+ * of anchors.
+ */
+static int udf_scan_anchors(struct super_block *sb, sector_t *lastblock,
+ struct kernel_lb_addr *fileset)
{
sector_t last[6];
int i;
struct udf_sb_info *sbi = UDF_SB(sb);
int last_count = 0;
+ int ret;
/* First try user provided anchor */
if (sbi->s_anchor) {
- if (udf_check_anchor_block(sb, sbi->s_anchor, fileset))
- return lastblock;
+ ret = udf_check_anchor_block(sb, sbi->s_anchor, fileset);
+ if (ret != -EAGAIN)
+ return ret;
}
/*
* according to spec, anchor is in either:
@@ -1720,39 +1767,46 @@ static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock,
* lastblock
* however, if the disc isn't closed, it could be 512.
*/
- if (udf_check_anchor_block(sb, sbi->s_session + 256, fileset))
- return lastblock;
+ ret = udf_check_anchor_block(sb, sbi->s_session + 256, fileset);
+ if (ret != -EAGAIN)
+ return ret;
/*
* The trouble is which block is the last one. Drives often misreport
* this so we try various possibilities.
*/
- last[last_count++] = lastblock;
- if (lastblock >= 1)
- last[last_count++] = lastblock - 1;
- last[last_count++] = lastblock + 1;
- if (lastblock >= 2)
- last[last_count++] = lastblock - 2;
- if (lastblock >= 150)
- last[last_count++] = lastblock - 150;
- if (lastblock >= 152)
- last[last_count++] = lastblock - 152;
+ last[last_count++] = *lastblock;
+ if (*lastblock >= 1)
+ last[last_count++] = *lastblock - 1;
+ last[last_count++] = *lastblock + 1;
+ if (*lastblock >= 2)
+ last[last_count++] = *lastblock - 2;
+ if (*lastblock >= 150)
+ last[last_count++] = *lastblock - 150;
+ if (*lastblock >= 152)
+ last[last_count++] = *lastblock - 152;
for (i = 0; i < last_count; i++) {
if (last[i] >= sb->s_bdev->bd_inode->i_size >>
sb->s_blocksize_bits)
continue;
- if (udf_check_anchor_block(sb, last[i], fileset))
- return last[i];
+ ret = udf_check_anchor_block(sb, last[i], fileset);
+ if (ret != -EAGAIN) {
+ if (!ret)
+ *lastblock = last[i];
+ return ret;
+ }
if (last[i] < 256)
continue;
- if (udf_check_anchor_block(sb, last[i] - 256, fileset))
- return last[i];
+ ret = udf_check_anchor_block(sb, last[i] - 256, fileset);
+ if (ret != -EAGAIN) {
+ if (!ret)
+ *lastblock = last[i];
+ return ret;
+ }
}
/* Finally try block 512 in case media is open */
- if (udf_check_anchor_block(sb, sbi->s_session + 512, fileset))
- return last[0];
- return 0;
+ return udf_check_anchor_block(sb, sbi->s_session + 512, fileset);
}
/*
@@ -1760,54 +1814,59 @@ static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock,
* area specified by it. The function expects sbi->s_lastblock to be the last
* block on the media.
*
- * Return 1 if ok, 0 if not found.
- *
+ * Return <0 on error, 0 if anchor found. -EAGAIN means the anchor
+ * was not found.
*/
static int udf_find_anchor(struct super_block *sb,
struct kernel_lb_addr *fileset)
{
- sector_t lastblock;
struct udf_sb_info *sbi = UDF_SB(sb);
+ sector_t lastblock = sbi->s_last_block;
+ int ret;
- lastblock = udf_scan_anchors(sb, sbi->s_last_block, fileset);
- if (lastblock)
+ ret = udf_scan_anchors(sb, &lastblock, fileset);
+ if (ret != -EAGAIN)
goto out;
/* No anchor found? Try VARCONV conversion of block numbers */
UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
+ lastblock = udf_variable_to_fixed(sbi->s_last_block);
/* Firstly, we try to not convert number of the last block */
- lastblock = udf_scan_anchors(sb,
- udf_variable_to_fixed(sbi->s_last_block),
- fileset);
- if (lastblock)
+ ret = udf_scan_anchors(sb, &lastblock, fileset);
+ if (ret != -EAGAIN)
goto out;
+ lastblock = sbi->s_last_block;
/* Secondly, we try with converted number of the last block */
- lastblock = udf_scan_anchors(sb, sbi->s_last_block, fileset);
- if (!lastblock) {
+ ret = udf_scan_anchors(sb, &lastblock, fileset);
+ if (ret < 0) {
/* VARCONV didn't help. Clear it. */
UDF_CLEAR_FLAG(sb, UDF_FLAG_VARCONV);
- return 0;
}
out:
- sbi->s_last_block = lastblock;
- return 1;
+ if (ret == 0)
+ sbi->s_last_block = lastblock;
+ return ret;
}
/*
* Check Volume Structure Descriptor, find Anchor block and load Volume
- * Descriptor Sequence
+ * Descriptor Sequence.
+ *
+ * Returns < 0 on error, 0 on success. -EAGAIN means the anchor block
+ * was not found.
*/
static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
int silent, struct kernel_lb_addr *fileset)
{
struct udf_sb_info *sbi = UDF_SB(sb);
loff_t nsr_off;
+ int ret;
if (!sb_set_blocksize(sb, uopt->blocksize)) {
if (!silent)
udf_warn(sb, "Bad block size\n");
- return 0;
+ return -EINVAL;
}
sbi->s_last_block = uopt->lastblock;
if (!uopt->novrs) {
@@ -1828,12 +1887,13 @@ static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
/* Look for anchor block and load Volume Descriptor Sequence */
sbi->s_anchor = uopt->anchor;
- if (!udf_find_anchor(sb, fileset)) {
- if (!silent)
+ ret = udf_find_anchor(sb, fileset);
+ if (ret < 0) {
+ if (!silent && ret == -EAGAIN)
udf_warn(sb, "No anchor found\n");
- return 0;
+ return ret;
}
- return 1;
+ return 0;
}
static void udf_open_lvid(struct super_block *sb)
@@ -1939,7 +1999,7 @@ u64 lvid_get_unique_id(struct super_block *sb)
static int udf_fill_super(struct super_block *sb, void *options, int silent)
{
- int ret;
+ int ret = -EINVAL;
struct inode *inode = NULL;
struct udf_options uopt;
struct kernel_lb_addr rootdir, fileset;
@@ -2011,7 +2071,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
} else {
uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
ret = udf_load_vrs(sb, &uopt, silent, &fileset);
- if (!ret && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) {
+ if (ret == -EAGAIN && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) {
if (!silent)
pr_notice("Rescanning with blocksize %d\n",
UDF_DEFAULT_BLOCKSIZE);
@@ -2021,8 +2081,11 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
ret = udf_load_vrs(sb, &uopt, silent, &fileset);
}
}
- if (!ret) {
- udf_warn(sb, "No partition found (1)\n");
+ if (ret < 0) {
+ if (ret == -EAGAIN) {
+ udf_warn(sb, "No partition found (1)\n");
+ ret = -EINVAL;
+ }
goto error_out;
}
@@ -2040,9 +2103,13 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
le16_to_cpu(lvidiu->minUDFReadRev),
UDF_MAX_READ_VERSION);
+ ret = -EINVAL;
+ goto error_out;
+ } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION &&
+ !(sb->s_flags & MS_RDONLY)) {
+ ret = -EACCES;
goto error_out;
- } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION)
- sb->s_flags |= MS_RDONLY;
+ }
sbi->s_udfrev = minUDFWriteRev;
@@ -2054,17 +2121,20 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
if (!sbi->s_partitions) {
udf_warn(sb, "No partition found (2)\n");
+ ret = -EINVAL;
goto error_out;
}
if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
- UDF_PART_FLAG_READ_ONLY) {
- pr_notice("Partition marked readonly; forcing readonly mount\n");
- sb->s_flags |= MS_RDONLY;
+ UDF_PART_FLAG_READ_ONLY &&
+ !(sb->s_flags & MS_RDONLY)) {
+ ret = -EACCES;
+ goto error_out;
}
if (udf_find_fileset(sb, &fileset, &rootdir)) {
udf_warn(sb, "No fileset found\n");
+ ret = -EINVAL;
goto error_out;
}
@@ -2086,6 +2156,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
if (!inode) {
udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n",
rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
+ ret = -EIO;
goto error_out;
}
@@ -2093,6 +2164,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
sb->s_root = d_make_root(inode);
if (!sb->s_root) {
udf_err(sb, "Couldn't allocate root dentry\n");
+ ret = -ENOMEM;
goto error_out;
}
sb->s_maxbytes = MAX_LFS_FILESIZE;
@@ -2113,7 +2185,7 @@ error_out:
kfree(sbi);
sb->s_fs_info = NULL;
- return -EINVAL;
+ return ret;
}
void _udf_err(struct super_block *sb, const char *function,
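Most of the udf/super.c churn above converts helpers from 0/1 results to negative errno values, with -EAGAIN reserved to mean "not found here, try the next candidate", so callers can tell a soft miss from a hard failure. A generic sketch of a caller written against that convention; the probe function is invented and this is not code from the patch:

static int example_scan(struct super_block *sb, const sector_t *candidates,
			int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = example_probe_block(sb, candidates[i]); /* hypothetical */
		if (ret != -EAGAIN)
			return ret;	/* 0 = found, <0 = real error */
	}

	/* nothing matched; callers may translate this into -EIO */
	return -EAGAIN;
}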
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 596ec71da00..e11d654af78 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -86,14 +86,6 @@ xfs_destroy_ioend(
bh->b_end_io(bh, !ioend->io_error);
}
- if (ioend->io_iocb) {
- inode_dio_done(ioend->io_inode);
- if (ioend->io_isasync) {
- aio_complete(ioend->io_iocb, ioend->io_error ?
- ioend->io_error : ioend->io_result, 0);
- }
- }
-
mempool_free(ioend, xfs_ioend_pool);
}
@@ -281,7 +273,6 @@ xfs_alloc_ioend(
* all the I/O from calling the completion routine too early.
*/
atomic_set(&ioend->io_remaining, 1);
- ioend->io_isasync = 0;
ioend->io_isdirect = 0;
ioend->io_error = 0;
ioend->io_list = NULL;
@@ -291,8 +282,6 @@ xfs_alloc_ioend(
ioend->io_buffer_tail = NULL;
ioend->io_offset = 0;
ioend->io_size = 0;
- ioend->io_iocb = NULL;
- ioend->io_result = 0;
ioend->io_append_trans = NULL;
INIT_WORK(&ioend->io_work, xfs_end_io);
@@ -1292,8 +1281,10 @@ __xfs_get_blocks(
if (create || !ISUNWRITTEN(&imap))
xfs_map_buffer(inode, bh_result, &imap, offset);
if (create && ISUNWRITTEN(&imap)) {
- if (direct)
+ if (direct) {
bh_result->b_private = inode;
+ set_buffer_defer_completion(bh_result);
+ }
set_buffer_unwritten(bh_result);
}
}
@@ -1390,9 +1381,7 @@ xfs_end_io_direct_write(
struct kiocb *iocb,
loff_t offset,
ssize_t size,
- void *private,
- int ret,
- bool is_async)
+ void *private)
{
struct xfs_ioend *ioend = iocb->private;
@@ -1414,17 +1403,10 @@ xfs_end_io_direct_write(
ioend->io_offset = offset;
ioend->io_size = size;
- ioend->io_iocb = iocb;
- ioend->io_result = ret;
if (private && size > 0)
ioend->io_type = XFS_IO_UNWRITTEN;
- if (is_async) {
- ioend->io_isasync = 1;
- xfs_finish_ioend(ioend);
- } else {
- xfs_finish_ioend_sync(ioend);
- }
+ xfs_finish_ioend_sync(ioend);
}
STATIC ssize_t
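With the is_async and ret arguments dropped from the direct I/O completion hook (the generic direct I/O code now handles AIO completion itself, as the super.c and get_blocks hunks elsewhere in this diff suggest), a filesystem's end_io callback reduces to the four-argument form used above. A bare-bones sketch under that signature; the conversion helper is invented:

static void example_end_io_direct_write(struct kiocb *iocb, loff_t offset,
					ssize_t size, void *private)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	/* in this sketch, a non-NULL 'private' marks an unwritten extent */
	if (private && size > 0)
		example_convert_unwritten(inode, offset, size);	/* hypothetical */
}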
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index c325abb8d61..f94dd459dff 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -45,7 +45,6 @@ typedef struct xfs_ioend {
unsigned int io_type; /* delalloc / unwritten */
int io_error; /* I/O error code */
atomic_t io_remaining; /* hold count */
- unsigned int io_isasync : 1; /* needs aio_complete */
unsigned int io_isdirect : 1;/* direct I/O */
struct inode *io_inode; /* file being written to */
struct buffer_head *io_buffer_head;/* buffer linked list head */
@@ -54,8 +53,6 @@ typedef struct xfs_ioend {
xfs_off_t io_offset; /* offset in the file */
struct work_struct io_work; /* xfsdatad work queue */
struct xfs_trans *io_append_trans;/* xact. for size update */
- struct kiocb *io_iocb;
- int io_result;
} xfs_ioend_t;
extern const struct address_space_operations xfs_address_space_operations;
diff --git a/fs/xfs/xfs_dinode.h b/fs/xfs/xfs_dinode.h
index 07d735a80a0..e5869b50dc4 100644
--- a/fs/xfs/xfs_dinode.h
+++ b/fs/xfs/xfs_dinode.h
@@ -39,6 +39,9 @@ typedef struct xfs_timestamp {
* There is a very similar struct icdinode in xfs_inode which matches the
* layout of the first 96 bytes of this structure, but is kept in native
* format instead of big endian.
+ *
+ * Note: di_flushiter is only used by v1/2 inodes - it's effectively a zeroed
+ * padding field for v3 inodes.
*/
typedef struct xfs_dinode {
__be16 di_magic; /* inode magic # = XFS_DINODE_MAGIC */
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index b78481f99d9..bb262c25c8d 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -896,7 +896,6 @@ xfs_dinode_to_disk(
to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
- to->di_flushiter = cpu_to_be16(from->di_flushiter);
to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
@@ -924,6 +923,9 @@ xfs_dinode_to_disk(
to->di_lsn = cpu_to_be64(from->di_lsn);
memcpy(to->di_pad2, from->di_pad2, sizeof(to->di_pad2));
uuid_copy(&to->di_uuid, &from->di_uuid);
+ to->di_flushiter = 0;
+ } else {
+ to->di_flushiter = cpu_to_be16(from->di_flushiter);
}
}
@@ -1029,10 +1031,14 @@ xfs_dinode_calc_crc(
/*
* Read the disk inode attributes into the in-core inode structure.
*
- * If we are initialising a new inode and we are not utilising the
- * XFS_MOUNT_IKEEP inode cluster mode, we can simple build the new inode core
- * with a random generation number. If we are keeping inodes around, we need to
- * read the inode cluster to get the existing generation number off disk.
+ * For version 5 superblocks, if we are initialising a new inode and we are not
+ * utilising the XFS_MOUNT_IKEEP inode cluster mode, we can simply build the new
+ * inode core with a random generation number. If we are keeping inodes around,
+ * we need to read the inode cluster to get the existing generation number off
+ * disk. Further, if we are using version 4 superblocks (i.e. v1/v2 inode
+ * format) then log recovery is dependent on the di_flushiter field being
+ * initialised from the current on-disk value and hence we must also read the
+ * inode off disk.
*/
int
xfs_iread(
@@ -1054,6 +1060,7 @@ xfs_iread(
/* shortcut IO on inode allocation if possible */
if ((iget_flags & XFS_IGET_CREATE) &&
+ xfs_sb_version_hascrc(&mp->m_sb) &&
!(mp->m_flags & XFS_MOUNT_IKEEP)) {
/* initialise the on-disk inode core */
memset(&ip->i_d, 0, sizeof(ip->i_d));
@@ -2882,12 +2889,18 @@ xfs_iflush_int(
__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
goto corrupt_out;
}
+
/*
- * bump the flush iteration count, used to detect flushes which
- * postdate a log record during recovery. This is redundant as we now
- * log every change and hence this can't happen. Still, it doesn't hurt.
+ * Inode item log recovery for v1/v2 inodes is dependent on the
+ * di_flushiter count for correct sequencing. We bump the flush
+ * iteration count so we can detect flushes which postdate a log record
+ * during recovery. This is redundant as we now log every change and
+ * hence this can't happen, but we still need to do it to ensure
+ * backwards compatibility with old kernels that predate logging all
+ * inode changes.
*/
- ip->i_d.di_flushiter++;
+ if (ip->i_d.di_version < 3)
+ ip->i_d.di_flushiter++;
/*
* Copy the dirty parts of the inode into the on-disk
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 6fcc910a50b..7681b19aa5d 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -2592,8 +2592,16 @@ xlog_recover_inode_pass2(
goto error;
}
- /* Skip replay when the on disk inode is newer than the log one */
- if (dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
+ /*
+ * di_flushiter is only valid for v1/2 inodes. All changes for v3 inodes
+ * are transactional and if ordering is necessary we can determine that
+ * more accurately by the LSN field in the V3 inode core. Don't trust
+ * the inode versions as we might be changing them here - use the
+ * superblock flag to determine whether we need to look at di_flushiter
+ * to skip replay when the on-disk inode is newer than the log one.
+ */
+ if (!xfs_sb_version_hascrc(&mp->m_sb) &&
+ dicp->di_flushiter < be16_to_cpu(dip->di_flushiter)) {
/*
* Deal with the wrap case, DI_MAX_FLUSH is less
* than smaller numbers
@@ -2608,6 +2616,7 @@ xlog_recover_inode_pass2(
goto error;
}
}
+
/* Take the opportunity to reset the flush iteration count */
dicp->di_flushiter = 0;
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 56e6b68c8d2..02e113bb8b7 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -56,6 +56,16 @@ acpi_evaluate_hotplug_ost(acpi_handle handle, u32 source_event,
acpi_status
acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld);
+
+bool acpi_has_method(acpi_handle handle, char *name);
+acpi_status acpi_execute_simple_method(acpi_handle handle, char *method,
+ u64 arg);
+acpi_status acpi_evaluate_ej0(acpi_handle handle);
+acpi_status acpi_evaluate_lck(acpi_handle handle, int lock);
+bool acpi_ata_match(acpi_handle handle);
+bool acpi_bay_match(acpi_handle handle);
+bool acpi_dock_match(acpi_handle handle);
+
#ifdef CONFIG_ACPI
#include <linux/proc_fs.h>
@@ -157,9 +167,8 @@ struct acpi_device_flags {
u32 removable:1;
u32 ejectable:1;
u32 power_manageable:1;
- u32 eject_pending:1;
u32 match_driver:1;
- u32 reserved:26;
+ u32 reserved:27;
};
/* File System */
@@ -274,15 +283,12 @@ struct acpi_device_wakeup {
};
struct acpi_device_physical_node {
- u8 node_id;
+ unsigned int node_id;
struct list_head node;
struct device *dev;
bool put_online:1;
};
-/* set maximum of physical nodes to 32 for expansibility */
-#define ACPI_MAX_PHYSICAL_NODE 32
-
/* Device */
struct acpi_device {
int device_type;
@@ -302,10 +308,9 @@ struct acpi_device {
struct acpi_driver *driver;
void *driver_data;
struct device dev;
- u8 physical_node_count;
+ unsigned int physical_node_count;
struct list_head physical_node_list;
struct mutex physical_node_lock;
- DECLARE_BITMAP(physical_node_id_bitmap, ACPI_MAX_PHYSICAL_NODE);
struct list_head power_dependent;
void (*remove)(struct acpi_device *);
};
@@ -356,14 +361,11 @@ extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
extern int register_acpi_notifier(struct notifier_block *);
extern int unregister_acpi_notifier(struct notifier_block *);
-extern int register_acpi_bus_notifier(struct notifier_block *nb);
-extern void unregister_acpi_bus_notifier(struct notifier_block *nb);
/*
* External Functions
*/
int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device);
-void acpi_bus_data_handler(acpi_handle handle, void *context);
acpi_status acpi_bus_get_status_handle(acpi_handle handle,
unsigned long long *sta);
int acpi_bus_get_status(struct acpi_device *device);
@@ -383,15 +385,6 @@ bool acpi_bus_can_wakeup(acpi_handle handle);
static inline bool acpi_bus_can_wakeup(acpi_handle handle) { return false; }
#endif
-#ifdef CONFIG_ACPI_PROC_EVENT
-int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data);
-int acpi_bus_generate_proc_event4(const char *class, const char *bid, u8 type, int data);
-int acpi_bus_receive_event(struct acpi_bus_event *event);
-#else
-static inline int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data)
- { return 0; }
-#endif
-
void acpi_scan_lock_acquire(void);
void acpi_scan_lock_release(void);
int acpi_scan_add_handler(struct acpi_scan_handler *handler);
@@ -445,7 +438,11 @@ struct acpi_pci_root {
};
/* helper */
-acpi_handle acpi_get_child(acpi_handle, u64);
+acpi_handle acpi_find_child(acpi_handle, u64, bool);
+static inline acpi_handle acpi_get_child(acpi_handle handle, u64 addr)
+{
+ return acpi_find_child(handle, addr, false);
+}
int acpi_is_root_bridge(acpi_handle);
struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
#define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)ACPI_HANDLE(dev))
@@ -478,7 +475,8 @@ static inline int acpi_pm_device_sleep_state(struct device *d, int *p, int m)
if (p)
*p = ACPI_STATE_D0;
- return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3) ? m : ACPI_STATE_D0;
+ return (m >= ACPI_STATE_D0 && m <= ACPI_STATE_D3_COLD) ?
+ m : ACPI_STATE_D0;
}
static inline void acpi_dev_pm_add_dependent(acpi_handle handle,
struct device *depdev) {}
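
For context, a minimal sketch (not part of the patch) of how ACPI driver code might use the acpi_has_method()/acpi_execute_simple_method() helpers declared above; the "_BCM" method name and the error mapping are illustrative assumptions:

#include <linux/acpi.h>
#include <linux/errno.h>

/* Probe for an optional control method, then evaluate it with one argument. */
static int example_set_level(acpi_handle handle, u64 level)
{
	acpi_status status;

	if (!acpi_has_method(handle, "_BCM"))
		return -ENODEV;		/* method not present on this device */

	status = acpi_execute_simple_method(handle, "_BCM", level);
	return ACPI_FAILURE(status) ? -EIO : 0;
}
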
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index b420939f5eb..1cedfcb1bd8 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -113,14 +113,13 @@ void pci_acpi_crs_quirks(void);
Dock Station
-------------------------------------------------------------------------- */
struct acpi_dock_ops {
+ acpi_notify_handler fixup;
acpi_notify_handler handler;
acpi_notify_handler uevent;
};
-#if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
+#ifdef CONFIG_ACPI_DOCK
extern int is_dock_device(acpi_handle handle);
-extern int register_dock_notifier(struct notifier_block *nb);
-extern void unregister_dock_notifier(struct notifier_block *nb);
extern int register_hotplug_dock_device(acpi_handle handle,
const struct acpi_dock_ops *ops,
void *context,
@@ -132,13 +131,6 @@ static inline int is_dock_device(acpi_handle handle)
{
return 0;
}
-static inline int register_dock_notifier(struct notifier_block *nb)
-{
- return -ENODEV;
-}
-static inline void unregister_dock_notifier(struct notifier_block *nb)
-{
-}
static inline int register_hotplug_dock_device(acpi_handle handle,
const struct acpi_dock_ops *ops,
void *context,
@@ -150,6 +142,6 @@ static inline int register_hotplug_dock_device(acpi_handle handle,
static inline void unregister_hotplug_dock_device(acpi_handle handle)
{
}
-#endif
+#endif /* CONFIG_ACPI_DOCK */
#endif /*__ACPI_DRIVERS_H__*/
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 22d497ee6ef..85bfdbe1780 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20130517
+#define ACPI_CA_VERSION 0x20130725
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -147,6 +147,8 @@ acpi_status acpi_install_interface(acpi_string interface_name);
acpi_status acpi_remove_interface(acpi_string interface_name);
+acpi_status acpi_update_interfaces(u8 action);
+
u32
acpi_check_address_range(acpi_adr_space_type space_id,
acpi_physical_address address,
@@ -210,8 +212,8 @@ acpi_status
acpi_walk_namespace(acpi_object_type type,
acpi_handle start_object,
u32 max_depth,
- acpi_walk_callback pre_order_visit,
- acpi_walk_callback post_order_visit,
+ acpi_walk_callback descending_callback,
+ acpi_walk_callback ascending_callback,
void *context, void **return_value);
acpi_status
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 22b03c9286e..b748aefce92 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -668,13 +668,6 @@ typedef u32 acpi_event_status;
#define ACPI_EVENT_FLAG_SET (acpi_event_status) 0x04
#define ACPI_EVENT_FLAG_HANDLE (acpi_event_status) 0x08
-/*
- * General Purpose Events (GPE)
- */
-#define ACPI_GPE_INVALID 0xFF
-#define ACPI_GPE_MAX 0xFF
-#define ACPI_NUM_GPE 256
-
/* Actions for acpi_set_gpe, acpi_gpe_wakeup, acpi_hw_low_set_gpe */
#define ACPI_GPE_ENABLE 0
@@ -1144,7 +1137,19 @@ struct acpi_memory_list {
#endif
};
-/* Definitions for _OSI support */
+/* Definitions of _OSI support */
+
+#define ACPI_VENDOR_STRINGS 0x01
+#define ACPI_FEATURE_STRINGS 0x02
+#define ACPI_ENABLE_INTERFACES 0x00
+#define ACPI_DISABLE_INTERFACES 0x04
+
+#define ACPI_DISABLE_ALL_VENDOR_STRINGS (ACPI_DISABLE_INTERFACES | ACPI_VENDOR_STRINGS)
+#define ACPI_DISABLE_ALL_FEATURE_STRINGS (ACPI_DISABLE_INTERFACES | ACPI_FEATURE_STRINGS)
+#define ACPI_DISABLE_ALL_STRINGS (ACPI_DISABLE_INTERFACES | ACPI_VENDOR_STRINGS | ACPI_FEATURE_STRINGS)
+#define ACPI_ENABLE_ALL_VENDOR_STRINGS (ACPI_ENABLE_INTERFACES | ACPI_VENDOR_STRINGS)
+#define ACPI_ENABLE_ALL_FEATURE_STRINGS (ACPI_ENABLE_INTERFACES | ACPI_FEATURE_STRINGS)
+#define ACPI_ENABLE_ALL_STRINGS (ACPI_ENABLE_INTERFACES | ACPI_VENDOR_STRINGS | ACPI_FEATURE_STRINGS)
#define ACPI_OSI_WIN_2000 0x01
#define ACPI_OSI_WIN_XP 0x02
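
The new _OSI control masks above pair with acpi_update_interfaces() added to acpixf.h earlier in this patch. A sketch, illustrative only, of platform quirk code hiding all vendor strings from _OSI queries:

#include <linux/acpi.h>
#include <linux/printk.h>

static void example_hide_vendor_strings(void)
{
	acpi_status status;

	/* Disable every registered vendor interface string in one call. */
	status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
	if (ACPI_FAILURE(status))
		pr_warn("example: failed to update _OSI interfaces\n");
}
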
diff --git a/include/acpi/video.h b/include/acpi/video.h
index b26dc4fb7ba..61109f2609f 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -17,21 +17,12 @@ struct acpi_device;
#define ACPI_VIDEO_DISPLAY_LEGACY_TV 0x0200
#if (defined CONFIG_ACPI_VIDEO || defined CONFIG_ACPI_VIDEO_MODULE)
-extern int __acpi_video_register(bool backlight_quirks);
-static inline int acpi_video_register(void)
-{
- return __acpi_video_register(false);
-}
-static inline int acpi_video_register_with_quirks(void)
-{
- return __acpi_video_register(true);
-}
+extern int acpi_video_register(void);
extern void acpi_video_unregister(void);
extern int acpi_video_get_edid(struct acpi_device *device, int type,
int device_id, void **edid);
#else
static inline int acpi_video_register(void) { return 0; }
-static inline int acpi_video_register_with_quirks(void) { return 0; }
static inline void acpi_video_unregister(void) { return; }
static inline int acpi_video_get_edid(struct acpi_device *device, int type,
int device_id, void **edid)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 2f47ade1b56..f330d28e4d0 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -208,10 +208,6 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
-#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
-#define page_test_and_clear_young(pfn) (0)
-#endif
-
#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
#endif
@@ -417,6 +413,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
return pmd;
}
+
+static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline int pte_swp_soft_dirty(pte_t pte)
+{
+ return 0;
+}
+
+static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+ return pte;
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+ return 0;
+}
#endif
#ifndef __HAVE_PFNMAP_TRACKING
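
The no-op stubs above let generic mm code propagate the soft-dirty bit without per-architecture #ifdefs, since architectures lacking the feature compile them away. A sketch of the calling pattern (the function name is hypothetical):

#include <linux/swapops.h>
#include <asm/pgtable.h>

/* Carry the soft-dirty bit across a swap-out; a no-op where unsupported. */
static pte_t example_make_swap_pte(swp_entry_t entry, pte_t old_pte)
{
	pte_t pte = swp_entry_to_pte(entry);

	if (pte_soft_dirty(old_pte))
		pte = pte_swp_mksoft_dirty(pte);
	return pte;
}
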
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 13821c339a4..5672d7ea1fa 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -112,7 +112,7 @@ struct mmu_gather {
#define HAVE_GENERIC_MMU_GATHER
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start,
unsigned long end);
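
With the signature change above, callers of the mmu_gather API now supply the virtual address range when the gather is set up as well as when it is finished. A minimal sketch of the updated calling sequence (the actual page-table teardown is elided):

#include <linux/mm_types.h>
#include <asm/tlb.h>

static void example_unmap_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);	/* range supplied up front */
	/* ... page-table teardown for [start, end) would go here ... */
	tlb_finish_mmu(&tlb, start, end);	/* same range passed on finish */
}
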
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 69732d279e8..83e2c31e8b0 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -122,8 +122,12 @@
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .; \
*(__trace_printk_fmt) /* Trace_printk fmt' pointer */ \
VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
+#define TRACEPOINT_STR() VMLINUX_SYMBOL(__start___tracepoint_str) = .; \
+	 *(__tracepoint_str) /* Tracepoint string pointers */ \
+ VMLINUX_SYMBOL(__stop___tracepoint_str) = .;
#else
#define TRACE_PRINTKS()
+#define TRACEPOINT_STR()
#endif
#ifdef CONFIG_FTRACE_SYSCALLS
@@ -190,7 +194,8 @@
VMLINUX_SYMBOL(__stop___verbose) = .; \
LIKELY_PROFILE() \
BRANCH_PROFILE() \
- TRACE_PRINTKS()
+ TRACE_PRINTKS() \
+ TRACEPOINT_STR()
/*
* Data section helpers
diff --git a/include/asm-generic/vtime.h b/include/asm-generic/vtime.h
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/include/asm-generic/vtime.h
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index c463ce990c4..93b7f96f9c5 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -23,16 +23,20 @@
#define ARCH_TIMER_CTRL_IT_MASK (1 << 1)
#define ARCH_TIMER_CTRL_IT_STAT (1 << 2)
-#define ARCH_TIMER_REG_CTRL 0
-#define ARCH_TIMER_REG_TVAL 1
+enum arch_timer_reg {
+ ARCH_TIMER_REG_CTRL,
+ ARCH_TIMER_REG_TVAL,
+};
#define ARCH_TIMER_PHYS_ACCESS 0
#define ARCH_TIMER_VIRT_ACCESS 1
+#define ARCH_TIMER_MEM_PHYS_ACCESS 2
+#define ARCH_TIMER_MEM_VIRT_ACCESS 3
#ifdef CONFIG_ARM_ARCH_TIMER
extern u32 arch_timer_get_rate(void);
-extern u64 arch_timer_read_counter(void);
+extern u64 (*arch_timer_read_counter)(void);
extern struct timecounter *arch_timer_get_timecounter(void);
#else
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 12083dc862a..290734191f7 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -45,7 +45,6 @@
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
-#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/platform_device.h>
@@ -62,20 +61,18 @@
#endif
#include <asm/mman.h>
#include <asm/uaccess.h>
-#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
#include <linux/types.h>
#include <linux/agp_backend.h>
-#endif
#include <linux/workqueue.h>
#include <linux/poll.h>
#include <asm/pgalloc.h>
#include <drm/drm.h>
#include <drm/drm_sarea.h>
+#include <drm/drm_vma_manager.h>
#include <linux/idr.h>
#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
-#define __OS_HAS_MTRR (defined(CONFIG_MTRR))
struct module;
@@ -140,19 +137,15 @@ int drm_err(const char *func, const char *format, ...);
/* driver capabilities and requirements mask */
#define DRIVER_USE_AGP 0x1
#define DRIVER_REQUIRE_AGP 0x2
-#define DRIVER_USE_MTRR 0x4
#define DRIVER_PCI_DMA 0x8
#define DRIVER_SG 0x10
#define DRIVER_HAVE_DMA 0x20
#define DRIVER_HAVE_IRQ 0x40
#define DRIVER_IRQ_SHARED 0x80
-#define DRIVER_IRQ_VBL 0x100
-#define DRIVER_DMA_QUEUE 0x200
-#define DRIVER_FB_DMA 0x400
-#define DRIVER_IRQ_VBL2 0x800
#define DRIVER_GEM 0x1000
#define DRIVER_MODESET 0x2000
#define DRIVER_PRIME 0x4000
+#define DRIVER_RENDER 0x8000
#define DRIVER_BUS_PCI 0x1
#define DRIVER_BUS_PLATFORM 0x2
@@ -168,13 +161,7 @@ int drm_err(const char *func, const char *format, ...);
#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */
#define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */
#define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */
-#define DRM_LOOPING_LIMIT 5000000
-#define DRM_TIME_SLICE (HZ/20) /**< Time slice for GLXContexts */
-#define DRM_LOCK_SLICE 1 /**< Time slice for lock, in jiffies */
-
-#define DRM_FLAG_DEBUG 0x01
-#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
#define DRM_MAP_HASH_OFFSET 0x10000000
/*@}*/
@@ -263,9 +250,6 @@ int drm_err(const char *func, const char *format, ...);
#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x)
-#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1))
-#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x))
-
#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
/**
@@ -307,6 +291,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
#define DRM_ROOT_ONLY 0x4
#define DRM_CONTROL_ALLOW 0x8
#define DRM_UNLOCKED 0x10
+#define DRM_RENDER_ALLOW 0x20
struct drm_ioctl_desc {
unsigned int cmd;
@@ -587,7 +572,6 @@ struct drm_map_list {
struct drm_local_map *map; /**< mapping */
uint64_t user_token;
struct drm_master *master;
- struct drm_mm_node *file_offset_node; /**< fake offset */
};
/**
@@ -622,8 +606,7 @@ struct drm_ati_pcigart_info {
* GEM specific mm private for tracking GEM objects
*/
struct drm_gem_mm {
- struct drm_mm offset_manager; /**< Offset mgmt for buffer objects */
- struct drm_open_hash offset_hash; /**< User token hash table for maps */
+ struct drm_vma_offset_manager vma_manager;
};
/**
@@ -634,8 +617,16 @@ struct drm_gem_object {
/** Reference count of this object */
struct kref refcount;
- /** Handle count of this object. Each handle also holds a reference */
- atomic_t handle_count; /* number of handles on this object */
+ /**
+ * handle_count - gem file_priv handle count of this object
+ *
+	 * Each handle also holds a reference. Note that when the handle_count
+	 * drops to 0, any global names (e.g. the id in the flink namespace)
+	 * will be cleared.
+ *
+ * Protected by dev->object_name_lock.
+	 */
+ unsigned handle_count;
/** Related drm device */
struct drm_device *dev;
@@ -644,7 +635,7 @@ struct drm_gem_object {
struct file *filp;
/* Mapping info for this object */
- struct drm_map_list map_list;
+ struct drm_vma_offset_node vma_node;
/**
* Size of the object, in bytes. Immutable over the object's
@@ -678,10 +669,32 @@ struct drm_gem_object {
void *driver_private;
- /* dma buf exported from this GEM object */
- struct dma_buf *export_dma_buf;
+ /**
+ * dma_buf - dma buf associated with this GEM object
+ *
+ * Pointer to the dma-buf associated with this gem object (either
+ * through importing or exporting). We break the resulting reference
+ * loop when the last gem handle for this object is released.
+ *
+ * Protected by obj->object_name_lock
+ */
+ struct dma_buf *dma_buf;
- /* dma buf attachment backing this object */
+ /**
+ * import_attach - dma buf attachment backing this object
+ *
+ * Any foreign dma_buf imported as a gem object has this set to the
+ * attachment point for the device. This is invariant over the lifetime
+ * of a gem object.
+ *
+ * The driver's ->gem_free_object callback is responsible for cleaning
+ * up the dma_buf attachment and references acquired at import time.
+ *
+	 * Note that the drm gem/prime core does not depend upon drivers setting
+	 * this field any more, so drivers where this doesn't make sense
+	 * (e.g. virtual devices or a DisplayLink device behind a USB bus) can
+	 * simply leave it as NULL.
+ */
struct dma_buf_attachment *import_attach;
};
@@ -737,6 +750,7 @@ struct drm_bus {
int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p);
/* hooks that are for PCI */
int (*agp_init)(struct drm_device *dev);
+ void (*agp_destroy)(struct drm_device *dev);
};
@@ -885,8 +899,6 @@ struct drm_driver {
void (*irq_preinstall) (struct drm_device *dev);
int (*irq_postinstall) (struct drm_device *dev);
void (*irq_uninstall) (struct drm_device *dev);
- void (*set_version) (struct drm_device *dev,
- struct drm_set_version *sv);
/* Master routines */
int (*master_create)(struct drm_device *dev, struct drm_master *master);
@@ -966,7 +978,7 @@ struct drm_driver {
u32 driver_features;
int dev_priv_size;
- struct drm_ioctl_desc *ioctls;
+ const struct drm_ioctl_desc *ioctls;
int num_ioctls;
const struct file_operations *fops;
union {
@@ -1037,8 +1049,6 @@ struct drm_minor {
struct device kdev; /**< Linux device */
struct drm_device *dev;
- struct proc_dir_entry *proc_root; /**< proc directory entry */
- struct drm_info_node proc_nodes;
struct dentry *debugfs_root;
struct list_head debugfs_list;
@@ -1131,12 +1141,7 @@ struct drm_device {
/*@{ */
int irq_enabled; /**< True if irq handler is enabled */
__volatile__ long context_flag; /**< Context swapping flag */
- __volatile__ long interrupt_flag; /**< Interruption handler flag */
- __volatile__ long dma_flag; /**< DMA dispatch flag */
- wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */
- int last_checked; /**< Last context checked for DMA */
int last_context; /**< Last current context */
- unsigned long last_switch; /**< jiffies at last context switch */
/*@} */
struct work_struct work;
@@ -1174,12 +1179,6 @@ struct drm_device {
spinlock_t event_lock;
/*@} */
- cycles_t ctx_start;
- cycles_t lck_start;
-
- struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */
- wait_queue_head_t buf_readers; /**< Processes waiting to read */
- wait_queue_head_t buf_writers; /**< Processes waiting to ctx switch */
struct drm_agp_head *agp; /**< AGP data */
@@ -1207,12 +1206,13 @@ struct drm_device {
unsigned int agp_buffer_token;
struct drm_minor *control; /**< Control node for card */
struct drm_minor *primary; /**< render type primary screen head */
+ struct drm_minor *render; /**< render node for card */
struct drm_mode_config mode_config; /**< Current mode config */
/** \name GEM information */
/*@{ */
- spinlock_t object_name_lock;
+ struct mutex object_name_lock;
struct idr object_name_idr;
/*@} */
int switch_power_state;
@@ -1223,6 +1223,7 @@ struct drm_device {
#define DRM_SWITCH_POWER_ON 0
#define DRM_SWITCH_POWER_OFF 1
#define DRM_SWITCH_POWER_CHANGING 2
+#define DRM_SWITCH_POWER_DYNAMIC_OFF 3
static __inline__ int drm_core_check_feature(struct drm_device *dev,
int feature)
@@ -1235,25 +1236,6 @@ static inline int drm_dev_to_irq(struct drm_device *dev)
return dev->driver->bus->get_irq(dev);
}
-
-#if __OS_HAS_AGP
-static inline int drm_core_has_AGP(struct drm_device *dev)
-{
- return drm_core_check_feature(dev, DRIVER_USE_AGP);
-}
-#else
-#define drm_core_has_AGP(dev) (0)
-#endif
-
-#if __OS_HAS_MTRR
-static inline int drm_core_has_MTRR(struct drm_device *dev)
-{
- return drm_core_check_feature(dev, DRIVER_USE_MTRR);
-}
-#else
-#define drm_core_has_MTRR(dev) (0)
-#endif
-
static inline void drm_device_set_unplugged(struct drm_device *dev)
{
smp_wmb();
@@ -1272,6 +1254,11 @@ static inline bool drm_modeset_is_locked(struct drm_device *dev)
return mutex_is_locked(&dev->mode_config.mutex);
}
+static inline bool drm_is_render_client(struct drm_file *file_priv)
+{
+ return file_priv->minor->type == DRM_MINOR_RENDER;
+}
+
/******************************************************************/
/** \name Internal function definitions */
/*@{*/
@@ -1287,7 +1274,6 @@ extern int drm_lastclose(struct drm_device *dev);
extern struct mutex drm_global_mutex;
extern int drm_open(struct inode *inode, struct file *filp);
extern int drm_stub_open(struct inode *inode, struct file *filp);
-extern int drm_fasync(int fd, struct file *filp, int on);
extern ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
extern int drm_release(struct inode *inode, struct file *filp);
@@ -1301,14 +1287,6 @@ extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
/* Memory management support (drm_memory.h) */
#include <drm/drm_memory.h>
-extern void drm_free_agp(DRM_AGP_MEM * handle, int pages);
-extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
-extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
- struct page **pages,
- unsigned long num_pages,
- uint32_t gtt_offset,
- uint32_t type);
-extern int drm_unbind_agp(DRM_AGP_MEM * handle);
/* Misc. IOCTL support (drm_ioctl.h) */
extern int drm_irq_by_busid(struct drm_device *dev, void *data,
@@ -1335,8 +1313,6 @@ extern int drm_resctx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_addctx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int drm_modctx(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
extern int drm_getctx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_switchctx(struct drm_device *dev, void *data,
@@ -1346,9 +1322,10 @@ extern int drm_newctx(struct drm_device *dev, void *data,
extern int drm_rmctx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int drm_ctxbitmap_init(struct drm_device *dev);
-extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
-extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
+extern void drm_legacy_ctxbitmap_init(struct drm_device *dev);
+extern void drm_legacy_ctxbitmap_cleanup(struct drm_device *dev);
+extern void drm_legacy_ctxbitmap_release(struct drm_device *dev,
+ struct drm_file *file_priv);
extern int drm_setsareactx(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1405,11 +1382,12 @@ extern int drm_freebufs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int drm_mapbufs(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int drm_order(unsigned long size);
+extern int drm_dma_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* DMA support (drm_dma.h) */
-extern int drm_dma_setup(struct drm_device *dev);
-extern void drm_dma_takedown(struct drm_device *dev);
+extern int drm_legacy_dma_setup(struct drm_device *dev);
+extern void drm_legacy_dma_takedown(struct drm_device *dev);
extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
extern void drm_core_reclaim_buffers(struct drm_device *dev,
struct drm_file *filp);
@@ -1423,7 +1401,6 @@ extern int drm_irq_uninstall(struct drm_device *dev);
extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *filp);
-extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
struct timeval *vblanktime);
@@ -1465,31 +1442,8 @@ extern int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* AGP/GART support (drm_agpsupport.h) */
-extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
-extern int drm_agp_acquire(struct drm_device *dev);
-extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_agp_release(struct drm_device *dev);
-extern int drm_agp_release_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
-extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
-extern int drm_agp_info_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
-extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
-extern int drm_agp_free_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
-extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
-extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+
+#include <drm/drm_agpsupport.h>
/* Stub support (drm_stub.h) */
extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
@@ -1504,23 +1458,19 @@ extern void drm_put_dev(struct drm_device *dev);
extern int drm_put_minor(struct drm_minor **minor);
extern void drm_unplug_dev(struct drm_device *dev);
extern unsigned int drm_debug;
+extern unsigned int drm_rnodes;
extern unsigned int drm_vblank_offdelay;
extern unsigned int drm_timestamp_precision;
extern unsigned int drm_timestamp_monotonic;
extern struct class *drm_class;
-extern struct proc_dir_entry *drm_proc_root;
extern struct dentry *drm_debugfs_root;
extern struct idr drm_minors_idr;
extern struct drm_local_map *drm_getsarea(struct drm_device *dev);
- /* Proc support (drm_proc.h) */
-extern int drm_proc_init(struct drm_minor *minor, struct proc_dir_entry *root);
-extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root);
-
/* Debugfs support */
#if defined(CONFIG_DEBUG_FS)
extern int drm_debugfs_init(struct drm_minor *minor, int minor_id,
@@ -1550,6 +1500,7 @@ extern struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
struct dma_buf *dma_buf);
extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct drm_file *file_priv, int prime_fd, uint32_t *handle);
+extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1561,25 +1512,22 @@ extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **
extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
+int drm_gem_dumb_destroy(struct drm_file *file,
+ struct drm_device *dev,
+ uint32_t handle);
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
-int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle);
-void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
-
-int drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj);
-int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf,
- struct drm_gem_object **obj);
+void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
#if DRM_DEBUG_CODE
extern int drm_vma_info(struct seq_file *m, void *data);
#endif
/* Scatter Gather Support (drm_scatter.h) */
-extern void drm_sg_cleanup(struct drm_sg_mem * entry);
-extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+extern void drm_legacy_sg_cleanup(struct drm_device *dev);
+extern int drm_sg_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
extern int drm_sg_free(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1613,9 +1561,8 @@ struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
size_t size);
int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
-int drm_gem_private_object_init(struct drm_device *dev,
- struct drm_gem_object *obj, size_t size);
-void drm_gem_object_handle_free(struct drm_gem_object *obj);
+void drm_gem_private_object_init(struct drm_device *dev,
+ struct drm_gem_object *obj, size_t size);
void drm_gem_vm_open(struct vm_area_struct *vma);
void drm_gem_vm_close(struct vm_area_struct *vma);
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
@@ -1640,66 +1587,32 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
static inline void
drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
{
- if (obj != NULL) {
+ if (obj && !atomic_add_unless(&obj->refcount.refcount, -1, 1)) {
struct drm_device *dev = obj->dev;
+
mutex_lock(&dev->struct_mutex);
- kref_put(&obj->refcount, drm_gem_object_free);
+ if (likely(atomic_dec_and_test(&obj->refcount.refcount)))
+ drm_gem_object_free(&obj->refcount);
mutex_unlock(&dev->struct_mutex);
}
}
+int drm_gem_handle_create_tail(struct drm_file *file_priv,
+ struct drm_gem_object *obj,
+ u32 *handlep);
int drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep);
int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
-static inline void
-drm_gem_object_handle_reference(struct drm_gem_object *obj)
-{
- drm_gem_object_reference(obj);
- atomic_inc(&obj->handle_count);
-}
-
-static inline void
-drm_gem_object_handle_unreference(struct drm_gem_object *obj)
-{
- if (obj == NULL)
- return;
-
- if (atomic_read(&obj->handle_count) == 0)
- return;
- /*
- * Must bump handle count first as this may be the last
- * ref, in which case the object would disappear before we
- * checked for a name
- */
- if (atomic_dec_and_test(&obj->handle_count))
- drm_gem_object_handle_free(obj);
- drm_gem_object_unreference(obj);
-}
-
-static inline void
-drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
-{
- if (obj == NULL)
- return;
-
- if (atomic_read(&obj->handle_count) == 0)
- return;
-
- /*
- * Must bump handle count first as this may be the last
- * ref, in which case the object would disappear before we
- * checked for a name
- */
-
- if (atomic_dec_and_test(&obj->handle_count))
- drm_gem_object_handle_free(obj);
- drm_gem_object_unreference_unlocked(obj);
-}
void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
+int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
+
+struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+ bool dirty, bool accessed);
struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev,
struct drm_file *filp,
@@ -1769,9 +1682,6 @@ extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
extern void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device);
-extern int drm_get_platform_dev(struct platform_device *pdev,
- struct drm_driver *driver);
-
/* returns true if currently okay to sleep */
static __inline__ bool drm_can_sleep(void)
{
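
The new DRIVER_RENDER capability, the DRM_RENDER_ALLOW ioctl flag and the drm_is_render_client() helper above let drivers expose a restricted ioctl set on render nodes. A sketch of an ioctl body guarding a legacy path against render-node callers (the function name and policy are illustrative):

#include <drm/drmP.h>

static int example_legacy_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	/* Render clients are unauthenticated; keep them on the GEM/PRIME path. */
	if (drm_is_render_client(file_priv))
		return -EACCES;

	return 0;		/* legacy handling would go here */
}
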
diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h
new file mode 100644
index 00000000000..a184eeee9c9
--- /dev/null
+++ b/include/drm/drm_agpsupport.h
@@ -0,0 +1,194 @@
+#ifndef _DRM_AGPSUPPORT_H_
+#define _DRM_AGPSUPPORT_H_
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/agp_backend.h>
+#include <drm/drmP.h>
+
+#if __OS_HAS_AGP
+
+void drm_free_agp(DRM_AGP_MEM * handle, int pages);
+int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
+int drm_unbind_agp(DRM_AGP_MEM * handle);
+DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
+ struct page **pages,
+ unsigned long num_pages,
+ uint32_t gtt_offset,
+ uint32_t type);
+
+struct drm_agp_head *drm_agp_init(struct drm_device *dev);
+void drm_agp_destroy(struct drm_agp_head *agp);
+void drm_agp_clear(struct drm_device *dev);
+int drm_agp_acquire(struct drm_device *dev);
+int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_agp_release(struct drm_device *dev);
+int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
+int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
+int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
+int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
+int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
+int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
+int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+static inline int drm_core_has_AGP(struct drm_device *dev)
+{
+ return drm_core_check_feature(dev, DRIVER_USE_AGP);
+}
+
+#else /* __OS_HAS_AGP */
+
+static inline void drm_free_agp(DRM_AGP_MEM * handle, int pages)
+{
+}
+
+static inline int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
+{
+ return -ENODEV;
+}
+
+static inline int drm_unbind_agp(DRM_AGP_MEM * handle)
+{
+ return -ENODEV;
+}
+
+static inline DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev,
+ struct page **pages,
+ unsigned long num_pages,
+ uint32_t gtt_offset,
+ uint32_t type)
+{
+ return NULL;
+}
+
+static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev)
+{
+ return NULL;
+}
+
+static inline void drm_agp_destroy(struct drm_agp_head *agp)
+{
+}
+
+static inline void drm_agp_clear(struct drm_device *dev)
+{
+}
+
+static inline int drm_agp_acquire(struct drm_device *dev)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_release(struct drm_device *dev)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_enable(struct drm_device *dev,
+ struct drm_agp_mode mode)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_info(struct drm_device *dev,
+ struct drm_agp_info *info)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_alloc(struct drm_device *dev,
+ struct drm_agp_buffer *request)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_free(struct drm_device *dev,
+ struct drm_agp_buffer *request)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_unbind(struct drm_device *dev,
+ struct drm_agp_binding *request)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_bind(struct drm_device *dev,
+ struct drm_agp_binding *request)
+{
+ return -ENODEV;
+}
+
+static inline int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return -ENODEV;
+}
+
+static inline int drm_core_has_AGP(struct drm_device *dev)
+{
+ return 0;
+}
+
+#endif /* __OS_HAS_AGP */
+
+#endif /* _DRM_AGPSUPPORT_H_ */
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index fa12a2fa429..24f499569a2 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -49,6 +49,7 @@ struct drm_clip_rect;
#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
+#define DRM_MODE_OBJECT_BRIDGE 0xbdbdbdbd
struct drm_mode_object {
uint32_t id;
@@ -305,6 +306,7 @@ struct drm_connector;
struct drm_encoder;
struct drm_pending_vblank_event;
struct drm_plane;
+struct drm_bridge;
/**
* drm_crtc_funcs - control CRTCs for a given device
@@ -363,7 +365,8 @@ struct drm_crtc_funcs {
*/
int (*page_flip)(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event);
+ struct drm_pending_vblank_event *event,
+ uint32_t flags);
int (*set_property)(struct drm_crtc *crtc,
struct drm_property *property, uint64_t val);
@@ -494,8 +497,6 @@ struct drm_encoder_funcs {
void (*destroy)(struct drm_encoder *encoder);
};
-#define DRM_CONNECTOR_MAX_UMODES 16
-#define DRM_CONNECTOR_LEN 32
#define DRM_CONNECTOR_MAX_ENCODER 3
/**
@@ -507,6 +508,7 @@ struct drm_encoder_funcs {
* @possible_crtcs: bitmask of potential CRTC bindings
* @possible_clones: bitmask of potential sibling encoders for cloning
* @crtc: currently bound CRTC
+ * @bridge: bridge associated to the encoder
* @funcs: control functions
* @helper_private: mid-layer private data
*
@@ -523,6 +525,7 @@ struct drm_encoder {
uint32_t possible_clones;
struct drm_crtc *crtc;
+ struct drm_bridge *bridge;
const struct drm_encoder_funcs *funcs;
void *helper_private;
};
@@ -683,6 +686,48 @@ struct drm_plane {
};
/**
+ * drm_bridge_funcs - drm_bridge control functions
+ * @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge
+ * @disable: Called right before encoder prepare, disables the bridge
+ * @post_disable: Called right after encoder prepare, for lockstepped disable
+ * @mode_set: Set this mode to the bridge
+ * @pre_enable: Called right before encoder commit, for lockstepped commit
+ * @enable: Called right after encoder commit, enables the bridge
+ * @destroy: make object go away
+ */
+struct drm_bridge_funcs {
+ bool (*mode_fixup)(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*disable)(struct drm_bridge *bridge);
+ void (*post_disable)(struct drm_bridge *bridge);
+ void (*mode_set)(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*pre_enable)(struct drm_bridge *bridge);
+ void (*enable)(struct drm_bridge *bridge);
+ void (*destroy)(struct drm_bridge *bridge);
+};
+
+/**
+ * drm_bridge - central DRM bridge control structure
+ * @dev: DRM device this bridge belongs to
+ * @head: list management
+ * @base: base mode object
+ * @funcs: control functions
+ * @driver_private: pointer to the bridge driver's internal context
+ */
+struct drm_bridge {
+ struct drm_device *dev;
+ struct list_head head;
+
+ struct drm_mode_object base;
+
+ const struct drm_bridge_funcs *funcs;
+ void *driver_private;
+};
+
+/**
* drm_mode_set - new values for a CRTC config change
* @head: list management
* @fb: framebuffer to use for new config
@@ -742,6 +787,7 @@ struct drm_mode_group {
uint32_t num_crtcs;
uint32_t num_encoders;
uint32_t num_connectors;
+ uint32_t num_bridges;
/* list of object IDs for this group */
uint32_t *id_list;
@@ -756,6 +802,8 @@ struct drm_mode_group {
* @fb_list: list of framebuffers available
* @num_connector: number of connectors on this device
* @connector_list: list of connector objects
+ * @num_bridge: number of bridges on this device
+ * @bridge_list: list of bridge objects
* @num_encoder: number of encoders on this device
* @encoder_list: list of encoder objects
* @num_crtc: number of CRTCs on this device
@@ -793,6 +841,8 @@ struct drm_mode_config {
int num_connector;
struct list_head connector_list;
+ int num_bridge;
+ struct list_head bridge_list;
int num_encoder;
struct list_head encoder_list;
int num_plane;
@@ -839,11 +889,13 @@ struct drm_mode_config {
/* Optional properties */
struct drm_property *scaling_mode_property;
- struct drm_property *dithering_mode_property;
struct drm_property *dirty_info_property;
/* dumb ioctl parameters */
uint32_t preferred_depth, prefer_shadow;
+
+ /* whether async page flip is supported or not */
+ bool async_page_flip;
};
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
@@ -869,6 +921,8 @@ extern int drm_crtc_init(struct drm_device *dev,
const struct drm_crtc_funcs *funcs);
extern void drm_crtc_cleanup(struct drm_crtc *crtc);
+extern void drm_connector_ida_init(void);
+extern void drm_connector_ida_destroy(void);
extern int drm_connector_init(struct drm_device *dev,
struct drm_connector *connector,
const struct drm_connector_funcs *funcs,
@@ -878,6 +932,10 @@ extern void drm_connector_cleanup(struct drm_connector *connector);
/* helper to unplug all connectors from sysfs for device */
extern void drm_connector_unplug_all(struct drm_device *dev);
+extern int drm_bridge_init(struct drm_device *dev, struct drm_bridge *bridge,
+ const struct drm_bridge_funcs *funcs);
+extern void drm_bridge_cleanup(struct drm_bridge *bridge);
+
extern int drm_encoder_init(struct drm_device *dev,
struct drm_encoder *encoder,
const struct drm_encoder_funcs *funcs,
@@ -908,7 +966,6 @@ extern struct edid *drm_get_edid(struct drm_connector *connector,
struct i2c_adapter *adapter);
extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
-extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
extern void drm_mode_copy(struct drm_display_mode *dst, const struct drm_display_mode *src);
extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
const struct drm_display_mode *mode);
@@ -925,14 +982,9 @@ extern int drm_mode_height(const struct drm_display_mode *mode);
/* for us by fb module */
extern struct drm_display_mode *drm_mode_create(struct drm_device *dev);
extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode);
-extern void drm_mode_list_concat(struct list_head *head,
- struct list_head *new);
extern void drm_mode_validate_size(struct drm_device *dev,
struct list_head *mode_list,
int maxX, int maxY, int maxPitch);
-extern void drm_mode_validate_clocks(struct drm_device *dev,
- struct list_head *mode_list,
- int *min, int *max, int n_ranges);
extern void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose);
extern void drm_mode_sort(struct list_head *mode_list);
@@ -949,9 +1001,6 @@ extern int drm_object_property_set_value(struct drm_mode_object *obj,
extern int drm_object_property_get_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t *value);
-extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev);
-extern void drm_framebuffer_set_object(struct drm_device *dev,
- unsigned long handle);
extern int drm_framebuffer_init(struct drm_device *dev,
struct drm_framebuffer *fb,
const struct drm_framebuffer_funcs *funcs);
@@ -962,10 +1011,6 @@ extern void drm_framebuffer_reference(struct drm_framebuffer *fb);
extern void drm_framebuffer_remove(struct drm_framebuffer *fb);
extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
extern void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
-extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
-extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
-extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY);
-extern bool drm_crtc_in_use(struct drm_crtc *crtc);
extern void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
@@ -990,7 +1035,6 @@ extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats,
char *formats[]);
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
-extern int drm_mode_create_dithering_property(struct drm_device *dev);
extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
extern const char *drm_get_encoder_name(const struct drm_encoder *encoder);
@@ -1040,17 +1084,12 @@ extern int drm_mode_getblob_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-extern int drm_mode_hotplug_ioctl(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
-extern int drm_mode_replacefb(struct drm_device *dev,
- void *data, struct drm_file *file_priv);
extern int drm_mode_getencoder(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_gamma_get_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern int drm_mode_gamma_set_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
-extern u8 *drm_find_cea_extension(struct edid *edid);
extern u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
extern bool drm_detect_hdmi_monitor(struct edid *edid);
extern bool drm_detect_monitor_audio(struct edid *edid);
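
The new drm_bridge object above models a fixed encoder-to-panel converter that sits after an encoder. A minimal sketch of registering one (the callbacks and names are illustrative; a real driver would also fill in the remaining hooks and error paths):

#include <drm/drm_crtc.h>

static void example_bridge_enable(struct drm_bridge *bridge)
{
	/* power up the external converter chip here */
}

static void example_bridge_disable(struct drm_bridge *bridge)
{
	/* power it back down */
}

static const struct drm_bridge_funcs example_bridge_funcs = {
	.enable	 = example_bridge_enable,
	.disable = example_bridge_disable,
	/* mode_fixup, mode_set, pre_enable, post_disable, destroy as needed */
};

static int example_attach_bridge(struct drm_device *dev,
				 struct drm_encoder *encoder,
				 struct drm_bridge *bridge)
{
	int ret;

	ret = drm_bridge_init(dev, bridge, &example_bridge_funcs);
	if (ret)
		return ret;

	encoder->bridge = bridge;	/* encoder output now feeds the bridge */
	return 0;
}
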
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index e8e1417af3d..ae8dbfb1207 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -342,13 +342,42 @@ u8 drm_dp_get_adjust_request_voltage(u8 link_status[DP_LINK_STATUS_SIZE],
u8 drm_dp_get_adjust_request_pre_emphasis(u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
-#define DP_RECEIVER_CAP_SIZE 0xf
+#define DP_RECEIVER_CAP_SIZE 0xf
+#define EDP_PSR_RECEIVER_CAP_SIZE 2
+
void drm_dp_link_train_clock_recovery_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
void drm_dp_link_train_channel_eq_delay(u8 dpcd[DP_RECEIVER_CAP_SIZE]);
u8 drm_dp_link_rate_to_bw_code(int link_rate);
int drm_dp_bw_code_to_link_rate(u8 link_bw);
+struct edp_sdp_header {
+ u8 HB0; /* Secondary Data Packet ID */
+ u8 HB1; /* Secondary Data Packet Type */
+ u8 HB2; /* 7:5 reserved, 4:0 revision number */
+ u8 HB3; /* 7:5 reserved, 4:0 number of valid data bytes */
+} __packed;
+
+#define EDP_SDP_HEADER_REVISION_MASK 0x1F
+#define EDP_SDP_HEADER_VALID_PAYLOAD_BYTES 0x1F
+
+struct edp_vsc_psr {
+ struct edp_sdp_header sdp_header;
+ u8 DB0; /* Stereo Interface */
+ u8 DB1; /* 0 - PSR State; 1 - Update RFB; 2 - CRC Valid */
+ u8 DB2; /* CRC value bits 7:0 of the R or Cr component */
+ u8 DB3; /* CRC value bits 15:8 of the R or Cr component */
+ u8 DB4; /* CRC value bits 7:0 of the G or Y component */
+ u8 DB5; /* CRC value bits 15:8 of the G or Y component */
+ u8 DB6; /* CRC value bits 7:0 of the B or Cb component */
+ u8 DB7; /* CRC value bits 15:8 of the B or Cb component */
+ u8 DB8_31[24]; /* Reserved */
+} __packed;
+
+#define EDP_VSC_PSR_STATE_ACTIVE (1<<0)
+#define EDP_VSC_PSR_UPDATE_RFB (1<<1)
+#define EDP_VSC_PSR_CRC_VALUES_VALID (1<<2)
+
static inline int
drm_dp_max_link_rate(u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
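
The edp_vsc_psr packet defined above carries eDP panel self-refresh state in a secondary data packet. A sketch of filling one before handing it to hardware-specific transmission code; the header byte values follow common driver usage and should be treated as illustrative:

#include <linux/string.h>
#include <drm/drm_dp_helper.h>

static void example_fill_psr_vsc(struct edp_vsc_psr *vsc)
{
	memset(vsc, 0, sizeof(*vsc));

	vsc->sdp_header.HB0 = 0;	/* secondary data packet ID */
	vsc->sdp_header.HB1 = 0x7;	/* packet type: VSC */
	vsc->sdp_header.HB2 = 0x2;	/* revision number */
	vsc->sdp_header.HB3 = 0x8;	/* number of valid data bytes */

	/* PSR active; CRC values in DB2..DB7 are valid. */
	vsc->DB1 = EDP_VSC_PSR_STATE_ACTIVE | EDP_VSC_PSR_CRC_VALUES_VALID;
}
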
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index fc481fc1708..a1441c5ac63 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -256,9 +256,11 @@ struct drm_encoder;
struct drm_connector;
struct drm_display_mode;
struct hdmi_avi_infoframe;
+struct hdmi_vendor_infoframe;
void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid);
int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads);
+int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb);
int drm_av_sync_delay(struct drm_connector *connector,
struct drm_display_mode *mode);
struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
@@ -268,5 +270,8 @@ int drm_load_edid_firmware(struct drm_connector *connector);
int
drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
const struct drm_display_mode *mode);
+int
+drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
+ const struct drm_display_mode *mode);
#endif /* __DRM_EDID_H__ */
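
The new drm_hdmi_vendor_infoframe_from_display_mode() declared above builds the HDMI 1.4 vendor-specific infoframe (4k and stereo signalling) from a display mode. A sketch of a caller packing it into a raw buffer, assuming the hdmi_vendor_infoframe_pack() helper from <linux/hdmi.h>:

#include <linux/hdmi.h>
#include <drm/drm_edid.h>

static ssize_t example_pack_vendor_infoframe(const struct drm_display_mode *mode,
					     void *buf, size_t len)
{
	struct hdmi_vendor_infoframe frame;
	int ret;

	ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode);
	if (ret < 0)
		return ret;	/* mode carries no HDMI 1.4 vendor payload */

	return hdmi_vendor_infoframe_pack(&frame, buf, len);
}
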
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index 4a3fc244301..c54cf3d4a03 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -24,7 +24,6 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
unsigned int plane);
#ifdef CONFIG_DEBUG_FS
-void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m);
int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg);
#endif
diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
index f5e1168c764..d639049a613 100644
--- a/include/drm/drm_fixed.h
+++ b/include/drm/drm_fixed.h
@@ -84,12 +84,12 @@ static inline int drm_fixp2int(int64_t a)
return ((s64)a) >> DRM_FIXED_POINT;
}
-static inline s64 drm_fixp_msbset(int64_t a)
+static inline unsigned drm_fixp_msbset(int64_t a)
{
unsigned shift, sign = (a >> 63) & 1;
for (shift = 62; shift > 0; --shift)
- if ((a >> shift) != sign)
+ if (((a >> shift) & 1) != sign)
return shift;
return 0;
@@ -100,9 +100,9 @@ static inline s64 drm_fixp_mul(s64 a, s64 b)
unsigned shift = drm_fixp_msbset(a) + drm_fixp_msbset(b);
s64 result;
- if (shift > 63) {
- shift = shift - 63;
- a >>= shift >> 1;
+ if (shift > 61) {
+ shift = shift - 61;
+ a >>= (shift >> 1) + (shift & 1);
b >>= shift >> 1;
} else
shift = 0;
@@ -120,7 +120,7 @@ static inline s64 drm_fixp_mul(s64 a, s64 b)
static inline s64 drm_fixp_div(s64 a, s64 b)
{
- unsigned shift = 63 - drm_fixp_msbset(a);
+ unsigned shift = 62 - drm_fixp_msbset(a);
s64 result;
a <<= shift;
@@ -154,7 +154,7 @@ static inline s64 drm_fixp_exp(s64 x)
}
if (x < 0)
- sum = drm_fixp_div(1, sum);
+ sum = drm_fixp_div(DRM_FIXED_ONE, sum);
return sum;
}
diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h
new file mode 100644
index 00000000000..35c776ae7d3
--- /dev/null
+++ b/include/drm/drm_flip_work.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef DRM_FLIP_WORK_H
+#define DRM_FLIP_WORK_H
+
+#include <linux/kfifo.h>
+#include <linux/workqueue.h>
+
+/**
+ * DOC: flip utils
+ *
+ * Util to queue up work to run from work-queue context after flip/vblank.
+ * Typically this can be used to defer unref of framebuffers, cursor
+ * bos, etc. until after vblank. The APIs are all safe (and lockless)
+ * for up to one producer and one consumer at a time. The single-consumer
+ * aspect is ensured by committing the queued work to a single work-queue.
+ */
+
+struct drm_flip_work;
+
+/*
+ * drm_flip_func_t - callback function
+ *
+ * @work: the flip work
+ * @val: value queued via drm_flip_work_queue()
+ *
+ * Callback function to be called for each of the queued work items after
+ * drm_flip_work_commit() is called.
+ */
+typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
+
+/**
+ * struct drm_flip_work - flip work queue
+ * @name: debug name
+ * @pending: number of queued but not committed items
+ * @count: number of committed items
+ * @func: callback fxn called for each committed item
+ * @worker: worker which calls @func
+ */
+struct drm_flip_work {
+ const char *name;
+ atomic_t pending, count;
+ drm_flip_func_t func;
+ struct work_struct worker;
+ DECLARE_KFIFO_PTR(fifo, void *);
+};
+
+void drm_flip_work_queue(struct drm_flip_work *work, void *val);
+void drm_flip_work_commit(struct drm_flip_work *work,
+ struct workqueue_struct *wq);
+int drm_flip_work_init(struct drm_flip_work *work, int size,
+ const char *name, drm_flip_func_t func);
+void drm_flip_work_cleanup(struct drm_flip_work *work);
+
+#endif /* DRM_FLIP_WORK_H */
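
To make the intended usage of the API above concrete, a sketch of deferring framebuffer unrefs until after vblank: queue from the flip path, commit from the vblank handler, and run the callback on an ordinary workqueue (names are illustrative):

#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>

static struct drm_flip_work example_unref_work;

static void example_unref_worker(struct drm_flip_work *work, void *val)
{
	drm_framebuffer_unreference(val);	/* runs in process context */
}

static int example_init(void)
{
	/* room for up to 16 queued-but-uncommitted framebuffers */
	return drm_flip_work_init(&example_unref_work, 16,
				  "fb unref", example_unref_worker);
}

/* Called while setting up a page flip: stash the old fb for later unref. */
static void example_queue_old_fb(struct drm_framebuffer *old_fb)
{
	drm_flip_work_queue(&example_unref_work, old_fb);
}

/* Called from the vblank handler once the flip has completed. */
static void example_flip_done(void)
{
	drm_flip_work_commit(&example_unref_work, system_wq);
}
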
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index c34f27f80bc..89b4d7db1eb 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -30,14 +30,6 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
/* set vm_flags and we can change the vm attribute to other one at here. */
int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
-/*
- * destroy memory region allocated.
- * - a gem handle and physical memory region pointed by a gem object
- * would be released by drm_gem_handle_delete().
- */
-int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *drm, unsigned int handle);
-
/* allocate physical memory. */
struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
unsigned int size);
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 4d06edb56d5..cba67865d18 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -36,11 +36,19 @@
/*
* Generic range manager structs
*/
+#include <linux/bug.h>
+#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/spinlock.h>
#ifdef CONFIG_DEBUG_FS
#include <linux/seq_file.h>
#endif
+enum drm_mm_search_flags {
+ DRM_MM_SEARCH_DEFAULT = 0,
+ DRM_MM_SEARCH_BEST = 1 << 0,
+};
+
struct drm_mm_node {
struct list_head node_list;
struct list_head hole_stack;
@@ -62,9 +70,6 @@ struct drm_mm {
/* head_node.node_list is the list of all memory nodes, ordered
* according to the (increasing) start address of the memory node. */
struct drm_mm_node head_node;
- struct list_head unused_nodes;
- int num_unused;
- spinlock_t unused_lock;
unsigned int scan_check_range : 1;
unsigned scan_alignment;
unsigned long scan_color;
@@ -115,13 +120,6 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \
&(mm)->head_node.node_list, \
node_list)
-#define drm_mm_for_each_scanned_node_reverse(entry, n, mm) \
- for (entry = (mm)->prev_scanned_node, \
- next = entry ? list_entry(entry->node_list.next, \
- struct drm_mm_node, node_list) : NULL; \
- entry != NULL; entry = next, \
- next = entry ? list_entry(entry->node_list.next, \
- struct drm_mm_node, node_list) : NULL) \
/* Note that we need to unroll list_for_each_entry in order to inline
* setting hole_start and hole_end on each iteration and keep the
@@ -138,124 +136,50 @@ static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
/*
* Basic range manager support (drm_mm.c)
*/
-extern struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
- unsigned long start,
- unsigned long size,
- bool atomic);
-extern struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
- unsigned long size,
- unsigned alignment,
- unsigned long color,
- int atomic);
-extern struct drm_mm_node *drm_mm_get_block_range_generic(
- struct drm_mm_node *node,
- unsigned long size,
- unsigned alignment,
- unsigned long color,
- unsigned long start,
- unsigned long end,
- int atomic);
-static inline struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
- unsigned long size,
- unsigned alignment)
-{
- return drm_mm_get_block_generic(parent, size, alignment, 0, 0);
-}
-static inline struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
- unsigned long size,
- unsigned alignment)
-{
- return drm_mm_get_block_generic(parent, size, alignment, 0, 1);
-}
-static inline struct drm_mm_node *drm_mm_get_block_range(
- struct drm_mm_node *parent,
- unsigned long size,
- unsigned alignment,
- unsigned long start,
- unsigned long end)
-{
- return drm_mm_get_block_range_generic(parent, size, alignment, 0,
- start, end, 0);
-}
-static inline struct drm_mm_node *drm_mm_get_block_atomic_range(
- struct drm_mm_node *parent,
- unsigned long size,
- unsigned alignment,
- unsigned long start,
- unsigned long end)
-{
- return drm_mm_get_block_range_generic(parent, size, alignment, 0,
- start, end, 1);
-}
+extern int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);
-extern int drm_mm_insert_node(struct drm_mm *mm,
- struct drm_mm_node *node,
- unsigned long size,
- unsigned alignment);
-extern int drm_mm_insert_node_in_range(struct drm_mm *mm,
- struct drm_mm_node *node,
- unsigned long size,
- unsigned alignment,
- unsigned long start,
- unsigned long end);
extern int drm_mm_insert_node_generic(struct drm_mm *mm,
struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
- unsigned long color);
+ unsigned long color,
+ enum drm_mm_search_flags flags);
+static inline int drm_mm_insert_node(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ enum drm_mm_search_flags flags)
+{
+ return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags);
+}
+
extern int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
struct drm_mm_node *node,
unsigned long size,
unsigned alignment,
unsigned long color,
unsigned long start,
- unsigned long end);
-extern void drm_mm_put_block(struct drm_mm_node *cur);
-extern void drm_mm_remove_node(struct drm_mm_node *node);
-extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
-extern struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment,
- unsigned long color,
- bool best_match);
-extern struct drm_mm_node *drm_mm_search_free_in_range_generic(
- const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment,
- unsigned long color,
- unsigned long start,
- unsigned long end,
- bool best_match);
-static inline struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment,
- bool best_match)
+ unsigned long end,
+ enum drm_mm_search_flags flags);
+static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
+ struct drm_mm_node *node,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end,
+ enum drm_mm_search_flags flags)
{
- return drm_mm_search_free_generic(mm,size, alignment, 0, best_match);
-}
-static inline struct drm_mm_node *drm_mm_search_free_in_range(
- const struct drm_mm *mm,
- unsigned long size,
- unsigned alignment,
- unsigned long start,
- unsigned long end,
- bool best_match)
-{
- return drm_mm_search_free_in_range_generic(mm, size, alignment, 0,
- start, end, best_match);
+ return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
+ 0, start, end, flags);
}
+extern void drm_mm_remove_node(struct drm_mm_node *node);
+extern void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
extern void drm_mm_init(struct drm_mm *mm,
unsigned long start,
unsigned long size);
extern void drm_mm_takedown(struct drm_mm *mm);
extern int drm_mm_clean(struct drm_mm *mm);
-extern int drm_mm_pre_get(struct drm_mm *mm);
-
-static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
-{
- return block->mm;
-}
void drm_mm_init_scan(struct drm_mm *mm,
unsigned long size,
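The range manager drops the pre-allocated get_block/put_block scheme in favour of caller-embedded nodes and the flag-based insert helpers shown above. A minimal, hedged sketch of the new calling convention; mm is assumed to be a struct drm_mm already set up with drm_mm_init(), and the sizes are illustrative:

/* Allocate 16 pages of address space from an existing manager. */
struct drm_mm_node node;
int ret;

memset(&node, 0, sizeof(node));
ret = drm_mm_insert_node(&mm, &node, 16 * PAGE_SIZE, PAGE_SIZE,
			 DRM_MM_SEARCH_DEFAULT);
if (ret)
	return ret;	/* no hole large enough */

/* ... node.start is the allocated offset ... */
drm_mm_remove_node(&node);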
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 34efaf64cc8..fd54a14a7c2 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -1,4 +1,22 @@
#define radeon_PCI_IDS \
+ {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
{0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -690,29 +708,6 @@
{0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
{0, 0, 0}
-#define mach64_PCI_IDS \
- {0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4c49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4c50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4c51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4c42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4c44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x474c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x474f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4752, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4753, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x474d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x474e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4c52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0, 0, 0}
-
#define sisdrv_PCI_IDS \
{0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0x1039, 0x5300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
@@ -752,10 +747,6 @@
{0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
{0, 0, 0}
-#define gamma_PCI_IDS \
- {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
- {0, 0, 0}
-
#define savage_PCI_IDS \
{0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
{0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
@@ -781,6 +772,3 @@
{0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
{0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
{0, 0, 0}
-
-#define ffb_PCI_IDS \
- {0, 0, 0}
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
new file mode 100644
index 00000000000..c18a593d174
--- /dev/null
+++ b/include/drm/drm_vma_manager.h
@@ -0,0 +1,257 @@
+#ifndef __DRM_VMA_MANAGER_H__
+#define __DRM_VMA_MANAGER_H__
+
+/*
+ * Copyright (c) 2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drm/drm_mm.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+struct drm_vma_offset_file {
+ struct rb_node vm_rb;
+ struct file *vm_filp;
+ unsigned long vm_count;
+};
+
+struct drm_vma_offset_node {
+ rwlock_t vm_lock;
+ struct drm_mm_node vm_node;
+ struct rb_node vm_rb;
+ struct rb_root vm_files;
+};
+
+struct drm_vma_offset_manager {
+ rwlock_t vm_lock;
+ struct rb_root vm_addr_space_rb;
+ struct drm_mm vm_addr_space_mm;
+};
+
+void drm_vma_offset_manager_init(struct drm_vma_offset_manager *mgr,
+ unsigned long page_offset, unsigned long size);
+void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr);
+
+struct drm_vma_offset_node *drm_vma_offset_lookup(struct drm_vma_offset_manager *mgr,
+ unsigned long start,
+ unsigned long pages);
+struct drm_vma_offset_node *drm_vma_offset_lookup_locked(struct drm_vma_offset_manager *mgr,
+ unsigned long start,
+ unsigned long pages);
+int drm_vma_offset_add(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node, unsigned long pages);
+void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
+ struct drm_vma_offset_node *node);
+
+int drm_vma_node_allow(struct drm_vma_offset_node *node, struct file *filp);
+void drm_vma_node_revoke(struct drm_vma_offset_node *node, struct file *filp);
+bool drm_vma_node_is_allowed(struct drm_vma_offset_node *node,
+ struct file *filp);
+
+/**
+ * drm_vma_offset_exact_lookup() - Look up node by exact address
+ * @mgr: Manager object
+ * @start: Start address (page-based, not byte-based)
+ * @pages: Size of object (page-based)
+ *
+ * Same as drm_vma_offset_lookup() but does not allow any offset into the node.
+ * It only returns the exact object with the given start address.
+ *
+ * RETURNS:
+ * Node at exact start address @start.
+ */
+static inline struct drm_vma_offset_node *
+drm_vma_offset_exact_lookup(struct drm_vma_offset_manager *mgr,
+ unsigned long start,
+ unsigned long pages)
+{
+ struct drm_vma_offset_node *node;
+
+ node = drm_vma_offset_lookup(mgr, start, pages);
+ return (node && node->vm_node.start == start) ? node : NULL;
+}
+
+/**
+ * drm_vma_offset_lock_lookup() - Lock lookup for extended private use
+ * @mgr: Manager object
+ *
+ * Lock VMA manager for extended lookups. Only *_locked() VMA function calls
+ * are allowed while holding this lock. All other contexts are blocked from VMA
+ * until the lock is released via drm_vma_offset_unlock_lookup().
+ *
+ * Use this if you need to take a reference to the objects returned by
+ * drm_vma_offset_lookup_locked() before releasing this lock again.
+ *
+ * This lock must not be used for anything other than extended lookups. You must
+ * not call any other VMA helpers while holding this lock.
+ *
+ * Note: You're in atomic-context while holding this lock!
+ *
+ * Example:
+ * drm_vma_offset_lock_lookup(mgr);
+ * node = drm_vma_offset_lookup_locked(mgr);
+ * if (node)
+ * kref_get_unless_zero(container_of(node, sth, entr));
+ * drm_vma_offset_unlock_lookup(mgr);
+ */
+static inline void drm_vma_offset_lock_lookup(struct drm_vma_offset_manager *mgr)
+{
+ read_lock(&mgr->vm_lock);
+}
+
+/**
+ * drm_vma_offset_unlock_lookup() - Unlock lookup for extended private use
+ * @mgr: Manager object
+ *
+ * Release lookup-lock. See drm_vma_offset_lock_lookup() for more information.
+ */
+static inline void drm_vma_offset_unlock_lookup(struct drm_vma_offset_manager *mgr)
+{
+ read_unlock(&mgr->vm_lock);
+}
+
+/**
+ * drm_vma_node_reset() - Initialize or reset node object
+ * @node: Node to initialize or reset
+ *
+ * Reset a node to its initial state. This must be called before using it with
+ * any VMA offset manager.
+ *
+ * This must not be called on an already allocated node, or you will leak
+ * memory.
+ */
+static inline void drm_vma_node_reset(struct drm_vma_offset_node *node)
+{
+ memset(node, 0, sizeof(*node));
+ node->vm_files = RB_ROOT;
+ rwlock_init(&node->vm_lock);
+}
+
+/**
+ * drm_vma_node_start() - Return start address for page-based addressing
+ * @node: Node to inspect
+ *
+ * Return the start address of the given node. This can be used as offset into
+ * the linear VM space that is provided by the VMA offset manager. Note that
+ * this can only be used for page-based addressing. If you need a proper offset
+ * for user-space mappings, you must apply "<< PAGE_SHIFT" or use the
+ * drm_vma_node_offset_addr() helper instead.
+ *
+ * RETURNS:
+ * Start address of @node for page-based addressing. 0 if the node does not
+ * have an offset allocated.
+ */
+static inline unsigned long drm_vma_node_start(struct drm_vma_offset_node *node)
+{
+ return node->vm_node.start;
+}
+
+/**
+ * drm_vma_node_size() - Return size (page-based)
+ * @node: Node to inspect
+ *
+ * Return the size as number of pages for the given node. This is the same size
+ * that was passed to drm_vma_offset_add(). If no offset is allocated for the
+ * node, this is 0.
+ *
+ * RETURNS:
+ * Size of @node as number of pages. 0 if the node does not have an offset
+ * allocated.
+ */
+static inline unsigned long drm_vma_node_size(struct drm_vma_offset_node *node)
+{
+ return node->vm_node.size;
+}
+
+/**
+ * drm_vma_node_has_offset() - Check whether node is added to offset manager
+ * @node: Node to be checked
+ *
+ * RETURNS:
+ * true iff the node was previously allocated an offset and added to
+ * a vma offset manager.
+ */
+static inline bool drm_vma_node_has_offset(struct drm_vma_offset_node *node)
+{
+ return drm_mm_node_allocated(&node->vm_node);
+}
+
+/**
+ * drm_vma_node_offset_addr() - Return sanitized offset for user-space mmaps
+ * @node: Linked offset node
+ *
+ * Same as drm_vma_node_start() but returns the address as a valid offset that
+ * can be used for user-space mappings during mmap().
+ * This must not be called on unlinked nodes.
+ *
+ * RETURNS:
+ * Offset of @node for byte-based addressing. 0 if the node does not have an
+ * offset allocated.
+ */
+static inline __u64 drm_vma_node_offset_addr(struct drm_vma_offset_node *node)
+{
+ return ((__u64)node->vm_node.start) << PAGE_SHIFT;
+}
+
+/**
+ * drm_vma_node_unmap() - Unmap offset node
+ * @node: Offset node
+ * @file_mapping: Address space to unmap @node from
+ *
+ * Unmap all userspace mappings for a given offset node. The mappings must be
+ * associated with the @file_mapping address-space. If no offset exists or
+ * the address-space is invalid, nothing is done.
+ *
+ * This call is unlocked. The caller must guarantee that drm_vma_offset_remove()
+ * is not called on this node concurrently.
+ */
+static inline void drm_vma_node_unmap(struct drm_vma_offset_node *node,
+ struct address_space *file_mapping)
+{
+ if (file_mapping && drm_vma_node_has_offset(node))
+ unmap_mapping_range(file_mapping,
+ drm_vma_node_offset_addr(node),
+ drm_vma_node_size(node) << PAGE_SHIFT, 1);
+}
+
+/**
+ * drm_vma_node_verify_access() - Access verification helper for TTM
+ * @node: Offset node
+ * @filp: Open-file
+ *
+ * This checks whether @filp is granted access to @node. It is the same as
+ * drm_vma_node_is_allowed() but suitable as a drop-in helper for TTM
+ * verify_access() callbacks.
+ *
+ * RETURNS:
+ * 0 if access is granted, -EACCES otherwise.
+ */
+static inline int drm_vma_node_verify_access(struct drm_vma_offset_node *node,
+ struct file *filp)
+{
+ return drm_vma_node_is_allowed(node, filp) ? 0 : -EACCES;
+}
+
+#endif /* __DRM_VMA_MANAGER_H__ */
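The new header centralises the mmap-offset bookkeeping that TTM and the GEM drivers previously open-coded. A hedged sketch of the expected object lifecycle; my_obj and its fields are illustrative, only the drm_vma_* calls come from this header:

struct my_obj {
	struct drm_vma_offset_node vma_node;
	unsigned long num_pages;
};

static int my_obj_setup_mmap(struct drm_vma_offset_manager *mgr,
			     struct my_obj *obj)
{
	/* prepare the node, then allocate an offset range for it */
	drm_vma_node_reset(&obj->vma_node);
	return drm_vma_offset_add(mgr, &obj->vma_node, obj->num_pages);
}

static u64 my_obj_mmap_offset(struct my_obj *obj)
{
	/* the value user space passes as the mmap() offset */
	return drm_vma_node_offset_addr(&obj->vma_node);
}

static void my_obj_release_mmap(struct drm_vma_offset_manager *mgr,
				struct my_obj *obj,
				struct address_space *mapping)
{
	/* tear down user mappings before releasing the offset */
	drm_vma_node_unmap(&obj->vma_node, mapping);
	drm_vma_offset_remove(mgr, &obj->vma_node);
}

A fault or mmap path that needs to take a reference before dropping the lock would pair drm_vma_offset_lock_lookup() with drm_vma_offset_lookup_locked(), as the kernel-doc above describes.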
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index d6aeaf3c6d6..cb65fa14acf 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -15,6 +15,7 @@
#define _EXYNOS_DRM_H_
#include <uapi/drm/exynos_drm.h>
+#include <video/videomode.h>
/**
* A structure for lcd panel information.
@@ -24,7 +25,7 @@
* @height_mm: physical size of lcd height.
*/
struct exynos_drm_panel_info {
- struct fb_videomode timing;
+ struct videomode vm;
u32 width_mm;
u32 height_mm;
};
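With the switch from struct fb_videomode to struct videomode, board data describes the panel through display-timing fields. An illustrative initialiser; the timing values are made up:

static struct exynos_drm_panel_info my_panel = {
	.vm = {
		.pixelclock	= 33260000,
		.hactive	= 800,
		.hfront_porch	= 40,
		.hback_porch	= 88,
		.hsync_len	= 128,
		.vactive	= 480,
		.vfront_porch	= 13,
		.vback_porch	= 32,
		.vsync_len	= 3,
	},
	.width_mm	= 154,
	.height_mm	= 90,
};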
diff --git a/include/drm/i2c/tda998x.h b/include/drm/i2c/tda998x.h
new file mode 100644
index 00000000000..3e419d92cf5
--- /dev/null
+++ b/include/drm/i2c/tda998x.h
@@ -0,0 +1,30 @@
+#ifndef __DRM_I2C_TDA998X_H__
+#define __DRM_I2C_TDA998X_H__
+
+struct tda998x_encoder_params {
+ u8 swap_b:3;
+ u8 mirr_b:1;
+ u8 swap_a:3;
+ u8 mirr_a:1;
+ u8 swap_d:3;
+ u8 mirr_d:1;
+ u8 swap_c:3;
+ u8 mirr_c:1;
+ u8 swap_f:3;
+ u8 mirr_f:1;
+ u8 swap_e:3;
+ u8 mirr_e:1;
+
+ u8 audio_cfg;
+ u8 audio_clk_cfg;
+ u8 audio_frame[6];
+
+ enum {
+ AFMT_SPDIF,
+ AFMT_I2S
+ } audio_format;
+
+ unsigned audio_sample_rate;
+};
+
+#endif
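This platform-data struct is meant to be filled in by board code and handed to the TDA998x encoder through the I2C device's platform_data. An illustrative, board-specific initialiser; none of these values come from the patch:

static struct tda998x_encoder_params tda998x_params = {
	/* RGB lane routing; the right values depend on the PCB */
	.swap_a = 2, .swap_b = 3, .swap_c = 0,
	.swap_d = 1, .swap_e = 4, .swap_f = 5,
	.audio_cfg		= 0x03,	/* device-specific audio mux setting */
	.audio_format		= AFMT_I2S,
	.audio_sample_rate	= 48000,
};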
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 8a6aa56ece5..751eaffbf0d 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -32,12 +32,12 @@
#define _TTM_BO_API_H_
#include <drm/drm_hashtab.h>
+#include <drm/drm_vma_manager.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/mm.h>
-#include <linux/rbtree.h>
#include <linux/bitmap.h>
#include <linux/reservation.h>
@@ -145,7 +145,6 @@ struct ttm_tt;
* @type: The bo type.
* @destroy: Destruction function. If NULL, kfree is used.
* @num_pages: Actual number of pages.
- * @addr_space_offset: Address space offset.
* @acc_size: Accounted size for this object.
* @kref: Reference count of this buffer object. When this refcount reaches
* zero, the object is put on the delayed delete list.
@@ -166,8 +165,7 @@ struct ttm_tt;
* @swap: List head for swap LRU list.
* @sync_obj: Pointer to a synchronization object.
* @priv_flags: Flags describing buffer object internal state.
- * @vm_rb: Rb node for the vm rb tree.
- * @vm_node: Address space manager node.
+ * @vma_node: Address space manager node.
* @offset: The current GPU offset, which can have different meanings
* depending on the memory type. For SYSTEM type memory, it should be 0.
* @cur_placement: Hint of current placement.
@@ -194,7 +192,6 @@ struct ttm_buffer_object {
enum ttm_bo_type type;
void (*destroy) (struct ttm_buffer_object *);
unsigned long num_pages;
- uint64_t addr_space_offset;
size_t acc_size;
/**
@@ -238,13 +235,7 @@ struct ttm_buffer_object {
void *sync_obj;
unsigned long priv_flags;
- /**
- * Members protected by the bdev::vm_lock
- */
-
- struct rb_node vm_rb;
- struct drm_mm_node *vm_node;
-
+ struct drm_vma_offset_node vma_node;
/**
* Special members that are protected by the reserve lock
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 984fc2d571a..8639c85d61c 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -36,6 +36,7 @@
#include <ttm/ttm_placement.h>
#include <drm/drm_mm.h>
#include <drm/drm_global.h>
+#include <drm/drm_vma_manager.h>
#include <linux/workqueue.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
@@ -519,7 +520,7 @@ struct ttm_bo_global {
* @man: An array of mem_type_managers.
* @fence_lock: Protects the synchronizing members on *all* bos belonging
* to this device.
- * @addr_space_mm: Range manager for the device address space.
+ * @vma_manager: Address space manager
* lru_lock: Spinlock that protects the buffer+device lru lists and
* ddestroy lists.
* @val_seq: Current validation sequence.
@@ -537,14 +538,13 @@ struct ttm_bo_device {
struct list_head device_list;
struct ttm_bo_global *glob;
struct ttm_bo_driver *driver;
- rwlock_t vm_lock;
struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
spinlock_t fence_lock;
+
/*
- * Protected by the vm lock.
+ * Protected by internal locks.
*/
- struct rb_root addr_space_rb;
- struct drm_mm addr_space_mm;
+ struct drm_vma_offset_manager vma_manager;
/*
* Protected by the global:lru lock.
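With the per-device rb-tree and drm_mm replaced by a drm_vma_offset_manager, and the buffer object now carrying a drm_vma_offset_node, a TTM driver's mmap-offset helper reduces to the accessor. A hedged one-line sketch:

/* Hedged sketch: user space gets this value as its mmap() offset. */
static u64 my_bo_mmap_offset(struct ttm_buffer_object *bo)
{
	return drm_vma_node_offset_addr(&bo->vma_node);
}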
diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h
index 15e997fa78f..4aa2b48cd15 100644
--- a/include/dt-bindings/clock/vf610-clock.h
+++ b/include/dt-bindings/clock/vf610-clock.h
@@ -158,6 +158,8 @@
#define VF610_CLK_GPU_SEL 145
#define VF610_CLK_GPU_EN 146
#define VF610_CLK_GPU2D 147
-#define VF610_CLK_END 148
+#define VF610_CLK_ENET0 148
+#define VF610_CLK_ENET1 149
+#define VF610_CLK_END 150
#endif /* __DT_BINDINGS_CLOCK_VF610_H */
diff --git a/include/dt-bindings/pinctrl/am33xx.h b/include/dt-bindings/pinctrl/am33xx.h
index 469e0325e6f..2fbc804e1a4 100644
--- a/include/dt-bindings/pinctrl/am33xx.h
+++ b/include/dt-bindings/pinctrl/am33xx.h
@@ -5,7 +5,7 @@
#ifndef _DT_BINDINGS_PINCTRL_AM33XX_H
#define _DT_BINDINGS_PINCTRL_AM33XX_H
-#include <include/dt-bindings/pinctrl/omap.h>
+#include <dt-bindings/pinctrl/omap.h>
/* am33xx specific mux bit defines */
#undef PULL_ENA
diff --git a/include/dt-bindings/pwm/pwm.h b/include/dt-bindings/pwm/pwm.h
new file mode 100644
index 00000000000..96f49e82253
--- /dev/null
+++ b/include/dt-bindings/pwm/pwm.h
@@ -0,0 +1,14 @@
+/*
+ * This header provides constants for most PWM bindings.
+ *
+ * Most PWM bindings can include a flags cell as part of the PWM specifier.
+ * In most cases, the format of the flags cell uses the standard values
+ * defined in this header.
+ */
+
+#ifndef _DT_BINDINGS_PWM_PWM_H
+#define _DT_BINDINGS_PWM_PWM_H
+
+#define PWM_POLARITY_INVERTED (1 << 0)
+
+#endif
diff --git a/include/dt-bindings/sound/fsl-imx-audmux.h b/include/dt-bindings/sound/fsl-imx-audmux.h
new file mode 100644
index 00000000000..50b09e96f24
--- /dev/null
+++ b/include/dt-bindings/sound/fsl-imx-audmux.h
@@ -0,0 +1,56 @@
+#ifndef __DT_FSL_IMX_AUDMUX_H
+#define __DT_FSL_IMX_AUDMUX_H
+
+#define MX27_AUDMUX_HPCR1_SSI0 0
+#define MX27_AUDMUX_HPCR2_SSI1 1
+#define MX27_AUDMUX_HPCR3_SSI_PINS_4 2
+#define MX27_AUDMUX_PPCR1_SSI_PINS_1 3
+#define MX27_AUDMUX_PPCR2_SSI_PINS_2 4
+#define MX27_AUDMUX_PPCR3_SSI_PINS_3 5
+
+#define MX31_AUDMUX_PORT1_SSI0 0
+#define MX31_AUDMUX_PORT2_SSI1 1
+#define MX31_AUDMUX_PORT3_SSI_PINS_3 2
+#define MX31_AUDMUX_PORT4_SSI_PINS_4 3
+#define MX31_AUDMUX_PORT5_SSI_PINS_5 4
+#define MX31_AUDMUX_PORT6_SSI_PINS_6 5
+#define MX31_AUDMUX_PORT7_SSI_PINS_7 6
+
+#define MX51_AUDMUX_PORT1_SSI0 0
+#define MX51_AUDMUX_PORT2_SSI1 1
+#define MX51_AUDMUX_PORT3 2
+#define MX51_AUDMUX_PORT4 3
+#define MX51_AUDMUX_PORT5 4
+#define MX51_AUDMUX_PORT6 5
+#define MX51_AUDMUX_PORT7 6
+
+/* Register definitions for the i.MX21/27 Digital Audio Multiplexer */
+#define IMX_AUDMUX_V1_PCR_INMMASK(x) ((x) & 0xff)
+#define IMX_AUDMUX_V1_PCR_INMEN (1 << 8)
+#define IMX_AUDMUX_V1_PCR_TXRXEN (1 << 10)
+#define IMX_AUDMUX_V1_PCR_SYN (1 << 12)
+#define IMX_AUDMUX_V1_PCR_RXDSEL(x) (((x) & 0x7) << 13)
+#define IMX_AUDMUX_V1_PCR_RFCSEL(x) (((x) & 0xf) << 20)
+#define IMX_AUDMUX_V1_PCR_RCLKDIR (1 << 24)
+#define IMX_AUDMUX_V1_PCR_RFSDIR (1 << 25)
+#define IMX_AUDMUX_V1_PCR_TFCSEL(x) (((x) & 0xf) << 26)
+#define IMX_AUDMUX_V1_PCR_TCLKDIR (1 << 30)
+#define IMX_AUDMUX_V1_PCR_TFSDIR (1 << 31)
+
+/* Register definitions for the i.MX25/31/35/51 Digital Audio Multiplexer */
+#define IMX_AUDMUX_V2_PTCR_TFSDIR (1 << 31)
+#define IMX_AUDMUX_V2_PTCR_TFSEL(x) (((x) & 0xf) << 27)
+#define IMX_AUDMUX_V2_PTCR_TCLKDIR (1 << 26)
+#define IMX_AUDMUX_V2_PTCR_TCSEL(x) (((x) & 0xf) << 22)
+#define IMX_AUDMUX_V2_PTCR_RFSDIR (1 << 21)
+#define IMX_AUDMUX_V2_PTCR_RFSEL(x) (((x) & 0xf) << 17)
+#define IMX_AUDMUX_V2_PTCR_RCLKDIR (1 << 16)
+#define IMX_AUDMUX_V2_PTCR_RCSEL(x) (((x) & 0xf) << 12)
+#define IMX_AUDMUX_V2_PTCR_SYN (1 << 11)
+
+#define IMX_AUDMUX_V2_PDCR_RXDSEL(x) (((x) & 0x7) << 13)
+#define IMX_AUDMUX_V2_PDCR_TXRXEN (1 << 12)
+#define IMX_AUDMUX_V2_PDCR_MODE(x) (((x) & 0x3) << 8)
+#define IMX_AUDMUX_V2_PDCR_INMMASK(x) ((x) & 0xff)
+
+#endif /* __DT_FSL_IMX_AUDMUX_H */
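The same register macros are usable from C code as from device trees. A hedged sketch of composing a synchronous 4-wire setup where the external port's clock and frame sync are driven from internal port MX31_AUDMUX_PORT1_SSI0; the port choice is illustrative:

/* Port control: synchronous mode, TX frame sync and clock as outputs,
 * both sourced from SSI0 on internal port 1. */
unsigned int ptcr = IMX_AUDMUX_V2_PTCR_SYN |
		    IMX_AUDMUX_V2_PTCR_TFSDIR |
		    IMX_AUDMUX_V2_PTCR_TCLKDIR |
		    IMX_AUDMUX_V2_PTCR_TFSEL(MX31_AUDMUX_PORT1_SSI0) |
		    IMX_AUDMUX_V2_PTCR_TCSEL(MX31_AUDMUX_PORT1_SSI0);
/* Data control: route RX data from the same internal port. */
unsigned int pdcr = IMX_AUDMUX_V2_PDCR_RXDSEL(MX31_AUDMUX_PORT1_SSI0);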
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 343744e4809..7e2d15837b0 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -26,7 +26,7 @@
#include <linux/types.h>
#include <linux/irqchip/arm-gic.h>
-#define VGIC_NR_IRQS 128
+#define VGIC_NR_IRQS 256
#define VGIC_NR_SGIS 16
#define VGIC_NR_PPIS 16
#define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 6ad72f92469..a5db4aeefa3 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -191,7 +191,6 @@ extern bool wmi_has_guid(const char *guid);
#define ACPI_VIDEO_BACKLIGHT_DMI_VIDEO 0x0200
#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR 0x0400
#define ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO 0x0800
-#define ACPI_VIDEO_SKIP_BACKLIGHT 0x1000
#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
@@ -482,6 +481,13 @@ void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
acpi_status acpi_os_prepare_sleep(u8 sleep_state,
u32 pm1a_control, u32 pm1b_control);
+
+void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
+ u32 val_a, u32 val_b));
+
+acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state,
+ u32 val_a, u32 val_b);
+
#ifdef CONFIG_X86
void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
#else
diff --git a/include/linux/ata.h b/include/linux/ata.h
index ee0bd952405..bf4c69ca76d 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -239,6 +239,8 @@ enum {
ATA_CMD_WRITE_QUEUED_FUA_EXT = 0x3E,
ATA_CMD_FPDMA_READ = 0x60,
ATA_CMD_FPDMA_WRITE = 0x61,
+ ATA_CMD_FPDMA_SEND = 0x64,
+ ATA_CMD_FPDMA_RECV = 0x65,
ATA_CMD_PIO_READ = 0x20,
ATA_CMD_PIO_READ_EXT = 0x24,
ATA_CMD_PIO_WRITE = 0x30,
@@ -293,8 +295,13 @@ enum {
/* marked obsolete in the ATA/ATAPI-7 spec */
ATA_CMD_RESTORE = 0x10,
+ /* Subcmds for ATA_CMD_FPDMA_SEND */
+ ATA_SUBCMD_FPDMA_SEND_DSM = 0x00,
+ ATA_SUBCMD_FPDMA_SEND_WR_LOG_DMA_EXT = 0x02,
+
/* READ_LOG_EXT pages */
ATA_LOG_SATA_NCQ = 0x10,
+ ATA_LOG_NCQ_SEND_RECV = 0x13,
ATA_LOG_SATA_ID_DEV_DATA = 0x30,
ATA_LOG_SATA_SETTINGS = 0x08,
ATA_LOG_DEVSLP_OFFSET = 0x30,
@@ -305,6 +312,15 @@ enum {
ATA_LOG_DEVSLP_VALID = 0x07,
ATA_LOG_DEVSLP_VALID_MASK = 0x80,
+ /* NCQ send and receive log */
+ ATA_LOG_NCQ_SEND_RECV_SUBCMDS_OFFSET = 0x00,
+ ATA_LOG_NCQ_SEND_RECV_SUBCMDS_DSM = (1 << 0),
+ ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET = 0x04,
+ ATA_LOG_NCQ_SEND_RECV_DSM_TRIM = (1 << 0),
+ ATA_LOG_NCQ_SEND_RECV_RD_LOG_OFFSET = 0x08,
+ ATA_LOG_NCQ_SEND_RECV_WR_LOG_OFFSET = 0x0C,
+ ATA_LOG_NCQ_SEND_RECV_SIZE = 0x10,
+
/* READ/WRITE LONG (obsolete) */
ATA_CMD_READ_LONG = 0x22,
ATA_CMD_READ_LONG_ONCE = 0x23,
@@ -446,22 +462,6 @@ enum {
SERR_TRANS_ST_ERROR = (1 << 24), /* Transport state trans. error */
SERR_UNRECOG_FIS = (1 << 25), /* Unrecognized FIS */
SERR_DEV_XCHG = (1 << 26), /* device exchanged */
-
- /* struct ata_taskfile flags */
- ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */
- ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */
- ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */
- ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */
- ATA_TFLAG_LBA = (1 << 4), /* enable LBA */
- ATA_TFLAG_FUA = (1 << 5), /* enable FUA */
- ATA_TFLAG_POLLING = (1 << 6), /* set nIEN to 1 and use polling */
-
- /* protocol flags */
- ATA_PROT_FLAG_PIO = (1 << 0), /* is PIO */
- ATA_PROT_FLAG_DMA = (1 << 1), /* is DMA */
- ATA_PROT_FLAG_DATA = ATA_PROT_FLAG_PIO | ATA_PROT_FLAG_DMA,
- ATA_PROT_FLAG_NCQ = (1 << 2), /* is NCQ */
- ATA_PROT_FLAG_ATAPI = (1 << 3), /* is ATAPI */
};
enum ata_tf_protocols {
@@ -488,83 +488,6 @@ struct ata_bmdma_prd {
__le32 flags_len;
};
-struct ata_taskfile {
- unsigned long flags; /* ATA_TFLAG_xxx */
- u8 protocol; /* ATA_PROT_xxx */
-
- u8 ctl; /* control reg */
-
- u8 hob_feature; /* additional data */
- u8 hob_nsect; /* to support LBA48 */
- u8 hob_lbal;
- u8 hob_lbam;
- u8 hob_lbah;
-
- u8 feature;
- u8 nsect;
- u8 lbal;
- u8 lbam;
- u8 lbah;
-
- u8 device;
-
- u8 command; /* IO operation */
-};
-
-/*
- * protocol tests
- */
-static inline unsigned int ata_prot_flags(u8 prot)
-{
- switch (prot) {
- case ATA_PROT_NODATA:
- return 0;
- case ATA_PROT_PIO:
- return ATA_PROT_FLAG_PIO;
- case ATA_PROT_DMA:
- return ATA_PROT_FLAG_DMA;
- case ATA_PROT_NCQ:
- return ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ;
- case ATAPI_PROT_NODATA:
- return ATA_PROT_FLAG_ATAPI;
- case ATAPI_PROT_PIO:
- return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO;
- case ATAPI_PROT_DMA:
- return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA;
- }
- return 0;
-}
-
-static inline int ata_is_atapi(u8 prot)
-{
- return ata_prot_flags(prot) & ATA_PROT_FLAG_ATAPI;
-}
-
-static inline int ata_is_nodata(u8 prot)
-{
- return !(ata_prot_flags(prot) & ATA_PROT_FLAG_DATA);
-}
-
-static inline int ata_is_pio(u8 prot)
-{
- return ata_prot_flags(prot) & ATA_PROT_FLAG_PIO;
-}
-
-static inline int ata_is_dma(u8 prot)
-{
- return ata_prot_flags(prot) & ATA_PROT_FLAG_DMA;
-}
-
-static inline int ata_is_ncq(u8 prot)
-{
- return ata_prot_flags(prot) & ATA_PROT_FLAG_NCQ;
-}
-
-static inline int ata_is_data(u8 prot)
-{
- return ata_prot_flags(prot) & ATA_PROT_FLAG_DATA;
-}
-
/*
* id tests
*/
@@ -865,6 +788,11 @@ static inline int ata_id_rotation_rate(const u16 *id)
return val;
}
+static inline bool ata_id_has_ncq_send_and_recv(const u16 *id)
+{
+ return id[ATA_ID_SATA_CAPABILITY_2] & BIT(6);
+}
+
static inline bool ata_id_has_trim(const u16 *id)
{
if (ata_id_major_version(id) >= 7 &&
@@ -1060,15 +988,6 @@ static inline unsigned ata_set_lba_range_entries(void *_buffer,
return used_bytes;
}
-static inline int is_multi_taskfile(struct ata_taskfile *tf)
-{
- return (tf->command == ATA_CMD_READ_MULTI) ||
- (tf->command == ATA_CMD_WRITE_MULTI) ||
- (tf->command == ATA_CMD_READ_MULTI_EXT) ||
- (tf->command == ATA_CMD_WRITE_MULTI_EXT) ||
- (tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT);
-}
-
static inline bool ata_ok(u8 status)
{
return ((status & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | ATA_ERR))
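Taken together, the new IDENTIFY bit and log-page constants let a driver probe queued-TRIM support from the NCQ Send/Recv log. A hedged sketch; id is the IDENTIFY data and log is assumed to be a 512-byte buffer already read from log page ATA_LOG_NCQ_SEND_RECV:

static bool dev_supports_ncq_trim(const u16 *id, const u8 *log)
{
	if (!ata_id_has_ncq_send_and_recv(id))
		return false;

	/* DSM/TRIM capability bit within the NCQ Send/Recv log */
	return log[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &
	       ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
}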
diff --git a/include/linux/atmel-ssc.h b/include/linux/atmel-ssc.h
index deb0ae58b99..66a0e5384ed 100644
--- a/include/linux/atmel-ssc.h
+++ b/include/linux/atmel-ssc.h
@@ -11,7 +11,7 @@ struct atmel_ssc_platform_data {
struct ssc_device {
struct list_head list;
- resource_size_t phybase;
+ dma_addr_t phybase;
void __iomem *regs;
struct platform_device *pdev;
struct atmel_ssc_platform_data *pdata;
diff --git a/include/linux/atmel_serial.h b/include/linux/atmel_serial.h
index fd6833764d7..be201ca2990 100644
--- a/include/linux/atmel_serial.h
+++ b/include/linux/atmel_serial.h
@@ -124,4 +124,6 @@
#define ATMEL_US_NER 0x44 /* Number of Errors Register */
#define ATMEL_US_IF 0x4c /* IrDA Filter Register */
+#define ATMEL_US_NAME 0xf0 /* Ip Name */
+
#endif
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index 622fc505d3e..4d043c30216 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -72,7 +72,19 @@ struct bcma_host_ops {
/* Core-ID values. */
#define BCMA_CORE_OOB_ROUTER 0x367 /* Out of band */
#define BCMA_CORE_4706_CHIPCOMMON 0x500
+#define BCMA_CORE_PCIEG2 0x501
+#define BCMA_CORE_DMA 0x502
+#define BCMA_CORE_SDIO3 0x503
+#define BCMA_CORE_USB20 0x504
+#define BCMA_CORE_USB30 0x505
+#define BCMA_CORE_A9JTAG 0x506
+#define BCMA_CORE_DDR23 0x507
+#define BCMA_CORE_ROM 0x508
+#define BCMA_CORE_NAND 0x509
+#define BCMA_CORE_QSPI 0x50A
+#define BCMA_CORE_CHIPCOMMON_B 0x50B
#define BCMA_CORE_4706_SOC_RAM 0x50E
+#define BCMA_CORE_ARMCA9 0x510
#define BCMA_CORE_4706_MAC_GBIT 0x52D
#define BCMA_CORE_AMEMC 0x52E /* DDR1/2 memory controller core */
#define BCMA_CORE_ALTA 0x534 /* I2S core */
@@ -177,6 +189,11 @@ struct bcma_host_ops {
#define BCMA_PKG_ID_BCM5357 11
#define BCMA_CHIP_ID_BCM53572 53572
#define BCMA_PKG_ID_BCM47188 9
+#define BCMA_CHIP_ID_BCM4707 53010
+#define BCMA_PKG_ID_BCM4707 1
+#define BCMA_PKG_ID_BCM4708 2
+#define BCMA_PKG_ID_BCM4709 0
+#define BCMA_CHIP_ID_BCM53018 53018
/* Board types (on PCI usually equals to the subsystem dev id) */
/* BCM4313 */
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index 424760f01b9..d66033f418c 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -181,10 +181,31 @@ struct pci_dev;
#define BCMA_CORE_PCI_CFG_DEVCTRL 0xd8
+#define BCMA_CORE_PCI_
+
+/* MDIO devices (SERDES modules) */
+#define BCMA_CORE_PCI_MDIO_IEEE0 0x000
+#define BCMA_CORE_PCI_MDIO_IEEE1 0x001
+#define BCMA_CORE_PCI_MDIO_BLK0 0x800
+#define BCMA_CORE_PCI_MDIO_BLK1 0x801
+#define BCMA_CORE_PCI_MDIO_BLK1_MGMT0 0x16
+#define BCMA_CORE_PCI_MDIO_BLK1_MGMT1 0x17
+#define BCMA_CORE_PCI_MDIO_BLK1_MGMT2 0x18
+#define BCMA_CORE_PCI_MDIO_BLK1_MGMT3 0x19
+#define BCMA_CORE_PCI_MDIO_BLK1_MGMT4 0x1A
+#define BCMA_CORE_PCI_MDIO_BLK2 0x802
+#define BCMA_CORE_PCI_MDIO_BLK3 0x803
+#define BCMA_CORE_PCI_MDIO_BLK4 0x804
+#define BCMA_CORE_PCI_MDIO_TXPLL 0x808 /* TXPLL register block idx */
+#define BCMA_CORE_PCI_MDIO_TXCTRL0 0x820
+#define BCMA_CORE_PCI_MDIO_SERDESID 0x831
+#define BCMA_CORE_PCI_MDIO_RXCTRL0 0x840
+
/* PCIE Root Capability Register bits (Host mode only) */
#define BCMA_CORE_PCI_RC_CRS_VISIBILITY 0x0001
struct bcma_drv_pci;
+struct bcma_bus;
#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
struct bcma_drv_pci_host {
@@ -219,7 +240,8 @@ struct bcma_drv_pci {
extern void bcma_core_pci_init(struct bcma_drv_pci *pc);
extern int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc,
struct bcma_device *core, bool enable);
-extern void bcma_core_pci_extend_L1timer(struct bcma_drv_pci *pc, bool extend);
+extern void bcma_core_pci_up(struct bcma_bus *bus);
+extern void bcma_core_pci_down(struct bcma_bus *bus);
extern int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev);
extern int bcma_core_pci_plat_dev_init(struct pci_dev *dev);
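The exported L1-timer tweak gives way to the paired bcma_core_pci_up()/bcma_core_pci_down() helpers, which host wireless drivers are expected to call around device activity. A hedged sketch of the calling convention; the surrounding driver code is illustrative:

static int my_wifi_start(struct bcma_device *core)
{
	/* bring the PCIe core up before touching the device */
	bcma_core_pci_up(core->bus);
	/* ... enable interrupts, start DMA ... */
	return 0;
}

static void my_wifi_stop(struct bcma_device *core)
{
	/* ... stop DMA, disable interrupts ... */
	bcma_core_pci_down(core->bus);
}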
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 91fa9a94ae9..d77797a52b7 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -36,6 +36,7 @@ enum bh_state_bits {
BH_Quiet, /* Buffer Error Printks to be quiet */
BH_Meta, /* Buffer contains metadata */
BH_Prio, /* Buffer should be submitted with REQ_PRIO */
+ BH_Defer_Completion, /* Defer AIO completion to workqueue */
BH_PrivateStart,/* not a state bit, but the first bit available
* for private allocation by other entities
@@ -128,6 +129,7 @@ BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
+BUFFER_FNS(Defer_Completion, defer_completion)
#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h
index 089fe43211a..dc029dba7a0 100644
--- a/include/linux/can/platform/mcp251x.h
+++ b/include/linux/can/platform/mcp251x.h
@@ -9,26 +9,13 @@
#include <linux/spi/spi.h>
-/**
+/*
* struct mcp251x_platform_data - MCP251X SPI CAN controller platform data
* @oscillator_frequency: - oscillator frequency in Hz
- * @irq_flags: - IRQF configuration flags
- * @board_specific_setup: - called before probing the chip (power,reset)
- * @transceiver_enable: - called to power on/off the transceiver
- * @power_enable: - called to power on/off the mcp *and* the
- * transceiver
- *
- * Please note that you should define power_enable or transceiver_enable or
- * none of them. Defining both of them is no use.
- *
*/
struct mcp251x_platform_data {
unsigned long oscillator_frequency;
- unsigned long irq_flags;
- int (*board_specific_setup)(struct spi_device *spi);
- int (*transceiver_enable)(int enable);
- int (*power_enable) (int enable);
};
#endif /* __CAN_PLATFORM_MCP251X_H__ */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index e9ac882868c..3561d305b1e 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -66,22 +66,25 @@ enum cgroup_subsys_id {
/* Per-subsystem/per-cgroup state maintained by the system. */
struct cgroup_subsys_state {
- /*
- * The cgroup that this subsystem is attached to. Useful
- * for subsystems that want to know about the cgroup
- * hierarchy structure
- */
+ /* the cgroup that this css is attached to */
struct cgroup *cgroup;
+ /* the cgroup subsystem that this css is attached to */
+ struct cgroup_subsys *ss;
+
/* reference count - access via css_[try]get() and css_put() */
struct percpu_ref refcnt;
+ /* the parent css */
+ struct cgroup_subsys_state *parent;
+
unsigned long flags;
/* ID for this css, if possible */
struct css_id __rcu *id;
- /* Used to put @cgroup->dentry on the last css_put() */
- struct work_struct dput_work;
+ /* percpu_ref killing and RCU release */
+ struct rcu_head rcu_head;
+ struct work_struct destroy_work;
};
/* bits in struct cgroup_subsys_state flags field */
@@ -161,7 +164,16 @@ struct cgroup_name {
struct cgroup {
unsigned long flags; /* "unsigned long" so bitops work */
- int id; /* ida allocated in-hierarchy ID */
+ /*
+ * idr allocated in-hierarchy ID.
+ *
+ * The ID of the root cgroup is always 0, and a new cgroup
+ * will be assigned the smallest available ID.
+ */
+ int id;
+
+ /* the number of attached css's */
+ int nr_css;
/*
* We link our 'sibling' struct into our parent's 'children'.
@@ -196,7 +208,7 @@ struct cgroup {
struct cgroup_name __rcu *name;
/* Private pointers for each registered subsystem */
- struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
+ struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
struct cgroupfs_root *root;
@@ -220,10 +232,12 @@ struct cgroup {
struct list_head pidlists;
struct mutex pidlist_mutex;
+ /* dummy css with NULL ->ss, points back to this cgroup */
+ struct cgroup_subsys_state dummy_css;
+
/* For css percpu_ref killing and RCU-protected deletion */
struct rcu_head rcu_head;
struct work_struct destroy_work;
- atomic_t css_kill_cnt;
/* List of events which userspace want to receive */
struct list_head event_list;
@@ -322,7 +336,7 @@ struct cgroupfs_root {
unsigned long flags;
/* IDs for cgroups in this hierarchy */
- struct ida cgroup_ida;
+ struct idr cgroup_idr;
/* The path to use for release notifications. */
char release_agent_path[PATH_MAX];
@@ -394,9 +408,10 @@ struct cgroup_map_cb {
/* cftype->flags */
enum {
- CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cg */
- CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cg */
+ CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */
+ CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */
CFTYPE_INSANE = (1 << 2), /* don't create if sane_behavior */
+ CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
};
#define MAX_CFTYPE_NAME 64
@@ -424,35 +439,41 @@ struct cftype {
/* CFTYPE_* flags */
unsigned int flags;
+ /*
+ * The subsys this file belongs to. Initialized automatically
+ * during registration. NULL for cgroup core files.
+ */
+ struct cgroup_subsys *ss;
+
int (*open)(struct inode *inode, struct file *file);
- ssize_t (*read)(struct cgroup *cgrp, struct cftype *cft,
+ ssize_t (*read)(struct cgroup_subsys_state *css, struct cftype *cft,
struct file *file,
char __user *buf, size_t nbytes, loff_t *ppos);
/*
* read_u64() is a shortcut for the common case of returning a
* single integer. Use it in place of read()
*/
- u64 (*read_u64)(struct cgroup *cgrp, struct cftype *cft);
+ u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);
/*
* read_s64() is a signed version of read_u64()
*/
- s64 (*read_s64)(struct cgroup *cgrp, struct cftype *cft);
+ s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);
/*
* read_map() is used for defining a map of key/value
* pairs. It should call cb->fill(cb, key, value) for each
* entry. The key/value pairs (and their ordering) should not
* change between reboots.
*/
- int (*read_map)(struct cgroup *cgrp, struct cftype *cft,
+ int (*read_map)(struct cgroup_subsys_state *css, struct cftype *cft,
struct cgroup_map_cb *cb);
/*
* read_seq_string() is used for outputting a simple sequence
* using seqfile.
*/
- int (*read_seq_string)(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *m);
+ int (*read_seq_string)(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *m);
- ssize_t (*write)(struct cgroup *cgrp, struct cftype *cft,
+ ssize_t (*write)(struct cgroup_subsys_state *css, struct cftype *cft,
struct file *file,
const char __user *buf, size_t nbytes, loff_t *ppos);
@@ -461,18 +482,20 @@ struct cftype {
* a single integer (as parsed by simple_strtoull) from
* userspace. Use in place of write(); return 0 or error.
*/
- int (*write_u64)(struct cgroup *cgrp, struct cftype *cft, u64 val);
+ int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 val);
/*
* write_s64() is a signed version of write_u64()
*/
- int (*write_s64)(struct cgroup *cgrp, struct cftype *cft, s64 val);
+ int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
+ s64 val);
/*
* write_string() is passed a nul-terminated kernelspace
* buffer of maximum length determined by max_write_len.
* Returns 0 or -ve error code.
*/
- int (*write_string)(struct cgroup *cgrp, struct cftype *cft,
+ int (*write_string)(struct cgroup_subsys_state *css, struct cftype *cft,
const char *buffer);
/*
* trigger() callback can be used to get some kick from the
@@ -480,7 +503,7 @@ struct cftype {
* at all. The private field can be used to determine the
* kick type for multiplexing.
*/
- int (*trigger)(struct cgroup *cgrp, unsigned int event);
+ int (*trigger)(struct cgroup_subsys_state *css, unsigned int event);
int (*release)(struct inode *inode, struct file *file);
@@ -490,16 +513,18 @@ struct cftype {
* you want to provide this functionality. Use eventfd_signal()
* on eventfd to send notification to userspace.
*/
- int (*register_event)(struct cgroup *cgrp, struct cftype *cft,
- struct eventfd_ctx *eventfd, const char *args);
+ int (*register_event)(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct eventfd_ctx *eventfd,
+ const char *args);
/*
* unregister_event() callback will be called when userspace
* closes the eventfd or on cgroup removing.
* This callback must be implemented if you want to provide
* notification functionality.
*/
- void (*unregister_event)(struct cgroup *cgrp, struct cftype *cft,
- struct eventfd_ctx *eventfd);
+ void (*unregister_event)(struct cgroup_subsys_state *css,
+ struct cftype *cft,
+ struct eventfd_ctx *eventfd);
};
/*
@@ -512,15 +537,6 @@ struct cftype_set {
struct cftype *cfts;
};
-struct cgroup_scanner {
- struct cgroup *cg;
- int (*test_task)(struct task_struct *p, struct cgroup_scanner *scan);
- void (*process_task)(struct task_struct *p,
- struct cgroup_scanner *scan);
- struct ptr_heap *heap;
- void *data;
-};
-
/*
* See the comment above CGRP_ROOT_SANE_BEHAVIOR for details. This
* function can be called as long as @cgrp is accessible.
@@ -537,7 +553,7 @@ static inline const char *cgroup_name(const struct cgroup *cgrp)
}
int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
-int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_rm_cftypes(struct cftype *cfts);
bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
@@ -553,20 +569,22 @@ int cgroup_task_count(const struct cgroup *cgrp);
struct cgroup_taskset;
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
-struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset);
+struct cgroup_subsys_state *cgroup_taskset_cur_css(struct cgroup_taskset *tset,
+ int subsys_id);
int cgroup_taskset_size(struct cgroup_taskset *tset);
/**
* cgroup_taskset_for_each - iterate cgroup_taskset
* @task: the loop cursor
- * @skip_cgrp: skip if task's cgroup matches this, %NULL to iterate through all
+ * @skip_css: skip if task's css matches this, %NULL to iterate through all
* @tset: taskset to iterate
*/
-#define cgroup_taskset_for_each(task, skip_cgrp, tset) \
+#define cgroup_taskset_for_each(task, skip_css, tset) \
for ((task) = cgroup_taskset_first((tset)); (task); \
(task) = cgroup_taskset_next((tset))) \
- if (!(skip_cgrp) || \
- cgroup_taskset_cur_cgroup((tset)) != (skip_cgrp))
+ if (!(skip_css) || \
+ cgroup_taskset_cur_css((tset), \
+ (skip_css)->ss->subsys_id) != (skip_css))
/*
* Control Group subsystem type.
@@ -574,18 +592,22 @@ int cgroup_taskset_size(struct cgroup_taskset *tset);
*/
struct cgroup_subsys {
- struct cgroup_subsys_state *(*css_alloc)(struct cgroup *cgrp);
- int (*css_online)(struct cgroup *cgrp);
- void (*css_offline)(struct cgroup *cgrp);
- void (*css_free)(struct cgroup *cgrp);
-
- int (*can_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
- void (*cancel_attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
- void (*attach)(struct cgroup *cgrp, struct cgroup_taskset *tset);
+ struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
+ int (*css_online)(struct cgroup_subsys_state *css);
+ void (*css_offline)(struct cgroup_subsys_state *css);
+ void (*css_free)(struct cgroup_subsys_state *css);
+
+ int (*can_attach)(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset);
+ void (*cancel_attach)(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset);
+ void (*attach)(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset);
void (*fork)(struct task_struct *task);
- void (*exit)(struct cgroup *cgrp, struct cgroup *old_cgrp,
+ void (*exit)(struct cgroup_subsys_state *css,
+ struct cgroup_subsys_state *old_css,
struct task_struct *task);
- void (*bind)(struct cgroup *root);
+ void (*bind)(struct cgroup_subsys_state *root_css);
int subsys_id;
int disabled;
@@ -641,10 +663,17 @@ struct cgroup_subsys {
#undef IS_SUBSYS_ENABLED
#undef SUBSYS
-static inline struct cgroup_subsys_state *cgroup_subsys_state(
- struct cgroup *cgrp, int subsys_id)
+/**
+ * css_parent - find the parent css
+ * @css: the target cgroup_subsys_state
+ *
+ * Return the parent css of @css. This function is guaranteed to return
+ * non-NULL parent as long as @css isn't the root.
+ */
+static inline
+struct cgroup_subsys_state *css_parent(struct cgroup_subsys_state *css)
{
- return cgrp->subsys[subsys_id];
+ return css->parent;
}
/**
@@ -672,7 +701,7 @@ extern struct mutex cgroup_mutex;
#endif
/**
- * task_subsys_state_check - obtain css for (task, subsys) w/ extra access conds
+ * task_css_check - obtain css for (task, subsys) w/ extra access conds
* @task: the target task
* @subsys_id: the target subsystem ID
* @__c: extra condition expression to be passed to rcu_dereference_check()
@@ -680,7 +709,7 @@ extern struct mutex cgroup_mutex;
* Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
* synchronization rules are the same as task_css_set_check().
*/
-#define task_subsys_state_check(task, subsys_id, __c) \
+#define task_css_check(task, subsys_id, __c) \
task_css_set_check((task), (__c))->subsys[(subsys_id)]
/**
@@ -695,87 +724,92 @@ static inline struct css_set *task_css_set(struct task_struct *task)
}
/**
- * task_subsys_state - obtain css for (task, subsys)
+ * task_css - obtain css for (task, subsys)
* @task: the target task
* @subsys_id: the target subsystem ID
*
- * See task_subsys_state_check().
+ * See task_css_check().
*/
-static inline struct cgroup_subsys_state *
-task_subsys_state(struct task_struct *task, int subsys_id)
+static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
+ int subsys_id)
{
- return task_subsys_state_check(task, subsys_id, false);
+ return task_css_check(task, subsys_id, false);
}
-static inline struct cgroup* task_cgroup(struct task_struct *task,
- int subsys_id)
+static inline struct cgroup *task_cgroup(struct task_struct *task,
+ int subsys_id)
{
- return task_subsys_state(task, subsys_id)->cgroup;
+ return task_css(task, subsys_id)->cgroup;
}
-struct cgroup *cgroup_next_sibling(struct cgroup *pos);
+struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
+ struct cgroup_subsys_state *parent);
+
+struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
/**
- * cgroup_for_each_child - iterate through children of a cgroup
- * @pos: the cgroup * to use as the loop cursor
- * @cgrp: cgroup whose children to walk
+ * css_for_each_child - iterate through children of a css
+ * @pos: the css * to use as the loop cursor
+ * @parent: css whose children to walk
*
- * Walk @cgrp's children. Must be called under rcu_read_lock(). A child
- * cgroup which hasn't finished ->css_online() or already has finished
+ * Walk @parent's children. Must be called under rcu_read_lock(). A child
+ * css which hasn't finished ->css_online() or already has finished
* ->css_offline() may show up during traversal and it's each subsystem's
* responsibility to verify that each @pos is alive.
*
* If a subsystem synchronizes against the parent in its ->css_online() and
- * before starting iterating, a cgroup which finished ->css_online() is
+ * before starting iterating, a css which finished ->css_online() is
* guaranteed to be visible in the future iterations.
*
* It is allowed to temporarily drop RCU read lock during iteration. The
* caller is responsible for ensuring that @pos remains accessible until
* the start of the next iteration by, for example, bumping the css refcnt.
*/
-#define cgroup_for_each_child(pos, cgrp) \
- for ((pos) = list_first_or_null_rcu(&(cgrp)->children, \
- struct cgroup, sibling); \
- (pos); (pos) = cgroup_next_sibling((pos)))
+#define css_for_each_child(pos, parent) \
+ for ((pos) = css_next_child(NULL, (parent)); (pos); \
+ (pos) = css_next_child((pos), (parent)))
+
+struct cgroup_subsys_state *
+css_next_descendant_pre(struct cgroup_subsys_state *pos,
+ struct cgroup_subsys_state *css);
-struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
- struct cgroup *cgroup);
-struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
+struct cgroup_subsys_state *
+css_rightmost_descendant(struct cgroup_subsys_state *pos);
/**
- * cgroup_for_each_descendant_pre - pre-order walk of a cgroup's descendants
- * @pos: the cgroup * to use as the loop cursor
- * @cgroup: cgroup whose descendants to walk
+ * css_for_each_descendant_pre - pre-order walk of a css's descendants
+ * @pos: the css * to use as the loop cursor
+ * @root: css whose descendants to walk
*
- * Walk @cgroup's descendants. Must be called under rcu_read_lock(). A
- * descendant cgroup which hasn't finished ->css_online() or already has
+ * Walk @root's descendants. @root is included in the iteration and the
+ * first node to be visited. Must be called under rcu_read_lock(). A
+ * descendant css which hasn't finished ->css_online() or already has
* finished ->css_offline() may show up during traversal and it's each
* subsystem's responsibility to verify that each @pos is alive.
*
* If a subsystem synchronizes against the parent in its ->css_online() and
* before starting iterating, and synchronizes against @pos on each
- * iteration, any descendant cgroup which finished ->css_online() is
+ * iteration, any descendant css which finished ->css_online() is
* guaranteed to be visible in the future iterations.
*
* In other words, the following guarantees that a descendant can't escape
* state updates of its ancestors.
*
- * my_online(@cgrp)
+ * my_online(@css)
* {
- * Lock @cgrp->parent and @cgrp;
- * Inherit state from @cgrp->parent;
+ * Lock @css's parent and @css;
+ * Inherit state from the parent;
* Unlock both.
* }
*
- * my_update_state(@cgrp)
+ * my_update_state(@css)
* {
- * Lock @cgrp;
- * Update @cgrp's state;
- * Unlock @cgrp;
- *
- * cgroup_for_each_descendant_pre(@pos, @cgrp) {
+ * css_for_each_descendant_pre(@pos, @css) {
* Lock @pos;
- * Verify @pos is alive and inherit state from @pos->parent;
+ * if (@pos == @css)
+ * Update @css's state;
+ * else
+ * Verify @pos is alive and inherit state from its parent;
* Unlock @pos;
* }
* }
@@ -786,8 +820,7 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
* visible by walking order and, as long as inheriting operations to the
* same @pos are atomic to each other, multiple updates racing each other
* still result in the correct state. It's guaranteed that at least one
- * inheritance happens for any cgroup after the latest update to its
- * parent.
+ * inheritance happens for any css after the latest update to its parent.
*
* If checking parent's state requires locking the parent, each inheriting
* iteration should lock and unlock both @pos->parent and @pos.
@@ -800,52 +833,45 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos);
* caller is responsible for ensuring that @pos remains accessible until
* the start of the next iteration by, for example, bumping the css refcnt.
*/
-#define cgroup_for_each_descendant_pre(pos, cgroup) \
- for (pos = cgroup_next_descendant_pre(NULL, (cgroup)); (pos); \
- pos = cgroup_next_descendant_pre((pos), (cgroup)))
+#define css_for_each_descendant_pre(pos, css) \
+ for ((pos) = css_next_descendant_pre(NULL, (css)); (pos); \
+ (pos) = css_next_descendant_pre((pos), (css)))
-struct cgroup *cgroup_next_descendant_post(struct cgroup *pos,
- struct cgroup *cgroup);
+struct cgroup_subsys_state *
+css_next_descendant_post(struct cgroup_subsys_state *pos,
+ struct cgroup_subsys_state *css);
/**
- * cgroup_for_each_descendant_post - post-order walk of a cgroup's descendants
- * @pos: the cgroup * to use as the loop cursor
- * @cgroup: cgroup whose descendants to walk
+ * css_for_each_descendant_post - post-order walk of a css's descendants
+ * @pos: the css * to use as the loop cursor
+ * @css: css whose descendants to walk
*
- * Similar to cgroup_for_each_descendant_pre() but performs post-order
- * traversal instead. Note that the walk visibility guarantee described in
- * pre-order walk doesn't apply the same to post-order walks.
+ * Similar to css_for_each_descendant_pre() but performs post-order
+ * traversal instead. @root is included in the iteration and the last
+ * node to be visited. Note that the walk visibility guarantee described
+ * in pre-order walk doesn't apply the same to post-order walks.
*/
-#define cgroup_for_each_descendant_post(pos, cgroup) \
- for (pos = cgroup_next_descendant_post(NULL, (cgroup)); (pos); \
- pos = cgroup_next_descendant_post((pos), (cgroup)))
-
-/* A cgroup_iter should be treated as an opaque object */
-struct cgroup_iter {
- struct list_head *cset_link;
- struct list_head *task;
+#define css_for_each_descendant_post(pos, css) \
+ for ((pos) = css_next_descendant_post(NULL, (css)); (pos); \
+ (pos) = css_next_descendant_post((pos), (css)))
+
+/* A css_task_iter should be treated as an opaque object */
+struct css_task_iter {
+ struct cgroup_subsys_state *origin_css;
+ struct list_head *cset_link;
+ struct list_head *task;
};
-/*
- * To iterate across the tasks in a cgroup:
- *
- * 1) call cgroup_iter_start to initialize an iterator
- *
- * 2) call cgroup_iter_next() to retrieve member tasks until it
- * returns NULL or until you want to end the iteration
- *
- * 3) call cgroup_iter_end() to destroy the iterator.
- *
- * Or, call cgroup_scan_tasks() to iterate through every task in a
- * cgroup - cgroup_scan_tasks() holds the css_set_lock when calling
- * the test_task() callback, but not while calling the process_task()
- * callback.
- */
-void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it);
-struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
- struct cgroup_iter *it);
-void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it);
-int cgroup_scan_tasks(struct cgroup_scanner *scan);
+void css_task_iter_start(struct cgroup_subsys_state *css,
+ struct css_task_iter *it);
+struct task_struct *css_task_iter_next(struct css_task_iter *it);
+void css_task_iter_end(struct css_task_iter *it);
+
+int css_scan_tasks(struct cgroup_subsys_state *css,
+ bool (*test)(struct task_struct *, void *),
+ void (*process)(struct task_struct *, void *),
+ void *data, struct ptr_heap *heap);
+
int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
@@ -878,7 +904,8 @@ bool css_is_ancestor(struct cgroup_subsys_state *cg,
/* Get id and depth of css */
unsigned short css_id(struct cgroup_subsys_state *css);
-struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id);
+struct cgroup_subsys_state *css_from_dir(struct dentry *dentry,
+ struct cgroup_subsys *ss);
#else /* !CONFIG_CGROUPS */
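Every cftype and cgroup_subsys callback now receives a cgroup_subsys_state instead of a cgroup, so a controller typically embeds the css in its own per-group state and walks children with the css iterators. A hedged sketch; my_state and the handlers are illustrative:

struct my_state {
	struct cgroup_subsys_state css;
	u64 value;
};

static inline struct my_state *my_css(struct cgroup_subsys_state *css)
{
	return container_of(css, struct my_state, css);
}

static u64 my_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
{
	/* no cgroup->subsys[] lookup needed anymore */
	return my_css(css)->value;
}

static void my_propagate(struct cgroup_subsys_state *root)
{
	struct cgroup_subsys_state *pos;

	/* css_for_each_child() must run under rcu_read_lock() */
	rcu_read_lock();
	css_for_each_child(pos, root)
		my_css(pos)->value = my_css(root)->value;
	rcu_read_unlock();
}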
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 7f0c1dd0907..ec1aee4aec9 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -669,6 +669,13 @@ asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
int compat_restore_altstack(const compat_stack_t __user *uss);
int __compat_save_altstack(compat_stack_t __user *, unsigned long);
+#define compat_save_altstack_ex(uss, sp) do { \
+ compat_stack_t __user *__uss = uss; \
+ struct task_struct *t = current; \
+ put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \
+ put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \
+ put_user_ex(t->sas_ss_size, &__uss->ss_size); \
+} while (0);
asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
struct compat_timespec __user *interval);
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index fc09d7b0dac..158158704c3 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -2,100 +2,110 @@
#define _LINUX_CONTEXT_TRACKING_H
#include <linux/sched.h>
-#include <linux/percpu.h>
#include <linux/vtime.h>
+#include <linux/context_tracking_state.h>
#include <asm/ptrace.h>
-struct context_tracking {
- /*
- * When active is false, probes are unset in order
- * to minimize overhead: TIF flags are cleared
- * and calls to user_enter/exit are ignored. This
- * may be further optimized using static keys.
- */
- bool active;
- enum ctx_state {
- IN_KERNEL = 0,
- IN_USER,
- } state;
-};
-
-static inline void __guest_enter(void)
-{
- /*
- * This is running in ioctl context so we can avoid
- * the call to vtime_account() with its unnecessary idle check.
- */
- vtime_account_system(current);
- current->flags |= PF_VCPU;
-}
-
-static inline void __guest_exit(void)
-{
- /*
- * This is running in ioctl context so we can avoid
- * the call to vtime_account() with its unnecessary idle check.
- */
- vtime_account_system(current);
- current->flags &= ~PF_VCPU;
-}
#ifdef CONFIG_CONTEXT_TRACKING
-DECLARE_PER_CPU(struct context_tracking, context_tracking);
+extern void context_tracking_cpu_set(int cpu);
-static inline bool context_tracking_in_user(void)
+extern void context_tracking_user_enter(void);
+extern void context_tracking_user_exit(void);
+extern void __context_tracking_task_switch(struct task_struct *prev,
+ struct task_struct *next);
+
+static inline void user_enter(void)
{
- return __this_cpu_read(context_tracking.state) == IN_USER;
-}
+ if (static_key_false(&context_tracking_enabled))
+ context_tracking_user_enter();
-static inline bool context_tracking_active(void)
+}
+static inline void user_exit(void)
{
- return __this_cpu_read(context_tracking.active);
+ if (static_key_false(&context_tracking_enabled))
+ context_tracking_user_exit();
}
-extern void user_enter(void);
-extern void user_exit(void);
-
-extern void guest_enter(void);
-extern void guest_exit(void);
-
static inline enum ctx_state exception_enter(void)
{
enum ctx_state prev_ctx;
+ if (!static_key_false(&context_tracking_enabled))
+ return 0;
+
prev_ctx = this_cpu_read(context_tracking.state);
- user_exit();
+ context_tracking_user_exit();
return prev_ctx;
}
static inline void exception_exit(enum ctx_state prev_ctx)
{
- if (prev_ctx == IN_USER)
- user_enter();
+ if (static_key_false(&context_tracking_enabled)) {
+ if (prev_ctx == IN_USER)
+ context_tracking_user_enter();
+ }
}
-extern void context_tracking_task_switch(struct task_struct *prev,
- struct task_struct *next);
+static inline void context_tracking_task_switch(struct task_struct *prev,
+ struct task_struct *next)
+{
+ if (static_key_false(&context_tracking_enabled))
+ __context_tracking_task_switch(prev, next);
+}
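/*
 * Illustrative sketch, not part of this patch: the pairing the static-key
 * wrappers are designed for.  An arch exception handler saves the previous
 * context state on entry and restores it on exit; "do_example_fault" is a
 * hypothetical handler.
 */
static void do_example_fault(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	/* real fault handling would go here */

	exception_exit(prev_state);
}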
#else
-static inline bool context_tracking_in_user(void) { return false; }
static inline void user_enter(void) { }
static inline void user_exit(void) { }
+static inline enum ctx_state exception_enter(void) { return 0; }
+static inline void exception_exit(enum ctx_state prev_ctx) { }
+static inline void context_tracking_task_switch(struct task_struct *prev,
+ struct task_struct *next) { }
+#endif /* !CONFIG_CONTEXT_TRACKING */
+
+
+#ifdef CONFIG_CONTEXT_TRACKING_FORCE
+extern void context_tracking_init(void);
+#else
+static inline void context_tracking_init(void) { }
+#endif /* CONFIG_CONTEXT_TRACKING_FORCE */
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static inline void guest_enter(void)
{
- __guest_enter();
+ if (vtime_accounting_enabled())
+ vtime_guest_enter(current);
+ else
+ current->flags |= PF_VCPU;
}
static inline void guest_exit(void)
{
- __guest_exit();
+ if (vtime_accounting_enabled())
+ vtime_guest_exit(current);
+ else
+ current->flags &= ~PF_VCPU;
}
-static inline enum ctx_state exception_enter(void) { return 0; }
-static inline void exception_exit(enum ctx_state prev_ctx) { }
-static inline void context_tracking_task_switch(struct task_struct *prev,
- struct task_struct *next) { }
-#endif /* !CONFIG_CONTEXT_TRACKING */
+#else
+static inline void guest_enter(void)
+{
+ /*
+ * This is running in ioctl context, so it's safe
+ * to assume that the pending cputime to flush
+ * is system time.
+ */
+ vtime_account_system(current);
+ current->flags |= PF_VCPU;
+}
+
+static inline void guest_exit(void)
+{
+ /* Flush the guest cputime we spent on the guest */
+ vtime_account_system(current);
+ current->flags &= ~PF_VCPU;
+}
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
#endif
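/*
 * Illustrative sketch, not part of this patch: the intended call pattern for
 * the guest hooks.  A hypervisor's vcpu loop brackets the hardware guest
 * entry with guest_enter()/guest_exit() so the time is accounted as guest
 * time when vtime accounting is enabled.  "enter_guest_mode_hw" is purely
 * hypothetical.
 */
static void vcpu_run_example(void)
{
	guest_enter();			/* start accounting guest time */
	/* enter_guest_mode_hw(); */	/* hardware VM entry would go here */
	guest_exit();			/* back to host/system time */
}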
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
new file mode 100644
index 00000000000..0f1979d0674
--- /dev/null
+++ b/include/linux/context_tracking_state.h
@@ -0,0 +1,39 @@
+#ifndef _LINUX_CONTEXT_TRACKING_STATE_H
+#define _LINUX_CONTEXT_TRACKING_STATE_H
+
+#include <linux/percpu.h>
+#include <linux/static_key.h>
+
+struct context_tracking {
+ /*
+ * When active is false, probes are unset in order
+ * to minimize overhead: TIF flags are cleared
+ * and calls to user_enter/exit are ignored. This
+ * may be further optimized using static keys.
+ */
+ bool active;
+ enum ctx_state {
+ IN_KERNEL = 0,
+ IN_USER,
+ } state;
+};
+
+#ifdef CONFIG_CONTEXT_TRACKING
+extern struct static_key context_tracking_enabled;
+DECLARE_PER_CPU(struct context_tracking, context_tracking);
+
+static inline bool context_tracking_in_user(void)
+{
+ return __this_cpu_read(context_tracking.state) == IN_USER;
+}
+
+static inline bool context_tracking_active(void)
+{
+ return __this_cpu_read(context_tracking.active);
+}
+#else
+static inline bool context_tracking_in_user(void) { return false; }
+static inline bool context_tracking_active(void) { return false; }
+#endif /* CONFIG_CONTEXT_TRACKING */
+
+#endif
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index ab0eade7303..801ff9e7367 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -28,6 +28,7 @@ struct cpu {
extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu);
+extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
extern int cpu_add_dev_attr(struct device_attribute *attr);
extern void cpu_remove_dev_attr(struct device_attribute *attr);
@@ -172,6 +173,8 @@ extern struct bus_type cpu_subsys;
#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */
+extern void cpu_hotplug_begin(void);
+extern void cpu_hotplug_done(void);
extern void get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
@@ -197,6 +200,8 @@ static inline void cpu_hotplug_driver_unlock(void)
#else /* CONFIG_HOTPLUG_CPU */
+static inline void cpu_hotplug_begin(void) {}
+static inline void cpu_hotplug_done(void) {}
#define get_online_cpus() do { } while (0)
#define put_online_cpus() do { } while (0)
#define cpu_hotplug_disable() do { } while (0)
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 90d5a15120d..d568f3975ee 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -11,71 +11,36 @@
#ifndef _LINUX_CPUFREQ_H
#define _LINUX_CPUFREQ_H
-#include <asm/cputime.h>
-#include <linux/mutex.h>
-#include <linux/notifier.h>
-#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/completion.h>
#include <linux/kobject.h>
+#include <linux/notifier.h>
#include <linux/sysfs.h>
-#include <linux/completion.h>
-#include <linux/workqueue.h>
-#include <linux/cpumask.h>
-#include <asm/div64.h>
-
-#define CPUFREQ_NAME_LEN 16
-/* Print length for names. Extra 1 space for accomodating '\n' in prints */
-#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
/*********************************************************************
- * CPUFREQ NOTIFIER INTERFACE *
+ * CPUFREQ INTERFACE *
*********************************************************************/
-
-#define CPUFREQ_TRANSITION_NOTIFIER (0)
-#define CPUFREQ_POLICY_NOTIFIER (1)
-
-#ifdef CONFIG_CPU_FREQ
-int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
-int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
-extern void disable_cpufreq(void);
-#else /* CONFIG_CPU_FREQ */
-static inline int cpufreq_register_notifier(struct notifier_block *nb,
- unsigned int list)
-{
- return 0;
-}
-static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
- unsigned int list)
-{
- return 0;
-}
-static inline void disable_cpufreq(void) { }
-#endif /* CONFIG_CPU_FREQ */
-
-/* if (cpufreq_driver->target) exists, the ->governor decides what frequency
- * within the limits is used. If (cpufreq_driver->setpolicy> exists, these
- * two generic policies are available:
- */
-
-#define CPUFREQ_POLICY_POWERSAVE (1)
-#define CPUFREQ_POLICY_PERFORMANCE (2)
-
-/* Frequency values here are CPU kHz so that hardware which doesn't run
- * with some frequencies can complain without having to guess what per
- * cent / per mille means.
+/*
+ * Frequency values here are CPU kHz
+ *
* Maximum transition latency is in nanoseconds - if it's unknown,
* CPUFREQ_ETERNAL shall be used.
*/
+#define CPUFREQ_ETERNAL (-1)
+#define CPUFREQ_NAME_LEN 16
+/* Print length for names. Extra 1 space for accommodating '\n' in prints */
+#define CPUFREQ_NAME_PLEN (CPUFREQ_NAME_LEN + 1)
+
struct cpufreq_governor;
-/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
-extern struct kobject *cpufreq_global_kobject;
-int cpufreq_get_global_kobject(void);
-void cpufreq_put_global_kobject(void);
-int cpufreq_sysfs_create_file(const struct attribute *attr);
-void cpufreq_sysfs_remove_file(const struct attribute *attr);
+struct cpufreq_freqs {
+ unsigned int cpu; /* cpu nr */
+ unsigned int old;
+ unsigned int new;
+ u8 flags; /* flags of cpufreq_driver, see below. */
+};
-#define CPUFREQ_ETERNAL (-1)
struct cpufreq_cpuinfo {
unsigned int max_freq;
unsigned int min_freq;
@@ -117,123 +82,103 @@ struct cpufreq_policy {
struct cpufreq_real_policy user_policy;
+ struct list_head policy_list;
struct kobject kobj;
struct completion kobj_unregister;
int transition_ongoing; /* Tracks transition status */
};
-#define CPUFREQ_ADJUST (0)
-#define CPUFREQ_INCOMPATIBLE (1)
-#define CPUFREQ_NOTIFY (2)
-#define CPUFREQ_START (3)
-#define CPUFREQ_UPDATE_POLICY_CPU (4)
-
/* Only for ACPI */
#define CPUFREQ_SHARED_TYPE_NONE (0) /* None */
#define CPUFREQ_SHARED_TYPE_HW (1) /* HW does needed coordination */
#define CPUFREQ_SHARED_TYPE_ALL (2) /* All dependent CPUs should set freq */
#define CPUFREQ_SHARED_TYPE_ANY (3) /* Freq can be set from any dependent CPU*/
+struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
+void cpufreq_cpu_put(struct cpufreq_policy *policy);
+
static inline bool policy_is_shared(struct cpufreq_policy *policy)
{
return cpumask_weight(policy->cpus) > 1;
}
-/******************** cpufreq transition notifiers *******************/
-
-#define CPUFREQ_PRECHANGE (0)
-#define CPUFREQ_POSTCHANGE (1)
-#define CPUFREQ_RESUMECHANGE (8)
-#define CPUFREQ_SUSPENDCHANGE (9)
+/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
+extern struct kobject *cpufreq_global_kobject;
+int cpufreq_get_global_kobject(void);
+void cpufreq_put_global_kobject(void);
+int cpufreq_sysfs_create_file(const struct attribute *attr);
+void cpufreq_sysfs_remove_file(const struct attribute *attr);
-struct cpufreq_freqs {
- unsigned int cpu; /* cpu nr */
- unsigned int old;
- unsigned int new;
- u8 flags; /* flags of cpufreq_driver, see below. */
-};
+#ifdef CONFIG_CPU_FREQ
+unsigned int cpufreq_get(unsigned int cpu);
+unsigned int cpufreq_quick_get(unsigned int cpu);
+unsigned int cpufreq_quick_get_max(unsigned int cpu);
+void disable_cpufreq(void);
-/**
- * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
- * safe)
- * @old: old value
- * @div: divisor
- * @mult: multiplier
- *
- *
- * new = old * mult / div
- */
-static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
- u_int mult)
+u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
+int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
+int cpufreq_update_policy(unsigned int cpu);
+bool have_governor_per_policy(void);
+struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
+#else
+static inline unsigned int cpufreq_get(unsigned int cpu)
{
-#if BITS_PER_LONG == 32
-
- u64 result = ((u64) old) * ((u64) mult);
- do_div(result, div);
- return (unsigned long) result;
-
-#elif BITS_PER_LONG == 64
-
- unsigned long result = old * ((u64) mult);
- result /= div;
- return result;
-
+ return 0;
+}
+static inline unsigned int cpufreq_quick_get(unsigned int cpu)
+{
+ return 0;
+}
+static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
+{
+ return 0;
+}
+static inline void disable_cpufreq(void) { }
#endif
-};
/*********************************************************************
- * CPUFREQ GOVERNORS *
+ * CPUFREQ DRIVER INTERFACE *
*********************************************************************/
-#define CPUFREQ_GOV_START 1
-#define CPUFREQ_GOV_STOP 2
-#define CPUFREQ_GOV_LIMITS 3
-#define CPUFREQ_GOV_POLICY_INIT 4
-#define CPUFREQ_GOV_POLICY_EXIT 5
+#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
+#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
-struct cpufreq_governor {
- char name[CPUFREQ_NAME_LEN];
- int initialized;
- int (*governor) (struct cpufreq_policy *policy,
- unsigned int event);
- ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
- char *buf);
- int (*store_setspeed) (struct cpufreq_policy *policy,
- unsigned int freq);
- unsigned int max_transition_latency; /* HW must be able to switch to
- next freq faster than this value in nano secs or we
- will fallback to performance governor */
- struct list_head governor_list;
- struct module *owner;
+struct freq_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct cpufreq_policy *, char *);
+ ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
};
-/*
- * Pass a target to the cpufreq driver.
- */
-extern int cpufreq_driver_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation);
-extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation);
+#define cpufreq_freq_attr_ro(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
-extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy,
- unsigned int cpu);
+#define cpufreq_freq_attr_ro_perm(_name, _perm) \
+static struct freq_attr _name = \
+__ATTR(_name, _perm, show_##_name, NULL)
-int cpufreq_register_governor(struct cpufreq_governor *governor);
-void cpufreq_unregister_governor(struct cpufreq_governor *governor);
+#define cpufreq_freq_attr_rw(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
-/*********************************************************************
- * CPUFREQ DRIVER INTERFACE *
- *********************************************************************/
+struct global_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct kobject *kobj,
+ struct attribute *attr, char *buf);
+ ssize_t (*store)(struct kobject *a, struct attribute *b,
+ const char *c, size_t count);
+};
-#define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
-#define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
+#define define_one_global_ro(_name) \
+static struct global_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+#define define_one_global_rw(_name) \
+static struct global_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
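/*
 * Illustrative sketch, not part of this patch: how a driver uses the
 * attribute helpers.  cpufreq_freq_attr_ro(boost) expects a show_boost()
 * function and defines a read-only per-policy attribute; "boost" is a
 * hypothetical attribute name and the value shown is a placeholder.
 */
static ssize_t show_boost(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", policy->max);	/* placeholder value */
}
cpufreq_freq_attr_ro(boost);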
-struct freq_attr;
struct cpufreq_driver {
- struct module *owner;
char name[CPUFREQ_NAME_LEN];
u8 flags;
/*
@@ -258,8 +203,6 @@ struct cpufreq_driver {
unsigned int (*get) (unsigned int cpu);
/* optional */
- unsigned int (*getavg) (struct cpufreq_policy *policy,
- unsigned int cpu);
int (*bios_limit) (int cpu, unsigned int *limit);
int (*exit) (struct cpufreq_policy *policy);
@@ -269,7 +212,6 @@ struct cpufreq_driver {
};
/* flags */
-
#define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if
* all ->init() calls failed */
#define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel
@@ -281,8 +223,7 @@ struct cpufreq_driver {
int cpufreq_register_driver(struct cpufreq_driver *driver_data);
int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
-void cpufreq_notify_transition(struct cpufreq_policy *policy,
- struct cpufreq_freqs *freqs, unsigned int state);
+const char *cpufreq_get_current_driver(void);
static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
unsigned int min, unsigned int max)
@@ -300,87 +241,118 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
return;
}
-struct freq_attr {
- struct attribute attr;
- ssize_t (*show)(struct cpufreq_policy *, char *);
- ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
-};
-
-#define cpufreq_freq_attr_ro(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0444, show_##_name, NULL)
-
-#define cpufreq_freq_attr_ro_perm(_name, _perm) \
-static struct freq_attr _name = \
-__ATTR(_name, _perm, show_##_name, NULL)
-
-#define cpufreq_freq_attr_rw(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
+/*********************************************************************
+ * CPUFREQ NOTIFIER INTERFACE *
+ *********************************************************************/
-struct global_attr {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj,
- struct attribute *attr, char *buf);
- ssize_t (*store)(struct kobject *a, struct attribute *b,
- const char *c, size_t count);
-};
+#define CPUFREQ_TRANSITION_NOTIFIER (0)
+#define CPUFREQ_POLICY_NOTIFIER (1)
-#define define_one_global_ro(_name) \
-static struct global_attr _name = \
-__ATTR(_name, 0444, show_##_name, NULL)
+/* Transition notifiers */
+#define CPUFREQ_PRECHANGE (0)
+#define CPUFREQ_POSTCHANGE (1)
+#define CPUFREQ_RESUMECHANGE (8)
+#define CPUFREQ_SUSPENDCHANGE (9)
-#define define_one_global_rw(_name) \
-static struct global_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
+/* Policy Notifiers */
+#define CPUFREQ_ADJUST (0)
+#define CPUFREQ_INCOMPATIBLE (1)
+#define CPUFREQ_NOTIFY (2)
+#define CPUFREQ_START (3)
+#define CPUFREQ_UPDATE_POLICY_CPU (4)
-struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu);
-void cpufreq_cpu_put(struct cpufreq_policy *data);
-const char *cpufreq_get_current_driver(void);
+#ifdef CONFIG_CPU_FREQ
+int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list);
+int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list);
-/*********************************************************************
- * CPUFREQ 2.6. INTERFACE *
- *********************************************************************/
-u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
-int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
-int cpufreq_update_policy(unsigned int cpu);
-bool have_governor_per_policy(void);
-struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
+void cpufreq_notify_transition(struct cpufreq_policy *policy,
+ struct cpufreq_freqs *freqs, unsigned int state);
-#ifdef CONFIG_CPU_FREQ
-/*
- * query the current CPU frequency (in kHz). If zero, cpufreq couldn't detect it
- */
-unsigned int cpufreq_get(unsigned int cpu);
-#else
-static inline unsigned int cpufreq_get(unsigned int cpu)
+#else /* CONFIG_CPU_FREQ */
+static inline int cpufreq_register_notifier(struct notifier_block *nb,
+ unsigned int list)
{
return 0;
}
-#endif
-
-/*
- * query the last known CPU freq (in kHz). If zero, cpufreq couldn't detect it
- */
-#ifdef CONFIG_CPU_FREQ
-unsigned int cpufreq_quick_get(unsigned int cpu);
-unsigned int cpufreq_quick_get_max(unsigned int cpu);
-#else
-static inline unsigned int cpufreq_quick_get(unsigned int cpu)
+static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
+ unsigned int list)
{
return 0;
}
-static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
+#endif /* !CONFIG_CPU_FREQ */
+
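/*
 * Illustrative sketch, not part of this patch: registering a transition
 * notifier against the regrouped notifier interface.  The callback only
 * logs POSTCHANGE events; error handling is omitted.
 */
static int freq_trans_notify_example(struct notifier_block *nb,
				     unsigned long state, void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (state == CPUFREQ_POSTCHANGE)
		pr_debug("cpu%u: %u kHz -> %u kHz\n",
			 freqs->cpu, freqs->old, freqs->new);
	return NOTIFY_OK;
}

static struct notifier_block freq_trans_nb_example = {
	.notifier_call	= freq_trans_notify_example,
};
/* registered with:
 * cpufreq_register_notifier(&freq_trans_nb_example, CPUFREQ_TRANSITION_NOTIFIER);
 */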
+/**
+ * cpufreq_scale - "old * mult / div" calculation for large values (32-bit-arch
+ * safe)
+ * @old: old value
+ * @div: divisor
+ * @mult: multiplier
+ *
+ *
+ * new = old * mult / div
+ */
+static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
+ u_int mult)
{
- return 0;
-}
+#if BITS_PER_LONG == 32
+ u64 result = ((u64) old) * ((u64) mult);
+ do_div(result, div);
+ return (unsigned long) result;
+
+#elif BITS_PER_LONG == 64
+ unsigned long result = old * ((u64) mult);
+ result /= div;
+ return result;
#endif
+}
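/*
 * Illustrative sketch, not part of this patch: a worked use of
 * cpufreq_scale().  Rescaling a calibration value when the frequency
 * doubles from 800000 kHz to 1600000 kHz simply doubles it:
 * new = old * mult / div = old * 1600000 / 800000 = old * 2.
 */
static unsigned long rescale_example(unsigned long old_loops_per_jiffy)
{
	return cpufreq_scale(old_loops_per_jiffy, 800000, 1600000);
}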
/*********************************************************************
- * CPUFREQ DEFAULT GOVERNOR *
+ * CPUFREQ GOVERNORS *
*********************************************************************/
/*
+ * If (cpufreq_driver->target) exists, the ->governor decides what frequency
+ * within the limits is used. If (cpufreq_driver->setpolicy) exists, these
+ * two generic policies are available:
+ */
+#define CPUFREQ_POLICY_POWERSAVE (1)
+#define CPUFREQ_POLICY_PERFORMANCE (2)
+
+/* Governor Events */
+#define CPUFREQ_GOV_START 1
+#define CPUFREQ_GOV_STOP 2
+#define CPUFREQ_GOV_LIMITS 3
+#define CPUFREQ_GOV_POLICY_INIT 4
+#define CPUFREQ_GOV_POLICY_EXIT 5
+
+struct cpufreq_governor {
+ char name[CPUFREQ_NAME_LEN];
+ int initialized;
+ int (*governor) (struct cpufreq_policy *policy,
+ unsigned int event);
+ ssize_t (*show_setspeed) (struct cpufreq_policy *policy,
+ char *buf);
+ int (*store_setspeed) (struct cpufreq_policy *policy,
+ unsigned int freq);
+ unsigned int max_transition_latency; /* HW must be able to switch to
+ next freq faster than this value in nanoseconds or we
+ will fall back to the performance governor */
+ struct list_head governor_list;
+ struct module *owner;
+};
+
+/* Pass a target to the cpufreq driver */
+int cpufreq_driver_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation);
+int __cpufreq_driver_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation);
+int cpufreq_register_governor(struct cpufreq_governor *governor);
+void cpufreq_unregister_governor(struct cpufreq_governor *governor);
+
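/*
 * Illustrative sketch, not part of this patch: the minimal shape of a
 * governor built on these definitions.  "example" is a hypothetical
 * governor name, policy->max is the policy's upper limit, and the event
 * handling is deliberately simplistic.
 */
static int example_governor_fn(struct cpufreq_policy *policy,
			       unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		/* pin the policy to its maximum frequency */
		return __cpufreq_driver_target(policy, policy->max,
					       CPUFREQ_RELATION_H);
	default:
		return 0;
	}
}

static struct cpufreq_governor example_governor = {
	.name		= "example",
	.governor	= example_governor_fn,
	.owner		= THIS_MODULE,
};
/* registered/unregistered with cpufreq_register_governor()/cpufreq_unregister_governor() */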
+/* CPUFREQ DEFAULT GOVERNOR */
+/*
* The performance governor is the fallback governor if any other governor
* fails to load due to latency restrictions
*/
@@ -428,18 +400,16 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
unsigned int relation,
unsigned int *index);
-/* the following 3 funtions are for cpufreq core use only */
+void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
+ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
+
+/* the following function is for cpufreq core use only */
struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu);
/* the following are really really optional */
extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs;
-
void cpufreq_frequency_table_get_attr(struct cpufreq_frequency_table *table,
unsigned int cpu);
-void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy);
-
void cpufreq_frequency_table_put_attr(unsigned int cpu);
-ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf);
-
#endif /* _LINUX_CPUFREQ_H */
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 0bc4b74668e..781addc66f0 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -13,8 +13,6 @@
#include <linux/percpu.h>
#include <linux/list.h>
-#include <linux/kobject.h>
-#include <linux/completion.h>
#include <linux/hrtimer.h>
#define CPUIDLE_STATE_MAX 10
@@ -61,6 +59,10 @@ struct cpuidle_state {
#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
+struct cpuidle_device_kobj;
+struct cpuidle_state_kobj;
+struct cpuidle_driver_kobj;
+
struct cpuidle_device {
unsigned int registered:1;
unsigned int enabled:1;
@@ -71,9 +73,8 @@ struct cpuidle_device {
struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];
struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
struct cpuidle_driver_kobj *kobj_driver;
+ struct cpuidle_device_kobj *kobj_dev;
struct list_head device_list;
- struct kobject kobj;
- struct completion kobj_unregister;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
int safe_state_index;
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index b90337c9d46..9169b91ea2d 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -9,6 +9,7 @@
#include <linux/seqlock.h>
#include <linux/cache.h>
#include <linux/rcupdate.h>
+#include <linux/lockref.h>
struct nameidata;
struct path;
@@ -100,6 +101,8 @@ extern unsigned int full_name_hash(const unsigned char *, unsigned int);
# endif
#endif
+#define d_lock d_lockref.lock
+
struct dentry {
/* RCU lookup touched fields */
unsigned int d_flags; /* protected by d_lock */
@@ -112,8 +115,7 @@ struct dentry {
unsigned char d_iname[DNAME_INLINE_LEN]; /* small names */
/* Ref lookup also touches following */
- unsigned int d_count; /* protected by d_lock */
- spinlock_t d_lock; /* per dentry lock */
+ struct lockref d_lockref; /* per-dentry lock and refcount */
const struct dentry_operations *d_op;
struct super_block *d_sb; /* The root of the dentry tree */
unsigned long d_time; /* used by d_revalidate */
@@ -302,31 +304,9 @@ extern struct dentry *__d_lookup(const struct dentry *, const struct qstr *);
extern struct dentry *__d_lookup_rcu(const struct dentry *parent,
const struct qstr *name, unsigned *seq);
-/**
- * __d_rcu_to_refcount - take a refcount on dentry if sequence check is ok
- * @dentry: dentry to take a ref on
- * @seq: seqcount to verify against
- * Returns: 0 on failure, else 1.
- *
- * __d_rcu_to_refcount operates on a dentry,seq pair that was returned
- * by __d_lookup_rcu, to get a reference on an rcu-walk dentry.
- */
-static inline int __d_rcu_to_refcount(struct dentry *dentry, unsigned seq)
-{
- int ret = 0;
-
- assert_spin_locked(&dentry->d_lock);
- if (!read_seqcount_retry(&dentry->d_seq, seq)) {
- ret = 1;
- dentry->d_count++;
- }
-
- return ret;
-}
-
static inline unsigned d_count(const struct dentry *dentry)
{
- return dentry->d_count;
+ return dentry->d_lockref.count;
}
/* validate "insecure" dentry pointer */
@@ -336,6 +316,7 @@ extern int d_validate(struct dentry *, struct dentry *);
* helper function for dentry_operations.d_dname() members
*/
extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+extern char *simple_dname(struct dentry *, char *, int);
extern char *__d_path(const struct path *, const struct path *, char *, int);
extern char *d_absolute_path(const struct path *, char *, int);
@@ -356,17 +337,14 @@ extern char *dentry_path(struct dentry *, char *, int);
static inline struct dentry *dget_dlock(struct dentry *dentry)
{
if (dentry)
- dentry->d_count++;
+ dentry->d_lockref.count++;
return dentry;
}
static inline struct dentry *dget(struct dentry *dentry)
{
- if (dentry) {
- spin_lock(&dentry->d_lock);
- dget_dlock(dentry);
- spin_unlock(&dentry->d_lock);
- }
+ if (dentry)
+ lockref_get(&dentry->d_lockref);
return dentry;
}
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index d68b4ea7343..263489d0788 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -192,6 +192,13 @@ static inline struct dentry *debugfs_create_x32(const char *name, umode_t mode,
return ERR_PTR(-ENODEV);
}
+static inline struct dentry *debugfs_create_x64(const char *name, umode_t mode,
+ struct dentry *parent,
+ u64 *value)
+{
+ return ERR_PTR(-ENODEV);
+}
+
static inline struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
struct dentry *parent,
size_t *value)
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
index 0e5f5785d9f..98ffcbd4888 100644
--- a/include/linux/debugobjects.h
+++ b/include/linux/debugobjects.h
@@ -63,7 +63,7 @@ struct debug_obj_descr {
extern void debug_object_init (void *addr, struct debug_obj_descr *descr);
extern void
debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr);
-extern void debug_object_activate (void *addr, struct debug_obj_descr *descr);
+extern int debug_object_activate (void *addr, struct debug_obj_descr *descr);
extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr);
extern void debug_object_free (void *addr, struct debug_obj_descr *descr);
@@ -85,8 +85,8 @@ static inline void
debug_object_init (void *addr, struct debug_obj_descr *descr) { }
static inline void
debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { }
-static inline void
-debug_object_activate (void *addr, struct debug_obj_descr *descr) { }
+static inline int
+debug_object_activate (void *addr, struct debug_obj_descr *descr) { return 0; }
static inline void
debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { }
static inline void
diff --git a/include/linux/device.h b/include/linux/device.h
index 22b546a5859..f46646e4923 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -66,6 +66,9 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
* @bus_attrs: Default attributes of the bus.
* @dev_attrs: Default attributes of the devices on the bus.
* @drv_attrs: Default attributes of the device drivers on the bus.
+ * @bus_groups: Default attributes of the bus.
+ * @dev_groups: Default attributes of the devices on the bus.
+ * @drv_groups: Default attributes of the device drivers on the bus.
* @match: Called, perhaps multiple times, whenever a new device or driver
* is added for this bus. It should return a nonzero value if the
* given device can be handled by the given driver.
@@ -103,9 +106,12 @@ struct bus_type {
const char *name;
const char *dev_name;
struct device *dev_root;
- struct bus_attribute *bus_attrs;
- struct device_attribute *dev_attrs;
- struct driver_attribute *drv_attrs;
+ struct bus_attribute *bus_attrs; /* use bus_groups instead */
+ struct device_attribute *dev_attrs; /* use dev_groups instead */
+ struct driver_attribute *drv_attrs; /* use drv_groups instead */
+ const struct attribute_group **bus_groups;
+ const struct attribute_group **dev_groups;
+ const struct attribute_group **drv_groups;
int (*match)(struct device *dev, struct device_driver *drv);
int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
@@ -271,6 +277,8 @@ struct driver_attribute {
struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
#define DRIVER_ATTR_RO(_name) \
struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
+#define DRIVER_ATTR_WO(_name) \
+ struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
extern int __must_check driver_create_file(struct device_driver *driver,
const struct driver_attribute *attr);
@@ -528,6 +536,8 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
#define DEVICE_ATTR_RO(_name) \
struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
+#define DEVICE_ATTR_WO(_name) \
+ struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
#define DEVICE_ULONG_ATTR(_name, _mode, _var) \
struct dev_ext_attribute dev_attr_##_name = \
{ __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
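/*
 * Illustrative sketch, not part of this patch: a write-only sysfs attribute
 * declared with the new DEVICE_ATTR_WO() helper.  __ATTR_WO() wires the
 * attribute to a <name>_store() function; "reset" is a hypothetical
 * attribute name and the store body is a placeholder.
 */
static ssize_t reset_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	/* kick off a device reset here */
	return count;
}
static DEVICE_ATTR_WO(reset);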
@@ -895,6 +905,7 @@ static inline bool device_supports_offline(struct device *dev)
extern void lock_device_hotplug(void);
extern void unlock_device_hotplug(void);
+extern int lock_device_hotplug_sysfs(void);
extern int device_offline(struct device *dev);
extern int device_online(struct device *dev);
/*
@@ -1099,7 +1110,8 @@ do { \
dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
#define dev_info_ratelimited(dev, fmt, ...) \
dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
+#if defined(CONFIG_DYNAMIC_DEBUG)
+/* descriptor check is first to prevent flooding with "callbacks suppressed" */
#define dev_dbg_ratelimited(dev, fmt, ...) \
do { \
static DEFINE_RATELIMIT_STATE(_rs, \
@@ -1108,8 +1120,17 @@ do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
__ratelimit(&_rs)) \
- __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \
- ##__VA_ARGS__); \
+ __dynamic_dev_dbg(&descriptor, dev, fmt, \
+ ##__VA_ARGS__); \
+} while (0)
+#elif defined(DEBUG)
+#define dev_dbg_ratelimited(dev, fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ if (__ratelimit(&_rs)) \
+ dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
} while (0)
#else
#define dev_dbg_ratelimited(dev, fmt, ...) \
diff --git a/include/linux/dm9000.h b/include/linux/dm9000.h
index 96e87693d93..841925fbfe8 100644
--- a/include/linux/dm9000.h
+++ b/include/linux/dm9000.h
@@ -14,6 +14,8 @@
#ifndef __DM9000_PLATFORM_DATA
#define __DM9000_PLATFORM_DATA __FILE__
+#include <linux/if_ether.h>
+
/* IO control flags */
#define DM9000_PLATF_8BITONLY (0x0001)
@@ -27,7 +29,7 @@
struct dm9000_plat_data {
unsigned int flags;
- unsigned char dev_addr[6];
+ unsigned char dev_addr[ETH_ALEN];
/* allow replacement IO routines */
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index 01b5c84be82..00141d3325f 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -57,7 +57,7 @@ struct cma;
struct page;
struct device;
-#ifdef CONFIG_CMA
+#ifdef CONFIG_DMA_CMA
/*
* There is always at least global CMA area and a few optional device
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 94af4185851..3a8d0a2af60 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -132,9 +132,8 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
- void *ret = dma_alloc_coherent(dev, size, dma_handle, flag);
- if (ret)
- memset(ret, 0, size);
+ void *ret = dma_alloc_coherent(dev, size, dma_handle,
+ flag | __GFP_ZERO);
return ret;
}
diff --git a/include/linux/err.h b/include/linux/err.h
index 221fcfb676c..15f92e07245 100644
--- a/include/linux/err.h
+++ b/include/linux/err.h
@@ -52,7 +52,7 @@ static inline void * __must_check ERR_CAST(__force const void *ptr)
return (void *) ptr;
}
-static inline int __must_check PTR_RET(__force const void *ptr)
+static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
{
if (IS_ERR(ptr))
return PTR_ERR(ptr);
@@ -60,6 +60,9 @@ static inline int __must_check PTR_RET(__force const void *ptr)
return 0;
}
+/* Deprecated */
+#define PTR_RET(p) PTR_ERR_OR_ZERO(p)
+
#endif
#endif /* _LINUX_ERR_H */
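/*
 * Illustrative sketch, not part of this patch: the pattern the renamed
 * helper covers - propagate the error from an ERR_PTR-returning call or
 * return 0 on success.  devm_clk_get() is just an example callee here.
 */
static int example_get_clock(struct device *dev, struct clk **out)
{
	*out = devm_clk_get(dev, NULL);

	return PTR_ERR_OR_ZERO(*out);
}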
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index c623861964e..d8b512496e5 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -199,6 +199,21 @@ static inline void eth_hw_addr_random(struct net_device *dev)
}
/**
+ * eth_hw_addr_inherit - Copy dev_addr from another net_device
+ * @dst: pointer to net_device to copy dev_addr to
+ * @src: pointer to net_device to copy dev_addr from
+ *
+ * Copy the Ethernet address from one net_device to another along with
+ * the address attributes (addr_assign_type).
+ */
+static inline void eth_hw_addr_inherit(struct net_device *dst,
+ struct net_device *src)
+{
+ dst->addr_assign_type = src->addr_assign_type;
+ memcpy(dst->dev_addr, src->dev_addr, ETH_ALEN);
+}
+
+/**
* compare_ether_addr - Compare two Ethernet addresses
* @addr1: Pointer to a six-byte array containing the Ethernet address
* @addr2: Pointer to the other six-byte array containing the Ethernet address
diff --git a/include/linux/extcon/of_extcon.h b/include/linux/extcon/of_extcon.h
new file mode 100644
index 00000000000..0ebfeff1b55
--- /dev/null
+++ b/include/linux/extcon/of_extcon.h
@@ -0,0 +1,31 @@
+/*
+ * OF helpers for External connector (extcon) framework
+ *
+ * Copyright (C) 2013 Texas Instruments, Inc.
+ * Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * Copyright (C) 2013 Samsung Electronics
+ * Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __LINUX_OF_EXTCON_H
+#define __LINUX_OF_EXTCON_H
+
+#include <linux/err.h>
+
+#if IS_ENABLED(CONFIG_OF_EXTCON)
+extern struct extcon_dev
+ *of_extcon_get_extcon_dev(struct device *dev, int index);
+#else
+static inline struct extcon_dev
+ *of_extcon_get_extcon_dev(struct device *dev, int index)
+{
+ return ERR_PTR(-ENOSYS);
+}
+#endif /* CONFIG_OF_EXTCON */
+#endif /* __LINUX_OF_EXTCON_H */
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 383d5e39b28..bb942f6d570 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -140,14 +140,24 @@ struct f2fs_extent {
} __packed;
#define F2FS_NAME_LEN 255
-#define ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
-#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
-#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
+#define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */
+#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
+#define ADDRS_PER_INODE(fi) addrs_per_inode(fi)
+#define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
+#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
+
+#define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1)
+#define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2)
+#define NODE_IND1_BLOCK (DEF_ADDRS_PER_INODE + 3)
+#define NODE_IND2_BLOCK (DEF_ADDRS_PER_INODE + 4)
+#define NODE_DIND_BLOCK (DEF_ADDRS_PER_INODE + 5)
+
+#define F2FS_INLINE_XATTR 0x01 /* file inline xattr flag */
struct f2fs_inode {
__le16 i_mode; /* file mode */
__u8 i_advise; /* file hints */
- __u8 i_reserved; /* reserved */
+ __u8 i_inline; /* file inline flags */
__le32 i_uid; /* user ID */
__le32 i_gid; /* group ID */
__le32 i_links; /* links count */
@@ -170,7 +180,7 @@ struct f2fs_inode {
struct f2fs_extent i_ext; /* caching a largest extent */
- __le32 i_addr[ADDRS_PER_INODE]; /* Pointers to data blocks */
+ __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
__le32 i_nid[5]; /* direct(2), indirect(2),
double_indirect(1) node id */
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 3b0e820375a..5d7782e42b8 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -436,6 +436,7 @@ struct fw_iso_context {
int type;
int channel;
int speed;
+ bool drop_overflow_headers;
size_t header_size;
union {
fw_iso_callback_t sc;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 981874773e8..3b4cd8296e4 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -46,6 +46,7 @@ struct vfsmount;
struct cred;
struct swap_info_struct;
struct seq_file;
+struct workqueue_struct;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
@@ -63,8 +64,7 @@ struct buffer_head;
typedef int (get_block_t)(struct inode *inode, sector_t iblock,
struct buffer_head *bh_result, int create);
typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
- ssize_t bytes, void *private, int ret,
- bool is_async);
+ ssize_t bytes, void *private);
#define MAY_EXEC 0x00000001
#define MAY_WRITE 0x00000002
@@ -1328,6 +1328,9 @@ struct super_block {
/* Being remounted read-only */
int s_readonly_remount;
+
+ /* AIO completions deferred from interrupt context */
+ struct workqueue_struct *s_dio_done_wq;
};
/* superblock cache pruning functions */
@@ -1804,7 +1807,7 @@ enum file_time_flags {
S_VERSION = 8,
};
-extern void touch_atime(struct path *);
+extern void touch_atime(const struct path *);
static inline void file_accessed(struct file *file)
{
if (!(file->f_flags & O_NOATIME))
@@ -2503,6 +2506,7 @@ extern void generic_fillattr(struct inode *, struct kstat *);
extern int vfs_getattr(struct path *, struct kstat *);
void __inode_add_bytes(struct inode *inode, loff_t bytes);
void inode_add_bytes(struct inode *inode, loff_t bytes);
+void __inode_sub_bytes(struct inode *inode, loff_t bytes);
void inode_sub_bytes(struct inode *inode, loff_t bytes);
loff_t inode_get_bytes(struct inode *inode);
void inode_set_bytes(struct inode *inode, loff_t bytes);
diff --git a/include/linux/fs_enet_pd.h b/include/linux/fs_enet_pd.h
index 51b793466ff..efb05961bdd 100644
--- a/include/linux/fs_enet_pd.h
+++ b/include/linux/fs_enet_pd.h
@@ -16,8 +16,10 @@
#ifndef FS_ENET_PD_H
#define FS_ENET_PD_H
+#include <linux/clk.h>
#include <linux/string.h>
#include <linux/of_mdio.h>
+#include <linux/if_ether.h>
#include <asm/types.h>
#define FS_ENET_NAME "fs_enet"
@@ -135,13 +137,15 @@ struct fs_platform_info {
const struct fs_mii_bus_info *bus_info;
int rx_ring, tx_ring; /* number of buffers on rx */
- __u8 macaddr[6]; /* mac address */
+ __u8 macaddr[ETH_ALEN]; /* mac address */
int rx_copybreak; /* limit we copy small frames */
int use_napi; /* use NAPI */
int napi_weight; /* NAPI weight */
int use_rmii; /* use RMII mode */
int has_phy; /* if the network is phy container as well...*/
+
+ struct clk *clk_per; /* 'per' clock for register access */
};
struct fs_mii_fec_platform_info {
u32 irq[32];
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 4372658c73a..5eaa746735f 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -78,6 +78,11 @@ struct trace_iterator {
/* trace_seq for __print_flags() and __print_symbolic() etc. */
struct trace_seq tmp_seq;
+ cpumask_var_t started;
+
+ /* it's true when current open file is snapshot */
+ bool snapshot;
+
/* The below is zeroed out in pipe_read */
struct trace_seq seq;
struct trace_entry *ent;
@@ -90,10 +95,7 @@ struct trace_iterator {
loff_t pos;
long idx;
- cpumask_var_t started;
-
- /* it's true when current open file is snapshot */
- bool snapshot;
+ /* All new field here will be zeroed out in pipe_read */
};
enum trace_iter_flags {
@@ -332,7 +334,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
const char *name, int offset, int size,
int is_signed, int filter_type);
extern int trace_add_event_call(struct ftrace_event_call *call);
-extern void trace_remove_event_call(struct ftrace_event_call *call);
+extern int trace_remove_event_call(struct ftrace_event_call *call);
#define is_signed_type(type) (((type)(-1)) < (type)1)
@@ -357,6 +359,40 @@ do { \
__trace_printk(ip, fmt, ##args); \
} while (0)
+/**
+ * tracepoint_string - register constant persistent string to trace system
+ * @str: a constant persistent string that will be referenced in tracepoints
+ *
+ * If constant strings are being used in tracepoints, it is faster and
+ * more efficient to just save the pointer to the string and reference
+ * that with a printf "%s" instead of saving the string in the ring buffer
+ * and wasting space and time.
+ *
+ * The problem with the above approach is that userspace tools that read
+ * the binary output of the trace buffers do not have access to the string.
+ * Instead they just show the address of the string which is not very
+ * useful to users.
+ *
+ * With tracepoint_string(), the string will be registered to the tracing
+ * system and exported to userspace via the debugfs/tracing/printk_formats
+ * file that maps the string address to the string text. This way userspace
+ * tools that read the binary buffers have a way to map the pointers to
+ * the ASCII strings they represent.
+ *
+ * The @str used must be a constant string and persistent as it would not
+ * make sense to show a string that no longer exists. But it is still fine
+ * to be used with modules, because when modules are unloaded, if they
+ * had tracepoints, the ring buffers are cleared too. As long as the string
+ * does not change during the life of the module, it is fine to use
+ * tracepoint_string() within a module.
+ */
+#define tracepoint_string(str) \
+ ({ \
+ static const char *___tp_str __tracepoint_string = str; \
+ ___tp_str; \
+ })
+#define __tracepoint_string __attribute__((section("__tracepoint_str")))
+
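/*
 * Illustrative sketch, not part of this patch: recording constant state
 * strings by pointer.  Each call site gets its own static pointer that is
 * registered with the tracing core; trace_printk() stands in for a real
 * trace event here.
 */
static void trace_link_state_example(bool up)
{
	const char *state = up ? tracepoint_string("link up")
			       : tracepoint_string("link down");

	trace_printk("state: %s\n", state);
}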
#ifdef CONFIG_PERF_EVENTS
struct perf_event;
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 05bcc090376..ccfe17c5c8d 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -1,126 +1,11 @@
#ifndef LINUX_HARDIRQ_H
#define LINUX_HARDIRQ_H
-#include <linux/preempt.h>
+#include <linux/preempt_mask.h>
#include <linux/lockdep.h>
#include <linux/ftrace_irq.h>
#include <linux/vtime.h>
-#include <asm/hardirq.h>
-/*
- * We put the hardirq and softirq counter into the preemption
- * counter. The bitmask has the following meaning:
- *
- * - bits 0-7 are the preemption count (max preemption depth: 256)
- * - bits 8-15 are the softirq count (max # of softirqs: 256)
- *
- * The hardirq count can in theory reach the same as NR_IRQS.
- * In reality, the number of nested IRQS is limited to the stack
- * size as well. For archs with over 1000 IRQS it is not practical
- * to expect that they will all nest. We give a max of 10 bits for
- * hardirq nesting. An arch may choose to give less than 10 bits.
- * m68k expects it to be 8.
- *
- * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
- * - bit 26 is the NMI_MASK
- * - bit 27 is the PREEMPT_ACTIVE flag
- *
- * PREEMPT_MASK: 0x000000ff
- * SOFTIRQ_MASK: 0x0000ff00
- * HARDIRQ_MASK: 0x03ff0000
- * NMI_MASK: 0x04000000
- */
-#define PREEMPT_BITS 8
-#define SOFTIRQ_BITS 8
-#define NMI_BITS 1
-
-#define MAX_HARDIRQ_BITS 10
-
-#ifndef HARDIRQ_BITS
-# define HARDIRQ_BITS MAX_HARDIRQ_BITS
-#endif
-
-#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
-#error HARDIRQ_BITS too high!
-#endif
-
-#define PREEMPT_SHIFT 0
-#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
-#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
-#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
-
-#define __IRQ_MASK(x) ((1UL << (x))-1)
-
-#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
-#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
-#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
-#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
-
-#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
-#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
-#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
-#define NMI_OFFSET (1UL << NMI_SHIFT)
-
-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
-
-#ifndef PREEMPT_ACTIVE
-#define PREEMPT_ACTIVE_BITS 1
-#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
-#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
-#endif
-
-#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
-#error PREEMPT_ACTIVE is too low!
-#endif
-
-#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
-#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
-#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
- | NMI_MASK))
-
-/*
- * Are we doing bottom half or hardware interrupt processing?
- * Are we in a softirq context? Interrupt context?
- * in_softirq - Are we currently processing softirq or have bh disabled?
- * in_serving_softirq - Are we currently processing softirq?
- */
-#define in_irq() (hardirq_count())
-#define in_softirq() (softirq_count())
-#define in_interrupt() (irq_count())
-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-
-/*
- * Are we in NMI context?
- */
-#define in_nmi() (preempt_count() & NMI_MASK)
-
-#if defined(CONFIG_PREEMPT_COUNT)
-# define PREEMPT_CHECK_OFFSET 1
-#else
-# define PREEMPT_CHECK_OFFSET 0
-#endif
-
-/*
- * Are we running in atomic context? WARNING: this macro cannot
- * always detect atomic context; in particular, it cannot know about
- * held spinlocks in non-preemptible kernels. Thus it should not be
- * used in the general case to determine whether sleeping is possible.
- * Do not use in_atomic() in driver code.
- */
-#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
-
-/*
- * Check whether we were atomic before we did preempt_disable():
- * (used by the scheduler, *after* releasing the kernel lock)
- */
-#define in_atomic_preempt_off() \
- ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
-
-#ifdef CONFIG_PREEMPT_COUNT
-# define preemptible() (preempt_count() == 0 && !irqs_disabled())
-#else
-# define preemptible() 0
-#endif
#if defined(CONFIG_SMP) || defined(CONFIG_GENERIC_HARDIRQS)
extern void synchronize_irq(unsigned int irq);
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 3b589440ecf..9231be9e90a 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -18,11 +18,21 @@ enum hdmi_infoframe_type {
HDMI_INFOFRAME_TYPE_AUDIO = 0x84,
};
+#define HDMI_IEEE_OUI 0x000c03
#define HDMI_INFOFRAME_HEADER_SIZE 4
#define HDMI_AVI_INFOFRAME_SIZE 13
#define HDMI_SPD_INFOFRAME_SIZE 25
#define HDMI_AUDIO_INFOFRAME_SIZE 10
+#define HDMI_INFOFRAME_SIZE(type) \
+ (HDMI_INFOFRAME_HEADER_SIZE + HDMI_ ## type ## _INFOFRAME_SIZE)
+
+struct hdmi_any_infoframe {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+};
+
enum hdmi_colorspace {
HDMI_COLORSPACE_RGB,
HDMI_COLORSPACE_YUV422,
@@ -100,9 +110,6 @@ struct hdmi_avi_infoframe {
unsigned char version;
unsigned char length;
enum hdmi_colorspace colorspace;
- bool active_info_valid;
- bool horizontal_bar_valid;
- bool vertical_bar_valid;
enum hdmi_scan_mode scan_mode;
enum hdmi_colorimetry colorimetry;
enum hdmi_picture_aspect picture_aspect;
@@ -218,14 +225,52 @@ int hdmi_audio_infoframe_init(struct hdmi_audio_infoframe *frame);
ssize_t hdmi_audio_infoframe_pack(struct hdmi_audio_infoframe *frame,
void *buffer, size_t size);
+enum hdmi_3d_structure {
+ HDMI_3D_STRUCTURE_INVALID = -1,
+ HDMI_3D_STRUCTURE_FRAME_PACKING = 0,
+ HDMI_3D_STRUCTURE_FIELD_ALTERNATIVE,
+ HDMI_3D_STRUCTURE_LINE_ALTERNATIVE,
+ HDMI_3D_STRUCTURE_SIDE_BY_SIDE_FULL,
+ HDMI_3D_STRUCTURE_L_DEPTH,
+ HDMI_3D_STRUCTURE_L_DEPTH_GFX_GFX_DEPTH,
+ HDMI_3D_STRUCTURE_TOP_AND_BOTTOM,
+ HDMI_3D_STRUCTURE_SIDE_BY_SIDE_HALF = 8,
+};
+
+
struct hdmi_vendor_infoframe {
enum hdmi_infoframe_type type;
unsigned char version;
unsigned char length;
- u8 data[27];
+ unsigned int oui;
+ u8 vic;
+ enum hdmi_3d_structure s3d_struct;
+ unsigned int s3d_ext_data;
};
+int hdmi_vendor_infoframe_init(struct hdmi_vendor_infoframe *frame);
ssize_t hdmi_vendor_infoframe_pack(struct hdmi_vendor_infoframe *frame,
void *buffer, size_t size);
+union hdmi_vendor_any_infoframe {
+ struct {
+ enum hdmi_infoframe_type type;
+ unsigned char version;
+ unsigned char length;
+ unsigned int oui;
+ } any;
+ struct hdmi_vendor_infoframe hdmi;
+};
+
+union hdmi_infoframe {
+ struct hdmi_any_infoframe any;
+ struct hdmi_avi_infoframe avi;
+ struct hdmi_spd_infoframe spd;
+ union hdmi_vendor_any_infoframe vendor;
+ struct hdmi_audio_infoframe audio;
+};
+
+ssize_t
+hdmi_infoframe_pack(union hdmi_infoframe *frame, void *buffer, size_t size);
+
#endif /* _DRM_HDMI_H */
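/*
 * Illustrative sketch, not part of this patch: filling and packing an HDMI
 * vendor infoframe through the new generic union.  The VIC value and buffer
 * handling are arbitrary example choices.
 */
static ssize_t pack_vendor_frame_example(void *buffer, size_t size)
{
	union hdmi_infoframe frame;
	int ret;

	ret = hdmi_vendor_infoframe_init(&frame.vendor.hdmi);
	if (ret < 0)
		return ret;
	frame.vendor.hdmi.vic = 1;	/* example HDMI VIC */

	return hdmi_infoframe_pack(&frame, buffer, size);
}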
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index ecefb7311dd..32ba45158d3 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -172,7 +172,7 @@ struct hid_sensor_common {
struct hid_sensor_hub_attribute_info sensitivity;
};
-/*Convert from hid unit expo to regular exponent*/
+/* Convert from hid unit expo to regular exponent */
static inline int hid_sensor_convert_exponent(int unit_expo)
{
if (unit_expo < 0x08)
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index 6f24446e766..4f945d3ed49 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -37,7 +37,7 @@
#define HID_USAGE_SENSOR_ANGL_VELOCITY_Y_AXIS 0x200458
#define HID_USAGE_SENSOR_ANGL_VELOCITY_Z_AXIS 0x200459
-/*ORIENTATION: Compass 3D: (200083) */
+/* ORIENTATION: Compass 3D: (200083) */
#define HID_USAGE_SENSOR_COMPASS_3D 0x200083
#define HID_USAGE_SENSOR_ORIENT_MAGN_HEADING 0x200471
#define HID_USAGE_SENSOR_ORIENT_MAGN_HEADING_X 0x200472
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 0c48991b040..ee1ffc5e19c 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -252,6 +252,8 @@ struct hid_item {
#define HID_OUTPUT_REPORT 1
#define HID_FEATURE_REPORT 2
+#define HID_REPORT_TYPES 3
+
/*
* HID connect requests
*/
@@ -283,6 +285,7 @@ struct hid_item {
#define HID_QUIRK_MULTI_INPUT 0x00000040
#define HID_QUIRK_HIDINPUT_FORCE 0x00000080
#define HID_QUIRK_NO_EMPTY_INPUT 0x00000100
+#define HID_QUIRK_NO_INIT_INPUT_REPORTS 0x00000200
#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
#define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000
#define HID_QUIRK_NO_INIT_REPORTS 0x20000000
@@ -295,6 +298,7 @@ struct hid_item {
#define HID_GROUP_GENERIC 0x0001
#define HID_GROUP_MULTITOUCH 0x0002
#define HID_GROUP_SENSOR_HUB 0x0003
+#define HID_GROUP_MULTITOUCH_WIN_8 0x0004
/*
* This is the global environment of the parser. This information is
@@ -393,14 +397,14 @@ struct hid_report {
struct hid_device *device; /* associated device */
};
+#define HID_MAX_IDS 256
+
struct hid_report_enum {
unsigned numbered;
struct list_head report_list;
- struct hid_report *report_id_hash[256];
+ struct hid_report *report_id_hash[HID_MAX_IDS];
};
-#define HID_REPORT_TYPES 3
-
#define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */
#define HID_MAX_BUFFER_SIZE 4096 /* 4kb */
#define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */
@@ -456,6 +460,7 @@ struct hid_device { /* device report descriptor */
enum hid_type type; /* device type (mouse, kbd, ...) */
unsigned country; /* HID country */
struct hid_report_enum report_enum[HID_REPORT_TYPES];
+ struct work_struct led_work; /* delayed LED worker */
struct semaphore driver_lock; /* protects the current driver, except during input */
struct semaphore driver_input_lock; /* protects the current driver */
@@ -532,6 +537,8 @@ static inline void hid_set_drvdata(struct hid_device *hdev, void *data)
#define HID_GLOBAL_STACK_SIZE 4
#define HID_COLLECTION_STACK_SIZE 4
+#define HID_SCAN_FLAG_MT_WIN_8 0x00000001
+
struct hid_parser {
struct hid_global global;
struct hid_global global_stack[HID_GLOBAL_STACK_SIZE];
@@ -540,6 +547,7 @@ struct hid_parser {
unsigned collection_stack[HID_COLLECTION_STACK_SIZE];
unsigned collection_stack_ptr;
struct hid_device *device;
+ unsigned scan_flags;
};
struct hid_class_descriptor {
@@ -744,6 +752,7 @@ struct hid_field *hidinput_get_led_field(struct hid_device *hid);
unsigned int hidinput_count_leds(struct hid_device *hid);
__s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code);
void hid_output_report(struct hid_report *report, __u8 *data);
+u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags);
struct hid_device *hid_allocate_device(void);
struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
@@ -989,7 +998,6 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct);
int usbhid_quirks_init(char **quirks_param);
void usbhid_quirks_exit(void);
-void usbhid_set_leds(struct hid_device *hid);
#ifdef CONFIG_HID_PID
int hid_pidff_init(struct hid_device *hid);
diff --git a/include/linux/hidraw.h b/include/linux/hidraw.h
index 2451662c728..ddf52612eed 100644
--- a/include/linux/hidraw.h
+++ b/include/linux/hidraw.h
@@ -23,6 +23,7 @@ struct hidraw {
wait_queue_head_t wait;
struct hid_device *hid;
struct device *dev;
+ spinlock_t list_lock;
struct list_head list;
};
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index fae8bac907e..a3b8b2e2d24 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -27,6 +27,14 @@
#include <linux/types.h>
+/*
+ * Framework version for util services.
+ */
+
+#define UTIL_FW_MAJOR 3
+#define UTIL_FW_MINOR 0
+#define UTIL_FW_MAJOR_MINOR (UTIL_FW_MAJOR << 16 | UTIL_FW_MINOR)
+
/*
* Implementation of host controlled snapshot of the guest.
@@ -455,27 +463,6 @@ hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi,
*read = dsize - *write;
}
-
-/*
- * We use the same version numbering for all Hyper-V modules.
- *
- * Definition of versioning is as follows;
- *
- * Major Number Changes for these scenarios;
- * 1. When a new version of Windows Hyper-V
- * is released.
- * 2. A Major change has occurred in the
- * Linux IC's.
- * (For example the merge for the first time
- * into the kernel) Every time the Major Number
- * changes, the Revision number is reset to 0.
- * Minor Number Changes when new functionality is added
- * to the Linux IC's that is not a bug fix.
- *
- * 3.1 - Added completed hv_utils driver. Shutdown/Heartbeat/Timesync
- */
-#define HV_DRV_VERSION "3.1"
-
/*
* VMBUS version is 32 bit entity broken up into
* two 16 bit quantities: major_number. minor_number.
@@ -1494,7 +1481,7 @@ struct hyperv_service_callback {
};
#define MAX_SRV_VER 0x7ffffff
-extern void vmbus_prep_negotiate_resp(struct icmsg_hdr *,
+extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *,
struct icmsg_negotiate *, u8 *, int,
int);
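
UTIL_FW_MAJOR_MINOR packs the util-services framework version into one 32-bit word (major in the high 16 bits, minor in the low 16 bits). A minimal sketch, not part of this patch, of decomposing that word; the function name is illustrative:

static inline void example_show_util_fw_version(void)
{
	u32 fw = UTIL_FW_MAJOR_MINOR;			/* (3 << 16) | 0 */

	pr_info("util fw %u.%u\n", fw >> 16, fw & 0xffff);	/* "util fw 3.0" */
}
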
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index e988fa935b3..2ab11dc3807 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -447,11 +447,13 @@ static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data)
static inline struct i2c_adapter *
i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter)
{
+#if IS_ENABLED(CONFIG_I2C_MUX)
struct device *parent = adapter->dev.parent;
if (parent != NULL && parent->type == &i2c_adapter_type)
return to_i2c_adapter(parent);
else
+#endif
return NULL;
}
@@ -542,10 +544,24 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap)
#endif /* I2C */
-#if IS_ENABLED(CONFIG_ACPI_I2C)
-extern void acpi_i2c_register_devices(struct i2c_adapter *adap);
+#if IS_ENABLED(CONFIG_OF)
+/* must call put_device() when done with returned i2c_client device */
+extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
+
+/* must call put_device() when done with returned i2c_adapter device */
+extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
+
#else
-static inline void acpi_i2c_register_devices(struct i2c_adapter *adap) {}
-#endif
+
+static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
+{
+ return NULL;
+}
+
+static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
#endif /* _LINUX_I2C_H */
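
A hedged sketch of the contract documented above for the OF lookup helpers: the caller owns a device reference and must drop it with put_device(). The function and variable names are illustrative, not from this patch, and the usual includes (linux/of.h, linux/i2c.h) are assumed:

static int example_use_adapter(struct device_node *np)
{
	struct i2c_adapter *adap = of_find_i2c_adapter_by_node(np);

	if (!adap)
		return -EPROBE_DEFER;

	/* ... talk to devices behind adap ... */

	put_device(&adap->dev);		/* reference taken by the lookup */
	return 0;
}
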
diff --git a/include/linux/i2c/i2c-hid.h b/include/linux/i2c/i2c-hid.h
index 60e411d764d..7aa901d9205 100644
--- a/include/linux/i2c/i2c-hid.h
+++ b/include/linux/i2c/i2c-hid.h
@@ -19,7 +19,8 @@
* @hid_descriptor_address: i2c register where the HID descriptor is stored.
*
* Note that it is the responsibility of the platform driver (or the acpi 5.0
- * driver) to setup the irq related to the gpio in the struct i2c_board_info.
+ * driver, or the flattened device tree) to set up the irq related to the gpio in
+ * the struct i2c_board_info.
* The platform driver should also setup the gpio according to the device:
*
* A typical example is the following:
diff --git a/include/linux/i2c/pxa-i2c.h b/include/linux/i2c/pxa-i2c.h
index 1a9f65e6ec0..53aab243cbd 100644
--- a/include/linux/i2c/pxa-i2c.h
+++ b/include/linux/i2c/pxa-i2c.h
@@ -67,6 +67,9 @@ struct i2c_pxa_platform_data {
unsigned int class;
unsigned int use_pio :1;
unsigned int fast_mode :1;
+ unsigned int high_mode:1;
+ unsigned char master_code;
+ unsigned long rate;
};
extern void pxa_set_i2c_info(struct i2c_pxa_platform_data *info);
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index b0dc87a2a37..a5b598a79be 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -16,6 +16,7 @@
#define LINUX_IEEE80211_H
#include <linux/types.h>
+#include <linux/if_ether.h>
#include <asm/byteorder.h>
/*
@@ -209,28 +210,28 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
struct ieee80211_hdr {
__le16 frame_control;
__le16 duration_id;
- u8 addr1[6];
- u8 addr2[6];
- u8 addr3[6];
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
__le16 seq_ctrl;
- u8 addr4[6];
+ u8 addr4[ETH_ALEN];
} __packed __aligned(2);
struct ieee80211_hdr_3addr {
__le16 frame_control;
__le16 duration_id;
- u8 addr1[6];
- u8 addr2[6];
- u8 addr3[6];
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
__le16 seq_ctrl;
} __packed __aligned(2);
struct ieee80211_qos_hdr {
__le16 frame_control;
__le16 duration_id;
- u8 addr1[6];
- u8 addr2[6];
- u8 addr3[6];
+ u8 addr1[ETH_ALEN];
+ u8 addr2[ETH_ALEN];
+ u8 addr3[ETH_ALEN];
__le16 seq_ctrl;
__le16 qos_ctrl;
} __packed __aligned(2);
@@ -608,8 +609,8 @@ struct ieee80211s_hdr {
u8 flags;
u8 ttl;
__le32 seqnum;
- u8 eaddr1[6];
- u8 eaddr2[6];
+ u8 eaddr1[ETH_ALEN];
+ u8 eaddr2[ETH_ALEN];
} __packed __aligned(2);
/* Mesh flags */
@@ -758,7 +759,7 @@ struct ieee80211_rann_ie {
u8 rann_flags;
u8 rann_hopcount;
u8 rann_ttl;
- u8 rann_addr[6];
+ u8 rann_addr[ETH_ALEN];
__le32 rann_seq;
__le32 rann_interval;
__le32 rann_metric;
@@ -802,9 +803,9 @@ enum ieee80211_vht_opmode_bits {
struct ieee80211_mgmt {
__le16 frame_control;
__le16 duration;
- u8 da[6];
- u8 sa[6];
- u8 bssid[6];
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
__le16 seq_ctrl;
union {
struct {
@@ -833,7 +834,7 @@ struct ieee80211_mgmt {
struct {
__le16 capab_info;
__le16 listen_interval;
- u8 current_ap[6];
+ u8 current_ap[ETH_ALEN];
/* followed by SSID and Supported rates */
u8 variable[0];
} __packed reassoc_req;
@@ -966,21 +967,21 @@ struct ieee80211_vendor_ie {
struct ieee80211_rts {
__le16 frame_control;
__le16 duration;
- u8 ra[6];
- u8 ta[6];
+ u8 ra[ETH_ALEN];
+ u8 ta[ETH_ALEN];
} __packed __aligned(2);
struct ieee80211_cts {
__le16 frame_control;
__le16 duration;
- u8 ra[6];
+ u8 ra[ETH_ALEN];
} __packed __aligned(2);
struct ieee80211_pspoll {
__le16 frame_control;
__le16 aid;
- u8 bssid[6];
- u8 ta[6];
+ u8 bssid[ETH_ALEN];
+ u8 ta[ETH_ALEN];
} __packed __aligned(2);
/* TDLS */
@@ -989,14 +990,14 @@ struct ieee80211_pspoll {
struct ieee80211_tdls_lnkie {
u8 ie_type; /* Link Identifier IE */
u8 ie_len;
- u8 bssid[6];
- u8 init_sta[6];
- u8 resp_sta[6];
+ u8 bssid[ETH_ALEN];
+ u8 init_sta[ETH_ALEN];
+ u8 resp_sta[ETH_ALEN];
} __packed;
struct ieee80211_tdls_data {
- u8 da[6];
- u8 sa[6];
+ u8 da[ETH_ALEN];
+ u8 sa[ETH_ALEN];
__be16 ether_type;
u8 payload_type;
u8 category;
@@ -1090,8 +1091,8 @@ struct ieee80211_p2p_noa_attr {
struct ieee80211_bar {
__le16 frame_control;
__le16 duration;
- __u8 ra[6];
- __u8 ta[6];
+ __u8 ra[ETH_ALEN];
+ __u8 ta[ETH_ALEN];
__le16 control;
__le16 start_seq_num;
} __packed;
@@ -1709,6 +1710,10 @@ enum ieee80211_eid {
WLAN_EID_OPMODE_NOTIF = 199,
WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194,
WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196,
+ WLAN_EID_EXTENDED_BSS_LOAD = 193,
+ WLAN_EID_VHT_TX_POWER_ENVELOPE = 195,
+ WLAN_EID_AID = 197,
+ WLAN_EID_QUIET_CHANNEL = 198,
/* 802.11ad */
WLAN_EID_NON_TX_BSSID_CAP = 83,
@@ -1860,6 +1865,11 @@ enum ieee80211_tdls_actioncode {
WLAN_TDLS_DISCOVERY_REQUEST = 10,
};
+/* Interworking capabilities are set in the 7th bit of the 4th byte of the
+ * @WLAN_EID_EXT_CAPABILITY information element.
+ */
+#define WLAN_EXT_CAPA4_INTERWORKING_ENABLED BIT(7)
+
/*
* TDLS capabililites to be enabled in the 5th byte of the
* @WLAN_EID_EXT_CAPABILITY information element
@@ -2279,4 +2289,8 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
return !!(tim->virtual_map[index] & mask);
}
+/* convert time units */
+#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024))
+#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x))
+
#endif /* LINUX_IEEE80211_H */
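
The new helpers rely on the fact that one 802.11 time unit (TU) is 1024 microseconds. A minimal sketch, with a hypothetical timer and an illustrative 100 TU (102400 us) interval:

static void example_arm_beacon_timer(struct timer_list *t)
{
	/* Fires roughly 100 TUs from now. */
	mod_timer(t, TU_TO_EXP_TIME(100));
}
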
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index f6156f91eb1..a899dc24be1 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -10,9 +10,9 @@
#ifndef _LINUX_IF_TEAM_H_
#define _LINUX_IF_TEAM_H_
-
#include <linux/netpoll.h>
#include <net/sch_generic.h>
+#include <linux/types.h>
#include <uapi/linux/if_team.h>
struct team_pcpu_stats {
@@ -194,6 +194,18 @@ struct team {
bool user_carrier_enabled;
bool queue_override_enabled;
struct list_head *qom_lists; /* array of queue override mapping lists */
+ struct {
+ unsigned int count;
+ unsigned int interval; /* in ms */
+ atomic_t count_pending;
+ struct delayed_work dw;
+ } notify_peers;
+ struct {
+ unsigned int count;
+ unsigned int interval; /* in ms */
+ atomic_t count_pending;
+ struct delayed_work dw;
+ } mcast_rejoin;
long mode_priv[TEAM_MODE_PRIV_LONGS];
};
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index e3362b5f13e..f47550d75f8 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -129,6 +129,5 @@ extern void ip_mc_unmap(struct in_device *);
extern void ip_mc_remap(struct in_device *);
extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
-extern void ip_mc_rejoin_groups(struct in_device *in_dev);
#endif
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 72b26940730..e51f65480ea 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -17,6 +17,8 @@
#include <linux/iio/trigger.h>
#include <linux/bitops.h>
+#include <linux/platform_data/st_sensors_pdata.h>
+
#define ST_SENSORS_TX_MAX_LENGTH 2
#define ST_SENSORS_RX_MAX_LENGTH 6
@@ -118,14 +120,16 @@ struct st_sensor_bdu {
/**
* struct st_sensor_data_ready_irq - ST sensor device data-ready interrupt
* @addr: address of the register.
- * @mask: mask to write the on/off value.
+ * @mask_int1: mask to enable/disable IRQ on INT1 pin.
+ * @mask_int2: mask to enable/disable IRQ on INT2 pin.
* struct ig1 - represents the Interrupt Generator 1 of sensors.
* @en_addr: address of the enable ig1 register.
* @en_mask: mask to write the on/off value for enable.
*/
struct st_sensor_data_ready_irq {
u8 addr;
- u8 mask;
+ u8 mask_int1;
+ u8 mask_int2;
struct {
u8 en_addr;
u8 en_mask;
@@ -201,6 +205,7 @@ struct st_sensors {
* @buffer_data: Data used by buffer part.
* @odr: Output data rate of the sensor [Hz].
* num_data_channels: Number of data channels used in buffer.
+ * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
* @get_irq_data_ready: Function to get the IRQ used for data ready signal.
* @tf: Transfer function structure used by I/O operations.
* @tb: Transfer buffers and mutex used by I/O operations.
@@ -219,6 +224,8 @@ struct st_sensor_data {
unsigned int odr;
unsigned int num_data_channels;
+ u8 drdy_int_pin;
+
unsigned int (*get_irq_data_ready) (struct iio_dev *indio_dev);
const struct st_sensor_transfer_function *tf;
@@ -249,7 +256,8 @@ static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
}
#endif
-int st_sensors_init_sensor(struct iio_dev *indio_dev);
+int st_sensors_init_sensor(struct iio_dev *indio_dev,
+ struct st_sensors_platform_data *pdata);
int st_sensors_set_enable(struct iio_dev *indio_dev, bool enable);
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 3d35b702359..2103cc32a5f 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -532,6 +532,60 @@ static inline struct iio_dev *iio_priv_to_dev(void *priv)
void iio_device_free(struct iio_dev *indio_dev);
/**
+ * devm_iio_device_alloc - Resource-managed iio_device_alloc()
+ * @dev: Device to allocate iio_dev for
+ * @sizeof_priv: Space to allocate for private structure.
+ *
+ * Managed iio_device_alloc. iio_dev allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * If an iio_dev allocated with this function needs to be freed separately,
+ * devm_iio_device_free() must be used.
+ *
+ * RETURNS:
+ * Pointer to allocated iio_dev on success, NULL on failure.
+ */
+struct iio_dev *devm_iio_device_alloc(struct device *dev, int sizeof_priv);
+
+/**
+ * devm_iio_device_free - Resource-managed iio_device_free()
+ * @dev: Device this iio_dev belongs to
+ * @indio_dev: the iio_dev associated with the device
+ *
+ * Free iio_dev allocated with devm_iio_device_alloc().
+ */
+void devm_iio_device_free(struct device *dev, struct iio_dev *indio_dev);
+
+/**
+ * devm_iio_trigger_alloc - Resource-managed iio_trigger_alloc()
+ * @dev: Device to allocate iio_trigger for
+ * @fmt: trigger name format. If it includes format
+ * specifiers, the additional arguments following
+ * format are formatted and inserted in the resulting
+ * string replacing their respective specifiers.
+ *
+ * Managed iio_trigger_alloc. iio_trigger allocated with this function is
+ * automatically freed on driver detach.
+ *
+ * If an iio_trigger allocated with this function needs to be freed separately,
+ * devm_iio_trigger_free() must be used.
+ *
+ * RETURNS:
+ * Pointer to allocated iio_trigger on success, NULL on failure.
+ */
+struct iio_trigger *devm_iio_trigger_alloc(struct device *dev,
+ const char *fmt, ...);
+
+/**
+ * devm_iio_trigger_free - Resource-managed iio_trigger_free()
+ * @dev: Device this iio_trigger belongs to
+ * @iio_trig: the iio_trigger associated with the device
+ *
+ * Free iio_trigger allocated with devm_iio_trigger_alloc().
+ */
+void devm_iio_trigger_free(struct device *dev, struct iio_trigger *iio_trig);
+
+/**
* iio_buffer_enabled() - helper function to test if the buffer is enabled
* @indio_dev: IIO device structure for device
**/
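
A minimal usage sketch of the new device-managed allocator, assuming a hypothetical platform driver with private state struct foo_state (names are illustrative, not from this patch):

struct foo_state {
	int dummy;				/* hypothetical private data */
};

static int foo_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;

	/* Freed automatically on driver detach; no iio_device_free() needed. */
	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct foo_state));
	if (!indio_dev)
		return -ENOMEM;

	/* ... set up channels and info, then iio_device_register() ... */
	return 0;
}
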
diff --git a/include/linux/iio/sysfs.h b/include/linux/iio/sysfs.h
index b7a934b9431..2958c960003 100644
--- a/include/linux/iio/sysfs.h
+++ b/include/linux/iio/sysfs.h
@@ -73,11 +73,6 @@ struct iio_const_attr {
.dev_attr = __ATTR(_name, S_IRUGO, iio_read_const_attr, NULL)}
/* Generic attributes of onetype or another */
-/**
- * IIO_DEV_ATTR_RESET: resets the device
- **/
-#define IIO_DEV_ATTR_RESET(_store) \
- IIO_DEVICE_ATTR(reset, S_IWUSR, NULL, _store, 0)
/**
* IIO_DEV_ATTR_SAMP_FREQ - sets any internal clock frequency
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h
index 3869c525b05..369cf2cd514 100644
--- a/include/linux/iio/trigger.h
+++ b/include/linux/iio/trigger.h
@@ -8,6 +8,7 @@
*/
#include <linux/irq.h>
#include <linux/module.h>
+#include <linux/atomic.h>
#ifndef _IIO_TRIGGER_H_
#define _IIO_TRIGGER_H_
@@ -61,7 +62,7 @@ struct iio_trigger {
struct list_head list;
struct list_head alloc_list;
- int use_count;
+ atomic_t use_count;
struct irq_chip subirq_chip;
int subirq_base;
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index b99cd23f347..79640e015a8 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -5,45 +5,13 @@
#include <linux/bitmap.h>
#include <linux/if.h>
+#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/timer.h>
#include <linux/sysctl.h>
#include <linux/rtnetlink.h>
-enum
-{
- IPV4_DEVCONF_FORWARDING=1,
- IPV4_DEVCONF_MC_FORWARDING,
- IPV4_DEVCONF_PROXY_ARP,
- IPV4_DEVCONF_ACCEPT_REDIRECTS,
- IPV4_DEVCONF_SECURE_REDIRECTS,
- IPV4_DEVCONF_SEND_REDIRECTS,
- IPV4_DEVCONF_SHARED_MEDIA,
- IPV4_DEVCONF_RP_FILTER,
- IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
- IPV4_DEVCONF_BOOTP_RELAY,
- IPV4_DEVCONF_LOG_MARTIANS,
- IPV4_DEVCONF_TAG,
- IPV4_DEVCONF_ARPFILTER,
- IPV4_DEVCONF_MEDIUM_ID,
- IPV4_DEVCONF_NOXFRM,
- IPV4_DEVCONF_NOPOLICY,
- IPV4_DEVCONF_FORCE_IGMP_VERSION,
- IPV4_DEVCONF_ARP_ANNOUNCE,
- IPV4_DEVCONF_ARP_IGNORE,
- IPV4_DEVCONF_PROMOTE_SECONDARIES,
- IPV4_DEVCONF_ARP_ACCEPT,
- IPV4_DEVCONF_ARP_NOTIFY,
- IPV4_DEVCONF_ACCEPT_LOCAL,
- IPV4_DEVCONF_SRC_VMARK,
- IPV4_DEVCONF_PROXY_ARP_PVLAN,
- IPV4_DEVCONF_ROUTE_LOCALNET,
- __IPV4_DEVCONF_MAX
-};
-
-#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
-
struct ipv4_devconf {
void *sysctl;
int data[IPV4_DEVCONF_MAX];
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 850e95bc766..28ea3843931 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -19,6 +19,8 @@ struct ipv6_devconf {
__s32 rtr_solicit_interval;
__s32 rtr_solicit_delay;
__s32 force_mld_version;
+ __s32 mldv1_unsolicited_report_interval;
+ __s32 mldv2_unsolicited_report_interval;
#ifdef CONFIG_IPV6_PRIVACY
__s32 use_tempaddr;
__s32 temp_valid_lft;
@@ -48,6 +50,7 @@ struct ipv6_devconf {
__s32 accept_dad;
__s32 force_tllao;
__s32 ndisc_notify;
+ __s32 suppress_frag_ndisc;
void *sysctl;
};
@@ -101,6 +104,7 @@ struct inet6_skb_parm {
#define IP6SKB_FORWARDED 2
#define IP6SKB_REROUTED 4
#define IP6SKB_ROUTERALERT 8
+#define IP6SKB_FRAGMENTED 16
};
#define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb))
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 8685d1be12c..31229e0be90 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -57,16 +57,13 @@
#define JBD_EXPENSIVE_CHECKING
extern u8 journal_enable_debug;
-#define jbd_debug(n, f, a...) \
- do { \
- if ((n) <= journal_enable_debug) { \
- printk (KERN_DEBUG "(%s, %d): %s: ", \
- __FILE__, __LINE__, __func__); \
- printk (f, ## a); \
- } \
- } while (0)
+void __jbd_debug(int level, const char *file, const char *func,
+ unsigned int line, const char *fmt, ...);
+
+#define jbd_debug(n, fmt, a...) \
+ __jbd_debug((n), __FILE__, __func__, __LINE__, (fmt), ##a)
#else
-#define jbd_debug(f, a...) /**/
+#define jbd_debug(n, fmt, a...) /**/
#endif
static inline void *jbd_alloc(size_t size, gfp_t flags)
@@ -77,7 +74,7 @@ static inline void *jbd_alloc(size_t size, gfp_t flags)
static inline void jbd_free(void *ptr, size_t size)
{
free_pages((unsigned long)ptr, get_order(size));
-};
+}
#define JFS_MIN_JOURNAL_BLOCKS 1024
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 97ba4e78a37..d235e88cfd7 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -101,13 +101,13 @@ static inline u64 get_jiffies_64(void)
#define time_after(a,b) \
(typecheck(unsigned long, a) && \
typecheck(unsigned long, b) && \
- ((long)(b) - (long)(a) < 0))
+ ((long)((b) - (a)) < 0))
#define time_before(a,b) time_after(b,a)
#define time_after_eq(a,b) \
(typecheck(unsigned long, a) && \
typecheck(unsigned long, b) && \
- ((long)(a) - (long)(b) >= 0))
+ ((long)((a) - (b)) >= 0))
#define time_before_eq(a,b) time_after_eq(b,a)
/*
@@ -130,13 +130,13 @@ static inline u64 get_jiffies_64(void)
#define time_after64(a,b) \
(typecheck(__u64, a) && \
typecheck(__u64, b) && \
- ((__s64)(b) - (__s64)(a) < 0))
+ ((__s64)((b) - (a)) < 0))
#define time_before64(a,b) time_after64(b,a)
#define time_after_eq64(a,b) \
(typecheck(__u64, a) && \
typecheck(__u64, b) && \
- ((__s64)(a) - (__s64)(b) >= 0))
+ ((__s64)((a) - (b)) >= 0))
#define time_before_eq64(a,b) time_after_eq64(b,a)
#define time_in_range64(a, b, c) \
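
The change from (long)(b) - (long)(a) to (long)((b) - (a)) performs the subtraction in unsigned arithmetic, which wraps modulo 2^BITS_PER_LONG, and only then reinterprets the result as signed; the old form could overflow signed arithmetic, which is undefined behaviour. A worked sketch, assuming a 32-bit unsigned long:

static bool example_wrap_is_pending(void)
{
	unsigned long now     = 0xfffffff0UL;	/* just before the jiffies wrap */
	unsigned long timeout = 0x00000010UL;	/* shortly after the wrap */

	/* (long)(now - timeout) == (long)0xffffffe0 < 0, so still pending. */
	return time_before(now, timeout);	/* true */
}
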
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 0976fc46d1e..a5079072da6 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -48,7 +48,6 @@
#include <linux/types.h>
#include <linux/compiler.h>
-#include <linux/workqueue.h>
#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
@@ -61,12 +60,6 @@ struct static_key {
#endif
};
-struct static_key_deferred {
- struct static_key key;
- unsigned long timeout;
- struct delayed_work work;
-};
-
# include <asm/jump_label.h>
# define HAVE_JUMP_LABEL
#endif /* CC_HAVE_ASM_GOTO && CONFIG_JUMP_LABEL */
@@ -78,6 +71,7 @@ enum jump_label_type {
struct module;
+#include <linux/atomic.h>
#ifdef HAVE_JUMP_LABEL
#define JUMP_LABEL_TRUE_BRANCH 1UL
@@ -119,10 +113,7 @@ extern void arch_jump_label_transform_static(struct jump_entry *entry,
extern int jump_label_text_reserved(void *start, void *end);
extern void static_key_slow_inc(struct static_key *key);
extern void static_key_slow_dec(struct static_key *key);
-extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
extern void jump_label_apply_nops(struct module *mod);
-extern void
-jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
#define STATIC_KEY_INIT_TRUE ((struct static_key) \
{ .enabled = ATOMIC_INIT(1), .entries = (void *)1 })
@@ -131,8 +122,6 @@ jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
#else /* !HAVE_JUMP_LABEL */
-#include <linux/atomic.h>
-
struct static_key {
atomic_t enabled;
};
@@ -141,10 +130,6 @@ static __always_inline void jump_label_init(void)
{
}
-struct static_key_deferred {
- struct static_key key;
-};
-
static __always_inline bool static_key_false(struct static_key *key)
{
if (unlikely(atomic_read(&key->enabled)) > 0)
@@ -169,11 +154,6 @@ static inline void static_key_slow_dec(struct static_key *key)
atomic_dec(&key->enabled);
}
-static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
-{
- static_key_slow_dec(&key->key);
-}
-
static inline int jump_label_text_reserved(void *start, void *end)
{
return 0;
@@ -187,12 +167,6 @@ static inline int jump_label_apply_nops(struct module *mod)
return 0;
}
-static inline void
-jump_label_rate_limit(struct static_key_deferred *key,
- unsigned long rl)
-{
-}
-
#define STATIC_KEY_INIT_TRUE ((struct static_key) \
{ .enabled = ATOMIC_INIT(1) })
#define STATIC_KEY_INIT_FALSE ((struct static_key) \
diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
new file mode 100644
index 00000000000..113788389b3
--- /dev/null
+++ b/include/linux/jump_label_ratelimit.h
@@ -0,0 +1,34 @@
+#ifndef _LINUX_JUMP_LABEL_RATELIMIT_H
+#define _LINUX_JUMP_LABEL_RATELIMIT_H
+
+#include <linux/jump_label.h>
+#include <linux/workqueue.h>
+
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+struct static_key_deferred {
+ struct static_key key;
+ unsigned long timeout;
+ struct delayed_work work;
+};
+#endif
+
+#ifdef HAVE_JUMP_LABEL
+extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
+extern void
+jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
+
+#else /* !HAVE_JUMP_LABEL */
+struct static_key_deferred {
+ struct static_key key;
+};
+static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
+{
+ static_key_slow_dec(&key->key);
+}
+static inline void
+jump_label_rate_limit(struct static_key_deferred *key,
+ unsigned long rl)
+{
+}
+#endif /* HAVE_JUMP_LABEL */
+#endif /* _LINUX_JUMP_LABEL_RATELIMIT_H */
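
A hedged sketch of a consumer of the relocated deferred static-key API; the key name and the one-second rate limit are illustrative, not from this patch:

#include <linux/jump_label_ratelimit.h>

static struct static_key_deferred example_key;	/* hypothetical key */

static void example_init(void)
{
	/* Batch expensive code patching: defer key decrements by up to 1s. */
	jump_label_rate_limit(&example_key, HZ);
}

static void example_feature_enable(void)
{
	static_key_slow_inc(&example_key.key);
}

static void example_feature_disable(void)
{
	static_key_slow_dec_deferred(&example_key);
}
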
diff --git a/include/linux/kbd_kern.h b/include/linux/kbd_kern.h
index b7c8cdc1d42..cbfb171bbcb 100644
--- a/include/linux/kbd_kern.h
+++ b/include/linux/kbd_kern.h
@@ -36,10 +36,9 @@ struct kbd_struct {
#define VC_CTRLRLOCK KG_CTRLR /* ctrlr lock mode */
unsigned char slockstate; /* for `sticky' Shift, Ctrl, etc. */
- unsigned char ledmode:2; /* one 2-bit value */
+ unsigned char ledmode:1;
#define LED_SHOW_FLAGS 0 /* traditional state */
#define LED_SHOW_IOCTL 1 /* only change leds upon ioctl */
-#define LED_SHOW_MEM 2 /* `heartbeat': peek into memory */
unsigned char ledflagstate:4; /* flags, not lights */
unsigned char default_ledflagstate:4;
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 3bef14c6586..482ad2d84a3 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -629,7 +629,7 @@ extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode);
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline void ftrace_off_permanent(void) { }
-static inline void trace_dump_stack(void) { }
+static inline void trace_dump_stack(int skip) { }
static inline void tracing_on(void) { }
static inline void tracing_off(void) { }
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 939b11268c8..de6dcbcc6ef 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -26,6 +26,7 @@
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/atomic.h>
+#include <linux/workqueue.h>
#define UEVENT_HELPER_PATH_LEN 256
#define UEVENT_NUM_ENVP 32 /* number of env pointers */
@@ -65,6 +66,9 @@ struct kobject {
struct kobj_type *ktype;
struct sysfs_dirent *sd;
struct kref kref;
+#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
+ struct delayed_work release;
+#endif
unsigned int state_initialized:1;
unsigned int state_in_sysfs:1;
unsigned int state_add_uevent_sent:1;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index a63d83ebd15..ca645a01d37 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -85,6 +85,12 @@ static inline bool is_noslot_pfn(pfn_t pfn)
return pfn == KVM_PFN_NOSLOT;
}
+/*
+ * Architectures with KVM_HVA_ERR_BAD other than PAGE_OFFSET (e.g. s390)
+ * provide their own defines and kvm_is_error_hva().
+ */
+#ifndef KVM_HVA_ERR_BAD
+
#define KVM_HVA_ERR_BAD (PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE)
@@ -93,6 +99,8 @@ static inline bool kvm_is_error_hva(unsigned long addr)
return addr >= PAGE_OFFSET;
}
+#endif
+
#define KVM_ERR_PTR_BAD_PAGE (ERR_PTR(-ENOENT))
static inline bool is_error_page(struct page *page)
@@ -160,8 +168,12 @@ enum kvm_bus {
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, const void *val);
+int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, int len,
void *val);
+int kvm_io_bus_read_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ int len, void *val, long cookie);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
@@ -499,6 +511,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages);
+void kvm_arch_memslots_updated(struct kvm *kvm);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_userspace_memory_region *mem,
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 4ea55bb45de..0e23c26485f 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -138,6 +138,22 @@ enum {
ATA_SHT_THIS_ID = -1,
ATA_SHT_USE_CLUSTERING = 1,
+ /* struct ata_taskfile flags */
+ ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */
+ ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */
+ ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */
+ ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */
+ ATA_TFLAG_LBA = (1 << 4), /* enable LBA */
+ ATA_TFLAG_FUA = (1 << 5), /* enable FUA */
+ ATA_TFLAG_POLLING = (1 << 6), /* set nIEN to 1 and use polling */
+
+ /* protocol flags */
+ ATA_PROT_FLAG_PIO = (1 << 0), /* is PIO */
+ ATA_PROT_FLAG_DMA = (1 << 1), /* is DMA */
+ ATA_PROT_FLAG_DATA = ATA_PROT_FLAG_PIO | ATA_PROT_FLAG_DMA,
+ ATA_PROT_FLAG_NCQ = (1 << 2), /* is NCQ */
+ ATA_PROT_FLAG_ATAPI = (1 << 3), /* is ATAPI */
+
/* struct ata_device stuff */
ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */
ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */
@@ -156,6 +172,7 @@ enum {
ATA_DFLAG_DUBIOUS_XFER = (1 << 16), /* data transfer not verified */
ATA_DFLAG_NO_UNLOAD = (1 << 17), /* device doesn't support unload */
ATA_DFLAG_UNLOCK_HPA = (1 << 18), /* unlock HPA */
+ ATA_DFLAG_NCQ_SEND_RECV = (1 << 19), /* device supports NCQ SEND and RECV */
ATA_DFLAG_INIT_MASK = (1 << 24) - 1,
ATA_DFLAG_DETACH = (1 << 24),
@@ -207,6 +224,7 @@ enum {
ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */
ATA_FLAG_AN = (1 << 18), /* controller supports AN */
ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */
+ ATA_FLAG_FPDMA_AUX = (1 << 20), /* controller supports H2DFIS aux field */
ATA_FLAG_EM = (1 << 21), /* driver supports enclosure
* management */
ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity
@@ -518,6 +536,33 @@ enum sw_activity {
BLINK_OFF,
};
+struct ata_taskfile {
+ unsigned long flags; /* ATA_TFLAG_xxx */
+ u8 protocol; /* ATA_PROT_xxx */
+
+ u8 ctl; /* control reg */
+
+ u8 hob_feature; /* additional data */
+ u8 hob_nsect; /* to support LBA48 */
+ u8 hob_lbal;
+ u8 hob_lbam;
+ u8 hob_lbah;
+
+ u8 feature;
+ u8 nsect;
+ u8 lbal;
+ u8 lbam;
+ u8 lbah;
+
+ u8 device;
+
+ u8 command; /* IO operation */
+
+ u32 auxiliary; /* auxiliary field */
+ /* from SATA 3.1 and */
+ /* ATA-8 ACS-3 */
+};
+
#ifdef CONFIG_ATA_SFF
struct ata_ioports {
void __iomem *cmd_addr;
@@ -660,6 +705,9 @@ struct ata_device {
/* DEVSLP Timing Variables from Identify Device Data Log */
u8 devslp_timing[ATA_LOG_DEVSLP_SIZE];
+ /* NCQ send and receive log subcommand support */
+ u8 ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_SIZE];
+
/* error history */
int spdn_cnt;
/* ering is CLEAR_END, read comment above CLEAR_END */
@@ -959,6 +1007,69 @@ extern const unsigned long sata_deb_timing_long[];
extern struct ata_port_operations ata_dummy_port_ops;
extern const struct ata_port_info ata_dummy_port_info;
+/*
+ * protocol tests
+ */
+static inline unsigned int ata_prot_flags(u8 prot)
+{
+ switch (prot) {
+ case ATA_PROT_NODATA:
+ return 0;
+ case ATA_PROT_PIO:
+ return ATA_PROT_FLAG_PIO;
+ case ATA_PROT_DMA:
+ return ATA_PROT_FLAG_DMA;
+ case ATA_PROT_NCQ:
+ return ATA_PROT_FLAG_DMA | ATA_PROT_FLAG_NCQ;
+ case ATAPI_PROT_NODATA:
+ return ATA_PROT_FLAG_ATAPI;
+ case ATAPI_PROT_PIO:
+ return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_PIO;
+ case ATAPI_PROT_DMA:
+ return ATA_PROT_FLAG_ATAPI | ATA_PROT_FLAG_DMA;
+ }
+ return 0;
+}
+
+static inline int ata_is_atapi(u8 prot)
+{
+ return ata_prot_flags(prot) & ATA_PROT_FLAG_ATAPI;
+}
+
+static inline int ata_is_nodata(u8 prot)
+{
+ return !(ata_prot_flags(prot) & ATA_PROT_FLAG_DATA);
+}
+
+static inline int ata_is_pio(u8 prot)
+{
+ return ata_prot_flags(prot) & ATA_PROT_FLAG_PIO;
+}
+
+static inline int ata_is_dma(u8 prot)
+{
+ return ata_prot_flags(prot) & ATA_PROT_FLAG_DMA;
+}
+
+static inline int ata_is_ncq(u8 prot)
+{
+ return ata_prot_flags(prot) & ATA_PROT_FLAG_NCQ;
+}
+
+static inline int ata_is_data(u8 prot)
+{
+ return ata_prot_flags(prot) & ATA_PROT_FLAG_DATA;
+}
+
+static inline int is_multi_taskfile(struct ata_taskfile *tf)
+{
+ return (tf->command == ATA_CMD_READ_MULTI) ||
+ (tf->command == ATA_CMD_WRITE_MULTI) ||
+ (tf->command == ATA_CMD_READ_MULTI_EXT) ||
+ (tf->command == ATA_CMD_WRITE_MULTI_EXT) ||
+ (tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT);
+}
+
static inline const unsigned long *
sata_ehc_deb_timing(struct ata_eh_context *ehc)
{
@@ -1142,8 +1253,6 @@ int ata_acpi_stm(struct ata_port *ap, const struct ata_acpi_gtm *stm);
int ata_acpi_gtm(struct ata_port *ap, struct ata_acpi_gtm *stm);
unsigned long ata_acpi_gtm_xfermask(struct ata_device *dev,
const struct ata_acpi_gtm *gtm);
-acpi_handle ata_ap_acpi_handle(struct ata_port *ap);
-acpi_handle ata_dev_acpi_handle(struct ata_device *dev);
int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm);
#else
static inline const struct ata_acpi_gtm *ata_acpi_init_gtm(struct ata_port *ap)
@@ -1497,6 +1606,13 @@ static inline int ata_ncq_enabled(struct ata_device *dev)
ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
}
+static inline bool ata_fpdma_dsm_supported(struct ata_device *dev)
+{
+ return (dev->flags & ATA_DFLAG_NCQ_SEND_RECV) &&
+ (dev->ncq_send_recv_cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &
+ ATA_LOG_NCQ_SEND_RECV_DSM_TRIM);
+}
+
static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
{
qc->tf.ctl |= ATA_NIEN;
diff --git a/include/linux/llist.h b/include/linux/llist.h
index cdaa7f02389..8828a78dec9 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -125,6 +125,29 @@ static inline void init_llist_head(struct llist_head *list)
(pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
/**
+ * llist_for_each_entry_safe - iterate over some deleted entries of a lock-less list of given type,
+ * safe against removal of the list entry
+ * @pos: the type * to use as a loop cursor.
+ * @n: another type * to use as temporary storage
+ * @node: the first entry of deleted list entries.
+ * @member: the name of the llist_node within the struct.
+ *
+ * In general, some entries of the lock-less list can be traversed
+ * safely only after being removed from the list, so start with an entry
+ * instead of the list head.
+ *
+ * If used on entries deleted from the lock-less list directly, the
+ * traversal order is from the newest to the oldest added entry. To
+ * traverse from the oldest to the newest, reverse the order yourself
+ * before traversing.
+ */
+#define llist_for_each_entry_safe(pos, n, node, member) \
+ for (pos = llist_entry((node), typeof(*pos), member); \
+ &pos->member != NULL && \
+ (n = llist_entry(pos->member.next, typeof(*n), member), true); \
+ pos = n)
+
+/**
* llist_empty - tests whether a lock-less list is empty
* @head: the list to test
*
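
A minimal sketch of the new iterator: entries are first detached with llist_del_all(), after which they may be freed while walking, which the non-safe variant would not allow. The entry type and free path are illustrative, not from this patch:

struct foo_item {				/* hypothetical entry type */
	struct llist_node node;
	int payload;
};

static void example_drain(struct llist_head *head)
{
	struct foo_item *pos, *n;
	struct llist_node *first = llist_del_all(head);

	/* Safe: n is fetched before the loop body frees pos. */
	llist_for_each_entry_safe(pos, n, first, node)
		kfree(pos);
}
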
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index f1e877b79ed..cfc2f119779 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -365,7 +365,7 @@ extern void lockdep_trace_alloc(gfp_t mask);
#define lockdep_recursing(tsk) ((tsk)->lockdep_recursion)
-#else /* !LOCKDEP */
+#else /* !CONFIG_LOCKDEP */
static inline void lockdep_off(void)
{
@@ -479,82 +479,36 @@ static inline void print_irqtrace_events(struct task_struct *curr)
* on the per lock-class debug mode:
*/
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
-# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
-# else
-# define spin_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
-# define spin_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, NULL, i)
-# endif
-# define spin_release(l, n, i) lock_release(l, n, i)
+#ifdef CONFIG_PROVE_LOCKING
+ #define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
+ #define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 2, n, i)
+ #define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 2, n, i)
#else
-# define spin_acquire(l, s, t, i) do { } while (0)
-# define spin_release(l, n, i) do { } while (0)
+ #define lock_acquire_exclusive(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
+ #define lock_acquire_shared(l, s, t, n, i) lock_acquire(l, s, t, 1, 1, n, i)
+ #define lock_acquire_shared_recursive(l, s, t, n, i) lock_acquire(l, s, t, 2, 1, n, i)
#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
-# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 2, NULL, i)
-# else
-# define rwlock_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
-# define rwlock_acquire_read(l, s, t, i) lock_acquire(l, s, t, 2, 1, NULL, i)
-# endif
-# define rwlock_release(l, n, i) lock_release(l, n, i)
-#else
-# define rwlock_acquire(l, s, t, i) do { } while (0)
-# define rwlock_acquire_read(l, s, t, i) do { } while (0)
-# define rwlock_release(l, n, i) do { } while (0)
-#endif
+#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
+#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
+#define spin_release(l, n, i) lock_release(l, n, i)
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
-# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
-# else
-# define mutex_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
-# define mutex_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
-# endif
-# define mutex_release(l, n, i) lock_release(l, n, i)
-#else
-# define mutex_acquire(l, s, t, i) do { } while (0)
-# define mutex_acquire_nest(l, s, t, n, i) do { } while (0)
-# define mutex_release(l, n, i) do { } while (0)
-#endif
+#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
+#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
+#define rwlock_release(l, n, i) lock_release(l, n, i)
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
-# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
-# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i)
-# else
-# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
-# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
-# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i)
-# endif
+#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
+#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
+#define mutex_release(l, n, i) lock_release(l, n, i)
+
+#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
+#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
+#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i)
# define rwsem_release(l, n, i) lock_release(l, n, i)
-#else
-# define rwsem_acquire(l, s, t, i) do { } while (0)
-# define rwsem_acquire_nest(l, s, t, n, i) do { } while (0)
-# define rwsem_acquire_read(l, s, t, i) do { } while (0)
-# define rwsem_release(l, n, i) do { } while (0)
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# ifdef CONFIG_PROVE_LOCKING
-# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
-# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 2, NULL, _THIS_IP_)
-# else
-# define lock_map_acquire(l) lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
-# define lock_map_acquire_read(l) lock_acquire(l, 0, 0, 2, 1, NULL, _THIS_IP_)
-# endif
+#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
+#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
# define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
-#else
-# define lock_map_acquire(l) do { } while (0)
-# define lock_map_acquire_read(l) do { } while (0)
-# define lock_map_release(l) do { } while (0)
-#endif
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock) \
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
new file mode 100644
index 00000000000..ca07b5028b0
--- /dev/null
+++ b/include/linux/lockref.h
@@ -0,0 +1,36 @@
+#ifndef __LINUX_LOCKREF_H
+#define __LINUX_LOCKREF_H
+
+/*
+ * Locked reference counts.
+ *
+ * These are different from just plain atomic refcounts in that they
+ * are atomic with respect to the spinlock that goes with them. In
+ * particular, there can be implementations that don't actually get
+ * the spinlock for the common decrement/increment operations, but they
+ * still have to check that the operation is done semantically as if
+ * the spinlock had been taken (using a cmpxchg operation that covers
+ * both the lock and the count word, or using memory transactions, for
+ * example).
+ */
+
+#include <linux/spinlock.h>
+
+struct lockref {
+ union {
+#ifdef CONFIG_CMPXCHG_LOCKREF
+ aligned_u64 lock_count;
+#endif
+ struct {
+ spinlock_t lock;
+ unsigned int count;
+ };
+ };
+};
+
+extern void lockref_get(struct lockref *);
+extern int lockref_get_not_zero(struct lockref *);
+extern int lockref_get_or_lock(struct lockref *);
+extern int lockref_put_or_lock(struct lockref *);
+
+#endif /* __LINUX_LOCKREF_H */
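
A hedged sketch of the intended calling pattern (the embedding object is hypothetical, and lockref_put_or_lock() is assumed to return non-zero when it decremented without taking the lock, and zero with the lock held when the count would reach zero):

struct foo_obj {				/* hypothetical refcounted object */
	struct lockref ref;
};

static void foo_get(struct foo_obj *obj)
{
	lockref_get(&obj->ref);			/* may avoid taking ref.lock */
}

static void foo_put(struct foo_obj *obj)
{
	if (lockref_put_or_lock(&obj->ref))
		return;				/* not the last reference */

	/* Last reference: ref.lock is now held, tear down under it. */
	obj->ref.count = 0;
	spin_unlock(&obj->ref.lock);
	kfree(obj);
}
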
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 7b4d9d79570..6c416092e32 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -85,7 +85,7 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
-extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);
+extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);
static inline
bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 85c31a8e290..9a6bbf76452 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -25,16 +25,9 @@
struct memory_block {
unsigned long start_section_nr;
unsigned long end_section_nr;
- unsigned long state;
- int section_count;
-
- /*
- * This serializes all state change requests. It isn't
- * held during creation because the control files are
- * created long after the critical areas during
- * initialization.
- */
- struct mutex state_mutex;
+ unsigned long state; /* serialized by the dev->lock */
+ int section_count; /* serialized by mem_sysfs_mutex */
+ int online_type; /* for passing data to online routine */
int phys_device; /* to which fru does this belong? */
void *hw; /* optional pointer to fw/hw data */
int (*phys_callback)(struct memory_block *);
@@ -125,7 +118,6 @@ extern struct memory_block *find_memory_block_hinted(struct mem_section *,
struct memory_block *);
extern struct memory_block *find_memory_block(struct mem_section *);
#define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT)
-enum mem_add_context { BOOT, HOTPLUG };
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
#ifdef CONFIG_MEMORY_HOTPLUG
diff --git a/include/linux/mfd/arizona/gpio.h b/include/linux/mfd/arizona/gpio.h
new file mode 100644
index 00000000000..d2146bb74f8
--- /dev/null
+++ b/include/linux/mfd/arizona/gpio.h
@@ -0,0 +1,96 @@
+/*
+ * GPIO configuration for Arizona devices
+ *
+ * Copyright 2013 Wolfson Microelectronics. PLC.
+ *
+ * Author: Charles Keepax <ckeepax@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ARIZONA_GPIO_H
+#define _ARIZONA_GPIO_H
+
+#define ARIZONA_GP_FN_TXLRCLK 0x00
+#define ARIZONA_GP_FN_GPIO 0x01
+#define ARIZONA_GP_FN_IRQ1 0x02
+#define ARIZONA_GP_FN_IRQ2 0x03
+#define ARIZONA_GP_FN_OPCLK 0x04
+#define ARIZONA_GP_FN_FLL1_OUT 0x05
+#define ARIZONA_GP_FN_FLL2_OUT 0x06
+#define ARIZONA_GP_FN_PWM1 0x08
+#define ARIZONA_GP_FN_PWM2 0x09
+#define ARIZONA_GP_FN_SYSCLK_UNDERCLOCKED 0x0A
+#define ARIZONA_GP_FN_ASYNCCLK_UNDERCLOCKED 0x0B
+#define ARIZONA_GP_FN_FLL1_LOCK 0x0C
+#define ARIZONA_GP_FN_FLL2_LOCK 0x0D
+#define ARIZONA_GP_FN_FLL1_CLOCK_OK 0x0F
+#define ARIZONA_GP_FN_FLL2_CLOCK_OK 0x10
+#define ARIZONA_GP_FN_HEADPHONE_DET 0x12
+#define ARIZONA_GP_FN_MIC_DET 0x13
+#define ARIZONA_GP_FN_WSEQ_STATUS 0x15
+#define ARIZONA_GP_FN_CIF_ADDRESS_ERROR 0x16
+#define ARIZONA_GP_FN_ASRC1_LOCK 0x1A
+#define ARIZONA_GP_FN_ASRC2_LOCK 0x1B
+#define ARIZONA_GP_FN_ASRC_CONFIG_ERROR 0x1C
+#define ARIZONA_GP_FN_DRC1_SIGNAL_DETECT 0x1D
+#define ARIZONA_GP_FN_DRC1_ANTICLIP 0x1E
+#define ARIZONA_GP_FN_DRC1_DECAY 0x1F
+#define ARIZONA_GP_FN_DRC1_NOISE 0x20
+#define ARIZONA_GP_FN_DRC1_QUICK_RELEASE 0x21
+#define ARIZONA_GP_FN_DRC2_SIGNAL_DETECT 0x22
+#define ARIZONA_GP_FN_DRC2_ANTICLIP 0x23
+#define ARIZONA_GP_FN_DRC2_DECAY 0x24
+#define ARIZONA_GP_FN_DRC2_NOISE 0x25
+#define ARIZONA_GP_FN_DRC2_QUICK_RELEASE 0x26
+#define ARIZONA_GP_FN_MIXER_DROPPED_SAMPLE 0x27
+#define ARIZONA_GP_FN_AIF1_CONFIG_ERROR 0x28
+#define ARIZONA_GP_FN_AIF2_CONFIG_ERROR 0x29
+#define ARIZONA_GP_FN_AIF3_CONFIG_ERROR 0x2A
+#define ARIZONA_GP_FN_SPK_TEMP_SHUTDOWN 0x2B
+#define ARIZONA_GP_FN_SPK_TEMP_WARNING 0x2C
+#define ARIZONA_GP_FN_UNDERCLOCKED 0x2D
+#define ARIZONA_GP_FN_OVERCLOCKED 0x2E
+#define ARIZONA_GP_FN_DSP_IRQ1 0x35
+#define ARIZONA_GP_FN_DSP_IRQ2 0x36
+#define ARIZONA_GP_FN_ASYNC_OPCLK 0x3D
+#define ARIZONA_GP_FN_BOOT_DONE 0x44
+#define ARIZONA_GP_FN_DSP1_RAM_READY 0x45
+#define ARIZONA_GP_FN_SYSCLK_ENA_STATUS 0x4B
+#define ARIZONA_GP_FN_ASYNCCLK_ENA_STATUS 0x4C
+
+#define ARIZONA_GPN_DIR 0x8000 /* GPN_DIR */
+#define ARIZONA_GPN_DIR_MASK 0x8000 /* GPN_DIR */
+#define ARIZONA_GPN_DIR_SHIFT 15 /* GPN_DIR */
+#define ARIZONA_GPN_DIR_WIDTH 1 /* GPN_DIR */
+#define ARIZONA_GPN_PU 0x4000 /* GPN_PU */
+#define ARIZONA_GPN_PU_MASK 0x4000 /* GPN_PU */
+#define ARIZONA_GPN_PU_SHIFT 14 /* GPN_PU */
+#define ARIZONA_GPN_PU_WIDTH 1 /* GPN_PU */
+#define ARIZONA_GPN_PD 0x2000 /* GPN_PD */
+#define ARIZONA_GPN_PD_MASK 0x2000 /* GPN_PD */
+#define ARIZONA_GPN_PD_SHIFT 13 /* GPN_PD */
+#define ARIZONA_GPN_PD_WIDTH 1 /* GPN_PD */
+#define ARIZONA_GPN_LVL 0x0800 /* GPN_LVL */
+#define ARIZONA_GPN_LVL_MASK 0x0800 /* GPN_LVL */
+#define ARIZONA_GPN_LVL_SHIFT 11 /* GPN_LVL */
+#define ARIZONA_GPN_LVL_WIDTH 1 /* GPN_LVL */
+#define ARIZONA_GPN_POL 0x0400 /* GPN_POL */
+#define ARIZONA_GPN_POL_MASK 0x0400 /* GPN_POL */
+#define ARIZONA_GPN_POL_SHIFT 10 /* GPN_POL */
+#define ARIZONA_GPN_POL_WIDTH 1 /* GPN_POL */
+#define ARIZONA_GPN_OP_CFG 0x0200 /* GPN_OP_CFG */
+#define ARIZONA_GPN_OP_CFG_MASK 0x0200 /* GPN_OP_CFG */
+#define ARIZONA_GPN_OP_CFG_SHIFT 9 /* GPN_OP_CFG */
+#define ARIZONA_GPN_OP_CFG_WIDTH 1 /* GPN_OP_CFG */
+#define ARIZONA_GPN_DB 0x0100 /* GPN_DB */
+#define ARIZONA_GPN_DB_MASK 0x0100 /* GPN_DB */
+#define ARIZONA_GPN_DB_SHIFT 8 /* GPN_DB */
+#define ARIZONA_GPN_DB_WIDTH 1 /* GPN_DB */
+#define ARIZONA_GPN_FN_MASK 0x007F /* GPN_FN */
+#define ARIZONA_GPN_FN_SHIFT 0 /* GPN_FN */
+#define ARIZONA_GPN_FN_WIDTH 7 /* GPN_FN */
+
+#endif
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h
index 1a8dd7afe08..37e48c95779 100644
--- a/include/linux/mfd/palmas.h
+++ b/include/linux/mfd/palmas.h
@@ -160,7 +160,8 @@ enum palmas_regulators {
PALMAS_REG_SMPS7,
PALMAS_REG_SMPS8,
PALMAS_REG_SMPS9,
- PALMAS_REG_SMPS10,
+ PALMAS_REG_SMPS10_OUT2,
+ PALMAS_REG_SMPS10_OUT1,
/* LDO regulators */
PALMAS_REG_LDO1,
PALMAS_REG_LDO2,
@@ -355,9 +356,9 @@ struct palmas_pmic {
int smps123;
int smps457;
- int range[PALMAS_REG_SMPS10];
- unsigned int ramp_delay[PALMAS_REG_SMPS10];
- unsigned int current_reg_mode[PALMAS_REG_SMPS10];
+ int range[PALMAS_REG_SMPS10_OUT1];
+ unsigned int ramp_delay[PALMAS_REG_SMPS10_OUT1];
+ unsigned int current_reg_mode[PALMAS_REG_SMPS10_OUT1];
};
struct palmas_resource {
@@ -371,17 +372,15 @@ struct palmas_usb {
struct extcon_dev edev;
- /* used to set vbus, in atomic path */
- struct work_struct set_vbus_work;
-
int id_otg_irq;
int id_irq;
int vbus_otg_irq;
int vbus_irq;
- int vbus_enable;
-
enum palmas_usb_state linkstat;
+ int wakeup;
+ bool enable_vbus_detection;
+ bool enable_id_detection;
};
#define comparator_to_palmas(x) container_of((x), struct palmas_usb, comparator)
@@ -449,7 +448,7 @@ enum usb_irq_events {
#define PALMAS_DVFS_BASE 0x180
#define PALMAS_PMU_CONTROL_BASE 0x1A0
#define PALMAS_RESOURCE_BASE 0x1D4
-#define PALMAS_PU_PD_OD_BASE 0x1F4
+#define PALMAS_PU_PD_OD_BASE 0x1F0
#define PALMAS_LED_BASE 0x200
#define PALMAS_INTERRUPT_BASE 0x210
#define PALMAS_USB_OTG_BASE 0x250
@@ -1734,16 +1733,20 @@ enum usb_irq_events {
#define PALMAS_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0
/* Registers for function PAD_CONTROL */
-#define PALMAS_PU_PD_INPUT_CTRL1 0x0
-#define PALMAS_PU_PD_INPUT_CTRL2 0x1
-#define PALMAS_PU_PD_INPUT_CTRL3 0x2
-#define PALMAS_OD_OUTPUT_CTRL 0x4
-#define PALMAS_POLARITY_CTRL 0x5
-#define PALMAS_PRIMARY_SECONDARY_PAD1 0x6
-#define PALMAS_PRIMARY_SECONDARY_PAD2 0x7
-#define PALMAS_I2C_SPI 0x8
-#define PALMAS_PU_PD_INPUT_CTRL4 0x9
-#define PALMAS_PRIMARY_SECONDARY_PAD3 0xA
+#define PALMAS_OD_OUTPUT_CTRL2 0x2
+#define PALMAS_POLARITY_CTRL2 0x3
+#define PALMAS_PU_PD_INPUT_CTRL1 0x4
+#define PALMAS_PU_PD_INPUT_CTRL2 0x5
+#define PALMAS_PU_PD_INPUT_CTRL3 0x6
+#define PALMAS_PU_PD_INPUT_CTRL5 0x7
+#define PALMAS_OD_OUTPUT_CTRL 0x8
+#define PALMAS_POLARITY_CTRL 0x9
+#define PALMAS_PRIMARY_SECONDARY_PAD1 0xA
+#define PALMAS_PRIMARY_SECONDARY_PAD2 0xB
+#define PALMAS_I2C_SPI 0xC
+#define PALMAS_PU_PD_INPUT_CTRL4 0xD
+#define PALMAS_PRIMARY_SECONDARY_PAD3 0xE
+#define PALMAS_PRIMARY_SECONDARY_PAD4 0xF
/* Bit definitions for PU_PD_INPUT_CTRL1 */
#define PALMAS_PU_PD_INPUT_CTRL1_RESET_IN_PD 0x40
@@ -2501,6 +2504,15 @@ enum usb_irq_events {
#define PALMAS_PU_PD_GPIO_CTRL1 0x6
#define PALMAS_PU_PD_GPIO_CTRL2 0x7
#define PALMAS_OD_OUTPUT_GPIO_CTRL 0x8
+#define PALMAS_GPIO_DATA_IN2 0x9
+#define PALMAS_GPIO_DATA_DIR2 0x0A
+#define PALMAS_GPIO_DATA_OUT2 0x0B
+#define PALMAS_GPIO_DEBOUNCE_EN2 0x0C
+#define PALMAS_GPIO_CLEAR_DATA_OUT2 0x0D
+#define PALMAS_GPIO_SET_DATA_OUT2 0x0E
+#define PALMAS_PU_PD_GPIO_CTRL3 0x0F
+#define PALMAS_PU_PD_GPIO_CTRL4 0x10
+#define PALMAS_OD_OUTPUT_GPIO_CTRL2 0x11
/* Bit definitions for GPIO_DATA_IN */
#define PALMAS_GPIO_DATA_IN_GPIO_7_IN 0x80
diff --git a/include/linux/mfd/samsung/s2mps11.h b/include/linux/mfd/samsung/s2mps11.h
index 4e94dc65f98..d0d52ea6007 100644
--- a/include/linux/mfd/samsung/s2mps11.h
+++ b/include/linux/mfd/samsung/s2mps11.h
@@ -191,6 +191,17 @@ enum s2mps11_regulators {
#define S2MPS11_BUCK_N_VOLTAGES (S2MPS11_BUCK_VSEL_MASK + 1)
#define S2MPS11_RAMP_DELAY 25000 /* uV/us */
+
+#define S2MPS11_BUCK2_RAMP_SHIFT 6
+#define S2MPS11_BUCK34_RAMP_SHIFT 4
+#define S2MPS11_BUCK5_RAMP_SHIFT 6
+#define S2MPS11_BUCK16_RAMP_SHIFT 4
+#define S2MPS11_BUCK7810_RAMP_SHIFT 2
+#define S2MPS11_BUCK9_RAMP_SHIFT 0
+#define S2MPS11_BUCK2_RAMP_EN_SHIFT 3
+#define S2MPS11_BUCK3_RAMP_EN_SHIFT 2
+#define S2MPS11_BUCK4_RAMP_EN_SHIFT 1
+#define S2MPS11_BUCK6_RAMP_EN_SHIFT 0
#define S2MPS11_PMIC_EN_SHIFT 6
#define S2MPS11_REGULATOR_MAX (S2MPS11_REG_MAX - 3)
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index dab34a1deb2..b6bdcd66c07 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -103,15 +103,15 @@
#define IMX6Q_GPR1_EXC_MON_MASK BIT(22)
#define IMX6Q_GPR1_EXC_MON_OKAY 0x0
#define IMX6Q_GPR1_EXC_MON_SLVE BIT(22)
-#define IMX6Q_GPR1_MIPI_IPU2_SEL_MASK BIT(21)
-#define IMX6Q_GPR1_MIPI_IPU2_SEL_GASKET 0x0
-#define IMX6Q_GPR1_MIPI_IPU2_SEL_IOMUX BIT(21)
-#define IMX6Q_GPR1_MIPI_IPU1_MUX_MASK BIT(20)
-#define IMX6Q_GPR1_MIPI_IPU1_MUX_GASKET 0x0
-#define IMX6Q_GPR1_MIPI_IPU1_MUX_IOMUX BIT(20)
-#define IMX6Q_GPR1_MIPI_IPU2_MUX_MASK BIT(19)
+#define IMX6Q_GPR1_ENET_CLK_SEL_MASK BIT(21)
+#define IMX6Q_GPR1_ENET_CLK_SEL_PAD 0
+#define IMX6Q_GPR1_ENET_CLK_SEL_ANATOP BIT(21)
+#define IMX6Q_GPR1_MIPI_IPU2_MUX_MASK BIT(20)
#define IMX6Q_GPR1_MIPI_IPU2_MUX_GASKET 0x0
-#define IMX6Q_GPR1_MIPI_IPU2_MUX_IOMUX BIT(19)
+#define IMX6Q_GPR1_MIPI_IPU2_MUX_IOMUX BIT(20)
+#define IMX6Q_GPR1_MIPI_IPU1_MUX_MASK BIT(19)
+#define IMX6Q_GPR1_MIPI_IPU1_MUX_GASKET 0x0
+#define IMX6Q_GPR1_MIPI_IPU1_MUX_IOMUX BIT(19)
#define IMX6Q_GPR1_PCIE_TEST_PD BIT(18)
#define IMX6Q_GPR1_IPU_VPU_MUX_MASK BIT(17)
#define IMX6Q_GPR1_IPU_VPU_MUX_IPU1 0x0
@@ -279,41 +279,88 @@
#define IMX6Q_GPR13_CAN2_STOP_REQ BIT(29)
#define IMX6Q_GPR13_CAN1_STOP_REQ BIT(28)
#define IMX6Q_GPR13_ENET_STOP_REQ BIT(27)
-#define IMX6Q_GPR13_SATA_PHY_8_MASK (0x7 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_0_5_DB (0x0 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_1_0_DB (0x1 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_1_5_DB (0x2 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_2_0_DB (0x3 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_2_5_DB (0x4 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_3_0_DB (0x5 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_3_5_DB (0x6 << 24)
-#define IMX6Q_GPR13_SATA_PHY_8_4_0_DB (0x7 << 24)
-#define IMX6Q_GPR13_SATA_PHY_7_MASK (0x1f << 19)
-#define IMX6Q_GPR13_SATA_PHY_7_SATA1I (0x10 << 19)
-#define IMX6Q_GPR13_SATA_PHY_7_SATA1M (0x10 << 19)
-#define IMX6Q_GPR13_SATA_PHY_7_SATA1X (0x1a << 19)
-#define IMX6Q_GPR13_SATA_PHY_7_SATA2I (0x12 << 19)
-#define IMX6Q_GPR13_SATA_PHY_7_SATA2M (0x12 << 19)
-#define IMX6Q_GPR13_SATA_PHY_7_SATA2X (0x1a << 19)
-#define IMX6Q_GPR13_SATA_PHY_6_MASK (0x7 << 16)
-#define IMX6Q_GPR13_SATA_SPEED_MASK BIT(15)
-#define IMX6Q_GPR13_SATA_SPEED_1P5G 0x0
-#define IMX6Q_GPR13_SATA_SPEED_3P0G BIT(15)
-#define IMX6Q_GPR13_SATA_PHY_5 BIT(14)
-#define IMX6Q_GPR13_SATA_PHY_4_MASK (0x7 << 11)
-#define IMX6Q_GPR13_SATA_PHY_4_16_16 (0x0 << 11)
-#define IMX6Q_GPR13_SATA_PHY_4_14_16 (0x1 << 11)
-#define IMX6Q_GPR13_SATA_PHY_4_12_16 (0x2 << 11)
-#define IMX6Q_GPR13_SATA_PHY_4_10_16 (0x3 << 11)
-#define IMX6Q_GPR13_SATA_PHY_4_9_16 (0x4 << 11)
-#define IMX6Q_GPR13_SATA_PHY_4_8_16 (0x5 << 11)
-#define IMX6Q_GPR13_SATA_PHY_3_MASK (0xf << 7)
-#define IMX6Q_GPR13_SATA_PHY_3_OFF 0x7
-#define IMX6Q_GPR13_SATA_PHY_2_MASK (0x1f << 2)
-#define IMX6Q_GPR13_SATA_PHY_2_OFF 0x2
-#define IMX6Q_GPR13_SATA_PHY_1_MASK (0x3 << 0)
-#define IMX6Q_GPR13_SATA_PHY_1_FAST (0x0 << 0)
-#define IMX6Q_GPR13_SATA_PHY_1_MED (0x1 << 0)
-#define IMX6Q_GPR13_SATA_PHY_1_SLOW (0x2 << 0)
-
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK (0x7 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_0_5_DB (0x0 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_1_0_DB (0x1 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_1_5_DB (0x2 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_2_0_DB (0x3 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_2_5_DB (0x4 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB (0x5 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_3_5_DB (0x6 << 24)
+#define IMX6Q_GPR13_SATA_RX_EQ_VAL_4_0_DB (0x7 << 24)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_MASK (0x1f << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1I (0x10 << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1M (0x10 << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA1X (0x1a << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2I (0x12 << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M (0x12 << 19)
+#define IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2X (0x1a << 19)
+#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_MASK (0x7 << 16)
+#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_1P_1F (0x0 << 16)
+#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_2F (0x1 << 16)
+#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_1P_4F (0x2 << 16)
+#define IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F (0x3 << 16)
+#define IMX6Q_GPR13_SATA_SPD_MODE_MASK BIT(15)
+#define IMX6Q_GPR13_SATA_SPD_MODE_1P5G 0x0
+#define IMX6Q_GPR13_SATA_SPD_MODE_3P0G BIT(15)
+#define IMX6Q_GPR13_SATA_MPLL_SS_EN BIT(14)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_MASK (0x7 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_16_16 (0x0 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_14_16 (0x1 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_12_16 (0x2 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_10_16 (0x3 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_9_16 (0x4 << 11)
+#define IMX6Q_GPR13_SATA_TX_ATTEN_8_16 (0x5 << 11)
+#define IMX6Q_GPR13_SATA_TX_BOOST_MASK (0xf << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_0_00_DB (0x0 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_0_37_DB (0x1 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_0_74_DB (0x2 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_1_11_DB (0x3 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_1_48_DB (0x4 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_1_85_DB (0x5 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_2_22_DB (0x6 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_2_59_DB (0x7 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_2_96_DB (0x8 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB (0x9 << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_3_70_DB (0xa << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_4_07_DB (0xb << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_4_44_DB (0xc << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_4_81_DB (0xd << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_5_28_DB (0xe << 7)
+#define IMX6Q_GPR13_SATA_TX_BOOST_5_75_DB (0xf << 7)
+#define IMX6Q_GPR13_SATA_TX_LVL_MASK (0x1f << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_937_V (0x00 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_947_V (0x01 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_957_V (0x02 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_966_V (0x03 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_976_V (0x04 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_986_V (0x05 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_0_996_V (0x06 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_005_V (0x07 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_015_V (0x08 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_025_V (0x09 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_035_V (0x0a << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_045_V (0x0b << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_054_V (0x0c << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_064_V (0x0d << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_074_V (0x0e << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_084_V (0x0f << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_094_V (0x10 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_104_V (0x11 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_113_V (0x12 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_123_V (0x13 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_133_V (0x14 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_143_V (0x15 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_152_V (0x16 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_162_V (0x17 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_172_V (0x18 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_182_V (0x19 << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_191_V (0x1a << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_201_V (0x1b << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_211_V (0x1c << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_221_V (0x1d << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_230_V (0x1e << 2)
+#define IMX6Q_GPR13_SATA_TX_LVL_1_240_V (0x1f << 2)
+#define IMX6Q_GPR13_SATA_MPLL_CLK_EN BIT(1)
+#define IMX6Q_GPR13_SATA_TX_EDGE_RATE BIT(0)
#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h
index 8d73fe29796..db1791bb997 100644
--- a/include/linux/mfd/ti_am335x_tscadc.h
+++ b/include/linux/mfd/ti_am335x_tscadc.h
@@ -113,11 +113,27 @@
#define CNTRLREG_8WIRE CNTRLREG_AFE_CTRL(3)
#define CNTRLREG_TSCENB BIT(7)
+/* FIFO READ Register */
+#define FIFOREAD_DATA_MASK (0xfff << 0)
+#define FIFOREAD_CHNLID_MASK (0xf << 16)
+
+/* Sequencer Status */
+#define SEQ_STATUS BIT(5)
+
#define ADC_CLK 3000000
#define MAX_CLK_DIV 7
#define TOTAL_STEPS 16
#define TOTAL_CHANNELS 8
+/*
+ * The ADC runs at 3 MHz, and it takes
+ * 15 cycles to latch one data output.
+ * Hence the idle time needed for the ADC
+ * to process one sample would be
+ * around 5 microseconds.
+ */
+#define IDLE_TIMEOUT 5 /* microsec */
+
#define TSCADC_CELLS 2
struct ti_tscadc_dev {
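As a quick check on the IDLE_TIMEOUT value introduced above: one conversion takes 15 ADC clock cycles, and at the 3 MHz ADC_CLK rate that is 15 / 3,000,000 s = 5 us, which is where the 5 microsecond idle timeout comes from.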
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
index 29eab2bd3df..a5a7f0130e9 100644
--- a/include/linux/mfd/tps65217.h
+++ b/include/linux/mfd/tps65217.h
@@ -244,24 +244,6 @@ struct tps65217_board {
};
/**
- * struct tps_info - packages regulator constraints
- * @name: Voltage regulator name
- * @min_uV: minimum micro volts
- * @max_uV: minimum micro volts
- * @vsel_to_uv: Function pointer to get voltage from selector
- * @uv_to_vsel: Function pointer to get selector from voltage
- *
- * This data is used to check the regualtor voltage limits while setting.
- */
-struct tps_info {
- const char *name;
- int min_uV;
- int max_uV;
- int (*vsel_to_uv)(unsigned int vsel);
- int (*uv_to_vsel)(int uV, unsigned int *vsel);
-};
-
-/**
* struct tps65217 - tps65217 sub-driver chip access routines
*
* Device data may be used to access the TPS65217 chip
@@ -273,7 +255,6 @@ struct tps65217 {
unsigned int id;
struct regulator_desc desc[TPS65217_NUM_REGULATOR];
struct regulator_dev *rdev[TPS65217_NUM_REGULATOR];
- struct tps_info *info[TPS65217_NUM_REGULATOR];
struct regmap *regmap;
};
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index bb1c8096a7e..cd1fdf75103 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -69,6 +69,7 @@ enum {
MLX4_CMD_SET_ICM_SIZE = 0xffd,
/*master notify fw on finish for slave's flr*/
MLX4_CMD_INFORM_FLR_DONE = 0x5b,
+ MLX4_CMD_GET_OP_REQ = 0x59,
/* TPT commands */
MLX4_CMD_SW2HW_MPT = 0xd,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 52c23a892ba..24ce6bdd540 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -33,6 +33,7 @@
#ifndef MLX4_DEVICE_H
#define MLX4_DEVICE_H
+#include <linux/if_ether.h>
#include <linux/pci.h>
#include <linux/completion.h>
#include <linux/radix-tree.h>
@@ -207,6 +208,7 @@ enum mlx4_event {
MLX4_EVENT_TYPE_CMD = 0x0a,
MLX4_EVENT_TYPE_VEP_UPDATE = 0x19,
MLX4_EVENT_TYPE_COMM_CHANNEL = 0x18,
+ MLX4_EVENT_TYPE_OP_REQUIRED = 0x1a,
MLX4_EVENT_TYPE_FATAL_WARNING = 0x1b,
MLX4_EVENT_TYPE_FLR_EVENT = 0x1c,
MLX4_EVENT_TYPE_PORT_MNG_CHG_EVENT = 0x1d,
@@ -619,7 +621,7 @@ struct mlx4_eth_av {
u8 dgid[16];
u32 reserved4[2];
__be16 vlan;
- u8 mac[6];
+ u8 mac[ETH_ALEN];
};
union mlx4_ext_av {
@@ -913,10 +915,10 @@ enum mlx4_net_trans_promisc_mode {
};
struct mlx4_spec_eth {
- u8 dst_mac[6];
- u8 dst_mac_msk[6];
- u8 src_mac[6];
- u8 src_mac_msk[6];
+ u8 dst_mac[ETH_ALEN];
+ u8 dst_mac_msk[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
+ u8 src_mac_msk[ETH_ALEN];
u8 ether_type_enable;
__be16 ether_type;
__be16 vlan_id_msk;
@@ -1052,11 +1054,6 @@ struct _rule_hw {
};
};
-/* translating DMFS verbs sniffer rule to the FW API would need two reg IDs */
-struct mlx4_flow_handle {
- u64 reg_id[2];
-};
-
int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
enum mlx4_net_trans_promisc_mode mode);
int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 262deac02c9..6d351473c29 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -34,6 +34,7 @@
#define MLX4_QP_H
#include <linux/types.h>
+#include <linux/if_ether.h>
#include <linux/mlx4/device.h>
@@ -143,7 +144,7 @@ struct mlx4_qp_path {
u8 feup;
u8 fvl_rx;
u8 reserved4[2];
- u8 dmac[6];
+ u8 dmac[ETH_ALEN];
};
enum { /* fl */
@@ -318,7 +319,7 @@ struct mlx4_wqe_datagram_seg {
__be32 dqpn;
__be32 qkey;
__be16 vlan;
- u8 mac[6];
+ u8 mac[ETH_ALEN];
};
struct mlx4_wqe_lso_seg {
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 8de8d8f2238..68029b30c3d 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -309,21 +309,20 @@ struct mlx5_hca_cap {
__be16 max_desc_sz_rq;
u8 rsvd21[2];
__be16 max_desc_sz_sq_dc;
- u8 rsvd22[4];
- __be16 max_qp_mcg;
- u8 rsvd23;
+ __be32 max_qp_mcg;
+ u8 rsvd22[3];
u8 log_max_mcg;
- u8 rsvd24;
+ u8 rsvd23;
u8 log_max_pd;
- u8 rsvd25;
+ u8 rsvd24;
u8 log_max_xrcd;
- u8 rsvd26[42];
+ u8 rsvd25[42];
__be16 log_uar_page_sz;
- u8 rsvd27[28];
+ u8 rsvd26[28];
u8 log_msx_atomic_size_qp;
- u8 rsvd28[2];
+ u8 rsvd27[2];
u8 log_msx_atomic_size_dc;
- u8 rsvd29[76];
+ u8 rsvd28[76];
};
@@ -472,9 +471,8 @@ struct mlx5_eqe_cmd {
struct mlx5_eqe_page_req {
u8 rsvd0[2];
__be16 func_id;
- u8 rsvd1[2];
- __be16 num_pages;
- __be32 rsvd2[5];
+ __be32 num_pages;
+ __be32 rsvd1[5];
};
union ev_data {
@@ -690,6 +688,26 @@ struct mlx5_query_cq_mbox_out {
__be64 pas[0];
};
+struct mlx5_enable_hca_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_enable_hca_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_disable_hca_mbox_in {
+ struct mlx5_inbox_hdr hdr;
+ u8 rsvd[8];
+};
+
+struct mlx5_disable_hca_mbox_out {
+ struct mlx5_outbox_hdr hdr;
+ u8 rsvd[8];
+};
+
struct mlx5_eq_context {
u8 status;
u8 ec_oi;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index f22e4419839..8888381fc15 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -101,6 +101,8 @@ enum {
MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
MLX5_CMD_OP_INIT_HCA = 0x102,
MLX5_CMD_OP_TEARDOWN_HCA = 0x103,
+ MLX5_CMD_OP_ENABLE_HCA = 0x104,
+ MLX5_CMD_OP_DISABLE_HCA = 0x105,
MLX5_CMD_OP_QUERY_PAGES = 0x107,
MLX5_CMD_OP_MANAGE_PAGES = 0x108,
MLX5_CMD_OP_SET_HCA_CAP = 0x109,
@@ -356,7 +358,7 @@ struct mlx5_caps {
u32 reserved_lkey;
u8 local_ca_ack_delay;
u8 log_max_mcg;
- u16 max_qp_mcg;
+ u32 max_qp_mcg;
int min_page_sz;
};
@@ -689,8 +691,8 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
- s16 npages);
-int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev);
+ s32 npages);
+int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);
@@ -729,9 +731,6 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);
-typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size);
-int mlx5_register_health_report_handler(health_handler_t handler);
-void mlx5_unregister_health_report_handler(void);
const char *mlx5_command_str(int command);
int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f0224608d15..d2d59b4149d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1798,6 +1798,7 @@ enum mf_flags {
MF_COUNT_INCREASED = 1 << 0,
MF_ACTION_REQUIRED = 1 << 1,
MF_MUST_KILL = 1 << 2,
+ MF_SOFT_OFFLINE = 1 << 3,
};
extern int memory_failure(unsigned long pfn, int trapno, int flags);
extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index fb425aa16c0..faf4b7c1ad1 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -332,6 +332,7 @@ struct mm_struct {
unsigned long pgoff, unsigned long flags);
#endif
unsigned long mmap_base; /* base of mmap area */
+ unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
unsigned long task_size; /* size of task vm space */
unsigned long highest_vm_end; /* highest vma end address */
pgd_t * pgd;
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index b62d4af6c66..329aa307cb7 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -277,7 +277,7 @@ struct pcmcia_device_id {
#define INPUT_DEVICE_ID_KEY_MIN_INTERESTING 0x71
#define INPUT_DEVICE_ID_KEY_MAX 0x2ff
#define INPUT_DEVICE_ID_REL_MAX 0x0f
-#define INPUT_DEVICE_ID_ABS_MAX 0x3f
+#define INPUT_DEVICE_ID_ABS_MAX 0x4f
#define INPUT_DEVICE_ID_MSC_MAX 0x07
#define INPUT_DEVICE_ID_LED_MAX 0x0f
#define INPUT_DEVICE_ID_SND_MAX 0x07
@@ -361,7 +361,8 @@ struct ssb_device_id {
__u16 vendor;
__u16 coreid;
__u8 revision;
-};
+ __u8 __pad;
+} __attribute__((packed, aligned(2)));
#define SSB_DEVICE(_vendor, _coreid, _revision) \
{ .vendor = _vendor, .coreid = _coreid, .revision = _revision, }
#define SSB_DEVTABLE_END \
@@ -377,7 +378,7 @@ struct bcma_device_id {
__u16 id;
__u8 rev;
__u8 class;
-};
+} __attribute__((packed,aligned(2)));
#define BCMA_CORE(_manuf, _id, _rev, _class) \
{ .manuf = _manuf, .id = _id, .rev = _rev, .class = _class, }
#define BCMA_CORETABLE_END \
diff --git a/include/linux/module.h b/include/linux/module.h
index 46f1ea01e6f..05f2447f8c1 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -42,6 +42,7 @@ struct module_kobject {
struct module *mod;
struct kobject *drivers_dir;
struct module_param_attrs *mp;
+ struct completion *kobj_completion;
};
struct module_attribute {
@@ -97,6 +98,11 @@ extern const struct gtype##_id __mod_##gtype##_table \
/* For userspace: you can also call me... */
#define MODULE_ALIAS(_alias) MODULE_INFO(alias, _alias)
+/* Soft module dependencies. See man modprobe.d for details.
+ * Example: MODULE_SOFTDEP("pre: module-foo module-bar post: module-baz")
+ */
+#define MODULE_SOFTDEP(_softdep) MODULE_INFO(softdep, _softdep)
+
/*
* The following license idents are currently accepted as indicating free
* software modules
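For illustration, a minimal (hypothetical) consumer of the new MODULE_SOFTDEP macro might look like the following; the module names are made up and are not part of this patch:

#include <linux/module.h>

/* Ask modprobe to load "crc32c" before this module and "example-post"
 * after it; neither is a hard symbol dependency. */
MODULE_SOFTDEP("pre: crc32c post: example-post");
MODULE_LICENSE("GPL");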
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 27d9da3f86f..c3eb102a9cc 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -36,7 +36,18 @@ static const char __UNIQUE_ID(name)[] \
struct kernel_param;
+/*
+ * Flags available for kernel_param_ops
+ *
+ * NOARG - the parameter allows for no argument (foo instead of foo=1)
+ */
+enum {
+ KERNEL_PARAM_FL_NOARG = (1 << 0)
+};
+
struct kernel_param_ops {
+ /* How the ops should behave */
+ unsigned int flags;
/* Returns 0, or -errno. arg is in kp->arg. */
int (*set)(const char *val, const struct kernel_param *kp);
/* Returns length written or -errno. Buffer is 4k (ie. be short!) */
@@ -187,7 +198,7 @@ struct kparam_array
/* Obsolete - use module_param_cb() */
#define module_param_call(name, set, get, arg, perm) \
static struct kernel_param_ops __param_ops_##name = \
- { (void *)set, (void *)get }; \
+ { 0, (void *)set, (void *)get }; \
__module_param_call(MODULE_PARAM_PREFIX, \
name, &__param_ops_##name, arg, \
(perm) + sizeof(__check_old_set_param(set))*0, -1)
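A rough sketch of how a module parameter might opt into the new flags field, assuming a boolean-style parameter whose set handler tolerates a missing argument (all names below are hypothetical):

#include <linux/moduleparam.h>

static bool example_flag;

/* With KERNEL_PARAM_FL_NOARG the core may pass val == NULL when the
 * parameter is given without "=value"; treat that as true. */
static int example_set(const char *val, const struct kernel_param *kp)
{
	return param_set_bool(val ?: "1", kp);
}

static struct kernel_param_ops example_ops = {
	.flags	= KERNEL_PARAM_FL_NOARG,
	.set	= example_set,
	.get	= param_get_bool,
};
module_param_cb(example_flag, &example_ops, &example_flag, 0644);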
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index 6e8215b1599..61a0da38d0c 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -6,6 +6,7 @@
#define __LINUX_MV643XX_ETH_H
#include <linux/mbus.h>
+#include <linux/if_ether.h>
#define MV643XX_ETH_SHARED_NAME "mv643xx_eth"
#define MV643XX_ETH_NAME "mv643xx_eth_port"
@@ -48,7 +49,7 @@ struct mv643xx_eth_platform_data {
* Use this MAC address if it is valid, overriding the
* address that is already in the hardware.
*/
- u8 mac_addr[6];
+ u8 mac_addr[ETH_ALEN];
/*
* If speed is 0, autonegotiation is enabled.
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 5a5ff57ceed..cd09751c71a 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -58,6 +58,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
extern int user_path_at(int, const char __user *, unsigned, struct path *);
extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
+extern int user_path_umountat(int, const char __user *, unsigned int, struct path *);
#define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path)
#define user_lpath(name, path) user_path_at(AT_FDCWD, name, 0, path)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0741a1e919a..8ed4ae94305 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -728,6 +728,16 @@ struct netdev_fcoe_hbainfo {
};
#endif
+#define MAX_PHYS_PORT_ID_LEN 32
+
+/* This structure holds a unique identifier for the physical port
+ * used by a netdevice.
+ */
+struct netdev_phys_port_id {
+ unsigned char id[MAX_PHYS_PORT_ID_LEN];
+ unsigned char id_len;
+};
+
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
@@ -932,6 +942,25 @@ struct netdev_fcoe_hbainfo {
* that determine carrier state from physical hardware properties (eg
* network cables) or protocol-dependent mechanisms (eg
* USB_CDC_NOTIFY_NETWORK_CONNECTION) should NOT implement this function.
+ *
+ * int (*ndo_get_phys_port_id)(struct net_device *dev,
+ * struct netdev_phys_port_id *ppid);
+ * Called to get the ID of the physical port of this device. If the
+ * driver does not implement this, it is assumed that the hardware is
+ * not able to have multiple net devices on a single physical port.
+ *
+ * void (*ndo_add_vxlan_port)(struct net_device *dev,
+ * sa_family_t sa_family, __u16 port);
+ * Called by vxlan to notify a driver about the UDP port and socket
+ * address family that vxlan is listening to. It is called only when
+ * a new port starts listening. The operation is protected by the
+ * vxlan_net->sock_lock.
+ *
+ * void (*ndo_del_vxlan_port)(struct net_device *dev,
+ * sa_family_t sa_family, __u16 port);
+ * Called by vxlan to notify the driver about a UDP port and socket
+ * address family that vxlan is not listening to anymore. The operation
+ * is protected by the vxlan_net->sock_lock.
*/
struct net_device_ops {
int (*ndo_init)(struct net_device *dev);
@@ -973,7 +1002,7 @@ struct net_device_ops {
gfp_t gfp);
void (*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
int (*ndo_busy_poll)(struct napi_struct *dev);
#endif
int (*ndo_set_vf_mac)(struct net_device *dev,
@@ -1060,6 +1089,14 @@ struct net_device_ops {
struct nlmsghdr *nlh);
int (*ndo_change_carrier)(struct net_device *dev,
bool new_carrier);
+ int (*ndo_get_phys_port_id)(struct net_device *dev,
+ struct netdev_phys_port_id *ppid);
+ void (*ndo_add_vxlan_port)(struct net_device *dev,
+ sa_family_t sa_family,
+ __u16 port);
+ void (*ndo_del_vxlan_port)(struct net_device *dev,
+ sa_family_t sa_family,
+ __u16 port);
};
/*
@@ -1107,6 +1144,7 @@ struct net_device {
struct list_head napi_list;
struct list_head unreg_list;
struct list_head upper_dev_list; /* List of upper devices */
+ struct list_head lower_dev_list;
/* currently active device features */
@@ -1633,6 +1671,7 @@ struct packet_offload {
#define NETDEV_NOTIFY_PEERS 0x0013
#define NETDEV_JOIN 0x0014
#define NETDEV_CHANGEUPPER 0x0015
+#define NETDEV_RESEND_IGMP 0x0016
extern int register_netdevice_notifier(struct notifier_block *nb);
extern int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -1665,9 +1704,6 @@ extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern rwlock_t dev_base_lock; /* Device list lock */
-extern seqcount_t devnet_rename_seq; /* Device rename seq */
-
-
#define for_each_netdev(net, d) \
list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d) \
@@ -2317,6 +2353,8 @@ extern int dev_set_mac_address(struct net_device *,
struct sockaddr *);
extern int dev_change_carrier(struct net_device *,
bool new_carrier);
+extern int dev_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_port_id *ppid);
extern int dev_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev,
struct netdev_queue *txq);
@@ -2749,6 +2787,16 @@ extern int bpf_jit_enable;
extern bool netdev_has_upper_dev(struct net_device *dev,
struct net_device *upper_dev);
extern bool netdev_has_any_upper_dev(struct net_device *dev);
+extern struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+ struct list_head **iter);
+
+/* iterate through upper list, must be called under RCU read lock */
+#define netdev_for_each_upper_dev_rcu(dev, upper, iter) \
+ for (iter = &(dev)->upper_dev_list, \
+ upper = netdev_upper_get_next_dev_rcu(dev, &(iter)); \
+ upper; \
+ upper = netdev_upper_get_next_dev_rcu(dev, &(iter)))
+
extern struct net_device *netdev_master_upper_dev_get(struct net_device *dev);
extern struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev);
extern int netdev_upper_dev_link(struct net_device *dev,
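Two small, hypothetical driver-side sketches of the additions above: an ndo_get_phys_port_id implementation that simply reports the permanent MAC as the port identifier, and a walk of the upper-device list using the new RCU iterator (error handling kept minimal, names made up):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static int example_get_phys_port_id(struct net_device *dev,
				    struct netdev_phys_port_id *ppid)
{
	/* use the permanent HW address as a stand-in port identifier */
	ppid->id_len = ETH_ALEN;
	memcpy(ppid->id, dev->perm_addr, ppid->id_len);
	return 0;
}

static void example_dump_uppers(struct net_device *dev)
{
	struct net_device *upper;
	struct list_head *iter;

	rcu_read_lock();
	netdev_for_each_upper_dev_rcu(dev, upper, iter)
		pr_info("%s is an upper device of %s\n",
			upper->name, dev->name);
	rcu_read_unlock();
}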
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index de70f7b45b6..708fe72ab91 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -314,25 +314,24 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
#endif /*CONFIG_NETFILTER*/
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-extern void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu;
-extern void nf_ct_attach(struct sk_buff *, struct sk_buff *);
+extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
+extern void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
extern void (*nf_ct_destroy)(struct nf_conntrack *) __rcu;
struct nf_conn;
+enum ip_conntrack_info;
struct nlattr;
struct nfq_ct_hook {
size_t (*build_size)(const struct nf_conn *ct);
int (*build)(struct sk_buff *skb, struct nf_conn *ct);
int (*parse)(const struct nlattr *attr, struct nf_conn *ct);
-};
-extern struct nfq_ct_hook __rcu *nfq_ct_hook;
-
-struct nfq_ct_nat_hook {
+ int (*attach_expect)(const struct nlattr *attr, struct nf_conn *ct,
+ u32 portid, u32 report);
void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct,
- u32 ctinfo, int off);
+ enum ip_conntrack_info ctinfo, s32 off);
};
-extern struct nfq_ct_nat_hook __rcu *nfq_ct_nat_hook;
+extern struct nfq_ct_hook __rcu *nfq_ct_hook;
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
#endif
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 4e2cbfa640b..58b9a02c38d 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -98,8 +98,17 @@
typedef struct { DECLARE_BITMAP(bits, MAX_NUMNODES); } nodemask_t;
extern nodemask_t _unused_nodemask_arg_;
+/*
+ * The inline keyword gives the compiler room to decide whether or not
+ * to inline a function, as it sees fit. However, as these functions
+ * are called from both __init and non-__init functions, if they are
+ * not inlined we will end up with a section mismatch error (of the
+ * kind where freeable items are not freed). So we must use
+ * __always_inline here to fix the problem. Any other functions that
+ * end up in this situation will also need to be annotated __always_inline.
+ */
#define node_set(node, dst) __node_set((node), &(dst))
-static inline void __node_set(int node, volatile nodemask_t *dstp)
+static __always_inline void __node_set(int node, volatile nodemask_t *dstp)
{
set_bit(node, dstp->bits);
}
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index 10e5947491c..b4ec59d159a 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -14,6 +14,10 @@ struct fs_struct;
* A structure to contain pointers to all per-process
* namespaces - fs (mount), uts, network, sysvipc, etc.
*
+ * The pid namespace is an exception -- it's accessed using
+ * task_active_pid_ns. The pid namespace here is the
+ * namespace that children will use.
+ *
* 'count' is the number of tasks holding a reference.
* The count for each namespace, then, will be the number
* of nsproxies pointing to it, not the number of tasks.
@@ -27,7 +31,7 @@ struct nsproxy {
struct uts_namespace *uts_ns;
struct ipc_namespace *ipc_ns;
struct mnt_namespace *mnt_ns;
- struct pid_namespace *pid_ns;
+ struct pid_namespace *pid_ns_for_children;
struct net *net_ns;
};
extern struct nsproxy init_nsproxy;
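A short, hypothetical sketch of the distinction the comment above draws, contrasting the pid namespace a task currently lives in with the one its children will be created in (proper nsproxy locking omitted, names made up):

#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/sched.h>

static bool example_children_in_other_pid_ns(struct task_struct *tsk)
{
	return task_active_pid_ns(tsk) != tsk->nsproxy->pid_ns_for_children;
}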
diff --git a/include/linux/of.h b/include/linux/of.h
index 1fd08ca2310..3a45c4f593a 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -266,6 +266,7 @@ extern int of_device_is_available(const struct device_node *device);
extern const void *of_get_property(const struct device_node *node,
const char *name,
int *lenp);
+extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
#define for_each_property_of_node(dn, pp) \
for (pp = dn->properties; pp != NULL; pp = pp->next)
@@ -343,6 +344,8 @@ const char *of_prop_next_string(struct property *prop, const char *cur);
s; \
s = of_prop_next_string(prop, s))
+int of_device_is_stdout_path(struct device_node *dn);
+
#else /* CONFIG_OF */
static inline const char* of_node_full_name(struct device_node *np)
@@ -459,6 +462,12 @@ static inline const void *of_get_property(const struct device_node *node,
return NULL;
}
+static inline struct device_node *of_get_cpu_node(int cpu,
+ unsigned int *thread)
+{
+ return NULL;
+}
+
static inline int of_property_read_u64(const struct device_node *np,
const char *propname, u64 *out_value)
{
@@ -505,6 +514,11 @@ static inline int of_machine_is_compatible(const char *compat)
return 0;
}
+static inline int of_device_is_stdout_path(struct device_node *dn)
+{
+ return 0;
+}
+
#define of_match_ptr(_ptr) NULL
#define of_match_node(_matches, _node) NULL
#define of_property_for_each_u32(np, propname, prop, p, u) \
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 9d27475feec..82ce324fdce 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -1,6 +1,7 @@
#ifndef _LINUX_OF_DEVICE_H
#define _LINUX_OF_DEVICE_H
+#include <linux/cpu.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h> /* temporary until merge */
@@ -43,6 +44,15 @@ static inline void of_device_node_put(struct device *dev)
of_node_put(dev->of_node);
}
+static inline struct device_node *of_cpu_device_node_get(int cpu)
+{
+ struct device *cpu_dev;
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev)
+ return NULL;
+ return of_node_get(cpu_dev->of_node);
+}
+
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
@@ -67,6 +77,11 @@ static inline const struct of_device_id *of_match_device(
{
return NULL;
}
+
+static inline struct device_node *of_cpu_device_node_get(int cpu)
+{
+ return NULL;
+}
#endif /* CONFIG_OF */
#endif /* _LINUX_OF_DEVICE_H */
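For illustration, a hypothetical caller of the new of_cpu_device_node_get() helper, reading a property from a CPU's device-tree node ("clock-frequency" is only an example property):

#include <linux/of.h>
#include <linux/of_device.h>

static u32 example_cpu_clock_frequency(int cpu)
{
	struct device_node *np = of_cpu_device_node_get(cpu);
	u32 freq = 0;

	if (np) {
		of_property_read_u32(np, "clock-frequency", &freq);
		of_node_put(np);	/* balance the of_node_get() in the helper */
	}
	return freq;
}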
diff --git a/include/linux/of_i2c.h b/include/linux/of_i2c.h
deleted file mode 100644
index cfb545cd86b..00000000000
--- a/include/linux/of_i2c.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Generic I2C API implementation for PowerPC.
- *
- * Copyright (c) 2008 Jochen Friedrich <jochen@scram.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __LINUX_OF_I2C_H
-#define __LINUX_OF_I2C_H
-
-#if defined(CONFIG_OF_I2C) || defined(CONFIG_OF_I2C_MODULE)
-#include <linux/i2c.h>
-
-extern void of_i2c_register_devices(struct i2c_adapter *adap);
-
-/* must call put_device() when done with returned i2c_client device */
-extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
-
-/* must call put_device() when done with returned i2c_adapter device */
-extern struct i2c_adapter *of_find_i2c_adapter_by_node(
- struct device_node *node);
-
-#else
-static inline void of_i2c_register_devices(struct i2c_adapter *adap)
-{
- return;
-}
-
-static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
-{
- return NULL;
-}
-
-/* must call put_device() when done with returned i2c_adapter device */
-static inline struct i2c_adapter *of_find_i2c_adapter_by_node(
- struct device_node *node)
-{
- return NULL;
-}
-#endif /* CONFIG_OF_I2C */
-
-#endif /* __LINUX_OF_I2C_H */
diff --git a/include/linux/olpc-ec.h b/include/linux/olpc-ec.h
index 5bb6e760aa6..2925df3ce78 100644
--- a/include/linux/olpc-ec.h
+++ b/include/linux/olpc-ec.h
@@ -6,6 +6,7 @@
#define EC_WRITE_SCI_MASK 0x1b
#define EC_WAKE_UP_WLAN 0x24
#define EC_WLAN_LEAVE_RESET 0x25
+#define EC_DCON_POWER_MODE 0x26
#define EC_READ_EB_MODE 0x2a
#define EC_SET_SCI_INHIBIT 0x32
#define EC_SET_SCI_INHIBIT_RELEASE 0x34
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index a4c562453f6..b2a0f15f11f 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -42,7 +42,6 @@
#define IBS_FETCH_CODE 13
#define IBS_OP_CODE 14
-struct super_block;
struct dentry;
struct file_operations;
struct pt_regs;
@@ -51,7 +50,7 @@ struct pt_regs;
struct oprofile_operations {
/* create any necessary configuration files in the oprofile fs.
* Optional. */
- int (*create_files)(struct super_block * sb, struct dentry * root);
+ int (*create_files)(struct dentry * root);
/* Do any necessary interrupt setup. Optional. */
int (*setup)(void);
/* Do any necessary interrupt shutdown. Optional. */
@@ -125,27 +124,26 @@ void oprofile_add_trace(unsigned long eip);
* Create a file of the given name as a child of the given root, with
* the specified file operations.
*/
-int oprofilefs_create_file(struct super_block * sb, struct dentry * root,
+int oprofilefs_create_file(struct dentry * root,
char const * name, const struct file_operations * fops);
-int oprofilefs_create_file_perm(struct super_block * sb, struct dentry * root,
+int oprofilefs_create_file_perm(struct dentry * root,
char const * name, const struct file_operations * fops, int perm);
/** Create a file for read/write access to an unsigned long. */
-int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
+int oprofilefs_create_ulong(struct dentry * root,
char const * name, ulong * val);
/** Create a file for read-only access to an unsigned long. */
-int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
+int oprofilefs_create_ro_ulong(struct dentry * root,
char const * name, ulong * val);
/** Create a file for read-only access to an atomic_t. */
-int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
+int oprofilefs_create_ro_atomic(struct dentry * root,
char const * name, atomic_t * val);
/** create a directory */
-struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
- char const * name);
+struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
/**
* Write the given asciz string to the given user buffer @buf, updating *offset
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 17044797727..d006f0ca60f 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -47,24 +47,22 @@ void acpi_pci_remove_bus(struct pci_bus *bus);
#ifdef CONFIG_ACPI_PCI_SLOT
void acpi_pci_slot_init(void);
-void acpi_pci_slot_enumerate(struct pci_bus *bus, acpi_handle handle);
+void acpi_pci_slot_enumerate(struct pci_bus *bus);
void acpi_pci_slot_remove(struct pci_bus *bus);
#else
static inline void acpi_pci_slot_init(void) { }
-static inline void acpi_pci_slot_enumerate(struct pci_bus *bus,
- acpi_handle handle) { }
+static inline void acpi_pci_slot_enumerate(struct pci_bus *bus) { }
static inline void acpi_pci_slot_remove(struct pci_bus *bus) { }
#endif
#ifdef CONFIG_HOTPLUG_PCI_ACPI
void acpiphp_init(void);
-void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle);
+void acpiphp_enumerate_slots(struct pci_bus *bus);
void acpiphp_remove_slots(struct pci_bus *bus);
void acpiphp_check_host_bridge(acpi_handle handle);
#else
static inline void acpiphp_init(void) { }
-static inline void acpiphp_enumerate_slots(struct pci_bus *bus,
- acpi_handle handle) { }
+static inline void acpiphp_enumerate_slots(struct pci_bus *bus) { }
static inline void acpiphp_remove_slots(struct pci_bus *bus) { }
static inline void acpiphp_check_host_bridge(acpi_handle handle) { }
#endif
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 0fd1f1582fa..20888589c09 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -183,6 +183,19 @@ enum pci_bus_flags {
PCI_BUS_FLAGS_NO_MMRBC = (__force pci_bus_flags_t) 2,
};
+/* These values come from the PCI Express Spec */
+enum pcie_link_width {
+ PCIE_LNK_WIDTH_RESRV = 0x00,
+ PCIE_LNK_X1 = 0x01,
+ PCIE_LNK_X2 = 0x02,
+ PCIE_LNK_X4 = 0x04,
+ PCIE_LNK_X8 = 0x08,
+ PCIE_LNK_X12 = 0x0C,
+ PCIE_LNK_X16 = 0x10,
+ PCIE_LNK_X32 = 0x20,
+ PCIE_LNK_WIDTH_UNKNOWN = 0xFF,
+};
+
/* Based on the PCI Hotplug Spec, but some values are made up by us */
enum pci_bus_speed {
PCI_SPEED_33MHz = 0x00,
@@ -675,7 +688,7 @@ struct pci_driver {
/* these external functions are only available when PCI support is enabled */
#ifdef CONFIG_PCI
-void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss);
+void pcie_bus_configure_settings(struct pci_bus *bus);
enum pcie_bus_config_types {
PCIE_BUS_TUNE_OFF,
@@ -914,6 +927,7 @@ bool pci_check_and_unmask_intx(struct pci_dev *dev);
void pci_msi_off(struct pci_dev *dev);
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
+int pci_wait_for_pending_transaction(struct pci_dev *dev);
int pcix_get_max_mmrbc(struct pci_dev *dev);
int pcix_get_mmrbc(struct pci_dev *dev);
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc);
@@ -921,9 +935,16 @@ int pcie_get_readrq(struct pci_dev *dev);
int pcie_set_readrq(struct pci_dev *dev, int rq);
int pcie_get_mps(struct pci_dev *dev);
int pcie_set_mps(struct pci_dev *dev, int mps);
+int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
+ enum pcie_link_width *width);
int __pci_reset_function(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
+int pci_probe_reset_slot(struct pci_slot *slot);
+int pci_reset_slot(struct pci_slot *slot);
+int pci_probe_reset_bus(struct pci_bus *bus);
+int pci_reset_bus(struct pci_bus *bus);
+void pci_reset_bridge_secondary_bus(struct pci_dev *dev);
void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
@@ -1003,6 +1024,7 @@ int pci_claim_resource(struct pci_dev *, int);
void pci_assign_unassigned_resources(void);
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
+void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
void pdev_enable_device(struct pci_dev *);
int pci_enable_resources(struct pci_dev *, int mask);
void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
@@ -1043,7 +1065,6 @@ int __must_check pci_bus_alloc_resource(struct pci_bus *bus,
resource_size_t,
resource_size_t),
void *alignf_data);
-void pci_enable_bridges(struct pci_bus *bus);
/* Proper probing supporting hot-pluggable devices */
int __must_check __pci_register_driver(struct pci_driver *, struct module *,
@@ -1648,6 +1669,10 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev,
int pcibios_add_device(struct pci_dev *dev);
void pcibios_release_device(struct pci_dev *dev);
+#ifdef CONFIG_HIBERNATE_CALLBACKS
+extern struct dev_pm_ops pcibios_pm_ops;
+#endif
+
#ifdef CONFIG_PCI_MMCONFIG
void __init pci_mmcfg_early_init(void);
void __init pci_mmcfg_late_init(void);
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 8db71dcd633..430dd963707 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -28,19 +28,6 @@
#ifndef _PCI_HOTPLUG_H
#define _PCI_HOTPLUG_H
-/* These values come from the PCI Express Spec */
-enum pcie_link_width {
- PCIE_LNK_WIDTH_RESRV = 0x00,
- PCIE_LNK_X1 = 0x01,
- PCIE_LNK_X2 = 0x02,
- PCIE_LNK_X4 = 0x04,
- PCIE_LNK_X8 = 0x08,
- PCIE_LNK_X12 = 0x0C,
- PCIE_LNK_X16 = 0x10,
- PCIE_LNK_X32 = 0x20,
- PCIE_LNK_WIDTH_UNKNOWN = 0xFF,
-};
-
/**
* struct hotplug_slot_ops -the callbacks that the hotplug pci core can use
* @owner: The module owner of this structure
@@ -63,6 +50,9 @@ enum pcie_link_width {
* @get_adapter_status: Called to see whether an adapter is present in the slot or not.
* If this field is NULL, the value passed in the struct hotplug_slot_info
* will be used when this value is requested by a user.
+ * @reset_slot: Optional interface to allow override of a bus reset for the
+ * slot for cases where a secondary bus reset can result in spurious
+ * hotplug events or where a slot can be reset independent of the bus.
*
* The table of function pointers that is passed to the hotplug pci core by a
* hotplug pci driver. These functions are called by the hotplug pci core when
@@ -80,6 +70,7 @@ struct hotplug_slot_ops {
int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
+ int (*reset_slot) (struct hotplug_slot *slot, int probe);
};
/**
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 3bed2e89611..bc95b2b391b 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -518,6 +518,8 @@
#define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303
#define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304
#define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403
+#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F3 0x141d
+#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F4 0x141e
#define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600
#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
@@ -1311,6 +1313,8 @@
#define PCI_DEVICE_ID_IMS_TT128 0x9128
#define PCI_DEVICE_ID_IMS_TT3D 0x9135
+#define PCI_VENDOR_ID_AMCC 0x10e8
+
#define PCI_VENDOR_ID_INTERG 0x10ea
#define PCI_DEVICE_ID_INTERG_1682 0x1682
#define PCI_DEVICE_ID_INTERG_2000 0x2000
@@ -2256,12 +2260,10 @@
/*
* ADDI-DATA GmbH communication cards <info@addi-data.com>
*/
-#define PCI_VENDOR_ID_ADDIDATA_OLD 0x10E8
#define PCI_VENDOR_ID_ADDIDATA 0x15B8
#define PCI_DEVICE_ID_ADDIDATA_APCI7500 0x7000
#define PCI_DEVICE_ID_ADDIDATA_APCI7420 0x7001
#define PCI_DEVICE_ID_ADDIDATA_APCI7300 0x7002
-#define PCI_DEVICE_ID_ADDIDATA_APCI7800 0x818E
#define PCI_DEVICE_ID_ADDIDATA_APCI7500_2 0x7009
#define PCI_DEVICE_ID_ADDIDATA_APCI7420_2 0x700A
#define PCI_DEVICE_ID_ADDIDATA_APCI7300_2 0x700B
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 27ef6b190ea..57e890abe1f 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -22,9 +22,12 @@
* Macro which verifies @ptr is a percpu pointer without evaluating
* @ptr. This is to be used in percpu accessors to verify that the
* input parameter is a percpu pointer.
+ *
+ * + 0 is required in order to convert the pointer type from a
+ * potential array type to a pointer to a single item of the array.
*/
#define __verify_pcpu_ptr(ptr) do { \
- const void __percpu *__vpp_verify = (typeof(ptr))NULL; \
+ const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \
(void)__vpp_verify; \
} while (0)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index c43f6eabad5..866e85c5eb9 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -48,6 +48,7 @@ struct perf_guest_info_callbacks {
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/static_key.h>
+#include <linux/jump_label_ratelimit.h>
#include <linux/atomic.h>
#include <linux/sysfs.h>
#include <linux/perf_regs.h>
@@ -64,30 +65,6 @@ struct perf_raw_record {
};
/*
- * single taken branch record layout:
- *
- * from: source instruction (may not always be a branch insn)
- * to: branch target
- * mispred: branch target was mispredicted
- * predicted: branch target was predicted
- *
- * support for mispred, predicted is optional. In case it
- * is not supported mispred = predicted = 0.
- *
- * in_tx: running in a hardware transaction
- * abort: aborting a hardware transaction
- */
-struct perf_branch_entry {
- __u64 from;
- __u64 to;
- __u64 mispred:1, /* target mispredicted */
- predicted:1,/* target predicted */
- in_tx:1, /* in transaction */
- abort:1, /* transaction abort */
- reserved:60;
-};
-
-/*
* branch stack layout:
* nr: number of taken branches stored in entries[]
*
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index bf7e989abcb..fb90ef5eb03 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -137,6 +137,39 @@ static inline unsigned long pinconf_to_config_packed(enum pin_config_param param
return PIN_CONF_PACKED(param, argument);
}
+#ifdef CONFIG_OF
+
+#include <linux/device.h>
+#include <linux/pinctrl/machine.h>
+struct pinctrl_dev;
+struct pinctrl_map;
+
+int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np, struct pinctrl_map **map,
+ unsigned *reserved_maps, unsigned *num_maps,
+ enum pinctrl_map_type type);
+int pinconf_generic_dt_node_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np_config, struct pinctrl_map **map,
+ unsigned *num_maps, enum pinctrl_map_type type);
+
+static inline int pinconf_generic_dt_node_to_map_group(
+ struct pinctrl_dev *pctldev, struct device_node *np_config,
+ struct pinctrl_map **map, unsigned *num_maps)
+{
+ return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
+ PIN_MAP_TYPE_CONFIGS_GROUP);
+}
+
+static inline int pinconf_generic_dt_node_to_map_pin(
+ struct pinctrl_dev *pctldev, struct device_node *np_config,
+ struct pinctrl_map **map, unsigned *num_maps)
+{
+ return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
+ PIN_MAP_TYPE_CONFIGS_PIN);
+}
+
+#endif
+
#endif /* CONFIG_GENERIC_PINCONF */
#endif /* __LINUX_PINCTRL_PINCONF_GENERIC_H */
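A rough sketch of how a pinctrl driver might wire the new generic DT parsing helper into its pinctrl_ops; the free callback here is a simplified, hypothetical stand-in (a real driver would also free any per-entry config arrays), and the group enumeration callbacks are omitted:

#include <linux/pinctrl/pinconf-generic.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/slab.h>

static void example_dt_free_map(struct pinctrl_dev *pctldev,
				struct pinctrl_map *map, unsigned num_maps)
{
	kfree(map);	/* simplified: per-map config arrays are not freed here */
}

static const struct pinctrl_ops example_pctl_ops = {
	/* .get_groups_count / .get_group_name omitted from this sketch */
	.dt_node_to_map	= pinconf_generic_dt_node_to_map_group,
	.dt_free_map	= example_dt_free_map,
};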
diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h
index f6998692bdc..09eb80f2574 100644
--- a/include/linux/pinctrl/pinconf.h
+++ b/include/linux/pinctrl/pinconf.h
@@ -47,13 +47,15 @@ struct pinconf_ops {
unsigned long *config);
int (*pin_config_set) (struct pinctrl_dev *pctldev,
unsigned pin,
- unsigned long config);
+ unsigned long *configs,
+ unsigned num_configs);
int (*pin_config_group_get) (struct pinctrl_dev *pctldev,
unsigned selector,
unsigned long *config);
int (*pin_config_group_set) (struct pinctrl_dev *pctldev,
unsigned selector,
- unsigned long config);
+ unsigned long *configs,
+ unsigned num_configs);
int (*pin_config_dbg_parse_modify) (struct pinctrl_dev *pctldev,
const char *arg,
unsigned long *config);
diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h
index 88272591a89..9efc04dd255 100644
--- a/include/linux/platform_data/asoc-s3c.h
+++ b/include/linux/platform_data/asoc-s3c.h
@@ -36,6 +36,7 @@ struct samsung_i2s {
*/
#define QUIRK_NO_MUXPSR (1 << 2)
#define QUIRK_NEED_RSTCLR (1 << 3)
+#define QUIRK_SUPPORTS_TDM (1 << 4)
/* Quirks of the I2S controller */
u32 quirks;
dma_addr_t idma_addr;
diff --git a/include/linux/platform_data/at91_adc.h b/include/linux/platform_data/at91_adc.h
index e15745b4f3a..b3ca1e94e0c 100644
--- a/include/linux/platform_data/at91_adc.h
+++ b/include/linux/platform_data/at91_adc.h
@@ -14,12 +14,16 @@
(Interruptions registers mostly)
* @status_register: Offset of the Interrupt Status Register
* @trigger_register: Offset of the Trigger setup register
+ * @mr_prescal_mask: Mask of the PRESCAL field in the adc MR register
+ * @mr_startup_mask: Mask of the STARTUP field in the adc MR register
*/
struct at91_adc_reg_desc {
u8 channel_base;
u32 drdy_mask;
u8 status_register;
u8 trigger_register;
+ u32 mr_prescal_mask;
+ u32 mr_startup_mask;
};
/**
diff --git a/include/linux/platform_data/brcmfmac-sdio.h b/include/linux/platform_data/brcmfmac-sdio.h
index b7174998c24..e75dcbf2b23 100644
--- a/include/linux/platform_data/brcmfmac-sdio.h
+++ b/include/linux/platform_data/brcmfmac-sdio.h
@@ -94,6 +94,10 @@ void __init brcmfmac_init_pdata(void)
* Set this to true if the SDIO host controller has higher align requirement
* than 32 bytes for each scatterlist item.
*
+ * sd_head_align: alignment requirement for start of data buffer
+ *
+ * sd_sgentry_align: length alignment requirement for each sg entry
+ *
* power_on: This function is called by the brcmfmac when the module gets
* loaded. This can be particularly useful for low power devices. The platform
* specific routine may, for example, decide to power up the complete device.
@@ -121,6 +125,8 @@ struct brcmfmac_sdio_platform_data {
unsigned int oob_irq_nr;
unsigned long oob_irq_flags;
bool broken_sg_support;
+ unsigned short sd_head_align;
+ unsigned short sd_sgentry_align;
void (*power_on)(void);
void (*power_off)(void);
void (*reset)(void);
diff --git a/include/linux/platform_data/camera-mx3.h b/include/linux/platform_data/camera-mx3.h
index f226ee3777e..a910dadc825 100644
--- a/include/linux/platform_data/camera-mx3.h
+++ b/include/linux/platform_data/camera-mx3.h
@@ -33,6 +33,8 @@
#define MX3_CAMERA_DATAWIDTH_MASK (MX3_CAMERA_DATAWIDTH_4 | MX3_CAMERA_DATAWIDTH_8 | \
MX3_CAMERA_DATAWIDTH_10 | MX3_CAMERA_DATAWIDTH_15)
+struct v4l2_async_subdev;
+
/**
* struct mx3_camera_pdata - i.MX3x camera platform data
* @flags: MX3_CAMERA_* flags
@@ -43,6 +45,8 @@ struct mx3_camera_pdata {
unsigned long flags;
unsigned long mclk_10khz;
struct device *dma_dev;
+ struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */
+ int *asd_sizes; /* 0-terminated array of asd group sizes */
};
#endif
diff --git a/include/linux/platform_data/camera-rcar.h b/include/linux/platform_data/camera-rcar.h
new file mode 100644
index 00000000000..dfc83c58159
--- /dev/null
+++ b/include/linux/platform_data/camera-rcar.h
@@ -0,0 +1,25 @@
+/*
+ * Platform data for Renesas R-Car VIN soc-camera driver
+ *
+ * Copyright (C) 2011-2013 Renesas Solutions Corp.
+ * Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __CAMERA_RCAR_H_
+#define __CAMERA_RCAR_H_
+
+#define RCAR_VIN_HSYNC_ACTIVE_LOW (1 << 0)
+#define RCAR_VIN_VSYNC_ACTIVE_LOW (1 << 1)
+#define RCAR_VIN_BT601 (1 << 2)
+#define RCAR_VIN_BT656 (1 << 3)
+
+struct rcar_vin_platform_data {
+ unsigned int flags;
+};
+
+#endif /* __CAMERA_RCAR_H_ */
diff --git a/include/linux/platform_data/efm32-spi.h b/include/linux/platform_data/efm32-spi.h
new file mode 100644
index 00000000000..31b19ca1d73
--- /dev/null
+++ b/include/linux/platform_data/efm32-spi.h
@@ -0,0 +1,14 @@
+#ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__
+#define __LINUX_PLATFORM_DATA_EFM32_SPI_H__
+
+#include <linux/types.h>
+
+/**
+ * struct efm32_spi_pdata
+ * @location: pinmux location for the I/O pins (to be written to the ROUTE
+ * register)
+ */
+struct efm32_spi_pdata {
+ u8 location;
+};
+#endif /* ifndef __LINUX_PLATFORM_DATA_EFM32_SPI_H__ */
diff --git a/include/linux/platform_data/max310x.h b/include/linux/platform_data/max310x.h
index 91648bf5fc5..dd11dcd1a18 100644
--- a/include/linux/platform_data/max310x.h
+++ b/include/linux/platform_data/max310x.h
@@ -1,5 +1,5 @@
/*
- * Maxim (Dallas) MAX3107/8 serial driver
+ * Maxim (Dallas) MAX3107/8/9, MAX14830 serial driver
*
* Copyright (C) 2012 Alexander Shiyan <shc_work@mail.ru>
*
@@ -37,14 +37,13 @@
* };
*/
-#define MAX310X_MAX_UARTS 1
+#define MAX310X_MAX_UARTS 4
/* MAX310X platform data structure */
struct max310x_pdata {
/* Flags global to driver */
- const u8 driver_flags:2;
+ const u8 driver_flags;
#define MAX310X_EXT_CLK (0x00000001) /* External clock enable */
-#define MAX310X_AUTOSLEEP (0x00000002) /* Enable AutoSleep mode */
/* Flags global to UART port */
const u8 uart_flags[MAX310X_MAX_UARTS];
#define MAX310X_LOOPBACK (0x00000001) /* Loopback mode enable */
@@ -60,8 +59,6 @@ struct max310x_pdata {
void (*init)(void);
/* Called before finish */
void (*exit)(void);
- /* Suspend callback */
- void (*suspend)(int do_suspend);
};
#endif
diff --git a/include/linux/platform_data/mmc-pxamci.h b/include/linux/platform_data/mmc-pxamci.h
index 9eb515bb799..1706b3597ce 100644
--- a/include/linux/platform_data/mmc-pxamci.h
+++ b/include/linux/platform_data/mmc-pxamci.h
@@ -12,7 +12,7 @@ struct pxamci_platform_data {
unsigned long detect_delay_ms; /* delay in millisecond before detecting cards after interrupt */
int (*init)(struct device *, irq_handler_t , void *);
int (*get_ro)(struct device *);
- void (*setpower)(struct device *, unsigned int);
+ int (*setpower)(struct device *, unsigned int);
void (*exit)(struct device *, void *);
int gpio_card_detect; /* gpio detecting card insertion */
int gpio_card_ro; /* gpio detecting read only toggle */
diff --git a/include/linux/platform_data/omap-abe-twl6040.h b/include/linux/platform_data/omap-abe-twl6040.h
deleted file mode 100644
index 5d298ac10fc..00000000000
--- a/include/linux/platform_data/omap-abe-twl6040.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/**
- * omap-abe-twl6040.h - ASoC machine driver OMAP4+ devices, header.
- *
- * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com
- * All rights reserved.
- *
- * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-
-#ifndef _OMAP_ABE_TWL6040_H_
-#define _OMAP_ABE_TWL6040_H_
-
-/* To select if only one channel is connected in a stereo port */
-#define ABE_TWL6040_LEFT (1 << 0)
-#define ABE_TWL6040_RIGHT (1 << 1)
-
-struct omap_abe_twl6040_data {
- char *card_name;
- /* Feature flags for connected audio pins */
- u8 has_hs;
- u8 has_hf;
- bool has_ep;
- u8 has_aux;
- u8 has_vibra;
- bool has_dmic;
- bool has_hsmic;
- bool has_mainmic;
- bool has_submic;
- u8 has_afm;
- /* Other features */
- bool jack_detection; /* board can detect jack events */
- int mclk_freq; /* MCLK frequency speed for twl6040 */
-};
-
-#endif /* _OMAP_ABE_TWL6040_H_ */
diff --git a/include/linux/platform_data/pinctrl-nomadik.h b/include/linux/platform_data/pinctrl-nomadik.h
index f73b2f0c55b..abf5bed84df 100644
--- a/include/linux/platform_data/pinctrl-nomadik.h
+++ b/include/linux/platform_data/pinctrl-nomadik.h
@@ -226,30 +226,6 @@ enum nmk_gpio_slpm {
NMK_GPIO_SLPM_WAKEUP_DISABLE = NMK_GPIO_SLPM_NOCHANGE,
};
-/* Older deprecated pin config API that should go away soon */
-extern int nmk_config_pin(pin_cfg_t cfg, bool sleep);
-extern int nmk_config_pins(pin_cfg_t *cfgs, int num);
-extern int nmk_config_pins_sleep(pin_cfg_t *cfgs, int num);
-extern int nmk_gpio_set_slpm(int gpio, enum nmk_gpio_slpm mode);
-extern int nmk_gpio_set_pull(int gpio, enum nmk_gpio_pull pull);
-#ifdef CONFIG_PINCTRL_NOMADIK
-extern int nmk_gpio_set_mode(int gpio, int gpio_mode);
-#else
-static inline int nmk_gpio_set_mode(int gpio, int gpio_mode)
-{
- return -ENODEV;
-}
-#endif
-extern int nmk_gpio_get_mode(int gpio);
-
-extern void nmk_gpio_wakeups_suspend(void);
-extern void nmk_gpio_wakeups_resume(void);
-
-extern void nmk_gpio_clocks_enable(void);
-extern void nmk_gpio_clocks_disable(void);
-
-extern void nmk_gpio_read_pull(int gpio_bank, u32 *pull_up);
-
/*
* Platform data to register a block: only the initial gpio/irq number.
*/
diff --git a/include/linux/platform_data/rcar-du.h b/include/linux/platform_data/rcar-du.h
index 80587fdbba3..1a2e9901a22 100644
--- a/include/linux/platform_data/rcar-du.h
+++ b/include/linux/platform_data/rcar-du.h
@@ -16,8 +16,18 @@
#include <drm/drm_mode.h>
+enum rcar_du_output {
+ RCAR_DU_OUTPUT_DPAD0,
+ RCAR_DU_OUTPUT_DPAD1,
+ RCAR_DU_OUTPUT_LVDS0,
+ RCAR_DU_OUTPUT_LVDS1,
+ RCAR_DU_OUTPUT_TCON,
+ RCAR_DU_OUTPUT_MAX,
+};
+
enum rcar_du_encoder_type {
RCAR_DU_ENCODER_UNUSED = 0,
+ RCAR_DU_ENCODER_NONE,
RCAR_DU_ENCODER_VGA,
RCAR_DU_ENCODER_LVDS,
};
@@ -28,22 +38,32 @@ struct rcar_du_panel_data {
struct drm_mode_modeinfo mode;
};
-struct rcar_du_encoder_lvds_data {
+struct rcar_du_connector_lvds_data {
struct rcar_du_panel_data panel;
};
-struct rcar_du_encoder_vga_data {
+struct rcar_du_connector_vga_data {
/* TODO: Add DDC information for EDID retrieval */
};
+/*
+ * struct rcar_du_encoder_data - Encoder platform data
+ * @type: the encoder type (RCAR_DU_ENCODER_*)
+ * @output: the DU output the connector is connected to (RCAR_DU_OUTPUT_*)
+ * @connector.lvds: platform data for LVDS connectors
+ * @connector.vga: platform data for VGA connectors
+ *
+ * Encoder platform data describes an on-board encoder, its associated DU SoC
+ * output, and the connector.
+ */
struct rcar_du_encoder_data {
- enum rcar_du_encoder_type encoder;
- unsigned int output;
+ enum rcar_du_encoder_type type;
+ enum rcar_du_output output;
union {
- struct rcar_du_encoder_lvds_data lvds;
- struct rcar_du_encoder_vga_data vga;
- } u;
+ struct rcar_du_connector_lvds_data lvds;
+ struct rcar_du_connector_vga_data vga;
+ } connector;
};
struct rcar_du_platform_data {
diff --git a/include/linux/platform_data/serial-sccnxp.h b/include/linux/platform_data/serial-sccnxp.h
index bdc510d0324..af0c8c3b89a 100644
--- a/include/linux/platform_data/serial-sccnxp.h
+++ b/include/linux/platform_data/serial-sccnxp.h
@@ -60,7 +60,6 @@
* };
*
* static struct sccnxp_pdata sc2892_info = {
- * .frequency = 3686400,
* .mctrl_cfg[0] = MCTRL_SIG(DIR_OP, LINE_OP0),
* .mctrl_cfg[1] = MCTRL_SIG(DIR_OP, LINE_OP1),
* };
@@ -78,8 +77,6 @@
/* SCCNXP platform data structure */
struct sccnxp_pdata {
- /* Frequency (extrenal clock or crystal) */
- int frequency;
/* Shift for A0 line */
const u8 reg_shift;
/* Modem control lines configuration */
diff --git a/include/linux/platform_data/simplefb.h b/include/linux/platform_data/simplefb.h
new file mode 100644
index 00000000000..077303cedbf
--- /dev/null
+++ b/include/linux/platform_data/simplefb.h
@@ -0,0 +1,64 @@
+/*
+ * simplefb.h - Simple Framebuffer Device
+ *
+ * Copyright (C) 2013 David Herrmann <dh.herrmann@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __PLATFORM_DATA_SIMPLEFB_H__
+#define __PLATFORM_DATA_SIMPLEFB_H__
+
+#include <drm/drm_fourcc.h>
+#include <linux/fb.h>
+#include <linux/kernel.h>
+
+/* format array, use it to initialize a "struct simplefb_format" array */
+#define SIMPLEFB_FORMATS \
+{ \
+ { "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0}, DRM_FORMAT_RGB565 }, \
+ { "x1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {0, 0}, DRM_FORMAT_XRGB1555 }, \
+ { "a1r5g5b5", 16, {10, 5}, {5, 5}, {0, 5}, {15, 1}, DRM_FORMAT_ARGB1555 }, \
+ { "r8g8b8", 24, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_RGB888 }, \
+ { "x8r8g8b8", 32, {16, 8}, {8, 8}, {0, 8}, {0, 0}, DRM_FORMAT_XRGB8888 }, \
+ { "a8r8g8b8", 32, {16, 8}, {8, 8}, {0, 8}, {24, 8}, DRM_FORMAT_ARGB8888 }, \
+ { "a8b8g8r8", 32, {0, 8}, {8, 8}, {16, 8}, {24, 8}, DRM_FORMAT_ABGR8888 }, \
+ { "x2r10g10b10", 32, {20, 10}, {10, 10}, {0, 10}, {0, 0}, DRM_FORMAT_XRGB2101010 }, \
+ { "a2r10g10b10", 32, {20, 10}, {10, 10}, {0, 10}, {30, 2}, DRM_FORMAT_ARGB2101010 }, \
+}
+
+/*
+ * Data-Format for Simple-Framebuffers
+ * @name: unique 0-terminated name that can be used to identify the mode
+ * @red,green,blue: Offsets and sizes of the single RGB parts
+ * @transp: Offset and size of the alpha bits. length=0 means no alpha
+ * @fourcc: 32bit DRM four-CC code (see drm_fourcc.h)
+ */
+struct simplefb_format {
+ const char *name;
+ u32 bits_per_pixel;
+ struct fb_bitfield red;
+ struct fb_bitfield green;
+ struct fb_bitfield blue;
+ struct fb_bitfield transp;
+ u32 fourcc;
+};
+
+/*
+ * Simple-Framebuffer description
+ * If the arch-boot code creates simple-framebuffers without DT support, it
+ * can pass the width, height, stride and format via this platform-data object.
+ * The framebuffer location must be given as IORESOURCE_MEM resource.
+ * @format must be a format as described in "struct simplefb_format" above.
+ */
+struct simplefb_platform_data {
+ u32 width;
+ u32 height;
+ u32 stride;
+ const char *format;
+};
+
+#endif /* __PLATFORM_DATA_SIMPLEFB_H__ */
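For illustration, a hypothetical board file handing a firmware-initialized framebuffer to the simplefb driver via this platform data; the physical address and geometry below are made up:

#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/platform_data/simplefb.h>

static struct resource example_simplefb_res =
	DEFINE_RES_MEM(0x8f000000, 800 * 600 * 4);

static struct simplefb_platform_data example_simplefb_pdata = {
	.width	= 800,
	.height	= 600,
	.stride	= 800 * 4,
	.format	= "x8r8g8b8",	/* must match an entry in SIMPLEFB_FORMATS */
};

static struct platform_device example_simplefb_dev = {
	.name			= "simple-framebuffer",
	.id			= -1,
	.num_resources		= 1,
	.resource		= &example_simplefb_res,
	.dev.platform_data	= &example_simplefb_pdata,
};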
diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h
new file mode 100644
index 00000000000..753839187ba
--- /dev/null
+++ b/include/linux/platform_data/st_sensors_pdata.h
@@ -0,0 +1,24 @@
+/*
+ * STMicroelectronics sensors platform-data driver
+ *
+ * Copyright 2013 STMicroelectronics Inc.
+ *
+ * Denis Ciocca <denis.ciocca@st.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef ST_SENSORS_PDATA_H
+#define ST_SENSORS_PDATA_H
+
+/**
+ * struct st_sensors_platform_data - Platform data for the ST sensors
+ * @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
+ * Available only for accelerometer and pressure sensors.
+ * Accelerometer DRDY on LSM330 is available only on pin 1 (see datasheet).
+ */
+struct st_sensors_platform_data {
+ u8 drdy_int_pin;
+};
+
+#endif /* ST_SENSORS_PDATA_H */
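And a one-line illustration of the new platform data, for a hypothetical board that wants DRDY routed to the sensor's INT1 pin:

#include <linux/platform_data/st_sensors_pdata.h>

static struct st_sensors_platform_data example_accel_pdata = {
	.drdy_int_pin = 1,	/* route data-ready to INT1 */
};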
diff --git a/include/linux/platform_data/tegra_usb.h b/include/linux/platform_data/tegra_usb.h
deleted file mode 100644
index 66c673fef40..00000000000
--- a/include/linux/platform_data/tegra_usb.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2010 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _TEGRA_USB_H_
-#define _TEGRA_USB_H_
-
-enum tegra_usb_operating_modes {
- TEGRA_USB_DEVICE,
- TEGRA_USB_HOST,
- TEGRA_USB_OTG,
-};
-
-struct tegra_ehci_platform_data {
- enum tegra_usb_operating_modes operating_mode;
- /* power down the phy on bus suspend */
- int power_down_on_bus_suspend;
- void *phy_config;
- int vbus_gpio;
-};
-
-#endif /* _TEGRA_USB_H_ */
diff --git a/include/linux/platform_data/vsp1.h b/include/linux/platform_data/vsp1.h
new file mode 100644
index 00000000000..a73a456d7f1
--- /dev/null
+++ b/include/linux/platform_data/vsp1.h
@@ -0,0 +1,25 @@
+/*
+ * vsp1.h -- R-Car VSP1 Platform Data
+ *
+ * Copyright (C) 2013 Renesas Corporation
+ *
+ * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef __PLATFORM_VSP1_H__
+#define __PLATFORM_VSP1_H__
+
+#define VSP1_HAS_LIF (1 << 0)
+
+struct vsp1_platform_data {
+ unsigned int features;
+ unsigned int rpf_count;
+ unsigned int uds_count;
+ unsigned int wpf_count;
+};
+
+#endif /* __PLATFORM_VSP1_H__ */
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
index 7db3eb93a07..1d2cd21242e 100644
--- a/include/linux/pps_kernel.h
+++ b/include/linux/pps_kernel.h
@@ -80,7 +80,7 @@ struct pps_device {
* Global variables
*/
-extern struct device_attribute pps_attrs[];
+extern const struct attribute_group *pps_groups[];
/*
* Internal functions.
diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
new file mode 100644
index 00000000000..931bc616219
--- /dev/null
+++ b/include/linux/preempt_mask.h
@@ -0,0 +1,122 @@
+#ifndef LINUX_PREEMPT_MASK_H
+#define LINUX_PREEMPT_MASK_H
+
+#include <linux/preempt.h>
+#include <asm/hardirq.h>
+
+/*
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ *
+ * The hardirq count can in theory reach the same as NR_IRQS.
+ * In reality, the number of nested IRQs is limited to the stack
+ * size as well. For archs with over 1000 IRQs it is not practical
+ * to expect that they will all nest. We give a max of 10 bits for
+ * hardirq nesting. An arch may choose to give less than 10 bits.
+ * m68k expects it to be 8.
+ *
+ * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024)
+ * - bit 26 is the NMI_MASK
+ * - bit 27 is the PREEMPT_ACTIVE flag
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * SOFTIRQ_MASK: 0x0000ff00
+ * HARDIRQ_MASK: 0x03ff0000
+ * NMI_MASK: 0x04000000
+ */
+#define PREEMPT_BITS 8
+#define SOFTIRQ_BITS 8
+#define NMI_BITS 1
+
+#define MAX_HARDIRQ_BITS 10
+
+#ifndef HARDIRQ_BITS
+# define HARDIRQ_BITS MAX_HARDIRQ_BITS
+#endif
+
+#if HARDIRQ_BITS > MAX_HARDIRQ_BITS
+#error HARDIRQ_BITS too high!
+#endif
+
+#define PREEMPT_SHIFT 0
+#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+#define NMI_SHIFT (HARDIRQ_SHIFT + HARDIRQ_BITS)
+
+#define __IRQ_MASK(x) ((1UL << (x))-1)
+
+#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define NMI_MASK (__IRQ_MASK(NMI_BITS) << NMI_SHIFT)
+
+#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+#define NMI_OFFSET (1UL << NMI_SHIFT)
+
+#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+
+#ifndef PREEMPT_ACTIVE
+#define PREEMPT_ACTIVE_BITS 1
+#define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS)
+#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
+#endif
+
+#if PREEMPT_ACTIVE < (1 << (NMI_SHIFT + NMI_BITS))
+#error PREEMPT_ACTIVE is too low!
+#endif
+
+#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+ | NMI_MASK))
+
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
+ * in_softirq - Are we currently processing softirq or have bh disabled?
+ * in_serving_softirq - Are we currently processing softirq?
+ */
+#define in_irq() (hardirq_count())
+#define in_softirq() (softirq_count())
+#define in_interrupt() (irq_count())
+#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
+
+/*
+ * Are we in NMI context?
+ */
+#define in_nmi() (preempt_count() & NMI_MASK)
+
+#if defined(CONFIG_PREEMPT_COUNT)
+# define PREEMPT_CHECK_OFFSET 1
+#else
+# define PREEMPT_CHECK_OFFSET 0
+#endif
+
+/*
+ * Are we running in atomic context? WARNING: this macro cannot
+ * always detect atomic context; in particular, it cannot know about
+ * held spinlocks in non-preemptible kernels. Thus it should not be
+ * used in the general case to determine whether sleeping is possible.
+ * Do not use in_atomic() in driver code.
+ */
+#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
+
+/*
+ * Check whether we were atomic before we did preempt_disable():
+ * (used by the scheduler, *after* releasing the kernel lock)
+ */
+#define in_atomic_preempt_off() \
+ ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
+
+#ifdef CONFIG_PREEMPT_COUNT
+# define preemptible() (preempt_count() == 0 && !irqs_disabled())
+#else
+# define preemptible() 0
+#endif
+
+#endif /* LINUX_PREEMPT_MASK_H */
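To make the bit layout above concrete, a small sketch that decodes an invented preempt_count value of 0x00010002 (one hardirq level, preemption depth 2) with the masks defined in this header:

#include <linux/preempt_mask.h>
#include <linux/printk.h>

/* 0x00010002: one hardirq level (bits 16-25) and a preemption depth of
 * 2 (bits 0-7); the softirq and NMI counts are zero. */
static void example_decode(unsigned long count)
{
	unsigned long depth    = (count & PREEMPT_MASK) >> PREEMPT_SHIFT; /* 2 */
	unsigned long softirqs = (count & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT; /* 0 */
	unsigned long hardirqs = (count & HARDIRQ_MASK) >> HARDIRQ_SHIFT; /* 1 */

	pr_info("preempt=%lu softirq=%lu hardirq=%lu\n",
		depth, softirqs, hardirqs);
}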
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 22c7052e937..e6131a78248 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -200,7 +200,7 @@ static inline void show_regs_print_info(const char *log_lvl)
}
#endif
-extern void dump_stack(void) __cold;
+extern asmlinkage void dump_stack(void) __cold;
#ifndef pr_fmt
#define pr_fmt(fmt) fmt
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 4aa80ba830a..abd437d0a8a 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -55,14 +55,14 @@ struct pstore_info {
int (*close)(struct pstore_info *psi);
ssize_t (*read)(u64 *id, enum pstore_type_id *type,
int *count, struct timespec *time, char **buf,
- struct pstore_info *psi);
+ bool *compressed, struct pstore_info *psi);
int (*write)(enum pstore_type_id type,
enum kmsg_dump_reason reason, u64 *id,
- unsigned int part, int count, size_t hsize,
+ unsigned int part, int count, bool compressed,
size_t size, struct pstore_info *psi);
int (*write_buf)(enum pstore_type_id type,
enum kmsg_dump_reason reason, u64 *id,
- unsigned int part, const char *buf, size_t hsize,
+ unsigned int part, const char *buf, bool compressed,
size_t size, struct pstore_info *psi);
int (*erase)(enum pstore_type_id type, u64 id,
int count, struct timespec time,
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 467cc6307b6..49444203328 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -21,6 +21,8 @@
#include <linux/list.h>
#include <linux/io.h>
+#include <linux/of.h>
+
/*
* SSP Serial Port Registers
@@ -190,6 +192,8 @@ struct ssp_device {
int irq;
int drcmr_rx;
int drcmr_tx;
+
+ struct device_node *of_node;
};
/**
@@ -218,11 +222,18 @@ static inline u32 pxa_ssp_read_reg(struct ssp_device *dev, u32 reg)
#ifdef CONFIG_ARCH_PXA
struct ssp_device *pxa_ssp_request(int port, const char *label);
void pxa_ssp_free(struct ssp_device *);
+struct ssp_device *pxa_ssp_request_of(const struct device_node *of_node,
+ const char *label);
#else
static inline struct ssp_device *pxa_ssp_request(int port, const char *label)
{
return NULL;
}
+static inline struct ssp_device *pxa_ssp_request_of(const struct device_node *n,
+ const char *name)
+{
+ return NULL;
+}
static inline void pxa_ssp_free(struct ssp_device *ssp) {}
#endif
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 1c50093ae65..6965fe394c3 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -41,6 +41,7 @@ void __quota_error(struct super_block *sb, const char *func,
void inode_add_rsv_space(struct inode *inode, qsize_t number);
void inode_claim_rsv_space(struct inode *inode, qsize_t number);
void inode_sub_rsv_space(struct inode *inode, qsize_t number);
+void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
void dquot_initialize(struct inode *inode);
void dquot_drop(struct inode *inode);
@@ -59,6 +60,7 @@ int dquot_alloc_inode(const struct inode *inode);
int dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
void dquot_free_inode(const struct inode *inode);
+void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number);
int dquot_disable(struct super_block *sb, int type, unsigned int flags);
/* Suspend quotas on remount RO */
@@ -238,6 +240,13 @@ static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
return 0;
}
+static inline int dquot_reclaim_space_nodirty(struct inode *inode,
+ qsize_t number)
+{
+ inode_sub_bytes(inode, number);
+ return 0;
+}
+
static inline int dquot_disable(struct super_block *sb, int type,
unsigned int flags)
{
@@ -336,6 +345,12 @@ static inline int dquot_claim_block(struct inode *inode, qsize_t nr)
return ret;
}
+static inline void dquot_reclaim_block(struct inode *inode, qsize_t nr)
+{
+ dquot_reclaim_space_nodirty(inode, nr << inode->i_blkbits);
+ mark_inode_dirty_sync(inode);
+}
+
static inline void dquot_free_space_nodirty(struct inode *inode, qsize_t nr)
{
__dquot_free_space(inode, nr, 0);
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 8dfaa2ce2e9..0f424698064 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -114,6 +114,11 @@ extern const struct raid6_recov_calls raid6_recov_intx1;
extern const struct raid6_recov_calls raid6_recov_ssse3;
extern const struct raid6_recov_calls raid6_recov_avx2;
+extern const struct raid6_calls raid6_neonx1;
+extern const struct raid6_calls raid6_neonx2;
+extern const struct raid6_calls raid6_neonx4;
+extern const struct raid6_calls raid6_neonx8;
+
/* Algorithm list */
extern const struct raid6_calls * const raid6_algos[];
extern const struct raid6_recov_calls *const raid6_recov_algos[];
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index f4b1001a467..4106721c4e5 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -267,8 +267,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
*/
#define list_first_or_null_rcu(ptr, type, member) \
({struct list_head *__ptr = (ptr); \
- struct list_head __rcu *__next = list_next_rcu(__ptr); \
- likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \
+ struct list_head *__next = ACCESS_ONCE(__ptr->next); \
+ likely(__ptr != __next) ? \
+ list_entry_rcu(__next, type, member) : NULL; \
})
/**
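A usage sketch for the reworked macro: a reader-side peek at the first element under rcu_read_lock(); struct foo and the surrounding function are hypothetical:

#include <linux/errno.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct foo {
	struct list_head list;
	int value;
};

static int peek_first_value(struct list_head *head)
{
	struct foo *first;
	int ret = -ENOENT;

	rcu_read_lock();
	/* Returns NULL instead of a bogus entry when the list is empty. */
	first = list_first_or_null_rcu(head, struct foo, list);
	if (first)
		ret = first->value;
	rcu_read_unlock();

	return ret;
}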
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 4b14bdc911d..f1f1bc39346 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -52,7 +52,7 @@ extern int rcutorture_runnable; /* for sysctl */
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
extern void rcutorture_record_test_transition(void);
extern void rcutorture_record_progress(unsigned long vernum);
-extern void do_trace_rcu_torture_read(char *rcutorturename,
+extern void do_trace_rcu_torture_read(const char *rcutorturename,
struct rcu_head *rhp,
unsigned long secs,
unsigned long c_old,
@@ -65,7 +65,7 @@ static inline void rcutorture_record_progress(unsigned long vernum)
{
}
#ifdef CONFIG_RCU_TRACE
-extern void do_trace_rcu_torture_read(char *rcutorturename,
+extern void do_trace_rcu_torture_read(const char *rcutorturename,
struct rcu_head *rhp,
unsigned long secs,
unsigned long c_old,
@@ -229,13 +229,9 @@ extern void rcu_irq_exit(void);
#ifdef CONFIG_RCU_USER_QS
extern void rcu_user_enter(void);
extern void rcu_user_exit(void);
-extern void rcu_user_enter_after_irq(void);
-extern void rcu_user_exit_after_irq(void);
#else
static inline void rcu_user_enter(void) { }
static inline void rcu_user_exit(void) { }
-static inline void rcu_user_enter_after_irq(void) { }
-static inline void rcu_user_exit_after_irq(void) { }
static inline void rcu_user_hooks_switch(struct task_struct *prev,
struct task_struct *next) { }
#endif /* CONFIG_RCU_USER_QS */
@@ -1015,4 +1011,22 @@ static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
#endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
+/* Only for use by adaptive-ticks code. */
+#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
+extern bool rcu_sys_is_idle(void);
+extern void rcu_sysidle_force_exit(void);
+#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
+
+static inline bool rcu_sys_is_idle(void)
+{
+ return false;
+}
+
+static inline void rcu_sysidle_force_exit(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
+
+
#endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 75981d0b57d..a10380bfbea 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -15,6 +15,8 @@
#include <linux/list.h>
#include <linux/rbtree.h>
+#include <linux/err.h>
+#include <linux/bug.h>
struct module;
struct device;
@@ -470,6 +472,9 @@ struct regmap_irq {
* @ack_base: Base ack address. If zero then the chip is clear on read.
* @wake_base: Base address for wake enables. If zero unsupported.
* @irq_reg_stride: Stride to use for chips where registers are not contiguous.
+ * @init_ack_masked: Ack all masked interrupts once during initialization.
+ * @mask_invert: Inverted mask register: cleared bits are masked out.
+ * @wake_invert: Inverted wake register: cleared bits are wake enabled.
* @runtime_pm: Hold a runtime PM lock on the device when accessing it.
*
* @num_regs: Number of registers in each control bank.
@@ -485,9 +490,10 @@ struct regmap_irq_chip {
unsigned int ack_base;
unsigned int wake_base;
unsigned int irq_reg_stride;
- unsigned int mask_invert;
- unsigned int wake_invert;
- bool runtime_pm;
+ bool init_ack_masked:1;
+ bool mask_invert:1;
+ bool wake_invert:1;
+ bool runtime_pm:1;
int num_regs;
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 3a76389c6aa..27be915caa9 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -137,6 +137,12 @@ struct regulator *__must_check devm_regulator_get(struct device *dev,
const char *id);
struct regulator *__must_check regulator_get_exclusive(struct device *dev,
const char *id);
+struct regulator *__must_check devm_regulator_get_exclusive(struct device *dev,
+ const char *id);
+struct regulator *__must_check regulator_get_optional(struct device *dev,
+ const char *id);
+struct regulator *__must_check devm_regulator_get_optional(struct device *dev,
+ const char *id);
void regulator_put(struct regulator *regulator);
void devm_regulator_put(struct regulator *regulator);
@@ -217,6 +223,25 @@ devm_regulator_get(struct device *dev, const char *id)
return NULL;
}
+static inline struct regulator *__must_check
+regulator_get_exclusive(struct device *dev, const char *id)
+{
+ return NULL;
+}
+
+static inline struct regulator *__must_check
+regulator_get_optional(struct device *dev, const char *id)
+{
+ return NULL;
+}
+
+
+static inline struct regulator *__must_check
+devm_regulator_get_optional(struct device *dev, const char *id)
+{
+ return NULL;
+}
+
static inline void regulator_put(struct regulator *regulator)
{
}
@@ -369,8 +394,11 @@ static inline int regulator_count_voltages(struct regulator *regulator)
static inline int regulator_set_voltage_tol(struct regulator *regulator,
int new_uV, int tol_uV)
{
- return regulator_set_voltage(regulator,
- new_uV - tol_uV, new_uV + tol_uV);
+ if (regulator_set_voltage(regulator, new_uV, new_uV + tol_uV) == 0)
+ return 0;
+ else
+ return regulator_set_voltage(regulator,
+ new_uV - tol_uV, new_uV + tol_uV);
}
static inline int regulator_is_supported_voltage_tol(struct regulator *regulator,
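A short usage sketch of the adjusted helper: requesting a nominal 1.8 V with 50 mV of tolerance now tries the exact value before falling back to the full 1.75-1.85 V window (the supply handle and function name are illustrative):

#include <linux/regulator/consumer.h>

static int example_set_vdd(struct regulator *vdd)
{
	/* Exact 1.80 V first, then anything within 1.75-1.85 V. */
	return regulator_set_voltage_tol(vdd, 1800000, 50000);
}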
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 6700cc94bdd..67e13aa5a47 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -40,6 +40,24 @@ enum regulator_status {
};
/**
+ * Specify a range of voltages for regulator_map_voltage_linear_range() and
+ * regulator_list_voltage_linear_range().
+ *
+ * @min_uV: Lowest voltage in range
+ * @max_uV: Highest voltage in range
+ * @min_sel: Lowest selector for range
+ * @max_sel: Highest selector for range
+ * @uV_step: Step size
+ */
+struct regulator_linear_range {
+ unsigned int min_uV;
+ unsigned int max_uV;
+ unsigned int min_sel;
+ unsigned int max_sel;
+ unsigned int uV_step;
+};
+
+/**
* struct regulator_ops - regulator operations.
*
* @enable: Configure the regulator as enabled.
@@ -223,6 +241,9 @@ struct regulator_desc {
unsigned int linear_min_sel;
unsigned int ramp_delay;
+ const struct regulator_linear_range *linear_ranges;
+ int n_linear_ranges;
+
const unsigned int *volt_table;
unsigned int vsel_reg;
@@ -326,10 +347,14 @@ int regulator_mode_to_status(unsigned int);
int regulator_list_voltage_linear(struct regulator_dev *rdev,
unsigned int selector);
+int regulator_list_voltage_linear_range(struct regulator_dev *rdev,
+ unsigned int selector);
int regulator_list_voltage_table(struct regulator_dev *rdev,
unsigned int selector);
int regulator_map_voltage_linear(struct regulator_dev *rdev,
int min_uV, int max_uV);
+int regulator_map_voltage_linear_range(struct regulator_dev *rdev,
+ int min_uV, int max_uV);
int regulator_map_voltage_iterate(struct regulator_dev *rdev,
int min_uV, int max_uV);
int regulator_map_voltage_ascend(struct regulator_dev *rdev,
diff --git a/include/linux/regulator/fan53555.h b/include/linux/regulator/fan53555.h
index 5c45c85d52c..f13880e84d8 100644
--- a/include/linux/regulator/fan53555.h
+++ b/include/linux/regulator/fan53555.h
@@ -11,6 +11,7 @@
*/
#ifndef __FAN53555_H__
+#define __FAN53555_H__
/* VSEL ID */
enum {
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index 36adbc82de6..999b20ce06c 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -134,6 +134,7 @@ struct regulation_constraints {
unsigned always_on:1; /* regulator never off when system is on */
unsigned boot_on:1; /* bootloader/firmware enabled regulator */
unsigned apply_uV:1; /* apply uV constraint if min == max */
+ unsigned ramp_disable:1; /* disable ramp delay */
};
/**
diff --git a/include/linux/regulator/max8660.h b/include/linux/regulator/max8660.h
index 9936763621c..f8a6a484486 100644
--- a/include/linux/regulator/max8660.h
+++ b/include/linux/regulator/max8660.h
@@ -39,7 +39,7 @@ enum {
*/
struct max8660_subdev_data {
int id;
- char *name;
+ const char *name;
struct regulator_init_data *platform_data;
};
diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h
new file mode 100644
index 00000000000..65d550bf395
--- /dev/null
+++ b/include/linux/regulator/pfuze100.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2011-2013 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __LINUX_REG_PFUZE100_H
+#define __LINUX_REG_PFUZE100_H
+
+#define PFUZE100_SW1AB 0
+#define PFUZE100_SW1C 1
+#define PFUZE100_SW2 2
+#define PFUZE100_SW3A 3
+#define PFUZE100_SW3B 4
+#define PFUZE100_SW4 5
+#define PFUZE100_SWBST 6
+#define PFUZE100_VSNVS 7
+#define PFUZE100_VREFDDR 8
+#define PFUZE100_VGEN1 9
+#define PFUZE100_VGEN2 10
+#define PFUZE100_VGEN3 11
+#define PFUZE100_VGEN4 12
+#define PFUZE100_VGEN5 13
+#define PFUZE100_VGEN6 14
+#define PFUZE100_MAX_REGULATOR 15
+
+struct regulator_init_data;
+
+struct pfuze_regulator_platform_data {
+ struct regulator_init_data *init_data[PFUZE100_MAX_REGULATOR];
+};
+
+#endif /* __LINUX_REG_PFUZE100_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 50d04b92ced..ce1e1c0aaa3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -107,14 +107,6 @@ extern unsigned long this_cpu_load(void);
extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);
-/* Notifier for when a task gets migrated to a new CPU */
-struct task_migration_notifier {
- struct task_struct *task;
- int from_cpu;
- int to_cpu;
-};
-extern void register_task_migration_notifier(struct notifier_block *n);
-
extern unsigned long get_parent_ip(unsigned long addr);
extern void dump_cpu_task(int cpu);
@@ -1034,6 +1026,9 @@ struct task_struct {
#ifdef CONFIG_SMP
struct llist_node wake_entry;
int on_cpu;
+ struct task_struct *last_wakee;
+ unsigned long wakee_flips;
+ unsigned long wakee_flip_decay_ts;
#endif
int on_rq;
@@ -1532,6 +1527,8 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk)
* Test if a process is not yet dead (at most zombie state)
* If pid_alive fails, then pointers within the task structure
* can be stale and must not be dereferenced.
+ *
+ * Return: 1 if the process is alive. 0 otherwise.
*/
static inline int pid_alive(struct task_struct *p)
{
@@ -1543,6 +1540,8 @@ static inline int pid_alive(struct task_struct *p)
* @tsk: Task structure to be checked.
*
* Check if a task structure is the first user space task the kernel created.
+ *
+ * Return: 1 if the task structure is init. 0 otherwise.
*/
static inline int is_global_init(struct task_struct *tsk)
{
@@ -1628,6 +1627,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut,
#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
+#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */
/*
* Only the _current_ task can read/write to tsk->flags, but other
@@ -1893,6 +1893,8 @@ extern struct task_struct *idle_task(int cpu);
/**
* is_idle_task - is the specified task an idle task?
* @p: the task in question.
+ *
+ * Return: 1 if @p is an idle task. 0 otherwise.
*/
static inline bool is_idle_task(const struct task_struct *p)
{
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h
index fc305713fc6..90b5e30c2f2 100644
--- a/include/linux/sh_eth.h
+++ b/include/linux/sh_eth.h
@@ -2,23 +2,17 @@
#define __ASM_SH_ETH_H__
#include <linux/phy.h>
+#include <linux/if_ether.h>
enum {EDMAC_LITTLE_ENDIAN, EDMAC_BIG_ENDIAN};
-enum {
- SH_ETH_REG_GIGABIT,
- SH_ETH_REG_FAST_RCAR,
- SH_ETH_REG_FAST_SH4,
- SH_ETH_REG_FAST_SH3_SH2
-};
struct sh_eth_plat_data {
int phy;
int edmac_endian;
- int register_type;
phy_interface_t phy_interface;
void (*set_mdio_gate)(void *addr);
- unsigned char mac_addr[6];
+ unsigned char mac_addr[ETH_ALEN];
unsigned no_ether_link:1;
unsigned ether_link_active_low:1;
unsigned needs_init:1;
diff --git a/include/linux/shdma-base.h b/include/linux/shdma-base.h
index 382cf710ca9..5b1c9848124 100644
--- a/include/linux/shdma-base.h
+++ b/include/linux/shdma-base.h
@@ -124,6 +124,10 @@ void shdma_chan_remove(struct shdma_chan *schan);
int shdma_init(struct device *dev, struct shdma_dev *sdev,
int chan_num);
void shdma_cleanup(struct shdma_dev *sdev);
+#if IS_ENABLED(CONFIG_SH_DMAE_BASE)
bool shdma_chan_filter(struct dma_chan *chan, void *arg);
+#else
+#define shdma_chan_filter NULL
+#endif
#endif
diff --git a/include/linux/signal.h b/include/linux/signal.h
index d897484730c..2ac423bdb67 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -434,6 +434,14 @@ void signals_init(void);
int restore_altstack(const stack_t __user *);
int __save_altstack(stack_t __user *, unsigned long);
+#define save_altstack_ex(uss, sp) do { \
+ stack_t __user *__uss = uss; \
+ struct task_struct *t = current; \
+ put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \
+ put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \
+ put_user_ex(t->sas_ss_size, &__uss->ss_size); \
+} while (0);
+
#ifdef CONFIG_PROC_FS
struct seq_file;
extern void render_sigset_t(struct seq_file *, const char *, sigset_t *);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5afefa01a13..2ddb48d9312 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -501,7 +501,7 @@ struct sk_buff {
/* 7/9 bit hole (depending on ndisc_nodetype presence) */
kmemcheck_bitfield_end(flags2);
-#if defined CONFIG_NET_DMA || defined CONFIG_NET_LL_RX_POLL
+#if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
union {
unsigned int napi_id;
dma_cookie_t dma_cookie;
@@ -1805,10 +1805,13 @@ static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
*/
static inline void skb_orphan(struct sk_buff *skb)
{
- if (skb->destructor)
+ if (skb->destructor) {
skb->destructor(skb);
- skb->destructor = NULL;
- skb->sk = NULL;
+ skb->destructor = NULL;
+ skb->sk = NULL;
+ } else {
+ BUG_ON(skb->sk);
+ }
}
/**
@@ -1902,8 +1905,8 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}
-/*
- * __skb_alloc_page - allocate pages for ps-rx on a skb and preserve pfmemalloc data
+/**
+ * __skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data
* @gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
* @skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
* @order: size of the allocation
@@ -2356,6 +2359,10 @@ extern int skb_copy_datagram_from_iovec(struct sk_buff *skb,
const struct iovec *from,
int from_offset,
int len);
+extern int zerocopy_sg_from_iovec(struct sk_buff *skb,
+ const struct iovec *frm,
+ int offset,
+ size_t count);
extern int skb_copy_datagram_const_iovec(const struct sk_buff *from,
int offset,
const struct iovec *to,
@@ -2385,7 +2392,7 @@ extern void skb_split(struct sk_buff *skb,
struct sk_buff *skb1, const u32 len);
extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
int shiftlen);
-extern void skb_scrub_packet(struct sk_buff *skb);
+extern void skb_scrub_packet(struct sk_buff *skb, bool xnet);
extern struct sk_buff *skb_segment(struct sk_buff *skb,
netdev_features_t features);
diff --git a/include/linux/smsc911x.h b/include/linux/smsc911x.h
index 4dde70e7482..eec3efd19be 100644
--- a/include/linux/smsc911x.h
+++ b/include/linux/smsc911x.h
@@ -22,6 +22,7 @@
#define __LINUX_SMSC911X_H__
#include <linux/phy.h>
+#include <linux/if_ether.h>
/* platform_device configuration data, should be assigned to
* the platform_device's dev.platform_data */
@@ -31,7 +32,7 @@ struct smsc911x_platform_config {
unsigned int flags;
unsigned int shift;
phy_interface_t phy_interface;
- unsigned char mac[6];
+ unsigned char mac[ETH_ALEN];
};
/* Constants for platform_device irq polarity configuration */
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 230c04bda3e..445ef7519dc 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -313,6 +313,8 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
struct iovec *iov,
int offset,
unsigned int len, __wsum *csump);
+extern unsigned long iov_pages(const struct iovec *iov, int offset,
+ unsigned long nr_segs);
extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode);
extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 28e440be1c0..887116dbce2 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -74,7 +74,7 @@ struct spi_device {
struct spi_master *master;
u32 max_speed_hz;
u8 chip_select;
- u8 mode;
+ u16 mode;
#define SPI_CPHA 0x01 /* clock phase */
#define SPI_CPOL 0x02 /* clock polarity */
#define SPI_MODE_0 (0|0) /* (original MicroWire) */
@@ -87,6 +87,10 @@ struct spi_device {
#define SPI_LOOP 0x20 /* loopback mode */
#define SPI_NO_CS 0x40 /* 1 dev/bus, no chipselect */
#define SPI_READY 0x80 /* slave pulls low to pause */
+#define SPI_TX_DUAL 0x100 /* transmit with 2 wires */
+#define SPI_TX_QUAD 0x200 /* transmit with 4 wires */
+#define SPI_RX_DUAL 0x400 /* receive with 2 wires */
+#define SPI_RX_QUAD 0x800 /* receive with 4 wires */
u8 bits_per_word;
int irq;
void *controller_state;
@@ -233,6 +237,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* supported. If set, the SPI core will reject any transfer with an
* unsupported bits_per_word. If not set, this value is simply ignored,
* and it's up to the individual driver to perform any validation.
+ * @min_speed_hz: Lowest supported transfer speed
+ * @max_speed_hz: Highest supported transfer speed
* @flags: other constraints relevant to this driver
* @bus_lock_spinlock: spinlock for SPI bus locking
* @bus_lock_mutex: mutex for SPI bus locking
@@ -254,6 +260,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @busy: message pump is busy
* @running: message pump is running
* @rt: whether this queue is set to run as a realtime task
+ * @auto_runtime_pm: the core should ensure a runtime PM reference is held
+ * while the hardware is prepared, using the parent
+ * device for the spidev
* @prepare_transfer_hardware: a message will soon arrive from the queue
* so the subsystem requests the driver to prepare the transfer hardware
* by issuing this call
@@ -309,9 +318,13 @@ struct spi_master {
/* bitmask of supported bits_per_word for transfers */
u32 bits_per_word_mask;
#define SPI_BPW_MASK(bits) BIT((bits) - 1)
-#define SPI_BIT_MASK(bits) (((bits) == 32) ? ~0UL : (BIT(bits) - 1))
+#define SPI_BIT_MASK(bits) (((bits) == 32) ? ~0U : (BIT(bits) - 1))
#define SPI_BPW_RANGE_MASK(min, max) (SPI_BIT_MASK(max) - SPI_BIT_MASK(min - 1))
+ /* limits on transfer speed */
+ u32 min_speed_hz;
+ u32 max_speed_hz;
+
/* other constraints relevant to this driver */
u16 flags;
#define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */
@@ -374,11 +387,13 @@ struct spi_master {
bool busy;
bool running;
bool rt;
+ bool auto_runtime_pm;
int (*prepare_transfer_hardware)(struct spi_master *master);
int (*transfer_one_message)(struct spi_master *master,
struct spi_message *mesg);
int (*unprepare_transfer_hardware)(struct spi_master *master);
+
/* gpio chip select */
int *cs_gpios;
};
@@ -448,6 +463,10 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
* @rx_buf: data to be read (dma-safe memory), or NULL
* @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped
* @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped
+ * @tx_nbits: number of bits used for writing. If 0 the default
+ * (SPI_NBITS_SINGLE) is used.
+ * @rx_nbits: number of bits used for reading. If 0 the default
+ * (SPI_NBITS_SINGLE) is used.
* @len: size of rx and tx buffers (in bytes)
* @speed_hz: Select a speed other than the device default for this
* transfer. If 0 the default (from @spi_device) is used.
@@ -502,6 +521,11 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
* by the results of previous messages and where the whole transaction
* ends when the chipselect goes inactive.
*
+ * When the SPI controller can transfer in 1x, 2x or 4x mode, it gets the
+ * transfer information from the device through @tx_nbits and @rx_nbits.
+ * For bidirectional transfers both should be set. The transfer mode is
+ * selected with SPI_NBITS_SINGLE (1x), SPI_NBITS_DUAL (2x) or SPI_NBITS_QUAD (4x).
+ *
* The code that submits an spi_message (and its spi_transfers)
* to the lower layers is responsible for managing its memory.
* Zero-initialize every field you don't set up explicitly, to
@@ -522,6 +546,11 @@ struct spi_transfer {
dma_addr_t rx_dma;
unsigned cs_change:1;
+ u8 tx_nbits;
+ u8 rx_nbits;
+#define SPI_NBITS_SINGLE 0x01 /* 1bit transfer */
+#define SPI_NBITS_DUAL 0x02 /* 2bits transfer */
+#define SPI_NBITS_QUAD 0x04 /* 4bits transfer */
u8 bits_per_word;
u16 delay_usecs;
u32 speed_hz;
@@ -578,6 +607,7 @@ struct spi_message {
/* completion is reported through a callback */
void (*complete)(void *context);
void *context;
+ unsigned frame_length;
unsigned actual_length;
int status;
@@ -869,7 +899,7 @@ struct spi_board_info {
/* mode becomes spi_device.mode, and is essential for chips
* where the default of SPI_CS_HIGH = 0 is wrong.
*/
- u8 mode;
+ u16 mode;
/* ... may need additional spi_device chip config data here.
* avoid stuff protocol drivers can set; but include stuff
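To show where the new tx_nbits/rx_nbits fields fit, a sketch of a single-wire command followed by a quad-wire read for a hypothetical flash-like device (buffers, lengths and the device handle are assumptions):

#include <linux/spi/spi.h>

static int example_quad_read(struct spi_device *spi, u8 *cmd, size_t cmd_len,
			     u8 *data, size_t data_len)
{
	struct spi_transfer t[2] = {
		{
			.tx_buf   = cmd,
			.len      = cmd_len,
			.tx_nbits = SPI_NBITS_SINGLE,
		},
		{
			.rx_buf   = data,
			.len      = data_len,
			.rx_nbits = SPI_NBITS_QUAD,	/* needs SPI_RX_QUAD in spi->mode */
		},
	};
	struct spi_message m;

	spi_message_init(&m);
	spi_message_add_tail(&t[0], &m);
	spi_message_add_tail(&t[1], &m);

	return spi_sync(spi, &m);
}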
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index f987a2bee16..daebaba886a 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -4,11 +4,7 @@
#include <linux/workqueue.h>
struct spi_bitbang {
- struct workqueue_struct *workqueue;
- struct work_struct work;
-
spinlock_t lock;
- struct list_head queue;
u8 busy;
u8 use_dma;
u8 flags; /* extra spi->mode support */
@@ -41,7 +37,6 @@ struct spi_bitbang {
*/
extern int spi_bitbang_setup(struct spi_device *spi);
extern void spi_bitbang_cleanup(struct spi_device *spi);
-extern int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m);
extern int spi_bitbang_setup_transfer(struct spi_device *spi,
struct spi_transfer *t);
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 7d537ced949..75f34949d9a 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -117,9 +117,17 @@ do { \
#endif /*arch_spin_is_contended*/
#endif
-/* The lock does not imply full memory barrier. */
-#ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
-static inline void smp_mb__after_lock(void) { smp_mb(); }
+/*
+ * Despite its name it doesn't necessarily have to be a full barrier.
+ * It should only guarantee that a STORE before the critical section
+ * cannot be reordered with a LOAD inside this section.
+ * spin_lock() is the one-way barrier, so this LOAD cannot escape out
+ * of the region. The default implementation therefore simply ensures
+ * that a STORE cannot move into the critical section; smp_wmb() should
+ * serialize it with another STORE done by spin_lock().
+ */
+#ifndef smp_mb__before_spinlock
+#define smp_mb__before_spinlock() smp_wmb()
#endif
/**
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 9e495d31516..bb5deb0feb6 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -108,6 +108,7 @@ struct plat_stmmacenet_data {
int bugged_jumbo;
int pmt;
int force_sf_dma_mode;
+ int force_thresh_dma_mode;
int riwt_off;
void (*fix_mac_speed)(void *priv, unsigned int speed);
void (*bus_setup)(void __iomem *ioaddr);
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 6d870353674..1821445708d 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -121,6 +121,7 @@ struct rpc_task_setup {
#define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */
#define RPC_TASK_SENT 0x0800 /* message was sent */
#define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */
+#define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */
#define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index c5fd30d2a41..8d4fa82bfb9 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -67,6 +67,8 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte)
swp_entry_t arch_entry;
BUG_ON(pte_file(pte));
+ if (pte_swp_soft_dirty(pte))
+ pte = pte_swp_clear_soft_dirty(pte);
arch_entry = __pte_to_swp_entry(pte);
return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
}
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 4147d700a29..84662ecc7b5 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -802,9 +802,14 @@ asmlinkage long sys_vfork(void);
asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int,
int __user *);
#else
+#ifdef CONFIG_CLONE_BACKWARDS3
+asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *,
+ int __user *, int);
+#else
asmlinkage long sys_clone(unsigned long, unsigned long, int __user *,
int __user *, int);
#endif
+#endif
asmlinkage long sys_execve(const char __user *filename,
const char __user *const __user *argv,
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 9e8a9b555ad..11baec7c9b2 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -51,9 +51,9 @@ do { \
static struct lock_class_key __key; \
\
(attr)->key = &__key; \
-} while(0)
+} while (0)
#else
-#define sysfs_attr_init(attr) do {} while(0)
+#define sysfs_attr_init(attr) do {} while (0)
#endif
struct attribute_group {
@@ -69,7 +69,7 @@ struct attribute_group {
* for examples..
*/
-#define __ATTR(_name,_mode,_show,_store) { \
+#define __ATTR(_name, _mode, _show, _store) { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
.show = _show, \
.store = _store, \
@@ -80,6 +80,11 @@ struct attribute_group {
.show = _name##_show, \
}
+#define __ATTR_WO(_name) { \
+ .attr = { .name = __stringify(_name), .mode = S_IWUSR }, \
+ .store = _name##_store, \
+}
+
#define __ATTR_RW(_name) __ATTR(_name, (S_IWUSR | S_IRUGO), \
_name##_show, _name##_store)
@@ -108,8 +113,6 @@ static const struct attribute_group _name##_group = { \
}; \
__ATTRIBUTE_GROUPS(_name)
-#define attr_name(_attr) (_attr).attr.name
-
struct file;
struct vm_area_struct;
@@ -119,7 +122,7 @@ struct bin_attribute {
void *private;
ssize_t (*read)(struct file *, struct kobject *, struct bin_attribute *,
char *, loff_t, size_t);
- ssize_t (*write)(struct file *,struct kobject *, struct bin_attribute *,
+ ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *,
char *, loff_t, size_t);
int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
struct vm_area_struct *vma);
@@ -153,7 +156,7 @@ struct bin_attribute {
#define __BIN_ATTR_RW(_name, _size) __BIN_ATTR(_name, \
(S_IWUSR | S_IRUGO), _name##_read, \
- _name##_write)
+ _name##_write, _size)
#define __BIN_ATTR_NULL __ATTR_NULL
@@ -168,8 +171,8 @@ struct bin_attribute bin_attr_##_name = __BIN_ATTR_RO(_name, _size)
struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size)
struct sysfs_ops {
- ssize_t (*show)(struct kobject *, struct attribute *,char *);
- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
+ ssize_t (*show)(struct kobject *, struct attribute *, char *);
+ ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t);
const void *(*namespace)(struct kobject *, const struct attribute *);
};
@@ -215,10 +218,14 @@ void sysfs_delete_link(struct kobject *dir, struct kobject *targ,
int __must_check sysfs_create_group(struct kobject *kobj,
const struct attribute_group *grp);
+int __must_check sysfs_create_groups(struct kobject *kobj,
+ const struct attribute_group **groups);
int sysfs_update_group(struct kobject *kobj,
const struct attribute_group *grp);
void sysfs_remove_group(struct kobject *kobj,
const struct attribute_group *grp);
+void sysfs_remove_groups(struct kobject *kobj,
+ const struct attribute_group **groups);
int sysfs_add_file_to_group(struct kobject *kobj,
const struct attribute *attr, const char *group);
void sysfs_remove_file_from_group(struct kobject *kobj,
@@ -343,6 +350,12 @@ static inline int sysfs_create_group(struct kobject *kobj,
return 0;
}
+static inline int sysfs_create_groups(struct kobject *kobj,
+ const struct attribute_group **groups)
+{
+ return 0;
+}
+
static inline int sysfs_update_group(struct kobject *kobj,
const struct attribute_group *grp)
{
@@ -354,6 +367,11 @@ static inline void sysfs_remove_group(struct kobject *kobj,
{
}
+static inline void sysfs_remove_groups(struct kobject *kobj,
+ const struct attribute_group **groups)
+{
+}
+
static inline int sysfs_add_file_to_group(struct kobject *kobj,
const struct attribute *attr, const char *group)
{
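A brief sketch of the new group helpers in use; example_groups stands for any NULL-terminated array of attribute groups, such as one produced by ATTRIBUTE_GROUPS():

#include <linux/sysfs.h>

static int example_register(struct kobject *kobj,
			    const struct attribute_group **example_groups)
{
	return sysfs_create_groups(kobj, example_groups);
}

static void example_unregister(struct kobject *kobj,
			       const struct attribute_group **example_groups)
{
	sysfs_remove_groups(kobj, example_groups);
}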
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 472120b4fac..d68633452d9 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -107,7 +107,6 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
* only four options will fit in a standard TCP header */
#define TCP_NUM_SACKS 4
-struct tcp_cookie_values;
struct tcp_request_sock_ops;
struct tcp_request_sock {
@@ -238,6 +237,7 @@ struct tcp_sock {
u32 rcv_wnd; /* Current receiver window */
u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
+ u32 notsent_lowat; /* TCP_NOTSENT_LOWAT */
u32 pushed_seq; /* Last pushed seq, required to talk to windows */
u32 lost_out; /* Lost packets */
u32 sacked_out; /* SACK'd packets */
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 9180f4b85e6..5128d33bbb3 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -10,6 +10,8 @@
#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
+#include <linux/context_tracking_state.h>
+#include <linux/cpumask.h>
#ifdef CONFIG_GENERIC_CLOCKEVENTS
@@ -158,26 +160,51 @@ static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
# endif /* !CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
+extern bool tick_nohz_full_running;
+extern cpumask_var_t tick_nohz_full_mask;
+
+static inline bool tick_nohz_full_enabled(void)
+{
+ if (!static_key_false(&context_tracking_enabled))
+ return false;
+
+ return tick_nohz_full_running;
+}
+
+static inline bool tick_nohz_full_cpu(int cpu)
+{
+ if (!tick_nohz_full_enabled())
+ return false;
+
+ return cpumask_test_cpu(cpu, tick_nohz_full_mask);
+}
+
extern void tick_nohz_init(void);
-extern int tick_nohz_full_cpu(int cpu);
-extern void tick_nohz_full_check(void);
+extern void __tick_nohz_full_check(void);
extern void tick_nohz_full_kick(void);
extern void tick_nohz_full_kick_all(void);
-extern void tick_nohz_task_switch(struct task_struct *tsk);
+extern void __tick_nohz_task_switch(struct task_struct *tsk);
#else
static inline void tick_nohz_init(void) { }
-static inline int tick_nohz_full_cpu(int cpu) { return 0; }
-static inline void tick_nohz_full_check(void) { }
+static inline bool tick_nohz_full_enabled(void) { return false; }
+static inline bool tick_nohz_full_cpu(int cpu) { return false; }
+static inline void __tick_nohz_full_check(void) { }
static inline void tick_nohz_full_kick(void) { }
static inline void tick_nohz_full_kick_all(void) { }
-static inline void tick_nohz_task_switch(struct task_struct *tsk) { }
+static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
#endif
+static inline void tick_nohz_full_check(void)
+{
+ if (tick_nohz_full_enabled())
+ __tick_nohz_full_check();
+}
+
+static inline void tick_nohz_task_switch(struct task_struct *tsk)
+{
+ if (tick_nohz_full_enabled())
+ __tick_nohz_task_switch(tsk);
+}
-# ifdef CONFIG_CPU_IDLE_GOV_MENU
-extern void menu_hrtimer_cancel(void);
-# else
-static inline void menu_hrtimer_cancel(void) {}
-# endif /* CONFIG_CPU_IDLE_GOV_MENU */
#endif
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 01ac30efd6a..64f864651d8 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -10,6 +10,8 @@
#include <linux/mutex.h>
#include <linux/tty_flags.h>
#include <uapi/linux/tty.h>
+#include <linux/rwsem.h>
+#include <linux/llist.h>
@@ -29,9 +31,10 @@
#define __DISABLED_CHAR '\0'
struct tty_buffer {
- struct tty_buffer *next;
- char *char_buf_ptr;
- unsigned char *flag_buf_ptr;
+ union {
+ struct tty_buffer *next;
+ struct llist_node free;
+ };
int used;
int size;
int commit;
@@ -40,25 +43,25 @@ struct tty_buffer {
unsigned long data[0];
};
-/*
- * We default to dicing tty buffer allocations to this many characters
- * in order to avoid multiple page allocations. We know the size of
- * tty_buffer itself but it must also be taken into account that the
- * the buffer is 256 byte aligned. See tty_buffer_find for the allocation
- * logic this must match
- */
-
-#define TTY_BUFFER_PAGE (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
+static inline unsigned char *char_buf_ptr(struct tty_buffer *b, int ofs)
+{
+ return ((unsigned char *)b->data) + ofs;
+}
+static inline char *flag_buf_ptr(struct tty_buffer *b, int ofs)
+{
+ return (char *)char_buf_ptr(b, ofs) + b->size;
+}
struct tty_bufhead {
- struct work_struct work;
- spinlock_t lock;
struct tty_buffer *head; /* Queue head */
+ struct work_struct work;
+ struct mutex lock;
+ atomic_t priority;
+ struct tty_buffer sentinel;
+ struct llist_head free; /* Free queue head */
+ atomic_t memory_used; /* In-use buffers excluding free list */
struct tty_buffer *tail; /* Active buffer */
- struct tty_buffer *free; /* Free queue head */
- int memory_used; /* Buffer space used excluding
- free queue */
};
/*
* When a break, frame error, or parity error happens, these codes are
@@ -199,9 +202,6 @@ struct tty_port {
wait_queue_head_t close_wait; /* Close waiters */
wait_queue_head_t delta_msr_wait; /* Modem status change */
unsigned long flags; /* TTY flags ASY_*/
- unsigned long iflags; /* TTYP_ internal flags */
-#define TTYP_FLUSHING 1 /* Flushing to ldisc in progress */
-#define TTYP_FLUSHPENDING 2 /* Queued buffer flush pending */
unsigned char console:1, /* port is a console */
low_latency:1; /* direct buffer flush */
struct mutex mutex; /* Locking */
@@ -238,14 +238,16 @@ struct tty_struct {
int index;
/* Protects ldisc changes: Lock tty not pty */
- struct mutex ldisc_mutex;
+ struct ld_semaphore ldisc_sem;
struct tty_ldisc *ldisc;
struct mutex atomic_write_lock;
struct mutex legacy_mutex;
- struct mutex termios_mutex;
+ struct mutex throttle_mutex;
+ struct rw_semaphore termios_rwsem;
+ struct mutex winsize_mutex;
spinlock_t ctrl_lock;
- /* Termios values are protected by the termios mutex */
+ /* Termios values are protected by the termios rwsem */
struct ktermios termios, termios_locked;
struct termiox *termiox; /* May be NULL for unsupported */
char name[64];
@@ -253,7 +255,7 @@ struct tty_struct {
struct pid *session;
unsigned long flags;
int count;
- struct winsize winsize; /* termios mutex */
+ struct winsize winsize; /* winsize_mutex */
unsigned char stopped:1, hw_stopped:1, flow_stopped:1, packet:1;
unsigned char ctrl_status; /* ctrl_lock */
unsigned int receive_room; /* Bytes free for queue */
@@ -303,10 +305,7 @@ struct tty_file_private {
#define TTY_EXCLUSIVE 3 /* Exclusive open mode */
#define TTY_DEBUG 4 /* Debugging */
#define TTY_DO_WRITE_WAKEUP 5 /* Call write_wakeup after queuing new */
-#define TTY_PUSH 6 /* n_tty private */
#define TTY_CLOSING 7 /* ->close() in progress */
-#define TTY_LDISC 9 /* Line discipline attached */
-#define TTY_LDISC_CHANGING 10 /* Line discipline changing */
#define TTY_LDISC_OPEN 11 /* Line discipline is open */
#define TTY_PTY_LOCK 16 /* pty private */
#define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
@@ -559,6 +558,19 @@ extern void tty_ldisc_init(struct tty_struct *tty);
extern void tty_ldisc_deinit(struct tty_struct *tty);
extern void tty_ldisc_begin(void);
+static inline int tty_ldisc_receive_buf(struct tty_ldisc *ld, unsigned char *p,
+ char *f, int count)
+{
+ if (ld->ops->receive_buf2)
+ count = ld->ops->receive_buf2(ld->tty, p, f, count);
+ else {
+ count = min_t(int, count, ld->tty->receive_room);
+ if (count)
+ ld->ops->receive_buf(ld->tty, p, f, count);
+ }
+ return count;
+}
+
/* n_tty.c */
extern struct tty_ldisc_ops tty_ldisc_N_TTY;
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index e0f252633b4..21ddd7d9ea1 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -1,6 +1,7 @@
#ifndef _LINUX_TTY_FLIP_H
#define _LINUX_TTY_FLIP_H
+extern int tty_buffer_space_avail(struct tty_port *port);
extern int tty_buffer_request_room(struct tty_port *port, size_t size);
extern int tty_insert_flip_string_flags(struct tty_port *port,
const unsigned char *chars, const char *flags, size_t size);
@@ -18,8 +19,8 @@ static inline int tty_insert_flip_char(struct tty_port *port,
{
struct tty_buffer *tb = port->buf.tail;
if (tb && tb->used < tb->size) {
- tb->flag_buf_ptr[tb->used] = flag;
- tb->char_buf_ptr[tb->used++] = ch;
+ *flag_buf_ptr(tb, tb->used) = flag;
+ *char_buf_ptr(tb, tb->used++) = ch;
return 1;
}
return tty_insert_flip_string_flags(port, &ch, &flag, 1);
@@ -31,4 +32,7 @@ static inline int tty_insert_flip_string(struct tty_port *port,
return tty_insert_flip_string_fixed_flag(port, chars, TTY_NORMAL, size);
}
+extern void tty_buffer_lock_exclusive(struct tty_port *port);
+extern void tty_buffer_unlock_exclusive(struct tty_port *port);
+
#endif /* _LINUX_TTY_FLIP_H */
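A small sketch of the unchanged driver-facing pattern after the buffer-layout rework: a serial driver still queues received bytes through the flip-buffer helpers and pushes them to the line discipline (port and ch are illustrative):

#include <linux/tty.h>
#include <linux/tty_flip.h>

static void example_rx_byte(struct tty_port *port, unsigned char ch)
{
	tty_insert_flip_char(port, ch, TTY_NORMAL);
	tty_flip_buffer_push(port);
}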
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index a1b04899982..f15c898ff46 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -109,6 +109,17 @@
*
* Tells the discipline that the DCD pin has changed its status.
* Used exclusively by the N_PPS (Pulse-Per-Second) line discipline.
+ *
+ * int (*receive_buf2)(struct tty_struct *, const unsigned char *cp,
+ * char *fp, int count);
+ *
+ * This function is called by the low-level tty driver to send
+ * characters received by the hardware to the line discipline for
+ * processing. <cp> is a pointer to the buffer of input
+ * characters received by the device. <fp> is a pointer to a
+ * buffer of flag bytes which indicate whether a character was
+ * received with a parity error, etc.
+ * If assigned, prefer this function for automatic flow control.
*/
#include <linux/fs.h>
@@ -195,6 +206,8 @@ struct tty_ldisc_ops {
void (*write_wakeup)(struct tty_struct *);
void (*dcd_change)(struct tty_struct *, unsigned int);
void (*fasync)(struct tty_struct *tty, int on);
+ int (*receive_buf2)(struct tty_struct *, const unsigned char *cp,
+ char *fp, int count);
struct module *owner;
@@ -203,8 +216,7 @@ struct tty_ldisc_ops {
struct tty_ldisc {
struct tty_ldisc_ops *ops;
- atomic_t users;
- wait_queue_head_t wq_idle;
+ struct tty_struct *tty;
};
#define TTY_LDISC_MAGIC 0x5403
diff --git a/include/linux/usb.h b/include/linux/usb.h
index a232b7ece1f..001629cd1a9 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -337,6 +337,7 @@ struct usb_bus {
* the ep queue on a short transfer
* with the URB_SHORT_NOT_OK flag set.
*/
+ unsigned no_sg_constraint:1; /* no sg constraint */
unsigned sg_tablesize; /* 0 or largest number of sg list entries */
int devnum_next; /* Next open device number in
@@ -367,17 +368,6 @@ struct usb_bus {
/* ----------------------------------------------------------------------- */
-/* This is arbitrary.
- * From USB 2.0 spec Table 11-13, offset 7, a hub can
- * have up to 255 ports. The most yet reported is 10.
- *
- * Current Wireless USB host hardware (Intel i1480 for example) allows
- * up to 22 devices to connect. Upcoming hardware might raise that
- * limit. Because the arrays need to add a bit for hub status data, we
- * do 31, so plus one evens out to four bytes.
- */
-#define USB_MAXCHILDREN (31)
-
struct usb_tt;
enum usb_device_removable {
@@ -695,6 +685,11 @@ static inline bool usb_device_supports_ltm(struct usb_device *udev)
return udev->bos->ss_cap->bmAttributes & USB_LTM_SUPPORT;
}
+static inline bool usb_device_no_sg_constraint(struct usb_device *udev)
+{
+ return udev && udev->bus && udev->bus->no_sg_constraint;
+}
+
/*-------------------------------------------------------------------------*/
@@ -719,7 +714,10 @@ extern int usb_driver_claim_interface(struct usb_driver *driver,
* usb_interface_claimed - returns true iff an interface is claimed
* @iface: the interface being checked
*
- * Returns true (nonzero) iff the interface is claimed, else false (zero).
+ * Return: %true (nonzero) iff the interface is claimed, else %false
+ * (zero).
+ *
+ * Note:
* Callers must own the driver model's usb bus readlock. So driver
* probe() entries don't need extra locking, but other call contexts
* may need to explicitly claim that lock.
@@ -756,8 +754,9 @@ extern struct usb_host_interface *usb_find_alt_setting(
* @buf: where to put the string
* @size: how big is "buf"?
*
- * Returns length of the string (> 0) or negative if size was too small.
+ * Return: Length of the string (> 0) or negative if size was too small.
*
+ * Note:
* This identifier is intended to be "stable", reflecting physical paths in
* hardware such as physical bus addresses for host controllers or ports on
* USB hubs. That makes it stay the same until systems are physically
@@ -1258,7 +1257,9 @@ typedef void (*usb_complete_t)(struct urb *);
* the device driver is saying that it provided this DMA address,
* which the host controller driver should use in preference to the
* transfer_buffer.
- * @sg: scatter gather buffer list
+ * @sg: scatter gather buffer list; the buffer size of each element in
+ * the list (except the last) must be divisible by the endpoint's
+ * max packet size if no_sg_constraint isn't set in 'struct usb_bus'
* @num_mapped_sgs: (internal) number of mapped sg entries
* @num_sgs: number of entries in the sg list
* @transfer_buffer_length: How big is transfer_buffer. The transfer may
@@ -1545,10 +1546,16 @@ static inline void usb_fill_int_urb(struct urb *urb,
urb->transfer_buffer_length = buffer_length;
urb->complete = complete_fn;
urb->context = context;
- if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER)
+
+ if (dev->speed == USB_SPEED_HIGH || dev->speed == USB_SPEED_SUPER) {
+ /* make sure interval is within allowed range */
+ interval = clamp(interval, 1, 16);
+
urb->interval = 1 << (interval - 1);
- else
+ } else {
urb->interval = interval;
+ }
+
urb->start_frame = -1;
}
@@ -1581,7 +1588,7 @@ extern int usb_anchor_empty(struct usb_anchor *anchor);
* usb_urb_dir_in - check if an URB describes an IN transfer
* @urb: URB to be checked
*
- * Returns 1 if @urb describes an IN transfer (device-to-host),
+ * Return: 1 if @urb describes an IN transfer (device-to-host),
* otherwise 0.
*/
static inline int usb_urb_dir_in(struct urb *urb)
@@ -1593,7 +1600,7 @@ static inline int usb_urb_dir_in(struct urb *urb)
* usb_urb_dir_out - check if an URB describes an OUT transfer
* @urb: URB to be checked
*
- * Returns 1 if @urb describes an OUT transfer (host-to-device),
+ * Return: 1 if @urb describes an OUT transfer (host-to-device),
* otherwise 0.
*/
static inline int usb_urb_dir_out(struct urb *urb)
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index 25629948c84..7d399671a56 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -18,12 +18,17 @@ struct ci_hdrc_platform_data {
unsigned long flags;
#define CI_HDRC_REGS_SHARED BIT(0)
#define CI_HDRC_REQUIRE_TRANSCEIVER BIT(1)
-#define CI_HDRC_PULLUP_ON_VBUS BIT(2)
#define CI_HDRC_DISABLE_STREAMING BIT(3)
+ /*
+ * Set this only when DCCPARAMS.DC==1 and DCCPARAMS.HC==1 but OTG is
+ * not supported (i.e. there is no OTGSC register).
+ */
+#define CI_HDRC_DUAL_ROLE_NOT_OTG BIT(4)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
void (*notify_event) (struct ci_hdrc *ci, unsigned event);
+ struct regulator *reg_vbus;
};
/* Default offset of capability registers */
diff --git a/include/linux/usb/dwc3-omap.h b/include/linux/usb/dwc3-omap.h
deleted file mode 100644
index 5615f4d8272..00000000000
--- a/include/linux/usb/dwc3-omap.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2013 by Texas Instruments
- *
- * The Inventra Controller Driver for Linux is free software; you
- * can redistribute it and/or modify it under the terms of the GNU
- * General Public License version 2 as published by the Free Software
- * Foundation.
- */
-
-#ifndef __DWC3_OMAP_H__
-#define __DWC3_OMAP_H__
-
-enum omap_dwc3_vbus_id_status {
- OMAP_DWC3_UNKNOWN = 0,
- OMAP_DWC3_ID_GROUND,
- OMAP_DWC3_ID_FLOAT,
- OMAP_DWC3_VBUS_VALID,
- OMAP_DWC3_VBUS_OFF,
-};
-
-#if (defined(CONFIG_USB_DWC3) || defined(CONFIG_USB_DWC3_MODULE))
-extern int dwc3_omap_mailbox(enum omap_dwc3_vbus_id_status status);
-#else
-static inline int dwc3_omap_mailbox(enum omap_dwc3_vbus_id_status status)
-{
- return -ENODEV;
-}
-#endif
-
-#endif /* __DWC3_OMAP_H__ */
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index f1b0dca60f1..942ef5e053b 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
+#include <linux/workqueue.h>
#include <linux/usb/ch9.h>
struct usb_ep;
@@ -475,6 +476,7 @@ struct usb_gadget_ops {
/**
* struct usb_gadget - represents a usb slave device
+ * @work: (internal use) Workqueue to be used for sysfs_notify()
* @ops: Function pointers used to access hardware-specific operations.
* @ep0: Endpoint zero, used when reading or writing responses to
* driver setup() requests
@@ -520,6 +522,7 @@ struct usb_gadget_ops {
* device is acting as a B-Peripheral (so is_a_peripheral is false).
*/
struct usb_gadget {
+ struct work_struct work;
/* readonly to gadget driver */
const struct usb_gadget_ops *ops;
struct usb_ep *ep0;
@@ -538,6 +541,7 @@ struct usb_gadget {
unsigned out_epnum;
unsigned in_epnum;
};
+#define work_to_gadget(w) (container_of((w), struct usb_gadget, work))
static inline void set_gadget_data(struct usb_gadget *gadget, void *data)
{ dev_set_drvdata(&gadget->dev, data); }
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 1e88377e22f..75efc45eaa2 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -22,6 +22,7 @@
#ifdef __KERNEL__
#include <linux/rwsem.h>
+#include <linux/interrupt.h>
#define MAX_TOPO_LEVEL 6
@@ -67,6 +68,13 @@
/*-------------------------------------------------------------------------*/
+struct giveback_urb_bh {
+ bool running;
+ spinlock_t lock;
+ struct list_head head;
+ struct tasklet_struct bh;
+};
+
struct usb_hcd {
/*
@@ -139,6 +147,9 @@ struct usb_hcd {
resource_size_t rsrc_len; /* memory/io resource length */
unsigned power_budget; /* in mA, 0 = no limit */
+ struct giveback_urb_bh high_prio_bh;
+ struct giveback_urb_bh low_prio_bh;
+
/* bandwidth_mutex should be taken before adding or removing
* any new bus bandwidth constraints:
* 1. Before adding a configuration for a new device.
@@ -221,6 +232,7 @@ struct hc_driver {
#define HCD_USB25 0x0030 /* Wireless USB 1.0 (USB 2.5)*/
#define HCD_USB3 0x0040 /* USB 3.0 */
#define HCD_MASK 0x0070
+#define HCD_BH 0x0100 /* URB complete in BH context */
/* called to init HCD and root hub */
int (*reset) (struct usb_hcd *hcd);
@@ -361,6 +373,11 @@ struct hc_driver {
int (*find_raw_port_number)(struct usb_hcd *, int);
};
+static inline int hcd_giveback_urb_in_bh(struct usb_hcd *hcd)
+{
+ return hcd->driver->flags & HCD_BH;
+}
+
extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
extern int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
int status);
@@ -411,7 +428,7 @@ extern int usb_hcd_pci_probe(struct pci_dev *dev,
extern void usb_hcd_pci_remove(struct pci_dev *dev);
extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
#endif
#endif /* CONFIG_PCI */
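HCD_BH lets a host-controller driver request URB giveback in tasklet (bottom-half) context; core code can test this with hcd_giveback_urb_in_bh() and route completions through high_prio_bh/low_prio_bh. A hedged sketch of a driver opting in ("example-hcd" and the omitted callbacks are placeholders):

static const struct hc_driver example_hc_driver = {
	.description	= "example-hcd",
	.flags		= HCD_USB2 | HCD_BH,	/* give back URBs from a tasklet */
	/* .reset, .start, .urb_enqueue, ... omitted in this sketch */
};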
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index a0ef405368b..8c38aa26b3b 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -7,19 +7,27 @@
#ifndef __LINUX_USB_OF_H
#define __LINUX_USB_OF_H
+#include <linux/usb/ch9.h>
#include <linux/usb/otg.h>
#include <linux/usb/phy.h>
#if IS_ENABLED(CONFIG_OF)
enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np);
+enum usb_device_speed of_usb_get_maximum_speed(struct device_node *np);
#else
static inline enum usb_dr_mode of_usb_get_dr_mode(struct device_node *np)
{
return USB_DR_MODE_UNKNOWN;
}
+
+static inline enum usb_device_speed
+of_usb_get_maximum_speed(struct device_node *np)
+{
+ return USB_SPEED_UNKNOWN;
+}
#endif
-#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_PHY)
+#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT)
enum usb_phy_interface of_usb_get_phy_mode(struct device_node *np);
#else
static inline enum usb_phy_interface of_usb_get_phy_mode(struct device_node *np)
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index 44036808bf0..6c0b1c513db 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -142,7 +142,7 @@ extern void usb_remove_phy(struct usb_phy *);
/* helpers for direct access thru low-level io interface */
static inline int usb_phy_io_read(struct usb_phy *x, u32 reg)
{
- if (x->io_ops && x->io_ops->read)
+ if (x && x->io_ops && x->io_ops->read)
return x->io_ops->read(x, reg);
return -EINVAL;
@@ -150,7 +150,7 @@ static inline int usb_phy_io_read(struct usb_phy *x, u32 reg)
static inline int usb_phy_io_write(struct usb_phy *x, u32 val, u32 reg)
{
- if (x->io_ops && x->io_ops->write)
+ if (x && x->io_ops && x->io_ops->write)
return x->io_ops->write(x, val, reg);
return -EINVAL;
@@ -159,7 +159,7 @@ static inline int usb_phy_io_write(struct usb_phy *x, u32 val, u32 reg)
static inline int
usb_phy_init(struct usb_phy *x)
{
- if (x->init)
+ if (x && x->init)
return x->init(x);
return 0;
@@ -168,14 +168,14 @@ usb_phy_init(struct usb_phy *x)
static inline void
usb_phy_shutdown(struct usb_phy *x)
{
- if (x->shutdown)
+ if (x && x->shutdown)
x->shutdown(x);
}
static inline int
usb_phy_vbus_on(struct usb_phy *x)
{
- if (!x->set_vbus)
+ if (!x || !x->set_vbus)
return 0;
return x->set_vbus(x, true);
@@ -184,7 +184,7 @@ usb_phy_vbus_on(struct usb_phy *x)
static inline int
usb_phy_vbus_off(struct usb_phy *x)
{
- if (!x->set_vbus)
+ if (!x || !x->set_vbus)
return 0;
return x->set_vbus(x, false);
@@ -258,7 +258,7 @@ usb_phy_set_power(struct usb_phy *x, unsigned mA)
static inline int
usb_phy_set_suspend(struct usb_phy *x, int suspend)
{
- if (x->set_suspend != NULL)
+ if (x && x->set_suspend != NULL)
return x->set_suspend(x, suspend);
else
return 0;
@@ -267,7 +267,7 @@ usb_phy_set_suspend(struct usb_phy *x, int suspend)
static inline int
usb_phy_notify_connect(struct usb_phy *x, enum usb_device_speed speed)
{
- if (x->notify_connect)
+ if (x && x->notify_connect)
return x->notify_connect(x, speed);
else
return 0;
@@ -276,7 +276,7 @@ usb_phy_notify_connect(struct usb_phy *x, enum usb_device_speed speed)
static inline int
usb_phy_notify_disconnect(struct usb_phy *x, enum usb_device_speed speed)
{
- if (x->notify_disconnect)
+ if (x && x->notify_disconnect)
return x->notify_disconnect(x, speed);
else
return 0;
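The added NULL checks make the usb_phy_* helpers safe to call with a missing transceiver, so a controller driver with an optional PHY no longer needs to guard every call. A minimal sketch, assuming devm_usb_get_phy() and USB_PHY_TYPE_USB2 from this header's era (needs <linux/err.h> and <linux/usb/phy.h>):

static int example_attach_optional_phy(struct device *dev)
{
	struct usb_phy *phy = devm_usb_get_phy(dev, USB_PHY_TYPE_USB2);

	if (IS_ERR(phy))
		phy = NULL;			/* PHY is optional on this board */

	usb_phy_init(phy);			/* safe: returns 0 when phy is NULL */
	return usb_phy_set_suspend(phy, 0);	/* safe as well */
}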
diff --git a/include/linux/usb/tegra_usb_phy.h b/include/linux/usb/tegra_usb_phy.h
index d2ca919a5b7..1de16c324ec 100644
--- a/include/linux/usb/tegra_usb_phy.h
+++ b/include/linux/usb/tegra_usb_phy.h
@@ -18,19 +18,36 @@
#include <linux/clk.h>
#include <linux/usb/otg.h>
+/*
+ * utmi_pll_config_in_car_module: true if the UTMI PLL configuration registers
+ * should be set up by clk-tegra, false if by the PHY code
+ * has_hostpc: true if the USB controller has the HOSTPC extension, which
+ * changes the location of the PHCD and PTS fields
+ * requires_usbmode_setup: true if the USBMODE register needs to be set to
+ * enter host mode
+ * requires_extra_tuning_parameters: true if xcvr_hsslew, hssquelch_level
+ * and hsdiscon_level should be set for adequate signal quality
+ */
+
+struct tegra_phy_soc_config {
+ bool utmi_pll_config_in_car_module;
+ bool has_hostpc;
+ bool requires_usbmode_setup;
+ bool requires_extra_tuning_parameters;
+};
+
struct tegra_utmip_config {
u8 hssync_start_delay;
u8 elastic_limit;
u8 idle_wait_delay;
u8 term_range_adj;
+ bool xcvr_setup_use_fuses;
u8 xcvr_setup;
u8 xcvr_lsfslew;
u8 xcvr_lsrslew;
-};
-
-struct tegra_ulpi_config {
- int reset_gpio;
- const char *clk;
+ u8 xcvr_hsslew;
+ u8 hssquelch_level;
+ u8 hsdiscon_level;
};
enum tegra_usb_phy_port_speed {
@@ -39,12 +56,6 @@ enum tegra_usb_phy_port_speed {
TEGRA_USB_PHY_PORT_SPEED_HIGH,
};
-enum tegra_usb_phy_mode {
- TEGRA_USB_PHY_MODE_DEVICE,
- TEGRA_USB_PHY_MODE_HOST,
- TEGRA_USB_PHY_MODE_OTG,
-};
-
struct tegra_xtal_freq;
struct tegra_usb_phy {
@@ -55,18 +66,17 @@ struct tegra_usb_phy {
struct clk *clk;
struct clk *pll_u;
struct clk *pad_clk;
- enum tegra_usb_phy_mode mode;
+ struct regulator *vbus;
+ enum usb_dr_mode mode;
void *config;
+ const struct tegra_phy_soc_config *soc_config;
struct usb_phy *ulpi;
struct usb_phy u_phy;
- struct device *dev;
bool is_legacy_phy;
bool is_ulpi_phy;
int reset_gpio;
};
-struct usb_phy *tegra_usb_get_phy(struct device_node *dn);
-
void tegra_usb_phy_preresume(struct usb_phy *phy);
void tegra_usb_phy_postresume(struct usb_phy *phy);
diff --git a/include/linux/usb/nop-usb-xceiv.h b/include/linux/usb/usb_phy_gen_xceiv.h
index 148d35171aa..f9a7e7bc925 100644
--- a/include/linux/usb/nop-usb-xceiv.h
+++ b/include/linux/usb/usb_phy_gen_xceiv.h
@@ -3,7 +3,7 @@
#include <linux/usb/otg.h>
-struct nop_usb_xceiv_platform_data {
+struct usb_phy_gen_xceiv_platform_data {
enum usb_phy_type type;
unsigned long clk_rate;
@@ -12,7 +12,7 @@ struct nop_usb_xceiv_platform_data {
unsigned int needs_reset:1;
};
-#if defined(CONFIG_NOP_USB_XCEIV) || (defined(CONFIG_NOP_USB_XCEIV_MODULE) && defined(MODULE))
+#if IS_ENABLED(CONFIG_NOP_USB_XCEIV)
/* sometimes transceivers are accessed only through e.g. ULPI */
extern void usb_nop_xceiv_register(void);
extern void usb_nop_xceiv_unregister(void);
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index f18d64129f9..9cb2fe8ca94 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -34,6 +34,8 @@ struct usbnet {
struct mutex phy_mutex;
unsigned char suspend_count;
unsigned char pkt_cnt, pkt_err;
+ unsigned short rx_qlen, tx_qlen;
+ unsigned can_dma_sg:1;
/* i/o info: pipes etc */
unsigned in, out;
@@ -253,4 +255,6 @@ extern void usbnet_link_change(struct usbnet *, bool, bool);
extern int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags);
extern void usbnet_status_stop(struct usbnet *dev);
+extern void usbnet_update_max_qlen(struct usbnet *dev);
+
#endif /* __LINUX_USB_USBNET_H */
diff --git a/include/linux/usb/wusb-wa.h b/include/linux/usb/wusb-wa.h
index 6be985b2a43..4ff744e2b67 100644
--- a/include/linux/usb/wusb-wa.h
+++ b/include/linux/usb/wusb-wa.h
@@ -66,6 +66,7 @@ enum {
WA_ENABLE = 0x01,
WA_RESET = 0x02,
RPIPE_PAUSE = 0x1,
+ RPIPE_STALL = 0x2,
};
/* Responses from Get Status request ([WUSB] section 8.3.1.6) */
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index b6b215f13b4..14105c26a83 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -23,6 +23,7 @@ struct user_namespace {
struct uid_gid_map projid_map;
atomic_t count;
struct user_namespace *parent;
+ int level;
kuid_t owner;
kgid_t group;
unsigned int proc_inum;
diff --git a/include/linux/uwb/spec.h b/include/linux/uwb/spec.h
index b52e44f1bd3..0df24bfcdb3 100644
--- a/include/linux/uwb/spec.h
+++ b/include/linux/uwb/spec.h
@@ -32,6 +32,7 @@
#include <linux/types.h>
#include <linux/bitmap.h>
+#include <linux/if_ether.h>
#define i1480_FW 0x00000303
/* #define i1480_FW 0x00000302 */
@@ -130,7 +131,7 @@ enum { UWB_DRP_BACKOFF_WIN_MAX = 16 };
* it is also used to define headers sent down and up the wire/radio).
*/
struct uwb_mac_addr {
- u8 data[6];
+ u8 data[ETH_ALEN];
} __attribute__((packed));
@@ -568,7 +569,7 @@ struct uwb_rc_evt_confirm {
/* Device Address Management event. [WHCI] section 3.1.3.2. */
struct uwb_rc_evt_dev_addr_mgmt {
struct uwb_rceb rceb;
- u8 baAddr[6];
+ u8 baAddr[ETH_ALEN];
u8 bResultCode;
} __attribute__((packed));
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index ddb419cf453..502073a53dd 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -45,7 +45,8 @@ struct vga_switcheroo_client_ops {
#if defined(CONFIG_VGA_SWITCHEROO)
void vga_switcheroo_unregister_client(struct pci_dev *dev);
int vga_switcheroo_register_client(struct pci_dev *dev,
- const struct vga_switcheroo_client_ops *ops);
+ const struct vga_switcheroo_client_ops *ops,
+ bool driver_power_control);
int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
int id, bool active);
@@ -60,11 +61,15 @@ int vga_switcheroo_process_delayed_switch(void);
int vga_switcheroo_get_client_state(struct pci_dev *dev);
+void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
+
+int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
+int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
#else
static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
static inline int vga_switcheroo_register_client(struct pci_dev *dev,
- const struct vga_switcheroo_client_ops *ops) { return 0; }
+ const struct vga_switcheroo_client_ops *ops, bool driver_power_control) { return 0; }
static inline void vga_switcheroo_client_fb_set(struct pci_dev *dev, struct fb_info *info) {}
static inline int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) { return 0; }
static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
@@ -74,6 +79,10 @@ static inline void vga_switcheroo_unregister_handler(void) {}
static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
+static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
+
+static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
+static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
#endif
#endif /* _LINUX_VGA_SWITCHEROO_H_ */
diff --git a/include/linux/vmpressure.h b/include/linux/vmpressure.h
index 76be077340e..3f3788d4936 100644
--- a/include/linux/vmpressure.h
+++ b/include/linux/vmpressure.h
@@ -12,7 +12,7 @@ struct vmpressure {
unsigned long scanned;
unsigned long reclaimed;
/* The lock is used to keep the scanned/reclaimed above in sync. */
- struct mutex sr_lock;
+ struct spinlock sr_lock;
/* The list of vmpressure_event structs. */
struct list_head events;
@@ -30,13 +30,16 @@ extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
extern void vmpressure_init(struct vmpressure *vmpr);
+extern void vmpressure_cleanup(struct vmpressure *vmpr);
extern struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg);
extern struct cgroup_subsys_state *vmpressure_to_css(struct vmpressure *vmpr);
extern struct vmpressure *css_to_vmpressure(struct cgroup_subsys_state *css);
-extern int vmpressure_register_event(struct cgroup *cg, struct cftype *cft,
+extern int vmpressure_register_event(struct cgroup_subsys_state *css,
+ struct cftype *cft,
struct eventfd_ctx *eventfd,
const char *args);
-extern void vmpressure_unregister_event(struct cgroup *cg, struct cftype *cft,
+extern void vmpressure_unregister_event(struct cgroup_subsys_state *css,
+ struct cftype *cft,
struct eventfd_ctx *eventfd);
#else
static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index b1dd2db8007..f5b72b364bd 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -1,18 +1,68 @@
#ifndef _LINUX_KERNEL_VTIME_H
#define _LINUX_KERNEL_VTIME_H
+#include <linux/context_tracking_state.h>
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+#include <asm/vtime.h>
+#endif
+
+
struct task_struct;
+/*
+ * vtime_accounting_enabled() definitions/declarations
+ */
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+static inline bool vtime_accounting_enabled(void) { return true; }
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+static inline bool vtime_accounting_enabled(void)
+{
+ if (static_key_false(&context_tracking_enabled)) {
+ if (context_tracking_active())
+ return true;
+ }
+
+ return false;
+}
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+static inline bool vtime_accounting_enabled(void) { return false; }
+#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
+
+
+/*
+ * Common vtime APIs
+ */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+
+#ifdef __ARCH_HAS_VTIME_TASK_SWITCH
extern void vtime_task_switch(struct task_struct *prev);
+#else
+extern void vtime_common_task_switch(struct task_struct *prev);
+static inline void vtime_task_switch(struct task_struct *prev)
+{
+ if (vtime_accounting_enabled())
+ vtime_common_task_switch(prev);
+}
+#endif /* __ARCH_HAS_VTIME_TASK_SWITCH */
+
extern void vtime_account_system(struct task_struct *tsk);
extern void vtime_account_idle(struct task_struct *tsk);
extern void vtime_account_user(struct task_struct *tsk);
-extern void vtime_account_irq_enter(struct task_struct *tsk);
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-static inline bool vtime_accounting_enabled(void) { return true; }
-#endif
+#ifdef __ARCH_HAS_VTIME_ACCOUNT
+extern void vtime_account_irq_enter(struct task_struct *tsk);
+#else
+extern void vtime_common_account_irq_enter(struct task_struct *tsk);
+static inline void vtime_account_irq_enter(struct task_struct *tsk)
+{
+ if (vtime_accounting_enabled())
+ vtime_common_account_irq_enter(tsk);
+}
+#endif /* __ARCH_HAS_VTIME_ACCOUNT */
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
@@ -20,14 +70,20 @@ static inline void vtime_task_switch(struct task_struct *prev) { }
static inline void vtime_account_system(struct task_struct *tsk) { }
static inline void vtime_account_user(struct task_struct *tsk) { }
static inline void vtime_account_irq_enter(struct task_struct *tsk) { }
-static inline bool vtime_accounting_enabled(void) { return false; }
-#endif
+#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void arch_vtime_task_switch(struct task_struct *tsk);
-extern void vtime_account_irq_exit(struct task_struct *tsk);
-extern bool vtime_accounting_enabled(void);
+extern void vtime_gen_account_irq_exit(struct task_struct *tsk);
+
+static inline void vtime_account_irq_exit(struct task_struct *tsk)
+{
+ if (vtime_accounting_enabled())
+ vtime_gen_account_irq_exit(tsk);
+}
+
extern void vtime_user_enter(struct task_struct *tsk);
+
static inline void vtime_user_exit(struct task_struct *tsk)
{
vtime_account_user(tsk);
@@ -35,7 +91,7 @@ static inline void vtime_user_exit(struct task_struct *tsk)
extern void vtime_guest_enter(struct task_struct *tsk);
extern void vtime_guest_exit(struct task_struct *tsk);
extern void vtime_init_idle(struct task_struct *tsk, int cpu);
-#else
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING_GEN */
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
/* On hard|softirq exit we always account to hard|softirq cputime */
diff --git a/include/linux/wait.h b/include/linux/wait.h
index f487a4750b7..a67fc163559 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -811,6 +811,63 @@ do { \
__ret; \
})
+#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \
+ lock, ret) \
+do { \
+ DEFINE_WAIT(__wait); \
+ \
+ for (;;) { \
+ prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
+ if (condition) \
+ break; \
+ if (signal_pending(current)) { \
+ ret = -ERESTARTSYS; \
+ break; \
+ } \
+ spin_unlock_irq(&lock); \
+ ret = schedule_timeout(ret); \
+ spin_lock_irq(&lock); \
+ if (!ret) \
+ break; \
+ } \
+ finish_wait(&wq, &__wait); \
+} while (0)
+
+/**
+ * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses.
+ * The condition is checked under the lock. This is expected
+ * to be called with the lock taken.
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @lock: a locked spinlock_t, which will be released before schedule()
+ * and reacquired afterwards.
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true or signal is received. The @condition is
+ * checked each time the waitqueue @wq is woken up.
+ *
+ * wake_up() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * This is supposed to be called while holding the lock. The lock is
+ * dropped before going to sleep and is reacquired afterwards.
+ *
+ * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
+ * was interrupted by a signal, and the remaining jiffies otherwise
+ * if the condition evaluated to true before the timeout elapsed.
+ */
+#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \
+ timeout) \
+({ \
+ int __ret = timeout; \
+ \
+ if (!(condition)) \
+ __wait_event_interruptible_lock_irq_timeout( \
+ wq, condition, lock, __ret); \
+ __ret; \
+})
+
/*
* These are the old interfaces to sleep waiting for an event.
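A usage sketch for the new macro, under its stated contract that the caller already holds the spinlock; the device structure and the one-second timeout are illustrative only:

struct example_dev {
	spinlock_t lock;
	wait_queue_head_t wq;
	struct list_head queue;
};

static int example_wait_for_data(struct example_dev *ed)
{
	int ret;

	spin_lock_irq(&ed->lock);
	ret = wait_event_interruptible_lock_irq_timeout(ed->wq,
					!list_empty(&ed->queue),
					ed->lock, HZ);
	spin_unlock_irq(&ed->lock);

	if (ret == 0)
		return -ETIMEDOUT;	/* timeout elapsed */
	if (ret == -ERESTARTSYS)
		return ret;		/* interrupted by a signal */
	return 0;			/* condition became true in time */
}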
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index a0ed78ab54d..594521ba0d4 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -295,7 +295,12 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
* Documentation/workqueue.txt.
*/
enum {
- WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */
+ /*
+ * All wqs are now non-reentrant making the following flag
+ * meaningless. Will be removed.
+ */
+ WQ_NON_REENTRANT = 1 << 0, /* DEPRECATED */
+
WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
WQ_FREEZABLE = 1 << 2, /* freeze during suspend */
WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */
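Since the flag is now a no-op, callers can simply drop it when allocating a workqueue; a hypothetical before/after sketch:

static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	/* previously: alloc_workqueue("example", WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 1) */
	example_wq = alloc_workqueue("example", WQ_MEM_RECLAIM, 1);
	return example_wq ? 0 : -ENOMEM;
}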
diff --git a/include/media/adv7343.h b/include/media/adv7343.h
index 944757be49b..e4142b1ef8c 100644
--- a/include/media/adv7343.h
+++ b/include/media/adv7343.h
@@ -28,12 +28,7 @@
* @pll_control: PLL and oversampling control. This control allows internal
* PLL 1 circuit to be powered down and the oversampling to be
* switched off.
- * @dac_1: power on/off DAC 1.
- * @dac_2: power on/off DAC 2.
- * @dac_3: power on/off DAC 3.
- * @dac_4: power on/off DAC 4.
- * @dac_5: power on/off DAC 5.
- * @dac_6: power on/off DAC 6.
+ * @dac: array to configure power on/off DAC's 1..6
*
* Power mode register (Register 0x0), for more info refer REGISTER MAP ACCESS
* section of datasheet[1], table 17 page no 30.
@@ -43,23 +38,16 @@
struct adv7343_power_mode {
bool sleep_mode;
bool pll_control;
- bool dac_1;
- bool dac_2;
- bool dac_3;
- bool dac_4;
- bool dac_5;
- bool dac_6;
+ u32 dac[6];
};
/**
* struct adv7343_sd_config - SD Only Output Configuration.
- * @sd_dac_out1: Configure SD DAC Output 1.
- * @sd_dac_out2: Configure SD DAC Output 2.
+ * @sd_dac_out: array configuring SD DAC Outputs 1 and 2
*/
struct adv7343_sd_config {
/* SD only Output Configuration */
- bool sd_dac_out1;
- bool sd_dac_out2;
+ u32 sd_dac_out[2];
};
/**
diff --git a/include/media/adv7511.h b/include/media/adv7511.h
new file mode 100644
index 00000000000..bb78bed9a5b
--- /dev/null
+++ b/include/media/adv7511.h
@@ -0,0 +1,48 @@
+/*
+ * Analog Devices ADV7511 HDMI Transmitter Device Driver
+ *
+ * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef ADV7511_H
+#define ADV7511_H
+
+/* notify events */
+#define ADV7511_MONITOR_DETECT 0
+#define ADV7511_EDID_DETECT 1
+
+
+struct adv7511_monitor_detect {
+ int present;
+};
+
+struct adv7511_edid_detect {
+ int present;
+ int segment;
+};
+
+struct adv7511_cec_arg {
+ void *arg;
+ u32 f_flags;
+};
+
+struct adv7511_platform_data {
+ uint8_t i2c_edid;
+ uint8_t i2c_cec;
+ uint32_t cec_clk;
+};
+
+#endif
diff --git a/include/media/adv7842.h b/include/media/adv7842.h
new file mode 100644
index 00000000000..c02201d1c09
--- /dev/null
+++ b/include/media/adv7842.h
@@ -0,0 +1,226 @@
+/*
+ * adv7842 - Analog Devices ADV7842 video decoder driver
+ *
+ * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _ADV7842_
+#define _ADV7842_
+
+/* Analog input muxing modes (AFE register 0x02, [2:0]) */
+enum adv7842_ain_sel {
+ ADV7842_AIN1_2_3_NC_SYNC_1_2 = 0,
+ ADV7842_AIN4_5_6_NC_SYNC_2_1 = 1,
+ ADV7842_AIN7_8_9_NC_SYNC_3_1 = 2,
+ ADV7842_AIN10_11_12_NC_SYNC_4_1 = 3,
+ ADV7842_AIN9_4_5_6_SYNC_2_1 = 4,
+};
+
+/* Bus rotation and reordering (IO register 0x04, [7:5]) */
+enum adv7842_op_ch_sel {
+ ADV7842_OP_CH_SEL_GBR = 0,
+ ADV7842_OP_CH_SEL_GRB = 1,
+ ADV7842_OP_CH_SEL_BGR = 2,
+ ADV7842_OP_CH_SEL_RGB = 3,
+ ADV7842_OP_CH_SEL_BRG = 4,
+ ADV7842_OP_CH_SEL_RBG = 5,
+};
+
+/* Mode of operation */
+enum adv7842_mode {
+ ADV7842_MODE_SDP,
+ ADV7842_MODE_COMP,
+ ADV7842_MODE_RGB,
+ ADV7842_MODE_HDMI
+};
+
+/* Video standard select (IO register 0x00, [5:0]) */
+enum adv7842_vid_std_select {
+ /* SDP */
+ ADV7842_SDP_VID_STD_CVBS_SD_4x1 = 0x01,
+ ADV7842_SDP_VID_STD_YC_SD4_x1 = 0x09,
+ /* RGB */
+ ADV7842_RGB_VID_STD_AUTO_GRAPH_MODE = 0x07,
+ /* HDMI GR */
+ ADV7842_HDMI_GR_VID_STD_AUTO_GRAPH_MODE = 0x02,
+ /* HDMI COMP */
+ ADV7842_HDMI_COMP_VID_STD_HD_1250P = 0x1e,
+};
+
+/* Input Color Space (IO register 0x02, [7:4]) */
+enum adv7842_inp_color_space {
+ ADV7842_INP_COLOR_SPACE_LIM_RGB = 0,
+ ADV7842_INP_COLOR_SPACE_FULL_RGB = 1,
+ ADV7842_INP_COLOR_SPACE_LIM_YCbCr_601 = 2,
+ ADV7842_INP_COLOR_SPACE_LIM_YCbCr_709 = 3,
+ ADV7842_INP_COLOR_SPACE_XVYCC_601 = 4,
+ ADV7842_INP_COLOR_SPACE_XVYCC_709 = 5,
+ ADV7842_INP_COLOR_SPACE_FULL_YCbCr_601 = 6,
+ ADV7842_INP_COLOR_SPACE_FULL_YCbCr_709 = 7,
+ ADV7842_INP_COLOR_SPACE_AUTO = 0xf,
+};
+
+/* Select output format (IO register 0x03, [7:0]) */
+enum adv7842_op_format_sel {
+ ADV7842_OP_FORMAT_SEL_SDR_ITU656_8 = 0x00,
+ ADV7842_OP_FORMAT_SEL_SDR_ITU656_10 = 0x01,
+ ADV7842_OP_FORMAT_SEL_SDR_ITU656_12_MODE0 = 0x02,
+ ADV7842_OP_FORMAT_SEL_SDR_ITU656_12_MODE1 = 0x06,
+ ADV7842_OP_FORMAT_SEL_SDR_ITU656_12_MODE2 = 0x0a,
+ ADV7842_OP_FORMAT_SEL_DDR_422_8 = 0x20,
+ ADV7842_OP_FORMAT_SEL_DDR_422_10 = 0x21,
+ ADV7842_OP_FORMAT_SEL_DDR_422_12_MODE0 = 0x22,
+ ADV7842_OP_FORMAT_SEL_DDR_422_12_MODE1 = 0x23,
+ ADV7842_OP_FORMAT_SEL_DDR_422_12_MODE2 = 0x24,
+ ADV7842_OP_FORMAT_SEL_SDR_444_24 = 0x40,
+ ADV7842_OP_FORMAT_SEL_SDR_444_30 = 0x41,
+ ADV7842_OP_FORMAT_SEL_SDR_444_36_MODE0 = 0x42,
+ ADV7842_OP_FORMAT_SEL_DDR_444_24 = 0x60,
+ ADV7842_OP_FORMAT_SEL_DDR_444_30 = 0x61,
+ ADV7842_OP_FORMAT_SEL_DDR_444_36 = 0x62,
+ ADV7842_OP_FORMAT_SEL_SDR_ITU656_16 = 0x80,
+ ADV7842_OP_FORMAT_SEL_SDR_ITU656_20 = 0x81,
+ ADV7842_OP_FORMAT_SEL_SDR_ITU656_24_MODE0 = 0x82,
+ ADV7842_OP_FORMAT_SEL_SDR_ITU656_24_MODE1 = 0x86,
+ ADV7842_OP_FORMAT_SEL_SDR_ITU656_24_MODE2 = 0x8a,
+};
+
+enum adv7842_select_input {
+ ADV7842_SELECT_HDMI_PORT_A,
+ ADV7842_SELECT_HDMI_PORT_B,
+ ADV7842_SELECT_VGA_RGB,
+ ADV7842_SELECT_VGA_COMP,
+ ADV7842_SELECT_SDP_CVBS,
+ ADV7842_SELECT_SDP_YC,
+};
+
+struct adv7842_sdp_csc_coeff {
+ bool manual;
+ uint16_t scaling;
+ uint16_t A1;
+ uint16_t A2;
+ uint16_t A3;
+ uint16_t A4;
+ uint16_t B1;
+ uint16_t B2;
+ uint16_t B3;
+ uint16_t B4;
+ uint16_t C1;
+ uint16_t C2;
+ uint16_t C3;
+ uint16_t C4;
+};
+
+struct adv7842_sdp_io_sync_adjustment {
+ bool adjust;
+ uint16_t hs_beg;
+ uint16_t hs_width;
+ uint16_t de_beg;
+ uint16_t de_end;
+};
+
+/* Platform dependent definition */
+struct adv7842_platform_data {
+ /* connector - HDMI or DVI? */
+ unsigned connector_hdmi:1;
+
+ /* chip reset during probe */
+ unsigned chip_reset:1;
+
+ /* DIS_PWRDNB: 1 if the PWRDNB pin is unused and unconnected */
+ unsigned disable_pwrdnb:1;
+
+ /* DIS_CABLE_DET_RST: 1 if the 5V pins are unused and unconnected */
+ unsigned disable_cable_det_rst:1;
+
+ /* Analog input muxing mode */
+ enum adv7842_ain_sel ain_sel;
+
+ /* Bus rotation and reordering */
+ enum adv7842_op_ch_sel op_ch_sel;
+
+ /* Default mode */
+ enum adv7842_mode mode;
+
+ /* Video standard */
+ enum adv7842_vid_std_select vid_std_select;
+
+ /* Input Color Space */
+ enum adv7842_inp_color_space inp_color_space;
+
+ /* Select output format */
+ enum adv7842_op_format_sel op_format_sel;
+
+ /* IO register 0x02 */
+ unsigned alt_gamma:1;
+ unsigned op_656_range:1;
+ unsigned rgb_out:1;
+ unsigned alt_data_sat:1;
+
+ /* IO register 0x05 */
+ unsigned blank_data:1;
+ unsigned insert_av_codes:1;
+ unsigned replicate_av_codes:1;
+ unsigned invert_cbcr:1;
+
+ /* IO register 0x30 */
+ unsigned output_bus_lsb_to_msb:1;
+
+ /* IO register 0x14 */
+ struct {
+ unsigned data:2;
+ unsigned clock:2;
+ unsigned sync:2;
+ } drive_strength;
+
+ /* External RAM for 3-D comb or frame synchronizer */
+ unsigned sd_ram_size; /* ram size in MB */
+ unsigned sd_ram_ddr:1; /* ddr or sdr sdram */
+
+ /* Free run */
+ unsigned hdmi_free_run_mode;
+
+ struct adv7842_sdp_csc_coeff sdp_csc_coeff;
+
+ struct adv7842_sdp_io_sync_adjustment sdp_io_sync;
+
+ /* i2c addresses */
+ u8 i2c_sdp_io;
+ u8 i2c_sdp;
+ u8 i2c_cp;
+ u8 i2c_vdp;
+ u8 i2c_afe;
+ u8 i2c_hdmi;
+ u8 i2c_repeater;
+ u8 i2c_edid;
+ u8 i2c_infoframe;
+ u8 i2c_cec;
+ u8 i2c_avlink;
+};
+
+#define V4L2_CID_ADV_RX_ANALOG_SAMPLING_PHASE (V4L2_CID_DV_CLASS_BASE + 0x1000)
+#define V4L2_CID_ADV_RX_FREE_RUN_COLOR_MANUAL (V4L2_CID_DV_CLASS_BASE + 0x1001)
+#define V4L2_CID_ADV_RX_FREE_RUN_COLOR (V4L2_CID_DV_CLASS_BASE + 0x1002)
+
+/* notify events */
+#define ADV7842_FMT_CHANGE 1
+
+/* custom ioctl, used to test the external RAM that's used by the
+ * deinterlacer. */
+#define ADV7842_CMD_RAM_TEST _IO('V', BASE_VIDIOC_PRIVATE)
+
+#endif
diff --git a/include/media/davinci/vpif_types.h b/include/media/davinci/vpif_types.h
index 3882e0675cc..3cb1704a065 100644
--- a/include/media/davinci/vpif_types.h
+++ b/include/media/davinci/vpif_types.h
@@ -59,6 +59,8 @@ struct vpif_display_config {
int subdev_count;
struct vpif_display_chan_config chan_config[VPIF_DISPLAY_MAX_CHANNELS];
const char *card_name;
+ struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */
+ int *asd_sizes; /* 0-terminated array of asd group sizes */
};
struct vpif_input {
@@ -81,5 +83,7 @@ struct vpif_capture_config {
struct vpif_subdev_info *subdev_info;
int subdev_count;
const char *card_name;
+ struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */
+ int *asd_sizes; /* 0-terminated array of asd group sizes */
};
#endif /* _VPIF_TYPES_H */
diff --git a/include/media/lirc_dev.h b/include/media/lirc_dev.h
index 168dd0b1bae..78f0637ca68 100644
--- a/include/media/lirc_dev.h
+++ b/include/media/lirc_dev.h
@@ -139,6 +139,7 @@ struct lirc_driver {
struct lirc_buffer *rbuf;
int (*set_use_inc) (void *data);
void (*set_use_dec) (void *data);
+ struct rc_dev *rdev;
const struct file_operations *fops;
struct device *dev;
struct module *owner;
diff --git a/include/media/media-entity.h b/include/media/media-entity.h
index 06bacf937d6..10df5518798 100644
--- a/include/media/media-entity.h
+++ b/include/media/media-entity.h
@@ -23,6 +23,7 @@
#ifndef _MEDIA_ENTITY_H
#define _MEDIA_ENTITY_H
+#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/media.h>
@@ -113,12 +114,15 @@ static inline u32 media_entity_subtype(struct media_entity *entity)
}
#define MEDIA_ENTITY_ENUM_MAX_DEPTH 16
+#define MEDIA_ENTITY_ENUM_MAX_ID 64
struct media_entity_graph {
struct {
struct media_entity *entity;
int link;
} stack[MEDIA_ENTITY_ENUM_MAX_DEPTH];
+
+ DECLARE_BITMAP(entities, MEDIA_ENTITY_ENUM_MAX_ID);
int top;
};
diff --git a/include/media/mt9v032.h b/include/media/mt9v032.h
index 78fd39eac21..12175a63c5b 100644
--- a/include/media/mt9v032.h
+++ b/include/media/mt9v032.h
@@ -1,13 +1,9 @@
#ifndef _MEDIA_MT9V032_H
#define _MEDIA_MT9V032_H
-struct v4l2_subdev;
-
struct mt9v032_platform_data {
unsigned int clk_pol:1;
- void (*set_clock)(struct v4l2_subdev *subdev, unsigned int rate);
-
const s64 *link_freqs;
s64 link_def_freq;
};
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 06a75deff55..2f6f1f78d95 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -101,6 +101,7 @@ struct rc_dev {
bool idle;
u64 allowed_protos;
u64 enabled_protocols;
+ u32 users;
u32 scanmask;
void *priv;
spinlock_t keylock;
@@ -142,6 +143,9 @@ void rc_free_device(struct rc_dev *dev);
int rc_register_device(struct rc_dev *dev);
void rc_unregister_device(struct rc_dev *dev);
+int rc_open(struct rc_dev *rdev);
+void rc_close(struct rc_dev *rdev);
+
void rc_repeat(struct rc_dev *dev);
void rc_keydown(struct rc_dev *dev, int scancode, u8 toggle);
void rc_keydown_notimeout(struct rc_dev *dev, int scancode, u8 toggle);
diff --git a/include/media/saa7115.h b/include/media/saa7115.h
index 407918625c8..76911e71de1 100644
--- a/include/media/saa7115.h
+++ b/include/media/saa7115.h
@@ -47,9 +47,11 @@
#define SAA7111_FMT_YUV411 0xc0
/* config flags */
-/* Register 0x85 should set bit 0 to 0 (it's 1 by default). This bit
+/*
+ * Register 0x85 should set bit 0 to 0 (it's 1 by default). This bit
* controls the IDQ signal polarity which is set to 'inverted' if the bit
- * it 1 and to 'default' if it is 0. */
+ * is 1 and to 'default' if it is 0.
+ */
#define SAA7115_IDQ_IS_DEFAULT (1 << 0)
/* s_crystal_freq values and flags */
@@ -64,5 +66,76 @@
#define SAA7115_FREQ_FL_APLL (1 << 2) /* SA 3A[3], APLL, SAA7114/5 only */
#define SAA7115_FREQ_FL_DOUBLE_ASCLK (1 << 3) /* SA 39, LRDIV, SAA7114/5 only */
+/* ===== SAA7113 Config enums ===== */
+
+/* Register 0x08 "Horizontal time constant" [Bit 3..4]:
+ * Should be set to "Fast Locking Mode" according to the datasheet,
+ * and that is the default setting in the gm7113c_init table.
+ * saa7113_init sets this value to "VTR Mode". */
+enum saa7113_r08_htc {
+ SAA7113_HTC_TV_MODE = 0x00,
+ SAA7113_HTC_VTR_MODE, /* Default for saa7113_init */
+ SAA7113_HTC_FAST_LOCKING_MODE = 0x03 /* Default for gm7113c_init */
+};
+
+/* Register 0x10 "Output format selection" [Bit 6..7]:
+ * Defaults to ITU_656 as specified in datasheet. */
+enum saa7113_r10_ofts {
+ SAA7113_OFTS_ITU_656 = 0x0, /* Default */
+ SAA7113_OFTS_VFLAG_BY_VREF,
+ SAA7113_OFTS_VFLAG_BY_DATA_TYPE
+};
+
+/*
+ * Register 0x12 "Output control" [Bit 0..3 Or Bit 4..7]:
+ * This is used to select what data is output on the RTS0 and RTS1 pins.
+ * RTS1 [Bit 4..7] Defaults to DOT_IN. (This value can not be set for RTS0)
+ * RTS0 [Bit 0..3] Defaults to VIPB in gm7113c_init as specified
+ * in the datasheet, but is set to HREF_HS in the saa7113_init table.
+ */
+enum saa7113_r12_rts {
+ SAA7113_RTS_DOT_IN = 0, /* OBS: Only for RTS1 (Default RTS1) */
+ SAA7113_RTS_VIPB, /* Default RTS0 For gm7113c_init */
+ SAA7113_RTS_GPSW,
+ SAA7115_RTS_HL,
+ SAA7113_RTS_VL,
+ SAA7113_RTS_DL,
+ SAA7113_RTS_PLIN,
+ SAA7113_RTS_HREF_HS, /* Default RTS0 For saa7113_init */
+ SAA7113_RTS_HS,
+ SAA7113_RTS_HQ,
+ SAA7113_RTS_ODD,
+ SAA7113_RTS_VS,
+ SAA7113_RTS_V123,
+ SAA7113_RTS_VGATE,
+ SAA7113_RTS_VREF,
+ SAA7113_RTS_FID
+};
+
+/**
+ * struct saa7115_platform_data - Allow overriding default initialization
+ *
+ * @saa7113_force_gm7113c_init: Force the use of the gm7113c_init table
+ * instead of saa7113_init table
+ * (saa7113 only)
+ * @saa7113_r08_htc: [R_08 - Bit 3..4]
+ * @saa7113_r10_vrln: [R_10 - Bit 3]
+ * default: Disabled for gm7113c_init
+ * Enabled for saa7113c_init
+ * @saa7113_r10_ofts: [R_10 - Bit 6..7]
+ * @saa7113_r12_rts0: [R_12 - Bit 0..3]
+ * @saa7113_r12_rts1: [R_12 - Bit 4..7]
+ * @saa7113_r13_adlsb: [R_13 - Bit 7] - default: disabled
+ */
+struct saa7115_platform_data {
+ bool saa7113_force_gm7113c_init;
+ enum saa7113_r08_htc *saa7113_r08_htc;
+ bool *saa7113_r10_vrln;
+ enum saa7113_r10_ofts *saa7113_r10_ofts;
+ enum saa7113_r12_rts *saa7113_r12_rts0;
+ enum saa7113_r12_rts *saa7113_r12_rts1;
+ bool *saa7113_r13_adlsb;
+};
+
#endif
diff --git a/include/media/smiapp.h b/include/media/smiapp.h
index 07f96a89e18..0b8f124a630 100644
--- a/include/media/smiapp.h
+++ b/include/media/smiapp.h
@@ -77,7 +77,6 @@ struct smiapp_platform_data {
struct smiapp_flash_strobe_parms *strobe_setup;
int (*set_xclk)(struct v4l2_subdev *sd, int hz);
- char *ext_clk_name;
int xshutdown; /* gpio or SMIAPP_NO_XSHUTDOWN */
};
diff --git a/include/sound/tea575x-tuner.h b/include/media/tea575x.h
index 098c4de4494..2d4fa59db90 100644
--- a/include/sound/tea575x-tuner.h
+++ b/include/media/tea575x.h
@@ -71,6 +71,7 @@ struct snd_tea575x {
int (*ext_init)(struct snd_tea575x *tea);
};
+int snd_tea575x_hw_init(struct snd_tea575x *tea);
int snd_tea575x_init(struct snd_tea575x *tea, struct module *owner);
void snd_tea575x_exit(struct snd_tea575x *tea);
void snd_tea575x_set_freq(struct snd_tea575x *tea);
diff --git a/include/media/tveeprom.h b/include/media/tveeprom.h
index 4a1191abd93..f7119ee3977 100644
--- a/include/media/tveeprom.h
+++ b/include/media/tveeprom.h
@@ -12,6 +12,8 @@ enum tveeprom_audio_processor {
TVEEPROM_AUDPROC_OTHER,
};
+#include <linux/if_ether.h>
+
struct tveeprom {
u32 has_radio;
/* If has_ir == 0, then it is unknown what the IR capabilities are,
@@ -40,7 +42,7 @@ struct tveeprom {
u32 revision;
u32 serial_number;
char rev_str[5];
- u8 MAC_address[6];
+ u8 MAC_address[ETH_ALEN];
};
void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h
index c3ec6ac75f7..768356917be 100644
--- a/include/media/v4l2-async.h
+++ b/include/media/v4l2-async.h
@@ -15,6 +15,7 @@
#include <linux/mutex.h>
struct device;
+struct device_node;
struct v4l2_device;
struct v4l2_subdev;
struct v4l2_async_notifier;
@@ -22,10 +23,11 @@ struct v4l2_async_notifier;
/* A random max subdevice number, used to allocate an array on stack */
#define V4L2_MAX_SUBDEVS 128U
-enum v4l2_async_bus_type {
- V4L2_ASYNC_BUS_CUSTOM,
- V4L2_ASYNC_BUS_PLATFORM,
- V4L2_ASYNC_BUS_I2C,
+enum v4l2_async_match_type {
+ V4L2_ASYNC_MATCH_CUSTOM,
+ V4L2_ASYNC_MATCH_DEVNAME,
+ V4L2_ASYNC_MATCH_I2C,
+ V4L2_ASYNC_MATCH_OF,
};
/**
@@ -36,11 +38,14 @@ enum v4l2_async_bus_type {
* probed, to a notifier->waiting list
*/
struct v4l2_async_subdev {
- enum v4l2_async_bus_type bus_type;
+ enum v4l2_async_match_type match_type;
union {
struct {
+ const struct device_node *node;
+ } of;
+ struct {
const char *name;
- } platform;
+ } device_name;
struct {
int adapter_id;
unsigned short address;
@@ -57,25 +62,12 @@ struct v4l2_async_subdev {
};
/**
- * v4l2_async_subdev_list - provided by subdevices
- * @list: links struct v4l2_async_subdev_list objects to a global list
- * before probing, and onto notifier->done after probing
- * @asd: pointer to respective struct v4l2_async_subdev
- * @notifier: pointer to managing notifier
- */
-struct v4l2_async_subdev_list {
- struct list_head list;
- struct v4l2_async_subdev *asd;
- struct v4l2_async_notifier *notifier;
-};
-
-/**
* v4l2_async_notifier - v4l2_device notifier data
* @num_subdevs:number of subdevices
- * @subdev: array of pointers to subdevice descriptors
+ * @subdevs: array of pointers to subdevice descriptors
* @v4l2_dev: pointer to struct v4l2_device
* @waiting: list of struct v4l2_async_subdev, waiting for their drivers
- * @done: list of struct v4l2_async_subdev_list, already probed
+ * @done: list of struct v4l2_subdev, already probed
* @list: member in a global list of notifiers
* @bound: a subdevice driver has successfully probed one of subdevices
* @complete: all subdevices have been probed successfully
@@ -83,7 +75,7 @@ struct v4l2_async_subdev_list {
*/
struct v4l2_async_notifier {
unsigned int num_subdevs;
- struct v4l2_async_subdev **subdev;
+ struct v4l2_async_subdev **subdevs;
struct v4l2_device *v4l2_dev;
struct list_head waiting;
struct list_head done;
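A hedged sketch of how a bridge driver might describe one subdevice with the renamed match types and notifier fields; the layout of the match union beyond what this hunk shows is not assumed, so the OF node assignment is only indicated in a comment:

static struct v4l2_async_subdev example_asd = {
	.match_type = V4L2_ASYNC_MATCH_OF,
	/* the device_node pointer inside the match union is filled at probe time */
};

static struct v4l2_async_subdev *example_asds[] = { &example_asd };

static struct v4l2_async_notifier example_notifier = {
	.num_subdevs = ARRAY_SIZE(example_asds),
	.subdevs     = example_asds,
};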
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 015ff82da73..16550c43900 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -86,6 +86,7 @@ int v4l2_ctrl_check(struct v4l2_ext_control *ctrl, struct v4l2_queryctrl *qctrl,
const char * const *menu_items);
const char *v4l2_ctrl_get_name(u32 id);
const char * const *v4l2_ctrl_get_menu(u32 id);
+const s64 const *v4l2_ctrl_get_int_menu(u32 id, u32 *len);
int v4l2_ctrl_query_fill(struct v4l2_queryctrl *qctrl, s32 min, s32 max, s32 step, s32 def);
int v4l2_ctrl_query_menu(struct v4l2_querymenu *qmenu,
struct v4l2_queryctrl *qctrl, const char * const *menu_items);
@@ -201,19 +202,6 @@ const struct v4l2_frmsize_discrete *v4l2_find_nearest_format(
const struct v4l2_discrete_probe *probe,
s32 width, s32 height);
-bool v4l_match_dv_timings(const struct v4l2_dv_timings *t1,
- const struct v4l2_dv_timings *t2,
- unsigned pclock_delta);
-
-bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
- u32 polarities, struct v4l2_dv_timings *fmt);
-
-bool v4l2_detect_gtf(unsigned frame_height, unsigned hfreq, unsigned vsync,
- u32 polarities, struct v4l2_fract aspect,
- struct v4l2_dv_timings *fmt);
-
-struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait);
-
void v4l2_get_timestamp(struct timeval *tv);
#endif /* V4L2_COMMON_H_ */
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 7343a27fe81..47ada23345a 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -22,6 +22,7 @@
#define _V4L2_CTRLS_H
#include <linux/list.h>
+#include <linux/mutex.h>
#include <linux/videodev2.h>
/* forward references */
diff --git a/include/media/v4l2-dv-timings.h b/include/media/v4l2-dv-timings.h
new file mode 100644
index 00000000000..4becc671639
--- /dev/null
+++ b/include/media/v4l2-dv-timings.h
@@ -0,0 +1,161 @@
+/*
+ * v4l2-dv-timings - Internal header with dv-timings helper functions
+ *
+ * Copyright 2013 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef __V4L2_DV_TIMINGS_H
+#define __V4L2_DV_TIMINGS_H
+
+#include <linux/videodev2.h>
+
+/** v4l2_dv_timings_presets: list of all dv_timings presets.
+ */
+extern const struct v4l2_dv_timings v4l2_dv_timings_presets[];
+
+/** v4l2_check_dv_timings_fnc - timings check callback
+ * @t: the v4l2_dv_timings struct.
+ * @handle: a handle from the driver.
+ *
+ * Returns true if the given timings are valid.
+ */
+typedef bool v4l2_check_dv_timings_fnc(const struct v4l2_dv_timings *t, void *handle);
+
+/** v4l2_valid_dv_timings() - are these timings valid?
+ * @t: the v4l2_dv_timings struct.
+ * @cap: the v4l2_dv_timings_cap capabilities.
+ * @fnc: callback to check if this timing is OK. May be NULL.
+ * @fnc_handle: a handle that is passed on to @fnc.
+ *
+ * Returns true if the given dv_timings struct is supported by the
+ * hardware capabilities and the callback function (if non-NULL), returns
+ * false otherwise.
+ */
+bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t,
+ const struct v4l2_dv_timings_cap *cap,
+ v4l2_check_dv_timings_fnc fnc,
+ void *fnc_handle);
+
+/** v4l2_enum_dv_timings_cap() - Helper function to enumerate possible DV timings based on capabilities
+ * @t: the v4l2_enum_dv_timings struct.
+ * @cap: the v4l2_dv_timings_cap capabilities.
+ * @fnc: callback to check if this timing is OK. May be NULL.
+ * @fnc_handle: a handle that is passed on to @fnc.
+ *
+ * This enumerates dv_timings using the full list of possible CEA-861 and DMT
+ * timings, filtering out any timings that are not supported based on the
+ * hardware capabilities and the callback function (if non-NULL).
+ *
+ * If a valid timing for the given index is found, it will fill in @t and
+ * return 0, otherwise it returns -EINVAL.
+ */
+int v4l2_enum_dv_timings_cap(struct v4l2_enum_dv_timings *t,
+ const struct v4l2_dv_timings_cap *cap,
+ v4l2_check_dv_timings_fnc fnc,
+ void *fnc_handle);
+
+/** v4l2_find_dv_timings_cap() - Find the closest timings struct
+ * @t: the v4l2_enum_dv_timings struct.
+ * @cap: the v4l2_dv_timings_cap capabilities.
+ * @pclock_delta: maximum delta between t->pixelclock and the timing struct
+ * under consideration.
+ * @fnc: callback to check if a given timings struct is OK. May be NULL.
+ * @fnc_handle: a handle that is passed on to @fnc.
+ *
+ * This function tries to map the given timings to an entry in the
+ * full list of possible CEA-861 and DMT timings, filtering out any timings
+ * that are not supported based on the hardware capabilities and the callback
+ * function (if non-NULL).
+ *
+ * On success it will fill in @t with the found timings and it returns true.
+ * On failure it will return false.
+ */
+bool v4l2_find_dv_timings_cap(struct v4l2_dv_timings *t,
+ const struct v4l2_dv_timings_cap *cap,
+ unsigned pclock_delta,
+ v4l2_check_dv_timings_fnc fnc,
+ void *fnc_handle);
+
+/** v4l2_match_dv_timings() - do two timings match?
+ * @measured: the measured timings data.
+ * @standard: the timings according to the standard.
+ * @pclock_delta: maximum delta in Hz between standard->pixelclock and
+ * the measured timings.
+ *
+ * Returns true if the two timings match, returns false otherwise.
+ */
+bool v4l2_match_dv_timings(const struct v4l2_dv_timings *measured,
+ const struct v4l2_dv_timings *standard,
+ unsigned pclock_delta);
+
+/** v4l2_print_dv_timings() - log the contents of a dv_timings struct
+ * @dev_prefix:device prefix for each log line.
+ * @prefix: additional prefix for each log line, may be NULL.
+ * @t: the timings data.
+ * @detailed: if true, give a detailed log.
+ */
+void v4l2_print_dv_timings(const char *dev_prefix, const char *prefix,
+ const struct v4l2_dv_timings *t, bool detailed);
+
+/** v4l2_detect_cvt - detect if the given timings follow the CVT standard
+ * @frame_height - the total height of the frame (including blanking) in lines.
+ * @hfreq - the horizontal frequency in Hz.
+ * @vsync - the height of the vertical sync in lines.
+ * @polarities - the horizontal and vertical polarities (same as struct
+ * v4l2_bt_timings polarities).
+ * @fmt - the resulting timings.
+ *
+ * This function will attempt to detect if the given values correspond to a
+ * valid CVT format. If so, then it will return true, and fmt will be filled
+ * in with the found CVT timings.
+ */
+bool v4l2_detect_cvt(unsigned frame_height, unsigned hfreq, unsigned vsync,
+ u32 polarities, struct v4l2_dv_timings *fmt);
+
+/** v4l2_detect_gtf - detect if the given timings follow the GTF standard
+ * @frame_height - the total height of the frame (including blanking) in lines.
+ * @hfreq - the horizontal frequency in Hz.
+ * @vsync - the height of the vertical sync in lines.
+ * @polarities - the horizontal and vertical polarities (same as struct
+ * v4l2_bt_timings polarities).
+ * @aspect - preferred aspect ratio. GTF has no method of determining the
+ * aspect ratio in order to derive the image width from the
+ * image height, so it has to be passed explicitly. Usually
+ * the native screen aspect ratio is used for this. If it
+ * is not filled in correctly, then 16:9 will be assumed.
+ * @fmt - the resulting timings.
+ *
+ * This function will attempt to detect if the given values correspond to a
+ * valid GTF format. If so, then it will return true, and fmt will be filled
+ * in with the found GTF timings.
+ */
+bool v4l2_detect_gtf(unsigned frame_height, unsigned hfreq, unsigned vsync,
+ u32 polarities, struct v4l2_fract aspect,
+ struct v4l2_dv_timings *fmt);
+
+/** v4l2_calc_aspect_ratio - calculate the aspect ratio based on bytes
+ * 0x15 and 0x16 from the EDID.
+ * @hor_landscape - byte 0x15 from the EDID.
+ * @vert_portrait - byte 0x16 from the EDID.
+ *
+ * Determines the aspect ratio from the EDID.
+ * See VESA Enhanced EDID standard, release A, rev 2, section 3.6.2:
+ * "Horizontal and Vertical Screen Size or Aspect Ratio"
+ */
+struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait);
+
+#endif
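A short usage sketch of the new helpers; the 250 kHz pixel-clock tolerance and all names are illustrative, not part of this patch:

static bool example_validate_timings(const struct v4l2_dv_timings_cap *cap,
				     struct v4l2_dv_timings *t)
{
	if (!v4l2_valid_dv_timings(t, cap, NULL, NULL))
		return false;

	/* Snap to the closest CEA-861/DMT entry, if any, and log it. */
	if (v4l2_find_dv_timings_cap(t, cap, 250000, NULL, NULL))
		v4l2_print_dv_timings("example", "detected: ", t, false);

	return true;
}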
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
index 83ae07e5335..395c4a95a42 100644
--- a/include/media/v4l2-mediabus.h
+++ b/include/media/v4l2-mediabus.h
@@ -40,6 +40,9 @@
#define V4L2_MBUS_FIELD_EVEN_HIGH (1 << 10)
/* FIELD = 1/0 - Field1 (odd)/Field2 (even) */
#define V4L2_MBUS_FIELD_EVEN_LOW (1 << 11)
+/* Active state of Sync-on-green (SoG) signal, 0/1 for LOW/HIGH respectively. */
+#define V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH (1 << 12)
+#define V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW (1 << 13)
/* Serial flags */
/* How many lanes the client can use */
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 0f4555b2a31..44542a20ab8 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -60,6 +60,7 @@ struct v4l2_m2m_queue_ctx {
struct list_head rdy_queue;
spinlock_t rdy_spinlock;
u8 num_rdy;
+ bool buffered;
};
struct v4l2_m2m_ctx {
@@ -134,6 +135,18 @@ struct v4l2_m2m_ctx *v4l2_m2m_ctx_init(struct v4l2_m2m_dev *m2m_dev,
void *drv_priv,
int (*queue_init)(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq));
+static inline void v4l2_m2m_set_src_buffered(struct v4l2_m2m_ctx *m2m_ctx,
+ bool buffered)
+{
+ m2m_ctx->out_q_ctx.buffered = buffered;
+}
+
+static inline void v4l2_m2m_set_dst_buffered(struct v4l2_m2m_ctx *m2m_ctx,
+ bool buffered)
+{
+ m2m_ctx->cap_q_ctx.buffered = buffered;
+}
+
void v4l2_m2m_ctx_release(struct v4l2_m2m_ctx *m2m_ctx);
void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, struct vb2_buffer *vb);
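A hedged sketch: a mem2mem driver marking its CAPTURE queue as buffered so jobs may be scheduled before a destination buffer arrives (the wrapper name is hypothetical):

static void example_enable_buffered_capture(struct v4l2_m2m_ctx *m2m_ctx)
{
	v4l2_m2m_set_dst_buffered(m2m_ctx, true);
}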
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 3250cc5e792..bfda0fe9aeb 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -586,15 +586,14 @@ struct v4l2_subdev {
struct video_device *devnode;
/* pointer to the physical device, if any */
struct device *dev;
- struct v4l2_async_subdev_list asdl;
+ /* Links this subdev to a global subdev_list or @notifier->done list. */
+ struct list_head async_list;
+ /* Pointer to respective struct v4l2_async_subdev. */
+ struct v4l2_async_subdev *asd;
+ /* Pointer to the managing notifier. */
+ struct v4l2_async_notifier *notifier;
};
-static inline struct v4l2_subdev *v4l2_async_to_subdev(
- struct v4l2_async_subdev_list *asdl)
-{
- return container_of(asdl, struct v4l2_subdev, asdl);
-}
-
#define media_entity_to_v4l2_subdev(ent) \
container_of(ent, struct v4l2_subdev, entity)
#define vdev_to_v4l2_subdev(vdev) \
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index d88a098d1af..6781258d0b6 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -219,8 +219,9 @@ struct vb2_buffer {
* configured format and *num_buffers is the total number
* of buffers, that are being allocated. When called from
* VIDIOC_CREATE_BUFS, fmt != NULL and it describes the
- * target frame format. In this case *num_buffers are being
- * allocated additionally to q->num_buffers.
+ * target frame format (if the format isn't valid the
+ * callback must return -EINVAL). In this case *num_buffers
+ * are being allocated additionally to q->num_buffers.
* @wait_prepare: release any locks taken while calling vb2 functions;
* it is called before an ioctl needs to wait for a new
* buffer to arrive; required to avoid a deadlock in
@@ -236,8 +237,10 @@ struct vb2_buffer {
* @buf_prepare: called every time the buffer is queued from userspace
* and from the VIDIOC_PREPARE_BUF ioctl; drivers may
* perform any initialization required before each hardware
- * operation in this callback; if an error is returned, the
- * buffer will not be queued in driver; optional
+ * operation in this callback; drivers that support
+ * VIDIOC_CREATE_BUFS must also validate the buffer size;
+ * if an error is returned, the buffer will not be queued
+ * in driver; optional
* @buf_finish: called before every dequeue of the buffer back to
* userspace; drivers may perform any operations required
* before userspace accesses the buffer; optional
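A sketch of a buf_prepare callback doing the size validation the updated documentation asks for when VIDIOC_CREATE_BUFS is supported; the context structure and the sizeimage source are assumptions:

static int example_buf_prepare(struct vb2_buffer *vb)
{
	struct example_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

	if (vb2_plane_size(vb, 0) < ctx->fmt.sizeimage)
		return -EINVAL;		/* buffer too small for the format */

	vb2_set_plane_payload(vb, 0, ctx->fmt.sizeimage);
	return 0;
}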
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index d9fa68f26c4..9a36d929711 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -40,8 +40,6 @@
* @close: member function to discard a connection on this transport
* @request: member function to issue a request to the transport
* @cancel: member function to cancel a request (if it hasn't been sent)
- * @cancelled: member function to notify that a cancelled request will not
- * not receive a reply
*
* This is the basic API for a transport module which is registered by the
* transport module with the 9P core network module and used by the client
@@ -60,7 +58,6 @@ struct p9_trans_module {
void (*close) (struct p9_client *);
int (*request) (struct p9_client *, struct p9_req_t *req);
int (*cancel) (struct p9_client *, struct p9_req_t *req);
- int (*cancelled)(struct p9_client *, struct p9_req_t *req);
int (*zc_request)(struct p9_client *, struct p9_req_t *,
char *, char *, int , int, int, int);
};
diff --git a/include/net/act_api.h b/include/net/act_api.h
index b8ffac7b6ba..9e90fdff470 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -82,36 +82,36 @@ struct tc_action_ops {
int (*walk)(struct sk_buff *, struct netlink_callback *, int, struct tc_action *);
};
-extern struct tcf_common *tcf_hash_lookup(u32 index,
- struct tcf_hashinfo *hinfo);
-extern void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo);
-extern int tcf_hash_release(struct tcf_common *p, int bind,
- struct tcf_hashinfo *hinfo);
-extern int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
- int type, struct tc_action *a);
-extern u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo);
-extern int tcf_hash_search(struct tc_action *a, u32 index);
-extern struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a,
- int bind, struct tcf_hashinfo *hinfo);
-extern struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
- struct tc_action *a, int size,
- int bind, u32 *idx_gen,
- struct tcf_hashinfo *hinfo);
-extern void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo);
+struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo);
+void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo);
+int tcf_hash_release(struct tcf_common *p, int bind,
+ struct tcf_hashinfo *hinfo);
+int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
+ int type, struct tc_action *a);
+u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo);
+int tcf_hash_search(struct tc_action *a, u32 index);
+struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a,
+ int bind, struct tcf_hashinfo *hinfo);
+struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
+ struct tc_action *a, int size,
+ int bind, u32 *idx_gen,
+ struct tcf_hashinfo *hinfo);
+void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo);
-extern int tcf_register_action(struct tc_action_ops *a);
-extern int tcf_unregister_action(struct tc_action_ops *a);
-extern void tcf_action_destroy(struct tc_action *a, int bind);
-extern int tcf_action_exec(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res);
-extern struct tc_action *tcf_action_init(struct net *net, struct nlattr *nla,
- struct nlattr *est, char *n, int ovr,
- int bind);
-extern struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
- struct nlattr *est, char *n, int ovr,
- int bind);
-extern int tcf_action_dump(struct sk_buff *skb, struct tc_action *a, int, int);
-extern int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
-extern int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
-extern int tcf_action_copy_stats (struct sk_buff *,struct tc_action *, int);
+int tcf_register_action(struct tc_action_ops *a);
+int tcf_unregister_action(struct tc_action_ops *a);
+void tcf_action_destroy(struct tc_action *a, int bind);
+int tcf_action_exec(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res);
+struct tc_action *tcf_action_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, char *n, int ovr,
+ int bind);
+struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
+ struct nlattr *est, char *n, int ovr,
+ int bind);
+int tcf_action_dump(struct sk_buff *skb, struct tc_action *a, int, int);
+int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
+int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
+int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
#endif /* CONFIG_NET_CLS_ACT */
#endif
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index c7b181cb47a..fb314de2b61 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -53,51 +53,36 @@ struct prefix_info {
#define IN6_ADDR_HSIZE_SHIFT 4
#define IN6_ADDR_HSIZE (1 << IN6_ADDR_HSIZE_SHIFT)
-extern int addrconf_init(void);
-extern void addrconf_cleanup(void);
+int addrconf_init(void);
+void addrconf_cleanup(void);
-extern int addrconf_add_ifaddr(struct net *net,
- void __user *arg);
-extern int addrconf_del_ifaddr(struct net *net,
- void __user *arg);
-extern int addrconf_set_dstaddr(struct net *net,
- void __user *arg);
+int addrconf_add_ifaddr(struct net *net, void __user *arg);
+int addrconf_del_ifaddr(struct net *net, void __user *arg);
+int addrconf_set_dstaddr(struct net *net, void __user *arg);
-extern int ipv6_chk_addr(struct net *net,
- const struct in6_addr *addr,
- const struct net_device *dev,
- int strict);
+int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
+ const struct net_device *dev, int strict);
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
-extern int ipv6_chk_home_addr(struct net *net,
- const struct in6_addr *addr);
+int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr);
#endif
-extern int ipv6_chk_prefix(const struct in6_addr *addr,
- struct net_device *dev);
-
-extern struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
- const struct in6_addr *addr,
- struct net_device *dev,
- int strict);
-
-extern int ipv6_dev_get_saddr(struct net *net,
- const struct net_device *dev,
- const struct in6_addr *daddr,
- unsigned int srcprefs,
- struct in6_addr *saddr);
-extern int __ipv6_get_lladdr(struct inet6_dev *idev,
- struct in6_addr *addr,
- unsigned char banned_flags);
-extern int ipv6_get_lladdr(struct net_device *dev,
- struct in6_addr *addr,
- unsigned char banned_flags);
-extern int ipv6_rcv_saddr_equal(const struct sock *sk,
- const struct sock *sk2);
-extern void addrconf_join_solict(struct net_device *dev,
- const struct in6_addr *addr);
-extern void addrconf_leave_solict(struct inet6_dev *idev,
- const struct in6_addr *addr);
+int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev);
+
+struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net,
+ const struct in6_addr *addr,
+ struct net_device *dev, int strict);
+
+int ipv6_dev_get_saddr(struct net *net, const struct net_device *dev,
+ const struct in6_addr *daddr, unsigned int srcprefs,
+ struct in6_addr *saddr);
+int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
+ unsigned char banned_flags);
+int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
+ unsigned char banned_flags);
+int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2);
+void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr);
+void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr);
static inline unsigned long addrconf_timeout_fixup(u32 timeout,
unsigned int unit)
@@ -124,41 +109,58 @@ static inline int addrconf_finite_timeout(unsigned long timeout)
/*
* IPv6 Address Label subsystem (addrlabel.c)
*/
-extern int ipv6_addr_label_init(void);
-extern void ipv6_addr_label_cleanup(void);
-extern void ipv6_addr_label_rtnl_register(void);
-extern u32 ipv6_addr_label(struct net *net,
- const struct in6_addr *addr,
- int type, int ifindex);
+int ipv6_addr_label_init(void);
+void ipv6_addr_label_cleanup(void);
+void ipv6_addr_label_rtnl_register(void);
+u32 ipv6_addr_label(struct net *net, const struct in6_addr *addr,
+ int type, int ifindex);
/*
* multicast prototypes (mcast.c)
*/
-extern int ipv6_sock_mc_join(struct sock *sk, int ifindex,
- const struct in6_addr *addr);
-extern int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
- const struct in6_addr *addr);
-extern void ipv6_sock_mc_close(struct sock *sk);
-extern bool inet6_mc_check(struct sock *sk,
- const struct in6_addr *mc_addr,
- const struct in6_addr *src_addr);
-
-extern int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr);
-extern int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr);
-extern int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr);
-extern void ipv6_mc_up(struct inet6_dev *idev);
-extern void ipv6_mc_down(struct inet6_dev *idev);
-extern void ipv6_mc_unmap(struct inet6_dev *idev);
-extern void ipv6_mc_remap(struct inet6_dev *idev);
-extern void ipv6_mc_init_dev(struct inet6_dev *idev);
-extern void ipv6_mc_destroy_dev(struct inet6_dev *idev);
-extern void addrconf_dad_failure(struct inet6_ifaddr *ifp);
-
-extern bool ipv6_chk_mcast_addr(struct net_device *dev,
- const struct in6_addr *group,
- const struct in6_addr *src_addr);
-
-extern void ipv6_mc_dad_complete(struct inet6_dev *idev);
+int ipv6_sock_mc_join(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+void ipv6_sock_mc_close(struct sock *sk);
+bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
+ const struct in6_addr *src_addr);
+
+int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr);
+int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr);
+int ipv6_dev_mc_dec(struct net_device *dev, const struct in6_addr *addr);
+void ipv6_mc_up(struct inet6_dev *idev);
+void ipv6_mc_down(struct inet6_dev *idev);
+void ipv6_mc_unmap(struct inet6_dev *idev);
+void ipv6_mc_remap(struct inet6_dev *idev);
+void ipv6_mc_init_dev(struct inet6_dev *idev);
+void ipv6_mc_destroy_dev(struct inet6_dev *idev);
+void addrconf_dad_failure(struct inet6_ifaddr *ifp);
+
+bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
+ const struct in6_addr *src_addr);
+
+void ipv6_mc_dad_complete(struct inet6_dev *idev);
+
+/* A stub used by the vxlan module. This is ugly; ideally these
+ * symbols should be built into the core kernel.
+ */
+struct ipv6_stub {
+ int (*ipv6_sock_mc_join)(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+ int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+ int (*ipv6_dst_lookup)(struct sock *sk, struct dst_entry **dst,
+ struct flowi6 *fl6);
+ void (*udpv6_encap_enable)(void);
+ void (*ndisc_send_na)(struct net_device *dev, struct neighbour *neigh,
+ const struct in6_addr *daddr,
+ const struct in6_addr *solicited_addr,
+ bool router, bool solicited, bool override, bool inc_opt);
+ struct neigh_table *nd_tbl;
+};
+extern const struct ipv6_stub *ipv6_stub __read_mostly;
+
/*
* identify MLD packets for MLD filter exceptions
*/
@@ -184,29 +186,31 @@ static inline bool ipv6_is_mld(struct sk_buff *skb, int nexthdr, int offset)
return false;
}
-extern void addrconf_prefix_rcv(struct net_device *dev,
- u8 *opt, int len, bool sllao);
+void addrconf_prefix_rcv(struct net_device *dev,
+ u8 *opt, int len, bool sllao);
/*
* anycast prototypes (anycast.c)
*/
-extern int ipv6_sock_ac_join(struct sock *sk,int ifindex, const struct in6_addr *addr);
-extern int ipv6_sock_ac_drop(struct sock *sk,int ifindex, const struct in6_addr *addr);
-extern void ipv6_sock_ac_close(struct sock *sk);
-
-extern int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr);
-extern int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr);
-extern bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
+int ipv6_sock_ac_join(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+int ipv6_sock_ac_drop(struct sock *sk, int ifindex,
+ const struct in6_addr *addr);
+void ipv6_sock_ac_close(struct sock *sk);
+
+int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr);
+int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr);
+bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
const struct in6_addr *addr);
/* Device notifier */
-extern int register_inet6addr_notifier(struct notifier_block *nb);
-extern int unregister_inet6addr_notifier(struct notifier_block *nb);
-extern int inet6addr_notifier_call_chain(unsigned long val, void *v);
+int register_inet6addr_notifier(struct notifier_block *nb);
+int unregister_inet6addr_notifier(struct notifier_block *nb);
+int inet6addr_notifier_call_chain(unsigned long val, void *v);
-extern void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
- struct ipv6_devconf *devconf);
+void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
+ struct ipv6_devconf *devconf);
/**
* __in6_dev_get - get inet6_dev pointer from netdevice
@@ -240,7 +244,7 @@ static inline struct inet6_dev *in6_dev_get(const struct net_device *dev)
return idev;
}
-extern void in6_dev_finish_destroy(struct inet6_dev *idev);
+void in6_dev_finish_destroy(struct inet6_dev *idev);
static inline void in6_dev_put(struct inet6_dev *idev)
{
@@ -258,7 +262,7 @@ static inline void in6_dev_hold(struct inet6_dev *idev)
atomic_inc(&idev->refcnt);
}
-extern void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp);
+void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp);
static inline void in6_ifa_put(struct inet6_ifaddr *ifp)
{
@@ -340,8 +344,8 @@ static inline bool ipv6_addr_is_solict_mult(const struct in6_addr *addr)
}
#ifdef CONFIG_PROC_FS
-extern int if6_proc_init(void);
-extern void if6_proc_exit(void);
+int if6_proc_init(void);
+void if6_proc_exit(void);
#endif
#endif
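A hedged sketch (not from this patch) of how a tunnel module such as vxlan might consume the ipv6_stub table declared above; sk, dst_ifindex and remote_in6_addr are invented placeholders, and the only fixed points are the struct ipv6_stub members shown in the hunk.

/* sketch: look up an IPv6 route through the stub rather than a direct
 * symbol reference, avoiding a hard dependency on the ipv6 module */
struct dst_entry *ndst = NULL;
struct flowi6 fl6;

memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_oif = dst_ifindex;		/* placeholder */
fl6.daddr = remote_in6_addr;		/* placeholder */

if (ipv6_stub->ipv6_dst_lookup(sk, &ndst, &fl6))
	goto tx_error;			/* no route to destination */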
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 03e6e945362..e797d45a5ae 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -31,24 +31,21 @@ enum {
typedef void (*rxrpc_interceptor_t)(struct sock *, unsigned long,
struct sk_buff *);
-extern void rxrpc_kernel_intercept_rx_messages(struct socket *,
- rxrpc_interceptor_t);
-extern struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
- struct sockaddr_rxrpc *,
- struct key *,
- unsigned long,
- gfp_t);
-extern int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *,
- size_t);
-extern void rxrpc_kernel_abort_call(struct rxrpc_call *, u32);
-extern void rxrpc_kernel_end_call(struct rxrpc_call *);
-extern bool rxrpc_kernel_is_data_last(struct sk_buff *);
-extern u32 rxrpc_kernel_get_abort_code(struct sk_buff *);
-extern int rxrpc_kernel_get_error_number(struct sk_buff *);
-extern void rxrpc_kernel_data_delivered(struct sk_buff *);
-extern void rxrpc_kernel_free_skb(struct sk_buff *);
-extern struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *,
- unsigned long);
-extern int rxrpc_kernel_reject_call(struct socket *);
+void rxrpc_kernel_intercept_rx_messages(struct socket *, rxrpc_interceptor_t);
+struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
+ struct sockaddr_rxrpc *,
+ struct key *,
+ unsigned long,
+ gfp_t);
+int rxrpc_kernel_send_data(struct rxrpc_call *, struct msghdr *, size_t);
+void rxrpc_kernel_abort_call(struct rxrpc_call *, u32);
+void rxrpc_kernel_end_call(struct rxrpc_call *);
+bool rxrpc_kernel_is_data_last(struct sk_buff *);
+u32 rxrpc_kernel_get_abort_code(struct sk_buff *);
+int rxrpc_kernel_get_error_number(struct sk_buff *);
+void rxrpc_kernel_data_delivered(struct sk_buff *);
+void rxrpc_kernel_free_skb(struct sk_buff *);
+struct rxrpc_call *rxrpc_kernel_accept_call(struct socket *, unsigned long);
+int rxrpc_kernel_reject_call(struct socket *);
#endif /* _NET_RXRPC_H */
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index dbdfd2b0f3b..a175ba4a7ad 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -6,12 +6,12 @@
#include <linux/mutex.h>
#include <net/sock.h>
-extern void unix_inflight(struct file *fp);
-extern void unix_notinflight(struct file *fp);
-extern void unix_gc(void);
-extern void wait_for_unix_gc(void);
-extern struct sock *unix_get_socket(struct file *filp);
-extern struct sock *unix_peer_get(struct sock *);
+void unix_inflight(struct file *fp);
+void unix_notinflight(struct file *fp);
+void unix_gc(void);
+void wait_for_unix_gc(void);
+struct sock *unix_get_socket(struct file *filp);
+struct sock *unix_peer_get(struct sock *);
#define UNIX_HASH_SIZE 256
#define UNIX_HASH_BITS 8
@@ -35,6 +35,7 @@ struct unix_skb_parms {
#ifdef CONFIG_SECURITY_NETWORK
u32 secid; /* Security ID */
#endif
+ u32 consumed;
};
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
@@ -71,8 +72,8 @@ long unix_inq_len(struct sock *sk);
long unix_outq_len(struct sock *sk);
#ifdef CONFIG_SYSCTL
-extern int unix_sysctl_register(struct net *net);
-extern void unix_sysctl_unregister(struct net *net);
+int unix_sysctl_register(struct net *net);
+void unix_sysctl_unregister(struct net *net);
#else
static inline int unix_sysctl_register(struct net *net) { return 0; }
static inline void unix_sysctl_unregister(struct net *net) {}
diff --git a/net/vmw_vsock/af_vsock.h b/include/net/af_vsock.h
index 7d64d3609ec..7d64d3609ec 100644
--- a/net/vmw_vsock/af_vsock.h
+++ b/include/net/af_vsock.h
diff --git a/include/net/arp.h b/include/net/arp.h
index b630dae0341..7509d9da4e3 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -46,22 +46,22 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32
return n;
}
-extern void arp_init(void);
-extern int arp_find(unsigned char *haddr, struct sk_buff *skb);
-extern int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg);
-extern void arp_send(int type, int ptype, __be32 dest_ip,
- struct net_device *dev, __be32 src_ip,
- const unsigned char *dest_hw,
- const unsigned char *src_hw, const unsigned char *th);
-extern int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir);
-extern void arp_ifdown(struct net_device *dev);
+void arp_init(void);
+int arp_find(unsigned char *haddr, struct sk_buff *skb);
+int arp_ioctl(struct net *net, unsigned int cmd, void __user *arg);
+void arp_send(int type, int ptype, __be32 dest_ip,
+ struct net_device *dev, __be32 src_ip,
+ const unsigned char *dest_hw,
+ const unsigned char *src_hw, const unsigned char *th);
+int arp_mc_map(__be32 addr, u8 *haddr, struct net_device *dev, int dir);
+void arp_ifdown(struct net_device *dev);
-extern struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
- struct net_device *dev, __be32 src_ip,
- const unsigned char *dest_hw,
- const unsigned char *src_hw,
- const unsigned char *target_hw);
-extern void arp_xmit(struct sk_buff *skb);
+struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
+ struct net_device *dev, __be32 src_ip,
+ const unsigned char *dest_hw,
+ const unsigned char *src_hw,
+ const unsigned char *target_hw);
+void arp_xmit(struct sk_buff *skb);
int arp_invalidate(struct net_device *dev, __be32 ip);
#endif /* _ARP_H */
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 89ed9ac5701..bf0396e9a5d 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -195,7 +195,7 @@ static inline void ax25_hold_route(ax25_route *ax25_rt)
atomic_inc(&ax25_rt->refcount);
}
-extern void __ax25_put_route(ax25_route *ax25_rt);
+void __ax25_put_route(ax25_route *ax25_rt);
static inline void ax25_put_route(ax25_route *ax25_rt)
{
@@ -272,30 +272,31 @@ static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev
/* af_ax25.c */
extern struct hlist_head ax25_list;
extern spinlock_t ax25_list_lock;
-extern void ax25_cb_add(ax25_cb *);
+void ax25_cb_add(ax25_cb *);
struct sock *ax25_find_listener(ax25_address *, int, struct net_device *, int);
struct sock *ax25_get_socket(ax25_address *, ax25_address *, int);
-extern ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *, struct net_device *);
-extern void ax25_send_to_raw(ax25_address *, struct sk_buff *, int);
-extern void ax25_destroy_socket(ax25_cb *);
-extern ax25_cb * __must_check ax25_create_cb(void);
-extern void ax25_fillin_cb(ax25_cb *, ax25_dev *);
-extern struct sock *ax25_make_new(struct sock *, struct ax25_dev *);
+ax25_cb *ax25_find_cb(ax25_address *, ax25_address *, ax25_digi *,
+ struct net_device *);
+void ax25_send_to_raw(ax25_address *, struct sk_buff *, int);
+void ax25_destroy_socket(ax25_cb *);
+ax25_cb * __must_check ax25_create_cb(void);
+void ax25_fillin_cb(ax25_cb *, ax25_dev *);
+struct sock *ax25_make_new(struct sock *, struct ax25_dev *);
/* ax25_addr.c */
extern const ax25_address ax25_bcast;
extern const ax25_address ax25_defaddr;
extern const ax25_address null_ax25_address;
-extern char *ax2asc(char *buf, const ax25_address *);
-extern void asc2ax(ax25_address *addr, const char *callsign);
-extern int ax25cmp(const ax25_address *, const ax25_address *);
-extern int ax25digicmp(const ax25_digi *, const ax25_digi *);
-extern const unsigned char *ax25_addr_parse(const unsigned char *, int,
+char *ax2asc(char *buf, const ax25_address *);
+void asc2ax(ax25_address *addr, const char *callsign);
+int ax25cmp(const ax25_address *, const ax25_address *);
+int ax25digicmp(const ax25_digi *, const ax25_digi *);
+const unsigned char *ax25_addr_parse(const unsigned char *, int,
ax25_address *, ax25_address *, ax25_digi *, int *, int *);
-extern int ax25_addr_build(unsigned char *, const ax25_address *,
- const ax25_address *, const ax25_digi *, int, int);
-extern int ax25_addr_size(const ax25_digi *);
-extern void ax25_digi_invert(const ax25_digi *, ax25_digi *);
+int ax25_addr_build(unsigned char *, const ax25_address *,
+ const ax25_address *, const ax25_digi *, int, int);
+int ax25_addr_size(const ax25_digi *);
+void ax25_digi_invert(const ax25_digi *, ax25_digi *);
/* ax25_dev.c */
extern ax25_dev *ax25_dev_list;
@@ -306,33 +307,33 @@ static inline ax25_dev *ax25_dev_ax25dev(struct net_device *dev)
return dev->ax25_ptr;
}
-extern ax25_dev *ax25_addr_ax25dev(ax25_address *);
-extern void ax25_dev_device_up(struct net_device *);
-extern void ax25_dev_device_down(struct net_device *);
-extern int ax25_fwd_ioctl(unsigned int, struct ax25_fwd_struct *);
-extern struct net_device *ax25_fwd_dev(struct net_device *);
-extern void ax25_dev_free(void);
+ax25_dev *ax25_addr_ax25dev(ax25_address *);
+void ax25_dev_device_up(struct net_device *);
+void ax25_dev_device_down(struct net_device *);
+int ax25_fwd_ioctl(unsigned int, struct ax25_fwd_struct *);
+struct net_device *ax25_fwd_dev(struct net_device *);
+void ax25_dev_free(void);
/* ax25_ds_in.c */
-extern int ax25_ds_frame_in(ax25_cb *, struct sk_buff *, int);
+int ax25_ds_frame_in(ax25_cb *, struct sk_buff *, int);
/* ax25_ds_subr.c */
-extern void ax25_ds_nr_error_recovery(ax25_cb *);
-extern void ax25_ds_enquiry_response(ax25_cb *);
-extern void ax25_ds_establish_data_link(ax25_cb *);
-extern void ax25_dev_dama_off(ax25_dev *);
-extern void ax25_dama_on(ax25_cb *);
-extern void ax25_dama_off(ax25_cb *);
+void ax25_ds_nr_error_recovery(ax25_cb *);
+void ax25_ds_enquiry_response(ax25_cb *);
+void ax25_ds_establish_data_link(ax25_cb *);
+void ax25_dev_dama_off(ax25_dev *);
+void ax25_dama_on(ax25_cb *);
+void ax25_dama_off(ax25_cb *);
/* ax25_ds_timer.c */
-extern void ax25_ds_setup_timer(ax25_dev *);
-extern void ax25_ds_set_timer(ax25_dev *);
-extern void ax25_ds_del_timer(ax25_dev *);
-extern void ax25_ds_timer(ax25_cb *);
-extern void ax25_ds_t1_timeout(ax25_cb *);
-extern void ax25_ds_heartbeat_expiry(ax25_cb *);
-extern void ax25_ds_t3timer_expiry(ax25_cb *);
-extern void ax25_ds_idletimer_expiry(ax25_cb *);
+void ax25_ds_setup_timer(ax25_dev *);
+void ax25_ds_set_timer(ax25_dev *);
+void ax25_ds_del_timer(ax25_dev *);
+void ax25_ds_timer(ax25_cb *);
+void ax25_ds_t1_timeout(ax25_cb *);
+void ax25_ds_heartbeat_expiry(ax25_cb *);
+void ax25_ds_t3timer_expiry(ax25_cb *);
+void ax25_ds_idletimer_expiry(ax25_cb *);
/* ax25_iface.c */
@@ -342,107 +343,109 @@ struct ax25_protocol {
int (*func)(struct sk_buff *, ax25_cb *);
};
-extern void ax25_register_pid(struct ax25_protocol *ap);
-extern void ax25_protocol_release(unsigned int);
+void ax25_register_pid(struct ax25_protocol *ap);
+void ax25_protocol_release(unsigned int);
struct ax25_linkfail {
struct hlist_node lf_node;
void (*func)(ax25_cb *, int);
};
-extern void ax25_linkfail_register(struct ax25_linkfail *lf);
-extern void ax25_linkfail_release(struct ax25_linkfail *lf);
-extern int __must_check ax25_listen_register(ax25_address *,
- struct net_device *);
-extern void ax25_listen_release(ax25_address *, struct net_device *);
-extern int (*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *);
-extern int ax25_listen_mine(ax25_address *, struct net_device *);
-extern void ax25_link_failed(ax25_cb *, int);
-extern int ax25_protocol_is_registered(unsigned int);
+void ax25_linkfail_register(struct ax25_linkfail *lf);
+void ax25_linkfail_release(struct ax25_linkfail *lf);
+int __must_check ax25_listen_register(ax25_address *, struct net_device *);
+void ax25_listen_release(ax25_address *, struct net_device *);
+int (*ax25_protocol_function(unsigned int))(struct sk_buff *, ax25_cb *);
+int ax25_listen_mine(ax25_address *, struct net_device *);
+void ax25_link_failed(ax25_cb *, int);
+int ax25_protocol_is_registered(unsigned int);
/* ax25_in.c */
-extern int ax25_rx_iframe(ax25_cb *, struct sk_buff *);
-extern int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
+int ax25_rx_iframe(ax25_cb *, struct sk_buff *);
+int ax25_kiss_rcv(struct sk_buff *, struct net_device *, struct packet_type *,
+ struct net_device *);
/* ax25_ip.c */
-extern int ax25_hard_header(struct sk_buff *, struct net_device *,
- unsigned short, const void *,
- const void *, unsigned int);
-extern int ax25_rebuild_header(struct sk_buff *);
+int ax25_hard_header(struct sk_buff *, struct net_device *, unsigned short,
+ const void *, const void *, unsigned int);
+int ax25_rebuild_header(struct sk_buff *);
extern const struct header_ops ax25_header_ops;
/* ax25_out.c */
-extern ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *, ax25_digi *, struct net_device *);
-extern void ax25_output(ax25_cb *, int, struct sk_buff *);
-extern void ax25_kick(ax25_cb *);
-extern void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int);
-extern void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev);
-extern int ax25_check_iframes_acked(ax25_cb *, unsigned short);
+ax25_cb *ax25_send_frame(struct sk_buff *, int, ax25_address *, ax25_address *,
+ ax25_digi *, struct net_device *);
+void ax25_output(ax25_cb *, int, struct sk_buff *);
+void ax25_kick(ax25_cb *);
+void ax25_transmit_buffer(ax25_cb *, struct sk_buff *, int);
+void ax25_queue_xmit(struct sk_buff *skb, struct net_device *dev);
+int ax25_check_iframes_acked(ax25_cb *, unsigned short);
/* ax25_route.c */
-extern void ax25_rt_device_down(struct net_device *);
-extern int ax25_rt_ioctl(unsigned int, void __user *);
+void ax25_rt_device_down(struct net_device *);
+int ax25_rt_ioctl(unsigned int, void __user *);
extern const struct file_operations ax25_route_fops;
-extern ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev);
-extern int ax25_rt_autobind(ax25_cb *, ax25_address *);
-extern struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *, ax25_address *, ax25_digi *);
-extern void ax25_rt_free(void);
+ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev);
+int ax25_rt_autobind(ax25_cb *, ax25_address *);
+struct sk_buff *ax25_rt_build_path(struct sk_buff *, ax25_address *,
+ ax25_address *, ax25_digi *);
+void ax25_rt_free(void);
/* ax25_std_in.c */
-extern int ax25_std_frame_in(ax25_cb *, struct sk_buff *, int);
+int ax25_std_frame_in(ax25_cb *, struct sk_buff *, int);
/* ax25_std_subr.c */
-extern void ax25_std_nr_error_recovery(ax25_cb *);
-extern void ax25_std_establish_data_link(ax25_cb *);
-extern void ax25_std_transmit_enquiry(ax25_cb *);
-extern void ax25_std_enquiry_response(ax25_cb *);
-extern void ax25_std_timeout_response(ax25_cb *);
+void ax25_std_nr_error_recovery(ax25_cb *);
+void ax25_std_establish_data_link(ax25_cb *);
+void ax25_std_transmit_enquiry(ax25_cb *);
+void ax25_std_enquiry_response(ax25_cb *);
+void ax25_std_timeout_response(ax25_cb *);
/* ax25_std_timer.c */
-extern void ax25_std_heartbeat_expiry(ax25_cb *);
-extern void ax25_std_t1timer_expiry(ax25_cb *);
-extern void ax25_std_t2timer_expiry(ax25_cb *);
-extern void ax25_std_t3timer_expiry(ax25_cb *);
-extern void ax25_std_idletimer_expiry(ax25_cb *);
+void ax25_std_heartbeat_expiry(ax25_cb *);
+void ax25_std_t1timer_expiry(ax25_cb *);
+void ax25_std_t2timer_expiry(ax25_cb *);
+void ax25_std_t3timer_expiry(ax25_cb *);
+void ax25_std_idletimer_expiry(ax25_cb *);
/* ax25_subr.c */
-extern void ax25_clear_queues(ax25_cb *);
-extern void ax25_frames_acked(ax25_cb *, unsigned short);
-extern void ax25_requeue_frames(ax25_cb *);
-extern int ax25_validate_nr(ax25_cb *, unsigned short);
-extern int ax25_decode(ax25_cb *, struct sk_buff *, int *, int *, int *);
-extern void ax25_send_control(ax25_cb *, int, int, int);
-extern void ax25_return_dm(struct net_device *, ax25_address *, ax25_address *, ax25_digi *);
-extern void ax25_calculate_t1(ax25_cb *);
-extern void ax25_calculate_rtt(ax25_cb *);
-extern void ax25_disconnect(ax25_cb *, int);
+void ax25_clear_queues(ax25_cb *);
+void ax25_frames_acked(ax25_cb *, unsigned short);
+void ax25_requeue_frames(ax25_cb *);
+int ax25_validate_nr(ax25_cb *, unsigned short);
+int ax25_decode(ax25_cb *, struct sk_buff *, int *, int *, int *);
+void ax25_send_control(ax25_cb *, int, int, int);
+void ax25_return_dm(struct net_device *, ax25_address *, ax25_address *,
+ ax25_digi *);
+void ax25_calculate_t1(ax25_cb *);
+void ax25_calculate_rtt(ax25_cb *);
+void ax25_disconnect(ax25_cb *, int);
/* ax25_timer.c */
-extern void ax25_setup_timers(ax25_cb *);
-extern void ax25_start_heartbeat(ax25_cb *);
-extern void ax25_start_t1timer(ax25_cb *);
-extern void ax25_start_t2timer(ax25_cb *);
-extern void ax25_start_t3timer(ax25_cb *);
-extern void ax25_start_idletimer(ax25_cb *);
-extern void ax25_stop_heartbeat(ax25_cb *);
-extern void ax25_stop_t1timer(ax25_cb *);
-extern void ax25_stop_t2timer(ax25_cb *);
-extern void ax25_stop_t3timer(ax25_cb *);
-extern void ax25_stop_idletimer(ax25_cb *);
-extern int ax25_t1timer_running(ax25_cb *);
-extern unsigned long ax25_display_timer(struct timer_list *);
+void ax25_setup_timers(ax25_cb *);
+void ax25_start_heartbeat(ax25_cb *);
+void ax25_start_t1timer(ax25_cb *);
+void ax25_start_t2timer(ax25_cb *);
+void ax25_start_t3timer(ax25_cb *);
+void ax25_start_idletimer(ax25_cb *);
+void ax25_stop_heartbeat(ax25_cb *);
+void ax25_stop_t1timer(ax25_cb *);
+void ax25_stop_t2timer(ax25_cb *);
+void ax25_stop_t3timer(ax25_cb *);
+void ax25_stop_idletimer(ax25_cb *);
+int ax25_t1timer_running(ax25_cb *);
+unsigned long ax25_display_timer(struct timer_list *);
/* ax25_uid.c */
extern int ax25_uid_policy;
-extern ax25_uid_assoc *ax25_findbyuid(kuid_t);
-extern int __must_check ax25_uid_ioctl(int, struct sockaddr_ax25 *);
+ax25_uid_assoc *ax25_findbyuid(kuid_t);
+int __must_check ax25_uid_ioctl(int, struct sockaddr_ax25 *);
extern const struct file_operations ax25_uid_fops;
-extern void ax25_uid_free(void);
+void ax25_uid_free(void);
/* sysctl_net_ax25.c */
#ifdef CONFIG_SYSCTL
-extern int ax25_register_dev_sysctl(ax25_dev *ax25_dev);
-extern void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev);
+int ax25_register_dev_sysctl(ax25_dev *ax25_dev);
+void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev);
#else
static inline int ax25_register_dev_sysctl(ax25_dev *ax25_dev) { return 0; }
static inline void ax25_unregister_dev_sysctl(ax25_dev *ax25_dev) {}
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 10eb9b38901..10d43d8c703 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -107,6 +107,14 @@ struct bt_power {
*/
#define BT_CHANNEL_POLICY_AMP_PREFERRED 2
+#define BT_VOICE 11
+struct bt_voice {
+ __u16 setting;
+};
+
+#define BT_VOICE_TRANSPARENT 0x0003
+#define BT_VOICE_CVSD_16BIT 0x0060
+
__printf(1, 2)
int bt_info(const char *fmt, ...);
__printf(1, 2)
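A hedged user-space sketch of the new BT_VOICE option; only BT_VOICE, struct bt_voice and BT_VOICE_TRANSPARENT come from this patch, while sco_fd and SOL_BLUETOOTH are assumed from the existing SCO socket API.

/* sketch: request transparent air mode on a SCO socket before connect() */
struct bt_voice voice = { .setting = BT_VOICE_TRANSPARENT };

if (setsockopt(sco_fd, SOL_BLUETOOTH, BT_VOICE, &voice, sizeof(voice)) < 0)
	perror("setsockopt(BT_VOICE)");	/* e.g. running on a kernel without BT_VOICE */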
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 3c592cf473d..aaeaf0938ec 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -238,6 +238,7 @@ enum {
#define LMP_CVSD 0x01
#define LMP_PSCHEME 0x02
#define LMP_PCONTROL 0x04
+#define LMP_TRANSPARENT 0x08
#define LMP_RSSI_INQ 0x40
#define LMP_ESCO 0x80
@@ -296,6 +297,12 @@ enum {
#define HCI_AT_GENERAL_BONDING 0x04
#define HCI_AT_GENERAL_BONDING_MITM 0x05
+/* I/O capabilities */
+#define HCI_IO_DISPLAY_ONLY 0x00
+#define HCI_IO_DISPLAY_YESNO 0x01
+#define HCI_IO_KEYBOARD_ONLY 0x02
+#define HCI_IO_NO_INPUT_OUTPUT 0x03
+
/* Link Key types */
#define HCI_LK_COMBINATION 0x00
#define HCI_LK_LOCAL_UNIT 0x01
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index f77885ea78c..3ede820d328 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -320,6 +320,7 @@ struct hci_conn {
__u32 passkey_notify;
__u8 passkey_entered;
__u16 disc_timeout;
+ __u16 setting;
unsigned long flags;
__u8 remote_cap;
@@ -569,7 +570,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
}
void hci_disconnect(struct hci_conn *conn, __u8 reason);
-void hci_setup_sync(struct hci_conn *conn, __u16 handle);
+bool hci_setup_sync(struct hci_conn *conn, __u16 handle);
void hci_sco_setup(struct hci_conn *conn, __u8 status);
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst);
@@ -584,6 +585,8 @@ struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle);
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
__u8 dst_type, __u8 sec_level, __u8 auth_type);
+struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ __u16 setting);
int hci_conn_check_link_mode(struct hci_conn *conn);
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level);
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type);
@@ -797,6 +800,7 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_lsto_capable(dev) ((dev)->features[0][7] & LMP_LSTO)
#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
#define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES)
+#define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT)
/* ----- Extended LMP capabilities ----- */
#define lmp_host_ssp_capable(dev) ((dev)->features[1][0] & LMP_HOST_SSP)
@@ -1213,4 +1217,8 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
u8 bdaddr_to_le(u8 bdaddr_type);
+#define SCO_AIRMODE_MASK 0x0003
+#define SCO_AIRMODE_CVSD 0x0000
+#define SCO_AIRMODE_TRANSP 0x0003
+
#endif /* __HCI_CORE_H */
diff --git a/include/net/bluetooth/sco.h b/include/net/bluetooth/sco.h
index 1e35c43657c..e252a31ee6b 100644
--- a/include/net/bluetooth/sco.h
+++ b/include/net/bluetooth/sco.h
@@ -73,6 +73,7 @@ struct sco_conn {
struct sco_pinfo {
struct bt_sock bt;
__u32 flags;
+ __u16 setting;
struct sco_conn *conn;
};
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index a14339c2985..829627d7b84 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -27,7 +27,7 @@
#include <linux/netdevice.h>
#include <net/ip.h>
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
struct napi_struct;
extern unsigned int sysctl_net_busy_read __read_mostly;
@@ -122,7 +122,8 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
if (rc > 0)
/* local bh are disabled so it is ok to use _BH */
NET_ADD_STATS_BH(sock_net(sk),
- LINUX_MIB_LOWLATENCYRXPACKETS, rc);
+ LINUX_MIB_BUSYPOLLRXPACKETS, rc);
+ cpu_relax();
} while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) &&
!need_resched() && !busy_loop_timeout(end_time));
@@ -146,7 +147,7 @@ static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
sk->sk_napi_id = skb->napi_id;
}
-#else /* CONFIG_NET_LL_RX_POLL */
+#else /* CONFIG_NET_RX_BUSY_POLL */
static inline unsigned long net_busy_loop_on(void)
{
return 0;
@@ -162,11 +163,6 @@ static inline bool sk_can_busy_loop(struct sock *sk)
return false;
}
-static inline bool sk_busy_poll(struct sock *sk, int nonblock)
-{
- return false;
-}
-
static inline void skb_mark_napi_id(struct sk_buff *skb,
struct napi_struct *napi)
{
@@ -181,5 +177,10 @@ static inline bool busy_loop_timeout(unsigned long end_time)
return true;
}
-#endif /* CONFIG_NET_LL_RX_POLL */
+static inline bool sk_busy_loop(struct sock *sk, int nonblock)
+{
+ return false;
+}
+
+#endif /* CONFIG_NET_RX_BUSY_POLL */
#endif /* _LINUX_NET_BUSY_POLL_H */
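The reason sk_busy_loop() gains a stub in the #else branch is so callers can stay #ifdef-free; a hedged sketch of the receive-path pattern, modelled loosely on how tcp_recvmsg() uses these helpers:

/* sketch: poll the NIC briefly before sleeping on an empty receive queue */
if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue))
	sk_busy_loop(sk, nonblock);	/* evaluates to false and does nothing
					 * when CONFIG_NET_RX_BUSY_POLL is off */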
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 7b0730aeb89..cb710913d5c 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -461,6 +461,33 @@ ieee80211_chandef_rate_flags(struct cfg80211_chan_def *chandef)
}
/**
+ * ieee80211_chandef_max_power - maximum transmission power for the chandef
+ *
+ * In some regulations, the transmit power may depend on the configured channel
+ * bandwidth, which may be defined in dBm/MHz. This function returns the actual
+ * max_power for non-standard channel widths (narrower than the standard 20 MHz).
+ *
+ * @chandef: channel definition for the channel
+ *
+ * Returns: maximum allowed transmission power in dBm for the chandef
+ */
+static inline int
+ieee80211_chandef_max_power(struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_5:
+ return min(chandef->chan->max_reg_power - 6,
+ chandef->chan->max_power);
+ case NL80211_CHAN_WIDTH_10:
+ return min(chandef->chan->max_reg_power - 3,
+ chandef->chan->max_power);
+ default:
+ break;
+ }
+ return chandef->chan->max_power;
+}
+
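As a quick check on the constants above: if the regulatory limit is a constant power density in dBm/MHz, as the comment suggests, then shrinking the channel from 20 MHz to 5 MHz reduces the permitted total power by 10*log10(20/5) ≈ 6 dB, and shrinking to 10 MHz reduces it by 10*log10(20/10) ≈ 3 dB, which is where the "- 6" and "- 3" adjustments come from.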
+/**
* enum survey_info_flags - survey information flags
*
* @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in
@@ -490,7 +517,7 @@ enum survey_info_flags {
* @channel: the channel this survey record reports, mandatory
* @filled: bitflag of flags from &enum survey_info_flags
* @noise: channel noise in dBm. This and all following fields are
- * optional
+ * optional
* @channel_time: amount of time in ms the radio spent on the channel
* @channel_time_busy: amount of time the primary channel was sensed busy
* @channel_time_ext_busy: amount of time the extension channel was sensed busy
@@ -546,9 +573,9 @@ struct cfg80211_crypto_settings {
/**
* struct cfg80211_beacon_data - beacon data
* @head: head portion of beacon (before TIM IE)
- * or %NULL if not changed
+ * or %NULL if not changed
* @tail: tail portion of beacon (after TIM IE)
- * or %NULL if not changed
+ * or %NULL if not changed
* @head_len: length of @head
* @tail_len: length of @tail
* @beacon_ies: extra information element(s) to add into Beacon frames or %NULL
@@ -639,6 +666,30 @@ struct cfg80211_ap_settings {
};
/**
+ * struct cfg80211_csa_settings - channel switch settings
+ *
+ * Used for channel switch
+ *
+ * @chandef: defines the channel to use after the switch
+ * @beacon_csa: beacon data while performing the switch
+ * @counter_offset_beacon: offset for the counter within the beacon (tail)
+ * @counter_offset_presp: offset for the counter within the probe response
+ * @beacon_after: beacon data to be used on the new channel
+ * @radar_required: whether radar detection is required on the new channel
+ * @block_tx: whether transmissions should be blocked while changing
+ * @count: number of beacons until switch
+ */
+struct cfg80211_csa_settings {
+ struct cfg80211_chan_def chandef;
+ struct cfg80211_beacon_data beacon_csa;
+ u16 counter_offset_beacon, counter_offset_presp;
+ struct cfg80211_beacon_data beacon_after;
+ bool radar_required;
+ bool block_tx;
+ u8 count;
+};
+
+/**
* enum station_parameters_apply_mask - station parameter values to apply
* @STATION_PARAM_APPLY_UAPSD: apply new uAPSD parameters (uapsd_queues, max_sp)
* @STATION_PARAM_APPLY_CAPABILITY: apply new capability
@@ -764,7 +815,7 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
* @STATION_INFO_PLINK_STATE: @plink_state filled
* @STATION_INFO_SIGNAL: @signal filled
* @STATION_INFO_TX_BITRATE: @txrate fields are filled
- * (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs)
+ * (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs)
* @STATION_INFO_RX_PACKETS: @rx_packets filled with 32-bit value
* @STATION_INFO_TX_PACKETS: @tx_packets filled with 32-bit value
* @STATION_INFO_TX_RETRIES: @tx_retries filled
@@ -1285,6 +1336,7 @@ struct cfg80211_ssid {
* @n_ssids: number of SSIDs
* @channels: channels to scan on.
* @n_channels: total number of channels to scan
+ * @scan_width: channel width for scanning
* @ie: optional information element(s) to add into Probe Request or %NULL
* @ie_len: length of ie in octets
* @flags: bit field of flags controlling operation
@@ -1300,6 +1352,7 @@ struct cfg80211_scan_request {
struct cfg80211_ssid *ssids;
int n_ssids;
u32 n_channels;
+ enum nl80211_bss_scan_width scan_width;
const u8 *ie;
size_t ie_len;
u32 flags;
@@ -1333,6 +1386,7 @@ struct cfg80211_match_set {
* @ssids: SSIDs to scan for (passed in the probe_reqs in active scans)
* @n_ssids: number of SSIDs
* @n_channels: total number of channels to scan
+ * @scan_width: channel width for scanning
* @interval: interval between each scheduled scan cycle
* @ie: optional information element(s) to add into Probe Request or %NULL
* @ie_len: length of ie in octets
@@ -1352,6 +1406,7 @@ struct cfg80211_sched_scan_request {
struct cfg80211_ssid *ssids;
int n_ssids;
u32 n_channels;
+ enum nl80211_bss_scan_width scan_width;
u32 interval;
const u8 *ie;
size_t ie_len;
@@ -1403,6 +1458,7 @@ struct cfg80211_bss_ies {
* for use in scan results and similar.
*
* @channel: channel this BSS is on
+ * @scan_width: width of the control channel
* @bssid: BSSID of the BSS
* @beacon_interval: the beacon interval as from the frame
* @capability: the capability field in host byte order
@@ -1424,6 +1480,7 @@ struct cfg80211_bss_ies {
*/
struct cfg80211_bss {
struct ieee80211_channel *channel;
+ enum nl80211_bss_scan_width scan_width;
const struct cfg80211_bss_ies __rcu *ies;
const struct cfg80211_bss_ies __rcu *beacon_ies;
@@ -1509,7 +1566,7 @@ enum cfg80211_assoc_req_flags {
* @prev_bssid: previous BSSID, if not %NULL use reassociate frame
* @flags: See &enum cfg80211_assoc_req_flags
* @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
- * will be used in ht_capa. Un-supported values will be ignored.
+ * will be used in ht_capa. Un-supported values will be ignored.
* @ht_capa_mask: The bits of ht_capa which are to be used.
* @vht_capa: VHT capability override
* @vht_capa_mask: VHT capability mask indicating which fields to use
@@ -1592,6 +1649,9 @@ struct cfg80211_disassoc_request {
* user space. Otherwise, port is marked authorized by default.
* @basic_rates: bitmap of basic rates to use when creating the IBSS
* @mcast_rate: per-band multicast rate index + 1 (0: disabled)
+ * @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
+ * will be used in ht_capa. Un-supported values will be ignored.
+ * @ht_capa_mask: The bits of ht_capa which are to be used.
*/
struct cfg80211_ibss_params {
u8 *ssid;
@@ -1605,6 +1665,8 @@ struct cfg80211_ibss_params {
bool privacy;
bool control_port;
int mcast_rate[IEEE80211_NUM_BANDS];
+ struct ieee80211_ht_cap ht_capa;
+ struct ieee80211_ht_cap ht_capa_mask;
};
/**
@@ -1630,9 +1692,9 @@ struct cfg80211_ibss_params {
* @key: WEP key for shared key authentication
* @flags: See &enum cfg80211_assoc_req_flags
* @bg_scan_period: Background scan period in seconds
- * or -1 to indicate that default value is to be used.
+ * or -1 to indicate that default value is to be used.
* @ht_capa: HT Capabilities over-rides. Values set in ht_capa_mask
- * will be used in ht_capa. Un-supported values will be ignored.
+ * will be used in ht_capa. Un-supported values will be ignored.
* @ht_capa_mask: The bits of ht_capa which are to be used.
* @vht_capa: VHT Capability overrides
* @vht_capa_mask: The bits of vht_capa which are to be used.
@@ -1698,7 +1760,7 @@ struct cfg80211_pmksa {
};
/**
- * struct cfg80211_wowlan_trig_pkt_pattern - packet pattern
+ * struct cfg80211_pkt_pattern - packet pattern
* @mask: bitmask where to match pattern and where to ignore bytes,
* one bit per byte, in same format as nl80211
* @pattern: bytes to match where bitmask is 1
@@ -1708,7 +1770,7 @@ struct cfg80211_pmksa {
* Internal note: @mask and @pattern are allocated in one chunk of
* memory, free @mask only!
*/
-struct cfg80211_wowlan_trig_pkt_pattern {
+struct cfg80211_pkt_pattern {
u8 *mask, *pattern;
int pattern_len;
int pkt_offset;
@@ -1770,12 +1832,41 @@ struct cfg80211_wowlan {
bool any, disconnect, magic_pkt, gtk_rekey_failure,
eap_identity_req, four_way_handshake,
rfkill_release;
- struct cfg80211_wowlan_trig_pkt_pattern *patterns;
+ struct cfg80211_pkt_pattern *patterns;
struct cfg80211_wowlan_tcp *tcp;
int n_patterns;
};
/**
+ * struct cfg80211_coalesce_rules - Coalesce rule parameters
+ *
+ * This structure defines coalesce rule for the device.
+ * @delay: maximum coalescing delay in msecs.
+ * @condition: condition for packet coalescence.
+ * see &enum nl80211_coalesce_condition.
+ * @patterns: array of packet patterns
+ * @n_patterns: number of patterns
+ */
+struct cfg80211_coalesce_rules {
+ int delay;
+ enum nl80211_coalesce_condition condition;
+ struct cfg80211_pkt_pattern *patterns;
+ int n_patterns;
+};
+
+/**
+ * struct cfg80211_coalesce - Packet coalescing settings
+ *
+ * This structure defines coalescing settings.
+ * @rules: array of coalesce rules
+ * @n_rules: number of rules
+ */
+struct cfg80211_coalesce {
+ struct cfg80211_coalesce_rules *rules;
+ int n_rules;
+};
+
+/**
* struct cfg80211_wowlan_wakeup - wakeup report
* @disconnect: woke up by getting disconnected
* @magic_pkt: woke up by receiving magic packet
@@ -1990,7 +2081,7 @@ struct cfg80211_update_ft_ies_params {
* @mgmt_tx_cancel_wait: Cancel the wait time from transmitting a management
* frame on another channel
*
- * @testmode_cmd: run a test mode command
+ * @testmode_cmd: run a test mode command; @wdev may be %NULL
* @testmode_dump: Implement a test mode dump. The cb->args[2] and up may be
* used by the function, but 0 and 1 must not be touched. Additionally,
* return error codes other than -ENOBUFS and -ENOENT will terminate the
@@ -2071,6 +2162,9 @@ struct cfg80211_update_ft_ies_params {
* driver can take the most appropriate actions.
* @crit_proto_stop: Indicates critical protocol no longer needs increased link
* reliability. This operation can not fail.
+ * @set_coalesce: Set coalesce parameters.
+ *
+ * @channel_switch: initiate channel-switch procedure (with CSA)
*/
struct cfg80211_ops {
int (*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -2196,7 +2290,8 @@ struct cfg80211_ops {
void (*rfkill_poll)(struct wiphy *wiphy);
#ifdef CONFIG_NL80211_TESTMODE
- int (*testmode_cmd)(struct wiphy *wiphy, void *data, int len);
+ int (*testmode_cmd)(struct wiphy *wiphy, struct wireless_dev *wdev,
+ void *data, int len);
int (*testmode_dump)(struct wiphy *wiphy, struct sk_buff *skb,
struct netlink_callback *cb,
void *data, int len);
@@ -2306,6 +2401,12 @@ struct cfg80211_ops {
u16 duration);
void (*crit_proto_stop)(struct wiphy *wiphy,
struct wireless_dev *wdev);
+ int (*set_coalesce)(struct wiphy *wiphy,
+ struct cfg80211_coalesce *coalesce);
+
+ int (*channel_switch)(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_csa_settings *params);
};
/*
@@ -2371,6 +2472,8 @@ struct cfg80211_ops {
* @WIPHY_FLAG_OFFCHAN_TX: Device supports direct off-channel TX.
* @WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL: Device supports remain-on-channel call.
* @WIPHY_FLAG_SUPPORTS_5_10_MHZ: Device supports 5 MHz and 10 MHz channels.
+ * @WIPHY_FLAG_HAS_CHANNEL_SWITCH: Device supports channel switch in
+ * beaconing mode (AP, IBSS, Mesh, ...).
*/
enum wiphy_flags {
WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0),
@@ -2395,6 +2498,7 @@ enum wiphy_flags {
WIPHY_FLAG_OFFCHAN_TX = BIT(20),
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL = BIT(21),
WIPHY_FLAG_SUPPORTS_5_10_MHZ = BIT(22),
+ WIPHY_FLAG_HAS_CHANNEL_SWITCH = BIT(23),
};
/**
@@ -2532,6 +2636,25 @@ struct wiphy_wowlan_support {
};
/**
+ * struct wiphy_coalesce_support - coalesce support data
+ * @n_rules: maximum number of coalesce rules
+ * @max_delay: maximum supported coalescing delay in msecs
+ * @n_patterns: number of supported patterns in a rule
+ * (see nl80211.h for the pattern definition)
+ * @pattern_max_len: maximum length of each pattern
+ * @pattern_min_len: minimum length of each pattern
+ * @max_pkt_offset: maximum Rx packet offset
+ */
+struct wiphy_coalesce_support {
+ int n_rules;
+ int max_delay;
+ int n_patterns;
+ int pattern_max_len;
+ int pattern_min_len;
+ int max_pkt_offset;
+};
+
+/**
* struct wiphy - wireless hardware description
* @reg_notifier: the driver's regulatory notification callback,
* note that if your driver uses wiphy_apply_custom_regulatory()
@@ -2641,6 +2764,7 @@ struct wiphy_wowlan_support {
* 802.11-2012 8.4.2.29 for the defined fields.
* @extended_capabilities_mask: mask of the valid values
* @extended_capabilities_len: length of the extended capabilities
+ * @coalesce: packet coalescing support information
*/
struct wiphy {
/* assign these fields before you register the wiphy */
@@ -2750,6 +2874,8 @@ struct wiphy {
const struct iw_handler_def *wext;
#endif
+ const struct wiphy_coalesce_support *coalesce;
+
char priv[0] __aligned(NETDEV_ALIGN);
};
@@ -2841,7 +2967,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv);
*
* Return: A non-negative wiphy index or a negative error code.
*/
-extern int wiphy_register(struct wiphy *wiphy);
+int wiphy_register(struct wiphy *wiphy);
/**
* wiphy_unregister - deregister a wiphy from cfg80211
@@ -2852,14 +2978,14 @@ extern int wiphy_register(struct wiphy *wiphy);
* pointer, but the call may sleep to wait for an outstanding
* request that is being handled.
*/
-extern void wiphy_unregister(struct wiphy *wiphy);
+void wiphy_unregister(struct wiphy *wiphy);
/**
* wiphy_free - free wiphy
*
* @wiphy: The wiphy to free
*/
-extern void wiphy_free(struct wiphy *wiphy);
+void wiphy_free(struct wiphy *wiphy);
/* internal structs */
struct cfg80211_conn;
@@ -3014,14 +3140,14 @@ static inline void *wdev_priv(struct wireless_dev *wdev)
* @band: band, necessary due to channel number overlap
* Return: The corresponding frequency (in MHz), or 0 if the conversion failed.
*/
-extern int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band);
+int ieee80211_channel_to_frequency(int chan, enum ieee80211_band band);
/**
* ieee80211_frequency_to_channel - convert frequency to channel number
* @freq: center frequency
* Return: The corresponding channel, or 0 if the conversion failed.
*/
-extern int ieee80211_frequency_to_channel(int freq);
+int ieee80211_frequency_to_channel(int freq);
/*
* Name indirection necessary because the ieee80211 code also has
@@ -3030,8 +3156,8 @@ extern int ieee80211_frequency_to_channel(int freq);
* to include both header files you'll (rightfully!) get a symbol
* clash.
*/
-extern struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
- int freq);
+struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
+ int freq);
/**
* ieee80211_get_channel - get channel struct from wiphy for specified frequency
* @wiphy: the struct wiphy to get the channel for
@@ -3063,11 +3189,13 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
/**
* ieee80211_mandatory_rates - get mandatory rates for a given band
* @sband: the band to look for rates in
+ * @scan_width: width of the control channel
*
* This function returns a bitmap of the mandatory rates for the given
* band, bits are set according to the rate position in the bitrates array.
*/
-u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband);
+u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband,
+ enum nl80211_bss_scan_width scan_width);
/*
* Radiotap parsing functions -- for controlled injection support
@@ -3141,13 +3269,14 @@ struct ieee80211_radiotap_iterator {
int _reset_on_ext;
};
-extern int ieee80211_radiotap_iterator_init(
- struct ieee80211_radiotap_iterator *iterator,
- struct ieee80211_radiotap_header *radiotap_header,
- int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns);
+int
+ieee80211_radiotap_iterator_init(struct ieee80211_radiotap_iterator *iterator,
+ struct ieee80211_radiotap_header *radiotap_header,
+ int max_length,
+ const struct ieee80211_radiotap_vendor_namespaces *vns);
-extern int ieee80211_radiotap_iterator_next(
- struct ieee80211_radiotap_iterator *iterator);
+int
+ieee80211_radiotap_iterator_next(struct ieee80211_radiotap_iterator *iterator);
extern const unsigned char rfc1042_header[6];
@@ -3307,7 +3436,7 @@ const u8 *cfg80211_find_vendor_ie(unsigned int oui, u8 oui_type,
*
* Return: 0 on success. -ENOMEM.
*/
-extern int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
+int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
/**
* wiphy_apply_custom_regulatory - apply a custom driver regulatory domain
@@ -3321,9 +3450,8 @@ extern int regulatory_hint(struct wiphy *wiphy, const char *alpha2);
* default channel settings will be disregarded. If no rule is found for a
* channel on the regulatory domain the channel will be disabled.
*/
-extern void wiphy_apply_custom_regulatory(
- struct wiphy *wiphy,
- const struct ieee80211_regdomain *regd);
+void wiphy_apply_custom_regulatory(struct wiphy *wiphy,
+ const struct ieee80211_regdomain *regd);
/**
* freq_reg_info - get regulatory information for the given frequency
@@ -3379,10 +3507,11 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy);
void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
/**
- * cfg80211_inform_bss_frame - inform cfg80211 of a received BSS frame
+ * cfg80211_inform_bss_width_frame - inform cfg80211 of a received BSS frame
*
* @wiphy: the wiphy reporting the BSS
* @channel: The channel the frame was received on
+ * @scan_width: width of the control channel
* @mgmt: the management frame (probe response or beacon)
* @len: length of the management frame
* @signal: the signal strength, type depends on the wiphy's signal_type
@@ -3395,16 +3524,29 @@ void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
* Or %NULL on error.
*/
struct cfg80211_bss * __must_check
+cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
+ struct ieee80211_channel *channel,
+ enum nl80211_bss_scan_width scan_width,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ s32 signal, gfp_t gfp);
+
+static inline struct cfg80211_bss * __must_check
cfg80211_inform_bss_frame(struct wiphy *wiphy,
struct ieee80211_channel *channel,
struct ieee80211_mgmt *mgmt, size_t len,
- s32 signal, gfp_t gfp);
+ s32 signal, gfp_t gfp)
+{
+ return cfg80211_inform_bss_width_frame(wiphy, channel,
+ NL80211_BSS_CHAN_WIDTH_20,
+ mgmt, len, signal, gfp);
+}
/**
* cfg80211_inform_bss - inform cfg80211 of a new BSS
*
* @wiphy: the wiphy reporting the BSS
* @channel: The channel the frame was received on
+ * @scan_width: width of the control channel
* @bssid: the BSSID of the BSS
* @tsf: the TSF sent by the peer in the beacon/probe response (or 0)
* @capability: the capability field sent by the peer
@@ -3421,11 +3563,26 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
* Or %NULL on error.
*/
struct cfg80211_bss * __must_check
+cfg80211_inform_bss_width(struct wiphy *wiphy,
+ struct ieee80211_channel *channel,
+ enum nl80211_bss_scan_width scan_width,
+ const u8 *bssid, u64 tsf, u16 capability,
+ u16 beacon_interval, const u8 *ie, size_t ielen,
+ s32 signal, gfp_t gfp);
+
+static inline struct cfg80211_bss * __must_check
cfg80211_inform_bss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
const u8 *bssid, u64 tsf, u16 capability,
u16 beacon_interval, const u8 *ie, size_t ielen,
- s32 signal, gfp_t gfp);
+ s32 signal, gfp_t gfp)
+{
+ return cfg80211_inform_bss_width(wiphy, channel,
+ NL80211_BSS_CHAN_WIDTH_20,
+ bssid, tsf, capability,
+ beacon_interval, ie, ielen, signal,
+ gfp);
+}
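A hedged driver-side sketch of the new width-aware reporting call (wiphy, rx_channel, cur_chandef, mgmt, len and signal_mbm are placeholders; cfg80211_chandef_to_scan_width() is the helper added a little further down in this header):

/* sketch: report a beacon heard on a 5/10 MHz channel with its real width */
struct cfg80211_bss *bss;

bss = cfg80211_inform_bss_width_frame(wiphy, rx_channel,
				      cfg80211_chandef_to_scan_width(&cur_chandef),
				      mgmt, len, signal_mbm, GFP_ATOMIC);
if (bss)
	cfg80211_put_bss(wiphy, bss);	/* drop the reference taken for us */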
struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
@@ -3471,6 +3628,19 @@ void cfg80211_put_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
*/
void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *bss);
+static inline enum nl80211_bss_scan_width
+cfg80211_chandef_to_scan_width(const struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_5:
+ return NL80211_BSS_CHAN_WIDTH_5;
+ case NL80211_CHAN_WIDTH_10:
+ return NL80211_BSS_CHAN_WIDTH_10;
+ default:
+ return NL80211_BSS_CHAN_WIDTH_20;
+ }
+}
+
/**
* cfg80211_rx_mlme_mgmt - notification of processed MLME management frame
* @dev: network device
@@ -3886,6 +4056,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
* @sig_dbm: signal strength in mBm, or 0 if unknown
* @buf: Management frame (header + body)
* @len: length of the frame data
+ * @flags: flags, as defined in enum nl80211_rxmgmt_flags
* @gfp: context flags
*
* This function is called whenever an Action frame is received for a station
@@ -3897,7 +4068,7 @@ void cfg80211_conn_failed(struct net_device *dev, const u8 *mac_addr,
* driver is responsible for rejecting the frame.
*/
bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_dbm,
- const u8 *buf, size_t len, gfp_t gfp);
+ const u8 *buf, size_t len, u32 flags, gfp_t gfp);
/**
* cfg80211_mgmt_tx_status - notification of TX status for management frame
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 600d1d705bb..8f59ca50477 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -107,11 +107,11 @@ static inline void csum_replace2(__sum16 *sum, __be16 from, __be16 to)
}
struct sk_buff;
-extern void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
- __be32 from, __be32 to, int pseudohdr);
-extern void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
- const __be32 *from, const __be32 *to,
- int pseudohdr);
+void inet_proto_csum_replace4(__sum16 *sum, struct sk_buff *skb,
+ __be32 from, __be32 to, int pseudohdr);
+void inet_proto_csum_replace16(__sum16 *sum, struct sk_buff *skb,
+ const __be32 *from, const __be32 *to,
+ int pseudohdr);
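A hedged NAT-style sketch of how the incremental helpers above are typically used (iph, tcph, old_daddr and new_daddr are placeholders): rewriting an address means fixing both the IP header checksum and the transport checksum, and the latter covers the pseudo-header, hence pseudohdr = 1.

/* sketch: rewrite the IPv4 destination and patch both checksums in place */
csum_replace4(&iph->check, old_daddr, new_daddr);
inet_proto_csum_replace4(&tcph->check, skb, old_daddr, new_daddr, 1);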
static inline void inet_proto_csum_replace2(__sum16 *sum, struct sk_buff *skb,
__be16 from, __be16 to,
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h
index 0fee0617fb7..33d03b64864 100644
--- a/include/net/cls_cgroup.h
+++ b/include/net/cls_cgroup.h
@@ -24,7 +24,7 @@ struct cgroup_cls_state
u32 classid;
};
-extern void sock_update_classid(struct sock *sk);
+void sock_update_classid(struct sock *sk);
#if IS_BUILTIN(CONFIG_NET_CLS_CGROUP)
static inline u32 task_cls_classid(struct task_struct *p)
@@ -35,7 +35,7 @@ static inline u32 task_cls_classid(struct task_struct *p)
return 0;
rcu_read_lock();
- classid = container_of(task_subsys_state(p, net_cls_subsys_id),
+ classid = container_of(task_css(p, net_cls_subsys_id),
struct cgroup_cls_state, css)->classid;
rcu_read_unlock();
@@ -51,7 +51,7 @@ static inline u32 task_cls_classid(struct task_struct *p)
return 0;
rcu_read_lock();
- css = task_subsys_state(p, net_cls_subsys_id);
+ css = task_css(p, net_cls_subsys_id);
if (css)
classid = container_of(css,
struct cgroup_cls_state, css)->classid;
diff --git a/include/net/dst.h b/include/net/dst.h
index 1f8fd109e22..3bc4865f826 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -311,11 +311,13 @@ static inline void skb_dst_force(struct sk_buff *skb)
* __skb_tunnel_rx - prepare skb for rx reinsert
* @skb: buffer
* @dev: tunnel device
+ * @net: netns for packet i/o
*
* After decapsulation, packet is going to re-enter (netif_rx()) our stack,
* so make some cleanups. (no accounting done)
*/
-static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
+static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
+ struct net *net)
{
skb->dev = dev;
@@ -327,8 +329,7 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
if (!skb->l4_rxhash)
skb->rxhash = 0;
skb_set_queue_mapping(skb, 0);
- skb_dst_drop(skb);
- nf_reset(skb);
+ skb_scrub_packet(skb, !net_eq(net, dev_net(dev)));
}
/**
@@ -340,12 +341,13 @@ static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
* so make some cleanups, and perform accounting.
* Note: this accounting is not SMP safe.
*/
-static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev)
+static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev,
+ struct net *net)
{
/* TODO : stats should be SMP safe */
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
- __skb_tunnel_rx(skb, dev);
+ __skb_tunnel_rx(skb, dev, net);
}
/* Children define the path of the packet through the
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index e361f488242..4b2b557fb0e 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -10,21 +10,25 @@
struct fib_rule {
struct list_head list;
- atomic_t refcnt;
int iifindex;
int oifindex;
u32 mark;
u32 mark_mask;
- u32 pref;
u32 flags;
u32 table;
u8 action;
+ /* 3 bytes hole, try to use */
u32 target;
struct fib_rule __rcu *ctarget;
+ struct net *fr_net;
+
+ atomic_t refcnt;
+ u32 pref;
+ int suppress_ifgroup;
+ int suppress_prefixlen;
char iifname[IFNAMSIZ];
char oifname[IFNAMSIZ];
struct rcu_head rcu;
- struct net * fr_net;
};
struct fib_lookup_arg {
@@ -46,6 +50,8 @@ struct fib_rules_ops {
int (*action)(struct fib_rule *,
struct flowi *, int,
struct fib_lookup_arg *);
+ bool (*suppress)(struct fib_rule *,
+ struct fib_lookup_arg *);
int (*match)(struct fib_rule *,
struct flowi *, int);
int (*configure)(struct fib_rule *,
@@ -80,6 +86,8 @@ struct fib_rules_ops {
[FRA_FWMARK] = { .type = NLA_U32 }, \
[FRA_FWMASK] = { .type = NLA_U32 }, \
[FRA_TABLE] = { .type = NLA_U32 }, \
+ [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
+ [FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 }, \
[FRA_GOTO] = { .type = NLA_U32 }
static inline void fib_rule_get(struct fib_rule *rule)
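Editor's note: the new ->suppress() hook, together with the suppress_prefixlen and suppress_ifgroup fields, lets a rule veto a result produced by an earlier rule's action. A hedged sketch of a family-specific handler; my_result_prefixlen() is a placeholder for however the address family pulls the matched prefix length out of the lookup result:

/* Returns true to suppress the looked-up route. Purely illustrative;
 * the real per-family handlers live in the IPv4/IPv6 rule code.
 */
static bool demo_fib_rule_suppress(struct fib_rule *rule,
				   struct fib_lookup_arg *arg)
{
	int prefixlen = my_result_prefixlen(arg);	/* hypothetical helper */

	/* A negative suppress_prefixlen is treated as "not configured". */
	if (rule->suppress_prefixlen < 0)
		return false;

	/* Suppress results that are no more specific than the threshold. */
	return prefixlen <= rule->suppress_prefixlen;
}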
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 93024a47e0e..8e0b6c856a1 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -61,6 +61,7 @@ struct genl_family {
struct list_head ops_list; /* private */
struct list_head family_list; /* private */
struct list_head mcast_groups; /* private */
+ struct module *module;
};
/**
@@ -121,9 +122,24 @@ struct genl_ops {
struct list_head ops_list;
};
-extern int genl_register_family(struct genl_family *family);
-extern int genl_register_family_with_ops(struct genl_family *family,
+extern int __genl_register_family(struct genl_family *family);
+
+static inline int genl_register_family(struct genl_family *family)
+{
+ family->module = THIS_MODULE;
+ return __genl_register_family(family);
+}
+
+extern int __genl_register_family_with_ops(struct genl_family *family,
struct genl_ops *ops, size_t n_ops);
+
+static inline int genl_register_family_with_ops(struct genl_family *family,
+ struct genl_ops *ops, size_t n_ops)
+{
+ family->module = THIS_MODULE;
+ return __genl_register_family_with_ops(family, ops, n_ops);
+}
+
extern int genl_unregister_family(struct genl_family *family);
extern int genl_register_ops(struct genl_family *, struct genl_ops *ops);
extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
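Editor's note: genl_register_family() and genl_register_family_with_ops() above become inline wrappers so that THIS_MODULE is captured at the caller and stored in the new ->module field (presumably so genetlink can pin the owning module while requests are in flight). An illustrative registration, with a made-up family name:

#include <linux/module.h>
#include <net/genetlink.h>

static struct genl_family demo_family = {
	.id	 = GENL_ID_GENERATE,
	.name	 = "demo_family",
	.version = 1,
	.maxattr = 0,
};

static int __init demo_init(void)
{
	/* The wrapper sets demo_family.module = THIS_MODULE before
	 * calling __genl_register_family().
	 */
	return genl_register_family(&demo_family);
}

static void __exit demo_exit(void)
{
	genl_unregister_family(&demo_family);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");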
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index c6d07cb074b..8b5b7143329 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -230,6 +230,10 @@ enum ieee80211_radiotap_type {
#define IEEE80211_CHAN_PASSIVE 0x0200 /* Only passive scan allowed */
#define IEEE80211_CHAN_DYN 0x0400 /* Dynamic CCK-OFDM channel */
#define IEEE80211_CHAN_GFSK 0x0800 /* GFSK channel (FHSS PHY) */
+#define IEEE80211_CHAN_GSM 0x1000 /* GSM (900 MHz) */
+#define IEEE80211_CHAN_STURBO 0x2000 /* Static Turbo */
+#define IEEE80211_CHAN_HALF 0x4000 /* Half channel (10 MHz wide) */
+#define IEEE80211_CHAN_QUARTER 0x8000 /* Quarter channel (5 MHz wide) */
/* For IEEE80211_RADIOTAP_FLAGS */
#define IEEE80211_RADIOTAP_F_CFP 0x01 /* sent/received
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 736b5fb9547..02ef7727bb5 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -171,12 +171,17 @@ struct inet6_dev {
struct ifmcaddr6 *mc_list;
struct ifmcaddr6 *mc_tomb;
spinlock_t mc_lock;
- unsigned char mc_qrv;
+
+ unsigned char mc_qrv; /* Query Robustness Variable */
unsigned char mc_gq_running;
unsigned char mc_ifc_count;
unsigned char mc_dad_count;
- unsigned long mc_v1_seen;
+
+ unsigned long mc_v1_seen; /* Max time we stay in MLDv1 mode */
+ unsigned long mc_qi; /* Query Interval */
+ unsigned long mc_qri; /* Query Response Interval */
unsigned long mc_maxdelay;
+
struct timer_list mc_gq_timer; /* general query timer */
struct timer_list mc_ifc_timer; /* interface change timer */
struct timer_list mc_dad_timer; /* dad complete mc timer */
diff --git a/include/net/ip.h b/include/net/ip.h
index a68f838a132..48f55979d84 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -194,7 +194,17 @@ static inline u64 snmp_fold_field64(void __percpu *mib[], int offt, size_t syncp
}
#endif
extern int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align);
-extern void snmp_mib_free(void __percpu *ptr[2]);
+
+static inline void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
+{
+ int i;
+
+ BUG_ON(ptr == NULL);
+ for (i = 0; i < SNMP_ARRAY_SZ; i++) {
+ free_percpu(ptr[i]);
+ ptr[i] = NULL;
+ }
+}
extern struct local_ports {
seqlock_t lock;
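Editor's note: snmp_mib_free() above turns into a static inline that releases each per-cpu slot and clears it. A hedged setup/teardown pairing; struct demo_mib is hypothetical, and the array is sized with SNMP_ARRAY_SZ from <net/snmp.h> to match what the free helper iterates over:

#include <linux/percpu.h>
#include <net/snmp.h>
#include <net/ip.h>

/* Hypothetical counter block; only its size and alignment matter. */
struct demo_mib {
	unsigned long mibs[4];
};

static void __percpu *demo_mib_ptrs[SNMP_ARRAY_SZ];

static int demo_mib_setup(void)
{
	return snmp_mib_init(demo_mib_ptrs, sizeof(struct demo_mib),
			     __alignof__(struct demo_mib));
}

static void demo_mib_teardown(void)
{
	/* Frees every per-cpu array and NULLs its slot. */
	snmp_mib_free(demo_mib_ptrs);
}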
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 2a601e7da1b..48ec25a7fcb 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -300,7 +300,7 @@ extern void inet6_rt_notify(int event, struct rt6_info *rt,
struct nl_info *info);
extern void fib6_run_gc(unsigned long expires,
- struct net *net);
+ struct net *net, bool force);
extern void fib6_gc_cleanup(void);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 260f83f16bc..f525e7038cc 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -112,8 +112,6 @@ extern struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
const struct in6_addr *addr,
bool anycast);
-extern int ip6_dst_hoplimit(struct dst_entry *dst);
-
/*
* support functions for ND
*
@@ -135,6 +133,8 @@ extern void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
extern void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk,
__be32 mtu);
extern void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
+extern void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
+ u32 mark);
extern void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);
struct netlink_callback;
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 4da5de10d1d..6d1549c4893 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -36,6 +36,7 @@ struct __ip6_tnl_parm {
struct ip6_tnl {
struct ip6_tnl __rcu *next; /* next tunnel in list */
struct net_device *dev; /* virtual device associated with tunnel */
+ struct net *net; /* netns for packet i/o */
struct __ip6_tnl_parm parms; /* tunnel configuration parameters */
struct flowi fl; /* flowi template for xmit */
struct dst_entry *dst_cache; /* cached dst */
@@ -74,7 +75,6 @@ static inline void ip6tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
struct net_device_stats *stats = &dev->stats;
int pkt_len, err;
- nf_reset(skb);
pkt_len = skb->len;
err = ip6_local_out(skb);
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 781b3cf86a2..a0a4a100f5c 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -86,12 +86,12 @@ struct tnl_ptk_info {
#define PACKET_RCVD 0
#define PACKET_REJECT 1
-#define IP_TNL_HASH_BITS 10
+#define IP_TNL_HASH_BITS 7
#define IP_TNL_HASH_SIZE (1 << IP_TNL_HASH_BITS)
struct ip_tunnel_net {
- struct hlist_head *tunnels;
struct net_device *fb_tunnel_dev;
+ struct hlist_head tunnels[IP_TNL_HASH_SIZE];
};
#ifdef CONFIG_INET
@@ -102,7 +102,7 @@ void ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
struct rtnl_link_ops *ops, char *devname);
-void ip_tunnel_delete_net(struct ip_tunnel_net *itn);
+void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops);
void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
const struct iphdr *tnl_params, const u8 protocol);
@@ -145,25 +145,10 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph,
return INET_ECN_encapsulate(tos, inner);
}
-static inline void tunnel_ip_select_ident(struct sk_buff *skb,
- const struct iphdr *old_iph,
- struct dst_entry *dst)
-{
- struct iphdr *iph = ip_hdr(skb);
-
- /* Use inner packet iph-id if possible. */
- if (skb->protocol == htons(ETH_P_IP) && old_iph->id)
- iph->id = old_iph->id;
- else
- __ip_select_ident(iph, dst,
- (skb_shinfo(skb)->gso_segs ?: 1) - 1);
-}
-
int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto);
-int iptunnel_xmit(struct net *net, struct rtable *rt,
- struct sk_buff *skb,
+int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 proto,
- __u8 tos, __u8 ttl, __be16 df);
+ __u8 tos, __u8 ttl, __be16 df, bool xnet);
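Editor's note: the iptunnel_xmit() prototype above loses its struct net argument and instead takes an xnet flag saying whether the skb is leaving its original namespace. A hedged caller sketch; the function and its simplified accounting are illustrative, not lifted from a real driver:

#include <linux/in.h>
#include <linux/netdevice.h>
#include <net/ip_tunnels.h>
#include <net/net_namespace.h>

static void demo_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
			     struct rtable *rt, __be32 src, __be32 dst,
			     u8 tos, u8 ttl, __be16 df, struct net *tnl_net)
{
	/* xnet is true when the packet crosses into another netns,
	 * which makes iptunnel_xmit() scrub the skb.
	 */
	bool xnet = !net_eq(tnl_net, dev_net(dev));
	int err;

	err = iptunnel_xmit(rt, skb, src, dst, IPPROTO_IPIP,
			    tos, ttl, df, xnet);
	/* err would normally be fed to iptunnel_xmit_stats(); omitted. */
	(void)err;
}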
static inline void iptunnel_xmit_stats(int err,
struct net_device_stats *err_stats,
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 5fe56498517..bbf1c8fb851 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -41,6 +41,7 @@
#define NEXTHDR_ICMP 58 /* ICMP for IPv6. */
#define NEXTHDR_NONE 59 /* No next header */
#define NEXTHDR_DEST 60 /* Destination options header. */
+#define NEXTHDR_SCTP 132 /* SCTP message. */
#define NEXTHDR_MOBILITY 135 /* Mobility header. */
#define NEXTHDR_MAX 255
@@ -657,6 +658,8 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
extern void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt);
+extern int ip6_dst_hoplimit(struct dst_entry *dst);
+
/*
* Header manipulation
*/
diff --git a/include/net/irda/irlan_common.h b/include/net/irda/irlan_common.h
index 0af8b8dfbc2..550c2d6ec7f 100644
--- a/include/net/irda/irlan_common.h
+++ b/include/net/irda/irlan_common.h
@@ -32,6 +32,7 @@
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
+#include <linux/if_ether.h>
#include <net/irda/irttp.h>
@@ -161,7 +162,7 @@ struct irlan_provider_cb {
int access_type; /* Access type */
__u16 send_arb_val;
- __u8 mac_address[6]; /* Generated MAC address for peer device */
+ __u8 mac_address[ETH_ALEN]; /* Generated MAC address for peer device */
};
/*
diff --git a/include/net/llc_if.h b/include/net/llc_if.h
index b595a004d31..f0cb909b60e 100644
--- a/include/net/llc_if.h
+++ b/include/net/llc_if.h
@@ -62,36 +62,6 @@
#define LLC_STATUS_CONFLICT 7 /* disconnect conn */
#define LLC_STATUS_RESET_DONE 8 /* */
-/**
- * llc_mac_null - determines if a address is a null mac address
- * @mac: Mac address to test if null.
- *
- * Determines if a given address is a null mac address. Returns 0 if the
- * address is not a null mac, 1 if the address is a null mac.
- */
-static inline int llc_mac_null(const u8 *mac)
-{
- return is_zero_ether_addr(mac);
-}
-
-static inline int llc_mac_multicast(const u8 *mac)
-{
- return is_multicast_ether_addr(mac);
-}
-/**
- * llc_mac_match - determines if two mac addresses are the same
- * @mac1: First mac address to compare.
- * @mac2: Second mac address to compare.
- *
- * Determines if two given mac address are the same. Returns 0 if there
- * is not a complete match up to len, 1 if a complete match up to len is
- * found.
- */
-static inline int llc_mac_match(const u8 *mac1, const u8 *mac2)
-{
- return !compare_ether_addr(mac1, mac2);
-}
-
extern int llc_establish_connection(struct sock *sk, u8 *lmac,
u8 *dmac, u8 dsap);
extern int llc_build_and_send_pkt(struct sock *sk, struct sk_buff *skb);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 5b7a3dadadd..cc6035f1a2f 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -152,11 +152,14 @@ struct ieee80211_low_level_stats {
* @IEEE80211_CHANCTX_CHANGE_WIDTH: The channel width changed
* @IEEE80211_CHANCTX_CHANGE_RX_CHAINS: The number of RX chains changed
* @IEEE80211_CHANCTX_CHANGE_RADAR: radar detection flag changed
+ * @IEEE80211_CHANCTX_CHANGE_CHANNEL: switched to another operating channel,
+ * this is used only with channel switching with CSA
*/
enum ieee80211_chanctx_change {
IEEE80211_CHANCTX_CHANGE_WIDTH = BIT(0),
IEEE80211_CHANCTX_CHANGE_RX_CHAINS = BIT(1),
IEEE80211_CHANCTX_CHANGE_RADAR = BIT(2),
+ IEEE80211_CHANCTX_CHANGE_CHANNEL = BIT(3),
};
/**
@@ -372,7 +375,7 @@ struct ieee80211_bss_conf {
};
/**
- * enum mac80211_tx_control_flags - flags to describe transmission information/status
+ * enum mac80211_tx_info_flags - flags to describe transmission information/status
*
* These flags are used with the @flags member of &ieee80211_tx_info.
*
@@ -468,7 +471,7 @@ struct ieee80211_bss_conf {
* Note: If you have to add new flags to the enumeration, then don't
* forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary.
*/
-enum mac80211_tx_control_flags {
+enum mac80211_tx_info_flags {
IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(0),
IEEE80211_TX_CTL_ASSIGN_SEQ = BIT(1),
IEEE80211_TX_CTL_NO_ACK = BIT(2),
@@ -504,6 +507,18 @@ enum mac80211_tx_control_flags {
#define IEEE80211_TX_CTL_STBC_SHIFT 23
+/**
+ * enum mac80211_tx_control_flags - flags to describe transmit control
+ *
+ * @IEEE80211_TX_CTRL_PORT_CTRL_PROTO: this frame is a port control
+ * protocol frame (e.g. EAP)
+ *
+ * These flags are used in tx_info->control.flags.
+ */
+enum mac80211_tx_control_flags {
+ IEEE80211_TX_CTRL_PORT_CTRL_PROTO = BIT(0),
+};
+
/*
* This definition is used as a mask to clear all temporary flags, which are
* set by the tx handlers for each transmission attempt by the mac80211 stack.
@@ -677,7 +692,8 @@ struct ieee80211_tx_info {
/* NB: vif can be NULL for injected frames */
struct ieee80211_vif *vif;
struct ieee80211_key_conf *hw_key;
- /* 8 bytes free */
+ u32 flags;
+ /* 4 bytes free */
} control;
struct {
struct ieee80211_tx_rate rates[IEEE80211_TX_MAX_RATES];
@@ -811,6 +827,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
* is stored in the @ampdu_delimiter_crc field)
* @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
+ * @RX_FLAG_10MHZ: 10 MHz (half channel) was used
+ * @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used
*/
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
@@ -839,6 +857,8 @@ enum mac80211_rx_flags {
RX_FLAG_80P80MHZ = BIT(24),
RX_FLAG_160MHZ = BIT(25),
RX_FLAG_STBC_MASK = BIT(26) | BIT(27),
+ RX_FLAG_10MHZ = BIT(28),
+ RX_FLAG_5MHZ = BIT(29),
};
#define RX_FLAG_STBC_SHIFT 26
@@ -1004,11 +1024,11 @@ enum ieee80211_smps_mode {
* @radar_enabled: whether radar detection is enabled
*
* @long_frame_max_tx_count: Maximum number of transmissions for a "long" frame
- * (a frame not RTS protected), called "dot11LongRetryLimit" in 802.11,
- * but actually means the number of transmissions not the number of retries
+ * (a frame not RTS protected), called "dot11LongRetryLimit" in 802.11,
+ * but actually means the number of transmissions not the number of retries
* @short_frame_max_tx_count: Maximum number of transmissions for a "short"
- * frame, called "dot11ShortRetryLimit" in 802.11, but actually means the
- * number of transmissions not the number of retries
+ * frame, called "dot11ShortRetryLimit" in 802.11, but actually means the
+ * number of transmissions not the number of retries
*
* @smps_mode: spatial multiplexing powersave mode; note that
* %IEEE80211_SMPS_STATIC is used when the device is not
@@ -1080,6 +1100,7 @@ enum ieee80211_vif_flags {
* @addr: address of this interface
* @p2p: indicates whether this AP or STA interface is a p2p
* interface, i.e. a GO or p2p-sta respectively
+ * @csa_active: marks whether a channel switch is going on
* @driver_flags: flags/capabilities the driver has for this interface,
* these need to be set (or cleared) when the interface is added
* or, if supported by the driver, the interface type is changed
@@ -1092,7 +1113,7 @@ enum ieee80211_vif_flags {
* be off when it is %NULL there can still be races and packets could be
* processed after it switches back to %NULL.
* @debugfs_dir: debugfs dentry, can be used by drivers to create own per
- * interface debug files. Note that it will be NULL for the virtual
+ * interface debug files. Note that it will be NULL for the virtual
* monitor interface (if that is requested.)
* @drv_priv: data area for driver use, will always be aligned to
* sizeof(void *).
@@ -1102,6 +1123,7 @@ struct ieee80211_vif {
struct ieee80211_bss_conf bss_conf;
u8 addr[ETH_ALEN];
bool p2p;
+ bool csa_active;
u8 cab_queue;
u8 hw_queue[IEEE80211_NUM_ACS];
@@ -1425,10 +1447,10 @@ struct ieee80211_tx_control {
* the stack.
*
* @IEEE80211_HW_CONNECTION_MONITOR:
- * The hardware performs its own connection monitoring, including
- * periodic keep-alives to the AP and probing the AP on beacon loss.
- * When this flag is set, signaling beacon-loss will cause an immediate
- * change to disassociated state.
+ * The hardware performs its own connection monitoring, including
+ * periodic keep-alives to the AP and probing the AP on beacon loss.
+ * When this flag is set, signaling beacon-loss will cause an immediate
+ * change to disassociated state.
*
* @IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC:
* This device needs to get data from beacon before association (i.e.
@@ -1499,6 +1521,7 @@ enum ieee80211_hw_flags {
IEEE80211_HW_SUPPORTS_RC_TABLE = 1<<24,
IEEE80211_HW_P2P_DEV_ADDR_FOR_INTF = 1<<25,
IEEE80211_HW_TIMING_BEACON_ONLY = 1<<26,
+ IEEE80211_HW_SUPPORTS_HT_CCK_RATES = 1<<27,
};
/**
@@ -1526,10 +1549,10 @@ enum ieee80211_hw_flags {
* @channel_change_time: time (in microseconds) it takes to change channels.
*
* @max_signal: Maximum value for signal (rssi) in RX information, used
- * only when @IEEE80211_HW_SIGNAL_UNSPEC or @IEEE80211_HW_SIGNAL_DB
+ * only when @IEEE80211_HW_SIGNAL_UNSPEC or @IEEE80211_HW_SIGNAL_DB
*
* @max_listen_interval: max listen interval in units of beacon interval
- * that HW supports
+ * that HW supports
*
* @queues: number of available hardware transmit queues for
* data packets. WMM/QoS requires at least four, these
@@ -2443,7 +2466,7 @@ enum ieee80211_roc_type {
* The callback can sleep.
*
* @set_tsf: Set the TSF timer to the specified value in the firmware/hardware.
- * Currently, this is only used for IBSS mode debugging. Is not a
+ * Currently, this is only used for IBSS mode debugging. Is not a
* required function.
* The callback can sleep.
*
@@ -2494,8 +2517,8 @@ enum ieee80211_roc_type {
* in IEEE 802.11-2007 section 17.3.8.6 and modify ACK timeout
* accordingly. This callback is not required and may sleep.
*
- * @testmode_cmd: Implement a cfg80211 test mode command.
- * The callback can sleep.
+ * @testmode_cmd: Implement a cfg80211 test mode command. The passed @vif may
+ * be %NULL. The callback can sleep.
* @testmode_dump: Implement a cfg80211 test mode dump. The callback can sleep.
*
* @flush: Flush all pending frames from the hardware queue, making sure
@@ -2633,6 +2656,16 @@ enum ieee80211_roc_type {
* @ipv6_addr_change: IPv6 address assignment on the given interface changed.
* Currently, this is only called for managed or P2P client interfaces.
* This callback is optional; it must not sleep.
+ *
+ * @channel_switch_beacon: Starts a channel switch to a new channel.
+ * Beacons are modified to include CSA or ECSA IEs before calling this
+ * function. The corresponding count fields in these IEs must be
+ * decremented, and when they reach zero the driver must call
+ * ieee80211_csa_finish(). Drivers which use ieee80211_beacon_get()
+ * get the csa counter decremented by mac80211, but must check if it is
+ * zero using ieee80211_csa_is_complete() after the beacon has been
+ * transmitted and then call ieee80211_csa_finish().
+ *
*/
struct ieee80211_ops {
void (*tx)(struct ieee80211_hw *hw,
@@ -2746,7 +2779,8 @@ struct ieee80211_ops {
void (*rfkill_poll)(struct ieee80211_hw *hw);
void (*set_coverage_class)(struct ieee80211_hw *hw, u8 coverage_class);
#ifdef CONFIG_NL80211_TESTMODE
- int (*testmode_cmd)(struct ieee80211_hw *hw, void *data, int len);
+ int (*testmode_cmd)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len);
int (*testmode_dump)(struct ieee80211_hw *hw, struct sk_buff *skb,
struct netlink_callback *cb,
void *data, int len);
@@ -2820,6 +2854,9 @@ struct ieee80211_ops {
struct ieee80211_vif *vif,
struct inet6_dev *idev);
#endif
+ void (*channel_switch_beacon)(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_chan_def *chandef);
};
/**
@@ -2877,14 +2914,14 @@ enum ieee80211_tpt_led_trigger_flags {
};
#ifdef CONFIG_MAC80211_LEDS
-extern char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw);
-extern char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw);
-extern char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw);
-extern char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
-extern char *__ieee80211_create_tpt_led_trigger(
- struct ieee80211_hw *hw, unsigned int flags,
- const struct ieee80211_tpt_blink *blink_table,
- unsigned int blink_table_len);
+char *__ieee80211_get_tx_led_name(struct ieee80211_hw *hw);
+char *__ieee80211_get_rx_led_name(struct ieee80211_hw *hw);
+char *__ieee80211_get_assoc_led_name(struct ieee80211_hw *hw);
+char *__ieee80211_get_radio_led_name(struct ieee80211_hw *hw);
+char *__ieee80211_create_tpt_led_trigger(struct ieee80211_hw *hw,
+ unsigned int flags,
+ const struct ieee80211_tpt_blink *blink_table,
+ unsigned int blink_table_len);
#endif
/**
* ieee80211_get_tx_led_name - get name of TX LED
@@ -3315,6 +3352,25 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
}
/**
+ * ieee80211_csa_finish - notify mac80211 about channel switch
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * After a channel switch announcement was scheduled and the counter in this
+ * announcement hit zero, this function must be called by the driver to
+ * notify mac80211 that the channel can be changed.
+ */
+void ieee80211_csa_finish(struct ieee80211_vif *vif);
+
+/**
+ * ieee80211_csa_is_complete - find out if counters reached zero
+ * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ *
+ * This function returns whether the channel switch counters reached zero.
+ */
+bool ieee80211_csa_is_complete(struct ieee80211_vif *vif);
+
+
+/**
* ieee80211_proberesp_get - retrieve a Probe Response template
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
@@ -3633,6 +3689,89 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
int tid, struct ieee80211_key_seq *seq);
/**
+ * ieee80211_set_key_tx_seq - set key TX sequence counter
+ *
+ * @keyconf: the parameter passed with the set key
+ * @seq: new sequence data
+ *
+ * This function allows a driver to set the current TX IV/PNs for the
+ * given key. This is useful when resuming from WoWLAN sleep and the
+ * device may have transmitted frames using the PTK, e.g. replies to
+ * ARP requests.
+ *
+ * Note that this function may only be called when no TX processing
+ * can be done concurrently.
+ */
+void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
+ struct ieee80211_key_seq *seq);
+
+/**
+ * ieee80211_set_key_rx_seq - set key RX sequence counter
+ *
+ * @keyconf: the parameter passed with the set key
+ * @tid: The TID, or -1 for the management frame value (CCMP only);
+ * the value on TID 0 is also used for non-QoS frames. For
+ * CMAC, only TID 0 is valid.
+ * @seq: new sequence data
+ *
+ * This function allows a driver to set the current RX IV/PNs for the
+ * given key. This is useful when resuming from WoWLAN sleep and GTK
+ * rekey may have been done while suspended. It should not be called
+ * if IV checking is done by the device and not by mac80211.
+ *
+ * Note that this function may only be called when no RX processing
+ * can be done concurrently.
+ */
+void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
+ int tid, struct ieee80211_key_seq *seq);
+
+/**
+ * ieee80211_remove_key - remove the given key
+ * @keyconf: the parameter passed with the set key
+ *
+ * Remove the given key. If the key was uploaded to the hardware at the
+ * time this function is called, it is not deleted in the hardware but
+ * instead assumed to have been removed already.
+ *
+ * Note that due to locking considerations this function can (currently)
+ * only be called during key iteration (ieee80211_iter_keys().)
+ */
+void ieee80211_remove_key(struct ieee80211_key_conf *keyconf);
+
+/**
+ * ieee80211_gtk_rekey_add - add a GTK key from rekeying during WoWLAN
+ * @vif: the virtual interface to add the key on
+ * @keyconf: new key data
+ *
+ * When GTK rekeying was done while the system was suspended, (a) new
+ * key(s) will be available. These will be needed by mac80211 for proper
+ * RX processing, so this function allows setting them.
+ *
+ * The function returns the newly allocated key structure, which will
+ * have similar contents to the passed key configuration but point to
+ * mac80211-owned memory. In case of errors, the function returns an
+ * ERR_PTR(), use IS_ERR() etc.
+ *
+ * Note that this function assumes the key isn't added to hardware
+ * acceleration, so no TX will be done with the key. Since it's a GTK
+ * on managed (station) networks, this is true anyway. If the driver
+ * calls this function from the resume callback and subsequently uses
+ * the return code 1 to reconfigure the device, this key will be part
+ * of the reconfiguration.
+ *
+ * Note that the driver should also call ieee80211_set_key_rx_seq()
+ * for the new key for each TID to set up sequence counters properly.
+ *
+ * IMPORTANT: If this replaces a key that is present in the hardware,
+ * then it will attempt to remove it during this call. In many cases
+ * this isn't what you want, so call ieee80211_remove_key() first for
+ * the key that's being replaced.
+ */
+struct ieee80211_key_conf *
+ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf);
+
+/**
* ieee80211_gtk_rekey_notify - notify userspace supplicant of rekeying
* @vif: virtual interface the rekeying was done on
* @bssid: The BSSID of the AP, for checking association
@@ -4204,8 +4343,10 @@ struct rate_control_ops {
void *(*alloc_sta)(void *priv, struct ieee80211_sta *sta, gfp_t gfp);
void (*rate_init)(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta);
void (*rate_update)(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta,
u32 changed);
void (*free_sta)(void *priv, struct ieee80211_sta *sta,
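Editor's note: the channel-switch additions above (csa_active, channel_switch_beacon(), ieee80211_csa_finish(), ieee80211_csa_is_complete()) split CSA handling between mac80211 and the driver. A hedged sketch of the beacon side for a driver that gets its beacons from ieee80211_beacon_get(); everything except the mac80211 symbols is made up:

#include <net/mac80211.h>

/* Imaginary completion hook the driver runs after a beacon that came
 * from ieee80211_beacon_get() has been transmitted.
 */
static void demo_beacon_tx_done(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif)
{
	/* mac80211 decrements the CSA countdown when building the
	 * beacon; the driver only checks for zero and then lets
	 * mac80211 perform the actual switch.
	 */
	if (vif->csa_active && ieee80211_csa_is_complete(vif))
		ieee80211_csa_finish(vif);
}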
diff --git a/include/net/mld.h b/include/net/mld.h
index 467143cd4e2..faa1d161bf2 100644
--- a/include/net/mld.h
+++ b/include/net/mld.h
@@ -63,13 +63,48 @@ struct mld2_query {
#define mld2q_mrc mld2q_hdr.icmp6_maxdelay
#define mld2q_resv1 mld2q_hdr.icmp6_dataun.un_data16[1]
-/* Max Response Code */
-#define MLDV2_MASK(value, nb) ((nb)>=32 ? (value) : ((1<<(nb))-1) & (value))
-#define MLDV2_EXP(thresh, nbmant, nbexp, value) \
- ((value) < (thresh) ? (value) : \
- ((MLDV2_MASK(value, nbmant) | (1<<(nbmant))) << \
- (MLDV2_MASK((value) >> (nbmant), nbexp) + (nbexp))))
-
-#define MLDV2_MRC(value) MLDV2_EXP(0x8000, 12, 3, value)
+/* RFC3810, 5.1.3. Maximum Response Code:
+ *
+ * If Maximum Response Code >= 32768, Maximum Response Code represents a
+ * floating-point value as follows:
+ *
+ * 0 1 2 3 4 5 6 7 8 9 A B C D E F
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |1| exp | mant |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+#define MLDV2_MRC_EXP(value) (((value) >> 12) & 0x0007)
+#define MLDV2_MRC_MAN(value) ((value) & 0x0fff)
+
+/* RFC3810, 5.1.9. QQIC (Querier's Query Interval Code):
+ *
+ * If QQIC >= 128, QQIC represents a floating-point value as follows:
+ *
+ * 0 1 2 3 4 5 6 7
+ * +-+-+-+-+-+-+-+-+
+ * |1| exp | mant |
+ * +-+-+-+-+-+-+-+-+
+ */
+#define MLDV2_QQIC_EXP(value) (((value) >> 4) & 0x07)
+#define MLDV2_QQIC_MAN(value) ((value) & 0x0f)
+
+static inline unsigned long mldv2_mrc(const struct mld2_query *mlh2)
+{
+ /* RFC3810, 5.1.3. Maximum Response Code */
+ unsigned long ret, mc_mrc = ntohs(mlh2->mld2q_mrc);
+
+ if (mc_mrc < 32768) {
+ ret = mc_mrc;
+ } else {
+ unsigned long mc_man, mc_exp;
+
+ mc_exp = MLDV2_MRC_EXP(mc_mrc);
+ mc_man = MLDV2_MRC_MAN(mc_mrc);
+
+ ret = (mc_man | 0x1000) << (mc_exp + 3);
+ }
+
+ return ret;
+}
#endif
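Editor's note: mldv2_mrc() above decodes the Maximum Response Code; the QQIC field uses the same mantissa/exponent encoding with the MLDV2_QQIC_* masks defined in this header. A companion decoder along these lines is a reasonable sketch (not part of this patch):

/* RFC 3810, 5.1.9: QQIC < 128 is taken literally, otherwise
 * QQI = (mant | 0x10) << (exp + 3).
 */
static inline unsigned long mldv2_qqic(u8 qqic)
{
	unsigned long mant, exp;

	if (qqic < 128)
		return qqic;

	exp  = MLDV2_QQIC_EXP(qqic);
	mant = MLDV2_QQIC_MAN(qqic);

	return (mant | 0x10) << (exp + 3);
}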
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 949d77528f2..3c4211f0bed 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -119,7 +119,7 @@ extern struct ndisc_options *ndisc_parse_options(u8 *opt, int opt_len,
* if RFC 3831 IPv6-over-Fibre Channel is ever implemented it may
* also need a pad of 2.
*/
-static int ndisc_addr_option_pad(unsigned short type)
+static inline int ndisc_addr_option_pad(unsigned short type)
{
switch (type) {
case ARPHRD_INFINIBAND: return 2;
@@ -204,6 +204,11 @@ extern void ndisc_send_ns(struct net_device *dev,
extern void ndisc_send_rs(struct net_device *dev,
const struct in6_addr *saddr,
const struct in6_addr *daddr);
+extern void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
+ const struct in6_addr *daddr,
+ const struct in6_addr *solicited_addr,
+ bool router, bool solicited, bool override,
+ bool inc_opt);
extern void ndisc_send_redirect(struct sk_buff *skb,
const struct in6_addr *target);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 7e748ad8b50..536501a3e58 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -195,68 +195,67 @@ static inline void *neighbour_priv(const struct neighbour *n)
#define NEIGH_UPDATE_F_ISROUTER 0x40000000
#define NEIGH_UPDATE_F_ADMIN 0x80000000
-extern void neigh_table_init(struct neigh_table *tbl);
-extern int neigh_table_clear(struct neigh_table *tbl);
-extern struct neighbour * neigh_lookup(struct neigh_table *tbl,
- const void *pkey,
- struct net_device *dev);
-extern struct neighbour * neigh_lookup_nodev(struct neigh_table *tbl,
- struct net *net,
- const void *pkey);
-extern struct neighbour * __neigh_create(struct neigh_table *tbl,
- const void *pkey,
- struct net_device *dev,
- bool want_ref);
+void neigh_table_init(struct neigh_table *tbl);
+int neigh_table_clear(struct neigh_table *tbl);
+struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
+ struct net_device *dev);
+struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
+ const void *pkey);
+struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
+ struct net_device *dev, bool want_ref);
static inline struct neighbour *neigh_create(struct neigh_table *tbl,
const void *pkey,
struct net_device *dev)
{
return __neigh_create(tbl, pkey, dev, true);
}
-extern void neigh_destroy(struct neighbour *neigh);
-extern int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
-extern int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
- u32 flags);
-extern void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
-extern int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
-extern int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
-extern int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
-extern int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb);
-extern int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
-extern struct neighbour *neigh_event_ns(struct neigh_table *tbl,
+void neigh_destroy(struct neighbour *neigh);
+int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
+int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags);
+void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
+int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
+int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb);
+int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb);
+int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb);
+int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb);
+struct neighbour *neigh_event_ns(struct neigh_table *tbl,
u8 *lladdr, void *saddr,
struct net_device *dev);
-extern struct neigh_parms *neigh_parms_alloc(struct net_device *dev, struct neigh_table *tbl);
-extern void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);
+struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
+ struct neigh_table *tbl);
+void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms);
static inline
-struct net *neigh_parms_net(const struct neigh_parms *parms)
+struct net *neigh_parms_net(const struct neigh_parms *parms)
{
return read_pnet(&parms->net);
}
-extern unsigned long neigh_rand_reach_time(unsigned long base);
+unsigned long neigh_rand_reach_time(unsigned long base);
-extern void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
- struct sk_buff *skb);
-extern struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net, const void *key, struct net_device *dev, int creat);
-extern struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
- struct net *net,
- const void *key,
- struct net_device *dev);
-extern int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key, struct net_device *dev);
+void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
+ struct sk_buff *skb);
+struct pneigh_entry *pneigh_lookup(struct neigh_table *tbl, struct net *net,
+ const void *key, struct net_device *dev,
+ int creat);
+struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl, struct net *net,
+ const void *key, struct net_device *dev);
+int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *key,
+ struct net_device *dev);
-static inline
-struct net *pneigh_net(const struct pneigh_entry *pneigh)
+static inline struct net *pneigh_net(const struct pneigh_entry *pneigh)
{
return read_pnet(&pneigh->net);
}
-extern void neigh_app_ns(struct neighbour *n);
-extern void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie);
-extern void __neigh_for_each_release(struct neigh_table *tbl, int (*cb)(struct neighbour *));
-extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_entry *));
+void neigh_app_ns(struct neighbour *n);
+void neigh_for_each(struct neigh_table *tbl,
+ void (*cb)(struct neighbour *, void *), void *cookie);
+void __neigh_for_each_release(struct neigh_table *tbl,
+ int (*cb)(struct neighbour *));
+void pneigh_for_each(struct neigh_table *tbl,
+ void (*cb)(struct pneigh_entry *));
struct neigh_seq_state {
struct seq_net_private p;
@@ -270,15 +269,14 @@ struct neigh_seq_state {
#define NEIGH_SEQ_IS_PNEIGH 0x00000002
#define NEIGH_SEQ_SKIP_NOARP 0x00000004
};
-extern void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *, unsigned int);
-extern void *neigh_seq_next(struct seq_file *, void *, loff_t *);
-extern void neigh_seq_stop(struct seq_file *, void *);
-
-extern int neigh_sysctl_register(struct net_device *dev,
- struct neigh_parms *p,
- char *p_name,
- proc_handler *proc_handler);
-extern void neigh_sysctl_unregister(struct neigh_parms *p);
+void *neigh_seq_start(struct seq_file *, loff_t *, struct neigh_table *,
+ unsigned int);
+void *neigh_seq_next(struct seq_file *, void *, loff_t *);
+void neigh_seq_stop(struct seq_file *, void *);
+
+int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
+ char *p_name, proc_handler *proc_handler);
+void neigh_sysctl_unregister(struct neigh_parms *p);
static inline void __neigh_parms_put(struct neigh_parms *parms)
{
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 84e37b1ca9e..1313456a099 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -119,7 +119,6 @@ struct net {
struct netns_ipvs *ipvs;
#endif
struct sock *diag_nlsk;
- atomic_t rt_genid;
atomic_t fnhe_genid;
};
@@ -333,14 +332,42 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
}
#endif
-static inline int rt_genid(struct net *net)
+static inline int rt_genid_ipv4(struct net *net)
{
- return atomic_read(&net->rt_genid);
+ return atomic_read(&net->ipv4.rt_genid);
}
-static inline void rt_genid_bump(struct net *net)
+static inline void rt_genid_bump_ipv4(struct net *net)
{
- atomic_inc(&net->rt_genid);
+ atomic_inc(&net->ipv4.rt_genid);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline int rt_genid_ipv6(struct net *net)
+{
+ return atomic_read(&net->ipv6.rt_genid);
+}
+
+static inline void rt_genid_bump_ipv6(struct net *net)
+{
+ atomic_inc(&net->ipv6.rt_genid);
+}
+#else
+static inline int rt_genid_ipv6(struct net *net)
+{
+ return 0;
+}
+
+static inline void rt_genid_bump_ipv6(struct net *net)
+{
+}
+#endif
+
+/* For callers who don't really care about whether it's IPv4 or IPv6 */
+static inline void rt_genid_bump_all(struct net *net)
+{
+ rt_genid_bump_ipv4(net);
+ rt_genid_bump_ipv6(net);
}
static inline int fnhe_genid(struct net *net)
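Editor's note: rt_genid moves out of struct net into the per-family netns structures (see the netns/ipv4.h and netns/ipv6.h hunks below), and rt_genid_bump_all() covers callers that must invalidate cached routes for both families. A minimal hedged example:

/* Illustrative: flush cached dsts for IPv4 and IPv6 after an event
 * that is not specific to either family.
 */
static void demo_flush_route_caches(struct net *net)
{
	rt_genid_bump_all(net);
}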
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 644d9c223d2..0c1288a50e8 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -181,8 +181,7 @@ __nf_conntrack_find(struct net *net, u16 zone,
const struct nf_conntrack_tuple *tuple);
extern int nf_conntrack_hash_check_insert(struct nf_conn *ct);
-extern void nf_ct_delete_from_lists(struct nf_conn *ct);
-extern void nf_ct_dying_timeout(struct nf_conn *ct);
+bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
extern void nf_conntrack_flush_report(struct net *net, u32 portid, int report);
@@ -235,7 +234,7 @@ static inline bool nf_ct_kill(struct nf_conn *ct)
}
/* These are for NAT. Icky. */
-extern s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
+extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
enum ip_conntrack_dir dir,
u32 seq);
@@ -249,7 +248,9 @@ extern void nf_ct_untracked_status_or(unsigned long bits);
/* Iterate over all conntracks: if iter returns true, it's deleted. */
extern void
-nf_ct_iterate_cleanup(struct net *net, int (*iter)(struct nf_conn *i, void *data), void *data);
+nf_ct_iterate_cleanup(struct net *net,
+ int (*iter)(struct nf_conn *i, void *data),
+ void *data, u32 portid, int report);
extern void nf_conntrack_free(struct nf_conn *ct);
extern struct nf_conn *
nf_conntrack_alloc(struct net *net, u16 zone,
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 977bc8a4644..ff95434e50c 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -10,6 +10,7 @@ enum nf_ct_ext_id {
#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE)
NF_CT_EXT_NAT,
#endif
+ NF_CT_EXT_SEQADJ,
NF_CT_EXT_ACCT,
#ifdef CONFIG_NF_CONNTRACK_EVENTS
NF_CT_EXT_ECACHE,
@@ -26,17 +27,22 @@ enum nf_ct_ext_id {
#ifdef CONFIG_NF_CONNTRACK_LABELS
NF_CT_EXT_LABELS,
#endif
+#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
+ NF_CT_EXT_SYNPROXY,
+#endif
NF_CT_EXT_NUM,
};
#define NF_CT_EXT_HELPER_TYPE struct nf_conn_help
#define NF_CT_EXT_NAT_TYPE struct nf_conn_nat
+#define NF_CT_EXT_SEQADJ_TYPE struct nf_conn_seqadj
#define NF_CT_EXT_ACCT_TYPE struct nf_conn_counter
#define NF_CT_EXT_ECACHE_TYPE struct nf_conntrack_ecache
#define NF_CT_EXT_ZONE_TYPE struct nf_conntrack_zone
#define NF_CT_EXT_TSTAMP_TYPE struct nf_conn_tstamp
#define NF_CT_EXT_TIMEOUT_TYPE struct nf_conn_timeout
#define NF_CT_EXT_LABELS_TYPE struct nf_conn_labels
+#define NF_CT_EXT_SYNPROXY_TYPE struct nf_conn_synproxy
/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 914d8d90079..b411d7b17de 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -148,17 +148,10 @@ extern int nf_ct_port_nlattr_tuple_size(void);
extern const struct nla_policy nf_ct_port_nla_policy[];
#ifdef CONFIG_SYSCTL
-#ifdef DEBUG_INVALID_PACKETS
#define LOG_INVALID(net, proto) \
((net)->ct.sysctl_log_invalid == (proto) || \
(net)->ct.sysctl_log_invalid == IPPROTO_RAW)
#else
-#define LOG_INVALID(net, proto) \
- (((net)->ct.sysctl_log_invalid == (proto) || \
- (net)->ct.sysctl_log_invalid == IPPROTO_RAW) \
- && net_ratelimit())
-#endif
-#else
static inline int LOG_INVALID(struct net *net, int proto) { return 0; }
#endif /* CONFIG_SYSCTL */
diff --git a/include/net/netfilter/nf_conntrack_seqadj.h b/include/net/netfilter/nf_conntrack_seqadj.h
new file mode 100644
index 00000000000..f6177a5fe0c
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_seqadj.h
@@ -0,0 +1,51 @@
+#ifndef _NF_CONNTRACK_SEQADJ_H
+#define _NF_CONNTRACK_SEQADJ_H
+
+#include <net/netfilter/nf_conntrack_extend.h>
+
+/**
+ * struct nf_ct_seqadj - sequence number adjustment information
+ *
+ * @correction_pos: position of the last TCP sequence number modification
+ * @offset_before: sequence number offset before last modification
+ * @offset_after: sequence number offset after last modification
+ */
+struct nf_ct_seqadj {
+ u32 correction_pos;
+ s32 offset_before;
+ s32 offset_after;
+};
+
+struct nf_conn_seqadj {
+ struct nf_ct_seqadj seq[IP_CT_DIR_MAX];
+};
+
+static inline struct nf_conn_seqadj *nfct_seqadj(const struct nf_conn *ct)
+{
+ return nf_ct_ext_find(ct, NF_CT_EXT_SEQADJ);
+}
+
+static inline struct nf_conn_seqadj *nfct_seqadj_ext_add(struct nf_conn *ct)
+{
+ return nf_ct_ext_add(ct, NF_CT_EXT_SEQADJ, GFP_ATOMIC);
+}
+
+extern int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ s32 off);
+extern int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ __be32 seq, s32 off);
+extern void nf_ct_tcp_seqadj_set(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ s32 off);
+
+extern int nf_ct_seq_adjust(struct sk_buff *skb,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ unsigned int protoff);
+extern s32 nf_ct_seq_offset(const struct nf_conn *ct, enum ip_conntrack_dir,
+ u32 seq);
+
+extern int nf_conntrack_seqadj_init(void);
+extern void nf_conntrack_seqadj_fini(void);
+
+#endif /* _NF_CONNTRACK_SEQADJ_H */
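Editor's note: the new seqadj extension centralises the TCP sequence-number bookkeeping that previously lived in the NAT extension (see the nf_nat.h hunk below). A hedged sketch of how a helper that changes the length of a TCP payload might record the offset; the function itself is illustrative:

#include <linux/errno.h>
#include <net/netfilter/nf_conntrack_seqadj.h>

static int demo_record_seqadj(struct nf_conn *ct,
			      enum ip_conntrack_info ctinfo,
			      __be32 seq, s32 delta)
{
	/* The extension is assumed to have been attached earlier with
	 * nfct_seqadj_ext_add(), as NAT does when it sets up the
	 * conntrack.
	 */
	if (!nfct_seqadj(ct))
		return -EINVAL;

	/* Stores the per-direction offset; nf_ct_seq_adjust() applies
	 * it to later packets of the flow.
	 */
	return nf_ct_seqadj_set(ct, ctinfo, seq, delta);
}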
diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h
new file mode 100644
index 00000000000..806f54a290d
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_synproxy.h
@@ -0,0 +1,77 @@
+#ifndef _NF_CONNTRACK_SYNPROXY_H
+#define _NF_CONNTRACK_SYNPROXY_H
+
+#include <net/netns/generic.h>
+
+struct nf_conn_synproxy {
+ u32 isn;
+ u32 its;
+ u32 tsoff;
+};
+
+static inline struct nf_conn_synproxy *nfct_synproxy(const struct nf_conn *ct)
+{
+#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
+ return nf_ct_ext_find(ct, NF_CT_EXT_SYNPROXY);
+#else
+ return NULL;
+#endif
+}
+
+static inline struct nf_conn_synproxy *nfct_synproxy_ext_add(struct nf_conn *ct)
+{
+#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
+ return nf_ct_ext_add(ct, NF_CT_EXT_SYNPROXY, GFP_ATOMIC);
+#else
+ return NULL;
+#endif
+}
+
+struct synproxy_stats {
+ unsigned int syn_received;
+ unsigned int cookie_invalid;
+ unsigned int cookie_valid;
+ unsigned int cookie_retrans;
+ unsigned int conn_reopened;
+};
+
+struct synproxy_net {
+ struct nf_conn *tmpl;
+ struct synproxy_stats __percpu *stats;
+};
+
+extern int synproxy_net_id;
+static inline struct synproxy_net *synproxy_pernet(struct net *net)
+{
+ return net_generic(net, synproxy_net_id);
+}
+
+struct synproxy_options {
+ u8 options;
+ u8 wscale;
+ u16 mss;
+ u32 tsval;
+ u32 tsecr;
+};
+
+struct tcphdr;
+struct xt_synproxy_info;
+extern void synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
+ const struct tcphdr *th,
+ struct synproxy_options *opts);
+extern unsigned int synproxy_options_size(const struct synproxy_options *opts);
+extern void synproxy_build_options(struct tcphdr *th,
+ const struct synproxy_options *opts);
+
+extern void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
+ struct synproxy_options *opts);
+extern void synproxy_check_timestamp_cookie(struct synproxy_options *opts);
+
+extern unsigned int synproxy_tstamp_adjust(struct sk_buff *skb,
+ unsigned int protoff,
+ struct tcphdr *th,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_conn_synproxy *synproxy);
+
+#endif /* _NF_CONNTRACK_SYNPROXY_H */
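Editor's note: the SYNPROXY declarations above centre on reconstructing TCP options while the proxy completes the handshake on the client's behalf. A hedged sketch of the option round-trip; th_in/th_out and the offset handling are placeholders rather than code from the xt_SYNPROXY target:

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/netfilter/nf_conntrack_synproxy.h>

static void demo_copy_syn_options(const struct sk_buff *skb,
				  unsigned int doff,
				  const struct tcphdr *th_in,
				  struct tcphdr *th_out)
{
	struct synproxy_options opts = {};

	/* Pull MSS, window scale and timestamp data out of the
	 * incoming SYN...
	 */
	synproxy_parse_options(skb, doff, th_in, &opts);

	/* ...and rebuild them into the locally generated header. The
	 * caller must have reserved synproxy_options_size(&opts) bytes
	 * of option space in th_out.
	 */
	synproxy_build_options(th_out, &opts);
}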
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index ad14a799fd2..59a19242005 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -13,15 +13,6 @@ enum nf_nat_manip_type {
#define HOOK2MANIP(hooknum) ((hooknum) != NF_INET_POST_ROUTING && \
(hooknum) != NF_INET_LOCAL_IN)
-/* NAT sequence number modifications */
-struct nf_nat_seq {
- /* position of the last TCP sequence number modification (if any) */
- u_int32_t correction_pos;
-
- /* sequence number offset before and after last modification */
- int16_t offset_before, offset_after;
-};
-
#include <linux/list.h>
#include <linux/netfilter/nf_conntrack_pptp.h>
#include <net/netfilter/nf_conntrack_extend.h>
@@ -39,7 +30,6 @@ struct nf_conn;
/* The structure embedded in the conntrack structure. */
struct nf_conn_nat {
struct hlist_node bysource;
- struct nf_nat_seq seq[IP_CT_DIR_MAX];
struct nf_conn *ct;
union nf_conntrack_nat_help help;
#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \
diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h
index b4d6bfc2af0..404324d1d0c 100644
--- a/include/net/netfilter/nf_nat_helper.h
+++ b/include/net/netfilter/nf_nat_helper.h
@@ -39,28 +39,9 @@ extern int nf_nat_mangle_udp_packet(struct sk_buff *skb,
const char *rep_buffer,
unsigned int rep_len);
-extern void nf_nat_set_seq_adjust(struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- __be32 seq, s16 off);
-extern int nf_nat_seq_adjust(struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff);
-extern int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff);
-
/* Setup NAT on this expected conntrack so it follows master, but goes
* to port ct->master->saved_proto. */
extern void nf_nat_follow_master(struct nf_conn *ct,
struct nf_conntrack_expect *this);
-extern s16 nf_nat_get_offset(const struct nf_conn *ct,
- enum ip_conntrack_dir dir,
- u32 seq);
-
-extern void nf_nat_tcp_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
- u32 dir, int off);
-
#endif
diff --git a/include/net/netfilter/nf_tproxy_core.h b/include/net/netfilter/nf_tproxy_core.h
deleted file mode 100644
index 36d9379d4c4..00000000000
--- a/include/net/netfilter/nf_tproxy_core.h
+++ /dev/null
@@ -1,210 +0,0 @@
-#ifndef _NF_TPROXY_CORE_H
-#define _NF_TPROXY_CORE_H
-
-#include <linux/types.h>
-#include <linux/in.h>
-#include <linux/skbuff.h>
-#include <net/sock.h>
-#include <net/inet_hashtables.h>
-#include <net/inet6_hashtables.h>
-#include <net/tcp.h>
-
-#define NFT_LOOKUP_ANY 0
-#define NFT_LOOKUP_LISTENER 1
-#define NFT_LOOKUP_ESTABLISHED 2
-
-/* look up and get a reference to a matching socket */
-
-
-/* This function is used by the 'TPROXY' target and the 'socket'
- * match. The following lookups are supported:
- *
- * Explicit TProxy target rule
- * ===========================
- *
- * This is used when the user wants to intercept a connection matching
- * an explicit iptables rule. In this case the sockets are assumed
- * matching in preference order:
- *
- * - match: if there's a fully established connection matching the
- * _packet_ tuple, it is returned, assuming the redirection
- * already took place and we process a packet belonging to an
- * established connection
- *
- * - match: if there's a listening socket matching the redirection
- * (e.g. on-port & on-ip of the connection), it is returned,
- * regardless if it was bound to 0.0.0.0 or an explicit
- * address. The reasoning is that if there's an explicit rule, it
- * does not really matter if the listener is bound to an interface
- * or to 0. The user already stated that he wants redirection
- * (since he added the rule).
- *
- * "socket" match based redirection (no specific rule)
- * ===================================================
- *
- * There are connections with dynamic endpoints (e.g. FTP data
- * connection) that the user is unable to add explicit rules
- * for. These are taken care of by a generic "socket" rule. It is
- * assumed that the proxy application is trusted to open such
- * connections without explicit iptables rule (except of course the
- * generic 'socket' rule). In this case the following sockets are
- * matched in preference order:
- *
- * - match: if there's a fully established connection matching the
- * _packet_ tuple
- *
- * - match: if there's a non-zero bound listener (possibly with a
- * non-local address) We don't accept zero-bound listeners, since
- * then local services could intercept traffic going through the
- * box.
- *
- * Please note that there's an overlap between what a TPROXY target
- * and a socket match will match. Normally if you have both rules the
- * "socket" match will be the first one, effectively all packets
- * belonging to established connections going through that one.
- */
-static inline struct sock *
-nf_tproxy_get_sock_v4(struct net *net, const u8 protocol,
- const __be32 saddr, const __be32 daddr,
- const __be16 sport, const __be16 dport,
- const struct net_device *in, int lookup_type)
-{
- struct sock *sk;
-
- /* look up socket */
- switch (protocol) {
- case IPPROTO_TCP:
- switch (lookup_type) {
- case NFT_LOOKUP_ANY:
- sk = __inet_lookup(net, &tcp_hashinfo,
- saddr, sport, daddr, dport,
- in->ifindex);
- break;
- case NFT_LOOKUP_LISTENER:
- sk = inet_lookup_listener(net, &tcp_hashinfo,
- saddr, sport,
- daddr, dport,
- in->ifindex);
-
- /* NOTE: we return listeners even if bound to
- * 0.0.0.0, those are filtered out in
- * xt_socket, since xt_TPROXY needs 0 bound
- * listeners too */
-
- break;
- case NFT_LOOKUP_ESTABLISHED:
- sk = inet_lookup_established(net, &tcp_hashinfo,
- saddr, sport, daddr, dport,
- in->ifindex);
- break;
- default:
- WARN_ON(1);
- sk = NULL;
- break;
- }
- break;
- case IPPROTO_UDP:
- sk = udp4_lib_lookup(net, saddr, sport, daddr, dport,
- in->ifindex);
- if (sk && lookup_type != NFT_LOOKUP_ANY) {
- int connected = (sk->sk_state == TCP_ESTABLISHED);
- int wildcard = (inet_sk(sk)->inet_rcv_saddr == 0);
-
- /* NOTE: we return listeners even if bound to
- * 0.0.0.0, those are filtered out in
- * xt_socket, since xt_TPROXY needs 0 bound
- * listeners too */
- if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
- (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
- sock_put(sk);
- sk = NULL;
- }
- }
- break;
- default:
- WARN_ON(1);
- sk = NULL;
- }
-
- pr_debug("tproxy socket lookup: proto %u %08x:%u -> %08x:%u, lookup type: %d, sock %p\n",
- protocol, ntohl(saddr), ntohs(sport), ntohl(daddr), ntohs(dport), lookup_type, sk);
-
- return sk;
-}
-
-#if IS_ENABLED(CONFIG_IPV6)
-static inline struct sock *
-nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
- const struct in6_addr *saddr, const struct in6_addr *daddr,
- const __be16 sport, const __be16 dport,
- const struct net_device *in, int lookup_type)
-{
- struct sock *sk;
-
- /* look up socket */
- switch (protocol) {
- case IPPROTO_TCP:
- switch (lookup_type) {
- case NFT_LOOKUP_ANY:
- sk = inet6_lookup(net, &tcp_hashinfo,
- saddr, sport, daddr, dport,
- in->ifindex);
- break;
- case NFT_LOOKUP_LISTENER:
- sk = inet6_lookup_listener(net, &tcp_hashinfo,
- saddr, sport,
- daddr, ntohs(dport),
- in->ifindex);
-
- /* NOTE: we return listeners even if bound to
- * 0.0.0.0, those are filtered out in
- * xt_socket, since xt_TPROXY needs 0 bound
- * listeners too */
-
- break;
- case NFT_LOOKUP_ESTABLISHED:
- sk = __inet6_lookup_established(net, &tcp_hashinfo,
- saddr, sport, daddr, ntohs(dport),
- in->ifindex);
- break;
- default:
- WARN_ON(1);
- sk = NULL;
- break;
- }
- break;
- case IPPROTO_UDP:
- sk = udp6_lib_lookup(net, saddr, sport, daddr, dport,
- in->ifindex);
- if (sk && lookup_type != NFT_LOOKUP_ANY) {
- int connected = (sk->sk_state == TCP_ESTABLISHED);
- int wildcard = ipv6_addr_any(&inet6_sk(sk)->rcv_saddr);
-
- /* NOTE: we return listeners even if bound to
- * 0.0.0.0, those are filtered out in
- * xt_socket, since xt_TPROXY needs 0 bound
- * listeners too */
- if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
- (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
- sock_put(sk);
- sk = NULL;
- }
- }
- break;
- default:
- WARN_ON(1);
- sk = NULL;
- }
-
- pr_debug("tproxy socket lookup: proto %u %pI6:%u -> %pI6:%u, lookup type: %d, sock %p\n",
- protocol, saddr, ntohs(sport), daddr, ntohs(dport), lookup_type, sk);
-
- return sk;
-}
-#endif
-
-/* assign a socket to the skb -- consumes sk */
-void
-nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk);
-
-#endif
diff --git a/include/net/netfilter/nfnetlink_queue.h b/include/net/netfilter/nfnetlink_queue.h
index 86267a52951..aff88ba9139 100644
--- a/include/net/netfilter/nfnetlink_queue.h
+++ b/include/net/netfilter/nfnetlink_queue.h
@@ -15,6 +15,8 @@ int nfqnl_ct_put(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo);
void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo, int diff);
+int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
+ u32 portid, u32 report);
#else
inline struct nf_conn *
nfqnl_ct_get(struct sk_buff *entskb, size_t *size, enum ip_conntrack_info *ctinfo)
@@ -39,5 +41,11 @@ inline void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo, int diff)
{
}
+
+inline int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
+ u32 portid, u32 report)
+{
+ return 0;
+}
#endif /* NF_CONNTRACK */
#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 2ba9de89e8e..bf2ec2202c5 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -77,5 +77,6 @@ struct netns_ipv4 {
struct fib_rules_ops *mr_rules_ops;
#endif
#endif
+ atomic_t rt_genid;
};
#endif
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 005e2c2e39a..0fb2401197c 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -72,6 +72,7 @@ struct netns_ipv6 {
#endif
#endif
atomic_t dev_addr_genid;
+ atomic_t rt_genid;
};
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h
index 50ab8c26ab5..099d02782e2 100644
--- a/include/net/netprio_cgroup.h
+++ b/include/net/netprio_cgroup.h
@@ -25,11 +25,7 @@ struct netprio_map {
u32 priomap[];
};
-struct cgroup_netprio_state {
- struct cgroup_subsys_state css;
-};
-
-extern void sock_update_netprioidx(struct sock *sk);
+void sock_update_netprioidx(struct sock *sk);
#if IS_BUILTIN(CONFIG_NETPRIO_CGROUP)
@@ -39,7 +35,7 @@ static inline u32 task_netprioidx(struct task_struct *p)
u32 idx;
rcu_read_lock();
- css = task_subsys_state(p, net_prio_subsys_id);
+ css = task_css(p, net_prio_subsys_id);
idx = css->cgroup->id;
rcu_read_unlock();
return idx;
@@ -53,7 +49,7 @@ static inline u32 task_netprioidx(struct task_struct *p)
u32 idx = 0;
rcu_read_lock();
- css = task_subsys_state(p, net_prio_subsys_id);
+ css = task_css(p, net_prio_subsys_id);
if (css)
idx = css->cgroup->id;
rcu_read_unlock();
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 0af851c3b03..b64b7bce4b9 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -59,7 +59,7 @@ struct nfc_hci_ops {
struct nfc_target *target);
int (*event_received)(struct nfc_hci_dev *hdev, u8 gate, u8 event,
struct sk_buff *skb);
- int (*fw_upload)(struct nfc_hci_dev *hdev, const char *firmware_name);
+ int (*fw_download)(struct nfc_hci_dev *hdev, const char *firmware_name);
int (*discover_se)(struct nfc_hci_dev *dev);
int (*enable_se)(struct nfc_hci_dev *dev, u32 se_idx);
int (*disable_se)(struct nfc_hci_dev *dev, u32 se_idx);
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 0e353f1658b..f68ee68e4e3 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -68,7 +68,7 @@ struct nfc_ops {
void *cb_context);
int (*tm_send)(struct nfc_dev *dev, struct sk_buff *skb);
int (*check_presence)(struct nfc_dev *dev, struct nfc_target *target);
- int (*fw_upload)(struct nfc_dev *dev, const char *firmware_name);
+ int (*fw_download)(struct nfc_dev *dev, const char *firmware_name);
/* Secure Element API */
int (*discover_se)(struct nfc_dev *dev);
@@ -127,7 +127,7 @@ struct nfc_dev {
int targets_generation;
struct device dev;
bool dev_up;
- bool fw_upload_in_progress;
+ bool fw_download_in_progress;
u8 rf_mode;
bool polling;
struct nfc_target *active_target;
@@ -224,6 +224,9 @@ int nfc_set_remote_general_bytes(struct nfc_dev *dev,
u8 *gt, u8 gt_len);
u8 *nfc_get_local_general_bytes(struct nfc_dev *dev, size_t *gb_len);
+int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
+ u32 result);
+
int nfc_targets_found(struct nfc_dev *dev,
struct nfc_target *targets, int ntargets);
int nfc_target_lost(struct nfc_dev *dev, u32 target_idx);
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 13174509cdf..2ebef77a2f9 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -14,8 +14,8 @@ struct tcf_walker {
int (*fn)(struct tcf_proto *, unsigned long node, struct tcf_walker *);
};
-extern int register_tcf_proto_ops(struct tcf_proto_ops *ops);
-extern int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
+int register_tcf_proto_ops(struct tcf_proto_ops *ops);
+int unregister_tcf_proto_ops(struct tcf_proto_ops *ops);
static inline unsigned long
__cls_set_class(unsigned long *clp, unsigned long cl)
@@ -126,17 +126,17 @@ tcf_exts_exec(struct sk_buff *skb, struct tcf_exts *exts,
return 0;
}
-extern int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
- struct nlattr **tb, struct nlattr *rate_tlv,
- struct tcf_exts *exts,
- const struct tcf_ext_map *map);
-extern void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts);
-extern void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
- struct tcf_exts *src);
-extern int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
- const struct tcf_ext_map *map);
-extern int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
- const struct tcf_ext_map *map);
+int tcf_exts_validate(struct net *net, struct tcf_proto *tp,
+ struct nlattr **tb, struct nlattr *rate_tlv,
+ struct tcf_exts *exts,
+ const struct tcf_ext_map *map);
+void tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts);
+void tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
+ struct tcf_exts *src);
+int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
+ const struct tcf_ext_map *map);
+int tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
+ const struct tcf_ext_map *map);
/**
* struct tcf_pkt_info - packet information
@@ -239,14 +239,14 @@ struct tcf_ematch_ops {
struct list_head link;
};
-extern int tcf_em_register(struct tcf_ematch_ops *);
-extern void tcf_em_unregister(struct tcf_ematch_ops *);
-extern int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
- struct tcf_ematch_tree *);
-extern void tcf_em_tree_destroy(struct tcf_proto *, struct tcf_ematch_tree *);
-extern int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
-extern int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
- struct tcf_pkt_info *);
+int tcf_em_register(struct tcf_ematch_ops *);
+void tcf_em_unregister(struct tcf_ematch_ops *);
+int tcf_em_tree_validate(struct tcf_proto *, struct nlattr *,
+ struct tcf_ematch_tree *);
+void tcf_em_tree_destroy(struct tcf_proto *, struct tcf_ematch_tree *);
+int tcf_em_tree_dump(struct sk_buff *, struct tcf_ematch_tree *, int);
+int __tcf_em_tree_match(struct sk_buff *, struct tcf_ematch_tree *,
+ struct tcf_pkt_info *);
/**
* tcf_em_tree_change - replace ematch tree of a running classifier
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 388bf8b6d06..59ec3cd15d6 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -64,8 +64,8 @@ struct qdisc_watchdog {
struct Qdisc *qdisc;
};
-extern void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
-extern void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
+void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
+void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
psched_time_t expires)
@@ -73,31 +73,34 @@ static inline void qdisc_watchdog_schedule(struct qdisc_watchdog *wd,
qdisc_watchdog_schedule_ns(wd, PSCHED_TICKS2NS(expires));
}
-extern void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
+void qdisc_watchdog_cancel(struct qdisc_watchdog *wd);
extern struct Qdisc_ops pfifo_qdisc_ops;
extern struct Qdisc_ops bfifo_qdisc_ops;
extern struct Qdisc_ops pfifo_head_drop_qdisc_ops;
-extern int fifo_set_limit(struct Qdisc *q, unsigned int limit);
-extern struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
- unsigned int limit);
-
-extern int register_qdisc(struct Qdisc_ops *qops);
-extern int unregister_qdisc(struct Qdisc_ops *qops);
-extern void qdisc_list_del(struct Qdisc *q);
-extern struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
-extern struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
-extern struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
- struct nlattr *tab);
-extern void qdisc_put_rtab(struct qdisc_rate_table *tab);
-extern void qdisc_put_stab(struct qdisc_size_table *tab);
-extern void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc);
-extern int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
- struct net_device *dev, struct netdev_queue *txq,
- spinlock_t *root_lock);
-
-extern void __qdisc_run(struct Qdisc *q);
+int fifo_set_limit(struct Qdisc *q, unsigned int limit);
+struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
+ unsigned int limit);
+
+int register_qdisc(struct Qdisc_ops *qops);
+int unregister_qdisc(struct Qdisc_ops *qops);
+void qdisc_get_default(char *id, size_t len);
+int qdisc_set_default(const char *id);
+
+void qdisc_list_del(struct Qdisc *q);
+struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
+struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
+struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r,
+ struct nlattr *tab);
+void qdisc_put_rtab(struct qdisc_rate_table *tab);
+void qdisc_put_stab(struct qdisc_size_table *tab);
+void qdisc_warn_nonwc(char *txt, struct Qdisc *qdisc);
+int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
+ struct net_device *dev, struct netdev_queue *txq,
+ spinlock_t *root_lock);
+
+void __qdisc_run(struct Qdisc *q);
static inline void qdisc_run(struct Qdisc *q)
{
@@ -105,10 +108,10 @@ static inline void qdisc_run(struct Qdisc *q)
__qdisc_run(q);
}
-extern int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
- struct tcf_result *res);
-extern int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+int tc_classify_compat(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res);
+int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ struct tcf_result *res);
/* Calculate maximal size of packet seen by hard_start_xmit
routine of this device.
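qdisc_get_default() and qdisc_set_default() are new: they report and replace the qdisc that gets attached to devices by default. A minimal sketch of calling them from kernel code, assuming the usual IFNAMSIZ-sized id buffer; the sysctl plumbing that normally drives these is not shown:

static void example_switch_default_qdisc(void)
{
	char id[IFNAMSIZ];

	qdisc_get_default(id, sizeof(id));	/* current default, e.g. "pfifo_fast" */
	if (qdisc_set_default("fq_codel") < 0)	/* illustrative replacement */
		pr_warn("fq_codel is not available as a default qdisc\n");
}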
diff --git a/include/net/route.h b/include/net/route.h
index 2ea40c1b5e0..afdeeb5bec2 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -317,4 +317,12 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
return hoplimit;
}
+static inline int ip_skb_dst_mtu(struct sk_buff *skb)
+{
+ struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
+
+ return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
+ skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
+}
+
#endif /* _ROUTE_H */
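ip_skb_dst_mtu() returns the raw device MTU when the sending socket asked for IP_PMTUDISC_PROBE, so probe packets are not clamped by a possibly stale path MTU, and falls back to dst_mtu() otherwise. A minimal sketch of the call-site pattern, with a hypothetical transmit callback standing in for the real output hook:

static int example_ip_output_cb(struct sk_buff *skb);	/* hypothetical */

static int example_ip_finish_output(struct sk_buff *skb)
{
	unsigned int mtu = ip_skb_dst_mtu(skb);

	/* oversized, non-GSO packets are fragmented against this MTU */
	if (skb->len > mtu && !skb_is_gso(skb))
		return ip_fragment(skb, example_ip_output_cb);
	return example_ip_output_cb(skb);
}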
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 6eab63363e5..f4eb365f7dc 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -316,6 +316,7 @@ extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
+extern const struct Qdisc_ops *default_qdisc_ops;
struct Qdisc_class_common {
u32 classid;
@@ -350,30 +351,32 @@ qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
return NULL;
}
-extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
-extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
-extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
-extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
-extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
-
-extern void dev_init_scheduler(struct net_device *dev);
-extern void dev_shutdown(struct net_device *dev);
-extern void dev_activate(struct net_device *dev);
-extern void dev_deactivate(struct net_device *dev);
-extern void dev_deactivate_many(struct list_head *head);
-extern struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
- struct Qdisc *qdisc);
-extern void qdisc_reset(struct Qdisc *qdisc);
-extern void qdisc_destroy(struct Qdisc *qdisc);
-extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
-extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
- struct Qdisc_ops *ops);
-extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
- struct Qdisc_ops *ops, u32 parentid);
-extern void __qdisc_calculate_pkt_len(struct sk_buff *skb,
- const struct qdisc_size_table *stab);
-extern void tcf_destroy(struct tcf_proto *tp);
-extern void tcf_destroy_chain(struct tcf_proto **fl);
+int qdisc_class_hash_init(struct Qdisc_class_hash *);
+void qdisc_class_hash_insert(struct Qdisc_class_hash *,
+ struct Qdisc_class_common *);
+void qdisc_class_hash_remove(struct Qdisc_class_hash *,
+ struct Qdisc_class_common *);
+void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
+void qdisc_class_hash_destroy(struct Qdisc_class_hash *);
+
+void dev_init_scheduler(struct net_device *dev);
+void dev_shutdown(struct net_device *dev);
+void dev_activate(struct net_device *dev);
+void dev_deactivate(struct net_device *dev);
+void dev_deactivate_many(struct list_head *head);
+struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
+ struct Qdisc *qdisc);
+void qdisc_reset(struct Qdisc *qdisc);
+void qdisc_destroy(struct Qdisc *qdisc);
+void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
+struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
+ const struct Qdisc_ops *ops);
+struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
+ const struct Qdisc_ops *ops, u32 parentid);
+void __qdisc_calculate_pkt_len(struct sk_buff *skb,
+ const struct qdisc_size_table *stab);
+void tcf_destroy(struct tcf_proto *tp);
+void tcf_destroy_chain(struct tcf_proto **fl);
/* Reset all TX qdiscs greater than index of a device. */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
@@ -683,16 +686,23 @@ struct psched_ratecfg {
u64 rate_bytes_ps; /* bytes per second */
u32 mult;
u16 overhead;
+ u8 linklayer;
u8 shift;
};
static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
unsigned int len)
{
- return ((u64)(len + r->overhead) * r->mult) >> r->shift;
+ len += r->overhead;
+
+ if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
+ return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift;
+
+ return ((u64)len * r->mult) >> r->shift;
}
-extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf);
+void psched_ratecfg_precompute(struct psched_ratecfg *r,
+ const struct tc_ratespec *conf);
static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
const struct psched_ratecfg *r)
@@ -700,6 +710,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
memset(res, 0, sizeof(*res));
res->rate = r->rate_bytes_ps;
res->overhead = r->overhead;
+ res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}
#endif
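The linklayer-aware psched_l2t_ns() above charges ATM traffic for whole cells: each 53-byte cell carries only 48 bytes of payload, so the packet length is rounded up to a full number of cells before the rate math. A small illustrative helper with the arithmetic spelled out for one packet size:

static inline unsigned int example_atm_wire_len(unsigned int len)
{
	/* 100 bytes -> DIV_ROUND_UP(100, 48) = 3 cells -> 3 * 53 = 159 bytes */
	return DIV_ROUND_UP(len, 48) * 53;
}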
diff --git a/include/net/sctp/auth.h b/include/net/sctp/auth.h
index 49bc9577c61..aa80bef3c9d 100644
--- a/include/net/sctp/auth.h
+++ b/include/net/sctp/auth.h
@@ -22,16 +22,10 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Vlad Yasevich <vladislav.yasevich@hp.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#ifndef __sctp_auth_h__
diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
index 0cb08e6fb6d..259924d63ba 100644
--- a/include/net/sctp/checksum.h
+++ b/include/net/sctp/checksum.h
@@ -25,10 +25,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Dinakaran Joseph
@@ -37,9 +34,6 @@
*
* Rewritten to use libcrc32c by:
* Vlad Yasevich <vladislav.yasevich@hp.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#ifndef __sctp_checksum_h__
@@ -85,4 +79,19 @@ static inline __le32 sctp_end_cksum(__u32 crc32)
return cpu_to_le32(~crc32);
}
+/* Calculate the CRC32C checksum of an SCTP packet. */
+static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
+ unsigned int offset)
+{
+ const struct sk_buff *iter;
+
+ __u32 crc32 = sctp_start_cksum(skb->data + offset,
+ skb_headlen(skb) - offset);
+ skb_walk_frags(skb, iter)
+ crc32 = sctp_update_cksum((__u8 *) iter->data,
+ skb_headlen(iter), crc32);
+
+ return sctp_end_cksum(crc32);
+}
+
#endif /* __sctp_checksum_h__ */
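sctp_compute_cksum() folds the CRC32C over the linear head starting at 'offset' and then over every fragment on the skb's frag list, so callers no longer open-code the skb_walk_frags() loop. A minimal verification sketch, assuming the caller has already excluded or zeroed the checksum field the way the receive path normally does:

static bool example_sctp_csum_ok(struct sk_buff *skb, unsigned int off,
				 __le32 wire_csum)
{
	return sctp_compute_cksum(skb, off) == wire_csum;
}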
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 35247271e55..832f2191489 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -23,19 +23,17 @@
* the Free Software Foundation, 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
- * Please send any bug reports or fixes you make to one of the
- * following email addresses:
+ * Please send any bug reports or fixes you make to the
+ * email address(es):
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
- * La Monte H.P. Yarroll <piggy@acm.org>
- * Karl Knutson <karl@athena.chicago.il.us>
- * Ardelle Fan <ardelle.fan@intel.com>
- * Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
+ * Written or modified by:
+ * La Monte H.P. Yarroll <piggy@acm.org>
+ * Karl Knutson <karl@athena.chicago.il.us>
+ * Ardelle Fan <ardelle.fan@intel.com>
+ * Sridhar Samudrala <sri@us.ibm.com>
*/
-
#ifndef __net_sctp_command_h__
#define __net_sctp_command_h__
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index ca50e0751e4..2f0a565a0fd 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -25,10 +25,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -39,9 +36,6 @@
* Xingang Guo <xingang.guo@intel.com>
* Sridhar Samudrala <samudrala@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#ifndef __sctp_constants_h__
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index d8e37ecea69..3794c5ad20f 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -27,10 +27,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -41,9 +38,6 @@
* Ardelle Fan <ardelle.fan@intel.com>
* Ryan Layer <rmlayer@us.ibm.com>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#ifndef __net_sctp_h__
@@ -613,7 +607,7 @@ static inline void sctp_v4_map_v6(union sctp_addr *addr)
*/
static inline struct dst_entry *sctp_transport_dst_check(struct sctp_transport *t)
{
- if (t->dst && !dst_check(t->dst, 0)) {
+ if (t->dst && !dst_check(t->dst, t->dst_cookie)) {
dst_release(t->dst);
t->dst = NULL;
}
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index 2a82d138470..4ef75af340b 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -27,10 +27,7 @@
*
* Please send any bug reports or fixes you make to the
* email addresses:
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -42,9 +39,6 @@
* Daisy Chang <daisyc@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index e745c92a153..2174d8da077 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -25,10 +25,7 @@
*
* Please send any bug reports or fixes you make to the
* email addresses:
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Randall Stewart <randall@sctp.chicago.il.us>
@@ -46,9 +43,6 @@
* Ryan Layer <rmlayer@us.ibm.com>
* Anup Pemmaiah <pemmaiah@cc.usu.edu>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#ifndef __sctp_structs_h__
@@ -119,29 +113,27 @@ struct sctp_hashbucket {
/* The SCTP globals structure. */
extern struct sctp_globals {
- /* The following variables are implementation specific. */
-
- /* Default initialization values to be applied to new associations. */
- __u16 max_instreams;
- __u16 max_outstreams;
-
/* This is a list of groups of functions for each address
* family that we support.
*/
struct list_head address_families;
/* This is the hash of all endpoints. */
- int ep_hashsize;
struct sctp_hashbucket *ep_hashtable;
-
/* This is the hash of all associations. */
- int assoc_hashsize;
struct sctp_hashbucket *assoc_hashtable;
-
/* This is the sctp port control hash. */
- int port_hashsize;
struct sctp_bind_hashbucket *port_hashtable;
+ /* Sizes of above hashtables. */
+ int ep_hashsize;
+ int assoc_hashsize;
+ int port_hashsize;
+
+ /* Default initialization values to be applied to new associations. */
+ __u16 max_instreams;
+ __u16 max_outstreams;
+
/* Flag to indicate whether computing and verifying checksum
* is disabled. */
bool checksum_disable;
@@ -782,6 +774,7 @@ struct sctp_transport {
/* Has this transport moved the ctsn since we last sacked */
__u32 sack_generation;
+ u32 dst_cookie;
struct flowi fl;
diff --git a/include/net/sctp/tsnmap.h b/include/net/sctp/tsnmap.h
index 2c5d2b4d5d1..54bbbe54730 100644
--- a/include/net/sctp/tsnmap.h
+++ b/include/net/sctp/tsnmap.h
@@ -28,19 +28,13 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <net/sctp/constants.h>
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index ca4693b4e09..27b9f5c9015 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -31,19 +31,13 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#ifndef __sctp_ulpevent_h__
diff --git a/include/net/sctp/ulpqueue.h b/include/net/sctp/ulpqueue.h
index 00e50ba3f24..b0cf5d54d71 100644
--- a/include/net/sctp/ulpqueue.h
+++ b/include/net/sctp/ulpqueue.h
@@ -30,18 +30,12 @@
*
* Please send any bug reports or fixes you make to the
* email addresses:
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#ifndef __sctp_ulpqueue_h__
diff --git a/include/net/sock.h b/include/net/sock.h
index 95a5a2c6925..6ba2e7b0e2b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -232,6 +232,7 @@ struct cg_proto;
* @sk_napi_id: id of the last napi context to receive data for sk
* @sk_ll_usec: usecs to busypoll when there is no data
* @sk_allocation: allocation mode
+ * @sk_pacing_rate: Pacing rate (if supported by transport/packet scheduler)
* @sk_sndbuf: size of send buffer in bytes
* @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
* %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
@@ -327,7 +328,7 @@ struct sock {
#ifdef CONFIG_RPS
__u32 sk_rxhash;
#endif
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sk_napi_id;
unsigned int sk_ll_usec;
#endif
@@ -361,6 +362,7 @@ struct sock {
kmemcheck_bitfield_end(flags);
int sk_wmem_queued;
gfp_t sk_allocation;
+ u32 sk_pacing_rate; /* bytes per second */
netdev_features_t sk_route_caps;
netdev_features_t sk_route_nocaps;
int sk_gso_type;
@@ -746,11 +748,6 @@ static inline int sk_stream_wspace(const struct sock *sk)
extern void sk_stream_write_space(struct sock *sk);
-static inline bool sk_stream_memory_free(const struct sock *sk)
-{
- return sk->sk_wmem_queued < sk->sk_sndbuf;
-}
-
/* OOB backlog add */
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
@@ -950,6 +947,7 @@ struct proto {
unsigned int inuse_idx;
#endif
+ bool (*stream_memory_free)(const struct sock *sk);
/* Memory pressure */
void (*enter_memory_pressure)(struct sock *sk);
atomic_long_t *memory_allocated; /* Current allocated memory. */
@@ -1088,6 +1086,21 @@ static inline struct cg_proto *parent_cg_proto(struct proto *proto,
}
#endif
+static inline bool sk_stream_memory_free(const struct sock *sk)
+{
+ if (sk->sk_wmem_queued >= sk->sk_sndbuf)
+ return false;
+
+ return sk->sk_prot->stream_memory_free ?
+ sk->sk_prot->stream_memory_free(sk) : true;
+}
+
+static inline bool sk_stream_is_writeable(const struct sock *sk)
+{
+ return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) &&
+ sk_stream_memory_free(sk);
+}
+
static inline bool sk_has_memory_pressure(const struct sock *sk)
{
@@ -1509,6 +1522,7 @@ extern struct sk_buff *sock_rmalloc(struct sock *sk,
unsigned long size, int force,
gfp_t priority);
extern void sock_wfree(struct sk_buff *skb);
+extern void skb_orphan_partial(struct sk_buff *skb);
extern void sock_rfree(struct sk_buff *skb);
extern void sock_edemux(struct sk_buff *skb);
@@ -1527,7 +1541,8 @@ extern struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
unsigned long header_len,
unsigned long data_len,
int noblock,
- int *errcode);
+ int *errcode,
+ int max_page_order);
extern void *sock_kmalloc(struct sock *sk, int size,
gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
@@ -2249,6 +2264,8 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb)
extern void sock_enable_timestamp(struct sock *sk, int flag);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);
extern int sock_get_timestampns(struct sock *, struct timespec __user *);
+extern int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
+ int level, int type);
/*
* Enable debug/info messages
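sk_stream_memory_free() is now a two-stage test: the socket must be below its send buffer, and the protocol may veto further queueing through the new proto->stream_memory_free hook; sk_stream_is_writeable() combines that with the usual low-water-mark check for poll-style paths. A minimal sketch of a protocol wiring in its own hook; everything named example_* is illustrative and the mandatory proto ops are omitted:

static bool example_stream_memory_free(const struct sock *sk)
{
	/* e.g. refuse more data once too much is queued but not yet sent */
	return true;
}

static struct proto example_prot = {
	.name			= "EXAMPLE",
	.stream_memory_free	= example_stream_memory_free,
	/* ... */
};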
diff --git a/include/net/tcp.h b/include/net/tcp.h
index d1980054ec7..b1aa324c5e6 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -192,10 +192,6 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOLEN_TIMESTAMP 10
#define TCPOLEN_MD5SIG 18
#define TCPOLEN_EXP_FASTOPEN_BASE 4
-#define TCPOLEN_COOKIE_BASE 2 /* Cookie-less header extension */
-#define TCPOLEN_COOKIE_PAIR 3 /* Cookie pair header extension */
-#define TCPOLEN_COOKIE_MIN (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MIN)
-#define TCPOLEN_COOKIE_MAX (TCPOLEN_COOKIE_BASE+TCP_COOKIE_MAX)
/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED 12
@@ -284,6 +280,8 @@ extern int sysctl_tcp_thin_dupack;
extern int sysctl_tcp_early_retrans;
extern int sysctl_tcp_limit_output_bytes;
extern int sysctl_tcp_challenge_ack_limit;
+extern unsigned int sysctl_tcp_notsent_lowat;
+extern int sysctl_tcp_min_tso_segs;
extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
@@ -373,8 +371,8 @@ extern void tcp_delack_timer_handler(struct sock *sk);
extern int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
const struct tcphdr *th, unsigned int len);
-extern int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
- const struct tcphdr *th, unsigned int len);
+extern void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+ const struct tcphdr *th, unsigned int len);
extern void tcp_rcv_space_adjust(struct sock *sk);
extern void tcp_cleanup_rbuf(struct sock *sk, int copied);
extern int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
@@ -479,9 +477,13 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
+extern int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
+ u32 cookie);
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
struct ip_options *opt);
#ifdef CONFIG_SYN_COOKIES
+extern u32 __cookie_v4_init_sequence(const struct iphdr *iph,
+ const struct tcphdr *th, u16 *mssp);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
__u16 *mss);
#else
@@ -498,8 +500,12 @@ extern bool cookie_check_timestamp(struct tcp_options_received *opt,
struct net *net, bool *ecn_ok);
/* From net/ipv6/syncookies.c */
+extern int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
+ u32 cookie);
extern struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
+extern u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
+ const struct tcphdr *th, u16 *mssp);
extern __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
__u16 *mss);
#else
@@ -591,7 +597,6 @@ extern void tcp_initialize_rcv_mss(struct sock *sk);
extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
extern int tcp_mss_to_mtu(struct sock *sk, int mss);
extern void tcp_mtup_init(struct sock *sk);
-extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
extern void tcp_init_buffer_space(struct sock *sk);
static inline void tcp_bound_rto(const struct sock *sk)
@@ -1094,15 +1099,6 @@ static inline void tcp_openreq_init(struct request_sock *req,
ireq->loc_port = tcp_hdr(skb)->dest;
}
-/* Compute time elapsed between SYNACK and the ACK completing 3WHS */
-static inline void tcp_synack_rtt_meas(struct sock *sk,
- struct request_sock *req)
-{
- if (tcp_rsk(req)->snt_synack)
- tcp_valid_rtt_meas(sk,
- tcp_time_stamp - tcp_rsk(req)->snt_synack);
-}
-
extern void tcp_enter_memory_pressure(struct sock *sk);
static inline int keepalive_intvl_when(const struct tcp_sock *tp)
@@ -1313,7 +1309,8 @@ void tcp_free_fastopen_req(struct tcp_sock *tp);
extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
int tcp_fastopen_reset_cipher(void *key, unsigned int len);
-void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc);
+extern void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
+ struct tcp_fastopen_cookie *foc);
#define TCP_FASTOPEN_KEY_LENGTH 16
@@ -1549,6 +1546,19 @@ extern int tcp_gro_complete(struct sk_buff *skb);
extern void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr,
__be32 daddr);
+static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
+{
+ return tp->notsent_lowat ?: sysctl_tcp_notsent_lowat;
+}
+
+static inline bool tcp_stream_memory_free(const struct sock *sk)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+ u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
+
+ return notsent_bytes < tcp_notsent_lowat(tp);
+}
+
#ifdef CONFIG_PROC_FS
extern int tcp4_proc_init(void);
extern void tcp4_proc_exit(void);
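For TCP the new hook is tcp_stream_memory_free(): the socket stays writeable only while the bytes queued but not yet sent (write_seq - snd_nxt) are below tcp_notsent_lowat(), i.e. the per-socket value if set, otherwise the new sysctl. A sketch that mirrors the helper, with made-up numbers in the comment:

static bool example_tcp_has_room(const struct tcp_sock *tp)
{
	u32 notsent = tp->write_seq - tp->snd_nxt;	/* queued but unsent */

	/* e.g. lowat = 128 KiB: 100 KiB unsent -> writeable,
	 *                       200 KiB unsent -> POLLOUT is withheld */
	return notsent < tcp_notsent_lowat(tp);
}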
diff --git a/include/net/udp.h b/include/net/udp.h
index 74c10ec5e74..ef2e0b7843a 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -183,6 +183,7 @@ extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len);
extern int udp_push_pending_frames(struct sock *sk);
extern void udp_flush_pending_frames(struct sock *sk);
+extern void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst);
extern int udp_rcv(struct sk_buff *skb);
extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
extern int udp_disconnect(struct sock *sk, int flags);
diff --git a/net/vmw_vsock/vsock_addr.h b/include/net/vsock_addr.h
index 9ccd5316eac..9ccd5316eac 100644
--- a/net/vmw_vsock/vsock_addr.h
+++ b/include/net/vsock_addr.h
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
new file mode 100644
index 00000000000..2d64d3cd499
--- /dev/null
+++ b/include/net/vxlan.h
@@ -0,0 +1,40 @@
+#ifndef __NET_VXLAN_H
+#define __NET_VXLAN_H 1
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/udp.h>
+
+#define VNI_HASH_BITS 10
+#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
+
+struct vxlan_sock;
+typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb, __be32 key);
+
+/* per UDP socket information */
+struct vxlan_sock {
+ struct hlist_node hlist;
+ vxlan_rcv_t *rcv;
+ void *data;
+ struct work_struct del_work;
+ struct socket *sock;
+ struct rcu_head rcu;
+ struct hlist_head vni_list[VNI_HASH_SIZE];
+ atomic_t refcnt;
+};
+
+struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
+ vxlan_rcv_t *rcv, void *data,
+ bool no_share, bool ipv6);
+
+void vxlan_sock_release(struct vxlan_sock *vs);
+
+int vxlan_xmit_skb(struct vxlan_sock *vs,
+ struct rtable *rt, struct sk_buff *skb,
+ __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
+ __be16 src_port, __be16 dst_port, __be32 vni);
+
+__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb);
+
+void vxlan_get_rx_port(struct net_device *netdev);
+#endif
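The new header exports the UDP-socket side of the VXLAN driver so other code can share one VXLAN socket per port and transmit through vxlan_xmit_skb(). A minimal consumer sketch; the receive callback body, port choice and missing error handling are illustrative only:

static void example_vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
			      __be32 key)
{
	/* hand the decapsulated frame (VNI in 'key') to the local datapath */
	kfree_skb(skb);
}

static struct vxlan_sock *example_vxlan_open(struct net *net)
{
	/* IANA VXLAN port, socket shared with other users, IPv4 only */
	return vxlan_sock_add(net, htons(4789), example_vxlan_rcv, NULL,
			      false, false);
}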
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 94ce082b29d..e253bf0cc7e 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -341,10 +341,13 @@ struct xfrm_state_afinfo {
struct sk_buff *skb);
int (*transport_finish)(struct sk_buff *skb,
int async);
+ void (*local_error)(struct sk_buff *skb, u32 mtu);
};
extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
+extern struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
+extern void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
extern void xfrm_state_delete_tunnel(struct xfrm_state *x);
@@ -1477,6 +1480,7 @@ extern int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
extern int xfrm_output_resume(struct sk_buff *skb, int err);
extern int xfrm_output(struct sk_buff *skb);
extern int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
+extern void xfrm_local_error(struct sk_buff *skb, int mtu);
extern int xfrm4_extract_header(struct sk_buff *skb);
extern int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
extern int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
@@ -1497,6 +1501,7 @@ extern int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short fam
extern int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
extern int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel *handler);
extern int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel *handler);
+extern void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
extern int xfrm6_extract_header(struct sk_buff *skb);
extern int xfrm6_extract_input(struct xfrm_state *x, struct sk_buff *skb);
extern int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi);
@@ -1514,6 +1519,7 @@ extern int xfrm6_output(struct sk_buff *skb);
extern int xfrm6_output_finish(struct sk_buff *skb);
extern int xfrm6_find_1stfragopt(struct xfrm_state *x, struct sk_buff *skb,
u8 **prevhdr);
+extern void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
#ifdef CONFIG_XFRM
extern int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
@@ -1548,7 +1554,7 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir, u32
int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info);
u32 xfrm_get_acqseq(void);
extern int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
-struct xfrm_state *xfrm_find_acq(struct net *net, struct xfrm_mark *mark,
+struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
u8 mode, u32 reqid, u8 proto,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr, int create,
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 645c3cedce9..e393171e2fa 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -116,7 +116,8 @@ enum ib_device_cap_flags {
IB_DEVICE_MEM_MGT_EXTENSIONS = (1<<21),
IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
IB_DEVICE_MEM_WINDOW_TYPE_2A = (1<<23),
- IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24)
+ IB_DEVICE_MEM_WINDOW_TYPE_2B = (1<<24),
+ IB_DEVICE_MANAGED_FLOW_STEERING = (1<<29)
};
enum ib_atomic_cap {
@@ -635,6 +636,12 @@ enum ib_qp_create_flags {
IB_QP_CREATE_RESERVED_END = 1 << 31,
};
+
+/*
+ * Note: users may not call ib_close_qp or ib_destroy_qp from the event_handler
+ * callback to destroy the passed in QP.
+ */
+
struct ib_qp_init_attr {
void (*event_handler)(struct ib_event *, void *);
void *qp_context;
@@ -953,6 +960,7 @@ struct ib_ucontext {
struct list_head srq_list;
struct list_head ah_list;
struct list_head xrcd_list;
+ struct list_head rule_list;
int closing;
};
@@ -1033,7 +1041,8 @@ struct ib_qp {
struct ib_srq *srq;
struct ib_xrcd *xrcd; /* XRC TGT QPs only */
struct list_head xrcd_list;
- atomic_t usecnt; /* count times opened, mcast attaches */
+ /* count times opened, mcast attaches, flow attaches */
+ atomic_t usecnt;
struct list_head open_list;
struct ib_qp *real_qp;
struct ib_uobject *uobject;
@@ -1068,6 +1077,112 @@ struct ib_fmr {
u32 rkey;
};
+/* Supported steering options */
+enum ib_flow_attr_type {
+ /* steering according to rule specifications */
+ IB_FLOW_ATTR_NORMAL = 0x0,
+ /* default unicast and multicast rule -
+ * receive all Eth traffic which isn't steered to any QP
+ */
+ IB_FLOW_ATTR_ALL_DEFAULT = 0x1,
+ /* default multicast rule -
+ * receive all Eth multicast traffic which isn't steered to any QP
+ */
+ IB_FLOW_ATTR_MC_DEFAULT = 0x2,
+ /* sniffer rule - receive all port traffic */
+ IB_FLOW_ATTR_SNIFFER = 0x3
+};
+
+/* Supported steering header types */
+enum ib_flow_spec_type {
+ /* L2 headers*/
+ IB_FLOW_SPEC_ETH = 0x20,
+ /* L3 header*/
+ IB_FLOW_SPEC_IPV4 = 0x30,
+ /* L4 headers*/
+ IB_FLOW_SPEC_TCP = 0x40,
+ IB_FLOW_SPEC_UDP = 0x41
+};
+
+#define IB_FLOW_SPEC_SUPPORT_LAYERS 4
+
+/* Flow steering rule priority is set according to its domain.
+ * Lower domain value means higher priority.
+ */
+enum ib_flow_domain {
+ IB_FLOW_DOMAIN_USER,
+ IB_FLOW_DOMAIN_ETHTOOL,
+ IB_FLOW_DOMAIN_RFS,
+ IB_FLOW_DOMAIN_NIC,
+ IB_FLOW_DOMAIN_NUM /* Must be last */
+};
+
+struct ib_flow_eth_filter {
+ u8 dst_mac[6];
+ u8 src_mac[6];
+ __be16 ether_type;
+ __be16 vlan_tag;
+};
+
+struct ib_flow_spec_eth {
+ enum ib_flow_spec_type type;
+ u16 size;
+ struct ib_flow_eth_filter val;
+ struct ib_flow_eth_filter mask;
+};
+
+struct ib_flow_ipv4_filter {
+ __be32 src_ip;
+ __be32 dst_ip;
+};
+
+struct ib_flow_spec_ipv4 {
+ enum ib_flow_spec_type type;
+ u16 size;
+ struct ib_flow_ipv4_filter val;
+ struct ib_flow_ipv4_filter mask;
+};
+
+struct ib_flow_tcp_udp_filter {
+ __be16 dst_port;
+ __be16 src_port;
+};
+
+struct ib_flow_spec_tcp_udp {
+ enum ib_flow_spec_type type;
+ u16 size;
+ struct ib_flow_tcp_udp_filter val;
+ struct ib_flow_tcp_udp_filter mask;
+};
+
+union ib_flow_spec {
+ struct {
+ enum ib_flow_spec_type type;
+ u16 size;
+ };
+ struct ib_flow_spec_eth eth;
+ struct ib_flow_spec_ipv4 ipv4;
+ struct ib_flow_spec_tcp_udp tcp_udp;
+};
+
+struct ib_flow_attr {
+ enum ib_flow_attr_type type;
+ u16 size;
+ u16 priority;
+ u32 flags;
+ u8 num_of_specs;
+ u8 port;
+ /* Following are the optional layers according to user request
+ * struct ib_flow_spec_xxx
+ * struct ib_flow_spec_yyy
+ */
+};
+
+struct ib_flow {
+ struct ib_qp *qp;
+ struct ib_uobject *uobject;
+};
+
struct ib_mad;
struct ib_grh;
@@ -1300,6 +1415,11 @@ struct ib_device {
struct ib_ucontext *ucontext,
struct ib_udata *udata);
int (*dealloc_xrcd)(struct ib_xrcd *xrcd);
+ struct ib_flow * (*create_flow)(struct ib_qp *qp,
+ struct ib_flow_attr
+ *flow_attr,
+ int domain);
+ int (*destroy_flow)(struct ib_flow *flow_id);
struct ib_dma_mapping_ops *dma_ops;
@@ -2260,4 +2380,8 @@ struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
*/
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
+struct ib_flow *ib_create_flow(struct ib_qp *qp,
+ struct ib_flow_attr *flow_attr, int domain);
+int ib_destroy_flow(struct ib_flow *flow_id);
+
#endif /* IB_VERBS_H */
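A steering rule is handed to ib_create_flow() as a struct ib_flow_attr followed contiguously in memory by num_of_specs variable-sized ib_flow_spec_* entries, each carrying its own type and size. A minimal sketch that steers one destination MAC to a QP; the MAC address, port number and absence of error handling are illustrative:

static struct ib_flow *example_steer_dmac(struct ib_qp *qp)
{
	struct {
		struct ib_flow_attr	attr;
		struct ib_flow_spec_eth	eth;
	} rule = {
		.attr = {
			.type		= IB_FLOW_ATTR_NORMAL,
			.size		= sizeof(rule),
			.num_of_specs	= 1,
			.port		= 1,
		},
		.eth = {
			.type = IB_FLOW_SPEC_ETH,
			.size = sizeof(rule.eth),
			.val  = { .dst_mac = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } },
			.mask = { .dst_mac = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff } },
		},
	};

	return ib_create_flow(qp, &rule.attr, IB_FLOW_DOMAIN_USER);
}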
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
index 1a046b1595c..1017e0bdf8b 100644
--- a/include/rdma/iw_cm.h
+++ b/include/rdma/iw_cm.h
@@ -49,8 +49,8 @@ enum iw_cm_event_type {
struct iw_cm_event {
enum iw_cm_event_type event;
int status;
- struct sockaddr_in local_addr;
- struct sockaddr_in remote_addr;
+ struct sockaddr_storage local_addr;
+ struct sockaddr_storage remote_addr;
void *private_data;
void *provider_data;
u8 private_data_len;
@@ -83,8 +83,8 @@ struct iw_cm_id {
iw_cm_handler cm_handler; /* client callback function */
void *context; /* client cb context */
struct ib_device *device;
- struct sockaddr_in local_addr;
- struct sockaddr_in remote_addr;
+ struct sockaddr_storage local_addr;
+ struct sockaddr_storage remote_addr;
void *provider_data; /* provider private data */
iw_event_handler event_handler; /* cb for provider
events */
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 9d28ded2a3f..13d81c5c4eb 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -494,6 +494,38 @@ enum iscsi_param {
ISCSI_PARAM_BOOT_NIC,
ISCSI_PARAM_BOOT_TARGET,
+ ISCSI_PARAM_AUTO_SND_TGT_DISABLE,
+ ISCSI_PARAM_DISCOVERY_SESS,
+ ISCSI_PARAM_PORTAL_TYPE,
+ ISCSI_PARAM_CHAP_AUTH_EN,
+ ISCSI_PARAM_DISCOVERY_LOGOUT_EN,
+ ISCSI_PARAM_BIDI_CHAP_EN,
+ ISCSI_PARAM_DISCOVERY_AUTH_OPTIONAL,
+
+ ISCSI_PARAM_DEF_TIME2WAIT,
+ ISCSI_PARAM_DEF_TIME2RETAIN,
+ ISCSI_PARAM_MAX_SEGMENT_SIZE,
+ ISCSI_PARAM_STATSN,
+ ISCSI_PARAM_KEEPALIVE_TMO,
+ ISCSI_PARAM_LOCAL_PORT,
+ ISCSI_PARAM_TSID,
+ ISCSI_PARAM_DEF_TASKMGMT_TMO,
+
+ ISCSI_PARAM_TCP_TIMESTAMP_STAT,
+ ISCSI_PARAM_TCP_WSF_DISABLE,
+ ISCSI_PARAM_TCP_NAGLE_DISABLE,
+ ISCSI_PARAM_TCP_TIMER_SCALE,
+ ISCSI_PARAM_TCP_TIMESTAMP_EN,
+ ISCSI_PARAM_TCP_XMIT_WSF,
+ ISCSI_PARAM_TCP_RECV_WSF,
+ ISCSI_PARAM_IP_FRAGMENT_DISABLE,
+ ISCSI_PARAM_IPV4_TOS,
+ ISCSI_PARAM_IPV6_TC,
+ ISCSI_PARAM_IPV6_FLOW_LABEL,
+ ISCSI_PARAM_IS_FW_ASSIGNED_IPV6,
+
+ ISCSI_PARAM_DISCOVERY_PARENT_IDX,
+ ISCSI_PARAM_DISCOVERY_PARENT_TYPE,
/* must always be last */
ISCSI_PARAM_MAX,
};
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 4265a4bb83c..6ac9e17acdc 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -62,6 +62,8 @@ enum {
TMF_NOT_FOUND,
};
+#define ISID_SIZE 6
+
/* Connection suspend "bit" */
#define ISCSI_SUSPEND_BIT 1
@@ -173,6 +175,7 @@ struct iscsi_conn {
/* iSCSI connection-wide sequencing */
uint32_t exp_statsn;
+ uint32_t statsn;
/* control data */
int id; /* CID */
@@ -213,6 +216,22 @@ struct iscsi_conn {
int persistent_port;
char *persistent_address;
+ unsigned max_segment_size;
+ unsigned tcp_xmit_wsf;
+ unsigned tcp_recv_wsf;
+ uint16_t keepalive_tmo;
+ uint16_t local_port;
+ uint8_t tcp_timestamp_stat;
+ uint8_t tcp_nagle_disable;
+ uint8_t tcp_wsf_disable;
+ uint8_t tcp_timer_scale;
+ uint8_t tcp_timestamp_en;
+ uint8_t fragment_disable;
+ uint8_t ipv4_tos;
+ uint8_t ipv6_traffic_class;
+ uint8_t ipv6_flow_label;
+ uint8_t is_fw_assigned_ipv6;
+
/* MIB-statistics */
uint64_t txdata_octets;
uint64_t rxdata_octets;
@@ -290,6 +309,18 @@ struct iscsi_session {
char *boot_root;
char *boot_nic;
char *boot_target;
+ char *portal_type;
+ char *discovery_parent_type;
+ uint16_t discovery_parent_idx;
+ uint16_t def_taskmgmt_tmo;
+ uint16_t tsid;
+ uint8_t auto_snd_tgt_disable;
+ uint8_t discovery_sess;
+ uint8_t chap_auth_en;
+ uint8_t discovery_logout_en;
+ uint8_t bidi_chap_en;
+ uint8_t discovery_auth_optional;
+ uint8_t isid[ISID_SIZE];
/* control data */
struct iscsi_transport *tt;
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 4b87d99e7fa..d477bfb73fb 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -457,6 +457,8 @@ static inline int scsi_is_wlun(unsigned int lun)
* other paths */
#define DID_NEXUS_FAILURE 0x11 /* Permanent nexus failure, retry on other
* paths might yield different results */
+#define DID_ALLOC_FAILURE 0x12 /* Space allocation on the device failed */
+#define DID_MEDIUM_ERROR 0x13 /* Medium error */
#define DRIVER_OK 0x00 /* Driver status */
/*
@@ -486,7 +488,6 @@ static inline int scsi_is_wlun(unsigned int lun)
#define TIMEOUT_ERROR 0x2007
#define SCSI_RETURN_NOT_HANDLED 0x2008
#define FAST_IO_FAIL 0x2009
-#define TARGET_ERROR 0x200A
/*
* Midlevel queue return values.
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index a44954c7cdc..d65fbec2533 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -52,8 +52,15 @@ enum scsi_device_state {
enum scsi_device_event {
SDEV_EVT_MEDIA_CHANGE = 1, /* media has changed */
+ SDEV_EVT_INQUIRY_CHANGE_REPORTED, /* 3F 03 UA reported */
+ SDEV_EVT_CAPACITY_CHANGE_REPORTED, /* 2A 09 UA reported */
+ SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED, /* 38 07 UA reported */
+ SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED, /* 2A 01 UA reported */
+ SDEV_EVT_LUN_CHANGE_REPORTED, /* 3F 0E UA reported */
+
+ SDEV_EVT_FIRST = SDEV_EVT_MEDIA_CHANGE,
+ SDEV_EVT_LAST = SDEV_EVT_LUN_CHANGE_REPORTED,
- SDEV_EVT_LAST = SDEV_EVT_MEDIA_CHANGE,
SDEV_EVT_MAXBITS = SDEV_EVT_LAST + 1
};
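The added events let a LLD or the midlayer surface unit attentions such as capacity or inquiry-data changes to user space. A one-line usage sketch with the existing event helper; the GFP_KERNEL context is an assumption:

static void example_report_capacity_change(struct scsi_device *sdev)
{
	sdev_evt_send_simple(sdev, SDEV_EVT_CAPACITY_CHANGE_REPORTED, GFP_KERNEL);
}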
@@ -164,6 +171,7 @@ struct scsi_device {
atomic_t disk_events_disable_depth; /* disable depth for disk events */
DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
+ DECLARE_BITMAP(pending_events, SDEV_EVT_MAXBITS); /* pending events */
struct list_head event_list; /* asserted events */
struct work_struct event_work;
@@ -261,6 +269,9 @@ struct scsi_target {
* means no lun present. */
unsigned int no_report_luns:1; /* Don't use
* REPORT LUNS for scanning. */
+ unsigned int expecting_lun_change:1; /* A device has reported
+ * a 3F/0E UA, other devices on
+ * the same target will also. */
/* commands actually active on LLD. protected by host lock. */
unsigned int target_busy;
/*
diff --git a/include/sound/core.h b/include/sound/core.h
index c586617cfa0..2a14f1f02d4 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -27,6 +27,7 @@
#include <linux/rwsem.h> /* struct rw_semaphore */
#include <linux/pm.h> /* pm_message_t */
#include <linux/stringify.h>
+#include <linux/printk.h>
/* number of supported soundcards */
#ifdef CONFIG_SND_DYNAMIC_MINORS
@@ -376,6 +377,11 @@ void __snd_printk(unsigned int level, const char *file, int line,
#define snd_BUG() WARN(1, "BUG?\n")
/**
+ * Suppress high rates of output when CONFIG_SND_DEBUG is enabled.
+ */
+#define snd_printd_ratelimit() printk_ratelimit()
+
+/**
* snd_BUG_ON - debugging check macro
* @cond: condition to evaluate
*
@@ -398,6 +404,8 @@ static inline void _snd_printd(int level, const char *format, ...) {}
unlikely(__ret_warn_on); \
})
+static inline bool snd_printd_ratelimit(void) { return false; }
+
#endif /* CONFIG_SND_DEBUG */
#ifdef CONFIG_SND_DEBUG_VERBOSE
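snd_printd_ratelimit() gates debug output behind printk_ratelimit() when CONFIG_SND_DEBUG is set and compiles to a constant false otherwise, so noisy diagnostics can stay in hot paths. A usage sketch; the message text is illustrative:

	if (snd_printd_ratelimit())
		snd_printd("period elapsed while stream was stopped\n");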
diff --git a/include/sound/pxa2xx-lib.h b/include/sound/pxa2xx-lib.h
index 2fd3d251d9a..56e818e4a1c 100644
--- a/include/sound/pxa2xx-lib.h
+++ b/include/sound/pxa2xx-lib.h
@@ -6,13 +6,6 @@
/* PCM */
-struct pxa2xx_pcm_dma_params {
- char *name; /* stream identifier */
- u32 dcmd; /* DMA descriptor dcmd field */
- volatile u32 *drcmr; /* the DMA request channel to use */
- u32 dev_addr; /* device physical address for DMA */
-};
-
extern int __pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params);
extern int __pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream);
diff --git a/include/sound/rcar_snd.h b/include/sound/rcar_snd.h
new file mode 100644
index 00000000000..d35412ae03b
--- /dev/null
+++ b/include/sound/rcar_snd.h
@@ -0,0 +1,84 @@
+/*
+ * Renesas R-Car SRU/SCU/SSIU/SSI support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef RCAR_SND_H
+#define RCAR_SND_H
+
+#include <linux/sh_clk.h>
+
+#define RSND_GEN1_SRU 0
+#define RSND_GEN1_ADG 1
+#define RSND_GEN1_SSI 2
+
+#define RSND_GEN2_SRU 0
+#define RSND_GEN2_ADG 1
+#define RSND_GEN2_SSIU 2
+#define RSND_GEN2_SSI 3
+
+#define RSND_BASE_MAX 4
+
+/*
+ * flags
+ *
+ * 0xAB000000
+ *
+ * A : clock sharing settings
+ * B : SSI direction
+ */
+#define RSND_SSI_CLK_PIN_SHARE (1 << 31)
+#define RSND_SSI_CLK_FROM_ADG (1 << 30) /* clock parent is master */
+#define RSND_SSI_SYNC (1 << 29) /* SSI34_sync etc */
+#define RSND_SSI_DEPENDENT (1 << 28) /* SSI needs SRU/SCU */
+
+#define RSND_SSI_PLAY (1 << 24)
+
+#define RSND_SSI_SET(_dai_id, _dma_id, _pio_irq, _flags) \
+{ .dai_id = _dai_id, .dma_id = _dma_id, .pio_irq = _pio_irq, .flags = _flags }
+#define RSND_SSI_UNUSED \
+{ .dai_id = -1, .dma_id = -1, .pio_irq = -1, .flags = 0 }
+
+struct rsnd_ssi_platform_info {
+ int dai_id;
+ int dma_id;
+ int pio_irq;
+ u32 flags;
+};
+
+/*
+ * flags
+ */
+#define RSND_SCU_USB_HPBIF (1 << 31) /* it needs RSND_SSI_DEPENDENT */
+
+struct rsnd_scu_platform_info {
+ u32 flags;
+};
+
+/*
+ * flags
+ *
+ * 0x0000000A
+ *
+ * A : generation
+ */
+#define RSND_GEN1 (1 << 0) /* fixme */
+#define RSND_GEN2 (2 << 0) /* fixme */
+
+struct rcar_snd_info {
+ u32 flags;
+ struct rsnd_ssi_platform_info *ssi_info;
+ int ssi_info_nr;
+ struct rsnd_scu_platform_info *scu_info;
+ int scu_info_nr;
+ int (*start)(int id);
+ int (*stop)(int id);
+};
+
+#endif
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 3e479f4e15f..27a72d5d4b0 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -70,121 +70,144 @@ struct device;
.num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD}
+#define SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert) \
+ .reg = wreg, .mask = 1, .shift = wshift, \
+ .on_val = winvert ? 0 : 1, .off_val = winvert ? 1 : 0
+
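SND_SOC_DAPM_INIT_REG_VAL centralizes the register/shift/invert triple every power widget used to spell out, turning the old 'invert' flag into explicit on_val/off_val register values. Its expansion for an inverted control, shown as a comment:

/* SND_SOC_DAPM_INIT_REG_VAL(0x10, 3, 1) expands to
 *	.reg = 0x10, .mask = 1, .shift = 3, .on_val = 0, .off_val = 1
 * and with winvert == 0 the on/off values swap to 1 and 0.
 */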
/* path domain */
#define SND_SOC_DAPM_PGA(wname, wreg, wshift, winvert,\
wcontrols, wncontrols) \
-{ .id = snd_soc_dapm_pga, .name = wname, .reg = wreg, .shift = wshift, \
- .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
+{ .id = snd_soc_dapm_pga, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
#define SND_SOC_DAPM_OUT_DRV(wname, wreg, wshift, winvert,\
wcontrols, wncontrols) \
-{ .id = snd_soc_dapm_out_drv, .name = wname, .reg = wreg, .shift = wshift, \
- .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
+{ .id = snd_soc_dapm_out_drv, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
#define SND_SOC_DAPM_MIXER(wname, wreg, wshift, winvert, \
wcontrols, wncontrols)\
-{ .id = snd_soc_dapm_mixer, .name = wname, .reg = wreg, .shift = wshift, \
- .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
+{ .id = snd_soc_dapm_mixer, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
#define SND_SOC_DAPM_MIXER_NAMED_CTL(wname, wreg, wshift, winvert, \
wcontrols, wncontrols)\
-{ .id = snd_soc_dapm_mixer_named_ctl, .name = wname, .reg = wreg, \
- .shift = wshift, .invert = winvert, .kcontrol_news = wcontrols, \
- .num_kcontrols = wncontrols}
+{ .id = snd_soc_dapm_mixer_named_ctl, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
#define SND_SOC_DAPM_MICBIAS(wname, wreg, wshift, winvert) \
-{ .id = snd_soc_dapm_micbias, .name = wname, .reg = wreg, .shift = wshift, \
- .invert = winvert, .kcontrol_news = NULL, .num_kcontrols = 0}
+{ .id = snd_soc_dapm_micbias, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = NULL, .num_kcontrols = 0}
#define SND_SOC_DAPM_SWITCH(wname, wreg, wshift, winvert, wcontrols) \
-{ .id = snd_soc_dapm_switch, .name = wname, .reg = wreg, .shift = wshift, \
- .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = 1}
+{ .id = snd_soc_dapm_switch, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = 1}
#define SND_SOC_DAPM_MUX(wname, wreg, wshift, winvert, wcontrols) \
-{ .id = snd_soc_dapm_mux, .name = wname, .reg = wreg, .shift = wshift, \
- .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = 1}
+{ .id = snd_soc_dapm_mux, .name = wname, .reg = wreg, \
+ .kcontrol_news = wcontrols, .num_kcontrols = 1}
#define SND_SOC_DAPM_VIRT_MUX(wname, wreg, wshift, winvert, wcontrols) \
-{ .id = snd_soc_dapm_virt_mux, .name = wname, .reg = wreg, .shift = wshift, \
- .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = 1}
+{ .id = snd_soc_dapm_virt_mux, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = 1}
#define SND_SOC_DAPM_VALUE_MUX(wname, wreg, wshift, winvert, wcontrols) \
-{ .id = snd_soc_dapm_value_mux, .name = wname, .reg = wreg, \
- .shift = wshift, .invert = winvert, .kcontrol_news = wcontrols, \
- .num_kcontrols = 1}
+{ .id = snd_soc_dapm_value_mux, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = 1}
/* Simplified versions of above macros, assuming wncontrols = ARRAY_SIZE(wcontrols) */
#define SOC_PGA_ARRAY(wname, wreg, wshift, winvert,\
wcontrols) \
-{ .id = snd_soc_dapm_pga, .name = wname, .reg = wreg, .shift = wshift, \
- .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols)}
+{ .id = snd_soc_dapm_pga, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols)}
#define SOC_MIXER_ARRAY(wname, wreg, wshift, winvert, \
wcontrols)\
-{ .id = snd_soc_dapm_mixer, .name = wname, .reg = wreg, .shift = wshift, \
- .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols)}
+{ .id = snd_soc_dapm_mixer, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols)}
#define SOC_MIXER_NAMED_CTL_ARRAY(wname, wreg, wshift, winvert, \
wcontrols)\
-{ .id = snd_soc_dapm_mixer_named_ctl, .name = wname, .reg = wreg, \
- .shift = wshift, .invert = winvert, .kcontrol_news = wcontrols, \
- .num_kcontrols = ARRAY_SIZE(wcontrols)}
+{ .id = snd_soc_dapm_mixer_named_ctl, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols)}
/* path domain with event - event handler must return 0 for success */
#define SND_SOC_DAPM_PGA_E(wname, wreg, wshift, winvert, wcontrols, \
wncontrols, wevent, wflags) \
-{ .id = snd_soc_dapm_pga, .name = wname, .reg = wreg, .shift = wshift, \
- .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = wncontrols, \
+{ .id = snd_soc_dapm_pga, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = wncontrols, \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_OUT_DRV_E(wname, wreg, wshift, winvert, wcontrols, \
wncontrols, wevent, wflags) \
-{ .id = snd_soc_dapm_out_drv, .name = wname, .reg = wreg, .shift = wshift, \
- .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = wncontrols, \
+{ .id = snd_soc_dapm_out_drv, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = wncontrols, \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_MIXER_E(wname, wreg, wshift, winvert, wcontrols, \
wncontrols, wevent, wflags) \
-{ .id = snd_soc_dapm_mixer, .name = wname, .reg = wreg, .shift = wshift, \
- .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = wncontrols, \
+{ .id = snd_soc_dapm_mixer, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = wncontrols, \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_MIXER_NAMED_CTL_E(wname, wreg, wshift, winvert, \
wcontrols, wncontrols, wevent, wflags) \
-{ .id = snd_soc_dapm_mixer, .name = wname, .reg = wreg, .shift = wshift, \
- .invert = winvert, .kcontrol_news = wcontrols, \
+{ .id = snd_soc_dapm_mixer, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, \
.num_kcontrols = wncontrols, .event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_SWITCH_E(wname, wreg, wshift, winvert, wcontrols, \
wevent, wflags) \
-{ .id = snd_soc_dapm_switch, .name = wname, .reg = wreg, .shift = wshift, \
- .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = 1, \
+{ .id = snd_soc_dapm_switch, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = 1, \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_MUX_E(wname, wreg, wshift, winvert, wcontrols, \
wevent, wflags) \
-{ .id = snd_soc_dapm_mux, .name = wname, .reg = wreg, .shift = wshift, \
- .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = 1, \
+{ .id = snd_soc_dapm_mux, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = 1, \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_VIRT_MUX_E(wname, wreg, wshift, winvert, wcontrols, \
wevent, wflags) \
-{ .id = snd_soc_dapm_virt_mux, .name = wname, .reg = wreg, .shift = wshift, \
- .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = 1, \
+{ .id = snd_soc_dapm_virt_mux, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = 1, \
.event = wevent, .event_flags = wflags}
/* additional sequencing control within an event type */
#define SND_SOC_DAPM_PGA_S(wname, wsubseq, wreg, wshift, winvert, \
wevent, wflags) \
-{ .id = snd_soc_dapm_pga, .name = wname, .reg = wreg, .shift = wshift, \
- .invert = winvert, .event = wevent, .event_flags = wflags, \
+{ .id = snd_soc_dapm_pga, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .event = wevent, .event_flags = wflags, \
.subseq = wsubseq}
#define SND_SOC_DAPM_SUPPLY_S(wname, wsubseq, wreg, wshift, winvert, wevent, \
wflags) \
-{ .id = snd_soc_dapm_supply, .name = wname, .reg = wreg, \
- .shift = wshift, .invert = winvert, .event = wevent, \
- .event_flags = wflags, .subseq = wsubseq}
+{ .id = snd_soc_dapm_supply, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .event = wevent, .event_flags = wflags, .subseq = wsubseq}
/* Simplified versions of above macros, assuming wncontrols = ARRAY_SIZE(wcontrols) */
#define SOC_PGA_E_ARRAY(wname, wreg, wshift, winvert, wcontrols, \
wevent, wflags) \
-{ .id = snd_soc_dapm_pga, .name = wname, .reg = wreg, .shift = wshift, \
- .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols), \
+{ .id = snd_soc_dapm_pga, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols), \
.event = wevent, .event_flags = wflags}
#define SOC_MIXER_E_ARRAY(wname, wreg, wshift, winvert, wcontrols, \
wevent, wflags) \
-{ .id = snd_soc_dapm_mixer, .name = wname, .reg = wreg, .shift = wshift, \
- .invert = winvert, .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols), \
+{ .id = snd_soc_dapm_mixer, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols), \
.event = wevent, .event_flags = wflags}
#define SOC_MIXER_NAMED_CTL_E_ARRAY(wname, wreg, wshift, winvert, \
wcontrols, wevent, wflags) \
-{ .id = snd_soc_dapm_mixer, .name = wname, .reg = wreg, .shift = wshift, \
- .invert = winvert, .kcontrol_news = wcontrols, \
- .num_kcontrols = ARRAY_SIZE(wcontrols), .event = wevent, .event_flags = wflags}
+{ .id = snd_soc_dapm_mixer, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols), \
+ .event = wevent, .event_flags = wflags}
/* events that are pre and post DAPM */
#define SND_SOC_DAPM_PRE(wname, wevent) \
@@ -199,35 +222,36 @@ struct device;
/* stream domain */
#define SND_SOC_DAPM_AIF_IN(wname, stname, wslot, wreg, wshift, winvert) \
{ .id = snd_soc_dapm_aif_in, .name = wname, .sname = stname, \
- .reg = wreg, .shift = wshift, .invert = winvert }
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), }
#define SND_SOC_DAPM_AIF_IN_E(wname, stname, wslot, wreg, wshift, winvert, \
wevent, wflags) \
{ .id = snd_soc_dapm_aif_in, .name = wname, .sname = stname, \
- .reg = wreg, .shift = wshift, .invert = winvert, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags }
#define SND_SOC_DAPM_AIF_OUT(wname, stname, wslot, wreg, wshift, winvert) \
{ .id = snd_soc_dapm_aif_out, .name = wname, .sname = stname, \
- .reg = wreg, .shift = wshift, .invert = winvert }
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), }
#define SND_SOC_DAPM_AIF_OUT_E(wname, stname, wslot, wreg, wshift, winvert, \
wevent, wflags) \
{ .id = snd_soc_dapm_aif_out, .name = wname, .sname = stname, \
- .reg = wreg, .shift = wshift, .invert = winvert, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags }
#define SND_SOC_DAPM_DAC(wname, stname, wreg, wshift, winvert) \
-{ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, .reg = wreg, \
- .shift = wshift, .invert = winvert}
+{ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert) }
#define SND_SOC_DAPM_DAC_E(wname, stname, wreg, wshift, winvert, \
wevent, wflags) \
-{ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, .reg = wreg, \
- .shift = wshift, .invert = winvert, \
+{ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags}
+
#define SND_SOC_DAPM_ADC(wname, stname, wreg, wshift, winvert) \
-{ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, .reg = wreg, \
- .shift = wshift, .invert = winvert}
+{ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), }
#define SND_SOC_DAPM_ADC_E(wname, stname, wreg, wshift, winvert, \
wevent, wflags) \
-{ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, .reg = wreg, \
- .shift = wshift, .invert = winvert, \
+{ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_CLOCK_SUPPLY(wname) \
{ .id = snd_soc_dapm_clock_supply, .name = wname, \
@@ -241,14 +265,14 @@ struct device;
.on_val = won_val, .off_val = woff_val, .event = dapm_reg_event, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD}
#define SND_SOC_DAPM_SUPPLY(wname, wreg, wshift, winvert, wevent, wflags) \
-{ .id = snd_soc_dapm_supply, .name = wname, .reg = wreg, \
- .shift = wshift, .invert = winvert, .event = wevent, \
- .event_flags = wflags}
+{ .id = snd_soc_dapm_supply, .name = wname, \
+ SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
+ .event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_REGULATOR_SUPPLY(wname, wdelay, wflags) \
{ .id = snd_soc_dapm_regulator_supply, .name = wname, \
.reg = SND_SOC_NOPM, .shift = wdelay, .event = dapm_regulator_event, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \
- .invert = wflags}
+ .on_val = wflags}
/* dapm kcontrol types */
@@ -256,14 +280,26 @@ struct device;
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_volsw, \
.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
- .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert) }
+ .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
+#define SOC_DAPM_SINGLE_AUTODISABLE(xname, reg, shift, max, invert) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = snd_soc_info_volsw, \
+ .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
+ .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) }
#define SOC_DAPM_SINGLE_TLV(xname, reg, shift, max, invert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_volsw, \
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\
.tlv.p = (tlv_array), \
.get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
- .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert) }
+ .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
+#define SOC_DAPM_SINGLE_TLV_AUTODISABLE(xname, reg, shift, max, invert, tlv_array) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = snd_soc_info_volsw, \
+ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\
+ .tlv.p = (tlv_array), \
+ .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
+ .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) }
#define SOC_DAPM_ENUM(xname, xenum) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_enum_double, \
@@ -333,6 +369,7 @@ struct snd_soc_dapm_route;
struct snd_soc_dapm_context;
struct regulator;
struct snd_soc_dapm_widget_list;
+struct snd_soc_dapm_update;
int dapm_reg_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event);
@@ -376,7 +413,7 @@ int snd_soc_dapm_new_pcm(struct snd_soc_card *card,
struct snd_soc_dapm_widget *sink);
/* dapm path setup */
-int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm);
+int snd_soc_dapm_new_widgets(struct snd_soc_card *card);
void snd_soc_dapm_free(struct snd_soc_dapm_context *dapm);
int snd_soc_dapm_add_routes(struct snd_soc_dapm_context *dapm,
const struct snd_soc_dapm_route *route, int num);
@@ -391,10 +428,12 @@ void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
void snd_soc_dapm_shutdown(struct snd_soc_card *card);
/* external DAPM widget events */
-int snd_soc_dapm_mixer_update_power(struct snd_soc_dapm_widget *widget,
- struct snd_kcontrol *kcontrol, int connect);
-int snd_soc_dapm_mux_update_power(struct snd_soc_dapm_widget *widget,
- struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e);
+int snd_soc_dapm_mixer_update_power(struct snd_soc_dapm_context *dapm,
+ struct snd_kcontrol *kcontrol, int connect,
+ struct snd_soc_dapm_update *update);
+int snd_soc_dapm_mux_update_power(struct snd_soc_dapm_context *dapm,
+ struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e,
+ struct snd_soc_dapm_update *update);
/* dapm sys fs - used by the core */
int snd_soc_dapm_sys_add(struct device *dev);
@@ -424,6 +463,8 @@ void dapm_mark_io_dirty(struct snd_soc_dapm_context *dapm);
int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
struct snd_soc_dapm_widget_list **list);
+struct snd_soc_codec *snd_soc_dapm_kcontrol_codec(struct snd_kcontrol *kcontrol);
+
/* dapm widget types */
enum snd_soc_dapm_type {
snd_soc_dapm_input = 0, /* input pin */
@@ -455,6 +496,7 @@ enum snd_soc_dapm_type {
snd_soc_dapm_dai_in, /* link to DAI structure */
snd_soc_dapm_dai_out,
snd_soc_dapm_dai_link, /* link between two DAI structures */
+ snd_soc_dapm_kcontrol, /* Auto-disabled kcontrol */
};
enum snd_soc_dapm_subclass {
@@ -485,7 +527,6 @@ struct snd_soc_dapm_path {
/* source (input) and sink (output) widgets */
struct snd_soc_dapm_widget *source;
struct snd_soc_dapm_widget *sink;
- struct snd_kcontrol *kcontrol;
/* status */
u32 connect:1; /* source and sink widgets are connected */
@@ -498,6 +539,7 @@ struct snd_soc_dapm_path {
struct list_head list_source;
struct list_head list_sink;
+ struct list_head list_kcontrol;
struct list_head list;
};
@@ -518,12 +560,10 @@ struct snd_soc_dapm_widget {
/* dapm control */
int reg; /* negative reg = no direct dapm */
unsigned char shift; /* bits to shift */
- unsigned int value; /* widget current value */
unsigned int mask; /* non-shifted mask */
unsigned int on_val; /* on state value */
unsigned int off_val; /* off state value */
unsigned char power:1; /* block power status */
- unsigned char invert:1; /* invert the power bit */
unsigned char active:1; /* active stream on DAC, ADC's */
unsigned char connected:1; /* connected codec pin */
unsigned char new:1; /* cnew complete */
@@ -559,7 +599,6 @@ struct snd_soc_dapm_widget {
};
struct snd_soc_dapm_update {
- struct snd_soc_dapm_widget *widget;
struct snd_kcontrol *kcontrol;
int reg;
int mask;
@@ -573,8 +612,6 @@ struct snd_soc_dapm_context {
struct delayed_work delayed_work;
unsigned int idle_bias_off:1; /* Use BIAS_OFF instead of STANDBY */
- struct snd_soc_dapm_update *update;
-
void (*seq_notifier)(struct snd_soc_dapm_context *,
enum snd_soc_dapm_type, int);
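
To make the refactored widget macros and the new *_AUTODISABLE kcontrol variants above concrete, here is a minimal, hypothetical codec sketch (not part of this patch): the register addresses, control names and dB scale are invented purely for illustration, and the usual <sound/soc.h>, <sound/soc-dapm.h> and <sound/tlv.h> includes are assumed.

#include <sound/soc.h>
#include <sound/soc-dapm.h>
#include <sound/tlv.h>

/* Hypothetical codec registers, chosen only to exercise the macros. */
#define HYPO_MIXER_REG	0x10
#define HYPO_PGA_REG	0x11

static const DECLARE_TLV_DB_SCALE(hypo_mix_tlv, -1500, 100, 0);

/* Mixer input controls; the *_AUTODISABLE variants set .autodisable = 1
 * in soc_mixer_control so DAPM can gate the backing register bit. */
static const struct snd_kcontrol_new hypo_mixer_controls[] = {
	SOC_DAPM_SINGLE_AUTODISABLE("Line In Switch", HYPO_MIXER_REG, 0, 1, 0),
	SOC_DAPM_SINGLE_TLV_AUTODISABLE("Mic Volume", HYPO_MIXER_REG, 4, 7, 0,
					hypo_mix_tlv),
};

/* Widgets keep the familiar reg/shift/invert arguments; they are now
 * packed through SND_SOC_DAPM_INIT_REG_VAL() inside the macros. */
static const struct snd_soc_dapm_widget hypo_widgets[] = {
	SND_SOC_DAPM_MIXER("Output Mixer", HYPO_MIXER_REG, 7, 0,
			   hypo_mixer_controls,
			   ARRAY_SIZE(hypo_mixer_controls)),
	SND_SOC_DAPM_PGA("Output PGA", HYPO_PGA_REG, 0, 0, NULL, 0),
};
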
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index 04598f1efd7..047d657c331 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -133,6 +133,6 @@ void snd_soc_dpcm_be_set_state(struct snd_soc_pcm_runtime *be, int stream,
/* internal use only */
int soc_dpcm_be_digital_mute(struct snd_soc_pcm_runtime *fe, int mute);
int soc_dpcm_debugfs_add(struct snd_soc_pcm_runtime *rtd);
-int soc_dpcm_runtime_update(struct snd_soc_dapm_widget *);
+int soc_dpcm_runtime_update(struct snd_soc_card *);
#endif
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 6eabee7ec15..d22cb0a06fe 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -30,13 +30,13 @@
/*
* Convenience kcontrol builders
*/
-#define SOC_DOUBLE_VALUE(xreg, shift_left, shift_right, xmax, xinvert) \
+#define SOC_DOUBLE_VALUE(xreg, shift_left, shift_right, xmax, xinvert, xautodisable) \
((unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .rreg = xreg, .shift = shift_left, \
.rshift = shift_right, .max = xmax, .platform_max = xmax, \
- .invert = xinvert})
-#define SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert) \
- SOC_DOUBLE_VALUE(xreg, xshift, xshift, xmax, xinvert)
+ .invert = xinvert, .autodisable = xautodisable})
+#define SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert, xautodisable) \
+ SOC_DOUBLE_VALUE(xreg, xshift, xshift, xmax, xinvert, xautodisable)
#define SOC_SINGLE_VALUE_EXT(xreg, xmax, xinvert) \
((unsigned long)&(struct soc_mixer_control) \
{.reg = xreg, .max = xmax, .platform_max = xmax, .invert = xinvert})
@@ -52,7 +52,7 @@
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_volsw, .get = snd_soc_get_volsw,\
.put = snd_soc_put_volsw, \
- .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert) }
+ .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
#define SOC_SINGLE_RANGE(xname, xreg, xshift, xmin, xmax, xinvert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.info = snd_soc_info_volsw_range, .get = snd_soc_get_volsw_range, \
@@ -68,7 +68,7 @@
.tlv.p = (tlv_array), \
.info = snd_soc_info_volsw, .get = snd_soc_get_volsw,\
.put = snd_soc_put_volsw, \
- .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert) }
+ .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
#define SOC_SINGLE_SX_TLV(xname, xreg, xshift, xmin, xmax, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
@@ -97,7 +97,7 @@
.info = snd_soc_info_volsw, .get = snd_soc_get_volsw, \
.put = snd_soc_put_volsw, \
.private_value = SOC_DOUBLE_VALUE(reg, shift_left, shift_right, \
- max, invert) }
+ max, invert, 0) }
#define SOC_DOUBLE_R(xname, reg_left, reg_right, xshift, xmax, xinvert) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
.info = snd_soc_info_volsw, \
@@ -119,7 +119,7 @@
.info = snd_soc_info_volsw, .get = snd_soc_get_volsw, \
.put = snd_soc_put_volsw, \
.private_value = SOC_DOUBLE_VALUE(reg, shift_left, shift_right, \
- max, invert) }
+ max, invert, 0) }
#define SOC_DOUBLE_R_TLV(xname, reg_left, reg_right, xshift, xmax, xinvert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
@@ -190,14 +190,14 @@
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
.info = snd_soc_info_volsw, \
.get = xhandler_get, .put = xhandler_put, \
- .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert) }
+ .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert, 0) }
#define SOC_DOUBLE_EXT(xname, reg, shift_left, shift_right, max, invert,\
xhandler_get, xhandler_put) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
.info = snd_soc_info_volsw, \
.get = xhandler_get, .put = xhandler_put, \
.private_value = \
- SOC_DOUBLE_VALUE(reg, shift_left, shift_right, max, invert) }
+ SOC_DOUBLE_VALUE(reg, shift_left, shift_right, max, invert, 0) }
#define SOC_SINGLE_EXT_TLV(xname, xreg, xshift, xmax, xinvert,\
xhandler_get, xhandler_put, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
@@ -206,7 +206,7 @@
.tlv.p = (tlv_array), \
.info = snd_soc_info_volsw, \
.get = xhandler_get, .put = xhandler_put, \
- .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert) }
+ .private_value = SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert, 0) }
#define SOC_DOUBLE_EXT_TLV(xname, xreg, shift_left, shift_right, xmax, xinvert,\
xhandler_get, xhandler_put, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
@@ -216,7 +216,7 @@
.info = snd_soc_info_volsw, \
.get = xhandler_get, .put = xhandler_put, \
.private_value = SOC_DOUBLE_VALUE(xreg, shift_left, shift_right, \
- xmax, xinvert) }
+ xmax, xinvert, 0) }
#define SOC_DOUBLE_R_EXT_TLV(xname, reg_left, reg_right, xshift, xmax, xinvert,\
xhandler_get, xhandler_put, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
@@ -234,7 +234,7 @@
.private_value = xdata }
#define SOC_ENUM_EXT(xname, xenum, xhandler_get, xhandler_put) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
- .info = snd_soc_info_enum_ext, \
+ .info = snd_soc_info_enum_double, \
.get = xhandler_get, .put = xhandler_put, \
.private_value = (unsigned long)&xenum }
@@ -468,6 +468,8 @@ int snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
void snd_soc_free_ac97_codec(struct snd_soc_codec *codec);
int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
+int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
+ struct platform_device *pdev);
/*
*Controls
@@ -475,6 +477,8 @@ int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
struct snd_kcontrol *snd_soc_cnew(const struct snd_kcontrol_new *_template,
void *data, const char *long_name,
const char *prefix);
+struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
+ const char *name);
int snd_soc_add_codec_controls(struct snd_soc_codec *codec,
const struct snd_kcontrol_new *controls, int num_controls);
int snd_soc_add_platform_controls(struct snd_soc_platform *platform,
@@ -485,8 +489,6 @@ int snd_soc_add_dai_controls(struct snd_soc_dai *dai,
const struct snd_kcontrol_new *controls, int num_controls);
int snd_soc_info_enum_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo);
-int snd_soc_info_enum_ext(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo);
int snd_soc_get_enum_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol,
@@ -497,8 +499,6 @@ int snd_soc_put_value_enum_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo);
-int snd_soc_info_volsw_ext(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo);
#define snd_soc_info_bool_ext snd_ctl_boolean_mono_info
int snd_soc_get_volsw(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
@@ -697,7 +697,6 @@ struct snd_soc_codec {
unsigned int probed:1; /* Codec has been probed */
unsigned int ac97_registered:1; /* Codec has been AC97 registered */
unsigned int ac97_created:1; /* Codec has been created by SoC */
- unsigned int sysfs_registered:1; /* codec has been sysfs registered */
unsigned int cache_init:1; /* codec cache has been initialized */
unsigned int using_regmap:1; /* using regmap access */
u32 cache_only; /* Suppress writes to hardware */
@@ -705,7 +704,6 @@ struct snd_soc_codec {
/* codec IO */
void *control_data; /* codec control (i2c/3wire) data */
- enum snd_soc_control_type control_type;
hw_write_t hw_write;
unsigned int (*hw_read)(struct snd_soc_codec *, unsigned int);
unsigned int (*read)(struct snd_soc_codec *, unsigned int);
@@ -724,7 +722,6 @@ struct snd_soc_codec {
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_codec_root;
struct dentry *debugfs_reg;
- struct dentry *debugfs_dapm;
#endif
};
@@ -849,7 +846,6 @@ struct snd_soc_platform {
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_platform_root;
- struct dentry *debugfs_dapm;
#endif
};
@@ -934,6 +930,10 @@ struct snd_soc_dai_link {
/* machine stream operations */
const struct snd_soc_ops *ops;
const struct snd_soc_compr_ops *compr_ops;
+
+ /* For unidirectional dai links */
+ bool playback_only;
+ bool capture_only;
};
struct snd_soc_codec_conf {
@@ -1042,6 +1042,7 @@ struct snd_soc_card {
/* Generic DAPM context for the card */
struct snd_soc_dapm_context dapm;
struct snd_soc_dapm_stats dapm_stats;
+ struct snd_soc_dapm_update *update;
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_card_root;
@@ -1087,7 +1088,9 @@ struct snd_soc_pcm_runtime {
/* mixer control */
struct soc_mixer_control {
int min, max, platform_max;
- unsigned int reg, rreg, shift, rshift, invert;
+ unsigned int reg, rreg, shift, rshift;
+ unsigned int invert:1;
+ unsigned int autodisable:1;
};
struct soc_bytes {
diff --git a/include/trace/events/context_tracking.h b/include/trace/events/context_tracking.h
new file mode 100644
index 00000000000..ce8007cf29c
--- /dev/null
+++ b/include/trace/events/context_tracking.h
@@ -0,0 +1,58 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM context_tracking
+
+#if !defined(_TRACE_CONTEXT_TRACKING_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CONTEXT_TRACKING_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(context_tracking_user,
+
+ TP_PROTO(int dummy),
+
+ TP_ARGS(dummy),
+
+ TP_STRUCT__entry(
+ __field( int, dummy )
+ ),
+
+ TP_fast_assign(
+ __entry->dummy = dummy;
+ ),
+
+ TP_printk("%s", "")
+);
+
+/**
+ * user_enter - called when the kernel resumes to userspace
+ * @dummy: dummy arg to make trace event macro happy
+ *
+ * This event occurs when the kernel resumes to userspace after
+ * an exception or a syscall.
+ */
+DEFINE_EVENT(context_tracking_user, user_enter,
+
+ TP_PROTO(int dummy),
+
+ TP_ARGS(dummy)
+);
+
+/**
+ * user_exit - called when userspace enters the kernel
+ * @dummy: dummy arg to make trace event macro happy
+ *
+ * This event occurs when userspace enters the kernel through
+ * an exception or a syscall.
+ */
+DEFINE_EVENT(context_tracking_user, user_exit,
+
+ TP_PROTO(int dummy),
+
+ TP_ARGS(dummy)
+);
+
+
+#endif /* _TRACE_CONTEXT_TRACKING_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
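
As a rough sketch of how these dummy-argument events are meant to be emitted (the real call sites presumably live in the corresponding context-tracking C changes, which are not shown here), usage reduces to the following; the function names are illustrative only.

/* Sketch only: the integer argument is ignored and exists solely
 * because TRACE_EVENT needs at least one parameter. */
#define CREATE_TRACE_POINTS
#include <trace/events/context_tracking.h>

static void example_resume_userspace_path(void)
{
	trace_user_enter(0);	/* kernel is about to resume userspace */
}

static void example_enter_kernel_path(void)
{
	trace_user_exit(0);	/* userspace just entered the kernel */
}
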
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 2068db241f2..197d3125df2 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -14,7 +14,6 @@ struct ext4_prealloc_space;
struct ext4_inode_info;
struct mpage_da_data;
struct ext4_map_blocks;
-struct ext4_extent;
struct extent_status;
#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
@@ -64,10 +63,10 @@ struct extent_status;
{ EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER, "LAST_CLUSTER" })
#define show_extent_status(status) __print_flags(status, "", \
- { (1 << 3), "W" }, \
- { (1 << 2), "U" }, \
- { (1 << 1), "D" }, \
- { (1 << 0), "H" })
+ { EXTENT_STATUS_WRITTEN, "W" }, \
+ { EXTENT_STATUS_UNWRITTEN, "U" }, \
+ { EXTENT_STATUS_DELAYED, "D" }, \
+ { EXTENT_STATUS_HOLE, "H" })
TRACE_EVENT(ext4_free_inode,
@@ -2192,7 +2191,7 @@ TRACE_EVENT(ext4_ext_remove_space_done,
(unsigned short) __entry->eh_entries)
);
-TRACE_EVENT(ext4_es_insert_extent,
+DECLARE_EVENT_CLASS(ext4__es_extent,
TP_PROTO(struct inode *inode, struct extent_status *es),
TP_ARGS(inode, es),
@@ -2212,7 +2211,7 @@ TRACE_EVENT(ext4_es_insert_extent,
__entry->lblk = es->es_lblk;
__entry->len = es->es_len;
__entry->pblk = ext4_es_pblock(es);
- __entry->status = ext4_es_status(es) >> 60;
+ __entry->status = ext4_es_status(es);
),
TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s",
@@ -2222,6 +2221,18 @@ TRACE_EVENT(ext4_es_insert_extent,
__entry->pblk, show_extent_status(__entry->status))
);
+DEFINE_EVENT(ext4__es_extent, ext4_es_insert_extent,
+ TP_PROTO(struct inode *inode, struct extent_status *es),
+
+ TP_ARGS(inode, es)
+);
+
+DEFINE_EVENT(ext4__es_extent, ext4_es_cache_extent,
+ TP_PROTO(struct inode *inode, struct extent_status *es),
+
+ TP_ARGS(inode, es)
+);
+
TRACE_EVENT(ext4_es_remove_extent,
TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len),
@@ -2289,7 +2300,7 @@ TRACE_EVENT(ext4_es_find_delayed_extent_range_exit,
__entry->lblk = es->es_lblk;
__entry->len = es->es_len;
__entry->pblk = ext4_es_pblock(es);
- __entry->status = ext4_es_status(es) >> 60;
+ __entry->status = ext4_es_status(es);
),
TP_printk("dev %d,%d ino %lu es [%u/%u) mapped %llu status %s",
@@ -2343,7 +2354,7 @@ TRACE_EVENT(ext4_es_lookup_extent_exit,
__entry->lblk = es->es_lblk;
__entry->len = es->es_len;
__entry->pblk = ext4_es_pblock(es);
- __entry->status = ext4_es_status(es) >> 60;
+ __entry->status = ext4_es_status(es);
__entry->found = found;
),
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 8e42410bd15..cda100d6762 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -66,6 +66,43 @@ TRACE_EVENT(machine_suspend,
TP_printk("state=%lu", (unsigned long)__entry->state)
);
+TRACE_EVENT(device_pm_report_time,
+
+ TP_PROTO(struct device *dev, const char *pm_ops, s64 ops_time,
+ char *pm_event_str, int error),
+
+ TP_ARGS(dev, pm_ops, ops_time, pm_event_str, error),
+
+ TP_STRUCT__entry(
+ __string(device, dev_name(dev))
+ __string(driver, dev_driver_string(dev))
+ __string(parent, dev->parent ? dev_name(dev->parent) : "none")
+ __string(pm_ops, pm_ops ? pm_ops : "none ")
+ __string(pm_event_str, pm_event_str)
+ __field(s64, ops_time)
+ __field(int, error)
+ ),
+
+ TP_fast_assign(
+ const char *tmp = dev->parent ? dev_name(dev->parent) : "none";
+ const char *tmp_i = pm_ops ? pm_ops : "none ";
+
+ __assign_str(device, dev_name(dev));
+ __assign_str(driver, dev_driver_string(dev));
+ __assign_str(parent, tmp);
+ __assign_str(pm_ops, tmp_i);
+ __assign_str(pm_event_str, pm_event_str);
+ __entry->ops_time = ops_time;
+ __entry->error = error;
+ ),
+
+ /* ops_str has an extra space at the end */
+ TP_printk("%s %s parent=%s state=%s ops=%snsecs=%lld err=%d",
+ __get_str(driver), __get_str(device), __get_str(parent),
+ __get_str(pm_event_str), __get_str(pm_ops),
+ __entry->ops_time, __entry->error)
+);
+
DECLARE_EVENT_CLASS(wakeup_source,
TP_PROTO(const char *name, unsigned int state),
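
The new device_pm_report_time event is meant to be emitted around a device PM callback. A hedged sketch of such a caller follows; the helper name, the timing code and the "suspend" string are assumptions for illustration, not part of this patch.

/* Illustrative only: time one PM callback and report its duration. */
static int example_run_pm_callback(struct device *dev,
				   int (*cb)(struct device *dev),
				   char *pm_event_str)
{
	ktime_t start = ktime_get();
	int error = cb(dev);
	s64 nsecs = ktime_to_ns(ktime_sub(ktime_get(), start));

	trace_device_pm_report_time(dev, "suspend", nsecs, pm_event_str, error);
	return error;
}
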
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 59ebcc89f14..ee2376cfaab 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -19,12 +19,12 @@
*/
TRACE_EVENT(rcu_utilization,
- TP_PROTO(char *s),
+ TP_PROTO(const char *s),
TP_ARGS(s),
TP_STRUCT__entry(
- __field(char *, s)
+ __field(const char *, s)
),
TP_fast_assign(
@@ -51,14 +51,14 @@ TRACE_EVENT(rcu_utilization,
*/
TRACE_EVENT(rcu_grace_period,
- TP_PROTO(char *rcuname, unsigned long gpnum, char *gpevent),
+ TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent),
TP_ARGS(rcuname, gpnum, gpevent),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(unsigned long, gpnum)
- __field(char *, gpevent)
+ __field(const char *, gpevent)
),
TP_fast_assign(
@@ -89,21 +89,21 @@ TRACE_EVENT(rcu_grace_period,
*/
TRACE_EVENT(rcu_future_grace_period,
- TP_PROTO(char *rcuname, unsigned long gpnum, unsigned long completed,
+ TP_PROTO(const char *rcuname, unsigned long gpnum, unsigned long completed,
unsigned long c, u8 level, int grplo, int grphi,
- char *gpevent),
+ const char *gpevent),
TP_ARGS(rcuname, gpnum, completed, c, level, grplo, grphi, gpevent),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(unsigned long, gpnum)
__field(unsigned long, completed)
__field(unsigned long, c)
__field(u8, level)
__field(int, grplo)
__field(int, grphi)
- __field(char *, gpevent)
+ __field(const char *, gpevent)
),
TP_fast_assign(
@@ -132,13 +132,13 @@ TRACE_EVENT(rcu_future_grace_period,
*/
TRACE_EVENT(rcu_grace_period_init,
- TP_PROTO(char *rcuname, unsigned long gpnum, u8 level,
+ TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level,
int grplo, int grphi, unsigned long qsmask),
TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(unsigned long, gpnum)
__field(u8, level)
__field(int, grplo)
@@ -168,12 +168,12 @@ TRACE_EVENT(rcu_grace_period_init,
*/
TRACE_EVENT(rcu_preempt_task,
- TP_PROTO(char *rcuname, int pid, unsigned long gpnum),
+ TP_PROTO(const char *rcuname, int pid, unsigned long gpnum),
TP_ARGS(rcuname, pid, gpnum),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(unsigned long, gpnum)
__field(int, pid)
),
@@ -195,12 +195,12 @@ TRACE_EVENT(rcu_preempt_task,
*/
TRACE_EVENT(rcu_unlock_preempted_task,
- TP_PROTO(char *rcuname, unsigned long gpnum, int pid),
+ TP_PROTO(const char *rcuname, unsigned long gpnum, int pid),
TP_ARGS(rcuname, gpnum, pid),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(unsigned long, gpnum)
__field(int, pid)
),
@@ -224,14 +224,14 @@ TRACE_EVENT(rcu_unlock_preempted_task,
*/
TRACE_EVENT(rcu_quiescent_state_report,
- TP_PROTO(char *rcuname, unsigned long gpnum,
+ TP_PROTO(const char *rcuname, unsigned long gpnum,
unsigned long mask, unsigned long qsmask,
u8 level, int grplo, int grphi, int gp_tasks),
TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(unsigned long, gpnum)
__field(unsigned long, mask)
__field(unsigned long, qsmask)
@@ -268,15 +268,15 @@ TRACE_EVENT(rcu_quiescent_state_report,
*/
TRACE_EVENT(rcu_fqs,
- TP_PROTO(char *rcuname, unsigned long gpnum, int cpu, char *qsevent),
+ TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent),
TP_ARGS(rcuname, gpnum, cpu, qsevent),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(unsigned long, gpnum)
__field(int, cpu)
- __field(char *, qsevent)
+ __field(const char *, qsevent)
),
TP_fast_assign(
@@ -308,12 +308,12 @@ TRACE_EVENT(rcu_fqs,
*/
TRACE_EVENT(rcu_dyntick,
- TP_PROTO(char *polarity, long long oldnesting, long long newnesting),
+ TP_PROTO(const char *polarity, long long oldnesting, long long newnesting),
TP_ARGS(polarity, oldnesting, newnesting),
TP_STRUCT__entry(
- __field(char *, polarity)
+ __field(const char *, polarity)
__field(long long, oldnesting)
__field(long long, newnesting)
),
@@ -352,12 +352,12 @@ TRACE_EVENT(rcu_dyntick,
*/
TRACE_EVENT(rcu_prep_idle,
- TP_PROTO(char *reason),
+ TP_PROTO(const char *reason),
TP_ARGS(reason),
TP_STRUCT__entry(
- __field(char *, reason)
+ __field(const char *, reason)
),
TP_fast_assign(
@@ -376,13 +376,13 @@ TRACE_EVENT(rcu_prep_idle,
*/
TRACE_EVENT(rcu_callback,
- TP_PROTO(char *rcuname, struct rcu_head *rhp, long qlen_lazy,
+ TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
long qlen),
TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(void *, rhp)
__field(void *, func)
__field(long, qlen_lazy)
@@ -412,13 +412,13 @@ TRACE_EVENT(rcu_callback,
*/
TRACE_EVENT(rcu_kfree_callback,
- TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset,
+ TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
long qlen_lazy, long qlen),
TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(void *, rhp)
__field(unsigned long, offset)
__field(long, qlen_lazy)
@@ -447,12 +447,12 @@ TRACE_EVENT(rcu_kfree_callback,
*/
TRACE_EVENT(rcu_batch_start,
- TP_PROTO(char *rcuname, long qlen_lazy, long qlen, long blimit),
+ TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit),
TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(long, qlen_lazy)
__field(long, qlen)
__field(long, blimit)
@@ -477,12 +477,12 @@ TRACE_EVENT(rcu_batch_start,
*/
TRACE_EVENT(rcu_invoke_callback,
- TP_PROTO(char *rcuname, struct rcu_head *rhp),
+ TP_PROTO(const char *rcuname, struct rcu_head *rhp),
TP_ARGS(rcuname, rhp),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(void *, rhp)
__field(void *, func)
),
@@ -506,12 +506,12 @@ TRACE_EVENT(rcu_invoke_callback,
*/
TRACE_EVENT(rcu_invoke_kfree_callback,
- TP_PROTO(char *rcuname, struct rcu_head *rhp, unsigned long offset),
+ TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset),
TP_ARGS(rcuname, rhp, offset),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(void *, rhp)
__field(unsigned long, offset)
),
@@ -539,13 +539,13 @@ TRACE_EVENT(rcu_invoke_kfree_callback,
*/
TRACE_EVENT(rcu_batch_end,
- TP_PROTO(char *rcuname, int callbacks_invoked,
+ TP_PROTO(const char *rcuname, int callbacks_invoked,
bool cb, bool nr, bool iit, bool risk),
TP_ARGS(rcuname, callbacks_invoked, cb, nr, iit, risk),
TP_STRUCT__entry(
- __field(char *, rcuname)
+ __field(const char *, rcuname)
__field(int, callbacks_invoked)
__field(bool, cb)
__field(bool, nr)
@@ -577,13 +577,13 @@ TRACE_EVENT(rcu_batch_end,
*/
TRACE_EVENT(rcu_torture_read,
- TP_PROTO(char *rcutorturename, struct rcu_head *rhp,
+ TP_PROTO(const char *rcutorturename, struct rcu_head *rhp,
unsigned long secs, unsigned long c_old, unsigned long c),
TP_ARGS(rcutorturename, rhp, secs, c_old, c),
TP_STRUCT__entry(
- __field(char *, rcutorturename)
+ __field(const char *, rcutorturename)
__field(struct rcu_head *, rhp)
__field(unsigned long, secs)
__field(unsigned long, c_old)
@@ -623,13 +623,13 @@ TRACE_EVENT(rcu_torture_read,
*/
TRACE_EVENT(rcu_barrier,
- TP_PROTO(char *rcuname, char *s, int cpu, int cnt, unsigned long done),
+ TP_PROTO(const char *rcuname, const char *s, int cpu, int cnt, unsigned long done),
TP_ARGS(rcuname, s, cpu, cnt, done),
TP_STRUCT__entry(
- __field(char *, rcuname)
- __field(char *, s)
+ __field(const char *, rcuname)
+ __field(const char *, s)
__field(int, cpu)
__field(int, cnt)
__field(unsigned long, done)
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index e5586caff67..2e7d9947a10 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -57,7 +57,7 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
TP_PROTO(struct task_struct *p, int success),
- TP_ARGS(p, success),
+ TP_ARGS(__perf_task(p), success),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -73,9 +73,6 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
__entry->prio = p->prio;
__entry->success = success;
__entry->target_cpu = task_cpu(p);
- )
- TP_perf_assign(
- __perf_task(p);
),
TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
@@ -313,7 +310,7 @@ DECLARE_EVENT_CLASS(sched_stat_template,
TP_PROTO(struct task_struct *tsk, u64 delay),
- TP_ARGS(tsk, delay),
+ TP_ARGS(__perf_task(tsk), __perf_count(delay)),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -325,10 +322,6 @@ DECLARE_EVENT_CLASS(sched_stat_template,
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->delay = delay;
- )
- TP_perf_assign(
- __perf_count(delay);
- __perf_task(tsk);
),
TP_printk("comm=%s pid=%d delay=%Lu [ns]",
@@ -372,11 +365,11 @@ DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
* Tracepoint for accounting runtime (time the task is executing
* on a CPU).
*/
-TRACE_EVENT(sched_stat_runtime,
+DECLARE_EVENT_CLASS(sched_stat_runtime,
TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
- TP_ARGS(tsk, runtime, vruntime),
+ TP_ARGS(tsk, __perf_count(runtime), vruntime),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -390,9 +383,6 @@ TRACE_EVENT(sched_stat_runtime,
__entry->pid = tsk->pid;
__entry->runtime = runtime;
__entry->vruntime = vruntime;
- )
- TP_perf_assign(
- __perf_count(runtime);
),
TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
@@ -401,6 +391,10 @@ TRACE_EVENT(sched_stat_runtime,
(unsigned long long)__entry->vruntime)
);
+DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
+ TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
+ TP_ARGS(tsk, runtime, vruntime));
+
/*
* Tracepoint for showing priority inheritance modifying a tasks
* priority.
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 41a6643e213..5c7ab17cbb0 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -507,8 +507,14 @@ static inline notrace int ftrace_get_offsets_##call( \
#undef TP_fast_assign
#define TP_fast_assign(args...) args
-#undef TP_perf_assign
-#define TP_perf_assign(args...)
+#undef __perf_addr
+#define __perf_addr(a) (a)
+
+#undef __perf_count
+#define __perf_count(c) (c)
+
+#undef __perf_task
+#define __perf_task(t) (t)
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -636,16 +642,13 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call
#define __get_str(field) (char *)__get_dynamic_array(field)
#undef __perf_addr
-#define __perf_addr(a) __addr = (a)
+#define __perf_addr(a) (__addr = (a))
#undef __perf_count
-#define __perf_count(c) __count = (c)
+#define __perf_count(c) (__count = (c))
#undef __perf_task
-#define __perf_task(t) __task = (t)
-
-#undef TP_perf_assign
-#define TP_perf_assign(args...) args
+#define __perf_task(t) (__task = (t))
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
@@ -663,15 +666,20 @@ perf_trace_##call(void *__data, proto) \
int __data_size; \
int rctx; \
\
- perf_fetch_caller_regs(&__regs); \
- \
__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
+ \
+ head = this_cpu_ptr(event_call->perf_events); \
+ if (__builtin_constant_p(!__task) && !__task && \
+ hlist_empty(head)) \
+ return; \
+ \
__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
sizeof(u64)); \
__entry_size -= sizeof(u32); \
\
- entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \
- __entry_size, event_call->event.type, &__regs, &rctx); \
+ perf_fetch_caller_regs(&__regs); \
+ entry = perf_trace_buf_prepare(__entry_size, \
+ event_call->event.type, &__regs, &rctx); \
if (!entry) \
return; \
\
@@ -679,7 +687,6 @@ perf_trace_##call(void *__data, proto) \
\
{ assign; } \
\
- head = this_cpu_ptr(event_call->perf_events); \
perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \
__count, &__regs, head, __task); \
}
diff --git a/include/uapi/drm/Kbuild b/include/uapi/drm/Kbuild
index 119487e05e6..2d9a25daab0 100644
--- a/include/uapi/drm/Kbuild
+++ b/include/uapi/drm/Kbuild
@@ -16,3 +16,4 @@ header-y += sis_drm.h
header-y += tegra_drm.h
header-y += via_drm.h
header-y += vmwgfx_drm.h
+header-y += msm_drm.h
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 238a166b9fe..ece867889cc 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -181,7 +181,7 @@ enum drm_map_type {
_DRM_AGP = 3, /**< AGP/GART */
_DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
_DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
- _DRM_GEM = 6, /**< GEM object */
+ _DRM_GEM = 6, /**< GEM object (obsolete) */
};
/**
@@ -780,6 +780,7 @@ struct drm_event_vblank {
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
#define DRM_CAP_PRIME 0x5
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
+#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
#define DRM_PRIME_CAP_IMPORT 0x1
#define DRM_PRIME_CAP_EXPORT 0x2
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 53db7cea373..550811712f7 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -412,7 +412,8 @@ struct drm_mode_crtc_lut {
};
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
-#define DRM_MODE_PAGE_FLIP_FLAGS DRM_MODE_PAGE_FLIP_EVENT
+#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
+#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC)
/*
* Request a page flip on the specified crtc.
@@ -426,11 +427,14 @@ struct drm_mode_crtc_lut {
* flip is already pending as the ioctl is called, EBUSY will be
* returned.
*
- * The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will
- * request that drm sends back a vblank event (see drm.h: struct
- * drm_event_vblank) when the page flip is done. The user_data field
- * passed in with this ioctl will be returned as the user_data field
- * in the vblank event struct.
+ * Flag DRM_MODE_PAGE_FLIP_EVENT requests that drm sends back a vblank
+ * event (see drm.h: struct drm_event_vblank) when the page flip is
+ * done. The user_data field passed in with this ioctl will be
+ * returned as the user_data field in the vblank event struct.
+ *
+ * Flag DRM_MODE_PAGE_FLIP_ASYNC requests that the flip happen
+ * 'as soon as possible', meaning that it does not wait for the next vblank.
+ * This may cause tearing on the screen.
*
* The reserved field must be zero until we figure out something
* clever to use it for.
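
A hedged userspace sketch of the flag semantics documented above: the file descriptor, CRTC and framebuffer IDs are assumed to come from earlier modesetting code, the include paths assume the kernel uapi headers are installed, and the fallback path is illustrative.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static int example_async_flip(int fd, uint32_t crtc_id, uint32_t fb_id,
			      uint64_t user_data)
{
	struct drm_mode_crtc_page_flip flip;

	memset(&flip, 0, sizeof(flip));
	flip.crtc_id = crtc_id;
	flip.fb_id = fb_id;
	flip.user_data = user_data;
	flip.flags = DRM_MODE_PAGE_FLIP_EVENT | DRM_MODE_PAGE_FLIP_ASYNC;

	if (ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip) == 0)
		return 0;

	/* Drivers that do not support async flips reject the flag;
	 * retry with a plain vblank-synchronized flip. */
	flip.flags = DRM_MODE_PAGE_FLIP_EVENT;
	return ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip);
}
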
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 923ed7fe577..55bb5729bd7 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -33,6 +33,30 @@
* subject to backwards-compatibility constraints.
*/
+/**
+ * DOC: uevents generated by i915 on its device node
+ *
+ * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
+ * event from the gpu l3 cache. Additional information supplied is ROW,
+ * BANK, SUBBANK of the affected cacheline. Userspace should keep track of
+ * these events and if a specific cache-line seems to have a persistent
+ * error remap it with the l3 remapping tool supplied in intel-gpu-tools.
+ * The value supplied with the event is always 1.
+ *
+ * I915_ERROR_UEVENT - Generated upon error detection, currently only via
+ * hangcheck. The error detection event is a good indicator of when things
+ * began to go badly. The value supplied with the event is a 1 upon error
+ * detection, and a 0 upon reset completion, signifying no more error
+ * exists. NOTE: Disabling hangcheck or reset via module parameter will
+ * cause the related events to not be seen.
+ *
+ * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
+ * GPU. The value supplied with the event is always 1. NOTE: Disabling
+ * reset via module parameter will cause this event to not be seen.
+ */
+#define I915_L3_PARITY_UEVENT "L3_PARITY_ERROR"
+#define I915_ERROR_UEVENT "ERROR"
+#define I915_RESET_UEVENT "RESET"
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
@@ -310,6 +334,7 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_PINNED_BATCHES 24
#define I915_PARAM_HAS_EXEC_NO_RELOC 25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
+#define I915_PARAM_HAS_WT 27
typedef struct drm_i915_getparam {
int param;
@@ -744,8 +769,32 @@ struct drm_i915_gem_busy {
__u32 busy;
};
+/**
+ * I915_CACHING_NONE
+ *
+ * GPU access is not coherent with cpu caches. Default for machines without an
+ * LLC.
+ */
#define I915_CACHING_NONE 0
+/**
+ * I915_CACHING_CACHED
+ *
+ * GPU access is coherent with cpu caches and furthermore the data is cached in
+ * last-level caches shared between cpu cores and the gpu GT. Default on
+ * machines with HAS_LLC.
+ */
#define I915_CACHING_CACHED 1
+/**
+ * I915_CACHING_DISPLAY
+ *
+ * Special GPU caching mode which is coherent with the scanout engines.
+ * Transparently falls back to I915_CACHING_NONE on platforms where no special
+ * cache mode (like write-through or gfdt flushing) is available. The kernel
+ * automatically sets this mode when using a buffer as a scanout target.
+ * Userspace can manually set this mode to avoid a costly stall and clflush in
+ * the hotpath of drawing the first frame.
+ */
+#define I915_CACHING_DISPLAY 2
struct drm_i915_gem_caching {
/**
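
A hedged userspace sketch of opting a buffer into the new display caching mode documented above; the GEM handle is assumed to come from an earlier buffer-creation ioctl and error handling is minimal.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_set_display_caching(int fd, uint32_t handle)
{
	struct drm_i915_gem_caching arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.caching = I915_CACHING_DISPLAY;

	/* Avoids a costly stall plus clflush later, when the buffer
	 * is first used as a scanout target. */
	return ioctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
}
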
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
new file mode 100644
index 00000000000..d3c62074016
--- /dev/null
+++ b/include/uapi/drm/msm_drm.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __MSM_DRM_H__
+#define __MSM_DRM_H__
+
+#include <stddef.h>
+#include <drm/drm.h>
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints:
+ * 1) Do not use pointers, use uint64_t instead for 32 bit / 64 bit
+ * user/kernel compatibility
+ * 2) Keep fields aligned to their size
+ * 3) Because of how drm_ioctl() works, we can add new fields at
+ * the end of an ioctl if some care is taken: drm_ioctl() will
+ * zero out the new fields at the tail of the ioctl, so a zero
+ * value should have a backwards compatible meaning. And for
+ * output params, userspace won't see the newly added output
+ * fields.. so that has to be somehow ok.
+ */
+
+#define MSM_PIPE_NONE 0x00
+#define MSM_PIPE_2D0 0x01
+#define MSM_PIPE_2D1 0x02
+#define MSM_PIPE_3D0 0x10
+
+/* timeouts are specified in clock-monotonic absolute times (to simplify
+ * restarting interrupted ioctls). The following struct is logically the
+ * same as 'struct timespec' but 32/64b ABI safe.
+ */
+struct drm_msm_timespec {
+ int64_t tv_sec; /* seconds */
+ int64_t tv_nsec; /* nanoseconds */
+};
+
+#define MSM_PARAM_GPU_ID 0x01
+#define MSM_PARAM_GMEM_SIZE 0x02
+
+struct drm_msm_param {
+ uint32_t pipe; /* in, MSM_PIPE_x */
+ uint32_t param; /* in, MSM_PARAM_x */
+ uint64_t value; /* out (get_param) or in (set_param) */
+};
+
+/*
+ * GEM buffers:
+ */
+
+#define MSM_BO_SCANOUT 0x00000001 /* scanout capable */
+#define MSM_BO_GPU_READONLY 0x00000002
+#define MSM_BO_CACHE_MASK 0x000f0000
+/* cache modes */
+#define MSM_BO_CACHED 0x00010000
+#define MSM_BO_WC 0x00020000
+#define MSM_BO_UNCACHED 0x00040000
+
+struct drm_msm_gem_new {
+ uint64_t size; /* in */
+ uint32_t flags; /* in, mask of MSM_BO_x */
+ uint32_t handle; /* out */
+};
+
+struct drm_msm_gem_info {
+ uint32_t handle; /* in */
+ uint32_t pad;
+ uint64_t offset; /* out, offset to pass to mmap() */
+};
+
+#define MSM_PREP_READ 0x01
+#define MSM_PREP_WRITE 0x02
+#define MSM_PREP_NOSYNC 0x04
+
+struct drm_msm_gem_cpu_prep {
+ uint32_t handle; /* in */
+ uint32_t op; /* in, mask of MSM_PREP_x */
+ struct drm_msm_timespec timeout; /* in */
+};
+
+struct drm_msm_gem_cpu_fini {
+ uint32_t handle; /* in */
+};
+
+/*
+ * Cmdstream Submission:
+ */
+
+/* The value written into the cmdstream is logically:
+ *
+ * ((relocbuf->gpuaddr + reloc_offset) << shift) | or
+ *
+ * When we have GPU's w/ >32bit ptrs, it should be possible to deal
+ * with this by emit'ing two reloc entries with appropriate shift
+ * values. Or a new MSM_SUBMIT_CMD_x type would also be an option.
+ *
+ * NOTE that reloc's must be sorted by order of increasing submit_offset,
+ * otherwise EINVAL.
+ */
+struct drm_msm_gem_submit_reloc {
+ uint32_t submit_offset; /* in, offset from submit_bo */
+ uint32_t or; /* in, value OR'd with result */
+ int32_t shift; /* in, amount of left shift (can be negative) */
+ uint32_t reloc_idx; /* in, index of reloc_bo buffer */
+ uint64_t reloc_offset; /* in, offset from start of reloc_bo */
+};
+
+/* submit-types:
+ * BUF - this cmd buffer is executed normally.
+ * IB_TARGET_BUF - this cmd buffer is an IB target. Reloc's are
+ * processed normally, but the kernel does not setup an IB to
+ * this buffer in the first-level ringbuffer
+ * CTX_RESTORE_BUF - only executed if there has been a GPU context
+ * switch since the last SUBMIT ioctl
+ */
+#define MSM_SUBMIT_CMD_BUF 0x0001
+#define MSM_SUBMIT_CMD_IB_TARGET_BUF 0x0002
+#define MSM_SUBMIT_CMD_CTX_RESTORE_BUF 0x0003
+struct drm_msm_gem_submit_cmd {
+ uint32_t type; /* in, one of MSM_SUBMIT_CMD_x */
+ uint32_t submit_idx; /* in, index of submit_bo cmdstream buffer */
+ uint32_t submit_offset; /* in, offset into submit_bo */
+ uint32_t size; /* in, cmdstream size */
+ uint32_t pad;
+ uint32_t nr_relocs; /* in, number of submit_reloc's */
+ uint64_t __user relocs; /* in, ptr to array of submit_reloc's */
+};
+
+/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
+ * cmdstream buffer(s) themselves or reloc entries) has one (and only
+ * one) entry in the submit->bos[] table.
+ *
+ * As an optimization, the current buffer (gpu virtual address) can be
+ * passed back through the 'presumed' field. If on a subsequent reloc,
+ * userspace passes back a 'presumed' address that is still valid,
+ * then patching the cmdstream for this entry is skipped. This can
+ * avoid the kernel needing to map/access the cmdstream bo in the common
+ * case.
+ */
+#define MSM_SUBMIT_BO_READ 0x0001
+#define MSM_SUBMIT_BO_WRITE 0x0002
+struct drm_msm_gem_submit_bo {
+ uint32_t flags; /* in, mask of MSM_SUBMIT_BO_x */
+ uint32_t handle; /* in, GEM handle */
+ uint64_t presumed; /* in/out, presumed buffer address */
+};
+
+/* Each cmdstream submit consists of a table of buffers involved, and
+ * one or more cmdstream buffers. This allows for conditional execution
+ * (context-restore), and IB buffers needed for per tile/bin draw cmds.
+ */
+struct drm_msm_gem_submit {
+ uint32_t pipe; /* in, MSM_PIPE_x */
+ uint32_t fence; /* out */
+ uint32_t nr_bos; /* in, number of submit_bo's */
+ uint32_t nr_cmds; /* in, number of submit_cmd's */
+ uint64_t __user bos; /* in, ptr to array of submit_bo's */
+ uint64_t __user cmds; /* in, ptr to array of submit_cmd's */
+};
+
+/* The normal way to synchronize with the GPU is just to CPU_PREP on
+ * a buffer if you need to access it from the CPU (other cmdstream
+ * submission from same or other contexts, PAGE_FLIP ioctl, etc, all
+ * handle the required synchronization under the hood). This ioctl
+ * mainly just exists as a way to implement the gallium pipe_fence
+ * APIs without requiring a dummy bo to synchronize on.
+ */
+struct drm_msm_wait_fence {
+ uint32_t fence; /* in */
+ uint32_t pad;
+ struct drm_msm_timespec timeout; /* in */
+};
+
+#define DRM_MSM_GET_PARAM 0x00
+/* placeholder:
+#define DRM_MSM_SET_PARAM 0x01
+ */
+#define DRM_MSM_GEM_NEW 0x02
+#define DRM_MSM_GEM_INFO 0x03
+#define DRM_MSM_GEM_CPU_PREP 0x04
+#define DRM_MSM_GEM_CPU_FINI 0x05
+#define DRM_MSM_GEM_SUBMIT 0x06
+#define DRM_MSM_WAIT_FENCE 0x07
+#define DRM_MSM_NUM_IOCTLS 0x08
+
+#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
+#define DRM_IOCTL_MSM_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_NEW, struct drm_msm_gem_new)
+#define DRM_IOCTL_MSM_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_INFO, struct drm_msm_gem_info)
+#define DRM_IOCTL_MSM_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_PREP, struct drm_msm_gem_cpu_prep)
+#define DRM_IOCTL_MSM_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_GEM_CPU_FINI, struct drm_msm_gem_cpu_fini)
+#define DRM_IOCTL_MSM_GEM_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GEM_SUBMIT, struct drm_msm_gem_submit)
+#define DRM_IOCTL_MSM_WAIT_FENCE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_WAIT_FENCE, struct drm_msm_wait_fence)
+
+#endif /* __MSM_DRM_H__ */
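
To make the new GEM ioctls above concrete, here is a hedged userspace sketch that allocates a write-combined buffer, queries its mmap offset and maps it; the helper name and flag choice are illustrative, and the function returns MAP_FAILED on any error.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/msm_drm.h>

static void *example_msm_bo_create_map(int fd, uint64_t size, uint32_t *handle)
{
	struct drm_msm_gem_new req;
	struct drm_msm_gem_info info;

	memset(&req, 0, sizeof(req));
	req.size = size;
	req.flags = MSM_BO_WC;
	if (ioctl(fd, DRM_IOCTL_MSM_GEM_NEW, &req))
		return MAP_FAILED;

	memset(&info, 0, sizeof(info));
	info.handle = req.handle;
	if (ioctl(fd, DRM_IOCTL_MSM_GEM_INFO, &info))
		return MAP_FAILED;

	*handle = req.handle;
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, info.offset);
}
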
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 321d4ac5c51..fa8b3adf9ff 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -979,6 +979,8 @@ struct drm_radeon_cs {
#define RADEON_INFO_RING_WORKING 0x15
/* SI tile mode array */
#define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16
+/* query if CP DMA is supported on the compute ring */
+#define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17
struct drm_radeon_info {
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 997f9f2f096..e7c94eeb947 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -227,6 +227,7 @@ header-y += kvm_para.h
endif
header-y += l2tp.h
+header-y += libc-compat.h
header-y += limits.h
header-y += llc.h
header-y += loop.h
diff --git a/include/uapi/linux/can/gw.h b/include/uapi/linux/can/gw.h
index ae07bec74f4..4e27c82b564 100644
--- a/include/uapi/linux/can/gw.h
+++ b/include/uapi/linux/can/gw.h
@@ -45,6 +45,7 @@ enum {
CGW_DST_IF, /* ifindex of destination network interface */
CGW_FILTER, /* specify struct can_filter on source CAN device */
CGW_DELETED, /* number of deleted CAN frames (see max_hops param) */
+ CGW_LIM_HOPS, /* limit the number of hops of this specific rule */
__CGW_MAX
};
@@ -116,13 +117,19 @@ enum {
* Sets a CAN receive filter for the gateway job specified by the
* struct can_filter described in include/linux/can.h
*
- * CGW_MOD_XXX (length 17 bytes):
+ * CGW_MOD_(AND|OR|XOR|SET) (length 17 bytes):
* Specifies a modification that's done to a received CAN frame before it is
* send out to the destination interface.
*
* <struct can_frame> data used as operator
* <u8> affected CAN frame elements
*
+ * CGW_LIM_HOPS (length 1 byte):
+ * Limit the number of hops of this specific rule. Usually the received CAN
+ * frame can be processed as many as 'max_hops' times (which is given at module
+ * load time of the can-gw module). This value is used to reduce the number of
+ * possible hops for this gateway rule to a value smaller than max_hops.
+ *
* CGW_CS_XOR (length 4 bytes):
* Set a simple XOR checksum starting with an initial value into
* data[result-idx] using data[start-idx] .. data[end-idx]
diff --git a/include/uapi/linux/cm4000_cs.h b/include/uapi/linux/cm4000_cs.h
index bc51f77db91..1217f751a1b 100644
--- a/include/uapi/linux/cm4000_cs.h
+++ b/include/uapi/linux/cm4000_cs.h
@@ -2,6 +2,7 @@
#define _UAPI_CM4000_H_
#include <linux/types.h>
+#include <linux/ioctl.h>
#define MAX_ATR 33
diff --git a/include/uapi/linux/dn.h b/include/uapi/linux/dn.h
index 9c50445462d..5fbdd3d49eb 100644
--- a/include/uapi/linux/dn.h
+++ b/include/uapi/linux/dn.h
@@ -2,6 +2,7 @@
#define _LINUX_DN_H
#include <linux/types.h>
+#include <linux/if_ether.h>
/*
@@ -120,7 +121,7 @@ struct linkinfo_dn {
* Ethernet address format (for DECnet)
*/
union etheraddress {
- __u8 dne_addr[6]; /* Full ethernet address */
+ __u8 dne_addr[ETH_ALEN]; /* Full ethernet address */
struct {
__u8 dne_hiord[4]; /* DECnet HIORD prefix */
__u8 dne_nodeaddr[2]; /* DECnet node address */
diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h
index 51da65b68b8..2b82d7e3097 100644
--- a/include/uapi/linux/fib_rules.h
+++ b/include/uapi/linux/fib_rules.h
@@ -44,8 +44,8 @@ enum {
FRA_FWMARK, /* mark */
FRA_FLOW, /* flow/class id */
FRA_UNUSED6,
- FRA_UNUSED7,
- FRA_UNUSED8,
+ FRA_SUPPRESS_IFGROUP,
+ FRA_SUPPRESS_PREFIXLEN,
FRA_TABLE, /* Extended table id */
FRA_FWMASK, /* mask for netfilter mark */
FRA_OIFNAME,
diff --git a/include/uapi/linux/fiemap.h b/include/uapi/linux/fiemap.h
index d830747f5c0..0c51d617dae 100644
--- a/include/uapi/linux/fiemap.h
+++ b/include/uapi/linux/fiemap.h
@@ -40,6 +40,7 @@ struct fiemap {
#define FIEMAP_FLAG_SYNC 0x00000001 /* sync file data before map */
#define FIEMAP_FLAG_XATTR 0x00000002 /* map extended attribute tree */
+#define FIEMAP_FLAG_CACHE 0x00000004 /* request caching of the extents */
#define FIEMAP_FLAGS_COMPAT (FIEMAP_FLAG_SYNC | FIEMAP_FLAG_XATTR)
diff --git a/include/uapi/linux/firewire-cdev.h b/include/uapi/linux/firewire-cdev.h
index d5003695349..1db453e4b55 100644
--- a/include/uapi/linux/firewire-cdev.h
+++ b/include/uapi/linux/firewire-cdev.h
@@ -215,8 +215,8 @@ struct fw_cdev_event_request2 {
* with the %FW_CDEV_ISO_INTERRUPT bit set, when explicitly requested with
* %FW_CDEV_IOC_FLUSH_ISO, or when there have been so many completed packets
* without the interrupt bit set that the kernel's internal buffer for @header
- * is about to overflow. (In the last case, kernels with ABI version < 5 drop
- * header data up to the next interrupt packet.)
+ * is about to overflow. (In the last case, ABI versions < 5 drop header data
+ * up to the next interrupt packet.)
*
* Isochronous transmit events (context type %FW_CDEV_ISO_CONTEXT_TRANSMIT):
*
diff --git a/include/uapi/linux/icmpv6.h b/include/uapi/linux/icmpv6.h
index e0133c73c30..590beda78ea 100644
--- a/include/uapi/linux/icmpv6.h
+++ b/include/uapi/linux/icmpv6.h
@@ -115,6 +115,8 @@ struct icmp6hdr {
#define ICMPV6_NOT_NEIGHBOUR 2
#define ICMPV6_ADDR_UNREACH 3
#define ICMPV6_PORT_UNREACH 4
+#define ICMPV6_POLICY_FAIL 5
+#define ICMPV6_REJECT_ROUTE 6
/*
* Codes for Time Exceeded
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 2d70d79ce2f..39f621a9fe8 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -14,6 +14,7 @@
#define _UAPI_LINUX_IF_BRIDGE_H
#include <linux/types.h>
+#include <linux/if_ether.h>
#define SYSFS_BRIDGE_ATTR "bridge"
#define SYSFS_BRIDGE_FDB "brforward"
@@ -88,7 +89,7 @@ struct __port_info {
};
struct __fdb_entry {
- __u8 mac_addr[6];
+ __u8 mac_addr[ETH_ALEN];
__u8 port_no;
__u8 is_local;
__u32 ageing_timer_value;
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 03f6170ab33..80394e8dc3a 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -143,6 +143,7 @@ enum {
IFLA_NUM_TX_QUEUES,
IFLA_NUM_RX_QUEUES,
IFLA_CARRIER,
+ IFLA_PHYS_PORT_ID,
__IFLA_MAX
};
@@ -313,6 +314,8 @@ enum {
IFLA_VXLAN_L2MISS,
IFLA_VXLAN_L3MISS,
IFLA_VXLAN_PORT, /* destination port */
+ IFLA_VXLAN_GROUP6,
+ IFLA_VXLAN_LOCAL6,
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index b950c02030c..dbf06667394 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -56,6 +56,7 @@ struct sockaddr_ll {
#define PACKET_FANOUT_LB 1
#define PACKET_FANOUT_CPU 2
#define PACKET_FANOUT_ROLLOVER 3
+#define PACKET_FANOUT_RND 4
#define PACKET_FANOUT_FLAG_ROLLOVER 0x1000
#define PACKET_FANOUT_FLAG_DEFRAG 0x8000
diff --git a/include/uapi/linux/if_pppox.h b/include/uapi/linux/if_pppox.h
index e36a4aecd31..e128769331b 100644
--- a/include/uapi/linux/if_pppox.h
+++ b/include/uapi/linux/if_pppox.h
@@ -46,7 +46,7 @@ struct pppoe_addr {
* PPTP addressing definition
*/
struct pptp_addr {
- __be16 call_id;
+ __u16 call_id;
struct in_addr sin_addr;
};
diff --git a/include/uapi/linux/if_tun.h b/include/uapi/linux/if_tun.h
index 82334f88967..e9502dd1ee2 100644
--- a/include/uapi/linux/if_tun.h
+++ b/include/uapi/linux/if_tun.h
@@ -56,6 +56,8 @@
#define TUNGETVNETHDRSZ _IOR('T', 215, int)
#define TUNSETVNETHDRSZ _IOW('T', 216, int)
#define TUNSETQUEUE _IOW('T', 217, int)
+#define TUNSETIFINDEX _IOW('T', 218, unsigned int)
+#define TUNGETFILTER _IOR('T', 219, struct sock_fprog)
/* TUNSETIFF ifr flags */
#define IFF_TUN 0x0001
@@ -70,6 +72,10 @@
#define IFF_DETACH_QUEUE 0x0400
/* read-only flag */
#define IFF_PERSIST 0x0800
+#define IFF_NOFILTER 0x1000
+
+/* Socket options */
+#define TUN_TX_TIMESTAMP 1
/* Features for GSO (TUNSETOFFLOAD). */
#define TUN_F_CSUM 0x01 /* You can hand me unchecksummed packets. */
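TUNSETIFINDEX lets a restore tool ask for a specific interface index for a tun device, which is mainly useful for checkpoint/restore, while IFF_NOFILTER and TUNGETFILTER expose any attached socket filter. A minimal sketch of the ioctl side, assuming the tun fd is already open (helper name and error handling are illustrative):

/* sketch: request a specific ifindex for a tun device being restored */
#include <sys/ioctl.h>
#include <linux/if_tun.h>

static int tun_set_ifindex(int tunfd, unsigned int ifindex)
{
        /* intended to be issued before the device is (re)attached with TUNSETIFF */
        return ioctl(tunfd, TUNSETIFINDEX, &ifindex);
}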
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 9edb441df82..f9e8e496ae5 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -24,30 +24,53 @@
/* Standard well-defined IP protocols. */
enum {
IPPROTO_IP = 0, /* Dummy protocol for TCP */
+#define IPPROTO_IP IPPROTO_IP
IPPROTO_ICMP = 1, /* Internet Control Message Protocol */
+#define IPPROTO_ICMP IPPROTO_ICMP
IPPROTO_IGMP = 2, /* Internet Group Management Protocol */
+#define IPPROTO_IGMP IPPROTO_IGMP
IPPROTO_IPIP = 4, /* IPIP tunnels (older KA9Q tunnels use 94) */
+#define IPPROTO_IPIP IPPROTO_IPIP
IPPROTO_TCP = 6, /* Transmission Control Protocol */
+#define IPPROTO_TCP IPPROTO_TCP
IPPROTO_EGP = 8, /* Exterior Gateway Protocol */
+#define IPPROTO_EGP IPPROTO_EGP
IPPROTO_PUP = 12, /* PUP protocol */
+#define IPPROTO_PUP IPPROTO_PUP
IPPROTO_UDP = 17, /* User Datagram Protocol */
+#define IPPROTO_UDP IPPROTO_UDP
IPPROTO_IDP = 22, /* XNS IDP protocol */
+#define IPPROTO_IDP IPPROTO_IDP
+ IPPROTO_TP = 29, /* SO Transport Protocol Class 4 */
+#define IPPROTO_TP IPPROTO_TP
IPPROTO_DCCP = 33, /* Datagram Congestion Control Protocol */
- IPPROTO_RSVP = 46, /* RSVP protocol */
+#define IPPROTO_DCCP IPPROTO_DCCP
+ IPPROTO_IPV6 = 41, /* IPv6-in-IPv4 tunnelling */
+#define IPPROTO_IPV6 IPPROTO_IPV6
+ IPPROTO_RSVP = 46, /* RSVP Protocol */
+#define IPPROTO_RSVP IPPROTO_RSVP
IPPROTO_GRE = 47, /* Cisco GRE tunnels (rfc 1701,1702) */
-
- IPPROTO_IPV6 = 41, /* IPv6-in-IPv4 tunnelling */
-
- IPPROTO_ESP = 50, /* Encapsulation Security Payload protocol */
- IPPROTO_AH = 51, /* Authentication Header protocol */
- IPPROTO_BEETPH = 94, /* IP option pseudo header for BEET */
- IPPROTO_PIM = 103, /* Protocol Independent Multicast */
-
- IPPROTO_COMP = 108, /* Compression Header protocol */
- IPPROTO_SCTP = 132, /* Stream Control Transport Protocol */
+#define IPPROTO_GRE IPPROTO_GRE
+ IPPROTO_ESP = 50, /* Encapsulation Security Payload protocol */
+#define IPPROTO_ESP IPPROTO_ESP
+ IPPROTO_AH = 51, /* Authentication Header protocol */
+#define IPPROTO_AH IPPROTO_AH
+ IPPROTO_MTP = 92, /* Multicast Transport Protocol */
+#define IPPROTO_MTP IPPROTO_MTP
+ IPPROTO_BEETPH = 94, /* IP option pseudo header for BEET */
+#define IPPROTO_BEETPH IPPROTO_BEETPH
+ IPPROTO_ENCAP = 98, /* Encapsulation Header */
+#define IPPROTO_ENCAP IPPROTO_ENCAP
+ IPPROTO_PIM = 103, /* Protocol Independent Multicast */
+#define IPPROTO_PIM IPPROTO_PIM
+ IPPROTO_COMP = 108, /* Compression Header Protocol */
+#define IPPROTO_COMP IPPROTO_COMP
+ IPPROTO_SCTP = 132, /* Stream Control Transport Protocol */
+#define IPPROTO_SCTP IPPROTO_SCTP
IPPROTO_UDPLITE = 136, /* UDP-Lite (RFC 3828) */
-
- IPPROTO_RAW = 255, /* Raw IP packets */
+#define IPPROTO_UDPLITE IPPROTO_UDPLITE
+ IPPROTO_RAW = 255, /* Raw IP packets */
+#define IPPROTO_RAW IPPROTO_RAW
IPPROTO_MAX
};
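Mirroring each enumerator with a same-named #define keeps the numeric values in an enum while still letting userspace probe for individual protocols with the preprocessor, which a plain enum cannot offer. A small sketch of the intended usage pattern:

/* sketch: feature-test a protocol constant after the enum conversion */
#include <stdio.h>
#include <linux/in.h>

int main(void)
{
#ifdef IPPROTO_DCCP     /* works because the enum member is shadowed by a #define */
        printf("IPPROTO_DCCP = %d\n", IPPROTO_DCCP);
#else
        puts("IPPROTO_DCCP not available");
#endif
        return 0;
}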
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index 53b1d56a6e7..440d5c47914 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -22,22 +22,30 @@
#define _UAPI_LINUX_IN6_H
#include <linux/types.h>
+#include <linux/libc-compat.h>
/*
* IPv6 address structure
*/
+#if __UAPI_DEF_IN6_ADDR
struct in6_addr {
union {
__u8 u6_addr8[16];
+#if __UAPI_DEF_IN6_ADDR_ALT
__be16 u6_addr16[8];
__be32 u6_addr32[4];
+#endif
} in6_u;
#define s6_addr in6_u.u6_addr8
+#if __UAPI_DEF_IN6_ADDR_ALT
#define s6_addr16 in6_u.u6_addr16
#define s6_addr32 in6_u.u6_addr32
+#endif
};
+#endif /* __UAPI_DEF_IN6_ADDR */
+#if __UAPI_DEF_SOCKADDR_IN6
struct sockaddr_in6 {
unsigned short int sin6_family; /* AF_INET6 */
__be16 sin6_port; /* Transport layer port # */
@@ -45,7 +53,9 @@ struct sockaddr_in6 {
struct in6_addr sin6_addr; /* IPv6 address */
__u32 sin6_scope_id; /* scope id (new in RFC2553) */
};
+#endif /* __UAPI_DEF_SOCKADDR_IN6 */
+#if __UAPI_DEF_IPV6_MREQ
struct ipv6_mreq {
/* IPv6 multicast address of group */
struct in6_addr ipv6mr_multiaddr;
@@ -53,6 +63,7 @@ struct ipv6_mreq {
/* local IPv6 address of interface */
int ipv6mr_ifindex;
};
+#endif /* __UAPI_DEF_IPV6_MREQ */
#define ipv6mr_acaddr ipv6mr_multiaddr
@@ -114,13 +125,24 @@ struct in6_flowlabel_req {
/*
* IPV6 extension headers
*/
-#define IPPROTO_HOPOPTS 0 /* IPv6 hop-by-hop options */
-#define IPPROTO_ROUTING 43 /* IPv6 routing header */
-#define IPPROTO_FRAGMENT 44 /* IPv6 fragmentation header */
-#define IPPROTO_ICMPV6 58 /* ICMPv6 */
-#define IPPROTO_NONE 59 /* IPv6 no next header */
-#define IPPROTO_DSTOPTS 60 /* IPv6 destination options */
-#define IPPROTO_MH 135 /* IPv6 mobility header */
+#if __UAPI_DEF_IPPROTO_V6
+enum {
+ IPPROTO_HOPOPTS = 0, /* IPv6 hop-by-hop options */
+#define IPPROTO_HOPOPTS IPPROTO_HOPOPTS
+ IPPROTO_ROUTING = 43, /* IPv6 routing header */
+#define IPPROTO_ROUTING IPPROTO_ROUTING
+ IPPROTO_FRAGMENT = 44, /* IPv6 fragmentation header */
+#define IPPROTO_FRAGMENT IPPROTO_FRAGMENT
+ IPPROTO_ICMPV6 = 58, /* ICMPv6 */
+#define IPPROTO_ICMPV6 IPPROTO_ICMPV6
+ IPPROTO_NONE = 59, /* IPv6 no next header */
+#define IPPROTO_NONE IPPROTO_NONE
+ IPPROTO_DSTOPTS = 60, /* IPv6 destination options */
+#define IPPROTO_DSTOPTS IPPROTO_DSTOPTS
+ IPPROTO_MH = 135, /* IPv6 mobility header */
+#define IPPROTO_MH IPPROTO_MH
+};
+#endif /* __UAPI_DEF_IPPROTO_V6 */
/*
* IPv6 TLV options.
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index d584047b072..76457eef172 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -716,6 +716,14 @@ struct input_keymap_entry {
#define BTN_DPAD_LEFT 0x222
#define BTN_DPAD_RIGHT 0x223
+#define BTN_FRET_FAR_UP 0x224
+#define BTN_FRET_UP 0x225
+#define BTN_FRET_MID 0x226
+#define BTN_FRET_LOW 0x227
+#define BTN_FRET_FAR_LOW 0x228
+#define BTN_STRUM_BAR_UP 0x229
+#define BTN_STRUM_BAR_DOWN 0x22a
+
#define BTN_TRIGGER_HAPPY 0x2c0
#define BTN_TRIGGER_HAPPY1 0x2c0
#define BTN_TRIGGER_HAPPY2 0x2c1
@@ -829,8 +837,21 @@ struct input_keymap_entry {
#define ABS_MT_TOOL_X 0x3c /* Center X tool position */
#define ABS_MT_TOOL_Y 0x3d /* Center Y tool position */
-
-#define ABS_MAX 0x3f
+/* Drums and guitars (mostly toys) */
+#define ABS_TOM_FAR_LEFT 0x40
+#define ABS_TOM_LEFT 0x41
+#define ABS_TOM_RIGHT 0x42
+#define ABS_TOM_FAR_RIGHT 0x43
+#define ABS_CYMBAL_FAR_LEFT 0x44
+#define ABS_CYMBAL_LEFT 0x45
+#define ABS_CYMBAL_RIGHT 0x46
+#define ABS_CYMBAL_FAR_RIGHT 0x47
+#define ABS_BASS 0x48
+#define ABS_HI_HAT 0x49
+#define ABS_FRET_BOARD 0x4a /* Guitar fret board, vertical pos */
+#define ABS_WHAMMY_BAR 0x4b /* Guitar whammy bar (or vibrato) */
+
+#define ABS_MAX 0x4f
#define ABS_CNT (ABS_MAX+1)
/*
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index 6cf06bfd841..411959405ab 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -133,4 +133,40 @@ struct ip_beet_phdr {
__u8 reserved;
};
+/* index values for the variables in ipv4_devconf */
+enum
+{
+ IPV4_DEVCONF_FORWARDING=1,
+ IPV4_DEVCONF_MC_FORWARDING,
+ IPV4_DEVCONF_PROXY_ARP,
+ IPV4_DEVCONF_ACCEPT_REDIRECTS,
+ IPV4_DEVCONF_SECURE_REDIRECTS,
+ IPV4_DEVCONF_SEND_REDIRECTS,
+ IPV4_DEVCONF_SHARED_MEDIA,
+ IPV4_DEVCONF_RP_FILTER,
+ IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE,
+ IPV4_DEVCONF_BOOTP_RELAY,
+ IPV4_DEVCONF_LOG_MARTIANS,
+ IPV4_DEVCONF_TAG,
+ IPV4_DEVCONF_ARPFILTER,
+ IPV4_DEVCONF_MEDIUM_ID,
+ IPV4_DEVCONF_NOXFRM,
+ IPV4_DEVCONF_NOPOLICY,
+ IPV4_DEVCONF_FORCE_IGMP_VERSION,
+ IPV4_DEVCONF_ARP_ANNOUNCE,
+ IPV4_DEVCONF_ARP_IGNORE,
+ IPV4_DEVCONF_PROMOTE_SECONDARIES,
+ IPV4_DEVCONF_ARP_ACCEPT,
+ IPV4_DEVCONF_ARP_NOTIFY,
+ IPV4_DEVCONF_ACCEPT_LOCAL,
+ IPV4_DEVCONF_SRC_VMARK,
+ IPV4_DEVCONF_PROXY_ARP_PVLAN,
+ IPV4_DEVCONF_ROUTE_LOCALNET,
+ IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL,
+ IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL,
+ __IPV4_DEVCONF_MAX
+};
+
+#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
+
#endif /* _UAPI_LINUX_IP_H */
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 4bda4cf5b0f..593b0e32d95 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -160,6 +160,9 @@ enum {
DEVCONF_ACCEPT_DAD,
DEVCONF_FORCE_TLLAO,
DEVCONF_NDISC_NOTIFY,
+ DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL,
+ DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL,
+ DEVCONF_SUPPRESS_FRAG_NDISC,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index acccd08be6c..99c25338ede 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -667,6 +667,7 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_PPC_RTAS 91
#define KVM_CAP_IRQ_XICS 92
#define KVM_CAP_ARM_EL1_32BIT 93
+#define KVM_CAP_SPAPR_MULTITCE 94
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h
index cea2c5c72d2..2841f86eae0 100644
--- a/include/uapi/linux/kvm_para.h
+++ b/include/uapi/linux/kvm_para.h
@@ -19,6 +19,7 @@
#define KVM_HC_MMU_OP 2
#define KVM_HC_FEATURES 3
#define KVM_HC_PPC_MAP_MAGIC_PAGE 4
+#define KVM_HC_KICK_CPU 5
/*
* hypercalls use architecture specific
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
new file mode 100644
index 00000000000..335e8a7cad3
--- /dev/null
+++ b/include/uapi/linux/libc-compat.h
@@ -0,0 +1,103 @@
+/*
+ * Compatibility interface for userspace libc header coordination:
+ *
+ * Define compatibility macros that are used to control the inclusion or
+ * exclusion of UAPI structures and definitions in coordination with another
+ * userspace C library.
+ *
+ * This header is intended to solve the problem of UAPI definitions that
+ * conflict with userspace definitions. If a UAPI header has such conflicting
+ * definitions then the solution is as follows:
+ *
+ * * Synchronize the UAPI header and the libc headers so either one can be
+ * used and such that the ABI is preserved. If this is not possible then
+ * no simple compatibility interface exists (you need to write translating
+ * wrappers and rename things) and you can't use this interface.
+ *
+ * Then follow this process:
+ *
+ * (a) Include libc-compat.h in the UAPI header.
+ * e.g. #include <linux/libc-compat.h>
+ * This include must be as early as possible.
+ *
+ * (b) In libc-compat.h add enough code to detect that the conflicting
+ * userspace libc header has been included first.
+ *
+ * (c) If the userspace libc header has been included first define a set of
+ * guard macros of the form __UAPI_DEF_FOO and set their values to 1, else
+ * set their values to 0.
+ *
+ * (d) Back in the UAPI header with the conflicting definitions, guard the
+ * definitions with:
+ * #if __UAPI_DEF_FOO
+ * ...
+ * #endif
+ *
+ * This fixes the situation where the linux headers are included *after* the
+ * libc headers. To fix the problem with the inclusion in the other order the
+ * userspace libc headers must be fixed like this:
+ *
+ * * For all definitions that conflict with kernel definitions wrap those
+ * defines in the following:
+ * #if !__UAPI_DEF_FOO
+ * ...
+ * #endif
+ *
+ * This prevents the redefinition of a construct already defined by the kernel.
+ */
+#ifndef _UAPI_LIBC_COMPAT_H
+#define _UAPI_LIBC_COMPAT_H
+
+/* We have included glibc headers... */
+#if defined(__GLIBC__)
+
+/* Coordinate with glibc netinet/in.h header. */
+#if defined(_NETINET_IN_H)
+
+/* GLIBC headers included first so don't define anything
+ * that would already be defined. */
+#define __UAPI_DEF_IN6_ADDR 0
+/* The exception is the in6_addr macros which must be defined
+ * if the glibc code didn't define them. This guard matches
+ * the guard in glibc/inet/netinet/in.h which defines the
+ * additional in6_addr macros, e.g. s6_addr16 and s6_addr32. */
+#if defined(__USE_MISC) || defined (__USE_GNU)
+#define __UAPI_DEF_IN6_ADDR_ALT 0
+#else
+#define __UAPI_DEF_IN6_ADDR_ALT 1
+#endif
+#define __UAPI_DEF_SOCKADDR_IN6 0
+#define __UAPI_DEF_IPV6_MREQ 0
+#define __UAPI_DEF_IPPROTO_V6 0
+
+#else
+
+/* Linux headers included first, and we must define everything
+ * we need. The expectation is that glibc will check the
+ * __UAPI_DEF_* defines and adjust appropriately. */
+#define __UAPI_DEF_IN6_ADDR 1
+/* We unconditionally define the in6_addr macros and glibc must
+ * coordinate. */
+#define __UAPI_DEF_IN6_ADDR_ALT 1
+#define __UAPI_DEF_SOCKADDR_IN6 1
+#define __UAPI_DEF_IPV6_MREQ 1
+#define __UAPI_DEF_IPPROTO_V6 1
+
+#endif /* _NETINET_IN_H */
+
+
+/* If we did not see any headers from any supported C libraries,
+ * or we are being included in the kernel, then define everything
+ * that we need. */
+#else /* !defined(__GLIBC__) */
+
+/* Definitions for in6.h */
+#define __UAPI_DEF_IN6_ADDR 1
+#define __UAPI_DEF_IN6_ADDR_ALT 1
+#define __UAPI_DEF_SOCKADDR_IN6 1
+#define __UAPI_DEF_IPV6_MREQ 1
+#define __UAPI_DEF_IPPROTO_V6 1
+
+#endif /* __GLIBC__ */
+
+#endif /* _UAPI_LIBC_COMPAT_H */
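Step (d) of the process described above is what the in6.h hunks earlier in this diff implement. As a standalone illustration, a UAPI header with a conflicting definition would be wrapped like this (struct foo and __UAPI_DEF_FOO are the placeholder names from the comment, not real definitions):

/* sketch: guarding a hypothetical conflicting UAPI definition */
#include <linux/types.h>
#include <linux/libc-compat.h>

#if __UAPI_DEF_FOO              /* evaluates to 0 when the libc header was seen first */
struct foo {
        __u32 bar;
};
#endif /* __UAPI_DEF_FOO */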
diff --git a/include/uapi/linux/netfilter/Kbuild b/include/uapi/linux/netfilter/Kbuild
index 41115776d76..174915420d3 100644
--- a/include/uapi/linux/netfilter/Kbuild
+++ b/include/uapi/linux/netfilter/Kbuild
@@ -22,6 +22,7 @@ header-y += xt_CONNMARK.h
header-y += xt_CONNSECMARK.h
header-y += xt_CT.h
header-y += xt_DSCP.h
+header-y += xt_HMARK.h
header-y += xt_IDLETIMER.h
header-y += xt_LED.h
header-y += xt_LOG.h
@@ -68,6 +69,7 @@ header-y += xt_quota.h
header-y += xt_rateest.h
header-y += xt_realm.h
header-y += xt_recent.h
+header-y += xt_rpfilter.h
header-y += xt_sctp.h
header-y += xt_set.h
header-y += xt_socket.h
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index d69483fb382..8dd803818eb 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -99,7 +99,8 @@ enum ip_conntrack_events {
IPCT_PROTOINFO, /* protocol information has changed */
IPCT_HELPER, /* new helper has been set */
IPCT_MARK, /* new mark has been set */
- IPCT_NATSEQADJ, /* NAT is doing sequence adjustment */
+ IPCT_SEQADJ, /* sequence adjustment has changed */
+ IPCT_NATSEQADJ = IPCT_SEQADJ,
IPCT_SECMARK, /* new security mark has been set */
IPCT_LABEL, /* new connlabel has been set */
};
diff --git a/include/uapi/linux/netfilter/nfnetlink_conntrack.h b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
index 08fabc6c93f..acad6c52a65 100644
--- a/include/uapi/linux/netfilter/nfnetlink_conntrack.h
+++ b/include/uapi/linux/netfilter/nfnetlink_conntrack.h
@@ -42,8 +42,10 @@ enum ctattr_type {
CTA_ID,
CTA_NAT_DST,
CTA_TUPLE_MASTER,
- CTA_NAT_SEQ_ADJ_ORIG,
- CTA_NAT_SEQ_ADJ_REPLY,
+ CTA_SEQ_ADJ_ORIG,
+ CTA_NAT_SEQ_ADJ_ORIG = CTA_SEQ_ADJ_ORIG,
+ CTA_SEQ_ADJ_REPLY,
+ CTA_NAT_SEQ_ADJ_REPLY = CTA_SEQ_ADJ_REPLY,
CTA_SECMARK, /* obsolete */
CTA_ZONE,
CTA_SECCTX,
@@ -165,6 +167,15 @@ enum ctattr_protonat {
};
#define CTA_PROTONAT_MAX (__CTA_PROTONAT_MAX - 1)
+enum ctattr_seqadj {
+ CTA_SEQADJ_UNSPEC,
+ CTA_SEQADJ_CORRECTION_POS,
+ CTA_SEQADJ_OFFSET_BEFORE,
+ CTA_SEQADJ_OFFSET_AFTER,
+ __CTA_SEQADJ_MAX
+};
+#define CTA_SEQADJ_MAX (__CTA_SEQADJ_MAX - 1)
+
enum ctattr_natseq {
CTA_NAT_SEQ_UNSPEC,
CTA_NAT_SEQ_CORRECTION_POS,
diff --git a/include/uapi/linux/netfilter/nfnetlink_queue.h b/include/uapi/linux/netfilter/nfnetlink_queue.h
index 3a9b9214733..0132bad79de 100644
--- a/include/uapi/linux/netfilter/nfnetlink_queue.h
+++ b/include/uapi/linux/netfilter/nfnetlink_queue.h
@@ -46,6 +46,7 @@ enum nfqnl_attr_type {
NFQA_CT_INFO, /* enum ip_conntrack_info */
NFQA_CAP_LEN, /* __u32 length of captured packet */
NFQA_SKB_INFO, /* __u32 skb meta information */
+ NFQA_EXP, /* nf_conntrack_netlink.h */
__NFQA_MAX
};
diff --git a/include/linux/netfilter/xt_HMARK.h b/include/uapi/linux/netfilter/xt_HMARK.h
index 826fc580757..826fc580757 100644
--- a/include/linux/netfilter/xt_HMARK.h
+++ b/include/uapi/linux/netfilter/xt_HMARK.h
diff --git a/include/uapi/linux/netfilter/xt_SYNPROXY.h b/include/uapi/linux/netfilter/xt_SYNPROXY.h
new file mode 100644
index 00000000000..2d59fbaa93c
--- /dev/null
+++ b/include/uapi/linux/netfilter/xt_SYNPROXY.h
@@ -0,0 +1,16 @@
+#ifndef _XT_SYNPROXY_H
+#define _XT_SYNPROXY_H
+
+#define XT_SYNPROXY_OPT_MSS 0x01
+#define XT_SYNPROXY_OPT_WSCALE 0x02
+#define XT_SYNPROXY_OPT_SACK_PERM 0x04
+#define XT_SYNPROXY_OPT_TIMESTAMP 0x08
+#define XT_SYNPROXY_OPT_ECN 0x10
+
+struct xt_synproxy_info {
+ __u8 options;
+ __u8 wscale;
+ __u16 mss;
+};
+
+#endif /* _XT_SYNPROXY_H */
diff --git a/include/linux/netfilter/xt_rpfilter.h b/include/uapi/linux/netfilter/xt_rpfilter.h
index 8358d4f7195..8358d4f7195 100644
--- a/include/linux/netfilter/xt_rpfilter.h
+++ b/include/uapi/linux/netfilter/xt_rpfilter.h
diff --git a/include/uapi/linux/netfilter_bridge/ebt_802_3.h b/include/uapi/linux/netfilter_bridge/ebt_802_3.h
index 5bf84912a08..f37522aade2 100644
--- a/include/uapi/linux/netfilter_bridge/ebt_802_3.h
+++ b/include/uapi/linux/netfilter_bridge/ebt_802_3.h
@@ -2,6 +2,7 @@
#define _UAPI__LINUX_BRIDGE_EBT_802_3_H
#include <linux/types.h>
+#include <linux/if_ether.h>
#define EBT_802_3_SAP 0x01
#define EBT_802_3_TYPE 0x02
@@ -42,8 +43,8 @@ struct hdr_ni {
};
struct ebt_802_3_hdr {
- __u8 daddr[6];
- __u8 saddr[6];
+ __u8 daddr[ETH_ALEN];
+ __u8 saddr[ETH_ALEN];
__be16 len;
union {
struct hdr_ui ui;
diff --git a/include/uapi/linux/netfilter_ipv4/ipt_CLUSTERIP.h b/include/uapi/linux/netfilter_ipv4/ipt_CLUSTERIP.h
index c6a204c9704..eac0f6548f4 100644
--- a/include/uapi/linux/netfilter_ipv4/ipt_CLUSTERIP.h
+++ b/include/uapi/linux/netfilter_ipv4/ipt_CLUSTERIP.h
@@ -2,6 +2,7 @@
#define _IPT_CLUSTERIP_H_target
#include <linux/types.h>
+#include <linux/if_ether.h>
enum clusterip_hashmode {
CLUSTERIP_HASHMODE_SIP = 0,
@@ -22,7 +23,7 @@ struct ipt_clusterip_tgt_info {
__u32 flags;
/* only relevant for new ones */
- __u8 clustermac[6];
+ __u8 clustermac[ETH_ALEN];
__u16 num_total_nodes;
__u16 num_local_nodes;
__u16 local_nodes[CLUSTERIP_MAX_NODES];
diff --git a/include/uapi/linux/nfc.h b/include/uapi/linux/nfc.h
index caed0f324d5..29bed72a4ac 100644
--- a/include/uapi/linux/nfc.h
+++ b/include/uapi/linux/nfc.h
@@ -69,8 +69,22 @@
* starting a poll from a device which has a secure element enabled means
* we want to do SE based card emulation.
* @NFC_CMD_DISABLE_SE: Disable the physical link to a specific secure element.
- * @NFC_CMD_FW_UPLOAD: Request to Load/flash firmware, or event to inform that
- * some firmware was loaded
+ * @NFC_CMD_FW_DOWNLOAD: Request to load/flash firmware, or event to inform
+ * that some firmware was loaded
+ * @NFC_EVENT_SE_ADDED: Event emitted when a new secure element is discovered.
+ * This will typically be sent whenever a new NFC controller shows up with
+ * either an embedded SE or a UICC connected to it through SWP.
+ * @NFC_EVENT_SE_REMOVED: Event emitted when a secure element is removed from
+ * the system, as a consequence of e.g. an NFC controller being unplugged.
+ * @NFC_EVENT_SE_CONNECTIVITY: This event is emitted whenever a secure element
+ * is requesting connectivity access. For example a UICC SE may need to
+ * talk with a sleeping modem and will notify this need by sending this
+ * event. It is then up to userspace to decide if it will wake the modem
+ * up or not.
+ * @NFC_EVENT_SE_TRANSACTION: This event is sent when an application running on
+ * a specific SE notifies us about the end of a transaction. The parameter
+ * for this event is the application ID (AID).
+ * @NFC_CMD_GET_SE: Dump all discovered secure elements from an NFC controller.
*/
enum nfc_commands {
NFC_CMD_UNSPEC,
@@ -94,9 +108,12 @@ enum nfc_commands {
NFC_CMD_DISABLE_SE,
NFC_CMD_LLC_SDREQ,
NFC_EVENT_LLC_SDRES,
- NFC_CMD_FW_UPLOAD,
+ NFC_CMD_FW_DOWNLOAD,
NFC_EVENT_SE_ADDED,
NFC_EVENT_SE_REMOVED,
+ NFC_EVENT_SE_CONNECTIVITY,
+ NFC_EVENT_SE_TRANSACTION,
+ NFC_CMD_GET_SE,
/* private: internal use only */
__NFC_CMD_AFTER_LAST
};
@@ -129,6 +146,7 @@ enum nfc_commands {
* @NFC_ATTR_FIRMWARE_NAME: Free format firmware version
* @NFC_ATTR_SE_INDEX: Secure element index
* @NFC_ATTR_SE_TYPE: Secure element type (UICC or EMBEDDED)
+ * @NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS: Firmware download operation status
*/
enum nfc_attrs {
NFC_ATTR_UNSPEC,
@@ -154,6 +172,8 @@ enum nfc_attrs {
NFC_ATTR_FIRMWARE_NAME,
NFC_ATTR_SE_INDEX,
NFC_ATTR_SE_TYPE,
+ NFC_ATTR_SE_AID,
+ NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS,
/* private: internal use only */
__NFC_ATTR_AFTER_LAST
};
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 861e5eba395..fde2c021b26 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -126,6 +126,31 @@
*/
/**
+ * DOC: packet coalesce support
+ *
+ * In most cases, a host that receives IPv4 and IPv6 multicast/broadcast
+ * packets does not do anything with them. Therefore the reception of
+ * these unwanted packets causes unnecessary processing and power
+ * consumption.
+ *
+ * The packet coalesce feature helps to reduce the number of receive
+ * interrupts delivered to the host by buffering these packets in
+ * firmware/hardware for some predefined time. A receive interrupt will
+ * be generated when one of the following events occurs:
+ * a) Expiration of a hardware timer whose expiration time is set to the
+ *    maximum coalescing delay of the matching coalesce rule.
+ * b) The coalescing buffer in hardware reaches its limit.
+ * c) A packet doesn't match any of the configured coalesce rules.
+ *
+ * The user needs to configure the following parameters to create a
+ * coalesce rule:
+ * a) Maximum coalescing delay
+ * b) List of packet patterns which need to be matched
+ * c) Condition for coalescing: pattern 'match' or 'no match'
+ * Multiple such rules can be created.
+ */
+
+/**
* enum nl80211_commands - supported nl80211 commands
*
* @NL80211_CMD_UNSPEC: unspecified command to catch errors
@@ -648,6 +673,19 @@
* @NL80211_CMD_CRIT_PROTOCOL_STOP: Indicates the connection reliability can
* return back to normal.
*
+ * @NL80211_CMD_GET_COALESCE: Get currently supported coalesce rules.
+ * @NL80211_CMD_SET_COALESCE: Configure coalesce rules or clear existing rules.
+ *
+ * @NL80211_CMD_CHANNEL_SWITCH: Perform a channel switch by announcing the
+ * new channel information (Channel Switch Announcement - CSA)
+ * in the beacon for some time (as defined in the
+ * %NL80211_ATTR_CH_SWITCH_COUNT parameter) and then change to the
+ * new channel. Userspace provides the new channel information (using
+ * %NL80211_ATTR_WIPHY_FREQ and the attributes determining channel
+ * width). %NL80211_ATTR_CH_SWITCH_BLOCK_TX may be supplied to inform
+ * other stations that transmission must be blocked until the channel
+ * switch is complete.
+ *
* @NL80211_CMD_MAX: highest used command number
* @__NL80211_CMD_AFTER_LAST: internal use
*/
@@ -810,6 +848,11 @@ enum nl80211_commands {
NL80211_CMD_CRIT_PROTOCOL_START,
NL80211_CMD_CRIT_PROTOCOL_STOP,
+ NL80211_CMD_GET_COALESCE,
+ NL80211_CMD_SET_COALESCE,
+
+ NL80211_CMD_CHANNEL_SWITCH,
+
/* add new commands above here */
/* used to define NL80211_CMD_MAX below */
@@ -1436,6 +1479,23 @@ enum nl80211_commands {
* allowed to be used with the first @NL80211_CMD_SET_STATION command to
* update a TDLS peer STA entry.
*
+ * @NL80211_ATTR_COALESCE_RULE: Coalesce rule information.
+ *
+ * @NL80211_ATTR_CH_SWITCH_COUNT: u32 attribute specifying the number of TBTT's
+ * until the channel switch event.
+ * @NL80211_ATTR_CH_SWITCH_BLOCK_TX: flag attribute specifying that transmission
+ * must be blocked on the current channel (before the channel switch
+ * operation).
+ * @NL80211_ATTR_CSA_IES: Nested set of attributes containing the IE information
+ * for the time while performing a channel switch.
+ * @NL80211_ATTR_CSA_C_OFF_BEACON: Offset of the channel switch counter
+ * field in the beacons tail (%NL80211_ATTR_BEACON_TAIL).
+ * @NL80211_ATTR_CSA_C_OFF_PRESP: Offset of the channel switch counter
+ * field in the probe response (%NL80211_ATTR_PROBE_RESP).
+ *
+ * @NL80211_ATTR_RXMGMT_FLAGS: flags for nl80211_send_mgmt(), u32.
+ * As specified in the &enum nl80211_rxmgmt_flags.
+ *
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
*/
@@ -1736,6 +1796,16 @@ enum nl80211_attrs {
NL80211_ATTR_PEER_AID,
+ NL80211_ATTR_COALESCE_RULE,
+
+ NL80211_ATTR_CH_SWITCH_COUNT,
+ NL80211_ATTR_CH_SWITCH_BLOCK_TX,
+ NL80211_ATTR_CSA_IES,
+ NL80211_ATTR_CSA_C_OFF_BEACON,
+ NL80211_ATTR_CSA_C_OFF_PRESP,
+
+ NL80211_ATTR_RXMGMT_FLAGS,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -2773,6 +2843,21 @@ enum nl80211_chan_width {
};
/**
+ * enum nl80211_bss_scan_width - control channel width for a BSS
+ *
+ * These values are used with the %NL80211_BSS_CHAN_WIDTH attribute.
+ *
+ * @NL80211_BSS_CHAN_WIDTH_20: control channel is 20 MHz wide or compatible
+ * @NL80211_BSS_CHAN_WIDTH_10: control channel is 10 MHz wide
+ * @NL80211_BSS_CHAN_WIDTH_5: control channel is 5 MHz wide
+ */
+enum nl80211_bss_scan_width {
+ NL80211_BSS_CHAN_WIDTH_20,
+ NL80211_BSS_CHAN_WIDTH_10,
+ NL80211_BSS_CHAN_WIDTH_5,
+};
+
+/**
* enum nl80211_bss - netlink attributes for a BSS
*
* @__NL80211_BSS_INVALID: invalid
@@ -2796,6 +2881,8 @@ enum nl80211_chan_width {
* @NL80211_BSS_BEACON_IES: binary attribute containing the raw information
* elements from a Beacon frame (bin); not present if no Beacon frame has
* yet been received
+ * @NL80211_BSS_CHAN_WIDTH: channel width of the control channel
+ * (u32, enum nl80211_bss_scan_width)
* @__NL80211_BSS_AFTER_LAST: internal
* @NL80211_BSS_MAX: highest BSS attribute
*/
@@ -2812,6 +2899,7 @@ enum nl80211_bss {
NL80211_BSS_STATUS,
NL80211_BSS_SEEN_MS_AGO,
NL80211_BSS_BEACON_IES,
+ NL80211_BSS_CHAN_WIDTH,
/* keep last */
__NL80211_BSS_AFTER_LAST,
@@ -3060,11 +3148,11 @@ enum nl80211_tx_power_setting {
};
/**
- * enum nl80211_wowlan_packet_pattern_attr - WoWLAN packet pattern attribute
- * @__NL80211_WOWLAN_PKTPAT_INVALID: invalid number for nested attribute
- * @NL80211_WOWLAN_PKTPAT_PATTERN: the pattern, values where the mask has
+ * enum nl80211_packet_pattern_attr - packet pattern attribute
+ * @__NL80211_PKTPAT_INVALID: invalid number for nested attribute
+ * @NL80211_PKTPAT_PATTERN: the pattern, values where the mask has
* a zero bit are ignored
- * @NL80211_WOWLAN_PKTPAT_MASK: pattern mask, must be long enough to have
+ * @NL80211_PKTPAT_MASK: pattern mask, must be long enough to have
* a bit for each byte in the pattern. The lowest-order bit corresponds
* to the first byte of the pattern, but the bytes of the pattern are
* in a little-endian-like format, i.e. the 9th byte of the pattern
@@ -3075,39 +3163,50 @@ enum nl80211_tx_power_setting {
* Note that the pattern matching is done as though frames were not
* 802.11 frames but 802.3 frames, i.e. the frame is fully unpacked
* first (including SNAP header unpacking) and then matched.
- * @NL80211_WOWLAN_PKTPAT_OFFSET: packet offset, pattern is matched after
+ * @NL80211_PKTPAT_OFFSET: packet offset, pattern is matched after
* these fixed number of bytes of received packet
- * @NUM_NL80211_WOWLAN_PKTPAT: number of attributes
- * @MAX_NL80211_WOWLAN_PKTPAT: max attribute number
+ * @NUM_NL80211_PKTPAT: number of attributes
+ * @MAX_NL80211_PKTPAT: max attribute number
*/
-enum nl80211_wowlan_packet_pattern_attr {
- __NL80211_WOWLAN_PKTPAT_INVALID,
- NL80211_WOWLAN_PKTPAT_MASK,
- NL80211_WOWLAN_PKTPAT_PATTERN,
- NL80211_WOWLAN_PKTPAT_OFFSET,
+enum nl80211_packet_pattern_attr {
+ __NL80211_PKTPAT_INVALID,
+ NL80211_PKTPAT_MASK,
+ NL80211_PKTPAT_PATTERN,
+ NL80211_PKTPAT_OFFSET,
- NUM_NL80211_WOWLAN_PKTPAT,
- MAX_NL80211_WOWLAN_PKTPAT = NUM_NL80211_WOWLAN_PKTPAT - 1,
+ NUM_NL80211_PKTPAT,
+ MAX_NL80211_PKTPAT = NUM_NL80211_PKTPAT - 1,
};
/**
- * struct nl80211_wowlan_pattern_support - pattern support information
+ * struct nl80211_pattern_support - packet pattern support information
* @max_patterns: maximum number of patterns supported
* @min_pattern_len: minimum length of each pattern
* @max_pattern_len: maximum length of each pattern
* @max_pkt_offset: maximum Rx packet offset
*
* This struct is carried in %NL80211_WOWLAN_TRIG_PKT_PATTERN when
- * that is part of %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED in the
- * capability information given by the kernel to userspace.
+ * that is part of %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED or in
+ * %NL80211_ATTR_COALESCE_RULE_PKT_PATTERN when that is part of
+ * %NL80211_ATTR_COALESCE_RULE in the capability information given
+ * by the kernel to userspace.
*/
-struct nl80211_wowlan_pattern_support {
+struct nl80211_pattern_support {
__u32 max_patterns;
__u32 min_pattern_len;
__u32 max_pattern_len;
__u32 max_pkt_offset;
} __attribute__((packed));
+/* only for backward compatibility */
+#define __NL80211_WOWLAN_PKTPAT_INVALID __NL80211_PKTPAT_INVALID
+#define NL80211_WOWLAN_PKTPAT_MASK NL80211_PKTPAT_MASK
+#define NL80211_WOWLAN_PKTPAT_PATTERN NL80211_PKTPAT_PATTERN
+#define NL80211_WOWLAN_PKTPAT_OFFSET NL80211_PKTPAT_OFFSET
+#define NUM_NL80211_WOWLAN_PKTPAT NUM_NL80211_PKTPAT
+#define MAX_NL80211_WOWLAN_PKTPAT MAX_NL80211_PKTPAT
+#define nl80211_wowlan_pattern_support nl80211_pattern_support
+
/**
* enum nl80211_wowlan_triggers - WoWLAN trigger definitions
* @__NL80211_WOWLAN_TRIG_INVALID: invalid number for nested attributes
@@ -3127,7 +3226,7 @@ struct nl80211_wowlan_pattern_support {
* pattern matching is done after the packet is converted to the MSDU.
*
* In %NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED, it is a binary attribute
- * carrying a &struct nl80211_wowlan_pattern_support.
+ * carrying a &struct nl80211_pattern_support.
*
 * When reporting wakeup, it is a u32 attribute containing the 0-based
* index of the pattern that caused the wakeup, in the patterns passed
@@ -3284,7 +3383,7 @@ struct nl80211_wowlan_tcp_data_token_feature {
* @NL80211_WOWLAN_TCP_WAKE_PAYLOAD: wake packet payload, for advertising a
* u32 attribute holding the maximum length
* @NL80211_WOWLAN_TCP_WAKE_MASK: Wake packet payload mask, not used for
- * feature advertising. The mask works like @NL80211_WOWLAN_PKTPAT_MASK
+ * feature advertising. The mask works like @NL80211_PKTPAT_MASK
* but on the TCP payload only.
* @NUM_NL80211_WOWLAN_TCP: number of TCP attributes
* @MAX_NL80211_WOWLAN_TCP: highest attribute number
@@ -3309,6 +3408,55 @@ enum nl80211_wowlan_tcp_attrs {
};
/**
+ * struct nl80211_coalesce_rule_support - coalesce rule support information
+ * @max_rules: maximum number of rules supported
+ * @pat: packet pattern support information
+ * @max_delay: maximum supported coalescing delay in msecs
+ *
+ * This struct is carried in %NL80211_ATTR_COALESCE_RULE in the
+ * capability information given by the kernel to userspace.
+ */
+struct nl80211_coalesce_rule_support {
+ __u32 max_rules;
+ struct nl80211_pattern_support pat;
+ __u32 max_delay;
+} __attribute__((packed));
+
+/**
+ * enum nl80211_attr_coalesce_rule - coalesce rule attribute
+ * @__NL80211_COALESCE_RULE_INVALID: invalid number for nested attribute
+ * @NL80211_ATTR_COALESCE_RULE_DELAY: delay in msecs used for packet coalescing
+ * @NL80211_ATTR_COALESCE_RULE_CONDITION: condition for packet coalescence,
+ * see &enum nl80211_coalesce_condition.
+ * @NL80211_ATTR_COALESCE_RULE_PKT_PATTERN: nested attribute carrying the
+ * packet patterns to be matched against received packets
+ * @NUM_NL80211_ATTR_COALESCE_RULE: number of attributes
+ * @NL80211_ATTR_COALESCE_RULE_MAX: max attribute number
+ */
+enum nl80211_attr_coalesce_rule {
+ __NL80211_COALESCE_RULE_INVALID,
+ NL80211_ATTR_COALESCE_RULE_DELAY,
+ NL80211_ATTR_COALESCE_RULE_CONDITION,
+ NL80211_ATTR_COALESCE_RULE_PKT_PATTERN,
+
+ /* keep last */
+ NUM_NL80211_ATTR_COALESCE_RULE,
+ NL80211_ATTR_COALESCE_RULE_MAX = NUM_NL80211_ATTR_COALESCE_RULE - 1
+};
+
+/**
+ * enum nl80211_coalesce_condition - coalesce rule conditions
+ * @NL80211_COALESCE_CONDITION_MATCH: coalesce Rx packets when patterns
+ * in a rule are matched.
+ * @NL80211_COALESCE_CONDITION_NO_MATCH: coalesce Rx packets when patterns
+ * in a rule are not matched.
+ */
+enum nl80211_coalesce_condition {
+ NL80211_COALESCE_CONDITION_MATCH,
+ NL80211_COALESCE_CONDITION_NO_MATCH
+};
+
+/**
* enum nl80211_iface_limit_attrs - limit attributes
* @NL80211_IFACE_LIMIT_UNSPEC: (reserved)
* @NL80211_IFACE_LIMIT_MAX: maximum number of interfaces that
@@ -3758,4 +3906,15 @@ enum nl80211_crit_proto_id {
/* maximum duration for critical protocol measures */
#define NL80211_CRIT_PROTO_MAX_DURATION 5000 /* msec */
+/**
+ * enum nl80211_rxmgmt_flags - flags for received management frame.
+ *
+ * Used by cfg80211_rx_mgmt()
+ *
+ * @NL80211_RXMGMT_FLAG_ANSWERED: frame was answered by device/driver.
+ */
+enum nl80211_rxmgmt_flags {
+ NL80211_RXMGMT_FLAG_ANSWERED = 1 << 0,
+};
+
#endif /* __LINUX_NL80211_H */
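The coalesce capability is advertised to userspace as a binary blob containing struct nl80211_coalesce_rule_support. A hedged sketch of how a tool might decode that blob once it has extracted the NL80211_ATTR_COALESCE_RULE payload (the netlink attribute parsing itself is omitted, and the helper name is illustrative):

/* sketch: interpret the coalesce capability blob from NL80211_ATTR_COALESCE_RULE */
#include <stdio.h>
#include <string.h>
#include <linux/nl80211.h>

static void print_coalesce_caps(const void *data, size_t len)
{
        struct nl80211_coalesce_rule_support caps;

        if (len < sizeof(caps))
                return;                         /* blob too short to be valid */
        memcpy(&caps, data, sizeof(caps));      /* packed struct, copy out as-is */
        printf("rules: %u, delay: %u ms, patterns: %u (len %u..%u, offset <= %u)\n",
               caps.max_rules, caps.max_delay, caps.pat.max_patterns,
               caps.pat.min_pattern_len, caps.pat.max_pattern_len,
               caps.pat.max_pkt_offset);
}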
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index c55efaaa9bb..a74d375b439 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -1,6 +1,6 @@
/*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2013 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -165,6 +165,7 @@ enum ovs_vport_type {
OVS_VPORT_TYPE_NETDEV, /* network device */
OVS_VPORT_TYPE_INTERNAL, /* network device implemented by datapath */
OVS_VPORT_TYPE_GRE, /* GRE tunnel. */
+ OVS_VPORT_TYPE_VXLAN, /* VXLAN tunnel. */
__OVS_VPORT_TYPE_MAX
};
@@ -211,6 +212,16 @@ enum ovs_vport_attr {
#define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1)
+/* OVS_VPORT_ATTR_OPTIONS attributes for tunnels.
+ */
+enum {
+ OVS_TUNNEL_ATTR_UNSPEC,
+ OVS_TUNNEL_ATTR_DST_PORT, /* 16-bit UDP port, used by L4 tunnels. */
+ __OVS_TUNNEL_ATTR_MAX
+};
+
+#define OVS_TUNNEL_ATTR_MAX (__OVS_TUNNEL_ATTR_MAX - 1)
+
/* Flows. */
#define OVS_FLOW_FAMILY "ovs_flow"
@@ -248,6 +259,7 @@ enum ovs_key_attr {
OVS_KEY_ATTR_ND, /* struct ovs_key_nd */
OVS_KEY_ATTR_SKB_MARK, /* u32 skb mark */
OVS_KEY_ATTR_TUNNEL, /* Nested set of ovs_tunnel attributes */
+ OVS_KEY_ATTR_SCTP, /* struct ovs_key_sctp */
#ifdef __KERNEL__
OVS_KEY_ATTR_IPV4_TUNNEL, /* struct ovs_key_ipv4_tunnel */
@@ -322,6 +334,11 @@ struct ovs_key_udp {
__be16 udp_dst;
};
+struct ovs_key_sctp {
+ __be16 sctp_src;
+ __be16 sctp_dst;
+};
+
struct ovs_key_icmp {
__u8 icmp_type;
__u8 icmp_code;
@@ -368,6 +385,12 @@ struct ovs_key_nd {
* @OVS_FLOW_ATTR_CLEAR: If present in a %OVS_FLOW_CMD_SET request, clears the
* last-used time, accumulated TCP flags, and statistics for this flow.
* Otherwise ignored in requests. Never present in notifications.
+ * @OVS_FLOW_ATTR_MASK: Nested %OVS_KEY_ATTR_* attributes specifying the
+ * mask bits for a wildcarded flow match. A mask bit value of '1' specifies an
+ * exact match with the corresponding flow key bit, while '0' specifies a
+ * wildcarded match. Omitting the attribute is treated as wildcarding all
+ * corresponding fields. Optional for all requests. If not present,
+ * all flow key bits are exact match bits.
*
* These attributes follow the &struct ovs_header within the Generic Netlink
* payload for %OVS_FLOW_* commands.
@@ -380,6 +403,7 @@ enum ovs_flow_attr {
OVS_FLOW_ATTR_TCP_FLAGS, /* 8-bit OR'd TCP flags. */
OVS_FLOW_ATTR_USED, /* u64 msecs last used in monotonic time. */
OVS_FLOW_ATTR_CLEAR, /* Flag to clear stats, tcp_flags, used. */
+ OVS_FLOW_ATTR_MASK, /* Sequence of OVS_KEY_ATTR_* attributes. */
__OVS_FLOW_ATTR_MAX
};
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index c3cc01d474b..baa7852468e 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -421,24 +421,24 @@
#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */
#define PCI_EXP_TYPE_UPSTREAM 0x5 /* Upstream Port */
#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */
-#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCI/PCI-X Bridge */
-#define PCI_EXP_TYPE_PCIE_BRIDGE 0x8 /* PCI/PCI-X to PCIE Bridge */
+#define PCI_EXP_TYPE_PCI_BRIDGE 0x7 /* PCIe to PCI/PCI-X Bridge */
+#define PCI_EXP_TYPE_PCIE_BRIDGE 0x8 /* PCI/PCI-X to PCIe Bridge */
#define PCI_EXP_TYPE_RC_END 0x9 /* Root Complex Integrated Endpoint */
#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */
#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */
#define PCI_EXP_FLAGS_IRQ 0x3e00 /* Interrupt message number */
#define PCI_EXP_DEVCAP 4 /* Device capabilities */
-#define PCI_EXP_DEVCAP_PAYLOAD 0x07 /* Max_Payload_Size */
-#define PCI_EXP_DEVCAP_PHANTOM 0x18 /* Phantom functions */
-#define PCI_EXP_DEVCAP_EXT_TAG 0x20 /* Extended tags */
-#define PCI_EXP_DEVCAP_L0S 0x1c0 /* L0s Acceptable Latency */
-#define PCI_EXP_DEVCAP_L1 0xe00 /* L1 Acceptable Latency */
-#define PCI_EXP_DEVCAP_ATN_BUT 0x1000 /* Attention Button Present */
-#define PCI_EXP_DEVCAP_ATN_IND 0x2000 /* Attention Indicator Present */
-#define PCI_EXP_DEVCAP_PWR_IND 0x4000 /* Power Indicator Present */
-#define PCI_EXP_DEVCAP_RBER 0x8000 /* Role-Based Error Reporting */
-#define PCI_EXP_DEVCAP_PWR_VAL 0x3fc0000 /* Slot Power Limit Value */
-#define PCI_EXP_DEVCAP_PWR_SCL 0xc000000 /* Slot Power Limit Scale */
+#define PCI_EXP_DEVCAP_PAYLOAD 0x00000007 /* Max_Payload_Size */
+#define PCI_EXP_DEVCAP_PHANTOM 0x00000018 /* Phantom functions */
+#define PCI_EXP_DEVCAP_EXT_TAG 0x00000020 /* Extended tags */
+#define PCI_EXP_DEVCAP_L0S 0x000001c0 /* L0s Acceptable Latency */
+#define PCI_EXP_DEVCAP_L1 0x00000e00 /* L1 Acceptable Latency */
+#define PCI_EXP_DEVCAP_ATN_BUT 0x00001000 /* Attention Button Present */
+#define PCI_EXP_DEVCAP_ATN_IND 0x00002000 /* Attention Indicator Present */
+#define PCI_EXP_DEVCAP_PWR_IND 0x00004000 /* Power Indicator Present */
+#define PCI_EXP_DEVCAP_RBER 0x00008000 /* Role-Based Error Reporting */
+#define PCI_EXP_DEVCAP_PWR_VAL 0x03fc0000 /* Slot Power Limit Value */
+#define PCI_EXP_DEVCAP_PWR_SCL 0x0c000000 /* Slot Power Limit Scale */
#define PCI_EXP_DEVCAP_FLR 0x10000000 /* Function Level Reset */
#define PCI_EXP_DEVCTL 8 /* Device Control */
#define PCI_EXP_DEVCTL_CERE 0x0001 /* Correctable Error Reporting En. */
@@ -454,16 +454,16 @@
#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */
#define PCI_EXP_DEVSTA 10 /* Device Status */
-#define PCI_EXP_DEVSTA_CED 0x01 /* Correctable Error Detected */
-#define PCI_EXP_DEVSTA_NFED 0x02 /* Non-Fatal Error Detected */
-#define PCI_EXP_DEVSTA_FED 0x04 /* Fatal Error Detected */
-#define PCI_EXP_DEVSTA_URD 0x08 /* Unsupported Request Detected */
-#define PCI_EXP_DEVSTA_AUXPD 0x10 /* AUX Power Detected */
-#define PCI_EXP_DEVSTA_TRPND 0x20 /* Transactions Pending */
+#define PCI_EXP_DEVSTA_CED 0x0001 /* Correctable Error Detected */
+#define PCI_EXP_DEVSTA_NFED 0x0002 /* Non-Fatal Error Detected */
+#define PCI_EXP_DEVSTA_FED 0x0004 /* Fatal Error Detected */
+#define PCI_EXP_DEVSTA_URD 0x0008 /* Unsupported Request Detected */
+#define PCI_EXP_DEVSTA_AUXPD 0x0010 /* AUX Power Detected */
+#define PCI_EXP_DEVSTA_TRPND 0x0020 /* Transactions Pending */
#define PCI_EXP_LNKCAP 12 /* Link Capabilities */
#define PCI_EXP_LNKCAP_SLS 0x0000000f /* Supported Link Speeds */
-#define PCI_EXP_LNKCAP_SLS_2_5GB 0x1 /* LNKCAP2 SLS Vector bit 0 (2.5GT/s) */
-#define PCI_EXP_LNKCAP_SLS_5_0GB 0x2 /* LNKCAP2 SLS Vector bit 1 (5.0GT/s) */
+#define PCI_EXP_LNKCAP_SLS_2_5GB 0x00000001 /* LNKCAP2 SLS Vector bit 0 */
+#define PCI_EXP_LNKCAP_SLS_5_0GB 0x00000002 /* LNKCAP2 SLS Vector bit 1 */
#define PCI_EXP_LNKCAP_MLW 0x000003f0 /* Maximum Link Width */
#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */
#define PCI_EXP_LNKCAP_L0SEL 0x00007000 /* L0s Exit Latency */
@@ -475,21 +475,21 @@
#define PCI_EXP_LNKCAP_PN 0xff000000 /* Port Number */
#define PCI_EXP_LNKCTL 16 /* Link Control */
#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */
-#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */
-#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */
+#define PCI_EXP_LNKCTL_ASPM_L0S 0x0001 /* L0s Enable */
+#define PCI_EXP_LNKCTL_ASPM_L1 0x0002 /* L1 Enable */
#define PCI_EXP_LNKCTL_RCB 0x0008 /* Read Completion Boundary */
#define PCI_EXP_LNKCTL_LD 0x0010 /* Link Disable */
#define PCI_EXP_LNKCTL_RL 0x0020 /* Retrain Link */
#define PCI_EXP_LNKCTL_CCC 0x0040 /* Common Clock Configuration */
#define PCI_EXP_LNKCTL_ES 0x0080 /* Extended Synch */
-#define PCI_EXP_LNKCTL_CLKREQ_EN 0x100 /* Enable clkreq */
+#define PCI_EXP_LNKCTL_CLKREQ_EN 0x0100 /* Enable clkreq */
#define PCI_EXP_LNKCTL_HAWD 0x0200 /* Hardware Autonomous Width Disable */
#define PCI_EXP_LNKCTL_LBMIE 0x0400 /* Link Bandwidth Management Interrupt Enable */
#define PCI_EXP_LNKCTL_LABIE 0x0800 /* Lnk Autonomous Bandwidth Interrupt Enable */
#define PCI_EXP_LNKSTA 18 /* Link Status */
#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */
-#define PCI_EXP_LNKSTA_CLS_2_5GB 0x01 /* Current Link Speed 2.5GT/s */
-#define PCI_EXP_LNKSTA_CLS_5_0GB 0x02 /* Current Link Speed 5.0GT/s */
+#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 /* Current Link Speed 2.5GT/s */
+#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 /* Current Link Speed 5.0GT/s */
#define PCI_EXP_LNKSTA_NLW	0x03f0	/* Negotiated Link Width */
#define PCI_EXP_LNKSTA_NLW_SHIFT 4 /* start of NLW mask in link status */
#define PCI_EXP_LNKSTA_LT 0x0800 /* Link Training */
@@ -534,44 +534,49 @@
#define PCI_EXP_SLTSTA_EIS 0x0080 /* Electromechanical Interlock Status */
#define PCI_EXP_SLTSTA_DLLSC 0x0100 /* Data Link Layer State Changed */
#define PCI_EXP_RTCTL 28 /* Root Control */
-#define PCI_EXP_RTCTL_SECEE 0x01 /* System Error on Correctable Error */
-#define PCI_EXP_RTCTL_SENFEE 0x02 /* System Error on Non-Fatal Error */
-#define PCI_EXP_RTCTL_SEFEE 0x04 /* System Error on Fatal Error */
-#define PCI_EXP_RTCTL_PMEIE 0x08 /* PME Interrupt Enable */
-#define PCI_EXP_RTCTL_CRSSVE 0x10 /* CRS Software Visibility Enable */
+#define PCI_EXP_RTCTL_SECEE 0x0001 /* System Error on Correctable Error */
+#define PCI_EXP_RTCTL_SENFEE 0x0002 /* System Error on Non-Fatal Error */
+#define PCI_EXP_RTCTL_SEFEE 0x0004 /* System Error on Fatal Error */
+#define PCI_EXP_RTCTL_PMEIE 0x0008 /* PME Interrupt Enable */
+#define PCI_EXP_RTCTL_CRSSVE 0x0010 /* CRS Software Visibility Enable */
#define PCI_EXP_RTCAP 30 /* Root Capabilities */
#define PCI_EXP_RTSTA 32 /* Root Status */
-#define PCI_EXP_RTSTA_PME 0x10000 /* PME status */
-#define PCI_EXP_RTSTA_PENDING 0x20000 /* PME pending */
+#define PCI_EXP_RTSTA_PME 0x00010000 /* PME status */
+#define PCI_EXP_RTSTA_PENDING 0x00020000 /* PME pending */
/*
- * Note that the following PCI Express 'Capability Structure' registers
- * were introduced with 'Capability Version' 0x2 (v2). These registers
- * do not exist on devices with Capability Version 1. Use pci_pcie_cap2()
- * to use these fields safely.
+ * The Device Capabilities 2, Device Status 2, Device Control 2,
+ * Link Capabilities 2, Link Status 2, Link Control 2,
+ * Slot Capabilities 2, Slot Status 2, and Slot Control 2 registers
+ * are only present on devices with PCIe Capability version 2.
+ * Use pcie_capability_read_word() and similar interfaces to use them
+ * safely.
*/
#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */
-#define PCI_EXP_DEVCAP2_ARI 0x20 /* Alternative Routing-ID */
-#define PCI_EXP_DEVCAP2_LTR 0x800 /* Latency tolerance reporting */
-#define PCI_EXP_OBFF_MASK 0xc0000 /* OBFF support mechanism */
-#define PCI_EXP_OBFF_MSG 0x40000 /* New message signaling */
-#define PCI_EXP_OBFF_WAKE 0x80000 /* Re-use WAKE# for OBFF */
+#define PCI_EXP_DEVCAP2_ARI 0x00000020 /* Alternative Routing-ID */
+#define PCI_EXP_DEVCAP2_LTR 0x00000800 /* Latency tolerance reporting */
+#define PCI_EXP_DEVCAP2_OBFF_MASK 0x000c0000 /* OBFF support mechanism */
+#define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */
+#define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */
#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
-#define PCI_EXP_DEVCTL2_ARI 0x20 /* Alternative Routing-ID */
-#define PCI_EXP_IDO_REQ_EN 0x100 /* ID-based ordering request enable */
-#define PCI_EXP_IDO_CMP_EN 0x200 /* ID-based ordering completion enable */
-#define PCI_EXP_LTR_EN 0x400 /* Latency tolerance reporting */
-#define PCI_EXP_OBFF_MSGA_EN 0x2000 /* OBFF enable with Message type A */
-#define PCI_EXP_OBFF_MSGB_EN 0x4000 /* OBFF enable with Message type B */
-#define PCI_EXP_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */
+#define PCI_EXP_DEVCTL2_ARI 0x20 /* Alternative Routing-ID */
+#define PCI_EXP_DEVCTL2_IDO_REQ_EN 0x0100 /* Allow IDO for requests */
+#define PCI_EXP_DEVCTL2_IDO_CMP_EN 0x0200 /* Allow IDO for completions */
+#define PCI_EXP_DEVCTL2_LTR_EN 0x0400 /* Enable LTR mechanism */
+#define PCI_EXP_DEVCTL2_OBFF_MSGA_EN 0x2000 /* Enable OBFF Message type A */
+#define PCI_EXP_DEVCTL2_OBFF_MSGB_EN 0x4000 /* Enable OBFF Message type B */
+#define PCI_EXP_DEVCTL2_OBFF_WAKE_EN 0x6000 /* OBFF using WAKE# signaling */
+#define PCI_EXP_DEVSTA2 42 /* Device Status 2 */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 44 /* v2 endpoints end here */
-#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */
-#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x02 /* Supported Link Speed 2.5GT/s */
-#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x04 /* Supported Link Speed 5.0GT/s */
-#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x08 /* Supported Link Speed 8.0GT/s */
-#define PCI_EXP_LNKCAP2_CROSSLINK 0x100 /* Crosslink supported */
+#define PCI_EXP_LNKCAP2 44 /* Link Capabilities 2 */
+#define PCI_EXP_LNKCAP2_SLS_2_5GB 0x00000002 /* Supported Speed 2.5GT/s */
+#define PCI_EXP_LNKCAP2_SLS_5_0GB 0x00000004 /* Supported Speed 5.0GT/s */
+#define PCI_EXP_LNKCAP2_SLS_8_0GB 0x00000008 /* Supported Speed 8.0GT/s */
+#define PCI_EXP_LNKCAP2_CROSSLINK 0x00000100 /* Crosslink supported */
#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */
#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
+#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */
#define PCI_EXP_SLTCTL2 56 /* Slot Control 2 */
+#define PCI_EXP_SLTSTA2 58 /* Slot Status 2 */
/* Extended Capabilities (PCI-X 2.0 and Express) */
#define PCI_EXT_CAP_ID(header) (header & 0x0000ffff)
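Widening the masks to the full register width is cosmetic, but the rewritten comment's advice matters: version-2-only registers such as Device Control 2 should be accessed through the pcie_capability_*() helpers, which hide the capability-version check. A sketch of in-kernel usage under the assumption of a valid device pointer (the helper name is illustrative):

/* sketch: check whether LTR is enabled, tolerating v1-only PCIe capabilities */
#include <linux/pci.h>

static bool ltr_enabled(struct pci_dev *dev)
{
        u16 ctl2 = 0;

        /* reads back 0 without error on devices that lack the v2 registers */
        pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &ctl2);
        return ctl2 & PCI_EXP_DEVCTL2_LTR_EN;
}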
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 0b1df41691e..ca1d90bcb74 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -109,6 +109,7 @@ enum perf_sw_ids {
PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
PERF_COUNT_SW_EMULATION_FAULTS = 8,
+ PERF_COUNT_SW_DUMMY = 9,
PERF_COUNT_SW_MAX, /* non-ABI */
};
@@ -134,8 +135,9 @@ enum perf_event_sample_format {
PERF_SAMPLE_STACK_USER = 1U << 13,
PERF_SAMPLE_WEIGHT = 1U << 14,
PERF_SAMPLE_DATA_SRC = 1U << 15,
+ PERF_SAMPLE_IDENTIFIER = 1U << 16,
- PERF_SAMPLE_MAX = 1U << 16, /* non-ABI */
+ PERF_SAMPLE_MAX = 1U << 17, /* non-ABI */
};
/*
@@ -275,8 +277,9 @@ struct perf_event_attr {
exclude_callchain_kernel : 1, /* exclude kernel callchains */
exclude_callchain_user : 1, /* exclude user callchains */
+ mmap2 : 1, /* include mmap with inode data */
- __reserved_1 : 41;
+ __reserved_1 : 40;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -321,6 +324,7 @@ struct perf_event_attr {
#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
+#define PERF_EVENT_IOC_ID _IOR('$', 7, u64 *)
enum perf_event_ioc_flags {
PERF_IOC_FLAG_GROUP = 1U << 0,
@@ -375,9 +379,12 @@ struct perf_event_mmap_page {
__u64 time_running; /* time event on cpu */
union {
__u64 capabilities;
- __u64 cap_usr_time : 1,
- cap_usr_rdpmc : 1,
- cap_____res : 62;
+ struct {
+ __u64 cap_usr_time : 1,
+ cap_usr_rdpmc : 1,
+ cap_usr_time_zero : 1,
+ cap_____res : 61;
+ };
};
/*
@@ -418,12 +425,29 @@ struct perf_event_mmap_page {
__u16 time_shift;
__u32 time_mult;
__u64 time_offset;
+ /*
+ * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated
+ * from sample timestamps.
+ *
+ * time = timestamp - time_zero;
+ * quot = time / time_mult;
+ * rem = time % time_mult;
+ * cyc = (quot << time_shift) + (rem << time_shift) / time_mult;
+ *
+ * And vice versa:
+ *
+ * quot = cyc >> time_shift;
+ * rem = cyc & ((1 << time_shift) - 1);
+ * timestamp = time_zero + quot * time_mult +
+ * ((rem * time_mult) >> time_shift);
+ */
+ __u64 time_zero;
/*
* Hole for extension of the self monitor capabilities
*/
- __u64 __reserved[120]; /* align to 1k */
+ __u64 __reserved[119]; /* align to 1k */
/*
* Control data for the mmap() data buffer.
@@ -471,13 +495,28 @@ enum perf_event_type {
/*
* If perf_event_attr.sample_id_all is set then all event types will
* have the sample_type selected fields related to where/when
- * (identity) an event took place (TID, TIME, ID, CPU, STREAM_ID)
- * described in PERF_RECORD_SAMPLE below, it will be stashed just after
- * the perf_event_header and the fields already present for the existing
- * fields, i.e. at the end of the payload. That way a newer perf.data
- * file will be supported by older perf tools, with these new optional
- * fields being ignored.
+ * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
+ * IDENTIFIER) described in PERF_RECORD_SAMPLE below, it will be stashed
+ * just after the perf_event_header and the fields already present for
+ * the existing fields, i.e. at the end of the payload. That way a newer
+ * perf.data file will be supported by older perf tools, with these new
+ * optional fields being ignored.
+ *
+ * struct sample_id {
+ * { u32 pid, tid; } && PERF_SAMPLE_TID
+ * { u64 time; } && PERF_SAMPLE_TIME
+ * { u64 id; } && PERF_SAMPLE_ID
+ * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
+ * { u32 cpu, res; } && PERF_SAMPLE_CPU
+ * { u64 id; } && PERF_SAMPLE_IDENTIFIER
+ * } && perf_event_attr::sample_id_all
*
+ * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
+ * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
+ * relative to header.size.
+ */
+
+ /*
* The MMAP events record the PROT_EXEC mappings so that we can
* correlate userspace IPs to code. They have the following structure:
*
@@ -498,6 +537,7 @@ enum perf_event_type {
* struct perf_event_header header;
* u64 id;
* u64 lost;
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_LOST = 2,
@@ -508,6 +548,7 @@ enum perf_event_type {
*
* u32 pid, tid;
* char comm[];
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_COMM = 3,
@@ -518,6 +559,7 @@ enum perf_event_type {
* u32 pid, ppid;
* u32 tid, ptid;
* u64 time;
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_EXIT = 4,
@@ -528,6 +570,7 @@ enum perf_event_type {
* u64 time;
* u64 id;
* u64 stream_id;
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_THROTTLE = 5,
@@ -539,6 +582,7 @@ enum perf_event_type {
* u32 pid, ppid;
* u32 tid, ptid;
* u64 time;
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_FORK = 7,
@@ -549,6 +593,7 @@ enum perf_event_type {
* u32 pid, tid;
*
* struct read_format values;
+ * struct sample_id sample_id;
* };
*/
PERF_RECORD_READ = 8,
@@ -557,6 +602,13 @@ enum perf_event_type {
* struct {
* struct perf_event_header header;
*
+ * #
+ * # Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
+ * # The advantage of PERF_SAMPLE_IDENTIFIER is that its position
+ * # is fixed relative to header.
+ * #
+ *
+ * { u64 id; } && PERF_SAMPLE_IDENTIFIER
* { u64 ip; } && PERF_SAMPLE_IP
* { u32 pid, tid; } && PERF_SAMPLE_TID
* { u64 time; } && PERF_SAMPLE_TIME
@@ -596,11 +648,32 @@ enum perf_event_type {
* u64 dyn_size; } && PERF_SAMPLE_STACK_USER
*
* { u64 weight; } && PERF_SAMPLE_WEIGHT
- * { u64 data_src; } && PERF_SAMPLE_DATA_SRC
+ * { u64 data_src; } && PERF_SAMPLE_DATA_SRC
* };
*/
PERF_RECORD_SAMPLE = 9,
+ /*
+ * The MMAP2 records are an augmented version of MMAP; they add
+ * maj, min, ino numbers to be used to uniquely identify each mapping
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u32 pid, tid;
+ * u64 addr;
+ * u64 len;
+ * u64 pgoff;
+ * u32 maj;
+ * u32 min;
+ * u64 ino;
+ * u64 ino_generation;
+ * char filename[];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_MMAP2 = 10,
+
PERF_RECORD_MAX, /* non-ABI */
};
@@ -685,4 +758,28 @@ union perf_mem_data_src {
#define PERF_MEM_S(a, s) \
(((u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
+/*
+ * single taken branch record layout:
+ *
+ * from: source instruction (may not always be a branch insn)
+ * to: branch target
+ * mispred: branch target was mispredicted
+ * predicted: branch target was predicted
+ *
+ * Support for mispred and predicted is optional. If it
+ * is not supported, mispred = predicted = 0.
+ *
+ * in_tx: running in a hardware transaction
+ * abort: aborting a hardware transaction
+ */
+struct perf_branch_entry {
+ __u64 from;
+ __u64 to;
+ __u64 mispred:1, /* target mispredicted */
+ predicted:1,/* target predicted */
+ in_tx:1, /* in transaction */
+ abort:1, /* transaction abort */
+ reserved:60;
+};
+
#endif /* _UAPI_LINUX_PERF_EVENT_H */
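The cap_usr_time_zero block above spells out the conversion between sample timestamps and raw TSC-style cycle counts. As a worked example, here is the forward direction transcribed into a helper; mapping the mmap page and handling its seqlock are omitted for brevity, and the function name is illustrative:

/* sketch: timestamp -> cycle count using the mmap-page fields, per the comment above */
#include <linux/perf_event.h>

static __u64 timestamp_to_cycles(const struct perf_event_mmap_page *pc, __u64 timestamp)
{
        __u64 time, quot, rem;

        if (!pc->cap_usr_time_zero)
                return 0;               /* conversion not offered by this kernel/PMU */

        time = timestamp - pc->time_zero;
        quot = time / pc->time_mult;
        rem  = time % pc->time_mult;
        return (quot << pc->time_shift) + (rem << pc->time_shift) / pc->time_mult;
}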
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index dbd71b0c7d8..9b829134d42 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -73,9 +73,17 @@ struct tc_estimator {
#define TC_H_ROOT (0xFFFFFFFFU)
#define TC_H_INGRESS (0xFFFFFFF1U)
+/* Needs to correspond to iproute2 tc/tc_core.h "enum link_layer" */
+enum tc_link_layer {
+ TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */
+ TC_LINKLAYER_ETHERNET,
+ TC_LINKLAYER_ATM,
+};
+#define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */
+
struct tc_ratespec {
unsigned char cell_log;
- unsigned char __reserved;
+ __u8 linklayer; /* lower 4 bits */
unsigned short overhead;
short cell_align;
unsigned short mpu;
@@ -736,4 +744,45 @@ struct tc_fq_codel_xstats {
};
};
+/* FQ */
+
+enum {
+ TCA_FQ_UNSPEC,
+
+ TCA_FQ_PLIMIT, /* limit of total number of packets in queue */
+
+ TCA_FQ_FLOW_PLIMIT, /* limit of packets per flow */
+
+ TCA_FQ_QUANTUM, /* RR quantum */
+
+ TCA_FQ_INITIAL_QUANTUM, /* RR quantum for new flow */
+
+ TCA_FQ_RATE_ENABLE, /* enable/disable rate limiting */
+
+ TCA_FQ_FLOW_DEFAULT_RATE,/* for sockets with unspecified sk_rate,
+ * use the following rate
+ */
+
+ TCA_FQ_FLOW_MAX_RATE, /* per flow max rate */
+
+ TCA_FQ_BUCKETS_LOG, /* log2(number of buckets) */
+ __TCA_FQ_MAX
+};
+
+#define TCA_FQ_MAX (__TCA_FQ_MAX - 1)
+
+struct tc_fq_qd_stats {
+ __u64 gc_flows;
+ __u64 highprio_packets;
+ __u64 tcp_retrans;
+ __u64 throttled;
+ __u64 flows_plimit;
+ __u64 pkts_too_long;
+ __u64 allocation_errors;
+ __s64 time_next_delayed_flow;
+ __u32 flows;
+ __u32 inactive_flows;
+ __u32 throttled_flows;
+ __u32 pad;
+};
#endif
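Because the old __reserved byte becomes tc_ratespec::linklayer, an older iproute2 leaves it zero; that is why TC_LINKLAYER_UNAWARE is value 0 and why only the lower four bits are meaningful. A short sketch of how a consumer might interpret the field (the helper name is illustrative):

/* sketch: classify the link layer encoded in a tc_ratespec */
#include <linux/pkt_sched.h>

static int ratespec_is_atm(const struct tc_ratespec *r)
{
        /* only the lower four bits carry the link layer; zero means an old,
         * linklayer-unaware iproute2 filled in this byte */
        return (r->linklayer & TC_LINKLAYER_MASK) == TC_LINKLAYER_ATM;
}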
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index 66b466e4ca0..ca451e99b28 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -28,7 +28,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Or submit a bug report through the following website:
* http://www.sf.net/projects/lksctp
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index 9119cc0977b..e40ebe124ce 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -232,4 +232,7 @@
/* SH-SCI */
#define PORT_HSCIF 104
+/* ST ASC type numbers */
+#define PORT_ASC 105
+
#endif /* _UAPILINUX_SERIAL_CORE_H */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index af0a674cc67..1bdb4a39d1e 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -51,6 +51,10 @@ enum
IPSTATS_MIB_INBCASTOCTETS, /* InBcastOctets */
IPSTATS_MIB_OUTBCASTOCTETS, /* OutBcastOctets */
IPSTATS_MIB_CSUMERRORS, /* InCsumErrors */
+ IPSTATS_MIB_NOECTPKTS, /* InNoECTPkts */
+ IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */
+ IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */
+ IPSTATS_MIB_CEPKTS, /* InCEPkts */
__IPSTATS_MIB_MAX
};
@@ -253,7 +257,7 @@ enum
LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */
LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */
LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */
- LINUX_MIB_LOWLATENCYRXPACKETS, /* LowLatencyRxPackets */
+ LINUX_MIB_BUSYPOLLRXPACKETS, /* BusyPollRxPackets */
__LINUX_MIB_MAX
};
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 8d776ebc482..377f1e59411 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -111,6 +111,7 @@ enum {
#define TCP_REPAIR_OPTIONS 22
#define TCP_FASTOPEN 23 /* Enable FastOpen on listeners */
#define TCP_TIMESTAMP 24
+#define TCP_NOTSENT_LOWAT 25 /* limit number of unsent bytes in write queue */
struct tcp_repair_opt {
__u32 opt_code;
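TCP_NOTSENT_LOWAT caps how much not-yet-sent data a socket will queue, so an application only sees the socket as writable while the unsent backlog stays under the limit. A hedged sketch of setting it per socket; the 128 KB threshold is an arbitrary example, and the fallback define covers libcs that do not yet expose the constant:

/* sketch: limit unsent bytes queued on a TCP socket to 128 KB */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_NOTSENT_LOWAT
#define TCP_NOTSENT_LOWAT 25    /* value added by this header change */
#endif

static int set_notsent_lowat(int fd)
{
        int lowat = 128 * 1024;         /* arbitrary example threshold */

        return setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT, &lowat, sizeof(lowat));
}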
diff --git a/include/uapi/linux/uhid.h b/include/uapi/linux/uhid.h
index e9ed951e2b0..414b74be4da 100644
--- a/include/uapi/linux/uhid.h
+++ b/include/uapi/linux/uhid.h
@@ -30,7 +30,7 @@ enum uhid_event_type {
UHID_OPEN,
UHID_CLOSE,
UHID_OUTPUT,
- UHID_OUTPUT_EV,
+ UHID_OUTPUT_EV, /* obsolete! */
UHID_INPUT,
UHID_FEATURE,
UHID_FEATURE_ANSWER,
@@ -69,6 +69,8 @@ struct uhid_output_req {
__u8 rtype;
} __attribute__((__packed__));
+/* Obsolete! Newer kernels will no longer send these events but instead convert
+ * them into raw output reports via UHID_OUTPUT. */
struct uhid_output_ev_req {
__u16 type;
__u16 code;
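
Since UHID_OUTPUT_EV is obsolete and newer kernels deliver raw output reports via UHID_OUTPUT instead, a userspace transport should handle both event types during the transition. A minimal read-loop fragment, assuming fd is an open /dev/uhid descriptor:

#include <linux/uhid.h>
#include <unistd.h>

static void handle_one_uhid_event(int fd)
{
        struct uhid_event ev;

        if (read(fd, &ev, sizeof(ev)) <= 0)
                return;

        switch (ev.type) {
        case UHID_OUTPUT:
                /* ev.u.output carries the raw output report */
                break;
        case UHID_OUTPUT_EV:
                /* obsolete: only older kernels still send this */
                break;
        default:
                break;
        }
}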
diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h
index 7692dc69ccf..331499d597f 100644
--- a/include/uapi/linux/usb/ch11.h
+++ b/include/uapi/linux/usb/ch11.h
@@ -11,6 +11,17 @@
#include <linux/types.h> /* __u8 etc */
+/* This is arbitrary.
+ * From USB 2.0 spec Table 11-13, offset 7, a hub can
+ * have up to 255 ports. The most reported so far is 10.
+ *
+ * Current Wireless USB host hardware (Intel i1480 for example) allows
+ * up to 22 devices to connect. Upcoming hardware might raise that
+ * limit. Because the arrays need one extra bit for hub status data, we
+ * use 31, so that plus one comes out to an even four bytes.
+ */
+#define USB_MAXCHILDREN 31
+
/*
* Hub request types
*/
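
The remark about 31 "plus one" evening out to four bytes refers to the hub status/change bitmaps, which carry one bit per port plus one bit for the hub itself. A small worked check of that arithmetic:

#include <linux/usb/ch11.h>
#include <stdio.h>

int main(void)
{
        /* one bit per port plus one hub bit, rounded up to whole bytes */
        unsigned int bitmap_bytes = (USB_MAXCHILDREN + 1 + 7) / 8;

        printf("USB_MAXCHILDREN=%d -> %u-byte port bitmap\n",
               USB_MAXCHILDREN, bitmap_bytes);  /* prints 31 -> 4 */
        return 0;
}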
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index e90a88a8708..083bb5a5aae 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -161,6 +161,8 @@ enum v4l2_colorfx {
#define V4L2_CID_USER_SI476X_BASE (V4L2_CID_USER_BASE + 0x1040)
/* MPEG-class control IDs */
+/* The MPEG controls are applicable to all codecs;
+ * the 'MPEG' part of the define is historical */
#define V4L2_CID_MPEG_BASE (V4L2_CTRL_CLASS_MPEG | 0x900)
#define V4L2_CID_MPEG_CLASS (V4L2_CTRL_CLASS_MPEG | 1)
@@ -522,6 +524,33 @@ enum v4l2_mpeg_video_mpeg4_profile {
};
#define V4L2_CID_MPEG_VIDEO_MPEG4_QPEL (V4L2_CID_MPEG_BASE+407)
+/* Control IDs for VP8 streams
+ * Although VP8 is not part of MPEG we add these controls to the MPEG class
+ * as that class is already handling other video compression standards
+ */
+#define V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS (V4L2_CID_MPEG_BASE+500)
+enum v4l2_vp8_num_partitions {
+ V4L2_CID_MPEG_VIDEO_VPX_1_PARTITION = 0,
+ V4L2_CID_MPEG_VIDEO_VPX_2_PARTITIONS = 1,
+ V4L2_CID_MPEG_VIDEO_VPX_4_PARTITIONS = 2,
+ V4L2_CID_MPEG_VIDEO_VPX_8_PARTITIONS = 3,
+};
+#define V4L2_CID_MPEG_VIDEO_VPX_IMD_DISABLE_4X4 (V4L2_CID_MPEG_BASE+501)
+#define V4L2_CID_MPEG_VIDEO_VPX_NUM_REF_FRAMES (V4L2_CID_MPEG_BASE+502)
+enum v4l2_vp8_num_ref_frames {
+ V4L2_CID_MPEG_VIDEO_VPX_1_REF_FRAME = 0,
+ V4L2_CID_MPEG_VIDEO_VPX_2_REF_FRAME = 1,
+ V4L2_CID_MPEG_VIDEO_VPX_3_REF_FRAME = 2,
+};
+#define V4L2_CID_MPEG_VIDEO_VPX_FILTER_LEVEL (V4L2_CID_MPEG_BASE+503)
+#define V4L2_CID_MPEG_VIDEO_VPX_FILTER_SHARPNESS (V4L2_CID_MPEG_BASE+504)
+#define V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_REF_PERIOD (V4L2_CID_MPEG_BASE+505)
+#define V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_SEL (V4L2_CID_MPEG_BASE+506)
+enum v4l2_vp8_golden_frame_sel {
+ V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_PREV = 0,
+ V4L2_CID_MPEG_VIDEO_VPX_GOLDEN_FRAME_USE_REF_PERIOD = 1,
+};
+
/* MPEG-class control IDs specific to the CX2341x driver as defined by V4L2 */
#define V4L2_CID_MPEG_CX2341X_BASE (V4L2_CTRL_CLASS_MPEG | 0x1000)
#define V4L2_CID_MPEG_CX2341X_VIDEO_SPATIAL_FILTER_MODE (V4L2_CID_MPEG_CX2341X_BASE+0)
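
The new VPX controls live in the codec (MPEG) control class, so an encoder configures them through the standard control ioctls. A minimal sketch, assuming fd is an open encoder node and the patched v4l2 headers are installed:

#include <linux/videodev2.h>
#include <string.h>
#include <sys/ioctl.h>

/* Ask a VP8 encoder to emit four token partitions; returns 0 on success. */
static int vp8_use_four_partitions(int fd)
{
        struct v4l2_control ctrl;

        memset(&ctrl, 0, sizeof(ctrl));
        ctrl.id = V4L2_CID_MPEG_VIDEO_VPX_NUM_PARTITIONS;
        ctrl.value = V4L2_CID_MPEG_VIDEO_VPX_4_PARTITIONS;

        return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
}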
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index 4e0c58d25ff..be709fe2955 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -823,12 +823,4 @@
V4L2_DV_FL_REDUCED_BLANKING) \
}
-#define V4L2_DV_BT_DMT_1366X768P60 { \
- .type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(1366, 768, 0, \
- V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
- 85500000, 70, 143, 213, 3, 3, 24, 0, 0, 0, \
- V4L2_DV_BT_STD_DMT, 0) \
-}
-
#endif
diff --git a/include/uapi/linux/v4l2-mediabus.h b/include/uapi/linux/v4l2-mediabus.h
index 6ee63d09b32..a9601257bb4 100644
--- a/include/uapi/linux/v4l2-mediabus.h
+++ b/include/uapi/linux/v4l2-mediabus.h
@@ -37,7 +37,7 @@
enum v4l2_mbus_pixelcode {
V4L2_MBUS_FMT_FIXED = 0x0001,
- /* RGB - next is 0x100d */
+ /* RGB - next is 0x100e */
V4L2_MBUS_FMT_RGB444_2X8_PADHI_BE = 0x1001,
V4L2_MBUS_FMT_RGB444_2X8_PADHI_LE = 0x1002,
V4L2_MBUS_FMT_RGB555_2X8_PADHI_BE = 0x1003,
@@ -50,8 +50,9 @@ enum v4l2_mbus_pixelcode {
V4L2_MBUS_FMT_RGB888_1X24 = 0x100a,
V4L2_MBUS_FMT_RGB888_2X12_BE = 0x100b,
V4L2_MBUS_FMT_RGB888_2X12_LE = 0x100c,
+ V4L2_MBUS_FMT_ARGB8888_1X32 = 0x100d,
- /* YUV (including grey) - next is 0x2017 */
+ /* YUV (including grey) - next is 0x2018 */
V4L2_MBUS_FMT_Y8_1X8 = 0x2001,
V4L2_MBUS_FMT_UV8_1X8 = 0x2015,
V4L2_MBUS_FMT_UYVY8_1_5X8 = 0x2002,
@@ -74,6 +75,7 @@ enum v4l2_mbus_pixelcode {
V4L2_MBUS_FMT_YUYV10_1X20 = 0x200d,
V4L2_MBUS_FMT_YVYU10_1X20 = 0x200e,
V4L2_MBUS_FMT_YUV10_1X30 = 0x2016,
+ V4L2_MBUS_FMT_AYUV8_1X32 = 0x2017,
/* Bayer - next is 0x3019 */
V4L2_MBUS_FMT_SBGGR8_1X8 = 0x3001,
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 95ef4551edc..437f1b0f893 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -348,6 +348,8 @@ struct v4l2_pix_format {
/* two non contiguous planes - one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12M v4l2_fourcc('N', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 */
#define V4L2_PIX_FMT_NV21M v4l2_fourcc('N', 'M', '2', '1') /* 21 Y/CrCb 4:2:0 */
+#define V4L2_PIX_FMT_NV16M v4l2_fourcc('N', 'M', '1', '6') /* 16 Y/CbCr 4:2:2 */
+#define V4L2_PIX_FMT_NV61M v4l2_fourcc('N', 'M', '6', '1') /* 16 Y/CrCb 4:2:2 */
#define V4L2_PIX_FMT_NV12MT v4l2_fourcc('T', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 64x32 macroblocks */
#define V4L2_PIX_FMT_NV12MT_16X16 v4l2_fourcc('V', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 16x16 macroblocks */
@@ -1055,6 +1057,16 @@ struct v4l2_bt_timings {
or used depends on the hardware. */
#define V4L2_DV_FL_HALF_LINE (1 << 3)
+/* A few useful defines to calculate the total blanking and frame sizes */
+#define V4L2_DV_BT_BLANKING_WIDTH(bt) \
+ (bt->hfrontporch + bt->hsync + bt->hbackporch)
+#define V4L2_DV_BT_FRAME_WIDTH(bt) \
+ (bt->width + V4L2_DV_BT_BLANKING_WIDTH(bt))
+#define V4L2_DV_BT_BLANKING_HEIGHT(bt) \
+ (bt->vfrontporch + bt->vsync + bt->vbackporch + \
+ bt->il_vfrontporch + bt->il_vsync + bt->il_vbackporch)
+#define V4L2_DV_BT_FRAME_HEIGHT(bt) \
+ (bt->height + V4L2_DV_BT_BLANKING_HEIGHT(bt))
/** struct v4l2_dv_timings - DV timings
* @type: the type of the timings
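
The new blanking and frame-size helpers above make refresh-rate math straightforward. A small sketch deriving an approximate vertical refresh rate from a struct v4l2_bt_timings (e.g. one filled in by VIDIOC_G_DV_TIMINGS); the helper name is illustrative:

#include <linux/videodev2.h>

/* Approximate vertical refresh rate in Hz for BT.656/1120 timings. */
static unsigned int bt_refresh_hz(const struct v4l2_bt_timings *bt)
{
        unsigned long long frame =
                (unsigned long long)V4L2_DV_BT_FRAME_WIDTH(bt) *
                V4L2_DV_BT_FRAME_HEIGHT(bt);

        return frame ? (unsigned int)(bt->pixelclock / frame) : 0;
}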
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index c520203fac2..172a7f00780 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -60,7 +60,7 @@
struct virtio_net_config {
/* The config defining mac address (if VIRTIO_NET_F_MAC) */
- __u8 mac[6];
+ __u8 mac[ETH_ALEN];
/* See VIRTIO_NET_F_STATUS and VIRTIO_NET_S_* above */
__u16 status;
/* Maximum number of each of transmit and receive queues;
@@ -70,7 +70,9 @@ struct virtio_net_config {
__u16 max_virtqueue_pairs;
} __attribute__((packed));
-/* This is the first element of the scatter-gather list. If you don't
+/* This header comes first in the scatter-gather list.
+ * If VIRTIO_F_ANY_LAYOUT is not negotiated, it must
+ * be the first element of the scatter-gather list. If you don't
* specify GSO or CSUM features, you can simply ignore the header. */
struct virtio_net_hdr {
#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset
diff --git a/include/uapi/linux/wimax/i2400m.h b/include/uapi/linux/wimax/i2400m.h
index 62d35615356..fd198bc24a3 100644
--- a/include/uapi/linux/wimax/i2400m.h
+++ b/include/uapi/linux/wimax/i2400m.h
@@ -122,7 +122,7 @@
#define __LINUX__WIMAX__I2400M_H__
#include <linux/types.h>
-
+#include <linux/if_ether.h>
/*
* Host Device Interface (HDI) common to all busses
@@ -487,7 +487,7 @@ struct i2400m_tlv_l4_message_versions {
struct i2400m_tlv_detailed_device_info {
struct i2400m_tlv_hdr hdr;
__u8 reserved1[400];
- __u8 mac_address[6];
+ __u8 mac_address[ETH_ALEN];
__u8 reserved2[2];
} __attribute__((packed));
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 805711ea200..0b233c56b0e 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -43,6 +43,7 @@
* compatibility are made.
*/
#define IB_USER_VERBS_ABI_VERSION 6
+#define IB_USER_VERBS_CMD_THRESHOLD 50
enum {
IB_USER_VERBS_CMD_GET_CONTEXT,
@@ -85,7 +86,9 @@ enum {
IB_USER_VERBS_CMD_OPEN_XRCD,
IB_USER_VERBS_CMD_CLOSE_XRCD,
IB_USER_VERBS_CMD_CREATE_XSRQ,
- IB_USER_VERBS_CMD_OPEN_QP
+ IB_USER_VERBS_CMD_OPEN_QP,
+ IB_USER_VERBS_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
+ IB_USER_VERBS_CMD_DESTROY_FLOW
};
/*
@@ -123,6 +126,15 @@ struct ib_uverbs_cmd_hdr {
__u16 out_words;
};
+struct ib_uverbs_cmd_hdr_ex {
+ __u32 command;
+ __u16 in_words;
+ __u16 out_words;
+ __u16 provider_in_words;
+ __u16 provider_out_words;
+ __u32 cmd_hdr_reserved;
+};
+
struct ib_uverbs_get_context {
__u64 response;
__u64 driver_data[0];
@@ -684,6 +696,91 @@ struct ib_uverbs_detach_mcast {
__u64 driver_data[0];
};
+struct ib_kern_eth_filter {
+ __u8 dst_mac[6];
+ __u8 src_mac[6];
+ __be16 ether_type;
+ __be16 vlan_tag;
+};
+
+struct ib_kern_spec_eth {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ struct ib_kern_eth_filter val;
+ struct ib_kern_eth_filter mask;
+};
+
+struct ib_kern_ipv4_filter {
+ __be32 src_ip;
+ __be32 dst_ip;
+};
+
+struct ib_kern_spec_ipv4 {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ struct ib_kern_ipv4_filter val;
+ struct ib_kern_ipv4_filter mask;
+};
+
+struct ib_kern_tcp_udp_filter {
+ __be16 dst_port;
+ __be16 src_port;
+};
+
+struct ib_kern_spec_tcp_udp {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ struct ib_kern_tcp_udp_filter val;
+ struct ib_kern_tcp_udp_filter mask;
+};
+
+struct ib_kern_spec {
+ union {
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ struct ib_kern_spec_eth eth;
+ struct ib_kern_spec_ipv4 ipv4;
+ struct ib_kern_spec_tcp_udp tcp_udp;
+ };
+};
+
+struct ib_kern_flow_attr {
+ __u32 type;
+ __u16 size;
+ __u16 priority;
+ __u8 num_of_specs;
+ __u8 reserved[2];
+ __u8 port;
+ __u32 flags;
+ /* Following are the optional layers according to user request
+ * struct ib_flow_spec_xxx
+ * struct ib_flow_spec_yyy
+ */
+};
+
+struct ib_uverbs_create_flow {
+ __u32 comp_mask;
+ __u64 response;
+ __u32 qp_handle;
+ struct ib_kern_flow_attr flow_attr;
+};
+
+struct ib_uverbs_create_flow_resp {
+ __u32 comp_mask;
+ __u32 flow_handle;
+};
+
+struct ib_uverbs_destroy_flow {
+ __u32 comp_mask;
+ __u32 flow_handle;
+};
+
struct ib_uverbs_create_srq {
__u64 response;
__u64 user_handle;
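
IB_USER_VERBS_CMD_THRESHOLD splits the command space: commands below it keep the classic ib_uverbs_cmd_hdr, while CREATE_FLOW/DESTROY_FLOW (and anything later) are written with the extended header that carries provider word counts. A hedged sketch of how a userspace library might pick the header when building a command buffer; the helper itself and the zeroed provider fields are assumptions, not part of this ABI:

#include <rdma/ib_user_verbs.h>
#include <string.h>

/* Write the right header at the start of 'buf'; returns its size. */
static size_t fill_uverbs_hdr(void *buf, __u32 command,
                              __u16 in_words, __u16 out_words)
{
        if (command < IB_USER_VERBS_CMD_THRESHOLD) {
                struct ib_uverbs_cmd_hdr *hdr = buf;

                hdr->command = command;
                hdr->in_words = in_words;
                hdr->out_words = out_words;
                return sizeof(*hdr);
        } else {
                struct ib_uverbs_cmd_hdr_ex *hdr = buf;

                memset(hdr, 0, sizeof(*hdr));   /* provider words left 0 here */
                hdr->command = command;
                hdr->in_words = in_words;
                hdr->out_words = out_words;
                return sizeof(*hdr);
        }
}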
diff --git a/include/uapi/sound/hdspm.h b/include/uapi/sound/hdspm.h
index 1f59ea2a4a7..d956c3593f6 100644
--- a/include/uapi/sound/hdspm.h
+++ b/include/uapi/sound/hdspm.h
@@ -111,7 +111,7 @@ struct hdspm_ltc {
enum hdspm_ltc_input_format input_format;
};
-#define SNDRV_HDSPM_IOCTL_GET_LTC _IOR('H', 0x46, struct hdspm_mixer_ioctl)
+#define SNDRV_HDSPM_IOCTL_GET_LTC _IOR('H', 0x46, struct hdspm_ltc)
/**
* The status data reflects the device's current state
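
The GET_LTC fix above matters because _IOR() encodes sizeof() of its argument type into the request number, so the old definition produced a different ioctl number than the struct it was actually used with. With the corrected macro, userspace simply passes a struct hdspm_ltc; a minimal sketch, assuming fd is the card's hwdep device:

#include <sound/hdspm.h>
#include <sys/ioctl.h>

/* Fetch the current LTC reading; returns 0 on success, -1 on error. */
static int read_ltc(int fd, struct hdspm_ltc *ltc)
{
        return ioctl(fd, SNDRV_HDSPM_IOCTL_GET_LTC, ltc);
}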
diff --git a/include/video/da8xx-fb.h b/include/video/da8xx-fb.h
index f88825928dd..efed3c3383d 100644
--- a/include/video/da8xx-fb.h
+++ b/include/video/da8xx-fb.h
@@ -23,6 +23,11 @@ enum raster_load_mode {
LOAD_PALETTE,
};
+enum da8xx_frame_complete {
+ DA8XX_FRAME_WAIT,
+ DA8XX_FRAME_NOWAIT,
+};
+
struct da8xx_lcdc_platform_data {
const char manu_name[10];
void *controller_data;
diff --git a/include/video/omap-panel-data.h b/include/video/omap-panel-data.h
index 6b2366fb6e5..f7ac8d972af 100644
--- a/include/video/omap-panel-data.h
+++ b/include/video/omap-panel-data.h
@@ -33,124 +33,6 @@
struct omap_dss_device;
/**
- * struct panel_generic_dpi_data - panel driver configuration data
- * @name: panel name
- * @platform_enable: platform specific panel enable function
- * @platform_disable: platform specific panel disable function
- * @num_gpios: number of gpios connected to panel
- * @gpios: gpio numbers on the platform
- * @gpio_invert: configure gpio as active high or low
- */
-struct panel_generic_dpi_data {
- const char *name;
- int (*platform_enable)(struct omap_dss_device *dssdev);
- void (*platform_disable)(struct omap_dss_device *dssdev);
-
- int num_gpios;
- int gpios[10];
- bool gpio_invert[10];
-};
-
-/**
- * struct panel_n8x0_data - N800 panel driver configuration data
- */
-struct panel_n8x0_data {
- int (*platform_enable)(struct omap_dss_device *dssdev);
- void (*platform_disable)(struct omap_dss_device *dssdev);
- int panel_reset;
- int ctrl_pwrdown;
-};
-
-/**
- * struct nokia_dsi_panel_data - Nokia DSI panel driver configuration data
- * @name: panel name
- * @use_ext_te: use external TE
- * @ext_te_gpio: external TE GPIO
- * @esd_interval: interval of ESD checks, 0 = disabled (ms)
- * @ulps_timeout: time to wait before entering ULPS, 0 = disabled (ms)
- * @use_dsi_backlight: true if panel uses DSI command to control backlight
- * @pin_config: DSI pin configuration
- */
-
-struct nokia_dsi_panel_data {
- const char *name;
-
- int reset_gpio;
-
- bool use_ext_te;
- int ext_te_gpio;
-
- unsigned esd_interval;
- unsigned ulps_timeout;
-
- bool use_dsi_backlight;
-
- struct omap_dsi_pin_config pin_config;
-};
-
-/**
- * struct picodlp_panel_data - picodlp panel driver configuration data
- * @picodlp_adapter_id: i2c_adapter number for picodlp
- */
-struct picodlp_panel_data {
- int picodlp_adapter_id;
- int emu_done_gpio;
- int pwrgood_gpio;
-};
-
-/**
- * struct tfp410_platform_data - tfp410 panel driver configuration data
- * @i2c_bus_num: i2c bus id for the panel
- * @power_down_gpio: gpio number for PD pin (or -1 if not available)
- */
-struct tfp410_platform_data {
- int i2c_bus_num;
- int power_down_gpio;
-};
-
-/**
- * sharp ls panel driver configuration data
- * @resb_gpio: reset signal
- * @ini_gpio: power on control
- * @mo_gpio: selection for resolution(VGA/QVGA)
- * @lr_gpio: selection for horizontal scanning direction
- * @ud_gpio: selection for vertical scanning direction
- */
-struct panel_sharp_ls037v7dw01_data {
- int resb_gpio;
- int ini_gpio;
- int mo_gpio;
- int lr_gpio;
- int ud_gpio;
-};
-
-/**
- * acx565akm panel driver configuration data
- * @reset_gpio: reset signal
- */
-struct panel_acx565akm_data {
- int reset_gpio;
-};
-
-/**
- * nec nl8048 panel driver configuration data
- * @res_gpio: reset signal
- * @qvga_gpio: selection for resolution(QVGA/WVGA)
- */
-struct panel_nec_nl8048_data {
- int res_gpio;
- int qvga_gpio;
-};
-
-/**
- * tpo td043 panel driver configuration data
- * @nreset_gpio: reset signal
- */
-struct panel_tpo_td043_data {
- int nreset_gpio;
-};
-
-/**
* encoder_tfp410 platform data
* @name: name for this display entity
* @power_down_gpio: gpio number for PD pin (or -1 if not available)
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index b3946355384..3d7c51a6f9f 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -250,19 +250,6 @@ struct rfbi_timings {
int converted;
};
-void omap_rfbi_write_command(const void *buf, u32 len);
-void omap_rfbi_read_data(void *buf, u32 len);
-void omap_rfbi_write_data(const void *buf, u32 len);
-void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width,
- u16 x, u16 y,
- u16 w, u16 h);
-int omap_rfbi_enable_te(bool enable, unsigned line);
-int omap_rfbi_setup_te(enum omap_rfbi_te_mode mode,
- unsigned hs_pulse_time, unsigned vs_pulse_time,
- int hs_pol_inv, int vs_pol_inv, int extif_div);
-void rfbi_bus_lock(void);
-void rfbi_bus_unlock(void);
-
/* DSI */
enum omap_dss_dsi_trans_mode {
@@ -321,39 +308,6 @@ struct omap_dss_dsi_config {
enum omap_dss_dsi_trans_mode trans_mode;
};
-void dsi_bus_lock(struct omap_dss_device *dssdev);
-void dsi_bus_unlock(struct omap_dss_device *dssdev);
-int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
- int len);
-int dsi_vc_generic_write(struct omap_dss_device *dssdev, int channel, u8 *data,
- int len);
-int dsi_vc_dcs_write_0(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd);
-int dsi_vc_generic_write_0(struct omap_dss_device *dssdev, int channel);
-int dsi_vc_dcs_write_1(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
- u8 param);
-int dsi_vc_generic_write_1(struct omap_dss_device *dssdev, int channel,
- u8 param);
-int dsi_vc_generic_write_2(struct omap_dss_device *dssdev, int channel,
- u8 param1, u8 param2);
-int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len);
-int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel,
- u8 *data, int len);
-int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
- u8 *buf, int buflen);
-int dsi_vc_generic_read_0(struct omap_dss_device *dssdev, int channel, u8 *buf,
- int buflen);
-int dsi_vc_generic_read_1(struct omap_dss_device *dssdev, int channel, u8 param,
- u8 *buf, int buflen);
-int dsi_vc_generic_read_2(struct omap_dss_device *dssdev, int channel,
- u8 param1, u8 param2, u8 *buf, int buflen);
-int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
- u16 len);
-int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel);
-int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel);
-int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel);
-void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel);
-
enum omapdss_version {
OMAPDSS_VER_UNKNOWN = 0,
OMAPDSS_VER_OMAP24xx,
@@ -749,10 +703,6 @@ struct omapdss_dsi_ops {
};
struct omap_dss_device {
- /* old device, to be removed */
- struct device old_dev;
-
- /* new device, pointer to panel device */
struct device *dev;
struct module *owner;
@@ -765,9 +715,6 @@ struct omap_dss_device {
enum omap_display_type type;
enum omap_display_type output_type;
- /* obsolete, to be removed */
- enum omap_channel channel;
-
union {
struct {
u8 data_lines;
@@ -827,7 +774,7 @@ struct omap_dss_device {
enum omap_display_caps caps;
- struct omap_dss_device *output;
+ struct omap_dss_device *src;
enum omap_dss_display_state state;
@@ -846,7 +793,7 @@ struct omap_dss_device {
/* dynamic fields */
struct omap_overlay_manager *manager;
- struct omap_dss_device *device;
+ struct omap_dss_device *dst;
};
struct omap_dss_hdmi_data
@@ -857,8 +804,6 @@ struct omap_dss_hdmi_data
};
struct omap_dss_driver {
- struct device_driver driver;
-
int (*probe)(struct omap_dss_device *);
void (*remove)(struct omap_dss_device *);
@@ -1023,51 +968,6 @@ int dispc_ovl_setup(enum omap_plane plane, const struct omap_overlay_info *oi,
#define to_dss_driver(x) container_of((x), struct omap_dss_driver, driver)
#define to_dss_device(x) container_of((x), struct omap_dss_device, old_dev)
-void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
- bool enable);
-int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable);
-int omapdss_dsi_set_config(struct omap_dss_device *dssdev,
- const struct omap_dss_dsi_config *config);
-
-int omap_dsi_update(struct omap_dss_device *dssdev, int channel,
- void (*callback)(int, void *), void *data);
-int omap_dsi_request_vc(struct omap_dss_device *dssdev, int *channel);
-int omap_dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id);
-void omap_dsi_release_vc(struct omap_dss_device *dssdev, int channel);
-int omapdss_dsi_configure_pins(struct omap_dss_device *dssdev,
- const struct omap_dsi_pin_config *pin_cfg);
-
-int omapdss_dsi_display_enable(struct omap_dss_device *dssdev);
-void omapdss_dsi_display_disable(struct omap_dss_device *dssdev,
- bool disconnect_lanes, bool enter_ulps);
-
-int omapdss_dpi_display_enable(struct omap_dss_device *dssdev);
-void omapdss_dpi_display_disable(struct omap_dss_device *dssdev);
-void omapdss_dpi_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
-int dpi_check_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
-void omapdss_dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines);
-
-int omapdss_sdi_display_enable(struct omap_dss_device *dssdev);
-void omapdss_sdi_display_disable(struct omap_dss_device *dssdev);
-void omapdss_sdi_set_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings);
-void omapdss_sdi_set_datapairs(struct omap_dss_device *dssdev, int datapairs);
-
-int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev);
-void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev);
-int omap_rfbi_update(struct omap_dss_device *dssdev, void (*callback)(void *),
- void *data);
-int omap_rfbi_configure(struct omap_dss_device *dssdev);
-void omapdss_rfbi_set_size(struct omap_dss_device *dssdev, u16 w, u16 h);
-void omapdss_rfbi_set_pixel_size(struct omap_dss_device *dssdev,
- int pixel_size);
-void omapdss_rfbi_set_data_lines(struct omap_dss_device *dssdev,
- int data_lines);
-void omapdss_rfbi_set_interface_timings(struct omap_dss_device *dssdev,
- struct rfbi_timings *timings);
-
int omapdss_compat_init(void);
void omapdss_compat_uninit(void);
@@ -1111,7 +1011,7 @@ void dss_mgr_unregister_framedone_handler(struct omap_overlay_manager *mgr,
static inline bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
{
- return dssdev->output;
+ return dssdev->src;
}
static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
diff --git a/include/xen/acpi.h b/include/xen/acpi.h
index 46aa3d1c165..4ddd7dc4a61 100644
--- a/include/xen/acpi.h
+++ b/include/xen/acpi.h
@@ -75,8 +75,10 @@ static inline int xen_acpi_get_pxm(acpi_handle h)
return -ENXIO;
}
-int xen_acpi_notify_hypervisor_state(u8 sleep_state,
+int xen_acpi_notify_hypervisor_sleep(u8 sleep_state,
u32 pm1a_cnt, u32 pm1b_cnd);
+int xen_acpi_notify_hypervisor_extended_sleep(u8 sleep_state,
+ u32 val_a, u32 val_b);
static inline int xen_acpi_suspend_lowlevel(void)
{
@@ -93,7 +95,9 @@ static inline void xen_acpi_sleep_register(void)
{
if (xen_initial_domain()) {
acpi_os_set_prepare_sleep(
- &xen_acpi_notify_hypervisor_state);
+ &xen_acpi_notify_hypervisor_sleep);
+ acpi_os_set_prepare_extended_sleep(
+ &xen_acpi_notify_hypervisor_extended_sleep);
acpi_suspend_lowlevel = xen_acpi_suspend_lowlevel;
}
diff --git a/include/xen/balloon.h b/include/xen/balloon.h
index cc2e1a7e44e..a4c1c6a9369 100644
--- a/include/xen/balloon.h
+++ b/include/xen/balloon.h
@@ -29,6 +29,9 @@ int alloc_xenballooned_pages(int nr_pages, struct page **pages,
bool highmem);
void free_xenballooned_pages(int nr_pages, struct page **pages);
+struct page *get_balloon_scratch_page(void);
+void put_balloon_scratch_page(void);
+
struct device;
#ifdef CONFIG_XEN_SELFBALLOONING
extern int register_xen_selfballooning(struct device *dev);
diff --git a/include/xen/interface/io/tpmif.h b/include/xen/interface/io/tpmif.h
new file mode 100644
index 00000000000..28e7dcd75e8
--- /dev/null
+++ b/include/xen/interface/io/tpmif.h
@@ -0,0 +1,52 @@
+/******************************************************************************
+ * tpmif.h
+ *
+ * TPM I/O interface for Xen guest OSes, v2
+ *
+ * This file is in the public domain.
+ *
+ */
+
+#ifndef __XEN_PUBLIC_IO_TPMIF_H__
+#define __XEN_PUBLIC_IO_TPMIF_H__
+
+/*
+ * Xenbus state machine
+ *
+ * Device open:
+ * 1. Both ends start in XenbusStateInitialising
+ * 2. Backend transitions to InitWait (frontend does not wait on this step)
+ * 3. Frontend populates ring-ref, event-channel, feature-protocol-v2
+ * 4. Frontend transitions to Initialised
+ * 5. Backend maps grant and event channel, verifies feature-protocol-v2
+ * 6. Backend transitions to Connected
+ * 7. Frontend verifies feature-protocol-v2, transitions to Connected
+ *
+ * Device close:
+ * 1. State is changed to XenbusStateClosing
+ * 2. Frontend transitions to Closed
+ * 3. Backend unmaps grant and event, changes state to InitWait
+ */
+
+enum vtpm_shared_page_state {
+ VTPM_STATE_IDLE, /* no contents / vTPM idle / cancel complete */
+ VTPM_STATE_SUBMIT, /* request ready / vTPM working */
+ VTPM_STATE_FINISH, /* response ready / vTPM idle */
+ VTPM_STATE_CANCEL, /* cancel requested / vTPM working */
+};
+/* The backend should only change state to IDLE or FINISH, while the
+ * frontend should only change to SUBMIT or CANCEL. */
+
+
+struct vtpm_shared_page {
+ uint32_t length; /* request/response length in bytes */
+
+ uint8_t state; /* enum vtpm_shared_page_state */
+ uint8_t locality; /* for the current request */
+ uint8_t pad;
+
+ uint8_t nr_extra_pages; /* extra pages for long packets; may be zero */
+ uint32_t extra_pages[0]; /* grant IDs; length in nr_extra_pages */
+};
+
+#endif
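
Following the state machine documented above, the frontend fills the shared page, flips the state to SUBMIT, kicks the backend, and waits for FINISH. A heavily hedged in-kernel-style sketch; notify_backend(), the busy-wait, and the assumption that the command bytes were already staged by the caller all stand in for the real ring/event-channel plumbing:

#include <xen/interface/io/tpmif.h>

static void notify_backend(void)
{
        /* assumed: send an event-channel notification to the backend */
}

static void vtpm_submit(struct vtpm_shared_page *shr, uint32_t len)
{
        shr->length = len;              /* command bytes already staged */
        shr->state = VTPM_STATE_SUBMIT; /* frontend only sets SUBMIT/CANCEL */
        notify_backend();

        while (shr->state != VTPM_STATE_FINISH)
                ;                       /* real code sleeps on the event channel */

        /* shr->length now holds the response size */
}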
diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h
index c57d5f67f70..f1331e3e727 100644
--- a/include/xen/interface/platform.h
+++ b/include/xen/interface/platform.h
@@ -152,10 +152,11 @@ DEFINE_GUEST_HANDLE_STRUCT(xenpf_firmware_info_t);
#define XENPF_enter_acpi_sleep 51
struct xenpf_enter_acpi_sleep {
/* IN variables */
- uint16_t pm1a_cnt_val; /* PM1a control value. */
- uint16_t pm1b_cnt_val; /* PM1b control value. */
+ uint16_t val_a; /* PM1a control / sleep type A. */
+ uint16_t val_b; /* PM1b control / sleep type B. */
uint32_t sleep_state; /* Which state to enter (Sn). */
- uint32_t flags; /* Must be zero. */
+#define XENPF_ACPI_SLEEP_EXTENDED 0x00000001
+ uint32_t flags; /* XENPF_ACPI_SLEEP_*. */
};
DEFINE_GUEST_HANDLE_STRUCT(xenpf_enter_acpi_sleep_t);
diff --git a/include/xen/interface/vcpu.h b/include/xen/interface/vcpu.h
index 87e6f8a4866..b05288ce399 100644
--- a/include/xen/interface/vcpu.h
+++ b/include/xen/interface/vcpu.h
@@ -170,4 +170,6 @@ struct vcpu_register_vcpu_info {
};
DEFINE_GUEST_HANDLE_STRUCT(vcpu_register_vcpu_info);
+/* Send an NMI to the specified VCPU. @extra_arg == NULL. */
+#define VCPUOP_send_nmi 11
#endif /* __XEN_PUBLIC_VCPU_H__ */
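
VCPUOP_send_nmi is issued through the generic vcpu_op hypercall with a NULL extra argument, as the comment says. A one-function in-kernel sketch, assuming the standard HYPERVISOR_vcpu_op() wrapper from the arch Xen headers:

#include <xen/interface/vcpu.h>
#include <asm/xen/hypercall.h>

/* Ask Xen to inject an NMI into virtual CPU 'vcpu' of this domain. */
static int send_nmi_to_vcpu(int vcpu)
{
        return HYPERVISOR_vcpu_op(VCPUOP_send_nmi, vcpu, NULL);
}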
diff --git a/init/Kconfig b/init/Kconfig
index 247084be059..0a2c4bcf179 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -470,6 +470,7 @@ config TREE_RCU
config TREE_PREEMPT_RCU
bool "Preemptible tree-based hierarchical RCU"
depends on PREEMPT
+ select IRQ_WORK
help
This option selects the RCU implementation that is
designed for very large SMP systems with hundreds or
@@ -527,13 +528,29 @@ config RCU_USER_QS
config CONTEXT_TRACKING_FORCE
bool "Force context tracking"
depends on CONTEXT_TRACKING
- default CONTEXT_TRACKING
+ default y if !NO_HZ_FULL
help
- Probe on user/kernel boundaries by default in order to
- test the features that rely on it such as userspace RCU extended
- quiescent states.
- This test is there for debugging until we have a real user like the
- full dynticks mode.
+ The major prerequisite for full dynticks to work is support
+ for the context tracking subsystem, but there are also
+ other dependencies to meet before full dynticks can work.
+
+ This option is meant for testing when an arch implements the
+ context tracking backend but doesn't yet fulfill all the
+ requirements to make the full dynticks feature work.
+ Without full dynticks, there is no way to test the support
+ for context tracking and the subsystems that rely on it: RCU
+ userspace extended quiescent state and tickless cputime
+ accounting. This option copes with the absence of the full
+ dynticks subsystem by forcing context tracking on all
+ CPUs in the system.
+
+ Say Y only if you're working on the development of an
+ architecture backend for context tracking.
+
+ Say N otherwise; this option brings an overhead that you
+ don't want in production.
+
config RCU_FANOUT
int "Tree-based hierarchical RCU fanout value"
@@ -955,7 +972,7 @@ config MEMCG_SWAP_ENABLED
Memory Resource Controller Swap Extension comes with its price in
a bigger memory consumption. General purpose distribution kernels
which want to enable the feature but keep it disabled by default
- and let the user enable it by swapaccount boot command line
+ and let the user enable it with the swapaccount=1 boot command line
parameter should have this option unselected.
For those who want to have the feature enabled by default should
select this option (if, for some reason, they need to disable it
diff --git a/init/main.c b/init/main.c
index d03d2ec2eac..af310afbef2 100644
--- a/init/main.c
+++ b/init/main.c
@@ -75,6 +75,7 @@
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/sched_clock.h>
+#include <linux/context_tracking.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -545,6 +546,7 @@ asmlinkage void __init start_kernel(void)
idr_init_cache();
rcu_init();
tick_nohz_init();
+ context_tracking_init();
radix_tree_init();
/* init some links before init_ISA_irqs() */
early_irq_init();
diff --git a/ipc/msg.c b/ipc/msg.c
index bd60d7e159e..b65fdf1a09d 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -680,16 +680,18 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock1;
}
+ ipc_lock_object(&msq->q_perm);
+
for (;;) {
struct msg_sender s;
err = -EACCES;
if (ipcperms(ns, &msq->q_perm, S_IWUGO))
- goto out_unlock1;
+ goto out_unlock0;
err = security_msg_queue_msgsnd(msq, msg, msgflg);
if (err)
- goto out_unlock1;
+ goto out_unlock0;
if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
1 + msq->q_qnum <= msq->q_qbytes) {
@@ -699,10 +701,9 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
/* queue full, wait: */
if (msgflg & IPC_NOWAIT) {
err = -EAGAIN;
- goto out_unlock1;
+ goto out_unlock0;
}
- ipc_lock_object(&msq->q_perm);
ss_add(msq, &s);
if (!ipc_rcu_getref(msq)) {
@@ -730,10 +731,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock0;
}
- ipc_unlock_object(&msq->q_perm);
}
-
- ipc_lock_object(&msq->q_perm);
msq->q_lspid = task_tgid_vnr(current);
msq->q_stime = get_seconds();
@@ -839,7 +837,7 @@ static inline void free_copy(struct msg_msg *copy)
static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
{
- struct msg_msg *msg;
+ struct msg_msg *msg, *found = NULL;
long count = 0;
list_for_each_entry(msg, &msq->q_messages, m_list) {
@@ -848,6 +846,7 @@ static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
*msgtyp, mode)) {
if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
*msgtyp = msg->m_type - 1;
+ found = msg;
} else if (mode == SEARCH_NUMBER) {
if (*msgtyp == count)
return msg;
@@ -857,7 +856,7 @@ static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
}
}
- return ERR_PTR(-EAGAIN);
+ return found ?: ERR_PTR(-EAGAIN);
}
long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg,
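
find_msg() backs the negative-msgtyp case of msgrcv(2): a negative type asks for the queued message with the lowest type that is <= |msgtyp|, and the change above remembers the best candidate seen during the scan so it can actually be returned instead of falling through to -EAGAIN. A small userspace illustration of that semantic (the message struct is the usual SysV pattern):

#include <sys/msg.h>
#include <sys/types.h>

struct msgbuf_ex {
        long mtype;
        char mtext[128];
};

/* Receive the message with the lowest mtype that is <= max_type. */
static ssize_t recv_lowest_type(int msqid, long max_type, struct msgbuf_ex *m)
{
        return msgrcv(msqid, m, sizeof(m->mtext), -max_type, 0);
}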
diff --git a/kernel/Makefile b/kernel/Makefile
index 470839d1a30..35ef1185e35 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -2,7 +2,7 @@
# Makefile for the linux kernel.
#
-obj-y = fork.o exec_domain.o panic.o printk.o \
+obj-y = fork.o exec_domain.o panic.o \
cpu.o exit.o itimer.o time.o softirq.o resource.o \
sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
@@ -24,6 +24,7 @@ endif
obj-y += sched/
obj-y += power/
+obj-y += printk/
obj-y += cpu/
obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 789ec4683db..e0aeb32415f 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -81,7 +81,7 @@
*/
#ifdef CONFIG_PROVE_RCU
DEFINE_MUTEX(cgroup_mutex);
-EXPORT_SYMBOL_GPL(cgroup_mutex); /* only for task_subsys_state_check() */
+EXPORT_SYMBOL_GPL(cgroup_mutex); /* only for lockdep */
#else
static DEFINE_MUTEX(cgroup_mutex);
#endif
@@ -117,6 +117,7 @@ struct cfent {
struct list_head node;
struct dentry *dentry;
struct cftype *type;
+ struct cgroup_subsys_state *css;
/* file xattrs */
struct simple_xattrs xattrs;
@@ -159,9 +160,9 @@ struct css_id {
*/
struct cgroup_event {
/*
- * Cgroup which the event belongs to.
+ * css which the event belongs to.
*/
- struct cgroup *cgrp;
+ struct cgroup_subsys_state *css;
/*
* Control file which the event associated.
*/
@@ -215,10 +216,33 @@ static u64 cgroup_serial_nr_next = 1;
*/
static int need_forkexit_callback __read_mostly;
-static void cgroup_offline_fn(struct work_struct *work);
+static struct cftype cgroup_base_files[];
+
+static void cgroup_destroy_css_killed(struct cgroup *cgrp);
static int cgroup_destroy_locked(struct cgroup *cgrp);
-static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys,
- struct cftype cfts[], bool is_add);
+static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
+ bool is_add);
+
+/**
+ * cgroup_css - obtain a cgroup's css for the specified subsystem
+ * @cgrp: the cgroup of interest
+ * @ss: the subsystem of interest (%NULL returns the dummy_css)
+ *
+ * Return @cgrp's css (cgroup_subsys_state) associated with @ss. This
+ * function must be called either under cgroup_mutex or rcu_read_lock() and
+ * the caller is responsible for pinning the returned css if it wants to
+ * keep accessing it outside the said locks. This function may return
+ * %NULL if @cgrp doesn't have @ss enabled.
+ */
+static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp,
+ struct cgroup_subsys *ss)
+{
+ if (ss)
+ return rcu_dereference_check(cgrp->subsys[ss->subsys_id],
+ lockdep_is_held(&cgroup_mutex));
+ else
+ return &cgrp->dummy_css;
+}
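
As the kernel-doc above notes, cgroup_css() must run under cgroup_mutex or rcu_read_lock(), and the css must be pinned before those locks are dropped if it will be used afterwards. A minimal usage fragment (it assumes the surrounding cgroup.c context and only touches the css inside the RCU section):

static void peek_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
{
        struct cgroup_subsys_state *css;

        rcu_read_lock();
        css = cgroup_css(cgrp, ss);
        if (css)
                pr_debug("css %p for subsys %d\n", css, ss->subsys_id);
        rcu_read_unlock();
}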
/* convenient tests for these bits */
static inline bool cgroup_is_dead(const struct cgroup *cgrp)
@@ -365,9 +389,11 @@ static struct cgrp_cset_link init_cgrp_cset_link;
static int cgroup_init_idr(struct cgroup_subsys *ss,
struct cgroup_subsys_state *css);
-/* css_set_lock protects the list of css_set objects, and the
- * chain of tasks off each css_set. Nests outside task->alloc_lock
- * due to cgroup_iter_start() */
+/*
+ * css_set_lock protects the list of css_set objects, and the chain of
+ * tasks off each css_set. Nests outside task->alloc_lock due to
+ * css_task_iter_start().
+ */
static DEFINE_RWLOCK(css_set_lock);
static int css_set_count;
@@ -392,10 +418,12 @@ static unsigned long css_set_hash(struct cgroup_subsys_state *css[])
return key;
}
-/* We don't maintain the lists running through each css_set to its
- * task until after the first call to cgroup_iter_start(). This
- * reduces the fork()/exit() overhead for people who have cgroups
- * compiled into their kernel but not actually in use */
+/*
+ * We don't maintain the lists running through each css_set to its task
+ * until after the first call to css_task_iter_start(). This reduces the
+ * fork()/exit() overhead for people who have cgroups compiled into their
+ * kernel but not actually in use.
+ */
static int use_task_css_set_links __read_mostly;
static void __put_css_set(struct css_set *cset, int taskexit)
@@ -464,7 +492,7 @@ static inline void put_css_set_taskexit(struct css_set *cset)
* @new_cgrp: cgroup that's being entered by the task
* @template: desired set of css pointers in css_set (pre-calculated)
*
- * Returns true if "cg" matches "old_cg" except for the hierarchy
+ * Returns true if "cset" matches "old_cset" except for the hierarchy
* which "new_cgrp" belongs to, for which it should match "new_cgrp".
*/
static bool compare_css_sets(struct css_set *cset,
@@ -555,7 +583,7 @@ static struct css_set *find_existing_css_set(struct css_set *old_cset,
/* Subsystem is in this hierarchy. So we want
* the subsystem state from the new
* cgroup */
- template[i] = cgrp->subsys[i];
+ template[i] = cgroup_css(cgrp, ss);
} else {
/* Subsystem is not in this hierarchy, so we
* don't want to change the subsystem state */
@@ -803,8 +831,7 @@ static struct cgroup *task_cgroup_from_root(struct task_struct *task,
static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry);
-static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files,
- unsigned long subsys_mask);
+static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask);
static const struct inode_operations cgroup_dir_inode_operations;
static const struct file_operations proc_cgroupstats_operations;
@@ -813,8 +840,7 @@ static struct backing_dev_info cgroup_backing_dev_info = {
.capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
-static int alloc_css_id(struct cgroup_subsys *ss,
- struct cgroup *parent, struct cgroup *child);
+static int alloc_css_id(struct cgroup_subsys_state *child_css);
static struct inode *cgroup_new_inode(umode_t mode, struct super_block *sb)
{
@@ -845,15 +871,8 @@ static struct cgroup_name *cgroup_alloc_name(struct dentry *dentry)
static void cgroup_free_fn(struct work_struct *work)
{
struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work);
- struct cgroup_subsys *ss;
mutex_lock(&cgroup_mutex);
- /*
- * Release the subsystem state objects.
- */
- for_each_root_subsys(cgrp->root, ss)
- ss->css_free(cgrp);
-
cgrp->root->number_of_cgroups--;
mutex_unlock(&cgroup_mutex);
@@ -864,8 +883,6 @@ static void cgroup_free_fn(struct work_struct *work)
*/
dput(cgrp->parent->dentry);
- ida_simple_remove(&cgrp->root->cgroup_ida, cgrp->id);
-
/*
* Drop the active superblock reference that we took when we
* created the cgroup. This will free cgrp->root, if we are
@@ -956,27 +973,22 @@ static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
}
/**
- * cgroup_clear_directory - selective removal of base and subsystem files
- * @dir: directory containing the files
- * @base_files: true if the base files should be removed
+ * cgroup_clear_dir - remove subsys files in a cgroup directory
+ * @cgrp: target cgroup
* @subsys_mask: mask of the subsystem ids whose files should be removed
*/
-static void cgroup_clear_directory(struct dentry *dir, bool base_files,
- unsigned long subsys_mask)
+static void cgroup_clear_dir(struct cgroup *cgrp, unsigned long subsys_mask)
{
- struct cgroup *cgrp = __d_cgrp(dir);
struct cgroup_subsys *ss;
+ int i;
- for_each_root_subsys(cgrp->root, ss) {
+ for_each_subsys(ss, i) {
struct cftype_set *set;
- if (!test_bit(ss->subsys_id, &subsys_mask))
+
+ if (!test_bit(i, &subsys_mask))
continue;
list_for_each_entry(set, &ss->cftsets, node)
- cgroup_addrm_files(cgrp, NULL, set->cfts, false);
- }
- if (base_files) {
- while (!list_empty(&cgrp->files))
- cgroup_rm_file(cgrp, NULL);
+ cgroup_addrm_files(cgrp, set->cfts, false);
}
}
@@ -986,9 +998,6 @@ static void cgroup_clear_directory(struct dentry *dir, bool base_files,
static void cgroup_d_remove_dir(struct dentry *dentry)
{
struct dentry *parent;
- struct cgroupfs_root *root = dentry->d_sb->s_fs_info;
-
- cgroup_clear_directory(dentry, true, root->subsys_mask);
parent = dentry->d_parent;
spin_lock(&parent->d_lock);
@@ -1009,79 +1018,84 @@ static int rebind_subsystems(struct cgroupfs_root *root,
{
struct cgroup *cgrp = &root->top_cgroup;
struct cgroup_subsys *ss;
- int i;
+ unsigned long pinned = 0;
+ int i, ret;
BUG_ON(!mutex_is_locked(&cgroup_mutex));
BUG_ON(!mutex_is_locked(&cgroup_root_mutex));
/* Check that any added subsystems are currently free */
for_each_subsys(ss, i) {
- unsigned long bit = 1UL << i;
-
- if (!(bit & added_mask))
+ if (!(added_mask & (1 << i)))
continue;
+ /* is the subsystem mounted elsewhere? */
if (ss->root != &cgroup_dummy_root) {
- /* Subsystem isn't free */
- return -EBUSY;
+ ret = -EBUSY;
+ goto out_put;
+ }
+
+ /* pin the module */
+ if (!try_module_get(ss->module)) {
+ ret = -ENOENT;
+ goto out_put;
}
+ pinned |= 1 << i;
}
- /* Currently we don't handle adding/removing subsystems when
- * any child cgroups exist. This is theoretically supportable
- * but involves complex error handling, so it's being left until
- * later */
- if (root->number_of_cgroups > 1)
- return -EBUSY;
+ /* subsys could be missing if unloaded between parsing and here */
+ if (added_mask != pinned) {
+ ret = -ENOENT;
+ goto out_put;
+ }
+
+ ret = cgroup_populate_dir(cgrp, added_mask);
+ if (ret)
+ goto out_put;
+
+ /*
+ * Nothing can fail from this point on. Remove files for the
+ * removed subsystems and rebind each subsystem.
+ */
+ cgroup_clear_dir(cgrp, removed_mask);
- /* Process each subsystem */
for_each_subsys(ss, i) {
unsigned long bit = 1UL << i;
if (bit & added_mask) {
/* We're binding this subsystem to this hierarchy */
- BUG_ON(cgrp->subsys[i]);
- BUG_ON(!cgroup_dummy_top->subsys[i]);
- BUG_ON(cgroup_dummy_top->subsys[i]->cgroup != cgroup_dummy_top);
+ BUG_ON(cgroup_css(cgrp, ss));
+ BUG_ON(!cgroup_css(cgroup_dummy_top, ss));
+ BUG_ON(cgroup_css(cgroup_dummy_top, ss)->cgroup != cgroup_dummy_top);
+
+ rcu_assign_pointer(cgrp->subsys[i],
+ cgroup_css(cgroup_dummy_top, ss));
+ cgroup_css(cgrp, ss)->cgroup = cgrp;
- cgrp->subsys[i] = cgroup_dummy_top->subsys[i];
- cgrp->subsys[i]->cgroup = cgrp;
list_move(&ss->sibling, &root->subsys_list);
ss->root = root;
if (ss->bind)
- ss->bind(cgrp);
+ ss->bind(cgroup_css(cgrp, ss));
/* refcount was already taken, and we're keeping it */
root->subsys_mask |= bit;
} else if (bit & removed_mask) {
/* We're removing this subsystem */
- BUG_ON(cgrp->subsys[i] != cgroup_dummy_top->subsys[i]);
- BUG_ON(cgrp->subsys[i]->cgroup != cgrp);
+ BUG_ON(cgroup_css(cgrp, ss) != cgroup_css(cgroup_dummy_top, ss));
+ BUG_ON(cgroup_css(cgrp, ss)->cgroup != cgrp);
if (ss->bind)
- ss->bind(cgroup_dummy_top);
- cgroup_dummy_top->subsys[i]->cgroup = cgroup_dummy_top;
- cgrp->subsys[i] = NULL;
+ ss->bind(cgroup_css(cgroup_dummy_top, ss));
+
+ cgroup_css(cgroup_dummy_top, ss)->cgroup = cgroup_dummy_top;
+ RCU_INIT_POINTER(cgrp->subsys[i], NULL);
+
cgroup_subsys[i]->root = &cgroup_dummy_root;
list_move(&ss->sibling, &cgroup_dummy_root.subsys_list);
/* subsystem is now free - drop reference on module */
module_put(ss->module);
root->subsys_mask &= ~bit;
- } else if (bit & root->subsys_mask) {
- /* Subsystem state should already exist */
- BUG_ON(!cgrp->subsys[i]);
- /*
- * a refcount was taken, but we already had one, so
- * drop the extra reference.
- */
- module_put(ss->module);
-#ifdef CONFIG_MODULE_UNLOAD
- BUG_ON(ss->module && !module_refcount(ss->module));
-#endif
- } else {
- /* Subsystem state shouldn't exist */
- BUG_ON(cgrp->subsys[i]);
}
}
@@ -1092,6 +1106,12 @@ static int rebind_subsystems(struct cgroupfs_root *root,
root->flags |= CGRP_ROOT_SUBSYS_BOUND;
return 0;
+
+out_put:
+ for_each_subsys(ss, i)
+ if (pinned & (1 << i))
+ module_put(ss->module);
+ return ret;
}
static int cgroup_show_options(struct seq_file *seq, struct dentry *dentry)
@@ -1142,7 +1162,6 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
char *token, *o = data;
bool all_ss = false, one_ss = false;
unsigned long mask = (unsigned long)-1;
- bool module_pin_failed = false;
struct cgroup_subsys *ss;
int i;
@@ -1285,52 +1304,9 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
if (!opts->subsys_mask && !opts->name)
return -EINVAL;
- /*
- * Grab references on all the modules we'll need, so the subsystems
- * don't dance around before rebind_subsystems attaches them. This may
- * take duplicate reference counts on a subsystem that's already used,
- * but rebind_subsystems handles this case.
- */
- for_each_subsys(ss, i) {
- if (!(opts->subsys_mask & (1UL << i)))
- continue;
- if (!try_module_get(cgroup_subsys[i]->module)) {
- module_pin_failed = true;
- break;
- }
- }
- if (module_pin_failed) {
- /*
- * oops, one of the modules was going away. this means that we
- * raced with a module_delete call, and to the user this is
- * essentially a "subsystem doesn't exist" case.
- */
- for (i--; i >= 0; i--) {
- /* drop refcounts only on the ones we took */
- unsigned long bit = 1UL << i;
-
- if (!(bit & opts->subsys_mask))
- continue;
- module_put(cgroup_subsys[i]->module);
- }
- return -ENOENT;
- }
-
return 0;
}
-static void drop_parsed_module_refcounts(unsigned long subsys_mask)
-{
- struct cgroup_subsys *ss;
- int i;
-
- mutex_lock(&cgroup_mutex);
- for_each_subsys(ss, i)
- if (subsys_mask & (1UL << i))
- module_put(cgroup_subsys[i]->module);
- mutex_unlock(&cgroup_mutex);
-}
-
static int cgroup_remount(struct super_block *sb, int *flags, char *data)
{
int ret = 0;
@@ -1370,22 +1346,15 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
goto out_unlock;
}
- /*
- * Clear out the files of subsystems that should be removed, do
- * this before rebind_subsystems, since rebind_subsystems may
- * change this hierarchy's subsys_list.
- */
- cgroup_clear_directory(cgrp->dentry, false, removed_mask);
-
- ret = rebind_subsystems(root, added_mask, removed_mask);
- if (ret) {
- /* rebind_subsystems failed, re-populate the removed files */
- cgroup_populate_dir(cgrp, false, removed_mask);
+ /* remounting is not allowed for populated hierarchies */
+ if (root->number_of_cgroups > 1) {
+ ret = -EBUSY;
goto out_unlock;
}
- /* re-populate subsystem files */
- cgroup_populate_dir(cgrp, false, added_mask);
+ ret = rebind_subsystems(root, added_mask, removed_mask);
+ if (ret)
+ goto out_unlock;
if (opts.release_agent)
strcpy(root->release_agent_path, opts.release_agent);
@@ -1395,8 +1364,6 @@ static int cgroup_remount(struct super_block *sb, int *flags, char *data)
mutex_unlock(&cgroup_root_mutex);
mutex_unlock(&cgroup_mutex);
mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
- if (ret)
- drop_parsed_module_refcounts(opts.subsys_mask);
return ret;
}
@@ -1416,6 +1383,7 @@ static void init_cgroup_housekeeping(struct cgroup *cgrp)
INIT_LIST_HEAD(&cgrp->release_list);
INIT_LIST_HEAD(&cgrp->pidlists);
mutex_init(&cgrp->pidlist_mutex);
+ cgrp->dummy_css.cgroup = cgrp;
INIT_LIST_HEAD(&cgrp->event_list);
spin_lock_init(&cgrp->event_list_lock);
simple_xattrs_init(&cgrp->xattrs);
@@ -1431,6 +1399,7 @@ static void init_cgroup_root(struct cgroupfs_root *root)
cgrp->root = root;
RCU_INIT_POINTER(cgrp->name, &root_cgroup_name);
init_cgroup_housekeeping(cgrp);
+ idr_init(&root->cgroup_idr);
}
static int cgroup_init_root_id(struct cgroupfs_root *root, int start, int end)
@@ -1503,7 +1472,6 @@ static struct cgroupfs_root *cgroup_root_from_opts(struct cgroup_sb_opts *opts)
*/
root->subsys_mask = opts->subsys_mask;
root->flags = opts->flags;
- ida_init(&root->cgroup_ida);
if (opts->release_agent)
strcpy(root->release_agent_path, opts->release_agent);
if (opts->name)
@@ -1519,7 +1487,7 @@ static void cgroup_free_root(struct cgroupfs_root *root)
/* hierarhcy ID shoulid already have been released */
WARN_ON_ONCE(root->hierarchy_id);
- ida_destroy(&root->cgroup_ida);
+ idr_destroy(&root->cgroup_idr);
kfree(root);
}
}
@@ -1584,7 +1552,9 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
int ret = 0;
struct super_block *sb;
struct cgroupfs_root *new_root;
+ struct list_head tmp_links;
struct inode *inode;
+ const struct cred *cred;
/* First find the desired set of subsystems */
mutex_lock(&cgroup_mutex);
@@ -1600,7 +1570,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
new_root = cgroup_root_from_opts(&opts);
if (IS_ERR(new_root)) {
ret = PTR_ERR(new_root);
- goto drop_modules;
+ goto out_err;
}
opts.new_root = new_root;
@@ -1609,17 +1579,15 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
if (IS_ERR(sb)) {
ret = PTR_ERR(sb);
cgroup_free_root(opts.new_root);
- goto drop_modules;
+ goto out_err;
}
root = sb->s_fs_info;
BUG_ON(!root);
if (root == opts.new_root) {
/* We used the new root structure, so this is a new hierarchy */
- struct list_head tmp_links;
struct cgroup *root_cgrp = &root->top_cgroup;
struct cgroupfs_root *existing_root;
- const struct cred *cred;
int i;
struct css_set *cset;
@@ -1634,6 +1602,11 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
mutex_lock(&cgroup_mutex);
mutex_lock(&cgroup_root_mutex);
+ root_cgrp->id = idr_alloc(&root->cgroup_idr, root_cgrp,
+ 0, 1, GFP_KERNEL);
+ if (root_cgrp->id < 0)
+ goto unlock_drop;
+
/* Check for name clashes with existing mounts */
ret = -EBUSY;
if (strlen(root->name))
@@ -1657,26 +1630,37 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
if (ret)
goto unlock_drop;
+ sb->s_root->d_fsdata = root_cgrp;
+ root_cgrp->dentry = sb->s_root;
+
+ /*
+ * We're inside get_sb() and will call lookup_one_len() to
+ * create the root files, which doesn't work if SELinux is
+ * in use. The following cred dancing somehow works around
+ * it. See 2ce9738ba ("cgroupfs: use init_cred when
+ * populating new cgroupfs mount") for more details.
+ */
+ cred = override_creds(&init_cred);
+
+ ret = cgroup_addrm_files(root_cgrp, cgroup_base_files, true);
+ if (ret)
+ goto rm_base_files;
+
ret = rebind_subsystems(root, root->subsys_mask, 0);
- if (ret == -EBUSY) {
- free_cgrp_cset_links(&tmp_links);
- goto unlock_drop;
- }
+ if (ret)
+ goto rm_base_files;
+
+ revert_creds(cred);
+
/*
* There must be no failure case after here, since rebinding
* takes care of subsystems' refcounts, which are explicitly
* dropped in the failure exit path.
*/
- /* EBUSY should be the only error here */
- BUG_ON(ret);
-
list_add(&root->root_list, &cgroup_roots);
cgroup_root_count++;
- sb->s_root->d_fsdata = root_cgrp;
- root->top_cgroup.dentry = sb->s_root;
-
/* Link the top cgroup in this hierarchy into all
* the css_set objects */
write_lock(&css_set_lock);
@@ -1689,9 +1673,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
BUG_ON(!list_empty(&root_cgrp->children));
BUG_ON(root->number_of_cgroups != 1);
- cred = override_creds(&init_cred);
- cgroup_populate_dir(root_cgrp, true, root->subsys_mask);
- revert_creds(cred);
mutex_unlock(&cgroup_root_mutex);
mutex_unlock(&cgroup_mutex);
mutex_unlock(&inode->i_mutex);
@@ -1711,15 +1692,16 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
pr_warning("cgroup: new mount options do not match the existing superblock, will be ignored\n");
}
}
-
- /* no subsys rebinding, so refcounts don't change */
- drop_parsed_module_refcounts(opts.subsys_mask);
}
kfree(opts.release_agent);
kfree(opts.name);
return dget(sb->s_root);
+ rm_base_files:
+ free_cgrp_cset_links(&tmp_links);
+ cgroup_addrm_files(&root->top_cgroup, cgroup_base_files, false);
+ revert_creds(cred);
unlock_drop:
cgroup_exit_root_id(root);
mutex_unlock(&cgroup_root_mutex);
@@ -1727,8 +1709,6 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
mutex_unlock(&inode->i_mutex);
drop_new_super:
deactivate_locked_super(sb);
- drop_modules:
- drop_parsed_module_refcounts(opts.subsys_mask);
out_err:
kfree(opts.release_agent);
kfree(opts.name);
@@ -1746,6 +1726,7 @@ static void cgroup_kill_sb(struct super_block *sb) {
BUG_ON(root->number_of_cgroups != 1);
BUG_ON(!list_empty(&cgrp->children));
+ mutex_lock(&cgrp->dentry->d_inode->i_mutex);
mutex_lock(&cgroup_mutex);
mutex_lock(&cgroup_root_mutex);
@@ -1778,6 +1759,7 @@ static void cgroup_kill_sb(struct super_block *sb) {
mutex_unlock(&cgroup_root_mutex);
mutex_unlock(&cgroup_mutex);
+ mutex_unlock(&cgrp->dentry->d_inode->i_mutex);
simple_xattrs_free(&cgrp->xattrs);
@@ -1889,7 +1871,7 @@ EXPORT_SYMBOL_GPL(task_cgroup_path);
struct task_and_cgroup {
struct task_struct *task;
struct cgroup *cgrp;
- struct css_set *cg;
+ struct css_set *cset;
};
struct cgroup_taskset {
@@ -1939,18 +1921,20 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
EXPORT_SYMBOL_GPL(cgroup_taskset_next);
/**
- * cgroup_taskset_cur_cgroup - return the matching cgroup for the current task
+ * cgroup_taskset_cur_css - return the matching css for the current task
* @tset: taskset of interest
+ * @subsys_id: the ID of the target subsystem
*
- * Return the cgroup for the current (last returned) task of @tset. This
- * function must be preceded by either cgroup_taskset_first() or
- * cgroup_taskset_next().
+ * Return the css for the current (last returned) task of @tset for
+ * subsystem specified by @subsys_id. This function must be preceded by
+ * either cgroup_taskset_first() or cgroup_taskset_next().
*/
-struct cgroup *cgroup_taskset_cur_cgroup(struct cgroup_taskset *tset)
+struct cgroup_subsys_state *cgroup_taskset_cur_css(struct cgroup_taskset *tset,
+ int subsys_id)
{
- return tset->cur_cgrp;
+ return cgroup_css(tset->cur_cgrp, cgroup_subsys[subsys_id]);
}
-EXPORT_SYMBOL_GPL(cgroup_taskset_cur_cgroup);
+EXPORT_SYMBOL_GPL(cgroup_taskset_cur_css);
/**
* cgroup_taskset_size - return the number of tasks in taskset
@@ -2089,8 +2073,10 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
* step 1: check that we can legitimately attach to the cgroup.
*/
for_each_root_subsys(root, ss) {
+ struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
+
if (ss->can_attach) {
- retval = ss->can_attach(cgrp, &tset);
+ retval = ss->can_attach(css, &tset);
if (retval) {
failed_ss = ss;
goto out_cancel_attach;
@@ -2107,8 +2093,8 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
tc = flex_array_get(group, i);
old_cset = task_css_set(tc->task);
- tc->cg = find_css_set(old_cset, cgrp);
- if (!tc->cg) {
+ tc->cset = find_css_set(old_cset, cgrp);
+ if (!tc->cset) {
retval = -ENOMEM;
goto out_put_css_set_refs;
}
@@ -2121,7 +2107,7 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
*/
for (i = 0; i < group_size; i++) {
tc = flex_array_get(group, i);
- cgroup_task_migrate(tc->cgrp, tc->task, tc->cg);
+ cgroup_task_migrate(tc->cgrp, tc->task, tc->cset);
}
/* nothing is sensitive to fork() after this point. */
@@ -2129,8 +2115,10 @@ static int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk,
* step 4: do subsystem attach callbacks.
*/
for_each_root_subsys(root, ss) {
+ struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
+
if (ss->attach)
- ss->attach(cgrp, &tset);
+ ss->attach(css, &tset);
}
/*
@@ -2141,18 +2129,20 @@ out_put_css_set_refs:
if (retval) {
for (i = 0; i < group_size; i++) {
tc = flex_array_get(group, i);
- if (!tc->cg)
+ if (!tc->cset)
break;
- put_css_set(tc->cg);
+ put_css_set(tc->cset);
}
}
out_cancel_attach:
if (retval) {
for_each_root_subsys(root, ss) {
+ struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
+
if (ss == failed_ss)
break;
if (ss->cancel_attach)
- ss->cancel_attach(cgrp, &tset);
+ ss->cancel_attach(css, &tset);
}
}
out_free_group_list:
@@ -2253,9 +2243,9 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
mutex_lock(&cgroup_mutex);
for_each_active_root(root) {
- struct cgroup *from_cg = task_cgroup_from_root(from, root);
+ struct cgroup *from_cgrp = task_cgroup_from_root(from, root);
- retval = cgroup_attach_task(from_cg, tsk, false);
+ retval = cgroup_attach_task(from_cgrp, tsk, false);
if (retval)
break;
}
@@ -2265,34 +2255,38 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
}
EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
-static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
+static int cgroup_tasks_write(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 pid)
{
- return attach_task_by_pid(cgrp, pid, false);
+ return attach_task_by_pid(css->cgroup, pid, false);
}
-static int cgroup_procs_write(struct cgroup *cgrp, struct cftype *cft, u64 tgid)
+static int cgroup_procs_write(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 tgid)
{
- return attach_task_by_pid(cgrp, tgid, true);
+ return attach_task_by_pid(css->cgroup, tgid, true);
}
-static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
- const char *buffer)
+static int cgroup_release_agent_write(struct cgroup_subsys_state *css,
+ struct cftype *cft, const char *buffer)
{
- BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
+ BUILD_BUG_ON(sizeof(css->cgroup->root->release_agent_path) < PATH_MAX);
if (strlen(buffer) >= PATH_MAX)
return -EINVAL;
- if (!cgroup_lock_live_group(cgrp))
+ if (!cgroup_lock_live_group(css->cgroup))
return -ENODEV;
mutex_lock(&cgroup_root_mutex);
- strcpy(cgrp->root->release_agent_path, buffer);
+ strcpy(css->cgroup->root->release_agent_path, buffer);
mutex_unlock(&cgroup_root_mutex);
mutex_unlock(&cgroup_mutex);
return 0;
}
-static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *seq)
+static int cgroup_release_agent_show(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *seq)
{
+ struct cgroup *cgrp = css->cgroup;
+
if (!cgroup_lock_live_group(cgrp))
return -ENODEV;
seq_puts(seq, cgrp->root->release_agent_path);
@@ -2301,20 +2295,20 @@ static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
return 0;
}
-static int cgroup_sane_behavior_show(struct cgroup *cgrp, struct cftype *cft,
- struct seq_file *seq)
+static int cgroup_sane_behavior_show(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *seq)
{
- seq_printf(seq, "%d\n", cgroup_sane_behavior(cgrp));
+ seq_printf(seq, "%d\n", cgroup_sane_behavior(css->cgroup));
return 0;
}
/* A buffer size big enough for numbers or short strings */
#define CGROUP_LOCAL_BUFFER_SIZE 64
-static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
- struct file *file,
- const char __user *userbuf,
- size_t nbytes, loff_t *unused_ppos)
+static ssize_t cgroup_write_X64(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct file *file,
+ const char __user *userbuf, size_t nbytes,
+ loff_t *unused_ppos)
{
char buffer[CGROUP_LOCAL_BUFFER_SIZE];
int retval = 0;
@@ -2332,22 +2326,22 @@ static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
u64 val = simple_strtoull(strstrip(buffer), &end, 0);
if (*end)
return -EINVAL;
- retval = cft->write_u64(cgrp, cft, val);
+ retval = cft->write_u64(css, cft, val);
} else {
s64 val = simple_strtoll(strstrip(buffer), &end, 0);
if (*end)
return -EINVAL;
- retval = cft->write_s64(cgrp, cft, val);
+ retval = cft->write_s64(css, cft, val);
}
if (!retval)
retval = nbytes;
return retval;
}
-static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
- struct file *file,
- const char __user *userbuf,
- size_t nbytes, loff_t *unused_ppos)
+static ssize_t cgroup_write_string(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct file *file,
+ const char __user *userbuf, size_t nbytes,
+ loff_t *unused_ppos)
{
char local_buffer[CGROUP_LOCAL_BUFFER_SIZE];
int retval = 0;
@@ -2370,7 +2364,7 @@ static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
}
buffer[nbytes] = 0; /* nul-terminate */
- retval = cft->write_string(cgrp, cft, strstrip(buffer));
+ retval = cft->write_string(css, cft, strstrip(buffer));
if (!retval)
retval = nbytes;
out:
@@ -2380,65 +2374,60 @@ out:
}
static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
- size_t nbytes, loff_t *ppos)
+ size_t nbytes, loff_t *ppos)
{
+ struct cfent *cfe = __d_cfe(file->f_dentry);
struct cftype *cft = __d_cft(file->f_dentry);
- struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
+ struct cgroup_subsys_state *css = cfe->css;
- if (cgroup_is_dead(cgrp))
- return -ENODEV;
if (cft->write)
- return cft->write(cgrp, cft, file, buf, nbytes, ppos);
+ return cft->write(css, cft, file, buf, nbytes, ppos);
if (cft->write_u64 || cft->write_s64)
- return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos);
+ return cgroup_write_X64(css, cft, file, buf, nbytes, ppos);
if (cft->write_string)
- return cgroup_write_string(cgrp, cft, file, buf, nbytes, ppos);
+ return cgroup_write_string(css, cft, file, buf, nbytes, ppos);
if (cft->trigger) {
- int ret = cft->trigger(cgrp, (unsigned int)cft->private);
+ int ret = cft->trigger(css, (unsigned int)cft->private);
return ret ? ret : nbytes;
}
return -EINVAL;
}
-static ssize_t cgroup_read_u64(struct cgroup *cgrp, struct cftype *cft,
- struct file *file,
- char __user *buf, size_t nbytes,
- loff_t *ppos)
+static ssize_t cgroup_read_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct file *file,
+ char __user *buf, size_t nbytes, loff_t *ppos)
{
char tmp[CGROUP_LOCAL_BUFFER_SIZE];
- u64 val = cft->read_u64(cgrp, cft);
+ u64 val = cft->read_u64(css, cft);
int len = sprintf(tmp, "%llu\n", (unsigned long long) val);
return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}
-static ssize_t cgroup_read_s64(struct cgroup *cgrp, struct cftype *cft,
- struct file *file,
- char __user *buf, size_t nbytes,
- loff_t *ppos)
+static ssize_t cgroup_read_s64(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct file *file,
+ char __user *buf, size_t nbytes, loff_t *ppos)
{
char tmp[CGROUP_LOCAL_BUFFER_SIZE];
- s64 val = cft->read_s64(cgrp, cft);
+ s64 val = cft->read_s64(css, cft);
int len = sprintf(tmp, "%lld\n", (long long) val);
return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
}
static ssize_t cgroup_file_read(struct file *file, char __user *buf,
- size_t nbytes, loff_t *ppos)
+ size_t nbytes, loff_t *ppos)
{
+ struct cfent *cfe = __d_cfe(file->f_dentry);
struct cftype *cft = __d_cft(file->f_dentry);
- struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
-
- if (cgroup_is_dead(cgrp))
- return -ENODEV;
+ struct cgroup_subsys_state *css = cfe->css;
if (cft->read)
- return cft->read(cgrp, cft, file, buf, nbytes, ppos);
+ return cft->read(css, cft, file, buf, nbytes, ppos);
if (cft->read_u64)
- return cgroup_read_u64(cgrp, cft, file, buf, nbytes, ppos);
+ return cgroup_read_u64(css, cft, file, buf, nbytes, ppos);
if (cft->read_s64)
- return cgroup_read_s64(cgrp, cft, file, buf, nbytes, ppos);
+ return cgroup_read_s64(css, cft, file, buf, nbytes, ppos);
return -EINVAL;
}
@@ -2447,11 +2436,6 @@ static ssize_t cgroup_file_read(struct file *file, char __user *buf,
* supports string->u64 maps, but can be extended in future.
*/
-struct cgroup_seqfile_state {
- struct cftype *cft;
- struct cgroup *cgroup;
-};
-
static int cgroup_map_add(struct cgroup_map_cb *cb, const char *key, u64 value)
{
struct seq_file *sf = cb->state;
@@ -2460,69 +2444,86 @@ static int cgroup_map_add(struct cgroup_map_cb *cb, const char *key, u64 value)
static int cgroup_seqfile_show(struct seq_file *m, void *arg)
{
- struct cgroup_seqfile_state *state = m->private;
- struct cftype *cft = state->cft;
+ struct cfent *cfe = m->private;
+ struct cftype *cft = cfe->type;
+ struct cgroup_subsys_state *css = cfe->css;
+
if (cft->read_map) {
struct cgroup_map_cb cb = {
.fill = cgroup_map_add,
.state = m,
};
- return cft->read_map(state->cgroup, cft, &cb);
+ return cft->read_map(css, cft, &cb);
}
- return cft->read_seq_string(state->cgroup, cft, m);
-}
-
-static int cgroup_seqfile_release(struct inode *inode, struct file *file)
-{
- struct seq_file *seq = file->private_data;
- kfree(seq->private);
- return single_release(inode, file);
+ return cft->read_seq_string(css, cft, m);
}
static const struct file_operations cgroup_seqfile_operations = {
.read = seq_read,
.write = cgroup_file_write,
.llseek = seq_lseek,
- .release = cgroup_seqfile_release,
+ .release = single_release,
};
static int cgroup_file_open(struct inode *inode, struct file *file)
{
+ struct cfent *cfe = __d_cfe(file->f_dentry);
+ struct cftype *cft = __d_cft(file->f_dentry);
+ struct cgroup *cgrp = __d_cgrp(cfe->dentry->d_parent);
+ struct cgroup_subsys_state *css;
int err;
- struct cftype *cft;
err = generic_file_open(inode, file);
if (err)
return err;
- cft = __d_cft(file->f_dentry);
- if (cft->read_map || cft->read_seq_string) {
- struct cgroup_seqfile_state *state;
+ /*
+ * If the file belongs to a subsystem, pin the css. Will be
+ * unpinned either on open failure or release. This ensures that
+ * @css stays alive for all file operations.
+ */
+ rcu_read_lock();
+ css = cgroup_css(cgrp, cft->ss);
+ if (cft->ss && !css_tryget(css))
+ css = NULL;
+ rcu_read_unlock();
- state = kzalloc(sizeof(*state), GFP_USER);
- if (!state)
- return -ENOMEM;
+ if (!css)
+ return -ENODEV;
+
+ /*
+ * @cfe->css is used by read/write/close to determine the
+ * associated css. @file->private_data would be a better place but
+ * that's already used by seqfile. Multiple accessors may use it
+ * simultaneously which is okay as the association never changes.
+ */
+ WARN_ON_ONCE(cfe->css && cfe->css != css);
+ cfe->css = css;
- state->cft = cft;
- state->cgroup = __d_cgrp(file->f_dentry->d_parent);
+ if (cft->read_map || cft->read_seq_string) {
file->f_op = &cgroup_seqfile_operations;
- err = single_open(file, cgroup_seqfile_show, state);
- if (err < 0)
- kfree(state);
- } else if (cft->open)
+ err = single_open(file, cgroup_seqfile_show, cfe);
+ } else if (cft->open) {
err = cft->open(inode, file);
- else
- err = 0;
+ }
+ if (css->ss && err)
+ css_put(css);
return err;
}
static int cgroup_file_release(struct inode *inode, struct file *file)
{
+ struct cfent *cfe = __d_cfe(file->f_dentry);
struct cftype *cft = __d_cft(file->f_dentry);
+ struct cgroup_subsys_state *css = cfe->css;
+ int ret = 0;
+
if (cft->release)
- return cft->release(inode, file);
- return 0;
+ ret = cft->release(inode, file);
+ if (css->ss)
+ css_put(css);
+ return ret;
}
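The open/release pairing above follows a general lookup-and-pin pattern; a hypothetical helper (demo_pin_css is not a real function) expressing the same idea under this patch's assumptions:

static struct cgroup_subsys_state *
demo_pin_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	/* look the css up under RCU, then pin it before dropping the lock */
	rcu_read_lock();
	css = cgroup_css(cgrp, ss);
	if (css && !css_tryget(css))
		css = NULL;
	rcu_read_unlock();

	return css;	/* caller must css_put() the result when done */
}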
/*
@@ -2736,8 +2737,7 @@ static umode_t cgroup_file_mode(const struct cftype *cft)
return mode;
}
-static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
- struct cftype *cft)
+static int cgroup_add_file(struct cgroup *cgrp, struct cftype *cft)
{
struct dentry *dir = cgrp->dentry;
struct cgroup *parent = __d_cgrp(dir);
@@ -2747,8 +2747,9 @@ static int cgroup_add_file(struct cgroup *cgrp, struct cgroup_subsys *subsys,
umode_t mode;
char name[MAX_CGROUP_TYPE_NAMELEN + MAX_CFTYPE_NAME + 2] = { 0 };
- if (subsys && !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) {
- strcpy(name, subsys->name);
+ if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
+ !(cgrp->root->flags & CGRP_ROOT_NOPREFIX)) {
+ strcpy(name, cft->ss->name);
strcat(name, ".");
}
strcat(name, cft->name);
@@ -2782,11 +2783,25 @@ out:
return error;
}
-static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys,
- struct cftype cfts[], bool is_add)
+/**
+ * cgroup_addrm_files - add or remove files in a cgroup directory
+ * @cgrp: the target cgroup
+ * @cfts: array of cftypes to be added or removed
+ * @is_add: whether to add or remove
+ *
+ * Depending on @is_add, add or remove files defined by @cfts on @cgrp.
+ * For removals, this function never fails. If addition fails, this
+ * function doesn't remove files already added. The caller is responsible
+ * for cleaning up.
+ */
+static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
+ bool is_add)
{
struct cftype *cft;
- int err, ret = 0;
+ int ret;
+
+ lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex);
+ lockdep_assert_held(&cgroup_mutex);
for (cft = cfts; cft->name[0] != '\0'; cft++) {
/* does cft->flags tell us to skip this file on @cgrp? */
@@ -2798,16 +2813,17 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cgroup_subsys *subsys,
continue;
if (is_add) {
- err = cgroup_add_file(cgrp, subsys, cft);
- if (err)
+ ret = cgroup_add_file(cgrp, cft);
+ if (ret) {
pr_warn("cgroup_addrm_files: failed to add %s, err=%d\n",
- cft->name, err);
- ret = err;
+ cft->name, ret);
+ return ret;
+ }
} else {
cgroup_rm_file(cgrp, cft);
}
}
- return ret;
+ return 0;
}
static void cgroup_cfts_prepare(void)
@@ -2816,28 +2832,30 @@ static void cgroup_cfts_prepare(void)
/*
* Thanks to the entanglement with vfs inode locking, we can't walk
* the existing cgroups under cgroup_mutex and create files.
- * Instead, we use cgroup_for_each_descendant_pre() and drop RCU
- * read lock before calling cgroup_addrm_files().
+ * Instead, we use css_for_each_descendant_pre() and drop RCU read
+ * lock before calling cgroup_addrm_files().
*/
mutex_lock(&cgroup_mutex);
}
-static void cgroup_cfts_commit(struct cgroup_subsys *ss,
- struct cftype *cfts, bool is_add)
+static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
__releases(&cgroup_mutex)
{
LIST_HEAD(pending);
- struct cgroup *cgrp, *root = &ss->root->top_cgroup;
+ struct cgroup_subsys *ss = cfts[0].ss;
+ struct cgroup *root = &ss->root->top_cgroup;
struct super_block *sb = ss->root->sb;
struct dentry *prev = NULL;
struct inode *inode;
+ struct cgroup_subsys_state *css;
u64 update_before;
+ int ret = 0;
/* %NULL @cfts indicates abort and don't bother if @ss isn't attached */
if (!cfts || ss->root == &cgroup_dummy_root ||
!atomic_inc_not_zero(&sb->s_active)) {
mutex_unlock(&cgroup_mutex);
- return;
+ return 0;
}
/*
@@ -2849,17 +2867,11 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss,
mutex_unlock(&cgroup_mutex);
- /* @root always needs to be updated */
- inode = root->dentry->d_inode;
- mutex_lock(&inode->i_mutex);
- mutex_lock(&cgroup_mutex);
- cgroup_addrm_files(root, ss, cfts, is_add);
- mutex_unlock(&cgroup_mutex);
- mutex_unlock(&inode->i_mutex);
-
/* add/rm files for all cgroups created before */
rcu_read_lock();
- cgroup_for_each_descendant_pre(cgrp, root) {
+ css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
+ struct cgroup *cgrp = css->cgroup;
+
if (cgroup_is_dead(cgrp))
continue;
@@ -2873,15 +2885,18 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss,
mutex_lock(&inode->i_mutex);
mutex_lock(&cgroup_mutex);
if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp))
- cgroup_addrm_files(cgrp, ss, cfts, is_add);
+ ret = cgroup_addrm_files(cgrp, cfts, is_add);
mutex_unlock(&cgroup_mutex);
mutex_unlock(&inode->i_mutex);
rcu_read_lock();
+ if (ret)
+ break;
}
rcu_read_unlock();
dput(prev);
deactivate_super(sb);
+ return ret;
}
/**
@@ -2901,49 +2916,56 @@ static void cgroup_cfts_commit(struct cgroup_subsys *ss,
int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
{
struct cftype_set *set;
+ struct cftype *cft;
+ int ret;
set = kzalloc(sizeof(*set), GFP_KERNEL);
if (!set)
return -ENOMEM;
+ for (cft = cfts; cft->name[0] != '\0'; cft++)
+ cft->ss = ss;
+
cgroup_cfts_prepare();
set->cfts = cfts;
list_add_tail(&set->node, &ss->cftsets);
- cgroup_cfts_commit(ss, cfts, true);
-
- return 0;
+ ret = cgroup_cfts_commit(cfts, true);
+ if (ret)
+ cgroup_rm_cftypes(cfts);
+ return ret;
}
EXPORT_SYMBOL_GPL(cgroup_add_cftypes);
/**
* cgroup_rm_cftypes - remove an array of cftypes from a subsystem
- * @ss: target cgroup subsystem
* @cfts: zero-length name terminated array of cftypes
*
- * Unregister @cfts from @ss. Files described by @cfts are removed from
- * all existing cgroups to which @ss is attached and all future cgroups
- * won't have them either. This function can be called anytime whether @ss
- * is attached or not.
+ * Unregister @cfts. Files described by @cfts are removed from all
+ * existing cgroups and all future cgroups won't have them either. This
+ * function can be called anytime whether @cfts' subsys is attached or not.
*
* Returns 0 on successful unregistration, -ENOENT if @cfts is not
- * registered with @ss.
+ * registered.
*/
-int cgroup_rm_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+int cgroup_rm_cftypes(struct cftype *cfts)
{
struct cftype_set *set;
+ if (!cfts || !cfts[0].ss)
+ return -ENOENT;
+
cgroup_cfts_prepare();
- list_for_each_entry(set, &ss->cftsets, node) {
+ list_for_each_entry(set, &cfts[0].ss->cftsets, node) {
if (set->cfts == cfts) {
list_del(&set->node);
kfree(set);
- cgroup_cfts_commit(ss, cfts, false);
+ cgroup_cfts_commit(cfts, false);
return 0;
}
}
- cgroup_cfts_commit(ss, NULL, false);
+ cgroup_cfts_commit(NULL, false);
return -ENOENT;
}
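A usage sketch, assuming a hypothetical demo_subsys and the handlers sketched earlier: cgroup_add_cftypes() can now fail (and unregisters itself on failure), and cgroup_rm_cftypes() no longer takes the subsystem because cft->ss records it at add time.

static struct cftype demo_files[] = {
	{
		.name = "weight",
		.read_u64 = demo_weight_read,
		.write_u64 = demo_weight_write,
	},
	{ }	/* zero-length name terminates the array */
};

static int __init demo_register(void)
{
	return cgroup_add_cftypes(&demo_subsys, demo_files);
}

static void demo_unregister(void)
{
	cgroup_rm_cftypes(demo_files);	/* subsystem looked up via cft->ss */
}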
@@ -2966,34 +2988,10 @@ int cgroup_task_count(const struct cgroup *cgrp)
}
/*
- * Advance a list_head iterator. The iterator should be positioned at
- * the start of a css_set
- */
-static void cgroup_advance_iter(struct cgroup *cgrp, struct cgroup_iter *it)
-{
- struct list_head *l = it->cset_link;
- struct cgrp_cset_link *link;
- struct css_set *cset;
-
- /* Advance to the next non-empty css_set */
- do {
- l = l->next;
- if (l == &cgrp->cset_links) {
- it->cset_link = NULL;
- return;
- }
- link = list_entry(l, struct cgrp_cset_link, cset_link);
- cset = link->cset;
- } while (list_empty(&cset->tasks));
- it->cset_link = l;
- it->task = cset->tasks.next;
-}
-
-/*
- * To reduce the fork() overhead for systems that are not actually
- * using their cgroups capability, we don't maintain the lists running
- * through each css_set to its tasks until we see the list actually
- * used - in other words after the first call to cgroup_iter_start().
+ * To reduce the fork() overhead for systems that are not actually using
+ * their cgroups capability, we don't maintain the lists running through
+ * each css_set to its tasks until we see the list actually used - in other
+ * words after the first call to css_task_iter_start().
*/
static void cgroup_enable_task_cg_lists(void)
{
@@ -3024,16 +3022,21 @@ static void cgroup_enable_task_cg_lists(void)
}
/**
- * cgroup_next_sibling - find the next sibling of a given cgroup
- * @pos: the current cgroup
+ * css_next_child - find the next child of a given css
+ * @pos_css: the current position (%NULL to initiate traversal)
+ * @parent_css: css whose children to walk
*
- * This function returns the next sibling of @pos and should be called
- * under RCU read lock. The only requirement is that @pos is accessible.
- * The next sibling is guaranteed to be returned regardless of @pos's
- * state.
+ * This function returns the next child of @parent_css and should be called
+ * under RCU read lock. The only requirement is that @parent_css and
+ * @pos_css are accessible. The next sibling is guaranteed to be returned
+ * regardless of their states.
*/
-struct cgroup *cgroup_next_sibling(struct cgroup *pos)
+struct cgroup_subsys_state *
+css_next_child(struct cgroup_subsys_state *pos_css,
+ struct cgroup_subsys_state *parent_css)
{
+ struct cgroup *pos = pos_css ? pos_css->cgroup : NULL;
+ struct cgroup *cgrp = parent_css->cgroup;
struct cgroup *next;
WARN_ON_ONCE(!rcu_read_lock_held());
@@ -3048,78 +3051,81 @@ struct cgroup *cgroup_next_sibling(struct cgroup *pos)
* safe to dereference from this RCU critical section. If
* ->sibling.next is inaccessible, cgroup_is_dead() is guaranteed
* to be visible as %true here.
+ *
+ * If @pos is dead, its next pointer can't be dereferenced;
+ * however, as each cgroup is given a monotonically increasing
+ * unique serial number and always appended to the sibling list,
+ * the next one can be found by walking the parent's children until
+ * we see a cgroup with higher serial number than @pos's. While
+ * this path can be slower, it's taken only when either the current
+ * cgroup is removed or iteration and removal race.
*/
- if (likely(!cgroup_is_dead(pos))) {
+ if (!pos) {
+ next = list_entry_rcu(cgrp->children.next, struct cgroup, sibling);
+ } else if (likely(!cgroup_is_dead(pos))) {
next = list_entry_rcu(pos->sibling.next, struct cgroup, sibling);
- if (&next->sibling != &pos->parent->children)
- return next;
- return NULL;
+ } else {
+ list_for_each_entry_rcu(next, &cgrp->children, sibling)
+ if (next->serial_nr > pos->serial_nr)
+ break;
}
- /*
- * Can't dereference the next pointer. Each cgroup is given a
- * monotonically increasing unique serial number and always
- * appended to the sibling list, so the next one can be found by
- * walking the parent's children until we see a cgroup with higher
- * serial number than @pos's.
- *
- * While this path can be slow, it's taken only when either the
- * current cgroup is removed or iteration and removal race.
- */
- list_for_each_entry_rcu(next, &pos->parent->children, sibling)
- if (next->serial_nr > pos->serial_nr)
- return next;
- return NULL;
+ if (&next->sibling == &cgrp->children)
+ return NULL;
+
+ return cgroup_css(next, parent_css->ss);
}
-EXPORT_SYMBOL_GPL(cgroup_next_sibling);
+EXPORT_SYMBOL_GPL(css_next_child);
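A minimal sketch of the new child walk: css_for_each_child() is the iteration macro built on css_next_child(), and the whole walk sits inside one RCU read section. demo_count_children is hypothetical.

static int demo_count_children(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_subsys_state *child;
	int n = 0;

	rcu_read_lock();
	css_for_each_child(child, parent_css)
		n++;
	rcu_read_unlock();

	return n;
}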
/**
- * cgroup_next_descendant_pre - find the next descendant for pre-order walk
+ * css_next_descendant_pre - find the next descendant for pre-order walk
* @pos: the current position (%NULL to initiate traversal)
- * @cgroup: cgroup whose descendants to walk
+ * @root: css whose descendants to walk
*
- * To be used by cgroup_for_each_descendant_pre(). Find the next
- * descendant to visit for pre-order traversal of @cgroup's descendants.
+ * To be used by css_for_each_descendant_pre(). Find the next descendant
+ * to visit for pre-order traversal of @root's descendants. @root is
+ * included in the iteration and the first node to be visited.
*
* While this function requires RCU read locking, it doesn't require the
* whole traversal to be contained in a single RCU critical section. This
* function will return the correct next descendant as long as both @pos
- * and @cgroup are accessible and @pos is a descendant of @cgroup.
+ * and @root are accessible and @pos is a descendant of @root.
*/
-struct cgroup *cgroup_next_descendant_pre(struct cgroup *pos,
- struct cgroup *cgroup)
+struct cgroup_subsys_state *
+css_next_descendant_pre(struct cgroup_subsys_state *pos,
+ struct cgroup_subsys_state *root)
{
- struct cgroup *next;
+ struct cgroup_subsys_state *next;
WARN_ON_ONCE(!rcu_read_lock_held());
- /* if first iteration, pretend we just visited @cgroup */
+ /* if first iteration, visit @root */
if (!pos)
- pos = cgroup;
+ return root;
/* visit the first child if exists */
- next = list_first_or_null_rcu(&pos->children, struct cgroup, sibling);
+ next = css_next_child(NULL, pos);
if (next)
return next;
/* no child, visit my or the closest ancestor's next sibling */
- while (pos != cgroup) {
- next = cgroup_next_sibling(pos);
+ while (pos != root) {
+ next = css_next_child(pos, css_parent(pos));
if (next)
return next;
- pos = pos->parent;
+ pos = css_parent(pos);
}
return NULL;
}
-EXPORT_SYMBOL_GPL(cgroup_next_descendant_pre);
+EXPORT_SYMBOL_GPL(css_next_descendant_pre);
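Pre-order traversal now starts at @root itself, so callers that used to special-case the top cgroup can drop that. A hypothetical walker:

static void demo_walk_subtree(struct cgroup_subsys_state *root_css)
{
	struct cgroup_subsys_state *pos;

	rcu_read_lock();
	css_for_each_descendant_pre(pos, root_css) {
		/* @root_css is the first position visited under the new rule */
		pr_info("demo: visiting %s\n", cgroup_name(pos->cgroup));
	}
	rcu_read_unlock();
}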
/**
- * cgroup_rightmost_descendant - return the rightmost descendant of a cgroup
- * @pos: cgroup of interest
+ * css_rightmost_descendant - return the rightmost descendant of a css
+ * @pos: css of interest
*
- * Return the rightmost descendant of @pos. If there's no descendant,
- * @pos is returned. This can be used during pre-order traversal to skip
+ * Return the rightmost descendant of @pos. If there's no descendant, @pos
+ * is returned. This can be used during pre-order traversal to skip
* subtree of @pos.
*
* While this function requires RCU read locking, it doesn't require the
@@ -3127,9 +3133,10 @@ EXPORT_SYMBOL_GPL(cgroup_next_descendant_pre);
* function will return the correct rightmost descendant as long as @pos is
* accessible.
*/
-struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos)
+struct cgroup_subsys_state *
+css_rightmost_descendant(struct cgroup_subsys_state *pos)
{
- struct cgroup *last, *tmp;
+ struct cgroup_subsys_state *last, *tmp;
WARN_ON_ONCE(!rcu_read_lock_held());
@@ -3137,82 +3144,138 @@ struct cgroup *cgroup_rightmost_descendant(struct cgroup *pos)
last = pos;
/* ->prev isn't RCU safe, walk ->next till the end */
pos = NULL;
- list_for_each_entry_rcu(tmp, &last->children, sibling)
+ css_for_each_child(tmp, last)
pos = tmp;
} while (pos);
return last;
}
-EXPORT_SYMBOL_GPL(cgroup_rightmost_descendant);
+EXPORT_SYMBOL_GPL(css_rightmost_descendant);
-static struct cgroup *cgroup_leftmost_descendant(struct cgroup *pos)
+static struct cgroup_subsys_state *
+css_leftmost_descendant(struct cgroup_subsys_state *pos)
{
- struct cgroup *last;
+ struct cgroup_subsys_state *last;
do {
last = pos;
- pos = list_first_or_null_rcu(&pos->children, struct cgroup,
- sibling);
+ pos = css_next_child(NULL, pos);
} while (pos);
return last;
}
/**
- * cgroup_next_descendant_post - find the next descendant for post-order walk
+ * css_next_descendant_post - find the next descendant for post-order walk
* @pos: the current position (%NULL to initiate traversal)
- * @cgroup: cgroup whose descendants to walk
+ * @root: css whose descendants to walk
*
- * To be used by cgroup_for_each_descendant_post(). Find the next
- * descendant to visit for post-order traversal of @cgroup's descendants.
+ * To be used by css_for_each_descendant_post(). Find the next descendant
+ * to visit for post-order traversal of @root's descendants. @root is
+ * included in the iteration and the last node to be visited.
*
* While this function requires RCU read locking, it doesn't require the
* whole traversal to be contained in a single RCU critical section. This
* function will return the correct next descendant as long as both @pos
 * and @root are accessible and @pos is a descendant of @root.
*/
-struct cgroup *cgroup_next_descendant_post(struct cgroup *pos,
- struct cgroup *cgroup)
+struct cgroup_subsys_state *
+css_next_descendant_post(struct cgroup_subsys_state *pos,
+ struct cgroup_subsys_state *root)
{
- struct cgroup *next;
+ struct cgroup_subsys_state *next;
WARN_ON_ONCE(!rcu_read_lock_held());
/* if first iteration, visit the leftmost descendant */
if (!pos) {
- next = cgroup_leftmost_descendant(cgroup);
- return next != cgroup ? next : NULL;
+ next = css_leftmost_descendant(root);
+ return next != root ? next : NULL;
}
+ /* if we visited @root, we're done */
+ if (pos == root)
+ return NULL;
+
/* if there's an unvisited sibling, visit its leftmost descendant */
- next = cgroup_next_sibling(pos);
+ next = css_next_child(pos, css_parent(pos));
if (next)
- return cgroup_leftmost_descendant(next);
+ return css_leftmost_descendant(next);
/* no sibling left, visit parent */
- next = pos->parent;
- return next != cgroup ? next : NULL;
+ return css_parent(pos);
+}
+EXPORT_SYMBOL_GPL(css_next_descendant_post);
+
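Post-order traversal mirrors the pre-order walk: children are visited before their parents and @root comes last, which is the natural order for teardown. A hypothetical sketch (real teardown work would obviously do more than log):

static void demo_teardown_subtree(struct cgroup_subsys_state *root_css)
{
	struct cgroup_subsys_state *pos;

	rcu_read_lock();
	css_for_each_descendant_post(pos, root_css) {
		/* every child is seen before its parent; @root_css is last */
		pr_info("demo: tearing down %s\n", cgroup_name(pos->cgroup));
	}
	rcu_read_unlock();
}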
+/**
+ * css_advance_task_iter - advance a task iterator to the next css_set
+ * @it: the iterator to advance
+ *
+ * Advance @it to the next css_set to walk.
+ */
+static void css_advance_task_iter(struct css_task_iter *it)
+{
+ struct list_head *l = it->cset_link;
+ struct cgrp_cset_link *link;
+ struct css_set *cset;
+
+ /* Advance to the next non-empty css_set */
+ do {
+ l = l->next;
+ if (l == &it->origin_css->cgroup->cset_links) {
+ it->cset_link = NULL;
+ return;
+ }
+ link = list_entry(l, struct cgrp_cset_link, cset_link);
+ cset = link->cset;
+ } while (list_empty(&cset->tasks));
+ it->cset_link = l;
+ it->task = cset->tasks.next;
}
-EXPORT_SYMBOL_GPL(cgroup_next_descendant_post);
-void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it)
+/**
+ * css_task_iter_start - initiate task iteration
+ * @css: the css to walk tasks of
+ * @it: the task iterator to use
+ *
+ * Initiate iteration through the tasks of @css. The caller can call
+ * css_task_iter_next() to walk through the tasks until the function
+ * returns NULL. On completion of iteration, css_task_iter_end() must be
+ * called.
+ *
+ * Note that this function acquires a lock which is released when the
+ * iteration finishes. The caller can't sleep while iteration is in
+ * progress.
+ */
+void css_task_iter_start(struct cgroup_subsys_state *css,
+ struct css_task_iter *it)
__acquires(css_set_lock)
{
/*
- * The first time anyone tries to iterate across a cgroup,
- * we need to enable the list linking each css_set to its
- * tasks, and fix up all existing tasks.
+ * The first time anyone tries to iterate across a css, we need to
+ * enable the list linking each css_set to its tasks, and fix up
+ * all existing tasks.
*/
if (!use_task_css_set_links)
cgroup_enable_task_cg_lists();
read_lock(&css_set_lock);
- it->cset_link = &cgrp->cset_links;
- cgroup_advance_iter(cgrp, it);
+
+ it->origin_css = css;
+ it->cset_link = &css->cgroup->cset_links;
+
+ css_advance_task_iter(it);
}
-struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
- struct cgroup_iter *it)
+/**
+ * css_task_iter_next - return the next task for the iterator
+ * @it: the task iterator being iterated
+ *
+ * The "next" function for task iteration. @it should have been
+ * initialized via css_task_iter_start(). Returns NULL when the iteration
+ * reaches the end.
+ */
+struct task_struct *css_task_iter_next(struct css_task_iter *it)
{
struct task_struct *res;
struct list_head *l = it->task;
@@ -3226,16 +3289,24 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp,
l = l->next;
link = list_entry(it->cset_link, struct cgrp_cset_link, cset_link);
if (l == &link->cset->tasks) {
- /* We reached the end of this task list - move on to
- * the next cg_cgroup_link */
- cgroup_advance_iter(cgrp, it);
+ /*
+ * We reached the end of this task list - move on to the
+ * next cgrp_cset_link.
+ */
+ css_advance_task_iter(it);
} else {
it->task = l;
}
return res;
}
-void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it)
+/**
+ * css_task_iter_end - finish task iteration
+ * @it: the task iterator to finish
+ *
+ * Finish task iteration started by css_task_iter_start().
+ */
+void css_task_iter_end(struct css_task_iter *it)
__releases(css_set_lock)
{
read_unlock(&css_set_lock);
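The renamed iterator is used the same way as before, just keyed by css; a hypothetical caller (css_set_lock is read-held between start and end, so the loop body must not sleep):

static int demo_count_tasks(struct cgroup_subsys_state *css)
{
	struct css_task_iter it;
	struct task_struct *task;
	int n = 0;

	css_task_iter_start(css, &it);
	while ((task = css_task_iter_next(&it)))
		n++;
	css_task_iter_end(&it);

	return n;
}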
@@ -3276,46 +3347,49 @@ static inline int started_after(void *p1, void *p2)
}
/**
- * cgroup_scan_tasks - iterate though all the tasks in a cgroup
- * @scan: struct cgroup_scanner containing arguments for the scan
+ * css_scan_tasks - iterate though all the tasks in a css
+ * @css: the css to iterate tasks of
+ * @test: optional test callback
+ * @process: process callback
+ * @data: data passed to @test and @process
+ * @heap: optional pre-allocated heap used for task iteration
+ *
+ * Iterate through all the tasks in @css, calling @test for each, and if it
+ * returns %true, call @process for it also.
*
- * Arguments include pointers to callback functions test_task() and
- * process_task().
- * Iterate through all the tasks in a cgroup, calling test_task() for each,
- * and if it returns true, call process_task() for it also.
- * The test_task pointer may be NULL, meaning always true (select all tasks).
- * Effectively duplicates cgroup_iter_{start,next,end}()
- * but does not lock css_set_lock for the call to process_task().
- * The struct cgroup_scanner may be embedded in any structure of the caller's
- * creation.
- * It is guaranteed that process_task() will act on every task that
- * is a member of the cgroup for the duration of this call. This
- * function may or may not call process_task() for tasks that exit
- * or move to a different cgroup during the call, or are forked or
- * move into the cgroup during the call.
+ * @test may be NULL, meaning always true (select all tasks), which
+ * effectively duplicates css_task_iter_{start,next,end}() but does not
+ * lock css_set_lock for the call to @process.
*
- * Note that test_task() may be called with locks held, and may in some
- * situations be called multiple times for the same task, so it should
- * be cheap.
- * If the heap pointer in the struct cgroup_scanner is non-NULL, a heap has been
- * pre-allocated and will be used for heap operations (and its "gt" member will
- * be overwritten), else a temporary heap will be used (allocation of which
- * may cause this function to fail).
+ * It is guaranteed that @process will act on every task that is a member
+ * of @css for the duration of this call. This function may or may not
+ * call @process for tasks that exit or move to a different css during the
+ * call, or are forked or move into the css during the call.
+ *
+ * Note that @test may be called with locks held, and may in some
+ * situations be called multiple times for the same task, so it should be
+ * cheap.
+ *
+ * If @heap is non-NULL, a heap has been pre-allocated and will be used for
+ * heap operations (and its "gt" member will be overwritten), else a
+ * temporary heap will be used (allocation of which may cause this function
+ * to fail).
*/
-int cgroup_scan_tasks(struct cgroup_scanner *scan)
+int css_scan_tasks(struct cgroup_subsys_state *css,
+ bool (*test)(struct task_struct *, void *),
+ void (*process)(struct task_struct *, void *),
+ void *data, struct ptr_heap *heap)
{
int retval, i;
- struct cgroup_iter it;
+ struct css_task_iter it;
struct task_struct *p, *dropped;
/* Never dereference latest_task, since it's not refcounted */
struct task_struct *latest_task = NULL;
struct ptr_heap tmp_heap;
- struct ptr_heap *heap;
struct timespec latest_time = { 0, 0 };
- if (scan->heap) {
+ if (heap) {
/* The caller supplied our heap and pre-allocated its memory */
- heap = scan->heap;
heap->gt = &started_after;
} else {
/* We need to allocate our own heap memory */
@@ -3328,25 +3402,24 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
again:
/*
- * Scan tasks in the cgroup, using the scanner's "test_task" callback
- * to determine which are of interest, and using the scanner's
- * "process_task" callback to process any of them that need an update.
- * Since we don't want to hold any locks during the task updates,
- * gather tasks to be processed in a heap structure.
- * The heap is sorted by descending task start time.
- * If the statically-sized heap fills up, we overflow tasks that
- * started later, and in future iterations only consider tasks that
- * started after the latest task in the previous pass. This
+ * Scan tasks in the css, using the @test callback to determine
+	 * which are of interest, and invoking the @process callback on the
+ * ones which need an update. Since we don't want to hold any
+ * locks during the task updates, gather tasks to be processed in a
+ * heap structure. The heap is sorted by descending task start
+ * time. If the statically-sized heap fills up, we overflow tasks
+ * that started later, and in future iterations only consider tasks
+ * that started after the latest task in the previous pass. This
* guarantees forward progress and that we don't miss any tasks.
*/
heap->size = 0;
- cgroup_iter_start(scan->cg, &it);
- while ((p = cgroup_iter_next(scan->cg, &it))) {
+ css_task_iter_start(css, &it);
+ while ((p = css_task_iter_next(&it))) {
/*
* Only affect tasks that qualify per the caller's callback,
* if he provided one
*/
- if (scan->test_task && !scan->test_task(p, scan))
+ if (test && !test(p, data))
continue;
/*
* Only process tasks that started after the last task
@@ -3374,7 +3447,7 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
* the heap and wasn't inserted
*/
}
- cgroup_iter_end(scan->cg, &it);
+ css_task_iter_end(&it);
if (heap->size) {
for (i = 0; i < heap->size; i++) {
@@ -3384,7 +3457,7 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
latest_task = q;
}
/* Process the task per the caller's callback */
- scan->process_task(q, scan);
+ process(q, data);
put_task_struct(q);
}
/*
@@ -3401,10 +3474,9 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
return 0;
}
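With the cgroup_scanner struct gone, callers pass the callbacks and a data cookie directly. A hypothetical caller that counts non-kernel-thread members (the demo_* names are invented):

static bool demo_test(struct task_struct *task, void *data)
{
	return !(task->flags & PF_KTHREAD);	/* skip kernel threads */
}

static void demo_process(struct task_struct *task, void *data)
{
	int *count = data;

	(*count)++;		/* called without css_set_lock held */
}

static int demo_count_user_tasks(struct cgroup_subsys_state *css)
{
	int count = 0;
	int ret;

	ret = css_scan_tasks(css, demo_test, demo_process, &count, NULL);
	return ret ? ret : count;
}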
-static void cgroup_transfer_one_task(struct task_struct *task,
- struct cgroup_scanner *scan)
+static void cgroup_transfer_one_task(struct task_struct *task, void *data)
{
- struct cgroup *new_cgroup = scan->data;
+ struct cgroup *new_cgroup = data;
mutex_lock(&cgroup_mutex);
cgroup_attach_task(new_cgroup, task, false);
@@ -3418,15 +3490,8 @@ static void cgroup_transfer_one_task(struct task_struct *task,
*/
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
{
- struct cgroup_scanner scan;
-
- scan.cg = from;
- scan.test_task = NULL; /* select all tasks in cgroup */
- scan.process_task = cgroup_transfer_one_task;
- scan.heap = NULL;
- scan.data = to;
-
- return cgroup_scan_tasks(&scan);
+ return css_scan_tasks(&from->dummy_css, NULL, cgroup_transfer_one_task,
+ to, NULL);
}
/*
@@ -3468,7 +3533,7 @@ struct cgroup_pidlist {
/* pointer to the cgroup we belong to, for list removal purposes */
struct cgroup *owner;
/* protects the other fields */
- struct rw_semaphore mutex;
+ struct rw_semaphore rwsem;
};
/*
@@ -3541,7 +3606,7 @@ static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
struct pid_namespace *ns = task_active_pid_ns(current);
/*
- * We can't drop the pidlist_mutex before taking the l->mutex in case
+ * We can't drop the pidlist_mutex before taking the l->rwsem in case
* the last ref-holder is trying to remove l from the list at the same
* time. Holding the pidlist_mutex precludes somebody taking whichever
* list we find out from under us - compare release_pid_array().
@@ -3550,7 +3615,7 @@ static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
list_for_each_entry(l, &cgrp->pidlists, links) {
if (l->key.type == type && l->key.ns == ns) {
/* make sure l doesn't vanish out from under us */
- down_write(&l->mutex);
+ down_write(&l->rwsem);
mutex_unlock(&cgrp->pidlist_mutex);
return l;
}
@@ -3561,8 +3626,8 @@ static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
mutex_unlock(&cgrp->pidlist_mutex);
return l;
}
- init_rwsem(&l->mutex);
- down_write(&l->mutex);
+ init_rwsem(&l->rwsem);
+ down_write(&l->rwsem);
l->key.type = type;
l->key.ns = get_pid_ns(ns);
l->owner = cgrp;
@@ -3580,7 +3645,7 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
pid_t *array;
int length;
int pid, n = 0; /* used for populating the array */
- struct cgroup_iter it;
+ struct css_task_iter it;
struct task_struct *tsk;
struct cgroup_pidlist *l;
@@ -3595,8 +3660,8 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
if (!array)
return -ENOMEM;
/* now, populate the array */
- cgroup_iter_start(cgrp, &it);
- while ((tsk = cgroup_iter_next(cgrp, &it))) {
+ css_task_iter_start(&cgrp->dummy_css, &it);
+ while ((tsk = css_task_iter_next(&it))) {
if (unlikely(n == length))
break;
/* get tgid or pid for procs or tasks file respectively */
@@ -3607,7 +3672,7 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
if (pid > 0) /* make sure to only use valid results */
array[n++] = pid;
}
- cgroup_iter_end(cgrp, &it);
+ css_task_iter_end(&it);
length = n;
/* now sort & (if procs) strip out duplicates */
sort(array, length, sizeof(pid_t), cmppid, NULL);
@@ -3623,7 +3688,7 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
l->list = array;
l->length = length;
l->use_count++;
- up_write(&l->mutex);
+ up_write(&l->rwsem);
*lp = l;
return 0;
}
@@ -3641,7 +3706,7 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
{
int ret = -EINVAL;
struct cgroup *cgrp;
- struct cgroup_iter it;
+ struct css_task_iter it;
struct task_struct *tsk;
/*
@@ -3655,8 +3720,8 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
ret = 0;
cgrp = dentry->d_fsdata;
- cgroup_iter_start(cgrp, &it);
- while ((tsk = cgroup_iter_next(cgrp, &it))) {
+ css_task_iter_start(&cgrp->dummy_css, &it);
+ while ((tsk = css_task_iter_next(&it))) {
switch (tsk->state) {
case TASK_RUNNING:
stats->nr_running++;
@@ -3676,7 +3741,7 @@ int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry)
break;
}
}
- cgroup_iter_end(cgrp, &it);
+ css_task_iter_end(&it);
err:
return ret;
@@ -3701,7 +3766,7 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
int index = 0, pid = *pos;
int *iter;
- down_read(&l->mutex);
+ down_read(&l->rwsem);
if (pid) {
int end = l->length;
@@ -3728,7 +3793,7 @@ static void *cgroup_pidlist_start(struct seq_file *s, loff_t *pos)
static void cgroup_pidlist_stop(struct seq_file *s, void *v)
{
struct cgroup_pidlist *l = s->private;
- up_read(&l->mutex);
+ up_read(&l->rwsem);
}
static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
@@ -3774,7 +3839,7 @@ static void cgroup_release_pid_array(struct cgroup_pidlist *l)
* pidlist_mutex, we have to take pidlist_mutex first.
*/
mutex_lock(&l->owner->pidlist_mutex);
- down_write(&l->mutex);
+ down_write(&l->rwsem);
BUG_ON(!l->use_count);
if (!--l->use_count) {
/* we're the last user if refcount is 0; remove and free */
@@ -3782,12 +3847,12 @@ static void cgroup_release_pid_array(struct cgroup_pidlist *l)
mutex_unlock(&l->owner->pidlist_mutex);
pidlist_free(l->list);
put_pid_ns(l->key.ns);
- up_write(&l->mutex);
+ up_write(&l->rwsem);
kfree(l);
return;
}
mutex_unlock(&l->owner->pidlist_mutex);
- up_write(&l->mutex);
+ up_write(&l->rwsem);
}
static int cgroup_pidlist_release(struct inode *inode, struct file *file)
@@ -3851,21 +3916,20 @@ static int cgroup_procs_open(struct inode *unused, struct file *file)
return cgroup_pidlist_open(file, CGROUP_FILE_PROCS);
}
-static u64 cgroup_read_notify_on_release(struct cgroup *cgrp,
- struct cftype *cft)
+static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- return notify_on_release(cgrp);
+ return notify_on_release(css->cgroup);
}
-static int cgroup_write_notify_on_release(struct cgroup *cgrp,
- struct cftype *cft,
- u64 val)
+static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 val)
{
- clear_bit(CGRP_RELEASABLE, &cgrp->flags);
+ clear_bit(CGRP_RELEASABLE, &css->cgroup->flags);
if (val)
- set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
+ set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
else
- clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
+ clear_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags);
return 0;
}
@@ -3895,18 +3959,18 @@ static void cgroup_event_remove(struct work_struct *work)
{
struct cgroup_event *event = container_of(work, struct cgroup_event,
remove);
- struct cgroup *cgrp = event->cgrp;
+ struct cgroup_subsys_state *css = event->css;
remove_wait_queue(event->wqh, &event->wait);
- event->cft->unregister_event(cgrp, event->cft, event->eventfd);
+ event->cft->unregister_event(css, event->cft, event->eventfd);
/* Notify userspace the event is going away. */
eventfd_signal(event->eventfd, 1);
eventfd_ctx_put(event->eventfd);
kfree(event);
- cgroup_dput(cgrp);
+ css_put(css);
}
/*
@@ -3919,7 +3983,7 @@ static int cgroup_event_wake(wait_queue_t *wait, unsigned mode,
{
struct cgroup_event *event = container_of(wait,
struct cgroup_event, wait);
- struct cgroup *cgrp = event->cgrp;
+ struct cgroup *cgrp = event->css->cgroup;
unsigned long flags = (unsigned long)key;
if (flags & POLLHUP) {
@@ -3963,14 +4027,15 @@ static void cgroup_event_ptable_queue_proc(struct file *file,
* Input must be in format '<event_fd> <control_fd> <args>'.
* Interpretation of args is defined by control file implementation.
*/
-static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
- const char *buffer)
+static int cgroup_write_event_control(struct cgroup_subsys_state *dummy_css,
+ struct cftype *cft, const char *buffer)
{
- struct cgroup_event *event = NULL;
- struct cgroup *cgrp_cfile;
+ struct cgroup *cgrp = dummy_css->cgroup;
+ struct cgroup_event *event;
+ struct cgroup_subsys_state *cfile_css;
unsigned int efd, cfd;
- struct file *efile = NULL;
- struct file *cfile = NULL;
+ struct file *efile;
+ struct file *cfile;
char *endp;
int ret;
@@ -3987,7 +4052,7 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event)
return -ENOMEM;
- event->cgrp = cgrp;
+
INIT_LIST_HEAD(&event->list);
init_poll_funcptr(&event->pt, cgroup_event_ptable_queue_proc);
init_waitqueue_func_entry(&event->wait, cgroup_event_wake);
@@ -3996,62 +4061,68 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
efile = eventfd_fget(efd);
if (IS_ERR(efile)) {
ret = PTR_ERR(efile);
- goto fail;
+ goto out_kfree;
}
event->eventfd = eventfd_ctx_fileget(efile);
if (IS_ERR(event->eventfd)) {
ret = PTR_ERR(event->eventfd);
- goto fail;
+ goto out_put_efile;
}
cfile = fget(cfd);
if (!cfile) {
ret = -EBADF;
- goto fail;
+ goto out_put_eventfd;
}
/* the process need read permission on control file */
/* AV: shouldn't we check that it's been opened for read instead? */
ret = inode_permission(file_inode(cfile), MAY_READ);
if (ret < 0)
- goto fail;
+ goto out_put_cfile;
event->cft = __file_cft(cfile);
if (IS_ERR(event->cft)) {
ret = PTR_ERR(event->cft);
- goto fail;
+ goto out_put_cfile;
+ }
+
+ if (!event->cft->ss) {
+ ret = -EBADF;
+ goto out_put_cfile;
}
/*
- * The file to be monitored must be in the same cgroup as
- * cgroup.event_control is.
+ * Determine the css of @cfile, verify it belongs to the same
+ * cgroup as cgroup.event_control, and associate @event with it.
+ * Remaining events are automatically removed on cgroup destruction
+ * but the removal is asynchronous, so take an extra ref.
*/
- cgrp_cfile = __d_cgrp(cfile->f_dentry->d_parent);
- if (cgrp_cfile != cgrp) {
- ret = -EINVAL;
- goto fail;
- }
+ rcu_read_lock();
+
+ ret = -EINVAL;
+ event->css = cgroup_css(cgrp, event->cft->ss);
+ cfile_css = css_from_dir(cfile->f_dentry->d_parent, event->cft->ss);
+ if (event->css && event->css == cfile_css && css_tryget(event->css))
+ ret = 0;
+
+ rcu_read_unlock();
+ if (ret)
+ goto out_put_cfile;
if (!event->cft->register_event || !event->cft->unregister_event) {
ret = -EINVAL;
- goto fail;
+ goto out_put_css;
}
- ret = event->cft->register_event(cgrp, event->cft,
+ ret = event->cft->register_event(event->css, event->cft,
event->eventfd, buffer);
if (ret)
- goto fail;
+ goto out_put_css;
efile->f_op->poll(efile, &event->pt);
- /*
- * Events should be removed after rmdir of cgroup directory, but before
- * destroying subsystem state objects. Let's take reference to cgroup
- * directory dentry to do that.
- */
- dget(cgrp->dentry);
-
spin_lock(&cgrp->event_list_lock);
list_add(&event->list, &cgrp->event_list);
spin_unlock(&cgrp->event_list_lock);
@@ -4061,35 +4132,33 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft,
return 0;
-fail:
- if (cfile)
- fput(cfile);
-
- if (event && event->eventfd && !IS_ERR(event->eventfd))
- eventfd_ctx_put(event->eventfd);
-
- if (!IS_ERR_OR_NULL(efile))
- fput(efile);
-
+out_put_css:
+ css_put(event->css);
+out_put_cfile:
+ fput(cfile);
+out_put_eventfd:
+ eventfd_ctx_put(event->eventfd);
+out_put_efile:
+ fput(efile);
+out_kfree:
kfree(event);
return ret;
}
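From userspace the event interface is unchanged by this rework. For context, a rough sketch of the '<event_fd> <control_fd> <args>' protocol as used with the memory controller's usage threshold events; the paths and the 1 MiB threshold are illustrative, and error handling is omitted.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	int efd = eventfd(0, 0);
	int cfd = open("/sys/fs/cgroup/memory/demo/memory.usage_in_bytes",
		       O_RDONLY);
	int ecfd = open("/sys/fs/cgroup/memory/demo/cgroup.event_control",
			O_WRONLY);
	char buf[64];
	uint64_t count;

	/* register: "<event_fd> <control_fd> <threshold in bytes>" */
	snprintf(buf, sizeof(buf), "%d %d %d", efd, cfd, 1024 * 1024);
	write(ecfd, buf, strlen(buf));

	read(efd, &count, sizeof(count));	/* blocks until the event fires */
	printf("event fired %llu time(s)\n", (unsigned long long)count);
	return 0;
}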
-static u64 cgroup_clone_children_read(struct cgroup *cgrp,
- struct cftype *cft)
+static u64 cgroup_clone_children_read(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
+ return test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
}
-static int cgroup_clone_children_write(struct cgroup *cgrp,
- struct cftype *cft,
- u64 val)
+static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 val)
{
if (val)
- set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
+ set_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
else
- clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
+ clear_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags);
return 0;
}
@@ -4148,36 +4217,34 @@ static struct cftype cgroup_base_files[] = {
};
/**
- * cgroup_populate_dir - selectively creation of files in a directory
+ * cgroup_populate_dir - create subsys files in a cgroup directory
* @cgrp: target cgroup
- * @base_files: true if the base files should be added
* @subsys_mask: mask of the subsystem ids whose files should be added
+ *
+ * On failure, no file is added.
*/
-static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files,
- unsigned long subsys_mask)
+static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask)
{
- int err;
struct cgroup_subsys *ss;
-
- if (base_files) {
- err = cgroup_addrm_files(cgrp, NULL, cgroup_base_files, true);
- if (err < 0)
- return err;
- }
+ int i, ret = 0;
/* process cftsets of each subsystem */
- for_each_root_subsys(cgrp->root, ss) {
+ for_each_subsys(ss, i) {
struct cftype_set *set;
- if (!test_bit(ss->subsys_id, &subsys_mask))
+
+ if (!test_bit(i, &subsys_mask))
continue;
- list_for_each_entry(set, &ss->cftsets, node)
- cgroup_addrm_files(cgrp, ss, set->cfts, true);
+ list_for_each_entry(set, &ss->cftsets, node) {
+ ret = cgroup_addrm_files(cgrp, set->cfts, true);
+ if (ret < 0)
+ goto err;
+ }
}
/* This cgroup is ready now */
for_each_root_subsys(cgrp->root, ss) {
- struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
+ struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
struct css_id *id = rcu_dereference_protected(css->id, true);
/*
@@ -4190,14 +4257,57 @@ static int cgroup_populate_dir(struct cgroup *cgrp, bool base_files,
}
return 0;
+err:
+ cgroup_clear_dir(cgrp, subsys_mask);
+ return ret;
+}
+
+/*
+ * css destruction is four-stage process.
+ *
+ * 1. Destruction starts. Killing of the percpu_ref is initiated.
+ * Implemented in kill_css().
+ *
+ * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
+ * and thus css_tryget() is guaranteed to fail, the css can be offlined
+ * by invoking offline_css(). After offlining, the base ref is put.
+ * Implemented in css_killed_work_fn().
+ *
+ * 3. When the percpu_ref reaches zero, the only possible remaining
+ * accessors are inside RCU read sections. css_release() schedules the
+ * RCU callback.
+ *
+ * 4. After the grace period, the css can be freed. Implemented in
+ * css_free_work_fn().
+ *
+ * It is actually hairier because both steps 2 and 4 require process
+ * context and thus involve punting to css->destroy_work, adding two
+ * additional steps to the already complex sequence.
+ */
+static void css_free_work_fn(struct work_struct *work)
+{
+ struct cgroup_subsys_state *css =
+ container_of(work, struct cgroup_subsys_state, destroy_work);
+ struct cgroup *cgrp = css->cgroup;
+
+ if (css->parent)
+ css_put(css->parent);
+
+ css->ss->css_free(css);
+ cgroup_dput(cgrp);
}
-static void css_dput_fn(struct work_struct *work)
+static void css_free_rcu_fn(struct rcu_head *rcu_head)
{
struct cgroup_subsys_state *css =
- container_of(work, struct cgroup_subsys_state, dput_work);
+ container_of(rcu_head, struct cgroup_subsys_state, rcu_head);
- cgroup_dput(css->cgroup);
+ /*
+ * css holds an extra ref to @cgrp->dentry which is put on the last
+ * css_put(). dput() requires process context which we don't have.
+ */
+ INIT_WORK(&css->destroy_work, css_free_work_fn);
+ schedule_work(&css->destroy_work);
}
static void css_release(struct percpu_ref *ref)
@@ -4205,49 +4315,47 @@ static void css_release(struct percpu_ref *ref)
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
- schedule_work(&css->dput_work);
+ call_rcu(&css->rcu_head, css_free_rcu_fn);
}
-static void init_cgroup_css(struct cgroup_subsys_state *css,
- struct cgroup_subsys *ss,
- struct cgroup *cgrp)
+static void init_css(struct cgroup_subsys_state *css, struct cgroup_subsys *ss,
+ struct cgroup *cgrp)
{
css->cgroup = cgrp;
+ css->ss = ss;
css->flags = 0;
css->id = NULL;
- if (cgrp == cgroup_dummy_top)
+
+ if (cgrp->parent)
+ css->parent = cgroup_css(cgrp->parent, ss);
+ else
css->flags |= CSS_ROOT;
- BUG_ON(cgrp->subsys[ss->subsys_id]);
- cgrp->subsys[ss->subsys_id] = css;
- /*
- * css holds an extra ref to @cgrp->dentry which is put on the last
- * css_put(). dput() requires process context, which css_put() may
- * be called without. @css->dput_work will be used to invoke
- * dput() asynchronously from css_put().
- */
- INIT_WORK(&css->dput_work, css_dput_fn);
+ BUG_ON(cgroup_css(cgrp, ss));
}
-/* invoke ->post_create() on a new CSS and mark it online if successful */
-static int online_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
+/* invoke ->css_online() on a new CSS and mark it online if successful */
+static int online_css(struct cgroup_subsys_state *css)
{
+ struct cgroup_subsys *ss = css->ss;
int ret = 0;
lockdep_assert_held(&cgroup_mutex);
if (ss->css_online)
- ret = ss->css_online(cgrp);
- if (!ret)
- cgrp->subsys[ss->subsys_id]->flags |= CSS_ONLINE;
+ ret = ss->css_online(css);
+ if (!ret) {
+ css->flags |= CSS_ONLINE;
+ css->cgroup->nr_css++;
+ rcu_assign_pointer(css->cgroup->subsys[ss->subsys_id], css);
+ }
return ret;
}
-/* if the CSS is online, invoke ->pre_destory() on it and mark it offline */
-static void offline_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
- __releases(&cgroup_mutex) __acquires(&cgroup_mutex)
+/* if the CSS is online, invoke ->css_offline() on it and mark it offline */
+static void offline_css(struct cgroup_subsys_state *css)
{
- struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
+ struct cgroup_subsys *ss = css->ss;
lockdep_assert_held(&cgroup_mutex);
@@ -4255,9 +4363,11 @@ static void offline_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
return;
if (ss->css_offline)
- ss->css_offline(cgrp);
+ ss->css_offline(css);
- cgrp->subsys[ss->subsys_id]->flags &= ~CSS_ONLINE;
+ css->flags &= ~CSS_ONLINE;
+ css->cgroup->nr_css--;
+ RCU_INIT_POINTER(css->cgroup->subsys[ss->subsys_id], css);
}
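Under the new prototypes a controller allocates against the parent css and frees by css. A sketch reusing the hypothetical struct demo_css / css_to_demo() from the earlier example; the NULL-parent convention for the root is an assumption about the wider series, not shown in this hunk.

static struct cgroup_subsys_state *
demo_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct demo_css *dcss;

	dcss = kzalloc(sizeof(*dcss), GFP_KERNEL);
	if (!dcss)
		return ERR_PTR(-ENOMEM);

	/* inherit from the parent when one exists (assumed NULL for root) */
	dcss->weight = parent_css ? css_to_demo(parent_css)->weight : 100;
	return &dcss->css;
}

static void demo_css_free(struct cgroup_subsys_state *css)
{
	kfree(css_to_demo(css));
}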
/*
@@ -4271,6 +4381,7 @@ static void offline_css(struct cgroup_subsys *ss, struct cgroup *cgrp)
static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
umode_t mode)
{
+ struct cgroup_subsys_state *css_ar[CGROUP_SUBSYS_COUNT] = { };
struct cgroup *cgrp;
struct cgroup_name *name;
struct cgroupfs_root *root = parent->root;
@@ -4288,7 +4399,11 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
goto err_free_cgrp;
rcu_assign_pointer(cgrp->name, name);
- cgrp->id = ida_simple_get(&root->cgroup_ida, 1, 0, GFP_KERNEL);
+ /*
+ * Temporarily set the pointer to NULL, so idr_find() won't return
+ * a half-baked cgroup.
+ */
+ cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
if (cgrp->id < 0)
goto err_free_name;
@@ -4317,6 +4432,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
cgrp->dentry = dentry;
cgrp->parent = parent;
+ cgrp->dummy_css.parent = &parent->dummy_css;
cgrp->root = parent->root;
if (notify_on_release(parent))
@@ -4328,20 +4444,21 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
for_each_root_subsys(root, ss) {
struct cgroup_subsys_state *css;
- css = ss->css_alloc(cgrp);
+ css = ss->css_alloc(cgroup_css(parent, ss));
if (IS_ERR(css)) {
err = PTR_ERR(css);
goto err_free_all;
}
+ css_ar[ss->subsys_id] = css;
err = percpu_ref_init(&css->refcnt, css_release);
if (err)
goto err_free_all;
- init_cgroup_css(css, ss, cgrp);
+ init_css(css, ss, cgrp);
if (ss->use_id) {
- err = alloc_css_id(ss, parent, cgrp);
+ err = alloc_css_id(css);
if (err)
goto err_free_all;
}
@@ -4363,16 +4480,22 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
root->number_of_cgroups++;
- /* each css holds a ref to the cgroup's dentry */
- for_each_root_subsys(root, ss)
+ /* each css holds a ref to the cgroup's dentry and the parent css */
+ for_each_root_subsys(root, ss) {
+ struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
+
dget(dentry);
+ css_get(css->parent);
+ }
/* hold a ref to the parent's dentry */
dget(parent->dentry);
/* creation succeeded, notify subsystems */
for_each_root_subsys(root, ss) {
- err = online_css(ss, cgrp);
+ struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
+
+ err = online_css(css);
if (err)
goto err_destroy;
@@ -4386,7 +4509,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
}
}
- err = cgroup_populate_dir(cgrp, true, root->subsys_mask);
+ idr_replace(&root->cgroup_idr, cgrp, cgrp->id);
+
+ err = cgroup_addrm_files(cgrp, cgroup_base_files, true);
+ if (err)
+ goto err_destroy;
+
+ err = cgroup_populate_dir(cgrp, root->subsys_mask);
if (err)
goto err_destroy;
@@ -4397,18 +4526,18 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
err_free_all:
for_each_root_subsys(root, ss) {
- struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
+ struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
if (css) {
percpu_ref_cancel_init(&css->refcnt);
- ss->css_free(cgrp);
+ ss->css_free(css);
}
}
mutex_unlock(&cgroup_mutex);
/* Release the reference count that we took on the superblock */
deactivate_super(sb);
err_free_id:
- ida_simple_remove(&root->cgroup_ida, cgrp->id);
+ idr_remove(&root->cgroup_idr, cgrp->id);
err_free_name:
kfree(rcu_dereference_raw(cgrp->name));
err_free_cgrp:
@@ -4430,22 +4559,84 @@ static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
return cgroup_create(c_parent, dentry, mode | S_IFDIR);
}
-static void cgroup_css_killed(struct cgroup *cgrp)
+/*
+ * This is called when the refcnt of a css is confirmed to be killed.
+ * css_tryget() is now guaranteed to fail.
+ */
+static void css_killed_work_fn(struct work_struct *work)
{
- if (!atomic_dec_and_test(&cgrp->css_kill_cnt))
- return;
+ struct cgroup_subsys_state *css =
+ container_of(work, struct cgroup_subsys_state, destroy_work);
+ struct cgroup *cgrp = css->cgroup;
- /* percpu ref's of all css's are killed, kick off the next step */
- INIT_WORK(&cgrp->destroy_work, cgroup_offline_fn);
- schedule_work(&cgrp->destroy_work);
+ mutex_lock(&cgroup_mutex);
+
+ /*
+ * css_tryget() is guaranteed to fail now. Tell subsystems to
+	 * initiate destruction.
+ */
+ offline_css(css);
+
+ /*
+ * If @cgrp is marked dead, it's waiting for refs of all css's to
+ * be disabled before proceeding to the second phase of cgroup
+ * destruction. If we are the last one, kick it off.
+ */
+ if (!cgrp->nr_css && cgroup_is_dead(cgrp))
+ cgroup_destroy_css_killed(cgrp);
+
+ mutex_unlock(&cgroup_mutex);
+
+ /*
+ * Put the css refs from kill_css(). Each css holds an extra
+ * reference to the cgroup's dentry and cgroup removal proceeds
+ * regardless of css refs. On the last put of each css, whenever
+ * that may be, the extra dentry ref is put so that dentry
+ * destruction happens only after all css's are released.
+ */
+ css_put(css);
}
-static void css_ref_killed_fn(struct percpu_ref *ref)
+/* css kill confirmation processing requires process context, bounce */
+static void css_killed_ref_fn(struct percpu_ref *ref)
{
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
- cgroup_css_killed(css->cgroup);
+ INIT_WORK(&css->destroy_work, css_killed_work_fn);
+ schedule_work(&css->destroy_work);
+}
+
+/**
+ * kill_css - destroy a css
+ * @css: css to destroy
+ *
+ * This function initiates destruction of @css by removing cgroup interface
+ * files and putting its base reference. ->css_offline() will be invoked
+ * asynchronously once css_tryget() is guaranteed to fail and when the
+ * reference count reaches zero, @css will be released.
+ */
+static void kill_css(struct cgroup_subsys_state *css)
+{
+ cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id);
+
+ /*
+ * Killing would put the base ref, but we need to keep it alive
+ * until after ->css_offline().
+ */
+ css_get(css);
+
+ /*
+ * cgroup core guarantees that, by the time ->css_offline() is
+ * invoked, no new css reference will be given out via
+ * css_tryget(). We can't simply call percpu_ref_kill() and
+ * proceed to offlining css's because percpu_ref_kill() doesn't
+ * guarantee that the ref is seen as killed on all CPUs on return.
+ *
+ * Use percpu_ref_kill_and_confirm() to get notifications as each
+ * css is confirmed to be seen as killed on all CPUs.
+ */
+ percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
}
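For readers unfamiliar with the percpu_ref teardown that kill_css() relies on, here is a minimal sketch of the same kill/confirm/bounce pattern applied to a hypothetical foo object; foo_kill(), foo_killed_ref_fn() and foo_killed_work_fn() are illustrative names only and are not part of this patch.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>

struct foo {
	struct percpu_ref refcnt;
	struct work_struct destroy_work;
};

/* process-context half: offline the object, then drop the extra ref */
static void foo_killed_work_fn(struct work_struct *work)
{
	struct foo *foo = container_of(work, struct foo, destroy_work);

	/* ... offline @foo: new lookups are already guaranteed to fail ... */

	percpu_ref_put(&foo->refcnt);	/* pairs with the get in foo_kill() */
}

/*
 * Confirmation callback: invoked once the ref is seen as killed on all
 * CPUs.  It may run in atomic context, so bounce the rest to a work item.
 */
static void foo_killed_ref_fn(struct percpu_ref *ref)
{
	struct foo *foo = container_of(ref, struct foo, refcnt);

	INIT_WORK(&foo->destroy_work, foo_killed_work_fn);
	schedule_work(&foo->destroy_work);
}

static void foo_kill(struct foo *foo)
{
	/* keep @foo alive until the offline step has run */
	percpu_ref_get(&foo->refcnt);
	percpu_ref_kill_and_confirm(&foo->refcnt, foo_killed_ref_fn);
}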
/**
@@ -4478,6 +4669,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
struct dentry *d = cgrp->dentry;
struct cgroup_event *event, *tmp;
struct cgroup_subsys *ss;
+ struct cgroup *child;
bool empty;
lockdep_assert_held(&d->d_inode->i_mutex);
@@ -4488,47 +4680,41 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
* @cgrp from being removed while __put_css_set() is in progress.
*/
read_lock(&css_set_lock);
- empty = list_empty(&cgrp->cset_links) && list_empty(&cgrp->children);
+ empty = list_empty(&cgrp->cset_links);
read_unlock(&css_set_lock);
if (!empty)
return -EBUSY;
/*
- * Block new css_tryget() by killing css refcnts. cgroup core
- * guarantees that, by the time ->css_offline() is invoked, no new
- * css reference will be given out via css_tryget(). We can't
- * simply call percpu_ref_kill() and proceed to offlining css's
- * because percpu_ref_kill() doesn't guarantee that the ref is seen
- * as killed on all CPUs on return.
- *
- * Use percpu_ref_kill_and_confirm() to get notifications as each
- * css is confirmed to be seen as killed on all CPUs. The
- * notification callback keeps track of the number of css's to be
- * killed and schedules cgroup_offline_fn() to perform the rest of
- * destruction once the percpu refs of all css's are confirmed to
- * be killed.
+ * Make sure there are no live children. We can't test ->children
+ * emptiness as dead children linger on it while being destroyed;
+ * otherwise, "rmdir parent/child parent" may fail with -EBUSY.
*/
- atomic_set(&cgrp->css_kill_cnt, 1);
- for_each_root_subsys(cgrp->root, ss) {
- struct cgroup_subsys_state *css = cgrp->subsys[ss->subsys_id];
-
- /*
- * Killing would put the base ref, but we need to keep it
- * alive until after ->css_offline.
- */
- percpu_ref_get(&css->refcnt);
-
- atomic_inc(&cgrp->css_kill_cnt);
- percpu_ref_kill_and_confirm(&css->refcnt, css_ref_killed_fn);
+ empty = true;
+ rcu_read_lock();
+ list_for_each_entry_rcu(child, &cgrp->children, sibling) {
+ empty = cgroup_is_dead(child);
+ if (!empty)
+ break;
}
- cgroup_css_killed(cgrp);
+ rcu_read_unlock();
+ if (!empty)
+ return -EBUSY;
+
+ /*
+ * Initiate massacre of all css's. cgroup_destroy_css_killed()
+ * will be invoked to perform the rest of destruction once the
+ * percpu refs of all css's are confirmed to be killed.
+ */
+ for_each_root_subsys(cgrp->root, ss)
+ kill_css(cgroup_css(cgrp, ss));
/*
* Mark @cgrp dead. This prevents further task migration and child
* creation by disabling cgroup_lock_live_group(). Note that
- * CGRP_DEAD assertion is depended upon by cgroup_next_sibling() to
+ * CGRP_DEAD assertion is depended upon by css_next_child() to
* resume iteration after dropping RCU read lock. See
- * cgroup_next_sibling() for details.
+ * css_next_child() for details.
*/
set_bit(CGRP_DEAD, &cgrp->flags);
@@ -4539,9 +4725,20 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
raw_spin_unlock(&release_list_lock);
/*
- * Remove @cgrp directory. The removal puts the base ref but we
- * aren't quite done with @cgrp yet, so hold onto it.
+ * If @cgrp has css's attached, the second stage of cgroup
+ * destruction is kicked off from css_killed_work_fn() after the
+ * refs of all attached css's are killed. If @cgrp doesn't have
+ * any css, we kick it off here.
*/
+ if (!cgrp->nr_css)
+ cgroup_destroy_css_killed(cgrp);
+
+ /*
+ * Clear the base files and remove @cgrp directory. The removal
+ * puts the base ref but we aren't quite done with @cgrp yet, so
+ * hold onto it.
+ */
+ cgroup_addrm_files(cgrp, cgroup_base_files, false);
dget(d);
cgroup_d_remove_dir(d);
@@ -4561,50 +4758,36 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
};
/**
- * cgroup_offline_fn - the second step of cgroup destruction
+ * cgroup_destroy_css_killed - the second step of cgroup destruction
* @work: cgroup->destroy_free_work
*
* This function is invoked from a work item for a cgroup which is being
- * destroyed after the percpu refcnts of all css's are guaranteed to be
- * seen as killed on all CPUs, and performs the rest of destruction. This
- * is the second step of destruction described in the comment above
- * cgroup_destroy_locked().
+ * destroyed after all css's are offlined and performs the rest of
+ * destruction. This is the second step of destruction described in the
+ * comment above cgroup_destroy_locked().
*/
-static void cgroup_offline_fn(struct work_struct *work)
+static void cgroup_destroy_css_killed(struct cgroup *cgrp)
{
- struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work);
struct cgroup *parent = cgrp->parent;
struct dentry *d = cgrp->dentry;
- struct cgroup_subsys *ss;
- mutex_lock(&cgroup_mutex);
+ lockdep_assert_held(&cgroup_mutex);
- /*
- * css_tryget() is guaranteed to fail now. Tell subsystems to
- * initate destruction.
- */
- for_each_root_subsys(cgrp->root, ss)
- offline_css(ss, cgrp);
+ /* delete this cgroup from parent->children */
+ list_del_rcu(&cgrp->sibling);
/*
- * Put the css refs from cgroup_destroy_locked(). Each css holds
- * an extra reference to the cgroup's dentry and cgroup removal
- * proceeds regardless of css refs. On the last put of each css,
- * whenever that may be, the extra dentry ref is put so that dentry
- * destruction happens only after all css's are released.
+ * We should remove the cgroup object from idr before its grace
+ * period starts, so we won't be looking up a cgroup while the
+ * cgroup is being freed.
*/
- for_each_root_subsys(cgrp->root, ss)
- css_put(cgrp->subsys[ss->subsys_id]);
-
- /* delete this cgroup from parent->children */
- list_del_rcu(&cgrp->sibling);
+ idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
+ cgrp->id = -1;
dput(d);
set_bit(CGRP_RELEASABLE, &parent->flags);
check_for_release(parent);
-
- mutex_unlock(&cgroup_mutex);
}
static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
@@ -4627,6 +4810,11 @@ static void __init_or_module cgroup_init_cftsets(struct cgroup_subsys *ss)
* deregistration.
*/
if (ss->base_cftypes) {
+ struct cftype *cft;
+
+ for (cft = ss->base_cftypes; cft->name[0] != '\0'; cft++)
+ cft->ss = ss;
+
ss->base_cftset.cfts = ss->base_cftypes;
list_add_tail(&ss->base_cftset.node, &ss->cftsets);
}
@@ -4646,10 +4834,10 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
/* Create the top cgroup state for this subsystem */
list_add(&ss->sibling, &cgroup_dummy_root.subsys_list);
ss->root = &cgroup_dummy_root;
- css = ss->css_alloc(cgroup_dummy_top);
+ css = ss->css_alloc(cgroup_css(cgroup_dummy_top, ss));
/* We don't handle early failures gracefully */
BUG_ON(IS_ERR(css));
- init_cgroup_css(css, ss, cgroup_dummy_top);
+ init_css(css, ss, cgroup_dummy_top);
/* Update the init_css_set to contain a subsys
* pointer to this state - since the subsystem is
@@ -4664,7 +4852,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
* need to invoke fork callbacks here. */
BUG_ON(!list_empty(&init_task.tasks));
- BUG_ON(online_css(ss, cgroup_dummy_top));
+ BUG_ON(online_css(css));
mutex_unlock(&cgroup_mutex);
@@ -4725,7 +4913,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
* struct, so this can happen first (i.e. before the dummy root
* attachment).
*/
- css = ss->css_alloc(cgroup_dummy_top);
+ css = ss->css_alloc(cgroup_css(cgroup_dummy_top, ss));
if (IS_ERR(css)) {
/* failure case - need to deassign the cgroup_subsys[] slot. */
cgroup_subsys[ss->subsys_id] = NULL;
@@ -4737,8 +4925,8 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
ss->root = &cgroup_dummy_root;
/* our new subsystem will be attached to the dummy hierarchy. */
- init_cgroup_css(css, ss, cgroup_dummy_top);
- /* init_idr must be after init_cgroup_css because it sets css->id. */
+ init_css(css, ss, cgroup_dummy_top);
+ /* init_idr must be after init_css() because it sets css->id. */
if (ss->use_id) {
ret = cgroup_init_idr(ss, css);
if (ret)
@@ -4768,7 +4956,7 @@ int __init_or_module cgroup_load_subsys(struct cgroup_subsys *ss)
}
write_unlock(&css_set_lock);
- ret = online_css(ss, cgroup_dummy_top);
+ ret = online_css(css);
if (ret)
goto err_unload;
@@ -4800,14 +4988,14 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
/*
* we shouldn't be called if the subsystem is in use, and the use of
- * try_module_get in parse_cgroupfs_options should ensure that it
+ * try_module_get() in rebind_subsystems() should ensure that it
* doesn't start being used while we're killing it off.
*/
BUG_ON(ss->root != &cgroup_dummy_root);
mutex_lock(&cgroup_mutex);
- offline_css(ss, cgroup_dummy_top);
+ offline_css(cgroup_css(cgroup_dummy_top, ss));
if (ss->use_id)
idr_destroy(&ss->idr);
@@ -4841,8 +5029,8 @@ void cgroup_unload_subsys(struct cgroup_subsys *ss)
* the cgrp->subsys pointer to find their state. note that this
* also takes care of freeing the css_id.
*/
- ss->css_free(cgroup_dummy_top);
- cgroup_dummy_top->subsys[ss->subsys_id] = NULL;
+ ss->css_free(cgroup_css(cgroup_dummy_top, ss));
+ RCU_INIT_POINTER(cgroup_dummy_top->subsys[ss->subsys_id], NULL);
mutex_unlock(&cgroup_mutex);
}
@@ -4924,6 +5112,10 @@ int __init cgroup_init(void)
BUG_ON(cgroup_init_root_id(&cgroup_dummy_root, 0, 1));
+ err = idr_alloc(&cgroup_dummy_root.cgroup_idr, cgroup_dummy_top,
+ 0, 1, GFP_KERNEL);
+ BUG_ON(err < 0);
+
mutex_unlock(&cgroup_root_mutex);
mutex_unlock(&cgroup_mutex);
@@ -5080,7 +5272,7 @@ void cgroup_fork(struct task_struct *child)
* Adds the task to the list running through its css_set if necessary and
* call the subsystem fork() callbacks. Has to be after the task is
* visible on the task list in case we race with the first call to
- * cgroup_iter_start() - to guarantee that the new task ends up on its
+ * cgroup_task_iter_start() - to guarantee that the new task ends up on its
* list.
*/
void cgroup_post_fork(struct task_struct *child)
@@ -5193,10 +5385,10 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
*/
for_each_builtin_subsys(ss, i) {
if (ss->exit) {
- struct cgroup *old_cgrp = cset->subsys[i]->cgroup;
- struct cgroup *cgrp = task_cgroup(tsk, i);
+ struct cgroup_subsys_state *old_css = cset->subsys[i];
+ struct cgroup_subsys_state *css = task_css(tsk, i);
- ss->exit(cgrp, old_cgrp, tsk);
+ ss->exit(css, old_css, tsk);
}
}
}
@@ -5455,20 +5647,16 @@ static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
return 0;
}
-static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent,
- struct cgroup *child)
+static int alloc_css_id(struct cgroup_subsys_state *child_css)
{
- int subsys_id, i, depth = 0;
- struct cgroup_subsys_state *parent_css, *child_css;
+ struct cgroup_subsys_state *parent_css = css_parent(child_css);
struct css_id *child_id, *parent_id;
+ int i, depth;
- subsys_id = ss->subsys_id;
- parent_css = parent->subsys[subsys_id];
- child_css = child->subsys[subsys_id];
parent_id = rcu_dereference_protected(parent_css->id, true);
depth = parent_id->depth + 1;
- child_id = get_new_cssid(ss, depth);
+ child_id = get_new_cssid(child_css->ss, depth);
if (IS_ERR(child_id))
return PTR_ERR(child_id);
@@ -5506,31 +5694,56 @@ struct cgroup_subsys_state *css_lookup(struct cgroup_subsys *ss, int id)
}
EXPORT_SYMBOL_GPL(css_lookup);
-/*
- * get corresponding css from file open on cgroupfs directory
+/**
+ * css_from_dir - get corresponding css from the dentry of a cgroup dir
+ * @dentry: directory dentry of interest
+ * @ss: subsystem of interest
+ *
+ * Must be called under RCU read lock. The caller is responsible for
+ * pinning the returned css if it needs to be accessed outside the RCU
+ * critical section.
*/
-struct cgroup_subsys_state *cgroup_css_from_dir(struct file *f, int id)
+struct cgroup_subsys_state *css_from_dir(struct dentry *dentry,
+ struct cgroup_subsys *ss)
{
struct cgroup *cgrp;
- struct inode *inode;
- struct cgroup_subsys_state *css;
- inode = file_inode(f);
- /* check in cgroup filesystem dir */
- if (inode->i_op != &cgroup_dir_inode_operations)
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ /* is @dentry a cgroup dir? */
+ if (!dentry->d_inode ||
+ dentry->d_inode->i_op != &cgroup_dir_inode_operations)
return ERR_PTR(-EBADF);
- if (id < 0 || id >= CGROUP_SUBSYS_COUNT)
- return ERR_PTR(-EINVAL);
+ cgrp = __d_cgrp(dentry);
+ return cgroup_css(cgrp, ss) ?: ERR_PTR(-ENOENT);
+}
- /* get cgroup */
- cgrp = __d_cgrp(f->f_dentry);
- css = cgrp->subsys[id];
- return css ? css : ERR_PTR(-ENOENT);
+/**
+ * css_from_id - lookup css by id
+ * @id: the cgroup id
+ * @ss: cgroup subsys to be looked into
+ *
+ * Returns the css if there's a valid one with @id, otherwise returns NULL.
+ * Should be called under rcu_read_lock().
+ */
+struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
+{
+ struct cgroup *cgrp;
+
+ rcu_lockdep_assert(rcu_read_lock_held() ||
+ lockdep_is_held(&cgroup_mutex),
+ "css_from_id() needs proper protection");
+
+ cgrp = idr_find(&ss->root->cgroup_idr, id);
+ if (cgrp)
+ return cgroup_css(cgrp, ss);
+ return NULL;
}
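A hypothetical caller of css_from_id() might pin the result roughly as below; example_get_css_by_id() is an illustrative name, not part of this patch, and freezer_subsys is used only as a convenient builtin subsystem.

#include <linux/cgroup.h>
#include <linux/rcupdate.h>

/* look up and pin the freezer css for @id; returns NULL if it is gone */
static struct cgroup_subsys_state *example_get_css_by_id(int id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	css = css_from_id(id, &freezer_subsys);
	if (css && !css_tryget(css))
		css = NULL;
	rcu_read_unlock();

	return css;	/* caller drops the reference with css_put() */
}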
#ifdef CONFIG_CGROUP_DEBUG
-static struct cgroup_subsys_state *debug_css_alloc(struct cgroup *cgrp)
+static struct cgroup_subsys_state *
+debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);
@@ -5540,22 +5753,24 @@ static struct cgroup_subsys_state *debug_css_alloc(struct cgroup *cgrp)
return css;
}
-static void debug_css_free(struct cgroup *cgrp)
+static void debug_css_free(struct cgroup_subsys_state *css)
{
- kfree(cgrp->subsys[debug_subsys_id]);
+ kfree(css);
}
-static u64 debug_taskcount_read(struct cgroup *cgrp, struct cftype *cft)
+static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- return cgroup_task_count(cgrp);
+ return cgroup_task_count(css->cgroup);
}
-static u64 current_css_set_read(struct cgroup *cgrp, struct cftype *cft)
+static u64 current_css_set_read(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
return (u64)(unsigned long)current->cgroups;
}
-static u64 current_css_set_refcount_read(struct cgroup *cgrp,
+static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
u64 count;
@@ -5566,7 +5781,7 @@ static u64 current_css_set_refcount_read(struct cgroup *cgrp,
return count;
}
-static int current_css_set_cg_links_read(struct cgroup *cgrp,
+static int current_css_set_cg_links_read(struct cgroup_subsys_state *css,
struct cftype *cft,
struct seq_file *seq)
{
@@ -5593,14 +5808,13 @@ static int current_css_set_cg_links_read(struct cgroup *cgrp,
}
#define MAX_TASKS_SHOWN_PER_CSS 25
-static int cgroup_css_links_read(struct cgroup *cgrp,
- struct cftype *cft,
- struct seq_file *seq)
+static int cgroup_css_links_read(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *seq)
{
struct cgrp_cset_link *link;
read_lock(&css_set_lock);
- list_for_each_entry(link, &cgrp->cset_links, cset_link) {
+ list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
struct css_set *cset = link->cset;
struct task_struct *task;
int count = 0;
@@ -5619,9 +5833,9 @@ static int cgroup_css_links_read(struct cgroup *cgrp,
return 0;
}
-static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft)
+static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
- return test_bit(CGRP_RELEASABLE, &cgrp->flags);
+ return test_bit(CGRP_RELEASABLE, &css->cgroup->flags);
}
static struct cftype debug_files[] = {
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 75dda1ea502..f0ff64d0eba 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -45,25 +45,19 @@ struct freezer {
spinlock_t lock;
};
-static inline struct freezer *cgroup_freezer(struct cgroup *cgroup)
+static inline struct freezer *css_freezer(struct cgroup_subsys_state *css)
{
- return container_of(cgroup_subsys_state(cgroup, freezer_subsys_id),
- struct freezer, css);
+ return css ? container_of(css, struct freezer, css) : NULL;
}
static inline struct freezer *task_freezer(struct task_struct *task)
{
- return container_of(task_subsys_state(task, freezer_subsys_id),
- struct freezer, css);
+ return css_freezer(task_css(task, freezer_subsys_id));
}
static struct freezer *parent_freezer(struct freezer *freezer)
{
- struct cgroup *pcg = freezer->css.cgroup->parent;
-
- if (pcg)
- return cgroup_freezer(pcg);
- return NULL;
+ return css_freezer(css_parent(&freezer->css));
}
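The same embed-and-wrap shape works for any controller state; a hypothetical subsystem would provide the same three accessors. struct example_state, its helpers and example_subsys_id below are assumptions shown only to illustrate the pattern.

#include <linux/cgroup.h>

struct example_state {
	struct cgroup_subsys_state css;	/* must be embedded, not pointed to */
	unsigned int some_setting;
};

static inline struct example_state *css_example(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct example_state, css) : NULL;
}

/* example_subsys_id is hypothetical; a real controller uses its own enum value */
static inline struct example_state *task_example(struct task_struct *task)
{
	return css_example(task_css(task, example_subsys_id));
}

static inline struct example_state *parent_example(struct example_state *es)
{
	return css_example(css_parent(&es->css));
}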
bool cgroup_freezing(struct task_struct *task)
@@ -92,7 +86,8 @@ static const char *freezer_state_strs(unsigned int state)
struct cgroup_subsys freezer_subsys;
-static struct cgroup_subsys_state *freezer_css_alloc(struct cgroup *cgroup)
+static struct cgroup_subsys_state *
+freezer_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct freezer *freezer;
@@ -105,22 +100,22 @@ static struct cgroup_subsys_state *freezer_css_alloc(struct cgroup *cgroup)
}
/**
- * freezer_css_online - commit creation of a freezer cgroup
- * @cgroup: cgroup being created
+ * freezer_css_online - commit creation of a freezer css
+ * @css: css being created
*
- * We're committing to creation of @cgroup. Mark it online and inherit
+ * We're committing to creation of @css. Mark it online and inherit
* parent's freezing state while holding both parent's and our
* freezer->lock.
*/
-static int freezer_css_online(struct cgroup *cgroup)
+static int freezer_css_online(struct cgroup_subsys_state *css)
{
- struct freezer *freezer = cgroup_freezer(cgroup);
+ struct freezer *freezer = css_freezer(css);
struct freezer *parent = parent_freezer(freezer);
/*
* The following double locking and freezing state inheritance
* guarantee that @cgroup can never escape ancestors' freezing
- * states. See cgroup_for_each_descendant_pre() for details.
+ * states. See css_for_each_descendant_pre() for details.
*/
if (parent)
spin_lock_irq(&parent->lock);
@@ -141,15 +136,15 @@ static int freezer_css_online(struct cgroup *cgroup)
}
/**
- * freezer_css_offline - initiate destruction of @cgroup
- * @cgroup: cgroup being destroyed
+ * freezer_css_offline - initiate destruction of a freezer css
+ * @css: css being destroyed
*
- * @cgroup is going away. Mark it dead and decrement system_freezing_count
- * if it was holding one.
+ * @css is going away. Mark it dead and decrement system_freezing_count if
+ * it was holding one.
*/
-static void freezer_css_offline(struct cgroup *cgroup)
+static void freezer_css_offline(struct cgroup_subsys_state *css)
{
- struct freezer *freezer = cgroup_freezer(cgroup);
+ struct freezer *freezer = css_freezer(css);
spin_lock_irq(&freezer->lock);
@@ -161,9 +156,9 @@ static void freezer_css_offline(struct cgroup *cgroup)
spin_unlock_irq(&freezer->lock);
}
-static void freezer_css_free(struct cgroup *cgroup)
+static void freezer_css_free(struct cgroup_subsys_state *css)
{
- kfree(cgroup_freezer(cgroup));
+ kfree(css_freezer(css));
}
/*
@@ -175,25 +170,26 @@ static void freezer_css_free(struct cgroup *cgroup)
* @freezer->lock. freezer_attach() makes the new tasks conform to the
* current state and all following state changes can see the new tasks.
*/
-static void freezer_attach(struct cgroup *new_cgrp, struct cgroup_taskset *tset)
+static void freezer_attach(struct cgroup_subsys_state *new_css,
+ struct cgroup_taskset *tset)
{
- struct freezer *freezer = cgroup_freezer(new_cgrp);
+ struct freezer *freezer = css_freezer(new_css);
struct task_struct *task;
bool clear_frozen = false;
spin_lock_irq(&freezer->lock);
/*
- * Make the new tasks conform to the current state of @new_cgrp.
+ * Make the new tasks conform to the current state of @new_css.
* For simplicity, when migrating any task to a FROZEN cgroup, we
* revert it to FREEZING and let update_if_frozen() determine the
* correct state later.
*
- * Tasks in @tset are on @new_cgrp but may not conform to its
+ * Tasks in @tset are on @new_css but may not conform to its
* current state before executing the following - !frozen tasks may
* be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
*/
- cgroup_taskset_for_each(task, new_cgrp, tset) {
+ cgroup_taskset_for_each(task, new_css, tset) {
if (!(freezer->state & CGROUP_FREEZING)) {
__thaw_task(task);
} else {
@@ -231,7 +227,7 @@ static void freezer_fork(struct task_struct *task)
* The root cgroup is non-freezable, so we can skip the
* following check.
*/
- if (!freezer->css.cgroup->parent)
+ if (!parent_freezer(freezer))
goto out;
spin_lock_irq(&freezer->lock);
@@ -244,7 +240,7 @@ out:
/**
* update_if_frozen - update whether a cgroup finished freezing
- * @cgroup: cgroup of interest
+ * @css: css of interest
*
* Once FREEZING is initiated, transition to FROZEN is lazily updated by
* calling this function. If the current state is FREEZING but not FROZEN,
@@ -255,14 +251,14 @@ out:
* update_if_frozen() on all descendants prior to invoking this function.
*
* Task states and freezer state might disagree while tasks are being
- * migrated into or out of @cgroup, so we can't verify task states against
+ * migrated into or out of @css, so we can't verify task states against
* @freezer state here. See freezer_attach() for details.
*/
-static void update_if_frozen(struct cgroup *cgroup)
+static void update_if_frozen(struct cgroup_subsys_state *css)
{
- struct freezer *freezer = cgroup_freezer(cgroup);
- struct cgroup *pos;
- struct cgroup_iter it;
+ struct freezer *freezer = css_freezer(css);
+ struct cgroup_subsys_state *pos;
+ struct css_task_iter it;
struct task_struct *task;
WARN_ON_ONCE(!rcu_read_lock_held());
@@ -274,8 +270,8 @@ static void update_if_frozen(struct cgroup *cgroup)
goto out_unlock;
/* are all (live) children frozen? */
- cgroup_for_each_child(pos, cgroup) {
- struct freezer *child = cgroup_freezer(pos);
+ css_for_each_child(pos, css) {
+ struct freezer *child = css_freezer(pos);
if ((child->state & CGROUP_FREEZER_ONLINE) &&
!(child->state & CGROUP_FROZEN))
@@ -283,9 +279,9 @@ static void update_if_frozen(struct cgroup *cgroup)
}
/* are all tasks frozen? */
- cgroup_iter_start(cgroup, &it);
+ css_task_iter_start(css, &it);
- while ((task = cgroup_iter_next(cgroup, &it))) {
+ while ((task = css_task_iter_next(&it))) {
if (freezing(task)) {
/*
* freezer_should_skip() indicates that the task
@@ -300,52 +296,49 @@ static void update_if_frozen(struct cgroup *cgroup)
freezer->state |= CGROUP_FROZEN;
out_iter_end:
- cgroup_iter_end(cgroup, &it);
+ css_task_iter_end(&it);
out_unlock:
spin_unlock_irq(&freezer->lock);
}
-static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
+static int freezer_read(struct cgroup_subsys_state *css, struct cftype *cft,
struct seq_file *m)
{
- struct cgroup *pos;
+ struct cgroup_subsys_state *pos;
rcu_read_lock();
/* update states bottom-up */
- cgroup_for_each_descendant_post(pos, cgroup)
+ css_for_each_descendant_post(pos, css)
update_if_frozen(pos);
- update_if_frozen(cgroup);
rcu_read_unlock();
- seq_puts(m, freezer_state_strs(cgroup_freezer(cgroup)->state));
+ seq_puts(m, freezer_state_strs(css_freezer(css)->state));
seq_putc(m, '\n');
return 0;
}
static void freeze_cgroup(struct freezer *freezer)
{
- struct cgroup *cgroup = freezer->css.cgroup;
- struct cgroup_iter it;
+ struct css_task_iter it;
struct task_struct *task;
- cgroup_iter_start(cgroup, &it);
- while ((task = cgroup_iter_next(cgroup, &it)))
+ css_task_iter_start(&freezer->css, &it);
+ while ((task = css_task_iter_next(&it)))
freeze_task(task);
- cgroup_iter_end(cgroup, &it);
+ css_task_iter_end(&it);
}
static void unfreeze_cgroup(struct freezer *freezer)
{
- struct cgroup *cgroup = freezer->css.cgroup;
- struct cgroup_iter it;
+ struct css_task_iter it;
struct task_struct *task;
- cgroup_iter_start(cgroup, &it);
- while ((task = cgroup_iter_next(cgroup, &it)))
+ css_task_iter_start(&freezer->css, &it);
+ while ((task = css_task_iter_next(&it)))
__thaw_task(task);
- cgroup_iter_end(cgroup, &it);
+ css_task_iter_end(&it);
}
/**
@@ -395,12 +388,7 @@ static void freezer_apply_state(struct freezer *freezer, bool freeze,
*/
static void freezer_change_state(struct freezer *freezer, bool freeze)
{
- struct cgroup *pos;
-
- /* update @freezer */
- spin_lock_irq(&freezer->lock);
- freezer_apply_state(freezer, freeze, CGROUP_FREEZING_SELF);
- spin_unlock_irq(&freezer->lock);
+ struct cgroup_subsys_state *pos;
/*
* Update all its descendants in pre-order traversal. Each
@@ -408,24 +396,33 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
* CGROUP_FREEZING_PARENT.
*/
rcu_read_lock();
- cgroup_for_each_descendant_pre(pos, freezer->css.cgroup) {
- struct freezer *pos_f = cgroup_freezer(pos);
+ css_for_each_descendant_pre(pos, &freezer->css) {
+ struct freezer *pos_f = css_freezer(pos);
struct freezer *parent = parent_freezer(pos_f);
- /*
- * Our update to @parent->state is already visible which is
- * all we need. No need to lock @parent. For more info on
- * synchronization, see freezer_post_create().
- */
spin_lock_irq(&pos_f->lock);
- freezer_apply_state(pos_f, parent->state & CGROUP_FREEZING,
- CGROUP_FREEZING_PARENT);
+
+ if (pos_f == freezer) {
+ freezer_apply_state(pos_f, freeze,
+ CGROUP_FREEZING_SELF);
+ } else {
+ /*
+ * Our update to @parent->state is already visible
+ * which is all we need. No need to lock @parent.
+ * For more info on synchronization, see
+ * freezer_post_create().
+ */
+ freezer_apply_state(pos_f,
+ parent->state & CGROUP_FREEZING,
+ CGROUP_FREEZING_PARENT);
+ }
+
spin_unlock_irq(&pos_f->lock);
}
rcu_read_unlock();
}
-static int freezer_write(struct cgroup *cgroup, struct cftype *cft,
+static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft,
const char *buffer)
{
bool freeze;
@@ -437,20 +434,22 @@ static int freezer_write(struct cgroup *cgroup, struct cftype *cft,
else
return -EINVAL;
- freezer_change_state(cgroup_freezer(cgroup), freeze);
+ freezer_change_state(css_freezer(css), freeze);
return 0;
}
-static u64 freezer_self_freezing_read(struct cgroup *cgroup, struct cftype *cft)
+static u64 freezer_self_freezing_read(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- struct freezer *freezer = cgroup_freezer(cgroup);
+ struct freezer *freezer = css_freezer(css);
return (bool)(freezer->state & CGROUP_FREEZING_SELF);
}
-static u64 freezer_parent_freezing_read(struct cgroup *cgroup, struct cftype *cft)
+static u64 freezer_parent_freezing_read(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- struct freezer *freezer = cgroup_freezer(cgroup);
+ struct freezer *freezer = css_freezer(css);
return (bool)(freezer->state & CGROUP_FREEZING_PARENT);
}
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 383f8231e43..247091bf058 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -20,22 +20,33 @@
#include <linux/hardirq.h>
#include <linux/export.h>
-DEFINE_PER_CPU(struct context_tracking, context_tracking) = {
-#ifdef CONFIG_CONTEXT_TRACKING_FORCE
- .active = true,
-#endif
-};
+#define CREATE_TRACE_POINTS
+#include <trace/events/context_tracking.h>
+
+struct static_key context_tracking_enabled = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL_GPL(context_tracking_enabled);
+
+DEFINE_PER_CPU(struct context_tracking, context_tracking);
+EXPORT_SYMBOL_GPL(context_tracking);
+
+void context_tracking_cpu_set(int cpu)
+{
+ if (!per_cpu(context_tracking.active, cpu)) {
+ per_cpu(context_tracking.active, cpu) = true;
+ static_key_slow_inc(&context_tracking_enabled);
+ }
+}
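The static key introduced here is meant to be tested on hot paths with static_key_false(), so the tracking hooks cost a single patched jump while no CPU has tracking enabled. A rough sketch of how a header-side wrapper could consume it; the wrapper name and placement are assumptions for illustration and are not part of this hunk.

#include <linux/jump_label.h>

extern struct static_key context_tracking_enabled;
extern void context_tracking_user_enter(void);

/* near-zero cost when the key is disabled: the branch is patched out */
static inline void example_user_enter(void)
{
	if (static_key_false(&context_tracking_enabled))
		context_tracking_user_enter();
}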
/**
- * user_enter - Inform the context tracking that the CPU is going to
- * enter userspace mode.
+ * context_tracking_user_enter - Inform the context tracking that the CPU is going to
+ * enter userspace mode.
*
* This function must be called right before we switch from the kernel
* to userspace, when it's guaranteed the remaining kernel instructions
* to execute won't use any RCU read side critical section because this
* function sets RCU in extended quiescent state.
*/
-void user_enter(void)
+void context_tracking_user_enter(void)
{
unsigned long flags;
@@ -54,17 +65,32 @@ void user_enter(void)
WARN_ON_ONCE(!current->mm);
local_irq_save(flags);
- if (__this_cpu_read(context_tracking.active) &&
- __this_cpu_read(context_tracking.state) != IN_USER) {
+ if (__this_cpu_read(context_tracking.state) != IN_USER) {
+ if (__this_cpu_read(context_tracking.active)) {
+ trace_user_enter(0);
+ /*
+ * At this stage, only low level arch entry code remains and
+ * then we'll run in userspace. We can assume there won't be
+ * any RCU read-side critical section until the next call to
+ * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
+ * on the tick.
+ */
+ vtime_user_enter(current);
+ rcu_user_enter();
+ }
/*
- * At this stage, only low level arch entry code remains and
- * then we'll run in userspace. We can assume there won't be
- * any RCU read-side critical section until the next call to
- * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
- * on the tick.
+ * Even if context tracking is disabled on this CPU, because it's outside
+ * the full dynticks mask for example, we still have to keep track of the
+ * context transitions and states to prevent inconsistency on those of
+ * other CPUs.
+ * If a task triggers an exception in userspace, sleeps in the exception
+ * handler and then migrates to another CPU, that new CPU must know where
+ * the exception returns to by the time we call exception_exit().
+ * This information can only be provided by the previous CPU when it called
+ * exception_enter().
+ * OTOH we can spare the calls to vtime and RCU when context_tracking.active
+ * is false because we know that CPU is not tickless.
*/
- vtime_user_enter(current);
- rcu_user_enter();
__this_cpu_write(context_tracking.state, IN_USER);
}
local_irq_restore(flags);
@@ -87,10 +113,9 @@ void user_enter(void)
*/
void __sched notrace preempt_schedule_context(void)
{
- struct thread_info *ti = current_thread_info();
enum ctx_state prev_ctx;
- if (likely(ti->preempt_count || irqs_disabled()))
+ if (likely(!preemptible()))
return;
/*
@@ -112,8 +137,8 @@ EXPORT_SYMBOL_GPL(preempt_schedule_context);
#endif /* CONFIG_PREEMPT */
/**
- * user_exit - Inform the context tracking that the CPU is
- * exiting userspace mode and entering the kernel.
+ * context_tracking_user_exit - Inform the context tracking that the CPU is
+ * exiting userspace mode and entering the kernel.
*
* This function must be called after we entered the kernel from userspace
* before any use of RCU read side critical section. This potentially include
@@ -122,7 +147,7 @@ EXPORT_SYMBOL_GPL(preempt_schedule_context);
* This call supports re-entrancy. This way it can be called from any exception
* handler without needing to know if we came from userspace or not.
*/
-void user_exit(void)
+void context_tracking_user_exit(void)
{
unsigned long flags;
@@ -131,38 +156,22 @@ void user_exit(void)
local_irq_save(flags);
if (__this_cpu_read(context_tracking.state) == IN_USER) {
- /*
- * We are going to run code that may use RCU. Inform
- * RCU core about that (ie: we may need the tick again).
- */
- rcu_user_exit();
- vtime_user_exit(current);
+ if (__this_cpu_read(context_tracking.active)) {
+ /*
+ * We are going to run code that may use RCU. Inform
+ * RCU core about that (ie: we may need the tick again).
+ */
+ rcu_user_exit();
+ vtime_user_exit(current);
+ trace_user_exit(0);
+ }
__this_cpu_write(context_tracking.state, IN_KERNEL);
}
local_irq_restore(flags);
}
-void guest_enter(void)
-{
- if (vtime_accounting_enabled())
- vtime_guest_enter(current);
- else
- __guest_enter();
-}
-EXPORT_SYMBOL_GPL(guest_enter);
-
-void guest_exit(void)
-{
- if (vtime_accounting_enabled())
- vtime_guest_exit(current);
- else
- __guest_exit();
-}
-EXPORT_SYMBOL_GPL(guest_exit);
-
-
/**
- * context_tracking_task_switch - context switch the syscall callbacks
+ * __context_tracking_task_switch - context switch the syscall callbacks
* @prev: the task that is being switched out
* @next: the task that is being switched in
*
@@ -174,11 +183,19 @@ EXPORT_SYMBOL_GPL(guest_exit);
* migrate to some CPU that doesn't do the context tracking. As such the TIF
* flag may not be desired there.
*/
-void context_tracking_task_switch(struct task_struct *prev,
- struct task_struct *next)
+void __context_tracking_task_switch(struct task_struct *prev,
+ struct task_struct *next)
{
- if (__this_cpu_read(context_tracking.active)) {
- clear_tsk_thread_flag(prev, TIF_NOHZ);
- set_tsk_thread_flag(next, TIF_NOHZ);
- }
+ clear_tsk_thread_flag(prev, TIF_NOHZ);
+ set_tsk_thread_flag(next, TIF_NOHZ);
}
+
+#ifdef CONFIG_CONTEXT_TRACKING_FORCE
+void __init context_tracking_init(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ context_tracking_cpu_set(cpu);
+}
+#endif
diff --git a/kernel/cpu.c b/kernel/cpu.c
index b2b227b8212..d7f07a2da5a 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -113,7 +113,7 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
* get_online_cpus() not an api which is called all that often.
*
*/
-static void cpu_hotplug_begin(void)
+void cpu_hotplug_begin(void)
{
cpu_hotplug.active_writer = current;
@@ -127,7 +127,7 @@ static void cpu_hotplug_begin(void)
}
}
-static void cpu_hotplug_done(void)
+void cpu_hotplug_done(void)
{
cpu_hotplug.active_writer = NULL;
mutex_unlock(&cpu_hotplug.lock);
@@ -154,10 +154,7 @@ void cpu_hotplug_enable(void)
cpu_maps_update_done();
}
-#else /* #if CONFIG_HOTPLUG_CPU */
-static void cpu_hotplug_begin(void) {}
-static void cpu_hotplug_done(void) {}
-#endif /* #else #if CONFIG_HOTPLUG_CPU */
+#endif /* CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index e5657788fed..6bf981e13c4 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -68,10 +68,6 @@
*/
int number_of_cpusets __read_mostly;
-/* Forward declare cgroup structures */
-struct cgroup_subsys cpuset_subsys;
-struct cpuset;
-
/* See "Frequency meter" comments, below. */
struct fmeter {
@@ -115,27 +111,20 @@ struct cpuset {
int relax_domain_level;
};
-/* Retrieve the cpuset for a cgroup */
-static inline struct cpuset *cgroup_cs(struct cgroup *cgrp)
+static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
- return container_of(cgroup_subsys_state(cgrp, cpuset_subsys_id),
- struct cpuset, css);
+ return css ? container_of(css, struct cpuset, css) : NULL;
}
/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
- return container_of(task_subsys_state(task, cpuset_subsys_id),
- struct cpuset, css);
+ return css_cs(task_css(task, cpuset_subsys_id));
}
-static inline struct cpuset *parent_cs(const struct cpuset *cs)
+static inline struct cpuset *parent_cs(struct cpuset *cs)
{
- struct cgroup *pcgrp = cs->css.cgroup->parent;
-
- if (pcgrp)
- return cgroup_cs(pcgrp);
- return NULL;
+ return css_cs(css_parent(&cs->css));
}
#ifdef CONFIG_NUMA
@@ -212,29 +201,30 @@ static struct cpuset top_cpuset = {
/**
* cpuset_for_each_child - traverse online children of a cpuset
* @child_cs: loop cursor pointing to the current child
- * @pos_cgrp: used for iteration
+ * @pos_css: used for iteration
* @parent_cs: target cpuset to walk children of
*
* Walk @child_cs through the online children of @parent_cs. Must be used
* with RCU read locked.
*/
-#define cpuset_for_each_child(child_cs, pos_cgrp, parent_cs) \
- cgroup_for_each_child((pos_cgrp), (parent_cs)->css.cgroup) \
- if (is_cpuset_online(((child_cs) = cgroup_cs((pos_cgrp)))))
+#define cpuset_for_each_child(child_cs, pos_css, parent_cs) \
+ css_for_each_child((pos_css), &(parent_cs)->css) \
+ if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))
/**
* cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
* @des_cs: loop cursor pointing to the current descendant
- * @pos_cgrp: used for iteration
+ * @pos_css: used for iteration
* @root_cs: target cpuset to walk ancestor of
*
* Walk @des_cs through the online descendants of @root_cs. Must be used
- * with RCU read locked. The caller may modify @pos_cgrp by calling
- * cgroup_rightmost_descendant() to skip subtree.
+ * with RCU read locked. The caller may modify @pos_css by calling
+ * css_rightmost_descendant() to skip subtree. @root_cs is included in the
+ * iteration and the first node to be visited.
*/
-#define cpuset_for_each_descendant_pre(des_cs, pos_cgrp, root_cs) \
- cgroup_for_each_descendant_pre((pos_cgrp), (root_cs)->css.cgroup) \
- if (is_cpuset_online(((des_cs) = cgroup_cs((pos_cgrp)))))
+#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs) \
+ css_for_each_descendant_pre((pos_css), &(root_cs)->css) \
+ if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
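Because the css iterator now visits @root_cs itself first, callers that only want strict descendants skip the root explicitly and can still prune subtrees. A usage sketch, mirroring the converted callers later in this patch:

	struct cpuset *cp;
	struct cgroup_subsys_state *pos_css;

	rcu_read_lock();
	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
		if (cp == &top_cpuset)
			continue;	/* the root is visited first; skip it here */

		if (cpumask_empty(cp->cpus_allowed)) {
			/* children can only shrink the mask; prune @cp's subtree */
			pos_css = css_rightmost_descendant(pos_css);
			continue;
		}

		/* ... handle @cp ... */
	}
	rcu_read_unlock();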
/*
* There are two global mutexes guarding cpuset structures - cpuset_mutex
@@ -320,8 +310,7 @@ static struct file_system_type cpuset_fs_type = {
*
* Call with callback_mutex held.
*/
-static void guarantee_online_cpus(const struct cpuset *cs,
- struct cpumask *pmask)
+static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
{
while (!cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
cs = parent_cs(cs);
@@ -339,7 +328,7 @@ static void guarantee_online_cpus(const struct cpuset *cs,
*
* Call with callback_mutex held.
*/
-static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask)
+static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
while (!nodes_intersects(cs->mems_allowed, node_states[N_MEMORY]))
cs = parent_cs(cs);
@@ -384,7 +373,7 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
* alloc_trial_cpuset - allocate a trial cpuset
* @cs: the cpuset that the trial cpuset duplicates
*/
-static struct cpuset *alloc_trial_cpuset(const struct cpuset *cs)
+static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
{
struct cpuset *trial;
@@ -431,9 +420,9 @@ static void free_trial_cpuset(struct cpuset *trial)
* Return 0 if valid, -errno if not.
*/
-static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
+static int validate_change(struct cpuset *cur, struct cpuset *trial)
{
- struct cgroup *cgrp;
+ struct cgroup_subsys_state *css;
struct cpuset *c, *par;
int ret;
@@ -441,7 +430,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
/* Each of our child cpusets must be a subset of us */
ret = -EBUSY;
- cpuset_for_each_child(c, cgrp, cur)
+ cpuset_for_each_child(c, css, cur)
if (!is_cpuset_subset(c, trial))
goto out;
@@ -462,7 +451,7 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
* overlap
*/
ret = -EINVAL;
- cpuset_for_each_child(c, cgrp, par) {
+ cpuset_for_each_child(c, css, par) {
if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
c != cur &&
cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
@@ -475,13 +464,17 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
/*
* Cpusets with tasks - existing or newly being attached - can't
- * have empty cpus_allowed or mems_allowed.
+ * be changed to have empty cpus_allowed or mems_allowed.
*/
ret = -ENOSPC;
- if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) &&
- (cpumask_empty(trial->cpus_allowed) &&
- nodes_empty(trial->mems_allowed)))
- goto out;
+ if (cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress) {
+ if (!cpumask_empty(cur->cpus_allowed) &&
+ cpumask_empty(trial->cpus_allowed))
+ goto out;
+ if (!nodes_empty(cur->mems_allowed) &&
+ nodes_empty(trial->mems_allowed))
+ goto out;
+ }
ret = 0;
out:
@@ -511,13 +504,16 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
struct cpuset *root_cs)
{
struct cpuset *cp;
- struct cgroup *pos_cgrp;
+ struct cgroup_subsys_state *pos_css;
rcu_read_lock();
- cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
+ cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
+ if (cp == root_cs)
+ continue;
+
/* skip the whole subtree if @cp doesn't have any CPU */
if (cpumask_empty(cp->cpus_allowed)) {
- pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
+ pos_css = css_rightmost_descendant(pos_css);
continue;
}
@@ -592,7 +588,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
struct sched_domain_attr *dattr; /* attributes for custom domains */
int ndoms = 0; /* number of sched domains in result */
int nslot; /* next empty doms[] struct cpumask slot */
- struct cgroup *pos_cgrp;
+ struct cgroup_subsys_state *pos_css;
doms = NULL;
dattr = NULL;
@@ -621,7 +617,9 @@ static int generate_sched_domains(cpumask_var_t **domains,
csn = 0;
rcu_read_lock();
- cpuset_for_each_descendant_pre(cp, pos_cgrp, &top_cpuset) {
+ cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
+ if (cp == &top_cpuset)
+ continue;
/*
* Continue traversing beyond @cp iff @cp has some CPUs and
* isn't load balancing. The former is obvious. The
@@ -638,7 +636,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
csa[csn++] = cp;
/* skip @cp's subtree */
- pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
+ pos_css = css_rightmost_descendant(pos_css);
}
rcu_read_unlock();
@@ -833,52 +831,45 @@ static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs)
/**
* cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
* @tsk: task to test
- * @scan: struct cgroup_scanner containing the cgroup of the task
+ * @data: cpuset @tsk belongs to
*
- * Called by cgroup_scan_tasks() for each task in a cgroup whose
- * cpus_allowed mask needs to be changed.
+ * Called by css_scan_tasks() for each task in a cgroup whose cpus_allowed
+ * mask needs to be changed.
*
* We don't need to re-check for the cgroup/cpuset membership, since we're
* holding cpuset_mutex at this point.
*/
-static void cpuset_change_cpumask(struct task_struct *tsk,
- struct cgroup_scanner *scan)
+static void cpuset_change_cpumask(struct task_struct *tsk, void *data)
{
- struct cpuset *cpus_cs;
+ struct cpuset *cs = data;
+ struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
- cpus_cs = effective_cpumask_cpuset(cgroup_cs(scan->cg));
set_cpus_allowed_ptr(tsk, cpus_cs->cpus_allowed);
}
/**
* update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
* @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
- * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
+ * @heap: if NULL, defer allocating heap memory to css_scan_tasks()
*
* Called with cpuset_mutex held
*
- * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
+ * The css_scan_tasks() function will scan all the tasks in a cgroup,
* calling callback functions for each.
*
- * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
+ * No return value. It's guaranteed that css_scan_tasks() always returns 0
* if @heap != NULL.
*/
static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
{
- struct cgroup_scanner scan;
-
- scan.cg = cs->css.cgroup;
- scan.test_task = NULL;
- scan.process_task = cpuset_change_cpumask;
- scan.heap = heap;
- cgroup_scan_tasks(&scan);
+ css_scan_tasks(&cs->css, NULL, cpuset_change_cpumask, cs, heap);
}
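The new css_scan_tasks() interface threads an opaque cookie through to the per-task callback instead of a struct cgroup_scanner. A minimal hypothetical user, following the same call shape as update_tasks_cpumask(); example_process() and example_scan() are illustrative names only.

#include <linux/cgroup.h>
#include <linux/prio_heap.h>
#include <linux/sched.h>

static void example_process(struct task_struct *task, void *data)
{
	struct cpuset *cs = data;	/* the cookie passed to css_scan_tasks() */

	pr_debug("task %d is in cpuset %p\n", task_pid_nr(task), cs);
}

static void example_scan(struct cpuset *cs, struct ptr_heap *heap)
{
	/* NULL test callback: visit every task attached to the css */
	css_scan_tasks(&cs->css, NULL, example_process, cs, heap);
}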
/*
* update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy.
* @root_cs: the root cpuset of the hierarchy
* @update_root: update root cpuset or not?
- * @heap: the heap used by cgroup_scan_tasks()
+ * @heap: the heap used by css_scan_tasks()
*
* This will update cpumasks of tasks in @root_cs and all other empty cpusets
* which take on cpumask of @root_cs.
@@ -889,17 +880,19 @@ static void update_tasks_cpumask_hier(struct cpuset *root_cs,
bool update_root, struct ptr_heap *heap)
{
struct cpuset *cp;
- struct cgroup *pos_cgrp;
-
- if (update_root)
- update_tasks_cpumask(root_cs, heap);
+ struct cgroup_subsys_state *pos_css;
rcu_read_lock();
- cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
- /* skip the whole subtree if @cp have some CPU */
- if (!cpumask_empty(cp->cpus_allowed)) {
- pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
- continue;
+ cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
+ if (cp == root_cs) {
+ if (!update_root)
+ continue;
+ } else {
+ /* skip the whole subtree if @cp has some CPUs */
+ if (!cpumask_empty(cp->cpus_allowed)) {
+ pos_css = css_rightmost_descendant(pos_css);
+ continue;
+ }
}
if (!css_tryget(&cp->css))
continue;
@@ -1055,20 +1048,24 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
task_unlock(tsk);
}
+struct cpuset_change_nodemask_arg {
+ struct cpuset *cs;
+ nodemask_t *newmems;
+};
+
/*
* Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
* of it to cpuset's new mems_allowed, and migrate pages to new nodes if
* memory_migrate flag is set. Called with cpuset_mutex held.
*/
-static void cpuset_change_nodemask(struct task_struct *p,
- struct cgroup_scanner *scan)
+static void cpuset_change_nodemask(struct task_struct *p, void *data)
{
- struct cpuset *cs = cgroup_cs(scan->cg);
+ struct cpuset_change_nodemask_arg *arg = data;
+ struct cpuset *cs = arg->cs;
struct mm_struct *mm;
int migrate;
- nodemask_t *newmems = scan->data;
- cpuset_change_task_nodemask(p, newmems);
+ cpuset_change_task_nodemask(p, arg->newmems);
mm = get_task_mm(p);
if (!mm)
@@ -1078,7 +1075,7 @@ static void cpuset_change_nodemask(struct task_struct *p,
mpol_rebind_mm(mm, &cs->mems_allowed);
if (migrate)
- cpuset_migrate_mm(mm, &cs->old_mems_allowed, newmems);
+ cpuset_migrate_mm(mm, &cs->old_mems_allowed, arg->newmems);
mmput(mm);
}
@@ -1087,28 +1084,22 @@ static void *cpuset_being_rebound;
/**
* update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
* @cs: the cpuset in which each task's mems_allowed mask needs to be changed
- * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
+ * @heap: if NULL, defer allocating heap memory to css_scan_tasks()
*
- * Called with cpuset_mutex held
- * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
- * if @heap != NULL.
+ * Called with cpuset_mutex held. No return value. It's guaranteed that
+ * css_scan_tasks() always returns 0 if @heap != NULL.
*/
static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
{
static nodemask_t newmems; /* protected by cpuset_mutex */
- struct cgroup_scanner scan;
struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
+ struct cpuset_change_nodemask_arg arg = { .cs = cs,
+ .newmems = &newmems };
cpuset_being_rebound = cs; /* causes mpol_dup() rebind */
guarantee_online_mems(mems_cs, &newmems);
- scan.cg = cs->css.cgroup;
- scan.test_task = NULL;
- scan.process_task = cpuset_change_nodemask;
- scan.heap = heap;
- scan.data = &newmems;
-
/*
* The mpol_rebind_mm() call takes mmap_sem, which we couldn't
* take while holding tasklist_lock. Forks can happen - the
@@ -1119,7 +1110,7 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
* It's ok if we rebind the same mm twice; mpol_rebind_mm()
* is idempotent. Also migrate pages in each mm to new nodes.
*/
- cgroup_scan_tasks(&scan);
+ css_scan_tasks(&cs->css, NULL, cpuset_change_nodemask, &arg, heap);
/*
* All the tasks' nodemasks have been updated, update
@@ -1135,7 +1126,7 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
* update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy.
* @cs: the root cpuset of the hierarchy
* @update_root: update the root cpuset or not?
- * @heap: the heap used by cgroup_scan_tasks()
+ * @heap: the heap used by css_scan_tasks()
*
* This will update nodemasks of tasks in @root_cs and all other empty cpusets
* which take on nodemask of @root_cs.
@@ -1146,17 +1137,19 @@ static void update_tasks_nodemask_hier(struct cpuset *root_cs,
bool update_root, struct ptr_heap *heap)
{
struct cpuset *cp;
- struct cgroup *pos_cgrp;
-
- if (update_root)
- update_tasks_nodemask(root_cs, heap);
+ struct cgroup_subsys_state *pos_css;
rcu_read_lock();
- cpuset_for_each_descendant_pre(cp, pos_cgrp, root_cs) {
- /* skip the whole subtree if @cp have some CPU */
- if (!nodes_empty(cp->mems_allowed)) {
- pos_cgrp = cgroup_rightmost_descendant(pos_cgrp);
- continue;
+ cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
+ if (cp == root_cs) {
+ if (!update_root)
+ continue;
+ } else {
+ /* skip the whole subtree if @cp has some memory */
+ if (!nodes_empty(cp->mems_allowed)) {
+ pos_css = css_rightmost_descendant(pos_css);
+ continue;
+ }
}
if (!css_tryget(&cp->css))
continue;
@@ -1263,44 +1256,39 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
return 0;
}
-/*
+/**
* cpuset_change_flag - make a task's spread flags the same as its cpuset's
* @tsk: task to be updated
- * @scan: struct cgroup_scanner containing the cgroup of the task
+ * @data: cpuset @tsk belongs to
*
- * Called by cgroup_scan_tasks() for each task in a cgroup.
+ * Called by css_scan_tasks() for each task in a cgroup.
*
* We don't need to re-check for the cgroup/cpuset membership, since we're
* holding cpuset_mutex at this point.
*/
-static void cpuset_change_flag(struct task_struct *tsk,
- struct cgroup_scanner *scan)
+static void cpuset_change_flag(struct task_struct *tsk, void *data)
{
- cpuset_update_task_spread_flag(cgroup_cs(scan->cg), tsk);
+ struct cpuset *cs = data;
+
+ cpuset_update_task_spread_flag(cs, tsk);
}
-/*
+/**
* update_tasks_flags - update the spread flags of tasks in the cpuset.
* @cs: the cpuset in which each task's spread flags needs to be changed
- * @heap: if NULL, defer allocating heap memory to cgroup_scan_tasks()
+ * @heap: if NULL, defer allocating heap memory to css_scan_tasks()
*
* Called with cpuset_mutex held
*
- * The cgroup_scan_tasks() function will scan all the tasks in a cgroup,
+ * The css_scan_tasks() function will scan all the tasks in a cgroup,
* calling callback functions for each.
*
- * No return value. It's guaranteed that cgroup_scan_tasks() always returns 0
+ * No return value. It's guaranteed that css_scan_tasks() always returns 0
* if @heap != NULL.
*/
static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
{
- struct cgroup_scanner scan;
-
- scan.cg = cs->css.cgroup;
- scan.test_task = NULL;
- scan.process_task = cpuset_change_flag;
- scan.heap = heap;
- cgroup_scan_tasks(&scan);
+ css_scan_tasks(&cs->css, NULL, cpuset_change_flag, cs, heap);
}
/*
@@ -1458,9 +1446,10 @@ static int fmeter_getrate(struct fmeter *fmp)
}
/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
-static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static int cpuset_can_attach(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset)
{
- struct cpuset *cs = cgroup_cs(cgrp);
+ struct cpuset *cs = css_cs(css);
struct task_struct *task;
int ret;
@@ -1471,11 +1460,11 @@ static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
* flag is set.
*/
ret = -ENOSPC;
- if (!cgroup_sane_behavior(cgrp) &&
+ if (!cgroup_sane_behavior(css->cgroup) &&
(cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
goto out_unlock;
- cgroup_taskset_for_each(task, cgrp, tset) {
+ cgroup_taskset_for_each(task, css, tset) {
/*
* Kthreads which disallow setaffinity shouldn't be moved
* to a new cpuset; we don't want to change their cpu
@@ -1504,11 +1493,11 @@ out_unlock:
return ret;
}
-static void cpuset_cancel_attach(struct cgroup *cgrp,
+static void cpuset_cancel_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
mutex_lock(&cpuset_mutex);
- cgroup_cs(cgrp)->attach_in_progress--;
+ css_cs(css)->attach_in_progress--;
mutex_unlock(&cpuset_mutex);
}
@@ -1519,16 +1508,18 @@ static void cpuset_cancel_attach(struct cgroup *cgrp,
*/
static cpumask_var_t cpus_attach;
-static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static void cpuset_attach(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset)
{
/* static buf protected by cpuset_mutex */
static nodemask_t cpuset_attach_nodemask_to;
struct mm_struct *mm;
struct task_struct *task;
struct task_struct *leader = cgroup_taskset_first(tset);
- struct cgroup *oldcgrp = cgroup_taskset_cur_cgroup(tset);
- struct cpuset *cs = cgroup_cs(cgrp);
- struct cpuset *oldcs = cgroup_cs(oldcgrp);
+ struct cgroup_subsys_state *oldcss = cgroup_taskset_cur_css(tset,
+ cpuset_subsys_id);
+ struct cpuset *cs = css_cs(css);
+ struct cpuset *oldcs = css_cs(oldcss);
struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
@@ -1542,7 +1533,7 @@ static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to);
- cgroup_taskset_for_each(task, cgrp, tset) {
+ cgroup_taskset_for_each(task, css, tset) {
/*
* can_attach beforehand should guarantee that this doesn't
* fail. TODO: have a better way to handle failure here
@@ -1604,15 +1595,18 @@ typedef enum {
FILE_SPREAD_SLAB,
} cpuset_filetype_t;
-static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
+static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 val)
{
- struct cpuset *cs = cgroup_cs(cgrp);
+ struct cpuset *cs = css_cs(css);
cpuset_filetype_t type = cft->private;
- int retval = -ENODEV;
+ int retval = 0;
mutex_lock(&cpuset_mutex);
- if (!is_cpuset_online(cs))
+ if (!is_cpuset_online(cs)) {
+ retval = -ENODEV;
goto out_unlock;
+ }
switch (type) {
case FILE_CPU_EXCLUSIVE:
@@ -1651,9 +1645,10 @@ out_unlock:
return retval;
}
-static int cpuset_write_s64(struct cgroup *cgrp, struct cftype *cft, s64 val)
+static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
+ s64 val)
{
- struct cpuset *cs = cgroup_cs(cgrp);
+ struct cpuset *cs = css_cs(css);
cpuset_filetype_t type = cft->private;
int retval = -ENODEV;
@@ -1677,10 +1672,10 @@ out_unlock:
/*
* Common handling for a write to a "cpus" or "mems" file.
*/
-static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
- const char *buf)
+static int cpuset_write_resmask(struct cgroup_subsys_state *css,
+ struct cftype *cft, const char *buf)
{
- struct cpuset *cs = cgroup_cs(cgrp);
+ struct cpuset *cs = css_cs(css);
struct cpuset *trialcs;
int retval = -ENODEV;
@@ -1759,13 +1754,12 @@ static size_t cpuset_sprintf_memlist(char *page, struct cpuset *cs)
return count;
}
-static ssize_t cpuset_common_file_read(struct cgroup *cgrp,
- struct cftype *cft,
- struct file *file,
- char __user *buf,
- size_t nbytes, loff_t *ppos)
+static ssize_t cpuset_common_file_read(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct file *file,
+ char __user *buf, size_t nbytes,
+ loff_t *ppos)
{
- struct cpuset *cs = cgroup_cs(cgrp);
+ struct cpuset *cs = css_cs(css);
cpuset_filetype_t type = cft->private;
char *page;
ssize_t retval = 0;
@@ -1795,9 +1789,9 @@ out:
return retval;
}
-static u64 cpuset_read_u64(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
{
- struct cpuset *cs = cgroup_cs(cgrp);
+ struct cpuset *cs = css_cs(css);
cpuset_filetype_t type = cft->private;
switch (type) {
case FILE_CPU_EXCLUSIVE:
@@ -1826,9 +1820,9 @@ static u64 cpuset_read_u64(struct cgroup *cgrp, struct cftype *cft)
return 0;
}
-static s64 cpuset_read_s64(struct cgroup *cgrp, struct cftype *cft)
+static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
{
- struct cpuset *cs = cgroup_cs(cgrp);
+ struct cpuset *cs = css_cs(css);
cpuset_filetype_t type = cft->private;
switch (type) {
case FILE_SCHED_RELAX_DOMAIN_LEVEL:
@@ -1943,11 +1937,12 @@ static struct cftype files[] = {
* cgrp: control group that the new cpuset will be part of
*/
-static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cgrp)
+static struct cgroup_subsys_state *
+cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct cpuset *cs;
- if (!cgrp->parent)
+ if (!parent_css)
return &top_cpuset.css;
cs = kzalloc(sizeof(*cs), GFP_KERNEL);
@@ -1967,12 +1962,12 @@ static struct cgroup_subsys_state *cpuset_css_alloc(struct cgroup *cgrp)
return &cs->css;
}
-static int cpuset_css_online(struct cgroup *cgrp)
+static int cpuset_css_online(struct cgroup_subsys_state *css)
{
- struct cpuset *cs = cgroup_cs(cgrp);
+ struct cpuset *cs = css_cs(css);
struct cpuset *parent = parent_cs(cs);
struct cpuset *tmp_cs;
- struct cgroup *pos_cg;
+ struct cgroup_subsys_state *pos_css;
if (!parent)
return 0;
@@ -1987,7 +1982,7 @@ static int cpuset_css_online(struct cgroup *cgrp)
number_of_cpusets++;
- if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags))
+ if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
goto out_unlock;
/*
@@ -2004,7 +1999,7 @@ static int cpuset_css_online(struct cgroup *cgrp)
* (and likewise for mems) to the new cgroup.
*/
rcu_read_lock();
- cpuset_for_each_child(tmp_cs, pos_cg, parent) {
+ cpuset_for_each_child(tmp_cs, pos_css, parent) {
if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
rcu_read_unlock();
goto out_unlock;
@@ -2021,9 +2016,15 @@ out_unlock:
return 0;
}
-static void cpuset_css_offline(struct cgroup *cgrp)
+/*
+ * If the cpuset being removed has its flag 'sched_load_balance'
+ * enabled, then simulate turning sched_load_balance off, which
+ * will call rebuild_sched_domains_locked().
+ */
+
+static void cpuset_css_offline(struct cgroup_subsys_state *css)
{
- struct cpuset *cs = cgroup_cs(cgrp);
+ struct cpuset *cs = css_cs(css);
mutex_lock(&cpuset_mutex);
@@ -2036,15 +2037,9 @@ static void cpuset_css_offline(struct cgroup *cgrp)
mutex_unlock(&cpuset_mutex);
}
-/*
- * If the cpuset being removed has its flag 'sched_load_balance'
- * enabled, then simulate turning sched_load_balance off, which
- * will call rebuild_sched_domains_locked().
- */
-
-static void cpuset_css_free(struct cgroup *cgrp)
+static void cpuset_css_free(struct cgroup_subsys_state *css)
{
- struct cpuset *cs = cgroup_cs(cgrp);
+ struct cpuset *cs = css_cs(css);
free_cpumask_var(cs->cpus_allowed);
kfree(cs);
@@ -2251,11 +2246,11 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
/* if cpus or mems changed, we need to propagate to descendants */
if (cpus_updated || mems_updated) {
struct cpuset *cs;
- struct cgroup *pos_cgrp;
+ struct cgroup_subsys_state *pos_css;
rcu_read_lock();
- cpuset_for_each_descendant_pre(cs, pos_cgrp, &top_cpuset) {
- if (!css_tryget(&cs->css))
+ cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
+ if (cs == &top_cpuset || !css_tryget(&cs->css))
continue;
rcu_read_unlock();
@@ -2344,7 +2339,7 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
{
- const struct cpuset *cpus_cs;
+ struct cpuset *cpus_cs;
rcu_read_lock();
cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
@@ -2417,7 +2412,7 @@ int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
* callback_mutex. If no ancestor is mem_exclusive or mem_hardwall
* (an unusual configuration), then returns the root cpuset.
*/
-static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
+static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
{
while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
cs = parent_cs(cs);
@@ -2487,7 +2482,7 @@ static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
*/
int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
- const struct cpuset *cs; /* current cpuset ancestors */
+ struct cpuset *cs; /* current cpuset ancestors */
int allowed; /* is allocation in zone z allowed? */
if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
@@ -2725,7 +2720,7 @@ int proc_cpuset_show(struct seq_file *m, void *unused_v)
goto out_free;
rcu_read_lock();
- css = task_subsys_state(tsk, cpuset_subsys_id);
+ css = task_css(tsk, cpuset_subsys_id);
retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
rcu_read_unlock();
if (retval < 0)
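
The cpuset hunks above are part of the cgroup core rework: every cftype handler and css callback now receives a struct cgroup_subsys_state instead of a struct cgroup, and the cpuset is recovered with the css_cs() accessor rather than cgroup_cs(). That accessor is the usual container_of() embedding trick; a minimal userspace sketch of the pattern (struct names invented) looks like this:

#include <stddef.h>
#include <stdio.h>

/* Generic "recover the enclosing struct from a member pointer" helper,
 * the same idiom the kernel's container_of() macro implements. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for struct cgroup_subsys_state (hypothetical, trimmed). */
struct css {
	int refcnt;
};

/* Stand-in for struct cpuset: the css is embedded, so a css pointer can
 * be converted back to the containing object via container_of. */
struct cpuset_like {
	struct css css;
	unsigned long cpus_allowed;
};

/* Equivalent of css_cs(): handlers receive only the css. */
static struct cpuset_like *css_to_cpuset(struct css *c)
{
	return container_of(c, struct cpuset_like, css);
}

int main(void)
{
	struct cpuset_like cs = { .css = { .refcnt = 1 }, .cpus_allowed = 0xf };
	struct css *handler_arg = &cs.css;	/* what a write_u64 handler gets */

	printf("cpus_allowed = %#lx\n", css_to_cpuset(handler_arg)->cpus_allowed);
	return 0;
}
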
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index c77206184b8..97b67df8fbf 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -116,6 +116,9 @@ int get_callchain_buffers(void)
err = alloc_callchain_buffers();
exit:
+ if (err)
+ atomic_dec(&nr_callchain_events);
+
mutex_unlock(&callchain_mutex);
return err;
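
The callchain.c hunk makes get_callchain_buffers() drop its reference on nr_callchain_events again when buffer allocation fails, so the counter cannot stay elevated with no buffers behind it. A compressed userspace sketch of that increment-then-roll-back-on-error shape (all names invented):

#include <stdatomic.h>
#include <stdlib.h>

static atomic_int nr_users;	/* stand-in for nr_callchain_events */

/* Hypothetical buffer allocation that may fail. */
static void *alloc_buffers(void)
{
	return malloc(64);
}

/* Returns 0 on success; on failure the user count is rolled back so a
 * later caller does not see a phantom user with no buffers allocated. */
static int get_buffers(void **out)
{
	int err = 0;

	atomic_fetch_add(&nr_users, 1);
	*out = alloc_buffers();
	if (!*out) {
		err = -1;
		atomic_fetch_sub(&nr_users, 1);	/* undo on the error path */
	}
	return err;
}

int main(void)
{
	void *buf;

	return get_buffers(&buf) ? 1 : 0;
}
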
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f86599e8c12..2207efc941d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -145,6 +145,7 @@ static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;
+static atomic_t nr_freq_events __read_mostly;
static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
@@ -340,8 +341,8 @@ struct perf_cgroup {
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
- return container_of(task_subsys_state(task, perf_subsys_id),
- struct perf_cgroup, css);
+ return container_of(task_css(task, perf_subsys_id),
+ struct perf_cgroup, css);
}
static inline bool
@@ -591,7 +592,9 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
if (!f.file)
return -EBADF;
- css = cgroup_css_from_dir(f.file, perf_subsys_id);
+ rcu_read_lock();
+
+ css = css_from_dir(f.file->f_dentry, &perf_subsys);
if (IS_ERR(css)) {
ret = PTR_ERR(css);
goto out;
@@ -617,6 +620,7 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
ret = -EINVAL;
}
out:
+ rcu_read_unlock();
fdput(f);
return ret;
}
@@ -869,12 +873,8 @@ static void perf_pmu_rotate_start(struct pmu *pmu)
WARN_ON(!irqs_disabled());
- if (list_empty(&cpuctx->rotation_list)) {
- int was_empty = list_empty(head);
+ if (list_empty(&cpuctx->rotation_list))
list_add(&cpuctx->rotation_list, head);
- if (was_empty)
- tick_nohz_full_kick();
- }
}
static void get_ctx(struct perf_event_context *ctx)
@@ -1216,6 +1216,9 @@ static void perf_event__id_header_size(struct perf_event *event)
if (sample_type & PERF_SAMPLE_TIME)
size += sizeof(data->time);
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ size += sizeof(data->id);
+
if (sample_type & PERF_SAMPLE_ID)
size += sizeof(data->id);
@@ -2712,7 +2715,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
hwc = &event->hw;
- if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) {
+ if (hwc->interrupts == MAX_INTERRUPTS) {
hwc->interrupts = 0;
perf_log_throttle(event, 1);
event->pmu->start(event, 0);
@@ -2811,10 +2814,11 @@ done:
#ifdef CONFIG_NO_HZ_FULL
bool perf_event_can_stop_tick(void)
{
- if (list_empty(&__get_cpu_var(rotation_list)))
- return true;
- else
+ if (atomic_read(&nr_freq_events) ||
+ __this_cpu_read(perf_throttled_count))
return false;
+ else
+ return true;
}
#endif
@@ -3128,36 +3132,63 @@ static void free_event_rcu(struct rcu_head *head)
static void ring_buffer_put(struct ring_buffer *rb);
static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
-static void free_event(struct perf_event *event)
+static void unaccount_event_cpu(struct perf_event *event, int cpu)
{
- irq_work_sync(&event->pending);
+ if (event->parent)
+ return;
+
+ if (has_branch_stack(event)) {
+ if (!(event->attach_state & PERF_ATTACH_TASK))
+ atomic_dec(&per_cpu(perf_branch_stack_events, cpu));
+ }
+ if (is_cgroup_event(event))
+ atomic_dec(&per_cpu(perf_cgroup_events, cpu));
+}
+static void unaccount_event(struct perf_event *event)
+{
+ if (event->parent)
+ return;
+
+ if (event->attach_state & PERF_ATTACH_TASK)
+ static_key_slow_dec_deferred(&perf_sched_events);
+ if (event->attr.mmap || event->attr.mmap_data)
+ atomic_dec(&nr_mmap_events);
+ if (event->attr.comm)
+ atomic_dec(&nr_comm_events);
+ if (event->attr.task)
+ atomic_dec(&nr_task_events);
+ if (event->attr.freq)
+ atomic_dec(&nr_freq_events);
+ if (is_cgroup_event(event))
+ static_key_slow_dec_deferred(&perf_sched_events);
+ if (has_branch_stack(event))
+ static_key_slow_dec_deferred(&perf_sched_events);
+
+ unaccount_event_cpu(event, event->cpu);
+}
+
+static void __free_event(struct perf_event *event)
+{
if (!event->parent) {
- if (event->attach_state & PERF_ATTACH_TASK)
- static_key_slow_dec_deferred(&perf_sched_events);
- if (event->attr.mmap || event->attr.mmap_data)
- atomic_dec(&nr_mmap_events);
- if (event->attr.comm)
- atomic_dec(&nr_comm_events);
- if (event->attr.task)
- atomic_dec(&nr_task_events);
if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
put_callchain_buffers();
- if (is_cgroup_event(event)) {
- atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
- static_key_slow_dec_deferred(&perf_sched_events);
- }
-
- if (has_branch_stack(event)) {
- static_key_slow_dec_deferred(&perf_sched_events);
- /* is system-wide event */
- if (!(event->attach_state & PERF_ATTACH_TASK)) {
- atomic_dec(&per_cpu(perf_branch_stack_events,
- event->cpu));
- }
- }
}
+ if (event->destroy)
+ event->destroy(event);
+
+ if (event->ctx)
+ put_ctx(event->ctx);
+
+ call_rcu(&event->rcu_head, free_event_rcu);
+}
+static void free_event(struct perf_event *event)
+{
+ irq_work_sync(&event->pending);
+
+ unaccount_event(event);
+
if (event->rb) {
struct ring_buffer *rb;
@@ -3180,13 +3211,8 @@ static void free_event(struct perf_event *event)
if (is_cgroup_event(event))
perf_detach_cgroup(event);
- if (event->destroy)
- event->destroy(event);
-
- if (event->ctx)
- put_ctx(event->ctx);
- call_rcu(&event->rcu_head, free_event_rcu);
+ __free_event(event);
}
int perf_event_release_kernel(struct perf_event *event)
@@ -3544,6 +3570,15 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case PERF_EVENT_IOC_PERIOD:
return perf_event_period(event, (u64 __user *)arg);
+ case PERF_EVENT_IOC_ID:
+ {
+ u64 id = primary_event_id(event);
+
+ if (copy_to_user((void __user *)arg, &id, sizeof(id)))
+ return -EFAULT;
+ return 0;
+ }
+
case PERF_EVENT_IOC_SET_OUTPUT:
{
int ret;
@@ -3641,6 +3676,10 @@ void perf_event_update_userpage(struct perf_event *event)
u64 enabled, running, now;
rcu_read_lock();
+ rb = rcu_dereference(event->rb);
+ if (!rb)
+ goto unlock;
+
/*
* compute total_time_enabled, total_time_running
* based on snapshot values taken when the event
@@ -3651,12 +3690,8 @@ void perf_event_update_userpage(struct perf_event *event)
* NMI context
*/
calc_timer_values(event, &now, &enabled, &running);
- rb = rcu_dereference(event->rb);
- if (!rb)
- goto unlock;
userpg = rb->user_page;
-
/*
* Disable preemption so as to not let the corresponding user-space
* spin too long if we get preempted.
@@ -4251,7 +4286,7 @@ static void __perf_event_header__init_id(struct perf_event_header *header,
if (sample_type & PERF_SAMPLE_TIME)
data->time = perf_clock();
- if (sample_type & PERF_SAMPLE_ID)
+ if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
data->id = primary_event_id(event);
if (sample_type & PERF_SAMPLE_STREAM_ID)
@@ -4290,6 +4325,9 @@ static void __perf_event__output_id_sample(struct perf_output_handle *handle,
if (sample_type & PERF_SAMPLE_CPU)
perf_output_put(handle, data->cpu_entry);
+
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ perf_output_put(handle, data->id);
}
void perf_event__output_id_sample(struct perf_event *event,
@@ -4355,7 +4393,8 @@ static void perf_output_read_group(struct perf_output_handle *handle,
list_for_each_entry(sub, &leader->sibling_list, group_entry) {
n = 0;
- if (sub != event)
+ if ((sub != event) &&
+ (sub->state == PERF_EVENT_STATE_ACTIVE))
sub->pmu->read(sub);
values[n++] = perf_event_count(sub);
@@ -4402,6 +4441,9 @@ void perf_output_sample(struct perf_output_handle *handle,
perf_output_put(handle, *header);
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ perf_output_put(handle, data->id);
+
if (sample_type & PERF_SAMPLE_IP)
perf_output_put(handle, data->ip);
@@ -4462,20 +4504,6 @@ void perf_output_sample(struct perf_output_handle *handle,
}
}
- if (!event->attr.watermark) {
- int wakeup_events = event->attr.wakeup_events;
-
- if (wakeup_events) {
- struct ring_buffer *rb = handle->rb;
- int events = local_inc_return(&rb->events);
-
- if (events >= wakeup_events) {
- local_sub(wakeup_events, &rb->events);
- local_inc(&rb->wakeup);
- }
- }
- }
-
if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
if (data->br_stack) {
size_t size;
@@ -4511,16 +4539,31 @@ void perf_output_sample(struct perf_output_handle *handle,
}
}
- if (sample_type & PERF_SAMPLE_STACK_USER)
+ if (sample_type & PERF_SAMPLE_STACK_USER) {
perf_output_sample_ustack(handle,
data->stack_user_size,
data->regs_user.regs);
+ }
if (sample_type & PERF_SAMPLE_WEIGHT)
perf_output_put(handle, data->weight);
if (sample_type & PERF_SAMPLE_DATA_SRC)
perf_output_put(handle, data->data_src.val);
+
+ if (!event->attr.watermark) {
+ int wakeup_events = event->attr.wakeup_events;
+
+ if (wakeup_events) {
+ struct ring_buffer *rb = handle->rb;
+ int events = local_inc_return(&rb->events);
+
+ if (events >= wakeup_events) {
+ local_sub(wakeup_events, &rb->events);
+ local_inc(&rb->wakeup);
+ }
+ }
+ }
}
void perf_prepare_sample(struct perf_event_header *header,
@@ -4680,12 +4723,10 @@ perf_event_read_event(struct perf_event *event,
perf_output_end(&handle);
}
-typedef int (perf_event_aux_match_cb)(struct perf_event *event, void *data);
typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
static void
perf_event_aux_ctx(struct perf_event_context *ctx,
- perf_event_aux_match_cb match,
perf_event_aux_output_cb output,
void *data)
{
@@ -4696,15 +4737,12 @@ perf_event_aux_ctx(struct perf_event_context *ctx,
continue;
if (!event_filter_match(event))
continue;
- if (match(event, data))
- output(event, data);
+ output(event, data);
}
}
static void
-perf_event_aux(perf_event_aux_match_cb match,
- perf_event_aux_output_cb output,
- void *data,
+perf_event_aux(perf_event_aux_output_cb output, void *data,
struct perf_event_context *task_ctx)
{
struct perf_cpu_context *cpuctx;
@@ -4717,7 +4755,7 @@ perf_event_aux(perf_event_aux_match_cb match,
cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
if (cpuctx->unique_pmu != pmu)
goto next;
- perf_event_aux_ctx(&cpuctx->ctx, match, output, data);
+ perf_event_aux_ctx(&cpuctx->ctx, output, data);
if (task_ctx)
goto next;
ctxn = pmu->task_ctx_nr;
@@ -4725,14 +4763,14 @@ perf_event_aux(perf_event_aux_match_cb match,
goto next;
ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
if (ctx)
- perf_event_aux_ctx(ctx, match, output, data);
+ perf_event_aux_ctx(ctx, output, data);
next:
put_cpu_ptr(pmu->pmu_cpu_context);
}
if (task_ctx) {
preempt_disable();
- perf_event_aux_ctx(task_ctx, match, output, data);
+ perf_event_aux_ctx(task_ctx, output, data);
preempt_enable();
}
rcu_read_unlock();
@@ -4741,7 +4779,7 @@ next:
/*
* task tracking -- fork/exit
*
- * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
+ * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
*/
struct perf_task_event {
@@ -4759,6 +4797,13 @@ struct perf_task_event {
} event_id;
};
+static int perf_event_task_match(struct perf_event *event)
+{
+ return event->attr.comm || event->attr.mmap ||
+ event->attr.mmap2 || event->attr.mmap_data ||
+ event->attr.task;
+}
+
static void perf_event_task_output(struct perf_event *event,
void *data)
{
@@ -4768,6 +4813,9 @@ static void perf_event_task_output(struct perf_event *event,
struct task_struct *task = task_event->task;
int ret, size = task_event->event_id.header.size;
+ if (!perf_event_task_match(event))
+ return;
+
perf_event_header__init_id(&task_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
@@ -4790,13 +4838,6 @@ out:
task_event->event_id.header.size = size;
}
-static int perf_event_task_match(struct perf_event *event,
- void *data __maybe_unused)
-{
- return event->attr.comm || event->attr.mmap ||
- event->attr.mmap_data || event->attr.task;
-}
-
static void perf_event_task(struct task_struct *task,
struct perf_event_context *task_ctx,
int new)
@@ -4825,8 +4866,7 @@ static void perf_event_task(struct task_struct *task,
},
};
- perf_event_aux(perf_event_task_match,
- perf_event_task_output,
+ perf_event_aux(perf_event_task_output,
&task_event,
task_ctx);
}
@@ -4853,6 +4893,11 @@ struct perf_comm_event {
} event_id;
};
+static int perf_event_comm_match(struct perf_event *event)
+{
+ return event->attr.comm;
+}
+
static void perf_event_comm_output(struct perf_event *event,
void *data)
{
@@ -4862,6 +4907,9 @@ static void perf_event_comm_output(struct perf_event *event,
int size = comm_event->event_id.header.size;
int ret;
+ if (!perf_event_comm_match(event))
+ return;
+
perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
comm_event->event_id.header.size);
@@ -4883,12 +4931,6 @@ out:
comm_event->event_id.header.size = size;
}
-static int perf_event_comm_match(struct perf_event *event,
- void *data __maybe_unused)
-{
- return event->attr.comm;
-}
-
static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
char comm[TASK_COMM_LEN];
@@ -4903,8 +4945,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
- perf_event_aux(perf_event_comm_match,
- perf_event_comm_output,
+ perf_event_aux(perf_event_comm_output,
comm_event,
NULL);
}
@@ -4955,6 +4996,9 @@ struct perf_mmap_event {
const char *file_name;
int file_size;
+ int maj, min;
+ u64 ino;
+ u64 ino_generation;
struct {
struct perf_event_header header;
@@ -4967,6 +5011,17 @@ struct perf_mmap_event {
} event_id;
};
+static int perf_event_mmap_match(struct perf_event *event,
+ void *data)
+{
+ struct perf_mmap_event *mmap_event = data;
+ struct vm_area_struct *vma = mmap_event->vma;
+ int executable = vma->vm_flags & VM_EXEC;
+
+ return (!executable && event->attr.mmap_data) ||
+ (executable && (event->attr.mmap || event->attr.mmap2));
+}
+
static void perf_event_mmap_output(struct perf_event *event,
void *data)
{
@@ -4976,6 +5031,16 @@ static void perf_event_mmap_output(struct perf_event *event,
int size = mmap_event->event_id.header.size;
int ret;
+ if (!perf_event_mmap_match(event, data))
+ return;
+
+ if (event->attr.mmap2) {
+ mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
+ mmap_event->event_id.header.size += sizeof(mmap_event->maj);
+ mmap_event->event_id.header.size += sizeof(mmap_event->min);
+ mmap_event->event_id.header.size += sizeof(mmap_event->ino);
+ }
+
perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
mmap_event->event_id.header.size);
@@ -4986,6 +5051,14 @@ static void perf_event_mmap_output(struct perf_event *event,
mmap_event->event_id.tid = perf_event_tid(event, current);
perf_output_put(&handle, mmap_event->event_id);
+
+ if (event->attr.mmap2) {
+ perf_output_put(&handle, mmap_event->maj);
+ perf_output_put(&handle, mmap_event->min);
+ perf_output_put(&handle, mmap_event->ino);
+ perf_output_put(&handle, mmap_event->ino_generation);
+ }
+
__output_copy(&handle, mmap_event->file_name,
mmap_event->file_size);
@@ -4996,21 +5069,12 @@ out:
mmap_event->event_id.header.size = size;
}
-static int perf_event_mmap_match(struct perf_event *event,
- void *data)
-{
- struct perf_mmap_event *mmap_event = data;
- struct vm_area_struct *vma = mmap_event->vma;
- int executable = vma->vm_flags & VM_EXEC;
-
- return (!executable && event->attr.mmap_data) ||
- (executable && event->attr.mmap);
-}
-
static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
struct vm_area_struct *vma = mmap_event->vma;
struct file *file = vma->vm_file;
+ int maj = 0, min = 0;
+ u64 ino = 0, gen = 0;
unsigned int size;
char tmp[16];
char *buf = NULL;
@@ -5019,6 +5083,8 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
memset(tmp, 0, sizeof(tmp));
if (file) {
+ struct inode *inode;
+ dev_t dev;
/*
* d_path works from the end of the rb backwards, so we
* need to add enough zero bytes after the string to handle
@@ -5034,6 +5100,13 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
name = strncpy(tmp, "//toolong", sizeof(tmp));
goto got_name;
}
+ inode = file_inode(vma->vm_file);
+ dev = inode->i_sb->s_dev;
+ ino = inode->i_ino;
+ gen = inode->i_generation;
+ maj = MAJOR(dev);
+ min = MINOR(dev);
+
} else {
if (arch_vma_name(mmap_event->vma)) {
name = strncpy(tmp, arch_vma_name(mmap_event->vma),
@@ -5064,14 +5137,17 @@ got_name:
mmap_event->file_name = name;
mmap_event->file_size = size;
+ mmap_event->maj = maj;
+ mmap_event->min = min;
+ mmap_event->ino = ino;
+ mmap_event->ino_generation = gen;
if (!(vma->vm_flags & VM_EXEC))
mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
- perf_event_aux(perf_event_mmap_match,
- perf_event_mmap_output,
+ perf_event_aux(perf_event_mmap_output,
mmap_event,
NULL);
@@ -5101,6 +5177,10 @@ void perf_event_mmap(struct vm_area_struct *vma)
.len = vma->vm_end - vma->vm_start,
.pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
},
+ /* .maj (attr_mmap2 only) */
+ /* .min (attr_mmap2 only) */
+ /* .ino (attr_mmap2 only) */
+ /* .ino_generation (attr_mmap2 only) */
};
perf_event_mmap_event(&mmap_event);
@@ -5178,6 +5258,7 @@ static int __perf_event_overflow(struct perf_event *event,
__this_cpu_inc(perf_throttled_count);
hwc->interrupts = MAX_INTERRUPTS;
perf_log_throttle(event, 0);
+ tick_nohz_full_kick();
ret = 1;
}
}
@@ -6443,6 +6524,44 @@ unlock:
return pmu;
}
+static void account_event_cpu(struct perf_event *event, int cpu)
+{
+ if (event->parent)
+ return;
+
+ if (has_branch_stack(event)) {
+ if (!(event->attach_state & PERF_ATTACH_TASK))
+ atomic_inc(&per_cpu(perf_branch_stack_events, cpu));
+ }
+ if (is_cgroup_event(event))
+ atomic_inc(&per_cpu(perf_cgroup_events, cpu));
+}
+
+static void account_event(struct perf_event *event)
+{
+ if (event->parent)
+ return;
+
+ if (event->attach_state & PERF_ATTACH_TASK)
+ static_key_slow_inc(&perf_sched_events.key);
+ if (event->attr.mmap || event->attr.mmap_data)
+ atomic_inc(&nr_mmap_events);
+ if (event->attr.comm)
+ atomic_inc(&nr_comm_events);
+ if (event->attr.task)
+ atomic_inc(&nr_task_events);
+ if (event->attr.freq) {
+ if (atomic_inc_return(&nr_freq_events) == 1)
+ tick_nohz_full_kick_all();
+ }
+ if (has_branch_stack(event))
+ static_key_slow_inc(&perf_sched_events.key);
+ if (is_cgroup_event(event))
+ static_key_slow_inc(&perf_sched_events.key);
+
+ account_event_cpu(event, event->cpu);
+}
+
/*
 * Allocate and initialize an event structure
*/
@@ -6457,7 +6576,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
struct pmu *pmu;
struct perf_event *event;
struct hw_perf_event *hwc;
- long err;
+ long err = -EINVAL;
if ((unsigned)cpu >= nr_cpu_ids) {
if (!task || cpu != -1)
@@ -6540,49 +6659,35 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
* we currently do not support PERF_FORMAT_GROUP on inherited events
*/
if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
- goto done;
+ goto err_ns;
pmu = perf_init_event(event);
-
-done:
- err = 0;
if (!pmu)
- err = -EINVAL;
- else if (IS_ERR(pmu))
+ goto err_ns;
+ else if (IS_ERR(pmu)) {
err = PTR_ERR(pmu);
-
- if (err) {
- if (event->ns)
- put_pid_ns(event->ns);
- kfree(event);
- return ERR_PTR(err);
+ goto err_ns;
}
if (!event->parent) {
- if (event->attach_state & PERF_ATTACH_TASK)
- static_key_slow_inc(&perf_sched_events.key);
- if (event->attr.mmap || event->attr.mmap_data)
- atomic_inc(&nr_mmap_events);
- if (event->attr.comm)
- atomic_inc(&nr_comm_events);
- if (event->attr.task)
- atomic_inc(&nr_task_events);
if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
err = get_callchain_buffers();
- if (err) {
- free_event(event);
- return ERR_PTR(err);
- }
- }
- if (has_branch_stack(event)) {
- static_key_slow_inc(&perf_sched_events.key);
- if (!(event->attach_state & PERF_ATTACH_TASK))
- atomic_inc(&per_cpu(perf_branch_stack_events,
- event->cpu));
+ if (err)
+ goto err_pmu;
}
}
return event;
+
+err_pmu:
+ if (event->destroy)
+ event->destroy(event);
+err_ns:
+ if (event->ns)
+ put_pid_ns(event->ns);
+ kfree(event);
+
+ return ERR_PTR(err);
}
static int perf_copy_attr(struct perf_event_attr __user *uattr,
@@ -6864,17 +6969,14 @@ SYSCALL_DEFINE5(perf_event_open,
if (flags & PERF_FLAG_PID_CGROUP) {
err = perf_cgroup_connect(pid, event, &attr, group_leader);
- if (err)
- goto err_alloc;
- /*
- * one more event:
- * - that has cgroup constraint on event->cpu
- * - that may need work on context switch
- */
- atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
- static_key_slow_inc(&perf_sched_events.key);
+ if (err) {
+ __free_event(event);
+ goto err_task;
+ }
}
+ account_event(event);
+
/*
* Special case software events and allow them to be part of
* any hardware group.
@@ -7070,6 +7172,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
goto err;
}
+ account_event(event);
+
ctx = find_get_context(event->pmu, task, cpu);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
@@ -7106,6 +7210,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
event_entry) {
perf_remove_from_context(event);
+ unaccount_event_cpu(event, src_cpu);
put_ctx(src_ctx);
list_add(&event->event_entry, &events);
}
@@ -7118,6 +7223,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
list_del(&event->event_entry);
if (event->state >= PERF_EVENT_STATE_OFF)
event->state = PERF_EVENT_STATE_INACTIVE;
+ account_event_cpu(event, dst_cpu);
perf_install_in_context(dst_ctx, event, dst_cpu);
get_ctx(dst_ctx);
}
@@ -7798,7 +7904,8 @@ unlock:
device_initcall(perf_event_sysfs_init);
#ifdef CONFIG_CGROUP_PERF
-static struct cgroup_subsys_state *perf_cgroup_css_alloc(struct cgroup *cont)
+static struct cgroup_subsys_state *
+perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct perf_cgroup *jc;
@@ -7815,11 +7922,10 @@ static struct cgroup_subsys_state *perf_cgroup_css_alloc(struct cgroup *cont)
return &jc->css;
}
-static void perf_cgroup_css_free(struct cgroup *cont)
+static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
{
- struct perf_cgroup *jc;
- jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
- struct perf_cgroup, css);
+ struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
+
free_percpu(jc->info);
kfree(jc);
}
@@ -7831,15 +7937,17 @@ static int __perf_cgroup_move(void *info)
return 0;
}
-static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static void perf_cgroup_attach(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset)
{
struct task_struct *task;
- cgroup_taskset_for_each(task, cgrp, tset)
+ cgroup_taskset_for_each(task, css, tset)
task_function_call(task, __perf_cgroup_move, task);
}
-static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
+static void perf_cgroup_exit(struct cgroup_subsys_state *css,
+ struct cgroup_subsys_state *old_css,
struct task_struct *task)
{
/*
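
Most of the events/core.c churn above funnels per-event bookkeeping into paired helpers: account_event()/account_event_cpu() run once at creation (and when an event migrates to a new CPU), while unaccount_event()/unaccount_event_cpu() mirror them on free and on migration away, which also lets perf_event_alloc() unwind through the new err_pmu/err_ns labels instead of open-coding decrements. A toy sketch of the symmetric-helper shape (counters and struct invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int nr_mmap_users, nr_freq_users;	/* invented counters */

struct evt {
	bool mmap;
	bool freq;
};

/* One place that knows how to bump every counter an event touches... */
static void account_event(const struct evt *e)
{
	if (e->mmap)
		atomic_fetch_add(&nr_mmap_users, 1);
	if (e->freq)
		atomic_fetch_add(&nr_freq_users, 1);
}

/* ...and its mirror image, so free/error paths cannot drift out of sync. */
static void unaccount_event(const struct evt *e)
{
	if (e->mmap)
		atomic_fetch_sub(&nr_mmap_users, 1);
	if (e->freq)
		atomic_fetch_sub(&nr_freq_users, 1);
}

int main(void)
{
	struct evt e = { .mmap = true, .freq = true };

	account_event(&e);
	unaccount_event(&e);
	printf("%d %d\n", atomic_load(&nr_mmap_users), atomic_load(&nr_freq_users));
	return 0;
}
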
diff --git a/kernel/fork.c b/kernel/fork.c
index 403d2bb8a96..bf46287c91a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1177,7 +1177,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
* don't allow the creation of threads.
*/
if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) &&
- (task_active_pid_ns(current) != current->nsproxy->pid_ns))
+ (task_active_pid_ns(current) !=
+ current->nsproxy->pid_ns_for_children))
return ERR_PTR(-EINVAL);
retval = security_task_create(clone_flags);
@@ -1351,7 +1352,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
if (pid != &init_struct_pid) {
retval = -ENOMEM;
- pid = alloc_pid(p->nsproxy->pid_ns);
+ pid = alloc_pid(p->nsproxy->pid_ns_for_children);
if (!pid)
goto bad_fork_cleanup_io;
}
@@ -1679,6 +1680,12 @@ SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
int __user *, parent_tidptr,
int __user *, child_tidptr,
int, tls_val)
+#elif defined(CONFIG_CLONE_BACKWARDS3)
+SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
+ int, stack_size,
+ int __user *, parent_tidptr,
+ int __user *, child_tidptr,
+ int, tls_val)
#else
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
int __user *, parent_tidptr,
diff --git a/kernel/freezer.c b/kernel/freezer.c
index 8b2afc1c9df..b462fa19751 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -33,7 +33,7 @@ static DEFINE_SPINLOCK(freezer_lock);
*/
bool freezing_slow_path(struct task_struct *p)
{
- if (p->flags & PF_NOFREEZE)
+ if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
return false;
if (pm_nosig_freezing || cgroup_freezing(p))
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 6df614912b9..3e97fb126e6 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -15,6 +15,7 @@
#include <linux/lockdep.h>
#include <linux/export.h>
#include <linux/sysctl.h>
+#include <linux/utsname.h>
/*
* The number of tasks checked:
@@ -99,10 +100,14 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
* Ok, the task did not get scheduled for more than 2 minutes,
* complain:
*/
- printk(KERN_ERR "INFO: task %s:%d blocked for more than "
- "%ld seconds.\n", t->comm, t->pid, timeout);
- printk(KERN_ERR "\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
- " disables this message.\n");
+ pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
+ t->comm, t->pid, timeout);
+ pr_err(" %s %s %.*s\n",
+ print_tainted(), init_utsname()->release,
+ (int)strcspn(init_utsname()->version, " "),
+ init_utsname()->version);
+ pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
+ " disables this message.\n");
sched_show_task(t);
debug_show_held_locks(t);
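
The hung_task message now also reports the tainted flags and the kernel release, and prints only the first word of init_utsname()->version by pairing strcspn() with the "%.*s" precision specifier. The truncation idiom on its own:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Shaped like a utsname version string; the build date is dropped. */
	const char *version = "#1 SMP Mon Sep 2 12:00:00 UTC 2013";

	/* strcspn() returns the number of leading characters not in the
	 * reject set, i.e. the length of the first word here. */
	printf("%.*s\n", (int)strcspn(version, " "), version);
	return 0;
}
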
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 60f48fa0fd0..297a9247a3b 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -13,6 +13,7 @@
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
+#include <linux/jump_label_ratelimit.h>
#ifdef HAVE_JUMP_LABEL
diff --git a/kernel/lglock.c b/kernel/lglock.c
index 6535a667a5a..86ae2aebf00 100644
--- a/kernel/lglock.c
+++ b/kernel/lglock.c
@@ -21,7 +21,7 @@ void lg_local_lock(struct lglock *lg)
arch_spinlock_t *lock;
preempt_disable();
- rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+ lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
lock = this_cpu_ptr(lg->lock);
arch_spin_lock(lock);
}
@@ -31,7 +31,7 @@ void lg_local_unlock(struct lglock *lg)
{
arch_spinlock_t *lock;
- rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+ lock_release(&lg->lock_dep_map, 1, _RET_IP_);
lock = this_cpu_ptr(lg->lock);
arch_spin_unlock(lock);
preempt_enable();
@@ -43,7 +43,7 @@ void lg_local_lock_cpu(struct lglock *lg, int cpu)
arch_spinlock_t *lock;
preempt_disable();
- rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+ lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
lock = per_cpu_ptr(lg->lock, cpu);
arch_spin_lock(lock);
}
@@ -53,7 +53,7 @@ void lg_local_unlock_cpu(struct lglock *lg, int cpu)
{
arch_spinlock_t *lock;
- rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+ lock_release(&lg->lock_dep_map, 1, _RET_IP_);
lock = per_cpu_ptr(lg->lock, cpu);
arch_spin_unlock(lock);
preempt_enable();
@@ -65,7 +65,7 @@ void lg_global_lock(struct lglock *lg)
int i;
preempt_disable();
- rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_);
+ lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
for_each_possible_cpu(i) {
arch_spinlock_t *lock;
lock = per_cpu_ptr(lg->lock, i);
@@ -78,7 +78,7 @@ void lg_global_unlock(struct lglock *lg)
{
int i;
- rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+ lock_release(&lg->lock_dep_map, 1, _RET_IP_);
for_each_possible_cpu(i) {
arch_spinlock_t *lock;
lock = per_cpu_ptr(lg->lock, i);
diff --git a/kernel/module.c b/kernel/module.c
index 206915830d2..dc582749fa1 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -136,6 +136,7 @@ static int param_set_bool_enable_only(const char *val,
}
static const struct kernel_param_ops param_ops_bool_enable_only = {
+ .flags = KERNEL_PARAM_FL_NOARG,
.set = param_set_bool_enable_only,
.get = param_get_bool,
};
@@ -603,7 +604,7 @@ static void setup_modinfo_##field(struct module *mod, const char *s) \
static ssize_t show_modinfo_##field(struct module_attribute *mattr, \
struct module_kobject *mk, char *buffer) \
{ \
- return sprintf(buffer, "%s\n", mk->mod->field); \
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field); \
} \
static int modinfo_##field##_exists(struct module *mod) \
{ \
@@ -1611,6 +1612,14 @@ static void module_remove_modinfo_attrs(struct module *mod)
kfree(mod->modinfo_attrs);
}
+static void mod_kobject_put(struct module *mod)
+{
+ DECLARE_COMPLETION_ONSTACK(c);
+ mod->mkobj.kobj_completion = &c;
+ kobject_put(&mod->mkobj.kobj);
+ wait_for_completion(&c);
+}
+
static int mod_sysfs_init(struct module *mod)
{
int err;
@@ -1638,7 +1647,7 @@ static int mod_sysfs_init(struct module *mod)
err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
"%s", mod->name);
if (err)
- kobject_put(&mod->mkobj.kobj);
+ mod_kobject_put(mod);
/* delay uevent until full sysfs population */
out:
@@ -1682,7 +1691,7 @@ out_unreg_param:
out_unreg_holders:
kobject_put(mod->holders_dir);
out_unreg:
- kobject_put(&mod->mkobj.kobj);
+ mod_kobject_put(mod);
out:
return err;
}
@@ -1691,7 +1700,7 @@ static void mod_sysfs_fini(struct module *mod)
{
remove_notes_attrs(mod);
remove_sect_attrs(mod);
- kobject_put(&mod->mkobj.kobj);
+ mod_kobject_put(mod);
}
#else /* !CONFIG_SYSFS */
@@ -2540,21 +2549,20 @@ static int copy_module_from_user(const void __user *umod, unsigned long len,
/* Sets info->hdr and info->len. */
static int copy_module_from_fd(int fd, struct load_info *info)
{
- struct file *file;
+ struct fd f = fdget(fd);
int err;
struct kstat stat;
loff_t pos;
ssize_t bytes = 0;
- file = fget(fd);
- if (!file)
+ if (!f.file)
return -ENOEXEC;
- err = security_kernel_module_from_file(file);
+ err = security_kernel_module_from_file(f.file);
if (err)
goto out;
- err = vfs_getattr(&file->f_path, &stat);
+ err = vfs_getattr(&f.file->f_path, &stat);
if (err)
goto out;
@@ -2577,7 +2585,7 @@ static int copy_module_from_fd(int fd, struct load_info *info)
pos = 0;
while (pos < stat.size) {
- bytes = kernel_read(file, pos, (char *)(info->hdr) + pos,
+ bytes = kernel_read(f.file, pos, (char *)(info->hdr) + pos,
stat.size - pos);
if (bytes < 0) {
vfree(info->hdr);
@@ -2591,7 +2599,7 @@ static int copy_module_from_fd(int fd, struct load_info *info)
info->len = pos;
out:
- fput(file);
+ fdput(f);
return err;
}
diff --git a/kernel/mutex.c b/kernel/mutex.c
index ff05f4bd86e..6d647aedffe 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -209,11 +209,13 @@ int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
*/
static inline int mutex_can_spin_on_owner(struct mutex *lock)
{
+ struct task_struct *owner;
int retval = 1;
rcu_read_lock();
- if (lock->owner)
- retval = lock->owner->on_cpu;
+ owner = ACCESS_ONCE(lock->owner);
+ if (owner)
+ retval = owner->on_cpu;
rcu_read_unlock();
/*
* if lock->owner is not set, the mutex owner may have just acquired
@@ -461,7 +463,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
* performed the optimistic spinning cannot be done.
*/
if (ACCESS_ONCE(ww->ctx))
- break;
+ goto slowpath;
}
/*
@@ -472,7 +474,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
owner = ACCESS_ONCE(lock->owner);
if (owner && !mutex_spin_on_owner(lock, owner)) {
mspin_unlock(MLOCK(lock), &node);
- break;
+ goto slowpath;
}
if ((atomic_read(&lock->count) == 1) &&
@@ -499,7 +501,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
* the owner complete.
*/
if (!owner && (need_resched() || rt_task(task)))
- break;
+ goto slowpath;
/*
* The cpu_relax() call is a compiler barrier which forces
@@ -513,6 +515,10 @@ slowpath:
#endif
spin_lock_mutex(&lock->wait_lock, flags);
+ /* once more, can we acquire the lock? */
+ if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
+ goto skip_wait;
+
debug_mutex_lock_common(lock, &waiter);
debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
@@ -520,9 +526,6 @@ slowpath:
list_add_tail(&waiter.list, &lock->wait_list);
waiter.task = task;
- if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, -1) == 1))
- goto done;
-
lock_contended(&lock->dep_map, ip);
for (;;) {
@@ -536,7 +539,7 @@ slowpath:
* other waiters:
*/
if (MUTEX_SHOW_NO_WAITER(lock) &&
- (atomic_xchg(&lock->count, -1) == 1))
+ (atomic_xchg(&lock->count, -1) == 1))
break;
/*
@@ -561,24 +564,25 @@ slowpath:
schedule_preempt_disabled();
spin_lock_mutex(&lock->wait_lock, flags);
}
+ mutex_remove_waiter(lock, &waiter, current_thread_info());
+ /* set it to 0 if there are no waiters left: */
+ if (likely(list_empty(&lock->wait_list)))
+ atomic_set(&lock->count, 0);
+ debug_mutex_free_waiter(&waiter);
-done:
+skip_wait:
+ /* got the lock - cleanup and rejoice! */
lock_acquired(&lock->dep_map, ip);
- /* got the lock - rejoice! */
- mutex_remove_waiter(lock, &waiter, current_thread_info());
mutex_set_owner(lock);
if (!__builtin_constant_p(ww_ctx == NULL)) {
- struct ww_mutex *ww = container_of(lock,
- struct ww_mutex,
- base);
+ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
struct mutex_waiter *cur;
/*
* This branch gets optimized out for the common case,
* and is only important for ww_mutex_lock.
*/
-
ww_mutex_lock_acquired(ww, ww_ctx);
ww->ctx = ww_ctx;
@@ -592,15 +596,8 @@ done:
}
}
- /* set it to 0 if there are no waiters left: */
- if (likely(list_empty(&lock->wait_list)))
- atomic_set(&lock->count, 0);
-
spin_unlock_mutex(&lock->wait_lock, flags);
-
- debug_mutex_free_waiter(&waiter);
preempt_enable();
-
return 0;
err:
@@ -686,7 +683,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
might_sleep();
ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
0, &ctx->dep_map, _RET_IP_, ctx);
- if (!ret && ctx->acquired > 0)
+ if (!ret && ctx->acquired > 1)
return ww_mutex_deadlock_injection(lock, ctx);
return ret;
@@ -702,7 +699,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
0, &ctx->dep_map, _RET_IP_, ctx);
- if (!ret && ctx->acquired > 0)
+ if (!ret && ctx->acquired > 1)
return ww_mutex_deadlock_injection(lock, ctx);
return ret;
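
The mutex slowpath rework retries the atomic_xchg() acquisition immediately after taking wait_lock -- the new skip_wait path -- so a lock that was released while the spinner gave up is taken without ever initializing or queuing a waiter. The "try once more before queuing" step in isolation (toy lock, names invented):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* 1 == unlocked, 0 == locked; waiters are not tracked in this toy. */
static atomic_int lock_count = 1;

static bool try_fast_acquire(void)
{
	int expected = 1;

	/* Same idea as atomic_xchg(&lock->count, 0) == 1 in the patch:
	 * grab the lock if nobody holds it, otherwise fall through to the
	 * (not shown) queue-as-a-waiter slowpath. */
	return atomic_compare_exchange_strong(&lock_count, &expected, 0);
}

int main(void)
{
	printf("first try:  %s\n", try_fast_acquire() ? "acquired" : "must queue");
	printf("second try: %s\n", try_fast_acquire() ? "acquired" : "must queue");
	return 0;
}
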
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 364ceab15f0..997cbb951a3 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -29,15 +29,15 @@
static struct kmem_cache *nsproxy_cachep;
struct nsproxy init_nsproxy = {
- .count = ATOMIC_INIT(1),
- .uts_ns = &init_uts_ns,
+ .count = ATOMIC_INIT(1),
+ .uts_ns = &init_uts_ns,
#if defined(CONFIG_POSIX_MQUEUE) || defined(CONFIG_SYSVIPC)
- .ipc_ns = &init_ipc_ns,
+ .ipc_ns = &init_ipc_ns,
#endif
- .mnt_ns = NULL,
- .pid_ns = &init_pid_ns,
+ .mnt_ns = NULL,
+ .pid_ns_for_children = &init_pid_ns,
#ifdef CONFIG_NET
- .net_ns = &init_net,
+ .net_ns = &init_net,
#endif
};
@@ -85,9 +85,10 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
goto out_ipc;
}
- new_nsp->pid_ns = copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns);
- if (IS_ERR(new_nsp->pid_ns)) {
- err = PTR_ERR(new_nsp->pid_ns);
+ new_nsp->pid_ns_for_children =
+ copy_pid_ns(flags, user_ns, tsk->nsproxy->pid_ns_for_children);
+ if (IS_ERR(new_nsp->pid_ns_for_children)) {
+ err = PTR_ERR(new_nsp->pid_ns_for_children);
goto out_pid;
}
@@ -100,8 +101,8 @@ static struct nsproxy *create_new_namespaces(unsigned long flags,
return new_nsp;
out_net:
- if (new_nsp->pid_ns)
- put_pid_ns(new_nsp->pid_ns);
+ if (new_nsp->pid_ns_for_children)
+ put_pid_ns(new_nsp->pid_ns_for_children);
out_pid:
if (new_nsp->ipc_ns)
put_ipc_ns(new_nsp->ipc_ns);
@@ -174,8 +175,8 @@ void free_nsproxy(struct nsproxy *ns)
put_uts_ns(ns->uts_ns);
if (ns->ipc_ns)
put_ipc_ns(ns->ipc_ns);
- if (ns->pid_ns)
- put_pid_ns(ns->pid_ns);
+ if (ns->pid_ns_for_children)
+ put_pid_ns(ns->pid_ns_for_children);
put_net(ns->net_ns);
kmem_cache_free(nsproxy_cachep, ns);
}
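
The pid_ns_for_children rename spells out what the nsproxy field has always meant: it is the PID namespace that future children are created in, not the namespace the current task lives in, which is why unshare(CLONE_NEWPID) and setns() change it without moving the caller. A userspace illustration (Linux only, normally needs CAP_SYS_ADMIN):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	if (unshare(CLONE_NEWPID) != 0) {
		perror("unshare");	/* typically needs root/CAP_SYS_ADMIN */
		return 1;
	}

	/* The caller keeps its old PID: only children enter the new ns. */
	printf("parent still sees itself as pid %d\n", (int)getpid());

	pid_t child = fork();
	if (child == 0) {
		printf("child sees itself as pid %d\n", (int)getpid());	/* 1 */
		_exit(0);
	}
	waitpid(child, NULL, 0);
	return 0;
}
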
diff --git a/kernel/params.c b/kernel/params.c
index 440e65d1a54..501bde4f3be 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -103,8 +103,8 @@ static int parse_one(char *param,
|| params[i].level > max_level)
return 0;
/* No one handled NULL, so do it here. */
- if (!val && params[i].ops->set != param_set_bool
- && params[i].ops->set != param_set_bint)
+ if (!val &&
+ !(params[i].ops->flags & KERNEL_PARAM_FL_NOARG))
return -EINVAL;
pr_debug("handling %s with %p\n", param,
params[i].ops->set);
@@ -241,7 +241,8 @@ int parse_args(const char *doing,
} \
int param_get_##name(char *buffer, const struct kernel_param *kp) \
{ \
- return sprintf(buffer, format, *((type *)kp->arg)); \
+ return scnprintf(buffer, PAGE_SIZE, format, \
+ *((type *)kp->arg)); \
} \
struct kernel_param_ops param_ops_##name = { \
.set = param_set_##name, \
@@ -252,7 +253,7 @@ int parse_args(const char *doing,
EXPORT_SYMBOL(param_ops_##name)
-STANDARD_PARAM_DEF(byte, unsigned char, "%c", unsigned long, strict_strtoul);
+STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, strict_strtoul);
STANDARD_PARAM_DEF(short, short, "%hi", long, strict_strtol);
STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, strict_strtoul);
STANDARD_PARAM_DEF(int, int, "%i", long, strict_strtol);
@@ -285,7 +286,7 @@ EXPORT_SYMBOL(param_set_charp);
int param_get_charp(char *buffer, const struct kernel_param *kp)
{
- return sprintf(buffer, "%s", *((char **)kp->arg));
+ return scnprintf(buffer, PAGE_SIZE, "%s", *((char **)kp->arg));
}
EXPORT_SYMBOL(param_get_charp);
@@ -320,6 +321,7 @@ int param_get_bool(char *buffer, const struct kernel_param *kp)
EXPORT_SYMBOL(param_get_bool);
struct kernel_param_ops param_ops_bool = {
+ .flags = KERNEL_PARAM_FL_NOARG,
.set = param_set_bool,
.get = param_get_bool,
};
@@ -370,6 +372,7 @@ int param_set_bint(const char *val, const struct kernel_param *kp)
EXPORT_SYMBOL(param_set_bint);
struct kernel_param_ops param_ops_bint = {
+ .flags = KERNEL_PARAM_FL_NOARG,
.set = param_set_bint,
.get = param_get_int,
};
@@ -827,7 +830,7 @@ ssize_t __modver_version_show(struct module_attribute *mattr,
struct module_version_attribute *vattr =
container_of(mattr, struct module_version_attribute, mattr);
- return sprintf(buf, "%s\n", vattr->version);
+ return scnprintf(buf, PAGE_SIZE, "%s\n", vattr->version);
}
extern const struct module_version_attribute *__start___modver[];
@@ -912,7 +915,14 @@ static const struct kset_uevent_ops module_uevent_ops = {
struct kset *module_kset;
int module_sysfs_initialized;
+static void module_kobj_release(struct kobject *kobj)
+{
+ struct module_kobject *mk = to_module_kobject(kobj);
+ complete(mk->kobj_completion);
+}
+
struct kobj_type module_ktype = {
+ .release = module_kobj_release,
.sysfs_ops = &module_sysfs_ops,
};
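
The params.c hunks swap sprintf() for scnprintf(buffer, PAGE_SIZE, ...) in every show routine, bounding the write to the single page sysfs provides; scnprintf() also returns the number of bytes actually stored, which is what the sysfs return value should be. Userspace snprintf() bounds the write the same way but reports the would-be length, so an equivalent helper has to clamp it:

#include <stdio.h>
#include <string.h>

#define BUF_SIZE 16	/* stands in for PAGE_SIZE */

/* Rough userspace analogue of scnprintf(): bounded, and the return
 * value is the number of characters actually placed in buf. */
static int scnprintf_like(char *buf, size_t size, const char *fmt, const char *s)
{
	int n = snprintf(buf, size, fmt, s);

	if (n < 0)
		return 0;
	return (size_t)n < size ? n : (int)size - 1;
}

int main(void)
{
	char page[BUF_SIZE];
	int len = scnprintf_like(page, sizeof(page), "%s\n",
				 "a-module-version-string-longer-than-the-buffer");

	printf("stored %d bytes: \"%s\"\n", len, page);
	return 0;
}
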
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 6917e8edb48..601bb361c23 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -349,8 +349,8 @@ static int pidns_install(struct nsproxy *nsproxy, void *ns)
if (ancestor != active)
return -EINVAL;
- put_pid_ns(nsproxy->pid_ns);
- nsproxy->pid_ns = get_pid_ns(new);
+ put_pid_ns(nsproxy->pid_ns_for_children);
+ nsproxy->pid_ns_for_children = get_pid_ns(new);
return 0;
}
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index b26f5f1e773..3085e62a80a 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -39,7 +39,7 @@ static int resume_delay;
static char resume_file[256] = CONFIG_PM_STD_PARTITION;
dev_t swsusp_resume_device;
sector_t swsusp_resume_block;
-int in_suspend __nosavedata;
+__visible int in_suspend __nosavedata;
enum {
HIBERNATION_INVALID,
diff --git a/kernel/power/process.c b/kernel/power/process.c
index fc0df848644..06ec8869dbf 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -109,6 +109,8 @@ static int try_to_freeze_tasks(bool user_only)
/**
* freeze_processes - Signal user space processes to enter the refrigerator.
+ * The current thread will not be frozen. The same process that calls
+ * freeze_processes must later call thaw_processes.
*
* On success, returns 0. On failure, -errno and system is fully thawed.
*/
@@ -120,6 +122,9 @@ int freeze_processes(void)
if (error)
return error;
+ /* Make sure this task doesn't get frozen */
+ current->flags |= PF_SUSPEND_TASK;
+
if (!pm_freezing)
atomic_inc(&system_freezing_cnt);
@@ -168,6 +173,7 @@ int freeze_kernel_threads(void)
void thaw_processes(void)
{
struct task_struct *g, *p;
+ struct task_struct *curr = current;
if (pm_freezing)
atomic_dec(&system_freezing_cnt);
@@ -182,10 +188,15 @@ void thaw_processes(void)
read_lock(&tasklist_lock);
do_each_thread(g, p) {
+ /* No other threads should have PF_SUSPEND_TASK set */
+ WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
__thaw_task(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
+ WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
+ curr->flags &= ~PF_SUSPEND_TASK;
+
usermodehelper_enable();
schedule();
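
freeze_processes() now tags the calling task with PF_SUSPEND_TASK so freezing_slow_path() exempts it, and thaw_processes() clears the flag again, warning if any other task carries it. The essential shape -- set a per-task flag before the global operation, clear it in the matching teardown -- in a toy form (flag value and struct invented):

#include <assert.h>
#include <stdio.h>

#define FLAG_SUSPEND_TASK 0x1	/* invented stand-in for PF_SUSPEND_TASK */

struct task {
	unsigned int flags;
};

static int skip_freezing(const struct task *t)
{
	return t->flags & FLAG_SUSPEND_TASK;	/* freezing_slow_path() check */
}

static void freeze_all(struct task *caller)
{
	caller->flags |= FLAG_SUSPEND_TASK;	/* never freeze the freezer */
	/* ...freeze everybody else... */
}

static void thaw_all(struct task *caller)
{
	/* ...thaw everybody... */
	assert(caller->flags & FLAG_SUSPEND_TASK);	/* matches the WARN_ON */
	caller->flags &= ~FLAG_SUSPEND_TASK;
}

int main(void)
{
	struct task me = { 0 };

	freeze_all(&me);
	printf("caller exempt while frozen: %d\n", skip_freezing(&me));
	thaw_all(&me);
	printf("flag cleared after thaw:    %d\n", !skip_freezing(&me));
	return 0;
}
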
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 06fe28589e9..a394297f8b2 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -296,6 +296,17 @@ int pm_qos_request_active(struct pm_qos_request *req)
}
EXPORT_SYMBOL_GPL(pm_qos_request_active);
+static void __pm_qos_update_request(struct pm_qos_request *req,
+ s32 new_value)
+{
+ trace_pm_qos_update_request(req->pm_qos_class, new_value);
+
+ if (new_value != req->node.prio)
+ pm_qos_update_target(
+ pm_qos_array[req->pm_qos_class]->constraints,
+ &req->node, PM_QOS_UPDATE_REQ, new_value);
+}
+
/**
* pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
* @work: work struct for the delayed work (timeout)
@@ -308,7 +319,7 @@ static void pm_qos_work_fn(struct work_struct *work)
struct pm_qos_request,
work);
- pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
+ __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
}
/**
@@ -364,12 +375,7 @@ void pm_qos_update_request(struct pm_qos_request *req,
}
cancel_delayed_work_sync(&req->work);
-
- trace_pm_qos_update_request(req->pm_qos_class, new_value);
- if (new_value != req->node.prio)
- pm_qos_update_target(
- pm_qos_array[req->pm_qos_class]->constraints,
- &req->node, PM_QOS_UPDATE_REQ, new_value);
+ __pm_qos_update_request(req, new_value);
}
EXPORT_SYMBOL_GPL(pm_qos_update_request);
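
pm_qos_work_fn() is itself the delayed timeout work, so it must not call pm_qos_update_request(), whose first step is cancel_delayed_work_sync() on that very work item. Splitting the tail out into __pm_qos_update_request() gives the work handler a path that skips the cancel. The call structure, reduced to a toy (names invented, printf in place of the real work):

#include <stdio.h>

static void helper_update(int value)
{
	printf("apply constraint %d\n", value);	/* __pm_qos_update_request() body */
}

static void cancel_pending_timeout(void)
{
	/* The public entry point may block here waiting for the timeout
	 * work to finish -- which is exactly why the timeout work itself
	 * must not come back through this function. */
	printf("cancel_delayed_work_sync()\n");
}

/* Public API: safe for ordinary callers. */
static void update_request(int value)
{
	cancel_pending_timeout();
	helper_update(value);
}

/* Timeout work handler: calls only the helper, never the canceller. */
static void timeout_work_fn(void)
{
	helper_update(-1);	/* restore the default value */
}

int main(void)
{
	update_request(10);
	timeout_work_fn();
	return 0;
}
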
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index ece04223bb1..62ee437b5c7 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -210,6 +210,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
goto Platform_wake;
}
+ ftrace_stop();
error = disable_nonboot_cpus();
if (error || suspend_test(TEST_CPUS))
goto Enable_cpus;
@@ -232,6 +233,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
Enable_cpus:
enable_nonboot_cpus();
+ ftrace_start();
Platform_wake:
if (need_suspend_ops(state) && suspend_ops->wake)
@@ -265,7 +267,6 @@ int suspend_devices_and_enter(suspend_state_t state)
goto Close;
}
suspend_console();
- ftrace_stop();
suspend_test_start();
error = dpm_suspend_start(PMSG_SUSPEND);
if (error) {
@@ -285,7 +286,6 @@ int suspend_devices_and_enter(suspend_state_t state)
suspend_test_start();
dpm_resume_end(PMSG_RESUME);
suspend_test_finish("resume devices");
- ftrace_start();
resume_console();
Close:
if (need_suspend_ops(state) && suspend_ops->end)
diff --git a/kernel/printk/Makefile b/kernel/printk/Makefile
new file mode 100644
index 00000000000..85405bdcf2b
--- /dev/null
+++ b/kernel/printk/Makefile
@@ -0,0 +1,2 @@
+obj-y = printk.o
+obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
diff --git a/kernel/printk/braille.c b/kernel/printk/braille.c
new file mode 100644
index 00000000000..276762f3a46
--- /dev/null
+++ b/kernel/printk/braille.c
@@ -0,0 +1,49 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/string.h>
+
+#include "console_cmdline.h"
+#include "braille.h"
+
+char *_braille_console_setup(char **str, char **brl_options)
+{
+ if (!memcmp(*str, "brl,", 4)) {
+ *brl_options = "";
+ *str += 4;
+ } else if (!memcmp(*str, "brl=", 4)) {
+ *brl_options = *str + 4;
+ *str = strchr(*brl_options, ',');
+ if (!*str)
+ pr_err("need port name after brl=\n");
+ else
+ *((*str)++) = 0;
+ } else
+ return NULL;
+
+ return *str;
+}
+
+int
+_braille_register_console(struct console *console, struct console_cmdline *c)
+{
+ int rtn = 0;
+
+ if (c->brl_options) {
+ console->flags |= CON_BRL;
+ rtn = braille_register_console(console, c->index, c->options,
+ c->brl_options);
+ }
+
+ return rtn;
+}
+
+int
+_braille_unregister_console(struct console *console)
+{
+ if (console->flags & CON_BRL)
+ return braille_unregister_console(console);
+
+ return 0;
+}
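
_braille_console_setup() strips an optional "brl," or "brl=<port>," prefix from a console= argument, leaving *str pointing at the remaining console spec and *brl_options at the port name (or "" when no port was given); both branches compare *str, the argument string itself. A small userspace harness for the same parsing, with pr_err() swapped for fprintf() and a made-up console= value:

#include <stdio.h>
#include <string.h>

static char *braille_console_setup(char **str, char **brl_options)
{
	if (!memcmp(*str, "brl,", 4)) {
		*brl_options = "";
		*str += 4;
	} else if (!memcmp(*str, "brl=", 4)) {
		*brl_options = *str + 4;
		*str = strchr(*brl_options, ',');
		if (!*str)
			fprintf(stderr, "need port name after brl=\n");
		else
			*((*str)++) = 0;	/* terminate the port name */
	} else
		return NULL;

	return *str;
}

int main(void)
{
	char arg[] = "brl=usb0,ttyS0,115200";	/* hypothetical console= value */
	char *str = arg, *brl = NULL;

	braille_console_setup(&str, &brl);
	printf("braille port: %s, remaining console spec: %s\n", brl, str);
	return 0;
}
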
diff --git a/kernel/printk/braille.h b/kernel/printk/braille.h
new file mode 100644
index 00000000000..769d771145c
--- /dev/null
+++ b/kernel/printk/braille.h
@@ -0,0 +1,48 @@
+#ifndef _PRINTK_BRAILLE_H
+#define _PRINTK_BRAILLE_H
+
+#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
+
+static inline void
+braille_set_options(struct console_cmdline *c, char *brl_options)
+{
+ c->brl_options = brl_options;
+}
+
+char *
+_braille_console_setup(char **str, char **brl_options);
+
+int
+_braille_register_console(struct console *console, struct console_cmdline *c);
+
+int
+_braille_unregister_console(struct console *console);
+
+#else
+
+static inline void
+braille_set_options(struct console_cmdline *c, char *brl_options)
+{
+}
+
+static inline char *
+_braille_console_setup(char **str, char **brl_options)
+{
+ return NULL;
+}
+
+static inline int
+_braille_register_console(struct console *console, struct console_cmdline *c)
+{
+ return 0;
+}
+
+static inline int
+_braille_unregister_console(struct console *console)
+{
+ return 0;
+}
+
+#endif
+
+#endif
diff --git a/kernel/printk/console_cmdline.h b/kernel/printk/console_cmdline.h
new file mode 100644
index 00000000000..cbd69d84234
--- /dev/null
+++ b/kernel/printk/console_cmdline.h
@@ -0,0 +1,14 @@
+#ifndef _CONSOLE_CMDLINE_H
+#define _CONSOLE_CMDLINE_H
+
+struct console_cmdline
+{
+ char name[8]; /* Name of the driver */
+ int index; /* Minor dev. to use */
+ char *options; /* Options for the driver */
+#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
+ char *brl_options; /* Options for braille driver */
+#endif
+};
+
+#endif
diff --git a/kernel/printk.c b/kernel/printk/printk.c
index 69b0890ed7e..b4e8500afdb 100644
--- a/kernel/printk.c
+++ b/kernel/printk/printk.c
@@ -51,6 +51,9 @@
#define CREATE_TRACE_POINTS
#include <trace/events/printk.h>
+#include "console_cmdline.h"
+#include "braille.h"
+
/* printk's without a loglevel use this.. */
#define DEFAULT_MESSAGE_LOGLEVEL CONFIG_DEFAULT_MESSAGE_LOGLEVEL
@@ -105,19 +108,11 @@ static struct console *exclusive_console;
/*
* Array of consoles built from command line options (console=)
*/
-struct console_cmdline
-{
- char name[8]; /* Name of the driver */
- int index; /* Minor dev. to use */
- char *options; /* Options for the driver */
-#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
- char *brl_options; /* Options for braille driver */
-#endif
-};
#define MAX_CMDLINECONSOLES 8
static struct console_cmdline console_cmdline[MAX_CMDLINECONSOLES];
+
static int selected_console = -1;
static int preferred_console = -1;
int console_set_on_cmdline;
@@ -178,7 +173,7 @@ static int console_may_schedule;
* 67 "g"
* 0032 00 00 00 padding to next message header
*
- * The 'struct log' buffer header must never be directly exported to
+ * The 'struct printk_log' buffer header must never be directly exported to
* userspace, it is a kernel-private implementation detail that might
* need to be changed in the future, when the requirements change.
*
@@ -200,7 +195,7 @@ enum log_flags {
LOG_CONT = 8, /* text is a fragment of a continuation line */
};
-struct log {
+struct printk_log {
u64 ts_nsec; /* timestamp in nanoseconds */
u16 len; /* length of entire record */
u16 text_len; /* length of text buffer */
@@ -248,7 +243,7 @@ static u32 clear_idx;
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define LOG_ALIGN 4
#else
-#define LOG_ALIGN __alignof__(struct log)
+#define LOG_ALIGN __alignof__(struct printk_log)
#endif
#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
@@ -259,35 +254,35 @@ static u32 log_buf_len = __LOG_BUF_LEN;
static volatile unsigned int logbuf_cpu = UINT_MAX;
/* human readable text of the record */
-static char *log_text(const struct log *msg)
+static char *log_text(const struct printk_log *msg)
{
- return (char *)msg + sizeof(struct log);
+ return (char *)msg + sizeof(struct printk_log);
}
/* optional key/value pair dictionary attached to the record */
-static char *log_dict(const struct log *msg)
+static char *log_dict(const struct printk_log *msg)
{
- return (char *)msg + sizeof(struct log) + msg->text_len;
+ return (char *)msg + sizeof(struct printk_log) + msg->text_len;
}
/* get record by index; idx must point to valid msg */
-static struct log *log_from_idx(u32 idx)
+static struct printk_log *log_from_idx(u32 idx)
{
- struct log *msg = (struct log *)(log_buf + idx);
+ struct printk_log *msg = (struct printk_log *)(log_buf + idx);
/*
* A length == 0 record is the end of buffer marker. Wrap around and
* read the message at the start of the buffer.
*/
if (!msg->len)
- return (struct log *)log_buf;
+ return (struct printk_log *)log_buf;
return msg;
}
/* get next record; idx must point to valid msg */
static u32 log_next(u32 idx)
{
- struct log *msg = (struct log *)(log_buf + idx);
+ struct printk_log *msg = (struct printk_log *)(log_buf + idx);
/* length == 0 indicates the end of the buffer; wrap */
/*
@@ -296,7 +291,7 @@ static u32 log_next(u32 idx)
* return the one after that.
*/
if (!msg->len) {
- msg = (struct log *)log_buf;
+ msg = (struct printk_log *)log_buf;
return msg->len;
}
return idx + msg->len;
@@ -308,11 +303,11 @@ static void log_store(int facility, int level,
const char *dict, u16 dict_len,
const char *text, u16 text_len)
{
- struct log *msg;
+ struct printk_log *msg;
u32 size, pad_len;
/* number of '\0' padding bytes to next message */
- size = sizeof(struct log) + text_len + dict_len;
+ size = sizeof(struct printk_log) + text_len + dict_len;
pad_len = (-size) & (LOG_ALIGN - 1);
size += pad_len;
@@ -324,7 +319,7 @@ static void log_store(int facility, int level,
else
free = log_first_idx - log_next_idx;
- if (free > size + sizeof(struct log))
+ if (free > size + sizeof(struct printk_log))
break;
/* drop old messages until we have enough continuous space */
@@ -332,18 +327,18 @@ static void log_store(int facility, int level,
log_first_seq++;
}
- if (log_next_idx + size + sizeof(struct log) >= log_buf_len) {
+ if (log_next_idx + size + sizeof(struct printk_log) >= log_buf_len) {
/*
* This message + an additional empty header does not fit
* at the end of the buffer. Add an empty header with len == 0
* to signify a wrap around.
*/
- memset(log_buf + log_next_idx, 0, sizeof(struct log));
+ memset(log_buf + log_next_idx, 0, sizeof(struct printk_log));
log_next_idx = 0;
}
/* fill message */
- msg = (struct log *)(log_buf + log_next_idx);
+ msg = (struct printk_log *)(log_buf + log_next_idx);
memcpy(log_text(msg), text, text_len);
msg->text_len = text_len;
memcpy(log_dict(msg), dict, dict_len);
@@ -356,7 +351,7 @@ static void log_store(int facility, int level,
else
msg->ts_nsec = local_clock();
memset(log_dict(msg) + dict_len, 0, pad_len);
- msg->len = sizeof(struct log) + text_len + dict_len + pad_len;
+ msg->len = sizeof(struct printk_log) + text_len + dict_len + pad_len;
/* insert message */
log_next_idx += msg->len;
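
log_store() above always keeps room for one extra header at the end of the buffer: when a record plus that spare header no longer fits, it writes a header with len == 0 as a wrap marker and restarts at offset 0, and log_from_idx()/log_next() translate the marker back into "continue from the start of the buffer". A toy byte ring using the same convention (sizes and names invented; reclaiming overwritten records, log_first_idx in the real code, is left out):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 32			/* toy-sized stand-in for log_buf_len */

static unsigned char ring[RING_SIZE];	/* stand-in for __log_buf */
static uint32_t next_idx;		/* stand-in for log_next_idx */

struct rec {				/* trimmed stand-in for struct printk_log */
	uint16_t len;			/* total record length; 0 == wrap marker */
};

static void store(const char *text)
{
	uint16_t len = (uint16_t)(sizeof(struct rec) + strlen(text) + 1);

	/* Record plus one spare header would run past the end: leave a
	 * len == 0 header behind and restart at offset 0. */
	if (next_idx + len + sizeof(struct rec) >= RING_SIZE) {
		memset(ring + next_idx, 0, sizeof(struct rec));
		next_idx = 0;
	}
	((struct rec *)(ring + next_idx))->len = len;
	memcpy(ring + next_idx + sizeof(struct rec), text, strlen(text) + 1);
	next_idx += len;
}

int main(void)
{
	uint32_t before;

	store("alpha");			/* 8 bytes at offset 0  */
	store("bravo");			/* 8 bytes at offset 8  */
	store("charlie");		/* 10 bytes at offset 16 */

	before = next_idx;		/* 26: the next record will not fit */
	store("delta");			/* writes the wrap marker, lands at 0 */

	printf("wrap marker at %u (len %u), next_idx now %u\n",
	       before, ((struct rec *)(ring + before))->len, next_idx);
	return 0;
}
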
@@ -479,7 +474,7 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct devkmsg_user *user = file->private_data;
- struct log *msg;
+ struct printk_log *msg;
u64 ts_usec;
size_t i;
char cont = '-';
@@ -724,14 +719,14 @@ void log_buf_kexec_setup(void)
VMCOREINFO_SYMBOL(log_first_idx);
VMCOREINFO_SYMBOL(log_next_idx);
/*
- * Export struct log size and field offsets. User space tools can
+ * Export struct printk_log size and field offsets. User space tools can
* parse it and detect any changes to structure down the line.
*/
- VMCOREINFO_STRUCT_SIZE(log);
- VMCOREINFO_OFFSET(log, ts_nsec);
- VMCOREINFO_OFFSET(log, len);
- VMCOREINFO_OFFSET(log, text_len);
- VMCOREINFO_OFFSET(log, dict_len);
+ VMCOREINFO_STRUCT_SIZE(printk_log);
+ VMCOREINFO_OFFSET(printk_log, ts_nsec);
+ VMCOREINFO_OFFSET(printk_log, len);
+ VMCOREINFO_OFFSET(printk_log, text_len);
+ VMCOREINFO_OFFSET(printk_log, dict_len);
}
#endif
@@ -884,7 +879,7 @@ static size_t print_time(u64 ts, char *buf)
(unsigned long)ts, rem_nsec / 1000);
}
-static size_t print_prefix(const struct log *msg, bool syslog, char *buf)
+static size_t print_prefix(const struct printk_log *msg, bool syslog, char *buf)
{
size_t len = 0;
unsigned int prefix = (msg->facility << 3) | msg->level;
@@ -907,7 +902,7 @@ static size_t print_prefix(const struct log *msg, bool syslog, char *buf)
return len;
}
-static size_t msg_print_text(const struct log *msg, enum log_flags prev,
+static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
bool syslog, char *buf, size_t size)
{
const char *text = log_text(msg);
@@ -969,7 +964,7 @@ static size_t msg_print_text(const struct log *msg, enum log_flags prev,
static int syslog_print(char __user *buf, int size)
{
char *text;
- struct log *msg;
+ struct printk_log *msg;
int len = 0;
text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
@@ -1060,7 +1055,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
idx = clear_idx;
prev = 0;
while (seq < log_next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct printk_log *msg = log_from_idx(idx);
len += msg_print_text(msg, prev, true, NULL, 0);
prev = msg->flags;
@@ -1073,7 +1068,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
idx = clear_idx;
prev = 0;
while (len > size && seq < log_next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct printk_log *msg = log_from_idx(idx);
len -= msg_print_text(msg, prev, true, NULL, 0);
prev = msg->flags;
@@ -1087,7 +1082,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
len = 0;
prev = 0;
while (len >= 0 && seq < next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct printk_log *msg = log_from_idx(idx);
int textlen;
textlen = msg_print_text(msg, prev, true, text,
@@ -1233,7 +1228,7 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
error = 0;
while (seq < log_next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct printk_log *msg = log_from_idx(idx);
error += msg_print_text(msg, prev, true, NULL, 0);
idx = log_next(idx);
@@ -1719,10 +1714,10 @@ static struct cont {
u8 level;
bool flushed:1;
} cont;
-static struct log *log_from_idx(u32 idx) { return NULL; }
+static struct printk_log *log_from_idx(u32 idx) { return NULL; }
static u32 log_next(u32 idx) { return 0; }
static void call_console_drivers(int level, const char *text, size_t len) {}
-static size_t msg_print_text(const struct log *msg, enum log_flags prev,
+static size_t msg_print_text(const struct printk_log *msg, enum log_flags prev,
bool syslog, char *buf, size_t size) { return 0; }
static size_t cont_print_text(char *text, size_t size) { return 0; }
@@ -1761,23 +1756,23 @@ static int __add_preferred_console(char *name, int idx, char *options,
* See if this tty is not yet registered, and
* if we have a slot free.
*/
- for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++)
- if (strcmp(console_cmdline[i].name, name) == 0 &&
- console_cmdline[i].index == idx) {
- if (!brl_options)
- selected_console = i;
- return 0;
+ for (i = 0, c = console_cmdline;
+ i < MAX_CMDLINECONSOLES && c->name[0];
+ i++, c++) {
+ if (strcmp(c->name, name) == 0 && c->index == idx) {
+ if (!brl_options)
+ selected_console = i;
+ return 0;
}
+ }
if (i == MAX_CMDLINECONSOLES)
return -E2BIG;
if (!brl_options)
selected_console = i;
- c = &console_cmdline[i];
strlcpy(c->name, name, sizeof(c->name));
c->options = options;
-#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
- c->brl_options = brl_options;
-#endif
+ braille_set_options(c, brl_options);
+
c->index = idx;
return 0;
}
@@ -1790,20 +1785,8 @@ static int __init console_setup(char *str)
char *s, *options, *brl_options = NULL;
int idx;
-#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
- if (!memcmp(str, "brl,", 4)) {
- brl_options = "";
- str += 4;
- } else if (!memcmp(str, "brl=", 4)) {
- brl_options = str + 4;
- str = strchr(brl_options, ',');
- if (!str) {
- printk(KERN_ERR "need port name after brl=\n");
- return 1;
- }
- *(str++) = 0;
- }
-#endif
+ if (_braille_console_setup(&str, &brl_options))
+ return 1;
/*
* Decode str into name, index, options.
@@ -1858,15 +1841,15 @@ int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, cha
struct console_cmdline *c;
int i;
- for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++)
- if (strcmp(console_cmdline[i].name, name) == 0 &&
- console_cmdline[i].index == idx) {
- c = &console_cmdline[i];
- strlcpy(c->name, name_new, sizeof(c->name));
- c->name[sizeof(c->name) - 1] = 0;
- c->options = options;
- c->index = idx_new;
- return i;
+ for (i = 0, c = console_cmdline;
+ i < MAX_CMDLINECONSOLES && c->name[0];
+ i++, c++)
+ if (strcmp(c->name, name) == 0 && c->index == idx) {
+ strlcpy(c->name, name_new, sizeof(c->name));
+ c->name[sizeof(c->name) - 1] = 0;
+ c->options = options;
+ c->index = idx_new;
+ return i;
}
/* not found */
return -1;
@@ -2046,7 +2029,7 @@ void console_unlock(void)
console_cont_flush(text, sizeof(text));
again:
for (;;) {
- struct log *msg;
+ struct printk_log *msg;
size_t len;
int level;
@@ -2241,6 +2224,14 @@ void register_console(struct console *newcon)
int i;
unsigned long flags;
struct console *bcon = NULL;
+ struct console_cmdline *c;
+
+ if (console_drivers)
+ for_each_console(bcon)
+ if (WARN(bcon == newcon,
+ "console '%s%d' already registered\n",
+ bcon->name, bcon->index))
+ return;
/*
* before we register a new CON_BOOT console, make sure we don't
@@ -2288,30 +2279,25 @@ void register_console(struct console *newcon)
* See if this console matches one we selected on
* the command line.
*/
- for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0];
- i++) {
- if (strcmp(console_cmdline[i].name, newcon->name) != 0)
+ for (i = 0, c = console_cmdline;
+ i < MAX_CMDLINECONSOLES && c->name[0];
+ i++, c++) {
+ if (strcmp(c->name, newcon->name) != 0)
continue;
if (newcon->index >= 0 &&
- newcon->index != console_cmdline[i].index)
+ newcon->index != c->index)
continue;
if (newcon->index < 0)
- newcon->index = console_cmdline[i].index;
-#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
- if (console_cmdline[i].brl_options) {
- newcon->flags |= CON_BRL;
- braille_register_console(newcon,
- console_cmdline[i].index,
- console_cmdline[i].options,
- console_cmdline[i].brl_options);
+ newcon->index = c->index;
+
+ if (_braille_register_console(newcon, c))
return;
- }
-#endif
+
if (newcon->setup &&
newcon->setup(newcon, console_cmdline[i].options) != 0)
break;
newcon->flags |= CON_ENABLED;
- newcon->index = console_cmdline[i].index;
+ newcon->index = c->index;
if (i == selected_console) {
newcon->flags |= CON_CONSDEV;
preferred_console = selected_console;
@@ -2394,13 +2380,13 @@ EXPORT_SYMBOL(register_console);
int unregister_console(struct console *console)
{
struct console *a, *b;
- int res = 1;
+ int res;
-#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
- if (console->flags & CON_BRL)
- return braille_unregister_console(console);
-#endif
+ res = _braille_unregister_console(console);
+ if (res)
+ return res;
+ res = 1;
console_lock();
if (console_drivers == console) {
console_drivers=console->next;
@@ -2666,7 +2652,7 @@ void kmsg_dump(enum kmsg_dump_reason reason)
bool kmsg_dump_get_line_nolock(struct kmsg_dumper *dumper, bool syslog,
char *line, size_t size, size_t *len)
{
- struct log *msg;
+ struct printk_log *msg;
size_t l = 0;
bool ret = false;
@@ -2778,7 +2764,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
idx = dumper->cur_idx;
prev = 0;
while (seq < dumper->next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct printk_log *msg = log_from_idx(idx);
l += msg_print_text(msg, prev, true, NULL, 0);
idx = log_next(idx);
@@ -2791,7 +2777,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
idx = dumper->cur_idx;
prev = 0;
while (l > size && seq < dumper->next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct printk_log *msg = log_from_idx(idx);
l -= msg_print_text(msg, prev, true, NULL, 0);
idx = log_next(idx);
@@ -2806,7 +2792,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
l = 0;
prev = 0;
while (seq < dumper->next_seq) {
- struct log *msg = log_from_idx(idx);
+ struct printk_log *msg = log_from_idx(idx);
l += msg_print_text(msg, prev, syslog, buf + l, size - l);
idx = log_next(idx);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 4041f5747e7..a146ee327f6 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -469,7 +469,6 @@ static int ptrace_detach(struct task_struct *child, unsigned int data)
/* Architecture-specific hardware disable .. */
ptrace_disable(child);
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
- flush_ptrace_hw_breakpoint(child);
write_lock_irq(&tasklist_lock);
/*
diff --git a/kernel/rcu.h b/kernel/rcu.h
index 7f8e7590e3e..77131966c4a 100644
--- a/kernel/rcu.h
+++ b/kernel/rcu.h
@@ -67,12 +67,15 @@
extern struct debug_obj_descr rcuhead_debug_descr;
-static inline void debug_rcu_head_queue(struct rcu_head *head)
+static inline int debug_rcu_head_queue(struct rcu_head *head)
{
- debug_object_activate(head, &rcuhead_debug_descr);
+ int r1;
+
+ r1 = debug_object_activate(head, &rcuhead_debug_descr);
debug_object_active_state(head, &rcuhead_debug_descr,
STATE_RCU_HEAD_READY,
STATE_RCU_HEAD_QUEUED);
+ return r1;
}
static inline void debug_rcu_head_unqueue(struct rcu_head *head)
@@ -83,8 +86,9 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head)
debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
-static inline void debug_rcu_head_queue(struct rcu_head *head)
+static inline int debug_rcu_head_queue(struct rcu_head *head)
{
+ return 0;
}
static inline void debug_rcu_head_unqueue(struct rcu_head *head)
@@ -94,7 +98,7 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head)
extern void kfree(const void *);
-static inline bool __rcu_reclaim(char *rn, struct rcu_head *head)
+static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
{
unsigned long offset = (unsigned long)head->func;
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index cce6ba8bbac..33eb4620aa1 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -212,43 +212,6 @@ static inline void debug_rcu_head_free(struct rcu_head *head)
}
/*
- * fixup_init is called when:
- * - an active object is initialized
- */
-static int rcuhead_fixup_init(void *addr, enum debug_obj_state state)
-{
- struct rcu_head *head = addr;
-
- switch (state) {
- case ODEBUG_STATE_ACTIVE:
- /*
- * Ensure that queued callbacks are all executed.
- * If we detect that we are nested in a RCU read-side critical
- * section, we should simply fail, otherwise we would deadlock.
- * In !PREEMPT configurations, there is no way to tell if we are
- * in a RCU read-side critical section or not, so we never
- * attempt any fixup and just print a warning.
- */
-#ifndef CONFIG_PREEMPT
- WARN_ON_ONCE(1);
- return 0;
-#endif
- if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
- irqs_disabled()) {
- WARN_ON_ONCE(1);
- return 0;
- }
- rcu_barrier();
- rcu_barrier_sched();
- rcu_barrier_bh();
- debug_object_init(head, &rcuhead_debug_descr);
- return 1;
- default:
- return 0;
- }
-}
-
-/*
* fixup_activate is called when:
* - an active object is activated
* - an unknown object is activated (might be a statically initialized object)
@@ -268,69 +231,8 @@ static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
debug_object_init(head, &rcuhead_debug_descr);
debug_object_activate(head, &rcuhead_debug_descr);
return 0;
-
- case ODEBUG_STATE_ACTIVE:
- /*
- * Ensure that queued callbacks are all executed.
- * If we detect that we are nested in a RCU read-side critical
- * section, we should simply fail, otherwise we would deadlock.
- * In !PREEMPT configurations, there is no way to tell if we are
- * in a RCU read-side critical section or not, so we never
- * attempt any fixup and just print a warning.
- */
-#ifndef CONFIG_PREEMPT
- WARN_ON_ONCE(1);
- return 0;
-#endif
- if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
- irqs_disabled()) {
- WARN_ON_ONCE(1);
- return 0;
- }
- rcu_barrier();
- rcu_barrier_sched();
- rcu_barrier_bh();
- debug_object_activate(head, &rcuhead_debug_descr);
- return 1;
default:
- return 0;
- }
-}
-
-/*
- * fixup_free is called when:
- * - an active object is freed
- */
-static int rcuhead_fixup_free(void *addr, enum debug_obj_state state)
-{
- struct rcu_head *head = addr;
-
- switch (state) {
- case ODEBUG_STATE_ACTIVE:
- /*
- * Ensure that queued callbacks are all executed.
- * If we detect that we are nested in a RCU read-side critical
- * section, we should simply fail, otherwise we would deadlock.
- * In !PREEMPT configurations, there is no way to tell if we are
- * in a RCU read-side critical section or not, so we never
- * attempt any fixup and just print a warning.
- */
-#ifndef CONFIG_PREEMPT
- WARN_ON_ONCE(1);
- return 0;
-#endif
- if (rcu_preempt_depth() != 0 || preempt_count() != 0 ||
- irqs_disabled()) {
- WARN_ON_ONCE(1);
- return 0;
- }
- rcu_barrier();
- rcu_barrier_sched();
- rcu_barrier_bh();
- debug_object_free(head, &rcuhead_debug_descr);
return 1;
- default:
- return 0;
}
}
@@ -369,15 +271,13 @@ EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
struct debug_obj_descr rcuhead_debug_descr = {
.name = "rcu_head",
- .fixup_init = rcuhead_fixup_init,
.fixup_activate = rcuhead_fixup_activate,
- .fixup_free = rcuhead_fixup_free,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
-void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp,
+void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
unsigned long secs,
unsigned long c_old, unsigned long c)
{
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index aa344111de3..9ed6075dc56 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -264,7 +264,7 @@ void rcu_check_callbacks(int cpu, int user)
*/
static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
{
- char *rn = NULL;
+ const char *rn = NULL;
struct rcu_head *next, *list;
unsigned long flags;
RCU_TRACE(int cb_count = 0);
diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
index 0cd385acccf..280d06cae35 100644
--- a/kernel/rcutiny_plugin.h
+++ b/kernel/rcutiny_plugin.h
@@ -36,7 +36,7 @@ struct rcu_ctrlblk {
RCU_TRACE(unsigned long gp_start); /* Start time for stalls. */
RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
- RCU_TRACE(char *name); /* Name of RCU type. */
+ RCU_TRACE(const char *name); /* Name of RCU type. */
};
/* Definition for rcupdate control block. */
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index f4871e52c54..be63101c617 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -52,72 +52,78 @@
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>");
-static int nreaders = -1; /* # reader threads, defaults to 2*ncpus */
-static int nfakewriters = 4; /* # fake writer threads */
-static int stat_interval = 60; /* Interval between stats, in seconds. */
- /* Zero means "only at end of test". */
-static bool verbose; /* Print more debug info. */
-static bool test_no_idle_hz = true;
- /* Test RCU support for tickless idle CPUs. */
-static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/
-static int stutter = 5; /* Start/stop testing interval (in sec) */
-static int irqreader = 1; /* RCU readers from irq (timers). */
-static int fqs_duration; /* Duration of bursts (us), 0 to disable. */
-static int fqs_holdoff; /* Hold time within burst (us). */
-static int fqs_stutter = 3; /* Wait time between bursts (s). */
-static int n_barrier_cbs; /* Number of callbacks to test RCU barriers. */
-static int onoff_interval; /* Wait time between CPU hotplugs, 0=disable. */
-static int onoff_holdoff; /* Seconds after boot before CPU hotplugs. */
-static int shutdown_secs; /* Shutdown time (s). <=0 for no shutdown. */
-static int stall_cpu; /* CPU-stall duration (s). 0 for no stall. */
-static int stall_cpu_holdoff = 10; /* Time to wait until stall (s). */
-static int test_boost = 1; /* Test RCU prio boost: 0=no, 1=maybe, 2=yes. */
-static int test_boost_interval = 7; /* Interval between boost tests, seconds. */
-static int test_boost_duration = 4; /* Duration of each boost test, seconds. */
-static char *torture_type = "rcu"; /* What RCU implementation to torture. */
-
-module_param(nreaders, int, 0444);
-MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
-module_param(nfakewriters, int, 0444);
-MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
-module_param(stat_interval, int, 0644);
-MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
-module_param(verbose, bool, 0444);
-MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
-module_param(test_no_idle_hz, bool, 0444);
-MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
-module_param(shuffle_interval, int, 0444);
-MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
-module_param(stutter, int, 0444);
-MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
-module_param(irqreader, int, 0444);
-MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
+static int fqs_duration;
module_param(fqs_duration, int, 0444);
-MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us)");
+MODULE_PARM_DESC(fqs_duration, "Duration of fqs bursts (us), 0 to disable");
+static int fqs_holdoff;
module_param(fqs_holdoff, int, 0444);
MODULE_PARM_DESC(fqs_holdoff, "Holdoff time within fqs bursts (us)");
+static int fqs_stutter = 3;
module_param(fqs_stutter, int, 0444);
MODULE_PARM_DESC(fqs_stutter, "Wait time between fqs bursts (s)");
+static bool gp_exp;
+module_param(gp_exp, bool, 0444);
+MODULE_PARM_DESC(gp_exp, "Use expedited GP wait primitives");
+static bool gp_normal;
+module_param(gp_normal, bool, 0444);
+MODULE_PARM_DESC(gp_normal, "Use normal (non-expedited) GP wait primitives");
+static int irqreader = 1;
+module_param(irqreader, int, 0444);
+MODULE_PARM_DESC(irqreader, "Allow RCU readers from irq handlers");
+static int n_barrier_cbs;
module_param(n_barrier_cbs, int, 0444);
MODULE_PARM_DESC(n_barrier_cbs, "# of callbacks/kthreads for barrier testing");
-module_param(onoff_interval, int, 0444);
-MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable");
+static int nfakewriters = 4;
+module_param(nfakewriters, int, 0444);
+MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
+static int nreaders = -1;
+module_param(nreaders, int, 0444);
+MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
+static int object_debug;
+module_param(object_debug, int, 0444);
+MODULE_PARM_DESC(object_debug, "Enable debug-object double call_rcu() testing");
+static int onoff_holdoff;
module_param(onoff_holdoff, int, 0444);
MODULE_PARM_DESC(onoff_holdoff, "Time after boot before CPU hotplugs (s)");
+static int onoff_interval;
+module_param(onoff_interval, int, 0444);
+MODULE_PARM_DESC(onoff_interval, "Time between CPU hotplugs (s), 0=disable");
+static int shuffle_interval = 3;
+module_param(shuffle_interval, int, 0444);
+MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
+static int shutdown_secs;
module_param(shutdown_secs, int, 0444);
-MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), zero to disable.");
+MODULE_PARM_DESC(shutdown_secs, "Shutdown time (s), <= zero to disable.");
+static int stall_cpu;
module_param(stall_cpu, int, 0444);
MODULE_PARM_DESC(stall_cpu, "Stall duration (s), zero to disable.");
+static int stall_cpu_holdoff = 10;
module_param(stall_cpu_holdoff, int, 0444);
MODULE_PARM_DESC(stall_cpu_holdoff, "Time to wait before starting stall (s).");
+static int stat_interval = 60;
+module_param(stat_interval, int, 0644);
+MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
+static int stutter = 5;
+module_param(stutter, int, 0444);
+MODULE_PARM_DESC(stutter, "Number of seconds to run/halt test");
+static int test_boost = 1;
module_param(test_boost, int, 0444);
MODULE_PARM_DESC(test_boost, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
-module_param(test_boost_interval, int, 0444);
-MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds.");
+static int test_boost_duration = 4;
module_param(test_boost_duration, int, 0444);
MODULE_PARM_DESC(test_boost_duration, "Duration of each boost test, seconds.");
+static int test_boost_interval = 7;
+module_param(test_boost_interval, int, 0444);
+MODULE_PARM_DESC(test_boost_interval, "Interval between boost tests, seconds.");
+static bool test_no_idle_hz = true;
+module_param(test_no_idle_hz, bool, 0444);
+MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
+static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
-MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
+MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, ...)");
+static bool verbose;
+module_param(verbose, bool, 0444);
+MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
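/*
 * Example invocation (illustrative only): all of the above are module
 * parameters and can be set at load time, e.g.:
 *
 *	modprobe rcutorture torture_type=rcu_bh gp_exp=1 verbose=1
 */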
#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
@@ -267,7 +273,7 @@ rcutorture_shutdown_notify(struct notifier_block *unused1,
* Absorb kthreads into a kernel function that won't return, so that
* they won't ever access module text or data again.
*/
-static void rcutorture_shutdown_absorb(char *title)
+static void rcutorture_shutdown_absorb(const char *title)
{
if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
pr_notice(
@@ -337,7 +343,7 @@ rcu_random(struct rcu_random_state *rrsp)
}
static void
-rcu_stutter_wait(char *title)
+rcu_stutter_wait(const char *title)
{
while (stutter_pause_test || !rcutorture_runnable) {
if (rcutorture_runnable)
@@ -360,13 +366,14 @@ struct rcu_torture_ops {
int (*completed)(void);
void (*deferred_free)(struct rcu_torture *p);
void (*sync)(void);
+ void (*exp_sync)(void);
void (*call)(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
void (*cb_barrier)(void);
void (*fqs)(void);
int (*stats)(char *page);
int irq_capable;
int can_boost;
- char *name;
+ const char *name;
};
static struct rcu_torture_ops *cur_ops;
@@ -443,81 +450,27 @@ static void rcu_torture_deferred_free(struct rcu_torture *p)
call_rcu(&p->rtort_rcu, rcu_torture_cb);
}
-static struct rcu_torture_ops rcu_ops = {
- .init = NULL,
- .readlock = rcu_torture_read_lock,
- .read_delay = rcu_read_delay,
- .readunlock = rcu_torture_read_unlock,
- .completed = rcu_torture_completed,
- .deferred_free = rcu_torture_deferred_free,
- .sync = synchronize_rcu,
- .call = call_rcu,
- .cb_barrier = rcu_barrier,
- .fqs = rcu_force_quiescent_state,
- .stats = NULL,
- .irq_capable = 1,
- .can_boost = rcu_can_boost(),
- .name = "rcu"
-};
-
-static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
-{
- int i;
- struct rcu_torture *rp;
- struct rcu_torture *rp1;
-
- cur_ops->sync();
- list_add(&p->rtort_free, &rcu_torture_removed);
- list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
- i = rp->rtort_pipe_count;
- if (i > RCU_TORTURE_PIPE_LEN)
- i = RCU_TORTURE_PIPE_LEN;
- atomic_inc(&rcu_torture_wcount[i]);
- if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
- rp->rtort_mbtest = 0;
- list_del(&rp->rtort_free);
- rcu_torture_free(rp);
- }
- }
-}
-
static void rcu_sync_torture_init(void)
{
INIT_LIST_HEAD(&rcu_torture_removed);
}
-static struct rcu_torture_ops rcu_sync_ops = {
+static struct rcu_torture_ops rcu_ops = {
.init = rcu_sync_torture_init,
.readlock = rcu_torture_read_lock,
.read_delay = rcu_read_delay,
.readunlock = rcu_torture_read_unlock,
.completed = rcu_torture_completed,
- .deferred_free = rcu_sync_torture_deferred_free,
+ .deferred_free = rcu_torture_deferred_free,
.sync = synchronize_rcu,
- .call = NULL,
- .cb_barrier = NULL,
- .fqs = rcu_force_quiescent_state,
- .stats = NULL,
- .irq_capable = 1,
- .can_boost = rcu_can_boost(),
- .name = "rcu_sync"
-};
-
-static struct rcu_torture_ops rcu_expedited_ops = {
- .init = rcu_sync_torture_init,
- .readlock = rcu_torture_read_lock,
- .read_delay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = rcu_torture_read_unlock,
- .completed = rcu_no_completed,
- .deferred_free = rcu_sync_torture_deferred_free,
- .sync = synchronize_rcu_expedited,
- .call = NULL,
- .cb_barrier = NULL,
+ .exp_sync = synchronize_rcu_expedited,
+ .call = call_rcu,
+ .cb_barrier = rcu_barrier,
.fqs = rcu_force_quiescent_state,
.stats = NULL,
.irq_capable = 1,
.can_boost = rcu_can_boost(),
- .name = "rcu_expedited"
+ .name = "rcu"
};
/*
@@ -546,13 +499,14 @@ static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
}
static struct rcu_torture_ops rcu_bh_ops = {
- .init = NULL,
+ .init = rcu_sync_torture_init,
.readlock = rcu_bh_torture_read_lock,
.read_delay = rcu_read_delay, /* just reuse rcu's version. */
.readunlock = rcu_bh_torture_read_unlock,
.completed = rcu_bh_torture_completed,
.deferred_free = rcu_bh_torture_deferred_free,
.sync = synchronize_rcu_bh,
+ .exp_sync = synchronize_rcu_bh_expedited,
.call = call_rcu_bh,
.cb_barrier = rcu_barrier_bh,
.fqs = rcu_bh_force_quiescent_state,
@@ -561,38 +515,6 @@ static struct rcu_torture_ops rcu_bh_ops = {
.name = "rcu_bh"
};
-static struct rcu_torture_ops rcu_bh_sync_ops = {
- .init = rcu_sync_torture_init,
- .readlock = rcu_bh_torture_read_lock,
- .read_delay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = rcu_bh_torture_read_unlock,
- .completed = rcu_bh_torture_completed,
- .deferred_free = rcu_sync_torture_deferred_free,
- .sync = synchronize_rcu_bh,
- .call = NULL,
- .cb_barrier = NULL,
- .fqs = rcu_bh_force_quiescent_state,
- .stats = NULL,
- .irq_capable = 1,
- .name = "rcu_bh_sync"
-};
-
-static struct rcu_torture_ops rcu_bh_expedited_ops = {
- .init = rcu_sync_torture_init,
- .readlock = rcu_bh_torture_read_lock,
- .read_delay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = rcu_bh_torture_read_unlock,
- .completed = rcu_bh_torture_completed,
- .deferred_free = rcu_sync_torture_deferred_free,
- .sync = synchronize_rcu_bh_expedited,
- .call = NULL,
- .cb_barrier = NULL,
- .fqs = rcu_bh_force_quiescent_state,
- .stats = NULL,
- .irq_capable = 1,
- .name = "rcu_bh_expedited"
-};
-
/*
* Definitions for srcu torture testing.
*/
@@ -667,6 +589,11 @@ static int srcu_torture_stats(char *page)
return cnt;
}
+static void srcu_torture_synchronize_expedited(void)
+{
+ synchronize_srcu_expedited(&srcu_ctl);
+}
+
static struct rcu_torture_ops srcu_ops = {
.init = rcu_sync_torture_init,
.readlock = srcu_torture_read_lock,
@@ -675,45 +602,13 @@ static struct rcu_torture_ops srcu_ops = {
.completed = srcu_torture_completed,
.deferred_free = srcu_torture_deferred_free,
.sync = srcu_torture_synchronize,
+ .exp_sync = srcu_torture_synchronize_expedited,
.call = srcu_torture_call,
.cb_barrier = srcu_torture_barrier,
.stats = srcu_torture_stats,
.name = "srcu"
};
-static struct rcu_torture_ops srcu_sync_ops = {
- .init = rcu_sync_torture_init,
- .readlock = srcu_torture_read_lock,
- .read_delay = srcu_read_delay,
- .readunlock = srcu_torture_read_unlock,
- .completed = srcu_torture_completed,
- .deferred_free = rcu_sync_torture_deferred_free,
- .sync = srcu_torture_synchronize,
- .call = NULL,
- .cb_barrier = NULL,
- .stats = srcu_torture_stats,
- .name = "srcu_sync"
-};
-
-static void srcu_torture_synchronize_expedited(void)
-{
- synchronize_srcu_expedited(&srcu_ctl);
-}
-
-static struct rcu_torture_ops srcu_expedited_ops = {
- .init = rcu_sync_torture_init,
- .readlock = srcu_torture_read_lock,
- .read_delay = srcu_read_delay,
- .readunlock = srcu_torture_read_unlock,
- .completed = srcu_torture_completed,
- .deferred_free = rcu_sync_torture_deferred_free,
- .sync = srcu_torture_synchronize_expedited,
- .call = NULL,
- .cb_barrier = NULL,
- .stats = srcu_torture_stats,
- .name = "srcu_expedited"
-};
-
/*
* Definitions for sched torture testing.
*/
@@ -742,6 +637,8 @@ static struct rcu_torture_ops sched_ops = {
.completed = rcu_no_completed,
.deferred_free = rcu_sched_torture_deferred_free,
.sync = synchronize_sched,
+ .exp_sync = synchronize_sched_expedited,
+ .call = call_rcu_sched,
.cb_barrier = rcu_barrier_sched,
.fqs = rcu_sched_force_quiescent_state,
.stats = NULL,
@@ -749,35 +646,6 @@ static struct rcu_torture_ops sched_ops = {
.name = "sched"
};
-static struct rcu_torture_ops sched_sync_ops = {
- .init = rcu_sync_torture_init,
- .readlock = sched_torture_read_lock,
- .read_delay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = sched_torture_read_unlock,
- .completed = rcu_no_completed,
- .deferred_free = rcu_sync_torture_deferred_free,
- .sync = synchronize_sched,
- .cb_barrier = NULL,
- .fqs = rcu_sched_force_quiescent_state,
- .stats = NULL,
- .name = "sched_sync"
-};
-
-static struct rcu_torture_ops sched_expedited_ops = {
- .init = rcu_sync_torture_init,
- .readlock = sched_torture_read_lock,
- .read_delay = rcu_read_delay, /* just reuse rcu's version. */
- .readunlock = sched_torture_read_unlock,
- .completed = rcu_no_completed,
- .deferred_free = rcu_sync_torture_deferred_free,
- .sync = synchronize_sched_expedited,
- .cb_barrier = NULL,
- .fqs = rcu_sched_force_quiescent_state,
- .stats = NULL,
- .irq_capable = 1,
- .name = "sched_expedited"
-};
-
/*
* RCU torture priority-boost testing. Runs one real-time thread per
* CPU for moderate bursts, repeatedly registering RCU callbacks and
@@ -927,9 +795,10 @@ rcu_torture_fqs(void *arg)
static int
rcu_torture_writer(void *arg)
{
+ bool exp;
int i;
- long oldbatch = rcu_batches_completed();
struct rcu_torture *rp;
+ struct rcu_torture *rp1;
struct rcu_torture *old_rp;
static DEFINE_RCU_RANDOM(rand);
@@ -954,10 +823,33 @@ rcu_torture_writer(void *arg)
i = RCU_TORTURE_PIPE_LEN;
atomic_inc(&rcu_torture_wcount[i]);
old_rp->rtort_pipe_count++;
- cur_ops->deferred_free(old_rp);
+ if (gp_normal == gp_exp)
+ exp = !!(rcu_random(&rand) & 0x80);
+ else
+ exp = gp_exp;
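/*
 * When gp_normal and gp_exp are both set or both clear, roughly half
 * of the updates take the expedited path below and half use the
 * normal deferred-free path; otherwise the explicitly requested
 * flavor is used.
 */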
+ if (!exp) {
+ cur_ops->deferred_free(old_rp);
+ } else {
+ cur_ops->exp_sync();
+ list_add(&old_rp->rtort_free,
+ &rcu_torture_removed);
+ list_for_each_entry_safe(rp, rp1,
+ &rcu_torture_removed,
+ rtort_free) {
+ i = rp->rtort_pipe_count;
+ if (i > RCU_TORTURE_PIPE_LEN)
+ i = RCU_TORTURE_PIPE_LEN;
+ atomic_inc(&rcu_torture_wcount[i]);
+ if (++rp->rtort_pipe_count >=
+ RCU_TORTURE_PIPE_LEN) {
+ rp->rtort_mbtest = 0;
+ list_del(&rp->rtort_free);
+ rcu_torture_free(rp);
+ }
+ }
+ }
}
rcutorture_record_progress(++rcu_torture_current_version);
- oldbatch = cur_ops->completed();
rcu_stutter_wait("rcu_torture_writer");
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
@@ -983,10 +875,18 @@ rcu_torture_fakewriter(void *arg)
schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
udelay(rcu_random(&rand) & 0x3ff);
if (cur_ops->cb_barrier != NULL &&
- rcu_random(&rand) % (nfakewriters * 8) == 0)
+ rcu_random(&rand) % (nfakewriters * 8) == 0) {
cur_ops->cb_barrier();
- else
+ } else if (gp_normal == gp_exp) {
+ if (rcu_random(&rand) & 0x80)
+ cur_ops->sync();
+ else
+ cur_ops->exp_sync();
+ } else if (gp_normal) {
cur_ops->sync();
+ } else {
+ cur_ops->exp_sync();
+ }
rcu_stutter_wait("rcu_torture_fakewriter");
} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
@@ -1364,7 +1264,7 @@ rcu_torture_stutter(void *arg)
}
static inline void
-rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, char *tag)
+rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
{
pr_alert("%s" TORTURE_FLAG
"--- %s: nreaders=%d nfakewriters=%d "
@@ -1534,7 +1434,13 @@ rcu_torture_onoff(void *arg)
torture_type, cpu);
starttime = jiffies;
n_online_attempts++;
- if (cpu_up(cpu) == 0) {
+ ret = cpu_up(cpu);
+ if (ret) {
+ if (verbose)
+ pr_alert("%s" TORTURE_FLAG
+ "rcu_torture_onoff task: online %d failed: errno %d\n",
+ torture_type, cpu, ret);
+ } else {
if (verbose)
pr_alert("%s" TORTURE_FLAG
"rcu_torture_onoff task: onlined %d\n",
@@ -1934,6 +1840,62 @@ rcu_torture_cleanup(void)
rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
}
+#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+static void rcu_torture_leak_cb(struct rcu_head *rhp)
+{
+}
+
+static void rcu_torture_err_cb(struct rcu_head *rhp)
+{
+ /*
+ * This -might- happen due to race conditions, but is unlikely.
+ * The scenario that leads to this happening is that the
+ * first of the pair of duplicate callbacks is queued,
+ * someone else starts a grace period that includes that
+ * callback, then the second of the pair must wait for the
+ * next grace period. Unlikely, but can happen. If it
+ * does happen, the debug-objects subsystem won't have splatted.
+ */
+ pr_alert("rcutorture: duplicated callback was invoked.\n");
+}
+#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+
+/*
+ * Verify that a duplicate call_rcu() causes debug-objects to complain, but only
+ * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test
+ * cannot be carried out.
+ */
+static void rcu_test_debug_objects(void)
+{
+#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+ struct rcu_head rh1;
+ struct rcu_head rh2;
+
+ init_rcu_head_on_stack(&rh1);
+ init_rcu_head_on_stack(&rh2);
+ pr_alert("rcutorture: WARN: Duplicate call_rcu() test starting.\n");
+
+ /* Try to queue the rh2 pair of callbacks for the same grace period. */
+ preempt_disable(); /* Prevent preemption from interrupting test. */
+ rcu_read_lock(); /* Make it impossible to finish a grace period. */
+ call_rcu(&rh1, rcu_torture_leak_cb); /* Start grace period. */
+ local_irq_disable(); /* Make it harder to start a new grace period. */
+ call_rcu(&rh2, rcu_torture_leak_cb);
+ call_rcu(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
+ local_irq_enable();
+ rcu_read_unlock();
+ preempt_enable();
+
+ /* Wait for them all to get done so we can safely return. */
+ rcu_barrier();
+ pr_alert("rcutorture: WARN: Duplicate call_rcu() test complete.\n");
+ destroy_rcu_head_on_stack(&rh1);
+ destroy_rcu_head_on_stack(&rh2);
+#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+ pr_alert("rcutorture: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n");
+#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+}
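/*
 * This test runs only when the object_debug module parameter defined
 * earlier is set, e.g. (illustrative): modprobe rcutorture object_debug=1
 * on a kernel built with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
 */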
+
static int __init
rcu_torture_init(void)
{
@@ -1941,11 +1903,9 @@ rcu_torture_init(void)
int cpu;
int firsterr = 0;
int retval;
- static struct rcu_torture_ops *torture_ops[] =
- { &rcu_ops, &rcu_sync_ops, &rcu_expedited_ops,
- &rcu_bh_ops, &rcu_bh_sync_ops, &rcu_bh_expedited_ops,
- &srcu_ops, &srcu_sync_ops, &srcu_expedited_ops,
- &sched_ops, &sched_sync_ops, &sched_expedited_ops, };
+ static struct rcu_torture_ops *torture_ops[] = {
+ &rcu_ops, &rcu_bh_ops, &srcu_ops, &sched_ops,
+ };
mutex_lock(&fullstop_mutex);
@@ -2163,6 +2123,8 @@ rcu_torture_init(void)
firsterr = retval;
goto unwind;
}
+ if (object_debug)
+ rcu_test_debug_objects();
rcutorture_record_test_transition();
mutex_unlock(&fullstop_mutex);
return 0;
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 068de3a9360..32618b3fe4e 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -53,18 +53,38 @@
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
+#include <linux/ftrace_event.h>
+#include <linux/suspend.h>
#include "rcutree.h"
#include <trace/events/rcu.h>
#include "rcu.h"
+/*
+ * Strings used in tracepoints need to be exported via the
+ * tracing system such that tools like perf and trace-cmd can
+ * translate the string address pointers to actual text.
+ */
+#define TPS(x) tracepoint_string(x)
+
/* Data structures. */
static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
-#define RCU_STATE_INITIALIZER(sname, sabbr, cr) { \
+/*
+ * In order to export the rcu_state name to the tracing tools, it
+ * needs to be added to the __tracepoint_string section.
+ * This requires defining a separate variable, tp_<sname>_varname,
+ * that points to the string being used, which allows the userspace
+ * tracing tools to map the string address back to the actual text.
+ */
+#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
+static char sname##_varname[] = #sname; \
+static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname; \
+struct rcu_state sname##_state = { \
.level = { &sname##_state.node[0] }, \
.call = cr, \
.fqs_state = RCU_GP_IDLE, \
@@ -75,16 +95,13 @@ static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
.orphan_donetail = &sname##_state.orphan_donelist, \
.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
.onoff_mutex = __MUTEX_INITIALIZER(sname##_state.onoff_mutex), \
- .name = #sname, \
+ .name = sname##_varname, \
.abbr = sabbr, \
-}
-
-struct rcu_state rcu_sched_state =
- RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
-DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
+}; \
+DEFINE_PER_CPU(struct rcu_data, sname##_data)
-struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
-DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
+RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
+RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
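/*
 * For reference, the rcu_sched invocation above expands to roughly:
 *
 *	static char rcu_sched_varname[] = "rcu_sched";
 *	static const char *tp_rcu_sched_varname
 *		__used __tracepoint_string = rcu_sched_varname;
 *	struct rcu_state rcu_sched_state = {
 *		.level = { &rcu_sched_state.node[0] },
 *		.call = call_rcu_sched,
 *		...
 *		.name = rcu_sched_varname,
 *		.abbr = 's',
 *	};
 *	DEFINE_PER_CPU(struct rcu_data, rcu_sched_data);
 */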
static struct rcu_state *rcu_state;
LIST_HEAD(rcu_struct_flavors);
@@ -178,7 +195,7 @@ void rcu_sched_qs(int cpu)
struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu);
if (rdp->passed_quiesce == 0)
- trace_rcu_grace_period("rcu_sched", rdp->gpnum, "cpuqs");
+ trace_rcu_grace_period(TPS("rcu_sched"), rdp->gpnum, TPS("cpuqs"));
rdp->passed_quiesce = 1;
}
@@ -187,7 +204,7 @@ void rcu_bh_qs(int cpu)
struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu);
if (rdp->passed_quiesce == 0)
- trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs");
+ trace_rcu_grace_period(TPS("rcu_bh"), rdp->gpnum, TPS("cpuqs"));
rdp->passed_quiesce = 1;
}
@@ -198,16 +215,20 @@ void rcu_bh_qs(int cpu)
*/
void rcu_note_context_switch(int cpu)
{
- trace_rcu_utilization("Start context switch");
+ trace_rcu_utilization(TPS("Start context switch"));
rcu_sched_qs(cpu);
rcu_preempt_note_context_switch(cpu);
- trace_rcu_utilization("End context switch");
+ trace_rcu_utilization(TPS("End context switch"));
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);
DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
.dynticks = ATOMIC_INIT(1),
+#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
+ .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
+ .dynticks_idle = ATOMIC_INIT(1),
+#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
};
static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */
@@ -226,7 +247,10 @@ module_param(jiffies_till_next_fqs, ulong, 0644);
static void rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
struct rcu_data *rdp);
-static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *));
+static void force_qs_rnp(struct rcu_state *rsp,
+ int (*f)(struct rcu_data *rsp, bool *isidle,
+ unsigned long *maxj),
+ bool *isidle, unsigned long *maxj);
static void force_quiescent_state(struct rcu_state *rsp);
static int rcu_pending(int cpu);
@@ -345,11 +369,11 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
bool user)
{
- trace_rcu_dyntick("Start", oldval, rdtp->dynticks_nesting);
+ trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
if (!user && !is_idle_task(current)) {
struct task_struct *idle = idle_task(smp_processor_id());
- trace_rcu_dyntick("Error on entry: not idle task", oldval, 0);
+ trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
ftrace_dump(DUMP_ORIG);
WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
current->pid, current->comm,
@@ -411,6 +435,7 @@ void rcu_idle_enter(void)
local_irq_save(flags);
rcu_eqs_enter(false);
+ rcu_sysidle_enter(&__get_cpu_var(rcu_dynticks), 0);
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);
@@ -428,27 +453,6 @@ void rcu_user_enter(void)
{
rcu_eqs_enter(1);
}
-
-/**
- * rcu_user_enter_after_irq - inform RCU that we are going to resume userspace
- * after the current irq returns.
- *
- * This is similar to rcu_user_enter() but in the context of a non-nesting
- * irq. After this call, RCU enters into idle mode when the interrupt
- * returns.
- */
-void rcu_user_enter_after_irq(void)
-{
- unsigned long flags;
- struct rcu_dynticks *rdtp;
-
- local_irq_save(flags);
- rdtp = &__get_cpu_var(rcu_dynticks);
- /* Ensure this irq is interrupting a non-idle RCU state. */
- WARN_ON_ONCE(!(rdtp->dynticks_nesting & DYNTICK_TASK_MASK));
- rdtp->dynticks_nesting = 1;
- local_irq_restore(flags);
-}
#endif /* CONFIG_RCU_USER_QS */
/**
@@ -479,9 +483,10 @@ void rcu_irq_exit(void)
rdtp->dynticks_nesting--;
WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
if (rdtp->dynticks_nesting)
- trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting);
+ trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
else
rcu_eqs_enter_common(rdtp, oldval, true);
+ rcu_sysidle_enter(rdtp, 1);
local_irq_restore(flags);
}
@@ -501,11 +506,11 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
smp_mb__after_atomic_inc(); /* See above. */
WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
rcu_cleanup_after_idle(smp_processor_id());
- trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
+ trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
if (!user && !is_idle_task(current)) {
struct task_struct *idle = idle_task(smp_processor_id());
- trace_rcu_dyntick("Error on exit: not idle task",
+ trace_rcu_dyntick(TPS("Error on exit: not idle task"),
oldval, rdtp->dynticks_nesting);
ftrace_dump(DUMP_ORIG);
WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
@@ -550,6 +555,7 @@ void rcu_idle_exit(void)
local_irq_save(flags);
rcu_eqs_exit(false);
+ rcu_sysidle_exit(&__get_cpu_var(rcu_dynticks), 0);
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);
@@ -565,28 +571,6 @@ void rcu_user_exit(void)
{
rcu_eqs_exit(1);
}
-
-/**
- * rcu_user_exit_after_irq - inform RCU that we won't resume to userspace
- * idle mode after the current non-nesting irq returns.
- *
- * This is similar to rcu_user_exit() but in the context of an irq.
- * This is called when the irq has interrupted a userspace RCU idle mode
- * context. When the current non-nesting interrupt returns after this call,
- * the CPU won't restore the RCU idle mode.
- */
-void rcu_user_exit_after_irq(void)
-{
- unsigned long flags;
- struct rcu_dynticks *rdtp;
-
- local_irq_save(flags);
- rdtp = &__get_cpu_var(rcu_dynticks);
- /* Ensure we are interrupting an RCU idle mode. */
- WARN_ON_ONCE(rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK);
- rdtp->dynticks_nesting += DYNTICK_TASK_EXIT_IDLE;
- local_irq_restore(flags);
-}
#endif /* CONFIG_RCU_USER_QS */
/**
@@ -620,9 +604,10 @@ void rcu_irq_enter(void)
rdtp->dynticks_nesting++;
WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
if (oldval)
- trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting);
+ trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
else
rcu_eqs_exit_common(rdtp, oldval, true);
+ rcu_sysidle_exit(rdtp, 1);
local_irq_restore(flags);
}
@@ -746,9 +731,11 @@ static int rcu_is_cpu_rrupt_from_idle(void)
* credit them with an implicit quiescent state. Return 1 if this CPU
* is in dynticks idle mode, which is an extended quiescent state.
*/
-static int dyntick_save_progress_counter(struct rcu_data *rdp)
+static int dyntick_save_progress_counter(struct rcu_data *rdp,
+ bool *isidle, unsigned long *maxj)
{
rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
+ rcu_sysidle_check_cpu(rdp, isidle, maxj);
return (rdp->dynticks_snap & 0x1) == 0;
}
@@ -758,7 +745,8 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp)
* idle state since the last call to dyntick_save_progress_counter()
* for this same CPU, or by virtue of having been offline.
*/
-static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
+static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
+ bool *isidle, unsigned long *maxj)
{
unsigned int curr;
unsigned int snap;
@@ -775,7 +763,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
* of the current RCU grace period.
*/
if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
- trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "dti");
+ trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
rdp->dynticks_fqs++;
return 1;
}
@@ -795,7 +783,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
return 0; /* Grace period is not old enough. */
barrier();
if (cpu_is_offline(rdp->cpu)) {
- trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl");
+ trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
rdp->offline_fqs++;
return 1;
}
@@ -1032,7 +1020,7 @@ static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
* rcu_nocb_wait_gp().
*/
static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
- unsigned long c, char *s)
+ unsigned long c, const char *s)
{
trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
rnp->completed, c, rnp->level,
@@ -1058,9 +1046,9 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp)
* grace period is already marked as needed, return to the caller.
*/
c = rcu_cbs_completed(rdp->rsp, rnp);
- trace_rcu_future_gp(rnp, rdp, c, "Startleaf");
+ trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
if (rnp->need_future_gp[c & 0x1]) {
- trace_rcu_future_gp(rnp, rdp, c, "Prestartleaf");
+ trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
return c;
}
@@ -1074,7 +1062,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp)
if (rnp->gpnum != rnp->completed ||
ACCESS_ONCE(rnp->gpnum) != ACCESS_ONCE(rnp->completed)) {
rnp->need_future_gp[c & 0x1]++;
- trace_rcu_future_gp(rnp, rdp, c, "Startedleaf");
+ trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
return c;
}
@@ -1102,7 +1090,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp)
* recorded, trace and leave.
*/
if (rnp_root->need_future_gp[c & 0x1]) {
- trace_rcu_future_gp(rnp, rdp, c, "Prestartedroot");
+ trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
goto unlock_out;
}
@@ -1111,9 +1099,9 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp)
/* If a grace period is not already in progress, start one. */
if (rnp_root->gpnum != rnp_root->completed) {
- trace_rcu_future_gp(rnp, rdp, c, "Startedleafroot");
+ trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
} else {
- trace_rcu_future_gp(rnp, rdp, c, "Startedroot");
+ trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
}
unlock_out:
@@ -1137,7 +1125,8 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
rcu_nocb_gp_cleanup(rsp, rnp);
rnp->need_future_gp[c & 0x1] = 0;
needmore = rnp->need_future_gp[(c + 1) & 0x1];
- trace_rcu_future_gp(rnp, rdp, c, needmore ? "CleanupMore" : "Cleanup");
+ trace_rcu_future_gp(rnp, rdp, c,
+ needmore ? TPS("CleanupMore") : TPS("Cleanup"));
return needmore;
}
@@ -1205,9 +1194,9 @@ static void rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
/* Trace depending on how much we were able to accelerate. */
if (!*rdp->nxttail[RCU_WAIT_TAIL])
- trace_rcu_grace_period(rsp->name, rdp->gpnum, "AccWaitCB");
+ trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
else
- trace_rcu_grace_period(rsp->name, rdp->gpnum, "AccReadyCB");
+ trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
}
/*
@@ -1273,7 +1262,7 @@ static void __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, struc
/* Remember that we saw this grace-period completion. */
rdp->completed = rnp->completed;
- trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuend");
+ trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
}
if (rdp->gpnum != rnp->gpnum) {
@@ -1283,7 +1272,7 @@ static void __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp, struc
* go looking for one.
*/
rdp->gpnum = rnp->gpnum;
- trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart");
+ trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
rdp->passed_quiesce = 0;
rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
zero_cpu_stall_ticks(rdp);
@@ -1315,6 +1304,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
struct rcu_data *rdp;
struct rcu_node *rnp = rcu_get_root(rsp);
+ rcu_bind_gp_kthread();
raw_spin_lock_irq(&rnp->lock);
rsp->gp_flags = 0; /* Clear all flags: New grace period. */
@@ -1326,7 +1316,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
/* Advance to a new grace period and initialize state. */
rsp->gpnum++;
- trace_rcu_grace_period(rsp->name, rsp->gpnum, "start");
+ trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
record_gp_stall_check_time(rsp);
raw_spin_unlock_irq(&rnp->lock);
@@ -1379,16 +1369,25 @@ static int rcu_gp_init(struct rcu_state *rsp)
int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
{
int fqs_state = fqs_state_in;
+ bool isidle = false;
+ unsigned long maxj;
struct rcu_node *rnp = rcu_get_root(rsp);
rsp->n_force_qs++;
if (fqs_state == RCU_SAVE_DYNTICK) {
/* Collect dyntick-idle snapshots. */
- force_qs_rnp(rsp, dyntick_save_progress_counter);
+ if (is_sysidle_rcu_state(rsp)) {
+ isidle = 1;
+ maxj = jiffies - ULONG_MAX / 4;
+ }
+ force_qs_rnp(rsp, dyntick_save_progress_counter,
+ &isidle, &maxj);
+ rcu_sysidle_report_gp(rsp, isidle, maxj);
fqs_state = RCU_FORCE_QS;
} else {
/* Handle dyntick-idle and offline CPUs. */
- force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
+ isidle = 0;
+ force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
}
/* Clear flag to prevent immediate re-entry. */
if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
@@ -1448,7 +1447,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
rcu_nocb_gp_set(rnp, nocb);
rsp->completed = rsp->gpnum; /* Declare grace period done. */
- trace_rcu_grace_period(rsp->name, rsp->completed, "end");
+ trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
rsp->fqs_state = RCU_GP_IDLE;
rdp = this_cpu_ptr(rsp->rda);
rcu_advance_cbs(rsp, rnp, rdp); /* Reduce false positives below. */
@@ -1558,10 +1557,12 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
/*
* We can't do wakeups while holding the rnp->lock, as that
- * could cause possible deadlocks with the rq->lock. Deter
- * the wakeup to interrupt context.
+ * could cause possible deadlocks with the rq->lock. Defer
+ * the wakeup to interrupt context. And don't bother waking
+ * up the running kthread.
*/
- irq_work_queue(&rsp->wakeup_work);
+ if (current != rsp->gp_kthread)
+ irq_work_queue(&rsp->wakeup_work);
}
/*
@@ -1857,7 +1858,7 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
RCU_TRACE(mask = rdp->grpmask);
trace_rcu_grace_period(rsp->name,
rnp->gpnum + 1 - !!(rnp->qsmask & mask),
- "cpuofl");
+ TPS("cpuofl"));
}
/*
@@ -2044,7 +2045,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
*/
void rcu_check_callbacks(int cpu, int user)
{
- trace_rcu_utilization("Start scheduler-tick");
+ trace_rcu_utilization(TPS("Start scheduler-tick"));
increment_cpu_stall_ticks();
if (user || rcu_is_cpu_rrupt_from_idle()) {
@@ -2077,7 +2078,7 @@ void rcu_check_callbacks(int cpu, int user)
rcu_preempt_check_callbacks(cpu);
if (rcu_pending(cpu))
invoke_rcu_core();
- trace_rcu_utilization("End scheduler-tick");
+ trace_rcu_utilization(TPS("End scheduler-tick"));
}
/*
@@ -2087,7 +2088,10 @@ void rcu_check_callbacks(int cpu, int user)
*
* The caller must have suppressed start of new grace periods.
*/
-static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
+static void force_qs_rnp(struct rcu_state *rsp,
+ int (*f)(struct rcu_data *rsp, bool *isidle,
+ unsigned long *maxj),
+ bool *isidle, unsigned long *maxj)
{
unsigned long bit;
int cpu;
@@ -2110,9 +2114,12 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *))
cpu = rnp->grplo;
bit = 1;
for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
- if ((rnp->qsmask & bit) != 0 &&
- f(per_cpu_ptr(rsp->rda, cpu)))
- mask |= bit;
+ if ((rnp->qsmask & bit) != 0) {
+ if ((rnp->qsmaskinit & bit) != 0)
+ *isidle = 0;
+ if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
+ mask |= bit;
+ }
}
if (mask != 0) {
@@ -2208,10 +2215,10 @@ static void rcu_process_callbacks(struct softirq_action *unused)
if (cpu_is_offline(smp_processor_id()))
return;
- trace_rcu_utilization("Start RCU core");
+ trace_rcu_utilization(TPS("Start RCU core"));
for_each_rcu_flavor(rsp)
__rcu_process_callbacks(rsp);
- trace_rcu_utilization("End RCU core");
+ trace_rcu_utilization(TPS("End RCU core"));
}
/*
@@ -2287,6 +2294,13 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
}
/*
+ * RCU callback function to leak a callback.
+ */
+static void rcu_leak_callback(struct rcu_head *rhp)
+{
+}
+
+/*
* Helper function for call_rcu() and friends. The cpu argument will
* normally be -1, indicating "currently running CPU". It may specify
* a CPU only if that CPU is a no-CBs CPU. Currently, only _rcu_barrier()
@@ -2300,7 +2314,12 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
struct rcu_data *rdp;
WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
- debug_rcu_head_queue(head);
+ if (debug_rcu_head_queue(head)) {
+ /* Probable double call_rcu(), so leak the callback. */
+ ACCESS_ONCE(head->func) = rcu_leak_callback;
+ WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
+ return;
+ }
head->func = func;
head->next = NULL;
@@ -2720,7 +2739,7 @@ static int rcu_cpu_has_callbacks(int cpu, bool *all_lazy)
* Helper function for _rcu_barrier() tracing. If tracing is disabled,
* the compiler is expected to optimize this away.
*/
-static void _rcu_barrier_trace(struct rcu_state *rsp, char *s,
+static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
int cpu, unsigned long done)
{
trace_rcu_barrier(rsp->name, s, cpu,
@@ -2785,9 +2804,20 @@ static void _rcu_barrier(struct rcu_state *rsp)
* transition. The "if" expression below therefore rounds the old
* value up to the next even number and adds two before comparing.
*/
- snap_done = ACCESS_ONCE(rsp->n_barrier_done);
+ snap_done = rsp->n_barrier_done;
_rcu_barrier_trace(rsp, "Check", -1, snap_done);
- if (ULONG_CMP_GE(snap_done, ((snap + 1) & ~0x1) + 2)) {
+
+ /*
+ * If the value in snap is odd, we needed to wait for the current
+ * rcu_barrier() to complete, then wait for the next one, in other
+ * words, we need the value of snap_done to be three larger than
+ * the value of snap. On the other hand, if the value in snap is
+ * even, we only had to wait for the next rcu_barrier() to complete,
+ * in other words, we need the value of snap_done to be only two
+ * greater than the value of snap. The "(snap + 3) & ~0x1" computes
+ * this for us (thank you, Linus!).
+ */
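/*
 * Worked examples (illustrative): with snap == 4 (even), (4 + 3) & ~0x1
 * is 6, so snap_done must have advanced by two; with snap == 5 (odd),
 * (5 + 3) & ~0x1 is 8, so snap_done must have advanced by three.
 */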
+ if (ULONG_CMP_GE(snap_done, (snap + 3) & ~0x1)) {
_rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
smp_mb(); /* caller's subsequent code after above check. */
mutex_unlock(&rsp->barrier_mutex);
@@ -2930,6 +2960,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
rdp->blimit = blimit;
init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
+ rcu_sysidle_init_percpu_data(rdp->dynticks);
atomic_set(&rdp->dynticks->dynticks,
(atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
@@ -2952,7 +2983,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
rdp->completed = rnp->completed;
rdp->passed_quiesce = 0;
rdp->qs_pending = 0;
- trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuonl");
+ trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
}
raw_spin_unlock(&rnp->lock); /* irqs already disabled. */
rnp = rnp->parent;
@@ -2982,7 +3013,7 @@ static int rcu_cpu_notify(struct notifier_block *self,
struct rcu_node *rnp = rdp->mynode;
struct rcu_state *rsp;
- trace_rcu_utilization("Start CPU hotplug");
+ trace_rcu_utilization(TPS("Start CPU hotplug"));
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
@@ -3011,7 +3042,26 @@ static int rcu_cpu_notify(struct notifier_block *self,
default:
break;
}
- trace_rcu_utilization("End CPU hotplug");
+ trace_rcu_utilization(TPS("End CPU hotplug"));
+ return NOTIFY_OK;
+}
+
+static int rcu_pm_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
+ rcu_expedited = 1;
+ break;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ rcu_expedited = 0;
+ break;
+ default:
+ break;
+ }
return NOTIFY_OK;
}
@@ -3256,6 +3306,7 @@ void __init rcu_init(void)
* or the scheduler are operational.
*/
cpu_notifier(rcu_cpu_notify, 0);
+ pm_notifier(rcu_pm_notify, 0);
for_each_online_cpu(cpu)
rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
}
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index b3832581043..5f97eab602c 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -88,6 +88,14 @@ struct rcu_dynticks {
/* Process level is worth LLONG_MAX/2. */
int dynticks_nmi_nesting; /* Track NMI nesting level. */
atomic_t dynticks; /* Even value for idle, else odd. */
+#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
+ long long dynticks_idle_nesting;
+ /* irq/process nesting level from idle. */
+ atomic_t dynticks_idle; /* Even value for idle, else odd. */
+ /* "Idle" excludes userspace execution. */
+ unsigned long dynticks_idle_jiffies;
+ /* End of last non-NMI non-idle period. */
+#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
#ifdef CONFIG_RCU_FAST_NO_HZ
bool all_lazy; /* Are all CPU's CBs lazy? */
unsigned long nonlazy_posted;
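As the new field comments note, dynticks_idle uses the same even-while-idle, odd-while-busy convention as dynticks. A trivial stand-alone check of how a snapshot of that counter is interpreted (the helper name is illustrative, not a kernel symbol):

#include <assert.h>
#include <stdbool.h>

/* An even dynticks_idle snapshot means the CPU was in its non-userspace idle state. */
static bool snapshot_is_idle(int dynticks_idle_snapshot)
{
	return (dynticks_idle_snapshot & 0x1) == 0;
}

int main(void)
{
	assert(snapshot_is_idle(42));	/* even: idle */
	assert(!snapshot_is_idle(43));	/* odd: busy */
	return 0;
}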
@@ -445,7 +453,7 @@ struct rcu_state {
/* for CPU stalls. */
unsigned long gp_max; /* Maximum GP duration in */
/* jiffies. */
- char *name; /* Name of structure. */
+ const char *name; /* Name of structure. */
char abbr; /* Abbreviated name. */
struct list_head flavors; /* List of RCU flavors. */
struct irq_work wakeup_work; /* Postponed wakeups */
@@ -545,6 +553,15 @@ static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
static void rcu_kick_nohz_cpu(int cpu);
static bool init_nocb_callback_list(struct rcu_data *rdp);
+static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq);
+static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq);
+static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
+ unsigned long *maxj);
+static bool is_sysidle_rcu_state(struct rcu_state *rsp);
+static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
+ unsigned long maxj);
+static void rcu_bind_gp_kthread(void);
+static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp);
#endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 769e12e3151..130c97b027f 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -28,7 +28,7 @@
#include <linux/gfp.h>
#include <linux/oom.h>
#include <linux/smpboot.h>
-#include <linux/tick.h>
+#include "time/tick-internal.h"
#define RCU_KTHREAD_PRIO 1
@@ -110,9 +110,7 @@ static void __init rcu_bootup_announce_oddness(void)
#ifdef CONFIG_TREE_PREEMPT_RCU
-struct rcu_state rcu_preempt_state =
- RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
-DEFINE_PER_CPU(struct rcu_data, rcu_preempt_data);
+RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
static struct rcu_state *rcu_state = &rcu_preempt_state;
static int rcu_preempted_readers_exp(struct rcu_node *rnp);
@@ -169,7 +167,7 @@ static void rcu_preempt_qs(int cpu)
struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu);
if (rdp->passed_quiesce == 0)
- trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs");
+ trace_rcu_grace_period(TPS("rcu_preempt"), rdp->gpnum, TPS("cpuqs"));
rdp->passed_quiesce = 1;
current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;
}
@@ -388,7 +386,7 @@ void rcu_read_unlock_special(struct task_struct *t)
np = rcu_next_node_entry(t, rnp);
list_del_init(&t->rcu_node_entry);
t->rcu_blocked_node = NULL;
- trace_rcu_unlock_preempted_task("rcu_preempt",
+ trace_rcu_unlock_preempted_task(TPS("rcu_preempt"),
rnp->gpnum, t->pid);
if (&t->rcu_node_entry == rnp->gp_tasks)
rnp->gp_tasks = np;
@@ -412,7 +410,7 @@ void rcu_read_unlock_special(struct task_struct *t)
*/
empty_exp_now = !rcu_preempted_readers_exp(rnp);
if (!empty && !rcu_preempt_blocked_readers_cgp(rnp)) {
- trace_rcu_quiescent_state_report("preempt_rcu",
+ trace_rcu_quiescent_state_report(TPS("preempt_rcu"),
rnp->gpnum,
0, rnp->qsmask,
rnp->level,
@@ -1250,12 +1248,12 @@ static int rcu_boost_kthread(void *arg)
int spincnt = 0;
int more2boost;
- trace_rcu_utilization("Start boost kthread@init");
+ trace_rcu_utilization(TPS("Start boost kthread@init"));
for (;;) {
rnp->boost_kthread_status = RCU_KTHREAD_WAITING;
- trace_rcu_utilization("End boost kthread@rcu_wait");
+ trace_rcu_utilization(TPS("End boost kthread@rcu_wait"));
rcu_wait(rnp->boost_tasks || rnp->exp_tasks);
- trace_rcu_utilization("Start boost kthread@rcu_wait");
+ trace_rcu_utilization(TPS("Start boost kthread@rcu_wait"));
rnp->boost_kthread_status = RCU_KTHREAD_RUNNING;
more2boost = rcu_boost(rnp);
if (more2boost)
@@ -1264,14 +1262,14 @@ static int rcu_boost_kthread(void *arg)
spincnt = 0;
if (spincnt > 10) {
rnp->boost_kthread_status = RCU_KTHREAD_YIELDING;
- trace_rcu_utilization("End boost kthread@rcu_yield");
+ trace_rcu_utilization(TPS("End boost kthread@rcu_yield"));
schedule_timeout_interruptible(2);
- trace_rcu_utilization("Start boost kthread@rcu_yield");
+ trace_rcu_utilization(TPS("Start boost kthread@rcu_yield"));
spincnt = 0;
}
}
/* NOTREACHED */
- trace_rcu_utilization("End boost kthread@notreached");
+ trace_rcu_utilization(TPS("End boost kthread@notreached"));
return 0;
}
@@ -1419,7 +1417,7 @@ static void rcu_cpu_kthread(unsigned int cpu)
int spincnt;
for (spincnt = 0; spincnt < 10; spincnt++) {
- trace_rcu_utilization("Start CPU kthread@rcu_wait");
+ trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
local_bh_disable();
*statusp = RCU_KTHREAD_RUNNING;
this_cpu_inc(rcu_cpu_kthread_loops);
@@ -1431,15 +1429,15 @@ static void rcu_cpu_kthread(unsigned int cpu)
rcu_kthread_do_work();
local_bh_enable();
if (*workp == 0) {
- trace_rcu_utilization("End CPU kthread@rcu_wait");
+ trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
*statusp = RCU_KTHREAD_WAITING;
return;
}
}
*statusp = RCU_KTHREAD_YIELDING;
- trace_rcu_utilization("Start CPU kthread@rcu_yield");
+ trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
schedule_timeout_interruptible(2);
- trace_rcu_utilization("End CPU kthread@rcu_yield");
+ trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
*statusp = RCU_KTHREAD_WAITING;
}
@@ -2202,7 +2200,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
* Wait for the grace period. Do so interruptibly to avoid messing
* up the load average.
*/
- trace_rcu_future_gp(rnp, rdp, c, "StartWait");
+ trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
for (;;) {
wait_event_interruptible(
rnp->nocb_gp_wq[c & 0x1],
@@ -2210,9 +2208,9 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
if (likely(d))
break;
flush_signals(current);
- trace_rcu_future_gp(rnp, rdp, c, "ResumeWait");
+ trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait"));
}
- trace_rcu_future_gp(rnp, rdp, c, "EndWait");
+ trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait"));
smp_mb(); /* Ensure that CB invocation happens after GP end. */
}
@@ -2375,3 +2373,425 @@ static void rcu_kick_nohz_cpu(int cpu)
smp_send_reschedule(cpu);
#endif /* #ifdef CONFIG_NO_HZ_FULL */
}
+
+
+#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
+
+/*
+ * Define RCU flavor that holds sysidle state. This needs to be the
+ * most active flavor of RCU.
+ */
+#ifdef CONFIG_PREEMPT_RCU
+static struct rcu_state *rcu_sysidle_state = &rcu_preempt_state;
+#else /* #ifdef CONFIG_PREEMPT_RCU */
+static struct rcu_state *rcu_sysidle_state = &rcu_sched_state;
+#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
+static int full_sysidle_state; /* Current system-idle state. */
+#define RCU_SYSIDLE_NOT 0 /* Some CPU is not idle. */
+#define RCU_SYSIDLE_SHORT 1 /* All CPUs idle for brief period. */
+#define RCU_SYSIDLE_LONG 2 /* All CPUs idle for long enough. */
+#define RCU_SYSIDLE_FULL 3 /* All CPUs idle, ready for sysidle. */
+#define RCU_SYSIDLE_FULL_NOTED 4 /* Actually entered sysidle state. */
+
+/*
+ * Invoked to note exit from irq or task transition to idle. Note that
+ * usermode execution does -not- count as idle here! After all, we want
+ * to detect full-system idle states, not RCU quiescent states and grace
+ * periods. The caller must have disabled interrupts.
+ */
+static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
+{
+ unsigned long j;
+
+ /* Adjust nesting, check for fully idle. */
+ if (irq) {
+ rdtp->dynticks_idle_nesting--;
+ WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
+ if (rdtp->dynticks_idle_nesting != 0)
+ return; /* Still not fully idle. */
+ } else {
+ if ((rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) ==
+ DYNTICK_TASK_NEST_VALUE) {
+ rdtp->dynticks_idle_nesting = 0;
+ } else {
+ rdtp->dynticks_idle_nesting -= DYNTICK_TASK_NEST_VALUE;
+ WARN_ON_ONCE(rdtp->dynticks_idle_nesting < 0);
+ return; /* Still not fully idle. */
+ }
+ }
+
+ /* Record start of fully idle period. */
+ j = jiffies;
+ ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
+ smp_mb__before_atomic_inc();
+ atomic_inc(&rdtp->dynticks_idle);
+ smp_mb__after_atomic_inc();
+ WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
+}
+
+/*
+ * Unconditionally force exit from full system-idle state. This is
+ * invoked when a normal CPU exits idle, but must be called separately
+ * for the timekeeping CPU (tick_do_timer_cpu). The reason for this
+ * is that the timekeeping CPU is permitted to take scheduling-clock
+ * interrupts while the system is in system-idle state, and of course
+ * rcu_sysidle_exit() has no way of distinguishing a scheduling-clock
+ * interrupt from any other type of interrupt.
+ */
+void rcu_sysidle_force_exit(void)
+{
+ int oldstate = ACCESS_ONCE(full_sysidle_state);
+ int newoldstate;
+
+ /*
+ * Each pass through the following loop attempts to exit full
+ * system-idle state. If contention proves to be a problem,
+ * a trylock-based contention tree could be used here.
+ */
+ while (oldstate > RCU_SYSIDLE_SHORT) {
+ newoldstate = cmpxchg(&full_sysidle_state,
+ oldstate, RCU_SYSIDLE_NOT);
+ if (oldstate == newoldstate &&
+ oldstate == RCU_SYSIDLE_FULL_NOTED) {
+ rcu_kick_nohz_cpu(tick_do_timer_cpu);
+ return; /* We cleared it, done! */
+ }
+ oldstate = newoldstate;
+ }
+ smp_mb(); /* Order initial oldstate fetch vs. later non-idle work. */
+}
+
+/*
+ * Invoked to note entry to irq or task transition from idle. Note that
+ * usermode execution does -not- count as idle here! The caller must
+ * have disabled interrupts.
+ */
+static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
+{
+ /* Adjust nesting, check for already non-idle. */
+ if (irq) {
+ rdtp->dynticks_idle_nesting++;
+ WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
+ if (rdtp->dynticks_idle_nesting != 1)
+ return; /* Already non-idle. */
+ } else {
+ /*
+ * Allow for irq misnesting. Yes, it really is possible
+ * to enter an irq handler then never leave it, and maybe
+ * also vice versa. Handle both possibilities.
+ */
+ if (rdtp->dynticks_idle_nesting & DYNTICK_TASK_NEST_MASK) {
+ rdtp->dynticks_idle_nesting += DYNTICK_TASK_NEST_VALUE;
+ WARN_ON_ONCE(rdtp->dynticks_idle_nesting <= 0);
+ return; /* Already non-idle. */
+ } else {
+ rdtp->dynticks_idle_nesting = DYNTICK_TASK_EXIT_IDLE;
+ }
+ }
+
+ /* Record end of idle period. */
+ smp_mb__before_atomic_inc();
+ atomic_inc(&rdtp->dynticks_idle);
+ smp_mb__after_atomic_inc();
+ WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
+
+ /*
+ * If we are the timekeeping CPU, we are permitted to be non-idle
+ * during a system-idle state. This must be the case, because
+ * the timekeeping CPU has to take scheduling-clock interrupts
+ * during the time that the system is transitioning to full
+ * system-idle state. This means that the timekeeping CPU must
+ * invoke rcu_sysidle_force_exit() directly if it does anything
+ * more than take a scheduling-clock interrupt.
+ */
+ if (smp_processor_id() == tick_do_timer_cpu)
+ return;
+
+ /* Update system-idle state: We are clearly no longer fully idle! */
+ rcu_sysidle_force_exit();
+}
+
+/*
+ * Check to see if the current CPU is idle. Note that usermode execution
+ * does not count as idle. The caller must have disabled interrupts.
+ */
+static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
+ unsigned long *maxj)
+{
+ int cur;
+ unsigned long j;
+ struct rcu_dynticks *rdtp = rdp->dynticks;
+
+ /*
+ * If some other CPU has already reported non-idle, if this is
+ * not the flavor of RCU that tracks sysidle state, or if this
+ * is an offline CPU or the timekeeping CPU, there is nothing to do.
+ */
+ if (!*isidle || rdp->rsp != rcu_sysidle_state ||
+ cpu_is_offline(rdp->cpu) || rdp->cpu == tick_do_timer_cpu)
+ return;
+ if (rcu_gp_in_progress(rdp->rsp))
+ WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
+
+ /* Pick up current idle and NMI-nesting counter and check. */
+ cur = atomic_read(&rdtp->dynticks_idle);
+ if (cur & 0x1) {
+ *isidle = false; /* We are not idle! */
+ return;
+ }
+ smp_mb(); /* Read counters before timestamps. */
+
+ /* Pick up timestamps. */
+ j = ACCESS_ONCE(rdtp->dynticks_idle_jiffies);
+ /* If this CPU entered idle more recently, update maxj timestamp. */
+ if (ULONG_CMP_LT(*maxj, j))
+ *maxj = j;
+}
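The ULONG_CMP_LT() used for the maxj update above is the wraparound-safe comparison from rcupdate.h; reproduced roughly below together with a small demonstration of why a plain < would go wrong once jiffies wraps:

#include <limits.h>
#include <stdio.h>

/* Roughly as defined in the kernel's rcupdate.h. */
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b)	(ULONG_MAX / 2 < (a) - (b))

int main(void)
{
	unsigned long before_wrap = ULONG_MAX - 10;	/* timestamp just before jiffies wraps */
	unsigned long after_wrap  = 5;			/* timestamp just after the wrap */

	/* A plain "<" would claim after_wrap comes first; the macro gets it right. */
	printf("%d\n", ULONG_CMP_LT(before_wrap, after_wrap));	/* prints 1 */
	return 0;
}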
+
+/*
+ * Is this the flavor of RCU that is handling full-system idle?
+ */
+static bool is_sysidle_rcu_state(struct rcu_state *rsp)
+{
+ return rsp == rcu_sysidle_state;
+}
+
+/*
+ * Bind the grace-period kthread for the sysidle flavor of RCU to the
+ * timekeeping CPU.
+ */
+static void rcu_bind_gp_kthread(void)
+{
+ int cpu = ACCESS_ONCE(tick_do_timer_cpu);
+
+ if (cpu < 0 || cpu >= nr_cpu_ids)
+ return;
+ if (raw_smp_processor_id() != cpu)
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
+}
+
+/*
+ * Return a delay in jiffies based on the number of CPUs, rcu_node
+ * leaf fanout, and jiffies tick rate. The idea is to allow larger
+ * systems more time to transition to full-idle state in order to
+ * avoid the cache thrashing that would otherwise occur on the state variable.
+ * Really small systems (less than a couple of tens of CPUs) should
+ * instead use a single global atomically incremented counter, and later
+ * versions of this will automatically reconfigure themselves accordingly.
+ */
+static unsigned long rcu_sysidle_delay(void)
+{
+ if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
+ return 0;
+ return DIV_ROUND_UP(nr_cpu_ids * HZ, rcu_fanout_leaf * 1000);
+}
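To make the delay formula concrete, a worked example under assumed values (4096 CPUs, HZ = 1000, rcu_fanout_leaf = 16, with CONFIG_NO_HZ_FULL_SYSIDLE_SMALL well below 4096):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* rcu_sysidle_delay() = DIV_ROUND_UP(nr_cpu_ids * HZ, rcu_fanout_leaf * 1000) */
	unsigned long delay = DIV_ROUND_UP(4096UL * 1000, 16 * 1000);

	printf("%lu\n", delay);		/* 256 jiffies, about 0.25 s at HZ=1000 */
	return 0;
}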
+
+/*
+ * Advance the full-system-idle state. This is invoked when all of
+ * the non-timekeeping CPUs are idle.
+ */
+static void rcu_sysidle(unsigned long j)
+{
+ /* Check the current state. */
+ switch (ACCESS_ONCE(full_sysidle_state)) {
+ case RCU_SYSIDLE_NOT:
+
+ /* First time all are idle, so note a short idle period. */
+ ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
+ break;
+
+ case RCU_SYSIDLE_SHORT:
+
+ /*
+ * Idle for a bit, time to advance to next state?
+ * cmpxchg failure means race with non-idle, let them win.
+ */
+ if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
+ (void)cmpxchg(&full_sysidle_state,
+ RCU_SYSIDLE_SHORT, RCU_SYSIDLE_LONG);
+ break;
+
+ case RCU_SYSIDLE_LONG:
+
+ /*
+ * Do an additional check pass before advancing to full.
+ * cmpxchg failure means race with non-idle, let them win.
+ */
+ if (ULONG_CMP_GE(jiffies, j + rcu_sysidle_delay()))
+ (void)cmpxchg(&full_sysidle_state,
+ RCU_SYSIDLE_LONG, RCU_SYSIDLE_FULL);
+ break;
+
+ default:
+ break;
+ }
+}
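Putting the state machine together, the intended progression on a system where every non-timekeeping CPU goes idle and stays idle looks roughly like the sketch below (times are relative, d stands for rcu_sysidle_delay(), and each step happens during a force-quiescent-state scan):

/*
 *   t0      : scan sees all CPUs idle      RCU_SYSIDLE_NOT   -> RCU_SYSIDLE_SHORT
 *   t0 + d  : still all idle               RCU_SYSIDLE_SHORT -> RCU_SYSIDLE_LONG
 *   t0 + 2d : still all idle               RCU_SYSIDLE_LONG  -> RCU_SYSIDLE_FULL
 *   later   : rcu_sys_is_idle() on the
 *             timekeeping CPU              RCU_SYSIDLE_FULL  -> RCU_SYSIDLE_FULL_NOTED
 *
 * Any non-timekeeping CPU becoming non-idle at any point kicks the state
 * back to RCU_SYSIDLE_NOT (see rcu_sysidle_cancel() and rcu_sysidle_force_exit()).
 */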
+
+/*
+ * Found a non-idle non-timekeeping CPU, so kick the system-idle state
+ * back to the beginning.
+ */
+static void rcu_sysidle_cancel(void)
+{
+ smp_mb();
+ ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
+}
+
+/*
+ * Update the sysidle state based on the results of a force-quiescent-state
+ * scan of the CPUs' dyntick-idle state.
+ */
+static void rcu_sysidle_report(struct rcu_state *rsp, int isidle,
+ unsigned long maxj, bool gpkt)
+{
+ if (rsp != rcu_sysidle_state)
+ return; /* Wrong flavor, ignore. */
+ if (gpkt && nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL)
+ return; /* Running state machine from timekeeping CPU. */
+ if (isidle)
+ rcu_sysidle(maxj); /* More idle! */
+ else
+ rcu_sysidle_cancel(); /* Idle is over. */
+}
+
+/*
+ * Wrapper for rcu_sysidle_report() when called from the grace-period
+ * kthread's context.
+ */
+static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
+ unsigned long maxj)
+{
+ rcu_sysidle_report(rsp, isidle, maxj, true);
+}
+
+/* Callback and function for forcing an RCU grace period. */
+struct rcu_sysidle_head {
+ struct rcu_head rh;
+ int inuse;
+};
+
+static void rcu_sysidle_cb(struct rcu_head *rhp)
+{
+ struct rcu_sysidle_head *rshp;
+
+ /*
+ * The following memory barrier is needed to replace the
+ * memory barriers that would normally be in the memory
+ * allocator.
+ */
+ smp_mb(); /* grace period precedes setting inuse. */
+
+ rshp = container_of(rhp, struct rcu_sysidle_head, rh);
+ ACCESS_ONCE(rshp->inuse) = 0;
+}
+
+/*
+ * Check to see if the system is fully idle, other than the timekeeping CPU.
+ * The caller must have disabled interrupts.
+ */
+bool rcu_sys_is_idle(void)
+{
+ static struct rcu_sysidle_head rsh;
+ int rss = ACCESS_ONCE(full_sysidle_state);
+
+ if (WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu))
+ return false;
+
+ /* Handle small-system case by doing a full scan of CPUs. */
+ if (nr_cpu_ids <= CONFIG_NO_HZ_FULL_SYSIDLE_SMALL) {
+ int oldrss = rss - 1;
+
+ /*
+ * One pass to advance to each state up to _FULL.
+ * Give up if any pass fails to advance the state.
+ */
+ while (rss < RCU_SYSIDLE_FULL && oldrss < rss) {
+ int cpu;
+ bool isidle = true;
+ unsigned long maxj = jiffies - ULONG_MAX / 4;
+ struct rcu_data *rdp;
+
+ /* Scan all the CPUs looking for nonidle CPUs. */
+ for_each_possible_cpu(cpu) {
+ rdp = per_cpu_ptr(rcu_sysidle_state->rda, cpu);
+ rcu_sysidle_check_cpu(rdp, &isidle, &maxj);
+ if (!isidle)
+ break;
+ }
+ rcu_sysidle_report(rcu_sysidle_state,
+ isidle, maxj, false);
+ oldrss = rss;
+ rss = ACCESS_ONCE(full_sysidle_state);
+ }
+ }
+
+ /* If this is the first observation of an idle period, record it. */
+ if (rss == RCU_SYSIDLE_FULL) {
+ rss = cmpxchg(&full_sysidle_state,
+ RCU_SYSIDLE_FULL, RCU_SYSIDLE_FULL_NOTED);
+ return rss == RCU_SYSIDLE_FULL;
+ }
+
+ smp_mb(); /* ensure rss load happens before later caller actions. */
+
+ /* If already fully idle, tell the caller (in case of races). */
+ if (rss == RCU_SYSIDLE_FULL_NOTED)
+ return true;
+
+ /*
+ * If we aren't there yet, and a grace period is not in flight,
+ * initiate a grace period. Either way, tell the caller that
+ * we are not there yet. We use an xchg() rather than an assignment
+ * to make up for the memory barriers that would otherwise be
+ * provided by the memory allocator.
+ */
+ if (nr_cpu_ids > CONFIG_NO_HZ_FULL_SYSIDLE_SMALL &&
+ !rcu_gp_in_progress(rcu_sysidle_state) &&
+ !rsh.inuse && xchg(&rsh.inuse, 1) == 0)
+ call_rcu(&rsh.rh, rcu_sysidle_cb);
+ return false;
+}
+
+/*
+ * Initialize dynticks sysidle state for CPUs coming online.
+ */
+static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
+{
+ rdtp->dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE;
+}
+
+#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
+
+static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
+{
+}
+
+static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
+{
+}
+
+static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
+ unsigned long *maxj)
+{
+}
+
+static bool is_sysidle_rcu_state(struct rcu_state *rsp)
+{
+ return false;
+}
+
+static void rcu_bind_gp_kthread(void)
+{
+}
+
+static void rcu_sysidle_report_gp(struct rcu_state *rsp, int isidle,
+ unsigned long maxj)
+{
+}
+
+static void rcu_sysidle_init_percpu_data(struct rcu_dynticks *rdtp)
+{
+}
+
+#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b7c32cb7bfe..5ac63c9a995 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -933,6 +933,8 @@ static int effective_prio(struct task_struct *p)
/**
* task_curr - is this task currently executing on a CPU?
* @p: the task in question.
+ *
+ * Return: 1 if the task is currently executing. 0 otherwise.
*/
inline int task_curr(const struct task_struct *p)
{
@@ -976,13 +978,6 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
rq->skip_clock_update = 1;
}
-static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
-
-void register_task_migration_notifier(struct notifier_block *n)
-{
- atomic_notifier_chain_register(&task_migration_notifier, n);
-}
-
#ifdef CONFIG_SMP
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
@@ -1013,18 +1008,10 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
trace_sched_migrate_task(p, new_cpu);
if (task_cpu(p) != new_cpu) {
- struct task_migration_notifier tmn;
-
if (p->sched_class->migrate_task_rq)
p->sched_class->migrate_task_rq(p, new_cpu);
p->se.nr_migrations++;
perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
-
- tmn.task = p;
- tmn.from_cpu = task_cpu(p);
- tmn.to_cpu = new_cpu;
-
- atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
}
__set_task_cpu(p, new_cpu);
@@ -1482,7 +1469,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
* the simpler "current->state = TASK_RUNNING" to mark yourself
* runnable without the overhead of this.
*
- * Returns %true if @p was woken up, %false if it was already running
+ * Return: %true if @p was woken up, %false if it was already running.
* or @state didn't match @p's state.
*/
static int
@@ -1491,7 +1478,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
unsigned long flags;
int cpu, success = 0;
- smp_wmb();
+ /*
+ * If we are going to wake up a thread waiting for CONDITION we
+ * need to ensure that the CONDITION=1 store done by the caller
+ * cannot be reordered with the p->state check below. This pairs
+ * with the mb() in set_current_state() done by the waiting thread.
+ */
+ smp_mb__before_spinlock();
raw_spin_lock_irqsave(&p->pi_lock, flags);
if (!(p->state & state))
goto out;
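The pairing that the new comment describes is the classic sleep/wakeup pattern. A userspace model of the two sides (CONDITION, waiter() and waker() are illustrative names, not kernel symbols; the mutex plays the role of smp_mb__before_spinlock() plus pi_lock, the condition variable the role of the scheduler wakeup):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int condition;		/* the CONDITION from the comment */
static atomic_int task_sleeping;	/* stands in for p->state */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv   = PTHREAD_COND_INITIALIZER;

static void *waiter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	atomic_store(&task_sleeping, 1);	/* set_current_state(): publish intent to sleep */
	while (!atomic_load(&condition))	/* re-check CONDITION before really sleeping */
		pthread_cond_wait(&cv, &lock);
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void *waker(void *arg)
{
	(void)arg;
	atomic_store(&condition, 1);		/* CONDITION = 1 must not pass the state check */
	pthread_mutex_lock(&lock);
	if (atomic_load(&task_sleeping))
		pthread_cond_signal(&cv);	/* try_to_wake_up() */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, waiter, NULL);
	pthread_create(&b, NULL, waker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	puts("no lost wakeup");
	return 0;
}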
@@ -1577,8 +1570,9 @@ out:
* @p: The process to be woken up.
*
* Attempt to wake up the nominated process and move it to the set of runnable
- * processes. Returns 1 if the process was woken up, 0 if it was already
- * running.
+ * processes.
+ *
+ * Return: 1 if the process was woken up, 0 if it was already running.
*
* It may be assumed that this function implies a write memory barrier before
* changing the task state if and only if any tasks are woken up.
@@ -2191,6 +2185,8 @@ void scheduler_tick(void)
* This makes sure that uptime, CFS vruntime, load
* balancing, etc... continue to move forward, even
* with a very low granularity.
+ *
+ * Return: Maximum deferment in nanoseconds.
*/
u64 scheduler_tick_max_deferment(void)
{
@@ -2394,6 +2390,12 @@ need_resched:
if (sched_feat(HRTICK))
hrtick_clear(rq);
+ /*
+ * Make sure that signal_pending_state()->signal_pending() below
+ * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
+ * done by the caller to avoid the race with signal_wake_up().
+ */
+ smp_mb__before_spinlock();
raw_spin_lock_irq(&rq->lock);
switch_count = &prev->nivcsw;
@@ -2510,13 +2512,11 @@ void __sched schedule_preempt_disabled(void)
*/
asmlinkage void __sched notrace preempt_schedule(void)
{
- struct thread_info *ti = current_thread_info();
-
/*
* If there is a non-zero preempt_count or interrupts are disabled,
* we do not want to preempt the current task. Just return..
*/
- if (likely(ti->preempt_count || irqs_disabled()))
+ if (likely(!preemptible()))
return;
do {
@@ -2660,7 +2660,7 @@ void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
if (unlikely(!q))
return;
- if (unlikely(!nr_exclusive))
+ if (unlikely(nr_exclusive != 1))
wake_flags = 0;
spin_lock_irqsave(&q->lock, flags);
@@ -2796,8 +2796,8 @@ EXPORT_SYMBOL(wait_for_completion);
* specified timeout to expire. The timeout is in jiffies. It is not
* interruptible.
*
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
+ * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
+ * till timeout) if completed.
*/
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
@@ -2829,8 +2829,8 @@ EXPORT_SYMBOL(wait_for_completion_io);
* specified timeout to expire. The timeout is in jiffies. It is not
* interruptible. The caller is accounted as waiting for IO.
*
- * The return value is 0 if timed out, and positive (at least 1, or number of
- * jiffies left till timeout) if completed.
+ * Return: 0 if timed out, and positive (at least 1, or number of jiffies left
+ * till timeout) if completed.
*/
unsigned long __sched
wait_for_completion_io_timeout(struct completion *x, unsigned long timeout)
@@ -2846,7 +2846,7 @@ EXPORT_SYMBOL(wait_for_completion_io_timeout);
* This waits for completion of a specific task to be signaled. It is
* interruptible.
*
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if completed.
*/
int __sched wait_for_completion_interruptible(struct completion *x)
{
@@ -2865,8 +2865,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
* This waits for either a completion of a specific task to be signaled or for a
* specified timeout to expire. It is interruptible. The timeout is in jiffies.
*
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
+ * or number of jiffies left till timeout) if completed.
*/
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
@@ -2883,7 +2883,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
* This waits to be signaled for completion of a specific task. It can be
* interrupted by a kill signal.
*
- * The return value is -ERESTARTSYS if interrupted, 0 if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if completed.
*/
int __sched wait_for_completion_killable(struct completion *x)
{
@@ -2903,8 +2903,8 @@ EXPORT_SYMBOL(wait_for_completion_killable);
* signaled or for a specified timeout to expire. It can be
* interrupted by a kill signal. The timeout is in jiffies.
*
- * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
- * positive (at least 1, or number of jiffies left till timeout) if completed.
+ * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
+ * or number of jiffies left till timeout) if completed.
*/
long __sched
wait_for_completion_killable_timeout(struct completion *x,
@@ -2918,7 +2918,7 @@ EXPORT_SYMBOL(wait_for_completion_killable_timeout);
* try_wait_for_completion - try to decrement a completion without blocking
* @x: completion structure
*
- * Returns: 0 if a decrement cannot be done without blocking
+ * Return: 0 if a decrement cannot be done without blocking
* 1 if a decrement succeeded.
*
* If a completion is being used as a counting completion,
@@ -2945,7 +2945,7 @@ EXPORT_SYMBOL(try_wait_for_completion);
* completion_done - Test to see if a completion has any waiters
* @x: completion structure
*
- * Returns: 0 if there are waiters (wait_for_completion() in progress)
+ * Return: 0 if there are waiters (wait_for_completion() in progress)
* 1 if there are no waiters.
*
*/
@@ -3182,7 +3182,7 @@ SYSCALL_DEFINE1(nice, int, increment)
* task_prio - return the priority value of a given task.
* @p: the task in question.
*
- * This is the priority value as seen by users in /proc.
+ * Return: The priority value as seen by users in /proc.
* RT tasks are offset by -200. Normal tasks are centered
* around 0, value goes from -16 to +15.
*/
@@ -3194,6 +3194,8 @@ int task_prio(const struct task_struct *p)
/**
* task_nice - return the nice value of a given task.
* @p: the task in question.
+ *
+ * Return: The nice value [ -20 ... 0 ... 19 ].
*/
int task_nice(const struct task_struct *p)
{
@@ -3204,6 +3206,8 @@ EXPORT_SYMBOL(task_nice);
/**
* idle_cpu - is a given cpu idle currently?
* @cpu: the processor in question.
+ *
+ * Return: 1 if the CPU is currently idle. 0 otherwise.
*/
int idle_cpu(int cpu)
{
@@ -3226,6 +3230,8 @@ int idle_cpu(int cpu)
/**
* idle_task - return the idle task for a given cpu.
* @cpu: the processor in question.
+ *
+ * Return: The idle task for the cpu @cpu.
*/
struct task_struct *idle_task(int cpu)
{
@@ -3235,6 +3241,8 @@ struct task_struct *idle_task(int cpu)
/**
* find_process_by_pid - find a process with a matching PID value.
* @pid: the pid in question.
+ *
+ * Return: The task of @pid, if found. %NULL otherwise.
*/
static struct task_struct *find_process_by_pid(pid_t pid)
{
@@ -3432,6 +3440,8 @@ recheck:
* @policy: new policy.
* @param: structure containing the new RT priority.
*
+ * Return: 0 on success. An error code otherwise.
+ *
* NOTE that the task may be already dead.
*/
int sched_setscheduler(struct task_struct *p, int policy,
@@ -3451,6 +3461,8 @@ EXPORT_SYMBOL_GPL(sched_setscheduler);
* current context has permission. For example, this is needed in
* stop_machine(): we create temporary high priority worker threads,
* but our caller might not have that capability.
+ *
+ * Return: 0 on success. An error code otherwise.
*/
int sched_setscheduler_nocheck(struct task_struct *p, int policy,
const struct sched_param *param)
@@ -3485,6 +3497,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
* @pid: the pid in question.
* @policy: new policy.
* @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
*/
SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
struct sched_param __user *, param)
@@ -3500,6 +3514,8 @@ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
* sys_sched_setparam - set/change the RT priority of a thread
* @pid: the pid in question.
* @param: structure containing the new RT priority.
+ *
+ * Return: 0 on success. An error code otherwise.
*/
SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
{
@@ -3509,6 +3525,9 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
/**
* sys_sched_getscheduler - get the policy (scheduling class) of a thread
* @pid: the pid in question.
+ *
+ * Return: On success, the policy of the thread. Otherwise, a negative error
+ * code.
*/
SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
{
@@ -3535,6 +3554,9 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
* sys_sched_getparam - get the RT priority of a thread
* @pid: the pid in question.
* @param: structure containing the RT priority.
+ *
+ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
+ * code.
*/
SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
{
@@ -3659,6 +3681,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
* @pid: pid of the process
* @len: length in bytes of the bitmask pointed to by user_mask_ptr
* @user_mask_ptr: user-space pointer to the new cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
*/
SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
unsigned long __user *, user_mask_ptr)
@@ -3710,6 +3734,8 @@ out_unlock:
* @pid: pid of the process
* @len: length in bytes of the bitmask pointed to by user_mask_ptr
* @user_mask_ptr: user-space pointer to hold the current cpu mask
+ *
+ * Return: 0 on success. An error code otherwise.
*/
SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
unsigned long __user *, user_mask_ptr)
@@ -3744,6 +3770,8 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
*
* This function yields the current CPU to other tasks. If there are no
* other threads running on this CPU then this function will return.
+ *
+ * Return: 0.
*/
SYSCALL_DEFINE0(sched_yield)
{
@@ -3869,7 +3897,7 @@ EXPORT_SYMBOL(yield);
* It's the caller's job to ensure that the target task struct
* can't go away on us before we can do any checks.
*
- * Returns:
+ * Return:
* true (>0) if we indeed boosted the target task.
* false (0) if we failed to boost the target.
* -ESRCH if there's no task to yield to.
@@ -3972,8 +4000,9 @@ long __sched io_schedule_timeout(long timeout)
* sys_sched_get_priority_max - return maximum RT priority.
* @policy: scheduling class.
*
- * this syscall returns the maximum rt_priority that can be used
- * by a given scheduling class.
+ * Return: On success, this syscall returns the maximum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
*/
SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
{
@@ -3997,8 +4026,9 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
* sys_sched_get_priority_min - return minimum RT priority.
* @policy: scheduling class.
*
- * this syscall returns the minimum rt_priority that can be used
- * by a given scheduling class.
+ * Return: On success, this syscall returns the minimum
+ * rt_priority that can be used by a given scheduling class.
+ * On failure, a negative error code is returned.
*/
SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
{
@@ -4024,6 +4054,9 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
*
* this syscall writes the default timeslice value of a given process
* into the user-space timespec buffer. A value of '0' means infinity.
+ *
+ * Return: On success, 0 and the timeslice is in @interval. Otherwise,
+ * an error code.
*/
SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
struct timespec __user *, interval)
@@ -4914,7 +4947,8 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
SD_BALANCE_FORK |
SD_BALANCE_EXEC |
SD_SHARE_CPUPOWER |
- SD_SHARE_PKG_RESOURCES);
+ SD_SHARE_PKG_RESOURCES |
+ SD_PREFER_SIBLING);
if (nr_node_ids == 1)
pflags &= ~SD_SERIALIZE;
}
@@ -5083,18 +5117,23 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
* two cpus are in the same cache domain, see cpus_share_cache().
*/
DEFINE_PER_CPU(struct sched_domain *, sd_llc);
+DEFINE_PER_CPU(int, sd_llc_size);
DEFINE_PER_CPU(int, sd_llc_id);
static void update_top_cache_domain(int cpu)
{
struct sched_domain *sd;
int id = cpu;
+ int size = 1;
sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
- if (sd)
+ if (sd) {
id = cpumask_first(sched_domain_span(sd));
+ size = cpumask_weight(sched_domain_span(sd));
+ }
rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
+ per_cpu(sd_llc_size, cpu) = size;
per_cpu(sd_llc_id, cpu) = id;
}
@@ -5118,6 +5157,13 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
tmp->parent = parent->parent;
if (parent->parent)
parent->parent->child = tmp;
+ /*
+ * Transfer SD_PREFER_SIBLING down in case of a
+ * degenerate parent; the spans match for this
+ * so the property transfers.
+ */
+ if (parent->flags & SD_PREFER_SIBLING)
+ tmp->flags |= SD_PREFER_SIBLING;
destroy_sched_domain(parent, cpu);
} else
tmp = tmp->parent;
@@ -6184,8 +6230,9 @@ match1:
;
}
+ n = ndoms_cur;
if (doms_new == NULL) {
- ndoms_cur = 0;
+ n = 0;
doms_new = &fallback_doms;
cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
WARN_ON_ONCE(dattr_new);
@@ -6193,7 +6240,7 @@ match1:
/* Build new domains */
for (i = 0; i < ndoms_new; i++) {
- for (j = 0; j < ndoms_cur && !new_topology; j++) {
+ for (j = 0; j < n && !new_topology; j++) {
if (cpumask_equal(doms_new[i], doms_cur[j])
&& dattrs_equal(dattr_new, i, dattr_cur, j))
goto match2;
@@ -6632,6 +6679,8 @@ void normalize_rt_tasks(void)
* @cpu: the processor in question.
*
* ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
+ *
+ * Return: The current task for @cpu.
*/
struct task_struct *curr_task(int cpu)
{
@@ -6763,7 +6812,7 @@ void sched_move_task(struct task_struct *tsk)
if (unlikely(running))
tsk->sched_class->put_prev_task(rq, tsk);
- tg = container_of(task_subsys_state_check(tsk, cpu_cgroup_subsys_id,
+ tg = container_of(task_css_check(tsk, cpu_cgroup_subsys_id,
lockdep_is_held(&tsk->sighand->siglock)),
struct task_group, css);
tg = autogroup_task_group(tsk, tg);
@@ -7085,23 +7134,22 @@ int sched_rt_handler(struct ctl_table *table, int write,
#ifdef CONFIG_CGROUP_SCHED
-/* return corresponding task_group object of a cgroup */
-static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
{
- return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
- struct task_group, css);
+ return css ? container_of(css, struct task_group, css) : NULL;
}
-static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
+static struct cgroup_subsys_state *
+cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
- struct task_group *tg, *parent;
+ struct task_group *parent = css_tg(parent_css);
+ struct task_group *tg;
- if (!cgrp->parent) {
+ if (!parent) {
/* This is early initialization for the top cgroup */
return &root_task_group.css;
}
- parent = cgroup_tg(cgrp->parent);
tg = sched_create_group(parent);
if (IS_ERR(tg))
return ERR_PTR(-ENOMEM);
@@ -7109,41 +7157,38 @@ static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
return &tg->css;
}
-static int cpu_cgroup_css_online(struct cgroup *cgrp)
+static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
{
- struct task_group *tg = cgroup_tg(cgrp);
- struct task_group *parent;
-
- if (!cgrp->parent)
- return 0;
+ struct task_group *tg = css_tg(css);
+ struct task_group *parent = css_tg(css_parent(css));
- parent = cgroup_tg(cgrp->parent);
- sched_online_group(tg, parent);
+ if (parent)
+ sched_online_group(tg, parent);
return 0;
}
-static void cpu_cgroup_css_free(struct cgroup *cgrp)
+static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
{
- struct task_group *tg = cgroup_tg(cgrp);
+ struct task_group *tg = css_tg(css);
sched_destroy_group(tg);
}
-static void cpu_cgroup_css_offline(struct cgroup *cgrp)
+static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
{
- struct task_group *tg = cgroup_tg(cgrp);
+ struct task_group *tg = css_tg(css);
sched_offline_group(tg);
}
-static int cpu_cgroup_can_attach(struct cgroup *cgrp,
+static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
struct task_struct *task;
- cgroup_taskset_for_each(task, cgrp, tset) {
+ cgroup_taskset_for_each(task, css, tset) {
#ifdef CONFIG_RT_GROUP_SCHED
- if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
+ if (!sched_rt_can_attach(css_tg(css), task))
return -EINVAL;
#else
/* We don't support RT-tasks being in separate groups */
@@ -7154,18 +7199,18 @@ static int cpu_cgroup_can_attach(struct cgroup *cgrp,
return 0;
}
-static void cpu_cgroup_attach(struct cgroup *cgrp,
+static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
struct task_struct *task;
- cgroup_taskset_for_each(task, cgrp, tset)
+ cgroup_taskset_for_each(task, css, tset)
sched_move_task(task);
}
-static void
-cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
- struct task_struct *task)
+static void cpu_cgroup_exit(struct cgroup_subsys_state *css,
+ struct cgroup_subsys_state *old_css,
+ struct task_struct *task)
{
/*
* cgroup_exit() is called in the copy_process() failure path.
@@ -7179,15 +7224,16 @@ cpu_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
}
#ifdef CONFIG_FAIR_GROUP_SCHED
-static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
- u64 shareval)
+static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
+ struct cftype *cftype, u64 shareval)
{
- return sched_group_set_shares(cgroup_tg(cgrp), scale_load(shareval));
+ return sched_group_set_shares(css_tg(css), scale_load(shareval));
}
-static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_shares_read_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- struct task_group *tg = cgroup_tg(cgrp);
+ struct task_group *tg = css_tg(css);
return (u64) scale_load_down(tg->shares);
}
@@ -7309,26 +7355,28 @@ long tg_get_cfs_period(struct task_group *tg)
return cfs_period_us;
}
-static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft)
+static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- return tg_get_cfs_quota(cgroup_tg(cgrp));
+ return tg_get_cfs_quota(css_tg(css));
}
-static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype,
- s64 cfs_quota_us)
+static int cpu_cfs_quota_write_s64(struct cgroup_subsys_state *css,
+ struct cftype *cftype, s64 cfs_quota_us)
{
- return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us);
+ return tg_set_cfs_quota(css_tg(css), cfs_quota_us);
}
-static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_cfs_period_read_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- return tg_get_cfs_period(cgroup_tg(cgrp));
+ return tg_get_cfs_period(css_tg(css));
}
-static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype,
- u64 cfs_period_us)
+static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css,
+ struct cftype *cftype, u64 cfs_period_us)
{
- return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us);
+ return tg_set_cfs_period(css_tg(css), cfs_period_us);
}
struct cfs_schedulable_data {
@@ -7409,10 +7457,10 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota)
return ret;
}
-static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
+static int cpu_stats_show(struct cgroup_subsys_state *css, struct cftype *cft,
struct cgroup_map_cb *cb)
{
- struct task_group *tg = cgroup_tg(cgrp);
+ struct task_group *tg = css_tg(css);
struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth;
cb->fill(cb, "nr_periods", cfs_b->nr_periods);
@@ -7425,26 +7473,28 @@ static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft,
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
-static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
- s64 val)
+static int cpu_rt_runtime_write(struct cgroup_subsys_state *css,
+ struct cftype *cft, s64 val)
{
- return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
+ return sched_group_set_rt_runtime(css_tg(css), val);
}
-static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
+static s64 cpu_rt_runtime_read(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- return sched_group_rt_runtime(cgroup_tg(cgrp));
+ return sched_group_rt_runtime(css_tg(css));
}
-static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
- u64 rt_period_us)
+static int cpu_rt_period_write_uint(struct cgroup_subsys_state *css,
+ struct cftype *cftype, u64 rt_period_us)
{
- return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
+ return sched_group_set_rt_period(css_tg(css), rt_period_us);
}
-static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_rt_period_read_uint(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- return sched_group_rt_period(cgroup_tg(cgrp));
+ return sched_group_rt_period(css_tg(css));
}
#endif /* CONFIG_RT_GROUP_SCHED */
diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c
index dbb7e2cd95e..f64722ff029 100644
--- a/kernel/sched/cpuacct.c
+++ b/kernel/sched/cpuacct.c
@@ -33,30 +33,20 @@ struct cpuacct {
struct kernel_cpustat __percpu *cpustat;
};
-/* return cpu accounting group corresponding to this container */
-static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
+static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)
{
- return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
- struct cpuacct, css);
+ return css ? container_of(css, struct cpuacct, css) : NULL;
}
/* return cpu accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
- return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
- struct cpuacct, css);
-}
-
-static inline struct cpuacct *__parent_ca(struct cpuacct *ca)
-{
- return cgroup_ca(ca->css.cgroup->parent);
+ return css_ca(task_css(tsk, cpuacct_subsys_id));
}
static inline struct cpuacct *parent_ca(struct cpuacct *ca)
{
- if (!ca->css.cgroup->parent)
- return NULL;
- return cgroup_ca(ca->css.cgroup->parent);
+ return css_ca(css_parent(&ca->css));
}
static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage);
@@ -66,11 +56,12 @@ static struct cpuacct root_cpuacct = {
};
/* create a new cpu accounting group */
-static struct cgroup_subsys_state *cpuacct_css_alloc(struct cgroup *cgrp)
+static struct cgroup_subsys_state *
+cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct cpuacct *ca;
- if (!cgrp->parent)
+ if (!parent_css)
return &root_cpuacct.css;
ca = kzalloc(sizeof(*ca), GFP_KERNEL);
@@ -96,9 +87,9 @@ out:
}
/* destroy an existing cpu accounting group */
-static void cpuacct_css_free(struct cgroup *cgrp)
+static void cpuacct_css_free(struct cgroup_subsys_state *css)
{
- struct cpuacct *ca = cgroup_ca(cgrp);
+ struct cpuacct *ca = css_ca(css);
free_percpu(ca->cpustat);
free_percpu(ca->cpuusage);
@@ -141,9 +132,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
}
/* return total cpu usage (in nanoseconds) of a group */
-static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpuusage_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
- struct cpuacct *ca = cgroup_ca(cgrp);
+ struct cpuacct *ca = css_ca(css);
u64 totalcpuusage = 0;
int i;
@@ -153,10 +144,10 @@ static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
return totalcpuusage;
}
-static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
- u64 reset)
+static int cpuusage_write(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 reset)
{
- struct cpuacct *ca = cgroup_ca(cgrp);
+ struct cpuacct *ca = css_ca(css);
int err = 0;
int i;
@@ -172,10 +163,10 @@ out:
return err;
}
-static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
- struct seq_file *m)
+static int cpuacct_percpu_seq_read(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *m)
{
- struct cpuacct *ca = cgroup_ca(cgroup);
+ struct cpuacct *ca = css_ca(css);
u64 percpu;
int i;
@@ -192,10 +183,10 @@ static const char * const cpuacct_stat_desc[] = {
[CPUACCT_STAT_SYSTEM] = "system",
};
-static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
- struct cgroup_map_cb *cb)
+static int cpuacct_stats_show(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct cgroup_map_cb *cb)
{
- struct cpuacct *ca = cgroup_ca(cgrp);
+ struct cpuacct *ca = css_ca(css);
int cpu;
s64 val = 0;
@@ -281,7 +272,7 @@ void cpuacct_account_field(struct task_struct *p, int index, u64 val)
while (ca != &root_cpuacct) {
kcpustat = this_cpu_ptr(ca->cpustat);
kcpustat->cpustat[index] += val;
- ca = __parent_ca(ca);
+ ca = parent_ca(ca);
}
rcu_read_unlock();
}
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 1095e878a46..8b836b376d9 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -62,7 +62,7 @@ static int convert_prio(int prio)
* any discrepancies created by racing against the uncertainty of the current
* priority configuration.
*
- * Returns: (int)bool - CPUs were found
+ * Return: (int)bool - CPUs were found
*/
int cpupri_find(struct cpupri *cp, struct task_struct *p,
struct cpumask *lowest_mask)
@@ -203,7 +203,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
* cpupri_init - initialize the cpupri structure
* @cp: The cpupri context
*
- * Returns: -ENOMEM if memory fails.
+ * Return: -ENOMEM on memory allocation failure.
*/
int cpupri_init(struct cpupri *cp)
{
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index a7959e05a9d..99947919e30 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -121,7 +121,7 @@ static inline void task_group_account_field(struct task_struct *p, int index,
* is the only cgroup, then nothing else should be necessary.
*
*/
- __get_cpu_var(kernel_cpustat).cpustat[index] += tmp;
+ __this_cpu_add(kernel_cpustat.cpustat[index], tmp);
cpuacct_account_field(p, index, tmp);
}
@@ -378,11 +378,8 @@ static inline void irqtime_account_process_tick(struct task_struct *p, int user_
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
-void vtime_task_switch(struct task_struct *prev)
+void vtime_common_task_switch(struct task_struct *prev)
{
- if (!vtime_accounting_enabled())
- return;
-
if (is_idle_task(prev))
vtime_account_idle(prev);
else
@@ -404,11 +401,8 @@ void vtime_task_switch(struct task_struct *prev)
* vtime_account().
*/
#ifndef __ARCH_HAS_VTIME_ACCOUNT
-void vtime_account_irq_enter(struct task_struct *tsk)
+void vtime_common_account_irq_enter(struct task_struct *tsk)
{
- if (!vtime_accounting_enabled())
- return;
-
if (!in_interrupt()) {
/*
* If we interrupted user, context_tracking_in_user()
@@ -428,7 +422,7 @@ void vtime_account_irq_enter(struct task_struct *tsk)
}
vtime_account_system(tsk);
}
-EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
+EXPORT_SYMBOL_GPL(vtime_common_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */
#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
@@ -557,16 +551,7 @@ static void cputime_adjust(struct task_cputime *curr,
struct cputime *prev,
cputime_t *ut, cputime_t *st)
{
- cputime_t rtime, stime, utime, total;
-
- if (vtime_accounting_enabled()) {
- *ut = curr->utime;
- *st = curr->stime;
- return;
- }
-
- stime = curr->stime;
- total = stime + curr->utime;
+ cputime_t rtime, stime, utime;
/*
* Tick based cputime accounting depend on random scheduling
@@ -588,13 +573,19 @@ static void cputime_adjust(struct task_cputime *curr,
if (prev->stime + prev->utime >= rtime)
goto out;
- if (total) {
+ stime = curr->stime;
+ utime = curr->utime;
+
+ if (utime == 0) {
+ stime = rtime;
+ } else if (stime == 0) {
+ utime = rtime;
+ } else {
+ cputime_t total = stime + utime;
+
stime = scale_stime((__force u64)stime,
(__force u64)rtime, (__force u64)total);
utime = rtime - stime;
- } else {
- stime = rtime;
- utime = 0;
}
/*
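The new special cases and the rescaling branch are easier to follow with numbers. A stand-alone check of the same arithmetic (scale_stime() is reduced to exact 64-bit math for the example; the monotonicity handling against prev is omitted):

#include <assert.h>
#include <stdint.h>

static uint64_t scale_stime(uint64_t stime, uint64_t rtime, uint64_t total)
{
	return stime * rtime / total;	/* exact for these small example values */
}

int main(void)
{
	uint64_t stime = 2, utime = 6;		/* tick-sampled split: 25% system, 75% user */
	uint64_t rtime = 12;			/* precise sum_exec_runtime */
	uint64_t new_stime, new_utime;

	if (utime == 0) {			/* never sampled in userspace: all system */
		new_stime = rtime;
		new_utime = 0;
	} else if (stime == 0) {		/* never sampled in the kernel: all user */
		new_utime = rtime;
		new_stime = 0;
	} else {				/* keep the sampled ratio, rescale to rtime */
		new_stime = scale_stime(stime, rtime, stime + utime);
		new_utime = rtime - new_stime;
	}

	assert(new_stime == 3 && new_utime == 9);	/* the same 25/75 split of 12 */
	return 0;
}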
@@ -664,23 +655,17 @@ static void __vtime_account_system(struct task_struct *tsk)
void vtime_account_system(struct task_struct *tsk)
{
- if (!vtime_accounting_enabled())
- return;
-
write_seqlock(&tsk->vtime_seqlock);
__vtime_account_system(tsk);
write_sequnlock(&tsk->vtime_seqlock);
}
-void vtime_account_irq_exit(struct task_struct *tsk)
+void vtime_gen_account_irq_exit(struct task_struct *tsk)
{
- if (!vtime_accounting_enabled())
- return;
-
write_seqlock(&tsk->vtime_seqlock);
+ __vtime_account_system(tsk);
if (context_tracking_in_user())
tsk->vtime_snap_whence = VTIME_USER;
- __vtime_account_system(tsk);
write_sequnlock(&tsk->vtime_seqlock);
}
@@ -688,12 +673,8 @@ void vtime_account_user(struct task_struct *tsk)
{
cputime_t delta_cpu;
- if (!vtime_accounting_enabled())
- return;
-
- delta_cpu = get_vtime_delta(tsk);
-
write_seqlock(&tsk->vtime_seqlock);
+ delta_cpu = get_vtime_delta(tsk);
tsk->vtime_snap_whence = VTIME_SYS;
account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
write_sequnlock(&tsk->vtime_seqlock);
@@ -701,22 +682,27 @@ void vtime_account_user(struct task_struct *tsk)
void vtime_user_enter(struct task_struct *tsk)
{
- if (!vtime_accounting_enabled())
- return;
-
write_seqlock(&tsk->vtime_seqlock);
- tsk->vtime_snap_whence = VTIME_USER;
__vtime_account_system(tsk);
+ tsk->vtime_snap_whence = VTIME_USER;
write_sequnlock(&tsk->vtime_seqlock);
}
void vtime_guest_enter(struct task_struct *tsk)
{
+ /*
+ * The flags must be updated under the lock with
+ * the vtime_snap flush and update.
+ * That enforces the right ordering and update-sequence
+ * synchronization against the reader (task_gtime()), which
+ * can thus safely catch up with a tickless delta.
+ */
write_seqlock(&tsk->vtime_seqlock);
__vtime_account_system(tsk);
current->flags |= PF_VCPU;
write_sequnlock(&tsk->vtime_seqlock);
}
+EXPORT_SYMBOL_GPL(vtime_guest_enter);
void vtime_guest_exit(struct task_struct *tsk)
{
@@ -725,6 +711,7 @@ void vtime_guest_exit(struct task_struct *tsk)
current->flags &= ~PF_VCPU;
write_sequnlock(&tsk->vtime_seqlock);
}
+EXPORT_SYMBOL_GPL(vtime_guest_exit);
void vtime_account_idle(struct task_struct *tsk)
{
@@ -733,11 +720,6 @@ void vtime_account_idle(struct task_struct *tsk)
account_idle_time(delta_cpu);
}
-bool vtime_accounting_enabled(void)
-{
- return context_tracking_active();
-}
-
void arch_vtime_task_switch(struct task_struct *prev)
{
write_seqlock(&prev->vtime_seqlock);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bb456f44b7b..7f0a5e6cdae 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -851,7 +851,7 @@ void task_numa_fault(int node, int pages, bool migrated)
{
struct task_struct *p = current;
- if (!sched_feat_numa(NUMA))
+ if (!numabalancing_enabled)
return;
/* FIXME: Allocate task-specific structure for placement policy here */
@@ -2032,6 +2032,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
*/
update_entity_load_avg(curr, 1);
update_cfs_rq_blocked_load(cfs_rq, 1);
+ update_cfs_shares(cfs_rq);
#ifdef CONFIG_SCHED_HRTICK
/*
@@ -3017,6 +3018,23 @@ static unsigned long cpu_avg_load_per_task(int cpu)
return 0;
}
+static void record_wakee(struct task_struct *p)
+{
+ /*
+ * Rough decay (wiping) for cost saving; don't worry about
+ * the boundary, a really active task won't care about the
+ * occasional lost flip.
+ */
+ if (jiffies > current->wakee_flip_decay_ts + HZ) {
+ current->wakee_flips = 0;
+ current->wakee_flip_decay_ts = jiffies;
+ }
+
+ if (current->last_wakee != p) {
+ current->last_wakee = p;
+ current->wakee_flips++;
+ }
+}
static void task_waking_fair(struct task_struct *p)
{
@@ -3037,6 +3055,7 @@ static void task_waking_fair(struct task_struct *p)
#endif
se->vruntime -= min_vruntime;
+ record_wakee(p);
}
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -3155,6 +3174,28 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu,
#endif
+static int wake_wide(struct task_struct *p)
+{
+ int factor = this_cpu_read(sd_llc_size);
+
+ /*
+ * A high wakee_flips value means many different wakees or rapid
+ * switching between them. Using the LLC size as the factor scales
+ * the threshold automatically, so a bigger cache domain tolerates
+ * more flips and thus leads to more pull (affine wakeups).
+ */
+ if (p->wakee_flips > factor) {
+ /*
+ * The wakee is somewhat hot and needs a certain amount of CPU;
+ * if the waker is far hotter, prefer not to pull the wakee onto
+ * the waker's CPU.
+ */
+ if (current->wakee_flips > (factor * p->wakee_flips))
+ return 1;
+ }
+
+ return 0;
+}
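Concretely, with assumed numbers: if the shared-cache domain spans 8 CPUs, factor (sd_llc_size) is 8. A wakee that flipped partners 12 times and a waker that flipped 200 times (more than 8 * 12 = 96) make wake_wide() return 1, so wake_affine() below bails out and the wakee is left to the normal domain balancing. A tiny stand-alone check of that logic (the helper is illustrative, not the kernel function itself):

#include <assert.h>

static int wake_wide_example(unsigned int wakee_flips, unsigned int waker_flips,
			     unsigned int factor)
{
	return wakee_flips > factor && waker_flips > factor * wakee_flips;
}

int main(void)
{
	assert(wake_wide_example(12, 200, 8) == 1);	/* spread: skip the affine wakeup */
	assert(wake_wide_example(12,  50, 8) == 0);	/* waker not hot enough: stay affine */
	return 0;
}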
+
static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
{
s64 this_load, load;
@@ -3164,6 +3205,13 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
unsigned long weight;
int balanced;
+ /*
+ * If we wake multiple tasks be careful to not bounce
+ * ourselves around too much.
+ */
+ if (wake_wide(p))
+ return 0;
+
idx = sd->wake_idx;
this_cpu = smp_processor_id();
prev_cpu = task_cpu(p);
@@ -4171,47 +4219,48 @@ static void update_blocked_averages(int cpu)
}
/*
- * Compute the cpu's hierarchical load factor for each task group.
+ * Compute the hierarchical load factor for cfs_rq and all its ascendants.
* This needs to be done in a top-down fashion because the load of a child
* group is a fraction of its parents load.
*/
-static int tg_load_down(struct task_group *tg, void *data)
-{
- unsigned long load;
- long cpu = (long)data;
-
- if (!tg->parent) {
- load = cpu_rq(cpu)->avg.load_avg_contrib;
- } else {
- load = tg->parent->cfs_rq[cpu]->h_load;
- load = div64_ul(load * tg->se[cpu]->avg.load_avg_contrib,
- tg->parent->cfs_rq[cpu]->runnable_load_avg + 1);
- }
-
- tg->cfs_rq[cpu]->h_load = load;
-
- return 0;
-}
-
-static void update_h_load(long cpu)
+static void update_cfs_rq_h_load(struct cfs_rq *cfs_rq)
{
- struct rq *rq = cpu_rq(cpu);
+ struct rq *rq = rq_of(cfs_rq);
+ struct sched_entity *se = cfs_rq->tg->se[cpu_of(rq)];
unsigned long now = jiffies;
+ unsigned long load;
- if (rq->h_load_throttle == now)
+ if (cfs_rq->last_h_load_update == now)
return;
- rq->h_load_throttle = now;
+ cfs_rq->h_load_next = NULL;
+ for_each_sched_entity(se) {
+ cfs_rq = cfs_rq_of(se);
+ cfs_rq->h_load_next = se;
+ if (cfs_rq->last_h_load_update == now)
+ break;
+ }
- rcu_read_lock();
- walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
- rcu_read_unlock();
+ if (!se) {
+ cfs_rq->h_load = rq->avg.load_avg_contrib;
+ cfs_rq->last_h_load_update = now;
+ }
+
+ while ((se = cfs_rq->h_load_next) != NULL) {
+ load = cfs_rq->h_load;
+ load = div64_ul(load * se->avg.load_avg_contrib,
+ cfs_rq->runnable_load_avg + 1);
+ cfs_rq = group_cfs_rq(se);
+ cfs_rq->h_load = load;
+ cfs_rq->last_h_load_update = now;
+ }
}
static unsigned long task_h_load(struct task_struct *p)
{
struct cfs_rq *cfs_rq = task_cfs_rq(p);
+ update_cfs_rq_h_load(cfs_rq);
return div64_ul(p->se.avg.load_avg_contrib * cfs_rq->h_load,
cfs_rq->runnable_load_avg + 1);
}
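A worked example of the per-cfs_rq hierarchical scaling that update_cfs_rq_h_load() and task_h_load() now perform (all load numbers are assumed and chosen so the divisions come out exact):

#include <assert.h>
#include <stdint.h>

static uint64_t div64_ul(uint64_t a, uint64_t b)	/* stand-in for the kernel helper */
{
	return a / b;
}

int main(void)
{
	uint64_t rq_load_contrib  = 2048;	/* rq->avg.load_avg_contrib */
	uint64_t root_runnable    = 2047;	/* root cfs_rq->runnable_load_avg */
	uint64_t group_se_contrib = 1024;	/* group entity's load_avg_contrib */
	uint64_t group_runnable   = 1023;	/* group cfs_rq->runnable_load_avg */
	uint64_t task_contrib     = 512;	/* p->se.avg.load_avg_contrib */

	/* Top-down pass: root h_load, then the group's proportional share of it. */
	uint64_t root_h_load  = rq_load_contrib;
	uint64_t group_h_load = div64_ul(root_h_load * group_se_contrib,
					 root_runnable + 1);		/* 1024 */

	/* task_h_load(): the task's proportional share of its cfs_rq's h_load. */
	uint64_t h = div64_ul(task_contrib * group_h_load, group_runnable + 1);

	assert(group_h_load == 1024 && h == 512);
	return 0;
}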
@@ -4220,10 +4269,6 @@ static inline void update_blocked_averages(int cpu)
{
}
-static inline void update_h_load(long cpu)
-{
-}
-
static unsigned long task_h_load(struct task_struct *p)
{
return p->se.avg.load_avg_contrib;
@@ -4232,54 +4277,62 @@ static unsigned long task_h_load(struct task_struct *p)
/********** Helpers for find_busiest_group ************************/
/*
- * sd_lb_stats - Structure to store the statistics of a sched_domain
- * during load balancing.
- */
-struct sd_lb_stats {
- struct sched_group *busiest; /* Busiest group in this sd */
- struct sched_group *this; /* Local group in this sd */
- unsigned long total_load; /* Total load of all groups in sd */
- unsigned long total_pwr; /* Total power of all groups in sd */
- unsigned long avg_load; /* Average load across all groups in sd */
-
- /** Statistics of this group */
- unsigned long this_load;
- unsigned long this_load_per_task;
- unsigned long this_nr_running;
- unsigned long this_has_capacity;
- unsigned int this_idle_cpus;
-
- /* Statistics of the busiest group */
- unsigned int busiest_idle_cpus;
- unsigned long max_load;
- unsigned long busiest_load_per_task;
- unsigned long busiest_nr_running;
- unsigned long busiest_group_capacity;
- unsigned long busiest_has_capacity;
- unsigned int busiest_group_weight;
-
- int group_imb; /* Is there imbalance in this sd */
-};
-
-/*
* sg_lb_stats - stats of a sched_group required for load_balancing
*/
struct sg_lb_stats {
unsigned long avg_load; /*Avg load across the CPUs of the group */
unsigned long group_load; /* Total load over the CPUs of the group */
- unsigned long sum_nr_running; /* Nr tasks running in the group */
unsigned long sum_weighted_load; /* Weighted load of group's tasks */
- unsigned long group_capacity;
- unsigned long idle_cpus;
- unsigned long group_weight;
+ unsigned long load_per_task;
+ unsigned long group_power;
+ unsigned int sum_nr_running; /* Nr tasks running in the group */
+ unsigned int group_capacity;
+ unsigned int idle_cpus;
+ unsigned int group_weight;
int group_imb; /* Is there an imbalance in the group ? */
int group_has_capacity; /* Is there extra capacity in the group? */
};
+/*
+ * sd_lb_stats - Structure to store the statistics of a sched_domain
+ * during load balancing.
+ */
+struct sd_lb_stats {
+ struct sched_group *busiest; /* Busiest group in this sd */
+ struct sched_group *local; /* Local group in this sd */
+ unsigned long total_load; /* Total load of all groups in sd */
+ unsigned long total_pwr; /* Total power of all groups in sd */
+ unsigned long avg_load; /* Average load across all groups in sd */
+
+ struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
+ struct sg_lb_stats local_stat; /* Statistics of the local group */
+};
+
+static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
+{
+ /*
+ * Skimp on the clearing to avoid duplicate work. We can avoid clearing
+ * local_stat because update_sg_lb_stats() does a full clear/assignment.
+ * We must however clear busiest_stat::avg_load because
+ * update_sd_pick_busiest() reads this before assignment.
+ */
+ *sds = (struct sd_lb_stats){
+ .busiest = NULL,
+ .local = NULL,
+ .total_load = 0UL,
+ .total_pwr = 0UL,
+ .busiest_stat = {
+ .avg_load = 0UL,
+ },
+ };
+}
+
/**
* get_sd_load_idx - Obtain the load index for a given sched domain.
* @sd: The sched_domain whose load_idx is to be obtained.
* @idle: The Idle status of the CPU for whose sd load_icx is obtained.
+ *
+ * Return: The load index.
*/
static inline int get_sd_load_idx(struct sched_domain *sd,
enum cpu_idle_type idle)
@@ -4457,33 +4510,99 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
return 0;
}
+/*
+ * Group imbalance indicates (and tries to solve) the problem where balancing
+ * groups is inadequate due to tsk_cpus_allowed() constraints.
+ *
+ * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
+ * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
+ * Something like:
+ *
+ * { 0 1 2 3 } { 4 5 6 7 }
+ *         *     * * *
+ *
+ * If we were to balance group-wise we'd place two tasks in the first group and
+ * two tasks in the second group. Clearly this is undesired as it will overload
+ * cpu 3 and leave one of the cpus in the second group unused.
+ *
+ * The current solution to this issue is detecting the skew in the first group
+ * by noticing it has a cpu that is overloaded while the remaining cpus are
+ * idle -- or rather, there's a distinct imbalance in the cpus; see
+ * sg_imbalanced().
+ *
+ * When this is so detected; this group becomes a candidate for busiest; see
+ * update_sd_pick_busiest(). And calculate_imbalance() and
+ * find_busiest_group() avoid some of the usual balance conditions to allow it
+ * to create an effective group imbalance.
+ *
+ * This is a somewhat tricky proposition since the next run might not find the
+ * group imbalance and decide the groups need to be balanced again. A most
+ * subtle and fragile situation.
+ */
+
+struct sg_imb_stats {
+ unsigned long max_nr_running, min_nr_running;
+ unsigned long max_cpu_load, min_cpu_load;
+};
+
+static inline void init_sg_imb_stats(struct sg_imb_stats *sgi)
+{
+ sgi->max_cpu_load = sgi->max_nr_running = 0UL;
+ sgi->min_cpu_load = sgi->min_nr_running = ~0UL;
+}
+
+static inline void
+update_sg_imb_stats(struct sg_imb_stats *sgi,
+ unsigned long load, unsigned long nr_running)
+{
+ if (load > sgi->max_cpu_load)
+ sgi->max_cpu_load = load;
+ if (sgi->min_cpu_load > load)
+ sgi->min_cpu_load = load;
+
+ if (nr_running > sgi->max_nr_running)
+ sgi->max_nr_running = nr_running;
+ if (sgi->min_nr_running > nr_running)
+ sgi->min_nr_running = nr_running;
+}
+
+static inline int
+sg_imbalanced(struct sg_lb_stats *sgs, struct sg_imb_stats *sgi)
+{
+ /*
+ * Consider the group unbalanced when the imbalance is larger
+ * than the average weight of a task.
+ *
+ * APZ: with cgroup the avg task weight can vary wildly and
+ * might not be a suitable number - should we keep a
+ * normalized nr_running number somewhere that negates
+ * the hierarchy?
+ */
+ if ((sgi->max_cpu_load - sgi->min_cpu_load) >= sgs->load_per_task &&
+ (sgi->max_nr_running - sgi->min_nr_running) > 1)
+ return 1;
+
+ return 0;
+}
+
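A quick plain-C illustration of the sg_imbalanced() criterion, using invented loads that match the { 0 1 2 3 } example above (two 1024-weight tasks stuck on cpu 3, the rest idle): the busiest-vs-idlest load gap must exceed one average task and the nr_running gap must exceed one.

        #include <stdio.h>

        int main(void)
        {
                unsigned long max_cpu_load = 2048, min_cpu_load = 0;    /* cpu 3 vs cpus 0-2 */
                unsigned long max_nr_running = 2, min_nr_running = 0;
                unsigned long load_per_task = 1024;

                int group_imb = (max_cpu_load - min_cpu_load) >= load_per_task &&
                                (max_nr_running - min_nr_running) > 1;

                printf("group_imb=%d\n", group_imb);    /* prints 1 */
                return 0;
        }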
/**
* update_sg_lb_stats - Update sched_group's statistics for load balancing.
* @env: The load balancing environment.
* @group: sched_group whose statistics are to be updated.
* @load_idx: Load index of sched_domain of this_cpu for load calc.
* @local_group: Does group contain this_cpu.
- * @balance: Should we balance.
* @sgs: variable to hold the statistics for this group.
*/
static inline void update_sg_lb_stats(struct lb_env *env,
struct sched_group *group, int load_idx,
- int local_group, int *balance, struct sg_lb_stats *sgs)
+ int local_group, struct sg_lb_stats *sgs)
{
- unsigned long nr_running, max_nr_running, min_nr_running;
- unsigned long load, max_cpu_load, min_cpu_load;
- unsigned int balance_cpu = -1, first_idle_cpu = 0;
- unsigned long avg_load_per_task = 0;
+ struct sg_imb_stats sgi;
+ unsigned long nr_running;
+ unsigned long load;
int i;
- if (local_group)
- balance_cpu = group_balance_cpu(group);
-
- /* Tally up the load of all CPUs in the group */
- max_cpu_load = 0;
- min_cpu_load = ~0UL;
- max_nr_running = 0;
- min_nr_running = ~0UL;
+ init_sg_imb_stats(&sgi);
for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
struct rq *rq = cpu_rq(i);
@@ -4492,24 +4611,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
/* Bias balancing toward cpus of our domain */
if (local_group) {
- if (idle_cpu(i) && !first_idle_cpu &&
- cpumask_test_cpu(i, sched_group_mask(group))) {
- first_idle_cpu = 1;
- balance_cpu = i;
- }
-
load = target_load(i, load_idx);
} else {
load = source_load(i, load_idx);
- if (load > max_cpu_load)
- max_cpu_load = load;
- if (min_cpu_load > load)
- min_cpu_load = load;
-
- if (nr_running > max_nr_running)
- max_nr_running = nr_running;
- if (min_nr_running > nr_running)
- min_nr_running = nr_running;
+ update_sg_imb_stats(&sgi, load, nr_running);
}
sgs->group_load += load;
@@ -4519,46 +4624,25 @@ static inline void update_sg_lb_stats(struct lb_env *env,
sgs->idle_cpus++;
}
- /*
- * First idle cpu or the first cpu(busiest) in this sched group
- * is eligible for doing load balancing at this and above
- * domains. In the newly idle case, we will allow all the cpu's
- * to do the newly idle load balance.
- */
- if (local_group) {
- if (env->idle != CPU_NEWLY_IDLE) {
- if (balance_cpu != env->dst_cpu) {
- *balance = 0;
- return;
- }
- update_group_power(env->sd, env->dst_cpu);
- } else if (time_after_eq(jiffies, group->sgp->next_update))
- update_group_power(env->sd, env->dst_cpu);
- }
+ if (local_group && (env->idle != CPU_NEWLY_IDLE ||
+ time_after_eq(jiffies, group->sgp->next_update)))
+ update_group_power(env->sd, env->dst_cpu);
/* Adjust by relative CPU power of the group */
- sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
+ sgs->group_power = group->sgp->power;
+ sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_power;
- /*
- * Consider the group unbalanced when the imbalance is larger
- * than the average weight of a task.
- *
- * APZ: with cgroup the avg task weight can vary wildly and
- * might not be a suitable number - should we keep a
- * normalized nr_running number somewhere that negates
- * the hierarchy?
- */
if (sgs->sum_nr_running)
- avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
+ sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
+
+ sgs->group_imb = sg_imbalanced(sgs, &sgi);
- if ((max_cpu_load - min_cpu_load) >= avg_load_per_task &&
- (max_nr_running - min_nr_running) > 1)
- sgs->group_imb = 1;
+ sgs->group_capacity =
+ DIV_ROUND_CLOSEST(sgs->group_power, SCHED_POWER_SCALE);
- sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
- SCHED_POWER_SCALE);
if (!sgs->group_capacity)
sgs->group_capacity = fix_small_capacity(env->sd, group);
+
sgs->group_weight = group->group_weight;
if (sgs->group_capacity > sgs->sum_nr_running)
@@ -4574,13 +4658,16 @@ static inline void update_sg_lb_stats(struct lb_env *env,
*
* Determine if @sg is a busier group than the previously selected
* busiest group.
+ *
+ * Return: %true if @sg is a busier group than the previously selected
+ * busiest group. %false otherwise.
*/
static bool update_sd_pick_busiest(struct lb_env *env,
struct sd_lb_stats *sds,
struct sched_group *sg,
struct sg_lb_stats *sgs)
{
- if (sgs->avg_load <= sds->max_load)
+ if (sgs->avg_load <= sds->busiest_stat.avg_load)
return false;
if (sgs->sum_nr_running > sgs->group_capacity)
@@ -4613,11 +4700,11 @@ static bool update_sd_pick_busiest(struct lb_env *env,
* @sds: variable to hold the statistics for this sched_domain.
*/
static inline void update_sd_lb_stats(struct lb_env *env,
- int *balance, struct sd_lb_stats *sds)
+ struct sd_lb_stats *sds)
{
struct sched_domain *child = env->sd->child;
struct sched_group *sg = env->sd->groups;
- struct sg_lb_stats sgs;
+ struct sg_lb_stats tmp_sgs;
int load_idx, prefer_sibling = 0;
if (child && child->flags & SD_PREFER_SIBLING)
@@ -4626,17 +4713,17 @@ static inline void update_sd_lb_stats(struct lb_env *env,
load_idx = get_sd_load_idx(env->sd, env->idle);
do {
+ struct sg_lb_stats *sgs = &tmp_sgs;
int local_group;
local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
- memset(&sgs, 0, sizeof(sgs));
- update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
-
- if (local_group && !(*balance))
- return;
+ if (local_group) {
+ sds->local = sg;
+ sgs = &sds->local_stat;
+ }
- sds->total_load += sgs.group_load;
- sds->total_pwr += sg->sgp->power;
+ memset(sgs, 0, sizeof(*sgs));
+ update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
/*
* In case the child domain prefers tasks go to siblings
@@ -4648,26 +4735,17 @@ static inline void update_sd_lb_stats(struct lb_env *env,
* heaviest group when it is already under-utilized (possible
* with a large weight task outweighs the tasks on the system).
*/
- if (prefer_sibling && !local_group && sds->this_has_capacity)
- sgs.group_capacity = min(sgs.group_capacity, 1UL);
+ if (prefer_sibling && !local_group &&
+ sds->local && sds->local_stat.group_has_capacity)
+ sgs->group_capacity = min(sgs->group_capacity, 1U);
- if (local_group) {
- sds->this_load = sgs.avg_load;
- sds->this = sg;
- sds->this_nr_running = sgs.sum_nr_running;
- sds->this_load_per_task = sgs.sum_weighted_load;
- sds->this_has_capacity = sgs.group_has_capacity;
- sds->this_idle_cpus = sgs.idle_cpus;
- } else if (update_sd_pick_busiest(env, sds, sg, &sgs)) {
- sds->max_load = sgs.avg_load;
+ /* Now, start updating sd_lb_stats */
+ sds->total_load += sgs->group_load;
+ sds->total_pwr += sgs->group_power;
+
+ if (!local_group && update_sd_pick_busiest(env, sds, sg, sgs)) {
sds->busiest = sg;
- sds->busiest_nr_running = sgs.sum_nr_running;
- sds->busiest_idle_cpus = sgs.idle_cpus;
- sds->busiest_group_capacity = sgs.group_capacity;
- sds->busiest_load_per_task = sgs.sum_weighted_load;
- sds->busiest_has_capacity = sgs.group_has_capacity;
- sds->busiest_group_weight = sgs.group_weight;
- sds->group_imb = sgs.group_imb;
+ sds->busiest_stat = *sgs;
}
sg = sg->next;
@@ -4691,7 +4769,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
* assuming lower CPU number will be equivalent to lower a SMT thread
* number.
*
- * Returns 1 when packing is required and a task should be moved to
+ * Return: 1 when packing is required and a task should be moved to
* this CPU. The amount of the imbalance is returned in *imbalance.
*
* @env: The load balancing environment.
@@ -4712,7 +4790,8 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
return 0;
env->imbalance = DIV_ROUND_CLOSEST(
- sds->max_load * sds->busiest->sgp->power, SCHED_POWER_SCALE);
+ sds->busiest_stat.avg_load * sds->busiest_stat.group_power,
+ SCHED_POWER_SCALE);
return 1;
}
@@ -4730,24 +4809,23 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
unsigned long tmp, pwr_now = 0, pwr_move = 0;
unsigned int imbn = 2;
unsigned long scaled_busy_load_per_task;
+ struct sg_lb_stats *local, *busiest;
- if (sds->this_nr_running) {
- sds->this_load_per_task /= sds->this_nr_running;
- if (sds->busiest_load_per_task >
- sds->this_load_per_task)
- imbn = 1;
- } else {
- sds->this_load_per_task =
- cpu_avg_load_per_task(env->dst_cpu);
- }
+ local = &sds->local_stat;
+ busiest = &sds->busiest_stat;
+
+ if (!local->sum_nr_running)
+ local->load_per_task = cpu_avg_load_per_task(env->dst_cpu);
+ else if (busiest->load_per_task > local->load_per_task)
+ imbn = 1;
- scaled_busy_load_per_task = sds->busiest_load_per_task
- * SCHED_POWER_SCALE;
- scaled_busy_load_per_task /= sds->busiest->sgp->power;
+ scaled_busy_load_per_task =
+ (busiest->load_per_task * SCHED_POWER_SCALE) /
+ busiest->group_power;
- if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
- (scaled_busy_load_per_task * imbn)) {
- env->imbalance = sds->busiest_load_per_task;
+ if (busiest->avg_load - local->avg_load + scaled_busy_load_per_task >=
+ (scaled_busy_load_per_task * imbn)) {
+ env->imbalance = busiest->load_per_task;
return;
}
@@ -4757,34 +4835,37 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
* moving them.
*/
- pwr_now += sds->busiest->sgp->power *
- min(sds->busiest_load_per_task, sds->max_load);
- pwr_now += sds->this->sgp->power *
- min(sds->this_load_per_task, sds->this_load);
+ pwr_now += busiest->group_power *
+ min(busiest->load_per_task, busiest->avg_load);
+ pwr_now += local->group_power *
+ min(local->load_per_task, local->avg_load);
pwr_now /= SCHED_POWER_SCALE;
/* Amount of load we'd subtract */
- tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
- sds->busiest->sgp->power;
- if (sds->max_load > tmp)
- pwr_move += sds->busiest->sgp->power *
- min(sds->busiest_load_per_task, sds->max_load - tmp);
+ tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
+ busiest->group_power;
+ if (busiest->avg_load > tmp) {
+ pwr_move += busiest->group_power *
+ min(busiest->load_per_task,
+ busiest->avg_load - tmp);
+ }
/* Amount of load we'd add */
- if (sds->max_load * sds->busiest->sgp->power <
- sds->busiest_load_per_task * SCHED_POWER_SCALE)
- tmp = (sds->max_load * sds->busiest->sgp->power) /
- sds->this->sgp->power;
- else
- tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
- sds->this->sgp->power;
- pwr_move += sds->this->sgp->power *
- min(sds->this_load_per_task, sds->this_load + tmp);
+ if (busiest->avg_load * busiest->group_power <
+ busiest->load_per_task * SCHED_POWER_SCALE) {
+ tmp = (busiest->avg_load * busiest->group_power) /
+ local->group_power;
+ } else {
+ tmp = (busiest->load_per_task * SCHED_POWER_SCALE) /
+ local->group_power;
+ }
+ pwr_move += local->group_power *
+ min(local->load_per_task, local->avg_load + tmp);
pwr_move /= SCHED_POWER_SCALE;
/* Move if we gain throughput */
if (pwr_move > pwr_now)
- env->imbalance = sds->busiest_load_per_task;
+ env->imbalance = busiest->load_per_task;
}
/**
@@ -4796,11 +4877,18 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
{
unsigned long max_pull, load_above_capacity = ~0UL;
+ struct sg_lb_stats *local, *busiest;
- sds->busiest_load_per_task /= sds->busiest_nr_running;
- if (sds->group_imb) {
- sds->busiest_load_per_task =
- min(sds->busiest_load_per_task, sds->avg_load);
+ local = &sds->local_stat;
+ busiest = &sds->busiest_stat;
+
+ if (busiest->group_imb) {
+ /*
+ * In the group_imb case we cannot rely on group-wide averages
+ * to ensure cpu-load equilibrium, look at wider averages. XXX
+ */
+ busiest->load_per_task =
+ min(busiest->load_per_task, sds->avg_load);
}
/*
@@ -4808,21 +4896,22 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* max load less than avg load(as we skip the groups at or below
* its cpu_power, while calculating max_load..)
*/
- if (sds->max_load < sds->avg_load) {
+ if (busiest->avg_load < sds->avg_load) {
env->imbalance = 0;
return fix_small_imbalance(env, sds);
}
- if (!sds->group_imb) {
+ if (!busiest->group_imb) {
/*
* Don't want to pull so many tasks that a group would go idle.
+ * Except of course for the group_imb case, since then we might
+ * have to drop below capacity to reach cpu-load equilibrium.
*/
- load_above_capacity = (sds->busiest_nr_running -
- sds->busiest_group_capacity);
+ load_above_capacity =
+ (busiest->sum_nr_running - busiest->group_capacity);
load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
-
- load_above_capacity /= sds->busiest->sgp->power;
+ load_above_capacity /= busiest->group_power;
}
/*
@@ -4832,15 +4921,14 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* we also don't want to reduce the group load below the group capacity
* (so that we can implement power-savings policies etc). Thus we look
* for the minimum possible imbalance.
- * Be careful of negative numbers as they'll appear as very large values
- * with unsigned longs.
*/
- max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
+ max_pull = min(busiest->avg_load - sds->avg_load, load_above_capacity);
/* How much load to actually move to equalise the imbalance */
- env->imbalance = min(max_pull * sds->busiest->sgp->power,
- (sds->avg_load - sds->this_load) * sds->this->sgp->power)
- / SCHED_POWER_SCALE;
+ env->imbalance = min(
+ max_pull * busiest->group_power,
+ (sds->avg_load - local->avg_load) * local->group_power
+ ) / SCHED_POWER_SCALE;
/*
* if *imbalance is less than the average load per runnable task
@@ -4848,9 +4936,8 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* a think about bumping its value to force at least one task to be
* moved
*/
- if (env->imbalance < sds->busiest_load_per_task)
+ if (env->imbalance < busiest->load_per_task)
return fix_small_imbalance(env, sds);
-
}
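A worked example of the imbalance computation above, with made-up loads and SCHED_POWER_SCALE assumed to be 1024; load_above_capacity is left at ~0UL as in the group_imb case, so the capacity cap does not bite and the min() ends up picking the local group's headroom.

        #include <stdio.h>

        #define SCHED_POWER_SCALE 1024UL        /* assumption for the example */

        int main(void)
        {
                unsigned long busiest_avg = 2304, local_avg = 512, sd_avg = 1280;
                unsigned long busiest_power = 1024, local_power = 1024;
                unsigned long load_above_capacity = ~0UL;       /* group_imb case */

                unsigned long max_pull = busiest_avg - sd_avg;  /* 1024 */
                if (load_above_capacity < max_pull)
                        max_pull = load_above_capacity;

                unsigned long pull_side = max_pull * busiest_power;             /* 1048576 */
                unsigned long room_side = (sd_avg - local_avg) * local_power;   /* 786432  */
                unsigned long imbalance =
                        (pull_side < room_side ? pull_side : room_side) / SCHED_POWER_SCALE;

                printf("imbalance=%lu\n", imbalance);   /* prints 768 */
                return 0;
        }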
/******* find_busiest_group() helpers end here *********************/
@@ -4866,69 +4953,62 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* to restore balance.
*
* @env: The load balancing environment.
- * @balance: Pointer to a variable indicating if this_cpu
- * is the appropriate cpu to perform load balancing at this_level.
*
- * Returns: - the busiest group if imbalance exists.
+ * Return: - The busiest group if imbalance exists.
* - If no imbalance and user has opted for power-savings balance,
* return the least loaded group whose CPUs can be
* put to idle by rebalancing its tasks onto our group.
*/
-static struct sched_group *
-find_busiest_group(struct lb_env *env, int *balance)
+static struct sched_group *find_busiest_group(struct lb_env *env)
{
+ struct sg_lb_stats *local, *busiest;
struct sd_lb_stats sds;
- memset(&sds, 0, sizeof(sds));
+ init_sd_lb_stats(&sds);
/*
* Compute the various statistics relavent for load balancing at
* this level.
*/
- update_sd_lb_stats(env, balance, &sds);
-
- /*
- * this_cpu is not the appropriate cpu to perform load balancing at
- * this level.
- */
- if (!(*balance))
- goto ret;
+ update_sd_lb_stats(env, &sds);
+ local = &sds.local_stat;
+ busiest = &sds.busiest_stat;
if ((env->idle == CPU_IDLE || env->idle == CPU_NEWLY_IDLE) &&
check_asym_packing(env, &sds))
return sds.busiest;
/* There is no busy sibling group to pull tasks from */
- if (!sds.busiest || sds.busiest_nr_running == 0)
+ if (!sds.busiest || busiest->sum_nr_running == 0)
goto out_balanced;
sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
/*
* If the busiest group is imbalanced the below checks don't
- * work because they assumes all things are equal, which typically
+ * work because they assume all things are equal, which typically
* isn't true due to cpus_allowed constraints and the like.
*/
- if (sds.group_imb)
+ if (busiest->group_imb)
goto force_balance;
/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
- if (env->idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
- !sds.busiest_has_capacity)
+ if (env->idle == CPU_NEWLY_IDLE && local->group_has_capacity &&
+ !busiest->group_has_capacity)
goto force_balance;
/*
* If the local group is more busy than the selected busiest group
* don't try and pull any tasks.
*/
- if (sds.this_load >= sds.max_load)
+ if (local->avg_load >= busiest->avg_load)
goto out_balanced;
/*
* Don't pull any tasks if this group is already above the domain
* average load.
*/
- if (sds.this_load >= sds.avg_load)
+ if (local->avg_load >= sds.avg_load)
goto out_balanced;
if (env->idle == CPU_IDLE) {
@@ -4938,15 +5018,16 @@ find_busiest_group(struct lb_env *env, int *balance)
* there is no imbalance between this and busiest group
* wrt to idle cpu's, it is balanced.
*/
- if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
- sds.busiest_nr_running <= sds.busiest_group_weight)
+ if ((local->idle_cpus < busiest->idle_cpus) &&
+ busiest->sum_nr_running <= busiest->group_weight)
goto out_balanced;
} else {
/*
* In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
* imbalance_pct to be conservative.
*/
- if (100 * sds.max_load <= env->sd->imbalance_pct * sds.this_load)
+ if (100 * busiest->avg_load <=
+ env->sd->imbalance_pct * local->avg_load)
goto out_balanced;
}
@@ -4956,7 +5037,6 @@ force_balance:
return sds.busiest;
out_balanced:
-ret:
env->imbalance = 0;
return NULL;
}
@@ -4968,10 +5048,10 @@ static struct rq *find_busiest_queue(struct lb_env *env,
struct sched_group *group)
{
struct rq *busiest = NULL, *rq;
- unsigned long max_load = 0;
+ unsigned long busiest_load = 0, busiest_power = 1;
int i;
- for_each_cpu(i, sched_group_cpus(group)) {
+ for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
unsigned long power = power_of(i);
unsigned long capacity = DIV_ROUND_CLOSEST(power,
SCHED_POWER_SCALE);
@@ -4980,9 +5060,6 @@ static struct rq *find_busiest_queue(struct lb_env *env,
if (!capacity)
capacity = fix_small_capacity(env->sd, group);
- if (!cpumask_test_cpu(i, env->cpus))
- continue;
-
rq = cpu_rq(i);
wl = weighted_cpuload(i);
@@ -4998,11 +5075,15 @@ static struct rq *find_busiest_queue(struct lb_env *env,
* the weighted_cpuload() scaled with the cpu power, so that
* the load can be moved away from the cpu that is potentially
* running at a lower capacity.
+ *
+ * Thus we're looking for max(wl_i / power_i), crosswise
+ * multiplication to rid ourselves of the division works out
+ * to: wl_i * power_j > wl_j * power_i; where j is our
+ * previous maximum.
*/
- wl = (wl * SCHED_POWER_SCALE) / power;
-
- if (wl > max_load) {
- max_load = wl;
+ if (wl * busiest_power > busiest_load * power) {
+ busiest_load = wl;
+ busiest_power = power;
busiest = rq;
}
}
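A small self-check, with invented numbers, that the cross-multiplied comparison above selects the same runqueue as comparing wl/power directly, while avoiding the division and the precision loss of scaling by SCHED_POWER_SCALE first.

        #include <stdio.h>

        int main(void)
        {
                /* candidate cpu i versus the current maximum j */
                unsigned long wl_i = 900,  power_i = 512;       /* ratio ~1.76 */
                unsigned long wl_j = 1000, power_j = 1024;      /* ratio ~0.98 */

                int by_ratio = (double)wl_i / power_i > (double)wl_j / power_j;
                int by_cross = wl_i * power_j > wl_j * power_i;

                printf("%d %d\n", by_ratio, by_cross);  /* both print 1 */
                return 0;
        }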
@@ -5039,13 +5120,47 @@ static int need_active_balance(struct lb_env *env)
static int active_load_balance_cpu_stop(void *data);
+static int should_we_balance(struct lb_env *env)
+{
+ struct sched_group *sg = env->sd->groups;
+ struct cpumask *sg_cpus, *sg_mask;
+ int cpu, balance_cpu = -1;
+
+ /*
+ * In the newly idle case, we will allow all the cpu's
+ * to do the newly idle load balance.
+ */
+ if (env->idle == CPU_NEWLY_IDLE)
+ return 1;
+
+ sg_cpus = sched_group_cpus(sg);
+ sg_mask = sched_group_mask(sg);
+ /* Try to find first idle cpu */
+ for_each_cpu_and(cpu, sg_cpus, env->cpus) {
+ if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
+ continue;
+
+ balance_cpu = cpu;
+ break;
+ }
+
+ if (balance_cpu == -1)
+ balance_cpu = group_balance_cpu(sg);
+
+ /*
+ * First idle cpu or the first cpu(busiest) in this sched group
+ * is eligible for doing load balancing at this and above domains.
+ */
+ return balance_cpu != env->dst_cpu;
+}
+
/*
* Check this_cpu to ensure it is balanced within domain. Attempt to move
* tasks if there is an imbalance.
*/
static int load_balance(int this_cpu, struct rq *this_rq,
struct sched_domain *sd, enum cpu_idle_type idle,
- int *balance)
+ int *continue_balancing)
{
int ld_moved, cur_ld_moved, active_balance = 0;
struct sched_group *group;
@@ -5075,11 +5190,12 @@ static int load_balance(int this_cpu, struct rq *this_rq,
schedstat_inc(sd, lb_count[idle]);
redo:
- group = find_busiest_group(&env, balance);
-
- if (*balance == 0)
+ if (!should_we_balance(&env)) {
+ *continue_balancing = 0;
goto out_balanced;
+ }
+ group = find_busiest_group(&env);
if (!group) {
schedstat_inc(sd, lb_nobusyg[idle]);
goto out_balanced;
@@ -5108,7 +5224,6 @@ redo:
env.src_rq = busiest;
env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running);
- update_h_load(env.src_cpu);
more_balance:
local_irq_save(flags);
double_rq_lock(env.dst_rq, busiest);
@@ -5292,7 +5407,7 @@ void idle_balance(int this_cpu, struct rq *this_rq)
rcu_read_lock();
for_each_domain(this_cpu, sd) {
unsigned long interval;
- int balance = 1;
+ int continue_balancing = 1;
if (!(sd->flags & SD_LOAD_BALANCE))
continue;
@@ -5300,7 +5415,8 @@ void idle_balance(int this_cpu, struct rq *this_rq)
if (sd->flags & SD_BALANCE_NEWIDLE) {
/* If we've pulled tasks over stop searching: */
pulled_task = load_balance(this_cpu, this_rq,
- sd, CPU_NEWLY_IDLE, &balance);
+ sd, CPU_NEWLY_IDLE,
+ &continue_balancing);
}
interval = msecs_to_jiffies(sd->balance_interval);
@@ -5538,7 +5654,7 @@ void update_max_interval(void)
*/
static void rebalance_domains(int cpu, enum cpu_idle_type idle)
{
- int balance = 1;
+ int continue_balancing = 1;
struct rq *rq = cpu_rq(cpu);
unsigned long interval;
struct sched_domain *sd;
@@ -5570,7 +5686,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
}
if (time_after_eq(jiffies, sd->last_balance + interval)) {
- if (load_balance(cpu, rq, sd, idle, &balance)) {
+ if (load_balance(cpu, rq, sd, idle, &continue_balancing)) {
/*
* The LBF_SOME_PINNED logic could have changed
* env->dst_cpu, so we can't know our idle
@@ -5593,7 +5709,7 @@ out:
* CPU in our sched group which is doing load balancing more
* actively.
*/
- if (!balance)
+ if (!continue_balancing)
break;
}
rcu_read_unlock();
@@ -5786,7 +5902,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
entity_tick(cfs_rq, se, queued);
}
- if (sched_feat_numa(NUMA))
+ if (numabalancing_enabled)
task_tick_numa(rq, curr);
update_rq_runnable_avg(rq, 1);
@@ -5889,11 +6005,9 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
* and ensure we don't carry in an old decay_count if we
* switch back.
*/
- if (p->se.avg.decay_count) {
- struct cfs_rq *cfs_rq = cfs_rq_of(&p->se);
- __synchronize_entity_decay(&p->se);
- subtract_blocked_load_contrib(cfs_rq,
- p->se.avg.load_avg_contrib);
+ if (se->avg.decay_count) {
+ __synchronize_entity_decay(se);
+ subtract_blocked_load_contrib(cfs_rq, se->avg.load_avg_contrib);
}
#endif
}
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ef0a7b2439d..b3c5653e1dc 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -285,7 +285,6 @@ struct cfs_rq {
/* Required to track per-cpu representation of a task_group */
u32 tg_runnable_contrib;
unsigned long tg_load_contrib;
-#endif /* CONFIG_FAIR_GROUP_SCHED */
/*
* h_load = weight * f(tg)
@@ -294,6 +293,9 @@ struct cfs_rq {
* this group.
*/
unsigned long h_load;
+ u64 last_h_load_update;
+ struct sched_entity *h_load_next;
+#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */
#ifdef CONFIG_FAIR_GROUP_SCHED
@@ -429,9 +431,6 @@ struct rq {
#ifdef CONFIG_FAIR_GROUP_SCHED
/* list of leaf cfs_rq on this cpu: */
struct list_head leaf_cfs_rq_list;
-#ifdef CONFIG_SMP
- unsigned long h_load_throttle;
-#endif /* CONFIG_SMP */
#endif /* CONFIG_FAIR_GROUP_SCHED */
#ifdef CONFIG_RT_GROUP_SCHED
@@ -595,6 +594,7 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
}
DECLARE_PER_CPU(struct sched_domain *, sd_llc);
+DECLARE_PER_CPU(int, sd_llc_size);
DECLARE_PER_CPU(int, sd_llc_id);
struct sched_group_power {
@@ -665,9 +665,9 @@ extern int group_balance_cpu(struct sched_group *sg);
/*
* Return the group to which this tasks belongs.
*
- * We cannot use task_subsys_state() and friends because the cgroup
- * subsystem changes that value before the cgroup_subsys::attach() method
- * is called, therefore we cannot pin it and might observe the wrong value.
+ * We cannot use task_css() and friends because the cgroup subsystem
+ * changes that value before the cgroup_subsys::attach() method is called,
+ * therefore we cannot pin it and might observe the wrong value.
*
* The same is true for autogroup's p->signal->autogroup->tg, the autogroup
* core changes this before calling sched_move_task().
diff --git a/kernel/smp.c b/kernel/smp.c
index fe9f773d711..449b707fc20 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -186,25 +186,13 @@ void generic_smp_call_function_single_interrupt(void)
while (!list_empty(&list)) {
struct call_single_data *csd;
- unsigned int csd_flags;
csd = list_entry(list.next, struct call_single_data, list);
list_del(&csd->list);
- /*
- * 'csd' can be invalid after this call if flags == 0
- * (when called through generic_exec_single()),
- * so save them away before making the call:
- */
- csd_flags = csd->flags;
-
csd->func(csd->info);
- /*
- * Unlocked CSDs are valid through generic_exec_single():
- */
- if (csd_flags & CSD_FLAG_LOCK)
- csd_unlock(csd);
+ csd_unlock(csd);
}
}
@@ -278,8 +266,6 @@ EXPORT_SYMBOL(smp_call_function_single);
* @wait: If true, wait until function has completed.
*
* Returns 0 on success, else a negative status code (if no cpus were online).
- * Note that @wait will be implicitly turned on in case of allocation failures,
- * since we fall back to on-stack allocation.
*
* Selection preference:
* 1) current cpu if in @mask
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ac09d98490a..07f6fc468e1 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2346,7 +2346,11 @@ static int do_proc_dointvec_ms_jiffies_conv(bool *negp, unsigned long *lvalp,
int write, void *data)
{
if (write) {
- *valp = msecs_to_jiffies(*negp ? -*lvalp : *lvalp);
+ unsigned long jif = msecs_to_jiffies(*negp ? -*lvalp : *lvalp);
+
+ if (jif > INT_MAX)
+ return 1;
+ *valp = (int)jif;
} else {
int val = *valp;
unsigned long lval;
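To see why the INT_MAX guard is needed: *valp is an int, so a millisecond value whose jiffies count exceeds INT_MAX would otherwise be silently truncated into garbage. A userspace sketch, assuming HZ=1000 so one jiffy equals one millisecond:

        #include <stdio.h>
        #include <limits.h>

        #define HZ 1000 /* assumption: one jiffy == one millisecond */

        int main(void)
        {
                unsigned long msecs = 3000000000UL;      /* ~34.7 days */
                unsigned long jif = msecs * (HZ / 1000); /* msecs_to_jiffies() at HZ=1000 */
                int stored = (int)jif;  /* what *valp would receive: typically negative */

                printf("jif=%lu fits=%d stored=%d\n", jif, jif <= INT_MAX, stored);
                return 0;
        }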
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 70f27e89012..2b62fe86f9e 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -105,7 +105,6 @@ config NO_HZ_FULL
select RCU_USER_QS
select RCU_NOCB_CPU
select VIRT_CPU_ACCOUNTING_GEN
- select CONTEXT_TRACKING_FORCE
select IRQ_WORK
help
Adaptively try to shutdown the tick whenever possible, even when
@@ -134,6 +133,56 @@ config NO_HZ_FULL_ALL
Note the boot CPU will still be kept outside the range to
handle the timekeeping duty.
+config NO_HZ_FULL_SYSIDLE
+ bool "Detect full-system idle state for full dynticks system"
+ depends on NO_HZ_FULL
+ default n
+ help
+ At least one CPU must keep the scheduling-clock tick running for
+ timekeeping purposes whenever there is a non-idle CPU, where
+ "non-idle" also includes dynticks CPUs as long as they are
+ running non-idle tasks. Because the underlying adaptive-tick
+ support cannot distinguish between all CPUs being idle and
+ all CPUs each running a single task in dynticks mode, the
+ underlying support simply ensures that there is always a CPU
+ handling the scheduling-clock tick, whether or not all CPUs
+ are idle. This Kconfig option enables scalable detection of
+ the all-CPUs-idle state, thus allowing the scheduling-clock
+ tick to be disabled when all CPUs are idle. Note that scalable
+ detection of the all-CPUs-idle state means that larger systems
+ will be slower to declare the all-CPUs-idle state.
+
+ Say Y if you would like to help debug all-CPUs-idle detection.
+
+ Say N if you are unsure.
+
+config NO_HZ_FULL_SYSIDLE_SMALL
+ int "Number of CPUs above which large-system approach is used"
+ depends on NO_HZ_FULL_SYSIDLE
+ range 1 NR_CPUS
+ default 8
+ help
+ The full-system idle detection mechanism takes a lazy approach
+ on large systems, as is required to attain decent scalability.
+ However, on smaller systems, scalability is not anywhere near as
+ large a concern as is energy efficiency. The sysidle subsystem
+ therefore uses a fast but non-scalable algorithm for small
+ systems and a lazier but scalable algorithm for large systems.
+ This Kconfig parameter defines the number of CPUs in the largest
+ system that will be considered to be "small".
+
+ The default value will be fine in most cases. Battery-powered
+ systems that (1) enable NO_HZ_FULL_SYSIDLE, (2) have larger
+ numbers of CPUs, and (3) are suffering from battery-lifetime
+ problems due to long sysidle latencies might wish to experiment
+ with larger values for this Kconfig parameter. On the other
+ hand, they might be even better served by disabling NO_HZ_FULL
+ entirely, given that NO_HZ_FULL is intended for HPC and
+ real-time workloads that at present do not tend to be run on
+ battery-powered systems.
+
+ Take the default if you are unsure.
+
config NO_HZ
bool "Old Idle dynticks config"
depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index a326f27d7f0..0b479a6a22b 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -121,7 +121,7 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
BUG_ON(bits > 32);
WARN_ON(!irqs_disabled());
read_sched_clock = read;
- sched_clock_mask = (1 << bits) - 1;
+ sched_clock_mask = (1ULL << bits) - 1;
cd.rate = rate;
/* calculate the mult/shift to convert counter ticks to ns. */
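The 1ULL matters because the mask must cover a full 32-bit counter when bits == 32: shifting a plain int by its own width is undefined behaviour, and (1 << 32) - 1 would not produce 0xffffffff. A small demonstration:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                int bits = 32;
                uint64_t mask = (1ULL << bits) - 1;     /* 0xffffffff, as intended */

                /* (1 << bits) - 1 would be undefined behaviour here: bits == width of int */
                printf("mask=%#llx\n", (unsigned long long)mask);
                return 0;
        }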
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index e80183f4a6c..3612fc77f83 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -23,6 +23,7 @@
#include <linux/irq_work.h>
#include <linux/posix-timers.h>
#include <linux/perf_event.h>
+#include <linux/context_tracking.h>
#include <asm/irq_regs.h>
@@ -148,8 +149,8 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
}
#ifdef CONFIG_NO_HZ_FULL
-static cpumask_var_t nohz_full_mask;
-bool have_nohz_full_mask;
+cpumask_var_t tick_nohz_full_mask;
+bool tick_nohz_full_running;
static bool can_stop_full_tick(void)
{
@@ -182,7 +183,8 @@ static bool can_stop_full_tick(void)
* Don't allow the user to think they can get
* full NO_HZ with this machine.
*/
- WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock");
+ WARN_ONCE(tick_nohz_full_running,
+ "NO_HZ FULL will not work with unstable sched clock");
return false;
}
#endif
@@ -196,7 +198,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now);
* Re-evaluate the need for the tick on the current CPU
* and restart it if necessary.
*/
-void tick_nohz_full_check(void)
+void __tick_nohz_full_check(void)
{
struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
@@ -210,7 +212,7 @@ void tick_nohz_full_check(void)
static void nohz_full_kick_work_func(struct irq_work *work)
{
- tick_nohz_full_check();
+ __tick_nohz_full_check();
}
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
@@ -229,7 +231,7 @@ void tick_nohz_full_kick(void)
static void nohz_full_kick_ipi(void *info)
{
- tick_nohz_full_check();
+ __tick_nohz_full_check();
}
/*
@@ -238,12 +240,13 @@ static void nohz_full_kick_ipi(void *info)
*/
void tick_nohz_full_kick_all(void)
{
- if (!have_nohz_full_mask)
+ if (!tick_nohz_full_running)
return;
preempt_disable();
- smp_call_function_many(nohz_full_mask,
+ smp_call_function_many(tick_nohz_full_mask,
nohz_full_kick_ipi, NULL, false);
+ tick_nohz_full_kick();
preempt_enable();
}
@@ -252,7 +255,7 @@ void tick_nohz_full_kick_all(void)
* It might need the tick due to per task/process properties:
* perf events, posix cpu timers, ...
*/
-void tick_nohz_task_switch(struct task_struct *tsk)
+void __tick_nohz_task_switch(struct task_struct *tsk)
{
unsigned long flags;
@@ -268,31 +271,23 @@ out:
local_irq_restore(flags);
}
-int tick_nohz_full_cpu(int cpu)
-{
- if (!have_nohz_full_mask)
- return 0;
-
- return cpumask_test_cpu(cpu, nohz_full_mask);
-}
-
/* Parse the boot-time nohz CPU list from the kernel parameters. */
static int __init tick_nohz_full_setup(char *str)
{
int cpu;
- alloc_bootmem_cpumask_var(&nohz_full_mask);
- if (cpulist_parse(str, nohz_full_mask) < 0) {
+ alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
+ if (cpulist_parse(str, tick_nohz_full_mask) < 0) {
pr_warning("NOHZ: Incorrect nohz_full cpumask\n");
return 1;
}
cpu = smp_processor_id();
- if (cpumask_test_cpu(cpu, nohz_full_mask)) {
+ if (cpumask_test_cpu(cpu, tick_nohz_full_mask)) {
pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
- cpumask_clear_cpu(cpu, nohz_full_mask);
+ cpumask_clear_cpu(cpu, tick_nohz_full_mask);
}
- have_nohz_full_mask = true;
+ tick_nohz_full_running = true;
return 1;
}
@@ -310,7 +305,7 @@ static int tick_nohz_cpu_down_callback(struct notifier_block *nfb,
* If we handle the timekeeping duty for full dynticks CPUs,
* we can't safely shutdown that CPU.
*/
- if (have_nohz_full_mask && tick_do_timer_cpu == cpu)
+ if (tick_nohz_full_running && tick_do_timer_cpu == cpu)
return NOTIFY_BAD;
break;
}
@@ -329,14 +324,14 @@ static int tick_nohz_init_all(void)
int err = -1;
#ifdef CONFIG_NO_HZ_FULL_ALL
- if (!alloc_cpumask_var(&nohz_full_mask, GFP_KERNEL)) {
+ if (!alloc_cpumask_var(&tick_nohz_full_mask, GFP_KERNEL)) {
pr_err("NO_HZ: Can't allocate full dynticks cpumask\n");
return err;
}
err = 0;
- cpumask_setall(nohz_full_mask);
- cpumask_clear_cpu(smp_processor_id(), nohz_full_mask);
- have_nohz_full_mask = true;
+ cpumask_setall(tick_nohz_full_mask);
+ cpumask_clear_cpu(smp_processor_id(), tick_nohz_full_mask);
+ tick_nohz_full_running = true;
#endif
return err;
}
@@ -345,17 +340,18 @@ void __init tick_nohz_init(void)
{
int cpu;
- if (!have_nohz_full_mask) {
+ if (!tick_nohz_full_running) {
if (tick_nohz_init_all() < 0)
return;
}
+ for_each_cpu(cpu, tick_nohz_full_mask)
+ context_tracking_cpu_set(cpu);
+
cpu_notifier(tick_nohz_cpu_down_callback, 0);
- cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), nohz_full_mask);
+ cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), tick_nohz_full_mask);
pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf);
}
-#else
-#define have_nohz_full_mask (0)
#endif
/*
@@ -733,7 +729,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
return false;
}
- if (have_nohz_full_mask) {
+ if (tick_nohz_full_enabled()) {
/*
* Keep the tick alive to guarantee timekeeping progression
* if there are full dynticks CPUs around
@@ -827,13 +823,10 @@ void tick_nohz_irq_exit(void)
{
struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
- if (ts->inidle) {
- /* Cancel the timer because CPU already waken up from the C-states*/
- menu_hrtimer_cancel();
+ if (ts->inidle)
__tick_nohz_idle_enter(ts);
- } else {
+ else
tick_nohz_full_stop_tick(ts);
- }
}
/**
@@ -931,8 +924,6 @@ void tick_nohz_idle_exit(void)
ts->inidle = 0;
- /* Cancel the timer because CPU already waken up from the C-states*/
- menu_hrtimer_cancel();
if (ts->idle_active || ts->tick_stopped)
now = ktime_get();
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 3bdf2832301..61ed862cdd3 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -265,10 +265,9 @@ static inline void timer_list_header(struct seq_file *m, u64 now)
static int timer_list_show(struct seq_file *m, void *v)
{
struct timer_list_iter *iter = v;
- u64 now = ktime_to_ns(ktime_get());
if (iter->cpu == -1 && !iter->second_pass)
- timer_list_header(m, now);
+ timer_list_header(m, iter->now);
else if (!iter->second_pass)
print_cpu(m, iter->cpu, iter->now);
#ifdef CONFIG_GENERIC_CLOCKEVENTS
@@ -298,33 +297,41 @@ void sysrq_timer_list_show(void)
return;
}
-static void *timer_list_start(struct seq_file *file, loff_t *offset)
+static void *move_iter(struct timer_list_iter *iter, loff_t offset)
{
- struct timer_list_iter *iter = file->private;
-
- if (!*offset) {
- iter->cpu = -1;
- iter->now = ktime_to_ns(ktime_get());
- } else if (iter->cpu >= nr_cpu_ids) {
+ for (; offset; offset--) {
+ iter->cpu = cpumask_next(iter->cpu, cpu_online_mask);
+ if (iter->cpu >= nr_cpu_ids) {
#ifdef CONFIG_GENERIC_CLOCKEVENTS
- if (!iter->second_pass) {
- iter->cpu = -1;
- iter->second_pass = true;
- } else
- return NULL;
+ if (!iter->second_pass) {
+ iter->cpu = -1;
+ iter->second_pass = true;
+ } else
+ return NULL;
#else
- return NULL;
+ return NULL;
#endif
+ }
}
return iter;
}
+static void *timer_list_start(struct seq_file *file, loff_t *offset)
+{
+ struct timer_list_iter *iter = file->private;
+
+ if (!*offset)
+ iter->now = ktime_to_ns(ktime_get());
+ iter->cpu = -1;
+ iter->second_pass = false;
+ return move_iter(iter, *offset);
+}
+
static void *timer_list_next(struct seq_file *file, void *v, loff_t *offset)
{
struct timer_list_iter *iter = file->private;
- iter->cpu = cpumask_next(iter->cpu, cpu_online_mask);
++*offset;
- return timer_list_start(file, offset);
+ return move_iter(iter, 1);
}
static void timer_list_stop(struct seq_file *seq, void *v)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 67708f46baa..a6d098c6df3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1441,12 +1441,22 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
* the hashes are freed with call_rcu_sched().
*/
static int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
struct ftrace_hash *filter_hash;
struct ftrace_hash *notrace_hash;
int ret;
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ /*
+ * There's a small race when adding ops that the ftrace handler
+ * that wants regs, may be called without them. We can not
+ * allow that handler to be called if regs is NULL.
+ */
+ if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
+ return 0;
+#endif
+
filter_hash = rcu_dereference_raw_notrace(ops->filter_hash);
notrace_hash = rcu_dereference_raw_notrace(ops->notrace_hash);
@@ -2159,12 +2169,57 @@ static cycle_t ftrace_update_time;
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;
-static int ops_traces_mod(struct ftrace_ops *ops)
+static inline int ops_traces_mod(struct ftrace_ops *ops)
{
- struct ftrace_hash *hash;
+ /*
+ * Filter_hash being empty will default to trace module.
+ * But notrace hash requires a test of individual module functions.
+ */
+ return ftrace_hash_empty(ops->filter_hash) &&
+ ftrace_hash_empty(ops->notrace_hash);
+}
+
+/*
+ * Check if the current ops references the record.
+ *
+ * If the ops traces all functions, then it was already accounted for.
+ * If the ops does not trace the current record function, skip it.
+ * If the ops ignores the function via notrace filter, skip it.
+ */
+static inline bool
+ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
+{
+ /* If ops isn't enabled, ignore it */
+ if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+ return 0;
+
+ /* If ops traces all mods, we already accounted for it */
+ if (ops_traces_mod(ops))
+ return 0;
+
+ /* The function must be in the filter */
+ if (!ftrace_hash_empty(ops->filter_hash) &&
+ !ftrace_lookup_ip(ops->filter_hash, rec->ip))
+ return 0;
+
+ /* If in notrace hash, we ignore it too */
+ if (ftrace_lookup_ip(ops->notrace_hash, rec->ip))
+ return 0;
- hash = ops->filter_hash;
- return ftrace_hash_empty(hash);
+ return 1;
+}
+
+static int referenced_filters(struct dyn_ftrace *rec)
+{
+ struct ftrace_ops *ops;
+ int cnt = 0;
+
+ for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
+ if (ops_references_rec(ops, rec))
+ cnt++;
+ }
+
+ return cnt;
}
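A toy model (invented ops and symbols, userspace only) of how the per-record count is now assembled: ops that trace every module function contribute through the base ref count, while filtered ops only count for records their filter actually matches, mirroring referenced_filters().

        #include <stdio.h>
        #include <string.h>

        struct toy_ops {
                int enabled;
                const char *filter;     /* NULL = traces everything */
        };

        static int refs_for(const char *func, struct toy_ops *ops, int nr_ops)
        {
                int base = 0, extra = 0, i;

                for (i = 0; i < nr_ops; i++) {
                        if (!ops[i].enabled)
                                continue;
                        if (!ops[i].filter)                     /* ops_traces_mod() case */
                                base++;
                        else if (!strcmp(ops[i].filter, func))
                                extra++;                        /* referenced_filters() case */
                }
                return base + extra;
        }

        int main(void)
        {
                struct toy_ops ops[] = {
                        { 1, NULL },            /* function tracer, no filter  */
                        { 1, "func_a" },        /* ops filtering on one symbol */
                };

                printf("func_a=%d func_b=%d\n",
                       refs_for("func_a", ops, 2), refs_for("func_b", ops, 2));
                /* prints func_a=2 func_b=1 */
                return 0;
        }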
static int ftrace_update_code(struct module *mod)
@@ -2173,6 +2228,7 @@ static int ftrace_update_code(struct module *mod)
struct dyn_ftrace *p;
cycle_t start, stop;
unsigned long ref = 0;
+ bool test = false;
int i;
/*
@@ -2186,9 +2242,12 @@ static int ftrace_update_code(struct module *mod)
for (ops = ftrace_ops_list;
ops != &ftrace_list_end; ops = ops->next) {
- if (ops->flags & FTRACE_OPS_FL_ENABLED &&
- ops_traces_mod(ops))
- ref++;
+ if (ops->flags & FTRACE_OPS_FL_ENABLED) {
+ if (ops_traces_mod(ops))
+ ref++;
+ else
+ test = true;
+ }
}
}
@@ -2198,12 +2257,16 @@ static int ftrace_update_code(struct module *mod)
for (pg = ftrace_new_pgs; pg; pg = pg->next) {
for (i = 0; i < pg->index; i++) {
+ int cnt = ref;
+
/* If something went wrong, bail without enabling anything */
if (unlikely(ftrace_disabled))
return -1;
p = &pg->records[i];
- p->flags = ref;
+ if (test)
+ cnt += referenced_filters(p);
+ p->flags = cnt;
/*
* Do the initial record conversion from mcount jump
@@ -2223,7 +2286,7 @@ static int ftrace_update_code(struct module *mod)
* conversion puts the module to the correct state, thus
* passing the ftrace_make_call check.
*/
- if (ftrace_start_up && ref) {
+ if (ftrace_start_up && cnt) {
int failed = __ftrace_replace_code(p, 1);
if (failed)
ftrace_bug(failed, p->ip);
@@ -3374,6 +3437,12 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
return add_hash_entry(hash, ip);
}
+static void ftrace_ops_update_code(struct ftrace_ops *ops)
+{
+ if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
+ ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+}
+
static int
ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
unsigned long ip, int remove, int reset, int enable)
@@ -3416,9 +3485,8 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
mutex_lock(&ftrace_lock);
ret = ftrace_hash_move(ops, enable, orig_hash, hash);
- if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED
- && ftrace_enabled)
- ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+ if (!ret)
+ ftrace_ops_update_code(ops);
mutex_unlock(&ftrace_lock);
@@ -3645,9 +3713,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
mutex_lock(&ftrace_lock);
ret = ftrace_hash_move(iter->ops, filter_hash,
orig_hash, iter->hash);
- if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED)
- && ftrace_enabled)
- ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+ if (!ret)
+ ftrace_ops_update_code(iter->ops);
mutex_unlock(&ftrace_lock);
}
@@ -4218,7 +4285,7 @@ static inline void ftrace_startup_enable(int command) { }
# define ftrace_shutdown_sysctl() do { } while (0)
static inline int
-ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
+ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
return 1;
}
@@ -4241,7 +4308,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
do_for_each_ftrace_op(op, ftrace_control_list) {
if (!(op->flags & FTRACE_OPS_FL_STUB) &&
!ftrace_function_local_disabled(op) &&
- ftrace_ops_test(op, ip))
+ ftrace_ops_test(op, ip, regs))
op->func(ip, parent_ip, op, regs);
} while_for_each_ftrace_op(op);
trace_recursion_clear(TRACE_CONTROL_BIT);
@@ -4274,7 +4341,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
*/
preempt_disable_notrace();
do_for_each_ftrace_op(op, ftrace_ops_list) {
- if (ftrace_ops_test(op, ip))
+ if (ftrace_ops_test(op, ip, regs))
op->func(ip, parent_ip, op, regs);
} while_for_each_ftrace_op(op);
preempt_enable_notrace();
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3f2477713ac..496f94d5769 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -243,20 +243,25 @@ int filter_current_check_discard(struct ring_buffer *buffer,
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);
-cycle_t ftrace_now(int cpu)
+cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
u64 ts;
/* Early boot up does not have a buffer yet */
- if (!global_trace.trace_buffer.buffer)
+ if (!buf->buffer)
return trace_clock_local();
- ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
- ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);
+ ts = ring_buffer_time_stamp(buf->buffer, cpu);
+ ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
return ts;
}
+cycle_t ftrace_now(int cpu)
+{
+ return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
+}
+
/**
* tracing_is_enabled - Show if global_trace has been disabled
*
@@ -1211,7 +1216,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
/* Make sure all commits have finished */
synchronize_sched();
- buf->time_start = ftrace_now(buf->cpu);
+ buf->time_start = buffer_ftrace_now(buf, buf->cpu);
for_each_online_cpu(cpu)
ring_buffer_reset_cpu(buffer, cpu);
@@ -1219,23 +1224,17 @@ void tracing_reset_online_cpus(struct trace_buffer *buf)
ring_buffer_record_enable(buffer);
}
-void tracing_reset_current(int cpu)
-{
- tracing_reset(&global_trace.trace_buffer, cpu);
-}
-
+/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
struct trace_array *tr;
- mutex_lock(&trace_types_lock);
list_for_each_entry(tr, &ftrace_trace_arrays, list) {
tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
tracing_reset_online_cpus(&tr->max_buffer);
#endif
}
- mutex_unlock(&trace_types_lock);
}
#define SAVED_CMDLINES 128
@@ -2843,6 +2842,17 @@ static int s_show(struct seq_file *m, void *v)
return 0;
}
+/*
+ * Should be used after trace_array_get(), trace_types_lock
+ * ensures that i_cdev was already initialized.
+ */
+static inline int tracing_get_cpu(struct inode *inode)
+{
+ if (inode->i_cdev) /* See trace_create_cpu_file() */
+ return (long)inode->i_cdev - 1;
+ return RING_BUFFER_ALL_CPUS;
+}
+
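Sketch of the encoding that tracing_get_cpu() decodes. It assumes, as the "See trace_create_cpu_file()" comment suggests, that per-cpu files stash cpu + 1 in i_cdev so that a NULL i_cdev can keep meaning "all cpus"; the types and names below are simplified userspace stand-ins.

        #include <stdio.h>

        #define RING_BUFFER_ALL_CPUS -1

        struct fake_inode { void *i_cdev; };

        static void encode_cpu(struct fake_inode *inode, long cpu)
        {
                inode->i_cdev = (void *)(cpu + 1);      /* keep NULL reserved for "all cpus" */
        }

        static int decode_cpu(struct fake_inode *inode)
        {
                if (inode->i_cdev)
                        return (long)inode->i_cdev - 1;
                return RING_BUFFER_ALL_CPUS;
        }

        int main(void)
        {
                struct fake_inode per_cpu = { 0 }, global = { 0 };

                encode_cpu(&per_cpu, 3);
                printf("%d %d\n", decode_cpu(&per_cpu), decode_cpu(&global));   /* 3 -1 */
                return 0;
        }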
static const struct seq_operations tracer_seq_ops = {
.start = s_start,
.next = s_next,
@@ -2851,9 +2861,9 @@ static const struct seq_operations tracer_seq_ops = {
};
static struct trace_iterator *
-__tracing_open(struct trace_array *tr, struct trace_cpu *tc,
- struct inode *inode, struct file *file, bool snapshot)
+__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
+ struct trace_array *tr = inode->i_private;
struct trace_iterator *iter;
int cpu;
@@ -2894,8 +2904,8 @@ __tracing_open(struct trace_array *tr, struct trace_cpu *tc,
iter->trace_buffer = &tr->trace_buffer;
iter->snapshot = snapshot;
iter->pos = -1;
+ iter->cpu_file = tracing_get_cpu(inode);
mutex_init(&iter->mutex);
- iter->cpu_file = tc->cpu;
/* Notify the tracer early; before we stop tracing. */
if (iter->trace && iter->trace->open)
@@ -2971,44 +2981,22 @@ static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
filp->private_data = inode->i_private;
return 0;
-
-}
-
-static int tracing_open_generic_tc(struct inode *inode, struct file *filp)
-{
- struct trace_cpu *tc = inode->i_private;
- struct trace_array *tr = tc->tr;
-
- if (tracing_disabled)
- return -ENODEV;
-
- if (trace_array_get(tr) < 0)
- return -ENODEV;
-
- filp->private_data = inode->i_private;
-
- return 0;
-
}
static int tracing_release(struct inode *inode, struct file *file)
{
+ struct trace_array *tr = inode->i_private;
struct seq_file *m = file->private_data;
struct trace_iterator *iter;
- struct trace_array *tr;
int cpu;
- /* Writes do not use seq_file, need to grab tr from inode */
if (!(file->f_mode & FMODE_READ)) {
- struct trace_cpu *tc = inode->i_private;
-
- trace_array_put(tc->tr);
+ trace_array_put(tr);
return 0;
}
+ /* Writes do not use seq_file */
iter = m->private;
- tr = iter->tr;
-
mutex_lock(&trace_types_lock);
for_each_tracing_cpu(cpu) {
@@ -3044,15 +3032,6 @@ static int tracing_release_generic_tr(struct inode *inode, struct file *file)
return 0;
}
-static int tracing_release_generic_tc(struct inode *inode, struct file *file)
-{
- struct trace_cpu *tc = inode->i_private;
- struct trace_array *tr = tc->tr;
-
- trace_array_put(tr);
- return 0;
-}
-
static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
struct trace_array *tr = inode->i_private;
@@ -3064,8 +3043,7 @@ static int tracing_single_release_tr(struct inode *inode, struct file *file)
static int tracing_open(struct inode *inode, struct file *file)
{
- struct trace_cpu *tc = inode->i_private;
- struct trace_array *tr = tc->tr;
+ struct trace_array *tr = inode->i_private;
struct trace_iterator *iter;
int ret = 0;
@@ -3073,16 +3051,17 @@ static int tracing_open(struct inode *inode, struct file *file)
return -ENODEV;
/* If this file was open for write, then erase contents */
- if ((file->f_mode & FMODE_WRITE) &&
- (file->f_flags & O_TRUNC)) {
- if (tc->cpu == RING_BUFFER_ALL_CPUS)
+ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+ int cpu = tracing_get_cpu(inode);
+
+ if (cpu == RING_BUFFER_ALL_CPUS)
tracing_reset_online_cpus(&tr->trace_buffer);
else
- tracing_reset(&tr->trace_buffer, tc->cpu);
+ tracing_reset(&tr->trace_buffer, cpu);
}
if (file->f_mode & FMODE_READ) {
- iter = __tracing_open(tr, tc, inode, file, false);
+ iter = __tracing_open(inode, file, false);
if (IS_ERR(iter))
ret = PTR_ERR(iter);
else if (trace_flags & TRACE_ITER_LATENCY_FMT)
@@ -3948,8 +3927,7 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
- struct trace_cpu *tc = inode->i_private;
- struct trace_array *tr = tc->tr;
+ struct trace_array *tr = inode->i_private;
struct trace_iterator *iter;
int ret = 0;
@@ -3995,9 +3973,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
if (trace_clocks[tr->clock_id].in_ns)
iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
- iter->cpu_file = tc->cpu;
- iter->tr = tc->tr;
- iter->trace_buffer = &tc->tr->trace_buffer;
+ iter->tr = tr;
+ iter->trace_buffer = &tr->trace_buffer;
+ iter->cpu_file = tracing_get_cpu(inode);
mutex_init(&iter->mutex);
filp->private_data = iter;
@@ -4020,8 +3998,7 @@ fail:
static int tracing_release_pipe(struct inode *inode, struct file *file)
{
struct trace_iterator *iter = file->private_data;
- struct trace_cpu *tc = inode->i_private;
- struct trace_array *tr = tc->tr;
+ struct trace_array *tr = inode->i_private;
mutex_lock(&trace_types_lock);
@@ -4174,6 +4151,7 @@ waitagain:
memset(&iter->seq, 0,
sizeof(struct trace_iterator) -
offsetof(struct trace_iterator, seq));
+ cpumask_clear(iter->started);
iter->pos = -1;
trace_event_read_lock();
@@ -4374,15 +4352,16 @@ static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- struct trace_cpu *tc = filp->private_data;
- struct trace_array *tr = tc->tr;
+ struct inode *inode = file_inode(filp);
+ struct trace_array *tr = inode->i_private;
+ int cpu = tracing_get_cpu(inode);
char buf[64];
int r = 0;
ssize_t ret;
mutex_lock(&trace_types_lock);
- if (tc->cpu == RING_BUFFER_ALL_CPUS) {
+ if (cpu == RING_BUFFER_ALL_CPUS) {
int cpu, buf_size_same;
unsigned long size;
@@ -4409,7 +4388,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
} else
r = sprintf(buf, "X\n");
} else
- r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);
+ r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
mutex_unlock(&trace_types_lock);
@@ -4421,7 +4400,8 @@ static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
- struct trace_cpu *tc = filp->private_data;
+ struct inode *inode = file_inode(filp);
+ struct trace_array *tr = inode->i_private;
unsigned long val;
int ret;
@@ -4435,8 +4415,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
/* value is in KB */
val <<= 10;
-
- ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
+ ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
if (ret < 0)
return ret;
@@ -4490,7 +4469,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
/* disable tracing ? */
if (trace_flags & TRACE_ITER_STOP_ON_FREE)
- tracing_off();
+ tracer_tracing_off(tr);
/* resize the ring buffer to 0 */
tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
@@ -4655,12 +4634,12 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
* New clock may not be consistent with the previous clock.
* Reset the buffer so that it doesn't have incomparable timestamps.
*/
- tracing_reset_online_cpus(&global_trace.trace_buffer);
+ tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
- tracing_reset_online_cpus(&global_trace.max_buffer);
+ tracing_reset_online_cpus(&tr->max_buffer);
#endif
mutex_unlock(&trace_types_lock);
@@ -4697,8 +4676,7 @@ struct ftrace_buffer_info {
#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
- struct trace_cpu *tc = inode->i_private;
- struct trace_array *tr = tc->tr;
+ struct trace_array *tr = inode->i_private;
struct trace_iterator *iter;
struct seq_file *m;
int ret = 0;
@@ -4707,7 +4685,7 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
return -ENODEV;
if (file->f_mode & FMODE_READ) {
- iter = __tracing_open(tr, tc, inode, file, true);
+ iter = __tracing_open(inode, file, true);
if (IS_ERR(iter))
ret = PTR_ERR(iter);
} else {
@@ -4724,8 +4702,8 @@ static int tracing_snapshot_open(struct inode *inode, struct file *file)
ret = 0;
iter->tr = tr;
- iter->trace_buffer = &tc->tr->max_buffer;
- iter->cpu_file = tc->cpu;
+ iter->trace_buffer = &tr->max_buffer;
+ iter->cpu_file = tracing_get_cpu(inode);
m->private = iter;
file->private_data = m;
}
@@ -4884,11 +4862,11 @@ static const struct file_operations tracing_pipe_fops = {
};
static const struct file_operations tracing_entries_fops = {
- .open = tracing_open_generic_tc,
+ .open = tracing_open_generic_tr,
.read = tracing_entries_read,
.write = tracing_entries_write,
.llseek = generic_file_llseek,
- .release = tracing_release_generic_tc,
+ .release = tracing_release_generic_tr,
};
static const struct file_operations tracing_total_entries_fops = {
@@ -4940,8 +4918,7 @@ static const struct file_operations snapshot_raw_fops = {
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
- struct trace_cpu *tc = inode->i_private;
- struct trace_array *tr = tc->tr;
+ struct trace_array *tr = inode->i_private;
struct ftrace_buffer_info *info;
int ret;
@@ -4960,7 +4937,7 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
mutex_lock(&trace_types_lock);
info->iter.tr = tr;
- info->iter.cpu_file = tc->cpu;
+ info->iter.cpu_file = tracing_get_cpu(inode);
info->iter.trace = tr->current_trace;
info->iter.trace_buffer = &tr->trace_buffer;
info->spare = NULL;
@@ -5277,14 +5254,14 @@ static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
size_t count, loff_t *ppos)
{
- struct trace_cpu *tc = filp->private_data;
- struct trace_array *tr = tc->tr;
+ struct inode *inode = file_inode(filp);
+ struct trace_array *tr = inode->i_private;
struct trace_buffer *trace_buf = &tr->trace_buffer;
+ int cpu = tracing_get_cpu(inode);
struct trace_seq *s;
unsigned long cnt;
unsigned long long t;
unsigned long usec_rem;
- int cpu = tc->cpu;
s = kmalloc(sizeof(*s), GFP_KERNEL);
if (!s)
@@ -5337,10 +5314,10 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
}
static const struct file_operations tracing_stats_fops = {
- .open = tracing_open_generic_tc,
+ .open = tracing_open_generic_tr,
.read = tracing_stats_read,
.llseek = generic_file_llseek,
- .release = tracing_release_generic_tc,
+ .release = tracing_release_generic_tr,
};
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -5529,10 +5506,20 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
return tr->percpu_dir;
}
+static struct dentry *
+trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
+ void *data, long cpu, const struct file_operations *fops)
+{
+ struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
+
+ if (ret) /* See tracing_get_cpu() */
+ ret->d_inode->i_cdev = (void *)(cpu + 1);
+ return ret;
+}
+
static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
- struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
struct dentry *d_cpu;
char cpu_dir[30]; /* 30 characters should be more than enough */
@@ -5548,28 +5535,28 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
}
/* per cpu trace_pipe */
- trace_create_file("trace_pipe", 0444, d_cpu,
- (void *)&data->trace_cpu, &tracing_pipe_fops);
+ trace_create_cpu_file("trace_pipe", 0444, d_cpu,
+ tr, cpu, &tracing_pipe_fops);
/* per cpu trace */
- trace_create_file("trace", 0644, d_cpu,
- (void *)&data->trace_cpu, &tracing_fops);
+ trace_create_cpu_file("trace", 0644, d_cpu,
+ tr, cpu, &tracing_fops);
- trace_create_file("trace_pipe_raw", 0444, d_cpu,
- (void *)&data->trace_cpu, &tracing_buffers_fops);
+ trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
+ tr, cpu, &tracing_buffers_fops);
- trace_create_file("stats", 0444, d_cpu,
- (void *)&data->trace_cpu, &tracing_stats_fops);
+ trace_create_cpu_file("stats", 0444, d_cpu,
+ tr, cpu, &tracing_stats_fops);
- trace_create_file("buffer_size_kb", 0444, d_cpu,
- (void *)&data->trace_cpu, &tracing_entries_fops);
+ trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
+ tr, cpu, &tracing_entries_fops);
#ifdef CONFIG_TRACER_SNAPSHOT
- trace_create_file("snapshot", 0644, d_cpu,
- (void *)&data->trace_cpu, &snapshot_fops);
+ trace_create_cpu_file("snapshot", 0644, d_cpu,
+ tr, cpu, &snapshot_fops);
- trace_create_file("snapshot_raw", 0444, d_cpu,
- (void *)&data->trace_cpu, &snapshot_raw_fops);
+ trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
+ tr, cpu, &snapshot_raw_fops);
#endif
}
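trace_create_cpu_file() above stores the owning trace_array in i_private and encodes the CPU number in the otherwise unused i_cdev field, offset by one so that a NULL i_cdev still means "no specific CPU". The matching decoder, tracing_get_cpu(), is not shown in this hunk; a minimal sketch of what it has to look like given that encoding (the exact helper in trace.c may differ in detail):

#include <linux/fs.h>
#include <linux/ring_buffer.h>

static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev)			/* set by trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;	/* undo the +1 offset */
	return RING_BUFFER_ALL_CPUS;		/* top-level files: all CPUs */
}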
@@ -5878,17 +5865,6 @@ struct dentry *trace_instance_dir;
static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
-static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
-{
- int cpu;
-
- for_each_tracing_cpu(cpu) {
- memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu));
- per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu;
- per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr;
- }
-}
-
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
@@ -5906,8 +5882,6 @@ allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size
return -ENOMEM;
}
- init_trace_buffers(tr, buf);
-
/* Allocate the first page for all buffers */
set_buffer_entries(&tr->trace_buffer,
ring_buffer_size(tr->trace_buffer.buffer, 0));
@@ -5974,10 +5948,6 @@ static int new_instance_create(const char *name)
if (allocate_trace_buffers(tr, trace_buf_size) < 0)
goto out_free_tr;
- /* Holder for file callbacks */
- tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
- tr->trace_cpu.tr = tr;
-
tr->dir = debugfs_create_dir(name, trace_instance_dir);
if (!tr->dir)
goto out_free_tr;
@@ -6132,13 +6102,13 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
tr, &tracing_iter_fops);
trace_create_file("trace", 0644, d_tracer,
- (void *)&tr->trace_cpu, &tracing_fops);
+ tr, &tracing_fops);
trace_create_file("trace_pipe", 0444, d_tracer,
- (void *)&tr->trace_cpu, &tracing_pipe_fops);
+ tr, &tracing_pipe_fops);
trace_create_file("buffer_size_kb", 0644, d_tracer,
- (void *)&tr->trace_cpu, &tracing_entries_fops);
+ tr, &tracing_entries_fops);
trace_create_file("buffer_total_size_kb", 0444, d_tracer,
tr, &tracing_total_entries_fops);
@@ -6153,11 +6123,11 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
&trace_clock_fops);
trace_create_file("tracing_on", 0644, d_tracer,
- tr, &rb_simple_fops);
+ tr, &rb_simple_fops);
#ifdef CONFIG_TRACER_SNAPSHOT
trace_create_file("snapshot", 0644, d_tracer,
- (void *)&tr->trace_cpu, &snapshot_fops);
+ tr, &snapshot_fops);
#endif
for_each_tracing_cpu(cpu)
@@ -6451,10 +6421,6 @@ __init static int tracer_alloc_buffers(void)
global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
- /* Holder for file callbacks */
- global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
- global_trace.trace_cpu.tr = &global_trace;
-
INIT_LIST_HEAD(&global_trace.systems);
INIT_LIST_HEAD(&global_trace.events);
list_add(&global_trace.list, &ftrace_trace_arrays);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index e7d643b8a90..fe39acd4c1a 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -130,19 +130,12 @@ enum trace_flag_type {
struct trace_array;
-struct trace_cpu {
- struct trace_array *tr;
- struct dentry *dir;
- int cpu;
-};
-
/*
* The CPU trace array - it consists of thousands of trace entries
* plus some other descriptor data: (for example which task started
* the trace, etc.)
*/
struct trace_array_cpu {
- struct trace_cpu trace_cpu;
atomic_t disabled;
void *buffer_page; /* ring buffer spare */
@@ -196,7 +189,6 @@ struct trace_array {
bool allocated_snapshot;
#endif
int buffer_disabled;
- struct trace_cpu trace_cpu; /* place holder */
#ifdef CONFIG_FTRACE_SYSCALLS
int sys_refcount_enter;
int sys_refcount_exit;
@@ -1030,6 +1022,9 @@ extern struct list_head ftrace_events;
extern const char *__start___trace_bprintk_fmt[];
extern const char *__stop___trace_bprintk_fmt[];
+extern const char *__start___tracepoint_str[];
+extern const char *__stop___tracepoint_str[];
+
void trace_printk_init_buffers(void);
void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 898f868833f..29a7ebcfb42 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -409,33 +409,42 @@ static void put_system(struct ftrace_subsystem_dir *dir)
mutex_unlock(&event_mutex);
}
-/*
- * Open and update trace_array ref count.
- * Must have the current trace_array passed to it.
- */
-static int tracing_open_generic_file(struct inode *inode, struct file *filp)
+static void remove_subsystem(struct ftrace_subsystem_dir *dir)
{
- struct ftrace_event_file *file = inode->i_private;
- struct trace_array *tr = file->tr;
- int ret;
+ if (!dir)
+ return;
- if (trace_array_get(tr) < 0)
- return -ENODEV;
+ if (!--dir->nr_events) {
+ debugfs_remove_recursive(dir->entry);
+ list_del(&dir->list);
+ __put_system_dir(dir);
+ }
+}
- ret = tracing_open_generic(inode, filp);
- if (ret < 0)
- trace_array_put(tr);
- return ret;
+static void *event_file_data(struct file *filp)
+{
+ return ACCESS_ONCE(file_inode(filp)->i_private);
}
-static int tracing_release_generic_file(struct inode *inode, struct file *filp)
+static void remove_event_file_dir(struct ftrace_event_file *file)
{
- struct ftrace_event_file *file = inode->i_private;
- struct trace_array *tr = file->tr;
+ struct dentry *dir = file->dir;
+ struct dentry *child;
- trace_array_put(tr);
+ if (dir) {
+ spin_lock(&dir->d_lock); /* probably unneeded */
+ list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) {
+ if (child->d_inode) /* probably unneeded */
+ child->d_inode->i_private = NULL;
+ }
+ spin_unlock(&dir->d_lock);
- return 0;
+ debugfs_remove_recursive(dir);
+ }
+
+ list_del(&file->list);
+ remove_subsystem(file->system);
+ kmem_cache_free(file_cachep, file);
}
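remove_event_file_dir() clears i_private on every child dentry before tearing the directory down, so an already-open file descriptor can outlive the event it refers to. Readers therefore re-resolve the event through event_file_data() under event_mutex and cope with a NULL result, as event_enable_read() and event_enable_write() below do. A stripped-down sketch of that read-side pattern (the helper name is made up):

static int event_do_something(struct file *filp)
{
	struct ftrace_event_file *file;
	int ret = -ENODEV;

	mutex_lock(&event_mutex);
	file = event_file_data(filp);	/* NULL once the event is gone */
	if (file)
		ret = 0;		/* ... safely use 'file' here ... */
	mutex_unlock(&event_mutex);

	return ret;
}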
/*
@@ -679,15 +688,25 @@ static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_file *file = filp->private_data;
+ struct ftrace_event_file *file;
+ unsigned long flags;
char buf[4] = "0";
- if (file->flags & FTRACE_EVENT_FL_ENABLED &&
- !(file->flags & FTRACE_EVENT_FL_SOFT_DISABLED))
+ mutex_lock(&event_mutex);
+ file = event_file_data(filp);
+ if (likely(file))
+ flags = file->flags;
+ mutex_unlock(&event_mutex);
+
+ if (!file)
+ return -ENODEV;
+
+ if (flags & FTRACE_EVENT_FL_ENABLED &&
+ !(flags & FTRACE_EVENT_FL_SOFT_DISABLED))
strcpy(buf, "1");
- if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
- file->flags & FTRACE_EVENT_FL_SOFT_MODE)
+ if (flags & FTRACE_EVENT_FL_SOFT_DISABLED ||
+ flags & FTRACE_EVENT_FL_SOFT_MODE)
strcat(buf, "*");
strcat(buf, "\n");
@@ -699,13 +718,10 @@ static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_file *file = filp->private_data;
+ struct ftrace_event_file *file;
unsigned long val;
int ret;
- if (!file)
- return -EINVAL;
-
ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
if (ret)
return ret;
@@ -717,8 +733,11 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
switch (val) {
case 0:
case 1:
+ ret = -ENODEV;
mutex_lock(&event_mutex);
- ret = ftrace_event_enable_disable(file, val);
+ file = event_file_data(filp);
+ if (likely(file))
+ ret = ftrace_event_enable_disable(file, val);
mutex_unlock(&event_mutex);
break;
@@ -825,7 +844,7 @@ enum {
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
- struct ftrace_event_call *call = m->private;
+ struct ftrace_event_call *call = event_file_data(m->private);
struct list_head *common_head = &ftrace_common_fields;
struct list_head *head = trace_get_fields(call);
struct list_head *node = v;
@@ -857,7 +876,7 @@ static void *f_next(struct seq_file *m, void *v, loff_t *pos)
static int f_show(struct seq_file *m, void *v)
{
- struct ftrace_event_call *call = m->private;
+ struct ftrace_event_call *call = event_file_data(m->private);
struct ftrace_event_field *field;
const char *array_descriptor;
@@ -910,6 +929,11 @@ static void *f_start(struct seq_file *m, loff_t *pos)
void *p = (void *)FORMAT_HEADER;
loff_t l = 0;
+ /* ->stop() is called even if ->start() fails */
+ mutex_lock(&event_mutex);
+ if (!event_file_data(m->private))
+ return ERR_PTR(-ENODEV);
+
while (l < *pos && p)
p = f_next(m, p, &l);
@@ -918,6 +942,7 @@ static void *f_start(struct seq_file *m, loff_t *pos)
static void f_stop(struct seq_file *m, void *p)
{
+ mutex_unlock(&event_mutex);
}
static const struct seq_operations trace_format_seq_ops = {
@@ -929,7 +954,6 @@ static const struct seq_operations trace_format_seq_ops = {
static int trace_format_open(struct inode *inode, struct file *file)
{
- struct ftrace_event_call *call = inode->i_private;
struct seq_file *m;
int ret;
@@ -938,7 +962,7 @@ static int trace_format_open(struct inode *inode, struct file *file)
return ret;
m = file->private_data;
- m->private = call;
+ m->private = file;
return 0;
}
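The format file now relies on the seq_file guarantee that ->stop() runs even when ->start() fails: f_start() takes event_mutex and returns ERR_PTR(-ENODEV) if the event has been removed, and f_stop() drops the mutex unconditionally. The same bracket pattern in isolation, with generic made-up names (ex_data() and ex_lookup() are placeholders):

#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/err.h>

static DEFINE_MUTEX(ex_mutex);

static void *ex_start(struct seq_file *m, loff_t *pos)
{
	/* ->stop() is called even if we fail here, so the unlock is never skipped */
	mutex_lock(&ex_mutex);
	if (!ex_data(m->private))
		return ERR_PTR(-ENODEV);
	return ex_lookup(m->private, *pos);
}

static void ex_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&ex_mutex);
}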
@@ -946,14 +970,18 @@ static int trace_format_open(struct inode *inode, struct file *file)
static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
- struct ftrace_event_call *call = filp->private_data;
+ int id = (long)event_file_data(filp);
char buf[32];
int len;
if (*ppos)
return 0;
- len = sprintf(buf, "%d\n", call->event.type);
+ if (unlikely(!id))
+ return -ENODEV;
+
+ len = sprintf(buf, "%d\n", id);
+
return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}
@@ -961,21 +989,28 @@ static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_call *call = filp->private_data;
+ struct ftrace_event_call *call;
struct trace_seq *s;
- int r;
+ int r = -ENODEV;
if (*ppos)
return 0;
s = kmalloc(sizeof(*s), GFP_KERNEL);
+
if (!s)
return -ENOMEM;
trace_seq_init(s);
- print_event_filter(call, s);
- r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
+ mutex_lock(&event_mutex);
+ call = event_file_data(filp);
+ if (call)
+ print_event_filter(call, s);
+ mutex_unlock(&event_mutex);
+
+ if (call)
+ r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len);
kfree(s);
@@ -986,9 +1021,9 @@ static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
loff_t *ppos)
{
- struct ftrace_event_call *call = filp->private_data;
+ struct ftrace_event_call *call;
char *buf;
- int err;
+ int err = -ENODEV;
if (cnt >= PAGE_SIZE)
return -EINVAL;
@@ -1003,7 +1038,12 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
}
buf[cnt] = '\0';
- err = apply_event_filter(call, buf);
+ mutex_lock(&event_mutex);
+ call = event_file_data(filp);
+ if (call)
+ err = apply_event_filter(call, buf);
+ mutex_unlock(&event_mutex);
+
free_page((unsigned long) buf);
if (err < 0)
return err;
@@ -1225,10 +1265,9 @@ static const struct file_operations ftrace_set_event_fops = {
};
static const struct file_operations ftrace_enable_fops = {
- .open = tracing_open_generic_file,
+ .open = tracing_open_generic,
.read = event_enable_read,
.write = event_enable_write,
- .release = tracing_release_generic_file,
.llseek = default_llseek,
};
@@ -1240,7 +1279,6 @@ static const struct file_operations ftrace_event_format_fops = {
};
static const struct file_operations ftrace_event_id_fops = {
- .open = tracing_open_generic,
.read = event_id_read,
.llseek = default_llseek,
};
@@ -1488,8 +1526,8 @@ event_create_dir(struct dentry *parent,
#ifdef CONFIG_PERF_EVENTS
if (call->event.type && call->class->reg)
- trace_create_file("id", 0444, file->dir, call,
- id);
+ trace_create_file("id", 0444, file->dir,
+ (void *)(long)call->event.type, id);
#endif
/*
@@ -1514,33 +1552,16 @@ event_create_dir(struct dentry *parent,
return 0;
}
-static void remove_subsystem(struct ftrace_subsystem_dir *dir)
-{
- if (!dir)
- return;
-
- if (!--dir->nr_events) {
- debugfs_remove_recursive(dir->entry);
- list_del(&dir->list);
- __put_system_dir(dir);
- }
-}
-
static void remove_event_from_tracers(struct ftrace_event_call *call)
{
struct ftrace_event_file *file;
struct trace_array *tr;
do_for_each_event_file_safe(tr, file) {
-
if (file->event_call != call)
continue;
- list_del(&file->list);
- debugfs_remove_recursive(file->dir);
- remove_subsystem(file->system);
- kmem_cache_free(file_cachep, file);
-
+ remove_event_file_dir(file);
/*
* The do_for_each_event_file_safe() is
* a double loop. After finding the call for this
@@ -1692,16 +1713,53 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
destroy_preds(call);
}
+static int probe_remove_event_call(struct ftrace_event_call *call)
+{
+ struct trace_array *tr;
+ struct ftrace_event_file *file;
+
+#ifdef CONFIG_PERF_EVENTS
+ if (call->perf_refcount)
+ return -EBUSY;
+#endif
+ do_for_each_event_file(tr, file) {
+ if (file->event_call != call)
+ continue;
+ /*
+	 * We can't rely on the ftrace_event_enable_disable(enable => 0)
+	 * we are going to do; FTRACE_EVENT_FL_SOFT_MODE can suppress
+ * TRACE_REG_UNREGISTER.
+ */
+ if (file->flags & FTRACE_EVENT_FL_ENABLED)
+ return -EBUSY;
+ /*
+ * The do_for_each_event_file_safe() is
+ * a double loop. After finding the call for this
+ * trace_array, we use break to jump to the next
+ * trace_array.
+ */
+ break;
+ } while_for_each_event_file();
+
+ __trace_remove_event_call(call);
+
+ return 0;
+}
+
/* Remove an event_call */
-void trace_remove_event_call(struct ftrace_event_call *call)
+int trace_remove_event_call(struct ftrace_event_call *call)
{
+ int ret;
+
mutex_lock(&trace_types_lock);
mutex_lock(&event_mutex);
down_write(&trace_event_sem);
- __trace_remove_event_call(call);
+ ret = probe_remove_event_call(call);
up_write(&trace_event_sem);
mutex_unlock(&event_mutex);
mutex_unlock(&trace_types_lock);
+
+ return ret;
}
#define for_each_event(event, start, end) \
@@ -2270,12 +2328,8 @@ __trace_remove_event_dirs(struct trace_array *tr)
{
struct ftrace_event_file *file, *next;
- list_for_each_entry_safe(file, next, &tr->events, list) {
- list_del(&file->list);
- debugfs_remove_recursive(file->dir);
- remove_subsystem(file->system);
- kmem_cache_free(file_cachep, file);
- }
+ list_for_each_entry_safe(file, next, &tr->events, list)
+ remove_event_file_dir(file);
}
static void
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 0c7b75a8acc..97daa8cf958 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -637,17 +637,15 @@ static void append_filter_err(struct filter_parse_state *ps,
free_page((unsigned long) buf);
}
+/* caller must hold event_mutex */
void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
{
- struct event_filter *filter;
+ struct event_filter *filter = call->filter;
- mutex_lock(&event_mutex);
- filter = call->filter;
if (filter && filter->filter_string)
trace_seq_printf(s, "%s\n", filter->filter_string);
else
trace_seq_puts(s, "none\n");
- mutex_unlock(&event_mutex);
}
void print_subsystem_event_filter(struct event_subsystem *system,
@@ -1841,23 +1839,22 @@ static int create_system_filter(struct event_subsystem *system,
return err;
}
+/* caller must hold event_mutex */
int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
{
struct event_filter *filter;
- int err = 0;
-
- mutex_lock(&event_mutex);
+ int err;
if (!strcmp(strstrip(filter_string), "0")) {
filter_disable(call);
filter = call->filter;
if (!filter)
- goto out_unlock;
+ return 0;
RCU_INIT_POINTER(call->filter, NULL);
/* Make sure the filter is not being used */
synchronize_sched();
__free_filter(filter);
- goto out_unlock;
+ return 0;
}
err = create_filter(call, filter_string, true, &filter);
@@ -1884,8 +1881,6 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
__free_filter(tmp);
}
}
-out_unlock:
- mutex_unlock(&event_mutex);
return err;
}
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 3811487e7a7..243f6834d02 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -95,7 +95,7 @@ static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp)
}
static int register_probe_event(struct trace_probe *tp);
-static void unregister_probe_event(struct trace_probe *tp);
+static int unregister_probe_event(struct trace_probe *tp);
static DEFINE_MUTEX(probe_lock);
static LIST_HEAD(probe_list);
@@ -351,9 +351,12 @@ static int unregister_trace_probe(struct trace_probe *tp)
if (trace_probe_is_enabled(tp))
return -EBUSY;
+ /* Will fail if probe is being used by ftrace or perf */
+ if (unregister_probe_event(tp))
+ return -EBUSY;
+
__unregister_trace_probe(tp);
list_del(&tp->list);
- unregister_probe_event(tp);
return 0;
}
@@ -632,7 +635,9 @@ static int release_all_trace_probes(void)
/* TODO: Use batch unregistration */
while (!list_empty(&probe_list)) {
tp = list_entry(probe_list.next, struct trace_probe, list);
- unregister_trace_probe(tp);
+ ret = unregister_trace_probe(tp);
+ if (ret)
+ goto end;
free_trace_probe(tp);
}
@@ -1247,11 +1252,15 @@ static int register_probe_event(struct trace_probe *tp)
return ret;
}
-static void unregister_probe_event(struct trace_probe *tp)
+static int unregister_probe_event(struct trace_probe *tp)
{
+ int ret;
+
/* tp->event is unregistered in trace_remove_event_call() */
- trace_remove_event_call(&tp->call);
- kfree(tp->call.print_fmt);
+ ret = trace_remove_event_call(&tp->call);
+ if (!ret)
+ kfree(tp->call.print_fmt);
+ return ret;
}
/* Make a debugfs interface for controlling probe points */
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index a9077c1b4ad..2900817ba65 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -244,12 +244,31 @@ static const char **find_next(void *v, loff_t *pos)
{
const char **fmt = v;
int start_index;
+ int last_index;
start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt;
if (*pos < start_index)
return __start___trace_bprintk_fmt + *pos;
+ /*
+ * The __tracepoint_str section is treated the same as the
+ * __trace_printk_fmt section. The difference is that the
+ * __trace_printk_fmt section should only be used by trace_printk()
+ * in a debugging environment, because if anything exists in that section
+ * the trace_printk() helper buffers are allocated, which would just
+ * waste space in a production environment.
+ *
+ * The __tracepoint_str sections on the other hand are used by
+ * tracepoints which need to map pointers to their strings to
+ * the ASCII text for userspace.
+ */
+ last_index = start_index;
+ start_index = __stop___tracepoint_str - __start___tracepoint_str;
+
+ if (*pos < last_index + start_index)
+ return __start___tracepoint_str + (*pos - last_index);
+
return find_next_mod_format(start_index, v, fmt, pos);
}
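The seq position is treated as an index into one flat space: the built-in trace_bprintk formats come first, then the __tracepoint_str entries, and anything beyond that falls through to the per-module formats. The same mapping written out as a plain helper (array and count names are illustrative, mirroring find_next() above):

static const char **pick_entry(const char **fmts, loff_t nfmts,
			       const char **strs, loff_t nstrs, loff_t pos)
{
	if (pos < nfmts)
		return fmts + pos;		/* __trace_bprintk_fmt range */
	if (pos < nfmts + nstrs)
		return strs + (pos - nfmts);	/* __tracepoint_str range */
	return NULL;	/* caller continues with module formats */
}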
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index a23d2d71188..272261b5f94 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -70,7 +70,7 @@ struct trace_uprobe {
(sizeof(struct probe_arg) * (n)))
static int register_uprobe_event(struct trace_uprobe *tu);
-static void unregister_uprobe_event(struct trace_uprobe *tu);
+static int unregister_uprobe_event(struct trace_uprobe *tu);
static DEFINE_MUTEX(uprobe_lock);
static LIST_HEAD(uprobe_list);
@@ -164,11 +164,17 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou
}
/* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */
-static void unregister_trace_uprobe(struct trace_uprobe *tu)
+static int unregister_trace_uprobe(struct trace_uprobe *tu)
{
+ int ret;
+
+ ret = unregister_uprobe_event(tu);
+ if (ret)
+ return ret;
+
list_del(&tu->list);
- unregister_uprobe_event(tu);
free_trace_uprobe(tu);
+ return 0;
}
/* Register a trace_uprobe and probe_event */
@@ -181,9 +187,12 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
/* register as an event */
old_tp = find_probe_event(tu->call.name, tu->call.class->system);
- if (old_tp)
+ if (old_tp) {
/* delete old event */
- unregister_trace_uprobe(old_tp);
+ ret = unregister_trace_uprobe(old_tp);
+ if (ret)
+ goto end;
+ }
ret = register_uprobe_event(tu);
if (ret) {
@@ -256,6 +265,8 @@ static int create_trace_uprobe(int argc, char **argv)
group = UPROBE_EVENT_SYSTEM;
if (is_delete) {
+ int ret;
+
if (!event) {
pr_info("Delete command needs an event name.\n");
return -EINVAL;
@@ -269,9 +280,9 @@ static int create_trace_uprobe(int argc, char **argv)
return -ENOENT;
}
/* delete an event */
- unregister_trace_uprobe(tu);
+ ret = unregister_trace_uprobe(tu);
mutex_unlock(&uprobe_lock);
- return 0;
+ return ret;
}
if (argc < 2) {
@@ -408,16 +419,20 @@ fail_address_parse:
return ret;
}
-static void cleanup_all_probes(void)
+static int cleanup_all_probes(void)
{
struct trace_uprobe *tu;
+ int ret = 0;
mutex_lock(&uprobe_lock);
while (!list_empty(&uprobe_list)) {
tu = list_entry(uprobe_list.next, struct trace_uprobe, list);
- unregister_trace_uprobe(tu);
+ ret = unregister_trace_uprobe(tu);
+ if (ret)
+ break;
}
mutex_unlock(&uprobe_lock);
+ return ret;
}
/* Probes listing interfaces */
@@ -462,8 +477,13 @@ static const struct seq_operations probes_seq_op = {
static int probes_open(struct inode *inode, struct file *file)
{
- if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC))
- cleanup_all_probes();
+ int ret;
+
+ if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
+ ret = cleanup_all_probes();
+ if (ret)
+ return ret;
+ }
return seq_open(file, &probes_seq_op);
}
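With unregister_trace_uprobe() now able to fail, truncating the control file no longer silently drops probes that are still in use; the open() itself reports the error. A rough userspace illustration of the visible behaviour (the path assumes debugfs is mounted in the usual place, and the outcome depends on whether any probe is currently enabled):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/tracing/uprobe_events",
		      O_WRONLY | O_TRUNC);

	if (fd < 0)	/* expect EBUSY while any uprobe event is enabled */
		printf("truncate refused: %s\n", strerror(errno));
	else
		close(fd);
	return 0;
}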
@@ -968,12 +988,17 @@ static int register_uprobe_event(struct trace_uprobe *tu)
return ret;
}
-static void unregister_uprobe_event(struct trace_uprobe *tu)
+static int unregister_uprobe_event(struct trace_uprobe *tu)
{
+ int ret;
+
/* tu->event is unregistered in trace_remove_event_call() */
- trace_remove_event_call(&tu->call);
+ ret = trace_remove_event_call(&tu->call);
+ if (ret)
+ return ret;
kfree(tu->call.print_fmt);
tu->call.print_fmt = NULL;
+ return 0;
}
/* Make a trace interface for controling probe points */
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index d8c30db06c5..9064b919a40 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -62,6 +62,9 @@ int create_user_ns(struct cred *new)
kgid_t group = new->egid;
int ret;
+ if (parent_ns->level > 32)
+ return -EUSERS;
+
/*
* Verify that we can not violate the policy of which files
* may be accessed that is specified by the root directory,
@@ -92,6 +95,7 @@ int create_user_ns(struct cred *new)
atomic_set(&ns->count, 1);
/* Leave the new->user_ns reference with the new user namespace. */
ns->parent = parent_ns;
+ ns->level = parent_ns->level + 1;
ns->owner = owner;
ns->group = group;
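create_user_ns() now refuses to nest deeper than 32 levels and records each namespace's depth in ns->level. A rough userspace sketch of the limit: repeatedly unshare the user namespace, mapping the current uid/gid at every level so that the next unshare stays permitted, and expect the loop to stop with EUSERS once the depth limit is reached (error handling trimmed; illustrative only):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

static void write_str(const char *path, const char *s)
{
	int fd = open(path, O_WRONLY);

	if (fd >= 0) {
		if (write(fd, s, strlen(s)) < 0)
			perror(path);
		close(fd);
	}
}

int main(void)
{
	char map[64];
	long uid = getuid(), gid = getgid();
	int depth;

	for (depth = 1; ; depth++) {
		if (unshare(CLONE_NEWUSER) < 0) {
			printf("stopped at depth %d: %s\n", depth, strerror(errno));
			return 0;	/* expect EUSERS once the limit is hit */
		}
		/* map ourselves so the next level is allowed */
		write_str("/proc/self/setgroups", "deny");
		snprintf(map, sizeof(map), "0 %ld 1", uid);
		write_str("/proc/self/uid_map", map);
		snprintf(map, sizeof(map), "0 %ld 1", gid);
		write_str("/proc/self/gid_map", map);
		uid = gid = 0;	/* we appear as uid/gid 0 from now on */
	}
}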
@@ -105,16 +109,21 @@ int create_user_ns(struct cred *new)
int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
{
struct cred *cred;
+ int err = -ENOMEM;
if (!(unshare_flags & CLONE_NEWUSER))
return 0;
cred = prepare_creds();
- if (!cred)
- return -ENOMEM;
+ if (cred) {
+ err = create_user_ns(cred);
+ if (err)
+ put_cred(cred);
+ else
+ *new_cred = cred;
+ }
- *new_cred = cred;
- return create_user_ns(cred);
+ return err;
}
void free_user_ns(struct user_namespace *ns)
diff --git a/kernel/wait.c b/kernel/wait.c
index dec68bd4e9d..d550920e040 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -363,8 +363,7 @@ EXPORT_SYMBOL(out_of_line_wait_on_atomic_t);
/**
 * wake_up_atomic_t - Wake up a waiter on an atomic_t
- * @word: The word being waited on, a kernel virtual address
- * @bit: The bit of the word being waited on
+ * @p: The atomic_t being waited on, a kernel virtual address
*
* Wake up anyone waiting for the atomic_t to go to zero.
*
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 1241d8c91d5..51c4f34d258 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -553,14 +553,6 @@ void __init lockup_detector_init(void)
{
set_sample_period();
-#ifdef CONFIG_NO_HZ_FULL
- if (watchdog_user_enabled) {
- watchdog_user_enabled = 0;
- pr_warning("Disabled lockup detectors by default for full dynticks\n");
- pr_warning("You can reactivate it with 'sysctl -w kernel.watchdog=1'\n");
- }
-#endif
-
if (watchdog_user_enabled)
watchdog_enable_all_cpus();
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7f01a3eeaf9..987293d03eb 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -16,9 +16,10 @@
*
* This is the generic async execution mechanism. Work items as are
* executed in process context. The worker pool is shared and
- * automatically managed. There is one worker pool for each CPU and
- * one extra for works which are better served by workers which are
- * not bound to any specific CPU.
+ * automatically managed. There are two worker pools for each CPU (one for
+ * normal work items and the other for high priority ones) and some extra
+ * pools for workqueues which are not bound to any specific CPU - the
+ * number of these backing pools is dynamic.
*
* Please read Documentation/workqueue.txt for details.
*/
@@ -2039,8 +2040,11 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
* multiple times. Does GFP_KERNEL allocations.
*
* Return:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
- * multiple times. Does GFP_KERNEL allocations.
+ * %false if the pool doesn't need management and the caller can safely start
+ * processing works, %true indicates that the function released pool->lock
+ * and reacquired it to perform some management function and that the
+ * conditions that the caller verified while holding the lock before
+ * calling the function might no longer be true.
*/
static bool manage_workers(struct worker *worker)
{
@@ -2207,6 +2211,15 @@ __acquires(&pool->lock)
dump_stack();
}
+ /*
+ * The following prevents a kworker from hogging CPU on !PREEMPT
+ * kernels, where a requeueing work item waiting for something to
+ * happen could deadlock with stop_machine as such work item could
+ * indefinitely requeue itself while all other CPUs are trapped in
+ * stop_machine.
+ */
+ cond_resched();
+
spin_lock_irq(&pool->lock);
/* clear cpu intensive status */
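The work item the new cond_resched() is guarding against looks roughly like this (all names are invented): a handler that requeues itself while polling for a condition, which on a !PREEMPT kernel would otherwise keep the kworker on the CPU indefinitely and stall stop_machine.

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* hypothetical workqueue */
static bool my_condition_met(void);	/* hypothetical predicate */

static void my_poll_fn(struct work_struct *work)
{
	if (!my_condition_met())
		queue_work(my_wq, work);	/* requeue immediately and retry */
}
static DECLARE_WORK(my_work, my_poll_fn);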
@@ -2827,6 +2840,19 @@ already_gone:
return false;
}
+static bool __flush_work(struct work_struct *work)
+{
+ struct wq_barrier barr;
+
+ if (start_flush_work(work, &barr)) {
+ wait_for_completion(&barr.done);
+ destroy_work_on_stack(&barr.work);
+ return true;
+ } else {
+ return false;
+ }
+}
+
/**
* flush_work - wait for a work to finish executing the last queueing instance
* @work: the work to flush
@@ -2840,18 +2866,10 @@ already_gone:
*/
bool flush_work(struct work_struct *work)
{
- struct wq_barrier barr;
-
lock_map_acquire(&work->lockdep_map);
lock_map_release(&work->lockdep_map);
- if (start_flush_work(work, &barr)) {
- wait_for_completion(&barr.done);
- destroy_work_on_stack(&barr.work);
- return true;
- } else {
- return false;
- }
+ return __flush_work(work);
}
EXPORT_SYMBOL_GPL(flush_work);
@@ -3095,25 +3113,26 @@ static struct workqueue_struct *dev_to_wq(struct device *dev)
return wq_dev->wq;
}
-static ssize_t wq_per_cpu_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct workqueue_struct *wq = dev_to_wq(dev);
return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
}
+static DEVICE_ATTR_RO(per_cpu);
-static ssize_t wq_max_active_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t max_active_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct workqueue_struct *wq = dev_to_wq(dev);
return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
}
-static ssize_t wq_max_active_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t max_active_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
{
struct workqueue_struct *wq = dev_to_wq(dev);
int val;
@@ -3124,12 +3143,14 @@ static ssize_t wq_max_active_store(struct device *dev,
workqueue_set_max_active(wq, val);
return count;
}
+static DEVICE_ATTR_RW(max_active);
-static struct device_attribute wq_sysfs_attrs[] = {
- __ATTR(per_cpu, 0444, wq_per_cpu_show, NULL),
- __ATTR(max_active, 0644, wq_max_active_show, wq_max_active_store),
- __ATTR_NULL,
+static struct attribute *wq_sysfs_attrs[] = {
+ &dev_attr_per_cpu.attr,
+ &dev_attr_max_active.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(wq_sysfs);
static ssize_t wq_pool_ids_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -3279,7 +3300,7 @@ static struct device_attribute wq_sysfs_unbound_attrs[] = {
static struct bus_type wq_subsys = {
.name = "workqueue",
- .dev_attrs = wq_sysfs_attrs,
+ .dev_groups = wq_sysfs_groups,
};
static int __init wq_sysfs_init(void)
@@ -3427,6 +3448,12 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
{
to->nice = from->nice;
cpumask_copy(to->cpumask, from->cpumask);
+ /*
+ * Unlike hash and equality test, this function doesn't ignore
+ * ->no_numa as it is used for both pool and wq attrs. Instead,
+ * get_unbound_pool() explicitly clears ->no_numa after copying.
+ */
+ to->no_numa = from->no_numa;
}
/* hash value of the content of @attr */
@@ -3598,6 +3625,12 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */
copy_workqueue_attrs(pool->attrs, attrs);
+ /*
+ * no_numa isn't a worker_pool attribute, always clear it. See
+ * 'struct workqueue_attrs' comments for detail.
+ */
+ pool->attrs->no_numa = false;
+
/* if cpumask is contained inside a NUMA node, we belong to that node */
if (wq_numa_enabled) {
for_each_node(node) {
@@ -4781,7 +4814,14 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
schedule_work_on(cpu, &wfc.work);
- flush_work(&wfc.work);
+
+ /*
+ * The work item is on-stack and can't lead to deadlock through
+ * flushing. Use __flush_work() to avoid spurious lockdep warnings
+ * when work_on_cpu()s are nested.
+ */
+ __flush_work(&wfc.work);
+
return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
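The switch to __flush_work() matters for nested work_on_cpu() calls: both levels use on-stack work items initialised at the same place inside work_on_cpu(), so they share a lockdep class and the lock_map annotations in flush_work() report apparent recursion even though no real deadlock is possible. A hypothetical nesting of that shape:

#include <linux/workqueue.h>

static long inner_fn(void *arg)
{
	return 0;	/* runs on CPU 0 */
}

static long outer_fn(void *arg)
{
	/* nested work_on_cpu(); flushing here used to trip lockdep */
	return work_on_cpu(0, inner_fn, arg);
}

static long run_nested(void *arg)
{
	return work_on_cpu(1, outer_fn, arg);
}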
diff --git a/lib/Kconfig b/lib/Kconfig
index 71d9f81f6ee..65561716c16 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -48,6 +48,16 @@ config STMP_DEVICE
config PERCPU_RWSEM
boolean
+config ARCH_USE_CMPXCHG_LOCKREF
+ bool
+
+config CMPXCHG_LOCKREF
+ def_bool y if ARCH_USE_CMPXCHG_LOCKREF
+ depends on SMP
+ depends on !GENERIC_LOCKBREAK
+ depends on !DEBUG_SPINLOCK
+ depends on !DEBUG_LOCK_ALLOC
+
config CRC_CCITT
tristate "CRC-CCITT functions"
help
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1501aa55322..444e1c12fea 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -981,6 +981,25 @@ config DEBUG_KOBJECT
If you say Y here, some extra kobject debugging messages will be sent
to the syslog.
+config DEBUG_KOBJECT_RELEASE
+ bool "kobject release debugging"
+ depends on DEBUG_KERNEL
+ help
+ kobjects are reference counted objects. This means that their
+ last reference count put is not predictable, and the kobject can
+	  live on past the point at which a driver decides to drop its
+ initial reference to the kobject gained on allocation. An
+ example of this would be a struct device which has just been
+ unregistered.
+
+ However, some buggy drivers assume that after such an operation,
+ the memory backing the kobject can be immediately freed. This
+ goes completely against the principles of a refcounted object.
+
+ If you say Y here, the kernel will delay the release of kobjects
+ on the last reference count to improve the visibility of this
+ kind of kobject release bug.
+
config HAVE_DEBUG_BUGVERBOSE
bool
diff --git a/lib/Makefile b/lib/Makefile
index 7baccfd8a4e..f2cb3082697 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -20,6 +20,7 @@ lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
lib-y += kobject.o klist.o
+obj-y += lockref.o
obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 37061ede8b8..bf2c8b1043d 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -381,19 +381,21 @@ void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
* debug_object_activate - debug checks when an object is activated
* @addr: address of the object
* @descr: pointer to an object specific debug description structure
+ * Returns 0 for success, -EINVAL for check failed.
*/
-void debug_object_activate(void *addr, struct debug_obj_descr *descr)
+int debug_object_activate(void *addr, struct debug_obj_descr *descr)
{
enum debug_obj_state state;
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
+ int ret;
struct debug_obj o = { .object = addr,
.state = ODEBUG_STATE_NOTAVAILABLE,
.descr = descr };
if (!debug_objects_enabled)
- return;
+ return 0;
db = get_bucket((unsigned long) addr);
@@ -405,23 +407,26 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
case ODEBUG_STATE_INIT:
case ODEBUG_STATE_INACTIVE:
obj->state = ODEBUG_STATE_ACTIVE;
+ ret = 0;
break;
case ODEBUG_STATE_ACTIVE:
debug_print_object(obj, "activate");
state = obj->state;
raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_object_fixup(descr->fixup_activate, addr, state);
- return;
+ ret = debug_object_fixup(descr->fixup_activate, addr, state);
+ return ret ? -EINVAL : 0;
case ODEBUG_STATE_DESTROYED:
debug_print_object(obj, "activate");
+ ret = -EINVAL;
break;
default:
+ ret = 0;
break;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
- return;
+ return ret;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
@@ -431,8 +436,11 @@ void debug_object_activate(void *addr, struct debug_obj_descr *descr)
* true or not.
*/
if (debug_object_fixup(descr->fixup_activate, addr,
- ODEBUG_STATE_NOTAVAILABLE))
+ ODEBUG_STATE_NOTAVAILABLE)) {
debug_print_object(&o, "activate");
+ return -EINVAL;
+ }
+ return 0;
}
/**
diff --git a/lib/dump_stack.c b/lib/dump_stack.c
index c03154173cc..f23b63f0a1c 100644
--- a/lib/dump_stack.c
+++ b/lib/dump_stack.c
@@ -23,7 +23,7 @@ static void __dump_stack(void)
#ifdef CONFIG_SMP
static atomic_t dump_lock = ATOMIC_INIT(-1);
-void dump_stack(void)
+asmlinkage void dump_stack(void)
{
int was_locked;
int old;
@@ -55,7 +55,7 @@ retry:
preempt_enable();
}
#else
-void dump_stack(void)
+asmlinkage void dump_stack(void)
{
__dump_stack();
}
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 99fec3ae405..c37aeacd765 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -309,7 +309,7 @@ static int ddebug_parse_query(char *words[], int nwords,
struct ddebug_query *query, const char *modname)
{
unsigned int i;
- int rc;
+ int rc = 0;
/* check we have an even number of words */
if (nwords % 2 != 0) {
diff --git a/lib/earlycpio.c b/lib/earlycpio.c
index 7aa7ce250c9..3eb3e4722b8 100644
--- a/lib/earlycpio.c
+++ b/lib/earlycpio.c
@@ -49,22 +49,23 @@ enum cpio_fields {
/**
* cpio_data find_cpio_data - Search for files in an uncompressed cpio
- * @path: The directory to search for, including a slash at the end
- * @data: Pointer to the the cpio archive or a header inside
- * @len: Remaining length of the cpio based on data pointer
- * @offset: When a matching file is found, this is the offset to the
- * beginning of the cpio. It can be used to iterate through
- * the cpio to find all files inside of a directory path
+ * @path: The directory to search for, including a slash at the end
+ @data:	Pointer to the cpio archive or a header inside
+ * @len: Remaining length of the cpio based on data pointer
+ * @nextoff: When a matching file is found, this is the offset from the
+ * beginning of the cpio to the beginning of the next file, not the
+ * matching file itself. It can be used to iterate through the cpio
+ * to find all files inside of a directory path.
*
- * @return: struct cpio_data containing the address, length and
- * filename (with the directory path cut off) of the found file.
- * If you search for a filename and not for files in a directory,
- * pass the absolute path of the filename in the cpio and make sure
- * the match returned an empty filename string.
+ * @return: struct cpio_data containing the address, length and
+ * filename (with the directory path cut off) of the found file.
+ * If you search for a filename and not for files in a directory,
+ * pass the absolute path of the filename in the cpio and make sure
+ * the match returned an empty filename string.
*/
struct cpio_data find_cpio_data(const char *path, void *data,
- size_t len, long *offset)
+ size_t len, long *nextoff)
{
const size_t cpio_header_len = 8*C_NFIELDS - 2;
struct cpio_data cd = { NULL, 0, "" };
@@ -124,7 +125,7 @@ struct cpio_data find_cpio_data(const char *path, void *data,
if ((ch[C_MODE] & 0170000) == 0100000 &&
ch[C_NAMESIZE] >= mypathsize &&
!memcmp(p, path, mypathsize)) {
- *offset = (long)nptr - (long)data;
+ *nextoff = (long)nptr - (long)data;
if (ch[C_NAMESIZE] - mypathsize >= MAX_CPIO_FILE_NAME) {
pr_warn(
"File %s exceeding MAX_CPIO_FILE_NAME [%d]\n",
diff --git a/lib/kobject.c b/lib/kobject.c
index 4a1f33d4354..1d46c151a4a 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -545,8 +545,8 @@ static void kobject_cleanup(struct kobject *kobj)
struct kobj_type *t = get_ktype(kobj);
const char *name = kobj->name;
- pr_debug("kobject: '%s' (%p): %s\n",
- kobject_name(kobj), kobj, __func__);
+ pr_debug("kobject: '%s' (%p): %s, parent %p\n",
+ kobject_name(kobj), kobj, __func__, kobj->parent);
if (t && !t->release)
pr_debug("kobject: '%s' (%p): does not have a release() "
@@ -580,9 +580,25 @@ static void kobject_cleanup(struct kobject *kobj)
}
}
+#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
+static void kobject_delayed_cleanup(struct work_struct *work)
+{
+ kobject_cleanup(container_of(to_delayed_work(work),
+ struct kobject, release));
+}
+#endif
+
static void kobject_release(struct kref *kref)
{
- kobject_cleanup(container_of(kref, struct kobject, kref));
+ struct kobject *kobj = container_of(kref, struct kobject, kref);
+#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
+ pr_debug("kobject: '%s' (%p): %s, parent %p (delayed)\n",
+ kobject_name(kobj), kobj, __func__, kobj->parent);
+ INIT_DELAYED_WORK(&kobj->release, kobject_delayed_cleanup);
+ schedule_delayed_work(&kobj->release, HZ);
+#else
+ kobject_cleanup(kobj);
+#endif
}
/**
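The one-second delay exists to make the bug class described in the new Kconfig help text visible: code that drops its reference and then immediately frees the object embedding the kobject, assuming it just dropped the last reference. A hypothetical offender looks like this:

#include <linux/kobject.h>
#include <linux/slab.h>

struct my_dev {
	struct kobject kobj;
	/* ... */
};

static void my_dev_teardown(struct my_dev *dev)
{
	kobject_put(&dev->kobj);	/* may not be the last reference */
	kfree(dev);			/* BUG: the kobject can still be in use */
}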
diff --git a/lib/lockref.c b/lib/lockref.c
new file mode 100644
index 00000000000..9d76f404ce9
--- /dev/null
+++ b/lib/lockref.c
@@ -0,0 +1,128 @@
+#include <linux/export.h>
+#include <linux/lockref.h>
+
+#ifdef CONFIG_CMPXCHG_LOCKREF
+
+/*
+ * Note that the "cmpxchg()" reloads the "old" value for the
+ * failure case.
+ */
+#define CMPXCHG_LOOP(CODE, SUCCESS) do { \
+ struct lockref old; \
+ BUILD_BUG_ON(sizeof(old) != 8); \
+ old.lock_count = ACCESS_ONCE(lockref->lock_count); \
+ while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) { \
+ struct lockref new = old, prev = old; \
+ CODE \
+ old.lock_count = cmpxchg(&lockref->lock_count, \
+ old.lock_count, new.lock_count); \
+ if (likely(old.lock_count == prev.lock_count)) { \
+ SUCCESS; \
+ } \
+ cpu_relax(); \
+ } \
+} while (0)
+
+#else
+
+#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)
+
+#endif
+
+/**
+ * lockref_get - Increments reference count unconditionally
+ * @lockcnt: pointer to lockref structure
+ *
+ * This operation is only valid if you already hold a reference
+ * to the object, so you know the count cannot be zero.
+ */
+void lockref_get(struct lockref *lockref)
+{
+ CMPXCHG_LOOP(
+ new.count++;
+ ,
+ return;
+ );
+
+ spin_lock(&lockref->lock);
+ lockref->count++;
+ spin_unlock(&lockref->lock);
+}
+EXPORT_SYMBOL(lockref_get);
+
+/**
+ * lockref_get_not_zero - Increments count unless the count is 0
+ * @lockcnt: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count was zero
+ */
+int lockref_get_not_zero(struct lockref *lockref)
+{
+ int retval;
+
+ CMPXCHG_LOOP(
+ new.count++;
+ if (!old.count)
+ return 0;
+ ,
+ return 1;
+ );
+
+ spin_lock(&lockref->lock);
+ retval = 0;
+ if (lockref->count) {
+ lockref->count++;
+ retval = 1;
+ }
+ spin_unlock(&lockref->lock);
+ return retval;
+}
+EXPORT_SYMBOL(lockref_get_not_zero);
+
+/**
+ * lockref_get_or_lock - Increments count unless the count is 0
+ * @lockcnt: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count was zero
+ * and we got the lock instead.
+ */
+int lockref_get_or_lock(struct lockref *lockref)
+{
+ CMPXCHG_LOOP(
+ new.count++;
+ if (!old.count)
+ break;
+ ,
+ return 1;
+ );
+
+ spin_lock(&lockref->lock);
+ if (!lockref->count)
+ return 0;
+ lockref->count++;
+ spin_unlock(&lockref->lock);
+ return 1;
+}
+EXPORT_SYMBOL(lockref_get_or_lock);
+
+/**
+ * lockref_put_or_lock - decrements count unless count <= 1 before decrement
+ * @lockcnt: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken
+ */
+int lockref_put_or_lock(struct lockref *lockref)
+{
+ CMPXCHG_LOOP(
+ new.count--;
+ if (old.count <= 1)
+ break;
+ ,
+ return 1;
+ );
+
+ spin_lock(&lockref->lock);
+ if (lockref->count <= 1)
+ return 0;
+ lockref->count--;
+ spin_unlock(&lockref->lock);
+ return 1;
+}
+EXPORT_SYMBOL(lockref_put_or_lock);
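A minimal usage sketch of the API above (the containing type and its teardown are made up): a lockref stands in for an open-coded spinlock-plus-refcount pair, and when CONFIG_CMPXCHG_LOCKREF is available the common get/put paths become a single cmpxchg on the combined 64-bit word.

#include <linux/lockref.h>
#include <linux/slab.h>

struct my_obj {
	struct lockref ref;	/* ref.lock also protects 'data' */
	int data;
};

static void my_obj_get(struct my_obj *obj)
{
	lockref_get(&obj->ref);		/* caller already holds a reference */
}

static void my_obj_put(struct my_obj *obj)
{
	if (lockref_put_or_lock(&obj->ref))
		return;			/* count was > 1 and was decremented */
	/* count <= 1 and we now hold obj->ref.lock: last reference */
	obj->ref.count = 0;
	spin_unlock(&obj->ref.lock);
	kfree(obj);
}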
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c
index fd94058bd7f..28321d8f75e 100644
--- a/lib/lz4/lz4_compress.c
+++ b/lib/lz4/lz4_compress.c
@@ -437,7 +437,7 @@ int lz4_compress(const unsigned char *src, size_t src_len,
exit:
return ret;
}
-EXPORT_SYMBOL_GPL(lz4_compress);
+EXPORT_SYMBOL(lz4_compress);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 compressor");
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index d3414eae73a..411be80ddb4 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -299,7 +299,7 @@ exit_0:
return ret;
}
#ifndef STATIC
-EXPORT_SYMBOL_GPL(lz4_decompress);
+EXPORT_SYMBOL(lz4_decompress);
#endif
int lz4_decompress_unknownoutputsize(const char *src, size_t src_len,
@@ -319,8 +319,8 @@ exit_0:
return ret;
}
#ifndef STATIC
-EXPORT_SYMBOL_GPL(lz4_decompress_unknownoutputsize);
+EXPORT_SYMBOL(lz4_decompress_unknownoutputsize);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4 Decompressor");
#endif
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
index eb1a74f5e36..f344f76b655 100644
--- a/lib/lz4/lz4hc_compress.c
+++ b/lib/lz4/lz4hc_compress.c
@@ -533,7 +533,7 @@ int lz4hc_compress(const unsigned char *src, size_t src_len,
exit:
return ret;
}
-EXPORT_SYMBOL_GPL(lz4hc_compress);
+EXPORT_SYMBOL(lz4hc_compress);
-MODULE_LICENSE("GPL");
+MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("LZ4HC compressor");
diff --git a/lib/raid6/.gitignore b/lib/raid6/.gitignore
index 162becacf97..0a7e494b2bc 100644
--- a/lib/raid6/.gitignore
+++ b/lib/raid6/.gitignore
@@ -2,3 +2,4 @@ mktables
altivec*.c
int*.c
tables.c
+neon?.c
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 9f7c184725d..b4625787c7e 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -5,6 +5,7 @@ raid6_pq-y += algos.o recov.o tables.o int1.o int2.o int4.o \
raid6_pq-$(CONFIG_X86) += recov_ssse3.o recov_avx2.o mmx.o sse1.o sse2.o avx2.o
raid6_pq-$(CONFIG_ALTIVEC) += altivec1.o altivec2.o altivec4.o altivec8.o
+raid6_pq-$(CONFIG_KERNEL_MODE_NEON) += neon.o neon1.o neon2.o neon4.o neon8.o
hostprogs-y += mktables
@@ -16,6 +17,21 @@ ifeq ($(CONFIG_ALTIVEC),y)
altivec_flags := -maltivec -mabi=altivec
endif
+# The GCC option -ffreestanding is required in order to compile code containing
+# ARM/NEON intrinsics in a non C99-compliant environment (such as the kernel)
+ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
+NEON_FLAGS := -ffreestanding
+ifeq ($(ARCH),arm)
+NEON_FLAGS += -mfloat-abi=softfp -mfpu=neon
+endif
+ifeq ($(ARCH),arm64)
+CFLAGS_REMOVE_neon1.o += -mgeneral-regs-only
+CFLAGS_REMOVE_neon2.o += -mgeneral-regs-only
+CFLAGS_REMOVE_neon4.o += -mgeneral-regs-only
+CFLAGS_REMOVE_neon8.o += -mgeneral-regs-only
+endif
+endif
+
targets += int1.c
$(obj)/int1.c: UNROLL := 1
$(obj)/int1.c: $(src)/int.uc $(src)/unroll.awk FORCE
@@ -70,6 +86,30 @@ $(obj)/altivec8.c: UNROLL := 8
$(obj)/altivec8.c: $(src)/altivec.uc $(src)/unroll.awk FORCE
$(call if_changed,unroll)
+CFLAGS_neon1.o += $(NEON_FLAGS)
+targets += neon1.c
+$(obj)/neon1.c: UNROLL := 1
+$(obj)/neon1.c: $(src)/neon.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+CFLAGS_neon2.o += $(NEON_FLAGS)
+targets += neon2.c
+$(obj)/neon2.c: UNROLL := 2
+$(obj)/neon2.c: $(src)/neon.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+CFLAGS_neon4.o += $(NEON_FLAGS)
+targets += neon4.c
+$(obj)/neon4.c: UNROLL := 4
+$(obj)/neon4.c: $(src)/neon.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
+CFLAGS_neon8.o += $(NEON_FLAGS)
+targets += neon8.c
+$(obj)/neon8.c: UNROLL := 8
+$(obj)/neon8.c: $(src)/neon.uc $(src)/unroll.awk FORCE
+ $(call if_changed,unroll)
+
quiet_cmd_mktable = TABLE $@
cmd_mktable = $(obj)/mktables > $@ || ( rm -f $@ && exit 1 )
diff --git a/lib/raid6/algos.c b/lib/raid6/algos.c
index 6d7316fe9f3..74e6f5629db 100644
--- a/lib/raid6/algos.c
+++ b/lib/raid6/algos.c
@@ -70,6 +70,12 @@ const struct raid6_calls * const raid6_algos[] = {
&raid6_intx2,
&raid6_intx4,
&raid6_intx8,
+#ifdef CONFIG_KERNEL_MODE_NEON
+ &raid6_neonx1,
+ &raid6_neonx2,
+ &raid6_neonx4,
+ &raid6_neonx8,
+#endif
NULL
};
diff --git a/lib/raid6/neon.c b/lib/raid6/neon.c
new file mode 100644
index 00000000000..36ad4705df1
--- /dev/null
+++ b/lib/raid6/neon.c
@@ -0,0 +1,58 @@
+/*
+ * linux/lib/raid6/neon.c - RAID6 syndrome calculation using ARM NEON intrinsics
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/raid/pq.h>
+
+#ifdef __KERNEL__
+#include <asm/neon.h>
+#else
+#define kernel_neon_begin()
+#define kernel_neon_end()
+#define cpu_has_neon() (1)
+#endif
+
+/*
+ * There are 2 reasons these wrappers are kept in a separate compilation unit
+ * from the actual implementations in neonN.c (generated from neon.uc by
+ * unroll.awk):
+ * - the actual implementations use NEON intrinsics, and the GCC support header
+ * (arm_neon.h) is not fully compatible (type wise) with the kernel;
+ * - the neonN.c files are compiled with -mfpu=neon and optimization enabled,
+ * and we have to make sure that we never use *any* NEON/VFP instructions
+ * outside a kernel_neon_begin()/kernel_neon_end() pair.
+ */
+
+#define RAID6_NEON_WRAPPER(_n) \
+ static void raid6_neon ## _n ## _gen_syndrome(int disks, \
+ size_t bytes, void **ptrs) \
+ { \
+ void raid6_neon ## _n ## _gen_syndrome_real(int, \
+ unsigned long, void**); \
+ kernel_neon_begin(); \
+ raid6_neon ## _n ## _gen_syndrome_real(disks, \
+ (unsigned long)bytes, ptrs); \
+ kernel_neon_end(); \
+ } \
+ struct raid6_calls const raid6_neonx ## _n = { \
+ raid6_neon ## _n ## _gen_syndrome, \
+ raid6_have_neon, \
+ "neonx" #_n, \
+ 0 \
+ }
+
+static int raid6_have_neon(void)
+{
+ return cpu_has_neon();
+}
+
+RAID6_NEON_WRAPPER(1);
+RAID6_NEON_WRAPPER(2);
+RAID6_NEON_WRAPPER(4);
+RAID6_NEON_WRAPPER(8);
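For reference, RAID6_NEON_WRAPPER(1) expands to roughly the following (whitespace rearranged), which is what ties the unrolled neon1.c implementation into the raid6_algos[] table:

static void raid6_neon1_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	void raid6_neon1_gen_syndrome_real(int, unsigned long, void **);

	kernel_neon_begin();
	raid6_neon1_gen_syndrome_real(disks, (unsigned long)bytes, ptrs);
	kernel_neon_end();
}

struct raid6_calls const raid6_neonx1 = {
	raid6_neon1_gen_syndrome,
	raid6_have_neon,
	"neonx1",
	0
};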
diff --git a/lib/raid6/neon.uc b/lib/raid6/neon.uc
new file mode 100644
index 00000000000..1b9ed793342
--- /dev/null
+++ b/lib/raid6/neon.uc
@@ -0,0 +1,80 @@
+/* -----------------------------------------------------------------------
+ *
+ * neon.uc - RAID-6 syndrome calculation using ARM NEON instructions
+ *
+ * Copyright (C) 2012 Rob Herring
+ *
+ * Based on altivec.uc:
+ * Copyright 2002-2004 H. Peter Anvin - All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 53 Temple Place Ste 330,
+ * Boston MA 02111-1307, USA; either version 2 of the License, or
+ * (at your option) any later version; incorporated herein by reference.
+ *
+ * ----------------------------------------------------------------------- */
+
+/*
+ * neon$#.c
+ *
+ * $#-way unrolled NEON intrinsics math RAID-6 instruction set
+ *
+ * This file is postprocessed using unroll.awk
+ */
+
+#include <arm_neon.h>
+
+typedef uint8x16_t unative_t;
+
+#define NBYTES(x) ((unative_t){x,x,x,x, x,x,x,x, x,x,x,x, x,x,x,x})
+#define NSIZE sizeof(unative_t)
+
+/*
+ * The SHLBYTE() operation shifts each byte left by 1, *not*
+ * rolling over into the next byte
+ */
+static inline unative_t SHLBYTE(unative_t v)
+{
+ return vshlq_n_u8(v, 1);
+}
+
+/*
+ * The MASK() operation returns 0xFF in any byte for which the high
+ * bit is 1, 0x00 for any byte for which the high bit is 0.
+ */
+static inline unative_t MASK(unative_t v)
+{
+ const uint8x16_t temp = NBYTES(0);
+ return (unative_t)vcltq_s8((int8x16_t)v, (int8x16_t)temp);
+}
+
+void raid6_neon$#_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs)
+{
+ uint8_t **dptr = (uint8_t **)ptrs;
+ uint8_t *p, *q;
+ int d, z, z0;
+
+ register unative_t wd$$, wq$$, wp$$, w1$$, w2$$;
+ const unative_t x1d = NBYTES(0x1d);
+
+ z0 = disks - 3; /* Highest data disk */
+ p = dptr[z0+1]; /* XOR parity */
+ q = dptr[z0+2]; /* RS syndrome */
+
+ for ( d = 0 ; d < bytes ; d += NSIZE*$# ) {
+ wq$$ = wp$$ = vld1q_u8(&dptr[z0][d+$$*NSIZE]);
+ for ( z = z0-1 ; z >= 0 ; z-- ) {
+ wd$$ = vld1q_u8(&dptr[z][d+$$*NSIZE]);
+ wp$$ = veorq_u8(wp$$, wd$$);
+ w2$$ = MASK(wq$$);
+ w1$$ = SHLBYTE(wq$$);
+
+ w2$$ = vandq_u8(w2$$, x1d);
+ w1$$ = veorq_u8(w1$$, w2$$);
+ wq$$ = veorq_u8(w1$$, wd$$);
+ }
+ vst1q_u8(&p[d+NSIZE*$$], wp$$);
+ vst1q_u8(&q[d+NSIZE*$$], wq$$);
+ }
+}
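The MASK()/SHLBYTE()/x1d sequence in the inner loop is a byte-wise multiply-by-2 in GF(2^8) with the usual RAID-6 reduction polynomial (0x11d, hence the 0x1d constant), followed by xor-ing in the next data byte. The same step written as scalar C, one byte at a time; the NEON version simply does this for 16 bytes per vector:

#include <stdint.h>

static inline uint8_t gf256_mul2(uint8_t q)
{
	uint8_t mask = (q & 0x80) ? 0xff : 0x00;	/* MASK()              */
	return (uint8_t)((q << 1) ^ (mask & 0x1d));	/* SHLBYTE() ^ (mask & x1d) */
}

/* per data byte d:  wq = gf256_mul2(wq) ^ d;  wp ^= d; */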
diff --git a/lib/raid6/test/Makefile b/lib/raid6/test/Makefile
index 087332dbf8a..28afa1a06e0 100644
--- a/lib/raid6/test/Makefile
+++ b/lib/raid6/test/Makefile
@@ -22,11 +22,23 @@ ifeq ($(ARCH),x86_64)
IS_X86 = yes
endif
+ifeq ($(ARCH),arm)
+ CFLAGS += -I../../../arch/arm/include -mfpu=neon
+ HAS_NEON = yes
+endif
+ifeq ($(ARCH),arm64)
+ CFLAGS += -I../../../arch/arm64/include
+ HAS_NEON = yes
+endif
+
ifeq ($(IS_X86),yes)
OBJS += mmx.o sse1.o sse2.o avx2.o recov_ssse3.o recov_avx2.o
CFLAGS += $(shell echo "vpbroadcastb %xmm0, %ymm1" | \
gcc -c -x assembler - >&/dev/null && \
rm ./-.o && echo -DCONFIG_AS_AVX2=1)
+else ifeq ($(HAS_NEON),yes)
+ OBJS += neon.o neon1.o neon2.o neon4.o neon8.o
+ CFLAGS += -DCONFIG_KERNEL_MODE_NEON=1
else
HAS_ALTIVEC := $(shell echo -e '\#include <altivec.h>\nvector int a;' |\
gcc -c -x c - >&/dev/null && \
@@ -55,6 +67,18 @@ raid6.a: $(OBJS)
raid6test: test.c raid6.a
$(CC) $(CFLAGS) -o raid6test $^
+neon1.c: neon.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=1 < neon.uc > $@
+
+neon2.c: neon.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=2 < neon.uc > $@
+
+neon4.c: neon.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=4 < neon.uc > $@
+
+neon8.c: neon.uc ../unroll.awk
+ $(AWK) ../unroll.awk -vN=8 < neon.uc > $@
+
altivec1.c: altivec.uc ../unroll.awk
$(AWK) ../unroll.awk -vN=1 < altivec.uc > $@
@@ -89,7 +113,7 @@ tables.c: mktables
./mktables > tables.c
clean:
- rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c tables.c raid6test
+ rm -f *.o *.a mktables mktables.c *.uc int*.c altivec*.c neon*.c tables.c raid6test
spotless: clean
rm -f *~
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index d23762e6652..4e8686c7e5a 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -870,13 +870,13 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
swiotlb_full(hwdev, sg->length, dir, 0);
swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
attrs);
- sgl[0].dma_length = 0;
+ sg_dma_len(sgl) = 0;
return 0;
}
sg->dma_address = phys_to_dma(hwdev, map);
} else
sg->dma_address = dev_addr;
- sg->dma_length = sg->length;
+ sg_dma_len(sg) = sg->length;
}
return nelems;
}
@@ -904,7 +904,7 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
BUG_ON(dir == DMA_NONE);
for_each_sg(sgl, sg, nelems, i)
- unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
+ unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir);
}
EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
@@ -934,7 +934,7 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
for_each_sg(sgl, sg, nelems, i)
swiotlb_sync_single(hwdev, sg->dma_address,
- sg->dma_length, dir, target);
+ sg_dma_len(sg), dir, target);
}
void
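The switch from sg->dma_length to the sg_dma_len() accessor matters because not every architecture keeps a separate dma_length field in struct scatterlist; the accessor hides that difference, roughly as follows (paraphrased from include/linux/scatterlist.h, shown here only for context):

#ifdef CONFIG_NEED_SG_DMA_LENGTH
#define sg_dma_len(sg)		((sg)->dma_length)
#else
#define sg_dma_len(sg)		((sg)->length)
#endif

Because the macro expands to a struct member it can also be used as an lvalue, which is why the error path above can still write sg_dma_len(sgl) = 0.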
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 739a36366b7..26559bdb4c4 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -26,6 +26,7 @@
#include <linux/math64.h>
#include <linux/uaccess.h>
#include <linux/ioport.h>
+#include <linux/dcache.h>
#include <net/addrconf.h>
#include <asm/page.h> /* for PAGE_SIZE */
@@ -532,6 +533,81 @@ char *string(char *buf, char *end, const char *s, struct printf_spec spec)
return buf;
}
+static void widen(char *buf, char *end, unsigned len, unsigned spaces)
+{
+ size_t size;
+ if (buf >= end) /* nowhere to put anything */
+ return;
+ size = end - buf;
+ if (size <= spaces) {
+ memset(buf, ' ', size);
+ return;
+ }
+ if (len) {
+ if (len > size - spaces)
+ len = size - spaces;
+ memmove(buf + spaces, buf, len);
+ }
+ memset(buf, ' ', spaces);
+}
+
+static noinline_for_stack
+char *dentry_name(char *buf, char *end, const struct dentry *d, struct printf_spec spec,
+ const char *fmt)
+{
+ const char *array[4], *s;
+ const struct dentry *p;
+ int depth;
+ int i, n;
+
+ switch (fmt[1]) {
+ case '2': case '3': case '4':
+ depth = fmt[1] - '0';
+ break;
+ default:
+ depth = 1;
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < depth; i++, d = p) {
+ p = ACCESS_ONCE(d->d_parent);
+ array[i] = ACCESS_ONCE(d->d_name.name);
+ if (p == d) {
+ if (i)
+ array[i] = "";
+ i++;
+ break;
+ }
+ }
+ s = array[--i];
+ for (n = 0; n != spec.precision; n++, buf++) {
+ char c = *s++;
+ if (!c) {
+ if (!i)
+ break;
+ c = '/';
+ s = array[--i];
+ }
+ if (buf < end)
+ *buf = c;
+ }
+ rcu_read_unlock();
+ if (n < spec.field_width) {
+ /* we want to pad the sucker */
+ unsigned spaces = spec.field_width - n;
+ if (!(spec.flags & LEFT)) {
+ widen(buf - n, end, n, spaces);
+ return buf + spaces;
+ }
+ while (spaces--) {
+ if (buf < end)
+ *buf = ' ';
+ ++buf;
+ }
+ }
+ return buf;
+}
+
static noinline_for_stack
char *symbol_string(char *buf, char *end, void *ptr,
struct printf_spec spec, const char *fmt)
@@ -1253,6 +1329,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
spec.base = 16;
return number(buf, end,
(unsigned long long) *((phys_addr_t *)ptr), spec);
+ case 'd':
+ return dentry_name(buf, end, ptr, spec, fmt);
+ case 'D':
+ return dentry_name(buf, end,
+ ((const struct file *)ptr)->f_path.dentry,
+ spec, fmt);
}
spec.flags |= SMALL;
if (spec.field_width == -1) {
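Together these vsprintf.c hunks add two pointer-format extensions: '%pd' prints the name of a struct dentry (with '%pd2'..'%pd4' printing up to that many trailing path components, per the depth switch in dentry_name()), and '%pD' does the same for a struct file via f_path.dentry. A hypothetical caller, not part of the patch:

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/printk.h>

/* Illustrative only: log the last two path components of a dentry and
 * the name of an open file using the new specifiers. */
static void log_paths(struct dentry *dentry, struct file *file)
{
	pr_debug("dentry: %pd2, file: %pD\n", dentry, file);
}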
diff --git a/mm/Kconfig b/mm/Kconfig
index 8028dcc6615..6cdd2704330 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -478,6 +478,30 @@ config FRONTSWAP
If unsure, say Y to enable frontswap.
+config CMA
+ bool "Contiguous Memory Allocator"
+ depends on HAVE_MEMBLOCK
+ select MIGRATION
+ select MEMORY_ISOLATION
+ help
+ This enables the Contiguous Memory Allocator which allows other
+ subsystems to allocate big physically-contiguous blocks of memory.
+ CMA reserves a region of memory and allows only movable pages to
+ be allocated from it. This way, the kernel can use the memory for
+ pagecache, and when a subsystem requests a contiguous area, the
+ allocated pages are migrated away to serve the contiguous request.
+
+ If unsure, say "n".
+
+config CMA_DEBUG
+ bool "CMA debug messages (DEVELOPMENT)"
+ depends on DEBUG_KERNEL && CMA
+ help
+ Turns on debug messages in CMA. This produces KERN_DEBUG
+ messages for every CMA call as well as various messages while
+ processing calls such as dma_alloc_from_contiguous().
+ This option does not affect warning and error messages.
+
config ZBUD
tristate
default n
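Drivers do not usually allocate from the CMA region by name; on platforms that wire their DMA ops up to it, ordinary large coherent allocations are what end up being served from the reserved area. A hypothetical driver-side sketch (not part of the patch, and assuming the platform routes dma_alloc_coherent() through CMA):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative only: a multi-megabyte physically-contiguous buffer of
 * the kind CMA is meant to provide without a boot-time carve-out. */
static void *alloc_big_dma_buffer(struct device *dev, size_t size,
				  dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
}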
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index e04454cdb33..37d9edcd14c 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -180,7 +180,8 @@ static ssize_t name##_show(struct device *dev, \
struct backing_dev_info *bdi = dev_get_drvdata(dev); \
\
return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr); \
-}
+} \
+static DEVICE_ATTR_RW(name);
BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
@@ -231,14 +232,16 @@ static ssize_t stable_pages_required_show(struct device *dev,
return snprintf(page, PAGE_SIZE-1, "%d\n",
bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
+static DEVICE_ATTR_RO(stable_pages_required);
-static struct device_attribute bdi_dev_attrs[] = {
- __ATTR_RW(read_ahead_kb),
- __ATTR_RW(min_ratio),
- __ATTR_RW(max_ratio),
- __ATTR_RO(stable_pages_required),
- __ATTR_NULL,
+static struct attribute *bdi_dev_attrs[] = {
+ &dev_attr_read_ahead_kb.attr,
+ &dev_attr_min_ratio.attr,
+ &dev_attr_max_ratio.attr,
+ &dev_attr_stable_pages_required.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(bdi_dev);
static __init int bdi_class_init(void)
{
@@ -246,7 +249,7 @@ static __init int bdi_class_init(void)
if (IS_ERR(bdi_class))
return PTR_ERR(bdi_class);
- bdi_class->dev_attrs = bdi_dev_attrs;
+ bdi_class->dev_groups = bdi_dev_groups;
bdi_debug_init();
return 0;
}
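Handing bdi_dev_groups to bdi_class->dev_groups works because ATTRIBUTE_GROUPS(bdi_dev) generates the wrapping attribute_group and the NULL-terminated list of groups, roughly as follows (paraphrased, not the literal macro expansion):

static const struct attribute_group bdi_dev_group = {
	.attrs = bdi_dev_attrs,
};

static const struct attribute_group *bdi_dev_groups[] = {
	&bdi_dev_group,
	NULL,
};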
diff --git a/mm/filemap.c b/mm/filemap.c
index 4b51ac1acae..731a2c24532 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2550,7 +2550,7 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
mutex_unlock(&inode->i_mutex);
- if (ret > 0 || ret == -EIOCBQUEUED) {
+ if (ret > 0) {
ssize_t err;
err = generic_write_sync(file, pos, ret);
diff --git a/mm/fremap.c b/mm/fremap.c
index 87da3590c61..5bff0814776 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -57,17 +57,22 @@ static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
int err = -ENOMEM;
- pte_t *pte;
+ pte_t *pte, ptfile;
spinlock_t *ptl;
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
goto out;
- if (!pte_none(*pte))
+ ptfile = pgoff_to_pte(pgoff);
+
+ if (!pte_none(*pte)) {
+ if (pte_present(*pte) && pte_soft_dirty(*pte))
+ pte_file_mksoft_dirty(ptfile);
zap_pte(mm, vma, addr, pte);
+ }
- set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
+ set_pte_at(mm, addr, pte, ptfile);
/*
* We don't need to run update_mmu_cache() here because the "file pte"
* being installed by install_file_pte() is not a real pte - it's a
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 243e710c603..a92012a7170 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1620,7 +1620,9 @@ static void __split_huge_page_refcount(struct page *page,
((1L << PG_referenced) |
(1L << PG_swapbacked) |
(1L << PG_mlocked) |
- (1L << PG_uptodate)));
+ (1L << PG_uptodate) |
+ (1L << PG_active) |
+ (1L << PG_unevictable)));
page_tail->flags |= (1L << PG_dirty);
/* clear PageTail before overwriting first_page */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 83aff0a4d09..b60f33080a2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2490,7 +2490,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
mm = vma->vm_mm;
- tlb_gather_mmu(&tlb, mm, 0);
+ tlb_gather_mmu(&tlb, mm, start, end);
__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
tlb_finish_mmu(&tlb, start, end);
}
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 9cea7de22ff..bda8e44f6fd 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -36,21 +36,13 @@ static struct hugetlb_cgroup *root_h_cgroup __read_mostly;
static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
{
- return container_of(s, struct hugetlb_cgroup, css);
-}
-
-static inline
-struct hugetlb_cgroup *hugetlb_cgroup_from_cgroup(struct cgroup *cgroup)
-{
- return hugetlb_cgroup_from_css(cgroup_subsys_state(cgroup,
- hugetlb_subsys_id));
+ return s ? container_of(s, struct hugetlb_cgroup, css) : NULL;
}
static inline
struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
{
- return hugetlb_cgroup_from_css(task_subsys_state(task,
- hugetlb_subsys_id));
+ return hugetlb_cgroup_from_css(task_css(task, hugetlb_subsys_id));
}
static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
@@ -58,17 +50,15 @@ static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)
return (h_cg == root_h_cgroup);
}
-static inline struct hugetlb_cgroup *parent_hugetlb_cgroup(struct cgroup *cg)
+static inline struct hugetlb_cgroup *
+parent_hugetlb_cgroup(struct hugetlb_cgroup *h_cg)
{
- if (!cg->parent)
- return NULL;
- return hugetlb_cgroup_from_cgroup(cg->parent);
+ return hugetlb_cgroup_from_css(css_parent(&h_cg->css));
}
-static inline bool hugetlb_cgroup_have_usage(struct cgroup *cg)
+static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg)
{
int idx;
- struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cg);
for (idx = 0; idx < hugetlb_max_hstate; idx++) {
if ((res_counter_read_u64(&h_cg->hugepage[idx], RES_USAGE)) > 0)
@@ -77,19 +67,18 @@ static inline bool hugetlb_cgroup_have_usage(struct cgroup *cg)
return false;
}
-static struct cgroup_subsys_state *hugetlb_cgroup_css_alloc(struct cgroup *cgroup)
+static struct cgroup_subsys_state *
+hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
+ struct hugetlb_cgroup *parent_h_cgroup = hugetlb_cgroup_from_css(parent_css);
+ struct hugetlb_cgroup *h_cgroup;
int idx;
- struct cgroup *parent_cgroup;
- struct hugetlb_cgroup *h_cgroup, *parent_h_cgroup;
h_cgroup = kzalloc(sizeof(*h_cgroup), GFP_KERNEL);
if (!h_cgroup)
return ERR_PTR(-ENOMEM);
- parent_cgroup = cgroup->parent;
- if (parent_cgroup) {
- parent_h_cgroup = hugetlb_cgroup_from_cgroup(parent_cgroup);
+ if (parent_h_cgroup) {
for (idx = 0; idx < HUGE_MAX_HSTATE; idx++)
res_counter_init(&h_cgroup->hugepage[idx],
&parent_h_cgroup->hugepage[idx]);
@@ -101,11 +90,11 @@ static struct cgroup_subsys_state *hugetlb_cgroup_css_alloc(struct cgroup *cgrou
return &h_cgroup->css;
}
-static void hugetlb_cgroup_css_free(struct cgroup *cgroup)
+static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css)
{
struct hugetlb_cgroup *h_cgroup;
- h_cgroup = hugetlb_cgroup_from_cgroup(cgroup);
+ h_cgroup = hugetlb_cgroup_from_css(css);
kfree(h_cgroup);
}
@@ -117,15 +106,14 @@ static void hugetlb_cgroup_css_free(struct cgroup *cgroup)
* page reference and test for page active here. This function
* cannot fail.
*/
-static void hugetlb_cgroup_move_parent(int idx, struct cgroup *cgroup,
+static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg,
struct page *page)
{
int csize;
struct res_counter *counter;
struct res_counter *fail_res;
struct hugetlb_cgroup *page_hcg;
- struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
- struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(cgroup);
+ struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg);
page_hcg = hugetlb_cgroup_from_page(page);
/*
@@ -155,8 +143,9 @@ out:
* Force the hugetlb cgroup to empty the hugetlb resources by moving them to
* the parent cgroup.
*/
-static void hugetlb_cgroup_css_offline(struct cgroup *cgroup)
+static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css)
{
+ struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
struct hstate *h;
struct page *page;
int idx = 0;
@@ -165,13 +154,13 @@ static void hugetlb_cgroup_css_offline(struct cgroup *cgroup)
for_each_hstate(h) {
spin_lock(&hugetlb_lock);
list_for_each_entry(page, &h->hugepage_activelist, lru)
- hugetlb_cgroup_move_parent(idx, cgroup, page);
+ hugetlb_cgroup_move_parent(idx, h_cg, page);
spin_unlock(&hugetlb_lock);
idx++;
}
cond_resched();
- } while (hugetlb_cgroup_have_usage(cgroup));
+ } while (hugetlb_cgroup_have_usage(h_cg));
}
int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
@@ -253,14 +242,15 @@ void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages,
return;
}
-static ssize_t hugetlb_cgroup_read(struct cgroup *cgroup, struct cftype *cft,
- struct file *file, char __user *buf,
- size_t nbytes, loff_t *ppos)
+static ssize_t hugetlb_cgroup_read(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct file *file,
+ char __user *buf, size_t nbytes,
+ loff_t *ppos)
{
u64 val;
char str[64];
int idx, name, len;
- struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
+ struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
idx = MEMFILE_IDX(cft->private);
name = MEMFILE_ATTR(cft->private);
@@ -270,12 +260,12 @@ static ssize_t hugetlb_cgroup_read(struct cgroup *cgroup, struct cftype *cft,
return simple_read_from_buffer(buf, nbytes, ppos, str, len);
}
-static int hugetlb_cgroup_write(struct cgroup *cgroup, struct cftype *cft,
- const char *buffer)
+static int hugetlb_cgroup_write(struct cgroup_subsys_state *css,
+ struct cftype *cft, const char *buffer)
{
int idx, name, ret;
unsigned long long val;
- struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
+ struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
idx = MEMFILE_IDX(cft->private);
name = MEMFILE_ATTR(cft->private);
@@ -300,10 +290,11 @@ static int hugetlb_cgroup_write(struct cgroup *cgroup, struct cftype *cft,
return ret;
}
-static int hugetlb_cgroup_reset(struct cgroup *cgroup, unsigned int event)
+static int hugetlb_cgroup_reset(struct cgroup_subsys_state *css,
+ unsigned int event)
{
int idx, name, ret = 0;
- struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
+ struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css);
idx = MEMFILE_IDX(event);
name = MEMFILE_ATTR(event);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 00a7a664b9c..3b83957b643 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -483,10 +483,9 @@ enum res_type {
*/
static DEFINE_MUTEX(memcg_create_mutex);
-static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *s)
{
- return container_of(s, struct mem_cgroup, css);
+ return s ? container_of(s, struct mem_cgroup, css) : NULL;
}
/* Some nice accessors for the vmpressure. */
@@ -1035,12 +1034,6 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
preempt_enable();
}
-struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
-{
- return mem_cgroup_from_css(
- cgroup_subsys_state(cont, mem_cgroup_subsys_id));
-}
-
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
{
/*
@@ -1051,7 +1044,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
if (unlikely(!p))
return NULL;
- return mem_cgroup_from_css(task_subsys_state(p, mem_cgroup_subsys_id));
+ return mem_cgroup_from_css(task_css(p, mem_cgroup_subsys_id));
}
struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
@@ -1084,20 +1077,11 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
struct mem_cgroup *last_visited)
{
- struct cgroup *prev_cgroup, *next_cgroup;
+ struct cgroup_subsys_state *prev_css, *next_css;
- /*
- * Root is not visited by cgroup iterators so it needs an
- * explicit visit.
- */
- if (!last_visited)
- return root;
-
- prev_cgroup = (last_visited == root) ? NULL
- : last_visited->css.cgroup;
+ prev_css = last_visited ? &last_visited->css : NULL;
skip_node:
- next_cgroup = cgroup_next_descendant_pre(
- prev_cgroup, root->css.cgroup);
+ next_css = css_next_descendant_pre(prev_css, &root->css);
/*
* Even if we found a group we have to make sure it is
@@ -1106,13 +1090,13 @@ skip_node:
* last_visited css is safe to use because it is
* protected by css_get and the tree walk is rcu safe.
*/
- if (next_cgroup) {
- struct mem_cgroup *mem = mem_cgroup_from_cont(
- next_cgroup);
+ if (next_css) {
+ struct mem_cgroup *mem = mem_cgroup_from_css(next_css);
+
if (css_tryget(&mem->css))
return mem;
else {
- prev_cgroup = next_cgroup;
+ prev_css = next_css;
goto skip_node;
}
}
@@ -1525,10 +1509,8 @@ static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg)
int mem_cgroup_swappiness(struct mem_cgroup *memcg)
{
- struct cgroup *cgrp = memcg->css.cgroup;
-
/* root ? */
- if (cgrp->parent == NULL)
+ if (!css_parent(&memcg->css))
return vm_swappiness;
return memcg->swappiness;
@@ -1805,12 +1787,11 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
for_each_mem_cgroup_tree(iter, memcg) {
- struct cgroup *cgroup = iter->css.cgroup;
- struct cgroup_iter it;
+ struct css_task_iter it;
struct task_struct *task;
- cgroup_iter_start(cgroup, &it);
- while ((task = cgroup_iter_next(cgroup, &it))) {
+ css_task_iter_start(&iter->css, &it);
+ while ((task = css_task_iter_next(&it))) {
switch (oom_scan_process_thread(task, totalpages, NULL,
false)) {
case OOM_SCAN_SELECT:
@@ -1823,7 +1804,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
case OOM_SCAN_CONTINUE:
continue;
case OOM_SCAN_ABORT:
- cgroup_iter_end(cgroup, &it);
+ css_task_iter_end(&it);
mem_cgroup_iter_break(memcg, iter);
if (chosen)
put_task_struct(chosen);
@@ -1840,7 +1821,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
get_task_struct(chosen);
}
}
- cgroup_iter_end(cgroup, &it);
+ css_task_iter_end(&it);
}
if (!chosen)
@@ -2954,10 +2935,10 @@ static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p)
}
#ifdef CONFIG_SLABINFO
-static int mem_cgroup_slabinfo_read(struct cgroup *cont, struct cftype *cft,
- struct seq_file *m)
+static int mem_cgroup_slabinfo_read(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *m)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
struct memcg_cache_params *params;
if (!memcg_can_account_kmem(memcg))
@@ -3195,11 +3176,11 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
if (!s->memcg_params)
return -ENOMEM;
- INIT_WORK(&s->memcg_params->destroy,
- kmem_cache_destroy_work_func);
if (memcg) {
s->memcg_params->memcg = memcg;
s->memcg_params->root_cache = root_cache;
+ INIT_WORK(&s->memcg_params->destroy,
+ kmem_cache_destroy_work_func);
} else
s->memcg_params->is_root_cache = true;
@@ -4943,10 +4924,10 @@ static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg)
*/
static inline bool __memcg_has_children(struct mem_cgroup *memcg)
{
- struct cgroup *pos;
+ struct cgroup_subsys_state *pos;
/* bounce at first found */
- cgroup_for_each_child(pos, memcg->css.cgroup)
+ css_for_each_child(pos, &memcg->css)
return true;
return false;
}
@@ -5002,9 +4983,10 @@ static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
return 0;
}
-static int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
+static int mem_cgroup_force_empty_write(struct cgroup_subsys_state *css,
+ unsigned int event)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
int ret;
if (mem_cgroup_is_root(memcg))
@@ -5017,21 +4999,18 @@ static int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
}
-static u64 mem_cgroup_hierarchy_read(struct cgroup *cont, struct cftype *cft)
+static u64 mem_cgroup_hierarchy_read(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- return mem_cgroup_from_cont(cont)->use_hierarchy;
+ return mem_cgroup_from_css(css)->use_hierarchy;
}
-static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
- u64 val)
+static int mem_cgroup_hierarchy_write(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 val)
{
int retval = 0;
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
- struct cgroup *parent = cont->parent;
- struct mem_cgroup *parent_memcg = NULL;
-
- if (parent)
- parent_memcg = mem_cgroup_from_cont(parent);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ struct mem_cgroup *parent_memcg = mem_cgroup_from_css(css_parent(&memcg->css));
mutex_lock(&memcg_create_mutex);
@@ -5101,11 +5080,11 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap)
return val << PAGE_SHIFT;
}
-static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
- struct file *file, char __user *buf,
- size_t nbytes, loff_t *ppos)
+static ssize_t mem_cgroup_read(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct file *file,
+ char __user *buf, size_t nbytes, loff_t *ppos)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
char str[64];
u64 val;
int name, len;
@@ -5138,11 +5117,11 @@ static ssize_t mem_cgroup_read(struct cgroup *cont, struct cftype *cft,
return simple_read_from_buffer(buf, nbytes, ppos, str, len);
}
-static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
+static int memcg_update_kmem_limit(struct cgroup_subsys_state *css, u64 val)
{
int ret = -EINVAL;
#ifdef CONFIG_MEMCG_KMEM
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
/*
* For simplicity, we won't allow this to be disabled. It also can't
* be changed if the cgroup has children already, or if tasks had
@@ -5158,7 +5137,7 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
mutex_lock(&memcg_create_mutex);
mutex_lock(&set_limit_mutex);
if (!memcg->kmem_account_flags && val != RESOURCE_MAX) {
- if (cgroup_task_count(cont) || memcg_has_children(memcg)) {
+ if (cgroup_task_count(css->cgroup) || memcg_has_children(memcg)) {
ret = -EBUSY;
goto out;
}
@@ -5228,10 +5207,10 @@ out:
* The user of this function is...
* RES_LIMIT.
*/
-static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
+static int mem_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
const char *buffer)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
enum res_type type;
int name;
unsigned long long val;
@@ -5255,7 +5234,7 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
else if (type == _MEMSWAP)
ret = mem_cgroup_resize_memsw_limit(memcg, val);
else if (type == _KMEM)
- ret = memcg_update_kmem_limit(cont, val);
+ ret = memcg_update_kmem_limit(css, val);
else
return -EINVAL;
break;
@@ -5283,18 +5262,15 @@ static int mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg,
unsigned long long *mem_limit, unsigned long long *memsw_limit)
{
- struct cgroup *cgroup;
unsigned long long min_limit, min_memsw_limit, tmp;
min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT);
min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT);
- cgroup = memcg->css.cgroup;
if (!memcg->use_hierarchy)
goto out;
- while (cgroup->parent) {
- cgroup = cgroup->parent;
- memcg = mem_cgroup_from_cont(cgroup);
+ while (css_parent(&memcg->css)) {
+ memcg = mem_cgroup_from_css(css_parent(&memcg->css));
if (!memcg->use_hierarchy)
break;
tmp = res_counter_read_u64(&memcg->res, RES_LIMIT);
@@ -5307,9 +5283,9 @@ out:
*memsw_limit = min_memsw_limit;
}
-static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
+static int mem_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
int name;
enum res_type type;
@@ -5342,17 +5318,17 @@ static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
return 0;
}
-static u64 mem_cgroup_move_charge_read(struct cgroup *cgrp,
+static u64 mem_cgroup_move_charge_read(struct cgroup_subsys_state *css,
struct cftype *cft)
{
- return mem_cgroup_from_cont(cgrp)->move_charge_at_immigrate;
+ return mem_cgroup_from_css(css)->move_charge_at_immigrate;
}
#ifdef CONFIG_MMU
-static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
+static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
struct cftype *cft, u64 val)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
if (val >= (1 << NR_MOVE_TYPE))
return -EINVAL;
@@ -5367,7 +5343,7 @@ static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
return 0;
}
#else
-static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
+static int mem_cgroup_move_charge_write(struct cgroup_subsys_state *css,
struct cftype *cft, u64 val)
{
return -ENOSYS;
@@ -5375,13 +5351,13 @@ static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
#endif
#ifdef CONFIG_NUMA
-static int memcg_numa_stat_show(struct cgroup *cont, struct cftype *cft,
- struct seq_file *m)
+static int memcg_numa_stat_show(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *m)
{
int nid;
unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
unsigned long node_nr;
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
seq_printf(m, "total=%lu", total_nr);
@@ -5426,10 +5402,10 @@ static inline void mem_cgroup_lru_names_not_uptodate(void)
BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
}
-static int memcg_stat_show(struct cgroup *cont, struct cftype *cft,
+static int memcg_stat_show(struct cgroup_subsys_state *css, struct cftype *cft,
struct seq_file *m)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
struct mem_cgroup *mi;
unsigned int i;
@@ -5513,27 +5489,23 @@ static int memcg_stat_show(struct cgroup *cont, struct cftype *cft,
return 0;
}
-static u64 mem_cgroup_swappiness_read(struct cgroup *cgrp, struct cftype *cft)
+static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css,
+ struct cftype *cft)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
return mem_cgroup_swappiness(memcg);
}
-static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
- u64 val)
+static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css,
+ struct cftype *cft, u64 val)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
- struct mem_cgroup *parent;
-
- if (val > 100)
- return -EINVAL;
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));
- if (cgrp->parent == NULL)
+ if (val > 100 || !parent)
return -EINVAL;
- parent = mem_cgroup_from_cont(cgrp->parent);
-
mutex_lock(&memcg_create_mutex);
/* If under hierarchy, only empty-root can set this value */
@@ -5636,10 +5608,10 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg)
mem_cgroup_oom_notify_cb(iter);
}
-static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
+static int mem_cgroup_usage_register_event(struct cgroup_subsys_state *css,
struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
struct mem_cgroup_thresholds *thresholds;
struct mem_cgroup_threshold_ary *new;
enum res_type type = MEMFILE_TYPE(cft->private);
@@ -5719,10 +5691,10 @@ unlock:
return ret;
}
-static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
+static void mem_cgroup_usage_unregister_event(struct cgroup_subsys_state *css,
struct cftype *cft, struct eventfd_ctx *eventfd)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
struct mem_cgroup_thresholds *thresholds;
struct mem_cgroup_threshold_ary *new;
enum res_type type = MEMFILE_TYPE(cft->private);
@@ -5798,10 +5770,10 @@ unlock:
mutex_unlock(&memcg->thresholds_lock);
}
-static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
+static int mem_cgroup_oom_register_event(struct cgroup_subsys_state *css,
struct cftype *cft, struct eventfd_ctx *eventfd, const char *args)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
struct mem_cgroup_eventfd_list *event;
enum res_type type = MEMFILE_TYPE(cft->private);
@@ -5823,10 +5795,10 @@ static int mem_cgroup_oom_register_event(struct cgroup *cgrp,
return 0;
}
-static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
+static void mem_cgroup_oom_unregister_event(struct cgroup_subsys_state *css,
struct cftype *cft, struct eventfd_ctx *eventfd)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
struct mem_cgroup_eventfd_list *ev, *tmp;
enum res_type type = MEMFILE_TYPE(cft->private);
@@ -5844,10 +5816,10 @@ static void mem_cgroup_oom_unregister_event(struct cgroup *cgrp,
spin_unlock(&memcg_oom_lock);
}
-static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
+static int mem_cgroup_oom_control_read(struct cgroup_subsys_state *css,
struct cftype *cft, struct cgroup_map_cb *cb)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
cb->fill(cb, "oom_kill_disable", memcg->oom_kill_disable);
@@ -5858,18 +5830,16 @@ static int mem_cgroup_oom_control_read(struct cgroup *cgrp,
return 0;
}
-static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
+static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css,
struct cftype *cft, u64 val)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
- struct mem_cgroup *parent;
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(&memcg->css));
/* cannot set to root cgroup and only 0 and 1 are allowed */
- if (!cgrp->parent || !((val == 0) || (val == 1)))
+ if (!parent || !((val == 0) || (val == 1)))
return -EINVAL;
- parent = mem_cgroup_from_cont(cgrp->parent);
-
mutex_lock(&memcg_create_mutex);
/* oom-kill-disable is a flag for subhierarchy. */
if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
@@ -6228,7 +6198,7 @@ static void __init mem_cgroup_soft_limit_tree_init(void)
}
static struct cgroup_subsys_state * __ref
-mem_cgroup_css_alloc(struct cgroup *cont)
+mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct mem_cgroup *memcg;
long error = -ENOMEM;
@@ -6243,7 +6213,7 @@ mem_cgroup_css_alloc(struct cgroup *cont)
goto free_out;
/* root ? */
- if (cont->parent == NULL) {
+ if (parent_css == NULL) {
root_mem_cgroup = memcg;
res_counter_init(&memcg->res, NULL);
res_counter_init(&memcg->memsw, NULL);
@@ -6265,17 +6235,16 @@ free_out:
}
static int
-mem_cgroup_css_online(struct cgroup *cont)
+mem_cgroup_css_online(struct cgroup_subsys_state *css)
{
- struct mem_cgroup *memcg, *parent;
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ struct mem_cgroup *parent = mem_cgroup_from_css(css_parent(css));
int error = 0;
- if (!cont->parent)
+ if (!parent)
return 0;
mutex_lock(&memcg_create_mutex);
- memcg = mem_cgroup_from_cont(cont);
- parent = mem_cgroup_from_cont(cont->parent);
memcg->use_hierarchy = parent->use_hierarchy;
memcg->oom_kill_disable = parent->oom_kill_disable;
@@ -6326,20 +6295,21 @@ static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg)
mem_cgroup_iter_invalidate(root_mem_cgroup);
}
-static void mem_cgroup_css_offline(struct cgroup *cont)
+static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
kmem_cgroup_css_offline(memcg);
mem_cgroup_invalidate_reclaim_iterators(memcg);
mem_cgroup_reparent_charges(memcg);
mem_cgroup_destroy_all_caches(memcg);
+ vmpressure_cleanup(&memcg->vmpressure);
}
-static void mem_cgroup_css_free(struct cgroup *cont)
+static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
memcg_destroy_kmem(memcg);
__mem_cgroup_free(memcg);
@@ -6709,12 +6679,12 @@ static void mem_cgroup_clear_mc(void)
mem_cgroup_end_move(from);
}
-static int mem_cgroup_can_attach(struct cgroup *cgroup,
+static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
struct task_struct *p = cgroup_taskset_first(tset);
int ret = 0;
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cgroup);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
unsigned long move_charge_at_immigrate;
/*
@@ -6756,7 +6726,7 @@ static int mem_cgroup_can_attach(struct cgroup *cgroup,
return ret;
}
-static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
+static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
mem_cgroup_clear_mc();
@@ -6904,7 +6874,7 @@ retry:
up_read(&mm->mmap_sem);
}
-static void mem_cgroup_move_task(struct cgroup *cont,
+static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
struct task_struct *p = cgroup_taskset_first(tset);
@@ -6919,16 +6889,16 @@ static void mem_cgroup_move_task(struct cgroup *cont,
mem_cgroup_clear_mc();
}
#else /* !CONFIG_MMU */
-static int mem_cgroup_can_attach(struct cgroup *cgroup,
+static int mem_cgroup_can_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
return 0;
}
-static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
+static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
}
-static void mem_cgroup_move_task(struct cgroup *cont,
+static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
struct cgroup_taskset *tset)
{
}
@@ -6938,15 +6908,15 @@ static void mem_cgroup_move_task(struct cgroup *cont,
* Cgroup retains root cgroups across [un]mount cycles making it necessary
* to verify sane_behavior flag on each mount attempt.
*/
-static void mem_cgroup_bind(struct cgroup *root)
+static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
{
/*
* use_hierarchy is forced with sane_behavior. cgroup core
* guarantees that @root doesn't have any children, so turning it
* on for the root memcg is enough.
*/
- if (cgroup_sane_behavior(root))
- mem_cgroup_from_cont(root)->use_hierarchy = true;
+ if (cgroup_sane_behavior(root_css->cgroup))
+ mem_cgroup_from_css(root_css)->use_hierarchy = true;
}
struct cgroup_subsys mem_cgroup_subsys = {
@@ -6968,7 +6938,6 @@ struct cgroup_subsys mem_cgroup_subsys = {
#ifdef CONFIG_MEMCG_SWAP
static int __init enable_swap_account(char *s)
{
- /* consider enabled if no parameter or 1 is given */
if (!strcmp(s, "1"))
really_do_swap_account = 1;
else if (!strcmp(s, "0"))
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index b6fefcf13cb..d84c5e5331b 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1286,7 +1286,10 @@ static void memory_failure_work_func(struct work_struct *work)
spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
if (!gotten)
break;
- memory_failure(entry.pfn, entry.trapno, entry.flags);
+ if (entry.flags & MF_SOFT_OFFLINE)
+ soft_offline_page(pfn_to_page(entry.pfn), entry.flags);
+ else
+ memory_failure(entry.pfn, entry.trapno, entry.flags);
}
}
diff --git a/mm/memory.c b/mm/memory.c
index 1ce2e2a734f..b3c6bf9a398 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -209,14 +209,15 @@ static int tlb_next_batch(struct mmu_gather *tlb)
* tear-down from @mm. The @fullmm argument is used when @mm is without
* users and we're going to destroy the full address space (exit/execve).
*/
-void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm)
+void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
{
tlb->mm = mm;
- tlb->fullmm = fullmm;
+ /* Is it from 0 to ~0? */
+ tlb->fullmm = !(start | (end+1));
tlb->need_flush_all = 0;
- tlb->start = -1UL;
- tlb->end = 0;
+ tlb->start = start;
+ tlb->end = end;
tlb->need_flush = 0;
tlb->local.next = NULL;
tlb->local.nr = 0;
@@ -256,8 +257,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e
{
struct mmu_gather_batch *batch, *next;
- tlb->start = start;
- tlb->end = end;
tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
@@ -1099,7 +1098,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
spinlock_t *ptl;
pte_t *start_pte;
pte_t *pte;
- unsigned long range_start = addr;
again:
init_rss_vec(rss);
@@ -1141,9 +1139,12 @@ again:
continue;
if (unlikely(details) && details->nonlinear_vma
&& linear_page_index(details->nonlinear_vma,
- addr) != page->index)
- set_pte_at(mm, addr, pte,
- pgoff_to_pte(page->index));
+ addr) != page->index) {
+ pte_t ptfile = pgoff_to_pte(page->index);
+ if (pte_soft_dirty(ptent))
+ pte_file_mksoft_dirty(ptfile);
+ set_pte_at(mm, addr, pte, ptfile);
+ }
if (PageAnon(page))
rss[MM_ANONPAGES]--;
else {
@@ -1202,17 +1203,25 @@ again:
* and page-free while holding it.
*/
if (force_flush) {
+ unsigned long old_end;
+
force_flush = 0;
-#ifdef HAVE_GENERIC_MMU_GATHER
- tlb->start = range_start;
+ /*
+ * Flush the TLB just for the previous segment,
+ * then update the range to be the remaining
+ * TLB range.
+ */
+ old_end = tlb->end;
tlb->end = addr;
-#endif
+
tlb_flush_mmu(tlb);
- if (addr != end) {
- range_start = addr;
+
+ tlb->start = addr;
+ tlb->end = old_end;
+
+ if (addr != end)
goto again;
- }
}
return addr;
@@ -1397,7 +1406,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end = start + size;
lru_add_drain();
- tlb_gather_mmu(&tlb, mm, 0);
+ tlb_gather_mmu(&tlb, mm, start, end);
update_hiwater_rss(mm);
mmu_notifier_invalidate_range_start(mm, start, end);
for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
@@ -1423,7 +1432,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
unsigned long end = address + size;
lru_add_drain();
- tlb_gather_mmu(&tlb, mm, 0);
+ tlb_gather_mmu(&tlb, mm, address, end);
update_hiwater_rss(mm);
mmu_notifier_invalidate_range_start(mm, address, end);
unmap_single_vma(&tlb, vma, address, end, details);
@@ -3115,6 +3124,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
exclusive = 1;
}
flush_icache_page(vma, page);
+ if (pte_swp_soft_dirty(orig_pte))
+ pte = pte_mksoft_dirty(pte);
set_pte_at(mm, address, page_table, pte);
if (page == swapcache)
do_page_add_anon_rmap(page, vma, address, exclusive);
@@ -3408,6 +3419,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
entry = mk_pte(page, vma->vm_page_prot);
if (flags & FAULT_FLAG_WRITE)
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ else if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte))
+ pte_mksoft_dirty(entry);
if (anon) {
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
@@ -4066,6 +4079,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
return len;
}
+EXPORT_SYMBOL_GPL(generic_access_phys);
#endif
/*
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 74310017296..4baf12e534d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -732,7 +732,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
if (prev) {
vma = prev;
next = vma->vm_next;
- continue;
+ if (mpol_equal(vma_policy(vma), new_pol))
+ continue;
+ /* vma_merge() joined vma && vma->next, case 8 */
+ goto replace;
}
if (vma->vm_start != vmstart) {
err = split_vma(vma->vm_mm, vma, vmstart, 1);
@@ -744,6 +747,7 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
if (err)
goto out;
}
+ replace:
err = vma_replace_policy(vma, new_pol);
if (err)
goto out;
diff --git a/mm/mmap.c b/mm/mmap.c
index fbad7b09109..f9c97d10b87 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -865,7 +865,7 @@ again: remove_next = 1 + (end > next->vm_end);
if (next->anon_vma)
anon_vma_merge(vma, next);
mm->map_count--;
- vma_set_policy(vma, vma_policy(next));
+ mpol_put(vma_policy(next));
kmem_cache_free(vm_area_cachep, next);
/*
* In mprotect's case 6 (see comments on vma_merge),
@@ -2336,7 +2336,7 @@ static void unmap_region(struct mm_struct *mm,
struct mmu_gather tlb;
lru_add_drain();
- tlb_gather_mmu(&tlb, mm, 0);
+ tlb_gather_mmu(&tlb, mm, start, end);
update_hiwater_rss(mm);
unmap_vmas(&tlb, vma, start, end);
free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
@@ -2709,7 +2709,7 @@ void exit_mmap(struct mm_struct *mm)
lru_add_drain();
flush_cache_mm(mm);
- tlb_gather_mmu(&tlb, mm, 1);
+ tlb_gather_mmu(&tlb, mm, 0, -1);
/* update_hiwater_rss(mm) here? but nobody should be looking */
/* Use -1 here to ensure all VMAs in the mm are unmapped */
unmap_vmas(&tlb, vma, 0, -1);
diff --git a/mm/mremap.c b/mm/mremap.c
index 457d34ef3bf..0843feb66f3 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -15,6 +15,7 @@
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
+#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
@@ -69,6 +70,23 @@ static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
return pmd;
}
+static pte_t move_soft_dirty_pte(pte_t pte)
+{
+ /*
+ * Set the soft dirty bit so we can notice
+ * in userspace that the ptes were moved.
+ */
+#ifdef CONFIG_MEM_SOFT_DIRTY
+ if (pte_present(pte))
+ pte = pte_mksoft_dirty(pte);
+ else if (is_swap_pte(pte))
+ pte = pte_swp_mksoft_dirty(pte);
+ else if (pte_file(pte))
+ pte = pte_file_mksoft_dirty(pte);
+#endif
+ return pte;
+}
+
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
unsigned long old_addr, unsigned long old_end,
struct vm_area_struct *new_vma, pmd_t *new_pmd,
@@ -126,7 +144,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
continue;
pte = ptep_get_and_clear(mm, old_addr, old_pte);
pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
- set_pte_at(mm, new_addr, new_pte, pte_mksoft_dirty(pte));
+ pte = move_soft_dirty_pte(pte);
+ set_pte_at(mm, new_addr, new_pte, pte);
}
arch_leave_lazy_mmu_mode();
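The soft-dirty bits that move_soft_dirty_pte() (and the fremap, memory and rmap hunks above) take care to preserve across pte rewrites are what userspace reads back as bit 55 of each /proc/<pid>/pagemap entry, after clearing them via /proc/<pid>/clear_refs. A hypothetical userspace check, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: read the pagemap entry for one virtual address and
 * test the soft-dirty flag (bit 55). */
static int page_is_soft_dirty(FILE *pagemap, unsigned long vaddr,
			      unsigned int page_shift)
{
	uint64_t entry;

	if (fseek(pagemap, (long)((vaddr >> page_shift) * sizeof(entry)), SEEK_SET))
		return -1;
	if (fread(&entry, sizeof(entry), 1, pagemap) != 1)
		return -1;

	return (int)((entry >> 55) & 1);
}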
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 79e451a78c9..98e75f2ac7b 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -288,7 +288,7 @@ enum oom_scan_t oom_scan_process_thread(struct task_struct *task,
/*
 * Simple selection loop. We choose the process with the highest
- * number of 'points'.
+ * number of 'points'. Returns -1 on scan abort.
*
* (not docbooked, we don't want this one cluttering up the manual)
*/
@@ -314,7 +314,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
continue;
case OOM_SCAN_ABORT:
rcu_read_unlock();
- return ERR_PTR(-1UL);
+ return (struct task_struct *)(-1UL);
case OOM_SCAN_OK:
break;
};
@@ -657,7 +657,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
panic("Out of memory and no killable processes...\n");
}
- if (PTR_ERR(p) != -1UL) {
+ if (p != (void *)-1UL) {
oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
nodemask, "Out of memory");
killed = 1;
diff --git a/mm/rmap.c b/mm/rmap.c
index cd356df4f71..07748e68b72 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -873,9 +873,6 @@ int page_referenced(struct page *page,
vm_flags);
if (we_locked)
unlock_page(page);
-
- if (page_test_and_clear_young(page_to_pfn(page)))
- referenced++;
}
out:
return referenced;
@@ -1236,6 +1233,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
swp_entry_to_pte(make_hwpoison_entry(page)));
} else if (PageAnon(page)) {
swp_entry_t entry = { .val = page_private(page) };
+ pte_t swp_pte;
if (PageSwapCache(page)) {
/*
@@ -1264,7 +1262,10 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
entry = make_migration_entry(page, pte_write(pteval));
}
- set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
+ swp_pte = swp_entry_to_pte(entry);
+ if (pte_soft_dirty(pteval))
+ swp_pte = pte_swp_mksoft_dirty(swp_pte);
+ set_pte_at(mm, address, pte, swp_pte);
BUG_ON(pte_file(*pte));
} else if (IS_ENABLED(CONFIG_MIGRATION) &&
(TTU_ACTION(flags) == TTU_MIGRATION)) {
@@ -1401,8 +1402,12 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
pteval = ptep_clear_flush(vma, address, pte);
/* If nonlinear, store the file page offset in the pte. */
- if (page->index != linear_page_index(vma, address))
- set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
+ if (page->index != linear_page_index(vma, address)) {
+ pte_t ptfile = pgoff_to_pte(page->index);
+ if (pte_soft_dirty(pteval))
+ pte_file_mksoft_dirty(ptfile);
+ set_pte_at(mm, address, pte, ptfile);
+ }
/* Move the dirty bit to the physical page now the pte is gone. */
if (pte_dirty(pteval))
diff --git a/mm/shmem.c b/mm/shmem.c
index a87990cf9f9..526149846d0 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1798,7 +1798,8 @@ static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
}
}
- offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
+ if (offset >= 0)
+ offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
mutex_unlock(&inode->i_mutex);
return offset;
}
@@ -2614,13 +2615,15 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
* tmpfs instance, limiting inodes to one per page of lowmem;
* but the internal instance is left unlimited.
*/
- if (!(sb->s_flags & MS_NOUSER)) {
+ if (!(sb->s_flags & MS_KERNMOUNT)) {
sbinfo->max_blocks = shmem_default_max_blocks();
sbinfo->max_inodes = shmem_default_max_inodes();
if (shmem_parse_options(data, sbinfo, false)) {
err = -EINVAL;
goto failed;
}
+ } else {
+ sb->s_flags |= MS_NOUSER;
}
sb->s_export_op = &shmem_export_ops;
sb->s_flags |= MS_NOSEC;
@@ -2830,8 +2833,7 @@ int __init shmem_init(void)
goto out2;
}
- shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
- shmem_fs_type.name, NULL);
+ shm_mnt = kern_mount(&shmem_fs_type);
if (IS_ERR(shm_mnt)) {
error = PTR_ERR(shm_mnt);
printk(KERN_ERR "Could not kern_mount tmpfs\n");
@@ -2908,14 +2910,8 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
/* common code */
-static char *shmem_dname(struct dentry *dentry, char *buffer, int buflen)
-{
- return dynamic_dname(dentry, buffer, buflen, "/%s (deleted)",
- dentry->d_name.name);
-}
-
static struct dentry_operations anon_ops = {
- .d_dname = shmem_dname
+ .d_dname = simple_dname
};
/**
diff --git a/mm/slab.h b/mm/slab.h
index 620ceeddbe1..a535033f7e9 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -162,6 +162,8 @@ static inline const char *cache_name(struct kmem_cache *s)
static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
{
+ if (!s->memcg_params)
+ return NULL;
return s->memcg_params->memcg_caches[idx];
}
diff --git a/mm/slub.c b/mm/slub.c
index 2b02d666bf6..e3ba1f2cf60 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1968,9 +1968,6 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
int pages;
int pobjects;
- if (!s->cpu_partial)
- return;
-
do {
pages = 0;
pobjects = 0;
diff --git a/mm/swap.c b/mm/swap.c
index 4a1d0d2c52f..62b78a6e224 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -512,12 +512,7 @@ EXPORT_SYMBOL(__lru_cache_add);
*/
void lru_cache_add(struct page *page)
{
- if (PageActive(page)) {
- VM_BUG_ON(PageUnevictable(page));
- } else if (PageUnevictable(page)) {
- VM_BUG_ON(PageActive(page));
- }
-
+ VM_BUG_ON(PageActive(page) && PageUnevictable(page));
VM_BUG_ON(PageLRU(page));
__lru_cache_add(page);
}
@@ -539,6 +534,7 @@ void add_page_to_unevictable_list(struct page *page)
spin_lock_irq(&zone->lru_lock);
lruvec = mem_cgroup_page_lruvec(page, zone);
+ ClearPageActive(page);
SetPageUnevictable(page);
SetPageLRU(page);
add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
@@ -774,8 +770,6 @@ EXPORT_SYMBOL(__pagevec_release);
void lru_add_page_tail(struct page *page, struct page *page_tail,
struct lruvec *lruvec, struct list_head *list)
{
- int uninitialized_var(active);
- enum lru_list lru;
const int file = 0;
VM_BUG_ON(!PageHead(page));
@@ -787,20 +781,6 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
if (!list)
SetPageLRU(page_tail);
- if (page_evictable(page_tail)) {
- if (PageActive(page)) {
- SetPageActive(page_tail);
- active = 1;
- lru = LRU_ACTIVE_ANON;
- } else {
- active = 0;
- lru = LRU_INACTIVE_ANON;
- }
- } else {
- SetPageUnevictable(page_tail);
- lru = LRU_UNEVICTABLE;
- }
-
if (likely(PageLRU(page)))
list_add_tail(&page_tail->lru, &page->lru);
else if (list) {
@@ -816,13 +796,13 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
* Use the standard add function to put page_tail on the list,
* but then correct its position so they all end up in order.
*/
- add_page_to_lru_list(page_tail, lruvec, lru);
+ add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
list_head = page_tail->lru.prev;
list_move_tail(&page_tail->lru, list_head);
}
if (!PageUnevictable(page))
- update_page_reclaim_stat(lruvec, file, active);
+ update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -833,7 +813,6 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
int active = PageActive(page);
enum lru_list lru = page_lru(page);
- VM_BUG_ON(PageUnevictable(page));
VM_BUG_ON(PageLRU(page));
SetPageLRU(page);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 36af6eeaa67..6cf2e60983b 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -866,6 +866,21 @@ unsigned int count_swap_pages(int type, int free)
}
#endif /* CONFIG_HIBERNATION */
+static inline int maybe_same_pte(pte_t pte, pte_t swp_pte)
+{
+#ifdef CONFIG_MEM_SOFT_DIRTY
+ /*
+ * When the pte keeps the soft dirty bit, the pte generated
+ * from the swap entry does not have it, but it is still the
+ * same pte from a logical point of view.
+ */
+ pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte);
+ return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty);
+#else
+ return pte_same(pte, swp_pte);
+#endif
+}
+
/*
* No need to decide whether this PTE shares the swap entry with others,
* just let do_wp_page work it out if a write is requested later - to
@@ -892,7 +907,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
}
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
- if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) {
+ if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) {
mem_cgroup_cancel_charge_swapin(memcg);
ret = 0;
goto out;
@@ -947,7 +962,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
* swapoff spends a _lot_ of time in this loop!
* Test inline before going to call unuse_pte.
*/
- if (unlikely(pte_same(*pte, swp_pte))) {
+ if (unlikely(maybe_same_pte(*pte, swp_pte))) {
pte_unmap(pte);
ret = unuse_pte(vma, pmd, addr, entry, page);
if (ret)
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 736a6011c2c..e0f62837c3f 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -74,15 +74,10 @@ static struct vmpressure *work_to_vmpressure(struct work_struct *work)
return container_of(work, struct vmpressure, work);
}
-static struct vmpressure *cg_to_vmpressure(struct cgroup *cg)
-{
- return css_to_vmpressure(cgroup_subsys_state(cg, mem_cgroup_subsys_id));
-}
-
static struct vmpressure *vmpressure_parent(struct vmpressure *vmpr)
{
- struct cgroup *cg = vmpressure_to_css(vmpr)->cgroup;
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cg);
+ struct cgroup_subsys_state *css = vmpressure_to_css(vmpr);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
memcg = parent_mem_cgroup(memcg);
if (!memcg)
@@ -180,12 +175,12 @@ static void vmpressure_work_fn(struct work_struct *work)
if (!vmpr->scanned)
return;
- mutex_lock(&vmpr->sr_lock);
+ spin_lock(&vmpr->sr_lock);
scanned = vmpr->scanned;
reclaimed = vmpr->reclaimed;
vmpr->scanned = 0;
vmpr->reclaimed = 0;
- mutex_unlock(&vmpr->sr_lock);
+ spin_unlock(&vmpr->sr_lock);
do {
if (vmpressure_event(vmpr, scanned, reclaimed))
@@ -240,13 +235,13 @@ void vmpressure(gfp_t gfp, struct mem_cgroup *memcg,
if (!scanned)
return;
- mutex_lock(&vmpr->sr_lock);
+ spin_lock(&vmpr->sr_lock);
vmpr->scanned += scanned;
vmpr->reclaimed += reclaimed;
scanned = vmpr->scanned;
- mutex_unlock(&vmpr->sr_lock);
+ spin_unlock(&vmpr->sr_lock);
- if (scanned < vmpressure_win || work_pending(&vmpr->work))
+ if (scanned < vmpressure_win)
return;
schedule_work(&vmpr->work);
}
@@ -283,7 +278,7 @@ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
/**
* vmpressure_register_event() - Bind vmpressure notifications to an eventfd
- * @cg: cgroup that is interested in vmpressure notifications
+ * @css: css that is interested in vmpressure notifications
* @cft: cgroup control files handle
* @eventfd: eventfd context to link notifications with
* @args: event arguments (used to set up a pressure level threshold)
@@ -298,10 +293,11 @@ void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio)
* cftype).register_event, and then cgroup core will handle everything by
* itself.
*/
-int vmpressure_register_event(struct cgroup *cg, struct cftype *cft,
- struct eventfd_ctx *eventfd, const char *args)
+int vmpressure_register_event(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct eventfd_ctx *eventfd,
+ const char *args)
{
- struct vmpressure *vmpr = cg_to_vmpressure(cg);
+ struct vmpressure *vmpr = css_to_vmpressure(css);
struct vmpressure_event *ev;
int level;
@@ -329,7 +325,7 @@ int vmpressure_register_event(struct cgroup *cg, struct cftype *cft,
/**
* vmpressure_unregister_event() - Unbind eventfd from vmpressure
- * @cg: cgroup handle
+ * @css: css handle
* @cft: cgroup control files handle
* @eventfd: eventfd context that was used to link vmpressure with the @cg
*
@@ -341,10 +337,11 @@ int vmpressure_register_event(struct cgroup *cg, struct cftype *cft,
* cftype).unregister_event, and then cgroup core will handle everything
* by itself.
*/
-void vmpressure_unregister_event(struct cgroup *cg, struct cftype *cft,
+void vmpressure_unregister_event(struct cgroup_subsys_state *css,
+ struct cftype *cft,
struct eventfd_ctx *eventfd)
{
- struct vmpressure *vmpr = cg_to_vmpressure(cg);
+ struct vmpressure *vmpr = css_to_vmpressure(css);
struct vmpressure_event *ev;
mutex_lock(&vmpr->events_lock);
@@ -367,8 +364,24 @@ void vmpressure_unregister_event(struct cgroup *cg, struct cftype *cft,
*/
void vmpressure_init(struct vmpressure *vmpr)
{
- mutex_init(&vmpr->sr_lock);
+ spin_lock_init(&vmpr->sr_lock);
mutex_init(&vmpr->events_lock);
INIT_LIST_HEAD(&vmpr->events);
INIT_WORK(&vmpr->work, vmpressure_work_fn);
}
+
+/**
+ * vmpressure_cleanup() - shuts down vmpressure control structure
+ * @vmpr: Structure to be cleaned up
+ *
+ * This function should be called before the structure in which it is
+ * embedded is cleaned up.
+ */
+void vmpressure_cleanup(struct vmpressure *vmpr)
+{
+ /*
+ * Make sure there is no pending work before eventfd infrastructure
+ * goes away.
+ */
+ flush_work(&vmpr->work);
+}
diff --git a/mm/zbud.c b/mm/zbud.c
index 9bb4710e358..ad1e781284f 100644
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -257,7 +257,7 @@ int zbud_alloc(struct zbud_pool *pool, int size, gfp_t gfp,
if (size <= 0 || gfp & __GFP_HIGHMEM)
return -EINVAL;
- if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED)
+ if (size > PAGE_SIZE - ZHDR_SIZE_ALIGNED - CHUNK_SIZE)
return -ENOSPC;
chunks = size_to_chunks(size);
spin_lock(&pool->lock);
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 2fb2d88e8c2..61fc573f114 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -210,6 +210,7 @@ out_vid_del:
static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
{
struct net_device *new_dev;
+ struct vlan_dev_priv *vlan;
struct net *net = dev_net(real_dev);
struct vlan_net *vn = net_generic(net, vlan_net_id);
char name[IFNAMSIZ];
@@ -260,11 +261,12 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
new_dev->mtu = real_dev->mtu;
new_dev->priv_flags |= (real_dev->priv_flags & IFF_UNICAST_FLT);
- vlan_dev_priv(new_dev)->vlan_proto = htons(ETH_P_8021Q);
- vlan_dev_priv(new_dev)->vlan_id = vlan_id;
- vlan_dev_priv(new_dev)->real_dev = real_dev;
- vlan_dev_priv(new_dev)->dent = NULL;
- vlan_dev_priv(new_dev)->flags = VLAN_FLAG_REORDER_HDR;
+ vlan = vlan_dev_priv(new_dev);
+ vlan->vlan_proto = htons(ETH_P_8021Q);
+ vlan->vlan_id = vlan_id;
+ vlan->real_dev = real_dev;
+ vlan->dent = NULL;
+ vlan->flags = VLAN_FLAG_REORDER_HDR;
new_dev->rtnl_link_ops = &vlan_link_ops;
err = register_vlan_dev(new_dev);
@@ -459,6 +461,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
case NETDEV_NOTIFY_PEERS:
case NETDEV_BONDING_FAILOVER:
+ case NETDEV_RESEND_IGMP:
/* Propagate to vlan devices */
vlan_group_for_each_dev(grp, i, vlandev)
call_netdevice_notifiers(event, vlandev);
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 4a78c4de9f2..6ee48aac776 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -91,7 +91,12 @@ EXPORT_SYMBOL(__vlan_find_dev_deep);
struct net_device *vlan_dev_real_dev(const struct net_device *dev)
{
- return vlan_dev_priv(dev)->real_dev;
+ struct net_device *ret = vlan_dev_priv(dev)->real_dev;
+
+ while (is_vlan_dev(ret))
+ ret = vlan_dev_priv(ret)->real_dev;
+
+ return ret;
}
EXPORT_SYMBOL(vlan_dev_real_dev);
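
[editor's note] With stacked (Q-in-Q) VLANs, vlan_dev_real_dev() now walks down the chain until it reaches a device that is not itself a VLAN, instead of returning only the immediate lower device. A self-contained sketch of the same walk-to-the-bottom pattern (hypothetical struct and field names, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-in for a net_device that may sit on top of another one */
struct dev {
	const char *name;
	struct dev *lower;	/* NULL when this is a real (non-VLAN) device */
};

static bool is_vlan(const struct dev *d)
{
	return d->lower != NULL;
}

/* same shape as the patched vlan_dev_real_dev(): keep descending while the
 * current device is still a VLAN */
static struct dev *real_dev(struct dev *d)
{
	while (is_vlan(d))
		d = d->lower;
	return d;
}

int main(void)
{
	struct dev eth0  = { "eth0", NULL };
	struct dev vlan1 = { "eth0.100", &eth0 };
	struct dev vlan2 = { "eth0.100.200", &vlan1 };	/* Q-in-Q */

	printf("%s -> %s\n", vlan2.name, real_dev(&vlan2)->name);
	return 0;
}
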
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 1cd3d2a406f..09bf1c38805 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -107,10 +107,10 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
u16 vlan_tci = 0;
int rc;
- if (!(vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+ if (!(vlan->flags & VLAN_FLAG_REORDER_HDR)) {
vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
- vlan_tci = vlan_dev_priv(dev)->vlan_id;
+ vlan_tci = vlan->vlan_id;
vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
vhdr->h_vlan_TCI = htons(vlan_tci);
@@ -133,7 +133,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
saddr = dev->dev_addr;
/* Now make the underlying real hard header */
- dev = vlan_dev_priv(dev)->real_dev;
+ dev = vlan->real_dev;
rc = dev_hard_header(skb, dev, type, daddr, saddr, len + vhdrlen);
if (rc > 0)
rc += vhdrlen;
@@ -582,7 +582,7 @@ static int vlan_dev_init(struct net_device *dev)
dev->dev_id = real_dev->dev_id;
if (is_zero_ether_addr(dev->dev_addr))
- memcpy(dev->dev_addr, real_dev->dev_addr, dev->addr_len);
+ eth_hw_addr_inherit(dev, real_dev);
if (is_zero_ether_addr(dev->broadcast))
memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
diff --git a/net/9p/client.c b/net/9p/client.c
index 8b93cae2d11..ba93bdab270 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -658,17 +658,12 @@ static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq)
/*
* if we haven't received a response for oldreq,
- * remove it from the list, and notify the transport
- * layer that the reply will never arrive.
+ * remove it from the list
*/
- spin_lock(&c->lock);
if (oldreq->status == REQ_STATUS_FLSH) {
+ spin_lock(&c->lock);
list_del(&oldreq->req_list);
spin_unlock(&c->lock);
- if (c->trans_mod->cancelled)
- c->trans_mod->cancelled(c, req);
- } else {
- spin_unlock(&c->lock);
}
p9_free_req(c, req);
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 928f2bb9bf8..8f68df5d297 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -588,17 +588,6 @@ static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
return 1;
}
-/* A request has been fully flushed without a reply.
- * That means we have posted one buffer in excess.
- */
-static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req)
-{
- struct p9_trans_rdma *rdma = client->trans;
-
- atomic_inc(&rdma->excess_rc);
- return 0;
-}
-
/**
 * trans_create_rdma - Transport method for creating a transport instance
* @client: client instance
diff --git a/net/Kconfig b/net/Kconfig
index 37702491abe..ee021366727 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -244,7 +244,7 @@ config NETPRIO_CGROUP
Cgroup subsystem for use in assigning processes to network priorities on
a per-interface basis
-config NET_LL_RX_POLL
+config NET_RX_BUSY_POLL
boolean
default y
@@ -281,7 +281,7 @@ menu "Network testing"
config NET_PKTGEN
tristate "Packet Generator (USE WITH CAUTION)"
- depends on PROC_FS
+ depends on INET && PROC_FS
---help---
This module will inject preconfigured packets, at a configurable
rate, out of a given interface. It is used for network interface
diff --git a/net/appletalk/atalk_proc.c b/net/appletalk/atalk_proc.c
index c30f3a0717f..af46bc49e1e 100644
--- a/net/appletalk/atalk_proc.c
+++ b/net/appletalk/atalk_proc.c
@@ -178,7 +178,7 @@ static int atalk_seq_socket_show(struct seq_file *seq, void *v)
at = at_sk(s);
seq_printf(seq, "%02X %04X:%02X:%02X %04X:%02X:%02X %08X:%08X "
- "%02X %d\n",
+ "%02X %u\n",
s->sk_type, ntohs(at->src_net), at->src_node, at->src_port,
ntohs(at->dest_net), at->dest_node, at->dest_port,
sk_wmem_alloc_get(s),
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 62da5278014..0a8a80cd4bf 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -28,6 +28,22 @@
#include "bat_algo.h"
#include "network-coding.h"
+
+/**
+ * batadv_dup_status - duplicate status
+ * @BATADV_NO_DUP: the packet is no duplicate
+ * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the
+ * neighbor)
+ * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor
+ * @BATADV_PROTECTED: originator is currently protected (after reboot)
+ */
+enum batadv_dup_status {
+ BATADV_NO_DUP = 0,
+ BATADV_ORIG_DUP,
+ BATADV_NEIGH_DUP,
+ BATADV_PROTECTED,
+};
+
/**
* batadv_ring_buffer_set - update the ring buffer with the given value
* @lq_recv: pointer to the ring buffer
@@ -71,21 +87,6 @@ static uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[])
return (uint8_t)(sum / count);
}
-/*
- * batadv_dup_status - duplicate status
- * @BATADV_NO_DUP: the packet is no duplicate
- * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the
- * neighbor)
- * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor
- * @BATADV_PROTECTED: originator is currently protected (after reboot)
- */
-enum batadv_dup_status {
- BATADV_NO_DUP = 0,
- BATADV_ORIG_DUP,
- BATADV_NEIGH_DUP,
- BATADV_PROTECTED,
-};
-
static struct batadv_neigh_node *
batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
const uint8_t *neigh_addr,
@@ -478,6 +479,7 @@ static void batadv_iv_ogm_aggregate_new(const unsigned char *packet_buff,
kfree(forw_packet_aggr);
goto out;
}
+ forw_packet_aggr->skb->priority = TC_PRIO_CONTROL;
skb_reserve(forw_packet_aggr->skb, ETH_HLEN);
skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index e14531f1ce1..264de88db32 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1529,6 +1529,8 @@ out:
* in these cases, the skb is further handled by this function and
* returns 1, otherwise it returns 0 and the caller shall further
* process the skb.
+ *
+ * This call might reallocate skb data.
*/
int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid)
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index f105219f4a4..1ce4b8763ef 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -190,6 +190,33 @@ next:
return curr_gw;
}
+/**
+ * batadv_gw_check_client_stop - check if client mode has been switched off
+ * @bat_priv: the bat priv with all the soft interface information
+ *
+ * This function assumes the caller has checked that the gw state *is actually
+ * changing*. This function is not supposed to be called when there is no state
+ * change.
+ */
+void batadv_gw_check_client_stop(struct batadv_priv *bat_priv)
+{
+ struct batadv_gw_node *curr_gw;
+
+ if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT)
+ return;
+
+ curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
+ if (!curr_gw)
+ return;
+
+ /* if batman-adv is switching the gw client mode off and a gateway was
+ * already selected, send a DEL uevent
+ */
+ batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_DEL, NULL);
+
+ batadv_gw_node_free_ref(curr_gw);
+}
+
void batadv_gw_election(struct batadv_priv *bat_priv)
{
struct batadv_gw_node *curr_gw = NULL, *next_gw = NULL;
@@ -508,6 +535,7 @@ out:
return 0;
}
+/* this call might reallocate skb data */
static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len)
{
int ret = false;
@@ -568,6 +596,7 @@ out:
return ret;
}
+/* this call might reallocate skb data */
bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
{
struct ethhdr *ethhdr;
@@ -619,6 +648,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr)))
return false;
+
+ /* skb->data might have been reallocated by pskb_may_pull() */
+ ethhdr = (struct ethhdr *)skb->data;
+ if (ntohs(ethhdr->h_proto) == ETH_P_8021Q)
+ ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN);
+
udphdr = (struct udphdr *)(skb->data + *header_len);
*header_len += sizeof(*udphdr);
@@ -634,12 +669,14 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len)
return true;
}
+/* this call might reallocate skb data */
bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
- struct sk_buff *skb, struct ethhdr *ethhdr)
+ struct sk_buff *skb)
{
struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL;
struct batadv_orig_node *orig_dst_node = NULL;
struct batadv_gw_node *curr_gw = NULL;
+ struct ethhdr *ethhdr;
bool ret, out_of_range = false;
unsigned int header_len = 0;
uint8_t curr_tq_avg;
@@ -648,6 +685,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
if (!ret)
goto out;
+ ethhdr = (struct ethhdr *)skb->data;
orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source,
ethhdr->h_dest);
if (!orig_dst_node)
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 039902dca4a..ceef4ebe8bc 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -20,6 +20,7 @@
#ifndef _NET_BATMAN_ADV_GATEWAY_CLIENT_H_
#define _NET_BATMAN_ADV_GATEWAY_CLIENT_H_
+void batadv_gw_check_client_stop(struct batadv_priv *bat_priv);
void batadv_gw_deselect(struct batadv_priv *bat_priv);
void batadv_gw_election(struct batadv_priv *bat_priv);
struct batadv_orig_node *
@@ -34,7 +35,6 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv,
void batadv_gw_node_purge(struct batadv_priv *bat_priv);
int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset);
bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len);
-bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
- struct sk_buff *skb, struct ethhdr *ethhdr);
+bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb);
#endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */
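
[editor's note] Several of the batman-adv hunks above add "this call might reallocate skb data" notes and then re-derive ethhdr from skb->data after the call. The hazard is generic: once a buffer may be reallocated, any pointer computed into it beforehand is stale. A small self-contained C illustration of the same rule using realloc() (hypothetical names, not kernel code):

#include <stdlib.h>
#include <string.h>

struct buf {
	unsigned char *data;
	size_t len;
};

/* may move buf->data, just as pskb_may_pull()/batadv_bla_tx() may move
 * skb->data; returns 0 on success */
static int buf_ensure(struct buf *b, size_t need)
{
	unsigned char *n;

	if (b->len >= need)
		return 0;
	n = realloc(b->data, need);
	if (!n)
		return -1;
	memset(n + b->len, 0, need - b->len);
	b->data = n;
	b->len = need;
	return 0;
}

int main(void)
{
	struct buf b = { .data = malloc(14), .len = 14 };
	unsigned char *hdr;

	if (!b.data)
		return 1;
	hdr = b.data;			/* pointer into the buffer */

	if (buf_ensure(&b, 1500))	/* may reallocate b.data */
		return 1;

	hdr = b.data;			/* re-derive, as the patch does */
	hdr[0] = 0xff;

	free(b.data);
	return 0;
}
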
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index b27508b8085..5a99bb4b6b8 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -183,6 +183,7 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
goto out;
}
+ skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, ETH_HLEN);
icmp_packet = (struct batadv_icmp_packet_rr *)skb_put(skb, packet_len);
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 08125f3f606..c72d1bcdcf4 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -19,6 +19,10 @@
#include <linux/crc32c.h>
#include <linux/highmem.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/dsfield.h>
#include "main.h"
#include "sysfs.h"
#include "debugfs.h"
@@ -249,6 +253,60 @@ out:
return primary_if;
}
+/**
+ * batadv_skb_set_priority - sets skb priority according to packet content
+ * @skb: the packet to be sent
+ * @offset: offset to the packet content
+ *
+ * This function sets a value between 256 and 263 (802.1d priority), which
+ * can be interpreted by cfg80211 or other drivers.
+ */
+void batadv_skb_set_priority(struct sk_buff *skb, int offset)
+{
+ struct iphdr ip_hdr_tmp, *ip_hdr;
+ struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
+ struct ethhdr ethhdr_tmp, *ethhdr;
+ struct vlan_ethhdr *vhdr, vhdr_tmp;
+ u32 prio;
+
+ /* already set, do nothing */
+ if (skb->priority >= 256 && skb->priority <= 263)
+ return;
+
+ ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
+ if (!ethhdr)
+ return;
+
+ switch (ethhdr->h_proto) {
+ case htons(ETH_P_8021Q):
+ vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
+ sizeof(*vhdr), &vhdr_tmp);
+ if (!vhdr)
+ return;
+ prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
+ prio = prio >> VLAN_PRIO_SHIFT;
+ break;
+ case htons(ETH_P_IP):
+ ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
+ sizeof(*ip_hdr), &ip_hdr_tmp);
+ if (!ip_hdr)
+ return;
+ prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
+ break;
+ case htons(ETH_P_IPV6):
+ ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
+ sizeof(*ip6_hdr), &ip6_hdr_tmp);
+ if (!ip6_hdr)
+ return;
+ prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
+ break;
+ default:
+ return;
+ }
+
+ skb->priority = prio + 256;
+}
+
static int batadv_recv_unhandled_packet(struct sk_buff *skb,
struct batadv_hard_iface *recv_if)
{
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index 5e9aebb7d56..24675523930 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -26,7 +26,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2013.3.0"
+#define BATADV_SOURCE_VERSION "2013.4.0"
#endif
/* B.A.T.M.A.N. parameters */
@@ -184,6 +184,7 @@ void batadv_mesh_free(struct net_device *soft_iface);
int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);
struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq);
+void batadv_skb_set_priority(struct sk_buff *skb, int offset);
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype,
struct net_device *orig_dev);
@@ -253,7 +254,7 @@ static inline void batadv_dbg(int type __always_unused,
/* returns 1 if they are the same ethernet addr
*
- * note: can't use compare_ether_addr() as it requires aligned memory
+ * note: can't use ether_addr_equal() as it requires aligned memory
*/
static inline int batadv_compare_eth(const void *data1, const void *data2)
{
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 2f0bd3ffe6e..0439395d7ba 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -775,7 +775,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
struct batadv_neigh_node *neigh_node = NULL;
struct batadv_unicast_packet *unicast_packet;
struct ethhdr *ethhdr = eth_hdr(skb);
- int res, ret = NET_RX_DROP;
+ int res, hdr_len, ret = NET_RX_DROP;
struct sk_buff *new_skb;
unicast_packet = (struct batadv_unicast_packet *)skb->data;
@@ -835,6 +835,22 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
/* decrement ttl */
unicast_packet->header.ttl--;
+ switch (unicast_packet->header.packet_type) {
+ case BATADV_UNICAST_4ADDR:
+ hdr_len = sizeof(struct batadv_unicast_4addr_packet);
+ break;
+ case BATADV_UNICAST:
+ hdr_len = sizeof(struct batadv_unicast_packet);
+ break;
+ default:
+ /* other packet types not supported - yet */
+ hdr_len = -1;
+ break;
+ }
+
+ if (hdr_len > 0)
+ batadv_skb_set_priority(skb, hdr_len);
+
res = batadv_send_skb_to_orig(skb, orig_node, recv_if);
/* translate transmit result into receive result */
@@ -1193,6 +1209,8 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
if (batadv_bla_check_bcast_duplist(bat_priv, skb))
goto out;
+ batadv_skb_set_priority(skb, sizeof(struct batadv_bcast_packet));
+
/* rebroadcast packet */
batadv_add_bcast_packet_to_list(bat_priv, skb, 1);
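
[editor's note] batadv_skb_set_priority() maps the packet's user priority into skb->priority values 256..263: for VLAN-tagged frames the PCP field of the TCI is used directly, for IPv4/IPv6 the top three bits of the DS field are taken, and 256 is added so that cfg80211 and other drivers can recognise the result as an 802.1d priority. A standalone sketch of the same arithmetic (helper names invented for illustration):

#include <stdio.h>
#include <stdint.h>

#define VLAN_PRIO_MASK	0xe000	/* top 3 bits of the VLAN TCI */
#define VLAN_PRIO_SHIFT	13

/* 802.1d priority from a VLAN TCI (host byte order) */
static uint32_t prio_from_tci(uint16_t tci)
{
	return (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
}

/* 802.1d priority from an IPv4/IPv6 DS field: keep the DSCP bits (0xfc)
 * and shift so only the former IP-precedence bits remain */
static uint32_t prio_from_dsfield(uint8_t dsfield)
{
	return (dsfield & 0xfc) >> 5;
}

int main(void)
{
	uint8_t dsfield = 0xb8;				/* DSCP EF */
	uint16_t tci = (5 << VLAN_PRIO_SHIFT) | 100;	/* PCP 5, VID 100 */

	/* as in the patch, 256 is added to mark the value as an 802.1d priority */
	printf("EF traffic  -> skb->priority %u\n", 256 + prio_from_dsfield(dsfield));
	printf("PCP 5 frame -> skb->priority %u\n", 256 + prio_from_tci(tci));
	return 0;
}
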
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index e9ff8d80120..0266edd0fa7 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -67,7 +67,6 @@ int batadv_send_skb_packet(struct sk_buff *skb,
ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);
skb_set_network_header(skb, ETH_HLEN);
- skb->priority = TC_PRIO_CONTROL;
skb->protocol = __constant_htons(ETH_P_BATMAN);
skb->dev = hard_iface->net_dev;
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 700d0b49742..4493913f0d5 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -180,6 +180,9 @@ static int batadv_interface_tx(struct sk_buff *skb,
if (batadv_bla_tx(bat_priv, skb, vid))
goto dropped;
+ /* skb->data might have been reallocated by batadv_bla_tx() */
+ ethhdr = (struct ethhdr *)skb->data;
+
/* Register the client MAC in the transtable */
if (!is_multicast_ether_addr(ethhdr->h_source))
batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif);
@@ -220,8 +223,14 @@ static int batadv_interface_tx(struct sk_buff *skb,
default:
break;
}
+
+ /* reminder: ethhdr might have become unusable from here on
+ * (batadv_gw_is_dhcp_target() might have reallocated skb data)
+ */
}
+ batadv_skb_set_priority(skb, 0);
+
/* ethernet packet should be broadcasted */
if (do_bcast) {
primary_if = batadv_primary_if_get_selected(bat_priv);
@@ -266,7 +275,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
/* unicast packet */
} else {
if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) {
- ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr);
+ ret = batadv_gw_out_of_range(bat_priv, skb);
if (ret)
goto dropped;
}
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index 929e304dacb..4114b961bc2 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -385,6 +385,10 @@ static ssize_t batadv_store_gw_mode(struct kobject *kobj,
curr_gw_mode_str, buff);
batadv_gw_deselect(bat_priv);
+ /* always call batadv_gw_check_client_stop() before changing the gateway
+ * state
+ */
+ batadv_gw_check_client_stop(bat_priv);
atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp);
return count;
}
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 429aeef3d8b..34510f38708 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1626,6 +1626,7 @@ batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
if (!skb)
goto out;
+ skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, ETH_HLEN);
tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len);
tt_response->ttvn = ttvn;
@@ -1691,6 +1692,7 @@ static int batadv_send_tt_request(struct batadv_priv *bat_priv,
if (!skb)
goto out;
+ skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, ETH_HLEN);
tt_req_len = sizeof(*tt_request);
@@ -1788,6 +1790,7 @@ batadv_send_other_tt_response(struct batadv_priv *bat_priv,
if (!skb)
goto unlock;
+ skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, ETH_HLEN);
packet_pos = skb_put(skb, len);
tt_response = (struct batadv_tt_query_packet *)packet_pos;
@@ -1906,6 +1909,7 @@ batadv_send_my_tt_response(struct batadv_priv *bat_priv,
if (!skb)
goto unlock;
+ skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, ETH_HLEN);
packet_pos = skb_put(skb, len);
tt_response = (struct batadv_tt_query_packet *)packet_pos;
@@ -2240,6 +2244,7 @@ static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
if (!skb)
goto out;
+ skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, ETH_HLEN);
roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len);
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
index dc8b5d4dd63..48b31d33ce6 100644
--- a/net/batman-adv/unicast.c
+++ b/net/batman-adv/unicast.c
@@ -242,6 +242,8 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
frag_skb = dev_alloc_skb(data_len - (data_len / 2) + ucf_hdr_len);
if (!frag_skb)
goto dropped;
+
+ skb->priority = TC_PRIO_CONTROL;
skb_reserve(frag_skb, ucf_hdr_len);
unicast_packet = (struct batadv_unicast_packet *)skb->data;
@@ -326,7 +328,9 @@ static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size,
* @skb: the skb containing the payload to encapsulate
* @orig_node: the destination node
*
- * Returns false if the payload could not be encapsulated or true otherwise
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ *
+ * This call might reallocate skb data.
*/
static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
struct batadv_orig_node *orig_node)
@@ -343,7 +347,9 @@ static bool batadv_unicast_prepare_skb(struct sk_buff *skb,
* @orig_node: the destination node
* @packet_subtype: the batman 4addr packet subtype to use
*
- * Returns false if the payload could not be encapsulated or true otherwise
+ * Returns false if the payload could not be encapsulated or true otherwise.
+ *
+ * This call might reallocate skb data.
*/
bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv,
struct sk_buff *skb,
@@ -401,7 +407,7 @@ int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv,
struct batadv_neigh_node *neigh_node;
int data_len = skb->len;
int ret = NET_RX_DROP;
- unsigned int dev_mtu;
+ unsigned int dev_mtu, header_len;
/* get routing information */
if (is_multicast_ether_addr(ethhdr->h_dest)) {
@@ -428,11 +434,17 @@ find_router:
switch (packet_type) {
case BATADV_UNICAST:
- batadv_unicast_prepare_skb(skb, orig_node);
+ if (!batadv_unicast_prepare_skb(skb, orig_node))
+ goto out;
+
+ header_len = sizeof(struct batadv_unicast_packet);
break;
case BATADV_UNICAST_4ADDR:
- batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
- packet_subtype);
+ if (!batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node,
+ packet_subtype))
+ goto out;
+
+ header_len = sizeof(struct batadv_unicast_4addr_packet);
break;
default:
/* this function supports UNICAST and UNICAST_4ADDR only. It
@@ -441,6 +453,7 @@ find_router:
goto out;
}
+ ethhdr = (struct ethhdr *)(skb->data + header_len);
unicast_packet = (struct batadv_unicast_packet *)skb->data;
/* inform the destination node that we are still missing a correct route
diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c
index 4983340f194..d8ea31a5845 100644
--- a/net/batman-adv/vis.c
+++ b/net/batman-adv/vis.c
@@ -397,6 +397,7 @@ batadv_add_packet(struct batadv_priv *bat_priv,
kfree(info);
return NULL;
}
+ info->skb_packet->priority = TC_PRIO_CONTROL;
skb_reserve(info->skb_packet, ETH_HLEN);
packet = (struct batadv_vis_packet *)skb_put(info->skb_packet, len);
@@ -861,6 +862,7 @@ int batadv_vis_init(struct batadv_priv *bat_priv)
if (!bat_priv->vis.my_info->skb_packet)
goto free_info;
+ bat_priv->vis.my_info->skb_packet->priority = TC_PRIO_CONTROL;
skb_reserve(bat_priv->vis.my_info->skb_packet, ETH_HLEN);
tmp_skb = bat_priv->vis.my_info->skb_packet;
packet = (struct batadv_vis_packet *)skb_put(tmp_skb, sizeof(*packet));
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 6c7f3637972..f0817121ec5 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -31,6 +31,24 @@
#include <net/bluetooth/a2mp.h>
#include <net/bluetooth/smp.h>
+struct sco_param {
+ u16 pkt_type;
+ u16 max_latency;
+};
+
+static const struct sco_param sco_param_cvsd[] = {
+ { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a }, /* S3 */
+ { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007 }, /* S2 */
+ { EDR_ESCO_MASK | ESCO_EV3, 0x0007 }, /* S1 */
+ { EDR_ESCO_MASK | ESCO_HV3, 0xffff }, /* D1 */
+ { EDR_ESCO_MASK | ESCO_HV1, 0xffff }, /* D0 */
+};
+
+static const struct sco_param sco_param_wideband[] = {
+ { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d }, /* T2 */
+ { EDR_ESCO_MASK | ESCO_EV3, 0x0008 }, /* T1 */
+};
+
static void hci_le_create_connection(struct hci_conn *conn)
{
struct hci_dev *hdev = conn->hdev;
@@ -172,10 +190,11 @@ static void hci_add_sco(struct hci_conn *conn, __u16 handle)
hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}
-void hci_setup_sync(struct hci_conn *conn, __u16 handle)
+bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
struct hci_dev *hdev = conn->hdev;
struct hci_cp_setup_sync_conn cp;
+ const struct sco_param *param;
BT_DBG("hcon %p", conn);
@@ -185,15 +204,35 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle)
conn->attempt++;
cp.handle = cpu_to_le16(handle);
- cp.pkt_type = cpu_to_le16(conn->pkt_type);
cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
- cp.max_latency = __constant_cpu_to_le16(0xffff);
- cp.voice_setting = cpu_to_le16(hdev->voice_setting);
- cp.retrans_effort = 0xff;
+ cp.voice_setting = cpu_to_le16(conn->setting);
+
+ switch (conn->setting & SCO_AIRMODE_MASK) {
+ case SCO_AIRMODE_TRANSP:
+ if (conn->attempt > ARRAY_SIZE(sco_param_wideband))
+ return false;
+ cp.retrans_effort = 0x02;
+ param = &sco_param_wideband[conn->attempt - 1];
+ break;
+ case SCO_AIRMODE_CVSD:
+ if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
+ return false;
+ cp.retrans_effort = 0x01;
+ param = &sco_param_cvsd[conn->attempt - 1];
+ break;
+ default:
+ return false;
+ }
- hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
+ cp.pkt_type = __cpu_to_le16(param->pkt_type);
+ cp.max_latency = __cpu_to_le16(param->max_latency);
+
+ if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
+ return false;
+
+ return true;
}
void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
@@ -560,13 +599,13 @@ static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
return acl;
}
-static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
- bdaddr_t *dst, u8 sec_level, u8 auth_type)
+struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
+ __u16 setting)
{
struct hci_conn *acl;
struct hci_conn *sco;
- acl = hci_connect_acl(hdev, dst, sec_level, auth_type);
+ acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
if (IS_ERR(acl))
return acl;
@@ -584,6 +623,8 @@ static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
hci_conn_hold(sco);
+ sco->setting = setting;
+
if (acl->state == BT_CONNECTED &&
(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
@@ -612,9 +653,6 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
return hci_connect_le(hdev, dst, dst_type, sec_level, auth_type);
case ACL_LINK:
return hci_connect_acl(hdev, dst, sec_level, auth_type);
- case SCO_LINK:
- case ESCO_LINK:
- return hci_connect_sco(hdev, type, dst, sec_level, auth_type);
}
return ERR_PTR(-EINVAL);
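
[editor's note] The sco_param tables added above let hci_setup_sync() retry with progressively more conservative eSCO/SCO parameter sets: conn->attempt (1-based) indexes into sco_param_wideband or sco_param_cvsd depending on the air mode, and setup gives up once the attempts exceed the table. A minimal standalone sketch of that selection logic (the pkt_type numbers below are placeholders, only the max_latency values are taken from the patch):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct sco_param {
	uint16_t pkt_type;
	uint16_t max_latency;
};

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

/* same shape as sco_param_cvsd in the patch: preferred settings first,
 * safest (D0) last; pkt_type values here are illustrative placeholders */
static const struct sco_param param_cvsd[] = {
	{ 0x0001, 0x000a },	/* S3 */
	{ 0x0002, 0x0007 },	/* S2 */
	{ 0x0003, 0x0007 },	/* S1 */
	{ 0x0004, 0xffff },	/* D1 */
	{ 0x0005, 0xffff },	/* D0 */
};

/* returns false when all fallbacks are exhausted, mirroring the new
 * bool return value of hci_setup_sync() */
static bool pick_param(unsigned int attempt, struct sco_param *out)
{
	if (attempt == 0 || attempt > ARRAY_SIZE(param_cvsd))
		return false;
	*out = param_cvsd[attempt - 1];
	return true;
}

int main(void)
{
	struct sco_param p;
	unsigned int attempt;

	for (attempt = 1; pick_param(attempt, &p); attempt++)
		printf("attempt %u: pkt_type 0x%04x max_latency 0x%04x\n",
		       attempt, p.pkt_type, p.max_latency);
	return 0;
}
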
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index e3a34997759..634debab4d5 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -454,6 +454,18 @@ static void hci_setup_event_mask(struct hci_request *req)
events[4] |= 0x04; /* Read Remote Extended Features Complete */
events[5] |= 0x08; /* Synchronous Connection Complete */
events[5] |= 0x10; /* Synchronous Connection Changed */
+ } else {
+ /* Use a different default for LE-only devices */
+ memset(events, 0, sizeof(events));
+ events[0] |= 0x10; /* Disconnection Complete */
+ events[0] |= 0x80; /* Encryption Change */
+ events[1] |= 0x08; /* Read Remote Version Information Complete */
+ events[1] |= 0x20; /* Command Complete */
+ events[1] |= 0x40; /* Command Status */
+ events[1] |= 0x80; /* Hardware Error */
+ events[2] |= 0x04; /* Number of Completed Packets */
+ events[3] |= 0x02; /* Data Buffer Overflow */
+ events[5] |= 0x80; /* Encryption Key Refresh Complete */
}
if (lmp_inq_rssi_capable(hdev))
@@ -513,7 +525,10 @@ static void hci_init2_req(struct hci_request *req, unsigned long opt)
hci_setup_event_mask(req);
- if (hdev->hci_ver > BLUETOOTH_VER_1_1)
+ /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
+ * local supported commands HCI command.
+ */
+ if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
if (lmp_ssp_capable(hdev)) {
@@ -605,7 +620,7 @@ static void hci_init3_req(struct hci_request *req, unsigned long opt)
* as supported send it. If not supported assume that the controller
* does not have actual support for stored link keys which makes this
* command redundant anyway.
- */
+ */
if (hdev->commands[6] & 0x80) {
struct hci_cp_delete_stored_link_key cp;
@@ -2165,10 +2180,6 @@ int hci_register_dev(struct hci_dev *hdev)
BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
- write_lock(&hci_dev_list_lock);
- list_add(&hdev->list, &hci_dev_list);
- write_unlock(&hci_dev_list_lock);
-
hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
WQ_MEM_RECLAIM, 1, hdev->name);
if (!hdev->workqueue) {
@@ -2203,6 +2214,10 @@ int hci_register_dev(struct hci_dev *hdev)
if (hdev->dev_type != HCI_AMP)
set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
+ write_lock(&hci_dev_list_lock);
+ list_add(&hdev->list, &hci_dev_list);
+ write_unlock(&hci_dev_list_lock);
+
hci_notify(hdev, HCI_DEV_REG);
hci_dev_hold(hdev);
@@ -2215,9 +2230,6 @@ err_wqueue:
destroy_workqueue(hdev->req_workqueue);
err:
ida_simple_remove(&hci_index_ida, hdev->id);
- write_lock(&hci_dev_list_lock);
- list_del(&hdev->list);
- write_unlock(&hci_dev_list_lock);
return error;
}
@@ -3399,8 +3411,16 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
*/
if (hdev->sent_cmd) {
req_complete = bt_cb(hdev->sent_cmd)->req.complete;
- if (req_complete)
+
+ if (req_complete) {
+ /* We must set the complete callback to NULL to
+ * avoid calling the callback more than once if
+ * this function gets called again.
+ */
+ bt_cb(hdev->sent_cmd)->req.complete = NULL;
+
goto call_complete;
+ }
}
/* Remove all pending commands belonging to this request */
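
[editor's note] The hci_req_cmd_complete() hunk clears bt_cb(hdev->sent_cmd)->req.complete before jumping to the invocation, so a second pass over the same command cannot fire the callback twice. The consume-exactly-once pattern in isolation (a hedged standalone sketch, not the kernel's types):

#include <stdio.h>

typedef void (*complete_fn)(int status);

struct pending_cmd {
	complete_fn complete;	/* NULL once it has been consumed */
};

static void cmd_complete(struct pending_cmd *cmd, int status)
{
	complete_fn cb = cmd->complete;

	if (!cb)
		return;			/* already delivered, nothing to do */

	/* clear before calling so a second completion becomes a no-op */
	cmd->complete = NULL;
	cb(status);
}

static void on_done(int status)
{
	printf("request completed, status %d\n", status);
}

int main(void)
{
	struct pending_cmd cmd = { .complete = on_done };

	cmd_complete(&cmd, 0);	/* fires the callback */
	cmd_complete(&cmd, 0);	/* silently ignored */
	return 0;
}
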
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 0437200d92f..94aab73f89d 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -2904,15 +2904,16 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
hci_conn_add_sysfs(conn);
break;
+ case 0x0d: /* Connection Rejected due to Limited Resources */
case 0x11: /* Unsupported Feature or Parameter Value */
case 0x1c: /* SCO interval rejected */
case 0x1a: /* Unsupported Remote Feature */
case 0x1f: /* Unspecified error */
- if (conn->out && conn->attempt < 2) {
+ if (conn->out) {
conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
(hdev->esco_type & EDR_ESCO_MASK);
- hci_setup_sync(conn, conn->link->handle);
- goto unlock;
+ if (hci_setup_sync(conn, conn->link->handle))
+ goto unlock;
}
/* fall through */
@@ -3024,17 +3025,20 @@ unlock:
static u8 hci_get_auth_req(struct hci_conn *conn)
{
/* If remote requests dedicated bonding follow that lead */
- if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
+ if (conn->remote_auth == HCI_AT_DEDICATED_BONDING ||
+ conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) {
/* If both remote and local IO capabilities allow MITM
* protection then require it, otherwise don't */
- if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
- return 0x02;
+ if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT ||
+ conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)
+ return HCI_AT_DEDICATED_BONDING;
else
- return 0x03;
+ return HCI_AT_DEDICATED_BONDING_MITM;
}
/* If remote requests no-bonding follow that lead */
- if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
+ if (conn->remote_auth == HCI_AT_NO_BONDING ||
+ conn->remote_auth == HCI_AT_NO_BONDING_MITM)
return conn->remote_auth | (conn->auth_type & 0x01);
return conn->auth_type;
@@ -3066,7 +3070,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
/* Change the IO capability from KeyboardDisplay
* to DisplayYesNo as it is not supported by BT spec. */
cp.capability = (conn->io_capability == 0x04) ?
- 0x01 : conn->io_capability;
+ HCI_IO_DISPLAY_YESNO : conn->io_capability;
conn->auth_type = hci_get_auth_req(conn);
cp.authentication = conn->auth_type;
@@ -3140,7 +3144,8 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
* request. The only exception is when we're dedicated bonding
* initiators (connect_cfm_cb set) since then we always have the MITM
* bit set. */
- if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
+ if (!conn->connect_cfm_cb && loc_mitm &&
+ conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
BT_DBG("Rejecting request: remote device can't provide MITM");
hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
sizeof(ev->bdaddr), &ev->bdaddr);
@@ -3148,8 +3153,8 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
}
/* If no side requires MITM protection; auto-accept */
- if ((!loc_mitm || conn->remote_cap == 0x03) &&
- (!rem_mitm || conn->io_capability == 0x03)) {
+ if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
+ (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
/* If we're not the initiators request authorization to
* proceed from user space (mgmt_user_confirm with
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index 7ad6ecf36f2..edf623a2904 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -590,7 +590,7 @@ int __init bt_sysfs_init(void)
bt_class = class_create(THIS_MODULE, "bluetooth");
- return PTR_RET(bt_class);
+ return PTR_ERR_OR_ZERO(bt_class);
}
void bt_sysfs_cleanup(void)
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 0c699cdc369..bdc35a7a7fe 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -225,17 +225,47 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
static int hidp_send_report(struct hidp_session *session, struct hid_report *report)
{
- unsigned char buf[32], hdr;
- int rsize;
+ unsigned char hdr;
+ u8 *buf;
+ int rsize, ret;
- rsize = ((report->size - 1) >> 3) + 1 + (report->id > 0);
- if (rsize > sizeof(buf))
+ buf = hid_alloc_report_buf(report, GFP_ATOMIC);
+ if (!buf)
return -EIO;
hid_output_report(report, buf);
hdr = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
- return hidp_send_intr_message(session, hdr, buf, rsize);
+ rsize = ((report->size - 1) >> 3) + 1 + (report->id > 0);
+ ret = hidp_send_intr_message(session, hdr, buf, rsize);
+
+ kfree(buf);
+ return ret;
+}
+
+static int hidp_hidinput_event(struct input_dev *dev, unsigned int type,
+ unsigned int code, int value)
+{
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct hidp_session *session = hid->driver_data;
+ struct hid_field *field;
+ int offset;
+
+ BT_DBG("session %p type %d code %d value %d",
+ session, type, code, value);
+
+ if (type != EV_LED)
+ return -1;
+
+ offset = hidinput_find_field(hid, type, code, &field);
+ if (offset == -1) {
+ hid_warn(dev, "event field not found\n");
+ return -1;
+ }
+
+ hid_set_field(field, offset, value);
+
+ return hidp_send_report(session, field->report);
}
static int hidp_get_raw_report(struct hid_device *hid,
@@ -678,20 +708,6 @@ static int hidp_parse(struct hid_device *hid)
static int hidp_start(struct hid_device *hid)
{
- struct hidp_session *session = hid->driver_data;
- struct hid_report *report;
-
- if (hid->quirks & HID_QUIRK_NO_INIT_REPORTS)
- return 0;
-
- list_for_each_entry(report, &hid->report_enum[HID_INPUT_REPORT].
- report_list, list)
- hidp_send_report(session, report);
-
- list_for_each_entry(report, &hid->report_enum[HID_FEATURE_REPORT].
- report_list, list)
- hidp_send_report(session, report);
-
return 0;
}
@@ -711,6 +727,7 @@ static struct hid_ll_driver hidp_hid_driver = {
.stop = hidp_stop,
.open = hidp_open,
.close = hidp_close,
+ .hidinput_input_event = hidp_hidinput_event,
};
/* This function sets up the hid device. It does not add it
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 8c3499bec89..b3bb7bca8e6 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1415,8 +1415,9 @@ static void l2cap_conn_ready(struct l2cap_conn *conn)
sk->sk_state_change(sk);
release_sock(sk);
- } else if (chan->state == BT_CONNECT)
+ } else if (chan->state == BT_CONNECT) {
l2cap_do_start(chan);
+ }
l2cap_chan_unlock(chan);
}
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index b6e44ad6cca..6d126faf145 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -58,7 +58,6 @@ struct rfcomm_dev {
uint modem_status;
struct rfcomm_dlc *dlc;
- wait_queue_head_t wait;
struct device *tty_dev;
@@ -76,13 +75,6 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig);
/* ---- Device functions ---- */
-/*
- * The reason this isn't actually a race, as you no doubt have a little voice
- * screaming at you in your head, is that the refcount should never actually
- * reach zero unless the device has already been taken off the list, in
- * rfcomm_dev_del(). And if that's not true, we'll hit the BUG() in
- * rfcomm_dev_destruct() anyway.
- */
static void rfcomm_dev_destruct(struct tty_port *port)
{
struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
@@ -90,10 +82,9 @@ static void rfcomm_dev_destruct(struct tty_port *port)
BT_DBG("dev %p dlc %p", dev, dlc);
- /* Refcount should only hit zero when called from rfcomm_dev_del()
- which will have taken us off the list. Everything else are
- refcounting bugs. */
- BUG_ON(!list_empty(&dev->list));
+ spin_lock(&rfcomm_dev_lock);
+ list_del(&dev->list);
+ spin_unlock(&rfcomm_dev_lock);
rfcomm_dlc_lock(dlc);
/* Detach DLC if it's owned by this dev */
@@ -112,8 +103,39 @@ static void rfcomm_dev_destruct(struct tty_port *port)
module_put(THIS_MODULE);
}
+/* device-specific initialization: open the dlc */
+static int rfcomm_dev_activate(struct tty_port *port, struct tty_struct *tty)
+{
+ struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
+
+ return rfcomm_dlc_open(dev->dlc, &dev->src, &dev->dst, dev->channel);
+}
+
+/* we block the open until the dlc->state becomes BT_CONNECTED */
+static int rfcomm_dev_carrier_raised(struct tty_port *port)
+{
+ struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
+
+ return (dev->dlc->state == BT_CONNECTED);
+}
+
+/* device-specific cleanup: close the dlc */
+static void rfcomm_dev_shutdown(struct tty_port *port)
+{
+ struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port);
+
+ if (dev->tty_dev->parent)
+ device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
+
+ /* close the dlc */
+ rfcomm_dlc_close(dev->dlc, 0);
+}
+
static const struct tty_port_operations rfcomm_port_ops = {
.destruct = rfcomm_dev_destruct,
+ .activate = rfcomm_dev_activate,
+ .shutdown = rfcomm_dev_shutdown,
+ .carrier_raised = rfcomm_dev_carrier_raised,
};
static struct rfcomm_dev *__rfcomm_dev_get(int id)
@@ -236,7 +258,6 @@ static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc)
tty_port_init(&dev->port);
dev->port.ops = &rfcomm_port_ops;
- init_waitqueue_head(&dev->wait);
skb_queue_head_init(&dev->pending);
@@ -282,7 +303,9 @@ out:
dev->id, NULL);
if (IS_ERR(dev->tty_dev)) {
err = PTR_ERR(dev->tty_dev);
+ spin_lock(&rfcomm_dev_lock);
list_del(&dev->list);
+ spin_unlock(&rfcomm_dev_lock);
goto free;
}
@@ -301,27 +324,6 @@ free:
return err;
}
-static void rfcomm_dev_del(struct rfcomm_dev *dev)
-{
- unsigned long flags;
- BT_DBG("dev %p", dev);
-
- BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
-
- spin_lock_irqsave(&dev->port.lock, flags);
- if (dev->port.count > 0) {
- spin_unlock_irqrestore(&dev->port.lock, flags);
- return;
- }
- spin_unlock_irqrestore(&dev->port.lock, flags);
-
- spin_lock(&rfcomm_dev_lock);
- list_del_init(&dev->list);
- spin_unlock(&rfcomm_dev_lock);
-
- tty_port_put(&dev->port);
-}
-
/* ---- Send buffer ---- */
static inline unsigned int rfcomm_room(struct rfcomm_dlc *dlc)
{
@@ -333,10 +335,9 @@ static inline unsigned int rfcomm_room(struct rfcomm_dlc *dlc)
static void rfcomm_wfree(struct sk_buff *skb)
{
struct rfcomm_dev *dev = (void *) skb->sk;
- struct tty_struct *tty = dev->port.tty;
atomic_sub(skb->truesize, &dev->wmem_alloc);
- if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags) && tty)
- tty_wakeup(tty);
+ if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags))
+ tty_port_tty_wakeup(&dev->port);
tty_port_put(&dev->port);
}
@@ -410,6 +411,7 @@ static int rfcomm_release_dev(void __user *arg)
{
struct rfcomm_dev_req req;
struct rfcomm_dev *dev;
+ struct tty_struct *tty;
if (copy_from_user(&req, arg, sizeof(req)))
return -EFAULT;
@@ -429,11 +431,15 @@ static int rfcomm_release_dev(void __user *arg)
rfcomm_dlc_close(dev->dlc, 0);
/* Shut down TTY synchronously before freeing rfcomm_dev */
- if (dev->port.tty)
- tty_vhangup(dev->port.tty);
+ tty = tty_port_tty_get(&dev->port);
+ if (tty) {
+ tty_vhangup(tty);
+ tty_kref_put(tty);
+ }
+
+ if (!test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags))
+ tty_port_put(&dev->port);
- if (!test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags))
- rfcomm_dev_del(dev);
tty_port_put(&dev->port);
return 0;
}
@@ -563,16 +569,21 @@ static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb)
static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
{
struct rfcomm_dev *dev = dlc->owner;
+ struct tty_struct *tty;
if (!dev)
return;
BT_DBG("dlc %p dev %p err %d", dlc, dev, err);
dev->err = err;
- wake_up_interruptible(&dev->wait);
+ if (dlc->state == BT_CONNECTED) {
+ device_move(dev->tty_dev, rfcomm_get_device(dev),
+ DPM_ORDER_DEV_AFTER_PARENT);
- if (dlc->state == BT_CLOSED) {
- if (!dev->port.tty) {
+ wake_up_interruptible(&dev->port.open_wait);
+ } else if (dlc->state == BT_CLOSED) {
+ tty = tty_port_tty_get(&dev->port);
+ if (!tty) {
if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) {
/* Drop DLC lock here to avoid deadlock
* 1. rfcomm_dev_get will take rfcomm_dev_lock
@@ -580,6 +591,9 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
* rfcomm_dev_lock -> dlc lock
* 2. tty_port_put will deadlock if it's
* the last reference
+ *
+ * FIXME: when we release the lock anything
+ * could happen to dev, even its destruction
*/
rfcomm_dlc_unlock(dlc);
if (rfcomm_dev_get(dev->id) == NULL) {
@@ -587,12 +601,17 @@ static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err)
return;
}
- rfcomm_dev_del(dev);
+ if (!test_and_set_bit(RFCOMM_TTY_RELEASED,
+ &dev->flags))
+ tty_port_put(&dev->port);
+
tty_port_put(&dev->port);
rfcomm_dlc_lock(dlc);
}
- } else
- tty_hangup(dev->port.tty);
+ } else {
+ tty_hangup(tty);
+ tty_kref_put(tty);
+ }
}
}
@@ -604,10 +623,8 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
BT_DBG("dlc %p dev %p v24_sig 0x%02x", dlc, dev, v24_sig);
- if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV)) {
- if (dev->port.tty && !C_CLOCAL(dev->port.tty))
- tty_hangup(dev->port.tty);
- }
+ if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV))
+ tty_port_tty_hangup(&dev->port, true);
dev->modem_status =
((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) |
@@ -638,124 +655,92 @@ static void rfcomm_tty_copy_pending(struct rfcomm_dev *dev)
tty_flip_buffer_push(&dev->port);
}
-static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
+/* do the reverse of install, clearing the tty fields and releasing the
+ * reference to tty_port
+ */
+static void rfcomm_tty_cleanup(struct tty_struct *tty)
{
- DECLARE_WAITQUEUE(wait, current);
- struct rfcomm_dev *dev;
- struct rfcomm_dlc *dlc;
- unsigned long flags;
- int err, id;
+ struct rfcomm_dev *dev = tty->driver_data;
- id = tty->index;
+ clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
- BT_DBG("tty %p id %d", tty, id);
+ rfcomm_dlc_lock(dev->dlc);
+ tty->driver_data = NULL;
+ rfcomm_dlc_unlock(dev->dlc);
- /* We don't leak this refcount. For reasons which are not entirely
- clear, the TTY layer will call our ->close() method even if the
- open fails. We decrease the refcount there, and decreasing it
- here too would cause breakage. */
- dev = rfcomm_dev_get(id);
- if (!dev)
- return -ENODEV;
+ /*
+ * purge the dlc->tx_queue to avoid circular dependencies
+ * between dev and dlc
+ */
+ skb_queue_purge(&dev->dlc->tx_queue);
- BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
- dev->channel, dev->port.count);
+ tty_port_put(&dev->port);
+}
- spin_lock_irqsave(&dev->port.lock, flags);
- if (++dev->port.count > 1) {
- spin_unlock_irqrestore(&dev->port.lock, flags);
- return 0;
- }
- spin_unlock_irqrestore(&dev->port.lock, flags);
+/* we acquire the tty_port reference here since this is where the tty is
+ * first used (by setting the termios). We also populate the driver_data
+ * field and install the tty port
+ */
+static int rfcomm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ struct rfcomm_dev *dev;
+ struct rfcomm_dlc *dlc;
+ int err;
+
+ dev = rfcomm_dev_get(tty->index);
+ if (!dev)
+ return -ENODEV;
dlc = dev->dlc;
/* Attach TTY and open DLC */
-
rfcomm_dlc_lock(dlc);
tty->driver_data = dev;
- dev->port.tty = tty;
rfcomm_dlc_unlock(dlc);
set_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
- err = rfcomm_dlc_open(dlc, &dev->src, &dev->dst, dev->channel);
- if (err < 0)
- return err;
-
- /* Wait for DLC to connect */
- add_wait_queue(&dev->wait, &wait);
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
+ /* install the tty_port */
+ err = tty_port_install(&dev->port, driver, tty);
+ if (err)
+ rfcomm_tty_cleanup(tty);
- if (dlc->state == BT_CLOSED) {
- err = -dev->err;
- break;
- }
+ return err;
+}
- if (dlc->state == BT_CONNECTED)
- break;
+static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct rfcomm_dev *dev = tty->driver_data;
+ int err;
- if (signal_pending(current)) {
- err = -EINTR;
- break;
- }
+ BT_DBG("tty %p id %d", tty, tty->index);
- tty_unlock(tty);
- schedule();
- tty_lock(tty);
- }
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&dev->wait, &wait);
+ BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
+ dev->channel, dev->port.count);
- if (err == 0)
- device_move(dev->tty_dev, rfcomm_get_device(dev),
- DPM_ORDER_DEV_AFTER_PARENT);
+ err = tty_port_open(&dev->port, tty, filp);
+ if (err)
+ return err;
+ /*
+ * FIXME: rfcomm should use proper flow control for
+ * received data. This hack will be unnecessary and can
+ * be removed when that's implemented
+ */
rfcomm_tty_copy_pending(dev);
rfcomm_dlc_unthrottle(dev->dlc);
- return err;
+ return 0;
}
static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
{
struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
- unsigned long flags;
-
- if (!dev)
- return;
BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
dev->port.count);
- spin_lock_irqsave(&dev->port.lock, flags);
- if (!--dev->port.count) {
- spin_unlock_irqrestore(&dev->port.lock, flags);
- if (dev->tty_dev->parent)
- device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
-
- /* Close DLC and dettach TTY */
- rfcomm_dlc_close(dev->dlc, 0);
-
- clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags);
-
- rfcomm_dlc_lock(dev->dlc);
- tty->driver_data = NULL;
- dev->port.tty = NULL;
- rfcomm_dlc_unlock(dev->dlc);
-
- if (test_bit(RFCOMM_TTY_RELEASED, &dev->flags)) {
- spin_lock(&rfcomm_dev_lock);
- list_del_init(&dev->list);
- spin_unlock(&rfcomm_dev_lock);
-
- tty_port_put(&dev->port);
- }
- } else
- spin_unlock_irqrestore(&dev->port.lock, flags);
-
- tty_port_put(&dev->port);
+ tty_port_close(&dev->port, tty, filp);
}
static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
@@ -1055,17 +1040,11 @@ static void rfcomm_tty_hangup(struct tty_struct *tty)
BT_DBG("tty %p dev %p", tty, dev);
- if (!dev)
- return;
-
- rfcomm_tty_flush_buffer(tty);
+ tty_port_hangup(&dev->port);
- if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) {
- if (rfcomm_dev_get(dev->id) == NULL)
- return;
- rfcomm_dev_del(dev);
+ if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags) &&
+ !test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags))
tty_port_put(&dev->port);
- }
}
static int rfcomm_tty_tiocmget(struct tty_struct *tty)
@@ -1128,6 +1107,8 @@ static const struct tty_operations rfcomm_ops = {
.wait_until_sent = rfcomm_tty_wait_until_sent,
.tiocmget = rfcomm_tty_tiocmget,
.tiocmset = rfcomm_tty_tiocmset,
+ .install = rfcomm_tty_install,
+ .cleanup = rfcomm_tty_cleanup,
};
int __init rfcomm_init_ttys(void)
@@ -1146,7 +1127,7 @@ int __init rfcomm_init_ttys(void)
rfcomm_tty_driver->subtype = SERIAL_TYPE_NORMAL;
rfcomm_tty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
rfcomm_tty_driver->init_termios = tty_std_termios;
- rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL;
rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON;
tty_set_operations(rfcomm_tty_driver, &rfcomm_ops);
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index e7bd4eea575..96bd388d93a 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -176,8 +176,13 @@ static int sco_connect(struct sock *sk)
else
type = SCO_LINK;
- hcon = hci_connect(hdev, type, dst, BDADDR_BREDR, BT_SECURITY_LOW,
- HCI_AT_NO_BONDING);
+ if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT &&
+ (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+
+ hcon = hci_connect_sco(hdev, type, dst, sco_pi(sk)->setting);
if (IS_ERR(hcon)) {
err = PTR_ERR(hcon);
goto done;
@@ -417,6 +422,8 @@ static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, int pro
sk->sk_protocol = proto;
sk->sk_state = BT_OPEN;
+ sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT;
+
setup_timer(&sk->sk_timer, sco_sock_timeout, (unsigned long)sk);
bt_sock_link(&sco_sk_list, sk);
@@ -652,7 +659,7 @@ static int sco_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
return err;
}
-static void sco_conn_defer_accept(struct hci_conn *conn, int mask)
+static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting)
{
struct hci_dev *hdev = conn->hdev;
@@ -664,11 +671,7 @@ static void sco_conn_defer_accept(struct hci_conn *conn, int mask)
struct hci_cp_accept_conn_req cp;
bacpy(&cp.bdaddr, &conn->dst);
-
- if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
- cp.role = 0x00; /* Become master */
- else
- cp.role = 0x01; /* Remain slave */
+ cp.role = 0x00; /* Ignored */
hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
} else {
@@ -679,9 +682,21 @@ static void sco_conn_defer_accept(struct hci_conn *conn, int mask)
cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
- cp.max_latency = __constant_cpu_to_le16(0xffff);
- cp.content_format = cpu_to_le16(hdev->voice_setting);
- cp.retrans_effort = 0xff;
+ cp.content_format = cpu_to_le16(setting);
+
+ switch (setting & SCO_AIRMODE_MASK) {
+ case SCO_AIRMODE_TRANSP:
+ if (conn->pkt_type & ESCO_2EV3)
+ cp.max_latency = __constant_cpu_to_le16(0x0008);
+ else
+ cp.max_latency = __constant_cpu_to_le16(0x000D);
+ cp.retrans_effort = 0x02;
+ break;
+ case SCO_AIRMODE_CVSD:
+ cp.max_latency = __constant_cpu_to_le16(0xffff);
+ cp.retrans_effort = 0xff;
+ break;
+ }
hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
sizeof(cp), &cp);
@@ -698,7 +713,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
if (sk->sk_state == BT_CONNECT2 &&
test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
- sco_conn_defer_accept(pi->conn->hcon, 0);
+ sco_conn_defer_accept(pi->conn->hcon, pi->setting);
sk->sk_state = BT_CONFIG;
msg->msg_namelen = 0;
@@ -714,7 +729,8 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
struct sock *sk = sock->sk;
- int err = 0;
+ int len, err = 0;
+ struct bt_voice voice;
u32 opt;
BT_DBG("sk %p", sk);
@@ -740,6 +756,31 @@ static int sco_sock_setsockopt(struct socket *sock, int level, int optname, char
clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
break;
+ case BT_VOICE:
+ if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND &&
+ sk->sk_state != BT_CONNECT2) {
+ err = -EINVAL;
+ break;
+ }
+
+ voice.setting = sco_pi(sk)->setting;
+
+ len = min_t(unsigned int, sizeof(voice), optlen);
+ if (copy_from_user((char *) &voice, optval, len)) {
+ err = -EFAULT;
+ break;
+ }
+
+ /* Explicitly check for these values */
+ if (voice.setting != BT_VOICE_TRANSPARENT &&
+ voice.setting != BT_VOICE_CVSD_16BIT) {
+ err = -EINVAL;
+ break;
+ }
+
+ sco_pi(sk)->setting = voice.setting;
+ break;
+
default:
err = -ENOPROTOOPT;
break;
@@ -765,7 +806,9 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user
switch (optname) {
case SCO_OPTIONS:
- if (sk->sk_state != BT_CONNECTED) {
+ if (sk->sk_state != BT_CONNECTED &&
+ !(sk->sk_state == BT_CONNECT2 &&
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
err = -ENOTCONN;
break;
}
@@ -781,7 +824,9 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname, char __user
break;
case SCO_CONNINFO:
- if (sk->sk_state != BT_CONNECTED) {
+ if (sk->sk_state != BT_CONNECTED &&
+ !(sk->sk_state == BT_CONNECT2 &&
+ test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
err = -ENOTCONN;
break;
}
@@ -809,6 +854,7 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char
{
struct sock *sk = sock->sk;
int len, err = 0;
+ struct bt_voice voice;
BT_DBG("sk %p", sk);
@@ -834,6 +880,15 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname, char
break;
+ case BT_VOICE:
+ voice.setting = sco_pi(sk)->setting;
+
+ len = min_t(unsigned int, len, sizeof(voice));
+ if (copy_to_user(optval, (char *)&voice, len))
+ err = -EFAULT;
+
+ break;
+
default:
err = -ENOPROTOOPT;
break;
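
[editor's note] The new BT_VOICE socket option lets userspace request the SCO air mode (CVSD or transparent/wideband) before the connection is established; the kernel rejects the option once the socket is past BT_CONNECT2. A hedged userspace sketch of how a caller might set it (the constants mirror the ones this series adds and are re-defined locally in case the installed headers predate them):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/sco.h>

/* Guarded copies of the values this series introduces, so the sketch still
 * builds against older headers; drop them once the headers carry BT_VOICE. */
#ifndef BT_VOICE
#define BT_VOICE		11
struct bt_voice {
	uint16_t setting;
};
#define BT_VOICE_TRANSPARENT	0x0003
#define BT_VOICE_CVSD_16BIT	0x0060
#endif

int main(void)
{
	struct bt_voice voice;
	int sk;

	sk = socket(AF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_SCO);
	if (sk < 0) {
		perror("socket");
		return 1;
	}

	memset(&voice, 0, sizeof(voice));
	voice.setting = BT_VOICE_TRANSPARENT;	/* request wideband air mode */

	/* must happen before connect(), while the socket is still BT_OPEN */
	if (setsockopt(sk, SOL_BLUETOOTH, BT_VOICE, &voice, sizeof(voice)) < 0)
		perror("setsockopt(BT_VOICE)");

	close(sk);
	return 0;
}
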
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 2ef66781fed..ca04163635d 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -70,7 +70,8 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
}
mdst = br_mdb_get(br, skb, vid);
- if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
+ if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
+ br_multicast_querier_exists(br, eth_hdr(skb)))
br_multicast_deliver(mdst, skb);
else
br_flood_deliver(br, skb, false);
@@ -244,22 +245,22 @@ fail:
int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
{
struct netpoll *np;
- int err = 0;
+ int err;
+
+ if (!p->br->dev->npinfo)
+ return 0;
np = kzalloc(sizeof(*p->np), gfp);
- err = -ENOMEM;
if (!np)
- goto out;
+ return -ENOMEM;
err = __netpoll_setup(np, p->dev, gfp);
if (err) {
kfree(np);
- goto out;
+ return err;
}
p->np = np;
-
-out:
return err;
}
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 60aca9109a5..ffd5874f259 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -161,7 +161,7 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
if (!pv)
return;
- for_each_set_bit_from(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+ for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
f = __br_fdb_get(br, br->dev->dev_addr, vid);
if (f && f->is_local && !f->dst)
fdb_delete(br, f);
@@ -730,7 +730,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
/* VID was specified, so use it. */
err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
} else {
- if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+ if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
err = __br_fdb_add(ndm, p, addr, nlh_flags, 0);
goto out;
}
@@ -739,7 +739,7 @@ int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
* specify a VLAN. To be nice, add/update entry for every
* vlan on this port.
*/
- for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+ for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
err = __br_fdb_add(ndm, p, addr, nlh_flags, vid);
if (err)
goto out;
@@ -817,7 +817,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
err = __br_fdb_delete(p, addr, vid);
} else {
- if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+ if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID)) {
err = __br_fdb_delete(p, addr, 0);
goto out;
}
@@ -827,7 +827,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
* vlan on this port.
*/
err = -ENOENT;
- for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+ for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
err &= __br_fdb_delete(p, addr, vid);
}
}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 5623be6b9ec..c41d5fbb91d 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -363,7 +363,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
if (err)
goto err2;
- if (br_netpoll_info(br) && ((err = br_netpoll_enable(p, GFP_KERNEL))))
+ err = br_netpoll_enable(p, GFP_KERNEL);
+ if (err)
goto err3;
err = netdev_master_upper_dev_link(dev, br->dev);
@@ -382,6 +383,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
netdev_update_features(br->dev);
+ if (br->dev->needed_headroom < dev->needed_headroom)
+ br->dev->needed_headroom = dev->needed_headroom;
+
spin_lock_bh(&br->lock);
changed_addr = br_stp_recalculate_bridge_id(br);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 1b8b8b824cd..a2fd37ec35f 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -101,7 +101,8 @@ int br_handle_frame_finish(struct sk_buff *skb)
unicast = false;
} else if (is_multicast_ether_addr(dest)) {
mdst = br_mdb_get(br, skb, vid);
- if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
+ if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
+ br_multicast_querier_exists(br, eth_hdr(skb))) {
if ((mdst && mdst->mglist) ||
br_multicast_is_router(br))
skb2 = skb;
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 0daae3ec235..85a09bb5ca5 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -9,6 +9,7 @@
#include <net/netlink.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
+#include <net/addrconf.h>
#endif
#include "br_private.h"
@@ -61,7 +62,8 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
for (i = 0; i < mdb->max; i++) {
struct net_bridge_mdb_entry *mp;
- struct net_bridge_port_group *p, **pp;
+ struct net_bridge_port_group *p;
+ struct net_bridge_port_group __rcu **pp;
struct net_bridge_port *port;
hlist_for_each_entry_rcu(mp, &mdb->mhash[i], hlist[mdb->ver]) {
@@ -253,7 +255,7 @@ static bool is_valid_mdb_entry(struct br_mdb_entry *entry)
return false;
#if IS_ENABLED(CONFIG_IPV6)
} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
- if (!ipv6_is_transient_multicast(&entry->addr.u.ip6))
+ if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6))
return false;
#endif
} else
@@ -414,16 +416,20 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
if (!netif_running(br->dev) || br->multicast_disabled)
return -EINVAL;
- if (timer_pending(&br->multicast_querier_timer))
- return -EBUSY;
-
ip.proto = entry->addr.proto;
- if (ip.proto == htons(ETH_P_IP))
+ if (ip.proto == htons(ETH_P_IP)) {
+ if (timer_pending(&br->ip4_querier.timer))
+ return -EBUSY;
+
ip.u.ip4 = entry->addr.u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
- else
+ } else {
+ if (timer_pending(&br->ip6_querier.timer))
+ return -EBUSY;
+
ip.u.ip6 = entry->addr.u.ip6;
#endif
+ }
spin_lock_bh(&br->multicast_lock);
mdb = mlock_dereference(br->mdb, br);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 69af490cce4..d1c57863067 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -29,11 +29,13 @@
#include <net/ipv6.h>
#include <net/mld.h>
#include <net/ip6_checksum.h>
+#include <net/addrconf.h>
#endif
#include "br_private.h"
-static void br_multicast_start_querier(struct net_bridge *br);
+static void br_multicast_start_querier(struct net_bridge *br,
+ struct bridge_mcast_query *query);
unsigned int br_mdb_rehash_seq;
static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
@@ -619,6 +621,9 @@ rehash:
mp->br = br;
mp->addr = *group;
+ setup_timer(&mp->timer, br_multicast_group_expired,
+ (unsigned long)mp);
+
hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
mdb->size++;
@@ -720,7 +725,7 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
{
struct br_ip br_group;
- if (!ipv6_is_transient_multicast(group))
+ if (ipv6_addr_is_ll_all_nodes(group))
return 0;
br_group.u.ip6 = *group;
@@ -752,20 +757,35 @@ static void br_multicast_local_router_expired(unsigned long data)
{
}
-static void br_multicast_querier_expired(unsigned long data)
+static void br_multicast_querier_expired(struct net_bridge *br,
+ struct bridge_mcast_query *query)
{
- struct net_bridge *br = (void *)data;
-
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) || br->multicast_disabled)
goto out;
- br_multicast_start_querier(br);
+ br_multicast_start_querier(br, query);
out:
spin_unlock(&br->multicast_lock);
}
+static void br_ip4_multicast_querier_expired(unsigned long data)
+{
+ struct net_bridge *br = (void *)data;
+
+ br_multicast_querier_expired(br, &br->ip4_query);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_querier_expired(unsigned long data)
+{
+ struct net_bridge *br = (void *)data;
+
+ br_multicast_querier_expired(br, &br->ip6_query);
+}
+#endif
+
static void __br_multicast_send_query(struct net_bridge *br,
struct net_bridge_port *port,
struct br_ip *ip)
@@ -786,37 +806,45 @@ static void __br_multicast_send_query(struct net_bridge *br,
}
static void br_multicast_send_query(struct net_bridge *br,
- struct net_bridge_port *port, u32 sent)
+ struct net_bridge_port *port,
+ struct bridge_mcast_query *query)
{
unsigned long time;
struct br_ip br_group;
+ struct bridge_mcast_querier *querier = NULL;
if (!netif_running(br->dev) || br->multicast_disabled ||
- !br->multicast_querier ||
- timer_pending(&br->multicast_querier_timer))
+ !br->multicast_querier)
return;
memset(&br_group.u, 0, sizeof(br_group.u));
- br_group.proto = htons(ETH_P_IP);
- __br_multicast_send_query(br, port, &br_group);
-
+ if (port ? (query == &port->ip4_query) :
+ (query == &br->ip4_query)) {
+ querier = &br->ip4_querier;
+ br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
- br_group.proto = htons(ETH_P_IPV6);
- __br_multicast_send_query(br, port, &br_group);
+ } else {
+ querier = &br->ip6_querier;
+ br_group.proto = htons(ETH_P_IPV6);
#endif
+ }
+
+ if (!querier || timer_pending(&querier->timer))
+ return;
+
+ __br_multicast_send_query(br, port, &br_group);
time = jiffies;
- time += sent < br->multicast_startup_query_count ?
+ time += query->startup_sent < br->multicast_startup_query_count ?
br->multicast_startup_query_interval :
br->multicast_query_interval;
- mod_timer(port ? &port->multicast_query_timer :
- &br->multicast_query_timer, time);
+ mod_timer(&query->timer, time);
}
-static void br_multicast_port_query_expired(unsigned long data)
+static void br_multicast_port_query_expired(struct net_bridge_port *port,
+ struct bridge_mcast_query *query)
{
- struct net_bridge_port *port = (void *)data;
struct net_bridge *br = port->br;
spin_lock(&br->multicast_lock);
@@ -824,25 +852,43 @@ static void br_multicast_port_query_expired(unsigned long data)
port->state == BR_STATE_BLOCKING)
goto out;
- if (port->multicast_startup_queries_sent <
- br->multicast_startup_query_count)
- port->multicast_startup_queries_sent++;
+ if (query->startup_sent < br->multicast_startup_query_count)
+ query->startup_sent++;
- br_multicast_send_query(port->br, port,
- port->multicast_startup_queries_sent);
+ br_multicast_send_query(port->br, port, query);
out:
spin_unlock(&br->multicast_lock);
}
+static void br_ip4_multicast_port_query_expired(unsigned long data)
+{
+ struct net_bridge_port *port = (void *)data;
+
+ br_multicast_port_query_expired(port, &port->ip4_query);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_port_query_expired(unsigned long data)
+{
+ struct net_bridge_port *port = (void *)data;
+
+ br_multicast_port_query_expired(port, &port->ip6_query);
+}
+#endif
+
void br_multicast_add_port(struct net_bridge_port *port)
{
port->multicast_router = 1;
setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
(unsigned long)port);
- setup_timer(&port->multicast_query_timer,
- br_multicast_port_query_expired, (unsigned long)port);
+ setup_timer(&port->ip4_query.timer, br_ip4_multicast_port_query_expired,
+ (unsigned long)port);
+#if IS_ENABLED(CONFIG_IPV6)
+ setup_timer(&port->ip6_query.timer, br_ip6_multicast_port_query_expired,
+ (unsigned long)port);
+#endif
}
void br_multicast_del_port(struct net_bridge_port *port)
@@ -850,13 +896,13 @@ void br_multicast_del_port(struct net_bridge_port *port)
del_timer_sync(&port->multicast_router_timer);
}
-static void __br_multicast_enable_port(struct net_bridge_port *port)
+static void br_multicast_enable(struct bridge_mcast_query *query)
{
- port->multicast_startup_queries_sent = 0;
+ query->startup_sent = 0;
- if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 ||
- del_timer(&port->multicast_query_timer))
- mod_timer(&port->multicast_query_timer, jiffies);
+ if (try_to_del_timer_sync(&query->timer) >= 0 ||
+ del_timer(&query->timer))
+ mod_timer(&query->timer, jiffies);
}
void br_multicast_enable_port(struct net_bridge_port *port)
@@ -867,7 +913,10 @@ void br_multicast_enable_port(struct net_bridge_port *port)
if (br->multicast_disabled || !netif_running(br->dev))
goto out;
- __br_multicast_enable_port(port);
+ br_multicast_enable(&port->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+ br_multicast_enable(&port->ip6_query);
+#endif
out:
spin_unlock(&br->multicast_lock);
@@ -886,7 +935,10 @@ void br_multicast_disable_port(struct net_bridge_port *port)
if (!hlist_unhashed(&port->rlist))
hlist_del_init_rcu(&port->rlist);
del_timer(&port->multicast_router_timer);
- del_timer(&port->multicast_query_timer);
+ del_timer(&port->ip4_query.timer);
+#if IS_ENABLED(CONFIG_IPV6)
+ del_timer(&port->ip6_query.timer);
+#endif
spin_unlock(&br->multicast_lock);
}
@@ -1011,6 +1063,17 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
}
#endif
+static void
+br_multicast_update_querier_timer(struct net_bridge *br,
+ struct bridge_mcast_querier *querier,
+ unsigned long max_delay)
+{
+ if (!timer_pending(&querier->timer))
+ querier->delay_time = jiffies + max_delay;
+
+ mod_timer(&querier->timer, jiffies + br->multicast_querier_interval);
+}
+
/*
* Add port to router_list
* list is maintained ordered by pointer value
@@ -1061,12 +1124,13 @@ timer:
static void br_multicast_query_received(struct net_bridge *br,
struct net_bridge_port *port,
- int saddr)
+ struct bridge_mcast_querier *querier,
+ int saddr,
+ unsigned long max_delay)
{
if (saddr)
- mod_timer(&br->multicast_querier_timer,
- jiffies + br->multicast_querier_interval);
- else if (timer_pending(&br->multicast_querier_timer))
+ br_multicast_update_querier_timer(br, querier, max_delay);
+ else if (timer_pending(&querier->timer))
return;
br_multicast_mark_router(br, port);
@@ -1093,8 +1157,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
(port && port->state == BR_STATE_DISABLED))
goto out;
- br_multicast_query_received(br, port, !!iph->saddr);
-
group = ih->group;
if (skb->len == sizeof(*ih)) {
@@ -1118,6 +1180,9 @@ static int br_ip4_multicast_query(struct net_bridge *br,
IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
}
+ br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr,
+ max_delay);
+
if (!group)
goto out;
@@ -1126,7 +1191,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
if (!mp)
goto out;
- setup_timer(&mp->timer, br_multicast_group_expired, (unsigned long)mp);
mod_timer(&mp->timer, now + br->multicast_membership_interval);
mp->timer_armed = true;
@@ -1174,8 +1238,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
(port && port->state == BR_STATE_DISABLED))
goto out;
- br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr));
-
if (skb->len == sizeof(*mld)) {
if (!pskb_may_pull(skb, sizeof(*mld))) {
err = -EINVAL;
@@ -1185,7 +1247,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
if (max_delay)
group = &mld->mld_mca;
- } else if (skb->len >= sizeof(*mld2q)) {
+ } else {
if (!pskb_may_pull(skb, sizeof(*mld2q))) {
err = -EINVAL;
goto out;
@@ -1193,9 +1255,13 @@ static int br_ip6_multicast_query(struct net_bridge *br,
mld2q = (struct mld2_query *)icmp6_hdr(skb);
if (!mld2q->mld2q_nsrcs)
group = &mld2q->mld2q_mca;
- max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(ntohs(mld2q->mld2q_mrc)) : 1;
+
+ max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
}
+ br_multicast_query_received(br, port, &br->ip6_querier,
+ !ipv6_addr_any(&ip6h->saddr), max_delay);
+
if (!group)
goto out;
@@ -1204,7 +1270,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
if (!mp)
goto out;
- setup_timer(&mp->timer, br_multicast_group_expired, (unsigned long)mp);
mod_timer(&mp->timer, now + br->multicast_membership_interval);
mp->timer_armed = true;
@@ -1232,7 +1297,9 @@ out:
static void br_multicast_leave_group(struct net_bridge *br,
struct net_bridge_port *port,
- struct br_ip *group)
+ struct br_ip *group,
+ struct bridge_mcast_querier *querier,
+ struct bridge_mcast_query *query)
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
@@ -1243,7 +1310,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
spin_lock(&br->multicast_lock);
if (!netif_running(br->dev) ||
(port && port->state == BR_STATE_DISABLED) ||
- timer_pending(&br->multicast_querier_timer))
+ timer_pending(&querier->timer))
goto out;
mdb = mlock_dereference(br->mdb, br);
@@ -1251,14 +1318,13 @@ static void br_multicast_leave_group(struct net_bridge *br,
if (!mp)
goto out;
- if (br->multicast_querier &&
- !timer_pending(&br->multicast_querier_timer)) {
+ if (br->multicast_querier) {
__br_multicast_send_query(br, port, &mp->addr);
time = jiffies + br->multicast_last_member_count *
br->multicast_last_member_interval;
- mod_timer(port ? &port->multicast_query_timer :
- &br->multicast_query_timer, time);
+
+ mod_timer(&query->timer, time);
for (p = mlock_dereference(mp->ports, br);
p != NULL;
@@ -1311,7 +1377,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
mod_timer(&mp->timer, time);
}
}
-
out:
spin_unlock(&br->multicast_lock);
}
@@ -1322,6 +1387,8 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
__u16 vid)
{
struct br_ip br_group;
+ struct bridge_mcast_query *query = port ? &port->ip4_query :
+ &br->ip4_query;
if (ipv4_is_local_multicast(group))
return;
@@ -1330,7 +1397,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
br_group.proto = htons(ETH_P_IP);
br_group.vid = vid;
- br_multicast_leave_group(br, port, &br_group);
+ br_multicast_leave_group(br, port, &br_group, &br->ip4_querier, query);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -1340,15 +1407,18 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
__u16 vid)
{
struct br_ip br_group;
+ struct bridge_mcast_query *query = port ? &port->ip6_query :
+ &br->ip6_query;
+
- if (!ipv6_is_transient_multicast(group))
+ if (ipv6_addr_is_ll_all_nodes(group))
return;
br_group.u.ip6 = *group;
br_group.proto = htons(ETH_P_IPV6);
br_group.vid = vid;
- br_multicast_leave_group(br, port, &br_group);
+ br_multicast_leave_group(br, port, &br_group, &br->ip6_querier, query);
}
#endif
@@ -1478,8 +1548,14 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 * - MLD always carries a Router Alert hop-by-hop option
 * - But we do not support jumbograms.
*/
- if (ip6h->version != 6 ||
- ip6h->nexthdr != IPPROTO_HOPOPTS ||
+ if (ip6h->version != 6)
+ return 0;
+
+ /* Prevent flooding this packet if there is no listener present */
+ if (!ipv6_addr_is_ll_all_nodes(&ip6h->daddr))
+ BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
+
+ if (ip6h->nexthdr != IPPROTO_HOPOPTS ||
ip6h->payload_len == 0)
return 0;
@@ -1610,19 +1686,32 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
return 0;
}
-static void br_multicast_query_expired(unsigned long data)
+static void br_multicast_query_expired(struct net_bridge *br,
+ struct bridge_mcast_query *query)
+{
+ spin_lock(&br->multicast_lock);
+ if (query->startup_sent < br->multicast_startup_query_count)
+ query->startup_sent++;
+
+ br_multicast_send_query(br, NULL, query);
+ spin_unlock(&br->multicast_lock);
+}
+
+static void br_ip4_multicast_query_expired(unsigned long data)
{
struct net_bridge *br = (void *)data;
- spin_lock(&br->multicast_lock);
- if (br->multicast_startup_queries_sent <
- br->multicast_startup_query_count)
- br->multicast_startup_queries_sent++;
+ br_multicast_query_expired(br, &br->ip4_query);
+}
- br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);
+#if IS_ENABLED(CONFIG_IPV6)
+static void br_ip6_multicast_query_expired(unsigned long data)
+{
+ struct net_bridge *br = (void *)data;
- spin_unlock(&br->multicast_lock);
+ br_multicast_query_expired(br, &br->ip6_query);
}
+#endif
void br_multicast_init(struct net_bridge *br)
{
@@ -1642,23 +1731,43 @@ void br_multicast_init(struct net_bridge *br)
br->multicast_querier_interval = 255 * HZ;
br->multicast_membership_interval = 260 * HZ;
+ br->ip4_querier.delay_time = 0;
+#if IS_ENABLED(CONFIG_IPV6)
+ br->ip6_querier.delay_time = 0;
+#endif
+
spin_lock_init(&br->multicast_lock);
setup_timer(&br->multicast_router_timer,
br_multicast_local_router_expired, 0);
- setup_timer(&br->multicast_querier_timer,
- br_multicast_querier_expired, (unsigned long)br);
- setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
+ setup_timer(&br->ip4_querier.timer, br_ip4_multicast_querier_expired,
+ (unsigned long)br);
+ setup_timer(&br->ip4_query.timer, br_ip4_multicast_query_expired,
(unsigned long)br);
+#if IS_ENABLED(CONFIG_IPV6)
+ setup_timer(&br->ip6_querier.timer, br_ip6_multicast_querier_expired,
+ (unsigned long)br);
+ setup_timer(&br->ip6_query.timer, br_ip6_multicast_query_expired,
+ (unsigned long)br);
+#endif
}
-void br_multicast_open(struct net_bridge *br)
+static void __br_multicast_open(struct net_bridge *br,
+ struct bridge_mcast_query *query)
{
- br->multicast_startup_queries_sent = 0;
+ query->startup_sent = 0;
if (br->multicast_disabled)
return;
- mod_timer(&br->multicast_query_timer, jiffies);
+ mod_timer(&query->timer, jiffies);
+}
+
+void br_multicast_open(struct net_bridge *br)
+{
+ __br_multicast_open(br, &br->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+ __br_multicast_open(br, &br->ip6_query);
+#endif
}
void br_multicast_stop(struct net_bridge *br)
@@ -1670,8 +1779,12 @@ void br_multicast_stop(struct net_bridge *br)
int i;
del_timer_sync(&br->multicast_router_timer);
- del_timer_sync(&br->multicast_querier_timer);
- del_timer_sync(&br->multicast_query_timer);
+ del_timer_sync(&br->ip4_querier.timer);
+ del_timer_sync(&br->ip4_query.timer);
+#if IS_ENABLED(CONFIG_IPV6)
+ del_timer_sync(&br->ip6_querier.timer);
+ del_timer_sync(&br->ip6_query.timer);
+#endif
spin_lock_bh(&br->multicast_lock);
mdb = mlock_dereference(br->mdb, br);
@@ -1774,18 +1887,24 @@ unlock:
return err;
}
-static void br_multicast_start_querier(struct net_bridge *br)
+static void br_multicast_start_querier(struct net_bridge *br,
+ struct bridge_mcast_query *query)
{
struct net_bridge_port *port;
- br_multicast_open(br);
+ __br_multicast_open(br, query);
list_for_each_entry(port, &br->port_list, list) {
if (port->state == BR_STATE_DISABLED ||
port->state == BR_STATE_BLOCKING)
continue;
- __br_multicast_enable_port(port);
+ if (query == &br->ip4_query)
+ br_multicast_enable(&port->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ br_multicast_enable(&port->ip6_query);
+#endif
}
}
@@ -1820,7 +1939,10 @@ rollback:
goto rollback;
}
- br_multicast_start_querier(br);
+ br_multicast_start_querier(br, &br->ip4_query);
+#if IS_ENABLED(CONFIG_IPV6)
+ br_multicast_start_querier(br, &br->ip6_query);
+#endif
unlock:
spin_unlock_bh(&br->multicast_lock);
@@ -1830,6 +1952,8 @@ unlock:
int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
{
+ unsigned long max_delay;
+
val = !!val;
spin_lock_bh(&br->multicast_lock);
@@ -1837,8 +1961,22 @@ int br_multicast_set_querier(struct net_bridge *br, unsigned long val)
goto unlock;
br->multicast_querier = val;
- if (val)
- br_multicast_start_querier(br);
+ if (!val)
+ goto unlock;
+
+ max_delay = br->multicast_query_response_interval;
+
+ if (!timer_pending(&br->ip4_querier.timer))
+ br->ip4_querier.delay_time = jiffies + max_delay;
+
+ br_multicast_start_querier(br, &br->ip4_query);
+
+#if IS_ENABLED(CONFIG_IPV6)
+ if (!timer_pending(&br->ip6_querier.timer))
+ br->ip6_querier.delay_time = jiffies + max_delay;
+
+ br_multicast_start_querier(br, &br->ip6_query);
+#endif
unlock:
spin_unlock_bh(&br->multicast_lock);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 1fc30abd3a5..b9259efa636 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -132,7 +132,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
else
pv = br_get_vlan_info(br);
- if (!pv || bitmap_empty(pv->vlan_bitmap, BR_VLAN_BITMAP_LEN))
+ if (!pv || bitmap_empty(pv->vlan_bitmap, VLAN_N_VID))
goto done;
af = nla_nest_start(skb, IFLA_AF_SPEC);
@@ -140,7 +140,7 @@ static int br_fill_ifinfo(struct sk_buff *skb,
goto nla_put_failure;
pvid = br_get_pvid(pv);
- for_each_set_bit(vid, pv->vlan_bitmap, BR_VLAN_BITMAP_LEN) {
+ for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID) {
vinfo.vid = vid;
vinfo.flags = 0;
if (vid == pvid)
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 3a3f371b284..2998dd1769a 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -102,6 +102,11 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid underlaying device to change its type. */
return NOTIFY_BAD;
+
+ case NETDEV_RESEND_IGMP:
+ /* Propagate to master device */
+ call_netdevice_notifiers(event, br->dev);
+ break;
}
/* Events that may cause spanning tree to refresh */
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 3be89b3ce17..598cb0b333c 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -66,6 +66,20 @@ struct br_ip
__u16 vid;
};
+#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+/* our own querier */
+struct bridge_mcast_query {
+ struct timer_list timer;
+ u32 startup_sent;
+};
+
+/* other querier */
+struct bridge_mcast_querier {
+ struct timer_list timer;
+ unsigned long delay_time;
+};
+#endif
+
struct net_port_vlans {
u16 port_idx;
u16 pvid;
@@ -162,10 +176,12 @@ struct net_bridge_port
#define BR_FLOOD 0x00000040
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
- u32 multicast_startup_queries_sent;
+ struct bridge_mcast_query ip4_query;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct bridge_mcast_query ip6_query;
+#endif /* IS_ENABLED(CONFIG_IPV6) */
unsigned char multicast_router;
struct timer_list multicast_router_timer;
- struct timer_list multicast_query_timer;
struct hlist_head mglist;
struct hlist_node rlist;
#endif
@@ -258,7 +274,6 @@ struct net_bridge
u32 hash_max;
u32 multicast_last_member_count;
- u32 multicast_startup_queries_sent;
u32 multicast_startup_query_count;
unsigned long multicast_last_member_interval;
@@ -273,8 +288,12 @@ struct net_bridge
struct hlist_head router_list;
struct timer_list multicast_router_timer;
- struct timer_list multicast_querier_timer;
- struct timer_list multicast_query_timer;
+ struct bridge_mcast_querier ip4_querier;
+ struct bridge_mcast_query ip4_query;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct bridge_mcast_querier ip6_querier;
+ struct bridge_mcast_query ip6_query;
+#endif /* IS_ENABLED(CONFIG_IPV6) */
#endif
struct timer_list hello_timer;
@@ -333,11 +352,6 @@ extern void br_dev_delete(struct net_device *dev, struct list_head *list);
extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
struct net_device *dev);
#ifdef CONFIG_NET_POLL_CONTROLLER
-static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
-{
- return br->dev->npinfo;
-}
-
static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
struct sk_buff *skb)
{
@@ -350,11 +364,6 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
extern int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp);
extern void br_netpoll_disable(struct net_bridge_port *p);
#else
-static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br)
-{
- return NULL;
-}
-
static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
struct sk_buff *skb)
{
@@ -475,7 +484,7 @@ extern void br_multicast_free_pg(struct rcu_head *head);
extern struct net_bridge_port_group *br_multicast_new_port_group(
struct net_bridge_port *port,
struct br_ip *group,
- struct net_bridge_port_group *next,
+ struct net_bridge_port_group __rcu *next,
unsigned char state);
extern void br_mdb_init(void);
extern void br_mdb_uninit(void);
@@ -485,22 +494,35 @@ extern void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
#define mlock_dereference(X, br) \
rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
-#if IS_ENABLED(CONFIG_IPV6)
-#include <net/addrconf.h>
-static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
-{
- if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr))
- return 1;
- return 0;
-}
-#endif
-
static inline bool br_multicast_is_router(struct net_bridge *br)
{
return br->multicast_router == 2 ||
(br->multicast_router == 1 &&
timer_pending(&br->multicast_router_timer));
}
+
+static inline bool
+__br_multicast_querier_exists(struct net_bridge *br,
+ struct bridge_mcast_querier *querier)
+{
+ return time_is_before_jiffies(querier->delay_time) &&
+ (br->multicast_querier || timer_pending(&querier->timer));
+}
+
+static inline bool br_multicast_querier_exists(struct net_bridge *br,
+ struct ethhdr *eth)
+{
+ switch (eth->h_proto) {
+ case (htons(ETH_P_IP)):
+ return __br_multicast_querier_exists(br, &br->ip4_querier);
+#if IS_ENABLED(CONFIG_IPV6)
+ case (htons(ETH_P_IPV6)):
+ return __br_multicast_querier_exists(br, &br->ip6_querier);
+#endif
+ default:
+ return false;
+ }
+}
#else
static inline int br_multicast_rcv(struct net_bridge *br,
struct net_bridge_port *port,
@@ -557,6 +579,11 @@ static inline bool br_multicast_is_router(struct net_bridge *br)
{
return 0;
}
+static inline bool br_multicast_querier_exists(struct net_bridge *br,
+ struct ethhdr *eth)
+{
+ return false;
+}
static inline void br_mdb_init(void)
{
}
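As a hedged illustration of how the new br_multicast_querier_exists() helper is meant to be consulted on the receive path (only the helper call reflects this patch; the surrounding function name is hypothetical):

/* Minimal sketch: decide whether a multicast frame must be flooded because
 * no IGMP/MLD querier is known for its protocol. Assumes the caller runs on
 * the bridge input path under rcu_read_lock(), as br_handle_frame_finish does.
 */
static bool example_must_flood_multicast(struct net_bridge *br,
					 struct sk_buff *skb)
{
	/* Without a querier (our own, or a foreign one whose timer and
	 * delay_time are still valid) the snooping database may be stale,
	 * so forwarding only to registered ports could starve listeners.
	 */
	return !br_multicast_querier_exists(br, eth_hdr(skb));
}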
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 394bb96b608..3b9637fb793 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -1,5 +1,5 @@
/*
- * Sysfs attributes of bridge ports
+ * Sysfs attributes of bridge
* Linux ethernet bridge
*
* Authors:
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index bd58b45f5f9..9a9ffe7e401 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -108,7 +108,7 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
clear_bit(vid, v->vlan_bitmap);
v->num_vlans--;
- if (bitmap_empty(v->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
+ if (bitmap_empty(v->vlan_bitmap, VLAN_N_VID)) {
if (v->port_idx)
rcu_assign_pointer(v->parent.port->vlan_info, NULL);
else
@@ -122,7 +122,7 @@ static void __vlan_flush(struct net_port_vlans *v)
{
smp_wmb();
v->pvid = 0;
- bitmap_zero(v->vlan_bitmap, BR_VLAN_BITMAP_LEN);
+ bitmap_zero(v->vlan_bitmap, VLAN_N_VID);
if (v->port_idx)
rcu_assign_pointer(v->parent.port->vlan_info, NULL);
else
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index 70f656ce0f4..dbd1c783431 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -64,7 +64,7 @@ static int ebt_broute(struct sk_buff *skb)
static int __net_init broute_net_init(struct net *net)
{
net->xt.broute_table = ebt_register_table(net, &broute_table);
- return PTR_RET(net->xt.broute_table);
+ return PTR_ERR_OR_ZERO(net->xt.broute_table);
}
static void __net_exit broute_net_exit(struct net *net)
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index 3c2e9dced9e..94b2b700cff 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -100,7 +100,7 @@ static struct nf_hook_ops ebt_ops_filter[] __read_mostly = {
static int __net_init frame_filter_net_init(struct net *net)
{
net->xt.frame_filter = ebt_register_table(net, &frame_filter);
- return PTR_RET(net->xt.frame_filter);
+ return PTR_ERR_OR_ZERO(net->xt.frame_filter);
}
static void __net_exit frame_filter_net_exit(struct net *net)
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 10871bc7790..322555acdd4 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -100,7 +100,7 @@ static struct nf_hook_ops ebt_ops_nat[] __read_mostly = {
static int __net_init frame_nat_net_init(struct net *net)
{
net->xt.frame_nat = ebt_register_table(net, &frame_nat);
- return PTR_RET(net->xt.frame_nat);
+ return PTR_ERR_OR_ZERO(net->xt.frame_nat);
}
static void __net_exit frame_nat_net_exit(struct net *net)
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 2bd4b58f437..0f455227da8 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -293,9 +293,10 @@ int cfctrl_linkup_request(struct cflayer *layer,
count = cfctrl_cancel_req(&cfctrl->serv.layer,
user_layer);
- if (count != 1)
+ if (count != 1) {
pr_err("Could not remove request (%d)", count);
return -ENODEV;
+ }
}
return 0;
}
diff --git a/net/can/gw.c b/net/can/gw.c
index 2f291f961a1..3f9b0f3a281 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -146,6 +146,7 @@ struct cgw_job {
/* tbc */
};
u8 gwtype;
+ u8 limit_hops;
u16 flags;
};
@@ -402,6 +403,11 @@ static void can_can_gw_rcv(struct sk_buff *skb, void *data)
/* put the incremented hop counter in the cloned skb */
cgw_hops(nskb) = cgw_hops(skb) + 1;
+
+ /* first processing of this CAN frame -> adjust to private hop limit */
+ if (gwj->limit_hops && cgw_hops(nskb) == 1)
+ cgw_hops(nskb) = max_hops - gwj->limit_hops + 1;
+
nskb->dev = gwj->dst.dev;
/* pointer to modifiable CAN frame */
@@ -509,6 +515,11 @@ static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
/* check non default settings of attributes */
+ if (gwj->limit_hops) {
+ if (nla_put_u8(skb, CGW_LIM_HOPS, gwj->limit_hops) < 0)
+ goto cancel;
+ }
+
if (gwj->mod.modtype.and) {
memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
mb.modtype = gwj->mod.modtype.and;
@@ -606,11 +617,12 @@ static const struct nla_policy cgw_policy[CGW_MAX+1] = {
[CGW_SRC_IF] = { .type = NLA_U32 },
[CGW_DST_IF] = { .type = NLA_U32 },
[CGW_FILTER] = { .len = sizeof(struct can_filter) },
+ [CGW_LIM_HOPS] = { .type = NLA_U8 },
};
/* check for common and gwtype specific attributes */
static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
- u8 gwtype, void *gwtypeattr)
+ u8 gwtype, void *gwtypeattr, u8 *limhops)
{
struct nlattr *tb[CGW_MAX+1];
struct cgw_frame_mod mb;
@@ -625,6 +637,13 @@ static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
if (err < 0)
return err;
+ if (tb[CGW_LIM_HOPS]) {
+ *limhops = nla_get_u8(tb[CGW_LIM_HOPS]);
+
+ if (*limhops < 1 || *limhops > max_hops)
+ return -EINVAL;
+ }
+
/* check for AND/OR/XOR/SET modifications */
if (tb[CGW_MOD_AND]) {
@@ -782,6 +801,7 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct rtcanmsg *r;
struct cgw_job *gwj;
+ u8 limhops = 0;
int err = 0;
if (!capable(CAP_NET_ADMIN))
@@ -808,7 +828,8 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh)
gwj->flags = r->flags;
gwj->gwtype = r->gwtype;
- err = cgw_parse_attr(nlh, &gwj->mod, CGW_TYPE_CAN_CAN, &gwj->ccgw);
+ err = cgw_parse_attr(nlh, &gwj->mod, CGW_TYPE_CAN_CAN, &gwj->ccgw,
+ &limhops);
if (err < 0)
goto out;
@@ -836,6 +857,8 @@ static int cgw_create_job(struct sk_buff *skb, struct nlmsghdr *nlh)
if (gwj->dst.dev->type != ARPHRD_CAN || gwj->dst.dev->header_ops)
goto put_src_dst_out;
+ gwj->limit_hops = limhops;
+
ASSERT_RTNL();
err = cgw_register_filter(gwj);
@@ -867,13 +890,14 @@ static void cgw_remove_all_jobs(void)
}
}
-static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
+static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct cgw_job *gwj = NULL;
struct hlist_node *nx;
struct rtcanmsg *r;
struct cf_mod mod;
struct can_can_gw ccgw;
+ u8 limhops = 0;
int err = 0;
if (!capable(CAP_NET_ADMIN))
@@ -890,7 +914,7 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
if (r->gwtype != CGW_TYPE_CAN_CAN)
return -EINVAL;
- err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw);
+ err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
if (err < 0)
return err;
@@ -910,6 +934,9 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
if (gwj->flags != r->flags)
continue;
+ if (gwj->limit_hops != limhops)
+ continue;
+
if (memcmp(&gwj->mod, &mod, sizeof(mod)))
continue;
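A worked example of the hop arithmetic introduced above, under the assumption that can_can_gw_rcv() drops frames whose counter has already reached max_hops (that check sits outside this hunk); the numbers are made up:

/*
 * Suppose max_hops == 4 and gwj->limit_hops == 2.
 *
 *   1st traversal: cgw_hops(nskb) = 0 + 1 = 1; limit_hops is set and the
 *                  counter is 1, so it is rewritten to 4 - 2 + 1 = 3.
 *   2nd traversal: 3 < max_hops, the frame is routed again and the clone
 *                  gets cgw_hops(nskb) = 4.
 *   3rd attempt:   the counter has reached max_hops, so the frame is not
 *                  routed further (assuming the existing max_hops check).
 *
 * Net effect: this job allows limit_hops == 2 traversals without touching
 * the global max_hops module parameter.
 */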
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index eb0a46a49bd..3be308e1430 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -409,7 +409,7 @@ static void ceph_sock_write_space(struct sock *sk)
* and net/core/stream.c:sk_stream_write_space().
*/
if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
+ if (sk_stream_is_writeable(sk)) {
dout("%s %p queueing write work\n", __func__, con);
clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
queue_con(con);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 8ab48cd8955..af814e76420 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -48,6 +48,7 @@
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
+#include <linux/pagemap.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
@@ -573,6 +574,77 @@ fault:
}
EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
+/**
+ * zerocopy_sg_from_iovec - Build a zerocopy datagram from an iovec
+ * @skb: buffer to copy into
+ * @from: io vector to copy from
+ * @offset: offset in the io vector to start copying from
+ * @count: number of io vector entries to copy from
+ *
+ * The function will first copy up to headlen, and then pin the userspace
+ * pages and build frags through them.
+ *
+ * Returns 0, -EFAULT or -EMSGSIZE.
+ * Note: the iovec is not modified during the copy
+ */
+int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
+ int offset, size_t count)
+{
+ int len = iov_length(from, count) - offset;
+ int copy = min_t(int, skb_headlen(skb), len);
+ int size;
+ int i = 0;
+
+ /* copy up to skb headlen */
+ if (skb_copy_datagram_from_iovec(skb, 0, from, offset, copy))
+ return -EFAULT;
+
+ if (len == copy)
+ return 0;
+
+ offset += copy;
+ while (count--) {
+ struct page *page[MAX_SKB_FRAGS];
+ int num_pages;
+ unsigned long base;
+ unsigned long truesize;
+
+ /* Skip over iovec entries already consumed by the offset */
+ if (offset >= from->iov_len) {
+ offset -= from->iov_len;
+ ++from;
+ continue;
+ }
+ len = from->iov_len - offset;
+ base = (unsigned long)from->iov_base + offset;
+ size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+ if (i + size > MAX_SKB_FRAGS)
+ return -EMSGSIZE;
+ num_pages = get_user_pages_fast(base, size, 0, &page[i]);
+ if (num_pages != size) {
+ release_pages(&page[i], num_pages, 0);
+ return -EFAULT;
+ }
+ truesize = size * PAGE_SIZE;
+ skb->data_len += len;
+ skb->len += len;
+ skb->truesize += truesize;
+ atomic_add(truesize, &skb->sk->sk_wmem_alloc);
+ while (len) {
+ int off = base & ~PAGE_MASK;
+ int size = min_t(int, len, PAGE_SIZE - off);
+ skb_fill_page_desc(skb, i, page[i], off, size);
+ base += size;
+ len -= size;
+ i++;
+ }
+ offset = 0;
+ ++from;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(zerocopy_sg_from_iovec);
+
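A hedged sketch of a typical zerocopy_sg_from_iovec() caller in the style of the tun/macvtap transmit paths; every name outside the exported helper is illustrative:

/* Sketch only: copy the first hdr_len bytes of the user iovec into the
 * linear area and pin the remainder as page frags. A real file would need
 * <linux/err.h>, <linux/skbuff.h> and <net/sock.h>.
 */
static struct sk_buff *example_zerocopy_skb(struct sock *sk,
					    const struct iovec *iv,
					    size_t count, int hdr_len)
{
	struct sk_buff *skb;
	int err;

	skb = sock_alloc_send_skb(sk, hdr_len, 1, &err);
	if (!skb)
		return ERR_PTR(err);

	skb_put(skb, hdr_len);		/* headlen seen by the helper below */

	err = zerocopy_sg_from_iovec(skb, iv, 0, count);
	if (err) {			/* -EFAULT or -EMSGSIZE */
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}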
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
u8 __user *to, int len,
__wsum *csump)
diff --git a/net/core/dev.c b/net/core/dev.c
index 26755dd40da..5c713f2239c 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -174,7 +174,7 @@ static DEFINE_SPINLOCK(napi_hash_lock);
static unsigned int napi_gen_id;
static DEFINE_HASHTABLE(napi_hash, 8);
-seqcount_t devnet_rename_seq;
+static seqcount_t devnet_rename_seq;
static inline void dev_base_seq_inc(struct net *net)
{
@@ -1691,13 +1691,13 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
kfree_skb(skb);
return NET_RX_DROP;
}
- skb_scrub_packet(skb);
skb->protocol = eth_type_trans(skb, dev);
/* eth_type_trans() can set pkt_type.
- * clear pkt_type _after_ calling eth_type_trans()
+ * call skb_scrub_packet() after it to clear pkt_type _after_ calling
+ * eth_type_trans().
*/
- skb->pkt_type = PACKET_HOST;
+ skb_scrub_packet(skb, true);
return netif_rx(skb);
}
@@ -4367,57 +4367,48 @@ softnet_break:
goto out;
}
-struct netdev_upper {
+struct netdev_adjacent {
struct net_device *dev;
+
+ /* upper master flag, there can only be one master device per list */
bool master;
+
+ /* indicates that this dev is our first-level lower/upper device */
+ bool neighbour;
+
+ /* counter for the number of times this device was added to us */
+ u16 ref_nr;
+
struct list_head list;
struct rcu_head rcu;
- struct list_head search_list;
};
-static void __append_search_uppers(struct list_head *search_list,
- struct net_device *dev)
+static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
+ struct net_device *adj_dev,
+ bool upper)
{
- struct netdev_upper *upper;
+ struct netdev_adjacent *adj;
+ struct list_head *dev_list;
- list_for_each_entry(upper, &dev->upper_dev_list, list) {
- /* check if this upper is not already in search list */
- if (list_empty(&upper->search_list))
- list_add_tail(&upper->search_list, search_list);
+ dev_list = upper ? &dev->upper_dev_list : &dev->lower_dev_list;
+
+ list_for_each_entry(adj, dev_list, list) {
+ if (adj->dev == adj_dev)
+ return adj;
}
+ return NULL;
}
-static bool __netdev_search_upper_dev(struct net_device *dev,
- struct net_device *upper_dev)
+static inline struct netdev_adjacent *__netdev_find_upper(struct net_device *dev,
+ struct net_device *udev)
{
- LIST_HEAD(search_list);
- struct netdev_upper *upper;
- struct netdev_upper *tmp;
- bool ret = false;
-
- __append_search_uppers(&search_list, dev);
- list_for_each_entry(upper, &search_list, search_list) {
- if (upper->dev == upper_dev) {
- ret = true;
- break;
- }
- __append_search_uppers(&search_list, upper->dev);
- }
- list_for_each_entry_safe(upper, tmp, &search_list, search_list)
- INIT_LIST_HEAD(&upper->search_list);
- return ret;
+ return __netdev_find_adj(dev, udev, true);
}
-static struct netdev_upper *__netdev_find_upper(struct net_device *dev,
- struct net_device *upper_dev)
+static inline struct netdev_adjacent *__netdev_find_lower(struct net_device *dev,
+ struct net_device *ldev)
{
- struct netdev_upper *upper;
-
- list_for_each_entry(upper, &dev->upper_dev_list, list) {
- if (upper->dev == upper_dev)
- return upper;
- }
- return NULL;
+ return __netdev_find_adj(dev, ldev, false);
}
/**
@@ -4462,7 +4453,7 @@ EXPORT_SYMBOL(netdev_has_any_upper_dev);
*/
struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
{
- struct netdev_upper *upper;
+ struct netdev_adjacent *upper;
ASSERT_RTNL();
@@ -4470,13 +4461,38 @@ struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
return NULL;
upper = list_first_entry(&dev->upper_dev_list,
- struct netdev_upper, list);
+ struct netdev_adjacent, list);
if (likely(upper->master))
return upper->dev;
return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get);
+/* netdev_upper_get_next_dev_rcu - Get the next dev from upper list
+ * @dev: device
+ * @iter: list_head ** of the current position
+ *
+ * Gets the next device from the dev's upper list, starting from iter
+ * position. The caller must hold RCU read lock.
+ */
+struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
+ struct list_head **iter)
+{
+ struct netdev_adjacent *upper;
+
+ WARN_ON_ONCE(!rcu_read_lock_held());
+
+ upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
+
+ if (&upper->list == &dev->upper_dev_list)
+ return NULL;
+
+ *iter = &upper->list;
+
+ return upper->dev;
+}
+EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
+
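A hedged usage sketch for the new iterator; the wrapper function is hypothetical, but the iterator protocol (seed iter with &dev->upper_dev_list, stop on NULL) follows the helper above:

/* Sketch: log every upper device of "dev". Must run under rcu_read_lock(),
 * which is what the WARN_ON_ONCE() in the helper checks for.
 */
static void example_walk_uppers(struct net_device *dev)
{
	struct list_head *iter = &dev->upper_dev_list;
	struct net_device *upper;

	rcu_read_lock();
	while ((upper = netdev_upper_get_next_dev_rcu(dev, &iter)) != NULL)
		pr_debug("%s is an upper device of %s\n",
			 upper->name, dev->name);
	rcu_read_unlock();
}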
/**
* netdev_master_upper_dev_get_rcu - Get master upper device
* @dev: device
@@ -4486,20 +4502,158 @@ EXPORT_SYMBOL(netdev_master_upper_dev_get);
*/
struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
{
- struct netdev_upper *upper;
+ struct netdev_adjacent *upper;
upper = list_first_or_null_rcu(&dev->upper_dev_list,
- struct netdev_upper, list);
+ struct netdev_adjacent, list);
if (upper && likely(upper->master))
return upper->dev;
return NULL;
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
+static int __netdev_adjacent_dev_insert(struct net_device *dev,
+ struct net_device *adj_dev,
+ bool neighbour, bool master,
+ bool upper)
+{
+ struct netdev_adjacent *adj;
+
+ adj = __netdev_find_adj(dev, adj_dev, upper);
+
+ if (adj) {
+ BUG_ON(neighbour);
+ adj->ref_nr++;
+ return 0;
+ }
+
+ adj = kmalloc(sizeof(*adj), GFP_KERNEL);
+ if (!adj)
+ return -ENOMEM;
+
+ adj->dev = adj_dev;
+ adj->master = master;
+ adj->neighbour = neighbour;
+ adj->ref_nr = 1;
+
+ dev_hold(adj_dev);
+ pr_debug("dev_hold for %s, because of %s link added from %s to %s\n",
+ adj_dev->name, upper ? "upper" : "lower", dev->name,
+ adj_dev->name);
+
+ if (!upper) {
+ list_add_tail_rcu(&adj->list, &dev->lower_dev_list);
+ return 0;
+ }
+
+ /* Ensure that master upper link is always the first item in list. */
+ if (master)
+ list_add_rcu(&adj->list, &dev->upper_dev_list);
+ else
+ list_add_tail_rcu(&adj->list, &dev->upper_dev_list);
+
+ return 0;
+}
+
+static inline int __netdev_upper_dev_insert(struct net_device *dev,
+ struct net_device *udev,
+ bool master, bool neighbour)
+{
+ return __netdev_adjacent_dev_insert(dev, udev, neighbour, master,
+ true);
+}
+
+static inline int __netdev_lower_dev_insert(struct net_device *dev,
+ struct net_device *ldev,
+ bool neighbour)
+{
+ return __netdev_adjacent_dev_insert(dev, ldev, neighbour, false,
+ false);
+}
+
+void __netdev_adjacent_dev_remove(struct net_device *dev,
+ struct net_device *adj_dev, bool upper)
+{
+ struct netdev_adjacent *adj;
+
+ if (upper)
+ adj = __netdev_find_upper(dev, adj_dev);
+ else
+ adj = __netdev_find_lower(dev, adj_dev);
+
+ if (!adj)
+ BUG();
+
+ if (adj->ref_nr > 1) {
+ adj->ref_nr--;
+ return;
+ }
+
+ list_del_rcu(&adj->list);
+ pr_debug("dev_put for %s, because of %s link removed from %s to %s\n",
+ adj_dev->name, upper ? "upper" : "lower", dev->name,
+ adj_dev->name);
+ dev_put(adj_dev);
+ kfree_rcu(adj, rcu);
+}
+
+static inline void __netdev_upper_dev_remove(struct net_device *dev,
+ struct net_device *udev)
+{
+ return __netdev_adjacent_dev_remove(dev, udev, true);
+}
+
+static inline void __netdev_lower_dev_remove(struct net_device *dev,
+ struct net_device *ldev)
+{
+ return __netdev_adjacent_dev_remove(dev, ldev, false);
+}
+
+int __netdev_adjacent_dev_insert_link(struct net_device *dev,
+ struct net_device *upper_dev,
+ bool master, bool neighbour)
+{
+ int ret;
+
+ ret = __netdev_upper_dev_insert(dev, upper_dev, master, neighbour);
+ if (ret)
+ return ret;
+
+ ret = __netdev_lower_dev_insert(upper_dev, dev, neighbour);
+ if (ret) {
+ __netdev_upper_dev_remove(dev, upper_dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static inline int __netdev_adjacent_dev_link(struct net_device *dev,
+ struct net_device *udev)
+{
+ return __netdev_adjacent_dev_insert_link(dev, udev, false, false);
+}
+
+static inline int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
+ struct net_device *udev,
+ bool master)
+{
+ return __netdev_adjacent_dev_insert_link(dev, udev, master, true);
+}
+
+void __netdev_adjacent_dev_unlink(struct net_device *dev,
+ struct net_device *upper_dev)
+{
+ __netdev_upper_dev_remove(dev, upper_dev);
+ __netdev_lower_dev_remove(upper_dev, dev);
+}
+
+
static int __netdev_upper_dev_link(struct net_device *dev,
struct net_device *upper_dev, bool master)
{
- struct netdev_upper *upper;
+ struct netdev_adjacent *i, *j, *to_i, *to_j;
+ int ret = 0;
ASSERT_RTNL();
@@ -4507,7 +4661,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
return -EBUSY;
/* To prevent loops, check if dev is not upper device to upper_dev. */
- if (__netdev_search_upper_dev(upper_dev, dev))
+ if (__netdev_find_upper(upper_dev, dev))
return -EBUSY;
if (__netdev_find_upper(dev, upper_dev))
@@ -4516,22 +4670,76 @@ static int __netdev_upper_dev_link(struct net_device *dev,
if (master && netdev_master_upper_dev_get(dev))
return -EBUSY;
- upper = kmalloc(sizeof(*upper), GFP_KERNEL);
- if (!upper)
- return -ENOMEM;
+ ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, master);
+ if (ret)
+ return ret;
- upper->dev = upper_dev;
- upper->master = master;
- INIT_LIST_HEAD(&upper->search_list);
+ /* Now that we linked these devs, make all the upper_dev's
+ * upper_dev_list visible to every dev's lower_dev_list and vice
+ * versa, and don't forget the devices themselves. All of these
+ * links are non-neighbours.
+ */
+ list_for_each_entry(i, &dev->lower_dev_list, list) {
+ list_for_each_entry(j, &upper_dev->upper_dev_list, list) {
+ ret = __netdev_adjacent_dev_link(i->dev, j->dev);
+ if (ret)
+ goto rollback_mesh;
+ }
+ }
+
+ /* add dev to every upper_dev's upper device */
+ list_for_each_entry(i, &upper_dev->upper_dev_list, list) {
+ ret = __netdev_adjacent_dev_link(dev, i->dev);
+ if (ret)
+ goto rollback_upper_mesh;
+ }
+
+ /* add upper_dev to every dev's lower device */
+ list_for_each_entry(i, &dev->lower_dev_list, list) {
+ ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
+ if (ret)
+ goto rollback_lower_mesh;
+ }
- /* Ensure that master upper link is always the first item in list. */
- if (master)
- list_add_rcu(&upper->list, &dev->upper_dev_list);
- else
- list_add_tail_rcu(&upper->list, &dev->upper_dev_list);
- dev_hold(upper_dev);
call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
return 0;
+
+rollback_lower_mesh:
+ to_i = i;
+ list_for_each_entry(i, &dev->lower_dev_list, list) {
+ if (i == to_i)
+ break;
+ __netdev_adjacent_dev_unlink(i->dev, upper_dev);
+ }
+
+ i = NULL;
+
+rollback_upper_mesh:
+ to_i = i;
+ list_for_each_entry(i, &upper_dev->upper_dev_list, list) {
+ if (i == to_i)
+ break;
+ __netdev_adjacent_dev_unlink(dev, i->dev);
+ }
+
+ i = j = NULL;
+
+rollback_mesh:
+ to_i = i;
+ to_j = j;
+ list_for_each_entry(i, &dev->lower_dev_list, list) {
+ list_for_each_entry(j, &upper_dev->upper_dev_list, list) {
+ if (i == to_i && j == to_j)
+ break;
+ __netdev_adjacent_dev_unlink(i->dev, j->dev);
+ }
+ if (i == to_i)
+ break;
+ }
+
+ __netdev_adjacent_dev_unlink(dev, upper_dev);
+
+ return ret;
}
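A worked example (device names purely illustrative) of the mesh the loops above build when a bond with two ports is enslaved to a bridge:

/*
 * Start: eth0 and eth1 are neighbour lowers of bond0; now call
 * __netdev_upper_dev_link(bond0, br0, true).
 *
 *   neighbour link:             bond0 <-> br0 (master)
 *   dev->lower x upper->upper:  br0 has no uppers yet -> nothing to do
 *   dev x upper->upper:         likewise nothing
 *   dev->lower x upper_dev:     eth0 <-> br0, eth1 <-> br0 (non-neighbour)
 *
 * Resulting adjacency lists ("(n)" = neighbour, ref_nr == 1 everywhere):
 *   eth0.upper:  bond0(n), br0        eth1.upper:  bond0(n), br0
 *   bond0.upper: br0(n)               bond0.lower: eth0(n), eth1(n)
 *   br0.lower:   bond0(n), eth0, eth1
 */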
/**
@@ -4580,16 +4788,28 @@ EXPORT_SYMBOL(netdev_master_upper_dev_link);
void netdev_upper_dev_unlink(struct net_device *dev,
struct net_device *upper_dev)
{
- struct netdev_upper *upper;
-
+ struct netdev_adjacent *i, *j;
ASSERT_RTNL();
- upper = __netdev_find_upper(dev, upper_dev);
- if (!upper)
- return;
- list_del_rcu(&upper->list);
- dev_put(upper_dev);
- kfree_rcu(upper, rcu);
+ __netdev_adjacent_dev_unlink(dev, upper_dev);
+
+ /* Here is the tricky part. We must remove all dev's lower
+ * devices from all upper_dev's upper devices and vice
+ * versa, to maintain the graph relationship.
+ */
+ list_for_each_entry(i, &dev->lower_dev_list, list)
+ list_for_each_entry(j, &upper_dev->upper_dev_list, list)
+ __netdev_adjacent_dev_unlink(i->dev, j->dev);
+
+ /* also remove the devices themselves from the lower/upper device
+ * lists
+ */
+ list_for_each_entry(i, &dev->lower_dev_list, list)
+ __netdev_adjacent_dev_unlink(i->dev, upper_dev);
+
+ list_for_each_entry(i, &upper_dev->upper_dev_list, list)
+ __netdev_adjacent_dev_unlink(dev, i->dev);
+
call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
}
EXPORT_SYMBOL(netdev_upper_dev_unlink);
@@ -4989,6 +5209,24 @@ int dev_change_carrier(struct net_device *dev, bool new_carrier)
EXPORT_SYMBOL(dev_change_carrier);
/**
+ * dev_get_phys_port_id - Get device physical port ID
+ * @dev: device
+ * @ppid: port ID
+ *
+ * Get device physical port ID
+ */
+int dev_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_port_id *ppid)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if (!ops->ndo_get_phys_port_id)
+ return -EOPNOTSUPP;
+ return ops->ndo_get_phys_port_id(dev, ppid);
+}
+EXPORT_SYMBOL(dev_get_phys_port_id);
+
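A hedged sketch of a dev_get_phys_port_id() caller; the function around the call is hypothetical, while the struct fields (id, id_len) are the ones struct netdev_phys_port_id provides:

/* Sketch only: dump a device's physical port ID when the driver exposes one. */
static void example_show_phys_port_id(struct net_device *dev)
{
	struct netdev_phys_port_id ppid;

	if (dev_get_phys_port_id(dev, &ppid))	/* -EOPNOTSUPP without ndo */
		return;

	print_hex_dump_bytes("phys_port_id: ", DUMP_PREFIX_NONE,
			     ppid.id, ppid.id_len);
}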
+/**
* dev_new_index - allocate an ifindex
* @net: the applicable net namespace
*
@@ -5832,6 +6070,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
INIT_LIST_HEAD(&dev->unreg_list);
INIT_LIST_HEAD(&dev->link_watch_list);
INIT_LIST_HEAD(&dev->upper_dev_list);
+ INIT_LIST_HEAD(&dev->lower_dev_list);
dev->priv_flags = IFF_XMIT_DST_RELEASE;
setup(dev);
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 21735440c44..2e654138433 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -33,6 +33,9 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
r->flags = flags;
r->fr_net = hold_net(ops->fro_net);
+ r->suppress_prefixlen = -1;
+ r->suppress_ifgroup = -1;
+
/* The lock is not required here, the list is unreachable
* at the moment this function is called */
list_add_tail(&r->list, &ops->rules_list);
@@ -226,6 +229,9 @@ jumped:
else
err = ops->action(rule, fl, flags, arg);
+ if (!err && ops->suppress && ops->suppress(rule, arg))
+ continue;
+
if (err != -EAGAIN) {
if ((arg->flags & FIB_LOOKUP_NOREF) ||
likely(atomic_inc_not_zero(&rule->refcnt))) {
@@ -337,6 +343,15 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
rule->action = frh->action;
rule->flags = frh->flags;
rule->table = frh_get_table(frh, tb);
+ if (tb[FRA_SUPPRESS_PREFIXLEN])
+ rule->suppress_prefixlen = nla_get_u32(tb[FRA_SUPPRESS_PREFIXLEN]);
+ else
+ rule->suppress_prefixlen = -1;
+
+ if (tb[FRA_SUPPRESS_IFGROUP])
+ rule->suppress_ifgroup = nla_get_u32(tb[FRA_SUPPRESS_IFGROUP]);
+ else
+ rule->suppress_ifgroup = -1;
if (!tb[FRA_PRIORITY] && ops->default_pref)
rule->pref = ops->default_pref(ops);
@@ -523,6 +538,8 @@ static inline size_t fib_rule_nlmsg_size(struct fib_rules_ops *ops,
+ nla_total_size(IFNAMSIZ) /* FRA_OIFNAME */
+ nla_total_size(4) /* FRA_PRIORITY */
+ nla_total_size(4) /* FRA_TABLE */
+ + nla_total_size(4) /* FRA_SUPPRESS_PREFIXLEN */
+ + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
+ nla_total_size(4) /* FRA_FWMARK */
+ nla_total_size(4); /* FRA_FWMASK */
@@ -548,6 +565,8 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
frh->table = rule->table;
if (nla_put_u32(skb, FRA_TABLE, rule->table))
goto nla_put_failure;
+ if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
+ goto nla_put_failure;
frh->res1 = 0;
frh->res2 = 0;
frh->action = rule->action;
@@ -580,6 +599,12 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
(rule->target &&
nla_put_u32(skb, FRA_GOTO, rule->target)))
goto nla_put_failure;
+
+ if (rule->suppress_ifgroup != -1) {
+ if (nla_put_u32(skb, FRA_SUPPRESS_IFGROUP, rule->suppress_ifgroup))
+ goto nla_put_failure;
+ }
+
if (ops->fill(rule, skb, frh) < 0)
goto nla_put_failure;
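To make the new ops->suppress hook concrete, a hedged sketch of a per-family suppress callback; the real fib4/fib6 implementations live in their own files and differ in detail, and fib_result/prefixlen are the IPv4 names, used here as an assumption:

/* Sketch: suppress a matched rule when the looked-up route is less specific
 * than the configured threshold, so fib_rules_lookup() keeps walking.
 */
static bool example_fib_rule_suppress(struct fib_rule *rule,
				      struct fib_lookup_arg *arg)
{
	struct fib_result *result = arg->result;

	return rule->suppress_prefixlen >= 0 &&
	       result->prefixlen <= rule->suppress_prefixlen;
}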
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 00ee068efc1..0ff42f029ac 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -65,6 +65,7 @@ ipv6:
nhoff += sizeof(struct ipv6hdr);
break;
}
+ case __constant_htons(ETH_P_8021AD):
case __constant_htons(ETH_P_8021Q): {
const struct vlan_hdr *vlan;
struct vlan_hdr _vlan;
@@ -139,7 +140,11 @@ ipv6:
break;
}
case IPPROTO_IPIP:
- goto again;
+ proto = htons(ETH_P_IP);
+ goto ip;
+ case IPPROTO_IPV6:
+ proto = htons(ETH_P_IPV6);
+ goto ipv6;
default:
break;
}
@@ -345,14 +350,9 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
if (new_index < 0)
new_index = skb_tx_hash(dev, skb);
- if (queue_index != new_index && sk) {
- struct dst_entry *dst =
- rcu_dereference_check(sk->sk_dst_cache, 1);
-
- if (dst && skb_dst(skb) == dst)
- sk_tx_queue_set(sk, queue_index);
-
- }
+ if (queue_index != new_index && sk &&
+ rcu_access_pointer(sk->sk_dst_cache))
+ sk_tx_queue_set(sk, queue_index);
queue_index = new_index;
}
diff --git a/net/core/iovec.c b/net/core/iovec.c
index de178e46268..b77eeecc001 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -212,3 +212,27 @@ out_fault:
goto out;
}
EXPORT_SYMBOL(csum_partial_copy_fromiovecend);
+
+unsigned long iov_pages(const struct iovec *iov, int offset,
+ unsigned long nr_segs)
+{
+ unsigned long seg, base;
+ int pages = 0, len, size;
+
+ while (nr_segs && (offset >= iov->iov_len)) {
+ offset -= iov->iov_len;
+ ++iov;
+ --nr_segs;
+ }
+
+ for (seg = 0; seg < nr_segs; seg++) {
+ base = (unsigned long)iov[seg].iov_base + offset;
+ len = iov[seg].iov_len - offset;
+ size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+ pages += size;
+ offset = 0;
+ }
+
+ return pages;
+}
+EXPORT_SYMBOL(iov_pages);
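A hedged sketch of how a zerocopy transmit path can use the new helper to decide up front whether pinning is even possible; names other than iov_pages() and MAX_SKB_FRAGS are illustrative:

/* Sketch: the frag array holds at most MAX_SKB_FRAGS pages, so fall back to
 * the copying path when the tail of the iovec would not fit.
 */
static bool example_can_zerocopy(const struct iovec *iv, unsigned long count,
				 int copylen)
{
	return iov_pages(iv, copylen, count) <= MAX_SKB_FRAGS;
}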
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index b7de821f98d..6072610a867 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1441,16 +1441,18 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
atomic_set(&p->refcnt, 1);
p->reachable_time =
neigh_rand_reach_time(p->base_reachable_time);
+ dev_hold(dev);
+ p->dev = dev;
+ write_pnet(&p->net, hold_net(net));
+ p->sysctl_table = NULL;
if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
+ release_net(net);
+ dev_put(dev);
kfree(p);
return NULL;
}
- dev_hold(dev);
- p->dev = dev;
- write_pnet(&p->net, hold_net(net));
- p->sysctl_table = NULL;
write_lock_bh(&tbl->lock);
p->next = tbl->parms.next;
tbl->parms.next = p;
@@ -2757,16 +2759,15 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
-#ifdef CONFIG_ARPD
void neigh_app_ns(struct neighbour *n)
{
__neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
}
EXPORT_SYMBOL(neigh_app_ns);
-#endif /* CONFIG_ARPD */
#ifdef CONFIG_SYSCTL
static int zero;
+static int int_max = INT_MAX;
static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
static int proc_unres_qlen(struct ctl_table *ctl, int write,
@@ -2819,19 +2820,25 @@ static struct neigh_sysctl_table {
.procname = "mcast_solicit",
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .extra1 = &zero,
+ .extra2 = &int_max,
+ .proc_handler = proc_dointvec_minmax,
},
[NEIGH_VAR_UCAST_PROBE] = {
.procname = "ucast_solicit",
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .extra1 = &zero,
+ .extra2 = &int_max,
+ .proc_handler = proc_dointvec_minmax,
},
[NEIGH_VAR_APP_PROBE] = {
.procname = "app_solicit",
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .extra1 = &zero,
+ .extra2 = &int_max,
+ .proc_handler = proc_dointvec_minmax,
},
[NEIGH_VAR_RETRANS_TIME] = {
.procname = "retrans_time",
@@ -2874,7 +2881,9 @@ static struct neigh_sysctl_table {
.procname = "proxy_qlen",
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .extra1 = &zero,
+ .extra2 = &int_max,
+ .proc_handler = proc_dointvec_minmax,
},
[NEIGH_VAR_ANYCAST_DELAY] = {
.procname = "anycast_delay",
@@ -2916,19 +2925,25 @@ static struct neigh_sysctl_table {
.procname = "gc_thresh1",
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .extra1 = &zero,
+ .extra2 = &int_max,
+ .proc_handler = proc_dointvec_minmax,
},
[NEIGH_VAR_GC_THRESH2] = {
.procname = "gc_thresh2",
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .extra1 = &zero,
+ .extra2 = &int_max,
+ .proc_handler = proc_dointvec_minmax,
},
[NEIGH_VAR_GC_THRESH3] = {
.procname = "gc_thresh3",
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .extra1 = &zero,
+ .extra2 = &int_max,
+ .proc_handler = proc_dointvec_minmax,
},
{},
},
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 981fed397d1..3f40ea9de81 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -60,12 +60,19 @@ static ssize_t format_##field(const struct net_device *net, char *buf) \
{ \
return sprintf(buf, format_string, net->field); \
} \
-static ssize_t show_##field(struct device *dev, \
+static ssize_t field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
return netdev_show(dev, attr, buf, format_##field); \
-}
+} \
+#define NETDEVICE_SHOW_RO(field, format_string) \
+NETDEVICE_SHOW(field, format_string); \
+static DEVICE_ATTR_RO(field)
+
+#define NETDEVICE_SHOW_RW(field, format_string) \
+NETDEVICE_SHOW(field, format_string); \
+static DEVICE_ATTR_RW(field)
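For reference, a hedged illustration of what one use of the new wrapper expands to; DEVICE_ATTR_RW() is the generic driver-core macro, which is why the handlers must follow the <field>_show/<field>_store naming adopted throughout the rest of this patch:

/*
 * NETDEVICE_SHOW_RW(mtu, fmt_dec);
 *
 * roughly expands to format_mtu() and mtu_show() via NETDEVICE_SHOW(), plus
 *
 *   static DEVICE_ATTR_RW(mtu);
 *
 * which defines dev_attr_mtu with .show = mtu_show and .store = mtu_store.
 */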
/* use same locking and permission rules as SIF* ioctl's */
static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
@@ -96,16 +103,16 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
return ret;
}
-NETDEVICE_SHOW(dev_id, fmt_hex);
-NETDEVICE_SHOW(addr_assign_type, fmt_dec);
-NETDEVICE_SHOW(addr_len, fmt_dec);
-NETDEVICE_SHOW(iflink, fmt_dec);
-NETDEVICE_SHOW(ifindex, fmt_dec);
-NETDEVICE_SHOW(type, fmt_dec);
-NETDEVICE_SHOW(link_mode, fmt_dec);
+NETDEVICE_SHOW_RO(dev_id, fmt_hex);
+NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
+NETDEVICE_SHOW_RO(addr_len, fmt_dec);
+NETDEVICE_SHOW_RO(iflink, fmt_dec);
+NETDEVICE_SHOW_RO(ifindex, fmt_dec);
+NETDEVICE_SHOW_RO(type, fmt_dec);
+NETDEVICE_SHOW_RO(link_mode, fmt_dec);
/* use same locking rules as GIFHWADDR ioctl's */
-static ssize_t show_address(struct device *dev, struct device_attribute *attr,
+static ssize_t address_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct net_device *net = to_net_dev(dev);
@@ -117,15 +124,17 @@ static ssize_t show_address(struct device *dev, struct device_attribute *attr,
read_unlock(&dev_base_lock);
return ret;
}
+static DEVICE_ATTR_RO(address);
-static ssize_t show_broadcast(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t broadcast_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct net_device *net = to_net_dev(dev);
if (dev_isalive(net))
return sysfs_format_mac(buf, net->broadcast, net->addr_len);
return -EINVAL;
}
+static DEVICE_ATTR_RO(broadcast);
static int change_carrier(struct net_device *net, unsigned long new_carrier)
{
@@ -134,13 +143,13 @@ static int change_carrier(struct net_device *net, unsigned long new_carrier)
return dev_change_carrier(net, (bool) new_carrier);
}
-static ssize_t store_carrier(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len)
+static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
{
return netdev_store(dev, attr, buf, len, change_carrier);
}
-static ssize_t show_carrier(struct device *dev,
+static ssize_t carrier_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct net_device *netdev = to_net_dev(dev);
@@ -149,8 +158,9 @@ static ssize_t show_carrier(struct device *dev,
}
return -EINVAL;
}
+static DEVICE_ATTR_RW(carrier);
-static ssize_t show_speed(struct device *dev,
+static ssize_t speed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct net_device *netdev = to_net_dev(dev);
@@ -167,8 +177,9 @@ static ssize_t show_speed(struct device *dev,
rtnl_unlock();
return ret;
}
+static DEVICE_ATTR_RO(speed);
-static ssize_t show_duplex(struct device *dev,
+static ssize_t duplex_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct net_device *netdev = to_net_dev(dev);
@@ -198,8 +209,9 @@ static ssize_t show_duplex(struct device *dev,
rtnl_unlock();
return ret;
}
+static DEVICE_ATTR_RO(duplex);
-static ssize_t show_dormant(struct device *dev,
+static ssize_t dormant_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct net_device *netdev = to_net_dev(dev);
@@ -209,6 +221,7 @@ static ssize_t show_dormant(struct device *dev,
return -EINVAL;
}
+static DEVICE_ATTR_RO(dormant);
static const char *const operstates[] = {
"unknown",
@@ -220,7 +233,7 @@ static const char *const operstates[] = {
"up"
};
-static ssize_t show_operstate(struct device *dev,
+static ssize_t operstate_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const struct net_device *netdev = to_net_dev(dev);
@@ -237,35 +250,33 @@ static ssize_t show_operstate(struct device *dev,
return sprintf(buf, "%s\n", operstates[operstate]);
}
+static DEVICE_ATTR_RO(operstate);
/* read-write attributes */
-NETDEVICE_SHOW(mtu, fmt_dec);
static int change_mtu(struct net_device *net, unsigned long new_mtu)
{
return dev_set_mtu(net, (int) new_mtu);
}
-static ssize_t store_mtu(struct device *dev, struct device_attribute *attr,
+static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
return netdev_store(dev, attr, buf, len, change_mtu);
}
-
-NETDEVICE_SHOW(flags, fmt_hex);
+NETDEVICE_SHOW_RW(mtu, fmt_dec);
static int change_flags(struct net_device *net, unsigned long new_flags)
{
return dev_change_flags(net, (unsigned int) new_flags);
}
-static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
+static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
return netdev_store(dev, attr, buf, len, change_flags);
}
-
-NETDEVICE_SHOW(tx_queue_len, fmt_ulong);
+NETDEVICE_SHOW_RW(flags, fmt_hex);
static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
{
@@ -273,7 +284,7 @@ static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
return 0;
}
-static ssize_t store_tx_queue_len(struct device *dev,
+static ssize_t tx_queue_len_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
@@ -282,8 +293,9 @@ static ssize_t store_tx_queue_len(struct device *dev,
return netdev_store(dev, attr, buf, len, change_tx_queue_len);
}
+NETDEVICE_SHOW_RW(tx_queue_len, fmt_ulong);
-static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
+static ssize_t ifalias_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t len)
{
struct net_device *netdev = to_net_dev(dev);
@@ -306,7 +318,7 @@ static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
return ret < 0 ? ret : len;
}
-static ssize_t show_ifalias(struct device *dev,
+static ssize_t ifalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
const struct net_device *netdev = to_net_dev(dev);
@@ -319,8 +331,7 @@ static ssize_t show_ifalias(struct device *dev,
rtnl_unlock();
return ret;
}
-
-NETDEVICE_SHOW(group, fmt_dec);
+static DEVICE_ATTR_RW(ifalias);
static int change_group(struct net_device *net, unsigned long new_group)
{
@@ -328,35 +339,60 @@ static int change_group(struct net_device *net, unsigned long new_group)
return 0;
}
-static ssize_t store_group(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len)
+static ssize_t group_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
{
return netdev_store(dev, attr, buf, len, change_group);
}
+NETDEVICE_SHOW(group, fmt_dec);
+static DEVICE_ATTR(netdev_group, S_IRUGO | S_IWUSR, group_show, group_store);
-static struct device_attribute net_class_attributes[] = {
- __ATTR(addr_assign_type, S_IRUGO, show_addr_assign_type, NULL),
- __ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
- __ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
- __ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
- __ATTR(iflink, S_IRUGO, show_iflink, NULL),
- __ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
- __ATTR(type, S_IRUGO, show_type, NULL),
- __ATTR(link_mode, S_IRUGO, show_link_mode, NULL),
- __ATTR(address, S_IRUGO, show_address, NULL),
- __ATTR(broadcast, S_IRUGO, show_broadcast, NULL),
- __ATTR(carrier, S_IRUGO | S_IWUSR, show_carrier, store_carrier),
- __ATTR(speed, S_IRUGO, show_speed, NULL),
- __ATTR(duplex, S_IRUGO, show_duplex, NULL),
- __ATTR(dormant, S_IRUGO, show_dormant, NULL),
- __ATTR(operstate, S_IRUGO, show_operstate, NULL),
- __ATTR(mtu, S_IRUGO | S_IWUSR, show_mtu, store_mtu),
- __ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags),
- __ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
- store_tx_queue_len),
- __ATTR(netdev_group, S_IRUGO | S_IWUSR, show_group, store_group),
- {}
+static ssize_t phys_port_id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ ssize_t ret = -EINVAL;
+
+ if (!rtnl_trylock())
+ return restart_syscall();
+
+ if (dev_isalive(netdev)) {
+ struct netdev_phys_port_id ppid;
+
+ ret = dev_get_phys_port_id(netdev, &ppid);
+ if (!ret)
+ ret = sprintf(buf, "%*phN\n", ppid.id_len, ppid.id);
+ }
+ rtnl_unlock();
+
+ return ret;
+}
+static DEVICE_ATTR_RO(phys_port_id);
+
+static struct attribute *net_class_attrs[] = {
+ &dev_attr_netdev_group.attr,
+ &dev_attr_type.attr,
+ &dev_attr_dev_id.attr,
+ &dev_attr_iflink.attr,
+ &dev_attr_ifindex.attr,
+ &dev_attr_addr_assign_type.attr,
+ &dev_attr_addr_len.attr,
+ &dev_attr_link_mode.attr,
+ &dev_attr_address.attr,
+ &dev_attr_broadcast.attr,
+ &dev_attr_speed.attr,
+ &dev_attr_duplex.attr,
+ &dev_attr_dormant.attr,
+ &dev_attr_operstate.attr,
+ &dev_attr_ifalias.attr,
+ &dev_attr_carrier.attr,
+ &dev_attr_mtu.attr,
+ &dev_attr_flags.attr,
+ &dev_attr_tx_queue_len.attr,
+ &dev_attr_phys_port_id.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(net_class);
/* Show a given an attribute in the statistics group */
static ssize_t netstat_show(const struct device *d,
@@ -382,13 +418,13 @@ static ssize_t netstat_show(const struct device *d,
/* generate a read-only statistics attribute */
#define NETSTAT_ENTRY(name) \
-static ssize_t show_##name(struct device *d, \
+static ssize_t name##_show(struct device *d, \
struct device_attribute *attr, char *buf) \
{ \
return netstat_show(d, attr, buf, \
offsetof(struct rtnl_link_stats64, name)); \
} \
-static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
+static DEVICE_ATTR_RO(name)
NETSTAT_ENTRY(rx_packets);
NETSTAT_ENTRY(tx_packets);
@@ -457,6 +493,9 @@ static struct attribute_group wireless_group = {
.attrs = wireless_attrs,
};
#endif
+
+#else /* CONFIG_SYSFS */
+#define net_class_groups NULL
#endif /* CONFIG_SYSFS */
#ifdef CONFIG_RPS
@@ -1229,9 +1268,7 @@ static const void *net_namespace(struct device *d)
static struct class net_class = {
.name = "net",
.dev_release = netdev_release,
-#ifdef CONFIG_SYSFS
- .dev_attrs = net_class_attributes,
-#endif /* CONFIG_SYSFS */
+ .dev_groups = net_class_groups,
.dev_uevent = netdev_uevent,
.ns_type = &net_ns_type_operations,
.namespace = net_namespace,
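A hedged sketch of the sysfs convention the renames above depend on: DEVICE_ATTR_RO()/DEVICE_ATTR_RW() expect handlers named <attr>_show()/<attr>_store() and emit a dev_attr_<attr> variable, while ATTRIBUTE_GROUPS() generates the <prefix>_groups array that .dev_groups points at. All "example" identifiers below are invented for illustration.

#include <linux/device.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", 42);
}

static ssize_t example_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t len)
{
	/* parse and apply the new value here */
	return len;
}
static DEVICE_ATTR_RW(example);		/* defines dev_attr_example */

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL,
};
ATTRIBUTE_GROUPS(example);		/* defines example_groups */

static struct class example_class = {
	.name		= "example",
	.dev_groups	= example_groups,
};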
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c
index e533259dce3..d9cd627e6a1 100644
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -29,12 +29,6 @@
#define PRIOMAP_MIN_SZ 128
-static inline struct cgroup_netprio_state *cgrp_netprio_state(struct cgroup *cgrp)
-{
- return container_of(cgroup_subsys_state(cgrp, net_prio_subsys_id),
- struct cgroup_netprio_state, css);
-}
-
/*
* Extend @dev->priomap so that it's large enough to accomodate
* @target_idx. @dev->priomap.priomap_len > @target_idx after successful
@@ -87,67 +81,70 @@ static int extend_netdev_table(struct net_device *dev, u32 target_idx)
/**
* netprio_prio - return the effective netprio of a cgroup-net_device pair
- * @cgrp: cgroup part of the target pair
+ * @css: css part of the target pair
* @dev: net_device part of the target pair
*
* Should be called under RCU read or rtnl lock.
*/
-static u32 netprio_prio(struct cgroup *cgrp, struct net_device *dev)
+static u32 netprio_prio(struct cgroup_subsys_state *css, struct net_device *dev)
{
struct netprio_map *map = rcu_dereference_rtnl(dev->priomap);
+ int id = css->cgroup->id;
- if (map && cgrp->id < map->priomap_len)
- return map->priomap[cgrp->id];
+ if (map && id < map->priomap_len)
+ return map->priomap[id];
return 0;
}
/**
* netprio_set_prio - set netprio on a cgroup-net_device pair
- * @cgrp: cgroup part of the target pair
+ * @css: css part of the target pair
* @dev: net_device part of the target pair
* @prio: prio to set
*
- * Set netprio to @prio on @cgrp-@dev pair. Should be called under rtnl
+ * Set netprio to @prio on @css-@dev pair. Should be called under rtnl
* lock and may fail under memory pressure for non-zero @prio.
*/
-static int netprio_set_prio(struct cgroup *cgrp, struct net_device *dev,
- u32 prio)
+static int netprio_set_prio(struct cgroup_subsys_state *css,
+ struct net_device *dev, u32 prio)
{
struct netprio_map *map;
+ int id = css->cgroup->id;
int ret;
/* avoid extending priomap for zero writes */
map = rtnl_dereference(dev->priomap);
- if (!prio && (!map || map->priomap_len <= cgrp->id))
+ if (!prio && (!map || map->priomap_len <= id))
return 0;
- ret = extend_netdev_table(dev, cgrp->id);
+ ret = extend_netdev_table(dev, id);
if (ret)
return ret;
map = rtnl_dereference(dev->priomap);
- map->priomap[cgrp->id] = prio;
+ map->priomap[id] = prio;
return 0;
}
-static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp)
+static struct cgroup_subsys_state *
+cgrp_css_alloc(struct cgroup_subsys_state *parent_css)
{
- struct cgroup_netprio_state *cs;
+ struct cgroup_subsys_state *css;
- cs = kzalloc(sizeof(*cs), GFP_KERNEL);
- if (!cs)
+ css = kzalloc(sizeof(*css), GFP_KERNEL);
+ if (!css)
return ERR_PTR(-ENOMEM);
- return &cs->css;
+ return css;
}
-static int cgrp_css_online(struct cgroup *cgrp)
+static int cgrp_css_online(struct cgroup_subsys_state *css)
{
- struct cgroup *parent = cgrp->parent;
+ struct cgroup_subsys_state *parent_css = css_parent(css);
struct net_device *dev;
int ret = 0;
- if (!parent)
+ if (!parent_css)
return 0;
rtnl_lock();
@@ -156,9 +153,9 @@ static int cgrp_css_online(struct cgroup *cgrp)
* onlining, there is no need to clear them on offline.
*/
for_each_netdev(&init_net, dev) {
- u32 prio = netprio_prio(parent, dev);
+ u32 prio = netprio_prio(parent_css, dev);
- ret = netprio_set_prio(cgrp, dev, prio);
+ ret = netprio_set_prio(css, dev, prio);
if (ret)
break;
}
@@ -166,29 +163,29 @@ static int cgrp_css_online(struct cgroup *cgrp)
return ret;
}
-static void cgrp_css_free(struct cgroup *cgrp)
+static void cgrp_css_free(struct cgroup_subsys_state *css)
{
- kfree(cgrp_netprio_state(cgrp));
+ kfree(css);
}
-static u64 read_prioidx(struct cgroup *cgrp, struct cftype *cft)
+static u64 read_prioidx(struct cgroup_subsys_state *css, struct cftype *cft)
{
- return cgrp->id;
+ return css->cgroup->id;
}
-static int read_priomap(struct cgroup *cont, struct cftype *cft,
+static int read_priomap(struct cgroup_subsys_state *css, struct cftype *cft,
struct cgroup_map_cb *cb)
{
struct net_device *dev;
rcu_read_lock();
for_each_netdev_rcu(&init_net, dev)
- cb->fill(cb, dev->name, netprio_prio(cont, dev));
+ cb->fill(cb, dev->name, netprio_prio(css, dev));
rcu_read_unlock();
return 0;
}
-static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
+static int write_priomap(struct cgroup_subsys_state *css, struct cftype *cft,
const char *buffer)
{
char devname[IFNAMSIZ + 1];
@@ -205,7 +202,7 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft,
rtnl_lock();
- ret = netprio_set_prio(cgrp, dev, prio);
+ ret = netprio_set_prio(css, dev, prio);
rtnl_unlock();
dev_put(dev);
@@ -221,12 +218,13 @@ static int update_netprio(const void *v, struct file *file, unsigned n)
return 0;
}
-static void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static void net_prio_attach(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset)
{
struct task_struct *p;
void *v;
- cgroup_taskset_for_each(p, cgrp, tset) {
+ cgroup_taskset_for_each(p, css, tset) {
task_lock(p);
v = (void *)(unsigned long)task_netprioidx(p);
iterate_fd(p->files, 0, update_netprio, v);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 9640972ec50..261357a6630 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -160,6 +160,8 @@
#include <net/net_namespace.h>
#include <net/checksum.h>
#include <net/ipv6.h>
+#include <net/udp.h>
+#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#ifdef CONFIG_XFRM
#include <net/xfrm.h>
@@ -198,6 +200,7 @@
#define F_QUEUE_MAP_RND (1<<13) /* queue map Random */
#define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */
#define F_NODE (1<<15) /* Node memory alloc*/
+#define F_UDPCSUM (1<<16) /* Include UDP checksum */
/* Thread control flag bits */
#define T_STOP (1<<0) /* Stop run */
@@ -631,6 +634,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
if (pkt_dev->flags & F_UDPDST_RND)
seq_printf(seq, "UDPDST_RND ");
+ if (pkt_dev->flags & F_UDPCSUM)
+ seq_printf(seq, "UDPCSUM ");
+
if (pkt_dev->flags & F_MPLS_RND)
seq_printf(seq, "MPLS_RND ");
@@ -1228,6 +1234,12 @@ static ssize_t pktgen_if_write(struct file *file,
else if (strcmp(f, "!NODE_ALLOC") == 0)
pkt_dev->flags &= ~F_NODE;
+ else if (strcmp(f, "UDPCSUM") == 0)
+ pkt_dev->flags |= F_UDPCSUM;
+
+ else if (strcmp(f, "!UDPCSUM") == 0)
+ pkt_dev->flags &= ~F_UDPCSUM;
+
else {
sprintf(pg_result,
"Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
@@ -2733,7 +2745,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
udph->source = htons(pkt_dev->cur_udp_src);
udph->dest = htons(pkt_dev->cur_udp_dst);
udph->len = htons(datalen + 8); /* DATA + udphdr */
- udph->check = 0; /* No checksum */
+ udph->check = 0;
iph->ihl = 5;
iph->version = 4;
@@ -2747,11 +2759,28 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
iph->frag_off = 0;
iplen = 20 + 8 + datalen;
iph->tot_len = htons(iplen);
- iph->check = 0;
- iph->check = ip_fast_csum((void *)iph, iph->ihl);
+ ip_send_check(iph);
skb->protocol = protocol;
skb->dev = odev;
skb->pkt_type = PACKET_HOST;
+
+ if (!(pkt_dev->flags & F_UDPCSUM)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ } else if (odev->features & NETIF_F_V4_CSUM) {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum = 0;
+ udp4_hwcsum(skb, iph->saddr, iph->daddr);
+ } else {
+ __wsum csum = udp_csum(skb);
+
+ /* add protocol-dependent pseudo-header */
+ udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+ datalen + 8, IPPROTO_UDP, csum);
+
+ if (udph->check == 0)
+ udph->check = CSUM_MANGLED_0;
+ }
+
pktgen_finalize_skb(pkt_dev, skb, datalen);
#ifdef CONFIG_XFRM
@@ -2768,7 +2797,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
struct sk_buff *skb = NULL;
__u8 *eth;
struct udphdr *udph;
- int datalen;
+ int datalen, udplen;
struct ipv6hdr *iph;
__be16 protocol = htons(ETH_P_IPV6);
__be32 *mpls;
@@ -2844,10 +2873,11 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
net_info_ratelimited("increased datalen to %d\n", datalen);
}
+ udplen = datalen + sizeof(struct udphdr);
udph->source = htons(pkt_dev->cur_udp_src);
udph->dest = htons(pkt_dev->cur_udp_dst);
- udph->len = htons(datalen + sizeof(struct udphdr));
- udph->check = 0; /* No checksum */
+ udph->len = htons(udplen);
+ udph->check = 0;
*(__be32 *) iph = htonl(0x60000000); /* Version + flow */
@@ -2858,7 +2888,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
iph->hop_limit = 32;
- iph->payload_len = htons(sizeof(struct udphdr) + datalen);
+ iph->payload_len = htons(udplen);
iph->nexthdr = IPPROTO_UDP;
iph->daddr = pkt_dev->cur_in6_daddr;
@@ -2868,6 +2898,23 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
skb->dev = odev;
skb->pkt_type = PACKET_HOST;
+ if (!(pkt_dev->flags & F_UDPCSUM)) {
+ skb->ip_summed = CHECKSUM_NONE;
+ } else if (odev->features & NETIF_F_V6_CSUM) {
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+ udph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, 0);
+ } else {
+ __wsum csum = udp_csum(skb);
+
+ /* add protocol-dependent pseudo-header */
+ udph->check = csum_ipv6_magic(&iph->saddr, &iph->daddr, udplen, IPPROTO_UDP, csum);
+
+ if (udph->check == 0)
+ udph->check = CSUM_MANGLED_0;
+ }
+
pktgen_finalize_skb(pkt_dev, skb, datalen);
return skb;
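Two details of the checksum handling above are easy to miss. On the CHECKSUM_PARTIAL path the check field is seeded with the complement of the pseudo-header sum and the device finishes the checksum from csum_start/csum_offset; on the software path a computed sum of zero must be folded to all-ones, because a zero UDP checksum means "checksum not supplied". A hedged restatement of that fold for IPv4, with an invented function name; "csum" stands for the already-computed sum of the UDP header and payload.

#include <linux/skbuff.h>
#include <net/checksum.h>

static void example_udp4_fold(struct udphdr *udph, __be32 saddr, __be32 daddr,
			      int udplen, __wsum csum)
{
	/* fold in the IPv4 pseudo-header: addresses, length, protocol */
	udph->check = csum_tcpudp_magic(saddr, daddr, udplen,
					IPPROTO_UDP, csum);
	if (udph->check == 0)
		udph->check = CSUM_MANGLED_0;	/* 0 means "checksum absent" */
}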
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 3de740834d1..2a0e21de306 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -767,7 +767,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
+ rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
+ rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
+ rtnl_link_get_size(dev) /* IFLA_LINKINFO */
- + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
+ + rtnl_link_get_af_size(dev) /* IFLA_AF_SPEC */
+ + nla_total_size(MAX_PHYS_PORT_ID_LEN); /* IFLA_PHYS_PORT_ID */
}
static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
@@ -846,6 +847,24 @@ static int rtnl_port_fill(struct sk_buff *skb, struct net_device *dev)
return 0;
}
+static int rtnl_phys_port_id_fill(struct sk_buff *skb, struct net_device *dev)
+{
+ int err;
+ struct netdev_phys_port_id ppid;
+
+ err = dev_get_phys_port_id(dev, &ppid);
+ if (err) {
+ if (err == -EOPNOTSUPP)
+ return 0;
+ return err;
+ }
+
+ if (nla_put(skb, IFLA_PHYS_PORT_ID, ppid.id_len, ppid.id))
+ return -EMSGSIZE;
+
+ return 0;
+}
+
static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
int type, u32 pid, u32 seq, u32 change,
unsigned int flags, u32 ext_filter_mask)
@@ -913,6 +932,9 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
goto nla_put_failure;
}
+ if (rtnl_phys_port_id_fill(skb, dev))
+ goto nla_put_failure;
+
attr = nla_reserve(skb, IFLA_STATS,
sizeof(struct rtnl_link_stats));
if (attr == NULL)
@@ -1113,6 +1135,7 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_PROMISCUITY] = { .type = NLA_U32 },
[IFLA_NUM_TX_QUEUES] = { .type = NLA_U32 },
[IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
+ [IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_PORT_ID_LEN },
};
EXPORT_SYMBOL(ifla_policy);
@@ -1844,10 +1867,10 @@ replay:
else
err = register_netdevice(dev);
- if (err < 0 && !IS_ERR(dev))
+ if (err < 0) {
free_netdev(dev);
- if (err < 0)
goto out;
+ }
err = rtnl_configure_link(dev, ifm);
if (err < 0)
@@ -2156,7 +2179,7 @@ int ndo_dflt_fdb_del(struct ndmsg *ndm,
/* If aging addresses are supported device will need to
* implement its own handler for this.
*/
- if (ndm->ndm_state & NUD_PERMANENT) {
+ if (!(ndm->ndm_state & NUD_PERMANENT)) {
pr_info("%s: FDB only supports static addresses\n", dev->name);
return -EINVAL;
}
@@ -2384,7 +2407,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
struct nlattr *extfilt;
u32 filter_mask = 0;
- extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg),
+ extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
IFLA_EXT_MASK);
if (extfilt)
filter_mask = nla_get_u32(extfilt);
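The new IFLA_PHYS_PORT_ID attribute is only emitted when a driver implements the ndo_get_phys_port_id() callback; dev_get_phys_port_id() otherwise returns -EOPNOTSUPP, which rtnl_phys_port_id_fill() above treats as "nothing to report". A hedged sketch of the driver side; the foo_* names and the port_uuid field are hypothetical.

#include <linux/netdevice.h>

struct foo_priv {
	u8 port_uuid[16];		/* hypothetical per-port identifier */
};

static int foo_get_phys_port_id(struct net_device *dev,
				struct netdev_phys_port_id *ppid)
{
	struct foo_priv *priv = netdev_priv(dev);

	BUILD_BUG_ON(sizeof(priv->port_uuid) > MAX_PHYS_PORT_ID_LEN);
	ppid->id_len = sizeof(priv->port_uuid);
	memcpy(ppid->id, priv->port_uuid, ppid->id_len);
	return 0;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_get_phys_port_id	= foo_get_phys_port_id,
};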
diff --git a/net/core/scm.c b/net/core/scm.c
index 03795d0147f..b4da80b1cc0 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -54,7 +54,7 @@ static __inline__ int scm_check_creds(struct ucred *creds)
return -EINVAL;
if ((creds->pid == task_tgid_vnr(current) ||
- ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&
+ ns_capable(task_active_pid_ns(current)->user_ns, CAP_SYS_ADMIN)) &&
((uid_eq(uid, cred->uid) || uid_eq(uid, cred->euid) ||
uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&
((gid_eq(gid, cred->gid) || gid_eq(gid, cred->egid) ||
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 20e02d2605e..d81cff119f7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -309,7 +309,8 @@ EXPORT_SYMBOL(__alloc_skb);
* @frag_size: size of fragment, or 0 if head was kmalloced
*
* Allocate a new &sk_buff. Caller provides space holding head and
- * skb_shared_info. @data must have been allocated by kmalloc()
+ * skb_shared_info. @data must have been allocated by kmalloc() only if
+ * @frag_size is 0, otherwise data should come from the page allocator.
* The return is the new skb buffer.
* On a failure the return is %NULL, and @data is not freed.
* Notes :
@@ -739,7 +740,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
skb_copy_secmark(new, old);
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
new->napi_id = old->napi_id;
#endif
}
@@ -3499,17 +3500,22 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
EXPORT_SYMBOL(skb_try_coalesce);
/**
- * skb_scrub_packet - scrub an skb before sending it to another netns
+ * skb_scrub_packet - scrub an skb
*
* @skb: buffer to clean
- *
- * skb_scrub_packet can be used to clean an skb before injecting it in
- * another namespace. We have to clear all information in the skb that
- * could impact namespace isolation.
+ * @xnet: packet is crossing netns
+ *
+ * skb_scrub_packet can be used after encapsulating or decapsulating a packet
+ * into/from a tunnel. Some information has to be cleared during these
+ * operations.
+ * skb_scrub_packet can also be used to clean an skb before injecting it in
+ * another namespace (@xnet == true). We have to clear all information in the
+ * skb that could impact namespace isolation.
*/
-void skb_scrub_packet(struct sk_buff *skb)
+void skb_scrub_packet(struct sk_buff *skb, bool xnet)
{
- skb_orphan(skb);
+ if (xnet)
+ skb_orphan(skb);
skb->tstamp.tv64 = 0;
skb->pkt_type = PACKET_HOST;
skb->skb_iif = 0;
diff --git a/net/core/sock.c b/net/core/sock.c
index 548d716c5f6..5b6beba494a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -93,6 +93,7 @@
#include <linux/capability.h>
#include <linux/errno.h>
+#include <linux/errqueue.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
@@ -900,7 +901,7 @@ set_rcvbuf:
sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
break;
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
case SO_BUSY_POLL:
/* allow unprivileged users to decrease the value */
if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
@@ -1170,7 +1171,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
break;
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
case SO_BUSY_POLL:
v.val = sk->sk_ll_usec;
break;
@@ -1575,6 +1576,25 @@ void sock_wfree(struct sk_buff *skb)
}
EXPORT_SYMBOL(sock_wfree);
+void skb_orphan_partial(struct sk_buff *skb)
+{
+ /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
+ * so we do not completely orphan the skb, but transfer all
+ * accounted bytes but one, to avoid unexpected reorders.
+ */
+ if (skb->destructor == sock_wfree
+#ifdef CONFIG_INET
+ || skb->destructor == tcp_wfree
+#endif
+ ) {
+ atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
+ skb->truesize = 1;
+ } else {
+ skb_orphan(skb);
+ }
+}
+EXPORT_SYMBOL(skb_orphan_partial);
+
/*
* Read buffer destructor automatically called from kfree_skb.
*/
@@ -1721,24 +1741,23 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
unsigned long data_len, int noblock,
- int *errcode)
+ int *errcode, int max_page_order)
{
- struct sk_buff *skb;
+ struct sk_buff *skb = NULL;
+ unsigned long chunk;
gfp_t gfp_mask;
long timeo;
int err;
int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ struct page *page;
+ int i;
err = -EMSGSIZE;
if (npages > MAX_SKB_FRAGS)
goto failure;
- gfp_mask = sk->sk_allocation;
- if (gfp_mask & __GFP_WAIT)
- gfp_mask |= __GFP_REPEAT;
-
timeo = sock_sndtimeo(sk, noblock);
- while (1) {
+ while (!skb) {
err = sock_error(sk);
if (err != 0)
goto failure;
@@ -1747,50 +1766,52 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
if (sk->sk_shutdown & SEND_SHUTDOWN)
goto failure;
- if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
- skb = alloc_skb(header_len, gfp_mask);
- if (skb) {
- int i;
-
- /* No pages, we're done... */
- if (!data_len)
- break;
-
- skb->truesize += data_len;
- skb_shinfo(skb)->nr_frags = npages;
- for (i = 0; i < npages; i++) {
- struct page *page;
-
- page = alloc_pages(sk->sk_allocation, 0);
- if (!page) {
- err = -ENOBUFS;
- skb_shinfo(skb)->nr_frags = i;
- kfree_skb(skb);
- goto failure;
- }
-
- __skb_fill_page_desc(skb, i,
- page, 0,
- (data_len >= PAGE_SIZE ?
- PAGE_SIZE :
- data_len));
- data_len -= PAGE_SIZE;
- }
+ if (atomic_read(&sk->sk_wmem_alloc) >= sk->sk_sndbuf) {
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ err = -EAGAIN;
+ if (!timeo)
+ goto failure;
+ if (signal_pending(current))
+ goto interrupted;
+ timeo = sock_wait_for_wmem(sk, timeo);
+ continue;
+ }
- /* Full success... */
- break;
- }
- err = -ENOBUFS;
+ err = -ENOBUFS;
+ gfp_mask = sk->sk_allocation;
+ if (gfp_mask & __GFP_WAIT)
+ gfp_mask |= __GFP_REPEAT;
+
+ skb = alloc_skb(header_len, gfp_mask);
+ if (!skb)
goto failure;
+
+ skb->truesize += data_len;
+
+ for (i = 0; npages > 0; i++) {
+ int order = max_page_order;
+
+ while (order) {
+ if (npages >= 1 << order) {
+ page = alloc_pages(sk->sk_allocation |
+ __GFP_COMP | __GFP_NOWARN,
+ order);
+ if (page)
+ goto fill_page;
+ }
+ order--;
+ }
+ page = alloc_page(sk->sk_allocation);
+ if (!page)
+ goto failure;
+fill_page:
+ chunk = min_t(unsigned long, data_len,
+ PAGE_SIZE << order);
+ skb_fill_page_desc(skb, i, page, 0, chunk);
+ data_len -= chunk;
+ npages -= 1 << order;
}
- set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
- set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
- err = -EAGAIN;
- if (!timeo)
- goto failure;
- if (signal_pending(current))
- goto interrupted;
- timeo = sock_wait_for_wmem(sk, timeo);
}
skb_set_owner_w(skb, sk);
@@ -1799,6 +1820,7 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
interrupted:
err = sock_intr_errno(timeo);
failure:
+ kfree_skb(skb);
*errcode = err;
return NULL;
}
@@ -1807,7 +1829,7 @@ EXPORT_SYMBOL(sock_alloc_send_pskb);
struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
int noblock, int *errcode)
{
- return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
+ return sock_alloc_send_pskb(sk, size, 0, noblock, errcode, 0);
}
EXPORT_SYMBOL(sock_alloc_send_skb);
@@ -2292,7 +2314,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_stamp = ktime_set(-1L, 0);
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
sk->sk_napi_id = 0;
sk->sk_ll_usec = sysctl_net_busy_read;
#endif
@@ -2425,6 +2447,52 @@ void sock_enable_timestamp(struct sock *sk, int flag)
}
}
+int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
+ int level, int type)
+{
+ struct sock_exterr_skb *serr;
+ struct sk_buff *skb, *skb2;
+ int copied, err;
+
+ err = -EAGAIN;
+ skb = skb_dequeue(&sk->sk_error_queue);
+ if (skb == NULL)
+ goto out;
+
+ copied = skb->len;
+ if (copied > len) {
+ msg->msg_flags |= MSG_TRUNC;
+ copied = len;
+ }
+ err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+ if (err)
+ goto out_free_skb;
+
+ sock_recv_timestamp(msg, sk, skb);
+
+ serr = SKB_EXT_ERR(skb);
+ put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
+
+ msg->msg_flags |= MSG_ERRQUEUE;
+ err = copied;
+
+ /* Reset and regenerate socket error */
+ spin_lock_bh(&sk->sk_error_queue.lock);
+ sk->sk_err = 0;
+ if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
+ sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
+ spin_unlock_bh(&sk->sk_error_queue.lock);
+ sk->sk_error_report(sk);
+ } else
+ spin_unlock_bh(&sk->sk_error_queue.lock);
+
+out_free_skb:
+ kfree_skb(skb);
+out:
+ return err;
+}
+EXPORT_SYMBOL(sock_recv_errqueue);
+
/*
* Get a socket option on an socket.
*
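sock_alloc_send_pskb() above gains a max_page_order argument: the fragment allocator now tries compound pages up to that order (with __GFP_COMP | __GFP_NOWARN) and silently falls back to order-0 pages, so callers can opportunistically receive larger chunks without new failure modes. A hedged caller sketch; the wrapper name and the order value are examples only.

#include <net/sock.h>

static struct sk_buff *example_alloc(struct sock *sk, size_t header_len,
				     size_t data_len, int noblock, int *err)
{
	/* ask for up to order-3 (32 KB) page fragments; passing 0 keeps
	 * the old one-page-per-fragment behaviour
	 */
	return sock_alloc_send_pskb(sk, header_len, data_len,
				    noblock, err, 3);
}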
diff --git a/net/core/stream.c b/net/core/stream.c
index f5df85dcd20..512f0a24269 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -30,7 +30,7 @@ void sk_stream_write_space(struct sock *sk)
struct socket *sock = sk->sk_socket;
struct socket_wq *wq;
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) {
+ if (sk_stream_is_writeable(sk) && sock) {
clear_bit(SOCK_NOSPACE, &sock->flags);
rcu_read_lock();
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 66096861663..cca44419090 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -20,8 +20,11 @@
#include <net/sock.h>
#include <net/net_ratelimit.h>
#include <net/busy_poll.h>
+#include <net/pkt_sched.h>
+static int zero = 0;
static int one = 1;
+static int ushort_max = USHRT_MAX;
#ifdef CONFIG_RPS
static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
@@ -191,6 +194,26 @@ static int flow_limit_table_len_sysctl(struct ctl_table *table, int write,
}
#endif /* CONFIG_NET_FLOW_LIMIT */
+#ifdef CONFIG_NET_SCHED
+static int set_default_qdisc(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ char id[IFNAMSIZ];
+ struct ctl_table tbl = {
+ .data = id,
+ .maxlen = IFNAMSIZ,
+ };
+ int ret;
+
+ qdisc_get_default(id, IFNAMSIZ);
+
+ ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+ if (write && ret == 0)
+ ret = qdisc_set_default(id);
+ return ret;
+}
+#endif
+
static struct ctl_table net_core_table[] = {
#ifdef CONFIG_NET
{
@@ -298,7 +321,7 @@ static struct ctl_table net_core_table[] = {
.proc_handler = flow_limit_table_len_sysctl
},
#endif /* CONFIG_NET_FLOW_LIMIT */
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
{
.procname = "busy_poll",
.data = &sysctl_net_busy_poll,
@@ -313,7 +336,14 @@ static struct ctl_table net_core_table[] = {
.mode = 0644,
.proc_handler = proc_dointvec
},
-#
+#endif
+#ifdef CONFIG_NET_SCHED
+ {
+ .procname = "default_qdisc",
+ .mode = 0644,
+ .maxlen = IFNAMSIZ,
+ .proc_handler = set_default_qdisc
+ },
#endif
#endif /* CONFIG_NET */
{
@@ -339,7 +369,9 @@ static struct ctl_table netns_core_table[] = {
.data = &init_net.core.sysctl_somaxconn,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .extra1 = &zero,
+ .extra2 = &ushort_max,
+ .proc_handler = proc_dointvec_minmax
},
{ }
};
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 6c7c78b8394..ba64750f038 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -336,7 +336,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
mask |= POLLIN | POLLRDNORM;
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
+ if (sk_stream_is_writeable(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else { /* send SIGIO later */
set_bit(SOCK_ASYNC_NOSPACE,
@@ -347,7 +347,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
* wspace test but before the flags are set,
* IO signal will be lost.
*/
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
+ if (sk_stream_is_writeable(sk))
mask |= POLLOUT | POLLWRNORM;
}
}
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 6ebd8fbd928..29d684ebca6 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -347,7 +347,7 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
slave_dev->features = master->vlan_features;
SET_ETHTOOL_OPS(slave_dev, &dsa_slave_ethtool_ops);
- memcpy(slave_dev->dev_addr, master->dev_addr, ETH_ALEN);
+ eth_hw_addr_inherit(slave_dev, master);
slave_dev->tx_queue_len = 0;
switch (ds->dst->tag_protocol) {
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 3b9d5f20bd1..c85e71e0c7f 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -67,39 +67,6 @@ static const u8 lowpan_ttl_values[] = {0, 1, 64, 255};
static LIST_HEAD(lowpan_devices);
-/*
- * Uncompression of linklocal:
- * 0 -> 16 bytes from packet
- * 1 -> 2 bytes from prefix - bunch of zeroes and 8 from packet
- * 2 -> 2 bytes from prefix - zeroes + 2 from packet
- * 3 -> 2 bytes from prefix - infer 8 bytes from lladdr
- *
- * NOTE: => the uncompress function does change 0xf to 0x10
- * NOTE: 0x00 => no-autoconfig => unspecified
- */
-static const u8 lowpan_unc_llconf[] = {0x0f, 0x28, 0x22, 0x20};
-
-/*
- * Uncompression of ctx-based:
- * 0 -> 0 bits from packet [unspecified / reserved]
- * 1 -> 8 bytes from prefix - bunch of zeroes and 8 from packet
- * 2 -> 8 bytes from prefix - zeroes + 2 from packet
- * 3 -> 8 bytes from prefix - infer 8 bytes from lladdr
- */
-static const u8 lowpan_unc_ctxconf[] = {0x00, 0x88, 0x82, 0x80};
-
-/*
- * Uncompression of ctx-base
- * 0 -> 0 bits from packet
- * 1 -> 2 bytes from prefix - bunch of zeroes 5 from packet
- * 2 -> 2 bytes from prefix - zeroes + 3 from packet
- * 3 -> 2 bytes from prefix - infer 1 bytes from lladdr
- */
-static const u8 lowpan_unc_mxconf[] = {0x0f, 0x25, 0x23, 0x21};
-
-/* Link local prefix */
-static const u8 lowpan_llprefix[] = {0xfe, 0x80};
-
/* private device info */
struct lowpan_dev_info {
struct net_device *real_dev; /* real WPAN device ptr */
@@ -191,55 +158,177 @@ lowpan_compress_addr_64(u8 **hc06_ptr, u8 shift, const struct in6_addr *ipaddr,
return rol8(val, shift);
}
-static void
-lowpan_uip_ds6_set_addr_iid(struct in6_addr *ipaddr, unsigned char *lladdr)
+/*
+ * Uncompress address function for source and
+ * destination address(non-multicast).
+ *
+ * address_mode is sam value or dam value.
+ */
+static int
+lowpan_uncompress_addr(struct sk_buff *skb,
+ struct in6_addr *ipaddr,
+ const u8 address_mode,
+ const struct ieee802154_addr *lladdr)
{
- memcpy(&ipaddr->s6_addr[8], lladdr, IEEE802154_ADDR_LEN);
- /* second bit-flip (Universe/Local) is done according RFC2464 */
- ipaddr->s6_addr[8] ^= 0x02;
+ bool fail;
+
+ switch (address_mode) {
+ case LOWPAN_IPHC_ADDR_00:
+ /* for global link addresses */
+ fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
+ break;
+ case LOWPAN_IPHC_ADDR_01:
+ /* fe:80::XXXX:XXXX:XXXX:XXXX */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[8], 8);
+ break;
+ case LOWPAN_IPHC_ADDR_02:
+ /* fe:80::ff:fe00:XXXX */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ ipaddr->s6_addr[11] = 0xFF;
+ ipaddr->s6_addr[12] = 0xFE;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[14], 2);
+ break;
+ case LOWPAN_IPHC_ADDR_03:
+ fail = false;
+ switch (lladdr->addr_type) {
+ case IEEE802154_ADDR_LONG:
+ /* fe:80::XXXX:XXXX:XXXX:XXXX
+ * \_________________/
+ * hwaddr
+ */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ memcpy(&ipaddr->s6_addr[8], lladdr->hwaddr,
+ IEEE802154_ADDR_LEN);
+ /* second bit-flip (Universe/Local)
+ * is done according to RFC 2464
+ */
+ ipaddr->s6_addr[8] ^= 0x02;
+ break;
+ case IEEE802154_ADDR_SHORT:
+ /* fe:80::ff:fe00:XXXX
+ * \__/
+ * short_addr
+ *
+ * Universe/Local bit is zero.
+ */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ ipaddr->s6_addr[11] = 0xFF;
+ ipaddr->s6_addr[12] = 0xFE;
+ ipaddr->s6_addr16[7] = htons(lladdr->short_addr);
+ break;
+ default:
+ pr_debug("Invalid addr_type set\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_debug("Invalid address mode value: 0x%x\n", address_mode);
+ return -EINVAL;
+ }
+
+ if (fail) {
+ pr_debug("Failed to fetch skb data\n");
+ return -EIO;
+ }
+
+ lowpan_raw_dump_inline(NULL, "Reconstructed ipv6 addr is:\n",
+ ipaddr->s6_addr, 16);
+
+ return 0;
}
-/*
- * Uncompress addresses based on a prefix and a postfix with zeroes in
- * between. If the postfix is zero in length it will use the link address
- * to configure the IP address (autoconf style).
- * pref_post_count takes a byte where the first nibble specify prefix count
- * and the second postfix count (NOTE: 15/0xf => 16 bytes copy).
+/* Uncompress address function for source context
+ * based address(non-multicast).
*/
static int
-lowpan_uncompress_addr(struct sk_buff *skb, struct in6_addr *ipaddr,
- u8 const *prefix, u8 pref_post_count, unsigned char *lladdr)
+lowpan_uncompress_context_based_src_addr(struct sk_buff *skb,
+ struct in6_addr *ipaddr,
+ const u8 sam)
{
- u8 prefcount = pref_post_count >> 4;
- u8 postcount = pref_post_count & 0x0f;
-
- /* full nibble 15 => 16 */
- prefcount = (prefcount == 15 ? 16 : prefcount);
- postcount = (postcount == 15 ? 16 : postcount);
-
- if (lladdr)
- lowpan_raw_dump_inline(__func__, "linklocal address",
- lladdr, IEEE802154_ADDR_LEN);
- if (prefcount > 0)
- memcpy(ipaddr, prefix, prefcount);
-
- if (prefcount + postcount < 16)
- memset(&ipaddr->s6_addr[prefcount], 0,
- 16 - (prefcount + postcount));
-
- if (postcount > 0) {
- memcpy(&ipaddr->s6_addr[16 - postcount], skb->data, postcount);
- skb_pull(skb, postcount);
- } else if (prefcount > 0) {
- if (lladdr == NULL)
- return -EINVAL;
+ switch (sam) {
+ case LOWPAN_IPHC_ADDR_00:
+ /* unspec address ::
+ * Do nothing, address is already ::
+ */
+ break;
+ case LOWPAN_IPHC_ADDR_01:
+ /* TODO */
+ case LOWPAN_IPHC_ADDR_02:
+ /* TODO */
+ case LOWPAN_IPHC_ADDR_03:
+ /* TODO */
+ netdev_warn(skb->dev, "SAM value 0x%x not supported\n", sam);
+ return -EINVAL;
+ default:
+ pr_debug("Invalid sam value: 0x%x\n", sam);
+ return -EINVAL;
+ }
+
+ lowpan_raw_dump_inline(NULL,
+ "Reconstructed context based ipv6 src addr is:\n",
+ ipaddr->s6_addr, 16);
+
+ return 0;
+}
- /* no IID based configuration if no prefix and no data */
- lowpan_uip_ds6_set_addr_iid(ipaddr, lladdr);
+/* Uncompress function for multicast destination address,
+ * when M bit is set.
+ */
+static int
+lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
+ struct in6_addr *ipaddr,
+ const u8 dam)
+{
+ bool fail;
+
+ switch (dam) {
+ case LOWPAN_IPHC_DAM_00:
+ /* 00: 128 bits. The full address
+ * is carried in-line.
+ */
+ fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
+ break;
+ case LOWPAN_IPHC_DAM_01:
+ /* 01: 48 bits. The address takes
+ * the form ffXX::00XX:XXXX:XXXX.
+ */
+ ipaddr->s6_addr[0] = 0xFF;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
+ fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[11], 5);
+ break;
+ case LOWPAN_IPHC_DAM_10:
+ /* 10: 32 bits. The address takes
+ * the form ffXX::00XX:XXXX.
+ */
+ ipaddr->s6_addr[0] = 0xFF;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[1], 1);
+ fail |= lowpan_fetch_skb(skb, &ipaddr->s6_addr[13], 3);
+ break;
+ case LOWPAN_IPHC_DAM_11:
+ /* 11: 8 bits. The address takes
+ * the form ff02::00XX.
+ */
+ ipaddr->s6_addr[0] = 0xFF;
+ ipaddr->s6_addr[1] = 0x02;
+ fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[15], 1);
+ break;
+ default:
+ pr_debug("DAM value has a wrong value: 0x%x\n", dam);
+ return -EINVAL;
+ }
+
+ if (fail) {
+ pr_debug("Failed to fetch skb data\n");
+ return -EIO;
}
- pr_debug("uncompressing %d + %d => ", prefcount, postcount);
- lowpan_raw_dump_inline(NULL, NULL, ipaddr->s6_addr, 16);
+ lowpan_raw_dump_inline(NULL, "Reconstructed ipv6 multicast addr is:\n",
+ ipaddr->s6_addr, 16);
return 0;
}
@@ -702,6 +791,12 @@ lowpan_alloc_new_frame(struct sk_buff *skb, u16 len, u16 tag)
skb_reserve(frame->skb, sizeof(struct ipv6hdr));
skb_put(frame->skb, frame->length);
+ /* copy the first control block to keep a
+ * trace of the link-layer addresses in case
+ * of a link-local compressed address
+ */
+ memcpy(frame->skb->cb, skb->cb, sizeof(skb->cb));
+
init_timer(&frame->timer);
/* time out is the same as for ipv6 - 60 sec */
frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
@@ -723,9 +818,9 @@ frame_err:
static int
lowpan_process_data(struct sk_buff *skb)
{
- struct ipv6hdr hdr;
+ struct ipv6hdr hdr = {};
u8 tmp, iphc0, iphc1, num_context = 0;
- u8 *_saddr, *_daddr;
+ const struct ieee802154_addr *_saddr, *_daddr;
int err;
lowpan_raw_dump_table(__func__, "raw skb data dump", skb->data,
@@ -828,8 +923,8 @@ lowpan_process_data(struct sk_buff *skb)
if (lowpan_fetch_skb_u8(skb, &iphc1))
goto drop;
- _saddr = mac_cb(skb)->sa.hwaddr;
- _daddr = mac_cb(skb)->da.hwaddr;
+ _saddr = &mac_cb(skb)->sa;
+ _daddr = &mac_cb(skb)->da;
pr_debug("iphc0 = %02x, iphc1 = %02x\n", iphc0, iphc1);
@@ -868,8 +963,6 @@ lowpan_process_data(struct sk_buff *skb)
hdr.priority = ((tmp >> 2) & 0x0f);
hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
- hdr.flow_lbl[1] = 0;
- hdr.flow_lbl[2] = 0;
break;
/*
* Flow Label carried in-line
@@ -885,10 +978,6 @@ lowpan_process_data(struct sk_buff *skb)
break;
/* Traffic Class and Flow Label are elided */
case 3: /* 11b */
- hdr.priority = 0;
- hdr.flow_lbl[0] = 0;
- hdr.flow_lbl[1] = 0;
- hdr.flow_lbl[2] = 0;
break;
default:
break;
@@ -915,10 +1004,18 @@ lowpan_process_data(struct sk_buff *skb)
/* Extract SAM to the tmp variable */
tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;
- /* Source address uncompression */
- pr_debug("source address stateless compression\n");
- err = lowpan_uncompress_addr(skb, &hdr.saddr, lowpan_llprefix,
- lowpan_unc_llconf[tmp], skb->data);
+ if (iphc1 & LOWPAN_IPHC_SAC) {
+ /* Source address context based uncompression */
+ pr_debug("SAC bit is set. Handle context based source address.\n");
+ err = lowpan_uncompress_context_based_src_addr(
+ skb, &hdr.saddr, tmp);
+ } else {
+ /* Source address uncompression */
+ pr_debug("source address stateless compression\n");
+ err = lowpan_uncompress_addr(skb, &hdr.saddr, tmp, _saddr);
+ }
+
+ /* Check on error of previous branch */
if (err)
goto drop;
@@ -931,23 +1028,14 @@ lowpan_process_data(struct sk_buff *skb)
pr_debug("dest: context-based mcast compression\n");
/* TODO: implement this */
} else {
- u8 prefix[] = {0xff, 0x02};
-
- pr_debug("dest: non context-based mcast compression\n");
- if (0 < tmp && tmp < 3) {
- if (lowpan_fetch_skb_u8(skb, &prefix[1]))
- goto drop;
- }
-
- err = lowpan_uncompress_addr(skb, &hdr.daddr, prefix,
- lowpan_unc_mxconf[tmp], NULL);
+ err = lowpan_uncompress_multicast_daddr(
+ skb, &hdr.daddr, tmp);
if (err)
goto drop;
}
} else {
pr_debug("dest: stateless compression\n");
- err = lowpan_uncompress_addr(skb, &hdr.daddr, lowpan_llprefix,
- lowpan_unc_llconf[tmp], skb->data);
+ err = lowpan_uncompress_addr(skb, &hdr.daddr, tmp, _daddr);
if (err)
goto drop;
}
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
index 4b8f917658b..2869c0526da 100644
--- a/net/ieee802154/6lowpan.h
+++ b/net/ieee802154/6lowpan.h
@@ -193,10 +193,12 @@
/* Values of fields within the IPHC encoding second byte */
#define LOWPAN_IPHC_CID 0x80
+#define LOWPAN_IPHC_ADDR_00 0x00
+#define LOWPAN_IPHC_ADDR_01 0x01
+#define LOWPAN_IPHC_ADDR_02 0x02
+#define LOWPAN_IPHC_ADDR_03 0x03
+
#define LOWPAN_IPHC_SAC 0x40
-#define LOWPAN_IPHC_SAM_00 0x00
-#define LOWPAN_IPHC_SAM_01 0x10
-#define LOWPAN_IPHC_SAM_10 0x20
#define LOWPAN_IPHC_SAM 0x30
#define LOWPAN_IPHC_SAM_BIT 4
@@ -230,4 +232,16 @@
dest = 16 bit inline */
#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
+static inline bool lowpan_fetch_skb(struct sk_buff *skb,
+ void *data, const unsigned int len)
+{
+ if (unlikely(!pskb_may_pull(skb, len)))
+ return true;
+
+ skb_copy_from_linear_data(skb, data, len);
+ skb_pull(skb, len);
+
+ return false;
+}
+
#endif /* __6LOWPAN_H__ */
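One quirk of the lowpan_fetch_skb() helper added above: it returns true on failure (when pskb_may_pull() cannot provide len linear bytes), so callers accumulate the results into a single fail flag and check once, as the IPHC code does. A minimal usage sketch with invented names:

static int example_fetch_iphc(struct sk_buff *skb, u8 *iphc0, u8 *iphc1)
{
	bool fail;

	fail = lowpan_fetch_skb(skb, iphc0, sizeof(*iphc0));
	fail |= lowpan_fetch_skb(skb, iphc1, sizeof(*iphc1));

	return fail ? -EIO : 0;
}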
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c
index 13571eae6ba..ef56ab5b35f 100644
--- a/net/ieee802154/wpan-class.c
+++ b/net/ieee802154/wpan-class.c
@@ -36,7 +36,8 @@ static ssize_t name ## _show(struct device *dev, \
ret = snprintf(buf, PAGE_SIZE, format_string "\n", args); \
mutex_unlock(&phy->pib_lock); \
return ret; \
-}
+} \
+static DEVICE_ATTR_RO(name);
#define MASTER_SHOW(field, format_string) \
MASTER_SHOW_COMPLEX(field, format_string, phy->field)
@@ -66,15 +67,17 @@ static ssize_t channels_supported_show(struct device *dev,
mutex_unlock(&phy->pib_lock);
return len;
}
-
-static struct device_attribute pmib_attrs[] = {
- __ATTR_RO(current_channel),
- __ATTR_RO(current_page),
- __ATTR_RO(channels_supported),
- __ATTR_RO(transmit_power),
- __ATTR_RO(cca_mode),
- {},
+static DEVICE_ATTR_RO(channels_supported);
+
+static struct attribute *pmib_attrs[] = {
+ &dev_attr_current_channel.attr,
+ &dev_attr_current_page.attr,
+ &dev_attr_channels_supported.attr,
+ &dev_attr_transmit_power.attr,
+ &dev_attr_cca_mode.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(pmib);
static void wpan_phy_release(struct device *d)
{
@@ -85,7 +88,7 @@ static void wpan_phy_release(struct device *d)
static struct class wpan_phy_class = {
.name = "ieee802154",
.dev_release = wpan_phy_release,
- .dev_attrs = pmib_attrs,
+ .dev_groups = pmib_groups,
};
static DEFINE_MUTEX(wpan_phy_mutex);
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 37cf1a6ea3a..05c57f0fcab 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -259,22 +259,6 @@ config IP_PIMSM_V2
gated-5). This routing protocol is not used widely, so say N unless
you want to play with it.
-config ARPD
- bool "IP: ARP daemon support"
- ---help---
- The kernel maintains an internal cache which maps IP addresses to
- hardware addresses on the local network, so that Ethernet
- frames are sent to the proper address on the physical networking
- layer. Normally, kernel uses the ARP protocol to resolve these
- mappings.
-
- Saying Y here adds support to have an user space daemon to do this
- resolution instead. This is useful for implementing an alternate
- address resolution protocol (e.g. NHRP on mGRE tunnels) and also for
- testing purposes.
-
- If unsure, say N.
-
config SYN_COOKIES
bool "IP: TCP syncookie support"
---help---
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index b4d0be2b7ce..7a1874b7b8f 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1532,18 +1532,6 @@ int snmp_mib_init(void __percpu *ptr[2], size_t mibsize, size_t align)
}
EXPORT_SYMBOL_GPL(snmp_mib_init);
-void snmp_mib_free(void __percpu *ptr[SNMP_ARRAY_SZ])
-{
- int i;
-
- BUG_ON(ptr == NULL);
- for (i = 0; i < SNMP_ARRAY_SZ; i++) {
- free_percpu(ptr[i]);
- ptr[i] = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(snmp_mib_free);
-
#ifdef CONFIG_IP_MULTICAST
static const struct net_protocol igmp_protocol = {
.handler = igmp_rcv,
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 4429b013f26..7808093cede 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -368,9 +368,7 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
} else {
probes -= neigh->parms->app_probes;
if (probes < 0) {
-#ifdef CONFIG_ARPD
neigh_app_ns(neigh);
-#endif
return;
}
}
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 8d48c392adc..a1b5bcbd04a 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -73,6 +73,8 @@ static struct ipv4_devconf ipv4_devconf = {
[IPV4_DEVCONF_SEND_REDIRECTS - 1] = 1,
[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
+ [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
+ [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
},
};
@@ -83,6 +85,8 @@ static struct ipv4_devconf ipv4_devconf_dflt = {
[IPV4_DEVCONF_SECURE_REDIRECTS - 1] = 1,
[IPV4_DEVCONF_SHARED_MEDIA - 1] = 1,
[IPV4_DEVCONF_ACCEPT_SOURCE_ROUTE - 1] = 1,
+ [IPV4_DEVCONF_IGMPV2_UNSOLICITED_REPORT_INTERVAL - 1] = 10000 /*ms*/,
+ [IPV4_DEVCONF_IGMPV3_UNSOLICITED_REPORT_INTERVAL - 1] = 1000 /*ms*/,
},
};
@@ -772,7 +776,7 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
ci = nla_data(tb[IFA_CACHEINFO]);
if (!ci->ifa_valid || ci->ifa_prefered > ci->ifa_valid) {
err = -EINVAL;
- goto errout;
+ goto errout_free;
}
*pvalid_lft = ci->ifa_valid;
*pprefered_lft = ci->ifa_prefered;
@@ -780,6 +784,8 @@ static struct in_ifaddr *rtm_to_ifaddr(struct net *net, struct nlmsghdr *nlh,
return ifa;
+errout_free:
+ inet_free_ifa(ifa);
errout:
return ERR_PTR(err);
}
@@ -1124,10 +1130,7 @@ static int inet_gifconf(struct net_device *dev, char __user *buf, int len)
if (len < (int) sizeof(ifr))
break;
memset(&ifr, 0, sizeof(struct ifreq));
- if (ifa->ifa_label)
- strcpy(ifr.ifr_name, ifa->ifa_label);
- else
- strcpy(ifr.ifr_name, dev->name);
+ strcpy(ifr.ifr_name, ifa->ifa_label);
(*(struct sockaddr_in *)&ifr.ifr_addr).sin_family = AF_INET;
(*(struct sockaddr_in *)&ifr.ifr_addr).sin_addr.s_addr =
@@ -2095,11 +2098,15 @@ static struct devinet_sysctl_table {
DEVINET_SYSCTL_RW_ENTRY(ARP_ACCEPT, "arp_accept"),
DEVINET_SYSCTL_RW_ENTRY(ARP_NOTIFY, "arp_notify"),
DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP_PVLAN, "proxy_arp_pvlan"),
+ DEVINET_SYSCTL_RW_ENTRY(FORCE_IGMP_VERSION,
+ "force_igmp_version"),
+ DEVINET_SYSCTL_RW_ENTRY(IGMPV2_UNSOLICITED_REPORT_INTERVAL,
+ "igmpv2_unsolicited_report_interval"),
+ DEVINET_SYSCTL_RW_ENTRY(IGMPV3_UNSOLICITED_REPORT_INTERVAL,
+ "igmpv3_unsolicited_report_interval"),
DEVINET_SYSCTL_FLUSHING_ENTRY(NOXFRM, "disable_xfrm"),
DEVINET_SYSCTL_FLUSHING_ENTRY(NOPOLICY, "disable_policy"),
- DEVINET_SYSCTL_FLUSHING_ENTRY(FORCE_IGMP_VERSION,
- "force_igmp_version"),
DEVINET_SYSCTL_FLUSHING_ENTRY(PROMOTE_SECONDARIES,
"promote_secondaries"),
DEVINET_SYSCTL_FLUSHING_ENTRY(ROUTE_LOCALNET,
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index ab3d814bc80..109ee89f123 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -477,7 +477,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
}
return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
- net_adj) & ~(align - 1)) + (net_adj - 2);
+ net_adj) & ~(align - 1)) + net_adj - 2;
}
static void esp4_err(struct sk_buff *skb, u32 info)
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 26aa65d1fce..523be38e37d 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -101,6 +101,30 @@ errout:
return err;
}
+static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
+{
+ struct fib_result *result = (struct fib_result *) arg->result;
+ struct net_device *dev = result->fi->fib_dev;
+
+ /* do not accept result if the route does
+ * not meet the required prefix length
+ */
+ if (result->prefixlen <= rule->suppress_prefixlen)
+ goto suppress_route;
+
+ /* do not accept result if the route uses a device
+ * belonging to a forbidden interface group
+ */
+ if (rule->suppress_ifgroup != -1 && dev && dev->group == rule->suppress_ifgroup)
+ goto suppress_route;
+
+ return false;
+
+suppress_route:
+ if (!(arg->flags & FIB_LOOKUP_NOREF))
+ fib_info_put(result->fi);
+ return true;
+}
static int fib4_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
@@ -267,6 +291,7 @@ static const struct fib_rules_ops __net_initconst fib4_rules_ops_template = {
.rule_size = sizeof(struct fib4_rule),
.addr_size = sizeof(u32),
.action = fib4_rule_action,
+ .suppress = fib4_rule_suppress,
.match = fib4_rule_match,
.configure = fib4_rule_configure,
.delete = fib4_rule_delete,
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 49616fed934..3df6d3edb2a 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -71,7 +71,6 @@
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
-#include <linux/prefetch.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/ip.h>
@@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c)
if (!c)
continue;
- if (IS_LEAF(c)) {
- prefetch(rcu_dereference_rtnl(p->child[idx]));
+ if (IS_LEAF(c))
return (struct leaf *) c;
- }
/* Rescan start scanning in new node */
p = (struct tnode *) c;
@@ -2133,7 +2130,7 @@ static void trie_show_stats(struct seq_file *seq, struct trie_stat *stat)
max--;
pointers = 0;
- for (i = 1; i <= max; i++)
+ for (i = 1; i < max; i++)
if (stat->nodesizes[i] != 0) {
seq_printf(seq, " %u: %u", i, stat->nodesizes[i]);
pointers += (1<<i) * stat->nodesizes[i];
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index cd71190d296..d6c0e64ec97 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -88,6 +88,7 @@
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/times.h>
+#include <linux/pkt_sched.h>
#include <net/net_namespace.h>
#include <net/arp.h>
@@ -113,7 +114,8 @@
#define IGMP_V1_Router_Present_Timeout (400*HZ)
#define IGMP_V2_Router_Present_Timeout (400*HZ)
-#define IGMP_Unsolicited_Report_Interval (10*HZ)
+#define IGMP_V2_Unsolicited_Report_Interval (10*HZ)
+#define IGMP_V3_Unsolicited_Report_Interval (1*HZ)
#define IGMP_Query_Response_Interval (10*HZ)
#define IGMP_Unsolicited_Report_Count 2
@@ -138,6 +140,29 @@
((in_dev)->mr_v2_seen && \
time_before(jiffies, (in_dev)->mr_v2_seen)))
+static int unsolicited_report_interval(struct in_device *in_dev)
+{
+ int interval_ms, interval_jiffies;
+
+ if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
+ interval_ms = IN_DEV_CONF_GET(
+ in_dev,
+ IGMPV2_UNSOLICITED_REPORT_INTERVAL);
+ else /* v3 */
+ interval_ms = IN_DEV_CONF_GET(
+ in_dev,
+ IGMPV3_UNSOLICITED_REPORT_INTERVAL);
+
+ interval_jiffies = msecs_to_jiffies(interval_ms);
+
+ /* _timer functions can't handle a delay of 0 jiffies so ensure
+ * we always return a positive value.
+ */
+ if (interval_jiffies <= 0)
+ interval_jiffies = 1;
+ return interval_jiffies;
+}
+
static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im);
static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr);
static void igmpv3_clear_delrec(struct in_device *in_dev);
@@ -315,6 +340,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
if (size < 256)
return NULL;
}
+ skb->priority = TC_PRIO_CONTROL;
igmp_skb_size(skb) = size;
rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
@@ -670,6 +696,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
ip_rt_put(rt);
return -1;
}
+ skb->priority = TC_PRIO_CONTROL;
skb_dst_set(skb, &rt->dst);
@@ -719,7 +746,8 @@ static void igmp_ifc_timer_expire(unsigned long data)
igmpv3_send_cr(in_dev);
if (in_dev->mr_ifc_count) {
in_dev->mr_ifc_count--;
- igmp_ifc_start_timer(in_dev, IGMP_Unsolicited_Report_Interval);
+ igmp_ifc_start_timer(in_dev,
+ unsolicited_report_interval(in_dev));
}
__in_dev_put(in_dev);
}
@@ -744,7 +772,7 @@ static void igmp_timer_expire(unsigned long data)
if (im->unsolicit_count) {
im->unsolicit_count--;
- igmp_start_timer(im, IGMP_Unsolicited_Report_Interval);
+ igmp_start_timer(im, unsolicited_report_interval(in_dev));
}
im->reporter = 1;
spin_unlock(&im->lock);
@@ -1323,16 +1351,17 @@ out:
EXPORT_SYMBOL(ip_mc_inc_group);
/*
- * Resend IGMP JOIN report; used for bonding.
- * Called with rcu_read_lock()
+ * Resend IGMP JOIN report; used by netdev notifier.
*/
-void ip_mc_rejoin_groups(struct in_device *in_dev)
+static void ip_mc_rejoin_groups(struct in_device *in_dev)
{
#ifdef CONFIG_IP_MULTICAST
struct ip_mc_list *im;
int type;
- for_each_pmc_rcu(in_dev, im) {
+ ASSERT_RTNL();
+
+ for_each_pmc_rtnl(in_dev, im) {
if (im->multiaddr == IGMP_ALL_HOSTS)
continue;
@@ -1349,7 +1378,6 @@ void ip_mc_rejoin_groups(struct in_device *in_dev)
}
#endif
}
-EXPORT_SYMBOL(ip_mc_rejoin_groups);
/*
* A socket has left a multicast group on device dev
@@ -2735,8 +2763,42 @@ static struct pernet_operations igmp_net_ops = {
.exit = igmp_net_exit,
};
+static int igmp_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct in_device *in_dev;
+
+ switch (event) {
+ case NETDEV_RESEND_IGMP:
+ in_dev = __in_dev_get_rtnl(dev);
+ if (in_dev)
+ ip_mc_rejoin_groups(in_dev);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block igmp_notifier = {
+ .notifier_call = igmp_netdev_event,
+};
+
int __init igmp_mc_proc_init(void)
{
- return register_pernet_subsys(&igmp_net_ops);
+ int err;
+
+ err = register_pernet_subsys(&igmp_net_ops);
+ if (err)
+ return err;
+ err = register_netdevice_notifier(&igmp_notifier);
+ if (err)
+ goto reg_notif_fail;
+ return 0;
+
+reg_notif_fail:
+ unregister_pernet_subsys(&igmp_net_ops);
+ return err;
}
#endif
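unsolicited_report_interval() above reads the per-interface value in milliseconds and converts it to jiffies, flooring the result at 1 because the timer helpers cannot take a zero-jiffy delay. The same clamp in isolation, as an illustrative fragment with an invented name:

#include <linux/jiffies.h>

static unsigned long example_ms_to_jiffies_min1(unsigned int interval_ms)
{
	unsigned long j = msecs_to_jiffies(interval_ms);

	return j ? j : 1;	/* never hand a 0-jiffy delay to a timer */
}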
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 1f6eab66f7c..d7aea4c5b94 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -383,7 +383,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
if (daddr)
memcpy(&iph->daddr, daddr, 4);
if (iph->daddr)
- return t->hlen;
+ return t->hlen + sizeof(*iph);
return -(t->hlen + sizeof(*iph));
}
@@ -534,7 +534,7 @@ static int __net_init ipgre_init_net(struct net *net)
static void __net_exit ipgre_exit_net(struct net *net)
{
struct ip_tunnel_net *itn = net_generic(net, ipgre_net_id);
- ip_tunnel_delete_net(itn);
+ ip_tunnel_delete_net(itn, &ipgre_link_ops);
}
static struct pernet_operations ipgre_net_ops = {
@@ -767,7 +767,7 @@ static int __net_init ipgre_tap_init_net(struct net *net)
static void __net_exit ipgre_tap_exit_net(struct net *net)
{
struct ip_tunnel_net *itn = net_generic(net, gre_tap_net_id);
- ip_tunnel_delete_net(itn);
+ ip_tunnel_delete_net(itn, &ipgre_tap_ops);
}
static struct pernet_operations ipgre_tap_net_ops = {
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 15e3e683ade..054a3e97d82 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -141,6 +141,7 @@
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
+#include <net/inet_ecn.h>
#include <linux/netfilter_ipv4.h>
#include <net/xfrm.h>
#include <linux/mroute.h>
@@ -410,6 +411,13 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
if (iph->ihl < 5 || iph->version != 4)
goto inhdr_error;
+ BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
+ BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
+ BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
+ IP_ADD_STATS_BH(dev_net(dev),
+ IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
+ max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
+
if (!pskb_may_pull(skb, iph->ihl*4))
goto inhdr_error;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 4bcabf3ab4c..9ee17e3d11c 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -211,14 +211,6 @@ static inline int ip_finish_output2(struct sk_buff *skb)
return -EINVAL;
}
-static inline int ip_skb_dst_mtu(struct sk_buff *skb)
-{
- struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
-
- return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
- skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
-}
-
static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index ca1cb2d5f6e..ac9fabe0300 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -350,7 +350,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
struct flowi4 fl4;
struct rtable *rt;
- rt = ip_route_output_tunnel(dev_net(dev), &fl4,
+ rt = ip_route_output_tunnel(tunnel->net, &fl4,
tunnel->parms.iph.protocol,
iph->daddr, iph->saddr,
tunnel->parms.o_key,
@@ -365,7 +365,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
}
if (!tdev && tunnel->parms.link)
- tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
+ tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
if (tdev) {
hlen = tdev->hard_header_len + tdev->needed_headroom;
@@ -454,15 +454,15 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
tstats->rx_bytes += skb->len;
u64_stats_update_end(&tstats->syncp);
- if (tunnel->net != dev_net(tunnel->dev))
- skb_scrub_packet(skb);
-
if (tunnel->dev->type == ARPHRD_ETHER) {
skb->protocol = eth_type_trans(skb, tunnel->dev);
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
} else {
skb->dev = tunnel->dev;
}
+
+ skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
+
gro_cells_receive(&tunnel->gro_cells, skb);
return 0;
@@ -613,9 +613,6 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
goto tx_error;
}
- if (tunnel->net != dev_net(dev))
- skb_scrub_packet(skb);
-
if (tunnel->err_count > 0) {
if (time_before(jiffies,
tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
@@ -653,9 +650,9 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
}
}
- err = iptunnel_xmit(dev_net(dev), rt, skb,
- fl4.saddr, fl4.daddr, protocol,
- ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df);
+ err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, protocol,
+ ip_tunnel_ecn_encap(tos, inner_iph, skb), ttl, df,
+ !net_eq(tunnel->net, dev_net(dev)));
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
return;
@@ -820,11 +817,10 @@ static void ip_tunnel_dev_free(struct net_device *dev)
void ip_tunnel_dellink(struct net_device *dev, struct list_head *head)
{
- struct net *net = dev_net(dev);
struct ip_tunnel *tunnel = netdev_priv(dev);
struct ip_tunnel_net *itn;
- itn = net_generic(net, tunnel->ip_tnl_net_id);
+ itn = net_generic(tunnel->net, tunnel->ip_tnl_net_id);
if (itn->fb_tunnel_dev != dev) {
ip_tunnel_del(netdev_priv(dev));
@@ -838,56 +834,68 @@ int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
{
struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
struct ip_tunnel_parm parms;
+ unsigned int i;
- itn->tunnels = kzalloc(IP_TNL_HASH_SIZE * sizeof(struct hlist_head), GFP_KERNEL);
- if (!itn->tunnels)
- return -ENOMEM;
+ for (i = 0; i < IP_TNL_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&itn->tunnels[i]);
if (!ops) {
itn->fb_tunnel_dev = NULL;
return 0;
}
+
memset(&parms, 0, sizeof(parms));
if (devname)
strlcpy(parms.name, devname, IFNAMSIZ);
rtnl_lock();
itn->fb_tunnel_dev = __ip_tunnel_create(net, ops, &parms);
+ /* FB netdevice is special: we have one, and only one per netns.
+ * Allowing it to be moved to another netns is clearly unsafe.
+ */
+ if (!IS_ERR(itn->fb_tunnel_dev))
+ itn->fb_tunnel_dev->features |= NETIF_F_NETNS_LOCAL;
rtnl_unlock();
- if (IS_ERR(itn->fb_tunnel_dev)) {
- kfree(itn->tunnels);
- return PTR_ERR(itn->fb_tunnel_dev);
- }
- return 0;
+ return PTR_RET(itn->fb_tunnel_dev);
}
EXPORT_SYMBOL_GPL(ip_tunnel_init_net);
-static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head)
+static void ip_tunnel_destroy(struct ip_tunnel_net *itn, struct list_head *head,
+ struct rtnl_link_ops *ops)
{
+ struct net *net = dev_net(itn->fb_tunnel_dev);
+ struct net_device *dev, *aux;
int h;
+ for_each_netdev_safe(net, dev, aux)
+ if (dev->rtnl_link_ops == ops)
+ unregister_netdevice_queue(dev, head);
+
for (h = 0; h < IP_TNL_HASH_SIZE; h++) {
struct ip_tunnel *t;
struct hlist_node *n;
struct hlist_head *thead = &itn->tunnels[h];
hlist_for_each_entry_safe(t, n, thead, hash_node)
- unregister_netdevice_queue(t->dev, head);
+ /* If dev is in the same netns, it has already
+ * been added to the list by the previous loop.
+ */
+ if (!net_eq(dev_net(t->dev), net))
+ unregister_netdevice_queue(t->dev, head);
}
if (itn->fb_tunnel_dev)
unregister_netdevice_queue(itn->fb_tunnel_dev, head);
}
-void ip_tunnel_delete_net(struct ip_tunnel_net *itn)
+void ip_tunnel_delete_net(struct ip_tunnel_net *itn, struct rtnl_link_ops *ops)
{
LIST_HEAD(list);
rtnl_lock();
- ip_tunnel_destroy(itn, &list);
+ ip_tunnel_destroy(itn, &list, ops);
unregister_netdevice_many(&list);
rtnl_unlock();
- kfree(itn->tunnels);
}
EXPORT_SYMBOL_GPL(ip_tunnel_delete_net);
@@ -929,23 +937,21 @@ EXPORT_SYMBOL_GPL(ip_tunnel_newlink);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p)
{
- struct ip_tunnel *t, *nt;
- struct net *net = dev_net(dev);
+ struct ip_tunnel *t;
struct ip_tunnel *tunnel = netdev_priv(dev);
+ struct net *net = tunnel->net;
struct ip_tunnel_net *itn = net_generic(net, tunnel->ip_tnl_net_id);
if (dev == itn->fb_tunnel_dev)
return -EINVAL;
- nt = netdev_priv(dev);
-
t = ip_tunnel_find(itn, p, dev->type);
if (t) {
if (t->dev != dev)
return -EEXIST;
} else {
- t = nt;
+ t = tunnel;
if (dev->type != ARPHRD_ETHER) {
unsigned int nflags = 0;
@@ -984,6 +990,7 @@ int ip_tunnel_init(struct net_device *dev)
}
tunnel->dev = dev;
+ tunnel->net = dev_net(dev);
strcpy(tunnel->parms.name, dev->name);
iph->version = 4;
iph->ihl = 5;
@@ -994,8 +1001,8 @@ EXPORT_SYMBOL_GPL(ip_tunnel_init);
void ip_tunnel_uninit(struct net_device *dev)
{
- struct net *net = dev_net(dev);
struct ip_tunnel *tunnel = netdev_priv(dev);
+ struct net *net = tunnel->net;
struct ip_tunnel_net *itn;
itn = net_generic(net, tunnel->ip_tnl_net_id);
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 7167b08977d..d6c856b17fd 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -46,19 +46,17 @@
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
-int iptunnel_xmit(struct net *net, struct rtable *rt,
- struct sk_buff *skb,
+int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
__be32 src, __be32 dst, __u8 proto,
- __u8 tos, __u8 ttl, __be16 df)
+ __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
int pkt_len = skb->len;
struct iphdr *iph;
int err;
- nf_reset(skb);
- secpath_reset(skb);
+ skb_scrub_packet(skb, xnet);
+
skb->rxhash = 0;
- skb_dst_drop(skb);
skb_dst_set(skb, &rt->dst);
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
@@ -76,9 +74,7 @@ int iptunnel_xmit(struct net *net, struct rtable *rt,
iph->daddr = dst;
iph->saddr = src;
iph->ttl = ttl;
- tunnel_ip_select_ident(skb,
- (const struct iphdr *)skb_inner_network_header(skb),
- &rt->dst);
+ __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
err = ip_local_out(skb);
if (unlikely(net_xmit_eval(err)))
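Editor's note: the reworked iptunnel_xmit() passes (skb_shinfo(skb)->gso_segs ?: 1) - 1 to __ip_select_ident(). The binary "a ?: b" form is the GNU C conditional with an omitted middle operand: it yields a when a is nonzero and b otherwise, evaluating a only once. A tiny userspace illustration (builds with GCC or Clang):

#include <stdio.h>

/* "x ?: y" is GNU C shorthand for "x ? x : y", with x evaluated only once. */
static int segs_minus_one(int gso_segs)
{
	return (gso_segs ?: 1) - 1;	/* 0 segments is treated as 1, then offset by -1 */
}

int main(void)
{
	printf("%d %d %d\n", segs_minus_one(0), segs_minus_one(1), segs_minus_one(4));
	/* prints: 0 0 3 */
	return 0;
}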
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 17cc0ffa8c0..e805e7b3030 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -44,176 +44,10 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
-#define HASH_SIZE 16
-#define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
-
static struct rtnl_link_ops vti_link_ops __read_mostly;
static int vti_net_id __read_mostly;
-struct vti_net {
- struct ip_tunnel __rcu *tunnels_r_l[HASH_SIZE];
- struct ip_tunnel __rcu *tunnels_r[HASH_SIZE];
- struct ip_tunnel __rcu *tunnels_l[HASH_SIZE];
- struct ip_tunnel __rcu *tunnels_wc[1];
- struct ip_tunnel __rcu **tunnels[4];
-
- struct net_device *fb_tunnel_dev;
-};
-
-static int vti_fb_tunnel_init(struct net_device *dev);
static int vti_tunnel_init(struct net_device *dev);
-static void vti_tunnel_setup(struct net_device *dev);
-static void vti_dev_free(struct net_device *dev);
-static int vti_tunnel_bind_dev(struct net_device *dev);
-
-#define VTI_XMIT(stats1, stats2) do { \
- int err; \
- int pkt_len = skb->len; \
- err = dst_output(skb); \
- if (net_xmit_eval(err) == 0) { \
- u64_stats_update_begin(&(stats1)->syncp); \
- (stats1)->tx_bytes += pkt_len; \
- (stats1)->tx_packets++; \
- u64_stats_update_end(&(stats1)->syncp); \
- } else { \
- (stats2)->tx_errors++; \
- (stats2)->tx_aborted_errors++; \
- } \
-} while (0)
-
-
-static struct ip_tunnel *vti_tunnel_lookup(struct net *net,
- __be32 remote, __be32 local)
-{
- unsigned h0 = HASH(remote);
- unsigned h1 = HASH(local);
- struct ip_tunnel *t;
- struct vti_net *ipn = net_generic(net, vti_net_id);
-
- for_each_ip_tunnel_rcu(t, ipn->tunnels_r_l[h0 ^ h1])
- if (local == t->parms.iph.saddr &&
- remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
- return t;
- for_each_ip_tunnel_rcu(t, ipn->tunnels_r[h0])
- if (remote == t->parms.iph.daddr && (t->dev->flags&IFF_UP))
- return t;
-
- for_each_ip_tunnel_rcu(t, ipn->tunnels_l[h1])
- if (local == t->parms.iph.saddr && (t->dev->flags&IFF_UP))
- return t;
-
- for_each_ip_tunnel_rcu(t, ipn->tunnels_wc[0])
- if (t && (t->dev->flags&IFF_UP))
- return t;
- return NULL;
-}
-
-static struct ip_tunnel __rcu **__vti_bucket(struct vti_net *ipn,
- struct ip_tunnel_parm *parms)
-{
- __be32 remote = parms->iph.daddr;
- __be32 local = parms->iph.saddr;
- unsigned h = 0;
- int prio = 0;
-
- if (remote) {
- prio |= 2;
- h ^= HASH(remote);
- }
- if (local) {
- prio |= 1;
- h ^= HASH(local);
- }
- return &ipn->tunnels[prio][h];
-}
-
-static inline struct ip_tunnel __rcu **vti_bucket(struct vti_net *ipn,
- struct ip_tunnel *t)
-{
- return __vti_bucket(ipn, &t->parms);
-}
-
-static void vti_tunnel_unlink(struct vti_net *ipn, struct ip_tunnel *t)
-{
- struct ip_tunnel __rcu **tp;
- struct ip_tunnel *iter;
-
- for (tp = vti_bucket(ipn, t);
- (iter = rtnl_dereference(*tp)) != NULL;
- tp = &iter->next) {
- if (t == iter) {
- rcu_assign_pointer(*tp, t->next);
- break;
- }
- }
-}
-
-static void vti_tunnel_link(struct vti_net *ipn, struct ip_tunnel *t)
-{
- struct ip_tunnel __rcu **tp = vti_bucket(ipn, t);
-
- rcu_assign_pointer(t->next, rtnl_dereference(*tp));
- rcu_assign_pointer(*tp, t);
-}
-
-static struct ip_tunnel *vti_tunnel_locate(struct net *net,
- struct ip_tunnel_parm *parms,
- int create)
-{
- __be32 remote = parms->iph.daddr;
- __be32 local = parms->iph.saddr;
- struct ip_tunnel *t, *nt;
- struct ip_tunnel __rcu **tp;
- struct net_device *dev;
- char name[IFNAMSIZ];
- struct vti_net *ipn = net_generic(net, vti_net_id);
-
- for (tp = __vti_bucket(ipn, parms);
- (t = rtnl_dereference(*tp)) != NULL;
- tp = &t->next) {
- if (local == t->parms.iph.saddr && remote == t->parms.iph.daddr)
- return t;
- }
- if (!create)
- return NULL;
-
- if (parms->name[0])
- strlcpy(name, parms->name, IFNAMSIZ);
- else
- strcpy(name, "vti%d");
-
- dev = alloc_netdev(sizeof(*t), name, vti_tunnel_setup);
- if (dev == NULL)
- return NULL;
-
- dev_net_set(dev, net);
-
- nt = netdev_priv(dev);
- nt->parms = *parms;
- dev->rtnl_link_ops = &vti_link_ops;
-
- vti_tunnel_bind_dev(dev);
-
- if (register_netdevice(dev) < 0)
- goto failed_free;
-
- dev_hold(dev);
- vti_tunnel_link(ipn, nt);
- return nt;
-
-failed_free:
- free_netdev(dev);
- return NULL;
-}
-
-static void vti_tunnel_uninit(struct net_device *dev)
-{
- struct net *net = dev_net(dev);
- struct vti_net *ipn = net_generic(net, vti_net_id);
-
- vti_tunnel_unlink(ipn, netdev_priv(dev));
- dev_put(dev);
-}
static int vti_err(struct sk_buff *skb, u32 info)
{
@@ -222,6 +56,8 @@ static int vti_err(struct sk_buff *skb, u32 info)
* 8 bytes of packet payload. It means, that precise relaying of
* ICMP in the real Internet is absolutely infeasible.
*/
+ struct net *net = dev_net(skb->dev);
+ struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
struct iphdr *iph = (struct iphdr *)skb->data;
const int type = icmp_hdr(skb)->type;
const int code = icmp_hdr(skb)->code;
@@ -252,7 +88,8 @@ static int vti_err(struct sk_buff *skb, u32 info)
err = -ENOENT;
- t = vti_tunnel_lookup(dev_net(skb->dev), iph->daddr, iph->saddr);
+ t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+ iph->daddr, iph->saddr, 0);
if (t == NULL)
goto out;
@@ -281,8 +118,11 @@ static int vti_rcv(struct sk_buff *skb)
{
struct ip_tunnel *tunnel;
const struct iphdr *iph = ip_hdr(skb);
+ struct net *net = dev_net(skb->dev);
+ struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
- tunnel = vti_tunnel_lookup(dev_net(skb->dev), iph->saddr, iph->daddr);
+ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+ iph->saddr, iph->daddr, 0);
if (tunnel != NULL) {
struct pcpu_tstats *tstats;
@@ -311,7 +151,6 @@ static int vti_rcv(struct sk_buff *skb)
static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- struct pcpu_tstats *tstats;
struct iphdr *tiph = &tunnel->parms.iph;
u8 tos;
struct rtable *rt; /* Route to the other host */
@@ -319,6 +158,7 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
struct iphdr *old_iph = ip_hdr(skb);
__be32 dst = tiph->daddr;
struct flowi4 fl4;
+ int err;
if (skb->protocol != htons(ETH_P_IP))
goto tx_error;
@@ -367,8 +207,10 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
nf_reset(skb);
skb->dev = skb_dst(skb)->dev;
- tstats = this_cpu_ptr(dev->tstats);
- VTI_XMIT(tstats, &dev->stats);
+ err = dst_output(skb);
+ if (net_xmit_eval(err) == 0)
+ err = skb->len;
+ iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
return NETDEV_TX_OK;
tx_error_icmp:
@@ -379,198 +221,57 @@ tx_error:
return NETDEV_TX_OK;
}
-static int vti_tunnel_bind_dev(struct net_device *dev)
-{
- struct net_device *tdev = NULL;
- struct ip_tunnel *tunnel;
- struct iphdr *iph;
-
- tunnel = netdev_priv(dev);
- iph = &tunnel->parms.iph;
-
- if (iph->daddr) {
- struct rtable *rt;
- struct flowi4 fl4;
- memset(&fl4, 0, sizeof(fl4));
- flowi4_init_output(&fl4, tunnel->parms.link,
- be32_to_cpu(tunnel->parms.i_key),
- RT_TOS(iph->tos), RT_SCOPE_UNIVERSE,
- IPPROTO_IPIP, 0,
- iph->daddr, iph->saddr, 0, 0);
- rt = ip_route_output_key(dev_net(dev), &fl4);
- if (!IS_ERR(rt)) {
- tdev = rt->dst.dev;
- ip_rt_put(rt);
- }
- dev->flags |= IFF_POINTOPOINT;
- }
-
- if (!tdev && tunnel->parms.link)
- tdev = __dev_get_by_index(dev_net(dev), tunnel->parms.link);
-
- if (tdev) {
- dev->hard_header_len = tdev->hard_header_len +
- sizeof(struct iphdr);
- dev->mtu = tdev->mtu;
- }
- dev->iflink = tunnel->parms.link;
- return dev->mtu;
-}
-
static int
vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
int err = 0;
struct ip_tunnel_parm p;
- struct ip_tunnel *t;
- struct net *net = dev_net(dev);
- struct vti_net *ipn = net_generic(net, vti_net_id);
-
- switch (cmd) {
- case SIOCGETTUNNEL:
- t = NULL;
- if (dev == ipn->fb_tunnel_dev) {
- if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
- sizeof(p))) {
- err = -EFAULT;
- break;
- }
- t = vti_tunnel_locate(net, &p, 0);
- }
- if (t == NULL)
- t = netdev_priv(dev);
- memcpy(&p, &t->parms, sizeof(p));
- p.i_flags |= GRE_KEY | VTI_ISVTI;
- p.o_flags |= GRE_KEY;
- if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
- err = -EFAULT;
- break;
-
- case SIOCADDTUNNEL:
- case SIOCCHGTUNNEL:
- err = -EPERM;
- if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
- goto done;
- err = -EFAULT;
- if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
- goto done;
+ if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
+ return -EFAULT;
- err = -EINVAL;
+ if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
if (p.iph.version != 4 || p.iph.protocol != IPPROTO_IPIP ||
p.iph.ihl != 5)
- goto done;
-
- t = vti_tunnel_locate(net, &p, cmd == SIOCADDTUNNEL);
-
- if (dev != ipn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
- if (t != NULL) {
- if (t->dev != dev) {
- err = -EEXIST;
- break;
- }
- } else {
- if (((dev->flags&IFF_POINTOPOINT) &&
- !p.iph.daddr) ||
- (!(dev->flags&IFF_POINTOPOINT) &&
- p.iph.daddr)) {
- err = -EINVAL;
- break;
- }
- t = netdev_priv(dev);
- vti_tunnel_unlink(ipn, t);
- synchronize_net();
- t->parms.iph.saddr = p.iph.saddr;
- t->parms.iph.daddr = p.iph.daddr;
- t->parms.i_key = p.i_key;
- t->parms.o_key = p.o_key;
- t->parms.iph.protocol = IPPROTO_IPIP;
- memcpy(dev->dev_addr, &p.iph.saddr, 4);
- memcpy(dev->broadcast, &p.iph.daddr, 4);
- vti_tunnel_link(ipn, t);
- netdev_state_change(dev);
- }
- }
-
- if (t) {
- err = 0;
- if (cmd == SIOCCHGTUNNEL) {
- t->parms.i_key = p.i_key;
- t->parms.o_key = p.o_key;
- if (t->parms.link != p.link) {
- t->parms.link = p.link;
- vti_tunnel_bind_dev(dev);
- netdev_state_change(dev);
- }
- }
- p.i_flags |= GRE_KEY | VTI_ISVTI;
- p.o_flags |= GRE_KEY;
- if (copy_to_user(ifr->ifr_ifru.ifru_data, &t->parms,
- sizeof(p)))
- err = -EFAULT;
- } else
- err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
- break;
+ return -EINVAL;
+ }
- case SIOCDELTUNNEL:
- err = -EPERM;
- if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
- goto done;
-
- if (dev == ipn->fb_tunnel_dev) {
- err = -EFAULT;
- if (copy_from_user(&p, ifr->ifr_ifru.ifru_data,
- sizeof(p)))
- goto done;
- err = -ENOENT;
-
- t = vti_tunnel_locate(net, &p, 0);
- if (t == NULL)
- goto done;
- err = -EPERM;
- if (t->dev == ipn->fb_tunnel_dev)
- goto done;
- dev = t->dev;
- }
- unregister_netdevice(dev);
- err = 0;
- break;
+ err = ip_tunnel_ioctl(dev, &p, cmd);
+ if (err)
+ return err;
- default:
- err = -EINVAL;
+ if (cmd != SIOCDELTUNNEL) {
+ p.i_flags |= GRE_KEY | VTI_ISVTI;
+ p.o_flags |= GRE_KEY;
}
-done:
- return err;
-}
-
-static int vti_tunnel_change_mtu(struct net_device *dev, int new_mtu)
-{
- if (new_mtu < 68 || new_mtu > 0xFFF8)
- return -EINVAL;
- dev->mtu = new_mtu;
+ if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
+ return -EFAULT;
return 0;
}
static const struct net_device_ops vti_netdev_ops = {
.ndo_init = vti_tunnel_init,
- .ndo_uninit = vti_tunnel_uninit,
+ .ndo_uninit = ip_tunnel_uninit,
.ndo_start_xmit = vti_tunnel_xmit,
.ndo_do_ioctl = vti_tunnel_ioctl,
- .ndo_change_mtu = vti_tunnel_change_mtu,
+ .ndo_change_mtu = ip_tunnel_change_mtu,
.ndo_get_stats64 = ip_tunnel_get_stats64,
};
-static void vti_dev_free(struct net_device *dev)
+static void vti_tunnel_setup(struct net_device *dev)
{
- free_percpu(dev->tstats);
- free_netdev(dev);
+ dev->netdev_ops = &vti_netdev_ops;
+ ip_tunnel_setup(dev, vti_net_id);
}
-static void vti_tunnel_setup(struct net_device *dev)
+static int vti_tunnel_init(struct net_device *dev)
{
- dev->netdev_ops = &vti_netdev_ops;
- dev->destructor = vti_dev_free;
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+ struct iphdr *iph = &tunnel->parms.iph;
+
+ memcpy(dev->dev_addr, &iph->saddr, 4);
+ memcpy(dev->broadcast, &iph->daddr, 4);
dev->type = ARPHRD_TUNNEL;
dev->hard_header_len = LL_MAX_HEADER + sizeof(struct iphdr);
@@ -581,38 +282,18 @@ static void vti_tunnel_setup(struct net_device *dev)
dev->features |= NETIF_F_NETNS_LOCAL;
dev->features |= NETIF_F_LLTX;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
-}
-static int vti_tunnel_init(struct net_device *dev)
-{
- struct ip_tunnel *tunnel = netdev_priv(dev);
-
- tunnel->dev = dev;
- strcpy(tunnel->parms.name, dev->name);
-
- memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
- memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
-
- dev->tstats = alloc_percpu(struct pcpu_tstats);
- if (!dev->tstats)
- return -ENOMEM;
-
- return 0;
+ return ip_tunnel_init(dev);
}
-static int __net_init vti_fb_tunnel_init(struct net_device *dev)
+static void __net_init vti_fb_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
- struct vti_net *ipn = net_generic(dev_net(dev), vti_net_id);
iph->version = 4;
iph->protocol = IPPROTO_IPIP;
iph->ihl = 5;
-
- dev_hold(dev);
- rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
- return 0;
}
static struct xfrm_tunnel vti_handler __read_mostly = {
@@ -621,76 +302,30 @@ static struct xfrm_tunnel vti_handler __read_mostly = {
.priority = 1,
};
-static void vti_destroy_tunnels(struct vti_net *ipn, struct list_head *head)
-{
- int prio;
-
- for (prio = 1; prio < 4; prio++) {
- int h;
- for (h = 0; h < HASH_SIZE; h++) {
- struct ip_tunnel *t;
-
- t = rtnl_dereference(ipn->tunnels[prio][h]);
- while (t != NULL) {
- unregister_netdevice_queue(t->dev, head);
- t = rtnl_dereference(t->next);
- }
- }
- }
-}
-
static int __net_init vti_init_net(struct net *net)
{
int err;
- struct vti_net *ipn = net_generic(net, vti_net_id);
-
- ipn->tunnels[0] = ipn->tunnels_wc;
- ipn->tunnels[1] = ipn->tunnels_l;
- ipn->tunnels[2] = ipn->tunnels_r;
- ipn->tunnels[3] = ipn->tunnels_r_l;
-
- ipn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel),
- "ip_vti0",
- vti_tunnel_setup);
- if (!ipn->fb_tunnel_dev) {
- err = -ENOMEM;
- goto err_alloc_dev;
- }
- dev_net_set(ipn->fb_tunnel_dev, net);
-
- err = vti_fb_tunnel_init(ipn->fb_tunnel_dev);
- if (err)
- goto err_reg_dev;
- ipn->fb_tunnel_dev->rtnl_link_ops = &vti_link_ops;
+ struct ip_tunnel_net *itn;
- err = register_netdev(ipn->fb_tunnel_dev);
+ err = ip_tunnel_init_net(net, vti_net_id, &vti_link_ops, "ip_vti0");
if (err)
- goto err_reg_dev;
+ return err;
+ itn = net_generic(net, vti_net_id);
+ vti_fb_tunnel_init(itn->fb_tunnel_dev);
return 0;
-
-err_reg_dev:
- vti_dev_free(ipn->fb_tunnel_dev);
-err_alloc_dev:
- /* nothing */
- return err;
}
static void __net_exit vti_exit_net(struct net *net)
{
- struct vti_net *ipn = net_generic(net, vti_net_id);
- LIST_HEAD(list);
-
- rtnl_lock();
- vti_destroy_tunnels(ipn, &list);
- unregister_netdevice_many(&list);
- rtnl_unlock();
+ struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
+ ip_tunnel_delete_net(itn, &vti_link_ops);
}
static struct pernet_operations vti_net_ops = {
.init = vti_init_net,
.exit = vti_exit_net,
.id = &vti_net_id,
- .size = sizeof(struct vti_net),
+ .size = sizeof(struct ip_tunnel_net),
};
static int vti_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -728,78 +363,19 @@ static void vti_netlink_parms(struct nlattr *data[],
static int vti_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
- struct ip_tunnel *nt;
- struct net *net = dev_net(dev);
- struct vti_net *ipn = net_generic(net, vti_net_id);
- int mtu;
- int err;
-
- nt = netdev_priv(dev);
- vti_netlink_parms(data, &nt->parms);
-
- if (vti_tunnel_locate(net, &nt->parms, 0))
- return -EEXIST;
+ struct ip_tunnel_parm parms;
- mtu = vti_tunnel_bind_dev(dev);
- if (!tb[IFLA_MTU])
- dev->mtu = mtu;
-
- err = register_netdevice(dev);
- if (err)
- goto out;
-
- dev_hold(dev);
- vti_tunnel_link(ipn, nt);
-
-out:
- return err;
+ vti_netlink_parms(data, &parms);
+ return ip_tunnel_newlink(dev, tb, &parms);
}
static int vti_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[])
{
- struct ip_tunnel *t, *nt;
- struct net *net = dev_net(dev);
- struct vti_net *ipn = net_generic(net, vti_net_id);
struct ip_tunnel_parm p;
- int mtu;
-
- if (dev == ipn->fb_tunnel_dev)
- return -EINVAL;
- nt = netdev_priv(dev);
vti_netlink_parms(data, &p);
-
- t = vti_tunnel_locate(net, &p, 0);
-
- if (t) {
- if (t->dev != dev)
- return -EEXIST;
- } else {
- t = nt;
-
- vti_tunnel_unlink(ipn, t);
- t->parms.iph.saddr = p.iph.saddr;
- t->parms.iph.daddr = p.iph.daddr;
- t->parms.i_key = p.i_key;
- t->parms.o_key = p.o_key;
- if (dev->type != ARPHRD_ETHER) {
- memcpy(dev->dev_addr, &p.iph.saddr, 4);
- memcpy(dev->broadcast, &p.iph.daddr, 4);
- }
- vti_tunnel_link(ipn, t);
- netdev_state_change(dev);
- }
-
- if (t->parms.link != p.link) {
- t->parms.link = p.link;
- mtu = vti_tunnel_bind_dev(dev);
- if (!tb[IFLA_MTU])
- dev->mtu = mtu;
- netdev_state_change(dev);
- }
-
- return 0;
+ return ip_tunnel_changelink(dev, tb, &p);
}
static size_t vti_get_size(const struct net_device *dev)
@@ -865,7 +441,7 @@ static int __init vti_init(void)
err = xfrm4_mode_tunnel_input_register(&vti_handler);
if (err < 0) {
unregister_pernet_device(&vti_net_ops);
- pr_info(KERN_INFO "vti init: can't register tunnel\n");
+ pr_info("vti init: can't register tunnel\n");
}
err = rtnl_link_register(&vti_link_ops);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 51fc2a1dcdd..7f80fb4b82d 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -190,15 +190,14 @@ static int ipip_rcv(struct sk_buff *skb)
struct ip_tunnel *tunnel;
const struct iphdr *iph;
- if (iptunnel_pull_header(skb, 0, tpi.proto))
- goto drop;
-
iph = ip_hdr(skb);
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
iph->saddr, iph->daddr, 0);
if (tunnel) {
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop;
+ if (iptunnel_pull_header(skb, 0, tpi.proto))
+ goto drop;
return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
}
@@ -286,7 +285,6 @@ static void ipip_tunnel_setup(struct net_device *dev)
dev->flags = IFF_NOARP;
dev->iflink = 0;
dev->addr_len = 4;
- dev->features |= NETIF_F_NETNS_LOCAL;
dev->features |= NETIF_F_LLTX;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
@@ -437,7 +435,7 @@ static int __net_init ipip_init_net(struct net *net)
static void __net_exit ipip_exit_net(struct net *net)
{
struct ip_tunnel_net *itn = net_generic(net, ipip_net_id);
- ip_tunnel_delete_net(itn);
+ ip_tunnel_delete_net(itn, &ipip_link_ops);
}
static struct pernet_operations ipip_net_ops = {
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 132a0966470..9ae54b09254 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -127,9 +127,9 @@ static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);
-static int ip_mr_forward(struct net *net, struct mr_table *mrt,
- struct sk_buff *skb, struct mfc_cache *cache,
- int local);
+static void ip_mr_forward(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, struct mfc_cache *cache,
+ int local);
static int ipmr_cache_report(struct mr_table *mrt,
struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
@@ -1795,9 +1795,9 @@ static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
/* "local" means that we should preserve one skb (for local delivery) */
-static int ip_mr_forward(struct net *net, struct mr_table *mrt,
- struct sk_buff *skb, struct mfc_cache *cache,
- int local)
+static void ip_mr_forward(struct net *net, struct mr_table *mrt,
+ struct sk_buff *skb, struct mfc_cache *cache,
+ int local)
{
int psend = -1;
int vif, ct;
@@ -1903,14 +1903,13 @@ last_forward:
ipmr_queue_xmit(net, mrt, skb2, cache, psend);
} else {
ipmr_queue_xmit(net, mrt, skb, cache, psend);
- return 0;
+ return;
}
}
dont_forward:
if (!local)
kfree_skb(skb);
- return 0;
}
static struct mr_table *ipmr_rt_fib_lookup(struct net *net, struct sk_buff *skb)
@@ -2068,9 +2067,8 @@ static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
skb_reset_network_header(skb);
skb->protocol = htons(ETH_P_IP);
skb->ip_summed = CHECKSUM_NONE;
- skb->pkt_type = PACKET_HOST;
- skb_tunnel_rx(skb, reg_dev);
+ skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
netif_rx(skb);
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 4e902801742..1657e39b291 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -110,6 +110,19 @@ config IP_NF_TARGET_REJECT
To compile it as a module, choose M here. If unsure, say N.
+config IP_NF_TARGET_SYNPROXY
+ tristate "SYNPROXY target support"
+ depends on NF_CONNTRACK && NETFILTER_ADVANCED
+ select NETFILTER_SYNPROXY
+ select SYN_COOKIES
+ help
+ The SYNPROXY target allows you to intercept TCP connections and
+ establish them using syncookies before they are passed on to the
+ server. This lets you avoid conntrack and server resource usage
+ during SYN-flood attacks.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config IP_NF_TARGET_ULOG
tristate "ULOG target support (obsolete)"
default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 007b128eecc..3622b248b6d 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -46,6 +46,7 @@ obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
+obj-$(CONFIG_IP_NF_TARGET_SYNPROXY) += ipt_SYNPROXY.o
obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
# generic ARP tables
diff --git a/net/ipv4/netfilter/arptable_filter.c b/net/ipv4/netfilter/arptable_filter.c
index eadab1ed650..a865f6f9401 100644
--- a/net/ipv4/netfilter/arptable_filter.c
+++ b/net/ipv4/netfilter/arptable_filter.c
@@ -48,7 +48,7 @@ static int __net_init arptable_filter_net_init(struct net *net)
net->ipv4.arptable_filter =
arpt_register_table(net, &packet_filter, repl);
kfree(repl);
- return PTR_RET(net->ipv4.arptable_filter);
+ return PTR_ERR_OR_ZERO(net->ipv4.arptable_filter);
}
static void __net_exit arptable_filter_net_exit(struct net *net)
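Editor's note: several hunks in this series swap PTR_RET() for PTR_ERR_OR_ZERO(), which returns the negative errno encoded in an error pointer, or 0 for a normal pointer. The sketch below mimics the idiom with simplified my_is_err()/my_ptr_err() helpers; it illustrates the convention and is not the kernel's err.h implementation.

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* Simplified mimic of the kernel's error-pointer convention: small negative
 * values cast to a pointer encode an errno inside the pointer itself. */
#define MY_MAX_ERRNO	4095

static void *my_err_ptr(long err) { return (void *)(intptr_t)err; }
static int my_is_err(const void *p) { return (uintptr_t)p >= (uintptr_t)-MY_MAX_ERRNO; }
static long my_ptr_err(const void *p) { return (long)(intptr_t)p; }

/* Equivalent shape to PTR_ERR_OR_ZERO(): 0 for a real pointer, -errno otherwise. */
static long my_ptr_err_or_zero(const void *p)
{
	return my_is_err(p) ? my_ptr_err(p) : 0;
}

int main(void)
{
	int obj = 42;

	printf("%ld\n", my_ptr_err_or_zero(&obj));			/* 0   */
	printf("%ld\n", my_ptr_err_or_zero(my_err_ptr(-ENOMEM)));	/* -12 */
	return 0;
}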
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index 30e4de94056..00352ce0f0d 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -118,7 +118,7 @@ static int masq_device_event(struct notifier_block *this,
NF_CT_ASSERT(dev->ifindex != 0);
nf_ct_iterate_cleanup(net, device_cmp,
- (void *)(long)dev->ifindex);
+ (void *)(long)dev->ifindex, 0, 0);
}
return NOTIFY_DONE;
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 04b18c1ac34..b969131ad1c 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -119,7 +119,26 @@ static void send_reset(struct sk_buff *oldskb, int hook)
nf_ct_attach(nskb, oldskb);
- ip_local_out(nskb);
+#ifdef CONFIG_BRIDGE_NETFILTER
+ /* If we use ip_local_out for bridged traffic, the MAC source on
+ * the RST will be ours, instead of the destination's. This confuses
+ * some routers/firewalls, and they drop the packet. So we need to
+ * build the eth header using the original destination's MAC as the
+ * source, and send the RST packet directly.
+ */
+ if (oldskb->nf_bridge) {
+ struct ethhdr *oeth = eth_hdr(oldskb);
+ nskb->dev = oldskb->nf_bridge->physindev;
+ niph->tot_len = htons(nskb->len);
+ ip_send_check(niph);
+ if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
+ oeth->h_source, oeth->h_dest, nskb->len) < 0)
+ goto free_nskb;
+ dev_queue_xmit(nskb);
+ } else
+#endif
+ ip_local_out(nskb);
+
return;
free_nskb:
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
new file mode 100644
index 00000000000..67e17dcda65
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -0,0 +1,476 @@
+/*
+ * Copyright (c) 2013 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_SYNPROXY.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netfilter/nf_conntrack_synproxy.h>
+
+static struct iphdr *
+synproxy_build_ip(struct sk_buff *skb, u32 saddr, u32 daddr)
+{
+ struct iphdr *iph;
+
+ skb_reset_network_header(skb);
+ iph = (struct iphdr *)skb_put(skb, sizeof(*iph));
+ iph->version = 4;
+ iph->ihl = sizeof(*iph) / 4;
+ iph->tos = 0;
+ iph->id = 0;
+ iph->frag_off = htons(IP_DF);
+ iph->ttl = sysctl_ip_default_ttl;
+ iph->protocol = IPPROTO_TCP;
+ iph->check = 0;
+ iph->saddr = saddr;
+ iph->daddr = daddr;
+
+ return iph;
+}
+
+static void
+synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb,
+ struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
+ struct iphdr *niph, struct tcphdr *nth,
+ unsigned int tcp_hdr_size)
+{
+ nth->check = ~tcp_v4_check(tcp_hdr_size, niph->saddr, niph->daddr, 0);
+ nskb->ip_summed = CHECKSUM_PARTIAL;
+ nskb->csum_start = (unsigned char *)nth - nskb->head;
+ nskb->csum_offset = offsetof(struct tcphdr, check);
+
+ skb_dst_set_noref(nskb, skb_dst(skb));
+ nskb->protocol = htons(ETH_P_IP);
+ if (ip_route_me_harder(nskb, RTN_UNSPEC))
+ goto free_nskb;
+
+ if (nfct) {
+ nskb->nfct = nfct;
+ nskb->nfctinfo = ctinfo;
+ nf_conntrack_get(nfct);
+ }
+
+ ip_local_out(nskb);
+ return;
+
+free_nskb:
+ kfree_skb(nskb);
+}
+
+static void
+synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
+ const struct synproxy_options *opts)
+{
+ struct sk_buff *nskb;
+ struct iphdr *iph, *niph;
+ struct tcphdr *nth;
+ unsigned int tcp_hdr_size;
+ u16 mss = opts->mss;
+
+ iph = ip_hdr(skb);
+
+ tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
+ nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
+ GFP_ATOMIC);
+ if (nskb == NULL)
+ return;
+ skb_reserve(nskb, MAX_TCP_HEADER);
+
+ niph = synproxy_build_ip(nskb, iph->daddr, iph->saddr);
+
+ skb_reset_transport_header(nskb);
+ nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth->source = th->dest;
+ nth->dest = th->source;
+ nth->seq = htonl(__cookie_v4_init_sequence(iph, th, &mss));
+ nth->ack_seq = htonl(ntohl(th->seq) + 1);
+ tcp_flag_word(nth) = TCP_FLAG_SYN | TCP_FLAG_ACK;
+ if (opts->options & XT_SYNPROXY_OPT_ECN)
+ tcp_flag_word(nth) |= TCP_FLAG_ECE;
+ nth->doff = tcp_hdr_size / 4;
+ nth->window = 0;
+ nth->check = 0;
+ nth->urg_ptr = 0;
+
+ synproxy_build_options(nth, opts);
+
+ synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+ niph, nth, tcp_hdr_size);
+}
+
+static void
+synproxy_send_server_syn(const struct synproxy_net *snet,
+ const struct sk_buff *skb, const struct tcphdr *th,
+ const struct synproxy_options *opts, u32 recv_seq)
+{
+ struct sk_buff *nskb;
+ struct iphdr *iph, *niph;
+ struct tcphdr *nth;
+ unsigned int tcp_hdr_size;
+
+ iph = ip_hdr(skb);
+
+ tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
+ nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
+ GFP_ATOMIC);
+ if (nskb == NULL)
+ return;
+ skb_reserve(nskb, MAX_TCP_HEADER);
+
+ niph = synproxy_build_ip(nskb, iph->saddr, iph->daddr);
+
+ skb_reset_transport_header(nskb);
+ nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth->source = th->source;
+ nth->dest = th->dest;
+ nth->seq = htonl(recv_seq - 1);
+ /* ack_seq is used to relay our ISN to the synproxy hook to initialize
+ * sequence number translation once a connection tracking entry exists.
+ */
+ nth->ack_seq = htonl(ntohl(th->ack_seq) - 1);
+ tcp_flag_word(nth) = TCP_FLAG_SYN;
+ if (opts->options & XT_SYNPROXY_OPT_ECN)
+ tcp_flag_word(nth) |= TCP_FLAG_ECE | TCP_FLAG_CWR;
+ nth->doff = tcp_hdr_size / 4;
+ nth->window = th->window;
+ nth->check = 0;
+ nth->urg_ptr = 0;
+
+ synproxy_build_options(nth, opts);
+
+ synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
+ niph, nth, tcp_hdr_size);
+}
+
+static void
+synproxy_send_server_ack(const struct synproxy_net *snet,
+ const struct ip_ct_tcp *state,
+ const struct sk_buff *skb, const struct tcphdr *th,
+ const struct synproxy_options *opts)
+{
+ struct sk_buff *nskb;
+ struct iphdr *iph, *niph;
+ struct tcphdr *nth;
+ unsigned int tcp_hdr_size;
+
+ iph = ip_hdr(skb);
+
+ tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
+ nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
+ GFP_ATOMIC);
+ if (nskb == NULL)
+ return;
+ skb_reserve(nskb, MAX_TCP_HEADER);
+
+ niph = synproxy_build_ip(nskb, iph->daddr, iph->saddr);
+
+ skb_reset_transport_header(nskb);
+ nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth->source = th->dest;
+ nth->dest = th->source;
+ nth->seq = htonl(ntohl(th->ack_seq));
+ nth->ack_seq = htonl(ntohl(th->seq) + 1);
+ tcp_flag_word(nth) = TCP_FLAG_ACK;
+ nth->doff = tcp_hdr_size / 4;
+ nth->window = htons(state->seen[IP_CT_DIR_ORIGINAL].td_maxwin);
+ nth->check = 0;
+ nth->urg_ptr = 0;
+
+ synproxy_build_options(nth, opts);
+
+ synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+}
+
+static void
+synproxy_send_client_ack(const struct synproxy_net *snet,
+ const struct sk_buff *skb, const struct tcphdr *th,
+ const struct synproxy_options *opts)
+{
+ struct sk_buff *nskb;
+ struct iphdr *iph, *niph;
+ struct tcphdr *nth;
+ unsigned int tcp_hdr_size;
+
+ iph = ip_hdr(skb);
+
+ tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
+ nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
+ GFP_ATOMIC);
+ if (nskb == NULL)
+ return;
+ skb_reserve(nskb, MAX_TCP_HEADER);
+
+ niph = synproxy_build_ip(nskb, iph->saddr, iph->daddr);
+
+ skb_reset_transport_header(nskb);
+ nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth->source = th->source;
+ nth->dest = th->dest;
+ nth->seq = htonl(ntohl(th->seq) + 1);
+ nth->ack_seq = th->ack_seq;
+ tcp_flag_word(nth) = TCP_FLAG_ACK;
+ nth->doff = tcp_hdr_size / 4;
+ nth->window = ntohs(htons(th->window) >> opts->wscale);
+ nth->check = 0;
+ nth->urg_ptr = 0;
+
+ synproxy_build_options(nth, opts);
+
+ synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+}
+
+static bool
+synproxy_recv_client_ack(const struct synproxy_net *snet,
+ const struct sk_buff *skb, const struct tcphdr *th,
+ struct synproxy_options *opts, u32 recv_seq)
+{
+ int mss;
+
+ mss = __cookie_v4_check(ip_hdr(skb), th, ntohl(th->ack_seq) - 1);
+ if (mss == 0) {
+ this_cpu_inc(snet->stats->cookie_invalid);
+ return false;
+ }
+
+ this_cpu_inc(snet->stats->cookie_valid);
+ opts->mss = mss;
+
+ if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP)
+ synproxy_check_timestamp_cookie(opts);
+
+ synproxy_send_server_syn(snet, skb, th, opts, recv_seq);
+ return true;
+}
+
+static unsigned int
+synproxy_tg4(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_synproxy_info *info = par->targinfo;
+ struct synproxy_net *snet = synproxy_pernet(dev_net(par->in));
+ struct synproxy_options opts = {};
+ struct tcphdr *th, _th;
+
+ if (nf_ip_checksum(skb, par->hooknum, par->thoff, IPPROTO_TCP))
+ return NF_DROP;
+
+ th = skb_header_pointer(skb, par->thoff, sizeof(_th), &_th);
+ if (th == NULL)
+ return NF_DROP;
+
+ synproxy_parse_options(skb, par->thoff, th, &opts);
+
+ if (th->syn && !(th->ack || th->fin || th->rst)) {
+ /* Initial SYN from client */
+ this_cpu_inc(snet->stats->syn_received);
+
+ if (th->ece && th->cwr)
+ opts.options |= XT_SYNPROXY_OPT_ECN;
+
+ opts.options &= info->options;
+ if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
+ synproxy_init_timestamp_cookie(info, &opts);
+ else
+ opts.options &= ~(XT_SYNPROXY_OPT_WSCALE |
+ XT_SYNPROXY_OPT_SACK_PERM |
+ XT_SYNPROXY_OPT_ECN);
+
+ synproxy_send_client_synack(skb, th, &opts);
+ return NF_DROP;
+
+ } else if (th->ack && !(th->fin || th->rst || th->syn)) {
+ /* ACK from client */
+ synproxy_recv_client_ack(snet, skb, th, &opts, ntohl(th->seq));
+ return NF_DROP;
+ }
+
+ return XT_CONTINUE;
+}
+
+static unsigned int ipv4_synproxy_hook(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct synproxy_net *snet = synproxy_pernet(dev_net(in ? : out));
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+ struct nf_conn_synproxy *synproxy;
+ struct synproxy_options opts = {};
+ const struct ip_ct_tcp *state;
+ struct tcphdr *th, _th;
+ unsigned int thoff;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (ct == NULL)
+ return NF_ACCEPT;
+
+ synproxy = nfct_synproxy(ct);
+ if (synproxy == NULL)
+ return NF_ACCEPT;
+
+ if (nf_is_loopback_packet(skb))
+ return NF_ACCEPT;
+
+ thoff = ip_hdrlen(skb);
+ th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
+ if (th == NULL)
+ return NF_DROP;
+
+ state = &ct->proto.tcp;
+ switch (state->state) {
+ case TCP_CONNTRACK_CLOSE:
+ if (th->rst && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+ nf_ct_seqadj_init(ct, ctinfo, synproxy->isn -
+ ntohl(th->seq) + 1);
+ break;
+ }
+
+ if (!th->syn || th->ack ||
+ CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
+ break;
+
+ /* Reopened connection - reset the sequence number and timestamp
+ * adjustments; they will be initialized once the connection is
+ * reestablished.
+ */
+ nf_ct_seqadj_init(ct, ctinfo, 0);
+ synproxy->tsoff = 0;
+ this_cpu_inc(snet->stats->conn_reopened);
+
+ /* fall through */
+ case TCP_CONNTRACK_SYN_SENT:
+ synproxy_parse_options(skb, thoff, th, &opts);
+
+ if (!th->syn && th->ack &&
+ CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
+ /* Keep-Alives are sent with SEG.SEQ = SND.NXT-1,
+ * therefore we need to add 1 to make the SYN sequence
+ * number match that of the first SYN.
+ */
+ if (synproxy_recv_client_ack(snet, skb, th, &opts,
+ ntohl(th->seq) + 1))
+ this_cpu_inc(snet->stats->cookie_retrans);
+
+ return NF_DROP;
+ }
+
+ synproxy->isn = ntohl(th->ack_seq);
+ if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
+ synproxy->its = opts.tsecr;
+ break;
+ case TCP_CONNTRACK_SYN_RECV:
+ if (!th->syn || !th->ack)
+ break;
+
+ synproxy_parse_options(skb, thoff, th, &opts);
+ if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
+ synproxy->tsoff = opts.tsval - synproxy->its;
+
+ opts.options &= ~(XT_SYNPROXY_OPT_MSS |
+ XT_SYNPROXY_OPT_WSCALE |
+ XT_SYNPROXY_OPT_SACK_PERM);
+
+ swap(opts.tsval, opts.tsecr);
+ synproxy_send_server_ack(snet, state, skb, th, &opts);
+
+ nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq));
+
+ swap(opts.tsval, opts.tsecr);
+ synproxy_send_client_ack(snet, skb, th, &opts);
+
+ consume_skb(skb);
+ return NF_STOLEN;
+ default:
+ break;
+ }
+
+ synproxy_tstamp_adjust(skb, thoff, th, ct, ctinfo, synproxy);
+ return NF_ACCEPT;
+}
+
+static int synproxy_tg4_check(const struct xt_tgchk_param *par)
+{
+ const struct ipt_entry *e = par->entryinfo;
+
+ if (e->ip.proto != IPPROTO_TCP ||
+ e->ip.invflags & XT_INV_PROTO)
+ return -EINVAL;
+
+ return nf_ct_l3proto_try_module_get(par->family);
+}
+
+static void synproxy_tg4_destroy(const struct xt_tgdtor_param *par)
+{
+ nf_ct_l3proto_module_put(par->family);
+}
+
+static struct xt_target synproxy_tg4_reg __read_mostly = {
+ .name = "SYNPROXY",
+ .family = NFPROTO_IPV4,
+ .target = synproxy_tg4,
+ .targetsize = sizeof(struct xt_synproxy_info),
+ .checkentry = synproxy_tg4_check,
+ .destroy = synproxy_tg4_destroy,
+ .me = THIS_MODULE,
+};
+
+static struct nf_hook_ops ipv4_synproxy_ops[] __read_mostly = {
+ {
+ .hook = ipv4_synproxy_hook,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1,
+ },
+ {
+ .hook = ipv4_synproxy_hook,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV4,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1,
+ },
+};
+
+static int __init synproxy_tg4_init(void)
+{
+ int err;
+
+ err = nf_register_hooks(ipv4_synproxy_ops,
+ ARRAY_SIZE(ipv4_synproxy_ops));
+ if (err < 0)
+ goto err1;
+
+ err = xt_register_target(&synproxy_tg4_reg);
+ if (err < 0)
+ goto err2;
+
+ return 0;
+
+err2:
+ nf_unregister_hooks(ipv4_synproxy_ops, ARRAY_SIZE(ipv4_synproxy_ops));
+err1:
+ return err;
+}
+
+static void __exit synproxy_tg4_exit(void)
+{
+ xt_unregister_target(&synproxy_tg4_reg);
+ nf_unregister_hooks(ipv4_synproxy_ops, ARRAY_SIZE(ipv4_synproxy_ops));
+}
+
+module_init(synproxy_tg4_init);
+module_exit(synproxy_tg4_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
diff --git a/net/ipv4/netfilter/iptable_filter.c b/net/ipv4/netfilter/iptable_filter.c
index 6b3da5cf54e..50af5b45c05 100644
--- a/net/ipv4/netfilter/iptable_filter.c
+++ b/net/ipv4/netfilter/iptable_filter.c
@@ -69,7 +69,7 @@ static int __net_init iptable_filter_net_init(struct net *net)
net->ipv4.iptable_filter =
ipt_register_table(net, &packet_filter, repl);
kfree(repl);
- return PTR_RET(net->ipv4.iptable_filter);
+ return PTR_ERR_OR_ZERO(net->ipv4.iptable_filter);
}
static void __net_exit iptable_filter_net_exit(struct net *net)
diff --git a/net/ipv4/netfilter/iptable_mangle.c b/net/ipv4/netfilter/iptable_mangle.c
index cba5658ec82..0d8cd82e0fa 100644
--- a/net/ipv4/netfilter/iptable_mangle.c
+++ b/net/ipv4/netfilter/iptable_mangle.c
@@ -107,7 +107,7 @@ static int __net_init iptable_mangle_net_init(struct net *net)
net->ipv4.iptable_mangle =
ipt_register_table(net, &packet_mangler, repl);
kfree(repl);
- return PTR_RET(net->ipv4.iptable_mangle);
+ return PTR_ERR_OR_ZERO(net->ipv4.iptable_mangle);
}
static void __net_exit iptable_mangle_net_exit(struct net *net)
diff --git a/net/ipv4/netfilter/iptable_nat.c b/net/ipv4/netfilter/iptable_nat.c
index 6383273d54e..683bfaffed6 100644
--- a/net/ipv4/netfilter/iptable_nat.c
+++ b/net/ipv4/netfilter/iptable_nat.c
@@ -292,7 +292,7 @@ static int __net_init iptable_nat_net_init(struct net *net)
return -ENOMEM;
net->ipv4.nat_table = ipt_register_table(net, &nf_nat_ipv4_table, repl);
kfree(repl);
- return PTR_RET(net->ipv4.nat_table);
+ return PTR_ERR_OR_ZERO(net->ipv4.nat_table);
}
static void __net_exit iptable_nat_net_exit(struct net *net)
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 03d9696d3c6..1f82aea11df 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -48,7 +48,7 @@ static int __net_init iptable_raw_net_init(struct net *net)
net->ipv4.iptable_raw =
ipt_register_table(net, &packet_raw, repl);
kfree(repl);
- return PTR_RET(net->ipv4.iptable_raw);
+ return PTR_ERR_OR_ZERO(net->ipv4.iptable_raw);
}
static void __net_exit iptable_raw_net_exit(struct net *net)
diff --git a/net/ipv4/netfilter/iptable_security.c b/net/ipv4/netfilter/iptable_security.c
index b283d8e2601..f867a8d38bf 100644
--- a/net/ipv4/netfilter/iptable_security.c
+++ b/net/ipv4/netfilter/iptable_security.c
@@ -66,7 +66,7 @@ static int __net_init iptable_security_net_init(struct net *net)
net->ipv4.iptable_security =
ipt_register_table(net, &security_table, repl);
kfree(repl);
- return PTR_RET(net->ipv4.iptable_security);
+ return PTR_ERR_OR_ZERO(net->ipv4.iptable_security);
}
static void __net_exit iptable_security_net_exit(struct net *net)
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 0a2e0e3e95b..86f5b34a4ed 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -25,6 +25,7 @@
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
@@ -136,11 +137,7 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
/* adjust seqs for loopback traffic only in outgoing direction */
if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
!nf_is_loopback_packet(skb)) {
- typeof(nf_nat_seq_adjust_hook) seq_adjust;
-
- seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
- if (!seq_adjust ||
- !seq_adjust(skb, ct, ctinfo, ip_hdrlen(skb))) {
+ if (!nf_ct_seq_adjust(skb, ct, ctinfo, ip_hdrlen(skb))) {
NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
return NF_DROP;
}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 746427c9e71..d7d9882d4ca 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -1082,7 +1082,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
__u16 srcp = ntohs(inet->inet_sport);
seq_printf(f, "%5d: %08X:%04X %08X:%04X"
- " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n",
+ " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d%n",
bucket, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 6577a1149a4..4a0335854b8 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -111,7 +111,7 @@ static const struct snmp_mib snmp4_ipstats_list[] = {
SNMP_MIB_SENTINEL
};
-/* Following RFC4293 items are displayed in /proc/net/netstat */
+/* Following items are displayed in /proc/net/netstat */
static const struct snmp_mib snmp4_ipextstats_list[] = {
SNMP_MIB_ITEM("InNoRoutes", IPSTATS_MIB_INNOROUTES),
SNMP_MIB_ITEM("InTruncatedPkts", IPSTATS_MIB_INTRUNCATEDPKTS),
@@ -125,7 +125,12 @@ static const struct snmp_mib snmp4_ipextstats_list[] = {
SNMP_MIB_ITEM("OutMcastOctets", IPSTATS_MIB_OUTMCASTOCTETS),
SNMP_MIB_ITEM("InBcastOctets", IPSTATS_MIB_INBCASTOCTETS),
SNMP_MIB_ITEM("OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS),
+ /* Non RFC4293 fields */
SNMP_MIB_ITEM("InCsumErrors", IPSTATS_MIB_CSUMERRORS),
+ SNMP_MIB_ITEM("InNoECTPkts", IPSTATS_MIB_NOECTPKTS),
+ SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
+ SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
+ SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS),
SNMP_MIB_SENTINEL
};
@@ -273,7 +278,7 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD),
SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES),
- SNMP_MIB_ITEM("LowLatencyRxPackets", LINUX_MIB_LOWLATENCYRXPACKETS),
+ SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index dd44e0ab600..a86c7ae7188 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -571,7 +571,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
RT_SCOPE_UNIVERSE,
inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
- inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP,
+ inet_sk_flowi_flags(sk) | FLOWI_FLAG_CAN_SLEEP |
+ (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
daddr, saddr, 0, 0);
if (!inet->hdrincl) {
@@ -987,7 +988,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
srcp = inet->inet_num;
seq_printf(seq, "%4d: %08X:%04X %08X:%04X"
- " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
+ " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n",
i, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a9a54a23683..727f4365bcd 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -112,7 +112,8 @@
#define RT_FL_TOS(oldflp4) \
((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
-#define IP_MAX_MTU 0xFFF0
+/* IPv4 datagram length is stored in a 16-bit field (tot_len) */
+#define IP_MAX_MTU 0xFFFF
#define RT_GC_TIMEOUT (300*HZ)
@@ -435,12 +436,12 @@ static inline int ip_rt_proc_init(void)
static inline bool rt_is_expired(const struct rtable *rth)
{
- return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
+ return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}
void rt_cache_flush(struct net *net)
{
- rt_genid_bump(net);
+ rt_genid_bump_ipv4(net);
}
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
@@ -1227,10 +1228,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
mtu = 576;
}
- if (mtu > IP_MAX_MTU)
- mtu = IP_MAX_MTU;
-
- return mtu;
+ return min_t(unsigned int, mtu, IP_MAX_MTU);
}
static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
@@ -1458,7 +1456,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
#endif
rth->dst.output = ip_rt_bug;
- rth->rt_genid = rt_genid(dev_net(dev));
+ rth->rt_genid = rt_genid_ipv4(dev_net(dev));
rth->rt_flags = RTCF_MULTICAST;
rth->rt_type = RTN_MULTICAST;
rth->rt_is_input= 1;
@@ -1589,7 +1587,7 @@ static int __mkroute_input(struct sk_buff *skb,
goto cleanup;
}
- rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
+ rth->rt_genid = rt_genid_ipv4(dev_net(rth->dst.dev));
rth->rt_flags = flags;
rth->rt_type = res->type;
rth->rt_is_input = 1;
@@ -1760,7 +1758,7 @@ local_input:
rth->dst.tclassid = itag;
#endif
- rth->rt_genid = rt_genid(net);
+ rth->rt_genid = rt_genid_ipv4(net);
rth->rt_flags = flags|RTCF_LOCAL;
rth->rt_type = res.type;
rth->rt_is_input = 1;
@@ -1945,7 +1943,7 @@ add:
rth->dst.output = ip_output;
- rth->rt_genid = rt_genid(dev_net(dev_out));
+ rth->rt_genid = rt_genid_ipv4(dev_net(dev_out));
rth->rt_flags = flags;
rth->rt_type = type;
rth->rt_is_input = 0;
@@ -2227,7 +2225,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
rt->rt_iif = ort->rt_iif;
rt->rt_pmtu = ort->rt_pmtu;
- rt->rt_genid = rt_genid(net);
+ rt->rt_genid = rt_genid_ipv4(net);
rt->rt_flags = ort->rt_flags;
rt->rt_type = ort->rt_type;
rt->rt_gateway = ort->rt_gateway;
@@ -2665,7 +2663,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
static __net_init int rt_genid_init(struct net *net)
{
- atomic_set(&net->rt_genid, 0);
+ atomic_set(&net->ipv4.rt_genid, 0);
atomic_set(&net->fnhe_genid, 0);
get_random_bytes(&net->ipv4.dev_addr_genid,
sizeof(net->ipv4.dev_addr_genid));
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index b05c96e7af8..14a15c49129 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -160,26 +160,33 @@ static __u16 const msstab[] = {
* Generate a syncookie. mssp points to the mss, which is returned
* rounded down to the value encoded in the cookie.
*/
-__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
+u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
+ u16 *mssp)
{
- const struct iphdr *iph = ip_hdr(skb);
- const struct tcphdr *th = tcp_hdr(skb);
int mssind;
const __u16 mss = *mssp;
- tcp_synq_overflow(sk);
-
for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--)
if (mss >= msstab[mssind])
break;
*mssp = msstab[mssind];
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
-
return secure_tcp_syn_cookie(iph->saddr, iph->daddr,
th->source, th->dest, ntohl(th->seq),
jiffies / (HZ * 60), mssind);
}
+EXPORT_SYMBOL_GPL(__cookie_v4_init_sequence);
+
+__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
+{
+ const struct iphdr *iph = ip_hdr(skb);
+ const struct tcphdr *th = tcp_hdr(skb);
+
+ tcp_synq_overflow(sk);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
+
+ return __cookie_v4_init_sequence(iph, th, mssp);
+}
/*
* This (misnamed) value is the age of syncookie which is permitted.
@@ -192,10 +199,9 @@ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
* Check if a ack sequence number is a valid syncookie.
* Return the decoded mss if it is, or 0 if not.
*/
-static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
+int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
+ u32 cookie)
{
- const struct iphdr *iph = ip_hdr(skb);
- const struct tcphdr *th = tcp_hdr(skb);
__u32 seq = ntohl(th->seq) - 1;
__u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr,
th->source, th->dest, seq,
@@ -204,6 +210,7 @@ static inline int cookie_check(struct sk_buff *skb, __u32 cookie)
return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
}
+EXPORT_SYMBOL_GPL(__cookie_v4_check);
static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
@@ -284,7 +291,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
goto out;
if (tcp_synq_no_recent_overflow(sk) ||
- (mss = cookie_check(skb, cookie)) == 0) {
+ (mss = __cookie_v4_check(ip_hdr(skb), th, cookie)) == 0) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
goto out;
}
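
The hunks above split the pure, stateless cookie math (__cookie_v4_init_sequence / __cookie_v4_check) out of the wrapper that touches socket state and SNMP counters. One small piece of that math is the MSS-table rounding step; the sketch below is a minimal userspace illustration of it, using a hypothetical table in place of msstab (not the kernel code itself):

#include <stdio.h>

/* Hypothetical MSS table, ordered ascending like msstab. */
static const unsigned short mss_table[] = { 536, 1300, 1440, 1460 };
#define TABLE_SIZE (sizeof(mss_table) / sizeof(mss_table[0]))

/* Round the peer's MSS down to the largest table entry that fits;
 * the resulting index is what gets encoded into the cookie. */
static int mss_to_index(unsigned short mss)
{
        int i;

        for (i = TABLE_SIZE - 1; i > 0; i--)
                if (mss >= mss_table[i])
                        break;
        return i;
}

int main(void)
{
        unsigned short mss = 1400;
        int idx = mss_to_index(mss);

        printf("mss %u -> index %d (encoded mss %u)\n",
               mss, idx, mss_table[idx]);
        return 0;
}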
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index b2c123c44d6..540279f4c53 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -29,6 +29,7 @@
static int zero;
static int one = 1;
static int four = 4;
+static int gso_max_segs = GSO_MAX_SEGS;
static int tcp_retr1_max = 255;
static int ip_local_port_range_min[] = { 1, 1 };
static int ip_local_port_range_max[] = { 65535, 65535 };
@@ -36,6 +37,8 @@ static int tcp_adv_win_scale_min = -31;
static int tcp_adv_win_scale_max = 31;
static int ip_ttl_min = 1;
static int ip_ttl_max = 255;
+static int tcp_syn_retries_min = 1;
+static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
@@ -332,7 +335,9 @@ static struct ctl_table ipv4_table[] = {
.data = &sysctl_tcp_syn_retries,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &tcp_syn_retries_min,
+ .extra2 = &tcp_syn_retries_max
},
{
.procname = "tcp_synack_retries",
@@ -555,6 +560,13 @@ static struct ctl_table ipv4_table[] = {
.extra1 = &one,
},
{
+ .procname = "tcp_notsent_lowat",
+ .data = &sysctl_tcp_notsent_lowat,
+ .maxlen = sizeof(sysctl_tcp_notsent_lowat),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "tcp_rmem",
.data = &sysctl_tcp_rmem,
.maxlen = sizeof(sysctl_tcp_rmem),
@@ -750,6 +762,15 @@ static struct ctl_table ipv4_table[] = {
.extra2 = &four,
},
{
+ .procname = "tcp_min_tso_segs",
+ .data = &sysctl_tcp_min_tso_segs,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &gso_max_segs,
+ },
+ {
.procname = "udp_mem",
.data = &sysctl_udp_mem,
.maxlen = sizeof(sysctl_udp_mem),
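
Switching tcp_syn_retries from proc_dointvec to proc_dointvec_minmax means writes outside [1, MAX_TCP_SYNCNT] are rejected instead of silently accepted. A rough userspace model of that reject-out-of-range policy is sketched below; the struct and function names are illustrative, not kernel APIs, and 127 is assumed for MAX_TCP_SYNCNT:

#include <stdio.h>

struct int_ctl {
        int *data;
        int min;
        int max;
};

/* Accept the new value only if it lies inside [min, max];
 * mirrors the behaviour of a min/max proc handler. */
static int ctl_write(struct int_ctl *ctl, int val)
{
        if (val < ctl->min || val > ctl->max)
                return -1;      /* -EINVAL in the kernel */
        *ctl->data = val;
        return 0;
}

int main(void)
{
        int tcp_syn_retries = 6;
        struct int_ctl ctl = { &tcp_syn_retries, 1, 127 };

        printf("write 0   -> %d (value %d)\n", ctl_write(&ctl, 0), tcp_syn_retries);
        printf("write 5   -> %d (value %d)\n", ctl_write(&ctl, 5), tcp_syn_retries);
        printf("write 200 -> %d (value %d)\n", ctl_write(&ctl, 200), tcp_syn_retries);
        return 0;
}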
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5423223e93c..6e5617b9f9d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -283,6 +283,8 @@
int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
+int sysctl_tcp_min_tso_segs __read_mostly = 2;
+
struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);
@@ -410,10 +412,6 @@ void tcp_init_sock(struct sock *sk)
icsk->icsk_sync_mss = tcp_sync_mss;
- /* Presumed zeroed, in order of appearance:
- * cookie_in_always, cookie_out_never,
- * s_data_constant, s_data_in, s_data_out
- */
sk->sk_sndbuf = sysctl_tcp_wmem[1];
sk->sk_rcvbuf = sysctl_tcp_rmem[1];
@@ -499,7 +497,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
mask |= POLLIN | POLLRDNORM;
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
+ if (sk_stream_is_writeable(sk)) {
mask |= POLLOUT | POLLWRNORM;
} else { /* send SIGIO later */
set_bit(SOCK_ASYNC_NOSPACE,
@@ -510,7 +508,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
* wspace test but before the flags are set,
* IO signal will be lost.
*/
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
+ if (sk_stream_is_writeable(sk))
mask |= POLLOUT | POLLWRNORM;
}
} else
@@ -789,12 +787,28 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
xmit_size_goal = mss_now;
if (large_allowed && sk_can_gso(sk)) {
- xmit_size_goal = ((sk->sk_gso_max_size - 1) -
- inet_csk(sk)->icsk_af_ops->net_header_len -
- inet_csk(sk)->icsk_ext_hdr_len -
- tp->tcp_header_len);
+ u32 gso_size, hlen;
+
+ /* Maybe we should/could use sk->sk_prot->max_header here? */
+ hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
+ inet_csk(sk)->icsk_ext_hdr_len +
+ tp->tcp_header_len;
+
+ /* Goal is to send at least one packet per ms,
+ * not one big TSO packet every 100 ms.
+ * This preserves ACK clocking and is consistent
+ * with tcp_tso_should_defer() heuristic.
+ */
+ gso_size = sk->sk_pacing_rate / (2 * MSEC_PER_SEC);
+ gso_size = max_t(u32, gso_size,
+ sysctl_tcp_min_tso_segs * mss_now);
+
+ xmit_size_goal = min_t(u32, gso_size,
+ sk->sk_gso_max_size - 1 - hlen);
- /* TSQ : try to have two TSO segments in flight */
+ /* TSQ : try to have at least two segments in flight
+ * (one in NIC TX ring, another in Qdisc)
+ */
xmit_size_goal = min_t(u32, xmit_size_goal,
sysctl_tcp_limit_output_bytes >> 1);
@@ -1121,6 +1135,13 @@ new_segment:
goto wait_for_memory;
/*
+ * All packets are restored as if they have
+ * already been sent.
+ */
+ if (tp->repair)
+ TCP_SKB_CB(skb)->when = tcp_time_stamp;
+
+ /*
* Check whether we can use HW checksum.
*/
if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
@@ -2447,10 +2468,11 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
case TCP_THIN_DUPACK:
if (val < 0 || val > 1)
err = -EINVAL;
- else
+ else {
tp->thin_dupack = val;
if (tp->thin_dupack)
tcp_disable_early_retrans(tp);
+ }
break;
case TCP_REPAIR:
@@ -2631,6 +2653,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
else
tp->tsoffset = val - tcp_time_stamp;
break;
+ case TCP_NOTSENT_LOWAT:
+ tp->notsent_lowat = val;
+ sk->sk_write_space(sk);
+ break;
default:
err = -ENOPROTOOPT;
break;
@@ -2847,6 +2873,9 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
case TCP_TIMESTAMP:
val = tcp_time_stamp + tp->tsoffset;
break;
+ case TCP_NOTSENT_LOWAT:
+ val = tp->notsent_lowat;
+ break;
default:
return -ENOPROTOOPT;
}
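
The new tcp_xmit_size_goal() logic above sizes TSO bursts from the pacing rate: roughly the bytes covered in half a millisecond, never below tcp_min_tso_segs full-sized segments and never above the GSO limit minus headers. The sketch below is a standalone version of that arithmetic only, with made-up input values:

#include <stdio.h>
#include <stdint.h>

#define MSEC_PER_SEC 1000U

static uint32_t size_goal(uint32_t pacing_rate,  /* bytes per second */
                          uint32_t mss,
                          uint32_t min_tso_segs,
                          uint32_t gso_max_size,
                          uint32_t hlen)
{
        /* Bytes sent in ~0.5 ms at the current pacing rate. */
        uint32_t gso_size = pacing_rate / (2 * MSEC_PER_SEC);

        /* Never below the configured minimum number of full segments. */
        if (gso_size < min_tso_segs * mss)
                gso_size = min_tso_segs * mss;

        /* Never above what one GSO packet can carry after headers. */
        if (gso_size > gso_max_size - 1 - hlen)
                gso_size = gso_max_size - 1 - hlen;

        return gso_size;
}

int main(void)
{
        /* ~100 Mbit/s pacing, 1448-byte MSS, 2 min segs, 64 KB GSO, 52 B headers */
        printf("size goal = %u bytes\n",
               size_goal(100000000 / 8, 1448, 2, 65536, 52));
        return 0;
}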
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index a9077f441cb..b6ae92a51f5 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -206,8 +206,8 @@ static u32 cubic_root(u64 a)
*/
static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
{
- u64 offs;
- u32 delta, t, bic_target, max_cnt;
+ u32 delta, bic_target, max_cnt;
+ u64 offs, t;
ca->ack_cnt++; /* count the number of ACKs */
@@ -250,9 +250,11 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
* if the cwnd < 1 million packets !!!
*/
+ t = (s32)(tcp_time_stamp - ca->epoch_start);
+ t += msecs_to_jiffies(ca->delay_min >> 3);
/* change the unit from HZ to bictcp_HZ */
- t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3)
- - ca->epoch_start) << BICTCP_HZ) / HZ;
+ t <<= BICTCP_HZ;
+ do_div(t, HZ);
if (t < ca->bic_K) /* t - K */
offs = ca->bic_K - t;
@@ -414,7 +416,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
return;
/* Discard delay samples right after fast recovery */
- if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ)
+ if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
return;
delay = (rtt_us << 3) / USEC_PER_MSEC;
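
The bictcp_update() change widens t to 64 bits and uses do_div() so that "(elapsed + delay) << BICTCP_HZ / HZ" cannot wrap a 32-bit value on long-lived flows. A plain C comparison of the old and new arithmetic, with hypothetical jiffies values chosen to show the wrap (assuming HZ=1000):

#include <stdio.h>
#include <stdint.h>

#define HZ        1000
#define BICTCP_HZ 10            /* BIC time unit: 2^10 per second */

int main(void)
{
        uint32_t now          = 4000000000U;  /* current jiffies */
        uint32_t epoch_start  = 3990000000U;  /* epoch began ~10000 s ago */
        uint32_t delay_min_ms = 100;          /* ca->delay_min >> 3, in ms */

        /* Old 32-bit arithmetic: the left shift wraps before the divide. */
        uint32_t t32 = ((now + delay_min_ms - epoch_start) << BICTCP_HZ) / HZ;

        /* New arithmetic, equivalent to the patched code with do_div(). */
        uint64_t t64 = (int32_t)(now - epoch_start);  /* elapsed jiffies */
        t64 += delay_min_ms;                          /* msecs_to_jiffies(), HZ=1000 */
        t64 <<= BICTCP_HZ;
        t64 /= HZ;

        printf("32-bit t = %u, 64-bit t = %llu\n",
               t32, (unsigned long long)t64);
        return 0;
}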
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 8f7ef0ad80e..ab7bd35bb31 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -58,23 +58,22 @@ error: kfree(ctx);
return err;
}
-/* Computes the fastopen cookie for the peer.
- * The peer address is a 128 bits long (pad with zeros for IPv4).
+/* Computes the fastopen cookie for the IP path.
+ * The path is 128 bits long (padded with zeros for IPv4).
*
* The caller must check foc->len to determine if a valid cookie
* has been generated successfully.
*/
-void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc)
+void tcp_fastopen_cookie_gen(__be32 src, __be32 dst,
+ struct tcp_fastopen_cookie *foc)
{
- __be32 peer_addr[4] = { addr, 0, 0, 0 };
+ __be32 path[4] = { src, dst, 0, 0 };
struct tcp_fastopen_context *ctx;
rcu_read_lock();
ctx = rcu_dereference(tcp_fastopen_ctx);
if (ctx) {
- crypto_cipher_encrypt_one(ctx->tfm,
- foc->val,
- (__u8 *)peer_addr);
+ crypto_cipher_encrypt_one(ctx->tfm, foc->val, (__u8 *)path);
foc->len = TCP_FASTOPEN_COOKIE_SIZE;
}
rcu_read_unlock();
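
Including both source and destination addresses means the Fast Open cookie now binds to the IP path rather than only the peer address. The trivial sketch below only shows how the 128-bit input block is laid out and zero-padded for IPv4; the AES encryption step is omitted and the values are arbitrary:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
        uint32_t src = 0x0a000001;      /* example 32-bit address words; */
        uint32_t dst = 0x0a000002;      /* byte order is irrelevant here */
        uint32_t path[4] = { src, dst, 0, 0 };  /* zero-padded to 128 bits */
        unsigned char block[16];
        int i;

        memcpy(block, path, sizeof(block));     /* the block fed to the cipher */
        for (i = 0; i < 16; i++)
                printf("%02x%s", block[i], i == 15 ? "\n" : " ");
        return 0;
}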
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 28af45abe06..1969e16d936 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -688,6 +688,34 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
}
}
+/* Set the sk_pacing_rate to allow proper sizing of TSO packets.
+ * Note: TCP stack does not yet implement pacing.
+ * FQ packet scheduler can be used to implement cheap but effective
+ * TCP pacing, to smooth the burst on large writes when packets
+ * in flight is significantly lower than cwnd (or rwin)
+ */
+static void tcp_update_pacing_rate(struct sock *sk)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+ u64 rate;
+
+ /* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
+ rate = (u64)tp->mss_cache * 2 * (HZ << 3);
+
+ rate *= max(tp->snd_cwnd, tp->packets_out);
+
+ /* Correction for small srtt : minimum srtt being 8 (1 jiffy << 3),
+ * be conservative and assume srtt = 1 (125 us instead of 1.25 ms)
+ * We probably need usec resolution in the future.
+ * Note: This also takes care of possible srtt=0 case,
+ * when tcp_rtt_estimator() was not yet called.
+ */
+ if (tp->srtt > 8 + 2)
+ do_div(rate, tp->srtt);
+
+ sk->sk_pacing_rate = min_t(u64, rate, ~0U);
+}
+
/* Calculate rto without backoff. This is the second half of Van Jacobson's
* routine referred to above.
*/
@@ -1048,6 +1076,7 @@ struct tcp_sacktag_state {
int reord;
int fack_count;
int flag;
+ s32 rtt; /* RTT measured by SACKing never-retransmitted data */
};
/* Check if skb is fully within the SACK block. In presence of GSO skbs,
@@ -1108,7 +1137,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
static u8 tcp_sacktag_one(struct sock *sk,
struct tcp_sacktag_state *state, u8 sacked,
u32 start_seq, u32 end_seq,
- bool dup_sack, int pcount)
+ int dup_sack, int pcount, u32 xmit_time)
{
struct tcp_sock *tp = tcp_sk(sk);
int fack_count = state->fack_count;
@@ -1148,6 +1177,9 @@ static u8 tcp_sacktag_one(struct sock *sk,
state->reord);
if (!after(end_seq, tp->high_seq))
state->flag |= FLAG_ORIG_SACK_ACKED;
+ /* Pick the earliest sequence sacked for RTT */
+ if (state->rtt < 0)
+ state->rtt = tcp_time_stamp - xmit_time;
}
if (sacked & TCPCB_LOST) {
@@ -1205,7 +1237,8 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
* tcp_highest_sack_seq() when skb is highest_sack.
*/
tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
- start_seq, end_seq, dup_sack, pcount);
+ start_seq, end_seq, dup_sack, pcount,
+ TCP_SKB_CB(skb)->when);
if (skb == tp->lost_skb_hint)
tp->lost_cnt_hint += pcount;
@@ -1479,7 +1512,8 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
TCP_SKB_CB(skb)->seq,
TCP_SKB_CB(skb)->end_seq,
dup_sack,
- tcp_skb_pcount(skb));
+ tcp_skb_pcount(skb),
+ TCP_SKB_CB(skb)->when);
if (!before(TCP_SKB_CB(skb)->seq,
tcp_highest_sack_seq(tp)))
@@ -1536,7 +1570,7 @@ static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_bl
static int
tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
- u32 prior_snd_una)
+ u32 prior_snd_una, s32 *sack_rtt)
{
struct tcp_sock *tp = tcp_sk(sk);
const unsigned char *ptr = (skb_transport_header(ack_skb) +
@@ -1554,6 +1588,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
state.flag = 0;
state.reord = tp->packets_out;
+ state.rtt = -1;
if (!tp->sacked_out) {
if (WARN_ON(tp->fackets_out))
@@ -1737,6 +1772,7 @@ out:
WARN_ON((int)tp->retrans_out < 0);
WARN_ON((int)tcp_packets_in_flight(tp) < 0);
#endif
+ *sack_rtt = state.rtt;
return state.flag;
}
@@ -1869,8 +1905,13 @@ void tcp_enter_loss(struct sock *sk, int how)
}
tcp_verify_left_out(tp);
- tp->reordering = min_t(unsigned int, tp->reordering,
- sysctl_tcp_reordering);
+ /* Timeout in disordered state after receiving substantial DUPACKs
+ * suggests that the degree of reordering is over-estimated.
+ */
+ if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
+ tp->sacked_out >= sysctl_tcp_reordering)
+ tp->reordering = min_t(unsigned int, tp->reordering,
+ sysctl_tcp_reordering);
tcp_set_ca_state(sk, TCP_CA_Loss);
tp->high_seq = tp->snd_nxt;
TCP_ECN_queue_cwr(tp);
@@ -2472,8 +2513,6 @@ static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
tcp_try_keep_open(sk);
- if (inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
- tcp_moderate_cwnd(tp);
} else {
tcp_cwnd_reduction(sk, prior_unsacked, 0);
}
@@ -2792,65 +2831,51 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
tcp_xmit_retransmit_queue(sk);
}
-void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt)
+static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
+ s32 seq_rtt, s32 sack_rtt)
{
- tcp_rtt_estimator(sk, seq_rtt);
- tcp_set_rto(sk);
- inet_csk(sk)->icsk_backoff = 0;
-}
-EXPORT_SYMBOL(tcp_valid_rtt_meas);
+ const struct tcp_sock *tp = tcp_sk(sk);
+
+ /* Prefer RTT measured from ACK's timing to TS-ECR. This is because
+ * broken middle-boxes or peers may corrupt TS-ECR fields. But
+ * Karn's algorithm forbids taking RTT if some retransmitted data
+ * is acked (RFC6298).
+ */
+ if (flag & FLAG_RETRANS_DATA_ACKED)
+ seq_rtt = -1;
+
+ if (seq_rtt < 0)
+ seq_rtt = sack_rtt;
-/* Read draft-ietf-tcplw-high-performance before mucking
- * with this code. (Supersedes RFC1323)
- */
-static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
-{
/* RTTM Rule: A TSecr value received in a segment is used to
* update the averaged RTT measurement only if the segment
* acknowledges some new data, i.e., only if it advances the
* left edge of the send window.
- *
* See draft-ietf-tcplw-high-performance-00, section 3.3.
- * 1998/04/10 Andrey V. Savochkin <saw@msu.ru>
- *
- * Changed: reset backoff as soon as we see the first valid sample.
- * If we do not, we get strongly overestimated rto. With timestamps
- * samples are accepted even from very old segments: f.e., when rtt=1
- * increases to 8, we retransmit 5 times and after 8 seconds delayed
- * answer arrives rto becomes 120 seconds! If at least one of segments
- * in window is lost... Voila. --ANK (010210)
*/
- struct tcp_sock *tp = tcp_sk(sk);
-
- tcp_valid_rtt_meas(sk, tcp_time_stamp - tp->rx_opt.rcv_tsecr);
-}
+ if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
+ seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
-static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
-{
- /* We don't have a timestamp. Can only use
- * packets that are not retransmitted to determine
- * rtt estimates. Also, we must not reset the
- * backoff for rto until we get a non-retransmitted
- * packet. This allows us to deal with a situation
- * where the network delay has increased suddenly.
- * I.e. Karn's algorithm. (SIGCOMM '87, p5.)
- */
+ if (seq_rtt < 0)
+ return false;
- if (flag & FLAG_RETRANS_DATA_ACKED)
- return;
+ tcp_rtt_estimator(sk, seq_rtt);
+ tcp_set_rto(sk);
- tcp_valid_rtt_meas(sk, seq_rtt);
+ /* RFC6298: only reset backoff on valid RTT measurement. */
+ inet_csk(sk)->icsk_backoff = 0;
+ return true;
}
-static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
- const s32 seq_rtt)
+/* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */
+static void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
{
- const struct tcp_sock *tp = tcp_sk(sk);
- /* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */
- if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
- tcp_ack_saw_tstamp(sk, flag);
- else if (seq_rtt >= 0)
- tcp_ack_no_tstamp(sk, seq_rtt, flag);
+ struct tcp_sock *tp = tcp_sk(sk);
+ s32 seq_rtt = -1;
+
+ if (tp->lsndtime && !tp->total_retrans)
+ seq_rtt = tcp_time_stamp - tp->lsndtime;
+ tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
}
static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
@@ -2939,7 +2964,7 @@ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
* arrived at the other end.
*/
static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
- u32 prior_snd_una)
+ u32 prior_snd_una, s32 sack_rtt)
{
struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
@@ -2978,8 +3003,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (sacked & TCPCB_SACKED_RETRANS)
tp->retrans_out -= acked_pcount;
flag |= FLAG_RETRANS_DATA_ACKED;
- ca_seq_rtt = -1;
- seq_rtt = -1;
} else {
ca_seq_rtt = now - scb->when;
last_ackt = skb->tstamp;
@@ -3031,6 +3054,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
flag |= FLAG_SACK_RENEGING;
+ if (tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt) ||
+ (flag & FLAG_ACKED))
+ tcp_rearm_rto(sk);
+
if (flag & FLAG_ACKED) {
const struct tcp_congestion_ops *ca_ops
= inet_csk(sk)->icsk_ca_ops;
@@ -3040,9 +3067,6 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
tcp_mtup_probe_success(sk);
}
- tcp_ack_update_rtt(sk, flag, seq_rtt);
- tcp_rearm_rto(sk);
-
if (tcp_is_reno(tp)) {
tcp_remove_reno_sacks(sk, pkts_acked);
} else {
@@ -3130,11 +3154,24 @@ static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
}
+/* Decide whether to run the increase function of congestion control. */
static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
{
- const struct tcp_sock *tp = tcp_sk(sk);
- return (!(flag & FLAG_ECE) || tp->snd_cwnd < tp->snd_ssthresh) &&
- !tcp_in_cwnd_reduction(sk);
+ if (tcp_in_cwnd_reduction(sk))
+ return false;
+
+ /* If reordering is high then always grow cwnd whenever data is
+ * delivered regardless of its ordering. Otherwise stay conservative
+ * and only grow cwnd on in-order delivery in Open state, and retain
+ * cwnd in Disordered state (RFC5681). A stretched ACK with
+ * new SACK or ECE mark may first advance cwnd here and later reduce
+ * cwnd in tcp_fastretrans_alert() based on more states.
+ */
+ if (tcp_sk(sk)->reordering > sysctl_tcp_reordering)
+ return flag & FLAG_FORWARD_PROGRESS;
+
+ return inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
+ flag & FLAG_DATA_ACKED;
}
/* Check that window update is acceptable.
@@ -3269,11 +3306,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
u32 ack_seq = TCP_SKB_CB(skb)->seq;
u32 ack = TCP_SKB_CB(skb)->ack_seq;
bool is_dupack = false;
- u32 prior_in_flight;
+ u32 prior_in_flight, prior_cwnd = tp->snd_cwnd, prior_rtt = tp->srtt;
u32 prior_fackets;
int prior_packets = tp->packets_out;
const int prior_unsacked = tp->packets_out - tp->sacked_out;
int acked = 0; /* Number of packets newly acked */
+ s32 sack_rtt = -1;
/* If the ack is older than previous acks
* then we can probably ignore it.
@@ -3330,7 +3368,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
if (TCP_SKB_CB(skb)->sacked)
- flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
+ flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
+ &sack_rtt);
if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
flag |= FLAG_ECE;
@@ -3349,21 +3388,18 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
/* See if we can take anything off of the retransmit queue. */
acked = tp->packets_out;
- flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
+ flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, sack_rtt);
acked -= tp->packets_out;
+ /* Advance cwnd if state allows */
+ if (tcp_may_raise_cwnd(sk, flag))
+ tcp_cong_avoid(sk, ack, prior_in_flight);
+
if (tcp_ack_is_dubious(sk, flag)) {
- /* Advance CWND, if state allows this. */
- if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag))
- tcp_cong_avoid(sk, ack, prior_in_flight);
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
tcp_fastretrans_alert(sk, acked, prior_unsacked,
is_dupack, flag);
- } else {
- if (flag & FLAG_DATA_ACKED)
- tcp_cong_avoid(sk, ack, prior_in_flight);
}
-
if (tp->tlp_high_seq)
tcp_process_tlp_ack(sk, ack, flag);
@@ -3375,6 +3411,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (icsk->icsk_pending == ICSK_TIME_RETRANS)
tcp_schedule_loss_probe(sk);
+ if (tp->srtt != prior_rtt || tp->snd_cwnd != prior_cwnd)
+ tcp_update_pacing_rate(sk);
return 1;
no_queue:
@@ -3402,7 +3440,8 @@ old_ack:
* If data was DSACKed, see if we can undo a cwnd reduction.
*/
if (TCP_SKB_CB(skb)->sacked) {
- flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
+ flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
+ &sack_rtt);
tcp_fastretrans_alert(sk, acked, prior_unsacked,
is_dupack, flag);
}
@@ -3535,7 +3574,10 @@ static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr
++ptr;
tp->rx_opt.rcv_tsval = ntohl(*ptr);
++ptr;
- tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
+ if (*ptr)
+ tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset;
+ else
+ tp->rx_opt.rcv_tsecr = 0;
return true;
}
return false;
@@ -3560,7 +3602,7 @@ static bool tcp_fast_parse_options(const struct sk_buff *skb,
}
tcp_parse_options(skb, &tp->rx_opt, 1, NULL);
- if (tp->rx_opt.saw_tstamp)
+ if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
tp->rx_opt.rcv_tsecr -= tp->tsoffset;
return true;
@@ -5010,8 +5052,8 @@ discard:
* the rest is checked inline. Fast processing is turned on in
* tcp_data_queue when everything is OK.
*/
-int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
- const struct tcphdr *th, unsigned int len)
+void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+ const struct tcphdr *th, unsigned int len)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -5088,7 +5130,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
tcp_ack(sk, skb, 0);
__kfree_skb(skb);
tcp_data_snd_check(sk);
- return 0;
+ return;
} else { /* Header too small */
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
goto discard;
@@ -5181,7 +5223,7 @@ no_ack:
if (eaten)
kfree_skb_partial(skb, fragstolen);
sk->sk_data_ready(sk, 0);
- return 0;
+ return;
}
}
@@ -5197,7 +5239,7 @@ slow_path:
*/
if (!tcp_validate_incoming(sk, skb, th, 1))
- return 0;
+ return;
step5:
if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0)
@@ -5213,7 +5255,7 @@ step5:
tcp_data_snd_check(sk);
tcp_ack_snd_check(sk);
- return 0;
+ return;
csum_error:
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
@@ -5221,7 +5263,6 @@ csum_error:
discard:
__kfree_skb(skb);
- return 0;
}
EXPORT_SYMBOL(tcp_rcv_established);
@@ -5316,7 +5357,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
int saved_clamp = tp->rx_opt.mss_clamp;
tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
- if (tp->rx_opt.saw_tstamp)
+ if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
tp->rx_opt.rcv_tsecr -= tp->tsoffset;
if (th->ack) {
@@ -5624,9 +5665,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
* so release it.
*/
if (req) {
- tcp_synack_rtt_meas(sk, req);
tp->total_retrans = req->num_retrans;
-
reqsk_fastopen_remove(sk, req, false);
} else {
/* Make sure socket is routed, for correct metrics. */
@@ -5651,6 +5690,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
+ tcp_synack_rtt_meas(sk, req);
if (tp->rx_opt.tstamp_ok)
tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
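
tcp_update_pacing_rate() above sets sk_pacing_rate to roughly 200% of the current delivery rate, i.e. 2 * mss * max(cwnd, packets_out) / srtt, with srtt kept in (jiffies << 3) units. The standalone sketch below reproduces only that arithmetic, assuming HZ=1000 and illustrative values:

#include <stdio.h>
#include <stdint.h>

#define HZ 1000

/* ~2 * mss * cwnd / srtt, with srtt scaled by 8 as in the kernel. */
static uint64_t pacing_rate(uint32_t mss, uint32_t cwnd, uint32_t packets_out,
                            uint32_t srtt_shifted3)
{
        uint64_t rate = (uint64_t)mss * 2 * (HZ << 3);

        rate *= (cwnd > packets_out) ? cwnd : packets_out;

        /* Guard against tiny or zero srtt: below ~1 jiffy, skip the divide
         * and effectively assume a 1-unit srtt, as the patch does. */
        if (srtt_shifted3 > 8 + 2)
                rate /= srtt_shifted3;

        return rate > 0xFFFFFFFFULL ? 0xFFFFFFFFULL : rate;
}

int main(void)
{
        /* mss 1448, cwnd 10, srtt 100 ms -> (100 jiffies) << 3 = 800 */
        printf("pacing rate = %llu bytes/sec\n",
               (unsigned long long)pacing_rate(1448, 10, 0, 800));
        return 0;
}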
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index b299da5ff49..b14266bb91e 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -821,8 +821,7 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
*/
static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
struct request_sock *req,
- u16 queue_mapping,
- bool nocache)
+ u16 queue_mapping)
{
const struct inet_request_sock *ireq = inet_rsk(req);
struct flowi4 fl4;
@@ -852,7 +851,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
{
- int res = tcp_v4_send_synack(sk, NULL, req, 0, false);
+ int res = tcp_v4_send_synack(sk, NULL, req, 0);
if (!res)
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
@@ -890,7 +889,7 @@ bool tcp_syn_flood_action(struct sock *sk,
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
- if (!lopt->synflood_warned) {
+ if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
lopt->synflood_warned = 1;
pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
proto, ntohs(tcp_hdr(skb)->dest), msg);
@@ -1316,9 +1315,11 @@ static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
return true;
}
+
if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
- tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+ tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, valid_foc);
if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
memcmp(&foc->val[0], &valid_foc->val[0],
TCP_FASTOPEN_COOKIE_SIZE) != 0)
@@ -1329,14 +1330,16 @@ static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
return true;
} else if (foc->len == 0) { /* Client requesting a cookie */
- tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+ tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, valid_foc);
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPFASTOPENCOOKIEREQD);
} else {
/* Client sent a cookie with wrong size. Treat it
* the same as invalid and return a valid one.
*/
- tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
+ tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr, valid_foc);
}
return false;
}
@@ -1462,7 +1465,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
* limitations, they conserve resources and peer is
* evidently real one.
*/
- if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
+ if ((sysctl_tcp_syncookies == 2 ||
+ inet_csk_reqsk_queue_is_full(sk)) && !isn) {
want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
if (!want_cookie)
goto drop;
@@ -1671,8 +1675,6 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
tcp_initialize_rcv_mss(newsk);
- tcp_synack_rtt_meas(newsk, req);
- newtp->total_retrans = req->num_retrans;
#ifdef CONFIG_TCP_MD5SIG
/* Copy over the MD5 key from the original socket */
@@ -1797,10 +1799,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
sk->sk_rx_dst = NULL;
}
}
- if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
- rsk = sk;
- goto reset;
- }
+ tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
return 0;
}
@@ -2605,7 +2604,7 @@ static void get_openreq4(const struct sock *sk, const struct request_sock *req,
long delta = req->expires - jiffies;
seq_printf(f, "%4d: %08X:%04X %08X:%04X"
- " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
+ " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK%n",
i,
ireq->loc_addr,
ntohs(inet_sk(sk)->inet_sport),
@@ -2663,7 +2662,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
- "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
+ "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d%n",
i, src, srcp, dest, destp, sk->sk_state,
tp->write_seq - tp->snd_una,
rx_queue,
@@ -2802,6 +2801,7 @@ struct proto tcp_prot = {
.unhash = inet_unhash,
.get_port = inet_csk_get_port,
.enter_memory_pressure = tcp_enter_memory_pressure,
+ .stream_memory_free = tcp_stream_memory_free,
.sockets_allocated = &tcp_sockets_allocated,
.orphan_count = &tcp_orphan_count,
.memory_allocated = &tcp_memory_allocated,
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index da14436c173..8a57d79b0b1 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -132,10 +132,10 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
return 0;
}
-static int tcp_cgroup_write(struct cgroup *cont, struct cftype *cft,
+static int tcp_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
const char *buffer)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
unsigned long long val;
int ret = 0;
@@ -180,9 +180,9 @@ static u64 tcp_read_usage(struct mem_cgroup *memcg)
return res_counter_read_u64(&tcp->tcp_memory_allocated, RES_USAGE);
}
-static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft)
+static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
- struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
+ struct mem_cgroup *memcg = mem_cgroup_from_css(css);
u64 val;
switch (cft->private) {
@@ -202,13 +202,13 @@ static u64 tcp_cgroup_read(struct cgroup *cont, struct cftype *cft)
return val;
}
-static int tcp_cgroup_reset(struct cgroup *cont, unsigned int event)
+static int tcp_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
{
struct mem_cgroup *memcg;
struct tcp_memcontrol *tcp;
struct cg_proto *cg_proto;
- memcg = mem_cgroup_from_cont(cont);
+ memcg = mem_cgroup_from_css(css);
cg_proto = tcp_prot.proto_cgroup(memcg);
if (!cg_proto)
return 0;
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index f6a005c485a..4a22f3e715d 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -443,7 +443,7 @@ void tcp_init_metrics(struct sock *sk)
struct dst_entry *dst = __sk_dst_get(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_metrics_block *tm;
- u32 val;
+ u32 val, crtt = 0; /* cached RTT scaled by 8 */
if (dst == NULL)
goto reset;
@@ -478,15 +478,19 @@ void tcp_init_metrics(struct sock *sk)
tp->reordering = val;
}
- val = tcp_metric_get(tm, TCP_METRIC_RTT);
- if (val == 0 || tp->srtt == 0) {
- rcu_read_unlock();
- goto reset;
- }
- /* Initial rtt is determined from SYN,SYN-ACK.
- * The segment is small and rtt may appear much
- * less than real one. Use per-dst memory
- * to make it more realistic.
+ crtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
+ rcu_read_unlock();
+reset:
+ /* The initial RTT measurement from the SYN/SYN-ACK is not ideal
+ * to seed the RTO for later data packets because SYN packets are
+ * small. Use the per-dst cached values to seed the RTO but keep
+ * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
+ * Later the RTO will be updated immediately upon obtaining the first
+ * data RTT sample (tcp_rtt_estimator()). Hence the cached RTT only
+ * influences the first RTO but not later RTT estimation.
+ *
+ * But if RTT is not available from the SYN (due to retransmits or
+ * syn cookies) or the cache, force a conservative 3secs timeout.
*
* A bit of theory. RTT is time passed after "normal" sized packet
* is sent until it is ACKed. In normal circumstances sending small
@@ -497,21 +501,9 @@ void tcp_init_metrics(struct sock *sk)
* to low value, and then abruptly stops to do it and starts to delay
* ACKs, wait for troubles.
*/
- val = msecs_to_jiffies(val);
- if (val > tp->srtt) {
- tp->srtt = val;
- tp->rtt_seq = tp->snd_nxt;
- }
- val = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
- if (val > tp->mdev) {
- tp->mdev = val;
- tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
- }
- rcu_read_unlock();
-
- tcp_set_rto(sk);
-reset:
- if (tp->srtt == 0) {
+ if (crtt > tp->srtt) {
+ inet_csk(sk)->icsk_rto = crtt + max(crtt >> 2, tcp_rto_min(sk));
+ } else if (tp->srtt == 0) {
/* RFC6298: 5.7 We've failed to get a valid RTT sample from
* 3WHS. This is most likely due to retransmission,
* including spurious one. Reset the RTO back to 3secs
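
With this tcp_metrics change, the cached RTT only seeds the initial RTO as crtt + max(crtt/4, rto_min) and no longer overwrites srtt/mdev, so the first real data RTT sample takes over cleanly. A small sketch of that seeding rule, keeping crtt in the <<3 scaling mentioned in the patch and using made-up numbers:

#include <stdio.h>
#include <stdint.h>

#define HZ          1000
#define TCP_RTO_MIN (HZ / 5)    /* 200 ms expressed in jiffies */

/* Seed the retransmit timeout from a cached RTT sample. */
static uint32_t seed_rto(uint32_t crtt)
{
        uint32_t var = crtt >> 2;

        if (var < TCP_RTO_MIN)
                var = TCP_RTO_MIN;
        return crtt + var;      /* crtt + max(crtt/4, rto_min) */
}

int main(void)
{
        uint32_t crtt = 80 << 3;        /* cached RTT of 80 ms, scaled by 8 */

        printf("initial RTO ~ %u (same scale as crtt)\n", seed_rto(crtt));
        return 0;
}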
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index ab1c0865852..58a3e69aef6 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -411,6 +411,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
tcp_enable_early_retrans(newtp);
newtp->tlp_high_seq = 0;
+ newtp->lsndtime = treq->snt_synack;
+ newtp->total_retrans = req->num_retrans;
/* So many TCP implementations out there (incorrectly) count the
* initial SYN frame in their delayed-ACK and congestion control
@@ -666,12 +668,6 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
if (!(flg & TCP_FLAG_ACK))
return NULL;
- /* Got ACK for our SYNACK, so update baseline for SYNACK RTT sample. */
- if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
- tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
- else if (req->num_retrans) /* don't take RTT sample if retrans && ~TS */
- tcp_rsk(req)->snt_synack = 0;
-
/* For Fast Open no more processing is needed (sk is the
* child socket).
*/
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 92fde8d1aa8..7c83cb8bf13 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -65,6 +65,9 @@ int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;
/* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
+unsigned int sysctl_tcp_notsent_lowat __read_mostly = UINT_MAX;
+EXPORT_SYMBOL(sysctl_tcp_notsent_lowat);
+
static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
int push_one, gfp_t gfp);
@@ -1628,7 +1631,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
/* If a full-sized TSO skb can be sent, do it. */
if (limit >= min_t(unsigned int, sk->sk_gso_max_size,
- sk->sk_gso_max_segs * tp->mss_cache))
+ tp->xmit_size_goal_segs * tp->mss_cache))
goto send_now;
/* Middle in queue won't get any more data, full sendable already? */
@@ -2670,7 +2673,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
int tcp_header_size;
int mss;
- skb = alloc_skb(MAX_TCP_HEADER + 15, sk_gfp_atomic(sk, GFP_ATOMIC));
+ skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
if (unlikely(!skb)) {
dst_release(dst);
return NULL;
@@ -2814,6 +2817,8 @@ void tcp_connect_init(struct sock *sk)
if (likely(!tp->repair))
tp->rcv_nxt = 0;
+ else
+ tp->rcv_tstamp = tcp_time_stamp;
tp->rcv_wup = tp->rcv_nxt;
tp->copied_seq = tp->rcv_nxt;
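
The tcp_notsent_lowat sysctl and TCP_NOTSENT_LOWAT option introduced in these patches cap how much not-yet-sent data may sit in the write queue before the socket stops reporting itself writable. The sketch below is only a rough model of that check (the real sk_stream_is_writeable() also requires free send-buffer space); the struct is hypothetical:

#include <stdio.h>
#include <stdint.h>

struct tcp_state {
        uint32_t write_seq;     /* last byte queued by the application */
        uint32_t snd_nxt;       /* next byte to be sent on the wire */
        uint32_t notsent_lowat; /* low watermark for unsent data */
};

/* Writable while the unsent backlog stays under the low watermark. */
static int stream_is_writeable(const struct tcp_state *tp)
{
        uint32_t notsent = tp->write_seq - tp->snd_nxt;

        return notsent < tp->notsent_lowat;
}

int main(void)
{
        struct tcp_state tp = { .write_seq = 150000, .snd_nxt = 100000,
                                .notsent_lowat = 65536 };

        printf("writable: %d\n", stream_is_writeable(&tp)); /* 1: 50000 < 65536 */
        tp.write_seq = 200000;
        printf("writable: %d\n", stream_is_writeable(&tp)); /* 0: 100000 >= 65536 */
        return 0;
}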
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index d4943f67aff..611beab38a0 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -46,6 +46,10 @@ static unsigned int bufsize __read_mostly = 4096;
MODULE_PARM_DESC(bufsize, "Log buffer size in packets (4096)");
module_param(bufsize, uint, 0);
+static unsigned int fwmark __read_mostly = 0;
+MODULE_PARM_DESC(fwmark, "skb mark to match (0=no mark)");
+module_param(fwmark, uint, 0);
+
static int full __read_mostly;
MODULE_PARM_DESC(full, "Full log (1=every ack packet received, 0=only cwnd changes)");
module_param(full, int, 0);
@@ -54,12 +58,16 @@ static const char procname[] = "tcpprobe";
struct tcp_log {
ktime_t tstamp;
- __be32 saddr, daddr;
- __be16 sport, dport;
+ union {
+ struct sockaddr raw;
+ struct sockaddr_in v4;
+ struct sockaddr_in6 v6;
+ } src, dst;
u16 length;
u32 snd_nxt;
u32 snd_una;
u32 snd_wnd;
+ u32 rcv_wnd;
u32 snd_cwnd;
u32 ssthresh;
u32 srtt;
@@ -86,19 +94,45 @@ static inline int tcp_probe_avail(void)
return bufsize - tcp_probe_used() - 1;
}
+#define tcp_probe_copy_fl_to_si4(inet, si4, mem) \
+ do { \
+ si4.sin_family = AF_INET; \
+ si4.sin_port = inet->inet_##mem##port; \
+ si4.sin_addr.s_addr = inet->inet_##mem##addr; \
+ } while (0) \
+
+#if IS_ENABLED(CONFIG_IPV6)
+#define tcp_probe_copy_fl_to_si6(inet, si6, mem) \
+ do { \
+ struct ipv6_pinfo *pi6 = inet->pinet6; \
+ si6.sin6_family = AF_INET6; \
+ si6.sin6_port = inet->inet_##mem##port; \
+ si6.sin6_addr = pi6->mem##addr; \
+ si6.sin6_flowinfo = 0; /* No need here. */ \
+ si6.sin6_scope_id = 0; /* No need here. */ \
+ } while (0)
+#else
+#define tcp_probe_copy_fl_to_si6(fl, si6, mem) \
+ do { \
+ memset(&si6, 0, sizeof(si6)); \
+ } while (0)
+#endif
+
/*
* Hook inserted to be called before each receive packet.
* Note: arguments must match tcp_rcv_established()!
*/
-static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
- struct tcphdr *th, unsigned int len)
+static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+ const struct tcphdr *th, unsigned int len)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_sock *inet = inet_sk(sk);
- /* Only update if port matches */
- if ((port == 0 || ntohs(inet->inet_dport) == port ||
- ntohs(inet->inet_sport) == port) &&
+ /* Only update if port or skb mark matches */
+ if (((port == 0 && fwmark == 0) ||
+ ntohs(inet->inet_dport) == port ||
+ ntohs(inet->inet_sport) == port ||
+ (fwmark > 0 && skb->mark == fwmark)) &&
(full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
spin_lock(&tcp_probe.lock);
@@ -107,15 +141,25 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
struct tcp_log *p = tcp_probe.log + tcp_probe.head;
p->tstamp = ktime_get();
- p->saddr = inet->inet_saddr;
- p->sport = inet->inet_sport;
- p->daddr = inet->inet_daddr;
- p->dport = inet->inet_dport;
+ switch (sk->sk_family) {
+ case AF_INET:
+ tcp_probe_copy_fl_to_si4(inet, p->src.v4, s);
+ tcp_probe_copy_fl_to_si4(inet, p->dst.v4, d);
+ break;
+ case AF_INET6:
+ tcp_probe_copy_fl_to_si6(inet, p->src.v6, s);
+ tcp_probe_copy_fl_to_si6(inet, p->dst.v6, d);
+ break;
+ default:
+ BUG();
+ }
+
p->length = skb->len;
p->snd_nxt = tp->snd_nxt;
p->snd_una = tp->snd_una;
p->snd_cwnd = tp->snd_cwnd;
p->snd_wnd = tp->snd_wnd;
+ p->rcv_wnd = tp->rcv_wnd;
p->ssthresh = tcp_current_ssthresh(sk);
p->srtt = tp->srtt >> 3;
@@ -128,7 +172,6 @@ static int jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
}
jprobe_return();
- return 0;
}
static struct jprobe tcp_jprobe = {
@@ -157,13 +200,11 @@ static int tcpprobe_sprint(char *tbuf, int n)
= ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start));
return scnprintf(tbuf, n,
- "%lu.%09lu %pI4:%u %pI4:%u %d %#x %#x %u %u %u %u\n",
+ "%lu.%09lu %pISpc %pISpc %d %#x %#x %u %u %u %u %u\n",
(unsigned long) tv.tv_sec,
(unsigned long) tv.tv_nsec,
- &p->saddr, ntohs(p->sport),
- &p->daddr, ntohs(p->dport),
- p->length, p->snd_nxt, p->snd_una,
- p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt);
+ &p->src, &p->dst, p->length, p->snd_nxt, p->snd_una,
+ p->snd_cwnd, p->ssthresh, p->snd_wnd, p->srtt, p->rcv_wnd);
}
static ssize_t tcpprobe_read(struct file *file, char __user *buf,
@@ -176,7 +217,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
return -EINVAL;
while (cnt < len) {
- char tbuf[164];
+ char tbuf[256];
int width;
/* Wait for data in buffer */
@@ -223,6 +264,13 @@ static __init int tcpprobe_init(void)
{
int ret = -ENOMEM;
+ /* Warning: if the function signature of tcp_rcv_established
+ * has been changed, you also have to change the signature of
+ * jtcp_rcv_established, otherwise you end up right here!
+ */
+ BUILD_BUG_ON(__same_type(tcp_rcv_established,
+ jtcp_rcv_established) == 0);
+
init_waitqueue_head(&tcp_probe.wait);
spin_lock_init(&tcp_probe.lock);
@@ -241,7 +289,8 @@ static __init int tcpprobe_init(void)
if (ret)
goto err1;
- pr_info("probe registered (port=%d) bufsize=%u\n", port, bufsize);
+ pr_info("probe registered (port=%d/fwmark=%u) bufsize=%u\n",
+ port, fwmark, bufsize);
return 0;
err1:
remove_proc_entry(procname, init_net.proc_net);
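
tcp_probe now stores endpoints in a union of sockaddr_in/sockaddr_in6 and prints them with %pISpc, so a single log format covers both address families. Below is a minimal userspace version of the same union-plus-family-switch idea, using standard socket headers rather than kernel types:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

struct endpoint {
        union {
                struct sockaddr     raw;
                struct sockaddr_in  v4;
                struct sockaddr_in6 v6;
        } addr;
};

/* Print either family from the same record, keyed by sa_family. */
static void endpoint_print(const struct endpoint *ep)
{
        char buf[INET6_ADDRSTRLEN];

        switch (ep->addr.raw.sa_family) {
        case AF_INET:
                inet_ntop(AF_INET, &ep->addr.v4.sin_addr, buf, sizeof(buf));
                printf("%s:%u\n", buf, ntohs(ep->addr.v4.sin_port));
                break;
        case AF_INET6:
                inet_ntop(AF_INET6, &ep->addr.v6.sin6_addr, buf, sizeof(buf));
                printf("[%s]:%u\n", buf, ntohs(ep->addr.v6.sin6_port));
                break;
        }
}

int main(void)
{
        struct endpoint ep;

        memset(&ep, 0, sizeof(ep));
        ep.addr.v4.sin_family = AF_INET;
        ep.addr.v4.sin_port = htons(80);
        inet_pton(AF_INET, "192.0.2.1", &ep.addr.v4.sin_addr);
        endpoint_print(&ep);
        return 0;
}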
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 766e6bab911..74d2c95db57 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -704,7 +704,7 @@ EXPORT_SYMBOL(udp_flush_pending_frames);
* @src: source IP address
* @dst: destination IP address
*/
-static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
+void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
{
struct udphdr *uh = udp_hdr(skb);
struct sk_buff *frags = skb_shinfo(skb)->frag_list;
@@ -740,6 +740,7 @@ static void udp4_hwcsum(struct sk_buff *skb, __be32 src, __be32 dst)
uh->check = CSUM_MANGLED_0;
}
}
+EXPORT_SYMBOL_GPL(udp4_hwcsum);
static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4)
{
@@ -2158,7 +2159,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
__u16 srcp = ntohs(inet->inet_sport);
seq_printf(f, "%5d: %08X:%04X %08X:%04X"
- " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d%n",
+ " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d%n",
bucket, src, srcp, dest, destp, sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
@@ -2336,7 +2337,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
uh->len = htons(skb->len - udp_offset);
/* csum segment if tunnel sets skb with csum. */
- if (unlikely(uh->check)) {
+ if (protocol == htons(ETH_P_IP) && unlikely(uh->check)) {
struct iphdr *iph = ip_hdr(skb);
uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
@@ -2347,7 +2348,18 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
+ } else if (protocol == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ u32 len = skb->len - udp_offset;
+
+ uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
+ len, IPPROTO_UDP, 0);
+ uh->check = csum_fold(skb_checksum(skb, udp_offset, len, 0));
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+ skb->ip_summed = CHECKSUM_NONE;
}
+
skb->protocol = protocol;
} while ((skb = skb->next));
out:
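
In UDP, a computed checksum of zero must be transmitted as all-ones, which is what the CSUM_MANGLED_0 substitution does in both the existing IPv4 branch and the new IPv6 branch of the tunnel segmentation path. The tiny helper below shows only the 16-bit one's-complement fold and that substitution, with an example partial sum chosen so the zero case actually triggers:

#include <stdio.h>
#include <stdint.h>

/* Fold a 32-bit one's-complement sum into 16 bits and invert it. */
static uint16_t csum_fold(uint32_t sum)
{
        sum = (sum & 0xffff) + (sum >> 16);
        sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

int main(void)
{
        uint32_t sum = 0x0001fffe;      /* example partial checksum */
        uint16_t check = csum_fold(sum);

        /* UDP: a zero checksum means "no checksum", so send all-ones instead. */
        if (check == 0)
                check = 0xffff;         /* CSUM_MANGLED_0 */

        printf("checksum = 0x%04x\n", check);
        return 0;
}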
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c
index 327a617d594..baa0f63731f 100644
--- a/net/ipv4/xfrm4_output.c
+++ b/net/ipv4/xfrm4_output.c
@@ -21,7 +21,6 @@
static int xfrm4_tunnel_check_size(struct sk_buff *skb)
{
int mtu, ret = 0;
- struct dst_entry *dst;
if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
goto out;
@@ -29,12 +28,10 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb)
if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df)
goto out;
- dst = skb_dst(skb);
- mtu = dst_mtu(dst);
+ mtu = dst_mtu(skb_dst(skb));
if (skb->len > mtu) {
if (skb->sk)
- ip_local_error(skb->sk, EMSGSIZE, ip_hdr(skb)->daddr,
- inet_sk(skb->sk)->inet_dport, mtu);
+ xfrm_local_error(skb, mtu);
else
icmp_send(skb, ICMP_DEST_UNREACH,
ICMP_FRAG_NEEDED, htonl(mtu));
@@ -99,3 +96,12 @@ int xfrm4_output(struct sk_buff *skb)
x->outer_mode->afinfo->output_finish,
!(IPCB(skb)->flags & IPSKB_REROUTED));
}
+
+void xfrm4_local_error(struct sk_buff *skb, u32 mtu)
+{
+ struct iphdr *hdr;
+
+ hdr = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ ip_local_error(skb->sk, EMSGSIZE, hdr->daddr,
+ inet_sk(skb->sk)->inet_dport, mtu);
+}
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 9258e751bab..0b2a0641526 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -83,6 +83,7 @@ static struct xfrm_state_afinfo xfrm4_state_afinfo = {
.extract_input = xfrm4_extract_input,
.extract_output = xfrm4_extract_output,
.transport_finish = xfrm4_transport_finish,
+ .local_error = xfrm4_local_error,
};
void __init xfrm4_state_init(void)
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index cfdcf7b2daf..d6ff12617f3 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -99,9 +99,9 @@
#define ACONF_DEBUG 2
#if ACONF_DEBUG >= 3
-#define ADBG(x) printk x
+#define ADBG(fmt, ...) printk(fmt, ##__VA_ARGS__)
#else
-#define ADBG(x)
+#define ADBG(fmt, ...) do { if (0) printk(fmt, ##__VA_ARGS__); } while (0)
#endif
#define INFINITY_LIFE_TIME 0xFFFFFFFF
@@ -177,6 +177,8 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.accept_redirects = 1,
.autoconf = 1,
.force_mld_version = 0,
+ .mldv1_unsolicited_report_interval = 10 * HZ,
+ .mldv2_unsolicited_report_interval = HZ,
.dad_transmits = 1,
.rtr_solicits = MAX_RTR_SOLICITATIONS,
.rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
@@ -202,6 +204,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.accept_source_route = 0, /* we do not accept RH0 by default. */
.disable_ipv6 = 0,
.accept_dad = 1,
+ .suppress_frag_ndisc = 1,
};
static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -211,6 +214,9 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.accept_ra = 1,
.accept_redirects = 1,
.autoconf = 1,
+ .force_mld_version = 0,
+ .mldv1_unsolicited_report_interval = 10 * HZ,
+ .mldv2_unsolicited_report_interval = HZ,
.dad_transmits = 1,
.rtr_solicits = MAX_RTR_SOLICITATIONS,
.rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
@@ -236,17 +242,9 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.accept_source_route = 0, /* we do not accept RH0 by default. */
.disable_ipv6 = 0,
.accept_dad = 1,
+ .suppress_frag_ndisc = 1,
};
-/* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
-const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
-const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT;
-const struct in6_addr in6addr_linklocal_allnodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
-const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT;
-const struct in6_addr in6addr_interfacelocal_allnodes = IN6ADDR_INTERFACELOCAL_ALLNODES_INIT;
-const struct in6_addr in6addr_interfacelocal_allrouters = IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT;
-const struct in6_addr in6addr_sitelocal_allrouters = IN6ADDR_SITELOCAL_ALLROUTERS_INIT;
-
/* Check if a valid qdisc is available */
static inline bool addrconf_qdisc_ok(const struct net_device *dev)
{
@@ -306,36 +304,6 @@ err_ip:
return -ENOMEM;
}
-static void snmp6_free_dev(struct inet6_dev *idev)
-{
- kfree(idev->stats.icmpv6msgdev);
- kfree(idev->stats.icmpv6dev);
- snmp_mib_free((void __percpu **)idev->stats.ipv6);
-}
-
-/* Nobody refers to this device, we may destroy it. */
-
-void in6_dev_finish_destroy(struct inet6_dev *idev)
-{
- struct net_device *dev = idev->dev;
-
- WARN_ON(!list_empty(&idev->addr_list));
- WARN_ON(idev->mc_list != NULL);
- WARN_ON(timer_pending(&idev->rs_timer));
-
-#ifdef NET_REFCNT_DEBUG
- pr_debug("%s: %s\n", __func__, dev ? dev->name : "NIL");
-#endif
- dev_put(dev);
- if (!idev->dead) {
- pr_warn("Freeing alive inet6 device %p\n", idev);
- return;
- }
- snmp6_free_dev(idev);
- kfree_rcu(idev, rcu);
-}
-EXPORT_SYMBOL(in6_dev_finish_destroy);
-
static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
{
struct inet6_dev *ndev;
@@ -369,9 +337,9 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
dev_hold(dev);
if (snmp6_alloc_dev(ndev) < 0) {
- ADBG((KERN_WARNING
+ ADBG(KERN_WARNING
"%s: cannot allocate memory for statistics; dev=%s.\n",
- __func__, dev->name));
+ __func__, dev->name);
neigh_parms_release(&nd_tbl, ndev->nd_parms);
dev_put(dev);
kfree(ndev);
@@ -379,9 +347,9 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
}
if (snmp6_register_dev(ndev) < 0) {
- ADBG((KERN_WARNING
+ ADBG(KERN_WARNING
"%s: cannot create /proc/net/dev_snmp6/%s\n",
- __func__, dev->name));
+ __func__, dev->name);
neigh_parms_release(&nd_tbl, ndev->nd_parms);
ndev->dead = 1;
in6_dev_finish_destroy(ndev);
@@ -813,8 +781,9 @@ static u32 inet6_addr_hash(const struct in6_addr *addr)
/* On success it returns ifp with increased reference count */
static struct inet6_ifaddr *
-ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
- int scope, u32 flags)
+ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
+ const struct in6_addr *peer_addr, int pfxlen,
+ int scope, u32 flags, u32 valid_lft, u32 prefered_lft)
{
struct inet6_ifaddr *ifa = NULL;
struct rt6_info *rt;
@@ -843,7 +812,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
/* Ignore adding duplicate addresses on an interface */
if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) {
- ADBG(("ipv6_add_addr: already assigned\n"));
+ ADBG("ipv6_add_addr: already assigned\n");
err = -EEXIST;
goto out;
}
@@ -851,7 +820,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
ifa = kzalloc(sizeof(struct inet6_ifaddr), GFP_ATOMIC);
if (ifa == NULL) {
- ADBG(("ipv6_add_addr: malloc failed\n"));
+ ADBG("ipv6_add_addr: malloc failed\n");
err = -ENOBUFS;
goto out;
}
@@ -863,6 +832,8 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
}
ifa->addr = *addr;
+ if (peer_addr)
+ ifa->peer_addr = *peer_addr;
spin_lock_init(&ifa->lock);
spin_lock_init(&ifa->state_lock);
@@ -872,6 +843,8 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
ifa->scope = scope;
ifa->prefix_len = pfxlen;
ifa->flags = flags | IFA_F_TENTATIVE;
+ ifa->valid_lft = valid_lft;
+ ifa->prefered_lft = prefered_lft;
ifa->cstamp = ifa->tstamp = jiffies;
ifa->tokenized = false;
@@ -1049,7 +1022,6 @@ static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *i
unsigned long regen_advance;
int tmp_plen;
int ret = 0;
- int max_addresses;
u32 addr_flags;
unsigned long now = jiffies;
@@ -1095,7 +1067,6 @@ retry:
idev->cnf.temp_prefered_lft + age -
idev->cnf.max_desync_factor);
tmp_plen = ifp->prefix_len;
- max_addresses = idev->cnf.max_addresses;
tmp_tstamp = ifp->tstamp;
spin_unlock_bh(&ifp->lock);
@@ -1121,11 +1092,10 @@ retry:
if (ifp->flags & IFA_F_OPTIMISTIC)
addr_flags |= IFA_F_OPTIMISTIC;
- ift = !max_addresses ||
- ipv6_count_addresses(idev) < max_addresses ?
- ipv6_add_addr(idev, &addr, tmp_plen, ipv6_addr_scope(&addr),
- addr_flags) : NULL;
- if (IS_ERR_OR_NULL(ift)) {
+ ift = ipv6_add_addr(idev, &addr, NULL, tmp_plen,
+ ipv6_addr_scope(&addr), addr_flags,
+ tmp_valid_lft, tmp_prefered_lft);
+ if (IS_ERR(ift)) {
in6_ifa_put(ifp);
in6_dev_put(idev);
pr_info("%s: retry temporary address regeneration\n", __func__);
@@ -1136,8 +1106,6 @@ retry:
spin_lock_bh(&ift->lock);
ift->ifpub = ifp;
- ift->valid_lft = tmp_valid_lft;
- ift->prefered_lft = tmp_prefered_lft;
ift->cstamp = now;
ift->tstamp = tmp_tstamp;
spin_unlock_bh(&ift->lock);
@@ -1805,6 +1773,16 @@ static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
}
+static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
+{
+ memcpy(eui, dev->perm_addr, 3);
+ memcpy(eui + 5, dev->perm_addr + 3, 3);
+ eui[3] = 0xFF;
+ eui[4] = 0xFE;
+ eui[0] ^= 2;
+ return 0;
+}
+
static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
{
switch (dev->type) {
@@ -1823,6 +1801,8 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
return addrconf_ifid_eui64(eui, dev);
case ARPHRD_IEEE1394:
return addrconf_ifid_ieee1394(eui, dev);
+ case ARPHRD_TUNNEL6:
+ return addrconf_ifid_ip6tnl(eui, dev);
}
return -1;
}
@@ -2048,7 +2028,7 @@ void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
pinfo = (struct prefix_info *) opt;
if (len < sizeof(struct prefix_info)) {
- ADBG(("addrconf: prefix option too short\n"));
+ ADBG("addrconf: prefix option too short\n");
return;
}
@@ -2179,16 +2159,19 @@ ok:
*/
if (!max_addresses ||
ipv6_count_addresses(in6_dev) < max_addresses)
- ifp = ipv6_add_addr(in6_dev, &addr, pinfo->prefix_len,
+ ifp = ipv6_add_addr(in6_dev, &addr, NULL,
+ pinfo->prefix_len,
addr_type&IPV6_ADDR_SCOPE_MASK,
- addr_flags);
+ addr_flags, valid_lft,
+ prefered_lft);
if (IS_ERR_OR_NULL(ifp)) {
in6_dev_put(in6_dev);
return;
}
- update_lft = create = 1;
+ update_lft = 0;
+ create = 1;
ifp->cstamp = jiffies;
ifp->tokenized = tokenized;
addrconf_dad_start(ifp);
@@ -2209,7 +2192,7 @@ ok:
stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
else
stored_lft = 0;
- if (!update_lft && stored_lft) {
+ if (!update_lft && !create && stored_lft) {
if (valid_lft > MIN_VALID_LIFETIME ||
valid_lft > stored_lft)
update_lft = 1;
@@ -2455,17 +2438,10 @@ static int inet6_addr_add(struct net *net, int ifindex, const struct in6_addr *p
prefered_lft = timeout;
}
- ifp = ipv6_add_addr(idev, pfx, plen, scope, ifa_flags);
+ ifp = ipv6_add_addr(idev, pfx, peer_pfx, plen, scope, ifa_flags,
+ valid_lft, prefered_lft);
if (!IS_ERR(ifp)) {
- spin_lock_bh(&ifp->lock);
- ifp->valid_lft = valid_lft;
- ifp->prefered_lft = prefered_lft;
- ifp->tstamp = jiffies;
- if (peer_pfx)
- ifp->peer_addr = *peer_pfx;
- spin_unlock_bh(&ifp->lock);
-
addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev,
expires, flags);
/*
@@ -2557,7 +2533,8 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
{
struct inet6_ifaddr *ifp;
- ifp = ipv6_add_addr(idev, addr, plen, scope, IFA_F_PERMANENT);
+ ifp = ipv6_add_addr(idev, addr, NULL, plen,
+ scope, IFA_F_PERMANENT, 0, 0);
if (!IS_ERR(ifp)) {
spin_lock_bh(&ifp->lock);
ifp->flags &= ~IFA_F_TENTATIVE;
@@ -2683,7 +2660,7 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
#endif
- ifp = ipv6_add_addr(idev, addr, 64, IFA_LINK, addr_flags);
+ ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags, 0, 0);
if (!IS_ERR(ifp)) {
addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
addrconf_dad_start(ifp);
@@ -2703,7 +2680,8 @@ static void addrconf_dev_config(struct net_device *dev)
(dev->type != ARPHRD_ARCNET) &&
(dev->type != ARPHRD_INFINIBAND) &&
(dev->type != ARPHRD_IEEE802154) &&
- (dev->type != ARPHRD_IEEE1394)) {
+ (dev->type != ARPHRD_IEEE1394) &&
+ (dev->type != ARPHRD_TUNNEL6)) {
/* Alas, we support only Ethernet autoconfiguration. */
return;
}
@@ -2789,44 +2767,6 @@ ipv6_inherit_linklocal(struct inet6_dev *idev, struct net_device *link_dev)
return -1;
}
-static void ip6_tnl_add_linklocal(struct inet6_dev *idev)
-{
- struct net_device *link_dev;
- struct net *net = dev_net(idev->dev);
-
- /* first try to inherit the link-local address from the link device */
- if (idev->dev->iflink &&
- (link_dev = __dev_get_by_index(net, idev->dev->iflink))) {
- if (!ipv6_inherit_linklocal(idev, link_dev))
- return;
- }
- /* then try to inherit it from any device */
- for_each_netdev(net, link_dev) {
- if (!ipv6_inherit_linklocal(idev, link_dev))
- return;
- }
- pr_debug("init ip6-ip6: add_linklocal failed\n");
-}
-
-/*
- * Autoconfigure tunnel with a link-local address so routing protocols,
- * DHCPv6, MLD etc. can be run over the virtual link
- */
-
-static void addrconf_ip6_tnl_config(struct net_device *dev)
-{
- struct inet6_dev *idev;
-
- ASSERT_RTNL();
-
- idev = addrconf_add_dev(dev);
- if (IS_ERR(idev)) {
- pr_debug("init ip6-ip6: add_dev failed\n");
- return;
- }
- ip6_tnl_add_linklocal(idev);
-}
-
static int addrconf_notify(struct notifier_block *this, unsigned long event,
void *ptr)
{
@@ -2894,9 +2834,6 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
addrconf_gre_config(dev);
break;
#endif
- case ARPHRD_TUNNEL6:
- addrconf_ip6_tnl_config(dev);
- break;
case ARPHRD_LOOPBACK:
init_loopback(dev);
break;
@@ -3121,6 +3058,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
static void addrconf_rs_timer(unsigned long data)
{
struct inet6_dev *idev = (struct inet6_dev *)data;
+ struct net_device *dev = idev->dev;
struct in6_addr lladdr;
write_lock(&idev->lock);
@@ -3135,12 +3073,14 @@ static void addrconf_rs_timer(unsigned long data)
goto out;
if (idev->rs_probes++ < idev->cnf.rtr_solicits) {
- if (!__ipv6_get_lladdr(idev, &lladdr, IFA_F_TENTATIVE))
- ndisc_send_rs(idev->dev, &lladdr,
+ write_unlock(&idev->lock);
+ if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
+ ndisc_send_rs(dev, &lladdr,
&in6addr_linklocal_allrouters);
else
- goto out;
+ goto put;
+ write_lock(&idev->lock);
/* The wait after the last probe can be shorter */
addrconf_mod_rs_timer(idev, (idev->rs_probes ==
idev->cnf.rtr_solicits) ?
@@ -3156,6 +3096,7 @@ static void addrconf_rs_timer(unsigned long data)
out:
write_unlock(&idev->lock);
+put:
in6_dev_put(idev);
}
@@ -3631,8 +3572,8 @@ restart:
if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
- ADBG((KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
- now, next, next_sec, next_sched));
+ ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
+ now, next, next_sec, next_sched);
addr_chk_timer.expires = next_sched;
add_timer(&addr_chk_timer);
@@ -4178,6 +4119,10 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_RTR_SOLICIT_DELAY] =
jiffies_to_msecs(cnf->rtr_solicit_delay);
array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
+ array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
+ jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
+ array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
+ jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
#ifdef CONFIG_IPV6_PRIVACY
array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
@@ -4208,6 +4153,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
+ array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
}
static inline size_t inet6_ifla6_size(void)
@@ -4653,6 +4599,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
break;
}
atomic_inc(&net->ipv6.dev_addr_genid);
+ rt_genid_bump_ipv6(net);
}
static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
@@ -4860,6 +4807,22 @@ static struct addrconf_sysctl_table
.mode = 0644,
.proc_handler = proc_dointvec,
},
+ {
+ .procname = "mldv1_unsolicited_report_interval",
+ .data =
+ &ipv6_devconf.mldv1_unsolicited_report_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ },
+ {
+ .procname = "mldv2_unsolicited_report_interval",
+ .data =
+ &ipv6_devconf.mldv2_unsolicited_report_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ },
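An aside on the two sysctl entries just added (illustrative sketch, not part of the patch): proc_dointvec_ms_jiffies exposes the value in milliseconds through /proc/sys while storing it internally in jiffies, so a write from userspace is roughly equivalent to:
/* sketch only: what the handler effectively does when userspace writes "10000" */
int msecs_from_user = 10000;
ipv6_devconf.mldv1_unsolicited_report_interval = msecs_to_jiffies(msecs_from_user);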
#ifdef CONFIG_IPV6_PRIVACY
{
.procname = "use_tempaddr",
@@ -5005,6 +4968,13 @@ static struct addrconf_sysctl_table
.proc_handler = proc_dointvec
},
{
+ .procname = "suppress_frag_ndisc",
+ .data = &ipv6_devconf.suppress_frag_ndisc,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
/* sentinel */
}
},
diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c
index d2f87427244..4c11cbcf830 100644
--- a/net/ipv6/addrconf_core.c
+++ b/net/ipv6/addrconf_core.c
@@ -6,6 +6,7 @@
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
+#include <net/ip.h>
#define IPV6_ADDR_SCOPE_TYPE(scope) ((scope) << 16)
@@ -98,3 +99,52 @@ int inet6addr_notifier_call_chain(unsigned long val, void *v)
return atomic_notifier_call_chain(&inet6addr_chain, val, v);
}
EXPORT_SYMBOL(inet6addr_notifier_call_chain);
+
+const struct ipv6_stub *ipv6_stub __read_mostly;
+EXPORT_SYMBOL_GPL(ipv6_stub);
+
+/* IPv6 Wildcard Address and Loopback Address defined by RFC2553 */
+const struct in6_addr in6addr_loopback = IN6ADDR_LOOPBACK_INIT;
+EXPORT_SYMBOL(in6addr_loopback);
+const struct in6_addr in6addr_any = IN6ADDR_ANY_INIT;
+EXPORT_SYMBOL(in6addr_any);
+const struct in6_addr in6addr_linklocal_allnodes = IN6ADDR_LINKLOCAL_ALLNODES_INIT;
+EXPORT_SYMBOL(in6addr_linklocal_allnodes);
+const struct in6_addr in6addr_linklocal_allrouters = IN6ADDR_LINKLOCAL_ALLROUTERS_INIT;
+EXPORT_SYMBOL(in6addr_linklocal_allrouters);
+const struct in6_addr in6addr_interfacelocal_allnodes = IN6ADDR_INTERFACELOCAL_ALLNODES_INIT;
+EXPORT_SYMBOL(in6addr_interfacelocal_allnodes);
+const struct in6_addr in6addr_interfacelocal_allrouters = IN6ADDR_INTERFACELOCAL_ALLROUTERS_INIT;
+EXPORT_SYMBOL(in6addr_interfacelocal_allrouters);
+const struct in6_addr in6addr_sitelocal_allrouters = IN6ADDR_SITELOCAL_ALLROUTERS_INIT;
+EXPORT_SYMBOL(in6addr_sitelocal_allrouters);
+
+static void snmp6_free_dev(struct inet6_dev *idev)
+{
+ kfree(idev->stats.icmpv6msgdev);
+ kfree(idev->stats.icmpv6dev);
+ snmp_mib_free((void __percpu **)idev->stats.ipv6);
+}
+
+/* Nobody refers to this device, we may destroy it. */
+
+void in6_dev_finish_destroy(struct inet6_dev *idev)
+{
+ struct net_device *dev = idev->dev;
+
+ WARN_ON(!list_empty(&idev->addr_list));
+ WARN_ON(idev->mc_list != NULL);
+ WARN_ON(timer_pending(&idev->rs_timer));
+
+#ifdef NET_REFCNT_DEBUG
+ pr_debug("%s: %s\n", __func__, dev ? dev->name : "NIL");
+#endif
+ dev_put(dev);
+ if (!idev->dead) {
+ pr_warn("Freeing alive inet6 device %p\n", idev);
+ return;
+ }
+ snmp6_free_dev(idev);
+ kfree_rcu(idev, rcu);
+}
+EXPORT_SYMBOL(in6_dev_finish_destroy);
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index f083a583a05..b30ad3741b4 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -251,38 +251,36 @@ static struct ip6addrlbl_entry *ip6addrlbl_alloc(struct net *net,
/* add a label */
static int __ip6addrlbl_add(struct ip6addrlbl_entry *newp, int replace)
{
+ struct hlist_node *n;
+ struct ip6addrlbl_entry *last = NULL, *p = NULL;
int ret = 0;
- ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n",
- __func__,
- newp, replace);
+ ADDRLABEL(KERN_DEBUG "%s(newp=%p, replace=%d)\n", __func__, newp,
+ replace);
- if (hlist_empty(&ip6addrlbl_table.head)) {
- hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head);
- } else {
- struct hlist_node *n;
- struct ip6addrlbl_entry *p = NULL;
- hlist_for_each_entry_safe(p, n,
- &ip6addrlbl_table.head, list) {
- if (p->prefixlen == newp->prefixlen &&
- net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) &&
- p->ifindex == newp->ifindex &&
- ipv6_addr_equal(&p->prefix, &newp->prefix)) {
- if (!replace) {
- ret = -EEXIST;
- goto out;
- }
- hlist_replace_rcu(&p->list, &newp->list);
- ip6addrlbl_put(p);
- goto out;
- } else if ((p->prefixlen == newp->prefixlen && !p->ifindex) ||
- (p->prefixlen < newp->prefixlen)) {
- hlist_add_before_rcu(&newp->list, &p->list);
+ hlist_for_each_entry_safe(p, n, &ip6addrlbl_table.head, list) {
+ if (p->prefixlen == newp->prefixlen &&
+ net_eq(ip6addrlbl_net(p), ip6addrlbl_net(newp)) &&
+ p->ifindex == newp->ifindex &&
+ ipv6_addr_equal(&p->prefix, &newp->prefix)) {
+ if (!replace) {
+ ret = -EEXIST;
goto out;
}
+ hlist_replace_rcu(&p->list, &newp->list);
+ ip6addrlbl_put(p);
+ goto out;
+ } else if ((p->prefixlen == newp->prefixlen && !p->ifindex) ||
+ (p->prefixlen < newp->prefixlen)) {
+ hlist_add_before_rcu(&newp->list, &p->list);
+ goto out;
}
- hlist_add_after_rcu(&p->list, &newp->list);
+ last = p;
}
+ if (last)
+ hlist_add_after_rcu(&last->list, &newp->list);
+ else
+ hlist_add_head_rcu(&newp->list, &ip6addrlbl_table.head);
out:
if (!ret)
ip6addrlbl_table.seq++;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index a5ac969aeef..136fe55c1a4 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -56,6 +56,7 @@
#include <net/transp_v6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
+#include <net/ndisc.h>
#ifdef CONFIG_IPV6_TUNNEL
#include <net/ip6_tunnel.h>
#endif
@@ -766,6 +767,7 @@ static int __net_init inet6_net_init(struct net *net)
net->ipv6.sysctl.bindv6only = 0;
net->ipv6.sysctl.icmpv6_time = 1*HZ;
+ atomic_set(&net->ipv6.rt_genid, 0);
err = ipv6_init_mibs(net);
if (err)
@@ -809,6 +811,15 @@ static struct pernet_operations inet6_net_ops = {
.exit = inet6_net_exit,
};
+static const struct ipv6_stub ipv6_stub_impl = {
+ .ipv6_sock_mc_join = ipv6_sock_mc_join,
+ .ipv6_sock_mc_drop = ipv6_sock_mc_drop,
+ .ipv6_dst_lookup = ip6_dst_lookup,
+ .udpv6_encap_enable = udpv6_encap_enable,
+ .ndisc_send_na = ndisc_send_na,
+ .nd_tbl = &nd_tbl,
+};
+
static int __init inet6_init(void)
{
struct list_head *r;
@@ -883,6 +894,9 @@ static int __init inet6_init(void)
err = igmp6_init();
if (err)
goto igmp_fail;
+
+ ipv6_stub = &ipv6_stub_impl;
+
err = ipv6_netfilter_init();
if (err)
goto netfilter_fail;
@@ -1039,6 +1053,7 @@ static void __exit inet6_exit(void)
raw6_proc_exit();
#endif
ipv6_netfilter_fini();
+ ipv6_stub = NULL;
igmp6_cleanup();
ndisc_cleanup();
ip6_mr_cleanup();
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index bb02e176cb7..73784c3d464 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -628,7 +628,7 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
return;
if (type == NDISC_REDIRECT)
- ip6_redirect(skb, net, 0, 0);
+ ip6_redirect(skb, net, skb->dev->ifindex, 0);
else
ip6_update_pmtu(skb, net, info, 0, 0);
xfrm_state_put(x);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 197e6f4a2b7..48b6bd2a9a1 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -890,7 +890,7 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
src = &np->rcv_saddr;
seq_printf(seq,
"%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
- "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
+ "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n",
bucket,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 40ffd72243a..d3618a78fca 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -425,7 +425,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
net_adj = 0;
return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
- net_adj) & ~(align - 1)) + (net_adj - 2);
+ net_adj) & ~(align - 1)) + net_adj - 2;
}
static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
@@ -447,7 +447,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
return;
if (type == NDISC_REDIRECT)
- ip6_redirect(skb, net, 0, 0);
+ ip6_redirect(skb, net, skb->dev->ifindex, 0);
else
ip6_update_pmtu(skb, net, info, 0, 0);
xfrm_state_put(x);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 2e1a432867c..a6c58ce43d3 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -55,26 +55,33 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
struct fib6_table *table;
struct net *net = rule->fr_net;
pol_lookup_t lookup = arg->lookup_ptr;
+ int err = 0;
switch (rule->action) {
case FR_ACT_TO_TBL:
break;
case FR_ACT_UNREACHABLE:
+ err = -ENETUNREACH;
rt = net->ipv6.ip6_null_entry;
goto discard_pkt;
default:
case FR_ACT_BLACKHOLE:
+ err = -EINVAL;
rt = net->ipv6.ip6_blk_hole_entry;
goto discard_pkt;
case FR_ACT_PROHIBIT:
+ err = -EACCES;
rt = net->ipv6.ip6_prohibit_entry;
goto discard_pkt;
}
table = fib6_get_table(net, rule->table);
- if (table)
- rt = lookup(net, table, flp6, flags);
+ if (!table) {
+ err = -EAGAIN;
+ goto out;
+ }
+ rt = lookup(net, table, flp6, flags);
if (rt != net->ipv6.ip6_null_entry) {
struct fib6_rule *r = (struct fib6_rule *)rule;
@@ -101,6 +108,7 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
}
again:
ip6_rt_put(rt);
+ err = -EAGAIN;
rt = NULL;
goto out;
@@ -108,9 +116,31 @@ discard_pkt:
dst_hold(&rt->dst);
out:
arg->result = rt;
- return rt == NULL ? -EAGAIN : 0;
+ return err;
}
+static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
+{
+ struct rt6_info *rt = (struct rt6_info *) arg->result;
+ struct net_device *dev = rt->rt6i_idev->dev;
+ /* do not accept result if the route does
+ * not meet the required prefix length
+ */
+ if (rt->rt6i_dst.plen <= rule->suppress_prefixlen)
+ goto suppress_route;
+
+ /* do not accept result if the route uses a device
+ * belonging to a forbidden interface group
+ */
+ if (rule->suppress_ifgroup != -1 && dev && dev->group == rule->suppress_ifgroup)
+ goto suppress_route;
+
+ return false;
+
+suppress_route:
+ ip6_rt_put(rt);
+ return true;
+}
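A note on how the new suppress hook is wired up (an editorial aside based on the generic fib_rules behaviour, not part of the patch):
/* fib6_rule_suppress() runs after a rule's table lookup has produced a result;
 * returning true discards that result (the route reference is dropped via
 * ip6_rt_put() above) and lets the framework continue with the next rule.
 */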
static int fib6_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
{
@@ -244,6 +274,7 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
.addr_size = sizeof(struct in6_addr),
.action = fib6_rule_action,
.match = fib6_rule_match,
+ .suppress = fib6_rule_suppress,
.configure = fib6_rule_configure,
.compare = fib6_rule_compare,
.fill = fib6_rule_fill,
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 7cfc8d28487..eef8d945b36 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -92,7 +92,7 @@ static void icmpv6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (type == ICMPV6_PKT_TOOBIG)
ip6_update_pmtu(skb, net, info, 0, 0);
else if (type == NDISC_REDIRECT)
- ip6_redirect(skb, net, 0, 0);
+ ip6_redirect(skb, net, skb->dev->ifindex, 0);
if (!(type & ICMPV6_INFOMSG_MASK))
if (icmp6->icmp6_type == ICMPV6_ECHO_REQUEST)
@@ -940,6 +940,14 @@ static const struct icmp6_err {
.err = ECONNREFUSED,
.fatal = 1,
},
+ { /* POLICY_FAIL */
+ .err = EACCES,
+ .fatal = 1,
+ },
+ { /* REJECT_ROUTE */
+ .err = EACCES,
+ .fatal = 1,
+ },
};
int icmpv6_err_convert(u8 type, u8 code, int *err)
@@ -951,7 +959,7 @@ int icmpv6_err_convert(u8 type, u8 code, int *err)
switch (type) {
case ICMPV6_DEST_UNREACH:
fatal = 1;
- if (code <= ICMPV6_PORT_UNREACH) {
+ if (code < ARRAY_SIZE(tab_unreach)) {
*err = tab_unreach[code].err;
fatal = tab_unreach[code].fatal;
}
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 5fc9c7a68d8..73db48eba1c 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -425,8 +425,8 @@ out:
* node.
*/
-static struct fib6_node * fib6_add_1(struct fib6_node *root, void *addr,
- int addrlen, int plen,
+static struct fib6_node *fib6_add_1(struct fib6_node *root,
+ struct in6_addr *addr, int plen,
int offset, int allow_create,
int replace_required)
{
@@ -543,7 +543,7 @@ insert_above:
but if it is >= plen, the value is ignored in any case.
*/
- bit = __ipv6_addr_diff(addr, &key->addr, addrlen);
+ bit = __ipv6_addr_diff(addr, &key->addr, sizeof(*addr));
/*
* (intermediate)[in]
@@ -822,9 +822,9 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
if (!allow_create && !replace_required)
pr_warn("RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
- fn = fib6_add_1(root, &rt->rt6i_dst.addr, sizeof(struct in6_addr),
- rt->rt6i_dst.plen, offsetof(struct rt6_info, rt6i_dst),
- allow_create, replace_required);
+ fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
+ offsetof(struct rt6_info, rt6i_dst), allow_create,
+ replace_required);
if (IS_ERR(fn)) {
err = PTR_ERR(fn);
@@ -863,7 +863,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
/* Now add the first leaf node to new subtree */
sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
- sizeof(struct in6_addr), rt->rt6i_src.plen,
+ rt->rt6i_src.plen,
offsetof(struct rt6_info, rt6i_src),
allow_create, replace_required);
@@ -882,7 +882,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info)
fn->subtree = sfn;
} else {
sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
- sizeof(struct in6_addr), rt->rt6i_src.plen,
+ rt->rt6i_src.plen,
offsetof(struct rt6_info, rt6i_src),
allow_create, replace_required);
@@ -993,14 +993,22 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root,
if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
#ifdef CONFIG_IPV6_SUBTREES
- if (fn->subtree)
- fn = fib6_lookup_1(fn->subtree, args + 1);
+ if (fn->subtree) {
+ struct fib6_node *sfn;
+ sfn = fib6_lookup_1(fn->subtree,
+ args + 1);
+ if (!sfn)
+ goto backtrack;
+ fn = sfn;
+ }
#endif
- if (!fn || fn->fn_flags & RTN_RTINFO)
+ if (fn->fn_flags & RTN_RTINFO)
return fn;
}
}
-
+#ifdef CONFIG_IPV6_SUBTREES
+backtrack:
+#endif
if (fn->fn_flags & RTN_ROOT)
break;
@@ -1632,27 +1640,28 @@ static int fib6_age(struct rt6_info *rt, void *arg)
static DEFINE_SPINLOCK(fib6_gc_lock);
-void fib6_run_gc(unsigned long expires, struct net *net)
+void fib6_run_gc(unsigned long expires, struct net *net, bool force)
{
- if (expires != ~0UL) {
+ unsigned long now;
+
+ if (force) {
spin_lock_bh(&fib6_gc_lock);
- gc_args.timeout = expires ? (int)expires :
- net->ipv6.sysctl.ip6_rt_gc_interval;
- } else {
- if (!spin_trylock_bh(&fib6_gc_lock)) {
- mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
- return;
- }
- gc_args.timeout = net->ipv6.sysctl.ip6_rt_gc_interval;
+ } else if (!spin_trylock_bh(&fib6_gc_lock)) {
+ mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
+ return;
}
+ gc_args.timeout = expires ? (int)expires :
+ net->ipv6.sysctl.ip6_rt_gc_interval;
gc_args.more = icmp6_dst_gc();
fib6_clean_all(net, fib6_age, 0, NULL);
+ now = jiffies;
+ net->ipv6.ip6_rt_last_gc = now;
if (gc_args.more)
mod_timer(&net->ipv6.ip6_fib_timer,
- round_jiffies(jiffies
+ round_jiffies(now
+ net->ipv6.sysctl.ip6_rt_gc_interval));
else
del_timer(&net->ipv6.ip6_fib_timer);
@@ -1661,7 +1670,7 @@ void fib6_run_gc(unsigned long expires, struct net *net)
static void fib6_gc_timer_cb(unsigned long arg)
{
- fib6_run_gc(0, (struct net *)arg);
+ fib6_run_gc(0, (struct net *)arg, true);
}
static int __net_init fib6_net_init(struct net *net)
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index ecd60733e5e..6b26e9feafb 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -335,6 +335,7 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
dev->rtnl_link_ops = &ip6gre_link_ops;
nt->dev = dev;
+ nt->net = dev_net(dev);
ip6gre_tnl_link_config(nt, 1);
if (register_netdevice(dev) < 0)
@@ -508,8 +509,6 @@ static int ip6gre_rcv(struct sk_buff *skb)
goto drop;
}
- secpath_reset(skb);
-
skb->protocol = gre_proto;
/* WCCP version 1 and 2 protocol decoding.
* - Change protocol to IP
@@ -524,7 +523,6 @@ static int ip6gre_rcv(struct sk_buff *skb)
skb->mac_header = skb->network_header;
__pskb_pull(skb, offset);
skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
- skb->pkt_type = PACKET_HOST;
if (((flags&GRE_CSUM) && csum) ||
(!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
@@ -556,7 +554,7 @@ static int ip6gre_rcv(struct sk_buff *skb)
skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
}
- __skb_tunnel_rx(skb, tunnel->dev);
+ __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
skb_reset_network_header(skb);
@@ -693,6 +691,8 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
tunnel->err_count = 0;
}
+ skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
+
max_headroom = LL_RESERVED_SPACE(tdev) + gre_hlen + dst->header_len;
if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
@@ -709,8 +709,6 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
skb = new_skb;
}
- skb_dst_drop(skb);
-
if (fl6->flowi6_mark) {
skb_dst_set(skb, dst);
ndst = NULL;
@@ -724,6 +722,11 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
}
+ if (likely(!skb->encapsulation)) {
+ skb_reset_inner_headers(skb);
+ skb->encapsulation = 1;
+ }
+
skb_push(skb, gre_hlen);
skb_reset_network_header(skb);
skb_set_transport_header(skb, sizeof(*ipv6h));
@@ -1255,6 +1258,7 @@ static int ip6gre_tunnel_init(struct net_device *dev)
tunnel = netdev_priv(dev);
tunnel->dev = dev;
+ tunnel->net = dev_net(dev);
strcpy(tunnel->parms.name, dev->name);
memcpy(dev->dev_addr, &tunnel->parms.laddr, sizeof(struct in6_addr));
@@ -1275,6 +1279,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
struct ip6_tnl *tunnel = netdev_priv(dev);
tunnel->dev = dev;
+ tunnel->net = dev_net(dev);
strcpy(tunnel->parms.name, dev->name);
tunnel->hlen = sizeof(struct ipv6hdr) + 4;
@@ -1450,6 +1455,7 @@ static int ip6gre_tap_init(struct net_device *dev)
tunnel = netdev_priv(dev);
tunnel->dev = dev;
+ tunnel->net = dev_net(dev);
strcpy(tunnel->parms.name, dev->name);
ip6gre_tnl_link_config(tunnel, 1);
@@ -1501,6 +1507,7 @@ static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
eth_hw_addr_random(dev);
nt->dev = dev;
+ nt->net = dev_net(dev);
ip6gre_tnl_link_config(nt, !tb[IFLA_MTU]);
/* Can use a lockless transmit, unless we generate output sequences */
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 2bab2aa5974..302d6fb1ff2 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -44,7 +44,7 @@
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/xfrm.h>
-
+#include <net/inet_ecn.h>
int ip6_rcv_finish(struct sk_buff *skb)
@@ -109,6 +109,10 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
if (hdr->version != 6)
goto err;
+ IP6_ADD_STATS_BH(dev_net(dev), idev,
+ IPSTATS_MIB_NOECTPKTS +
+ (ipv6_get_dsfield(hdr) & INET_ECN_MASK),
+ max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
/*
* RFC4291 2.5.3
* A packet received on an interface with a destination address
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index a263b990ee1..d82de722810 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -91,6 +91,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
unsigned int unfrag_ip6hlen;
u8 *prevhdr;
int offset = 0;
+ bool tunnel;
if (unlikely(skb_shinfo(skb)->gso_type &
~(SKB_GSO_UDP |
@@ -106,6 +107,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
goto out;
+ tunnel = skb->encapsulation;
ipv6h = ipv6_hdr(skb);
__skb_pull(skb, sizeof(*ipv6h));
segs = ERR_PTR(-EPROTONOSUPPORT);
@@ -126,7 +128,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
ipv6h = ipv6_hdr(skb);
ipv6h->payload_len = htons(skb->len - skb->mac_len -
sizeof(*ipv6h));
- if (proto == IPPROTO_UDP) {
+ if (!tunnel && proto == IPPROTO_UDP) {
unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
fptr = (struct frag_hdr *)(skb_network_header(skb) +
unfrag_ip6hlen);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 6e3ddf806ec..3a692d52916 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -56,31 +56,6 @@
#include <net/checksum.h>
#include <linux/mroute6.h>
-int __ip6_local_out(struct sk_buff *skb)
-{
- int len;
-
- len = skb->len - sizeof(struct ipv6hdr);
- if (len > IPV6_MAXPLEN)
- len = 0;
- ipv6_hdr(skb)->payload_len = htons(len);
-
- return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
- skb_dst(skb)->dev, dst_output);
-}
-
-int ip6_local_out(struct sk_buff *skb)
-{
- int err;
-
- err = __ip6_local_out(skb);
- if (likely(err == 1))
- err = dst_output(skb);
-
- return err;
-}
-EXPORT_SYMBOL_GPL(ip6_local_out);
-
static int ip6_finish_output2(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
@@ -238,6 +213,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
hdr->saddr = fl6->saddr;
hdr->daddr = *first_hop;
+ skb->protocol = htons(ETH_P_IPV6);
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
@@ -1057,6 +1033,7 @@ static inline int ip6_ufo_append_data(struct sock *sk,
/* initialize protocol header pointer */
skb->transport_header = skb->network_header + fragheaderlen;
+ skb->protocol = htons(ETH_P_IPV6);
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
}
@@ -1359,6 +1336,7 @@ alloc_new_skb:
/*
* Fill in the control structures
*/
+ skb->protocol = htons(ETH_P_IPV6);
skb->ip_summed = CHECKSUM_NONE;
skb->csum = 0;
/* reserve for fragmentation and ipsec header */
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 1e55866cead..61355f7f4da 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -41,6 +41,7 @@
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
+#include <linux/etherdevice.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
@@ -315,6 +316,7 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
t = netdev_priv(dev);
t->parms = *p;
+ t->net = dev_net(dev);
err = ip6_tnl_create2(dev);
if (err < 0)
goto failed_free;
@@ -374,7 +376,7 @@ static void
ip6_tnl_dev_uninit(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- struct net *net = dev_net(dev);
+ struct net *net = t->net;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
if (dev == ip6n->fb_tnl_dev)
@@ -741,7 +743,7 @@ int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
{
struct __ip6_tnl_parm *p = &t->parms;
int ret = 0;
- struct net *net = dev_net(t->dev);
+ struct net *net = t->net;
if ((p->flags & IP6_TNL_F_CAP_RCV) ||
((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
@@ -800,14 +802,12 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
rcu_read_unlock();
goto discard;
}
- secpath_reset(skb);
skb->mac_header = skb->network_header;
skb_reset_network_header(skb);
skb->protocol = htons(protocol);
- skb->pkt_type = PACKET_HOST;
memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
- __skb_tunnel_rx(skb, t->dev);
+ __skb_tunnel_rx(skb, t->dev, t->net);
err = dscp_ecn_decapsulate(t, ipv6h, skb);
if (unlikely(err)) {
@@ -895,7 +895,7 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t)
{
struct __ip6_tnl_parm *p = &t->parms;
int ret = 0;
- struct net *net = dev_net(t->dev);
+ struct net *net = t->net;
if (p->flags & IP6_TNL_F_CAP_XMIT) {
struct net_device *ldev = NULL;
@@ -945,8 +945,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
int encap_limit,
__u32 *pmtu)
{
- struct net *net = dev_net(dev);
struct ip6_tnl *t = netdev_priv(dev);
+ struct net *net = t->net;
struct net_device_stats *stats = &t->dev->stats;
struct ipv6hdr *ipv6h = ipv6_hdr(skb);
struct ipv6_tel_txoption opt;
@@ -996,6 +996,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
goto tx_err_dst_release;
}
+ skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
+
/*
* Okay, now see if we can stuff it in the buffer as-is.
*/
@@ -1013,7 +1015,6 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
consume_skb(skb);
skb = new_skb;
}
- skb_dst_drop(skb);
if (fl6->flowi6_mark) {
skb_dst_set(skb, dst);
ndst = NULL;
@@ -1027,6 +1028,12 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
init_tel_txopt(&opt, encap_limit);
ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
}
+
+ if (likely(!skb->encapsulation)) {
+ skb_reset_inner_headers(skb);
+ skb->encapsulation = 1;
+ }
+
skb_push(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
ipv6h = ipv6_hdr(skb);
@@ -1202,7 +1209,7 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
int strict = (ipv6_addr_type(&p->raddr) &
(IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
- struct rt6_info *rt = rt6_lookup(dev_net(dev),
+ struct rt6_info *rt = rt6_lookup(t->net,
&p->raddr, &p->laddr,
p->link, strict);
@@ -1251,7 +1258,7 @@ ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
- struct net *net = dev_net(t->dev);
+ struct net *net = t->net;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
int err;
@@ -1463,8 +1470,10 @@ static void ip6_tnl_dev_setup(struct net_device *dev)
dev->mtu-=8;
dev->flags |= IFF_NOARP;
dev->addr_len = sizeof(struct in6_addr);
- dev->features |= NETIF_F_NETNS_LOCAL;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
+ /* This perm addr will be used as the interface identifier by IPv6 */
+ dev->addr_assign_type = NET_ADDR_RANDOM;
+ eth_random_addr(dev->perm_addr);
}
@@ -1479,6 +1488,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
struct ip6_tnl *t = netdev_priv(dev);
t->dev = dev;
+ t->net = dev_net(dev);
dev->tstats = alloc_percpu(struct pcpu_tstats);
if (!dev->tstats)
return -ENOMEM;
@@ -1596,9 +1606,9 @@ static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[])
{
- struct ip6_tnl *t;
+ struct ip6_tnl *t = netdev_priv(dev);
struct __ip6_tnl_parm p;
- struct net *net = dev_net(dev);
+ struct net *net = t->net;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
if (dev == ip6n->fb_tnl_dev)
@@ -1699,14 +1709,24 @@ static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
static void __net_exit ip6_tnl_destroy_tunnels(struct ip6_tnl_net *ip6n)
{
+ struct net *net = dev_net(ip6n->fb_tnl_dev);
+ struct net_device *dev, *aux;
int h;
struct ip6_tnl *t;
LIST_HEAD(list);
+ for_each_netdev_safe(net, dev, aux)
+ if (dev->rtnl_link_ops == &ip6_link_ops)
+ unregister_netdevice_queue(dev, &list);
+
for (h = 0; h < HASH_SIZE; h++) {
t = rtnl_dereference(ip6n->tnls_r_l[h]);
while (t != NULL) {
- unregister_netdevice_queue(t->dev, &list);
+ /* If dev is in the same netns, it has already
+ * been added to the list by the previous loop.
+ */
+ if (!net_eq(dev_net(t->dev), net))
+ unregister_netdevice_queue(t->dev, &list);
t = rtnl_dereference(t->next);
}
}
@@ -1732,6 +1752,10 @@ static int __net_init ip6_tnl_init_net(struct net *net)
if (!ip6n->fb_tnl_dev)
goto err_alloc_dev;
dev_net_set(ip6n->fb_tnl_dev, net);
+ /* FB netdevice is special: we have one, and only one per netns.
+ * Allowing it to be moved to another netns is clearly unsafe.
+ */
+ ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;
err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
if (err < 0)
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 583e8d435f9..f365310bfcc 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -110,8 +110,8 @@ static struct kmem_cache *mrt_cachep __read_mostly;
static struct mr6_table *ip6mr_new_table(struct net *net, u32 id);
static void ip6mr_free_table(struct mr6_table *mrt);
-static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
- struct sk_buff *skb, struct mfc6_cache *cache);
+static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
+ struct sk_buff *skb, struct mfc6_cache *cache);
static int ip6mr_cache_report(struct mr6_table *mrt, struct sk_buff *pkt,
mifi_t mifi, int assert);
static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
@@ -259,10 +259,12 @@ static void __net_exit ip6mr_rules_exit(struct net *net)
{
struct mr6_table *mrt, *next;
+ rtnl_lock();
list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) {
list_del(&mrt->list);
ip6mr_free_table(mrt);
}
+ rtnl_unlock();
fib_rules_unregister(net->ipv6.mr6_rules_ops);
}
#else
@@ -289,7 +291,10 @@ static int __net_init ip6mr_rules_init(struct net *net)
static void __net_exit ip6mr_rules_exit(struct net *net)
{
+ rtnl_lock();
ip6mr_free_table(net->ipv6.mrt6);
+ net->ipv6.mrt6 = NULL;
+ rtnl_unlock();
}
#endif
@@ -667,9 +672,8 @@ static int pim6_rcv(struct sk_buff *skb)
skb_reset_network_header(skb);
skb->protocol = htons(ETH_P_IPV6);
skb->ip_summed = CHECKSUM_NONE;
- skb->pkt_type = PACKET_HOST;
- skb_tunnel_rx(skb, reg_dev);
+ skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));
netif_rx(skb);
@@ -2069,8 +2073,8 @@ static int ip6mr_find_vif(struct mr6_table *mrt, struct net_device *dev)
return ct;
}
-static int ip6_mr_forward(struct net *net, struct mr6_table *mrt,
- struct sk_buff *skb, struct mfc6_cache *cache)
+static void ip6_mr_forward(struct net *net, struct mr6_table *mrt,
+ struct sk_buff *skb, struct mfc6_cache *cache)
{
int psend = -1;
int vif, ct;
@@ -2151,12 +2155,11 @@ forward:
last_forward:
if (psend != -1) {
ip6mr_forward2(net, mrt, skb, cache, psend);
- return 0;
+ return;
}
dont_forward:
kfree_skb(skb);
- return 0;
}
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 7af5aee75d9..5636a912074 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -76,7 +76,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
return;
if (type == NDISC_REDIRECT)
- ip6_redirect(skb, net, 0, 0);
+ ip6_redirect(skb, net, skb->dev->ifindex, 0);
else
ip6_update_pmtu(skb, net, info, 0, 0);
xfrm_state_put(x);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 99cd65c715c..096cd67b737 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -44,6 +44,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
+#include <linux/pkt_sched.h>
#include <net/mld.h>
#include <linux/netfilter.h>
@@ -94,6 +95,7 @@ static void mld_ifc_event(struct inet6_dev *idev);
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr);
static void mld_clear_delrec(struct inet6_dev *idev);
+static bool mld_in_v1_mode(const struct inet6_dev *idev);
static int sf_setstate(struct ifmcaddr6 *pmc);
static void sf_markstate(struct ifmcaddr6 *pmc);
static void ip6_mc_clear_src(struct ifmcaddr6 *pmc);
@@ -106,14 +108,15 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
struct inet6_dev *idev);
-
-#define IGMP6_UNSOLICITED_IVAL (10*HZ)
#define MLD_QRV_DEFAULT 2
+/* RFC3810, 9.2. Query Interval */
+#define MLD_QI_DEFAULT (125 * HZ)
+/* RFC3810, 9.3. Query Response Interval */
+#define MLD_QRI_DEFAULT (10 * HZ)
-#define MLD_V1_SEEN(idev) (dev_net((idev)->dev)->ipv6.devconf_all->force_mld_version == 1 || \
- (idev)->cnf.force_mld_version == 1 || \
- ((idev)->mc_v1_seen && \
- time_before(jiffies, (idev)->mc_v1_seen)))
+/* RFC3810, 8.1 Query Version Distinctions */
+#define MLD_V1_QUERY_LEN 24
+#define MLD_V2_QUERY_LEN_MIN 28
#define IPV6_MLD_MAX_MSF 64
@@ -128,6 +131,18 @@ int sysctl_mld_max_msf __read_mostly = IPV6_MLD_MAX_MSF;
pmc != NULL; \
pmc = rcu_dereference(pmc->next))
+static int unsolicited_report_interval(struct inet6_dev *idev)
+{
+ int iv;
+
+ if (mld_in_v1_mode(idev))
+ iv = idev->cnf.mldv1_unsolicited_report_interval;
+ else
+ iv = idev->cnf.mldv2_unsolicited_report_interval;
+
+ return iv > 0 ? iv : 1;
+}
+
int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
{
struct net_device *dev = NULL;
@@ -676,7 +691,7 @@ static void igmp6_group_added(struct ifmcaddr6 *mc)
if (!(dev->flags & IFF_UP) || (mc->mca_flags & MAF_NOREPORT))
return;
- if (MLD_V1_SEEN(mc->idev)) {
+ if (mld_in_v1_mode(mc->idev)) {
igmp6_join_group(mc);
return;
}
@@ -984,29 +999,49 @@ bool ipv6_chk_mcast_addr(struct net_device *dev, const struct in6_addr *group,
static void mld_gq_start_timer(struct inet6_dev *idev)
{
- int tv = net_random() % idev->mc_maxdelay;
+ unsigned long tv = net_random() % idev->mc_maxdelay;
idev->mc_gq_running = 1;
if (!mod_timer(&idev->mc_gq_timer, jiffies+tv+2))
in6_dev_hold(idev);
}
-static void mld_ifc_start_timer(struct inet6_dev *idev, int delay)
+static void mld_gq_stop_timer(struct inet6_dev *idev)
{
- int tv = net_random() % delay;
+ idev->mc_gq_running = 0;
+ if (del_timer(&idev->mc_gq_timer))
+ __in6_dev_put(idev);
+}
+
+static void mld_ifc_start_timer(struct inet6_dev *idev, unsigned long delay)
+{
+ unsigned long tv = net_random() % delay;
if (!mod_timer(&idev->mc_ifc_timer, jiffies+tv+2))
in6_dev_hold(idev);
}
-static void mld_dad_start_timer(struct inet6_dev *idev, int delay)
+static void mld_ifc_stop_timer(struct inet6_dev *idev)
{
- int tv = net_random() % delay;
+ idev->mc_ifc_count = 0;
+ if (del_timer(&idev->mc_ifc_timer))
+ __in6_dev_put(idev);
+}
+
+static void mld_dad_start_timer(struct inet6_dev *idev, unsigned long delay)
+{
+ unsigned long tv = net_random() % delay;
if (!mod_timer(&idev->mc_dad_timer, jiffies+tv+2))
in6_dev_hold(idev);
}
+static void mld_dad_stop_timer(struct inet6_dev *idev)
+{
+ if (del_timer(&idev->mc_dad_timer))
+ __in6_dev_put(idev);
+}
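A short note on the hold/put pairing in the timer helpers above (an illustrative aside, not part of the patch):
/* mod_timer() returns 0 only when it arms a timer that was not already pending,
 * so in6_dev_hold() takes exactly one reference per armed timer; del_timer()
 * returns nonzero only when it cancels a pending timer, so __in6_dev_put()
 * releases that same reference.  When a timer fires instead of being cancelled,
 * its expiry handler is presumably responsible for dropping the reference.
 */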
+
/*
* IGMP handling (alias multicast ICMPv6 messages)
*/
@@ -1025,12 +1060,9 @@ static void igmp6_group_queried(struct ifmcaddr6 *ma, unsigned long resptime)
delay = ma->mca_timer.expires - jiffies;
}
- if (delay >= resptime) {
- if (resptime)
- delay = net_random() % resptime;
- else
- delay = 1;
- }
+ if (delay >= resptime)
+ delay = net_random() % resptime;
+
ma->mca_timer.expires = jiffies + delay;
if (!mod_timer(&ma->mca_timer, jiffies + delay))
atomic_inc(&ma->mca_refcnt);
@@ -1097,6 +1129,158 @@ static bool mld_marksources(struct ifmcaddr6 *pmc, int nsrcs,
return true;
}
+static int mld_force_mld_version(const struct inet6_dev *idev)
+{
+ /* Normally, both are 0 here. If enforcement of a particular version
+ * is in use, the per-device setting has lower precedence than the
+ * 'all' device setting (.../conf/all/force_mld_version).
+ */
+
+ if (dev_net(idev->dev)->ipv6.devconf_all->force_mld_version != 0)
+ return dev_net(idev->dev)->ipv6.devconf_all->force_mld_version;
+ else
+ return idev->cnf.force_mld_version;
+}
+
+static bool mld_in_v2_mode_only(const struct inet6_dev *idev)
+{
+ return mld_force_mld_version(idev) == 2;
+}
+
+static bool mld_in_v1_mode_only(const struct inet6_dev *idev)
+{
+ return mld_force_mld_version(idev) == 1;
+}
+
+static bool mld_in_v1_mode(const struct inet6_dev *idev)
+{
+ if (mld_in_v2_mode_only(idev))
+ return false;
+ if (mld_in_v1_mode_only(idev))
+ return true;
+ if (idev->mc_v1_seen && time_before(jiffies, idev->mc_v1_seen))
+ return true;
+
+ return false;
+}
+
+static void mld_set_v1_mode(struct inet6_dev *idev)
+{
+ /* RFC3810, relevant sections:
+ * - 9.1. Robustness Variable
+ * - 9.2. Query Interval
+ * - 9.3. Query Response Interval
+ * - 9.12. Older Version Querier Present Timeout
+ */
+ unsigned long switchback;
+
+ switchback = (idev->mc_qrv * idev->mc_qi) + idev->mc_qri;
+
+ idev->mc_v1_seen = jiffies + switchback;
+}
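A worked example of the switchback interval above (illustrative, not part of the patch), using the defaults defined earlier in this file:
/* mc_qrv = MLD_QRV_DEFAULT = 2, mc_qi = MLD_QI_DEFAULT = 125*HZ,
 * mc_qri = MLD_QRI_DEFAULT = 10*HZ
 * switchback = 2 * 125*HZ + 10*HZ = 260*HZ, i.e. the host stays in
 * MLDv1 mode for 260 seconds after the last MLDv1 query was seen.
 */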
+
+static void mld_update_qrv(struct inet6_dev *idev,
+ const struct mld2_query *mlh2)
+{
+ /* RFC3810, relevant sections:
+ * - 5.1.8. QRV (Querier's Robustness Variable)
+ * - 9.1. Robustness Variable
+ */
+
+ /* The value of the Robustness Variable MUST NOT be zero,
+ * and SHOULD NOT be one. Catch this here if we ever run
+ * into such a case in the future.
+ */
+ WARN_ON(idev->mc_qrv == 0);
+
+ if (mlh2->mld2q_qrv > 0)
+ idev->mc_qrv = mlh2->mld2q_qrv;
+
+ if (unlikely(idev->mc_qrv < 2)) {
+ net_warn_ratelimited("IPv6: MLD: clamping QRV from %u to %u!\n",
+ idev->mc_qrv, MLD_QRV_DEFAULT);
+ idev->mc_qrv = MLD_QRV_DEFAULT;
+ }
+}
+
+static void mld_update_qi(struct inet6_dev *idev,
+ const struct mld2_query *mlh2)
+{
+ /* RFC3810, relevant sections:
+ * - 5.1.9. QQIC (Querier's Query Interval Code)
+ * - 9.2. Query Interval
+ * - 9.12. Older Version Querier Present Timeout
+ * (the [Query Interval] in the last Query received)
+ */
+ unsigned long mc_qqi;
+
+ if (mlh2->mld2q_qqic < 128) {
+ mc_qqi = mlh2->mld2q_qqic;
+ } else {
+ unsigned long mc_man, mc_exp;
+
+ mc_exp = MLDV2_QQIC_EXP(mlh2->mld2q_qqic);
+ mc_man = MLDV2_QQIC_MAN(mlh2->mld2q_qqic);
+
+ mc_qqi = (mc_man | 0x10) << (mc_exp + 3);
+ }
+
+ idev->mc_qi = mc_qqi * HZ;
+}
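A quick worked decode of the exponential QQIC form above (illustrative, not part of the patch; assumes the usual RFC3810 layout where MLDV2_QQIC_EXP() takes bits 6-4 and MLDV2_QQIC_MAN() bits 3-0):
/* mld2q_qqic = 0x8F: mc_exp = 0, mc_man = 0xF
 * mc_qqi = (0xF | 0x10) << (0 + 3) = 0x1F << 3 = 248  -> mc_qi = 248*HZ (248 s)
 * values below 128 are taken literally, e.g. 125 -> 125 seconds
 */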
+
+static void mld_update_qri(struct inet6_dev *idev,
+ const struct mld2_query *mlh2)
+{
+ /* RFC3810, relevant sections:
+ * - 5.1.3. Maximum Response Code
+ * - 9.3. Query Response Interval
+ */
+ idev->mc_qri = msecs_to_jiffies(mldv2_mrc(mlh2));
+}
+
+static int mld_process_v1(struct inet6_dev *idev, struct mld_msg *mld,
+ unsigned long *max_delay)
+{
+ unsigned long mldv1_md;
+
+ /* Ignore v1 queries */
+ if (mld_in_v2_mode_only(idev))
+ return -EINVAL;
+
+ /* MLDv1 router present */
+ mldv1_md = ntohs(mld->mld_maxdelay);
+ *max_delay = max(msecs_to_jiffies(mldv1_md), 1UL);
+
+ mld_set_v1_mode(idev);
+
+ /* cancel MLDv2 report timer */
+ mld_gq_stop_timer(idev);
+ /* cancel the interface change timer */
+ mld_ifc_stop_timer(idev);
+ /* clear deleted report items */
+ mld_clear_delrec(idev);
+
+ return 0;
+}
+
+static int mld_process_v2(struct inet6_dev *idev, struct mld2_query *mld,
+ unsigned long *max_delay)
+{
+ /* hosts need to stay in MLDv1 mode, discard MLDv2 queries */
+ if (mld_in_v1_mode(idev))
+ return -EINVAL;
+
+ *max_delay = max(msecs_to_jiffies(mldv2_mrc(mld)), 1UL);
+
+ mld_update_qrv(idev, mld);
+ mld_update_qi(idev, mld);
+ mld_update_qri(idev, mld);
+
+ idev->mc_maxdelay = *max_delay;
+
+ return 0;
+}
+
/* called with rcu_read_lock() */
int igmp6_event_query(struct sk_buff *skb)
{
@@ -1108,7 +1292,7 @@ int igmp6_event_query(struct sk_buff *skb)
struct mld_msg *mld;
int group_type;
int mark = 0;
- int len;
+ int len, err;
if (!pskb_may_pull(skb, sizeof(struct in6_addr)))
return -EINVAL;
@@ -1122,7 +1306,6 @@ int igmp6_event_query(struct sk_buff *skb)
return -EINVAL;
idev = __in6_dev_get(skb->dev);
-
if (idev == NULL)
return 0;
@@ -1134,35 +1317,23 @@ int igmp6_event_query(struct sk_buff *skb)
!(group_type&IPV6_ADDR_MULTICAST))
return -EINVAL;
- if (len == 24) {
- int switchback;
- /* MLDv1 router present */
-
- /* Translate milliseconds to jiffies */
- max_delay = (ntohs(mld->mld_maxdelay)*HZ)/1000;
-
- switchback = (idev->mc_qrv + 1) * max_delay;
- idev->mc_v1_seen = jiffies + switchback;
-
- /* cancel the interface change timer */
- idev->mc_ifc_count = 0;
- if (del_timer(&idev->mc_ifc_timer))
- __in6_dev_put(idev);
- /* clear deleted report items */
- mld_clear_delrec(idev);
- } else if (len >= 28) {
+ if (len == MLD_V1_QUERY_LEN) {
+ err = mld_process_v1(idev, mld, &max_delay);
+ if (err < 0)
+ return err;
+ } else if (len >= MLD_V2_QUERY_LEN_MIN) {
int srcs_offset = sizeof(struct mld2_query) -
sizeof(struct icmp6hdr);
+
if (!pskb_may_pull(skb, srcs_offset))
return -EINVAL;
mlh2 = (struct mld2_query *)skb_transport_header(skb);
- max_delay = (MLDV2_MRC(ntohs(mlh2->mld2q_mrc))*HZ)/1000;
- if (!max_delay)
- max_delay = 1;
- idev->mc_maxdelay = max_delay;
- if (mlh2->mld2q_qrv)
- idev->mc_qrv = mlh2->mld2q_qrv;
+
+ err = mld_process_v2(idev, mlh2, &max_delay);
+ if (err < 0)
+ return err;
+
if (group_type == IPV6_ADDR_ANY) { /* general query */
if (mlh2->mld2q_nsrcs)
return -EINVAL; /* no sources allowed */
@@ -1376,6 +1547,7 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
if (!skb)
return NULL;
+ skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, hlen);
if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
@@ -1769,7 +1941,7 @@ static void igmp6_send(struct in6_addr *addr, struct net_device *dev, int type)
rcu_read_unlock();
return;
}
-
+ skb->priority = TC_PRIO_CONTROL;
skb_reserve(skb, hlen);
if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) {
@@ -1827,7 +1999,7 @@ err_out:
static void mld_resend_report(struct inet6_dev *idev)
{
- if (MLD_V1_SEEN(idev)) {
+ if (mld_in_v1_mode(idev)) {
struct ifmcaddr6 *mcaddr;
read_lock_bh(&idev->lock);
for (mcaddr = idev->mc_list; mcaddr; mcaddr = mcaddr->next) {
@@ -1891,7 +2063,7 @@ static int ip6_mc_del1_src(struct ifmcaddr6 *pmc, int sfmode,
else
pmc->mca_sources = psf->sf_next;
if (psf->sf_oldin && !(pmc->mca_flags & MAF_NOREPORT) &&
- !MLD_V1_SEEN(idev)) {
+ !mld_in_v1_mode(idev)) {
psf->sf_crcount = idev->mc_qrv;
psf->sf_next = pmc->mca_tomb;
pmc->mca_tomb = psf;
@@ -2156,7 +2328,7 @@ static void igmp6_join_group(struct ifmcaddr6 *ma)
igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
- delay = net_random() % IGMP6_UNSOLICITED_IVAL;
+ delay = net_random() % unsolicited_report_interval(ma->idev);
spin_lock_bh(&ma->mca_lock);
if (del_timer(&ma->mca_timer)) {
@@ -2191,7 +2363,7 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
static void igmp6_leave_group(struct ifmcaddr6 *ma)
{
- if (MLD_V1_SEEN(ma->idev)) {
+ if (mld_in_v1_mode(ma->idev)) {
if (ma->mca_flags & MAF_LAST_REPORTER)
igmp6_send(&ma->mca_addr, ma->idev->dev,
ICMPV6_MGM_REDUCTION);
@@ -2225,7 +2397,7 @@ static void mld_ifc_timer_expire(unsigned long data)
static void mld_ifc_event(struct inet6_dev *idev)
{
- if (MLD_V1_SEEN(idev))
+ if (mld_in_v1_mode(idev))
return;
idev->mc_ifc_count = idev->mc_qrv;
mld_ifc_start_timer(idev, 1);
@@ -2236,7 +2408,7 @@ static void igmp6_timer_handler(unsigned long data)
{
struct ifmcaddr6 *ma = (struct ifmcaddr6 *) data;
- if (MLD_V1_SEEN(ma->idev))
+ if (mld_in_v1_mode(ma->idev))
igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
else
mld_send_report(ma->idev, ma);
@@ -2276,14 +2448,9 @@ void ipv6_mc_down(struct inet6_dev *idev)
/* Withdraw multicast list */
read_lock_bh(&idev->lock);
- idev->mc_ifc_count = 0;
- if (del_timer(&idev->mc_ifc_timer))
- __in6_dev_put(idev);
- idev->mc_gq_running = 0;
- if (del_timer(&idev->mc_gq_timer))
- __in6_dev_put(idev);
- if (del_timer(&idev->mc_dad_timer))
- __in6_dev_put(idev);
+ mld_ifc_stop_timer(idev);
+ mld_gq_stop_timer(idev);
+ mld_dad_stop_timer(idev);
for (i = idev->mc_list; i; i=i->next)
igmp6_group_dropped(i);
@@ -2322,8 +2489,12 @@ void ipv6_mc_init_dev(struct inet6_dev *idev)
(unsigned long)idev);
setup_timer(&idev->mc_dad_timer, mld_dad_timer_expire,
(unsigned long)idev);
+
idev->mc_qrv = MLD_QRV_DEFAULT;
- idev->mc_maxdelay = IGMP6_UNSOLICITED_IVAL;
+ idev->mc_qi = MLD_QI_DEFAULT;
+ idev->mc_qri = MLD_QRI_DEFAULT;
+
+ idev->mc_maxdelay = unsolicited_report_interval(idev);
idev->mc_v1_seen = 0;
write_unlock_bh(&idev->lock);
}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 24c03396e00..12179457b2c 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -372,14 +372,11 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
int tlen = dev->needed_tailroom;
struct sock *sk = dev_net(dev)->ipv6.ndisc_sk;
struct sk_buff *skb;
- int err;
- skb = sock_alloc_send_skb(sk,
- hlen + sizeof(struct ipv6hdr) + len + tlen,
- 1, &err);
+ skb = alloc_skb(hlen + sizeof(struct ipv6hdr) + len + tlen, GFP_ATOMIC);
if (!skb) {
- ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb, err=%d\n",
- __func__, err);
+ ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb\n",
+ __func__);
return NULL;
}
@@ -389,6 +386,11 @@ static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
skb_reserve(skb, hlen + sizeof(struct ipv6hdr));
skb_reset_transport_header(skb);
+ /* Manually assign socket ownership as we avoid calling
+ * sock_alloc_send_pskb() to bypass wmem buffer limits
+ */
+ skb_set_owner_w(skb, sk);
+
return skb;
}
@@ -428,7 +430,6 @@ static void ndisc_send_skb(struct sk_buff *skb,
type = icmp6h->icmp6_type;
if (!dst) {
- struct sock *sk = net->ipv6.ndisc_sk;
struct flowi6 fl6;
icmpv6_flow_init(sk, &fl6, type, saddr, daddr, skb->dev->ifindex);
@@ -462,10 +463,10 @@ static void ndisc_send_skb(struct sk_buff *skb,
rcu_read_unlock();
}
-static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
- const struct in6_addr *daddr,
- const struct in6_addr *solicited_addr,
- bool router, bool solicited, bool override, bool inc_opt)
+void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
+ const struct in6_addr *daddr,
+ const struct in6_addr *solicited_addr,
+ bool router, bool solicited, bool override, bool inc_opt)
{
struct sk_buff *skb;
struct in6_addr tmpaddr;
@@ -663,9 +664,7 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
}
ndisc_send_ns(dev, neigh, target, target, saddr);
} else if ((probes -= neigh->parms->app_probes) < 0) {
-#ifdef CONFIG_ARPD
neigh_app_ns(neigh);
-#endif
} else {
addrconf_addr_solict_mult(target, &mcaddr);
ndisc_send_ns(dev, NULL, target, &mcaddr, saddr);
@@ -1369,8 +1368,11 @@ static void ndisc_redirect_rcv(struct sk_buff *skb)
if (!ndisc_parse_options(msg->opt, ndoptlen, &ndopts))
return;
- if (!ndopts.nd_opts_rh)
+ if (!ndopts.nd_opts_rh) {
+ ip6_redirect_no_header(skb, dev_net(skb->dev),
+ skb->dev->ifindex, 0);
return;
+ }
hdr = (u8 *)ndopts.nd_opts_rh;
hdr += 8;
@@ -1517,10 +1519,27 @@ static void pndisc_redo(struct sk_buff *skb)
kfree_skb(skb);
}
+static bool ndisc_suppress_frag_ndisc(struct sk_buff *skb)
+{
+ struct inet6_dev *idev = __in6_dev_get(skb->dev);
+
+ if (!idev)
+ return true;
+ if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED &&
+ idev->cnf.suppress_frag_ndisc) {
+ net_warn_ratelimited("Received fragmented ndisc packet. Carefully consider disabling suppress_frag_ndisc.\n");
+ return true;
+ }
+ return false;
+}
+
int ndisc_rcv(struct sk_buff *skb)
{
struct nd_msg *msg;
+ if (ndisc_suppress_frag_ndisc(skb))
+ return 0;
+
if (skb_linearize(skb))
return 0;
@@ -1576,7 +1595,7 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
switch (event) {
case NETDEV_CHANGEADDR:
neigh_changeaddr(&nd_tbl, dev);
- fib6_run_gc(~0UL, net);
+ fib6_run_gc(0, net, false);
idev = in6_dev_get(dev);
if (!idev)
break;
@@ -1586,7 +1605,7 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
break;
case NETDEV_DOWN:
neigh_ifdown(&nd_tbl, dev);
- fib6_run_gc(~0UL, net);
+ fib6_run_gc(0, net, false);
break;
case NETDEV_NOTIFY_PEERS:
ndisc_send_unsol_na(dev);
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 4433ab40e7d..a7f842b29b6 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -153,6 +153,19 @@ config IP6_NF_TARGET_REJECT
To compile it as a module, choose M here. If unsure, say N.
+config IP6_NF_TARGET_SYNPROXY
+ tristate "SYNPROXY target support"
+ depends on NF_CONNTRACK && NETFILTER_ADVANCED
+ select NETFILTER_SYNPROXY
+ select SYN_COOKIES
+ help
+ The SYNPROXY target allows you to intercept TCP connections and
+ establish them using syncookies before they are passed on to the
+ server. This avoids conntrack and server resource usage
+ during SYN-flood attacks.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config IP6_NF_MANGLE
tristate "Packet mangling"
default m if NETFILTER_ADVANCED=n
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 2d11fcc2cf3..2b53738f798 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -14,7 +14,7 @@ obj-$(CONFIG_NF_NAT_IPV6) += ip6table_nat.o
nf_conntrack_ipv6-y := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
# l3 independent conntrack
-obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_defrag_ipv6.o
+obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o
nf_nat_ipv6-y := nf_nat_l3proto_ipv6.o nf_nat_proto_icmpv6.o
obj-$(CONFIG_NF_NAT_IPV6) += nf_nat_ipv6.o
@@ -37,3 +37,4 @@ obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
obj-$(CONFIG_IP6_NF_TARGET_MASQUERADE) += ip6t_MASQUERADE.o
obj-$(CONFIG_IP6_NF_TARGET_NPT) += ip6t_NPT.o
obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
+obj-$(CONFIG_IP6_NF_TARGET_SYNPROXY) += ip6t_SYNPROXY.o
diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c
index 47bff610751..3e4e92d5e15 100644
--- a/net/ipv6/netfilter/ip6t_MASQUERADE.c
+++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c
@@ -76,7 +76,7 @@ static int masq_device_event(struct notifier_block *this,
if (event == NETDEV_DOWN)
nf_ct_iterate_cleanup(net, device_cmp,
- (void *)(long)dev->ifindex);
+ (void *)(long)dev->ifindex, 0, 0);
return NOTIFY_DONE;
}
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 70f9abc0efe..56eef30ee5f 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -169,7 +169,25 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
nf_ct_attach(nskb, oldskb);
- ip6_local_out(nskb);
+#ifdef CONFIG_BRIDGE_NETFILTER
+ /* If we use ip6_local_out for bridged traffic, the MAC source on
+ * the RST will be ours, instead of the destination's. This confuses
+ * some routers/firewalls, and they drop the packet. So we need to
+ * build the eth header using the original destination's MAC as the
+ * source, and send the RST packet directly.
+ */
+ if (oldskb->nf_bridge) {
+ struct ethhdr *oeth = eth_hdr(oldskb);
+ nskb->dev = oldskb->nf_bridge->physindev;
+ nskb->protocol = htons(ETH_P_IPV6);
+ ip6h->payload_len = htons(sizeof(struct tcphdr));
+ if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
+ oeth->h_source, oeth->h_dest, nskb->len) < 0)
+ return;
+ dev_queue_xmit(nskb);
+ } else
+#endif
+ ip6_local_out(nskb);
}
static inline void
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
new file mode 100644
index 00000000000..19cfea8dbca
--- /dev/null
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -0,0 +1,499 @@
+/*
+ * Copyright (c) 2013 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <net/ip6_checksum.h>
+#include <net/ip6_route.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_SYNPROXY.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netfilter/nf_conntrack_synproxy.h>
+
+static struct ipv6hdr *
+synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr,
+ const struct in6_addr *daddr)
+{
+ struct ipv6hdr *iph;
+
+ skb_reset_network_header(skb);
+ iph = (struct ipv6hdr *)skb_put(skb, sizeof(*iph));
+ ip6_flow_hdr(iph, 0, 0);
+ iph->hop_limit = 64; //XXX
+ iph->nexthdr = IPPROTO_TCP;
+ iph->saddr = *saddr;
+ iph->daddr = *daddr;
+
+ return iph;
+}
+
+static void
+synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb,
+ struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
+ struct ipv6hdr *niph, struct tcphdr *nth,
+ unsigned int tcp_hdr_size)
+{
+ struct net *net = nf_ct_net((struct nf_conn *)nfct);
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+
+ nth->check = ~tcp_v6_check(tcp_hdr_size, &niph->saddr, &niph->daddr, 0);
+ nskb->ip_summed = CHECKSUM_PARTIAL;
+ nskb->csum_start = (unsigned char *)nth - nskb->head;
+ nskb->csum_offset = offsetof(struct tcphdr, check);
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_proto = IPPROTO_TCP;
+ fl6.saddr = niph->saddr;
+ fl6.daddr = niph->daddr;
+ fl6.fl6_sport = nth->source;
+ fl6.fl6_dport = nth->dest;
+ security_skb_classify_flow((struct sk_buff *)skb, flowi6_to_flowi(&fl6));
+ dst = ip6_route_output(net, NULL, &fl6);
+ if (dst == NULL || dst->error) {
+ dst_release(dst);
+ goto free_nskb;
+ }
+ dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
+ if (IS_ERR(dst))
+ goto free_nskb;
+
+ skb_dst_set(nskb, dst);
+
+ if (nfct) {
+ nskb->nfct = nfct;
+ nskb->nfctinfo = ctinfo;
+ nf_conntrack_get(nfct);
+ }
+
+ ip6_local_out(nskb);
+ return;
+
+free_nskb:
+ kfree_skb(nskb);
+}
+
+static void
+synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
+ const struct synproxy_options *opts)
+{
+ struct sk_buff *nskb;
+ struct ipv6hdr *iph, *niph;
+ struct tcphdr *nth;
+ unsigned int tcp_hdr_size;
+ u16 mss = opts->mss;
+
+ iph = ipv6_hdr(skb);
+
+ tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
+ nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
+ GFP_ATOMIC);
+ if (nskb == NULL)
+ return;
+ skb_reserve(nskb, MAX_TCP_HEADER);
+
+ niph = synproxy_build_ip(nskb, &iph->daddr, &iph->saddr);
+
+ skb_reset_transport_header(nskb);
+ nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth->source = th->dest;
+ nth->dest = th->source;
+ nth->seq = htonl(__cookie_v6_init_sequence(iph, th, &mss));
+ nth->ack_seq = htonl(ntohl(th->seq) + 1);
+ tcp_flag_word(nth) = TCP_FLAG_SYN | TCP_FLAG_ACK;
+ if (opts->options & XT_SYNPROXY_OPT_ECN)
+ tcp_flag_word(nth) |= TCP_FLAG_ECE;
+ nth->doff = tcp_hdr_size / 4;
+ nth->window = 0;
+ nth->check = 0;
+ nth->urg_ptr = 0;
+
+ synproxy_build_options(nth, opts);
+
+ synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
+ niph, nth, tcp_hdr_size);
+}
+
+static void
+synproxy_send_server_syn(const struct synproxy_net *snet,
+ const struct sk_buff *skb, const struct tcphdr *th,
+ const struct synproxy_options *opts, u32 recv_seq)
+{
+ struct sk_buff *nskb;
+ struct ipv6hdr *iph, *niph;
+ struct tcphdr *nth;
+ unsigned int tcp_hdr_size;
+
+ iph = ipv6_hdr(skb);
+
+ tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
+ nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
+ GFP_ATOMIC);
+ if (nskb == NULL)
+ return;
+ skb_reserve(nskb, MAX_TCP_HEADER);
+
+ niph = synproxy_build_ip(nskb, &iph->saddr, &iph->daddr);
+
+ skb_reset_transport_header(nskb);
+ nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth->source = th->source;
+ nth->dest = th->dest;
+ nth->seq = htonl(recv_seq - 1);
+ /* ack_seq is used to relay our ISN to the synproxy hook to initialize
+ * sequence number translation once a connection tracking entry exists.
+ */
+ nth->ack_seq = htonl(ntohl(th->ack_seq) - 1);
+ tcp_flag_word(nth) = TCP_FLAG_SYN;
+ if (opts->options & XT_SYNPROXY_OPT_ECN)
+ tcp_flag_word(nth) |= TCP_FLAG_ECE | TCP_FLAG_CWR;
+ nth->doff = tcp_hdr_size / 4;
+ nth->window = th->window;
+ nth->check = 0;
+ nth->urg_ptr = 0;
+
+ synproxy_build_options(nth, opts);
+
+ synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
+ niph, nth, tcp_hdr_size);
+}
+
+static void
+synproxy_send_server_ack(const struct synproxy_net *snet,
+ const struct ip_ct_tcp *state,
+ const struct sk_buff *skb, const struct tcphdr *th,
+ const struct synproxy_options *opts)
+{
+ struct sk_buff *nskb;
+ struct ipv6hdr *iph, *niph;
+ struct tcphdr *nth;
+ unsigned int tcp_hdr_size;
+
+ iph = ipv6_hdr(skb);
+
+ tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
+ nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
+ GFP_ATOMIC);
+ if (nskb == NULL)
+ return;
+ skb_reserve(nskb, MAX_TCP_HEADER);
+
+ niph = synproxy_build_ip(nskb, &iph->daddr, &iph->saddr);
+
+ skb_reset_transport_header(nskb);
+ nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth->source = th->dest;
+ nth->dest = th->source;
+ nth->seq = htonl(ntohl(th->ack_seq));
+ nth->ack_seq = htonl(ntohl(th->seq) + 1);
+ tcp_flag_word(nth) = TCP_FLAG_ACK;
+ nth->doff = tcp_hdr_size / 4;
+ nth->window = htons(state->seen[IP_CT_DIR_ORIGINAL].td_maxwin);
+ nth->check = 0;
+ nth->urg_ptr = 0;
+
+ synproxy_build_options(nth, opts);
+
+ synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+}
+
+static void
+synproxy_send_client_ack(const struct synproxy_net *snet,
+ const struct sk_buff *skb, const struct tcphdr *th,
+ const struct synproxy_options *opts)
+{
+ struct sk_buff *nskb;
+ struct ipv6hdr *iph, *niph;
+ struct tcphdr *nth;
+ unsigned int tcp_hdr_size;
+
+ iph = ipv6_hdr(skb);
+
+ tcp_hdr_size = sizeof(*nth) + synproxy_options_size(opts);
+ nskb = alloc_skb(sizeof(*niph) + tcp_hdr_size + MAX_TCP_HEADER,
+ GFP_ATOMIC);
+ if (nskb == NULL)
+ return;
+ skb_reserve(nskb, MAX_TCP_HEADER);
+
+ niph = synproxy_build_ip(nskb, &iph->saddr, &iph->daddr);
+
+ skb_reset_transport_header(nskb);
+ nth = (struct tcphdr *)skb_put(nskb, tcp_hdr_size);
+ nth->source = th->source;
+ nth->dest = th->dest;
+ nth->seq = htonl(ntohl(th->seq) + 1);
+ nth->ack_seq = th->ack_seq;
+ tcp_flag_word(nth) = TCP_FLAG_ACK;
+ nth->doff = tcp_hdr_size / 4;
+ nth->window = ntohs(htons(th->window) >> opts->wscale);
+ nth->check = 0;
+ nth->urg_ptr = 0;
+
+ synproxy_build_options(nth, opts);
+
+ synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
+}
+
+static bool
+synproxy_recv_client_ack(const struct synproxy_net *snet,
+ const struct sk_buff *skb, const struct tcphdr *th,
+ struct synproxy_options *opts, u32 recv_seq)
+{
+ int mss;
+
+ mss = __cookie_v6_check(ipv6_hdr(skb), th, ntohl(th->ack_seq) - 1);
+ if (mss == 0) {
+ this_cpu_inc(snet->stats->cookie_invalid);
+ return false;
+ }
+
+ this_cpu_inc(snet->stats->cookie_valid);
+ opts->mss = mss;
+
+ if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP)
+ synproxy_check_timestamp_cookie(opts);
+
+ synproxy_send_server_syn(snet, skb, th, opts, recv_seq);
+ return true;
+}
+
+static unsigned int
+synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_synproxy_info *info = par->targinfo;
+ struct synproxy_net *snet = synproxy_pernet(dev_net(par->in));
+ struct synproxy_options opts = {};
+ struct tcphdr *th, _th;
+
+ if (nf_ip6_checksum(skb, par->hooknum, par->thoff, IPPROTO_TCP))
+ return NF_DROP;
+
+ th = skb_header_pointer(skb, par->thoff, sizeof(_th), &_th);
+ if (th == NULL)
+ return NF_DROP;
+
+ synproxy_parse_options(skb, par->thoff, th, &opts);
+
+ if (th->syn && !(th->ack || th->fin || th->rst)) {
+ /* Initial SYN from client */
+ this_cpu_inc(snet->stats->syn_received);
+
+ if (th->ece && th->cwr)
+ opts.options |= XT_SYNPROXY_OPT_ECN;
+
+ opts.options &= info->options;
+ if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
+ synproxy_init_timestamp_cookie(info, &opts);
+ else
+ opts.options &= ~(XT_SYNPROXY_OPT_WSCALE |
+ XT_SYNPROXY_OPT_SACK_PERM |
+ XT_SYNPROXY_OPT_ECN);
+
+ synproxy_send_client_synack(skb, th, &opts);
+ return NF_DROP;
+
+ } else if (th->ack && !(th->fin || th->rst || th->syn)) {
+ /* ACK from client */
+ synproxy_recv_client_ack(snet, skb, th, &opts, ntohl(th->seq));
+ return NF_DROP;
+ }
+
+ return XT_CONTINUE;
+}
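
The target above only ever acts on two client-side packets: a bare SYN, which is answered directly with a cookie SYN-ACK and dropped, and a bare ACK, which is validated against the cookie, relayed to the server as a new SYN, and dropped; everything else falls through with XT_CONTINUE. A minimal userspace sketch of that classification using the Linux UAPI tcphdr layout (classify() and the verdict names are made up for illustration; this is not kernel code):

#include <stdio.h>
#include <linux/tcp.h>

enum verdict {
	VERDICT_SYNACK_TO_CLIENT,	/* answer with cookie SYN-ACK, then NF_DROP */
	VERDICT_RELAY_SYN_TO_SERVER,	/* cookie ACK: open the real connection, then NF_DROP */
	VERDICT_PASS			/* XT_CONTINUE */
};

static enum verdict classify(const struct tcphdr *th)
{
	if (th->syn && !(th->ack || th->fin || th->rst))
		return VERDICT_SYNACK_TO_CLIENT;
	if (th->ack && !(th->fin || th->rst || th->syn))
		return VERDICT_RELAY_SYN_TO_SERVER;
	return VERDICT_PASS;
}

int main(void)
{
	struct tcphdr syn = { .syn = 1 };
	struct tcphdr ack = { .ack = 1 };

	printf("%d %d\n", classify(&syn), classify(&ack));	/* 0 1 */
	return 0;
}

In a deployment this target is typically paired with a raw-table CT --notrack rule so that only untracked or invalid TCP ever reaches it.
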
+
+static unsigned int ipv6_synproxy_hook(unsigned int hooknum,
+ struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct synproxy_net *snet = synproxy_pernet(dev_net(in ? : out));
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+ struct nf_conn_synproxy *synproxy;
+ struct synproxy_options opts = {};
+ const struct ip_ct_tcp *state;
+ struct tcphdr *th, _th;
+ __be16 frag_off;
+ u8 nexthdr;
+ int thoff;
+
+ ct = nf_ct_get(skb, &ctinfo);
+ if (ct == NULL)
+ return NF_ACCEPT;
+
+ synproxy = nfct_synproxy(ct);
+ if (synproxy == NULL)
+ return NF_ACCEPT;
+
+ if (nf_is_loopback_packet(skb))
+ return NF_ACCEPT;
+
+ nexthdr = ipv6_hdr(skb)->nexthdr;
+ thoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
+ &frag_off);
+ if (thoff < 0)
+ return NF_ACCEPT;
+
+ th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
+ if (th == NULL)
+ return NF_DROP;
+
+ state = &ct->proto.tcp;
+ switch (state->state) {
+ case TCP_CONNTRACK_CLOSE:
+ if (th->rst && !test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+ nf_ct_seqadj_init(ct, ctinfo, synproxy->isn -
+ ntohl(th->seq) + 1);
+ break;
+ }
+
+ if (!th->syn || th->ack ||
+ CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
+ break;
+
+ /* Reopened connection - reset the sequence number and timestamp
+ * adjustments; they will be initialized again once the connection is
+ * reestablished.
+ */
+ nf_ct_seqadj_init(ct, ctinfo, 0);
+ synproxy->tsoff = 0;
+ this_cpu_inc(snet->stats->conn_reopened);
+
+ /* fall through */
+ case TCP_CONNTRACK_SYN_SENT:
+ synproxy_parse_options(skb, thoff, th, &opts);
+
+ if (!th->syn && th->ack &&
+ CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
+ /* Keep-Alives are sent with SEG.SEQ = SND.NXT-1,
+ * therefore we need to add 1 to make the SYN sequence
+ * number match that of the first SYN.
+ */
+ if (synproxy_recv_client_ack(snet, skb, th, &opts,
+ ntohl(th->seq) + 1))
+ this_cpu_inc(snet->stats->cookie_retrans);
+
+ return NF_DROP;
+ }
+
+ synproxy->isn = ntohl(th->ack_seq);
+ if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
+ synproxy->its = opts.tsecr;
+ break;
+ case TCP_CONNTRACK_SYN_RECV:
+ if (!th->syn || !th->ack)
+ break;
+
+ synproxy_parse_options(skb, thoff, th, &opts);
+ if (opts.options & XT_SYNPROXY_OPT_TIMESTAMP)
+ synproxy->tsoff = opts.tsval - synproxy->its;
+
+ opts.options &= ~(XT_SYNPROXY_OPT_MSS |
+ XT_SYNPROXY_OPT_WSCALE |
+ XT_SYNPROXY_OPT_SACK_PERM);
+
+ swap(opts.tsval, opts.tsecr);
+ synproxy_send_server_ack(snet, state, skb, th, &opts);
+
+ nf_ct_seqadj_init(ct, ctinfo, synproxy->isn - ntohl(th->seq));
+
+ swap(opts.tsval, opts.tsecr);
+ synproxy_send_client_ack(snet, skb, th, &opts);
+
+ consume_skb(skb);
+ return NF_STOLEN;
+ default:
+ break;
+ }
+
+ synproxy_tstamp_adjust(skb, thoff, th, ct, ctinfo, synproxy);
+ return NF_ACCEPT;
+}
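
The SYN_RECV branch above is where sequence-number translation is set up: synproxy->isn holds the ISN the proxy handed the client (the SYN cookie), th->seq is the real server's ISN from its SYN-ACK, and their difference becomes the conntrack seqadj offset that makes the two half-connections line up. A toy illustration of that arithmetic with made-up values (proxy_isn and server_isn are hypothetical names; unsigned wraparound behaves the same here as in the kernel):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical example values */
	uint32_t proxy_isn  = 1000u;		/* cookie ISN given to the client */
	uint32_t server_isn = 4000000000u;	/* ISN from the real server's SYN-ACK */

	/* same expression as nf_ct_seqadj_init(..., synproxy->isn - ntohl(th->seq)) */
	uint32_t offset = proxy_isn - server_isn;

	/* every server->client sequence number is shifted by this offset */
	uint32_t first_data_seq = server_isn + 1;

	printf("%u\n", first_data_seq + offset);	/* prints 1001 == proxy_isn + 1 */
	return 0;
}
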
+
+static int synproxy_tg6_check(const struct xt_tgchk_param *par)
+{
+ const struct ip6t_entry *e = par->entryinfo;
+
+ if (!(e->ipv6.flags & IP6T_F_PROTO) ||
+ e->ipv6.proto != IPPROTO_TCP ||
+ e->ipv6.invflags & XT_INV_PROTO)
+ return -EINVAL;
+
+ return nf_ct_l3proto_try_module_get(par->family);
+}
+
+static void synproxy_tg6_destroy(const struct xt_tgdtor_param *par)
+{
+ nf_ct_l3proto_module_put(par->family);
+}
+
+static struct xt_target synproxy_tg6_reg __read_mostly = {
+ .name = "SYNPROXY",
+ .family = NFPROTO_IPV6,
+ .target = synproxy_tg6,
+ .targetsize = sizeof(struct xt_synproxy_info),
+ .checkentry = synproxy_tg6_check,
+ .destroy = synproxy_tg6_destroy,
+ .me = THIS_MODULE,
+};
+
+static struct nf_hook_ops ipv6_synproxy_ops[] __read_mostly = {
+ {
+ .hook = ipv6_synproxy_hook,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_LOCAL_IN,
+ .priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1,
+ },
+ {
+ .hook = ipv6_synproxy_hook,
+ .owner = THIS_MODULE,
+ .pf = NFPROTO_IPV6,
+ .hooknum = NF_INET_POST_ROUTING,
+ .priority = NF_IP_PRI_CONNTRACK_CONFIRM - 1,
+ },
+};
+
+static int __init synproxy_tg6_init(void)
+{
+ int err;
+
+ err = nf_register_hooks(ipv6_synproxy_ops,
+ ARRAY_SIZE(ipv6_synproxy_ops));
+ if (err < 0)
+ goto err1;
+
+ err = xt_register_target(&synproxy_tg6_reg);
+ if (err < 0)
+ goto err2;
+
+ return 0;
+
+err2:
+ nf_unregister_hooks(ipv6_synproxy_ops, ARRAY_SIZE(ipv6_synproxy_ops));
+err1:
+ return err;
+}
+
+static void __exit synproxy_tg6_exit(void)
+{
+ xt_unregister_target(&synproxy_tg6_reg);
+ nf_unregister_hooks(ipv6_synproxy_ops, ARRAY_SIZE(ipv6_synproxy_ops));
+}
+
+module_init(synproxy_tg6_init);
+module_exit(synproxy_tg6_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
diff --git a/net/ipv6/netfilter/ip6table_filter.c b/net/ipv6/netfilter/ip6table_filter.c
index beb5777d204..29b44b14c5e 100644
--- a/net/ipv6/netfilter/ip6table_filter.c
+++ b/net/ipv6/netfilter/ip6table_filter.c
@@ -61,7 +61,7 @@ static int __net_init ip6table_filter_net_init(struct net *net)
net->ipv6.ip6table_filter =
ip6t_register_table(net, &packet_filter, repl);
kfree(repl);
- return PTR_RET(net->ipv6.ip6table_filter);
+ return PTR_ERR_OR_ZERO(net->ipv6.ip6table_filter);
}
static void __net_exit ip6table_filter_net_exit(struct net *net)
diff --git a/net/ipv6/netfilter/ip6table_mangle.c b/net/ipv6/netfilter/ip6table_mangle.c
index e075399d8b7..c705907ae6a 100644
--- a/net/ipv6/netfilter/ip6table_mangle.c
+++ b/net/ipv6/netfilter/ip6table_mangle.c
@@ -101,7 +101,7 @@ static int __net_init ip6table_mangle_net_init(struct net *net)
net->ipv6.ip6table_mangle =
ip6t_register_table(net, &packet_mangler, repl);
kfree(repl);
- return PTR_RET(net->ipv6.ip6table_mangle);
+ return PTR_ERR_OR_ZERO(net->ipv6.ip6table_mangle);
}
static void __net_exit ip6table_mangle_net_exit(struct net *net)
diff --git a/net/ipv6/netfilter/ip6table_nat.c b/net/ipv6/netfilter/ip6table_nat.c
index 6383f90efda..9b076d2d3a7 100644
--- a/net/ipv6/netfilter/ip6table_nat.c
+++ b/net/ipv6/netfilter/ip6table_nat.c
@@ -293,7 +293,7 @@ static int __net_init ip6table_nat_net_init(struct net *net)
return -ENOMEM;
net->ipv6.ip6table_nat = ip6t_register_table(net, &nf_nat_ipv6_table, repl);
kfree(repl);
- return PTR_RET(net->ipv6.ip6table_nat);
+ return PTR_ERR_OR_ZERO(net->ipv6.ip6table_nat);
}
static void __net_exit ip6table_nat_net_exit(struct net *net)
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index 60d1bddff7a..9a626d86720 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -40,7 +40,7 @@ static int __net_init ip6table_raw_net_init(struct net *net)
net->ipv6.ip6table_raw =
ip6t_register_table(net, &packet_raw, repl);
kfree(repl);
- return PTR_RET(net->ipv6.ip6table_raw);
+ return PTR_ERR_OR_ZERO(net->ipv6.ip6table_raw);
}
static void __net_exit ip6table_raw_net_exit(struct net *net)
diff --git a/net/ipv6/netfilter/ip6table_security.c b/net/ipv6/netfilter/ip6table_security.c
index db155351339..ce88d1d7e52 100644
--- a/net/ipv6/netfilter/ip6table_security.c
+++ b/net/ipv6/netfilter/ip6table_security.c
@@ -58,7 +58,7 @@ static int __net_init ip6table_security_net_init(struct net *net)
net->ipv6.ip6table_security =
ip6t_register_table(net, &security_table, repl);
kfree(repl);
- return PTR_RET(net->ipv6.ip6table_security);
+ return PTR_ERR_OR_ZERO(net->ipv6.ip6table_security);
}
static void __net_exit ip6table_security_net_exit(struct net *net)
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index c9b6a6e6a1e..d6e4dd8b58d 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -28,6 +28,7 @@
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
@@ -158,11 +159,7 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
/* adjust seqs for loopback traffic only in outgoing direction */
if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
!nf_is_loopback_packet(skb)) {
- typeof(nf_nat_seq_adjust_hook) seq_adjust;
-
- seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
- if (!seq_adjust ||
- !seq_adjust(skb, ct, ctinfo, protoff)) {
+ if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) {
NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
return NF_DROP;
}
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index ab92a3673fb..827f795209c 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -5,6 +5,7 @@
#include <linux/export.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
+#include <net/addrconf.h>
void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
{
@@ -75,3 +76,50 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
return offset;
}
EXPORT_SYMBOL(ip6_find_1stfragopt);
+
+#if IS_ENABLED(CONFIG_IPV6)
+int ip6_dst_hoplimit(struct dst_entry *dst)
+{
+ int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
+ if (hoplimit == 0) {
+ struct net_device *dev = dst->dev;
+ struct inet6_dev *idev;
+
+ rcu_read_lock();
+ idev = __in6_dev_get(dev);
+ if (idev)
+ hoplimit = idev->cnf.hop_limit;
+ else
+ hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
+ rcu_read_unlock();
+ }
+ return hoplimit;
+}
+EXPORT_SYMBOL(ip6_dst_hoplimit);
+#endif
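
ip6_dst_hoplimit() above resolves the hop limit in a fixed fallback order: an explicit RTAX_HOPLIMIT metric on the route wins, otherwise the per-device inet6 configuration, otherwise the namespace-wide default. A small sketch of just that ordering, with plain integers standing in for the dst metric and device state (pick_hoplimit() is a hypothetical name, not a kernel function):

#include <stdio.h>

static int pick_hoplimit(int route_hoplimit, int have_idev, int idev_hoplimit,
			 int net_default)
{
	if (route_hoplimit != 0)	/* RTAX_HOPLIMIT metric on the route */
		return route_hoplimit;
	if (have_idev)			/* idev->cnf.hop_limit */
		return idev_hoplimit;
	return net_default;		/* devconf_all->hop_limit */
}

int main(void)
{
	printf("%d\n", pick_hoplimit(0, 1, 64, 64));	/* no metric: device conf wins */
	printf("%d\n", pick_hoplimit(32, 1, 64, 64));	/* explicit metric wins */
	return 0;
}
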
+
+int __ip6_local_out(struct sk_buff *skb)
+{
+ int len;
+
+ len = skb->len - sizeof(struct ipv6hdr);
+ if (len > IPV6_MAXPLEN)
+ len = 0;
+ ipv6_hdr(skb)->payload_len = htons(len);
+
+ return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
+ skb_dst(skb)->dev, dst_output);
+}
+EXPORT_SYMBOL_GPL(__ip6_local_out);
+
+int ip6_local_out(struct sk_buff *skb)
+{
+ int err;
+
+ err = __ip6_local_out(skb);
+ if (likely(err == 1))
+ err = dst_output(skb);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(ip6_local_out);
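
__ip6_local_out() above rewrites payload_len before running the NF_INET_LOCAL_OUT hooks; a length that does not fit the 16-bit field is encoded as 0, the jumbogram convention from RFC 2675. A minimal sketch of that clamp, assuming a fixed 40-byte IPv6 header (payload_len_field() is an illustrative helper, not a kernel function):

#include <stdint.h>
#include <stdio.h>

#define IPV6_MAXPLEN	65535
#define IPV6_HDR_LEN	40	/* sizeof(struct ipv6hdr) */

static uint16_t payload_len_field(unsigned int skb_len)
{
	unsigned int len = skb_len - IPV6_HDR_LEN;

	return len > IPV6_MAXPLEN ? 0 : (uint16_t)len;
}

int main(void)
{
	printf("%u\n", payload_len_field(1540));	/* 1500 */
	printf("%u\n", payload_len_field(100000));	/* 0: jumbogram encoding */
	return 0;
}

ip6_local_out() then hands the skb to dst_output() only when the hook traversal returns 1, i.e. when no hook stole or dropped the packet.
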
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 51c3285b5d9..091d066a57b 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -91,6 +91,10 @@ static const struct snmp_mib snmp6_ipstats_list[] = {
SNMP_MIB_ITEM("Ip6InBcastOctets", IPSTATS_MIB_INBCASTOCTETS),
SNMP_MIB_ITEM("Ip6OutBcastOctets", IPSTATS_MIB_OUTBCASTOCTETS),
/* IPSTATS_MIB_CSUMERRORS is not relevant in IPv6 (no checksum) */
+ SNMP_MIB_ITEM("Ip6InNoECTPkts", IPSTATS_MIB_NOECTPKTS),
+ SNMP_MIB_ITEM("Ip6InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
+ SNMP_MIB_ITEM("Ip6InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
+ SNMP_MIB_ITEM("Ip6InCEPkts", IPSTATS_MIB_CEPKTS),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index c45f7a5c36e..58916bbb172 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -63,6 +63,8 @@
#include <linux/seq_file.h>
#include <linux/export.h>
+#define ICMPV6_HDRLEN 4 /* ICMPv6 header, RFC 4443 Section 2.1 */
+
static struct raw_hashinfo raw_v6_hashinfo = {
.lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock),
};
@@ -108,11 +110,14 @@ found:
*/
static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
{
- struct icmp6hdr *_hdr;
+ struct icmp6hdr _hdr;
const struct icmp6hdr *hdr;
+ /* We require only the four bytes of the ICMPv6 header, not any
+ * additional bytes of message body in "struct icmp6hdr".
+ */
hdr = skb_header_pointer(skb, skb_transport_offset(skb),
- sizeof(_hdr), &_hdr);
+ ICMPV6_HDRLEN, &_hdr);
if (hdr) {
const __u32 *data = &raw6_sk(sk)->filter.data[0];
unsigned int type = hdr->icmp6_type;
@@ -628,6 +633,7 @@ static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
goto error;
skb_reserve(skb, hlen);
+ skb->protocol = htons(ETH_P_IPV6);
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
skb_dst_set(skb, &rt->dst);
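
The icmpv6_filter() change above pulls only the four fixed bytes of the ICMPv6 header (type, code, checksum) rather than a full struct icmp6hdr, since the per-socket filter needs nothing but the type. That filter is a 256-bit bitmap, eight 32-bit words indexed by type, where a set bit blocks the type for the raw socket. An illustrative userspace version of the bit test (icmp6_type_blocked() is a made-up name; applications normally set this bitmap through the ICMP6_FILTER_* macros):

#include <stdint.h>
#include <stdio.h>

static int icmp6_type_blocked(const uint32_t data[8], unsigned int type)
{
	return (data[type >> 5] & (1u << (type & 31))) != 0;
}

int main(void)
{
	uint32_t filter[8] = { 0 };

	filter[128 >> 5] |= 1u << (128 & 31);		/* block ICMPv6 echo request (128) */
	printf("%d %d\n",
	       icmp6_type_blocked(filter, 128),		/* 1: blocked */
	       icmp6_type_blocked(filter, 129));	/* 0: delivered */
	return 0;
}
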
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 790d9f4b8b0..1aeb473b2cc 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -490,6 +490,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
ipv6_hdr(head)->payload_len = htons(payload_len);
ipv6_change_dsfield(ipv6_hdr(head), 0xff, ecn);
IP6CB(head)->nhoff = nhoff;
+ IP6CB(head)->flags |= IP6SKB_FRAGMENTED;
/* Yes, and fold redundant checksum back. 8) */
if (head->ip_summed == CHECKSUM_COMPLETE)
@@ -524,6 +525,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
struct net *net = dev_net(skb_dst(skb)->dev);
int evicted;
+ if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
+ goto fail_hdr;
+
IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
/* Jumbo payload inhibits frag. header */
@@ -544,6 +548,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
+ IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
return 1;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a8c891aa246..c979dd96d82 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -283,9 +283,8 @@ static inline struct rt6_info *ip6_dst_alloc(struct net *net,
memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers);
- rt->rt6i_genid = rt_genid(net);
+ rt->rt6i_genid = rt_genid_ipv6(net);
INIT_LIST_HEAD(&rt->rt6i_siblings);
- rt->rt6i_nsiblings = 0;
}
return rt;
}
@@ -1062,7 +1061,7 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
* DST_OBSOLETE_FORCE_CHK which forces validation calls down
* into this function always.
*/
- if (rt->rt6i_genid != rt_genid(dev_net(rt->dst.dev)))
+ if (rt->rt6i_genid != rt_genid_ipv6(dev_net(rt->dst.dev)))
return NULL;
if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
@@ -1157,6 +1156,77 @@ void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
+/* Handle redirects */
+struct ip6rd_flowi {
+ struct flowi6 fl6;
+ struct in6_addr gateway;
+};
+
+static struct rt6_info *__ip6_route_redirect(struct net *net,
+ struct fib6_table *table,
+ struct flowi6 *fl6,
+ int flags)
+{
+ struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
+ struct rt6_info *rt;
+ struct fib6_node *fn;
+
+ /* Get the "current" route for this destination and
+ * check if the redirect has come from the appropriate router.
+ *
+ * RFC 4861 specifies that redirects should only be
+ * accepted if they come from the nexthop to the target.
+ * Due to the way the routes are chosen, this notion
+ * is a bit fuzzy and one might need to check all possible
+ * routes.
+ */
+
+ read_lock_bh(&table->tb6_lock);
+ fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
+restart:
+ for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
+ if (rt6_check_expired(rt))
+ continue;
+ if (rt->dst.error)
+ break;
+ if (!(rt->rt6i_flags & RTF_GATEWAY))
+ continue;
+ if (fl6->flowi6_oif != rt->dst.dev->ifindex)
+ continue;
+ if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
+ continue;
+ break;
+ }
+
+ if (!rt)
+ rt = net->ipv6.ip6_null_entry;
+ else if (rt->dst.error) {
+ rt = net->ipv6.ip6_null_entry;
+ goto out;
+ }
+ BACKTRACK(net, &fl6->saddr);
+out:
+ dst_hold(&rt->dst);
+
+ read_unlock_bh(&table->tb6_lock);
+
+ return rt;
+}
+
+static struct dst_entry *ip6_route_redirect(struct net *net,
+ const struct flowi6 *fl6,
+ const struct in6_addr *gateway)
+{
+ int flags = RT6_LOOKUP_F_HAS_SADDR;
+ struct ip6rd_flowi rdfl;
+
+ rdfl.fl6 = *fl6;
+ rdfl.gateway = *gateway;
+
+ return fib6_rule_lookup(net, &rdfl.fl6,
+ flags, __ip6_route_redirect);
+}
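
__ip6_route_redirect() above accepts a route for redirect processing only if it is a gateway route, it points out the interface the redirect arrived on, and its gateway equals the redirect's IPv6 source address — the RFC 4861 requirement that redirects come from the current first-hop router. A standalone sketch of that per-route test (struct toy_route and redirect_route_ok() are invented for illustration):

#include <netinet/in.h>
#include <string.h>
#include <stdio.h>

struct toy_route {
	int is_gateway;			/* RTF_GATEWAY */
	int oif;			/* rt->dst.dev->ifindex */
	struct in6_addr gateway;	/* rt->rt6i_gateway */
};

static int redirect_route_ok(const struct toy_route *rt, int rcv_oif,
			     const struct in6_addr *redirect_src)
{
	if (!rt->is_gateway)
		return 0;
	if (rt->oif != rcv_oif)
		return 0;
	return memcmp(&rt->gateway, redirect_src, sizeof(*redirect_src)) == 0;
}

int main(void)
{
	struct toy_route rt = { .is_gateway = 1, .oif = 2 };
	struct in6_addr src = rt.gateway;	/* redirect really came from the gateway */

	printf("%d %d\n", redirect_route_ok(&rt, 2, &src),
	       redirect_route_ok(&rt, 3, &src));	/* 1 0 */
	return 0;
}
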
+
void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
{
const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
@@ -1171,13 +1241,32 @@ void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
fl6.saddr = iph->saddr;
fl6.flowlabel = ip6_flowinfo(iph);
- dst = ip6_route_output(net, NULL, &fl6);
- if (!dst->error)
- rt6_do_redirect(dst, NULL, skb);
+ dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
+ rt6_do_redirect(dst, NULL, skb);
dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_redirect);
+void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
+ u32 mark)
+{
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+ const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
+ struct dst_entry *dst;
+ struct flowi6 fl6;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_oif = oif;
+ fl6.flowi6_mark = mark;
+ fl6.flowi6_flags = 0;
+ fl6.daddr = msg->dest;
+ fl6.saddr = iph->daddr;
+
+ dst = ip6_route_redirect(net, &fl6, &iph->saddr);
+ rt6_do_redirect(dst, NULL, skb);
+ dst_release(dst);
+}
+
void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);
@@ -1311,7 +1400,6 @@ static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
static int ip6_dst_gc(struct dst_ops *ops)
{
- unsigned long now = jiffies;
struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
@@ -1321,13 +1409,12 @@ static int ip6_dst_gc(struct dst_ops *ops)
int entries;
entries = dst_entries_get_fast(ops);
- if (time_after(rt_last_gc + rt_min_interval, now) &&
+ if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
entries <= rt_max_size)
goto out;
net->ipv6.ip6_rt_gc_expire++;
- fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net);
- net->ipv6.ip6_rt_last_gc = now;
+ fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, entries > rt_max_size);
entries = dst_entries_get_slow(ops);
if (entries < ops->gc_thresh)
net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
@@ -1336,25 +1423,6 @@ out:
return entries > rt_max_size;
}
-int ip6_dst_hoplimit(struct dst_entry *dst)
-{
- int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
- if (hoplimit == 0) {
- struct net_device *dev = dst->dev;
- struct inet6_dev *idev;
-
- rcu_read_lock();
- idev = __in6_dev_get(dev);
- if (idev)
- hoplimit = idev->cnf.hop_limit;
- else
- hoplimit = dev_net(dev)->ipv6.devconf_all->hop_limit;
- rcu_read_unlock();
- }
- return hoplimit;
-}
-EXPORT_SYMBOL(ip6_dst_hoplimit);
-
/*
*
*/
@@ -2827,7 +2895,7 @@ int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
net = (struct net *)ctl->extra1;
delay = net->ipv6.sysctl.flush_delay;
proc_dointvec(ctl, write, buffer, lenp, ppos);
- fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
+ fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
return 0;
}
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index a3437a4cd07..7ee5cb96db3 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -581,12 +581,10 @@ static int ipip6_rcv(struct sk_buff *skb)
tunnel->parms.iph.protocol != 0)
goto out;
- secpath_reset(skb);
skb->mac_header = skb->network_header;
skb_reset_network_header(skb);
IPCB(skb)->flags = 0;
skb->protocol = htons(ETH_P_IPV6);
- skb->pkt_type = PACKET_HOST;
if (tunnel->dev->priv_flags & IFF_ISATAP) {
if (!isatap_chksrc(skb, iph, tunnel)) {
@@ -603,7 +601,7 @@ static int ipip6_rcv(struct sk_buff *skb)
}
}
- __skb_tunnel_rx(skb, tunnel->dev);
+ __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
err = IP_ECN_decapsulate(iph, skb);
if (unlikely(err)) {
@@ -621,8 +619,6 @@ static int ipip6_rcv(struct sk_buff *skb)
tstats->rx_packets++;
tstats->rx_bytes += skb->len;
- if (tunnel->net != dev_net(tunnel->dev))
- skb_scrub_packet(skb);
netif_rx(skb);
return 0;
@@ -645,11 +641,7 @@ static int ipip_rcv(struct sk_buff *skb)
const struct iphdr *iph;
struct ip_tunnel *tunnel;
- if (iptunnel_pull_header(skb, 0, tpi.proto))
- goto drop;
-
iph = ip_hdr(skb);
-
tunnel = ipip6_tunnel_lookup(dev_net(skb->dev), skb->dev,
iph->saddr, iph->daddr);
if (tunnel != NULL) {
@@ -659,6 +651,8 @@ static int ipip_rcv(struct sk_buff *skb)
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop;
+ if (iptunnel_pull_header(skb, 0, tpi.proto))
+ goto drop;
return ip_tunnel_rcv(tunnel, skb, &tpi, log_ecn_error);
}
@@ -860,9 +854,6 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
tunnel->err_count = 0;
}
- if (tunnel->net != dev_net(dev))
- skb_scrub_packet(skb);
-
/*
* Okay, now see if we can stuff it in the buffer as-is.
*/
@@ -888,8 +879,13 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
ttl = iph6->hop_limit;
tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
- err = iptunnel_xmit(dev_net(dev), rt, skb, fl4.saddr, fl4.daddr,
- IPPROTO_IPV6, tos, ttl, df);
+ if (likely(!skb->encapsulation)) {
+ skb_reset_inner_headers(skb);
+ skb->encapsulation = 1;
+ }
+
+ err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos,
+ ttl, df, !net_eq(tunnel->net, dev_net(dev)));
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
return NETDEV_TX_OK;
@@ -1589,7 +1585,7 @@ static void __net_exit sit_destroy_tunnels(struct sit_net *sitn, struct list_hea
/* If dev is in the same netns, it has already
* been added to the list by the previous loop.
*/
- if (dev_net(t->dev) != net)
+ if (!net_eq(dev_net(t->dev), net))
unregister_netdevice_queue(t->dev,
head);
t = rtnl_dereference(t->next);
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index d5dda20bd71..bf63ac8a49b 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -112,32 +112,38 @@ static __u32 check_tcp_syn_cookie(__u32 cookie, const struct in6_addr *saddr,
& COOKIEMASK;
}
-__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, __u16 *mssp)
+u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
+ const struct tcphdr *th, __u16 *mssp)
{
- const struct ipv6hdr *iph = ipv6_hdr(skb);
- const struct tcphdr *th = tcp_hdr(skb);
int mssind;
const __u16 mss = *mssp;
- tcp_synq_overflow(sk);
-
for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--)
if (mss >= msstab[mssind])
break;
*mssp = msstab[mssind];
- NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
-
return secure_tcp_syn_cookie(&iph->saddr, &iph->daddr, th->source,
th->dest, ntohl(th->seq),
jiffies / (HZ * 60), mssind);
}
+EXPORT_SYMBOL_GPL(__cookie_v6_init_sequence);
-static inline int cookie_check(const struct sk_buff *skb, __u32 cookie)
+__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb, __u16 *mssp)
{
const struct ipv6hdr *iph = ipv6_hdr(skb);
const struct tcphdr *th = tcp_hdr(skb);
+
+ tcp_synq_overflow(sk);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
+
+ return __cookie_v6_init_sequence(iph, th, mssp);
+}
+
+int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
+ __u32 cookie)
+{
__u32 seq = ntohl(th->seq) - 1;
__u32 mssind = check_tcp_syn_cookie(cookie, &iph->saddr, &iph->daddr,
th->source, th->dest, seq,
@@ -145,6 +151,7 @@ static inline int cookie_check(const struct sk_buff *skb, __u32 cookie)
return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0;
}
+EXPORT_SYMBOL_GPL(__cookie_v6_check);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
{
@@ -167,7 +174,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
goto out;
if (tcp_synq_no_recent_overflow(sk) ||
- (mss = cookie_check(skb, cookie)) == 0) {
+ (mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie)) == 0) {
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
goto out;
}
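
The syncookies refactor above splits cookie generation and validation into __cookie_v6_init_sequence() and __cookie_v6_check(), which operate on raw headers and can therefore be called by SYNPROXY without a listening socket. The cookie only has room for an MSS index into msstab, so the client's MSS is rounded down to a table entry and recovered from that index on the way back. A toy illustration of the rounding (table values below are made up, not the kernel's msstab):

#include <stdio.h>

static const unsigned short msstab[] = { 536, 1220, 1440, 8940 };
#define MSSTAB_LEN (sizeof(msstab) / sizeof(msstab[0]))

static unsigned int mss_to_index(unsigned short mss)
{
	unsigned int i;

	for (i = MSSTAB_LEN - 1; i; i--)	/* same search direction as above */
		if (mss >= msstab[i])
			break;
	return i;
}

int main(void)
{
	unsigned int idx = mss_to_index(1460);

	/* 1460 rounds down to the 1440 entry; a failed cookie check instead
	 * reports MSS 0, which is what __cookie_v6_check() returns. */
	printf("index %u -> mss %u\n", idx, msstab[idx]);
	return 0;
}
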
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 6e1649d5853..5c71501fc91 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -963,7 +963,8 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (!ipv6_unicast_destination(skb))
goto drop;
- if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
+ if ((sysctl_tcp_syncookies == 2 ||
+ inet_csk_reqsk_queue_is_full(sk)) && !isn) {
want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
if (!want_cookie)
goto drop;
@@ -1237,8 +1238,6 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
tcp_initialize_rcv_mss(newsk);
- tcp_synack_rtt_meas(newsk, req);
- newtp->total_retrans = req->num_retrans;
newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
@@ -1361,8 +1360,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
}
}
- if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len))
- goto reset;
+ tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
if (opt_skb)
goto ipv6_pktoptions;
return 0;
@@ -1427,7 +1425,7 @@ ipv6_pktoptions:
if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
if (np->rxopt.bits.rxtclass)
- np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(skb));
+ np->rcv_tclass = ipv6_get_dsfield(ipv6_hdr(opt_skb));
if (ipv6_opt_accepted(sk, opt_skb)) {
skb_set_owner_r(opt_skb, sk);
opt_skb = xchg(&np->pktoptions, opt_skb);
@@ -1732,7 +1730,7 @@ static void get_openreq6(struct seq_file *seq,
seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
- "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
+ "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
i,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3],
@@ -1783,7 +1781,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
- "%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %lu %lu %u %u %d\n",
+ "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
i,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
@@ -1926,6 +1924,7 @@ struct proto tcpv6_prot = {
.unhash = inet_unhash,
.get_port = inet_csk_get_port,
.enter_memory_pressure = tcp_enter_memory_pressure,
+ .stream_memory_free = tcp_stream_memory_free,
.sockets_allocated = &tcp_sockets_allocated,
.memory_allocated = &tcp_memory_allocated,
.memory_pressure = &tcp_memory_pressure,
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 5d1b8d7ac99..60559511bd9 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -21,26 +21,25 @@ static int udp6_ufo_send_check(struct sk_buff *skb)
const struct ipv6hdr *ipv6h;
struct udphdr *uh;
- /* UDP Tunnel offload on ipv6 is not yet supported. */
- if (skb->encapsulation)
- return -EINVAL;
-
if (!pskb_may_pull(skb, sizeof(*uh)))
return -EINVAL;
- ipv6h = ipv6_hdr(skb);
- uh = udp_hdr(skb);
+ if (likely(!skb->encapsulation)) {
+ ipv6h = ipv6_hdr(skb);
+ uh = udp_hdr(skb);
+
+ uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
+ IPPROTO_UDP, 0);
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ }
- uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
- IPPROTO_UDP, 0);
- skb->csum_start = skb_transport_header(skb) - skb->head;
- skb->csum_offset = offsetof(struct udphdr, check);
- skb->ip_summed = CHECKSUM_PARTIAL;
return 0;
}
static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
- netdev_features_t features)
+ netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
@@ -75,47 +74,51 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
goto out;
}
- /* Do software UFO. Complete and fill in the UDP checksum as HW cannot
- * do checksum of UDP packets sent as multiple IP fragments.
- */
- offset = skb_checksum_start_offset(skb);
- csum = skb_checksum(skb, offset, skb->len - offset, 0);
- offset += skb->csum_offset;
- *(__sum16 *)(skb->data + offset) = csum_fold(csum);
- skb->ip_summed = CHECKSUM_NONE;
-
- /* Check if there is enough headroom to insert fragment header. */
- tnl_hlen = skb_tnl_header_len(skb);
- if (skb_headroom(skb) < (tnl_hlen + frag_hdr_sz)) {
- if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
- goto out;
+ if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL)
+ segs = skb_udp_tunnel_segment(skb, features);
+ else {
+ /* Do software UFO. Complete and fill in the UDP checksum as HW cannot
+ * do checksum of UDP packets sent as multiple IP fragments.
+ */
+ offset = skb_checksum_start_offset(skb);
+ csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ offset += skb->csum_offset;
+ *(__sum16 *)(skb->data + offset) = csum_fold(csum);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Check if there is enough headroom to insert fragment header. */
+ tnl_hlen = skb_tnl_header_len(skb);
+ if (skb_headroom(skb) < (tnl_hlen + frag_hdr_sz)) {
+ if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
+ goto out;
+ }
+
+ /* Find the unfragmentable header and shift it left by frag_hdr_sz
+ * bytes to insert fragment header.
+ */
+ unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
+ nexthdr = *prevhdr;
+ *prevhdr = NEXTHDR_FRAGMENT;
+ unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
+ unfrag_ip6hlen + tnl_hlen;
+ packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
+ memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len);
+
+ SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
+ skb->mac_header -= frag_hdr_sz;
+ skb->network_header -= frag_hdr_sz;
+
+ fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
+ fptr->nexthdr = nexthdr;
+ fptr->reserved = 0;
+ ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
+
+ /* Fragment the skb. ipv6 header and the remaining fields of the
+ * fragment header are updated in ipv6_gso_segment()
+ */
+ segs = skb_segment(skb, features);
}
- /* Find the unfragmentable header and shift it left by frag_hdr_sz
- * bytes to insert fragment header.
- */
- unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
- nexthdr = *prevhdr;
- *prevhdr = NEXTHDR_FRAGMENT;
- unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
- unfrag_ip6hlen + tnl_hlen;
- packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
- memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len);
-
- SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
- skb->mac_header -= frag_hdr_sz;
- skb->network_header -= frag_hdr_sz;
-
- fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
- fptr->nexthdr = nexthdr;
- fptr->reserved = 0;
- ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
-
- /* Fragment the skb. ipv6 header and the remaining fields of the
- * fragment header are updated in ipv6_gso_segment()
- */
- segs = skb_segment(skb, features);
-
out:
return segs;
}
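
In the non-tunnel branch above, the UDP checksum is completed in software before segmentation because the hardware cannot checksum a datagram that will leave as multiple IPv6 fragments: the partial sum over the packet is folded to 16 bits and complemented, as csum_fold() does. A simplified stand-in for that fold (illustration only, not the kernel's implementation):

#include <stdint.h>
#include <stdio.h>

static uint16_t fold_csum(uint32_t sum)
{
	/* fold the 32-bit ones'-complement sum down to 16 bits, then invert */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	printf("0x%04x\n", fold_csum(0x1a2b3c4du));	/* 0xa987 */
	return 0;
}
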
diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c
index 8755a3079d0..6cd625e3770 100644
--- a/net/ipv6/xfrm6_output.c
+++ b/net/ipv6/xfrm6_output.c
@@ -34,8 +34,10 @@ static int xfrm6_local_dontfrag(struct sk_buff *skb)
struct sock *sk = skb->sk;
if (sk) {
- proto = sk->sk_protocol;
+ if (sk->sk_family != AF_INET6)
+ return 0;
+ proto = sk->sk_protocol;
if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
return inet6_sk(sk)->dontfrag;
}
@@ -54,13 +56,15 @@ static void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu)
ipv6_local_rxpmtu(sk, &fl6, mtu);
}
-static void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
+void xfrm6_local_error(struct sk_buff *skb, u32 mtu)
{
struct flowi6 fl6;
+ const struct ipv6hdr *hdr;
struct sock *sk = skb->sk;
+ hdr = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
fl6.fl6_dport = inet_sk(sk)->inet_dport;
- fl6.daddr = ipv6_hdr(skb)->daddr;
+ fl6.daddr = hdr->daddr;
ipv6_local_error(sk, EMSGSIZE, &fl6, mtu);
}
@@ -80,7 +84,7 @@ static int xfrm6_tunnel_check_size(struct sk_buff *skb)
if (xfrm6_local_dontfrag(skb))
xfrm6_local_rxpmtu(skb, mtu);
else if (skb->sk)
- xfrm6_local_error(skb, mtu);
+ xfrm_local_error(skb, mtu);
else
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
ret = -EMSGSIZE;
@@ -136,13 +140,18 @@ static int __xfrm6_output(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct xfrm_state *x = dst->xfrm;
- int mtu = ip6_skb_dst_mtu(skb);
+ int mtu;
+
+ if (skb->protocol == htons(ETH_P_IPV6))
+ mtu = ip6_skb_dst_mtu(skb);
+ else
+ mtu = dst_mtu(skb_dst(skb));
if (skb->len > mtu && xfrm6_local_dontfrag(skb)) {
xfrm6_local_rxpmtu(skb, mtu);
return -EMSGSIZE;
} else if (!skb->local_df && skb->len > mtu && skb->sk) {
- xfrm6_local_error(skb, mtu);
+ xfrm_local_error(skb, mtu);
return -EMSGSIZE;
}
diff --git a/net/ipv6/xfrm6_state.c b/net/ipv6/xfrm6_state.c
index d8c70b8efc2..3fc970135fc 100644
--- a/net/ipv6/xfrm6_state.c
+++ b/net/ipv6/xfrm6_state.c
@@ -183,6 +183,7 @@ static struct xfrm_state_afinfo xfrm6_state_afinfo = {
.extract_input = xfrm6_extract_input,
.extract_output = xfrm6_extract_output,
.transport_finish = xfrm6_transport_finish,
+ .local_error = xfrm6_local_error,
};
int __init xfrm6_state_init(void)
diff --git a/net/ipx/ipx_proc.c b/net/ipx/ipx_proc.c
index 65e8833a251..e15c16a517e 100644
--- a/net/ipx/ipx_proc.c
+++ b/net/ipx/ipx_proc.c
@@ -213,7 +213,7 @@ static int ipx_seq_socket_show(struct seq_file *seq, void *v)
ntohs(ipxs->dest_addr.sock));
}
- seq_printf(seq, "%08X %08X %02X %03d\n",
+ seq_printf(seq, "%08X %08X %02X %03u\n",
sk_wmem_alloc_get(s),
sk_rmem_alloc_get(s),
s->sk_state,
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index ae43c62f904..85372cfa7b9 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -75,7 +75,7 @@ static pi_minor_info_t pi_minor_call_table[] = {
{ NULL, 0 }, /* 0x00 */
{ irttp_param_max_sdu_size, PV_INTEGER | PV_BIG_ENDIAN } /* 0x01 */
};
-static pi_major_info_t pi_major_call_table[] = {{ pi_minor_call_table, 2 }};
+static pi_major_info_t pi_major_call_table[] = { { pi_minor_call_table, 2 } };
static pi_param_info_t param_info = { pi_major_call_table, 1, 0x0f, 4 };
/************************ GLOBAL PROCEDURES ************************/
@@ -205,7 +205,7 @@ static void irttp_todo_expired(unsigned long data)
*/
static void irttp_flush_queues(struct tsap_cb *self)
{
- struct sk_buff* skb;
+ struct sk_buff *skb;
IRDA_DEBUG(4, "%s()\n", __func__);
@@ -400,7 +400,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
/* The IrLMP spec (IrLMP 1.1 p10) says that we have the right to
* use only 0x01-0x6F. Of course, we can use LSAP_ANY as well.
* JeanII */
- if((stsap_sel != LSAP_ANY) &&
+ if ((stsap_sel != LSAP_ANY) &&
((stsap_sel < 0x01) || (stsap_sel >= 0x70))) {
IRDA_DEBUG(0, "%s(), invalid tsap!\n", __func__);
return NULL;
@@ -427,7 +427,7 @@ struct tsap_cb *irttp_open_tsap(__u8 stsap_sel, int credit, notify_t *notify)
ttp_notify.data_indication = irttp_data_indication;
ttp_notify.udata_indication = irttp_udata_indication;
ttp_notify.flow_indication = irttp_flow_indication;
- if(notify->status_indication != NULL)
+ if (notify->status_indication != NULL)
ttp_notify.status_indication = irttp_status_indication;
ttp_notify.instance = self;
strncpy(ttp_notify.name, notify->name, NOTIFY_MAX_NAME);
@@ -639,8 +639,7 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb)
*/
if ((self->tx_max_sdu_size != 0) &&
(self->tx_max_sdu_size != TTP_SAR_UNBOUND) &&
- (skb->len > self->tx_max_sdu_size))
- {
+ (skb->len > self->tx_max_sdu_size)) {
IRDA_ERROR("%s: SAR enabled, but data is larger than TxMaxSduSize!\n",
__func__);
ret = -EMSGSIZE;
@@ -733,8 +732,7 @@ static void irttp_run_tx_queue(struct tsap_cb *self)
* poll us through irttp_flow_indication() - Jean II */
while ((self->send_credit > 0) &&
(!irlmp_lap_tx_queue_full(self->lsap)) &&
- (skb = skb_dequeue(&self->tx_queue)))
- {
+ (skb = skb_dequeue(&self->tx_queue))) {
/*
* Since we can transmit and receive frames concurrently,
* the code below is a critical region and we must assure that
@@ -798,8 +796,7 @@ static void irttp_run_tx_queue(struct tsap_cb *self)
* where we can spend a bit of time doing stuff. - Jean II */
if ((self->tx_sdu_busy) &&
(skb_queue_len(&self->tx_queue) < TTP_TX_LOW_THRESHOLD) &&
- (!self->close_pend))
- {
+ (!self->close_pend)) {
if (self->notify.flow_indication)
self->notify.flow_indication(self->notify.instance,
self, FLOW_START);
@@ -892,7 +889,7 @@ static int irttp_udata_indication(void *instance, void *sap,
/* Just pass data to layer above */
if (self->notify.udata_indication) {
err = self->notify.udata_indication(self->notify.instance,
- self,skb);
+ self, skb);
/* Same comment as in irttp_do_data_indication() */
if (!err)
return 0;
@@ -1057,7 +1054,7 @@ static void irttp_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
* to do that. Jean II */
/* If we need to send disconnect. try to do it now */
- if(self->disconnect_pend)
+ if (self->disconnect_pend)
irttp_start_todo_timer(self, 0);
}
@@ -1116,7 +1113,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -EBADR;);
if (self->connected) {
- if(userdata)
+ if (userdata)
dev_kfree_skb(userdata);
return -EISCONN;
}
@@ -1137,7 +1134,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
* headers
*/
IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
- { dev_kfree_skb(userdata); return -1; } );
+ { dev_kfree_skb(userdata); return -1; });
}
/* Initialize connection parameters */
@@ -1157,7 +1154,7 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
* Give away max 127 credits for now
*/
if (n > 127) {
- self->avail_credit=n-127;
+ self->avail_credit = n - 127;
n = 127;
}
@@ -1166,10 +1163,10 @@ int irttp_connect_request(struct tsap_cb *self, __u8 dtsap_sel,
/* SAR enabled? */
if (max_sdu_size > 0) {
IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
- { dev_kfree_skb(tx_skb); return -1; } );
+ { dev_kfree_skb(tx_skb); return -1; });
/* Insert SAR parameters */
- frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER);
+ frame = skb_push(tx_skb, TTP_HEADER + TTP_SAR_HEADER);
frame[0] = TTP_PARAMETERS | n;
frame[1] = 0x04; /* Length */
@@ -1386,7 +1383,7 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
* headers
*/
IRDA_ASSERT(skb_headroom(userdata) >= TTP_MAX_HEADER,
- { dev_kfree_skb(userdata); return -1; } );
+ { dev_kfree_skb(userdata); return -1; });
}
self->avail_credit = 0;
@@ -1409,10 +1406,10 @@ int irttp_connect_response(struct tsap_cb *self, __u32 max_sdu_size,
/* SAR enabled? */
if (max_sdu_size > 0) {
IRDA_ASSERT(skb_headroom(tx_skb) >= (TTP_MAX_HEADER + TTP_SAR_HEADER),
- { dev_kfree_skb(tx_skb); return -1; } );
+ { dev_kfree_skb(tx_skb); return -1; });
/* Insert TTP header with SAR parameters */
- frame = skb_push(tx_skb, TTP_HEADER+TTP_SAR_HEADER);
+ frame = skb_push(tx_skb, TTP_HEADER + TTP_SAR_HEADER);
frame[0] = TTP_PARAMETERS | n;
frame[1] = 0x04; /* Length */
@@ -1522,7 +1519,7 @@ int irttp_disconnect_request(struct tsap_cb *self, struct sk_buff *userdata,
* function may be called from various context, like user, timer
* for following a disconnect_indication() (i.e. net_bh).
* Jean II */
- if(test_and_set_bit(0, &self->disconnect_pend)) {
+ if (test_and_set_bit(0, &self->disconnect_pend)) {
IRDA_DEBUG(0, "%s(), disconnect already pending\n",
__func__);
if (userdata)
@@ -1627,7 +1624,7 @@ static void irttp_disconnect_indication(void *instance, void *sap,
* Jean II */
/* No need to notify the client if has already tried to disconnect */
- if(self->notify.disconnect_indication)
+ if (self->notify.disconnect_indication)
self->notify.disconnect_indication(self->notify.instance, self,
reason, skb);
else
@@ -1738,8 +1735,7 @@ static void irttp_run_rx_queue(struct tsap_cb *self)
* This is the last fragment, so time to reassemble!
*/
if ((self->rx_sdu_size <= self->rx_max_sdu_size) ||
- (self->rx_max_sdu_size == TTP_SAR_UNBOUND))
- {
+ (self->rx_max_sdu_size == TTP_SAR_UNBOUND)) {
/*
* A little optimizing. Only queue the fragment if
* there are other fragments. Since if this is the
@@ -1860,7 +1856,7 @@ static int irttp_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "dtsap_sel: %02x\n",
self->dtsap_sel);
seq_printf(seq, " connected: %s, ",
- self->connected? "TRUE":"FALSE");
+ self->connected ? "TRUE" : "FALSE");
seq_printf(seq, "avail credit: %d, ",
self->avail_credit);
seq_printf(seq, "remote credit: %d, ",
@@ -1876,9 +1872,9 @@ static int irttp_seq_show(struct seq_file *seq, void *v)
seq_printf(seq, "rx_queue len: %u\n",
skb_queue_len(&self->rx_queue));
seq_printf(seq, " tx_sdu_busy: %s, ",
- self->tx_sdu_busy? "TRUE":"FALSE");
+ self->tx_sdu_busy ? "TRUE" : "FALSE");
seq_printf(seq, "rx_sdu_busy: %s\n",
- self->rx_sdu_busy? "TRUE":"FALSE");
+ self->rx_sdu_busy ? "TRUE" : "FALSE");
seq_printf(seq, " max_seg_size: %u, ",
self->max_seg_size);
seq_printf(seq, "tx_max_sdu_size: %u, ",
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 9da862070dd..9d585370c5b 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -45,7 +45,7 @@ struct netns_pfkey {
static DEFINE_MUTEX(pfkey_mutex);
#define DUMMY_MARK 0
-static struct xfrm_mark dummy_mark = {0, 0};
+static const struct xfrm_mark dummy_mark = {0, 0};
struct pfkey_sock {
/* struct sock must be the first member of struct pfkey_sock */
struct sock sk;
@@ -338,7 +338,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
return 0;
}
-static u8 sadb_ext_min_len[] = {
+static const u8 sadb_ext_min_len[] = {
[SADB_EXT_RESERVED] = (u8) 0,
[SADB_EXT_SA] = (u8) sizeof(struct sadb_sa),
[SADB_EXT_LIFETIME_CURRENT] = (u8) sizeof(struct sadb_lifetime),
@@ -1196,10 +1196,6 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
x->props.family = pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
&x->props.saddr);
- if (!x->props.family) {
- err = -EAFNOSUPPORT;
- goto out;
- }
pfkey_sadb_addr2xfrm_addr((struct sadb_address *) ext_hdrs[SADB_EXT_ADDRESS_DST-1],
&x->id.daddr);
@@ -2081,6 +2077,7 @@ static int pfkey_xfrm_policy2msg(struct sk_buff *skb, const struct xfrm_policy *
pol->sadb_x_policy_type = IPSEC_POLICY_NONE;
}
pol->sadb_x_policy_dir = dir+1;
+ pol->sadb_x_policy_reserved = 0;
pol->sadb_x_policy_id = xp->index;
pol->sadb_x_policy_priority = xp->priority;
@@ -2204,10 +2201,6 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1];
xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr);
- if (!xp->family) {
- err = -EINVAL;
- goto out;
- }
xp->selector.family = xp->family;
xp->selector.prefixlen_s = sa->sadb_address_prefixlen;
xp->selector.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto);
@@ -2736,7 +2729,7 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, const struct sad
typedef int (*pfkey_handler)(struct sock *sk, struct sk_buff *skb,
const struct sadb_msg *hdr, void * const *ext_hdrs);
-static pfkey_handler pfkey_funcs[SADB_MAX + 1] = {
+static const pfkey_handler pfkey_funcs[SADB_MAX + 1] = {
[SADB_RESERVED] = pfkey_reserved,
[SADB_GETSPI] = pfkey_getspi,
[SADB_UPDATE] = pfkey_add,
@@ -3137,7 +3130,9 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
pol->sadb_x_policy_dir = XFRM_POLICY_OUT + 1;
+ pol->sadb_x_policy_reserved = 0;
pol->sadb_x_policy_id = xp->index;
+ pol->sadb_x_policy_priority = xp->priority;
/* Set sadb_comb's. */
if (x->id.proto == IPPROTO_AH)
@@ -3525,6 +3520,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY;
pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC;
pol->sadb_x_policy_dir = dir + 1;
+ pol->sadb_x_policy_reserved = 0;
pol->sadb_x_policy_id = 0;
pol->sadb_x_policy_priority = 0;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 48aaa89253e..6cba486353e 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -321,12 +321,12 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
if (llc->dev) {
if (!addr->sllc_arphrd)
addr->sllc_arphrd = llc->dev->type;
- if (llc_mac_null(addr->sllc_mac))
+ if (is_zero_ether_addr(addr->sllc_mac))
memcpy(addr->sllc_mac, llc->dev->dev_addr,
IFHWADDRLEN);
if (addr->sllc_arphrd != llc->dev->type ||
- !llc_mac_match(addr->sllc_mac,
- llc->dev->dev_addr)) {
+ !ether_addr_equal(addr->sllc_mac,
+ llc->dev->dev_addr)) {
rc = -EINVAL;
llc->dev = NULL;
}
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 0d0d416dfab..cd872417796 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -478,8 +478,8 @@ static inline bool llc_estab_match(const struct llc_sap *sap,
return llc->laddr.lsap == laddr->lsap &&
llc->daddr.lsap == daddr->lsap &&
- llc_mac_match(llc->laddr.mac, laddr->mac) &&
- llc_mac_match(llc->daddr.mac, daddr->mac);
+ ether_addr_equal(llc->laddr.mac, laddr->mac) &&
+ ether_addr_equal(llc->daddr.mac, daddr->mac);
}
/**
@@ -550,7 +550,7 @@ static inline bool llc_listener_match(const struct llc_sap *sap,
return sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN &&
llc->laddr.lsap == laddr->lsap &&
- llc_mac_match(llc->laddr.mac, laddr->mac);
+ ether_addr_equal(llc->laddr.mac, laddr->mac);
}
static struct sock *__llc_lookup_listener(struct llc_sap *sap,
diff --git a/net/llc/llc_proc.c b/net/llc/llc_proc.c
index 7b4799cfbf8..1a3c7e0f5d0 100644
--- a/net/llc/llc_proc.c
+++ b/net/llc/llc_proc.c
@@ -147,7 +147,7 @@ static int llc_seq_socket_show(struct seq_file *seq, void *v)
}
seq_printf(seq, "@%02X ", llc->sap->laddr.lsap);
llc_ui_format_mac(seq, llc->daddr.mac);
- seq_printf(seq, "@%02X %8d %8d %2d %3d %4d\n", llc->daddr.lsap,
+ seq_printf(seq, "@%02X %8d %8d %2d %3u %4d\n", llc->daddr.lsap,
sk_wmem_alloc_get(sk),
sk_rmem_alloc_get(sk) - llc->copied_seq,
sk->sk_state,
diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c
index 78be45cda5c..e5850699098 100644
--- a/net/llc/llc_sap.c
+++ b/net/llc/llc_sap.c
@@ -302,7 +302,7 @@ static inline bool llc_dgram_match(const struct llc_sap *sap,
return sk->sk_type == SOCK_DGRAM &&
llc->laddr.lsap == laddr->lsap &&
- llc_mac_match(llc->laddr.mac, laddr->mac);
+ ether_addr_equal(llc->laddr.mac, laddr->mac);
}
/**
@@ -425,7 +425,7 @@ void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb)
llc_pdu_decode_da(skb, laddr.mac);
llc_pdu_decode_dsap(skb, &laddr.lsap);
- if (llc_mac_multicast(laddr.mac)) {
+ if (is_multicast_ether_addr(laddr.mac)) {
llc_sap_mcast(sap, &laddr, skb);
kfree_skb(skb);
} else {
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 8184d121ff0..2e7855a1b10 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -395,9 +395,13 @@ void sta_set_rate_info_tx(struct sta_info *sta,
rinfo->nss = ieee80211_rate_get_vht_nss(rate);
} else {
struct ieee80211_supported_band *sband;
+ int shift = ieee80211_vif_get_shift(&sta->sdata->vif);
+ u16 brate;
+
sband = sta->local->hw.wiphy->bands[
ieee80211_get_sdata_band(sta->sdata)];
- rinfo->legacy = sband->bitrates[rate->idx].bitrate;
+ brate = sband->bitrates[rate->idx].bitrate;
+ rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
}
if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
rinfo->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
@@ -422,11 +426,13 @@ void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
rinfo->mcs = sta->last_rx_rate_idx;
} else {
struct ieee80211_supported_band *sband;
+ int shift = ieee80211_vif_get_shift(&sta->sdata->vif);
+ u16 brate;
sband = sta->local->hw.wiphy->bands[
ieee80211_get_sdata_band(sta->sdata)];
- rinfo->legacy =
- sband->bitrates[sta->last_rx_rate_idx].bitrate;
+ brate = sband->bitrates[sta->last_rx_rate_idx].bitrate;
+ rinfo->legacy = DIV_ROUND_UP(brate, 1 << shift);
}
if (sta->last_rx_rate_flag & RX_FLAG_40MHZ)
@@ -666,6 +672,8 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy,
if (sta->sdata->dev != dev)
continue;
+ sinfo.filled = 0;
+ sta_set_sinfo(sta, &sinfo);
i = 0;
ADD_STA_STATS(sta);
}
@@ -854,8 +862,8 @@ static int ieee80211_set_probe_resp(struct ieee80211_sub_if_data *sdata,
return 0;
}
-static int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
- struct cfg80211_beacon_data *params)
+int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_beacon_data *params)
{
struct beacon_data *new, *old;
int new_head_len, new_tail_len;
@@ -1018,6 +1026,12 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ /* don't allow changing the beacon while CSA is in place - the offset
+ * of the channel switch counter may change
+ */
+ if (sdata->vif.csa_active)
+ return -EBUSY;
+
old = rtnl_dereference(sdata->u.ap.beacon);
if (!old)
return -ENOENT;
@@ -1042,6 +1056,10 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
return -ENOENT;
old_probe_resp = rtnl_dereference(sdata->u.ap.probe_resp);
+ /* abort any running channel switch */
+ sdata->vif.csa_active = false;
+ cancel_work_sync(&sdata->csa_finalize_work);
+
/* turn off carrier for this interface and dependent VLANs */
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
netif_carrier_off(vlan->dev);
@@ -1190,8 +1208,6 @@ static int sta_apply_parameters(struct ieee80211_local *local,
struct station_parameters *params)
{
int ret = 0;
- u32 rates;
- int i, j;
struct ieee80211_supported_band *sband;
struct ieee80211_sub_if_data *sdata = sta->sdata;
enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
@@ -1284,16 +1300,10 @@ static int sta_apply_parameters(struct ieee80211_local *local,
sta->listen_interval = params->listen_interval;
if (params->supported_rates) {
- rates = 0;
-
- for (i = 0; i < params->supported_rates_len; i++) {
- int rate = (params->supported_rates[i] & 0x7f) * 5;
- for (j = 0; j < sband->n_bitrates; j++) {
- if (sband->bitrates[j].bitrate == rate)
- rates |= BIT(j);
- }
- }
- sta->sta.supp_rates[band] = rates;
+ ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef,
+ sband, params->supported_rates,
+ params->supported_rates_len,
+ &sta->sta.supp_rates[band]);
}
if (params->ht_capa)
@@ -1956,18 +1966,11 @@ static int ieee80211_change_bss(struct wiphy *wiphy,
}
if (params->basic_rates) {
- int i, j;
- u32 rates = 0;
- struct ieee80211_supported_band *sband = wiphy->bands[band];
-
- for (i = 0; i < params->basic_rates_len; i++) {
- int rate = (params->basic_rates[i] & 0x7f) * 5;
- for (j = 0; j < sband->n_bitrates; j++) {
- if (sband->bitrates[j].bitrate == rate)
- rates |= BIT(j);
- }
- }
- sdata->vif.bss_conf.basic_rates = rates;
+ ieee80211_parse_bitrates(&sdata->vif.bss_conf.chandef,
+ wiphy->bands[band],
+ params->basic_rates,
+ params->basic_rates_len,
+ &sdata->vif.bss_conf.basic_rates);
changed |= BSS_CHANGED_BASIC_RATES;
}
@@ -2299,14 +2302,25 @@ static void ieee80211_rfkill_poll(struct wiphy *wiphy)
}
#ifdef CONFIG_NL80211_TESTMODE
-static int ieee80211_testmode_cmd(struct wiphy *wiphy, void *data, int len)
+static int ieee80211_testmode_cmd(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ void *data, int len)
{
struct ieee80211_local *local = wiphy_priv(wiphy);
+ struct ieee80211_vif *vif = NULL;
if (!local->ops->testmode_cmd)
return -EOPNOTSUPP;
- return local->ops->testmode_cmd(&local->hw, data, len);
+ if (wdev) {
+ struct ieee80211_sub_if_data *sdata;
+
+ sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+ if (sdata->flags & IEEE80211_SDATA_IN_DRIVER)
+ vif = &sdata->vif;
+ }
+
+ return local->ops->testmode_cmd(&local->hw, vif, data, len);
}
static int ieee80211_testmode_dump(struct wiphy *wiphy,
@@ -2784,6 +2798,178 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy,
return 0;
}
+static struct cfg80211_beacon_data *
+cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
+{
+ struct cfg80211_beacon_data *new_beacon;
+ u8 *pos;
+ int len;
+
+ len = beacon->head_len + beacon->tail_len + beacon->beacon_ies_len +
+ beacon->proberesp_ies_len + beacon->assocresp_ies_len +
+ beacon->probe_resp_len;
+
+ new_beacon = kzalloc(sizeof(*new_beacon) + len, GFP_KERNEL);
+ if (!new_beacon)
+ return NULL;
+
+ pos = (u8 *)(new_beacon + 1);
+ if (beacon->head_len) {
+ new_beacon->head_len = beacon->head_len;
+ new_beacon->head = pos;
+ memcpy(pos, beacon->head, beacon->head_len);
+ pos += beacon->head_len;
+ }
+ if (beacon->tail_len) {
+ new_beacon->tail_len = beacon->tail_len;
+ new_beacon->tail = pos;
+ memcpy(pos, beacon->tail, beacon->tail_len);
+ pos += beacon->tail_len;
+ }
+ if (beacon->beacon_ies_len) {
+ new_beacon->beacon_ies_len = beacon->beacon_ies_len;
+ new_beacon->beacon_ies = pos;
+ memcpy(pos, beacon->beacon_ies, beacon->beacon_ies_len);
+ pos += beacon->beacon_ies_len;
+ }
+ if (beacon->proberesp_ies_len) {
+ new_beacon->proberesp_ies_len = beacon->proberesp_ies_len;
+ new_beacon->proberesp_ies = pos;
+ memcpy(pos, beacon->proberesp_ies, beacon->proberesp_ies_len);
+ pos += beacon->proberesp_ies_len;
+ }
+ if (beacon->assocresp_ies_len) {
+ new_beacon->assocresp_ies_len = beacon->assocresp_ies_len;
+ new_beacon->assocresp_ies = pos;
+ memcpy(pos, beacon->assocresp_ies, beacon->assocresp_ies_len);
+ pos += beacon->assocresp_ies_len;
+ }
+ if (beacon->probe_resp_len) {
+ new_beacon->probe_resp_len = beacon->probe_resp_len;
+ new_beacon->probe_resp = pos;
+ memcpy(pos, beacon->probe_resp, beacon->probe_resp_len);
+ pos += beacon->probe_resp_len;
+ }
+
+ return new_beacon;
+}
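
cfg80211_beacon_dup() above uses a single-allocation copy: one kzalloc() holds the struct followed by every variable-length element, and the copy's pointers are aimed into that trailing area, so the whole duplicate is later released with a single kfree() of next_beacon. A small userspace sketch of the same pattern with two fields (struct blob and blob_dup() are invented names; calloc/free stand in for kzalloc/kfree):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct blob {
	unsigned char *head;
	size_t head_len;
	unsigned char *tail;
	size_t tail_len;
};

static struct blob *blob_dup(const struct blob *src)
{
	struct blob *dst;
	unsigned char *pos;

	/* one allocation: the struct, immediately followed by all data */
	dst = calloc(1, sizeof(*dst) + src->head_len + src->tail_len);
	if (!dst)
		return NULL;

	pos = (unsigned char *)(dst + 1);
	if (src->head_len) {
		dst->head_len = src->head_len;
		dst->head = pos;
		memcpy(pos, src->head, src->head_len);
		pos += src->head_len;
	}
	if (src->tail_len) {
		dst->tail_len = src->tail_len;
		dst->tail = pos;
		memcpy(pos, src->tail, src->tail_len);
	}
	return dst;	/* a single free() releases struct and data together */
}

int main(void)
{
	unsigned char h[] = "hdr", t[] = "tail";
	struct blob src = { h, sizeof(h), t, sizeof(t) };
	struct blob *copy = blob_dup(&src);

	if (copy)
		printf("%s %s\n", (char *)copy->head, (char *)copy->tail);
	free(copy);
	return 0;
}

The duplicated post-switch beacon is kept in sdata->u.ap.next_beacon until ieee80211_csa_finalize_work() installs it after the channel change.
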
+
+void ieee80211_csa_finalize_work(struct work_struct *work)
+{
+ struct ieee80211_sub_if_data *sdata =
+ container_of(work, struct ieee80211_sub_if_data,
+ csa_finalize_work);
+ struct ieee80211_local *local = sdata->local;
+ int err, changed;
+
+ if (!ieee80211_sdata_running(sdata))
+ return;
+
+ if (WARN_ON(sdata->vif.type != NL80211_IFTYPE_AP))
+ return;
+
+ sdata->radar_required = sdata->csa_radar_required;
+ err = ieee80211_vif_change_channel(sdata, &local->csa_chandef,
+ &changed);
+ if (WARN_ON(err < 0))
+ return;
+
+ err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon);
+ if (err < 0)
+ return;
+
+ changed |= err;
+ kfree(sdata->u.ap.next_beacon);
+ sdata->u.ap.next_beacon = NULL;
+ sdata->vif.csa_active = false;
+
+ ieee80211_wake_queues_by_reason(&sdata->local->hw,
+ IEEE80211_MAX_QUEUE_MAP,
+ IEEE80211_QUEUE_STOP_REASON_CSA);
+
+ ieee80211_bss_info_change_notify(sdata, changed);
+
+ cfg80211_ch_switch_notify(sdata->dev, &local->csa_chandef);
+}
+
+static int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
+ struct cfg80211_csa_settings *params)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ struct ieee80211_chanctx *chanctx;
+ int err, num_chanctx;
+
+ if (!list_empty(&local->roc_list) || local->scanning)
+ return -EBUSY;
+
+ if (sdata->wdev.cac_started)
+ return -EBUSY;
+
+ if (cfg80211_chandef_identical(&params->chandef,
+ &sdata->vif.bss_conf.chandef))
+ return -EINVAL;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ if (!chanctx_conf) {
+ rcu_read_unlock();
+ return -EBUSY;
+ }
+
+ /* don't handle multi-VIF cases */
+ chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf);
+ if (chanctx->refcount > 1) {
+ rcu_read_unlock();
+ return -EBUSY;
+ }
+ num_chanctx = 0;
+ list_for_each_entry_rcu(chanctx, &local->chanctx_list, list)
+ num_chanctx++;
+ rcu_read_unlock();
+
+ if (num_chanctx > 1)
+ return -EBUSY;
+
+ /* don't allow another channel switch if one is already active. */
+ if (sdata->vif.csa_active)
+ return -EBUSY;
+
+ /* only handle AP for now. */
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ sdata->u.ap.next_beacon = cfg80211_beacon_dup(&params->beacon_after);
+ if (!sdata->u.ap.next_beacon)
+ return -ENOMEM;
+
+ sdata->csa_counter_offset_beacon = params->counter_offset_beacon;
+ sdata->csa_counter_offset_presp = params->counter_offset_presp;
+ sdata->csa_radar_required = params->radar_required;
+
+ if (params->block_tx)
+ ieee80211_stop_queues_by_reason(&local->hw,
+ IEEE80211_MAX_QUEUE_MAP,
+ IEEE80211_QUEUE_STOP_REASON_CSA);
+
+ err = ieee80211_assign_beacon(sdata, &params->beacon_csa);
+ if (err < 0)
+ return err;
+
+ local->csa_chandef = params->chandef;
+ sdata->vif.csa_active = true;
+
+ ieee80211_bss_info_change_notify(sdata, err);
+ drv_channel_switch_beacon(sdata, &params->chandef);
+
+ return 0;
+}
+
static int ieee80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
struct ieee80211_channel *chan, bool offchan,
unsigned int wait, const u8 *buf, size_t len,
@@ -3501,4 +3687,5 @@ struct cfg80211_ops mac80211_config_ops = {
.get_et_strings = ieee80211_get_et_strings,
.get_channel = ieee80211_cfg_get_channel,
.start_radar_detection = ieee80211_start_radar_detection,
+ .channel_switch = ieee80211_channel_switch,
};
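cfg80211_beacon_dup(), whose tail is shown above and which is called from ieee80211_channel_switch(), copies a beacon into a single allocation: the struct sits first, every variable-length buffer is laid out behind it, and one 'pos' cursor advances through the tail so a single kfree() releases everything. A minimal, self-contained sketch of that layout pattern, using hypothetical field names:

#include <stdlib.h>
#include <string.h>

struct blob_copy {
	const unsigned char *head;	/* points into data[] */
	size_t head_len;
	const unsigned char *tail;	/* points into data[] */
	size_t tail_len;
	unsigned char data[];		/* struct and payload share one allocation */
};

static struct blob_copy *blob_dup(const unsigned char *head, size_t head_len,
				  const unsigned char *tail, size_t tail_len)
{
	struct blob_copy *copy;
	unsigned char *pos;

	copy = calloc(1, sizeof(*copy) + head_len + tail_len);
	if (!copy)
		return NULL;

	pos = copy->data;
	copy->head = pos;
	copy->head_len = head_len;
	memcpy(pos, head, head_len);
	pos += head_len;

	copy->tail = pos;
	copy->tail_len = tail_len;
	memcpy(pos, tail, tail_len);

	return copy;	/* one free() releases the struct and both buffers */
}

int main(void)
{
	const unsigned char h[] = "head", t[] = "tail";
	struct blob_copy *c = blob_dup(h, sizeof(h), t, sizeof(t));

	free(c);
	return 0;
}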
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 03e8d2e3270..3a4764b2869 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -410,6 +410,64 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata,
return ret;
}
+int ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
+ const struct cfg80211_chan_def *chandef,
+ u32 *changed)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_chanctx_conf *conf;
+ struct ieee80211_chanctx *ctx;
+ int ret;
+ u32 chanctx_changed = 0;
+
+ /* should never be called if not performing a channel switch. */
+ if (WARN_ON(!sdata->vif.csa_active))
+ return -EINVAL;
+
+ if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
+ IEEE80211_CHAN_DISABLED))
+ return -EINVAL;
+
+ mutex_lock(&local->chanctx_mtx);
+ conf = rcu_dereference_protected(sdata->vif.chanctx_conf,
+ lockdep_is_held(&local->chanctx_mtx));
+ if (!conf) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ctx = container_of(conf, struct ieee80211_chanctx, conf);
+ if (ctx->refcount != 1) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (sdata->vif.bss_conf.chandef.width != chandef->width) {
+ chanctx_changed = IEEE80211_CHANCTX_CHANGE_WIDTH;
+ *changed |= BSS_CHANGED_BANDWIDTH;
+ }
+
+ sdata->vif.bss_conf.chandef = *chandef;
+ ctx->conf.def = *chandef;
+
+ chanctx_changed |= IEEE80211_CHANCTX_CHANGE_CHANNEL;
+ drv_change_chanctx(local, ctx, chanctx_changed);
+
+ if (!local->use_chanctx) {
+ local->_oper_chandef = *chandef;
+ ieee80211_hw_config(local, 0);
+ }
+
+ ieee80211_recalc_chanctx_chantype(local, ctx);
+ ieee80211_recalc_smps_chanctx(local, ctx);
+ ieee80211_recalc_radar_chanctx(local, ctx);
+
+ ret = 0;
+ out:
+ mutex_unlock(&local->chanctx_mtx);
+ return ret;
+}
+
int ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
const struct cfg80211_chan_def *chandef,
u32 *changed)
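ieee80211_vif_change_channel() above takes a u32 *changed so the caller can collect BSS change flags from several steps (here BSS_CHANGED_BANDWIDTH when the width moves) and issue one notification at the end, as ieee80211_csa_finalize_work() does. A reduced standalone sketch of that accumulate-then-notify pattern, with hypothetical flag values:

#include <stdio.h>
#include <stdint.h>

#define CHANGED_CHANNEL	0x1	/* hypothetical flag values, for illustration */
#define CHANGED_WIDTH	0x2

/* each step ORs its flags into the caller's word instead of notifying itself */
static int change_channel(int new_width, int old_width, uint32_t *changed)
{
	if (new_width != old_width)
		*changed |= CHANGED_WIDTH;
	*changed |= CHANGED_CHANNEL;
	return 0;
}

int main(void)
{
	uint32_t changed = 0;

	if (change_channel(40, 20, &changed) == 0)
		printf("notify once, flags 0x%x\n", changed);	/* prints 0x3 */
	return 0;
}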
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 44e201d60a1..19c54a44ed4 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -455,6 +455,15 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
DEBUGFS_ADD_COUNTER(tx_retry_count, tx_retry_count);
DEBUGFS_ADD_COUNTER(wep_weak_iv_count, wep_weak_iv_count);
+ if (sizeof(sta->driver_buffered_tids) == sizeof(u32))
+ debugfs_create_x32("driver_buffered_tids", 0400,
+ sta->debugfs.dir,
+ (u32 *)&sta->driver_buffered_tids);
+ else
+ debugfs_create_x64("driver_buffered_tids", 0400,
+ sta->debugfs.dir,
+ (u64 *)&sta->driver_buffered_tids);
+
drv_sta_add_debugfs(local, sdata, &sta->sta, sta->debugfs.dir);
}
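The debugfs hunk above registers driver_buffered_tids as either a 32-bit or a 64-bit hex attribute depending on its size, since the bitmap is presumably an unsigned long whose width differs between 32-bit and 64-bit builds. The dispatch reduces to a sizeof comparison, sketched standalone:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned long driver_buffered_tids = 0;	/* stand-in for the sta field */

	if (sizeof(driver_buffered_tids) == sizeof(uint32_t))
		printf("expose as a 32-bit hex file\n");
	else
		printf("expose as a 64-bit hex file\n");
	return 0;
}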
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index b931c96a596..b3ea11f3d52 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1072,4 +1072,17 @@ static inline void drv_ipv6_addr_change(struct ieee80211_local *local,
}
#endif
+static inline void
+drv_channel_switch_beacon(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_local *local = sdata->local;
+
+ if (local->ops->channel_switch_beacon) {
+ trace_drv_channel_switch_beacon(local, sdata, chandef);
+ local->ops->channel_switch_beacon(&local->hw, &sdata->vif,
+ chandef);
+ }
+}
+
#endif /* __MAC80211_DRIVER_OPS */
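drv_channel_switch_beacon() above follows the usual driver-ops shape: invoke the driver op only if the driver provides it, tracing the call when it does. A reduced, self-contained sketch of that optional-callback pattern with hypothetical types:

#include <stdio.h>

struct ops {
	void (*channel_switch_beacon)(int freq);	/* optional op */
};

static void trace_call(const char *name, int freq)
{
	printf("trace: %s(freq=%d)\n", name, freq);
}

static void drv_channel_switch_beacon(const struct ops *ops, int freq)
{
	if (ops->channel_switch_beacon) {
		trace_call("channel_switch_beacon", freq);
		ops->channel_switch_beacon(freq);
	}
}

static void my_csa_beacon(int freq)
{
	printf("driver: beacon now announces %d MHz\n", freq);
}

int main(void)
{
	struct ops with = { .channel_switch_beacon = my_csa_beacon };
	struct ops without = { 0 };

	drv_channel_switch_beacon(&with, 5180);
	drv_channel_switch_beacon(&without, 5180);	/* silently skipped */
	return 0;
}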
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index f83534f6a2e..529bf58bc14 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -19,13 +19,14 @@
#include "ieee80211_i.h"
#include "rate.h"
-static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata,
+static void __check_htcap_disable(struct ieee80211_ht_cap *ht_capa,
+ struct ieee80211_ht_cap *ht_capa_mask,
struct ieee80211_sta_ht_cap *ht_cap,
u16 flag)
{
__le16 le_flag = cpu_to_le16(flag);
- if (sdata->u.mgd.ht_capa_mask.cap_info & le_flag) {
- if (!(sdata->u.mgd.ht_capa.cap_info & le_flag))
+ if (ht_capa_mask->cap_info & le_flag) {
+ if (!(ht_capa->cap_info & le_flag))
ht_cap->cap &= ~flag;
}
}
@@ -33,13 +34,30 @@ static void __check_htcap_disable(struct ieee80211_sub_if_data *sdata,
void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
struct ieee80211_sta_ht_cap *ht_cap)
{
- u8 *scaps = (u8 *)(&sdata->u.mgd.ht_capa.mcs.rx_mask);
- u8 *smask = (u8 *)(&sdata->u.mgd.ht_capa_mask.mcs.rx_mask);
+ struct ieee80211_ht_cap *ht_capa, *ht_capa_mask;
+ u8 *scaps, *smask;
int i;
if (!ht_cap->ht_supported)
return;
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_STATION:
+ ht_capa = &sdata->u.mgd.ht_capa;
+ ht_capa_mask = &sdata->u.mgd.ht_capa_mask;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ht_capa = &sdata->u.ibss.ht_capa;
+ ht_capa_mask = &sdata->u.ibss.ht_capa_mask;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ scaps = (u8 *)(&ht_capa->mcs.rx_mask);
+ smask = (u8 *)(&ht_capa_mask->mcs.rx_mask);
+
/* NOTE: If you add more over-rides here, update register_hw
* ht_capa_mod_msk logic in main.c as well.
* And, if this method can ever change ht_cap.ht_supported, fix
@@ -55,28 +73,32 @@ void ieee80211_apply_htcap_overrides(struct ieee80211_sub_if_data *sdata,
}
/* Force removal of HT-40 capabilities? */
- __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SUP_WIDTH_20_40);
- __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SGI_40);
+ __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40);
+ __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
+ IEEE80211_HT_CAP_SGI_40);
/* Allow user to disable SGI-20 (SGI-40 is handled above) */
- __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_SGI_20);
+ __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
+ IEEE80211_HT_CAP_SGI_20);
/* Allow user to disable the max-AMSDU bit. */
- __check_htcap_disable(sdata, ht_cap, IEEE80211_HT_CAP_MAX_AMSDU);
+ __check_htcap_disable(ht_capa, ht_capa_mask, ht_cap,
+ IEEE80211_HT_CAP_MAX_AMSDU);
/* Allow user to decrease AMPDU factor */
- if (sdata->u.mgd.ht_capa_mask.ampdu_params_info &
+ if (ht_capa_mask->ampdu_params_info &
IEEE80211_HT_AMPDU_PARM_FACTOR) {
- u8 n = sdata->u.mgd.ht_capa.ampdu_params_info
- & IEEE80211_HT_AMPDU_PARM_FACTOR;
+ u8 n = ht_capa->ampdu_params_info &
+ IEEE80211_HT_AMPDU_PARM_FACTOR;
if (n < ht_cap->ampdu_factor)
ht_cap->ampdu_factor = n;
}
/* Allow the user to increase AMPDU density. */
- if (sdata->u.mgd.ht_capa_mask.ampdu_params_info &
+ if (ht_capa_mask->ampdu_params_info &
IEEE80211_HT_AMPDU_PARM_DENSITY) {
- u8 n = (sdata->u.mgd.ht_capa.ampdu_params_info &
+ u8 n = (ht_capa->ampdu_params_info &
IEEE80211_HT_AMPDU_PARM_DENSITY)
>> IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT;
if (n > ht_cap->ampdu_density)
@@ -112,7 +134,8 @@ bool ieee80211_ht_cap_ie_to_sta_ht_cap(struct ieee80211_sub_if_data *sdata,
* we advertised a restricted capability set to. Override
* our own capabilities and then use those below.
*/
- if (sdata->vif.type == NL80211_IFTYPE_STATION &&
+ if ((sdata->vif.type == NL80211_IFTYPE_STATION ||
+ sdata->vif.type == NL80211_IFTYPE_ADHOC) &&
!test_sta_flag(sta, WLAN_STA_TDLS_PEER))
ieee80211_apply_htcap_overrides(sdata, &own_cap);
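The reworked __check_htcap_disable() above applies a user override in two steps: the mask says whether the user touched a capability bit at all, and the capability value says whether it should stay set; only a bit that is selected by the mask and cleared in the override gets removed. A small standalone sketch of that mask-and-value check, with hypothetical flag values:

#include <stdio.h>
#include <stdint.h>

#define CAP_SGI_20	0x0020	/* hypothetical flag values, for illustration */
#define CAP_SGI_40	0x0040

/* clear 'flag' in *cap only if the mask selects it and the override clears it */
static void check_cap_disable(uint16_t capa, uint16_t capa_mask,
			      uint16_t *cap, uint16_t flag)
{
	if ((capa_mask & flag) && !(capa & flag))
		*cap &= ~flag;
}

int main(void)
{
	uint16_t cap = CAP_SGI_20 | CAP_SGI_40;

	/* user override: mask selects SGI-40 and the value clears it */
	check_cap_disable(0, CAP_SGI_40, &cap, CAP_SGI_40);

	printf("cap = 0x%04x\n", cap);	/* prints 0x0020, SGI-20 untouched */
	return 0;
}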
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index ea7b9c2c7e6..a12afe77bb2 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -30,75 +30,27 @@
#define IEEE80211_IBSS_MERGE_INTERVAL (30 * HZ)
#define IEEE80211_IBSS_INACTIVITY_LIMIT (60 * HZ)
+#define IEEE80211_IBSS_RSN_INACTIVITY_LIMIT (10 * HZ)
#define IEEE80211_IBSS_MAX_STA_ENTRIES 128
-
-static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
- const u8 *bssid, const int beacon_int,
- struct ieee80211_channel *chan,
- const u32 basic_rates,
- const u16 capability, u64 tsf,
- bool creator)
+static struct beacon_data *
+ieee80211_ibss_build_presp(struct ieee80211_sub_if_data *sdata,
+ const int beacon_int, const u32 basic_rates,
+ const u16 capability, u64 tsf,
+ struct cfg80211_chan_def *chandef,
+ bool *have_higher_than_11mbit)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct ieee80211_local *local = sdata->local;
- int rates, i;
+ int rates_n = 0, i, ri;
struct ieee80211_mgmt *mgmt;
u8 *pos;
struct ieee80211_supported_band *sband;
- struct cfg80211_bss *bss;
- u32 bss_change;
- u8 supp_rates[IEEE80211_MAX_SUPP_RATES];
- struct cfg80211_chan_def chandef;
+ u32 rate_flags, rates = 0, rates_added = 0;
struct beacon_data *presp;
int frame_len;
-
- sdata_assert_lock(sdata);
-
- /* Reset own TSF to allow time synchronization work. */
- drv_reset_tsf(local, sdata);
-
- if (!ether_addr_equal(ifibss->bssid, bssid))
- sta_info_flush(sdata);
-
- /* if merging, indicate to driver that we leave the old IBSS */
- if (sdata->vif.bss_conf.ibss_joined) {
- sdata->vif.bss_conf.ibss_joined = false;
- sdata->vif.bss_conf.ibss_creator = false;
- sdata->vif.bss_conf.enable_beacon = false;
- netif_carrier_off(sdata->dev);
- ieee80211_bss_info_change_notify(sdata,
- BSS_CHANGED_IBSS |
- BSS_CHANGED_BEACON_ENABLED);
- }
-
- presp = rcu_dereference_protected(ifibss->presp,
- lockdep_is_held(&sdata->wdev.mtx));
- rcu_assign_pointer(ifibss->presp, NULL);
- if (presp)
- kfree_rcu(presp, rcu_head);
-
- sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
-
- chandef = ifibss->chandef;
- if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
- chandef.width = NL80211_CHAN_WIDTH_20;
- chandef.center_freq1 = chan->center_freq;
- }
-
- ieee80211_vif_release_channel(sdata);
- if (ieee80211_vif_use_channel(sdata, &chandef,
- ifibss->fixed_channel ?
- IEEE80211_CHANCTX_SHARED :
- IEEE80211_CHANCTX_EXCLUSIVE)) {
- sdata_info(sdata, "Failed to join IBSS, no channel context\n");
- return;
- }
-
- memcpy(ifibss->bssid, bssid, ETH_ALEN);
-
- sband = local->hw.wiphy->bands[chan->band];
+ int shift;
/* Build IBSS probe response */
frame_len = sizeof(struct ieee80211_hdr_3addr) +
@@ -113,7 +65,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
ifibss->ie_len;
presp = kzalloc(sizeof(*presp) + frame_len, GFP_KERNEL);
if (!presp)
- return;
+ return NULL;
presp->head = (void *)(presp + 1);
@@ -134,21 +86,47 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
memcpy(pos, ifibss->ssid, ifibss->ssid_len);
pos += ifibss->ssid_len;
- rates = min_t(int, 8, sband->n_bitrates);
+ sband = local->hw.wiphy->bands[chandef->chan->band];
+ rate_flags = ieee80211_chandef_rate_flags(chandef);
+ shift = ieee80211_chandef_get_shift(chandef);
+ rates_n = 0;
+ if (have_higher_than_11mbit)
+ *have_higher_than_11mbit = false;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+ if (sband->bitrates[i].bitrate > 110 &&
+ have_higher_than_11mbit)
+ *have_higher_than_11mbit = true;
+
+ rates |= BIT(i);
+ rates_n++;
+ }
+
*pos++ = WLAN_EID_SUPP_RATES;
- *pos++ = rates;
- for (i = 0; i < rates; i++) {
- int rate = sband->bitrates[i].bitrate;
+ *pos++ = min_t(int, 8, rates_n);
+ for (ri = 0; ri < sband->n_bitrates; ri++) {
+ int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate,
+ 5 * (1 << shift));
u8 basic = 0;
- if (basic_rates & BIT(i))
+ if (!(rates & BIT(ri)))
+ continue;
+
+ if (basic_rates & BIT(ri))
basic = 0x80;
- *pos++ = basic | (u8) (rate / 5);
+ *pos++ = basic | (u8) rate;
+ if (++rates_added == 8) {
+ ri++; /* continue at next rate for EXT_SUPP_RATES */
+ break;
+ }
}
if (sband->band == IEEE80211_BAND_2GHZ) {
*pos++ = WLAN_EID_DS_PARAMS;
*pos++ = 1;
- *pos++ = ieee80211_frequency_to_channel(chan->center_freq);
+ *pos++ = ieee80211_frequency_to_channel(
+ chandef->chan->center_freq);
}
*pos++ = WLAN_EID_IBSS_PARAMS;
@@ -157,15 +135,20 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
*pos++ = 0;
*pos++ = 0;
- if (sband->n_bitrates > 8) {
+ /* put the remaining rates in WLAN_EID_EXT_SUPP_RATES */
+ if (rates_n > 8) {
*pos++ = WLAN_EID_EXT_SUPP_RATES;
- *pos++ = sband->n_bitrates - 8;
- for (i = 8; i < sband->n_bitrates; i++) {
- int rate = sband->bitrates[i].bitrate;
+ *pos++ = rates_n - 8;
+ for (; ri < sband->n_bitrates; ri++) {
+ int rate = DIV_ROUND_UP(sband->bitrates[ri].bitrate,
+ 5 * (1 << shift));
u8 basic = 0;
- if (basic_rates & BIT(i))
+ if (!(rates & BIT(ri)))
+ continue;
+
+ if (basic_rates & BIT(ri))
basic = 0x80;
- *pos++ = basic | (u8) (rate / 5);
+ *pos++ = basic | (u8) rate;
}
}
@@ -175,19 +158,23 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
}
/* add HT capability and information IEs */
- if (chandef.width != NL80211_CHAN_WIDTH_20_NOHT &&
- chandef.width != NL80211_CHAN_WIDTH_5 &&
- chandef.width != NL80211_CHAN_WIDTH_10 &&
+ if (chandef->width != NL80211_CHAN_WIDTH_20_NOHT &&
+ chandef->width != NL80211_CHAN_WIDTH_5 &&
+ chandef->width != NL80211_CHAN_WIDTH_10 &&
sband->ht_cap.ht_supported) {
- pos = ieee80211_ie_build_ht_cap(pos, &sband->ht_cap,
- sband->ht_cap.cap);
+ struct ieee80211_sta_ht_cap ht_cap;
+
+ memcpy(&ht_cap, &sband->ht_cap, sizeof(ht_cap));
+ ieee80211_apply_htcap_overrides(sdata, &ht_cap);
+
+ pos = ieee80211_ie_build_ht_cap(pos, &ht_cap, ht_cap.cap);
/*
* Note: According to 802.11n-2009 9.13.3.1, HT Protection
* field and RIFS Mode are reserved in IBSS mode, therefore
* keep them at 0
*/
pos = ieee80211_ie_build_ht_oper(pos, &sband->ht_cap,
- &chandef, 0);
+ chandef, 0);
}
if (local->hw.queues >= IEEE80211_NUM_ACS) {
@@ -204,9 +191,97 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
presp->head_len = pos - presp->head;
if (WARN_ON(presp->head_len > frame_len))
+ goto error;
+
+ return presp;
+error:
+ kfree(presp);
+ return NULL;
+}
+
+static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
+ const u8 *bssid, const int beacon_int,
+ struct cfg80211_chan_def *req_chandef,
+ const u32 basic_rates,
+ const u16 capability, u64 tsf,
+ bool creator)
+{
+ struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_mgmt *mgmt;
+ struct cfg80211_bss *bss;
+ u32 bss_change;
+ struct cfg80211_chan_def chandef;
+ struct ieee80211_channel *chan;
+ struct beacon_data *presp;
+ enum nl80211_bss_scan_width scan_width;
+ bool have_higher_than_11mbit;
+
+ sdata_assert_lock(sdata);
+
+ /* Reset own TSF to allow time synchronization work. */
+ drv_reset_tsf(local, sdata);
+
+ if (!ether_addr_equal(ifibss->bssid, bssid))
+ sta_info_flush(sdata);
+
+ /* if merging, indicate to driver that we leave the old IBSS */
+ if (sdata->vif.bss_conf.ibss_joined) {
+ sdata->vif.bss_conf.ibss_joined = false;
+ sdata->vif.bss_conf.ibss_creator = false;
+ sdata->vif.bss_conf.enable_beacon = false;
+ netif_carrier_off(sdata->dev);
+ ieee80211_bss_info_change_notify(sdata,
+ BSS_CHANGED_IBSS |
+ BSS_CHANGED_BEACON_ENABLED);
+ }
+
+ presp = rcu_dereference_protected(ifibss->presp,
+ lockdep_is_held(&sdata->wdev.mtx));
+ rcu_assign_pointer(ifibss->presp, NULL);
+ if (presp)
+ kfree_rcu(presp, rcu_head);
+
+ sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
+
+ /* make a copy of the chandef; it could be modified below */
+ chandef = *req_chandef;
+ chan = chandef.chan;
+ if (!cfg80211_reg_can_beacon(local->hw.wiphy, &chandef)) {
+ if (chandef.width == NL80211_CHAN_WIDTH_5 ||
+ chandef.width == NL80211_CHAN_WIDTH_10 ||
+ chandef.width == NL80211_CHAN_WIDTH_20_NOHT ||
+ chandef.width == NL80211_CHAN_WIDTH_20) {
+ sdata_info(sdata,
+ "Failed to join IBSS, beacons forbidden\n");
+ return;
+ }
+ chandef.width = NL80211_CHAN_WIDTH_20;
+ chandef.center_freq1 = chan->center_freq;
+ }
+
+ ieee80211_vif_release_channel(sdata);
+ if (ieee80211_vif_use_channel(sdata, &chandef,
+ ifibss->fixed_channel ?
+ IEEE80211_CHANCTX_SHARED :
+ IEEE80211_CHANCTX_EXCLUSIVE)) {
+ sdata_info(sdata, "Failed to join IBSS, no channel context\n");
+ return;
+ }
+
+ memcpy(ifibss->bssid, bssid, ETH_ALEN);
+
+ sband = local->hw.wiphy->bands[chan->band];
+
+ presp = ieee80211_ibss_build_presp(sdata, beacon_int, basic_rates,
+ capability, tsf, &chandef,
+ &have_higher_than_11mbit);
+ if (!presp)
return;
rcu_assign_pointer(ifibss->presp, presp);
+ mgmt = (void *)presp->head;
sdata->vif.bss_conf.enable_beacon = true;
sdata->vif.bss_conf.beacon_int = beacon_int;
@@ -236,18 +311,26 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.use_short_slot = chan->band == IEEE80211_BAND_5GHZ;
bss_change |= BSS_CHANGED_ERP_SLOT;
+ /* cf. IEEE 802.11 9.2.12 */
+ if (chan->band == IEEE80211_BAND_2GHZ && have_higher_than_11mbit)
+ sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
+ else
+ sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
+
sdata->vif.bss_conf.ibss_joined = true;
sdata->vif.bss_conf.ibss_creator = creator;
ieee80211_bss_info_change_notify(sdata, bss_change);
- ieee80211_sta_def_wmm_params(sdata, sband->n_bitrates, supp_rates);
+ ieee80211_set_wmm_default(sdata, true);
ifibss->state = IEEE80211_IBSS_MLME_JOINED;
mod_timer(&ifibss->timer,
round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
- bss = cfg80211_inform_bss_frame(local->hw.wiphy, chan,
- mgmt, presp->head_len, 0, GFP_KERNEL);
+ scan_width = cfg80211_chandef_to_scan_width(&chandef);
+ bss = cfg80211_inform_bss_width_frame(local->hw.wiphy, chan,
+ scan_width, mgmt,
+ presp->head_len, 0, GFP_KERNEL);
cfg80211_put_bss(local->hw.wiphy, bss);
netif_carrier_on(sdata->dev);
cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
@@ -259,27 +342,60 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
struct cfg80211_bss *cbss =
container_of((void *)bss, struct cfg80211_bss, priv);
struct ieee80211_supported_band *sband;
+ struct cfg80211_chan_def chandef;
u32 basic_rates;
int i, j;
u16 beacon_int = cbss->beacon_interval;
const struct cfg80211_bss_ies *ies;
+ enum nl80211_channel_type chan_type;
u64 tsf;
+ u32 rate_flags;
+ int shift;
sdata_assert_lock(sdata);
if (beacon_int < 10)
beacon_int = 10;
+ switch (sdata->u.ibss.chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ case NL80211_CHAN_WIDTH_40:
+ chan_type = cfg80211_get_chandef_type(&sdata->u.ibss.chandef);
+ cfg80211_chandef_create(&chandef, cbss->channel, chan_type);
+ break;
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
+ cfg80211_chandef_create(&chandef, cbss->channel,
+ NL80211_CHAN_WIDTH_20_NOHT);
+ chandef.width = sdata->u.ibss.chandef.width;
+ break;
+ default:
+ /* fall back to 20 MHz for unsupported modes */
+ cfg80211_chandef_create(&chandef, cbss->channel,
+ NL80211_CHAN_WIDTH_20_NOHT);
+ break;
+ }
+
sband = sdata->local->hw.wiphy->bands[cbss->channel->band];
+ rate_flags = ieee80211_chandef_rate_flags(&sdata->u.ibss.chandef);
+ shift = ieee80211_vif_get_shift(&sdata->vif);
basic_rates = 0;
for (i = 0; i < bss->supp_rates_len; i++) {
- int rate = (bss->supp_rates[i] & 0x7f) * 5;
+ int rate = bss->supp_rates[i] & 0x7f;
bool is_basic = !!(bss->supp_rates[i] & 0x80);
for (j = 0; j < sband->n_bitrates; j++) {
- if (sband->bitrates[j].bitrate == rate) {
+ int brate;
+ if ((rate_flags & sband->bitrates[j].flags)
+ != rate_flags)
+ continue;
+
+ brate = DIV_ROUND_UP(sband->bitrates[j].bitrate,
+ 5 * (1 << shift));
+ if (brate == rate) {
if (is_basic)
basic_rates |= BIT(j);
break;
@@ -294,7 +410,7 @@ static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
__ieee80211_sta_join_ibss(sdata, cbss->bssid,
beacon_int,
- cbss->channel,
+ &chandef,
basic_rates,
cbss->capability,
tsf, false);
@@ -335,6 +451,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid,
struct sta_info *sta;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_supported_band *sband;
+ enum nl80211_bss_scan_width scan_width;
int band;
/*
@@ -363,6 +480,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid,
if (WARN_ON_ONCE(!chanctx_conf))
return NULL;
band = chanctx_conf->def.chan->band;
+ scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def);
rcu_read_unlock();
sta = sta_info_alloc(sdata, addr, GFP_KERNEL);
@@ -376,7 +494,7 @@ ieee80211_ibss_add_sta(struct ieee80211_sub_if_data *sdata, const u8 *bssid,
/* make sure mandatory rates are always added */
sband = local->hw.wiphy->bands[band];
sta->sta.supp_rates[band] = supp_rates |
- ieee80211_mandatory_rates(sband);
+ ieee80211_mandatory_rates(sband, scan_width);
return ieee80211_ibss_finish_sta(sta);
}
@@ -440,6 +558,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
u64 beacon_timestamp, rx_timestamp;
u32 supp_rates = 0;
enum ieee80211_band band = rx_status->band;
+ enum nl80211_bss_scan_width scan_width;
struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
bool rates_updated = false;
@@ -461,16 +580,22 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
sta = sta_info_get(sdata, mgmt->sa);
if (elems->supp_rates) {
- supp_rates = ieee80211_sta_get_rates(local, elems,
+ supp_rates = ieee80211_sta_get_rates(sdata, elems,
band, NULL);
if (sta) {
u32 prev_rates;
prev_rates = sta->sta.supp_rates[band];
/* make sure mandatory rates are always added */
- sta->sta.supp_rates[band] = supp_rates |
- ieee80211_mandatory_rates(sband);
+ scan_width = NL80211_BSS_CHAN_WIDTH_20;
+ if (rx_status->flag & RX_FLAG_5MHZ)
+ scan_width = NL80211_BSS_CHAN_WIDTH_5;
+ if (rx_status->flag & RX_FLAG_10MHZ)
+ scan_width = NL80211_BSS_CHAN_WIDTH_10;
+ sta->sta.supp_rates[band] = supp_rates |
+ ieee80211_mandatory_rates(sband,
+ scan_width);
if (sta->sta.supp_rates[band] != prev_rates) {
ibss_dbg(sdata,
"updated supp_rates set for %pM based on beacon/probe_resp (0x%x -> 0x%x)\n",
@@ -585,7 +710,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
"beacon TSF higher than local TSF - IBSS merge with BSSID %pM\n",
mgmt->bssid);
ieee80211_sta_join_ibss(sdata, bss);
- supp_rates = ieee80211_sta_get_rates(local, elems, band, NULL);
+ supp_rates = ieee80211_sta_get_rates(sdata, elems, band, NULL);
ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
supp_rates);
rcu_read_unlock();
@@ -604,6 +729,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_supported_band *sband;
+ enum nl80211_bss_scan_width scan_width;
int band;
/*
@@ -629,6 +755,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
return;
}
band = chanctx_conf->def.chan->band;
+ scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def);
rcu_read_unlock();
sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
@@ -640,7 +767,7 @@ void ieee80211_ibss_rx_no_sta(struct ieee80211_sub_if_data *sdata,
/* make sure mandatory rates are always added */
sband = local->hw.wiphy->bands[band];
sta->sta.supp_rates[band] = supp_rates |
- ieee80211_mandatory_rates(sband);
+ ieee80211_mandatory_rates(sband, scan_width);
spin_lock(&ifibss->incomplete_lock);
list_add(&sta->list, &ifibss->incomplete_stations);
@@ -672,6 +799,33 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
return active;
}
+static void ieee80211_ibss_sta_expire(struct ieee80211_sub_if_data *sdata)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta, *tmp;
+ unsigned long exp_time = IEEE80211_IBSS_INACTIVITY_LIMIT;
+ unsigned long exp_rsn_time = IEEE80211_IBSS_RSN_INACTIVITY_LIMIT;
+
+ mutex_lock(&local->sta_mtx);
+
+ list_for_each_entry_safe(sta, tmp, &local->sta_list, list) {
+ if (sdata != sta->sdata)
+ continue;
+
+ if (time_after(jiffies, sta->last_rx + exp_time) ||
+ (time_after(jiffies, sta->last_rx + exp_rsn_time) &&
+ sta->sta_state != IEEE80211_STA_AUTHORIZED)) {
+ sta_dbg(sta->sdata, "expiring inactive %sSTA %pM\n",
+ sta->sta_state != IEEE80211_STA_AUTHORIZED ?
+ "not authorized " : "", sta->sta.addr);
+
+ WARN_ON(__sta_info_destroy(sta));
+ }
+ }
+
+ mutex_unlock(&local->sta_mtx);
+}
+
/*
* This function is called with state == IEEE80211_IBSS_MLME_JOINED
*/
@@ -679,13 +833,14 @@ static int ieee80211_sta_active_ibss(struct ieee80211_sub_if_data *sdata)
static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
+ enum nl80211_bss_scan_width scan_width;
sdata_assert_lock(sdata);
mod_timer(&ifibss->timer,
round_jiffies(jiffies + IEEE80211_IBSS_MERGE_INTERVAL));
- ieee80211_sta_expire(sdata, IEEE80211_IBSS_INACTIVITY_LIMIT);
+ ieee80211_ibss_sta_expire(sdata);
if (time_before(jiffies, ifibss->last_scan_completed +
IEEE80211_IBSS_MERGE_INTERVAL))
@@ -700,8 +855,9 @@ static void ieee80211_sta_merge_ibss(struct ieee80211_sub_if_data *sdata)
sdata_info(sdata,
"No active IBSS STAs - trying to scan for other IBSS networks with same SSID (merge)\n");
+ scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
ieee80211_request_ibss_scan(sdata, ifibss->ssid, ifibss->ssid_len,
- NULL);
+ NULL, scan_width);
}
static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
@@ -736,7 +892,7 @@ static void ieee80211_sta_create_ibss(struct ieee80211_sub_if_data *sdata)
sdata->drop_unencrypted = 0;
__ieee80211_sta_join_ibss(sdata, bssid, sdata->vif.bss_conf.beacon_int,
- ifibss->chandef.chan, ifibss->basic_rates,
+ &ifibss->chandef, ifibss->basic_rates,
capability, 0, true);
}
@@ -751,6 +907,7 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
struct cfg80211_bss *cbss;
struct ieee80211_channel *chan = NULL;
const u8 *bssid = NULL;
+ enum nl80211_bss_scan_width scan_width;
int active_ibss;
u16 capability;
@@ -792,6 +949,17 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
return;
}
+ /* if a fixed BSSID and a fixed frequency have been provided, create
+ * the IBSS directly and do not waste time scanning
+ */
+ if (ifibss->fixed_bssid && ifibss->fixed_channel) {
+ sdata_info(sdata, "Created IBSS using preconfigured BSSID %pM\n",
+ bssid);
+ ieee80211_sta_create_ibss(sdata);
+ return;
+ }
+
ibss_dbg(sdata, "sta_find_ibss: did not try to join ibss\n");
/* Selected IBSS not found in current scan results - try to scan */
@@ -799,8 +967,10 @@ static void ieee80211_sta_find_ibss(struct ieee80211_sub_if_data *sdata)
IEEE80211_SCAN_INTERVAL)) {
sdata_info(sdata, "Trigger new scan to find an IBSS to join\n");
+ scan_width = cfg80211_chandef_to_scan_width(&ifibss->chandef);
ieee80211_request_ibss_scan(sdata, ifibss->ssid,
- ifibss->ssid_len, chan);
+ ifibss->ssid_len, chan,
+ scan_width);
} else {
int interval = IEEE80211_SCAN_INTERVAL;
@@ -1020,6 +1190,9 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
struct cfg80211_ibss_params *params)
{
u32 changed = 0;
+ u32 rate_flags;
+ struct ieee80211_supported_band *sband;
+ int i;
if (params->bssid) {
memcpy(sdata->u.ibss.bssid, params->bssid, ETH_ALEN);
@@ -1030,6 +1203,14 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
sdata->u.ibss.privacy = params->privacy;
sdata->u.ibss.control_port = params->control_port;
sdata->u.ibss.basic_rates = params->basic_rates;
+
+ /* fix basic_rates if channel does not support these rates */
+ rate_flags = ieee80211_chandef_rate_flags(&params->chandef);
+ sband = sdata->local->hw.wiphy->bands[params->chandef.chan->band];
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ sdata->u.ibss.basic_rates &= ~BIT(i);
+ }
memcpy(sdata->vif.bss_conf.mcast_rate, params->mcast_rate,
sizeof(params->mcast_rate));
@@ -1051,6 +1232,11 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata,
memcpy(sdata->u.ibss.ssid, params->ssid, params->ssid_len);
sdata->u.ibss.ssid_len = params->ssid_len;
+ memcpy(&sdata->u.ibss.ht_capa, &params->ht_capa,
+ sizeof(sdata->u.ibss.ht_capa));
+ memcpy(&sdata->u.ibss.ht_capa_mask, &params->ht_capa_mask,
+ sizeof(sdata->u.ibss.ht_capa_mask));
+
/*
* 802.11n-2009 9.13.3.1: In an IBSS, the HT Protection field is
* reserved, but an HT STA shall protect HT transmissions as though
@@ -1131,6 +1317,11 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
presp = rcu_dereference_protected(ifibss->presp,
lockdep_is_held(&sdata->wdev.mtx));
RCU_INIT_POINTER(sdata->u.ibss.presp, NULL);
+
+ /* on the next join, re-program HT parameters */
+ memset(&ifibss->ht_capa, 0, sizeof(ifibss->ht_capa));
+ memset(&ifibss->ht_capa_mask, 0, sizeof(ifibss->ht_capa_mask));
+
sdata->vif.bss_conf.ibss_joined = false;
sdata->vif.bss_conf.ibss_creator = false;
sdata->vif.bss_conf.enable_beacon = false;
@@ -1138,6 +1329,7 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED |
BSS_CHANGED_IBSS);
+ ieee80211_vif_release_channel(sdata);
synchronize_rcu();
kfree(presp);
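The probe-response builder above encodes each rate as DIV_ROUND_UP(bitrate, 5 * (1 << shift)): the bitrate table holds the 20 MHz nominal rate in 100 kbps units, the Supported Rates IE wants 500 kbps units, and on 10 MHz and 5 MHz channels the air rate is halved or quartered (shift 1 or 2, per ieee80211_chandef_get_shift() later in this patch). A worked standalone example of that conversion:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* bitrate: 20 MHz nominal rate in 100 kbps units; returns 500 kbps IE units */
static int rate_ie_units(int bitrate, int shift)
{
	return DIV_ROUND_UP(bitrate, 5 * (1 << shift));
}

int main(void)
{
	printf("%d\n", rate_ie_units(60, 0));	/* 12: 6 Mbps on a 20 MHz channel */
	printf("%d\n", rate_ie_units(60, 1));	/* 6:  the same rate halves on 10 MHz */
	printf("%d\n", rate_ie_units(55, 2));	/* 3:  5.5 Mbps quarters, rounded up */
	return 0;
}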
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 8412a303993..b6186517ec5 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -53,9 +53,6 @@ struct ieee80211_local;
* increased memory use (about 2 kB of RAM per entry). */
#define IEEE80211_FRAGMENT_MAX 4
-#define TU_TO_JIFFIES(x) (usecs_to_jiffies((x) * 1024))
-#define TU_TO_EXP_TIME(x) (jiffies + TU_TO_JIFFIES(x))
-
/* power level hasn't been configured (or set to automatic) */
#define IEEE80211_UNSET_POWER_LEVEL INT_MIN
@@ -259,6 +256,8 @@ struct ieee80211_if_ap {
struct beacon_data __rcu *beacon;
struct probe_resp __rcu *probe_resp;
+ /* to be used after channel switch. */
+ struct cfg80211_beacon_data *next_beacon;
struct list_head vlans;
struct ps_data ps;
@@ -509,6 +508,9 @@ struct ieee80211_if_ibss {
/* probe response/beacon for IBSS */
struct beacon_data __rcu *presp;
+ struct ieee80211_ht_cap ht_capa; /* configured ht-cap over-rides */
+ struct ieee80211_ht_cap ht_capa_mask; /* Valid parts of ht_capa */
+
spinlock_t incomplete_lock;
struct list_head incomplete_stations;
@@ -713,6 +715,11 @@ struct ieee80211_sub_if_data {
struct ieee80211_tx_queue_params tx_conf[IEEE80211_NUM_ACS];
+ struct work_struct csa_finalize_work;
+ int csa_counter_offset_beacon;
+ int csa_counter_offset_presp;
+ bool csa_radar_required;
+
/* used to reconfigure hardware SM PS */
struct work_struct recalc_smps;
@@ -809,6 +816,34 @@ ieee80211_get_sdata_band(struct ieee80211_sub_if_data *sdata)
return band;
}
+static inline int
+ieee80211_chandef_get_shift(struct cfg80211_chan_def *chandef)
+{
+ switch (chandef->width) {
+ case NL80211_CHAN_WIDTH_5:
+ return 2;
+ case NL80211_CHAN_WIDTH_10:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static inline int
+ieee80211_vif_get_shift(struct ieee80211_vif *vif)
+{
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ int shift = 0;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(vif->chanctx_conf);
+ if (chanctx_conf)
+ shift = ieee80211_chandef_get_shift(&chanctx_conf->def);
+ rcu_read_unlock();
+
+ return shift;
+}
+
enum sdata_queue_type {
IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0,
IEEE80211_SDATA_QUEUE_AGG_START = 1,
@@ -1026,7 +1061,7 @@ struct ieee80211_local {
struct cfg80211_ssid scan_ssid;
struct cfg80211_scan_request *int_scan_req;
struct cfg80211_scan_request *scan_req, *hw_scan_req;
- struct ieee80211_channel *scan_channel;
+ struct cfg80211_chan_def scan_chandef;
enum ieee80211_band hw_scan_band;
int scan_channel_idx;
int scan_ies_len;
@@ -1063,7 +1098,6 @@ struct ieee80211_local {
u32 dot11TransmittedFrameCount;
#ifdef CONFIG_MAC80211_LEDS
- int tx_led_counter, rx_led_counter;
struct led_trigger *tx_led, *rx_led, *assoc_led, *radio_led;
struct tpt_led_trigger *tpt_led_trigger;
char tx_led_name[32], rx_led_name[32],
@@ -1306,7 +1340,8 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
void ieee80211_scan_work(struct work_struct *work);
int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
const u8 *ssid, u8 ssid_len,
- struct ieee80211_channel *chan);
+ struct ieee80211_channel *chan,
+ enum nl80211_bss_scan_width scan_width);
int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
struct cfg80211_scan_request *req);
void ieee80211_scan_cancel(struct ieee80211_local *local);
@@ -1341,6 +1376,9 @@ void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free);
void ieee80211_sw_roc_work(struct work_struct *work);
void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc);
+/* channel switch handling */
+void ieee80211_csa_finalize_work(struct work_struct *work);
+
/* interface handling */
int ieee80211_iface_init(void);
void ieee80211_iface_exit(void);
@@ -1362,6 +1400,8 @@ void ieee80211_del_virtual_monitor(struct ieee80211_local *local);
bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata);
+int ieee80211_assign_beacon(struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_beacon_data *params);
static inline bool ieee80211_sdata_running(struct ieee80211_sub_if_data *sdata)
{
@@ -1465,7 +1505,8 @@ extern void *mac80211_wiphy_privid; /* for wiphy privid */
u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
enum nl80211_iftype type);
int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
- int rate, int erp, int short_preamble);
+ int rate, int erp, int short_preamble,
+ int shift);
void mac80211_ev_michael_mic_failure(struct ieee80211_sub_if_data *sdata, int keyidx,
struct ieee80211_hdr *hdr, const u8 *tsc,
gfp_t gfp);
@@ -1569,7 +1610,7 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
size_t buffer_len, const u8 *ie, size_t ie_len,
enum ieee80211_band band, u32 rate_mask,
- u8 channel);
+ struct cfg80211_chan_def *chandef);
struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
u8 *dst, u32 ratemask,
struct ieee80211_channel *chan,
@@ -1582,10 +1623,7 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
u32 ratemask, bool directed, u32 tx_flags,
struct ieee80211_channel *channel, bool scan);
-void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
- const size_t supp_rates_len,
- const u8 *supp_rates);
-u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
+u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems,
enum ieee80211_band band, u32 *basic_rates);
int __ieee80211_request_smps(struct ieee80211_sub_if_data *sdata,
@@ -1602,6 +1640,9 @@ u8 *ieee80211_ie_build_ht_oper(u8 *pos, struct ieee80211_sta_ht_cap *ht_cap,
u16 prot_mode);
u8 *ieee80211_ie_build_vht_cap(u8 *pos, struct ieee80211_sta_vht_cap *vht_cap,
u32 cap);
+int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef,
+ const struct ieee80211_supported_band *sband,
+ const u8 *srates, int srates_len, u32 *rates);
int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, bool need_basic,
enum ieee80211_band band);
@@ -1622,6 +1663,11 @@ int __must_check
ieee80211_vif_change_bandwidth(struct ieee80211_sub_if_data *sdata,
const struct cfg80211_chan_def *chandef,
u32 *changed);
+/* NOTE: only use ieee80211_vif_change_channel() for channel switch */
+int __must_check
+ieee80211_vif_change_channel(struct ieee80211_sub_if_data *sdata,
+ const struct cfg80211_chan_def *chandef,
+ u32 *changed);
void ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata);
void ieee80211_vif_vlan_copy_chanctx(struct ieee80211_sub_if_data *sdata);
void ieee80211_vif_copy_chanctx_to_vlans(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index cc117591f67..fcecd633514 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -54,7 +54,7 @@ bool __ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)
return false;
}
- power = chanctx_conf->def.chan->max_power;
+ power = ieee80211_chandef_max_power(&chanctx_conf->def);
rcu_read_unlock();
if (sdata->user_power_level != IEEE80211_UNSET_POWER_LEVEL)
@@ -274,6 +274,12 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
if (iftype == NL80211_IFTYPE_ADHOC &&
nsdata->vif.type == NL80211_IFTYPE_ADHOC)
return -EBUSY;
+ /*
+ * do not add another interface while a channel
+ * switch is in progress.
+ */
+ if (nsdata->vif.csa_active)
+ return -EBUSY;
/*
* The remaining checks are only performed for interfaces
@@ -302,12 +308,13 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata,
return 0;
}
-static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata)
+static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata,
+ enum nl80211_iftype iftype)
{
int n_queues = sdata->local->hw.queues;
int i;
- if (sdata->vif.type != NL80211_IFTYPE_P2P_DEVICE) {
+ if (iftype != NL80211_IFTYPE_P2P_DEVICE) {
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
if (WARN_ON_ONCE(sdata->vif.hw_queue[i] ==
IEEE80211_INVAL_HW_QUEUE))
@@ -318,8 +325,9 @@ static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata)
}
}
- if ((sdata->vif.type != NL80211_IFTYPE_AP &&
- sdata->vif.type != NL80211_IFTYPE_MESH_POINT) ||
+ if ((iftype != NL80211_IFTYPE_AP &&
+ iftype != NL80211_IFTYPE_P2P_GO &&
+ iftype != NL80211_IFTYPE_MESH_POINT) ||
!(sdata->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)) {
sdata->vif.cab_queue = IEEE80211_INVAL_HW_QUEUE;
return 0;
@@ -402,7 +410,7 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
return ret;
}
- ret = ieee80211_check_queues(sdata);
+ ret = ieee80211_check_queues(sdata, NL80211_IFTYPE_MONITOR);
if (ret) {
kfree(sdata);
return ret;
@@ -586,7 +594,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
res = drv_add_interface(local, sdata);
if (res)
goto err_stop;
- res = ieee80211_check_queues(sdata);
+ res = ieee80211_check_queues(sdata,
+ ieee80211_vif_type_p2p(&sdata->vif));
if (res)
goto err_del_interface;
}
@@ -804,6 +813,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
cancel_work_sync(&local->dynamic_ps_enable_work);
cancel_work_sync(&sdata->recalc_smps);
+ sdata->vif.csa_active = false;
+ cancel_work_sync(&sdata->csa_finalize_work);
cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
@@ -1267,6 +1278,7 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
skb_queue_head_init(&sdata->skb_queue);
INIT_WORK(&sdata->work, ieee80211_iface_work);
INIT_WORK(&sdata->recalc_smps, ieee80211_recalc_smps_work);
+ INIT_WORK(&sdata->csa_finalize_work, ieee80211_csa_finalize_work);
switch (type) {
case NL80211_IFTYPE_P2P_GO:
@@ -1380,14 +1392,14 @@ static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata,
ret = drv_change_interface(local, sdata, internal_type, p2p);
if (ret)
- type = sdata->vif.type;
+ type = ieee80211_vif_type_p2p(&sdata->vif);
/*
* Ignore return value here, there's not much we can do since
* the driver changed the interface type internally already.
* The warnings will hopefully make driver authors fix it :-)
*/
- ieee80211_check_queues(sdata);
+ ieee80211_check_queues(sdata, type);
ieee80211_setup_sdata(sdata, type);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index e39cc91d0cf..620677e897b 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -93,6 +93,9 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
might_sleep();
+ if (key->flags & KEY_FLAG_TAINTED)
+ return -EINVAL;
+
if (!key->local->ops->set_key)
goto out_unsupported;
@@ -455,6 +458,7 @@ int ieee80211_key_link(struct ieee80211_key *key,
struct ieee80211_sub_if_data *sdata,
struct sta_info *sta)
{
+ struct ieee80211_local *local = sdata->local;
struct ieee80211_key *old_key;
int idx, ret;
bool pairwise;
@@ -484,10 +488,13 @@ int ieee80211_key_link(struct ieee80211_key *key,
ieee80211_debugfs_key_add(key);
- ret = ieee80211_key_enable_hw_accel(key);
-
- if (ret)
- ieee80211_key_free(key, true);
+ if (!local->wowlan) {
+ ret = ieee80211_key_enable_hw_accel(key);
+ if (ret)
+ ieee80211_key_free(key, true);
+ } else {
+ ret = 0;
+ }
mutex_unlock(&sdata->local->key_mtx);
@@ -540,7 +547,7 @@ void ieee80211_iter_keys(struct ieee80211_hw *hw,
void *iter_data)
{
struct ieee80211_local *local = hw_to_local(hw);
- struct ieee80211_key *key;
+ struct ieee80211_key *key, *tmp;
struct ieee80211_sub_if_data *sdata;
ASSERT_RTNL();
@@ -548,13 +555,14 @@ void ieee80211_iter_keys(struct ieee80211_hw *hw,
mutex_lock(&local->key_mtx);
if (vif) {
sdata = vif_to_sdata(vif);
- list_for_each_entry(key, &sdata->key_list, list)
+ list_for_each_entry_safe(key, tmp, &sdata->key_list, list)
iter(hw, &sdata->vif,
key->sta ? &key->sta->sta : NULL,
&key->conf, iter_data);
} else {
list_for_each_entry(sdata, &local->interfaces, list)
- list_for_each_entry(key, &sdata->key_list, list)
+ list_for_each_entry_safe(key, tmp,
+ &sdata->key_list, list)
iter(hw, &sdata->vif,
key->sta ? &key->sta->sta : NULL,
&key->conf, iter_data);
@@ -751,3 +759,135 @@ void ieee80211_get_key_rx_seq(struct ieee80211_key_conf *keyconf,
}
}
EXPORT_SYMBOL(ieee80211_get_key_rx_seq);
+
+void ieee80211_set_key_tx_seq(struct ieee80211_key_conf *keyconf,
+ struct ieee80211_key_seq *seq)
+{
+ struct ieee80211_key *key;
+ u64 pn64;
+
+ key = container_of(keyconf, struct ieee80211_key, conf);
+
+ switch (key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ key->u.tkip.tx.iv32 = seq->tkip.iv32;
+ key->u.tkip.tx.iv16 = seq->tkip.iv16;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ pn64 = (u64)seq->ccmp.pn[5] |
+ ((u64)seq->ccmp.pn[4] << 8) |
+ ((u64)seq->ccmp.pn[3] << 16) |
+ ((u64)seq->ccmp.pn[2] << 24) |
+ ((u64)seq->ccmp.pn[1] << 32) |
+ ((u64)seq->ccmp.pn[0] << 40);
+ atomic64_set(&key->u.ccmp.tx_pn, pn64);
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ pn64 = (u64)seq->aes_cmac.pn[5] |
+ ((u64)seq->aes_cmac.pn[4] << 8) |
+ ((u64)seq->aes_cmac.pn[3] << 16) |
+ ((u64)seq->aes_cmac.pn[2] << 24) |
+ ((u64)seq->aes_cmac.pn[1] << 32) |
+ ((u64)seq->aes_cmac.pn[0] << 40);
+ atomic64_set(&key->u.aes_cmac.tx_pn, pn64);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(ieee80211_set_key_tx_seq);
+
+void ieee80211_set_key_rx_seq(struct ieee80211_key_conf *keyconf,
+ int tid, struct ieee80211_key_seq *seq)
+{
+ struct ieee80211_key *key;
+ u8 *pn;
+
+ key = container_of(keyconf, struct ieee80211_key, conf);
+
+ switch (key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (WARN_ON(tid < 0 || tid >= IEEE80211_NUM_TIDS))
+ return;
+ key->u.tkip.rx[tid].iv32 = seq->tkip.iv32;
+ key->u.tkip.rx[tid].iv16 = seq->tkip.iv16;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ if (WARN_ON(tid < -1 || tid >= IEEE80211_NUM_TIDS))
+ return;
+ if (tid < 0)
+ pn = key->u.ccmp.rx_pn[IEEE80211_NUM_TIDS];
+ else
+ pn = key->u.ccmp.rx_pn[tid];
+ memcpy(pn, seq->ccmp.pn, IEEE80211_CCMP_PN_LEN);
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ if (WARN_ON(tid != 0))
+ return;
+ pn = key->u.aes_cmac.rx_pn;
+ memcpy(pn, seq->aes_cmac.pn, IEEE80211_CMAC_PN_LEN);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(ieee80211_set_key_rx_seq);
+
+void ieee80211_remove_key(struct ieee80211_key_conf *keyconf)
+{
+ struct ieee80211_key *key;
+
+ key = container_of(keyconf, struct ieee80211_key, conf);
+
+ assert_key_lock(key->local);
+
+ /*
+ * if the key was uploaded, assume the driver has removed it (or
+ * will remove it), so adjust the bookkeeping accordingly
+ */
+ if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
+ key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
+
+ if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
+ increment_tailroom_need_count(key->sdata);
+ }
+
+ ieee80211_key_free(key, false);
+}
+EXPORT_SYMBOL_GPL(ieee80211_remove_key);
+
+struct ieee80211_key_conf *
+ieee80211_gtk_rekey_add(struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_key *key;
+ int err;
+
+ if (WARN_ON(!local->wowlan))
+ return ERR_PTR(-EINVAL);
+
+ if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
+ return ERR_PTR(-EINVAL);
+
+ key = ieee80211_key_alloc(keyconf->cipher, keyconf->keyidx,
+ keyconf->keylen, keyconf->key,
+ 0, NULL);
+ if (IS_ERR(key))
+ return ERR_PTR(PTR_ERR(key));
+
+ if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED)
+ key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
+
+ err = ieee80211_key_link(key, sdata, NULL);
+ if (err)
+ return ERR_PTR(err);
+
+ return &key->conf;
+}
+EXPORT_SYMBOL_GPL(ieee80211_gtk_rekey_add);
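ieee80211_set_key_tx_seq() above rebuilds the 48-bit CCMP/CMAC packet number from six bytes, with pn[0] as the most significant byte, before storing it in a 64-bit counter. The same byte assembly as a standalone sketch:

#include <stdio.h>
#include <stdint.h>

static uint64_t pn_to_u64(const uint8_t pn[6])
{
	return (uint64_t)pn[5] |
	       ((uint64_t)pn[4] << 8) |
	       ((uint64_t)pn[3] << 16) |
	       ((uint64_t)pn[2] << 24) |
	       ((uint64_t)pn[1] << 32) |
	       ((uint64_t)pn[0] << 40);
}

int main(void)
{
	const uint8_t pn[6] = { 0x00, 0x00, 0x00, 0x00, 0x01, 0x02 };

	/* pn[4]=0x01 contributes bits 8..15, pn[5]=0x02 the low byte */
	printf("0x%llx\n", (unsigned long long)pn_to_u64(pn));	/* 0x102 */
	return 0;
}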
diff --git a/net/mac80211/led.c b/net/mac80211/led.c
index bcffa690312..e2b836446af 100644
--- a/net/mac80211/led.c
+++ b/net/mac80211/led.c
@@ -12,27 +12,22 @@
#include <linux/export.h>
#include "led.h"
+#define MAC80211_BLINK_DELAY 50 /* ms */
+
void ieee80211_led_rx(struct ieee80211_local *local)
{
+ unsigned long led_delay = MAC80211_BLINK_DELAY;
if (unlikely(!local->rx_led))
return;
- if (local->rx_led_counter++ % 2 == 0)
- led_trigger_event(local->rx_led, LED_OFF);
- else
- led_trigger_event(local->rx_led, LED_FULL);
+ led_trigger_blink_oneshot(local->rx_led, &led_delay, &led_delay, 0);
}
-/* q is 1 if a packet was enqueued, 0 if it has been transmitted */
-void ieee80211_led_tx(struct ieee80211_local *local, int q)
+void ieee80211_led_tx(struct ieee80211_local *local)
{
+ unsigned long led_delay = MAC80211_BLINK_DELAY;
if (unlikely(!local->tx_led))
return;
- /* not sure how this is supposed to work ... */
- local->tx_led_counter += 2*q-1;
- if (local->tx_led_counter % 2 == 0)
- led_trigger_event(local->tx_led, LED_OFF);
- else
- led_trigger_event(local->tx_led, LED_FULL);
+ led_trigger_blink_oneshot(local->tx_led, &led_delay, &led_delay, 0);
}
void ieee80211_led_assoc(struct ieee80211_local *local, bool associated)
diff --git a/net/mac80211/led.h b/net/mac80211/led.h
index e0275d9befa..89f4344f13b 100644
--- a/net/mac80211/led.h
+++ b/net/mac80211/led.h
@@ -13,7 +13,7 @@
#ifdef CONFIG_MAC80211_LEDS
void ieee80211_led_rx(struct ieee80211_local *local);
-void ieee80211_led_tx(struct ieee80211_local *local, int q);
+void ieee80211_led_tx(struct ieee80211_local *local);
void ieee80211_led_assoc(struct ieee80211_local *local,
bool associated);
void ieee80211_led_radio(struct ieee80211_local *local,
@@ -27,7 +27,7 @@ void ieee80211_mod_tpt_led_trig(struct ieee80211_local *local,
static inline void ieee80211_led_rx(struct ieee80211_local *local)
{
}
-static inline void ieee80211_led_tx(struct ieee80211_local *local, int q)
+static inline void ieee80211_led_tx(struct ieee80211_local *local)
{
}
static inline void ieee80211_led_assoc(struct ieee80211_local *local,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 091088ac789..21d5d44444d 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -102,17 +102,8 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;
- if (local->scan_channel) {
- chandef.chan = local->scan_channel;
- /* If scanning on oper channel, use whatever channel-type
- * is currently in use.
- */
- if (chandef.chan == local->_oper_chandef.chan) {
- chandef = local->_oper_chandef;
- } else {
- chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
- chandef.center_freq1 = chandef.chan->center_freq;
- }
+ if (local->scan_chandef.chan) {
+ chandef = local->scan_chandef;
} else if (local->tmp_channel) {
chandef.chan = local->tmp_channel;
chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
@@ -151,7 +142,7 @@ static u32 ieee80211_hw_conf_chan(struct ieee80211_local *local)
changed |= IEEE80211_CONF_CHANGE_SMPS;
}
- power = chandef.chan->max_power;
+ power = ieee80211_chandef_max_power(&chandef);
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
@@ -901,9 +892,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
if (!local->ops->remain_on_channel)
local->hw.wiphy->max_remain_on_channel_duration = 5000;
- if (local->ops->sched_scan_start)
- local->hw.wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
-
/* mac80211 based drivers don't support internal TDLS setup */
if (local->hw.wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS)
local->hw.wiphy->flags |= WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 447f41bbe74..707ac61d63e 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -62,7 +62,6 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *ie)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- struct ieee80211_local *local = sdata->local;
u32 basic_rates = 0;
struct cfg80211_chan_def sta_chan_def;
@@ -85,7 +84,7 @@ bool mesh_matches_local(struct ieee80211_sub_if_data *sdata,
(ifmsh->mesh_auth_id == ie->mesh_config->meshconf_auth)))
return false;
- ieee80211_sta_get_rates(local, ie, ieee80211_get_sdata_band(sdata),
+ ieee80211_sta_get_rates(sdata, ie, ieee80211_get_sdata_band(sdata),
&basic_rates);
if (sdata->vif.bss_conf.basic_rates != basic_rates)
@@ -274,7 +273,9 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata,
neighbors = min_t(int, neighbors, IEEE80211_MAX_MESH_PEERINGS);
*pos++ = neighbors << 1;
/* Mesh capability */
- *pos = IEEE80211_MESHCONF_CAPAB_FORWARDING;
+ *pos = 0x00;
+ *pos |= ifmsh->mshcfg.dot11MeshForwarding ?
+ IEEE80211_MESHCONF_CAPAB_FORWARDING : 0x00;
*pos |= ifmsh->accepting_plinks ?
IEEE80211_MESHCONF_CAPAB_ACCEPT_PLINKS : 0x00;
/* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */
@@ -831,6 +832,9 @@ ieee80211_mesh_rx_probe_req(struct ieee80211_sub_if_data *sdata,
ieee802_11_parse_elems(pos, len - baselen, false, &elems);
+ if (!elems.mesh_id)
+ return;
+
/* 802.11-2012 10.1.4.3.2 */
if ((!ether_addr_equal(mgmt->da, sdata->vif.addr) &&
!is_broadcast_ether_addr(mgmt->da)) ||
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 02c05fa15c2..6b65d5055f5 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -379,7 +379,7 @@ static void mesh_sta_info_init(struct ieee80211_sub_if_data *sdata,
u32 rates, basic_rates = 0, changed = 0;
sband = local->hw.wiphy->bands[band];
- rates = ieee80211_sta_get_rates(local, elems, band, &basic_rates);
+ rates = ieee80211_sta_get_rates(sdata, elems, band, &basic_rates);
spin_lock_bh(&sta->lock);
sta->last_rx = jiffies;
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
index 3b7bfc01ee3..22290a929b9 100644
--- a/net/mac80211/mesh_ps.c
+++ b/net/mac80211/mesh_ps.c
@@ -229,6 +229,10 @@ void ieee80211_mps_sta_status_update(struct sta_info *sta)
enum nl80211_mesh_power_mode pm;
bool do_buffer;
+ /* For non-assoc STA, prevent buffering or frame transmission */
+ if (sta->sta_state < IEEE80211_STA_ASSOC)
+ return;
+
/*
* use peer-specific power mode if peering is established and the
* peer's power mode is known
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index ae31968d42d..86e4ad56b57 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -31,10 +31,12 @@
#include "led.h"
#define IEEE80211_AUTH_TIMEOUT (HZ / 5)
+#define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2)
#define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10)
#define IEEE80211_AUTH_MAX_TRIES 3
#define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5)
#define IEEE80211_ASSOC_TIMEOUT (HZ / 5)
+#define IEEE80211_ASSOC_TIMEOUT_LONG (HZ / 2)
#define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10)
#define IEEE80211_ASSOC_MAX_TRIES 3
@@ -209,8 +211,9 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
struct ieee80211_channel *channel,
const struct ieee80211_ht_operation *ht_oper,
const struct ieee80211_vht_operation *vht_oper,
- struct cfg80211_chan_def *chandef, bool verbose)
+ struct cfg80211_chan_def *chandef, bool tracking)
{
+ struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
struct cfg80211_chan_def vht_chandef;
u32 ht_cfreq, ret;
@@ -229,7 +232,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan,
channel->band);
/* check that channel matches the right operating channel */
- if (channel->center_freq != ht_cfreq) {
+ if (!tracking && channel->center_freq != ht_cfreq) {
/*
* It's possible that some APs are confused here;
* Netgear WNDR3700 sometimes reports 4 higher than
@@ -237,11 +240,10 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
* since we look at probe response/beacon data here
* it should be OK.
*/
- if (verbose)
- sdata_info(sdata,
- "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
- channel->center_freq, ht_cfreq,
- ht_oper->primary_chan, channel->band);
+ sdata_info(sdata,
+ "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n",
+ channel->center_freq, ht_cfreq,
+ ht_oper->primary_chan, channel->band);
ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
goto out;
}
@@ -295,7 +297,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
channel->band);
break;
default:
- if (verbose)
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
sdata_info(sdata,
"AP VHT operation IE has invalid channel width (%d), disable VHT\n",
vht_oper->chan_width);
@@ -304,7 +306,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
}
if (!cfg80211_chandef_valid(&vht_chandef)) {
- if (verbose)
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
sdata_info(sdata,
"AP VHT information is invalid, disable VHT\n");
ret = IEEE80211_STA_DISABLE_VHT;
@@ -317,7 +319,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
}
if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) {
- if (verbose)
+ if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
sdata_info(sdata,
"AP VHT information doesn't match HT, disable VHT\n");
ret = IEEE80211_STA_DISABLE_VHT;
@@ -333,18 +335,27 @@ out:
if (ret & IEEE80211_STA_DISABLE_VHT)
vht_chandef = *chandef;
+ /*
+ * Ignore the DISABLED flag when we're already connected and only
+ * tracking the AP's beacon for bandwidth changes - otherwise we
+ * might get disconnected here if we connect to an AP, update our
+ * regulatory information based on the AP's country IE and the
+ * information we have is wrong/outdated and disables the channel
+ * that we're actually using for the connection to the AP.
+ */
while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef,
- IEEE80211_CHAN_DISABLED)) {
+ tracking ? 0 :
+ IEEE80211_CHAN_DISABLED)) {
if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) {
ret = IEEE80211_STA_DISABLE_HT |
IEEE80211_STA_DISABLE_VHT;
- goto out;
+ break;
}
ret |= chandef_downgrade(chandef);
}
- if (chandef->width != vht_chandef.width && verbose)
+ if (chandef->width != vht_chandef.width && !tracking)
sdata_info(sdata,
"capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n");
@@ -384,7 +395,7 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
/* calculate new channel (type) based on HT/VHT operation IEs */
flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper,
- vht_oper, &chandef, false);
+ vht_oper, &chandef, true);
/*
* Downgrade the new channel if we associated with restricted
@@ -478,27 +489,6 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata,
/* frame sending functions */
-static int ieee80211_compatible_rates(const u8 *supp_rates, int supp_rates_len,
- struct ieee80211_supported_band *sband,
- u32 *rates)
-{
- int i, j, count;
- *rates = 0;
- count = 0;
- for (i = 0; i < supp_rates_len; i++) {
- int rate = (supp_rates[i] & 0x7F) * 5;
-
- for (j = 0; j < sband->n_bitrates; j++)
- if (sband->bitrates[j].bitrate == rate) {
- *rates |= BIT(j);
- count++;
- break;
- }
- }
-
- return count;
-}
-
static void ieee80211_add_ht_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, u8 ap_ht_param,
struct ieee80211_supported_band *sband,
@@ -617,12 +607,12 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
struct ieee80211_mgmt *mgmt;
u8 *pos, qos_info;
size_t offset = 0, noffset;
- int i, count, rates_len, supp_rates_len;
+ int i, count, rates_len, supp_rates_len, shift;
u16 capab;
struct ieee80211_supported_band *sband;
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_channel *chan;
- u32 rates = 0;
+ u32 rate_flags, rates = 0;
sdata_assert_lock(sdata);
@@ -633,8 +623,10 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
return;
}
chan = chanctx_conf->def.chan;
+ rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
rcu_read_unlock();
sband = local->hw.wiphy->bands[chan->band];
+ shift = ieee80211_vif_get_shift(&sdata->vif);
if (assoc_data->supp_rates_len) {
/*
@@ -643,17 +635,24 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
* in the association request (e.g. D-Link DAP 1353 in
* b-only mode)...
*/
- rates_len = ieee80211_compatible_rates(assoc_data->supp_rates,
- assoc_data->supp_rates_len,
- sband, &rates);
+ rates_len = ieee80211_parse_bitrates(&chanctx_conf->def, sband,
+ assoc_data->supp_rates,
+ assoc_data->supp_rates_len,
+ &rates);
} else {
/*
* In case the AP does not provide any supported rates
* information before association, we send information
* element(s) with all rates that we support.
*/
- rates = ~0;
- rates_len = sband->n_bitrates;
+ rates_len = 0;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((rate_flags & sband->bitrates[i].flags)
+ != rate_flags)
+ continue;
+ rates |= BIT(i);
+ rates_len++;
+ }
}
skb = alloc_skb(local->hw.extra_tx_headroom +
@@ -730,8 +729,9 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
count = 0;
for (i = 0; i < sband->n_bitrates; i++) {
if (BIT(i) & rates) {
- int rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
+ int rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
+ 5 * (1 << shift));
+ *pos++ = (u8) rate;
if (++count == 8)
break;
}
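
The Supported Rates element encodes each rate in units of 500 kbit/s, and with a bandwidth shift of 1 (10 MHz) or 2 (5 MHz) the nominal bitrate is scaled down before that encoding. A minimal standalone sketch of the arithmetic used above (the helper name encode_supp_rate is invented for illustration and is not a mac80211 function):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* bitrate is in 100 kbit/s units, as in struct ieee80211_rate;
     * shift is 0 for 20 MHz, 1 for 10 MHz, 2 for 5 MHz channels.
     */
    static unsigned int encode_supp_rate(int bitrate, int shift)
    {
        return DIV_ROUND_UP(bitrate, 5 * (1 << shift));
    }

    int main(void)
    {
        /* 6 Mbit/s (bitrate = 60) on a full-width channel:
         * 60 / 5 = 12, i.e. 12 * 500 kbit/s = 6 Mbit/s.
         */
        printf("20 MHz: %u\n", encode_supp_rate(60, 0));
        /* the same table entry on a 5 MHz channel runs at 1.5 Mbit/s:
         * DIV_ROUND_UP(60, 20) = 3, i.e. 3 * 500 kbit/s.
         */
        printf("5 MHz:  %u\n", encode_supp_rate(60, 2));
        return 0;
    }
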
@@ -744,8 +744,10 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
for (i++; i < sband->n_bitrates; i++) {
if (BIT(i) & rates) {
- int rate = sband->bitrates[i].bitrate;
- *pos++ = (u8) (rate / 5);
+ int rate;
+ rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
+ 5 * (1 << shift));
+ *pos++ = (u8) rate;
}
}
}
@@ -756,7 +758,8 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
*pos++ = WLAN_EID_PWR_CAPABILITY;
*pos++ = 2;
*pos++ = 0; /* min tx power */
- *pos++ = chan->max_power; /* max tx power */
+ /* max tx power */
+ *pos++ = ieee80211_chandef_max_power(&chanctx_conf->def);
/* 2. supported channels */
/* TODO: get this in reg domain format */
@@ -1110,6 +1113,15 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
case -1:
cfg80211_chandef_create(&new_chandef, new_chan,
NL80211_CHAN_NO_HT);
+ /* keep width for 5/10 MHz channels */
+ switch (sdata->vif.bss_conf.chandef.width) {
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
+ new_chandef.width = sdata->vif.bss_conf.chandef.width;
+ break;
+ default:
+ break;
+ }
break;
}
@@ -2432,15 +2444,16 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
u8 *supp_rates, unsigned int supp_rates_len,
u32 *rates, u32 *basic_rates,
bool *have_higher_than_11mbit,
- int *min_rate, int *min_rate_index)
+ int *min_rate, int *min_rate_index,
+ int shift, u32 rate_flags)
{
int i, j;
for (i = 0; i < supp_rates_len; i++) {
- int rate = (supp_rates[i] & 0x7f) * 5;
+ int rate = supp_rates[i] & 0x7f;
bool is_basic = !!(supp_rates[i] & 0x80);
- if (rate > 110)
+ if ((rate * 5 * (1 << shift)) > 110)
*have_higher_than_11mbit = true;
/*
@@ -2456,12 +2469,20 @@ static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
continue;
for (j = 0; j < sband->n_bitrates; j++) {
- if (sband->bitrates[j].bitrate == rate) {
+ struct ieee80211_rate *br;
+ int brate;
+
+ br = &sband->bitrates[j];
+ if ((rate_flags & br->flags) != rate_flags)
+ continue;
+
+ brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5);
+ if (brate == rate) {
*rates |= BIT(j);
if (is_basic)
*basic_rates |= BIT(j);
- if (rate < *min_rate) {
- *min_rate = rate;
+ if ((rate * 5) < *min_rate) {
+ *min_rate = rate * 5;
*min_rate_index = j;
}
break;
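
Conversely, when parsing a peer's Supported Rates element, the low 7 bits of each octet carry the rate in 500 kbit/s units (already reflecting the reduced 5/10 MHz rates) and bit 7 marks a basic rate, so the comparison against the band's bitrate table has to apply the same scaling. A small standalone sketch under those assumptions (the bitrate table and example octets are made up for illustration):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* a few OFDM bitrates in 100 kbit/s units, as a band table lists them */
        static const int bitrates[] = { 60, 90, 120, 180, 240, 360, 480, 540 };
        /* Supported Rates octets as a 5 MHz AP might advertise them:
         * 0x83 = basic rate, 3 * 500 kbit/s (6 Mbit/s nominal entry),
         * 0x05 = 5 * 500 kbit/s (9 Mbit/s nominal entry).
         */
        static const unsigned char supp_rates[] = { 0x83, 0x05 };
        int shift = 2; /* 5 MHz channel: advertised rates are nominal / 4 */
        unsigned int i, j;

        for (i = 0; i < sizeof(supp_rates); i++) {
            int rate = supp_rates[i] & 0x7f;
            int is_basic = !!(supp_rates[i] & 0x80);

            for (j = 0; j < sizeof(bitrates) / sizeof(bitrates[0]); j++) {
                int brate = DIV_ROUND_UP(bitrates[j], (1 << shift) * 5);

                if (brate == rate) {
                    printf("matched index %u (%d.%d Mbit/s nominal)%s\n",
                           j, bitrates[j] / 10, bitrates[j] % 10,
                           is_basic ? " [basic]" : "");
                    break;
                }
            }
        }
        return 0;
    }
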
@@ -2840,14 +2861,6 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
ieee80211_rx_bss_put(local, bss);
sdata->vif.bss_conf.beacon_rate = bss->beacon_rate;
}
-
- if (!sdata->u.mgd.associated ||
- !ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid))
- return;
-
- ieee80211_sta_process_chanswitch(sdata, rx_status->mactime,
- elems, true);
-
}
@@ -3136,6 +3149,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems);
+ ieee80211_sta_process_chanswitch(sdata, rx_status->mactime,
+ &elems, true);
+
if (ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
elems.wmm_param_len))
changed |= BSS_CHANGED_QOS;
@@ -3394,10 +3410,13 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata)
if (tx_flags == 0) {
auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT;
- ifmgd->auth_data->timeout_started = true;
+ auth_data->timeout_started = true;
run_again(sdata, auth_data->timeout);
} else {
- auth_data->timeout_started = false;
+ auth_data->timeout =
+ round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG);
+ auth_data->timeout_started = true;
+ run_again(sdata, auth_data->timeout);
}
return 0;
@@ -3434,7 +3453,11 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata)
assoc_data->timeout_started = true;
run_again(sdata, assoc_data->timeout);
} else {
- assoc_data->timeout_started = false;
+ assoc_data->timeout =
+ round_jiffies_up(jiffies +
+ IEEE80211_ASSOC_TIMEOUT_LONG);
+ assoc_data->timeout_started = true;
+ run_again(sdata, assoc_data->timeout);
}
return 0;
@@ -3829,7 +3852,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
cbss->channel,
ht_oper, vht_oper,
- &chandef, true);
+ &chandef, false);
sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss),
local->rx_chains);
@@ -3884,27 +3907,40 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
if (!new_sta)
return -ENOMEM;
}
-
if (new_sta) {
u32 rates = 0, basic_rates = 0;
bool have_higher_than_11mbit;
int min_rate = INT_MAX, min_rate_index = -1;
+ struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_supported_band *sband;
const struct cfg80211_bss_ies *ies;
+ int shift;
+ u32 rate_flags;
sband = local->hw.wiphy->bands[cbss->channel->band];
err = ieee80211_prep_channel(sdata, cbss);
if (err) {
sta_info_free(local, new_sta);
- return err;
+ return -EINVAL;
}
+ shift = ieee80211_vif_get_shift(&sdata->vif);
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
+ if (WARN_ON(!chanctx_conf)) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
+ rcu_read_unlock();
ieee80211_get_rates(sband, bss->supp_rates,
bss->supp_rates_len,
&rates, &basic_rates,
&have_higher_than_11mbit,
- &min_rate, &min_rate_index);
+ &min_rate, &min_rate_index,
+ shift, rate_flags);
/*
* This used to be a workaround for basic rates missing
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 7fc5d0d8149..34012620434 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -99,10 +99,13 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
}
mutex_unlock(&local->sta_mtx);
- /* remove all interfaces */
+ /* remove all interfaces that were created in the driver */
list_for_each_entry(sdata, &local->interfaces, list) {
- if (!ieee80211_sdata_running(sdata))
+ if (!ieee80211_sdata_running(sdata) ||
+ sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
+ sdata->vif.type == NL80211_IFTYPE_MONITOR)
continue;
+
drv_remove_interface(local, sdata);
}
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 30d58d2d13e..e126605cec6 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -210,7 +210,7 @@ static bool rc_no_data_or_no_ack_use_min(struct ieee80211_tx_rate_control *txrc)
!ieee80211_is_data(fc);
}
-static void rc_send_low_broadcast(s8 *idx, u32 basic_rates,
+static void rc_send_low_basicrate(s8 *idx, u32 basic_rates,
struct ieee80211_supported_band *sband)
{
u8 i;
@@ -232,37 +232,28 @@ static void rc_send_low_broadcast(s8 *idx, u32 basic_rates,
/* could not find a basic rate; use original selection */
}
-static inline s8
-rate_lowest_non_cck_index(struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta)
+static void __rate_control_send_low(struct ieee80211_hw *hw,
+ struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta,
+ struct ieee80211_tx_info *info)
{
int i;
+ u32 rate_flags =
+ ieee80211_chandef_rate_flags(&hw->conf.chandef);
+
+ if ((sband->band == IEEE80211_BAND_2GHZ) &&
+ (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
+ rate_flags |= IEEE80211_RATE_ERP_G;
+ info->control.rates[0].idx = 0;
for (i = 0; i < sband->n_bitrates; i++) {
- struct ieee80211_rate *srate = &sband->bitrates[i];
- if ((srate->bitrate == 10) || (srate->bitrate == 20) ||
- (srate->bitrate == 55) || (srate->bitrate == 110))
+ if (!rate_supported(sta, sband->band, i))
continue;
- if (rate_supported(sta, sband->band, i))
- return i;
+ info->control.rates[0].idx = i;
+ break;
}
-
- /* No matching rate found */
- return 0;
-}
-
-static void __rate_control_send_low(struct ieee80211_hw *hw,
- struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta,
- struct ieee80211_tx_info *info)
-{
- if ((sband->band != IEEE80211_BAND_2GHZ) ||
- !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
- info->control.rates[0].idx = rate_lowest_index(sband, sta);
- else
- info->control.rates[0].idx =
- rate_lowest_non_cck_index(sband, sta);
+ WARN_ON_ONCE(i == sband->n_bitrates);
info->control.rates[0].count =
(info->flags & IEEE80211_TX_CTL_NO_ACK) ?
@@ -272,28 +263,37 @@ static void __rate_control_send_low(struct ieee80211_hw *hw,
}
-bool rate_control_send_low(struct ieee80211_sta *sta,
+bool rate_control_send_low(struct ieee80211_sta *pubsta,
void *priv_sta,
struct ieee80211_tx_rate_control *txrc)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txrc->skb);
struct ieee80211_supported_band *sband = txrc->sband;
+ struct sta_info *sta;
int mcast_rate;
+ bool use_basicrate = false;
- if (!sta || !priv_sta || rc_no_data_or_no_ack_use_min(txrc)) {
- __rate_control_send_low(txrc->hw, sband, sta, info);
+ if (!pubsta || !priv_sta || rc_no_data_or_no_ack_use_min(txrc)) {
+ __rate_control_send_low(txrc->hw, sband, pubsta, info);
- if (!sta && txrc->bss) {
+ if (!pubsta && txrc->bss) {
mcast_rate = txrc->bss_conf->mcast_rate[sband->band];
if (mcast_rate > 0) {
info->control.rates[0].idx = mcast_rate - 1;
return true;
}
+ use_basicrate = true;
+ } else if (pubsta) {
+ sta = container_of(pubsta, struct sta_info, sta);
+ if (ieee80211_vif_is_mesh(&sta->sdata->vif))
+ use_basicrate = true;
+ }
- rc_send_low_broadcast(&info->control.rates[0].idx,
+ if (use_basicrate)
+ rc_send_low_basicrate(&info->control.rates[0].idx,
txrc->bss_conf->basic_rates,
sband);
- }
+
return true;
}
return false;
@@ -585,6 +585,7 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
u8 mcs_mask[IEEE80211_HT_MCS_MASK_LEN];
bool has_mcs_mask;
u32 mask;
+ u32 rate_flags;
int i;
/*
@@ -594,6 +595,12 @@ static void rate_control_apply_mask(struct ieee80211_sub_if_data *sdata,
*/
mask = sdata->rc_rateidx_mask[info->band];
has_mcs_mask = sdata->rc_has_mcs_mask[info->band];
+ rate_flags =
+ ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ for (i = 0; i < sband->n_bitrates; i++)
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ mask &= ~BIT(i);
+
if (mask == (1 << sband->n_bitrates) - 1 && !has_mcs_mask)
return;
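
The mask adjustment above clears every legacy rate whose flags do not include all of the flags required by the current channel definition, so a narrow chandef silently drops full-width-only entries from the configured rate mask. A standalone sketch of that filter, with invented flag bits standing in for enum ieee80211_rate_flags:

    #include <stdio.h>

    #define BIT(n) (1u << (n))

    int main(void)
    {
        /* illustrative per-rate flag bits; the real ones live in
         * enum ieee80211_rate_flags.
         */
        enum { RATE_ERP_G = BIT(0), RATE_NARROW_OK = BIT(1) };
        unsigned int flags[4] = { 0, RATE_ERP_G,
                                  RATE_NARROW_OK, RATE_ERP_G | RATE_NARROW_OK };
        unsigned int mask = BIT(4) - 1;            /* all four rates allowed */
        unsigned int rate_flags = RATE_NARROW_OK;  /* required by the chandef */
        int i;

        for (i = 0; i < 4; i++)
            if ((rate_flags & flags[i]) != rate_flags)
                mask &= ~BIT(i);

        printf("resulting mask: 0x%x\n", mask); /* 0xc: only rates 2 and 3 remain */
        return 0;
    }
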
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index d35a5dd3fb1..5dedc56c94d 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -66,11 +66,12 @@ static inline void rate_control_rate_init(struct sta_info *sta)
}
sband = local->hw.wiphy->bands[chanctx_conf->def.chan->band];
- rcu_read_unlock();
ieee80211_sta_set_rx_nss(sta);
- ref->ops->rate_init(ref->priv, sband, ista, priv_sta);
+ ref->ops->rate_init(ref->priv, sband, &chanctx_conf->def, ista,
+ priv_sta);
+ rcu_read_unlock();
set_sta_flag(sta, WLAN_STA_RATE_CONTROL);
}
@@ -81,10 +82,21 @@ static inline void rate_control_rate_update(struct ieee80211_local *local,
struct rate_control_ref *ref = local->rate_ctrl;
struct ieee80211_sta *ista = &sta->sta;
void *priv_sta = sta->rate_ctrl_priv;
+ struct ieee80211_chanctx_conf *chanctx_conf;
+
+ if (ref && ref->ops->rate_update) {
+ rcu_read_lock();
- if (ref && ref->ops->rate_update)
- ref->ops->rate_update(ref->priv, sband, ista,
- priv_sta, changed);
+ chanctx_conf = rcu_dereference(sta->sdata->vif.chanctx_conf);
+ if (WARN_ON(!chanctx_conf)) {
+ rcu_read_unlock();
+ return;
+ }
+
+ ref->ops->rate_update(ref->priv, sband, &chanctx_conf->def,
+ ista, priv_sta, changed);
+ rcu_read_unlock();
+ }
drv_sta_rc_update(local, sta->sdata, &sta->sta, changed);
}
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index ac7ef5414bd..8b5f7ef7c0c 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -290,7 +290,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
struct minstrel_rate *msr, *mr;
unsigned int ndx;
bool mrr_capable;
- bool prev_sample = mi->prev_sample;
+ bool prev_sample;
int delta;
int sampling_ratio;
@@ -314,6 +314,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
(mi->sample_count + mi->sample_deferred / 2);
/* delta < 0: no sampling required */
+ prev_sample = mi->prev_sample;
mi->prev_sample = false;
if (delta < 0 || (!mrr_capable && prev_sample))
return;
@@ -382,14 +383,18 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
static void
calc_rate_durations(enum ieee80211_band band,
struct minstrel_rate *d,
- struct ieee80211_rate *rate)
+ struct ieee80211_rate *rate,
+ struct cfg80211_chan_def *chandef)
{
int erp = !!(rate->flags & IEEE80211_RATE_ERP_G);
+ int shift = ieee80211_chandef_get_shift(chandef);
d->perfect_tx_time = ieee80211_frame_duration(band, 1200,
- rate->bitrate, erp, 1);
+ DIV_ROUND_UP(rate->bitrate, 1 << shift), erp, 1,
+ shift);
d->ack_time = ieee80211_frame_duration(band, 10,
- rate->bitrate, erp, 1);
+ DIV_ROUND_UP(rate->bitrate, 1 << shift), erp, 1,
+ shift);
}
static void
@@ -417,21 +422,25 @@ init_sample_table(struct minstrel_sta_info *mi)
static void
minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta)
+ struct cfg80211_chan_def *chandef,
+ struct ieee80211_sta *sta, void *priv_sta)
{
struct minstrel_sta_info *mi = priv_sta;
struct minstrel_priv *mp = priv;
struct ieee80211_rate *ctl_rate;
unsigned int i, n = 0;
unsigned int t_slot = 9; /* FIXME: get real slot time */
+ u32 rate_flags;
mi->sta = sta;
mi->lowest_rix = rate_lowest_index(sband, sta);
ctl_rate = &sband->bitrates[mi->lowest_rix];
mi->sp_ack_dur = ieee80211_frame_duration(sband->band, 10,
ctl_rate->bitrate,
- !!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1);
+ !!(ctl_rate->flags & IEEE80211_RATE_ERP_G), 1,
+ ieee80211_chandef_get_shift(chandef));
+ rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
memset(mi->max_tp_rate, 0, sizeof(mi->max_tp_rate));
mi->max_prob_rate = 0;
@@ -440,15 +449,22 @@ minstrel_rate_init(void *priv, struct ieee80211_supported_band *sband,
unsigned int tx_time = 0, tx_time_cts = 0, tx_time_rtscts = 0;
unsigned int tx_time_single;
unsigned int cw = mp->cw_min;
+ int shift;
if (!rate_supported(sta, sband->band, i))
continue;
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+
n++;
memset(mr, 0, sizeof(*mr));
mr->rix = i;
- mr->bitrate = sband->bitrates[i].bitrate / 5;
- calc_rate_durations(sband->band, mr, &sband->bitrates[i]);
+ shift = ieee80211_chandef_get_shift(chandef);
+ mr->bitrate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
+ (1 << shift) * 5);
+ calc_rate_durations(sband->band, mr, &sband->bitrates[i],
+ chandef);
/* calculate maximum number of retransmissions before
* fallback (based on maximum segment size) */
@@ -546,6 +562,7 @@ minstrel_init_cck_rates(struct minstrel_priv *mp)
{
static const int bitrates[4] = { 10, 20, 55, 110 };
struct ieee80211_supported_band *sband;
+ u32 rate_flags = ieee80211_chandef_rate_flags(&mp->hw->conf.chandef);
int i, j;
sband = mp->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
@@ -558,6 +575,9 @@ minstrel_init_cck_rates(struct minstrel_priv *mp)
if (rate->flags & IEEE80211_RATE_ERP_G)
continue;
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+
for (j = 0; j < ARRAY_SIZE(bitrates); j++) {
if (rate->bitrate != bitrates[j])
continue;
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 5b2d3012b98..7c323f27ba2 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -776,7 +776,7 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
/* Don't use EAPOL frames for sampling on non-mrr hw */
if (mp->hw->max_rates == 1 &&
- txrc->skb->protocol == cpu_to_be16(ETH_P_PAE))
+ (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
sample_idx = -1;
else
sample_idx = minstrel_get_sample_rate(mp, mi);
@@ -804,10 +804,18 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
sample_group = &minstrel_mcs_groups[sample_idx / MCS_GROUP_RATES];
info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
+ rate->count = 1;
+
+ if (sample_idx / MCS_GROUP_RATES == MINSTREL_CCK_GROUP) {
+ int idx = sample_idx % ARRAY_SIZE(mp->cck_rates);
+ rate->idx = mp->cck_rates[idx];
+ rate->flags = 0;
+ return;
+ }
+
rate->idx = sample_idx % MCS_GROUP_RATES +
(sample_group->streams - 1) * MCS_GROUP_RATES;
rate->flags = IEEE80211_TX_RC_MCS | sample_group->flags;
- rate->count = 1;
}
static void
@@ -820,6 +828,9 @@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
if (sband->band != IEEE80211_BAND_2GHZ)
return;
+ if (!(mp->hw->flags & IEEE80211_HW_SUPPORTS_HT_CCK_RATES))
+ return;
+
mi->cck_supported = 0;
mi->cck_supported_short = 0;
for (i = 0; i < 4; i++) {
@@ -836,6 +847,7 @@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
static void
minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta)
{
struct minstrel_priv *mp = priv;
@@ -861,8 +873,9 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
mi->sta = sta;
mi->stats_update = jiffies;
- ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1);
- mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1) + ack_dur;
+ ack_dur = ieee80211_frame_duration(sband->band, 10, 60, 1, 1, 0);
+ mi->overhead = ieee80211_frame_duration(sband->band, 0, 60, 1, 1, 0);
+ mi->overhead += ack_dur;
mi->overhead_rtscts = mi->overhead + 2 * ack_dur;
mi->avg_ampdu_len = MINSTREL_FRAC(1, 1);
@@ -931,22 +944,25 @@ use_legacy:
memset(&msp->legacy, 0, sizeof(msp->legacy));
msp->legacy.r = msp->ratelist;
msp->legacy.sample_table = msp->sample_table;
- return mac80211_minstrel.rate_init(priv, sband, sta, &msp->legacy);
+ return mac80211_minstrel.rate_init(priv, sband, chandef, sta,
+ &msp->legacy);
}
static void
minstrel_ht_rate_init(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta)
{
- minstrel_ht_update_caps(priv, sband, sta, priv_sta);
+ minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
}
static void
minstrel_ht_rate_update(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta,
u32 changed)
{
- minstrel_ht_update_caps(priv, sband, sta, priv_sta);
+ minstrel_ht_update_caps(priv, sband, chandef, sta, priv_sta);
}
static void *
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index 502d3ecc4a7..958fad07b54 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -293,6 +293,7 @@ rate_control_pid_get_rate(void *priv, struct ieee80211_sta *sta,
static void
rate_control_pid_rate_init(void *priv, struct ieee80211_supported_band *sband,
+ struct cfg80211_chan_def *chandef,
struct ieee80211_sta *sta, void *priv_sta)
{
struct rc_pid_sta_info *spinfo = priv_sta;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 23dbcfc69b3..54395d7583b 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -87,11 +87,13 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local,
int len;
/* always present fields */
- len = sizeof(struct ieee80211_radiotap_header) + 9;
+ len = sizeof(struct ieee80211_radiotap_header) + 8;
- /* allocate extra bitmap */
+ /* allocate extra bitmaps */
if (status->vendor_radiotap_len)
len += 4;
+ if (status->chains)
+ len += 4 * hweight8(status->chains);
if (ieee80211_have_rx_timestamp(status)) {
len = ALIGN(len, 8);
@@ -100,6 +102,10 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local,
if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
len += 1;
+ /* antenna field, if we don't have per-chain info */
+ if (!status->chains)
+ len += 1;
+
/* padding for RX_FLAGS if necessary */
len = ALIGN(len, 2);
@@ -116,6 +122,11 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local,
len += 12;
}
+ if (status->chains) {
+ /* antenna and antenna signal fields */
+ len += 2 * hweight8(status->chains);
+ }
+
if (status->vendor_radiotap_len) {
if (WARN_ON_ONCE(status->vendor_radiotap_align == 0))
status->vendor_radiotap_align = 1;
@@ -145,8 +156,12 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_radiotap_header *rthdr;
unsigned char *pos;
+ __le32 *it_present;
+ u32 it_present_val;
u16 rx_flags = 0;
- int mpdulen;
+ u16 channel_flags = 0;
+ int mpdulen, chain;
+ unsigned long chains = status->chains;
mpdulen = skb->len;
if (!(has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)))
@@ -154,25 +169,39 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
memset(rthdr, 0, rtap_len);
+ it_present = &rthdr->it_present;
/* radiotap header, set always present flags */
- rthdr->it_present =
- cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
- (1 << IEEE80211_RADIOTAP_CHANNEL) |
- (1 << IEEE80211_RADIOTAP_ANTENNA) |
- (1 << IEEE80211_RADIOTAP_RX_FLAGS));
rthdr->it_len = cpu_to_le16(rtap_len + status->vendor_radiotap_len);
+ it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
+ BIT(IEEE80211_RADIOTAP_CHANNEL) |
+ BIT(IEEE80211_RADIOTAP_RX_FLAGS);
+
+ if (!status->chains)
+ it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);
- pos = (unsigned char *)(rthdr + 1);
+ for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
+ it_present_val |=
+ BIT(IEEE80211_RADIOTAP_EXT) |
+ BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
+ put_unaligned_le32(it_present_val, it_present);
+ it_present++;
+ it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
+ BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
+ }
if (status->vendor_radiotap_len) {
- rthdr->it_present |=
- cpu_to_le32(BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE)) |
- cpu_to_le32(BIT(IEEE80211_RADIOTAP_EXT));
- put_unaligned_le32(status->vendor_radiotap_bitmap, pos);
- pos += 4;
+ it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
+ BIT(IEEE80211_RADIOTAP_EXT);
+ put_unaligned_le32(it_present_val, it_present);
+ it_present++;
+ it_present_val = status->vendor_radiotap_bitmap;
}
+ put_unaligned_le32(it_present_val, it_present);
+
+ pos = (void *)(it_present + 1);
+
/* the order of the following fields is important */
/* IEEE80211_RADIOTAP_TSFT */
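
Per-chain reporting works by chaining additional it_present words: each extra word is announced by setting the EXT and radiotap-namespace bits in the word before it, the header grows by four bytes per extra word, and the data section grows by a signal byte plus an antenna byte per chain. A simplified userspace sketch of building that bitmap chain (bit numbers follow the radiotap specification; the structure is reduced to just the present words):

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n) (1u << (n))

    /* radiotap "present" bit numbers used below */
    enum {
        RT_FLAGS = 1, RT_CHANNEL = 3, RT_DBM_ANTSIGNAL = 5,
        RT_ANTENNA = 11, RT_RX_FLAGS = 14,
        RT_RADIOTAP_NAMESPACE = 29, RT_EXT = 31,
    };

    int main(void)
    {
        uint32_t present[4];                    /* first word + chained words */
        unsigned int n = 0, chain, chains = 2;  /* e.g. a 2x2 device */
        uint32_t val = BIT(RT_FLAGS) | BIT(RT_CHANNEL) | BIT(RT_RX_FLAGS);

        for (chain = 0; chain < chains; chain++) {
            /* announce one more bitmap, then start filling it */
            val |= BIT(RT_EXT) | BIT(RT_RADIOTAP_NAMESPACE);
            present[n++] = val;
            val = BIT(RT_ANTENNA) | BIT(RT_DBM_ANTSIGNAL);
        }
        present[n++] = val;                     /* last word, no EXT bit */

        for (chain = 0; chain < n; chain++)
            printf("it_present[%u] = 0x%08x\n", chain, (unsigned)present[chain]);

        /* header grows by 4 bytes per extra word, data by 2 bytes per chain */
        printf("extra bytes: %u\n", 4 * (n - 1) + 2 * chains);
        return 0;
    }
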
@@ -207,28 +236,35 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
*/
*pos = 0;
} else {
+ int shift = 0;
rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
- *pos = rate->bitrate / 5;
+ if (status->flag & RX_FLAG_10MHZ)
+ shift = 1;
+ else if (status->flag & RX_FLAG_5MHZ)
+ shift = 2;
+ *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
}
pos++;
/* IEEE80211_RADIOTAP_CHANNEL */
put_unaligned_le16(status->freq, pos);
pos += 2;
+ if (status->flag & RX_FLAG_10MHZ)
+ channel_flags |= IEEE80211_CHAN_HALF;
+ else if (status->flag & RX_FLAG_5MHZ)
+ channel_flags |= IEEE80211_CHAN_QUARTER;
+
if (status->band == IEEE80211_BAND_5GHZ)
- put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
- pos);
+ channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
else if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
- put_unaligned_le16(IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ,
- pos);
+ channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
- put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
- pos);
+ channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
else if (rate)
- put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
- pos);
+ channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
else
- put_unaligned_le16(IEEE80211_CHAN_2GHZ, pos);
+ channel_flags |= IEEE80211_CHAN_2GHZ;
+ put_unaligned_le16(channel_flags, pos);
pos += 2;
/* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
@@ -242,9 +278,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
- /* IEEE80211_RADIOTAP_ANTENNA */
- *pos = status->antenna;
- pos++;
+ if (!status->chains) {
+ /* IEEE80211_RADIOTAP_ANTENNA */
+ *pos = status->antenna;
+ pos++;
+ }
/* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
@@ -341,6 +379,11 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
pos += 2;
}
+ for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
+ *pos++ = status->chain_signal[chain];
+ *pos++ = chain;
+ }
+
if (status->vendor_radiotap_len) {
/* ensure 2 byte alignment for the vendor field as required */
if ((pos - (u8 *)rthdr) & 1)
@@ -936,8 +979,14 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
- /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */
- if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) {
+ /*
+ * Drop duplicate 802.11 retransmissions
+ * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
+ */
+ if (rx->skb->len >= 24 && rx->sta &&
+ !ieee80211_is_ctl(hdr->frame_control) &&
+ !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
+ !is_multicast_ether_addr(hdr->addr1)) {
if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
rx->sta->last_seq_ctrl[rx->seqno_idx] ==
hdr->seq_ctrl)) {
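
Duplicate detection keeps, per station and per sequence-number space, the last seen sequence-control field and drops a frame whose retry bit is set and whose sequence control matches it; the hunk above only restricts that check to frames that actually carry a sequence number. A minimal standalone sketch of the core rule (field layout per IEEE 802.11; the helper names are invented here):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FCTL_RETRY 0x0800   /* Retry bit in the frame control field */

    struct last_seen { uint16_t seq_ctrl; bool valid; };

    /* returns true if the frame should be dropped as a duplicate */
    static bool is_duplicate(struct last_seen *ls, uint16_t fc, uint16_t seq_ctrl)
    {
        if ((fc & FCTL_RETRY) && ls->valid && ls->seq_ctrl == seq_ctrl)
            return true;
        ls->seq_ctrl = seq_ctrl;
        ls->valid = true;
        return false;
    }

    int main(void)
    {
        struct last_seen ls = { 0, false };

        printf("%d\n", is_duplicate(&ls, 0x0008, 0x1230));              /* 0: first rx */
        printf("%d\n", is_duplicate(&ls, 0x0008 | FCTL_RETRY, 0x1230)); /* 1: retransmission */
        printf("%d\n", is_duplicate(&ls, 0x0008, 0x1240));              /* 0: next MPDU */
        return 0;
    }
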
@@ -1006,207 +1055,6 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
static ieee80211_rx_result debug_noinline
-ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
-{
- struct sk_buff *skb = rx->skb;
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int keyidx;
- int hdrlen;
- ieee80211_rx_result result = RX_DROP_UNUSABLE;
- struct ieee80211_key *sta_ptk = NULL;
- int mmie_keyidx = -1;
- __le16 fc;
-
- /*
- * Key selection 101
- *
- * There are four types of keys:
- * - GTK (group keys)
- * - IGTK (group keys for management frames)
- * - PTK (pairwise keys)
- * - STK (station-to-station pairwise keys)
- *
- * When selecting a key, we have to distinguish between multicast
- * (including broadcast) and unicast frames, the latter can only
- * use PTKs and STKs while the former always use GTKs and IGTKs.
- * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
- * unicast frames can also use key indices like GTKs. Hence, if we
- * don't have a PTK/STK we check the key index for a WEP key.
- *
- * Note that in a regular BSS, multicast frames are sent by the
- * AP only, associated stations unicast the frame to the AP first
- * which then multicasts it on their behalf.
- *
- * There is also a slight problem in IBSS mode: GTKs are negotiated
- * with each station, that is something we don't currently handle.
- * The spec seems to expect that one negotiates the same key with
- * every station but there's no such requirement; VLANs could be
- * possible.
- */
-
- /*
- * No point in finding a key and decrypting if the frame is neither
- * addressed to us nor a multicast frame.
- */
- if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
- return RX_CONTINUE;
-
- /* start without a key */
- rx->key = NULL;
-
- if (rx->sta)
- sta_ptk = rcu_dereference(rx->sta->ptk);
-
- fc = hdr->frame_control;
-
- if (!ieee80211_has_protected(fc))
- mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
-
- if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
- rx->key = sta_ptk;
- if ((status->flag & RX_FLAG_DECRYPTED) &&
- (status->flag & RX_FLAG_IV_STRIPPED))
- return RX_CONTINUE;
- /* Skip decryption if the frame is not protected. */
- if (!ieee80211_has_protected(fc))
- return RX_CONTINUE;
- } else if (mmie_keyidx >= 0) {
- /* Broadcast/multicast robust management frame / BIP */
- if ((status->flag & RX_FLAG_DECRYPTED) &&
- (status->flag & RX_FLAG_IV_STRIPPED))
- return RX_CONTINUE;
-
- if (mmie_keyidx < NUM_DEFAULT_KEYS ||
- mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
- return RX_DROP_MONITOR; /* unexpected BIP keyidx */
- if (rx->sta)
- rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
- if (!rx->key)
- rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
- } else if (!ieee80211_has_protected(fc)) {
- /*
- * The frame was not protected, so skip decryption. However, we
- * need to set rx->key if there is a key that could have been
- * used so that the frame may be dropped if encryption would
- * have been expected.
- */
- struct ieee80211_key *key = NULL;
- struct ieee80211_sub_if_data *sdata = rx->sdata;
- int i;
-
- if (ieee80211_is_mgmt(fc) &&
- is_multicast_ether_addr(hdr->addr1) &&
- (key = rcu_dereference(rx->sdata->default_mgmt_key)))
- rx->key = key;
- else {
- if (rx->sta) {
- for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
- key = rcu_dereference(rx->sta->gtk[i]);
- if (key)
- break;
- }
- }
- if (!key) {
- for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
- key = rcu_dereference(sdata->keys[i]);
- if (key)
- break;
- }
- }
- if (key)
- rx->key = key;
- }
- return RX_CONTINUE;
- } else {
- u8 keyid;
- /*
- * The device doesn't give us the IV so we won't be
- * able to look up the key. That's ok though, we
- * don't need to decrypt the frame, we just won't
- * be able to keep statistics accurate.
- * Except for key threshold notifications, should
- * we somehow allow the driver to tell us which key
- * the hardware used if this flag is set?
- */
- if ((status->flag & RX_FLAG_DECRYPTED) &&
- (status->flag & RX_FLAG_IV_STRIPPED))
- return RX_CONTINUE;
-
- hdrlen = ieee80211_hdrlen(fc);
-
- if (rx->skb->len < 8 + hdrlen)
- return RX_DROP_UNUSABLE; /* TODO: count this? */
-
- /*
- * no need to call ieee80211_wep_get_keyidx,
- * it verifies a bunch of things we've done already
- */
- skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
- keyidx = keyid >> 6;
-
- /* check per-station GTK first, if multicast packet */
- if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
- rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
-
- /* if not found, try default key */
- if (!rx->key) {
- rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
-
- /*
- * RSNA-protected unicast frames should always be
- * sent with pairwise or station-to-station keys,
- * but for WEP we allow using a key index as well.
- */
- if (rx->key &&
- rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
- rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
- !is_multicast_ether_addr(hdr->addr1))
- rx->key = NULL;
- }
- }
-
- if (rx->key) {
- if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
- return RX_DROP_MONITOR;
-
- rx->key->tx_rx_count++;
- /* TODO: add threshold stuff again */
- } else {
- return RX_DROP_MONITOR;
- }
-
- switch (rx->key->conf.cipher) {
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- result = ieee80211_crypto_wep_decrypt(rx);
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- result = ieee80211_crypto_tkip_decrypt(rx);
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- result = ieee80211_crypto_ccmp_decrypt(rx);
- break;
- case WLAN_CIPHER_SUITE_AES_CMAC:
- result = ieee80211_crypto_aes_cmac_decrypt(rx);
- break;
- default:
- /*
- * We can reach here only with HW-only algorithms
- * but why didn't it decrypt the frame?!
- */
- return RX_DROP_UNUSABLE;
- }
-
- /* the hdr variable is invalid after the decrypt handlers */
-
- /* either the frame has been decrypted or will be dropped */
- status->flag |= RX_FLAG_DECRYPTED;
-
- return result;
-}
-
-static ieee80211_rx_result debug_noinline
ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
{
struct ieee80211_local *local;
@@ -1507,6 +1355,207 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
return RX_CONTINUE;
} /* ieee80211_rx_h_sta_process */
+static ieee80211_rx_result debug_noinline
+ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
+{
+ struct sk_buff *skb = rx->skb;
+ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int keyidx;
+ int hdrlen;
+ ieee80211_rx_result result = RX_DROP_UNUSABLE;
+ struct ieee80211_key *sta_ptk = NULL;
+ int mmie_keyidx = -1;
+ __le16 fc;
+
+ /*
+ * Key selection 101
+ *
+ * There are four types of keys:
+ * - GTK (group keys)
+ * - IGTK (group keys for management frames)
+ * - PTK (pairwise keys)
+ * - STK (station-to-station pairwise keys)
+ *
+ * When selecting a key, we have to distinguish between multicast
+ * (including broadcast) and unicast frames, the latter can only
+ * use PTKs and STKs while the former always use GTKs and IGTKs.
+ * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
+ * unicast frames can also use key indices like GTKs. Hence, if we
+ * don't have a PTK/STK we check the key index for a WEP key.
+ *
+ * Note that in a regular BSS, multicast frames are sent by the
+ * AP only, associated stations unicast the frame to the AP first
+ * which then multicasts it on their behalf.
+ *
+ * There is also a slight problem in IBSS mode: GTKs are negotiated
+ * with each station, that is something we don't currently handle.
+ * The spec seems to expect that one negotiates the same key with
+ * every station but there's no such requirement; VLANs could be
+ * possible.
+ */
+
+ /*
+ * No point in finding a key and decrypting if the frame is neither
+ * addressed to us nor a multicast frame.
+ */
+ if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
+ return RX_CONTINUE;
+
+ /* start without a key */
+ rx->key = NULL;
+
+ if (rx->sta)
+ sta_ptk = rcu_dereference(rx->sta->ptk);
+
+ fc = hdr->frame_control;
+
+ if (!ieee80211_has_protected(fc))
+ mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
+
+ if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
+ rx->key = sta_ptk;
+ if ((status->flag & RX_FLAG_DECRYPTED) &&
+ (status->flag & RX_FLAG_IV_STRIPPED))
+ return RX_CONTINUE;
+ /* Skip decryption if the frame is not protected. */
+ if (!ieee80211_has_protected(fc))
+ return RX_CONTINUE;
+ } else if (mmie_keyidx >= 0) {
+ /* Broadcast/multicast robust management frame / BIP */
+ if ((status->flag & RX_FLAG_DECRYPTED) &&
+ (status->flag & RX_FLAG_IV_STRIPPED))
+ return RX_CONTINUE;
+
+ if (mmie_keyidx < NUM_DEFAULT_KEYS ||
+ mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
+ return RX_DROP_MONITOR; /* unexpected BIP keyidx */
+ if (rx->sta)
+ rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
+ if (!rx->key)
+ rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
+ } else if (!ieee80211_has_protected(fc)) {
+ /*
+ * The frame was not protected, so skip decryption. However, we
+ * need to set rx->key if there is a key that could have been
+ * used so that the frame may be dropped if encryption would
+ * have been expected.
+ */
+ struct ieee80211_key *key = NULL;
+ struct ieee80211_sub_if_data *sdata = rx->sdata;
+ int i;
+
+ if (ieee80211_is_mgmt(fc) &&
+ is_multicast_ether_addr(hdr->addr1) &&
+ (key = rcu_dereference(rx->sdata->default_mgmt_key)))
+ rx->key = key;
+ else {
+ if (rx->sta) {
+ for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
+ key = rcu_dereference(rx->sta->gtk[i]);
+ if (key)
+ break;
+ }
+ }
+ if (!key) {
+ for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
+ key = rcu_dereference(sdata->keys[i]);
+ if (key)
+ break;
+ }
+ }
+ if (key)
+ rx->key = key;
+ }
+ return RX_CONTINUE;
+ } else {
+ u8 keyid;
+ /*
+ * The device doesn't give us the IV so we won't be
+ * able to look up the key. That's ok though, we
+ * don't need to decrypt the frame, we just won't
+ * be able to keep statistics accurate.
+ * Except for key threshold notifications, should
+ * we somehow allow the driver to tell us which key
+ * the hardware used if this flag is set?
+ */
+ if ((status->flag & RX_FLAG_DECRYPTED) &&
+ (status->flag & RX_FLAG_IV_STRIPPED))
+ return RX_CONTINUE;
+
+ hdrlen = ieee80211_hdrlen(fc);
+
+ if (rx->skb->len < 8 + hdrlen)
+ return RX_DROP_UNUSABLE; /* TODO: count this? */
+
+ /*
+ * no need to call ieee80211_wep_get_keyidx,
+ * it verifies a bunch of things we've done already
+ */
+ skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
+ keyidx = keyid >> 6;
+
+ /* check per-station GTK first, if multicast packet */
+ if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
+ rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
+
+ /* if not found, try default key */
+ if (!rx->key) {
+ rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
+
+ /*
+ * RSNA-protected unicast frames should always be
+ * sent with pairwise or station-to-station keys,
+ * but for WEP we allow using a key index as well.
+ */
+ if (rx->key &&
+ rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
+ rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
+ !is_multicast_ether_addr(hdr->addr1))
+ rx->key = NULL;
+ }
+ }
+
+ if (rx->key) {
+ if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
+ return RX_DROP_MONITOR;
+
+ rx->key->tx_rx_count++;
+ /* TODO: add threshold stuff again */
+ } else {
+ return RX_DROP_MONITOR;
+ }
+
+ switch (rx->key->conf.cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ result = ieee80211_crypto_wep_decrypt(rx);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ result = ieee80211_crypto_tkip_decrypt(rx);
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ result = ieee80211_crypto_ccmp_decrypt(rx);
+ break;
+ case WLAN_CIPHER_SUITE_AES_CMAC:
+ result = ieee80211_crypto_aes_cmac_decrypt(rx);
+ break;
+ default:
+ /*
+ * We can reach here only with HW-only algorithms
+ * but why didn't it decrypt the frame?!
+ */
+ return RX_DROP_UNUSABLE;
+ }
+
+ /* the hdr variable is invalid after the decrypt handlers */
+
+ /* either the frame has been decrypted or will be dropped */
+ status->flag |= RX_FLAG_DECRYPTED;
+
+ return result;
+}
+
static inline struct ieee80211_fragment_entry *
ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
unsigned int frag, unsigned int seq, int rx_queue,
@@ -2635,8 +2684,7 @@ ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
sig = status->signal;
if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
- rx->skb->data, rx->skb->len,
- GFP_ATOMIC)) {
+ rx->skb->data, rx->skb->len, 0, GFP_ATOMIC)) {
if (rx->sta)
rx->sta->rx_packets++;
dev_kfree_skb(rx->skb);
@@ -2890,10 +2938,10 @@ static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
*/
rx->skb = skb;
- CALL_RXH(ieee80211_rx_h_decrypt)
CALL_RXH(ieee80211_rx_h_check_more_data)
CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll)
CALL_RXH(ieee80211_rx_h_sta_process)
+ CALL_RXH(ieee80211_rx_h_decrypt)
CALL_RXH(ieee80211_rx_h_defragment)
CALL_RXH(ieee80211_rx_h_michael_mic_verify)
/* must be after MMIC verify so header is counted in MPDU mic */
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 1b122a79b0d..08afe74b98f 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -66,6 +66,7 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
struct cfg80211_bss *cbss;
struct ieee80211_bss *bss;
int clen, srlen;
+ enum nl80211_bss_scan_width scan_width;
s32 signal = 0;
if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
@@ -73,8 +74,15 @@ ieee80211_bss_info_update(struct ieee80211_local *local,
else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
signal = (rx_status->signal * 100) / local->hw.max_signal;
- cbss = cfg80211_inform_bss_frame(local->hw.wiphy, channel,
- mgmt, len, signal, GFP_ATOMIC);
+ scan_width = NL80211_BSS_CHAN_WIDTH_20;
+ if (rx_status->flag & RX_FLAG_5MHZ)
+ scan_width = NL80211_BSS_CHAN_WIDTH_5;
+ if (rx_status->flag & RX_FLAG_10MHZ)
+ scan_width = NL80211_BSS_CHAN_WIDTH_10;
+
+ cbss = cfg80211_inform_bss_width_frame(local->hw.wiphy, channel,
+ scan_width, mgmt, len, signal,
+ GFP_ATOMIC);
if (!cbss)
return NULL;
@@ -204,10 +212,29 @@ void ieee80211_scan_rx(struct ieee80211_local *local, struct sk_buff *skb)
ieee80211_rx_bss_put(local, bss);
}
+static void
+ieee80211_prepare_scan_chandef(struct cfg80211_chan_def *chandef,
+ enum nl80211_bss_scan_width scan_width)
+{
+ memset(chandef, 0, sizeof(*chandef));
+ switch (scan_width) {
+ case NL80211_BSS_CHAN_WIDTH_5:
+ chandef->width = NL80211_CHAN_WIDTH_5;
+ break;
+ case NL80211_BSS_CHAN_WIDTH_10:
+ chandef->width = NL80211_CHAN_WIDTH_10;
+ break;
+ default:
+ chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
+ break;
+ }
+}
+
/* return false if no more work */
static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
{
struct cfg80211_scan_request *req = local->scan_req;
+ struct cfg80211_chan_def chandef;
enum ieee80211_band band;
int i, ielen, n_chans;
@@ -229,11 +256,12 @@ static bool ieee80211_prep_hw_scan(struct ieee80211_local *local)
} while (!n_chans);
local->hw_scan_req->n_channels = n_chans;
+ ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
ielen = ieee80211_build_preq_ies(local, (u8 *)local->hw_scan_req->ie,
local->hw_scan_ies_bufsize,
req->ie, req->ie_len, band,
- req->rates[band], 0);
+ req->rates[band], &chandef);
local->hw_scan_req->ie_len = ielen;
local->hw_scan_req->no_cck = req->no_cck;
@@ -280,7 +308,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted,
rcu_assign_pointer(local->scan_sdata, NULL);
local->scanning = 0;
- local->scan_channel = NULL;
+ local->scan_chandef.chan = NULL;
/* Set power back to normal operating levels. */
ieee80211_hw_config(local, 0);
@@ -615,11 +643,34 @@ static void ieee80211_scan_state_set_channel(struct ieee80211_local *local,
{
int skip;
struct ieee80211_channel *chan;
+ enum nl80211_bss_scan_width oper_scan_width;
skip = 0;
chan = local->scan_req->channels[local->scan_channel_idx];
- local->scan_channel = chan;
+ local->scan_chandef.chan = chan;
+ local->scan_chandef.center_freq1 = chan->center_freq;
+ local->scan_chandef.center_freq2 = 0;
+ switch (local->scan_req->scan_width) {
+ case NL80211_BSS_CHAN_WIDTH_5:
+ local->scan_chandef.width = NL80211_CHAN_WIDTH_5;
+ break;
+ case NL80211_BSS_CHAN_WIDTH_10:
+ local->scan_chandef.width = NL80211_CHAN_WIDTH_10;
+ break;
+ case NL80211_BSS_CHAN_WIDTH_20:
+ /* If scanning on oper channel, use whatever channel-type
+ * is currently in use.
+ */
+ oper_scan_width = cfg80211_chandef_to_scan_width(
+ &local->_oper_chandef);
+ if (chan == local->_oper_chandef.chan &&
+ oper_scan_width == local->scan_req->scan_width)
+ local->scan_chandef = local->_oper_chandef;
+ else
+ local->scan_chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
+ break;
+ }
if (ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL))
skip = 1;
@@ -659,7 +710,7 @@ static void ieee80211_scan_state_suspend(struct ieee80211_local *local,
unsigned long *next_delay)
{
/* switch back to the operating channel */
- local->scan_channel = NULL;
+ local->scan_chandef.chan = NULL;
ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
/* disable PS */
@@ -801,7 +852,8 @@ int ieee80211_request_scan(struct ieee80211_sub_if_data *sdata,
int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
const u8 *ssid, u8 ssid_len,
- struct ieee80211_channel *chan)
+ struct ieee80211_channel *chan,
+ enum nl80211_bss_scan_width scan_width)
{
struct ieee80211_local *local = sdata->local;
int ret = -EBUSY;
@@ -851,6 +903,7 @@ int ieee80211_request_ibss_scan(struct ieee80211_sub_if_data *sdata,
local->int_scan_req->ssids = &local->scan_ssid;
local->int_scan_req->n_ssids = 1;
+ local->int_scan_req->scan_width = scan_width;
memcpy(local->int_scan_req->ssids[0].ssid, ssid, IEEE80211_MAX_SSID_LEN);
local->int_scan_req->ssids[0].ssid_len = ssid_len;
@@ -912,6 +965,7 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_sched_scan_ies sched_scan_ies = {};
+ struct cfg80211_chan_def chandef;
int ret, i, iebufsz;
iebufsz = 2 + IEEE80211_MAX_SSID_LEN +
@@ -939,10 +993,12 @@ int ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
goto out_free;
}
+ ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
+
sched_scan_ies.len[i] =
ieee80211_build_preq_ies(local, sched_scan_ies.ie[i],
iebufsz, req->ie, req->ie_len,
- i, (u32) -1, 0);
+ i, (u32) -1, &chandef);
}
ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies);
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 43439203f4e..368837fe3b8 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -235,7 +235,8 @@ static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info)
/* IEEE80211_RADIOTAP_RATE rate */
if (info->status.rates[0].idx >= 0 &&
- !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS))
+ !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
+ IEEE80211_TX_RC_VHT_MCS)))
len += 2;
/* IEEE80211_RADIOTAP_TX_FLAGS */
@@ -244,17 +245,23 @@ static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info)
/* IEEE80211_RADIOTAP_DATA_RETRIES */
len += 1;
- /* IEEE80211_TX_RC_MCS */
- if (info->status.rates[0].idx >= 0 &&
- info->status.rates[0].flags & IEEE80211_TX_RC_MCS)
- len += 3;
+ /* IEEE80211_RADIOTAP_MCS
+ * IEEE80211_RADIOTAP_VHT */
+ if (info->status.rates[0].idx >= 0) {
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS)
+ len += 3;
+ else if (info->status.rates[0].flags & IEEE80211_TX_RC_VHT_MCS)
+ len = ALIGN(len, 2) + 12;
+ }
return len;
}
-static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
- *sband, struct sk_buff *skb,
- int retry_count, int rtap_len)
+static void
+ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
+ struct ieee80211_supported_band *sband,
+ struct sk_buff *skb, int retry_count,
+ int rtap_len, int shift)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -279,9 +286,13 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
/* IEEE80211_RADIOTAP_RATE */
if (info->status.rates[0].idx >= 0 &&
- !(info->status.rates[0].flags & IEEE80211_TX_RC_MCS)) {
+ !(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
+ IEEE80211_TX_RC_VHT_MCS))) {
+ u16 rate;
+
rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
- *pos = sband->bitrates[info->status.rates[0].idx].bitrate / 5;
+ rate = sband->bitrates[info->status.rates[0].idx].bitrate;
+ *pos = DIV_ROUND_UP(rate, 5 * (1 << shift));
/* padding for tx flags */
pos += 2;
}
@@ -306,9 +317,12 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
*pos = retry_count;
pos++;
- /* IEEE80211_TX_RC_MCS */
- if (info->status.rates[0].idx >= 0 &&
- info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
+ if (info->status.rates[0].idx < 0)
+ return;
+
+ /* IEEE80211_RADIOTAP_MCS
+ * IEEE80211_RADIOTAP_VHT */
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_MCS) {
rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
pos[0] = IEEE80211_RADIOTAP_MCS_HAVE_MCS |
IEEE80211_RADIOTAP_MCS_HAVE_GI |
@@ -321,8 +335,48 @@ static void ieee80211_add_tx_radiotap_header(struct ieee80211_supported_band
pos[1] |= IEEE80211_RADIOTAP_MCS_FMT_GF;
pos[2] = info->status.rates[0].idx;
pos += 3;
- }
+ } else if (info->status.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) {
+ u16 known = local->hw.radiotap_vht_details &
+ (IEEE80211_RADIOTAP_VHT_KNOWN_GI |
+ IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH);
+
+ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
+
+ /* required alignment from rthdr */
+ pos = (u8 *)rthdr + ALIGN(pos - (u8 *)rthdr, 2);
+ /* u16 known - IEEE80211_RADIOTAP_VHT_KNOWN_* */
+ put_unaligned_le16(known, pos);
+ pos += 2;
+
+ /* u8 flags - IEEE80211_RADIOTAP_VHT_FLAG_* */
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
+ *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
+ pos++;
+
+ /* u8 bandwidth */
+ if (info->status.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ *pos = 1;
+ else if (info->status.rates[0].flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
+ *pos = 4;
+ else if (info->status.rates[0].flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
+ *pos = 11;
+ else /* IEEE80211_TX_RC_{20_MHZ_WIDTH,FIXME:DUP_DATA} */
+ *pos = 0;
+ pos++;
+
+ /* u8 mcs_nss[4] */
+ *pos = (ieee80211_rate_get_vht_mcs(&info->status.rates[0]) << 4) |
+ ieee80211_rate_get_vht_nss(&info->status.rates[0]);
+ pos += 4;
+
+ /* u8 coding */
+ pos++;
+ /* u8 group_id */
+ pos++;
+ /* u16 partial_aid */
+ pos += 2;
+ }
}
static void ieee80211_report_used_skb(struct ieee80211_local *local,
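
The VHT radiotap field packs the bandwidth into a single code byte (0 = 20 MHz, 1 = 40, 4 = 80, 11 = 160, as in the hunk above) and the rate into one mcs_nss byte with the MCS in the high nibble and the stream count in the low nibble. A small sketch of that packing; the function names here are local to the example:

    #include <stdio.h>
    #include <stdint.h>

    static uint8_t vht_bw_code(int mhz)
    {
        switch (mhz) {
        case 40:  return 1;
        case 80:  return 4;
        case 160: return 11;
        default:  return 0;   /* 20 MHz */
        }
    }

    static uint8_t vht_mcs_nss(unsigned int mcs, unsigned int nss)
    {
        return (uint8_t)((mcs << 4) | (nss & 0xf));
    }

    int main(void)
    {
        /* e.g. VHT MCS 7, 2 spatial streams on an 80 MHz channel */
        printf("bandwidth code: %u\n", vht_bw_code(80));
        printf("mcs_nss[0]:     0x%02x\n", vht_mcs_nss(7, 2));
        return 0;
    }
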
@@ -424,6 +478,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
bool acked;
struct ieee80211_bar *bar;
int rtap_len;
+ int shift = 0;
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
@@ -458,6 +513,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
if (!ether_addr_equal(hdr->addr2, sta->sdata->vif.addr))
continue;
+ shift = ieee80211_vif_get_shift(&sta->sdata->vif);
+
if (info->flags & IEEE80211_TX_STATUS_EOSP)
clear_sta_flag(sta, WLAN_STA_SP);
@@ -557,7 +614,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
rcu_read_unlock();
- ieee80211_led_tx(local, 0);
+ ieee80211_led_tx(local);
/* SNMP counters
* Fragments are passed to low-level drivers as separate skbs, so these
@@ -624,7 +681,8 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
dev_kfree_skb(skb);
return;
}
- ieee80211_add_tx_radiotap_header(sband, skb, retry_count, rtap_len);
+ ieee80211_add_tx_radiotap_header(local, sband, skb, retry_count,
+ rtap_len, shift);
/* XXX: is this sufficient for BPF? */
skb_set_mac_header(skb, 0);
diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h
index c215fafd7a2..1aba645882b 100644
--- a/net/mac80211/trace.h
+++ b/net/mac80211/trace.h
@@ -1906,6 +1906,32 @@ TRACE_EVENT(api_radar_detected,
)
);
+TRACE_EVENT(drv_channel_switch_beacon,
+ TP_PROTO(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_chan_def *chandef),
+
+ TP_ARGS(local, sdata, chandef),
+
+ TP_STRUCT__entry(
+ LOCAL_ENTRY
+ VIF_ENTRY
+ CHANDEF_ENTRY
+ ),
+
+ TP_fast_assign(
+ LOCAL_ASSIGN;
+ VIF_ASSIGN;
+ CHANDEF_ASSIGN(chandef);
+ ),
+
+ TP_printk(
+ LOCAL_PR_FMT VIF_PR_FMT " channel switch to " CHANDEF_PR_FMT,
+ LOCAL_PR_ARG, VIF_PR_ARG, CHANDEF_PR_ARG
+ )
+);
+
+
#ifdef CONFIG_MAC80211_MESSAGE_TRACING
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mac80211_msg
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 4105d0ca963..3456c0486b4 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -40,12 +40,22 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
struct sk_buff *skb, int group_addr,
int next_frag_len)
{
- int rate, mrate, erp, dur, i;
+ int rate, mrate, erp, dur, i, shift = 0;
struct ieee80211_rate *txrate;
struct ieee80211_local *local = tx->local;
struct ieee80211_supported_band *sband;
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_chanctx_conf *chanctx_conf;
+ u32 rate_flags = 0;
+
+ rcu_read_lock();
+ chanctx_conf = rcu_dereference(tx->sdata->vif.chanctx_conf);
+ if (chanctx_conf) {
+ shift = ieee80211_chandef_get_shift(&chanctx_conf->def);
+ rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
+ }
+ rcu_read_unlock();
/* assume HW handles this */
if (tx->rate.flags & IEEE80211_TX_RC_MCS)
@@ -122,8 +132,11 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
if (r->bitrate > txrate->bitrate)
break;
+ if ((rate_flags & r->flags) != rate_flags)
+ continue;
+
if (tx->sdata->vif.bss_conf.basic_rates & BIT(i))
- rate = r->bitrate;
+ rate = DIV_ROUND_UP(r->bitrate, 1 << shift);
switch (sband->band) {
case IEEE80211_BAND_2GHZ: {
@@ -150,7 +163,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
if (rate == -1) {
/* No matching basic rate found; use highest suitable mandatory
* PHY rate */
- rate = mrate;
+ rate = DIV_ROUND_UP(mrate, 1 << shift);
}
/* Don't calculate ACKs for QoS Frames with NoAck Policy set */
@@ -162,7 +175,8 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
* (10 bytes + 4-byte FCS = 112 bits) plus SIFS; rounded up
* to closest integer */
dur = ieee80211_frame_duration(sband->band, 10, rate, erp,
- tx->sdata->vif.bss_conf.use_short_preamble);
+ tx->sdata->vif.bss_conf.use_short_preamble,
+ shift);
if (next_frag_len) {
/* Frame is fragmented: duration increases with time needed to
@@ -171,7 +185,8 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
/* next fragment */
dur += ieee80211_frame_duration(sband->band, next_frag_len,
txrate->bitrate, erp,
- tx->sdata->vif.bss_conf.use_short_preamble);
+ tx->sdata->vif.bss_conf.use_short_preamble,
+ shift);
}
return cpu_to_le16(dur);
@@ -524,9 +539,11 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
- if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol &&
- tx->sdata->control_port_no_encrypt))
- info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol)) {
+ if (tx->sdata->control_port_no_encrypt)
+ info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+ info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
+ }
return TX_CONTINUE;
}
@@ -764,9 +781,11 @@ ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx)
/*
* Anything but QoS data that has a sequence number field
* (is long enough) gets a sequence number from the global
- * counter.
+ * counter. QoS data frames with a multicast destination
+ * also use the global counter (802.11-2012 9.3.2.10).
*/
- if (!ieee80211_is_data_qos(hdr->frame_control)) {
+ if (!ieee80211_is_data_qos(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1)) {
/* driver should assign sequence number */
info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
/* for pure STA mode without beacons, we can do it */
@@ -1257,6 +1276,10 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
switch (sdata->vif.type) {
case NL80211_IFTYPE_MONITOR:
+ if (sdata->u.mntr_flags & MONITOR_FLAG_ACTIVE) {
+ vif = &sdata->vif;
+ break;
+ }
sdata = rcu_dereference(local->monitor_sdata);
if (sdata) {
vif = &sdata->vif;
@@ -1281,7 +1304,6 @@ static bool __ieee80211_tx(struct ieee80211_local *local,
txpending);
ieee80211_tpt_led_trig_tx(local, fc, led_len);
- ieee80211_led_tx(local, 1);
WARN_ON_ONCE(!skb_queue_empty(skbs));
@@ -2320,6 +2342,81 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
return 0;
}
+void ieee80211_csa_finish(struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ ieee80211_queue_work(&sdata->local->hw,
+ &sdata->csa_finalize_work);
+}
+EXPORT_SYMBOL(ieee80211_csa_finish);
+
+static void ieee80211_update_csa(struct ieee80211_sub_if_data *sdata,
+ struct beacon_data *beacon)
+{
+ struct probe_resp *resp;
+ int counter_offset_beacon = sdata->csa_counter_offset_beacon;
+ int counter_offset_presp = sdata->csa_counter_offset_presp;
+
+ /* warn if the driver did not check for/react to csa completeness */
+ if (WARN_ON(((u8 *)beacon->tail)[counter_offset_beacon] == 0))
+ return;
+
+ ((u8 *)beacon->tail)[counter_offset_beacon]--;
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP &&
+ counter_offset_presp) {
+ rcu_read_lock();
+ resp = rcu_dereference(sdata->u.ap.probe_resp);
+
+ /* if nl80211 accepted the offset, this should not happen. */
+ if (WARN_ON(!resp)) {
+ rcu_read_unlock();
+ return;
+ }
+ resp->data[counter_offset_presp]--;
+ rcu_read_unlock();
+ }
+}
+
+bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+ struct beacon_data *beacon = NULL;
+ u8 *beacon_data;
+ size_t beacon_data_len;
+ int counter_beacon = sdata->csa_counter_offset_beacon;
+ int ret = false;
+
+ if (!ieee80211_sdata_running(sdata))
+ return false;
+
+ rcu_read_lock();
+ if (vif->type == NL80211_IFTYPE_AP) {
+ struct ieee80211_if_ap *ap = &sdata->u.ap;
+
+ beacon = rcu_dereference(ap->beacon);
+ if (WARN_ON(!beacon || !beacon->tail))
+ goto out;
+ beacon_data = beacon->tail;
+ beacon_data_len = beacon->tail_len;
+ } else {
+ WARN_ON(1);
+ goto out;
+ }
+
+ if (WARN_ON(counter_beacon > beacon_data_len))
+ goto out;
+
+ if (beacon_data[counter_beacon] == 0)
+ ret = true;
+ out:
+ rcu_read_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL(ieee80211_csa_is_complete);
+
struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u16 *tim_offset, u16 *tim_length)
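
The CSA machinery added above boils down to a countdown byte embedded in the beacon (and optionally the probe response) template: each transmitted beacon decrements it, ieee80211_csa_is_complete() reports when it has reached zero, and ieee80211_csa_finish() then schedules the finalize work. A standalone simulation of that countdown, with an invented buffer and offset purely for illustration:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdint.h>

    /* pretend beacon tail with a CSA countdown byte at a known offset */
    static uint8_t beacon_tail[8] = { 0xdd, 0x05, 0x00, 0x11, 0x22, 3, 0x00, 0x00 };
    static const unsigned int csa_counter_offset = 5;

    static bool csa_is_complete(void)
    {
        return beacon_tail[csa_counter_offset] == 0;
    }

    static void beacon_sent(void)
    {
        /* mirror of ieee80211_update_csa(): never count below zero */
        if (beacon_tail[csa_counter_offset] == 0)
            return;
        beacon_tail[csa_counter_offset]--;
    }

    int main(void)
    {
        int i;

        for (i = 0; i < 4; i++) {
            beacon_sent();
            printf("beacon %d sent, csa complete: %d\n", i + 1, csa_is_complete());
        }
        return 0;
    }
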
@@ -2350,6 +2447,9 @@ struct sk_buff *ieee80211_beacon_get_tim(struct ieee80211_hw *hw,
struct beacon_data *beacon = rcu_dereference(ap->beacon);
if (beacon) {
+ if (sdata->vif.csa_active)
+ ieee80211_update_csa(sdata, beacon);
+
/*
* headroom, head length,
* tail length and maximum TIM length
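The driver-side counterpart of the CSA helpers added above is not part of this
patch. As a hedged sketch only (foo_tx_beacon() and foo_queue_beacon() are
made-up names, not mac80211 or driver API), a beacon-offload driver would let
ieee80211_beacon_get() run ieee80211_update_csa(), then poll
ieee80211_csa_is_complete() and call ieee80211_csa_finish() once the countdown
in the beacon has reached zero:

#include <net/mac80211.h>

/* Illustrative only: foo_tx_beacon()/foo_queue_beacon() are hypothetical
 * driver helpers, not part of this patch or of mac80211.
 */
static void foo_tx_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
	struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);

	if (!beacon)
		return;

	/* ieee80211_beacon_get() has already run ieee80211_update_csa(),
	 * so the countdown IE in this beacon is up to date.
	 */
	if (vif->csa_active && ieee80211_csa_is_complete(vif))
		ieee80211_csa_finish(vif);

	foo_queue_beacon(hw, beacon);		/* hypothetical */
}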
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 22654452a56..e1b34a18b24 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -107,7 +107,8 @@ void ieee80211_tx_set_protected(struct ieee80211_tx_data *tx)
}
int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
- int rate, int erp, int short_preamble)
+ int rate, int erp, int short_preamble,
+ int shift)
{
int dur;
@@ -118,6 +119,9 @@ int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
*
 * rate is in 100 kbps, so dividend is multiplied by 10 in the
* DIV_ROUND_UP() operations.
+ *
+ * shift may be 2 for 5 MHz channels or 1 for 10 MHz channels, and
+ * is assumed to be 0 otherwise.
*/
if (band == IEEE80211_BAND_5GHZ || erp) {
@@ -130,13 +134,23 @@ int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
* TXTIME = T_PREAMBLE + T_SIGNAL + T_SYM x N_SYM + Signal Ext
*
* T_SYM = 4 usec
- * 802.11a - 17.5.2: aSIFSTime = 16 usec
+ * 802.11a - 18.5.2: aSIFSTime = 16 usec
* 802.11g - 19.8.4: aSIFSTime = 10 usec +
* signal ext = 6 usec
*/
dur = 16; /* SIFS + signal ext */
- dur += 16; /* 17.3.2.3: T_PREAMBLE = 16 usec */
- dur += 4; /* 17.3.2.3: T_SIGNAL = 4 usec */
+ dur += 16; /* IEEE 802.11-2012 18.3.2.4: T_PREAMBLE = 16 usec */
+ dur += 4; /* IEEE 802.11-2012 18.3.2.4: T_SIGNAL = 4 usec */
+
+ /* IEEE 802.11-2012 18.3.2.4: all values above are:
+ * * times 4 for 5 MHz
+ * * times 2 for 10 MHz
+ */
+ dur *= 1 << shift;
+
+ /* rates should already consider the channel bandwidth,
+ * don't apply divisor again.
+ */
dur += 4 * DIV_ROUND_UP((16 + 8 * (len + 4) + 6) * 10,
4 * rate); /* T_SYM x N_SYM */
} else {
@@ -168,7 +182,7 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
{
struct ieee80211_sub_if_data *sdata;
u16 dur;
- int erp;
+ int erp, shift = 0;
bool short_preamble = false;
erp = 0;
@@ -177,10 +191,11 @@ __le16 ieee80211_generic_frame_duration(struct ieee80211_hw *hw,
short_preamble = sdata->vif.bss_conf.use_short_preamble;
if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
erp = rate->flags & IEEE80211_RATE_ERP_G;
+ shift = ieee80211_vif_get_shift(vif);
}
dur = ieee80211_frame_duration(band, frame_len, rate->bitrate, erp,
- short_preamble);
+ short_preamble, shift);
return cpu_to_le16(dur);
}
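To make the arithmetic above concrete, here is a minimal user-space model of
the OFDM duration formula with the new bandwidth shift (a sketch, not kernel
code; the example rate is assumed to already be divided by 1 << shift, as the
comment above requires):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* len in bytes, rate in 100 kbps units (already scaled for bandwidth),
 * shift = 0 for 20 MHz, 1 for 10 MHz, 2 for 5 MHz.
 */
static int ofdm_frame_duration(int len, int rate, int shift)
{
	int dur = 16;		/* SIFS + signal ext */

	dur += 16;		/* T_PREAMBLE */
	dur += 4;		/* T_SIGNAL */
	dur *= 1 << shift;	/* preamble/signal stretch on 10/5 MHz */
	dur += 4 * DIV_ROUND_UP((16 + 8 * (len + 4) + 6) * 10,
				4 * rate);	/* T_SYM x N_SYM */
	return dur;
}

int main(void)
{
	/* 1500-byte frame at nominal 6 Mb/s on 20, 10 and 5 MHz */
	for (int shift = 0; shift <= 2; shift++)
		printf("shift %d: %d usec\n", shift,
		       ofdm_frame_duration(1500, 60 >> shift, shift));
	return 0;
}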
@@ -194,7 +209,7 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
struct ieee80211_rate *rate;
struct ieee80211_sub_if_data *sdata;
bool short_preamble;
- int erp;
+ int erp, shift = 0, bitrate;
u16 dur;
struct ieee80211_supported_band *sband;
@@ -210,17 +225,20 @@ __le16 ieee80211_rts_duration(struct ieee80211_hw *hw,
short_preamble = sdata->vif.bss_conf.use_short_preamble;
if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
erp = rate->flags & IEEE80211_RATE_ERP_G;
+ shift = ieee80211_vif_get_shift(vif);
}
+ bitrate = DIV_ROUND_UP(rate->bitrate, 1 << shift);
+
/* CTS duration */
- dur = ieee80211_frame_duration(sband->band, 10, rate->bitrate,
- erp, short_preamble);
+ dur = ieee80211_frame_duration(sband->band, 10, bitrate,
+ erp, short_preamble, shift);
/* Data frame duration */
- dur += ieee80211_frame_duration(sband->band, frame_len, rate->bitrate,
- erp, short_preamble);
+ dur += ieee80211_frame_duration(sband->band, frame_len, bitrate,
+ erp, short_preamble, shift);
/* ACK duration */
- dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate,
- erp, short_preamble);
+ dur += ieee80211_frame_duration(sband->band, 10, bitrate,
+ erp, short_preamble, shift);
return cpu_to_le16(dur);
}
@@ -235,7 +253,7 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
struct ieee80211_rate *rate;
struct ieee80211_sub_if_data *sdata;
bool short_preamble;
- int erp;
+ int erp, shift = 0, bitrate;
u16 dur;
struct ieee80211_supported_band *sband;
@@ -250,15 +268,18 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw,
short_preamble = sdata->vif.bss_conf.use_short_preamble;
if (sdata->flags & IEEE80211_SDATA_OPERATING_GMODE)
erp = rate->flags & IEEE80211_RATE_ERP_G;
+ shift = ieee80211_vif_get_shift(vif);
}
+ bitrate = DIV_ROUND_UP(rate->bitrate, 1 << shift);
+
/* Data frame duration */
- dur = ieee80211_frame_duration(sband->band, frame_len, rate->bitrate,
- erp, short_preamble);
+ dur = ieee80211_frame_duration(sband->band, frame_len, bitrate,
+ erp, short_preamble, shift);
if (!(frame_txctl->flags & IEEE80211_TX_CTL_NO_ACK)) {
/* ACK duration */
- dur += ieee80211_frame_duration(sband->band, 10, rate->bitrate,
- erp, short_preamble);
+ dur += ieee80211_frame_duration(sband->band, 10, bitrate,
+ erp, short_preamble, shift);
}
return cpu_to_le16(dur);
@@ -1052,32 +1073,6 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata,
}
}
-void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
- const size_t supp_rates_len,
- const u8 *supp_rates)
-{
- struct ieee80211_chanctx_conf *chanctx_conf;
- int i, have_higher_than_11mbit = 0;
-
- /* cf. IEEE 802.11 9.2.12 */
- for (i = 0; i < supp_rates_len; i++)
- if ((supp_rates[i] & 0x7f) * 5 > 110)
- have_higher_than_11mbit = 1;
-
- rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
-
- if (chanctx_conf &&
- chanctx_conf->def.chan->band == IEEE80211_BAND_2GHZ &&
- have_higher_than_11mbit)
- sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
- else
- sdata->flags &= ~IEEE80211_SDATA_OPERATING_GMODE;
- rcu_read_unlock();
-
- ieee80211_set_wmm_default(sdata, true);
-}
-
void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
u16 transaction, u16 auth_alg, u16 status,
const u8 *extra, size_t extra_len, const u8 *da,
@@ -1162,7 +1157,7 @@ void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
size_t buffer_len, const u8 *ie, size_t ie_len,
enum ieee80211_band band, u32 rate_mask,
- u8 channel)
+ struct cfg80211_chan_def *chandef)
{
struct ieee80211_supported_band *sband;
u8 *pos = buffer, *end = buffer + buffer_len;
@@ -1171,16 +1166,26 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
u8 rates[32];
int num_rates;
int ext_rates_len;
+ int shift;
+ u32 rate_flags;
sband = local->hw.wiphy->bands[band];
if (WARN_ON_ONCE(!sband))
return 0;
+ rate_flags = ieee80211_chandef_rate_flags(chandef);
+ shift = ieee80211_chandef_get_shift(chandef);
+
num_rates = 0;
for (i = 0; i < sband->n_bitrates; i++) {
if ((BIT(i) & rate_mask) == 0)
continue; /* skip rate */
- rates[num_rates++] = (u8) (sband->bitrates[i].bitrate / 5);
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+
+ rates[num_rates++] =
+ (u8) DIV_ROUND_UP(sband->bitrates[i].bitrate,
+ (1 << shift) * 5);
}
supp_rates_len = min_t(int, num_rates, 8);
@@ -1220,12 +1225,13 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
pos += ext_rates_len;
}
- if (channel && sband->band == IEEE80211_BAND_2GHZ) {
+ if (chandef->chan && sband->band == IEEE80211_BAND_2GHZ) {
if (end - pos < 3)
goto out_err;
*pos++ = WLAN_EID_DS_PARAMS;
*pos++ = 1;
- *pos++ = channel;
+ *pos++ = ieee80211_frequency_to_channel(
+ chandef->chan->center_freq);
}
/* insert custom IEs that go before HT */
@@ -1290,9 +1296,9 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
bool directed)
{
struct ieee80211_local *local = sdata->local;
+ struct cfg80211_chan_def chandef;
struct sk_buff *skb;
struct ieee80211_mgmt *mgmt;
- u8 chan_no;
int ies_len;
/*
@@ -1300,10 +1306,11 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
* in order to maximize the chance that we get a response. Some
* badly-behaved APs don't respond when this parameter is included.
*/
+ chandef.width = sdata->vif.bss_conf.chandef.width;
if (directed)
- chan_no = 0;
+ chandef.chan = NULL;
else
- chan_no = ieee80211_frequency_to_channel(chan->center_freq);
+ chandef.chan = chan;
skb = ieee80211_probereq_get(&local->hw, &sdata->vif,
ssid, ssid_len, 100 + ie_len);
@@ -1313,7 +1320,7 @@ struct sk_buff *ieee80211_build_probe_req(struct ieee80211_sub_if_data *sdata,
ies_len = ieee80211_build_preq_ies(local, skb_tail_pointer(skb),
skb_tailroom(skb),
ie, ie_len, chan->band,
- ratemask, chan_no);
+ ratemask, &chandef);
skb_put(skb, ies_len);
if (dst) {
@@ -1347,16 +1354,19 @@ void ieee80211_send_probe_req(struct ieee80211_sub_if_data *sdata, u8 *dst,
}
}
-u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
+u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
struct ieee802_11_elems *elems,
enum ieee80211_band band, u32 *basic_rates)
{
struct ieee80211_supported_band *sband;
struct ieee80211_rate *bitrates;
size_t num_rates;
- u32 supp_rates;
- int i, j;
- sband = local->hw.wiphy->bands[band];
+ u32 supp_rates, rate_flags;
+ int i, j, shift;
+ sband = sdata->local->hw.wiphy->bands[band];
+
+ rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ shift = ieee80211_vif_get_shift(&sdata->vif);
if (WARN_ON(!sband))
return 1;
@@ -1381,7 +1391,15 @@ u32 ieee80211_sta_get_rates(struct ieee80211_local *local,
continue;
for (j = 0; j < num_rates; j++) {
- if (bitrates[j].bitrate == own_rate) {
+ int brate;
+ if ((rate_flags & sband->bitrates[j].flags)
+ != rate_flags)
+ continue;
+
+ brate = DIV_ROUND_UP(sband->bitrates[j].bitrate,
+ 1 << shift);
+
+ if (brate == own_rate) {
supp_rates |= BIT(j);
if (basic_rates && is_basic)
*basic_rates |= BIT(j);
@@ -1435,8 +1453,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
local->resuming = true;
if (local->wowlan) {
- local->wowlan = false;
res = drv_resume(local);
+ local->wowlan = false;
if (res < 0) {
local->resuming = false;
return res;
@@ -2004,18 +2022,56 @@ void ieee80211_ht_oper_to_chandef(struct ieee80211_channel *control_chan,
cfg80211_chandef_create(chandef, control_chan, channel_type);
}
+int ieee80211_parse_bitrates(struct cfg80211_chan_def *chandef,
+ const struct ieee80211_supported_band *sband,
+ const u8 *srates, int srates_len, u32 *rates)
+{
+ u32 rate_flags = ieee80211_chandef_rate_flags(chandef);
+ int shift = ieee80211_chandef_get_shift(chandef);
+ struct ieee80211_rate *br;
+ int brate, rate, i, j, count = 0;
+
+ *rates = 0;
+
+ for (i = 0; i < srates_len; i++) {
+ rate = srates[i] & 0x7f;
+
+ for (j = 0; j < sband->n_bitrates; j++) {
+ br = &sband->bitrates[j];
+ if ((rate_flags & br->flags) != rate_flags)
+ continue;
+
+ brate = DIV_ROUND_UP(br->bitrate, (1 << shift) * 5);
+ if (brate == rate) {
+ *rates |= BIT(j);
+ count++;
+ break;
+ }
+ }
+ }
+ return count;
+}
+
int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb, bool need_basic,
enum ieee80211_band band)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
- int rate;
+ int rate, shift;
u8 i, rates, *pos;
u32 basic_rates = sdata->vif.bss_conf.basic_rates;
+ u32 rate_flags;
+ shift = ieee80211_vif_get_shift(&sdata->vif);
+ rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
sband = local->hw.wiphy->bands[band];
- rates = sband->n_bitrates;
+ rates = 0;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+ rates++;
+ }
if (rates > 8)
rates = 8;
@@ -2027,10 +2083,15 @@ int ieee80211_add_srates_ie(struct ieee80211_sub_if_data *sdata,
*pos++ = rates;
for (i = 0; i < rates; i++) {
u8 basic = 0;
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+
if (need_basic && basic_rates & BIT(i))
basic = 0x80;
rate = sband->bitrates[i].bitrate;
- *pos++ = basic | (u8) (rate / 5);
+ rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
+ 5 * (1 << shift));
+ *pos++ = basic | (u8) rate;
}
return 0;
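The rate octets above follow the same encoding used throughout this series
(and decoded back in ieee80211_parse_bitrates()): internal bitrates are kept
in 100 kbps units, the IE uses 500 kbps units with bit 7 marking a basic rate,
and on 5/10 MHz channels the value is first divided by 1 << shift. A small
standalone sketch of that encoding (not mac80211 code):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned char encode_rate(int bitrate, int shift, int basic)
{
	/* bitrate in 100 kbps -> IE units of 500 kbps, scaled for bandwidth */
	unsigned char r = DIV_ROUND_UP(bitrate, 5 * (1 << shift));

	return (basic ? 0x80 : 0x00) | r;
}

int main(void)
{
	/* 6 Mb/s basic rate: 0x8c on 20 MHz, 0x86 on a 10 MHz channel */
	printf("0x%02x 0x%02x\n",
	       encode_rate(60, 0, 1), encode_rate(60, 1, 1));
	return 0;
}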
@@ -2042,12 +2103,22 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
- int rate;
+ int rate, skip, shift;
u8 i, exrates, *pos;
u32 basic_rates = sdata->vif.bss_conf.basic_rates;
+ u32 rate_flags;
+
+ rate_flags = ieee80211_chandef_rate_flags(&sdata->vif.bss_conf.chandef);
+ shift = ieee80211_vif_get_shift(&sdata->vif);
sband = local->hw.wiphy->bands[band];
- exrates = sband->n_bitrates;
+ exrates = 0;
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
+ continue;
+ exrates++;
+ }
+
if (exrates > 8)
exrates -= 8;
else
@@ -2060,12 +2131,19 @@ int ieee80211_add_ext_srates_ie(struct ieee80211_sub_if_data *sdata,
pos = skb_put(skb, exrates + 2);
*pos++ = WLAN_EID_EXT_SUPP_RATES;
*pos++ = exrates;
+ skip = 0;
for (i = 8; i < sband->n_bitrates; i++) {
u8 basic = 0;
+ if ((rate_flags & sband->bitrates[i].flags)
+ != rate_flags)
+ continue;
+ if (skip++ < 8)
+ continue;
if (need_basic && basic_rates & BIT(i))
basic = 0x80;
- rate = sband->bitrates[i].bitrate;
- *pos++ = basic | (u8) (rate / 5);
+ rate = DIV_ROUND_UP(sband->bitrates[i].bitrate,
+ 5 * (1 << shift));
+ *pos++ = basic | (u8) rate;
}
}
return 0;
@@ -2149,9 +2227,17 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
ri.flags |= RATE_INFO_FLAGS_SHORT_GI;
} else {
struct ieee80211_supported_band *sband;
+ int shift = 0;
+ int bitrate;
+
+ if (status->flag & RX_FLAG_10MHZ)
+ shift = 1;
+ if (status->flag & RX_FLAG_5MHZ)
+ shift = 2;
sband = local->hw.wiphy->bands[status->band];
- ri.legacy = sband->bitrates[status->rate_idx].bitrate;
+ bitrate = sband->bitrates[status->rate_idx].bitrate;
+ ri.legacy = DIV_ROUND_UP(bitrate, (1 << shift));
}
rate = cfg80211_calculate_bitrate(&ri);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 56d22cae590..6e839b6dff2 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -408,21 +408,10 @@ config NF_NAT_TFTP
depends on NF_CONNTRACK && NF_NAT
default NF_NAT && NF_CONNTRACK_TFTP
-endif # NF_CONNTRACK
-
-# transparent proxy support
-config NETFILTER_TPROXY
- tristate "Transparent proxying support"
- depends on IP_NF_MANGLE
- depends on NETFILTER_ADVANCED
- help
- This option enables transparent proxying support, that is,
- support for handling non-locally bound IPv4 TCP and UDP sockets.
- For it to work you will have to configure certain iptables rules
- and use policy routing. For more information on how to set it up
- see Documentation/networking/tproxy.txt.
+config NETFILTER_SYNPROXY
+ tristate
- To compile it as a module, choose M here. If unsure, say N.
+endif # NF_CONNTRACK
config NETFILTER_XTABLES
tristate "Netfilter Xtables support (required for ip_tables)"
@@ -720,10 +709,10 @@ config NETFILTER_XT_TARGET_TEE
this clone be rerouted to another nexthop.
config NETFILTER_XT_TARGET_TPROXY
- tristate '"TPROXY" target support'
- depends on NETFILTER_TPROXY
+ tristate '"TPROXY" target transparent proxying support'
depends on NETFILTER_XTABLES
depends on NETFILTER_ADVANCED
+ depends on IP_NF_MANGLE
select NF_DEFRAG_IPV4
select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
help
@@ -731,6 +720,9 @@ config NETFILTER_XT_TARGET_TPROXY
REDIRECT. It can only be used in the mangle table and is useful
to redirect traffic to a transparent proxy. It does _not_ depend
on Netfilter connection tracking and NAT, unlike REDIRECT.
+ For it to work you will have to configure certain iptables rules
+ and use policy routing. For more information on how to set it up
+ see Documentation/networking/tproxy.txt.
To compile it as a module, choose M here. If unsure, say N.
@@ -1180,10 +1172,10 @@ config NETFILTER_XT_MATCH_SCTP
config NETFILTER_XT_MATCH_SOCKET
tristate '"socket" match support'
- depends on NETFILTER_TPROXY
depends on NETFILTER_XTABLES
depends on NETFILTER_ADVANCED
depends on !NF_CONNTRACK || NF_CONNTRACK
+ depends on (IPV6 || IPV6=n)
select NF_DEFRAG_IPV4
select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
help
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index a1abf87d43b..c3a0a12907f 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -1,6 +1,6 @@
netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
-nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o
+nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o nf_conntrack_seqadj.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMEOUT) += nf_conntrack_timeout.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
@@ -61,8 +61,8 @@ obj-$(CONFIG_NF_NAT_IRC) += nf_nat_irc.o
obj-$(CONFIG_NF_NAT_SIP) += nf_nat_sip.o
obj-$(CONFIG_NF_NAT_TFTP) += nf_nat_tftp.o
-# transparent proxy support
-obj-$(CONFIG_NETFILTER_TPROXY) += nf_tproxy_core.o
+# SYNPROXY
+obj-$(CONFIG_NETFILTER_SYNPROXY) += nf_synproxy_core.o
# generic X tables
obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 2217363ab42..593b16ea45e 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -234,12 +234,13 @@ EXPORT_SYMBOL(skb_make_writable);
/* This does not belong here, but locally generated errors need it if connection
 tracking is in use: without this, the connection may not be in the hash table, and hence
manufactured ICMP or RST packets will not be associated with it. */
-void (*ip_ct_attach)(struct sk_buff *, struct sk_buff *) __rcu __read_mostly;
+void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
+ __rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);
-void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb)
+void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
- void (*attach)(struct sk_buff *, struct sk_buff *);
+ void (*attach)(struct sk_buff *, const struct sk_buff *);
if (skb->nfct) {
rcu_read_lock();
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 3cd85b2fc67..5199448697f 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -414,7 +414,7 @@ static void ip_vs_lblcr_flush(struct ip_vs_service *svc)
spin_lock_bh(&svc->sched_lock);
tbl->dead = 1;
- for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+ for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
hlist_for_each_entry_safe(en, next, &tbl->bucket[i], list) {
ip_vs_lblcr_free(en);
}
@@ -440,7 +440,7 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
struct ip_vs_lblcr_entry *en;
struct hlist_node *next;
- for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+ for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
spin_lock(&svc->sched_lock);
@@ -495,7 +495,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
if (goal > tbl->max_size/2)
goal = tbl->max_size/2;
- for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+ for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
spin_lock(&svc->sched_lock);
@@ -536,7 +536,7 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
/*
* Initialize the hash buckets
*/
- for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+ for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
INIT_HLIST_HEAD(&tbl->bucket[i]);
}
tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index 3c0da872803..23e596e438b 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -66,15 +66,7 @@ sctp_conn_schedule(int af, struct sk_buff *skb, struct ip_vs_proto_data *pd,
static void sctp_nat_csum(struct sk_buff *skb, sctp_sctphdr_t *sctph,
unsigned int sctphoff)
{
- __u32 crc32;
- struct sk_buff *iter;
-
- crc32 = sctp_start_cksum((__u8 *)sctph, skb_headlen(skb) - sctphoff);
- skb_walk_frags(skb, iter)
- crc32 = sctp_update_cksum((u8 *) iter->data,
- skb_headlen(iter), crc32);
- sctph->checksum = sctp_end_cksum(crc32);
-
+ sctph->checksum = sctp_compute_cksum(skb, sctphoff);
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
@@ -151,10 +143,7 @@ sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
{
unsigned int sctphoff;
struct sctphdr *sh, _sctph;
- struct sk_buff *iter;
- __le32 cmp;
- __le32 val;
- __u32 tmp;
+ __le32 cmp, val;
#ifdef CONFIG_IP_VS_IPV6
if (af == AF_INET6)
@@ -168,13 +157,7 @@ sctp_csum_check(int af, struct sk_buff *skb, struct ip_vs_protocol *pp)
return 0;
cmp = sh->checksum;
-
- tmp = sctp_start_cksum((__u8 *) sh, skb_headlen(skb));
- skb_walk_frags(skb, iter)
- tmp = sctp_update_cksum((__u8 *) iter->data,
- skb_headlen(iter), tmp);
-
- val = sctp_end_cksum(tmp);
+ val = sctp_compute_cksum(skb, sctphoff);
if (val != cmp) {
/* CRC failure, dump it. */
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index f16c027df15..3588faebe52 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -269,14 +269,20 @@ ip_vs_sh_get_port(const struct sk_buff *skb, struct ip_vs_iphdr *iph)
switch (iph->protocol) {
case IPPROTO_TCP:
th = skb_header_pointer(skb, iph->len, sizeof(_tcph), &_tcph);
+ if (unlikely(th == NULL))
+ return 0;
port = th->source;
break;
case IPPROTO_UDP:
uh = skb_header_pointer(skb, iph->len, sizeof(_udph), &_udph);
+ if (unlikely(uh == NULL))
+ return 0;
port = uh->source;
break;
case IPPROTO_SCTP:
sh = skb_header_pointer(skb, iph->len, sizeof(_sctph), &_sctph);
+ if (unlikely(sh == NULL))
+ return 0;
port = sh->source;
break;
default:
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 0283baedcdf..5d892febd64 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -39,6 +39,7 @@
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
@@ -47,6 +48,7 @@
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_labels.h>
+#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
@@ -238,7 +240,7 @@ destroy_conntrack(struct nf_conntrack *nfct)
nf_conntrack_free(ct);
}
-void nf_ct_delete_from_lists(struct nf_conn *ct)
+static void nf_ct_delete_from_lists(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
@@ -253,7 +255,6 @@ void nf_ct_delete_from_lists(struct nf_conn *ct)
&net->ct.dying);
spin_unlock_bh(&nf_conntrack_lock);
}
-EXPORT_SYMBOL_GPL(nf_ct_delete_from_lists);
static void death_by_event(unsigned long ul_conntrack)
{
@@ -275,7 +276,7 @@ static void death_by_event(unsigned long ul_conntrack)
nf_ct_put(ct);
}
-void nf_ct_dying_timeout(struct nf_conn *ct)
+static void nf_ct_dying_timeout(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
@@ -288,27 +289,33 @@ void nf_ct_dying_timeout(struct nf_conn *ct)
(prandom_u32() % net->ct.sysctl_events_retry_timeout);
add_timer(&ecache->timeout);
}
-EXPORT_SYMBOL_GPL(nf_ct_dying_timeout);
-static void death_by_timeout(unsigned long ul_conntrack)
+bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{
- struct nf_conn *ct = (void *)ul_conntrack;
struct nf_conn_tstamp *tstamp;
tstamp = nf_conn_tstamp_find(ct);
if (tstamp && tstamp->stop == 0)
tstamp->stop = ktime_to_ns(ktime_get_real());
- if (!test_bit(IPS_DYING_BIT, &ct->status) &&
- unlikely(nf_conntrack_event(IPCT_DESTROY, ct) < 0)) {
+ if (!nf_ct_is_dying(ct) &&
+ unlikely(nf_conntrack_event_report(IPCT_DESTROY, ct,
+ portid, report) < 0)) {
/* destroy event was not delivered */
nf_ct_delete_from_lists(ct);
nf_ct_dying_timeout(ct);
- return;
+ return false;
}
set_bit(IPS_DYING_BIT, &ct->status);
nf_ct_delete_from_lists(ct);
nf_ct_put(ct);
+ return true;
+}
+EXPORT_SYMBOL_GPL(nf_ct_delete);
+
+static void death_by_timeout(unsigned long ul_conntrack)
+{
+ nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
}
/*
@@ -643,10 +650,7 @@ static noinline int early_drop(struct net *net, unsigned int hash)
return dropped;
if (del_timer(&ct->timeout)) {
- death_by_timeout((unsigned long)ct);
- /* Check if we indeed killed this entry. Reliable event
- delivery may have inserted it into the dying list. */
- if (test_bit(IPS_DYING_BIT, &ct->status)) {
+ if (nf_ct_delete(ct, 0, 0)) {
dropped = 1;
NF_CT_STAT_INC_ATOMIC(net, early_drop);
}
@@ -796,6 +800,11 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
if (IS_ERR(ct))
return (struct nf_conntrack_tuple_hash *)ct;
+ if (tmpl && nfct_synproxy(tmpl)) {
+ nfct_seqadj_ext_add(ct);
+ nfct_synproxy_ext_add(ct);
+ }
+
timeout_ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;
if (timeout_ext)
timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
@@ -1192,7 +1201,7 @@ EXPORT_SYMBOL_GPL(nf_ct_port_nlattr_tuple_size);
#endif
/* Used by ipt_REJECT and ip6t_REJECT. */
-static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
+static void nf_conntrack_attach(struct sk_buff *nskb, const struct sk_buff *skb)
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
@@ -1244,7 +1253,7 @@ found:
void nf_ct_iterate_cleanup(struct net *net,
int (*iter)(struct nf_conn *i, void *data),
- void *data)
+ void *data, u32 portid, int report)
{
struct nf_conn *ct;
unsigned int bucket = 0;
@@ -1252,7 +1261,8 @@ void nf_ct_iterate_cleanup(struct net *net,
while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
/* Time to push up daises... */
if (del_timer(&ct->timeout))
- death_by_timeout((unsigned long)ct);
+ nf_ct_delete(ct, portid, report);
+
/* ... else the timer will get him soon. */
nf_ct_put(ct);
@@ -1260,30 +1270,6 @@ void nf_ct_iterate_cleanup(struct net *net,
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
-struct __nf_ct_flush_report {
- u32 portid;
- int report;
-};
-
-static int kill_report(struct nf_conn *i, void *data)
-{
- struct __nf_ct_flush_report *fr = (struct __nf_ct_flush_report *)data;
- struct nf_conn_tstamp *tstamp;
-
- tstamp = nf_conn_tstamp_find(i);
- if (tstamp && tstamp->stop == 0)
- tstamp->stop = ktime_to_ns(ktime_get_real());
-
- /* If we fail to deliver the event, death_by_timeout() will retry */
- if (nf_conntrack_event_report(IPCT_DESTROY, i,
- fr->portid, fr->report) < 0)
- return 1;
-
- /* Avoid the delivery of the destroy event in death_by_timeout(). */
- set_bit(IPS_DYING_BIT, &i->status);
- return 1;
-}
-
static int kill_all(struct nf_conn *i, void *data)
{
return 1;
@@ -1301,11 +1287,7 @@ EXPORT_SYMBOL_GPL(nf_ct_free_hashtable);
void nf_conntrack_flush_report(struct net *net, u32 portid, int report)
{
- struct __nf_ct_flush_report fr = {
- .portid = portid,
- .report = report,
- };
- nf_ct_iterate_cleanup(net, kill_report, &fr);
+ nf_ct_iterate_cleanup(net, kill_all, NULL, portid, report);
}
EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
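With the dedicated flush callback gone, nf_ct_iterate_cleanup() now carries
the netlink portid/report through to nf_ct_delete(), which emits the destroy
event itself. A hedged sketch of the new calling convention for an in-kernel
caller (drop_in_zone() and MY_ZONE are illustrative, not part of this patch):

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_zones.h>

#define MY_ZONE	1	/* illustrative zone id */

/* returning non-zero asks the iterator to delete the entry */
static int drop_in_zone(struct nf_conn *ct, void *data)
{
	return nf_ct_zone(ct) == *(u16 *)data;
}

static void foo_flush_zone(struct net *net)
{
	u16 zone = MY_ZONE;

	/* no netlink origin here, so portid/report are 0 */
	nf_ct_iterate_cleanup(net, drop_in_zone, &zone, 0, 0);
}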
@@ -1351,6 +1333,7 @@ void nf_conntrack_cleanup_end(void)
nf_ct_extend_unregister(&nf_ct_zone_extend);
#endif
nf_conntrack_proto_fini();
+ nf_conntrack_seqadj_fini();
nf_conntrack_labels_fini();
nf_conntrack_helper_fini();
nf_conntrack_timeout_fini();
@@ -1386,7 +1369,7 @@ void nf_conntrack_cleanup_net_list(struct list_head *net_exit_list)
i_see_dead_people:
busy = 0;
list_for_each_entry(net, net_exit_list, exit_list) {
- nf_ct_iterate_cleanup(net, kill_all, NULL);
+ nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
nf_ct_release_dying_list(net);
if (atomic_read(&net->ct.count) != 0)
busy = 1;
@@ -1556,6 +1539,10 @@ int nf_conntrack_init_start(void)
if (ret < 0)
goto err_labels;
+ ret = nf_conntrack_seqadj_init();
+ if (ret < 0)
+ goto err_seqadj;
+
#ifdef CONFIG_NF_CONNTRACK_ZONES
ret = nf_ct_extend_register(&nf_ct_zone_extend);
if (ret < 0)
@@ -1580,6 +1567,8 @@ err_proto:
nf_ct_extend_unregister(&nf_ct_zone_extend);
err_extend:
#endif
+ nf_conntrack_seqadj_fini();
+err_seqadj:
nf_conntrack_labels_fini();
err_labels:
nf_conntrack_helper_fini();
@@ -1602,9 +1591,6 @@ void nf_conntrack_init_end(void)
/* For use by REJECT target */
RCU_INIT_POINTER(ip_ct_attach, nf_conntrack_attach);
RCU_INIT_POINTER(nf_ct_destroy, destroy_conntrack);
-
- /* Howto get NAT offsets */
- RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
}
/*
@@ -1691,8 +1677,3 @@ err_slabname:
err_stat:
return ret;
}
-
-s16 (*nf_ct_nat_offset)(const struct nf_conn *ct,
- enum ip_conntrack_dir dir,
- u32 seq);
-EXPORT_SYMBOL_GPL(nf_ct_nat_offset);
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index c63b618cd61..4fd1ca94fd4 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -293,6 +293,11 @@ void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
sizeof(exp->tuple.dst.u3) - len);
exp->tuple.dst.u.all = *dst;
+
+#ifdef CONFIG_NF_NAT_NEEDED
+ memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
+ memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
+#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);
diff --git a/net/netfilter/nf_conntrack_labels.c b/net/netfilter/nf_conntrack_labels.c
index 355d2ef0809..bb53f120e79 100644
--- a/net/netfilter/nf_conntrack_labels.c
+++ b/net/netfilter/nf_conntrack_labels.c
@@ -8,12 +8,8 @@
* published by the Free Software Foundation.
*/
-#include <linux/ctype.h>
#include <linux/export.h>
-#include <linux/jhash.h>
-#include <linux/spinlock.h>
#include <linux/types.h>
-#include <linux/slab.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_labels.h>
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index edc410e778f..eea936b70d1 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -37,6 +37,7 @@
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
@@ -381,9 +382,8 @@ nla_put_failure:
return -1;
}
-#ifdef CONFIG_NF_NAT_NEEDED
static int
-dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type)
+dump_ct_seq_adj(struct sk_buff *skb, const struct nf_ct_seqadj *seq, int type)
{
struct nlattr *nest_parms;
@@ -391,12 +391,12 @@ dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type)
if (!nest_parms)
goto nla_put_failure;
- if (nla_put_be32(skb, CTA_NAT_SEQ_CORRECTION_POS,
- htonl(natseq->correction_pos)) ||
- nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
- htonl(natseq->offset_before)) ||
- nla_put_be32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
- htonl(natseq->offset_after)))
+ if (nla_put_be32(skb, CTA_SEQADJ_CORRECTION_POS,
+ htonl(seq->correction_pos)) ||
+ nla_put_be32(skb, CTA_SEQADJ_OFFSET_BEFORE,
+ htonl(seq->offset_before)) ||
+ nla_put_be32(skb, CTA_SEQADJ_OFFSET_AFTER,
+ htonl(seq->offset_after)))
goto nla_put_failure;
nla_nest_end(skb, nest_parms);
@@ -408,27 +408,24 @@ nla_put_failure:
}
static inline int
-ctnetlink_dump_nat_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
+ctnetlink_dump_ct_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
{
- struct nf_nat_seq *natseq;
- struct nf_conn_nat *nat = nfct_nat(ct);
+ struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
+ struct nf_ct_seqadj *seq;
- if (!(ct->status & IPS_SEQ_ADJUST) || !nat)
+ if (!(ct->status & IPS_SEQ_ADJUST) || !seqadj)
return 0;
- natseq = &nat->seq[IP_CT_DIR_ORIGINAL];
- if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_ORIG) == -1)
+ seq = &seqadj->seq[IP_CT_DIR_ORIGINAL];
+ if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_ORIG) == -1)
return -1;
- natseq = &nat->seq[IP_CT_DIR_REPLY];
- if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_REPLY) == -1)
+ seq = &seqadj->seq[IP_CT_DIR_REPLY];
+ if (dump_ct_seq_adj(skb, seq, CTA_SEQ_ADJ_REPLY) == -1)
return -1;
return 0;
}
-#else
-#define ctnetlink_dump_nat_seq_adj(a, b) (0)
-#endif
static inline int
ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
@@ -502,7 +499,7 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
ctnetlink_dump_id(skb, ct) < 0 ||
ctnetlink_dump_use(skb, ct) < 0 ||
ctnetlink_dump_master(skb, ct) < 0 ||
- ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
+ ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
goto nla_put_failure;
nlmsg_end(skb, nlh);
@@ -707,8 +704,8 @@ ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
ctnetlink_dump_master(skb, ct) < 0)
goto nla_put_failure;
- if (events & (1 << IPCT_NATSEQADJ) &&
- ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
+ if (events & (1 << IPCT_SEQADJ) &&
+ ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
goto nla_put_failure;
}
@@ -1038,21 +1035,9 @@ ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
}
}
- if (del_timer(&ct->timeout)) {
- if (nf_conntrack_event_report(IPCT_DESTROY, ct,
- NETLINK_CB(skb).portid,
- nlmsg_report(nlh)) < 0) {
- nf_ct_delete_from_lists(ct);
- /* we failed to report the event, try later */
- nf_ct_dying_timeout(ct);
- nf_ct_put(ct);
- return 0;
- }
- /* death_by_timeout would report the event again */
- set_bit(IPS_DYING_BIT, &ct->status);
- nf_ct_delete_from_lists(ct);
- nf_ct_put(ct);
- }
+ if (del_timer(&ct->timeout))
+ nf_ct_delete(ct, NETLINK_CB(skb).portid, nlmsg_report(nlh));
+
nf_ct_put(ct);
return 0;
@@ -1451,66 +1436,65 @@ ctnetlink_change_protoinfo(struct nf_conn *ct, const struct nlattr * const cda[]
return err;
}
-#ifdef CONFIG_NF_NAT_NEEDED
-static const struct nla_policy nat_seq_policy[CTA_NAT_SEQ_MAX+1] = {
- [CTA_NAT_SEQ_CORRECTION_POS] = { .type = NLA_U32 },
- [CTA_NAT_SEQ_OFFSET_BEFORE] = { .type = NLA_U32 },
- [CTA_NAT_SEQ_OFFSET_AFTER] = { .type = NLA_U32 },
+static const struct nla_policy seqadj_policy[CTA_SEQADJ_MAX+1] = {
+ [CTA_SEQADJ_CORRECTION_POS] = { .type = NLA_U32 },
+ [CTA_SEQADJ_OFFSET_BEFORE] = { .type = NLA_U32 },
+ [CTA_SEQADJ_OFFSET_AFTER] = { .type = NLA_U32 },
};
static inline int
-change_nat_seq_adj(struct nf_nat_seq *natseq, const struct nlattr * const attr)
+change_seq_adj(struct nf_ct_seqadj *seq, const struct nlattr * const attr)
{
int err;
- struct nlattr *cda[CTA_NAT_SEQ_MAX+1];
+ struct nlattr *cda[CTA_SEQADJ_MAX+1];
- err = nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, nat_seq_policy);
+ err = nla_parse_nested(cda, CTA_SEQADJ_MAX, attr, seqadj_policy);
if (err < 0)
return err;
- if (!cda[CTA_NAT_SEQ_CORRECTION_POS])
+ if (!cda[CTA_SEQADJ_CORRECTION_POS])
return -EINVAL;
- natseq->correction_pos =
- ntohl(nla_get_be32(cda[CTA_NAT_SEQ_CORRECTION_POS]));
+ seq->correction_pos =
+ ntohl(nla_get_be32(cda[CTA_SEQADJ_CORRECTION_POS]));
- if (!cda[CTA_NAT_SEQ_OFFSET_BEFORE])
+ if (!cda[CTA_SEQADJ_OFFSET_BEFORE])
return -EINVAL;
- natseq->offset_before =
- ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_BEFORE]));
+ seq->offset_before =
+ ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_BEFORE]));
- if (!cda[CTA_NAT_SEQ_OFFSET_AFTER])
+ if (!cda[CTA_SEQADJ_OFFSET_AFTER])
return -EINVAL;
- natseq->offset_after =
- ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_AFTER]));
+ seq->offset_after =
+ ntohl(nla_get_be32(cda[CTA_SEQADJ_OFFSET_AFTER]));
return 0;
}
static int
-ctnetlink_change_nat_seq_adj(struct nf_conn *ct,
- const struct nlattr * const cda[])
+ctnetlink_change_seq_adj(struct nf_conn *ct,
+ const struct nlattr * const cda[])
{
+ struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
int ret = 0;
- struct nf_conn_nat *nat = nfct_nat(ct);
- if (!nat)
+ if (!seqadj)
return 0;
- if (cda[CTA_NAT_SEQ_ADJ_ORIG]) {
- ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_ORIGINAL],
- cda[CTA_NAT_SEQ_ADJ_ORIG]);
+ if (cda[CTA_SEQ_ADJ_ORIG]) {
+ ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_ORIGINAL],
+ cda[CTA_SEQ_ADJ_ORIG]);
if (ret < 0)
return ret;
ct->status |= IPS_SEQ_ADJUST;
}
- if (cda[CTA_NAT_SEQ_ADJ_REPLY]) {
- ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_REPLY],
- cda[CTA_NAT_SEQ_ADJ_REPLY]);
+ if (cda[CTA_SEQ_ADJ_REPLY]) {
+ ret = change_seq_adj(&seqadj->seq[IP_CT_DIR_REPLY],
+ cda[CTA_SEQ_ADJ_REPLY]);
if (ret < 0)
return ret;
@@ -1519,7 +1503,6 @@ ctnetlink_change_nat_seq_adj(struct nf_conn *ct,
return 0;
}
-#endif
static int
ctnetlink_attach_labels(struct nf_conn *ct, const struct nlattr * const cda[])
@@ -1585,13 +1568,12 @@ ctnetlink_change_conntrack(struct nf_conn *ct,
ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
#endif
-#ifdef CONFIG_NF_NAT_NEEDED
- if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
- err = ctnetlink_change_nat_seq_adj(ct, cda);
+ if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
+ err = ctnetlink_change_seq_adj(ct, cda);
if (err < 0)
return err;
}
-#endif
+
if (cda[CTA_LABELS]) {
err = ctnetlink_attach_labels(ct, cda);
if (err < 0)
@@ -1696,13 +1678,11 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
goto err2;
}
-#ifdef CONFIG_NF_NAT_NEEDED
- if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
- err = ctnetlink_change_nat_seq_adj(ct, cda);
+ if (cda[CTA_SEQ_ADJ_ORIG] || cda[CTA_SEQ_ADJ_REPLY]) {
+ err = ctnetlink_change_seq_adj(ct, cda);
if (err < 0)
goto err2;
}
-#endif
memset(&ct->proto, 0, sizeof(ct->proto));
if (cda[CTA_PROTOINFO]) {
@@ -1816,7 +1796,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
(1 << IPCT_ASSURED) |
(1 << IPCT_HELPER) |
(1 << IPCT_PROTOINFO) |
- (1 << IPCT_NATSEQADJ) |
+ (1 << IPCT_SEQADJ) |
(1 << IPCT_MARK) | events,
ct, NETLINK_CB(skb).portid,
nlmsg_report(nlh));
@@ -1839,7 +1819,7 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
(1 << IPCT_HELPER) |
(1 << IPCT_LABEL) |
(1 << IPCT_PROTOINFO) |
- (1 << IPCT_NATSEQADJ) |
+ (1 << IPCT_SEQADJ) |
(1 << IPCT_MARK),
ct, NETLINK_CB(skb).portid,
nlmsg_report(nlh));
@@ -1999,6 +1979,27 @@ out:
return err == -EAGAIN ? -ENOBUFS : err;
}
+static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
+ [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
+ [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
+ [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
+ [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
+ [CTA_EXPECT_ID] = { .type = NLA_U32 },
+ [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING,
+ .len = NF_CT_HELPER_NAME_LEN - 1 },
+ [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
+ [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
+ [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
+ [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
+ [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
+};
+
+static struct nf_conntrack_expect *
+ctnetlink_alloc_expect(const struct nlattr *const cda[], struct nf_conn *ct,
+ struct nf_conntrack_helper *helper,
+ struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_tuple *mask);
+
#ifdef CONFIG_NETFILTER_NETLINK_QUEUE_CT
static size_t
ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
@@ -2073,7 +2074,7 @@ ctnetlink_nfqueue_build(struct sk_buff *skb, struct nf_conn *ct)
goto nla_put_failure;
if ((ct->status & IPS_SEQ_ADJUST) &&
- ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
+ ctnetlink_dump_ct_seq_adj(skb, ct) < 0)
goto nla_put_failure;
#ifdef CONFIG_NF_CONNTRACK_MARK
@@ -2139,10 +2140,70 @@ ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct)
return ret;
}
+static int ctnetlink_nfqueue_exp_parse(const struct nlattr * const *cda,
+ const struct nf_conn *ct,
+ struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_tuple *mask)
+{
+ int err;
+
+ err = ctnetlink_parse_tuple(cda, tuple, CTA_EXPECT_TUPLE,
+ nf_ct_l3num(ct));
+ if (err < 0)
+ return err;
+
+ return ctnetlink_parse_tuple(cda, mask, CTA_EXPECT_MASK,
+ nf_ct_l3num(ct));
+}
+
+static int
+ctnetlink_nfqueue_attach_expect(const struct nlattr *attr, struct nf_conn *ct,
+ u32 portid, u32 report)
+{
+ struct nlattr *cda[CTA_EXPECT_MAX+1];
+ struct nf_conntrack_tuple tuple, mask;
+ struct nf_conntrack_helper *helper = NULL;
+ struct nf_conntrack_expect *exp;
+ int err;
+
+ err = nla_parse_nested(cda, CTA_EXPECT_MAX, attr, exp_nla_policy);
+ if (err < 0)
+ return err;
+
+ err = ctnetlink_nfqueue_exp_parse((const struct nlattr * const *)cda,
+ ct, &tuple, &mask);
+ if (err < 0)
+ return err;
+
+ if (cda[CTA_EXPECT_HELP_NAME]) {
+ const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
+
+ helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
+ nf_ct_protonum(ct));
+ if (helper == NULL)
+ return -EOPNOTSUPP;
+ }
+
+ exp = ctnetlink_alloc_expect((const struct nlattr * const *)cda, ct,
+ helper, &tuple, &mask);
+ if (IS_ERR(exp))
+ return PTR_ERR(exp);
+
+ err = nf_ct_expect_related_report(exp, portid, report);
+ if (err < 0) {
+ nf_ct_expect_put(exp);
+ return err;
+ }
+
+ return 0;
+}
+
static struct nfq_ct_hook ctnetlink_nfqueue_hook = {
.build_size = ctnetlink_nfqueue_build_size,
.build = ctnetlink_nfqueue_build,
.parse = ctnetlink_nfqueue_parse,
+ .attach_expect = ctnetlink_nfqueue_attach_expect,
+ .seq_adjust = nf_ct_tcp_seqadj_set,
};
#endif /* CONFIG_NETFILTER_NETLINK_QUEUE_CT */
@@ -2510,21 +2571,6 @@ static int ctnetlink_dump_exp_ct(struct sock *ctnl, struct sk_buff *skb,
return err;
}
-static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
- [CTA_EXPECT_MASTER] = { .type = NLA_NESTED },
- [CTA_EXPECT_TUPLE] = { .type = NLA_NESTED },
- [CTA_EXPECT_MASK] = { .type = NLA_NESTED },
- [CTA_EXPECT_TIMEOUT] = { .type = NLA_U32 },
- [CTA_EXPECT_ID] = { .type = NLA_U32 },
- [CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING,
- .len = NF_CT_HELPER_NAME_LEN - 1 },
- [CTA_EXPECT_ZONE] = { .type = NLA_U16 },
- [CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
- [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
- [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
- [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
-};
-
static int
ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
const struct nlmsghdr *nlh,
@@ -2747,76 +2793,26 @@ ctnetlink_parse_expect_nat(const struct nlattr *attr,
#endif
}
-static int
-ctnetlink_create_expect(struct net *net, u16 zone,
- const struct nlattr * const cda[],
- u_int8_t u3,
- u32 portid, int report)
+static struct nf_conntrack_expect *
+ctnetlink_alloc_expect(const struct nlattr * const cda[], struct nf_conn *ct,
+ struct nf_conntrack_helper *helper,
+ struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_tuple *mask)
{
- struct nf_conntrack_tuple tuple, mask, master_tuple;
- struct nf_conntrack_tuple_hash *h = NULL;
+ u_int32_t class = 0;
struct nf_conntrack_expect *exp;
- struct nf_conn *ct;
struct nf_conn_help *help;
- struct nf_conntrack_helper *helper = NULL;
- u_int32_t class = 0;
- int err = 0;
-
- /* caller guarantees that those three CTA_EXPECT_* exist */
- err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
- if (err < 0)
- return err;
- err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
- if (err < 0)
- return err;
- err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
- if (err < 0)
- return err;
-
- /* Look for master conntrack of this expectation */
- h = nf_conntrack_find_get(net, zone, &master_tuple);
- if (!h)
- return -ENOENT;
- ct = nf_ct_tuplehash_to_ctrack(h);
-
- /* Look for helper of this expectation */
- if (cda[CTA_EXPECT_HELP_NAME]) {
- const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
-
- helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
- nf_ct_protonum(ct));
- if (helper == NULL) {
-#ifdef CONFIG_MODULES
- if (request_module("nfct-helper-%s", helpname) < 0) {
- err = -EOPNOTSUPP;
- goto out;
- }
-
- helper = __nf_conntrack_helper_find(helpname,
- nf_ct_l3num(ct),
- nf_ct_protonum(ct));
- if (helper) {
- err = -EAGAIN;
- goto out;
- }
-#endif
- err = -EOPNOTSUPP;
- goto out;
- }
- }
+ int err;
if (cda[CTA_EXPECT_CLASS] && helper) {
class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
- if (class > helper->expect_class_max) {
- err = -EINVAL;
- goto out;
- }
+ if (class > helper->expect_class_max)
+ return ERR_PTR(-EINVAL);
}
exp = nf_ct_expect_alloc(ct);
- if (!exp) {
- err = -ENOMEM;
- goto out;
- }
+ if (!exp)
+ return ERR_PTR(-ENOMEM);
+
help = nfct_help(ct);
if (!help) {
if (!cda[CTA_EXPECT_TIMEOUT]) {
@@ -2854,21 +2850,89 @@ ctnetlink_create_expect(struct net *net, u16 zone,
exp->class = class;
exp->master = ct;
exp->helper = helper;
- memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple));
- memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3));
- exp->mask.src.u.all = mask.src.u.all;
+ exp->tuple = *tuple;
+ exp->mask.src.u3 = mask->src.u3;
+ exp->mask.src.u.all = mask->src.u.all;
if (cda[CTA_EXPECT_NAT]) {
err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
- exp, u3);
+ exp, nf_ct_l3num(ct));
if (err < 0)
goto err_out;
}
- err = nf_ct_expect_related_report(exp, portid, report);
+ return exp;
err_out:
nf_ct_expect_put(exp);
-out:
- nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
+ return ERR_PTR(err);
+}
+
+static int
+ctnetlink_create_expect(struct net *net, u16 zone,
+ const struct nlattr * const cda[],
+ u_int8_t u3, u32 portid, int report)
+{
+ struct nf_conntrack_tuple tuple, mask, master_tuple;
+ struct nf_conntrack_tuple_hash *h = NULL;
+ struct nf_conntrack_helper *helper = NULL;
+ struct nf_conntrack_expect *exp;
+ struct nf_conn *ct;
+ int err;
+
+ /* caller guarantees that those three CTA_EXPECT_* exist */
+ err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
+ if (err < 0)
+ return err;
+ err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
+ if (err < 0)
+ return err;
+ err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
+ if (err < 0)
+ return err;
+
+ /* Look for master conntrack of this expectation */
+ h = nf_conntrack_find_get(net, zone, &master_tuple);
+ if (!h)
+ return -ENOENT;
+ ct = nf_ct_tuplehash_to_ctrack(h);
+
+ if (cda[CTA_EXPECT_HELP_NAME]) {
+ const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
+
+ helper = __nf_conntrack_helper_find(helpname, u3,
+ nf_ct_protonum(ct));
+ if (helper == NULL) {
+#ifdef CONFIG_MODULES
+ if (request_module("nfct-helper-%s", helpname) < 0) {
+ err = -EOPNOTSUPP;
+ goto err_ct;
+ }
+ helper = __nf_conntrack_helper_find(helpname, u3,
+ nf_ct_protonum(ct));
+ if (helper) {
+ err = -EAGAIN;
+ goto err_ct;
+ }
+#endif
+ err = -EOPNOTSUPP;
+ goto err_ct;
+ }
+ }
+
+ exp = ctnetlink_alloc_expect(cda, ct, helper, &tuple, &mask);
+ if (IS_ERR(exp)) {
+ err = PTR_ERR(exp);
+ goto err_ct;
+ }
+
+ err = nf_ct_expect_related_report(exp, portid, report);
+ if (err < 0)
+ goto err_exp;
+
+ return 0;
+err_exp:
+ nf_ct_expect_put(exp);
+err_ct:
+ nf_ct_put(ct);
return err;
}
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index 0ab9636ac57..ce3004156ee 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -281,7 +281,7 @@ void nf_ct_l3proto_pernet_unregister(struct net *net,
nf_ct_l3proto_unregister_sysctl(net, proto);
 /* Remove all conntrack entries for this protocol */
- nf_ct_iterate_cleanup(net, kill_l3proto, proto);
+ nf_ct_iterate_cleanup(net, kill_l3proto, proto, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_ct_l3proto_pernet_unregister);
@@ -476,7 +476,7 @@ void nf_ct_l4proto_pernet_unregister(struct net *net,
nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
 /* Remove all conntrack entries for this protocol */
- nf_ct_iterate_cleanup(net, kill_l4proto, l4proto);
+ nf_ct_iterate_cleanup(net, kill_l4proto, l4proto, 0, 0);
}
EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister);
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 7dcc376eea5..44d1ea32570 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -27,6 +27,8 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_log.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
@@ -495,21 +497,6 @@ static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
}
}
-#ifdef CONFIG_NF_NAT_NEEDED
-static inline s16 nat_offset(const struct nf_conn *ct,
- enum ip_conntrack_dir dir,
- u32 seq)
-{
- typeof(nf_ct_nat_offset) get_offset = rcu_dereference(nf_ct_nat_offset);
-
- return get_offset != NULL ? get_offset(ct, dir, seq) : 0;
-}
-#define NAT_OFFSET(ct, dir, seq) \
- (nat_offset(ct, dir, seq))
-#else
-#define NAT_OFFSET(ct, dir, seq) 0
-#endif
-
static bool tcp_in_window(const struct nf_conn *ct,
struct ip_ct_tcp *state,
enum ip_conntrack_dir dir,
@@ -525,8 +512,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
__u32 seq, ack, sack, end, win, swin;
- s16 receiver_offset;
- bool res;
+ s32 receiver_offset;
+ bool res, in_recv_win;
/*
* Get the required data from the packet.
@@ -540,7 +527,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
tcp_sack(skb, dataoff, tcph, &sack);
/* Take into account NAT sequence number mangling */
- receiver_offset = NAT_OFFSET(ct, !dir, ack - 1);
+ receiver_offset = nf_ct_seq_offset(ct, !dir, ack - 1);
ack -= receiver_offset;
sack -= receiver_offset;
@@ -649,14 +636,18 @@ static bool tcp_in_window(const struct nf_conn *ct,
receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
receiver->td_scale);
+ /* Is the ending sequence in the receive window (if available)? */
+ in_recv_win = !receiver->td_maxwin ||
+ after(end, sender->td_end - receiver->td_maxwin - 1);
+
pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
before(seq, sender->td_maxend + 1),
- after(end, sender->td_end - receiver->td_maxwin - 1),
+ (in_recv_win ? 1 : 0),
before(sack, receiver->td_end + 1),
after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1));
if (before(seq, sender->td_maxend + 1) &&
- after(end, sender->td_end - receiver->td_maxwin - 1) &&
+ in_recv_win &&
before(sack, receiver->td_end + 1) &&
after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) {
/*
@@ -725,7 +716,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
"nf_ct_tcp: %s ",
before(seq, sender->td_maxend + 1) ?
- after(end, sender->td_end - receiver->td_maxwin - 1) ?
+ in_recv_win ?
before(sack, receiver->td_end + 1) ?
after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG"
: "ACK is under the lower bound (possible overly delayed ACK)"
@@ -956,6 +947,21 @@ static int tcp_packet(struct nf_conn *ct,
"state %s ", tcp_conntrack_names[old_state]);
return NF_ACCEPT;
case TCP_CONNTRACK_MAX:
+ /* Special case for SYN proxy: when the SYN to the server or
+ * the SYN/ACK from the server is lost, the client may transmit
+ * a keep-alive packet while in SYN_SENT state. This needs to
+ * be associated with the original conntrack entry in order to
+ * generate a new SYN with the correct sequence number.
+ */
+ if (nfct_synproxy(ct) && old_state == TCP_CONNTRACK_SYN_SENT &&
+ index == TCP_ACK_SET && dir == IP_CT_DIR_ORIGINAL &&
+ ct->proto.tcp.last_dir == IP_CT_DIR_ORIGINAL &&
+ ct->proto.tcp.seen[dir].td_end - 1 == ntohl(th->seq)) {
+ pr_debug("nf_ct_tcp: SYN proxy client keep alive\n");
+ spin_unlock_bh(&ct->lock);
+ return NF_ACCEPT;
+ }
+
/* Invalid packet */
pr_debug("nf_ct_tcp: Invalid dir=%i index=%u ostate=%u\n",
dir, get_conntrack_index(th), old_state);
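The keep-alive special case above relies on the fact that a TCP keep-alive
probe carries a sequence number one below the last byte the receiver has
acknowledged, i.e. td_end - 1 of the tracked direction. A standalone model of
the test (toy types, not kernel code; the value 1 stands in for
TCP_CONNTRACK_SYN_SENT):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_ct {
	bool	 synproxy;	/* conntrack has the SYN proxy extension */
	int	 old_state;	/* 1 == SYN_SENT in this toy model */
	int	 last_dir;	/* 0 == ORIGINAL */
	uint32_t td_end;	/* end of the data seen from the client */
};

static bool synproxy_keepalive(const struct toy_ct *ct, int dir,
			       bool ack_set, uint32_t seq)
{
	return ct->synproxy && ct->old_state == 1 && ack_set &&
	       dir == 0 && ct->last_dir == 0 && ct->td_end - 1 == seq;
}

int main(void)
{
	struct toy_ct ct = { true, 1, 0, 1001 };

	/* 1: keep-alive at td_end - 1 is accepted; 0: anything else is not */
	printf("%d %d\n", synproxy_keepalive(&ct, 0, true, 1000),
			  synproxy_keepalive(&ct, 0, true, 1001));
	return 0;
}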
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
new file mode 100644
index 00000000000..5f9bfd060de
--- /dev/null
+++ b/net/netfilter/nf_conntrack_seqadj.c
@@ -0,0 +1,238 @@
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <net/tcp.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+
+int nf_ct_seqadj_init(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ s32 off)
+{
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+ struct nf_conn_seqadj *seqadj;
+ struct nf_ct_seqadj *this_way;
+
+ if (off == 0)
+ return 0;
+
+ set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
+
+ seqadj = nfct_seqadj(ct);
+ this_way = &seqadj->seq[dir];
+ this_way->offset_before = off;
+ this_way->offset_after = off;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nf_ct_seqadj_init);
+
+int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ __be32 seq, s32 off)
+{
+ struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+ struct nf_ct_seqadj *this_way;
+
+ if (off == 0)
+ return 0;
+
+ set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
+
+ spin_lock_bh(&ct->lock);
+ this_way = &seqadj->seq[dir];
+ if (this_way->offset_before == this_way->offset_after ||
+ before(this_way->correction_pos, seq)) {
+ this_way->correction_pos = seq;
+ this_way->offset_before = this_way->offset_after;
+ this_way->offset_after += off;
+ }
+ spin_unlock_bh(&ct->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nf_ct_seqadj_set);
+
+void nf_ct_tcp_seqadj_set(struct sk_buff *skb,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ s32 off)
+{
+ const struct tcphdr *th;
+
+ if (nf_ct_protonum(ct) != IPPROTO_TCP)
+ return;
+
+ th = (struct tcphdr *)(skb_network_header(skb) + ip_hdrlen(skb));
+ nf_ct_seqadj_set(ct, ctinfo, th->seq, off);
+}
+EXPORT_SYMBOL_GPL(nf_ct_tcp_seqadj_set);
+
+/* Adjust one found SACK option including checksum correction */
+static void nf_ct_sack_block_adjust(struct sk_buff *skb,
+ struct tcphdr *tcph,
+ unsigned int sackoff,
+ unsigned int sackend,
+ struct nf_ct_seqadj *seq)
+{
+ while (sackoff < sackend) {
+ struct tcp_sack_block_wire *sack;
+ __be32 new_start_seq, new_end_seq;
+
+ sack = (void *)skb->data + sackoff;
+ if (after(ntohl(sack->start_seq) - seq->offset_before,
+ seq->correction_pos))
+ new_start_seq = htonl(ntohl(sack->start_seq) -
+ seq->offset_after);
+ else
+ new_start_seq = htonl(ntohl(sack->start_seq) -
+ seq->offset_before);
+
+ if (after(ntohl(sack->end_seq) - seq->offset_before,
+ seq->correction_pos))
+ new_end_seq = htonl(ntohl(sack->end_seq) -
+ seq->offset_after);
+ else
+ new_end_seq = htonl(ntohl(sack->end_seq) -
+ seq->offset_before);
+
+ pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
+ ntohl(sack->start_seq), new_start_seq,
+ ntohl(sack->end_seq), new_end_seq);
+
+ inet_proto_csum_replace4(&tcph->check, skb,
+ sack->start_seq, new_start_seq, 0);
+ inet_proto_csum_replace4(&tcph->check, skb,
+ sack->end_seq, new_end_seq, 0);
+ sack->start_seq = new_start_seq;
+ sack->end_seq = new_end_seq;
+ sackoff += sizeof(*sack);
+ }
+}
+
+/* TCP SACK sequence number adjustment */
+static unsigned int nf_ct_sack_adjust(struct sk_buff *skb,
+ unsigned int protoff,
+ struct tcphdr *tcph,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
+{
+ unsigned int dir, optoff, optend;
+ struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
+
+ optoff = protoff + sizeof(struct tcphdr);
+ optend = protoff + tcph->doff * 4;
+
+ if (!skb_make_writable(skb, optend))
+ return 0;
+
+ dir = CTINFO2DIR(ctinfo);
+
+ while (optoff < optend) {
+ /* Usually: option, length. */
+ unsigned char *op = skb->data + optoff;
+
+ switch (op[0]) {
+ case TCPOPT_EOL:
+ return 1;
+ case TCPOPT_NOP:
+ optoff++;
+ continue;
+ default:
+ /* no partial options */
+ if (optoff + 1 == optend ||
+ optoff + op[1] > optend ||
+ op[1] < 2)
+ return 0;
+ if (op[0] == TCPOPT_SACK &&
+ op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
+ ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
+ nf_ct_sack_block_adjust(skb, tcph, optoff + 2,
+ optoff+op[1],
+ &seqadj->seq[!dir]);
+ optoff += op[1];
+ }
+ }
+ return 1;
+}
+
+/* TCP sequence number adjustment. Returns 1 on success, 0 on failure */
+int nf_ct_seq_adjust(struct sk_buff *skb,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo,
+ unsigned int protoff)
+{
+ enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
+ struct tcphdr *tcph;
+ __be32 newseq, newack;
+ s32 seqoff, ackoff;
+ struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
+ struct nf_ct_seqadj *this_way, *other_way;
+ int res;
+
+ this_way = &seqadj->seq[dir];
+ other_way = &seqadj->seq[!dir];
+
+ if (!skb_make_writable(skb, protoff + sizeof(*tcph)))
+ return 0;
+
+ tcph = (void *)skb->data + protoff;
+ spin_lock_bh(&ct->lock);
+ if (after(ntohl(tcph->seq), this_way->correction_pos))
+ seqoff = this_way->offset_after;
+ else
+ seqoff = this_way->offset_before;
+
+ if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
+ other_way->correction_pos))
+ ackoff = other_way->offset_after;
+ else
+ ackoff = other_way->offset_before;
+
+ newseq = htonl(ntohl(tcph->seq) + seqoff);
+ newack = htonl(ntohl(tcph->ack_seq) - ackoff);
+
+ inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
+ inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);
+
+ pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
+ ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
+ ntohl(newack));
+
+ tcph->seq = newseq;
+ tcph->ack_seq = newack;
+
+ res = nf_ct_sack_adjust(skb, protoff, tcph, ct, ctinfo);
+ spin_unlock_bh(&ct->lock);
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(nf_ct_seq_adjust);
+
+s32 nf_ct_seq_offset(const struct nf_conn *ct,
+ enum ip_conntrack_dir dir,
+ u32 seq)
+{
+ struct nf_conn_seqadj *seqadj = nfct_seqadj(ct);
+ struct nf_ct_seqadj *this_way;
+
+ if (!seqadj)
+ return 0;
+
+ this_way = &seqadj->seq[dir];
+ return after(seq, this_way->correction_pos) ?
+ this_way->offset_after : this_way->offset_before;
+}
+EXPORT_SYMBOL_GPL(nf_ct_seq_offset);
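A minimal userspace sketch of the offset selection shared by nf_ct_seq_adjust() and nf_ct_seq_offset() above, assuming a hypothetical helper that grew the payload by 4 bytes at sequence 1000 (illustrative values, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* same wrap-around comparison as the kernel's after() macro */
    static int seq_after(uint32_t a, uint32_t b)
    {
            return (int32_t)(b - a) < 0;
    }

    /* segments past the correction point get offset_after; earlier
     * (e.g. retransmitted) segments keep offset_before */
    static int32_t seq_offset(uint32_t seq, uint32_t correction_pos,
                              int32_t offset_before, int32_t offset_after)
    {
            return seq_after(seq, correction_pos) ? offset_after : offset_before;
    }

    int main(void)
    {
            uint32_t correction_pos = 1000; /* where the mangling happened */
            int32_t before = 0, after = 4;  /* payload grew by 4 bytes there */

            printf("seq  999 -> +%d\n", seq_offset(999, correction_pos, before, after));
            printf("seq 1001 -> +%d\n", seq_offset(1001, correction_pos, before, after));
            return 0;
    }

The ACK and SACK edges in the functions above use the same selection, but with the other direction's offsets and with the sign inverted.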
+
+static struct nf_ct_ext_type nf_ct_seqadj_extend __read_mostly = {
+ .len = sizeof(struct nf_conn_seqadj),
+ .align = __alignof__(struct nf_conn_seqadj),
+ .id = NF_CT_EXT_SEQADJ,
+};
+
+int nf_conntrack_seqadj_init(void)
+{
+ return nf_ct_extend_register(&nf_ct_seqadj_extend);
+}
+
+void nf_conntrack_seqadj_fini(void)
+{
+ nf_ct_extend_unregister(&nf_ct_seqadj_extend);
+}
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 038eee5c8f8..6f0f4f7f68a 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -25,6 +25,7 @@
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <linux/netfilter/nf_nat.h>
@@ -402,6 +403,9 @@ nf_nat_setup_info(struct nf_conn *ct,
ct->status |= IPS_SRC_NAT;
else
ct->status |= IPS_DST_NAT;
+
+ if (nfct_help(ct))
+ nfct_seqadj_ext_add(ct);
}
if (maniptype == NF_NAT_MANIP_SRC) {
@@ -497,7 +501,7 @@ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
rtnl_lock();
for_each_net(net)
- nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
+ nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
rtnl_unlock();
}
@@ -511,7 +515,7 @@ static void nf_nat_l3proto_clean(u8 l3proto)
rtnl_lock();
for_each_net(net)
- nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
+ nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean, 0, 0);
rtnl_unlock();
}
@@ -749,7 +753,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
{
struct nf_nat_proto_clean clean = {};
- nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean);
+ nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean, 0, 0);
synchronize_rcu();
nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
}
@@ -764,10 +768,6 @@ static struct nf_ct_helper_expectfn follow_master_nat = {
.expectfn = nf_nat_follow_master,
};
-static struct nfq_ct_nat_hook nfq_ct_nat = {
- .seq_adjust = nf_nat_tcp_seq_adjust,
-};
-
static int __init nf_nat_init(void)
{
int ret;
@@ -787,14 +787,9 @@ static int __init nf_nat_init(void)
/* Initialize fake conntrack so that NAT will skip it */
nf_ct_untracked_status_or(IPS_NAT_DONE_MASK);
- BUG_ON(nf_nat_seq_adjust_hook != NULL);
- RCU_INIT_POINTER(nf_nat_seq_adjust_hook, nf_nat_seq_adjust);
BUG_ON(nfnetlink_parse_nat_setup_hook != NULL);
RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook,
nfnetlink_parse_nat_setup);
- BUG_ON(nf_ct_nat_offset != NULL);
- RCU_INIT_POINTER(nf_ct_nat_offset, nf_nat_get_offset);
- RCU_INIT_POINTER(nfq_ct_nat_hook, &nfq_ct_nat);
#ifdef CONFIG_XFRM
BUG_ON(nf_nat_decode_session_hook != NULL);
RCU_INIT_POINTER(nf_nat_decode_session_hook, __nf_nat_decode_session);
@@ -813,10 +808,7 @@ static void __exit nf_nat_cleanup(void)
unregister_pernet_subsys(&nf_nat_net_ops);
nf_ct_extend_unregister(&nat_extend);
nf_ct_helper_expectfn_unregister(&follow_master_nat);
- RCU_INIT_POINTER(nf_nat_seq_adjust_hook, NULL);
RCU_INIT_POINTER(nfnetlink_parse_nat_setup_hook, NULL);
- RCU_INIT_POINTER(nf_ct_nat_offset, NULL);
- RCU_INIT_POINTER(nfq_ct_nat_hook, NULL);
#ifdef CONFIG_XFRM
RCU_INIT_POINTER(nf_nat_decode_session_hook, NULL);
#endif
diff --git a/net/netfilter/nf_nat_helper.c b/net/netfilter/nf_nat_helper.c
index 85e20a91908..2840abb5bb9 100644
--- a/net/netfilter/nf_nat_helper.c
+++ b/net/netfilter/nf_nat_helper.c
@@ -20,74 +20,13 @@
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_l3proto.h>
#include <net/netfilter/nf_nat_l4proto.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
-#define DUMP_OFFSET(x) \
- pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
- x->offset_before, x->offset_after, x->correction_pos);
-
-static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
-
-/* Setup TCP sequence correction given this change at this sequence */
-static inline void
-adjust_tcp_sequence(u32 seq,
- int sizediff,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo)
-{
- enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
- struct nf_conn_nat *nat = nfct_nat(ct);
- struct nf_nat_seq *this_way = &nat->seq[dir];
-
- pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n",
- seq, sizediff);
-
- pr_debug("adjust_tcp_sequence: Seq_offset before: ");
- DUMP_OFFSET(this_way);
-
- spin_lock_bh(&nf_nat_seqofs_lock);
-
- /* SYN adjust. If it's uninitialized, or this is after last
- * correction, record it: we don't handle more than one
- * adjustment in the window, but do deal with common case of a
- * retransmit */
- if (this_way->offset_before == this_way->offset_after ||
- before(this_way->correction_pos, seq)) {
- this_way->correction_pos = seq;
- this_way->offset_before = this_way->offset_after;
- this_way->offset_after += sizediff;
- }
- spin_unlock_bh(&nf_nat_seqofs_lock);
-
- pr_debug("adjust_tcp_sequence: Seq_offset after: ");
- DUMP_OFFSET(this_way);
-}
-
-/* Get the offset value, for conntrack */
-s16 nf_nat_get_offset(const struct nf_conn *ct,
- enum ip_conntrack_dir dir,
- u32 seq)
-{
- struct nf_conn_nat *nat = nfct_nat(ct);
- struct nf_nat_seq *this_way;
- s16 offset;
-
- if (!nat)
- return 0;
-
- this_way = &nat->seq[dir];
- spin_lock_bh(&nf_nat_seqofs_lock);
- offset = after(seq, this_way->correction_pos)
- ? this_way->offset_after : this_way->offset_before;
- spin_unlock_bh(&nf_nat_seqofs_lock);
-
- return offset;
-}
-
/* Frobs data inside this packet, which is linear. */
static void mangle_contents(struct sk_buff *skb,
unsigned int dataoff,
@@ -142,30 +81,6 @@ static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
return 1;
}
-void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
- __be32 seq, s16 off)
-{
- if (!off)
- return;
- set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
- adjust_tcp_sequence(ntohl(seq), off, ct, ctinfo);
- nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
-}
-EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
-
-void nf_nat_tcp_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
- u32 ctinfo, int off)
-{
- const struct tcphdr *th;
-
- if (nf_ct_protonum(ct) != IPPROTO_TCP)
- return;
-
- th = (struct tcphdr *)(skb_network_header(skb)+ ip_hdrlen(skb));
- nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off);
-}
-EXPORT_SYMBOL_GPL(nf_nat_tcp_seq_adjust);
-
/* Generic function for mangling variable-length address changes inside
* NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
* command in FTP).
@@ -210,8 +125,8 @@ int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
datalen, oldlen);
if (adjust && rep_len != match_len)
- nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq,
- (int)rep_len - (int)match_len);
+ nf_ct_seqadj_set(ct, ctinfo, tcph->seq,
+ (int)rep_len - (int)match_len);
return 1;
}
@@ -271,145 +186,6 @@ nf_nat_mangle_udp_packet(struct sk_buff *skb,
}
EXPORT_SYMBOL(nf_nat_mangle_udp_packet);
-/* Adjust one found SACK option including checksum correction */
-static void
-sack_adjust(struct sk_buff *skb,
- struct tcphdr *tcph,
- unsigned int sackoff,
- unsigned int sackend,
- struct nf_nat_seq *natseq)
-{
- while (sackoff < sackend) {
- struct tcp_sack_block_wire *sack;
- __be32 new_start_seq, new_end_seq;
-
- sack = (void *)skb->data + sackoff;
- if (after(ntohl(sack->start_seq) - natseq->offset_before,
- natseq->correction_pos))
- new_start_seq = htonl(ntohl(sack->start_seq)
- - natseq->offset_after);
- else
- new_start_seq = htonl(ntohl(sack->start_seq)
- - natseq->offset_before);
-
- if (after(ntohl(sack->end_seq) - natseq->offset_before,
- natseq->correction_pos))
- new_end_seq = htonl(ntohl(sack->end_seq)
- - natseq->offset_after);
- else
- new_end_seq = htonl(ntohl(sack->end_seq)
- - natseq->offset_before);
-
- pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
- ntohl(sack->start_seq), new_start_seq,
- ntohl(sack->end_seq), new_end_seq);
-
- inet_proto_csum_replace4(&tcph->check, skb,
- sack->start_seq, new_start_seq, 0);
- inet_proto_csum_replace4(&tcph->check, skb,
- sack->end_seq, new_end_seq, 0);
- sack->start_seq = new_start_seq;
- sack->end_seq = new_end_seq;
- sackoff += sizeof(*sack);
- }
-}
-
-/* TCP SACK sequence number adjustment */
-static inline unsigned int
-nf_nat_sack_adjust(struct sk_buff *skb,
- unsigned int protoff,
- struct tcphdr *tcph,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo)
-{
- unsigned int dir, optoff, optend;
- struct nf_conn_nat *nat = nfct_nat(ct);
-
- optoff = protoff + sizeof(struct tcphdr);
- optend = protoff + tcph->doff * 4;
-
- if (!skb_make_writable(skb, optend))
- return 0;
-
- dir = CTINFO2DIR(ctinfo);
-
- while (optoff < optend) {
- /* Usually: option, length. */
- unsigned char *op = skb->data + optoff;
-
- switch (op[0]) {
- case TCPOPT_EOL:
- return 1;
- case TCPOPT_NOP:
- optoff++;
- continue;
- default:
- /* no partial options */
- if (optoff + 1 == optend ||
- optoff + op[1] > optend ||
- op[1] < 2)
- return 0;
- if (op[0] == TCPOPT_SACK &&
- op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
- ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
- sack_adjust(skb, tcph, optoff+2,
- optoff+op[1], &nat->seq[!dir]);
- optoff += op[1];
- }
- }
- return 1;
-}
-
-/* TCP sequence number adjustment. Returns 1 on success, 0 on failure */
-int
-nf_nat_seq_adjust(struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff)
-{
- struct tcphdr *tcph;
- int dir;
- __be32 newseq, newack;
- s16 seqoff, ackoff;
- struct nf_conn_nat *nat = nfct_nat(ct);
- struct nf_nat_seq *this_way, *other_way;
-
- dir = CTINFO2DIR(ctinfo);
-
- this_way = &nat->seq[dir];
- other_way = &nat->seq[!dir];
-
- if (!skb_make_writable(skb, protoff + sizeof(*tcph)))
- return 0;
-
- tcph = (void *)skb->data + protoff;
- if (after(ntohl(tcph->seq), this_way->correction_pos))
- seqoff = this_way->offset_after;
- else
- seqoff = this_way->offset_before;
-
- if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
- other_way->correction_pos))
- ackoff = other_way->offset_after;
- else
- ackoff = other_way->offset_before;
-
- newseq = htonl(ntohl(tcph->seq) + seqoff);
- newack = htonl(ntohl(tcph->ack_seq) - ackoff);
-
- inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
- inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);
-
- pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
- ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
- ntohl(newack));
-
- tcph->seq = newseq;
- tcph->ack_seq = newack;
-
- return nf_nat_sack_adjust(skb, protoff, tcph, ct, ctinfo);
-}
-
/* Setup NAT on this expected conntrack so it follows master. */
/* If we fail to get a free NAT slot, we'll get dropped on confirm */
void nf_nat_follow_master(struct nf_conn *ct,
diff --git a/net/netfilter/nf_nat_proto_sctp.c b/net/netfilter/nf_nat_proto_sctp.c
index 396e55d46f9..754536f2c67 100644
--- a/net/netfilter/nf_nat_proto_sctp.c
+++ b/net/netfilter/nf_nat_proto_sctp.c
@@ -34,9 +34,7 @@ sctp_manip_pkt(struct sk_buff *skb,
const struct nf_conntrack_tuple *tuple,
enum nf_nat_manip_type maniptype)
{
- struct sk_buff *frag;
sctp_sctphdr_t *hdr;
- __u32 crc32;
if (!skb_make_writable(skb, hdroff + sizeof(*hdr)))
return false;
@@ -51,11 +49,7 @@ sctp_manip_pkt(struct sk_buff *skb,
hdr->dest = tuple->dst.u.sctp.port;
}
- crc32 = sctp_start_cksum((u8 *)hdr, skb_headlen(skb) - hdroff);
- skb_walk_frags(skb, frag)
- crc32 = sctp_update_cksum((u8 *)frag->data, skb_headlen(frag),
- crc32);
- hdr->checksum = sctp_end_cksum(crc32);
+ hdr->checksum = sctp_compute_cksum(skb, hdroff);
return true;
}
diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c
index dac11f73868..f9790405b7f 100644
--- a/net/netfilter/nf_nat_sip.c
+++ b/net/netfilter/nf_nat_sip.c
@@ -20,6 +20,7 @@
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
#include <linux/netfilter/nf_conntrack_sip.h>
MODULE_LICENSE("GPL");
@@ -308,7 +309,7 @@ static void nf_nat_sip_seq_adjust(struct sk_buff *skb, unsigned int protoff,
return;
th = (struct tcphdr *)(skb->data + protoff);
- nf_nat_set_seq_adjust(ct, ctinfo, th->seq, off);
+ nf_ct_seqadj_set(ct, ctinfo, th->seq, off);
}
/* Handles expected signalling connections and media streams */
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
new file mode 100644
index 00000000000..6fd967c6278
--- /dev/null
+++ b/net/netfilter/nf_synproxy_core.c
@@ -0,0 +1,432 @@
+/*
+ * Copyright (c) 2013 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <asm/unaligned.h>
+#include <net/tcp.h>
+#include <net/netns/generic.h>
+
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_tcpudp.h>
+#include <linux/netfilter/xt_SYNPROXY.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netfilter/nf_conntrack_synproxy.h>
+
+int synproxy_net_id;
+EXPORT_SYMBOL_GPL(synproxy_net_id);
+
+void
+synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
+ const struct tcphdr *th, struct synproxy_options *opts)
+{
+ int length = (th->doff * 4) - sizeof(*th);
+ u8 buf[40], *ptr;
+
+ ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf);
+ BUG_ON(ptr == NULL);
+
+ opts->options = 0;
+ while (length > 0) {
+ int opcode = *ptr++;
+ int opsize;
+
+ switch (opcode) {
+ case TCPOPT_EOL:
+ return;
+ case TCPOPT_NOP:
+ length--;
+ continue;
+ default:
+ opsize = *ptr++;
+ if (opsize < 2)
+ return;
+ if (opsize > length)
+ return;
+
+ switch (opcode) {
+ case TCPOPT_MSS:
+ if (opsize == TCPOLEN_MSS) {
+ opts->mss = get_unaligned_be16(ptr);
+ opts->options |= XT_SYNPROXY_OPT_MSS;
+ }
+ break;
+ case TCPOPT_WINDOW:
+ if (opsize == TCPOLEN_WINDOW) {
+ opts->wscale = *ptr;
+ if (opts->wscale > 14)
+ opts->wscale = 14;
+ opts->options |= XT_SYNPROXY_OPT_WSCALE;
+ }
+ break;
+ case TCPOPT_TIMESTAMP:
+ if (opsize == TCPOLEN_TIMESTAMP) {
+ opts->tsval = get_unaligned_be32(ptr);
+ opts->tsecr = get_unaligned_be32(ptr + 4);
+ opts->options |= XT_SYNPROXY_OPT_TIMESTAMP;
+ }
+ break;
+ case TCPOPT_SACK_PERM:
+ if (opsize == TCPOLEN_SACK_PERM)
+ opts->options |= XT_SYNPROXY_OPT_SACK_PERM;
+ break;
+ }
+
+ ptr += opsize - 2;
+ length -= opsize;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(synproxy_parse_options);
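A self-contained userspace sketch of the same option walk over a hand-built options array (hypothetical packet contents; the option kinds and lengths are the standard TCP values):

    #include <stdint.h>
    #include <stdio.h>

    #define TCPOPT_EOL    0
    #define TCPOPT_NOP    1
    #define TCPOPT_MSS    2
    #define TCPOPT_WINDOW 3

    int main(void)
    {
            /* MSS 1460, NOP, window scale 7, EOL */
            uint8_t opts[] = { 2, 4, 0x05, 0xb4, 1, 3, 3, 7, 0 };
            unsigned int i = 0, len = sizeof(opts);

            while (i < len) {
                    uint8_t kind = opts[i];

                    if (kind == TCPOPT_EOL)
                            break;
                    if (kind == TCPOPT_NOP) {
                            i++;
                            continue;
                    }
                    /* malformed length: bail out, as the kernel code does */
                    if (i + 1 >= len || opts[i + 1] < 2 || i + opts[i + 1] > len)
                            break;
                    if (kind == TCPOPT_MSS && opts[i + 1] == 4)
                            printf("MSS %u\n", (opts[i + 2] << 8) | opts[i + 3]);
                    if (kind == TCPOPT_WINDOW && opts[i + 1] == 3)
                            printf("wscale %u\n", opts[i + 2]);
                    i += opts[i + 1];
            }
            return 0;
    }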
+
+unsigned int synproxy_options_size(const struct synproxy_options *opts)
+{
+ unsigned int size = 0;
+
+ if (opts->options & XT_SYNPROXY_OPT_MSS)
+ size += TCPOLEN_MSS_ALIGNED;
+ if (opts->options & XT_SYNPROXY_OPT_TIMESTAMP)
+ size += TCPOLEN_TSTAMP_ALIGNED;
+ else if (opts->options & XT_SYNPROXY_OPT_SACK_PERM)
+ size += TCPOLEN_SACKPERM_ALIGNED;
+ if (opts->options & XT_SYNPROXY_OPT_WSCALE)
+ size += TCPOLEN_WSCALE_ALIGNED;
+
+ return size;
+}
+EXPORT_SYMBOL_GPL(synproxy_options_size);
+
+void
+synproxy_build_options(struct tcphdr *th, const struct synproxy_options *opts)
+{
+ __be32 *ptr = (__be32 *)(th + 1);
+ u8 options = opts->options;
+
+ if (options & XT_SYNPROXY_OPT_MSS)
+ *ptr++ = htonl((TCPOPT_MSS << 24) |
+ (TCPOLEN_MSS << 16) |
+ opts->mss);
+
+ if (options & XT_SYNPROXY_OPT_TIMESTAMP) {
+ if (options & XT_SYNPROXY_OPT_SACK_PERM)
+ *ptr++ = htonl((TCPOPT_SACK_PERM << 24) |
+ (TCPOLEN_SACK_PERM << 16) |
+ (TCPOPT_TIMESTAMP << 8) |
+ TCPOLEN_TIMESTAMP);
+ else
+ *ptr++ = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_TIMESTAMP << 8) |
+ TCPOLEN_TIMESTAMP);
+
+ *ptr++ = htonl(opts->tsval);
+ *ptr++ = htonl(opts->tsecr);
+ } else if (options & XT_SYNPROXY_OPT_SACK_PERM)
+ *ptr++ = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_SACK_PERM << 8) |
+ TCPOLEN_SACK_PERM);
+
+ if (options & XT_SYNPROXY_OPT_WSCALE)
+ *ptr++ = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_WINDOW << 16) |
+ (TCPOLEN_WINDOW << 8) |
+ opts->wscale);
+}
+EXPORT_SYMBOL_GPL(synproxy_build_options);
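synproxy_build_options() emits the options as aligned 32-bit words right after the TCP header; a tiny sketch of the packing arithmetic, using the standard kind/length values and an illustrative MSS of 1460:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* kind 2 (MSS), length 4 and the MSS value in one word */
            uint32_t mss_word = (2u << 24) | (4u << 16) | 1460u;

            /* SACK permitted (kind 4, len 2) folded into the same word
             * as the timestamp option header (kind 8, len 10) */
            uint32_t sackperm_ts_word = (4u << 24) | (2u << 16) | (8u << 8) | 10u;

            printf("MSS word:         0x%08x\n", mss_word);         /* 0x020405b4 */
            printf("SACKPERM+TS word: 0x%08x\n", sackperm_ts_word); /* 0x0402080a */
            return 0;
    }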
+
+void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
+ struct synproxy_options *opts)
+{
+ opts->tsecr = opts->tsval;
+ opts->tsval = tcp_time_stamp & ~0x3f;
+
+ if (opts->options & XT_SYNPROXY_OPT_WSCALE)
+ opts->tsval |= info->wscale;
+ else
+ opts->tsval |= 0xf;
+
+ if (opts->options & XT_SYNPROXY_OPT_SACK_PERM)
+ opts->tsval |= 1 << 4;
+
+ if (opts->options & XT_SYNPROXY_OPT_ECN)
+ opts->tsval |= 1 << 5;
+}
+EXPORT_SYMBOL_GPL(synproxy_init_timestamp_cookie);
+
+void synproxy_check_timestamp_cookie(struct synproxy_options *opts)
+{
+ opts->wscale = opts->tsecr & 0xf;
+ if (opts->wscale != 0xf)
+ opts->options |= XT_SYNPROXY_OPT_WSCALE;
+
+ opts->options |= opts->tsecr & (1 << 4) ? XT_SYNPROXY_OPT_SACK_PERM : 0;
+
+ opts->options |= opts->tsecr & (1 << 5) ? XT_SYNPROXY_OPT_ECN : 0;
+}
+EXPORT_SYMBOL_GPL(synproxy_check_timestamp_cookie);
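The pair of functions above hides connection metadata in the low six bits of the timestamp value and recovers it from the echoed timestamp; a minimal round-trip sketch with made-up inputs (the bit layout follows the code above):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t now = 0x12345678;      /* stand-in for tcp_time_stamp */
            unsigned int wscale = 7;        /* window scale to encode */
            int sack_perm = 1, ecn = 0;

            /* encode: clear the low 6 bits, stash wscale in bits 0-3
             * (0xf means "no window scaling"), SACK-permitted in bit 4
             * and ECN in bit 5 */
            uint32_t tsval = (now & ~0x3fu) | wscale;
            if (sack_perm)
                    tsval |= 1u << 4;
            if (ecn)
                    tsval |= 1u << 5;

            /* decode from the value echoed back in tsecr */
            uint32_t tsecr = tsval;
            unsigned int got_wscale = tsecr & 0xf;

            printf("tsval  0x%08x\n", tsval);        /* 0x12345657 */
            printf("wscale %u%s\n", got_wscale,
                   got_wscale != 0xf ? "" : " (none)");
            printf("sack   %s\n", (tsecr & (1u << 4)) ? "yes" : "no");
            printf("ecn    %s\n", (tsecr & (1u << 5)) ? "yes" : "no");
            return 0;
    }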
+
+unsigned int synproxy_tstamp_adjust(struct sk_buff *skb,
+ unsigned int protoff,
+ struct tcphdr *th,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ const struct nf_conn_synproxy *synproxy)
+{
+ unsigned int optoff, optend;
+ u32 *ptr, old;
+
+ if (synproxy->tsoff == 0)
+ return 1;
+
+ optoff = protoff + sizeof(struct tcphdr);
+ optend = protoff + th->doff * 4;
+
+ if (!skb_make_writable(skb, optend))
+ return 0;
+
+ while (optoff < optend) {
+ unsigned char *op = skb->data + optoff;
+
+ switch (op[0]) {
+ case TCPOPT_EOL:
+ return 1;
+ case TCPOPT_NOP:
+ optoff++;
+ continue;
+ default:
+ if (optoff + 1 == optend ||
+ optoff + op[1] > optend ||
+ op[1] < 2)
+ return 0;
+ if (op[0] == TCPOPT_TIMESTAMP &&
+ op[1] == TCPOLEN_TIMESTAMP) {
+ if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
+ ptr = (u32 *)&op[2];
+ old = *ptr;
+ *ptr = htonl(ntohl(*ptr) -
+ synproxy->tsoff);
+ } else {
+ ptr = (u32 *)&op[6];
+ old = *ptr;
+ *ptr = htonl(ntohl(*ptr) +
+ synproxy->tsoff);
+ }
+ inet_proto_csum_replace4(&th->check, skb,
+ old, *ptr, 0);
+ return 1;
+ }
+ optoff += op[1];
+ }
+ }
+ return 1;
+}
+EXPORT_SYMBOL_GPL(synproxy_tstamp_adjust);
+
+static struct nf_ct_ext_type nf_ct_synproxy_extend __read_mostly = {
+ .len = sizeof(struct nf_conn_synproxy),
+ .align = __alignof__(struct nf_conn_synproxy),
+ .id = NF_CT_EXT_SYNPROXY,
+};
+
+#ifdef CONFIG_PROC_FS
+static void *synproxy_cpu_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct synproxy_net *snet = synproxy_pernet(seq_file_net(seq));
+ int cpu;
+
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
+ for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) {
+ if (!cpu_possible(cpu))
+ continue;
+ *pos = cpu + 1;
+ return per_cpu_ptr(snet->stats, cpu);
+ }
+
+ return NULL;
+}
+
+static void *synproxy_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct synproxy_net *snet = synproxy_pernet(seq_file_net(seq));
+ int cpu;
+
+ for (cpu = *pos; cpu < nr_cpu_ids; cpu++) {
+ if (!cpu_possible(cpu))
+ continue;
+ *pos = cpu + 1;
+ return per_cpu_ptr(snet->stats, cpu);
+ }
+
+ return NULL;
+}
+
+static void synproxy_cpu_seq_stop(struct seq_file *seq, void *v)
+{
+ return;
+}
+
+static int synproxy_cpu_seq_show(struct seq_file *seq, void *v)
+{
+ struct synproxy_stats *stats = v;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(seq, "entries\t\tsyn_received\t"
+ "cookie_invalid\tcookie_valid\t"
+ "cookie_retrans\tconn_reopened\n");
+ return 0;
+ }
+
+ seq_printf(seq, "%08x\t%08x\t%08x\t%08x\t%08x\t%08x\n", 0,
+ stats->syn_received,
+ stats->cookie_invalid,
+ stats->cookie_valid,
+ stats->cookie_retrans,
+ stats->conn_reopened);
+
+ return 0;
+}
+
+static const struct seq_operations synproxy_cpu_seq_ops = {
+ .start = synproxy_cpu_seq_start,
+ .next = synproxy_cpu_seq_next,
+ .stop = synproxy_cpu_seq_stop,
+ .show = synproxy_cpu_seq_show,
+};
+
+static int synproxy_cpu_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open_net(inode, file, &synproxy_cpu_seq_ops,
+ sizeof(struct seq_net_private));
+}
+
+static const struct file_operations synproxy_cpu_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = synproxy_cpu_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net,
+};
+
+static int __net_init synproxy_proc_init(struct net *net)
+{
+ if (!proc_create("synproxy", S_IRUGO, net->proc_net_stat,
+ &synproxy_cpu_seq_fops))
+ return -ENOMEM;
+ return 0;
+}
+
+static void __net_exit synproxy_proc_exit(struct net *net)
+{
+ remove_proc_entry("synproxy", net->proc_net_stat);
+}
+#else
+static int __net_init synproxy_proc_init(struct net *net)
+{
+ return 0;
+}
+
+static void __net_exit synproxy_proc_exit(struct net *net)
+{
+ return;
+}
+#endif /* CONFIG_PROC_FS */
+
+static int __net_init synproxy_net_init(struct net *net)
+{
+ struct synproxy_net *snet = synproxy_pernet(net);
+ struct nf_conntrack_tuple t;
+ struct nf_conn *ct;
+ int err = -ENOMEM;
+
+ memset(&t, 0, sizeof(t));
+ ct = nf_conntrack_alloc(net, 0, &t, &t, GFP_KERNEL);
+ if (IS_ERR(ct)) {
+ err = PTR_ERR(ct);
+ goto err1;
+ }
+
+ if (!nfct_seqadj_ext_add(ct))
+ goto err2;
+ if (!nfct_synproxy_ext_add(ct))
+ goto err2;
+ __set_bit(IPS_TEMPLATE_BIT, &ct->status);
+ __set_bit(IPS_CONFIRMED_BIT, &ct->status);
+
+ snet->tmpl = ct;
+
+ snet->stats = alloc_percpu(struct synproxy_stats);
+ if (snet->stats == NULL)
+ goto err2;
+
+ err = synproxy_proc_init(net);
+ if (err < 0)
+ goto err3;
+
+ return 0;
+
+err3:
+ free_percpu(snet->stats);
+err2:
+ nf_conntrack_free(ct);
+err1:
+ return err;
+}
+
+static void __net_exit synproxy_net_exit(struct net *net)
+{
+ struct synproxy_net *snet = synproxy_pernet(net);
+
+ nf_conntrack_free(snet->tmpl);
+ synproxy_proc_exit(net);
+ free_percpu(snet->stats);
+}
+
+static struct pernet_operations synproxy_net_ops = {
+ .init = synproxy_net_init,
+ .exit = synproxy_net_exit,
+ .id = &synproxy_net_id,
+ .size = sizeof(struct synproxy_net),
+};
+
+static int __init synproxy_core_init(void)
+{
+ int err;
+
+ err = nf_ct_extend_register(&nf_ct_synproxy_extend);
+ if (err < 0)
+ goto err1;
+
+ err = register_pernet_subsys(&synproxy_net_ops);
+ if (err < 0)
+ goto err2;
+
+ return 0;
+
+err2:
+ nf_ct_extend_unregister(&nf_ct_synproxy_extend);
+err1:
+ return err;
+}
+
+static void __exit synproxy_core_exit(void)
+{
+ unregister_pernet_subsys(&synproxy_net_ops);
+ nf_ct_extend_unregister(&nf_ct_synproxy_extend);
+}
+
+module_init(synproxy_core_init);
+module_exit(synproxy_core_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c
deleted file mode 100644
index 474d621cbc2..00000000000
--- a/net/netfilter/nf_tproxy_core.c
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Transparent proxy support for Linux/iptables
- *
- * Copyright (c) 2006-2007 BalaBit IT Ltd.
- * Author: Balazs Scheidler, Krisztian Kovacs
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/module.h>
-
-#include <linux/net.h>
-#include <linux/if.h>
-#include <linux/netdevice.h>
-#include <net/udp.h>
-#include <net/netfilter/nf_tproxy_core.h>
-
-
-static void
-nf_tproxy_destructor(struct sk_buff *skb)
-{
- struct sock *sk = skb->sk;
-
- skb->sk = NULL;
- skb->destructor = NULL;
-
- if (sk)
- sock_put(sk);
-}
-
-/* consumes sk */
-void
-nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
-{
- /* assigning tw sockets complicates things; most
- * skb->sk->X checks would have to test sk->sk_state first */
- if (sk->sk_state == TCP_TIME_WAIT) {
- inet_twsk_put(inet_twsk(sk));
- return;
- }
-
- skb_orphan(skb);
- skb->sk = sk;
- skb->destructor = nf_tproxy_destructor;
-}
-EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock);
-
-static int __init nf_tproxy_init(void)
-{
- pr_info("NF_TPROXY: Transparent proxy support initialized, version 4.1.0\n");
- pr_info("NF_TPROXY: Copyright (c) 2006-2007 BalaBit IT Ltd.\n");
- return 0;
-}
-
-module_init(nf_tproxy_init);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Krisztian Kovacs");
-MODULE_DESCRIPTION("Transparent proxy support core routines");
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 962e9792e31..d92cc317bf8 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -419,6 +419,7 @@ __build_packet_message(struct nfnl_log_net *log,
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = htons(inst->group_num);
+ memset(&pmsg, 0, sizeof(pmsg));
pmsg.hw_protocol = skb->protocol;
pmsg.hook = hooknum;
@@ -498,7 +499,10 @@ __build_packet_message(struct nfnl_log_net *log,
if (indev && skb->dev &&
skb->mac_header != skb->network_header) {
struct nfulnl_msg_packet_hw phw;
- int len = dev_parse_header(skb, phw.hw_addr);
+ int len;
+
+ memset(&phw, 0, sizeof(phw));
+ len = dev_parse_header(skb, phw.hw_addr);
if (len > 0) {
phw.hw_addrlen = htons(len);
if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 971ea145ab3..95a98c8c1da 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -463,7 +463,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
if (indev && entskb->dev &&
entskb->mac_header != entskb->network_header) {
struct nfqnl_msg_packet_hw phw;
- int len = dev_parse_header(entskb, phw.hw_addr);
+ int len;
+
+ memset(&phw, 0, sizeof(phw));
+ len = dev_parse_header(entskb, phw.hw_addr);
if (len) {
phw.hw_addrlen = htons(len);
if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
@@ -859,6 +862,7 @@ static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
[NFQA_MARK] = { .type = NLA_U32 },
[NFQA_PAYLOAD] = { .type = NLA_UNSPEC },
[NFQA_CT] = { .type = NLA_UNSPEC },
+ [NFQA_EXP] = { .type = NLA_UNSPEC },
};
static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
@@ -987,9 +991,14 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
if (entry == NULL)
return -ENOENT;
- rcu_read_lock();
- if (nfqa[NFQA_CT] && (queue->flags & NFQA_CFG_F_CONNTRACK))
+ if (nfqa[NFQA_CT]) {
ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo);
+ if (ct && nfqa[NFQA_EXP]) {
+ nfqnl_attach_expect(ct, nfqa[NFQA_EXP],
+ NETLINK_CB(skb).portid,
+ nlmsg_report(nlh));
+ }
+ }
if (nfqa[NFQA_PAYLOAD]) {
u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
@@ -1002,7 +1011,6 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
if (ct)
nfqnl_ct_seq_adjust(skb, ct, ctinfo, diff);
}
- rcu_read_unlock();
if (nfqa[NFQA_MARK])
entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
diff --git a/net/netfilter/nfnetlink_queue_ct.c b/net/netfilter/nfnetlink_queue_ct.c
index ab61d66bc0b..96cac50e0d1 100644
--- a/net/netfilter/nfnetlink_queue_ct.c
+++ b/net/netfilter/nfnetlink_queue_ct.c
@@ -87,12 +87,27 @@ nla_put_failure:
void nfqnl_ct_seq_adjust(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo, int diff)
{
- struct nfq_ct_nat_hook *nfq_nat_ct;
+ struct nfq_ct_hook *nfq_ct;
- nfq_nat_ct = rcu_dereference(nfq_ct_nat_hook);
- if (nfq_nat_ct == NULL)
+ nfq_ct = rcu_dereference(nfq_ct_hook);
+ if (nfq_ct == NULL)
return;
if ((ct->status & IPS_NAT_MASK) && diff)
- nfq_nat_ct->seq_adjust(skb, ct, ctinfo, diff);
+ nfq_ct->seq_adjust(skb, ct, ctinfo, diff);
+}
+
+int nfqnl_attach_expect(struct nf_conn *ct, const struct nlattr *attr,
+ u32 portid, u32 report)
+{
+ struct nfq_ct_hook *nfq_ct;
+
+ if (nf_ct_is_untracked(ct))
+ return 0;
+
+ nfq_ct = rcu_dereference(nfq_ct_hook);
+ if (nfq_ct == NULL)
+ return -EOPNOTSUPP;
+
+ return nfq_ct->attach_expect(attr, ct, portid, report);
}
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index 7011c71646f..cd24290f3b2 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -52,23 +52,27 @@ tcpmss_mangle_packet(struct sk_buff *skb,
{
const struct xt_tcpmss_info *info = par->targinfo;
struct tcphdr *tcph;
- unsigned int tcplen, i;
+ int len, tcp_hdrlen;
+ unsigned int i;
__be16 oldval;
u16 newmss;
u8 *opt;
/* This is a fragment, no TCP header is available */
if (par->fragoff != 0)
- return XT_CONTINUE;
+ return 0;
if (!skb_make_writable(skb, skb->len))
return -1;
- tcplen = skb->len - tcphoff;
+ len = skb->len - tcphoff;
+ if (len < (int)sizeof(struct tcphdr))
+ return -1;
+
tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+ tcp_hdrlen = tcph->doff * 4;
- /* Header cannot be larger than the packet */
- if (tcplen < tcph->doff*4)
+ if (len < tcp_hdrlen)
return -1;
if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -87,9 +91,8 @@ tcpmss_mangle_packet(struct sk_buff *skb,
newmss = info->mss;
opt = (u_int8_t *)tcph;
- for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) {
- if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS &&
- opt[i+1] == TCPOLEN_MSS) {
+ for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) {
+ if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) {
u_int16_t oldmss;
oldmss = (opt[i+2] << 8) | opt[i+3];
@@ -112,9 +115,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
}
/* There is data after the header so the option can't be added
- without moving it, and doing so may make the SYN packet
- itself too large. Accept the packet unmodified instead. */
- if (tcplen > tcph->doff*4)
+ * without moving it, and doing so may make the SYN packet
+ * itself too large. Accept the packet unmodified instead.
+ */
+ if (len > tcp_hdrlen)
return 0;
/*
@@ -143,10 +147,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
newmss = min(newmss, (u16)1220);
opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
- memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
+ memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr));
inet_proto_csum_replace2(&tcph->check, skb,
- htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1);
+ htons(len), htons(len + TCPOLEN_MSS), 1);
opt[0] = TCPOPT_MSS;
opt[1] = TCPOLEN_MSS;
opt[2] = (newmss & 0xff00) >> 8;
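A userspace sketch of the corrected MSS scan above: with the new upper bound, opt[i+1]..opt[i+3] are guaranteed to stay inside the header, so the loop body only has to check kind and length (hand-built header bytes, illustrative only; the real target goes on to rewrite the value and fix the checksum):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TCPOPT_NOP  1
    #define TCPOPT_MSS  2
    #define TCPOLEN_MSS 4

    int main(void)
    {
            /* fake TCP header: 20 fixed bytes, then NOP, NOP, MSS 1460, NOP padding */
            uint8_t hdr[28];
            unsigned int tcp_hdrlen = sizeof(hdr), i;

            memset(hdr, 0, sizeof(hdr));
            hdr[20] = TCPOPT_NOP;
            hdr[21] = TCPOPT_NOP;
            hdr[22] = TCPOPT_MSS;
            hdr[23] = TCPOLEN_MSS;
            hdr[24] = 0x05;         /* 1460 >> 8 */
            hdr[25] = 0xb4;         /* 1460 & 0xff */
            hdr[26] = TCPOPT_NOP;
            hdr[27] = TCPOPT_NOP;

            for (i = 20; i <= tcp_hdrlen - TCPOLEN_MSS;
                 i += (hdr[i] == TCPOPT_NOP ? 1 : hdr[i + 1])) {
                    if (hdr[i] == TCPOPT_MSS && hdr[i + 1] == TCPOLEN_MSS) {
                            printf("MSS option at offset %u, value %u\n",
                                   i, (hdr[i + 2] << 8) | hdr[i + 3]);
                            break;
                    }
            }
            return 0;
    }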
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index b68fa191710..625fa1d636a 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -38,7 +38,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
struct tcphdr *tcph;
u_int16_t n, o;
u_int8_t *opt;
- int len;
+ int len, tcp_hdrlen;
/* This is a fragment, no TCP header is available */
if (par->fragoff != 0)
@@ -52,7 +52,9 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
return NF_DROP;
tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
- if (tcph->doff * 4 > len)
+ tcp_hdrlen = tcph->doff * 4;
+
+ if (len < tcp_hdrlen)
return NF_DROP;
opt = (u_int8_t *)tcph;
@@ -61,10 +63,10 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
* Walk through all TCP options - if we find some option to remove,
* set all octets to %TCPOPT_NOP and adjust checksum.
*/
- for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) {
+ for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) {
optl = optlen(opt, i);
- if (i + optl > tcp_hdrlen(skb))
+ if (i + optl > tcp_hdrlen)
break;
if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i]))
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index d7f195388f6..5d8a3a3cd5a 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -15,7 +15,9 @@
#include <linux/ip.h>
#include <net/checksum.h>
#include <net/udp.h>
+#include <net/tcp.h>
#include <net/inet_sock.h>
+#include <net/inet_hashtables.h>
#include <linux/inetdevice.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
@@ -26,13 +28,18 @@
#define XT_TPROXY_HAVE_IPV6 1
#include <net/if_inet6.h>
#include <net/addrconf.h>
+#include <net/inet6_hashtables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#endif
-#include <net/netfilter/nf_tproxy_core.h>
#include <linux/netfilter/xt_TPROXY.h>
+enum nf_tproxy_lookup_t {
+ NFT_LOOKUP_LISTENER,
+ NFT_LOOKUP_ESTABLISHED,
+};
+
static bool tproxy_sk_is_transparent(struct sock *sk)
{
if (sk->sk_state != TCP_TIME_WAIT) {
@@ -68,6 +75,157 @@ tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
return laddr ? laddr : daddr;
}
+/*
+ * This is used when the user wants to intercept a connection matching
+ * an explicit iptables rule. In this case the sockets are matched in
+ * the following preference order:
+ *
+ * - match: if there's a fully established connection matching the
+ * _packet_ tuple, it is returned, assuming the redirection
+ * already took place and we process a packet belonging to an
+ * established connection
+ *
+ * - match: if there's a listening socket matching the redirection
+ * (e.g. on-port & on-ip of the connection), it is returned,
+ * regardless if it was bound to 0.0.0.0 or an explicit
+ * address. The reasoning is that if there's an explicit rule, it
+ * does not really matter if the listener is bound to an interface
+ *   or to 0.0.0.0. The user already stated that he wants redirection
+ * (since he added the rule).
+ *
+ * Please note that there's an overlap between what a TPROXY target
+ * and a socket match will match. Normally, if you have both rules, the
+ * "socket" match will come first, so effectively all packets
+ * belonging to established connections go through that one.
+ */
+static inline struct sock *
+nf_tproxy_get_sock_v4(struct net *net, const u8 protocol,
+ const __be32 saddr, const __be32 daddr,
+ const __be16 sport, const __be16 dport,
+ const struct net_device *in,
+ const enum nf_tproxy_lookup_t lookup_type)
+{
+ struct sock *sk;
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ switch (lookup_type) {
+ case NFT_LOOKUP_LISTENER:
+ sk = inet_lookup_listener(net, &tcp_hashinfo,
+ saddr, sport,
+ daddr, dport,
+ in->ifindex);
+
+ /* NOTE: we return listeners even if bound to
+ * 0.0.0.0, those are filtered out in
+ * xt_socket, since xt_TPROXY needs 0 bound
+ * listeners too
+ */
+ break;
+ case NFT_LOOKUP_ESTABLISHED:
+ sk = inet_lookup_established(net, &tcp_hashinfo,
+ saddr, sport, daddr, dport,
+ in->ifindex);
+ break;
+ default:
+ BUG();
+ }
+ break;
+ case IPPROTO_UDP:
+ sk = udp4_lib_lookup(net, saddr, sport, daddr, dport,
+ in->ifindex);
+ if (sk) {
+ int connected = (sk->sk_state == TCP_ESTABLISHED);
+ int wildcard = (inet_sk(sk)->inet_rcv_saddr == 0);
+
+ /* NOTE: we return listeners even if bound to
+ * 0.0.0.0, those are filtered out in
+ * xt_socket, since xt_TPROXY needs 0 bound
+ * listeners too
+ */
+ if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
+ (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
+ sock_put(sk);
+ sk = NULL;
+ }
+ }
+ break;
+ default:
+ WARN_ON(1);
+ sk = NULL;
+ }
+
+ pr_debug("tproxy socket lookup: proto %u %08x:%u -> %08x:%u, lookup type: %d, sock %p\n",
+ protocol, ntohl(saddr), ntohs(sport), ntohl(daddr), ntohs(dport), lookup_type, sk);
+
+ return sk;
+}
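In the UDP branch above, whether the looked-up socket is kept depends only on the requested lookup type, whether the socket is connected and whether it is bound to the wildcard address; a small truth-table sketch of that decision (hypothetical inputs, same condition as the code):

    #include <stdio.h>

    enum lookup_t { LOOKUP_LISTENER, LOOKUP_ESTABLISHED };

    /* mirrors the drop condition in the UDP case above */
    static int keep_sock(enum lookup_t type, int connected, int wildcard)
    {
            if (type == LOOKUP_ESTABLISHED && (!connected || wildcard))
                    return 0;
            if (type == LOOKUP_LISTENER && connected)
                    return 0;
            return 1;
    }

    int main(void)
    {
            printf("established, connected, bound:   %d\n",
                   keep_sock(LOOKUP_ESTABLISHED, 1, 0));  /* 1: keep */
            printf("established, connected, 0.0.0.0: %d\n",
                   keep_sock(LOOKUP_ESTABLISHED, 1, 1));  /* 0: drop */
            printf("listener, unconnected:           %d\n",
                   keep_sock(LOOKUP_LISTENER, 0, 1));     /* 1: keep */
            printf("listener, connected:             %d\n",
                   keep_sock(LOOKUP_LISTENER, 1, 0));     /* 0: drop */
            return 0;
    }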
+
+#ifdef XT_TPROXY_HAVE_IPV6
+static inline struct sock *
+nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
+ const struct in6_addr *saddr, const struct in6_addr *daddr,
+ const __be16 sport, const __be16 dport,
+ const struct net_device *in,
+ const enum nf_tproxy_lookup_t lookup_type)
+{
+ struct sock *sk;
+
+ switch (protocol) {
+ case IPPROTO_TCP:
+ switch (lookup_type) {
+ case NFT_LOOKUP_LISTENER:
+ sk = inet6_lookup_listener(net, &tcp_hashinfo,
+ saddr, sport,
+ daddr, ntohs(dport),
+ in->ifindex);
+
+ /* NOTE: we return listeners even if bound to
+ * 0.0.0.0, those are filtered out in
+ * xt_socket, since xt_TPROXY needs 0 bound
+ * listeners too
+ */
+ break;
+ case NFT_LOOKUP_ESTABLISHED:
+ sk = __inet6_lookup_established(net, &tcp_hashinfo,
+ saddr, sport, daddr, ntohs(dport),
+ in->ifindex);
+ break;
+ default:
+ BUG();
+ }
+ break;
+ case IPPROTO_UDP:
+ sk = udp6_lib_lookup(net, saddr, sport, daddr, dport,
+ in->ifindex);
+ if (sk) {
+ int connected = (sk->sk_state == TCP_ESTABLISHED);
+ int wildcard = ipv6_addr_any(&inet6_sk(sk)->rcv_saddr);
+
+ /* NOTE: we return listeners even if bound to
+ * 0.0.0.0, those are filtered out in
+ * xt_socket, since xt_TPROXY needs 0 bound
+ * listeners too
+ */
+ if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) ||
+ (lookup_type == NFT_LOOKUP_LISTENER && connected)) {
+ sock_put(sk);
+ sk = NULL;
+ }
+ }
+ break;
+ default:
+ WARN_ON(1);
+ sk = NULL;
+ }
+
+ pr_debug("tproxy socket lookup: proto %u %pI6:%u -> %pI6:%u, lookup type: %d, sock %p\n",
+ protocol, saddr, ntohs(sport), daddr, ntohs(dport), lookup_type, sk);
+
+ return sk;
+}
+#endif
+
/**
* tproxy_handle_time_wait4 - handle IPv4 TCP TIME_WAIT reopen redirections
* @skb: The skb being processed.
@@ -117,6 +275,15 @@ tproxy_handle_time_wait4(struct sk_buff *skb, __be32 laddr, __be16 lport,
return sk;
}
+/* assign a socket to the skb -- consumes sk */
+static void
+nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
+{
+ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+}
+
static unsigned int
tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
u_int32_t mark_mask, u_int32_t mark_value)
diff --git a/net/netfilter/xt_addrtype.c b/net/netfilter/xt_addrtype.c
index 68ff29f6086..fab6eea1bf3 100644
--- a/net/netfilter/xt_addrtype.c
+++ b/net/netfilter/xt_addrtype.c
@@ -202,7 +202,7 @@ static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
return -EINVAL;
}
if ((info->source | info->dest) >= XT_ADDRTYPE_PROHIBIT) {
- pr_err("ipv6 PROHIBT (THROW, NAT ..) matching not supported\n");
+ pr_err("ipv6 PROHIBIT (THROW, NAT ..) matching not supported\n");
return -EINVAL;
}
if ((info->source | info->dest) & XT_ADDRTYPE_BROADCAST) {
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index f8b71911037..06df2b9110f 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -19,12 +19,12 @@
#include <net/icmp.h>
#include <net/sock.h>
#include <net/inet_sock.h>
-#include <net/netfilter/nf_tproxy_core.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
#define XT_SOCKET_HAVE_IPV6 1
#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <net/inet6_hashtables.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#endif
@@ -101,6 +101,43 @@ extract_icmp4_fields(const struct sk_buff *skb,
return 0;
}
+/* "socket" match based redirection (no specific rule)
+ * ===================================================
+ *
+ * There are connections with dynamic endpoints (e.g. FTP data
+ * connection) that the user is unable to add explicit rules
+ * for. These are taken care of by a generic "socket" rule. It is
+ * assumed that the proxy application is trusted to open such
+ * connections without explicit iptables rule (except of course the
+ * generic 'socket' rule). In this case the following sockets are
+ * matched in preference order:
+ *
+ * - match: if there's a fully established connection matching the
+ * _packet_ tuple
+ *
+ * - match: if there's a non-zero bound listener (possibly with a
+ *     non-local address). We don't accept zero-bound listeners, since
+ * then local services could intercept traffic going through the
+ * box.
+ */
+static struct sock *
+xt_socket_get_sock_v4(struct net *net, const u8 protocol,
+ const __be32 saddr, const __be32 daddr,
+ const __be16 sport, const __be16 dport,
+ const struct net_device *in)
+{
+ switch (protocol) {
+ case IPPROTO_TCP:
+ return __inet_lookup(net, &tcp_hashinfo,
+ saddr, sport, daddr, dport,
+ in->ifindex);
+ case IPPROTO_UDP:
+ return udp4_lib_lookup(net, saddr, sport, daddr, dport,
+ in->ifindex);
+ }
+ return NULL;
+}
+
static bool
socket_match(const struct sk_buff *skb, struct xt_action_param *par,
const struct xt_socket_mtinfo1 *info)
@@ -156,9 +193,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
#endif
if (!sk)
- sk = nf_tproxy_get_sock_v4(dev_net(skb->dev), protocol,
+ sk = xt_socket_get_sock_v4(dev_net(skb->dev), protocol,
saddr, daddr, sport, dport,
- par->in, NFT_LOOKUP_ANY);
+ par->in);
if (sk) {
bool wildcard;
bool transparent = true;
@@ -172,7 +209,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
/* Ignore non-transparent sockets,
if XT_SOCKET_TRANSPARENT is used */
- if (info && info->flags & XT_SOCKET_TRANSPARENT)
+ if (info->flags & XT_SOCKET_TRANSPARENT)
transparent = ((sk->sk_state != TCP_TIME_WAIT &&
inet_sk(sk)->transparent) ||
(sk->sk_state == TCP_TIME_WAIT &&
@@ -196,7 +233,11 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
static bool
socket_mt4_v0(const struct sk_buff *skb, struct xt_action_param *par)
{
- return socket_match(skb, par, NULL);
+ static struct xt_socket_mtinfo1 xt_info_v0 = {
+ .flags = 0,
+ };
+
+ return socket_match(skb, par, &xt_info_v0);
}
static bool
@@ -261,6 +302,25 @@ extract_icmp6_fields(const struct sk_buff *skb,
return 0;
}
+static struct sock *
+xt_socket_get_sock_v6(struct net *net, const u8 protocol,
+ const struct in6_addr *saddr, const struct in6_addr *daddr,
+ const __be16 sport, const __be16 dport,
+ const struct net_device *in)
+{
+ switch (protocol) {
+ case IPPROTO_TCP:
+ return inet6_lookup(net, &tcp_hashinfo,
+ saddr, sport, daddr, dport,
+ in->ifindex);
+ case IPPROTO_UDP:
+ return udp6_lib_lookup(net, saddr, sport, daddr, dport,
+ in->ifindex);
+ }
+
+ return NULL;
+}
+
static bool
socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
{
@@ -298,9 +358,9 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
}
if (!sk)
- sk = nf_tproxy_get_sock_v6(dev_net(skb->dev), tproto,
+ sk = xt_socket_get_sock_v6(dev_net(skb->dev), tproto,
saddr, daddr, sport, dport,
- par->in, NFT_LOOKUP_ANY);
+ par->in);
if (sk) {
bool wildcard;
bool transparent = true;
@@ -314,7 +374,7 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par)
/* Ignore non-transparent sockets,
if XT_SOCKET_TRANSPARENT is used */
- if (info && info->flags & XT_SOCKET_TRANSPARENT)
+ if (info->flags & XT_SOCKET_TRANSPARENT)
transparent = ((sk->sk_state != TCP_TIME_WAIT &&
inet_sk(sk)->transparent) ||
(sk->sk_state == TCP_TIME_WAIT &&
diff --git a/net/netlabel/netlabel_cipso_v4.c b/net/netlabel/netlabel_cipso_v4.c
index c15042f987b..a1100640495 100644
--- a/net/netlabel/netlabel_cipso_v4.c
+++ b/net/netlabel/netlabel_cipso_v4.c
@@ -691,8 +691,8 @@ static int netlbl_cipsov4_remove_cb(struct netlbl_dom_map *entry, void *arg)
{
struct netlbl_domhsh_walk_arg *cb_arg = arg;
- if (entry->type == NETLBL_NLTYPE_CIPSOV4 &&
- entry->type_def.cipsov4->doi == cb_arg->doi)
+ if (entry->def.type == NETLBL_NLTYPE_CIPSOV4 &&
+ entry->def.cipso->doi == cb_arg->doi)
return netlbl_domhsh_remove_entry(entry, cb_arg->audit_info);
return 0;
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index 6bb1d42f0fa..85d842e6e43 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -84,15 +84,15 @@ static void netlbl_domhsh_free_entry(struct rcu_head *entry)
#endif /* IPv6 */
ptr = container_of(entry, struct netlbl_dom_map, rcu);
- if (ptr->type == NETLBL_NLTYPE_ADDRSELECT) {
+ if (ptr->def.type == NETLBL_NLTYPE_ADDRSELECT) {
netlbl_af4list_foreach_safe(iter4, tmp4,
- &ptr->type_def.addrsel->list4) {
+ &ptr->def.addrsel->list4) {
netlbl_af4list_remove_entry(iter4);
kfree(netlbl_domhsh_addr4_entry(iter4));
}
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_safe(iter6, tmp6,
- &ptr->type_def.addrsel->list6) {
+ &ptr->def.addrsel->list6) {
netlbl_af6list_remove_entry(iter6);
kfree(netlbl_domhsh_addr6_entry(iter6));
}
@@ -213,21 +213,21 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry,
if (addr4 != NULL) {
struct netlbl_domaddr4_map *map4;
map4 = netlbl_domhsh_addr4_entry(addr4);
- type = map4->type;
- cipsov4 = map4->type_def.cipsov4;
+ type = map4->def.type;
+ cipsov4 = map4->def.cipso;
netlbl_af4list_audit_addr(audit_buf, 0, NULL,
addr4->addr, addr4->mask);
#if IS_ENABLED(CONFIG_IPV6)
} else if (addr6 != NULL) {
struct netlbl_domaddr6_map *map6;
map6 = netlbl_domhsh_addr6_entry(addr6);
- type = map6->type;
+ type = map6->def.type;
netlbl_af6list_audit_addr(audit_buf, 0, NULL,
&addr6->addr, &addr6->mask);
#endif /* IPv6 */
} else {
- type = entry->type;
- cipsov4 = entry->type_def.cipsov4;
+ type = entry->def.type;
+ cipsov4 = entry->def.cipso;
}
switch (type) {
case NETLBL_NLTYPE_UNLABELED:
@@ -265,26 +265,25 @@ static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry)
if (entry == NULL)
return -EINVAL;
- switch (entry->type) {
+ switch (entry->def.type) {
case NETLBL_NLTYPE_UNLABELED:
- if (entry->type_def.cipsov4 != NULL ||
- entry->type_def.addrsel != NULL)
+ if (entry->def.cipso != NULL || entry->def.addrsel != NULL)
return -EINVAL;
break;
case NETLBL_NLTYPE_CIPSOV4:
- if (entry->type_def.cipsov4 == NULL)
+ if (entry->def.cipso == NULL)
return -EINVAL;
break;
case NETLBL_NLTYPE_ADDRSELECT:
- netlbl_af4list_foreach(iter4, &entry->type_def.addrsel->list4) {
+ netlbl_af4list_foreach(iter4, &entry->def.addrsel->list4) {
map4 = netlbl_domhsh_addr4_entry(iter4);
- switch (map4->type) {
+ switch (map4->def.type) {
case NETLBL_NLTYPE_UNLABELED:
- if (map4->type_def.cipsov4 != NULL)
+ if (map4->def.cipso != NULL)
return -EINVAL;
break;
case NETLBL_NLTYPE_CIPSOV4:
- if (map4->type_def.cipsov4 == NULL)
+ if (map4->def.cipso == NULL)
return -EINVAL;
break;
default:
@@ -292,9 +291,9 @@ static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry)
}
}
#if IS_ENABLED(CONFIG_IPV6)
- netlbl_af6list_foreach(iter6, &entry->type_def.addrsel->list6) {
+ netlbl_af6list_foreach(iter6, &entry->def.addrsel->list6) {
map6 = netlbl_domhsh_addr6_entry(iter6);
- switch (map6->type) {
+ switch (map6->def.type) {
case NETLBL_NLTYPE_UNLABELED:
break;
default:
@@ -402,32 +401,31 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
rcu_assign_pointer(netlbl_domhsh_def, entry);
}
- if (entry->type == NETLBL_NLTYPE_ADDRSELECT) {
+ if (entry->def.type == NETLBL_NLTYPE_ADDRSELECT) {
netlbl_af4list_foreach_rcu(iter4,
- &entry->type_def.addrsel->list4)
+ &entry->def.addrsel->list4)
netlbl_domhsh_audit_add(entry, iter4, NULL,
ret_val, audit_info);
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_rcu(iter6,
- &entry->type_def.addrsel->list6)
+ &entry->def.addrsel->list6)
netlbl_domhsh_audit_add(entry, NULL, iter6,
ret_val, audit_info);
#endif /* IPv6 */
} else
netlbl_domhsh_audit_add(entry, NULL, NULL,
ret_val, audit_info);
- } else if (entry_old->type == NETLBL_NLTYPE_ADDRSELECT &&
- entry->type == NETLBL_NLTYPE_ADDRSELECT) {
+ } else if (entry_old->def.type == NETLBL_NLTYPE_ADDRSELECT &&
+ entry->def.type == NETLBL_NLTYPE_ADDRSELECT) {
struct list_head *old_list4;
struct list_head *old_list6;
- old_list4 = &entry_old->type_def.addrsel->list4;
- old_list6 = &entry_old->type_def.addrsel->list6;
+ old_list4 = &entry_old->def.addrsel->list4;
+ old_list6 = &entry_old->def.addrsel->list6;
/* we only allow the addition of address selectors if all of
* the selectors do not exist in the existing domain map */
- netlbl_af4list_foreach_rcu(iter4,
- &entry->type_def.addrsel->list4)
+ netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4)
if (netlbl_af4list_search_exact(iter4->addr,
iter4->mask,
old_list4)) {
@@ -435,8 +433,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
goto add_return;
}
#if IS_ENABLED(CONFIG_IPV6)
- netlbl_af6list_foreach_rcu(iter6,
- &entry->type_def.addrsel->list6)
+ netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6)
if (netlbl_af6list_search_exact(&iter6->addr,
&iter6->mask,
old_list6)) {
@@ -446,7 +443,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
#endif /* IPv6 */
netlbl_af4list_foreach_safe(iter4, tmp4,
- &entry->type_def.addrsel->list4) {
+ &entry->def.addrsel->list4) {
netlbl_af4list_remove_entry(iter4);
iter4->valid = 1;
ret_val = netlbl_af4list_add(iter4, old_list4);
@@ -457,7 +454,7 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
}
#if IS_ENABLED(CONFIG_IPV6)
netlbl_af6list_foreach_safe(iter6, tmp6,
- &entry->type_def.addrsel->list6) {
+ &entry->def.addrsel->list6) {
netlbl_af6list_remove_entry(iter6);
iter6->valid = 1;
ret_val = netlbl_af6list_add(iter6, old_list6);
@@ -538,18 +535,18 @@ int netlbl_domhsh_remove_entry(struct netlbl_dom_map *entry,
struct netlbl_af4list *iter4;
struct netlbl_domaddr4_map *map4;
- switch (entry->type) {
+ switch (entry->def.type) {
case NETLBL_NLTYPE_ADDRSELECT:
netlbl_af4list_foreach_rcu(iter4,
- &entry->type_def.addrsel->list4) {
+ &entry->def.addrsel->list4) {
map4 = netlbl_domhsh_addr4_entry(iter4);
- cipso_v4_doi_putdef(map4->type_def.cipsov4);
+ cipso_v4_doi_putdef(map4->def.cipso);
}
/* no need to check the IPv6 list since we currently
* support only unlabeled protocols for IPv6 */
break;
case NETLBL_NLTYPE_CIPSOV4:
- cipso_v4_doi_putdef(entry->type_def.cipsov4);
+ cipso_v4_doi_putdef(entry->def.cipso);
break;
}
call_rcu(&entry->rcu, netlbl_domhsh_free_entry);
@@ -590,20 +587,21 @@ int netlbl_domhsh_remove_af4(const char *domain,
entry_map = netlbl_domhsh_search(domain);
else
entry_map = netlbl_domhsh_search_def(domain);
- if (entry_map == NULL || entry_map->type != NETLBL_NLTYPE_ADDRSELECT)
+ if (entry_map == NULL ||
+ entry_map->def.type != NETLBL_NLTYPE_ADDRSELECT)
goto remove_af4_failure;
spin_lock(&netlbl_domhsh_lock);
entry_addr = netlbl_af4list_remove(addr->s_addr, mask->s_addr,
- &entry_map->type_def.addrsel->list4);
+ &entry_map->def.addrsel->list4);
spin_unlock(&netlbl_domhsh_lock);
if (entry_addr == NULL)
goto remove_af4_failure;
- netlbl_af4list_foreach_rcu(iter4, &entry_map->type_def.addrsel->list4)
+ netlbl_af4list_foreach_rcu(iter4, &entry_map->def.addrsel->list4)
goto remove_af4_single_addr;
#if IS_ENABLED(CONFIG_IPV6)
- netlbl_af6list_foreach_rcu(iter6, &entry_map->type_def.addrsel->list6)
+ netlbl_af6list_foreach_rcu(iter6, &entry_map->def.addrsel->list6)
goto remove_af4_single_addr;
#endif /* IPv6 */
/* the domain mapping is empty so remove it from the mapping table */
@@ -616,7 +614,7 @@ remove_af4_single_addr:
* shouldn't be a problem */
synchronize_rcu();
entry = netlbl_domhsh_addr4_entry(entry_addr);
- cipso_v4_doi_putdef(entry->type_def.cipsov4);
+ cipso_v4_doi_putdef(entry->def.cipso);
kfree(entry);
return 0;
@@ -693,8 +691,8 @@ struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain)
* responsible for ensuring that rcu_read_[un]lock() is called.
*
*/
-struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain,
- __be32 addr)
+struct netlbl_dommap_def *netlbl_domhsh_getentry_af4(const char *domain,
+ __be32 addr)
{
struct netlbl_dom_map *dom_iter;
struct netlbl_af4list *addr_iter;
@@ -702,15 +700,13 @@ struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain,
dom_iter = netlbl_domhsh_search_def(domain);
if (dom_iter == NULL)
return NULL;
- if (dom_iter->type != NETLBL_NLTYPE_ADDRSELECT)
- return NULL;
- addr_iter = netlbl_af4list_search(addr,
- &dom_iter->type_def.addrsel->list4);
+ if (dom_iter->def.type != NETLBL_NLTYPE_ADDRSELECT)
+ return &dom_iter->def;
+ addr_iter = netlbl_af4list_search(addr, &dom_iter->def.addrsel->list4);
if (addr_iter == NULL)
return NULL;
-
- return netlbl_domhsh_addr4_entry(addr_iter);
+ return &(netlbl_domhsh_addr4_entry(addr_iter)->def);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -725,7 +721,7 @@ struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain,
* responsible for ensuring that rcu_read_[un]lock() is called.
*
*/
-struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain,
+struct netlbl_dommap_def *netlbl_domhsh_getentry_af6(const char *domain,
const struct in6_addr *addr)
{
struct netlbl_dom_map *dom_iter;
@@ -734,15 +730,13 @@ struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain,
dom_iter = netlbl_domhsh_search_def(domain);
if (dom_iter == NULL)
return NULL;
- if (dom_iter->type != NETLBL_NLTYPE_ADDRSELECT)
- return NULL;
- addr_iter = netlbl_af6list_search(addr,
- &dom_iter->type_def.addrsel->list6);
+ if (dom_iter->def.type != NETLBL_NLTYPE_ADDRSELECT)
+ return &dom_iter->def;
+ addr_iter = netlbl_af6list_search(addr, &dom_iter->def.addrsel->list6);
if (addr_iter == NULL)
return NULL;
-
- return netlbl_domhsh_addr6_entry(addr_iter);
+ return &(netlbl_domhsh_addr6_entry(addr_iter)->def);
}
#endif /* IPv6 */
diff --git a/net/netlabel/netlabel_domainhash.h b/net/netlabel/netlabel_domainhash.h
index 90872c4ca30..b9be0eed898 100644
--- a/net/netlabel/netlabel_domainhash.h
+++ b/net/netlabel/netlabel_domainhash.h
@@ -43,37 +43,35 @@
#define NETLBL_DOMHSH_BITSIZE 7
/* Domain mapping definition structures */
+struct netlbl_domaddr_map {
+ struct list_head list4;
+ struct list_head list6;
+};
+struct netlbl_dommap_def {
+ u32 type;
+ union {
+ struct netlbl_domaddr_map *addrsel;
+ struct cipso_v4_doi *cipso;
+ };
+};
#define netlbl_domhsh_addr4_entry(iter) \
container_of(iter, struct netlbl_domaddr4_map, list)
struct netlbl_domaddr4_map {
- u32 type;
- union {
- struct cipso_v4_doi *cipsov4;
- } type_def;
+ struct netlbl_dommap_def def;
struct netlbl_af4list list;
};
#define netlbl_domhsh_addr6_entry(iter) \
container_of(iter, struct netlbl_domaddr6_map, list)
struct netlbl_domaddr6_map {
- u32 type;
-
- /* NOTE: no 'type_def' union needed at present since we don't currently
- * support any IPv6 labeling protocols */
+ struct netlbl_dommap_def def;
struct netlbl_af6list list;
};
-struct netlbl_domaddr_map {
- struct list_head list4;
- struct list_head list6;
-};
+
struct netlbl_dom_map {
char *domain;
- u32 type;
- union {
- struct cipso_v4_doi *cipsov4;
- struct netlbl_domaddr_map *addrsel;
- } type_def;
+ struct netlbl_dommap_def def;
u32 valid;
struct list_head list;
@@ -97,16 +95,16 @@ int netlbl_domhsh_remove_af4(const char *domain,
int netlbl_domhsh_remove(const char *domain, struct netlbl_audit *audit_info);
int netlbl_domhsh_remove_default(struct netlbl_audit *audit_info);
struct netlbl_dom_map *netlbl_domhsh_getentry(const char *domain);
-struct netlbl_domaddr4_map *netlbl_domhsh_getentry_af4(const char *domain,
- __be32 addr);
+struct netlbl_dommap_def *netlbl_domhsh_getentry_af4(const char *domain,
+ __be32 addr);
+#if IS_ENABLED(CONFIG_IPV6)
+struct netlbl_dommap_def *netlbl_domhsh_getentry_af6(const char *domain,
+ const struct in6_addr *addr);
+#endif /* IPv6 */
+
int netlbl_domhsh_walk(u32 *skip_bkt,
u32 *skip_chain,
int (*callback) (struct netlbl_dom_map *entry, void *arg),
void *cb_arg);
-#if IS_ENABLED(CONFIG_IPV6)
-struct netlbl_domaddr6_map *netlbl_domhsh_getentry_af6(const char *domain,
- const struct in6_addr *addr);
-#endif /* IPv6 */
-
#endif
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index 7c94aedd091..96a458e12f6 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -122,7 +122,7 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
}
if (addr == NULL && mask == NULL)
- entry->type = NETLBL_NLTYPE_UNLABELED;
+ entry->def.type = NETLBL_NLTYPE_UNLABELED;
else if (addr != NULL && mask != NULL) {
addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC);
if (addrmap == NULL)
@@ -137,7 +137,7 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
map4 = kzalloc(sizeof(*map4), GFP_ATOMIC);
if (map4 == NULL)
goto cfg_unlbl_map_add_failure;
- map4->type = NETLBL_NLTYPE_UNLABELED;
+ map4->def.type = NETLBL_NLTYPE_UNLABELED;
map4->list.addr = addr4->s_addr & mask4->s_addr;
map4->list.mask = mask4->s_addr;
map4->list.valid = 1;
@@ -154,7 +154,7 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
map6 = kzalloc(sizeof(*map6), GFP_ATOMIC);
if (map6 == NULL)
goto cfg_unlbl_map_add_failure;
- map6->type = NETLBL_NLTYPE_UNLABELED;
+ map6->def.type = NETLBL_NLTYPE_UNLABELED;
map6->list.addr = *addr6;
map6->list.addr.s6_addr32[0] &= mask6->s6_addr32[0];
map6->list.addr.s6_addr32[1] &= mask6->s6_addr32[1];
@@ -174,8 +174,8 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
break;
}
- entry->type_def.addrsel = addrmap;
- entry->type = NETLBL_NLTYPE_ADDRSELECT;
+ entry->def.addrsel = addrmap;
+ entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
} else {
ret_val = -EINVAL;
goto cfg_unlbl_map_add_failure;
@@ -355,8 +355,8 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
}
if (addr == NULL && mask == NULL) {
- entry->type_def.cipsov4 = doi_def;
- entry->type = NETLBL_NLTYPE_CIPSOV4;
+ entry->def.cipso = doi_def;
+ entry->def.type = NETLBL_NLTYPE_CIPSOV4;
} else if (addr != NULL && mask != NULL) {
addrmap = kzalloc(sizeof(*addrmap), GFP_ATOMIC);
if (addrmap == NULL)
@@ -367,8 +367,8 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
addrinfo = kzalloc(sizeof(*addrinfo), GFP_ATOMIC);
if (addrinfo == NULL)
goto out_addrinfo;
- addrinfo->type_def.cipsov4 = doi_def;
- addrinfo->type = NETLBL_NLTYPE_CIPSOV4;
+ addrinfo->def.cipso = doi_def;
+ addrinfo->def.type = NETLBL_NLTYPE_CIPSOV4;
addrinfo->list.addr = addr->s_addr & mask->s_addr;
addrinfo->list.mask = mask->s_addr;
addrinfo->list.valid = 1;
@@ -376,8 +376,8 @@ int netlbl_cfg_cipsov4_map_add(u32 doi,
if (ret_val != 0)
goto cfg_cipsov4_map_add_failure;
- entry->type_def.addrsel = addrmap;
- entry->type = NETLBL_NLTYPE_ADDRSELECT;
+ entry->def.addrsel = addrmap;
+ entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
} else {
ret_val = -EINVAL;
goto out_addrmap;
@@ -657,14 +657,14 @@ int netlbl_sock_setattr(struct sock *sk,
}
switch (family) {
case AF_INET:
- switch (dom_entry->type) {
+ switch (dom_entry->def.type) {
case NETLBL_NLTYPE_ADDRSELECT:
ret_val = -EDESTADDRREQ;
break;
case NETLBL_NLTYPE_CIPSOV4:
ret_val = cipso_v4_sock_setattr(sk,
- dom_entry->type_def.cipsov4,
- secattr);
+ dom_entry->def.cipso,
+ secattr);
break;
case NETLBL_NLTYPE_UNLABELED:
ret_val = 0;
@@ -754,23 +754,22 @@ int netlbl_conn_setattr(struct sock *sk,
{
int ret_val;
struct sockaddr_in *addr4;
- struct netlbl_domaddr4_map *af4_entry;
+ struct netlbl_dommap_def *entry;
rcu_read_lock();
switch (addr->sa_family) {
case AF_INET:
addr4 = (struct sockaddr_in *)addr;
- af4_entry = netlbl_domhsh_getentry_af4(secattr->domain,
- addr4->sin_addr.s_addr);
- if (af4_entry == NULL) {
+ entry = netlbl_domhsh_getentry_af4(secattr->domain,
+ addr4->sin_addr.s_addr);
+ if (entry == NULL) {
ret_val = -ENOENT;
goto conn_setattr_return;
}
- switch (af4_entry->type) {
+ switch (entry->type) {
case NETLBL_NLTYPE_CIPSOV4:
ret_val = cipso_v4_sock_setattr(sk,
- af4_entry->type_def.cipsov4,
- secattr);
+ entry->cipso, secattr);
break;
case NETLBL_NLTYPE_UNLABELED:
/* just delete the protocols we support for right now
@@ -812,36 +811,21 @@ int netlbl_req_setattr(struct request_sock *req,
const struct netlbl_lsm_secattr *secattr)
{
int ret_val;
- struct netlbl_dom_map *dom_entry;
- struct netlbl_domaddr4_map *af4_entry;
- u32 proto_type;
- struct cipso_v4_doi *proto_cv4;
+ struct netlbl_dommap_def *entry;
rcu_read_lock();
- dom_entry = netlbl_domhsh_getentry(secattr->domain);
- if (dom_entry == NULL) {
- ret_val = -ENOENT;
- goto req_setattr_return;
- }
switch (req->rsk_ops->family) {
case AF_INET:
- if (dom_entry->type == NETLBL_NLTYPE_ADDRSELECT) {
- struct inet_request_sock *req_inet = inet_rsk(req);
- af4_entry = netlbl_domhsh_getentry_af4(secattr->domain,
- req_inet->rmt_addr);
- if (af4_entry == NULL) {
- ret_val = -ENOENT;
- goto req_setattr_return;
- }
- proto_type = af4_entry->type;
- proto_cv4 = af4_entry->type_def.cipsov4;
- } else {
- proto_type = dom_entry->type;
- proto_cv4 = dom_entry->type_def.cipsov4;
+ entry = netlbl_domhsh_getentry_af4(secattr->domain,
+ inet_rsk(req)->rmt_addr);
+ if (entry == NULL) {
+ ret_val = -ENOENT;
+ goto req_setattr_return;
}
- switch (proto_type) {
+ switch (entry->type) {
case NETLBL_NLTYPE_CIPSOV4:
- ret_val = cipso_v4_req_setattr(req, proto_cv4, secattr);
+ ret_val = cipso_v4_req_setattr(req,
+ entry->cipso, secattr);
break;
case NETLBL_NLTYPE_UNLABELED:
/* just delete the protocols we support for right now
@@ -899,23 +883,21 @@ int netlbl_skbuff_setattr(struct sk_buff *skb,
{
int ret_val;
struct iphdr *hdr4;
- struct netlbl_domaddr4_map *af4_entry;
+ struct netlbl_dommap_def *entry;
rcu_read_lock();
switch (family) {
case AF_INET:
hdr4 = ip_hdr(skb);
- af4_entry = netlbl_domhsh_getentry_af4(secattr->domain,
- hdr4->daddr);
- if (af4_entry == NULL) {
+ entry = netlbl_domhsh_getentry_af4(secattr->domain,hdr4->daddr);
+ if (entry == NULL) {
ret_val = -ENOENT;
goto skbuff_setattr_return;
}
- switch (af4_entry->type) {
+ switch (entry->type) {
case NETLBL_NLTYPE_CIPSOV4:
- ret_val = cipso_v4_skbuff_setattr(skb,
- af4_entry->type_def.cipsov4,
- secattr);
+ ret_val = cipso_v4_skbuff_setattr(skb, entry->cipso,
+ secattr);
break;
case NETLBL_NLTYPE_UNLABELED:
/* just delete the protocols we support for right now
diff --git a/net/netlabel/netlabel_mgmt.c b/net/netlabel/netlabel_mgmt.c
index c5384ffc614..dd1c37d7acb 100644
--- a/net/netlabel/netlabel_mgmt.c
+++ b/net/netlabel/netlabel_mgmt.c
@@ -104,7 +104,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
ret_val = -ENOMEM;
goto add_failure;
}
- entry->type = nla_get_u32(info->attrs[NLBL_MGMT_A_PROTOCOL]);
+ entry->def.type = nla_get_u32(info->attrs[NLBL_MGMT_A_PROTOCOL]);
if (info->attrs[NLBL_MGMT_A_DOMAIN]) {
size_t tmp_size = nla_len(info->attrs[NLBL_MGMT_A_DOMAIN]);
entry->domain = kmalloc(tmp_size, GFP_KERNEL);
@@ -116,12 +116,12 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
info->attrs[NLBL_MGMT_A_DOMAIN], tmp_size);
}
- /* NOTE: internally we allow/use a entry->type value of
+ * NOTE: internally we allow/use an entry->def.type value of
* NETLBL_NLTYPE_ADDRSELECT but we don't currently allow users
* to pass that as a protocol value because we need to know the
* "real" protocol */
- switch (entry->type) {
+ switch (entry->def.type) {
case NETLBL_NLTYPE_UNLABELED:
break;
case NETLBL_NLTYPE_CIPSOV4:
@@ -132,7 +132,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
cipsov4 = cipso_v4_doi_getdef(tmp_val);
if (cipsov4 == NULL)
goto add_failure;
- entry->type_def.cipsov4 = cipsov4;
+ entry->def.cipso = cipsov4;
break;
default:
goto add_failure;
@@ -172,9 +172,9 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
map->list.addr = addr->s_addr & mask->s_addr;
map->list.mask = mask->s_addr;
map->list.valid = 1;
- map->type = entry->type;
+ map->def.type = entry->def.type;
if (cipsov4)
- map->type_def.cipsov4 = cipsov4;
+ map->def.cipso = cipsov4;
ret_val = netlbl_af4list_add(&map->list, &addrmap->list4);
if (ret_val != 0) {
@@ -182,8 +182,8 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
goto add_failure;
}
- entry->type = NETLBL_NLTYPE_ADDRSELECT;
- entry->type_def.addrsel = addrmap;
+ entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
+ entry->def.addrsel = addrmap;
#if IS_ENABLED(CONFIG_IPV6)
} else if (info->attrs[NLBL_MGMT_A_IPV6ADDR]) {
struct in6_addr *addr;
@@ -223,7 +223,7 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
map->list.addr.s6_addr32[3] &= mask->s6_addr32[3];
map->list.mask = *mask;
map->list.valid = 1;
- map->type = entry->type;
+ map->def.type = entry->def.type;
ret_val = netlbl_af6list_add(&map->list, &addrmap->list6);
if (ret_val != 0) {
@@ -231,8 +231,8 @@ static int netlbl_mgmt_add_common(struct genl_info *info,
goto add_failure;
}
- entry->type = NETLBL_NLTYPE_ADDRSELECT;
- entry->type_def.addrsel = addrmap;
+ entry->def.type = NETLBL_NLTYPE_ADDRSELECT;
+ entry->def.addrsel = addrmap;
#endif /* IPv6 */
}
@@ -281,14 +281,13 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
return ret_val;
}
- switch (entry->type) {
+ switch (entry->def.type) {
case NETLBL_NLTYPE_ADDRSELECT:
nla_a = nla_nest_start(skb, NLBL_MGMT_A_SELECTORLIST);
if (nla_a == NULL)
return -ENOMEM;
- netlbl_af4list_foreach_rcu(iter4,
- &entry->type_def.addrsel->list4) {
+ netlbl_af4list_foreach_rcu(iter4, &entry->def.addrsel->list4) {
struct netlbl_domaddr4_map *map4;
struct in_addr addr_struct;
@@ -310,13 +309,13 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
return ret_val;
map4 = netlbl_domhsh_addr4_entry(iter4);
ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL,
- map4->type);
+ map4->def.type);
if (ret_val != 0)
return ret_val;
- switch (map4->type) {
+ switch (map4->def.type) {
case NETLBL_NLTYPE_CIPSOV4:
ret_val = nla_put_u32(skb, NLBL_MGMT_A_CV4DOI,
- map4->type_def.cipsov4->doi);
+ map4->def.cipso->doi);
if (ret_val != 0)
return ret_val;
break;
@@ -325,8 +324,7 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
nla_nest_end(skb, nla_b);
}
#if IS_ENABLED(CONFIG_IPV6)
- netlbl_af6list_foreach_rcu(iter6,
- &entry->type_def.addrsel->list6) {
+ netlbl_af6list_foreach_rcu(iter6, &entry->def.addrsel->list6) {
struct netlbl_domaddr6_map *map6;
nla_b = nla_nest_start(skb, NLBL_MGMT_A_ADDRSELECTOR);
@@ -345,7 +343,7 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
return ret_val;
map6 = netlbl_domhsh_addr6_entry(iter6);
ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL,
- map6->type);
+ map6->def.type);
if (ret_val != 0)
return ret_val;
@@ -356,14 +354,14 @@ static int netlbl_mgmt_listentry(struct sk_buff *skb,
nla_nest_end(skb, nla_a);
break;
case NETLBL_NLTYPE_UNLABELED:
- ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, entry->type);
+ ret_val = nla_put_u32(skb,NLBL_MGMT_A_PROTOCOL,entry->def.type);
break;
case NETLBL_NLTYPE_CIPSOV4:
- ret_val = nla_put_u32(skb, NLBL_MGMT_A_PROTOCOL, entry->type);
+ ret_val = nla_put_u32(skb,NLBL_MGMT_A_PROTOCOL,entry->def.type);
if (ret_val != 0)
return ret_val;
ret_val = nla_put_u32(skb, NLBL_MGMT_A_CV4DOI,
- entry->type_def.cipsov4->doi);
+ entry->def.cipso->doi);
break;
}
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index af3531926ee..8f0897407a2 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -1541,7 +1541,7 @@ int __init netlbl_unlabel_defconf(void)
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (entry == NULL)
return -ENOMEM;
- entry->type = NETLBL_NLTYPE_UNLABELED;
+ entry->def.type = NETLBL_NLTYPE_UNLABELED;
ret_val = netlbl_domhsh_add_default(entry, &audit_info);
if (ret_val != 0)
return ret_val;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 0c61b59175d..a17dda1bbee 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -294,14 +294,14 @@ static void **alloc_pg_vec(struct netlink_sock *nlk,
{
unsigned int block_nr = req->nm_block_nr;
unsigned int i;
- void **pg_vec, *ptr;
+ void **pg_vec;
pg_vec = kcalloc(block_nr, sizeof(void *), GFP_KERNEL);
if (pg_vec == NULL)
return NULL;
for (i = 0; i < block_nr; i++) {
- pg_vec[i] = ptr = alloc_one_pg_vec_page(order);
+ pg_vec[i] = alloc_one_pg_vec_page(order);
if (pg_vec[i] == NULL)
goto err1;
}
@@ -595,7 +595,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
* for dumps is performed here. A dump is allowed to continue
* if at least half the ring is unused.
*/
- while (nlk->cb != NULL && netlink_dump_space(nlk)) {
+ while (nlk->cb_running && netlink_dump_space(nlk)) {
err = netlink_dump(sk);
if (err < 0) {
sk->sk_err = err;
@@ -802,18 +802,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
#define netlink_mmap_sendmsg(sk, msg, dst_portid, dst_group, siocb) 0
#endif /* CONFIG_NETLINK_MMAP */
-static void netlink_destroy_callback(struct netlink_callback *cb)
-{
- kfree_skb(cb->skb);
- kfree(cb);
-}
-
-static void netlink_consume_callback(struct netlink_callback *cb)
-{
- consume_skb(cb->skb);
- kfree(cb);
-}
-
static void netlink_skb_destructor(struct sk_buff *skb)
{
#ifdef CONFIG_NETLINK_MMAP
@@ -872,12 +860,12 @@ static void netlink_sock_destruct(struct sock *sk)
{
struct netlink_sock *nlk = nlk_sk(sk);
- if (nlk->cb) {
- if (nlk->cb->done)
- nlk->cb->done(nlk->cb);
+ if (nlk->cb_running) {
+ if (nlk->cb.done)
+ nlk->cb.done(&nlk->cb);
- module_put(nlk->cb->module);
- netlink_destroy_callback(nlk->cb);
+ module_put(nlk->cb.module);
+ kfree_skb(nlk->cb.skb);
}
skb_queue_purge(&sk->sk_receive_queue);
@@ -2350,7 +2338,8 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
skb_free_datagram(sk, skb);
- if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
+ if (nlk->cb_running &&
+ atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
ret = netlink_dump(sk);
if (ret) {
sk->sk_err = ret;
@@ -2566,13 +2555,12 @@ static int netlink_dump(struct sock *sk)
int alloc_size;
mutex_lock(nlk->cb_mutex);
-
- cb = nlk->cb;
- if (cb == NULL) {
+ if (!nlk->cb_running) {
err = -EINVAL;
goto errout_skb;
}
+ cb = &nlk->cb;
alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
if (!netlink_rx_is_mmaped(sk) &&
@@ -2610,11 +2598,11 @@ static int netlink_dump(struct sock *sk)
if (cb->done)
cb->done(cb);
- nlk->cb = NULL;
- mutex_unlock(nlk->cb_mutex);
+ nlk->cb_running = false;
+ mutex_unlock(nlk->cb_mutex);
module_put(cb->module);
- netlink_consume_callback(cb);
+ consume_skb(cb->skb);
return 0;
errout_skb:
@@ -2632,59 +2620,51 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
struct netlink_sock *nlk;
int ret;
- cb = kzalloc(sizeof(*cb), GFP_KERNEL);
- if (cb == NULL)
- return -ENOBUFS;
-
/* Memory mapped dump requests need to be copied to avoid looping
* on the pending state in netlink_mmap_sendmsg() while the CB hold
* a reference to the skb.
*/
if (netlink_skb_is_mmaped(skb)) {
skb = skb_copy(skb, GFP_KERNEL);
- if (skb == NULL) {
- kfree(cb);
+ if (skb == NULL)
return -ENOBUFS;
- }
} else
atomic_inc(&skb->users);
- cb->dump = control->dump;
- cb->done = control->done;
- cb->nlh = nlh;
- cb->data = control->data;
- cb->module = control->module;
- cb->min_dump_alloc = control->min_dump_alloc;
- cb->skb = skb;
-
sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
if (sk == NULL) {
- netlink_destroy_callback(cb);
- return -ECONNREFUSED;
+ ret = -ECONNREFUSED;
+ goto error_free;
}
- nlk = nlk_sk(sk);
+ nlk = nlk_sk(sk);
mutex_lock(nlk->cb_mutex);
/* A dump is in progress... */
- if (nlk->cb) {
- mutex_unlock(nlk->cb_mutex);
- netlink_destroy_callback(cb);
+ if (nlk->cb_running) {
ret = -EBUSY;
- goto out;
+ goto error_unlock;
}
/* add reference of module which cb->dump belongs to */
- if (!try_module_get(cb->module)) {
- mutex_unlock(nlk->cb_mutex);
- netlink_destroy_callback(cb);
+ if (!try_module_get(control->module)) {
ret = -EPROTONOSUPPORT;
- goto out;
+ goto error_unlock;
}
- nlk->cb = cb;
+ cb = &nlk->cb;
+ memset(cb, 0, sizeof(*cb));
+ cb->dump = control->dump;
+ cb->done = control->done;
+ cb->nlh = nlh;
+ cb->data = control->data;
+ cb->module = control->module;
+ cb->min_dump_alloc = control->min_dump_alloc;
+ cb->skb = skb;
+
+ nlk->cb_running = true;
+
mutex_unlock(nlk->cb_mutex);
ret = netlink_dump(sk);
-out:
sock_put(sk);
if (ret)
@@ -2694,6 +2674,13 @@ out:
* signal not to send ACK even if it was requested.
*/
return -EINTR;
+
+error_unlock:
+ sock_put(sk);
+ mutex_unlock(nlk->cb_mutex);
+error_free:
+ kfree_skb(skb);
+ return ret;
}
EXPORT_SYMBOL(__netlink_dump_start);
@@ -2916,14 +2903,14 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
struct sock *s = v;
struct netlink_sock *nlk = nlk_sk(s);
- seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
+ seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %d %-8d %-8d %-8lu\n",
s,
s->sk_protocol,
nlk->portid,
nlk->groups ? (u32)nlk->groups[0] : 0,
sk_rmem_alloc_get(s),
sk_wmem_alloc_get(s),
- nlk->cb,
+ nlk->cb_running,
atomic_read(&s->sk_refcnt),
atomic_read(&s->sk_drops),
sock_i_ino(s)
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index eaa88d187cd..acbd774eeb7 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -32,7 +32,8 @@ struct netlink_sock {
unsigned long *groups;
unsigned long state;
wait_queue_head_t wait;
- struct netlink_callback *cb;
+ bool cb_running;
+ struct netlink_callback cb;
struct mutex *cb_mutex;
struct mutex cb_def_mutex;
void (*netlink_rcv)(struct sk_buff *skb);
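
The af_netlink.c and af_netlink.h changes above replace the kmalloc'd struct netlink_callback pointer with a callback embedded in the socket plus a cb_running flag: starting a dump just fills the embedded struct under cb_mutex, and tearing it down only drops the skb reference, with no kfree of a callback object. A rough userspace analogue of that shape, with illustrative names and a pthread mutex standing in for cb_mutex, might look like this:

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <string.h>

    struct dump_state {
            int (*dump)(void *arg);
            void *arg;
    };

    struct session {
            pthread_mutex_t lock;
            bool cb_running;            /* replaces the "cb != NULL" checks  */
            struct dump_state cb;       /* embedded, no separate allocation  */
    };

    static int dump_start(struct session *s, const struct dump_state *ctrl)
    {
            int err = 0;

            pthread_mutex_lock(&s->lock);
            if (s->cb_running) {
                    err = -EBUSY;       /* a dump is already in progress     */
            } else {
                    memset(&s->cb, 0, sizeof(s->cb));
                    s->cb = *ctrl;      /* copy the control fields in        */
                    s->cb_running = true;
            }
            pthread_mutex_unlock(&s->lock);
            return err;
    }

    static void dump_done(struct session *s)
    {
            pthread_mutex_lock(&s->lock);
            s->cb_running = false;      /* nothing to free: cb is embedded   */
            pthread_mutex_unlock(&s->lock);
    }

    int main(void)
    {
            struct session s = { .lock = PTHREAD_MUTEX_INITIALIZER };
            struct dump_state ctrl = { 0 };

            dump_start(&s, &ctrl);
            dump_done(&s);
            return 0;
    }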
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 2fd6dbea327..0c741cec4d0 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -364,7 +364,7 @@ int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops)
EXPORT_SYMBOL(genl_unregister_ops);
/**
- * genl_register_family - register a generic netlink family
+ * __genl_register_family - register a generic netlink family
* @family: generic netlink family
*
* Registers the specified family after validating it first. Only one
@@ -374,7 +374,7 @@ EXPORT_SYMBOL(genl_unregister_ops);
*
* Return 0 on success or a negative error code.
*/
-int genl_register_family(struct genl_family *family)
+int __genl_register_family(struct genl_family *family)
{
int err = -EINVAL;
@@ -430,10 +430,10 @@ errout_locked:
errout:
return err;
}
-EXPORT_SYMBOL(genl_register_family);
+EXPORT_SYMBOL(__genl_register_family);
/**
- * genl_register_family_with_ops - register a generic netlink family
+ * __genl_register_family_with_ops - register a generic netlink family
* @family: generic netlink family
* @ops: operations to be registered
* @n_ops: number of elements to register
@@ -457,12 +457,12 @@ EXPORT_SYMBOL(genl_register_family);
*
* Return 0 on success or a negative error code.
*/
-int genl_register_family_with_ops(struct genl_family *family,
+int __genl_register_family_with_ops(struct genl_family *family,
struct genl_ops *ops, size_t n_ops)
{
int err, i;
- err = genl_register_family(family);
+ err = __genl_register_family(family);
if (err)
return err;
@@ -476,7 +476,7 @@ err_out:
genl_unregister_family(family);
return err;
}
-EXPORT_SYMBOL(genl_register_family_with_ops);
+EXPORT_SYMBOL(__genl_register_family_with_ops);
/**
* genl_unregister_family - unregister generic netlink family
@@ -544,6 +544,30 @@ void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
}
EXPORT_SYMBOL(genlmsg_put);
+static int genl_lock_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct genl_ops *ops = cb->data;
+ int rc;
+
+ genl_lock();
+ rc = ops->dumpit(skb, cb);
+ genl_unlock();
+ return rc;
+}
+
+static int genl_lock_done(struct netlink_callback *cb)
+{
+ struct genl_ops *ops = cb->data;
+ int rc = 0;
+
+ if (ops->done) {
+ genl_lock();
+ rc = ops->done(cb);
+ genl_unlock();
+ }
+ return rc;
+}
+
static int genl_family_rcv_msg(struct genl_family *family,
struct sk_buff *skb,
struct nlmsghdr *nlh)
@@ -571,16 +595,35 @@ static int genl_family_rcv_msg(struct genl_family *family,
!capable(CAP_NET_ADMIN))
return -EPERM;
- if (nlh->nlmsg_flags & NLM_F_DUMP) {
- struct netlink_dump_control c = {
- .dump = ops->dumpit,
- .done = ops->done,
- };
+ if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
+ int rc;
if (ops->dumpit == NULL)
return -EOPNOTSUPP;
- return netlink_dump_start(net->genl_sock, skb, nlh, &c);
+ if (!family->parallel_ops) {
+ struct netlink_dump_control c = {
+ .module = family->module,
+ .data = ops,
+ .dump = genl_lock_dumpit,
+ .done = genl_lock_done,
+ };
+
+ genl_unlock();
+ rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
+ genl_lock();
+
+ } else {
+ struct netlink_dump_control c = {
+ .module = family->module,
+ .dump = ops->dumpit,
+ .done = ops->done,
+ };
+
+ rc = __netlink_dump_start(net->genl_sock, skb, nlh, &c);
+ }
+
+ return rc;
}
if (ops->doit == NULL)
@@ -877,8 +920,10 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
#ifdef CONFIG_MODULES
if (res == NULL) {
genl_unlock();
+ up_read(&cb_lock);
request_module("net-pf-%d-proto-%d-family-%s",
PF_NETLINK, NETLINK_GENERIC, name);
+ down_read(&cb_lock);
genl_lock();
res = genl_family_find_byname(name);
}
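
For families that are not parallel_ops capable, the genetlink.c hunk above routes dumps through genl_lock_dumpit()/genl_lock_done(), stashing the real ops in cb->data and wrapping each call in genl_lock()/genl_unlock(). Below is a small sketch of that wrap-the-callback-in-a-lock pattern, with made-up names and a pthread mutex in place of genl_lock():

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t family_lock = PTHREAD_MUTEX_INITIALIZER;

    struct ops {
            int (*dumpit)(void *ctx);
            int (*done)(void *ctx);
    };

    struct callback {
            void *data;                 /* carries the real ops, like cb->data */
            void *ctx;
    };

    static int locked_dumpit(struct callback *cb)
    {
            struct ops *ops = cb->data;
            int rc;

            pthread_mutex_lock(&family_lock);   /* genl_lock()   */
            rc = ops->dumpit(cb->ctx);
            pthread_mutex_unlock(&family_lock); /* genl_unlock() */
            return rc;
    }

    static int locked_done(struct callback *cb)
    {
            struct ops *ops = cb->data;
            int rc = 0;

            if (ops->done) {
                    pthread_mutex_lock(&family_lock);
                    rc = ops->done(cb->ctx);
                    pthread_mutex_unlock(&family_lock);
            }
            return rc;
    }

    static int real_dump(void *ctx) { (void)ctx; return puts("dump") < 0; }

    int main(void)
    {
            struct ops ops = { .dumpit = real_dump };
            struct callback cb = { .data = &ops };

            return locked_dumpit(&cb) | locked_done(&cb);
    }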
diff --git a/net/nfc/core.c b/net/nfc/core.c
index dc96a83aa6a..e92923cf3e0 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -44,7 +44,7 @@ DEFINE_MUTEX(nfc_devlist_mutex);
/* NFC device ID bitmap */
static DEFINE_IDA(nfc_index_ida);
-int nfc_fw_upload(struct nfc_dev *dev, const char *firmware_name)
+int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name)
{
int rc = 0;
@@ -62,28 +62,36 @@ int nfc_fw_upload(struct nfc_dev *dev, const char *firmware_name)
goto error;
}
- if (!dev->ops->fw_upload) {
+ if (!dev->ops->fw_download) {
rc = -EOPNOTSUPP;
goto error;
}
- dev->fw_upload_in_progress = true;
- rc = dev->ops->fw_upload(dev, firmware_name);
+ dev->fw_download_in_progress = true;
+ rc = dev->ops->fw_download(dev, firmware_name);
if (rc)
- dev->fw_upload_in_progress = false;
+ dev->fw_download_in_progress = false;
error:
device_unlock(&dev->dev);
return rc;
}
-int nfc_fw_upload_done(struct nfc_dev *dev, const char *firmware_name)
+/**
+ * nfc_fw_download_done - inform that a firmware download was completed
+ *
+ * @dev: The nfc device to which firmware was downloaded
+ * @firmware_name: The firmware filename
+ * @result: A standard errno value, passed as a positive number
+ */
+int nfc_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
+ u32 result)
{
- dev->fw_upload_in_progress = false;
+ dev->fw_download_in_progress = false;
- return nfc_genl_fw_upload_done(dev, firmware_name);
+ return nfc_genl_fw_download_done(dev, firmware_name, result);
}
-EXPORT_SYMBOL(nfc_fw_upload_done);
+EXPORT_SYMBOL(nfc_fw_download_done);
/**
* nfc_dev_up - turn on the NFC device
@@ -110,7 +118,7 @@ int nfc_dev_up(struct nfc_dev *dev)
goto error;
}
- if (dev->fw_upload_in_progress) {
+ if (dev->fw_download_in_progress) {
rc = -EBUSY;
goto error;
}
@@ -129,7 +137,7 @@ int nfc_dev_up(struct nfc_dev *dev)
/* We have to enable the device before discovering SEs */
if (dev->ops->discover_se) {
rc = dev->ops->discover_se(dev);
- if (!rc)
+ if (rc)
pr_warn("SE discovery failed\n");
}
@@ -575,12 +583,14 @@ int nfc_enable_se(struct nfc_dev *dev, u32 se_idx)
goto error;
}
- if (se->type == NFC_SE_ENABLED) {
+ if (se->state == NFC_SE_ENABLED) {
rc = -EALREADY;
goto error;
}
rc = dev->ops->enable_se(dev, se_idx);
+ if (rc >= 0)
+ se->state = NFC_SE_ENABLED;
error:
device_unlock(&dev->dev);
@@ -618,12 +628,14 @@ int nfc_disable_se(struct nfc_dev *dev, u32 se_idx)
goto error;
}
- if (se->type == NFC_SE_DISABLED) {
+ if (se->state == NFC_SE_DISABLED) {
rc = -EALREADY;
goto error;
}
rc = dev->ops->disable_se(dev, se_idx);
+ if (rc >= 0)
+ se->state = NFC_SE_DISABLED;
error:
device_unlock(&dev->dev);
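
The nfc_enable_se()/nfc_disable_se() fix above switches the check from se->type to se->state and, more importantly, records the new state only when the driver callback reports success. A tiny sketch of that update-the-cache-only-on-success pattern, with invented names:

    #include <errno.h>
    #include <stdio.h>

    enum se_state { SE_DISABLED, SE_ENABLED };

    struct se {
            enum se_state state;
            int (*enable_hw)(void);     /* stand-in for dev->ops->enable_se() */
    };

    static int hw_enable_ok(void) { return 0; }

    static int se_enable(struct se *se)
    {
            int rc;

            if (se->state == SE_ENABLED)
                    return -EALREADY;

            rc = se->enable_hw();
            if (rc >= 0)
                    se->state = SE_ENABLED;   /* cache state only on success */
            return rc;
    }

    int main(void)
    {
            struct se se = { SE_DISABLED, hw_enable_ok };
            int rc = se_enable(&se);

            printf("rc=%d state=%d\n", rc, se.state);
            return 0;
    }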
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index 7b1c186736e..d07ca4c5cf8 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -717,7 +717,7 @@ static int hci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx)
struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
if (hdev->ops->disable_se)
- return hdev->ops->enable_se(hdev, se_idx);
+ return hdev->ops->disable_se(hdev, se_idx);
return 0;
}
@@ -809,14 +809,14 @@ static void nfc_hci_recv_from_llc(struct nfc_hci_dev *hdev, struct sk_buff *skb)
}
}
-static int hci_fw_upload(struct nfc_dev *nfc_dev, const char *firmware_name)
+static int hci_fw_download(struct nfc_dev *nfc_dev, const char *firmware_name)
{
struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
- if (!hdev->ops->fw_upload)
+ if (!hdev->ops->fw_download)
return -ENOTSUPP;
- return hdev->ops->fw_upload(hdev, firmware_name);
+ return hdev->ops->fw_download(hdev, firmware_name);
}
static struct nfc_ops hci_nfc_ops = {
@@ -831,7 +831,7 @@ static struct nfc_ops hci_nfc_ops = {
.im_transceive = hci_transceive,
.tm_send = hci_tm_send,
.check_presence = hci_check_presence,
- .fw_upload = hci_fw_upload,
+ .fw_download = hci_fw_download,
.discover_se = hci_discover_se,
.enable_se = hci_enable_se,
.disable_se = hci_disable_se,
diff --git a/net/nfc/nci/Kconfig b/net/nfc/nci/Kconfig
index 2a2416080b4..a4f1e42e348 100644
--- a/net/nfc/nci/Kconfig
+++ b/net/nfc/nci/Kconfig
@@ -11,6 +11,7 @@ config NFC_NCI
config NFC_NCI_SPI
depends on NFC_NCI && SPI
+ select CRC_CCITT
bool "NCI over SPI protocol support"
default n
help
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index b05ad909778..68063b2025d 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -1089,7 +1089,7 @@ exit:
return rc;
}
-static int nfc_genl_fw_upload(struct sk_buff *skb, struct genl_info *info)
+static int nfc_genl_fw_download(struct sk_buff *skb, struct genl_info *info)
{
struct nfc_dev *dev;
int rc;
@@ -1108,13 +1108,14 @@ static int nfc_genl_fw_upload(struct sk_buff *skb, struct genl_info *info)
nla_strlcpy(firmware_name, info->attrs[NFC_ATTR_FIRMWARE_NAME],
sizeof(firmware_name));
- rc = nfc_fw_upload(dev, firmware_name);
+ rc = nfc_fw_download(dev, firmware_name);
nfc_put_device(dev);
return rc;
}
-int nfc_genl_fw_upload_done(struct nfc_dev *dev, const char *firmware_name)
+int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
+ u32 result)
{
struct sk_buff *msg;
void *hdr;
@@ -1124,11 +1125,12 @@ int nfc_genl_fw_upload_done(struct nfc_dev *dev, const char *firmware_name)
return -ENOMEM;
hdr = genlmsg_put(msg, 0, 0, &nfc_genl_family, 0,
- NFC_CMD_FW_UPLOAD);
+ NFC_CMD_FW_DOWNLOAD);
if (!hdr)
goto free_msg;
if (nla_put_string(msg, NFC_ATTR_FIRMWARE_NAME, firmware_name) ||
+ nla_put_u32(msg, NFC_ATTR_FIRMWARE_DOWNLOAD_STATUS, result) ||
nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx))
goto nla_put_failure;
@@ -1191,6 +1193,91 @@ static int nfc_genl_disable_se(struct sk_buff *skb, struct genl_info *info)
return rc;
}
+static int nfc_genl_send_se(struct sk_buff *msg, struct nfc_dev *dev,
+ u32 portid, u32 seq,
+ struct netlink_callback *cb,
+ int flags)
+{
+ void *hdr;
+ struct nfc_se *se, *n;
+
+ list_for_each_entry_safe(se, n, &dev->secure_elements, list) {
+ hdr = genlmsg_put(msg, portid, seq, &nfc_genl_family, flags,
+ NFC_CMD_GET_SE);
+ if (!hdr)
+ goto nla_put_failure;
+
+ if (cb)
+ genl_dump_check_consistent(cb, hdr, &nfc_genl_family);
+
+ if (nla_put_u32(msg, NFC_ATTR_DEVICE_INDEX, dev->idx) ||
+ nla_put_u32(msg, NFC_ATTR_SE_INDEX, se->idx) ||
+ nla_put_u8(msg, NFC_ATTR_SE_TYPE, se->type))
+ goto nla_put_failure;
+
+ if (genlmsg_end(msg, hdr) < 0)
+ goto nla_put_failure;
+ }
+
+ return 0;
+
+nla_put_failure:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static int nfc_genl_dump_ses(struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
+ struct nfc_dev *dev = (struct nfc_dev *) cb->args[1];
+ bool first_call = false;
+
+ if (!iter) {
+ first_call = true;
+ iter = kmalloc(sizeof(struct class_dev_iter), GFP_KERNEL);
+ if (!iter)
+ return -ENOMEM;
+ cb->args[0] = (long) iter;
+ }
+
+ mutex_lock(&nfc_devlist_mutex);
+
+ cb->seq = nfc_devlist_generation;
+
+ if (first_call) {
+ nfc_device_iter_init(iter);
+ dev = nfc_device_iter_next(iter);
+ }
+
+ while (dev) {
+ int rc;
+
+ rc = nfc_genl_send_se(skb, dev, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, cb, NLM_F_MULTI);
+ if (rc < 0)
+ break;
+
+ dev = nfc_device_iter_next(iter);
+ }
+
+ mutex_unlock(&nfc_devlist_mutex);
+
+ cb->args[1] = (long) dev;
+
+ return skb->len;
+}
+
+static int nfc_genl_dump_ses_done(struct netlink_callback *cb)
+{
+ struct class_dev_iter *iter = (struct class_dev_iter *) cb->args[0];
+
+ nfc_device_iter_exit(iter);
+ kfree(iter);
+
+ return 0;
+}
+
static struct genl_ops nfc_genl_ops[] = {
{
.cmd = NFC_CMD_GET_DEVICE,
@@ -1251,8 +1338,8 @@ static struct genl_ops nfc_genl_ops[] = {
.policy = nfc_genl_policy,
},
{
- .cmd = NFC_CMD_FW_UPLOAD,
- .doit = nfc_genl_fw_upload,
+ .cmd = NFC_CMD_FW_DOWNLOAD,
+ .doit = nfc_genl_fw_download,
.policy = nfc_genl_policy,
},
{
@@ -1265,6 +1352,12 @@ static struct genl_ops nfc_genl_ops[] = {
.doit = nfc_genl_disable_se,
.policy = nfc_genl_policy,
},
+ {
+ .cmd = NFC_CMD_GET_SE,
+ .dumpit = nfc_genl_dump_ses,
+ .done = nfc_genl_dump_ses_done,
+ .policy = nfc_genl_policy,
+ },
};
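
The new NFC_CMD_GET_SE dump above keeps its iteration state across netlink_dump() calls by parking a heap-allocated class_dev_iter pointer in cb->args[0] on the first invocation and releasing it in the .done callback. The sketch below shows the same resumable-dump bookkeeping with an invented cursor type; it illustrates the pattern, not the NFC API:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct cb { long args[2]; };        /* mirrors netlink_callback::args */

    struct cursor { int next; };

    static int dump_step(struct cb *cb, int limit)
    {
            struct cursor *it = (struct cursor *)cb->args[0];

            if (!it) {                          /* first invocation          */
                    it = calloc(1, sizeof(*it));
                    if (!it)
                            return -ENOMEM;
                    cb->args[0] = (long)it;     /* stash the cursor pointer  */
            }

            if (it->next >= limit)
                    return 0;                   /* nothing left to emit      */

            printf("emit item %d\n", it->next++);
            return 1;                           /* more data, call again     */
    }

    static void dump_done(struct cb *cb)
    {
            free((struct cursor *)cb->args[0]); /* mirrors the .done handler */
            cb->args[0] = 0;
    }

    int main(void)
    {
            struct cb cb = { { 0, 0 } };

            while (dump_step(&cb, 3) > 0)
                    ;
            dump_done(&cb);
            return 0;
    }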
diff --git a/net/nfc/nfc.h b/net/nfc/nfc.h
index ee85a1fc1b2..aaf606fc1fa 100644
--- a/net/nfc/nfc.h
+++ b/net/nfc/nfc.h
@@ -123,10 +123,9 @@ static inline void nfc_device_iter_exit(struct class_dev_iter *iter)
class_dev_iter_exit(iter);
}
-int nfc_fw_upload(struct nfc_dev *dev, const char *firmware_name);
-int nfc_genl_fw_upload_done(struct nfc_dev *dev, const char *firmware_name);
-
-int nfc_fw_upload_done(struct nfc_dev *dev, const char *firmware_name);
+int nfc_fw_download(struct nfc_dev *dev, const char *firmware_name);
+int nfc_genl_fw_download_done(struct nfc_dev *dev, const char *firmware_name,
+ u32 result);
int nfc_dev_up(struct nfc_dev *dev);
diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig
index 27ee56b688a..6ecf491ad50 100644
--- a/net/openvswitch/Kconfig
+++ b/net/openvswitch/Kconfig
@@ -4,6 +4,7 @@
config OPENVSWITCH
tristate "Open vSwitch"
+ select LIBCRC32C
---help---
Open vSwitch is a multilayer Ethernet switch targeted at virtualized
environments. In addition to supporting a variety of features
@@ -40,3 +41,16 @@ config OPENVSWITCH_GRE
Say N to exclude this support and reduce the binary size.
If unsure, say Y.
+
+config OPENVSWITCH_VXLAN
+ bool "Open vSwitch VXLAN tunneling support"
+ depends on INET
+ depends on OPENVSWITCH
+ depends on VXLAN && !(OPENVSWITCH=y && VXLAN=m)
+ default y
+ ---help---
+ If you say Y here, then the Open vSwitch will be able to create vxlan vports.
+
+ Say N to exclude this support and reduce the binary size.
+
+ If unsure, say Y.
diff --git a/net/openvswitch/Makefile b/net/openvswitch/Makefile
index 01bddb2991e..ea36e99089a 100644
--- a/net/openvswitch/Makefile
+++ b/net/openvswitch/Makefile
@@ -10,6 +10,13 @@ openvswitch-y := \
dp_notify.o \
flow.o \
vport.o \
- vport-gre.o \
vport-internal_dev.o \
vport-netdev.o
+
+ifneq ($(CONFIG_OPENVSWITCH_VXLAN),)
+openvswitch-y += vport-vxlan.o
+endif
+
+ifneq ($(CONFIG_OPENVSWITCH_GRE),)
+openvswitch-y += vport-gre.o
+endif
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 22c5f399f1c..65cfaa81607 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2012 Nicira, Inc.
+ * Copyright (c) 2007-2013 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -22,6 +22,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/openvswitch.h>
+#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in6.h>
@@ -31,6 +32,7 @@
#include <net/ipv6.h>
#include <net/checksum.h>
#include <net/dsfield.h>
+#include <net/sctp/checksum.h>
#include "datapath.h"
#include "vport.h"
@@ -352,6 +354,39 @@ static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
return 0;
}
+static int set_sctp(struct sk_buff *skb,
+ const struct ovs_key_sctp *sctp_port_key)
+{
+ struct sctphdr *sh;
+ int err;
+ unsigned int sctphoff = skb_transport_offset(skb);
+
+ err = make_writable(skb, sctphoff + sizeof(struct sctphdr));
+ if (unlikely(err))
+ return err;
+
+ sh = sctp_hdr(skb);
+ if (sctp_port_key->sctp_src != sh->source ||
+ sctp_port_key->sctp_dst != sh->dest) {
+ __le32 old_correct_csum, new_csum, old_csum;
+
+ old_csum = sh->checksum;
+ old_correct_csum = sctp_compute_cksum(skb, sctphoff);
+
+ sh->source = sctp_port_key->sctp_src;
+ sh->dest = sctp_port_key->sctp_dst;
+
+ new_csum = sctp_compute_cksum(skb, sctphoff);
+
+ /* Carry any checksum errors through. */
+ sh->checksum = old_csum ^ old_correct_csum ^ new_csum;
+
+ skb->rxhash = 0;
+ }
+
+ return 0;
+}
+
static int do_output(struct datapath *dp, struct sk_buff *skb, int out_port)
{
struct vport *vport;
@@ -376,8 +411,10 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
const struct nlattr *a;
int rem;
+ BUG_ON(!OVS_CB(skb)->pkt_key);
+
upcall.cmd = OVS_PACKET_CMD_ACTION;
- upcall.key = &OVS_CB(skb)->flow->key;
+ upcall.key = OVS_CB(skb)->pkt_key;
upcall.userdata = NULL;
upcall.portid = 0;
@@ -459,6 +496,10 @@ static int execute_set_action(struct sk_buff *skb,
case OVS_KEY_ATTR_UDP:
err = set_udp(skb, nla_data(nested_attr));
break;
+
+ case OVS_KEY_ATTR_SCTP:
+ err = set_sctp(skb, nla_data(nested_attr));
+ break;
}
return err;
@@ -535,6 +576,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
{
struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
+ OVS_CB(skb)->tun_key = NULL;
return do_execute_actions(dp, skb, acts->actions,
acts->actions_len, false);
}
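
set_sctp() above recomputes the CRC32c over the packet before and after rewriting the ports and stores old_csum ^ old_correct_csum ^ new_csum, so a packet that arrived with a broken checksum stays broken by the same XOR offset after the rewrite. The toy below demonstrates that carry-through identity with a stand-in checksum function (not CRC32c):

    #include <stdint.h>
    #include <stdio.h>

    /* Any deterministic function of the packet works for demonstrating
     * the identity; this is not sctp_compute_cksum(). */
    static uint32_t cksum(const uint8_t *p, int len)
    {
            uint32_t c = 0;

            while (len--)
                    c = c * 31 + *p++;
            return c;
    }

    int main(void)
    {
            uint8_t pkt[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
            uint32_t stored = cksum(pkt, 8) ^ 0x5a;   /* arrived with an error */
            uint32_t old_correct = cksum(pkt, 8);

            pkt[0] = 9;                               /* rewrite a "port"      */
            uint32_t new_correct = cksum(pkt, 8);

            /* Same trick as set_sctp(): fold the old error into the update. */
            uint32_t new_stored = stored ^ old_correct ^ new_correct;

            printf("error preserved: %s\n",
                   (new_stored ^ new_correct) == (stored ^ old_correct) ?
                   "yes" : "no");
            return 0;
    }

Whatever checksum is used, new_stored ^ new_correct equals stored ^ old_correct, which is exactly the property the "Carry any checksum errors through" comment relies on.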
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index f7e3a0d84c4..2aa13bd7f2b 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2012 Nicira, Inc.
+ * Copyright (c) 2007-2013 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -165,7 +165,7 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
{
struct datapath *dp = container_of(rcu, struct datapath, rcu);
- ovs_flow_tbl_destroy((__force struct flow_table *)dp->table);
+ ovs_flow_tbl_destroy((__force struct flow_table *)dp->table, false);
free_percpu(dp->stats_percpu);
release_net(ovs_dp_get_net(dp));
kfree(dp->ports);
@@ -226,19 +226,18 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
struct sw_flow_key key;
u64 *stats_counter;
int error;
- int key_len;
stats = this_cpu_ptr(dp->stats_percpu);
/* Extract flow from 'skb' into 'key'. */
- error = ovs_flow_extract(skb, p->port_no, &key, &key_len);
+ error = ovs_flow_extract(skb, p->port_no, &key);
if (unlikely(error)) {
kfree_skb(skb);
return;
}
/* Look up flow. */
- flow = ovs_flow_tbl_lookup(rcu_dereference(dp->table), &key, key_len);
+ flow = ovs_flow_lookup(rcu_dereference(dp->table), &key);
if (unlikely(!flow)) {
struct dp_upcall_info upcall;
@@ -253,6 +252,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
}
OVS_CB(skb)->flow = flow;
+ OVS_CB(skb)->pkt_key = &key;
stats_counter = &stats->n_hit;
ovs_flow_used(OVS_CB(skb)->flow, skb);
@@ -435,7 +435,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex,
upcall->dp_ifindex = dp_ifindex;
nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
- ovs_flow_to_nlattrs(upcall_info->key, user_skb);
+ ovs_flow_to_nlattrs(upcall_info->key, upcall_info->key, user_skb);
nla_nest_end(user_skb, nla);
if (upcall_info->userdata)
@@ -468,7 +468,7 @@ static int flush_flows(struct datapath *dp)
rcu_assign_pointer(dp->table, new_table);
- ovs_flow_tbl_deferred_destroy(old_table);
+ ovs_flow_tbl_destroy(old_table, true);
return 0;
}
@@ -611,10 +611,12 @@ static int validate_tp_port(const struct sw_flow_key *flow_key)
static int validate_and_copy_set_tun(const struct nlattr *attr,
struct sw_flow_actions **sfa)
{
- struct ovs_key_ipv4_tunnel tun_key;
+ struct sw_flow_match match;
+ struct sw_flow_key key;
int err, start;
- err = ovs_ipv4_tun_from_nlattr(nla_data(attr), &tun_key);
+ ovs_match_init(&match, &key, NULL);
+ err = ovs_ipv4_tun_from_nlattr(nla_data(attr), &match, false);
if (err)
return err;
@@ -622,7 +624,8 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
if (start < 0)
return start;
- err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &tun_key, sizeof(tun_key));
+ err = add_action(sfa, OVS_KEY_ATTR_IPV4_TUNNEL, &match.key->tun_key,
+ sizeof(match.key->tun_key));
add_nested_action_end(*sfa, start);
return err;
@@ -709,6 +712,12 @@ static int validate_set(const struct nlattr *a,
return validate_tp_port(flow_key);
+ case OVS_KEY_ATTR_SCTP:
+ if (flow_key->ip.proto != IPPROTO_SCTP)
+ return -EINVAL;
+
+ return validate_tp_port(flow_key);
+
default:
return -EINVAL;
}
@@ -857,7 +866,6 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
struct ethhdr *eth;
int len;
int err;
- int key_len;
err = -EINVAL;
if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
@@ -890,11 +898,11 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
if (IS_ERR(flow))
goto err_kfree_skb;
- err = ovs_flow_extract(packet, -1, &flow->key, &key_len);
+ err = ovs_flow_extract(packet, -1, &flow->key);
if (err)
goto err_flow_free;
- err = ovs_flow_metadata_from_nlattrs(flow, key_len, a[OVS_PACKET_ATTR_KEY]);
+ err = ovs_flow_metadata_from_nlattrs(flow, a[OVS_PACKET_ATTR_KEY]);
if (err)
goto err_flow_free;
acts = ovs_flow_actions_alloc(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
@@ -908,6 +916,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
goto err_flow_free;
OVS_CB(packet)->flow = flow;
+ OVS_CB(packet)->pkt_key = &flow->key;
packet->priority = flow->key.phy.priority;
packet->mark = flow->key.phy.skb_mark;
@@ -922,13 +931,13 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
local_bh_enable();
rcu_read_unlock();
- ovs_flow_free(flow);
+ ovs_flow_free(flow, false);
return err;
err_unlock:
rcu_read_unlock();
err_flow_free:
- ovs_flow_free(flow);
+ ovs_flow_free(flow, false);
err_kfree_skb:
kfree_skb(packet);
err:
@@ -951,9 +960,10 @@ static struct genl_ops dp_packet_genl_ops[] = {
static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats)
{
+ struct flow_table *table;
int i;
- struct flow_table *table = ovsl_dereference(dp->table);
+ table = rcu_dereference_check(dp->table, lockdep_ovsl_is_held());
stats->n_flows = ovs_flow_tbl_count(table);
stats->n_hit = stats->n_missed = stats->n_lost = 0;
@@ -1044,7 +1054,8 @@ static int set_action_to_attr(const struct nlattr *a, struct sk_buff *skb)
if (!start)
return -EMSGSIZE;
- err = ovs_ipv4_tun_to_nlattr(skb, nla_data(ovs_key));
+ err = ovs_ipv4_tun_to_nlattr(skb, nla_data(ovs_key),
+ nla_data(ovs_key));
if (err)
return err;
nla_nest_end(skb, start);
@@ -1092,6 +1103,7 @@ static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
{
return NLMSG_ALIGN(sizeof(struct ovs_header))
+ nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
+ + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */
+ nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
+ nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
+ nla_total_size(8) /* OVS_FLOW_ATTR_USED */
@@ -1104,7 +1116,6 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
u32 seq, u32 flags, u8 cmd)
{
const int skb_orig_len = skb->len;
- const struct sw_flow_actions *sf_acts;
struct nlattr *start;
struct ovs_flow_stats stats;
struct ovs_header *ovs_header;
@@ -1113,20 +1124,31 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
u8 tcp_flags;
int err;
- sf_acts = ovsl_dereference(flow->sf_acts);
-
ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
if (!ovs_header)
return -EMSGSIZE;
ovs_header->dp_ifindex = get_dpifindex(dp);
+ /* Fill flow key. */
nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
if (!nla)
goto nla_put_failure;
- err = ovs_flow_to_nlattrs(&flow->key, skb);
+
+ err = ovs_flow_to_nlattrs(&flow->unmasked_key,
+ &flow->unmasked_key, skb);
+ if (err)
+ goto error;
+ nla_nest_end(skb, nla);
+
+ nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
+ if (!nla)
+ goto nla_put_failure;
+
+ err = ovs_flow_to_nlattrs(&flow->key, &flow->mask->key, skb);
if (err)
goto error;
+
nla_nest_end(skb, nla);
spin_lock_bh(&flow->lock);
@@ -1161,6 +1183,11 @@ static int ovs_flow_cmd_fill_info(struct sw_flow *flow, struct datapath *dp,
*/
start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
if (start) {
+ const struct sw_flow_actions *sf_acts;
+
+ sf_acts = rcu_dereference_check(flow->sf_acts,
+ lockdep_ovsl_is_held());
+
err = actions_to_attr(sf_acts->actions, sf_acts->actions_len, skb);
if (!err)
nla_nest_end(skb, start);
@@ -1211,20 +1238,24 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr **a = info->attrs;
struct ovs_header *ovs_header = info->userhdr;
- struct sw_flow_key key;
- struct sw_flow *flow;
+ struct sw_flow_key key, masked_key;
+ struct sw_flow *flow = NULL;
+ struct sw_flow_mask mask;
struct sk_buff *reply;
struct datapath *dp;
struct flow_table *table;
struct sw_flow_actions *acts = NULL;
+ struct sw_flow_match match;
int error;
- int key_len;
/* Extract key. */
error = -EINVAL;
if (!a[OVS_FLOW_ATTR_KEY])
goto error;
- error = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+
+ ovs_match_init(&match, &key, &mask);
+ error = ovs_match_from_nlattrs(&match,
+ a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
if (error)
goto error;
@@ -1235,9 +1266,13 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
if (IS_ERR(acts))
goto error;
- error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &key, 0, &acts);
- if (error)
+ ovs_flow_key_mask(&masked_key, &key, &mask);
+ error = validate_and_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
+ &masked_key, 0, &acts);
+ if (error) {
+ OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
goto err_kfree;
+ }
} else if (info->genlhdr->cmd == OVS_FLOW_CMD_NEW) {
error = -EINVAL;
goto error;
@@ -1250,8 +1285,11 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
goto err_unlock_ovs;
table = ovsl_dereference(dp->table);
- flow = ovs_flow_tbl_lookup(table, &key, key_len);
+
+ /* Check if this is a duplicate flow */
+ flow = ovs_flow_lookup(table, &key);
if (!flow) {
+ struct sw_flow_mask *mask_p;
/* Bail out if we're not allowed to create a new flow. */
error = -ENOENT;
if (info->genlhdr->cmd == OVS_FLOW_CMD_SET)
@@ -1264,7 +1302,7 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
new_table = ovs_flow_tbl_expand(table);
if (!IS_ERR(new_table)) {
rcu_assign_pointer(dp->table, new_table);
- ovs_flow_tbl_deferred_destroy(table);
+ ovs_flow_tbl_destroy(table, true);
table = ovsl_dereference(dp->table);
}
}
@@ -1277,14 +1315,30 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
}
clear_stats(flow);
+ flow->key = masked_key;
+ flow->unmasked_key = key;
+
+ /* Make sure mask is unique in the system */
+ mask_p = ovs_sw_flow_mask_find(table, &mask);
+ if (!mask_p) {
+ /* Allocate a new mask if none exsits. */
+ mask_p = ovs_sw_flow_mask_alloc();
+ if (!mask_p)
+ goto err_flow_free;
+ mask_p->key = mask.key;
+ mask_p->range = mask.range;
+ ovs_sw_flow_mask_insert(table, mask_p);
+ }
+
+ ovs_sw_flow_mask_add_ref(mask_p);
+ flow->mask = mask_p;
rcu_assign_pointer(flow->sf_acts, acts);
/* Put flow in bucket. */
- ovs_flow_tbl_insert(table, flow, &key, key_len);
+ ovs_flow_insert(table, flow);
reply = ovs_flow_cmd_build_info(flow, dp, info->snd_portid,
- info->snd_seq,
- OVS_FLOW_CMD_NEW);
+ info->snd_seq, OVS_FLOW_CMD_NEW);
} else {
/* We found a matching flow. */
struct sw_flow_actions *old_acts;
@@ -1300,6 +1354,13 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
info->nlhdr->nlmsg_flags & (NLM_F_CREATE | NLM_F_EXCL))
goto err_unlock_ovs;
+ /* The unmasked key has to be the same for flow updates. */
+ error = -EINVAL;
+ if (!ovs_flow_cmp_unmasked_key(flow, &key, match.range.end)) {
+ OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n");
+ goto err_unlock_ovs;
+ }
+
/* Update actions. */
old_acts = ovsl_dereference(flow->sf_acts);
rcu_assign_pointer(flow->sf_acts, acts);
@@ -1324,6 +1385,8 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
ovs_dp_flow_multicast_group.id, PTR_ERR(reply));
return 0;
+err_flow_free:
+ ovs_flow_free(flow, false);
err_unlock_ovs:
ovs_unlock();
err_kfree:
@@ -1341,12 +1404,16 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
struct sw_flow *flow;
struct datapath *dp;
struct flow_table *table;
+ struct sw_flow_match match;
int err;
- int key_len;
- if (!a[OVS_FLOW_ATTR_KEY])
+ if (!a[OVS_FLOW_ATTR_KEY]) {
+ OVS_NLERR("Flow get message rejected, Key attribute missing.\n");
return -EINVAL;
- err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+ }
+
+ ovs_match_init(&match, &key, NULL);
+ err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL);
if (err)
return err;
@@ -1358,7 +1425,7 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
}
table = ovsl_dereference(dp->table);
- flow = ovs_flow_tbl_lookup(table, &key, key_len);
+ flow = ovs_flow_lookup_unmasked_key(table, &match);
if (!flow) {
err = -ENOENT;
goto unlock;
@@ -1387,8 +1454,8 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
struct sw_flow *flow;
struct datapath *dp;
struct flow_table *table;
+ struct sw_flow_match match;
int err;
- int key_len;
ovs_lock();
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
@@ -1401,12 +1468,14 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
err = flush_flows(dp);
goto unlock;
}
- err = ovs_flow_from_nlattrs(&key, &key_len, a[OVS_FLOW_ATTR_KEY]);
+
+ ovs_match_init(&match, &key, NULL);
+ err = ovs_match_from_nlattrs(&match, a[OVS_FLOW_ATTR_KEY], NULL);
if (err)
goto unlock;
table = ovsl_dereference(dp->table);
- flow = ovs_flow_tbl_lookup(table, &key, key_len);
+ flow = ovs_flow_lookup_unmasked_key(table, &match);
if (!flow) {
err = -ENOENT;
goto unlock;
@@ -1418,13 +1487,13 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
goto unlock;
}
- ovs_flow_tbl_remove(table, flow);
+ ovs_flow_remove(table, flow);
err = ovs_flow_cmd_fill_info(flow, dp, reply, info->snd_portid,
info->snd_seq, 0, OVS_FLOW_CMD_DEL);
BUG_ON(err < 0);
- ovs_flow_deferred_free(flow);
+ ovs_flow_free(flow, true);
ovs_unlock();
ovs_notify(reply, info, &ovs_dp_flow_multicast_group);
@@ -1440,22 +1509,21 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
struct datapath *dp;
struct flow_table *table;
- ovs_lock();
+ rcu_read_lock();
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
if (!dp) {
- ovs_unlock();
+ rcu_read_unlock();
return -ENODEV;
}
- table = ovsl_dereference(dp->table);
-
+ table = rcu_dereference(dp->table);
for (;;) {
struct sw_flow *flow;
u32 bucket, obj;
bucket = cb->args[0];
obj = cb->args[1];
- flow = ovs_flow_tbl_next(table, &bucket, &obj);
+ flow = ovs_flow_dump_next(table, &bucket, &obj);
if (!flow)
break;
@@ -1468,7 +1536,7 @@ static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
cb->args[0] = bucket;
cb->args[1] = obj;
}
- ovs_unlock();
+ rcu_read_unlock();
return skb->len;
}
@@ -1664,7 +1732,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
goto err_destroy_local_port;
ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
- list_add_tail(&dp->list_node, &ovs_net->dps);
+ list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
ovs_unlock();
@@ -1678,7 +1746,7 @@ err_destroy_ports_array:
err_destroy_percpu:
free_percpu(dp->stats_percpu);
err_destroy_table:
- ovs_flow_tbl_destroy(ovsl_dereference(dp->table));
+ ovs_flow_tbl_destroy(ovsl_dereference(dp->table), false);
err_free_dp:
release_net(ovs_dp_get_net(dp));
kfree(dp);
@@ -1702,7 +1770,7 @@ static void __dp_destroy(struct datapath *dp)
ovs_dp_detach_port(vport);
}
- list_del(&dp->list_node);
+ list_del_rcu(&dp->list_node);
/* OVSP_LOCAL is datapath internal port. We need to make sure that
* all port in datapath are destroyed first before freeing datapath.
@@ -1807,8 +1875,8 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
int skip = cb->args[0];
int i = 0;
- ovs_lock();
- list_for_each_entry(dp, &ovs_net->dps, list_node) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) {
if (i >= skip &&
ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
@@ -1816,7 +1884,7 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
break;
i++;
}
- ovs_unlock();
+ rcu_read_unlock();
cb->args[0] = i;
@@ -2076,9 +2144,6 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
ovs_notify(reply, info, &ovs_dp_vport_multicast_group);
return 0;
- rtnl_unlock();
- return 0;
-
exit_free:
kfree_skb(reply);
exit_unlock:
@@ -2288,7 +2353,7 @@ static void rehash_flow_table(struct work_struct *work)
new_table = ovs_flow_tbl_rehash(old_table);
if (!IS_ERR(new_table)) {
rcu_assign_pointer(dp->table, new_table);
- ovs_flow_tbl_deferred_destroy(old_table);
+ ovs_flow_tbl_destroy(old_table, true);
}
}
}
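
When ovs_flow_cmd_new_or_set() above installs a flow, it now looks for an identical mask with ovs_sw_flow_mask_find(), allocates one only if none exists, and takes a reference with ovs_sw_flow_mask_add_ref(), so many flows can share a single mask object. A minimal userspace sketch of that find-or-allocate-and-refcount idea follows (illustrative types, no locking or RCU):

    #include <stdlib.h>
    #include <string.h>

    struct mask {
            struct mask *next;
            int refcount;
            unsigned char bits[16];
    };

    static struct mask *mask_list;

    /* Find an identical mask or allocate one, then take a reference. */
    static struct mask *mask_get(const unsigned char bits[16])
    {
            struct mask *m;

            for (m = mask_list; m; m = m->next)
                    if (!memcmp(m->bits, bits, 16))
                            goto found;

            m = calloc(1, sizeof(*m));
            if (!m)
                    return NULL;
            memcpy(m->bits, bits, 16);
            m->next = mask_list;
            mask_list = m;
    found:
            m->refcount++;
            return m;
    }

    int main(void)
    {
            unsigned char b[16] = { 0xff, 0xff };
            struct mask *a = mask_get(b);
            struct mask *c = mask_get(b);    /* a second flow reuses the mask */

            return !(a == c && a->refcount == 2);
    }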
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index a9148648491..4d109c176ef 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -88,11 +88,13 @@ struct datapath {
/**
* struct ovs_skb_cb - OVS data in skb CB
* @flow: The flow associated with this packet. May be %NULL if no flow.
+ * @pkt_key: The flow information extracted from the packet. Must be nonnull.
* @tun_key: Key for the tunnel that encapsulated this packet. NULL if the
* packet is not being tunneled.
*/
struct ovs_skb_cb {
struct sw_flow *flow;
+ struct sw_flow_key *pkt_key;
struct ovs_key_ipv4_tunnel *tun_key;
};
#define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
@@ -183,4 +185,8 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *, u32 pid, u32 seq,
int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb);
void ovs_dp_notify_wq(struct work_struct *work);
+
+#define OVS_NLERR(fmt, ...) \
+ pr_info_once("netlink: " fmt, ##__VA_ARGS__)
+
#endif /* datapath.h */
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 5c519b121e1..fb36f856516 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira, Inc.
+ * Copyright (c) 2007-2013 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -34,6 +34,7 @@
#include <linux/if_arp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
+#include <linux/sctp.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
@@ -46,6 +47,202 @@
static struct kmem_cache *flow_cache;
+static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask,
+ struct sw_flow_key_range *range, u8 val);
+
+static void update_range__(struct sw_flow_match *match,
+ size_t offset, size_t size, bool is_mask)
+{
+ struct sw_flow_key_range *range = NULL;
+ size_t start = rounddown(offset, sizeof(long));
+ size_t end = roundup(offset + size, sizeof(long));
+
+ if (!is_mask)
+ range = &match->range;
+ else if (match->mask)
+ range = &match->mask->range;
+
+ if (!range)
+ return;
+
+ if (range->start == range->end) {
+ range->start = start;
+ range->end = end;
+ return;
+ }
+
+ if (range->start > start)
+ range->start = start;
+
+ if (range->end < end)
+ range->end = end;
+}
+
+#define SW_FLOW_KEY_PUT(match, field, value, is_mask) \
+ do { \
+ update_range__(match, offsetof(struct sw_flow_key, field), \
+ sizeof((match)->key->field), is_mask); \
+ if (is_mask) { \
+ if ((match)->mask) \
+ (match)->mask->key.field = value; \
+ } else { \
+ (match)->key->field = value; \
+ } \
+ } while (0)
+
+#define SW_FLOW_KEY_MEMCPY(match, field, value_p, len, is_mask) \
+ do { \
+ update_range__(match, offsetof(struct sw_flow_key, field), \
+ len, is_mask); \
+ if (is_mask) { \
+ if ((match)->mask) \
+ memcpy(&(match)->mask->key.field, value_p, len);\
+ } else { \
+ memcpy(&(match)->key->field, value_p, len); \
+ } \
+ } while (0)
+
+static u16 range_n_bytes(const struct sw_flow_key_range *range)
+{
+ return range->end - range->start;
+}
+
+void ovs_match_init(struct sw_flow_match *match,
+ struct sw_flow_key *key,
+ struct sw_flow_mask *mask)
+{
+ memset(match, 0, sizeof(*match));
+ match->key = key;
+ match->mask = mask;
+
+ memset(key, 0, sizeof(*key));
+
+ if (mask) {
+ memset(&mask->key, 0, sizeof(mask->key));
+ mask->range.start = mask->range.end = 0;
+ }
+}
+
+static bool ovs_match_validate(const struct sw_flow_match *match,
+ u64 key_attrs, u64 mask_attrs)
+{
+ u64 key_expected = 1 << OVS_KEY_ATTR_ETHERNET;
+ u64 mask_allowed = key_attrs; /* At most allow all key attributes */
+
+ /* The following mask attributes are allowed only if they
+ * pass the validation tests. */
+ mask_allowed &= ~((1 << OVS_KEY_ATTR_IPV4)
+ | (1 << OVS_KEY_ATTR_IPV6)
+ | (1 << OVS_KEY_ATTR_TCP)
+ | (1 << OVS_KEY_ATTR_UDP)
+ | (1 << OVS_KEY_ATTR_SCTP)
+ | (1 << OVS_KEY_ATTR_ICMP)
+ | (1 << OVS_KEY_ATTR_ICMPV6)
+ | (1 << OVS_KEY_ATTR_ARP)
+ | (1 << OVS_KEY_ATTR_ND));
+
+ /* Always allowed mask fields. */
+ mask_allowed |= ((1 << OVS_KEY_ATTR_TUNNEL)
+ | (1 << OVS_KEY_ATTR_IN_PORT)
+ | (1 << OVS_KEY_ATTR_ETHERTYPE));
+
+ /* Check key attributes. */
+ if (match->key->eth.type == htons(ETH_P_ARP)
+ || match->key->eth.type == htons(ETH_P_RARP)) {
+ key_expected |= 1 << OVS_KEY_ATTR_ARP;
+ if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
+ mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
+ }
+
+ if (match->key->eth.type == htons(ETH_P_IP)) {
+ key_expected |= 1 << OVS_KEY_ATTR_IPV4;
+ if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
+ mask_allowed |= 1 << OVS_KEY_ATTR_IPV4;
+
+ if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
+ if (match->key->ip.proto == IPPROTO_UDP) {
+ key_expected |= 1 << OVS_KEY_ATTR_UDP;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
+ }
+
+ if (match->key->ip.proto == IPPROTO_SCTP) {
+ key_expected |= 1 << OVS_KEY_ATTR_SCTP;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
+ }
+
+ if (match->key->ip.proto == IPPROTO_TCP) {
+ key_expected |= 1 << OVS_KEY_ATTR_TCP;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
+ }
+
+ if (match->key->ip.proto == IPPROTO_ICMP) {
+ key_expected |= 1 << OVS_KEY_ATTR_ICMP;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_ICMP;
+ }
+ }
+ }
+
+ if (match->key->eth.type == htons(ETH_P_IPV6)) {
+ key_expected |= 1 << OVS_KEY_ATTR_IPV6;
+ if (match->mask && (match->mask->key.eth.type == htons(0xffff)))
+ mask_allowed |= 1 << OVS_KEY_ATTR_IPV6;
+
+ if (match->key->ip.frag != OVS_FRAG_TYPE_LATER) {
+ if (match->key->ip.proto == IPPROTO_UDP) {
+ key_expected |= 1 << OVS_KEY_ATTR_UDP;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_UDP;
+ }
+
+ if (match->key->ip.proto == IPPROTO_SCTP) {
+ key_expected |= 1 << OVS_KEY_ATTR_SCTP;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_SCTP;
+ }
+
+ if (match->key->ip.proto == IPPROTO_TCP) {
+ key_expected |= 1 << OVS_KEY_ATTR_TCP;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_TCP;
+ }
+
+ if (match->key->ip.proto == IPPROTO_ICMPV6) {
+ key_expected |= 1 << OVS_KEY_ATTR_ICMPV6;
+ if (match->mask && (match->mask->key.ip.proto == 0xff))
+ mask_allowed |= 1 << OVS_KEY_ATTR_ICMPV6;
+
+ if (match->key->ipv6.tp.src ==
+ htons(NDISC_NEIGHBOUR_SOLICITATION) ||
+ match->key->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
+ key_expected |= 1 << OVS_KEY_ATTR_ND;
+ if (match->mask && (match->mask->key.ipv6.tp.src == htons(0xffff)))
+ mask_allowed |= 1 << OVS_KEY_ATTR_ND;
+ }
+ }
+ }
+ }
+
+ if ((key_attrs & key_expected) != key_expected) {
+ /* Key attributes check failed. */
+ OVS_NLERR("Missing expected key attributes (key_attrs=%llx, expected=%llx).\n",
+ key_attrs, key_expected);
+ return false;
+ }
+
+ if ((mask_attrs & mask_allowed) != mask_attrs) {
+ /* Mask attributes check failed. */
+ OVS_NLERR("Contain more than allowed mask fields (mask_attrs=%llx, mask_allowed=%llx).\n",
+ mask_attrs, mask_allowed);
+ return false;
+ }
+
+ return true;
+}
+
static int check_header(struct sk_buff *skb, int len)
{
if (unlikely(skb->len < len))
@@ -102,6 +299,12 @@ static bool udphdr_ok(struct sk_buff *skb)
sizeof(struct udphdr));
}
+static bool sctphdr_ok(struct sk_buff *skb)
+{
+ return pskb_may_pull(skb, skb_transport_offset(skb) +
+ sizeof(struct sctphdr));
+}
+
static bool icmphdr_ok(struct sk_buff *skb)
{
return pskb_may_pull(skb, skb_transport_offset(skb) +
@@ -121,12 +324,7 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
return cur_ms - idle_ms;
}
-#define SW_FLOW_KEY_OFFSET(field) \
- (offsetof(struct sw_flow_key, field) + \
- FIELD_SIZEOF(struct sw_flow_key, field))
-
-static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
- int *key_lenp)
+static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key)
{
unsigned int nh_ofs = skb_network_offset(skb);
unsigned int nh_len;
@@ -136,8 +334,6 @@ static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key,
__be16 frag_off;
int err;
- *key_lenp = SW_FLOW_KEY_OFFSET(ipv6.label);
-
err = check_header(skb, nh_ofs + sizeof(*nh));
if (unlikely(err))
return err;
@@ -176,6 +372,22 @@ static bool icmp6hdr_ok(struct sk_buff *skb)
sizeof(struct icmp6hdr));
}
+void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src,
+ const struct sw_flow_mask *mask)
+{
+ const long *m = (long *)((u8 *)&mask->key + mask->range.start);
+ const long *s = (long *)((u8 *)src + mask->range.start);
+ long *d = (long *)((u8 *)dst + mask->range.start);
+ int i;
+
+ /* The memory outside of 'mask->range' is not set, since
+ * further operations on 'dst' only use contents within
+ * 'mask->range'.
+ */
+ for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
+ *d++ = *s++ & *m++;
+}
+
#define TCP_FLAGS_OFFSET 13
#define TCP_FLAG_MASK 0x3f
@@ -224,6 +436,7 @@ struct sw_flow *ovs_flow_alloc(void)
spin_lock_init(&flow->lock);
flow->sf_acts = NULL;
+ flow->mask = NULL;
return flow;
}
@@ -240,7 +453,7 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets)
struct flex_array *buckets;
int i, err;
- buckets = flex_array_alloc(sizeof(struct hlist_head *),
+ buckets = flex_array_alloc(sizeof(struct hlist_head),
n_buckets, GFP_KERNEL);
if (!buckets)
return NULL;
@@ -263,7 +476,7 @@ static void free_buckets(struct flex_array *buckets)
flex_array_free(buckets);
}
-struct flow_table *ovs_flow_tbl_alloc(int new_size)
+static struct flow_table *__flow_tbl_alloc(int new_size)
{
struct flow_table *table = kmalloc(sizeof(*table), GFP_KERNEL);
@@ -281,17 +494,15 @@ struct flow_table *ovs_flow_tbl_alloc(int new_size)
table->node_ver = 0;
table->keep_flows = false;
get_random_bytes(&table->hash_seed, sizeof(u32));
+ table->mask_list = NULL;
return table;
}
-void ovs_flow_tbl_destroy(struct flow_table *table)
+static void __flow_tbl_destroy(struct flow_table *table)
{
int i;
- if (!table)
- return;
-
if (table->keep_flows)
goto skip_flows;
@@ -302,32 +513,56 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
int ver = table->node_ver;
hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
- hlist_del_rcu(&flow->hash_node[ver]);
- ovs_flow_free(flow);
+ hlist_del(&flow->hash_node[ver]);
+ ovs_flow_free(flow, false);
}
}
+ BUG_ON(!list_empty(table->mask_list));
+ kfree(table->mask_list);
+
skip_flows:
free_buckets(table->buckets);
kfree(table);
}
+struct flow_table *ovs_flow_tbl_alloc(int new_size)
+{
+ struct flow_table *table = __flow_tbl_alloc(new_size);
+
+ if (!table)
+ return NULL;
+
+ table->mask_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (!table->mask_list) {
+ table->keep_flows = true;
+ __flow_tbl_destroy(table);
+ return NULL;
+ }
+ INIT_LIST_HEAD(table->mask_list);
+
+ return table;
+}
+
static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
{
struct flow_table *table = container_of(rcu, struct flow_table, rcu);
- ovs_flow_tbl_destroy(table);
+ __flow_tbl_destroy(table);
}
-void ovs_flow_tbl_deferred_destroy(struct flow_table *table)
+void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
{
if (!table)
return;
- call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
+ if (deferred)
+ call_rcu(&table->rcu, flow_tbl_destroy_rcu_cb);
+ else
+ __flow_tbl_destroy(table);
}
-struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *last)
+struct sw_flow *ovs_flow_dump_next(struct flow_table *table, u32 *bucket, u32 *last)
{
struct sw_flow *flow;
struct hlist_head *head;
@@ -353,11 +588,13 @@ struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *la
return NULL;
}
-static void __flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)
+static void __tbl_insert(struct flow_table *table, struct sw_flow *flow)
{
struct hlist_head *head;
+
head = find_bucket(table, flow->hash);
hlist_add_head_rcu(&flow->hash_node[table->node_ver], head);
+
table->count++;
}
@@ -377,8 +614,10 @@ static void flow_table_copy_flows(struct flow_table *old, struct flow_table *new
head = flex_array_get(old->buckets, i);
hlist_for_each_entry(flow, head, hash_node[old_ver])
- __flow_tbl_insert(new, flow);
+ __tbl_insert(new, flow);
}
+
+ new->mask_list = old->mask_list;
old->keep_flows = true;
}
@@ -386,7 +625,7 @@ static struct flow_table *__flow_tbl_rehash(struct flow_table *table, int n_buck
{
struct flow_table *new_table;
- new_table = ovs_flow_tbl_alloc(n_buckets);
+ new_table = __flow_tbl_alloc(n_buckets);
if (!new_table)
return ERR_PTR(-ENOMEM);
@@ -405,28 +644,30 @@ struct flow_table *ovs_flow_tbl_expand(struct flow_table *table)
return __flow_tbl_rehash(table, table->n_buckets * 2);
}
-void ovs_flow_free(struct sw_flow *flow)
+static void __flow_free(struct sw_flow *flow)
{
- if (unlikely(!flow))
- return;
-
kfree((struct sf_flow_acts __force *)flow->sf_acts);
kmem_cache_free(flow_cache, flow);
}
-/* RCU callback used by ovs_flow_deferred_free. */
static void rcu_free_flow_callback(struct rcu_head *rcu)
{
struct sw_flow *flow = container_of(rcu, struct sw_flow, rcu);
- ovs_flow_free(flow);
+ __flow_free(flow);
}
-/* Schedules 'flow' to be freed after the next RCU grace period.
- * The caller must hold rcu_read_lock for this to be sensible. */
-void ovs_flow_deferred_free(struct sw_flow *flow)
+void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
- call_rcu(&flow->rcu, rcu_free_flow_callback);
+ if (!flow)
+ return;
+
+ ovs_sw_flow_mask_del_ref(flow->mask, deferred);
+
+ if (deferred)
+ call_rcu(&flow->rcu, rcu_free_flow_callback);
+ else
+ __flow_free(flow);
}
/* Schedules 'sf_acts' to be freed after the next RCU grace period.
@@ -497,18 +738,15 @@ static __be16 parse_ethertype(struct sk_buff *skb)
}
static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
- int *key_lenp, int nh_len)
+ int nh_len)
{
struct icmp6hdr *icmp = icmp6_hdr(skb);
- int error = 0;
- int key_len;
/* The ICMPv6 type and code fields use the 16-bit transport port
* fields, so we need to store them in 16-bit network byte order.
*/
key->ipv6.tp.src = htons(icmp->icmp6_type);
key->ipv6.tp.dst = htons(icmp->icmp6_code);
- key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
if (icmp->icmp6_code == 0 &&
(icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION ||
@@ -517,21 +755,17 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
struct nd_msg *nd;
int offset;
- key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
-
/* In order to process neighbor discovery options, we need the
* entire packet.
*/
if (unlikely(icmp_len < sizeof(*nd)))
- goto out;
- if (unlikely(skb_linearize(skb))) {
- error = -ENOMEM;
- goto out;
- }
+ return 0;
+
+ if (unlikely(skb_linearize(skb)))
+ return -ENOMEM;
nd = (struct nd_msg *)skb_transport_header(skb);
key->ipv6.nd.target = nd->target;
- key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
icmp_len -= sizeof(*nd);
offset = 0;
@@ -541,7 +775,7 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
int opt_len = nd_opt->nd_opt_len * 8;
if (unlikely(!opt_len || opt_len > icmp_len))
- goto invalid;
+ return 0;
/* Store the link layer address if the appropriate
* option is provided. It is considered an error if
@@ -566,16 +800,14 @@ static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key,
}
}
- goto out;
+ return 0;
invalid:
memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));
-out:
- *key_lenp = key_len;
- return error;
+ return 0;
}
/**
@@ -584,7 +816,6 @@ out:
* Ethernet header
* @in_port: port number on which @skb was received.
* @key: output flow key
- * @key_lenp: length of output flow key
*
* The caller must ensure that skb->len >= ETH_HLEN.
*
@@ -602,11 +833,9 @@ out:
* of a correct length, otherwise the same as skb->network_header.
* For other key->eth.type values it is left untouched.
*/
-int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
- int *key_lenp)
+int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key)
{
- int error = 0;
- int key_len = SW_FLOW_KEY_OFFSET(eth);
+ int error;
struct ethhdr *eth;
memset(key, 0, sizeof(*key));
@@ -649,15 +878,13 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
struct iphdr *nh;
__be16 offset;
- key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
-
error = check_iphdr(skb);
if (unlikely(error)) {
if (error == -EINVAL) {
skb->transport_header = skb->network_header;
error = 0;
}
- goto out;
+ return error;
}
nh = ip_hdr(skb);
@@ -671,7 +898,7 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
offset = nh->frag_off & htons(IP_OFFSET);
if (offset) {
key->ip.frag = OVS_FRAG_TYPE_LATER;
- goto out;
+ return 0;
}
if (nh->frag_off & htons(IP_MF) ||
skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
@@ -679,21 +906,24 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
/* Transport layer. */
if (key->ip.proto == IPPROTO_TCP) {
- key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
if (tcphdr_ok(skb)) {
struct tcphdr *tcp = tcp_hdr(skb);
key->ipv4.tp.src = tcp->source;
key->ipv4.tp.dst = tcp->dest;
}
} else if (key->ip.proto == IPPROTO_UDP) {
- key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
if (udphdr_ok(skb)) {
struct udphdr *udp = udp_hdr(skb);
key->ipv4.tp.src = udp->source;
key->ipv4.tp.dst = udp->dest;
}
+ } else if (key->ip.proto == IPPROTO_SCTP) {
+ if (sctphdr_ok(skb)) {
+ struct sctphdr *sctp = sctp_hdr(skb);
+ key->ipv4.tp.src = sctp->source;
+ key->ipv4.tp.dst = sctp->dest;
+ }
} else if (key->ip.proto == IPPROTO_ICMP) {
- key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
if (icmphdr_ok(skb)) {
struct icmphdr *icmp = icmp_hdr(skb);
/* The ICMP type and code fields use the 16-bit
@@ -722,102 +952,175 @@ int ovs_flow_extract(struct sk_buff *skb, u16 in_port, struct sw_flow_key *key,
memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst));
memcpy(key->ipv4.arp.sha, arp->ar_sha, ETH_ALEN);
memcpy(key->ipv4.arp.tha, arp->ar_tha, ETH_ALEN);
- key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
}
} else if (key->eth.type == htons(ETH_P_IPV6)) {
int nh_len; /* IPv6 Header + Extensions */
- nh_len = parse_ipv6hdr(skb, key, &key_len);
+ nh_len = parse_ipv6hdr(skb, key);
if (unlikely(nh_len < 0)) {
- if (nh_len == -EINVAL)
+ if (nh_len == -EINVAL) {
skb->transport_header = skb->network_header;
- else
+ error = 0;
+ } else {
error = nh_len;
- goto out;
+ }
+ return error;
}
if (key->ip.frag == OVS_FRAG_TYPE_LATER)
- goto out;
+ return 0;
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
key->ip.frag = OVS_FRAG_TYPE_FIRST;
/* Transport layer. */
if (key->ip.proto == NEXTHDR_TCP) {
- key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
if (tcphdr_ok(skb)) {
struct tcphdr *tcp = tcp_hdr(skb);
key->ipv6.tp.src = tcp->source;
key->ipv6.tp.dst = tcp->dest;
}
} else if (key->ip.proto == NEXTHDR_UDP) {
- key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
if (udphdr_ok(skb)) {
struct udphdr *udp = udp_hdr(skb);
key->ipv6.tp.src = udp->source;
key->ipv6.tp.dst = udp->dest;
}
+ } else if (key->ip.proto == NEXTHDR_SCTP) {
+ if (sctphdr_ok(skb)) {
+ struct sctphdr *sctp = sctp_hdr(skb);
+ key->ipv6.tp.src = sctp->source;
+ key->ipv6.tp.dst = sctp->dest;
+ }
} else if (key->ip.proto == NEXTHDR_ICMP) {
- key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
if (icmp6hdr_ok(skb)) {
- error = parse_icmpv6(skb, key, &key_len, nh_len);
- if (error < 0)
- goto out;
+ error = parse_icmpv6(skb, key, nh_len);
+ if (error)
+ return error;
}
}
}
-out:
- *key_lenp = key_len;
- return error;
+ return 0;
}
-static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start, int key_len)
+static u32 ovs_flow_hash(const struct sw_flow_key *key, int key_start,
+ int key_end)
{
- return jhash2((u32 *)((u8 *)key + key_start),
- DIV_ROUND_UP(key_len - key_start, sizeof(u32)), 0);
+ u32 *hash_key = (u32 *)((u8 *)key + key_start);
+ int hash_u32s = (key_end - key_start) >> 2;
+
+ /* Make sure the number of hash bytes is a multiple of u32. */
+ BUILD_BUG_ON(sizeof(long) % sizeof(u32));
+
+ return jhash2(hash_key, hash_u32s, 0);
}
-static int flow_key_start(struct sw_flow_key *key)
+static int flow_key_start(const struct sw_flow_key *key)
{
if (key->tun_key.ipv4_dst)
return 0;
else
- return offsetof(struct sw_flow_key, phy);
+ return rounddown(offsetof(struct sw_flow_key, phy),
+ sizeof(long));
+}
+
+static bool __cmp_key(const struct sw_flow_key *key1,
+ const struct sw_flow_key *key2, int key_start, int key_end)
+{
+ const long *cp1 = (long *)((u8 *)key1 + key_start);
+ const long *cp2 = (long *)((u8 *)key2 + key_start);
+ long diffs = 0;
+ int i;
+
+ for (i = key_start; i < key_end; i += sizeof(long))
+ diffs |= *cp1++ ^ *cp2++;
+
+ return diffs == 0;
}
-struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
- struct sw_flow_key *key, int key_len)
+static bool __flow_cmp_masked_key(const struct sw_flow *flow,
+ const struct sw_flow_key *key, int key_start, int key_end)
+{
+ return __cmp_key(&flow->key, key, key_start, key_end);
+}
+
+static bool __flow_cmp_unmasked_key(const struct sw_flow *flow,
+ const struct sw_flow_key *key, int key_start, int key_end)
+{
+ return __cmp_key(&flow->unmasked_key, key, key_start, key_end);
+}
+
+bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
+ const struct sw_flow_key *key, int key_end)
+{
+ int key_start;
+ key_start = flow_key_start(key);
+
+ return __flow_cmp_unmasked_key(flow, key, key_start, key_end);
+
+}
+
+struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table,
+ struct sw_flow_match *match)
+{
+ struct sw_flow_key *unmasked = match->key;
+ int key_end = match->range.end;
+ struct sw_flow *flow;
+
+ flow = ovs_flow_lookup(table, unmasked);
+ if (flow && (!ovs_flow_cmp_unmasked_key(flow, unmasked, key_end)))
+ flow = NULL;
+
+ return flow;
+}
+
+static struct sw_flow *ovs_masked_flow_lookup(struct flow_table *table,
+ const struct sw_flow_key *unmasked,
+ struct sw_flow_mask *mask)
{
struct sw_flow *flow;
struct hlist_head *head;
- u8 *_key;
- int key_start;
+ int key_start = mask->range.start;
+ int key_end = mask->range.end;
u32 hash;
+ struct sw_flow_key masked_key;
- key_start = flow_key_start(key);
- hash = ovs_flow_hash(key, key_start, key_len);
-
- _key = (u8 *) key + key_start;
+ ovs_flow_key_mask(&masked_key, unmasked, mask);
+ hash = ovs_flow_hash(&masked_key, key_start, key_end);
head = find_bucket(table, hash);
hlist_for_each_entry_rcu(flow, head, hash_node[table->node_ver]) {
-
- if (flow->hash == hash &&
- !memcmp((u8 *)&flow->key + key_start, _key, key_len - key_start)) {
+ if (flow->mask == mask &&
+ __flow_cmp_masked_key(flow, &masked_key,
+ key_start, key_end))
return flow;
- }
}
return NULL;
}
-void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
- struct sw_flow_key *key, int key_len)
+struct sw_flow *ovs_flow_lookup(struct flow_table *tbl,
+ const struct sw_flow_key *key)
+{
+ struct sw_flow *flow = NULL;
+ struct sw_flow_mask *mask;
+
+ list_for_each_entry_rcu(mask, tbl->mask_list, list) {
+ flow = ovs_masked_flow_lookup(tbl, key, mask);
+ if (flow) /* Found */
+ break;
+ }
+
+ return flow;
+}
+
+
+void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow)
{
- flow->hash = ovs_flow_hash(key, flow_key_start(key), key_len);
- memcpy(&flow->key, key, sizeof(flow->key));
- __flow_tbl_insert(table, flow);
+ flow->hash = ovs_flow_hash(&flow->key, flow->mask->range.start,
+ flow->mask->range.end);
+ __tbl_insert(table, flow);
}
-void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)
+void ovs_flow_remove(struct flow_table *table, struct sw_flow *flow)
{
BUG_ON(table->count == 0);
hlist_del_rcu(&flow->hash_node[table->node_ver]);
@@ -837,6 +1140,7 @@ const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
[OVS_KEY_ATTR_IPV6] = sizeof(struct ovs_key_ipv6),
[OVS_KEY_ATTR_TCP] = sizeof(struct ovs_key_tcp),
[OVS_KEY_ATTR_UDP] = sizeof(struct ovs_key_udp),
+ [OVS_KEY_ATTR_SCTP] = sizeof(struct ovs_key_sctp),
[OVS_KEY_ATTR_ICMP] = sizeof(struct ovs_key_icmp),
[OVS_KEY_ATTR_ICMPV6] = sizeof(struct ovs_key_icmpv6),
[OVS_KEY_ATTR_ARP] = sizeof(struct ovs_key_arp),
@@ -844,149 +1148,84 @@ const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1] = {
[OVS_KEY_ATTR_TUNNEL] = -1,
};
-static int ipv4_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
- const struct nlattr *a[], u32 *attrs)
+static bool is_all_zero(const u8 *fp, size_t size)
{
- const struct ovs_key_icmp *icmp_key;
- const struct ovs_key_tcp *tcp_key;
- const struct ovs_key_udp *udp_key;
-
- switch (swkey->ip.proto) {
- case IPPROTO_TCP:
- if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
- return -EINVAL;
- *attrs &= ~(1 << OVS_KEY_ATTR_TCP);
-
- *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
- tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
- swkey->ipv4.tp.src = tcp_key->tcp_src;
- swkey->ipv4.tp.dst = tcp_key->tcp_dst;
- break;
-
- case IPPROTO_UDP:
- if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
- return -EINVAL;
- *attrs &= ~(1 << OVS_KEY_ATTR_UDP);
-
- *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
- udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
- swkey->ipv4.tp.src = udp_key->udp_src;
- swkey->ipv4.tp.dst = udp_key->udp_dst;
- break;
-
- case IPPROTO_ICMP:
- if (!(*attrs & (1 << OVS_KEY_ATTR_ICMP)))
- return -EINVAL;
- *attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
-
- *key_len = SW_FLOW_KEY_OFFSET(ipv4.tp);
- icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
- swkey->ipv4.tp.src = htons(icmp_key->icmp_type);
- swkey->ipv4.tp.dst = htons(icmp_key->icmp_code);
- break;
- }
-
- return 0;
-}
-
-static int ipv6_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_len,
- const struct nlattr *a[], u32 *attrs)
-{
- const struct ovs_key_icmpv6 *icmpv6_key;
- const struct ovs_key_tcp *tcp_key;
- const struct ovs_key_udp *udp_key;
-
- switch (swkey->ip.proto) {
- case IPPROTO_TCP:
- if (!(*attrs & (1 << OVS_KEY_ATTR_TCP)))
- return -EINVAL;
- *attrs &= ~(1 << OVS_KEY_ATTR_TCP);
-
- *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
- tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
- swkey->ipv6.tp.src = tcp_key->tcp_src;
- swkey->ipv6.tp.dst = tcp_key->tcp_dst;
- break;
-
- case IPPROTO_UDP:
- if (!(*attrs & (1 << OVS_KEY_ATTR_UDP)))
- return -EINVAL;
- *attrs &= ~(1 << OVS_KEY_ATTR_UDP);
-
- *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
- udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
- swkey->ipv6.tp.src = udp_key->udp_src;
- swkey->ipv6.tp.dst = udp_key->udp_dst;
- break;
-
- case IPPROTO_ICMPV6:
- if (!(*attrs & (1 << OVS_KEY_ATTR_ICMPV6)))
- return -EINVAL;
- *attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
-
- *key_len = SW_FLOW_KEY_OFFSET(ipv6.tp);
- icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
- swkey->ipv6.tp.src = htons(icmpv6_key->icmpv6_type);
- swkey->ipv6.tp.dst = htons(icmpv6_key->icmpv6_code);
+ int i;
- if (swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_SOLICITATION) ||
- swkey->ipv6.tp.src == htons(NDISC_NEIGHBOUR_ADVERTISEMENT)) {
- const struct ovs_key_nd *nd_key;
+ if (!fp)
+ return false;
- if (!(*attrs & (1 << OVS_KEY_ATTR_ND)))
- return -EINVAL;
- *attrs &= ~(1 << OVS_KEY_ATTR_ND);
-
- *key_len = SW_FLOW_KEY_OFFSET(ipv6.nd);
- nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
- memcpy(&swkey->ipv6.nd.target, nd_key->nd_target,
- sizeof(swkey->ipv6.nd.target));
- memcpy(swkey->ipv6.nd.sll, nd_key->nd_sll, ETH_ALEN);
- memcpy(swkey->ipv6.nd.tll, nd_key->nd_tll, ETH_ALEN);
- }
- break;
- }
+ for (i = 0; i < size; i++)
+ if (fp[i])
+ return false;
- return 0;
+ return true;
}
-static int parse_flow_nlattrs(const struct nlattr *attr,
- const struct nlattr *a[], u32 *attrsp)
+static int __parse_flow_nlattrs(const struct nlattr *attr,
+ const struct nlattr *a[],
+ u64 *attrsp, bool nz)
{
const struct nlattr *nla;
u32 attrs;
int rem;
- attrs = 0;
+ attrs = *attrsp;
nla_for_each_nested(nla, attr, rem) {
u16 type = nla_type(nla);
int expected_len;
- if (type > OVS_KEY_ATTR_MAX || attrs & (1 << type))
+ if (type > OVS_KEY_ATTR_MAX) {
+ OVS_NLERR("Unknown key attribute (type=%d, max=%d).\n",
+ type, OVS_KEY_ATTR_MAX);
+ return -EINVAL;
+ }
+
+ if (attrs & (1 << type)) {
+ OVS_NLERR("Duplicate key attribute (type %d).\n", type);
return -EINVAL;
+ }
expected_len = ovs_key_lens[type];
- if (nla_len(nla) != expected_len && expected_len != -1)
+ if (nla_len(nla) != expected_len && expected_len != -1) {
+ OVS_NLERR("Key attribute has unexpected length (type=%d"
+ ", length=%d, expected=%d).\n", type,
+ nla_len(nla), expected_len);
return -EINVAL;
+ }
- attrs |= 1 << type;
- a[type] = nla;
+ if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
+ attrs |= 1 << type;
+ a[type] = nla;
+ }
}
- if (rem)
+ if (rem) {
+ OVS_NLERR("Message has %d unknown bytes.\n", rem);
return -EINVAL;
+ }
*attrsp = attrs;
return 0;
}
+static int parse_flow_mask_nlattrs(const struct nlattr *attr,
+ const struct nlattr *a[], u64 *attrsp)
+{
+ return __parse_flow_nlattrs(attr, a, attrsp, true);
+}
+
+static int parse_flow_nlattrs(const struct nlattr *attr,
+ const struct nlattr *a[], u64 *attrsp)
+{
+ return __parse_flow_nlattrs(attr, a, attrsp, false);
+}
+
int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr,
- struct ovs_key_ipv4_tunnel *tun_key)
+ struct sw_flow_match *match, bool is_mask)
{
struct nlattr *a;
int rem;
bool ttl = false;
-
- memset(tun_key, 0, sizeof(*tun_key));
+ __be16 tun_flags = 0;
nla_for_each_nested(a, attr, rem) {
int type = nla_type(a);
@@ -1000,53 +1239,78 @@ int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr,
[OVS_TUNNEL_KEY_ATTR_CSUM] = 0,
};
- if (type > OVS_TUNNEL_KEY_ATTR_MAX ||
- ovs_tunnel_key_lens[type] != nla_len(a))
+ if (type > OVS_TUNNEL_KEY_ATTR_MAX) {
+ OVS_NLERR("Unknown IPv4 tunnel attribute (type=%d, max=%d).\n",
+ type, OVS_TUNNEL_KEY_ATTR_MAX);
return -EINVAL;
+ }
+
+ if (ovs_tunnel_key_lens[type] != nla_len(a)) {
+ OVS_NLERR("IPv4 tunnel attribute type has unexpected "
+ " length (type=%d, length=%d, expected=%d).\n",
+ type, nla_len(a), ovs_tunnel_key_lens[type]);
+ return -EINVAL;
+ }
switch (type) {
case OVS_TUNNEL_KEY_ATTR_ID:
- tun_key->tun_id = nla_get_be64(a);
- tun_key->tun_flags |= TUNNEL_KEY;
+ SW_FLOW_KEY_PUT(match, tun_key.tun_id,
+ nla_get_be64(a), is_mask);
+ tun_flags |= TUNNEL_KEY;
break;
case OVS_TUNNEL_KEY_ATTR_IPV4_SRC:
- tun_key->ipv4_src = nla_get_be32(a);
+ SW_FLOW_KEY_PUT(match, tun_key.ipv4_src,
+ nla_get_be32(a), is_mask);
break;
case OVS_TUNNEL_KEY_ATTR_IPV4_DST:
- tun_key->ipv4_dst = nla_get_be32(a);
+ SW_FLOW_KEY_PUT(match, tun_key.ipv4_dst,
+ nla_get_be32(a), is_mask);
break;
case OVS_TUNNEL_KEY_ATTR_TOS:
- tun_key->ipv4_tos = nla_get_u8(a);
+ SW_FLOW_KEY_PUT(match, tun_key.ipv4_tos,
+ nla_get_u8(a), is_mask);
break;
case OVS_TUNNEL_KEY_ATTR_TTL:
- tun_key->ipv4_ttl = nla_get_u8(a);
+ SW_FLOW_KEY_PUT(match, tun_key.ipv4_ttl,
+ nla_get_u8(a), is_mask);
ttl = true;
break;
case OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT:
- tun_key->tun_flags |= TUNNEL_DONT_FRAGMENT;
+ tun_flags |= TUNNEL_DONT_FRAGMENT;
break;
case OVS_TUNNEL_KEY_ATTR_CSUM:
- tun_key->tun_flags |= TUNNEL_CSUM;
+ tun_flags |= TUNNEL_CSUM;
break;
default:
return -EINVAL;
-
}
}
- if (rem > 0)
- return -EINVAL;
- if (!tun_key->ipv4_dst)
- return -EINVAL;
+ SW_FLOW_KEY_PUT(match, tun_key.tun_flags, tun_flags, is_mask);
- if (!ttl)
+ if (rem > 0) {
+ OVS_NLERR("IPv4 tunnel attribute has %d unknown bytes.\n", rem);
return -EINVAL;
+ }
+
+ if (!is_mask) {
+ if (!match->key->tun_key.ipv4_dst) {
+ OVS_NLERR("IPv4 tunnel destination address is zero.\n");
+ return -EINVAL;
+ }
+
+ if (!ttl) {
+ OVS_NLERR("IPv4 tunnel TTL not specified.\n");
+ return -EINVAL;
+ }
+ }
return 0;
}
int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb,
- const struct ovs_key_ipv4_tunnel *tun_key)
+ const struct ovs_key_ipv4_tunnel *tun_key,
+ const struct ovs_key_ipv4_tunnel *output)
{
struct nlattr *nla;
@@ -1054,23 +1318,24 @@ int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb,
if (!nla)
return -EMSGSIZE;
- if (tun_key->tun_flags & TUNNEL_KEY &&
- nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, tun_key->tun_id))
+ if (output->tun_flags & TUNNEL_KEY &&
+ nla_put_be64(skb, OVS_TUNNEL_KEY_ATTR_ID, output->tun_id))
return -EMSGSIZE;
- if (tun_key->ipv4_src &&
- nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, tun_key->ipv4_src))
+ if (output->ipv4_src &&
+ nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_SRC, output->ipv4_src))
return -EMSGSIZE;
- if (nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, tun_key->ipv4_dst))
+ if (output->ipv4_dst &&
+ nla_put_be32(skb, OVS_TUNNEL_KEY_ATTR_IPV4_DST, output->ipv4_dst))
return -EMSGSIZE;
- if (tun_key->ipv4_tos &&
- nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, tun_key->ipv4_tos))
+ if (output->ipv4_tos &&
+ nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TOS, output->ipv4_tos))
return -EMSGSIZE;
- if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, tun_key->ipv4_ttl))
+ if (nla_put_u8(skb, OVS_TUNNEL_KEY_ATTR_TTL, output->ipv4_ttl))
return -EMSGSIZE;
- if ((tun_key->tun_flags & TUNNEL_DONT_FRAGMENT) &&
+ if ((output->tun_flags & TUNNEL_DONT_FRAGMENT) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT))
return -EMSGSIZE;
- if ((tun_key->tun_flags & TUNNEL_CSUM) &&
+ if ((output->tun_flags & TUNNEL_CSUM) &&
nla_put_flag(skb, OVS_TUNNEL_KEY_ATTR_CSUM))
return -EMSGSIZE;
@@ -1078,176 +1343,390 @@ int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb,
return 0;
}
-/**
- * ovs_flow_from_nlattrs - parses Netlink attributes into a flow key.
- * @swkey: receives the extracted flow key.
- * @key_lenp: number of bytes used in @swkey.
- * @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
- * sequence.
- */
-int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
- const struct nlattr *attr)
+static int metadata_from_nlattrs(struct sw_flow_match *match, u64 *attrs,
+ const struct nlattr **a, bool is_mask)
{
- const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
- const struct ovs_key_ethernet *eth_key;
- int key_len;
- u32 attrs;
- int err;
+ if (*attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
+ SW_FLOW_KEY_PUT(match, phy.priority,
+ nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]), is_mask);
+ *attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
+ }
- memset(swkey, 0, sizeof(struct sw_flow_key));
- key_len = SW_FLOW_KEY_OFFSET(eth);
+ if (*attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
+ u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
- err = parse_flow_nlattrs(attr, a, &attrs);
- if (err)
- return err;
+ if (is_mask)
+ in_port = 0xffffffff; /* Always exact match in_port. */
+ else if (in_port >= DP_MAX_PORTS)
+ return -EINVAL;
- /* Metadata attributes. */
- if (attrs & (1 << OVS_KEY_ATTR_PRIORITY)) {
- swkey->phy.priority = nla_get_u32(a[OVS_KEY_ATTR_PRIORITY]);
- attrs &= ~(1 << OVS_KEY_ATTR_PRIORITY);
+ SW_FLOW_KEY_PUT(match, phy.in_port, in_port, is_mask);
+ *attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
+ } else if (!is_mask) {
+ SW_FLOW_KEY_PUT(match, phy.in_port, DP_MAX_PORTS, is_mask);
}
- if (attrs & (1 << OVS_KEY_ATTR_IN_PORT)) {
- u32 in_port = nla_get_u32(a[OVS_KEY_ATTR_IN_PORT]);
- if (in_port >= DP_MAX_PORTS)
- return -EINVAL;
- swkey->phy.in_port = in_port;
- attrs &= ~(1 << OVS_KEY_ATTR_IN_PORT);
- } else {
- swkey->phy.in_port = DP_MAX_PORTS;
+
+ if (*attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
+ uint32_t mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
+
+ SW_FLOW_KEY_PUT(match, phy.skb_mark, mark, is_mask);
+ *attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
}
- if (attrs & (1 << OVS_KEY_ATTR_SKB_MARK)) {
- swkey->phy.skb_mark = nla_get_u32(a[OVS_KEY_ATTR_SKB_MARK]);
- attrs &= ~(1 << OVS_KEY_ATTR_SKB_MARK);
+ if (*attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
+ if (ovs_ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], match,
+ is_mask))
+ return -EINVAL;
+ *attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
}
+ return 0;
+}
- if (attrs & (1 << OVS_KEY_ATTR_TUNNEL)) {
- err = ovs_ipv4_tun_from_nlattr(a[OVS_KEY_ATTR_TUNNEL], &swkey->tun_key);
- if (err)
- return err;
+static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
+ const struct nlattr **a, bool is_mask)
+{
+ int err;
+ u64 orig_attrs = attrs;
- attrs &= ~(1 << OVS_KEY_ATTR_TUNNEL);
- }
+ err = metadata_from_nlattrs(match, &attrs, a, is_mask);
+ if (err)
+ return err;
- /* Data attributes. */
- if (!(attrs & (1 << OVS_KEY_ATTR_ETHERNET)))
- return -EINVAL;
- attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
+ if (attrs & (1 << OVS_KEY_ATTR_ETHERNET)) {
+ const struct ovs_key_ethernet *eth_key;
- eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
- memcpy(swkey->eth.src, eth_key->eth_src, ETH_ALEN);
- memcpy(swkey->eth.dst, eth_key->eth_dst, ETH_ALEN);
+ eth_key = nla_data(a[OVS_KEY_ATTR_ETHERNET]);
+ SW_FLOW_KEY_MEMCPY(match, eth.src,
+ eth_key->eth_src, ETH_ALEN, is_mask);
+ SW_FLOW_KEY_MEMCPY(match, eth.dst,
+ eth_key->eth_dst, ETH_ALEN, is_mask);
+ attrs &= ~(1 << OVS_KEY_ATTR_ETHERNET);
+ }
- if (attrs & (1u << OVS_KEY_ATTR_ETHERTYPE) &&
- nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q)) {
- const struct nlattr *encap;
+ if (attrs & (1 << OVS_KEY_ATTR_VLAN)) {
__be16 tci;
- if (attrs != ((1 << OVS_KEY_ATTR_VLAN) |
- (1 << OVS_KEY_ATTR_ETHERTYPE) |
- (1 << OVS_KEY_ATTR_ENCAP)))
- return -EINVAL;
-
- encap = a[OVS_KEY_ATTR_ENCAP];
tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
- if (tci & htons(VLAN_TAG_PRESENT)) {
- swkey->eth.tci = tci;
-
- err = parse_flow_nlattrs(encap, a, &attrs);
- if (err)
- return err;
- } else if (!tci) {
- /* Corner case for truncated 802.1Q header. */
- if (nla_len(encap))
- return -EINVAL;
+ if (!(tci & htons(VLAN_TAG_PRESENT))) {
+ if (is_mask)
+ OVS_NLERR("VLAN TCI mask does not have exact match for VLAN_TAG_PRESENT bit.\n");
+ else
+ OVS_NLERR("VLAN TCI does not have VLAN_TAG_PRESENT bit set.\n");
- swkey->eth.type = htons(ETH_P_8021Q);
- *key_lenp = key_len;
- return 0;
- } else {
return -EINVAL;
}
- }
+
+ SW_FLOW_KEY_PUT(match, eth.tci, tci, is_mask);
+ attrs &= ~(1 << OVS_KEY_ATTR_VLAN);
+ } else if (!is_mask)
+ SW_FLOW_KEY_PUT(match, eth.tci, htons(0xffff), true);
if (attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) {
- swkey->eth.type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
- if (ntohs(swkey->eth.type) < ETH_P_802_3_MIN)
+ __be16 eth_type;
+
+ eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
+ if (is_mask) {
+ /* Always exact match EtherType. */
+ eth_type = htons(0xffff);
+ } else if (ntohs(eth_type) < ETH_P_802_3_MIN) {
+ OVS_NLERR("EtherType is less than minimum (type=%x, min=%x).\n",
+ ntohs(eth_type), ETH_P_802_3_MIN);
return -EINVAL;
+ }
+
+ SW_FLOW_KEY_PUT(match, eth.type, eth_type, is_mask);
attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
- } else {
- swkey->eth.type = htons(ETH_P_802_2);
+ } else if (!is_mask) {
+ SW_FLOW_KEY_PUT(match, eth.type, htons(ETH_P_802_2), is_mask);
}
- if (swkey->eth.type == htons(ETH_P_IP)) {
+ if (attrs & (1 << OVS_KEY_ATTR_IPV4)) {
const struct ovs_key_ipv4 *ipv4_key;
- if (!(attrs & (1 << OVS_KEY_ATTR_IPV4)))
- return -EINVAL;
- attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
-
- key_len = SW_FLOW_KEY_OFFSET(ipv4.addr);
ipv4_key = nla_data(a[OVS_KEY_ATTR_IPV4]);
- if (ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX)
+ if (!is_mask && ipv4_key->ipv4_frag > OVS_FRAG_TYPE_MAX) {
+ OVS_NLERR("Unknown IPv4 fragment type (value=%d, max=%d).\n",
+ ipv4_key->ipv4_frag, OVS_FRAG_TYPE_MAX);
return -EINVAL;
- swkey->ip.proto = ipv4_key->ipv4_proto;
- swkey->ip.tos = ipv4_key->ipv4_tos;
- swkey->ip.ttl = ipv4_key->ipv4_ttl;
- swkey->ip.frag = ipv4_key->ipv4_frag;
- swkey->ipv4.addr.src = ipv4_key->ipv4_src;
- swkey->ipv4.addr.dst = ipv4_key->ipv4_dst;
-
- if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
- err = ipv4_flow_from_nlattrs(swkey, &key_len, a, &attrs);
- if (err)
- return err;
}
- } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
- const struct ovs_key_ipv6 *ipv6_key;
+ SW_FLOW_KEY_PUT(match, ip.proto,
+ ipv4_key->ipv4_proto, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.tos,
+ ipv4_key->ipv4_tos, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.ttl,
+ ipv4_key->ipv4_ttl, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.frag,
+ ipv4_key->ipv4_frag, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv4.addr.src,
+ ipv4_key->ipv4_src, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
+ ipv4_key->ipv4_dst, is_mask);
+ attrs &= ~(1 << OVS_KEY_ATTR_IPV4);
+ }
- if (!(attrs & (1 << OVS_KEY_ATTR_IPV6)))
- return -EINVAL;
- attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
+ if (attrs & (1 << OVS_KEY_ATTR_IPV6)) {
+ const struct ovs_key_ipv6 *ipv6_key;
- key_len = SW_FLOW_KEY_OFFSET(ipv6.label);
ipv6_key = nla_data(a[OVS_KEY_ATTR_IPV6]);
- if (ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX)
+ if (!is_mask && ipv6_key->ipv6_frag > OVS_FRAG_TYPE_MAX) {
+ OVS_NLERR("Unknown IPv6 fragment type (value=%d, max=%d).\n",
+ ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
return -EINVAL;
- swkey->ipv6.label = ipv6_key->ipv6_label;
- swkey->ip.proto = ipv6_key->ipv6_proto;
- swkey->ip.tos = ipv6_key->ipv6_tclass;
- swkey->ip.ttl = ipv6_key->ipv6_hlimit;
- swkey->ip.frag = ipv6_key->ipv6_frag;
- memcpy(&swkey->ipv6.addr.src, ipv6_key->ipv6_src,
- sizeof(swkey->ipv6.addr.src));
- memcpy(&swkey->ipv6.addr.dst, ipv6_key->ipv6_dst,
- sizeof(swkey->ipv6.addr.dst));
-
- if (swkey->ip.frag != OVS_FRAG_TYPE_LATER) {
- err = ipv6_flow_from_nlattrs(swkey, &key_len, a, &attrs);
- if (err)
- return err;
}
- } else if (swkey->eth.type == htons(ETH_P_ARP) ||
- swkey->eth.type == htons(ETH_P_RARP)) {
+ SW_FLOW_KEY_PUT(match, ipv6.label,
+ ipv6_key->ipv6_label, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.proto,
+ ipv6_key->ipv6_proto, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.tos,
+ ipv6_key->ipv6_tclass, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.ttl,
+ ipv6_key->ipv6_hlimit, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.frag,
+ ipv6_key->ipv6_frag, is_mask);
+ SW_FLOW_KEY_MEMCPY(match, ipv6.addr.src,
+ ipv6_key->ipv6_src,
+ sizeof(match->key->ipv6.addr.src),
+ is_mask);
+ SW_FLOW_KEY_MEMCPY(match, ipv6.addr.dst,
+ ipv6_key->ipv6_dst,
+ sizeof(match->key->ipv6.addr.dst),
+ is_mask);
+
+ attrs &= ~(1 << OVS_KEY_ATTR_IPV6);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_ARP)) {
const struct ovs_key_arp *arp_key;
- if (!(attrs & (1 << OVS_KEY_ATTR_ARP)))
+ arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
+ if (!is_mask && (arp_key->arp_op & htons(0xff00))) {
+ OVS_NLERR("Unknown ARP opcode (opcode=%d).\n",
+ arp_key->arp_op);
return -EINVAL;
+ }
+
+ SW_FLOW_KEY_PUT(match, ipv4.addr.src,
+ arp_key->arp_sip, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv4.addr.dst,
+ arp_key->arp_tip, is_mask);
+ SW_FLOW_KEY_PUT(match, ip.proto,
+ ntohs(arp_key->arp_op), is_mask);
+ SW_FLOW_KEY_MEMCPY(match, ipv4.arp.sha,
+ arp_key->arp_sha, ETH_ALEN, is_mask);
+ SW_FLOW_KEY_MEMCPY(match, ipv4.arp.tha,
+ arp_key->arp_tha, ETH_ALEN, is_mask);
+
attrs &= ~(1 << OVS_KEY_ATTR_ARP);
+ }
- key_len = SW_FLOW_KEY_OFFSET(ipv4.arp);
- arp_key = nla_data(a[OVS_KEY_ATTR_ARP]);
- swkey->ipv4.addr.src = arp_key->arp_sip;
- swkey->ipv4.addr.dst = arp_key->arp_tip;
- if (arp_key->arp_op & htons(0xff00))
+ if (attrs & (1 << OVS_KEY_ATTR_TCP)) {
+ const struct ovs_key_tcp *tcp_key;
+
+ tcp_key = nla_data(a[OVS_KEY_ATTR_TCP]);
+ if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
+ SW_FLOW_KEY_PUT(match, ipv4.tp.src,
+ tcp_key->tcp_src, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
+ tcp_key->tcp_dst, is_mask);
+ } else {
+ SW_FLOW_KEY_PUT(match, ipv6.tp.src,
+ tcp_key->tcp_src, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
+ tcp_key->tcp_dst, is_mask);
+ }
+ attrs &= ~(1 << OVS_KEY_ATTR_TCP);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_UDP)) {
+ const struct ovs_key_udp *udp_key;
+
+ udp_key = nla_data(a[OVS_KEY_ATTR_UDP]);
+ if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
+ SW_FLOW_KEY_PUT(match, ipv4.tp.src,
+ udp_key->udp_src, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
+ udp_key->udp_dst, is_mask);
+ } else {
+ SW_FLOW_KEY_PUT(match, ipv6.tp.src,
+ udp_key->udp_src, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
+ udp_key->udp_dst, is_mask);
+ }
+ attrs &= ~(1 << OVS_KEY_ATTR_UDP);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_SCTP)) {
+ const struct ovs_key_sctp *sctp_key;
+
+ sctp_key = nla_data(a[OVS_KEY_ATTR_SCTP]);
+ if (orig_attrs & (1 << OVS_KEY_ATTR_IPV4)) {
+ SW_FLOW_KEY_PUT(match, ipv4.tp.src,
+ sctp_key->sctp_src, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
+ sctp_key->sctp_dst, is_mask);
+ } else {
+ SW_FLOW_KEY_PUT(match, ipv6.tp.src,
+ sctp_key->sctp_src, is_mask);
+ SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
+ sctp_key->sctp_dst, is_mask);
+ }
+ attrs &= ~(1 << OVS_KEY_ATTR_SCTP);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_ICMP)) {
+ const struct ovs_key_icmp *icmp_key;
+
+ icmp_key = nla_data(a[OVS_KEY_ATTR_ICMP]);
+ SW_FLOW_KEY_PUT(match, ipv4.tp.src,
+ htons(icmp_key->icmp_type), is_mask);
+ SW_FLOW_KEY_PUT(match, ipv4.tp.dst,
+ htons(icmp_key->icmp_code), is_mask);
+ attrs &= ~(1 << OVS_KEY_ATTR_ICMP);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_ICMPV6)) {
+ const struct ovs_key_icmpv6 *icmpv6_key;
+
+ icmpv6_key = nla_data(a[OVS_KEY_ATTR_ICMPV6]);
+ SW_FLOW_KEY_PUT(match, ipv6.tp.src,
+ htons(icmpv6_key->icmpv6_type), is_mask);
+ SW_FLOW_KEY_PUT(match, ipv6.tp.dst,
+ htons(icmpv6_key->icmpv6_code), is_mask);
+ attrs &= ~(1 << OVS_KEY_ATTR_ICMPV6);
+ }
+
+ if (attrs & (1 << OVS_KEY_ATTR_ND)) {
+ const struct ovs_key_nd *nd_key;
+
+ nd_key = nla_data(a[OVS_KEY_ATTR_ND]);
+ SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
+ nd_key->nd_target,
+ sizeof(match->key->ipv6.nd.target),
+ is_mask);
+ SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
+ nd_key->nd_sll, ETH_ALEN, is_mask);
+ SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
+ nd_key->nd_tll, ETH_ALEN, is_mask);
+ attrs &= ~(1 << OVS_KEY_ATTR_ND);
+ }
+
+ if (attrs != 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * ovs_match_from_nlattrs - parses Netlink attributes into a flow key and
+ * mask. In case the 'mask' is NULL, the flow is treated as an exact
+ * match flow. Otherwise, it is treated as a wildcarded flow, except
+ * when the mask does not include any don't-care bits.
+ * @match: receives the extracted flow match information.
+ * @key: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
+ * sequence. The fields should be those of the packet that triggered
+ * the creation of this flow.
+ * @mask: Optional. Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink
+ * attributes that specify the mask of the wildcarded flow.
+ */
+int ovs_match_from_nlattrs(struct sw_flow_match *match,
+ const struct nlattr *key,
+ const struct nlattr *mask)
+{
+ const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
+ const struct nlattr *encap;
+ u64 key_attrs = 0;
+ u64 mask_attrs = 0;
+ bool encap_valid = false;
+ int err;
+
+ err = parse_flow_nlattrs(key, a, &key_attrs);
+ if (err)
+ return err;
+
+ if ((key_attrs & (1 << OVS_KEY_ATTR_ETHERNET)) &&
+ (key_attrs & (1 << OVS_KEY_ATTR_ETHERTYPE)) &&
+ (nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]) == htons(ETH_P_8021Q))) {
+ __be16 tci;
+
+ if (!((key_attrs & (1 << OVS_KEY_ATTR_VLAN)) &&
+ (key_attrs & (1 << OVS_KEY_ATTR_ENCAP)))) {
+ OVS_NLERR("Invalid Vlan frame.\n");
return -EINVAL;
- swkey->ip.proto = ntohs(arp_key->arp_op);
- memcpy(swkey->ipv4.arp.sha, arp_key->arp_sha, ETH_ALEN);
- memcpy(swkey->ipv4.arp.tha, arp_key->arp_tha, ETH_ALEN);
+ }
+
+ key_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
+ tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+ encap = a[OVS_KEY_ATTR_ENCAP];
+ key_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
+ encap_valid = true;
+
+ if (tci & htons(VLAN_TAG_PRESENT)) {
+ err = parse_flow_nlattrs(encap, a, &key_attrs);
+ if (err)
+ return err;
+ } else if (!tci) {
+ /* Corner case for truncated 802.1Q header. */
+ if (nla_len(encap)) {
+ OVS_NLERR("Truncated 802.1Q header has non-zero encap attribute.\n");
+ return -EINVAL;
+ }
+ } else {
+ OVS_NLERR("Encap attribute is set for a non-VLAN frame.\n");
+ return -EINVAL;
+ }
+ }
+
+ err = ovs_key_from_nlattrs(match, key_attrs, a, false);
+ if (err)
+ return err;
+
+ if (mask) {
+ err = parse_flow_mask_nlattrs(mask, a, &mask_attrs);
+ if (err)
+ return err;
+
+ if (mask_attrs & 1ULL << OVS_KEY_ATTR_ENCAP) {
+ __be16 eth_type = 0;
+ __be16 tci = 0;
+
+ if (!encap_valid) {
+ OVS_NLERR("Encap mask attribute is set for non-VLAN frame.\n");
+ return -EINVAL;
+ }
+
+ mask_attrs &= ~(1 << OVS_KEY_ATTR_ENCAP);
+ if (a[OVS_KEY_ATTR_ETHERTYPE])
+ eth_type = nla_get_be16(a[OVS_KEY_ATTR_ETHERTYPE]);
+
+ if (eth_type == htons(0xffff)) {
+ mask_attrs &= ~(1 << OVS_KEY_ATTR_ETHERTYPE);
+ encap = a[OVS_KEY_ATTR_ENCAP];
+ err = parse_flow_mask_nlattrs(encap, a, &mask_attrs);
+ if (err)
+ return err;
+ } else {
+ OVS_NLERR("VLAN frames must have an exact match on the TPID (mask=%x).\n",
+ ntohs(eth_type));
+ return -EINVAL;
+ }
+
+ if (a[OVS_KEY_ATTR_VLAN])
+ tci = nla_get_be16(a[OVS_KEY_ATTR_VLAN]);
+
+ if (!(tci & htons(VLAN_TAG_PRESENT))) {
+ OVS_NLERR("VLAN tag present bit must have an exact match (tci_mask=%x).\n", ntohs(tci));
+ return -EINVAL;
+ }
+ }
+
+ err = ovs_key_from_nlattrs(match, mask_attrs, a, true);
+ if (err)
+ return err;
+ } else {
+ /* Populate exact match flow's key mask. */
+ if (match->mask)
+ ovs_sw_flow_mask_set(match->mask, &match->range, 0xff);
}
- if (attrs)
+ if (!ovs_match_validate(match, key_attrs, mask_attrs))
return -EINVAL;
- *key_lenp = key_len;
return 0;
}
@@ -1255,7 +1734,6 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
/**
* ovs_flow_metadata_from_nlattrs - parses Netlink attributes into a flow key.
* @flow: Receives extracted in_port, priority, tun_key and skb_mark.
- * @key_len: Length of key in @flow. Used for calculating flow hash.
* @attr: Netlink attribute holding nested %OVS_KEY_ATTR_* Netlink attribute
* sequence.
*
@@ -1264,102 +1742,100 @@ int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
* get the metadata, that is, the parts of the flow key that cannot be
* extracted from the packet itself.
*/
-int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, int key_len,
- const struct nlattr *attr)
+int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow,
+ const struct nlattr *attr)
{
struct ovs_key_ipv4_tunnel *tun_key = &flow->key.tun_key;
- const struct nlattr *nla;
- int rem;
+ const struct nlattr *a[OVS_KEY_ATTR_MAX + 1];
+ u64 attrs = 0;
+ int err;
+ struct sw_flow_match match;
flow->key.phy.in_port = DP_MAX_PORTS;
flow->key.phy.priority = 0;
flow->key.phy.skb_mark = 0;
memset(tun_key, 0, sizeof(flow->key.tun_key));
- nla_for_each_nested(nla, attr, rem) {
- int type = nla_type(nla);
-
- if (type <= OVS_KEY_ATTR_MAX && ovs_key_lens[type] > 0) {
- int err;
-
- if (nla_len(nla) != ovs_key_lens[type])
- return -EINVAL;
-
- switch (type) {
- case OVS_KEY_ATTR_PRIORITY:
- flow->key.phy.priority = nla_get_u32(nla);
- break;
-
- case OVS_KEY_ATTR_TUNNEL:
- err = ovs_ipv4_tun_from_nlattr(nla, tun_key);
- if (err)
- return err;
- break;
-
- case OVS_KEY_ATTR_IN_PORT:
- if (nla_get_u32(nla) >= DP_MAX_PORTS)
- return -EINVAL;
- flow->key.phy.in_port = nla_get_u32(nla);
- break;
-
- case OVS_KEY_ATTR_SKB_MARK:
- flow->key.phy.skb_mark = nla_get_u32(nla);
- break;
- }
- }
- }
- if (rem)
+ err = parse_flow_nlattrs(attr, a, &attrs);
+ if (err)
return -EINVAL;
- flow->hash = ovs_flow_hash(&flow->key,
- flow_key_start(&flow->key), key_len);
+ memset(&match, 0, sizeof(match));
+ match.key = &flow->key;
+
+ err = metadata_from_nlattrs(&match, &attrs, a, false);
+ if (err)
+ return err;
return 0;
}
-int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
+int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey,
+ const struct sw_flow_key *output, struct sk_buff *skb)
{
struct ovs_key_ethernet *eth_key;
struct nlattr *nla, *encap;
+ bool is_mask = (swkey != output);
- if (swkey->phy.priority &&
- nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, swkey->phy.priority))
+ if (nla_put_u32(skb, OVS_KEY_ATTR_PRIORITY, output->phy.priority))
goto nla_put_failure;
- if (swkey->tun_key.ipv4_dst &&
- ovs_ipv4_tun_to_nlattr(skb, &swkey->tun_key))
+ if ((swkey->tun_key.ipv4_dst || is_mask) &&
+ ovs_ipv4_tun_to_nlattr(skb, &swkey->tun_key, &output->tun_key))
goto nla_put_failure;
- if (swkey->phy.in_port != DP_MAX_PORTS &&
- nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, swkey->phy.in_port))
- goto nla_put_failure;
+ if (swkey->phy.in_port == DP_MAX_PORTS) {
+ if (is_mask && (output->phy.in_port == 0xffff))
+ if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT, 0xffffffff))
+ goto nla_put_failure;
+ } else {
+ u16 upper_u16;
+ upper_u16 = !is_mask ? 0 : 0xffff;
- if (swkey->phy.skb_mark &&
- nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, swkey->phy.skb_mark))
+ if (nla_put_u32(skb, OVS_KEY_ATTR_IN_PORT,
+ (upper_u16 << 16) | output->phy.in_port))
+ goto nla_put_failure;
+ }
+
+ if (nla_put_u32(skb, OVS_KEY_ATTR_SKB_MARK, output->phy.skb_mark))
goto nla_put_failure;
nla = nla_reserve(skb, OVS_KEY_ATTR_ETHERNET, sizeof(*eth_key));
if (!nla)
goto nla_put_failure;
+
eth_key = nla_data(nla);
- memcpy(eth_key->eth_src, swkey->eth.src, ETH_ALEN);
- memcpy(eth_key->eth_dst, swkey->eth.dst, ETH_ALEN);
+ memcpy(eth_key->eth_src, output->eth.src, ETH_ALEN);
+ memcpy(eth_key->eth_dst, output->eth.dst, ETH_ALEN);
if (swkey->eth.tci || swkey->eth.type == htons(ETH_P_8021Q)) {
- if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, htons(ETH_P_8021Q)) ||
- nla_put_be16(skb, OVS_KEY_ATTR_VLAN, swkey->eth.tci))
+ __be16 eth_type;
+ eth_type = !is_mask ? htons(ETH_P_8021Q) : htons(0xffff);
+ if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, eth_type) ||
+ nla_put_be16(skb, OVS_KEY_ATTR_VLAN, output->eth.tci))
goto nla_put_failure;
encap = nla_nest_start(skb, OVS_KEY_ATTR_ENCAP);
if (!swkey->eth.tci)
goto unencap;
- } else {
+ } else
encap = NULL;
- }
- if (swkey->eth.type == htons(ETH_P_802_2))
+ if (swkey->eth.type == htons(ETH_P_802_2)) {
+ /*
+ * Ethertype 802.2 is represented in netlink by omitting the
+ * OVS_KEY_ATTR_ETHERTYPE attribute from the flow key, and by
+ * 0xffff in the mask attribute. The Ethertype can also
+ * be wildcarded.
+ */
+ if (is_mask && output->eth.type)
+ if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE,
+ output->eth.type))
+ goto nla_put_failure;
goto unencap;
+ }
- if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, swkey->eth.type))
+ if (nla_put_be16(skb, OVS_KEY_ATTR_ETHERTYPE, output->eth.type))
goto nla_put_failure;
if (swkey->eth.type == htons(ETH_P_IP)) {
@@ -1369,12 +1845,12 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
if (!nla)
goto nla_put_failure;
ipv4_key = nla_data(nla);
- ipv4_key->ipv4_src = swkey->ipv4.addr.src;
- ipv4_key->ipv4_dst = swkey->ipv4.addr.dst;
- ipv4_key->ipv4_proto = swkey->ip.proto;
- ipv4_key->ipv4_tos = swkey->ip.tos;
- ipv4_key->ipv4_ttl = swkey->ip.ttl;
- ipv4_key->ipv4_frag = swkey->ip.frag;
+ ipv4_key->ipv4_src = output->ipv4.addr.src;
+ ipv4_key->ipv4_dst = output->ipv4.addr.dst;
+ ipv4_key->ipv4_proto = output->ip.proto;
+ ipv4_key->ipv4_tos = output->ip.tos;
+ ipv4_key->ipv4_ttl = output->ip.ttl;
+ ipv4_key->ipv4_frag = output->ip.frag;
} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
struct ovs_key_ipv6 *ipv6_key;
@@ -1382,15 +1858,15 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
if (!nla)
goto nla_put_failure;
ipv6_key = nla_data(nla);
- memcpy(ipv6_key->ipv6_src, &swkey->ipv6.addr.src,
+ memcpy(ipv6_key->ipv6_src, &output->ipv6.addr.src,
sizeof(ipv6_key->ipv6_src));
- memcpy(ipv6_key->ipv6_dst, &swkey->ipv6.addr.dst,
+ memcpy(ipv6_key->ipv6_dst, &output->ipv6.addr.dst,
sizeof(ipv6_key->ipv6_dst));
- ipv6_key->ipv6_label = swkey->ipv6.label;
- ipv6_key->ipv6_proto = swkey->ip.proto;
- ipv6_key->ipv6_tclass = swkey->ip.tos;
- ipv6_key->ipv6_hlimit = swkey->ip.ttl;
- ipv6_key->ipv6_frag = swkey->ip.frag;
+ ipv6_key->ipv6_label = output->ipv6.label;
+ ipv6_key->ipv6_proto = output->ip.proto;
+ ipv6_key->ipv6_tclass = output->ip.tos;
+ ipv6_key->ipv6_hlimit = output->ip.ttl;
+ ipv6_key->ipv6_frag = output->ip.frag;
} else if (swkey->eth.type == htons(ETH_P_ARP) ||
swkey->eth.type == htons(ETH_P_RARP)) {
struct ovs_key_arp *arp_key;
@@ -1400,11 +1876,11 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
goto nla_put_failure;
arp_key = nla_data(nla);
memset(arp_key, 0, sizeof(struct ovs_key_arp));
- arp_key->arp_sip = swkey->ipv4.addr.src;
- arp_key->arp_tip = swkey->ipv4.addr.dst;
- arp_key->arp_op = htons(swkey->ip.proto);
- memcpy(arp_key->arp_sha, swkey->ipv4.arp.sha, ETH_ALEN);
- memcpy(arp_key->arp_tha, swkey->ipv4.arp.tha, ETH_ALEN);
+ arp_key->arp_sip = output->ipv4.addr.src;
+ arp_key->arp_tip = output->ipv4.addr.dst;
+ arp_key->arp_op = htons(output->ip.proto);
+ memcpy(arp_key->arp_sha, output->ipv4.arp.sha, ETH_ALEN);
+ memcpy(arp_key->arp_tha, output->ipv4.arp.tha, ETH_ALEN);
}
if ((swkey->eth.type == htons(ETH_P_IP) ||
@@ -1419,11 +1895,11 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
goto nla_put_failure;
tcp_key = nla_data(nla);
if (swkey->eth.type == htons(ETH_P_IP)) {
- tcp_key->tcp_src = swkey->ipv4.tp.src;
- tcp_key->tcp_dst = swkey->ipv4.tp.dst;
+ tcp_key->tcp_src = output->ipv4.tp.src;
+ tcp_key->tcp_dst = output->ipv4.tp.dst;
} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
- tcp_key->tcp_src = swkey->ipv6.tp.src;
- tcp_key->tcp_dst = swkey->ipv6.tp.dst;
+ tcp_key->tcp_src = output->ipv6.tp.src;
+ tcp_key->tcp_dst = output->ipv6.tp.dst;
}
} else if (swkey->ip.proto == IPPROTO_UDP) {
struct ovs_key_udp *udp_key;
@@ -1433,11 +1909,25 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
goto nla_put_failure;
udp_key = nla_data(nla);
if (swkey->eth.type == htons(ETH_P_IP)) {
- udp_key->udp_src = swkey->ipv4.tp.src;
- udp_key->udp_dst = swkey->ipv4.tp.dst;
+ udp_key->udp_src = output->ipv4.tp.src;
+ udp_key->udp_dst = output->ipv4.tp.dst;
+ } else if (swkey->eth.type == htons(ETH_P_IPV6)) {
+ udp_key->udp_src = output->ipv6.tp.src;
+ udp_key->udp_dst = output->ipv6.tp.dst;
+ }
+ } else if (swkey->ip.proto == IPPROTO_SCTP) {
+ struct ovs_key_sctp *sctp_key;
+
+ nla = nla_reserve(skb, OVS_KEY_ATTR_SCTP, sizeof(*sctp_key));
+ if (!nla)
+ goto nla_put_failure;
+ sctp_key = nla_data(nla);
+ if (swkey->eth.type == htons(ETH_P_IP)) {
+ sctp_key->sctp_src = output->ipv4.tp.src;
+ sctp_key->sctp_dst = output->ipv4.tp.dst;
} else if (swkey->eth.type == htons(ETH_P_IPV6)) {
- udp_key->udp_src = swkey->ipv6.tp.src;
- udp_key->udp_dst = swkey->ipv6.tp.dst;
+ sctp_key->sctp_src = output->ipv6.tp.src;
+ sctp_key->sctp_dst = output->ipv6.tp.dst;
}
} else if (swkey->eth.type == htons(ETH_P_IP) &&
swkey->ip.proto == IPPROTO_ICMP) {
@@ -1447,8 +1937,8 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
if (!nla)
goto nla_put_failure;
icmp_key = nla_data(nla);
- icmp_key->icmp_type = ntohs(swkey->ipv4.tp.src);
- icmp_key->icmp_code = ntohs(swkey->ipv4.tp.dst);
+ icmp_key->icmp_type = ntohs(output->ipv4.tp.src);
+ icmp_key->icmp_code = ntohs(output->ipv4.tp.dst);
} else if (swkey->eth.type == htons(ETH_P_IPV6) &&
swkey->ip.proto == IPPROTO_ICMPV6) {
struct ovs_key_icmpv6 *icmpv6_key;
@@ -1458,8 +1948,8 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
if (!nla)
goto nla_put_failure;
icmpv6_key = nla_data(nla);
- icmpv6_key->icmpv6_type = ntohs(swkey->ipv6.tp.src);
- icmpv6_key->icmpv6_code = ntohs(swkey->ipv6.tp.dst);
+ icmpv6_key->icmpv6_type = ntohs(output->ipv6.tp.src);
+ icmpv6_key->icmpv6_code = ntohs(output->ipv6.tp.dst);
if (icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_SOLICITATION ||
icmpv6_key->icmpv6_type == NDISC_NEIGHBOUR_ADVERTISEMENT) {
@@ -1469,10 +1959,10 @@ int ovs_flow_to_nlattrs(const struct sw_flow_key *swkey, struct sk_buff *skb)
if (!nla)
goto nla_put_failure;
nd_key = nla_data(nla);
- memcpy(nd_key->nd_target, &swkey->ipv6.nd.target,
+ memcpy(nd_key->nd_target, &output->ipv6.nd.target,
sizeof(nd_key->nd_target));
- memcpy(nd_key->nd_sll, swkey->ipv6.nd.sll, ETH_ALEN);
- memcpy(nd_key->nd_tll, swkey->ipv6.nd.tll, ETH_ALEN);
+ memcpy(nd_key->nd_sll, output->ipv6.nd.sll, ETH_ALEN);
+ memcpy(nd_key->nd_tll, output->ipv6.nd.tll, ETH_ALEN);
}
}
}
@@ -1491,6 +1981,9 @@ nla_put_failure:
* Returns zero if successful or a negative error code. */
int ovs_flow_init(void)
{
+ BUILD_BUG_ON(__alignof__(struct sw_flow_key) % __alignof__(long));
+ BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
+
flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow), 0,
0, NULL);
if (flow_cache == NULL)
@@ -1504,3 +1997,84 @@ void ovs_flow_exit(void)
{
kmem_cache_destroy(flow_cache);
}
+
+struct sw_flow_mask *ovs_sw_flow_mask_alloc(void)
+{
+ struct sw_flow_mask *mask;
+
+ mask = kmalloc(sizeof(*mask), GFP_KERNEL);
+ if (mask)
+ mask->ref_count = 0;
+
+ return mask;
+}
+
+void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *mask)
+{
+ mask->ref_count++;
+}
+
+void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
+{
+ if (!mask)
+ return;
+
+ BUG_ON(!mask->ref_count);
+ mask->ref_count--;
+
+ if (!mask->ref_count) {
+ list_del_rcu(&mask->list);
+ if (deferred)
+ kfree_rcu(mask, rcu);
+ else
+ kfree(mask);
+ }
+}
+
+static bool ovs_sw_flow_mask_equal(const struct sw_flow_mask *a,
+ const struct sw_flow_mask *b)
+{
+ u8 *a_ = (u8 *)&a->key + a->range.start;
+ u8 *b_ = (u8 *)&b->key + b->range.start;
+
+ return (a->range.end == b->range.end)
+ && (a->range.start == b->range.start)
+ && (memcmp(a_, b_, range_n_bytes(&a->range)) == 0);
+}
+
+struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *tbl,
+ const struct sw_flow_mask *mask)
+{
+ struct list_head *ml;
+
+ list_for_each(ml, tbl->mask_list) {
+ struct sw_flow_mask *m;
+ m = container_of(ml, struct sw_flow_mask, list);
+ if (ovs_sw_flow_mask_equal(mask, m))
+ return m;
+ }
+
+ return NULL;
+}
+
+/**
+ * ovs_sw_flow_mask_insert - add a new mask into the mask list.
+ * The caller needs to make sure that 'mask' is not the same
+ * as any masks that are already on the list.
+ */
+void ovs_sw_flow_mask_insert(struct flow_table *tbl, struct sw_flow_mask *mask)
+{
+ list_add_rcu(&mask->list, tbl->mask_list);
+}
+
+/**
+ * ovs_sw_flow_mask_set - record 'range' in the mask and set the bytes it covers to 'val'.
+ */
+static void ovs_sw_flow_mask_set(struct sw_flow_mask *mask,
+ struct sw_flow_key_range *range, u8 val)
+{
+ u8 *m = (u8 *)&mask->key + range->start;
+
+ mask->range = *range;
+ memset(m, val, range_n_bytes(range));
+}
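The mask helpers above are intended to be driven from the flow-install path in datapath.c, which is outside this hunk. A rough, hypothetical sketch of how a caller might reuse an existing identical mask and only allocate a new one when needed; 'table', 'flow', 'key_attr', 'mask_attr' and the error label are assumptions for illustration, not names from this patch:

	/* Hypothetical flow-install sketch built on this patch's helpers. */
	struct sw_flow_mask tmp_mask, *mask;
	struct sw_flow_match match;
	int err;

	ovs_match_init(&match, &flow->unmasked_key, &tmp_mask);
	err = ovs_match_from_nlattrs(&match, key_attr, mask_attr);
	if (err)
		goto err_out;

	/* Reuse an identical mask already on the table, if any. */
	mask = ovs_sw_flow_mask_find(table, &tmp_mask);
	if (!mask) {
		mask = ovs_sw_flow_mask_alloc();
		if (!mask)
			goto err_out;
		mask->key = tmp_mask.key;
		mask->range = tmp_mask.range;
		ovs_sw_flow_mask_insert(table, mask);
	}
	ovs_sw_flow_mask_add_ref(mask);
	flow->mask = mask;

	/* Hashing and lookups use the masked key; the unmasked key is kept too. */
	ovs_flow_key_mask(&flow->key, &flow->unmasked_key, mask);
	ovs_flow_insert(table, flow);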
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index 66ef7220293..212fbf7510c 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2007-2011 Nicira, Inc.
+ * Copyright (c) 2007-2013 Nicira, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
@@ -33,6 +33,8 @@
#include <net/inet_ecn.h>
struct sk_buff;
+struct sw_flow_mask;
+struct flow_table;
struct sw_flow_actions {
struct rcu_head rcu;
@@ -97,8 +99,8 @@ struct sw_flow_key {
} addr;
union {
struct {
- __be16 src; /* TCP/UDP source port. */
- __be16 dst; /* TCP/UDP destination port. */
+ __be16 src; /* TCP/UDP/SCTP source port. */
+ __be16 dst; /* TCP/UDP/SCTP destination port. */
} tp;
struct {
u8 sha[ETH_ALEN]; /* ARP source hardware address. */
@@ -113,8 +115,8 @@ struct sw_flow_key {
} addr;
__be32 label; /* IPv6 flow label. */
struct {
- __be16 src; /* TCP/UDP source port. */
- __be16 dst; /* TCP/UDP destination port. */
+ __be16 src; /* TCP/UDP/SCTP source port. */
+ __be16 dst; /* TCP/UDP/SCTP destination port. */
} tp;
struct {
struct in6_addr target; /* ND target address. */
@@ -123,7 +125,7 @@ struct sw_flow_key {
} nd;
} ipv6;
};
-};
+} __aligned(BITS_PER_LONG/8); /* Ensure that we can do comparisons as longs. */
struct sw_flow {
struct rcu_head rcu;
@@ -131,6 +133,8 @@ struct sw_flow {
u32 hash;
struct sw_flow_key key;
+ struct sw_flow_key unmasked_key;
+ struct sw_flow_mask *mask;
struct sw_flow_actions __rcu *sf_acts;
spinlock_t lock; /* Lock for values below. */
@@ -140,6 +144,20 @@ struct sw_flow {
u8 tcp_flags; /* Union of seen TCP flags. */
};
+struct sw_flow_key_range {
+ size_t start;
+ size_t end;
+};
+
+struct sw_flow_match {
+ struct sw_flow_key *key;
+ struct sw_flow_key_range range;
+ struct sw_flow_mask *mask;
+};
+
+void ovs_match_init(struct sw_flow_match *match,
+ struct sw_flow_key *key, struct sw_flow_mask *mask);
+
struct arp_eth_header {
__be16 ar_hrd; /* format of hardware address */
__be16 ar_pro; /* format of protocol address */
@@ -159,21 +177,21 @@ void ovs_flow_exit(void);
struct sw_flow *ovs_flow_alloc(void);
void ovs_flow_deferred_free(struct sw_flow *);
-void ovs_flow_free(struct sw_flow *flow);
+void ovs_flow_free(struct sw_flow *, bool deferred);
struct sw_flow_actions *ovs_flow_actions_alloc(int actions_len);
void ovs_flow_deferred_free_acts(struct sw_flow_actions *);
-int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *,
- int *key_lenp);
+int ovs_flow_extract(struct sk_buff *, u16 in_port, struct sw_flow_key *);
void ovs_flow_used(struct sw_flow *, struct sk_buff *);
u64 ovs_flow_used_time(unsigned long flow_jiffies);
-
-int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *);
-int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp,
+int ovs_flow_to_nlattrs(const struct sw_flow_key *,
+ const struct sw_flow_key *, struct sk_buff *);
+int ovs_match_from_nlattrs(struct sw_flow_match *match,
+ const struct nlattr *,
const struct nlattr *);
-int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow, int key_len,
- const struct nlattr *attr);
+int ovs_flow_metadata_from_nlattrs(struct sw_flow *flow,
+ const struct nlattr *attr);
#define MAX_ACTIONS_BUFSIZE (32 * 1024)
#define TBL_MIN_BUCKETS 1024
@@ -182,6 +200,7 @@ struct flow_table {
struct flex_array *buckets;
unsigned int count, n_buckets;
struct rcu_head rcu;
+ struct list_head *mask_list;
int node_ver;
u32 hash_seed;
bool keep_flows;
@@ -197,22 +216,44 @@ static inline int ovs_flow_tbl_need_to_expand(struct flow_table *table)
return (table->count > table->n_buckets);
}
-struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *table,
- struct sw_flow_key *key, int len);
-void ovs_flow_tbl_destroy(struct flow_table *table);
-void ovs_flow_tbl_deferred_destroy(struct flow_table *table);
+struct sw_flow *ovs_flow_lookup(struct flow_table *,
+ const struct sw_flow_key *);
+struct sw_flow *ovs_flow_lookup_unmasked_key(struct flow_table *table,
+ struct sw_flow_match *match);
+
+void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred);
struct flow_table *ovs_flow_tbl_alloc(int new_size);
struct flow_table *ovs_flow_tbl_expand(struct flow_table *table);
struct flow_table *ovs_flow_tbl_rehash(struct flow_table *table);
-void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
- struct sw_flow_key *key, int key_len);
-void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow);
-struct sw_flow *ovs_flow_tbl_next(struct flow_table *table, u32 *bucket, u32 *idx);
+void ovs_flow_insert(struct flow_table *table, struct sw_flow *flow);
+void ovs_flow_remove(struct flow_table *table, struct sw_flow *flow);
+
+struct sw_flow *ovs_flow_dump_next(struct flow_table *table, u32 *bucket, u32 *idx);
extern const int ovs_key_lens[OVS_KEY_ATTR_MAX + 1];
int ovs_ipv4_tun_from_nlattr(const struct nlattr *attr,
- struct ovs_key_ipv4_tunnel *tun_key);
+ struct sw_flow_match *match, bool is_mask);
int ovs_ipv4_tun_to_nlattr(struct sk_buff *skb,
- const struct ovs_key_ipv4_tunnel *tun_key);
+ const struct ovs_key_ipv4_tunnel *tun_key,
+ const struct ovs_key_ipv4_tunnel *output);
+
+bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
+ const struct sw_flow_key *key, int key_end);
+
+struct sw_flow_mask {
+ int ref_count;
+ struct rcu_head rcu;
+ struct list_head list;
+ struct sw_flow_key_range range;
+ struct sw_flow_key key;
+};
+struct sw_flow_mask *ovs_sw_flow_mask_alloc(void);
+void ovs_sw_flow_mask_add_ref(struct sw_flow_mask *);
+void ovs_sw_flow_mask_del_ref(struct sw_flow_mask *, bool deferred);
+void ovs_sw_flow_mask_insert(struct flow_table *, struct sw_flow_mask *);
+struct sw_flow_mask *ovs_sw_flow_mask_find(const struct flow_table *,
+ const struct sw_flow_mask *);
+void ovs_flow_key_mask(struct sw_flow_key *dst, const struct sw_flow_key *src,
+ const struct sw_flow_mask *mask);
#endif /* flow.h */
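The flow.h changes above introduce masked (megaflow) matching: each flow now keeps an unmasked_key plus a sw_flow_mask, and sw_flow_key is long-aligned so masking and comparison can run a word at a time over the [range.start, range.end) byte span. Below is a minimal user-space sketch of that masking idea; demo_key_mask and its fixed-size key are hypothetical stand-ins, not the kernel's ovs_flow_key_mask().

#include <stdio.h>
#include <string.h>

#define KEY_LONGS 8

struct demo_key {
	unsigned long w[KEY_LONGS];	/* stands in for the long-aligned sw_flow_key */
};

/* AND the unmasked key with the mask over [start, end), one long at a time. */
static void demo_key_mask(struct demo_key *dst, const struct demo_key *src,
			  const struct demo_key *mask, size_t start, size_t end)
{
	size_t i;

	for (i = start / sizeof(long); i < end / sizeof(long); i++)
		dst->w[i] = src->w[i] & mask->w[i];
}

int main(void)
{
	struct demo_key key, mask, masked;

	memset(&key, 0xab, sizeof(key));	/* the extracted packet key */
	memset(&mask, 0, sizeof(mask));		/* wildcard everything ... */
	mask.w[1] = ~0UL;			/* ... except the second word */
	memset(&masked, 0, sizeof(masked));

	demo_key_mask(&masked, &key, &mask, 0, sizeof(key));
	printf("masked: w[0]=%lx w[1]=%lx\n", masked.w[0], masked.w[1]);
	return 0;
}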
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 493e9775dcd..c99dea543d6 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -16,7 +16,6 @@
* 02110-1301, USA
*/
-#ifdef CONFIG_OPENVSWITCH_GRE
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/if.h>
@@ -177,10 +176,10 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
skb->local_df = 1;
- return iptunnel_xmit(net, rt, skb, fl.saddr,
+ return iptunnel_xmit(rt, skb, fl.saddr,
OVS_CB(skb)->tun_key->ipv4_dst, IPPROTO_GRE,
OVS_CB(skb)->tun_key->ipv4_tos,
- OVS_CB(skb)->tun_key->ipv4_ttl, df);
+ OVS_CB(skb)->tun_key->ipv4_ttl, df, false);
err_free_rt:
ip_rt_put(rt);
error:
@@ -271,5 +270,3 @@ const struct vport_ops ovs_gre_vport_ops = {
.get_name = gre_get_name,
.send = gre_tnl_send,
};
-
-#endif /* OPENVSWITCH_GRE */
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 5982f3f6283..09d93c13cfd 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -25,6 +25,7 @@
#include <linux/llc.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
+#include <linux/openvswitch.h>
#include <net/llc.h>
@@ -74,6 +75,15 @@ static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
return RX_HANDLER_CONSUMED;
}
+static struct net_device *get_dpdev(struct datapath *dp)
+{
+ struct vport *local;
+
+ local = ovs_vport_ovsl(dp, OVSP_LOCAL);
+ BUG_ON(!local);
+ return netdev_vport_priv(local)->dev;
+}
+
static struct vport *netdev_create(const struct vport_parms *parms)
{
struct vport *vport;
@@ -103,10 +113,15 @@ static struct vport *netdev_create(const struct vport_parms *parms)
}
rtnl_lock();
+ err = netdev_master_upper_dev_link(netdev_vport->dev,
+ get_dpdev(vport->dp));
+ if (err)
+ goto error_unlock;
+
err = netdev_rx_handler_register(netdev_vport->dev, netdev_frame_hook,
vport);
if (err)
- goto error_unlock;
+ goto error_master_upper_dev_unlink;
dev_set_promiscuity(netdev_vport->dev, 1);
netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
@@ -114,6 +129,8 @@ static struct vport *netdev_create(const struct vport_parms *parms)
return vport;
+error_master_upper_dev_unlink:
+ netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp));
error_unlock:
rtnl_unlock();
error_put:
@@ -140,6 +157,7 @@ static void netdev_destroy(struct vport *vport)
rtnl_lock();
netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
netdev_rx_handler_unregister(netdev_vport->dev);
+ netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp));
dev_set_promiscuity(netdev_vport->dev, -1);
rtnl_unlock();
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
new file mode 100644
index 00000000000..a481c03e286
--- /dev/null
+++ b/net/openvswitch/vport-vxlan.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2013 Nicira, Inc.
+ * Copyright (c) 2013 Cisco Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/net.h>
+#include <linux/rculist.h>
+#include <linux/udp.h>
+
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/ip_tunnels.h>
+#include <net/rtnetlink.h>
+#include <net/route.h>
+#include <net/dsfield.h>
+#include <net/inet_ecn.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
+#include <net/vxlan.h>
+
+#include "datapath.h"
+#include "vport.h"
+
+/**
+ * struct vxlan_port - Keeps track of open UDP ports
+ * @vs: vxlan_sock created for the port.
+ * @name: vport name.
+ */
+struct vxlan_port {
+ struct vxlan_sock *vs;
+ char name[IFNAMSIZ];
+};
+
+static inline struct vxlan_port *vxlan_vport(const struct vport *vport)
+{
+ return vport_priv(vport);
+}
+
+/* Called with rcu_read_lock and BH disabled. */
+static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb, __be32 vx_vni)
+{
+ struct ovs_key_ipv4_tunnel tun_key;
+ struct vport *vport = vs->data;
+ struct iphdr *iph;
+ __be64 key;
+
+ /* Save outer tunnel values */
+ iph = ip_hdr(skb);
+ key = cpu_to_be64(ntohl(vx_vni) >> 8);
+ ovs_flow_tun_key_init(&tun_key, iph, key, TUNNEL_KEY);
+
+ ovs_vport_receive(vport, skb, &tun_key);
+}
+
+static int vxlan_get_options(const struct vport *vport, struct sk_buff *skb)
+{
+ struct vxlan_port *vxlan_port = vxlan_vport(vport);
+ __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+
+ if (nla_put_u16(skb, OVS_TUNNEL_ATTR_DST_PORT, ntohs(dst_port)))
+ return -EMSGSIZE;
+ return 0;
+}
+
+static void vxlan_tnl_destroy(struct vport *vport)
+{
+ struct vxlan_port *vxlan_port = vxlan_vport(vport);
+
+ vxlan_sock_release(vxlan_port->vs);
+
+ ovs_vport_deferred_free(vport);
+}
+
+static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
+{
+ struct net *net = ovs_dp_get_net(parms->dp);
+ struct nlattr *options = parms->options;
+ struct vxlan_port *vxlan_port;
+ struct vxlan_sock *vs;
+ struct vport *vport;
+ struct nlattr *a;
+ u16 dst_port;
+ int err;
+
+ if (!options) {
+ err = -EINVAL;
+ goto error;
+ }
+ a = nla_find_nested(options, OVS_TUNNEL_ATTR_DST_PORT);
+ if (a && nla_len(a) == sizeof(u16)) {
+ dst_port = nla_get_u16(a);
+ } else {
+ /* Require destination port from userspace. */
+ err = -EINVAL;
+ goto error;
+ }
+
+ vport = ovs_vport_alloc(sizeof(struct vxlan_port),
+ &ovs_vxlan_vport_ops, parms);
+ if (IS_ERR(vport))
+ return vport;
+
+ vxlan_port = vxlan_vport(vport);
+ strncpy(vxlan_port->name, parms->name, IFNAMSIZ);
+
+ vs = vxlan_sock_add(net, htons(dst_port), vxlan_rcv, vport, true, false);
+ if (IS_ERR(vs)) {
+ ovs_vport_free(vport);
+ return (void *)vs;
+ }
+ vxlan_port->vs = vs;
+
+ return vport;
+
+error:
+ return ERR_PTR(err);
+}
+
+static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
+{
+ struct net *net = ovs_dp_get_net(vport->dp);
+ struct vxlan_port *vxlan_port = vxlan_vport(vport);
+ __be16 dst_port = inet_sk(vxlan_port->vs->sock->sk)->inet_sport;
+ struct rtable *rt;
+ struct flowi4 fl;
+ __be16 src_port;
+ int port_min;
+ int port_max;
+ __be16 df;
+ int err;
+
+ if (unlikely(!OVS_CB(skb)->tun_key)) {
+ err = -EINVAL;
+ goto error;
+ }
+
+ /* Route lookup */
+ memset(&fl, 0, sizeof(fl));
+ fl.daddr = OVS_CB(skb)->tun_key->ipv4_dst;
+ fl.saddr = OVS_CB(skb)->tun_key->ipv4_src;
+ fl.flowi4_tos = RT_TOS(OVS_CB(skb)->tun_key->ipv4_tos);
+ fl.flowi4_mark = skb->mark;
+ fl.flowi4_proto = IPPROTO_UDP;
+
+ rt = ip_route_output_key(net, &fl);
+ if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
+ goto error;
+ }
+
+ df = OVS_CB(skb)->tun_key->tun_flags & TUNNEL_DONT_FRAGMENT ?
+ htons(IP_DF) : 0;
+
+ skb->local_df = 1;
+
+ inet_get_local_port_range(&port_min, &port_max);
+ src_port = vxlan_src_port(port_min, port_max, skb);
+
+ err = vxlan_xmit_skb(vxlan_port->vs, rt, skb,
+ fl.saddr, OVS_CB(skb)->tun_key->ipv4_dst,
+ OVS_CB(skb)->tun_key->ipv4_tos,
+ OVS_CB(skb)->tun_key->ipv4_ttl, df,
+ src_port, dst_port,
+ htonl(be64_to_cpu(OVS_CB(skb)->tun_key->tun_id) << 8));
+ if (err < 0)
+ ip_rt_put(rt);
+error:
+ return err;
+}
+
+static const char *vxlan_get_name(const struct vport *vport)
+{
+ struct vxlan_port *vxlan_port = vxlan_vport(vport);
+ return vxlan_port->name;
+}
+
+const struct vport_ops ovs_vxlan_vport_ops = {
+ .type = OVS_VPORT_TYPE_VXLAN,
+ .create = vxlan_tnl_create,
+ .destroy = vxlan_tnl_destroy,
+ .get_name = vxlan_get_name,
+ .get_options = vxlan_get_options,
+ .send = vxlan_tnl_send,
+};
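One detail worth calling out in the new vport-vxlan.c: the 24-bit VNI sits in the upper bits of the 32-bit wire field, so vxlan_rcv() shifts it down into the 64-bit OVS tunnel ID and vxlan_tnl_send() shifts it back up. A small user-space round-trip sketch of that packing, where htobe64()/be64toh() from <endian.h> stand in for the kernel's cpu_to_be64()/be64_to_cpu():

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <endian.h>

int main(void)
{
	uint32_t vx_vni = htonl(0x123456u << 8);		 /* VNI in upper 24 bits, network order */
	uint64_t tun_id = htobe64(ntohl(vx_vni) >> 8);		 /* receive path: vxlan_rcv() */
	uint32_t back = htonl((uint32_t)(be64toh(tun_id) << 8)); /* transmit path: vxlan_tnl_send() */

	printf("vni=0x%06x tun_id=0x%016llx round-trips ok: %d\n",
	       ntohl(vx_vni) >> 8, (unsigned long long)be64toh(tun_id),
	       back == vx_vni);
	return back == vx_vni ? 0 : 1;
}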
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index d4c7fa04ce0..6f65dbe1381 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -42,6 +42,9 @@ static const struct vport_ops *vport_ops_list[] = {
#ifdef CONFIG_OPENVSWITCH_GRE
&ovs_gre_vport_ops,
#endif
+#ifdef CONFIG_OPENVSWITCH_VXLAN
+ &ovs_vxlan_vport_ops,
+#endif
};
/* Protected by RCU read lock for reading, ovs_mutex for writing. */
@@ -200,7 +203,7 @@ out:
* ovs_vport_set_options - modify existing vport device (for kernel callers)
*
* @vport: vport to modify.
- * @port: New configuration.
+ * @options: New configuration.
*
* Modifies an existing device with the specified configuration (which is
* dependent on device type). ovs_mutex must be held.
@@ -325,6 +328,7 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
*
* @vport: vport that received the packet
* @skb: skb that was received
+ * @tun_key: tunnel (if any) that carried the packet
*
* Must be called with rcu_read_lock. The packet cannot be shared and
* skb->data should point to the Ethernet header.
diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h
index 376045c42f8..1a9fbcec6e1 100644
--- a/net/openvswitch/vport.h
+++ b/net/openvswitch/vport.h
@@ -199,6 +199,7 @@ void ovs_vport_record_error(struct vport *, enum vport_err_type err_type);
extern const struct vport_ops ovs_netdev_vport_ops;
extern const struct vport_ops ovs_internal_vport_ops;
extern const struct vport_ops ovs_gre_vport_ops;
+extern const struct vport_ops ovs_vxlan_vport_ops;
static inline void ovs_skb_postpush_rcsum(struct sk_buff *skb,
const void *start, unsigned int len)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 4b66c752eae..2e8286b47c2 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -88,7 +88,7 @@
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
-
+#include <linux/reciprocal_div.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
@@ -1135,7 +1135,7 @@ static unsigned int fanout_demux_hash(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int num)
{
- return (((u64)skb->rxhash) * num) >> 32;
+ return reciprocal_divide(skb->rxhash, num);
}
static unsigned int fanout_demux_lb(struct packet_fanout *f,
@@ -1158,6 +1158,13 @@ static unsigned int fanout_demux_cpu(struct packet_fanout *f,
return smp_processor_id() % num;
}
+static unsigned int fanout_demux_rnd(struct packet_fanout *f,
+ struct sk_buff *skb,
+ unsigned int num)
+{
+ return reciprocal_divide(prandom_u32(), num);
+}
+
static unsigned int fanout_demux_rollover(struct packet_fanout *f,
struct sk_buff *skb,
unsigned int idx, unsigned int skip,
@@ -1215,6 +1222,9 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
case PACKET_FANOUT_CPU:
idx = fanout_demux_cpu(f, skb, num);
break;
+ case PACKET_FANOUT_RND:
+ idx = fanout_demux_rnd(f, skb, num);
+ break;
case PACKET_FANOUT_ROLLOVER:
idx = fanout_demux_rollover(f, skb, 0, (unsigned int) -1, num);
break;
@@ -1284,6 +1294,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
case PACKET_FANOUT_HASH:
case PACKET_FANOUT_LB:
case PACKET_FANOUT_CPU:
+ case PACKET_FANOUT_RND:
break;
default:
return -EINVAL;
@@ -2181,7 +2192,7 @@ static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
linear = len;
skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
- err);
+ err, 0);
if (!skb)
return NULL;
@@ -2638,51 +2649,6 @@ out:
return err;
}
-static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
-{
- struct sock_exterr_skb *serr;
- struct sk_buff *skb, *skb2;
- int copied, err;
-
- err = -EAGAIN;
- skb = skb_dequeue(&sk->sk_error_queue);
- if (skb == NULL)
- goto out;
-
- copied = skb->len;
- if (copied > len) {
- msg->msg_flags |= MSG_TRUNC;
- copied = len;
- }
- err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
- if (err)
- goto out_free_skb;
-
- sock_recv_timestamp(msg, sk, skb);
-
- serr = SKB_EXT_ERR(skb);
- put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
- sizeof(serr->ee), &serr->ee);
-
- msg->msg_flags |= MSG_ERRQUEUE;
- err = copied;
-
- /* Reset and regenerate socket error */
- spin_lock_bh(&sk->sk_error_queue.lock);
- sk->sk_err = 0;
- if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
- sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
- spin_unlock_bh(&sk->sk_error_queue.lock);
- sk->sk_error_report(sk);
- } else
- spin_unlock_bh(&sk->sk_error_queue.lock);
-
-out_free_skb:
- kfree_skb(skb);
-out:
- return err;
-}
-
/*
* Pull a packet from our receive queue and hand it to the user.
* If necessary we block.
@@ -2708,7 +2674,8 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
#endif
if (flags & MSG_ERRQUEUE) {
- err = packet_recv_error(sk, msg, len);
+ err = sock_recv_errqueue(sk, msg, len,
+ SOL_PACKET, PACKET_TX_TIMESTAMP);
goto out;
}
@@ -3259,9 +3226,11 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
if (po->tp_version == TPACKET_V3) {
lv = sizeof(struct tpacket_stats_v3);
+ st.stats3.tp_packets += st.stats3.tp_drops;
data = &st.stats3;
} else {
lv = sizeof(struct tpacket_stats);
+ st.stats1.tp_packets += st.stats1.tp_drops;
data = &st.stats1;
}
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index 1afd1381cdc..77e38f73349 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -793,7 +793,7 @@ static int pn_res_seq_show(struct seq_file *seq, void *v)
struct sock **psk = v;
struct sock *sk = *psk;
- seq_printf(seq, "%02X %5d %lu%n",
+ seq_printf(seq, "%02X %5u %lu%n",
(int) (psk - pnres.sk),
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
sock_i_ino(sk), &len);
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 1cec5e4f3a5..1bacc107994 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -576,14 +576,14 @@ void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
}
EXPORT_SYMBOL(rfkill_set_states);
-static ssize_t rfkill_name_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
return sprintf(buf, "%s\n", rfkill->name);
}
+static DEVICE_ATTR_RO(name);
static const char *rfkill_get_type_str(enum rfkill_type type)
{
@@ -611,54 +611,52 @@ static const char *rfkill_get_type_str(enum rfkill_type type)
}
}
-static ssize_t rfkill_type_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type));
}
+static DEVICE_ATTR_RO(type);
-static ssize_t rfkill_idx_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t index_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
return sprintf(buf, "%d\n", rfkill->idx);
}
+static DEVICE_ATTR_RO(index);
-static ssize_t rfkill_persistent_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t persistent_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
return sprintf(buf, "%d\n", rfkill->persistent);
}
+static DEVICE_ATTR_RO(persistent);
-static ssize_t rfkill_hard_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t hard_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0 );
}
+static DEVICE_ATTR_RO(hard);
-static ssize_t rfkill_soft_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t soft_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0 );
}
-static ssize_t rfkill_soft_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct rfkill *rfkill = to_rfkill(dev);
unsigned long state;
@@ -680,6 +678,7 @@ static ssize_t rfkill_soft_store(struct device *dev,
return count;
}
+static DEVICE_ATTR_RW(soft);
static u8 user_state_from_blocked(unsigned long state)
{
@@ -691,18 +690,16 @@ static u8 user_state_from_blocked(unsigned long state)
return RFKILL_USER_STATE_UNBLOCKED;
}
-static ssize_t rfkill_state_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct rfkill *rfkill = to_rfkill(dev);
return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
}
-static ssize_t rfkill_state_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static ssize_t state_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct rfkill *rfkill = to_rfkill(dev);
unsigned long state;
@@ -725,32 +722,27 @@ static ssize_t rfkill_state_store(struct device *dev,
return count;
}
+static DEVICE_ATTR_RW(state);
-static ssize_t rfkill_claim_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static ssize_t claim_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
return sprintf(buf, "%d\n", 0);
}
-
-static ssize_t rfkill_claim_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- return -EOPNOTSUPP;
-}
-
-static struct device_attribute rfkill_dev_attrs[] = {
- __ATTR(name, S_IRUGO, rfkill_name_show, NULL),
- __ATTR(type, S_IRUGO, rfkill_type_show, NULL),
- __ATTR(index, S_IRUGO, rfkill_idx_show, NULL),
- __ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
- __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
- __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
- __ATTR(soft, S_IRUGO|S_IWUSR, rfkill_soft_show, rfkill_soft_store),
- __ATTR(hard, S_IRUGO, rfkill_hard_show, NULL),
- __ATTR_NULL
+static DEVICE_ATTR_RO(claim);
+
+static struct attribute *rfkill_dev_attrs[] = {
+ &dev_attr_name.attr,
+ &dev_attr_type.attr,
+ &dev_attr_index.attr,
+ &dev_attr_persistent.attr,
+ &dev_attr_state.attr,
+ &dev_attr_claim.attr,
+ &dev_attr_soft.attr,
+ &dev_attr_hard.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(rfkill_dev);
static void rfkill_release(struct device *dev)
{
@@ -830,7 +822,7 @@ static int rfkill_resume(struct device *dev)
static struct class rfkill_class = {
.name = "rfkill",
.dev_release = rfkill_release,
- .dev_attrs = rfkill_dev_attrs,
+ .dev_groups = rfkill_dev_groups,
.dev_uevent = rfkill_dev_uevent,
.suspend = rfkill_suspend,
.resume = rfkill_resume,
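The wholesale rename of the rfkill show/store callbacks is driven by the DEVICE_ATTR_RO()/DEVICE_ATTR_RW() helpers, which construct dev_attr_<name> by token pasting and therefore expect functions named <name>_show() and <name>_store(). A tiny user-space analogue of that pattern (DEMO_ATTR_RO and struct demo_attr are made up for illustration):

#include <stdio.h>

struct demo_attr {
	const char *name;
	void (*show)(void);
};

/* Token pasting wires the attribute to <name>_show(), like DEVICE_ATTR_RO(). */
#define DEMO_ATTR_RO(_name) \
	struct demo_attr demo_attr_##_name = { #_name, _name##_show }

static void state_show(void)
{
	puts("blocked=0");
}

static DEMO_ATTR_RO(state);

int main(void)
{
	printf("attribute '%s': ", demo_attr_state.name);
	demo_attr_state.show();
	return 0;
}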
diff --git a/net/rfkill/rfkill-regulator.c b/net/rfkill/rfkill-regulator.c
index d11ac79246e..cf5b145902e 100644
--- a/net/rfkill/rfkill-regulator.c
+++ b/net/rfkill/rfkill-regulator.c
@@ -30,6 +30,7 @@ struct rfkill_regulator_data {
static int rfkill_regulator_set_block(void *data, bool blocked)
{
struct rfkill_regulator_data *rfkill_data = data;
+ int ret = 0;
pr_debug("%s: blocked: %d\n", __func__, blocked);
@@ -40,15 +41,16 @@ static int rfkill_regulator_set_block(void *data, bool blocked)
}
} else {
if (!rfkill_data->reg_enabled) {
- regulator_enable(rfkill_data->vcc);
- rfkill_data->reg_enabled = true;
+ ret = regulator_enable(rfkill_data->vcc);
+ if (!ret)
+ rfkill_data->reg_enabled = true;
}
}
pr_debug("%s: regulator_is_enabled after set_block: %d\n", __func__,
regulator_is_enabled(rfkill_data->vcc));
- return 0;
+ return ret;
}
static struct rfkill_ops rfkill_regulator_ops = {
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 235e01acac5..c03a32a0418 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -272,6 +272,20 @@ config NET_SCH_FQ_CODEL
If unsure, say N.
+config NET_SCH_FQ
+ tristate "Fair Queue"
+ help
+ Say Y here if you want to use the FQ packet scheduling algorithm.
+
+ FQ does flow separation, and is able to respect pacing requirements
+ set by the TCP stack in sk->sk_pacing_rate (for locally generated
+ traffic).
+
+ To compile this driver as a module, choose M here: the module
+ will be called sch_fq.
+
+ If unsure, say N.
+
config NET_SCH_INGRESS
tristate "Ingress Qdisc"
depends on NET_CLS_ACT
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 978cbf004e8..e5f9abe9a5d 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -39,6 +39,7 @@ obj-$(CONFIG_NET_SCH_CHOKE) += sch_choke.o
obj-$(CONFIG_NET_SCH_QFQ) += sch_qfq.o
obj-$(CONFIG_NET_SCH_CODEL) += sch_codel.o
obj-$(CONFIG_NET_SCH_FQ_CODEL) += sch_fq_codel.o
+obj-$(CONFIG_NET_SCH_FQ) += sch_fq.o
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 3a294eb98d6..867b4a3e398 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -23,19 +23,18 @@
#include <net/sock.h>
#include <net/cls_cgroup.h>
-static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
+static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state *css)
{
- return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
- struct cgroup_cls_state, css);
+ return css ? container_of(css, struct cgroup_cls_state, css) : NULL;
}
static inline struct cgroup_cls_state *task_cls_state(struct task_struct *p)
{
- return container_of(task_subsys_state(p, net_cls_subsys_id),
- struct cgroup_cls_state, css);
+ return css_cls_state(task_css(p, net_cls_subsys_id));
}
-static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp)
+static struct cgroup_subsys_state *
+cgrp_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct cgroup_cls_state *cs;
@@ -45,17 +44,19 @@ static struct cgroup_subsys_state *cgrp_css_alloc(struct cgroup *cgrp)
return &cs->css;
}
-static int cgrp_css_online(struct cgroup *cgrp)
+static int cgrp_css_online(struct cgroup_subsys_state *css)
{
- if (cgrp->parent)
- cgrp_cls_state(cgrp)->classid =
- cgrp_cls_state(cgrp->parent)->classid;
+ struct cgroup_cls_state *cs = css_cls_state(css);
+ struct cgroup_cls_state *parent = css_cls_state(css_parent(css));
+
+ if (parent)
+ cs->classid = parent->classid;
return 0;
}
-static void cgrp_css_free(struct cgroup *cgrp)
+static void cgrp_css_free(struct cgroup_subsys_state *css)
{
- kfree(cgrp_cls_state(cgrp));
+ kfree(css_cls_state(css));
}
static int update_classid(const void *v, struct file *file, unsigned n)
@@ -67,12 +68,13 @@ static int update_classid(const void *v, struct file *file, unsigned n)
return 0;
}
-static void cgrp_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+static void cgrp_attach(struct cgroup_subsys_state *css,
+ struct cgroup_taskset *tset)
{
struct task_struct *p;
void *v;
- cgroup_taskset_for_each(p, cgrp, tset) {
+ cgroup_taskset_for_each(p, css, tset) {
task_lock(p);
v = (void *)(unsigned long)task_cls_classid(p);
iterate_fd(p->files, 0, update_classid, v);
@@ -80,14 +82,15 @@ static void cgrp_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
}
}
-static u64 read_classid(struct cgroup *cgrp, struct cftype *cft)
+static u64 read_classid(struct cgroup_subsys_state *css, struct cftype *cft)
{
- return cgrp_cls_state(cgrp)->classid;
+ return css_cls_state(css)->classid;
}
-static int write_classid(struct cgroup *cgrp, struct cftype *cft, u64 value)
+static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 value)
{
- cgrp_cls_state(cgrp)->classid = (u32) value;
+ css_cls_state(css)->classid = (u32) value;
return 0;
}
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 281c1bded1f..2adda7fa2d3 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -200,6 +200,58 @@ int unregister_qdisc(struct Qdisc_ops *qops)
}
EXPORT_SYMBOL(unregister_qdisc);
+/* Get default qdisc if not otherwise specified */
+void qdisc_get_default(char *name, size_t len)
+{
+ read_lock(&qdisc_mod_lock);
+ strlcpy(name, default_qdisc_ops->id, len);
+ read_unlock(&qdisc_mod_lock);
+}
+
+static struct Qdisc_ops *qdisc_lookup_default(const char *name)
+{
+ struct Qdisc_ops *q = NULL;
+
+ for (q = qdisc_base; q; q = q->next) {
+ if (!strcmp(name, q->id)) {
+ if (!try_module_get(q->owner))
+ q = NULL;
+ break;
+ }
+ }
+
+ return q;
+}
+
+/* Set new default qdisc to use */
+int qdisc_set_default(const char *name)
+{
+ const struct Qdisc_ops *ops;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ write_lock(&qdisc_mod_lock);
+ ops = qdisc_lookup_default(name);
+ if (!ops) {
+ /* Not found, drop lock and try to load module */
+ write_unlock(&qdisc_mod_lock);
+ request_module("sch_%s", name);
+ write_lock(&qdisc_mod_lock);
+
+ ops = qdisc_lookup_default(name);
+ }
+
+ if (ops) {
+ /* Set new default */
+ module_put(default_qdisc_ops->owner);
+ default_qdisc_ops = ops;
+ }
+ write_unlock(&qdisc_mod_lock);
+
+ return ops ? 0 : -ENOENT;
+}
+
/* We know handle. Find qdisc among all qdisc's attached to device
(root qdisc, all its children, children of children etc.)
*/
@@ -285,6 +337,45 @@ static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
return q;
}
+/* The linklayer setting was not transferred by older iproute2 versions,
+ * and the rate table lookup system has been dropped from the kernel.
+ * To stay backward compatible with older iproute2 tc utilities, we
+ * detect the linklayer setting by checking whether the rate table was
+ * modified.
+ *
+ * For linklayer ATM, the rate table entries are aligned to 48-byte
+ * cells, so some table entries contain the same value. The mpu (min
+ * packet unit) is also encoded into the old rate table, so starting
+ * from the mpu we find the low and high table entries mapping this
+ * cell. If these entries contain the same value, then the rate table
+ * has been modified for linklayer ATM.
+ *
+ * This is done by rounding the mpu up to the nearest 48-byte cell/entry,
+ * then rounding up to the next cell, computing the table entry one
+ * below, and comparing the two.
+ */
+static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab)
+{
+ int low = roundup(r->mpu, 48);
+ int high = roundup(low+1, 48);
+ int cell_low = low >> r->cell_log;
+ int cell_high = (high >> r->cell_log) - 1;
+
+ /* rtab is too inaccurate at rates > 100Mbit/s */
+ if ((r->rate > (100000000/8)) || (rtab[0] == 0)) {
+ pr_debug("TC linklayer: Giving up ATM detection\n");
+ return TC_LINKLAYER_ETHERNET;
+ }
+
+ if ((cell_high > cell_low) && (cell_high < 256)
+ && (rtab[cell_low] == rtab[cell_high])) {
+ pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n",
+ cell_low, cell_high, rtab[cell_high]);
+ return TC_LINKLAYER_ATM;
+ }
+ return TC_LINKLAYER_ETHERNET;
+}
+
static struct qdisc_rate_table *qdisc_rtab_list;
struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab)
@@ -308,6 +399,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta
rtab->rate = *r;
rtab->refcnt = 1;
memcpy(rtab->data, nla_data(tab), 1024);
+ if (r->linklayer == TC_LINKLAYER_UNAWARE)
+ r->linklayer = __detect_linklayer(r, rtab->data);
rtab->next = qdisc_rtab_list;
qdisc_rtab_list = rtab;
}
@@ -1813,6 +1906,7 @@ static int __init pktsched_init(void)
return err;
}
+ register_qdisc(&pfifo_fast_ops);
register_qdisc(&pfifo_qdisc_ops);
register_qdisc(&bfifo_qdisc_ops);
register_qdisc(&pfifo_head_drop_qdisc_ops);
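To make the __detect_linklayer() arithmetic above concrete, here is the same cell computation in user space for a hypothetical mpu of 0 and cell_log of 3: it ends up comparing rtab[0] with rtab[5], entries that are equal on a 48-byte-aligned (ATM) table and differ on an Ethernet one. The roundup() macro mirrors the kernel's; the numbers are illustrative only.

#include <stdio.h>

#define roundup(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	unsigned int mpu = 0, cell_log = 3;
	int low = roundup(mpu, 48);		/* 0  */
	int high = roundup(low + 1, 48);	/* 48 */
	int cell_low = low >> cell_log;		/* 0  */
	int cell_high = (high >> cell_log) - 1;	/* 5  */

	printf("compare rtab[%d] with rtab[%d]\n", cell_low, cell_high);
	return 0;
}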
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index ca8e0a57d94..1f9c31411f1 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -605,6 +605,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
struct sockaddr_atmpvc pvc;
int state;
+ memset(&pvc, 0, sizeof(pvc));
pvc.sap_family = AF_ATMPVC;
pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
pvc.sap_addr.vpi = flow->vcc->vpi;
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 71a56886255..7a42c81a19e 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1465,6 +1465,7 @@ static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
unsigned char *b = skb_tail_pointer(skb);
struct tc_cbq_wrropt opt;
+ memset(&opt, 0, sizeof(opt));
opt.flags = 0;
opt.allot = cl->allot;
opt.priority = cl->priority + 1;
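The two memset() additions above (sch_atm and sch_cbq) follow the usual fix for netlink dumps: structures copied to user space can contain padding bytes or unset members, and zeroing the whole object first avoids leaking kernel stack contents. A minimal user-space illustration that such padding exists (the struct layout here is hypothetical):

#include <stdio.h>
#include <string.h>

struct wire_opt {
	unsigned char flags;	/* typically followed by 3 padding bytes */
	unsigned int allot;
};

int main(void)
{
	struct wire_opt opt;

	/* Zero everything, padding included, before filling in the members,
	 * mirroring the memset() pattern above. */
	memset(&opt, 0, sizeof(opt));
	opt.flags = 0;
	opt.allot = 1514;

	printf("members use %zu bytes, struct occupies %zu bytes\n",
	       sizeof(opt.flags) + sizeof(opt.allot), sizeof(opt));
	return 0;
}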
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
new file mode 100644
index 00000000000..32ad015ee8c
--- /dev/null
+++ b/net/sched/sch_fq.c
@@ -0,0 +1,793 @@
+/*
+ * net/sched/sch_fq.c Fair Queue Packet Scheduler (per flow pacing)
+ *
+ * Copyright (C) 2013 Eric Dumazet <edumazet@google.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Meant to be used mostly for locally generated traffic:
+ * fast classification depends on skb->sk being set before reaching us.
+ * If not (router workload), we use the rxhash as a fallback, with a 32-bit hash.
+ * All packets belonging to a socket are considered as one 'flow'.
+ *
+ * Flows are dynamically allocated and stored in a hash table of RB trees.
+ * They are also part of one Round Robin list ('new' or 'old' flows).
+ *
+ * Burst avoidance (aka pacing) capability:
+ *
+ * Transport (e.g. TCP) can set a rate in sk->sk_pacing_rate, enqueue a
+ * bunch of packets, and this packet scheduler adds delay between
+ * packets to respect the rate limitation.
+ *
+ * enqueue():
+ * - Look up one RB tree (out of 1024 or more) to find the flow.
+ *   If the flow does not exist, create it and add it to the tree.
+ *   Add the skb to the per-flow list of skbs (FIFO).
+ * - Use a special FIFO for high priority packets.
+ *
+ * dequeue(): serves flows in Round Robin.
+ * Note: when a flow becomes empty, we do not immediately remove it from
+ * the RB trees, for performance reasons (it is expected to send additional
+ * packets, or the SLAB cache will reuse the socket for another flow).
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/rbtree.h>
+#include <linux/hash.h>
+#include <linux/prefetch.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <net/sock.h>
+#include <net/tcp_states.h>
+
+/*
+ * Per flow structure, dynamically allocated
+ */
+struct fq_flow {
+ struct sk_buff *head; /* list of skbs for this flow : first skb */
+ union {
+ struct sk_buff *tail; /* last skb in the list */
+ unsigned long age; /* jiffies when flow was emptied, for gc */
+ };
+ struct rb_node fq_node; /* anchor in fq_root[] trees */
+ struct sock *sk;
+ int qlen; /* number of packets in flow queue */
+ int credit;
+ u32 socket_hash; /* sk_hash */
+ struct fq_flow *next; /* next pointer in RR lists, or &detached */
+
+ struct rb_node rate_node; /* anchor in q->delayed tree */
+ u64 time_next_packet;
+};
+
+struct fq_flow_head {
+ struct fq_flow *first;
+ struct fq_flow *last;
+};
+
+struct fq_sched_data {
+ struct fq_flow_head new_flows;
+
+ struct fq_flow_head old_flows;
+
+ struct rb_root delayed; /* for rate limited flows */
+ u64 time_next_delayed_flow;
+
+ struct fq_flow internal; /* for non classified or high prio packets */
+ u32 quantum;
+ u32 initial_quantum;
+ u32 flow_default_rate;/* rate per flow : bytes per second */
+ u32 flow_max_rate; /* optional max rate per flow */
+ u32 flow_plimit; /* max packets per flow */
+ struct rb_root *fq_root;
+ u8 rate_enable;
+ u8 fq_trees_log;
+
+ u32 flows;
+ u32 inactive_flows;
+ u32 throttled_flows;
+
+ u64 stat_gc_flows;
+ u64 stat_internal_packets;
+ u64 stat_tcp_retrans;
+ u64 stat_throttled;
+ u64 stat_flows_plimit;
+ u64 stat_pkts_too_long;
+ u64 stat_allocation_errors;
+ struct qdisc_watchdog watchdog;
+};
+
+/* special value to mark a detached flow (not on old/new list) */
+static struct fq_flow detached, throttled;
+
+static void fq_flow_set_detached(struct fq_flow *f)
+{
+ f->next = &detached;
+}
+
+static bool fq_flow_is_detached(const struct fq_flow *f)
+{
+ return f->next == &detached;
+}
+
+static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
+{
+ struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
+
+ while (*p) {
+ struct fq_flow *aux;
+
+ parent = *p;
+ aux = container_of(parent, struct fq_flow, rate_node);
+ if (f->time_next_packet >= aux->time_next_packet)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+ rb_link_node(&f->rate_node, parent, p);
+ rb_insert_color(&f->rate_node, &q->delayed);
+ q->throttled_flows++;
+ q->stat_throttled++;
+
+ f->next = &throttled;
+ if (q->time_next_delayed_flow > f->time_next_packet)
+ q->time_next_delayed_flow = f->time_next_packet;
+}
+
+
+static struct kmem_cache *fq_flow_cachep __read_mostly;
+
+static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
+{
+ if (head->first)
+ head->last->next = flow;
+ else
+ head->first = flow;
+ head->last = flow;
+ flow->next = NULL;
+}
+
+/* limit number of collected flows per round */
+#define FQ_GC_MAX 8
+#define FQ_GC_AGE (3*HZ)
+
+static bool fq_gc_candidate(const struct fq_flow *f)
+{
+ return fq_flow_is_detached(f) &&
+ time_after(jiffies, f->age + FQ_GC_AGE);
+}
+
+static void fq_gc(struct fq_sched_data *q,
+ struct rb_root *root,
+ struct sock *sk)
+{
+ struct fq_flow *f, *tofree[FQ_GC_MAX];
+ struct rb_node **p, *parent;
+ int fcnt = 0;
+
+ p = &root->rb_node;
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+
+ f = container_of(parent, struct fq_flow, fq_node);
+ if (f->sk == sk)
+ break;
+
+ if (fq_gc_candidate(f)) {
+ tofree[fcnt++] = f;
+ if (fcnt == FQ_GC_MAX)
+ break;
+ }
+
+ if (f->sk > sk)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+
+ q->flows -= fcnt;
+ q->inactive_flows -= fcnt;
+ q->stat_gc_flows += fcnt;
+ while (fcnt) {
+ struct fq_flow *f = tofree[--fcnt];
+
+ rb_erase(&f->fq_node, root);
+ kmem_cache_free(fq_flow_cachep, f);
+ }
+}
+
+static const u8 prio2band[TC_PRIO_MAX + 1] = {
+ 1, 2, 2, 2, 1, 2, 0, 0 , 1, 1, 1, 1, 1, 1, 1, 1
+};
+
+static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
+{
+ struct rb_node **p, *parent;
+ struct sock *sk = skb->sk;
+ struct rb_root *root;
+ struct fq_flow *f;
+ int band;
+
+ /* warning: no starvation prevention... */
+ band = prio2band[skb->priority & TC_PRIO_MAX];
+ if (unlikely(band == 0))
+ return &q->internal;
+
+ if (unlikely(!sk)) {
+ /* By forcing the low-order bit to 1, we make sure not to
+ * collide with a local flow (socket pointers are word-aligned).
+ */
+ sk = (struct sock *)(skb_get_rxhash(skb) | 1L);
+ }
+
+ root = &q->fq_root[hash_32((u32)(long)sk, q->fq_trees_log)];
+
+ if (q->flows >= (2U << q->fq_trees_log) &&
+ q->inactive_flows > q->flows/2)
+ fq_gc(q, root, sk);
+
+ p = &root->rb_node;
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+
+ f = container_of(parent, struct fq_flow, fq_node);
+ if (f->sk == sk) {
+ /* The socket might have been reallocated, so check
+ * whether its sk_hash is still the same.
+ * If not, we need to refill the credit with the
+ * initial quantum.
+ */
+ if (unlikely(skb->sk &&
+ f->socket_hash != sk->sk_hash)) {
+ f->credit = q->initial_quantum;
+ f->socket_hash = sk->sk_hash;
+ }
+ return f;
+ }
+ if (f->sk > sk)
+ p = &parent->rb_right;
+ else
+ p = &parent->rb_left;
+ }
+
+ f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!f)) {
+ q->stat_allocation_errors++;
+ return &q->internal;
+ }
+ fq_flow_set_detached(f);
+ f->sk = sk;
+ if (skb->sk)
+ f->socket_hash = sk->sk_hash;
+ f->credit = q->initial_quantum;
+
+ rb_link_node(&f->fq_node, parent, p);
+ rb_insert_color(&f->fq_node, root);
+
+ q->flows++;
+ q->inactive_flows++;
+ return f;
+}
+
+
+/* remove one skb from head of flow queue */
+static struct sk_buff *fq_dequeue_head(struct fq_flow *flow)
+{
+ struct sk_buff *skb = flow->head;
+
+ if (skb) {
+ flow->head = skb->next;
+ skb->next = NULL;
+ flow->qlen--;
+ }
+ return skb;
+}
+
+/* We might add detection of retransmits in the future.
+ * For the time being, just return false.
+ */
+static bool skb_is_retransmit(struct sk_buff *skb)
+{
+ return false;
+}
+
+/* Add an skb to the flow queue.
+ * The flow queue is a linked list, kind of a FIFO, except for TCP retransmits,
+ * which we special-case to be transmitted before other packets.
+ * We rely on the fact that TCP retransmits are unlikely, so we do not waste
+ * a separate queue or a pointer on them.
+ * head-> [retrans pkt 1]
+ * [retrans pkt 2]
+ * [ normal pkt 1]
+ * [ normal pkt 2]
+ * [ normal pkt 3]
+ * tail-> [ normal pkt 4]
+ */
+static void flow_queue_add(struct fq_flow *flow, struct sk_buff *skb)
+{
+ struct sk_buff *prev, *head = flow->head;
+
+ skb->next = NULL;
+ if (!head) {
+ flow->head = skb;
+ flow->tail = skb;
+ return;
+ }
+ if (likely(!skb_is_retransmit(skb))) {
+ flow->tail->next = skb;
+ flow->tail = skb;
+ return;
+ }
+
+ /* This skb is a TCP retransmit;
+ * find the last retransmitted packet in the queue.
+ */
+ prev = NULL;
+ while (skb_is_retransmit(head)) {
+ prev = head;
+ head = head->next;
+ if (!head)
+ break;
+ }
+ if (!prev) { /* no rtx packet in queue, become the new head */
+ skb->next = flow->head;
+ flow->head = skb;
+ } else {
+ if (prev == flow->tail)
+ flow->tail = skb;
+ else
+ skb->next = prev->next;
+ prev->next = skb;
+ }
+}
+
+static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
+{
+ struct fq_sched_data *q = qdisc_priv(sch);
+ struct fq_flow *f;
+
+ if (unlikely(sch->q.qlen >= sch->limit))
+ return qdisc_drop(skb, sch);
+
+ f = fq_classify(skb, q);
+ if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
+ q->stat_flows_plimit++;
+ return qdisc_drop(skb, sch);
+ }
+
+ f->qlen++;
+ flow_queue_add(f, skb);
+ if (skb_is_retransmit(skb))
+ q->stat_tcp_retrans++;
+ sch->qstats.backlog += qdisc_pkt_len(skb);
+ if (fq_flow_is_detached(f)) {
+ fq_flow_add_tail(&q->new_flows, f);
+ if (q->quantum > f->credit)
+ f->credit = q->quantum;
+ q->inactive_flows--;
+ qdisc_unthrottled(sch);
+ }
+ if (unlikely(f == &q->internal)) {
+ q->stat_internal_packets++;
+ qdisc_unthrottled(sch);
+ }
+ sch->q.qlen++;
+
+ return NET_XMIT_SUCCESS;
+}
+
+static void fq_check_throttled(struct fq_sched_data *q, u64 now)
+{
+ struct rb_node *p;
+
+ if (q->time_next_delayed_flow > now)
+ return;
+
+ q->time_next_delayed_flow = ~0ULL;
+ while ((p = rb_first(&q->delayed)) != NULL) {
+ struct fq_flow *f = container_of(p, struct fq_flow, rate_node);
+
+ if (f->time_next_packet > now) {
+ q->time_next_delayed_flow = f->time_next_packet;
+ break;
+ }
+ rb_erase(p, &q->delayed);
+ q->throttled_flows--;
+ fq_flow_add_tail(&q->old_flows, f);
+ }
+}
+
+static struct sk_buff *fq_dequeue(struct Qdisc *sch)
+{
+ struct fq_sched_data *q = qdisc_priv(sch);
+ u64 now = ktime_to_ns(ktime_get());
+ struct fq_flow_head *head;
+ struct sk_buff *skb;
+ struct fq_flow *f;
+
+ skb = fq_dequeue_head(&q->internal);
+ if (skb)
+ goto out;
+ fq_check_throttled(q, now);
+begin:
+ head = &q->new_flows;
+ if (!head->first) {
+ head = &q->old_flows;
+ if (!head->first) {
+ if (q->time_next_delayed_flow != ~0ULL)
+ qdisc_watchdog_schedule_ns(&q->watchdog,
+ q->time_next_delayed_flow);
+ return NULL;
+ }
+ }
+ f = head->first;
+
+ if (f->credit <= 0) {
+ f->credit += q->quantum;
+ head->first = f->next;
+ fq_flow_add_tail(&q->old_flows, f);
+ goto begin;
+ }
+
+ if (unlikely(f->head && now < f->time_next_packet)) {
+ head->first = f->next;
+ fq_flow_set_throttled(q, f);
+ goto begin;
+ }
+
+ skb = fq_dequeue_head(f);
+ if (!skb) {
+ head->first = f->next;
+ /* force a pass through old_flows to prevent starvation */
+ if ((head == &q->new_flows) && q->old_flows.first) {
+ fq_flow_add_tail(&q->old_flows, f);
+ } else {
+ fq_flow_set_detached(f);
+ f->age = jiffies;
+ q->inactive_flows++;
+ }
+ goto begin;
+ }
+ prefetch(&skb->end);
+ f->time_next_packet = now;
+ f->credit -= qdisc_pkt_len(skb);
+
+ if (f->credit <= 0 &&
+ q->rate_enable &&
+ skb->sk && skb->sk->sk_state != TCP_TIME_WAIT) {
+ u32 rate = skb->sk->sk_pacing_rate ?: q->flow_default_rate;
+
+ rate = min(rate, q->flow_max_rate);
+ if (rate) {
+ u64 len = (u64)qdisc_pkt_len(skb) * NSEC_PER_SEC;
+
+ do_div(len, rate);
+ /* Since socket rate can change later,
+ * clamp the delay to 125 ms.
+ * TODO: maybe segment the too-big skb, as in commit
+ * e43ac79a4bc ("sch_tbf: segment too big GSO packets")
+ */
+ if (unlikely(len > 125 * NSEC_PER_MSEC)) {
+ len = 125 * NSEC_PER_MSEC;
+ q->stat_pkts_too_long++;
+ }
+
+ f->time_next_packet = now + len;
+ }
+ }
+out:
+ sch->qstats.backlog -= qdisc_pkt_len(skb);
+ qdisc_bstats_update(sch, skb);
+ sch->q.qlen--;
+ qdisc_unthrottled(sch);
+ return skb;
+}
+
+static void fq_reset(struct Qdisc *sch)
+{
+ struct sk_buff *skb;
+
+ while ((skb = fq_dequeue(sch)) != NULL)
+ kfree_skb(skb);
+}
+
+static void fq_rehash(struct fq_sched_data *q,
+ struct rb_root *old_array, u32 old_log,
+ struct rb_root *new_array, u32 new_log)
+{
+ struct rb_node *op, **np, *parent;
+ struct rb_root *oroot, *nroot;
+ struct fq_flow *of, *nf;
+ int fcnt = 0;
+ u32 idx;
+
+ for (idx = 0; idx < (1U << old_log); idx++) {
+ oroot = &old_array[idx];
+ while ((op = rb_first(oroot)) != NULL) {
+ rb_erase(op, oroot);
+ of = container_of(op, struct fq_flow, fq_node);
+ if (fq_gc_candidate(of)) {
+ fcnt++;
+ kmem_cache_free(fq_flow_cachep, of);
+ continue;
+ }
+ nroot = &new_array[hash_32((u32)(long)of->sk, new_log)];
+
+ np = &nroot->rb_node;
+ parent = NULL;
+ while (*np) {
+ parent = *np;
+
+ nf = container_of(parent, struct fq_flow, fq_node);
+ BUG_ON(nf->sk == of->sk);
+
+ if (nf->sk > of->sk)
+ np = &parent->rb_right;
+ else
+ np = &parent->rb_left;
+ }
+
+ rb_link_node(&of->fq_node, parent, np);
+ rb_insert_color(&of->fq_node, nroot);
+ }
+ }
+ q->flows -= fcnt;
+ q->inactive_flows -= fcnt;
+ q->stat_gc_flows += fcnt;
+}
+
+static int fq_resize(struct fq_sched_data *q, u32 log)
+{
+ struct rb_root *array;
+ u32 idx;
+
+ if (q->fq_root && log == q->fq_trees_log)
+ return 0;
+
+ array = kmalloc(sizeof(struct rb_root) << log, GFP_KERNEL);
+ if (!array)
+ return -ENOMEM;
+
+ for (idx = 0; idx < (1U << log); idx++)
+ array[idx] = RB_ROOT;
+
+ if (q->fq_root) {
+ fq_rehash(q, q->fq_root, q->fq_trees_log, array, log);
+ kfree(q->fq_root);
+ }
+ q->fq_root = array;
+ q->fq_trees_log = log;
+
+ return 0;
+}
+
+static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
+ [TCA_FQ_PLIMIT] = { .type = NLA_U32 },
+ [TCA_FQ_FLOW_PLIMIT] = { .type = NLA_U32 },
+ [TCA_FQ_QUANTUM] = { .type = NLA_U32 },
+ [TCA_FQ_INITIAL_QUANTUM] = { .type = NLA_U32 },
+ [TCA_FQ_RATE_ENABLE] = { .type = NLA_U32 },
+ [TCA_FQ_FLOW_DEFAULT_RATE] = { .type = NLA_U32 },
+ [TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 },
+ [TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 },
+};
+
+static int fq_change(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct fq_sched_data *q = qdisc_priv(sch);
+ struct nlattr *tb[TCA_FQ_MAX + 1];
+ int err, drop_count = 0;
+ u32 fq_log;
+
+ if (!opt)
+ return -EINVAL;
+
+ err = nla_parse_nested(tb, TCA_FQ_MAX, opt, fq_policy);
+ if (err < 0)
+ return err;
+
+ sch_tree_lock(sch);
+
+ fq_log = q->fq_trees_log;
+
+ if (tb[TCA_FQ_BUCKETS_LOG]) {
+ u32 nval = nla_get_u32(tb[TCA_FQ_BUCKETS_LOG]);
+
+ if (nval >= 1 && nval <= ilog2(256*1024))
+ fq_log = nval;
+ else
+ err = -EINVAL;
+ }
+ if (tb[TCA_FQ_PLIMIT])
+ sch->limit = nla_get_u32(tb[TCA_FQ_PLIMIT]);
+
+ if (tb[TCA_FQ_FLOW_PLIMIT])
+ q->flow_plimit = nla_get_u32(tb[TCA_FQ_FLOW_PLIMIT]);
+
+ if (tb[TCA_FQ_QUANTUM])
+ q->quantum = nla_get_u32(tb[TCA_FQ_QUANTUM]);
+
+ if (tb[TCA_FQ_INITIAL_QUANTUM])
+ q->quantum = nla_get_u32(tb[TCA_FQ_INITIAL_QUANTUM]);
+
+ if (tb[TCA_FQ_FLOW_DEFAULT_RATE])
+ q->flow_default_rate = nla_get_u32(tb[TCA_FQ_FLOW_DEFAULT_RATE]);
+
+ if (tb[TCA_FQ_FLOW_MAX_RATE])
+ q->flow_max_rate = nla_get_u32(tb[TCA_FQ_FLOW_MAX_RATE]);
+
+ if (tb[TCA_FQ_RATE_ENABLE]) {
+ u32 enable = nla_get_u32(tb[TCA_FQ_RATE_ENABLE]);
+
+ if (enable <= 1)
+ q->rate_enable = enable;
+ else
+ err = -EINVAL;
+ }
+
+ if (!err)
+ err = fq_resize(q, fq_log);
+
+ while (sch->q.qlen > sch->limit) {
+ struct sk_buff *skb = fq_dequeue(sch);
+
+ kfree_skb(skb);
+ drop_count++;
+ }
+ qdisc_tree_decrease_qlen(sch, drop_count);
+
+ sch_tree_unlock(sch);
+ return err;
+}
+
+static void fq_destroy(struct Qdisc *sch)
+{
+ struct fq_sched_data *q = qdisc_priv(sch);
+ struct rb_root *root;
+ struct rb_node *p;
+ unsigned int idx;
+
+ if (q->fq_root) {
+ for (idx = 0; idx < (1U << q->fq_trees_log); idx++) {
+ root = &q->fq_root[idx];
+ while ((p = rb_first(root)) != NULL) {
+ rb_erase(p, root);
+ kmem_cache_free(fq_flow_cachep,
+ container_of(p, struct fq_flow, fq_node));
+ }
+ }
+ kfree(q->fq_root);
+ }
+ qdisc_watchdog_cancel(&q->watchdog);
+}
+
+static int fq_init(struct Qdisc *sch, struct nlattr *opt)
+{
+ struct fq_sched_data *q = qdisc_priv(sch);
+ int err;
+
+ sch->limit = 10000;
+ q->flow_plimit = 100;
+ q->quantum = 2 * psched_mtu(qdisc_dev(sch));
+ q->initial_quantum = 10 * psched_mtu(qdisc_dev(sch));
+ q->flow_default_rate = 0;
+ q->flow_max_rate = ~0U;
+ q->rate_enable = 1;
+ q->new_flows.first = NULL;
+ q->old_flows.first = NULL;
+ q->delayed = RB_ROOT;
+ q->fq_root = NULL;
+ q->fq_trees_log = ilog2(1024);
+ qdisc_watchdog_init(&q->watchdog, sch);
+
+ if (opt)
+ err = fq_change(sch, opt);
+ else
+ err = fq_resize(q, q->fq_trees_log);
+
+ return err;
+}
+
+static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct fq_sched_data *q = qdisc_priv(sch);
+ struct nlattr *opts;
+
+ opts = nla_nest_start(skb, TCA_OPTIONS);
+ if (opts == NULL)
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_FQ_PLIMIT, sch->limit) ||
+ nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT, q->flow_plimit) ||
+ nla_put_u32(skb, TCA_FQ_QUANTUM, q->quantum) ||
+ nla_put_u32(skb, TCA_FQ_INITIAL_QUANTUM, q->initial_quantum) ||
+ nla_put_u32(skb, TCA_FQ_RATE_ENABLE, q->rate_enable) ||
+ nla_put_u32(skb, TCA_FQ_FLOW_DEFAULT_RATE, q->flow_default_rate) ||
+ nla_put_u32(skb, TCA_FQ_FLOW_MAX_RATE, q->flow_max_rate) ||
+ nla_put_u32(skb, TCA_FQ_BUCKETS_LOG, q->fq_trees_log))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, opts);
+ return skb->len;
+
+nla_put_failure:
+ return -1;
+}
+
+static int fq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
+{
+ struct fq_sched_data *q = qdisc_priv(sch);
+ u64 now = ktime_to_ns(ktime_get());
+ struct tc_fq_qd_stats st = {
+ .gc_flows = q->stat_gc_flows,
+ .highprio_packets = q->stat_internal_packets,
+ .tcp_retrans = q->stat_tcp_retrans,
+ .throttled = q->stat_throttled,
+ .flows_plimit = q->stat_flows_plimit,
+ .pkts_too_long = q->stat_pkts_too_long,
+ .allocation_errors = q->stat_allocation_errors,
+ .flows = q->flows,
+ .inactive_flows = q->inactive_flows,
+ .throttled_flows = q->throttled_flows,
+ .time_next_delayed_flow = q->time_next_delayed_flow - now,
+ };
+
+ return gnet_stats_copy_app(d, &st, sizeof(st));
+}
+
+static struct Qdisc_ops fq_qdisc_ops __read_mostly = {
+ .id = "fq",
+ .priv_size = sizeof(struct fq_sched_data),
+
+ .enqueue = fq_enqueue,
+ .dequeue = fq_dequeue,
+ .peek = qdisc_peek_dequeued,
+ .init = fq_init,
+ .reset = fq_reset,
+ .destroy = fq_destroy,
+ .change = fq_change,
+ .dump = fq_dump,
+ .dump_stats = fq_dump_stats,
+ .owner = THIS_MODULE,
+};
+
+static int __init fq_module_init(void)
+{
+ int ret;
+
+ fq_flow_cachep = kmem_cache_create("fq_flow_cache",
+ sizeof(struct fq_flow),
+ 0, 0, NULL);
+ if (!fq_flow_cachep)
+ return -ENOMEM;
+
+ ret = register_qdisc(&fq_qdisc_ops);
+ if (ret)
+ kmem_cache_destroy(fq_flow_cachep);
+ return ret;
+}
+
+static void __exit fq_module_exit(void)
+{
+ unregister_qdisc(&fq_qdisc_ops);
+ kmem_cache_destroy(fq_flow_cachep);
+}
+
+module_init(fq_module_init)
+module_exit(fq_module_exit)
+MODULE_AUTHOR("Eric Dumazet");
+MODULE_LICENSE("GPL");
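For reference, the pacing delay fq_dequeue() derives from sk->sk_pacing_rate reduces to pkt_len * NSEC_PER_SEC / rate, clamped to 125 ms as noted in the TODO above. A quick user-space rendering of that arithmetic with made-up numbers:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL
#define NSEC_PER_MSEC	1000000ULL

int main(void)
{
	uint32_t pkt_len = 1514;	/* bytes */
	uint32_t rate = 125000;		/* bytes per second, i.e. 1 Mbit/s */
	uint64_t delay = (uint64_t)pkt_len * NSEC_PER_SEC / rate;

	if (delay > 125 * NSEC_PER_MSEC)	/* clamp, as in fq_dequeue() */
		delay = 125 * NSEC_PER_MSEC;

	printf("next packet delayed by %llu ns (~%llu ms)\n",
	       (unsigned long long)delay,
	       (unsigned long long)(delay / NSEC_PER_MSEC));
	return 0;
}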
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 4626cef4b76..a74e278654a 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -25,10 +25,15 @@
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/if_vlan.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
+/* Qdisc to use by default */
+const struct Qdisc_ops *default_qdisc_ops = &pfifo_fast_ops;
+EXPORT_SYMBOL(default_qdisc_ops);
+
/* Main transmission queue. */
/* Modifications to data participating in scheduling must be protected with
@@ -207,15 +212,19 @@ void __qdisc_run(struct Qdisc *q)
unsigned long dev_trans_start(struct net_device *dev)
{
- unsigned long val, res = dev->trans_start;
+ unsigned long val, res;
unsigned int i;
+ if (is_vlan_dev(dev))
+ dev = vlan_dev_real_dev(dev);
+ res = dev->trans_start;
for (i = 0; i < dev->num_tx_queues; i++) {
val = netdev_get_tx_queue(dev, i)->trans_start;
if (val && time_after(val, res))
res = val;
}
dev->trans_start = res;
+
return res;
}
EXPORT_SYMBOL(dev_trans_start);
@@ -525,12 +534,11 @@ struct Qdisc_ops pfifo_fast_ops __read_mostly = {
.dump = pfifo_fast_dump,
.owner = THIS_MODULE,
};
-EXPORT_SYMBOL(pfifo_fast_ops);
static struct lock_class_key qdisc_tx_busylock;
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
- struct Qdisc_ops *ops)
+ const struct Qdisc_ops *ops)
{
void *p;
struct Qdisc *sch;
@@ -574,10 +582,14 @@ errout:
}
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
- struct Qdisc_ops *ops, unsigned int parentid)
+ const struct Qdisc_ops *ops,
+ unsigned int parentid)
{
struct Qdisc *sch;
+ if (!try_module_get(ops->owner))
+ goto errout;
+
sch = qdisc_alloc(dev_queue, ops);
if (IS_ERR(sch))
goto errout;
@@ -681,7 +693,7 @@ static void attach_one_default_qdisc(struct net_device *dev,
if (dev->tx_queue_len) {
qdisc = qdisc_create_dflt(dev_queue,
- &pfifo_fast_ops, TC_H_ROOT);
+ default_qdisc_ops, TC_H_ROOT);
if (!qdisc) {
netdev_info(dev, "activation failed\n");
return;
@@ -734,9 +746,8 @@ void dev_activate(struct net_device *dev)
int need_watchdog;
/* No queueing discipline is attached to device;
- create default one i.e. pfifo_fast for devices,
- which need queueing and noqueue_qdisc for
- virtual interfaces
+ * create a default one for devices that need queueing,
+ * and noqueue_qdisc for virtual interfaces
*/
if (dev->qdisc == &noop_qdisc)
@@ -904,6 +915,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r,
memset(r, 0, sizeof(*r));
r->overhead = conf->overhead;
r->rate_bytes_ps = conf->rate;
+ r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK);
r->mult = 1;
/*
* The deal here is to replace a divide by a reciprocal one
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index c2124ea29f4..c2178b15ca6 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -100,7 +100,7 @@ struct htb_class {
struct psched_ratecfg ceil;
s64 buffer, cbuffer;/* token bucket depth/rate */
s64 mbuffer; /* max wait time */
- int prio; /* these two are used only by leaves... */
+ u32 prio; /* these two are used only by leaves... */
int quantum; /* but stored for parent-to-leaf return */
struct tcf_proto *filter_list; /* class attached filters */
@@ -1329,6 +1329,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
struct htb_sched *q = qdisc_priv(sch);
struct htb_class *cl = (struct htb_class *)*arg, *parent;
struct nlattr *opt = tca[TCA_OPTIONS];
+ struct qdisc_rate_table *rtab = NULL, *ctab = NULL;
struct nlattr *tb[TCA_HTB_MAX + 1];
struct tc_htb_opt *hopt;
@@ -1350,6 +1351,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
if (!hopt->rate.rate || !hopt->ceil.rate)
goto failure;
+ /* Keep backward compatibility with rate_table based iproute2 tc */
+ if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) {
+ rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]);
+ if (rtab)
+ qdisc_put_rtab(rtab);
+ }
+ if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) {
+ ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]);
+ if (ctab)
+ qdisc_put_rtab(ctab);
+ }
+
if (!cl) { /* new class */
struct Qdisc *new_q;
int prio;
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 5da78a19ac9..2e56185736d 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -57,7 +57,7 @@ static int mq_init(struct Qdisc *sch, struct nlattr *opt)
for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
dev_queue = netdev_get_tx_queue(dev, ntx);
- qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops,
+ qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
TC_H_MAKE(TC_H_MAJ(sch->handle),
TC_H_MIN(ntx + 1)));
if (qdisc == NULL)
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index accec33c454..d44c868cb53 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -124,7 +124,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt)
for (i = 0; i < dev->num_tx_queues; i++) {
dev_queue = netdev_get_tx_queue(dev, i);
- qdisc = qdisc_create_dflt(dev_queue, &pfifo_fast_ops,
+ qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
TC_H_MAKE(TC_H_MAJ(sch->handle),
TC_H_MIN(i + 1)));
if (qdisc == NULL) {
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 82f6016d89a..a6d788d4521 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -412,12 +412,9 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
/* If a delay is expected, orphan the skb. (orphaning usually takes
* place at TX completion time, so _before_ the link transit delay)
- * Ideally, this orphaning should be done after the rate limiting
- * module, because this breaks TCP Small Queue, and other mechanisms
- * based on socket sk_wmem_alloc.
*/
if (q->latency || q->jitter)
- skb_orphan(skb);
+ skb_orphan_partial(skb);
/*
* If we need to duplicate packet, then re-insert at top of the
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index bce5b79662a..cef50998519 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -28,10 +28,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -43,9 +40,6 @@
* Daisy Chang <daisyc@us.ibm.com>
* Ryan Layer <rmlayer@us.ibm.com>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -846,12 +840,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc,
else
spc_state = SCTP_ADDR_AVAILABLE;
/* Don't inform ULP about transition from PF to
- * active state and set cwnd to 1, see SCTP
+ * active state and set cwnd to 1 MTU, see SCTP
* Quick failover draft section 5.1, point 5
*/
if (transport->state == SCTP_PF) {
ulp_notify = false;
- transport->cwnd = 1;
+ transport->cwnd = asoc->pathmtu;
}
transport->state = SCTP_ACTIVE;
break;
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index ba1dfc3f8de..8c4fa5dec82 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -22,16 +22,10 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Vlad Yasevich <vladislav.yasevich@hp.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/slab.h>
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
index 64977ea0f9c..077bb070052 100644
--- a/net/sctp/bind_addr.c
+++ b/net/sctp/bind_addr.c
@@ -27,19 +27,13 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Jon Grimm <jgrimm@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 5780565f5b7..7bd5ed4a865 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -24,17 +24,11 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -201,9 +195,9 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
/* This is the biggest possible DATA chunk that can fit into
* the packet
*/
- max_data = asoc->pathmtu -
+ max_data = (asoc->pathmtu -
sctp_sk(asoc->base.sk)->pf->af->net_header_len -
- sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk);
+ sizeof(struct sctphdr) - sizeof(struct sctp_data_chunk)) & ~3;
max = asoc->frag_point;
/* If the peer requested that we authenticate DATA chunks
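
The "& ~3" mask in the max_data calculation above rounds the per-chunk payload budget down to a multiple of 4; SCTP pads every chunk to a 4-byte boundary (RFC 4960), so without the mask a maximally sized DATA chunk could overshoot the path MTU once padding is added. A minimal userspace C sketch of the mask (illustrative only, not part of the patch; the sample length is made up):

#include <stdio.h>

/* Round a byte count down to a multiple of 4, as "& ~3" does above. */
static unsigned int align_down4(unsigned int len)
{
    return len & ~3u;
}

int main(void)
{
    unsigned int budget = 1453; /* hypothetical space left for user data */

    printf("%u -> %u\n", budget, align_down4(budget)); /* 1453 -> 1452 */
    return 0;
}
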
diff --git a/net/sctp/command.c b/net/sctp/command.c
index c0044019db9..3d9a9ff69c0 100644
--- a/net/sctp/command.c
+++ b/net/sctp/command.c
@@ -25,17 +25,11 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index f4998780d6d..e89015d8935 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -28,10 +28,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -40,9 +37,6 @@
* Jon Grimm <jgrimm@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <net/sctp/sctp.h>
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 9e3d257de0e..09b8daac87c 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -29,10 +29,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -40,9 +37,6 @@
* Jon Grimm <jgrimm@austin.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
* Dajiang Zhang <dajiang.zhang@nokia.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 3fa4d858c35..5f2068679f8 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -29,10 +29,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -43,9 +40,6 @@
* Daisy Chang <daisyc@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
@@ -87,15 +81,7 @@ static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)
{
struct sctphdr *sh = sctp_hdr(skb);
__le32 cmp = sh->checksum;
- struct sk_buff *list;
- __le32 val;
- __u32 tmp = sctp_start_cksum((__u8 *)sh, skb_headlen(skb));
-
- skb_walk_frags(skb, list)
- tmp = sctp_update_cksum((__u8 *)list->data, skb_headlen(list),
- tmp);
-
- val = sctp_end_cksum(tmp);
+ __le32 val = sctp_compute_cksum(skb, 0);
if (val != cmp) {
/* CRC failure, dump it. */
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index cb25f040fed..5856932fdc3 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -30,17 +30,11 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 09ffcc912d2..da613ceae28 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -27,10 +27,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Le Yanqun <yanqun.le@nokia.com>
@@ -42,9 +39,6 @@
*
* Based on:
* linux/net/ipv6/tcp_ipv6.c
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -351,7 +345,7 @@ out:
rt = (struct rt6_info *)dst;
t->dst = dst;
-
+ t->dst_cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
pr_debug("rt6_dst:%pI6 rt6_src:%pI6\n", &rt->rt6i_dst.addr,
&fl6->saddr);
} else {
diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c
index fe012c44f8d..5ea573b3764 100644
--- a/net/sctp/objcnt.c
+++ b/net/sctp/objcnt.c
@@ -26,16 +26,10 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/output.c b/net/sctp/output.c
index a46d1eb4176..0ac3a65dacc 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -26,19 +26,13 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Karl Knutson <karl@athena.chicago.il.us>
* Jon Grimm <jgrimm@austin.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index ef9e2bbc0f2..94df7587786 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -28,10 +28,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -41,9 +38,6 @@
* Hui Huang <hui.huang@nokia.com>
* Sridhar Samudrala <sri@us.ibm.com>
* Jon Grimm <jgrimm@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c
index 794bb14decd..ce1ffd81177 100644
--- a/net/sctp/primitive.c
+++ b/net/sctp/primitive.c
@@ -29,10 +29,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -40,9 +37,6 @@
* Karl Knutson <karl@athena.chicago.il.us>
* Ardelle Fan <ardelle.fan@intel.com>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
index e62c22535be..53c452efb40 100644
--- a/net/sctp/probe.c
+++ b/net/sctp/probe.c
@@ -46,6 +46,10 @@ static int port __read_mostly = 0;
MODULE_PARM_DESC(port, "Port to match (0=all)");
module_param(port, int, 0);
+static unsigned int fwmark __read_mostly = 0;
+MODULE_PARM_DESC(fwmark, "skb mark to match (0=no mark)");
+module_param(fwmark, uint, 0);
+
static int bufsize __read_mostly = 64 * 1024;
MODULE_PARM_DESC(bufsize, "Log buffer size (default 64k)");
module_param(bufsize, int, 0);
@@ -129,15 +133,19 @@ static sctp_disposition_t jsctp_sf_eat_sack(struct net *net,
void *arg,
sctp_cmd_seq_t *commands)
{
+ struct sctp_chunk *chunk = arg;
+ struct sk_buff *skb = chunk->skb;
struct sctp_transport *sp;
static __u32 lcwnd = 0;
struct timespec now;
sp = asoc->peer.primary_path;
- if ((full || sp->cwnd != lcwnd) &&
- (!port || asoc->peer.port == port ||
- ep->base.bind_addr.port == port)) {
+ if (((port == 0 && fwmark == 0) ||
+ asoc->peer.port == port ||
+ ep->base.bind_addr.port == port ||
+ (fwmark > 0 && skb->mark == fwmark)) &&
+ (full || sp->cwnd != lcwnd)) {
lcwnd = sp->cwnd;
getnstimeofday(&now);
@@ -155,13 +163,8 @@ static sctp_disposition_t jsctp_sf_eat_sack(struct net *net,
if (sp == asoc->peer.primary_path)
printl("*");
- if (sp->ipaddr.sa.sa_family == AF_INET)
- printl("%pI4 ", &sp->ipaddr.v4.sin_addr);
- else
- printl("%pI6 ", &sp->ipaddr.v6.sin6_addr);
-
- printl("%2u %8u %8u %8u %8u %8u ",
- sp->state, sp->cwnd, sp->ssthresh,
+ printl("%pISc %2u %8u %8u %8u %8u %8u ",
+ &sp->ipaddr, sp->state, sp->cwnd, sp->ssthresh,
sp->flight_size, sp->partial_bytes_acked,
sp->pathmtu);
}
@@ -203,8 +206,8 @@ static __init int sctpprobe_init(void)
if (ret)
goto remove_proc;
- pr_info("probe registered (port=%d)\n", port);
-
+ pr_info("probe registered (port=%d/fwmark=%u) bufsize=%u\n",
+ port, fwmark, bufsize);
return 0;
remove_proc:
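
The probe module above now also matches on skb->mark through the new fwmark parameter. One way to tag a flow from userspace so it hits that filter is SO_MARK, which requires CAP_NET_ADMIN; a minimal sketch, assuming the module was loaded with a hypothetical fwmark=42 (not part of the patch):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
    unsigned int mark = 42;  /* must match the hypothetical fwmark=42 */
    int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);

    if (fd < 0) {
        perror("socket");
        return 1;
    }
    /* SO_MARK sets sk_mark, which ends up in skb->mark for this flow. */
    if (setsockopt(fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark)) < 0) {
        perror("setsockopt(SO_MARK)");
        return 1;
    }
    return 0;
}
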
diff --git a/net/sctp/proc.c b/net/sctp/proc.c
index 62526c47705..0c064215684 100644
--- a/net/sctp/proc.c
+++ b/net/sctp/proc.c
@@ -22,16 +22,10 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
@@ -232,7 +226,7 @@ static int sctp_eps_seq_show(struct seq_file *seq, void *v)
sk = epb->sk;
if (!net_eq(sock_net(sk), seq_file_net(seq)))
continue;
- seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
+ seq_printf(seq, "%8pK %8pK %-3d %-3d %-4d %-5d %5u %5lu ", ep, sk,
sctp_sk(sk)->type, sk->sk_state, hash,
epb->bind_addr.port,
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
@@ -342,7 +336,7 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
continue;
seq_printf(seq,
"%8pK %8pK %-3d %-3d %-2d %-4d "
- "%4d %8d %8d %7d %5lu %-5d %5d ",
+ "%4d %8d %8d %7u %5lu %-5d %5d ",
assoc, sk, sctp_sk(sk)->type, sk->sk_state,
assoc->state, hash,
assoc->assoc_id,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 4a17494d736..5e17092f4ad 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -29,10 +29,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -41,9 +38,6 @@
* Sridhar Samudrala <sri@us.ibm.com>
* Daisy Chang <daisyc@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -1547,7 +1541,7 @@ module_exit(sctp_exit);
*/
MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-132");
MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-132");
-MODULE_AUTHOR("Linux Kernel SCTP developers <lksctp-developers@lists.sourceforge.net>");
+MODULE_AUTHOR("Linux Kernel SCTP developers <linux-sctp@vger.kernel.org>");
MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)");
module_param_named(no_checksums, sctp_checksum_disable, bool, 0644);
MODULE_PARM_DESC(no_checksums, "Disable checksums computing and verification");
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 362ae6e2fd9..d244a23ab8d 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -29,10 +29,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -45,9 +42,6 @@
* Daisy Chang <daisyc@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -68,8 +62,12 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
-static struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
- __u8 type, __u8 flags, int paylen);
+static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc,
+ __u8 type, __u8 flags, int paylen);
+static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc,
+ __u8 flags, int paylen);
+static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
+ __u8 type, __u8 flags, int paylen);
static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
const struct sctp_chunk *init_chunk,
@@ -82,6 +80,28 @@ static int sctp_process_param(struct sctp_association *asoc,
static void *sctp_addto_param(struct sctp_chunk *chunk, int len,
const void *data);
+/* Control chunk destructor */
+static void sctp_control_release_owner(struct sk_buff *skb)
+{
+ /* TODO: do memory release */
+}
+
+static void sctp_control_set_owner_w(struct sctp_chunk *chunk)
+{
+ struct sctp_association *asoc = chunk->asoc;
+ struct sk_buff *skb = chunk->skb;
+
+ /* TODO: properly account for control chunks.
+ * To do it right we'll need:
+ * 1) endpoint if association isn't known.
+ * 2) proper memory accounting.
+ *
+ * For now, don't do anything.
+ */
+ skb->sk = asoc ? asoc->base.sk : NULL;
+ skb->destructor = sctp_control_release_owner;
+}
+
/* What was the inbound interface for this chunk? */
int sctp_chunk_iif(const struct sctp_chunk *chunk)
{
@@ -296,7 +316,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
* PLEASE DO NOT FIXME [This version does not support Host Name.]
*/
- retval = sctp_make_chunk(asoc, SCTP_CID_INIT, 0, chunksize);
+ retval = sctp_make_control(asoc, SCTP_CID_INIT, 0, chunksize);
if (!retval)
goto nodata;
@@ -443,7 +463,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
num_ext);
/* Now allocate and fill out the chunk. */
- retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
+ retval = sctp_make_control(asoc, SCTP_CID_INIT_ACK, 0, chunksize);
if (!retval)
goto nomem_chunk;
@@ -548,7 +568,7 @@ struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *asoc,
cookie_len = asoc->peer.cookie_len;
/* Build a cookie echo chunk. */
- retval = sctp_make_chunk(asoc, SCTP_CID_COOKIE_ECHO, 0, cookie_len);
+ retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ECHO, 0, cookie_len);
if (!retval)
goto nodata;
retval->subh.cookie_hdr =
@@ -593,7 +613,7 @@ struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc,
{
struct sctp_chunk *retval;
- retval = sctp_make_chunk(asoc, SCTP_CID_COOKIE_ACK, 0, 0);
+ retval = sctp_make_control(asoc, SCTP_CID_COOKIE_ACK, 0, 0);
/* RFC 2960 6.4 Multi-homed SCTP Endpoints
*
@@ -641,8 +661,8 @@ struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc,
sctp_cwrhdr_t cwr;
cwr.lowest_tsn = htonl(lowest_tsn);
- retval = sctp_make_chunk(asoc, SCTP_CID_ECN_CWR, 0,
- sizeof(sctp_cwrhdr_t));
+ retval = sctp_make_control(asoc, SCTP_CID_ECN_CWR, 0,
+ sizeof(sctp_cwrhdr_t));
if (!retval)
goto nodata;
@@ -675,8 +695,8 @@ struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc,
sctp_ecnehdr_t ecne;
ecne.lowest_tsn = htonl(lowest_tsn);
- retval = sctp_make_chunk(asoc, SCTP_CID_ECN_ECNE, 0,
- sizeof(sctp_ecnehdr_t));
+ retval = sctp_make_control(asoc, SCTP_CID_ECN_ECNE, 0,
+ sizeof(sctp_ecnehdr_t));
if (!retval)
goto nodata;
retval->subh.ecne_hdr =
@@ -712,7 +732,7 @@ struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc,
dp.ssn = htons(ssn);
chunk_len = sizeof(dp) + data_len;
- retval = sctp_make_chunk(asoc, SCTP_CID_DATA, flags, chunk_len);
+ retval = sctp_make_data(asoc, flags, chunk_len);
if (!retval)
goto nodata;
@@ -759,7 +779,7 @@ struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc)
+ sizeof(__u32) * num_dup_tsns;
/* Create the chunk. */
- retval = sctp_make_chunk(asoc, SCTP_CID_SACK, 0, len);
+ retval = sctp_make_control(asoc, SCTP_CID_SACK, 0, len);
if (!retval)
goto nodata;
@@ -838,8 +858,8 @@ struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc,
ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
shut.cum_tsn_ack = htonl(ctsn);
- retval = sctp_make_chunk(asoc, SCTP_CID_SHUTDOWN, 0,
- sizeof(sctp_shutdownhdr_t));
+ retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN, 0,
+ sizeof(sctp_shutdownhdr_t));
if (!retval)
goto nodata;
@@ -857,7 +877,7 @@ struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc,
{
struct sctp_chunk *retval;
- retval = sctp_make_chunk(asoc, SCTP_CID_SHUTDOWN_ACK, 0, 0);
+ retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_ACK, 0, 0);
/* RFC 2960 6.4 Multi-homed SCTP Endpoints
*
@@ -886,7 +906,7 @@ struct sctp_chunk *sctp_make_shutdown_complete(
*/
flags |= asoc ? 0 : SCTP_CHUNK_FLAG_T;
- retval = sctp_make_chunk(asoc, SCTP_CID_SHUTDOWN_COMPLETE, flags, 0);
+ retval = sctp_make_control(asoc, SCTP_CID_SHUTDOWN_COMPLETE, flags, 0);
/* RFC 2960 6.4 Multi-homed SCTP Endpoints
*
@@ -925,7 +945,7 @@ struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc,
flags = SCTP_CHUNK_FLAG_T;
}
- retval = sctp_make_chunk(asoc, SCTP_CID_ABORT, flags, hint);
+ retval = sctp_make_control(asoc, SCTP_CID_ABORT, flags, hint);
/* RFC 2960 6.4 Multi-homed SCTP Endpoints
*
@@ -1117,7 +1137,7 @@ struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc,
struct sctp_chunk *retval;
sctp_sender_hb_info_t hbinfo;
- retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo));
+ retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT, 0, sizeof(hbinfo));
if (!retval)
goto nodata;
@@ -1145,7 +1165,7 @@ struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc,
{
struct sctp_chunk *retval;
- retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen);
+ retval = sctp_make_control(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen);
if (!retval)
goto nodata;
@@ -1177,8 +1197,8 @@ static struct sctp_chunk *sctp_make_op_error_space(
{
struct sctp_chunk *retval;
- retval = sctp_make_chunk(asoc, SCTP_CID_ERROR, 0,
- sizeof(sctp_errhdr_t) + size);
+ retval = sctp_make_control(asoc, SCTP_CID_ERROR, 0,
+ sizeof(sctp_errhdr_t) + size);
if (!retval)
goto nodata;
@@ -1248,7 +1268,7 @@ struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc)
if (unlikely(!hmac_desc))
return NULL;
- retval = sctp_make_chunk(asoc, SCTP_CID_AUTH, 0,
+ retval = sctp_make_control(asoc, SCTP_CID_AUTH, 0,
hmac_desc->hmac_len + sizeof(sctp_authhdr_t));
if (!retval)
return NULL;
@@ -1351,8 +1371,8 @@ const union sctp_addr *sctp_source(const struct sctp_chunk *chunk)
/* Create a new chunk, setting the type and flags headers from the
* arguments, reserving enough space for a 'paylen' byte payload.
*/
-static struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
- __u8 type, __u8 flags, int paylen)
+static struct sctp_chunk *_sctp_make_chunk(const struct sctp_association *asoc,
+ __u8 type, __u8 flags, int paylen)
{
struct sctp_chunk *retval;
sctp_chunkhdr_t *chunk_hdr;
@@ -1385,14 +1405,27 @@ static struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc,
if (sctp_auth_send_cid(type, asoc))
retval->auth = 1;
- /* Set the skb to the belonging sock for accounting. */
- skb->sk = sk;
-
return retval;
nodata:
return NULL;
}
+static struct sctp_chunk *sctp_make_data(const struct sctp_association *asoc,
+ __u8 flags, int paylen)
+{
+ return _sctp_make_chunk(asoc, SCTP_CID_DATA, flags, paylen);
+}
+
+static struct sctp_chunk *sctp_make_control(const struct sctp_association *asoc,
+ __u8 type, __u8 flags, int paylen)
+{
+ struct sctp_chunk *chunk = _sctp_make_chunk(asoc, type, flags, paylen);
+
+ if (chunk)
+ sctp_control_set_owner_w(chunk);
+
+ return chunk;
+}
/* Release the memory occupied by a chunk. */
static void sctp_chunk_destroy(struct sctp_chunk *chunk)
@@ -2207,25 +2240,23 @@ int sctp_verify_init(struct net *net, const struct sctp_association *asoc,
struct sctp_chunk **errp)
{
union sctp_params param;
- int has_cookie = 0;
+ bool has_cookie = false;
int result;
- /* Verify stream values are non-zero. */
- if ((0 == peer_init->init_hdr.num_outbound_streams) ||
- (0 == peer_init->init_hdr.num_inbound_streams) ||
- (0 == peer_init->init_hdr.init_tag) ||
- (SCTP_DEFAULT_MINWINDOW > ntohl(peer_init->init_hdr.a_rwnd))) {
-
+ /* Check for missing mandatory parameters. Note: Initial TSN is
+ * also mandatory, but is not checked here since the valid range
+ * is 0..2**32-1. RFC4960, section 3.3.3.
+ */
+ if (peer_init->init_hdr.num_outbound_streams == 0 ||
+ peer_init->init_hdr.num_inbound_streams == 0 ||
+ peer_init->init_hdr.init_tag == 0 ||
+ ntohl(peer_init->init_hdr.a_rwnd) < SCTP_DEFAULT_MINWINDOW)
return sctp_process_inv_mandatory(asoc, chunk, errp);
- }
- /* Check for missing mandatory parameters. */
sctp_walk_params(param, peer_init, init_hdr.params) {
-
- if (SCTP_PARAM_STATE_COOKIE == param.p->type)
- has_cookie = 1;
-
- } /* for (loop through all parameters) */
+ if (param.p->type == SCTP_PARAM_STATE_COOKIE)
+ has_cookie = true;
+ }
/* There is a possibility that a parameter length was bad and
* in that case we would have stopped walking the parameters.
@@ -2733,7 +2764,7 @@ static struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc,
length += addrlen;
/* Create the chunk. */
- retval = sctp_make_chunk(asoc, SCTP_CID_ASCONF, 0, length);
+ retval = sctp_make_control(asoc, SCTP_CID_ASCONF, 0, length);
if (!retval)
return NULL;
@@ -2917,7 +2948,7 @@ static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *as
int length = sizeof(asconf) + vparam_len;
/* Create the chunk. */
- retval = sctp_make_chunk(asoc, SCTP_CID_ASCONF_ACK, 0, length);
+ retval = sctp_make_control(asoc, SCTP_CID_ASCONF_ACK, 0, length);
if (!retval)
return NULL;
@@ -3448,7 +3479,7 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
hint = (nstreams + 1) * sizeof(__u32);
- retval = sctp_make_chunk(asoc, SCTP_CID_FWD_TSN, 0, hint);
+ retval = sctp_make_control(asoc, SCTP_CID_FWD_TSN, 0, hint);
if (!retval)
return NULL;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 9da68852ee9..666c6684279 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -28,10 +28,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -42,9 +39,6 @@
* Daisy Chang <daisyc@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index f6b7109195a..dfe3f36ff2a 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -28,10 +28,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -45,9 +42,6 @@
* Ardelle Fan <ardelle.fan@intel.com>
* Ryan Layer <rmlayer@us.ibm.com>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index 84d98d8a5a7..c5999b2dde7 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -28,10 +28,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -41,9 +38,6 @@
* Daisy Chang <daisyc@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index c6670d2e3f8..d5d5882a289 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -34,10 +34,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -52,9 +49,6 @@
* Ryan Layer <rmlayer@us.ibm.com>
* Anup Pemmaiah <pemmaiah@cc.usu.edu>
* Kevin Gao <kevin.gao@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c
index da860352380..6007124aefa 100644
--- a/net/sctp/ssnmap.c
+++ b/net/sctp/ssnmap.c
@@ -24,16 +24,10 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/types.h>
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 9a5c4c9edda..6b36561a1b3 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -25,10 +25,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Mingqin Liu <liuming@us.ibm.com>
@@ -36,9 +33,6 @@
* Ardelle Fan <ardelle.fan@intel.com>
* Ryan Layer <rmlayer@us.ibm.com>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <net/sctp/structs.h>
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index bdbbc3fd7c1..e332efb124c 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -30,10 +30,7 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
@@ -43,9 +40,6 @@
* Hui Huang <hui.huang@nokia.com>
* Sridhar Samudrala <sri@us.ibm.com>
* Ardelle Fan <ardelle.fan@intel.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -181,12 +175,12 @@ static void sctp_transport_destroy(struct sctp_transport *transport)
return;
}
- call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
-
sctp_packet_free(&transport->packet);
if (transport->asoc)
sctp_association_put(transport->asoc);
+
+ call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
}
/* Start T3_rtx timer if it is not already running and update the heartbeat
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c
index b46019568a8..fbda2002828 100644
--- a/net/sctp/tsnmap.c
+++ b/net/sctp/tsnmap.c
@@ -27,19 +27,13 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* La Monte H.P. Yarroll <piggy@acm.org>
* Jon Grimm <jgrimm@us.ibm.com>
* Karl Knutson <karl@athena.chicago.il.us>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/slab.h>
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 44a45dbee4d..81089ed6545 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -28,19 +28,13 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Ardelle Fan <ardelle.fan@intel.com>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/slab.h>
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index 04e3d470f87..1c1484ed605 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -27,18 +27,12 @@
*
* Please send any bug reports or fixes you make to the
* email address(es):
- * lksctp developers <lksctp-developers@lists.sourceforge.net>
- *
- * Or submit a bug report through the following website:
- * http://www.sf.net/projects/lksctp
+ * lksctp developers <linux-sctp@vger.kernel.org>
*
* Written or modified by:
* Jon Grimm <jgrimm@us.ibm.com>
* La Monte H.P. Yarroll <piggy@acm.org>
* Sridhar Samudrala <sri@us.ibm.com>
- *
- * Any bugs reported given to us we will try to fix... any fixes shared will
- * be incorporated into the next SCTP release.
*/
#include <linux/slab.h>
diff --git a/net/socket.c b/net/socket.c
index 829b460acb8..b2d7c629eeb 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -106,7 +106,7 @@
#include <linux/atalk.h>
#include <net/busy_poll.h>
-#ifdef CONFIG_NET_LL_RX_POLL
+#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sysctl_net_busy_read __read_mostly;
unsigned int sysctl_net_busy_poll __read_mostly;
#endif
diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
index d304f41260f..af7ffd447fe 100644
--- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
+++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
@@ -120,7 +120,7 @@ static int gssp_rpc_create(struct net *net, struct rpc_clnt **_clnt)
if (IS_ERR(clnt)) {
dprintk("RPC: failed to create AF_LOCAL gssproxy "
"client (errno %ld).\n", PTR_ERR(clnt));
- result = -PTR_ERR(clnt);
+ result = PTR_ERR(clnt);
*_clnt = NULL;
goto out;
}
@@ -328,7 +328,6 @@ void gssp_free_upcall_data(struct gssp_upcall_data *data)
kfree(data->in_handle.data);
kfree(data->out_handle.data);
kfree(data->out_token.data);
- kfree(data->mech_oid.data);
free_svc_cred(&data->creds);
}
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
index 357f613df7f..3c85d1c8a02 100644
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -430,7 +430,7 @@ static int dummy_enc_nameattr_array(struct xdr_stream *xdr,
static int dummy_dec_nameattr_array(struct xdr_stream *xdr,
struct gssx_name_attr_array *naa)
{
- struct gssx_name_attr dummy;
+ struct gssx_name_attr dummy = { .attr = {.len = 0} };
u32 count, i;
__be32 *p;
@@ -493,12 +493,13 @@ static int gssx_enc_name(struct xdr_stream *xdr,
return err;
}
+
static int gssx_dec_name(struct xdr_stream *xdr,
struct gssx_name *name)
{
- struct xdr_netobj dummy_netobj;
- struct gssx_name_attr_array dummy_name_attr_array;
- struct gssx_option_array dummy_option_array;
+ struct xdr_netobj dummy_netobj = { .len = 0 };
+ struct gssx_name_attr_array dummy_name_attr_array = { .count = 0 };
+ struct gssx_option_array dummy_option_array = { .count = 0 };
int err;
/* name->display_name */
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index d0347d148b3..09fb638bcaa 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1180,6 +1180,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
gm = gss_mech_get_by_OID(&ud->mech_oid);
if (!gm)
goto out;
+ rsci.cred.cr_gss_mech = gm;
status = -EINVAL;
/* mech-specific data: */
@@ -1195,7 +1196,6 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
rscp = rsc_update(cd, &rsci, rscp);
status = 0;
out:
- gss_mech_put(gm);
rsc_free(&rsci);
if (rscp)
cache_put(&rscp->h, cd);
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 74f6a704e37..ecbc4e3d83a 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1660,6 +1660,10 @@ call_connect(struct rpc_task *task)
task->tk_action = call_connect_status;
if (task->tk_status < 0)
return;
+ if (task->tk_flags & RPC_TASK_NOCONNECT) {
+ rpc_exit(task, -ENOTCONN);
+ return;
+ }
xprt_connect(task);
}
}
diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h
index 74d948f5d5a..779742cfc1f 100644
--- a/net/sunrpc/netns.h
+++ b/net/sunrpc/netns.h
@@ -23,6 +23,7 @@ struct sunrpc_net {
struct rpc_clnt *rpcb_local_clnt4;
spinlock_t rpcb_clnt_lock;
unsigned int rpcb_users;
+ unsigned int rpcb_is_af_local : 1;
struct mutex gssp_lock;
wait_queue_head_t gssp_wq;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 3df764dc330..1891a1022c1 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -204,13 +204,15 @@ void rpcb_put_local(struct net *net)
}
static void rpcb_set_local(struct net *net, struct rpc_clnt *clnt,
- struct rpc_clnt *clnt4)
+ struct rpc_clnt *clnt4,
+ bool is_af_local)
{
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
/* Protected by rpcb_create_local_mutex */
sn->rpcb_local_clnt = clnt;
sn->rpcb_local_clnt4 = clnt4;
+ sn->rpcb_is_af_local = is_af_local ? 1 : 0;
smp_wmb();
sn->rpcb_users = 1;
dprintk("RPC: created new rpcb local clients (rpcb_local_clnt: "
@@ -238,6 +240,14 @@ static int rpcb_create_local_unix(struct net *net)
.program = &rpcb_program,
.version = RPCBVERS_2,
.authflavor = RPC_AUTH_NULL,
+ /*
+ * We turn off the idle timeout to prevent the kernel
+ * from automatically disconnecting the socket.
+ * Otherwise, we'd have to cache the mount namespace
+ * of the caller and somehow pass that to the socket
+ * reconnect code.
+ */
+ .flags = RPC_CLNT_CREATE_NO_IDLE_TIMEOUT,
};
struct rpc_clnt *clnt, *clnt4;
int result = 0;
@@ -263,7 +273,7 @@ static int rpcb_create_local_unix(struct net *net)
clnt4 = NULL;
}
- rpcb_set_local(net, clnt, clnt4);
+ rpcb_set_local(net, clnt, clnt4, true);
out:
return result;
@@ -315,7 +325,7 @@ static int rpcb_create_local_net(struct net *net)
clnt4 = NULL;
}
- rpcb_set_local(net, clnt, clnt4);
+ rpcb_set_local(net, clnt, clnt4, false);
out:
return result;
@@ -376,13 +386,16 @@ static struct rpc_clnt *rpcb_create(struct net *net, const char *hostname,
return rpc_create(&args);
}
-static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
+static int rpcb_register_call(struct sunrpc_net *sn, struct rpc_clnt *clnt, struct rpc_message *msg, bool is_set)
{
- int result, error = 0;
+ int flags = RPC_TASK_NOCONNECT;
+ int error, result = 0;
+ if (is_set || !sn->rpcb_is_af_local)
+ flags = RPC_TASK_SOFTCONN;
msg->rpc_resp = &result;
- error = rpc_call_sync(clnt, msg, RPC_TASK_SOFTCONN);
+ error = rpc_call_sync(clnt, msg, flags);
if (error < 0) {
dprintk("RPC: failed to contact local rpcbind "
"server (errno %d).\n", -error);
@@ -439,16 +452,19 @@ int rpcb_register(struct net *net, u32 prog, u32 vers, int prot, unsigned short
.rpc_argp = &map,
};
struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+ bool is_set = false;
dprintk("RPC: %sregistering (%u, %u, %d, %u) with local "
"rpcbind\n", (port ? "" : "un"),
prog, vers, prot, port);
msg.rpc_proc = &rpcb_procedures2[RPCBPROC_UNSET];
- if (port)
+ if (port != 0) {
msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET];
+ is_set = true;
+ }
- return rpcb_register_call(sn->rpcb_local_clnt, &msg);
+ return rpcb_register_call(sn, sn->rpcb_local_clnt, &msg, is_set);
}
/*
@@ -461,6 +477,7 @@ static int rpcb_register_inet4(struct sunrpc_net *sn,
const struct sockaddr_in *sin = (const struct sockaddr_in *)sap;
struct rpcbind_args *map = msg->rpc_argp;
unsigned short port = ntohs(sin->sin_port);
+ bool is_set = false;
int result;
map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
@@ -471,10 +488,12 @@ static int rpcb_register_inet4(struct sunrpc_net *sn,
map->r_addr, map->r_netid);
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
- if (port)
+ if (port != 0) {
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
+ is_set = true;
+ }
- result = rpcb_register_call(sn->rpcb_local_clnt4, msg);
+ result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
kfree(map->r_addr);
return result;
}
@@ -489,6 +508,7 @@ static int rpcb_register_inet6(struct sunrpc_net *sn,
const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap;
struct rpcbind_args *map = msg->rpc_argp;
unsigned short port = ntohs(sin6->sin6_port);
+ bool is_set = false;
int result;
map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
@@ -499,10 +519,12 @@ static int rpcb_register_inet6(struct sunrpc_net *sn,
map->r_addr, map->r_netid);
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
- if (port)
+ if (port != 0) {
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
+ is_set = true;
+ }
- result = rpcb_register_call(sn->rpcb_local_clnt4, msg);
+ result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
kfree(map->r_addr);
return result;
}
@@ -519,7 +541,7 @@ static int rpcb_unregister_all_protofamilies(struct sunrpc_net *sn,
map->r_addr = "";
msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
- return rpcb_register_call(sn->rpcb_local_clnt4, msg);
+ return rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, false);
}
/**
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 305374d4fb9..9c9caaa5e0d 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -442,7 +442,7 @@ static void svc_tcp_write_space(struct sock *sk)
{
struct socket *sock = sk->sk_socket;
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock)
+ if (sk_stream_is_writeable(sk) && sock)
clear_bit(SOCK_NOSPACE, &sock->flags);
svc_write_space(sk);
}
@@ -1193,7 +1193,9 @@ static int svc_tcp_has_wspace(struct svc_xprt *xprt)
if (test_bit(XPT_LISTENER, &xprt->xpt_flags))
return 1;
required = atomic_read(&xprt->xpt_reserved) + serv->sv_max_mesg;
- if (sk_stream_wspace(svsk->sk_sk) >= required)
+ if (sk_stream_wspace(svsk->sk_sk) >= required ||
+ (sk_stream_min_wspace(svsk->sk_sk) == 0 &&
+ atomic_read(&xprt->xpt_reserved) == 0))
return 1;
set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
return 0;
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 75edcfad6e2..1504bb11e4f 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -207,10 +207,13 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
pgfrom_base -= copy;
vto = kmap_atomic(*pgto);
- vfrom = kmap_atomic(*pgfrom);
- memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
+ if (*pgto != *pgfrom) {
+ vfrom = kmap_atomic(*pgfrom);
+ memcpy(vto + pgto_base, vfrom + pgfrom_base, copy);
+ kunmap_atomic(vfrom);
+ } else
+ memmove(vto + pgto_base, vto + pgfrom_base, copy);
flush_dcache_page(*pgto);
- kunmap_atomic(vfrom);
kunmap_atomic(vto);
} while ((len -= copy) != 0);
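
The _shift_data_right_pages() fix above distinguishes two cases: when source and destination land in the same page the ranges can overlap, so a single mapping plus memmove() is required; when they are different pages the ranges cannot overlap and a plain memcpy() between two mappings is enough. A tiny userspace illustration of why the overlap case needs memmove() (not part of the patch):

#include <stdio.h>
#include <string.h>

int main(void)
{
    char page[16] = "abcdefgh";
    char other[16];

    /* Same buffer, overlapping ranges: memmove() is the safe choice;
     * memcpy() here would be undefined behaviour. */
    memmove(page + 2, page, 6);
    printf("%s\n", page);              /* prints "ababcdef" */

    /* Distinct buffers can never overlap, so memcpy() is fine. */
    memcpy(other, page, sizeof(page));
    printf("%s\n", other);
    return 0;
}
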
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index ddf0602603b..d6656d7768f 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1602,7 +1602,7 @@ static void xs_tcp_write_space(struct sock *sk)
read_lock_bh(&sk->sk_callback_lock);
/* from net/core/stream.c:sk_stream_write_space */
- if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
+ if (sk_stream_is_writeable(sk))
xs_write_space(sk);
read_unlock_bh(&sk->sk_callback_lock);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index cb29ef7ba2f..609c30c8081 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -460,6 +460,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
{
struct tipc_link *l_ptr;
struct tipc_link *temp_l_ptr;
+ struct tipc_link_req *temp_req;
pr_info("Disabling bearer <%s>\n", b_ptr->name);
spin_lock_bh(&b_ptr->lock);
@@ -468,9 +469,13 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
tipc_link_delete(l_ptr);
}
- if (b_ptr->link_req)
- tipc_disc_delete(b_ptr->link_req);
+ temp_req = b_ptr->link_req;
+ b_ptr->link_req = NULL;
spin_unlock_bh(&b_ptr->lock);
+
+ if (temp_req)
+ tipc_disc_delete(temp_req);
+
memset(b_ptr, 0, sizeof(struct tipc_bearer));
}
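
The bearer_disable() change above detaches link_req while b_ptr->lock is held, clears the field, and only calls tipc_disc_delete() once the spinlock has been dropped, so the teardown work never runs under the lock. A generic pthread sketch of that detach-under-lock pattern (names are illustrative, not TIPC code):

#include <pthread.h>
#include <stdlib.h>

struct bearer_like {
    pthread_mutex_t lock;
    void *link_req;                 /* resource owned by the bearer */
};

static void disable_bearer(struct bearer_like *b)
{
    void *req;

    pthread_mutex_lock(&b->lock);
    req = b->link_req;              /* detach under the lock */
    b->link_req = NULL;
    pthread_mutex_unlock(&b->lock);

    free(req);                      /* heavier teardown happens outside the lock */
}

int main(void)
{
    struct bearer_like b = { PTHREAD_MUTEX_INITIALIZER, malloc(32) };

    disable_bearer(&b);
    return 0;
}
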
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 19da5abe0fa..fd3fa57a410 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -355,8 +355,12 @@ static int tipc_open_listening_sock(struct tipc_server *s)
return PTR_ERR(con);
sock = tipc_create_listen_sock(con);
- if (!sock)
+ if (!sock) {
+ idr_remove(&s->conn_idr, con->conid);
+ s->idr_in_use--;
+ kfree(con);
return -EINVAL;
+ }
tipc_register_callbacks(sock, con);
return 0;
@@ -563,9 +567,14 @@ int tipc_server_start(struct tipc_server *s)
kmem_cache_destroy(s->rcvbuf_cache);
return ret;
}
+ ret = tipc_open_listening_sock(s);
+ if (ret < 0) {
+ tipc_work_stop(s);
+ kmem_cache_destroy(s->rcvbuf_cache);
+ return ret;
+ }
s->enabled = 1;
-
- return tipc_open_listening_sock(s);
+ return ret;
}
void tipc_server_stop(struct tipc_server *s)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index ce8249c7682..6cc7ddd2fb7 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1257,7 +1257,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
/* Accept only ACK or NACK message */
if (unlikely(msg_errcode(msg))) {
sock->state = SS_DISCONNECTING;
- sk->sk_err = -ECONNREFUSED;
+ sk->sk_err = ECONNREFUSED;
retval = TIPC_OK;
break;
}
@@ -1268,7 +1268,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
res = auto_connect(sock, msg);
if (res) {
sock->state = SS_DISCONNECTING;
- sk->sk_err = res;
+ sk->sk_err = -res;
retval = TIPC_OK;
break;
}
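
Both filter_connect() hunks above restore the usual sign convention: in-kernel helpers return negative errno values, while sk->sk_err, like userspace errno, stores the positive value. A small standalone reminder of that convention (illustrative only, not part of the patch):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for an in-kernel helper that fails: returns negative errno. */
static int do_connect(void)
{
    return -ECONNREFUSED;
}

int main(void)
{
    int res = do_connect();
    int sk_err = -res;   /* store the positive value, as sk->sk_err does */

    printf("res=%d sk_err=%d (%s)\n", res, sk_err, strerror(sk_err));
    return 0;
}
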
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c4ce243824b..86de99ad297 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1479,7 +1479,8 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
MAX_SKB_FRAGS * PAGE_SIZE);
skb = sock_alloc_send_pskb(sk, len - data_len, data_len,
- msg->msg_flags & MSG_DONTWAIT, &err);
+ msg->msg_flags & MSG_DONTWAIT, &err,
+ PAGE_ALLOC_COSTLY_ORDER);
if (skb == NULL)
goto out;
@@ -1596,6 +1597,10 @@ out:
return err;
}
+/* We use paged skbs for stream sockets, and limit occupancy to 32768
+ * bytes, and a minimum of a full page.
+ */
+#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct msghdr *msg, size_t len)
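
UNIX_SKB_FRAGS_SZ above works out to 32768 on 4 KiB pages and to one full page on architectures whose PAGE_SIZE already exceeds 32 KiB, which is what the "minimum of a full page" wording means. A userspace sketch of the arithmetic, using a local stand-in for get_order() (assumption: get_order(n) is the smallest order such that PAGE_SIZE << order >= n; not part of the patch):

#include <stdio.h>

/* Local stand-in for the kernel's get_order(). */
static unsigned int order_for(unsigned long size, unsigned long page_size)
{
    unsigned int order = 0;

    while ((page_size << order) < size)
        order++;
    return order;
}

int main(void)
{
    unsigned long page_sizes[] = { 4096, 65536 };

    for (int i = 0; i < 2; i++) {
        unsigned long ps = page_sizes[i];

        printf("PAGE_SIZE=%lu -> frag budget %lu\n",
               ps, ps << order_for(32768, ps));
    }
    return 0;   /* prints 32768 for 4K pages, 65536 for 64K pages */
}
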
@@ -1609,6 +1614,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
struct scm_cookie tmp_scm;
bool fds_sent = false;
int max_level;
+ int data_len;
if (NULL == siocb->scm)
siocb->scm = &tmp_scm;
@@ -1635,40 +1641,22 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
goto pipe_err;
while (sent < len) {
- /*
- * Optimisation for the fact that under 0.01% of X
- * messages typically need breaking up.
- */
-
- size = len-sent;
+ size = len - sent;
/* Keep two messages in the pipe so it schedules better */
- if (size > ((sk->sk_sndbuf >> 1) - 64))
- size = (sk->sk_sndbuf >> 1) - 64;
+ size = min_t(int, size, (sk->sk_sndbuf >> 1) - 64);
- if (size > SKB_MAX_ALLOC)
- size = SKB_MAX_ALLOC;
-
- /*
- * Grab a buffer
- */
+ /* allow fallback to order-0 allocations */
+ size = min_t(int, size, SKB_MAX_HEAD(0) + UNIX_SKB_FRAGS_SZ);
- skb = sock_alloc_send_skb(sk, size, msg->msg_flags&MSG_DONTWAIT,
- &err);
+ data_len = max_t(int, 0, size - SKB_MAX_HEAD(0));
- if (skb == NULL)
+ skb = sock_alloc_send_pskb(sk, size - data_len, data_len,
+ msg->msg_flags & MSG_DONTWAIT, &err,
+ get_order(UNIX_SKB_FRAGS_SZ));
+ if (!skb)
goto out_err;
- /*
- * If you pass two values to the sock_alloc_send_skb
- * it tries to grab the large buffer with GFP_NOFS
- * (which can fail easily), and if it fails grab the
- * fallback size buffer which is under a page and will
- * succeed. [Alan]
- */
- size = min_t(int, size, skb_tailroom(skb));
-
-
/* Only send the fds in the first buffer */
err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
if (err < 0) {
@@ -1678,7 +1666,11 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
max_level = err + 1;
fds_sent = true;
- err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
+ skb_put(skb, size - data_len);
+ skb->data_len = data_len;
+ skb->len = size;
+ err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov,
+ sent, size);
if (err) {
kfree_skb(skb);
goto out_err;
@@ -1890,6 +1882,11 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
return timeo;
}
+static unsigned int unix_skb_len(const struct sk_buff *skb)
+{
+ return skb->len - UNIXCB(skb).consumed;
+}
+
static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size,
int flags)
@@ -1977,8 +1974,8 @@ again:
}
skip = sk_peek_offset(sk, flags);
- while (skip >= skb->len) {
- skip -= skb->len;
+ while (skip >= unix_skb_len(skb)) {
+ skip -= unix_skb_len(skb);
last = skb;
skb = skb_peek_next(skb, &sk->sk_receive_queue);
if (!skb)
@@ -2005,8 +2002,9 @@ again:
sunaddr = NULL;
}
- chunk = min_t(unsigned int, skb->len - skip, size);
- if (memcpy_toiovec(msg->msg_iov, skb->data + skip, chunk)) {
+ chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
+ if (skb_copy_datagram_iovec(skb, UNIXCB(skb).consumed + skip,
+ msg->msg_iov, chunk)) {
if (copied == 0)
copied = -EFAULT;
break;
@@ -2016,14 +2014,14 @@ again:
/* Mark read part of skb as used */
if (!(flags & MSG_PEEK)) {
- skb_pull(skb, chunk);
+ UNIXCB(skb).consumed += chunk;
sk_peek_offset_bwd(sk, chunk);
if (UNIXCB(skb).fp)
unix_detach_fds(siocb->scm, skb);
- if (skb->len)
+ if (unix_skb_len(skb))
break;
skb_unlink(skb, &sk->sk_receive_queue);
@@ -2107,7 +2105,7 @@ long unix_inq_len(struct sock *sk)
if (sk->sk_type == SOCK_STREAM ||
sk->sk_type == SOCK_SEQPACKET) {
skb_queue_walk(&sk->sk_receive_queue, skb)
- amount += skb->len;
+ amount += unix_skb_len(skb);
} else {
skb = skb_peek(&sk->sk_receive_queue);
if (skb)
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 593071dabd1..545c08b8a1d 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -96,8 +96,7 @@
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
-
-#include "af_vsock.h"
+#include <net/af_vsock.h>
static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
@@ -347,7 +346,7 @@ void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
struct vsock_sock *vsk;
list_for_each_entry(vsk, &vsock_connected_table[i],
- connected_table);
+ connected_table)
fn(sk_vsock(vsk));
}
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index ffc11df02af..9d6986634e0 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -34,8 +34,8 @@
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
+#include <net/af_vsock.h>
-#include "af_vsock.h"
#include "vmci_transport_notify.h"
static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg);
diff --git a/net/vmw_vsock/vmci_transport.h b/net/vmw_vsock/vmci_transport.h
index fd88ea8924e..ce6c9623d5f 100644
--- a/net/vmw_vsock/vmci_transport.h
+++ b/net/vmw_vsock/vmci_transport.h
@@ -19,8 +19,8 @@
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
-#include "vsock_addr.h"
-#include "af_vsock.h"
+#include <net/vsock_addr.h>
+#include <net/af_vsock.h>
/* If the packet format changes in a release then this should change too. */
#define VMCI_TRANSPORT_PACKET_VERSION 1
diff --git a/net/vmw_vsock/vsock_addr.c b/net/vmw_vsock/vsock_addr.c
index ec2611b4ea0..82486ee55ea 100644
--- a/net/vmw_vsock/vsock_addr.c
+++ b/net/vmw_vsock/vsock_addr.c
@@ -17,8 +17,7 @@
#include <linux/socket.h>
#include <linux/stddef.h>
#include <net/sock.h>
-
-#include "vsock_addr.h"
+#include <net/vsock_addr.h>
void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port)
{
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 4f9f216665e..67153964aad 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -462,6 +462,14 @@ int wiphy_register(struct wiphy *wiphy)
return -EINVAL;
#endif
+ if (WARN_ON(wiphy->coalesce &&
+ (!wiphy->coalesce->n_rules ||
+ !wiphy->coalesce->n_patterns) &&
+ (!wiphy->coalesce->pattern_min_len ||
+ wiphy->coalesce->pattern_min_len >
+ wiphy->coalesce->pattern_max_len)))
+ return -EINVAL;
+
if (WARN_ON(wiphy->ap_sme_capa &&
!(wiphy->flags & WIPHY_FLAG_HAVE_AP_SME)))
return -EINVAL;
@@ -668,6 +676,7 @@ void wiphy_unregister(struct wiphy *wiphy)
rdev_set_wakeup(rdev, false);
#endif
cfg80211_rdev_free_wowlan(rdev);
+ cfg80211_rdev_free_coalesce(rdev);
}
EXPORT_SYMBOL(wiphy_unregister);
@@ -765,6 +774,7 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
cfg80211_leave_mesh(rdev, dev);
break;
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
cfg80211_stop_ap(rdev, dev);
break;
default:
diff --git a/net/wireless/core.h b/net/wireless/core.h
index a6b45bf00f3..9ad43c619c5 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -79,6 +79,8 @@ struct cfg80211_registered_device {
/* netlink port which started critical protocol (0 means not started) */
u32 crit_proto_nlportid;
+ struct cfg80211_coalesce *coalesce;
+
/* must be last because of the way we do wiphy_priv(),
* and it should at least be aligned to NETDEV_ALIGN */
struct wiphy wiphy __aligned(NETDEV_ALIGN);
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 30c49202ee4..0553fd4d85a 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -167,9 +167,12 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
* basic rates
*/
if (!setup->basic_rates) {
+ enum nl80211_bss_scan_width scan_width;
struct ieee80211_supported_band *sband =
rdev->wiphy.bands[setup->chandef.chan->band];
- setup->basic_rates = ieee80211_mandatory_rates(sband);
+ scan_width = cfg80211_chandef_to_scan_width(&setup->chandef);
+ setup->basic_rates = ieee80211_mandatory_rates(sband,
+ scan_width);
}
if (!cfg80211_reg_can_beacon(&rdev->wiphy, &setup->chandef))
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index bfac5e186f5..8d49c1ce3de 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -621,7 +621,7 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
}
bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
- const u8 *buf, size_t len, gfp_t gfp)
+ const u8 *buf, size_t len, u32 flags, gfp_t gfp)
{
struct wiphy *wiphy = wdev->wiphy;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
@@ -664,7 +664,7 @@ bool cfg80211_rx_mgmt(struct wireless_dev *wdev, int freq, int sig_mbm,
/* Indicate the received Action frame to user space */
if (nl80211_send_mgmt(rdev, wdev, reg->nlportid,
freq, sig_mbm,
- buf, len, gfp))
+ buf, len, flags, gfp))
continue;
result = true;
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 1cc47aca7f0..af8d84a4a5b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -349,6 +349,11 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY,
.len = IEEE80211_MAX_DATA_LEN },
[NL80211_ATTR_PEER_AID] = { .type = NLA_U16 },
+ [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 },
+ [NL80211_ATTR_CH_SWITCH_BLOCK_TX] = { .type = NLA_FLAG },
+ [NL80211_ATTR_CSA_IES] = { .type = NLA_NESTED },
+ [NL80211_ATTR_CSA_C_OFF_BEACON] = { .type = NLA_U16 },
+ [NL80211_ATTR_CSA_C_OFF_PRESP] = { .type = NLA_U16 },
};
/* policy for the key attributes */
@@ -403,6 +408,14 @@ nl80211_wowlan_tcp_policy[NUM_NL80211_WOWLAN_TCP] = {
[NL80211_WOWLAN_TCP_WAKE_MASK] = { .len = 1 },
};
+/* policy for coalesce rule attributes */
+static const struct nla_policy
+nl80211_coalesce_policy[NUM_NL80211_ATTR_COALESCE_RULE] = {
+ [NL80211_ATTR_COALESCE_RULE_DELAY] = { .type = NLA_U32 },
+ [NL80211_ATTR_COALESCE_RULE_CONDITION] = { .type = NLA_U32 },
+ [NL80211_ATTR_COALESCE_RULE_PKT_PATTERN] = { .type = NLA_NESTED },
+};
+
/* policy for GTK rekey offload attributes */
static const struct nla_policy
nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = {
@@ -441,10 +454,12 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb,
goto out_unlock;
}
*rdev = wiphy_to_dev((*wdev)->wiphy);
- cb->args[0] = (*rdev)->wiphy_idx;
+ /* 0 is the first index - add 1 to parse only once */
+ cb->args[0] = (*rdev)->wiphy_idx + 1;
cb->args[1] = (*wdev)->identifier;
} else {
- struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0]);
+ /* subtract the 1 again here */
+ struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
struct wireless_dev *tmp;
if (!wiphy) {
@@ -974,7 +989,7 @@ static int nl80211_send_wowlan(struct sk_buff *msg,
return -ENOBUFS;
if (dev->wiphy.wowlan->n_patterns) {
- struct nl80211_wowlan_pattern_support pat = {
+ struct nl80211_pattern_support pat = {
.max_patterns = dev->wiphy.wowlan->n_patterns,
.min_pattern_len = dev->wiphy.wowlan->pattern_min_len,
.max_pattern_len = dev->wiphy.wowlan->pattern_max_len,
@@ -995,6 +1010,27 @@ static int nl80211_send_wowlan(struct sk_buff *msg,
}
#endif
+static int nl80211_send_coalesce(struct sk_buff *msg,
+ struct cfg80211_registered_device *dev)
+{
+ struct nl80211_coalesce_rule_support rule;
+
+ if (!dev->wiphy.coalesce)
+ return 0;
+
+ rule.max_rules = dev->wiphy.coalesce->n_rules;
+ rule.max_delay = dev->wiphy.coalesce->max_delay;
+ rule.pat.max_patterns = dev->wiphy.coalesce->n_patterns;
+ rule.pat.min_pattern_len = dev->wiphy.coalesce->pattern_min_len;
+ rule.pat.max_pattern_len = dev->wiphy.coalesce->pattern_max_len;
+ rule.pat.max_pkt_offset = dev->wiphy.coalesce->max_pkt_offset;
+
+ if (nla_put(msg, NL80211_ATTR_COALESCE_RULE, sizeof(rule), &rule))
+ return -ENOBUFS;
+
+ return 0;
+}
+
static int nl80211_send_band_rateinfo(struct sk_buff *msg,
struct ieee80211_supported_band *sband)
{
@@ -1393,6 +1429,8 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
if (state->split) {
CMD(crit_proto_start, CRIT_PROTOCOL_START);
CMD(crit_proto_stop, CRIT_PROTOCOL_STOP);
+ if (dev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
+ CMD(channel_switch, CHANNEL_SWITCH);
}
#ifdef CONFIG_NL80211_TESTMODE
@@ -1513,6 +1551,12 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
dev->wiphy.vht_capa_mod_mask))
goto nla_put_failure;
+ state->split_start++;
+ break;
+ case 10:
+ if (nl80211_send_coalesce(msg, dev))
+ goto nla_put_failure;
+
/* done */
state->split_start = 0;
break;
@@ -2620,8 +2664,8 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
NL80211_CMD_NEW_KEY);
- if (IS_ERR(hdr))
- return PTR_ERR(hdr);
+ if (!hdr)
+ return -ENOBUFS;
cookie.msg = msg;
cookie.idx = key_idx;
@@ -4770,9 +4814,9 @@ do { \
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshForwarding, 0, 1,
mask, NL80211_MESHCONF_FORWARDING,
nla_get_u8);
- FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, 1, 255,
+ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, rssi_threshold, -255, 0,
mask, NL80211_MESHCONF_RSSI_THRESHOLD,
- nla_get_u32);
+ nla_get_s32);
FILL_IN_MESH_PARAM_IF_SET(tb, cfg, ht_opmode, 0, 16,
mask, NL80211_MESHCONF_HT_OPMODE,
nla_get_u16);
@@ -5578,6 +5622,111 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
return err;
}
+static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct cfg80211_csa_settings params;
+ /* csa_attrs is defined static to avoid wasting stack space - this
+ * function is called under RTNL lock, so this should not be a problem.
+ */
+ static struct nlattr *csa_attrs[NL80211_ATTR_MAX+1];
+ u8 radar_detect_width = 0;
+ int err;
+
+ if (!rdev->ops->channel_switch ||
+ !(rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH))
+ return -EOPNOTSUPP;
+
+ /* may add IBSS support later */
+ if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
+ dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+ return -EOPNOTSUPP;
+
+ memset(&params, 0, sizeof(params));
+
+ if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
+ !info->attrs[NL80211_ATTR_CH_SWITCH_COUNT])
+ return -EINVAL;
+
+ /* only important for AP, as IBSS and mesh create IEs internally */
+ if (!info->attrs[NL80211_ATTR_CSA_IES])
+ return -EINVAL;
+
+ /* useless if AP is not running */
+ if (!wdev->beacon_interval)
+ return -EINVAL;
+
+ params.count = nla_get_u32(info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]);
+
+ err = nl80211_parse_beacon(info->attrs, &params.beacon_after);
+ if (err)
+ return err;
+
+ err = nla_parse_nested(csa_attrs, NL80211_ATTR_MAX,
+ info->attrs[NL80211_ATTR_CSA_IES],
+ nl80211_policy);
+ if (err)
+ return err;
+
+ err = nl80211_parse_beacon(csa_attrs, &params.beacon_csa);
+ if (err)
+ return err;
+
+ if (!csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON])
+ return -EINVAL;
+
+ params.counter_offset_beacon =
+ nla_get_u16(csa_attrs[NL80211_ATTR_CSA_C_OFF_BEACON]);
+ if (params.counter_offset_beacon >= params.beacon_csa.tail_len)
+ return -EINVAL;
+
+ /* sanity check - counters should be the same */
+ if (params.beacon_csa.tail[params.counter_offset_beacon] !=
+ params.count)
+ return -EINVAL;
+
+ if (csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]) {
+ params.counter_offset_presp =
+ nla_get_u16(csa_attrs[NL80211_ATTR_CSA_C_OFF_PRESP]);
+ if (params.counter_offset_presp >=
+ params.beacon_csa.probe_resp_len)
+ return -EINVAL;
+
+ if (params.beacon_csa.probe_resp[params.counter_offset_presp] !=
+ params.count)
+ return -EINVAL;
+ }
+
+ err = nl80211_parse_chandef(rdev, info, &params.chandef);
+ if (err)
+ return err;
+
+ if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef))
+ return -EINVAL;
+
+ err = cfg80211_chandef_dfs_required(wdev->wiphy, &params.chandef);
+ if (err < 0) {
+ return err;
+ } else if (err) {
+ radar_detect_width = BIT(params.chandef.width);
+ params.radar_required = true;
+ }
+
+ err = cfg80211_can_use_iftype_chan(rdev, wdev, wdev->iftype,
+ params.chandef.chan,
+ CHAN_MODE_SHARED,
+ radar_detect_width);
+ if (err)
+ return err;
+
+ if (info->attrs[NL80211_ATTR_CH_SWITCH_BLOCK_TX])
+ params.block_tx = true;
+
+ return rdev_channel_switch(rdev, dev, &params);
+}
+
static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
u32 seq, int flags,
struct cfg80211_registered_device *rdev,
@@ -5639,6 +5788,7 @@ static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
goto nla_put_failure;
if (nla_put_u16(msg, NL80211_BSS_CAPABILITY, res->capability) ||
nla_put_u32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq) ||
+ nla_put_u32(msg, NL80211_BSS_CHAN_WIDTH, res->scan_width) ||
nla_put_u32(msg, NL80211_BSS_SEEN_MS_AGO,
jiffies_to_msecs(jiffies - intbss->ts)))
goto nla_put_failure;
@@ -6319,6 +6469,8 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
switch (ibss.chandef.width) {
+ case NL80211_CHAN_WIDTH_5:
+ case NL80211_CHAN_WIDTH_10:
case NL80211_CHAN_WIDTH_20_NOHT:
break;
case NL80211_CHAN_WIDTH_20:
@@ -6346,6 +6498,19 @@ static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
return err;
}
+ if (info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
+ memcpy(&ibss.ht_capa_mask,
+ nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]),
+ sizeof(ibss.ht_capa_mask));
+
+ if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) {
+ if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK])
+ return -EINVAL;
+ memcpy(&ibss.ht_capa,
+ nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]),
+ sizeof(ibss.ht_capa));
+ }
+
if (info->attrs[NL80211_ATTR_MCAST_RATE] &&
!nl80211_parse_mcast_rate(rdev, ibss.mcast_rate,
nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE])))
@@ -6428,19 +6593,30 @@ static struct genl_multicast_group nl80211_testmode_mcgrp = {
static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct wireless_dev *wdev =
+ __cfg80211_wdev_from_attrs(genl_info_net(info), info->attrs);
int err;
+ if (!rdev->ops->testmode_cmd)
+ return -EOPNOTSUPP;
+
+ if (IS_ERR(wdev)) {
+ err = PTR_ERR(wdev);
+ if (err != -EINVAL)
+ return err;
+ wdev = NULL;
+ } else if (wdev->wiphy != &rdev->wiphy) {
+ return -EINVAL;
+ }
+
if (!info->attrs[NL80211_ATTR_TESTDATA])
return -EINVAL;
- err = -EOPNOTSUPP;
- if (rdev->ops->testmode_cmd) {
- rdev->testmode_info = info;
- err = rdev_testmode_cmd(rdev,
+ rdev->testmode_info = info;
+ err = rdev_testmode_cmd(rdev, wdev,
nla_data(info->attrs[NL80211_ATTR_TESTDATA]),
nla_len(info->attrs[NL80211_ATTR_TESTDATA]));
- rdev->testmode_info = NULL;
- }
+ rdev->testmode_info = NULL;
return err;
}
@@ -6505,6 +6681,9 @@ static int nl80211_testmode_dump(struct sk_buff *skb,
NL80211_CMD_TESTMODE);
struct nlattr *tmdata;
+ if (!hdr)
+ break;
+
if (nla_put_u32(skb, NL80211_ATTR_WIPHY, phy_idx)) {
genlmsg_cancel(skb, hdr);
break;
@@ -6613,12 +6792,14 @@ EXPORT_SYMBOL(cfg80211_testmode_alloc_event_skb);
void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
{
+ struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
void *hdr = ((void **)skb->cb)[1];
struct nlattr *data = ((void **)skb->cb)[2];
nla_nest_end(skb, data);
genlmsg_end(skb, hdr);
- genlmsg_multicast(skb, 0, nl80211_testmode_mcgrp.id, gfp);
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), skb, 0,
+ nl80211_testmode_mcgrp.id, gfp);
}
EXPORT_SYMBOL(cfg80211_testmode_event);
#endif
@@ -6947,9 +7128,8 @@ static int nl80211_remain_on_channel(struct sk_buff *skb,
hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
NL80211_CMD_REMAIN_ON_CHANNEL);
-
- if (IS_ERR(hdr)) {
- err = PTR_ERR(hdr);
+ if (!hdr) {
+ err = -ENOBUFS;
goto free_msg;
}
@@ -7247,9 +7427,8 @@ static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info)
hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
NL80211_CMD_FRAME);
-
- if (IS_ERR(hdr)) {
- err = PTR_ERR(hdr);
+ if (!hdr) {
+ err = -ENOBUFS;
goto free_msg;
}
}
@@ -7399,14 +7578,12 @@ static int nl80211_set_cqm_txe(struct genl_info *info,
u32 rate, u32 pkts, u32 intvl)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct wireless_dev *wdev;
struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
if (rate > 100 || intvl > NL80211_CQM_TXE_MAX_INTVL)
return -EINVAL;
- wdev = dev->ieee80211_ptr;
-
if (!rdev->ops->set_cqm_txe_config)
return -EOPNOTSUPP;
@@ -7421,13 +7598,15 @@ static int nl80211_set_cqm_rssi(struct genl_info *info,
s32 threshold, u32 hysteresis)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
- struct wireless_dev *wdev;
struct net_device *dev = info->user_ptr[1];
+ struct wireless_dev *wdev = dev->ieee80211_ptr;
if (threshold > 0)
return -EINVAL;
- wdev = dev->ieee80211_ptr;
+ /* disabling - hysteresis should also be zero then */
+ if (threshold == 0)
+ hysteresis = 0;
if (!rdev->ops->set_cqm_rssi_config)
return -EOPNOTSUPP;
@@ -7446,36 +7625,33 @@ static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info)
int err;
cqm = info->attrs[NL80211_ATTR_CQM];
- if (!cqm) {
- err = -EINVAL;
- goto out;
- }
+ if (!cqm)
+ return -EINVAL;
err = nla_parse_nested(attrs, NL80211_ATTR_CQM_MAX, cqm,
nl80211_attr_cqm_policy);
if (err)
- goto out;
+ return err;
if (attrs[NL80211_ATTR_CQM_RSSI_THOLD] &&
attrs[NL80211_ATTR_CQM_RSSI_HYST]) {
- s32 threshold;
- u32 hysteresis;
- threshold = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_THOLD]);
- hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]);
- err = nl80211_set_cqm_rssi(info, threshold, hysteresis);
- } else if (attrs[NL80211_ATTR_CQM_TXE_RATE] &&
- attrs[NL80211_ATTR_CQM_TXE_PKTS] &&
- attrs[NL80211_ATTR_CQM_TXE_INTVL]) {
- u32 rate, pkts, intvl;
- rate = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_RATE]);
- pkts = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_PKTS]);
- intvl = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_INTVL]);
- err = nl80211_set_cqm_txe(info, rate, pkts, intvl);
- } else
- err = -EINVAL;
+ s32 threshold = nla_get_s32(attrs[NL80211_ATTR_CQM_RSSI_THOLD]);
+ u32 hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]);
-out:
- return err;
+ return nl80211_set_cqm_rssi(info, threshold, hysteresis);
+ }
+
+ if (attrs[NL80211_ATTR_CQM_TXE_RATE] &&
+ attrs[NL80211_ATTR_CQM_TXE_PKTS] &&
+ attrs[NL80211_ATTR_CQM_TXE_INTVL]) {
+ u32 rate = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_RATE]);
+ u32 pkts = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_PKTS]);
+ u32 intvl = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_INTVL]);
+
+ return nl80211_set_cqm_txe(info, rate, pkts, intvl);
+ }
+
+ return -EINVAL;
}
static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
@@ -7591,12 +7767,11 @@ static int nl80211_send_wowlan_patterns(struct sk_buff *msg,
if (!nl_pat)
return -ENOBUFS;
pat_len = wowlan->patterns[i].pattern_len;
- if (nla_put(msg, NL80211_WOWLAN_PKTPAT_MASK,
- DIV_ROUND_UP(pat_len, 8),
+ if (nla_put(msg, NL80211_PKTPAT_MASK, DIV_ROUND_UP(pat_len, 8),
wowlan->patterns[i].mask) ||
- nla_put(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
- pat_len, wowlan->patterns[i].pattern) ||
- nla_put_u32(msg, NL80211_WOWLAN_PKTPAT_OFFSET,
+ nla_put(msg, NL80211_PKTPAT_PATTERN, pat_len,
+ wowlan->patterns[i].pattern) ||
+ nla_put_u32(msg, NL80211_PKTPAT_OFFSET,
wowlan->patterns[i].pkt_offset))
return -ENOBUFS;
nla_nest_end(msg, nl_pat);
@@ -7937,7 +8112,7 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
struct nlattr *pat;
int n_patterns = 0;
int rem, pat_len, mask_len, pkt_offset;
- struct nlattr *pat_tb[NUM_NL80211_WOWLAN_PKTPAT];
+ struct nlattr *pat_tb[NUM_NL80211_PKTPAT];
nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
rem)
@@ -7956,26 +8131,25 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
nla_for_each_nested(pat, tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
rem) {
- nla_parse(pat_tb, MAX_NL80211_WOWLAN_PKTPAT,
- nla_data(pat), nla_len(pat), NULL);
+ nla_parse(pat_tb, MAX_NL80211_PKTPAT, nla_data(pat),
+ nla_len(pat), NULL);
err = -EINVAL;
- if (!pat_tb[NL80211_WOWLAN_PKTPAT_MASK] ||
- !pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN])
+ if (!pat_tb[NL80211_PKTPAT_MASK] ||
+ !pat_tb[NL80211_PKTPAT_PATTERN])
goto error;
- pat_len = nla_len(pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN]);
+ pat_len = nla_len(pat_tb[NL80211_PKTPAT_PATTERN]);
mask_len = DIV_ROUND_UP(pat_len, 8);
- if (nla_len(pat_tb[NL80211_WOWLAN_PKTPAT_MASK]) !=
- mask_len)
+ if (nla_len(pat_tb[NL80211_PKTPAT_MASK]) != mask_len)
goto error;
if (pat_len > wowlan->pattern_max_len ||
pat_len < wowlan->pattern_min_len)
goto error;
- if (!pat_tb[NL80211_WOWLAN_PKTPAT_OFFSET])
+ if (!pat_tb[NL80211_PKTPAT_OFFSET])
pkt_offset = 0;
else
pkt_offset = nla_get_u32(
- pat_tb[NL80211_WOWLAN_PKTPAT_OFFSET]);
+ pat_tb[NL80211_PKTPAT_OFFSET]);
if (pkt_offset > wowlan->max_pkt_offset)
goto error;
new_triggers.patterns[i].pkt_offset = pkt_offset;
@@ -7989,11 +8163,11 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
new_triggers.patterns[i].pattern =
new_triggers.patterns[i].mask + mask_len;
memcpy(new_triggers.patterns[i].mask,
- nla_data(pat_tb[NL80211_WOWLAN_PKTPAT_MASK]),
+ nla_data(pat_tb[NL80211_PKTPAT_MASK]),
mask_len);
new_triggers.patterns[i].pattern_len = pat_len;
memcpy(new_triggers.patterns[i].pattern,
- nla_data(pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN]),
+ nla_data(pat_tb[NL80211_PKTPAT_PATTERN]),
pat_len);
i++;
}
@@ -8032,6 +8206,264 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
}
#endif
+static int nl80211_send_coalesce_rules(struct sk_buff *msg,
+ struct cfg80211_registered_device *rdev)
+{
+ struct nlattr *nl_pats, *nl_pat, *nl_rule, *nl_rules;
+ int i, j, pat_len;
+ struct cfg80211_coalesce_rules *rule;
+
+ if (!rdev->coalesce->n_rules)
+ return 0;
+
+ nl_rules = nla_nest_start(msg, NL80211_ATTR_COALESCE_RULE);
+ if (!nl_rules)
+ return -ENOBUFS;
+
+ for (i = 0; i < rdev->coalesce->n_rules; i++) {
+ nl_rule = nla_nest_start(msg, i + 1);
+ if (!nl_rule)
+ return -ENOBUFS;
+
+ rule = &rdev->coalesce->rules[i];
+ if (nla_put_u32(msg, NL80211_ATTR_COALESCE_RULE_DELAY,
+ rule->delay))
+ return -ENOBUFS;
+
+ if (nla_put_u32(msg, NL80211_ATTR_COALESCE_RULE_CONDITION,
+ rule->condition))
+ return -ENOBUFS;
+
+ nl_pats = nla_nest_start(msg,
+ NL80211_ATTR_COALESCE_RULE_PKT_PATTERN);
+ if (!nl_pats)
+ return -ENOBUFS;
+
+ for (j = 0; j < rule->n_patterns; j++) {
+ nl_pat = nla_nest_start(msg, j + 1);
+ if (!nl_pat)
+ return -ENOBUFS;
+ pat_len = rule->patterns[j].pattern_len;
+ if (nla_put(msg, NL80211_PKTPAT_MASK,
+ DIV_ROUND_UP(pat_len, 8),
+ rule->patterns[j].mask) ||
+ nla_put(msg, NL80211_PKTPAT_PATTERN, pat_len,
+ rule->patterns[j].pattern) ||
+ nla_put_u32(msg, NL80211_PKTPAT_OFFSET,
+ rule->patterns[j].pkt_offset))
+ return -ENOBUFS;
+ nla_nest_end(msg, nl_pat);
+ }
+ nla_nest_end(msg, nl_pats);
+ nla_nest_end(msg, nl_rule);
+ }
+ nla_nest_end(msg, nl_rules);
+
+ return 0;
+}
+
+static int nl80211_get_coalesce(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ struct sk_buff *msg;
+ void *hdr;
+
+ if (!rdev->wiphy.coalesce)
+ return -EOPNOTSUPP;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
+ NL80211_CMD_GET_COALESCE);
+ if (!hdr)
+ goto nla_put_failure;
+
+ if (rdev->coalesce && nl80211_send_coalesce_rules(msg, rdev))
+ goto nla_put_failure;
+
+ genlmsg_end(msg, hdr);
+ return genlmsg_reply(msg, info);
+
+nla_put_failure:
+ nlmsg_free(msg);
+ return -ENOBUFS;
+}
+
+void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev)
+{
+ struct cfg80211_coalesce *coalesce = rdev->coalesce;
+ int i, j;
+ struct cfg80211_coalesce_rules *rule;
+
+ if (!coalesce)
+ return;
+
+ for (i = 0; i < coalesce->n_rules; i++) {
+ rule = &coalesce->rules[i];
+ for (j = 0; j < rule->n_patterns; j++)
+ kfree(rule->patterns[j].mask);
+ kfree(rule->patterns);
+ }
+ kfree(coalesce->rules);
+ kfree(coalesce);
+ rdev->coalesce = NULL;
+}
+
+static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
+ struct nlattr *rule,
+ struct cfg80211_coalesce_rules *new_rule)
+{
+ int err, i;
+ const struct wiphy_coalesce_support *coalesce = rdev->wiphy.coalesce;
+ struct nlattr *tb[NUM_NL80211_ATTR_COALESCE_RULE], *pat;
+ int rem, pat_len, mask_len, pkt_offset, n_patterns = 0;
+ struct nlattr *pat_tb[NUM_NL80211_PKTPAT];
+
+ err = nla_parse(tb, NL80211_ATTR_COALESCE_RULE_MAX, nla_data(rule),
+ nla_len(rule), nl80211_coalesce_policy);
+ if (err)
+ return err;
+
+ if (tb[NL80211_ATTR_COALESCE_RULE_DELAY])
+ new_rule->delay =
+ nla_get_u32(tb[NL80211_ATTR_COALESCE_RULE_DELAY]);
+ if (new_rule->delay > coalesce->max_delay)
+ return -EINVAL;
+
+ if (tb[NL80211_ATTR_COALESCE_RULE_CONDITION])
+ new_rule->condition =
+ nla_get_u32(tb[NL80211_ATTR_COALESCE_RULE_CONDITION]);
+ if (new_rule->condition != NL80211_COALESCE_CONDITION_MATCH &&
+ new_rule->condition != NL80211_COALESCE_CONDITION_NO_MATCH)
+ return -EINVAL;
+
+ if (!tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN])
+ return -EINVAL;
+
+ nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN],
+ rem)
+ n_patterns++;
+ if (n_patterns > coalesce->n_patterns)
+ return -EINVAL;
+
+ new_rule->patterns = kcalloc(n_patterns, sizeof(new_rule->patterns[0]),
+ GFP_KERNEL);
+ if (!new_rule->patterns)
+ return -ENOMEM;
+
+ new_rule->n_patterns = n_patterns;
+ i = 0;
+
+ nla_for_each_nested(pat, tb[NL80211_ATTR_COALESCE_RULE_PKT_PATTERN],
+ rem) {
+ nla_parse(pat_tb, MAX_NL80211_PKTPAT, nla_data(pat),
+ nla_len(pat), NULL);
+ if (!pat_tb[NL80211_PKTPAT_MASK] ||
+ !pat_tb[NL80211_PKTPAT_PATTERN])
+ return -EINVAL;
+ pat_len = nla_len(pat_tb[NL80211_PKTPAT_PATTERN]);
+ mask_len = DIV_ROUND_UP(pat_len, 8);
+ if (nla_len(pat_tb[NL80211_PKTPAT_MASK]) != mask_len)
+ return -EINVAL;
+ if (pat_len > coalesce->pattern_max_len ||
+ pat_len < coalesce->pattern_min_len)
+ return -EINVAL;
+
+ if (!pat_tb[NL80211_PKTPAT_OFFSET])
+ pkt_offset = 0;
+ else
+ pkt_offset = nla_get_u32(pat_tb[NL80211_PKTPAT_OFFSET]);
+ if (pkt_offset > coalesce->max_pkt_offset)
+ return -EINVAL;
+ new_rule->patterns[i].pkt_offset = pkt_offset;
+
+ new_rule->patterns[i].mask =
+ kmalloc(mask_len + pat_len, GFP_KERNEL);
+ if (!new_rule->patterns[i].mask)
+ return -ENOMEM;
+ new_rule->patterns[i].pattern =
+ new_rule->patterns[i].mask + mask_len;
+ memcpy(new_rule->patterns[i].mask,
+ nla_data(pat_tb[NL80211_PKTPAT_MASK]), mask_len);
+ new_rule->patterns[i].pattern_len = pat_len;
+ memcpy(new_rule->patterns[i].pattern,
+ nla_data(pat_tb[NL80211_PKTPAT_PATTERN]), pat_len);
+ i++;
+ }
+
+ return 0;
+}
+
+static int nl80211_set_coalesce(struct sk_buff *skb, struct genl_info *info)
+{
+ struct cfg80211_registered_device *rdev = info->user_ptr[0];
+ const struct wiphy_coalesce_support *coalesce = rdev->wiphy.coalesce;
+ struct cfg80211_coalesce new_coalesce = {};
+ struct cfg80211_coalesce *n_coalesce;
+ int err, rem_rule, n_rules = 0, i, j;
+ struct nlattr *rule;
+ struct cfg80211_coalesce_rules *tmp_rule;
+
+ if (!rdev->wiphy.coalesce || !rdev->ops->set_coalesce)
+ return -EOPNOTSUPP;
+
+ if (!info->attrs[NL80211_ATTR_COALESCE_RULE]) {
+ cfg80211_rdev_free_coalesce(rdev);
+ rdev->ops->set_coalesce(&rdev->wiphy, NULL);
+ return 0;
+ }
+
+ nla_for_each_nested(rule, info->attrs[NL80211_ATTR_COALESCE_RULE],
+ rem_rule)
+ n_rules++;
+ if (n_rules > coalesce->n_rules)
+ return -EINVAL;
+
+ new_coalesce.rules = kcalloc(n_rules, sizeof(new_coalesce.rules[0]),
+ GFP_KERNEL);
+ if (!new_coalesce.rules)
+ return -ENOMEM;
+
+ new_coalesce.n_rules = n_rules;
+ i = 0;
+
+ nla_for_each_nested(rule, info->attrs[NL80211_ATTR_COALESCE_RULE],
+ rem_rule) {
+ err = nl80211_parse_coalesce_rule(rdev, rule,
+ &new_coalesce.rules[i]);
+ if (err)
+ goto error;
+
+ i++;
+ }
+
+ err = rdev->ops->set_coalesce(&rdev->wiphy, &new_coalesce);
+ if (err)
+ goto error;
+
+ n_coalesce = kmemdup(&new_coalesce, sizeof(new_coalesce), GFP_KERNEL);
+ if (!n_coalesce) {
+ err = -ENOMEM;
+ goto error;
+ }
+ cfg80211_rdev_free_coalesce(rdev);
+ rdev->coalesce = n_coalesce;
+
+ return 0;
+error:
+ for (i = 0; i < new_coalesce.n_rules; i++) {
+ tmp_rule = &new_coalesce.rules[i];
+ for (j = 0; j < tmp_rule->n_patterns; j++)
+ kfree(tmp_rule->patterns[j].mask);
+ kfree(tmp_rule->patterns);
+ }
+ kfree(new_coalesce.rules);
+
+ return err;
+}
+
static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info)
{
struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -8128,9 +8560,8 @@ static int nl80211_probe_client(struct sk_buff *skb,
hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
NL80211_CMD_PROBE_CLIENT);
-
- if (IS_ERR(hdr)) {
- err = PTR_ERR(hdr);
+ if (!hdr) {
+ err = -ENOBUFS;
goto free_msg;
}
@@ -9039,7 +9470,30 @@ static struct genl_ops nl80211_ops[] = {
.flags = GENL_ADMIN_PERM,
.internal_flags = NL80211_FLAG_NEED_WDEV_UP |
NL80211_FLAG_NEED_RTNL,
- }
+ },
+ {
+ .cmd = NL80211_CMD_GET_COALESCE,
+ .doit = nl80211_get_coalesce,
+ .policy = nl80211_policy,
+ .internal_flags = NL80211_FLAG_NEED_WIPHY |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_SET_COALESCE,
+ .doit = nl80211_set_coalesce,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_WIPHY |
+ NL80211_FLAG_NEED_RTNL,
+ },
+ {
+ .cmd = NL80211_CMD_CHANNEL_SWITCH,
+ .doit = nl80211_channel_switch,
+ .policy = nl80211_policy,
+ .flags = GENL_ADMIN_PERM,
+ .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
+ NL80211_FLAG_NEED_RTNL,
+ },
};
static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -9996,7 +10450,7 @@ EXPORT_SYMBOL(cfg80211_rx_unexpected_4addr_frame);
int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev, u32 nlportid,
int freq, int sig_dbm,
- const u8 *buf, size_t len, gfp_t gfp)
+ const u8 *buf, size_t len, u32 flags, gfp_t gfp)
{
struct net_device *netdev = wdev->netdev;
struct sk_buff *msg;
@@ -10019,7 +10473,9 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
(sig_dbm &&
nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
- nla_put(msg, NL80211_ATTR_FRAME, len, buf))
+ nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
+ (flags &&
+ nla_put_u32(msg, NL80211_ATTR_RXMGMT_FLAGS, flags)))
goto nla_put_failure;
genlmsg_end(msg, hdr);
@@ -10064,7 +10520,8 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
genlmsg_end(msg, hdr);
- genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp);
+ genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+ nl80211_mlme_mcgrp.id, gfp);
return;
nla_put_failure:
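A minimal user-space sketch (not part of the patch, for illustration only) of the attribute nesting that the nl80211_set_coalesce()/nl80211_parse_coalesce_rule() code added above expects for NL80211_CMD_SET_COALESCE. It assumes libnl-3 and an nl80211.h that already defines the new NL80211_*COALESCE* and NL80211_PKTPAT_* constants; the wiphy index, delay, pattern and mask bytes are placeholders, and error handling is omitted.

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

static int send_one_coalesce_rule(struct nl_sock *sk, int nl80211_id)
{
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *rules, *rule, *pats, *pat;
	/* placeholder pattern: match two bytes at offset 0 */
	static const unsigned char pattern[] = { 0x08, 0x00 };
	static const unsigned char mask[] = { 0x03 };	/* DIV_ROUND_UP(2, 8) = 1 byte */

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, nl80211_id, 0, 0,
		    NL80211_CMD_SET_COALESCE, 0);
	nla_put_u32(msg, NL80211_ATTR_WIPHY, 0);	/* placeholder wiphy index */

	rules = nla_nest_start(msg, NL80211_ATTR_COALESCE_RULE);
	rule = nla_nest_start(msg, 1);			/* rule #1 */
	nla_put_u32(msg, NL80211_ATTR_COALESCE_RULE_DELAY, 50);
	nla_put_u32(msg, NL80211_ATTR_COALESCE_RULE_CONDITION,
		    NL80211_COALESCE_CONDITION_MATCH);
	pats = nla_nest_start(msg, NL80211_ATTR_COALESCE_RULE_PKT_PATTERN);
	pat = nla_nest_start(msg, 1);			/* pattern #1 */
	nla_put(msg, NL80211_PKTPAT_MASK, sizeof(mask), mask);
	nla_put(msg, NL80211_PKTPAT_PATTERN, sizeof(pattern), pattern);
	nla_put_u32(msg, NL80211_PKTPAT_OFFSET, 0);
	nla_nest_end(msg, pat);
	nla_nest_end(msg, pats);
	nla_nest_end(msg, rule);
	nla_nest_end(msg, rules);

	return nl_send_auto(sk, msg);
}

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int id;

	genl_connect(sk);
	id = genl_ctrl_resolve(sk, "nl80211");
	send_one_coalesce_rule(sk, id);
	nl_socket_free(sk);
	return 0;
}

The nesting mirrors what nl80211_parse_coalesce_rule() walks: indexed rule entries under NL80211_ATTR_COALESCE_RULE, each carrying a delay, a condition and an indexed list of NL80211_PKTPAT_* patterns, with the mask length rounded up to DIV_ROUND_UP(pattern_len, 8) bytes as the kernel-side check requires.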
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index a4073e808c1..2c0f2b3c07c 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -66,7 +66,7 @@ void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev, u32 nlpid,
int freq, int sig_dbm,
- const u8 *buf, size_t len, gfp_t gfp);
+ const u8 *buf, size_t len, u32 flags, gfp_t gfp);
void
nl80211_radar_notify(struct cfg80211_registered_device *rdev,
@@ -74,4 +74,6 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev,
enum nl80211_radar_event event,
struct net_device *netdev, gfp_t gfp);
+void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev);
+
#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h
index 9f15f0ac824..37ce9fdfe93 100644
--- a/net/wireless/rdev-ops.h
+++ b/net/wireless/rdev-ops.h
@@ -516,11 +516,12 @@ static inline void rdev_rfkill_poll(struct cfg80211_registered_device *rdev)
#ifdef CONFIG_NL80211_TESTMODE
static inline int rdev_testmode_cmd(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev,
void *data, int len)
{
int ret;
- trace_rdev_testmode_cmd(&rdev->wiphy);
- ret = rdev->ops->testmode_cmd(&rdev->wiphy, data, len);
+ trace_rdev_testmode_cmd(&rdev->wiphy, wdev);
+ ret = rdev->ops->testmode_cmd(&rdev->wiphy, wdev, data, len);
trace_rdev_return_int(&rdev->wiphy, ret);
return ret;
}
@@ -923,4 +924,16 @@ static inline void rdev_crit_proto_stop(struct cfg80211_registered_device *rdev,
trace_rdev_return_void(&rdev->wiphy);
}
+static inline int rdev_channel_switch(struct cfg80211_registered_device *rdev,
+ struct net_device *dev,
+ struct cfg80211_csa_settings *params)
+{
+ int ret;
+
+ trace_rdev_channel_switch(&rdev->wiphy, dev, params);
+ ret = rdev->ops->channel_switch(&rdev->wiphy, dev, params);
+ trace_rdev_return_int(&rdev->wiphy, ret);
+ return ret;
+}
+
#endif /* __CFG80211_RDEV_OPS */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 5a24c986f34..de06d5d1287 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2247,10 +2247,13 @@ int reg_device_uevent(struct device *dev, struct kobj_uevent_env *env)
void wiphy_regulatory_register(struct wiphy *wiphy)
{
+ struct regulatory_request *lr;
+
if (!reg_dev_ignore_cell_hint(wiphy))
reg_num_devs_support_basehint++;
- wiphy_update_regulatory(wiphy, NL80211_REGDOM_SET_BY_CORE);
+ lr = get_last_request();
+ wiphy_update_regulatory(wiphy, lr->initiator);
}
void wiphy_regulatory_deregister(struct wiphy *wiphy)
@@ -2279,7 +2282,9 @@ void wiphy_regulatory_deregister(struct wiphy *wiphy)
static void reg_timeout_work(struct work_struct *work)
{
REG_DBG_PRINT("Timeout while waiting for CRDA to reply, restoring regulatory settings\n");
+ rtnl_lock();
restore_regulatory_settings(true);
+ rtnl_unlock();
}
int __init regulatory_init(void)
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index ae8c186b50d..eeb71480f1a 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -465,10 +465,6 @@ static int cmp_bss(struct cfg80211_bss *a,
}
}
- /*
- * we can't use compare_ether_addr here since we need a < > operator.
- * The binary return value of compare_ether_addr isn't enough
- */
r = memcmp(a->bssid, b->bssid, sizeof(a->bssid));
if (r)
return r;
@@ -651,6 +647,8 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *dev,
continue;
if (bss->pub.channel != new->pub.channel)
continue;
+ if (bss->pub.scan_width != new->pub.scan_width)
+ continue;
if (rcu_access_pointer(bss->pub.beacon_ies))
continue;
ies = rcu_access_pointer(bss->pub.ies);
@@ -870,11 +868,12 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
/* Returned bss is reference counted and must be cleaned up appropriately. */
struct cfg80211_bss*
-cfg80211_inform_bss(struct wiphy *wiphy,
- struct ieee80211_channel *channel,
- const u8 *bssid, u64 tsf, u16 capability,
- u16 beacon_interval, const u8 *ie, size_t ielen,
- s32 signal, gfp_t gfp)
+cfg80211_inform_bss_width(struct wiphy *wiphy,
+ struct ieee80211_channel *channel,
+ enum nl80211_bss_scan_width scan_width,
+ const u8 *bssid, u64 tsf, u16 capability,
+ u16 beacon_interval, const u8 *ie, size_t ielen,
+ s32 signal, gfp_t gfp)
{
struct cfg80211_bss_ies *ies;
struct cfg80211_internal_bss tmp = {}, *res;
@@ -892,6 +891,7 @@ cfg80211_inform_bss(struct wiphy *wiphy,
memcpy(tmp.pub.bssid, bssid, ETH_ALEN);
tmp.pub.channel = channel;
+ tmp.pub.scan_width = scan_width;
tmp.pub.signal = signal;
tmp.pub.beacon_interval = beacon_interval;
tmp.pub.capability = capability;
@@ -924,14 +924,15 @@ cfg80211_inform_bss(struct wiphy *wiphy,
/* cfg80211_bss_update gives us a referenced result */
return &res->pub;
}
-EXPORT_SYMBOL(cfg80211_inform_bss);
+EXPORT_SYMBOL(cfg80211_inform_bss_width);
/* Returned bss is reference counted and must be cleaned up appropriately. */
struct cfg80211_bss *
-cfg80211_inform_bss_frame(struct wiphy *wiphy,
- struct ieee80211_channel *channel,
- struct ieee80211_mgmt *mgmt, size_t len,
- s32 signal, gfp_t gfp)
+cfg80211_inform_bss_width_frame(struct wiphy *wiphy,
+ struct ieee80211_channel *channel,
+ enum nl80211_bss_scan_width scan_width,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ s32 signal, gfp_t gfp)
{
struct cfg80211_internal_bss tmp = {}, *res;
struct cfg80211_bss_ies *ies;
@@ -941,7 +942,8 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
BUILD_BUG_ON(offsetof(struct ieee80211_mgmt, u.probe_resp.variable) !=
offsetof(struct ieee80211_mgmt, u.beacon.variable));
- trace_cfg80211_inform_bss_frame(wiphy, channel, mgmt, len, signal);
+ trace_cfg80211_inform_bss_width_frame(wiphy, channel, scan_width, mgmt,
+ len, signal);
if (WARN_ON(!mgmt))
return NULL;
@@ -976,6 +978,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
memcpy(tmp.pub.bssid, mgmt->bssid, ETH_ALEN);
tmp.pub.channel = channel;
+ tmp.pub.scan_width = scan_width;
tmp.pub.signal = signal;
tmp.pub.beacon_interval = le16_to_cpu(mgmt->u.probe_resp.beacon_int);
tmp.pub.capability = le16_to_cpu(mgmt->u.probe_resp.capab_info);
@@ -991,7 +994,7 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
/* cfg80211_bss_update gives us a referenced result */
return &res->pub;
}
-EXPORT_SYMBOL(cfg80211_inform_bss_frame);
+EXPORT_SYMBOL(cfg80211_inform_bss_width_frame);
void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
{
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 1d3cfb1a3f2..20e86a95dc4 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -34,8 +34,10 @@ struct cfg80211_conn {
CFG80211_CONN_SCAN_AGAIN,
CFG80211_CONN_AUTHENTICATE_NEXT,
CFG80211_CONN_AUTHENTICATING,
+ CFG80211_CONN_AUTH_FAILED,
CFG80211_CONN_ASSOCIATE_NEXT,
CFG80211_CONN_ASSOCIATING,
+ CFG80211_CONN_ASSOC_FAILED,
CFG80211_CONN_DEAUTH,
CFG80211_CONN_CONNECTED,
} state;
@@ -164,6 +166,8 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
NULL, 0,
params->key, params->key_len,
params->key_idx, NULL, 0);
+ case CFG80211_CONN_AUTH_FAILED:
+ return -ENOTCONN;
case CFG80211_CONN_ASSOCIATE_NEXT:
BUG_ON(!rdev->ops->assoc);
wdev->conn->state = CFG80211_CONN_ASSOCIATING;
@@ -188,10 +192,17 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
WLAN_REASON_DEAUTH_LEAVING,
false);
return err;
+ case CFG80211_CONN_ASSOC_FAILED:
+ cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
+ NULL, 0,
+ WLAN_REASON_DEAUTH_LEAVING, false);
+ return -ENOTCONN;
case CFG80211_CONN_DEAUTH:
cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
NULL, 0,
WLAN_REASON_DEAUTH_LEAVING, false);
+ /* free directly, disconnected event already sent */
+ cfg80211_sme_free(wdev);
return 0;
default:
return 0;
@@ -371,7 +382,7 @@ bool cfg80211_sme_rx_assoc_resp(struct wireless_dev *wdev, u16 status)
return true;
}
- wdev->conn->state = CFG80211_CONN_DEAUTH;
+ wdev->conn->state = CFG80211_CONN_ASSOC_FAILED;
schedule_work(&rdev->conn_work);
return false;
}
@@ -383,7 +394,13 @@ void cfg80211_sme_deauth(struct wireless_dev *wdev)
void cfg80211_sme_auth_timeout(struct wireless_dev *wdev)
{
- cfg80211_sme_free(wdev);
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+
+ if (!wdev->conn)
+ return;
+
+ wdev->conn->state = CFG80211_CONN_AUTH_FAILED;
+ schedule_work(&rdev->conn_work);
}
void cfg80211_sme_disassoc(struct wireless_dev *wdev)
@@ -399,7 +416,13 @@ void cfg80211_sme_disassoc(struct wireless_dev *wdev)
void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev)
{
- cfg80211_sme_disassoc(wdev);
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
+
+ if (!wdev->conn)
+ return;
+
+ wdev->conn->state = CFG80211_CONN_ASSOC_FAILED;
+ schedule_work(&rdev->conn_work);
}
static int cfg80211_sme_connect(struct wireless_dev *wdev,
@@ -953,21 +976,19 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
struct net_device *dev, u16 reason, bool wextev)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
- int err;
+ int err = 0;
ASSERT_WDEV_LOCK(wdev);
kfree(wdev->connect_keys);
wdev->connect_keys = NULL;
- if (wdev->conn) {
+ if (wdev->conn)
err = cfg80211_sme_disconnect(wdev, reason);
- } else if (!rdev->ops->disconnect) {
+ else if (!rdev->ops->disconnect)
cfg80211_mlme_down(rdev, dev);
- err = 0;
- } else {
+ else if (wdev->current_bss)
err = rdev_disconnect(rdev, dev, reason);
- }
return err;
}
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index a23253e0635..9ee6bc1a761 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -30,7 +30,8 @@ static ssize_t name ## _show(struct device *dev, \
char *buf) \
{ \
return sprintf(buf, fmt "\n", dev_to_rdev(dev)->member); \
-}
+} \
+static DEVICE_ATTR_RO(name)
SHOW_FMT(index, "%d", wiphy_idx);
SHOW_FMT(macaddress, "%pM", wiphy.perm_addr);
@@ -42,7 +43,7 @@ static ssize_t name_show(struct device *dev,
struct wiphy *wiphy = &dev_to_rdev(dev)->wiphy;
return sprintf(buf, "%s\n", dev_name(&wiphy->dev));
}
-
+static DEVICE_ATTR_RO(name);
static ssize_t addresses_show(struct device *dev,
struct device_attribute *attr,
@@ -60,15 +61,17 @@ static ssize_t addresses_show(struct device *dev,
return buf - start;
}
-
-static struct device_attribute ieee80211_dev_attrs[] = {
- __ATTR_RO(index),
- __ATTR_RO(macaddress),
- __ATTR_RO(address_mask),
- __ATTR_RO(addresses),
- __ATTR_RO(name),
- {}
+static DEVICE_ATTR_RO(addresses);
+
+static struct attribute *ieee80211_attrs[] = {
+ &dev_attr_index.attr,
+ &dev_attr_macaddress.attr,
+ &dev_attr_address_mask.attr,
+ &dev_attr_addresses.attr,
+ &dev_attr_name.attr,
+ NULL,
};
+ATTRIBUTE_GROUPS(ieee80211);
static void wiphy_dev_release(struct device *dev)
{
@@ -146,7 +149,7 @@ struct class ieee80211_class = {
.name = "ieee80211",
.owner = THIS_MODULE,
.dev_release = wiphy_dev_release,
- .dev_attrs = ieee80211_dev_attrs,
+ .dev_groups = ieee80211_groups,
.dev_uevent = wiphy_uevent,
#ifdef CONFIG_PM
.suspend = wiphy_suspend,
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index e1534baf2eb..ba5f0d6614d 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -1293,15 +1293,17 @@ TRACE_EVENT(rdev_return_int_int,
#ifdef CONFIG_NL80211_TESTMODE
TRACE_EVENT(rdev_testmode_cmd,
- TP_PROTO(struct wiphy *wiphy),
- TP_ARGS(wiphy),
+ TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
+ TP_ARGS(wiphy, wdev),
TP_STRUCT__entry(
WIPHY_ENTRY
+ WDEV_ENTRY
),
TP_fast_assign(
WIPHY_ASSIGN;
+ WDEV_ASSIGN;
),
- TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG)
+ TP_printk(WIPHY_PR_FMT WDEV_PR_FMT, WIPHY_PR_ARG, WDEV_PR_ARG)
);
TRACE_EVENT(rdev_testmode_dump,
@@ -1841,6 +1843,39 @@ TRACE_EVENT(rdev_crit_proto_stop,
WIPHY_PR_ARG, WDEV_PR_ARG)
);
+TRACE_EVENT(rdev_channel_switch,
+ TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+ struct cfg80211_csa_settings *params),
+ TP_ARGS(wiphy, netdev, params),
+ TP_STRUCT__entry(
+ WIPHY_ENTRY
+ NETDEV_ENTRY
+ CHAN_DEF_ENTRY
+ __field(u16, counter_offset_beacon)
+ __field(u16, counter_offset_presp)
+ __field(bool, radar_required)
+ __field(bool, block_tx)
+ __field(u8, count)
+ ),
+ TP_fast_assign(
+ WIPHY_ASSIGN;
+ NETDEV_ASSIGN;
+ CHAN_DEF_ASSIGN(&params->chandef);
+ __entry->counter_offset_beacon = params->counter_offset_beacon;
+ __entry->counter_offset_presp = params->counter_offset_presp;
+ __entry->radar_required = params->radar_required;
+ __entry->block_tx = params->block_tx;
+ __entry->count = params->count;
+ ),
+ TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", " CHAN_DEF_PR_FMT
+ ", block_tx: %d, count: %u, radar_required: %d"
+ ", counter offsets (beacon/presp): %u/%u",
+ WIPHY_PR_ARG, NETDEV_PR_ARG, CHAN_DEF_PR_ARG,
+ __entry->block_tx, __entry->count, __entry->radar_required,
+ __entry->counter_offset_beacon,
+ __entry->counter_offset_presp)
+);
+
/*************************************************************
* cfg80211 exported functions traces *
*************************************************************/
@@ -2391,26 +2426,30 @@ TRACE_EVENT(cfg80211_get_bss,
__entry->capa_mask, __entry->capa_val)
);
-TRACE_EVENT(cfg80211_inform_bss_frame,
+TRACE_EVENT(cfg80211_inform_bss_width_frame,
TP_PROTO(struct wiphy *wiphy, struct ieee80211_channel *channel,
+ enum nl80211_bss_scan_width scan_width,
struct ieee80211_mgmt *mgmt, size_t len,
s32 signal),
- TP_ARGS(wiphy, channel, mgmt, len, signal),
+ TP_ARGS(wiphy, channel, scan_width, mgmt, len, signal),
TP_STRUCT__entry(
WIPHY_ENTRY
CHAN_ENTRY
+ __field(enum nl80211_bss_scan_width, scan_width)
__dynamic_array(u8, mgmt, len)
__field(s32, signal)
),
TP_fast_assign(
WIPHY_ASSIGN;
CHAN_ASSIGN(channel);
+ __entry->scan_width = scan_width;
if (mgmt)
memcpy(__get_dynamic_array(mgmt), mgmt, len);
__entry->signal = signal;
),
- TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "signal: %d",
- WIPHY_PR_ARG, CHAN_PR_ARG, __entry->signal)
+ TP_printk(WIPHY_PR_FMT ", " CHAN_PR_FMT "(scan_width: %d) signal: %d",
+ WIPHY_PR_ARG, CHAN_PR_ARG, __entry->scan_width,
+ __entry->signal)
);
DECLARE_EVENT_CLASS(cfg80211_bss_evt,
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 74458b7f61e..ce090c1c5e4 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -33,7 +33,8 @@ ieee80211_get_response_rate(struct ieee80211_supported_band *sband,
}
EXPORT_SYMBOL(ieee80211_get_response_rate);
-u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband)
+u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband,
+ enum nl80211_bss_scan_width scan_width)
{
struct ieee80211_rate *bitrates;
u32 mandatory_rates = 0;
@@ -43,10 +44,15 @@ u32 ieee80211_mandatory_rates(struct ieee80211_supported_band *sband)
if (WARN_ON(!sband))
return 1;
- if (sband->band == IEEE80211_BAND_2GHZ)
- mandatory_flag = IEEE80211_RATE_MANDATORY_B;
- else
+ if (sband->band == IEEE80211_BAND_2GHZ) {
+ if (scan_width == NL80211_BSS_CHAN_WIDTH_5 ||
+ scan_width == NL80211_BSS_CHAN_WIDTH_10)
+ mandatory_flag = IEEE80211_RATE_MANDATORY_G;
+ else
+ mandatory_flag = IEEE80211_RATE_MANDATORY_B;
+ } else {
mandatory_flag = IEEE80211_RATE_MANDATORY_A;
+ }
bitrates = sband->bitrates;
for (i = 0; i < sband->n_bitrates; i++)
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 66c638730c7..b8253250d72 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -156,6 +156,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
case X25_FAC_CALLING_AE:
if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
return -1;
+ if (p[2] > X25_MAX_AE_LEN)
+ return -1;
dte_facs->calling_len = p[2];
memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
*vc_fac_mask |= X25_MASK_CALLING_AE;
@@ -163,6 +165,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities,
case X25_FAC_CALLED_AE:
if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
return -1;
+ if (p[2] > X25_MAX_AE_LEN)
+ return -1;
dte_facs->called_len = p[2];
memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
*vc_fac_mask |= X25_MASK_CALLED_AE;
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index eb4a8428864..3bb2cdc13b4 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -214,5 +214,26 @@ int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
return inner_mode->afinfo->extract_output(x, skb);
}
+void xfrm_local_error(struct sk_buff *skb, int mtu)
+{
+ unsigned int proto;
+ struct xfrm_state_afinfo *afinfo;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ proto = AF_INET;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ proto = AF_INET6;
+ else
+ return;
+
+ afinfo = xfrm_state_get_afinfo(proto);
+ if (!afinfo)
+ return;
+
+ afinfo->local_error(skb, mtu);
+ xfrm_state_put_afinfo(afinfo);
+}
+
EXPORT_SYMBOL_GPL(xfrm_output);
EXPORT_SYMBOL_GPL(xfrm_inner_extract_output);
+EXPORT_SYMBOL_GPL(xfrm_local_error);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index e52cab3591d..ed38d5d81f9 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -308,7 +308,7 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
{
BUG_ON(!policy->walk.dead);
- if (del_timer(&policy->timer))
+ if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer))
BUG();
security_xfrm_policy_free(policy->security);
@@ -320,10 +320,8 @@ static void xfrm_queue_purge(struct sk_buff_head *list)
{
struct sk_buff *skb;
- while ((skb = skb_dequeue(list)) != NULL) {
- dev_put(skb->dev);
+ while ((skb = skb_dequeue(list)) != NULL)
kfree_skb(skb);
- }
}
/* Rule must be locked. Release descentant resources, announce
@@ -660,7 +658,13 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
xfrm_pol_hold(policy);
net->xfrm.policy_count[dir]++;
atomic_inc(&flow_cache_genid);
- rt_genid_bump(net);
+
+ /* After previous checking, family can either be AF_INET or AF_INET6 */
+ if (policy->family == AF_INET)
+ rt_genid_bump_ipv4(net);
+ else
+ rt_genid_bump_ipv6(net);
+
if (delpol) {
xfrm_policy_requeue(delpol, policy);
__xfrm_policy_unlink(delpol, dir);
@@ -1758,7 +1762,6 @@ static void xfrm_policy_queue_process(unsigned long arg)
struct sk_buff *skb;
struct sock *sk;
struct dst_entry *dst;
- struct net_device *dev;
struct xfrm_policy *pol = (struct xfrm_policy *)arg;
struct xfrm_policy_queue *pq = &pol->polq;
struct flowi fl;
@@ -1805,7 +1808,6 @@ static void xfrm_policy_queue_process(unsigned long arg)
dst = xfrm_lookup(xp_net(pol), skb_dst(skb)->path,
&fl, skb->sk, 0);
if (IS_ERR(dst)) {
- dev_put(skb->dev);
kfree_skb(skb);
continue;
}
@@ -1814,9 +1816,7 @@ static void xfrm_policy_queue_process(unsigned long arg)
skb_dst_drop(skb);
skb_dst_set(skb, dst);
- dev = skb->dev;
err = dst_output(skb);
- dev_put(dev);
}
return;
@@ -1839,7 +1839,6 @@ static int xdst_queue_output(struct sk_buff *skb)
}
skb_dst_force(skb);
- dev_hold(skb->dev);
spin_lock_bh(&pq->hold_queue.lock);
@@ -2126,8 +2125,6 @@ restart:
* have the xfrm_state's. We need to wait for KM to
* negotiate new SA's or bail out with error.*/
if (net->xfrm.sysctl_larval_drop) {
- /* EREMOTE tells the caller to generate
- * a one-shot blackhole route. */
dst_release(dst);
xfrm_pols_put(pols, drop_pols);
XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 78f66fa9244..b9c3f9e943a 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -39,9 +39,6 @@ static DEFINE_SPINLOCK(xfrm_state_lock);
static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
-static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
-static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
-
static inline unsigned int xfrm_dst_hash(struct net *net,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr,
@@ -499,7 +496,8 @@ struct xfrm_state *xfrm_state_alloc(struct net *net)
INIT_HLIST_NODE(&x->bydst);
INIT_HLIST_NODE(&x->bysrc);
INIT_HLIST_NODE(&x->byspi);
- tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler,
+ CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
setup_timer(&x->rtimer, xfrm_replay_timer_handler,
(unsigned long)x);
x->curlft.add_time = get_seconds();
@@ -990,11 +988,13 @@ void xfrm_state_insert(struct xfrm_state *x)
EXPORT_SYMBOL(xfrm_state_insert);
/* xfrm_state_lock is held */
-static struct xfrm_state *__find_acq_core(struct net *net, struct xfrm_mark *m,
+static struct xfrm_state *__find_acq_core(struct net *net,
+ const struct xfrm_mark *m,
unsigned short family, u8 mode,
u32 reqid, u8 proto,
const xfrm_address_t *daddr,
- const xfrm_address_t *saddr, int create)
+ const xfrm_address_t *saddr,
+ int create)
{
unsigned int h = xfrm_dst_hash(net, daddr, saddr, reqid, family);
struct xfrm_state *x;
@@ -1399,9 +1399,9 @@ xfrm_state_lookup_byaddr(struct net *net, u32 mark,
EXPORT_SYMBOL(xfrm_state_lookup_byaddr);
struct xfrm_state *
-xfrm_find_acq(struct net *net, struct xfrm_mark *mark, u8 mode, u32 reqid, u8 proto,
- const xfrm_address_t *daddr, const xfrm_address_t *saddr,
- int create, unsigned short family)
+xfrm_find_acq(struct net *net, const struct xfrm_mark *mark, u8 mode, u32 reqid,
+ u8 proto, const xfrm_address_t *daddr,
+ const xfrm_address_t *saddr, int create, unsigned short family)
{
struct xfrm_state *x;
@@ -1860,7 +1860,7 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
-static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
+struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
struct xfrm_state_afinfo *afinfo;
if (unlikely(family >= NPROTO))
@@ -1872,7 +1872,7 @@ static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
return afinfo;
}
-static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
+void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
{
rcu_read_unlock();
}
diff --git a/samples/hidraw/.gitignore b/samples/hidraw/.gitignore
new file mode 100644
index 00000000000..05e51a68524
--- /dev/null
+++ b/samples/hidraw/.gitignore
@@ -0,0 +1 @@
+hid-example
diff --git a/samples/uhid/uhid-example.c b/samples/uhid/uhid-example.c
index 03ce3c059a5..7d58a4b8d32 100644
--- a/samples/uhid/uhid-example.c
+++ b/samples/uhid/uhid-example.c
@@ -1,14 +1,15 @@
/*
* UHID Example
*
- * Copyright (c) 2012 David Herrmann <dh.herrmann@googlemail.com>
+ * Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
*
* The code may be used by anyone for any purpose,
* and can serve as a starting point for developing
* applications using uhid.
*/
-/* UHID Example
+/*
+ * UHID Example
* This example emulates a basic 3 buttons mouse with wheel over UHID. Run this
* program as root and then use the following keys to control the mouse:
* q: Quit the application
@@ -22,6 +23,11 @@
* r: Move wheel up
* f: Move wheel down
*
+ * In addition to the 3 button mouse, 3 keyboard LEDs are also supported (LED_NUML,
+ * LED_CAPSL and LED_SCROLLL). The device doesn't generate any related keyboard
+ * events, though. You need to manually write the EV_LED/LED_XY/1 activation
+ * input event to the evdev device to see it being sent to this device.
+ *
* If uhid is not available as /dev/uhid, then you can pass a different path as
* first argument.
* If <linux/uhid.h> is not installed in /usr, then compile this with:
@@ -41,11 +47,12 @@
#include <unistd.h>
#include <linux/uhid.h>
-/* HID Report Desciptor
- * We emulate a basic 3 button mouse with wheel. This is the report-descriptor
- * as the kernel will parse it:
+/*
+ * HID Report Descriptor
+ * We emulate a basic 3 button mouse with wheel and 3 keyboard LEDs. This is
+ * the report-descriptor as the kernel will parse it:
*
- * INPUT[INPUT]
+ * INPUT(1)[INPUT]
* Field(0)
* Physical(GenericDesktop.Pointer)
* Application(GenericDesktop.Mouse)
@@ -72,6 +79,19 @@
* Report Count(3)
* Report Offset(8)
* Flags( Variable Relative )
+ * OUTPUT(2)[OUTPUT]
+ * Field(0)
+ * Application(GenericDesktop.Keyboard)
+ * Usage(3)
+ * LED.NumLock
+ * LED.CapsLock
+ * LED.ScrollLock
+ * Logical Minimum(0)
+ * Logical Maximum(1)
+ * Report Size(1)
+ * Report Count(3)
+ * Report Offset(0)
+ * Flags( Variable Absolute )
*
* This is the mapping that we expect:
* Button.0001 ---> Key.LeftBtn
@@ -80,19 +100,59 @@
* GenericDesktop.X ---> Relative.X
* GenericDesktop.Y ---> Relative.Y
* GenericDesktop.Wheel ---> Relative.Wheel
+ * LED.NumLock ---> LED.NumLock
+ * LED.CapsLock ---> LED.CapsLock
+ * LED.ScrollLock ---> LED.ScrollLock
*
* This information can be verified by reading /sys/kernel/debug/hid/<dev>/rdesc
* This file should print the same information as showed above.
*/
static unsigned char rdesc[] = {
- 0x05, 0x01, 0x09, 0x02, 0xa1, 0x01, 0x09, 0x01,
- 0xa1, 0x00, 0x05, 0x09, 0x19, 0x01, 0x29, 0x03,
- 0x15, 0x00, 0x25, 0x01, 0x95, 0x03, 0x75, 0x01,
- 0x81, 0x02, 0x95, 0x01, 0x75, 0x05, 0x81, 0x01,
- 0x05, 0x01, 0x09, 0x30, 0x09, 0x31, 0x09, 0x38,
- 0x15, 0x80, 0x25, 0x7f, 0x75, 0x08, 0x95, 0x03,
- 0x81, 0x06, 0xc0, 0xc0,
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x02, /* USAGE (Mouse) */
+ 0xa1, 0x01, /* COLLECTION (Application) */
+ 0x09, 0x01, /* USAGE (Pointer) */
+ 0xa1, 0x00, /* COLLECTION (Physical) */
+ 0x85, 0x01, /* REPORT_ID (1) */
+ 0x05, 0x09, /* USAGE_PAGE (Button) */
+ 0x19, 0x01, /* USAGE_MINIMUM (Button 1) */
+ 0x29, 0x03, /* USAGE_MAXIMUM (Button 3) */
+ 0x15, 0x00, /* LOGICAL_MINIMUM (0) */
+ 0x25, 0x01, /* LOGICAL_MAXIMUM (1) */
+ 0x95, 0x03, /* REPORT_COUNT (3) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x81, 0x02, /* INPUT (Data,Var,Abs) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x75, 0x05, /* REPORT_SIZE (5) */
+ 0x81, 0x01, /* INPUT (Cnst,Var,Abs) */
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x30, /* USAGE (X) */
+ 0x09, 0x31, /* USAGE (Y) */
+ 0x09, 0x38, /* USAGE (WHEEL) */
+ 0x15, 0x81, /* LOGICAL_MINIMUM (-127) */
+ 0x25, 0x7f, /* LOGICAL_MAXIMUM (127) */
+ 0x75, 0x08, /* REPORT_SIZE (8) */
+ 0x95, 0x03, /* REPORT_COUNT (3) */
+ 0x81, 0x06, /* INPUT (Data,Var,Rel) */
+ 0xc0, /* END_COLLECTION */
+ 0xc0, /* END_COLLECTION */
+ 0x05, 0x01, /* USAGE_PAGE (Generic Desktop) */
+ 0x09, 0x06, /* USAGE (Keyboard) */
+ 0xa1, 0x01, /* COLLECTION (Application) */
+ 0x85, 0x02, /* REPORT_ID (2) */
+ 0x05, 0x08, /* USAGE_PAGE (Led) */
+ 0x19, 0x01, /* USAGE_MINIMUM (1) */
+ 0x29, 0x03, /* USAGE_MAXIMUM (3) */
+ 0x15, 0x00, /* LOGICAL_MINIMUM (0) */
+ 0x25, 0x01, /* LOGICAL_MAXIMUM (1) */
+ 0x95, 0x03, /* REPORT_COUNT (3) */
+ 0x75, 0x01, /* REPORT_SIZE (1) */
+ 0x91, 0x02, /* Output (Data,Var,Abs) */
+ 0x95, 0x01, /* REPORT_COUNT (1) */
+ 0x75, 0x05, /* REPORT_SIZE (5) */
+ 0x91, 0x01, /* Output (Cnst,Var,Abs) */
+ 0xc0, /* END_COLLECTION */
};
static int uhid_write(int fd, const struct uhid_event *ev)
@@ -140,6 +200,27 @@ static void destroy(int fd)
uhid_write(fd, &ev);
}
+/* This parses raw output reports sent by the kernel to the device. A normal
+ * uhid program shouldn't do this but instead just forward the raw report.
+ * However, for documentation purposes, we try to detect LED events here and
+ * print debug messages for them. */
+static void handle_output(struct uhid_event *ev)
+{
+ /* LED messages are advertised via OUTPUT reports; ignore the rest */
+ if (ev->u.output.rtype != UHID_OUTPUT_REPORT)
+ return;
+ /* LED reports have length 2 bytes */
+ if (ev->u.output.size != 2)
+ return;
+ /* first byte is report-id which is 0x02 for LEDs in our rdesc */
+ if (ev->u.output.data[0] != 0x2)
+ return;
+
+ /* print flags payload */
+ fprintf(stderr, "LED output report received with flags %x\n",
+ ev->u.output.data[1]);
+}
+
static int event(int fd)
{
struct uhid_event ev;
@@ -174,6 +255,7 @@ static int event(int fd)
break;
case UHID_OUTPUT:
fprintf(stderr, "UHID_OUTPUT from uhid-dev\n");
+ handle_output(&ev);
break;
case UHID_OUTPUT_EV:
fprintf(stderr, "UHID_OUTPUT_EV from uhid-dev\n");
@@ -198,18 +280,19 @@ static int send_event(int fd)
memset(&ev, 0, sizeof(ev));
ev.type = UHID_INPUT;
- ev.u.input.size = 4;
+ ev.u.input.size = 5;
+ ev.u.input.data[0] = 0x1;
if (btn1_down)
- ev.u.input.data[0] |= 0x1;
+ ev.u.input.data[1] |= 0x1;
if (btn2_down)
- ev.u.input.data[0] |= 0x2;
+ ev.u.input.data[1] |= 0x2;
if (btn3_down)
- ev.u.input.data[0] |= 0x4;
+ ev.u.input.data[1] |= 0x4;
- ev.u.input.data[1] = abs_hor;
- ev.u.input.data[2] = abs_ver;
- ev.u.input.data[3] = wheel;
+ ev.u.input.data[2] = abs_hor;
+ ev.u.input.data[3] = abs_ver;
+ ev.u.input.data[4] = wheel;
return uhid_write(fd, &ev);
}
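The comment added at the top of this example says the LED path is exercised by manually writing an EV_LED activation event to the emulated device's evdev node. A minimal sketch of that (not part of the patch; the /dev/input/eventX path is a placeholder that has to be looked up for the uhid device, e.g. via /proc/bus/input/devices):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/input.h>

int main(void)
{
	struct input_event ev;
	int fd = open("/dev/input/eventX", O_WRONLY);	/* placeholder node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* turn the Caps Lock LED on */
	memset(&ev, 0, sizeof(ev));
	ev.type = EV_LED;
	ev.code = LED_CAPSL;
	ev.value = 1;
	if (write(fd, &ev, sizeof(ev)) < 0)
		perror("write");

	/* terminate the event batch */
	memset(&ev, 0, sizeof(ev));
	ev.type = EV_SYN;
	ev.code = SYN_REPORT;
	if (write(fd, &ev, sizeof(ev)) < 0)
		perror("write");

	close(fd);
	return 0;
}

If the write reaches the uhid-backed input device, the kernel should emit an OUTPUT report with report-id 0x02, which handle_output() above then prints.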
diff --git a/scripts/coccinelle/api/ptr_ret.cocci b/scripts/coccinelle/api/ptr_ret.cocci
index 2274638d005..e18f8402e37 100644
--- a/scripts/coccinelle/api/ptr_ret.cocci
+++ b/scripts/coccinelle/api/ptr_ret.cocci
@@ -1,5 +1,5 @@
///
-/// Use PTR_RET rather than if(IS_ERR(...)) + PTR_ERR
+/// Use PTR_ERR_OR_ZERO rather than if(IS_ERR(...)) + PTR_ERR
///
// Confidence: High
// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6. GPLv2.
@@ -7,7 +7,7 @@
// URL: http://coccinelle.lip6.fr/
// Options: --no-includes --include-headers
//
-// Keywords: ERR_PTR, PTR_ERR, PTR_RET
+// Keywords: ERR_PTR, PTR_ERR, PTR_RET, PTR_ERR_OR_ZERO
// Version min: 2.6.39
//
@@ -21,21 +21,21 @@ expression ptr;
@@
- if (IS_ERR(ptr)) return PTR_ERR(ptr); else return 0;
-+ return PTR_RET(ptr);
++ return PTR_ERR_OR_ZERO(ptr);
@depends on patch@
expression ptr;
@@
- if (IS_ERR(ptr)) return PTR_ERR(ptr); return 0;
-+ return PTR_RET(ptr);
++ return PTR_ERR_OR_ZERO(ptr);
@depends on patch@
expression ptr;
@@
- (IS_ERR(ptr) ? PTR_ERR(ptr) : 0)
-+ PTR_RET(ptr)
++ PTR_ERR_OR_ZERO(ptr)
@r1 depends on !patch@
expression ptr;
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 62164348ecf..8247979e8f6 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -821,6 +821,7 @@ static const char *section_white_list[] =
{
".comment*",
".debug*",
+ ".cranges", /* sh64 */
".zdebug*", /* Compressed debug sections. */
".GCC-command-line", /* mn10300 */
".GCC.command.line", /* record-gcc-switches, non mn10300 */
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 2e2a0dd4a73..e3a704c75ef 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -666,6 +666,7 @@ static int param_set_aabool(const char *val, const struct kernel_param *kp);
static int param_get_aabool(char *buffer, const struct kernel_param *kp);
#define param_check_aabool param_check_bool
static struct kernel_param_ops param_ops_aabool = {
+ .flags = KERNEL_PARAM_FL_NOARG,
.set = param_set_aabool,
.get = param_get_aabool
};
@@ -682,6 +683,7 @@ static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp
static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp);
#define param_check_aalockpolicy param_check_bool
static struct kernel_param_ops param_ops_aalockpolicy = {
+ .flags = KERNEL_PARAM_FL_NOARG,
.set = param_set_aalockpolicy,
.get = param_get_aalockpolicy
};
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index e8aad69f0d6..c123628d3f8 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -53,22 +53,17 @@ struct dev_cgroup {
static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
{
- return container_of(s, struct dev_cgroup, css);
-}
-
-static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
-{
- return css_to_devcgroup(cgroup_subsys_state(cgroup, devices_subsys_id));
+ return s ? container_of(s, struct dev_cgroup, css) : NULL;
}
static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
- return css_to_devcgroup(task_subsys_state(task, devices_subsys_id));
+ return css_to_devcgroup(task_css(task, devices_subsys_id));
}
struct cgroup_subsys devices_subsys;
-static int devcgroup_can_attach(struct cgroup *new_cgrp,
+static int devcgroup_can_attach(struct cgroup_subsys_state *new_css,
struct cgroup_taskset *set)
{
struct task_struct *task = cgroup_taskset_first(set);
@@ -193,18 +188,16 @@ static inline bool is_devcg_online(const struct dev_cgroup *devcg)
/**
* devcgroup_online - initializes devcgroup's behavior and exceptions based on
* parent's
- * @cgroup: cgroup getting online
+ * @css: css getting online
* returns 0 in case of success, error code otherwise
*/
-static int devcgroup_online(struct cgroup *cgroup)
+static int devcgroup_online(struct cgroup_subsys_state *css)
{
- struct dev_cgroup *dev_cgroup, *parent_dev_cgroup = NULL;
+ struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
+ struct dev_cgroup *parent_dev_cgroup = css_to_devcgroup(css_parent(css));
int ret = 0;
mutex_lock(&devcgroup_mutex);
- dev_cgroup = cgroup_to_devcgroup(cgroup);
- if (cgroup->parent)
- parent_dev_cgroup = cgroup_to_devcgroup(cgroup->parent);
if (parent_dev_cgroup == NULL)
dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
@@ -219,9 +212,9 @@ static int devcgroup_online(struct cgroup *cgroup)
return ret;
}
-static void devcgroup_offline(struct cgroup *cgroup)
+static void devcgroup_offline(struct cgroup_subsys_state *css)
{
- struct dev_cgroup *dev_cgroup = cgroup_to_devcgroup(cgroup);
+ struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
mutex_lock(&devcgroup_mutex);
dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
@@ -231,7 +224,8 @@ static void devcgroup_offline(struct cgroup *cgroup)
/*
* called from kernel/cgroup.c with cgroup_lock() held.
*/
-static struct cgroup_subsys_state *devcgroup_css_alloc(struct cgroup *cgroup)
+static struct cgroup_subsys_state *
+devcgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
struct dev_cgroup *dev_cgroup;
@@ -244,11 +238,10 @@ static struct cgroup_subsys_state *devcgroup_css_alloc(struct cgroup *cgroup)
return &dev_cgroup->css;
}
-static void devcgroup_css_free(struct cgroup *cgroup)
+static void devcgroup_css_free(struct cgroup_subsys_state *css)
{
- struct dev_cgroup *dev_cgroup;
+ struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
- dev_cgroup = cgroup_to_devcgroup(cgroup);
__dev_exception_clean(dev_cgroup);
kfree(dev_cgroup);
}
@@ -291,10 +284,10 @@ static void set_majmin(char *str, unsigned m)
sprintf(str, "%u", m);
}
-static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
- struct seq_file *m)
+static int devcgroup_seq_read(struct cgroup_subsys_state *css,
+ struct cftype *cft, struct seq_file *m)
{
- struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup);
+ struct dev_cgroup *devcgroup = css_to_devcgroup(css);
struct dev_exception_item *ex;
char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];
@@ -394,12 +387,10 @@ static bool may_access(struct dev_cgroup *dev_cgroup,
static int parent_has_perm(struct dev_cgroup *childcg,
struct dev_exception_item *ex)
{
- struct cgroup *pcg = childcg->css.cgroup->parent;
- struct dev_cgroup *parent;
+ struct dev_cgroup *parent = css_to_devcgroup(css_parent(&childcg->css));
- if (!pcg)
+ if (!parent)
return 1;
- parent = cgroup_to_devcgroup(pcg);
return may_access(parent, ex, childcg->behavior);
}
@@ -451,13 +442,13 @@ static void revalidate_active_exceptions(struct dev_cgroup *devcg)
static int propagate_exception(struct dev_cgroup *devcg_root,
struct dev_exception_item *ex)
{
- struct cgroup *root = devcg_root->css.cgroup, *pos;
+ struct cgroup_subsys_state *pos;
int rc = 0;
rcu_read_lock();
- cgroup_for_each_descendant_pre(pos, root) {
- struct dev_cgroup *devcg = cgroup_to_devcgroup(pos);
+ css_for_each_descendant_pre(pos, &devcg_root->css) {
+ struct dev_cgroup *devcg = css_to_devcgroup(pos);
/*
* Because devcgroup_mutex is held, no devcg will become
@@ -465,7 +456,7 @@ static int propagate_exception(struct dev_cgroup *devcg_root,
* methods), and online ones are safe to access outside RCU
* read lock without bumping refcnt.
*/
- if (!is_devcg_online(devcg))
+ if (pos == &devcg_root->css || !is_devcg_online(devcg))
continue;
rcu_read_unlock();
@@ -524,15 +515,11 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
char temp[12]; /* 11 + 1 characters needed for a u32 */
int count, rc = 0;
struct dev_exception_item ex;
- struct cgroup *p = devcgroup->css.cgroup;
- struct dev_cgroup *parent = NULL;
+ struct dev_cgroup *parent = css_to_devcgroup(css_parent(&devcgroup->css));
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- if (p->parent)
- parent = cgroup_to_devcgroup(p->parent);
-
memset(&ex, 0, sizeof(ex));
b = buffer;
@@ -677,13 +664,13 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
return rc;
}
-static int devcgroup_access_write(struct cgroup *cgrp, struct cftype *cft,
- const char *buffer)
+static int devcgroup_access_write(struct cgroup_subsys_state *css,
+ struct cftype *cft, const char *buffer)
{
int retval;
mutex_lock(&devcgroup_mutex);
- retval = devcgroup_update_access(cgroup_to_devcgroup(cgrp),
+ retval = devcgroup_update_access(css_to_devcgroup(css),
cft->private, buffer);
mutex_unlock(&devcgroup_mutex);
return retval;
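The whole file follows one conversion pattern: callbacks and helpers now take a struct cgroup_subsys_state, and the container_of() accessor is made NULL-safe so that css_parent() of the root can be passed straight through. A reduced sketch with hypothetical names:

struct foo_cgroup {
	struct cgroup_subsys_state css;
	int behavior;
};

static inline struct foo_cgroup *css_to_foo(struct cgroup_subsys_state *css)
{
	/* NULL-safe: css_parent() of the root css returns NULL */
	return css ? container_of(css, struct foo_cgroup, css) : NULL;
}

static int foo_parent_behavior(struct foo_cgroup *cg)
{
	struct foo_cgroup *parent = css_to_foo(css_parent(&cg->css));

	return parent ? parent->behavior : -1;	/* -1: root, no parent */
}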
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index 65f67cb0aef..6713f04e30b 100644
--- a/security/selinux/include/xfrm.h
+++ b/security/selinux/include/xfrm.h
@@ -50,8 +50,13 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
static inline void selinux_xfrm_notify_policyload(void)
{
+ struct net *net;
+
atomic_inc(&flow_cache_genid);
- rt_genid_bump(&init_net);
+ rtnl_lock();
+ for_each_net(net)
+ rt_genid_bump_all(net);
+ rtnl_unlock();
}
#else
static inline int selinux_xfrm_enabled(void)
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 3f7682a387b..eefbd10e408 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1998,12 +1998,11 @@ static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address)
*
* Create or update the port list entry
*/
-static int smk_ipv6_port_check(struct sock *sk, struct sockaddr *address,
+static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address,
int act)
{
__be16 *bep;
__be32 *be32p;
- struct sockaddr_in6 *addr6;
struct smk_port_label *spp;
struct socket_smack *ssp = sk->sk_security;
struct smack_known *skp;
@@ -2025,10 +2024,9 @@ static int smk_ipv6_port_check(struct sock *sk, struct sockaddr *address,
/*
* Get the IP address and port from the address.
*/
- addr6 = (struct sockaddr_in6 *)address;
- port = ntohs(addr6->sin6_port);
- bep = (__be16 *)(&addr6->sin6_addr);
- be32p = (__be32 *)(&addr6->sin6_addr);
+ port = ntohs(address->sin6_port);
+ bep = (__be16 *)(&address->sin6_addr);
+ be32p = (__be32 *)(&address->sin6_addr);
/*
* It's remote, so port lookup does no good.
@@ -2060,9 +2058,9 @@ auditout:
ad.a.u.net->family = sk->sk_family;
ad.a.u.net->dport = port;
if (act == SMK_RECEIVING)
- ad.a.u.net->v6info.saddr = addr6->sin6_addr;
+ ad.a.u.net->v6info.saddr = address->sin6_addr;
else
- ad.a.u.net->v6info.daddr = addr6->sin6_addr;
+ ad.a.u.net->v6info.daddr = address->sin6_addr;
#endif
return smk_access(skp, object, MAY_WRITE, &ad);
}
@@ -2201,7 +2199,8 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
case PF_INET6:
if (addrlen < sizeof(struct sockaddr_in6))
return -EINVAL;
- rc = smk_ipv6_port_check(sock->sk, sap, SMK_CONNECTING);
+ rc = smk_ipv6_port_check(sock->sk, (struct sockaddr_in6 *)sap,
+ SMK_CONNECTING);
break;
}
return rc;
@@ -3034,7 +3033,7 @@ static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
int size)
{
struct sockaddr_in *sip = (struct sockaddr_in *) msg->msg_name;
- struct sockaddr *sap = (struct sockaddr *) msg->msg_name;
+ struct sockaddr_in6 *sap = (struct sockaddr_in6 *) msg->msg_name;
int rc = 0;
/*
@@ -3121,9 +3120,8 @@ static struct smack_known *smack_from_secattr(struct netlbl_lsm_secattr *sap,
return smack_net_ambient;
}
-static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr *sap)
+static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr_in6 *sip)
{
- struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap;
u8 nexthdr;
int offset;
int proto = -EINVAL;
@@ -3181,7 +3179,7 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
struct netlbl_lsm_secattr secattr;
struct socket_smack *ssp = sk->sk_security;
struct smack_known *skp;
- struct sockaddr sadd;
+ struct sockaddr_in6 sadd;
int rc = 0;
struct smk_audit_info ad;
#ifdef CONFIG_AUDIT
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index ce431e6e07c..5066a3768b2 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -14,12 +14,14 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/ac97_codec.h>
#include <sound/initval.h>
#include <sound/pxa2xx-lib.h>
+#include <sound/dmaengine_pcm.h>
#include <mach/regs-ac97.h>
#include <mach/audio.h>
@@ -41,20 +43,20 @@ static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
.reset = pxa2xx_ac97_reset,
};
-static struct pxa2xx_pcm_dma_params pxa2xx_ac97_pcm_out = {
- .name = "AC97 PCM out",
- .dev_addr = __PREG(PCDR),
- .drcmr = &DRCMR(12),
- .dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG |
- DCMD_BURST32 | DCMD_WIDTH4,
+static unsigned long pxa2xx_ac97_pcm_out_req = 12;
+static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_out = {
+ .addr = __PREG(PCDR),
+ .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .maxburst = 32,
+ .filter_data = &pxa2xx_ac97_pcm_out_req,
};
-static struct pxa2xx_pcm_dma_params pxa2xx_ac97_pcm_in = {
- .name = "AC97 PCM in",
- .dev_addr = __PREG(PCDR),
- .drcmr = &DRCMR(11),
- .dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC |
- DCMD_BURST32 | DCMD_WIDTH4,
+static unsigned long pxa2xx_ac97_pcm_in_req = 11;
+static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_in = {
+ .addr = __PREG(PCDR),
+ .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .maxburst = 32,
+ .filter_data = &pxa2xx_ac97_pcm_in_req,
};
static struct snd_pcm *pxa2xx_ac97_pcm;
diff --git a/sound/arm/pxa2xx-pcm-lib.c b/sound/arm/pxa2xx-pcm-lib.c
index 823359ed95e..a61d7a9a995 100644
--- a/sound/arm/pxa2xx-pcm-lib.c
+++ b/sound/arm/pxa2xx-pcm-lib.c
@@ -7,11 +7,13 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/pxa2xx-lib.h>
+#include <sound/dmaengine_pcm.h>
#include <mach/dma.h>
@@ -43,6 +45,35 @@ int __pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
size_t period = params_period_bytes(params);
pxa_dma_desc *dma_desc;
dma_addr_t dma_buff_phys, next_desc_phys;
+ u32 dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
+
+ /* temporary transition hack */
+ switch (rtd->params->addr_width) {
+ case DMA_SLAVE_BUSWIDTH_1_BYTE:
+ dcmd |= DCMD_WIDTH1;
+ break;
+ case DMA_SLAVE_BUSWIDTH_2_BYTES:
+ dcmd |= DCMD_WIDTH2;
+ break;
+ case DMA_SLAVE_BUSWIDTH_4_BYTES:
+ dcmd |= DCMD_WIDTH4;
+ break;
+ default:
+ /* can't happen */
+ break;
+ }
+
+ switch (rtd->params->maxburst) {
+ case 8:
+ dcmd |= DCMD_BURST8;
+ break;
+ case 16:
+ dcmd |= DCMD_BURST16;
+ break;
+ case 32:
+ dcmd |= DCMD_BURST32;
+ break;
+ }
snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
runtime->dma_bytes = totsize;
@@ -55,14 +86,14 @@ int __pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
dma_desc->ddadr = next_desc_phys;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
dma_desc->dsadr = dma_buff_phys;
- dma_desc->dtadr = rtd->params->dev_addr;
+ dma_desc->dtadr = rtd->params->addr;
} else {
- dma_desc->dsadr = rtd->params->dev_addr;
+ dma_desc->dsadr = rtd->params->addr;
dma_desc->dtadr = dma_buff_phys;
}
if (period > totsize)
period = totsize;
- dma_desc->dcmd = rtd->params->dcmd | period | DCMD_ENDIRQEN;
+ dma_desc->dcmd = dcmd | period | DCMD_ENDIRQEN;
dma_desc++;
dma_buff_phys += period;
} while (totsize -= period);
@@ -76,8 +107,10 @@ int __pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct pxa2xx_runtime_data *rtd = substream->runtime->private_data;
- if (rtd && rtd->params && rtd->params->drcmr)
- *rtd->params->drcmr = 0;
+ if (rtd && rtd->params && rtd->params->filter_data) {
+ unsigned long req = *(unsigned long *) rtd->params->filter_data;
+ DRCMR(req) = 0;
+ }
snd_pcm_set_runtime_buffer(substream, NULL);
return 0;
@@ -136,6 +169,7 @@ EXPORT_SYMBOL(pxa2xx_pcm_pointer);
int __pxa2xx_pcm_prepare(struct snd_pcm_substream *substream)
{
struct pxa2xx_runtime_data *prtd = substream->runtime->private_data;
+ unsigned long req;
if (!prtd || !prtd->params)
return 0;
@@ -146,7 +180,8 @@ int __pxa2xx_pcm_prepare(struct snd_pcm_substream *substream)
DCSR(prtd->dma_ch) &= ~DCSR_RUN;
DCSR(prtd->dma_ch) = 0;
DCMD(prtd->dma_ch) = 0;
- *prtd->params->drcmr = prtd->dma_ch | DRCMR_MAPVLD;
+ req = *(unsigned long *) prtd->params->filter_data;
+ DRCMR(req) = prtd->dma_ch | DRCMR_MAPVLD;
return 0;
}
@@ -155,7 +190,6 @@ EXPORT_SYMBOL(__pxa2xx_pcm_prepare);
void pxa2xx_pcm_dma_irq(int dma_ch, void *dev_id)
{
struct snd_pcm_substream *substream = dev_id;
- struct pxa2xx_runtime_data *rtd = substream->runtime->private_data;
int dcsr;
dcsr = DCSR(dma_ch);
@@ -164,8 +198,8 @@ void pxa2xx_pcm_dma_irq(int dma_ch, void *dev_id)
if (dcsr & DCSR_ENDINTR) {
snd_pcm_period_elapsed(substream);
} else {
- printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n",
- rtd->params->name, dma_ch, dcsr);
+ printk(KERN_ERR "DMA error on channel %d (DCSR=%#x)\n",
+ dma_ch, dcsr);
snd_pcm_stream_lock(substream);
snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
snd_pcm_stream_unlock(substream);
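The conversion to snd_dmaengine_dai_dma_data drops the PXA-specific drcmr/dcmd fields; the DMA request line number is instead stashed behind filter_data so the legacy helpers above can still program DRCMR from it. A reduced sketch with illustrative names:

static unsigned long example_req = 12;		/* DRCMR request line */

static struct snd_dmaengine_dai_dma_data example_dma = {
	.addr        = __PREG(PCDR),
	.addr_width  = DMA_SLAVE_BUSWIDTH_4_BYTES,
	.maxburst    = 32,
	.filter_data = &example_req,
};

static void example_map_channel(int dma_ch)
{
	unsigned long req = *(unsigned long *)example_dma.filter_data;

	DRCMR(req) = dma_ch | DRCMR_MAPVLD;	/* as in __pxa2xx_pcm_prepare() */
}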
diff --git a/sound/arm/pxa2xx-pcm.c b/sound/arm/pxa2xx-pcm.c
index 26422a3584e..69a2455b447 100644
--- a/sound/arm/pxa2xx-pcm.c
+++ b/sound/arm/pxa2xx-pcm.c
@@ -11,8 +11,11 @@
*/
#include <linux/module.h>
+#include <linux/dmaengine.h>
+
#include <sound/core.h>
#include <sound/pxa2xx-lib.h>
+#include <sound/dmaengine_pcm.h>
#include "pxa2xx-pcm.h"
@@ -40,7 +43,7 @@ static int pxa2xx_pcm_open(struct snd_pcm_substream *substream)
rtd->params = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
client->playback_params : client->capture_params;
- ret = pxa_request_dma(rtd->params->name, DMA_PRIO_LOW,
+ ret = pxa_request_dma("dma", DMA_PRIO_LOW,
pxa2xx_pcm_dma_irq, substream);
if (ret < 0)
goto err2;
diff --git a/sound/arm/pxa2xx-pcm.h b/sound/arm/pxa2xx-pcm.h
index 65f86b56ba4..2a8fc08d52a 100644
--- a/sound/arm/pxa2xx-pcm.h
+++ b/sound/arm/pxa2xx-pcm.h
@@ -13,14 +13,14 @@
struct pxa2xx_runtime_data {
int dma_ch;
- struct pxa2xx_pcm_dma_params *params;
+ struct snd_dmaengine_dai_dma_data *params;
pxa_dma_desc *dma_desc_array;
dma_addr_t dma_desc_array_phys;
};
struct pxa2xx_pcm_client {
- struct pxa2xx_pcm_dma_params *playback_params;
- struct pxa2xx_pcm_dma_params *capture_params;
+ struct snd_dmaengine_dai_dma_data *playback_params;
+ struct snd_dmaengine_dai_dma_data *capture_params;
int (*startup)(struct snd_pcm_substream *);
void (*shutdown)(struct snd_pcm_substream *);
int (*prepare)(struct snd_pcm_substream *);
diff --git a/sound/core/Kconfig b/sound/core/Kconfig
index c0c2f57a0d6..313f22e9d92 100644
--- a/sound/core/Kconfig
+++ b/sound/core/Kconfig
@@ -6,6 +6,9 @@ config SND_PCM
tristate
select SND_TIMER
+config SND_DMAENGINE_PCM
+ tristate
+
config SND_HWDEP
tristate
diff --git a/sound/core/Makefile b/sound/core/Makefile
index 43d4117428a..5e890cfed42 100644
--- a/sound/core/Makefile
+++ b/sound/core/Makefile
@@ -13,6 +13,8 @@ snd-$(CONFIG_SND_JACK) += jack.o
snd-pcm-objs := pcm.o pcm_native.o pcm_lib.o pcm_timer.o pcm_misc.o \
pcm_memory.o
+snd-pcm-dmaengine-objs := pcm_dmaengine.o
+
snd-page-alloc-y := memalloc.o
snd-page-alloc-$(CONFIG_SND_DMA_SGBUF) += sgbuf.o
@@ -30,6 +32,7 @@ obj-$(CONFIG_SND_TIMER) += snd-timer.o
obj-$(CONFIG_SND_HRTIMER) += snd-hrtimer.o
obj-$(CONFIG_SND_RTCTIMER) += snd-rtctimer.o
obj-$(CONFIG_SND_PCM) += snd-pcm.o snd-page-alloc.o
+obj-$(CONFIG_SND_DMAENGINE_PCM) += snd-pcm-dmaengine.o
obj-$(CONFIG_SND_RAWMIDI) += snd-rawmidi.o
obj-$(CONFIG_SND_OSSEMUL) += oss/
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 99db892d729..98969541cbc 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -743,7 +743,7 @@ static long snd_compr_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
mutex_lock(&stream->device->lock);
switch (_IOC_NR(cmd)) {
case _IOC_NR(SNDRV_COMPRESS_IOCTL_VERSION):
- put_user(SNDRV_COMPRESS_VERSION,
+ retval = put_user(SNDRV_COMPRESS_VERSION,
(int __user *)arg) ? -EFAULT : 0;
break;
case _IOC_NR(SNDRV_COMPRESS_GET_CAPS):
diff --git a/sound/soc/soc-dmaengine-pcm.c b/sound/core/pcm_dmaengine.c
index aa924d9b798..aa924d9b798 100644
--- a/sound/soc/soc-dmaengine-pcm.c
+++ b/sound/core/pcm_dmaengine.c
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 82bb029d441..6e03b465e44 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -184,7 +184,7 @@ static void xrun(struct snd_pcm_substream *substream)
do { \
if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { \
xrun_log_show(substream); \
- if (printk_ratelimit()) { \
+ if (snd_printd_ratelimit()) { \
snd_printd("PCM: " fmt, ##args); \
} \
dump_stack_on_xrun(substream); \
@@ -342,7 +342,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
return -EPIPE;
}
if (pos >= runtime->buffer_size) {
- if (printk_ratelimit()) {
+ if (snd_printd_ratelimit()) {
char name[16];
snd_pcm_debug_name(substream, name, sizeof(name));
xrun_log_show(substream);
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index 11048cc744d..915b4d7fbb2 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -1022,7 +1022,7 @@ static void dummy_proc_write(struct snd_info_entry *entry,
if (i >= ARRAY_SIZE(fields))
continue;
snd_info_get_str(item, ptr, sizeof(item));
- if (strict_strtoull(item, 0, &val))
+ if (kstrtoull(item, 0, &val))
continue;
if (fields[i].size == sizeof(int))
*get_dummy_int_ptr(dummy, fields[i].offset) = val;
diff --git a/sound/firewire/speakers.c b/sound/firewire/speakers.c
index 2c638650394..fe9e6e2f2c5 100644
--- a/sound/firewire/speakers.c
+++ b/sound/firewire/speakers.c
@@ -49,7 +49,6 @@ struct fwspk {
struct snd_card *card;
struct fw_unit *unit;
const struct device_info *device_info;
- struct snd_pcm_substream *pcm;
struct mutex mutex;
struct cmp_connection connection;
struct amdtp_out_stream stream;
@@ -363,8 +362,7 @@ static int fwspk_create_pcm(struct fwspk *fwspk)
return err;
pcm->private_data = fwspk;
strcpy(pcm->name, fwspk->device_info->short_name);
- fwspk->pcm = pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
- fwspk->pcm->ops = &ops;
+ snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &ops);
return 0;
}
diff --git a/sound/i2c/other/Makefile b/sound/i2c/other/Makefile
index c95d8f1aae8..5526b03b95a 100644
--- a/sound/i2c/other/Makefile
+++ b/sound/i2c/other/Makefile
@@ -8,10 +8,8 @@ snd-ak4117-objs := ak4117.o
snd-ak4113-objs := ak4113.o
snd-ak4xxx-adda-objs := ak4xxx-adda.o
snd-pt2258-objs := pt2258.o
-snd-tea575x-tuner-objs := tea575x-tuner.o
# Module Dependency
obj-$(CONFIG_SND_PDAUDIOCF) += snd-ak4117.o
obj-$(CONFIG_SND_ICE1712) += snd-ak4xxx-adda.o
obj-$(CONFIG_SND_ICE1724) += snd-ak4114.o snd-ak4113.o snd-ak4xxx-adda.o snd-pt2258.o
-obj-$(CONFIG_SND_TEA575X) += snd-tea575x-tuner.o
diff --git a/sound/isa/gus/interwave.c b/sound/isa/gus/interwave.c
index 9942691cc0c..afef0d73807 100644
--- a/sound/isa/gus/interwave.c
+++ b/sound/isa/gus/interwave.c
@@ -443,8 +443,7 @@ static void snd_interwave_detect_memory(struct snd_gus_card *gus)
for (i = 0; i < 8; ++i)
iwave[i] = snd_gf1_peek(gus, bank_pos + i);
#ifdef CONFIG_SND_DEBUG_ROM
- printk(KERN_DEBUG "ROM at 0x%06x = %*phC\n", bank_pos,
- 8, iwave);
+ printk(KERN_DEBUG "ROM at 0x%06x = %8phC\n", bank_pos, iwave);
#endif
if (strncmp(iwave, "INTRWAVE", 8))
continue; /* first check */
diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c
index 103b33373fd..6effe99bbb9 100644
--- a/sound/isa/opti9xx/opti92x-ad1848.c
+++ b/sound/isa/opti9xx/opti92x-ad1848.c
@@ -173,11 +173,7 @@ MODULE_DEVICE_TABLE(pnp_card, snd_opti9xx_pnpids);
#endif /* CONFIG_PNP */
-#ifdef OPTi93X
-#define DEV_NAME "opti93x"
-#else
-#define DEV_NAME "opti92x"
-#endif
+#define DEV_NAME KBUILD_MODNAME
static char * snd_opti9xx_names[] = {
"unknown",
@@ -1167,7 +1163,7 @@ static int snd_opti9xx_pnp_resume(struct pnp_card_link *pcard)
static struct pnp_card_driver opti9xx_pnpc_driver = {
.flags = PNP_DRIVER_RES_DISABLE,
- .name = "opti9xx",
+ .name = DEV_NAME,
.id_table = snd_opti9xx_pnpids,
.probe = snd_opti9xx_pnp_probe,
.remove = snd_opti9xx_pnp_remove,
diff --git a/sound/oss/dmabuf.c b/sound/oss/dmabuf.c
index a59c88818f4..461d94cfecb 100644
--- a/sound/oss/dmabuf.c
+++ b/sound/oss/dmabuf.c
@@ -557,7 +557,6 @@ int DMAbuf_getrdbuffer(int dev, char **buf, int *len, int dontblock)
unsigned long flags;
int err = 0, n = 0;
struct dma_buffparms *dmap = adev->dmap_in;
- int go;
if (!(adev->open_mode & OPEN_READ))
return -EIO;
@@ -584,7 +583,7 @@ int DMAbuf_getrdbuffer(int dev, char **buf, int *len, int dontblock)
spin_unlock_irqrestore(&dmap->lock,flags);
return -EAGAIN;
}
- if ((go = adev->go))
+ if (adev->go)
timeout = dmabuf_timeout(dmap);
spin_unlock_irqrestore(&dmap->lock,flags);
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index fe6fa93a626..46ed9e8ae0f 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -1,10 +1,5 @@
# ALSA PCI drivers
-config SND_TEA575X
- tristate
- depends on SND_FM801_TEA575X_BOOL || SND_ES1968_RADIO || RADIO_SF16FMR2 || RADIO_MAXIRADIO || RADIO_SHARK
- default SND_FM801 || SND_ES1968 || RADIO_SF16FMR2 || RADIO_MAXIRADIO || RADIO_SHARK
-
menuconfig SND_PCI
bool "PCI sound devices"
depends on PCI
@@ -542,7 +537,11 @@ config SND_ES1968_INPUT
config SND_ES1968_RADIO
bool "Enable TEA5757 radio tuner support for es1968"
depends on SND_ES1968
+ depends on MEDIA_RADIO_SUPPORT
depends on VIDEO_V4L2=y || VIDEO_V4L2=SND_ES1968
+ select RADIO_ADAPTERS
+ select RADIO_TEA575X
+
help
Say Y here to include support for TEA5757 radio tuner integrated on
some MediaForte cards (e.g. SF64-PCE2).
@@ -562,7 +561,10 @@ config SND_FM801
config SND_FM801_TEA575X_BOOL
bool "ForteMedia FM801 + TEA5757 tuner"
depends on SND_FM801
+ depends on MEDIA_RADIO_SUPPORT
depends on VIDEO_V4L2=y || VIDEO_V4L2=SND_FM801
+ select RADIO_ADAPTERS
+ select RADIO_TEA575X
help
Say Y here to include support for soundcards based on the ForteMedia
FM801 chip with a TEA5757 tuner (MediaForte SF256-PCS, SF256-PCP and
diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
index 5e2ec968773..b0e3d92c465 100644
--- a/sound/pci/es1968.c
+++ b/sound/pci/es1968.c
@@ -113,7 +113,7 @@
#include <sound/initval.h>
#ifdef CONFIG_SND_ES1968_RADIO
-#include <sound/tea575x-tuner.h>
+#include <media/tea575x.h>
#endif
#define CARD_NAME "ESS Maestro1/2"
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index 706c5b67b70..45bc8a95b7c 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -37,7 +37,7 @@
#include <asm/io.h>
#ifdef CONFIG_SND_FM801_TEA575X_BOOL
-#include <sound/tea575x-tuner.h>
+#include <media/tea575x.h>
#endif
MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
index 59c5e9c03d5..8de66ccd727 100644
--- a/sound/pci/hda/Kconfig
+++ b/sound/pci/hda/Kconfig
@@ -152,14 +152,9 @@ config SND_HDA_CODEC_HDMI
This module is automatically loaded at probing.
config SND_HDA_I915
- bool "Build Display HD-audio controller/codec power well support for i915 cards"
+ bool
+ default y
depends on DRM_I915
- help
- Say Y here to include full HDMI and DisplayPort HD-audio controller/codec
- power-well support for Intel Haswell graphics cards based on the i915 driver.
-
- Note that this option must be enabled for Intel Haswell C+ stepping machines, otherwise
- the GPU audio controller/codecs will not be initialized or damaged when exit from S3 mode.
config SND_HDA_CODEC_CIRRUS
bool "Build Cirrus Logic codec support"
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 7c11d46b84d..48a9d004d6d 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -860,7 +860,7 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
}
}
if (id < 0 && quirk) {
- for (q = quirk; q->subvendor; q++) {
+ for (q = quirk; q->subvendor || q->subdevice; q++) {
unsigned int vendorid =
q->subdevice | (q->subvendor << 16);
unsigned int mask = 0xffff0000 | q->subdevice_mask;
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 8a005f0e5ca..5b6c4e3c92c 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -666,6 +666,64 @@ int snd_hda_get_conn_index(struct hda_codec *codec, hda_nid_t mux,
}
EXPORT_SYMBOL_HDA(snd_hda_get_conn_index);
+
+/* return DEVLIST_LEN parameter of the given widget */
+static unsigned int get_num_devices(struct hda_codec *codec, hda_nid_t nid)
+{
+ unsigned int wcaps = get_wcaps(codec, nid);
+ unsigned int parm;
+
+ if (!codec->dp_mst || !(wcaps & AC_WCAP_DIGITAL) ||
+ get_wcaps_type(wcaps) != AC_WID_PIN)
+ return 0;
+
+ parm = snd_hda_param_read(codec, nid, AC_PAR_DEVLIST_LEN);
+ if (parm == -1 && codec->bus->rirb_error)
+ parm = 0;
+ return parm & AC_DEV_LIST_LEN_MASK;
+}
+
+/**
+ * snd_hda_get_devices - copy device list without cache
+ * @codec: the HDA codec
+ * @nid: NID of the pin to parse
+ * @dev_list: device list array
+ * @max_devices: max. number of devices to store
+ *
+ * Copy the device list. This info is dynamic and so not cached.
+ * Currently called only from hda_proc.c, so not exported.
+ */
+int snd_hda_get_devices(struct hda_codec *codec, hda_nid_t nid,
+ u8 *dev_list, int max_devices)
+{
+ unsigned int parm;
+ int i, dev_len, devices;
+
+ parm = get_num_devices(codec, nid);
+ if (!parm) /* not multi-stream capable */
+ return 0;
+
+ dev_len = parm + 1;
+ dev_len = dev_len < max_devices ? dev_len : max_devices;
+
+ devices = 0;
+ while (devices < dev_len) {
+ parm = snd_hda_codec_read(codec, nid, 0,
+ AC_VERB_GET_DEVICE_LIST, devices);
+ if (parm == -1 && codec->bus->rirb_error)
+ break;
+
+ for (i = 0; i < 8; i++) {
+ dev_list[devices] = (u8)parm;
+ parm >>= 4;
+ devices++;
+ if (devices >= dev_len)
+ break;
+ }
+ }
+ return devices;
+}
+
/**
* snd_hda_queue_unsol_event - add an unsolicited event to queue
* @bus: the BUS
@@ -1216,11 +1274,13 @@ static void hda_jackpoll_work(struct work_struct *work)
{
struct hda_codec *codec =
container_of(work, struct hda_codec, jackpoll_work.work);
- if (!codec->jackpoll_interval)
- return;
snd_hda_jack_set_dirty_all(codec);
snd_hda_jack_poll_all(codec);
+
+ if (!codec->jackpoll_interval)
+ return;
+
queue_delayed_work(codec->bus->workq, &codec->jackpoll_work,
codec->jackpoll_interval);
}
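snd_hda_get_devices() above reads the Device List verb, which packs eight 4-bit Device Entries into each 32-bit response. A stand-alone sketch of that unpacking, simplified with an explicit 4-bit mask and no HDA calls:

#include <stdint.h>

static int unpack_dev_entries(uint32_t resp, uint8_t *dev_list, int max)
{
	int i, n = 0;

	for (i = 0; i < 8 && n < max; i++) {
		dev_list[n++] = resp & 0xf;	/* PD, ELDV, IA bits per entry */
		resp >>= 4;
	}
	return n;
}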
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 701c2e069b1..7aa9870040c 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -94,6 +94,8 @@ enum {
#define AC_VERB_GET_HDMI_DIP_XMIT 0x0f32
#define AC_VERB_GET_HDMI_CP_CTRL 0x0f33
#define AC_VERB_GET_HDMI_CHAN_SLOT 0x0f34
+#define AC_VERB_GET_DEVICE_SEL 0xf35
+#define AC_VERB_GET_DEVICE_LIST 0xf36
/*
* SET verbs
@@ -133,6 +135,7 @@ enum {
#define AC_VERB_SET_HDMI_DIP_XMIT 0x732
#define AC_VERB_SET_HDMI_CP_CTRL 0x733
#define AC_VERB_SET_HDMI_CHAN_SLOT 0x734
+#define AC_VERB_SET_DEVICE_SEL 0x735
/*
* Parameter IDs
@@ -154,6 +157,7 @@ enum {
#define AC_PAR_GPIO_CAP 0x11
#define AC_PAR_AMP_OUT_CAP 0x12
#define AC_PAR_VOL_KNB_CAP 0x13
+#define AC_PAR_DEVLIST_LEN 0x15
#define AC_PAR_HDMI_LPCM_CAP 0x20
/*
@@ -251,6 +255,11 @@ enum {
#define AC_UNSOL_RES_TAG_SHIFT 26
#define AC_UNSOL_RES_SUBTAG (0x1f<<21)
#define AC_UNSOL_RES_SUBTAG_SHIFT 21
+#define AC_UNSOL_RES_DE (0x3f<<15) /* Device Entry
+ * (for DP1.2 MST)
+ */
+#define AC_UNSOL_RES_DE_SHIFT 15
+#define AC_UNSOL_RES_IA (1<<2) /* Inactive (for DP1.2 MST) */
#define AC_UNSOL_RES_ELDV (1<<1) /* ELD Data valid (for HDMI) */
#define AC_UNSOL_RES_PD (1<<0) /* pinsense detect */
#define AC_UNSOL_RES_CP_STATE (1<<1) /* content protection */
@@ -352,6 +361,10 @@ enum {
#define AC_LPCMCAP_44K (1<<30) /* 44.1kHz support */
#define AC_LPCMCAP_44K_MS (1<<31) /* 44.1kHz-multiplies support */
+/* Display pin's device list length */
+#define AC_DEV_LIST_LEN_MASK 0x3f
+#define AC_MAX_DEV_LIST_LEN 64
+
/*
* Control Parameters
*/
@@ -460,6 +473,11 @@ enum {
#define AC_DEFCFG_PORT_CONN (0x3<<30)
#define AC_DEFCFG_PORT_CONN_SHIFT 30
+/* Display pin's device list entry */
+#define AC_DE_PD (1<<0)
+#define AC_DE_ELDV (1<<1)
+#define AC_DE_IA (1<<2)
+
/* device types (0x0-0xf) */
enum {
AC_JACK_LINE_OUT,
@@ -885,6 +903,7 @@ struct hda_codec {
unsigned int pcm_format_first:1; /* PCM format must be set first */
unsigned int epss:1; /* supporting EPSS? */
unsigned int cached_write:1; /* write only to caches */
+ unsigned int dp_mst:1; /* support DP1.2 Multi-stream transport */
#ifdef CONFIG_PM
unsigned int power_on :1; /* current (global) power-state */
unsigned int d3_stop_clk:1; /* support D3 operation without BCLK */
@@ -972,6 +991,8 @@ int snd_hda_override_conn_list(struct hda_codec *codec, hda_nid_t nid, int nums,
const hda_nid_t *list);
int snd_hda_get_conn_index(struct hda_codec *codec, hda_nid_t mux,
hda_nid_t nid, int recursive);
+int snd_hda_get_devices(struct hda_codec *codec, hda_nid_t nid,
+ u8 *dev_list, int max_devices);
int snd_hda_query_supported_pcm(struct hda_codec *codec, hda_nid_t nid,
u32 *ratesp, u64 *formatsp, unsigned int *bpsp);
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 8e77cbbad87..ac41e9cdc97 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -142,6 +142,9 @@ static void parse_user_hints(struct hda_codec *codec)
val = snd_hda_get_bool_hint(codec, "primary_hp");
if (val >= 0)
spec->no_primary_hp = !val;
+ val = snd_hda_get_bool_hint(codec, "multi_io");
+ if (val >= 0)
+ spec->no_multi_io = !val;
val = snd_hda_get_bool_hint(codec, "multi_cap_vol");
if (val >= 0)
spec->multi_cap_vol = !!val;
@@ -522,7 +525,7 @@ static bool same_amp_caps(struct hda_codec *codec, hda_nid_t nid1,
}
#define nid_has_mute(codec, nid, dir) \
- check_amp_caps(codec, nid, dir, AC_AMPCAP_MUTE)
+ check_amp_caps(codec, nid, dir, (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE))
#define nid_has_volume(codec, nid, dir) \
check_amp_caps(codec, nid, dir, AC_AMPCAP_NUM_STEPS)
@@ -624,7 +627,7 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
if (enable)
val = (caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT;
}
- if (caps & AC_AMPCAP_MUTE) {
+ if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
if (!enable)
val |= HDA_AMP_MUTE;
}
@@ -648,7 +651,7 @@ static unsigned int get_amp_mask_to_modify(struct hda_codec *codec,
{
unsigned int mask = 0xff;
- if (caps & AC_AMPCAP_MUTE) {
+ if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) {
if (is_ctl_associated(codec, nid, dir, idx, NID_PATH_MUTE_CTL))
mask &= ~0x80;
}
@@ -813,6 +816,8 @@ static void resume_path_from_idx(struct hda_codec *codec, int path_idx)
static int hda_gen_mixer_mute_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
+static int hda_gen_bind_mute_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol);
enum {
HDA_CTL_WIDGET_VOL,
@@ -830,7 +835,13 @@ static const struct snd_kcontrol_new control_templates[] = {
.put = hda_gen_mixer_mute_put, /* replaced */
.private_value = HDA_COMPOSE_AMP_VAL(0, 3, 0, 0),
},
- HDA_BIND_MUTE(NULL, 0, 0, 0),
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .info = snd_hda_mixer_amp_switch_info,
+ .get = snd_hda_mixer_bind_switch_get,
+ .put = hda_gen_bind_mute_put, /* replaced */
+ .private_value = HDA_COMPOSE_AMP_VAL(0, 3, 0, 0),
+ },
};
/* add dynamic controls from template */
@@ -937,8 +948,8 @@ static int add_stereo_sw(struct hda_codec *codec, const char *pfx,
}
/* playback mute control with the software mute bit check */
-static int hda_gen_mixer_mute_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
+static void sync_auto_mute_bits(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
struct hda_gen_spec *spec = codec->spec;
@@ -949,10 +960,22 @@ static int hda_gen_mixer_mute_put(struct snd_kcontrol *kcontrol,
ucontrol->value.integer.value[0] &= enabled;
ucontrol->value.integer.value[1] &= enabled;
}
+}
+static int hda_gen_mixer_mute_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ sync_auto_mute_bits(kcontrol, ucontrol);
return snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
}
+static int hda_gen_bind_mute_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ sync_auto_mute_bits(kcontrol, ucontrol);
+ return snd_hda_mixer_bind_switch_put(kcontrol, ucontrol);
+}
+
/* any ctl assigned to the path with the given index? */
static bool path_has_mixer(struct hda_codec *codec, int path_idx, int ctl_type)
{
@@ -1541,7 +1564,8 @@ static int fill_and_eval_dacs(struct hda_codec *codec,
cfg->speaker_pins,
spec->multiout.extra_out_nid,
spec->speaker_paths);
- if (fill_mio_first && cfg->line_outs == 1 &&
+ if (!spec->no_multi_io &&
+ fill_mio_first && cfg->line_outs == 1 &&
cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
err = fill_multi_ios(codec, cfg->line_out_pins[0], true);
if (!err)
@@ -1554,7 +1578,7 @@ static int fill_and_eval_dacs(struct hda_codec *codec,
spec->private_dac_nids, spec->out_paths,
spec->main_out_badness);
- if (fill_mio_first &&
+ if (!spec->no_multi_io && fill_mio_first &&
cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
/* try to fill multi-io first */
err = fill_multi_ios(codec, cfg->line_out_pins[0], false);
@@ -1582,7 +1606,8 @@ static int fill_and_eval_dacs(struct hda_codec *codec,
return err;
badness += err;
}
- if (cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
+ if (!spec->no_multi_io &&
+ cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
err = fill_multi_ios(codec, cfg->line_out_pins[0], false);
if (err < 0)
return err;
@@ -1600,7 +1625,8 @@ static int fill_and_eval_dacs(struct hda_codec *codec,
check_aamix_out_path(codec, spec->speaker_paths[0]);
}
- if (cfg->hp_outs && cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
+ if (!spec->no_multi_io &&
+ cfg->hp_outs && cfg->line_out_type == AUTO_PIN_SPEAKER_OUT)
if (count_multiio_pins(codec, cfg->hp_pins[0]) >= 2)
spec->multi_ios = 1; /* give badness */
@@ -3724,7 +3750,8 @@ static int mux_select(struct hda_codec *codec, unsigned int adc_idx,
/* check each pin in the given array; returns true if any of them is plugged */
static bool detect_jacks(struct hda_codec *codec, int num_pins, hda_nid_t *pins)
{
- int i, present = 0;
+ int i;
+ bool present = false;
for (i = 0; i < num_pins; i++) {
hda_nid_t nid = pins[i];
@@ -3733,14 +3760,15 @@ static bool detect_jacks(struct hda_codec *codec, int num_pins, hda_nid_t *pins)
/* don't detect pins retasked as inputs */
if (snd_hda_codec_get_pin_target(codec, nid) & AC_PINCTL_IN_EN)
continue;
- present |= snd_hda_jack_detect(codec, nid);
+ if (snd_hda_jack_detect_state(codec, nid) == HDA_JACK_PRESENT)
+ present = true;
}
return present;
}
/* standard HP/line-out auto-mute helper */
static void do_automute(struct hda_codec *codec, int num_pins, hda_nid_t *pins,
- bool mute)
+ int *paths, bool mute)
{
struct hda_gen_spec *spec = codec->spec;
int i;
@@ -3752,10 +3780,19 @@ static void do_automute(struct hda_codec *codec, int num_pins, hda_nid_t *pins,
break;
if (spec->auto_mute_via_amp) {
+ struct nid_path *path;
+ hda_nid_t mute_nid;
+
+ path = snd_hda_get_path_from_idx(codec, paths[i]);
+ if (!path)
+ continue;
+ mute_nid = get_amp_nid_(path->ctls[NID_PATH_MUTE_CTL]);
+ if (!mute_nid)
+ continue;
if (mute)
- spec->mute_bits |= (1ULL << nid);
+ spec->mute_bits |= (1ULL << mute_nid);
else
- spec->mute_bits &= ~(1ULL << nid);
+ spec->mute_bits &= ~(1ULL << mute_nid);
set_pin_eapd(codec, nid, !mute);
continue;
}
@@ -3786,14 +3823,19 @@ static void do_automute(struct hda_codec *codec, int num_pins, hda_nid_t *pins,
void snd_hda_gen_update_outputs(struct hda_codec *codec)
{
struct hda_gen_spec *spec = codec->spec;
+ int *paths;
int on;
/* Control HP pins/amps depending on master_mute state;
* in general, HP pins/amps control should be enabled in all cases,
* but currently set only for master_mute, just to be safe
*/
+ if (spec->autocfg.line_out_type == AUTO_PIN_HP_OUT)
+ paths = spec->out_paths;
+ else
+ paths = spec->hp_paths;
do_automute(codec, ARRAY_SIZE(spec->autocfg.hp_pins),
- spec->autocfg.hp_pins, spec->master_mute);
+ spec->autocfg.hp_pins, paths, spec->master_mute);
if (!spec->automute_speaker)
on = 0;
@@ -3801,8 +3843,12 @@ void snd_hda_gen_update_outputs(struct hda_codec *codec)
on = spec->hp_jack_present | spec->line_jack_present;
on |= spec->master_mute;
spec->speaker_muted = on;
+ if (spec->autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT)
+ paths = spec->out_paths;
+ else
+ paths = spec->speaker_paths;
do_automute(codec, ARRAY_SIZE(spec->autocfg.speaker_pins),
- spec->autocfg.speaker_pins, on);
+ spec->autocfg.speaker_pins, paths, on);
/* toggle line-out mutes if needed, too */
/* if LO is a copy of either HP or Speaker, don't need to handle it */
@@ -3815,8 +3861,9 @@ void snd_hda_gen_update_outputs(struct hda_codec *codec)
on = spec->hp_jack_present;
on |= spec->master_mute;
spec->line_out_muted = on;
+ paths = spec->out_paths;
do_automute(codec, ARRAY_SIZE(spec->autocfg.line_out_pins),
- spec->autocfg.line_out_pins, on);
+ spec->autocfg.line_out_pins, paths, on);
}
EXPORT_SYMBOL_HDA(snd_hda_gen_update_outputs);
@@ -3887,7 +3934,7 @@ void snd_hda_gen_mic_autoswitch(struct hda_codec *codec, struct hda_jack_tbl *ja
/* don't detect pins retasked as outputs */
if (snd_hda_codec_get_pin_target(codec, pin) & AC_PINCTL_OUT_EN)
continue;
- if (snd_hda_jack_detect(codec, pin)) {
+ if (snd_hda_jack_detect_state(codec, pin) == HDA_JACK_PRESENT) {
mux_select(codec, 0, spec->am_entry[i].idx);
return;
}
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index e199a852388..48d44026705 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -220,6 +220,7 @@ struct hda_gen_spec {
unsigned int hp_mic:1; /* Allow HP as a mic-in */
unsigned int suppress_hp_mic_detect:1; /* Don't detect HP/mic */
unsigned int no_primary_hp:1; /* Don't prefer HP pins to speaker pins */
+ unsigned int no_multi_io:1; /* Don't try multi I/O config */
unsigned int multi_cap_vol:1; /* allow multiple capture xxx volumes */
unsigned int inv_dmic_split:1; /* inverted dmic w/a for conexant */
unsigned int own_eapd_ctl:1; /* set EAPD by own function */
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index ce67608734b..fe0bda19de1 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -295,7 +295,7 @@ static ssize_t type##_store(struct device *dev, \
struct snd_hwdep *hwdep = dev_get_drvdata(dev); \
struct hda_codec *codec = hwdep->private_data; \
unsigned long val; \
- int err = strict_strtoul(buf, 0, &val); \
+ int err = kstrtoul(buf, 0, &val); \
if (err < 0) \
return err; \
codec->type = val; \
@@ -654,7 +654,7 @@ int snd_hda_get_int_hint(struct hda_codec *codec, const char *key, int *valp)
p = snd_hda_get_hint(codec, key);
if (!p)
ret = -ENOENT;
- else if (strict_strtoul(p, 0, &val))
+ else if (kstrtoul(p, 0, &val))
ret = -EINVAL;
else {
*valp = val;
@@ -751,7 +751,7 @@ static void parse_##name##_mode(char *buf, struct hda_bus *bus, \
struct hda_codec **codecp) \
{ \
unsigned long val; \
- if (!strict_strtoul(buf, 0, &val)) \
+ if (!kstrtoul(buf, 0, &val)) \
(*codecp)->name = val; \
}
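These hunks replace the deprecated strict_strtoul() with kstrtoul(), which returns 0 on success or a negative errno. A minimal sketch of the checking pattern, using a hypothetical helper name:

static int parse_ulong_attr(const char *buf, unsigned int *out)
{
	unsigned long val;
	int err = kstrtoul(buf, 0, &val);

	if (err < 0)
		return err;
	*out = val;
	return 0;
}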
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 8860dd52952..e54ebd53084 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -555,6 +555,9 @@ struct azx {
#ifdef CONFIG_SND_HDA_DSP_LOADER
struct azx_dev saved_azx_dev;
#endif
+
+ /* secondary power domain for hdmi audio under vga device */
+ struct dev_pm_domain hdmi_pm_domain;
};
#define CREATE_TRACE_POINTS
@@ -1160,7 +1163,7 @@ static int azx_reset(struct azx *chip, int full_reset)
goto __skip;
/* clear STATESTS */
- azx_writeb(chip, STATESTS, STATESTS_INT_MASK);
+ azx_writew(chip, STATESTS, STATESTS_INT_MASK);
/* reset controller */
azx_enter_link_reset(chip);
@@ -1242,7 +1245,7 @@ static void azx_int_clear(struct azx *chip)
}
/* clear STATESTS */
- azx_writeb(chip, STATESTS, STATESTS_INT_MASK);
+ azx_writew(chip, STATESTS, STATESTS_INT_MASK);
/* clear rirb status */
azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
@@ -1397,8 +1400,9 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
int i, ok;
#ifdef CONFIG_PM_RUNTIME
- if (chip->pci->dev.power.runtime_status != RPM_ACTIVE)
- return IRQ_NONE;
+ if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
+ if (chip->pci->dev.power.runtime_status != RPM_ACTIVE)
+ return IRQ_NONE;
#endif
spin_lock(&chip->reg_lock);
@@ -1409,7 +1413,7 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
}
status = azx_readl(chip, INTSTS);
- if (status == 0) {
+ if (status == 0 || status == 0xffffffff) {
spin_unlock(&chip->reg_lock);
return IRQ_NONE;
}
@@ -1451,8 +1455,8 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
#if 0
/* clear state status int */
- if (azx_readb(chip, STATESTS) & 0x04)
- azx_writeb(chip, STATESTS, 0x04);
+ if (azx_readw(chip, STATESTS) & 0x04)
+ azx_writew(chip, STATESTS, 0x04);
#endif
spin_unlock(&chip->reg_lock);
@@ -2971,6 +2975,16 @@ static int azx_runtime_suspend(struct device *dev)
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
+ if (chip->disabled)
+ return 0;
+
+ if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
+ return 0;
+
+ /* enable controller wake up event */
+ azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) |
+ STATESTS_INT_MASK);
+
azx_stop_chip(chip);
azx_enter_link_reset(chip);
azx_clear_irq_pending(chip);
@@ -2983,11 +2997,37 @@ static int azx_runtime_resume(struct device *dev)
{
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
+ struct hda_bus *bus;
+ struct hda_codec *codec;
+ int status;
+
+ if (chip->disabled)
+ return 0;
+
+ if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
+ return 0;
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
hda_display_power(true);
+
+ /* Read STATESTS before controller reset */
+ status = azx_readw(chip, STATESTS);
+
azx_init_pci(chip);
azx_init_chip(chip, 1);
+
+ bus = chip->bus;
+ if (status && bus) {
+ list_for_each_entry(codec, &bus->codec_list, list)
+ if (status & (1 << codec->addr))
+ queue_delayed_work(codec->bus->workq,
+ &codec->jackpoll_work, codec->jackpoll_interval);
+ }
+
+ /* disable controller wake-up event */
+ azx_writew(chip, WAKEEN, azx_readw(chip, WAKEEN) &
+ ~STATESTS_INT_MASK);
+
return 0;
}
@@ -2996,6 +3036,9 @@ static int azx_runtime_idle(struct device *dev)
struct snd_card *card = dev_get_drvdata(dev);
struct azx *chip = card->private_data;
+ if (chip->disabled)
+ return 0;
+
if (!power_save_controller ||
!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
return -EBUSY;
@@ -3078,13 +3121,19 @@ static void azx_vs_set_state(struct pci_dev *pci,
"%s: %s via VGA-switcheroo\n", pci_name(chip->pci),
disabled ? "Disabling" : "Enabling");
if (disabled) {
+ pm_runtime_put_sync_suspend(&pci->dev);
azx_suspend(&pci->dev);
+ /* when we get suspended by vga switcheroo we end up in D3cold;
+ * we have no ACPI handle, so pci/acpi can't put us there,
+ * so we put ourselves there */
+ pci->current_state = PCI_D3cold;
chip->disabled = true;
if (snd_hda_lock_devices(chip->bus))
snd_printk(KERN_WARNING SFX "%s: Cannot lock devices!\n",
pci_name(chip->pci));
} else {
snd_hda_unlock_devices(chip->bus);
+ pm_runtime_get_noresume(&pci->dev);
chip->disabled = false;
azx_resume(&pci->dev);
}
@@ -3139,6 +3188,9 @@ static int register_vga_switcheroo(struct azx *chip)
if (err < 0)
return err;
chip->vga_switcheroo_registered = 1;
+
+ /* register as an optimus hdmi audio power domain */
+ vga_switcheroo_init_domain_pm_optimus_hdmi_audio(&chip->pci->dev, &chip->hdmi_pm_domain);
return 0;
}
#else
@@ -3831,11 +3883,13 @@ static int azx_probe_continue(struct azx *chip)
/* Request power well for Haswell HDA controller and codec */
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
+#ifdef CONFIG_SND_HDA_I915
err = hda_i915_init();
if (err < 0) {
snd_printk(KERN_ERR SFX "Error request power-well from i915\n");
goto out_free;
}
+#endif
hda_display_power(true);
}
@@ -3887,7 +3941,7 @@ static int azx_probe_continue(struct azx *chip)
power_down_all_codecs(chip);
azx_notifier_register(chip);
azx_add_card_list(chip);
- if (chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
+ if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME) || chip->use_vga_switcheroo)
pm_runtime_put_noidle(&pci->dev);
return 0;
diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
index 3fd2973183e..05b3e3e9108 100644
--- a/sound/pci/hda/hda_jack.c
+++ b/sound/pci/hda/hda_jack.c
@@ -194,18 +194,24 @@ u32 snd_hda_pin_sense(struct hda_codec *codec, hda_nid_t nid)
EXPORT_SYMBOL_HDA(snd_hda_pin_sense);
/**
- * snd_hda_jack_detect - query pin Presence Detect status
+ * snd_hda_jack_detect_state - query pin Presence Detect status
* @codec: the CODEC to sense
* @nid: the pin NID to sense
*
- * Query and return the pin's Presence Detect status.
+ * Query and return the pin's Presence Detect status, as either
+ * HDA_JACK_NOT_PRESENT, HDA_JACK_PRESENT or HDA_JACK_PHANTOM.
*/
-int snd_hda_jack_detect(struct hda_codec *codec, hda_nid_t nid)
+int snd_hda_jack_detect_state(struct hda_codec *codec, hda_nid_t nid)
{
- u32 sense = snd_hda_pin_sense(codec, nid);
- return get_jack_plug_state(sense);
+ struct hda_jack_tbl *jack = snd_hda_jack_tbl_get(codec, nid);
+ if (jack && jack->phantom_jack)
+ return HDA_JACK_PHANTOM;
+ else if (snd_hda_pin_sense(codec, nid) & AC_PINSENSE_PRESENCE)
+ return HDA_JACK_PRESENT;
+ else
+ return HDA_JACK_NOT_PRESENT;
}
-EXPORT_SYMBOL_HDA(snd_hda_jack_detect);
+EXPORT_SYMBOL_HDA(snd_hda_jack_detect_state);
/**
* snd_hda_jack_detect_enable - enable the jack-detection
@@ -247,8 +253,8 @@ EXPORT_SYMBOL_HDA(snd_hda_jack_detect_enable);
int snd_hda_jack_set_gating_jack(struct hda_codec *codec, hda_nid_t gated_nid,
hda_nid_t gating_nid)
{
- struct hda_jack_tbl *gated = snd_hda_jack_tbl_get(codec, gated_nid);
- struct hda_jack_tbl *gating = snd_hda_jack_tbl_get(codec, gating_nid);
+ struct hda_jack_tbl *gated = snd_hda_jack_tbl_new(codec, gated_nid);
+ struct hda_jack_tbl *gating = snd_hda_jack_tbl_new(codec, gating_nid);
if (!gated || !gating)
return -EINVAL;
diff --git a/sound/pci/hda/hda_jack.h b/sound/pci/hda/hda_jack.h
index ec12abd4526..379420c44ee 100644
--- a/sound/pci/hda/hda_jack.h
+++ b/sound/pci/hda/hda_jack.h
@@ -75,7 +75,18 @@ int snd_hda_jack_set_gating_jack(struct hda_codec *codec, hda_nid_t gated_nid,
hda_nid_t gating_nid);
u32 snd_hda_pin_sense(struct hda_codec *codec, hda_nid_t nid);
-int snd_hda_jack_detect(struct hda_codec *codec, hda_nid_t nid);
+
+/* the jack state returned from snd_hda_jack_detect_state() */
+enum {
+ HDA_JACK_NOT_PRESENT, HDA_JACK_PRESENT, HDA_JACK_PHANTOM,
+};
+
+int snd_hda_jack_detect_state(struct hda_codec *codec, hda_nid_t nid);
+
+static inline bool snd_hda_jack_detect(struct hda_codec *codec, hda_nid_t nid)
+{
+ return snd_hda_jack_detect_state(codec, nid) != HDA_JACK_NOT_PRESENT;
+}
bool is_jack_detectable(struct hda_codec *codec, hda_nid_t nid);
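A hypothetical caller of the new tri-state helper added above; the inline snd_hda_jack_detect() wrapper keeps existing boolean callers working:

static bool pin_plugged(struct hda_codec *codec, hda_nid_t pin)
{
	switch (snd_hda_jack_detect_state(codec, pin)) {
	case HDA_JACK_PRESENT:
	case HDA_JACK_PHANTOM:	/* fixed, always-present "phantom" jack */
		return true;
	default:
		return false;
	}
}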
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index 9760f001916..a8cb22eec89 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -582,6 +582,36 @@ static void print_gpio(struct snd_info_buffer *buffer,
print_nid_array(buffer, codec, nid, &codec->nids);
}
+static void print_device_list(struct snd_info_buffer *buffer,
+ struct hda_codec *codec, hda_nid_t nid)
+{
+ int i, curr = -1;
+ u8 dev_list[AC_MAX_DEV_LIST_LEN];
+ int devlist_len;
+
+ devlist_len = snd_hda_get_devices(codec, nid, dev_list,
+ AC_MAX_DEV_LIST_LEN);
+ snd_iprintf(buffer, " Devices: %d\n", devlist_len);
+ if (devlist_len <= 0)
+ return;
+
+ curr = snd_hda_codec_read(codec, nid, 0,
+ AC_VERB_GET_DEVICE_SEL, 0);
+
+ for (i = 0; i < devlist_len; i++) {
+ if (i == curr)
+ snd_iprintf(buffer, " *");
+ else
+ snd_iprintf(buffer, " ");
+
+ snd_iprintf(buffer,
+ "Dev %02d: PD = %d, ELDV = %d, IA = %d\n", i,
+ !!(dev_list[i] & AC_DE_PD),
+ !!(dev_list[i] & AC_DE_ELDV),
+ !!(dev_list[i] & AC_DE_IA));
+ }
+}
+
static void print_codec_info(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
@@ -751,6 +781,9 @@ static void print_codec_info(struct snd_info_entry *entry,
(wid_caps & AC_WCAP_DELAY) >>
AC_WCAP_DELAY_SHIFT);
+ if (wid_type == AC_WID_PIN && codec->dp_mst)
+ print_device_list(buffer, codec, nid);
+
if (wid_caps & AC_WCAP_CONN_LIST)
print_conn_list(buffer, codec, nid, wid_type,
conn, conn_len);
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index d97f0d61a15..0cbdd87dde6 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -32,7 +32,6 @@
#include "hda_jack.h"
#include "hda_generic.h"
-#define ENABLE_AD_STATIC_QUIRKS
struct ad198x_spec {
struct hda_gen_spec gen;
@@ -43,114 +42,8 @@ struct ad198x_spec {
hda_nid_t eapd_nid;
unsigned int beep_amp; /* beep amp value, set via set_beep_amp() */
-
-#ifdef ENABLE_AD_STATIC_QUIRKS
- const struct snd_kcontrol_new *mixers[6];
- int num_mixers;
- const struct hda_verb *init_verbs[6]; /* initialization verbs
- * don't forget NULL termination!
- */
- unsigned int num_init_verbs;
-
- /* playback */
- struct hda_multi_out multiout; /* playback set-up
- * max_channels, dacs must be set
- * dig_out_nid and hp_nid are optional
- */
- unsigned int cur_eapd;
- unsigned int need_dac_fix;
-
- /* capture */
- unsigned int num_adc_nids;
- const hda_nid_t *adc_nids;
- hda_nid_t dig_in_nid; /* digital-in NID; optional */
-
- /* capture source */
- const struct hda_input_mux *input_mux;
- const hda_nid_t *capsrc_nids;
- unsigned int cur_mux[3];
-
- /* channel model */
- const struct hda_channel_mode *channel_mode;
- int num_channel_mode;
-
- /* PCM information */
- struct hda_pcm pcm_rec[3]; /* used in alc_build_pcms() */
-
- unsigned int spdif_route;
-
- unsigned int jack_present: 1;
- unsigned int inv_jack_detect: 1;/* inverted jack-detection */
- unsigned int analog_beep: 1; /* analog beep input present */
- unsigned int avoid_init_slave_vol:1;
-
-#ifdef CONFIG_PM
- struct hda_loopback_check loopback;
-#endif
- /* for virtual master */
- hda_nid_t vmaster_nid;
- const char * const *slave_vols;
- const char * const *slave_sws;
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-};
-
-#ifdef ENABLE_AD_STATIC_QUIRKS
-/*
- * input MUX handling (common part)
- */
-static int ad198x_mux_enum_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct ad198x_spec *spec = codec->spec;
-
- return snd_hda_input_mux_info(spec->input_mux, uinfo);
-}
-
-static int ad198x_mux_enum_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct ad198x_spec *spec = codec->spec;
- unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
-
- ucontrol->value.enumerated.item[0] = spec->cur_mux[adc_idx];
- return 0;
-}
-
-static int ad198x_mux_enum_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct ad198x_spec *spec = codec->spec;
- unsigned int adc_idx = snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
-
- return snd_hda_input_mux_put(codec, spec->input_mux, ucontrol,
- spec->capsrc_nids[adc_idx],
- &spec->cur_mux[adc_idx]);
-}
-
-/*
- * initialization (common callbacks)
- */
-static int ad198x_init(struct hda_codec *codec)
-{
- struct ad198x_spec *spec = codec->spec;
- int i;
-
- for (i = 0; i < spec->num_init_verbs; i++)
- snd_hda_sequence_write(codec, spec->init_verbs[i]);
- return 0;
-}
-
-static const char * const ad_slave_pfxs[] = {
- "Front", "Surround", "Center", "LFE", "Side",
- "Headphone", "Mono", "Speaker", "IEC958",
- NULL
};
-static const char * const ad1988_6stack_fp_slave_pfxs[] = {
- "Front", "Surround", "Center", "LFE", "Side", "IEC958",
- NULL
-};
-#endif /* ENABLE_AD_STATIC_QUIRKS */
#ifdef CONFIG_SND_HDA_INPUT_BEEP
/* additional beep mixers; the actual parameters are overwritten at build */
@@ -160,12 +53,6 @@ static const struct snd_kcontrol_new ad_beep_mixer[] = {
{ } /* end */
};
-static const struct snd_kcontrol_new ad_beep2_mixer[] = {
- HDA_CODEC_VOLUME("Digital Beep Playback Volume", 0, 0, HDA_OUTPUT),
- HDA_CODEC_MUTE_BEEP("Digital Beep Playback Switch", 0, 0, HDA_OUTPUT),
- { } /* end */
-};
-
#define set_beep_amp(spec, nid, idx, dir) \
((spec)->beep_amp = HDA_COMPOSE_AMP_VAL(nid, 1, idx, dir)) /* mono */
#else
@@ -181,8 +68,7 @@ static int create_beep_ctls(struct hda_codec *codec)
if (!spec->beep_amp)
return 0;
- knew = spec->analog_beep ? ad_beep2_mixer : ad_beep_mixer;
- for ( ; knew->name; knew++) {
+ for (knew = ad_beep_mixer ; knew->name; knew++) {
int err;
struct snd_kcontrol *kctl;
kctl = snd_ctl_new1(knew, codec);
@@ -199,268 +85,6 @@ static int create_beep_ctls(struct hda_codec *codec)
#define create_beep_ctls(codec) 0
#endif
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static int ad198x_build_controls(struct hda_codec *codec)
-{
- struct ad198x_spec *spec = codec->spec;
- struct snd_kcontrol *kctl;
- unsigned int i;
- int err;
-
- for (i = 0; i < spec->num_mixers; i++) {
- err = snd_hda_add_new_ctls(codec, spec->mixers[i]);
- if (err < 0)
- return err;
- }
- if (spec->multiout.dig_out_nid) {
- err = snd_hda_create_spdif_out_ctls(codec,
- spec->multiout.dig_out_nid,
- spec->multiout.dig_out_nid);
- if (err < 0)
- return err;
- err = snd_hda_create_spdif_share_sw(codec,
- &spec->multiout);
- if (err < 0)
- return err;
- spec->multiout.share_spdif = 1;
- }
- if (spec->dig_in_nid) {
- err = snd_hda_create_spdif_in_ctls(codec, spec->dig_in_nid);
- if (err < 0)
- return err;
- }
-
- /* create beep controls if needed */
- err = create_beep_ctls(codec);
- if (err < 0)
- return err;
-
- /* if we have no master control, let's create it */
- if (!snd_hda_find_mixer_ctl(codec, "Master Playback Volume")) {
- unsigned int vmaster_tlv[4];
- snd_hda_set_vmaster_tlv(codec, spec->vmaster_nid,
- HDA_OUTPUT, vmaster_tlv);
- err = __snd_hda_add_vmaster(codec, "Master Playback Volume",
- vmaster_tlv,
- (spec->slave_vols ?
- spec->slave_vols : ad_slave_pfxs),
- "Playback Volume",
- !spec->avoid_init_slave_vol, NULL);
- if (err < 0)
- return err;
- }
- if (!snd_hda_find_mixer_ctl(codec, "Master Playback Switch")) {
- err = snd_hda_add_vmaster(codec, "Master Playback Switch",
- NULL,
- (spec->slave_sws ?
- spec->slave_sws : ad_slave_pfxs),
- "Playback Switch");
- if (err < 0)
- return err;
- }
-
- /* assign Capture Source enums to NID */
- kctl = snd_hda_find_mixer_ctl(codec, "Capture Source");
- if (!kctl)
- kctl = snd_hda_find_mixer_ctl(codec, "Input Source");
- for (i = 0; kctl && i < kctl->count; i++) {
- err = snd_hda_add_nid(codec, kctl, i, spec->capsrc_nids[i]);
- if (err < 0)
- return err;
- }
-
- /* assign IEC958 enums to NID */
- kctl = snd_hda_find_mixer_ctl(codec,
- SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source");
- if (kctl) {
- err = snd_hda_add_nid(codec, kctl, 0,
- spec->multiout.dig_out_nid);
- if (err < 0)
- return err;
- }
-
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int ad198x_check_power_status(struct hda_codec *codec, hda_nid_t nid)
-{
- struct ad198x_spec *spec = codec->spec;
- return snd_hda_check_amp_list_power(codec, &spec->loopback, nid);
-}
-#endif
-
-/*
- * Analog playback callbacks
- */
-static int ad198x_playback_pcm_open(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- struct snd_pcm_substream *substream)
-{
- struct ad198x_spec *spec = codec->spec;
- return snd_hda_multi_out_analog_open(codec, &spec->multiout, substream,
- hinfo);
-}
-
-static int ad198x_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- unsigned int stream_tag,
- unsigned int format,
- struct snd_pcm_substream *substream)
-{
- struct ad198x_spec *spec = codec->spec;
- return snd_hda_multi_out_analog_prepare(codec, &spec->multiout, stream_tag,
- format, substream);
-}
-
-static int ad198x_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- struct snd_pcm_substream *substream)
-{
- struct ad198x_spec *spec = codec->spec;
- return snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
-}
-
-/*
- * Digital out
- */
-static int ad198x_dig_playback_pcm_open(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- struct snd_pcm_substream *substream)
-{
- struct ad198x_spec *spec = codec->spec;
- return snd_hda_multi_out_dig_open(codec, &spec->multiout);
-}
-
-static int ad198x_dig_playback_pcm_close(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- struct snd_pcm_substream *substream)
-{
- struct ad198x_spec *spec = codec->spec;
- return snd_hda_multi_out_dig_close(codec, &spec->multiout);
-}
-
-static int ad198x_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- unsigned int stream_tag,
- unsigned int format,
- struct snd_pcm_substream *substream)
-{
- struct ad198x_spec *spec = codec->spec;
- return snd_hda_multi_out_dig_prepare(codec, &spec->multiout, stream_tag,
- format, substream);
-}
-
-static int ad198x_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- struct snd_pcm_substream *substream)
-{
- struct ad198x_spec *spec = codec->spec;
- return snd_hda_multi_out_dig_cleanup(codec, &spec->multiout);
-}
-
-/*
- * Analog capture
- */
-static int ad198x_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- unsigned int stream_tag,
- unsigned int format,
- struct snd_pcm_substream *substream)
-{
- struct ad198x_spec *spec = codec->spec;
- snd_hda_codec_setup_stream(codec, spec->adc_nids[substream->number],
- stream_tag, 0, format);
- return 0;
-}
-
-static int ad198x_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- struct snd_pcm_substream *substream)
-{
- struct ad198x_spec *spec = codec->spec;
- snd_hda_codec_cleanup_stream(codec, spec->adc_nids[substream->number]);
- return 0;
-}
-
-/*
- */
-static const struct hda_pcm_stream ad198x_pcm_analog_playback = {
- .substreams = 1,
- .channels_min = 2,
- .channels_max = 6, /* changed later */
- .nid = 0, /* fill later */
- .ops = {
- .open = ad198x_playback_pcm_open,
- .prepare = ad198x_playback_pcm_prepare,
- .cleanup = ad198x_playback_pcm_cleanup,
- },
-};
-
-static const struct hda_pcm_stream ad198x_pcm_analog_capture = {
- .substreams = 1,
- .channels_min = 2,
- .channels_max = 2,
- .nid = 0, /* fill later */
- .ops = {
- .prepare = ad198x_capture_pcm_prepare,
- .cleanup = ad198x_capture_pcm_cleanup
- },
-};
-
-static const struct hda_pcm_stream ad198x_pcm_digital_playback = {
- .substreams = 1,
- .channels_min = 2,
- .channels_max = 2,
- .nid = 0, /* fill later */
- .ops = {
- .open = ad198x_dig_playback_pcm_open,
- .close = ad198x_dig_playback_pcm_close,
- .prepare = ad198x_dig_playback_pcm_prepare,
- .cleanup = ad198x_dig_playback_pcm_cleanup
- },
-};
-
-static const struct hda_pcm_stream ad198x_pcm_digital_capture = {
- .substreams = 1,
- .channels_min = 2,
- .channels_max = 2,
- /* NID is set in alc_build_pcms */
-};
-
-static int ad198x_build_pcms(struct hda_codec *codec)
-{
- struct ad198x_spec *spec = codec->spec;
- struct hda_pcm *info = spec->pcm_rec;
-
- codec->num_pcms = 1;
- codec->pcm_info = info;
-
- info->name = "AD198x Analog";
- info->stream[SNDRV_PCM_STREAM_PLAYBACK] = ad198x_pcm_analog_playback;
- info->stream[SNDRV_PCM_STREAM_PLAYBACK].channels_max = spec->multiout.max_channels;
- info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->multiout.dac_nids[0];
- info->stream[SNDRV_PCM_STREAM_CAPTURE] = ad198x_pcm_analog_capture;
- info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams = spec->num_adc_nids;
- info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adc_nids[0];
-
- if (spec->multiout.dig_out_nid) {
- info++;
- codec->num_pcms++;
- codec->spdif_status_reset = 1;
- info->name = "AD198x Digital";
- info->pcm_type = HDA_PCM_TYPE_SPDIF;
- info->stream[SNDRV_PCM_STREAM_PLAYBACK] = ad198x_pcm_digital_playback;
- info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->multiout.dig_out_nid;
- if (spec->dig_in_nid) {
- info->stream[SNDRV_PCM_STREAM_CAPTURE] = ad198x_pcm_digital_capture;
- info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->dig_in_nid;
- }
- }
-
- return 0;
-}
-#endif /* ENABLE_AD_STATIC_QUIRKS */
static void ad198x_power_eapd_write(struct hda_codec *codec, hda_nid_t front,
hda_nid_t hp)
@@ -507,18 +131,6 @@ static void ad198x_shutup(struct hda_codec *codec)
ad198x_power_eapd(codec);
}
-static void ad198x_free(struct hda_codec *codec)
-{
- struct ad198x_spec *spec = codec->spec;
-
- if (!spec)
- return;
-
- snd_hda_gen_spec_free(&spec->gen);
- kfree(spec);
- snd_hda_detach_beep_device(codec);
-}
-
#ifdef CONFIG_PM
static int ad198x_suspend(struct hda_codec *codec)
{
@@ -527,65 +139,6 @@ static int ad198x_suspend(struct hda_codec *codec)
}
#endif
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static const struct hda_codec_ops ad198x_patch_ops = {
- .build_controls = ad198x_build_controls,
- .build_pcms = ad198x_build_pcms,
- .init = ad198x_init,
- .free = ad198x_free,
-#ifdef CONFIG_PM
- .check_power_status = ad198x_check_power_status,
- .suspend = ad198x_suspend,
-#endif
- .reboot_notify = ad198x_shutup,
-};
-
-
-/*
- * EAPD control
- * the private value = nid
- */
-#define ad198x_eapd_info snd_ctl_boolean_mono_info
-
-static int ad198x_eapd_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct ad198x_spec *spec = codec->spec;
- if (codec->inv_eapd)
- ucontrol->value.integer.value[0] = ! spec->cur_eapd;
- else
- ucontrol->value.integer.value[0] = spec->cur_eapd;
- return 0;
-}
-
-static int ad198x_eapd_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct ad198x_spec *spec = codec->spec;
- hda_nid_t nid = kcontrol->private_value & 0xff;
- unsigned int eapd;
- eapd = !!ucontrol->value.integer.value[0];
- if (codec->inv_eapd)
- eapd = !eapd;
- if (eapd == spec->cur_eapd)
- return 0;
- spec->cur_eapd = eapd;
- snd_hda_codec_write_cache(codec, nid,
- 0, AC_VERB_SET_EAPD_BTLENABLE,
- eapd ? 0x02 : 0x00);
- return 1;
-}
-
-static int ad198x_ch_mode_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo);
-static int ad198x_ch_mode_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol);
-static int ad198x_ch_mode_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol);
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
/*
* Automatic parse of I/O pins from the BIOS configuration
@@ -646,537 +199,6 @@ static int ad198x_parse_auto_config(struct hda_codec *codec)
* AD1986A specific
*/
-#ifdef ENABLE_AD_STATIC_QUIRKS
-#define AD1986A_SPDIF_OUT 0x02
-#define AD1986A_FRONT_DAC 0x03
-#define AD1986A_SURR_DAC 0x04
-#define AD1986A_CLFE_DAC 0x05
-#define AD1986A_ADC 0x06
-
-static const hda_nid_t ad1986a_dac_nids[3] = {
- AD1986A_FRONT_DAC, AD1986A_SURR_DAC, AD1986A_CLFE_DAC
-};
-static const hda_nid_t ad1986a_adc_nids[1] = { AD1986A_ADC };
-static const hda_nid_t ad1986a_capsrc_nids[1] = { 0x12 };
-
-static const struct hda_input_mux ad1986a_capture_source = {
- .num_items = 7,
- .items = {
- { "Mic", 0x0 },
- { "CD", 0x1 },
- { "Aux", 0x3 },
- { "Line", 0x4 },
- { "Mix", 0x5 },
- { "Mono", 0x6 },
- { "Phone", 0x7 },
- },
-};
-
-
-static const struct hda_bind_ctls ad1986a_bind_pcm_vol = {
- .ops = &snd_hda_bind_vol,
- .values = {
- HDA_COMPOSE_AMP_VAL(AD1986A_FRONT_DAC, 3, 0, HDA_OUTPUT),
- HDA_COMPOSE_AMP_VAL(AD1986A_SURR_DAC, 3, 0, HDA_OUTPUT),
- HDA_COMPOSE_AMP_VAL(AD1986A_CLFE_DAC, 3, 0, HDA_OUTPUT),
- 0
- },
-};
-
-static const struct hda_bind_ctls ad1986a_bind_pcm_sw = {
- .ops = &snd_hda_bind_sw,
- .values = {
- HDA_COMPOSE_AMP_VAL(AD1986A_FRONT_DAC, 3, 0, HDA_OUTPUT),
- HDA_COMPOSE_AMP_VAL(AD1986A_SURR_DAC, 3, 0, HDA_OUTPUT),
- HDA_COMPOSE_AMP_VAL(AD1986A_CLFE_DAC, 3, 0, HDA_OUTPUT),
- 0
- },
-};
-
-/*
- * mixers
- */
-static const struct snd_kcontrol_new ad1986a_mixers[] = {
- /*
- * bind volumes/mutes of 3 DACs as a single PCM control for simplicity
- */
- HDA_BIND_VOL("PCM Playback Volume", &ad1986a_bind_pcm_vol),
- HDA_BIND_SW("PCM Playback Switch", &ad1986a_bind_pcm_sw),
- HDA_CODEC_VOLUME("Front Playback Volume", 0x1b, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Front Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Surround Playback Volume", 0x1c, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Surround Playback Switch", 0x1c, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x1d, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x1d, 2, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_MONO("Center Playback Switch", 0x1d, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_MONO("LFE Playback Switch", 0x1d, 2, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Headphone Playback Volume", 0x1a, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Headphone Playback Switch", 0x1a, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("CD Playback Volume", 0x15, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("CD Playback Switch", 0x15, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Line Playback Volume", 0x17, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Line Playback Switch", 0x17, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Aux Playback Volume", 0x16, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Aux Playback Switch", 0x16, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Boost Volume", 0x0f, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mono Playback Volume", 0x1e, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Mono Playback Switch", 0x1e, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Capture Source",
- .info = ad198x_mux_enum_info,
- .get = ad198x_mux_enum_get,
- .put = ad198x_mux_enum_put,
- },
- HDA_CODEC_MUTE("Stereo Downmix Switch", 0x09, 0x0, HDA_OUTPUT),
- { } /* end */
-};
-
-/* additional mixers for 3stack mode */
-static const struct snd_kcontrol_new ad1986a_3st_mixers[] = {
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Channel Mode",
- .info = ad198x_ch_mode_info,
- .get = ad198x_ch_mode_get,
- .put = ad198x_ch_mode_put,
- },
- { } /* end */
-};
-
-/* laptop model - 2ch only */
-static const hda_nid_t ad1986a_laptop_dac_nids[1] = { AD1986A_FRONT_DAC };
-
-/* master controls both pins 0x1a and 0x1b */
-static const struct hda_bind_ctls ad1986a_laptop_master_vol = {
- .ops = &snd_hda_bind_vol,
- .values = {
- HDA_COMPOSE_AMP_VAL(0x1a, 3, 0, HDA_OUTPUT),
- HDA_COMPOSE_AMP_VAL(0x1b, 3, 0, HDA_OUTPUT),
- 0,
- },
-};
-
-static const struct hda_bind_ctls ad1986a_laptop_master_sw = {
- .ops = &snd_hda_bind_sw,
- .values = {
- HDA_COMPOSE_AMP_VAL(0x1a, 3, 0, HDA_OUTPUT),
- HDA_COMPOSE_AMP_VAL(0x1b, 3, 0, HDA_OUTPUT),
- 0,
- },
-};
-
-static const struct snd_kcontrol_new ad1986a_laptop_mixers[] = {
- HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT),
- HDA_BIND_VOL("Master Playback Volume", &ad1986a_laptop_master_vol),
- HDA_BIND_SW("Master Playback Switch", &ad1986a_laptop_master_sw),
- HDA_CODEC_VOLUME("CD Playback Volume", 0x15, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("CD Playback Switch", 0x15, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Line Playback Volume", 0x17, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Line Playback Switch", 0x17, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Aux Playback Volume", 0x16, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Aux Playback Switch", 0x16, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Boost Volume", 0x0f, 0x0, HDA_OUTPUT),
- /*
- HDA_CODEC_VOLUME("Mono Playback Volume", 0x1e, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Mono Playback Switch", 0x1e, 0x0, HDA_OUTPUT), */
- HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Capture Source",
- .info = ad198x_mux_enum_info,
- .get = ad198x_mux_enum_get,
- .put = ad198x_mux_enum_put,
- },
- { } /* end */
-};
-
-/* laptop-eapd model - 2ch only */
-
-static const struct hda_input_mux ad1986a_laptop_eapd_capture_source = {
- .num_items = 3,
- .items = {
- { "Mic", 0x0 },
- { "Internal Mic", 0x4 },
- { "Mix", 0x5 },
- },
-};
-
-static const struct hda_input_mux ad1986a_automic_capture_source = {
- .num_items = 2,
- .items = {
- { "Mic", 0x0 },
- { "Mix", 0x5 },
- },
-};
-
-static const struct snd_kcontrol_new ad1986a_laptop_master_mixers[] = {
- HDA_BIND_VOL("Master Playback Volume", &ad1986a_laptop_master_vol),
- HDA_BIND_SW("Master Playback Switch", &ad1986a_laptop_master_sw),
- { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1986a_laptop_eapd_mixers[] = {
- HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Boost Volume", 0x0f, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Capture Source",
- .info = ad198x_mux_enum_info,
- .get = ad198x_mux_enum_get,
- .put = ad198x_mux_enum_put,
- },
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "External Amplifier",
- .subdevice = HDA_SUBDEV_NID_FLAG | 0x1b,
- .info = ad198x_eapd_info,
- .get = ad198x_eapd_get,
- .put = ad198x_eapd_put,
- .private_value = 0x1b, /* port-D */
- },
- { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1986a_laptop_intmic_mixers[] = {
- HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0, HDA_OUTPUT),
- { } /* end */
-};
-
-/* re-connect the mic boost input according to the jack sensing */
-static void ad1986a_automic(struct hda_codec *codec)
-{
- unsigned int present;
- present = snd_hda_jack_detect(codec, 0x1f);
- /* 0 = 0x1f, 2 = 0x1d, 4 = mixed */
- snd_hda_codec_write(codec, 0x0f, 0, AC_VERB_SET_CONNECT_SEL,
- present ? 0 : 2);
-}
-
-#define AD1986A_MIC_EVENT 0x36
-
-static void ad1986a_automic_unsol_event(struct hda_codec *codec,
- unsigned int res)
-{
- if ((res >> 26) != AD1986A_MIC_EVENT)
- return;
- ad1986a_automic(codec);
-}
-
-static int ad1986a_automic_init(struct hda_codec *codec)
-{
- ad198x_init(codec);
- ad1986a_automic(codec);
- return 0;
-}
-
-/* laptop-automute - 2ch only */
-
-static void ad1986a_update_hp(struct hda_codec *codec)
-{
- struct ad198x_spec *spec = codec->spec;
- unsigned int mute;
-
- if (spec->jack_present)
- mute = HDA_AMP_MUTE; /* mute internal speaker */
- else
- /* unmute internal speaker if necessary */
- mute = snd_hda_codec_amp_read(codec, 0x1a, 0, HDA_OUTPUT, 0);
- snd_hda_codec_amp_stereo(codec, 0x1b, HDA_OUTPUT, 0,
- HDA_AMP_MUTE, mute);
-}
-
-static void ad1986a_hp_automute(struct hda_codec *codec)
-{
- struct ad198x_spec *spec = codec->spec;
-
- spec->jack_present = snd_hda_jack_detect(codec, 0x1a);
- if (spec->inv_jack_detect)
- spec->jack_present = !spec->jack_present;
- ad1986a_update_hp(codec);
-}
-
-#define AD1986A_HP_EVENT 0x37
-
-static void ad1986a_hp_unsol_event(struct hda_codec *codec, unsigned int res)
-{
- if ((res >> 26) != AD1986A_HP_EVENT)
- return;
- ad1986a_hp_automute(codec);
-}
-
-static int ad1986a_hp_init(struct hda_codec *codec)
-{
- ad198x_init(codec);
- ad1986a_hp_automute(codec);
- return 0;
-}
-
-/* bind hp and internal speaker mute (with plug check) */
-static int ad1986a_hp_master_sw_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- int change = snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
- if (change)
- ad1986a_update_hp(codec);
- return change;
-}
-
-static const struct snd_kcontrol_new ad1986a_automute_master_mixers[] = {
- HDA_BIND_VOL("Master Playback Volume", &ad1986a_laptop_master_vol),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Master Playback Switch",
- .subdevice = HDA_SUBDEV_AMP_FLAG,
- .info = snd_hda_mixer_amp_switch_info,
- .get = snd_hda_mixer_amp_switch_get,
- .put = ad1986a_hp_master_sw_put,
- .private_value = HDA_COMPOSE_AMP_VAL(0x1a, 3, 0, HDA_OUTPUT),
- },
- { } /* end */
-};
-
-
-/*
- * initialization verbs
- */
-static const struct hda_verb ad1986a_init_verbs[] = {
- /* Front, Surround, CLFE DAC; mute as default */
- {0x03, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x04, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x05, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- /* Downmix - off */
- {0x09, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- /* HP, Line-Out, Surround, CLFE selectors */
- {0x0a, AC_VERB_SET_CONNECT_SEL, 0x0},
- {0x0b, AC_VERB_SET_CONNECT_SEL, 0x0},
- {0x0c, AC_VERB_SET_CONNECT_SEL, 0x0},
- {0x0d, AC_VERB_SET_CONNECT_SEL, 0x0},
- /* Mono selector */
- {0x0e, AC_VERB_SET_CONNECT_SEL, 0x0},
- /* Mic selector: Mic 1/2 pin */
- {0x0f, AC_VERB_SET_CONNECT_SEL, 0x0},
- /* Line-in selector: Line-in */
- {0x10, AC_VERB_SET_CONNECT_SEL, 0x0},
- /* Mic 1/2 swap */
- {0x11, AC_VERB_SET_CONNECT_SEL, 0x0},
- /* Record selector: mic */
- {0x12, AC_VERB_SET_CONNECT_SEL, 0x0},
- /* Mic, Phone, CD, Aux, Line-In amp; mute as default */
- {0x13, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x14, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x16, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x17, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- /* PC beep */
- {0x18, AC_VERB_SET_CONNECT_SEL, 0x0},
- /* HP, Line-Out, Surround, CLFE, Mono pins; mute as default */
- {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x1d, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- /* HP Pin */
- {0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0 },
- /* Front, Surround, CLFE Pins */
- {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40 },
- {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40 },
- {0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40 },
- /* Mono Pin */
- {0x1e, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40 },
- /* Mic Pin */
- {0x1f, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24 },
- /* Line, Aux, CD, Beep-In Pin */
- {0x20, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20 },
- {0x21, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20 },
- {0x22, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20 },
- {0x23, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20 },
- {0x24, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20 },
- { } /* end */
-};
-
-static const struct hda_verb ad1986a_ch2_init[] = {
- /* Surround out -> Line In */
- { 0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
- /* Line-in selectors */
- { 0x10, AC_VERB_SET_CONNECT_SEL, 0x1 },
- /* CLFE -> Mic in */
- { 0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
- /* Mic selector, mix C/LFE (backmic) and Mic (frontmic) */
- { 0x0f, AC_VERB_SET_CONNECT_SEL, 0x4 },
- { } /* end */
-};
-
-static const struct hda_verb ad1986a_ch4_init[] = {
- /* Surround out -> Surround */
- { 0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x10, AC_VERB_SET_CONNECT_SEL, 0x0 },
- /* CLFE -> Mic in */
- { 0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
- { 0x0f, AC_VERB_SET_CONNECT_SEL, 0x4 },
- { } /* end */
-};
-
-static const struct hda_verb ad1986a_ch6_init[] = {
- /* Surround out -> Surround out */
- { 0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x10, AC_VERB_SET_CONNECT_SEL, 0x0 },
- /* CLFE -> CLFE */
- { 0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x0f, AC_VERB_SET_CONNECT_SEL, 0x0 },
- { } /* end */
-};
-
-static const struct hda_channel_mode ad1986a_modes[3] = {
- { 2, ad1986a_ch2_init },
- { 4, ad1986a_ch4_init },
- { 6, ad1986a_ch6_init },
-};
-
-/* eapd initialization */
-static const struct hda_verb ad1986a_eapd_init_verbs[] = {
- {0x1b, AC_VERB_SET_EAPD_BTLENABLE, 0x00 },
- {}
-};
-
-static const struct hda_verb ad1986a_automic_verbs[] = {
- {0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x1f, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- /*{0x20, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},*/
- {0x0f, AC_VERB_SET_CONNECT_SEL, 0x0},
- {0x1f, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1986A_MIC_EVENT},
- {}
-};
-
-/* Ultra initialization */
-static const struct hda_verb ad1986a_ultra_init[] = {
- /* eapd initialization */
- { 0x1b, AC_VERB_SET_EAPD_BTLENABLE, 0x00 },
- /* CLFE -> Mic in */
- { 0x0f, AC_VERB_SET_CONNECT_SEL, 0x2 },
- { 0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24 },
- { 0x1d, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080 },
- { } /* end */
-};
-
-/* pin sensing on HP jack */
-static const struct hda_verb ad1986a_hp_init_verbs[] = {
- {0x1a, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1986A_HP_EVENT},
- {}
-};
-
-static void ad1986a_samsung_p50_unsol_event(struct hda_codec *codec,
- unsigned int res)
-{
- switch (res >> 26) {
- case AD1986A_HP_EVENT:
- ad1986a_hp_automute(codec);
- break;
- case AD1986A_MIC_EVENT:
- ad1986a_automic(codec);
- break;
- }
-}
-
-static int ad1986a_samsung_p50_init(struct hda_codec *codec)
-{
- ad198x_init(codec);
- ad1986a_hp_automute(codec);
- ad1986a_automic(codec);
- return 0;
-}
-
-
-/* models */
-enum {
- AD1986A_AUTO,
- AD1986A_6STACK,
- AD1986A_3STACK,
- AD1986A_LAPTOP,
- AD1986A_LAPTOP_EAPD,
- AD1986A_LAPTOP_AUTOMUTE,
- AD1986A_ULTRA,
- AD1986A_SAMSUNG,
- AD1986A_SAMSUNG_P50,
- AD1986A_MODELS
-};
-
-static const char * const ad1986a_models[AD1986A_MODELS] = {
- [AD1986A_AUTO] = "auto",
- [AD1986A_6STACK] = "6stack",
- [AD1986A_3STACK] = "3stack",
- [AD1986A_LAPTOP] = "laptop",
- [AD1986A_LAPTOP_EAPD] = "laptop-eapd",
- [AD1986A_LAPTOP_AUTOMUTE] = "laptop-automute",
- [AD1986A_ULTRA] = "ultra",
- [AD1986A_SAMSUNG] = "samsung",
- [AD1986A_SAMSUNG_P50] = "samsung-p50",
-};
-
-static const struct snd_pci_quirk ad1986a_cfg_tbl[] = {
- SND_PCI_QUIRK(0x103c, 0x30af, "HP B2800", AD1986A_LAPTOP_EAPD),
- SND_PCI_QUIRK(0x1043, 0x1153, "ASUS M9", AD1986A_LAPTOP_EAPD),
- SND_PCI_QUIRK(0x1043, 0x11f7, "ASUS U5A", AD1986A_LAPTOP_EAPD),
- SND_PCI_QUIRK(0x1043, 0x1213, "ASUS A6J", AD1986A_LAPTOP_EAPD),
- SND_PCI_QUIRK(0x1043, 0x1263, "ASUS U5F", AD1986A_LAPTOP_EAPD),
- SND_PCI_QUIRK(0x1043, 0x1297, "ASUS Z62F", AD1986A_LAPTOP_EAPD),
- SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS V1j", AD1986A_LAPTOP_EAPD),
- SND_PCI_QUIRK(0x1043, 0x1302, "ASUS W3j", AD1986A_LAPTOP_EAPD),
- SND_PCI_QUIRK(0x1043, 0x1443, "ASUS VX1", AD1986A_LAPTOP),
- SND_PCI_QUIRK(0x1043, 0x1447, "ASUS A8J", AD1986A_3STACK),
- SND_PCI_QUIRK(0x1043, 0x817f, "ASUS P5", AD1986A_3STACK),
- SND_PCI_QUIRK(0x1043, 0x818f, "ASUS P5", AD1986A_LAPTOP),
- SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS P5", AD1986A_3STACK),
- SND_PCI_QUIRK(0x1043, 0x81cb, "ASUS M2N", AD1986A_3STACK),
- SND_PCI_QUIRK(0x1043, 0x8234, "ASUS M2N", AD1986A_3STACK),
- SND_PCI_QUIRK(0x10de, 0xcb84, "ASUS A8N-VM", AD1986A_3STACK),
- SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba Satellite L40-10Q", AD1986A_3STACK),
- SND_PCI_QUIRK(0x144d, 0xb03c, "Samsung R55", AD1986A_3STACK),
- SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_LAPTOP),
- SND_PCI_QUIRK(0x144d, 0xc024, "Samsung P50", AD1986A_SAMSUNG_P50),
- SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_ULTRA),
- SND_PCI_QUIRK_MASK(0x144d, 0xff00, 0xc000, "Samsung", AD1986A_SAMSUNG),
- SND_PCI_QUIRK(0x144d, 0xc504, "Samsung Q35", AD1986A_3STACK),
- SND_PCI_QUIRK(0x17aa, 0x1011, "Lenovo M55", AD1986A_LAPTOP),
- SND_PCI_QUIRK(0x17aa, 0x1017, "Lenovo A60", AD1986A_3STACK),
- SND_PCI_QUIRK(0x17aa, 0x2066, "Lenovo N100", AD1986A_LAPTOP_AUTOMUTE),
- SND_PCI_QUIRK(0x17c0, 0x2017, "Samsung M50", AD1986A_LAPTOP),
- {}
-};
-
-#ifdef CONFIG_PM
-static const struct hda_amp_list ad1986a_loopbacks[] = {
- { 0x13, HDA_OUTPUT, 0 }, /* Mic */
- { 0x14, HDA_OUTPUT, 0 }, /* Phone */
- { 0x15, HDA_OUTPUT, 0 }, /* CD */
- { 0x16, HDA_OUTPUT, 0 }, /* Aux */
- { 0x17, HDA_OUTPUT, 0 }, /* Line */
- { } /* end */
-};
-#endif
-
-static int is_jack_available(struct hda_codec *codec, hda_nid_t nid)
-{
- unsigned int conf = snd_hda_codec_get_pincfg(codec, nid);
- return get_defcfg_connect(conf) != AC_JACK_PORT_NONE;
-}
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
static int alloc_ad_spec(struct hda_codec *codec)
{
struct ad198x_spec *spec;
@@ -1203,6 +225,11 @@ static void ad_fixup_inv_jack_detect(struct hda_codec *codec,
enum {
AD1986A_FIXUP_INV_JACK_DETECT,
+ AD1986A_FIXUP_ULTRA,
+ AD1986A_FIXUP_SAMSUNG,
+ AD1986A_FIXUP_3STACK,
+ AD1986A_FIXUP_LAPTOP,
+ AD1986A_FIXUP_LAPTOP_IMIC,
};
static const struct hda_fixup ad1986a_fixups[] = {
@@ -1210,16 +237,86 @@ static const struct hda_fixup ad1986a_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = ad_fixup_inv_jack_detect,
},
+ [AD1986A_FIXUP_ULTRA] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1b, 0x90170110 }, /* speaker */
+ { 0x1d, 0x90a7013e }, /* int mic */
+ {}
+ },
+ },
+ [AD1986A_FIXUP_SAMSUNG] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1b, 0x90170110 }, /* speaker */
+ { 0x1d, 0x90a7013e }, /* int mic */
+ { 0x20, 0x411111f0 }, /* N/A */
+ { 0x24, 0x411111f0 }, /* N/A */
+ {}
+ },
+ },
+ [AD1986A_FIXUP_3STACK] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1a, 0x02214021 }, /* headphone */
+ { 0x1b, 0x01014011 }, /* front */
+ { 0x1c, 0x01013012 }, /* surround */
+ { 0x1d, 0x01019015 }, /* clfe */
+ { 0x1e, 0x411111f0 }, /* N/A */
+ { 0x1f, 0x02a190f0 }, /* mic */
+ { 0x20, 0x018130f0 }, /* line-in */
+ {}
+ },
+ },
+ [AD1986A_FIXUP_LAPTOP] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1a, 0x02214021 }, /* headphone */
+ { 0x1b, 0x90170110 }, /* speaker */
+ { 0x1c, 0x411111f0 }, /* N/A */
+ { 0x1d, 0x411111f0 }, /* N/A */
+ { 0x1e, 0x411111f0 }, /* N/A */
+ { 0x1f, 0x02a191f0 }, /* mic */
+ { 0x20, 0x411111f0 }, /* N/A */
+ {}
+ },
+ },
+ [AD1986A_FIXUP_LAPTOP_IMIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1d, 0x90a7013e }, /* int mic */
+ {}
+ },
+ .chained_before = 1,
+ .chain_id = AD1986A_FIXUP_LAPTOP,
+ },
};
static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x30af, "HP B2800", AD1986A_FIXUP_LAPTOP_IMIC),
+ SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK),
+ SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8200, "ASUS M2", AD1986A_FIXUP_3STACK),
+ SND_PCI_QUIRK(0x10de, 0xcb84, "ASUS A8N-VM", AD1986A_FIXUP_3STACK),
+ SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_FIXUP_LAPTOP),
+ SND_PCI_QUIRK_MASK(0x144d, 0xff00, 0xc000, "Samsung", AD1986A_FIXUP_SAMSUNG),
+ SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_FIXUP_ULTRA),
SND_PCI_QUIRK(0x17aa, 0x2066, "Lenovo N100", AD1986A_FIXUP_INV_JACK_DETECT),
+ SND_PCI_QUIRK(0x17aa, 0x1011, "Lenovo M55", AD1986A_FIXUP_3STACK),
+ SND_PCI_QUIRK(0x17aa, 0x1017, "Lenovo A60", AD1986A_FIXUP_3STACK),
+ {}
+};
+
+static const struct hda_model_fixup ad1986a_fixup_models[] = {
+ { .id = AD1986A_FIXUP_3STACK, .name = "3stack" },
+ { .id = AD1986A_FIXUP_LAPTOP, .name = "laptop" },
+ { .id = AD1986A_FIXUP_LAPTOP_IMIC, .name = "laptop-imic" },
+ { .id = AD1986A_FIXUP_LAPTOP_IMIC, .name = "laptop-eapd" }, /* alias */
{}
};
/*
*/
-static int ad1986a_parse_auto_config(struct hda_codec *codec)
+static int patch_ad1986a(struct hda_codec *codec)
{
int err;
struct ad198x_spec *spec;
@@ -1244,7 +341,8 @@ static int ad1986a_parse_auto_config(struct hda_codec *codec)
*/
spec->gen.multiout.no_share_stream = 1;
- snd_hda_pick_fixup(codec, NULL, ad1986a_fixup_tbl, ad1986a_fixups);
+ snd_hda_pick_fixup(codec, ad1986a_fixup_models, ad1986a_fixup_tbl,
+ ad1986a_fixups);
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
err = ad198x_parse_auto_config(codec);
@@ -1258,330 +356,11 @@ static int ad1986a_parse_auto_config(struct hda_codec *codec)
return 0;
}
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static int patch_ad1986a(struct hda_codec *codec)
-{
- struct ad198x_spec *spec;
- int err, board_config;
-
- board_config = snd_hda_check_board_config(codec, AD1986A_MODELS,
- ad1986a_models,
- ad1986a_cfg_tbl);
- if (board_config < 0) {
- printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
- codec->chip_name);
- board_config = AD1986A_AUTO;
- }
-
- if (board_config == AD1986A_AUTO)
- return ad1986a_parse_auto_config(codec);
-
- err = alloc_ad_spec(codec);
- if (err < 0)
- return err;
- spec = codec->spec;
-
- err = snd_hda_attach_beep_device(codec, 0x19);
- if (err < 0) {
- ad198x_free(codec);
- return err;
- }
- set_beep_amp(spec, 0x18, 0, HDA_OUTPUT);
-
- spec->multiout.max_channels = 6;
- spec->multiout.num_dacs = ARRAY_SIZE(ad1986a_dac_nids);
- spec->multiout.dac_nids = ad1986a_dac_nids;
- spec->multiout.dig_out_nid = AD1986A_SPDIF_OUT;
- spec->num_adc_nids = 1;
- spec->adc_nids = ad1986a_adc_nids;
- spec->capsrc_nids = ad1986a_capsrc_nids;
- spec->input_mux = &ad1986a_capture_source;
- spec->num_mixers = 1;
- spec->mixers[0] = ad1986a_mixers;
- spec->num_init_verbs = 1;
- spec->init_verbs[0] = ad1986a_init_verbs;
-#ifdef CONFIG_PM
- spec->loopback.amplist = ad1986a_loopbacks;
-#endif
- spec->vmaster_nid = 0x1b;
- codec->inv_eapd = 1; /* AD1986A has the inverted EAPD implementation */
-
- codec->patch_ops = ad198x_patch_ops;
-
- /* override some parameters */
- switch (board_config) {
- case AD1986A_3STACK:
- spec->num_mixers = 2;
- spec->mixers[1] = ad1986a_3st_mixers;
- spec->num_init_verbs = 2;
- spec->init_verbs[1] = ad1986a_ch2_init;
- spec->channel_mode = ad1986a_modes;
- spec->num_channel_mode = ARRAY_SIZE(ad1986a_modes);
- spec->need_dac_fix = 1;
- spec->multiout.max_channels = 2;
- spec->multiout.num_dacs = 1;
- break;
- case AD1986A_LAPTOP:
- spec->mixers[0] = ad1986a_laptop_mixers;
- spec->multiout.max_channels = 2;
- spec->multiout.num_dacs = 1;
- spec->multiout.dac_nids = ad1986a_laptop_dac_nids;
- break;
- case AD1986A_LAPTOP_EAPD:
- spec->num_mixers = 3;
- spec->mixers[0] = ad1986a_laptop_master_mixers;
- spec->mixers[1] = ad1986a_laptop_eapd_mixers;
- spec->mixers[2] = ad1986a_laptop_intmic_mixers;
- spec->num_init_verbs = 2;
- spec->init_verbs[1] = ad1986a_eapd_init_verbs;
- spec->multiout.max_channels = 2;
- spec->multiout.num_dacs = 1;
- spec->multiout.dac_nids = ad1986a_laptop_dac_nids;
- if (!is_jack_available(codec, 0x25))
- spec->multiout.dig_out_nid = 0;
- spec->input_mux = &ad1986a_laptop_eapd_capture_source;
- break;
- case AD1986A_SAMSUNG:
- spec->num_mixers = 2;
- spec->mixers[0] = ad1986a_laptop_master_mixers;
- spec->mixers[1] = ad1986a_laptop_eapd_mixers;
- spec->num_init_verbs = 3;
- spec->init_verbs[1] = ad1986a_eapd_init_verbs;
- spec->init_verbs[2] = ad1986a_automic_verbs;
- spec->multiout.max_channels = 2;
- spec->multiout.num_dacs = 1;
- spec->multiout.dac_nids = ad1986a_laptop_dac_nids;
- if (!is_jack_available(codec, 0x25))
- spec->multiout.dig_out_nid = 0;
- spec->input_mux = &ad1986a_automic_capture_source;
- codec->patch_ops.unsol_event = ad1986a_automic_unsol_event;
- codec->patch_ops.init = ad1986a_automic_init;
- break;
- case AD1986A_SAMSUNG_P50:
- spec->num_mixers = 2;
- spec->mixers[0] = ad1986a_automute_master_mixers;
- spec->mixers[1] = ad1986a_laptop_eapd_mixers;
- spec->num_init_verbs = 4;
- spec->init_verbs[1] = ad1986a_eapd_init_verbs;
- spec->init_verbs[2] = ad1986a_automic_verbs;
- spec->init_verbs[3] = ad1986a_hp_init_verbs;
- spec->multiout.max_channels = 2;
- spec->multiout.num_dacs = 1;
- spec->multiout.dac_nids = ad1986a_laptop_dac_nids;
- if (!is_jack_available(codec, 0x25))
- spec->multiout.dig_out_nid = 0;
- spec->input_mux = &ad1986a_automic_capture_source;
- codec->patch_ops.unsol_event = ad1986a_samsung_p50_unsol_event;
- codec->patch_ops.init = ad1986a_samsung_p50_init;
- break;
- case AD1986A_LAPTOP_AUTOMUTE:
- spec->num_mixers = 3;
- spec->mixers[0] = ad1986a_automute_master_mixers;
- spec->mixers[1] = ad1986a_laptop_eapd_mixers;
- spec->mixers[2] = ad1986a_laptop_intmic_mixers;
- spec->num_init_verbs = 3;
- spec->init_verbs[1] = ad1986a_eapd_init_verbs;
- spec->init_verbs[2] = ad1986a_hp_init_verbs;
- spec->multiout.max_channels = 2;
- spec->multiout.num_dacs = 1;
- spec->multiout.dac_nids = ad1986a_laptop_dac_nids;
- if (!is_jack_available(codec, 0x25))
- spec->multiout.dig_out_nid = 0;
- spec->input_mux = &ad1986a_laptop_eapd_capture_source;
- codec->patch_ops.unsol_event = ad1986a_hp_unsol_event;
- codec->patch_ops.init = ad1986a_hp_init;
- /* Lenovo N100 seems to report the reversed bit
- * for HP jack-sensing
- */
- spec->inv_jack_detect = 1;
- break;
- case AD1986A_ULTRA:
- spec->mixers[0] = ad1986a_laptop_eapd_mixers;
- spec->num_init_verbs = 2;
- spec->init_verbs[1] = ad1986a_ultra_init;
- spec->multiout.max_channels = 2;
- spec->multiout.num_dacs = 1;
- spec->multiout.dac_nids = ad1986a_laptop_dac_nids;
- spec->multiout.dig_out_nid = 0;
- break;
- }
-
- /* AD1986A has a hardware problem that it can't share a stream
- * with multiple output pins. The copy of front to surrounds
- * causes noisy or silent outputs at a certain timing, e.g.
- * changing the volume.
- * So, let's disable the shared stream.
- */
- spec->multiout.no_share_stream = 1;
-
- codec->no_trigger_sense = 1;
- codec->no_sticky_stream = 1;
-
- return 0;
-}
-#else /* ENABLE_AD_STATIC_QUIRKS */
-#define patch_ad1986a ad1986a_parse_auto_config
-#endif /* ENABLE_AD_STATIC_QUIRKS */
/*
* AD1983 specific
*/
-#ifdef ENABLE_AD_STATIC_QUIRKS
-#define AD1983_SPDIF_OUT 0x02
-#define AD1983_DAC 0x03
-#define AD1983_ADC 0x04
-
-static const hda_nid_t ad1983_dac_nids[1] = { AD1983_DAC };
-static const hda_nid_t ad1983_adc_nids[1] = { AD1983_ADC };
-static const hda_nid_t ad1983_capsrc_nids[1] = { 0x15 };
-
-static const struct hda_input_mux ad1983_capture_source = {
- .num_items = 4,
- .items = {
- { "Mic", 0x0 },
- { "Line", 0x1 },
- { "Mix", 0x2 },
- { "Mix Mono", 0x3 },
- },
-};
-
-/*
- * SPDIF playback route
- */
-static int ad1983_spdif_route_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
-{
- static const char * const texts[] = { "PCM", "ADC" };
-
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 2;
- if (uinfo->value.enumerated.item > 1)
- uinfo->value.enumerated.item = 1;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
-}
-
-static int ad1983_spdif_route_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct ad198x_spec *spec = codec->spec;
-
- ucontrol->value.enumerated.item[0] = spec->spdif_route;
- return 0;
-}
-
-static int ad1983_spdif_route_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct ad198x_spec *spec = codec->spec;
-
- if (ucontrol->value.enumerated.item[0] > 1)
- return -EINVAL;
- if (spec->spdif_route != ucontrol->value.enumerated.item[0]) {
- spec->spdif_route = ucontrol->value.enumerated.item[0];
- snd_hda_codec_write_cache(codec, spec->multiout.dig_out_nid, 0,
- AC_VERB_SET_CONNECT_SEL,
- spec->spdif_route);
- return 1;
- }
- return 0;
-}
-
-static const struct snd_kcontrol_new ad1983_mixers[] = {
- HDA_CODEC_VOLUME("Front Playback Volume", 0x05, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Front Playback Switch", 0x05, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Headphone Playback Volume", 0x06, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Headphone Playback Switch", 0x06, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x07, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x07, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("PCM Playback Volume", 0x11, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("PCM Playback Switch", 0x11, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Line Playback Volume", 0x13, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Line Playback Switch", 0x13, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Boost Volume", 0x0c, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Capture Source",
- .info = ad198x_mux_enum_info,
- .get = ad198x_mux_enum_get,
- .put = ad198x_mux_enum_put,
- },
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source",
- .info = ad1983_spdif_route_info,
- .get = ad1983_spdif_route_get,
- .put = ad1983_spdif_route_put,
- },
- { } /* end */
-};
-
-static const struct hda_verb ad1983_init_verbs[] = {
- /* Front, HP, Mono; mute as default */
- {0x05, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x06, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x07, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- /* Beep, PCM, Mic, Line-In: mute */
- {0x10, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x11, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x12, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x13, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- /* Front, HP selectors; from Mix */
- {0x05, AC_VERB_SET_CONNECT_SEL, 0x01},
- {0x06, AC_VERB_SET_CONNECT_SEL, 0x01},
- /* Mono selector; from Mix */
- {0x0b, AC_VERB_SET_CONNECT_SEL, 0x03},
- /* Mic selector; Mic */
- {0x0c, AC_VERB_SET_CONNECT_SEL, 0x0},
- /* Line-in selector: Line-in */
- {0x0d, AC_VERB_SET_CONNECT_SEL, 0x0},
- /* Mic boost: 0dB */
- {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000},
- /* Record selector: mic */
- {0x15, AC_VERB_SET_CONNECT_SEL, 0x0},
- {0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- /* SPDIF route: PCM */
- {0x02, AC_VERB_SET_CONNECT_SEL, 0x0},
- /* Front Pin */
- {0x05, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40 },
- /* HP Pin */
- {0x06, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0 },
- /* Mono Pin */
- {0x07, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40 },
- /* Mic Pin */
- {0x08, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24 },
- /* Line Pin */
- {0x09, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20 },
- { } /* end */
-};
-
-#ifdef CONFIG_PM
-static const struct hda_amp_list ad1983_loopbacks[] = {
- { 0x12, HDA_OUTPUT, 0 }, /* Mic */
- { 0x13, HDA_OUTPUT, 0 }, /* Line */
- { } /* end */
-};
-#endif
-
-/* models */
-enum {
- AD1983_AUTO,
- AD1983_BASIC,
- AD1983_MODELS
-};
-
-static const char * const ad1983_models[AD1983_MODELS] = {
- [AD1983_AUTO] = "auto",
- [AD1983_BASIC] = "basic",
-};
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
-
/*
* SPDIF mux control for AD1983 auto-parser
*/
@@ -1656,7 +435,7 @@ static int ad1983_add_spdif_mux_ctl(struct hda_codec *codec)
return 0;
}
-static int ad1983_parse_auto_config(struct hda_codec *codec)
+static int patch_ad1983(struct hda_codec *codec)
{
struct ad198x_spec *spec;
int err;
@@ -1681,432 +460,11 @@ static int ad1983_parse_auto_config(struct hda_codec *codec)
return err;
}
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static int patch_ad1983(struct hda_codec *codec)
-{
- struct ad198x_spec *spec;
- int board_config;
- int err;
-
- board_config = snd_hda_check_board_config(codec, AD1983_MODELS,
- ad1983_models, NULL);
- if (board_config < 0) {
- printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
- codec->chip_name);
- board_config = AD1983_AUTO;
- }
-
- if (board_config == AD1983_AUTO)
- return ad1983_parse_auto_config(codec);
-
- err = alloc_ad_spec(codec);
- if (err < 0)
- return err;
- spec = codec->spec;
-
- err = snd_hda_attach_beep_device(codec, 0x10);
- if (err < 0) {
- ad198x_free(codec);
- return err;
- }
- set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
-
- spec->multiout.max_channels = 2;
- spec->multiout.num_dacs = ARRAY_SIZE(ad1983_dac_nids);
- spec->multiout.dac_nids = ad1983_dac_nids;
- spec->multiout.dig_out_nid = AD1983_SPDIF_OUT;
- spec->num_adc_nids = 1;
- spec->adc_nids = ad1983_adc_nids;
- spec->capsrc_nids = ad1983_capsrc_nids;
- spec->input_mux = &ad1983_capture_source;
- spec->num_mixers = 1;
- spec->mixers[0] = ad1983_mixers;
- spec->num_init_verbs = 1;
- spec->init_verbs[0] = ad1983_init_verbs;
- spec->spdif_route = 0;
-#ifdef CONFIG_PM
- spec->loopback.amplist = ad1983_loopbacks;
-#endif
- spec->vmaster_nid = 0x05;
-
- codec->patch_ops = ad198x_patch_ops;
-
- codec->no_trigger_sense = 1;
- codec->no_sticky_stream = 1;
-
- return 0;
-}
-#else /* ENABLE_AD_STATIC_QUIRKS */
-#define patch_ad1983 ad1983_parse_auto_config
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
/*
* AD1981 HD specific
*/
-#ifdef ENABLE_AD_STATIC_QUIRKS
-#define AD1981_SPDIF_OUT 0x02
-#define AD1981_DAC 0x03
-#define AD1981_ADC 0x04
-
-static const hda_nid_t ad1981_dac_nids[1] = { AD1981_DAC };
-static const hda_nid_t ad1981_adc_nids[1] = { AD1981_ADC };
-static const hda_nid_t ad1981_capsrc_nids[1] = { 0x15 };
-
-/* 0x0c, 0x09, 0x0e, 0x0f, 0x19, 0x05, 0x18, 0x17 */
-static const struct hda_input_mux ad1981_capture_source = {
- .num_items = 7,
- .items = {
- { "Front Mic", 0x0 },
- { "Line", 0x1 },
- { "Mix", 0x2 },
- { "Mix Mono", 0x3 },
- { "CD", 0x4 },
- { "Mic", 0x6 },
- { "Aux", 0x7 },
- },
-};
-
-static const struct snd_kcontrol_new ad1981_mixers[] = {
- HDA_CODEC_VOLUME("Front Playback Volume", 0x05, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Front Playback Switch", 0x05, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Headphone Playback Volume", 0x06, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Headphone Playback Switch", 0x06, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x07, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x07, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("PCM Playback Volume", 0x11, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("PCM Playback Switch", 0x11, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Front Mic Playback Switch", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Line Playback Volume", 0x13, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Line Playback Switch", 0x13, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Aux Playback Volume", 0x1b, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Aux Playback Switch", 0x1b, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x1c, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x1c, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("CD Playback Volume", 0x1d, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("CD Playback Switch", 0x1d, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x08, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Boost Volume", 0x18, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Capture Source",
- .info = ad198x_mux_enum_info,
- .get = ad198x_mux_enum_get,
- .put = ad198x_mux_enum_put,
- },
- /* identical with AD1983 */
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source",
- .info = ad1983_spdif_route_info,
- .get = ad1983_spdif_route_get,
- .put = ad1983_spdif_route_put,
- },
- { } /* end */
-};
-
-static const struct hda_verb ad1981_init_verbs[] = {
- /* Front, HP, Mono; mute as default */
- {0x05, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x06, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x07, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- /* Beep, PCM, Front Mic, Line, Rear Mic, Aux, CD-In: mute */
- {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x11, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x12, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x13, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x1d, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- /* Front, HP selectors; from Mix */
- {0x05, AC_VERB_SET_CONNECT_SEL, 0x01},
- {0x06, AC_VERB_SET_CONNECT_SEL, 0x01},
- /* Mono selector; from Mix */
- {0x0b, AC_VERB_SET_CONNECT_SEL, 0x03},
- /* Mic Mixer; select Front Mic */
- {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000},
- {0x1f, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- /* Mic boost: 0dB */
- {0x08, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- /* Record selector: Front mic */
- {0x15, AC_VERB_SET_CONNECT_SEL, 0x0},
- {0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- /* SPDIF route: PCM */
- {0x02, AC_VERB_SET_CONNECT_SEL, 0x0},
- /* Front Pin */
- {0x05, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40 },
- /* HP Pin */
- {0x06, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc0 },
- /* Mono Pin */
- {0x07, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x40 },
- /* Front & Rear Mic Pins */
- {0x08, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24 },
- {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x24 },
- /* Line Pin */
- {0x09, AC_VERB_SET_PIN_WIDGET_CONTROL, 0x20 },
- /* Digital Beep */
- {0x0d, AC_VERB_SET_CONNECT_SEL, 0x00},
- /* Line-Out as Input: disabled */
- {0x1a, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- { } /* end */
-};
-
-#ifdef CONFIG_PM
-static const struct hda_amp_list ad1981_loopbacks[] = {
- { 0x12, HDA_OUTPUT, 0 }, /* Front Mic */
- { 0x13, HDA_OUTPUT, 0 }, /* Line */
- { 0x1b, HDA_OUTPUT, 0 }, /* Aux */
- { 0x1c, HDA_OUTPUT, 0 }, /* Mic */
- { 0x1d, HDA_OUTPUT, 0 }, /* CD */
- { } /* end */
-};
-#endif
-
-/*
- * Patch for HP nx6320
- *
- * nx6320 uses EAPD in the reverse way - EAPD-on means the internal
- * speaker output enabled _and_ mute-LED off.
- */
-
-#define AD1981_HP_EVENT 0x37
-#define AD1981_MIC_EVENT 0x38
-
-static const struct hda_verb ad1981_hp_init_verbs[] = {
- {0x05, AC_VERB_SET_EAPD_BTLENABLE, 0x00 }, /* default off */
- /* pin sensing on HP and Mic jacks */
- {0x06, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1981_HP_EVENT},
- {0x08, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1981_MIC_EVENT},
- {}
-};
-
-/* turn on/off EAPD (+ mute HP) as a master switch */
-static int ad1981_hp_master_sw_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- struct ad198x_spec *spec = codec->spec;
-
- if (! ad198x_eapd_put(kcontrol, ucontrol))
- return 0;
- /* change speaker pin appropriately */
- snd_hda_set_pin_ctl(codec, 0x05, spec->cur_eapd ? PIN_OUT : 0);
- /* toggle HP mute appropriately */
- snd_hda_codec_amp_stereo(codec, 0x06, HDA_OUTPUT, 0,
- HDA_AMP_MUTE,
- spec->cur_eapd ? 0 : HDA_AMP_MUTE);
- return 1;
-}
-
-/* bind volumes of both NID 0x05 and 0x06 */
-static const struct hda_bind_ctls ad1981_hp_bind_master_vol = {
- .ops = &snd_hda_bind_vol,
- .values = {
- HDA_COMPOSE_AMP_VAL(0x05, 3, 0, HDA_OUTPUT),
- HDA_COMPOSE_AMP_VAL(0x06, 3, 0, HDA_OUTPUT),
- 0
- },
-};
-
-/* mute internal speaker if HP is plugged */
-static void ad1981_hp_automute(struct hda_codec *codec)
-{
- unsigned int present;
-
- present = snd_hda_jack_detect(codec, 0x06);
- snd_hda_codec_amp_stereo(codec, 0x05, HDA_OUTPUT, 0,
- HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
-}
-
-/* toggle input of built-in and mic jack appropriately */
-static void ad1981_hp_automic(struct hda_codec *codec)
-{
- static const struct hda_verb mic_jack_on[] = {
- {0x1f, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000},
- {}
- };
- static const struct hda_verb mic_jack_off[] = {
- {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, 0xb080},
- {0x1f, AC_VERB_SET_AMP_GAIN_MUTE, 0xb000},
- {}
- };
- unsigned int present;
-
- present = snd_hda_jack_detect(codec, 0x08);
- if (present)
- snd_hda_sequence_write(codec, mic_jack_on);
- else
- snd_hda_sequence_write(codec, mic_jack_off);
-}
-
-/* unsolicited event for HP jack sensing */
-static void ad1981_hp_unsol_event(struct hda_codec *codec,
- unsigned int res)
-{
- res >>= 26;
- switch (res) {
- case AD1981_HP_EVENT:
- ad1981_hp_automute(codec);
- break;
- case AD1981_MIC_EVENT:
- ad1981_hp_automic(codec);
- break;
- }
-}
-
-static const struct hda_input_mux ad1981_hp_capture_source = {
- .num_items = 3,
- .items = {
- { "Mic", 0x0 },
- { "Dock Mic", 0x1 },
- { "Mix", 0x2 },
- },
-};
-
-static const struct snd_kcontrol_new ad1981_hp_mixers[] = {
- HDA_BIND_VOL("Master Playback Volume", &ad1981_hp_bind_master_vol),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .subdevice = HDA_SUBDEV_NID_FLAG | 0x05,
- .name = "Master Playback Switch",
- .info = ad198x_eapd_info,
- .get = ad198x_eapd_get,
- .put = ad1981_hp_master_sw_put,
- .private_value = 0x05,
- },
- HDA_CODEC_VOLUME("PCM Playback Volume", 0x11, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("PCM Playback Switch", 0x11, 0x0, HDA_OUTPUT),
-#if 0
- /* FIXME: analog mic/line loopback doesn't work with my tests...
- * (although recording is OK)
- */
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Dock Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Dock Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x1c, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x1c, 0x0, HDA_OUTPUT),
- /* FIXME: does this laptop have analog CD connection? */
- HDA_CODEC_VOLUME("CD Playback Volume", 0x1d, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("CD Playback Switch", 0x1d, 0x0, HDA_OUTPUT),
-#endif
- HDA_CODEC_VOLUME("Mic Boost Volume", 0x08, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x18, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Capture Source",
- .info = ad198x_mux_enum_info,
- .get = ad198x_mux_enum_get,
- .put = ad198x_mux_enum_put,
- },
- { } /* end */
-};
-
-/* initialize jack-sensing, too */
-static int ad1981_hp_init(struct hda_codec *codec)
-{
- ad198x_init(codec);
- ad1981_hp_automute(codec);
- ad1981_hp_automic(codec);
- return 0;
-}
-
-/* configuration for Toshiba Laptops */
-static const struct hda_verb ad1981_toshiba_init_verbs[] = {
- {0x05, AC_VERB_SET_EAPD_BTLENABLE, 0x01 }, /* default on */
- /* pin sensing on HP and Mic jacks */
- {0x06, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1981_HP_EVENT},
- {0x08, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1981_MIC_EVENT},
- {}
-};
-
-static const struct snd_kcontrol_new ad1981_toshiba_mixers[] = {
- HDA_CODEC_VOLUME("Amp Volume", 0x1a, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Amp Switch", 0x1a, 0x0, HDA_OUTPUT),
- { }
-};
-
-/* configuration for Lenovo Thinkpad T60 */
-static const struct snd_kcontrol_new ad1981_thinkpad_mixers[] = {
- HDA_CODEC_VOLUME("Master Playback Volume", 0x05, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Master Playback Switch", 0x05, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("PCM Playback Volume", 0x11, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("PCM Playback Switch", 0x11, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("CD Playback Volume", 0x1d, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("CD Playback Switch", 0x1d, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Boost Volume", 0x08, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Capture Volume", 0x15, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x15, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Capture Source",
- .info = ad198x_mux_enum_info,
- .get = ad198x_mux_enum_get,
- .put = ad198x_mux_enum_put,
- },
- /* identical with AD1983 */
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source",
- .info = ad1983_spdif_route_info,
- .get = ad1983_spdif_route_get,
- .put = ad1983_spdif_route_put,
- },
- { } /* end */
-};
-
-static const struct hda_input_mux ad1981_thinkpad_capture_source = {
- .num_items = 3,
- .items = {
- { "Mic", 0x0 },
- { "Mix", 0x2 },
- { "CD", 0x4 },
- },
-};
-
-/* models */
-enum {
- AD1981_AUTO,
- AD1981_BASIC,
- AD1981_HP,
- AD1981_THINKPAD,
- AD1981_TOSHIBA,
- AD1981_MODELS
-};
-
-static const char * const ad1981_models[AD1981_MODELS] = {
- [AD1981_AUTO] = "auto",
- [AD1981_HP] = "hp",
- [AD1981_THINKPAD] = "thinkpad",
- [AD1981_BASIC] = "basic",
- [AD1981_TOSHIBA] = "toshiba"
-};
-
-static const struct snd_pci_quirk ad1981_cfg_tbl[] = {
- SND_PCI_QUIRK(0x1014, 0x0597, "Lenovo Z60", AD1981_THINKPAD),
- SND_PCI_QUIRK(0x1014, 0x05b7, "Lenovo Z60m", AD1981_THINKPAD),
- /* All HP models */
- SND_PCI_QUIRK_VENDOR(0x103c, "HP nx", AD1981_HP),
- SND_PCI_QUIRK(0x1179, 0x0001, "Toshiba U205", AD1981_TOSHIBA),
- /* Lenovo Thinkpad T60/X60/Z6xx */
- SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1981_THINKPAD),
- /* HP nx6320 (reversed SSID, H/W bug) */
- SND_PCI_QUIRK(0x30b0, 0x103c, "HP nx6320", AD1981_HP),
- {}
-};
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
-
/* follow EAPD via vmaster hook */
static void ad_vmaster_eapd_hook(void *private_data, int enabled)
{
@@ -2172,7 +530,7 @@ static const struct snd_pci_quirk ad1981_fixup_tbl[] = {
{}
};
-static int ad1981_parse_auto_config(struct hda_codec *codec)
+static int patch_ad1981(struct hda_codec *codec)
{
struct ad198x_spec *spec;
int err;
@@ -2205,110 +563,6 @@ static int ad1981_parse_auto_config(struct hda_codec *codec)
return err;
}
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static int patch_ad1981(struct hda_codec *codec)
-{
- struct ad198x_spec *spec;
- int err, board_config;
-
- board_config = snd_hda_check_board_config(codec, AD1981_MODELS,
- ad1981_models,
- ad1981_cfg_tbl);
- if (board_config < 0) {
- printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
- codec->chip_name);
- board_config = AD1981_AUTO;
- }
-
- if (board_config == AD1981_AUTO)
- return ad1981_parse_auto_config(codec);
-
- err = alloc_ad_spec(codec);
- if (err < 0)
- return -ENOMEM;
- spec = codec->spec;
-
- err = snd_hda_attach_beep_device(codec, 0x10);
- if (err < 0) {
- ad198x_free(codec);
- return err;
- }
- set_beep_amp(spec, 0x0d, 0, HDA_OUTPUT);
-
- spec->multiout.max_channels = 2;
- spec->multiout.num_dacs = ARRAY_SIZE(ad1981_dac_nids);
- spec->multiout.dac_nids = ad1981_dac_nids;
- spec->multiout.dig_out_nid = AD1981_SPDIF_OUT;
- spec->num_adc_nids = 1;
- spec->adc_nids = ad1981_adc_nids;
- spec->capsrc_nids = ad1981_capsrc_nids;
- spec->input_mux = &ad1981_capture_source;
- spec->num_mixers = 1;
- spec->mixers[0] = ad1981_mixers;
- spec->num_init_verbs = 1;
- spec->init_verbs[0] = ad1981_init_verbs;
- spec->spdif_route = 0;
-#ifdef CONFIG_PM
- spec->loopback.amplist = ad1981_loopbacks;
-#endif
- spec->vmaster_nid = 0x05;
-
- codec->patch_ops = ad198x_patch_ops;
-
- /* override some parameters */
- switch (board_config) {
- case AD1981_HP:
- spec->mixers[0] = ad1981_hp_mixers;
- spec->num_init_verbs = 2;
- spec->init_verbs[1] = ad1981_hp_init_verbs;
- if (!is_jack_available(codec, 0x0a))
- spec->multiout.dig_out_nid = 0;
- spec->input_mux = &ad1981_hp_capture_source;
-
- codec->patch_ops.init = ad1981_hp_init;
- codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
- /* set the upper-limit for mixer amp to 0dB for avoiding the
- * possible damage by overloading
- */
- snd_hda_override_amp_caps(codec, 0x11, HDA_INPUT,
- (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
- (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
- (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
- (1 << AC_AMPCAP_MUTE_SHIFT));
- break;
- case AD1981_THINKPAD:
- spec->mixers[0] = ad1981_thinkpad_mixers;
- spec->input_mux = &ad1981_thinkpad_capture_source;
- /* set the upper-limit for mixer amp to 0dB for avoiding the
- * possible damage by overloading
- */
- snd_hda_override_amp_caps(codec, 0x11, HDA_INPUT,
- (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
- (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
- (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
- (1 << AC_AMPCAP_MUTE_SHIFT));
- break;
- case AD1981_TOSHIBA:
- spec->mixers[0] = ad1981_hp_mixers;
- spec->mixers[1] = ad1981_toshiba_mixers;
- spec->num_init_verbs = 2;
- spec->init_verbs[1] = ad1981_toshiba_init_verbs;
- spec->multiout.dig_out_nid = 0;
- spec->input_mux = &ad1981_hp_capture_source;
- codec->patch_ops.init = ad1981_hp_init;
- codec->patch_ops.unsol_event = ad1981_hp_unsol_event;
- break;
- }
-
- codec->no_trigger_sense = 1;
- codec->no_sticky_stream = 1;
-
- return 0;
-}
-#else /* ENABLE_AD_STATIC_QUIRKS */
-#define patch_ad1981 ad1981_parse_auto_config
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
/*
* AD1988
@@ -2395,90 +649,7 @@ static int patch_ad1981(struct hda_codec *codec)
* E/F quad mic array
*/
-
#ifdef ENABLE_AD_STATIC_QUIRKS
-/* models */
-enum {
- AD1988_AUTO,
- AD1988_6STACK,
- AD1988_6STACK_DIG,
- AD1988_3STACK,
- AD1988_3STACK_DIG,
- AD1988_LAPTOP,
- AD1988_LAPTOP_DIG,
- AD1988_MODEL_LAST,
-};
-
-/* revision id to check workarounds */
-#define AD1988A_REV2 0x100200
-
-#define is_rev2(codec) \
- ((codec)->vendor_id == 0x11d41988 && \
- (codec)->revision_id == AD1988A_REV2)
-
-/*
- * mixers
- */
-
-static const hda_nid_t ad1988_6stack_dac_nids[4] = {
- 0x04, 0x06, 0x05, 0x0a
-};
-
-static const hda_nid_t ad1988_3stack_dac_nids[3] = {
- 0x04, 0x05, 0x0a
-};
-
-/* for AD1988A revision-2, DAC2-4 are swapped */
-static const hda_nid_t ad1988_6stack_dac_nids_rev2[4] = {
- 0x04, 0x05, 0x0a, 0x06
-};
-
-static const hda_nid_t ad1988_alt_dac_nid[1] = {
- 0x03
-};
-
-static const hda_nid_t ad1988_3stack_dac_nids_rev2[3] = {
- 0x04, 0x0a, 0x06
-};
-
-static const hda_nid_t ad1988_adc_nids[3] = {
- 0x08, 0x09, 0x0f
-};
-
-static const hda_nid_t ad1988_capsrc_nids[3] = {
- 0x0c, 0x0d, 0x0e
-};
-
-#define AD1988_SPDIF_OUT 0x02
-#define AD1988_SPDIF_OUT_HDMI 0x0b
-#define AD1988_SPDIF_IN 0x07
-
-static const hda_nid_t ad1989b_slave_dig_outs[] = {
- AD1988_SPDIF_OUT, AD1988_SPDIF_OUT_HDMI, 0
-};
-
-static const struct hda_input_mux ad1988_6stack_capture_source = {
- .num_items = 5,
- .items = {
- { "Front Mic", 0x1 }, /* port-B */
- { "Line", 0x2 }, /* port-C */
- { "Mic", 0x4 }, /* port-E */
- { "CD", 0x5 },
- { "Mix", 0x9 },
- },
-};
-
-static const struct hda_input_mux ad1988_laptop_capture_source = {
- .num_items = 3,
- .items = {
- { "Mic/Line", 0x1 }, /* port-B */
- { "CD", 0x5 },
- { "Mix", 0x9 },
- },
-};
-
-/*
- */
static int ad198x_ch_mode_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
@@ -2509,569 +680,6 @@ static int ad198x_ch_mode_put(struct snd_kcontrol *kcontrol,
spec->multiout.num_dacs = spec->multiout.max_channels / 2;
return err;
}
-
-/* 6-stack mode */
-static const struct snd_kcontrol_new ad1988_6stack_mixers1[] = {
- HDA_CODEC_VOLUME("Front Playback Volume", 0x04, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Surround Playback Volume", 0x06, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x05, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x05, 2, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Side Playback Volume", 0x0a, 0x0, HDA_OUTPUT),
- { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1988_6stack_mixers1_rev2[] = {
- HDA_CODEC_VOLUME("Front Playback Volume", 0x04, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Surround Playback Volume", 0x05, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x0a, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x0a, 2, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Side Playback Volume", 0x06, 0x0, HDA_OUTPUT),
- { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1988_6stack_mixers2[] = {
- HDA_CODEC_VOLUME("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT),
- HDA_BIND_MUTE("Front Playback Switch", 0x29, 2, HDA_INPUT),
- HDA_BIND_MUTE("Surround Playback Switch", 0x2a, 2, HDA_INPUT),
- HDA_BIND_MUTE_MONO("Center Playback Switch", 0x27, 1, 2, HDA_INPUT),
- HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x27, 2, 2, HDA_INPUT),
- HDA_BIND_MUTE("Side Playback Switch", 0x28, 2, HDA_INPUT),
- HDA_BIND_MUTE("Headphone Playback Switch", 0x22, 2, HDA_INPUT),
- HDA_BIND_MUTE("Mono Playback Switch", 0x1e, 2, HDA_INPUT),
-
- HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x6, HDA_INPUT),
- HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x6, HDA_INPUT),
- HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x20, 0x0, HDA_INPUT),
- HDA_CODEC_MUTE("Front Mic Playback Switch", 0x20, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Line Playback Volume", 0x20, 0x1, HDA_INPUT),
- HDA_CODEC_MUTE("Line Playback Switch", 0x20, 0x1, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x4, HDA_INPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x4, HDA_INPUT),
-
- HDA_CODEC_VOLUME("Analog Mix Playback Volume", 0x21, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Analog Mix Playback Switch", 0x21, 0x0, HDA_OUTPUT),
-
- HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Boost Volume", 0x3c, 0x0, HDA_OUTPUT),
- { } /* end */
-};
-
-/* 3-stack mode */
-static const struct snd_kcontrol_new ad1988_3stack_mixers1[] = {
- HDA_CODEC_VOLUME("Front Playback Volume", 0x04, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Surround Playback Volume", 0x0a, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x05, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x05, 2, 0x0, HDA_OUTPUT),
- { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1988_3stack_mixers1_rev2[] = {
- HDA_CODEC_VOLUME("Front Playback Volume", 0x04, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Surround Playback Volume", 0x0a, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x06, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x06, 2, 0x0, HDA_OUTPUT),
- { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1988_3stack_mixers2[] = {
- HDA_CODEC_VOLUME("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT),
- HDA_BIND_MUTE("Front Playback Switch", 0x29, 2, HDA_INPUT),
- HDA_BIND_MUTE("Surround Playback Switch", 0x2c, 2, HDA_INPUT),
- HDA_BIND_MUTE_MONO("Center Playback Switch", 0x26, 1, 2, HDA_INPUT),
- HDA_BIND_MUTE_MONO("LFE Playback Switch", 0x26, 2, 2, HDA_INPUT),
- HDA_BIND_MUTE("Headphone Playback Switch", 0x22, 2, HDA_INPUT),
- HDA_BIND_MUTE("Mono Playback Switch", 0x1e, 2, HDA_INPUT),
-
- HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x6, HDA_INPUT),
- HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x6, HDA_INPUT),
- HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x20, 0x0, HDA_INPUT),
- HDA_CODEC_MUTE("Front Mic Playback Switch", 0x20, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Line Playback Volume", 0x20, 0x1, HDA_INPUT),
- HDA_CODEC_MUTE("Line Playback Switch", 0x20, 0x1, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x4, HDA_INPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x4, HDA_INPUT),
-
- HDA_CODEC_VOLUME("Analog Mix Playback Volume", 0x21, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Analog Mix Playback Switch", 0x21, 0x0, HDA_OUTPUT),
-
- HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Boost Volume", 0x3c, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Channel Mode",
- .info = ad198x_ch_mode_info,
- .get = ad198x_ch_mode_get,
- .put = ad198x_ch_mode_put,
- },
-
- { } /* end */
-};
-
-/* laptop mode */
-static const struct snd_kcontrol_new ad1988_laptop_mixers[] = {
- HDA_CODEC_VOLUME("Headphone Playback Volume", 0x03, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("PCM Playback Volume", 0x04, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("PCM Playback Switch", 0x29, 0x0, HDA_INPUT),
- HDA_BIND_MUTE("Mono Playback Switch", 0x1e, 2, HDA_INPUT),
-
- HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x6, HDA_INPUT),
- HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x6, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x0, HDA_INPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Line Playback Volume", 0x20, 0x1, HDA_INPUT),
- HDA_CODEC_MUTE("Line Playback Switch", 0x20, 0x1, HDA_INPUT),
-
- HDA_CODEC_VOLUME("Analog Mix Playback Volume", 0x21, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Analog Mix Playback Switch", 0x21, 0x0, HDA_OUTPUT),
-
- HDA_CODEC_VOLUME("Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
-
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "External Amplifier",
- .subdevice = HDA_SUBDEV_NID_FLAG | 0x12,
- .info = ad198x_eapd_info,
- .get = ad198x_eapd_get,
- .put = ad198x_eapd_put,
- .private_value = 0x12, /* port-D */
- },
-
- { } /* end */
-};
-
-/* capture */
-static const struct snd_kcontrol_new ad1988_capture_mixers[] = {
- HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x0d, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_IDX("Capture Volume", 2, 0x0e, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 2, 0x0e, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- /* The multiple "Capture Source" controls confuse alsamixer,
- * so name this control somewhat differently.
- */
- /* .name = "Capture Source", */
- .name = "Input Source",
- .count = 3,
- .info = ad198x_mux_enum_info,
- .get = ad198x_mux_enum_get,
- .put = ad198x_mux_enum_put,
- },
- { } /* end */
-};
-
-static int ad1988_spdif_playback_source_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- static const char * const texts[] = {
- "PCM", "ADC1", "ADC2", "ADC3"
- };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 4;
- if (uinfo->value.enumerated.item >= 4)
- uinfo->value.enumerated.item = 3;
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
- return 0;
-}
-
-static int ad1988_spdif_playback_source_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- unsigned int sel;
-
- sel = snd_hda_codec_read(codec, 0x1d, 0, AC_VERB_GET_AMP_GAIN_MUTE,
- AC_AMP_GET_INPUT);
- if (!(sel & 0x80))
- ucontrol->value.enumerated.item[0] = 0;
- else {
- sel = snd_hda_codec_read(codec, 0x0b, 0,
- AC_VERB_GET_CONNECT_SEL, 0);
- if (sel < 3)
- sel++;
- else
- sel = 0;
- ucontrol->value.enumerated.item[0] = sel;
- }
- return 0;
-}
-
-static int ad1988_spdif_playback_source_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- unsigned int val, sel;
- int change;
-
- val = ucontrol->value.enumerated.item[0];
- if (val > 3)
- return -EINVAL;
- if (!val) {
- sel = snd_hda_codec_read(codec, 0x1d, 0,
- AC_VERB_GET_AMP_GAIN_MUTE,
- AC_AMP_GET_INPUT);
- change = sel & 0x80;
- if (change) {
- snd_hda_codec_write_cache(codec, 0x1d, 0,
- AC_VERB_SET_AMP_GAIN_MUTE,
- AMP_IN_UNMUTE(0));
- snd_hda_codec_write_cache(codec, 0x1d, 0,
- AC_VERB_SET_AMP_GAIN_MUTE,
- AMP_IN_MUTE(1));
- }
- } else {
- sel = snd_hda_codec_read(codec, 0x1d, 0,
- AC_VERB_GET_AMP_GAIN_MUTE,
- AC_AMP_GET_INPUT | 0x01);
- change = sel & 0x80;
- if (change) {
- snd_hda_codec_write_cache(codec, 0x1d, 0,
- AC_VERB_SET_AMP_GAIN_MUTE,
- AMP_IN_MUTE(0));
- snd_hda_codec_write_cache(codec, 0x1d, 0,
- AC_VERB_SET_AMP_GAIN_MUTE,
- AMP_IN_UNMUTE(1));
- }
- sel = snd_hda_codec_read(codec, 0x0b, 0,
- AC_VERB_GET_CONNECT_SEL, 0) + 1;
- change |= sel != val;
- if (change)
- snd_hda_codec_write_cache(codec, 0x0b, 0,
- AC_VERB_SET_CONNECT_SEL,
- val - 1);
- }
- return change;
-}
-
-static const struct snd_kcontrol_new ad1988_spdif_out_mixers[] = {
- HDA_CODEC_VOLUME("IEC958 Playback Volume", 0x1b, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "IEC958 Playback Source",
- .subdevice = HDA_SUBDEV_NID_FLAG | 0x1b,
- .info = ad1988_spdif_playback_source_info,
- .get = ad1988_spdif_playback_source_get,
- .put = ad1988_spdif_playback_source_put,
- },
- { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1988_spdif_in_mixers[] = {
- HDA_CODEC_VOLUME("IEC958 Capture Volume", 0x1c, 0x0, HDA_INPUT),
- { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1989_spdif_out_mixers[] = {
- HDA_CODEC_VOLUME("IEC958 Playback Volume", 0x1b, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("HDMI Playback Volume", 0x1d, 0x0, HDA_OUTPUT),
- { } /* end */
-};
-
-/*
- * initialization verbs
- */
-
-/*
- * for 6-stack (+dig)
- */
-static const struct hda_verb ad1988_6stack_init_verbs[] = {
- /* Front, Surround, CLFE, side DAC; unmute as default */
- {0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x06, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x05, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x0a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- /* Port-A front headphone path */
- {0x37, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC0:03h */
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
- /* Port-D line-out path */
- {0x29, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x29, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- /* Port-F surround path */
- {0x2a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x2a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- /* Port-G CLFE path */
- {0x27, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x27, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x24, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- /* Port-H side path */
- {0x28, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x28, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x25, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x25, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- /* Mono out path */
- {0x36, AC_VERB_SET_CONNECT_SEL, 0x1}, /* DAC1:04h */
- {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x13, AC_VERB_SET_AMP_GAIN_MUTE, 0xb01f}, /* unmute, 0dB */
- /* Port-B front mic-in path */
- {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x39, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- /* Port-C line-in path */
- {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
- {0x3a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x33, AC_VERB_SET_CONNECT_SEL, 0x0},
- /* Port-E mic-in path */
- {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x3c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x34, AC_VERB_SET_CONNECT_SEL, 0x0},
- /* Analog CD Input */
- {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
- /* Analog Mix output amp */
- {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x1f}, /* 0dB */
-
- { }
-};
-
-static const struct hda_verb ad1988_6stack_fp_init_verbs[] = {
- /* Headphone; unmute as default */
- {0x03, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- /* Port-A front headphone path */
- {0x37, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC0:03h */
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
-
- { }
-};
-
-static const struct hda_verb ad1988_capture_init_verbs[] = {
- /* mute analog mix */
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(5)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(6)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(7)},
- /* select ADCs - front-mic */
- {0x0c, AC_VERB_SET_CONNECT_SEL, 0x1},
- {0x0d, AC_VERB_SET_CONNECT_SEL, 0x1},
- {0x0e, AC_VERB_SET_CONNECT_SEL, 0x1},
-
- { }
-};
-
-static const struct hda_verb ad1988_spdif_init_verbs[] = {
- /* SPDIF out sel */
- {0x02, AC_VERB_SET_CONNECT_SEL, 0x0}, /* PCM */
- {0x0b, AC_VERB_SET_CONNECT_SEL, 0x0}, /* ADC1 */
- {0x1d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x1d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- /* SPDIF out pin */
- {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x27}, /* 0dB */
-
- { }
-};
-
-static const struct hda_verb ad1988_spdif_in_init_verbs[] = {
- /* unmute SPDIF input pin */
- {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- { }
-};
-
-/* AD1989 has no ADC -> SPDIF route */
-static const struct hda_verb ad1989_spdif_init_verbs[] = {
- /* SPDIF-1 out pin */
- {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x27}, /* 0dB */
- /* SPDIF-2/HDMI out pin */
- {0x1d, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- {0x1d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x27}, /* 0dB */
- { }
-};
-
-/*
- * verbs for 3stack (+dig)
- */
-static const struct hda_verb ad1988_3stack_ch2_init[] = {
- /* set port-C to line-in */
- { 0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN },
- /* set port-E to mic-in */
- { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80 },
- { } /* end */
-};
-
-static const struct hda_verb ad1988_3stack_ch6_init[] = {
- /* set port-C to surround out */
- { 0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- /* set port-E to CLFE out */
- { 0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
- { 0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { } /* end */
-};
-
-static const struct hda_channel_mode ad1988_3stack_modes[2] = {
- { 2, ad1988_3stack_ch2_init },
- { 6, ad1988_3stack_ch6_init },
-};
-
-static const struct hda_verb ad1988_3stack_init_verbs[] = {
- /* Front, Surround, CLFE, side DAC; unmute as default */
- {0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x06, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x05, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x0a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- /* Port-A front headphone path */
- {0x37, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC0:03h */
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
- /* Port-D line-out path */
- {0x29, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x29, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- /* Mono out path */
- {0x36, AC_VERB_SET_CONNECT_SEL, 0x1}, /* DAC1:04h */
- {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x13, AC_VERB_SET_AMP_GAIN_MUTE, 0xb01f}, /* unmute, 0dB */
- /* Port-B front mic-in path */
- {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x39, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- /* Port-C line-in/surround path - 6ch mode as default */
- {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x3a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x31, AC_VERB_SET_CONNECT_SEL, 0x0}, /* output sel: DAC 0x05 */
- {0x33, AC_VERB_SET_CONNECT_SEL, 0x0},
- /* Port-E mic-in/CLFE path - 6ch mode as default */
- {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x3c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x32, AC_VERB_SET_CONNECT_SEL, 0x1}, /* output sel: DAC 0x0a */
- {0x34, AC_VERB_SET_CONNECT_SEL, 0x0},
- /* mute analog mix */
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(5)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(6)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(7)},
- /* select ADCs - front-mic */
- {0x0c, AC_VERB_SET_CONNECT_SEL, 0x1},
- {0x0d, AC_VERB_SET_CONNECT_SEL, 0x1},
- {0x0e, AC_VERB_SET_CONNECT_SEL, 0x1},
- /* Analog Mix output amp */
- {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x1f}, /* 0dB */
- { }
-};
-
-/*
- * verbs for laptop mode (+dig)
- */
-static const struct hda_verb ad1988_laptop_hp_on[] = {
- /* unmute port-A and mute port-D */
- { 0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { 0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { } /* end */
-};
-static const struct hda_verb ad1988_laptop_hp_off[] = {
- /* mute port-A and unmute port-D */
- { 0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE },
- { 0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
- { } /* end */
-};
-
-#define AD1988_HP_EVENT 0x01
-
-static const struct hda_verb ad1988_laptop_init_verbs[] = {
- /* Front, Surround, CLFE, side DAC; unmute as default */
- {0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x06, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x05, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x0a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- /* Port-A front headphone path */
- {0x37, AC_VERB_SET_CONNECT_SEL, 0x00}, /* DAC0:03h */
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
- /* unsolicited event for pin-sense */
- {0x11, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1988_HP_EVENT },
- /* Port-D line-out path + EAPD */
- {0x29, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x29, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x12, AC_VERB_SET_EAPD_BTLENABLE, 0x00}, /* EAPD-off */
- /* Mono out path */
- {0x36, AC_VERB_SET_CONNECT_SEL, 0x1}, /* DAC1:04h */
- {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x13, AC_VERB_SET_AMP_GAIN_MUTE, 0xb01f}, /* unmute, 0dB */
- /* Port-B mic-in path */
- {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x39, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- /* Port-C docking station - try to output */
- {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x3a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x33, AC_VERB_SET_CONNECT_SEL, 0x0},
- /* mute analog mix */
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(5)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(6)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(7)},
- /* select ADCs - mic */
- {0x0c, AC_VERB_SET_CONNECT_SEL, 0x1},
- {0x0d, AC_VERB_SET_CONNECT_SEL, 0x1},
- {0x0e, AC_VERB_SET_CONNECT_SEL, 0x1},
- /* Analog Mix output amp */
- {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x1f}, /* 0dB */
- { }
-};
-
-static void ad1988_laptop_unsol_event(struct hda_codec *codec, unsigned int res)
-{
- if ((res >> 26) != AD1988_HP_EVENT)
- return;
- if (snd_hda_jack_detect(codec, 0x11))
- snd_hda_sequence_write(codec, ad1988_laptop_hp_on);
- else
- snd_hda_sequence_write(codec, ad1988_laptop_hp_off);
-}
-
-#ifdef CONFIG_PM
-static const struct hda_amp_list ad1988_loopbacks[] = {
- { 0x20, HDA_INPUT, 0 }, /* Front Mic */
- { 0x20, HDA_INPUT, 1 }, /* Line */
- { 0x20, HDA_INPUT, 4 }, /* Mic */
- { 0x20, HDA_INPUT, 6 }, /* CD */
- { } /* end */
-};
-#endif
#endif /* ENABLE_AD_STATIC_QUIRKS */
static int ad1988_auto_smux_enum_info(struct snd_kcontrol *kcontrol,
@@ -3220,7 +828,34 @@ static int ad1988_add_spdif_mux_ctl(struct hda_codec *codec)
/*
*/
-static int ad1988_parse_auto_config(struct hda_codec *codec)
+enum {
+ AD1988_FIXUP_6STACK_DIG,
+};
+
+static const struct hda_fixup ad1988_fixups[] = {
+ [AD1988_FIXUP_6STACK_DIG] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x11, 0x02214130 }, /* front-hp */
+ { 0x12, 0x01014010 }, /* line-out */
+ { 0x14, 0x02a19122 }, /* front-mic */
+ { 0x15, 0x01813021 }, /* line-in */
+ { 0x16, 0x01011012 }, /* line-out */
+ { 0x17, 0x01a19020 }, /* mic */
+ { 0x1b, 0x0145f1f0 }, /* SPDIF */
+ { 0x24, 0x01016011 }, /* line-out */
+ { 0x25, 0x01012013 }, /* line-out */
+ { }
+ }
+ },
+};
+
+static const struct hda_model_fixup ad1988_fixup_models[] = {
+ { .id = AD1988_FIXUP_6STACK_DIG, .name = "6stack-dig" },
+ {}
+};
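/*
 * Editorial aside (not part of this patch): each hda_pintbl entry above
 * pairs a pin NID with a 32-bit default-configuration word whose field
 * layout comes from the HD-audio specification (port connectivity,
 * location, default device, connection type, color, misc, association,
 * sequence).  A minimal, standalone sketch of a decoder for that layout
 * follows; the interpretations in the comments are per the spec, and
 * 0x02214130 is the front-hp value from the table above.
 */
#include <stdio.h>

static void decode_pincfg(unsigned int cfg)
{
	printf("connectivity=%u location=0x%02x device=0x%x conn=0x%x "
	       "color=0x%x misc=0x%x assoc=%u seq=%u\n",
	       (cfg >> 30) & 0x3,	/* port connectivity (0 = jack) */
	       (cfg >> 24) & 0x3f,	/* location (0x02 = front) */
	       (cfg >> 20) & 0xf,	/* default device (0x2 = HP out) */
	       (cfg >> 16) & 0xf,	/* connection type (0x1 = 1/8" jack) */
	       (cfg >> 12) & 0xf,	/* color (0x4 = green) */
	       (cfg >> 8) & 0xf,	/* misc bits */
	       (cfg >> 4) & 0xf,	/* default association */
	       cfg & 0xf);		/* sequence within the association */
}

int main(void)
{
	decode_pincfg(0x02214130);	/* the front-hp entry above */
	return 0;
}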
+
+static int patch_ad1988(struct hda_codec *codec)
{
struct ad198x_spec *spec;
int err;
@@ -3234,12 +869,19 @@ static int ad1988_parse_auto_config(struct hda_codec *codec)
spec->gen.mixer_merge_nid = 0x21;
spec->gen.beep_nid = 0x10;
set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
+
+ snd_hda_pick_fixup(codec, ad1988_fixup_models, NULL, ad1988_fixups);
+ snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
+
err = ad198x_parse_auto_config(codec);
if (err < 0)
goto error;
err = ad1988_add_spdif_mux_ctl(codec);
if (err < 0)
goto error;
+
+ snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);
+
return 0;
error:
@@ -3247,169 +889,6 @@ static int ad1988_parse_auto_config(struct hda_codec *codec)
return err;
}
-/*
- */
-
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static const char * const ad1988_models[AD1988_MODEL_LAST] = {
- [AD1988_6STACK] = "6stack",
- [AD1988_6STACK_DIG] = "6stack-dig",
- [AD1988_3STACK] = "3stack",
- [AD1988_3STACK_DIG] = "3stack-dig",
- [AD1988_LAPTOP] = "laptop",
- [AD1988_LAPTOP_DIG] = "laptop-dig",
- [AD1988_AUTO] = "auto",
-};
-
-static const struct snd_pci_quirk ad1988_cfg_tbl[] = {
- SND_PCI_QUIRK(0x1043, 0x81ec, "Asus P5B-DLX", AD1988_6STACK_DIG),
- SND_PCI_QUIRK(0x1043, 0x81f6, "Asus M2N-SLI", AD1988_6STACK_DIG),
- SND_PCI_QUIRK(0x1043, 0x8277, "Asus P5K-E/WIFI-AP", AD1988_6STACK_DIG),
- SND_PCI_QUIRK(0x1043, 0x82c0, "Asus M3N-HT Deluxe", AD1988_6STACK_DIG),
- SND_PCI_QUIRK(0x1043, 0x8311, "Asus P5Q-Premium/Pro", AD1988_6STACK_DIG),
- {}
-};
-
-static int patch_ad1988(struct hda_codec *codec)
-{
- struct ad198x_spec *spec;
- int err, board_config;
-
- board_config = snd_hda_check_board_config(codec, AD1988_MODEL_LAST,
- ad1988_models, ad1988_cfg_tbl);
- if (board_config < 0) {
- printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
- codec->chip_name);
- board_config = AD1988_AUTO;
- }
-
- if (board_config == AD1988_AUTO)
- return ad1988_parse_auto_config(codec);
-
- err = alloc_ad_spec(codec);
- if (err < 0)
- return err;
- spec = codec->spec;
-
- if (is_rev2(codec))
- snd_printk(KERN_INFO "patch_analog: AD1988A rev.2 detected, enabling workarounds\n");
-
- err = snd_hda_attach_beep_device(codec, 0x10);
- if (err < 0) {
- ad198x_free(codec);
- return err;
- }
- set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
-
- if (!spec->multiout.hp_nid)
- spec->multiout.hp_nid = ad1988_alt_dac_nid[0];
- switch (board_config) {
- case AD1988_6STACK:
- case AD1988_6STACK_DIG:
- spec->multiout.max_channels = 8;
- spec->multiout.num_dacs = 4;
- if (is_rev2(codec))
- spec->multiout.dac_nids = ad1988_6stack_dac_nids_rev2;
- else
- spec->multiout.dac_nids = ad1988_6stack_dac_nids;
- spec->input_mux = &ad1988_6stack_capture_source;
- spec->num_mixers = 2;
- if (is_rev2(codec))
- spec->mixers[0] = ad1988_6stack_mixers1_rev2;
- else
- spec->mixers[0] = ad1988_6stack_mixers1;
- spec->mixers[1] = ad1988_6stack_mixers2;
- spec->num_init_verbs = 1;
- spec->init_verbs[0] = ad1988_6stack_init_verbs;
- if (board_config == AD1988_6STACK_DIG) {
- spec->multiout.dig_out_nid = AD1988_SPDIF_OUT;
- spec->dig_in_nid = AD1988_SPDIF_IN;
- }
- break;
- case AD1988_3STACK:
- case AD1988_3STACK_DIG:
- spec->multiout.max_channels = 6;
- spec->multiout.num_dacs = 3;
- if (is_rev2(codec))
- spec->multiout.dac_nids = ad1988_3stack_dac_nids_rev2;
- else
- spec->multiout.dac_nids = ad1988_3stack_dac_nids;
- spec->input_mux = &ad1988_6stack_capture_source;
- spec->channel_mode = ad1988_3stack_modes;
- spec->num_channel_mode = ARRAY_SIZE(ad1988_3stack_modes);
- spec->num_mixers = 2;
- if (is_rev2(codec))
- spec->mixers[0] = ad1988_3stack_mixers1_rev2;
- else
- spec->mixers[0] = ad1988_3stack_mixers1;
- spec->mixers[1] = ad1988_3stack_mixers2;
- spec->num_init_verbs = 1;
- spec->init_verbs[0] = ad1988_3stack_init_verbs;
- if (board_config == AD1988_3STACK_DIG)
- spec->multiout.dig_out_nid = AD1988_SPDIF_OUT;
- break;
- case AD1988_LAPTOP:
- case AD1988_LAPTOP_DIG:
- spec->multiout.max_channels = 2;
- spec->multiout.num_dacs = 1;
- spec->multiout.dac_nids = ad1988_3stack_dac_nids;
- spec->input_mux = &ad1988_laptop_capture_source;
- spec->num_mixers = 1;
- spec->mixers[0] = ad1988_laptop_mixers;
- codec->inv_eapd = 1; /* inverted EAPD */
- spec->num_init_verbs = 1;
- spec->init_verbs[0] = ad1988_laptop_init_verbs;
- if (board_config == AD1988_LAPTOP_DIG)
- spec->multiout.dig_out_nid = AD1988_SPDIF_OUT;
- break;
- }
-
- spec->num_adc_nids = ARRAY_SIZE(ad1988_adc_nids);
- spec->adc_nids = ad1988_adc_nids;
- spec->capsrc_nids = ad1988_capsrc_nids;
- spec->mixers[spec->num_mixers++] = ad1988_capture_mixers;
- spec->init_verbs[spec->num_init_verbs++] = ad1988_capture_init_verbs;
- if (spec->multiout.dig_out_nid) {
- if (codec->vendor_id >= 0x11d4989a) {
- spec->mixers[spec->num_mixers++] =
- ad1989_spdif_out_mixers;
- spec->init_verbs[spec->num_init_verbs++] =
- ad1989_spdif_init_verbs;
- codec->slave_dig_outs = ad1989b_slave_dig_outs;
- } else {
- spec->mixers[spec->num_mixers++] =
- ad1988_spdif_out_mixers;
- spec->init_verbs[spec->num_init_verbs++] =
- ad1988_spdif_init_verbs;
- }
- }
- if (spec->dig_in_nid && codec->vendor_id < 0x11d4989a) {
- spec->mixers[spec->num_mixers++] = ad1988_spdif_in_mixers;
- spec->init_verbs[spec->num_init_verbs++] =
- ad1988_spdif_in_init_verbs;
- }
-
- codec->patch_ops = ad198x_patch_ops;
- switch (board_config) {
- case AD1988_LAPTOP:
- case AD1988_LAPTOP_DIG:
- codec->patch_ops.unsol_event = ad1988_laptop_unsol_event;
- break;
- }
-#ifdef CONFIG_PM
- spec->loopback.amplist = ad1988_loopbacks;
-#endif
- spec->vmaster_nid = 0x04;
-
- codec->no_trigger_sense = 1;
- codec->no_sticky_stream = 1;
-
- return 0;
-}
-#else /* ENABLE_AD_STATIC_QUIRKS */
-#define patch_ad1988 ad1988_parse_auto_config
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
/*
* AD1884 / AD1984
@@ -3423,167 +902,19 @@ static int patch_ad1988(struct hda_codec *codec)
*
* AD1984 = AD1884 + two digital mic-ins
*
- * FIXME:
- * For simplicity, we share the single DAC for both HP and line-outs
- * right now. The individual playbacks could easily be implemented,
- * but no framework for building them up exists so far.
- */
-
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static const hda_nid_t ad1884_dac_nids[1] = {
- 0x04,
-};
-
-static const hda_nid_t ad1884_adc_nids[2] = {
- 0x08, 0x09,
-};
-
-static const hda_nid_t ad1884_capsrc_nids[2] = {
- 0x0c, 0x0d,
-};
-
-#define AD1884_SPDIF_OUT 0x02
-
-static const struct hda_input_mux ad1884_capture_source = {
- .num_items = 4,
- .items = {
- { "Front Mic", 0x0 },
- { "Mic", 0x1 },
- { "CD", 0x2 },
- { "Mix", 0x3 },
- },
-};
-
-static const struct snd_kcontrol_new ad1884_base_mixers[] = {
- HDA_CODEC_VOLUME("PCM Playback Volume", 0x04, 0x0, HDA_OUTPUT),
- /* HDA_CODEC_VOLUME_IDX("PCM Playback Volume", 1, 0x03, 0x0, HDA_OUTPUT), */
- HDA_CODEC_MUTE("Headphone Playback Switch", 0x11, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Front Playback Switch", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x13, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x13, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x20, 0x00, HDA_INPUT),
- HDA_CODEC_MUTE("Front Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x01, HDA_INPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x01, HDA_INPUT),
- HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x02, HDA_INPUT),
- HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x02, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Boost Volume", 0x15, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x0d, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- /* The multiple "Capture Source" controls confuse alsamixer,
- * so name this control somewhat differently.
- */
- /* .name = "Capture Source", */
- .name = "Input Source",
- .count = 2,
- .info = ad198x_mux_enum_info,
- .get = ad198x_mux_enum_get,
- .put = ad198x_mux_enum_put,
- },
- /* SPDIF controls */
- HDA_CODEC_VOLUME("IEC958 Playback Volume", 0x1b, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source",
- /* identical with ad1983 */
- .info = ad1983_spdif_route_info,
- .get = ad1983_spdif_route_get,
- .put = ad1983_spdif_route_put,
- },
- { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1984_dmic_mixers[] = {
- HDA_CODEC_VOLUME("Digital Mic Capture Volume", 0x05, 0x0, HDA_INPUT),
- HDA_CODEC_MUTE("Digital Mic Capture Switch", 0x05, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME_IDX("Digital Mic Capture Volume", 1, 0x06, 0x0,
- HDA_INPUT),
- HDA_CODEC_MUTE_IDX("Digital Mic Capture Switch", 1, 0x06, 0x0,
- HDA_INPUT),
- { } /* end */
-};
-
-/*
- * initialization verbs
+ * AD1883 / AD1884A / AD1984A / AD1984B
+ *
+ * port-B (0x14) - front mic-in
+ * port-E (0x1c) - rear mic-in
+ * port-F (0x16) - CD / ext out
+ * port-C (0x15) - rear line-in
+ * port-D (0x12) - rear line-out
+ * port-A (0x11) - front hp-out
+ *
+ * AD1984A = AD1884A + digital-mic
+ * AD1883 = equivalent to AD1984A
+ * AD1984B = AD1984A + extra SPDIF-out
*/
-static const struct hda_verb ad1884_init_verbs[] = {
- /* DACs; mute as default */
- {0x03, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- /* Port-A (HP) mixer */
- {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- /* Port-A pin */
- {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
- {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* HP selector - select DAC2 */
- {0x22, AC_VERB_SET_CONNECT_SEL, 0x1},
- /* Port-D (Line-out) mixer */
- {0x0a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x0a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- /* Port-D pin */
- {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
- {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* Mono-out mixer */
- {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- /* Mono-out pin */
- {0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
- {0x13, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* Mono selector */
- {0x0e, AC_VERB_SET_CONNECT_SEL, 0x1},
- /* Port-B (front mic) pin */
- {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- /* Port-C (rear mic) pin */
- {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- /* Analog mixer; mute as default */
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
- /* Analog Mix output amp */
- {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x1f}, /* 0dB */
- /* SPDIF output selector */
- {0x02, AC_VERB_SET_CONNECT_SEL, 0x0}, /* PCM */
- {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x27}, /* 0dB */
- { } /* end */
-};
-
-#ifdef CONFIG_PM
-static const struct hda_amp_list ad1884_loopbacks[] = {
- { 0x20, HDA_INPUT, 0 }, /* Front Mic */
- { 0x20, HDA_INPUT, 1 }, /* Mic */
- { 0x20, HDA_INPUT, 2 }, /* CD */
- { 0x20, HDA_INPUT, 4 }, /* Docking */
- { } /* end */
-};
-#endif
-
-static const char * const ad1884_slave_vols[] = {
- "PCM", "Mic", "Mono", "Front Mic", "Mic", "CD",
- "Internal Mic", "Dock Mic", /* "Beep", */ "IEC958",
- NULL
-};
-
-enum {
- AD1884_AUTO,
- AD1884_BASIC,
- AD1884_MODELS
-};
-
-static const char * const ad1884_models[AD1884_MODELS] = {
- [AD1884_AUTO] = "auto",
- [AD1884_BASIC] = "basic",
-};
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
/* set the upper limit of the mixer amp to 0dB to avoid possible
 * damage from overloading
@@ -3599,14 +930,34 @@ static void ad1884_fixup_amp_override(struct hda_codec *codec,
(1 << AC_AMPCAP_MUTE_SHIFT));
}
+/* toggle GPIO1 according to the mute state */
+static void ad1884_vmaster_hp_gpio_hook(void *private_data, int enabled)
+{
+ struct hda_codec *codec = private_data;
+ struct ad198x_spec *spec = codec->spec;
+
+ if (spec->eapd_nid)
+ ad_vmaster_eapd_hook(private_data, enabled);
+ snd_hda_codec_update_cache(codec, 0x01, 0,
+ AC_VERB_SET_GPIO_DATA,
+ enabled ? 0x00 : 0x02);
+}
+
static void ad1884_fixup_hp_eapd(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
struct ad198x_spec *spec = codec->spec;
+ static const struct hda_verb gpio_init_verbs[] = {
+ {0x01, AC_VERB_SET_GPIO_MASK, 0x02},
+ {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x02},
+ {0x01, AC_VERB_SET_GPIO_DATA, 0x02},
+ {},
+ };
switch (action) {
case HDA_FIXUP_ACT_PRE_PROBE:
- spec->gen.vmaster_mute.hook = ad_vmaster_eapd_hook;
+ spec->gen.vmaster_mute.hook = ad1884_vmaster_hp_gpio_hook;
+ snd_hda_sequence_write_cache(codec, gpio_init_verbs);
break;
case HDA_FIXUP_ACT_PROBE:
if (spec->gen.autocfg.line_out_type == AUTO_PIN_SPEAKER_OUT)
@@ -3617,9 +968,18 @@ static void ad1884_fixup_hp_eapd(struct hda_codec *codec,
}
}
+/* set magic COEFs for dmic */
+static const struct hda_verb ad1884_dmic_init_verbs[] = {
+ {0x01, AC_VERB_SET_COEF_INDEX, 0x13f7},
+ {0x01, AC_VERB_SET_PROC_COEF, 0x08},
+ {}
+};
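/*
 * Editorial aside (not part of this patch): the two verbs above follow
 * the usual index/value pattern for vendor processing coefficients --
 * AC_VERB_SET_COEF_INDEX selects the coefficient and AC_VERB_SET_PROC_COEF
 * writes its value; what coefficient 0x13f7 actually controls is
 * undocumented here, hence "magic".  A hedged sketch of the same sequence
 * as a helper (hypothetical name, assuming this driver's usual hda_codec
 * headers and snd_hda_codec_write()) could look like:
 */
static void ad_write_coef(struct hda_codec *codec, hda_nid_t nid,
			  unsigned int index, unsigned int value)
{
	/* select the vendor coefficient index, then write its value */
	snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_COEF_INDEX, index);
	snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_PROC_COEF, value);
}
/* e.g. ad_write_coef(codec, 0x01, 0x13f7, 0x08); */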
+
enum {
AD1884_FIXUP_AMP_OVERRIDE,
AD1884_FIXUP_HP_EAPD,
+ AD1884_FIXUP_DMIC_COEF,
+ AD1884_FIXUP_HP_TOUCHSMART,
};
static const struct hda_fixup ad1884_fixups[] = {
@@ -3633,15 +993,27 @@ static const struct hda_fixup ad1884_fixups[] = {
.chained = true,
.chain_id = AD1884_FIXUP_AMP_OVERRIDE,
},
+ [AD1884_FIXUP_DMIC_COEF] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = ad1884_dmic_init_verbs,
+ },
+ [AD1884_FIXUP_HP_TOUCHSMART] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = ad1884_dmic_init_verbs,
+ .chained = true,
+ .chain_id = AD1884_FIXUP_HP_EAPD,
+ },
};
static const struct snd_pci_quirk ad1884_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x103c, 0x2a82, "HP Touchsmart", AD1884_FIXUP_HP_TOUCHSMART),
SND_PCI_QUIRK_VENDOR(0x103c, "HP", AD1884_FIXUP_HP_EAPD),
+ SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1884_FIXUP_DMIC_COEF),
{}
};
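/*
 * Editorial aside (not part of this patch): entries in ad1884_fixup_tbl
 * are matched against the codec's PCI subsystem vendor/device IDs, and
 * the first matching entry wins; that is why the device-specific HP
 * Touchsmart quirk is listed ahead of the vendor-wide HP entry.  A
 * simplified sketch of that first-match lookup (not the kernel's actual
 * snd_pci_quirk_lookup() implementation) is:
 */
struct quirk {
	unsigned short subvendor;
	unsigned short subdevice;	/* 0 = match any device of this vendor */
	int fixup_id;
};

static int lookup_fixup(const struct quirk *q,
			unsigned short vendor, unsigned short device)
{
	for (; q->subvendor; q++) {
		if (q->subvendor != vendor)
			continue;
		if (!q->subdevice || q->subdevice == device)
			return q->fixup_id;	/* first match wins */
	}
	return -1;	/* no quirk; fall back to BIOS auto-parsing */
}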
-static int ad1884_parse_auto_config(struct hda_codec *codec)
+static int patch_ad1884(struct hda_codec *codec)
{
struct ad198x_spec *spec;
int err;
@@ -3674,1170 +1046,6 @@ static int ad1884_parse_auto_config(struct hda_codec *codec)
return err;
}
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static int patch_ad1884_basic(struct hda_codec *codec)
-{
- struct ad198x_spec *spec;
- int err;
-
- err = alloc_ad_spec(codec);
- if (err < 0)
- return err;
- spec = codec->spec;
-
- err = snd_hda_attach_beep_device(codec, 0x10);
- if (err < 0) {
- ad198x_free(codec);
- return err;
- }
- set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
-
- spec->multiout.max_channels = 2;
- spec->multiout.num_dacs = ARRAY_SIZE(ad1884_dac_nids);
- spec->multiout.dac_nids = ad1884_dac_nids;
- spec->multiout.dig_out_nid = AD1884_SPDIF_OUT;
- spec->num_adc_nids = ARRAY_SIZE(ad1884_adc_nids);
- spec->adc_nids = ad1884_adc_nids;
- spec->capsrc_nids = ad1884_capsrc_nids;
- spec->input_mux = &ad1884_capture_source;
- spec->num_mixers = 1;
- spec->mixers[0] = ad1884_base_mixers;
- spec->num_init_verbs = 1;
- spec->init_verbs[0] = ad1884_init_verbs;
- spec->spdif_route = 0;
-#ifdef CONFIG_PM
- spec->loopback.amplist = ad1884_loopbacks;
-#endif
- spec->vmaster_nid = 0x04;
- /* we need to cover all playback volumes */
- spec->slave_vols = ad1884_slave_vols;
- /* slaves may contain input volumes, so we can't raise to 0dB blindly */
- spec->avoid_init_slave_vol = 1;
-
- codec->patch_ops = ad198x_patch_ops;
-
- codec->no_trigger_sense = 1;
- codec->no_sticky_stream = 1;
-
- return 0;
-}
-
-static int patch_ad1884(struct hda_codec *codec)
-{
- int board_config;
-
- board_config = snd_hda_check_board_config(codec, AD1884_MODELS,
- ad1884_models, NULL);
- if (board_config < 0) {
- printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
- codec->chip_name);
- board_config = AD1884_AUTO;
- }
-
- if (board_config == AD1884_AUTO)
- return ad1884_parse_auto_config(codec);
- else
- return patch_ad1884_basic(codec);
-}
-#else /* ENABLE_AD_STATIC_QUIRKS */
-#define patch_ad1884 ad1884_parse_auto_config
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
-
-#ifdef ENABLE_AD_STATIC_QUIRKS
-/*
- * Lenovo Thinkpad T61/X61
- */
-static const struct hda_input_mux ad1984_thinkpad_capture_source = {
- .num_items = 4,
- .items = {
- { "Mic", 0x0 },
- { "Internal Mic", 0x1 },
- { "Mix", 0x3 },
- { "Dock Mic", 0x4 },
- },
-};
-
-
-/*
- * Dell Precision T3400
- */
-static const struct hda_input_mux ad1984_dell_desktop_capture_source = {
- .num_items = 3,
- .items = {
- { "Front Mic", 0x0 },
- { "Line-In", 0x1 },
- { "Mix", 0x3 },
- },
-};
-
-
-static const struct snd_kcontrol_new ad1984_thinkpad_mixers[] = {
- HDA_CODEC_VOLUME("PCM Playback Volume", 0x04, 0x0, HDA_OUTPUT),
- /* HDA_CODEC_VOLUME_IDX("PCM Playback Volume", 1, 0x03, 0x0, HDA_OUTPUT), */
- HDA_CODEC_MUTE("Headphone Playback Switch", 0x11, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Speaker Playback Switch", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x00, HDA_INPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
- HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x20, 0x01, HDA_INPUT),
- HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x20, 0x01, HDA_INPUT),
- HDA_CODEC_VOLUME("Beep Playback Volume", 0x20, 0x03, HDA_INPUT),
- HDA_CODEC_MUTE("Beep Playback Switch", 0x20, 0x03, HDA_INPUT),
- HDA_CODEC_VOLUME("Dock Mic Playback Volume", 0x20, 0x04, HDA_INPUT),
- HDA_CODEC_MUTE("Dock Mic Playback Switch", 0x20, 0x04, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x15, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Dock Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x0d, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- /* The multiple "Capture Source" controls confuse alsamixer,
- * so name this control somewhat differently.
- */
- /* .name = "Capture Source", */
- .name = "Input Source",
- .count = 2,
- .info = ad198x_mux_enum_info,
- .get = ad198x_mux_enum_get,
- .put = ad198x_mux_enum_put,
- },
- /* SPDIF controls */
- HDA_CODEC_VOLUME("IEC958 Playback Volume", 0x1b, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source",
- /* identical with ad1983 */
- .info = ad1983_spdif_route_info,
- .get = ad1983_spdif_route_get,
- .put = ad1983_spdif_route_put,
- },
- { } /* end */
-};
-
-/* additional verbs */
-static const struct hda_verb ad1984_thinkpad_init_verbs[] = {
- /* Port-E (docking station mic) pin */
- {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* docking mic boost */
- {0x25, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- /* Analog PC Beeper - allow firmware/ACPI beeps */
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(3) | 0x1a},
- /* Analog mixer - docking mic; mute as default */
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
- /* enable EAPD bit */
- {0x12, AC_VERB_SET_EAPD_BTLENABLE, 0x02},
- { } /* end */
-};
-
-/*
- * Dell Precision T3400
- */
-static const struct snd_kcontrol_new ad1984_dell_desktop_mixers[] = {
- HDA_CODEC_VOLUME("PCM Playback Volume", 0x04, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Headphone Playback Switch", 0x11, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Speaker Playback Switch", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x13, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x13, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x20, 0x00, HDA_INPUT),
- HDA_CODEC_MUTE("Front Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
- HDA_CODEC_VOLUME("Line-In Playback Volume", 0x20, 0x01, HDA_INPUT),
- HDA_CODEC_MUTE("Line-In Playback Switch", 0x20, 0x01, HDA_INPUT),
- HDA_CODEC_VOLUME("Line-In Boost Volume", 0x15, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x0d, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- /* The multiple "Capture Source" controls confuse alsamixer,
- * so name this control somewhat differently.
- */
- /* .name = "Capture Source", */
- .name = "Input Source",
- .count = 2,
- .info = ad198x_mux_enum_info,
- .get = ad198x_mux_enum_get,
- .put = ad198x_mux_enum_put,
- },
- { } /* end */
-};
-
-/* Digital MIC ADC NID 0x05 + 0x06 */
-static int ad1984_pcm_dmic_prepare(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- unsigned int stream_tag,
- unsigned int format,
- struct snd_pcm_substream *substream)
-{
- snd_hda_codec_setup_stream(codec, 0x05 + substream->number,
- stream_tag, 0, format);
- return 0;
-}
-
-static int ad1984_pcm_dmic_cleanup(struct hda_pcm_stream *hinfo,
- struct hda_codec *codec,
- struct snd_pcm_substream *substream)
-{
- snd_hda_codec_cleanup_stream(codec, 0x05 + substream->number);
- return 0;
-}
-
-static const struct hda_pcm_stream ad1984_pcm_dmic_capture = {
- .substreams = 2,
- .channels_min = 2,
- .channels_max = 2,
- .nid = 0x05,
- .ops = {
- .prepare = ad1984_pcm_dmic_prepare,
- .cleanup = ad1984_pcm_dmic_cleanup
- },
-};
-
-static int ad1984_build_pcms(struct hda_codec *codec)
-{
- struct ad198x_spec *spec = codec->spec;
- struct hda_pcm *info;
- int err;
-
- err = ad198x_build_pcms(codec);
- if (err < 0)
- return err;
-
- info = spec->pcm_rec + codec->num_pcms;
- codec->num_pcms++;
- info->name = "AD1984 Digital Mic";
- info->stream[SNDRV_PCM_STREAM_CAPTURE] = ad1984_pcm_dmic_capture;
- return 0;
-}
-
-/* models */
-enum {
- AD1984_AUTO,
- AD1984_BASIC,
- AD1984_THINKPAD,
- AD1984_DELL_DESKTOP,
- AD1984_MODELS
-};
-
-static const char * const ad1984_models[AD1984_MODELS] = {
- [AD1984_AUTO] = "auto",
- [AD1984_BASIC] = "basic",
- [AD1984_THINKPAD] = "thinkpad",
- [AD1984_DELL_DESKTOP] = "dell_desktop",
-};
-
-static const struct snd_pci_quirk ad1984_cfg_tbl[] = {
- /* Lenovo Thinkpad T61/X61 */
- SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo Thinkpad", AD1984_THINKPAD),
- SND_PCI_QUIRK(0x1028, 0x0214, "Dell T3400", AD1984_DELL_DESKTOP),
- SND_PCI_QUIRK(0x1028, 0x0233, "Dell Latitude E6400", AD1984_DELL_DESKTOP),
- {}
-};
-
-static int patch_ad1984(struct hda_codec *codec)
-{
- struct ad198x_spec *spec;
- int board_config, err;
-
- board_config = snd_hda_check_board_config(codec, AD1984_MODELS,
- ad1984_models, ad1984_cfg_tbl);
- if (board_config < 0) {
- printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
- codec->chip_name);
- board_config = AD1984_AUTO;
- }
-
- if (board_config == AD1984_AUTO)
- return ad1884_parse_auto_config(codec);
-
- err = patch_ad1884_basic(codec);
- if (err < 0)
- return err;
- spec = codec->spec;
-
- switch (board_config) {
- case AD1984_BASIC:
- /* additional digital mics */
- spec->mixers[spec->num_mixers++] = ad1984_dmic_mixers;
- codec->patch_ops.build_pcms = ad1984_build_pcms;
- break;
- case AD1984_THINKPAD:
- if (codec->subsystem_id == 0x17aa20fb) {
- /* Thinkpad X300 does not have the ability to do SPDIF,
- or to attach to a docking station to use SPDIF */
- spec->multiout.dig_out_nid = 0;
- } else
- spec->multiout.dig_out_nid = AD1884_SPDIF_OUT;
- spec->input_mux = &ad1984_thinkpad_capture_source;
- spec->mixers[0] = ad1984_thinkpad_mixers;
- spec->init_verbs[spec->num_init_verbs++] = ad1984_thinkpad_init_verbs;
- spec->analog_beep = 1;
- break;
- case AD1984_DELL_DESKTOP:
- spec->multiout.dig_out_nid = 0;
- spec->input_mux = &ad1984_dell_desktop_capture_source;
- spec->mixers[0] = ad1984_dell_desktop_mixers;
- break;
- }
- return 0;
-}
-#else /* ENABLE_AD_STATIC_QUIRKS */
-#define patch_ad1984 ad1884_parse_auto_config
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
-
-/*
- * AD1883 / AD1884A / AD1984A / AD1984B
- *
- * port-B (0x14) - front mic-in
- * port-E (0x1c) - rear mic-in
- * port-F (0x16) - CD / ext out
- * port-C (0x15) - rear line-in
- * port-D (0x12) - rear line-out
- * port-A (0x11) - front hp-out
- *
- * AD1984A = AD1884A + digital-mic
- * AD1883 = equivalent to AD1984A
- * AD1984B = AD1984A + extra SPDIF-out
- *
- * FIXME:
- * We share the single DAC for both HP and line-outs (see AD1884/1984).
- */
-
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static const hda_nid_t ad1884a_dac_nids[1] = {
- 0x03,
-};
-
-#define ad1884a_adc_nids ad1884_adc_nids
-#define ad1884a_capsrc_nids ad1884_capsrc_nids
-
-#define AD1884A_SPDIF_OUT 0x02
-
-static const struct hda_input_mux ad1884a_capture_source = {
- .num_items = 5,
- .items = {
- { "Front Mic", 0x0 },
- { "Mic", 0x4 },
- { "Line", 0x1 },
- { "CD", 0x2 },
- { "Mix", 0x3 },
- },
-};
-
-static const struct snd_kcontrol_new ad1884a_base_mixers[] = {
- HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Headphone Playback Switch", 0x11, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Front Playback Switch", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x13, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x13, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("PCM Playback Volume", 0x20, 0x5, HDA_INPUT),
- HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
- HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x20, 0x00, HDA_INPUT),
- HDA_CODEC_MUTE("Front Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
- HDA_CODEC_VOLUME("Line Playback Volume", 0x20, 0x01, HDA_INPUT),
- HDA_CODEC_MUTE("Line Playback Switch", 0x20, 0x01, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x04, HDA_INPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x04, HDA_INPUT),
- HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x02, HDA_INPUT),
- HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x02, HDA_INPUT),
- HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Line Boost Volume", 0x15, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x0d, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- /* The multiple "Capture Source" controls confuse alsamixer,
- * so name this control somewhat differently.
- */
- /* .name = "Capture Source", */
- .name = "Input Source",
- .count = 2,
- .info = ad198x_mux_enum_info,
- .get = ad198x_mux_enum_get,
- .put = ad198x_mux_enum_put,
- },
- /* SPDIF controls */
- HDA_CODEC_VOLUME("IEC958 Playback Volume", 0x1b, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source",
- /* identical with ad1983 */
- .info = ad1983_spdif_route_info,
- .get = ad1983_spdif_route_get,
- .put = ad1983_spdif_route_put,
- },
- { } /* end */
-};
-
-/*
- * initialization verbs
- */
-static const struct hda_verb ad1884a_init_verbs[] = {
- /* DACs; unmute as default */
- {0x03, AC_VERB_SET_AMP_GAIN_MUTE, 0x27}, /* 0dB */
- {0x04, AC_VERB_SET_AMP_GAIN_MUTE, 0x27}, /* 0dB */
- /* Port-A (HP) mixer - route only from analog mixer */
- {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- /* Port-A pin */
- {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
- {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* Port-D (Line-out) mixer - route only from analog mixer */
- {0x0a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x0a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- /* Port-D pin */
- {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
- {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* Mono-out mixer - route only from analog mixer */
- {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- /* Mono-out pin */
- {0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
- {0x13, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* Port-B (front mic) pin */
- {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- /* Port-C (rear line-in) pin */
- {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
- {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- /* Port-E (rear mic) pin */
- {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- {0x25, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO}, /* no boost */
- /* Port-F (CD) pin */
- {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
- {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* Analog mixer; mute as default */
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(4)}, /* aux */
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(5)},
- /* Analog Mix output amp */
- {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* capture sources */
- {0x0c, AC_VERB_SET_CONNECT_SEL, 0x0},
- {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- {0x0d, AC_VERB_SET_CONNECT_SEL, 0x0},
- {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* SPDIF output amp */
- {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x27}, /* 0dB */
- { } /* end */
-};
-
-#ifdef CONFIG_PM
-static const struct hda_amp_list ad1884a_loopbacks[] = {
- { 0x20, HDA_INPUT, 0 }, /* Front Mic */
- { 0x20, HDA_INPUT, 1 }, /* Mic */
- { 0x20, HDA_INPUT, 2 }, /* CD */
- { 0x20, HDA_INPUT, 4 }, /* Docking */
- { } /* end */
-};
-#endif
-
-/*
- * Laptop model
- *
- * Port A: Headphone jack
- * Port B: MIC jack
- * Port C: Internal MIC
- * Port D: Dock Line Out (if enabled)
- * Port E: Dock Line In (if enabled)
- * Port F: Internal speakers
- */
-
-static int ad1884a_mobile_master_sw_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
- int ret = snd_hda_mixer_amp_switch_put(kcontrol, ucontrol);
- int mute = (!ucontrol->value.integer.value[0] &&
- !ucontrol->value.integer.value[1]);
- /* toggle GPIO1 according to the mute state */
- snd_hda_codec_write_cache(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
- mute ? 0x02 : 0x0);
- return ret;
-}
-
-static const struct snd_kcontrol_new ad1884a_laptop_mixers[] = {
- HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Master Playback Switch",
- .subdevice = HDA_SUBDEV_AMP_FLAG,
- .info = snd_hda_mixer_amp_switch_info,
- .get = snd_hda_mixer_amp_switch_get,
- .put = ad1884a_mobile_master_sw_put,
- .private_value = HDA_COMPOSE_AMP_VAL(0x21, 3, 0, HDA_OUTPUT),
- },
- HDA_CODEC_MUTE("Dock Playback Switch", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("PCM Playback Volume", 0x20, 0x5, HDA_INPUT),
- HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x00, HDA_INPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
- HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x20, 0x01, HDA_INPUT),
- HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x20, 0x01, HDA_INPUT),
- HDA_CODEC_VOLUME("Dock Mic Playback Volume", 0x20, 0x04, HDA_INPUT),
- HDA_CODEC_MUTE("Dock Mic Playback Switch", 0x20, 0x04, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x15, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Dock Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
- { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1884a_mobile_mixers[] = {
- HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT),
- /*HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT),*/
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Master Playback Switch",
- .subdevice = HDA_SUBDEV_AMP_FLAG,
- .info = snd_hda_mixer_amp_switch_info,
- .get = snd_hda_mixer_amp_switch_get,
- .put = ad1884a_mobile_master_sw_put,
- .private_value = HDA_COMPOSE_AMP_VAL(0x21, 3, 0, HDA_OUTPUT),
- },
- HDA_CODEC_VOLUME("PCM Playback Volume", 0x20, 0x5, HDA_INPUT),
- HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Capture Volume", 0x14, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Internal Mic Capture Volume", 0x15, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
- { } /* end */
-};
-
-/* mute internal speaker if HP is plugged */
-static void ad1884a_hp_automute(struct hda_codec *codec)
-{
- unsigned int present;
-
- present = snd_hda_jack_detect(codec, 0x11);
- snd_hda_codec_amp_stereo(codec, 0x16, HDA_OUTPUT, 0,
- HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
- snd_hda_codec_write(codec, 0x16, 0, AC_VERB_SET_EAPD_BTLENABLE,
- present ? 0x00 : 0x02);
-}
-
-/* switch to external mic if plugged */
-static void ad1884a_hp_automic(struct hda_codec *codec)
-{
- unsigned int present;
-
- present = snd_hda_jack_detect(codec, 0x14);
- snd_hda_codec_write(codec, 0x0c, 0, AC_VERB_SET_CONNECT_SEL,
- present ? 0 : 1);
-}
-
-#define AD1884A_HP_EVENT 0x37
-#define AD1884A_MIC_EVENT 0x36
-
-/* unsolicited event for HP jack sensing */
-static void ad1884a_hp_unsol_event(struct hda_codec *codec, unsigned int res)
-{
- switch (res >> 26) {
- case AD1884A_HP_EVENT:
- ad1884a_hp_automute(codec);
- break;
- case AD1884A_MIC_EVENT:
- ad1884a_hp_automic(codec);
- break;
- }
-}
-
-/* initialize jack-sensing, too */
-static int ad1884a_hp_init(struct hda_codec *codec)
-{
- ad198x_init(codec);
- ad1884a_hp_automute(codec);
- ad1884a_hp_automic(codec);
- return 0;
-}
-
-/* mute internal speaker if HP or docking HP is plugged */
-static void ad1884a_laptop_automute(struct hda_codec *codec)
-{
- unsigned int present;
-
- present = snd_hda_jack_detect(codec, 0x11);
- if (!present)
- present = snd_hda_jack_detect(codec, 0x12);
- snd_hda_codec_amp_stereo(codec, 0x16, HDA_OUTPUT, 0,
- HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
- snd_hda_codec_write(codec, 0x16, 0, AC_VERB_SET_EAPD_BTLENABLE,
- present ? 0x00 : 0x02);
-}
-
-/* switch to external mic if plugged */
-static void ad1884a_laptop_automic(struct hda_codec *codec)
-{
- unsigned int idx;
-
- if (snd_hda_jack_detect(codec, 0x14))
- idx = 0;
- else if (snd_hda_jack_detect(codec, 0x1c))
- idx = 4;
- else
- idx = 1;
- snd_hda_codec_write(codec, 0x0c, 0, AC_VERB_SET_CONNECT_SEL, idx);
-}
-
-/* unsolicited event for HP jack sensing */
-static void ad1884a_laptop_unsol_event(struct hda_codec *codec,
- unsigned int res)
-{
- switch (res >> 26) {
- case AD1884A_HP_EVENT:
- ad1884a_laptop_automute(codec);
- break;
- case AD1884A_MIC_EVENT:
- ad1884a_laptop_automic(codec);
- break;
- }
-}
-
-/* initialize jack-sensing, too */
-static int ad1884a_laptop_init(struct hda_codec *codec)
-{
- ad198x_init(codec);
- ad1884a_laptop_automute(codec);
- ad1884a_laptop_automic(codec);
- return 0;
-}
-
-/* additional verbs for laptop model */
-static const struct hda_verb ad1884a_laptop_verbs[] = {
- /* Port-A (HP) pin - always unmuted */
- {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- /* Port-F (int speaker) mixer - route only from analog mixer */
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- /* Port-F (int speaker) pin */
- {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* required for compaq 6530s/6531s speaker output */
- {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- /* Port-C pin - internal mic-in */
- {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x14, AC_VERB_SET_AMP_GAIN_MUTE, 0x7002}, /* raise mic as default */
- {0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0x7002}, /* raise mic as default */
- /* Port-D (docking line-out) pin - default unmuted */
- {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- /* analog mix */
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
- /* unsolicited event for pin-sense */
- {0x11, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT},
- {0x12, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT},
- {0x14, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_MIC_EVENT},
- {0x1c, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_MIC_EVENT},
- /* allow to touch GPIO1 (for mute control) */
- {0x01, AC_VERB_SET_GPIO_MASK, 0x02},
- {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x02},
- {0x01, AC_VERB_SET_GPIO_DATA, 0x02}, /* first muted */
- { } /* end */
-};
-
-static const struct hda_verb ad1884a_mobile_verbs[] = {
- /* DACs; unmute as default */
- {0x03, AC_VERB_SET_AMP_GAIN_MUTE, 0x27}, /* 0dB */
- {0x04, AC_VERB_SET_AMP_GAIN_MUTE, 0x27}, /* 0dB */
- /* Port-A (HP) mixer - route only from analog mixer */
- {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- /* Port-A pin */
- {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
- /* Port-A (HP) pin - always unmuted */
- {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- /* Port-B (mic jack) pin */
- {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x14, AC_VERB_SET_AMP_GAIN_MUTE, 0x7002}, /* raise mic as default */
- /* Port-C (int mic) pin */
- {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0x7002}, /* raise mic as default */
- /* Port-F (int speaker) mixer - route only from analog mixer */
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- /* Port-F pin */
- {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
- {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* Analog mixer; mute as default */
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(5)},
- /* Analog Mix output amp */
- {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* capture sources */
- /* {0x0c, AC_VERB_SET_CONNECT_SEL, 0x0}, */ /* set via unsol */
- {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- {0x0d, AC_VERB_SET_CONNECT_SEL, 0x0},
- {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* unsolicited event for pin-sense */
- {0x11, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT},
- {0x14, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_MIC_EVENT},
- /* allow to touch GPIO1 (for mute control) */
- {0x01, AC_VERB_SET_GPIO_MASK, 0x02},
- {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x02},
- {0x01, AC_VERB_SET_GPIO_DATA, 0x02}, /* first muted */
- { } /* end */
-};
-
-/*
- * Thinkpad X300
- * 0x11 - HP
- * 0x12 - speaker
- * 0x14 - mic-in
- * 0x17 - built-in mic
- */
-
-static const struct hda_verb ad1984a_thinkpad_verbs[] = {
- /* HP unmute */
- {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- /* analog mix */
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
- /* turn on EAPD */
- {0x12, AC_VERB_SET_EAPD_BTLENABLE, 0x02},
- /* unsolicited event for pin-sense */
- {0x11, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT},
- /* internal mic - dmic */
- {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
- /* set magic COEFs for dmic */
- {0x01, AC_VERB_SET_COEF_INDEX, 0x13f7},
- {0x01, AC_VERB_SET_PROC_COEF, 0x08},
- { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1984a_thinkpad_mixers[] = {
- HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("PCM Playback Volume", 0x20, 0x5, HDA_INPUT),
- HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x00, HDA_INPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Boost Volume", 0x14, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x17, 0x0, HDA_INPUT),
- HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Capture Source",
- .info = ad198x_mux_enum_info,
- .get = ad198x_mux_enum_get,
- .put = ad198x_mux_enum_put,
- },
- { } /* end */
-};
-
-static const struct hda_input_mux ad1984a_thinkpad_capture_source = {
- .num_items = 3,
- .items = {
- { "Mic", 0x0 },
- { "Internal Mic", 0x5 },
- { "Mix", 0x3 },
- },
-};
-
-/* mute internal speaker if HP is plugged */
-static void ad1984a_thinkpad_automute(struct hda_codec *codec)
-{
- unsigned int present;
-
- present = snd_hda_jack_detect(codec, 0x11);
- snd_hda_codec_amp_stereo(codec, 0x12, HDA_OUTPUT, 0,
- HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
-}
-
-/* unsolicited event for HP jack sensing */
-static void ad1984a_thinkpad_unsol_event(struct hda_codec *codec,
- unsigned int res)
-{
- if ((res >> 26) != AD1884A_HP_EVENT)
- return;
- ad1984a_thinkpad_automute(codec);
-}
-
-/* initialize jack-sensing, too */
-static int ad1984a_thinkpad_init(struct hda_codec *codec)
-{
- ad198x_init(codec);
- ad1984a_thinkpad_automute(codec);
- return 0;
-}
-
-/*
- * Precision R5500
- * 0x12 - HP/line-out
- * 0x13 - speaker (mono)
- * 0x15 - mic-in
- */
-
-static const struct hda_verb ad1984a_precision_verbs[] = {
- /* Unmute main output path */
- {0x03, AC_VERB_SET_AMP_GAIN_MUTE, 0x27}, /* 0dB */
- {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE + 0x1f}, /* 0dB */
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(5) + 0x17}, /* 0dB */
- /* Analog mixer; mute as default */
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
- /* Select mic as input */
- {0x0c, AC_VERB_SET_CONNECT_SEL, 0x1},
- {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE + 0x27}, /* 0dB */
- /* Configure as mic */
- {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x15, AC_VERB_SET_AMP_GAIN_MUTE, 0x7002}, /* raise mic as default */
- /* HP unmute */
- {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- /* turn on EAPD */
- {0x13, AC_VERB_SET_EAPD_BTLENABLE, 0x02},
- /* unsolicited event for pin-sense */
- {0x12, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT},
- { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1984a_precision_mixers[] = {
- HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("PCM Playback Volume", 0x20, 0x5, HDA_INPUT),
- HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x01, HDA_INPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x01, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Boost Volume", 0x15, 0x0, HDA_INPUT),
- HDA_CODEC_MUTE("Front Playback Switch", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Speaker Playback Volume", 0x13, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
- { } /* end */
-};
-
-
-/* mute internal speaker if HP is plugged */
-static void ad1984a_precision_automute(struct hda_codec *codec)
-{
- unsigned int present;
-
- present = snd_hda_jack_detect(codec, 0x12);
- snd_hda_codec_amp_stereo(codec, 0x13, HDA_OUTPUT, 0,
- HDA_AMP_MUTE, present ? HDA_AMP_MUTE : 0);
-}
-
-
-/* unsolicited event for HP jack sensing */
-static void ad1984a_precision_unsol_event(struct hda_codec *codec,
- unsigned int res)
-{
- if ((res >> 26) != AD1884A_HP_EVENT)
- return;
- ad1984a_precision_automute(codec);
-}
-
-/* initialize jack-sensing, too */
-static int ad1984a_precision_init(struct hda_codec *codec)
-{
- ad198x_init(codec);
- ad1984a_precision_automute(codec);
- return 0;
-}
-
-
-/*
- * HP Touchsmart
- * port-A (0x11) - front hp-out
- * port-B (0x14) - unused
- * port-C (0x15) - unused
- * port-D (0x12) - rear line out
- * port-E (0x1c) - front mic-in
- * port-F (0x16) - Internal speakers
- * digital-mic (0x17) - Internal mic
- */
-
-static const struct hda_verb ad1984a_touchsmart_verbs[] = {
- /* DACs; unmute as default */
- {0x03, AC_VERB_SET_AMP_GAIN_MUTE, 0x27}, /* 0dB */
- {0x04, AC_VERB_SET_AMP_GAIN_MUTE, 0x27}, /* 0dB */
- /* Port-A (HP) mixer - route only from analog mixer */
- {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x07, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- /* Port-A pin */
- {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
- /* Port-A (HP) pin - always unmuted */
- {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- /* Port-E (int speaker) mixer - route only from analog mixer */
- {0x25, AC_VERB_SET_AMP_GAIN_MUTE, 0x03},
- /* Port-E pin */
- {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
- {0x1c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
- {0x1c, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- /* Port-F (int speaker) mixer - route only from analog mixer */
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x0b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- /* Port-F pin */
- {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
- {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* Analog mixer; mute as default */
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(5)},
- /* Analog Mix output amp */
- {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* capture sources */
- /* {0x0c, AC_VERB_SET_CONNECT_SEL, 0x0}, */ /* set via unsol */
- {0x0c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- {0x0d, AC_VERB_SET_CONNECT_SEL, 0x0},
- {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* unsolicited event for pin-sense */
- {0x11, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_HP_EVENT},
- {0x1c, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1884A_MIC_EVENT},
- /* allow to touch GPIO1 (for mute control) */
- {0x01, AC_VERB_SET_GPIO_MASK, 0x02},
- {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x02},
- {0x01, AC_VERB_SET_GPIO_DATA, 0x02}, /* first muted */
- /* internal mic - dmic */
- {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
- /* set magic COEFs for dmic */
- {0x01, AC_VERB_SET_COEF_INDEX, 0x13f7},
- {0x01, AC_VERB_SET_PROC_COEF, 0x08},
- { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1984a_touchsmart_mixers[] = {
- HDA_CODEC_VOLUME("Master Playback Volume", 0x21, 0x0, HDA_OUTPUT),
-/* HDA_CODEC_MUTE("Master Playback Switch", 0x21, 0x0, HDA_OUTPUT),*/
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .subdevice = HDA_SUBDEV_AMP_FLAG,
- .name = "Master Playback Switch",
- .info = snd_hda_mixer_amp_switch_info,
- .get = snd_hda_mixer_amp_switch_get,
- .put = ad1884a_mobile_master_sw_put,
- .private_value = HDA_COMPOSE_AMP_VAL(0x21, 3, 0, HDA_OUTPUT),
- },
- HDA_CODEC_VOLUME("PCM Playback Volume", 0x20, 0x5, HDA_INPUT),
- HDA_CODEC_MUTE("PCM Playback Switch", 0x20, 0x5, HDA_INPUT),
- HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Mic Boost Volume", 0x25, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Internal Mic Boost Volume", 0x17, 0x0, HDA_INPUT),
- { } /* end */
-};
-
-/* switch to external mic if plugged */
-static void ad1984a_touchsmart_automic(struct hda_codec *codec)
-{
- if (snd_hda_jack_detect(codec, 0x1c))
- snd_hda_codec_write(codec, 0x0c, 0,
- AC_VERB_SET_CONNECT_SEL, 0x4);
- else
- snd_hda_codec_write(codec, 0x0c, 0,
- AC_VERB_SET_CONNECT_SEL, 0x5);
-}
-
-
-/* unsolicited event for HP jack sensing */
-static void ad1984a_touchsmart_unsol_event(struct hda_codec *codec,
- unsigned int res)
-{
- switch (res >> 26) {
- case AD1884A_HP_EVENT:
- ad1884a_hp_automute(codec);
- break;
- case AD1884A_MIC_EVENT:
- ad1984a_touchsmart_automic(codec);
- break;
- }
-}
-
-/* initialize jack-sensing, too */
-static int ad1984a_touchsmart_init(struct hda_codec *codec)
-{
- ad198x_init(codec);
- ad1884a_hp_automute(codec);
- ad1984a_touchsmart_automic(codec);
- return 0;
-}
-
-
-/*
- */
-
-enum {
- AD1884A_AUTO,
- AD1884A_DESKTOP,
- AD1884A_LAPTOP,
- AD1884A_MOBILE,
- AD1884A_THINKPAD,
- AD1984A_TOUCHSMART,
- AD1984A_PRECISION,
- AD1884A_MODELS
-};
-
-static const char * const ad1884a_models[AD1884A_MODELS] = {
- [AD1884A_AUTO] = "auto",
- [AD1884A_DESKTOP] = "desktop",
- [AD1884A_LAPTOP] = "laptop",
- [AD1884A_MOBILE] = "mobile",
- [AD1884A_THINKPAD] = "thinkpad",
- [AD1984A_TOUCHSMART] = "touchsmart",
- [AD1984A_PRECISION] = "precision",
-};
-
-static const struct snd_pci_quirk ad1884a_cfg_tbl[] = {
- SND_PCI_QUIRK(0x1028, 0x04ac, "Precision R5500", AD1984A_PRECISION),
- SND_PCI_QUIRK(0x103c, 0x3030, "HP", AD1884A_MOBILE),
- SND_PCI_QUIRK(0x103c, 0x3037, "HP 2230s", AD1884A_LAPTOP),
- SND_PCI_QUIRK(0x103c, 0x3056, "HP", AD1884A_MOBILE),
- SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x3070, "HP", AD1884A_MOBILE),
- SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x30d0, "HP laptop", AD1884A_LAPTOP),
- SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x30e0, "HP laptop", AD1884A_LAPTOP),
- SND_PCI_QUIRK_MASK(0x103c, 0xff00, 0x3600, "HP laptop", AD1884A_LAPTOP),
- SND_PCI_QUIRK_MASK(0x103c, 0xfff0, 0x7010, "HP laptop", AD1884A_MOBILE),
- SND_PCI_QUIRK(0x17aa, 0x20ac, "Thinkpad X300", AD1884A_THINKPAD),
- SND_PCI_QUIRK(0x103c, 0x2a82, "Touchsmart", AD1984A_TOUCHSMART),
- {}
-};
-
-static int patch_ad1884a(struct hda_codec *codec)
-{
- struct ad198x_spec *spec;
- int err, board_config;
-
- board_config = snd_hda_check_board_config(codec, AD1884A_MODELS,
- ad1884a_models,
- ad1884a_cfg_tbl);
- if (board_config < 0) {
- printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
- codec->chip_name);
- board_config = AD1884A_AUTO;
- }
-
- if (board_config == AD1884A_AUTO)
- return ad1884_parse_auto_config(codec);
-
- err = alloc_ad_spec(codec);
- if (err < 0)
- return err;
- spec = codec->spec;
-
- err = snd_hda_attach_beep_device(codec, 0x10);
- if (err < 0) {
- ad198x_free(codec);
- return err;
- }
- set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
-
- spec->multiout.max_channels = 2;
- spec->multiout.num_dacs = ARRAY_SIZE(ad1884a_dac_nids);
- spec->multiout.dac_nids = ad1884a_dac_nids;
- spec->multiout.dig_out_nid = AD1884A_SPDIF_OUT;
- spec->num_adc_nids = ARRAY_SIZE(ad1884a_adc_nids);
- spec->adc_nids = ad1884a_adc_nids;
- spec->capsrc_nids = ad1884a_capsrc_nids;
- spec->input_mux = &ad1884a_capture_source;
- spec->num_mixers = 1;
- spec->mixers[0] = ad1884a_base_mixers;
- spec->num_init_verbs = 1;
- spec->init_verbs[0] = ad1884a_init_verbs;
- spec->spdif_route = 0;
-#ifdef CONFIG_PM
- spec->loopback.amplist = ad1884a_loopbacks;
-#endif
- codec->patch_ops = ad198x_patch_ops;
-
- /* override some parameters */
- switch (board_config) {
- case AD1884A_LAPTOP:
- spec->mixers[0] = ad1884a_laptop_mixers;
- spec->init_verbs[spec->num_init_verbs++] = ad1884a_laptop_verbs;
- spec->multiout.dig_out_nid = 0;
- codec->patch_ops.unsol_event = ad1884a_laptop_unsol_event;
- codec->patch_ops.init = ad1884a_laptop_init;
- /* set the upper-limit for mixer amp to 0dB for avoiding the
- * possible damage by overloading
- */
- snd_hda_override_amp_caps(codec, 0x20, HDA_INPUT,
- (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
- (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
- (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
- (1 << AC_AMPCAP_MUTE_SHIFT));
- break;
- case AD1884A_MOBILE:
- spec->mixers[0] = ad1884a_mobile_mixers;
- spec->init_verbs[0] = ad1884a_mobile_verbs;
- spec->multiout.dig_out_nid = 0;
- codec->patch_ops.unsol_event = ad1884a_hp_unsol_event;
- codec->patch_ops.init = ad1884a_hp_init;
- /* set the upper-limit for mixer amp to 0dB for avoiding the
- * possible damage by overloading
- */
- snd_hda_override_amp_caps(codec, 0x20, HDA_INPUT,
- (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
- (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
- (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
- (1 << AC_AMPCAP_MUTE_SHIFT));
- break;
- case AD1884A_THINKPAD:
- spec->mixers[0] = ad1984a_thinkpad_mixers;
- spec->init_verbs[spec->num_init_verbs++] =
- ad1984a_thinkpad_verbs;
- spec->multiout.dig_out_nid = 0;
- spec->input_mux = &ad1984a_thinkpad_capture_source;
- codec->patch_ops.unsol_event = ad1984a_thinkpad_unsol_event;
- codec->patch_ops.init = ad1984a_thinkpad_init;
- break;
- case AD1984A_PRECISION:
- spec->mixers[0] = ad1984a_precision_mixers;
- spec->init_verbs[spec->num_init_verbs++] =
- ad1984a_precision_verbs;
- spec->multiout.dig_out_nid = 0;
- codec->patch_ops.unsol_event = ad1984a_precision_unsol_event;
- codec->patch_ops.init = ad1984a_precision_init;
- break;
- case AD1984A_TOUCHSMART:
- spec->mixers[0] = ad1984a_touchsmart_mixers;
- spec->init_verbs[0] = ad1984a_touchsmart_verbs;
- spec->multiout.dig_out_nid = 0;
- codec->patch_ops.unsol_event = ad1984a_touchsmart_unsol_event;
- codec->patch_ops.init = ad1984a_touchsmart_init;
- /* set the upper-limit for mixer amp to 0dB for avoiding the
- * possible damage by overloading
- */
- snd_hda_override_amp_caps(codec, 0x20, HDA_INPUT,
- (0x17 << AC_AMPCAP_OFFSET_SHIFT) |
- (0x17 << AC_AMPCAP_NUM_STEPS_SHIFT) |
- (0x05 << AC_AMPCAP_STEP_SIZE_SHIFT) |
- (1 << AC_AMPCAP_MUTE_SHIFT));
- break;
- }
-
- codec->no_trigger_sense = 1;
- codec->no_sticky_stream = 1;
-
- return 0;
-}
-#else /* ENABLE_AD_STATIC_QUIRKS */
-#define patch_ad1884a ad1884_parse_auto_config
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
-
/*
* AD1882 / AD1882A
*
@@ -4850,299 +1058,7 @@ static int patch_ad1884a(struct hda_codec *codec)
* port-G - rear clfe-out (6stack)
*/
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static const hda_nid_t ad1882_dac_nids[3] = {
- 0x04, 0x03, 0x05
-};
-
-static const hda_nid_t ad1882_adc_nids[2] = {
- 0x08, 0x09,
-};
-
-static const hda_nid_t ad1882_capsrc_nids[2] = {
- 0x0c, 0x0d,
-};
-
-#define AD1882_SPDIF_OUT 0x02
-
-/* list: 0x11, 0x39, 0x3a, 0x18, 0x3c, 0x3b, 0x12, 0x20 */
-static const struct hda_input_mux ad1882_capture_source = {
- .num_items = 5,
- .items = {
- { "Front Mic", 0x1 },
- { "Mic", 0x4 },
- { "Line", 0x2 },
- { "CD", 0x3 },
- { "Mix", 0x7 },
- },
-};
-
-/* list: 0x11, 0x39, 0x3a, 0x3c, 0x18, 0x1f, 0x12, 0x20 */
-static const struct hda_input_mux ad1882a_capture_source = {
- .num_items = 5,
- .items = {
- { "Front Mic", 0x1 },
- { "Mic", 0x4},
- { "Line", 0x2 },
- { "Digital Mic", 0x06 },
- { "Mix", 0x7 },
- },
-};
-
-static const struct snd_kcontrol_new ad1882_base_mixers[] = {
- HDA_CODEC_VOLUME("Front Playback Volume", 0x04, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Surround Playback Volume", 0x03, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_MONO("Center Playback Volume", 0x05, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_MONO("LFE Playback Volume", 0x05, 2, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Headphone Playback Switch", 0x11, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Front Playback Switch", 0x12, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_MONO("Mono Playback Volume", 0x13, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_MONO("Mono Playback Switch", 0x13, 1, 0x0, HDA_OUTPUT),
-
- HDA_CODEC_VOLUME("Mic Boost Volume", 0x3c, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Front Mic Boost Volume", 0x39, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Line-In Boost Volume", 0x3a, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME("Capture Volume", 0x0c, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE("Capture Switch", 0x0c, 0x0, HDA_OUTPUT),
- HDA_CODEC_VOLUME_IDX("Capture Volume", 1, 0x0d, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_IDX("Capture Switch", 1, 0x0d, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- /* The multiple "Capture Source" controls confuse alsamixer
- * So call somewhat different..
- */
- /* .name = "Capture Source", */
- .name = "Input Source",
- .count = 2,
- .info = ad198x_mux_enum_info,
- .get = ad198x_mux_enum_get,
- .put = ad198x_mux_enum_put,
- },
- /* SPDIF controls */
- HDA_CODEC_VOLUME("IEC958 Playback Volume", 0x1b, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Source",
- /* identical with ad1983 */
- .info = ad1983_spdif_route_info,
- .get = ad1983_spdif_route_get,
- .put = ad1983_spdif_route_put,
- },
- { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1882_loopback_mixers[] = {
- HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x20, 0x00, HDA_INPUT),
- HDA_CODEC_MUTE("Front Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x01, HDA_INPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x01, HDA_INPUT),
- HDA_CODEC_VOLUME("Line Playback Volume", 0x20, 0x04, HDA_INPUT),
- HDA_CODEC_MUTE("Line Playback Switch", 0x20, 0x04, HDA_INPUT),
- HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x06, HDA_INPUT),
- HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x06, HDA_INPUT),
- { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1882a_loopback_mixers[] = {
- HDA_CODEC_VOLUME("Front Mic Playback Volume", 0x20, 0x00, HDA_INPUT),
- HDA_CODEC_MUTE("Front Mic Playback Switch", 0x20, 0x00, HDA_INPUT),
- HDA_CODEC_VOLUME("Mic Playback Volume", 0x20, 0x04, HDA_INPUT),
- HDA_CODEC_MUTE("Mic Playback Switch", 0x20, 0x04, HDA_INPUT),
- HDA_CODEC_VOLUME("Line Playback Volume", 0x20, 0x01, HDA_INPUT),
- HDA_CODEC_MUTE("Line Playback Switch", 0x20, 0x01, HDA_INPUT),
- HDA_CODEC_VOLUME("CD Playback Volume", 0x20, 0x06, HDA_INPUT),
- HDA_CODEC_MUTE("CD Playback Switch", 0x20, 0x06, HDA_INPUT),
- HDA_CODEC_VOLUME("Digital Mic Boost Volume", 0x1f, 0x0, HDA_INPUT),
- { } /* end */
-};
-
-static const struct snd_kcontrol_new ad1882_3stack_mixers[] = {
- HDA_CODEC_MUTE("Surround Playback Switch", 0x15, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_MONO("Center Playback Switch", 0x17, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_MONO("LFE Playback Switch", 0x17, 2, 0x0, HDA_OUTPUT),
- {
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Channel Mode",
- .info = ad198x_ch_mode_info,
- .get = ad198x_ch_mode_get,
- .put = ad198x_ch_mode_put,
- },
- { } /* end */
-};
-
-/* simple auto-mute control for AD1882 3-stack board */
-#define AD1882_HP_EVENT 0x01
-
-static void ad1882_3stack_automute(struct hda_codec *codec)
-{
- bool mute = snd_hda_jack_detect(codec, 0x11);
- snd_hda_codec_write(codec, 0x12, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
- mute ? 0 : PIN_OUT);
-}
-
-static int ad1882_3stack_automute_init(struct hda_codec *codec)
-{
- ad198x_init(codec);
- ad1882_3stack_automute(codec);
- return 0;
-}
-
-static void ad1882_3stack_unsol_event(struct hda_codec *codec, unsigned int res)
-{
- switch (res >> 26) {
- case AD1882_HP_EVENT:
- ad1882_3stack_automute(codec);
- break;
- }
-}
-
-static const struct snd_kcontrol_new ad1882_6stack_mixers[] = {
- HDA_CODEC_MUTE("Surround Playback Switch", 0x16, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_MONO("Center Playback Switch", 0x24, 1, 0x0, HDA_OUTPUT),
- HDA_CODEC_MUTE_MONO("LFE Playback Switch", 0x24, 2, 0x0, HDA_OUTPUT),
- { } /* end */
-};
-
-static const struct hda_verb ad1882_ch2_init[] = {
- {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
- {0x2c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x2c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- { } /* end */
-};
-
-static const struct hda_verb ad1882_ch4_init[] = {
- {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x2c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x2c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- { } /* end */
-};
-
-static const struct hda_verb ad1882_ch6_init[] = {
- {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x2c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x2c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- { } /* end */
-};
-
-static const struct hda_channel_mode ad1882_modes[3] = {
- { 2, ad1882_ch2_init },
- { 4, ad1882_ch4_init },
- { 6, ad1882_ch6_init },
-};
-
-/*
- * initialization verbs
- */
-static const struct hda_verb ad1882_init_verbs[] = {
- /* DACs; mute as default */
- {0x03, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x04, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- {0x05, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
- /* Port-A (HP) mixer */
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- /* Port-A pin */
- {0x11, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
- {0x11, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* HP selector - select DAC2 */
- {0x37, AC_VERB_SET_CONNECT_SEL, 0x1},
- /* Port-D (Line-out) mixer */
- {0x29, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x29, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- /* Port-D pin */
- {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
- {0x12, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* Mono-out mixer */
- {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)},
- {0x1e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(1)},
- /* Mono-out pin */
- {0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
- {0x13, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* Port-B (front mic) pin */
- {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- {0x39, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO}, /* boost */
- /* Port-C (line-in) pin */
- {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
- {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- {0x3a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO}, /* boost */
- /* Port-C mixer - mute as input */
- {0x2c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x2c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- /* Port-E (mic-in) pin */
- {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
- {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- {0x3c, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO}, /* boost */
- /* Port-E mixer - mute as input */
- {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x26, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- /* Port-F (surround) */
- {0x16, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x16, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* Port-G (CLFE) */
- {0x24, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
- {0x24, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
- /* Analog mixer; mute as default */
- /* list: 0x39, 0x3a, 0x11, 0x12, 0x3c, 0x3b, 0x18, 0x1a */
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(4)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(5)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(6)},
- {0x20, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(7)},
- /* Analog Mix output amp */
- {0x21, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x1f}, /* 0dB */
- /* SPDIF output selector */
- {0x02, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x27}, /* 0dB */
- {0x02, AC_VERB_SET_CONNECT_SEL, 0x0}, /* PCM */
- {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE | 0x27}, /* 0dB */
- { } /* end */
-};
-
-static const struct hda_verb ad1882_3stack_automute_verbs[] = {
- {0x11, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | AD1882_HP_EVENT},
- { } /* end */
-};
-
-#ifdef CONFIG_PM
-static const struct hda_amp_list ad1882_loopbacks[] = {
- { 0x20, HDA_INPUT, 0 }, /* Front Mic */
- { 0x20, HDA_INPUT, 1 }, /* Mic */
- { 0x20, HDA_INPUT, 4 }, /* Line */
- { 0x20, HDA_INPUT, 6 }, /* CD */
- { } /* end */
-};
-#endif
-
-/* models */
-enum {
- AD1882_AUTO,
- AD1882_3STACK,
- AD1882_6STACK,
- AD1882_3STACK_AUTOMUTE,
- AD1882_MODELS
-};
-
-static const char * const ad1882_models[AD1986A_MODELS] = {
- [AD1882_AUTO] = "auto",
- [AD1882_3STACK] = "3stack",
- [AD1882_6STACK] = "6stack",
- [AD1882_3STACK_AUTOMUTE] = "3stack-automute",
-};
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
-static int ad1882_parse_auto_config(struct hda_codec *codec)
+static int patch_ad1882(struct hda_codec *codec)
{
struct ad198x_spec *spec;
int err;
@@ -5169,110 +1085,20 @@ static int ad1882_parse_auto_config(struct hda_codec *codec)
return err;
}
-#ifdef ENABLE_AD_STATIC_QUIRKS
-static int patch_ad1882(struct hda_codec *codec)
-{
- struct ad198x_spec *spec;
- int err, board_config;
-
- board_config = snd_hda_check_board_config(codec, AD1882_MODELS,
- ad1882_models, NULL);
- if (board_config < 0) {
- printk(KERN_INFO "hda_codec: %s: BIOS auto-probing.\n",
- codec->chip_name);
- board_config = AD1882_AUTO;
- }
-
- if (board_config == AD1882_AUTO)
- return ad1882_parse_auto_config(codec);
-
- err = alloc_ad_spec(codec);
- if (err < 0)
- return err;
- spec = codec->spec;
-
- err = snd_hda_attach_beep_device(codec, 0x10);
- if (err < 0) {
- ad198x_free(codec);
- return err;
- }
- set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
-
- spec->multiout.max_channels = 6;
- spec->multiout.num_dacs = 3;
- spec->multiout.dac_nids = ad1882_dac_nids;
- spec->multiout.dig_out_nid = AD1882_SPDIF_OUT;
- spec->num_adc_nids = ARRAY_SIZE(ad1882_adc_nids);
- spec->adc_nids = ad1882_adc_nids;
- spec->capsrc_nids = ad1882_capsrc_nids;
- if (codec->vendor_id == 0x11d41882)
- spec->input_mux = &ad1882_capture_source;
- else
- spec->input_mux = &ad1882a_capture_source;
- spec->num_mixers = 2;
- spec->mixers[0] = ad1882_base_mixers;
- if (codec->vendor_id == 0x11d41882)
- spec->mixers[1] = ad1882_loopback_mixers;
- else
- spec->mixers[1] = ad1882a_loopback_mixers;
- spec->num_init_verbs = 1;
- spec->init_verbs[0] = ad1882_init_verbs;
- spec->spdif_route = 0;
-#ifdef CONFIG_PM
- spec->loopback.amplist = ad1882_loopbacks;
-#endif
- spec->vmaster_nid = 0x04;
-
- codec->patch_ops = ad198x_patch_ops;
-
- /* override some parameters */
- switch (board_config) {
- default:
- case AD1882_3STACK:
- case AD1882_3STACK_AUTOMUTE:
- spec->num_mixers = 3;
- spec->mixers[2] = ad1882_3stack_mixers;
- spec->channel_mode = ad1882_modes;
- spec->num_channel_mode = ARRAY_SIZE(ad1882_modes);
- spec->need_dac_fix = 1;
- spec->multiout.max_channels = 2;
- spec->multiout.num_dacs = 1;
- if (board_config != AD1882_3STACK) {
- spec->init_verbs[spec->num_init_verbs++] =
- ad1882_3stack_automute_verbs;
- codec->patch_ops.unsol_event = ad1882_3stack_unsol_event;
- codec->patch_ops.init = ad1882_3stack_automute_init;
- }
- break;
- case AD1882_6STACK:
- spec->num_mixers = 3;
- spec->mixers[2] = ad1882_6stack_mixers;
- break;
- }
-
- codec->no_trigger_sense = 1;
- codec->no_sticky_stream = 1;
-
- return 0;
-}
-#else /* ENABLE_AD_STATIC_QUIRKS */
-#define patch_ad1882 ad1882_parse_auto_config
-#endif /* ENABLE_AD_STATIC_QUIRKS */
-
/*
* patch entries
*/
static const struct hda_codec_preset snd_hda_preset_analog[] = {
- { .id = 0x11d4184a, .name = "AD1884A", .patch = patch_ad1884a },
+ { .id = 0x11d4184a, .name = "AD1884A", .patch = patch_ad1884 },
{ .id = 0x11d41882, .name = "AD1882", .patch = patch_ad1882 },
- { .id = 0x11d41883, .name = "AD1883", .patch = patch_ad1884a },
+ { .id = 0x11d41883, .name = "AD1883", .patch = patch_ad1884 },
{ .id = 0x11d41884, .name = "AD1884", .patch = patch_ad1884 },
- { .id = 0x11d4194a, .name = "AD1984A", .patch = patch_ad1884a },
- { .id = 0x11d4194b, .name = "AD1984B", .patch = patch_ad1884a },
+ { .id = 0x11d4194a, .name = "AD1984A", .patch = patch_ad1884 },
+ { .id = 0x11d4194b, .name = "AD1984B", .patch = patch_ad1884 },
{ .id = 0x11d41981, .name = "AD1981", .patch = patch_ad1981 },
{ .id = 0x11d41983, .name = "AD1983", .patch = patch_ad1983 },
- { .id = 0x11d41984, .name = "AD1984", .patch = patch_ad1984 },
+ { .id = 0x11d41984, .name = "AD1984", .patch = patch_ad1884 },
{ .id = 0x11d41986, .name = "AD1986A", .patch = patch_ad1986a },
{ .id = 0x11d41988, .name = "AD1988", .patch = patch_ad1988 },
{ .id = 0x11d4198b, .name = "AD1988B", .patch = patch_ad1988 },
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index de00ce16647..4edd2d0f9a3 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -66,6 +66,8 @@ struct conexant_spec {
hda_nid_t eapds[4];
bool dynamic_eapd;
+ unsigned int parse_flags; /* flag for snd_hda_parse_pin_defcfg() */
+
#ifdef ENABLE_CXT_STATIC_QUIRKS
const struct snd_kcontrol_new *mixers[5];
int num_mixers;
@@ -3200,6 +3202,9 @@ static int cx_auto_init(struct hda_codec *codec)
snd_hda_gen_init(codec);
if (!spec->dynamic_eapd)
cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, true);
+
+ snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
+
return 0;
}
@@ -3224,6 +3229,8 @@ enum {
CXT_PINCFG_LEMOTE_A1205,
CXT_FIXUP_STEREO_DMIC,
CXT_FIXUP_INC_MIC_BOOST,
+ CXT_FIXUP_HEADPHONE_MIC_PIN,
+ CXT_FIXUP_HEADPHONE_MIC,
};
static void cxt_fixup_stereo_dmic(struct hda_codec *codec,
@@ -3246,6 +3253,59 @@ static void cxt5066_increase_mic_boost(struct hda_codec *codec,
(0 << AC_AMPCAP_MUTE_SHIFT));
}
+static void cxt_update_headset_mode(struct hda_codec *codec)
+{
+ /* The verbs used in this function were tested on a Conexant CX20751/2 codec. */
+ int i;
+ bool mic_mode = false;
+ struct conexant_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->gen.autocfg;
+
+ hda_nid_t mux_pin = spec->gen.imux_pins[spec->gen.cur_mux[0]];
+
+ for (i = 0; i < cfg->num_inputs; i++)
+ if (cfg->inputs[i].pin == mux_pin) {
+ mic_mode = !!cfg->inputs[i].is_headphone_mic;
+ break;
+ }
+
+ if (mic_mode) {
+ snd_hda_codec_write_cache(codec, 0x1c, 0, 0x410, 0x7c); /* enable merged mode for analog int-mic */
+ spec->gen.hp_jack_present = false;
+ } else {
+ snd_hda_codec_write_cache(codec, 0x1c, 0, 0x410, 0x54); /* disable merged mode for analog int-mic */
+ spec->gen.hp_jack_present = snd_hda_jack_detect(codec, spec->gen.autocfg.hp_pins[0]);
+ }
+
+ snd_hda_gen_update_outputs(codec);
+}
+
+static void cxt_update_headset_mode_hook(struct hda_codec *codec,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ cxt_update_headset_mode(codec);
+}
+
+static void cxt_fixup_headphone_mic(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct conexant_spec *spec = codec->spec;
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ spec->parse_flags |= HDA_PINCFG_HEADPHONE_MIC;
+ break;
+ case HDA_FIXUP_ACT_PROBE:
+ spec->gen.cap_sync_hook = cxt_update_headset_mode_hook;
+ spec->gen.automute_hook = cxt_update_headset_mode;
+ break;
+ case HDA_FIXUP_ACT_INIT:
+ cxt_update_headset_mode(codec);
+ break;
+ }
+}
+
+
/* ThinkPad X200 & co with cxt5051 */
static const struct hda_pintbl cxt_pincfg_lenovo_x200[] = {
{ 0x16, 0x042140ff }, /* HP (seq# overridden) */
@@ -3302,6 +3362,19 @@ static const struct hda_fixup cxt_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = cxt5066_increase_mic_boost,
},
+ [CXT_FIXUP_HEADPHONE_MIC_PIN] = {
+ .type = HDA_FIXUP_PINS,
+ .chained = true,
+ .chain_id = CXT_FIXUP_HEADPHONE_MIC,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x18, 0x03a1913d }, /* use as headphone mic, without its own jack detect */
+ { }
+ }
+ },
+ [CXT_FIXUP_HEADPHONE_MIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = cxt_fixup_headphone_mic,
+ },
};
static const struct snd_pci_quirk cxt5051_fixups[] = {
@@ -3311,6 +3384,7 @@ static const struct snd_pci_quirk cxt5051_fixups[] = {
static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T410", CXT_PINCFG_LENOVO_TP410),
SND_PCI_QUIRK(0x17aa, 0x215f, "Lenovo T510", CXT_PINCFG_LENOVO_TP410),
@@ -3395,7 +3469,8 @@ static int patch_conexant_auto(struct hda_codec *codec)
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
- err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0);
+ err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL,
+ spec->parse_flags);
if (err < 0)
goto error;
@@ -3416,6 +3491,8 @@ static int patch_conexant_auto(struct hda_codec *codec)
codec->bus->allow_bus_reset = 1;
}
+ snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);
+
return 0;
error:
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 030ca8652a1..9a58893d52a 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -67,6 +67,8 @@ struct hdmi_spec_per_pin {
struct delayed_work work;
struct snd_kcontrol *eld_ctl;
int repoll_count;
+ bool setup; /* the stream has been set up by prepare callback */
+ int channels; /* current number of channels */
bool non_pcm;
bool chmap_set; /* channel-map override by ALSA API? */
unsigned char chmap[8]; /* ALSA API channel-map */
@@ -551,6 +553,17 @@ static int hdmi_channel_allocation(struct hdmi_eld *eld, int channels)
}
}
+ if (!ca) {
+ /* if there was no match, select the regular ALSA channel
+ * allocation with the matching number of channels */
+ for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) {
+ if (channels == channel_allocations[i].channels) {
+ ca = channel_allocations[i].ca_index;
+ break;
+ }
+ }
+ }
+
snd_print_channel_allocation(eld->info.spk_alloc, buf, sizeof(buf));
snd_printdd("HDMI: select CA 0x%x for %d-channel allocation: %s\n",
ca, channels, buf);
@@ -868,18 +881,19 @@ static bool hdmi_infoframe_uptodate(struct hda_codec *codec, hda_nid_t pin_nid,
return true;
}
-static void hdmi_setup_audio_infoframe(struct hda_codec *codec, int pin_idx,
- bool non_pcm,
- struct snd_pcm_substream *substream)
+static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
+ struct hdmi_spec_per_pin *per_pin,
+ bool non_pcm)
{
- struct hdmi_spec *spec = codec->spec;
- struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
hda_nid_t pin_nid = per_pin->pin_nid;
- int channels = substream->runtime->channels;
+ int channels = per_pin->channels;
struct hdmi_eld *eld;
int ca;
union audio_infoframe ai;
+ if (!channels)
+ return;
+
eld = &per_pin->sink_eld;
if (!eld->monitor_present)
return;
@@ -959,6 +973,7 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
int pin_nid;
int pin_idx;
struct hda_jack_tbl *jack;
+ int dev_entry = (res & AC_UNSOL_RES_DE) >> AC_UNSOL_RES_DE_SHIFT;
jack = snd_hda_jack_tbl_get_from_tag(codec, tag);
if (!jack)
@@ -967,8 +982,8 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
jack->jack_dirty = 1;
_snd_printd(SND_PR_VERBOSE,
- "HDMI hot plug event: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
- codec->addr, pin_nid,
+ "HDMI hot plug event: Codec=%d Pin=%d Device=%d Inactive=%d Presence_Detect=%d ELD_Valid=%d\n",
+ codec->addr, pin_nid, dev_entry, !!(res & AC_UNSOL_RES_IA),
!!(res & AC_UNSOL_RES_PD), !!(res & AC_UNSOL_RES_ELDV));
pin_idx = pin_nid_to_pin_index(spec, pin_nid);
@@ -1329,6 +1344,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
eld_changed = true;
}
if (update_eld) {
+ bool old_eld_valid = pin_eld->eld_valid;
pin_eld->eld_valid = eld->eld_valid;
eld_changed = pin_eld->eld_size != eld->eld_size ||
memcmp(pin_eld->eld_buffer, eld->eld_buffer,
@@ -1338,6 +1354,18 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
eld->eld_size);
pin_eld->eld_size = eld->eld_size;
pin_eld->info = eld->info;
+
+ /* Haswell-specific workaround: re-setup when the transcoder is
+ * changed during the stream playback
+ */
+ if (codec->vendor_id == 0x80862807 &&
+ eld->eld_valid && !old_eld_valid && per_pin->setup) {
+ snd_hda_codec_write(codec, pin_nid, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE,
+ AMP_OUT_UNMUTE);
+ hdmi_setup_audio_infoframe(codec, per_pin,
+ per_pin->non_pcm);
+ }
}
mutex_unlock(&pin_eld->lock);
@@ -1510,14 +1538,17 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
hda_nid_t cvt_nid = hinfo->nid;
struct hdmi_spec *spec = codec->spec;
int pin_idx = hinfo_to_pin_index(spec, hinfo);
- hda_nid_t pin_nid = get_pin(spec, pin_idx)->pin_nid;
+ struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
+ hda_nid_t pin_nid = per_pin->pin_nid;
bool non_pcm;
non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
+ per_pin->channels = substream->runtime->channels;
+ per_pin->setup = true;
hdmi_set_channel_count(codec, cvt_nid, substream->runtime->channels);
- hdmi_setup_audio_infoframe(codec, pin_idx, non_pcm, substream);
+ hdmi_setup_audio_infoframe(codec, per_pin, non_pcm);
return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
}
@@ -1557,6 +1588,9 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
snd_hda_spdif_ctls_unassign(codec, pin_idx);
per_pin->chmap_set = false;
memset(per_pin->chmap, 0, sizeof(per_pin->chmap));
+
+ per_pin->setup = false;
+ per_pin->channels = 0;
}
return 0;
@@ -1692,8 +1726,7 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
per_pin->chmap_set = true;
memcpy(per_pin->chmap, chmap, sizeof(chmap));
if (prepared)
- hdmi_setup_audio_infoframe(codec, pin_idx, per_pin->non_pcm,
- substream);
+ hdmi_setup_audio_infoframe(codec, per_pin, per_pin->non_pcm);
return 0;
}
@@ -1781,6 +1814,9 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
struct snd_pcm_chmap *chmap;
struct snd_kcontrol *kctl;
int i;
+
+ if (!codec->pcm_info[pin_idx].pcm)
+ break;
err = snd_pcm_add_chmap_ctls(codec->pcm_info[pin_idx].pcm,
SNDRV_PCM_STREAM_PLAYBACK,
NULL, 0, pin_idx, &chmap);
@@ -1989,8 +2025,10 @@ static int patch_generic_hdmi(struct hda_codec *codec)
return -EINVAL;
}
codec->patch_ops = generic_hdmi_patch_ops;
- if (codec->vendor_id == 0x80862807)
+ if (codec->vendor_id == 0x80862807) {
codec->patch_ops.set_power_state = haswell_set_power_state;
+ codec->dp_mst = true;
+ }
generic_hdmi_init_per_pins(codec);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8bd22614986..9e9378cde8f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -282,6 +282,7 @@ static void alc_eapd_shutup(struct hda_codec *codec)
{
alc_auto_setup_eapd(codec, false);
msleep(200);
+ snd_hda_shutup_pins(codec);
}
/* generic EAPD initialization */
@@ -826,7 +827,8 @@ static inline void alc_shutup(struct hda_codec *codec)
if (spec && spec->shutup)
spec->shutup(codec);
- snd_hda_shutup_pins(codec);
+ else
+ snd_hda_shutup_pins(codec);
}
#define alc_free snd_hda_gen_free
@@ -1031,6 +1033,7 @@ enum {
ALC880_FIXUP_GPIO2,
ALC880_FIXUP_MEDION_RIM,
ALC880_FIXUP_LG,
+ ALC880_FIXUP_LG_LW25,
ALC880_FIXUP_W810,
ALC880_FIXUP_EAPD_COEF,
ALC880_FIXUP_TCL_S700,
@@ -1089,6 +1092,14 @@ static const struct hda_fixup alc880_fixups[] = {
{ }
}
},
+ [ALC880_FIXUP_LG_LW25] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1a, 0x0181344f }, /* line-in */
+ { 0x1b, 0x0321403f }, /* headphone */
+ { }
+ }
+ },
[ALC880_FIXUP_W810] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -1341,6 +1352,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = {
SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_FIXUP_LG),
SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_FIXUP_LG),
SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_FIXUP_LG),
+ SND_PCI_QUIRK(0x1854, 0x0077, "LG LW25", ALC880_FIXUP_LG_LW25),
SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_FIXUP_TCL_S700),
/* Below is the copied entries from alc880_quirks.c.
@@ -1843,8 +1855,10 @@ static void alc882_fixup_no_primary_hp(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
struct alc_spec *spec = codec->spec;
- if (action == HDA_FIXUP_ACT_PRE_PROBE)
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
spec->gen.no_primary_hp = 1;
+ spec->gen.no_multi_io = 1;
+ }
}
static const struct hda_fixup alc882_fixups[] = {
@@ -2523,6 +2537,7 @@ enum {
ALC269_TYPE_ALC269VD,
ALC269_TYPE_ALC280,
ALC269_TYPE_ALC282,
+ ALC269_TYPE_ALC283,
ALC269_TYPE_ALC284,
ALC269_TYPE_ALC286,
};
@@ -2548,6 +2563,7 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
case ALC269_TYPE_ALC269VB:
case ALC269_TYPE_ALC269VD:
case ALC269_TYPE_ALC282:
+ case ALC269_TYPE_ALC283:
case ALC269_TYPE_ALC286:
ssids = alc269_ssids;
break;
@@ -2573,15 +2589,81 @@ static void alc269_shutup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- if (spec->codec_variant != ALC269_TYPE_ALC269VB)
- return;
-
if (spec->codec_variant == ALC269_TYPE_ALC269VB)
alc269vb_toggle_power_output(codec, 0);
if (spec->codec_variant == ALC269_TYPE_ALC269VB &&
(alc_get_coef0(codec) & 0x00ff) == 0x018) {
msleep(150);
}
+ snd_hda_shutup_pins(codec);
+}
+
+static void alc283_init(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ bool hp_pin_sense;
+ int val;
+
+ if (!hp_pin)
+ return;
+ hp_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+
+ /* Index 0x43 Direct Drive HP AMP LPM Control 1 */
+ /* Headphone capless set to high power mode */
+ alc_write_coef_idx(codec, 0x43, 0x9004);
+
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+ if (hp_pin_sense)
+ msleep(85);
+
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT);
+
+ if (hp_pin_sense)
+ msleep(85);
+ /* Index 0x46 Combo jack auto switch control 2 */
+ /* 3k pull low control for Headset jack. */
+ val = alc_read_coef_idx(codec, 0x46);
+ alc_write_coef_idx(codec, 0x46, val & ~(3 << 12));
+ /* Headphone capless set to normal mode */
+ alc_write_coef_idx(codec, 0x43, 0x9614);
+}
+
+static void alc283_shutup(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ bool hp_pin_sense;
+ int val;
+
+ if (!hp_pin) {
+ alc269_shutup(codec);
+ return;
+ }
+
+ hp_pin_sense = snd_hda_jack_detect(codec, hp_pin);
+
+ alc_write_coef_idx(codec, 0x43, 0x9004);
+
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
+
+ if (hp_pin_sense)
+ msleep(85);
+
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+
+ val = alc_read_coef_idx(codec, 0x46);
+ alc_write_coef_idx(codec, 0x46, val | (3 << 12));
+
+ if (hp_pin_sense)
+ msleep(85);
+ snd_hda_shutup_pins(codec);
+ alc_write_coef_idx(codec, 0x43, 0x9614);
}
static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg,
@@ -2712,6 +2794,7 @@ static int alc269_resume(struct hda_codec *codec)
hda_call_check_power_status(codec, 0x01);
if (spec->has_alc5505_dsp)
alc5505_dsp_resume(codec);
+
return 0;
}
#endif /* CONFIG_PM */
@@ -3251,6 +3334,28 @@ static void alc_fixup_headset_mode_alc668(struct hda_codec *codec,
alc_fixup_headset_mode(codec, fix, action);
}
+/* Returns the nid of the external mic input pin, or 0 if it cannot be found. */
+static int find_ext_mic_pin(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+ struct auto_pin_cfg *cfg = &spec->gen.autocfg;
+ hda_nid_t nid;
+ unsigned int defcfg;
+ int i;
+
+ for (i = 0; i < cfg->num_inputs; i++) {
+ if (cfg->inputs[i].type != AUTO_PIN_MIC)
+ continue;
+ nid = cfg->inputs[i].pin;
+ defcfg = snd_hda_codec_get_pincfg(codec, nid);
+ if (snd_hda_get_input_pin_attr(defcfg) == INPUT_PIN_ATTR_INT)
+ continue;
+ return nid;
+ }
+
+ return 0;
+}
+
static void alc271_hp_gate_mic_jack(struct hda_codec *codec,
const struct hda_fixup *fix,
int action)
@@ -3258,11 +3363,12 @@ static void alc271_hp_gate_mic_jack(struct hda_codec *codec,
struct alc_spec *spec = codec->spec;
if (action == HDA_FIXUP_ACT_PROBE) {
- if (snd_BUG_ON(!spec->gen.am_entry[1].pin ||
- !spec->gen.autocfg.hp_pins[0]))
+ int mic_pin = find_ext_mic_pin(codec);
+ int hp_pin = spec->gen.autocfg.hp_pins[0];
+
+ if (snd_BUG_ON(!mic_pin || !hp_pin))
return;
- snd_hda_jack_set_gating_jack(codec, spec->gen.am_entry[1].pin,
- spec->gen.autocfg.hp_pins[0]);
+ snd_hda_jack_set_gating_jack(codec, mic_pin, hp_pin);
}
}
@@ -3298,6 +3404,45 @@ static void alc269_fixup_limit_int_mic_boost(struct hda_codec *codec,
}
}
+static void alc283_hp_automute_hook(struct hda_codec *codec,
+ struct hda_jack_tbl *jack)
+{
+ struct alc_spec *spec = codec->spec;
+ int vref;
+
+ msleep(200);
+ snd_hda_gen_hp_automute(codec, jack);
+
+ vref = spec->gen.hp_jack_present ? PIN_VREF80 : 0;
+
+ msleep(600);
+ snd_hda_codec_write(codec, 0x19, 0, AC_VERB_SET_PIN_WIDGET_CONTROL,
+ vref);
+}
+
+static void alc283_chromebook_caps(struct hda_codec *codec)
+{
+ snd_hda_override_wcaps(codec, 0x03, 0);
+}
+
+static void alc283_fixup_chromebook(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct alc_spec *spec = codec->spec;
+ int val;
+
+ switch (action) {
+ case HDA_FIXUP_ACT_PRE_PROBE:
+ alc283_chromebook_caps(codec);
+ spec->gen.hp_automute_hook = alc283_hp_automute_hook;
+ /* MIC2-VREF control */
+ /* Set to manual mode */
+ val = alc_read_coef_idx(codec, 0x06);
+ alc_write_coef_idx(codec, 0x06, val & ~0x000c);
+ break;
+ }
+}
+
enum {
ALC269_FIXUP_SONY_VAIO,
ALC275_FIXUP_SONY_VAIO_GPIO2,
@@ -3334,6 +3479,7 @@ enum {
ALC269_FIXUP_ACER_AC700,
ALC269_FIXUP_LIMIT_INT_MIC_BOOST,
ALC269VB_FIXUP_ORDISSIMO_EVE2,
+ ALC283_FIXUP_CHROME_BOOK,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -3585,11 +3731,20 @@ static const struct hda_fixup alc269_fixups[] = {
{ }
},
},
+ [ALC283_FIXUP_CHROME_BOOK] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc283_fixup_chromebook,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
+ SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
+ SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
+ SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
+ SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
SND_PCI_QUIRK(0x1028, 0x05bd, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05be, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05c4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -3627,6 +3782,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x1973, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x1983, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x21ed, "HP Falco Chromebook", ALC283_FIXUP_CHROME_BOOK),
SND_PCI_QUIRK_VENDOR(0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -3645,11 +3801,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
SND_PCI_QUIRK(0x104d, 0x9084, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
SND_PCI_QUIRK_VENDOR(0x104d, "Sony VAIO", ALC269_FIXUP_SONY_VAIO),
- SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
- SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
- SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
- SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
- SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
@@ -3660,8 +3811,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x21f3, "Thinkpad T430", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
- SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
@@ -3830,11 +3989,15 @@ static int patch_alc269(struct hda_codec *codec)
case 0x10ec0290:
spec->codec_variant = ALC269_TYPE_ALC280;
break;
- case 0x10ec0233:
case 0x10ec0282:
- case 0x10ec0283:
spec->codec_variant = ALC269_TYPE_ALC282;
break;
+ case 0x10ec0233:
+ case 0x10ec0283:
+ spec->codec_variant = ALC269_TYPE_ALC283;
+ spec->shutup = alc283_shutup;
+ spec->init_hook = alc283_init;
+ break;
case 0x10ec0284:
case 0x10ec0292:
spec->codec_variant = ALC269_TYPE_ALC284;
@@ -3862,7 +4025,8 @@ static int patch_alc269(struct hda_codec *codec)
codec->patch_ops.suspend = alc269_suspend;
codec->patch_ops.resume = alc269_resume;
#endif
- spec->shutup = alc269_shutup;
+ if (!spec->shutup)
+ spec->shutup = alc269_shutup;
snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);
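The two patch_alc269 hunks above cooperate: the ALC233/ALC283 case installs variant-specific shutup and init hooks, and the later assignment only falls back to the generic alc269_shutup when no hook has been set. A minimal sketch of that fallback pattern, assuming the alc_spec/shutup names visible in this diff (illustrative only, not part of the patch):

    static void alc_pick_default_shutup(struct alc_spec *spec)
    {
            if (!spec->shutup)              /* keep alc283_shutup and friends */
                    spec->shutup = alc269_shutup;
    }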
@@ -4326,9 +4490,11 @@ static const struct hda_fixup alc662_fixups[] = {
static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1019, 0x9087, "ECS", ALC662_FIXUP_ASUS_MODE2),
+ SND_PCI_QUIRK(0x1025, 0x022f, "Acer Aspire One", ALC662_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE),
SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index e2f83591161..fba0cef1c47 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -158,6 +158,7 @@ enum {
STAC_D965_VERBS,
STAC_DELL_3ST,
STAC_DELL_BIOS,
+ STAC_DELL_BIOS_AMIC,
STAC_DELL_BIOS_SPDIF,
STAC_927X_DELL_DMIC,
STAC_927X_VOLKNOB,
@@ -417,9 +418,11 @@ static void stac_update_outputs(struct hda_codec *codec)
val &= ~spec->eapd_mask;
else
val |= spec->eapd_mask;
- if (spec->gpio_data != val)
+ if (spec->gpio_data != val) {
+ spec->gpio_data = val;
stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir,
val);
+ }
}
}
@@ -2817,6 +2820,7 @@ static const struct hda_pintbl ecs202_pin_configs[] = {
/* codec SSIDs for Intel Mac sharing the same PCI SSID 8384:7680 */
static const struct snd_pci_quirk stac922x_intel_mac_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x0000, 0x0100, "Mac Mini", STAC_INTEL_MAC_V3),
SND_PCI_QUIRK(0x106b, 0x0800, "Mac", STAC_INTEL_MAC_V1),
SND_PCI_QUIRK(0x106b, 0x0600, "Mac", STAC_INTEL_MAC_V2),
SND_PCI_QUIRK(0x106b, 0x0700, "Mac", STAC_INTEL_MAC_V2),
@@ -3228,10 +3232,8 @@ static const struct hda_fixup stac927x_fixups[] = {
[STAC_DELL_BIOS] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
- /* configure the analog microphone on some laptops */
- { 0x0c, 0x90a79130 },
/* correct the front output jack as a hp out */
- { 0x0f, 0x0227011f },
+ { 0x0f, 0x0221101f },
/* correct the front input jack as a mic */
{ 0x0e, 0x02a79130 },
{}
@@ -3239,6 +3241,16 @@ static const struct hda_fixup stac927x_fixups[] = {
.chained = true,
.chain_id = STAC_927X_DELL_DMIC,
},
+ [STAC_DELL_BIOS_AMIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ /* configure the analog microphone on some laptops */
+ { 0x0c, 0x90a79130 },
+ {}
+ },
+ .chained = true,
+ .chain_id = STAC_DELL_BIOS,
+ },
[STAC_DELL_BIOS_SPDIF] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -3267,6 +3279,7 @@ static const struct hda_model_fixup stac927x_models[] = {
{ .id = STAC_D965_5ST_NO_FP, .name = "5stack-no-fp" },
{ .id = STAC_DELL_3ST, .name = "dell-3stack" },
{ .id = STAC_DELL_BIOS, .name = "dell-bios" },
+ { .id = STAC_DELL_BIOS_AMIC, .name = "dell-bios-amic" },
{ .id = STAC_927X_VOLKNOB, .name = "volknob" },
{}
};
@@ -3612,20 +3625,18 @@ static int stac_parse_auto_config(struct hda_codec *codec)
static int stac_init(struct hda_codec *codec)
{
struct sigmatel_spec *spec = codec->spec;
- unsigned int gpio;
int i;
/* override some hints */
stac_store_hints(codec);
/* set up GPIO */
- gpio = spec->gpio_data;
/* turn on EAPD statically when spec->eapd_switch isn't set.
* otherwise, unsol event will turn it on/off dynamically
*/
if (!spec->eapd_switch)
- gpio |= spec->eapd_mask;
- stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, gpio);
+ spec->gpio_data |= spec->eapd_mask;
+ stac_gpio_set(codec, spec->gpio_mask, spec->gpio_dir, spec->gpio_data);
snd_hda_gen_init(codec);
@@ -3915,6 +3926,7 @@ static void stac_setup_gpio(struct hda_codec *codec)
{
struct sigmatel_spec *spec = codec->spec;
+ spec->gpio_mask |= spec->eapd_mask;
if (spec->gpio_led) {
if (!spec->vref_mute_led_nid) {
spec->gpio_mask |= spec->gpio_led;
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index e2481baddc7..0bc20ef5687 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -207,9 +207,9 @@ static void vt1708_stop_hp_work(struct hda_codec *codec)
return;
if (spec->hp_work_active) {
snd_hda_codec_write(codec, 0x1, 0, 0xf81, 1);
+ codec->jackpoll_interval = 0;
cancel_delayed_work_sync(&codec->jackpoll_work);
spec->hp_work_active = false;
- codec->jackpoll_interval = 0;
}
}
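The reordering in vt1708_stop_hp_work() matters because the HDA jack-poll worker re-arms itself for as long as jackpoll_interval is non-zero; clearing the interval before cancel_delayed_work_sync() ensures a poll that is already running cannot re-queue itself after the cancel returns. A rough, hedged sketch of the self-rearming worker being guarded against (field names are the ones used in this diff; the real HDA-core worker differs in detail):

    #include <linux/workqueue.h>
    /* struct hda_codec comes from sound/pci/hda/hda_codec.h */

    static void jackpoll_work_sketch(struct work_struct *work)
    {
            struct hda_codec *codec =
                    container_of(work, struct hda_codec, jackpoll_work.work);

            /* ... poll the jack state here ... */

            if (codec->jackpoll_interval)   /* zero means: do not re-arm */
                    schedule_delayed_work(&codec->jackpoll_work,
                                          codec->jackpoll_interval);
    }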
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index 2a8ad9d1a2a..bb9ebc5543d 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -28,6 +28,7 @@
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
+#include <linux/vmalloc.h>
#include <sound/core.h>
#include <sound/info.h>
@@ -198,6 +199,31 @@ MODULE_PARM_DESC(enable, "Enable RME Digi96 soundcard.");
#define RME96_AD1852_VOL_BITS 14
#define RME96_AD1855_VOL_BITS 10
+/* Defines for snd_rme96_trigger */
+#define RME96_TB_START_PLAYBACK 1
+#define RME96_TB_START_CAPTURE 2
+#define RME96_TB_STOP_PLAYBACK 4
+#define RME96_TB_STOP_CAPTURE 8
+#define RME96_TB_RESET_PLAYPOS 16
+#define RME96_TB_RESET_CAPTUREPOS 32
+#define RME96_TB_CLEAR_PLAYBACK_IRQ 64
+#define RME96_TB_CLEAR_CAPTURE_IRQ 128
+#define RME96_RESUME_PLAYBACK (RME96_TB_START_PLAYBACK)
+#define RME96_RESUME_CAPTURE (RME96_TB_START_CAPTURE)
+#define RME96_RESUME_BOTH (RME96_RESUME_PLAYBACK \
+ | RME96_RESUME_CAPTURE)
+#define RME96_START_PLAYBACK (RME96_TB_START_PLAYBACK \
+ | RME96_TB_RESET_PLAYPOS)
+#define RME96_START_CAPTURE (RME96_TB_START_CAPTURE \
+ | RME96_TB_RESET_CAPTUREPOS)
+#define RME96_START_BOTH (RME96_START_PLAYBACK \
+ | RME96_START_CAPTURE)
+#define RME96_STOP_PLAYBACK (RME96_TB_STOP_PLAYBACK \
+ | RME96_TB_CLEAR_PLAYBACK_IRQ)
+#define RME96_STOP_CAPTURE (RME96_TB_STOP_CAPTURE \
+ | RME96_TB_CLEAR_CAPTURE_IRQ)
+#define RME96_STOP_BOTH (RME96_STOP_PLAYBACK \
+ | RME96_STOP_CAPTURE)
struct rme96 {
spinlock_t lock;
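These RME96_TB_* bits feed the single snd_rme96_trigger() helper introduced later in this patch, which replaces the four separate start/stop functions; the composite masks simply OR the primitive operations together. Illustrative calls, using only the defines added above:

    /* RME96_START_BOTH == RME96_TB_START_PLAYBACK | RME96_TB_RESET_PLAYPOS |
     *                     RME96_TB_START_CAPTURE  | RME96_TB_RESET_CAPTUREPOS,
     * so one call starts both streams from position zero: */
    snd_rme96_trigger(rme96, RME96_START_BOTH);

    /* resume skips the position reset; stop also acknowledges pending IRQs */
    snd_rme96_trigger(rme96, RME96_RESUME_PLAYBACK);
    snd_rme96_trigger(rme96, RME96_STOP_BOTH);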
@@ -214,6 +240,13 @@ struct rme96 {
u8 rev; /* card revision number */
+#ifdef CONFIG_PM
+ u32 playback_pointer;
+ u32 capture_pointer;
+ void *playback_suspend_buffer;
+ void *capture_suspend_buffer;
+#endif
+
struct snd_pcm_substream *playback_substream;
struct snd_pcm_substream *capture_substream;
@@ -344,6 +377,8 @@ static struct snd_pcm_hardware snd_rme96_playback_spdif_info =
{
.info = (SNDRV_PCM_INFO_MMAP_IOMEM |
SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_SYNC_START |
+ SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
@@ -373,6 +408,8 @@ static struct snd_pcm_hardware snd_rme96_capture_spdif_info =
{
.info = (SNDRV_PCM_INFO_MMAP_IOMEM |
SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_SYNC_START |
+ SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
@@ -402,6 +439,8 @@ static struct snd_pcm_hardware snd_rme96_playback_adat_info =
{
.info = (SNDRV_PCM_INFO_MMAP_IOMEM |
SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_SYNC_START |
+ SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
@@ -427,6 +466,8 @@ static struct snd_pcm_hardware snd_rme96_capture_adat_info =
{
.info = (SNDRV_PCM_INFO_MMAP_IOMEM |
SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_SYNC_START |
+ SNDRV_PCM_INFO_RESUME |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_PAUSE),
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
@@ -1045,54 +1086,35 @@ snd_rme96_capture_hw_params(struct snd_pcm_substream *substream,
}
static void
-snd_rme96_playback_start(struct rme96 *rme96,
- int from_pause)
+snd_rme96_trigger(struct rme96 *rme96,
+ int op)
{
- if (!from_pause) {
+ if (op & RME96_TB_RESET_PLAYPOS)
writel(0, rme96->iobase + RME96_IO_RESET_PLAY_POS);
- }
-
- rme96->wcreg |= RME96_WCR_START;
- writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
-}
-
-static void
-snd_rme96_capture_start(struct rme96 *rme96,
- int from_pause)
-{
- if (!from_pause) {
+ if (op & RME96_TB_RESET_CAPTUREPOS)
writel(0, rme96->iobase + RME96_IO_RESET_REC_POS);
- }
-
- rme96->wcreg |= RME96_WCR_START_2;
+ if (op & RME96_TB_CLEAR_PLAYBACK_IRQ) {
+ rme96->rcreg = readl(rme96->iobase + RME96_IO_CONTROL_REGISTER);
+ if (rme96->rcreg & RME96_RCR_IRQ)
+ writel(0, rme96->iobase + RME96_IO_CONFIRM_PLAY_IRQ);
+ }
+ if (op & RME96_TB_CLEAR_CAPTURE_IRQ) {
+ rme96->rcreg = readl(rme96->iobase + RME96_IO_CONTROL_REGISTER);
+ if (rme96->rcreg & RME96_RCR_IRQ_2)
+ writel(0, rme96->iobase + RME96_IO_CONFIRM_REC_IRQ);
+ }
+ if (op & RME96_TB_START_PLAYBACK)
+ rme96->wcreg |= RME96_WCR_START;
+ if (op & RME96_TB_STOP_PLAYBACK)
+ rme96->wcreg &= ~RME96_WCR_START;
+ if (op & RME96_TB_START_CAPTURE)
+ rme96->wcreg |= RME96_WCR_START_2;
+ if (op & RME96_TB_STOP_CAPTURE)
+ rme96->wcreg &= ~RME96_WCR_START_2;
writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
}
-static void
-snd_rme96_playback_stop(struct rme96 *rme96)
-{
- /*
- * Check if there is an unconfirmed IRQ, if so confirm it, or else
- * the hardware will not stop generating interrupts
- */
- rme96->rcreg = readl(rme96->iobase + RME96_IO_CONTROL_REGISTER);
- if (rme96->rcreg & RME96_RCR_IRQ) {
- writel(0, rme96->iobase + RME96_IO_CONFIRM_PLAY_IRQ);
- }
- rme96->wcreg &= ~RME96_WCR_START;
- writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
-}
-static void
-snd_rme96_capture_stop(struct rme96 *rme96)
-{
- rme96->rcreg = readl(rme96->iobase + RME96_IO_CONTROL_REGISTER);
- if (rme96->rcreg & RME96_RCR_IRQ_2) {
- writel(0, rme96->iobase + RME96_IO_CONFIRM_REC_IRQ);
- }
- rme96->wcreg &= ~RME96_WCR_START_2;
- writel(rme96->wcreg, rme96->iobase + RME96_IO_CONTROL_REGISTER);
-}
static irqreturn_t
snd_rme96_interrupt(int irq,
@@ -1155,6 +1177,7 @@ snd_rme96_playback_spdif_open(struct snd_pcm_substream *substream)
struct rme96 *rme96 = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
+ snd_pcm_set_sync(substream);
spin_lock_irq(&rme96->lock);
if (rme96->playback_substream != NULL) {
spin_unlock_irq(&rme96->lock);
@@ -1191,6 +1214,7 @@ snd_rme96_capture_spdif_open(struct snd_pcm_substream *substream)
struct rme96 *rme96 = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
+ snd_pcm_set_sync(substream);
runtime->hw = snd_rme96_capture_spdif_info;
if (snd_rme96_getinputtype(rme96) != RME96_INPUT_ANALOG &&
(rate = snd_rme96_capture_getrate(rme96, &isadat)) > 0)
@@ -1222,6 +1246,7 @@ snd_rme96_playback_adat_open(struct snd_pcm_substream *substream)
struct rme96 *rme96 = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
+ snd_pcm_set_sync(substream);
spin_lock_irq(&rme96->lock);
if (rme96->playback_substream != NULL) {
spin_unlock_irq(&rme96->lock);
@@ -1253,6 +1278,7 @@ snd_rme96_capture_adat_open(struct snd_pcm_substream *substream)
struct rme96 *rme96 = snd_pcm_substream_chip(substream);
struct snd_pcm_runtime *runtime = substream->runtime;
+ snd_pcm_set_sync(substream);
runtime->hw = snd_rme96_capture_adat_info;
if (snd_rme96_getinputtype(rme96) == RME96_INPUT_ANALOG) {
/* makes no sense to use analog input. Note that analog
@@ -1288,7 +1314,7 @@ snd_rme96_playback_close(struct snd_pcm_substream *substream)
spin_lock_irq(&rme96->lock);
if (RME96_ISPLAYING(rme96)) {
- snd_rme96_playback_stop(rme96);
+ snd_rme96_trigger(rme96, RME96_STOP_PLAYBACK);
}
rme96->playback_substream = NULL;
rme96->playback_periodsize = 0;
@@ -1309,7 +1335,7 @@ snd_rme96_capture_close(struct snd_pcm_substream *substream)
spin_lock_irq(&rme96->lock);
if (RME96_ISRECORDING(rme96)) {
- snd_rme96_capture_stop(rme96);
+ snd_rme96_trigger(rme96, RME96_STOP_CAPTURE);
}
rme96->capture_substream = NULL;
rme96->capture_periodsize = 0;
@@ -1324,7 +1350,7 @@ snd_rme96_playback_prepare(struct snd_pcm_substream *substream)
spin_lock_irq(&rme96->lock);
if (RME96_ISPLAYING(rme96)) {
- snd_rme96_playback_stop(rme96);
+ snd_rme96_trigger(rme96, RME96_STOP_PLAYBACK);
}
writel(0, rme96->iobase + RME96_IO_RESET_PLAY_POS);
spin_unlock_irq(&rme96->lock);
@@ -1338,7 +1364,7 @@ snd_rme96_capture_prepare(struct snd_pcm_substream *substream)
spin_lock_irq(&rme96->lock);
if (RME96_ISRECORDING(rme96)) {
- snd_rme96_capture_stop(rme96);
+ snd_rme96_trigger(rme96, RME96_STOP_CAPTURE);
}
writel(0, rme96->iobase + RME96_IO_RESET_REC_POS);
spin_unlock_irq(&rme96->lock);
@@ -1350,41 +1376,55 @@ snd_rme96_playback_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct rme96 *rme96 = snd_pcm_substream_chip(substream);
+ struct snd_pcm_substream *s;
+ bool sync;
+
+ snd_pcm_group_for_each_entry(s, substream) {
+ if (snd_pcm_substream_chip(s) == rme96)
+ snd_pcm_trigger_done(s, substream);
+ }
+
+ sync = (rme96->playback_substream && rme96->capture_substream) &&
+ (rme96->playback_substream->group ==
+ rme96->capture_substream->group);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
if (!RME96_ISPLAYING(rme96)) {
- if (substream != rme96->playback_substream) {
+ if (substream != rme96->playback_substream)
return -EBUSY;
- }
- snd_rme96_playback_start(rme96, 0);
+ snd_rme96_trigger(rme96, sync ? RME96_START_BOTH
+ : RME96_START_PLAYBACK);
}
break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
if (RME96_ISPLAYING(rme96)) {
- if (substream != rme96->playback_substream) {
+ if (substream != rme96->playback_substream)
return -EBUSY;
- }
- snd_rme96_playback_stop(rme96);
+ snd_rme96_trigger(rme96, sync ? RME96_STOP_BOTH
+ : RME96_STOP_PLAYBACK);
}
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- if (RME96_ISPLAYING(rme96)) {
- snd_rme96_playback_stop(rme96);
- }
+ if (RME96_ISPLAYING(rme96))
+ snd_rme96_trigger(rme96, sync ? RME96_STOP_BOTH
+ : RME96_STOP_PLAYBACK);
break;
+ case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- if (!RME96_ISPLAYING(rme96)) {
- snd_rme96_playback_start(rme96, 1);
- }
+ if (!RME96_ISPLAYING(rme96))
+ snd_rme96_trigger(rme96, sync ? RME96_RESUME_BOTH
+ : RME96_RESUME_PLAYBACK);
break;
-
+
default:
return -EINVAL;
}
+
return 0;
}
@@ -1393,38 +1433,51 @@ snd_rme96_capture_trigger(struct snd_pcm_substream *substream,
int cmd)
{
struct rme96 *rme96 = snd_pcm_substream_chip(substream);
+ struct snd_pcm_substream *s;
+ bool sync;
+
+ snd_pcm_group_for_each_entry(s, substream) {
+ if (snd_pcm_substream_chip(s) == rme96)
+ snd_pcm_trigger_done(s, substream);
+ }
+
+ sync = (rme96->playback_substream && rme96->capture_substream) &&
+ (rme96->playback_substream->group ==
+ rme96->capture_substream->group);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
if (!RME96_ISRECORDING(rme96)) {
- if (substream != rme96->capture_substream) {
+ if (substream != rme96->capture_substream)
return -EBUSY;
- }
- snd_rme96_capture_start(rme96, 0);
+ snd_rme96_trigger(rme96, sync ? RME96_START_BOTH
+ : RME96_START_CAPTURE);
}
break;
+ case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_STOP:
if (RME96_ISRECORDING(rme96)) {
- if (substream != rme96->capture_substream) {
+ if (substream != rme96->capture_substream)
return -EBUSY;
- }
- snd_rme96_capture_stop(rme96);
+ snd_rme96_trigger(rme96, sync ? RME96_STOP_BOTH
+ : RME96_STOP_CAPTURE);
}
break;
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- if (RME96_ISRECORDING(rme96)) {
- snd_rme96_capture_stop(rme96);
- }
+ if (RME96_ISRECORDING(rme96))
+ snd_rme96_trigger(rme96, sync ? RME96_STOP_BOTH
+ : RME96_STOP_CAPTURE);
break;
+ case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- if (!RME96_ISRECORDING(rme96)) {
- snd_rme96_capture_start(rme96, 1);
- }
+ if (!RME96_ISRECORDING(rme96))
+ snd_rme96_trigger(rme96, sync ? RME96_RESUME_BOTH
+ : RME96_RESUME_CAPTURE);
break;
-
+
default:
return -EINVAL;
}
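Both trigger handlers now advertise SNDRV_PCM_INFO_SYNC_START and the open callbacks call snd_pcm_set_sync(), so playback and capture can be placed in one PCM group and started with a single register write. A hedged userspace sketch of how an application would use that (alsa-lib; the two handles are assumed to be already opened on the Digi96):

    #include <alsa/asoundlib.h>

    static int rme96_link_duplex(snd_pcm_t *playback, snd_pcm_t *capture)
    {
            int err = snd_pcm_link(playback, capture);  /* join one group */
            if (err < 0)
                    return err;     /* not linked; streams start separately */
            /* after snd_pcm_prepare() on both, starting either stream now
             * triggers the pair sample-synchronously via RME96_START_BOTH */
            return 0;
    }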
@@ -1505,8 +1558,7 @@ snd_rme96_free(void *private_data)
return;
}
if (rme96->irq >= 0) {
- snd_rme96_playback_stop(rme96);
- snd_rme96_capture_stop(rme96);
+ snd_rme96_trigger(rme96, RME96_STOP_BOTH);
rme96->areg &= ~RME96_AR_DAC_EN;
writel(rme96->areg, rme96->iobase + RME96_IO_ADDITIONAL_REG);
free_irq(rme96->irq, (void *)rme96);
@@ -1520,6 +1572,10 @@ snd_rme96_free(void *private_data)
pci_release_regions(rme96->pci);
rme96->port = 0;
}
+#ifdef CONFIG_PM
+ vfree(rme96->playback_suspend_buffer);
+ vfree(rme96->capture_suspend_buffer);
+#endif
pci_disable_device(rme96->pci);
}
@@ -1606,8 +1662,7 @@ snd_rme96_create(struct rme96 *rme96)
rme96->capture_periodsize = 0;
/* make sure playback/capture is stopped, if by some reason active */
- snd_rme96_playback_stop(rme96);
- snd_rme96_capture_stop(rme96);
+ snd_rme96_trigger(rme96, RME96_STOP_BOTH);
/* set default values in registers */
rme96->wcreg =
@@ -2319,6 +2374,87 @@ snd_rme96_create_switches(struct snd_card *card,
* Card initialisation
*/
+#ifdef CONFIG_PM
+
+static int
+snd_rme96_suspend(struct pci_dev *pci,
+ pm_message_t state)
+{
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct rme96 *rme96 = card->private_data;
+
+ snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
+ snd_pcm_suspend(rme96->playback_substream);
+ snd_pcm_suspend(rme96->capture_substream);
+
+ /* save capture & playback pointers */
+ rme96->playback_pointer = readl(rme96->iobase + RME96_IO_GET_PLAY_POS)
+ & RME96_RCR_AUDIO_ADDR_MASK;
+ rme96->capture_pointer = readl(rme96->iobase + RME96_IO_GET_REC_POS)
+ & RME96_RCR_AUDIO_ADDR_MASK;
+
+ /* save playback and capture buffers */
+ memcpy_fromio(rme96->playback_suspend_buffer,
+ rme96->iobase + RME96_IO_PLAY_BUFFER, RME96_BUFFER_SIZE);
+ memcpy_fromio(rme96->capture_suspend_buffer,
+ rme96->iobase + RME96_IO_REC_BUFFER, RME96_BUFFER_SIZE);
+
+ /* disable the DAC */
+ rme96->areg &= ~RME96_AR_DAC_EN;
+ writel(rme96->areg, rme96->iobase + RME96_IO_ADDITIONAL_REG);
+
+ pci_disable_device(pci);
+ pci_save_state(pci);
+
+ return 0;
+}
+
+static int
+snd_rme96_resume(struct pci_dev *pci)
+{
+ struct snd_card *card = pci_get_drvdata(pci);
+ struct rme96 *rme96 = card->private_data;
+
+ pci_restore_state(pci);
+ if (pci_enable_device(pci) < 0) {
+ printk(KERN_ERR "rme96: pci_enable_device failed, disabling device\n");
+ snd_card_disconnect(card);
+ return -EIO;
+ }
+
+	/* restore the saved playback and capture buffer pointers */
+ writel(0, rme96->iobase + RME96_IO_SET_PLAY_POS
+ + rme96->playback_pointer);
+ writel(0, rme96->iobase + RME96_IO_SET_REC_POS
+ + rme96->capture_pointer);
+
+ /* restore playback and capture buffers */
+ memcpy_toio(rme96->iobase + RME96_IO_PLAY_BUFFER,
+ rme96->playback_suspend_buffer, RME96_BUFFER_SIZE);
+ memcpy_toio(rme96->iobase + RME96_IO_REC_BUFFER,
+ rme96->capture_suspend_buffer, RME96_BUFFER_SIZE);
+
+ /* reset the ADC */
+ writel(rme96->areg | RME96_AR_PD2,
+ rme96->iobase + RME96_IO_ADDITIONAL_REG);
+ writel(rme96->areg, rme96->iobase + RME96_IO_ADDITIONAL_REG);
+
+ /* reset and enable DAC, restore analog volume */
+ snd_rme96_reset_dac(rme96);
+ rme96->areg |= RME96_AR_DAC_EN;
+ writel(rme96->areg, rme96->iobase + RME96_IO_ADDITIONAL_REG);
+ if (RME96_HAS_ANALOG_OUT(rme96)) {
+ usleep_range(3000, 10000);
+ snd_rme96_apply_dac_volume(rme96);
+ }
+
+ snd_power_change_state(card, SNDRV_CTL_POWER_D0);
+
+ return 0;
+}
+
+#endif
+
static void snd_rme96_card_free(struct snd_card *card)
{
snd_rme96_free(card->private_data);
@@ -2355,6 +2491,23 @@ snd_rme96_probe(struct pci_dev *pci,
return err;
}
+#ifdef CONFIG_PM
+ rme96->playback_suspend_buffer = vmalloc(RME96_BUFFER_SIZE);
+ if (!rme96->playback_suspend_buffer) {
+ snd_printk(KERN_ERR
+ "Failed to allocate playback suspend buffer!\n");
+ snd_card_free(card);
+ return -ENOMEM;
+ }
+ rme96->capture_suspend_buffer = vmalloc(RME96_BUFFER_SIZE);
+ if (!rme96->capture_suspend_buffer) {
+ snd_printk(KERN_ERR
+ "Failed to allocate capture suspend buffer!\n");
+ snd_card_free(card);
+ return -ENOMEM;
+ }
+#endif
+
strcpy(card->driver, "Digi96");
switch (rme96->pci->device) {
case PCI_DEVICE_ID_RME_DIGI96:
@@ -2397,6 +2550,10 @@ static struct pci_driver rme96_driver = {
.id_table = snd_rme96_ids,
.probe = snd_rme96_probe,
.remove = snd_rme96_remove,
+#ifdef CONFIG_PM
+ .suspend = snd_rme96_suspend,
+ .resume = snd_rme96_resume,
+#endif
};
module_pci_driver(rme96_driver);
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index bd501931ee2..3cde55b753e 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -38,6 +38,97 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
+
+/* ************* Register Documentation *******************************************************
+ *
+ * Work in progress! Documentation is based on the code in this file.
+ *
+ * --------- HDSPM_controlRegister ---------
+ * :7654.3210:7654.3210:7654.3210:7654.3210: bit number per byte
+ * :||||.||||:||||.||||:||||.||||:||||.||||:
+ * :3322.2222:2222.1111:1111.1100:0000.0000: bit number
+ * :1098.7654:3210.9876:5432.1098:7654.3210: 0..31
+ * :||||.||||:||||.||||:||||.||||:||||.||||:
+ * :8421.8421:8421.8421:8421.8421:8421.8421: hex digit
+ * : . : . : . : x . : HDSPM_AudioInterruptEnable \_ setting both bits
+ * : . : . : . : . x: HDSPM_Start / enables audio IO
+ * : . : . : . : x. : HDSPM_ClockModeMaster - 1: Master, 0: Slave
+ * : . : . : . : .210 : HDSPM_LatencyMask - 3 Bit value for latency
+ * : . : . : . : . : 0:64, 1:128, 2:256, 3:512,
+ * : . : . : . : . : 4:1024, 5:2048, 6:4096, 7:8192
+ * :x . : . : . x:xx . : HDSPM_FrequencyMask
+ * : . : . : . :10 . : HDSPM_Frequency1|HDSPM_Frequency0: 1=32K,2=44.1K,3=48K,0=??
+ * : . : . : . x: . : <MADI> HDSPM_DoubleSpeed
+ * :x . : . : . : . : <MADI> HDSPM_QuadSpeed
+ * : . 3 : . 10: 2 . : . : HDSPM_SyncRefMask :
+ * : . : . x: . : . : HDSPM_SyncRef0
+ * : . : . x : . : . : HDSPM_SyncRef1
+ * : . : . : x . : . : <AES32> HDSPM_SyncRef2
+ * : . x : . : . : . : <AES32> HDSPM_SyncRef3
+ * : . : . 10: . : . : <MADI> sync ref: 0:WC, 1:Madi, 2:TCO, 3:SyncIn
+ * : . 3 : . 10: 2 . : . : <AES32> 0:WC, 1:AES1 ... 8:AES8, 9: TCO, 10:SyncIn?
+ * : . x : . : . : . : <MADIe> HDSPe_FLOAT_FORMAT
+ * : . : . : x . : . : <MADI> HDSPM_InputSelect0 : 0=optical,1=coax
+ * : . : . :x . : . : <MADI> HDSPM_InputSelect1
+ * : . : .x : . : . : <MADI> HDSPM_clr_tms
+ * : . : . : . x : . : <MADI> HDSPM_TX_64ch
+ * : . : . : . x : . : <AES32> HDSPM_Emphasis
+ * : . : . : .x : . : <MADI> HDSPM_AutoInp
+ * : . : . x : . : . : <MADI> HDSPM_SMUX
+ * : . : .x : . : . : <MADI> HDSPM_clr_tms
+ * : . : x. : . : . : <MADI> HDSPM_taxi_reset
+ * : . x: . : . : . : <MADI> HDSPM_LineOut
+ * : . x: . : . : . : <AES32> ??????????????????
+ * : . : x. : . : . : <AES32> HDSPM_WCK48
+ * : . : . : .x : . : <AES32> HDSPM_Dolby
+ * : . : x . : . : . : HDSPM_Midi0InterruptEnable
+ * : . :x . : . : . : HDSPM_Midi1InterruptEnable
+ * : . : x . : . : . : HDSPM_Midi2InterruptEnable
+ * : . x : . : . : . : <MADI> HDSPM_Midi3InterruptEnable
+ * : . x : . : . : . : <AES32> HDSPM_DS_DoubleWire
+ * : .x : . : . : . : <AES32> HDSPM_QS_DoubleWire
+ * : x. : . : . : . : <AES32> HDSPM_QS_QuadWire
+ * : . : . : . x : . : <AES32> HDSPM_Professional
+ * : x . : . : . : . : HDSPM_wclk_sel
+ * : . : . : . : . :
+ * :7654.3210:7654.3210:7654.3210:7654.3210: bit number per byte
+ * :||||.||||:||||.||||:||||.||||:||||.||||:
+ * :3322.2222:2222.1111:1111.1100:0000.0000: bit number
+ * :1098.7654:3210.9876:5432.1098:7654.3210: 0..31
+ * :||||.||||:||||.||||:||||.||||:||||.||||:
+ * :8421.8421:8421.8421:8421.8421:8421.8421:hex digit
+ *
+ *
+ *
+ * AIO / RayDAT only
+ *
+ * ------------ HDSPM_WR_SETTINGS ----------
+ * :3322.2222:2222.1111:1111.1100:0000.0000: bit number per byte
+ * :1098.7654:3210.9876:5432.1098:7654.3210:
+ * :||||.||||:||||.||||:||||.||||:||||.||||: bit number
+ * :7654.3210:7654.3210:7654.3210:7654.3210: 0..31
+ * :||||.||||:||||.||||:||||.||||:||||.||||:
+ * :8421.8421:8421.8421:8421.8421:8421.8421: hex digit
+ * : . : . : . : . x: HDSPM_c0Master 1: Master, 0: Slave
+ * : . : . : . : . x : HDSPM_c0_SyncRef0
+ * : . : . : . : . x : HDSPM_c0_SyncRef1
+ * : . : . : . : .x : HDSPM_c0_SyncRef2
+ * : . : . : . : x. : HDSPM_c0_SyncRef3
+ * : . : . : . : 3.210 : HDSPM_c0_SyncRefMask:
+ * : . : . : . : . : RayDat: 0:WC, 1:AES, 2:SPDIF, 3..6: ADAT1..4,
+ * : . : . : . : . : 9:TCO, 10:SyncIn
+ * :      .    :    .    :    .    :    .    :  AIO: 0:WC, 1:AES, 2: SPDIF, 3: ADAT,
+ * : . : . : . : . : 9:TCO, 10:SyncIn
+ * : . : . : . : . :
+ * : . : . : . : . :
+ * :3322.2222:2222.1111:1111.1100:0000.0000: bit number per byte
+ * :1098.7654:3210.9876:5432.1098:7654.3210:
+ * :||||.||||:||||.||||:||||.||||:||||.||||: bit number
+ * :7654.3210:7654.3210:7654.3210:7654.3210: 0..31
+ * :||||.||||:||||.||||:||||.||||:||||.||||:
+ * :8421.8421:8421.8421:8421.8421:8421.8421: hex digit
+ *
+ */
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
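As a reading aid for the control-register map documented above: the three latency bits select the period size as 64 << n frames. A minimal decoding sketch, assuming HDSPM_LatencyMask covers bits 1..3 (HDSPM_Latency0..2) as in the existing defines of this file:

    /* illustrative sketch only */
    static inline unsigned int hdspm_period_frames(u32 control_register)
    {
            unsigned int n = (control_register & HDSPM_LatencyMask) >> 1;

            return 64U << n;        /* 0:64 ... 7:8192, as documented above */
    }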
@@ -95,7 +186,7 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
#define HDSPM_controlRegister 64
#define HDSPM_interruptConfirmation 96
#define HDSPM_control2Reg 256 /* not in specs ???????? */
-#define HDSPM_freqReg 256 /* for AES32 */
+#define HDSPM_freqReg 256 /* for setting arbitrary clock values (DDS feature) */
#define HDSPM_midiDataOut0 352 /* just believe in old code */
#define HDSPM_midiDataOut1 356
#define HDSPM_eeprom_wr 384 /* for AES32 */
@@ -258,6 +349,25 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
#define HDSPM_wclk_sel (1<<30)
+/* additional control register bits for AIO*/
+#define HDSPM_c0_Wck48 0x20 /* also RayDAT */
+#define HDSPM_c0_Input0 0x1000
+#define HDSPM_c0_Input1 0x2000
+#define HDSPM_c0_Spdif_Opt 0x4000
+#define HDSPM_c0_Pro 0x8000
+#define HDSPM_c0_clr_tms 0x10000
+#define HDSPM_c0_AEB1 0x20000
+#define HDSPM_c0_AEB2 0x40000
+#define HDSPM_c0_LineOut 0x80000
+#define HDSPM_c0_AD_GAIN0 0x100000
+#define HDSPM_c0_AD_GAIN1 0x200000
+#define HDSPM_c0_DA_GAIN0 0x400000
+#define HDSPM_c0_DA_GAIN1 0x800000
+#define HDSPM_c0_PH_GAIN0 0x1000000
+#define HDSPM_c0_PH_GAIN1 0x2000000
+#define HDSPM_c0_Sym6db 0x4000000
+
+
/* --- bit helper defines */
#define HDSPM_LatencyMask (HDSPM_Latency0|HDSPM_Latency1|HDSPM_Latency2)
#define HDSPM_FrequencyMask (HDSPM_Frequency0|HDSPM_Frequency1|\
@@ -341,11 +451,11 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
#define HDSPM_madiLock (1<<3) /* MADI Locked =1, no=0 */
#define HDSPM_madiSync (1<<18) /* MADI is in sync */
-#define HDSPM_tcoLock 0x00000020 /* Optional TCO locked status FOR HDSPe MADI! */
-#define HDSPM_tcoSync 0x10000000 /* Optional TCO sync status */
+#define HDSPM_tcoLockMadi 0x00000020 /* Optional TCO locked status for HDSPe MADI */
+#define HDSPM_tcoSync 0x10000000 /* Optional TCO sync status for HDSPe MADI and AES32 */
-#define HDSPM_syncInLock 0x00010000 /* Sync In lock status FOR HDSPe MADI! */
-#define HDSPM_syncInSync 0x00020000 /* Sync In sync status FOR HDSPe MADI! */
+#define HDSPM_syncInLock 0x00010000 /* Sync In lock status for HDSPe MADI! */
+#define HDSPM_syncInSync 0x00020000 /* Sync In sync status for HDSPe MADI! */
#define HDSPM_BufferPositionMask 0x000FFC0 /* Bit 6..15 : h/w buffer pointer */
/* since 64byte accurate, last 6 bits are not used */
@@ -363,7 +473,7 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
* Interrupt
*/
#define HDSPM_tco_detect 0x08000000
-#define HDSPM_tco_lock 0x20000000
+#define HDSPM_tcoLockAes 0x20000000 /* Optional TCO locked status for HDSPe AES */
#define HDSPM_s2_tco_detect 0x00000040
#define HDSPM_s2_AEBO_D 0x00000080
@@ -461,7 +571,9 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
#define HDSPM_AES32_AUTOSYNC_FROM_AES6 6
#define HDSPM_AES32_AUTOSYNC_FROM_AES7 7
#define HDSPM_AES32_AUTOSYNC_FROM_AES8 8
-#define HDSPM_AES32_AUTOSYNC_FROM_NONE 9
+#define HDSPM_AES32_AUTOSYNC_FROM_TCO 9
+#define HDSPM_AES32_AUTOSYNC_FROM_SYNC_IN 10
+#define HDSPM_AES32_AUTOSYNC_FROM_NONE 11
/* status2 */
/* HDSPM_LockAES_bit is given by HDSPM_LockAES >> (AES# - 1) */
@@ -537,36 +649,39 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
/* names for speed modes */
static char *hdspm_speed_names[] = { "single", "double", "quad" };
-static char *texts_autosync_aes_tco[] = { "Word Clock",
+static const char *const texts_autosync_aes_tco[] = { "Word Clock",
"AES1", "AES2", "AES3", "AES4",
"AES5", "AES6", "AES7", "AES8",
- "TCO" };
-static char *texts_autosync_aes[] = { "Word Clock",
+ "TCO", "Sync In"
+};
+static const char *const texts_autosync_aes[] = { "Word Clock",
"AES1", "AES2", "AES3", "AES4",
- "AES5", "AES6", "AES7", "AES8" };
-static char *texts_autosync_madi_tco[] = { "Word Clock",
+ "AES5", "AES6", "AES7", "AES8",
+ "Sync In"
+};
+static const char *const texts_autosync_madi_tco[] = { "Word Clock",
"MADI", "TCO", "Sync In" };
-static char *texts_autosync_madi[] = { "Word Clock",
+static const char *const texts_autosync_madi[] = { "Word Clock",
"MADI", "Sync In" };
-static char *texts_autosync_raydat_tco[] = {
+static const char *const texts_autosync_raydat_tco[] = {
"Word Clock",
"ADAT 1", "ADAT 2", "ADAT 3", "ADAT 4",
"AES", "SPDIF", "TCO", "Sync In"
};
-static char *texts_autosync_raydat[] = {
+static const char *const texts_autosync_raydat[] = {
"Word Clock",
"ADAT 1", "ADAT 2", "ADAT 3", "ADAT 4",
"AES", "SPDIF", "Sync In"
};
-static char *texts_autosync_aio_tco[] = {
+static const char *const texts_autosync_aio_tco[] = {
"Word Clock",
"ADAT", "AES", "SPDIF", "TCO", "Sync In"
};
-static char *texts_autosync_aio[] = { "Word Clock",
+static const char *const texts_autosync_aio[] = { "Word Clock",
"ADAT", "AES", "SPDIF", "Sync In" };
-static char *texts_freq[] = {
+static const char *const texts_freq[] = {
"No Lock",
"32 kHz",
"44.1 kHz",
@@ -629,7 +744,8 @@ static char *texts_ports_aio_in_ss[] = {
"AES.L", "AES.R",
"SPDIF.L", "SPDIF.R",
"ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4", "ADAT.5", "ADAT.6",
- "ADAT.7", "ADAT.8"
+ "ADAT.7", "ADAT.8",
+ "AEB.1", "AEB.2", "AEB.3", "AEB.4"
};
static char *texts_ports_aio_out_ss[] = {
@@ -638,14 +754,16 @@ static char *texts_ports_aio_out_ss[] = {
"SPDIF.L", "SPDIF.R",
"ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4", "ADAT.5", "ADAT.6",
"ADAT.7", "ADAT.8",
- "Phone.L", "Phone.R"
+ "Phone.L", "Phone.R",
+ "AEB.1", "AEB.2", "AEB.3", "AEB.4"
};
static char *texts_ports_aio_in_ds[] = {
"Analogue.L", "Analogue.R",
"AES.L", "AES.R",
"SPDIF.L", "SPDIF.R",
- "ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4"
+ "ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4",
+ "AEB.1", "AEB.2", "AEB.3", "AEB.4"
};
static char *texts_ports_aio_out_ds[] = {
@@ -653,14 +771,16 @@ static char *texts_ports_aio_out_ds[] = {
"AES.L", "AES.R",
"SPDIF.L", "SPDIF.R",
"ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4",
- "Phone.L", "Phone.R"
+ "Phone.L", "Phone.R",
+ "AEB.1", "AEB.2", "AEB.3", "AEB.4"
};
static char *texts_ports_aio_in_qs[] = {
"Analogue.L", "Analogue.R",
"AES.L", "AES.R",
"SPDIF.L", "SPDIF.R",
- "ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4"
+ "ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4",
+ "AEB.1", "AEB.2", "AEB.3", "AEB.4"
};
static char *texts_ports_aio_out_qs[] = {
@@ -668,7 +788,8 @@ static char *texts_ports_aio_out_qs[] = {
"AES.L", "AES.R",
"SPDIF.L", "SPDIF.R",
"ADAT.1", "ADAT.2", "ADAT.3", "ADAT.4",
- "Phone.L", "Phone.R"
+ "Phone.L", "Phone.R",
+ "AEB.1", "AEB.2", "AEB.3", "AEB.4"
};
static char *texts_ports_aes32[] = {
@@ -745,8 +866,8 @@ static char channel_map_aio_in_ss[HDSPM_MAX_CHANNELS] = {
8, 9, /* aes in, */
10, 11, /* spdif in */
12, 13, 14, 15, 16, 17, 18, 19, /* ADAT in */
- -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
+ 2, 3, 4, 5, /* AEB */
+ -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
@@ -760,7 +881,8 @@ static char channel_map_aio_out_ss[HDSPM_MAX_CHANNELS] = {
10, 11, /* spdif out */
12, 13, 14, 15, 16, 17, 18, 19, /* ADAT out */
6, 7, /* phone out */
- -1, -1, -1, -1, -1, -1, -1, -1,
+ 2, 3, 4, 5, /* AEB */
+ -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
@@ -773,7 +895,8 @@ static char channel_map_aio_in_ds[HDSPM_MAX_CHANNELS] = {
8, 9, /* aes in */
10, 11, /* spdif in */
12, 14, 16, 18, /* adat in */
- -1, -1, -1, -1, -1, -1,
+ 2, 3, 4, 5, /* AEB */
+ -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
@@ -788,7 +911,7 @@ static char channel_map_aio_out_ds[HDSPM_MAX_CHANNELS] = {
10, 11, /* spdif out */
12, 14, 16, 18, /* adat out */
6, 7, /* phone out */
- -1, -1, -1, -1,
+ 2, 3, 4, 5, /* AEB */
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
@@ -802,7 +925,8 @@ static char channel_map_aio_in_qs[HDSPM_MAX_CHANNELS] = {
8, 9, /* aes in */
10, 11, /* spdif in */
12, 16, /* adat in */
- -1, -1, -1, -1, -1, -1, -1, -1,
+ 2, 3, 4, 5, /* AEB */
+ -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
@@ -817,7 +941,8 @@ static char channel_map_aio_out_qs[HDSPM_MAX_CHANNELS] = {
10, 11, /* spdif out */
12, 16, /* adat out */
6, 7, /* phone out */
- -1, -1, -1, -1, -1, -1,
+ 2, 3, 4, 5, /* AEB */
+ -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1,
@@ -856,11 +981,11 @@ struct hdspm_midi {
};
struct hdspm_tco {
- int input;
- int framerate;
- int wordclock;
- int samplerate;
- int pull;
+ int input; /* 0: LTC, 1:Video, 2: WC*/
+ int framerate; /* 0=24, 1=25, 2=29.97, 3=29.97d, 4=30, 5=30d */
+ int wordclock; /* 0=1:1, 1=44.1->48, 2=48->44.1 */
+ int samplerate; /* 0=44.1, 1=48, 2= freq from app */
+ int pull; /* 0=0, 1=+0.1%, 2=-0.1%, 3=+4%, 4=-4%*/
int term; /* 0 = off, 1 = on */
};
@@ -879,7 +1004,7 @@ struct hdspm {
u32 control_register; /* cached value */
u32 control2_register; /* cached value */
- u32 settings_register;
+ u32 settings_register; /* cached value for AIO / RayDat (sync reference, master/slave) */
struct hdspm_midi midi[4];
struct tasklet_struct midi_tasklet;
@@ -941,7 +1066,7 @@ struct hdspm {
struct hdspm_tco *tco; /* NULL if no TCO detected */
- char **texts_autosync;
+ const char *const *texts_autosync;
int texts_autosync_items;
cycles_t last_interrupt;
@@ -976,12 +1101,24 @@ static inline void snd_hdspm_initialize_midi_flush(struct hdspm *hdspm);
static inline int hdspm_get_pll_freq(struct hdspm *hdspm);
static int hdspm_update_simple_mixer_controls(struct hdspm *hdspm);
static int hdspm_autosync_ref(struct hdspm *hdspm);
+static int hdspm_set_toggle_setting(struct hdspm *hdspm, u32 regmask, int out);
static int snd_hdspm_set_defaults(struct hdspm *hdspm);
static int hdspm_system_clock_mode(struct hdspm *hdspm);
static void hdspm_set_sgbuf(struct hdspm *hdspm,
struct snd_pcm_substream *substream,
unsigned int reg, int channels);
+static int hdspm_aes_sync_check(struct hdspm *hdspm, int idx);
+static int hdspm_wc_sync_check(struct hdspm *hdspm);
+static int hdspm_tco_sync_check(struct hdspm *hdspm);
+static int hdspm_sync_in_sync_check(struct hdspm *hdspm);
+
+static int hdspm_get_aes_sample_rate(struct hdspm *hdspm, int index);
+static int hdspm_get_tco_sample_rate(struct hdspm *hdspm);
+static int hdspm_get_wc_sample_rate(struct hdspm *hdspm);
+
+
+
static inline int HDSPM_bit2freq(int n)
{
static const int bit2freq_tab[] = {
@@ -992,6 +1129,12 @@ static inline int HDSPM_bit2freq(int n)
return bit2freq_tab[n];
}
+static bool hdspm_is_raydat_or_aio(struct hdspm *hdspm)
+{
+ return ((AIO == hdspm->io_type) || (RayDAT == hdspm->io_type));
+}
+
+
/* Write/read to/from HDSPM with Adresses in Bytes
not words but only 32Bit writes are allowed */
@@ -1107,14 +1250,11 @@ static int hdspm_rate_multiplier(struct hdspm *hdspm, int rate)
else if (hdspm->control_register &
HDSPM_DoubleSpeed)
return rate * 2;
- };
+ }
return rate;
}
-static int hdspm_tco_sync_check(struct hdspm *hdspm);
-static int hdspm_sync_in_sync_check(struct hdspm *hdspm);
-
-/* check for external sample rate */
+/* check for external sample rate; returns the sample rate in Hz */
static int hdspm_external_sample_rate(struct hdspm *hdspm)
{
unsigned int status, status2, timecode;
@@ -1127,17 +1267,36 @@ static int hdspm_external_sample_rate(struct hdspm *hdspm)
timecode = hdspm_read(hdspm, HDSPM_timecodeRegister);
syncref = hdspm_autosync_ref(hdspm);
+ switch (syncref) {
+ case HDSPM_AES32_AUTOSYNC_FROM_WORD:
+ /* Check WC sync and get sample rate */
+ if (hdspm_wc_sync_check(hdspm))
+ return HDSPM_bit2freq(hdspm_get_wc_sample_rate(hdspm));
+ break;
- if (syncref == HDSPM_AES32_AUTOSYNC_FROM_WORD &&
- status & HDSPM_AES32_wcLock)
- return HDSPM_bit2freq((status >> HDSPM_AES32_wcFreq_bit) & 0xF);
+ case HDSPM_AES32_AUTOSYNC_FROM_AES1:
+ case HDSPM_AES32_AUTOSYNC_FROM_AES2:
+ case HDSPM_AES32_AUTOSYNC_FROM_AES3:
+ case HDSPM_AES32_AUTOSYNC_FROM_AES4:
+ case HDSPM_AES32_AUTOSYNC_FROM_AES5:
+ case HDSPM_AES32_AUTOSYNC_FROM_AES6:
+ case HDSPM_AES32_AUTOSYNC_FROM_AES7:
+ case HDSPM_AES32_AUTOSYNC_FROM_AES8:
+ /* Check AES sync and get sample rate */
+ if (hdspm_aes_sync_check(hdspm, syncref - HDSPM_AES32_AUTOSYNC_FROM_AES1))
+ return HDSPM_bit2freq(hdspm_get_aes_sample_rate(hdspm,
+ syncref - HDSPM_AES32_AUTOSYNC_FROM_AES1));
+ break;
- if (syncref >= HDSPM_AES32_AUTOSYNC_FROM_AES1 &&
- syncref <= HDSPM_AES32_AUTOSYNC_FROM_AES8 &&
- status2 & (HDSPM_LockAES >>
- (syncref - HDSPM_AES32_AUTOSYNC_FROM_AES1)))
- return HDSPM_bit2freq((timecode >> (4*(syncref-HDSPM_AES32_AUTOSYNC_FROM_AES1))) & 0xF);
- return 0;
+
+ case HDSPM_AES32_AUTOSYNC_FROM_TCO:
+ /* Check TCO sync and get sample rate */
+ if (hdspm_tco_sync_check(hdspm))
+ return HDSPM_bit2freq(hdspm_get_tco_sample_rate(hdspm));
+ break;
+ default:
+ return 0;
+ } /* end switch(syncref) */
break;
case MADIface:
@@ -2129,6 +2288,9 @@ static int hdspm_get_wc_sample_rate(struct hdspm *hdspm)
status = hdspm_read(hdspm, HDSPM_RD_STATUS_1);
return (status >> 16) & 0xF;
break;
+ case AES32:
+ status = hdspm_read(hdspm, HDSPM_statusRegister);
+ return (status >> HDSPM_AES32_wcFreq_bit) & 0xF;
default:
break;
}
@@ -2152,6 +2314,9 @@ static int hdspm_get_tco_sample_rate(struct hdspm *hdspm)
status = hdspm_read(hdspm, HDSPM_RD_STATUS_1);
return (status >> 20) & 0xF;
break;
+ case AES32:
+ status = hdspm_read(hdspm, HDSPM_statusRegister);
+ return (status >> 1) & 0xF;
default:
break;
}
@@ -2183,6 +2348,23 @@ static int hdspm_get_sync_in_sample_rate(struct hdspm *hdspm)
return 0;
}
+/**
+ * Returns the AES sample rate class for AES input <index> of the given card.
+ **/
+static int hdspm_get_aes_sample_rate(struct hdspm *hdspm, int index)
+{
+ int timecode;
+
+ switch (hdspm->io_type) {
+ case AES32:
+ timecode = hdspm_read(hdspm, HDSPM_timecodeRegister);
+ return (timecode >> (4*index)) & 0xF;
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
/**
* Returns the sample rate class for input source <idx> for
@@ -2196,15 +2378,23 @@ static int hdspm_get_s1_sample_rate(struct hdspm *hdspm, unsigned int idx)
}
#define ENUMERATED_CTL_INFO(info, texts) \
-{ \
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; \
- uinfo->count = 1; \
- uinfo->value.enumerated.items = ARRAY_SIZE(texts); \
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items) \
- uinfo->value.enumerated.item = uinfo->value.enumerated.items - 1; \
- strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); \
-}
+ snd_ctl_enum_info(info, 1, ARRAY_SIZE(texts), texts)
+
+/* Helper function to query the external sample rate and return the
+ * corresponding enum value as reported to userspace.
+ */
+static int hdspm_external_rate_to_enum(struct hdspm *hdspm)
+{
+ int rate = hdspm_external_sample_rate(hdspm);
+ int i, selected_rate = 0;
+ for (i = 1; i < 10; i++)
+ if (HDSPM_bit2freq(i) == rate) {
+ selected_rate = i;
+ break;
+ }
+ return selected_rate;
+}
#define HDSPM_AUTOSYNC_SAMPLE_RATE(xname, xindex) \
@@ -2270,7 +2460,7 @@ static int snd_hdspm_get_autosync_sample_rate(struct snd_kcontrol *kcontrol,
default:
ucontrol->value.enumerated.item[0] =
hdspm_get_s1_sample_rate(hdspm,
- ucontrol->id.index-1);
+ kcontrol->private_value-1);
}
break;
@@ -2289,28 +2479,24 @@ static int snd_hdspm_get_autosync_sample_rate(struct snd_kcontrol *kcontrol,
ucontrol->value.enumerated.item[0] =
hdspm_get_sync_in_sample_rate(hdspm);
break;
+ case 11: /* External Rate */
+ ucontrol->value.enumerated.item[0] =
+ hdspm_external_rate_to_enum(hdspm);
+ break;
default: /* AES1 to AES8 */
ucontrol->value.enumerated.item[0] =
- hdspm_get_s1_sample_rate(hdspm,
- kcontrol->private_value-1);
+ hdspm_get_aes_sample_rate(hdspm,
+ kcontrol->private_value -
+ HDSPM_AES32_AUTOSYNC_FROM_AES1);
break;
}
break;
case MADI:
case MADIface:
- {
- int rate = hdspm_external_sample_rate(hdspm);
- int i, selected_rate = 0;
- for (i = 1; i < 10; i++)
- if (HDSPM_bit2freq(i) == rate) {
- selected_rate = i;
- break;
- }
- ucontrol->value.enumerated.item[0] = selected_rate;
- }
+ ucontrol->value.enumerated.item[0] =
+ hdspm_external_rate_to_enum(hdspm);
break;
-
default:
break;
}
@@ -2359,33 +2545,17 @@ static int hdspm_system_clock_mode(struct hdspm *hdspm)
**/
static void hdspm_set_system_clock_mode(struct hdspm *hdspm, int mode)
{
- switch (hdspm->io_type) {
- case AIO:
- case RayDAT:
- if (0 == mode)
- hdspm->settings_register |= HDSPM_c0Master;
- else
- hdspm->settings_register &= ~HDSPM_c0Master;
-
- hdspm_write(hdspm, HDSPM_WR_SETTINGS, hdspm->settings_register);
- break;
-
- default:
- if (0 == mode)
- hdspm->control_register |= HDSPM_ClockModeMaster;
- else
- hdspm->control_register &= ~HDSPM_ClockModeMaster;
-
- hdspm_write(hdspm, HDSPM_controlRegister,
- hdspm->control_register);
- }
+ hdspm_set_toggle_setting(hdspm,
+ (hdspm_is_raydat_or_aio(hdspm)) ?
+ HDSPM_c0Master : HDSPM_ClockModeMaster,
+ (0 == mode));
}
static int snd_hdspm_info_system_clock_mode(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = { "Master", "AutoSync" };
+ static const char *const texts[] = { "Master", "AutoSync" };
ENUMERATED_CTL_INFO(uinfo, texts);
return 0;
}
@@ -2809,16 +2979,7 @@ static int snd_hdspm_info_pref_sync_ref(struct snd_kcontrol *kcontrol,
{
struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = hdspm->texts_autosync_items;
-
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item =
- uinfo->value.enumerated.items - 1;
-
- strcpy(uinfo->value.enumerated.name,
- hdspm->texts_autosync[uinfo->value.enumerated.item]);
+ snd_ctl_enum_info(uinfo, 1, hdspm->texts_autosync_items, hdspm->texts_autosync);
return 0;
}
@@ -2873,19 +3034,20 @@ static int snd_hdspm_put_pref_sync_ref(struct snd_kcontrol *kcontrol,
static int hdspm_autosync_ref(struct hdspm *hdspm)
{
+ /* This looks at the autosync selected sync reference */
if (AES32 == hdspm->io_type) {
+
unsigned int status = hdspm_read(hdspm, HDSPM_statusRegister);
- unsigned int syncref =
- (status >> HDSPM_AES32_syncref_bit) & 0xF;
- if (syncref == 0)
- return HDSPM_AES32_AUTOSYNC_FROM_WORD;
- if (syncref <= 8)
+ unsigned int syncref = (status >> HDSPM_AES32_syncref_bit) & 0xF;
+ if ((syncref >= HDSPM_AES32_AUTOSYNC_FROM_WORD) &&
+ (syncref <= HDSPM_AES32_AUTOSYNC_FROM_SYNC_IN)) {
return syncref;
+ }
return HDSPM_AES32_AUTOSYNC_FROM_NONE;
+
} else if (MADI == hdspm->io_type) {
- /* This looks at the autosync selected sync reference */
- unsigned int status2 = hdspm_read(hdspm, HDSPM_statusRegister2);
+ unsigned int status2 = hdspm_read(hdspm, HDSPM_statusRegister2);
switch (status2 & HDSPM_SelSyncRefMask) {
case HDSPM_SelSyncRef_WORD:
return HDSPM_AUTOSYNC_FROM_WORD;
@@ -2898,7 +3060,7 @@ static int hdspm_autosync_ref(struct hdspm *hdspm)
case HDSPM_SelSyncRef_NVALID:
return HDSPM_AUTOSYNC_FROM_NONE;
default:
- return 0;
+ return HDSPM_AUTOSYNC_FROM_NONE;
}
}
@@ -2912,31 +3074,15 @@ static int snd_hdspm_info_autosync_ref(struct snd_kcontrol *kcontrol,
struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
if (AES32 == hdspm->io_type) {
- static char *texts[] = { "WordClock", "AES1", "AES2", "AES3",
- "AES4", "AES5", "AES6", "AES7", "AES8", "None"};
-
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 10;
- if (uinfo->value.enumerated.item >=
- uinfo->value.enumerated.items)
- uinfo->value.enumerated.item =
- uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
+ static const char *const texts[] = { "WordClock", "AES1", "AES2", "AES3",
+ "AES4", "AES5", "AES6", "AES7", "AES8", "TCO", "Sync In", "None"};
+
+ ENUMERATED_CTL_INFO(uinfo, texts);
} else if (MADI == hdspm->io_type) {
- static char *texts[] = {"Word Clock", "MADI", "TCO",
+ static const char *const texts[] = {"Word Clock", "MADI", "TCO",
"Sync In", "None" };
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = 5;
- if (uinfo->value.enumerated.item >=
- uinfo->value.enumerated.items)
- uinfo->value.enumerated.item =
- uinfo->value.enumerated.items - 1;
- strcpy(uinfo->value.enumerated.name,
- texts[uinfo->value.enumerated.item]);
+ ENUMERATED_CTL_INFO(uinfo, texts);
}
return 0;
}
@@ -2964,7 +3110,7 @@ static int snd_hdspm_get_autosync_ref(struct snd_kcontrol *kcontrol,
static int snd_hdspm_info_tco_video_input_format(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = {"No video", "NTSC", "PAL"};
+ static const char *const texts[] = {"No video", "NTSC", "PAL"};
ENUMERATED_CTL_INFO(uinfo, texts);
return 0;
}
@@ -3010,7 +3156,7 @@ static int snd_hdspm_get_tco_video_input_format(struct snd_kcontrol *kcontrol,
static int snd_hdspm_info_tco_ltc_frames(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = {"No lock", "24 fps", "25 fps", "29.97 fps",
+ static const char *const texts[] = {"No lock", "24 fps", "25 fps", "29.97 fps",
"30 fps"};
ENUMERATED_CTL_INFO(uinfo, texts);
return 0;
@@ -3027,19 +3173,19 @@ static int hdspm_tco_ltc_frames(struct hdspm *hdspm)
HDSPM_TCO1_LTC_Format_MSB)) {
case 0:
/* 24 fps */
- ret = 1;
+ ret = fps_24;
break;
case HDSPM_TCO1_LTC_Format_LSB:
/* 25 fps */
- ret = 2;
+ ret = fps_25;
break;
case HDSPM_TCO1_LTC_Format_MSB:
- /* 25 fps */
- ret = 3;
+ /* 29.97 fps */
+ ret = fps_2997;
break;
default:
/* 30 fps */
- ret = 4;
+ ret = fps_30;
break;
}
}
@@ -3067,16 +3213,35 @@ static int snd_hdspm_get_tco_ltc_frames(struct snd_kcontrol *kcontrol,
static int hdspm_toggle_setting(struct hdspm *hdspm, u32 regmask)
{
- return (hdspm->control_register & regmask) ? 1 : 0;
+ u32 reg;
+
+ if (hdspm_is_raydat_or_aio(hdspm))
+ reg = hdspm->settings_register;
+ else
+ reg = hdspm->control_register;
+
+ return (reg & regmask) ? 1 : 0;
}
static int hdspm_set_toggle_setting(struct hdspm *hdspm, u32 regmask, int out)
{
+ u32 *reg;
+ u32 target_reg;
+
+ if (hdspm_is_raydat_or_aio(hdspm)) {
+ reg = &(hdspm->settings_register);
+ target_reg = HDSPM_WR_SETTINGS;
+ } else {
+ reg = &(hdspm->control_register);
+ target_reg = HDSPM_controlRegister;
+ }
+
if (out)
- hdspm->control_register |= regmask;
+ *reg |= regmask;
else
- hdspm->control_register &= ~regmask;
- hdspm_write(hdspm, HDSPM_controlRegister, hdspm->control_register);
+ *reg &= ~regmask;
+
+ hdspm_write(hdspm, target_reg, *reg);
return 0;
}
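With this change every HDSPM_TOGGLE_SETTING control transparently targets the right register: the settings register (HDSPM_WR_SETTINGS) on RayDAT/AIO, the classic control register on the other cards. An illustrative call, assuming an AIO or RayDAT card:

    /* flips the HDSPM_c0_Wck48 bit (0x20) in the cached settings_register
     * and writes it back through HDSPM_WR_SETTINGS */
    hdspm_set_toggle_setting(hdspm, HDSPM_c0_Wck48, 1);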
@@ -3141,7 +3306,7 @@ static int hdspm_set_input_select(struct hdspm * hdspm, int out)
static int snd_hdspm_info_input_select(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = { "optical", "coaxial" };
+ static const char *const texts[] = { "optical", "coaxial" };
ENUMERATED_CTL_INFO(uinfo, texts);
return 0;
}
@@ -3203,7 +3368,7 @@ static int hdspm_set_ds_wire(struct hdspm * hdspm, int ds)
static int snd_hdspm_info_ds_wire(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = { "Single", "Double" };
+ static const char *const texts[] = { "Single", "Double" };
ENUMERATED_CTL_INFO(uinfo, texts);
return 0;
}
@@ -3276,7 +3441,7 @@ static int hdspm_set_qs_wire(struct hdspm * hdspm, int mode)
static int snd_hdspm_info_qs_wire(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = { "Single", "Double", "Quad" };
+ static const char *const texts[] = { "Single", "Double", "Quad" };
ENUMERATED_CTL_INFO(uinfo, texts);
return 0;
}
@@ -3313,6 +3478,84 @@ static int snd_hdspm_put_qs_wire(struct snd_kcontrol *kcontrol,
return change;
}
+#define HDSPM_CONTROL_TRISTATE(xname, xindex) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+ .name = xname, \
+ .private_value = xindex, \
+ .info = snd_hdspm_info_tristate, \
+ .get = snd_hdspm_get_tristate, \
+ .put = snd_hdspm_put_tristate \
+}
+
+static int hdspm_tristate(struct hdspm *hdspm, u32 regmask)
+{
+ u32 reg = hdspm->settings_register & (regmask * 3);
+ return reg / regmask;
+}
+
+static int hdspm_set_tristate(struct hdspm *hdspm, int mode, u32 regmask)
+{
+ hdspm->settings_register &= ~(regmask * 3);
+ hdspm->settings_register |= (regmask * mode);
+ hdspm_write(hdspm, HDSPM_WR_SETTINGS, hdspm->settings_register);
+
+ return 0;
+}
+
+static int snd_hdspm_info_tristate(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ u32 regmask = kcontrol->private_value;
+
+ static const char *const texts_spdif[] = { "Optical", "Coaxial", "Internal" };
+ static const char *const texts_levels[] = { "Hi Gain", "+4 dBu", "-10 dBV" };
+
+ switch (regmask) {
+ case HDSPM_c0_Input0:
+ ENUMERATED_CTL_INFO(uinfo, texts_spdif);
+ break;
+ default:
+ ENUMERATED_CTL_INFO(uinfo, texts_levels);
+ break;
+ }
+ return 0;
+}
+
+static int snd_hdspm_get_tristate(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+ u32 regmask = kcontrol->private_value;
+
+ spin_lock_irq(&hdspm->lock);
+ ucontrol->value.enumerated.item[0] = hdspm_tristate(hdspm, regmask);
+ spin_unlock_irq(&hdspm->lock);
+ return 0;
+}
+
+static int snd_hdspm_put_tristate(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct hdspm *hdspm = snd_kcontrol_chip(kcontrol);
+ u32 regmask = kcontrol->private_value;
+ int change;
+ int val;
+
+ if (!snd_hdspm_use_is_exclusive(hdspm))
+ return -EBUSY;
+ val = ucontrol->value.integer.value[0];
+ if (val < 0)
+ val = 0;
+ if (val > 2)
+ val = 2;
+
+ spin_lock_irq(&hdspm->lock);
+ change = val != hdspm_tristate(hdspm, regmask);
+ hdspm_set_tristate(hdspm, val, regmask);
+ spin_unlock_irq(&hdspm->lock);
+ return change;
+}
+
#define HDSPM_MADI_SPEEDMODE(xname, xindex) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
.name = xname, \
@@ -3352,7 +3595,7 @@ static int hdspm_set_madi_speedmode(struct hdspm *hdspm, int mode)
static int snd_hdspm_info_madi_speedmode(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = { "Single", "Double", "Quad" };
+ static const char *const texts[] = { "Single", "Double", "Quad" };
ENUMERATED_CTL_INFO(uinfo, texts);
return 0;
}
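Returning to the HDSPM_CONTROL_TRISTATE helpers added above: they rely on the HDSPM_c0_* gain and input bits forming consecutive two-bit fields, where regmask names the low bit, regmask * 3 masks both bits and regmask * mode drops a 0..2 selector into place. A small worked example under that assumption:

    /* illustrative arithmetic mirroring hdspm_set_tristate() */
    static u32 hdspm_tristate_example(void)
    {
            u32 settings = 0;
            const u32 regmask = HDSPM_c0_AD_GAIN0;  /* 0x100000, low bit of the pair */

            settings &= ~(regmask * 3);     /* clears 0x300000, i.e. both field bits */
            settings |= regmask * 2;        /* "Input Level" mode 2 ("-10 dBV") -> 0x200000 */
            return settings;
    }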
@@ -3587,7 +3830,7 @@ static int snd_hdspm_put_playback_mixer(struct snd_kcontrol *kcontrol,
static int snd_hdspm_info_sync_check(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = { "No Lock", "Lock", "Sync", "N/A" };
+ static const char *const texts[] = { "No Lock", "Lock", "Sync", "N/A" };
ENUMERATED_CTL_INFO(uinfo, texts);
return 0;
}
@@ -3595,7 +3838,7 @@ static int snd_hdspm_info_sync_check(struct snd_kcontrol *kcontrol,
static int snd_hdspm_tco_info_lock_check(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = { "No Lock", "Lock" };
+ static const char *const texts[] = { "No Lock", "Lock" };
ENUMERATED_CTL_INFO(uinfo, texts);
return 0;
}
@@ -3745,9 +3988,18 @@ static int hdspm_tco_sync_check(struct hdspm *hdspm)
if (hdspm->tco) {
switch (hdspm->io_type) {
case MADI:
+ status = hdspm_read(hdspm, HDSPM_statusRegister);
+ if (status & HDSPM_tcoLockMadi) {
+ if (status & HDSPM_tcoSync)
+ return 2;
+ else
+ return 1;
+ }
+ return 0;
+ break;
case AES32:
status = hdspm_read(hdspm, HDSPM_statusRegister);
- if (status & HDSPM_tcoLock) {
+ if (status & HDSPM_tcoLockAes) {
if (status & HDSPM_tcoSync)
return 2;
else
@@ -3807,7 +4059,8 @@ static int snd_hdspm_get_sync_check(struct snd_kcontrol *kcontrol,
case 5: /* SYNC IN */
val = hdspm_sync_in_sync_check(hdspm); break;
default:
- val = hdspm_s1_sync_check(hdspm, ucontrol->id.index-1);
+ val = hdspm_s1_sync_check(hdspm,
+ kcontrol->private_value-1);
}
break;
@@ -3975,7 +4228,8 @@ static void hdspm_tco_write(struct hdspm *hdspm)
static int snd_hdspm_info_tco_sample_rate(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = { "44.1 kHz", "48 kHz" };
+ /* TODO freq from app could be supported here, see tco->samplerate */
+ static const char *const texts[] = { "44.1 kHz", "48 kHz" };
ENUMERATED_CTL_INFO(uinfo, texts);
return 0;
}
@@ -4021,7 +4275,8 @@ static int snd_hdspm_put_tco_sample_rate(struct snd_kcontrol *kcontrol,
static int snd_hdspm_info_tco_pull(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = { "0", "+ 0.1 %", "- 0.1 %", "+ 4 %", "- 4 %" };
+ static const char *const texts[] = { "0", "+ 0.1 %", "- 0.1 %",
+ "+ 4 %", "- 4 %" };
ENUMERATED_CTL_INFO(uinfo, texts);
return 0;
}
@@ -4066,7 +4321,7 @@ static int snd_hdspm_put_tco_pull(struct snd_kcontrol *kcontrol,
static int snd_hdspm_info_tco_wck_conversion(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = { "1:1", "44.1 -> 48", "48 -> 44.1" };
+ static const char *const texts[] = { "1:1", "44.1 -> 48", "48 -> 44.1" };
ENUMERATED_CTL_INFO(uinfo, texts);
return 0;
}
@@ -4112,7 +4367,7 @@ static int snd_hdspm_put_tco_wck_conversion(struct snd_kcontrol *kcontrol,
static int snd_hdspm_info_tco_frame_rate(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = { "24 fps", "25 fps", "29.97fps",
+ static const char *const texts[] = { "24 fps", "25 fps", "29.97fps",
"29.97 dfps", "30 fps", "30 dfps" };
ENUMERATED_CTL_INFO(uinfo, texts);
return 0;
@@ -4159,7 +4414,7 @@ static int snd_hdspm_put_tco_frame_rate(struct snd_kcontrol *kcontrol,
static int snd_hdspm_info_tco_sync_source(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
- static char *texts[] = { "LTC", "Video", "WCK" };
+ static const char *const texts[] = { "LTC", "Video", "WCK" };
ENUMERATED_CTL_INFO(uinfo, texts);
return 0;
}
@@ -4284,7 +4539,6 @@ static struct snd_kcontrol_new snd_hdspm_controls_aio[] = {
HDSPM_INTERNAL_CLOCK("Internal Clock", 0),
HDSPM_SYSTEM_CLOCK_MODE("System Clock Mode", 0),
HDSPM_PREF_SYNC_REF("Preferred Sync Reference", 0),
- HDSPM_AUTOSYNC_REF("AutoSync Reference", 0),
HDSPM_SYSTEM_SAMPLE_RATE("System Sample Rate", 0),
HDSPM_AUTOSYNC_SAMPLE_RATE("External Rate", 0),
HDSPM_SYNC_CHECK("WC SyncCheck", 0),
@@ -4298,7 +4552,16 @@ static struct snd_kcontrol_new snd_hdspm_controls_aio[] = {
HDSPM_AUTOSYNC_SAMPLE_RATE("SPDIF Frequency", 2),
HDSPM_AUTOSYNC_SAMPLE_RATE("ADAT Frequency", 3),
HDSPM_AUTOSYNC_SAMPLE_RATE("TCO Frequency", 4),
- HDSPM_AUTOSYNC_SAMPLE_RATE("SYNC IN Frequency", 5)
+ HDSPM_AUTOSYNC_SAMPLE_RATE("SYNC IN Frequency", 5),
+ HDSPM_CONTROL_TRISTATE("S/PDIF Input", HDSPM_c0_Input0),
+ HDSPM_TOGGLE_SETTING("S/PDIF Out Optical", HDSPM_c0_Spdif_Opt),
+ HDSPM_TOGGLE_SETTING("S/PDIF Out Professional", HDSPM_c0_Pro),
+ HDSPM_TOGGLE_SETTING("ADAT internal (AEB/TEB)", HDSPM_c0_AEB1),
+ HDSPM_TOGGLE_SETTING("XLR Breakout Cable", HDSPM_c0_Sym6db),
+ HDSPM_TOGGLE_SETTING("Single Speed WordClock Out", HDSPM_c0_Wck48),
+ HDSPM_CONTROL_TRISTATE("Input Level", HDSPM_c0_AD_GAIN0),
+ HDSPM_CONTROL_TRISTATE("Output Level", HDSPM_c0_DA_GAIN0),
+ HDSPM_CONTROL_TRISTATE("Phones Level", HDSPM_c0_PH_GAIN0)
/*
HDSPM_INPUT_SELECT("Input Select", 0),
@@ -4335,7 +4598,9 @@ static struct snd_kcontrol_new snd_hdspm_controls_raydat[] = {
HDSPM_AUTOSYNC_SAMPLE_RATE("ADAT3 Frequency", 5),
HDSPM_AUTOSYNC_SAMPLE_RATE("ADAT4 Frequency", 6),
HDSPM_AUTOSYNC_SAMPLE_RATE("TCO Frequency", 7),
- HDSPM_AUTOSYNC_SAMPLE_RATE("SYNC IN Frequency", 8)
+ HDSPM_AUTOSYNC_SAMPLE_RATE("SYNC IN Frequency", 8),
+ HDSPM_TOGGLE_SETTING("S/PDIF Out Professional", HDSPM_c0_Pro),
+ HDSPM_TOGGLE_SETTING("Single Speed WordClock Out", HDSPM_c0_Wck48)
};
static struct snd_kcontrol_new snd_hdspm_controls_aes32[] = {
@@ -4345,7 +4610,7 @@ static struct snd_kcontrol_new snd_hdspm_controls_aes32[] = {
HDSPM_PREF_SYNC_REF("Preferred Sync Reference", 0),
HDSPM_AUTOSYNC_REF("AutoSync Reference", 0),
HDSPM_SYSTEM_SAMPLE_RATE("System Sample Rate", 0),
- HDSPM_AUTOSYNC_SAMPLE_RATE("External Rate", 0),
+ HDSPM_AUTOSYNC_SAMPLE_RATE("External Rate", 11),
HDSPM_SYNC_CHECK("WC Sync Check", 0),
HDSPM_SYNC_CHECK("AES1 Sync Check", 1),
HDSPM_SYNC_CHECK("AES2 Sync Check", 2),
@@ -4501,77 +4766,22 @@ static int snd_hdspm_create_controls(struct snd_card *card,
------------------------------------------------------------*/
static void
-snd_hdspm_proc_read_madi(struct snd_info_entry * entry,
- struct snd_info_buffer *buffer)
+snd_hdspm_proc_read_tco(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
{
struct hdspm *hdspm = entry->private_data;
- unsigned int status, status2, control, freq;
-
- char *pref_sync_ref;
- char *autosync_ref;
- char *system_clock_mode;
- char *insel;
- int x, x2;
-
- /* TCO stuff */
+ unsigned int status, control;
int a, ltc, frames, seconds, minutes, hours;
unsigned int period;
u64 freq_const = 0;
u32 rate;
+ snd_iprintf(buffer, "--- TCO ---\n");
+
status = hdspm_read(hdspm, HDSPM_statusRegister);
- status2 = hdspm_read(hdspm, HDSPM_statusRegister2);
control = hdspm->control_register;
- freq = hdspm_read(hdspm, HDSPM_timecodeRegister);
- snd_iprintf(buffer, "%s (Card #%d) Rev.%x Status2first3bits: %x\n",
- hdspm->card_name, hdspm->card->number + 1,
- hdspm->firmware_rev,
- (status2 & HDSPM_version0) |
- (status2 & HDSPM_version1) | (status2 &
- HDSPM_version2));
- snd_iprintf(buffer, "HW Serial: 0x%06x%06x\n",
- (hdspm_read(hdspm, HDSPM_midiStatusIn1)>>8) & 0xFFFFFF,
- hdspm->serial);
-
- snd_iprintf(buffer, "IRQ: %d Registers bus: 0x%lx VM: 0x%lx\n",
- hdspm->irq, hdspm->port, (unsigned long)hdspm->iobase);
-
- snd_iprintf(buffer, "--- System ---\n");
-
- snd_iprintf(buffer,
- "IRQ Pending: Audio=%d, MIDI0=%d, MIDI1=%d, IRQcount=%d\n",
- status & HDSPM_audioIRQPending,
- (status & HDSPM_midi0IRQPending) ? 1 : 0,
- (status & HDSPM_midi1IRQPending) ? 1 : 0,
- hdspm->irq_count);
- snd_iprintf(buffer,
- "HW pointer: id = %d, rawptr = %d (%d->%d) "
- "estimated= %ld (bytes)\n",
- ((status & HDSPM_BufferID) ? 1 : 0),
- (status & HDSPM_BufferPositionMask),
- (status & HDSPM_BufferPositionMask) %
- (2 * (int)hdspm->period_bytes),
- ((status & HDSPM_BufferPositionMask) - 64) %
- (2 * (int)hdspm->period_bytes),
- (long) hdspm_hw_pointer(hdspm) * 4);
-
- snd_iprintf(buffer,
- "MIDI FIFO: Out1=0x%x, Out2=0x%x, In1=0x%x, In2=0x%x \n",
- hdspm_read(hdspm, HDSPM_midiStatusOut0) & 0xFF,
- hdspm_read(hdspm, HDSPM_midiStatusOut1) & 0xFF,
- hdspm_read(hdspm, HDSPM_midiStatusIn0) & 0xFF,
- hdspm_read(hdspm, HDSPM_midiStatusIn1) & 0xFF);
- snd_iprintf(buffer,
- "MIDIoverMADI FIFO: In=0x%x, Out=0x%x \n",
- hdspm_read(hdspm, HDSPM_midiStatusIn2) & 0xFF,
- hdspm_read(hdspm, HDSPM_midiStatusOut2) & 0xFF);
- snd_iprintf(buffer,
- "Register: ctrl1=0x%x, ctrl2=0x%x, status1=0x%x, "
- "status2=0x%x\n",
- hdspm->control_register, hdspm->control2_register,
- status, status2);
if (status & HDSPM_tco_detect) {
snd_iprintf(buffer, "TCO module detected.\n");
a = hdspm_read(hdspm, HDSPM_RD_TCO+4);
@@ -4665,6 +4875,75 @@ snd_hdspm_proc_read_madi(struct snd_info_entry * entry,
} else {
snd_iprintf(buffer, "No TCO module detected.\n");
}
+}
+
+static void
+snd_hdspm_proc_read_madi(struct snd_info_entry *entry,
+ struct snd_info_buffer *buffer)
+{
+ struct hdspm *hdspm = entry->private_data;
+ unsigned int status, status2, control, freq;
+
+ char *pref_sync_ref;
+ char *autosync_ref;
+ char *system_clock_mode;
+ char *insel;
+ int x, x2;
+
+ status = hdspm_read(hdspm, HDSPM_statusRegister);
+ status2 = hdspm_read(hdspm, HDSPM_statusRegister2);
+ control = hdspm->control_register;
+ freq = hdspm_read(hdspm, HDSPM_timecodeRegister);
+
+ snd_iprintf(buffer, "%s (Card #%d) Rev.%x Status2first3bits: %x\n",
+ hdspm->card_name, hdspm->card->number + 1,
+ hdspm->firmware_rev,
+ (status2 & HDSPM_version0) |
+ (status2 & HDSPM_version1) | (status2 &
+ HDSPM_version2));
+
+ snd_iprintf(buffer, "HW Serial: 0x%06x%06x\n",
+ (hdspm_read(hdspm, HDSPM_midiStatusIn1)>>8) & 0xFFFFFF,
+ hdspm->serial);
+
+ snd_iprintf(buffer, "IRQ: %d Registers bus: 0x%lx VM: 0x%lx\n",
+ hdspm->irq, hdspm->port, (unsigned long)hdspm->iobase);
+
+ snd_iprintf(buffer, "--- System ---\n");
+
+ snd_iprintf(buffer,
+ "IRQ Pending: Audio=%d, MIDI0=%d, MIDI1=%d, IRQcount=%d\n",
+ status & HDSPM_audioIRQPending,
+ (status & HDSPM_midi0IRQPending) ? 1 : 0,
+ (status & HDSPM_midi1IRQPending) ? 1 : 0,
+ hdspm->irq_count);
+ snd_iprintf(buffer,
+ "HW pointer: id = %d, rawptr = %d (%d->%d) "
+ "estimated= %ld (bytes)\n",
+ ((status & HDSPM_BufferID) ? 1 : 0),
+ (status & HDSPM_BufferPositionMask),
+ (status & HDSPM_BufferPositionMask) %
+ (2 * (int)hdspm->period_bytes),
+ ((status & HDSPM_BufferPositionMask) - 64) %
+ (2 * (int)hdspm->period_bytes),
+ (long) hdspm_hw_pointer(hdspm) * 4);
+
+ snd_iprintf(buffer,
+ "MIDI FIFO: Out1=0x%x, Out2=0x%x, In1=0x%x, In2=0x%x \n",
+ hdspm_read(hdspm, HDSPM_midiStatusOut0) & 0xFF,
+ hdspm_read(hdspm, HDSPM_midiStatusOut1) & 0xFF,
+ hdspm_read(hdspm, HDSPM_midiStatusIn0) & 0xFF,
+ hdspm_read(hdspm, HDSPM_midiStatusIn1) & 0xFF);
+ snd_iprintf(buffer,
+ "MIDIoverMADI FIFO: In=0x%x, Out=0x%x \n",
+ hdspm_read(hdspm, HDSPM_midiStatusIn2) & 0xFF,
+ hdspm_read(hdspm, HDSPM_midiStatusOut2) & 0xFF);
+ snd_iprintf(buffer,
+ "Register: ctrl1=0x%x, ctrl2=0x%x, status1=0x%x, "
+ "status2=0x%x\n",
+ hdspm->control_register, hdspm->control2_register,
+ status, status2);
+
snd_iprintf(buffer, "--- Settings ---\n");
@@ -4768,6 +5047,9 @@ snd_hdspm_proc_read_madi(struct snd_info_entry * entry,
(status & HDSPM_RX_64ch) ? "64 channels" :
"56 channels");
+ /* call readout function for TCO specific status */
+ snd_hdspm_proc_read_tco(entry, buffer);
+
snd_iprintf(buffer, "\n");
}
@@ -4909,11 +5191,18 @@ snd_hdspm_proc_read_aes32(struct snd_info_entry * entry,
autosync_ref = "AES7"; break;
case HDSPM_AES32_AUTOSYNC_FROM_AES8:
autosync_ref = "AES8"; break;
+ case HDSPM_AES32_AUTOSYNC_FROM_TCO:
+ autosync_ref = "TCO"; break;
+ case HDSPM_AES32_AUTOSYNC_FROM_SYNC_IN:
+ autosync_ref = "Sync In"; break;
default:
autosync_ref = "---"; break;
}
snd_iprintf(buffer, "AutoSync ref = %s\n", autosync_ref);
+ /* call readout function for TCO specific status */
+ snd_hdspm_proc_read_tco(entry, buffer);
+
snd_iprintf(buffer, "\n");
}
@@ -5097,7 +5386,7 @@ static int snd_hdspm_set_defaults(struct hdspm * hdspm)
case AES32:
hdspm->control_register =
- HDSPM_ClockModeMaster | /* Master Cloack Mode on */
+ HDSPM_ClockModeMaster | /* Master Clock Mode on */
hdspm_encode_latency(7) | /* latency max=8192samples */
HDSPM_SyncRef0 | /* AES1 is syncclock */
HDSPM_LineOut | /* Analog output in */
@@ -5123,9 +5412,8 @@ static int snd_hdspm_set_defaults(struct hdspm * hdspm)
all_in_all_mixer(hdspm, 0 * UNITY_GAIN);
- if (hdspm->io_type == AIO || hdspm->io_type == RayDAT) {
+ if (hdspm_is_raydat_or_aio(hdspm))
hdspm_write(hdspm, HDSPM_WR_SETTINGS, hdspm->settings_register);
- }
/* set a default rate so that the channel map is set up. */
hdspm_set_rate(hdspm, 48000, 1);
@@ -5371,6 +5659,16 @@ static int snd_hdspm_hw_params(struct snd_pcm_substream *substream,
*/
+ /* For AES cards, the float format bit is the same as the
+ * preferred sync reference. Since we don't want to break
+ * sync settings, we have to skip the remaining part of this
+ * function.
+ */
+ if (hdspm->io_type == AES32) {
+ return 0;
+ }
+
+
/* Switch to native float format if requested */
if (SNDRV_PCM_FORMAT_FLOAT_LE == params_format(params)) {
if (!(hdspm->control_register & HDSPe_FLOAT_FORMAT))
@@ -6013,7 +6311,7 @@ static int snd_hdspm_hwdep_ioctl(struct snd_hwdep *hw, struct file *file,
ltc.format = fps_2997;
break;
default:
- ltc.format = 30;
+ ltc.format = fps_30;
break;
}
if (i & HDSPM_TCO1_set_drop_frame_flag) {
@@ -6479,10 +6777,6 @@ static int snd_hdspm_create(struct snd_card *card,
break;
case AIO:
- if (0 == (hdspm_read(hdspm, HDSPM_statusRegister2) & HDSPM_s2_AEBI_D)) {
- snd_printk(KERN_INFO "HDSPM: AEB input board found, but not supported\n");
- }
-
hdspm->ss_in_channels = AIO_IN_SS_CHANNELS;
hdspm->ds_in_channels = AIO_IN_DS_CHANNELS;
hdspm->qs_in_channels = AIO_IN_QS_CHANNELS;
@@ -6490,6 +6784,20 @@ static int snd_hdspm_create(struct snd_card *card,
hdspm->ds_out_channels = AIO_OUT_DS_CHANNELS;
hdspm->qs_out_channels = AIO_OUT_QS_CHANNELS;
+ if (0 == (hdspm_read(hdspm, HDSPM_statusRegister2) & HDSPM_s2_AEBI_D)) {
+ snd_printk(KERN_INFO "HDSPM: AEB input board found\n");
+ hdspm->ss_in_channels += 4;
+ hdspm->ds_in_channels += 4;
+ hdspm->qs_in_channels += 4;
+ }
+
+ if (0 == (hdspm_read(hdspm, HDSPM_statusRegister2) & HDSPM_s2_AEBO_D)) {
+ snd_printk(KERN_INFO "HDSPM: AEB output board found\n");
+ hdspm->ss_out_channels += 4;
+ hdspm->ds_out_channels += 4;
+ hdspm->qs_out_channels += 4;
+ }
+
hdspm->channel_map_out_ss = channel_map_aio_out_ss;
hdspm->channel_map_out_ds = channel_map_aio_out_ds;
hdspm->channel_map_out_qs = channel_map_aio_out_qs;
@@ -6558,6 +6866,7 @@ static int snd_hdspm_create(struct snd_card *card,
break;
case MADI:
+ case AES32:
if (hdspm_read(hdspm, HDSPM_statusRegister) & HDSPM_tco_detect) {
hdspm->midiPorts++;
hdspm->tco = kzalloc(sizeof(struct hdspm_tco),
@@ -6565,7 +6874,7 @@ static int snd_hdspm_create(struct snd_card *card,
if (NULL != hdspm->tco) {
hdspm_tco_write(hdspm);
}
- snd_printk(KERN_INFO "HDSPM: MADI TCO module found\n");
+ snd_printk(KERN_INFO "HDSPM: MADI/AES TCO module found\n");
} else {
hdspm->tco = NULL;
}
@@ -6580,10 +6889,12 @@ static int snd_hdspm_create(struct snd_card *card,
case AES32:
if (hdspm->tco) {
hdspm->texts_autosync = texts_autosync_aes_tco;
- hdspm->texts_autosync_items = 10;
+ hdspm->texts_autosync_items =
+ ARRAY_SIZE(texts_autosync_aes_tco);
} else {
hdspm->texts_autosync = texts_autosync_aes;
- hdspm->texts_autosync_items = 9;
+ hdspm->texts_autosync_items =
+ ARRAY_SIZE(texts_autosync_aes);
}
break;
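
The hunk above replaces the hardcoded autosync item counts with ARRAY_SIZE(), so the count can no longer drift out of sync with the text tables. A minimal illustration of the idiom, with a hypothetical array name (ARRAY_SIZE() comes from <linux/kernel.h>; this is not code from the patch):

	static const char *const texts_demo[] = { "Word Clock", "MADI", "TCO" };

	/* Expands to sizeof(texts_demo) / sizeof(texts_demo[0]), so the
	 * count stays correct if entries are added or removed later. */
	unsigned int n_items = ARRAY_SIZE(texts_demo);	/* 3 here */
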
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 45eeaa9f7fe..5138b849305 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -26,12 +26,9 @@ if SND_SOC
config SND_SOC_AC97_BUS
bool
-config SND_SOC_DMAENGINE_PCM
- bool
-
config SND_SOC_GENERIC_DMAENGINE_PCM
bool
- select SND_SOC_DMAENGINE_PCM
+ select SND_DMAENGINE_PCM
# All the supported SoCs
source "sound/soc/atmel/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index bc0261476d7..61a64d28190 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -1,10 +1,6 @@
snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o
snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o
-ifneq ($(CONFIG_SND_SOC_DMAENGINE_PCM),)
-snd-soc-core-objs += soc-dmaengine-pcm.o
-endif
-
ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),)
snd-soc-core-objs += soc-generic-dmaengine-pcm.o
endif
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
index 3fdd87fa18a..e48d38a1b95 100644
--- a/sound/soc/atmel/Kconfig
+++ b/sound/soc/atmel/Kconfig
@@ -13,6 +13,7 @@ config SND_ATMEL_SOC_PDC
config SND_ATMEL_SOC_DMA
tristate
depends on SND_ATMEL_SOC
+ select SND_SOC_GENERIC_DMAENGINE_PCM
config SND_ATMEL_SOC_SSC
tristate
@@ -32,6 +33,26 @@ config SND_AT91_SOC_SAM9G20_WM8731
Say Y if you want to add support for SoC audio on WM8731-based
AT91sam9g20 evaluation board.
+config SND_ATMEL_SOC_WM8904
+ tristate "Atmel ASoC driver for boards using WM8904 codec"
+ depends on ARCH_AT91 && ATMEL_SSC && SND_ATMEL_SOC
+ select SND_ATMEL_SOC_SSC
+ select SND_ATMEL_SOC_DMA
+ select SND_SOC_WM8904
+ help
+ Say Y if you want to add support for the Atmel ASoC driver on boards
+ using the WM8904 codec.
+
+config SND_AT91_SOC_SAM9X5_WM8731
+ tristate "SoC Audio support for WM8731-based at91sam9x5 board"
+ depends on ATMEL_SSC && SND_ATMEL_SOC && SOC_AT91SAM9X5
+ select SND_ATMEL_SOC_SSC
+ select SND_ATMEL_SOC_DMA
+ select SND_SOC_WM8731
+ help
+ Say Y if you want to add support for SoC audio on an
+ at91sam9x5-based board using the WM8731 codec.
+
config SND_AT91_SOC_AFEB9260
tristate "SoC Audio support for AFEB9260 board"
depends on ARCH_AT91 && ATMEL_SSC && ARCH_AT91 && MACH_AFEB9260 && SND_ATMEL_SOC
diff --git a/sound/soc/atmel/Makefile b/sound/soc/atmel/Makefile
index 41967ccb6f4..5baabc8bde3 100644
--- a/sound/soc/atmel/Makefile
+++ b/sound/soc/atmel/Makefile
@@ -11,6 +11,10 @@ obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel_ssc_dai.o
# AT91 Machine Support
snd-soc-sam9g20-wm8731-objs := sam9g20_wm8731.o
+snd-atmel-soc-wm8904-objs := atmel_wm8904.o
+snd-soc-sam9x5-wm8731-objs := sam9x5_wm8731.o
obj-$(CONFIG_SND_AT91_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o
+obj-$(CONFIG_SND_ATMEL_SOC_WM8904) += snd-atmel-soc-wm8904.o
+obj-$(CONFIG_SND_AT91_SOC_SAM9X5_WM8731) += snd-soc-sam9x5-wm8731.o
obj-$(CONFIG_SND_AT91_SOC_AFEB9260) += snd-soc-afeb9260.o
diff --git a/sound/soc/atmel/atmel-pcm-dma.c b/sound/soc/atmel/atmel-pcm-dma.c
index d1282652679..06082e5e5dc 100644
--- a/sound/soc/atmel/atmel-pcm-dma.c
+++ b/sound/soc/atmel/atmel-pcm-dma.c
@@ -91,138 +91,52 @@ static void atmel_pcm_dma_irq(u32 ssc_sr,
}
}
-/*--------------------------------------------------------------------------*\
- * DMAENGINE operations
-\*--------------------------------------------------------------------------*/
-static bool filter(struct dma_chan *chan, void *slave)
-{
- struct at_dma_slave *sl = slave;
-
- if (sl->dma_dev == chan->device->dev) {
- chan->private = sl;
- return true;
- } else {
- return false;
- }
-}
-
static int atmel_pcm_configure_dma(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params, struct atmel_pcm_dma_params *prtd)
+ struct snd_pcm_hw_params *params, struct dma_slave_config *slave_config)
{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct atmel_pcm_dma_params *prtd;
struct ssc_device *ssc;
- struct dma_chan *dma_chan;
- struct dma_slave_config slave_config;
int ret;
+ prtd = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
ssc = prtd->ssc;
- ret = snd_hwparams_to_dma_slave_config(substream, params,
- &slave_config);
+ ret = snd_hwparams_to_dma_slave_config(substream, params, slave_config);
if (ret) {
pr_err("atmel-pcm: hwparams to dma slave configure failed\n");
return ret;
}
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- slave_config.dst_addr = (dma_addr_t)ssc->phybase + SSC_THR;
- slave_config.dst_maxburst = 1;
+ slave_config->dst_addr = ssc->phybase + SSC_THR;
+ slave_config->dst_maxburst = 1;
} else {
- slave_config.src_addr = (dma_addr_t)ssc->phybase + SSC_RHR;
- slave_config.src_maxburst = 1;
- }
-
- dma_chan = snd_dmaengine_pcm_get_chan(substream);
- if (dmaengine_slave_config(dma_chan, &slave_config)) {
- pr_err("atmel-pcm: failed to configure dma channel\n");
- ret = -EBUSY;
- return ret;
- }
-
- return 0;
-}
-
-static int atmel_pcm_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct atmel_pcm_dma_params *prtd;
- struct ssc_device *ssc;
- struct at_dma_slave *sdata = NULL;
- int ret;
-
- snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
-
- prtd = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
- ssc = prtd->ssc;
- if (ssc->pdev)
- sdata = ssc->pdev->dev.platform_data;
-
- ret = snd_dmaengine_pcm_open_request_chan(substream, filter, sdata);
- if (ret) {
- pr_err("atmel-pcm: dmaengine pcm open failed\n");
- return -EINVAL;
- }
-
- ret = atmel_pcm_configure_dma(substream, params, prtd);
- if (ret) {
- pr_err("atmel-pcm: failed to configure dmai\n");
- goto err;
+ slave_config->src_addr = ssc->phybase + SSC_RHR;
+ slave_config->src_maxburst = 1;
}
prtd->dma_intr_handler = atmel_pcm_dma_irq;
return 0;
-err:
- snd_dmaengine_pcm_close_release_chan(substream);
- return ret;
}
-static int atmel_pcm_dma_prepare(struct snd_pcm_substream *substream)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct atmel_pcm_dma_params *prtd;
-
- prtd = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
-
- ssc_writex(prtd->ssc->regs, SSC_IER, prtd->mask->ssc_error);
- ssc_writex(prtd->ssc->regs, SSC_CR, prtd->mask->ssc_enable);
-
- return 0;
-}
-
-static int atmel_pcm_open(struct snd_pcm_substream *substream)
-{
- snd_soc_set_runtime_hwparams(substream, &atmel_pcm_dma_hardware);
-
- return 0;
-}
-
-static struct snd_pcm_ops atmel_pcm_ops = {
- .open = atmel_pcm_open,
- .close = snd_dmaengine_pcm_close_release_chan,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = atmel_pcm_hw_params,
- .prepare = atmel_pcm_dma_prepare,
- .trigger = snd_dmaengine_pcm_trigger,
- .pointer = snd_dmaengine_pcm_pointer_no_residue,
- .mmap = atmel_pcm_mmap,
-};
-
-static struct snd_soc_platform_driver atmel_soc_platform = {
- .ops = &atmel_pcm_ops,
- .pcm_new = atmel_pcm_new,
- .pcm_free = atmel_pcm_free,
+static const struct snd_dmaengine_pcm_config atmel_dmaengine_pcm_config = {
+ .prepare_slave_config = atmel_pcm_configure_dma,
+ .pcm_hardware = &atmel_pcm_dma_hardware,
+ .prealloc_buffer_size = ATMEL_SSC_DMABUF_SIZE,
};
int atmel_pcm_dma_platform_register(struct device *dev)
{
- return snd_soc_register_platform(dev, &atmel_soc_platform);
+ return snd_dmaengine_pcm_register(dev, &atmel_dmaengine_pcm_config,
+ SND_DMAENGINE_PCM_FLAG_NO_RESIDUE);
}
EXPORT_SYMBOL(atmel_pcm_dma_platform_register);
void atmel_pcm_dma_platform_unregister(struct device *dev)
{
- snd_soc_unregister_platform(dev);
+ snd_dmaengine_pcm_unregister(dev);
}
EXPORT_SYMBOL(atmel_pcm_dma_platform_unregister);
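
For orientation, the conversion above hands the whole PCM layer to the generic dmaengine code; roughly, the runtime call order becomes the following (a sketch of the snd_dmaengine_pcm flow, not code added by this patch):

	/*
	 * hw_params(substream, params)
	 *   -> config->prepare_slave_config()     (atmel_pcm_configure_dma:
	 *      fills dst_addr/src_addr and maxburst from the SSC THR/RHR)
	 *   -> dmaengine_slave_config(chan, &cfg)  (applied by the core)
	 *
	 * The DMA channel is requested by the generic layer itself, so the
	 * driver-side filter function and open/close ops go away, and
	 * SND_DMAENGINE_PCM_FLAG_NO_RESIDUE keeps the period-granularity
	 * pointer behaviour of the old pointer_no_residue callback.
	 */
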
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index f3fdfa07fcb..0ecf356027f 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -73,6 +73,7 @@ static struct atmel_ssc_mask ssc_tx_mask = {
.ssc_disable = SSC_BIT(CR_TXDIS),
.ssc_endx = SSC_BIT(SR_ENDTX),
.ssc_endbuf = SSC_BIT(SR_TXBUFE),
+ .ssc_error = SSC_BIT(SR_OVRUN),
.pdc_enable = ATMEL_PDC_TXTEN,
.pdc_disable = ATMEL_PDC_TXTDIS,
};
@@ -82,6 +83,7 @@ static struct atmel_ssc_mask ssc_rx_mask = {
.ssc_disable = SSC_BIT(CR_RXDIS),
.ssc_endx = SSC_BIT(SR_ENDRX),
.ssc_endbuf = SSC_BIT(SR_RXBUFF),
+ .ssc_error = SSC_BIT(SR_OVRUN),
.pdc_enable = ATMEL_PDC_RXTEN,
.pdc_disable = ATMEL_PDC_RXTDIS,
};
@@ -196,15 +198,27 @@ static int atmel_ssc_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
struct atmel_ssc_info *ssc_p = &ssc_info[dai->id];
- int dir_mask;
+ struct atmel_pcm_dma_params *dma_params;
+ int dir, dir_mask;
pr_debug("atmel_ssc_startup: SSC_SR=0x%u\n",
ssc_readl(ssc_p->ssc->regs, SR));
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ dir = 0;
dir_mask = SSC_DIR_MASK_PLAYBACK;
- else
+ } else {
+ dir = 1;
dir_mask = SSC_DIR_MASK_CAPTURE;
+ }
+
+ dma_params = &ssc_dma_params[dai->id][dir];
+ dma_params->ssc = ssc_p->ssc;
+ dma_params->substream = substream;
+
+ ssc_p->dma_params[dir] = dma_params;
+
+ snd_soc_dai_set_dma_data(dai, substream, dma_params);
spin_lock_irq(&ssc_p->lock);
if (ssc_p->dir_mask & dir_mask) {
@@ -325,7 +339,6 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
- struct snd_soc_pcm_runtime *rtd = snd_pcm_substream_chip(substream);
int id = dai->id;
struct atmel_ssc_info *ssc_p = &ssc_info[id];
struct atmel_pcm_dma_params *dma_params;
@@ -344,19 +357,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
else
dir = 1;
- dma_params = &ssc_dma_params[id][dir];
- dma_params->ssc = ssc_p->ssc;
- dma_params->substream = substream;
-
- ssc_p->dma_params[dir] = dma_params;
-
- /*
- * The snd_soc_pcm_stream->dma_data field is only used to communicate
- * the appropriate DMA parameters to the pcm driver hw_params()
- * function. It should not be used for other purposes
- * as it is common to all substreams.
- */
- snd_soc_dai_set_dma_data(rtd->cpu_dai, substream, dma_params);
+ dma_params = ssc_p->dma_params[dir];
channels = params_channels(params);
@@ -648,6 +649,7 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream,
dma_params = ssc_p->dma_params[dir];
ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable);
+ ssc_writel(ssc_p->ssc->regs, IER, dma_params->mask->ssc_error);
pr_debug("%s enabled SSC_SR=0x%08x\n",
dir ? "receive" : "transmit",
diff --git a/sound/soc/atmel/atmel_wm8904.c b/sound/soc/atmel/atmel_wm8904.c
new file mode 100644
index 00000000000..7222380131e
--- /dev/null
+++ b/sound/soc/atmel/atmel_wm8904.c
@@ -0,0 +1,254 @@
+/*
+ * atmel_wm8904 - Atmel ASoC driver for boards with WM8904 codec.
+ *
+ * Copyright (C) 2012 Atmel
+ *
+ * Author: Bo Shen <voice.shen@atmel.com>
+ *
+ * GPLv2 or later
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+
+#include <sound/soc.h>
+
+#include "../codecs/wm8904.h"
+#include "atmel_ssc_dai.h"
+
+#define MCLK_RATE 32768
+
+static struct clk *mclk;
+
+static const struct snd_soc_dapm_widget atmel_asoc_wm8904_dapm_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_MIC("Mic", NULL),
+ SND_SOC_DAPM_LINE("Line In Jack", NULL),
+};
+
+static int atmel_asoc_wm8904_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ int ret;
+
+ ret = snd_soc_dai_set_pll(codec_dai, WM8904_FLL_MCLK, WM8904_FLL_MCLK,
+ MCLK_RATE, params_rate(params) * 256);
+ if (ret < 0) {
+ pr_err("%s - failed to set wm8904 codec PLL.", __func__);
+ return ret;
+ }
+
+ /*
+ * The wm8904 uses the FLL output as its system clock, so
+ * set_sysclk() ignores the freq parameter; simply pass 0.
+ */
+ ret = snd_soc_dai_set_sysclk(codec_dai, WM8904_CLK_FLL,
+ 0, SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ pr_err("%s -failed to set wm8904 SYSCLK\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct snd_soc_ops atmel_asoc_wm8904_ops = {
+ .hw_params = atmel_asoc_wm8904_hw_params,
+};
+
+static int atmel_set_bias_level(struct snd_soc_card *card,
+ struct snd_soc_dapm_context *dapm,
+ enum snd_soc_bias_level level)
+{
+ if (dapm->bias_level == SND_SOC_BIAS_STANDBY) {
+ switch (level) {
+ case SND_SOC_BIAS_PREPARE:
+ clk_prepare_enable(mclk);
+ break;
+ case SND_SOC_BIAS_OFF:
+ clk_disable_unprepare(mclk);
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+};
+
+static struct snd_soc_dai_link atmel_asoc_wm8904_dailink = {
+ .name = "WM8904",
+ .stream_name = "WM8904 PCM",
+ .codec_dai_name = "wm8904-hifi",
+ .dai_fmt = SND_SOC_DAIFMT_I2S
+ | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBM_CFM,
+ .ops = &atmel_asoc_wm8904_ops,
+};
+
+static struct snd_soc_card atmel_asoc_wm8904_card = {
+ .name = "atmel_asoc_wm8904",
+ .owner = THIS_MODULE,
+ .set_bias_level = atmel_set_bias_level,
+ .dai_link = &atmel_asoc_wm8904_dailink,
+ .num_links = 1,
+ .dapm_widgets = atmel_asoc_wm8904_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(atmel_asoc_wm8904_dapm_widgets),
+ .fully_routed = true,
+};
+
+static int atmel_asoc_wm8904_dt_init(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *codec_np, *cpu_np;
+ struct snd_soc_card *card = &atmel_asoc_wm8904_card;
+ struct snd_soc_dai_link *dailink = &atmel_asoc_wm8904_dailink;
+ int ret;
+
+ if (!np) {
+ dev_err(&pdev->dev, "only device tree supported\n");
+ return -EINVAL;
+ }
+
+ ret = snd_soc_of_parse_card_name(card, "atmel,model");
+ if (ret) {
+ dev_err(&pdev->dev, "failed to parse card name\n");
+ return ret;
+ }
+
+ ret = snd_soc_of_parse_audio_routing(card, "atmel,audio-routing");
+ if (ret) {
+ dev_err(&pdev->dev, "failed to parse audio routing\n");
+ return ret;
+ }
+
+ cpu_np = of_parse_phandle(np, "atmel,ssc-controller", 0);
+ if (!cpu_np) {
+ dev_err(&pdev->dev, "failed to get dai and pcm info\n");
+ ret = -EINVAL;
+ return ret;
+ }
+ dailink->cpu_of_node = cpu_np;
+ dailink->platform_of_node = cpu_np;
+ of_node_put(cpu_np);
+
+ codec_np = of_parse_phandle(np, "atmel,audio-codec", 0);
+ if (!codec_np) {
+ dev_err(&pdev->dev, "failed to get codec info\n");
+ ret = -EINVAL;
+ return ret;
+ }
+ dailink->codec_of_node = codec_np;
+ of_node_put(codec_np);
+
+ return 0;
+}
+
+static int atmel_asoc_wm8904_probe(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = &atmel_asoc_wm8904_card;
+ struct snd_soc_dai_link *dailink = &atmel_asoc_wm8904_dailink;
+ struct clk *clk_src;
+ struct pinctrl *pinctrl;
+ int id, ret;
+
+ pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+ if (IS_ERR(pinctrl)) {
+ dev_err(&pdev->dev, "failed to request pinctrl\n");
+ return PTR_ERR(pinctrl);
+ }
+
+ card->dev = &pdev->dev;
+ ret = atmel_asoc_wm8904_dt_init(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to init dt info\n");
+ return ret;
+ }
+
+ id = of_alias_get_id((struct device_node *)dailink->cpu_of_node, "ssc");
+ ret = atmel_ssc_set_audio(id);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "failed to set SSC %d for audio\n", id);
+ return ret;
+ }
+
+ mclk = clk_get(NULL, "pck0");
+ if (IS_ERR(mclk)) {
+ dev_err(&pdev->dev, "failed to get pck0\n");
+ ret = PTR_ERR(mclk);
+ goto err_set_audio;
+ }
+
+ clk_src = clk_get(NULL, "clk32k");
+ if (IS_ERR(clk_src)) {
+ dev_err(&pdev->dev, "failed to get clk32k\n");
+ ret = PTR_ERR(clk_src);
+ goto err_set_audio;
+ }
+
+ ret = clk_set_parent(mclk, clk_src);
+ clk_put(clk_src);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "failed to set MCLK parent\n");
+ goto err_set_audio;
+ }
+
+ dev_info(&pdev->dev, "setting pck0 to %dHz\n", MCLK_RATE);
+ clk_set_rate(mclk, MCLK_RATE);
+
+ ret = snd_soc_register_card(card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card failed\n");
+ goto err_set_audio;
+ }
+
+ return 0;
+
+err_set_audio:
+ atmel_ssc_put_audio(id);
+ return ret;
+}
+
+static int atmel_asoc_wm8904_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct snd_soc_dai_link *dailink = &atmel_asoc_wm8904_dailink;
+ int id;
+
+ id = of_alias_get_id((struct device_node *)dailink->cpu_of_node, "ssc");
+
+ snd_soc_unregister_card(card);
+ atmel_ssc_put_audio(id);
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id atmel_asoc_wm8904_dt_ids[] = {
+ { .compatible = "atmel,asoc-wm8904", },
+ { }
+};
+#endif
+
+static struct platform_driver atmel_asoc_wm8904_driver = {
+ .driver = {
+ .name = "atmel-wm8904-audio",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(atmel_asoc_wm8904_dt_ids),
+ },
+ .probe = atmel_asoc_wm8904_probe,
+ .remove = atmel_asoc_wm8904_remove,
+};
+
+module_platform_driver(atmel_asoc_wm8904_driver);
+
+/* Module information */
+MODULE_AUTHOR("Bo Shen <voice.shen@atmel.com>");
+MODULE_DESCRIPTION("ALSA SoC machine driver for Atmel EK with WM8904 codec");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/atmel/sam9x5_wm8731.c b/sound/soc/atmel/sam9x5_wm8731.c
new file mode 100644
index 00000000000..992ae38d5a1
--- /dev/null
+++ b/sound/soc/atmel/sam9x5_wm8731.c
@@ -0,0 +1,208 @@
+/*
+ * sam9x5_wm8731 -- SoC audio for AT91SAM9X5-based boards
+ * that use the WM8731 codec.
+ *
+ * Copyright (C) 2011 Atmel,
+ * Nicolas Ferre <nicolas.ferre@atmel.com>
+ *
+ * Copyright (C) 2013 Paratronic,
+ * Richard Genoud <richard.genoud@gmail.com>
+ *
+ * Based on sam9g20_wm8731.c by:
+ * Sedji Gaouaou <sedji.gaouaou@atmel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/of.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/device.h>
+
+#include <sound/soc.h>
+#include <sound/soc-dai.h>
+#include <sound/soc-dapm.h>
+
+#include "../codecs/wm8731.h"
+#include "atmel_ssc_dai.h"
+
+
+#define MCLK_RATE 12288000
+
+#define DRV_NAME "sam9x5-snd-wm8731"
+
+struct sam9x5_drvdata {
+ int ssc_id;
+};
+
+/*
+ * Logic for a wm8731 as connected on an at91sam9x5ek-based board.
+ */
+static int sam9x5_wm8731_init(struct snd_soc_pcm_runtime *rtd)
+{
+ struct snd_soc_dai *codec_dai = rtd->codec_dai;
+ struct device *dev = rtd->dev;
+ int ret;
+
+ dev_dbg(dev, "ASoC: %s called\n", __func__);
+
+ /* set the codec system clock for DAC and ADC */
+ ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_XTAL,
+ MCLK_RATE, SND_SOC_CLOCK_IN);
+ if (ret < 0) {
+ dev_err(dev, "ASoC: Failed to set WM8731 SYSCLK: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * Audio paths on at91sam9x5ek board:
+ *
+ * |A| ------------> | | ---R----> Headphone Jack
+ * |T| <----\ | WM | ---L--/
+ * |9| ---> CLK <--> | 8731 | <--R----- Line In Jack
+ * |1| <------------ | | <--L--/
+ */
+static const struct snd_soc_dapm_widget sam9x5_dapm_widgets[] = {
+ SND_SOC_DAPM_HP("Headphone Jack", NULL),
+ SND_SOC_DAPM_LINE("Line In Jack", NULL),
+};
+
+static int sam9x5_wm8731_driver_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device_node *codec_np, *cpu_np;
+ struct snd_soc_card *card;
+ struct snd_soc_dai_link *dai;
+ struct sam9x5_drvdata *priv;
+ int ret;
+
+ if (!np) {
+ dev_err(&pdev->dev, "No device node supplied\n");
+ return -EINVAL;
+ }
+
+ card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL);
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ dai = devm_kzalloc(&pdev->dev, sizeof(*dai), GFP_KERNEL);
+ if (!dai || !card || !priv) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ card->dev = &pdev->dev;
+ card->owner = THIS_MODULE;
+ card->dai_link = dai;
+ card->num_links = 1;
+ card->dapm_widgets = sam9x5_dapm_widgets;
+ card->num_dapm_widgets = ARRAY_SIZE(sam9x5_dapm_widgets);
+ dai->name = "WM8731";
+ dai->stream_name = "WM8731 PCM";
+ dai->codec_dai_name = "wm8731-hifi";
+ dai->init = sam9x5_wm8731_init;
+ dai->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+ | SND_SOC_DAIFMT_CBM_CFM;
+
+ ret = snd_soc_of_parse_card_name(card, "atmel,model");
+ if (ret) {
+ dev_err(&pdev->dev, "atmel,model node missing\n");
+ goto out;
+ }
+
+ ret = snd_soc_of_parse_audio_routing(card, "atmel,audio-routing");
+ if (ret) {
+ dev_err(&pdev->dev, "atmel,audio-routing node missing\n");
+ goto out;
+ }
+
+ codec_np = of_parse_phandle(np, "atmel,audio-codec", 0);
+ if (!codec_np) {
+ dev_err(&pdev->dev, "atmel,audio-codec node missing\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ dai->codec_of_node = codec_np;
+
+ cpu_np = of_parse_phandle(np, "atmel,ssc-controller", 0);
+ if (!cpu_np) {
+ dev_err(&pdev->dev, "atmel,ssc-controller node missing\n");
+ ret = -EINVAL;
+ goto out;
+ }
+ dai->cpu_of_node = cpu_np;
+ dai->platform_of_node = cpu_np;
+
+ priv->ssc_id = of_alias_get_id(cpu_np, "ssc");
+
+ ret = atmel_ssc_set_audio(priv->ssc_id);
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "ASoC: Failed to set SSC %d for audio: %d\n",
+ priv->ssc_id, ret);
+ goto out;
+ }
+
+ of_node_put(codec_np);
+ of_node_put(cpu_np);
+
+ platform_set_drvdata(pdev, card);
+
+ ret = snd_soc_register_card(card);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "ASoC: Platform device allocation failed\n");
+ goto out_put_audio;
+ }
+
+ dev_dbg(&pdev->dev, "ASoC: %s ok\n", __func__);
+
+ return ret;
+
+out_put_audio:
+ atmel_ssc_put_audio(priv->ssc_id);
+out:
+ return ret;
+}
+
+static int sam9x5_wm8731_driver_remove(struct platform_device *pdev)
+{
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct sam9x5_drvdata *priv = card->drvdata;
+
+ snd_soc_unregister_card(card);
+ atmel_ssc_put_audio(priv->ssc_id);
+
+ return 0;
+}
+
+static const struct of_device_id sam9x5_wm8731_of_match[] = {
+ { .compatible = "atmel,sam9x5-wm8731-audio", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sam9x5_wm8731_of_match);
+
+static struct platform_driver sam9x5_wm8731_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(sam9x5_wm8731_of_match),
+ },
+ .probe = sam9x5_wm8731_driver_probe,
+ .remove = sam9x5_wm8731_driver_remove,
+};
+module_platform_driver(sam9x5_wm8731_driver);
+
+/* Module information */
+MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
+MODULE_AUTHOR("Richard Genoud <richard.genoud@gmail.com>");
+MODULE_DESCRIPTION("ALSA SoC machine driver for AT91SAM9x5 - WM8731");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/au1x/ac97c.c b/sound/soc/au1x/ac97c.c
index d6f7694fcad..c8a2de103c5 100644
--- a/sound/soc/au1x/ac97c.c
+++ b/sound/soc/au1x/ac97c.c
@@ -341,7 +341,7 @@ static struct platform_driver au1xac97c_driver = {
.remove = au1xac97c_drvremove,
};
-module_platform_driver(&au1xac97c_driver);
+module_platform_driver(au1xac97c_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Au1000/1500/1100 AC97C ASoC driver");
diff --git a/sound/soc/au1x/db1200.c b/sound/soc/au1x/db1200.c
index a497a0cfeba..decba87a074 100644
--- a/sound/soc/au1x/db1200.c
+++ b/sound/soc/au1x/db1200.c
@@ -73,12 +73,14 @@ static struct snd_soc_dai_link db1300_ac97_dai = {
static struct snd_soc_card db1300_ac97_machine = {
.name = "DB1300_AC97",
+ .owner = THIS_MODULE,
.dai_link = &db1300_ac97_dai,
.num_links = 1,
};
static struct snd_soc_card db1550_ac97_machine = {
.name = "DB1550_AC97",
+ .owner = THIS_MODULE,
.dai_link = &db1200_ac97_dai,
.num_links = 1,
};
@@ -145,6 +147,7 @@ static struct snd_soc_dai_link db1300_i2s_dai = {
static struct snd_soc_card db1300_i2s_machine = {
.name = "DB1300_I2S",
+ .owner = THIS_MODULE,
.dai_link = &db1300_i2s_dai,
.num_links = 1,
};
@@ -161,6 +164,7 @@ static struct snd_soc_dai_link db1550_i2s_dai = {
static struct snd_soc_card db1550_i2s_machine = {
.name = "DB1550_I2S",
+ .owner = THIS_MODULE,
.dai_link = &db1550_i2s_dai,
.num_links = 1,
};
diff --git a/sound/soc/au1x/psc-ac97.c b/sound/soc/au1x/psc-ac97.c
index a822ab822bb..986dcec79fa 100644
--- a/sound/soc/au1x/psc-ac97.c
+++ b/sound/soc/au1x/psc-ac97.c
@@ -379,9 +379,6 @@ static int au1xpsc_ac97_drvprobe(struct platform_device *pdev)
mutex_init(&wd->lock);
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!iores)
- return -ENODEV;
-
wd->mmio = devm_ioremap_resource(&pdev->dev, iores);
if (IS_ERR(wd->mmio))
return PTR_ERR(wd->mmio);
diff --git a/sound/soc/blackfin/bf5xx-ac97.c b/sound/soc/blackfin/bf5xx-ac97.c
index efb1daecd0d..e82eb373a73 100644
--- a/sound/soc/blackfin/bf5xx-ac97.c
+++ b/sound/soc/blackfin/bf5xx-ac97.c
@@ -294,11 +294,12 @@ static int asoc_bfin_ac97_probe(struct platform_device *pdev)
/* Request PB3 as reset pin */
ret = devm_gpio_request_one(&pdev->dev,
CONFIG_SND_BF5XX_RESET_GPIO_NUM,
- GPIOF_OUT_INIT_HIGH, "SND_AD198x RESET") {
+ GPIOF_OUT_INIT_HIGH, "SND_AD198x RESET");
+ if (ret) {
dev_err(&pdev->dev,
"Failed to request GPIO_%d for reset: %d\n",
CONFIG_SND_BF5XX_RESET_GPIO_NUM, ret);
- goto gpio_err;
+ return ret;
}
#endif
diff --git a/sound/soc/blackfin/bf5xx-ac97.h b/sound/soc/blackfin/bf5xx-ac97.h
index 15c635e33f4..a680fdc9bb4 100644
--- a/sound/soc/blackfin/bf5xx-ac97.h
+++ b/sound/soc/blackfin/bf5xx-ac97.h
@@ -9,8 +9,6 @@
#ifndef _BF5XX_AC97_H
#define _BF5XX_AC97_H
-extern struct snd_ac97_bus_ops bf5xx_ac97_ops;
-extern struct snd_ac97 *ac97;
/* Frame format in memory, only support stereo currently */
struct ac97_frame {
u16 ac97_tag; /* slot 0 */
diff --git a/sound/soc/cirrus/ep93xx-ac97.c b/sound/soc/cirrus/ep93xx-ac97.c
index ac73c607410..efa75b5086a 100644
--- a/sound/soc/cirrus/ep93xx-ac97.c
+++ b/sound/soc/cirrus/ep93xx-ac97.c
@@ -102,13 +102,13 @@ static struct ep93xx_ac97_info *ep93xx_ac97_info;
static struct ep93xx_dma_data ep93xx_ac97_pcm_out = {
.name = "ac97-pcm-out",
- .dma_port = EP93XX_DMA_AAC1,
+ .port = EP93XX_DMA_AAC1,
.direction = DMA_MEM_TO_DEV,
};
static struct ep93xx_dma_data ep93xx_ac97_pcm_in = {
.name = "ac97-pcm-in",
- .dma_port = EP93XX_DMA_AAC1,
+ .port = EP93XX_DMA_AAC1,
.direction = DMA_DEV_TO_MEM,
};
@@ -363,9 +363,6 @@ static int ep93xx_ac97_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
info->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(info->regs))
return PTR_ERR(info->regs);
diff --git a/sound/soc/cirrus/ep93xx-i2s.c b/sound/soc/cirrus/ep93xx-i2s.c
index 17ad70bca9f..a57643d6402 100644
--- a/sound/soc/cirrus/ep93xx-i2s.c
+++ b/sound/soc/cirrus/ep93xx-i2s.c
@@ -376,9 +376,6 @@ static int ep93xx_i2s_probe(struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
-
info->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(info->regs))
return PTR_ERR(info->regs);
@@ -411,7 +408,6 @@ static int ep93xx_i2s_probe(struct platform_device *pdev)
return 0;
fail_put_lrclk:
- dev_set_drvdata(&pdev->dev, NULL);
clk_put(info->lrclk);
fail_put_sclk:
clk_put(info->sclk);
@@ -426,7 +422,6 @@ static int ep93xx_i2s_remove(struct platform_device *pdev)
struct ep93xx_i2s_info *info = dev_get_drvdata(&pdev->dev);
snd_soc_unregister_component(&pdev->dev);
- dev_set_drvdata(&pdev->dev, NULL);
clk_put(info->lrclk);
clk_put(info->sclk);
clk_put(info->mclk);
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index badb6fbacaa..15106c04547 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -10,6 +10,7 @@ config SND_SOC_I2C_AND_SPI
config SND_SOC_ALL_CODECS
tristate "Build all ASoC CODEC drivers"
+ depends on COMPILE_TEST
select SND_SOC_88PM860X if MFD_88PM860X
select SND_SOC_L3
select SND_SOC_AB8500_CODEC if ABX500_CORE
@@ -20,6 +21,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_AD73311
select SND_SOC_ADAU1373 if I2C
select SND_SOC_ADAV80X if SND_SOC_I2C_AND_SPI
+ select SND_SOC_ADAU1701 if I2C
select SND_SOC_ADS117X
select SND_SOC_AK4104 if SPI_MASTER
select SND_SOC_AK4535 if I2C
@@ -54,6 +56,8 @@ config SND_SOC_ALL_CODECS
select SND_SOC_MC13783 if MFD_MC13XXX
select SND_SOC_ML26124 if I2C
select SND_SOC_HDMI_CODEC
+ select SND_SOC_PCM1681 if I2C
+ select SND_SOC_PCM1792A if SPI_MASTER
select SND_SOC_PCM3008
select SND_SOC_RT5631 if I2C
select SND_SOC_RT5640 if I2C
@@ -122,6 +126,7 @@ config SND_SOC_ALL_CODECS
select SND_SOC_WM8994 if MFD_WM8994
select SND_SOC_WM8995 if SND_SOC_I2C_AND_SPI
select SND_SOC_WM8996 if I2C
+ select SND_SOC_WM8997 if MFD_WM8997
select SND_SOC_WM9081 if I2C
select SND_SOC_WM9090 if I2C
select SND_SOC_WM9705 if SND_SOC_AC97_BUS
@@ -145,8 +150,10 @@ config SND_SOC_ARIZONA
tristate
default y if SND_SOC_WM5102=y
default y if SND_SOC_WM5110=y
+ default y if SND_SOC_WM8997=y
default m if SND_SOC_WM5102=m
default m if SND_SOC_WM5110=m
+ default m if SND_SOC_WM8997=m
config SND_SOC_WM_HUBS
tristate
@@ -198,6 +205,9 @@ config SND_SOC_AK4104
config SND_SOC_AK4535
tristate
+config SND_SOC_AK4554
+ tristate
+
config SND_SOC_AK4641
tristate
@@ -292,6 +302,12 @@ config SND_SOC_MAX9850
config SND_SOC_HDMI_CODEC
tristate
+config SND_SOC_PCM1681
+ tristate
+
+config SND_SOC_PCM1792A
+ tristate
+
config SND_SOC_PCM3008
tristate
@@ -500,6 +516,9 @@ config SND_SOC_WM8995
config SND_SOC_WM8996
tristate
+config SND_SOC_WM8997
+ tristate
+
config SND_SOC_WM9081
tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 70fd8066f54..bc126764a44 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -11,6 +11,7 @@ snd-soc-adav80x-objs := adav80x.o
snd-soc-ads117x-objs := ads117x.o
snd-soc-ak4104-objs := ak4104.o
snd-soc-ak4535-objs := ak4535.o
+snd-soc-ak4554-objs := ak4554.o
snd-soc-ak4641-objs := ak4641.o
snd-soc-ak4642-objs := ak4642.o
snd-soc-ak4671-objs := ak4671.o
@@ -42,6 +43,8 @@ snd-soc-max9850-objs := max9850.o
snd-soc-mc13783-objs := mc13783.o
snd-soc-ml26124-objs := ml26124.o
snd-soc-hdmi-codec-objs := hdmi.o
+snd-soc-pcm1681-objs := pcm1681.o
+snd-soc-pcm1792a-codec-objs := pcm1792a.o
snd-soc-pcm3008-objs := pcm3008.o
snd-soc-rt5631-objs := rt5631.o
snd-soc-rt5640-objs := rt5640.o
@@ -114,6 +117,7 @@ snd-soc-wm8991-objs := wm8991.o
snd-soc-wm8993-objs := wm8993.o
snd-soc-wm8994-objs := wm8994.o wm8958-dsp2.o
snd-soc-wm8995-objs := wm8995.o
+snd-soc-wm8997-objs := wm8997.o
snd-soc-wm9081-objs := wm9081.o
snd-soc-wm9090-objs := wm9090.o
snd-soc-wm9705-objs := wm9705.o
@@ -138,6 +142,7 @@ obj-$(CONFIG_SND_SOC_ADAV80X) += snd-soc-adav80x.o
obj-$(CONFIG_SND_SOC_ADS117X) += snd-soc-ads117x.o
obj-$(CONFIG_SND_SOC_AK4104) += snd-soc-ak4104.o
obj-$(CONFIG_SND_SOC_AK4535) += snd-soc-ak4535.o
+obj-$(CONFIG_SND_SOC_AK4554) += snd-soc-ak4554.o
obj-$(CONFIG_SND_SOC_AK4641) += snd-soc-ak4641.o
obj-$(CONFIG_SND_SOC_AK4642) += snd-soc-ak4642.o
obj-$(CONFIG_SND_SOC_AK4671) += snd-soc-ak4671.o
@@ -171,6 +176,8 @@ obj-$(CONFIG_SND_SOC_MAX9850) += snd-soc-max9850.o
obj-$(CONFIG_SND_SOC_MC13783) += snd-soc-mc13783.o
obj-$(CONFIG_SND_SOC_ML26124) += snd-soc-ml26124.o
obj-$(CONFIG_SND_SOC_HDMI_CODEC) += snd-soc-hdmi-codec.o
+obj-$(CONFIG_SND_SOC_PCM1681) += snd-soc-pcm1681.o
+obj-$(CONFIG_SND_SOC_PCM1792A) += snd-soc-pcm1792a-codec.o
obj-$(CONFIG_SND_SOC_PCM3008) += snd-soc-pcm3008.o
obj-$(CONFIG_SND_SOC_RT5631) += snd-soc-rt5631.o
obj-$(CONFIG_SND_SOC_RT5640) += snd-soc-rt5640.o
@@ -239,6 +246,7 @@ obj-$(CONFIG_SND_SOC_WM8991) += snd-soc-wm8991.o
obj-$(CONFIG_SND_SOC_WM8993) += snd-soc-wm8993.o
obj-$(CONFIG_SND_SOC_WM8994) += snd-soc-wm8994.o
obj-$(CONFIG_SND_SOC_WM8995) += snd-soc-wm8995.o
+obj-$(CONFIG_SND_SOC_WM8997) += snd-soc-wm8997.o
obj-$(CONFIG_SND_SOC_WM9081) += snd-soc-wm9081.o
obj-$(CONFIG_SND_SOC_WM9090) += snd-soc-wm9090.o
obj-$(CONFIG_SND_SOC_WM9705) += snd-soc-wm9705.o
diff --git a/sound/soc/codecs/ac97.c b/sound/soc/codecs/ac97.c
index ec7351803c2..8d9ba4ba4bf 100644
--- a/sound/soc/codecs/ac97.c
+++ b/sound/soc/codecs/ac97.c
@@ -23,6 +23,16 @@
#include <sound/initval.h>
#include <sound/soc.h>
+static const struct snd_soc_dapm_widget ac97_widgets[] = {
+ SND_SOC_DAPM_INPUT("RX"),
+ SND_SOC_DAPM_OUTPUT("TX"),
+};
+
+static const struct snd_soc_dapm_route ac97_routes[] = {
+ { "AC97 Capture", NULL, "RX" },
+ { "TX", NULL, "AC97 Playback" },
+};
+
static int ac97_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
@@ -117,6 +127,11 @@ static struct snd_soc_codec_driver soc_codec_dev_ac97 = {
.probe = ac97_soc_probe,
.suspend = ac97_soc_suspend,
.resume = ac97_soc_resume,
+
+ .dapm_widgets = ac97_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ac97_widgets),
+ .dapm_routes = ac97_routes,
+ .num_dapm_routes = ARRAY_SIZE(ac97_routes),
};
static int ac97_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c
index 89fcf7d6e7b..7257a8885f4 100644
--- a/sound/soc/codecs/ad1980.c
+++ b/sound/soc/codecs/ad1980.c
@@ -96,6 +96,44 @@ SOC_ENUM("Capture Source", ad1980_cap_src),
SOC_SINGLE("Mic Boost Switch", AC97_MIC, 6, 1, 0),
};
+static const struct snd_soc_dapm_widget ad1980_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("MIC1"),
+SND_SOC_DAPM_INPUT("MIC2"),
+SND_SOC_DAPM_INPUT("CD_L"),
+SND_SOC_DAPM_INPUT("CD_R"),
+SND_SOC_DAPM_INPUT("AUX_L"),
+SND_SOC_DAPM_INPUT("AUX_R"),
+SND_SOC_DAPM_INPUT("LINE_IN_L"),
+SND_SOC_DAPM_INPUT("LINE_IN_R"),
+
+SND_SOC_DAPM_OUTPUT("LFE_OUT"),
+SND_SOC_DAPM_OUTPUT("CENTER_OUT"),
+SND_SOC_DAPM_OUTPUT("LINE_OUT_L"),
+SND_SOC_DAPM_OUTPUT("LINE_OUT_R"),
+SND_SOC_DAPM_OUTPUT("MONO_OUT"),
+SND_SOC_DAPM_OUTPUT("HP_OUT_L"),
+SND_SOC_DAPM_OUTPUT("HP_OUT_R"),
+};
+
+static const struct snd_soc_dapm_route ad1980_dapm_routes[] = {
+ { "Capture", NULL, "MIC1" },
+ { "Capture", NULL, "MIC2" },
+ { "Capture", NULL, "CD_L" },
+ { "Capture", NULL, "CD_R" },
+ { "Capture", NULL, "AUX_L" },
+ { "Capture", NULL, "AUX_R" },
+ { "Capture", NULL, "LINE_IN_L" },
+ { "Capture", NULL, "LINE_IN_R" },
+
+ { "LFE_OUT", NULL, "Playback" },
+ { "CENTER_OUT", NULL, "Playback" },
+ { "LINE_OUT_L", NULL, "Playback" },
+ { "LINE_OUT_R", NULL, "Playback" },
+ { "MONO_OUT", NULL, "Playback" },
+ { "HP_OUT_L", NULL, "Playback" },
+ { "HP_OUT_R", NULL, "Playback" },
+};
+
static unsigned int ac97_read(struct snd_soc_codec *codec,
unsigned int reg)
{
@@ -253,6 +291,11 @@ static struct snd_soc_codec_driver soc_codec_dev_ad1980 = {
.reg_cache_step = 2,
.write = ac97_write,
.read = ac97_read,
+
+ .dapm_widgets = ad1980_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ad1980_dapm_widgets),
+ .dapm_routes = ad1980_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(ad1980_dapm_routes),
};
static int ad1980_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/ad73311.c b/sound/soc/codecs/ad73311.c
index b1f2baf42b4..5fac8adbc13 100644
--- a/sound/soc/codecs/ad73311.c
+++ b/sound/soc/codecs/ad73311.c
@@ -23,6 +23,21 @@
#include "ad73311.h"
+static const struct snd_soc_dapm_widget ad73311_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("VINP"),
+SND_SOC_DAPM_INPUT("VINN"),
+SND_SOC_DAPM_OUTPUT("VOUTN"),
+SND_SOC_DAPM_OUTPUT("VOUTP"),
+};
+
+static const struct snd_soc_dapm_route ad73311_dapm_routes[] = {
+ { "Capture", NULL, "VINP" },
+ { "Capture", NULL, "VINN" },
+
+ { "VOUTN", NULL, "Playback" },
+ { "VOUTP", NULL, "Playback" },
+};
+
static struct snd_soc_dai_driver ad73311_dai = {
.name = "ad73311-hifi",
.playback = {
@@ -39,7 +54,12 @@ static struct snd_soc_dai_driver ad73311_dai = {
.formats = SNDRV_PCM_FMTBIT_S16_LE, },
};
-static struct snd_soc_codec_driver soc_codec_dev_ad73311;
+static struct snd_soc_codec_driver soc_codec_dev_ad73311 = {
+ .dapm_widgets = ad73311_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ad73311_dapm_widgets),
+ .dapm_routes = ad73311_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(ad73311_dapm_routes),
+};
static int ad73311_probe(struct platform_device *pdev)
{
diff --git a/sound/soc/codecs/adau1701.c b/sound/soc/codecs/adau1701.c
index d1124a5b347..ebff1128be5 100644
--- a/sound/soc/codecs/adau1701.c
+++ b/sound/soc/codecs/adau1701.c
@@ -91,7 +91,7 @@
#define ADAU1701_OSCIPOW_OPD 0x04
#define ADAU1701_DACSET_DACINIT 1
-#define ADAU1707_CLKDIV_UNSET (-1UL)
+#define ADAU1707_CLKDIV_UNSET (-1U)
#define ADAU1701_FIRMWARE "adau1701.bin"
@@ -247,21 +247,21 @@ static int adau1701_reset(struct snd_soc_codec *codec, unsigned int clkdiv)
gpio_is_valid(adau1701->gpio_pll_mode[1])) {
switch (clkdiv) {
case 64:
- gpio_set_value(adau1701->gpio_pll_mode[0], 0);
- gpio_set_value(adau1701->gpio_pll_mode[1], 0);
+ gpio_set_value_cansleep(adau1701->gpio_pll_mode[0], 0);
+ gpio_set_value_cansleep(adau1701->gpio_pll_mode[1], 0);
break;
case 256:
- gpio_set_value(adau1701->gpio_pll_mode[0], 0);
- gpio_set_value(adau1701->gpio_pll_mode[1], 1);
+ gpio_set_value_cansleep(adau1701->gpio_pll_mode[0], 0);
+ gpio_set_value_cansleep(adau1701->gpio_pll_mode[1], 1);
break;
case 384:
- gpio_set_value(adau1701->gpio_pll_mode[0], 1);
- gpio_set_value(adau1701->gpio_pll_mode[1], 0);
+ gpio_set_value_cansleep(adau1701->gpio_pll_mode[0], 1);
+ gpio_set_value_cansleep(adau1701->gpio_pll_mode[1], 0);
break;
case 0: /* fallback */
case 512:
- gpio_set_value(adau1701->gpio_pll_mode[0], 1);
- gpio_set_value(adau1701->gpio_pll_mode[1], 1);
+ gpio_set_value_cansleep(adau1701->gpio_pll_mode[0], 1);
+ gpio_set_value_cansleep(adau1701->gpio_pll_mode[1], 1);
break;
}
}
@@ -269,10 +269,10 @@ static int adau1701_reset(struct snd_soc_codec *codec, unsigned int clkdiv)
adau1701->pll_clkdiv = clkdiv;
if (gpio_is_valid(adau1701->gpio_nreset)) {
- gpio_set_value(adau1701->gpio_nreset, 0);
+ gpio_set_value_cansleep(adau1701->gpio_nreset, 0);
/* minimum reset time is 20ns */
udelay(1);
- gpio_set_value(adau1701->gpio_nreset, 1);
+ gpio_set_value_cansleep(adau1701->gpio_nreset, 1);
/* power-up time may be as long as 85ms */
mdelay(85);
}
@@ -734,7 +734,10 @@ static int adau1701_i2c_remove(struct i2c_client *client)
}
static const struct i2c_device_id adau1701_i2c_id[] = {
+ { "adau1401", 0 },
+ { "adau1401a", 0 },
{ "adau1701", 0 },
+ { "adau1702", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adau1701_i2c_id);
diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
index 3c839cc4e00..15b012d0f22 100644
--- a/sound/soc/codecs/adav80x.c
+++ b/sound/soc/codecs/adav80x.c
@@ -868,6 +868,12 @@ static int adav80x_bus_remove(struct device *dev)
}
#if defined(CONFIG_SPI_MASTER)
+static const struct spi_device_id adav80x_spi_id[] = {
+ { "adav801", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, adav80x_spi_id);
+
static int adav80x_spi_probe(struct spi_device *spi)
{
return adav80x_bus_probe(&spi->dev, SND_SOC_SPI);
@@ -885,15 +891,16 @@ static struct spi_driver adav80x_spi_driver = {
},
.probe = adav80x_spi_probe,
.remove = adav80x_spi_remove,
+ .id_table = adav80x_spi_id,
};
#endif
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
-static const struct i2c_device_id adav80x_id[] = {
+static const struct i2c_device_id adav80x_i2c_id[] = {
{ "adav803", 0 },
{ }
};
-MODULE_DEVICE_TABLE(i2c, adav80x_id);
+MODULE_DEVICE_TABLE(i2c, adav80x_i2c_id);
static int adav80x_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
@@ -913,7 +920,7 @@ static struct i2c_driver adav80x_i2c_driver = {
},
.probe = adav80x_i2c_probe,
.remove = adav80x_i2c_remove,
- .id_table = adav80x_id,
+ .id_table = adav80x_i2c_id,
};
#endif
diff --git a/sound/soc/codecs/ads117x.c b/sound/soc/codecs/ads117x.c
index 506d474c4d2..8f388edff58 100644
--- a/sound/soc/codecs/ads117x.c
+++ b/sound/soc/codecs/ads117x.c
@@ -23,6 +23,28 @@
#define ADS117X_RATES (SNDRV_PCM_RATE_8000_48000)
#define ADS117X_FORMATS (SNDRV_PCM_FMTBIT_S16_LE)
+static const struct snd_soc_dapm_widget ads117x_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("Input1"),
+SND_SOC_DAPM_INPUT("Input2"),
+SND_SOC_DAPM_INPUT("Input3"),
+SND_SOC_DAPM_INPUT("Input4"),
+SND_SOC_DAPM_INPUT("Input5"),
+SND_SOC_DAPM_INPUT("Input6"),
+SND_SOC_DAPM_INPUT("Input7"),
+SND_SOC_DAPM_INPUT("Input8"),
+};
+
+static const struct snd_soc_dapm_route ads117x_dapm_routes[] = {
+ { "Capture", NULL, "Input1" },
+ { "Capture", NULL, "Input2" },
+ { "Capture", NULL, "Input3" },
+ { "Capture", NULL, "Input4" },
+ { "Capture", NULL, "Input5" },
+ { "Capture", NULL, "Input6" },
+ { "Capture", NULL, "Input7" },
+ { "Capture", NULL, "Input8" },
+};
+
static struct snd_soc_dai_driver ads117x_dai = {
/* ADC */
.name = "ads117x-hifi",
@@ -34,7 +56,12 @@ static struct snd_soc_dai_driver ads117x_dai = {
.formats = ADS117X_FORMATS,},
};
-static struct snd_soc_codec_driver soc_codec_dev_ads117x;
+static struct snd_soc_codec_driver soc_codec_dev_ads117x = {
+ .dapm_widgets = ads117x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ads117x_dapm_widgets),
+ .dapm_routes = ads117x_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(ads117x_dapm_routes),
+};
static int ads117x_probe(struct platform_device *pdev)
{
diff --git a/sound/soc/codecs/ak4104.c b/sound/soc/codecs/ak4104.c
index c7cfdf957e4..71059c07ae7 100644
--- a/sound/soc/codecs/ak4104.c
+++ b/sound/soc/codecs/ak4104.c
@@ -51,6 +51,17 @@ struct ak4104_private {
struct regmap *regmap;
};
+static const struct snd_soc_dapm_widget ak4104_dapm_widgets[] = {
+SND_SOC_DAPM_PGA("TXE", AK4104_REG_TX, AK4104_TX_TXE, 0, NULL, 0),
+
+SND_SOC_DAPM_OUTPUT("TX"),
+};
+
+static const struct snd_soc_dapm_route ak4104_dapm_routes[] = {
+ { "TXE", NULL, "Playback" },
+ { "TX", NULL, "TXE" },
+};
+
static int ak4104_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int format)
{
@@ -138,29 +149,11 @@ static int ak4104_hw_params(struct snd_pcm_substream *substream,
if (ret < 0)
return ret;
- /* enable transmitter */
- ret = regmap_update_bits(ak4104->regmap, AK4104_REG_TX,
- AK4104_TX_TXE, AK4104_TX_TXE);
- if (ret < 0)
- return ret;
-
return 0;
}
-static int ak4104_hw_free(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- struct snd_soc_codec *codec = dai->codec;
- struct ak4104_private *ak4104 = snd_soc_codec_get_drvdata(codec);
-
- /* disable transmitter */
- return regmap_update_bits(ak4104->regmap, AK4104_REG_TX,
- AK4104_TX_TXE, 0);
-}
-
static const struct snd_soc_dai_ops ak4101_dai_ops = {
.hw_params = ak4104_hw_params,
- .hw_free = ak4104_hw_free,
.set_fmt = ak4104_set_dai_fmt,
};
@@ -214,6 +207,11 @@ static int ak4104_remove(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_device_ak4104 = {
.probe = ak4104_probe,
.remove = ak4104_remove,
+
+ .dapm_widgets = ak4104_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ak4104_dapm_widgets),
+ .dapm_routes = ak4104_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(ak4104_dapm_routes),
};
static const struct regmap_config ak4104_regmap = {
diff --git a/sound/soc/codecs/ak4554.c b/sound/soc/codecs/ak4554.c
new file mode 100644
index 00000000000..79e9555766c
--- /dev/null
+++ b/sound/soc/codecs/ak4554.c
@@ -0,0 +1,106 @@
+/*
+ * ak4554.c
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <sound/soc.h>
+
+/*
+ * The ak4554 is a very simple DA/AD converter with no configuration registers.
+ *
+ * CAUTION
+ *
+ * The ak4554 playback format is SND_SOC_DAIFMT_RIGHT_J, while its capture
+ * format is SND_SOC_DAIFMT_LEFT_J, both on the same bit clock and LR clock.
+ * Since there is nothing to configure, this driver does not provide
+ * snd_soc_dai_ops::set_fmt.
+ *
+ * CPU/Codec DAI image
+ *
+ * CPU-DAI1 (playback only, fmt = RIGHT_J) --+-- ak4554
+ *                                           |
+ * CPU-DAI2 (capture only, fmt = LEFT_J)  ---+
+ */
+
+static const struct snd_soc_dapm_widget ak4554_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("AINL"),
+SND_SOC_DAPM_INPUT("AINR"),
+
+SND_SOC_DAPM_OUTPUT("AOUTL"),
+SND_SOC_DAPM_OUTPUT("AOUTR"),
+};
+
+static const struct snd_soc_dapm_route ak4554_dapm_routes[] = {
+ { "Capture", NULL, "AINL" },
+ { "Capture", NULL, "AINR" },
+
+ { "AOUTL", NULL, "Playback" },
+ { "AOUTR", NULL, "Playback" },
+};
+
+static struct snd_soc_dai_driver ak4554_dai = {
+ .name = "ak4554-hifi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .symmetric_rates = 1,
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_ak4554 = {
+ .dapm_widgets = ak4554_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ak4554_dapm_widgets),
+ .dapm_routes = ak4554_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(ak4554_dapm_routes),
+};
+
+static int ak4554_soc_probe(struct platform_device *pdev)
+{
+ return snd_soc_register_codec(&pdev->dev,
+ &soc_codec_dev_ak4554,
+ &ak4554_dai, 1);
+}
+
+static int ak4554_soc_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_codec(&pdev->dev);
+ return 0;
+}
+
+static struct of_device_id ak4554_of_match[] = {
+ { .compatible = "asahi-kasei,ak4554" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ak4554_of_match);
+
+static struct platform_driver ak4554_driver = {
+ .driver = {
+ .name = "ak4554-adc-dac",
+ .owner = THIS_MODULE,
+ .of_match_table = ak4554_of_match,
+ },
+ .probe = ak4554_soc_probe,
+ .remove = ak4554_soc_remove,
+};
+module_platform_driver(ak4554_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SoC AK4554 driver");
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
diff --git a/sound/soc/codecs/ak5386.c b/sound/soc/codecs/ak5386.c
index 1f303983ae0..72e953b2cb4 100644
--- a/sound/soc/codecs/ak5386.c
+++ b/sound/soc/codecs/ak5386.c
@@ -22,7 +22,22 @@ struct ak5386_priv {
int reset_gpio;
};
-static struct snd_soc_codec_driver soc_codec_ak5386;
+static const struct snd_soc_dapm_widget ak5386_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("AINL"),
+SND_SOC_DAPM_INPUT("AINR"),
+};
+
+static const struct snd_soc_dapm_route ak5386_dapm_routes[] = {
+ { "Capture", NULL, "AINL" },
+ { "Capture", NULL, "AINR" },
+};
+
+static struct snd_soc_codec_driver soc_codec_ak5386 = {
+ .dapm_widgets = ak5386_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(ak5386_dapm_widgets),
+ .dapm_routes = ak5386_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(ak5386_dapm_routes),
+};
static int ak5386_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int format)
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index de625813c0e..657808ba141 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -19,6 +19,7 @@
#include <sound/tlv.h>
#include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/gpio.h>
#include <linux/mfd/arizona/registers.h>
#include "arizona.h"
@@ -199,9 +200,16 @@ int arizona_init_spk(struct snd_soc_codec *codec)
if (ret != 0)
return ret;
- ret = snd_soc_dapm_new_controls(&codec->dapm, &arizona_spkr, 1);
- if (ret != 0)
- return ret;
+ switch (arizona->type) {
+ case WM8997:
+ break;
+ default:
+ ret = snd_soc_dapm_new_controls(&codec->dapm,
+ &arizona_spkr, 1);
+ if (ret != 0)
+ return ret;
+ break;
+ }
ret = arizona_request_irq(arizona, ARIZONA_IRQ_SPK_SHUTDOWN_WARN,
"Thermal warning", arizona_thermal_warn,
@@ -223,6 +231,41 @@ int arizona_init_spk(struct snd_soc_codec *codec)
}
EXPORT_SYMBOL_GPL(arizona_init_spk);
+int arizona_init_gpio(struct snd_soc_codec *codec)
+{
+ struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->arizona;
+ int i;
+
+ switch (arizona->type) {
+ case WM5110:
+ snd_soc_dapm_disable_pin(&codec->dapm, "DRC2 Signal Activity");
+ break;
+ default:
+ break;
+ }
+
+ snd_soc_dapm_disable_pin(&codec->dapm, "DRC1 Signal Activity");
+
+ for (i = 0; i < ARRAY_SIZE(arizona->pdata.gpio_defaults); i++) {
+ switch (arizona->pdata.gpio_defaults[i] & ARIZONA_GPN_FN_MASK) {
+ case ARIZONA_GP_FN_DRC1_SIGNAL_DETECT:
+ snd_soc_dapm_enable_pin(&codec->dapm,
+ "DRC1 Signal Activity");
+ break;
+ case ARIZONA_GP_FN_DRC2_SIGNAL_DETECT:
+ snd_soc_dapm_enable_pin(&codec->dapm,
+ "DRC2 Signal Activity");
+ break;
+ default:
+ break;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(arizona_init_gpio);
+
const char *arizona_mixer_texts[ARIZONA_NUM_MIXER_INPUTS] = {
"None",
"Tone Generator 1",
@@ -517,6 +560,26 @@ const struct soc_enum arizona_ng_hold =
4, arizona_ng_hold_text);
EXPORT_SYMBOL_GPL(arizona_ng_hold);
+static const char * const arizona_in_dmic_osr_text[] = {
+ "1.536MHz", "3.072MHz", "6.144MHz",
+};
+
+const struct soc_enum arizona_in_dmic_osr[] = {
+ SOC_ENUM_SINGLE(ARIZONA_IN1L_CONTROL, ARIZONA_IN1_OSR_SHIFT,
+ ARRAY_SIZE(arizona_in_dmic_osr_text),
+ arizona_in_dmic_osr_text),
+ SOC_ENUM_SINGLE(ARIZONA_IN2L_CONTROL, ARIZONA_IN2_OSR_SHIFT,
+ ARRAY_SIZE(arizona_in_dmic_osr_text),
+ arizona_in_dmic_osr_text),
+ SOC_ENUM_SINGLE(ARIZONA_IN3L_CONTROL, ARIZONA_IN3_OSR_SHIFT,
+ ARRAY_SIZE(arizona_in_dmic_osr_text),
+ arizona_in_dmic_osr_text),
+ SOC_ENUM_SINGLE(ARIZONA_IN4L_CONTROL, ARIZONA_IN4_OSR_SHIFT,
+ ARRAY_SIZE(arizona_in_dmic_osr_text),
+ arizona_in_dmic_osr_text),
+};
+EXPORT_SYMBOL_GPL(arizona_in_dmic_osr);
+
static void arizona_in_set_vu(struct snd_soc_codec *codec, int ena)
{
struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
index b60b08ccc1d..9e81b639269 100644
--- a/sound/soc/codecs/arizona.h
+++ b/sound/soc/codecs/arizona.h
@@ -150,7 +150,8 @@ extern int arizona_mixer_values[ARIZONA_NUM_MIXER_INPUTS];
ARIZONA_MUX(name_str " Aux 5", &name##_aux5_mux), \
ARIZONA_MUX(name_str " Aux 6", &name##_aux6_mux)
-#define ARIZONA_MUX_ROUTES(name) \
+#define ARIZONA_MUX_ROUTES(widget, name) \
+ { widget, NULL, name " Input" }, \
ARIZONA_MIXER_INPUT_ROUTES(name " Input")
#define ARIZONA_MIXER_ROUTES(widget, name) \
@@ -198,6 +199,7 @@ extern const struct soc_enum arizona_lhpf3_mode;
extern const struct soc_enum arizona_lhpf4_mode;
extern const struct soc_enum arizona_ng_hold;
+extern const struct soc_enum arizona_in_dmic_osr[];
extern int arizona_in_ev(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
@@ -242,6 +244,7 @@ extern int arizona_set_fll(struct arizona_fll *fll, int source,
unsigned int Fref, unsigned int Fout);
extern int arizona_init_spk(struct snd_soc_codec *codec);
+extern int arizona_init_gpio(struct snd_soc_codec *codec);
extern int arizona_init_dai(struct arizona_priv *priv, int dai);
diff --git a/sound/soc/codecs/bt-sco.c b/sound/soc/codecs/bt-sco.c
index a081d9fcb16..c4cf0699e77 100644
--- a/sound/soc/codecs/bt-sco.c
+++ b/sound/soc/codecs/bt-sco.c
@@ -15,15 +15,27 @@
#include <sound/soc.h>
+static const struct snd_soc_dapm_widget bt_sco_widgets[] = {
+ SND_SOC_DAPM_INPUT("RX"),
+ SND_SOC_DAPM_OUTPUT("TX"),
+};
+
+static const struct snd_soc_dapm_route bt_sco_routes[] = {
+ { "Capture", NULL, "RX" },
+ { "TX", NULL, "Playback" },
+};
+
static struct snd_soc_dai_driver bt_sco_dai = {
.name = "bt-sco-pcm",
.playback = {
+ .stream_name = "Playback",
.channels_min = 1,
.channels_max = 1,
.rates = SNDRV_PCM_RATE_8000,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
},
.capture = {
+ .stream_name = "Capture",
.channels_min = 1,
.channels_max = 1,
.rates = SNDRV_PCM_RATE_8000,
@@ -31,7 +43,12 @@ static struct snd_soc_dai_driver bt_sco_dai = {
},
};
-static struct snd_soc_codec_driver soc_codec_dev_bt_sco;
+static struct snd_soc_codec_driver soc_codec_dev_bt_sco = {
+ .dapm_widgets = bt_sco_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(bt_sco_widgets),
+ .dapm_routes = bt_sco_routes,
+ .num_dapm_routes = ARRAY_SIZE(bt_sco_routes),
+};
static int bt_sco_probe(struct platform_device *pdev)
{
@@ -50,6 +67,9 @@ static struct platform_device_id bt_sco_driver_ids[] = {
{
.name = "dfbmcs320",
},
+ {
+ .name = "bt-sco",
+ },
{},
};
MODULE_DEVICE_TABLE(platform, bt_sco_driver_ids);
diff --git a/sound/soc/codecs/cs4270.c b/sound/soc/codecs/cs4270.c
index 8e4779812b9..83c835d9fd8 100644
--- a/sound/soc/codecs/cs4270.c
+++ b/sound/soc/codecs/cs4270.c
@@ -139,6 +139,22 @@ struct cs4270_private {
struct regulator_bulk_data supplies[ARRAY_SIZE(supply_names)];
};
+static const struct snd_soc_dapm_widget cs4270_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("AINL"),
+SND_SOC_DAPM_INPUT("AINR"),
+
+SND_SOC_DAPM_OUTPUT("AOUTL"),
+SND_SOC_DAPM_OUTPUT("AOUTR"),
+};
+
+static const struct snd_soc_dapm_route cs4270_dapm_routes[] = {
+ { "Capture", NULL, "AINA" },
+ { "Capture", NULL, "AINB" },
+
+ { "AOUTA", NULL, "Playback" },
+ { "AOUTB", NULL, "Playback" },
+};
+
/**
* struct cs4270_mode_ratios - clock ratio tables
* @ratio: the ratio of MCLK to the sample rate
@@ -612,6 +628,10 @@ static const struct snd_soc_codec_driver soc_codec_device_cs4270 = {
.controls = cs4270_snd_controls,
.num_controls = ARRAY_SIZE(cs4270_snd_controls),
+ .dapm_widgets = cs4270_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs4270_dapm_widgets),
+ .dapm_routes = cs4270_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(cs4270_dapm_routes),
};
/*
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 03036b32673..a20f1bb8f07 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -173,6 +173,26 @@ struct cs4271_private {
bool enable_soft_reset;
};
+static const struct snd_soc_dapm_widget cs4271_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("AINA"),
+SND_SOC_DAPM_INPUT("AINB"),
+
+SND_SOC_DAPM_OUTPUT("AOUTA+"),
+SND_SOC_DAPM_OUTPUT("AOUTA-"),
+SND_SOC_DAPM_OUTPUT("AOUTB+"),
+SND_SOC_DAPM_OUTPUT("AOUTB-"),
+};
+
+static const struct snd_soc_dapm_route cs4271_dapm_routes[] = {
+ { "Capture", NULL, "AINA" },
+ { "Capture", NULL, "AINB" },
+
+ { "AOUTA+", NULL, "Playback" },
+ { "AOUTA-", NULL, "Playback" },
+ { "AOUTB+", NULL, "Playback" },
+ { "AOUTB-", NULL, "Playback" },
+};
+
/*
* @freq is the desired MCLK rate
* MCLK rate should (c) be the sample rate, multiplied by one of the
@@ -576,8 +596,7 @@ static int cs4271_probe(struct snd_soc_codec *codec)
CS4271_MODE2_MUTECAEQUB,
CS4271_MODE2_MUTECAEQUB);
- return snd_soc_add_codec_controls(codec, cs4271_snd_controls,
- ARRAY_SIZE(cs4271_snd_controls));
+ return 0;
}
static int cs4271_remove(struct snd_soc_codec *codec)
@@ -596,6 +615,13 @@ static struct snd_soc_codec_driver soc_codec_dev_cs4271 = {
.remove = cs4271_remove,
.suspend = cs4271_soc_suspend,
.resume = cs4271_soc_resume,
+
+ .controls = cs4271_snd_controls,
+ .num_controls = ARRAY_SIZE(cs4271_snd_controls),
+ .dapm_widgets = cs4271_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(cs4271_dapm_widgets),
+ .dapm_routes = cs4271_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(cs4271_dapm_routes),
};
#if defined(CONFIG_SPI_MASTER)
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c
index 987f728718c..be2ba1b6fe4 100644
--- a/sound/soc/codecs/cs42l52.c
+++ b/sound/soc/codecs/cs42l52.c
@@ -195,6 +195,8 @@ static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0);
static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0);
+static DECLARE_TLV_DB_SCALE(beep_tlv, -56, 200, 0);
+
static const unsigned int limiter_tlv[] = {
TLV_DB_RANGE_HEAD(2),
0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0),
@@ -451,7 +453,8 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = {
SOC_ENUM("Beep Pitch", beep_pitch_enum),
SOC_ENUM("Beep on Time", beep_ontime_enum),
SOC_ENUM("Beep off Time", beep_offtime_enum),
- SOC_SINGLE_TLV("Beep Volume", CS42L52_BEEP_VOL, 0, 0x1f, 0x07, hl_tlv),
+ SOC_SINGLE_SX_TLV("Beep Volume", CS42L52_BEEP_VOL,
+ 0, 0x07, 0x1f, beep_tlv),
SOC_SINGLE("Beep Mixer Switch", CS42L52_BEEP_TONE_CTL, 5, 1, 1),
SOC_ENUM("Beep Treble Corner Freq", beep_treble_enum),
SOC_ENUM("Beep Bass Corner Freq", beep_bass_enum),
diff --git a/sound/soc/codecs/dmic.c b/sound/soc/codecs/dmic.c
index 66967ba6f75..b2090b2a5e2 100644
--- a/sound/soc/codecs/dmic.c
+++ b/sound/soc/codecs/dmic.c
@@ -50,20 +50,11 @@ static const struct snd_soc_dapm_route intercon[] = {
{"DMIC AIF", NULL, "DMic"},
};
-static int dmic_probe(struct snd_soc_codec *codec)
-{
- struct snd_soc_dapm_context *dapm = &codec->dapm;
-
- snd_soc_dapm_new_controls(dapm, dmic_dapm_widgets,
- ARRAY_SIZE(dmic_dapm_widgets));
- snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
- snd_soc_dapm_new_widgets(dapm);
-
- return 0;
-}
-
static struct snd_soc_codec_driver soc_dmic = {
- .probe = dmic_probe,
+ .dapm_widgets = dmic_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(dmic_dapm_widgets),
+ .dapm_routes = intercon,
+ .num_dapm_routes = ARRAY_SIZE(intercon),
};
static int dmic_dev_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/hdmi.c b/sound/soc/codecs/hdmi.c
index 2bcae2b40c9..68342b121c9 100644
--- a/sound/soc/codecs/hdmi.c
+++ b/sound/soc/codecs/hdmi.c
@@ -23,11 +23,20 @@
#define DRV_NAME "hdmi-audio-codec"
-static struct snd_soc_codec_driver hdmi_codec;
+static const struct snd_soc_dapm_widget hdmi_widgets[] = {
+ SND_SOC_DAPM_INPUT("RX"),
+ SND_SOC_DAPM_OUTPUT("TX"),
+};
+
+static const struct snd_soc_dapm_route hdmi_routes[] = {
+ { "Capture", NULL, "RX" },
+ { "TX", NULL, "Playback" },
+};
static struct snd_soc_dai_driver hdmi_codec_dai = {
.name = "hdmi-hifi",
.playback = {
+ .stream_name = "Playback",
.channels_min = 2,
.channels_max = 8,
.rates = SNDRV_PCM_RATE_32000 |
@@ -37,6 +46,25 @@ static struct snd_soc_dai_driver hdmi_codec_dai = {
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE,
},
+ .capture = {
+ .stream_name = "Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_32000 |
+ SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 |
+ SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE |
+ SNDRV_PCM_FMTBIT_S24_LE,
+ },
+
+};
+
+static struct snd_soc_codec_driver hdmi_codec = {
+ .dapm_widgets = hdmi_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(hdmi_widgets),
+ .dapm_routes = hdmi_routes,
+ .num_dapm_routes = ARRAY_SIZE(hdmi_routes),
};
static int hdmi_codec_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/lm4857.c b/sound/soc/codecs/lm4857.c
index 9f9f59573f7..0e5743ea79d 100644
--- a/sound/soc/codecs/lm4857.c
+++ b/sound/soc/codecs/lm4857.c
@@ -16,6 +16,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/i2c.h>
+#include <linux/regmap.h>
#include <linux/slab.h>
#include <sound/core.h>
@@ -23,12 +24,15 @@
#include <sound/tlv.h>
struct lm4857 {
- struct i2c_client *i2c;
+ struct regmap *regmap;
uint8_t mode;
};
-static const uint8_t lm4857_default_regs[] = {
- 0x00, 0x00, 0x00, 0x00,
+static const struct reg_default lm4857_default_regs[] = {
+ { 0x0, 0x00 },
+ { 0x1, 0x00 },
+ { 0x2, 0x00 },
+ { 0x3, 0x00 },
};
/* The register offsets in the cache array */
@@ -42,39 +46,6 @@ static const uint8_t lm4857_default_regs[] = {
#define LM4857_WAKEUP 5
#define LM4857_EPGAIN 4
-static int lm4857_write(struct snd_soc_codec *codec, unsigned int reg,
- unsigned int value)
-{
- uint8_t data;
- int ret;
-
- ret = snd_soc_cache_write(codec, reg, value);
- if (ret < 0)
- return ret;
-
- data = (reg << 6) | value;
- ret = i2c_master_send(codec->control_data, &data, 1);
- if (ret != 1) {
- dev_err(codec->dev, "Failed to write register: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static unsigned int lm4857_read(struct snd_soc_codec *codec,
- unsigned int reg)
-{
- unsigned int val;
- int ret;
-
- ret = snd_soc_cache_read(codec, reg, &val);
- if (ret)
- return -1;
-
- return val;
-}
-
static int lm4857_get_mode(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -96,7 +67,7 @@ static int lm4857_set_mode(struct snd_kcontrol *kcontrol,
lm4857->mode = value;
if (codec->dapm.bias_level == SND_SOC_BIAS_ON)
- snd_soc_update_bits(codec, LM4857_CTRL, 0x0F, value + 6);
+ regmap_update_bits(lm4857->regmap, LM4857_CTRL, 0x0F, value + 6);
return 1;
}
@@ -108,10 +79,11 @@ static int lm4857_set_bias_level(struct snd_soc_codec *codec,
switch (level) {
case SND_SOC_BIAS_ON:
- snd_soc_update_bits(codec, LM4857_CTRL, 0x0F, lm4857->mode + 6);
+ regmap_update_bits(lm4857->regmap, LM4857_CTRL, 0x0F,
+ lm4857->mode + 6);
break;
case SND_SOC_BIAS_STANDBY:
- snd_soc_update_bits(codec, LM4857_CTRL, 0x0F, 0);
+ regmap_update_bits(lm4857->regmap, LM4857_CTRL, 0x0F, 0);
break;
default:
break;
@@ -171,49 +143,32 @@ static const struct snd_soc_dapm_route lm4857_routes[] = {
{"EP", NULL, "IN"},
};
-static int lm4857_probe(struct snd_soc_codec *codec)
-{
- struct lm4857 *lm4857 = snd_soc_codec_get_drvdata(codec);
- struct snd_soc_dapm_context *dapm = &codec->dapm;
- int ret;
-
- codec->control_data = lm4857->i2c;
-
- ret = snd_soc_add_codec_controls(codec, lm4857_controls,
- ARRAY_SIZE(lm4857_controls));
- if (ret)
- return ret;
-
- ret = snd_soc_dapm_new_controls(dapm, lm4857_dapm_widgets,
- ARRAY_SIZE(lm4857_dapm_widgets));
- if (ret)
- return ret;
+static struct snd_soc_codec_driver soc_codec_dev_lm4857 = {
+ .set_bias_level = lm4857_set_bias_level,
- ret = snd_soc_dapm_add_routes(dapm, lm4857_routes,
- ARRAY_SIZE(lm4857_routes));
- if (ret)
- return ret;
+ .controls = lm4857_controls,
+ .num_controls = ARRAY_SIZE(lm4857_controls),
+ .dapm_widgets = lm4857_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(lm4857_dapm_widgets),
+ .dapm_routes = lm4857_routes,
+ .num_dapm_routes = ARRAY_SIZE(lm4857_routes),
+};
- snd_soc_dapm_new_widgets(dapm);
+static const struct regmap_config lm4857_regmap_config = {
+ .val_bits = 6,
+ .reg_bits = 2,
- return 0;
-}
+ .max_register = LM4857_CTRL,
-static struct snd_soc_codec_driver soc_codec_dev_lm4857 = {
- .write = lm4857_write,
- .read = lm4857_read,
- .probe = lm4857_probe,
- .reg_cache_size = ARRAY_SIZE(lm4857_default_regs),
- .reg_word_size = sizeof(uint8_t),
- .reg_cache_default = lm4857_default_regs,
- .set_bias_level = lm4857_set_bias_level,
+ .cache_type = REGCACHE_FLAT,
+ .reg_defaults = lm4857_default_regs,
+ .num_reg_defaults = ARRAY_SIZE(lm4857_default_regs),
};
static int lm4857_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct lm4857 *lm4857;
- int ret;
lm4857 = devm_kzalloc(&i2c->dev, sizeof(*lm4857), GFP_KERNEL);
if (!lm4857)
@@ -221,11 +176,11 @@ static int lm4857_i2c_probe(struct i2c_client *i2c,
i2c_set_clientdata(i2c, lm4857);
- lm4857->i2c = i2c;
-
- ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_lm4857, NULL, 0);
+ lm4857->regmap = devm_regmap_init_i2c(i2c, &lm4857_regmap_config);
+ if (IS_ERR(lm4857->regmap))
+ return PTR_ERR(lm4857->regmap);
- return ret;
+ return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_lm4857, NULL, 0);
}
static int lm4857_i2c_remove(struct i2c_client *i2c)
diff --git a/sound/soc/codecs/max9768.c b/sound/soc/codecs/max9768.c
index a6ac2313047..31f91560e9f 100644
--- a/sound/soc/codecs/max9768.c
+++ b/sound/soc/codecs/max9768.c
@@ -118,6 +118,18 @@ static const struct snd_kcontrol_new max9768_mute[] = {
SOC_SINGLE_BOOL_EXT("Playback Switch", 0, max9768_get_gpio, max9768_set_gpio),
};
+static const struct snd_soc_dapm_widget max9768_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("IN"),
+
+SND_SOC_DAPM_OUTPUT("OUT+"),
+SND_SOC_DAPM_OUTPUT("OUT-"),
+};
+
+static const struct snd_soc_dapm_route max9768_dapm_routes[] = {
+ { "OUT+", NULL, "IN" },
+ { "OUT-", NULL, "IN" },
+};
+
static int max9768_probe(struct snd_soc_codec *codec)
{
struct max9768 *max9768 = snd_soc_codec_get_drvdata(codec);
@@ -148,6 +160,10 @@ static struct snd_soc_codec_driver max9768_codec_driver = {
.probe = max9768_probe,
.controls = max9768_volume,
.num_controls = ARRAY_SIZE(max9768_volume),
+ .dapm_widgets = max9768_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max9768_dapm_widgets),
+ .dapm_routes = max9768_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(max9768_dapm_routes),
};
static const struct regmap_config max9768_i2c_regmap_config = {
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index 3eeada57e87..566a367c94f 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -1612,7 +1612,7 @@ static int max98088_dai2_digital_mute(struct snd_soc_dai *codec_dai, int mute)
static void max98088_sync_cache(struct snd_soc_codec *codec)
{
- u16 *reg_cache = codec->reg_cache;
+ u8 *reg_cache = codec->reg_cache;
int i;
if (!codec->cache_sync)
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index ad5313f98f2..0569a4c3ae0 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -2084,8 +2084,9 @@ static irqreturn_t max98090_interrupt(int irq, void *data)
pm_wakeup_event(codec->dev, 100);
- schedule_delayed_work(&max98090->jack_work,
- msecs_to_jiffies(100));
+ queue_delayed_work(system_power_efficient_wq,
+ &max98090->jack_work,
+ msecs_to_jiffies(100));
}
if (active & M98090_DRCACT_MASK)
@@ -2132,8 +2133,9 @@ int max98090_mic_detect(struct snd_soc_codec *codec,
snd_soc_jack_report(max98090->jack, 0,
SND_JACK_HEADSET | SND_JACK_BTN_0);
- schedule_delayed_work(&max98090->jack_work,
- msecs_to_jiffies(100));
+ queue_delayed_work(system_power_efficient_wq,
+ &max98090->jack_work,
+ msecs_to_jiffies(100));
return 0;
}
diff --git a/sound/soc/codecs/max9877.c b/sound/soc/codecs/max9877.c
index 6b6c74cd83e..29549cdbf4c 100644
--- a/sound/soc/codecs/max9877.c
+++ b/sound/soc/codecs/max9877.c
@@ -14,170 +14,21 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/i2c.h>
+#include <linux/regmap.h>
#include <sound/soc.h>
#include <sound/tlv.h>
#include "max9877.h"
-static struct i2c_client *i2c;
+static struct regmap *regmap;
-static u8 max9877_regs[5] = { 0x40, 0x00, 0x00, 0x00, 0x49 };
-
-static void max9877_write_regs(void)
-{
- unsigned int i;
- u8 data[6];
-
- data[0] = MAX9877_INPUT_MODE;
- for (i = 0; i < ARRAY_SIZE(max9877_regs); i++)
- data[i + 1] = max9877_regs[i];
-
- if (i2c_master_send(i2c, data, 6) != 6)
- dev_err(&i2c->dev, "i2c write failed\n");
-}
-
-static int max9877_get_reg(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
- unsigned int reg = mc->reg;
- unsigned int shift = mc->shift;
- unsigned int mask = mc->max;
- unsigned int invert = mc->invert;
-
- ucontrol->value.integer.value[0] = (max9877_regs[reg] >> shift) & mask;
-
- if (invert)
- ucontrol->value.integer.value[0] =
- mask - ucontrol->value.integer.value[0];
-
- return 0;
-}
-
-static int max9877_set_reg(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
- unsigned int reg = mc->reg;
- unsigned int shift = mc->shift;
- unsigned int mask = mc->max;
- unsigned int invert = mc->invert;
- unsigned int val = (ucontrol->value.integer.value[0] & mask);
-
- if (invert)
- val = mask - val;
-
- if (((max9877_regs[reg] >> shift) & mask) == val)
- return 0;
-
- max9877_regs[reg] &= ~(mask << shift);
- max9877_regs[reg] |= val << shift;
- max9877_write_regs();
-
- return 1;
-}
-
-static int max9877_get_2reg(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
- unsigned int reg = mc->reg;
- unsigned int reg2 = mc->rreg;
- unsigned int shift = mc->shift;
- unsigned int mask = mc->max;
-
- ucontrol->value.integer.value[0] = (max9877_regs[reg] >> shift) & mask;
- ucontrol->value.integer.value[1] = (max9877_regs[reg2] >> shift) & mask;
-
- return 0;
-}
-
-static int max9877_set_2reg(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct soc_mixer_control *mc =
- (struct soc_mixer_control *)kcontrol->private_value;
- unsigned int reg = mc->reg;
- unsigned int reg2 = mc->rreg;
- unsigned int shift = mc->shift;
- unsigned int mask = mc->max;
- unsigned int val = (ucontrol->value.integer.value[0] & mask);
- unsigned int val2 = (ucontrol->value.integer.value[1] & mask);
- unsigned int change = 0;
-
- if (((max9877_regs[reg] >> shift) & mask) != val)
- change = 1;
-
- if (((max9877_regs[reg2] >> shift) & mask) != val2)
- change = 1;
-
- if (change) {
- max9877_regs[reg] &= ~(mask << shift);
- max9877_regs[reg] |= val << shift;
- max9877_regs[reg2] &= ~(mask << shift);
- max9877_regs[reg2] |= val2 << shift;
- max9877_write_regs();
- }
-
- return change;
-}
-
-static int max9877_get_out_mode(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- u8 value = max9877_regs[MAX9877_OUTPUT_MODE] & MAX9877_OUTMODE_MASK;
-
- if (value)
- value -= 1;
-
- ucontrol->value.integer.value[0] = value;
- return 0;
-}
-
-static int max9877_set_out_mode(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- u8 value = ucontrol->value.integer.value[0];
-
- value += 1;
-
- if ((max9877_regs[MAX9877_OUTPUT_MODE] & MAX9877_OUTMODE_MASK) == value)
- return 0;
-
- max9877_regs[MAX9877_OUTPUT_MODE] &= ~MAX9877_OUTMODE_MASK;
- max9877_regs[MAX9877_OUTPUT_MODE] |= value;
- max9877_write_regs();
- return 1;
-}
-
-static int max9877_get_osc_mode(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- u8 value = (max9877_regs[MAX9877_OUTPUT_MODE] & MAX9877_OSC_MASK);
-
- value = value >> MAX9877_OSC_OFFSET;
-
- ucontrol->value.integer.value[0] = value;
- return 0;
-}
-
-static int max9877_set_osc_mode(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- u8 value = ucontrol->value.integer.value[0];
-
- value = value << MAX9877_OSC_OFFSET;
- if ((max9877_regs[MAX9877_OUTPUT_MODE] & MAX9877_OSC_MASK) == value)
- return 0;
-
- max9877_regs[MAX9877_OUTPUT_MODE] &= ~MAX9877_OSC_MASK;
- max9877_regs[MAX9877_OUTPUT_MODE] |= value;
- max9877_write_regs();
- return 1;
-}
+static struct reg_default max9877_regs[] = {
+ { 0, 0x40 },
+ { 1, 0x00 },
+ { 2, 0x00 },
+ { 3, 0x00 },
+ { 4, 0x49 },
+};
static const unsigned int max9877_pgain_tlv[] = {
TLV_DB_RANGE_HEAD(2),
@@ -212,65 +63,104 @@ static const char *max9877_osc_mode[] = {
};
static const struct soc_enum max9877_enum[] = {
- SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(max9877_out_mode), max9877_out_mode),
- SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(max9877_osc_mode), max9877_osc_mode),
+ SOC_ENUM_SINGLE(MAX9877_OUTPUT_MODE, 0, ARRAY_SIZE(max9877_out_mode),
+ max9877_out_mode),
+ SOC_ENUM_SINGLE(MAX9877_OUTPUT_MODE, MAX9877_OSC_OFFSET,
+ ARRAY_SIZE(max9877_osc_mode), max9877_osc_mode),
};
static const struct snd_kcontrol_new max9877_controls[] = {
- SOC_SINGLE_EXT_TLV("MAX9877 PGAINA Playback Volume",
- MAX9877_INPUT_MODE, 0, 2, 0,
- max9877_get_reg, max9877_set_reg, max9877_pgain_tlv),
- SOC_SINGLE_EXT_TLV("MAX9877 PGAINB Playback Volume",
- MAX9877_INPUT_MODE, 2, 2, 0,
- max9877_get_reg, max9877_set_reg, max9877_pgain_tlv),
- SOC_SINGLE_EXT_TLV("MAX9877 Amp Speaker Playback Volume",
- MAX9877_SPK_VOLUME, 0, 31, 0,
- max9877_get_reg, max9877_set_reg, max9877_output_tlv),
- SOC_DOUBLE_R_EXT_TLV("MAX9877 Amp HP Playback Volume",
- MAX9877_HPL_VOLUME, MAX9877_HPR_VOLUME, 0, 31, 0,
- max9877_get_2reg, max9877_set_2reg, max9877_output_tlv),
- SOC_SINGLE_EXT("MAX9877 INB Stereo Switch",
- MAX9877_INPUT_MODE, 4, 1, 1,
- max9877_get_reg, max9877_set_reg),
- SOC_SINGLE_EXT("MAX9877 INA Stereo Switch",
- MAX9877_INPUT_MODE, 5, 1, 1,
- max9877_get_reg, max9877_set_reg),
- SOC_SINGLE_EXT("MAX9877 Zero-crossing detection Switch",
- MAX9877_INPUT_MODE, 6, 1, 0,
- max9877_get_reg, max9877_set_reg),
- SOC_SINGLE_EXT("MAX9877 Bypass Mode Switch",
- MAX9877_OUTPUT_MODE, 6, 1, 0,
- max9877_get_reg, max9877_set_reg),
- SOC_SINGLE_EXT("MAX9877 Shutdown Mode Switch",
- MAX9877_OUTPUT_MODE, 7, 1, 1,
- max9877_get_reg, max9877_set_reg),
- SOC_ENUM_EXT("MAX9877 Output Mode", max9877_enum[0],
- max9877_get_out_mode, max9877_set_out_mode),
- SOC_ENUM_EXT("MAX9877 Oscillator Mode", max9877_enum[1],
- max9877_get_osc_mode, max9877_set_osc_mode),
+ SOC_SINGLE_TLV("MAX9877 PGAINA Playback Volume",
+ MAX9877_INPUT_MODE, 0, 2, 0, max9877_pgain_tlv),
+ SOC_SINGLE_TLV("MAX9877 PGAINB Playback Volume",
+ MAX9877_INPUT_MODE, 2, 2, 0, max9877_pgain_tlv),
+ SOC_SINGLE_TLV("MAX9877 Amp Speaker Playback Volume",
+ MAX9877_SPK_VOLUME, 0, 31, 0, max9877_output_tlv),
+ SOC_DOUBLE_R_TLV("MAX9877 Amp HP Playback Volume",
+ MAX9877_HPL_VOLUME, MAX9877_HPR_VOLUME, 0, 31, 0,
+ max9877_output_tlv),
+ SOC_SINGLE("MAX9877 INB Stereo Switch",
+ MAX9877_INPUT_MODE, 4, 1, 1),
+ SOC_SINGLE("MAX9877 INA Stereo Switch",
+ MAX9877_INPUT_MODE, 5, 1, 1),
+ SOC_SINGLE("MAX9877 Zero-crossing detection Switch",
+ MAX9877_INPUT_MODE, 6, 1, 0),
+ SOC_SINGLE("MAX9877 Bypass Mode Switch",
+ MAX9877_OUTPUT_MODE, 6, 1, 0),
+ SOC_ENUM("MAX9877 Output Mode", max9877_enum[0]),
+ SOC_ENUM("MAX9877 Oscillator Mode", max9877_enum[1]),
};
-/* This function is called from ASoC machine driver */
-int max9877_add_controls(struct snd_soc_codec *codec)
-{
- return snd_soc_add_codec_controls(codec, max9877_controls,
- ARRAY_SIZE(max9877_controls));
-}
-EXPORT_SYMBOL_GPL(max9877_add_controls);
+static const struct snd_soc_dapm_widget max9877_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("INA1"),
+SND_SOC_DAPM_INPUT("INA2"),
+SND_SOC_DAPM_INPUT("INB1"),
+SND_SOC_DAPM_INPUT("INB2"),
+SND_SOC_DAPM_INPUT("RXIN+"),
+SND_SOC_DAPM_INPUT("RXIN-"),
+
+SND_SOC_DAPM_PGA("SHDN", MAX9877_OUTPUT_MODE, 7, 1, NULL, 0),
+
+SND_SOC_DAPM_OUTPUT("OUT+"),
+SND_SOC_DAPM_OUTPUT("OUT-"),
+SND_SOC_DAPM_OUTPUT("HPL"),
+SND_SOC_DAPM_OUTPUT("HPR"),
+};
+
+static const struct snd_soc_dapm_route max9877_dapm_routes[] = {
+ { "SHDN", NULL, "INA1" },
+ { "SHDN", NULL, "INA2" },
+ { "SHDN", NULL, "INB1" },
+ { "SHDN", NULL, "INB2" },
+
+ { "OUT+", NULL, "RXIN+" },
+ { "OUT+", NULL, "SHDN" },
+
+ { "OUT-", NULL, "SHDN" },
+ { "OUT-", NULL, "RXIN-" },
+
+ { "HPL", NULL, "SHDN" },
+ { "HPR", NULL, "SHDN" },
+};
+
+static const struct snd_soc_codec_driver max9877_codec = {
+ .controls = max9877_controls,
+ .num_controls = ARRAY_SIZE(max9877_controls),
+
+ .dapm_widgets = max9877_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(max9877_dapm_widgets),
+ .dapm_routes = max9877_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(max9877_dapm_routes),
+};
+
+static const struct regmap_config max9877_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .reg_defaults = max9877_regs,
+ .num_reg_defaults = ARRAY_SIZE(max9877_regs),
+ .cache_type = REGCACHE_RBTREE,
+};
static int max9877_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- i2c = client;
+ int i;
- max9877_write_regs();
+ regmap = devm_regmap_init_i2c(client, &max9877_regmap);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
- return 0;
+ /* Ensure the device is in reset state */
+ for (i = 0; i < ARRAY_SIZE(max9877_regs); i++)
+ regmap_write(regmap, max9877_regs[i].reg, max9877_regs[i].def);
+
+ return snd_soc_register_codec(&client->dev, &max9877_codec, NULL, 0);
}
static int max9877_i2c_remove(struct i2c_client *client)
{
- i2c = NULL;
+ snd_soc_unregister_codec(&client->dev);
return 0;
}
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index 5402dfbbb71..4d3c8fd8c5d 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -94,7 +94,6 @@
#define AUDIO_DAC_CFS_DLY_B (1 << 10)
struct mc13783_priv {
- struct snd_soc_codec codec;
struct mc13xxx *mc13xxx;
enum mc13783_ssi_port adc_ssi_port;
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
new file mode 100644
index 00000000000..651ce092367
--- /dev/null
+++ b/sound/soc/codecs/pcm1681.c
@@ -0,0 +1,339 @@
+/*
+ * PCM1681 ASoC codec driver
+ *
+ * Copyright (c) StreamUnlimited GmbH 2013
+ * Marek Belisko <marek.belisko@streamunlimited.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+
+#define PCM1681_PCM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+#define PCM1681_PCM_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | \
+ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
+ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | \
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000)
+
+#define PCM1681_SOFT_MUTE_ALL 0xff
+#define PCM1681_DEEMPH_RATE_MASK 0x18
+#define PCM1681_DEEMPH_MASK 0x01
+
+#define PCM1681_ATT_CONTROL(X) ((X) <= 6 ? (X) : (X) + 9) /* Attenuation level */
+#define PCM1681_SOFT_MUTE 0x07 /* Soft mute control register */
+#define PCM1681_DAC_CONTROL 0x08 /* DAC operation control */
+#define PCM1681_FMT_CONTROL 0x09 /* Audio interface data format */
+#define PCM1681_DEEMPH_CONTROL 0x0a /* De-emphasis control */
+#define PCM1681_ZERO_DETECT_STATUS 0x0e /* Zero detect status reg */
+
+static const struct reg_default pcm1681_reg_defaults[] = {
+ { 0x01, 0xff },
+ { 0x02, 0xff },
+ { 0x03, 0xff },
+ { 0x04, 0xff },
+ { 0x05, 0xff },
+ { 0x06, 0xff },
+ { 0x07, 0x00 },
+ { 0x08, 0x00 },
+ { 0x09, 0x06 },
+ { 0x0A, 0x00 },
+ { 0x0B, 0xff },
+ { 0x0C, 0x0f },
+ { 0x0D, 0x00 },
+ { 0x10, 0xff },
+ { 0x11, 0xff },
+ { 0x12, 0x00 },
+ { 0x13, 0x00 },
+};
+
+static bool pcm1681_accessible_reg(struct device *dev, unsigned int reg)
+{
+ return !((reg == 0x00) || (reg == 0x0f));
+}
+
+static bool pcm1681_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return pcm1681_accessible_reg(dev, reg) &&
+ (reg != PCM1681_ZERO_DETECT_STATUS);
+}
+
+struct pcm1681_private {
+ struct regmap *regmap;
+ unsigned int format;
+ /* Current deemphasis status */
+ unsigned int deemph;
+ /* Current rate for deemphasis control */
+ unsigned int rate;
+};
+
+static const int pcm1681_deemph[] = { 44100, 48000, 32000 };
+
+static int pcm1681_set_deemph(struct snd_soc_codec *codec)
+{
+ struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
+ int i = 0, val = -1, enable = 0;
+
+ if (priv->deemph)
+ for (i = 0; i < ARRAY_SIZE(pcm1681_deemph); i++)
+ if (pcm1681_deemph[i] == priv->rate)
+ val = i;
+
+ if (val != -1) {
+ regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
+ PCM1681_DEEMPH_RATE_MASK, val);
+ enable = 1;
+ } else
+ enable = 0;
+
+ /* enable/disable deemphasis functionality */
+ return regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
+ PCM1681_DEEMPH_MASK, enable);
+}
+
+static int pcm1681_get_deemph(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
+
+ ucontrol->value.enumerated.item[0] = priv->deemph;
+
+ return 0;
+}
+
+static int pcm1681_put_deemph(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
+
+ priv->deemph = ucontrol->value.enumerated.item[0];
+
+ return pcm1681_set_deemph(codec);
+}
+
+static int pcm1681_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int format)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
+
+ /* The PCM1681 can only be a clock slave (CBS_CFS) */
+ if ((format & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS) {
+ dev_err(codec->dev, "Invalid clocking mode\n");
+ return -EINVAL;
+ }
+
+ priv->format = format;
+
+ return 0;
+}
+
+static int pcm1681_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
+ int val;
+
+ if (mute)
+ val = PCM1681_SOFT_MUTE_ALL;
+ else
+ val = 0;
+
+ return regmap_write(priv->regmap, PCM1681_SOFT_MUTE, val);
+}
+
+static int pcm1681_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct pcm1681_private *priv = snd_soc_codec_get_drvdata(codec);
+ int val = 0, ret;
+ int pcm_format = params_format(params);
+
+ priv->rate = params_rate(params);
+
+ switch (priv->format & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_RIGHT_J:
+ if (pcm_format == SNDRV_PCM_FORMAT_S24_LE)
+ val = 0x00;
+ else if (pcm_format == SNDRV_PCM_FORMAT_S16_LE)
+ val = 0x03;
+ break;
+ case SND_SOC_DAIFMT_I2S:
+ val = 0x04;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ val = 0x05;
+ break;
+ default:
+ dev_err(codec->dev, "Invalid DAI format\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(priv->regmap, PCM1681_FMT_CONTROL, 0x0f, val);
+ if (ret < 0)
+ return ret;
+
+ return pcm1681_set_deemph(codec);
+}
+
+static const struct snd_soc_dai_ops pcm1681_dai_ops = {
+ .set_fmt = pcm1681_set_dai_fmt,
+ .hw_params = pcm1681_hw_params,
+ .digital_mute = pcm1681_digital_mute,
+};
+
+static const struct snd_soc_dapm_widget pcm1681_dapm_widgets[] = {
+SND_SOC_DAPM_OUTPUT("VOUT1"),
+SND_SOC_DAPM_OUTPUT("VOUT2"),
+SND_SOC_DAPM_OUTPUT("VOUT3"),
+SND_SOC_DAPM_OUTPUT("VOUT4"),
+SND_SOC_DAPM_OUTPUT("VOUT5"),
+SND_SOC_DAPM_OUTPUT("VOUT6"),
+SND_SOC_DAPM_OUTPUT("VOUT7"),
+SND_SOC_DAPM_OUTPUT("VOUT8"),
+};
+
+static const struct snd_soc_dapm_route pcm1681_dapm_routes[] = {
+ { "VOUT1", NULL, "Playback" },
+ { "VOUT2", NULL, "Playback" },
+ { "VOUT3", NULL, "Playback" },
+ { "VOUT4", NULL, "Playback" },
+ { "VOUT5", NULL, "Playback" },
+ { "VOUT6", NULL, "Playback" },
+ { "VOUT7", NULL, "Playback" },
+ { "VOUT8", NULL, "Playback" },
+};
+
+static const DECLARE_TLV_DB_SCALE(pcm1681_dac_tlv, -6350, 50, 1);
+
+static const struct snd_kcontrol_new pcm1681_controls[] = {
+ SOC_DOUBLE_R_TLV("Channel 1/2 Playback Volume",
+ PCM1681_ATT_CONTROL(1), PCM1681_ATT_CONTROL(2), 0,
+ 0x7f, 0, pcm1681_dac_tlv),
+ SOC_DOUBLE_R_TLV("Channel 3/4 Playback Volume",
+ PCM1681_ATT_CONTROL(3), PCM1681_ATT_CONTROL(4), 0,
+ 0x7f, 0, pcm1681_dac_tlv),
+ SOC_DOUBLE_R_TLV("Channel 5/6 Playback Volume",
+ PCM1681_ATT_CONTROL(5), PCM1681_ATT_CONTROL(6), 0,
+ 0x7f, 0, pcm1681_dac_tlv),
+ SOC_DOUBLE_R_TLV("Channel 7/8 Playback Volume",
+ PCM1681_ATT_CONTROL(7), PCM1681_ATT_CONTROL(8), 0,
+ 0x7f, 0, pcm1681_dac_tlv),
+ SOC_SINGLE_BOOL_EXT("De-emphasis Switch", 0,
+ pcm1681_get_deemph, pcm1681_put_deemph),
+};
+
+static struct snd_soc_dai_driver pcm1681_dai = {
+ .name = "pcm1681-hifi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 8,
+ .rates = PCM1681_PCM_RATES,
+ .formats = PCM1681_PCM_FORMATS,
+ },
+ .ops = &pcm1681_dai_ops,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id pcm1681_dt_ids[] = {
+ { .compatible = "ti,pcm1681", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pcm1681_dt_ids);
+#endif
+
+static const struct regmap_config pcm1681_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 0x13,
+ .reg_defaults = pcm1681_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(pcm1681_reg_defaults),
+ .writeable_reg = pcm1681_writeable_reg,
+ .readable_reg = pcm1681_accessible_reg,
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_pcm1681 = {
+ .controls = pcm1681_controls,
+ .num_controls = ARRAY_SIZE(pcm1681_controls),
+ .dapm_widgets = pcm1681_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(pcm1681_dapm_widgets),
+ .dapm_routes = pcm1681_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(pcm1681_dapm_routes),
+};
+
+static const struct i2c_device_id pcm1681_i2c_id[] = {
+ {"pcm1681", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, pcm1681_i2c_id);
+
+static int pcm1681_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct pcm1681_private *priv;
+
+ priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->regmap = devm_regmap_init_i2c(client, &pcm1681_regmap);
+ if (IS_ERR(priv->regmap)) {
+ ret = PTR_ERR(priv->regmap);
+ dev_err(&client->dev, "Failed to create regmap: %d\n", ret);
+ return ret;
+ }
+
+ i2c_set_clientdata(client, priv);
+
+ return snd_soc_register_codec(&client->dev, &soc_codec_dev_pcm1681,
+ &pcm1681_dai, 1);
+}
+
+static int pcm1681_i2c_remove(struct i2c_client *client)
+{
+ snd_soc_unregister_codec(&client->dev);
+ return 0;
+}
+
+static struct i2c_driver pcm1681_i2c_driver = {
+ .driver = {
+ .name = "pcm1681",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(pcm1681_dt_ids),
+ },
+ .id_table = pcm1681_i2c_id,
+ .probe = pcm1681_i2c_probe,
+ .remove = pcm1681_i2c_remove,
+};
+
+module_i2c_driver(pcm1681_i2c_driver);
+
+MODULE_DESCRIPTION("Texas Instruments PCM1681 ALSA SoC Codec Driver");
+MODULE_AUTHOR("Marek Belisko <marek.belisko@streamunlimited.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/pcm1792a.c b/sound/soc/codecs/pcm1792a.c
new file mode 100644
index 00000000000..2a8eccf64c7
--- /dev/null
+++ b/sound/soc/codecs/pcm1792a.c
@@ -0,0 +1,257 @@
+/*
+ * PCM1792A ASoC codec driver
+ *
+ * Copyright (c) Amarula Solutions B.V. 2013
+ *
+ * Michael Trimarchi <michael@amarulasolutions.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+#include <sound/tlv.h>
+#include <linux/of_device.h>
+
+#include "pcm1792a.h"
+
+#define PCM1792A_DAC_VOL_LEFT 0x10
+#define PCM1792A_DAC_VOL_RIGHT 0x11
+#define PCM1792A_FMT_CONTROL 0x12
+#define PCM1792A_SOFT_MUTE PCM1792A_FMT_CONTROL
+
+#define PCM1792A_FMT_MASK 0x70
+#define PCM1792A_FMT_SHIFT 4
+#define PCM1792A_MUTE_MASK 0x01
+#define PCM1792A_MUTE_SHIFT 0
+#define PCM1792A_ATLD_ENABLE (1 << 7)
+
+static const struct reg_default pcm1792a_reg_defaults[] = {
+ { 0x10, 0xff },
+ { 0x11, 0xff },
+ { 0x12, 0x50 },
+ { 0x13, 0x00 },
+ { 0x14, 0x00 },
+ { 0x15, 0x01 },
+ { 0x16, 0x00 },
+ { 0x17, 0x00 },
+};
+
+static bool pcm1792a_accessible_reg(struct device *dev, unsigned int reg)
+{
+ return reg >= 0x10 && reg <= 0x17;
+}
+
+static bool pcm1792a_writeable_reg(struct device *dev, unsigned int reg)
+{
+ bool accessible;
+
+ accessible = pcm1792a_accessible_reg(dev, reg);
+
+ return accessible && reg != 0x16 && reg != 0x17;
+}
+
+struct pcm1792a_private {
+ struct regmap *regmap;
+ unsigned int format;
+ unsigned int rate;
+};
+
+static int pcm1792a_set_dai_fmt(struct snd_soc_dai *codec_dai,
+ unsigned int format)
+{
+ struct snd_soc_codec *codec = codec_dai->codec;
+ struct pcm1792a_private *priv = snd_soc_codec_get_drvdata(codec);
+
+ priv->format = format;
+
+ return 0;
+}
+
+static int pcm1792a_digital_mute(struct snd_soc_dai *dai, int mute)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct pcm1792a_private *priv = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ ret = regmap_update_bits(priv->regmap, PCM1792A_SOFT_MUTE,
+ PCM1792A_MUTE_MASK, !!mute);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int pcm1792a_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_codec *codec = dai->codec;
+ struct pcm1792a_private *priv = snd_soc_codec_get_drvdata(codec);
+ int val = 0, ret;
+ int pcm_format = params_format(params);
+
+ priv->rate = params_rate(params);
+
+ switch (priv->format & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_RIGHT_J:
+ if (pcm_format == SNDRV_PCM_FORMAT_S24_LE ||
+ pcm_format == SNDRV_PCM_FORMAT_S32_LE)
+ val = 0x02;
+ else if (pcm_format == SNDRV_PCM_FORMAT_S16_LE)
+ val = 0x00;
+ break;
+ case SND_SOC_DAIFMT_I2S:
+ if (pcm_format == SNDRV_PCM_FORMAT_S24_LE ||
+ pcm_format == SNDRV_PCM_FORMAT_S32_LE)
+ val = 0x05;
+ else if (pcm_format == SNDRV_PCM_FORMAT_S16_LE)
+ val = 0x04;
+ break;
+ default:
+ dev_err(codec->dev, "Invalid DAI format\n");
+ return -EINVAL;
+ }
+
+ val = val << PCM1792A_FMT_SHIFT | PCM1792A_ATLD_ENABLE;
+
+ ret = regmap_update_bits(priv->regmap, PCM1792A_FMT_CONTROL,
+ PCM1792A_FMT_MASK | PCM1792A_ATLD_ENABLE, val);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops pcm1792a_dai_ops = {
+ .set_fmt = pcm1792a_set_dai_fmt,
+ .hw_params = pcm1792a_hw_params,
+ .digital_mute = pcm1792a_digital_mute,
+};
+
+static const DECLARE_TLV_DB_SCALE(pcm1792a_dac_tlv, -12000, 50, 1);
+
+static const struct snd_kcontrol_new pcm1792a_controls[] = {
+ SOC_DOUBLE_R_RANGE_TLV("DAC Playback Volume", PCM1792A_DAC_VOL_LEFT,
+ PCM1792A_DAC_VOL_RIGHT, 0, 0xf, 0xff, 0,
+ pcm1792a_dac_tlv),
+};
+
+static const struct snd_soc_dapm_widget pcm1792a_dapm_widgets[] = {
+SND_SOC_DAPM_OUTPUT("IOUTL+"),
+SND_SOC_DAPM_OUTPUT("IOUTL-"),
+SND_SOC_DAPM_OUTPUT("IOUTR+"),
+SND_SOC_DAPM_OUTPUT("IOUTR-"),
+};
+
+static const struct snd_soc_dapm_route pcm1792a_dapm_routes[] = {
+ { "IOUTL+", NULL, "Playback" },
+ { "IOUTL-", NULL, "Playback" },
+ { "IOUTR+", NULL, "Playback" },
+ { "IOUTR-", NULL, "Playback" },
+};
+
+static struct snd_soc_dai_driver pcm1792a_dai = {
+ .name = "pcm1792a-hifi",
+ .playback = {
+ .stream_name = "Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = PCM1792A_RATES,
+ .formats = PCM1792A_FORMATS, },
+ .ops = &pcm1792a_dai_ops,
+};
+
+static const struct of_device_id pcm1792a_of_match[] = {
+ { .compatible = "ti,pcm1792a", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, pcm1792a_of_match);
+
+static const struct regmap_config pcm1792a_regmap = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 24,
+ .reg_defaults = pcm1792a_reg_defaults,
+ .num_reg_defaults = ARRAY_SIZE(pcm1792a_reg_defaults),
+ .writeable_reg = pcm1792a_writeable_reg,
+ .readable_reg = pcm1792a_accessible_reg,
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_pcm1792a = {
+ .controls = pcm1792a_controls,
+ .num_controls = ARRAY_SIZE(pcm1792a_controls),
+ .dapm_widgets = pcm1792a_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(pcm1792a_dapm_widgets),
+ .dapm_routes = pcm1792a_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(pcm1792a_dapm_routes),
+};
+
+static int pcm1792a_spi_probe(struct spi_device *spi)
+{
+ struct pcm1792a_private *pcm1792a;
+ int ret;
+
+ pcm1792a = devm_kzalloc(&spi->dev, sizeof(struct pcm1792a_private),
+ GFP_KERNEL);
+ if (!pcm1792a)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, pcm1792a);
+
+ pcm1792a->regmap = devm_regmap_init_spi(spi, &pcm1792a_regmap);
+ if (IS_ERR(pcm1792a->regmap)) {
+ ret = PTR_ERR(pcm1792a->regmap);
+ dev_err(&spi->dev, "Failed to register regmap: %d\n", ret);
+ return ret;
+ }
+
+ return snd_soc_register_codec(&spi->dev,
+ &soc_codec_dev_pcm1792a, &pcm1792a_dai, 1);
+}
+
+static int pcm1792a_spi_remove(struct spi_device *spi)
+{
+ snd_soc_unregister_codec(&spi->dev);
+ return 0;
+}
+
+static const struct spi_device_id pcm1792a_spi_ids[] = {
+ { "pcm1792a", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, pcm1792a_spi_ids);
+
+static struct spi_driver pcm1792a_codec_driver = {
+ .driver = {
+ .name = "pcm1792a",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(pcm1792a_of_match),
+ },
+ .id_table = pcm1792a_spi_ids,
+ .probe = pcm1792a_spi_probe,
+ .remove = pcm1792a_spi_remove,
+};
+
+module_spi_driver(pcm1792a_codec_driver);
+
+MODULE_DESCRIPTION("ASoC PCM1792A driver");
+MODULE_AUTHOR("Michael Trimarchi <michael@amarulasolutions.com>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/pcm1792a.h b/sound/soc/codecs/pcm1792a.h
new file mode 100644
index 00000000000..7a83d1fc102
--- /dev/null
+++ b/sound/soc/codecs/pcm1792a.h
@@ -0,0 +1,26 @@
+/*
+ * definitions for PCM1792A
+ *
+ * Copyright 2013 Amarula Solutions
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PCM1792A_H__
+#define __PCM1792A_H__
+
+#define PCM1792A_RATES (SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_8000_48000 | \
+ SNDRV_PCM_RATE_96000 | SNDRV_PCM_RATE_192000)
+
+#define PCM1792A_FORMATS (SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S24_LE | \
+ SNDRV_PCM_FMTBIT_S16_LE)
+
+#endif
diff --git a/sound/soc/codecs/pcm3008.c b/sound/soc/codecs/pcm3008.c
index f2a6282b41f..b6618c4a759 100644
--- a/sound/soc/codecs/pcm3008.c
+++ b/sound/soc/codecs/pcm3008.c
@@ -28,7 +28,54 @@
#include "pcm3008.h"
-#define PCM3008_VERSION "0.2"
+static int pcm3008_dac_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct pcm3008_setup_data *setup = codec->dev->platform_data;
+
+ gpio_set_value_cansleep(setup->pdda_pin,
+ SND_SOC_DAPM_EVENT_ON(event));
+
+ return 0;
+}
+
+static int pcm3008_adc_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol,
+ int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct pcm3008_setup_data *setup = codec->dev->platform_data;
+
+ gpio_set_value_cansleep(setup->pdad_pin,
+ SND_SOC_DAPM_EVENT_ON(event));
+
+ return 0;
+}
+
+static const struct snd_soc_dapm_widget pcm3008_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("VINL"),
+SND_SOC_DAPM_INPUT("VINR"),
+
+SND_SOC_DAPM_DAC_E("DAC", NULL, SND_SOC_NOPM, 0, 0, pcm3008_dac_ev,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+SND_SOC_DAPM_ADC_E("ADC", NULL, SND_SOC_NOPM, 0, 0, pcm3008_adc_ev,
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+
+SND_SOC_DAPM_OUTPUT("VOUTL"),
+SND_SOC_DAPM_OUTPUT("VOUTR"),
+};
+
+static const struct snd_soc_dapm_route pcm3008_dapm_routes[] = {
+ { "PCM3008 Capture", NULL, "ADC" },
+ { "ADC", NULL, "VINL" },
+ { "ADC", NULL, "VINR" },
+
+ { "DAC", NULL, "PCM3008 Playback" },
+ { "VOUTL", NULL, "DAC" },
+ { "VOUTR", NULL, "DAC" },
+};
#define PCM3008_RATES (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
SNDRV_PCM_RATE_48000)
@@ -51,20 +98,20 @@ static struct snd_soc_dai_driver pcm3008_dai = {
},
};
-static void pcm3008_gpio_free(struct pcm3008_setup_data *setup)
-{
- gpio_free(setup->dem0_pin);
- gpio_free(setup->dem1_pin);
- gpio_free(setup->pdad_pin);
- gpio_free(setup->pdda_pin);
-}
+static struct snd_soc_codec_driver soc_codec_dev_pcm3008 = {
+ .dapm_widgets = pcm3008_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(pcm3008_dapm_widgets),
+ .dapm_routes = pcm3008_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(pcm3008_dapm_routes),
+};
-static int pcm3008_soc_probe(struct snd_soc_codec *codec)
+static int pcm3008_codec_probe(struct platform_device *pdev)
{
- struct pcm3008_setup_data *setup = codec->dev->platform_data;
- int ret = 0;
+ struct pcm3008_setup_data *setup = pdev->dev.platform_data;
+ int ret;
- printk(KERN_INFO "PCM3008 SoC Audio Codec %s\n", PCM3008_VERSION);
+ if (!setup)
+ return -EINVAL;
/* DEM1 DEM0 DE-EMPHASIS_MODE
* Low Low De-emphasis 44.1 kHz ON
@@ -74,83 +121,29 @@ static int pcm3008_soc_probe(struct snd_soc_codec *codec)
*/
/* Configure DEM0 GPIO (turning OFF DAC De-emphasis). */
- ret = gpio_request(setup->dem0_pin, "codec_dem0");
- if (ret == 0)
- ret = gpio_direction_output(setup->dem0_pin, 1);
+ ret = devm_gpio_request_one(&pdev->dev, setup->dem0_pin,
+ GPIOF_OUT_INIT_HIGH, "codec_dem0");
if (ret != 0)
- goto gpio_err;
+ return ret;
/* Configure DEM1 GPIO (turning OFF DAC De-emphasis). */
- ret = gpio_request(setup->dem1_pin, "codec_dem1");
- if (ret == 0)
- ret = gpio_direction_output(setup->dem1_pin, 0);
+ ret = devm_gpio_request_one(&pdev->dev, setup->dem1_pin,
+ GPIOF_OUT_INIT_LOW, "codec_dem1");
if (ret != 0)
- goto gpio_err;
+ return ret;
/* Configure PDAD GPIO. */
- ret = gpio_request(setup->pdad_pin, "codec_pdad");
- if (ret == 0)
- ret = gpio_direction_output(setup->pdad_pin, 1);
+ ret = devm_gpio_request_one(&pdev->dev, setup->pdad_pin,
+ GPIOF_OUT_INIT_LOW, "codec_pdad");
if (ret != 0)
- goto gpio_err;
+ return ret;
/* Configure PDDA GPIO. */
- ret = gpio_request(setup->pdda_pin, "codec_pdda");
- if (ret == 0)
- ret = gpio_direction_output(setup->pdda_pin, 1);
+ ret = devm_gpio_request_one(&pdev->dev, setup->pdda_pin,
+ GPIOF_OUT_INIT_LOW, "codec_pdda");
if (ret != 0)
- goto gpio_err;
-
- return ret;
-
-gpio_err:
- pcm3008_gpio_free(setup);
+ return ret;
- return ret;
-}
-
-static int pcm3008_soc_remove(struct snd_soc_codec *codec)
-{
- struct pcm3008_setup_data *setup = codec->dev->platform_data;
-
- pcm3008_gpio_free(setup);
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int pcm3008_soc_suspend(struct snd_soc_codec *codec)
-{
- struct pcm3008_setup_data *setup = codec->dev->platform_data;
-
- gpio_set_value(setup->pdad_pin, 0);
- gpio_set_value(setup->pdda_pin, 0);
-
- return 0;
-}
-
-static int pcm3008_soc_resume(struct snd_soc_codec *codec)
-{
- struct pcm3008_setup_data *setup = codec->dev->platform_data;
-
- gpio_set_value(setup->pdad_pin, 1);
- gpio_set_value(setup->pdda_pin, 1);
-
- return 0;
-}
-#else
-#define pcm3008_soc_suspend NULL
-#define pcm3008_soc_resume NULL
-#endif
-
-static struct snd_soc_codec_driver soc_codec_dev_pcm3008 = {
- .probe = pcm3008_soc_probe,
- .remove = pcm3008_soc_remove,
- .suspend = pcm3008_soc_suspend,
- .resume = pcm3008_soc_resume,
-};
-
-static int pcm3008_codec_probe(struct platform_device *pdev)
-{
return snd_soc_register_codec(&pdev->dev,
&soc_codec_dev_pcm3008, &pcm3008_dai, 1);
}
@@ -158,6 +151,7 @@ static int pcm3008_codec_probe(struct platform_device *pdev)
static int pcm3008_codec_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
+
return 0;
}
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index ce585e37e38..c26a8f814b1 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -50,8 +50,6 @@ static const struct regmap_range_cfg rt5640_ranges[] = {
static struct reg_default init_list[] = {
{RT5640_PR_BASE + 0x3d, 0x3600},
- {RT5640_PR_BASE + 0x1c, 0x0D21},
- {RT5640_PR_BASE + 0x1b, 0x0000},
{RT5640_PR_BASE + 0x12, 0x0aa8},
{RT5640_PR_BASE + 0x14, 0x0aaa},
{RT5640_PR_BASE + 0x20, 0x6110},
@@ -384,15 +382,11 @@ static const SOC_ENUM_SINGLE_DECL(
static const struct snd_kcontrol_new rt5640_snd_controls[] = {
/* Speaker Output Volume */
- SOC_DOUBLE("Speaker Playback Switch", RT5640_SPK_VOL,
- RT5640_L_MUTE_SFT, RT5640_R_MUTE_SFT, 1, 1),
SOC_DOUBLE("Speaker Channel Switch", RT5640_SPK_VOL,
RT5640_VOL_L_SFT, RT5640_VOL_R_SFT, 1, 1),
SOC_DOUBLE_TLV("Speaker Playback Volume", RT5640_SPK_VOL,
RT5640_L_VOL_SFT, RT5640_R_VOL_SFT, 39, 1, out_vol_tlv),
/* Headphone Output Volume */
- SOC_DOUBLE("HP Playback Switch", RT5640_HP_VOL,
- RT5640_L_MUTE_SFT, RT5640_R_MUTE_SFT, 1, 1),
SOC_DOUBLE("HP Channel Switch", RT5640_HP_VOL,
RT5640_VOL_L_SFT, RT5640_VOL_R_SFT, 1, 1),
SOC_DOUBLE_TLV("HP Playback Volume", RT5640_HP_VOL,
@@ -737,28 +731,21 @@ static const struct snd_kcontrol_new rt5640_mono_mix[] = {
RT5640_M_BST1_MM_SFT, 1, 1),
};
-/* INL/R source */
-static const char * const rt5640_inl_src[] = {
- "IN2P", "MONOP"
-};
+static const struct snd_kcontrol_new spk_l_enable_control =
+ SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT5640_SPK_VOL,
+ RT5640_L_MUTE_SFT, 1, 1);
-static const SOC_ENUM_SINGLE_DECL(
- rt5640_inl_enum, RT5640_INL_INR_VOL,
- RT5640_INL_SEL_SFT, rt5640_inl_src);
+static const struct snd_kcontrol_new spk_r_enable_control =
+ SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT5640_SPK_VOL,
+ RT5640_R_MUTE_SFT, 1, 1);
-static const struct snd_kcontrol_new rt5640_inl_mux =
- SOC_DAPM_ENUM("INL source", rt5640_inl_enum);
+static const struct snd_kcontrol_new hp_l_enable_control =
+ SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT5640_HP_VOL,
+ RT5640_L_MUTE_SFT, 1, 1);
-static const char * const rt5640_inr_src[] = {
- "IN2N", "MONON"
-};
-
-static const SOC_ENUM_SINGLE_DECL(
- rt5640_inr_enum, RT5640_INL_INR_VOL,
- RT5640_INR_SEL_SFT, rt5640_inr_src);
-
-static const struct snd_kcontrol_new rt5640_inr_mux =
- SOC_DAPM_ENUM("INR source", rt5640_inr_enum);
+static const struct snd_kcontrol_new hp_r_enable_control =
+ SOC_DAPM_SINGLE_AUTODISABLE("Switch", RT5640_HP_VOL,
+ RT5640_R_MUTE_SFT, 1, 1);
/* Stereo ADC source */
static const char * const rt5640_stereo_adc1_src[] = {
@@ -891,33 +878,6 @@ static const SOC_ENUM_SINGLE_DECL(
static const struct snd_kcontrol_new rt5640_sdi_mux =
SOC_DAPM_ENUM("SDI select", rt5640_sdi_sel_enum);
-static int spk_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = w->codec;
- struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
-
- switch (event) {
- case SND_SOC_DAPM_POST_PMU:
- regmap_update_bits(rt5640->regmap, RT5640_PWR_DIG1,
- 0x0001, 0x0001);
- regmap_update_bits(rt5640->regmap, RT5640_PR_BASE + 0x1c,
- 0xf000, 0xf000);
- break;
-
- case SND_SOC_DAPM_PRE_PMD:
- regmap_update_bits(rt5640->regmap, RT5640_PR_BASE + 0x1c,
- 0xf000, 0x0000);
- regmap_update_bits(rt5640->regmap, RT5640_PWR_DIG1,
- 0x0001, 0x0000);
- break;
-
- default:
- return 0;
- }
- return 0;
-}
-
static int rt5640_set_dmic1_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -966,6 +926,117 @@ static int rt5640_set_dmic2_event(struct snd_soc_dapm_widget *w,
return 0;
}
+static void hp_amp_power_on(struct snd_soc_codec *codec)
+{
+ struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
+
+ /* depop parameters */
+ regmap_update_bits(rt5640->regmap, RT5640_PR_BASE +
+ RT5640_CHPUMP_INT_REG1, 0x0700, 0x0200);
+ regmap_update_bits(rt5640->regmap, RT5640_DEPOP_M2,
+ RT5640_DEPOP_MASK, RT5640_DEPOP_MAN);
+ regmap_update_bits(rt5640->regmap, RT5640_DEPOP_M1,
+ RT5640_HP_CP_MASK | RT5640_HP_SG_MASK | RT5640_HP_CB_MASK,
+ RT5640_HP_CP_PU | RT5640_HP_SG_DIS | RT5640_HP_CB_PU);
+ regmap_write(rt5640->regmap, RT5640_PR_BASE + RT5640_HP_DCC_INT1,
+ 0x9f00);
+ /* headphone amp power on */
+ regmap_update_bits(rt5640->regmap, RT5640_PWR_ANLG1,
+ RT5640_PWR_FV1 | RT5640_PWR_FV2, 0);
+ regmap_update_bits(rt5640->regmap, RT5640_PWR_ANLG1,
+ RT5640_PWR_HA,
+ RT5640_PWR_HA);
+ usleep_range(10000, 15000);
+ regmap_update_bits(rt5640->regmap, RT5640_PWR_ANLG1,
+ RT5640_PWR_FV1 | RT5640_PWR_FV2,
+ RT5640_PWR_FV1 | RT5640_PWR_FV2);
+}
+
+static void rt5640_pmu_depop(struct snd_soc_codec *codec)
+{
+ struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
+
+ regmap_update_bits(rt5640->regmap, RT5640_DEPOP_M2,
+ RT5640_DEPOP_MASK | RT5640_DIG_DP_MASK,
+ RT5640_DEPOP_AUTO | RT5640_DIG_DP_EN);
+ regmap_update_bits(rt5640->regmap, RT5640_CHARGE_PUMP,
+ RT5640_PM_HP_MASK, RT5640_PM_HP_HV);
+
+ regmap_update_bits(rt5640->regmap, RT5640_DEPOP_M3,
+ RT5640_CP_FQ1_MASK | RT5640_CP_FQ2_MASK | RT5640_CP_FQ3_MASK,
+ (RT5640_CP_FQ_192_KHZ << RT5640_CP_FQ1_SFT) |
+ (RT5640_CP_FQ_12_KHZ << RT5640_CP_FQ2_SFT) |
+ (RT5640_CP_FQ_192_KHZ << RT5640_CP_FQ3_SFT));
+
+ regmap_write(rt5640->regmap, RT5640_PR_BASE +
+ RT5640_MAMP_INT_REG2, 0x1c00);
+ regmap_update_bits(rt5640->regmap, RT5640_DEPOP_M1,
+ RT5640_HP_CP_MASK | RT5640_HP_SG_MASK,
+ RT5640_HP_CP_PD | RT5640_HP_SG_EN);
+ regmap_update_bits(rt5640->regmap, RT5640_PR_BASE +
+ RT5640_CHPUMP_INT_REG1, 0x0700, 0x0400);
+}
+
+static int rt5640_hp_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ rt5640_pmu_depop(codec);
+ rt5640->hp_mute = 0;
+ break;
+
+ case SND_SOC_DAPM_PRE_PMD:
+ rt5640->hp_mute = 1;
+ usleep_range(70000, 75000);
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int rt5640_hp_power_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ hp_amp_power_on(codec);
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int rt5640_hp_post_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct rt5640_priv *rt5640 = snd_soc_codec_get_drvdata(codec);
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ if (!rt5640->hp_mute)
+ usleep_range(80000, 85000);
+
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
static const struct snd_soc_dapm_widget rt5640_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("PLL1", RT5640_PWR_ANLG2,
RT5640_PWR_PLL_BIT, 0, NULL, 0),
@@ -1005,9 +1076,6 @@ static const struct snd_soc_dapm_widget rt5640_dapm_widgets[] = {
RT5640_PWR_IN_L_BIT, 0, NULL, 0),
SND_SOC_DAPM_PGA("INR VOL", RT5640_PWR_VOL,
RT5640_PWR_IN_R_BIT, 0, NULL, 0),
- /* IN Mux */
- SND_SOC_DAPM_MUX("INL Mux", SND_SOC_NOPM, 0, 0, &rt5640_inl_mux),
- SND_SOC_DAPM_MUX("INR Mux", SND_SOC_NOPM, 0, 0, &rt5640_inr_mux),
/* REC Mixer */
SND_SOC_DAPM_MIXER("RECMIXL", RT5640_PWR_MIXER, RT5640_PWR_RM_L_BIT, 0,
rt5640_rec_l_mix, ARRAY_SIZE(rt5640_rec_l_mix)),
@@ -1158,15 +1226,28 @@ static const struct snd_soc_dapm_widget rt5640_dapm_widgets[] = {
rt5640_mono_mix, ARRAY_SIZE(rt5640_mono_mix)),
SND_SOC_DAPM_SUPPLY("Improve MONO Amp Drv", RT5640_PWR_ANLG1,
RT5640_PWR_MA_BIT, 0, NULL, 0),
- SND_SOC_DAPM_SUPPLY("Improve HP Amp Drv", RT5640_PWR_ANLG1,
- SND_SOC_NOPM, 0, NULL, 0),
- SND_SOC_DAPM_PGA("HP L Amp", RT5640_PWR_ANLG1,
+ SND_SOC_DAPM_SUPPLY_S("Improve HP Amp Drv", 1, SND_SOC_NOPM,
+ 0, 0, rt5640_hp_power_event, SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_PGA_S("HP Amp", 1, SND_SOC_NOPM, 0, 0,
+ rt5640_hp_event,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+ SND_SOC_DAPM_SUPPLY("HP L Amp", RT5640_PWR_ANLG1,
RT5640_PWR_HP_L_BIT, 0, NULL, 0),
- SND_SOC_DAPM_PGA("HP R Amp", RT5640_PWR_ANLG1,
+ SND_SOC_DAPM_SUPPLY("HP R Amp", RT5640_PWR_ANLG1,
RT5640_PWR_HP_R_BIT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("Improve SPK Amp Drv", RT5640_PWR_DIG1,
- SND_SOC_NOPM, 0, spk_event,
- SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+ RT5640_PWR_CLS_D_BIT, 0, NULL, 0),
+
+ /* Output Switch */
+ SND_SOC_DAPM_SWITCH("Speaker L Playback", SND_SOC_NOPM, 0, 0,
+ &spk_l_enable_control),
+ SND_SOC_DAPM_SWITCH("Speaker R Playback", SND_SOC_NOPM, 0, 0,
+ &spk_r_enable_control),
+ SND_SOC_DAPM_SWITCH("HP L Playback", SND_SOC_NOPM, 0, 0,
+ &hp_l_enable_control),
+ SND_SOC_DAPM_SWITCH("HP R Playback", SND_SOC_NOPM, 0, 0,
+ &hp_r_enable_control),
+ SND_SOC_DAPM_POST("HP Post", rt5640_hp_post_event),
/* Output Lines */
SND_SOC_DAPM_OUTPUT("SPOLP"),
SND_SOC_DAPM_OUTPUT("SPOLN"),
@@ -1407,9 +1488,11 @@ static const struct snd_soc_dapm_route rt5640_dapm_routes[] = {
{"HPO MIX L", "HPO MIX DAC2 Switch", "DAC L2"},
{"HPO MIX L", "HPO MIX DAC1 Switch", "DAC L1"},
{"HPO MIX L", "HPO MIX HPVOL Switch", "HPOVOL L"},
+ {"HPO MIX L", NULL, "HP L Amp"},
{"HPO MIX R", "HPO MIX DAC2 Switch", "DAC R2"},
{"HPO MIX R", "HPO MIX DAC1 Switch", "DAC R1"},
{"HPO MIX R", "HPO MIX HPVOL Switch", "HPOVOL R"},
+ {"HPO MIX R", NULL, "HP R Amp"},
{"LOUT MIX", "DAC L1 Switch", "DAC L1"},
{"LOUT MIX", "DAC R1 Switch", "DAC R1"},
@@ -1422,13 +1505,15 @@ static const struct snd_soc_dapm_route rt5640_dapm_routes[] = {
{"Mono MIX", "OUTVOL L Switch", "OUTVOL L"},
{"Mono MIX", "BST1 Switch", "BST1"},
- {"HP L Amp", NULL, "HPO MIX L"},
- {"HP R Amp", NULL, "HPO MIX R"},
+ {"HP Amp", NULL, "HPO MIX L"},
+ {"HP Amp", NULL, "HPO MIX R"},
- {"SPOLP", NULL, "SPOL MIX"},
- {"SPOLN", NULL, "SPOL MIX"},
- {"SPORP", NULL, "SPOR MIX"},
- {"SPORN", NULL, "SPOR MIX"},
+ {"Speaker L Playback", "Switch", "SPOL MIX"},
+ {"Speaker R Playback", "Switch", "SPOR MIX"},
+ {"SPOLP", NULL, "Speaker L Playback"},
+ {"SPOLN", NULL, "Speaker L Playback"},
+ {"SPORP", NULL, "Speaker R Playback"},
+ {"SPORN", NULL, "Speaker R Playback"},
{"SPOLP", NULL, "Improve SPK Amp Drv"},
{"SPOLN", NULL, "Improve SPK Amp Drv"},
@@ -1438,8 +1523,10 @@ static const struct snd_soc_dapm_route rt5640_dapm_routes[] = {
{"HPOL", NULL, "Improve HP Amp Drv"},
{"HPOR", NULL, "Improve HP Amp Drv"},
- {"HPOL", NULL, "HP L Amp"},
- {"HPOR", NULL, "HP R Amp"},
+ {"HP L Playback", "Switch", "HP Amp"},
+ {"HP R Playback", "Switch", "HP Amp"},
+ {"HPOL", NULL, "HP L Playback"},
+ {"HPOR", NULL, "HP R Playback"},
{"LOUTL", NULL, "LOUT MIX"},
{"LOUTR", NULL, "LOUT MIX"},
{"MONOP", NULL, "Mono MIX"},
@@ -1818,17 +1905,13 @@ static int rt5640_set_bias_level(struct snd_soc_codec *codec,
RT5640_PWR_BG | RT5640_PWR_VREF2,
RT5640_PWR_VREF1 | RT5640_PWR_MB |
RT5640_PWR_BG | RT5640_PWR_VREF2);
- mdelay(10);
+ usleep_range(10000, 15000);
snd_soc_update_bits(codec, RT5640_PWR_ANLG1,
RT5640_PWR_FV1 | RT5640_PWR_FV2,
RT5640_PWR_FV1 | RT5640_PWR_FV2);
regcache_sync(rt5640->regmap);
snd_soc_update_bits(codec, RT5640_DUMMY1,
0x0301, 0x0301);
- snd_soc_update_bits(codec, RT5640_DEPOP_M1,
- 0x001d, 0x0019);
- snd_soc_update_bits(codec, RT5640_DEPOP_M2,
- 0x2000, 0x2000);
snd_soc_update_bits(codec, RT5640_MICBIAS,
0x0030, 0x0030);
}
@@ -1872,8 +1955,6 @@ static int rt5640_probe(struct snd_soc_codec *codec)
rt5640_set_bias_level(codec, SND_SOC_BIAS_OFF);
snd_soc_update_bits(codec, RT5640_DUMMY1, 0x0301, 0x0301);
- snd_soc_update_bits(codec, RT5640_DEPOP_M1, 0x001d, 0x0019);
- snd_soc_update_bits(codec, RT5640_DEPOP_M2, 0x2000, 0x2000);
snd_soc_update_bits(codec, RT5640_MICBIAS, 0x0030, 0x0030);
snd_soc_update_bits(codec, RT5640_DSP_PATH2, 0xfc00, 0x0c00);
@@ -2095,6 +2176,8 @@ static int rt5640_i2c_probe(struct i2c_client *i2c,
regmap_update_bits(rt5640->regmap, RT5640_IN3_IN4,
RT5640_IN_DF2, RT5640_IN_DF2);
+ rt5640->hp_mute = 1;
+
ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5640,
rt5640_dai, ARRAY_SIZE(rt5640_dai));
if (ret < 0)
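
[Editor's sketch] The depop sequences above are built almost entirely from regmap_update_bits() mask/value pairs. As a reading aid, here is a minimal sketch of the read-modify-write such a call performs; the helper name is ours, and the real regmap core also handles caching and locking:

	#include <linux/regmap.h>

	/* Illustrative only: what regmap_update_bits(map, reg, mask, val)
	 * boils down to.  Only the bits selected by mask are replaced by val.
	 */
	static int update_bits_sketch(struct regmap *map, unsigned int reg,
				      unsigned int mask, unsigned int val)
	{
		unsigned int orig;
		int ret;

		ret = regmap_read(map, reg, &orig);
		if (ret)
			return ret;

		return regmap_write(map, reg, (orig & ~mask) | (val & mask));
	}
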
diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
index c48286d7118..5e8df25a13f 100644
--- a/sound/soc/codecs/rt5640.h
+++ b/sound/soc/codecs/rt5640.h
@@ -145,6 +145,8 @@
/* Index of Codec Private Register definition */
+#define RT5640_CHPUMP_INT_REG1 0x24
+#define RT5640_MAMP_INT_REG2 0x37
#define RT5640_3D_SPK 0x63
#define RT5640_WND_1 0x6c
#define RT5640_WND_2 0x6d
@@ -153,6 +155,7 @@
#define RT5640_WND_5 0x70
#define RT5640_WND_8 0x73
#define RT5640_DIP_SPK_INF 0x75
+#define RT5640_HP_DCC_INT1 0x77
#define RT5640_EQ_BW_LOP 0xa0
#define RT5640_EQ_GN_LOP 0xa1
#define RT5640_EQ_FC_BP1 0xa2
@@ -1201,6 +1204,14 @@
#define RT5640_CP_FQ2_SFT 4
#define RT5640_CP_FQ3_MASK (0x7)
#define RT5640_CP_FQ3_SFT 0
+#define RT5640_CP_FQ_1_5_KHZ 0
+#define RT5640_CP_FQ_3_KHZ 1
+#define RT5640_CP_FQ_6_KHZ 2
+#define RT5640_CP_FQ_12_KHZ 3
+#define RT5640_CP_FQ_24_KHZ 4
+#define RT5640_CP_FQ_48_KHZ 5
+#define RT5640_CP_FQ_96_KHZ 6
+#define RT5640_CP_FQ_192_KHZ 7
/* HPOUT charge pump (0x91) */
#define RT5640_OSW_L_MASK (0x1 << 11)
@@ -2087,6 +2098,7 @@ struct rt5640_priv {
int pll_out;
int dmic_en;
+ bool hp_mute;
};
#endif
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index d659d3adcfb..1f4093f3f3a 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -153,6 +153,8 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
static int power_vag_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
+ const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP;
+
switch (event) {
case SND_SOC_DAPM_POST_PMU:
snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
@@ -160,9 +162,17 @@ static int power_vag_event(struct snd_soc_dapm_widget *w,
break;
case SND_SOC_DAPM_PRE_PMD:
- snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
- SGTL5000_VAG_POWERUP, 0);
- msleep(400);
+ /*
+ * Don't clear VAG_POWERUP when both the DAC and ADC are
+ * operational, to avoid inadvertently starving whichever
+ * one keeps running.
+ */
+ if ((snd_soc_read(w->codec, SGTL5000_CHIP_ANA_POWER) &
+ mask) != mask) {
+ snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
+ SGTL5000_VAG_POWERUP, 0);
+ msleep(400);
+ }
break;
default:
break;
@@ -388,7 +398,7 @@ static const struct snd_kcontrol_new sgtl5000_snd_controls[] = {
SOC_DOUBLE("Capture Volume", SGTL5000_CHIP_ANA_ADC_CTRL, 0, 4, 0xf, 0),
SOC_SINGLE_TLV("Capture Attenuate Switch (-6dB)",
SGTL5000_CHIP_ANA_ADC_CTRL,
- 8, 2, 0, capture_6db_attenuate),
+ 8, 1, 0, capture_6db_attenuate),
SOC_SINGLE("Capture ZC Switch", SGTL5000_CHIP_ANA_CTRL, 1, 1, 0),
SOC_DOUBLE_TLV("Headphone Playback Volume",
@@ -644,16 +654,19 @@ static int sgtl5000_set_clock(struct snd_soc_codec *codec, int frame_rate)
snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
SGTL5000_PLL_POWERUP | SGTL5000_VCOAMP_POWERUP,
SGTL5000_PLL_POWERUP | SGTL5000_VCOAMP_POWERUP);
+
+ /* if using pll, clk_ctrl must be set after pll power up */
+ snd_soc_write(codec, SGTL5000_CHIP_CLK_CTRL, clk_ctl);
} else {
+ /* otherwise, clk_ctrl must be set before pll power down */
+ snd_soc_write(codec, SGTL5000_CHIP_CLK_CTRL, clk_ctl);
+
/* power down pll */
snd_soc_update_bits(codec, SGTL5000_CHIP_ANA_POWER,
SGTL5000_PLL_POWERUP | SGTL5000_VCOAMP_POWERUP,
0);
}
- /* if using pll, clk_ctrl must be set after pll power up */
- snd_soc_write(codec, SGTL5000_CHIP_CLK_CTRL, clk_ctl);
-
return 0;
}
@@ -1470,6 +1483,7 @@ static struct snd_soc_codec_driver sgtl5000_driver = {
static const struct regmap_config sgtl5000_regmap = {
.reg_bits = 16,
.val_bits = 16,
+ .reg_stride = 2,
.max_register = SGTL5000_MAX_REG_OFFSET,
.volatile_reg = sgtl5000_volatile,
@@ -1527,6 +1541,9 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
if (IS_ERR(sgtl5000->mclk)) {
ret = PTR_ERR(sgtl5000->mclk);
dev_err(&client->dev, "Failed to get mclock: %d\n", ret);
+ /* Defer the probe to see if the clk will be provided later */
+ if (ret == -ENOENT)
+ return -EPROBE_DEFER;
return ret;
}
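
[Editor's sketch] The PRE_PMD branch above only drops VAG_POWERUP when the DAC and ADC are not both still powered. The same guard as a standalone predicate, with a helper name of our own and the register defines taken from the driver's header:

	#include <sound/soc.h>

	/* Sketch only: true when it is safe to clear VAG_POWERUP, i.e. the
	 * DAC and ADC are not both left running.
	 */
	static bool vag_power_down_safe(struct snd_soc_codec *codec)
	{
		const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP;

		return (snd_soc_read(codec, SGTL5000_CHIP_ANA_POWER) & mask) != mask;
	}
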
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c
index 73e205c892a..38f3b105c17 100644
--- a/sound/soc/codecs/si476x.c
+++ b/sound/soc/codecs/si476x.c
@@ -102,6 +102,16 @@ static int si476x_codec_write(struct snd_soc_codec *codec,
return err;
}
+static const struct snd_soc_dapm_widget si476x_dapm_widgets[] = {
+SND_SOC_DAPM_OUTPUT("LOUT"),
+SND_SOC_DAPM_OUTPUT("ROUT"),
+};
+
+static const struct snd_soc_dapm_route si476x_dapm_routes[] = {
+ { "Capture", NULL, "LOUT" },
+ { "Capture", NULL, "ROUT" },
+};
+
static int si476x_codec_set_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int fmt)
{
@@ -260,6 +270,10 @@ static struct snd_soc_codec_driver soc_codec_dev_si476x = {
.probe = si476x_codec_probe,
.read = si476x_codec_read,
.write = si476x_codec_write,
+ .dapm_widgets = si476x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(si476x_dapm_widgets),
+ .dapm_routes = si476x_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(si476x_dapm_routes),
};
static int si476x_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/spdif_receiver.c b/sound/soc/codecs/spdif_receiver.c
index e9d7881ed2c..e3501f40c7b 100644
--- a/sound/soc/codecs/spdif_receiver.c
+++ b/sound/soc/codecs/spdif_receiver.c
@@ -23,11 +23,26 @@
#include <sound/initval.h>
#include <linux/of.h>
+static const struct snd_soc_dapm_widget dir_widgets[] = {
+ SND_SOC_DAPM_INPUT("spdif-in"),
+};
+
+static const struct snd_soc_dapm_route dir_routes[] = {
+ { "Capture", NULL, "spdif-in" },
+};
+
#define STUB_RATES SNDRV_PCM_RATE_8000_192000
#define STUB_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE | \
SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE)
-static struct snd_soc_codec_driver soc_codec_spdif_dir;
+static struct snd_soc_codec_driver soc_codec_spdif_dir = {
+ .dapm_widgets = dir_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(dir_widgets),
+ .dapm_routes = dir_routes,
+ .num_dapm_routes = ARRAY_SIZE(dir_routes),
+};
static struct snd_soc_dai_driver dir_stub_dai = {
.name = "dir-hifi",
diff --git a/sound/soc/codecs/spdif_transmitter.c b/sound/soc/codecs/spdif_transmitter.c
index 18280499fd5..a078aa31052 100644
--- a/sound/soc/codecs/spdif_transmitter.c
+++ b/sound/soc/codecs/spdif_transmitter.c
@@ -25,10 +25,24 @@
#define DRV_NAME "spdif-dit"
#define STUB_RATES SNDRV_PCM_RATE_8000_96000
-#define STUB_FORMATS SNDRV_PCM_FMTBIT_S16_LE
+#define STUB_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
+static const struct snd_soc_dapm_widget dit_widgets[] = {
+ SND_SOC_DAPM_OUTPUT("spdif-out"),
+};
+
+static const struct snd_soc_dapm_route dit_routes[] = {
+ { "spdif-out", NULL, "Playback" },
+};
-static struct snd_soc_codec_driver soc_codec_spdif_dit;
+static struct snd_soc_codec_driver soc_codec_spdif_dit = {
+ .dapm_widgets = dit_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(dit_widgets),
+ .dapm_routes = dit_routes,
+ .num_dapm_routes = ARRAY_SIZE(dit_routes),
+};
static struct snd_soc_dai_driver dit_stub_dai = {
.name = "dit-hifi",
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
index f8d30e5f637..492644e67ac 100644
--- a/sound/soc/codecs/ssm2602.c
+++ b/sound/soc/codecs/ssm2602.c
@@ -561,8 +561,9 @@ static int ssm2602_suspend(struct snd_soc_codec *codec)
static int ssm2602_resume(struct snd_soc_codec *codec)
{
- snd_soc_cache_sync(codec);
+ struct ssm2602_priv *ssm2602 = snd_soc_codec_get_drvdata(codec);
+ regcache_sync(ssm2602->regmap);
ssm2602_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
return 0;
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index cfb55fe35e9..06edb396e73 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -363,16 +363,18 @@ static void sta32x_watchdog(struct work_struct *work)
}
if (!sta32x->shutdown)
- schedule_delayed_work(&sta32x->watchdog_work,
- round_jiffies_relative(HZ));
+ queue_delayed_work(system_power_efficient_wq,
+ &sta32x->watchdog_work,
+ round_jiffies_relative(HZ));
}
static void sta32x_watchdog_start(struct sta32x_priv *sta32x)
{
if (sta32x->pdata->needs_esd_watchdog) {
sta32x->shutdown = 0;
- schedule_delayed_work(&sta32x->watchdog_work,
- round_jiffies_relative(HZ));
+ queue_delayed_work(system_power_efficient_wq,
+ &sta32x->watchdog_work,
+ round_jiffies_relative(HZ));
}
}
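
[Editor's sketch] Several hunks in this series replace schedule_delayed_work() with queue_delayed_work(system_power_efficient_wq, ...), which lets the scheduler place the work on any CPU when CONFIG_WQ_POWER_EFFICIENT is enabled. A minimal sketch of the rearming pattern; struct and field names are ours:

	#include <linux/kernel.h>
	#include <linux/timer.h>
	#include <linux/workqueue.h>

	struct my_priv {
		struct delayed_work watchdog_work;
		bool shutdown;
	};

	static void my_watchdog(struct work_struct *work)
	{
		struct my_priv *priv = container_of(to_delayed_work(work),
						    struct my_priv, watchdog_work);

		/* ... poll the hardware here ... */

		if (!priv->shutdown)
			queue_delayed_work(system_power_efficient_wq,
					   &priv->watchdog_work,
					   round_jiffies_relative(HZ));
	}
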
diff --git a/sound/soc/codecs/tlv320aic26.c b/sound/soc/codecs/tlv320aic26.c
index b1f6982c7c9..7b8f3d965f4 100644
--- a/sound/soc/codecs/tlv320aic26.c
+++ b/sound/soc/codecs/tlv320aic26.c
@@ -29,7 +29,7 @@ MODULE_LICENSE("GPL");
/* AIC26 driver private data */
struct aic26 {
struct spi_device *spi;
- struct snd_soc_codec codec;
+ struct snd_soc_codec *codec;
int master;
int datfm;
int mclk;
@@ -119,6 +119,22 @@ static int aic26_reg_write(struct snd_soc_codec *codec, unsigned int reg,
return 0;
}
+static const struct snd_soc_dapm_widget tlv320aic26_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("MICIN"),
+SND_SOC_DAPM_INPUT("AUX"),
+
+SND_SOC_DAPM_OUTPUT("HPL"),
+SND_SOC_DAPM_OUTPUT("HPR"),
+};
+
+static const struct snd_soc_dapm_route tlv320aic26_dapm_routes[] = {
+ { "Capture", NULL, "MICIN" },
+ { "Capture", NULL, "AUX" },
+
+ { "HPL", NULL, "Playback" },
+ { "HPR", NULL, "Playback" },
+};
+
/* ---------------------------------------------------------------------
* Digital Audio Interface Operations
*/
@@ -174,9 +190,9 @@ static int aic26_hw_params(struct snd_pcm_substream *substream,
dev_dbg(&aic26->spi->dev, "Setting PLLM to %d.%04d\n", jval, dval);
qval = 0;
reg = 0x8000 | qval << 11 | pval << 8 | jval << 2;
- aic26_reg_write(codec, AIC26_REG_PLL_PROG1, reg);
+ snd_soc_write(codec, AIC26_REG_PLL_PROG1, reg);
reg = dval << 2;
- aic26_reg_write(codec, AIC26_REG_PLL_PROG2, reg);
+ snd_soc_write(codec, AIC26_REG_PLL_PROG2, reg);
/* Audio Control 3 (master mode, fsref rate) */
reg = aic26_reg_read_cache(codec, AIC26_REG_AUDIO_CTRL3);
@@ -185,13 +201,13 @@ static int aic26_hw_params(struct snd_pcm_substream *substream,
reg |= 0x0800;
if (fsref == 48000)
reg |= 0x2000;
- aic26_reg_write(codec, AIC26_REG_AUDIO_CTRL3, reg);
+ snd_soc_write(codec, AIC26_REG_AUDIO_CTRL3, reg);
/* Audio Control 1 (FSref divisor) */
reg = aic26_reg_read_cache(codec, AIC26_REG_AUDIO_CTRL1);
reg &= ~0x0fff;
reg |= wlen | aic26->datfm | (divisor << 3) | divisor;
- aic26_reg_write(codec, AIC26_REG_AUDIO_CTRL1, reg);
+ snd_soc_write(codec, AIC26_REG_AUDIO_CTRL1, reg);
return 0;
}
@@ -212,7 +228,7 @@ static int aic26_mute(struct snd_soc_dai *dai, int mute)
reg |= 0x8080;
else
reg &= ~0x8080;
- aic26_reg_write(codec, AIC26_REG_DAC_GAIN, reg);
+ snd_soc_write(codec, AIC26_REG_DAC_GAIN, reg);
return 0;
}
@@ -330,7 +346,7 @@ static ssize_t aic26_keyclick_show(struct device *dev,
struct aic26 *aic26 = dev_get_drvdata(dev);
int val, amp, freq, len;
- val = aic26_reg_read_cache(&aic26->codec, AIC26_REG_AUDIO_CTRL2);
+ val = aic26_reg_read_cache(aic26->codec, AIC26_REG_AUDIO_CTRL2);
amp = (val >> 12) & 0x7;
freq = (125 << ((val >> 8) & 0x7)) >> 1;
len = 2 * (1 + ((val >> 4) & 0xf));
@@ -346,9 +362,9 @@ static ssize_t aic26_keyclick_set(struct device *dev,
struct aic26 *aic26 = dev_get_drvdata(dev);
int val;
- val = aic26_reg_read_cache(&aic26->codec, AIC26_REG_AUDIO_CTRL2);
+ val = aic26_reg_read_cache(aic26->codec, AIC26_REG_AUDIO_CTRL2);
val |= 0x8000;
- aic26_reg_write(&aic26->codec, AIC26_REG_AUDIO_CTRL2, val);
+ snd_soc_write(aic26->codec, AIC26_REG_AUDIO_CTRL2, val);
return count;
}
@@ -360,25 +376,26 @@ static DEVICE_ATTR(keyclick, 0644, aic26_keyclick_show, aic26_keyclick_set);
*/
static int aic26_probe(struct snd_soc_codec *codec)
{
+ struct aic26 *aic26 = dev_get_drvdata(codec->dev);
int ret, err, i, reg;
- dev_info(codec->dev, "Probing AIC26 SoC CODEC driver\n");
+ aic26->codec = codec;
/* Reset the codec to power on defaults */
- aic26_reg_write(codec, AIC26_REG_RESET, 0xBB00);
+ snd_soc_write(codec, AIC26_REG_RESET, 0xBB00);
/* Power up CODEC */
- aic26_reg_write(codec, AIC26_REG_POWER_CTRL, 0);
+ snd_soc_write(codec, AIC26_REG_POWER_CTRL, 0);
/* Audio Control 3 (master mode, fsref rate) */
- reg = aic26_reg_read(codec, AIC26_REG_AUDIO_CTRL3);
+ reg = snd_soc_read(codec, AIC26_REG_AUDIO_CTRL3);
reg &= ~0xf800;
reg |= 0x0800; /* set master mode */
- aic26_reg_write(codec, AIC26_REG_AUDIO_CTRL3, reg);
+ snd_soc_write(codec, AIC26_REG_AUDIO_CTRL3, reg);
/* Fill register cache */
for (i = 0; i < codec->driver->reg_cache_size; i++)
- aic26_reg_read(codec, i);
+ snd_soc_read(codec, i);
/* Register the sysfs files for debugging */
/* Create SysFS files */
@@ -401,6 +418,10 @@ static struct snd_soc_codec_driver aic26_soc_codec_dev = {
.write = aic26_reg_write,
.reg_cache_size = AIC26_NUM_REGS,
.reg_word_size = sizeof(u16),
+ .dapm_widgets = tlv320aic26_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(tlv320aic26_dapm_widgets),
+ .dapm_routes = tlv320aic26_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(tlv320aic26_dapm_routes),
};
/* ---------------------------------------------------------------------
diff --git a/sound/soc/codecs/tlv320aic32x4.c b/sound/soc/codecs/tlv320aic32x4.c
index 17df4e32fea..2ed57d4aa44 100644
--- a/sound/soc/codecs/tlv320aic32x4.c
+++ b/sound/soc/codecs/tlv320aic32x4.c
@@ -338,18 +338,6 @@ static inline int aic32x4_get_divs(int mclk, int rate)
return -EINVAL;
}
-static int aic32x4_add_widgets(struct snd_soc_codec *codec)
-{
- snd_soc_dapm_new_controls(&codec->dapm, aic32x4_dapm_widgets,
- ARRAY_SIZE(aic32x4_dapm_widgets));
-
- snd_soc_dapm_add_routes(&codec->dapm, aic32x4_dapm_routes,
- ARRAY_SIZE(aic32x4_dapm_routes));
-
- snd_soc_dapm_new_widgets(&codec->dapm);
- return 0;
-}
-
static int aic32x4_set_dai_sysclk(struct snd_soc_dai *codec_dai,
int clk_id, unsigned int freq, int dir)
{
@@ -683,9 +671,6 @@ static int aic32x4_probe(struct snd_soc_codec *codec)
}
aic32x4_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
- snd_soc_add_codec_controls(codec, aic32x4_snd_controls,
- ARRAY_SIZE(aic32x4_snd_controls));
- aic32x4_add_widgets(codec);
/*
* Workaround: for an unknown reason, the ADC needs to be powered up
@@ -714,6 +699,13 @@ static struct snd_soc_codec_driver soc_codec_dev_aic32x4 = {
.suspend = aic32x4_suspend,
.resume = aic32x4_resume,
.set_bias_level = aic32x4_set_bias_level,
+
+ .controls = aic32x4_snd_controls,
+ .num_controls = ARRAY_SIZE(aic32x4_snd_controls),
+ .dapm_widgets = aic32x4_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(aic32x4_dapm_widgets),
+ .dapm_routes = aic32x4_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(aic32x4_dapm_routes),
};
static int aic32x4_i2c_probe(struct i2c_client *i2c,
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index e5b92688313..6e3f269243e 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -138,8 +138,7 @@ static const u8 aic3x_reg[AIC3X_CACHEREGNUM] = {
static int snd_soc_dapm_put_volsw_aic3x(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
unsigned int reg = mc->reg;
@@ -147,10 +146,9 @@ static int snd_soc_dapm_put_volsw_aic3x(struct snd_kcontrol *kcontrol,
int max = mc->max;
unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
- unsigned short val, val_mask;
- int ret;
- struct snd_soc_dapm_path *path;
- int found = 0;
+ unsigned short val;
+ struct snd_soc_dapm_update update;
+ int connect, change;
val = (ucontrol->value.integer.value[0] & mask);
@@ -158,42 +156,26 @@ static int snd_soc_dapm_put_volsw_aic3x(struct snd_kcontrol *kcontrol,
if (val)
val = mask;
+ connect = !!val;
+
if (invert)
val = mask - val;
- val_mask = mask << shift;
- val = val << shift;
-
- mutex_lock(&widget->codec->mutex);
- if (snd_soc_test_bits(widget->codec, reg, val_mask, val)) {
- /* find dapm widget path assoc with kcontrol */
- list_for_each_entry(path, &widget->dapm->card->paths, list) {
- if (path->kcontrol != kcontrol)
- continue;
+ mask <<= shift;
+ val <<= shift;
- /* found, now check type */
- found = 1;
- if (val)
- /* new connection */
- path->connect = invert ? 0 : 1;
- else
- /* old connection must be powered down */
- path->connect = invert ? 1 : 0;
+ change = snd_soc_test_bits(codec, val, mask, reg);
+ if (change) {
+ update.kcontrol = kcontrol;
+ update.reg = reg;
+ update.mask = mask;
+ update.val = val;
- dapm_mark_dirty(path->source, "tlv320aic3x source");
- dapm_mark_dirty(path->sink, "tlv320aic3x sink");
-
- break;
- }
+ snd_soc_dapm_mixer_update_power(&codec->dapm, kcontrol, connect,
+ &update);
}
- mutex_unlock(&widget->codec->mutex);
-
- if (found)
- snd_soc_dapm_sync(widget->dapm);
-
- ret = snd_soc_update_bits_locked(widget->codec, reg, val_mask, val);
- return ret;
+ return change;
}
/*
@@ -1492,6 +1474,7 @@ static const struct i2c_device_id aic3x_i2c_id[] = {
{ "tlv320aic3x", AIC3X_MODEL_3X },
{ "tlv320aic33", AIC3X_MODEL_33 },
{ "tlv320aic3007", AIC3X_MODEL_3007 },
+ { "tlv320aic3106", AIC3X_MODEL_3X },
{ }
};
MODULE_DEVICE_TABLE(i2c, aic3x_i2c_id);
@@ -1582,6 +1565,9 @@ static int aic3x_i2c_remove(struct i2c_client *client)
#if defined(CONFIG_OF)
static const struct of_device_id tlv320aic3x_of_match[] = {
{ .compatible = "ti,tlv320aic3x", },
+ { .compatible = "ti,tlv320aic33" },
+ { .compatible = "ti,tlv320aic3007" },
+ { .compatible = "ti,tlv320aic3106" },
{},
};
MODULE_DEVICE_TABLE(of, tlv320aic3x_of_match);
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c
index 8e6e5b01602..1e3884d6b3f 100644
--- a/sound/soc/codecs/twl4030.c
+++ b/sound/soc/codecs/twl4030.c
@@ -137,8 +137,6 @@ static const u8 twl4030_reg[TWL4030_CACHEREGNUM] = {
/* codec private data */
struct twl4030_priv {
- struct snd_soc_codec codec;
-
unsigned int codec_powered;
/* reference counts of AIF/APLL users */
diff --git a/sound/soc/codecs/twl6040.c b/sound/soc/codecs/twl6040.c
index 44621ddc332..3c79dbb6c32 100644
--- a/sound/soc/codecs/twl6040.c
+++ b/sound/soc/codecs/twl6040.c
@@ -429,7 +429,8 @@ static irqreturn_t twl6040_audio_handler(int irq, void *data)
struct snd_soc_codec *codec = data;
struct twl6040_data *priv = snd_soc_codec_get_drvdata(codec);
- schedule_delayed_work(&priv->hs_jack.work, msecs_to_jiffies(200));
+ queue_delayed_work(system_power_efficient_wq,
+ &priv->hs_jack.work, msecs_to_jiffies(200));
return IRQ_HANDLED;
}
@@ -437,9 +438,7 @@ static irqreturn_t twl6040_audio_handler(int irq, void *data)
static int twl6040_soc_dapm_put_vibra_enum(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
- struct snd_soc_codec *codec = widget->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int val;
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c
index 6d0aa44c375..c94d4c1e3da 100644
--- a/sound/soc/codecs/uda134x.c
+++ b/sound/soc/codecs/uda134x.c
@@ -325,7 +325,6 @@ static int uda134x_set_dai_fmt(struct snd_soc_dai *codec_dai,
static int uda134x_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
- u8 reg;
struct uda134x_platform_data *pd = codec->control_data;
int i;
u8 *cache = codec->reg_cache;
@@ -334,23 +333,6 @@ static int uda134x_set_bias_level(struct snd_soc_codec *codec,
switch (level) {
case SND_SOC_BIAS_ON:
- /* ADC, DAC on */
- switch (pd->model) {
- case UDA134X_UDA1340:
- case UDA134X_UDA1344:
- case UDA134X_UDA1345:
- reg = uda134x_read_reg_cache(codec, UDA134X_DATA011);
- uda134x_write(codec, UDA134X_DATA011, reg | 0x03);
- break;
- case UDA134X_UDA1341:
- reg = uda134x_read_reg_cache(codec, UDA134X_STATUS1);
- uda134x_write(codec, UDA134X_STATUS1, reg | 0x03);
- break;
- default:
- printk(KERN_ERR "UDA134X SoC codec: "
- "unsupported model %d\n", pd->model);
- return -EINVAL;
- }
break;
case SND_SOC_BIAS_PREPARE:
/* power on */
@@ -362,23 +344,6 @@ static int uda134x_set_bias_level(struct snd_soc_codec *codec,
}
break;
case SND_SOC_BIAS_STANDBY:
- /* ADC, DAC power off */
- switch (pd->model) {
- case UDA134X_UDA1340:
- case UDA134X_UDA1344:
- case UDA134X_UDA1345:
- reg = uda134x_read_reg_cache(codec, UDA134X_DATA011);
- uda134x_write(codec, UDA134X_DATA011, reg & ~(0x03));
- break;
- case UDA134X_UDA1341:
- reg = uda134x_read_reg_cache(codec, UDA134X_STATUS1);
- uda134x_write(codec, UDA134X_STATUS1, reg & ~(0x03));
- break;
- default:
- printk(KERN_ERR "UDA134X SoC codec: "
- "unsupported model %d\n", pd->model);
- return -EINVAL;
- }
break;
case SND_SOC_BIAS_OFF:
/* power off */
@@ -450,6 +415,37 @@ SOC_ENUM("PCM Playback De-emphasis", uda134x_mixer_enum[1]),
SOC_SINGLE("DC Filter Enable Switch", UDA134X_STATUS0, 0, 1, 0),
};
+/* UDA1341 has the DAC/ADC power down in STATUS1 */
+static const struct snd_soc_dapm_widget uda1341_dapm_widgets[] = {
+ SND_SOC_DAPM_DAC("DAC", "Playback", UDA134X_STATUS1, 0, 0),
+ SND_SOC_DAPM_ADC("ADC", "Capture", UDA134X_STATUS1, 1, 0),
+};
+
+/* UDA1340/4/5 has the DAC/ADC power down in DATA011 */
+static const struct snd_soc_dapm_widget uda1340_dapm_widgets[] = {
+ SND_SOC_DAPM_DAC("DAC", "Playback", UDA134X_DATA011, 0, 0),
+ SND_SOC_DAPM_ADC("ADC", "Capture", UDA134X_DATA011, 1, 0),
+};
+
+/* Common DAPM widgets */
+static const struct snd_soc_dapm_widget uda134x_dapm_widgets[] = {
+ SND_SOC_DAPM_INPUT("VINL1"),
+ SND_SOC_DAPM_INPUT("VINR1"),
+ SND_SOC_DAPM_INPUT("VINL2"),
+ SND_SOC_DAPM_INPUT("VINR2"),
+ SND_SOC_DAPM_OUTPUT("VOUTL"),
+ SND_SOC_DAPM_OUTPUT("VOUTR"),
+};
+
+static const struct snd_soc_dapm_route uda134x_dapm_routes[] = {
+ { "ADC", NULL, "VINL1" },
+ { "ADC", NULL, "VINR1" },
+ { "ADC", NULL, "VINL2" },
+ { "ADC", NULL, "VINR2" },
+ { "VOUTL", NULL, "DAC" },
+ { "VOUTR", NULL, "DAC" },
+};
+
static const struct snd_soc_dai_ops uda134x_dai_ops = {
.startup = uda134x_startup,
.shutdown = uda134x_shutdown,
@@ -485,6 +481,8 @@ static int uda134x_soc_probe(struct snd_soc_codec *codec)
{
struct uda134x_priv *uda134x;
struct uda134x_platform_data *pd = codec->card->dev->platform_data;
+ const struct snd_soc_dapm_widget *widgets;
+ unsigned num_widgets;
int ret;
@@ -526,6 +524,22 @@ static int uda134x_soc_probe(struct snd_soc_codec *codec)
else
uda134x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
+ if (pd->model == UDA134X_UDA1341) {
+ widgets = uda1341_dapm_widgets;
+ num_widgets = ARRAY_SIZE(uda1341_dapm_widgets);
+ } else {
+ widgets = uda1340_dapm_widgets;
+ num_widgets = ARRAY_SIZE(uda1340_dapm_widgets);
+ }
+
+ ret = snd_soc_dapm_new_controls(&codec->dapm, widgets, num_widgets);
+ if (ret) {
+ printk(KERN_ERR "%s failed to register dapm controls: %d",
+ __func__, ret);
+ kfree(uda134x);
+ return ret;
+ }
+
switch (pd->model) {
case UDA134X_UDA1340:
case UDA134X_UDA1344:
@@ -599,6 +613,10 @@ static struct snd_soc_codec_driver soc_codec_dev_uda134x = {
.read = uda134x_read_reg_cache,
.write = uda134x_write,
.set_bias_level = uda134x_set_bias_level,
+ .dapm_widgets = uda134x_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(uda134x_dapm_widgets),
+ .dapm_routes = uda134x_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(uda134x_dapm_routes),
};
static int uda134x_codec_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wl1273.c b/sound/soc/codecs/wl1273.c
index 54cd3da09ab..b7ab2ef567c 100644
--- a/sound/soc/codecs/wl1273.c
+++ b/sound/soc/codecs/wl1273.c
@@ -290,6 +290,18 @@ static const struct snd_kcontrol_new wl1273_controls[] = {
snd_wl1273_fm_volume_get, snd_wl1273_fm_volume_put),
};
+static const struct snd_soc_dapm_widget wl1273_dapm_widgets[] = {
+ SND_SOC_DAPM_INPUT("RX"),
+
+ SND_SOC_DAPM_OUTPUT("TX"),
+};
+
+static const struct snd_soc_dapm_route wl1273_dapm_routes[] = {
+ { "Capture", NULL, "RX" },
+
+ { "TX", NULL, "Playback" },
+};
+
static int wl1273_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
@@ -483,6 +495,11 @@ static int wl1273_remove(struct snd_soc_codec *codec)
static struct snd_soc_codec_driver soc_codec_dev_wl1273 = {
.probe = wl1273_probe,
.remove = wl1273_remove,
+
+ .dapm_widgets = wl1273_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wl1273_dapm_widgets),
+ .dapm_routes = wl1273_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wl1273_dapm_routes),
};
static int wl1273_platform_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
index f5e835662cd..d5ebcb00019 100644
--- a/sound/soc/codecs/wm0010.c
+++ b/sound/soc/codecs/wm0010.c
@@ -410,39 +410,39 @@ static int wm0010_firmware_load(const char *name, struct snd_soc_codec *codec)
rec->command, rec->length);
len = rec->length + 8;
- out = kzalloc(len, GFP_KERNEL);
+ xfer = kzalloc(sizeof(*xfer), GFP_KERNEL);
+ if (!xfer) {
+ dev_err(codec->dev, "Failed to allocate xfer\n");
+ ret = -ENOMEM;
+ goto abort;
+ }
+
+ xfer->codec = codec;
+ list_add_tail(&xfer->list, &xfer_list);
+
+ out = kzalloc(len, GFP_KERNEL | GFP_DMA);
if (!out) {
dev_err(codec->dev,
"Failed to allocate RX buffer\n");
ret = -ENOMEM;
goto abort1;
}
+ xfer->t.rx_buf = out;
- img = kzalloc(len, GFP_KERNEL);
+ img = kzalloc(len, GFP_KERNEL | GFP_DMA);
if (!img) {
dev_err(codec->dev,
"Failed to allocate image buffer\n");
ret = -ENOMEM;
goto abort1;
}
+ xfer->t.tx_buf = img;
byte_swap_64((u64 *)&rec->command, img, len);
- xfer = kzalloc(sizeof(*xfer), GFP_KERNEL);
- if (!xfer) {
- dev_err(codec->dev, "Failed to allocate xfer\n");
- ret = -ENOMEM;
- goto abort1;
- }
-
- xfer->codec = codec;
- list_add_tail(&xfer->list, &xfer_list);
-
spi_message_init(&xfer->m);
xfer->m.complete = wm0010_boot_xfer_complete;
xfer->m.context = xfer;
- xfer->t.tx_buf = img;
- xfer->t.rx_buf = out;
xfer->t.len = len;
xfer->t.bits_per_word = 8;
@@ -523,14 +523,14 @@ static int wm0010_stage2_load(struct snd_soc_codec *codec)
dev_dbg(codec->dev, "Downloading %zu byte stage 2 loader\n", fw->size);
/* Copy to local buffer first as vmalloc causes problems for dma */
- img = kzalloc(fw->size, GFP_KERNEL);
+ img = kzalloc(fw->size, GFP_KERNEL | GFP_DMA);
if (!img) {
dev_err(codec->dev, "Failed to allocate image buffer\n");
ret = -ENOMEM;
goto abort2;
}
- out = kzalloc(fw->size, GFP_KERNEL);
+ out = kzalloc(fw->size, GFP_KERNEL | GFP_DMA);
if (!out) {
dev_err(codec->dev, "Failed to allocate output buffer\n");
ret = -ENOMEM;
@@ -670,14 +670,14 @@ static int wm0010_boot(struct snd_soc_codec *codec)
ret = -ENOMEM;
len = pll_rec.length + 8;
- out = kzalloc(len, GFP_KERNEL);
+ out = kzalloc(len, GFP_KERNEL | GFP_DMA);
if (!out) {
dev_err(codec->dev,
"Failed to allocate RX buffer\n");
goto abort;
}
- img_swap = kzalloc(len, GFP_KERNEL);
+ img_swap = kzalloc(len, GFP_KERNEL | GFP_DMA);
if (!img_swap) {
dev_err(codec->dev,
"Failed to allocate image buffer\n");
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index 282fd232cdf..8bbddc151aa 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -998,6 +998,8 @@ SND_SOC_DAPM_INPUT("IN2R"),
SND_SOC_DAPM_INPUT("IN3L"),
SND_SOC_DAPM_INPUT("IN3R"),
+SND_SOC_DAPM_OUTPUT("DRC1 Signal Activity"),
+
SND_SOC_DAPM_PGA_E("IN1L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1L_ENA_SHIFT,
0, NULL, 0, arizona_in_ev,
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
@@ -1421,9 +1423,6 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
{ "Tone Generator 1", NULL, "TONE" },
{ "Tone Generator 2", NULL, "TONE" },
- { "Mic Mute Mixer", NULL, "Noise Mixer" },
- { "Mic Mute Mixer", NULL, "Mic Mixer" },
-
{ "AIF1 Capture", NULL, "AIF1TX1" },
{ "AIF1 Capture", NULL, "AIF1TX2" },
{ "AIF1 Capture", NULL, "AIF1TX3" },
@@ -1499,23 +1498,6 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
{ "IN3L PGA", NULL, "IN3L" },
{ "IN3R PGA", NULL, "IN3R" },
- { "ASRC1L", NULL, "ASRC1L Input" },
- { "ASRC1R", NULL, "ASRC1R Input" },
- { "ASRC2L", NULL, "ASRC2L Input" },
- { "ASRC2R", NULL, "ASRC2R Input" },
-
- { "ISRC1DEC1", NULL, "ISRC1DEC1 Input" },
- { "ISRC1DEC2", NULL, "ISRC1DEC2 Input" },
-
- { "ISRC1INT1", NULL, "ISRC1INT1 Input" },
- { "ISRC1INT2", NULL, "ISRC1INT2 Input" },
-
- { "ISRC2DEC1", NULL, "ISRC2DEC1 Input" },
- { "ISRC2DEC2", NULL, "ISRC2DEC2 Input" },
-
- { "ISRC2INT1", NULL, "ISRC2INT1 Input" },
- { "ISRC2INT2", NULL, "ISRC2INT2 Input" },
-
ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"),
ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"),
ARIZONA_MIXER_ROUTES("OUT2L", "HPOUT2L"),
@@ -1567,22 +1549,25 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
ARIZONA_MIXER_ROUTES("LHPF3", "LHPF3"),
ARIZONA_MIXER_ROUTES("LHPF4", "LHPF4"),
- ARIZONA_MUX_ROUTES("ASRC1L"),
- ARIZONA_MUX_ROUTES("ASRC1R"),
- ARIZONA_MUX_ROUTES("ASRC2L"),
- ARIZONA_MUX_ROUTES("ASRC2R"),
+ ARIZONA_MIXER_ROUTES("Mic Mute Mixer", "Noise"),
+ ARIZONA_MIXER_ROUTES("Mic Mute Mixer", "Mic"),
- ARIZONA_MUX_ROUTES("ISRC1INT1"),
- ARIZONA_MUX_ROUTES("ISRC1INT2"),
+ ARIZONA_MUX_ROUTES("ASRC1L", "ASRC1L"),
+ ARIZONA_MUX_ROUTES("ASRC1R", "ASRC1R"),
+ ARIZONA_MUX_ROUTES("ASRC2L", "ASRC2L"),
+ ARIZONA_MUX_ROUTES("ASRC2R", "ASRC2R"),
- ARIZONA_MUX_ROUTES("ISRC1DEC1"),
- ARIZONA_MUX_ROUTES("ISRC1DEC2"),
+ ARIZONA_MUX_ROUTES("ISRC1INT1", "ISRC1INT1"),
+ ARIZONA_MUX_ROUTES("ISRC1INT2", "ISRC1INT2"),
- ARIZONA_MUX_ROUTES("ISRC2INT1"),
- ARIZONA_MUX_ROUTES("ISRC2INT2"),
+ ARIZONA_MUX_ROUTES("ISRC1DEC1", "ISRC1DEC1"),
+ ARIZONA_MUX_ROUTES("ISRC1DEC2", "ISRC1DEC2"),
- ARIZONA_MUX_ROUTES("ISRC2DEC1"),
- ARIZONA_MUX_ROUTES("ISRC2DEC2"),
+ ARIZONA_MUX_ROUTES("ISRC2INT1", "ISRC2INT1"),
+ ARIZONA_MUX_ROUTES("ISRC2INT2", "ISRC2INT2"),
+
+ ARIZONA_MUX_ROUTES("ISRC2DEC1", "ISRC2DEC1"),
+ ARIZONA_MUX_ROUTES("ISRC2DEC2", "ISRC2DEC2"),
ARIZONA_DSP_ROUTES("DSP1"),
@@ -1614,6 +1599,9 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = {
{ "SPKDAT1R", NULL, "OUT5R" },
{ "MICSUPP", NULL, "SYSCLK" },
+
+ { "DRC1 Signal Activity", NULL, "DRC1L" },
+ { "DRC1 Signal Activity", NULL, "DRC1R" },
};
static int wm5102_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
@@ -1781,6 +1769,7 @@ static int wm5102_codec_probe(struct snd_soc_codec *codec)
return ret;
arizona_init_spk(codec);
+ arizona_init_gpio(codec);
snd_soc_dapm_disable_pin(&codec->dapm, "HAPTICS");
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 2e7cb4ba161..bbd64384ca1 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -58,14 +58,10 @@ static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0);
SOC_SINGLE(name " NG SPKDAT2R Switch", base, 11, 1, 0)
static const struct snd_kcontrol_new wm5110_snd_controls[] = {
-SOC_SINGLE("IN1 High Performance Switch", ARIZONA_IN1L_CONTROL,
- ARIZONA_IN1_OSR_SHIFT, 1, 0),
-SOC_SINGLE("IN2 High Performance Switch", ARIZONA_IN2L_CONTROL,
- ARIZONA_IN2_OSR_SHIFT, 1, 0),
-SOC_SINGLE("IN3 High Performance Switch", ARIZONA_IN3L_CONTROL,
- ARIZONA_IN3_OSR_SHIFT, 1, 0),
-SOC_SINGLE("IN4 High Performance Switch", ARIZONA_IN4L_CONTROL,
- ARIZONA_IN4_OSR_SHIFT, 1, 0),
+SOC_ENUM("IN1 OSR", arizona_in_dmic_osr[0]),
+SOC_ENUM("IN2 OSR", arizona_in_dmic_osr[1]),
+SOC_ENUM("IN3 OSR", arizona_in_dmic_osr[2]),
+SOC_ENUM("IN4 OSR", arizona_in_dmic_osr[3]),
SOC_SINGLE_RANGE_TLV("IN1L Volume", ARIZONA_IN1L_CONTROL,
ARIZONA_IN1L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),
@@ -432,6 +428,9 @@ SND_SOC_DAPM_INPUT("IN3R"),
SND_SOC_DAPM_INPUT("IN4L"),
SND_SOC_DAPM_INPUT("IN4R"),
+SND_SOC_DAPM_OUTPUT("DRC1 Signal Activity"),
+SND_SOC_DAPM_OUTPUT("DRC2 Signal Activity"),
+
SND_SOC_DAPM_PGA_E("IN1L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1L_ENA_SHIFT,
0, NULL, 0, arizona_in_ev,
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
@@ -842,9 +841,6 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
{ "Tone Generator 1", NULL, "TONE" },
{ "Tone Generator 2", NULL, "TONE" },
- { "Mic Mute Mixer", NULL, "Noise Mixer" },
- { "Mic Mute Mixer", NULL, "Mic Mixer" },
-
{ "AIF1 Capture", NULL, "AIF1TX1" },
{ "AIF1 Capture", NULL, "AIF1TX2" },
{ "AIF1 Capture", NULL, "AIF1TX3" },
@@ -979,10 +975,13 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
ARIZONA_MIXER_ROUTES("LHPF3", "LHPF3"),
ARIZONA_MIXER_ROUTES("LHPF4", "LHPF4"),
- ARIZONA_MUX_ROUTES("ASRC1L"),
- ARIZONA_MUX_ROUTES("ASRC1R"),
- ARIZONA_MUX_ROUTES("ASRC2L"),
- ARIZONA_MUX_ROUTES("ASRC2R"),
+ ARIZONA_MIXER_ROUTES("Mic Mute Mixer", "Noise"),
+ ARIZONA_MIXER_ROUTES("Mic Mute Mixer", "Mic"),
+
+ ARIZONA_MUX_ROUTES("ASRC1L", "ASRC1L"),
+ ARIZONA_MUX_ROUTES("ASRC1R", "ASRC1R"),
+ ARIZONA_MUX_ROUTES("ASRC2L", "ASRC2L"),
+ ARIZONA_MUX_ROUTES("ASRC2R", "ASRC2R"),
{ "HPOUT1L", NULL, "OUT1L" },
{ "HPOUT1R", NULL, "OUT1R" },
@@ -1006,6 +1005,11 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
{ "SPKDAT2R", NULL, "OUT6R" },
{ "MICSUPP", NULL, "SYSCLK" },
+
+ { "DRC1 Signal Activity", NULL, "DRC1L" },
+ { "DRC1 Signal Activity", NULL, "DRC1R" },
+ { "DRC2 Signal Activity", NULL, "DRC2L" },
+ { "DRC2 Signal Activity", NULL, "DRC2R" },
};
static int wm5110_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
@@ -1170,6 +1174,7 @@ static int wm5110_codec_probe(struct snd_soc_codec *codec)
return ret;
arizona_init_spk(codec);
+ arizona_init_gpio(codec);
snd_soc_dapm_disable_pin(&codec->dapm, "HAPTICS");
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 0e8b3aaf6c8..af1318ddb06 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -1301,7 +1301,8 @@ static irqreturn_t wm8350_hpl_jack_handler(int irq, void *data)
if (device_may_wakeup(wm8350->dev))
pm_wakeup_event(wm8350->dev, 250);
- schedule_delayed_work(&priv->hpl.work, msecs_to_jiffies(200));
+ queue_delayed_work(system_power_efficient_wq,
+ &priv->hpl.work, msecs_to_jiffies(200));
return IRQ_HANDLED;
}
@@ -1318,7 +1319,8 @@ static irqreturn_t wm8350_hpr_jack_handler(int irq, void *data)
if (device_may_wakeup(wm8350->dev))
pm_wakeup_event(wm8350->dev, 250);
- schedule_delayed_work(&priv->hpr.work, msecs_to_jiffies(200));
+ queue_delayed_work(system_power_efficient_wq,
+ &priv->hpr.work, msecs_to_jiffies(200));
return IRQ_HANDLED;
}
diff --git a/sound/soc/codecs/wm8727.c b/sound/soc/codecs/wm8727.c
index 462f5e4d5c0..7b1a6d5c11c 100644
--- a/sound/soc/codecs/wm8727.c
+++ b/sound/soc/codecs/wm8727.c
@@ -23,6 +23,16 @@
#include <sound/initval.h>
#include <sound/soc.h>
+static const struct snd_soc_dapm_widget wm8727_dapm_widgets[] = {
+SND_SOC_DAPM_OUTPUT("VOUTL"),
+SND_SOC_DAPM_OUTPUT("VOUTR"),
+};
+
+static const struct snd_soc_dapm_route wm8727_dapm_routes[] = {
+ { "VOUTL", NULL, "Playback" },
+ { "VOUTR", NULL, "Playback" },
+};
+
/*
* Note this is a simple chip with no configuration interface, sample rate is
* determined automatically by examining the Master clock and Bit clock ratios
@@ -43,7 +53,12 @@ static struct snd_soc_dai_driver wm8727_dai = {
},
};
-static struct snd_soc_codec_driver soc_codec_dev_wm8727;
+static struct snd_soc_codec_driver soc_codec_dev_wm8727 = {
+ .dapm_widgets = wm8727_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8727_dapm_widgets),
+ .dapm_routes = wm8727_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8727_dapm_routes),
+};
static int wm8727_probe(struct platform_device *pdev)
{
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 5276062d6c7..456bb8c6d75 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -45,6 +45,7 @@ static const char *wm8731_supply_names[WM8731_NUM_SUPPLIES] = {
struct wm8731_priv {
struct regmap *regmap;
struct regulator_bulk_data supplies[WM8731_NUM_SUPPLIES];
+ const struct snd_pcm_hw_constraint_list *constraints;
unsigned int sysclk;
int sysclk_type;
int playback_fs;
@@ -290,6 +291,36 @@ static const struct _coeff_div coeff_div[] = {
{12000000, 88200, 136, 0xf, 0x1, 0x1},
};
+/* rates constraints */
+static const unsigned int wm8731_rates_12000000[] = {
+ 8000, 32000, 44100, 48000, 96000, 88200,
+};
+
+static const unsigned int wm8731_rates_12288000_18432000[] = {
+ 8000, 32000, 48000, 96000,
+};
+
+static const unsigned int wm8731_rates_11289600_16934400[] = {
+ 8000, 44100, 88200,
+};
+
+static const struct snd_pcm_hw_constraint_list wm8731_constraints_12000000 = {
+ .list = wm8731_rates_12000000,
+ .count = ARRAY_SIZE(wm8731_rates_12000000),
+};
+
+static const
+struct snd_pcm_hw_constraint_list wm8731_constraints_12288000_18432000 = {
+ .list = wm8731_rates_12288000_18432000,
+ .count = ARRAY_SIZE(wm8731_rates_12288000_18432000),
+};
+
+static const
+struct snd_pcm_hw_constraint_list wm8731_constraints_11289600_16934400 = {
+ .list = wm8731_rates_11289600_16934400,
+ .count = ARRAY_SIZE(wm8731_rates_11289600_16934400),
+};
+
static inline int get_coeff(int mclk, int rate)
{
int i;
@@ -362,17 +393,26 @@ static int wm8731_set_dai_sysclk(struct snd_soc_dai *codec_dai,
}
switch (freq) {
- case 11289600:
+ case 0:
+ wm8731->constraints = NULL;
+ break;
case 12000000:
+ wm8731->constraints = &wm8731_constraints_12000000;
+ break;
case 12288000:
- case 16934400:
case 18432000:
- wm8731->sysclk = freq;
+ wm8731->constraints = &wm8731_constraints_12288000_18432000;
+ break;
+ case 16934400:
+ case 11289600:
+ wm8731->constraints = &wm8731_constraints_11289600_16934400;
break;
default:
return -EINVAL;
}
+ wm8731->sysclk = freq;
+
snd_soc_dapm_sync(&codec->dapm);
return 0;
@@ -475,12 +515,26 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec,
return 0;
}
+static int wm8731_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(dai->codec);
+
+ if (wm8731->constraints)
+ snd_pcm_hw_constraint_list(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_RATE,
+ wm8731->constraints);
+
+ return 0;
+}
+
#define WM8731_RATES SNDRV_PCM_RATE_8000_96000
#define WM8731_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S24_LE)
static const struct snd_soc_dai_ops wm8731_dai_ops = {
+ .startup = wm8731_startup,
.hw_params = wm8731_hw_params,
.digital_mute = wm8731_mute,
.set_sysclk = wm8731_set_dai_sysclk,
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
index 0a4ab4c423d..d96ebf52d95 100644
--- a/sound/soc/codecs/wm8753.c
+++ b/sound/soc/codecs/wm8753.c
@@ -1456,8 +1456,9 @@ static int wm8753_resume(struct snd_soc_codec *codec)
if (codec->dapm.suspend_bias_level == SND_SOC_BIAS_ON) {
wm8753_set_bias_level(codec, SND_SOC_BIAS_PREPARE);
codec->dapm.bias_level = SND_SOC_BIAS_ON;
- schedule_delayed_work(&codec->dapm.delayed_work,
- msecs_to_jiffies(caps_charge));
+ queue_delayed_work(system_power_efficient_wq,
+ &codec->dapm.delayed_work,
+ msecs_to_jiffies(caps_charge));
}
return 0;
diff --git a/sound/soc/codecs/wm8782.c b/sound/soc/codecs/wm8782.c
index f1fdbf63abb..8092495605c 100644
--- a/sound/soc/codecs/wm8782.c
+++ b/sound/soc/codecs/wm8782.c
@@ -26,6 +26,16 @@
#include <sound/initval.h>
#include <sound/soc.h>
+static const struct snd_soc_dapm_widget wm8782_dapm_widgets[] = {
+SND_SOC_DAPM_INPUT("AINL"),
+SND_SOC_DAPM_INPUT("AINR"),
+};
+
+static const struct snd_soc_dapm_route wm8782_dapm_routes[] = {
+ { "Capture", NULL, "AINL" },
+ { "Capture", NULL, "AINR" },
+};
+
static struct snd_soc_dai_driver wm8782_dai = {
.name = "wm8782",
.capture = {
@@ -40,7 +50,12 @@ static struct snd_soc_dai_driver wm8782_dai = {
},
};
-static struct snd_soc_codec_driver soc_codec_dev_wm8782;
+static struct snd_soc_codec_driver soc_codec_dev_wm8782 = {
+ .dapm_widgets = wm8782_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8782_dapm_widgets),
+ .dapm_routes = wm8782_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8782_dapm_routes),
+};
static int wm8782_probe(struct platform_device *pdev)
{
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index fa24cedee68..eebcb1da3b7 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -364,9 +364,7 @@ static void wm8903_seq_notifier(struct snd_soc_dapm_context *dapm,
static int wm8903_class_w_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
- struct snd_soc_codec *codec = widget->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
struct wm8903_priv *wm8903 = snd_soc_codec_get_drvdata(codec);
u16 reg;
int ret;
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 4c9fb142cb2..4dfa8dceeab 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -1012,7 +1012,7 @@ static const struct soc_enum liner_enum =
SOC_ENUM_SINGLE(WM8904_ANALOGUE_OUT12_ZC, 0, 2, out_mux_text);
static const struct snd_kcontrol_new liner_mux =
- SOC_DAPM_ENUM("LINEL Mux", liner_enum);
+ SOC_DAPM_ENUM("LINER Mux", liner_enum);
static const char *sidetone_text[] = {
"None", "Left", "Right"
@@ -1202,7 +1202,6 @@ static int wm8904_add_widgets(struct snd_soc_codec *codec)
break;
}
- snd_soc_dapm_new_widgets(dapm);
return 0;
}
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 0a4ffdd1d2a..f156010e52b 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -263,8 +263,8 @@ SOC_SINGLE("ALC Attack", WM8960_ALC3, 0, 15, 0),
SOC_SINGLE("Noise Gate Threshold", WM8960_NOISEG, 3, 31, 0),
SOC_SINGLE("Noise Gate Switch", WM8960_NOISEG, 0, 1, 0),
-SOC_DOUBLE_R("ADC PCM Capture Volume", WM8960_LINPATH, WM8960_RINPATH,
- 0, 127, 0),
+SOC_DOUBLE_R_TLV("ADC PCM Capture Volume", WM8960_LADC, WM8960_RADC,
+ 0, 255, 0, adc_tlv),
SOC_SINGLE_TLV("Left Output Mixer Boost Bypass Volume",
WM8960_BYPASS1, 4, 7, 1, bypass_tlv),
@@ -857,9 +857,9 @@ static int wm8960_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
if (pll_div.k) {
reg |= 0x20;
- snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 18) & 0x3f);
- snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 9) & 0x1ff);
- snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0x1ff);
+ snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 16) & 0xff);
+ snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 8) & 0xff);
+ snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0xff);
}
snd_soc_write(codec, WM8960_PLL1, reg);
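
[Editor's sketch] The wm8960 PLL fix rewrites the fractional coefficient as three byte-wide register fields; assuming K is a 24-bit value stored 8 bits per register, which is what the corrected shifts and masks imply, the split looks like this (helper name is ours):

	/* Sketch: K = 0x3126E9 would write 0x31, 0x26 and 0xE9 respectively. */
	static void wm8960_write_pll_k_sketch(struct snd_soc_codec *codec, u32 k)
	{
		snd_soc_write(codec, WM8960_PLL2, (k >> 16) & 0xff);	/* K[23:16] */
		snd_soc_write(codec, WM8960_PLL3, (k >> 8) & 0xff);	/* K[15:8]  */
		snd_soc_write(codec, WM8960_PLL4, k & 0xff);		/* K[7:0]   */
	}
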
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index e2de9ecfd64..11d80f3b613 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2621,8 +2621,6 @@ static int wm8962_set_dai_sysclk(struct snd_soc_dai *dai, int clk_id,
wm8962->sysclk_rate = freq;
- wm8962_configure_bclk(codec);
-
return 0;
}
@@ -3046,8 +3044,9 @@ static irqreturn_t wm8962_irq(int irq, void *data)
pm_wakeup_event(dev, 300);
- schedule_delayed_work(&wm8962->mic_work,
- msecs_to_jiffies(250));
+ queue_delayed_work(system_power_efficient_wq,
+ &wm8962->mic_work,
+ msecs_to_jiffies(250));
}
return IRQ_HANDLED;
@@ -3175,7 +3174,7 @@ static ssize_t wm8962_beep_set(struct device *dev,
long int time;
int ret;
- ret = strict_strtol(buf, 10, &time);
+ ret = kstrtol(buf, 10, &time);
if (ret != 0)
return ret;
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index ba832b77c54..86426a117b0 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -819,8 +819,9 @@ static int clk_sys_event(struct snd_soc_dapm_widget *w,
* don't want false reports.
*/
if (wm8994->jackdet && !wm8994->clk_has_run) {
- schedule_delayed_work(&wm8994->jackdet_bootstrap,
- msecs_to_jiffies(1000));
+ queue_delayed_work(system_power_efficient_wq,
+ &wm8994->jackdet_bootstrap,
+ msecs_to_jiffies(1000));
wm8994->clk_has_run = true;
}
break;
@@ -1432,14 +1433,12 @@ SOC_DAPM_SINGLE("AIF1.1 Switch", WM8994_DAC2_RIGHT_MIXER_ROUTING,
#define WM8994_CLASS_W_SWITCH(xname, reg, shift, max, invert) \
SOC_SINGLE_EXT(xname, reg, shift, max, invert, \
- snd_soc_get_volsw, wm8994_put_class_w)
+ snd_soc_dapm_get_volsw, wm8994_put_class_w)
static int wm8994_put_class_w(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
- struct snd_soc_dapm_widget *w = wlist->widgets[0];
- struct snd_soc_codec *codec = w->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
int ret;
ret = snd_soc_dapm_put_volsw(kcontrol, ucontrol);
@@ -3487,7 +3486,8 @@ static irqreturn_t wm8994_mic_irq(int irq, void *data)
pm_wakeup_event(codec->dev, 300);
- schedule_delayed_work(&priv->mic_work, msecs_to_jiffies(250));
+ queue_delayed_work(system_power_efficient_wq,
+ &priv->mic_work, msecs_to_jiffies(250));
return IRQ_HANDLED;
}
@@ -3575,8 +3575,9 @@ static void wm8958_mic_id(void *data, u16 status)
/* If nothing present then clear our statuses */
dev_dbg(codec->dev, "Detected open circuit\n");
- schedule_delayed_work(&wm8994->open_circuit_work,
- msecs_to_jiffies(2500));
+ queue_delayed_work(system_power_efficient_wq,
+ &wm8994->open_circuit_work,
+ msecs_to_jiffies(2500));
return;
}
@@ -3690,8 +3691,9 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data)
WM1811_JACKDET_DB, 0);
delay = control->pdata.micdet_delay;
- schedule_delayed_work(&wm8994->mic_work,
- msecs_to_jiffies(delay));
+ queue_delayed_work(system_power_efficient_wq,
+ &wm8994->mic_work,
+ msecs_to_jiffies(delay));
} else {
dev_dbg(codec->dev, "Jack not detected\n");
@@ -3936,8 +3938,9 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data)
id_delay = wm8994->wm8994->pdata.mic_id_delay;
if (wm8994->mic_detecting)
- schedule_delayed_work(&wm8994->mic_complete_work,
- msecs_to_jiffies(id_delay));
+ queue_delayed_work(system_power_efficient_wq,
+ &wm8994->mic_complete_work,
+ msecs_to_jiffies(id_delay));
else
wm8958_button_det(codec, reg);
@@ -4010,9 +4013,6 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
wm8994->micdet_irq = control->pdata.micdet_irq;
- pm_runtime_enable(codec->dev);
- pm_runtime_idle(codec->dev);
-
/* By default use idle_bias_off, will override for WM8994 */
codec->dapm.idle_bias_off = 1;
@@ -4385,8 +4385,6 @@ static int wm8994_codec_remove(struct snd_soc_codec *codec)
wm8994_set_bias_level(codec, SND_SOC_BIAS_OFF);
- pm_runtime_disable(codec->dev);
-
for (i = 0; i < ARRAY_SIZE(wm8994->fll_locked); i++)
wm8994_free_irq(wm8994->wm8994, WM8994_IRQ_FLL1_LOCK + i,
&wm8994->fll_locked[i]);
@@ -4445,6 +4443,9 @@ static int wm8994_probe(struct platform_device *pdev)
wm8994->wm8994 = dev_get_drvdata(pdev->dev.parent);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_idle(&pdev->dev);
+
return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm8994,
wm8994_dai, ARRAY_SIZE(wm8994_dai));
}
@@ -4452,6 +4453,8 @@ static int wm8994_probe(struct platform_device *pdev)
static int wm8994_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
return 0;
}
diff --git a/sound/soc/codecs/wm8995.c b/sound/soc/codecs/wm8995.c
index 90a65c42754..da2899e6c40 100644
--- a/sound/soc/codecs/wm8995.c
+++ b/sound/soc/codecs/wm8995.c
@@ -549,12 +549,9 @@ static int check_clk_sys(struct snd_soc_dapm_widget *source,
static int wm8995_put_class_w(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
- struct snd_soc_dapm_widget *w = wlist->widgets[0];
- struct snd_soc_codec *codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
int ret;
- codec = w->codec;
ret = snd_soc_dapm_put_volsw(kcontrol, ucontrol);
wm8995_update_class_w(codec);
return ret;
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
new file mode 100644
index 00000000000..6ec3de3efa4
--- /dev/null
+++ b/sound/soc/codecs/wm8997.c
@@ -0,0 +1,1175 @@
+/*
+ * wm8997.c -- WM8997 ALSA SoC Audio driver
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Charles Keepax <ckeepax@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+#include <sound/jack.h>
+#include <sound/initval.h>
+#include <sound/tlv.h>
+
+#include <linux/mfd/arizona/core.h>
+#include <linux/mfd/arizona/registers.h>
+
+#include "arizona.h"
+#include "wm8997.h"
+
+struct wm8997_priv {
+ struct arizona_priv core;
+ struct arizona_fll fll[2];
+};
+
+static DECLARE_TLV_DB_SCALE(ana_tlv, 0, 100, 0);
+static DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0);
+static DECLARE_TLV_DB_SCALE(digital_tlv, -6400, 50, 0);
+static DECLARE_TLV_DB_SCALE(noise_tlv, 0, 600, 0);
+static DECLARE_TLV_DB_SCALE(ng_tlv, -10200, 600, 0);
+
+static const struct reg_default wm8997_sysclk_reva_patch[] = {
+ { 0x301D, 0x7B15 },
+ { 0x301B, 0x0050 },
+ { 0x305D, 0x7B17 },
+ { 0x305B, 0x0050 },
+ { 0x3001, 0x08FE },
+ { 0x3003, 0x00F4 },
+ { 0x3041, 0x08FF },
+ { 0x3043, 0x0005 },
+ { 0x3020, 0x0225 },
+ { 0x3021, 0x0A00 },
+ { 0x3022, 0xE24D },
+ { 0x3023, 0x0800 },
+ { 0x3024, 0xE24D },
+ { 0x3025, 0xF000 },
+ { 0x3060, 0x0226 },
+ { 0x3061, 0x0A00 },
+ { 0x3062, 0xE252 },
+ { 0x3063, 0x0800 },
+ { 0x3064, 0xE252 },
+ { 0x3065, 0xF000 },
+ { 0x3116, 0x022B },
+ { 0x3117, 0xFA00 },
+ { 0x3110, 0x246C },
+ { 0x3111, 0x0A03 },
+ { 0x3112, 0x246E },
+ { 0x3113, 0x0A03 },
+ { 0x3114, 0x2470 },
+ { 0x3115, 0x0A03 },
+ { 0x3126, 0x246C },
+ { 0x3127, 0x0A02 },
+ { 0x3128, 0x246E },
+ { 0x3129, 0x0A02 },
+ { 0x312A, 0x2470 },
+ { 0x312B, 0xFA02 },
+ { 0x3125, 0x0800 },
+};
+
+static int wm8997_sysclk_ev(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_codec *codec = w->codec;
+ struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
+ struct regmap *regmap = codec->control_data;
+ const struct reg_default *patch = NULL;
+ int i, patch_size;
+
+ switch (arizona->rev) {
+ case 0:
+ patch = wm8997_sysclk_reva_patch;
+ patch_size = ARRAY_SIZE(wm8997_sysclk_reva_patch);
+ break;
+ default:
+ break;
+ }
+
+ switch (event) {
+ case SND_SOC_DAPM_POST_PMU:
+ if (patch)
+ for (i = 0; i < patch_size; i++)
+ regmap_write(regmap, patch[i].reg,
+ patch[i].def);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static const char *wm8997_osr_text[] = {
+ "Low power", "Normal", "High performance",
+};
+
+static const unsigned int wm8997_osr_val[] = {
+ 0x0, 0x3, 0x5,
+};
+
+static const struct soc_enum wm8997_hpout_osr[] = {
+ SOC_VALUE_ENUM_SINGLE(ARIZONA_OUTPUT_PATH_CONFIG_1L,
+ ARIZONA_OUT1_OSR_SHIFT, 0x7, 3,
+ wm8997_osr_text, wm8997_osr_val),
+ SOC_VALUE_ENUM_SINGLE(ARIZONA_OUTPUT_PATH_CONFIG_3L,
+ ARIZONA_OUT3_OSR_SHIFT, 0x7, 3,
+ wm8997_osr_text, wm8997_osr_val),
+};
+
+#define WM8997_NG_SRC(name, base) \
+ SOC_SINGLE(name " NG HPOUT1L Switch", base, 0, 1, 0), \
+ SOC_SINGLE(name " NG HPOUT1R Switch", base, 1, 1, 0), \
+ SOC_SINGLE(name " NG EPOUT Switch", base, 4, 1, 0), \
+ SOC_SINGLE(name " NG SPKOUT Switch", base, 6, 1, 0), \
+ SOC_SINGLE(name " NG SPKDAT1L Switch", base, 8, 1, 0), \
+ SOC_SINGLE(name " NG SPKDAT1R Switch", base, 9, 1, 0)
+
+static const struct snd_kcontrol_new wm8997_snd_controls[] = {
+SOC_SINGLE("IN1 High Performance Switch", ARIZONA_IN1L_CONTROL,
+ ARIZONA_IN1_OSR_SHIFT, 1, 0),
+SOC_SINGLE("IN2 High Performance Switch", ARIZONA_IN2L_CONTROL,
+ ARIZONA_IN2_OSR_SHIFT, 1, 0),
+
+SOC_SINGLE_RANGE_TLV("IN1L Volume", ARIZONA_IN1L_CONTROL,
+ ARIZONA_IN1L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),
+SOC_SINGLE_RANGE_TLV("IN1R Volume", ARIZONA_IN1R_CONTROL,
+ ARIZONA_IN1R_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),
+SOC_SINGLE_RANGE_TLV("IN2L Volume", ARIZONA_IN2L_CONTROL,
+ ARIZONA_IN2L_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),
+SOC_SINGLE_RANGE_TLV("IN2R Volume", ARIZONA_IN2R_CONTROL,
+ ARIZONA_IN2R_PGA_VOL_SHIFT, 0x40, 0x5f, 0, ana_tlv),
+
+SOC_SINGLE_TLV("IN1L Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_1L,
+ ARIZONA_IN1L_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv),
+SOC_SINGLE_TLV("IN1R Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_1R,
+ ARIZONA_IN1R_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv),
+SOC_SINGLE_TLV("IN2L Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_2L,
+ ARIZONA_IN2L_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv),
+SOC_SINGLE_TLV("IN2R Digital Volume", ARIZONA_ADC_DIGITAL_VOLUME_2R,
+ ARIZONA_IN2R_DIG_VOL_SHIFT, 0xbf, 0, digital_tlv),
+
+SOC_ENUM("Input Ramp Up", arizona_in_vi_ramp),
+SOC_ENUM("Input Ramp Down", arizona_in_vd_ramp),
+
+ARIZONA_MIXER_CONTROLS("EQ1", ARIZONA_EQ1MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("EQ2", ARIZONA_EQ2MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("EQ3", ARIZONA_EQ3MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("EQ4", ARIZONA_EQ4MIX_INPUT_1_SOURCE),
+
+SND_SOC_BYTES_MASK("EQ1 Coefficeints", ARIZONA_EQ1_1, 21,
+ ARIZONA_EQ1_ENA_MASK),
+SND_SOC_BYTES_MASK("EQ2 Coefficeints", ARIZONA_EQ2_1, 21,
+ ARIZONA_EQ2_ENA_MASK),
+SND_SOC_BYTES_MASK("EQ3 Coefficeints", ARIZONA_EQ3_1, 21,
+ ARIZONA_EQ3_ENA_MASK),
+SND_SOC_BYTES_MASK("EQ4 Coefficeints", ARIZONA_EQ4_1, 21,
+ ARIZONA_EQ4_ENA_MASK),
+
+SOC_SINGLE_TLV("EQ1 B1 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B1_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ1 B2 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B2_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ1 B3 Volume", ARIZONA_EQ1_1, ARIZONA_EQ1_B3_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ1 B4 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B4_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ1 B5 Volume", ARIZONA_EQ1_2, ARIZONA_EQ1_B5_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+
+SOC_SINGLE_TLV("EQ2 B1 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B1_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ2 B2 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B2_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ2 B3 Volume", ARIZONA_EQ2_1, ARIZONA_EQ2_B3_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ2 B4 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B4_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ2 B5 Volume", ARIZONA_EQ2_2, ARIZONA_EQ2_B5_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+
+SOC_SINGLE_TLV("EQ3 B1 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B1_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ3 B2 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B2_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ3 B3 Volume", ARIZONA_EQ3_1, ARIZONA_EQ3_B3_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ3 B4 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B4_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ3 B5 Volume", ARIZONA_EQ3_2, ARIZONA_EQ3_B5_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+
+SOC_SINGLE_TLV("EQ4 B1 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B1_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ4 B2 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B2_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ4 B3 Volume", ARIZONA_EQ4_1, ARIZONA_EQ4_B3_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ4 B4 Volume", ARIZONA_EQ4_2, ARIZONA_EQ4_B4_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+SOC_SINGLE_TLV("EQ4 B5 Volume", ARIZONA_EQ4_2, ARIZONA_EQ4_B5_GAIN_SHIFT,
+ 24, 0, eq_tlv),
+
+ARIZONA_MIXER_CONTROLS("DRC1L", ARIZONA_DRC1LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("DRC1R", ARIZONA_DRC1RMIX_INPUT_1_SOURCE),
+
+SND_SOC_BYTES_MASK("DRC1", ARIZONA_DRC1_CTRL1, 5,
+ ARIZONA_DRC1R_ENA | ARIZONA_DRC1L_ENA),
+
+ARIZONA_MIXER_CONTROLS("LHPF1", ARIZONA_HPLP1MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("LHPF2", ARIZONA_HPLP2MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("LHPF3", ARIZONA_HPLP3MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("LHPF4", ARIZONA_HPLP4MIX_INPUT_1_SOURCE),
+
+SOC_ENUM("LHPF1 Mode", arizona_lhpf1_mode),
+SOC_ENUM("LHPF2 Mode", arizona_lhpf2_mode),
+SOC_ENUM("LHPF3 Mode", arizona_lhpf3_mode),
+SOC_ENUM("LHPF4 Mode", arizona_lhpf4_mode),
+
+SND_SOC_BYTES("LHPF1 Coefficients", ARIZONA_HPLPF1_2, 1),
+SND_SOC_BYTES("LHPF2 Coefficients", ARIZONA_HPLPF2_2, 1),
+SND_SOC_BYTES("LHPF3 Coefficients", ARIZONA_HPLPF3_2, 1),
+SND_SOC_BYTES("LHPF4 Coefficients", ARIZONA_HPLPF4_2, 1),
+
+SOC_VALUE_ENUM("ISRC1 FSL", arizona_isrc_fsl[0]),
+SOC_VALUE_ENUM("ISRC2 FSL", arizona_isrc_fsl[1]),
+
+ARIZONA_MIXER_CONTROLS("Mic", ARIZONA_MICMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("Noise", ARIZONA_NOISEMIX_INPUT_1_SOURCE),
+
+SOC_SINGLE_TLV("Noise Generator Volume", ARIZONA_COMFORT_NOISE_GENERATOR,
+ ARIZONA_NOISE_GEN_GAIN_SHIFT, 0x16, 0, noise_tlv),
+
+ARIZONA_MIXER_CONTROLS("HPOUT1L", ARIZONA_OUT1LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("HPOUT1R", ARIZONA_OUT1RMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("EPOUT", ARIZONA_OUT3LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SPKOUT", ARIZONA_OUT4LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SPKDAT1L", ARIZONA_OUT5LMIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SPKDAT1R", ARIZONA_OUT5RMIX_INPUT_1_SOURCE),
+
+SOC_SINGLE("Speaker High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_4L,
+ ARIZONA_OUT4_OSR_SHIFT, 1, 0),
+SOC_SINGLE("SPKDAT1 High Performance Switch", ARIZONA_OUTPUT_PATH_CONFIG_5L,
+ ARIZONA_OUT5_OSR_SHIFT, 1, 0),
+
+SOC_DOUBLE_R("HPOUT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_1L,
+ ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_MUTE_SHIFT, 1, 1),
+SOC_SINGLE("EPOUT Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_3L,
+ ARIZONA_OUT3L_MUTE_SHIFT, 1, 1),
+SOC_SINGLE("Speaker Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_4L,
+ ARIZONA_OUT4L_MUTE_SHIFT, 1, 1),
+SOC_DOUBLE_R("SPKDAT1 Digital Switch", ARIZONA_DAC_DIGITAL_VOLUME_5L,
+ ARIZONA_DAC_DIGITAL_VOLUME_5R, ARIZONA_OUT5L_MUTE_SHIFT, 1, 1),
+
+SOC_DOUBLE_R_TLV("HPOUT1 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_1L,
+ ARIZONA_DAC_DIGITAL_VOLUME_1R, ARIZONA_OUT1L_VOL_SHIFT,
+ 0xbf, 0, digital_tlv),
+SOC_SINGLE_TLV("EPOUT Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_3L,
+ ARIZONA_OUT3L_VOL_SHIFT, 0xbf, 0, digital_tlv),
+SOC_SINGLE_TLV("Speaker Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_4L,
+ ARIZONA_OUT4L_VOL_SHIFT, 0xbf, 0, digital_tlv),
+SOC_DOUBLE_R_TLV("SPKDAT1 Digital Volume", ARIZONA_DAC_DIGITAL_VOLUME_5L,
+ ARIZONA_DAC_DIGITAL_VOLUME_5R, ARIZONA_OUT5L_VOL_SHIFT,
+ 0xbf, 0, digital_tlv),
+
+SOC_VALUE_ENUM("HPOUT1 OSR", wm8997_hpout_osr[0]),
+SOC_VALUE_ENUM("EPOUT OSR", wm8997_hpout_osr[1]),
+
+SOC_ENUM("Output Ramp Up", arizona_out_vi_ramp),
+SOC_ENUM("Output Ramp Down", arizona_out_vd_ramp),
+
+SOC_DOUBLE("SPKDAT1 Switch", ARIZONA_PDM_SPK1_CTRL_1, ARIZONA_SPK1L_MUTE_SHIFT,
+ ARIZONA_SPK1R_MUTE_SHIFT, 1, 1),
+
+SOC_SINGLE("Noise Gate Switch", ARIZONA_NOISE_GATE_CONTROL,
+ ARIZONA_NGATE_ENA_SHIFT, 1, 0),
+SOC_SINGLE_TLV("Noise Gate Threshold Volume", ARIZONA_NOISE_GATE_CONTROL,
+ ARIZONA_NGATE_THR_SHIFT, 7, 1, ng_tlv),
+SOC_ENUM("Noise Gate Hold", arizona_ng_hold),
+
+WM8997_NG_SRC("HPOUT1L", ARIZONA_NOISE_GATE_SELECT_1L),
+WM8997_NG_SRC("HPOUT1R", ARIZONA_NOISE_GATE_SELECT_1R),
+WM8997_NG_SRC("EPOUT", ARIZONA_NOISE_GATE_SELECT_3L),
+WM8997_NG_SRC("SPKOUT", ARIZONA_NOISE_GATE_SELECT_4L),
+WM8997_NG_SRC("SPKDAT1L", ARIZONA_NOISE_GATE_SELECT_5L),
+WM8997_NG_SRC("SPKDAT1R", ARIZONA_NOISE_GATE_SELECT_5R),
+
+ARIZONA_MIXER_CONTROLS("AIF1TX1", ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX2", ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX3", ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX4", ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX5", ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX6", ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX7", ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF1TX8", ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE),
+
+ARIZONA_MIXER_CONTROLS("AIF2TX1", ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("AIF2TX2", ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE),
+
+ARIZONA_MIXER_CONTROLS("SLIMTX1", ARIZONA_SLIMTX1MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SLIMTX2", ARIZONA_SLIMTX2MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SLIMTX3", ARIZONA_SLIMTX3MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SLIMTX4", ARIZONA_SLIMTX4MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SLIMTX5", ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SLIMTX6", ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SLIMTX7", ARIZONA_SLIMTX7MIX_INPUT_1_SOURCE),
+ARIZONA_MIXER_CONTROLS("SLIMTX8", ARIZONA_SLIMTX8MIX_INPUT_1_SOURCE),
+};
+
+ARIZONA_MIXER_ENUMS(EQ1, ARIZONA_EQ1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(EQ2, ARIZONA_EQ2MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(EQ3, ARIZONA_EQ3MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(EQ4, ARIZONA_EQ4MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(DRC1L, ARIZONA_DRC1LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(DRC1R, ARIZONA_DRC1RMIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(LHPF1, ARIZONA_HPLP1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(LHPF2, ARIZONA_HPLP2MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(LHPF3, ARIZONA_HPLP3MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(LHPF4, ARIZONA_HPLP4MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(Mic, ARIZONA_MICMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(Noise, ARIZONA_NOISEMIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(PWM1, ARIZONA_PWM1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(PWM2, ARIZONA_PWM2MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(OUT1L, ARIZONA_OUT1LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(OUT1R, ARIZONA_OUT1RMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(OUT3, ARIZONA_OUT3LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SPKOUT, ARIZONA_OUT4LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SPKDAT1L, ARIZONA_OUT5LMIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SPKDAT1R, ARIZONA_OUT5RMIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(AIF1TX1, ARIZONA_AIF1TX1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX2, ARIZONA_AIF1TX2MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX3, ARIZONA_AIF1TX3MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX4, ARIZONA_AIF1TX4MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX5, ARIZONA_AIF1TX5MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX6, ARIZONA_AIF1TX6MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX7, ARIZONA_AIF1TX7MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF1TX8, ARIZONA_AIF1TX8MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(AIF2TX1, ARIZONA_AIF2TX1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(AIF2TX2, ARIZONA_AIF2TX2MIX_INPUT_1_SOURCE);
+
+ARIZONA_MIXER_ENUMS(SLIMTX1, ARIZONA_SLIMTX1MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SLIMTX2, ARIZONA_SLIMTX2MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SLIMTX3, ARIZONA_SLIMTX3MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SLIMTX4, ARIZONA_SLIMTX4MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SLIMTX5, ARIZONA_SLIMTX5MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SLIMTX6, ARIZONA_SLIMTX6MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SLIMTX7, ARIZONA_SLIMTX7MIX_INPUT_1_SOURCE);
+ARIZONA_MIXER_ENUMS(SLIMTX8, ARIZONA_SLIMTX8MIX_INPUT_1_SOURCE);
+
+ARIZONA_MUX_ENUMS(ISRC1INT1, ARIZONA_ISRC1INT1MIX_INPUT_1_SOURCE);
+ARIZONA_MUX_ENUMS(ISRC1INT2, ARIZONA_ISRC1INT2MIX_INPUT_1_SOURCE);
+
+ARIZONA_MUX_ENUMS(ISRC1DEC1, ARIZONA_ISRC1DEC1MIX_INPUT_1_SOURCE);
+ARIZONA_MUX_ENUMS(ISRC1DEC2, ARIZONA_ISRC1DEC2MIX_INPUT_1_SOURCE);
+
+ARIZONA_MUX_ENUMS(ISRC2INT1, ARIZONA_ISRC2INT1MIX_INPUT_1_SOURCE);
+ARIZONA_MUX_ENUMS(ISRC2INT2, ARIZONA_ISRC2INT2MIX_INPUT_1_SOURCE);
+
+ARIZONA_MUX_ENUMS(ISRC2DEC1, ARIZONA_ISRC2DEC1MIX_INPUT_1_SOURCE);
+ARIZONA_MUX_ENUMS(ISRC2DEC2, ARIZONA_ISRC2DEC2MIX_INPUT_1_SOURCE);
+
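+/* Output paths that can be looped back as the AEC (echo cancellation) reference */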
+static const char *wm8997_aec_loopback_texts[] = {
+ "HPOUT1L", "HPOUT1R", "EPOUT", "SPKOUT", "SPKDAT1L", "SPKDAT1R",
+};
+
+static const unsigned int wm8997_aec_loopback_values[] = {
+ 0, 1, 4, 6, 8, 9,
+};
+
+static const struct soc_enum wm8997_aec_loopback =
+ SOC_VALUE_ENUM_SINGLE(ARIZONA_DAC_AEC_CONTROL_1,
+ ARIZONA_AEC_LOOPBACK_SRC_SHIFT, 0xf,
+ ARRAY_SIZE(wm8997_aec_loopback_texts),
+ wm8997_aec_loopback_texts,
+ wm8997_aec_loopback_values);
+
+static const struct snd_kcontrol_new wm8997_aec_loopback_mux =
+ SOC_DAPM_VALUE_ENUM("AEC Loopback", wm8997_aec_loopback);
+
+static const struct snd_soc_dapm_widget wm8997_dapm_widgets[] = {
+SND_SOC_DAPM_SUPPLY("SYSCLK", ARIZONA_SYSTEM_CLOCK_1, ARIZONA_SYSCLK_ENA_SHIFT,
+ 0, wm8997_sysclk_ev, SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_SUPPLY("ASYNCCLK", ARIZONA_ASYNC_CLOCK_1,
+ ARIZONA_ASYNC_CLK_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("OPCLK", ARIZONA_OUTPUT_SYSTEM_CLOCK,
+ ARIZONA_OPCLK_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("ASYNCOPCLK", ARIZONA_OUTPUT_ASYNC_CLOCK,
+ ARIZONA_OPCLK_ASYNC_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_REGULATOR_SUPPLY("DBVDD2", 0, 0),
+SND_SOC_DAPM_REGULATOR_SUPPLY("CPVDD", 20, 0),
+SND_SOC_DAPM_REGULATOR_SUPPLY("MICVDD", 0, SND_SOC_DAPM_REGULATOR_BYPASS),
+SND_SOC_DAPM_REGULATOR_SUPPLY("SPKVDD", 0, 0),
+
+SND_SOC_DAPM_SIGGEN("TONE"),
+SND_SOC_DAPM_SIGGEN("NOISE"),
+SND_SOC_DAPM_SIGGEN("HAPTICS"),
+
+SND_SOC_DAPM_INPUT("IN1L"),
+SND_SOC_DAPM_INPUT("IN1R"),
+SND_SOC_DAPM_INPUT("IN2L"),
+SND_SOC_DAPM_INPUT("IN2R"),
+
+SND_SOC_DAPM_PGA_E("IN1L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1L_ENA_SHIFT,
+ 0, NULL, 0, arizona_in_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("IN1R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN1R_ENA_SHIFT,
+ 0, NULL, 0, arizona_in_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("IN2L PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN2L_ENA_SHIFT,
+ 0, NULL, 0, arizona_in_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("IN2R PGA", ARIZONA_INPUT_ENABLES, ARIZONA_IN2R_ENA_SHIFT,
+ 0, NULL, 0, arizona_in_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD |
+ SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU),
+
+SND_SOC_DAPM_SUPPLY("MICBIAS1", ARIZONA_MIC_BIAS_CTRL_1,
+ ARIZONA_MICB1_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("MICBIAS2", ARIZONA_MIC_BIAS_CTRL_2,
+ ARIZONA_MICB2_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_SUPPLY("MICBIAS3", ARIZONA_MIC_BIAS_CTRL_3,
+ ARIZONA_MICB3_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("Noise Generator", ARIZONA_COMFORT_NOISE_GENERATOR,
+ ARIZONA_NOISE_GEN_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("Tone Generator 1", ARIZONA_TONE_GENERATOR_1,
+ ARIZONA_TONE1_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("Tone Generator 2", ARIZONA_TONE_GENERATOR_1,
+ ARIZONA_TONE2_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("Mic Mute Mixer", ARIZONA_MIC_NOISE_MIX_CONTROL_1,
+ ARIZONA_MICMUTE_MIX_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("EQ1", ARIZONA_EQ1_1, ARIZONA_EQ1_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("EQ2", ARIZONA_EQ2_1, ARIZONA_EQ2_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("EQ3", ARIZONA_EQ3_1, ARIZONA_EQ3_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("EQ4", ARIZONA_EQ4_1, ARIZONA_EQ4_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("DRC1L", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1L_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("DRC1R", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1R_ENA_SHIFT, 0,
+ NULL, 0),
+
+SND_SOC_DAPM_PGA("LHPF1", ARIZONA_HPLPF1_1, ARIZONA_LHPF1_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("LHPF2", ARIZONA_HPLPF2_1, ARIZONA_LHPF2_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("LHPF3", ARIZONA_HPLPF3_1, ARIZONA_LHPF3_ENA_SHIFT, 0,
+ NULL, 0),
+SND_SOC_DAPM_PGA("LHPF4", ARIZONA_HPLPF4_1, ARIZONA_LHPF4_ENA_SHIFT, 0,
+ NULL, 0),
+
+SND_SOC_DAPM_PGA("PWM1 Driver", ARIZONA_PWM_DRIVE_1, ARIZONA_PWM1_ENA_SHIFT,
+ 0, NULL, 0),
+SND_SOC_DAPM_PGA("PWM2 Driver", ARIZONA_PWM_DRIVE_1, ARIZONA_PWM2_ENA_SHIFT,
+ 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("ISRC1INT1", ARIZONA_ISRC_1_CTRL_3,
+ ARIZONA_ISRC1_INT0_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("ISRC1INT2", ARIZONA_ISRC_1_CTRL_3,
+ ARIZONA_ISRC1_INT1_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("ISRC1DEC1", ARIZONA_ISRC_1_CTRL_3,
+ ARIZONA_ISRC1_DEC0_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("ISRC1DEC2", ARIZONA_ISRC_1_CTRL_3,
+ ARIZONA_ISRC1_DEC1_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("ISRC2INT1", ARIZONA_ISRC_2_CTRL_3,
+ ARIZONA_ISRC2_INT0_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("ISRC2INT2", ARIZONA_ISRC_2_CTRL_3,
+ ARIZONA_ISRC2_INT1_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_PGA("ISRC2DEC1", ARIZONA_ISRC_2_CTRL_3,
+ ARIZONA_ISRC2_DEC0_ENA_SHIFT, 0, NULL, 0),
+SND_SOC_DAPM_PGA("ISRC2DEC2", ARIZONA_ISRC_2_CTRL_3,
+ ARIZONA_ISRC2_DEC1_ENA_SHIFT, 0, NULL, 0),
+
+SND_SOC_DAPM_AIF_OUT("AIF1TX1", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX2", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX2_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX3", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX3_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX4", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX4_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX5", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX5_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX6", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX6_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX7", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX7_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF1TX8", NULL, 0,
+ ARIZONA_AIF1_TX_ENABLES, ARIZONA_AIF1TX8_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_AIF_IN("AIF1RX1", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX2", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX2_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX3", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX3_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX4", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX4_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX5", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX5_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX6", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX6_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX7", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX7_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF1RX8", NULL, 0,
+ ARIZONA_AIF1_RX_ENABLES, ARIZONA_AIF1RX8_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_AIF_OUT("AIF2TX1", NULL, 0,
+ ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("AIF2TX2", NULL, 0,
+ ARIZONA_AIF2_TX_ENABLES, ARIZONA_AIF2TX2_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_AIF_IN("AIF2RX1", NULL, 0,
+ ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("AIF2RX2", NULL, 0,
+ ARIZONA_AIF2_RX_ENABLES, ARIZONA_AIF2RX2_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_AIF_OUT("SLIMTX1", NULL, 0,
+ ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
+ ARIZONA_SLIMTX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("SLIMTX2", NULL, 0,
+ ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
+ ARIZONA_SLIMTX2_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("SLIMTX3", NULL, 0,
+ ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
+ ARIZONA_SLIMTX3_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("SLIMTX4", NULL, 0,
+ ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
+ ARIZONA_SLIMTX4_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("SLIMTX5", NULL, 0,
+ ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
+ ARIZONA_SLIMTX5_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("SLIMTX6", NULL, 0,
+ ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
+ ARIZONA_SLIMTX6_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("SLIMTX7", NULL, 0,
+ ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
+ ARIZONA_SLIMTX7_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_OUT("SLIMTX8", NULL, 0,
+ ARIZONA_SLIMBUS_TX_CHANNEL_ENABLE,
+ ARIZONA_SLIMTX8_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_AIF_IN("SLIMRX1", NULL, 0,
+ ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
+ ARIZONA_SLIMRX1_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("SLIMRX2", NULL, 0,
+ ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
+ ARIZONA_SLIMRX2_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("SLIMRX3", NULL, 0,
+ ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
+ ARIZONA_SLIMRX3_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("SLIMRX4", NULL, 0,
+ ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
+ ARIZONA_SLIMRX4_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("SLIMRX5", NULL, 0,
+ ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
+ ARIZONA_SLIMRX5_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("SLIMRX6", NULL, 0,
+ ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
+ ARIZONA_SLIMRX6_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("SLIMRX7", NULL, 0,
+ ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
+ ARIZONA_SLIMRX7_ENA_SHIFT, 0),
+SND_SOC_DAPM_AIF_IN("SLIMRX8", NULL, 0,
+ ARIZONA_SLIMBUS_RX_CHANNEL_ENABLE,
+ ARIZONA_SLIMRX8_ENA_SHIFT, 0),
+
+SND_SOC_DAPM_VALUE_MUX("AEC Loopback", ARIZONA_DAC_AEC_CONTROL_1,
+ ARIZONA_AEC_LOOPBACK_ENA_SHIFT, 0,
+ &wm8997_aec_loopback_mux),
+
+SND_SOC_DAPM_PGA_E("OUT1L", SND_SOC_NOPM,
+ ARIZONA_OUT1L_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT1R", SND_SOC_NOPM,
+ ARIZONA_OUT1R_ENA_SHIFT, 0, NULL, 0, arizona_hp_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT3L", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT3L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT5L", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT5L_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+SND_SOC_DAPM_PGA_E("OUT5R", ARIZONA_OUTPUT_ENABLES_1,
+ ARIZONA_OUT5R_ENA_SHIFT, 0, NULL, 0, arizona_out_ev,
+ SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU),
+
+ARIZONA_MIXER_WIDGETS(EQ1, "EQ1"),
+ARIZONA_MIXER_WIDGETS(EQ2, "EQ2"),
+ARIZONA_MIXER_WIDGETS(EQ3, "EQ3"),
+ARIZONA_MIXER_WIDGETS(EQ4, "EQ4"),
+
+ARIZONA_MIXER_WIDGETS(DRC1L, "DRC1L"),
+ARIZONA_MIXER_WIDGETS(DRC1R, "DRC1R"),
+
+ARIZONA_MIXER_WIDGETS(LHPF1, "LHPF1"),
+ARIZONA_MIXER_WIDGETS(LHPF2, "LHPF2"),
+ARIZONA_MIXER_WIDGETS(LHPF3, "LHPF3"),
+ARIZONA_MIXER_WIDGETS(LHPF4, "LHPF4"),
+
+ARIZONA_MIXER_WIDGETS(Mic, "Mic"),
+ARIZONA_MIXER_WIDGETS(Noise, "Noise"),
+
+ARIZONA_MIXER_WIDGETS(PWM1, "PWM1"),
+ARIZONA_MIXER_WIDGETS(PWM2, "PWM2"),
+
+ARIZONA_MIXER_WIDGETS(OUT1L, "HPOUT1L"),
+ARIZONA_MIXER_WIDGETS(OUT1R, "HPOUT1R"),
+ARIZONA_MIXER_WIDGETS(OUT3, "EPOUT"),
+ARIZONA_MIXER_WIDGETS(SPKOUT, "SPKOUT"),
+ARIZONA_MIXER_WIDGETS(SPKDAT1L, "SPKDAT1L"),
+ARIZONA_MIXER_WIDGETS(SPKDAT1R, "SPKDAT1R"),
+
+ARIZONA_MIXER_WIDGETS(AIF1TX1, "AIF1TX1"),
+ARIZONA_MIXER_WIDGETS(AIF1TX2, "AIF1TX2"),
+ARIZONA_MIXER_WIDGETS(AIF1TX3, "AIF1TX3"),
+ARIZONA_MIXER_WIDGETS(AIF1TX4, "AIF1TX4"),
+ARIZONA_MIXER_WIDGETS(AIF1TX5, "AIF1TX5"),
+ARIZONA_MIXER_WIDGETS(AIF1TX6, "AIF1TX6"),
+ARIZONA_MIXER_WIDGETS(AIF1TX7, "AIF1TX7"),
+ARIZONA_MIXER_WIDGETS(AIF1TX8, "AIF1TX8"),
+
+ARIZONA_MIXER_WIDGETS(AIF2TX1, "AIF2TX1"),
+ARIZONA_MIXER_WIDGETS(AIF2TX2, "AIF2TX2"),
+
+ARIZONA_MIXER_WIDGETS(SLIMTX1, "SLIMTX1"),
+ARIZONA_MIXER_WIDGETS(SLIMTX2, "SLIMTX2"),
+ARIZONA_MIXER_WIDGETS(SLIMTX3, "SLIMTX3"),
+ARIZONA_MIXER_WIDGETS(SLIMTX4, "SLIMTX4"),
+ARIZONA_MIXER_WIDGETS(SLIMTX5, "SLIMTX5"),
+ARIZONA_MIXER_WIDGETS(SLIMTX6, "SLIMTX6"),
+ARIZONA_MIXER_WIDGETS(SLIMTX7, "SLIMTX7"),
+ARIZONA_MIXER_WIDGETS(SLIMTX8, "SLIMTX8"),
+
+ARIZONA_MUX_WIDGETS(ISRC1DEC1, "ISRC1DEC1"),
+ARIZONA_MUX_WIDGETS(ISRC1DEC2, "ISRC1DEC2"),
+
+ARIZONA_MUX_WIDGETS(ISRC1INT1, "ISRC1INT1"),
+ARIZONA_MUX_WIDGETS(ISRC1INT2, "ISRC1INT2"),
+
+ARIZONA_MUX_WIDGETS(ISRC2DEC1, "ISRC2DEC1"),
+ARIZONA_MUX_WIDGETS(ISRC2DEC2, "ISRC2DEC2"),
+
+ARIZONA_MUX_WIDGETS(ISRC2INT1, "ISRC2INT1"),
+ARIZONA_MUX_WIDGETS(ISRC2INT2, "ISRC2INT2"),
+
+SND_SOC_DAPM_OUTPUT("HPOUT1L"),
+SND_SOC_DAPM_OUTPUT("HPOUT1R"),
+SND_SOC_DAPM_OUTPUT("EPOUTN"),
+SND_SOC_DAPM_OUTPUT("EPOUTP"),
+SND_SOC_DAPM_OUTPUT("SPKOUTN"),
+SND_SOC_DAPM_OUTPUT("SPKOUTP"),
+SND_SOC_DAPM_OUTPUT("SPKDAT1L"),
+SND_SOC_DAPM_OUTPUT("SPKDAT1R"),
+
+SND_SOC_DAPM_OUTPUT("MICSUPP"),
+};
+
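+/* Route entries connecting every selectable source to the named mixer/mux input */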
+#define ARIZONA_MIXER_INPUT_ROUTES(name) \
+ { name, "Noise Generator", "Noise Generator" }, \
+ { name, "Tone Generator 1", "Tone Generator 1" }, \
+ { name, "Tone Generator 2", "Tone Generator 2" }, \
+ { name, "Haptics", "HAPTICS" }, \
+ { name, "AEC", "AEC Loopback" }, \
+ { name, "IN1L", "IN1L PGA" }, \
+ { name, "IN1R", "IN1R PGA" }, \
+ { name, "IN2L", "IN2L PGA" }, \
+ { name, "IN2R", "IN2R PGA" }, \
+ { name, "Mic Mute Mixer", "Mic Mute Mixer" }, \
+ { name, "AIF1RX1", "AIF1RX1" }, \
+ { name, "AIF1RX2", "AIF1RX2" }, \
+ { name, "AIF1RX3", "AIF1RX3" }, \
+ { name, "AIF1RX4", "AIF1RX4" }, \
+ { name, "AIF1RX5", "AIF1RX5" }, \
+ { name, "AIF1RX6", "AIF1RX6" }, \
+ { name, "AIF1RX7", "AIF1RX7" }, \
+ { name, "AIF1RX8", "AIF1RX8" }, \
+ { name, "AIF2RX1", "AIF2RX1" }, \
+ { name, "AIF2RX2", "AIF2RX2" }, \
+ { name, "SLIMRX1", "SLIMRX1" }, \
+ { name, "SLIMRX2", "SLIMRX2" }, \
+ { name, "SLIMRX3", "SLIMRX3" }, \
+ { name, "SLIMRX4", "SLIMRX4" }, \
+ { name, "SLIMRX5", "SLIMRX5" }, \
+ { name, "SLIMRX6", "SLIMRX6" }, \
+ { name, "SLIMRX7", "SLIMRX7" }, \
+ { name, "SLIMRX8", "SLIMRX8" }, \
+ { name, "EQ1", "EQ1" }, \
+ { name, "EQ2", "EQ2" }, \
+ { name, "EQ3", "EQ3" }, \
+ { name, "EQ4", "EQ4" }, \
+ { name, "DRC1L", "DRC1L" }, \
+ { name, "DRC1R", "DRC1R" }, \
+ { name, "LHPF1", "LHPF1" }, \
+ { name, "LHPF2", "LHPF2" }, \
+ { name, "LHPF3", "LHPF3" }, \
+ { name, "LHPF4", "LHPF4" }, \
+ { name, "ISRC1DEC1", "ISRC1DEC1" }, \
+ { name, "ISRC1DEC2", "ISRC1DEC2" }, \
+ { name, "ISRC1INT1", "ISRC1INT1" }, \
+ { name, "ISRC1INT2", "ISRC1INT2" }, \
+ { name, "ISRC2DEC1", "ISRC2DEC1" }, \
+ { name, "ISRC2DEC2", "ISRC2DEC2" }, \
+ { name, "ISRC2INT1", "ISRC2INT1" }, \
+ { name, "ISRC2INT2", "ISRC2INT2" }
+
+static const struct snd_soc_dapm_route wm8997_dapm_routes[] = {
+ { "AIF2 Capture", NULL, "DBVDD2" },
+ { "AIF2 Playback", NULL, "DBVDD2" },
+
+ { "OUT1L", NULL, "CPVDD" },
+ { "OUT1R", NULL, "CPVDD" },
+ { "OUT3L", NULL, "CPVDD" },
+
+ { "OUT4L", NULL, "SPKVDD" },
+
+ { "OUT1L", NULL, "SYSCLK" },
+ { "OUT1R", NULL, "SYSCLK" },
+ { "OUT3L", NULL, "SYSCLK" },
+ { "OUT4L", NULL, "SYSCLK" },
+
+ { "IN1L", NULL, "SYSCLK" },
+ { "IN1R", NULL, "SYSCLK" },
+ { "IN2L", NULL, "SYSCLK" },
+ { "IN2R", NULL, "SYSCLK" },
+
+ { "MICBIAS1", NULL, "MICVDD" },
+ { "MICBIAS2", NULL, "MICVDD" },
+ { "MICBIAS3", NULL, "MICVDD" },
+
+ { "Noise Generator", NULL, "SYSCLK" },
+ { "Tone Generator 1", NULL, "SYSCLK" },
+ { "Tone Generator 2", NULL, "SYSCLK" },
+
+ { "Noise Generator", NULL, "NOISE" },
+ { "Tone Generator 1", NULL, "TONE" },
+ { "Tone Generator 2", NULL, "TONE" },
+
+ { "AIF1 Capture", NULL, "AIF1TX1" },
+ { "AIF1 Capture", NULL, "AIF1TX2" },
+ { "AIF1 Capture", NULL, "AIF1TX3" },
+ { "AIF1 Capture", NULL, "AIF1TX4" },
+ { "AIF1 Capture", NULL, "AIF1TX5" },
+ { "AIF1 Capture", NULL, "AIF1TX6" },
+ { "AIF1 Capture", NULL, "AIF1TX7" },
+ { "AIF1 Capture", NULL, "AIF1TX8" },
+
+ { "AIF1RX1", NULL, "AIF1 Playback" },
+ { "AIF1RX2", NULL, "AIF1 Playback" },
+ { "AIF1RX3", NULL, "AIF1 Playback" },
+ { "AIF1RX4", NULL, "AIF1 Playback" },
+ { "AIF1RX5", NULL, "AIF1 Playback" },
+ { "AIF1RX6", NULL, "AIF1 Playback" },
+ { "AIF1RX7", NULL, "AIF1 Playback" },
+ { "AIF1RX8", NULL, "AIF1 Playback" },
+
+ { "AIF2 Capture", NULL, "AIF2TX1" },
+ { "AIF2 Capture", NULL, "AIF2TX2" },
+
+ { "AIF2RX1", NULL, "AIF2 Playback" },
+ { "AIF2RX2", NULL, "AIF2 Playback" },
+
+ { "Slim1 Capture", NULL, "SLIMTX1" },
+ { "Slim1 Capture", NULL, "SLIMTX2" },
+ { "Slim1 Capture", NULL, "SLIMTX3" },
+ { "Slim1 Capture", NULL, "SLIMTX4" },
+
+ { "SLIMRX1", NULL, "Slim1 Playback" },
+ { "SLIMRX2", NULL, "Slim1 Playback" },
+ { "SLIMRX3", NULL, "Slim1 Playback" },
+ { "SLIMRX4", NULL, "Slim1 Playback" },
+
+ { "Slim2 Capture", NULL, "SLIMTX5" },
+ { "Slim2 Capture", NULL, "SLIMTX6" },
+
+ { "SLIMRX5", NULL, "Slim2 Playback" },
+ { "SLIMRX6", NULL, "Slim2 Playback" },
+
+ { "Slim3 Capture", NULL, "SLIMTX7" },
+ { "Slim3 Capture", NULL, "SLIMTX8" },
+
+ { "SLIMRX7", NULL, "Slim3 Playback" },
+ { "SLIMRX8", NULL, "Slim3 Playback" },
+
+ { "AIF1 Playback", NULL, "SYSCLK" },
+ { "AIF2 Playback", NULL, "SYSCLK" },
+ { "Slim1 Playback", NULL, "SYSCLK" },
+ { "Slim2 Playback", NULL, "SYSCLK" },
+ { "Slim3 Playback", NULL, "SYSCLK" },
+
+ { "AIF1 Capture", NULL, "SYSCLK" },
+ { "AIF2 Capture", NULL, "SYSCLK" },
+ { "Slim1 Capture", NULL, "SYSCLK" },
+ { "Slim2 Capture", NULL, "SYSCLK" },
+ { "Slim3 Capture", NULL, "SYSCLK" },
+
+ { "IN1L PGA", NULL, "IN1L" },
+ { "IN1R PGA", NULL, "IN1R" },
+
+ { "IN2L PGA", NULL, "IN2L" },
+ { "IN2R PGA", NULL, "IN2R" },
+
+ ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"),
+ ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"),
+ ARIZONA_MIXER_ROUTES("OUT3L", "EPOUT"),
+
+ ARIZONA_MIXER_ROUTES("OUT4L", "SPKOUT"),
+ ARIZONA_MIXER_ROUTES("OUT5L", "SPKDAT1L"),
+ ARIZONA_MIXER_ROUTES("OUT5R", "SPKDAT1R"),
+
+ ARIZONA_MIXER_ROUTES("PWM1 Driver", "PWM1"),
+ ARIZONA_MIXER_ROUTES("PWM2 Driver", "PWM2"),
+
+ ARIZONA_MIXER_ROUTES("AIF1TX1", "AIF1TX1"),
+ ARIZONA_MIXER_ROUTES("AIF1TX2", "AIF1TX2"),
+ ARIZONA_MIXER_ROUTES("AIF1TX3", "AIF1TX3"),
+ ARIZONA_MIXER_ROUTES("AIF1TX4", "AIF1TX4"),
+ ARIZONA_MIXER_ROUTES("AIF1TX5", "AIF1TX5"),
+ ARIZONA_MIXER_ROUTES("AIF1TX6", "AIF1TX6"),
+ ARIZONA_MIXER_ROUTES("AIF1TX7", "AIF1TX7"),
+ ARIZONA_MIXER_ROUTES("AIF1TX8", "AIF1TX8"),
+
+ ARIZONA_MIXER_ROUTES("AIF2TX1", "AIF2TX1"),
+ ARIZONA_MIXER_ROUTES("AIF2TX2", "AIF2TX2"),
+
+ ARIZONA_MIXER_ROUTES("SLIMTX1", "SLIMTX1"),
+ ARIZONA_MIXER_ROUTES("SLIMTX2", "SLIMTX2"),
+ ARIZONA_MIXER_ROUTES("SLIMTX3", "SLIMTX3"),
+ ARIZONA_MIXER_ROUTES("SLIMTX4", "SLIMTX4"),
+ ARIZONA_MIXER_ROUTES("SLIMTX5", "SLIMTX5"),
+ ARIZONA_MIXER_ROUTES("SLIMTX6", "SLIMTX6"),
+ ARIZONA_MIXER_ROUTES("SLIMTX7", "SLIMTX7"),
+ ARIZONA_MIXER_ROUTES("SLIMTX8", "SLIMTX8"),
+
+ ARIZONA_MIXER_ROUTES("EQ1", "EQ1"),
+ ARIZONA_MIXER_ROUTES("EQ2", "EQ2"),
+ ARIZONA_MIXER_ROUTES("EQ3", "EQ3"),
+ ARIZONA_MIXER_ROUTES("EQ4", "EQ4"),
+
+ ARIZONA_MIXER_ROUTES("DRC1L", "DRC1L"),
+ ARIZONA_MIXER_ROUTES("DRC1R", "DRC1R"),
+
+ ARIZONA_MIXER_ROUTES("LHPF1", "LHPF1"),
+ ARIZONA_MIXER_ROUTES("LHPF2", "LHPF2"),
+ ARIZONA_MIXER_ROUTES("LHPF3", "LHPF3"),
+ ARIZONA_MIXER_ROUTES("LHPF4", "LHPF4"),
+
+ ARIZONA_MIXER_ROUTES("Mic Mute Mixer", "Noise"),
+ ARIZONA_MIXER_ROUTES("Mic Mute Mixer", "Mic"),
+
+ ARIZONA_MUX_ROUTES("ISRC1INT1", "ISRC1INT1"),
+ ARIZONA_MUX_ROUTES("ISRC1INT2", "ISRC2INT2"),
+
+ ARIZONA_MUX_ROUTES("ISRC1DEC1", "ISRC1DEC1"),
+ ARIZONA_MUX_ROUTES("ISRC1DEC2", "ISRC1DEC2"),
+
+ ARIZONA_MUX_ROUTES("ISRC2INT1", "ISRC2INT1"),
+ ARIZONA_MUX_ROUTES("ISRC2INT2", "ISRC2INT2"),
+
+ ARIZONA_MUX_ROUTES("ISRC2DEC1", "ISRC2DEC1"),
+ ARIZONA_MUX_ROUTES("ISRC2DEC2", "ISRC2DEC2"),
+
+ { "AEC Loopback", "HPOUT1L", "OUT1L" },
+ { "AEC Loopback", "HPOUT1R", "OUT1R" },
+ { "HPOUT1L", NULL, "OUT1L" },
+ { "HPOUT1R", NULL, "OUT1R" },
+
+ { "AEC Loopback", "EPOUT", "OUT3L" },
+ { "EPOUTN", NULL, "OUT3L" },
+ { "EPOUTP", NULL, "OUT3L" },
+
+ { "AEC Loopback", "SPKOUT", "OUT4L" },
+ { "SPKOUTN", NULL, "OUT4L" },
+ { "SPKOUTP", NULL, "OUT4L" },
+
+ { "AEC Loopback", "SPKDAT1L", "OUT5L" },
+ { "AEC Loopback", "SPKDAT1R", "OUT5R" },
+ { "SPKDAT1L", NULL, "OUT5L" },
+ { "SPKDAT1R", NULL, "OUT5R" },
+
+ { "MICSUPP", NULL, "SYSCLK" },
+};
+
+static int wm8997_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
+ unsigned int Fref, unsigned int Fout)
+{
+ struct wm8997_priv *wm8997 = snd_soc_codec_get_drvdata(codec);
+
+ switch (fll_id) {
+ case WM8997_FLL1:
+ return arizona_set_fll(&wm8997->fll[0], source, Fref, Fout);
+ case WM8997_FLL2:
+ return arizona_set_fll(&wm8997->fll[1], source, Fref, Fout);
+ case WM8997_FLL1_REFCLK:
+ return arizona_set_fll_refclk(&wm8997->fll[0], source, Fref,
+ Fout);
+ case WM8997_FLL2_REFCLK:
+ return arizona_set_fll_refclk(&wm8997->fll[1], source, Fref,
+ Fout);
+ default:
+ return -EINVAL;
+ }
+}
+
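+/* Sample rates and formats supported by all of the DAIs below */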
+#define WM8997_RATES SNDRV_PCM_RATE_8000_192000
+
+#define WM8997_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
+ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
+
+static struct snd_soc_dai_driver wm8997_dai[] = {
+ {
+ .name = "wm8997-aif1",
+ .id = 1,
+ .base = ARIZONA_AIF1_BCLK_CTRL,
+ .playback = {
+ .stream_name = "AIF1 Playback",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = WM8997_RATES,
+ .formats = WM8997_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF1 Capture",
+ .channels_min = 1,
+ .channels_max = 8,
+ .rates = WM8997_RATES,
+ .formats = WM8997_FORMATS,
+ },
+ .ops = &arizona_dai_ops,
+ .symmetric_rates = 1,
+ },
+ {
+ .name = "wm8997-aif2",
+ .id = 2,
+ .base = ARIZONA_AIF2_BCLK_CTRL,
+ .playback = {
+ .stream_name = "AIF2 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = WM8997_RATES,
+ .formats = WM8997_FORMATS,
+ },
+ .capture = {
+ .stream_name = "AIF2 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = WM8997_RATES,
+ .formats = WM8997_FORMATS,
+ },
+ .ops = &arizona_dai_ops,
+ .symmetric_rates = 1,
+ },
+ {
+ .name = "wm8997-slim1",
+ .id = 3,
+ .playback = {
+ .stream_name = "Slim1 Playback",
+ .channels_min = 1,
+ .channels_max = 4,
+ .rates = WM8997_RATES,
+ .formats = WM8997_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Slim1 Capture",
+ .channels_min = 1,
+ .channels_max = 4,
+ .rates = WM8997_RATES,
+ .formats = WM8997_FORMATS,
+ },
+ .ops = &arizona_simple_dai_ops,
+ },
+ {
+ .name = "wm8997-slim2",
+ .id = 4,
+ .playback = {
+ .stream_name = "Slim2 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = WM8997_RATES,
+ .formats = WM8997_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Slim2 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = WM8997_RATES,
+ .formats = WM8997_FORMATS,
+ },
+ .ops = &arizona_simple_dai_ops,
+ },
+ {
+ .name = "wm8997-slim3",
+ .id = 5,
+ .playback = {
+ .stream_name = "Slim3 Playback",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = WM8997_RATES,
+ .formats = WM8997_FORMATS,
+ },
+ .capture = {
+ .stream_name = "Slim3 Capture",
+ .channels_min = 1,
+ .channels_max = 2,
+ .rates = WM8997_RATES,
+ .formats = WM8997_FORMATS,
+ },
+ .ops = &arizona_simple_dai_ops,
+ },
+};
+
+static int wm8997_codec_probe(struct snd_soc_codec *codec)
+{
+ struct wm8997_priv *priv = snd_soc_codec_get_drvdata(codec);
+ int ret;
+
+ codec->control_data = priv->core.arizona->regmap;
+
+ ret = snd_soc_codec_set_cache_io(codec, 32, 16, SND_SOC_REGMAP);
+ if (ret != 0)
+ return ret;
+
+ arizona_init_spk(codec);
+
+ snd_soc_dapm_disable_pin(&codec->dapm, "HAPTICS");
+
+ priv->core.arizona->dapm = &codec->dapm;
+
+ return 0;
+}
+
+static int wm8997_codec_remove(struct snd_soc_codec *codec)
+{
+ struct wm8997_priv *priv = snd_soc_codec_get_drvdata(codec);
+
+ priv->core.arizona->dapm = NULL;
+
+ return 0;
+}
+
+#define WM8997_DIG_VU 0x0200
+
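+/* Output volume registers that need the volume update (VU) bit latched for writes to take effect */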
+static unsigned int wm8997_digital_vu[] = {
+ ARIZONA_DAC_DIGITAL_VOLUME_1L,
+ ARIZONA_DAC_DIGITAL_VOLUME_1R,
+ ARIZONA_DAC_DIGITAL_VOLUME_3L,
+ ARIZONA_DAC_DIGITAL_VOLUME_4L,
+ ARIZONA_DAC_DIGITAL_VOLUME_5L,
+ ARIZONA_DAC_DIGITAL_VOLUME_5R,
+};
+
+static struct snd_soc_codec_driver soc_codec_dev_wm8997 = {
+ .probe = wm8997_codec_probe,
+ .remove = wm8997_codec_remove,
+
+ .idle_bias_off = true,
+
+ .set_sysclk = arizona_set_sysclk,
+ .set_pll = wm8997_set_fll,
+
+ .controls = wm8997_snd_controls,
+ .num_controls = ARRAY_SIZE(wm8997_snd_controls),
+ .dapm_widgets = wm8997_dapm_widgets,
+ .num_dapm_widgets = ARRAY_SIZE(wm8997_dapm_widgets),
+ .dapm_routes = wm8997_dapm_routes,
+ .num_dapm_routes = ARRAY_SIZE(wm8997_dapm_routes),
+};
+
+static int wm8997_probe(struct platform_device *pdev)
+{
+ struct arizona *arizona = dev_get_drvdata(pdev->dev.parent);
+ struct wm8997_priv *wm8997;
+ int i;
+
+ wm8997 = devm_kzalloc(&pdev->dev, sizeof(struct wm8997_priv),
+ GFP_KERNEL);
+ if (wm8997 == NULL)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, wm8997);
+
+ wm8997->core.arizona = arizona;
+ wm8997->core.num_inputs = 4;
+
+ for (i = 0; i < ARRAY_SIZE(wm8997->fll); i++)
+ wm8997->fll[i].vco_mult = 1;
+
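+ /* Register both FLLs and their lock/clock-OK IRQs with the Arizona core */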
+ arizona_init_fll(arizona, 1, ARIZONA_FLL1_CONTROL_1 - 1,
+ ARIZONA_IRQ_FLL1_LOCK, ARIZONA_IRQ_FLL1_CLOCK_OK,
+ &wm8997->fll[0]);
+ arizona_init_fll(arizona, 2, ARIZONA_FLL2_CONTROL_1 - 1,
+ ARIZONA_IRQ_FLL2_LOCK, ARIZONA_IRQ_FLL2_CLOCK_OK,
+ &wm8997->fll[1]);
+
+ /* SR2 fixed at 8kHz, SR3 fixed at 16kHz */
+ regmap_update_bits(arizona->regmap, ARIZONA_SAMPLE_RATE_2,
+ ARIZONA_SAMPLE_RATE_2_MASK, 0x11);
+ regmap_update_bits(arizona->regmap, ARIZONA_SAMPLE_RATE_3,
+ ARIZONA_SAMPLE_RATE_3_MASK, 0x12);
+
+ for (i = 0; i < ARRAY_SIZE(wm8997_dai); i++)
+ arizona_init_dai(&wm8997->core, i);
+
+ /* Latch volume update bits */
+ for (i = 0; i < ARRAY_SIZE(wm8997_digital_vu); i++)
+ regmap_update_bits(arizona->regmap, wm8997_digital_vu[i],
+ WM8997_DIG_VU, WM8997_DIG_VU);
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_idle(&pdev->dev);
+
+ return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm8997,
+ wm8997_dai, ARRAY_SIZE(wm8997_dai));
+}
+
+static int wm8997_remove(struct platform_device *pdev)
+{
+ snd_soc_unregister_codec(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static struct platform_driver wm8997_codec_driver = {
+ .driver = {
+ .name = "wm8997-codec",
+ .owner = THIS_MODULE,
+ },
+ .probe = wm8997_probe,
+ .remove = wm8997_remove,
+};
+
+module_platform_driver(wm8997_codec_driver);
+
+MODULE_DESCRIPTION("ASoC WM8997 driver");
+MODULE_AUTHOR("Charles Keepax <ckeepax@opensource.wolfsonmicro.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:wm8997-codec");
diff --git a/sound/soc/codecs/wm8997.h b/sound/soc/codecs/wm8997.h
new file mode 100644
index 00000000000..5e91c6a7d56
--- /dev/null
+++ b/sound/soc/codecs/wm8997.h
@@ -0,0 +1,23 @@
+/*
+ * wm8997.h -- WM8997 ALSA SoC Audio driver
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _WM8997_H
+#define _WM8997_H
+
+#include "arizona.h"
+
+#define WM8997_FLL1 1
+#define WM8997_FLL2 2
+#define WM8997_FLL1_REFCLK 3
+#define WM8997_FLL2_REFCLK 4
+
+#endif
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 05252ac936a..b38f3506418 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -225,15 +225,8 @@ struct wm_coeff_ctl_ops {
struct snd_ctl_elem_info *uinfo);
};
-struct wm_coeff {
- struct device *dev;
- struct list_head ctl_list;
- struct regmap *regmap;
-};
-
struct wm_coeff_ctl {
const char *name;
- struct snd_card *card;
struct wm_adsp_alg_region region;
struct wm_coeff_ctl_ops ops;
struct wm_adsp *adsp;
@@ -378,7 +371,6 @@ static int wm_coeff_info(struct snd_kcontrol *kcontrol,
static int wm_coeff_write_control(struct snd_kcontrol *kcontrol,
const void *buf, size_t len)
{
- struct wm_coeff *wm_coeff= snd_kcontrol_chip(kcontrol);
struct wm_coeff_ctl *ctl = (struct wm_coeff_ctl *)kcontrol->private_value;
struct wm_adsp_alg_region *region = &ctl->region;
const struct wm_adsp_region *mem;
@@ -401,7 +393,7 @@ static int wm_coeff_write_control(struct snd_kcontrol *kcontrol,
if (!scratch)
return -ENOMEM;
- ret = regmap_raw_write(wm_coeff->regmap, reg, scratch,
+ ret = regmap_raw_write(adsp->regmap, reg, scratch,
ctl->len);
if (ret) {
adsp_err(adsp, "Failed to write %zu bytes to %x\n",
@@ -434,7 +426,6 @@ static int wm_coeff_put(struct snd_kcontrol *kcontrol,
static int wm_coeff_read_control(struct snd_kcontrol *kcontrol,
void *buf, size_t len)
{
- struct wm_coeff *wm_coeff= snd_kcontrol_chip(kcontrol);
struct wm_coeff_ctl *ctl = (struct wm_coeff_ctl *)kcontrol->private_value;
struct wm_adsp_alg_region *region = &ctl->region;
const struct wm_adsp_region *mem;
@@ -457,7 +448,7 @@ static int wm_coeff_read_control(struct snd_kcontrol *kcontrol,
if (!scratch)
return -ENOMEM;
- ret = regmap_raw_read(wm_coeff->regmap, reg, scratch, ctl->len);
+ ret = regmap_raw_read(adsp->regmap, reg, scratch, ctl->len);
if (ret) {
adsp_err(adsp, "Failed to read %zu bytes from %x\n",
ctl->len, reg);
@@ -481,37 +472,18 @@ static int wm_coeff_get(struct snd_kcontrol *kcontrol,
return 0;
}
-static int wm_coeff_add_kcontrol(struct wm_coeff *wm_coeff,
- struct wm_coeff_ctl *ctl,
- const struct snd_kcontrol_new *kctl)
-{
- int ret;
- struct snd_kcontrol *kcontrol;
-
- kcontrol = snd_ctl_new1(kctl, wm_coeff);
- ret = snd_ctl_add(ctl->card, kcontrol);
- if (ret < 0) {
- dev_err(wm_coeff->dev, "Failed to add %s: %d\n",
- kctl->name, ret);
- return ret;
- }
- ctl->kcontrol = kcontrol;
- return 0;
-}
-
struct wmfw_ctl_work {
- struct wm_coeff *wm_coeff;
+ struct wm_adsp *adsp;
struct wm_coeff_ctl *ctl;
struct work_struct work;
};
-static int wmfw_add_ctl(struct wm_coeff *wm_coeff,
- struct wm_coeff_ctl *ctl)
+static int wmfw_add_ctl(struct wm_adsp *adsp, struct wm_coeff_ctl *ctl)
{
struct snd_kcontrol_new *kcontrol;
int ret;
- if (!wm_coeff || !ctl || !ctl->name || !ctl->card)
+ if (!ctl || !ctl->name)
return -EINVAL;
kcontrol = kzalloc(sizeof(*kcontrol), GFP_KERNEL);
@@ -525,14 +497,17 @@ static int wmfw_add_ctl(struct wm_coeff *wm_coeff,
kcontrol->put = wm_coeff_put;
kcontrol->private_value = (unsigned long)ctl;
- ret = wm_coeff_add_kcontrol(wm_coeff,
- ctl, kcontrol);
+ ret = snd_soc_add_card_controls(adsp->card,
+ kcontrol, 1);
if (ret < 0)
goto err_kcontrol;
kfree(kcontrol);
- list_add(&ctl->list, &wm_coeff->ctl_list);
+ ctl->kcontrol = snd_soc_card_get_kcontrol(adsp->card,
+ ctl->name);
+
+ list_add(&ctl->list, &adsp->ctl_list);
return 0;
err_kcontrol:
@@ -753,13 +728,12 @@ out:
return ret;
}
-static int wm_coeff_init_control_caches(struct wm_coeff *wm_coeff)
+static int wm_coeff_init_control_caches(struct wm_adsp *adsp)
{
struct wm_coeff_ctl *ctl;
int ret;
- list_for_each_entry(ctl, &wm_coeff->ctl_list,
- list) {
+ list_for_each_entry(ctl, &adsp->ctl_list, list) {
if (!ctl->enabled || ctl->set)
continue;
ret = wm_coeff_read_control(ctl->kcontrol,
@@ -772,13 +746,12 @@ static int wm_coeff_init_control_caches(struct wm_coeff *wm_coeff)
return 0;
}
-static int wm_coeff_sync_controls(struct wm_coeff *wm_coeff)
+static int wm_coeff_sync_controls(struct wm_adsp *adsp)
{
struct wm_coeff_ctl *ctl;
int ret;
- list_for_each_entry(ctl, &wm_coeff->ctl_list,
- list) {
+ list_for_each_entry(ctl, &adsp->ctl_list, list) {
if (!ctl->enabled)
continue;
if (ctl->set) {
@@ -799,15 +772,14 @@ static void wm_adsp_ctl_work(struct work_struct *work)
struct wmfw_ctl_work,
work);
- wmfw_add_ctl(ctl_work->wm_coeff, ctl_work->ctl);
+ wmfw_add_ctl(ctl_work->adsp, ctl_work->ctl);
kfree(ctl_work);
}
-static int wm_adsp_create_control(struct snd_soc_codec *codec,
+static int wm_adsp_create_control(struct wm_adsp *dsp,
const struct wm_adsp_alg_region *region)
{
- struct wm_adsp *dsp = snd_soc_codec_get_drvdata(codec);
struct wm_coeff_ctl *ctl;
struct wmfw_ctl_work *ctl_work;
char *name;
@@ -842,7 +814,7 @@ static int wm_adsp_create_control(struct snd_soc_codec *codec,
snprintf(name, PAGE_SIZE, "DSP%d %s %x",
dsp->num, region_name, region->alg);
- list_for_each_entry(ctl, &dsp->wm_coeff->ctl_list,
+ list_for_each_entry(ctl, &dsp->ctl_list,
list) {
if (!strcmp(ctl->name, name)) {
if (!ctl->enabled)
@@ -866,7 +838,6 @@ static int wm_adsp_create_control(struct snd_soc_codec *codec,
ctl->set = 0;
ctl->ops.xget = wm_coeff_get;
ctl->ops.xput = wm_coeff_put;
- ctl->card = codec->card->snd_card;
ctl->adsp = dsp;
ctl->len = region->len;
@@ -882,7 +853,7 @@ static int wm_adsp_create_control(struct snd_soc_codec *codec,
goto err_ctl_cache;
}
- ctl_work->wm_coeff = dsp->wm_coeff;
+ ctl_work->adsp = dsp;
ctl_work->ctl = ctl;
INIT_WORK(&ctl_work->work, wm_adsp_ctl_work);
schedule_work(&ctl_work->work);
@@ -903,7 +874,7 @@ err_name:
return ret;
}
-static int wm_adsp_setup_algs(struct wm_adsp *dsp, struct snd_soc_codec *codec)
+static int wm_adsp_setup_algs(struct wm_adsp *dsp)
{
struct regmap *regmap = dsp->regmap;
struct wmfw_adsp1_id_hdr adsp1_id;
@@ -1091,7 +1062,7 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp, struct snd_soc_codec *codec)
if (i + 1 < algs) {
region->len = be32_to_cpu(adsp1_alg[i + 1].dm);
region->len -= be32_to_cpu(adsp1_alg[i].dm);
- wm_adsp_create_control(codec, region);
+ wm_adsp_create_control(dsp, region);
} else {
adsp_warn(dsp, "Missing length info for region DM with ID %x\n",
be32_to_cpu(adsp1_alg[i].alg.id));
@@ -1108,7 +1079,7 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp, struct snd_soc_codec *codec)
if (i + 1 < algs) {
region->len = be32_to_cpu(adsp1_alg[i + 1].zm);
region->len -= be32_to_cpu(adsp1_alg[i].zm);
- wm_adsp_create_control(codec, region);
+ wm_adsp_create_control(dsp, region);
} else {
adsp_warn(dsp, "Missing length info for region ZM with ID %x\n",
be32_to_cpu(adsp1_alg[i].alg.id));
@@ -1137,7 +1108,7 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp, struct snd_soc_codec *codec)
if (i + 1 < algs) {
region->len = be32_to_cpu(adsp2_alg[i + 1].xm);
region->len -= be32_to_cpu(adsp2_alg[i].xm);
- wm_adsp_create_control(codec, region);
+ wm_adsp_create_control(dsp, region);
} else {
adsp_warn(dsp, "Missing length info for region XM with ID %x\n",
be32_to_cpu(adsp2_alg[i].alg.id));
@@ -1154,7 +1125,7 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp, struct snd_soc_codec *codec)
if (i + 1 < algs) {
region->len = be32_to_cpu(adsp2_alg[i + 1].ym);
region->len -= be32_to_cpu(adsp2_alg[i].ym);
- wm_adsp_create_control(codec, region);
+ wm_adsp_create_control(dsp, region);
} else {
adsp_warn(dsp, "Missing length info for region YM with ID %x\n",
be32_to_cpu(adsp2_alg[i].alg.id));
@@ -1171,7 +1142,7 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp, struct snd_soc_codec *codec)
if (i + 1 < algs) {
region->len = be32_to_cpu(adsp2_alg[i + 1].zm);
region->len -= be32_to_cpu(adsp2_alg[i].zm);
- wm_adsp_create_control(codec, region);
+ wm_adsp_create_control(dsp, region);
} else {
adsp_warn(dsp, "Missing length info for region ZM with ID %x\n",
be32_to_cpu(adsp2_alg[i].alg.id));
@@ -1391,6 +1362,8 @@ int wm_adsp1_event(struct snd_soc_dapm_widget *w,
int ret;
int val;
+ dsp->card = codec->card;
+
switch (event) {
case SND_SOC_DAPM_POST_PMU:
regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
@@ -1425,7 +1398,7 @@ int wm_adsp1_event(struct snd_soc_dapm_widget *w,
if (ret != 0)
goto err;
- ret = wm_adsp_setup_algs(dsp, codec);
+ ret = wm_adsp_setup_algs(dsp);
if (ret != 0)
goto err;
@@ -1434,12 +1407,12 @@ int wm_adsp1_event(struct snd_soc_dapm_widget *w,
goto err;
/* Initialize caches for enabled and unset controls */
- ret = wm_coeff_init_control_caches(dsp->wm_coeff);
+ ret = wm_coeff_init_control_caches(dsp);
if (ret != 0)
goto err;
/* Sync set controls */
- ret = wm_coeff_sync_controls(dsp->wm_coeff);
+ ret = wm_coeff_sync_controls(dsp);
if (ret != 0)
goto err;
@@ -1460,10 +1433,8 @@ int wm_adsp1_event(struct snd_soc_dapm_widget *w,
regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
ADSP1_SYS_ENA, 0);
- list_for_each_entry(ctl, &dsp->wm_coeff->ctl_list,
- list) {
+ list_for_each_entry(ctl, &dsp->ctl_list, list)
ctl->enabled = 0;
- }
break;
default:
@@ -1520,6 +1491,8 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
unsigned int val;
int ret;
+ dsp->card = codec->card;
+
switch (event) {
case SND_SOC_DAPM_POST_PMU:
/*
@@ -1582,7 +1555,7 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
if (ret != 0)
goto err;
- ret = wm_adsp_setup_algs(dsp, codec);
+ ret = wm_adsp_setup_algs(dsp);
if (ret != 0)
goto err;
@@ -1591,12 +1564,12 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
goto err;
/* Initialize caches for enabled and unset controls */
- ret = wm_coeff_init_control_caches(dsp->wm_coeff);
+ ret = wm_coeff_init_control_caches(dsp);
if (ret != 0)
goto err;
/* Sync set controls */
- ret = wm_coeff_sync_controls(dsp->wm_coeff);
+ ret = wm_coeff_sync_controls(dsp);
if (ret != 0)
goto err;
@@ -1637,10 +1610,8 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
ret);
}
- list_for_each_entry(ctl, &dsp->wm_coeff->ctl_list,
- list) {
+ list_for_each_entry(ctl, &dsp->ctl_list, list)
ctl->enabled = 0;
- }
while (!list_empty(&dsp->alg_regions)) {
alg_region = list_first_entry(&dsp->alg_regions,
@@ -1679,49 +1650,38 @@ int wm_adsp2_init(struct wm_adsp *adsp, bool dvfs)
}
INIT_LIST_HEAD(&adsp->alg_regions);
-
- adsp->wm_coeff = kzalloc(sizeof(*adsp->wm_coeff),
- GFP_KERNEL);
- if (!adsp->wm_coeff)
- return -ENOMEM;
- adsp->wm_coeff->regmap = adsp->regmap;
- adsp->wm_coeff->dev = adsp->dev;
- INIT_LIST_HEAD(&adsp->wm_coeff->ctl_list);
+ INIT_LIST_HEAD(&adsp->ctl_list);
if (dvfs) {
adsp->dvfs = devm_regulator_get(adsp->dev, "DCVDD");
if (IS_ERR(adsp->dvfs)) {
ret = PTR_ERR(adsp->dvfs);
dev_err(adsp->dev, "Failed to get DCVDD: %d\n", ret);
- goto out_coeff;
+ return ret;
}
ret = regulator_enable(adsp->dvfs);
if (ret != 0) {
dev_err(adsp->dev, "Failed to enable DCVDD: %d\n",
ret);
- goto out_coeff;
+ return ret;
}
ret = regulator_set_voltage(adsp->dvfs, 1200000, 1800000);
if (ret != 0) {
dev_err(adsp->dev, "Failed to initialise DVFS: %d\n",
ret);
- goto out_coeff;
+ return ret;
}
ret = regulator_disable(adsp->dvfs);
if (ret != 0) {
dev_err(adsp->dev, "Failed to disable DCVDD: %d\n",
ret);
- goto out_coeff;
+ return ret;
}
}
return 0;
-
-out_coeff:
- kfree(adsp->wm_coeff);
- return ret;
}
EXPORT_SYMBOL_GPL(wm_adsp2_init);
diff --git a/sound/soc/codecs/wm_adsp.h b/sound/soc/codecs/wm_adsp.h
index 9f922c82536..d018dea6254 100644
--- a/sound/soc/codecs/wm_adsp.h
+++ b/sound/soc/codecs/wm_adsp.h
@@ -39,6 +39,7 @@ struct wm_adsp {
int type;
struct device *dev;
struct regmap *regmap;
+ struct snd_soc_card *card;
int base;
int sysclk_reg;
@@ -57,7 +58,7 @@ struct wm_adsp {
struct regulator *dvfs;
- struct wm_coeff *wm_coeff;
+ struct list_head ctl_list;
};
#define WM_ADSP1(wname, num) \
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 2d9e099415a..8b50e5958de 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -699,9 +699,7 @@ EXPORT_SYMBOL_GPL(wm_hubs_update_class_w);
static int class_w_put_volsw(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
- struct snd_soc_codec *codec = widget->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
int ret;
ret = snd_soc_dapm_put_volsw(kcontrol, ucontrol);
@@ -721,9 +719,7 @@ static int class_w_put_volsw(struct snd_kcontrol *kcontrol,
static int class_w_put_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
- struct snd_soc_codec *codec = widget->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
int ret;
ret = snd_soc_dapm_put_enum_double(kcontrol, ucontrol);
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
index 70eb37a5dd1..25c31f1655f 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/designware_i2s.c
@@ -421,13 +421,11 @@ static int dw_i2s_probe(struct platform_device *pdev)
dw_i2s_dai, 1);
if (ret != 0) {
dev_err(&pdev->dev, "not able to register dai\n");
- goto err_set_drvdata;
+ goto err_clk_disable;
}
return 0;
-err_set_drvdata:
- dev_set_drvdata(&pdev->dev, NULL);
err_clk_disable:
clk_disable(dev->clk);
err_clk_put:
@@ -440,7 +438,6 @@ static int dw_i2s_remove(struct platform_device *pdev)
struct dw_i2s_dev *dev = dev_get_drvdata(&pdev->dev);
snd_soc_unregister_component(&pdev->dev);
- dev_set_drvdata(&pdev->dev, NULL);
clk_put(dev->clk);
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index aa438546c91..704e246f5b1 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -1,6 +1,9 @@
config SND_SOC_FSL_SSI
tristate
+config SND_SOC_FSL_SPDIF
+ tristate
+
config SND_SOC_FSL_UTILS
tristate
@@ -98,7 +101,7 @@ endif # SND_POWERPC_SOC
menuconfig SND_IMX_SOC
tristate "SoC Audio for Freescale i.MX CPUs"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
help
Say Y or M if you want to add support for codecs attached to
the i.MX CPUs.
@@ -109,11 +112,11 @@ config SND_SOC_IMX_SSI
tristate
config SND_SOC_IMX_PCM_FIQ
- bool
+ tristate
select FIQ
config SND_SOC_IMX_PCM_DMA
- bool
+ tristate
select SND_SOC_GENERIC_DMAENGINE_PCM
config SND_SOC_IMX_AUDMUX
@@ -175,7 +178,6 @@ config SND_SOC_IMX_WM8962
select SND_SOC_IMX_PCM_DMA
select SND_SOC_IMX_AUDMUX
select SND_SOC_FSL_SSI
- select SND_SOC_FSL_UTILS
help
Say Y if you want to add support for SoC audio on an i.MX board with
a wm8962 codec.
@@ -187,14 +189,23 @@ config SND_SOC_IMX_SGTL5000
select SND_SOC_IMX_PCM_DMA
select SND_SOC_IMX_AUDMUX
select SND_SOC_FSL_SSI
- select SND_SOC_FSL_UTILS
help
Say Y if you want to add support for SoC audio on an i.MX board with
a sgtl5000 codec.
+config SND_SOC_IMX_SPDIF
+ tristate "SoC Audio support for i.MX boards with S/PDIF"
+ select SND_SOC_IMX_PCM_DMA
+ select SND_SOC_FSL_SPDIF
+ select SND_SOC_SPDIF
+ help
+ SoC Audio support for i.MX boards with S/PDIF
+ Say Y if you want to add support for SoC audio on an i.MX board with
+ an S/PDIF interface.
+
config SND_SOC_IMX_MC13783
tristate "SoC Audio support for I.MX boards with mc13783"
- depends on MFD_MC13783
+ depends on MFD_MC13783 && ARM
select SND_SOC_IMX_SSI
select SND_SOC_IMX_AUDMUX
select SND_SOC_MC13783
diff --git a/sound/soc/fsl/Makefile b/sound/soc/fsl/Makefile
index d4b4aa8b564..8db705b0fdf 100644
--- a/sound/soc/fsl/Makefile
+++ b/sound/soc/fsl/Makefile
@@ -12,9 +12,11 @@ obj-$(CONFIG_SND_SOC_P1022_RDK) += snd-soc-p1022-rdk.o
# Freescale PowerPC SSI/DMA Platform Support
snd-soc-fsl-ssi-objs := fsl_ssi.o
+snd-soc-fsl-spdif-objs := fsl_spdif.o
snd-soc-fsl-utils-objs := fsl_utils.o
snd-soc-fsl-dma-objs := fsl_dma.o
obj-$(CONFIG_SND_SOC_FSL_SSI) += snd-soc-fsl-ssi.o
+obj-$(CONFIG_SND_SOC_FSL_SPDIF) += snd-soc-fsl-spdif.o
obj-$(CONFIG_SND_SOC_FSL_UTILS) += snd-soc-fsl-utils.o
obj-$(CONFIG_SND_SOC_POWERPC_DMA) += snd-soc-fsl-dma.o
@@ -43,6 +45,7 @@ snd-soc-mx27vis-aic32x4-objs := mx27vis-aic32x4.o
snd-soc-wm1133-ev1-objs := wm1133-ev1.o
snd-soc-imx-sgtl5000-objs := imx-sgtl5000.o
snd-soc-imx-wm8962-objs := imx-wm8962.o
+snd-soc-imx-spdif-objs := imx-spdif.o
snd-soc-imx-mc13783-objs := imx-mc13783.o
obj-$(CONFIG_SND_SOC_EUKREA_TLV320) += snd-soc-eukrea-tlv320.o
@@ -51,4 +54,5 @@ obj-$(CONFIG_SND_SOC_MX27VIS_AIC32X4) += snd-soc-mx27vis-aic32x4.o
obj-$(CONFIG_SND_MXC_SOC_WM1133_EV1) += snd-soc-wm1133-ev1.o
obj-$(CONFIG_SND_SOC_IMX_SGTL5000) += snd-soc-imx-sgtl5000.o
obj-$(CONFIG_SND_SOC_IMX_WM8962) += snd-soc-imx-wm8962.o
+obj-$(CONFIG_SND_SOC_IMX_SPDIF) += snd-soc-imx-spdif.o
obj-$(CONFIG_SND_SOC_IMX_MC13783) += snd-soc-imx-mc13783.o
diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c
new file mode 100644
index 00000000000..3920c3e849c
--- /dev/null
+++ b/sound/soc/fsl/fsl_spdif.c
@@ -0,0 +1,1225 @@
+/*
+ * Freescale S/PDIF ALSA SoC Digital Audio Interface (DAI) driver
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * Based on stmp3xxx_spdif_dai.c
+ * Vladimir Barinov <vbarinov@embeddedalley.com>
+ * Copyright 2008 SigmaTel, Inc
+ * Copyright 2008 Embedded Alley Solutions, Inc
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/clk-private.h>
+#include <linux/bitrev.h>
+#include <linux/regmap.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+
+#include <sound/asoundef.h>
+#include <sound/soc.h>
+#include <sound/dmaengine_pcm.h>
+
+#include "fsl_spdif.h"
+#include "imx-pcm.h"
+
+#define FSL_SPDIF_TXFIFO_WML 0x8
+#define FSL_SPDIF_RXFIFO_WML 0x8
+
+#define INTR_FOR_PLAYBACK (INT_TXFIFO_RESYNC)
+#define INTR_FOR_CAPTURE (INT_SYM_ERR | INT_BIT_ERR | INT_URX_FUL | INT_URX_OV |\
+ INT_QRX_FUL | INT_QRX_OV | INT_UQ_SYNC | INT_UQ_ERR |\
+ INT_RXFIFO_RESYNC | INT_LOSS_LOCK | INT_DPLL_LOCKED)
+
+/* Index list for the values that has if (DPLL Locked) condition */
+static u8 srpc_dpll_locked[] = { 0x0, 0x1, 0x2, 0x3, 0x4, 0xa, 0xb };
+#define SRPC_NODPLL_START1 0x5
+#define SRPC_NODPLL_START2 0xc
+
+#define DEFAULT_RXCLK_SRC 1
+
+/*
+ * SPDIF control structure
+ * Defines channel status, subcode and Q sub
+ */
+struct spdif_mixer_control {
+ /* spinlock to access control data */
+ spinlock_t ctl_lock;
+
+ /* IEC958 channel tx status bit */
+ unsigned char ch_status[4];
+
+ /* User bits */
+ unsigned char subcode[2 * SPDIF_UBITS_SIZE];
+
+ /* Q subcode part of user bits */
+ unsigned char qsub[2 * SPDIF_QSUB_SIZE];
+
+ /* Buffer offset for U/Q */
+ u32 upos;
+ u32 qpos;
+
+ /* Ready buffer index of the two buffers */
+ u32 ready_buf;
+};
+
+struct fsl_spdif_priv {
+ struct spdif_mixer_control fsl_spdif_control;
+ struct snd_soc_dai_driver cpu_dai_drv;
+ struct platform_device *pdev;
+ struct regmap *regmap;
+ bool dpll_locked;
+ u8 txclk_div[SPDIF_TXRATE_MAX];
+ u8 txclk_src[SPDIF_TXRATE_MAX];
+ u8 rxclk_src;
+ struct clk *txclk[SPDIF_TXRATE_MAX];
+ struct clk *rxclk;
+ struct snd_dmaengine_dai_dma_data dma_params_tx;
+ struct snd_dmaengine_dai_dma_data dma_params_rx;
+
+ /* Space for the name string, allocated dynamically along with the struct */
+ char name[0];
+};
+
+
+/* DPLL locked and lock loss interrupt handler */
+static void spdif_irq_dpll_lock(struct fsl_spdif_priv *spdif_priv)
+{
+ struct regmap *regmap = spdif_priv->regmap;
+ struct platform_device *pdev = spdif_priv->pdev;
+ u32 locked;
+
+ regmap_read(regmap, REG_SPDIF_SRPC, &locked);
+ locked &= SRPC_DPLL_LOCKED;
+
+ dev_dbg(&pdev->dev, "isr: Rx dpll %s \n",
+ locked ? "locked" : "loss lock");
+
+ spdif_priv->dpll_locked = locked ? true : false;
+}
+
+/* Receiver found illegal symbol interrupt handler */
+static void spdif_irq_sym_error(struct fsl_spdif_priv *spdif_priv)
+{
+ struct regmap *regmap = spdif_priv->regmap;
+ struct platform_device *pdev = spdif_priv->pdev;
+
+ dev_dbg(&pdev->dev, "isr: receiver found illegal symbol\n");
+
+ if (!spdif_priv->dpll_locked) {
+ /* DPLL unlocked: there seems to be no audio stream */
+ regmap_update_bits(regmap, REG_SPDIF_SIE, INT_SYM_ERR, 0);
+ }
+}
+
+/* U/Q Channel receive register full */
+static void spdif_irq_uqrx_full(struct fsl_spdif_priv *spdif_priv, char name)
+{
+ struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control;
+ struct regmap *regmap = spdif_priv->regmap;
+ struct platform_device *pdev = spdif_priv->pdev;
+ u32 *pos, size, val, reg;
+
+ switch (name) {
+ case 'U':
+ pos = &ctrl->upos;
+ size = SPDIF_UBITS_SIZE;
+ reg = REG_SPDIF_SRU;
+ break;
+ case 'Q':
+ pos = &ctrl->qpos;
+ size = SPDIF_QSUB_SIZE;
+ reg = REG_SPDIF_SRQ;
+ break;
+ default:
+ dev_err(&pdev->dev, "unsupported channel name\n");
+ return;
+ }
+
+ dev_dbg(&pdev->dev, "isr: %c Channel receive register full\n", name);
+
+ if (*pos >= size * 2) {
+ *pos = 0;
+ } else if (unlikely((*pos % size) + 3 > size)) {
+ dev_err(&pdev->dev, "User bit receivce buffer overflow\n");
+ return;
+ }
+
+ regmap_read(regmap, reg, &val);
+ ctrl->subcode[(*pos)++] = val >> 16;
+ ctrl->subcode[(*pos)++] = val >> 8;
+ ctrl->subcode[(*pos)++] = val;
+}
+
+/* U/Q Channel sync found */
+static void spdif_irq_uq_sync(struct fsl_spdif_priv *spdif_priv)
+{
+ struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control;
+ struct platform_device *pdev = spdif_priv->pdev;
+
+ dev_dbg(&pdev->dev, "isr: U/Q Channel sync found\n");
+
+ /* U/Q buffer reset */
+ if (ctrl->qpos == 0)
+ return;
+
+ /* Set ready to this buffer */
+ ctrl->ready_buf = (ctrl->qpos - 1) / SPDIF_QSUB_SIZE + 1;
+}
+
+/* U/Q Channel framing error */
+static void spdif_irq_uq_err(struct fsl_spdif_priv *spdif_priv)
+{
+ struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control;
+ struct regmap *regmap = spdif_priv->regmap;
+ struct platform_device *pdev = spdif_priv->pdev;
+ u32 val;
+
+ dev_dbg(&pdev->dev, "isr: U/Q Channel framing error\n");
+
+ /* Read U/Q data to clear the irq and do buffer reset */
+ regmap_read(regmap, REG_SPDIF_SRU, &val);
+ regmap_read(regmap, REG_SPDIF_SRQ, &val);
+
+ /* Drop this U/Q buffer */
+ ctrl->ready_buf = 0;
+ ctrl->upos = 0;
+ ctrl->qpos = 0;
+}
+
+/* Get spdif interrupt status and clear the interrupt */
+static u32 spdif_intr_status_clear(struct fsl_spdif_priv *spdif_priv)
+{
+ struct regmap *regmap = spdif_priv->regmap;
+ u32 val, val2;
+
+ regmap_read(regmap, REG_SPDIF_SIS, &val);
+ regmap_read(regmap, REG_SPDIF_SIE, &val2);
+
+ regmap_write(regmap, REG_SPDIF_SIC, val & val2);
+
+ return val;
+}
+
+static irqreturn_t spdif_isr(int irq, void *devid)
+{
+ struct fsl_spdif_priv *spdif_priv = (struct fsl_spdif_priv *)devid;
+ struct platform_device *pdev = spdif_priv->pdev;
+ u32 sis;
+
+ sis = spdif_intr_status_clear(spdif_priv);
+
+ if (sis & INT_DPLL_LOCKED)
+ spdif_irq_dpll_lock(spdif_priv);
+
+ if (sis & INT_TXFIFO_UNOV)
+ dev_dbg(&pdev->dev, "isr: Tx FIFO under/overrun\n");
+
+ if (sis & INT_TXFIFO_RESYNC)
+ dev_dbg(&pdev->dev, "isr: Tx FIFO resync\n");
+
+ if (sis & INT_CNEW)
+ dev_dbg(&pdev->dev, "isr: cstatus new\n");
+
+ if (sis & INT_VAL_NOGOOD)
+ dev_dbg(&pdev->dev, "isr: validity flag no good\n");
+
+ if (sis & INT_SYM_ERR)
+ spdif_irq_sym_error(spdif_priv);
+
+ if (sis & INT_BIT_ERR)
+ dev_dbg(&pdev->dev, "isr: receiver found parity bit error\n");
+
+ if (sis & INT_URX_FUL)
+ spdif_irq_uqrx_full(spdif_priv, 'U');
+
+ if (sis & INT_URX_OV)
+ dev_dbg(&pdev->dev, "isr: U Channel receive register overrun\n");
+
+ if (sis & INT_QRX_FUL)
+ spdif_irq_uqrx_full(spdif_priv, 'Q');
+
+ if (sis & INT_QRX_OV)
+ dev_dbg(&pdev->dev, "isr: Q Channel receive register overrun\n");
+
+ if (sis & INT_UQ_SYNC)
+ spdif_irq_uq_sync(spdif_priv);
+
+ if (sis & INT_UQ_ERR)
+ spdif_irq_uq_err(spdif_priv);
+
+ if (sis & INT_RXFIFO_UNOV)
+ dev_dbg(&pdev->dev, "isr: Rx FIFO under/overrun\n");
+
+ if (sis & INT_RXFIFO_RESYNC)
+ dev_dbg(&pdev->dev, "isr: Rx FIFO resync\n");
+
+ if (sis & INT_LOSS_LOCK)
+ spdif_irq_dpll_lock(spdif_priv);
+
+ /* FIXME: Write Tx FIFO to clear TxEm */
+ if (sis & INT_TX_EM)
+ dev_dbg(&pdev->dev, "isr: Tx FIFO empty\n");
+
+ /* FIXME: Read Rx FIFO to clear RxFIFOFul */
+ if (sis & INT_RXFIFO_FUL)
+ dev_dbg(&pdev->dev, "isr: Rx FIFO full\n");
+
+ return IRQ_HANDLED;
+}
+
+static int spdif_softreset(struct fsl_spdif_priv *spdif_priv)
+{
+ struct regmap *regmap = spdif_priv->regmap;
+ u32 val, cycle = 1000;
+
+ regmap_write(regmap, REG_SPDIF_SCR, SCR_SOFT_RESET);
+
+ /*
+ * The RESET bit will be cleared once the reset procedure finishes,
+ * which typically takes 8 cycles; polling up to 1000 cycles keeps it safe.
+ */
+ do {
+ regmap_read(regmap, REG_SPDIF_SCR, &val);
+ } while ((val & SCR_SOFT_RESET) && cycle--);
+
+ if (cycle)
+ return 0;
+ else
+ return -EBUSY;
+}
+
+static void spdif_set_cstatus(struct spdif_mixer_control *ctrl,
+ u8 mask, u8 cstatus)
+{
+ ctrl->ch_status[3] &= ~mask;
+ ctrl->ch_status[3] |= cstatus & mask;
+}
+
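+/*
+ * Push the cached channel status bytes into the hardware. bitrev8()
+ * reverses the bit order within each byte (e.g. bitrev8(0x04) == 0x20),
+ * as the STCSC registers appear to expect the bits in the opposite order
+ * from the ALSA iec958 byte layout.
+ */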
+static void spdif_write_channel_status(struct fsl_spdif_priv *spdif_priv)
+{
+ struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control;
+ struct regmap *regmap = spdif_priv->regmap;
+ struct platform_device *pdev = spdif_priv->pdev;
+ u32 ch_status;
+
+ ch_status = (bitrev8(ctrl->ch_status[0]) << 16) |
+ (bitrev8(ctrl->ch_status[1]) << 8) |
+ bitrev8(ctrl->ch_status[2]);
+ regmap_write(regmap, REG_SPDIF_STCSCH, ch_status);
+
+ dev_dbg(&pdev->dev, "STCSCH: 0x%06x\n", ch_status);
+
+ ch_status = bitrev8(ctrl->ch_status[3]) << 16;
+ regmap_write(regmap, REG_SPDIF_STCSCL, ch_status);
+
+ dev_dbg(&pdev->dev, "STCSCL: 0x%06x\n", ch_status);
+}
+
+/* Set SPDIF PhaseConfig register for rx clock */
+static int spdif_set_rx_clksrc(struct fsl_spdif_priv *spdif_priv,
+ enum spdif_gainsel gainsel, int dpll_locked)
+{
+ struct regmap *regmap = spdif_priv->regmap;
+ u8 clksrc = spdif_priv->rxclk_src;
+
+ if (clksrc >= SRPC_CLKSRC_MAX || gainsel >= GAINSEL_MULTI_MAX)
+ return -EINVAL;
+
+ regmap_update_bits(regmap, REG_SPDIF_SRPC,
+ SRPC_CLKSRC_SEL_MASK | SRPC_GAINSEL_MASK,
+ SRPC_CLKSRC_SEL_SET(clksrc) | SRPC_GAINSEL_SET(gainsel));
+
+ return 0;
+}
+
+static int spdif_set_sample_rate(struct snd_pcm_substream *substream,
+ int sample_rate)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control;
+ struct regmap *regmap = spdif_priv->regmap;
+ struct platform_device *pdev = spdif_priv->pdev;
+ unsigned long csfs = 0;
+ u32 stc, mask, rate;
+ u8 clk, div;
+ int ret;
+
+ switch (sample_rate) {
+ case 32000:
+ rate = SPDIF_TXRATE_32000;
+ csfs = IEC958_AES3_CON_FS_32000;
+ break;
+ case 44100:
+ rate = SPDIF_TXRATE_44100;
+ csfs = IEC958_AES3_CON_FS_44100;
+ break;
+ case 48000:
+ rate = SPDIF_TXRATE_48000;
+ csfs = IEC958_AES3_CON_FS_48000;
+ break;
+ default:
+ dev_err(&pdev->dev, "unsupported sample rate %d\n", sample_rate);
+ return -EINVAL;
+ }
+
+ clk = spdif_priv->txclk_src[rate];
+ if (clk >= STC_TXCLK_SRC_MAX) {
+ dev_err(&pdev->dev, "tx clock source is out of range\n");
+ return -EINVAL;
+ }
+
+ div = spdif_priv->txclk_div[rate];
+ if (div == 0) {
+ dev_err(&pdev->dev, "the divisor can't be zero\n");
+ return -EINVAL;
+ }
+
+ /*
+ * The S/PDIF block needs a clock of 64 * fs * div and divides the
+ * source clock by (div), so request 64 * fs * (div + 1), which will
+ * get rounded.
+ */
+ ret = clk_set_rate(spdif_priv->txclk[rate], 64 * sample_rate * (div + 1));
+ if (ret) {
+ dev_err(&pdev->dev, "failed to set tx clock rate\n");
+ return ret;
+ }
+
+ dev_dbg(&pdev->dev, "expected clock rate = %d\n",
+ (64 * sample_rate * div));
+ dev_dbg(&pdev->dev, "actual clock rate = %ld\n",
+ clk_get_rate(spdif_priv->txclk[rate]));
+
+ /* set fs field in consumer channel status */
+ spdif_set_cstatus(ctrl, IEC958_AES3_CON_FS, csfs);
+
+ /* select clock source and divisor */
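+ /*
+ * STC_TXCLK_DIV(div) stores div - 1 in the register field, so e.g.
+ * div = 8 is written as 7 and the hardware divides by 8 (see fsl_spdif.h).
+ */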
+ stc = STC_TXCLK_ALL_EN | STC_TXCLK_SRC_SET(clk) | STC_TXCLK_DIV(div);
+ mask = STC_TXCLK_ALL_EN_MASK | STC_TXCLK_SRC_MASK | STC_TXCLK_DIV_MASK;
+ regmap_update_bits(regmap, REG_SPDIF_STC, mask, stc);
+
+ dev_dbg(&pdev->dev, "set sample rate to %d\n", sample_rate);
+
+ return 0;
+}
+
+static int fsl_spdif_startup(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct platform_device *pdev = spdif_priv->pdev;
+ struct regmap *regmap = spdif_priv->regmap;
+ u32 scr, mask, i;
+ int ret;
+
+ /* Reset module and interrupts only for first initialization */
+ if (!cpu_dai->active) {
+ ret = spdif_softreset(spdif_priv);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to soft reset\n");
+ return ret;
+ }
+
+ /* Disable all the interrupts */
+ regmap_update_bits(regmap, REG_SPDIF_SIE, 0xffffff, 0);
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ scr = SCR_TXFIFO_AUTOSYNC | SCR_TXFIFO_CTRL_NORMAL |
+ SCR_TXSEL_NORMAL | SCR_USRC_SEL_CHIP |
+ SCR_TXFIFO_FSEL_IF8;
+ mask = SCR_TXFIFO_AUTOSYNC_MASK | SCR_TXFIFO_CTRL_MASK |
+ SCR_TXSEL_MASK | SCR_USRC_SEL_MASK |
+ SCR_TXFIFO_FSEL_MASK;
+ for (i = 0; i < SPDIF_TXRATE_MAX; i++)
+ clk_prepare_enable(spdif_priv->txclk[i]);
+ } else {
+ scr = SCR_RXFIFO_FSEL_IF8 | SCR_RXFIFO_AUTOSYNC;
+ mask = SCR_RXFIFO_FSEL_MASK | SCR_RXFIFO_AUTOSYNC_MASK|
+ SCR_RXFIFO_CTL_MASK | SCR_RXFIFO_OFF_MASK;
+ clk_prepare_enable(spdif_priv->rxclk);
+ }
+ regmap_update_bits(regmap, REG_SPDIF_SCR, mask, scr);
+
+ /* Power up SPDIF module */
+ regmap_update_bits(regmap, REG_SPDIF_SCR, SCR_LOW_POWER, 0);
+
+ return 0;
+}
+
+static void fsl_spdif_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct regmap *regmap = spdif_priv->regmap;
+ u32 scr, mask, i;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ scr = 0;
+ mask = SCR_TXFIFO_AUTOSYNC_MASK | SCR_TXFIFO_CTRL_MASK |
+ SCR_TXSEL_MASK | SCR_USRC_SEL_MASK |
+ SCR_TXFIFO_FSEL_MASK;
+ for (i = 0; i < SPDIF_TXRATE_MAX; i++)
+ clk_disable_unprepare(spdif_priv->txclk[i]);
+ } else {
+ scr = SCR_RXFIFO_OFF | SCR_RXFIFO_CTL_ZERO;
+ mask = SCR_RXFIFO_FSEL_MASK | SCR_RXFIFO_AUTOSYNC_MASK|
+ SCR_RXFIFO_CTL_MASK | SCR_RXFIFO_OFF_MASK;
+ clk_disable_unprepare(spdif_priv->rxclk);
+ }
+ regmap_update_bits(regmap, REG_SPDIF_SCR, mask, scr);
+
+ /* Power down SPDIF module only if tx&rx are both inactive */
+ if (!cpu_dai->active) {
+ spdif_intr_status_clear(spdif_priv);
+ regmap_update_bits(regmap, REG_SPDIF_SCR,
+ SCR_LOW_POWER, SCR_LOW_POWER);
+ }
+}
+
+static int fsl_spdif_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control;
+ struct platform_device *pdev = spdif_priv->pdev;
+ u32 sample_rate = params_rate(params);
+ int ret = 0;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ ret = spdif_set_sample_rate(substream, sample_rate);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: set sample rate failed: %d\n",
+ __func__, sample_rate);
+ return ret;
+ }
+ spdif_set_cstatus(ctrl, IEC958_AES3_CON_CLOCK,
+ IEC958_AES3_CON_CLOCK_1000PPM);
+ spdif_write_channel_status(spdif_priv);
+ } else {
+ /* Setup rx clock source */
+ ret = spdif_set_rx_clksrc(spdif_priv, SPDIF_DEFAULT_GAINSEL, 1);
+ }
+
+ return ret;
+}
+
+static int fsl_spdif_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(rtd->cpu_dai);
+ struct regmap *regmap = spdif_priv->regmap;
+ int is_playback = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);
+ u32 intr = is_playback ? INTR_FOR_PLAYBACK : INTR_FOR_CAPTURE;
+ u32 dmaen = is_playback ? SCR_DMA_TX_EN : SCR_DMA_RX_EN;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ regmap_update_bits(regmap, REG_SPDIF_SIE, intr, intr);
+ regmap_update_bits(regmap, REG_SPDIF_SCR, dmaen, dmaen);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ regmap_update_bits(regmap, REG_SPDIF_SCR, dmaen, 0);
+ regmap_update_bits(regmap, REG_SPDIF_SIE, intr, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct snd_soc_dai_ops fsl_spdif_dai_ops = {
+ .startup = fsl_spdif_startup,
+ .hw_params = fsl_spdif_hw_params,
+ .trigger = fsl_spdif_trigger,
+ .shutdown = fsl_spdif_shutdown,
+};
+
+
+/*
+ * FSL SPDIF IEC958 controller(mixer) functions
+ *
+ * Channel status get/put control
+ * User bit value get/put control
+ * Valid bit value get control
+ * DPLL lock status get control
+ * User bit sync mode selection control
+ */
+
+static int fsl_spdif_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958;
+ uinfo->count = 1;
+
+ return 0;
+}
+
+static int fsl_spdif_pb_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uvalue)
+{
+ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai);
+ struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control;
+
+ uvalue->value.iec958.status[0] = ctrl->ch_status[0];
+ uvalue->value.iec958.status[1] = ctrl->ch_status[1];
+ uvalue->value.iec958.status[2] = ctrl->ch_status[2];
+ uvalue->value.iec958.status[3] = ctrl->ch_status[3];
+
+ return 0;
+}
+
+static int fsl_spdif_pb_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *uvalue)
+{
+ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai);
+ struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control;
+
+ ctrl->ch_status[0] = uvalue->value.iec958.status[0];
+ ctrl->ch_status[1] = uvalue->value.iec958.status[1];
+ ctrl->ch_status[2] = uvalue->value.iec958.status[2];
+ ctrl->ch_status[3] = uvalue->value.iec958.status[3];
+
+ spdif_write_channel_status(spdif_priv);
+
+ return 0;
+}
+
+/* Get channel status from SPDIF_RX_CCHAN register */
+static int fsl_spdif_capture_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai);
+ struct regmap *regmap = spdif_priv->regmap;
+ u32 cstatus, val;
+
+ regmap_read(regmap, REG_SPDIF_SIS, &val);
+ if (!(val & INT_CNEW)) {
+ return -EAGAIN;
+ }
+
+ regmap_read(regmap, REG_SPDIF_SRCSH, &cstatus);
+ ucontrol->value.iec958.status[0] = (cstatus >> 16) & 0xFF;
+ ucontrol->value.iec958.status[1] = (cstatus >> 8) & 0xFF;
+ ucontrol->value.iec958.status[2] = cstatus & 0xFF;
+
+ regmap_read(regmap, REG_SPDIF_SRCSL, &cstatus);
+ ucontrol->value.iec958.status[3] = (cstatus >> 16) & 0xFF;
+ ucontrol->value.iec958.status[4] = (cstatus >> 8) & 0xFF;
+ ucontrol->value.iec958.status[5] = cstatus & 0xFF;
+
+ /* Clear intr */
+ regmap_write(regmap, REG_SPDIF_SIC, INT_CNEW);
+
+ return 0;
+}
+
+/*
+ * Get User bits (subcode) from the chip value read out of the
+ * UChannel register.
+ */
+static int fsl_spdif_subcode_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai);
+ struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&ctrl->ctl_lock, flags);
+ if (ctrl->ready_buf) {
+ int idx = (ctrl->ready_buf - 1) * SPDIF_UBITS_SIZE;
+ memcpy(&ucontrol->value.iec958.subcode[0],
+ &ctrl->subcode[idx], SPDIF_UBITS_SIZE);
+ } else {
+ ret = -EAGAIN;
+ }
+ spin_unlock_irqrestore(&ctrl->ctl_lock, flags);
+
+ return ret;
+}
+
+/* Q-subcode information. The byte size is SPDIF_UBITS_SIZE/8 */
+static int fsl_spdif_qinfo(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+ uinfo->count = SPDIF_QSUB_SIZE;
+
+ return 0;
+}
+
+/* Get the Q subcode from the chip value read out of the QChannel register */
+static int fsl_spdif_qget(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai);
+ struct spdif_mixer_control *ctrl = &spdif_priv->fsl_spdif_control;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&ctrl->ctl_lock, flags);
+ if (ctrl->ready_buf) {
+ int idx = (ctrl->ready_buf - 1) * SPDIF_QSUB_SIZE;
+ memcpy(&ucontrol->value.bytes.data[0],
+ &ctrl->qsub[idx], SPDIF_QSUB_SIZE);
+ } else {
+ ret = -EAGAIN;
+ }
+ spin_unlock_irqrestore(&ctrl->ctl_lock, flags);
+
+ return ret;
+}
+
+/* Valid bit information */
+static int fsl_spdif_vbit_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 1;
+
+ return 0;
+}
+
+/* Get valid good bit from interrupt status register */
+static int fsl_spdif_vbit_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai);
+ struct regmap *regmap = spdif_priv->regmap;
+ u32 val;
+
+ regmap_read(regmap, REG_SPDIF_SIS, &val);
+ ucontrol->value.integer.value[0] = (val & INT_VAL_NOGOOD) != 0;
+ regmap_write(regmap, REG_SPDIF_SIC, INT_VAL_NOGOOD);
+
+ return 0;
+}
+
+/* DPLL lock information */
+static int fsl_spdif_rxrate_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 16000;
+ uinfo->value.integer.max = 96000;
+
+ return 0;
+}
+
+static u32 gainsel_multi[GAINSEL_MULTI_MAX] = {
+ 24, 16, 12, 8, 6, 4, 3,
+};
+
+/* Get RX data clock rate given the SPDIF bus_clk */
+static int spdif_get_rxclk_rate(struct fsl_spdif_priv *spdif_priv,
+ enum spdif_gainsel gainsel)
+{
+ struct regmap *regmap = spdif_priv->regmap;
+ struct platform_device *pdev = spdif_priv->pdev;
+ u64 tmpval64, busclk_freq = 0;
+ u32 freqmeas, phaseconf;
+ u8 clksrc;
+
+ regmap_read(regmap, REG_SPDIF_SRFM, &freqmeas);
+ regmap_read(regmap, REG_SPDIF_SRPC, &phaseconf);
+
+ clksrc = (phaseconf >> SRPC_CLKSRC_SEL_OFFSET) & 0xf;
+ if (srpc_dpll_locked[clksrc] && (phaseconf & SRPC_DPLL_LOCKED)) {
+ /* Get bus clock from system */
+ busclk_freq = clk_get_rate(spdif_priv->rxclk);
+ }
+
+ /* FreqMeas_CLK = (BUS_CLK * FreqMeas) / 2 ^ 10 / GAINSEL / 128 */
+ tmpval64 = (u64) busclk_freq * freqmeas;
+ do_div(tmpval64, gainsel_multi[gainsel] * 1024);
+ do_div(tmpval64, 128 * 1024);
+
+ dev_dbg(&pdev->dev, "FreqMeas: %d\n", freqmeas);
+ dev_dbg(&pdev->dev, "BusclkFreq: %lld\n", busclk_freq);
+ dev_dbg(&pdev->dev, "RxRate: %lld\n", tmpval64);
+
+ return (int)tmpval64;
+}
+
+/*
+ * Get the DPLL lock status from the stable interrupt status register.
+ * A user application must use this control to confirm the DPLL is locked
+ * before starting the next PCM operation.
+ */
+static int fsl_spdif_rxrate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai);
+ int rate = spdif_get_rxclk_rate(spdif_priv, SPDIF_DEFAULT_GAINSEL);
+
+ if (spdif_priv->dpll_locked)
+ ucontrol->value.integer.value[0] = rate;
+ else
+ ucontrol->value.integer.value[0] = 0;
+
+ return 0;
+}
+
+/* User bit sync mode info */
+static int fsl_spdif_usync_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+ uinfo->count = 1;
+ uinfo->value.integer.min = 0;
+ uinfo->value.integer.max = 1;
+
+ return 0;
+}
+
+/*
+ * User bit sync mode:
+ * 1 CD User channel subcode
+ * 0 Non-CD data
+ */
+static int fsl_spdif_usync_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai);
+ struct regmap *regmap = spdif_priv->regmap;
+ u32 val;
+
+ regmap_read(regmap, REG_SPDIF_SRCD, &val);
+ ucontrol->value.integer.value[0] = (val & SRCD_CD_USER) != 0;
+
+ return 0;
+}
+
+/*
+ * User bit sync mode:
+ * 1 CD User channel subcode
+ * 0 Non-CD data
+ */
+static int fsl_spdif_usync_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
+ struct fsl_spdif_priv *spdif_priv = snd_soc_dai_get_drvdata(cpu_dai);
+ struct regmap *regmap = spdif_priv->regmap;
+ u32 val = ucontrol->value.integer.value[0] << SRCD_CD_USER_OFFSET;
+
+ regmap_update_bits(regmap, REG_SPDIF_SRCD, SRCD_CD_USER, val);
+
+ return 0;
+}
+
+/* FSL SPDIF IEC958 controller defines */
+static struct snd_kcontrol_new fsl_spdif_ctrls[] = {
+ /* Status channel controller */
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = SNDRV_CTL_NAME_IEC958("", PLAYBACK, DEFAULT),
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_WRITE |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = fsl_spdif_info,
+ .get = fsl_spdif_pb_get,
+ .put = fsl_spdif_pb_put,
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .name = SNDRV_CTL_NAME_IEC958("", CAPTURE, DEFAULT),
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = fsl_spdif_info,
+ .get = fsl_spdif_capture_get,
+ },
+ /* User bits controller */
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .name = "IEC958 Subcode Capture Default",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = fsl_spdif_info,
+ .get = fsl_spdif_subcode_get,
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .name = "IEC958 Q-subcode Capture Default",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = fsl_spdif_qinfo,
+ .get = fsl_spdif_qget,
+ },
+ /* Valid bit error controller */
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .name = "IEC958 V-Bit Errors",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = fsl_spdif_vbit_info,
+ .get = fsl_spdif_vbit_get,
+ },
+ /* DPLL lock info get controller */
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .name = "RX Sample Rate",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = fsl_spdif_rxrate_info,
+ .get = fsl_spdif_rxrate_get,
+ },
+ /* User bit sync mode set/get controller */
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .name = "IEC958 USyncMode CDText",
+ .access = SNDRV_CTL_ELEM_ACCESS_READ |
+ SNDRV_CTL_ELEM_ACCESS_WRITE |
+ SNDRV_CTL_ELEM_ACCESS_VOLATILE,
+ .info = fsl_spdif_usync_info,
+ .get = fsl_spdif_usync_get,
+ .put = fsl_spdif_usync_put,
+ },
+};
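+
+/*
+ * These controls are attached per DAI in fsl_spdif_dai_probe() below and
+ * should show up to userspace under the standard IEC958 names (e.g.
+ * "IEC958 Playback Default", assuming the usual SNDRV_CTL_NAME_IEC958
+ * expansion), so they can be inspected with amixer/alsactl.
+ */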
+
+static int fsl_spdif_dai_probe(struct snd_soc_dai *dai)
+{
+ struct fsl_spdif_priv *spdif_private = snd_soc_dai_get_drvdata(dai);
+
+ dai->playback_dma_data = &spdif_private->dma_params_tx;
+ dai->capture_dma_data = &spdif_private->dma_params_rx;
+
+ snd_soc_add_dai_controls(dai, fsl_spdif_ctrls, ARRAY_SIZE(fsl_spdif_ctrls));
+
+ return 0;
+}
+
+static struct snd_soc_dai_driver fsl_spdif_dai = {
+ .probe = &fsl_spdif_dai_probe,
+ .playback = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = FSL_SPDIF_RATES_PLAYBACK,
+ .formats = FSL_SPDIF_FORMATS_PLAYBACK,
+ },
+ .capture = {
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = FSL_SPDIF_RATES_CAPTURE,
+ .formats = FSL_SPDIF_FORMATS_CAPTURE,
+ },
+ .ops = &fsl_spdif_dai_ops,
+};
+
+static const struct snd_soc_component_driver fsl_spdif_component = {
+ .name = "fsl-spdif",
+};
+
+/* FSL SPDIF REGMAP */
+
+static bool fsl_spdif_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case REG_SPDIF_SCR:
+ case REG_SPDIF_SRCD:
+ case REG_SPDIF_SRPC:
+ case REG_SPDIF_SIE:
+ case REG_SPDIF_SIS:
+ case REG_SPDIF_SRL:
+ case REG_SPDIF_SRR:
+ case REG_SPDIF_SRCSH:
+ case REG_SPDIF_SRCSL:
+ case REG_SPDIF_SRU:
+ case REG_SPDIF_SRQ:
+ case REG_SPDIF_STCSCH:
+ case REG_SPDIF_STCSCL:
+ case REG_SPDIF_SRFM:
+ case REG_SPDIF_STC:
+ return true;
+ default:
+ return false;
+ };
+}
+
+static bool fsl_spdif_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case REG_SPDIF_SCR:
+ case REG_SPDIF_SRCD:
+ case REG_SPDIF_SRPC:
+ case REG_SPDIF_SIE:
+ case REG_SPDIF_SIC:
+ case REG_SPDIF_STL:
+ case REG_SPDIF_STR:
+ case REG_SPDIF_STCSCH:
+ case REG_SPDIF_STCSCL:
+ case REG_SPDIF_STC:
+ return true;
+ default:
+ return false;
+ };
+}
+
+static const struct regmap_config fsl_spdif_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+
+ .max_register = REG_SPDIF_STC,
+ .readable_reg = fsl_spdif_readable_reg,
+ .writeable_reg = fsl_spdif_writeable_reg,
+};
+
+static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv,
+ struct clk *clk, u64 savesub,
+ enum spdif_txrate index)
+{
+ const u32 rate[] = { 32000, 44100, 48000 };
+ u64 rate_ideal, rate_actual, sub;
+ u32 div, arate;
+
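+ /*
+ * savesub/sub hold the rate deviation in units of 0.001%: e.g. an
+ * actual rate of 48013 Hz against a 48000 Hz target gives
+ * 13 * 100000 / 48000 ~= 27, i.e. about 0.027% off.
+ */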
+ for (div = 1; div <= 128; div++) {
+ rate_ideal = rate[index] * (div + 1) * 64;
+ rate_actual = clk_round_rate(clk, rate_ideal);
+
+ arate = rate_actual / 64;
+ arate /= div;
+
+ if (arate == rate[index]) {
+ /* We are lucky */
+ savesub = 0;
+ spdif_priv->txclk_div[index] = div;
+ break;
+ } else if (arate / rate[index] == 1) {
+ /* A little bigger than expected */
+ sub = (arate - rate[index]) * 100000;
+ do_div(sub, rate[index]);
+ if (sub < savesub) {
+ savesub = sub;
+ spdif_priv->txclk_div[index] = div;
+ }
+ } else if (rate[index] / arate == 1) {
+ /* A little smaller than expected */
+ sub = (rate[index] - arate) * 100000;
+ do_div(sub, rate[index]);
+ if (sub < savesub) {
+ savesub = sub;
+ spdif_priv->txclk_div[index] = div;
+ }
+ }
+ }
+
+ return savesub;
+}
+
+static int fsl_spdif_probe_txclk(struct fsl_spdif_priv *spdif_priv,
+ enum spdif_txrate index)
+{
+ const u32 rate[] = { 32000, 44100, 48000 };
+ struct platform_device *pdev = spdif_priv->pdev;
+ struct device *dev = &pdev->dev;
+ u64 savesub = 100000, ret;
+ struct clk *clk;
+ char tmp[16];
+ int i;
+
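+ /*
+ * Walk the "rxtx0".."rxtx7" clocks from the device tree
+ * (STC_TXCLK_SRC_MAX == 8) and remember the source/divisor pair
+ * with the smallest rate deviation for this sample rate.
+ */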
+ for (i = 0; i < STC_TXCLK_SRC_MAX; i++) {
+ sprintf(tmp, "rxtx%d", i);
+ clk = devm_clk_get(&pdev->dev, tmp);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "no rxtx%d clock in devicetree\n", i);
+ return PTR_ERR(clk);
+ }
+ if (!clk_get_rate(clk))
+ continue;
+
+ ret = fsl_spdif_txclk_caldiv(spdif_priv, clk, savesub, index);
+ if (savesub == ret)
+ continue;
+
+ savesub = ret;
+ spdif_priv->txclk[index] = clk;
+ spdif_priv->txclk_src[index] = i;
+
+ /* To quickly settle on a divisor, we allow a 0.1% deviation */
+ if (savesub < 100)
+ break;
+ }
+
+ dev_dbg(&pdev->dev, "use rxtx%d as tx clock source for %dHz sample rate\n",
+ spdif_priv->txclk_src[index], rate[index]);
+ dev_dbg(&pdev->dev, "use divisor %d for %dHz sample rate\n",
+ spdif_priv->txclk_div[index], rate[index]);
+
+ return 0;
+}
+
+static int fsl_spdif_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct fsl_spdif_priv *spdif_priv;
+ struct spdif_mixer_control *ctrl;
+ struct resource *res;
+ void __iomem *regs;
+ int irq, ret, i;
+
+ if (!np)
+ return -ENODEV;
+
+ spdif_priv = devm_kzalloc(&pdev->dev,
+ sizeof(struct fsl_spdif_priv) + strlen(np->name) + 1,
+ GFP_KERNEL);
+ if (!spdif_priv)
+ return -ENOMEM;
+
+ strcpy(spdif_priv->name, np->name);
+
+ spdif_priv->pdev = pdev;
+
+ /* Initialize this copy of the CPU DAI driver structure */
+ memcpy(&spdif_priv->cpu_dai_drv, &fsl_spdif_dai, sizeof(fsl_spdif_dai));
+ spdif_priv->cpu_dai_drv.name = spdif_priv->name;
+
+ /* Get the addresses and IRQ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "could not determine device resources\n");
+ return -ENXIO;
+ }
+
+ regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ spdif_priv->regmap = devm_regmap_init_mmio_clk(&pdev->dev,
+ "core", regs, &fsl_spdif_regmap_config);
+ if (IS_ERR(spdif_priv->regmap)) {
+ dev_err(&pdev->dev, "regmap init failed\n");
+ return PTR_ERR(spdif_priv->regmap);
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
+ return irq;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, spdif_isr, 0,
+ spdif_priv->name, spdif_priv);
+ if (ret) {
+ dev_err(&pdev->dev, "could not claim irq %u\n", irq);
+ return ret;
+ }
+
+ /* Select clock source for rx/tx clock */
+ spdif_priv->rxclk = devm_clk_get(&pdev->dev, "rxtx1");
+ if (IS_ERR(spdif_priv->rxclk)) {
+ dev_err(&pdev->dev, "no rxtx1 clock in devicetree\n");
+ return PTR_ERR(spdif_priv->rxclk);
+ }
+ spdif_priv->rxclk_src = DEFAULT_RXCLK_SRC;
+
+ for (i = 0; i < SPDIF_TXRATE_MAX; i++) {
+ ret = fsl_spdif_probe_txclk(spdif_priv, i);
+ if (ret)
+ return ret;
+ }
+
+ /* Initialize the spinlock for control data */
+ ctrl = &spdif_priv->fsl_spdif_control;
+ spin_lock_init(&ctrl->ctl_lock);
+
+ /* Init tx channel status default value */
+ ctrl->ch_status[0] =
+ IEC958_AES0_CON_NOT_COPYRIGHT | IEC958_AES0_CON_EMPHASIS_5015;
+ ctrl->ch_status[1] = IEC958_AES1_CON_DIGDIGCONV_ID;
+ ctrl->ch_status[2] = 0x00;
+ ctrl->ch_status[3] =
+ IEC958_AES3_CON_FS_44100 | IEC958_AES3_CON_CLOCK_1000PPM;
+
+ spdif_priv->dpll_locked = false;
+
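+ /*
+ * The DMA burst size matches the FIFO watermark; transfers go through
+ * the SPDIFTxLeft (STL) and SPDIFRxLeft (SRL) data registers.
+ */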
+ spdif_priv->dma_params_tx.maxburst = FSL_SPDIF_TXFIFO_WML;
+ spdif_priv->dma_params_rx.maxburst = FSL_SPDIF_RXFIFO_WML;
+ spdif_priv->dma_params_tx.addr = res->start + REG_SPDIF_STL;
+ spdif_priv->dma_params_rx.addr = res->start + REG_SPDIF_SRL;
+
+ /* Register with ASoC */
+ dev_set_drvdata(&pdev->dev, spdif_priv);
+
+ ret = snd_soc_register_component(&pdev->dev, &fsl_spdif_component,
+ &spdif_priv->cpu_dai_drv, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register DAI: %d\n", ret);
+ return ret;
+ }
+
+ ret = imx_pcm_dma_init(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "imx_pcm_dma_init failed: %d\n", ret);
+ goto error_component;
+ }
+
+ return ret;
+
+error_component:
+ snd_soc_unregister_component(&pdev->dev);
+
+ return ret;
+}
+
+static int fsl_spdif_remove(struct platform_device *pdev)
+{
+ imx_pcm_dma_exit(pdev);
+ snd_soc_unregister_component(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id fsl_spdif_dt_ids[] = {
+ { .compatible = "fsl,imx35-spdif", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fsl_spdif_dt_ids);
+
+static struct platform_driver fsl_spdif_driver = {
+ .driver = {
+ .name = "fsl-spdif-dai",
+ .owner = THIS_MODULE,
+ .of_match_table = fsl_spdif_dt_ids,
+ },
+ .probe = fsl_spdif_probe,
+ .remove = fsl_spdif_remove,
+};
+
+module_platform_driver(fsl_spdif_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("Freescale S/PDIF CPU DAI Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:fsl-spdif-dai");
diff --git a/sound/soc/fsl/fsl_spdif.h b/sound/soc/fsl/fsl_spdif.h
new file mode 100644
index 00000000000..b1266790d11
--- /dev/null
+++ b/sound/soc/fsl/fsl_spdif.h
@@ -0,0 +1,191 @@
+/*
+ * fsl_spdif.h - ALSA S/PDIF interface for the Freescale i.MX SoC
+ *
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * Author: Nicolin Chen <b42378@freescale.com>
+ *
+ * Based on fsl_ssi.h
+ * Author: Timur Tabi <timur@freescale.com>
+ * Copyright 2007-2008 Freescale Semiconductor, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef _FSL_SPDIF_DAI_H
+#define _FSL_SPDIF_DAI_H
+
+/* S/PDIF Register Map */
+#define REG_SPDIF_SCR 0x0 /* SPDIF Configuration Register */
+#define REG_SPDIF_SRCD 0x4 /* CDText Control Register */
+#define REG_SPDIF_SRPC 0x8 /* PhaseConfig Register */
+#define REG_SPDIF_SIE 0xc /* InterruptEn Register */
+#define REG_SPDIF_SIS 0x10 /* InterruptStat Register */
+#define REG_SPDIF_SIC 0x10 /* InterruptClear Register */
+#define REG_SPDIF_SRL 0x14 /* SPDIFRxLeft Register */
+#define REG_SPDIF_SRR 0x18 /* SPDIFRxRight Register */
+#define REG_SPDIF_SRCSH 0x1c /* SPDIFRxCChannel_h Register */
+#define REG_SPDIF_SRCSL 0x20 /* SPDIFRxCChannel_l Register */
+#define REG_SPDIF_SRU 0x24 /* UchannelRx Register */
+#define REG_SPDIF_SRQ 0x28 /* QchannelRx Register */
+#define REG_SPDIF_STL 0x2C /* SPDIFTxLeft Register */
+#define REG_SPDIF_STR 0x30 /* SPDIFTxRight Register */
+#define REG_SPDIF_STCSCH 0x34 /* SPDIFTxCChannelCons_h Register */
+#define REG_SPDIF_STCSCL 0x38 /* SPDIFTxCChannelCons_l Register */
+#define REG_SPDIF_SRFM 0x44 /* FreqMeas Register */
+#define REG_SPDIF_STC 0x50 /* SPDIFTxClk Register */
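+
+/*
+ * Note that the interrupt status (SIS) and interrupt clear (SIC) registers
+ * share offset 0x10: the register apparently reads back as status and is
+ * written to clear, which is why SIS is in the readable list and SIC in the
+ * writeable list of the regmap config in fsl_spdif.c.
+ */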
+
+
+/* SPDIF Configuration register */
+#define SCR_RXFIFO_CTL_OFFSET 23
+#define SCR_RXFIFO_CTL_MASK (1 << SCR_RXFIFO_CTL_OFFSET)
+#define SCR_RXFIFO_CTL_ZERO (1 << SCR_RXFIFO_CTL_OFFSET)
+#define SCR_RXFIFO_OFF_OFFSET 22
+#define SCR_RXFIFO_OFF_MASK (1 << SCR_RXFIFO_OFF_OFFSET)
+#define SCR_RXFIFO_OFF (1 << SCR_RXFIFO_OFF_OFFSET)
+#define SCR_RXFIFO_RST_OFFSET 21
+#define SCR_RXFIFO_RST_MASK (1 << SCR_RXFIFO_RST_OFFSET)
+#define SCR_RXFIFO_RST (1 << SCR_RXFIFO_RST_OFFSET)
+#define SCR_RXFIFO_FSEL_OFFSET 19
+#define SCR_RXFIFO_FSEL_MASK (0x3 << SCR_RXFIFO_FSEL_OFFSET)
+#define SCR_RXFIFO_FSEL_IF0 (0x0 << SCR_RXFIFO_FSEL_OFFSET)
+#define SCR_RXFIFO_FSEL_IF4 (0x1 << SCR_RXFIFO_FSEL_OFFSET)
+#define SCR_RXFIFO_FSEL_IF8 (0x2 << SCR_RXFIFO_FSEL_OFFSET)
+#define SCR_RXFIFO_FSEL_IF12 (0x3 << SCR_RXFIFO_FSEL_OFFSET)
+#define SCR_RXFIFO_AUTOSYNC_OFFSET 18
+#define SCR_RXFIFO_AUTOSYNC_MASK (1 << SCR_RXFIFO_AUTOSYNC_OFFSET)
+#define SCR_RXFIFO_AUTOSYNC (1 << SCR_RXFIFO_AUTOSYNC_OFFSET)
+#define SCR_TXFIFO_AUTOSYNC_OFFSET 17
+#define SCR_TXFIFO_AUTOSYNC_MASK (1 << SCR_TXFIFO_AUTOSYNC_OFFSET)
+#define SCR_TXFIFO_AUTOSYNC (1 << SCR_TXFIFO_AUTOSYNC_OFFSET)
+#define SCR_TXFIFO_FSEL_OFFSET 15
+#define SCR_TXFIFO_FSEL_MASK (0x3 << SCR_TXFIFO_FSEL_OFFSET)
+#define SCR_TXFIFO_FSEL_IF0 (0x0 << SCR_TXFIFO_FSEL_OFFSET)
+#define SCR_TXFIFO_FSEL_IF4 (0x1 << SCR_TXFIFO_FSEL_OFFSET)
+#define SCR_TXFIFO_FSEL_IF8 (0x2 << SCR_TXFIFO_FSEL_OFFSET)
+#define SCR_TXFIFO_FSEL_IF12 (0x3 << SCR_TXFIFO_FSEL_OFFSET)
+#define SCR_LOW_POWER (1 << 13)
+#define SCR_SOFT_RESET (1 << 12)
+#define SCR_TXFIFO_CTRL_OFFSET 10
+#define SCR_TXFIFO_CTRL_MASK (0x3 << SCR_TXFIFO_CTRL_OFFSET)
+#define SCR_TXFIFO_CTRL_ZERO (0x0 << SCR_TXFIFO_CTRL_OFFSET)
+#define SCR_TXFIFO_CTRL_NORMAL (0x1 << SCR_TXFIFO_CTRL_OFFSET)
+#define SCR_TXFIFO_CTRL_ONESAMPLE (0x2 << SCR_TXFIFO_CTRL_OFFSET)
+#define SCR_DMA_RX_EN_OFFSET 9
+#define SCR_DMA_RX_EN_MASK (1 << SCR_DMA_RX_EN_OFFSET)
+#define SCR_DMA_RX_EN (1 << SCR_DMA_RX_EN_OFFSET)
+#define SCR_DMA_TX_EN_OFFSET 8
+#define SCR_DMA_TX_EN_MASK (1 << SCR_DMA_TX_EN_OFFSET)
+#define SCR_DMA_TX_EN (1 << SCR_DMA_TX_EN_OFFSET)
+#define SCR_VAL_OFFSET 5
+#define SCR_VAL_MASK (1 << SCR_VAL_OFFSET)
+#define SCR_VAL_CLEAR (1 << SCR_VAL_OFFSET)
+#define SCR_TXSEL_OFFSET 2
+#define SCR_TXSEL_MASK (0x7 << SCR_TXSEL_OFFSET)
+#define SCR_TXSEL_OFF (0 << SCR_TXSEL_OFFSET)
+#define SCR_TXSEL_RX (1 << SCR_TXSEL_OFFSET)
+#define SCR_TXSEL_NORMAL (0x5 << SCR_TXSEL_OFFSET)
+#define SCR_USRC_SEL_OFFSET 0x0
+#define SCR_USRC_SEL_MASK (0x3 << SCR_USRC_SEL_OFFSET)
+#define SCR_USRC_SEL_NONE (0x0 << SCR_USRC_SEL_OFFSET)
+#define SCR_USRC_SEL_RECV (0x1 << SCR_USRC_SEL_OFFSET)
+#define SCR_USRC_SEL_CHIP (0x3 << SCR_USRC_SEL_OFFSET)
+
+/* SPDIF CDText control */
+#define SRCD_CD_USER_OFFSET 1
+#define SRCD_CD_USER (1 << SRCD_CD_USER_OFFSET)
+
+/* SPDIF Phase Configuration register */
+#define SRPC_DPLL_LOCKED (1 << 6)
+#define SRPC_CLKSRC_SEL_OFFSET 7
+#define SRPC_CLKSRC_SEL_MASK (0xf << SRPC_CLKSRC_SEL_OFFSET)
+#define SRPC_CLKSRC_SEL_SET(x) ((x << SRPC_CLKSRC_SEL_OFFSET) & SRPC_CLKSRC_SEL_MASK)
+#define SRPC_CLKSRC_SEL_LOCKED_OFFSET1 5
+#define SRPC_CLKSRC_SEL_LOCKED_OFFSET2 2
+#define SRPC_GAINSEL_OFFSET 3
+#define SRPC_GAINSEL_MASK (0x7 << SRPC_GAINSEL_OFFSET)
+#define SRPC_GAINSEL_SET(x) ((x << SRPC_GAINSEL_OFFSET) & SRPC_GAINSEL_MASK)
+
+#define SRPC_CLKSRC_MAX 16
+
+enum spdif_gainsel {
+ GAINSEL_MULTI_24 = 0,
+ GAINSEL_MULTI_16,
+ GAINSEL_MULTI_12,
+ GAINSEL_MULTI_8,
+ GAINSEL_MULTI_6,
+ GAINSEL_MULTI_4,
+ GAINSEL_MULTI_3,
+};
+#define GAINSEL_MULTI_MAX (GAINSEL_MULTI_3 + 1)
+#define SPDIF_DEFAULT_GAINSEL GAINSEL_MULTI_8
+
+/* SPDIF interrupt mask define */
+#define INT_DPLL_LOCKED (1 << 20)
+#define INT_TXFIFO_UNOV (1 << 19)
+#define INT_TXFIFO_RESYNC (1 << 18)
+#define INT_CNEW (1 << 17)
+#define INT_VAL_NOGOOD (1 << 16)
+#define INT_SYM_ERR (1 << 15)
+#define INT_BIT_ERR (1 << 14)
+#define INT_URX_FUL (1 << 10)
+#define INT_URX_OV (1 << 9)
+#define INT_QRX_FUL (1 << 8)
+#define INT_QRX_OV (1 << 7)
+#define INT_UQ_SYNC (1 << 6)
+#define INT_UQ_ERR (1 << 5)
+#define INT_RXFIFO_UNOV (1 << 4)
+#define INT_RXFIFO_RESYNC (1 << 3)
+#define INT_LOSS_LOCK (1 << 2)
+#define INT_TX_EM (1 << 1)
+#define INT_RXFIFO_FUL (1 << 0)
+
+/* SPDIF Clock register */
+#define STC_SYSCLK_DIV_OFFSET 11
+#define STC_SYSCLK_DIV_MASK (0x1ff << STC_SYSCLK_DIV_OFFSET)
+#define STC_SYSCLK_DIV(x) ((((x) - 1) << STC_SYSCLK_DIV_OFFSET) & STC_SYSCLK_DIV_MASK)
+#define STC_TXCLK_SRC_OFFSET 8
+#define STC_TXCLK_SRC_MASK (0x7 << STC_TXCLK_SRC_OFFSET)
+#define STC_TXCLK_SRC_SET(x) ((x << STC_TXCLK_SRC_OFFSET) & STC_TXCLK_SRC_MASK)
+#define STC_TXCLK_ALL_EN_OFFSET 7
+#define STC_TXCLK_ALL_EN_MASK (1 << STC_TXCLK_ALL_EN_OFFSET)
+#define STC_TXCLK_ALL_EN (1 << STC_TXCLK_ALL_EN_OFFSET)
+#define STC_TXCLK_DIV_OFFSET 0
+#define STC_TXCLK_DIV_MASK (0x7ff << STC_TXCLK_DIV_OFFSET)
+#define STC_TXCLK_DIV(x) ((((x) - 1) << STC_TXCLK_DIV_OFFSET) & STC_TXCLK_DIV_MASK)
+#define STC_TXCLK_SRC_MAX 8
+
+/* SPDIF tx rate */
+enum spdif_txrate {
+ SPDIF_TXRATE_32000 = 0,
+ SPDIF_TXRATE_44100,
+ SPDIF_TXRATE_48000,
+};
+#define SPDIF_TXRATE_MAX (SPDIF_TXRATE_48000 + 1)
+
+
+#define SPDIF_CSTATUS_BYTE 6
+#define SPDIF_UBITS_SIZE 96
+#define SPDIF_QSUB_SIZE (SPDIF_UBITS_SIZE / 8)
+
+
+#define FSL_SPDIF_RATES_PLAYBACK (SNDRV_PCM_RATE_32000 | \
+ SNDRV_PCM_RATE_44100 | \
+ SNDRV_PCM_RATE_48000)
+
+#define FSL_SPDIF_RATES_CAPTURE (SNDRV_PCM_RATE_16000 | \
+ SNDRV_PCM_RATE_32000 | \
+ SNDRV_PCM_RATE_44100 | \
+ SNDRV_PCM_RATE_48000 | \
+ SNDRV_PCM_RATE_64000 | \
+ SNDRV_PCM_RATE_96000)
+
+#define FSL_SPDIF_FORMATS_PLAYBACK (SNDRV_PCM_FMTBIT_S16_LE | \
+ SNDRV_PCM_FMTBIT_S20_3LE | \
+ SNDRV_PCM_FMTBIT_S24_LE)
+
+#define FSL_SPDIF_FORMATS_CAPTURE (SNDRV_PCM_FMTBIT_S24_LE)
+
+#endif /* _FSL_SPDIF_DAI_H */
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 2f2d837df07..c6b743978d5 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -8,6 +8,26 @@
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
+ *
+ *
+ * Some notes why imx-pcm-fiq is used instead of DMA on some boards:
+ *
+ * The i.MX SSI core has some nasty limitations in AC97 mode. While most
+ * sane processor vendors have a FIFO per AC97 slot, the i.MX has only
+ * one FIFO which combines all valid receive slots. We cannot even select
+ * which slots we want to receive. The WM9712, with which this driver
+ * was developed, always sends GPIO status data in slot 12 which
+ * we receive in our (PCM-) data stream. The only chance we have is to
+ * manually skip this data in the FIQ handler. With sampling rates different
+ * from 48000Hz not every frame has valid receive data, so the ratio
+ * between pcm data and GPIO status data changes. Our FIQ handler is not
+ * able to handle this, hence this driver only works with 48000Hz sampling
+ * rate.
+ * Reading and writing AC97 registers is another challenge. The core
+ * provides us status bits when the read register is updated with *another*
+ * value. When we read the same register two times (and the register still
+ * contains the same value) these status bits are not set. We work
+ * around this by not polling these bits but by waiting a fixed delay instead.
*/
#include <linux/init.h>
@@ -36,7 +56,7 @@
#define read_ssi(addr) in_be32(addr)
#define write_ssi(val, addr) out_be32(addr, val)
#define write_ssi_mask(addr, clear, set) clrsetbits_be32(addr, clear, set)
-#elif defined ARM
+#else
#define read_ssi(addr) readl(addr)
#define write_ssi(val, addr) writel(val, addr)
/*
@@ -121,11 +141,14 @@ struct fsl_ssi_private {
bool new_binding;
bool ssi_on_imx;
+ bool imx_ac97;
+ bool use_dma;
struct clk *clk;
struct snd_dmaengine_dai_dma_data dma_params_tx;
struct snd_dmaengine_dai_dma_data dma_params_rx;
struct imx_dma_data filter_data_tx;
struct imx_dma_data filter_data_rx;
+ struct imx_pcm_fiq_params fiq_params;
struct {
unsigned int rfrc;
@@ -298,6 +321,102 @@ static irqreturn_t fsl_ssi_isr(int irq, void *dev_id)
return ret;
}
+static int fsl_ssi_setup(struct fsl_ssi_private *ssi_private)
+{
+ struct ccsr_ssi __iomem *ssi = ssi_private->ssi;
+ u8 i2s_mode;
+ u8 wm;
+ int synchronous = ssi_private->cpu_dai_drv.symmetric_rates;
+
+ if (ssi_private->imx_ac97)
+ i2s_mode = CCSR_SSI_SCR_I2S_MODE_NORMAL | CCSR_SSI_SCR_NET;
+ else
+ i2s_mode = CCSR_SSI_SCR_I2S_MODE_SLAVE;
+
+ /*
+ * Section 16.5 of the MPC8610 reference manual says that the SSI needs
+ * to be disabled before updating the registers we set here.
+ */
+ write_ssi_mask(&ssi->scr, CCSR_SSI_SCR_SSIEN, 0);
+
+ /*
+ * Program the SSI into I2S Slave Non-Network Synchronous mode. Also
+ * enable the transmit and receive FIFO.
+ *
+ * FIXME: Little-endian samples require a different shift dir
+ */
+ write_ssi_mask(&ssi->scr,
+ CCSR_SSI_SCR_I2S_MODE_MASK | CCSR_SSI_SCR_SYN,
+ CCSR_SSI_SCR_TFR_CLK_DIS |
+ i2s_mode |
+ (synchronous ? CCSR_SSI_SCR_SYN : 0));
+
+ write_ssi(CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TFEN0 |
+ CCSR_SSI_STCR_TFSI | CCSR_SSI_STCR_TEFS |
+ CCSR_SSI_STCR_TSCKP, &ssi->stcr);
+
+ write_ssi(CCSR_SSI_SRCR_RXBIT0 | CCSR_SSI_SRCR_RFEN0 |
+ CCSR_SSI_SRCR_RFSI | CCSR_SSI_SRCR_REFS |
+ CCSR_SSI_SRCR_RSCKP, &ssi->srcr);
+ /*
+ * The DC and PM bits are only used if the SSI is the clock master.
+ */
+
+ /*
+ * Set the watermark for transmit FIFO 0 and receive FIFO 0. We don't
+ * use FIFO 1. We program the transmit watermark to signal a DMA transfer
+ * if there are only two (or fewer) elements left in the FIFO. Two
+ * elements equals one frame (left channel, right channel). This value,
+ * however, depends on the depth of the transmit buffer.
+ *
+ * We set the watermark on the same level as the DMA burstsize. For
+ * fiq it is probably better to use the biggest possible watermark
+ * size.
+ */
+ if (ssi_private->use_dma)
+ wm = ssi_private->fifo_depth - 2;
+ else
+ wm = ssi_private->fifo_depth;
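+ /*
+ * Example: with a FIFO depth of 15 the DMA watermark ends up at 13,
+ * i.e. one stereo frame (two samples) of headroom.
+ */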
+
+ write_ssi(CCSR_SSI_SFCSR_TFWM0(wm) | CCSR_SSI_SFCSR_RFWM0(wm) |
+ CCSR_SSI_SFCSR_TFWM1(wm) | CCSR_SSI_SFCSR_RFWM1(wm),
+ &ssi->sfcsr);
+
+ /*
+ * For AC97, interrupts are enabled with the startup of the substream
+ * because the SSI is also running without an active substream. Normally
+ * the SSI is only enabled when there is a substream.
+ */
+ if (ssi_private->imx_ac97) {
+ /*
+ * Setup the clock control register
+ */
+ write_ssi(CCSR_SSI_SxCCR_WL(17) | CCSR_SSI_SxCCR_DC(13),
+ &ssi->stccr);
+ write_ssi(CCSR_SSI_SxCCR_WL(17) | CCSR_SSI_SxCCR_DC(13),
+ &ssi->srccr);
+
+ /*
+ * Enable AC97 mode and startup the SSI
+ */
+ write_ssi(CCSR_SSI_SACNT_AC97EN | CCSR_SSI_SACNT_FV,
+ &ssi->sacnt);
+ write_ssi(0xff, &ssi->saccdis);
+ write_ssi(0x300, &ssi->saccen);
+
+ /*
+ * Enable SSI, Transmit and Receive
+ */
+ write_ssi_mask(&ssi->scr, 0, CCSR_SSI_SCR_SSIEN |
+ CCSR_SSI_SCR_TE | CCSR_SSI_SCR_RE);
+
+ write_ssi(CCSR_SSI_SOR_WAIT(3), &ssi->sor);
+ }
+
+ return 0;
+}
+
+
/**
* fsl_ssi_startup: create a new substream
*
@@ -319,70 +438,14 @@ static int fsl_ssi_startup(struct snd_pcm_substream *substream,
* and initialize the SSI registers.
*/
if (!ssi_private->first_stream) {
- struct ccsr_ssi __iomem *ssi = ssi_private->ssi;
-
ssi_private->first_stream = substream;
/*
- * Section 16.5 of the MPC8610 reference manual says that the
- * SSI needs to be disabled before updating the registers we set
- * here.
- */
- write_ssi_mask(&ssi->scr, CCSR_SSI_SCR_SSIEN, 0);
-
- /*
- * Program the SSI into I2S Slave Non-Network Synchronous mode.
- * Also enable the transmit and receive FIFO.
- *
- * FIXME: Little-endian samples require a different shift dir
- */
- write_ssi_mask(&ssi->scr,
- CCSR_SSI_SCR_I2S_MODE_MASK | CCSR_SSI_SCR_SYN,
- CCSR_SSI_SCR_TFR_CLK_DIS | CCSR_SSI_SCR_I2S_MODE_SLAVE
- | (synchronous ? CCSR_SSI_SCR_SYN : 0));
-
- write_ssi(CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TFEN0 |
- CCSR_SSI_STCR_TFSI | CCSR_SSI_STCR_TEFS |
- CCSR_SSI_STCR_TSCKP, &ssi->stcr);
-
- write_ssi(CCSR_SSI_SRCR_RXBIT0 | CCSR_SSI_SRCR_RFEN0 |
- CCSR_SSI_SRCR_RFSI | CCSR_SSI_SRCR_REFS |
- CCSR_SSI_SRCR_RSCKP, &ssi->srcr);
-
- /*
- * The DC and PM bits are only used if the SSI is the clock
- * master.
- */
-
- /* Enable the interrupts and DMA requests */
- write_ssi(SIER_FLAGS, &ssi->sier);
-
- /*
- * Set the watermark for transmit FIFI 0 and receive FIFO 0. We
- * don't use FIFO 1. We program the transmit water to signal a
- * DMA transfer if there are only two (or fewer) elements left
- * in the FIFO. Two elements equals one frame (left channel,
- * right channel). This value, however, depends on the depth of
- * the transmit buffer.
- *
- * We program the receive FIFO to notify us if at least two
- * elements (one frame) have been written to the FIFO. We could
- * make this value larger (and maybe we should), but this way
- * data will be written to memory as soon as it's available.
- */
- write_ssi(CCSR_SSI_SFCSR_TFWM0(ssi_private->fifo_depth - 2) |
- CCSR_SSI_SFCSR_RFWM0(ssi_private->fifo_depth - 2),
- &ssi->sfcsr);
-
- /*
- * We keep the SSI disabled because if we enable it, then the
- * DMA controller will start. It's not supposed to start until
- * the SCR.TE (or SCR.RE) bit is set, but it does anyway. The
- * DMA controller will transfer one "BWC" of data (i.e. the
- * amount of data that the MR.BWC bits are set to). The reason
- * this is bad is because at this point, the PCM driver has not
- * finished initializing the DMA controller.
+ * fsl_ssi_setup was already called by ac97_init earlier if
+ * the driver is in ac97 mode.
*/
+ if (!ssi_private->imx_ac97)
+ fsl_ssi_setup(ssi_private);
} else {
if (synchronous) {
struct snd_pcm_runtime *first_runtime =
@@ -492,6 +555,27 @@ static int fsl_ssi_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(rtd->cpu_dai);
struct ccsr_ssi __iomem *ssi = ssi_private->ssi;
+ unsigned int sier_bits;
+
+ /*
+ * Enable only the interrupts and DMA requests
+ * that are needed for the channel. As the FIQ handler
+ * polls these bits, we have to ensure that they are
+ * aligned with the preallocated buffers.
+ */
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ if (ssi_private->use_dma)
+ sier_bits = SIER_FLAGS;
+ else
+ sier_bits = CCSR_SSI_SIER_TIE | CCSR_SSI_SIER_TFE0_EN;
+ } else {
+ if (ssi_private->use_dma)
+ sier_bits = SIER_FLAGS;
+ else
+ sier_bits = CCSR_SSI_SIER_RIE | CCSR_SSI_SIER_RFF0_EN;
+ }
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
@@ -510,12 +594,18 @@ static int fsl_ssi_trigger(struct snd_pcm_substream *substream, int cmd,
write_ssi_mask(&ssi->scr, CCSR_SSI_SCR_TE, 0);
else
write_ssi_mask(&ssi->scr, CCSR_SSI_SCR_RE, 0);
+
+ if (!ssi_private->imx_ac97 && (read_ssi(&ssi->scr) &
+ (CCSR_SSI_SCR_TE | CCSR_SSI_SCR_RE)) == 0)
+ write_ssi_mask(&ssi->scr, CCSR_SSI_SCR_SSIEN, 0);
break;
default:
return -EINVAL;
}
+ write_ssi(sier_bits, &ssi->sier);
+
return 0;
}
@@ -534,22 +624,13 @@ static void fsl_ssi_shutdown(struct snd_pcm_substream *substream,
ssi_private->first_stream = ssi_private->second_stream;
ssi_private->second_stream = NULL;
-
- /*
- * If this is the last active substream, disable the SSI.
- */
- if (!ssi_private->first_stream) {
- struct ccsr_ssi __iomem *ssi = ssi_private->ssi;
-
- write_ssi_mask(&ssi->scr, CCSR_SSI_SCR_SSIEN, 0);
- }
}
static int fsl_ssi_dai_probe(struct snd_soc_dai *dai)
{
struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(dai);
- if (ssi_private->ssi_on_imx) {
+ if (ssi_private->ssi_on_imx && ssi_private->use_dma) {
dai->playback_dma_data = &ssi_private->dma_params_tx;
dai->capture_dma_data = &ssi_private->dma_params_rx;
}
@@ -587,6 +668,133 @@ static const struct snd_soc_component_driver fsl_ssi_component = {
.name = "fsl-ssi",
};
+/**
+ * fsl_ssi_ac97_trigger: start and stop the AC97 receive/transmit.
+ *
+ * This function is called by ALSA to start, stop, pause, and resume the
+ * transfer of data.
+ */
+static int fsl_ssi_ac97_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct fsl_ssi_private *ssi_private = snd_soc_dai_get_drvdata(
+ rtd->cpu_dai);
+ struct ccsr_ssi __iomem *ssi = ssi_private->ssi;
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ write_ssi_mask(&ssi->sier, 0, CCSR_SSI_SIER_TIE |
+ CCSR_SSI_SIER_TFE0_EN);
+ else
+ write_ssi_mask(&ssi->sier, 0, CCSR_SSI_SIER_RIE |
+ CCSR_SSI_SIER_RFF0_EN);
+ break;
+
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ write_ssi_mask(&ssi->sier, CCSR_SSI_SIER_TIE |
+ CCSR_SSI_SIER_TFE0_EN, 0);
+ else
+ write_ssi_mask(&ssi->sier, CCSR_SSI_SIER_RIE |
+ CCSR_SSI_SIER_RFF0_EN, 0);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ write_ssi(CCSR_SSI_SOR_TX_CLR, &ssi->sor);
+ else
+ write_ssi(CCSR_SSI_SOR_RX_CLR, &ssi->sor);
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops fsl_ssi_ac97_dai_ops = {
+ .startup = fsl_ssi_startup,
+ .shutdown = fsl_ssi_shutdown,
+ .trigger = fsl_ssi_ac97_trigger,
+};
+
+static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
+ .ac97_control = 1,
+ .playback = {
+ .stream_name = "AC97 Playback",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_8000_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .capture = {
+ .stream_name = "AC97 Capture",
+ .channels_min = 2,
+ .channels_max = 2,
+ .rates = SNDRV_PCM_RATE_48000,
+ .formats = SNDRV_PCM_FMTBIT_S16_LE,
+ },
+ .ops = &fsl_ssi_ac97_dai_ops,
+};
+
+
+static struct fsl_ssi_private *fsl_ac97_data;
+
+static void fsl_ssi_ac97_init(void)
+{
+ fsl_ssi_setup(fsl_ac97_data);
+}
+
+void fsl_ssi_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
+ unsigned short val)
+{
+ struct ccsr_ssi *ssi = fsl_ac97_data->ssi;
+ unsigned int lreg;
+ unsigned int lval;
+
+ if (reg > 0x7f)
+ return;
+
+ lreg = reg << 12;
+ write_ssi(lreg, &ssi->sacadd);
+
+ lval = val << 4;
+ write_ssi(lval, &ssi->sacdat);
+
+ write_ssi_mask(&ssi->sacnt, CCSR_SSI_SACNT_RDWR_MASK,
+ CCSR_SSI_SACNT_WR);
+ udelay(100);
+}
+
+unsigned short fsl_ssi_ac97_read(struct snd_ac97 *ac97,
+ unsigned short reg)
+{
+ struct ccsr_ssi *ssi = fsl_ac97_data->ssi;
+
+ unsigned short val = -1;
+ unsigned int lreg;
+
+ lreg = (reg & 0x7f) << 12;
+ write_ssi(lreg, &ssi->sacadd);
+ write_ssi_mask(&ssi->sacnt, CCSR_SSI_SACNT_RDWR_MASK,
+ CCSR_SSI_SACNT_RD);
+
+ udelay(100);
+
+ val = (read_ssi(&ssi->sacdat) >> 4) & 0xffff;
+
+ return val;
+}
+
+static struct snd_ac97_bus_ops fsl_ssi_ac97_ops = {
+ .read = fsl_ssi_ac97_read,
+ .write = fsl_ssi_ac97_write,
+};
+
/* Show the statistics of a flag only if its interrupt is enabled. The
* compiler will optimize this code to a no-op if the interrupt is not
* enabled.
@@ -663,6 +871,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
struct resource res;
char name[64];
bool shared;
+ bool ac97 = false;
/* SSIs that are not connected on the board should have a
* status = "disabled"
@@ -673,14 +882,20 @@ static int fsl_ssi_probe(struct platform_device *pdev)
/* We only support the SSI in "I2S Slave" mode */
sprop = of_get_property(np, "fsl,mode", NULL);
- if (!sprop || strcmp(sprop, "i2s-slave")) {
+ if (!sprop) {
+ dev_err(&pdev->dev, "fsl,mode property is necessary\n");
+ return -EINVAL;
+ }
+ if (!strcmp(sprop, "ac97-slave")) {
+ ac97 = true;
+ } else if (strcmp(sprop, "i2s-slave")) {
dev_notice(&pdev->dev, "mode %s is unsupported\n", sprop);
return -ENODEV;
}
/* The DAI name is the last part of the full name of the node. */
p = strrchr(np->full_name, '/') + 1;
- ssi_private = kzalloc(sizeof(struct fsl_ssi_private) + strlen(p),
+ ssi_private = devm_kzalloc(&pdev->dev, sizeof(*ssi_private) + strlen(p),
GFP_KERNEL);
if (!ssi_private) {
dev_err(&pdev->dev, "could not allocate DAI object\n");
@@ -689,38 +904,41 @@ static int fsl_ssi_probe(struct platform_device *pdev)
strcpy(ssi_private->name, p);
- /* Initialize this copy of the CPU DAI driver structure */
- memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template,
- sizeof(fsl_ssi_dai_template));
+ ssi_private->use_dma = !of_property_read_bool(np,
+ "fsl,fiq-stream-filter");
+
+ if (ac97) {
+ memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_ac97_dai,
+ sizeof(fsl_ssi_ac97_dai));
+
+ fsl_ac97_data = ssi_private;
+ ssi_private->imx_ac97 = true;
+
+ snd_soc_set_ac97_ops_of_reset(&fsl_ssi_ac97_ops, pdev);
+ } else {
+ /* Initialize this copy of the CPU DAI driver structure */
+ memcpy(&ssi_private->cpu_dai_drv, &fsl_ssi_dai_template,
+ sizeof(fsl_ssi_dai_template));
+ }
ssi_private->cpu_dai_drv.name = ssi_private->name;
/* Get the addresses and IRQ */
ret = of_address_to_resource(np, 0, &res);
if (ret) {
dev_err(&pdev->dev, "could not determine device resources\n");
- goto error_kmalloc;
+ return ret;
}
ssi_private->ssi = of_iomap(np, 0);
if (!ssi_private->ssi) {
dev_err(&pdev->dev, "could not map device resources\n");
- ret = -ENOMEM;
- goto error_kmalloc;
+ return -ENOMEM;
}
ssi_private->ssi_phys = res.start;
ssi_private->irq = irq_of_parse_and_map(np, 0);
if (ssi_private->irq == NO_IRQ) {
dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
- ret = -ENXIO;
- goto error_iomap;
- }
-
- /* The 'name' should not have any slashes in it. */
- ret = request_irq(ssi_private->irq, fsl_ssi_isr, 0, ssi_private->name,
- ssi_private);
- if (ret < 0) {
- dev_err(&pdev->dev, "could not claim irq %u\n", ssi_private->irq);
- goto error_irqmap;
+ return -ENXIO;
}
/* Are the RX and the TX clocks locked? */
@@ -739,13 +957,18 @@ static int fsl_ssi_probe(struct platform_device *pdev)
u32 dma_events[2];
ssi_private->ssi_on_imx = true;
- ssi_private->clk = clk_get(&pdev->dev, NULL);
+ ssi_private->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(ssi_private->clk)) {
ret = PTR_ERR(ssi_private->clk);
dev_err(&pdev->dev, "could not get clock: %d\n", ret);
- goto error_irq;
+ goto error_irqmap;
+ }
+ ret = clk_prepare_enable(ssi_private->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n",
+ ret);
+ goto error_irqmap;
}
- clk_prepare_enable(ssi_private->clk);
/*
* We have burstsize be "fifo_depth - 2" to match the SSI
@@ -763,24 +986,38 @@ static int fsl_ssi_probe(struct platform_device *pdev)
&ssi_private->filter_data_tx;
ssi_private->dma_params_rx.filter_data =
&ssi_private->filter_data_rx;
- /*
- * TODO: This is a temporary solution and should be changed
- * to use generic DMA binding later when the helplers get in.
- */
- ret = of_property_read_u32_array(pdev->dev.of_node,
+ if (!of_property_read_bool(pdev->dev.of_node, "dmas") &&
+ ssi_private->use_dma) {
+ /*
+ * FIXME: This is a temporary solution until all
+ * necessary dma drivers support the generic dma
+ * bindings.
+ */
+ ret = of_property_read_u32_array(pdev->dev.of_node,
"fsl,ssi-dma-events", dma_events, 2);
- if (ret) {
- dev_err(&pdev->dev, "could not get dma events\n");
- goto error_clk;
+ if (ret && ssi_private->use_dma) {
+ dev_err(&pdev->dev, "could not get dma events but fsl-ssi is configured to use DMA\n");
+ goto error_clk;
+ }
}
shared = of_device_is_compatible(of_get_parent(np),
"fsl,spba-bus");
imx_pcm_dma_params_init_data(&ssi_private->filter_data_tx,
- dma_events[0], shared);
+ dma_events[0], shared ? IMX_DMATYPE_SSI_SP : IMX_DMATYPE_SSI);
imx_pcm_dma_params_init_data(&ssi_private->filter_data_rx,
- dma_events[1], shared);
+ dma_events[1], shared ? IMX_DMATYPE_SSI_SP : IMX_DMATYPE_SSI);
+ } else if (ssi_private->use_dma) {
+ /* The 'name' should not have any slashes in it. */
+ ret = devm_request_irq(&pdev->dev, ssi_private->irq,
+ fsl_ssi_isr, 0, ssi_private->name,
+ ssi_private);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "could not claim irq %u\n",
+ ssi_private->irq);
+ goto error_irqmap;
+ }
}
/* Initialize the device_attribute structure */
@@ -794,7 +1031,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "could not create sysfs %s file\n",
ssi_private->dev_attr.attr.name);
- goto error_irq;
+ goto error_clk;
}
/* Register with ASoC */
@@ -808,9 +1045,30 @@ static int fsl_ssi_probe(struct platform_device *pdev)
}
if (ssi_private->ssi_on_imx) {
- ret = imx_pcm_dma_init(pdev);
- if (ret)
- goto error_dev;
+ if (!ssi_private->use_dma) {
+
+ /*
+ * Some boards use an incompatible codec. To get it
+ * working, we use imx-fiq-pcm-audio, which
+ * can handle those codecs. DMA is not possible in this
+ * situation.
+ */
+
+ ssi_private->fiq_params.irq = ssi_private->irq;
+ ssi_private->fiq_params.base = ssi_private->ssi;
+ ssi_private->fiq_params.dma_params_rx =
+ &ssi_private->dma_params_rx;
+ ssi_private->fiq_params.dma_params_tx =
+ &ssi_private->dma_params_tx;
+
+ ret = imx_pcm_fiq_init(pdev, &ssi_private->fiq_params);
+ if (ret)
+ goto error_dev;
+ } else {
+ ret = imx_pcm_dma_init(pdev);
+ if (ret)
+ goto error_dev;
+ }
}
/*
@@ -845,6 +1103,9 @@ static int fsl_ssi_probe(struct platform_device *pdev)
}
done:
+ if (ssi_private->imx_ac97)
+ fsl_ssi_ac97_init();
+
return 0;
error_dai:
@@ -853,27 +1114,15 @@ error_dai:
snd_soc_unregister_component(&pdev->dev);
error_dev:
- dev_set_drvdata(&pdev->dev, NULL);
device_remove_file(&pdev->dev, dev_attr);
error_clk:
- if (ssi_private->ssi_on_imx) {
+ if (ssi_private->ssi_on_imx)
clk_disable_unprepare(ssi_private->clk);
- clk_put(ssi_private->clk);
- }
-
-error_irq:
- free_irq(ssi_private->irq, ssi_private);
error_irqmap:
irq_dispose_mapping(ssi_private->irq);
-error_iomap:
- iounmap(ssi_private->ssi);
-
-error_kmalloc:
- kfree(ssi_private);
-
return ret;
}
@@ -883,20 +1132,15 @@ static int fsl_ssi_remove(struct platform_device *pdev)
if (!ssi_private->new_binding)
platform_device_unregister(ssi_private->pdev);
- if (ssi_private->ssi_on_imx) {
+ if (ssi_private->ssi_on_imx)
imx_pcm_dma_exit(pdev);
- clk_disable_unprepare(ssi_private->clk);
- clk_put(ssi_private->clk);
- }
snd_soc_unregister_component(&pdev->dev);
+ dev_set_drvdata(&pdev->dev, NULL);
device_remove_file(&pdev->dev, &ssi_private->dev_attr);
-
- free_irq(ssi_private->irq, ssi_private);
+ if (ssi_private->ssi_on_imx)
+ clk_disable_unprepare(ssi_private->clk);
irq_dispose_mapping(ssi_private->irq);
- kfree(ssi_private);
- dev_set_drvdata(&pdev->dev, NULL);
-
return 0;
}
@@ -919,6 +1163,7 @@ static struct platform_driver fsl_ssi_driver = {
module_platform_driver(fsl_ssi_driver);
+MODULE_ALIAS("platform:fsl-ssi-dai");
MODULE_AUTHOR("Timur Tabi <timur@freescale.com>");
MODULE_DESCRIPTION("Freescale Synchronous Serial Interface (SSI) ASoC Driver");
MODULE_LICENSE("GPL v2");
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c
index e260f1f899d..ab17381cc98 100644
--- a/sound/soc/fsl/imx-audmux.c
+++ b/sound/soc/fsl/imx-audmux.c
@@ -73,8 +73,11 @@ static ssize_t audmux_read_file(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
- if (audmux_clk)
- clk_prepare_enable(audmux_clk);
+ if (audmux_clk) {
+ ret = clk_prepare_enable(audmux_clk);
+ if (ret)
+ return ret;
+ }
ptcr = readl(audmux_base + IMX_AUDMUX_V2_PTCR(port));
pdcr = readl(audmux_base + IMX_AUDMUX_V2_PDCR(port));
@@ -224,14 +227,19 @@ EXPORT_SYMBOL_GPL(imx_audmux_v1_configure_port);
int imx_audmux_v2_configure_port(unsigned int port, unsigned int ptcr,
unsigned int pdcr)
{
+ int ret;
+
if (audmux_type != IMX31_AUDMUX)
return -EINVAL;
if (!audmux_base)
return -ENOSYS;
- if (audmux_clk)
- clk_prepare_enable(audmux_clk);
+ if (audmux_clk) {
+ ret = clk_prepare_enable(audmux_clk);
+ if (ret)
+ return ret;
+ }
writel(ptcr, audmux_base + IMX_AUDMUX_V2_PTCR(port));
writel(pdcr, audmux_base + IMX_AUDMUX_V2_PDCR(port));
@@ -243,6 +251,66 @@ int imx_audmux_v2_configure_port(unsigned int port, unsigned int ptcr,
}
EXPORT_SYMBOL_GPL(imx_audmux_v2_configure_port);
+static int imx_audmux_parse_dt_defaults(struct platform_device *pdev,
+ struct device_node *of_node)
+{
+ struct device_node *child;
+
+ for_each_available_child_of_node(of_node, child) {
+ unsigned int port;
+ unsigned int ptcr = 0;
+ unsigned int pdcr = 0;
+ unsigned int pcr = 0;
+ unsigned int val;
+ int ret;
+ int i = 0;
+
+ ret = of_property_read_u32(child, "fsl,audmux-port", &port);
+ if (ret) {
+ dev_warn(&pdev->dev, "Failed to get fsl,audmux-port of child node \"%s\"\n",
+ child->full_name);
+ continue;
+ }
+ if (!of_property_read_bool(child, "fsl,port-config")) {
+ dev_warn(&pdev->dev, "child node \"%s\" does not have property fsl,port-config\n",
+ child->full_name);
+ continue;
+ }
+
+ for (i = 0; (ret = of_property_read_u32_index(child,
+ "fsl,port-config", i, &val)) == 0;
+ ++i) {
+ if (audmux_type == IMX31_AUDMUX) {
+ if (i % 2)
+ pdcr |= val;
+ else
+ ptcr |= val;
+ } else {
+ pcr |= val;
+ }
+ }
+
+ if (ret != -EOVERFLOW) {
+ dev_err(&pdev->dev, "Failed to read u32 at index %d of child %s\n",
+ i, child->full_name);
+ continue;
+ }
+
+ if (audmux_type == IMX31_AUDMUX) {
+ if (i % 2) {
+ dev_err(&pdev->dev, "One pdcr value is missing in child node %s\n",
+ child->full_name);
+ continue;
+ }
+ imx_audmux_v2_configure_port(port, ptcr, pdcr);
+ } else {
+ imx_audmux_v1_configure_port(port, pcr);
+ }
+ }
+
+ return 0;
+}
+
static int imx_audmux_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -267,6 +335,8 @@ static int imx_audmux_probe(struct platform_device *pdev)
if (audmux_type == IMX31_AUDMUX)
audmux_debugfs_init();
+ imx_audmux_parse_dt_defaults(pdev, pdev->dev.of_node);
+
return 0;
}
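The DT-defaults parser added above walks an open-ended "fsl,port-config" cell list by calling of_property_read_u32_index() until it runs past the end of the property, at which point the helper returns -EOVERFLOW; any other non-zero return means the property is malformed. A small self-contained sketch of that loop idiom, assuming a made-up "demo,config" property:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/printk.h>

/*
 * Read every u32 cell of a hypothetical "demo,config" property.
 * of_property_read_u32_index() returns 0 while cells remain and
 * -EOVERFLOW once the index passes the end of the property, which
 * is the expected loop-exit condition here.
 */
static int demo_read_all_cells(struct device_node *np)
{
	unsigned int accum = 0;
	u32 val;
	int ret;
	int i;

	for (i = 0; (ret = of_property_read_u32_index(np, "demo,config",
						      i, &val)) == 0; i++)
		accum |= val;

	if (ret != -EOVERFLOW) {
		/* Any other error means the property itself is malformed. */
		pr_err("demo: failed to read cell %d: %d\n", i, ret);
		return ret;
	}

	pr_info("demo: read %d cells, OR of values = 0x%x\n", i, accum);
	return 0;
}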
diff --git a/sound/soc/fsl/imx-audmux.h b/sound/soc/fsl/imx-audmux.h
index b8ff44b9daf..38a4209af7c 100644
--- a/sound/soc/fsl/imx-audmux.h
+++ b/sound/soc/fsl/imx-audmux.h
@@ -1,57 +1,7 @@
#ifndef __IMX_AUDMUX_H
#define __IMX_AUDMUX_H
-#define MX27_AUDMUX_HPCR1_SSI0 0
-#define MX27_AUDMUX_HPCR2_SSI1 1
-#define MX27_AUDMUX_HPCR3_SSI_PINS_4 2
-#define MX27_AUDMUX_PPCR1_SSI_PINS_1 3
-#define MX27_AUDMUX_PPCR2_SSI_PINS_2 4
-#define MX27_AUDMUX_PPCR3_SSI_PINS_3 5
-
-#define MX31_AUDMUX_PORT1_SSI0 0
-#define MX31_AUDMUX_PORT2_SSI1 1
-#define MX31_AUDMUX_PORT3_SSI_PINS_3 2
-#define MX31_AUDMUX_PORT4_SSI_PINS_4 3
-#define MX31_AUDMUX_PORT5_SSI_PINS_5 4
-#define MX31_AUDMUX_PORT6_SSI_PINS_6 5
-#define MX31_AUDMUX_PORT7_SSI_PINS_7 6
-
-#define MX51_AUDMUX_PORT1_SSI0 0
-#define MX51_AUDMUX_PORT2_SSI1 1
-#define MX51_AUDMUX_PORT3 2
-#define MX51_AUDMUX_PORT4 3
-#define MX51_AUDMUX_PORT5 4
-#define MX51_AUDMUX_PORT6 5
-#define MX51_AUDMUX_PORT7 6
-
-/* Register definitions for the i.MX21/27 Digital Audio Multiplexer */
-#define IMX_AUDMUX_V1_PCR_INMMASK(x) ((x) & 0xff)
-#define IMX_AUDMUX_V1_PCR_INMEN (1 << 8)
-#define IMX_AUDMUX_V1_PCR_TXRXEN (1 << 10)
-#define IMX_AUDMUX_V1_PCR_SYN (1 << 12)
-#define IMX_AUDMUX_V1_PCR_RXDSEL(x) (((x) & 0x7) << 13)
-#define IMX_AUDMUX_V1_PCR_RFCSEL(x) (((x) & 0xf) << 20)
-#define IMX_AUDMUX_V1_PCR_RCLKDIR (1 << 24)
-#define IMX_AUDMUX_V1_PCR_RFSDIR (1 << 25)
-#define IMX_AUDMUX_V1_PCR_TFCSEL(x) (((x) & 0xf) << 26)
-#define IMX_AUDMUX_V1_PCR_TCLKDIR (1 << 30)
-#define IMX_AUDMUX_V1_PCR_TFSDIR (1 << 31)
-
-/* Register definitions for the i.MX25/31/35/51 Digital Audio Multiplexer */
-#define IMX_AUDMUX_V2_PTCR_TFSDIR (1 << 31)
-#define IMX_AUDMUX_V2_PTCR_TFSEL(x) (((x) & 0xf) << 27)
-#define IMX_AUDMUX_V2_PTCR_TCLKDIR (1 << 26)
-#define IMX_AUDMUX_V2_PTCR_TCSEL(x) (((x) & 0xf) << 22)
-#define IMX_AUDMUX_V2_PTCR_RFSDIR (1 << 21)
-#define IMX_AUDMUX_V2_PTCR_RFSEL(x) (((x) & 0xf) << 17)
-#define IMX_AUDMUX_V2_PTCR_RCLKDIR (1 << 16)
-#define IMX_AUDMUX_V2_PTCR_RCSEL(x) (((x) & 0xf) << 12)
-#define IMX_AUDMUX_V2_PTCR_SYN (1 << 11)
-
-#define IMX_AUDMUX_V2_PDCR_RXDSEL(x) (((x) & 0x7) << 13)
-#define IMX_AUDMUX_V2_PDCR_TXRXEN (1 << 12)
-#define IMX_AUDMUX_V2_PDCR_MODE(x) (((x) & 0x3) << 8)
-#define IMX_AUDMUX_V2_PDCR_INMMASK(x) ((x) & 0xff)
+#include <dt-bindings/sound/fsl-imx-audmux.h>
int imx_audmux_v1_configure_port(unsigned int port, unsigned int pcr);
diff --git a/sound/soc/fsl/imx-mc13783.c b/sound/soc/fsl/imx-mc13783.c
index 9df173c091a..a3d60d4bea4 100644
--- a/sound/soc/fsl/imx-mc13783.c
+++ b/sound/soc/fsl/imx-mc13783.c
@@ -90,6 +90,7 @@ static const struct snd_soc_dapm_route imx_mc13783_routes[] = {
static struct snd_soc_card imx_mc13783 = {
.name = "imx_mc13783",
+ .owner = THIS_MODULE,
.dai_link = imx_mc13783_dai_mc13783,
.num_links = ARRAY_SIZE(imx_mc13783_dai_mc13783),
.dapm_widgets = imx_mc13783_widget,
diff --git a/sound/soc/fsl/imx-pcm-dma.c b/sound/soc/fsl/imx-pcm-dma.c
index fde4d2ea68c..4dc1296688e 100644
--- a/sound/soc/fsl/imx-pcm-dma.c
+++ b/sound/soc/fsl/imx-pcm-dma.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/types.h>
+#include <linux/module.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -64,7 +65,6 @@ int imx_pcm_dma_init(struct platform_device *pdev)
{
return snd_dmaengine_pcm_register(&pdev->dev, &imx_dmaengine_pcm_config,
SND_DMAENGINE_PCM_FLAG_NO_RESIDUE |
- SND_DMAENGINE_PCM_FLAG_NO_DT |
SND_DMAENGINE_PCM_FLAG_COMPAT);
}
EXPORT_SYMBOL_GPL(imx_pcm_dma_init);
@@ -74,3 +74,5 @@ void imx_pcm_dma_exit(struct platform_device *pdev)
snd_dmaengine_pcm_unregister(&pdev->dev);
}
EXPORT_SYMBOL_GPL(imx_pcm_dma_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/fsl/imx-pcm-fiq.c b/sound/soc/fsl/imx-pcm-fiq.c
index 310d9029032..34043c55f2a 100644
--- a/sound/soc/fsl/imx-pcm-fiq.c
+++ b/sound/soc/fsl/imx-pcm-fiq.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <sound/core.h>
+#include <sound/dmaengine_pcm.h>
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -32,6 +33,7 @@
#include <linux/platform_data/asoc-imx-ssi.h>
#include "imx-ssi.h"
+#include "imx-pcm.h"
struct imx_pcm_runtime_data {
unsigned int period;
@@ -366,9 +368,9 @@ static struct snd_soc_platform_driver imx_soc_platform_fiq = {
.pcm_free = imx_pcm_fiq_free,
};
-int imx_pcm_fiq_init(struct platform_device *pdev)
+int imx_pcm_fiq_init(struct platform_device *pdev,
+ struct imx_pcm_fiq_params *params)
{
- struct imx_ssi *ssi = platform_get_drvdata(pdev);
int ret;
ret = claim_fiq(&fh);
@@ -377,15 +379,15 @@ int imx_pcm_fiq_init(struct platform_device *pdev)
return ret;
}
- mxc_set_irq_fiq(ssi->irq, 1);
- ssi_irq = ssi->irq;
+ mxc_set_irq_fiq(params->irq, 1);
+ ssi_irq = params->irq;
- imx_pcm_fiq = ssi->irq;
+ imx_pcm_fiq = params->irq;
- imx_ssi_fiq_base = (unsigned long)ssi->base;
+ imx_ssi_fiq_base = (unsigned long)params->base;
- ssi->dma_params_tx.maxburst = 4;
- ssi->dma_params_rx.maxburst = 6;
+ params->dma_params_tx->maxburst = 4;
+ params->dma_params_rx->maxburst = 6;
ret = snd_soc_register_platform(&pdev->dev, &imx_soc_platform_fiq);
if (ret)
@@ -406,3 +408,5 @@ void imx_pcm_fiq_exit(struct platform_device *pdev)
snd_soc_unregister_platform(&pdev->dev);
}
EXPORT_SYMBOL_GPL(imx_pcm_fiq_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/fsl/imx-pcm.h b/sound/soc/fsl/imx-pcm.h
index 67f656c7c32..5d5b73303e1 100644
--- a/sound/soc/fsl/imx-pcm.h
+++ b/sound/soc/fsl/imx-pcm.h
@@ -22,17 +22,23 @@
static inline void
imx_pcm_dma_params_init_data(struct imx_dma_data *dma_data,
- int dma, bool shared)
+ int dma, enum sdma_peripheral_type peripheral_type)
{
dma_data->dma_request = dma;
dma_data->priority = DMA_PRIO_HIGH;
- if (shared)
- dma_data->peripheral_type = IMX_DMATYPE_SSI_SP;
- else
- dma_data->peripheral_type = IMX_DMATYPE_SSI;
+ dma_data->peripheral_type = peripheral_type;
}
-#ifdef CONFIG_SND_SOC_IMX_PCM_DMA
+struct imx_pcm_fiq_params {
+ int irq;
+ void __iomem *base;
+
+ /* Pointers to the original SSI driver's DMA data, used to set up tx/rx sizes */
+ struct snd_dmaengine_dai_dma_data *dma_params_rx;
+ struct snd_dmaengine_dai_dma_data *dma_params_tx;
+};
+
+#if IS_ENABLED(CONFIG_SND_SOC_IMX_PCM_DMA)
int imx_pcm_dma_init(struct platform_device *pdev);
void imx_pcm_dma_exit(struct platform_device *pdev);
#else
@@ -46,11 +52,13 @@ static inline void imx_pcm_dma_exit(struct platform_device *pdev)
}
#endif
-#ifdef CONFIG_SND_SOC_IMX_PCM_FIQ
-int imx_pcm_fiq_init(struct platform_device *pdev);
+#if IS_ENABLED(CONFIG_SND_SOC_IMX_PCM_FIQ)
+int imx_pcm_fiq_init(struct platform_device *pdev,
+ struct imx_pcm_fiq_params *params);
void imx_pcm_fiq_exit(struct platform_device *pdev);
#else
-static inline int imx_pcm_fiq_init(struct platform_device *pdev)
+static inline int imx_pcm_fiq_init(struct platform_device *pdev,
+ struct imx_pcm_fiq_params *params)
{
return -ENODEV;
}
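The header switch from "#ifdef CONFIG_FOO" to "#if IS_ENABLED(CONFIG_FOO)" matters because IS_ENABLED() evaluates true for both built-in (=y) and modular (=m) configurations, whereas #ifdef only catches =y. A minimal sketch of that prototype-or-stub pattern with invented names (demo_feature_*, CONFIG_DEMO_FEATURE):

/* demo_feature.h -- illustrative only; names are not from the kernel tree. */
#ifndef __DEMO_FEATURE_H
#define __DEMO_FEATURE_H

#include <linux/errno.h>
#include <linux/kconfig.h>	/* IS_ENABLED() */

struct platform_device;

#if IS_ENABLED(CONFIG_DEMO_FEATURE)
/* Real implementations live in demo_feature.c, built for =y and =m alike. */
int demo_feature_init(struct platform_device *pdev);
void demo_feature_exit(struct platform_device *pdev);
#else
/* Inline stubs keep callers compiling when the feature is disabled. */
static inline int demo_feature_init(struct platform_device *pdev)
{
	return -ENODEV;
}

static inline void demo_feature_exit(struct platform_device *pdev)
{
}
#endif

#endif /* __DEMO_FEATURE_H */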
diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
index 3f726e4f88d..46c5b4fdfc5 100644
--- a/sound/soc/fsl/imx-sgtl5000.c
+++ b/sound/soc/fsl/imx-sgtl5000.c
@@ -13,7 +13,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
-#include <linux/of_i2c.h>
+#include <linux/i2c.h>
#include <linux/clk.h>
#include <sound/soc.h>
@@ -129,8 +129,10 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
}
data->codec_clk = devm_clk_get(&codec_dev->dev, NULL);
- if (IS_ERR(data->codec_clk))
+ if (IS_ERR(data->codec_clk)) {
+ ret = PTR_ERR(data->codec_clk);
goto fail;
+ }
data->clk_frequency = clk_get_rate(data->codec_clk);
diff --git a/sound/soc/fsl/imx-spdif.c b/sound/soc/fsl/imx-spdif.c
new file mode 100644
index 00000000000..816013b0ebb
--- /dev/null
+++ b/sound/soc/fsl/imx-spdif.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2013 Freescale Semiconductor, Inc.
+ *
+ * The code contained herein is licensed under the GNU General Public
+ * License. You may obtain a copy of the GNU General Public License
+ * Version 2 or later at the following locations:
+ *
+ * http://www.opensource.org/licenses/gpl-license.html
+ * http://www.gnu.org/copyleft/gpl.html
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <sound/soc.h>
+
+struct imx_spdif_data {
+ struct snd_soc_dai_link dai[2];
+ struct snd_soc_card card;
+ struct platform_device *txdev;
+ struct platform_device *rxdev;
+};
+
+static int imx_spdif_audio_probe(struct platform_device *pdev)
+{
+ struct device_node *spdif_np, *np = pdev->dev.of_node;
+ struct imx_spdif_data *data;
+ int ret = 0, num_links = 0;
+
+ spdif_np = of_parse_phandle(np, "spdif-controller", 0);
+ if (!spdif_np) {
+ dev_err(&pdev->dev, "failed to find spdif-controller\n");
+ ret = -EINVAL;
+ goto end;
+ }
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ if (of_property_read_bool(np, "spdif-out")) {
+ data->dai[num_links].name = "S/PDIF TX";
+ data->dai[num_links].stream_name = "S/PDIF PCM Playback";
+ data->dai[num_links].codec_dai_name = "dit-hifi";
+ data->dai[num_links].codec_name = "spdif-dit";
+ data->dai[num_links].cpu_of_node = spdif_np;
+ data->dai[num_links].platform_of_node = spdif_np;
+ num_links++;
+
+ data->txdev = platform_device_register_simple("spdif-dit", -1, NULL, 0);
+ if (IS_ERR(data->txdev)) {
+ ret = PTR_ERR(data->txdev);
+ dev_err(&pdev->dev, "register dit failed: %d\n", ret);
+ goto end;
+ }
+ }
+
+ if (of_property_read_bool(np, "spdif-in")) {
+ data->dai[num_links].name = "S/PDIF RX";
+ data->dai[num_links].stream_name = "S/PDIF PCM Capture";
+ data->dai[num_links].codec_dai_name = "dir-hifi";
+ data->dai[num_links].codec_name = "spdif-dir";
+ data->dai[num_links].cpu_of_node = spdif_np;
+ data->dai[num_links].platform_of_node = spdif_np;
+ num_links++;
+
+ data->rxdev = platform_device_register_simple("spdif-dir", -1, NULL, 0);
+ if (IS_ERR(data->rxdev)) {
+ ret = PTR_ERR(data->rxdev);
+ dev_err(&pdev->dev, "register dir failed: %d\n", ret);
+ goto error_dit;
+ }
+ }
+
+ if (!num_links) {
+ dev_err(&pdev->dev, "no enabled S/PDIF DAI link\n");
+ goto error_dir;
+ }
+
+ data->card.dev = &pdev->dev;
+ data->card.num_links = num_links;
+ data->card.dai_link = data->dai;
+
+ ret = snd_soc_of_parse_card_name(&data->card, "model");
+ if (ret)
+ goto error_dir;
+
+ ret = snd_soc_register_card(&data->card);
+ if (ret) {
+ dev_err(&pdev->dev, "snd_soc_register_card failed: %d\n", ret);
+ goto error_dir;
+ }
+
+ platform_set_drvdata(pdev, data);
+
+ goto end;
+
+error_dir:
+ if (data->rxdev)
+ platform_device_unregister(data->rxdev);
+error_dit:
+ if (data->txdev)
+ platform_device_unregister(data->txdev);
+end:
+ if (spdif_np)
+ of_node_put(spdif_np);
+
+ return ret;
+}
+
+static int imx_spdif_audio_remove(struct platform_device *pdev)
+{
+ struct imx_spdif_data *data = platform_get_drvdata(pdev);
+
+ if (data->rxdev)
+ platform_device_unregister(data->rxdev);
+ if (data->txdev)
+ platform_device_unregister(data->txdev);
+
+ snd_soc_unregister_card(&data->card);
+
+ return 0;
+}
+
+static const struct of_device_id imx_spdif_dt_ids[] = {
+ { .compatible = "fsl,imx-audio-spdif", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_spdif_dt_ids);
+
+static struct platform_driver imx_spdif_driver = {
+ .driver = {
+ .name = "imx-spdif",
+ .owner = THIS_MODULE,
+ .of_match_table = imx_spdif_dt_ids,
+ },
+ .probe = imx_spdif_audio_probe,
+ .remove = imx_spdif_audio_remove,
+};
+
+module_platform_driver(imx_spdif_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("Freescale i.MX S/PDIF machine driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:imx-spdif");
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index 51be3772cba..f58bcd85c07 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -571,13 +571,13 @@ static int imx_ssi_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx0");
if (res) {
imx_pcm_dma_params_init_data(&ssi->filter_data_tx, res->start,
- false);
+ IMX_DMATYPE_SSI);
}
res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx0");
if (res) {
imx_pcm_dma_params_init_data(&ssi->filter_data_rx, res->start,
- false);
+ IMX_DMATYPE_SSI);
}
platform_set_drvdata(pdev, ssi);
@@ -595,7 +595,12 @@ static int imx_ssi_probe(struct platform_device *pdev)
goto failed_register;
}
- ret = imx_pcm_fiq_init(pdev);
+ ssi->fiq_params.irq = ssi->irq;
+ ssi->fiq_params.base = ssi->base;
+ ssi->fiq_params.dma_params_rx = &ssi->dma_params_rx;
+ ssi->fiq_params.dma_params_tx = &ssi->dma_params_tx;
+
+ ret = imx_pcm_fiq_init(pdev, &ssi->fiq_params);
if (ret)
goto failed_pcm_fiq;
diff --git a/sound/soc/fsl/imx-ssi.h b/sound/soc/fsl/imx-ssi.h
index d5003cefca8..fb1616ba8c5 100644
--- a/sound/soc/fsl/imx-ssi.h
+++ b/sound/soc/fsl/imx-ssi.h
@@ -209,6 +209,7 @@ struct imx_ssi {
struct snd_dmaengine_dai_dma_data dma_params_tx;
struct imx_dma_data filter_data_tx;
struct imx_dma_data filter_data_rx;
+ struct imx_pcm_fiq_params fiq_params;
int enabled;
};
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
index 52a36a90f4f..722afe69169 100644
--- a/sound/soc/fsl/imx-wm8962.c
+++ b/sound/soc/fsl/imx-wm8962.c
@@ -15,7 +15,7 @@
#include <linux/module.h>
#include <linux/of_platform.h>
-#include <linux/of_i2c.h>
+#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <sound/soc.h>
@@ -217,7 +217,8 @@ static int imx_wm8962_probe(struct platform_device *pdev)
codec_dev = of_find_i2c_device_by_node(codec_np);
if (!codec_dev || !codec_dev->driver) {
dev_err(&pdev->dev, "failed to find codec platform device\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto fail;
}
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 6cf8355a854..8c49147db84 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -105,6 +105,7 @@ static int asoc_simple_card_remove(struct platform_device *pdev)
static struct platform_driver asoc_simple_card = {
.driver = {
.name = "asoc-simple-card",
+ .owner = THIS_MODULE,
},
.probe = asoc_simple_card_probe,
.remove = asoc_simple_card_remove,
@@ -112,6 +113,7 @@ static struct platform_driver asoc_simple_card = {
module_platform_driver(asoc_simple_card);
+MODULE_ALIAS("platform:asoc-simple-card");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ASoC Simple Sound Card");
MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
diff --git a/sound/soc/kirkwood/Kconfig b/sound/soc/kirkwood/Kconfig
index c62d715235e..78ed4a42ad2 100644
--- a/sound/soc/kirkwood/Kconfig
+++ b/sound/soc/kirkwood/Kconfig
@@ -1,19 +1,15 @@
config SND_KIRKWOOD_SOC
- tristate "SoC Audio for the Marvell Kirkwood chip"
- depends on ARCH_KIRKWOOD
+ tristate "SoC Audio for the Marvell Kirkwood and Dove chips"
+ depends on ARCH_KIRKWOOD || ARCH_DOVE || COMPILE_TEST
help
Say Y or M if you want to add support for codecs attached to
the Kirkwood I2S interface. You will also need to select the
audio interfaces to support below.
-config SND_KIRKWOOD_SOC_I2S
- tristate
-
config SND_KIRKWOOD_SOC_OPENRD
tristate "SoC Audio support for Kirkwood Openrd Client"
- depends on SND_KIRKWOOD_SOC && (MACH_OPENRD_CLIENT || MACH_OPENRD_ULTIMATE)
+ depends on SND_KIRKWOOD_SOC && (MACH_OPENRD_CLIENT || MACH_OPENRD_ULTIMATE || COMPILE_TEST)
depends on I2C
- select SND_KIRKWOOD_SOC_I2S
select SND_SOC_CS42L51
help
Say Y if you want to add support for SoC audio on
@@ -21,8 +17,7 @@ config SND_KIRKWOOD_SOC_OPENRD
config SND_KIRKWOOD_SOC_T5325
tristate "SoC Audio support for HP t5325"
- depends on SND_KIRKWOOD_SOC && MACH_T5325 && I2C
- select SND_KIRKWOOD_SOC_I2S
+ depends on SND_KIRKWOOD_SOC && (MACH_T5325 || COMPILE_TEST) && I2C
select SND_SOC_ALC5623
help
Say Y if you want to add support for SoC audio on
diff --git a/sound/soc/kirkwood/Makefile b/sound/soc/kirkwood/Makefile
index 3e62ae9e7bb..9e781385cb8 100644
--- a/sound/soc/kirkwood/Makefile
+++ b/sound/soc/kirkwood/Makefile
@@ -1,8 +1,6 @@
-snd-soc-kirkwood-objs := kirkwood-dma.o
-snd-soc-kirkwood-i2s-objs := kirkwood-i2s.o
+snd-soc-kirkwood-objs := kirkwood-dma.o kirkwood-i2s.o
obj-$(CONFIG_SND_KIRKWOOD_SOC) += snd-soc-kirkwood.o
-obj-$(CONFIG_SND_KIRKWOOD_SOC_I2S) += snd-soc-kirkwood-i2s.o
snd-soc-openrd-objs := kirkwood-openrd.o
snd-soc-t5325-objs := kirkwood-t5325.o
diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
index a9f14530c3d..b238434f92b 100644
--- a/sound/soc/kirkwood/kirkwood-dma.c
+++ b/sound/soc/kirkwood/kirkwood-dma.c
@@ -33,11 +33,11 @@
SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE | \
SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_BE)
-struct kirkwood_dma_priv {
- struct snd_pcm_substream *play_stream;
- struct snd_pcm_substream *rec_stream;
- struct kirkwood_dma_data *data;
-};
+static struct kirkwood_dma_data *kirkwood_priv(struct snd_pcm_substream *subs)
+{
+ struct snd_soc_pcm_runtime *soc_runtime = subs->private_data;
+ return snd_soc_dai_get_drvdata(soc_runtime->cpu_dai);
+}
static struct snd_pcm_hardware kirkwood_dma_snd_hw = {
.info = (SNDRV_PCM_INFO_INTERLEAVED |
@@ -51,7 +51,7 @@ static struct snd_pcm_hardware kirkwood_dma_snd_hw = {
.rate_max = 384000,
.channels_min = 1,
.channels_max = 8,
- .buffer_bytes_max = KIRKWOOD_SND_MAX_PERIOD_BYTES * KIRKWOOD_SND_MAX_PERIODS,
+ .buffer_bytes_max = KIRKWOOD_SND_MAX_BUFFER_BYTES,
.period_bytes_min = KIRKWOOD_SND_MIN_PERIOD_BYTES,
.period_bytes_max = KIRKWOOD_SND_MAX_PERIOD_BYTES,
.periods_min = KIRKWOOD_SND_MIN_PERIODS,
@@ -63,8 +63,7 @@ static u64 kirkwood_dma_dmamask = DMA_BIT_MASK(32);
static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id)
{
- struct kirkwood_dma_priv *prdata = dev_id;
- struct kirkwood_dma_data *priv = prdata->data;
+ struct kirkwood_dma_data *priv = dev_id;
unsigned long mask, status, cause;
mask = readl(priv->io + KIRKWOOD_INT_MASK);
@@ -89,10 +88,10 @@ static irqreturn_t kirkwood_dma_irq(int irq, void *dev_id)
writel(status, priv->io + KIRKWOOD_INT_CAUSE);
if (status & KIRKWOOD_INT_CAUSE_PLAY_BYTES)
- snd_pcm_period_elapsed(prdata->play_stream);
+ snd_pcm_period_elapsed(priv->substream_play);
if (status & KIRKWOOD_INT_CAUSE_REC_BYTES)
- snd_pcm_period_elapsed(prdata->rec_stream);
+ snd_pcm_period_elapsed(priv->substream_rec);
return IRQ_HANDLED;
}
@@ -126,15 +125,10 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
{
int err;
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct snd_soc_platform *platform = soc_runtime->platform;
- struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
- struct kirkwood_dma_data *priv;
- struct kirkwood_dma_priv *prdata = snd_soc_platform_get_drvdata(platform);
+ struct kirkwood_dma_data *priv = kirkwood_priv(substream);
const struct mbus_dram_target_info *dram;
unsigned long addr;
- priv = snd_soc_dai_get_dma_data(cpu_dai, substream);
snd_soc_set_runtime_hwparams(substream, &kirkwood_dma_snd_hw);
/* Ensure that all constraints linked to dma burst are fulfilled */
@@ -157,21 +151,11 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
if (err < 0)
return err;
- if (prdata == NULL) {
- prdata = kzalloc(sizeof(struct kirkwood_dma_priv), GFP_KERNEL);
- if (prdata == NULL)
- return -ENOMEM;
-
- prdata->data = priv;
-
+ if (!priv->substream_play && !priv->substream_rec) {
err = request_irq(priv->irq, kirkwood_dma_irq, IRQF_SHARED,
- "kirkwood-i2s", prdata);
- if (err) {
- kfree(prdata);
+ "kirkwood-i2s", priv);
+ if (err)
return -EBUSY;
- }
-
- snd_soc_platform_set_drvdata(platform, prdata);
/*
* Enable Error interrupts. We're only ack'ing them but
@@ -183,11 +167,11 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
dram = mv_mbus_dram_info();
addr = substream->dma_buffer.addr;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- prdata->play_stream = substream;
+ priv->substream_play = substream;
kirkwood_dma_conf_mbus_windows(priv->io,
KIRKWOOD_PLAYBACK_WIN, addr, dram);
} else {
- prdata->rec_stream = substream;
+ priv->substream_rec = substream;
kirkwood_dma_conf_mbus_windows(priv->io,
KIRKWOOD_RECORD_WIN, addr, dram);
}
@@ -197,27 +181,19 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
static int kirkwood_dma_close(struct snd_pcm_substream *substream)
{
- struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
- struct snd_soc_platform *platform = soc_runtime->platform;
- struct kirkwood_dma_priv *prdata = snd_soc_platform_get_drvdata(platform);
- struct kirkwood_dma_data *priv;
-
- priv = snd_soc_dai_get_dma_data(cpu_dai, substream);
+ struct kirkwood_dma_data *priv = kirkwood_priv(substream);
- if (!prdata || !priv)
+ if (!priv)
return 0;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- prdata->play_stream = NULL;
+ priv->substream_play = NULL;
else
- prdata->rec_stream = NULL;
+ priv->substream_rec = NULL;
- if (!prdata->play_stream && !prdata->rec_stream) {
+ if (!priv->substream_play && !priv->substream_rec) {
writel(0, priv->io + KIRKWOOD_ERR_MASK);
- free_irq(priv->irq, prdata);
- kfree(prdata);
- snd_soc_platform_set_drvdata(platform, NULL);
+ free_irq(priv->irq, priv);
}
return 0;
@@ -243,13 +219,9 @@ static int kirkwood_dma_hw_free(struct snd_pcm_substream *substream)
static int kirkwood_dma_prepare(struct snd_pcm_substream *substream)
{
struct snd_pcm_runtime *runtime = substream->runtime;
- struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
- struct kirkwood_dma_data *priv;
+ struct kirkwood_dma_data *priv = kirkwood_priv(substream);
unsigned long size, count;
- priv = snd_soc_dai_get_dma_data(cpu_dai, substream);
-
/* compute buffer size in term of "words" as requested in specs */
size = frames_to_bytes(runtime, runtime->buffer_size);
size = (size>>2)-1;
@@ -272,13 +244,9 @@ static int kirkwood_dma_prepare(struct snd_pcm_substream *substream)
static snd_pcm_uframes_t kirkwood_dma_pointer(struct snd_pcm_substream
*substream)
{
- struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
- struct snd_soc_dai *cpu_dai = soc_runtime->cpu_dai;
- struct kirkwood_dma_data *priv;
+ struct kirkwood_dma_data *priv = kirkwood_priv(substream);
snd_pcm_uframes_t count;
- priv = snd_soc_dai_get_dma_data(cpu_dai, substream);
-
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
count = bytes_to_frames(substream->runtime,
readl(priv->io + KIRKWOOD_PLAY_BYTE_COUNT));
@@ -366,36 +334,8 @@ static void kirkwood_dma_free_dma_buffers(struct snd_pcm *pcm)
}
}
-static struct snd_soc_platform_driver kirkwood_soc_platform = {
+struct snd_soc_platform_driver kirkwood_soc_platform = {
.ops = &kirkwood_dma_ops,
.pcm_new = kirkwood_dma_new,
.pcm_free = kirkwood_dma_free_dma_buffers,
};
-
-static int kirkwood_soc_platform_probe(struct platform_device *pdev)
-{
- return snd_soc_register_platform(&pdev->dev, &kirkwood_soc_platform);
-}
-
-static int kirkwood_soc_platform_remove(struct platform_device *pdev)
-{
- snd_soc_unregister_platform(&pdev->dev);
- return 0;
-}
-
-static struct platform_driver kirkwood_pcm_driver = {
- .driver = {
- .name = "kirkwood-pcm-audio",
- .owner = THIS_MODULE,
- },
-
- .probe = kirkwood_soc_platform_probe,
- .remove = kirkwood_soc_platform_remove,
-};
-
-module_platform_driver(kirkwood_pcm_driver);
-
-MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
-MODULE_DESCRIPTION("Marvell Kirkwood Audio DMA module");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:kirkwood-pcm-audio");
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index 4c9dad3263c..7fce340ab3e 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -22,13 +22,12 @@
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <linux/platform_data/asoc-kirkwood.h>
+#include <linux/of.h>
+
#include "kirkwood.h"
-#define DRV_NAME "kirkwood-i2s"
+#define DRV_NAME "mvebu-audio"
-#define KIRKWOOD_I2S_RATES \
- (SNDRV_PCM_RATE_44100 | \
- SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_96000)
#define KIRKWOOD_I2S_FORMATS \
(SNDRV_PCM_FMTBIT_S16_LE | \
SNDRV_PCM_FMTBIT_S24_LE | \
@@ -105,14 +104,16 @@ static void kirkwood_set_rate(struct snd_soc_dai *dai,
uint32_t clks_ctrl;
if (rate == 44100 || rate == 48000 || rate == 96000) {
- /* use internal dco for supported rates */
+ /* use internal dco for the supported rates
+ * defined in kirkwood_i2s_dai */
dev_dbg(dai->dev, "%s: dco set rate = %lu\n",
__func__, rate);
kirkwood_set_dco(priv->io, rate);
clks_ctrl = KIRKWOOD_MCLK_SOURCE_DCO;
- } else if (!IS_ERR(priv->extclk)) {
- /* use optional external clk for other rates */
+ } else {
+ /* use the external clock for the other rates
+ * defined in kirkwood_i2s_dai_extclk */
dev_dbg(dai->dev, "%s: extclk set rate = %lu -> %lu\n",
__func__, rate, 256 * rate);
clk_set_rate(priv->extclk, 256 * rate);
@@ -199,8 +200,7 @@ static int kirkwood_i2s_hw_params(struct snd_pcm_substream *substream,
ctl_play |= KIRKWOOD_PLAYCTL_MONO_OFF;
priv->ctl_play &= ~(KIRKWOOD_PLAYCTL_MONO_MASK |
- KIRKWOOD_PLAYCTL_I2S_EN |
- KIRKWOOD_PLAYCTL_SPDIF_EN |
+ KIRKWOOD_PLAYCTL_ENABLE_MASK |
KIRKWOOD_PLAYCTL_SIZE_MASK);
priv->ctl_play |= ctl_play;
} else {
@@ -244,8 +244,7 @@ static int kirkwood_i2s_play_trigger(struct snd_pcm_substream *substream,
case SNDRV_PCM_TRIGGER_START:
/* configure */
ctl = priv->ctl_play;
- value = ctl & ~(KIRKWOOD_PLAYCTL_I2S_EN |
- KIRKWOOD_PLAYCTL_SPDIF_EN);
+ value = ctl & ~KIRKWOOD_PLAYCTL_ENABLE_MASK;
writel(value, priv->io + KIRKWOOD_PLAYCTL);
/* enable interrupts */
@@ -267,7 +266,7 @@ static int kirkwood_i2s_play_trigger(struct snd_pcm_substream *substream,
writel(value, priv->io + KIRKWOOD_INT_MASK);
/* disable all playbacks */
- ctl &= ~(KIRKWOOD_PLAYCTL_I2S_EN | KIRKWOOD_PLAYCTL_SPDIF_EN);
+ ctl &= ~KIRKWOOD_PLAYCTL_ENABLE_MASK;
writel(ctl, priv->io + KIRKWOOD_PLAYCTL);
break;
@@ -387,7 +386,7 @@ static int kirkwood_i2s_probe(struct snd_soc_dai *dai)
/* disable playback/record */
value = readl(priv->io + KIRKWOOD_PLAYCTL);
- value &= ~(KIRKWOOD_PLAYCTL_I2S_EN|KIRKWOOD_PLAYCTL_SPDIF_EN);
+ value &= ~KIRKWOOD_PLAYCTL_ENABLE_MASK;
writel(value, priv->io + KIRKWOOD_PLAYCTL);
value = readl(priv->io + KIRKWOOD_RECCTL);
@@ -398,11 +397,6 @@ static int kirkwood_i2s_probe(struct snd_soc_dai *dai)
}
-static int kirkwood_i2s_remove(struct snd_soc_dai *dai)
-{
- return 0;
-}
-
static const struct snd_soc_dai_ops kirkwood_i2s_dai_ops = {
.startup = kirkwood_i2s_startup,
.trigger = kirkwood_i2s_trigger,
@@ -413,17 +407,18 @@ static const struct snd_soc_dai_ops kirkwood_i2s_dai_ops = {
static struct snd_soc_dai_driver kirkwood_i2s_dai = {
.probe = kirkwood_i2s_probe,
- .remove = kirkwood_i2s_remove,
.playback = {
.channels_min = 1,
.channels_max = 2,
- .rates = KIRKWOOD_I2S_RATES,
+ .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_96000,
.formats = KIRKWOOD_I2S_FORMATS,
},
.capture = {
.channels_min = 1,
.channels_max = 2,
- .rates = KIRKWOOD_I2S_RATES,
+ .rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |
+ SNDRV_PCM_RATE_96000,
.formats = KIRKWOOD_I2S_FORMATS,
},
.ops = &kirkwood_i2s_dai_ops,
@@ -431,7 +426,6 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai = {
static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk = {
.probe = kirkwood_i2s_probe,
- .remove = kirkwood_i2s_remove,
.playback = {
.channels_min = 1,
.channels_max = 2,
@@ -461,6 +455,7 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
struct snd_soc_dai_driver *soc_dai = &kirkwood_i2s_dai;
struct kirkwood_dma_data *priv;
struct resource *mem;
+ struct device_node *np = pdev->dev.of_node;
int err;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -481,14 +476,16 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
return -ENXIO;
}
- if (!data) {
- dev_err(&pdev->dev, "no platform data ?!\n");
+ if (np) {
+ priv->burst = 128; /* might be 32 or 128 */
+ } else if (data) {
+ priv->burst = data->burst;
+ } else {
+ dev_err(&pdev->dev, "no DT nor platform data ?!\n");
return -EINVAL;
}
- priv->burst = data->burst;
-
- priv->clk = devm_clk_get(&pdev->dev, NULL);
+ priv->clk = devm_clk_get(&pdev->dev, np ? "internal" : NULL);
if (IS_ERR(priv->clk)) {
dev_err(&pdev->dev, "no clock\n");
return PTR_ERR(priv->clk);
@@ -498,10 +495,10 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
if (err < 0)
return err;
- priv->extclk = clk_get(&pdev->dev, "extclk");
+ priv->extclk = devm_clk_get(&pdev->dev, "extclk");
if (!IS_ERR(priv->extclk)) {
if (priv->extclk == priv->clk) {
- clk_put(priv->extclk);
+ devm_clk_put(&pdev->dev, priv->extclk);
priv->extclk = ERR_PTR(-EINVAL);
} else {
dev_info(&pdev->dev, "found external clock\n");
@@ -515,7 +512,7 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
priv->ctl_rec = KIRKWOOD_RECCTL_SIZE_24;
/* Select the burst size */
- if (data->burst == 32) {
+ if (priv->burst == 32) {
priv->ctl_play |= KIRKWOOD_PLAYCTL_BURST_32;
priv->ctl_rec |= KIRKWOOD_RECCTL_BURST_32;
} else {
@@ -525,14 +522,22 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
err = snd_soc_register_component(&pdev->dev, &kirkwood_i2s_component,
soc_dai, 1);
- if (!err)
- return 0;
- dev_err(&pdev->dev, "snd_soc_register_component failed\n");
+ if (err) {
+ dev_err(&pdev->dev, "snd_soc_register_component failed\n");
+ goto err_component;
+ }
- if (!IS_ERR(priv->extclk)) {
- clk_disable_unprepare(priv->extclk);
- clk_put(priv->extclk);
+ err = snd_soc_register_platform(&pdev->dev, &kirkwood_soc_platform);
+ if (err) {
+ dev_err(&pdev->dev, "snd_soc_register_platform failed\n");
+ goto err_platform;
}
+ return 0;
+ err_platform:
+ snd_soc_unregister_component(&pdev->dev);
+ err_component:
+ if (!IS_ERR(priv->extclk))
+ clk_disable_unprepare(priv->extclk);
clk_disable_unprepare(priv->clk);
return err;
@@ -542,23 +547,31 @@ static int kirkwood_i2s_dev_remove(struct platform_device *pdev)
{
struct kirkwood_dma_data *priv = dev_get_drvdata(&pdev->dev);
+ snd_soc_unregister_platform(&pdev->dev);
snd_soc_unregister_component(&pdev->dev);
- if (!IS_ERR(priv->extclk)) {
+ if (!IS_ERR(priv->extclk))
clk_disable_unprepare(priv->extclk);
- clk_put(priv->extclk);
- }
clk_disable_unprepare(priv->clk);
return 0;
}
+#ifdef CONFIG_OF
+static struct of_device_id mvebu_audio_of_match[] = {
+ { .compatible = "marvell,mvebu-audio" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mvebu_audio_of_match);
+#endif
+
static struct platform_driver kirkwood_i2s_driver = {
.probe = kirkwood_i2s_dev_probe,
.remove = kirkwood_i2s_dev_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(mvebu_audio_of_match),
},
};
@@ -568,4 +581,4 @@ module_platform_driver(kirkwood_i2s_driver);
MODULE_AUTHOR("Arnaud Patard, <arnaud.patard@rtp-net.org>");
MODULE_DESCRIPTION("Kirkwood I2S SoC Interface");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:kirkwood-i2s");
+MODULE_ALIAS("platform:mvebu-audio");
diff --git a/sound/soc/kirkwood/kirkwood-openrd.c b/sound/soc/kirkwood/kirkwood-openrd.c
index b979c715471..025be0e9716 100644
--- a/sound/soc/kirkwood/kirkwood-openrd.c
+++ b/sound/soc/kirkwood/kirkwood-openrd.c
@@ -16,9 +16,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <sound/soc.h>
-#include <mach/kirkwood.h>
#include <linux/platform_data/asoc-kirkwood.h>
-#include <asm/mach-types.h>
#include "../codecs/cs42l51.h"
static int openrd_client_hw_params(struct snd_pcm_substream *substream,
@@ -54,8 +52,8 @@ static struct snd_soc_dai_link openrd_client_dai[] = {
{
.name = "CS42L51",
.stream_name = "CS42L51 HiFi",
- .cpu_dai_name = "kirkwood-i2s",
- .platform_name = "kirkwood-pcm-audio",
+ .cpu_dai_name = "mvebu-audio",
+ .platform_name = "mvebu-audio",
.codec_dai_name = "cs42l51-hifi",
.codec_name = "cs42l51-codec.0-004a",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS,
diff --git a/sound/soc/kirkwood/kirkwood-t5325.c b/sound/soc/kirkwood/kirkwood-t5325.c
index 1d0ed6f8add..27545b0c485 100644
--- a/sound/soc/kirkwood/kirkwood-t5325.c
+++ b/sound/soc/kirkwood/kirkwood-t5325.c
@@ -15,9 +15,7 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <sound/soc.h>
-#include <mach/kirkwood.h>
#include <linux/platform_data/asoc-kirkwood.h>
-#include <asm/mach-types.h>
#include "../codecs/alc5623.h"
static int t5325_hw_params(struct snd_pcm_substream *substream,
@@ -70,8 +68,8 @@ static struct snd_soc_dai_link t5325_dai[] = {
{
.name = "ALC5621",
.stream_name = "ALC5621 HiFi",
- .cpu_dai_name = "kirkwood-i2s",
- .platform_name = "kirkwood-pcm-audio",
+ .cpu_dai_name = "mvebu-audio",
+ .platform_name = "mvebu-audio",
.codec_dai_name = "alc5621-hifi",
.codec_name = "alc562x-codec.0-001a",
.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS,
diff --git a/sound/soc/kirkwood/kirkwood.h b/sound/soc/kirkwood/kirkwood.h
index 4d92637ddb3..f8e1ccc1c58 100644
--- a/sound/soc/kirkwood/kirkwood.h
+++ b/sound/soc/kirkwood/kirkwood.h
@@ -54,7 +54,7 @@
#define KIRKWOOD_PLAYCTL_MONO_OFF (0<<5)
#define KIRKWOOD_PLAYCTL_I2S_MUTE (1<<7)
#define KIRKWOOD_PLAYCTL_SPDIF_EN (1<<4)
-#define KIRKWOOD_PLAYCTL_I2S_EN (1<<3)
+#define KIRKWOOD_PLAYCTL_I2S_EN (1<<3)
#define KIRKWOOD_PLAYCTL_SIZE_MASK (7<<0)
#define KIRKWOOD_PLAYCTL_SIZE_16 (7<<0)
#define KIRKWOOD_PLAYCTL_SIZE_16_C (3<<0)
@@ -62,6 +62,9 @@
#define KIRKWOOD_PLAYCTL_SIZE_24 (1<<0)
#define KIRKWOOD_PLAYCTL_SIZE_32 (0<<0)
+#define KIRKWOOD_PLAYCTL_ENABLE_MASK (KIRKWOOD_PLAYCTL_SPDIF_EN | \
+ KIRKWOOD_PLAYCTL_I2S_EN)
+
#define KIRKWOOD_PLAY_BUF_ADDR 0x1104
#define KIRKWOOD_PLAY_BUF_SIZE 0x1108
#define KIRKWOOD_PLAY_BYTE_COUNT 0x110C
@@ -122,6 +125,8 @@
#define KIRKWOOD_SND_MAX_PERIODS 16
#define KIRKWOOD_SND_MIN_PERIOD_BYTES 0x4000
#define KIRKWOOD_SND_MAX_PERIOD_BYTES 0x4000
+#define KIRKWOOD_SND_MAX_BUFFER_BYTES (KIRKWOOD_SND_MAX_PERIOD_BYTES \
+ * KIRKWOOD_SND_MAX_PERIODS)
struct kirkwood_dma_data {
void __iomem *io;
@@ -129,8 +134,12 @@ struct kirkwood_dma_data {
struct clk *extclk;
uint32_t ctl_play;
uint32_t ctl_rec;
+ struct snd_pcm_substream *substream_play;
+ struct snd_pcm_substream *substream_rec;
int irq;
int burst;
};
+extern struct snd_soc_platform_driver kirkwood_soc_platform;
+
#endif
diff --git a/sound/soc/mxs/Kconfig b/sound/soc/mxs/Kconfig
index 78d321cbe8b..219235c0221 100644
--- a/sound/soc/mxs/Kconfig
+++ b/sound/soc/mxs/Kconfig
@@ -1,6 +1,7 @@
menuconfig SND_MXS_SOC
tristate "SoC Audio for Freescale MXS CPUs"
- depends on ARCH_MXS
+ depends on ARCH_MXS || COMPILE_TEST
+ depends on COMMON_CLK
select SND_SOC_GENERIC_DMAENGINE_PCM
help
Say Y or M if you want to add support for codecs attached to
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index 54511c5e6a7..b56b8a0e8de 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -31,7 +31,6 @@
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
-#include <asm/mach-types.h>
#include "mxs-saif.h"
diff --git a/sound/soc/mxs/mxs-sgtl5000.c b/sound/soc/mxs/mxs-sgtl5000.c
index 1b134d72f12..4bb273786ff 100644
--- a/sound/soc/mxs/mxs-sgtl5000.c
+++ b/sound/soc/mxs/mxs-sgtl5000.c
@@ -25,7 +25,6 @@
#include <sound/soc.h>
#include <sound/jack.h>
#include <sound/soc-dapm.h>
-#include <asm/mach-types.h>
#include "../codecs/sgtl5000.h"
#include "mxs-saif.h"
@@ -51,18 +50,27 @@ static int mxs_sgtl5000_hw_params(struct snd_pcm_substream *substream,
}
/* Sgtl5000 sysclk should be >= 8MHz and <= 27M */
- if (mclk < 8000000 || mclk > 27000000)
+ if (mclk < 8000000 || mclk > 27000000) {
+ dev_err(codec_dai->dev, "Invalid mclk frequency: %u.%03uMHz\n",
+ mclk / 1000000, mclk / 1000 % 1000);
return -EINVAL;
+ }
/* Set SGTL5000's SYSCLK (provided by SAIF MCLK) */
ret = snd_soc_dai_set_sysclk(codec_dai, SGTL5000_SYSCLK, mclk, 0);
- if (ret)
+ if (ret) {
+ dev_err(codec_dai->dev, "Failed to set sysclk to %u.%03uMHz\n",
+ mclk / 1000000, mclk / 1000 % 1000);
return ret;
+ }
/* The SAIF MCLK should be the same as SGTL5000_SYSCLK */
ret = snd_soc_dai_set_sysclk(cpu_dai, MXS_SAIF_MCLK, mclk, 0);
- if (ret)
+ if (ret) {
+ dev_err(cpu_dai->dev, "Failed to set sysclk to %u.%03uMHz\n",
+ mclk / 1000000, mclk / 1000 % 1000);
return ret;
+ }
/* set codec to slave mode */
dai_format = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
@@ -70,13 +78,19 @@ static int mxs_sgtl5000_hw_params(struct snd_pcm_substream *substream,
/* set codec DAI configuration */
ret = snd_soc_dai_set_fmt(codec_dai, dai_format);
- if (ret)
+ if (ret) {
+ dev_err(codec_dai->dev, "Failed to set dai format to %08x\n",
+ dai_format);
return ret;
+ }
/* set cpu DAI configuration */
ret = snd_soc_dai_set_fmt(cpu_dai, dai_format);
- if (ret)
+ if (ret) {
+ dev_err(cpu_dai->dev, "Failed to set dai format to %08x\n",
+ dai_format);
return ret;
+ }
return 0;
}
@@ -91,11 +105,13 @@ static struct snd_soc_dai_link mxs_sgtl5000_dai[] = {
.stream_name = "HiFi Playback",
.codec_dai_name = "sgtl5000",
.ops = &mxs_sgtl5000_hifi_ops,
+ .playback_only = true,
}, {
.name = "HiFi Rx",
.stream_name = "HiFi Capture",
.codec_dai_name = "sgtl5000",
.ops = &mxs_sgtl5000_hifi_ops,
+ .capture_only = true,
},
};
@@ -154,8 +170,10 @@ static int mxs_sgtl5000_probe(struct platform_device *pdev)
* should be >= 8MHz and <= 27M.
*/
ret = mxs_saif_get_mclk(0, 44100 * 256, 44100);
- if (ret)
+ if (ret) {
+ dev_err(&pdev->dev, "failed to get mclk\n");
return ret;
+ }
card->dev = &pdev->dev;
platform_set_drvdata(pdev, card);
diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c
index f4c2417a873..8987bf987e5 100644
--- a/sound/soc/nuc900/nuc900-ac97.c
+++ b/sound/soc/nuc900/nuc900-ac97.c
@@ -333,9 +333,6 @@ static int nuc900_ac97_drvprobe(struct platform_device *pdev)
spin_lock_init(&nuc900_audio->lock);
nuc900_audio->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!nuc900_audio->res)
- return ret;
-
nuc900_audio->mmio = devm_ioremap_resource(&pdev->dev,
nuc900_audio->res);
if (IS_ERR(nuc900_audio->mmio))
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index 9f5d55e6b17..daa78a0095f 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -1,7 +1,7 @@
config SND_OMAP_SOC
tristate "SoC Audio for the Texas Instruments OMAP chips"
- depends on ARCH_OMAP && DMA_OMAP
- select SND_SOC_DMAENGINE_PCM
+ depends on (ARCH_OMAP && DMA_OMAP) || (ARCH_ARM && COMPILE_TEST)
+ select SND_DMAENGINE_PCM
config SND_OMAP_SOC_DMIC
tristate
@@ -26,7 +26,7 @@ config SND_OMAP_SOC_N810
config SND_OMAP_SOC_RX51
tristate "SoC Audio support for Nokia RX-51"
- depends on SND_OMAP_SOC && MACH_NOKIA_RX51
+ depends on SND_OMAP_SOC && ARCH_ARM && (MACH_NOKIA_RX51 || COMPILE_TEST)
select SND_OMAP_SOC_MCBSP
select SND_SOC_TLV320AIC3X
select SND_SOC_TPA6130A2
@@ -87,7 +87,7 @@ config SND_OMAP_SOC_OMAP_TWL4030
config SND_OMAP_SOC_OMAP_ABE_TWL6040
tristate "SoC Audio support for OMAP boards using ABE and twl6040 codec"
- depends on TWL6040_CORE && SND_OMAP_SOC && ARCH_OMAP4
+ depends on TWL6040_CORE && SND_OMAP_SOC && (ARCH_OMAP4 || COMPILE_TEST)
select SND_OMAP_SOC_DMIC
select SND_OMAP_SOC_MCPDM
select SND_SOC_TWL6040
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
index 361e4c03646..83433fdea32 100644
--- a/sound/soc/omap/mcbsp.c
+++ b/sound/soc/omap/mcbsp.c
@@ -781,7 +781,7 @@ static ssize_t prop##_store(struct device *dev, \
unsigned long val; \
int status; \
\
- status = strict_strtoul(buf, 0, &val); \
+ status = kstrtoul(buf, 0, &val); \
if (status) \
return status; \
\
diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
index 70cd5c7b2e1..ebb13906b3a 100644
--- a/sound/soc/omap/omap-abe-twl6040.c
+++ b/sound/soc/omap/omap-abe-twl6040.c
@@ -23,7 +23,6 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/mfd/twl6040.h>
-#include <linux/platform_data/omap-abe-twl6040.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -166,19 +165,10 @@ static const struct snd_soc_dapm_route audio_map[] = {
{"AFMR", NULL, "Line In"},
};
-static inline void twl6040_disconnect_pin(struct snd_soc_dapm_context *dapm,
- int connected, char *pin)
-{
- if (!connected)
- snd_soc_dapm_disable_pin(dapm, pin);
-}
-
static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_card *card = codec->card;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
- struct omap_abe_twl6040_data *pdata = dev_get_platdata(card->dev);
struct abe_twl6040 *priv = snd_soc_card_get_drvdata(card);
int hs_trim;
int ret = 0;
@@ -203,24 +193,6 @@ static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
twl6040_hs_jack_detect(codec, &hs_jack, SND_JACK_HEADSET);
}
- /*
- * NULL pdata means we booted with DT. In this case the routing is
- * provided and the card is fully routed, no need to mark pins.
- */
- if (!pdata)
- return ret;
-
- /* Disable not connected paths if not used */
- twl6040_disconnect_pin(dapm, pdata->has_hs, "Headset Stereophone");
- twl6040_disconnect_pin(dapm, pdata->has_hf, "Ext Spk");
- twl6040_disconnect_pin(dapm, pdata->has_ep, "Earphone Spk");
- twl6040_disconnect_pin(dapm, pdata->has_aux, "Line Out");
- twl6040_disconnect_pin(dapm, pdata->has_vibra, "Vibrator");
- twl6040_disconnect_pin(dapm, pdata->has_hsmic, "Headset Mic");
- twl6040_disconnect_pin(dapm, pdata->has_mainmic, "Main Handset Mic");
- twl6040_disconnect_pin(dapm, pdata->has_submic, "Sub Handset Mic");
- twl6040_disconnect_pin(dapm, pdata->has_afm, "Line In");
-
return ret;
}
@@ -274,13 +246,18 @@ static struct snd_soc_card omap_abe_card = {
static int omap_abe_probe(struct platform_device *pdev)
{
- struct omap_abe_twl6040_data *pdata = dev_get_platdata(&pdev->dev);
struct device_node *node = pdev->dev.of_node;
struct snd_soc_card *card = &omap_abe_card;
+ struct device_node *dai_node;
struct abe_twl6040 *priv;
int num_links = 0;
int ret = 0;
+ if (!node) {
+ dev_err(&pdev->dev, "of node is missing.\n");
+ return -ENODEV;
+ }
+
card->dev = &pdev->dev;
priv = devm_kzalloc(&pdev->dev, sizeof(struct abe_twl6040), GFP_KERNEL);
@@ -289,78 +266,50 @@ static int omap_abe_probe(struct platform_device *pdev)
priv->dmic_codec_dev = ERR_PTR(-EINVAL);
- if (node) {
- struct device_node *dai_node;
-
- if (snd_soc_of_parse_card_name(card, "ti,model")) {
- dev_err(&pdev->dev, "Card name is not provided\n");
- return -ENODEV;
- }
+ if (snd_soc_of_parse_card_name(card, "ti,model")) {
+ dev_err(&pdev->dev, "Card name is not provided\n");
+ return -ENODEV;
+ }
- ret = snd_soc_of_parse_audio_routing(card,
- "ti,audio-routing");
- if (ret) {
- dev_err(&pdev->dev,
- "Error while parsing DAPM routing\n");
- return ret;
- }
+ ret = snd_soc_of_parse_audio_routing(card, "ti,audio-routing");
+ if (ret) {
+ dev_err(&pdev->dev, "Error while parsing DAPM routing\n");
+ return ret;
+ }
- dai_node = of_parse_phandle(node, "ti,mcpdm", 0);
- if (!dai_node) {
- dev_err(&pdev->dev, "McPDM node is not provided\n");
- return -EINVAL;
- }
- abe_twl6040_dai_links[0].cpu_dai_name = NULL;
- abe_twl6040_dai_links[0].cpu_of_node = dai_node;
+ dai_node = of_parse_phandle(node, "ti,mcpdm", 0);
+ if (!dai_node) {
+ dev_err(&pdev->dev, "McPDM node is not provided\n");
+ return -EINVAL;
+ }
+ abe_twl6040_dai_links[0].cpu_dai_name = NULL;
+ abe_twl6040_dai_links[0].cpu_of_node = dai_node;
- dai_node = of_parse_phandle(node, "ti,dmic", 0);
- if (dai_node) {
- num_links = 2;
- abe_twl6040_dai_links[1].cpu_dai_name = NULL;
- abe_twl6040_dai_links[1].cpu_of_node = dai_node;
+ dai_node = of_parse_phandle(node, "ti,dmic", 0);
+ if (dai_node) {
+ num_links = 2;
+ abe_twl6040_dai_links[1].cpu_dai_name = NULL;
+ abe_twl6040_dai_links[1].cpu_of_node = dai_node;
- priv->dmic_codec_dev = platform_device_register_simple(
+ priv->dmic_codec_dev = platform_device_register_simple(
"dmic-codec", -1, NULL, 0);
- if (IS_ERR(priv->dmic_codec_dev)) {
- dev_err(&pdev->dev,
- "Can't instantiate dmic-codec\n");
- return PTR_ERR(priv->dmic_codec_dev);
- }
- } else {
- num_links = 1;
- }
-
- priv->jack_detection = of_property_read_bool(node,
- "ti,jack-detection");
- of_property_read_u32(node, "ti,mclk-freq",
- &priv->mclk_freq);
- if (!priv->mclk_freq) {
- dev_err(&pdev->dev, "MCLK frequency not provided\n");
- ret = -EINVAL;
- goto err_unregister;
+ if (IS_ERR(priv->dmic_codec_dev)) {
+ dev_err(&pdev->dev, "Can't instantiate dmic-codec\n");
+ return PTR_ERR(priv->dmic_codec_dev);
}
-
- omap_abe_card.fully_routed = 1;
- } else if (pdata) {
- if (pdata->card_name) {
- card->name = pdata->card_name;
- } else {
- dev_err(&pdev->dev, "Card name is not provided\n");
- return -ENODEV;
- }
-
- if (pdata->has_dmic)
- num_links = 2;
- else
- num_links = 1;
-
- priv->jack_detection = pdata->jack_detection;
- priv->mclk_freq = pdata->mclk_freq;
} else {
- dev_err(&pdev->dev, "Missing pdata\n");
- return -ENODEV;
+ num_links = 1;
+ }
+
+ priv->jack_detection = of_property_read_bool(node, "ti,jack-detection");
+ of_property_read_u32(node, "ti,mclk-freq", &priv->mclk_freq);
+ if (!priv->mclk_freq) {
+ dev_err(&pdev->dev, "MCLK frequency not provided\n");
+ ret = -EINVAL;
+ goto err_unregister;
}
+ card->fully_routed = 1;
if (!priv->mclk_freq) {
dev_err(&pdev->dev, "MCLK frequency missing\n");
diff --git a/sound/soc/omap/omap-dmic.c b/sound/soc/omap/omap-dmic.c
index 4db1f8e6e17..12e566be379 100644
--- a/sound/soc/omap/omap-dmic.c
+++ b/sound/soc/omap/omap-dmic.c
@@ -480,15 +480,12 @@ static int asoc_dmic_probe(struct platform_device *pdev)
dmic->dma_data.filter_data = "up_link";
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu");
- if (!res) {
- dev_err(dmic->dev, "invalid memory resource\n");
- ret = -ENODEV;
+ dmic->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dmic->io_base)) {
+ ret = PTR_ERR(dmic->io_base);
goto err_put_clk;
}
- dmic->io_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dmic->io_base))
- return PTR_ERR(dmic->io_base);
ret = snd_soc_register_component(&pdev->dev, &omap_dmic_component,
&omap_dmic_dai, 1);
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index 7483efb6dc6..6c19bba2357 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -433,6 +433,11 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
/* Sample rate generator drives the FS */
regs->srgr2 |= FSGM;
break;
+ case SND_SOC_DAIFMT_CBM_CFS:
+ /* McBSP slave. FS clock as output */
+ regs->srgr2 |= FSGM;
+ regs->pcr0 |= FSXM;
+ break;
case SND_SOC_DAIFMT_CBM_CFM:
/* McBSP slave */
break;
diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
index a49dc52f8ab..90d2a7cd256 100644
--- a/sound/soc/omap/omap-mcpdm.c
+++ b/sound/soc/omap/omap-mcpdm.c
@@ -480,9 +480,6 @@ static int asoc_mcpdm_probe(struct platform_device *pdev)
mcpdm->dma_data[1].filter_data = "up_link";
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mpu");
- if (res == NULL)
- return -ENOMEM;
-
mcpdm->io_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mcpdm->io_base))
return PTR_ERR(mcpdm->io_base);
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index b3580946754..4db74a083db 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -11,7 +11,7 @@ config SND_PXA2XX_SOC
config SND_MMP_SOC
bool "Soc Audio for Marvell MMP chips"
depends on ARCH_MMP
- select SND_SOC_DMAENGINE_PCM
+ select SND_DMAENGINE_PCM
select SND_ARM
help
Say Y if you want to add support for codecs attached to
diff --git a/sound/soc/pxa/brownstone.c b/sound/soc/pxa/brownstone.c
index 4ad76099dd4..5b7d969f89a 100644
--- a/sound/soc/pxa/brownstone.c
+++ b/sound/soc/pxa/brownstone.c
@@ -129,6 +129,7 @@ static struct snd_soc_dai_link brownstone_wm8994_dai[] = {
/* audio machine driver */
static struct snd_soc_card brownstone = {
.name = "brownstone",
+ .owner = THIS_MODULE,
.dai_link = brownstone_wm8994_dai,
.num_links = ARRAY_SIZE(brownstone_wm8994_dai),
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index 97b711e1282..bbea7780eac 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -56,8 +56,6 @@
#include "pxa2xx-ac97.h"
#include "../codecs/wm9713.h"
-#define ARRAY_AND_SIZE(x) (x), ARRAY_SIZE(x)
-
#define AC97_GPIO_PULL 0x58
/* Use GPIO8 for rear speaker amplifier */
@@ -133,10 +131,11 @@ static int mioa701_wm9713_init(struct snd_soc_pcm_runtime *rtd)
unsigned short reg;
/* Add mioa701 specific widgets */
- snd_soc_dapm_new_controls(dapm, ARRAY_AND_SIZE(mioa701_dapm_widgets));
+ snd_soc_dapm_new_controls(dapm, mioa701_dapm_widgets,
+ ARRAY_SIZE(mioa701_dapm_widgets));
/* Set up mioa701 specific audio path audio_mapnects */
- snd_soc_dapm_add_routes(dapm, ARRAY_AND_SIZE(audio_map));
+ snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
/* Prepare GPIO8 for rear speaker amplifier */
reg = codec->driver->read(codec, AC97_GPIO_CFG);
diff --git a/sound/soc/pxa/mmp-pcm.c b/sound/soc/pxa/mmp-pcm.c
index 5d57e071cdf..8235e231d89 100644
--- a/sound/soc/pxa/mmp-pcm.c
+++ b/sound/soc/pxa/mmp-pcm.c
@@ -17,6 +17,7 @@
#include <linux/dmaengine.h>
#include <linux/platform_data/dma-mmp_tdma.h>
#include <linux/platform_data/mmp_audio.h>
+
#include <sound/pxa2xx-lib.h>
#include <sound/core.h>
#include <sound/pcm.h>
@@ -67,7 +68,7 @@ static int mmp_pcm_hw_params(struct snd_pcm_substream *substream,
{
struct dma_chan *chan = snd_dmaengine_pcm_get_chan(substream);
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct pxa2xx_pcm_dma_params *dma_params;
+ struct snd_dmaengine_dai_dma_data *dma_params;
struct dma_slave_config slave_config;
int ret;
@@ -80,10 +81,10 @@ static int mmp_pcm_hw_params(struct snd_pcm_substream *substream,
return ret;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- slave_config.dst_addr = dma_params->dev_addr;
+ slave_config.dst_addr = dma_params->addr;
slave_config.dst_maxburst = 4;
} else {
- slave_config.src_addr = dma_params->dev_addr;
+ slave_config.src_addr = dma_params->addr;
slave_config.src_maxburst = 4;
}
diff --git a/sound/soc/pxa/mmp-sspa.c b/sound/soc/pxa/mmp-sspa.c
index 62142ce367c..41752a5fe3b 100644
--- a/sound/soc/pxa/mmp-sspa.c
+++ b/sound/soc/pxa/mmp-sspa.c
@@ -27,12 +27,15 @@
#include <linux/slab.h>
#include <linux/pxa2xx_ssp.h>
#include <linux/io.h>
+#include <linux/dmaengine.h>
+
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/initval.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/pxa2xx-lib.h>
+#include <sound/dmaengine_pcm.h>
#include "mmp-sspa.h"
/*
@@ -40,7 +43,7 @@
*/
struct sspa_priv {
struct ssp_device *sspa;
- struct pxa2xx_pcm_dma_params *dma_params;
+ struct snd_dmaengine_dai_dma_data *dma_params;
struct clk *audio_clk;
struct clk *sysclk;
int dai_fmt;
@@ -266,7 +269,7 @@ static int mmp_sspa_hw_params(struct snd_pcm_substream *substream,
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct sspa_priv *sspa_priv = snd_soc_dai_get_drvdata(dai);
struct ssp_device *sspa = sspa_priv->sspa;
- struct pxa2xx_pcm_dma_params *dma_params;
+ struct snd_dmaengine_dai_dma_data *dma_params;
u32 sspa_ctrl;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
@@ -309,7 +312,7 @@ static int mmp_sspa_hw_params(struct snd_pcm_substream *substream,
}
dma_params = &sspa_priv->dma_params[substream->stream];
- dma_params->dev_addr = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ dma_params->addr = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
(sspa->phys_base + SSPA_TXD) :
(sspa->phys_base + SSPA_RXD);
snd_soc_dai_set_dma_data(cpu_dai, substream, dma_params);
@@ -425,14 +428,12 @@ static int asoc_mmp_sspa_probe(struct platform_device *pdev)
return -ENOMEM;
priv->dma_params = devm_kzalloc(&pdev->dev,
- 2 * sizeof(struct pxa2xx_pcm_dma_params), GFP_KERNEL);
+ 2 * sizeof(struct snd_dmaengine_dai_dma_data),
+ GFP_KERNEL);
if (priv->dma_params == NULL)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL)
- return -ENOMEM;
-
priv->sspa->mmio_base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->sspa->mmio_base))
return PTR_ERR(priv->sspa->mmio_base);
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index 6f4dd7543e8..a3119a00d8f 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -21,6 +21,8 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/pxa2xx_ssp.h>
+#include <linux/of.h>
+#include <linux/dmaengine.h>
#include <asm/irq.h>
@@ -30,9 +32,9 @@
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/pxa2xx-lib.h>
+#include <sound/dmaengine_pcm.h>
#include <mach/hardware.h>
-#include <mach/dma.h>
#include "../../arm/pxa2xx-pcm.h"
#include "pxa-ssp.h"
@@ -79,27 +81,13 @@ static void pxa_ssp_disable(struct ssp_device *ssp)
__raw_writel(sscr0, ssp->mmio_base + SSCR0);
}
-struct pxa2xx_pcm_dma_data {
- struct pxa2xx_pcm_dma_params params;
- char name[20];
-};
-
static void pxa_ssp_set_dma_params(struct ssp_device *ssp, int width4,
- int out, struct pxa2xx_pcm_dma_params *dma_data)
+ int out, struct snd_dmaengine_dai_dma_data *dma)
{
- struct pxa2xx_pcm_dma_data *dma;
-
- dma = container_of(dma_data, struct pxa2xx_pcm_dma_data, params);
-
- snprintf(dma->name, 20, "SSP%d PCM %s %s", ssp->port_id,
- width4 ? "32-bit" : "16-bit", out ? "out" : "in");
-
- dma->params.name = dma->name;
- dma->params.drcmr = &DRCMR(out ? ssp->drcmr_tx : ssp->drcmr_rx);
- dma->params.dcmd = (out ? (DCMD_INCSRCADDR | DCMD_FLOWTRG) :
- (DCMD_INCTRGADDR | DCMD_FLOWSRC)) |
- (width4 ? DCMD_WIDTH4 : DCMD_WIDTH2) | DCMD_BURST16;
- dma->params.dev_addr = ssp->phys_base + SSDR;
+ dma->addr_width = width4 ? DMA_SLAVE_BUSWIDTH_4_BYTES :
+ DMA_SLAVE_BUSWIDTH_2_BYTES;
+ dma->maxburst = 16;
+ dma->addr = ssp->phys_base + SSDR;
}
static int pxa_ssp_startup(struct snd_pcm_substream *substream,
@@ -107,7 +95,7 @@ static int pxa_ssp_startup(struct snd_pcm_substream *substream,
{
struct ssp_priv *priv = snd_soc_dai_get_drvdata(cpu_dai);
struct ssp_device *ssp = priv->ssp;
- struct pxa2xx_pcm_dma_data *dma;
+ struct snd_dmaengine_dai_dma_data *dma;
int ret = 0;
if (!cpu_dai->active) {
@@ -115,10 +103,14 @@ static int pxa_ssp_startup(struct snd_pcm_substream *substream,
pxa_ssp_disable(ssp);
}
- dma = kzalloc(sizeof(struct pxa2xx_pcm_dma_data), GFP_KERNEL);
+ dma = kzalloc(sizeof(struct snd_dmaengine_dai_dma_data), GFP_KERNEL);
if (!dma)
return -ENOMEM;
- snd_soc_dai_set_dma_data(cpu_dai, substream, &dma->params);
+
+ dma->filter_data = substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+ &ssp->drcmr_tx : &ssp->drcmr_rx;
+
+ snd_soc_dai_set_dma_data(cpu_dai, substream, dma);
return ret;
}
@@ -559,7 +551,7 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
u32 sspsp;
int width = snd_pcm_format_physical_width(params_format(params));
int ttsa = pxa_ssp_read_reg(ssp, SSTSA) & 0xf;
- struct pxa2xx_pcm_dma_params *dma_data;
+ struct snd_dmaengine_dai_dma_data *dma_data;
dma_data = snd_soc_dai_get_dma_data(cpu_dai, substream);
@@ -719,6 +711,7 @@ static int pxa_ssp_trigger(struct snd_pcm_substream *substream, int cmd,
static int pxa_ssp_probe(struct snd_soc_dai *dai)
{
+ struct device *dev = dai->dev;
struct ssp_priv *priv;
int ret;
@@ -726,10 +719,26 @@ static int pxa_ssp_probe(struct snd_soc_dai *dai)
if (!priv)
return -ENOMEM;
- priv->ssp = pxa_ssp_request(dai->id + 1, "SoC audio");
- if (priv->ssp == NULL) {
- ret = -ENODEV;
- goto err_priv;
+ if (dev->of_node) {
+ struct device_node *ssp_handle;
+
+ ssp_handle = of_parse_phandle(dev->of_node, "port", 0);
+ if (!ssp_handle) {
+ dev_err(dev, "unable to get 'port' phandle\n");
+ return -ENODEV;
+ }
+
+ priv->ssp = pxa_ssp_request_of(ssp_handle, "SoC audio");
+ if (priv->ssp == NULL) {
+ ret = -ENODEV;
+ goto err_priv;
+ }
+ } else {
+ priv->ssp = pxa_ssp_request(dai->id + 1, "SoC audio");
+ if (priv->ssp == NULL) {
+ ret = -ENODEV;
+ goto err_priv;
+ }
}
priv->dai_fmt = (unsigned int) -1;
@@ -798,6 +807,12 @@ static const struct snd_soc_component_driver pxa_ssp_component = {
.name = "pxa-ssp",
};
+#ifdef CONFIG_OF
+static const struct of_device_id pxa_ssp_of_ids[] = {
+ { .compatible = "mrvl,pxa-ssp-dai" },
+ {},
+};
+#endif
+
static int asoc_ssp_probe(struct platform_device *pdev)
{
return snd_soc_register_component(&pdev->dev, &pxa_ssp_component,
@@ -812,8 +827,9 @@ static int asoc_ssp_remove(struct platform_device *pdev)
static struct platform_driver asoc_ssp_driver = {
.driver = {
- .name = "pxa-ssp-dai",
- .owner = THIS_MODULE,
+ .name = "pxa-ssp-dai",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(pxa_ssp_of_ids),
},
.probe = asoc_ssp_probe,
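The reworked pxa_ssp_startup() above only stores a pointer to the DRCMR requestor in filter_data; turning that into a dmaengine channel happens outside this patch. A hedged sketch of the usual pattern via snd_dmaengine_pcm_open_request_chan(), where the filter function and its test are placeholders rather than the real PXA DMA filter:

#include <linux/dmaengine.h>
#include <sound/dmaengine_pcm.h>
#include <sound/soc.h>

/* Placeholder filter: a real one matches 'param' against the channel. */
static bool example_dma_filter(struct dma_chan *chan, void *param)
{
	unsigned long requestor = *(unsigned long *)param;

	return requestor != 0;	/* illustration only */
}

static int example_pcm_open(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_dmaengine_dai_dma_data *dma_data =
		snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);

	/* Request a channel whose requestor line matches filter_data. */
	return snd_dmaengine_pcm_open_request_chan(substream,
						   example_dma_filter,
						   dma_data->filter_data);
}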
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index 1475515712e..f1059d999de 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -14,15 +14,16 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
#include <sound/core.h>
#include <sound/ac97_codec.h>
#include <sound/soc.h>
#include <sound/pxa2xx-lib.h>
+#include <sound/dmaengine_pcm.h>
#include <mach/hardware.h>
#include <mach/regs-ac97.h>
-#include <mach/dma.h>
#include <mach/audio.h>
#include "pxa2xx-ac97.h"
@@ -48,44 +49,44 @@ static struct snd_ac97_bus_ops pxa2xx_ac97_ops = {
.reset = pxa2xx_ac97_cold_reset,
};
-static struct pxa2xx_pcm_dma_params pxa2xx_ac97_pcm_stereo_out = {
- .name = "AC97 PCM Stereo out",
- .dev_addr = __PREG(PCDR),
- .drcmr = &DRCMR(12),
- .dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG |
- DCMD_BURST32 | DCMD_WIDTH4,
+static unsigned long pxa2xx_ac97_pcm_stereo_in_req = 11;
+static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_in = {
+ .addr = __PREG(PCDR),
+ .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .maxburst = 32,
+ .filter_data = &pxa2xx_ac97_pcm_stereo_in_req,
};
-static struct pxa2xx_pcm_dma_params pxa2xx_ac97_pcm_stereo_in = {
- .name = "AC97 PCM Stereo in",
- .dev_addr = __PREG(PCDR),
- .drcmr = &DRCMR(11),
- .dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC |
- DCMD_BURST32 | DCMD_WIDTH4,
+static unsigned long pxa2xx_ac97_pcm_stereo_out_req = 12;
+static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_stereo_out = {
+ .addr = __PREG(PCDR),
+ .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .maxburst = 32,
+ .filter_data = &pxa2xx_ac97_pcm_stereo_out_req,
};
-static struct pxa2xx_pcm_dma_params pxa2xx_ac97_pcm_aux_mono_out = {
- .name = "AC97 Aux PCM (Slot 5) Mono out",
- .dev_addr = __PREG(MODR),
- .drcmr = &DRCMR(10),
- .dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG |
- DCMD_BURST16 | DCMD_WIDTH2,
+static unsigned long pxa2xx_ac97_pcm_aux_mono_out_req = 10;
+static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_aux_mono_out = {
+ .addr = __PREG(MODR),
+ .addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+ .maxburst = 16,
+ .filter_data = &pxa2xx_ac97_pcm_aux_mono_out_req,
};
-static struct pxa2xx_pcm_dma_params pxa2xx_ac97_pcm_aux_mono_in = {
- .name = "AC97 Aux PCM (Slot 5) Mono in",
- .dev_addr = __PREG(MODR),
- .drcmr = &DRCMR(9),
- .dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC |
- DCMD_BURST16 | DCMD_WIDTH2,
+static unsigned long pxa2xx_ac97_pcm_aux_mono_in_req = 9;
+static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_aux_mono_in = {
+ .addr = __PREG(MODR),
+ .addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+ .maxburst = 16,
+ .filter_data = &pxa2xx_ac97_pcm_aux_mono_in_req,
};
-static struct pxa2xx_pcm_dma_params pxa2xx_ac97_pcm_mic_mono_in = {
- .name = "AC97 Mic PCM (Slot 6) Mono in",
- .dev_addr = __PREG(MCDR),
- .drcmr = &DRCMR(8),
- .dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC |
- DCMD_BURST16 | DCMD_WIDTH2,
+static unsigned long pxa2xx_ac97_pcm_aux_mic_mono_req = 8;
+static struct snd_dmaengine_dai_dma_data pxa2xx_ac97_pcm_mic_mono_in = {
+ .addr = __PREG(MCDR),
+ .addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+ .maxburst = 16,
+ .filter_data = &pxa2xx_ac97_pcm_aux_mic_mono_req,
};
#ifdef CONFIG_PM
@@ -119,7 +120,7 @@ static int pxa2xx_ac97_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *cpu_dai)
{
- struct pxa2xx_pcm_dma_params *dma_data;
+ struct snd_dmaengine_dai_dma_data *dma_data;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
dma_data = &pxa2xx_ac97_pcm_stereo_out;
@@ -135,7 +136,7 @@ static int pxa2xx_ac97_hw_aux_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *cpu_dai)
{
- struct pxa2xx_pcm_dma_params *dma_data;
+ struct snd_dmaengine_dai_dma_data *dma_data;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
dma_data = &pxa2xx_ac97_pcm_aux_mono_out;
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c
index f7ca7166411..d5340a08885 100644
--- a/sound/soc/pxa/pxa2xx-i2s.c
+++ b/sound/soc/pxa/pxa2xx-i2s.c
@@ -23,9 +23,9 @@
#include <sound/initval.h>
#include <sound/soc.h>
#include <sound/pxa2xx-lib.h>
+#include <sound/dmaengine_pcm.h>
#include <mach/hardware.h>
-#include <mach/dma.h>
#include <mach/audio.h>
#include "pxa2xx-i2s.h"
@@ -82,20 +82,20 @@ static struct pxa_i2s_port pxa_i2s;
static struct clk *clk_i2s;
static int clk_ena = 0;
-static struct pxa2xx_pcm_dma_params pxa2xx_i2s_pcm_stereo_out = {
- .name = "I2S PCM Stereo out",
- .dev_addr = __PREG(SADR),
- .drcmr = &DRCMR(3),
- .dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG |
- DCMD_BURST32 | DCMD_WIDTH4,
+static unsigned long pxa2xx_i2s_pcm_stereo_out_req = 3;
+static struct snd_dmaengine_dai_dma_data pxa2xx_i2s_pcm_stereo_out = {
+ .addr = __PREG(SADR),
+ .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .maxburst = 32,
+ .filter_data = &pxa2xx_i2s_pcm_stereo_out_req,
};
-static struct pxa2xx_pcm_dma_params pxa2xx_i2s_pcm_stereo_in = {
- .name = "I2S PCM Stereo in",
- .dev_addr = __PREG(SADR),
- .drcmr = &DRCMR(2),
- .dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC |
- DCMD_BURST32 | DCMD_WIDTH4,
+static unsigned long pxa2xx_i2s_pcm_stereo_in_req = 2;
+static struct snd_dmaengine_dai_dma_data pxa2xx_i2s_pcm_stereo_in = {
+ .addr = __PREG(SADR),
+ .addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
+ .maxburst = 32,
+ .filter_data = &pxa2xx_i2s_pcm_stereo_in_req,
};
static int pxa2xx_i2s_startup(struct snd_pcm_substream *substream,
@@ -163,7 +163,7 @@ static int pxa2xx_i2s_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
- struct pxa2xx_pcm_dma_params *dma_data;
+ struct snd_dmaengine_dai_dma_data *dma_data;
BUG_ON(IS_ERR(clk_i2s));
clk_prepare_enable(clk_i2s);
diff --git a/sound/soc/pxa/pxa2xx-pcm.c b/sound/soc/pxa/pxa2xx-pcm.c
index ecff116cb7b..806da27b8b6 100644
--- a/sound/soc/pxa/pxa2xx-pcm.c
+++ b/sound/soc/pxa/pxa2xx-pcm.c
@@ -12,10 +12,13 @@
#include <linux/dma-mapping.h>
#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/of.h>
#include <sound/core.h>
#include <sound/soc.h>
#include <sound/pxa2xx-lib.h>
+#include <sound/dmaengine_pcm.h>
#include "../../arm/pxa2xx-pcm.h"
@@ -25,7 +28,7 @@ static int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_runtime *runtime = substream->runtime;
struct pxa2xx_runtime_data *prtd = runtime->private_data;
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct pxa2xx_pcm_dma_params *dma;
+ struct snd_dmaengine_dai_dma_data *dma;
int ret;
dma = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
@@ -39,7 +42,7 @@ static int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
* with different params */
if (prtd->params == NULL) {
prtd->params = dma;
- ret = pxa_request_dma(prtd->params->name, DMA_PRIO_LOW,
+ ret = pxa_request_dma("name", DMA_PRIO_LOW,
pxa2xx_pcm_dma_irq, substream);
if (ret < 0)
return ret;
@@ -47,7 +50,7 @@ static int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
} else if (prtd->params != dma) {
pxa_free_dma(prtd->dma_ch);
prtd->params = dma;
- ret = pxa_request_dma(prtd->params->name, DMA_PRIO_LOW,
+ ret = pxa_request_dma("name", DMA_PRIO_LOW,
pxa2xx_pcm_dma_irq, substream);
if (ret < 0)
return ret;
@@ -131,10 +134,18 @@ static int pxa2xx_soc_platform_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id snd_soc_pxa_audio_match[] = {
+ { .compatible = "mrvl,pxa-pcm-audio" },
+ { }
+};
+#endif
+
static struct platform_driver pxa_pcm_driver = {
.driver = {
- .name = "pxa-pcm-audio",
- .owner = THIS_MODULE,
+ .name = "pxa-pcm-audio",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(snd_soc_pxa_audio_match),
},
.probe = pxa2xx_soc_platform_probe,
diff --git a/sound/soc/pxa/ttc-dkb.c b/sound/soc/pxa/ttc-dkb.c
index f4ea4f6663a..13c9ee0cb83 100644
--- a/sound/soc/pxa/ttc-dkb.c
+++ b/sound/soc/pxa/ttc-dkb.c
@@ -122,6 +122,7 @@ static struct snd_soc_dai_link ttc_pm860x_hifi_dai[] = {
/* ttc/td audio machine driver */
static struct snd_soc_card ttc_dkb_card = {
.name = "ttc-dkb-hifi",
+ .owner = THIS_MODULE,
.dai_link = ttc_pm860x_hifi_dai,
.num_links = ARRAY_SIZE(ttc_pm860x_hifi_dai),
diff --git a/sound/soc/s6000/s6105-ipcam.c b/sound/soc/s6000/s6105-ipcam.c
index 58cfb1eb7dd..945e8abdc10 100644
--- a/sound/soc/s6000/s6105-ipcam.c
+++ b/sound/soc/s6000/s6105-ipcam.c
@@ -192,7 +192,7 @@ static struct snd_soc_card snd_soc_card_s6105 = {
.num_links = 1,
};
-static struct s6000_snd_platform_data __initdata s6105_snd_data = {
+static struct s6000_snd_platform_data s6105_snd_data __initdata = {
.wide = 0,
.channel_in = 0,
.channel_out = 1,
diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
index 2dd623fa388..2acf987844e 100644
--- a/sound/soc/samsung/ac97.c
+++ b/sound/soc/samsung/ac97.c
@@ -404,18 +404,13 @@ static int s3c_ac97_probe(struct platform_device *pdev)
return -ENXIO;
}
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem_res) {
- dev_err(&pdev->dev, "Unable to get register resource\n");
- return -ENXIO;
- }
-
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!irq_res) {
dev_err(&pdev->dev, "AC97 IRQ not provided!\n");
return -ENXIO;
}
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
s3c_ac97.regs = devm_ioremap_resource(&pdev->dev, mem_res);
if (IS_ERR(s3c_ac97.regs))
return PTR_ERR(s3c_ac97.regs);
@@ -462,7 +457,7 @@ static int s3c_ac97_probe(struct platform_device *pdev)
if (ret)
goto err5;
- ret = asoc_dma_platform_register(&pdev->dev);
+ ret = samsung_asoc_dma_platform_register(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret);
goto err6;
@@ -485,7 +480,7 @@ static int s3c_ac97_remove(struct platform_device *pdev)
{
struct resource *irq_res;
- asoc_dma_platform_unregister(&pdev->dev);
+ samsung_asoc_dma_platform_unregister(&pdev->dev);
snd_soc_unregister_component(&pdev->dev);
irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
diff --git a/sound/soc/samsung/dma.c b/sound/soc/samsung/dma.c
index 21b79262010..9338d11e921 100644
--- a/sound/soc/samsung/dma.c
+++ b/sound/soc/samsung/dma.c
@@ -90,6 +90,13 @@ static void dma_enqueue(struct snd_pcm_substream *substream)
dma_info.period = prtd->dma_period;
dma_info.len = prtd->dma_period*limit;
+ if (dma_info.cap == DMA_CYCLIC) {
+ dma_info.buf = pos;
+ prtd->params->ops->prepare(prtd->params->ch, &dma_info);
+ prtd->dma_loaded += limit;
+ return;
+ }
+
while (prtd->dma_loaded < limit) {
pr_debug("dma_loaded: %d\n", prtd->dma_loaded);
@@ -176,6 +183,10 @@ static int dma_hw_params(struct snd_pcm_substream *substream,
prtd->params->ch = prtd->params->ops->request(
prtd->params->channel, &req, rtd->cpu_dai->dev,
prtd->params->ch_name);
+ if (!prtd->params->ch) {
+ pr_err("Failed to allocate DMA channel\n");
+ return -ENXIO;
+ }
prtd->params->ops->config(prtd->params->ch, &config);
}
@@ -433,17 +444,17 @@ static struct snd_soc_platform_driver samsung_asoc_platform = {
.pcm_free = dma_free_dma_buffers,
};
-int asoc_dma_platform_register(struct device *dev)
+int samsung_asoc_dma_platform_register(struct device *dev)
{
return snd_soc_register_platform(dev, &samsung_asoc_platform);
}
-EXPORT_SYMBOL_GPL(asoc_dma_platform_register);
+EXPORT_SYMBOL_GPL(samsung_asoc_dma_platform_register);
-void asoc_dma_platform_unregister(struct device *dev)
+void samsung_asoc_dma_platform_unregister(struct device *dev)
{
snd_soc_unregister_platform(dev);
}
-EXPORT_SYMBOL_GPL(asoc_dma_platform_unregister);
+EXPORT_SYMBOL_GPL(samsung_asoc_dma_platform_unregister);
MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
MODULE_DESCRIPTION("Samsung ASoC DMA Driver");
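The DMA_CYCLIC branch added to dma_enqueue() above hands the whole ring to the controller once instead of queueing period by period. In generic dmaengine terms (not the S3C DMA ops used here), the equivalent is a single cyclic descriptor; a minimal sketch, with an illustrative helper name:

#include <linux/dmaengine.h>

/* Illustrative only: one cyclic descriptor covers the whole buffer ring. */
static int example_prepare_cyclic(struct dma_chan *chan, dma_addr_t buf,
				  size_t buf_bytes, size_t period_bytes,
				  enum dma_transfer_direction dir)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_bytes, period_bytes,
					 dir, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}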
diff --git a/sound/soc/samsung/dma.h b/sound/soc/samsung/dma.h
index 189a7a6d502..0e86315a3ea 100644
--- a/sound/soc/samsung/dma.h
+++ b/sound/soc/samsung/dma.h
@@ -22,7 +22,7 @@ struct s3c_dma_params {
char *ch_name;
};
-int asoc_dma_platform_register(struct device *dev);
-void asoc_dma_platform_unregister(struct device *dev);
+int samsung_asoc_dma_platform_register(struct device *dev);
+void samsung_asoc_dma_platform_unregister(struct device *dev);
#endif
diff --git a/sound/soc/samsung/i2s-regs.h b/sound/soc/samsung/i2s-regs.h
index c0e6d9a19ef..821a5023100 100644
--- a/sound/soc/samsung/i2s-regs.h
+++ b/sound/soc/samsung/i2s-regs.h
@@ -31,6 +31,10 @@
#define I2SLVL1ADDR 0x34
#define I2SLVL2ADDR 0x38
#define I2SLVL3ADDR 0x3c
+#define I2SSTR1 0x40
+#define I2SVER 0x44
+#define I2SFIC2 0x48
+#define I2STDM 0x4c
#define CON_RSTCLR (1 << 31)
#define CON_FRXOFSTATUS (1 << 26)
@@ -95,24 +99,39 @@
#define MOD_RXONLY (1 << 8)
#define MOD_TXRX (2 << 8)
#define MOD_MASK (3 << 8)
-#define MOD_LR_LLOW (0 << 7)
-#define MOD_LR_RLOW (1 << 7)
-#define MOD_SDF_IIS (0 << 5)
-#define MOD_SDF_MSB (1 << 5)
-#define MOD_SDF_LSB (2 << 5)
-#define MOD_SDF_MASK (3 << 5)
-#define MOD_RCLK_256FS (0 << 3)
-#define MOD_RCLK_512FS (1 << 3)
-#define MOD_RCLK_384FS (2 << 3)
-#define MOD_RCLK_768FS (3 << 3)
-#define MOD_RCLK_MASK (3 << 3)
-#define MOD_BCLK_32FS (0 << 1)
-#define MOD_BCLK_48FS (1 << 1)
-#define MOD_BCLK_16FS (2 << 1)
-#define MOD_BCLK_24FS (3 << 1)
-#define MOD_BCLK_MASK (3 << 1)
+#define MOD_LRP_SHIFT 7
+#define MOD_LR_LLOW 0
+#define MOD_LR_RLOW 1
+#define MOD_SDF_SHIFT 5
+#define MOD_SDF_IIS 0
+#define MOD_SDF_MSB 1
+#define MOD_SDF_LSB 2
+#define MOD_SDF_MASK 3
+#define MOD_RCLK_SHIFT 3
+#define MOD_RCLK_256FS 0
+#define MOD_RCLK_512FS 1
+#define MOD_RCLK_384FS 2
+#define MOD_RCLK_768FS 3
+#define MOD_RCLK_MASK 3
+#define MOD_BCLK_SHIFT 1
+#define MOD_BCLK_32FS 0
+#define MOD_BCLK_48FS 1
+#define MOD_BCLK_16FS 2
+#define MOD_BCLK_24FS 3
+#define MOD_BCLK_MASK 3
#define MOD_8BIT (1 << 0)
+#define EXYNOS5420_MOD_LRP_SHIFT 15
+#define EXYNOS5420_MOD_SDF_SHIFT 6
+#define EXYNOS5420_MOD_RCLK_SHIFT 4
+#define EXYNOS5420_MOD_BCLK_SHIFT 0
+#define EXYNOS5420_MOD_BCLK_64FS 4
+#define EXYNOS5420_MOD_BCLK_96FS 5
+#define EXYNOS5420_MOD_BCLK_128FS 6
+#define EXYNOS5420_MOD_BCLK_192FS 7
+#define EXYNOS5420_MOD_BCLK_256FS 8
+#define EXYNOS5420_MOD_BCLK_MASK 0xf
+
#define MOD_CDCLKCON (1 << 12)
#define PSR_PSREN (1 << 15)
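The MOD_* values above are now plain field values with separate *_SHIFT constants, so the same names cover both the classic layout and the relocated Exynos5420 fields. A small sketch of the resulting read-modify-write pattern, assuming this header is included (the helper name is illustrative):

#include <linux/io.h>
#include <linux/types.h>
#include "i2s-regs.h"

static void example_set_rclk_512fs(void __iomem *i2smod, bool exynos5420_tdm)
{
	int shift = exynos5420_tdm ? EXYNOS5420_MOD_RCLK_SHIFT : MOD_RCLK_SHIFT;
	u32 mod = readl(i2smod);

	mod &= ~(MOD_RCLK_MASK << shift);	/* clear the RCLK field */
	mod |= MOD_RCLK_512FS << shift;		/* select 512fs */
	writel(mod, i2smod);
}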
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index 959c702235c..b302f3b7a58 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -40,6 +40,7 @@ enum samsung_dai_type {
struct samsung_i2s_dai_data {
int dai_type;
+ u32 quirks;
};
struct i2s_dai {
@@ -198,7 +199,13 @@ static inline bool is_manager(struct i2s_dai *i2s)
/* Read RCLK of I2S (in multiples of LRCLK) */
static inline unsigned get_rfs(struct i2s_dai *i2s)
{
- u32 rfs = (readl(i2s->addr + I2SMOD) >> 3) & 0x3;
+ u32 rfs;
+
+ if (i2s->quirks & QUIRK_SUPPORTS_TDM)
+ rfs = readl(i2s->addr + I2SMOD) >> EXYNOS5420_MOD_RCLK_SHIFT;
+ else
+ rfs = (readl(i2s->addr + I2SMOD) >> MOD_RCLK_SHIFT);
+ rfs &= MOD_RCLK_MASK;
switch (rfs) {
case 3: return 768;
@@ -212,21 +219,26 @@ static inline unsigned get_rfs(struct i2s_dai *i2s)
static inline void set_rfs(struct i2s_dai *i2s, unsigned rfs)
{
u32 mod = readl(i2s->addr + I2SMOD);
+ int rfs_shift;
- mod &= ~MOD_RCLK_MASK;
+ if (i2s->quirks & QUIRK_SUPPORTS_TDM)
+ rfs_shift = EXYNOS5420_MOD_RCLK_SHIFT;
+ else
+ rfs_shift = MOD_RCLK_SHIFT;
+ mod &= ~(MOD_RCLK_MASK << rfs_shift);
switch (rfs) {
case 768:
- mod |= MOD_RCLK_768FS;
+ mod |= (MOD_RCLK_768FS << rfs_shift);
break;
case 512:
- mod |= MOD_RCLK_512FS;
+ mod |= (MOD_RCLK_512FS << rfs_shift);
break;
case 384:
- mod |= MOD_RCLK_384FS;
+ mod |= (MOD_RCLK_384FS << rfs_shift);
break;
default:
- mod |= MOD_RCLK_256FS;
+ mod |= (MOD_RCLK_256FS << rfs_shift);
break;
}
@@ -236,9 +248,22 @@ static inline void set_rfs(struct i2s_dai *i2s, unsigned rfs)
/* Read Bit-Clock of I2S (in multiples of LRCLK) */
static inline unsigned get_bfs(struct i2s_dai *i2s)
{
- u32 bfs = (readl(i2s->addr + I2SMOD) >> 1) & 0x3;
+ u32 bfs;
+
+ if (i2s->quirks & QUIRK_SUPPORTS_TDM) {
+ bfs = readl(i2s->addr + I2SMOD) >> EXYNOS5420_MOD_BCLK_SHIFT;
+ bfs &= EXYNOS5420_MOD_BCLK_MASK;
+ } else {
+ bfs = readl(i2s->addr + I2SMOD) >> MOD_BCLK_SHIFT;
+ bfs &= MOD_BCLK_MASK;
+ }
switch (bfs) {
+ case 8: return 256;
+ case 7: return 192;
+ case 6: return 128;
+ case 5: return 96;
+ case 4: return 64;
case 3: return 24;
case 2: return 16;
case 1: return 48;
@@ -250,21 +275,50 @@ static inline unsigned get_bfs(struct i2s_dai *i2s)
static inline void set_bfs(struct i2s_dai *i2s, unsigned bfs)
{
u32 mod = readl(i2s->addr + I2SMOD);
+ int bfs_shift;
+ int tdm = i2s->quirks & QUIRK_SUPPORTS_TDM;
- mod &= ~MOD_BCLK_MASK;
+ if (i2s->quirks & QUIRK_SUPPORTS_TDM) {
+ bfs_shift = EXYNOS5420_MOD_BCLK_SHIFT;
+ mod &= ~(EXYNOS5420_MOD_BCLK_MASK << bfs_shift);
+ } else {
+ bfs_shift = MOD_BCLK_SHIFT;
+ mod &= ~(MOD_BCLK_MASK << bfs_shift);
+ }
+
+ /* Non-TDM I2S controllers do not support BCLK > 48 * FS */
+ if (!tdm && bfs > 48) {
+ dev_err(&i2s->pdev->dev, "Unsupported BCLK divider\n");
+ return;
+ }
switch (bfs) {
case 48:
- mod |= MOD_BCLK_48FS;
+ mod |= (MOD_BCLK_48FS << bfs_shift);
break;
case 32:
- mod |= MOD_BCLK_32FS;
+ mod |= (MOD_BCLK_32FS << bfs_shift);
break;
case 24:
- mod |= MOD_BCLK_24FS;
+ mod |= (MOD_BCLK_24FS << bfs_shift);
break;
case 16:
- mod |= MOD_BCLK_16FS;
+ mod |= (MOD_BCLK_16FS << bfs_shift);
+ break;
+ case 64:
+ mod |= (EXYNOS5420_MOD_BCLK_64FS << bfs_shift);
+ break;
+ case 96:
+ mod |= (EXYNOS5420_MOD_BCLK_96FS << bfs_shift);
+ break;
+ case 128:
+ mod |= (EXYNOS5420_MOD_BCLK_128FS << bfs_shift);
+ break;
+ case 192:
+ mod |= (EXYNOS5420_MOD_BCLK_192FS << bfs_shift);
+ break;
+ case 256:
+ mod |= (EXYNOS5420_MOD_BCLK_256FS << bfs_shift);
break;
default:
dev_err(&i2s->pdev->dev, "Wrong BCLK Divider!\n");
@@ -491,20 +545,32 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
{
struct i2s_dai *i2s = to_info(dai);
u32 mod = readl(i2s->addr + I2SMOD);
+ int lrp_shift, sdf_shift, sdf_mask, lrp_rlow;
u32 tmp = 0;
+ if (i2s->quirks & QUIRK_SUPPORTS_TDM) {
+ lrp_shift = EXYNOS5420_MOD_LRP_SHIFT;
+ sdf_shift = EXYNOS5420_MOD_SDF_SHIFT;
+ } else {
+ lrp_shift = MOD_LRP_SHIFT;
+ sdf_shift = MOD_SDF_SHIFT;
+ }
+
+ sdf_mask = MOD_SDF_MASK << sdf_shift;
+ lrp_rlow = MOD_LR_RLOW << lrp_shift;
+
/* Format is priority */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_RIGHT_J:
- tmp |= MOD_LR_RLOW;
- tmp |= MOD_SDF_MSB;
+ tmp |= lrp_rlow;
+ tmp |= (MOD_SDF_MSB << sdf_shift);
break;
case SND_SOC_DAIFMT_LEFT_J:
- tmp |= MOD_LR_RLOW;
- tmp |= MOD_SDF_LSB;
+ tmp |= lrp_rlow;
+ tmp |= (MOD_SDF_LSB << sdf_shift);
break;
case SND_SOC_DAIFMT_I2S:
- tmp |= MOD_SDF_IIS;
+ tmp |= (MOD_SDF_IIS << sdf_shift);
break;
default:
dev_err(&i2s->pdev->dev, "Format not supported\n");
@@ -519,10 +585,10 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
case SND_SOC_DAIFMT_NB_NF:
break;
case SND_SOC_DAIFMT_NB_IF:
- if (tmp & MOD_LR_RLOW)
- tmp &= ~MOD_LR_RLOW;
+ if (tmp & lrp_rlow)
+ tmp &= ~lrp_rlow;
else
- tmp |= MOD_LR_RLOW;
+ tmp |= lrp_rlow;
break;
default:
dev_err(&i2s->pdev->dev, "Polarity not supported\n");
@@ -544,15 +610,18 @@ static int i2s_set_fmt(struct snd_soc_dai *dai,
return -EINVAL;
}
+ /*
+ * Don't change the I2S mode if any controller is active on this
+ * channel.
+ */
if (any_active(i2s) &&
- ((mod & (MOD_SDF_MASK | MOD_LR_RLOW
- | MOD_SLAVE)) != tmp)) {
+ ((mod & (sdf_mask | lrp_rlow | MOD_SLAVE)) != tmp)) {
dev_err(&i2s->pdev->dev,
"%s:%d Other DAI busy\n", __func__, __LINE__);
return -EAGAIN;
}
- mod &= ~(MOD_SDF_MASK | MOD_LR_RLOW | MOD_SLAVE);
+ mod &= ~(sdf_mask | lrp_rlow | MOD_SLAVE);
mod |= tmp;
writel(mod, i2s->addr + I2SMOD);
@@ -1007,6 +1076,8 @@ static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
if (IS_ERR(i2s->pdev))
return NULL;
+ i2s->pdev->dev.parent = &pdev->dev;
+
platform_set_drvdata(i2s->pdev, i2s);
ret = platform_device_add(i2s->pdev);
if (ret < 0)
@@ -1018,18 +1089,18 @@ static struct i2s_dai *i2s_alloc_dai(struct platform_device *pdev, bool sec)
static const struct of_device_id exynos_i2s_match[];
-static inline int samsung_i2s_get_driver_data(struct platform_device *pdev)
+static inline const struct samsung_i2s_dai_data *samsung_i2s_get_driver_data(
+ struct platform_device *pdev)
{
#ifdef CONFIG_OF
- struct samsung_i2s_dai_data *data;
if (pdev->dev.of_node) {
const struct of_device_id *match;
match = of_match_node(exynos_i2s_match, pdev->dev.of_node);
- data = (struct samsung_i2s_dai_data *) match->data;
- return data->dai_type;
+ return match->data;
} else
#endif
- return platform_get_device_id(pdev)->driver_data;
+ return (struct samsung_i2s_dai_data *)
+ platform_get_device_id(pdev)->driver_data;
}
#ifdef CONFIG_PM_RUNTIME
@@ -1060,13 +1131,13 @@ static int samsung_i2s_probe(struct platform_device *pdev)
struct resource *res;
u32 regs_base, quirks = 0, idma_addr = 0;
struct device_node *np = pdev->dev.of_node;
- enum samsung_dai_type samsung_dai_type;
+ const struct samsung_i2s_dai_data *i2s_dai_data;
int ret = 0;
/* Call during Secondary interface registration */
- samsung_dai_type = samsung_i2s_get_driver_data(pdev);
+ i2s_dai_data = samsung_i2s_get_driver_data(pdev);
- if (samsung_dai_type == TYPE_SEC) {
+ if (i2s_dai_data->dai_type == TYPE_SEC) {
sec_dai = dev_get_drvdata(&pdev->dev);
if (!sec_dai) {
dev_err(&pdev->dev, "Unable to get drvdata\n");
@@ -1075,7 +1146,7 @@ static int samsung_i2s_probe(struct platform_device *pdev)
snd_soc_register_component(&sec_dai->pdev->dev,
&samsung_i2s_component,
&sec_dai->i2s_dai_drv, 1);
- asoc_dma_platform_register(&pdev->dev);
+ samsung_asoc_dma_platform_register(&pdev->dev);
return 0;
}
@@ -1115,15 +1186,7 @@ static int samsung_i2s_probe(struct platform_device *pdev)
idma_addr = i2s_cfg->idma_addr;
}
} else {
- if (of_find_property(np, "samsung,supports-6ch", NULL))
- quirks |= QUIRK_PRI_6CHAN;
-
- if (of_find_property(np, "samsung,supports-secdai", NULL))
- quirks |= QUIRK_SEC_DAI;
-
- if (of_find_property(np, "samsung,supports-rstclr", NULL))
- quirks |= QUIRK_NEED_RSTCLR;
-
+ quirks = i2s_dai_data->quirks;
if (of_property_read_u32(np, "samsung,idma-addr",
&idma_addr)) {
if (quirks & QUIRK_SEC_DAI) {
@@ -1200,7 +1263,7 @@ static int samsung_i2s_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
- asoc_dma_platform_register(&pdev->dev);
+ samsung_asoc_dma_platform_register(&pdev->dev);
return 0;
err:
@@ -1230,33 +1293,59 @@ static int samsung_i2s_remove(struct platform_device *pdev)
i2s->pri_dai = NULL;
i2s->sec_dai = NULL;
- asoc_dma_platform_unregister(&pdev->dev);
+ samsung_asoc_dma_platform_unregister(&pdev->dev);
snd_soc_unregister_component(&pdev->dev);
return 0;
}
+static const struct samsung_i2s_dai_data i2sv3_dai_type = {
+ .dai_type = TYPE_PRI,
+ .quirks = QUIRK_NO_MUXPSR,
+};
+
+static const struct samsung_i2s_dai_data i2sv5_dai_type = {
+ .dai_type = TYPE_PRI,
+ .quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI | QUIRK_NEED_RSTCLR,
+};
+
+static const struct samsung_i2s_dai_data i2sv6_dai_type = {
+ .dai_type = TYPE_PRI,
+ .quirks = QUIRK_PRI_6CHAN | QUIRK_SEC_DAI | QUIRK_NEED_RSTCLR |
+ QUIRK_SUPPORTS_TDM,
+};
+
+static const struct samsung_i2s_dai_data samsung_dai_type_pri = {
+ .dai_type = TYPE_PRI,
+};
+
+static const struct samsung_i2s_dai_data samsung_dai_type_sec = {
+ .dai_type = TYPE_SEC,
+};
+
static struct platform_device_id samsung_i2s_driver_ids[] = {
{
.name = "samsung-i2s",
- .driver_data = TYPE_PRI,
+ .driver_data = (kernel_ulong_t)&samsung_dai_type_pri,
}, {
.name = "samsung-i2s-sec",
- .driver_data = TYPE_SEC,
+ .driver_data = (kernel_ulong_t)&samsung_dai_type_sec,
},
{},
};
MODULE_DEVICE_TABLE(platform, samsung_i2s_driver_ids);
#ifdef CONFIG_OF
-static struct samsung_i2s_dai_data samsung_i2s_dai_data_array[] = {
- [TYPE_PRI] = { TYPE_PRI },
- [TYPE_SEC] = { TYPE_SEC },
-};
-
static const struct of_device_id exynos_i2s_match[] = {
- { .compatible = "samsung,i2s-v5",
- .data = &samsung_i2s_dai_data_array[TYPE_PRI],
+ {
+ .compatible = "samsung,s3c6410-i2s",
+ .data = &i2sv3_dai_type,
+ }, {
+ .compatible = "samsung,s5pv210-i2s",
+ .data = &i2sv5_dai_type,
+ }, {
+ .compatible = "samsung,exynos5420-i2s",
+ .data = &i2sv6_dai_type,
},
{},
};
diff --git a/sound/soc/samsung/pcm.c b/sound/soc/samsung/pcm.c
index 1566afe9ef5..e54256fc4b2 100644
--- a/sound/soc/samsung/pcm.c
+++ b/sound/soc/samsung/pcm.c
@@ -594,7 +594,7 @@ static int s3c_pcm_dev_probe(struct platform_device *pdev)
goto err5;
}
- ret = asoc_dma_platform_register(&pdev->dev);
+ ret = samsung_asoc_dma_platform_register(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "failed to get register DMA: %d\n", ret);
goto err6;
@@ -623,7 +623,7 @@ static int s3c_pcm_dev_remove(struct platform_device *pdev)
struct s3c_pcm_info *pcm = &s3c_pcm[pdev->id];
struct resource *mem_res;
- asoc_dma_platform_unregister(&pdev->dev);
+ samsung_asoc_dma_platform_unregister(&pdev->dev);
snd_soc_unregister_component(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/sound/soc/samsung/s3c2412-i2s.c b/sound/soc/samsung/s3c2412-i2s.c
index 47e23864ea7..ea885cb9f76 100644
--- a/sound/soc/samsung/s3c2412-i2s.c
+++ b/sound/soc/samsung/s3c2412-i2s.c
@@ -176,7 +176,7 @@ static int s3c2412_iis_dev_probe(struct platform_device *pdev)
return ret;
}
- ret = asoc_dma_platform_register(&pdev->dev);
+ ret = samsung_asoc_dma_platform_register(&pdev->dev);
if (ret) {
pr_err("failed to register the DMA: %d\n", ret);
goto err;
@@ -190,7 +190,7 @@ err:
static int s3c2412_iis_dev_remove(struct platform_device *pdev)
{
- asoc_dma_platform_unregister(&pdev->dev);
+ samsung_asoc_dma_platform_unregister(&pdev->dev);
snd_soc_unregister_component(&pdev->dev);
return 0;
}
diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
index 8b3414551a6..9c8ebd872fa 100644
--- a/sound/soc/samsung/s3c24xx-i2s.c
+++ b/sound/soc/samsung/s3c24xx-i2s.c
@@ -480,7 +480,7 @@ static int s3c24xx_iis_dev_probe(struct platform_device *pdev)
return ret;
}
- ret = asoc_dma_platform_register(&pdev->dev);
+ ret = samsung_asoc_dma_platform_register(&pdev->dev);
if (ret) {
pr_err("failed to register the dma: %d\n", ret);
goto err;
@@ -494,7 +494,7 @@ err:
static int s3c24xx_iis_dev_remove(struct platform_device *pdev)
{
- asoc_dma_platform_unregister(&pdev->dev);
+ samsung_asoc_dma_platform_unregister(&pdev->dev);
snd_soc_unregister_component(&pdev->dev);
return 0;
}
diff --git a/sound/soc/samsung/smdk_wm8994.c b/sound/soc/samsung/smdk_wm8994.c
index 581ea4a06fc..5fd7a05a9b9 100644
--- a/sound/soc/samsung/smdk_wm8994.c
+++ b/sound/soc/samsung/smdk_wm8994.c
@@ -11,6 +11,7 @@
#include <sound/pcm_params.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
/*
* Default CFG switch settings to use this driver:
@@ -37,11 +38,19 @@
/* SMDK has a 16.934MHZ crystal attached to WM8994 */
#define SMDK_WM8994_FREQ 16934000
+struct smdk_wm8994_data {
+ int mclk1_rate;
+};
+
+/* Default SMDKs */
+static struct smdk_wm8994_data smdk_board_data = {
+ .mclk1_rate = SMDK_WM8994_FREQ,
+};
+
static int smdk_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
unsigned int pll_out;
int ret;
@@ -54,18 +63,6 @@ static int smdk_hw_params(struct snd_pcm_substream *substream,
else
pll_out = params_rate(params) * 256;
- ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S
- | SND_SOC_DAIFMT_NB_NF
- | SND_SOC_DAIFMT_CBM_CFM);
- if (ret < 0)
- return ret;
-
- ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S
- | SND_SOC_DAIFMT_NB_NF
- | SND_SOC_DAIFMT_CBM_CFM);
- if (ret < 0)
- return ret;
-
ret = snd_soc_dai_set_pll(codec_dai, WM8994_FLL1, WM8994_FLL_SRC_MCLK1,
SMDK_WM8994_FREQ, pll_out);
if (ret < 0)
@@ -131,6 +128,8 @@ static struct snd_soc_dai_link smdk_dai[] = {
.platform_name = "samsung-i2s.0",
.codec_name = "wm8994-codec",
.init = smdk_wm8994_init_paiftx,
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM,
.ops = &smdk_ops,
}, { /* Sec_Fifo Playback i/f */
.name = "Sec_FIFO TX",
@@ -139,6 +138,8 @@ static struct snd_soc_dai_link smdk_dai[] = {
.codec_dai_name = "wm8994-aif1",
.platform_name = "samsung-i2s-sec",
.codec_name = "wm8994-codec",
+ .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
+ SND_SOC_DAIFMT_CBM_CFM,
.ops = &smdk_ops,
},
};
@@ -150,15 +151,28 @@ static struct snd_soc_card smdk = {
.num_links = ARRAY_SIZE(smdk_dai),
};
+#ifdef CONFIG_OF
+static const struct of_device_id samsung_wm8994_of_match[] = {
+ { .compatible = "samsung,smdk-wm8994", .data = &smdk_board_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, samsung_wm8994_of_match);
+#endif /* CONFIG_OF */
static int smdk_audio_probe(struct platform_device *pdev)
{
int ret;
struct device_node *np = pdev->dev.of_node;
struct snd_soc_card *card = &smdk;
+ struct smdk_wm8994_data *board;
+ const struct of_device_id *id;
card->dev = &pdev->dev;
+ board = devm_kzalloc(&pdev->dev, sizeof(*board), GFP_KERNEL);
+ if (!board)
+ return -ENOMEM;
+
if (np) {
smdk_dai[0].cpu_dai_name = NULL;
smdk_dai[0].cpu_of_node = of_parse_phandle(np,
@@ -173,6 +187,12 @@ static int smdk_audio_probe(struct platform_device *pdev)
smdk_dai[0].platform_of_node = smdk_dai[0].cpu_of_node;
}
+ id = of_match_device(samsung_wm8994_of_match, &pdev->dev);
+ if (id)
+ *board = *((struct smdk_wm8994_data *)id->data);
+
+ platform_set_drvdata(pdev, board);
+
ret = snd_soc_register_card(card);
if (ret)
@@ -190,17 +210,9 @@ static int smdk_audio_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_OF
-static const struct of_device_id samsung_wm8994_of_match[] = {
- { .compatible = "samsung,smdk-wm8994", },
- {},
-};
-MODULE_DEVICE_TABLE(of, samsung_wm8994_of_match);
-#endif /* CONFIG_OF */
-
static struct platform_driver smdk_audio_driver = {
.driver = {
- .name = "smdk-audio",
+ .name = "smdk-audio-wm8894",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(samsung_wm8994_of_match),
},
@@ -212,4 +224,4 @@ module_platform_driver(smdk_audio_driver);
MODULE_DESCRIPTION("ALSA SoC SMDK WM8994");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:smdk-audio");
+MODULE_ALIAS("platform:smdk-audio-wm8994");
diff --git a/sound/soc/samsung/spdif.c b/sound/soc/samsung/spdif.c
index 2e5ebb2f198..28487dcc453 100644
--- a/sound/soc/samsung/spdif.c
+++ b/sound/soc/samsung/spdif.c
@@ -395,7 +395,7 @@ static int spdif_probe(struct platform_device *pdev)
spin_lock_init(&spdif->lock);
- spdif->pclk = clk_get(&pdev->dev, "spdif");
+ spdif->pclk = devm_clk_get(&pdev->dev, "spdif");
if (IS_ERR(spdif->pclk)) {
dev_err(&pdev->dev, "failed to get peri-clock\n");
ret = -ENOENT;
@@ -403,7 +403,7 @@ static int spdif_probe(struct platform_device *pdev)
}
clk_prepare_enable(spdif->pclk);
- spdif->sclk = clk_get(&pdev->dev, "sclk_spdif");
+ spdif->sclk = devm_clk_get(&pdev->dev, "sclk_spdif");
if (IS_ERR(spdif->sclk)) {
dev_err(&pdev->dev, "failed to get internal source clock\n");
ret = -ENOENT;
@@ -442,7 +442,7 @@ static int spdif_probe(struct platform_device *pdev)
spdif->dma_playback = &spdif_stereo_out;
- ret = asoc_dma_platform_register(&pdev->dev);
+ ret = samsung_asoc_dma_platform_register(&pdev->dev);
if (ret) {
dev_err(&pdev->dev, "failed to register DMA: %d\n", ret);
goto err5;
@@ -457,10 +457,8 @@ err3:
release_mem_region(mem_res->start, resource_size(mem_res));
err2:
clk_disable_unprepare(spdif->sclk);
- clk_put(spdif->sclk);
err1:
clk_disable_unprepare(spdif->pclk);
- clk_put(spdif->pclk);
err0:
return ret;
}
@@ -470,7 +468,7 @@ static int spdif_remove(struct platform_device *pdev)
struct samsung_spdif_info *spdif = &spdif_info;
struct resource *mem_res;
- asoc_dma_platform_unregister(&pdev->dev);
+ samsung_asoc_dma_platform_unregister(&pdev->dev);
snd_soc_unregister_component(&pdev->dev);
iounmap(spdif->regs);
@@ -480,9 +478,7 @@ static int spdif_remove(struct platform_device *pdev)
release_mem_region(mem_res->start, resource_size(mem_res));
clk_disable_unprepare(spdif->sclk);
- clk_put(spdif->sclk);
clk_disable_unprepare(spdif->pclk);
- clk_put(spdif->pclk);
return 0;
}
diff --git a/sound/soc/sh/Kconfig b/sound/soc/sh/Kconfig
index 6bcb1164d59..56d8ff6a402 100644
--- a/sound/soc/sh/Kconfig
+++ b/sound/soc/sh/Kconfig
@@ -34,6 +34,13 @@ config SND_SOC_SH4_SIU
select SH_DMAE
select FW_LOADER
+config SND_SOC_RCAR
+ tristate "R-Car series SRU/SCU/SSIU/SSI support"
+ select SND_SIMPLE_CARD
+ select RCAR_CLK_ADG
+ help
+ This option enables R-Car SRU/SCU/SSIU/SSI sound support
+
##
## Boards
##
diff --git a/sound/soc/sh/Makefile b/sound/soc/sh/Makefile
index 849b387d17d..aaf3dcd1ee2 100644
--- a/sound/soc/sh/Makefile
+++ b/sound/soc/sh/Makefile
@@ -12,6 +12,9 @@ obj-$(CONFIG_SND_SOC_SH4_SSI) += snd-soc-ssi.o
obj-$(CONFIG_SND_SOC_SH4_FSI) += snd-soc-fsi.o
obj-$(CONFIG_SND_SOC_SH4_SIU) += snd-soc-siu.o
+## audio units for R-Car
+obj-$(CONFIG_SND_SOC_RCAR) += rcar/
+
## boards
snd-soc-sh7760-ac97-objs := sh7760-ac97.o
snd-soc-migor-objs := migor.o
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 30390260bb6..b33ca7cd085 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -235,6 +235,8 @@ struct fsi_stream {
struct sh_dmae_slave slave; /* see fsi_handler_init() */
struct work_struct work;
dma_addr_t dma;
+ int loop_cnt;
+ int additional_pos;
};
struct fsi_clk {
@@ -1289,6 +1291,8 @@ static int fsi_dma_init(struct fsi_priv *fsi, struct fsi_stream *io)
io->bus_option = BUSOP_SET(24, PACKAGE_24BITBUS_BACK) |
BUSOP_SET(16, PACKAGE_16BITBUS_STREAM);
+ io->loop_cnt = 2; /* push 1st, 2nd period first, then 3rd, 4th... */
+ io->additional_pos = 0;
io->dma = dma_map_single(dai->dev, runtime->dma_area,
snd_pcm_lib_buffer_bytes(io->substream), dir);
return 0;
@@ -1305,11 +1309,15 @@ static int fsi_dma_quit(struct fsi_priv *fsi, struct fsi_stream *io)
return 0;
}
-static dma_addr_t fsi_dma_get_area(struct fsi_stream *io)
+static dma_addr_t fsi_dma_get_area(struct fsi_stream *io, int additional)
{
struct snd_pcm_runtime *runtime = io->substream->runtime;
+ int period = io->period_pos + additional;
- return io->dma + samples_to_bytes(runtime, io->buff_sample_pos);
+ if (period >= runtime->periods)
+ period = 0;
+
+ return io->dma + samples_to_bytes(runtime, period * io->period_samples);
}
static void fsi_dma_complete(void *data)
@@ -1321,7 +1329,7 @@ static void fsi_dma_complete(void *data)
enum dma_data_direction dir = fsi_stream_is_play(fsi, io) ?
DMA_TO_DEVICE : DMA_FROM_DEVICE;
- dma_sync_single_for_cpu(dai->dev, fsi_dma_get_area(io),
+ dma_sync_single_for_cpu(dai->dev, fsi_dma_get_area(io, 0),
samples_to_bytes(runtime, io->period_samples), dir);
io->buff_sample_pos += io->period_samples;
@@ -1347,7 +1355,7 @@ static void fsi_dma_do_work(struct work_struct *work)
struct snd_pcm_runtime *runtime;
enum dma_data_direction dir;
int is_play = fsi_stream_is_play(fsi, io);
- int len;
+ int len, i;
dma_addr_t buf;
if (!fsi_stream_is_working(fsi, io))
@@ -1357,26 +1365,33 @@ static void fsi_dma_do_work(struct work_struct *work)
runtime = io->substream->runtime;
dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
len = samples_to_bytes(runtime, io->period_samples);
- buf = fsi_dma_get_area(io);
- dma_sync_single_for_device(dai->dev, buf, len, dir);
+ for (i = 0; i < io->loop_cnt; i++) {
+ buf = fsi_dma_get_area(io, io->additional_pos);
- desc = dmaengine_prep_slave_single(io->chan, buf, len, dir,
- DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
- if (!desc) {
- dev_err(dai->dev, "dmaengine_prep_slave_sg() fail\n");
- return;
- }
+ dma_sync_single_for_device(dai->dev, buf, len, dir);
- desc->callback = fsi_dma_complete;
- desc->callback_param = io;
+ desc = dmaengine_prep_slave_single(io->chan, buf, len, dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(dai->dev, "dmaengine_prep_slave_sg() fail\n");
+ return;
+ }
- if (dmaengine_submit(desc) < 0) {
- dev_err(dai->dev, "tx_submit() fail\n");
- return;
+ desc->callback = fsi_dma_complete;
+ desc->callback_param = io;
+
+ if (dmaengine_submit(desc) < 0) {
+ dev_err(dai->dev, "tx_submit() fail\n");
+ return;
+ }
+
+ dma_async_issue_pending(io->chan);
+
+ io->additional_pos = 1;
}
- dma_async_issue_pending(io->chan);
+ io->loop_cnt = 1;
/*
* FIXME
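The fsi.c change above primes the engine with two periods on start (loop_cnt = 2, then 1 per completion) so a refill is always queued behind the running transfer. A minimal sketch of that idea in generic dmaengine terms, with an assumed per-stream bookkeeping struct (all names are illustrative):

#include <linux/dmaengine.h>

struct example_stream {
	struct dma_chan *chan;
	enum dma_transfer_direction dir;
	dma_addr_t dma_base;
	size_t period_bytes;
	unsigned int nr_periods;
	unsigned int next_period;
};

static void example_queue_periods(struct example_stream *io, int count);

/* Completion callback: keep one period queued behind the running one. */
static void example_period_done(void *data)
{
	example_queue_periods(data, 1);
}

static void example_queue_periods(struct example_stream *io, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		dma_addr_t buf = io->dma_base +
				 io->next_period * io->period_bytes;
		struct dma_async_tx_descriptor *desc;

		desc = dmaengine_prep_slave_single(io->chan, buf,
				io->period_bytes, io->dir,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (!desc)
			return;

		desc->callback = example_period_done;
		desc->callback_param = io;
		dmaengine_submit(desc);

		io->next_period = (io->next_period + 1) % io->nr_periods;
	}
	dma_async_issue_pending(io->chan);
}

/* The trigger-start path would call example_queue_periods(io, 2) once. */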
diff --git a/sound/soc/sh/rcar/Makefile b/sound/soc/sh/rcar/Makefile
new file mode 100644
index 00000000000..0ff492df792
--- /dev/null
+++ b/sound/soc/sh/rcar/Makefile
@@ -0,0 +1,2 @@
+snd-soc-rcar-objs := core.o gen.o scu.o adg.o ssi.o
+obj-$(CONFIG_SND_SOC_RCAR) += snd-soc-rcar.o \ No newline at end of file
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
new file mode 100644
index 00000000000..d80deb7ccf1
--- /dev/null
+++ b/sound/soc/sh/rcar/adg.c
@@ -0,0 +1,234 @@
+/*
+ * Helper routines for R-Car sound ADG.
+ *
+ * Copyright (C) 2013 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/sh_clk.h>
+#include <mach/clock.h>
+#include "rsnd.h"
+
+#define CLKA 0
+#define CLKB 1
+#define CLKC 2
+#define CLKI 3
+#define CLKMAX 4
+
+struct rsnd_adg {
+ struct clk *clk[CLKMAX];
+
+ int rate_of_441khz_div_6;
+ int rate_of_48khz_div_6;
+};
+
+#define for_each_rsnd_clk(pos, adg, i) \
+ for (i = 0, (pos) = adg->clk[i]; \
+ i < CLKMAX; \
+ i++, (pos) = adg->clk[i])
+#define rsnd_priv_to_adg(priv) ((struct rsnd_adg *)(priv)->adg)
+
+static enum rsnd_reg rsnd_adg_ssi_reg_get(int id)
+{
+ enum rsnd_reg reg;
+
+ /*
+ * SSI 8 is not connected to ADG.
+ * it works with SSI 7
+ */
+ if (id == 8)
+ return RSND_REG_MAX;
+
+ if (0 <= id && id <= 3)
+ reg = RSND_REG_AUDIO_CLK_SEL0;
+ else if (4 <= id && id <= 7)
+ reg = RSND_REG_AUDIO_CLK_SEL1;
+ else
+ reg = RSND_REG_AUDIO_CLK_SEL2;
+
+ return reg;
+}
+
+int rsnd_adg_ssi_clk_stop(struct rsnd_mod *mod)
+{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ enum rsnd_reg reg;
+ int id;
+
+ /*
+ * "mod" = "ssi" here.
+ * we can get "ssi id" from mod
+ */
+ id = rsnd_mod_id(mod);
+ reg = rsnd_adg_ssi_reg_get(id);
+
+ rsnd_write(priv, mod, reg, 0);
+
+ return 0;
+}
+
+int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *mod, unsigned int rate)
+{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct clk *clk;
+ enum rsnd_reg reg;
+ int id, shift, i;
+ u32 data;
+ int sel_table[] = {
+ [CLKA] = 0x1,
+ [CLKB] = 0x2,
+ [CLKC] = 0x3,
+ [CLKI] = 0x0,
+ };
+
+ dev_dbg(dev, "request clock = %d\n", rate);
+
+ /*
+ * find suitable clock from
+ * AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC/AUDIO_CLKI.
+ */
+ data = 0;
+ for_each_rsnd_clk(clk, adg, i) {
+ if (rate == clk_get_rate(clk)) {
+ data = sel_table[i];
+ goto found_clock;
+ }
+ }
+
+ /*
+ * find 1/6 clock from BRGA/BRGB
+ */
+ if (rate == adg->rate_of_441khz_div_6) {
+ data = 0x10;
+ goto found_clock;
+ }
+
+ if (rate == adg->rate_of_48khz_div_6) {
+ data = 0x20;
+ goto found_clock;
+ }
+
+ return -EIO;
+
+found_clock:
+
+ /*
+ * This "mod" = "ssi" here.
+ * we can get "ssi id" from mod
+ */
+ id = rsnd_mod_id(mod);
+ reg = rsnd_adg_ssi_reg_get(id);
+
+ dev_dbg(dev, "ADG: ssi%d selects clk%d = %d", id, i, rate);
+
+ /*
+ * Enable SSIx clock
+ */
+ shift = (id % 4) * 8;
+
+ rsnd_bset(priv, mod, reg,
+ 0xFF << shift,
+ data << shift);
+
+ return 0;
+}
+
+static void rsnd_adg_ssi_clk_init(struct rsnd_priv *priv, struct rsnd_adg *adg)
+{
+ struct clk *clk;
+ unsigned long rate;
+ u32 ckr;
+ int i;
+ int brg_table[] = {
+ [CLKA] = 0x0,
+ [CLKB] = 0x1,
+ [CLKC] = 0x4,
+ [CLKI] = 0x2,
+ };
+
+ /*
+ * This driver is assuming that AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC
+ * have 44.1kHz or 48kHz base clocks for now.
+ *
+ * SSI itself can divide parent clock by 1/1 - 1/16
+ * So, BRGA outputs 44.1kHz base parent clock 1/32,
+ * and, BRGB outputs 48.0kHz base parent clock 1/32 here.
+ * see
+ * rsnd_adg_ssi_clk_try_start()
+ */
+ ckr = 0;
+ adg->rate_of_441khz_div_6 = 0;
+ adg->rate_of_48khz_div_6 = 0;
+ for_each_rsnd_clk(clk, adg, i) {
+ rate = clk_get_rate(clk);
+
+ if (0 == rate) /* not used */
+ continue;
+
+ /* RBGA */
+ if (!adg->rate_of_441khz_div_6 && (0 == rate % 44100)) {
+ adg->rate_of_441khz_div_6 = rate / 6;
+ ckr |= brg_table[i] << 20;
+ }
+
+ /* RBGB */
+ if (!adg->rate_of_48khz_div_6 && (0 == rate % 48000)) {
+ adg->rate_of_48khz_div_6 = rate / 6;
+ ckr |= brg_table[i] << 16;
+ }
+ }
+
+ rsnd_priv_bset(priv, SSICKR, 0x00FF0000, ckr);
+ rsnd_priv_write(priv, BRRA, 0x00000002); /* 1/6 */
+ rsnd_priv_write(priv, BRRB, 0x00000002); /* 1/6 */
+}
+
+int rsnd_adg_probe(struct platform_device *pdev,
+ struct rcar_snd_info *info,
+ struct rsnd_priv *priv)
+{
+ struct rsnd_adg *adg;
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct clk *clk;
+ int i;
+
+ adg = devm_kzalloc(dev, sizeof(*adg), GFP_KERNEL);
+ if (!adg) {
+ dev_err(dev, "ADG allocate failed\n");
+ return -ENOMEM;
+ }
+
+ adg->clk[CLKA] = clk_get(NULL, "audio_clk_a");
+ adg->clk[CLKB] = clk_get(NULL, "audio_clk_b");
+ adg->clk[CLKC] = clk_get(NULL, "audio_clk_c");
+ adg->clk[CLKI] = clk_get(NULL, "audio_clk_internal");
+ for_each_rsnd_clk(clk, adg, i) {
+ if (IS_ERR(clk)) {
+ dev_err(dev, "Audio clock failed\n");
+ return -EIO;
+ }
+ }
+
+ rsnd_adg_ssi_clk_init(priv, adg);
+
+ priv->adg = adg;
+
+ dev_dbg(dev, "adg probed\n");
+
+ return 0;
+}
+
+void rsnd_adg_remove(struct platform_device *pdev,
+ struct rsnd_priv *priv)
+{
+ struct rsnd_adg *adg = priv->adg;
+ struct clk *clk;
+ int i;
+
+ for_each_rsnd_clk(clk, adg, i)
+ clk_put(clk);
+}
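A short worked example of the clock matching in rsnd_adg_ssi_clk_try_start() above, with an assumed clock rate for illustration: if AUDIO_CLKA runs at 11289600 Hz (256 x 44.1 kHz), rsnd_adg_ssi_clk_init() records rate_of_441khz_div_6 = 11289600 / 6 = 1881600 Hz and routes that clock to BRGA; a later request for exactly 11289600 Hz selects CLKA directly (data = 0x1), while a request for 1881600 Hz falls through to the BRGA branch (data = 0x10).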
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
new file mode 100644
index 00000000000..a3570602851
--- /dev/null
+++ b/sound/soc/sh/rcar/core.c
@@ -0,0 +1,861 @@
+/*
+ * Renesas R-Car SRU/SCU/SSIU/SSI support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * Based on fsi.c
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Renesas R-Car sound device structure
+ *
+ * Gen1
+ *
+ * SRU : Sound Routing Unit
+ * - SRC : Sampling Rate Converter
+ * - CMD
+ * - CTU : Channel Count Conversion Unit
+ * - MIX : Mixer
+ * - DVC : Digital Volume and Mute Function
+ * - SSI : Serial Sound Interface
+ *
+ * Gen2
+ *
+ * SCU : Sampling Rate Converter Unit
+ * - SRC : Sampling Rate Converter
+ * - CMD
+ * - CTU : Channel Count Conversion Unit
+ * - MIX : Mixer
+ * - DVC : Digital Volume and Mute Function
+ * SSIU : Serial Sound Interface Unit
+ * - SSI : Serial Sound Interface
+ */
+
+/*
+ * driver data Image
+ *
+ * rsnd_priv
+ * |
+ * | ** this depends on Gen1/Gen2
+ * |
+ * +- gen
+ * |
+ * | ** these depend on data path
+ * | ** gen and platform data control it
+ * |
+ * +- rdai[0]
+ * | | sru ssiu ssi
+ * | +- playback -> [mod] -> [mod] -> [mod] -> ...
+ * | |
+ * | | sru ssiu ssi
+ * | +- capture -> [mod] -> [mod] -> [mod] -> ...
+ * |
+ * +- rdai[1]
+ * | | sru ssiu ssi
+ * | +- playback -> [mod] -> [mod] -> [mod] -> ...
+ * | |
+ * | | sru ssiu ssi
+ * | +- capture -> [mod] -> [mod] -> [mod] -> ...
+ * ...
+ * |
+ * | ** these control ssi
+ * |
+ * +- ssi
+ * | |
+ * | +- ssi[0]
+ * | +- ssi[1]
+ * | +- ssi[2]
+ * | ...
+ * |
+ * | ** these control scu
+ * |
+ * +- scu
+ * |
+ * +- scu[0]
+ * +- scu[1]
+ * +- scu[2]
+ * ...
+ *
+ *
+ * for_each_rsnd_dai(xx, priv, xx)
+ * rdai[0] => rdai[1] => rdai[2] => ...
+ *
+ * for_each_rsnd_mod(xx, rdai, xx)
+ * [mod] => [mod] => [mod] => ...
+ *
+ * rsnd_dai_call(xxx, fn )
+ * [mod]->fn() -> [mod]->fn() -> [mod]->fn()...
+ *
+ */
+#include <linux/pm_runtime.h>
+#include "rsnd.h"
+
+#define RSND_RATES SNDRV_PCM_RATE_8000_96000
+#define RSND_FMTS (SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE)
+
+/*
+ * rsnd_platform functions
+ */
+#define rsnd_platform_call(priv, dai, func, param...) \
+ (!(priv->info->func) ? -ENODEV : \
+ priv->info->func(param))
+
+
+/*
+ * basic function
+ */
+u32 rsnd_read(struct rsnd_priv *priv,
+ struct rsnd_mod *mod, enum rsnd_reg reg)
+{
+ void __iomem *base = rsnd_gen_reg_get(priv, mod, reg);
+
+ BUG_ON(!base);
+
+ return ioread32(base);
+}
+
+void rsnd_write(struct rsnd_priv *priv,
+ struct rsnd_mod *mod,
+ enum rsnd_reg reg, u32 data)
+{
+ void __iomem *base = rsnd_gen_reg_get(priv, mod, reg);
+ struct device *dev = rsnd_priv_to_dev(priv);
+
+ BUG_ON(!base);
+
+ dev_dbg(dev, "w %p : %08x\n", base, data);
+
+ iowrite32(data, base);
+}
+
+void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod,
+ enum rsnd_reg reg, u32 mask, u32 data)
+{
+ void __iomem *base = rsnd_gen_reg_get(priv, mod, reg);
+ struct device *dev = rsnd_priv_to_dev(priv);
+ u32 val;
+
+ BUG_ON(!base);
+
+ val = ioread32(base);
+ val &= ~mask;
+ val |= data & mask;
+ iowrite32(val, base);
+
+ dev_dbg(dev, "s %p : %08x\n", base, val);
+}
+
+/*
+ * rsnd_mod functions
+ */
+char *rsnd_mod_name(struct rsnd_mod *mod)
+{
+ if (!mod || !mod->ops)
+ return "unknown";
+
+ return mod->ops->name;
+}
+
+void rsnd_mod_init(struct rsnd_priv *priv,
+ struct rsnd_mod *mod,
+ struct rsnd_mod_ops *ops,
+ int id)
+{
+ mod->priv = priv;
+ mod->id = id;
+ mod->ops = ops;
+ INIT_LIST_HEAD(&mod->list);
+}
+
+/*
+ * rsnd_dma functions
+ */
+static void rsnd_dma_continue(struct rsnd_dma *dma)
+{
+ /* push next A or B plane */
+ dma->submit_loop = 1;
+ schedule_work(&dma->work);
+}
+
+void rsnd_dma_start(struct rsnd_dma *dma)
+{
+ /* push both A and B plane*/
+ dma->submit_loop = 2;
+ schedule_work(&dma->work);
+}
+
+void rsnd_dma_stop(struct rsnd_dma *dma)
+{
+ dma->submit_loop = 0;
+ cancel_work_sync(&dma->work);
+ dmaengine_terminate_all(dma->chan);
+}
+
+static void rsnd_dma_complete(void *data)
+{
+ struct rsnd_dma *dma = (struct rsnd_dma *)data;
+ struct rsnd_priv *priv = dma->priv;
+ unsigned long flags;
+
+ rsnd_lock(priv, flags);
+
+ dma->complete(dma);
+
+ if (dma->submit_loop)
+ rsnd_dma_continue(dma);
+
+ rsnd_unlock(priv, flags);
+}
+
+static void rsnd_dma_do_work(struct work_struct *work)
+{
+ struct rsnd_dma *dma = container_of(work, struct rsnd_dma, work);
+ struct rsnd_priv *priv = dma->priv;
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct dma_async_tx_descriptor *desc;
+ dma_addr_t buf;
+ size_t len;
+ int i;
+
+ for (i = 0; i < dma->submit_loop; i++) {
+
+ if (dma->inquiry(dma, &buf, &len) < 0)
+ return;
+
+ desc = dmaengine_prep_slave_single(
+ dma->chan, buf, len, dma->dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc) {
+ dev_err(dev, "dmaengine_prep_slave_sg() fail\n");
+ return;
+ }
+
+ desc->callback = rsnd_dma_complete;
+ desc->callback_param = dma;
+
+ if (dmaengine_submit(desc) < 0) {
+ dev_err(dev, "dmaengine_submit() fail\n");
+ return;
+ }
+
+ }
+
+ dma_async_issue_pending(dma->chan);
+}
+
+int rsnd_dma_available(struct rsnd_dma *dma)
+{
+ return !!dma->chan;
+}
+
+static bool rsnd_dma_filter(struct dma_chan *chan, void *param)
+{
+ chan->private = param;
+
+ return true;
+}
+
+int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma,
+ int is_play, int id,
+ int (*inquiry)(struct rsnd_dma *dma,
+ dma_addr_t *buf, int *len),
+ int (*complete)(struct rsnd_dma *dma))
+{
+ struct device *dev = rsnd_priv_to_dev(priv);
+ dma_cap_mask_t mask;
+
+ if (dma->chan) {
+ dev_err(dev, "it already has dma channel\n");
+ return -EIO;
+ }
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ dma->slave.shdma_slave.slave_id = id;
+
+ dma->chan = dma_request_channel(mask, rsnd_dma_filter,
+ &dma->slave.shdma_slave);
+ if (!dma->chan) {
+ dev_err(dev, "can't get dma channel\n");
+ return -EIO;
+ }
+
+ dma->dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ dma->priv = priv;
+ dma->inquiry = inquiry;
+ dma->complete = complete;
+ INIT_WORK(&dma->work, rsnd_dma_do_work);
+
+ return 0;
+}
+
+void rsnd_dma_quit(struct rsnd_priv *priv,
+ struct rsnd_dma *dma)
+{
+ if (dma->chan)
+ dma_release_channel(dma->chan);
+
+ dma->chan = NULL;
+}
+
+/*
+ * rsnd_dai functions
+ */
+#define rsnd_dai_call(rdai, io, fn) \
+({ \
+ struct rsnd_mod *mod, *n; \
+ int ret = 0; \
+ for_each_rsnd_mod(mod, n, io) { \
+ ret = rsnd_mod_call(mod, fn, rdai, io); \
+ if (ret < 0) \
+ break; \
+ } \
+ ret; \
+})
+
+int rsnd_dai_connect(struct rsnd_dai *rdai,
+ struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io)
+{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct device *dev = rsnd_priv_to_dev(priv);
+
+ if (!mod) {
+ dev_err(dev, "NULL mod\n");
+ return -EIO;
+ }
+
+ if (!list_empty(&mod->list)) {
+ dev_err(dev, "%s%d is not empty\n",
+ rsnd_mod_name(mod),
+ rsnd_mod_id(mod));
+ return -EIO;
+ }
+
+ list_add_tail(&mod->list, &io->head);
+
+ return 0;
+}
+
+int rsnd_dai_disconnect(struct rsnd_mod *mod)
+{
+ list_del_init(&mod->list);
+
+ return 0;
+}
+
+int rsnd_dai_id(struct rsnd_priv *priv, struct rsnd_dai *rdai)
+{
+ int id = rdai - priv->rdai;
+
+ if ((id < 0) || (id >= rsnd_dai_nr(priv)))
+ return -EINVAL;
+
+ return id;
+}
+
+struct rsnd_dai *rsnd_dai_get(struct rsnd_priv *priv, int id)
+{
+ return priv->rdai + id;
+}
+
+static struct rsnd_dai *rsnd_dai_to_rdai(struct snd_soc_dai *dai)
+{
+ struct rsnd_priv *priv = snd_soc_dai_get_drvdata(dai);
+
+ return rsnd_dai_get(priv, dai->id);
+}
+
+int rsnd_dai_is_play(struct rsnd_dai *rdai, struct rsnd_dai_stream *io)
+{
+ return &rdai->playback == io;
+}
+
+/*
+ * rsnd_soc_dai functions
+ */
+int rsnd_dai_pointer_offset(struct rsnd_dai_stream *io, int additional)
+{
+ struct snd_pcm_substream *substream = io->substream;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int pos = io->byte_pos + additional;
+
+ pos %= (runtime->periods * io->byte_per_period);
+
+ return pos;
+}
+
+void rsnd_dai_pointer_update(struct rsnd_dai_stream *io, int byte)
+{
+ io->byte_pos += byte;
+
+ if (io->byte_pos >= io->next_period_byte) {
+ struct snd_pcm_substream *substream = io->substream;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ io->period_pos++;
+ io->next_period_byte += io->byte_per_period;
+
+ if (io->period_pos >= runtime->periods) {
+ io->byte_pos = 0;
+ io->period_pos = 0;
+ io->next_period_byte = io->byte_per_period;
+ }
+
+ snd_pcm_period_elapsed(substream);
+ }
+}
+
+static int rsnd_dai_stream_init(struct rsnd_dai_stream *io,
+ struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+
+ if (!list_empty(&io->head))
+ return -EIO;
+
+ INIT_LIST_HEAD(&io->head);
+ io->substream = substream;
+ io->byte_pos = 0;
+ io->period_pos = 0;
+ io->byte_per_period = runtime->period_size *
+ runtime->channels *
+ samples_to_bytes(runtime, 1);
+ io->next_period_byte = io->byte_per_period;
+
+ return 0;
+}
+
+static
+struct snd_soc_dai *rsnd_substream_to_dai(struct snd_pcm_substream *substream)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+
+ return rtd->cpu_dai;
+}
+
+static
+struct rsnd_dai_stream *rsnd_rdai_to_io(struct rsnd_dai *rdai,
+ struct snd_pcm_substream *substream)
+{
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ return &rdai->playback;
+ else
+ return &rdai->capture;
+}
+
+static int rsnd_soc_dai_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct rsnd_priv *priv = snd_soc_dai_get_drvdata(dai);
+ struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+ struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
+ struct rsnd_mod *mod = rsnd_ssi_mod_get_frm_dai(priv,
+ rsnd_dai_id(priv, rdai),
+ rsnd_dai_is_play(rdai, io));
+ int ssi_id = rsnd_mod_id(mod);
+ int ret;
+ unsigned long flags;
+
+ rsnd_lock(priv, flags);
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ ret = rsnd_dai_stream_init(io, substream);
+ if (ret < 0)
+ goto dai_trigger_end;
+
+ ret = rsnd_platform_call(priv, dai, start, ssi_id);
+ if (ret < 0)
+ goto dai_trigger_end;
+
+ ret = rsnd_gen_path_init(priv, rdai, io);
+ if (ret < 0)
+ goto dai_trigger_end;
+
+ ret = rsnd_dai_call(rdai, io, init);
+ if (ret < 0)
+ goto dai_trigger_end;
+
+ ret = rsnd_dai_call(rdai, io, start);
+ if (ret < 0)
+ goto dai_trigger_end;
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ ret = rsnd_dai_call(rdai, io, stop);
+ if (ret < 0)
+ goto dai_trigger_end;
+
+ ret = rsnd_dai_call(rdai, io, quit);
+ if (ret < 0)
+ goto dai_trigger_end;
+
+ ret = rsnd_gen_path_exit(priv, rdai, io);
+ if (ret < 0)
+ goto dai_trigger_end;
+
+ ret = rsnd_platform_call(priv, dai, stop, ssi_id);
+ if (ret < 0)
+ goto dai_trigger_end;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+dai_trigger_end:
+ rsnd_unlock(priv, flags);
+
+ return ret;
+}
+
+static int rsnd_soc_dai_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+{
+ struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+
+ /* set master/slave audio interface */
+ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+ case SND_SOC_DAIFMT_CBM_CFM:
+ rdai->clk_master = 1;
+ break;
+ case SND_SOC_DAIFMT_CBS_CFS:
+ rdai->clk_master = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* set clock inversion */
+ switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+ case SND_SOC_DAIFMT_NB_IF:
+ rdai->bit_clk_inv = 0;
+ rdai->frm_clk_inv = 1;
+ break;
+ case SND_SOC_DAIFMT_IB_NF:
+ rdai->bit_clk_inv = 1;
+ rdai->frm_clk_inv = 0;
+ break;
+ case SND_SOC_DAIFMT_IB_IF:
+ rdai->bit_clk_inv = 1;
+ rdai->frm_clk_inv = 1;
+ break;
+ case SND_SOC_DAIFMT_NB_NF:
+ default:
+ rdai->bit_clk_inv = 0;
+ rdai->frm_clk_inv = 0;
+ break;
+ }
+
+ /* set format */
+ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+ case SND_SOC_DAIFMT_I2S:
+ rdai->sys_delay = 0;
+ rdai->data_alignment = 0;
+ break;
+ case SND_SOC_DAIFMT_LEFT_J:
+ rdai->sys_delay = 1;
+ rdai->data_alignment = 0;
+ break;
+ case SND_SOC_DAIFMT_RIGHT_J:
+ rdai->sys_delay = 1;
+ rdai->data_alignment = 1;
+ break;
+ }
+
+ return 0;
+}
+
+static const struct snd_soc_dai_ops rsnd_soc_dai_ops = {
+ .trigger = rsnd_soc_dai_trigger,
+ .set_fmt = rsnd_soc_dai_set_fmt,
+};
+
+static int rsnd_dai_probe(struct platform_device *pdev,
+ struct rcar_snd_info *info,
+ struct rsnd_priv *priv)
+{
+ struct snd_soc_dai_driver *drv;
+ struct rsnd_dai *rdai;
+ struct rsnd_mod *pmod, *cmod;
+ struct device *dev = rsnd_priv_to_dev(priv);
+ int dai_nr;
+ int i;
+
+ /* get max dai nr */
+ for (dai_nr = 0; dai_nr < 32; dai_nr++) {
+ pmod = rsnd_ssi_mod_get_frm_dai(priv, dai_nr, 1);
+ cmod = rsnd_ssi_mod_get_frm_dai(priv, dai_nr, 0);
+
+ if (!pmod && !cmod)
+ break;
+ }
+
+ if (!dai_nr) {
+ dev_err(dev, "no dai\n");
+ return -EIO;
+ }
+
+ drv = devm_kzalloc(dev, sizeof(*drv) * dai_nr, GFP_KERNEL);
+ rdai = devm_kzalloc(dev, sizeof(*rdai) * dai_nr, GFP_KERNEL);
+ if (!drv || !rdai) {
+ dev_err(dev, "dai allocate failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < dai_nr; i++) {
+
+ pmod = rsnd_ssi_mod_get_frm_dai(priv, i, 1);
+ cmod = rsnd_ssi_mod_get_frm_dai(priv, i, 0);
+
+ /*
+ * init rsnd_dai
+ */
+ INIT_LIST_HEAD(&rdai[i].playback.head);
+ INIT_LIST_HEAD(&rdai[i].capture.head);
+
+ snprintf(rdai[i].name, RSND_DAI_NAME_SIZE, "rsnd-dai.%d", i);
+
+ /*
+ * init snd_soc_dai_driver
+ */
+ drv[i].name = rdai[i].name;
+ drv[i].ops = &rsnd_soc_dai_ops;
+ if (pmod) {
+ drv[i].playback.rates = RSND_RATES;
+ drv[i].playback.formats = RSND_FMTS;
+ drv[i].playback.channels_min = 2;
+ drv[i].playback.channels_max = 2;
+ }
+ if (cmod) {
+ drv[i].capture.rates = RSND_RATES;
+ drv[i].capture.formats = RSND_FMTS;
+ drv[i].capture.channels_min = 2;
+ drv[i].capture.channels_max = 2;
+ }
+
+ dev_dbg(dev, "%s (%s/%s)\n", rdai[i].name,
+ pmod ? "play" : " -- ",
+ cmod ? "capture" : " -- ");
+ }
+
+ priv->dai_nr = dai_nr;
+ priv->daidrv = drv;
+ priv->rdai = rdai;
+
+ return 0;
+}
+
+static void rsnd_dai_remove(struct platform_device *pdev,
+ struct rsnd_priv *priv)
+{
+}
+
+/*
+ * pcm ops
+ */
+static struct snd_pcm_hardware rsnd_pcm_hardware = {
+ .info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_PAUSE,
+ .formats = RSND_FMTS,
+ .rates = RSND_RATES,
+ .rate_min = 8000,
+ .rate_max = 192000,
+ .channels_min = 2,
+ .channels_max = 2,
+ .buffer_bytes_max = 64 * 1024,
+ .period_bytes_min = 32,
+ .period_bytes_max = 8192,
+ .periods_min = 1,
+ .periods_max = 32,
+ .fifo_size = 256,
+};
+
+static int rsnd_pcm_open(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ int ret = 0;
+
+ snd_soc_set_runtime_hwparams(substream, &rsnd_pcm_hardware);
+
+ ret = snd_pcm_hw_constraint_integer(runtime,
+ SNDRV_PCM_HW_PARAM_PERIODS);
+
+ return ret;
+}
+
+static int rsnd_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *hw_params)
+{
+ return snd_pcm_lib_malloc_pages(substream,
+ params_buffer_bytes(hw_params));
+}
+
+static snd_pcm_uframes_t rsnd_pointer(struct snd_pcm_substream *substream)
+{
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ struct snd_soc_dai *dai = rsnd_substream_to_dai(substream);
+ struct rsnd_dai *rdai = rsnd_dai_to_rdai(dai);
+ struct rsnd_dai_stream *io = rsnd_rdai_to_io(rdai, substream);
+
+ return bytes_to_frames(runtime, io->byte_pos);
+}
+
+static struct snd_pcm_ops rsnd_pcm_ops = {
+ .open = rsnd_pcm_open,
+ .ioctl = snd_pcm_lib_ioctl,
+ .hw_params = rsnd_hw_params,
+ .hw_free = snd_pcm_lib_free_pages,
+ .pointer = rsnd_pointer,
+};
+
+/*
+ * snd_soc_platform
+ */
+
+#define PREALLOC_BUFFER (32 * 1024)
+#define PREALLOC_BUFFER_MAX (32 * 1024)
+
+static int rsnd_pcm_new(struct snd_soc_pcm_runtime *rtd)
+{
+ return snd_pcm_lib_preallocate_pages_for_all(
+ rtd->pcm,
+ SNDRV_DMA_TYPE_DEV,
+ rtd->card->snd_card->dev,
+ PREALLOC_BUFFER, PREALLOC_BUFFER_MAX);
+}
+
+static void rsnd_pcm_free(struct snd_pcm *pcm)
+{
+ snd_pcm_lib_preallocate_free_for_all(pcm);
+}
+
+static struct snd_soc_platform_driver rsnd_soc_platform = {
+ .ops = &rsnd_pcm_ops,
+ .pcm_new = rsnd_pcm_new,
+ .pcm_free = rsnd_pcm_free,
+};
+
+static const struct snd_soc_component_driver rsnd_soc_component = {
+ .name = "rsnd",
+};
+
+/*
+ * rsnd probe
+ */
+static int rsnd_probe(struct platform_device *pdev)
+{
+ struct rcar_snd_info *info;
+ struct rsnd_priv *priv;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ info = pdev->dev.platform_data;
+ if (!info) {
+ dev_err(dev, "driver needs R-Car sound information\n");
+ return -ENODEV;
+ }
+
+ /*
+ * init priv data
+ */
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev, "priv allocate failed\n");
+		return -ENOMEM;
+ }
+
+ priv->dev = dev;
+ priv->info = info;
+ spin_lock_init(&priv->lock);
+
+ /*
+ * init each module
+ */
+ ret = rsnd_gen_probe(pdev, info, priv);
+ if (ret < 0)
+ return ret;
+
+ ret = rsnd_scu_probe(pdev, info, priv);
+ if (ret < 0)
+ return ret;
+
+ ret = rsnd_adg_probe(pdev, info, priv);
+ if (ret < 0)
+ return ret;
+
+ ret = rsnd_ssi_probe(pdev, info, priv);
+ if (ret < 0)
+ return ret;
+
+ ret = rsnd_dai_probe(pdev, info, priv);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * asoc register
+ */
+ ret = snd_soc_register_platform(dev, &rsnd_soc_platform);
+ if (ret < 0) {
+		dev_err(dev, "cannot register snd soc platform\n");
+ return ret;
+ }
+
+ ret = snd_soc_register_component(dev, &rsnd_soc_component,
+ priv->daidrv, rsnd_dai_nr(priv));
+ if (ret < 0) {
+		dev_err(dev, "cannot register snd soc dai\n");
+ goto exit_snd_soc;
+ }
+
+ dev_set_drvdata(dev, priv);
+
+ pm_runtime_enable(dev);
+
+ dev_info(dev, "probed\n");
+ return ret;
+
+exit_snd_soc:
+ snd_soc_unregister_platform(dev);
+
+ return ret;
+}
+
+static int rsnd_remove(struct platform_device *pdev)
+{
+ struct rsnd_priv *priv = dev_get_drvdata(&pdev->dev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ /*
+ * remove each module
+ */
+ rsnd_ssi_remove(pdev, priv);
+ rsnd_adg_remove(pdev, priv);
+ rsnd_scu_remove(pdev, priv);
+ rsnd_dai_remove(pdev, priv);
+ rsnd_gen_remove(pdev, priv);
+
+ return 0;
+}
+
+static struct platform_driver rsnd_driver = {
+ .driver = {
+ .name = "rcar_sound",
+ },
+ .probe = rsnd_probe,
+ .remove = rsnd_remove,
+};
+module_platform_driver(rsnd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Renesas R-Car audio driver");
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
+MODULE_ALIAS("platform:rcar-pcm-audio");
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c
new file mode 100644
index 00000000000..babb203b43b
--- /dev/null
+++ b/sound/soc/sh/rcar/gen.c
@@ -0,0 +1,280 @@
+/*
+ * Renesas R-Car Gen1 SRU/SSI support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include "rsnd.h"
+
+struct rsnd_gen_ops {
+ int (*path_init)(struct rsnd_priv *priv,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io);
+ int (*path_exit)(struct rsnd_priv *priv,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io);
+};
+
+struct rsnd_gen_reg_map {
+ int index; /* -1 : not supported */
+ u32 offset_id; /* offset of ssi0, ssi1, ssi2... */
+ u32 offset_adr; /* offset of SSICR, SSISR, ... */
+};
+
+struct rsnd_gen {
+ void __iomem *base[RSND_BASE_MAX];
+
+ struct rsnd_gen_reg_map reg_map[RSND_REG_MAX];
+ struct rsnd_gen_ops *ops;
+};
+
+#define rsnd_priv_to_gen(p) ((struct rsnd_gen *)(p)->gen)
+
+/*
+ * Gen2
+ * will be filled in the future
+ */
+
+/*
+ * Gen1
+ */
+static int rsnd_gen1_path_init(struct rsnd_priv *priv,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io)
+{
+ struct rsnd_mod *mod;
+ int ret;
+ int id;
+
+ /*
+	 * Gen1 is composed of SRU/SSI, and the SRU is the base module of
+	 * Gen2's SCU/SSIU/SSI (Gen2 SCU/SSIU are derived from the SRU).
+	 *
+	 * A simple picture is:
+	 *	Gen1 SRU = Gen2 SCU + SSIU + etc
+	 *
+	 * The Gen2 SCU path is very flexible, but the Gen1 SRU (SCU part)
+	 * uses a fixed path.
+	 *
+	 * Therefore, SSI id = SCU id here
+ */
+
+ /* get SSI's ID */
+ mod = rsnd_ssi_mod_get_frm_dai(priv,
+ rsnd_dai_id(priv, rdai),
+ rsnd_dai_is_play(rdai, io));
+ id = rsnd_mod_id(mod);
+
+ /* SSI */
+ mod = rsnd_ssi_mod_get(priv, id);
+ ret = rsnd_dai_connect(rdai, mod, io);
+ if (ret < 0)
+ return ret;
+
+ /* SCU */
+ mod = rsnd_scu_mod_get(priv, id);
+ ret = rsnd_dai_connect(rdai, mod, io);
+
+ return ret;
+}
+
+static int rsnd_gen1_path_exit(struct rsnd_priv *priv,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io)
+{
+ struct rsnd_mod *mod, *n;
+ int ret = 0;
+
+ /*
+ * remove all mod from rdai
+ */
+ for_each_rsnd_mod(mod, n, io)
+ ret |= rsnd_dai_disconnect(mod);
+
+ return ret;
+}
+
+static struct rsnd_gen_ops rsnd_gen1_ops = {
+ .path_init = rsnd_gen1_path_init,
+ .path_exit = rsnd_gen1_path_exit,
+};
+
+#define RSND_GEN1_REG_MAP(g, s, i, oi, oa) \
+ do { \
+ (g)->reg_map[RSND_REG_##i].index = RSND_GEN1_##s; \
+ (g)->reg_map[RSND_REG_##i].offset_id = oi; \
+ (g)->reg_map[RSND_REG_##i].offset_adr = oa; \
+ } while (0)
+
+static void rsnd_gen1_reg_map_init(struct rsnd_gen *gen)
+{
+ RSND_GEN1_REG_MAP(gen, SRU, SRC_ROUTE_SEL, 0x0, 0x00);
+ RSND_GEN1_REG_MAP(gen, SRU, SRC_TMG_SEL0, 0x0, 0x08);
+ RSND_GEN1_REG_MAP(gen, SRU, SRC_TMG_SEL1, 0x0, 0x0c);
+ RSND_GEN1_REG_MAP(gen, SRU, SRC_TMG_SEL2, 0x0, 0x10);
+ RSND_GEN1_REG_MAP(gen, SRU, SRC_CTRL, 0x0, 0xc0);
+ RSND_GEN1_REG_MAP(gen, SRU, SSI_MODE0, 0x0, 0xD0);
+ RSND_GEN1_REG_MAP(gen, SRU, SSI_MODE1, 0x0, 0xD4);
+ RSND_GEN1_REG_MAP(gen, SRU, BUSIF_MODE, 0x4, 0x20);
+ RSND_GEN1_REG_MAP(gen, SRU, BUSIF_ADINR, 0x40, 0x214);
+
+ RSND_GEN1_REG_MAP(gen, ADG, BRRA, 0x0, 0x00);
+ RSND_GEN1_REG_MAP(gen, ADG, BRRB, 0x0, 0x04);
+ RSND_GEN1_REG_MAP(gen, ADG, SSICKR, 0x0, 0x08);
+ RSND_GEN1_REG_MAP(gen, ADG, AUDIO_CLK_SEL0, 0x0, 0x0c);
+ RSND_GEN1_REG_MAP(gen, ADG, AUDIO_CLK_SEL1, 0x0, 0x10);
+ RSND_GEN1_REG_MAP(gen, ADG, AUDIO_CLK_SEL3, 0x0, 0x18);
+ RSND_GEN1_REG_MAP(gen, ADG, AUDIO_CLK_SEL4, 0x0, 0x1c);
+ RSND_GEN1_REG_MAP(gen, ADG, AUDIO_CLK_SEL5, 0x0, 0x20);
+
+ RSND_GEN1_REG_MAP(gen, SSI, SSICR, 0x40, 0x00);
+ RSND_GEN1_REG_MAP(gen, SSI, SSISR, 0x40, 0x04);
+ RSND_GEN1_REG_MAP(gen, SSI, SSITDR, 0x40, 0x08);
+ RSND_GEN1_REG_MAP(gen, SSI, SSIRDR, 0x40, 0x0c);
+ RSND_GEN1_REG_MAP(gen, SSI, SSIWSR, 0x40, 0x20);
+}
+
+static int rsnd_gen1_probe(struct platform_device *pdev,
+ struct rcar_snd_info *info,
+ struct rsnd_priv *priv)
+{
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
+ struct resource *sru_res;
+ struct resource *adg_res;
+ struct resource *ssi_res;
+
+ /*
+ * map address
+ */
+ sru_res = platform_get_resource(pdev, IORESOURCE_MEM, RSND_GEN1_SRU);
+ adg_res = platform_get_resource(pdev, IORESOURCE_MEM, RSND_GEN1_ADG);
+ ssi_res = platform_get_resource(pdev, IORESOURCE_MEM, RSND_GEN1_SSI);
+
+ gen->base[RSND_GEN1_SRU] = devm_ioremap_resource(dev, sru_res);
+ gen->base[RSND_GEN1_ADG] = devm_ioremap_resource(dev, adg_res);
+ gen->base[RSND_GEN1_SSI] = devm_ioremap_resource(dev, ssi_res);
+ if (IS_ERR(gen->base[RSND_GEN1_SRU]) ||
+ IS_ERR(gen->base[RSND_GEN1_ADG]) ||
+ IS_ERR(gen->base[RSND_GEN1_SSI]))
+ return -ENODEV;
+
+ gen->ops = &rsnd_gen1_ops;
+ rsnd_gen1_reg_map_init(gen);
+
+ dev_dbg(dev, "Gen1 device probed\n");
+ dev_dbg(dev, "SRU : %08x => %p\n", sru_res->start,
+ gen->base[RSND_GEN1_SRU]);
+ dev_dbg(dev, "ADG : %08x => %p\n", adg_res->start,
+ gen->base[RSND_GEN1_ADG]);
+ dev_dbg(dev, "SSI : %08x => %p\n", ssi_res->start,
+ gen->base[RSND_GEN1_SSI]);
+
+ return 0;
+
+}
+
+static void rsnd_gen1_remove(struct platform_device *pdev,
+ struct rsnd_priv *priv)
+{
+}
+
+/*
+ * Gen
+ */
+int rsnd_gen_path_init(struct rsnd_priv *priv,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io)
+{
+ struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
+
+ return gen->ops->path_init(priv, rdai, io);
+}
+
+int rsnd_gen_path_exit(struct rsnd_priv *priv,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io)
+{
+ struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
+
+ return gen->ops->path_exit(priv, rdai, io);
+}
+
+void __iomem *rsnd_gen_reg_get(struct rsnd_priv *priv,
+ struct rsnd_mod *mod,
+ enum rsnd_reg reg)
+{
+ struct rsnd_gen *gen = rsnd_priv_to_gen(priv);
+ struct device *dev = rsnd_priv_to_dev(priv);
+ int index;
+ u32 offset_id, offset_adr;
+
+ if (reg >= RSND_REG_MAX) {
+ dev_err(dev, "rsnd_reg reg error\n");
+ return NULL;
+ }
+
+ index = gen->reg_map[reg].index;
+ offset_id = gen->reg_map[reg].offset_id;
+ offset_adr = gen->reg_map[reg].offset_adr;
+
+ if (index < 0) {
+ dev_err(dev, "unsupported reg access %d\n", reg);
+ return NULL;
+ }
+
+ if (offset_id && mod)
+ offset_id *= rsnd_mod_id(mod);
+
+	/*
+	 * index/offset were set up by the gen1/gen2 probe
+	 * (see rsnd_gen1_reg_map_init())
+	 */
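+	/*
+	 * for example, on Gen1 SSICR of SSI2 resolves to
+	 *	base[RSND_GEN1_SSI] + (0x40 * 2) + 0x00
+	 * because rsnd_gen1_reg_map_init() registered SSICR with
+	 * offset_id = 0x40 and offset_adr = 0x00
+	 */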
+
+ return gen->base[index] + offset_id + offset_adr;
+}
+
+int rsnd_gen_probe(struct platform_device *pdev,
+ struct rcar_snd_info *info,
+ struct rsnd_priv *priv)
+{
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct rsnd_gen *gen;
+ int i;
+
+ gen = devm_kzalloc(dev, sizeof(*gen), GFP_KERNEL);
+ if (!gen) {
+ dev_err(dev, "GEN allocate failed\n");
+ return -ENOMEM;
+ }
+
+ priv->gen = gen;
+
+ /*
+ * see
+	 *		rsnd_gen_reg_get()
+ * rsnd_gen_probe()
+ */
+ for (i = 0; i < RSND_REG_MAX; i++)
+ gen->reg_map[i].index = -1;
+
+ /*
+ * init each module
+ */
+ if (rsnd_is_gen1(priv))
+ return rsnd_gen1_probe(pdev, info, priv);
+
+ dev_err(dev, "unknown generation R-Car sound device\n");
+
+ return -ENODEV;
+}
+
+void rsnd_gen_remove(struct platform_device *pdev,
+ struct rsnd_priv *priv)
+{
+ if (rsnd_is_gen1(priv))
+ rsnd_gen1_remove(pdev, priv);
+}
diff --git a/sound/soc/sh/rcar/rsnd.h b/sound/soc/sh/rcar/rsnd.h
new file mode 100644
index 00000000000..9cc6986a8cf
--- /dev/null
+++ b/sound/soc/sh/rcar/rsnd.h
@@ -0,0 +1,302 @@
+/*
+ * Renesas R-Car
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef RSND_H
+#define RSND_H
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/sh_dma.h>
+#include <linux/workqueue.h>
+#include <sound/rcar_snd.h>
+#include <sound/soc.h>
+#include <sound/pcm_params.h>
+
+/*
+ * pseudo register
+ *
+ * The register address offsets of SRU/SCU/SSIU differ between Gen1 and Gen2.
+ * This driver uses pseudo registers in order to hide the difference.
+ * see gen1/gen2 code for details
+ */
+enum rsnd_reg {
+ /* SRU/SCU */
+ RSND_REG_SRC_ROUTE_SEL,
+ RSND_REG_SRC_TMG_SEL0,
+ RSND_REG_SRC_TMG_SEL1,
+ RSND_REG_SRC_TMG_SEL2,
+ RSND_REG_SRC_CTRL,
+ RSND_REG_SSI_MODE0,
+ RSND_REG_SSI_MODE1,
+ RSND_REG_BUSIF_MODE,
+ RSND_REG_BUSIF_ADINR,
+
+ /* ADG */
+ RSND_REG_BRRA,
+ RSND_REG_BRRB,
+ RSND_REG_SSICKR,
+ RSND_REG_AUDIO_CLK_SEL0,
+ RSND_REG_AUDIO_CLK_SEL1,
+ RSND_REG_AUDIO_CLK_SEL2,
+ RSND_REG_AUDIO_CLK_SEL3,
+ RSND_REG_AUDIO_CLK_SEL4,
+ RSND_REG_AUDIO_CLK_SEL5,
+
+ /* SSI */
+ RSND_REG_SSICR,
+ RSND_REG_SSISR,
+ RSND_REG_SSITDR,
+ RSND_REG_SSIRDR,
+ RSND_REG_SSIWSR,
+
+ RSND_REG_MAX,
+};
+
+struct rsnd_priv;
+struct rsnd_mod;
+struct rsnd_dai;
+struct rsnd_dai_stream;
+
+/*
+ * R-Car basic functions
+ */
+#define rsnd_mod_read(m, r) \
+ rsnd_read(rsnd_mod_to_priv(m), m, RSND_REG_##r)
+#define rsnd_mod_write(m, r, d) \
+ rsnd_write(rsnd_mod_to_priv(m), m, RSND_REG_##r, d)
+#define rsnd_mod_bset(m, r, s, d) \
+ rsnd_bset(rsnd_mod_to_priv(m), m, RSND_REG_##r, s, d)
+
+#define rsnd_priv_read(p, r) rsnd_read(p, NULL, RSND_REG_##r)
+#define rsnd_priv_write(p, r, d) rsnd_write(p, NULL, RSND_REG_##r, d)
+#define rsnd_priv_bset(p, r, s, d) rsnd_bset(p, NULL, RSND_REG_##r, s, d)
+
+u32 rsnd_read(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg);
+void rsnd_write(struct rsnd_priv *priv, struct rsnd_mod *mod,
+ enum rsnd_reg reg, u32 data);
+void rsnd_bset(struct rsnd_priv *priv, struct rsnd_mod *mod, enum rsnd_reg reg,
+ u32 mask, u32 data);
+
+/*
+ * R-Car DMA
+ */
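+/*
+ * The inquiry() callback supplies the address/length of the next buffer
+ * to submit (see rsnd_ssi_dma_inquiry()), and complete() is called when
+ * a transfer has finished (see rsnd_ssi_dma_complete()).
+ */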
+struct rsnd_dma {
+ struct rsnd_priv *priv;
+ struct sh_dmae_slave slave;
+ struct work_struct work;
+ struct dma_chan *chan;
+ enum dma_data_direction dir;
+ int (*inquiry)(struct rsnd_dma *dma, dma_addr_t *buf, int *len);
+ int (*complete)(struct rsnd_dma *dma);
+
+ int submit_loop;
+};
+
+void rsnd_dma_start(struct rsnd_dma *dma);
+void rsnd_dma_stop(struct rsnd_dma *dma);
+int rsnd_dma_available(struct rsnd_dma *dma);
+int rsnd_dma_init(struct rsnd_priv *priv, struct rsnd_dma *dma,
+ int is_play, int id,
+ int (*inquiry)(struct rsnd_dma *dma, dma_addr_t *buf, int *len),
+ int (*complete)(struct rsnd_dma *dma));
+void rsnd_dma_quit(struct rsnd_priv *priv,
+ struct rsnd_dma *dma);
+
+
+/*
+ * R-Car sound mod
+ */
+
+struct rsnd_mod_ops {
+ char *name;
+ int (*init)(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io);
+ int (*quit)(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io);
+ int (*start)(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io);
+ int (*stop)(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io);
+};
+
+struct rsnd_mod {
+ int id;
+ struct rsnd_priv *priv;
+ struct rsnd_mod_ops *ops;
+ struct list_head list; /* connect to rsnd_dai playback/capture */
+ struct rsnd_dma dma;
+};
+
+#define rsnd_mod_to_priv(mod) ((mod)->priv)
+#define rsnd_mod_to_dma(mod) (&(mod)->dma)
+#define rsnd_dma_to_mod(_dma) container_of((_dma), struct rsnd_mod, dma)
+#define rsnd_mod_id(mod) ((mod)->id)
+#define for_each_rsnd_mod(pos, n, io) \
+ list_for_each_entry_safe(pos, n, &(io)->head, list)
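+/*
+ * rsnd_mod_call() returns -ENODEV if mod is NULL, 0 if the module does
+ * not implement the requested callback, and the callback's return value
+ * otherwise.
+ */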
+#define rsnd_mod_call(mod, func, rdai, io) \
+ (!(mod) ? -ENODEV : \
+ !((mod)->ops->func) ? 0 : \
+ (mod)->ops->func(mod, rdai, io))
+
+void rsnd_mod_init(struct rsnd_priv *priv,
+ struct rsnd_mod *mod,
+ struct rsnd_mod_ops *ops,
+ int id);
+char *rsnd_mod_name(struct rsnd_mod *mod);
+
+/*
+ * R-Car sound DAI
+ */
+#define RSND_DAI_NAME_SIZE 16
+struct rsnd_dai_stream {
+ struct list_head head; /* head of rsnd_mod list */
+ struct snd_pcm_substream *substream;
+ int byte_pos;
+ int period_pos;
+ int byte_per_period;
+ int next_period_byte;
+};
+
+struct rsnd_dai {
+ char name[RSND_DAI_NAME_SIZE];
+ struct rsnd_dai_platform_info *info; /* rcar_snd.h */
+ struct rsnd_dai_stream playback;
+ struct rsnd_dai_stream capture;
+
+ int clk_master:1;
+ int bit_clk_inv:1;
+ int frm_clk_inv:1;
+ int sys_delay:1;
+ int data_alignment:1;
+};
+
+#define rsnd_dai_nr(priv) ((priv)->dai_nr)
+#define for_each_rsnd_dai(rdai, priv, i) \
+ for (i = 0, (rdai) = rsnd_dai_get(priv, i); \
+ i < rsnd_dai_nr(priv); \
+ i++, (rdai) = rsnd_dai_get(priv, i))
+
+struct rsnd_dai *rsnd_dai_get(struct rsnd_priv *priv, int id);
+int rsnd_dai_disconnect(struct rsnd_mod *mod);
+int rsnd_dai_connect(struct rsnd_dai *rdai, struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io);
+int rsnd_dai_is_play(struct rsnd_dai *rdai, struct rsnd_dai_stream *io);
+int rsnd_dai_id(struct rsnd_priv *priv, struct rsnd_dai *rdai);
+#define rsnd_dai_get_platform_info(rdai) ((rdai)->info)
+#define rsnd_io_to_runtime(io) ((io)->substream->runtime)
+
+void rsnd_dai_pointer_update(struct rsnd_dai_stream *io, int cnt);
+int rsnd_dai_pointer_offset(struct rsnd_dai_stream *io, int additional);
+
+/*
+ * R-Car Gen1/Gen2
+ */
+int rsnd_gen_probe(struct platform_device *pdev,
+ struct rcar_snd_info *info,
+ struct rsnd_priv *priv);
+void rsnd_gen_remove(struct platform_device *pdev,
+ struct rsnd_priv *priv);
+int rsnd_gen_path_init(struct rsnd_priv *priv,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io);
+int rsnd_gen_path_exit(struct rsnd_priv *priv,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io);
+void __iomem *rsnd_gen_reg_get(struct rsnd_priv *priv,
+ struct rsnd_mod *mod,
+ enum rsnd_reg reg);
+#define rsnd_is_gen1(s) ((s)->info->flags & RSND_GEN1)
+#define rsnd_is_gen2(s) ((s)->info->flags & RSND_GEN2)
+
+/*
+ * R-Car ADG
+ */
+int rsnd_adg_ssi_clk_stop(struct rsnd_mod *mod);
+int rsnd_adg_ssi_clk_try_start(struct rsnd_mod *mod, unsigned int rate);
+int rsnd_adg_probe(struct platform_device *pdev,
+ struct rcar_snd_info *info,
+ struct rsnd_priv *priv);
+void rsnd_adg_remove(struct platform_device *pdev,
+ struct rsnd_priv *priv);
+
+/*
+ * R-Car sound priv
+ */
+struct rsnd_priv {
+
+ struct device *dev;
+ struct rcar_snd_info *info;
+ spinlock_t lock;
+
+ /*
+	 * the values below are filled in by rsnd_gen_probe()
+ */
+ void *gen;
+
+ /*
+	 * the values below are filled in by rsnd_scu_probe()
+ */
+ void *scu;
+ int scu_nr;
+
+ /*
+	 * the values below are filled in by rsnd_adg_probe()
+ */
+ void *adg;
+
+ /*
+	 * the values below are filled in by rsnd_ssi_probe()
+ */
+ void *ssiu;
+
+ /*
+	 * the values below are filled in by rsnd_dai_probe()
+ */
+ struct snd_soc_dai_driver *daidrv;
+ struct rsnd_dai *rdai;
+ int dai_nr;
+};
+
+#define rsnd_priv_to_dev(priv) ((priv)->dev)
+#define rsnd_lock(priv, flags) spin_lock_irqsave(&priv->lock, flags)
+#define rsnd_unlock(priv, flags) spin_unlock_irqrestore(&priv->lock, flags)
+
+/*
+ * R-Car SCU
+ */
+int rsnd_scu_probe(struct platform_device *pdev,
+ struct rcar_snd_info *info,
+ struct rsnd_priv *priv);
+void rsnd_scu_remove(struct platform_device *pdev,
+ struct rsnd_priv *priv);
+struct rsnd_mod *rsnd_scu_mod_get(struct rsnd_priv *priv, int id);
+#define rsnd_scu_nr(priv) ((priv)->scu_nr)
+
+/*
+ * R-Car SSI
+ */
+int rsnd_ssi_probe(struct platform_device *pdev,
+ struct rcar_snd_info *info,
+ struct rsnd_priv *priv);
+void rsnd_ssi_remove(struct platform_device *pdev,
+ struct rsnd_priv *priv);
+struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id);
+struct rsnd_mod *rsnd_ssi_mod_get_frm_dai(struct rsnd_priv *priv,
+ int dai_id, int is_play);
+
+#endif
diff --git a/sound/soc/sh/rcar/scu.c b/sound/soc/sh/rcar/scu.c
new file mode 100644
index 00000000000..184d9008cec
--- /dev/null
+++ b/sound/soc/sh/rcar/scu.c
@@ -0,0 +1,236 @@
+/*
+ * Renesas R-Car SCU support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include "rsnd.h"
+
+struct rsnd_scu {
+ struct rsnd_scu_platform_info *info; /* rcar_snd.h */
+ struct rsnd_mod mod;
+};
+
+#define rsnd_scu_mode_flags(p) ((p)->info->flags)
+
+/*
+ * ADINR
+ */
+#define OTBL_24 (0 << 16)
+#define OTBL_22 (2 << 16)
+#define OTBL_20 (4 << 16)
+#define OTBL_18 (6 << 16)
+#define OTBL_16 (8 << 16)
+
+
+#define rsnd_mod_to_scu(_mod) \
+ container_of((_mod), struct rsnd_scu, mod)
+
+#define for_each_rsnd_scu(pos, priv, i) \
+ for ((i) = 0; \
+ ((i) < rsnd_scu_nr(priv)) && \
+ ((pos) = (struct rsnd_scu *)(priv)->scu + i); \
+ i++)
+
+static int rsnd_scu_set_route(struct rsnd_priv *priv,
+ struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io)
+{
+ struct scu_route_config {
+ u32 mask;
+ int shift;
+ } routes[] = {
+ { 0xF, 0, }, /* 0 */
+ { 0xF, 4, }, /* 1 */
+ { 0xF, 8, }, /* 2 */
+ { 0x7, 12, }, /* 3 */
+ { 0x7, 16, }, /* 4 */
+ { 0x7, 20, }, /* 5 */
+ { 0x7, 24, }, /* 6 */
+ { 0x3, 28, }, /* 7 */
+ { 0x3, 30, }, /* 8 */
+ };
+
+ u32 mask;
+ u32 val;
+ int shift;
+ int id;
+
+ /*
+ * Gen1 only
+ */
+ if (!rsnd_is_gen1(priv))
+ return 0;
+
+ id = rsnd_mod_id(mod);
+	if (id < 0 || id >= ARRAY_SIZE(routes))
+ return -EIO;
+
+ /*
+ * SRC_ROUTE_SELECT
+ */
+ val = rsnd_dai_is_play(rdai, io) ? 0x1 : 0x2;
+ val = val << routes[id].shift;
+ mask = routes[id].mask << routes[id].shift;
+
+ rsnd_mod_bset(mod, SRC_ROUTE_SEL, mask, val);
+
+ /*
+ * SRC_TIMING_SELECT
+ */
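+	/*
+	 * Each SRC has a 5bit timing select field; four fields are packed
+	 * per 32bit register at an 8bit stride, and the register
+	 * (SRC_TMG_SEL0/1/2) is picked by (id / 4) below.
+	 */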
+ shift = (id % 4) * 8;
+ mask = 0x1F << shift;
+ if (8 == id) /* SRU8 is very special */
+ val = id << shift;
+ else
+ val = (id + 1) << shift;
+
+ switch (id / 4) {
+ case 0:
+ rsnd_mod_bset(mod, SRC_TMG_SEL0, mask, val);
+ break;
+ case 1:
+ rsnd_mod_bset(mod, SRC_TMG_SEL1, mask, val);
+ break;
+ case 2:
+ rsnd_mod_bset(mod, SRC_TMG_SEL2, mask, val);
+ break;
+ }
+
+ return 0;
+}
+
+static int rsnd_scu_set_mode(struct rsnd_priv *priv,
+ struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io)
+{
+ int id = rsnd_mod_id(mod);
+ u32 val;
+
+ if (rsnd_is_gen1(priv)) {
+ val = (1 << id);
+ rsnd_mod_bset(mod, SRC_CTRL, val, val);
+ }
+
+ return 0;
+}
+
+static int rsnd_scu_set_hpbif(struct rsnd_priv *priv,
+ struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io)
+{
+ struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ u32 adinr = runtime->channels;
+
+ switch (runtime->sample_bits) {
+ case 16:
+ adinr |= OTBL_16;
+ break;
+ case 32:
+ adinr |= OTBL_24;
+ break;
+ default:
+ return -EIO;
+ }
+
+ rsnd_mod_write(mod, BUSIF_MODE, 1);
+ rsnd_mod_write(mod, BUSIF_ADINR, adinr);
+
+ return 0;
+}
+
+static int rsnd_scu_start(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io)
+{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct rsnd_scu *scu = rsnd_mod_to_scu(mod);
+ struct device *dev = rsnd_priv_to_dev(priv);
+ u32 flags = rsnd_scu_mode_flags(scu);
+ int ret;
+
+ /*
+	 * SCU is used only when the RSND_SCU_USB_HPBIF flag is set
+ */
+ if (!(flags & RSND_SCU_USB_HPBIF)) {
+		/* it uses PIO transfer */
+ dev_dbg(dev, "%s%d is not used\n",
+ rsnd_mod_name(mod), rsnd_mod_id(mod));
+
+ return 0;
+ }
+
+	/* it uses DMA transfer */
+ ret = rsnd_scu_set_route(priv, mod, rdai, io);
+ if (ret < 0)
+ return ret;
+
+ ret = rsnd_scu_set_mode(priv, mod, rdai, io);
+ if (ret < 0)
+ return ret;
+
+ ret = rsnd_scu_set_hpbif(priv, mod, rdai, io);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "%s%d start\n", rsnd_mod_name(mod), rsnd_mod_id(mod));
+
+ return 0;
+}
+
+static struct rsnd_mod_ops rsnd_scu_ops = {
+ .name = "scu",
+ .start = rsnd_scu_start,
+};
+
+struct rsnd_mod *rsnd_scu_mod_get(struct rsnd_priv *priv, int id)
+{
+ BUG_ON(id < 0 || id >= rsnd_scu_nr(priv));
+
+ return &((struct rsnd_scu *)(priv->scu) + id)->mod;
+}
+
+int rsnd_scu_probe(struct platform_device *pdev,
+ struct rcar_snd_info *info,
+ struct rsnd_priv *priv)
+{
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct rsnd_scu *scu;
+ int i, nr;
+
+ /*
+ * init SCU
+ */
+ nr = info->scu_info_nr;
+ scu = devm_kzalloc(dev, sizeof(*scu) * nr, GFP_KERNEL);
+ if (!scu) {
+ dev_err(dev, "SCU allocate failed\n");
+ return -ENOMEM;
+ }
+
+ priv->scu_nr = nr;
+ priv->scu = scu;
+
+ for_each_rsnd_scu(scu, priv, i) {
+ rsnd_mod_init(priv, &scu->mod,
+ &rsnd_scu_ops, i);
+ scu->info = &info->scu_info[i];
+
+ dev_dbg(dev, "SCU%d probed\n", i);
+ }
+ dev_dbg(dev, "scu probed\n");
+
+ return 0;
+}
+
+void rsnd_scu_remove(struct platform_device *pdev,
+ struct rsnd_priv *priv)
+{
+}
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
new file mode 100644
index 00000000000..fae26d3f79d
--- /dev/null
+++ b/sound/soc/sh/rcar/ssi.c
@@ -0,0 +1,728 @@
+/*
+ * Renesas R-Car SSIU/SSI support
+ *
+ * Copyright (C) 2013 Renesas Solutions Corp.
+ * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+ *
+ * Based on fsi.c
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/delay.h>
+#include "rsnd.h"
+#define RSND_SSI_NAME_SIZE 16
+
+/*
+ * SSICR
+ */
+#define FORCE (1 << 31) /* Fixed */
+#define DMEN (1 << 28) /* DMA Enable */
+#define UIEN (1 << 27) /* Underflow Interrupt Enable */
+#define OIEN (1 << 26) /* Overflow Interrupt Enable */
+#define IIEN (1 << 25) /* Idle Mode Interrupt Enable */
+#define DIEN (1 << 24) /* Data Interrupt Enable */
+
+#define DWL_8 (0 << 19) /* Data Word Length */
+#define DWL_16 (1 << 19) /* Data Word Length */
+#define DWL_18 (2 << 19) /* Data Word Length */
+#define DWL_20 (3 << 19) /* Data Word Length */
+#define DWL_22 (4 << 19) /* Data Word Length */
+#define DWL_24 (5 << 19) /* Data Word Length */
+#define DWL_32 (6 << 19) /* Data Word Length */
+
+#define SWL_32 (3 << 16) /* R/W System Word Length */
+#define SCKD (1 << 15) /* Serial Bit Clock Direction */
+#define SWSD (1 << 14) /* Serial WS Direction */
+#define SCKP (1 << 13) /* Serial Bit Clock Polarity */
+#define SWSP (1 << 12) /* Serial WS Polarity */
+#define SDTA (1 << 10) /* Serial Data Alignment */
+#define DEL (1 << 8) /* Serial Data Delay */
+#define CKDV(v) (v << 4) /* Serial Clock Division Ratio */
+#define TRMD (1 << 1) /* Transmit/Receive Mode Select */
+#define EN (1 << 0) /* SSI Module Enable */
+
+/*
+ * SSISR
+ */
+#define UIRQ (1 << 27) /* Underflow Error Interrupt Status */
+#define OIRQ (1 << 26) /* Overflow Error Interrupt Status */
+#define IIRQ (1 << 25) /* Idle Mode Interrupt Status */
+#define DIRQ (1 << 24) /* Data Interrupt Status Flag */
+
+/*
+ * SSIWSR
+ */
+#define CONT (1 << 8) /* WS Continue Function */
+
+struct rsnd_ssi {
+ struct clk *clk;
+ struct rsnd_ssi_platform_info *info; /* rcar_snd.h */
+ struct rsnd_ssi *parent;
+ struct rsnd_mod mod;
+
+ struct rsnd_dai *rdai;
+ struct rsnd_dai_stream *io;
+ u32 cr_own;
+ u32 cr_clk;
+ u32 cr_etc;
+ int err;
+ int dma_offset;
+ unsigned int usrcnt;
+ unsigned int rate;
+};
+
+struct rsnd_ssiu {
+ u32 ssi_mode0;
+ u32 ssi_mode1;
+
+ int ssi_nr;
+ struct rsnd_ssi *ssi;
+};
+
+#define for_each_rsnd_ssi(pos, priv, i) \
+ for (i = 0; \
+ (i < rsnd_ssi_nr(priv)) && \
+ ((pos) = ((struct rsnd_ssiu *)((priv)->ssiu))->ssi + i); \
+ i++)
+
+#define rsnd_ssi_nr(priv) (((struct rsnd_ssiu *)((priv)->ssiu))->ssi_nr)
+#define rsnd_mod_to_ssi(_mod) container_of((_mod), struct rsnd_ssi, mod)
+#define rsnd_dma_to_ssi(dma) rsnd_mod_to_ssi(rsnd_dma_to_mod(dma))
+#define rsnd_ssi_pio_available(ssi) ((ssi)->info->pio_irq > 0)
+#define rsnd_ssi_dma_available(ssi) \
+ rsnd_dma_available(rsnd_mod_to_dma(&(ssi)->mod))
+#define rsnd_ssi_clk_from_parent(ssi) ((ssi)->parent)
+#define rsnd_rdai_is_clk_master(rdai) ((rdai)->clk_master)
+#define rsnd_ssi_mode_flags(p) ((p)->info->flags)
+#define rsnd_ssi_dai_id(ssi) ((ssi)->info->dai_id)
+#define rsnd_ssi_to_ssiu(ssi)\
+ (((struct rsnd_ssiu *)((ssi) - rsnd_mod_id(&(ssi)->mod))) - 1)
+
+static void rsnd_ssi_mode_init(struct rsnd_priv *priv,
+ struct rsnd_ssiu *ssiu)
+{
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct rsnd_ssi *ssi;
+ u32 flags;
+ u32 val;
+ int i;
+
+ /*
+ * SSI_MODE0
+ */
+ ssiu->ssi_mode0 = 0;
+ for_each_rsnd_ssi(ssi, priv, i) {
+ flags = rsnd_ssi_mode_flags(ssi);
+
+ /* see also BUSIF_MODE */
+ if (!(flags & RSND_SSI_DEPENDENT)) {
+ ssiu->ssi_mode0 |= (1 << i);
+ dev_dbg(dev, "SSI%d uses INDEPENDENT mode\n", i);
+ } else {
+ dev_dbg(dev, "SSI%d uses DEPENDENT mode\n", i);
+ }
+ }
+
+ /*
+ * SSI_MODE1
+ */
+#define ssi_parent_set(p, sync, adg, ext) \
+ do { \
+ ssi->parent = ssiu->ssi + p; \
+ if (flags & RSND_SSI_CLK_FROM_ADG) \
+ val = adg; \
+ else \
+ val = ext; \
+ if (flags & RSND_SSI_SYNC) \
+ val |= sync; \
+ } while (0)
+
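+	/*
+	 * SSI1/SSI2 can share the clock pins of SSI0, SSI4 those of SSI3,
+	 * and SSI8 those of SSI7; ssi_parent_set() picks the parent SSI
+	 * and the matching SSI_MODE1 bits for each child below.
+	 */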
+ ssiu->ssi_mode1 = 0;
+ for_each_rsnd_ssi(ssi, priv, i) {
+ flags = rsnd_ssi_mode_flags(ssi);
+
+ if (!(flags & RSND_SSI_CLK_PIN_SHARE))
+ continue;
+
+ val = 0;
+ switch (i) {
+ case 1:
+ ssi_parent_set(0, (1 << 4), (0x2 << 0), (0x1 << 0));
+ break;
+ case 2:
+ ssi_parent_set(0, (1 << 4), (0x2 << 2), (0x1 << 2));
+ break;
+ case 4:
+ ssi_parent_set(3, (1 << 20), (0x2 << 16), (0x1 << 16));
+ break;
+ case 8:
+ ssi_parent_set(7, 0, 0, 0);
+ break;
+ }
+
+ ssiu->ssi_mode1 |= val;
+ }
+}
+
+static void rsnd_ssi_mode_set(struct rsnd_ssi *ssi)
+{
+ struct rsnd_ssiu *ssiu = rsnd_ssi_to_ssiu(ssi);
+
+ rsnd_mod_write(&ssi->mod, SSI_MODE0, ssiu->ssi_mode0);
+ rsnd_mod_write(&ssi->mod, SSI_MODE1, ssiu->ssi_mode1);
+}
+
+static void rsnd_ssi_status_check(struct rsnd_mod *mod,
+ u32 bit)
+{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct device *dev = rsnd_priv_to_dev(priv);
+ u32 status;
+ int i;
+
+ for (i = 0; i < 1024; i++) {
+ status = rsnd_mod_read(mod, SSISR);
+ if (status & bit)
+ return;
+
+ udelay(50);
+ }
+
+ dev_warn(dev, "status check failed\n");
+}
+
+static int rsnd_ssi_master_clk_start(struct rsnd_ssi *ssi,
+ unsigned int rate)
+{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(&ssi->mod);
+ struct device *dev = rsnd_priv_to_dev(priv);
+ int i, j, ret;
+ int adg_clk_div_table[] = {
+ 1, 6, /* see adg.c */
+ };
+ int ssi_clk_mul_table[] = {
+ 1, 2, 4, 8, 16, 6, 12,
+ };
+ unsigned int main_rate;
+
+ /*
+ * Find best clock, and try to start ADG
+ */
+ for (i = 0; i < ARRAY_SIZE(adg_clk_div_table); i++) {
+ for (j = 0; j < ARRAY_SIZE(ssi_clk_mul_table); j++) {
+
+ /*
+			 * this driver assumes that the
+			 * system word is 64fs (= 2 x 32bit)
+			 * see also rsnd_ssi_init()
+ */
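+			/*
+			 * for example, rate = 44100, div = 1, mul = 8 gives
+			 *	main_rate = 44100 / 1 * 32 * 2 * 8 = 22579200Hz
+			 */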
+ main_rate = rate / adg_clk_div_table[i]
+ * 32 * 2 * ssi_clk_mul_table[j];
+
+ ret = rsnd_adg_ssi_clk_try_start(&ssi->mod, main_rate);
+ if (0 == ret) {
+ ssi->rate = rate;
+ ssi->cr_clk = FORCE | SWL_32 |
+ SCKD | SWSD | CKDV(j);
+
+ dev_dbg(dev, "ssi%d outputs %u Hz\n",
+ rsnd_mod_id(&ssi->mod), rate);
+
+ return 0;
+ }
+ }
+ }
+
+ dev_err(dev, "unsupported clock rate\n");
+ return -EIO;
+}
+
+static void rsnd_ssi_master_clk_stop(struct rsnd_ssi *ssi)
+{
+ ssi->rate = 0;
+ ssi->cr_clk = 0;
+ rsnd_adg_ssi_clk_stop(&ssi->mod);
+}
+
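+/*
+ * rsnd_ssi_hw_start()/rsnd_ssi_hw_stop() are reference counted through
+ * ssi->usrcnt, so a parent SSI that only supplies the clock is enabled
+ * on first use and disabled on last use.
+ */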
+static void rsnd_ssi_hw_start(struct rsnd_ssi *ssi,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io)
+{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(&ssi->mod);
+ struct device *dev = rsnd_priv_to_dev(priv);
+ u32 cr;
+
+ if (0 == ssi->usrcnt) {
+ clk_enable(ssi->clk);
+
+ if (rsnd_rdai_is_clk_master(rdai)) {
+ struct snd_pcm_runtime *runtime;
+
+ runtime = rsnd_io_to_runtime(io);
+
+ if (rsnd_ssi_clk_from_parent(ssi))
+ rsnd_ssi_hw_start(ssi->parent, rdai, io);
+ else
+ rsnd_ssi_master_clk_start(ssi, runtime->rate);
+ }
+ }
+
+ cr = ssi->cr_own |
+ ssi->cr_clk |
+ ssi->cr_etc |
+ EN;
+
+ rsnd_mod_write(&ssi->mod, SSICR, cr);
+
+ ssi->usrcnt++;
+
+ dev_dbg(dev, "ssi%d hw started\n", rsnd_mod_id(&ssi->mod));
+}
+
+static void rsnd_ssi_hw_stop(struct rsnd_ssi *ssi,
+ struct rsnd_dai *rdai)
+{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(&ssi->mod);
+ struct device *dev = rsnd_priv_to_dev(priv);
+ u32 cr;
+
+ if (0 == ssi->usrcnt) /* stop might be called without start */
+ return;
+
+ ssi->usrcnt--;
+
+ if (0 == ssi->usrcnt) {
+ /*
+		 * disable all IRQs,
+		 * and wait until all data has been sent
+ */
+ cr = ssi->cr_own |
+ ssi->cr_clk;
+
+ rsnd_mod_write(&ssi->mod, SSICR, cr | EN);
+ rsnd_ssi_status_check(&ssi->mod, DIRQ);
+
+ /*
+		 * disable SSI,
+		 * and wait for the idle state
+ */
+ rsnd_mod_write(&ssi->mod, SSICR, cr); /* disabled all */
+ rsnd_ssi_status_check(&ssi->mod, IIRQ);
+
+ if (rsnd_rdai_is_clk_master(rdai)) {
+ if (rsnd_ssi_clk_from_parent(ssi))
+ rsnd_ssi_hw_stop(ssi->parent, rdai);
+ else
+ rsnd_ssi_master_clk_stop(ssi);
+ }
+
+ clk_disable(ssi->clk);
+ }
+
+ dev_dbg(dev, "ssi%d hw stopped\n", rsnd_mod_id(&ssi->mod));
+}
+
+/*
+ * SSI mod common functions
+ */
+static int rsnd_ssi_init(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io)
+{
+ struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ u32 cr;
+
+ cr = FORCE;
+
+ /*
+ * always use 32bit system word for easy clock calculation.
+	 * see also rsnd_ssi_master_clk_start()
+ */
+ cr |= SWL_32;
+
+ /*
+ * init clock settings for SSICR
+ */
+ switch (runtime->sample_bits) {
+ case 16:
+ cr |= DWL_16;
+ break;
+ case 32:
+ cr |= DWL_24;
+ break;
+ default:
+ return -EIO;
+ }
+
+ if (rdai->bit_clk_inv)
+ cr |= SCKP;
+ if (rdai->frm_clk_inv)
+ cr |= SWSP;
+ if (rdai->data_alignment)
+ cr |= SDTA;
+ if (rdai->sys_delay)
+ cr |= DEL;
+ if (rsnd_dai_is_play(rdai, io))
+ cr |= TRMD;
+
+ /*
+ * set ssi parameter
+ */
+ ssi->rdai = rdai;
+ ssi->io = io;
+ ssi->cr_own = cr;
+ ssi->err = -1; /* ignore 1st error */
+
+ rsnd_ssi_mode_set(ssi);
+
+ dev_dbg(dev, "%s.%d init\n", rsnd_mod_name(mod), rsnd_mod_id(mod));
+
+ return 0;
+}
+
+static int rsnd_ssi_quit(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io)
+{
+ struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct device *dev = rsnd_priv_to_dev(priv);
+
+ dev_dbg(dev, "%s.%d quit\n", rsnd_mod_name(mod), rsnd_mod_id(mod));
+
+ if (ssi->err > 0)
+ dev_warn(dev, "ssi under/over flow err = %d\n", ssi->err);
+
+ ssi->rdai = NULL;
+ ssi->io = NULL;
+ ssi->cr_own = 0;
+ ssi->err = 0;
+
+ return 0;
+}
+
+static void rsnd_ssi_record_error(struct rsnd_ssi *ssi, u32 status)
+{
+ /* under/over flow error */
+ if (status & (UIRQ | OIRQ)) {
+ ssi->err++;
+
+ /* clear error status */
+ rsnd_mod_write(&ssi->mod, SSISR, 0);
+ }
+}
+
+/*
+ * SSI PIO
+ */
+static irqreturn_t rsnd_ssi_pio_interrupt(int irq, void *data)
+{
+ struct rsnd_ssi *ssi = data;
+ struct rsnd_dai_stream *io = ssi->io;
+ u32 status = rsnd_mod_read(&ssi->mod, SSISR);
+ irqreturn_t ret = IRQ_NONE;
+
+ if (io && (status & DIRQ)) {
+ struct rsnd_dai *rdai = ssi->rdai;
+ struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+ u32 *buf = (u32 *)(runtime->dma_area +
+ rsnd_dai_pointer_offset(io, 0));
+
+ rsnd_ssi_record_error(ssi, status);
+
+ /*
+		 * 8/16/32bit data can be accessed via the TDR/RDR registers
+		 * directly as 32bit data
+ * see rsnd_ssi_init()
+ */
+ if (rsnd_dai_is_play(rdai, io))
+ rsnd_mod_write(&ssi->mod, SSITDR, *buf);
+ else
+ *buf = rsnd_mod_read(&ssi->mod, SSIRDR);
+
+ rsnd_dai_pointer_update(io, sizeof(*buf));
+
+ ret = IRQ_HANDLED;
+ }
+
+ return ret;
+}
+
+static int rsnd_ssi_pio_start(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io)
+{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+ struct device *dev = rsnd_priv_to_dev(priv);
+
+ /* enable PIO IRQ */
+ ssi->cr_etc = UIEN | OIEN | DIEN;
+
+ rsnd_ssi_hw_start(ssi, rdai, io);
+
+ dev_dbg(dev, "%s.%d start\n", rsnd_mod_name(mod), rsnd_mod_id(mod));
+
+ return 0;
+}
+
+static int rsnd_ssi_pio_stop(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io)
+{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+
+ dev_dbg(dev, "%s.%d stop\n", rsnd_mod_name(mod), rsnd_mod_id(mod));
+
+ ssi->cr_etc = 0;
+
+ rsnd_ssi_hw_stop(ssi, rdai);
+
+ return 0;
+}
+
+static struct rsnd_mod_ops rsnd_ssi_pio_ops = {
+ .name = "ssi (pio)",
+ .init = rsnd_ssi_init,
+ .quit = rsnd_ssi_quit,
+ .start = rsnd_ssi_pio_start,
+ .stop = rsnd_ssi_pio_stop,
+};
+
+static int rsnd_ssi_dma_inquiry(struct rsnd_dma *dma, dma_addr_t *buf, int *len)
+{
+ struct rsnd_ssi *ssi = rsnd_dma_to_ssi(dma);
+ struct rsnd_dai_stream *io = ssi->io;
+ struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
+
+ *len = io->byte_per_period;
+ *buf = runtime->dma_addr +
+ rsnd_dai_pointer_offset(io, ssi->dma_offset + *len);
+	ssi->dma_offset = *len; /* accounts for the A/B plane (double buffer) */
+
+ return 0;
+}
+
+static int rsnd_ssi_dma_complete(struct rsnd_dma *dma)
+{
+ struct rsnd_ssi *ssi = rsnd_dma_to_ssi(dma);
+ struct rsnd_dai_stream *io = ssi->io;
+ u32 status = rsnd_mod_read(&ssi->mod, SSISR);
+
+ rsnd_ssi_record_error(ssi, status);
+
+ rsnd_dai_pointer_update(ssi->io, io->byte_per_period);
+
+ return 0;
+}
+
+static int rsnd_ssi_dma_start(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io)
+{
+ struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+ struct rsnd_dma *dma = rsnd_mod_to_dma(&ssi->mod);
+
+ /* enable DMA transfer */
+ ssi->cr_etc = DMEN;
+ ssi->dma_offset = 0;
+
+ rsnd_dma_start(dma);
+
+ rsnd_ssi_hw_start(ssi, ssi->rdai, io);
+
+ /* enable WS continue */
+ if (rsnd_rdai_is_clk_master(rdai))
+ rsnd_mod_write(&ssi->mod, SSIWSR, CONT);
+
+ return 0;
+}
+
+static int rsnd_ssi_dma_stop(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io)
+{
+ struct rsnd_ssi *ssi = rsnd_mod_to_ssi(mod);
+ struct rsnd_dma *dma = rsnd_mod_to_dma(&ssi->mod);
+
+ ssi->cr_etc = 0;
+
+ rsnd_ssi_hw_stop(ssi, rdai);
+
+ rsnd_dma_stop(dma);
+
+ return 0;
+}
+
+static struct rsnd_mod_ops rsnd_ssi_dma_ops = {
+ .name = "ssi (dma)",
+ .init = rsnd_ssi_init,
+ .quit = rsnd_ssi_quit,
+ .start = rsnd_ssi_dma_start,
+ .stop = rsnd_ssi_dma_stop,
+};
+
+/*
+ * Non SSI
+ */
+static int rsnd_ssi_non(struct rsnd_mod *mod,
+ struct rsnd_dai *rdai,
+ struct rsnd_dai_stream *io)
+{
+ struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
+ struct device *dev = rsnd_priv_to_dev(priv);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ return 0;
+}
+
+static struct rsnd_mod_ops rsnd_ssi_non_ops = {
+ .name = "ssi (non)",
+ .init = rsnd_ssi_non,
+ .quit = rsnd_ssi_non,
+ .start = rsnd_ssi_non,
+ .stop = rsnd_ssi_non,
+};
+
+/*
+ * ssi mod function
+ */
+struct rsnd_mod *rsnd_ssi_mod_get_frm_dai(struct rsnd_priv *priv,
+ int dai_id, int is_play)
+{
+ struct rsnd_ssi *ssi;
+ int i, has_play;
+
+ is_play = !!is_play;
+
+ for_each_rsnd_ssi(ssi, priv, i) {
+ if (rsnd_ssi_dai_id(ssi) != dai_id)
+ continue;
+
+ has_play = !!(rsnd_ssi_mode_flags(ssi) & RSND_SSI_PLAY);
+
+ if (is_play == has_play)
+ return &ssi->mod;
+ }
+
+ return NULL;
+}
+
+struct rsnd_mod *rsnd_ssi_mod_get(struct rsnd_priv *priv, int id)
+{
+ BUG_ON(id < 0 || id >= rsnd_ssi_nr(priv));
+
+ return &(((struct rsnd_ssiu *)(priv->ssiu))->ssi + id)->mod;
+}
+
+int rsnd_ssi_probe(struct platform_device *pdev,
+ struct rcar_snd_info *info,
+ struct rsnd_priv *priv)
+{
+ struct rsnd_ssi_platform_info *pinfo;
+ struct device *dev = rsnd_priv_to_dev(priv);
+ struct rsnd_mod_ops *ops;
+ struct clk *clk;
+ struct rsnd_ssiu *ssiu;
+ struct rsnd_ssi *ssi;
+ char name[RSND_SSI_NAME_SIZE];
+ int i, nr, ret;
+
+ /*
+ * init SSI
+ */
+ nr = info->ssi_info_nr;
+ ssiu = devm_kzalloc(dev, sizeof(*ssiu) + (sizeof(*ssi) * nr),
+ GFP_KERNEL);
+ if (!ssiu) {
+ dev_err(dev, "SSI allocate failed\n");
+ return -ENOMEM;
+ }
+
+ priv->ssiu = ssiu;
+ ssiu->ssi = (struct rsnd_ssi *)(ssiu + 1);
+ ssiu->ssi_nr = nr;
+
+ for_each_rsnd_ssi(ssi, priv, i) {
+ pinfo = &info->ssi_info[i];
+
+ snprintf(name, RSND_SSI_NAME_SIZE, "ssi.%d", i);
+
+ clk = clk_get(dev, name);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ ssi->info = pinfo;
+ ssi->clk = clk;
+
+ ops = &rsnd_ssi_non_ops;
+
+ /*
+ * SSI DMA case
+ */
+ if (pinfo->dma_id > 0) {
+ ret = rsnd_dma_init(
+ priv, rsnd_mod_to_dma(&ssi->mod),
+ (rsnd_ssi_mode_flags(ssi) & RSND_SSI_PLAY),
+ pinfo->dma_id,
+ rsnd_ssi_dma_inquiry,
+ rsnd_ssi_dma_complete);
+ if (ret < 0)
+				dev_info(dev, "SSI DMA failed. try PIO transfer\n");
+ else
+ ops = &rsnd_ssi_dma_ops;
+
+ dev_dbg(dev, "SSI%d use DMA transfer\n", i);
+ }
+
+ /*
+ * SSI PIO case
+ */
+ if (!rsnd_ssi_dma_available(ssi) &&
+ rsnd_ssi_pio_available(ssi)) {
+ ret = devm_request_irq(dev, pinfo->pio_irq,
+ &rsnd_ssi_pio_interrupt,
+ IRQF_SHARED,
+ dev_name(dev), ssi);
+ if (ret) {
+ dev_err(dev, "SSI request interrupt failed\n");
+ return ret;
+ }
+
+ ops = &rsnd_ssi_pio_ops;
+
+ dev_dbg(dev, "SSI%d use PIO transfer\n", i);
+ }
+
+ rsnd_mod_init(priv, &ssi->mod, ops, i);
+ }
+
+ rsnd_ssi_mode_init(priv, ssiu);
+
+ dev_dbg(dev, "ssi probed\n");
+
+ return 0;
+}
+
+void rsnd_ssi_remove(struct platform_device *pdev,
+ struct rsnd_priv *priv)
+{
+ struct rsnd_ssi *ssi;
+ int i;
+
+ for_each_rsnd_ssi(ssi, priv, i) {
+ clk_put(ssi->clk);
+ if (rsnd_ssi_dma_available(ssi))
+ rsnd_dma_quit(priv, rsnd_mod_to_dma(&ssi->mod));
+ }
+
+}
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 06a8000aa07..53c9ecdd119 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -149,8 +149,9 @@ static int soc_compr_free(struct snd_compr_stream *cstream)
SND_SOC_DAPM_STREAM_STOP);
} else {
rtd->pop_wait = 1;
- schedule_delayed_work(&rtd->delayed_work,
- msecs_to_jiffies(rtd->pmdown_time));
+ queue_delayed_work(system_power_efficient_wq,
+ &rtd->delayed_work,
+ msecs_to_jiffies(rtd->pmdown_time));
}
} else {
/* capture streams can be powered down now */
@@ -334,7 +335,7 @@ static int soc_compr_copy(struct snd_compr_stream *cstream,
return ret;
}
-static int sst_compr_set_metadata(struct snd_compr_stream *cstream,
+static int soc_compr_set_metadata(struct snd_compr_stream *cstream,
struct snd_compr_metadata *metadata)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
@@ -347,7 +348,7 @@ static int sst_compr_set_metadata(struct snd_compr_stream *cstream,
return ret;
}
-static int sst_compr_get_metadata(struct snd_compr_stream *cstream,
+static int soc_compr_get_metadata(struct snd_compr_stream *cstream,
struct snd_compr_metadata *metadata)
{
struct snd_soc_pcm_runtime *rtd = cstream->private_data;
@@ -364,8 +365,8 @@ static struct snd_compr_ops soc_compr_ops = {
.open = soc_compr_open,
.free = soc_compr_free,
.set_params = soc_compr_set_params,
- .set_metadata = sst_compr_set_metadata,
- .get_metadata = sst_compr_get_metadata,
+ .set_metadata = soc_compr_set_metadata,
+ .get_metadata = soc_compr_get_metadata,
.get_params = soc_compr_get_params,
.trigger = soc_compr_trigger,
.pointer = soc_compr_pointer,
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 0ec070cf723..4d0561312f3 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -30,9 +30,12 @@
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/platform_device.h>
+#include <linux/pinctrl/consumer.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include <sound/ac97_codec.h>
#include <sound/core.h>
#include <sound/jack.h>
@@ -47,8 +50,6 @@
#define NAME_SIZE 32
-static DECLARE_WAIT_QUEUE_HEAD(soc_pm_waitq);
-
#ifdef CONFIG_DEBUG_FS
struct dentry *snd_soc_debugfs_root;
EXPORT_SYMBOL_GPL(snd_soc_debugfs_root);
@@ -69,6 +70,16 @@ static int pmdown_time = 5000;
module_param(pmdown_time, int, 0);
MODULE_PARM_DESC(pmdown_time, "DAPM stream powerdown time (msecs)");
+struct snd_ac97_reset_cfg {
+ struct pinctrl *pctl;
+ struct pinctrl_state *pstate_reset;
+ struct pinctrl_state *pstate_warm_reset;
+ struct pinctrl_state *pstate_run;
+ int gpio_sdata;
+ int gpio_sync;
+ int gpio_reset;
+};
+
/* returns the minimum number of bytes needed to represent
* a particular given value */
static int min_bytes_needed(unsigned long val)
@@ -192,7 +203,7 @@ static ssize_t pmdown_time_set(struct device *dev,
struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
int ret;
- ret = strict_strtol(buf, 10, &rtd->pmdown_time);
+ ret = kstrtol(buf, 10, &rtd->pmdown_time);
if (ret)
return ret;
@@ -237,6 +248,7 @@ static ssize_t codec_reg_write_file(struct file *file,
char *start = buf;
unsigned long reg, value;
struct snd_soc_codec *codec = file->private_data;
+ int ret;
buf_size = min(count, (sizeof(buf)-1));
if (copy_from_user(buf, user_buf, buf_size))
@@ -248,8 +260,9 @@ static ssize_t codec_reg_write_file(struct file *file,
reg = simple_strtoul(start, &start, 16);
while (*start == ' ')
start++;
- if (strict_strtoul(start, 16, &value))
- return -EINVAL;
+ ret = kstrtoul(start, 16, &value);
+ if (ret)
+ return ret;
/* Userspace has been fiddling around behind the kernel's back */
add_taint(TAINT_USER, LOCKDEP_NOW_UNRELIABLE);
@@ -530,6 +543,15 @@ static int soc_ac97_dev_register(struct snd_soc_codec *codec)
}
#endif
+static void codec2codec_close_delayed_work(struct work_struct *work)
+{
+	/* Currently nothing to do for c2c links.
+ * Since c2c links are internal nodes in the DAPM graph and
+ * don't interface with the outside world or application layer
+ * we don't have to do any special handling on close.
+ */
+}
+
#ifdef CONFIG_PM_SLEEP
/* powers down audio subsystem for suspend */
int snd_soc_suspend(struct device *dev)
@@ -1223,9 +1245,6 @@ static int soc_post_component_init(struct snd_soc_card *card,
}
rtd->card = card;
- /* Make sure all DAPM widgets are instantiated */
- snd_soc_dapm_new_widgets(&codec->dapm);
-
/* machine controls, routes and widgets are not prefixed */
temp = codec->name_prefix;
codec->name_prefix = NULL;
@@ -1428,6 +1447,9 @@ static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order)
return ret;
}
} else {
+ INIT_DELAYED_WORK(&rtd->delayed_work,
+ codec2codec_close_delayed_work);
+
/* link the DAI widgets */
play_w = codec_dai->playback_widget;
capture_w = cpu_dai->capture_widget;
@@ -1718,8 +1740,6 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
snd_soc_dapm_add_routes(&card->dapm, card->dapm_routes,
card->num_dapm_routes);
- snd_soc_dapm_new_widgets(&card->dapm);
-
for (i = 0; i < card->num_links; i++) {
dai_link = &card->dai_link[i];
dai_fmt = dai_link->dai_fmt;
@@ -1798,12 +1818,12 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
}
}
- snd_soc_dapm_new_widgets(&card->dapm);
-
if (card->fully_routed)
list_for_each_entry(codec, &card->codec_dev_list, card_list)
snd_soc_dapm_auto_nc_codec_pins(codec);
+ snd_soc_dapm_new_widgets(card);
+
ret = snd_card_register(card->snd_card);
if (ret < 0) {
dev_err(card->dev, "ASoC: failed to register soundcard %d\n",
@@ -2080,6 +2100,117 @@ int snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
}
EXPORT_SYMBOL_GPL(snd_soc_new_ac97_codec);
+static struct snd_ac97_reset_cfg snd_ac97_rst_cfg;
+
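+/*
+ * Generic pinctrl/GPIO based AC97 reset helpers: the AC97 pins are muxed
+ * to the reset pinctrl state, the cold/warm reset sequence is bit-banged
+ * on the sync/sdata/reset GPIOs, and the pins are then returned to the
+ * "ac97-running" state.
+ */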
+static void snd_soc_ac97_warm_reset(struct snd_ac97 *ac97)
+{
+ struct pinctrl *pctl = snd_ac97_rst_cfg.pctl;
+
+ pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_warm_reset);
+
+ gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 1);
+
+ udelay(10);
+
+ gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 0);
+
+ pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_run);
+ msleep(2);
+}
+
+static void snd_soc_ac97_reset(struct snd_ac97 *ac97)
+{
+ struct pinctrl *pctl = snd_ac97_rst_cfg.pctl;
+
+ pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_reset);
+
+ gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 0);
+ gpio_direction_output(snd_ac97_rst_cfg.gpio_sdata, 0);
+ gpio_direction_output(snd_ac97_rst_cfg.gpio_reset, 0);
+
+ udelay(10);
+
+ gpio_direction_output(snd_ac97_rst_cfg.gpio_reset, 1);
+
+ pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_run);
+ msleep(2);
+}
+
+static int snd_soc_ac97_parse_pinctl(struct device *dev,
+ struct snd_ac97_reset_cfg *cfg)
+{
+ struct pinctrl *p;
+ struct pinctrl_state *state;
+ int gpio;
+ int ret;
+
+ p = devm_pinctrl_get(dev);
+ if (IS_ERR(p)) {
+ dev_err(dev, "Failed to get pinctrl\n");
+ return PTR_RET(p);
+ }
+ cfg->pctl = p;
+
+ state = pinctrl_lookup_state(p, "ac97-reset");
+ if (IS_ERR(state)) {
+ dev_err(dev, "Can't find pinctrl state ac97-reset\n");
+ return PTR_RET(state);
+ }
+ cfg->pstate_reset = state;
+
+ state = pinctrl_lookup_state(p, "ac97-warm-reset");
+ if (IS_ERR(state)) {
+ dev_err(dev, "Can't find pinctrl state ac97-warm-reset\n");
+ return PTR_RET(state);
+ }
+ cfg->pstate_warm_reset = state;
+
+ state = pinctrl_lookup_state(p, "ac97-running");
+ if (IS_ERR(state)) {
+ dev_err(dev, "Can't find pinctrl state ac97-running\n");
+ return PTR_RET(state);
+ }
+ cfg->pstate_run = state;
+
+ gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 0);
+ if (gpio < 0) {
+ dev_err(dev, "Can't find ac97-sync gpio\n");
+ return gpio;
+ }
+ ret = devm_gpio_request(dev, gpio, "AC97 link sync");
+ if (ret) {
+ dev_err(dev, "Failed requesting ac97-sync gpio\n");
+ return ret;
+ }
+ cfg->gpio_sync = gpio;
+
+ gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 1);
+ if (gpio < 0) {
+ dev_err(dev, "Can't find ac97-sdata gpio %d\n", gpio);
+ return gpio;
+ }
+ ret = devm_gpio_request(dev, gpio, "AC97 link sdata");
+ if (ret) {
+ dev_err(dev, "Failed requesting ac97-sdata gpio\n");
+ return ret;
+ }
+ cfg->gpio_sdata = gpio;
+
+ gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 2);
+ if (gpio < 0) {
+ dev_err(dev, "Can't find ac97-reset gpio\n");
+ return gpio;
+ }
+ ret = devm_gpio_request(dev, gpio, "AC97 link reset");
+ if (ret) {
+ dev_err(dev, "Failed requesting ac97-reset gpio\n");
+ return ret;
+ }
+ cfg->gpio_reset = gpio;
+
+ return 0;
+}
+
struct snd_ac97_bus_ops *soc_ac97_ops;
EXPORT_SYMBOL_GPL(soc_ac97_ops);
@@ -2098,6 +2229,35 @@ int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops)
EXPORT_SYMBOL_GPL(snd_soc_set_ac97_ops);
/**
+ * snd_soc_set_ac97_ops_of_reset - Set ac97 ops with generic ac97 reset functions
+ *
+ * This function sets the reset and warm_reset properties of ops and parses
+ * the device node of pdev to get pinctrl states and gpio numbers to use.
+ */
+int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
+ struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct snd_ac97_reset_cfg cfg;
+ int ret;
+
+ ret = snd_soc_ac97_parse_pinctl(dev, &cfg);
+ if (ret)
+ return ret;
+
+ ret = snd_soc_set_ac97_ops(ops);
+ if (ret)
+ return ret;
+
+ ops->warm_reset = snd_soc_ac97_warm_reset;
+ ops->reset = snd_soc_ac97_reset;
+
+ snd_ac97_rst_cfg = cfg;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_set_ac97_ops_of_reset);
+
+/**
* snd_soc_free_ac97_codec - free AC97 codec device
* @codec: audio codec
*
@@ -2299,6 +2459,22 @@ static int snd_soc_add_controls(struct snd_card *card, struct device *dev,
return 0;
}
+struct snd_kcontrol *snd_soc_card_get_kcontrol(struct snd_soc_card *soc_card,
+ const char *name)
+{
+ struct snd_card *card = soc_card->snd_card;
+ struct snd_kcontrol *kctl;
+
+ if (unlikely(!name))
+ return NULL;
+
+ list_for_each_entry(kctl, &card->controls, list)
+ if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name)))
+ return kctl;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(snd_soc_card_get_kcontrol);
+
/**
* snd_soc_add_codec_controls - add an array of controls to a codec.
* Convenience function to add a list of controls. Many codecs were
@@ -2541,59 +2717,6 @@ int snd_soc_put_value_enum_double(struct snd_kcontrol *kcontrol,
EXPORT_SYMBOL_GPL(snd_soc_put_value_enum_double);
/**
- * snd_soc_info_enum_ext - external enumerated single mixer info callback
- * @kcontrol: mixer control
- * @uinfo: control element information
- *
- * Callback to provide information about an external enumerated
- * single mixer.
- *
- * Returns 0 for success.
- */
-int snd_soc_info_enum_ext(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
-
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->count = 1;
- uinfo->value.enumerated.items = e->max;
-
- if (uinfo->value.enumerated.item > e->max - 1)
- uinfo->value.enumerated.item = e->max - 1;
- strcpy(uinfo->value.enumerated.name,
- e->texts[uinfo->value.enumerated.item]);
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_info_enum_ext);
-
-/**
- * snd_soc_info_volsw_ext - external single mixer info callback
- * @kcontrol: mixer control
- * @uinfo: control element information
- *
- * Callback to provide information about a single external mixer control.
- *
- * Returns 0 for success.
- */
-int snd_soc_info_volsw_ext(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- int max = kcontrol->private_value;
-
- if (max == 1 && !strstr(kcontrol->id.name, " Volume"))
- uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
- else
- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
-
- uinfo->count = 1;
- uinfo->value.integer.min = 0;
- uinfo->value.integer.max = max;
- return 0;
-}
-EXPORT_SYMBOL_GPL(snd_soc_info_volsw_ext);
-
-/**
* snd_soc_info_volsw - single mixer info callback
* @kcontrol: mixer control
* @uinfo: control element information
@@ -3908,10 +4031,8 @@ int snd_soc_add_platform(struct device *dev, struct snd_soc_platform *platform,
{
/* create platform component name */
platform->name = fmt_single_name(dev, &platform->id);
- if (platform->name == NULL) {
- kfree(platform);
+ if (platform->name == NULL)
return -ENOMEM;
- }
platform->dev = dev;
platform->driver = platform_drv;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index b94190820e8..c17c14c394d 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -47,6 +47,15 @@
#define DAPM_UPDATE_STAT(widget, val) widget->dapm->card->dapm_stats.val++;
+static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm,
+ struct snd_soc_dapm_widget *wsource, struct snd_soc_dapm_widget *wsink,
+ const char *control,
+ int (*connected)(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink));
+static struct snd_soc_dapm_widget *
+snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
+ const struct snd_soc_dapm_widget *widget);
+
/* dapm power sequences - make this per codec in the future */
static int dapm_up_seq[] = {
[snd_soc_dapm_pre] = 0,
@@ -73,16 +82,18 @@ static int dapm_up_seq[] = {
[snd_soc_dapm_hp] = 10,
[snd_soc_dapm_spk] = 10,
[snd_soc_dapm_line] = 10,
- [snd_soc_dapm_post] = 11,
+ [snd_soc_dapm_kcontrol] = 11,
+ [snd_soc_dapm_post] = 12,
};
static int dapm_down_seq[] = {
[snd_soc_dapm_pre] = 0,
- [snd_soc_dapm_adc] = 1,
- [snd_soc_dapm_hp] = 2,
- [snd_soc_dapm_spk] = 2,
- [snd_soc_dapm_line] = 2,
- [snd_soc_dapm_out_drv] = 2,
+ [snd_soc_dapm_kcontrol] = 1,
+ [snd_soc_dapm_adc] = 2,
+ [snd_soc_dapm_hp] = 3,
+ [snd_soc_dapm_spk] = 3,
+ [snd_soc_dapm_line] = 3,
+ [snd_soc_dapm_out_drv] = 3,
[snd_soc_dapm_pga] = 4,
[snd_soc_dapm_switch] = 5,
[snd_soc_dapm_mixer_named_ctl] = 5,
@@ -174,36 +185,178 @@ static inline struct snd_soc_dapm_widget *dapm_cnew_widget(
return kmemdup(_widget, sizeof(*_widget), GFP_KERNEL);
}
-/* get snd_card from DAPM context */
-static inline struct snd_card *dapm_get_snd_card(
- struct snd_soc_dapm_context *dapm)
+struct dapm_kcontrol_data {
+ unsigned int value;
+ struct snd_soc_dapm_widget *widget;
+ struct list_head paths;
+ struct snd_soc_dapm_widget_list *wlist;
+};
+
+static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
+ struct snd_kcontrol *kcontrol)
{
- if (dapm->codec)
- return dapm->codec->card->snd_card;
- else if (dapm->platform)
- return dapm->platform->card->snd_card;
- else
- BUG();
+ struct dapm_kcontrol_data *data;
+ struct soc_mixer_control *mc;
- /* unreachable */
- return NULL;
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ dev_err(widget->dapm->dev,
+ "ASoC: can't allocate kcontrol data for %s\n",
+ widget->name);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&data->paths);
+
+ switch (widget->id) {
+ case snd_soc_dapm_switch:
+ case snd_soc_dapm_mixer:
+ case snd_soc_dapm_mixer_named_ctl:
+ mc = (struct soc_mixer_control *)kcontrol->private_value;
+
+ if (mc->autodisable) {
+ struct snd_soc_dapm_widget template;
+
+ memset(&template, 0, sizeof(template));
+ template.reg = mc->reg;
+ template.mask = (1 << fls(mc->max)) - 1;
+ template.shift = mc->shift;
+ if (mc->invert)
+ template.off_val = mc->max;
+ else
+ template.off_val = 0;
+ template.on_val = template.off_val;
+ template.id = snd_soc_dapm_kcontrol;
+ template.name = kcontrol->id.name;
+
+ data->value = template.on_val;
+
+ data->widget = snd_soc_dapm_new_control(widget->dapm,
+ &template);
+ if (!data->widget) {
+ kfree(data);
+ return -ENOMEM;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ kcontrol->private_data = data;
+
+ return 0;
}
-/* get soc_card from DAPM context */
-static inline struct snd_soc_card *dapm_get_soc_card(
- struct snd_soc_dapm_context *dapm)
+static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
{
- if (dapm->codec)
- return dapm->codec->card;
- else if (dapm->platform)
- return dapm->platform->card;
+ struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
+ kfree(data->widget);
+ kfree(data->wlist);
+ kfree(data);
+}
+
+static struct snd_soc_dapm_widget_list *dapm_kcontrol_get_wlist(
+ const struct snd_kcontrol *kcontrol)
+{
+ struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);
+
+ return data->wlist;
+}
+
+static int dapm_kcontrol_add_widget(struct snd_kcontrol *kcontrol,
+ struct snd_soc_dapm_widget *widget)
+{
+ struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_dapm_widget_list *new_wlist;
+ unsigned int n;
+
+ if (data->wlist)
+ n = data->wlist->num_widgets + 1;
else
- BUG();
+ n = 1;
- /* unreachable */
- return NULL;
+ new_wlist = krealloc(data->wlist,
+ sizeof(*new_wlist) + sizeof(widget) * n, GFP_KERNEL);
+ if (!new_wlist)
+ return -ENOMEM;
+
+ new_wlist->widgets[n - 1] = widget;
+ new_wlist->num_widgets = n;
+
+ data->wlist = new_wlist;
+
+ return 0;
+}
+
+static void dapm_kcontrol_add_path(const struct snd_kcontrol *kcontrol,
+ struct snd_soc_dapm_path *path)
+{
+ struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);
+
+ list_add_tail(&path->list_kcontrol, &data->paths);
+
+ if (data->widget) {
+ snd_soc_dapm_add_path(data->widget->dapm, data->widget,
+ path->source, NULL, NULL);
+ }
+}
+
+static bool dapm_kcontrol_is_powered(const struct snd_kcontrol *kcontrol)
+{
+ struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);
+
+ if (!data->widget)
+ return true;
+
+ return data->widget->power;
+}
+
+static struct list_head *dapm_kcontrol_get_path_list(
+ const struct snd_kcontrol *kcontrol)
+{
+ struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);
+
+ return &data->paths;
+}
+
+#define dapm_kcontrol_for_each_path(path, kcontrol) \
+ list_for_each_entry(path, dapm_kcontrol_get_path_list(kcontrol), \
+ list_kcontrol)
+
+static unsigned int dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol)
+{
+ struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);
+
+ return data->value;
+}
+
+static bool dapm_kcontrol_set_value(const struct snd_kcontrol *kcontrol,
+ unsigned int value)
+{
+ struct dapm_kcontrol_data *data = snd_kcontrol_chip(kcontrol);
+
+ if (data->value == value)
+ return false;
+
+ if (data->widget)
+ data->widget->on_val = value;
+
+ data->value = value;
+
+ return true;
}
+/**
+ * snd_soc_dapm_kcontrol_codec() - Returns the codec associated with a kcontrol
+ * @kcontrol: The kcontrol
+ */
+struct snd_soc_codec *snd_soc_dapm_kcontrol_codec(struct snd_kcontrol *kcontrol)
+{
+ return dapm_kcontrol_get_wlist(kcontrol)->widgets[0]->codec;
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_kcontrol_codec);
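/*
 * Editor's note: a short sketch of the intended call site, not part of the
 * patch. A codec driver's custom DAPM control handler (my_dapm_put is
 * hypothetical) can now reach its codec without touching the widget list,
 * mirroring what the core's own get/put helpers below do.
 */
static int my_dapm_put(struct snd_kcontrol *kcontrol,
		       struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);

	/* ... snd_soc_update_bits(codec, ...) and return whether it changed ... */
	return 0;
}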
+
static void dapm_reset(struct snd_soc_card *card)
{
struct snd_soc_dapm_widget *w;
@@ -211,6 +364,7 @@ static void dapm_reset(struct snd_soc_card *card)
memset(&card->dapm_stats, 0, sizeof(card->dapm_stats));
list_for_each_entry(w, &card->widgets, list) {
+ w->new_power = w->power;
w->power_checked = false;
w->inputs = -1;
w->outputs = -1;
@@ -428,6 +582,7 @@ static void dapm_set_path_status(struct snd_soc_dapm_widget *w,
case snd_soc_dapm_spk:
case snd_soc_dapm_line:
case snd_soc_dapm_dai_link:
+ case snd_soc_dapm_kcontrol:
p->connect = 1;
break;
/* does affect routing - dynamically connected */
@@ -507,17 +662,12 @@ static int dapm_is_shared_kcontrol(struct snd_soc_dapm_context *dapm,
return 0;
}
-static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
-{
- kfree(kctl->private_data);
-}
-
/*
* Determine if a kcontrol is shared. If it is, look it up. If it isn't,
* create it. Either way, add the widget into the control's widget list
*/
static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
- int kci, struct snd_soc_dapm_path *path)
+ int kci)
{
struct snd_soc_dapm_context *dapm = w->dapm;
struct snd_card *card = dapm->card->snd_card;
@@ -525,9 +675,6 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
size_t prefix_len;
int shared;
struct snd_kcontrol *kcontrol;
- struct snd_soc_dapm_widget_list *wlist;
- int wlistentries;
- size_t wlistsize;
bool wname_in_long_name, kcname_in_long_name;
char *long_name;
const char *name;
@@ -546,25 +693,6 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
shared = dapm_is_shared_kcontrol(dapm, w, &w->kcontrol_news[kci],
&kcontrol);
- if (kcontrol) {
- wlist = kcontrol->private_data;
- wlistentries = wlist->num_widgets + 1;
- } else {
- wlist = NULL;
- wlistentries = 1;
- }
-
- wlistsize = sizeof(struct snd_soc_dapm_widget_list) +
- wlistentries * sizeof(struct snd_soc_dapm_widget *);
- wlist = krealloc(wlist, wlistsize, GFP_KERNEL);
- if (wlist == NULL) {
- dev_err(dapm->dev, "ASoC: can't allocate widget list for %s\n",
- w->name);
- return -ENOMEM;
- }
- wlist->num_widgets = wlistentries;
- wlist->widgets[wlistentries - 1] = w;
-
if (!kcontrol) {
if (shared) {
wname_in_long_name = false;
@@ -587,7 +715,6 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
kcname_in_long_name = false;
break;
default:
- kfree(wlist);
return -EINVAL;
}
}
@@ -602,10 +729,8 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
long_name = kasprintf(GFP_KERNEL, "%s %s",
w->name + prefix_len,
w->kcontrol_news[kci].name);
- if (long_name == NULL) {
- kfree(wlist);
+ if (long_name == NULL)
return -ENOMEM;
- }
name = long_name;
} else if (wname_in_long_name) {
@@ -616,23 +741,33 @@ static int dapm_create_or_share_mixmux_kcontrol(struct snd_soc_dapm_widget *w,
name = w->kcontrol_news[kci].name;
}
- kcontrol = snd_soc_cnew(&w->kcontrol_news[kci], wlist, name,
+ kcontrol = snd_soc_cnew(&w->kcontrol_news[kci], NULL, name,
prefix);
- kcontrol->private_free = dapm_kcontrol_free;
kfree(long_name);
+ if (!kcontrol)
+ return -ENOMEM;
+ kcontrol->private_free = dapm_kcontrol_free;
+
+ ret = dapm_kcontrol_data_alloc(w, kcontrol);
+ if (ret) {
+ snd_ctl_free_one(kcontrol);
+ return ret;
+ }
+
ret = snd_ctl_add(card, kcontrol);
if (ret < 0) {
dev_err(dapm->dev,
"ASoC: failed to add widget %s dapm kcontrol %s: %d\n",
w->name, name, ret);
- kfree(wlist);
return ret;
}
}
- kcontrol->private_data = wlist;
+ ret = dapm_kcontrol_add_widget(kcontrol, w);
+ if (ret)
+ return ret;
+
w->kcontrols[kci] = kcontrol;
- path->kcontrol = kcontrol;
return 0;
}
@@ -652,13 +787,15 @@ static int dapm_new_mixer(struct snd_soc_dapm_widget *w)
continue;
if (w->kcontrols[i]) {
- path->kcontrol = w->kcontrols[i];
+ dapm_kcontrol_add_path(w->kcontrols[i], path);
continue;
}
- ret = dapm_create_or_share_mixmux_kcontrol(w, i, path);
+ ret = dapm_create_or_share_mixmux_kcontrol(w, i);
if (ret < 0)
return ret;
+
+ dapm_kcontrol_add_path(w->kcontrols[i], path);
}
}
@@ -679,19 +816,17 @@ static int dapm_new_mux(struct snd_soc_dapm_widget *w)
return -EINVAL;
}
- path = list_first_entry(&w->sources, struct snd_soc_dapm_path,
- list_sink);
- if (!path) {
+ if (list_empty(&w->sources)) {
dev_err(dapm->dev, "ASoC: mux %s has no paths\n", w->name);
return -EINVAL;
}
- ret = dapm_create_or_share_mixmux_kcontrol(w, 0, path);
+ ret = dapm_create_or_share_mixmux_kcontrol(w, 0);
if (ret < 0)
return ret;
list_for_each_entry(path, &w->sources, list_sink)
- path->kcontrol = w->kcontrols[0];
+ dapm_kcontrol_add_path(w->kcontrols[0], path);
return 0;
}
@@ -812,6 +947,7 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
case snd_soc_dapm_supply:
case snd_soc_dapm_regulator_supply:
case snd_soc_dapm_clock_supply:
+ case snd_soc_dapm_kcontrol:
return 0;
default:
break;
@@ -907,6 +1043,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
case snd_soc_dapm_supply:
case snd_soc_dapm_regulator_supply:
case snd_soc_dapm_clock_supply:
+ case snd_soc_dapm_kcontrol:
return 0;
default:
break;
@@ -1061,7 +1198,7 @@ int dapm_regulator_event(struct snd_soc_dapm_widget *w,
int ret;
if (SND_SOC_DAPM_EVENT_ON(event)) {
- if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) {
+ if (w->on_val & SND_SOC_DAPM_REGULATOR_BYPASS) {
ret = regulator_allow_bypass(w->regulator, false);
if (ret != 0)
dev_warn(w->dapm->dev,
@@ -1071,7 +1208,7 @@ int dapm_regulator_event(struct snd_soc_dapm_widget *w,
return regulator_enable(w->regulator);
} else {
- if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) {
+ if (w->on_val & SND_SOC_DAPM_REGULATOR_BYPASS) {
ret = regulator_allow_bypass(w->regulator, true);
if (ret != 0)
dev_warn(w->dapm->dev,
@@ -1243,10 +1380,9 @@ static void dapm_seq_insert(struct snd_soc_dapm_widget *new_widget,
list_add_tail(&new_widget->power_list, list);
}
-static void dapm_seq_check_event(struct snd_soc_dapm_context *dapm,
+static void dapm_seq_check_event(struct snd_soc_card *card,
struct snd_soc_dapm_widget *w, int event)
{
- struct snd_soc_card *card = dapm->card;
const char *ev_name;
int power, ret;
@@ -1280,55 +1416,50 @@ static void dapm_seq_check_event(struct snd_soc_dapm_context *dapm,
return;
}
- if (w->power != power)
+ if (w->new_power != power)
return;
if (w->event && (w->event_flags & event)) {
- pop_dbg(dapm->dev, card->pop_time, "pop test : %s %s\n",
+ pop_dbg(w->dapm->dev, card->pop_time, "pop test : %s %s\n",
w->name, ev_name);
trace_snd_soc_dapm_widget_event_start(w, event);
ret = w->event(w, NULL, event);
trace_snd_soc_dapm_widget_event_done(w, event);
if (ret < 0)
- dev_err(dapm->dev, "ASoC: %s: %s event failed: %d\n",
+ dev_err(w->dapm->dev, "ASoC: %s: %s event failed: %d\n",
ev_name, w->name, ret);
}
}
/* Apply the coalesced changes from a DAPM sequence */
-static void dapm_seq_run_coalesced(struct snd_soc_dapm_context *dapm,
+static void dapm_seq_run_coalesced(struct snd_soc_card *card,
struct list_head *pending)
{
- struct snd_soc_card *card = dapm->card;
struct snd_soc_dapm_widget *w;
- int reg, power;
+ int reg;
unsigned int value = 0;
unsigned int mask = 0;
- unsigned int cur_mask;
reg = list_first_entry(pending, struct snd_soc_dapm_widget,
power_list)->reg;
list_for_each_entry(w, pending, power_list) {
- cur_mask = 1 << w->shift;
BUG_ON(reg != w->reg);
+ w->power = w->new_power;
- if (w->invert)
- power = !w->power;
+ mask |= w->mask << w->shift;
+ if (w->power)
+ value |= w->on_val << w->shift;
else
- power = w->power;
-
- mask |= cur_mask;
- if (power)
- value |= cur_mask;
+ value |= w->off_val << w->shift;
- pop_dbg(dapm->dev, card->pop_time,
+ pop_dbg(w->dapm->dev, card->pop_time,
"pop test : Queue %s: reg=0x%x, 0x%x/0x%x\n",
w->name, reg, value, mask);
/* Check for events */
- dapm_seq_check_event(dapm, w, SND_SOC_DAPM_PRE_PMU);
- dapm_seq_check_event(dapm, w, SND_SOC_DAPM_PRE_PMD);
+ dapm_seq_check_event(card, w, SND_SOC_DAPM_PRE_PMU);
+ dapm_seq_check_event(card, w, SND_SOC_DAPM_PRE_PMD);
}
if (reg >= 0) {
@@ -1338,7 +1469,7 @@ static void dapm_seq_run_coalesced(struct snd_soc_dapm_context *dapm,
w = list_first_entry(pending, struct snd_soc_dapm_widget,
power_list);
- pop_dbg(dapm->dev, card->pop_time,
+ pop_dbg(w->dapm->dev, card->pop_time,
"pop test : Applying 0x%x/0x%x to %x in %dms\n",
value, mask, reg, card->pop_time);
pop_wait(card->pop_time);
@@ -1346,8 +1477,8 @@ static void dapm_seq_run_coalesced(struct snd_soc_dapm_context *dapm,
}
list_for_each_entry(w, pending, power_list) {
- dapm_seq_check_event(dapm, w, SND_SOC_DAPM_POST_PMU);
- dapm_seq_check_event(dapm, w, SND_SOC_DAPM_POST_PMD);
+ dapm_seq_check_event(card, w, SND_SOC_DAPM_POST_PMU);
+ dapm_seq_check_event(card, w, SND_SOC_DAPM_POST_PMD);
}
}
@@ -1359,8 +1490,8 @@ static void dapm_seq_run_coalesced(struct snd_soc_dapm_context *dapm,
* Currently anything that requires more than a single write is not
* handled.
*/
-static void dapm_seq_run(struct snd_soc_dapm_context *dapm,
- struct list_head *list, int event, bool power_up)
+static void dapm_seq_run(struct snd_soc_card *card,
+ struct list_head *list, int event, bool power_up)
{
struct snd_soc_dapm_widget *w, *n;
LIST_HEAD(pending);
@@ -1383,7 +1514,7 @@ static void dapm_seq_run(struct snd_soc_dapm_context *dapm,
if (sort[w->id] != cur_sort || w->reg != cur_reg ||
w->dapm != cur_dapm || w->subseq != cur_subseq) {
if (!list_empty(&pending))
- dapm_seq_run_coalesced(cur_dapm, &pending);
+ dapm_seq_run_coalesced(card, &pending);
if (cur_dapm && cur_dapm->seq_notifier) {
for (i = 0; i < ARRAY_SIZE(dapm_up_seq); i++)
@@ -1443,7 +1574,7 @@ static void dapm_seq_run(struct snd_soc_dapm_context *dapm,
}
if (!list_empty(&pending))
- dapm_seq_run_coalesced(cur_dapm, &pending);
+ dapm_seq_run_coalesced(card, &pending);
if (cur_dapm && cur_dapm->seq_notifier) {
for (i = 0; i < ARRAY_SIZE(dapm_up_seq); i++)
@@ -1453,37 +1584,48 @@ static void dapm_seq_run(struct snd_soc_dapm_context *dapm,
}
}
-static void dapm_widget_update(struct snd_soc_dapm_context *dapm)
+static void dapm_widget_update(struct snd_soc_card *card)
{
- struct snd_soc_dapm_update *update = dapm->update;
- struct snd_soc_dapm_widget *w;
+ struct snd_soc_dapm_update *update = card->update;
+ struct snd_soc_dapm_widget_list *wlist;
+ struct snd_soc_dapm_widget *w = NULL;
+ unsigned int wi;
int ret;
- if (!update)
+ if (!update || !dapm_kcontrol_is_powered(update->kcontrol))
return;
- w = update->widget;
+ wlist = dapm_kcontrol_get_wlist(update->kcontrol);
- if (w->event &&
- (w->event_flags & SND_SOC_DAPM_PRE_REG)) {
- ret = w->event(w, update->kcontrol, SND_SOC_DAPM_PRE_REG);
- if (ret != 0)
- dev_err(dapm->dev, "ASoC: %s DAPM pre-event failed: %d\n",
- w->name, ret);
+ for (wi = 0; wi < wlist->num_widgets; wi++) {
+ w = wlist->widgets[wi];
+
+ if (w->event && (w->event_flags & SND_SOC_DAPM_PRE_REG)) {
+ ret = w->event(w, update->kcontrol, SND_SOC_DAPM_PRE_REG);
+ if (ret != 0)
+ dev_err(w->dapm->dev, "ASoC: %s DAPM pre-event failed: %d\n",
+ w->name, ret);
+ }
}
+ if (!w)
+ return;
+
ret = soc_widget_update_bits_locked(w, update->reg, update->mask,
update->val);
if (ret < 0)
- dev_err(dapm->dev, "ASoC: %s DAPM update failed: %d\n",
+ dev_err(w->dapm->dev, "ASoC: %s DAPM update failed: %d\n",
w->name, ret);
- if (w->event &&
- (w->event_flags & SND_SOC_DAPM_POST_REG)) {
- ret = w->event(w, update->kcontrol, SND_SOC_DAPM_POST_REG);
- if (ret != 0)
- dev_err(dapm->dev, "ASoC: %s DAPM post-event failed: %d\n",
- w->name, ret);
+ for (wi = 0; wi < wlist->num_widgets; wi++) {
+ w = wlist->widgets[wi];
+
+ if (w->event && (w->event_flags & SND_SOC_DAPM_POST_REG)) {
+ ret = w->event(w, update->kcontrol, SND_SOC_DAPM_POST_REG);
+ if (ret != 0)
+ dev_err(w->dapm->dev, "ASoC: %s DAPM post-event failed: %d\n",
+ w->name, ret);
+ }
}
}
@@ -1595,6 +1737,7 @@ static void dapm_widget_set_power(struct snd_soc_dapm_widget *w, bool power,
case snd_soc_dapm_supply:
case snd_soc_dapm_regulator_supply:
case snd_soc_dapm_clock_supply:
+ case snd_soc_dapm_kcontrol:
/* Supplies can't affect their outputs, only their inputs */
break;
default:
@@ -1611,8 +1754,6 @@ static void dapm_widget_set_power(struct snd_soc_dapm_widget *w, bool power,
dapm_seq_insert(w, up_list, true);
else
dapm_seq_insert(w, down_list, false);
-
- w->power = power;
}
static void dapm_power_one_widget(struct snd_soc_dapm_widget *w,
@@ -1646,9 +1787,8 @@ static void dapm_power_one_widget(struct snd_soc_dapm_widget *w,
* o Input pin to Output pin (bypass, sidetone)
* o DAC to ADC (loopback).
*/
-static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
+static int dapm_power_widgets(struct snd_soc_card *card, int event)
{
- struct snd_soc_card *card = dapm->card;
struct snd_soc_dapm_widget *w;
struct snd_soc_dapm_context *d;
LIST_HEAD(up_list);
@@ -1688,7 +1828,7 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
break;
}
- if (w->power) {
+ if (w->new_power) {
d = w->dapm;
/* Supplies and micbiases only bring the
@@ -1730,29 +1870,29 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
trace_snd_soc_dapm_walk_done(card);
/* Run all the bias changes in parallel */
- list_for_each_entry(d, &dapm->card->dapm_list, list)
+ list_for_each_entry(d, &card->dapm_list, list)
async_schedule_domain(dapm_pre_sequence_async, d,
&async_domain);
async_synchronize_full_domain(&async_domain);
list_for_each_entry(w, &down_list, power_list) {
- dapm_seq_check_event(dapm, w, SND_SOC_DAPM_WILL_PMD);
+ dapm_seq_check_event(card, w, SND_SOC_DAPM_WILL_PMD);
}
list_for_each_entry(w, &up_list, power_list) {
- dapm_seq_check_event(dapm, w, SND_SOC_DAPM_WILL_PMU);
+ dapm_seq_check_event(card, w, SND_SOC_DAPM_WILL_PMU);
}
/* Power down widgets first; try to avoid amplifying pops. */
- dapm_seq_run(dapm, &down_list, event, false);
+ dapm_seq_run(card, &down_list, event, false);
- dapm_widget_update(dapm);
+ dapm_widget_update(card);
/* Now power up. */
- dapm_seq_run(dapm, &up_list, event, true);
+ dapm_seq_run(card, &up_list, event, true);
/* Run all the bias changes in parallel */
- list_for_each_entry(d, &dapm->card->dapm_list, list)
+ list_for_each_entry(d, &card->dapm_list, list)
async_schedule_domain(dapm_post_sequence_async, d,
&async_domain);
async_synchronize_full_domain(&async_domain);
@@ -1763,7 +1903,7 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
d->stream_event(d, event);
}
- pop_dbg(dapm->dev, card->pop_time,
+ pop_dbg(card->dev, card->pop_time,
"DAPM sequencing finished, waiting %dms\n", card->pop_time);
pop_wait(card->pop_time);
@@ -1798,8 +1938,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
if (w->reg >= 0)
ret += snprintf(buf + ret, PAGE_SIZE - ret,
- " - R%d(0x%x) bit %d",
- w->reg, w->reg, w->shift);
+ " - R%d(0x%x) mask 0x%x",
+ w->reg, w->reg, w->mask << w->shift);
ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
@@ -1936,22 +2076,14 @@ static inline void dapm_debugfs_cleanup(struct snd_soc_dapm_context *dapm)
#endif
/* test and update the power status of a mux widget */
-static int soc_dapm_mux_update_power(struct snd_soc_dapm_widget *widget,
+static int soc_dapm_mux_update_power(struct snd_soc_card *card,
struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e)
{
struct snd_soc_dapm_path *path;
int found = 0;
- if (widget->id != snd_soc_dapm_mux &&
- widget->id != snd_soc_dapm_virt_mux &&
- widget->id != snd_soc_dapm_value_mux)
- return -ENODEV;
-
/* find dapm widget path assoc with kcontrol */
- list_for_each_entry(path, &widget->dapm->card->paths, list) {
- if (path->kcontrol != kcontrol)
- continue;
-
+ dapm_kcontrol_for_each_path(path, kcontrol) {
if (!path->name || !e->texts[mux])
continue;
@@ -1966,73 +2098,68 @@ static int soc_dapm_mux_update_power(struct snd_soc_dapm_widget *widget,
"mux disconnection");
path->connect = 0; /* old connection must be powered down */
}
+ dapm_mark_dirty(path->sink, "mux change");
}
- if (found) {
- dapm_mark_dirty(widget, "mux change");
- dapm_power_widgets(widget->dapm, SND_SOC_DAPM_STREAM_NOP);
- }
+ if (found)
+ dapm_power_widgets(card, SND_SOC_DAPM_STREAM_NOP);
return found;
}
-int snd_soc_dapm_mux_update_power(struct snd_soc_dapm_widget *widget,
- struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e)
+int snd_soc_dapm_mux_update_power(struct snd_soc_dapm_context *dapm,
+ struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e,
+ struct snd_soc_dapm_update *update)
{
- struct snd_soc_card *card = widget->dapm->card;
+ struct snd_soc_card *card = dapm->card;
int ret;
mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
- ret = soc_dapm_mux_update_power(widget, kcontrol, mux, e);
+ card->update = update;
+ ret = soc_dapm_mux_update_power(card, kcontrol, mux, e);
+ card->update = NULL;
mutex_unlock(&card->dapm_mutex);
if (ret > 0)
- soc_dpcm_runtime_update(widget);
+ soc_dpcm_runtime_update(card);
return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_mux_update_power);
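/*
 * Editor's note: an illustrative caller updated for the new signature, which
 * takes a DAPM context and an explicit update descriptor instead of a widget.
 * Everything except the exported helper and the soc_enum/update fields shown
 * in this patch is hypothetical, and a register-backed mux is assumed.
 */
static int my_set_input_mux(struct snd_soc_codec *codec,
			    struct snd_kcontrol *kcontrol,
			    struct soc_enum *e, unsigned int mux)
{
	struct snd_soc_dapm_update update = {
		.kcontrol = kcontrol,
		.reg = e->reg,
		.mask = e->mask << e->shift_l,
		.val = mux << e->shift_l,
	};

	/* applies the register change and repowers the paths behind kcontrol */
	return snd_soc_dapm_mux_update_power(&codec->dapm, kcontrol, mux, e,
					     &update);
}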
/* test and update the power status of a mixer or switch widget */
-static int soc_dapm_mixer_update_power(struct snd_soc_dapm_widget *widget,
+static int soc_dapm_mixer_update_power(struct snd_soc_card *card,
struct snd_kcontrol *kcontrol, int connect)
{
struct snd_soc_dapm_path *path;
int found = 0;
- if (widget->id != snd_soc_dapm_mixer &&
- widget->id != snd_soc_dapm_mixer_named_ctl &&
- widget->id != snd_soc_dapm_switch)
- return -ENODEV;
-
/* find dapm widget path assoc with kcontrol */
- list_for_each_entry(path, &widget->dapm->card->paths, list) {
- if (path->kcontrol != kcontrol)
- continue;
-
- /* found, now check type */
+ dapm_kcontrol_for_each_path(path, kcontrol) {
found = 1;
path->connect = connect;
dapm_mark_dirty(path->source, "mixer connection");
+ dapm_mark_dirty(path->sink, "mixer update");
}
- if (found) {
- dapm_mark_dirty(widget, "mixer update");
- dapm_power_widgets(widget->dapm, SND_SOC_DAPM_STREAM_NOP);
- }
+ if (found)
+ dapm_power_widgets(card, SND_SOC_DAPM_STREAM_NOP);
return found;
}
-int snd_soc_dapm_mixer_update_power(struct snd_soc_dapm_widget *widget,
- struct snd_kcontrol *kcontrol, int connect)
+int snd_soc_dapm_mixer_update_power(struct snd_soc_dapm_context *dapm,
+ struct snd_kcontrol *kcontrol, int connect,
+ struct snd_soc_dapm_update *update)
{
- struct snd_soc_card *card = widget->dapm->card;
+ struct snd_soc_card *card = dapm->card;
int ret;
mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
- ret = soc_dapm_mixer_update_power(widget, kcontrol, connect);
+ card->update = update;
+ ret = soc_dapm_mixer_update_power(card, kcontrol, connect);
+ card->update = NULL;
mutex_unlock(&card->dapm_mutex);
if (ret > 0)
- soc_dpcm_runtime_update(widget);
+ soc_dpcm_runtime_update(card);
return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_mixer_update_power);
@@ -2111,6 +2238,7 @@ static void dapm_free_path(struct snd_soc_dapm_path *path)
{
list_del(&path->list_sink);
list_del(&path->list_source);
+ list_del(&path->list_kcontrol);
list_del(&path->list);
kfree(path);
}
@@ -2205,70 +2333,20 @@ int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm)
return 0;
mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
- ret = dapm_power_widgets(dapm, SND_SOC_DAPM_STREAM_NOP);
+ ret = dapm_power_widgets(dapm->card, SND_SOC_DAPM_STREAM_NOP);
mutex_unlock(&dapm->card->dapm_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_sync);
-static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
- const struct snd_soc_dapm_route *route)
+static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm,
+ struct snd_soc_dapm_widget *wsource, struct snd_soc_dapm_widget *wsink,
+ const char *control,
+ int (*connected)(struct snd_soc_dapm_widget *source,
+ struct snd_soc_dapm_widget *sink))
{
struct snd_soc_dapm_path *path;
- struct snd_soc_dapm_widget *wsource = NULL, *wsink = NULL, *w;
- struct snd_soc_dapm_widget *wtsource = NULL, *wtsink = NULL;
- const char *sink;
- const char *control = route->control;
- const char *source;
- char prefixed_sink[80];
- char prefixed_source[80];
- int ret = 0;
-
- if (dapm->codec && dapm->codec->name_prefix) {
- snprintf(prefixed_sink, sizeof(prefixed_sink), "%s %s",
- dapm->codec->name_prefix, route->sink);
- sink = prefixed_sink;
- snprintf(prefixed_source, sizeof(prefixed_source), "%s %s",
- dapm->codec->name_prefix, route->source);
- source = prefixed_source;
- } else {
- sink = route->sink;
- source = route->source;
- }
-
- /*
- * find src and dest widgets over all widgets but favor a widget from
- * current DAPM context
- */
- list_for_each_entry(w, &dapm->card->widgets, list) {
- if (!wsink && !(strcmp(w->name, sink))) {
- wtsink = w;
- if (w->dapm == dapm)
- wsink = w;
- continue;
- }
- if (!wsource && !(strcmp(w->name, source))) {
- wtsource = w;
- if (w->dapm == dapm)
- wsource = w;
- }
- }
- /* use widget from another DAPM context if not found from this */
- if (!wsink)
- wsink = wtsink;
- if (!wsource)
- wsource = wtsource;
-
- if (wsource == NULL) {
- dev_err(dapm->dev, "ASoC: no source widget found for %s\n",
- route->source);
- return -ENODEV;
- }
- if (wsink == NULL) {
- dev_err(dapm->dev, "ASoC: no sink widget found for %s\n",
- route->sink);
- return -ENODEV;
- }
+ int ret;
path = kzalloc(sizeof(struct snd_soc_dapm_path), GFP_KERNEL);
if (!path)
@@ -2276,8 +2354,9 @@ static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
path->source = wsource;
path->sink = wsink;
- path->connected = route->connected;
+ path->connected = connected;
INIT_LIST_HEAD(&path->list);
+ INIT_LIST_HEAD(&path->list_kcontrol);
INIT_LIST_HEAD(&path->list_source);
INIT_LIST_HEAD(&path->list_sink);
@@ -2297,6 +2376,9 @@ static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
wsource->ext = 1;
}
+ dapm_mark_dirty(wsource, "Route added");
+ dapm_mark_dirty(wsink, "Route added");
+
/* connect static paths */
if (control == NULL) {
list_add(&path->list, &dapm->card->paths);
@@ -2327,6 +2409,7 @@ static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
case snd_soc_dapm_dai_in:
case snd_soc_dapm_dai_out:
case snd_soc_dapm_dai_link:
+ case snd_soc_dapm_kcontrol:
list_add(&path->list, &dapm->card->paths);
list_add(&path->list_sink, &wsink->sources);
list_add(&path->list_source, &wsource->sinks);
@@ -2358,15 +2441,78 @@ static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
return 0;
}
- dapm_mark_dirty(wsource, "Route added");
- dapm_mark_dirty(wsink, "Route added");
-
return 0;
+err:
+ kfree(path);
+ return ret;
+}
+
+static int snd_soc_dapm_add_route(struct snd_soc_dapm_context *dapm,
+ const struct snd_soc_dapm_route *route)
+{
+ struct snd_soc_dapm_widget *wsource = NULL, *wsink = NULL, *w;
+ struct snd_soc_dapm_widget *wtsource = NULL, *wtsink = NULL;
+ const char *sink;
+ const char *source;
+ char prefixed_sink[80];
+ char prefixed_source[80];
+ int ret;
+
+ if (dapm->codec && dapm->codec->name_prefix) {
+ snprintf(prefixed_sink, sizeof(prefixed_sink), "%s %s",
+ dapm->codec->name_prefix, route->sink);
+ sink = prefixed_sink;
+ snprintf(prefixed_source, sizeof(prefixed_source), "%s %s",
+ dapm->codec->name_prefix, route->source);
+ source = prefixed_source;
+ } else {
+ sink = route->sink;
+ source = route->source;
+ }
+
+ /*
+ * find src and dest widgets over all widgets but favor a widget from
+ * current DAPM context
+ */
+ list_for_each_entry(w, &dapm->card->widgets, list) {
+ if (!wsink && !(strcmp(w->name, sink))) {
+ wtsink = w;
+ if (w->dapm == dapm)
+ wsink = w;
+ continue;
+ }
+ if (!wsource && !(strcmp(w->name, source))) {
+ wtsource = w;
+ if (w->dapm == dapm)
+ wsource = w;
+ }
+ }
+ /* use widget from another DAPM context if not found from this */
+ if (!wsink)
+ wsink = wtsink;
+ if (!wsource)
+ wsource = wtsource;
+ if (wsource == NULL) {
+ dev_err(dapm->dev, "ASoC: no source widget found for %s\n",
+ route->source);
+ return -ENODEV;
+ }
+ if (wsink == NULL) {
+ dev_err(dapm->dev, "ASoC: no sink widget found for %s\n",
+ route->sink);
+ return -ENODEV;
+ }
+
+ ret = snd_soc_dapm_add_path(dapm, wsource, wsink, route->control,
+ route->connected);
+ if (ret)
+ goto err;
+
+ return 0;
err:
dev_warn(dapm->dev, "ASoC: no dapm match for %s --> %s --> %s\n",
- source, control, sink);
- kfree(path);
+ source, route->control, sink);
return ret;
}
@@ -2568,14 +2714,14 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_weak_routes);
*
* Returns 0 for success.
*/
-int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
+int snd_soc_dapm_new_widgets(struct snd_soc_card *card)
{
struct snd_soc_dapm_widget *w;
unsigned int val;
- mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_INIT);
+ mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_INIT);
- list_for_each_entry(w, &dapm->card->widgets, list)
+ list_for_each_entry(w, &card->widgets, list)
{
if (w->new)
continue;
@@ -2585,7 +2731,7 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
sizeof(struct snd_kcontrol *),
GFP_KERNEL);
if (!w->kcontrols) {
- mutex_unlock(&dapm->card->dapm_mutex);
+ mutex_unlock(&card->dapm_mutex);
return -ENOMEM;
}
}
@@ -2611,12 +2757,9 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
/* Read the initial power state from the device */
if (w->reg >= 0) {
- val = soc_widget_read(w, w->reg);
- val &= 1 << w->shift;
- if (w->invert)
- val = !val;
-
- if (val)
+ val = soc_widget_read(w, w->reg) >> w->shift;
+ val &= w->mask;
+ if (val == w->on_val)
w->power = 1;
}
@@ -2626,8 +2769,8 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
dapm_debugfs_add_widget(w);
}
- dapm_power_widgets(dapm, SND_SOC_DAPM_STREAM_NOP);
- mutex_unlock(&dapm->card->dapm_mutex);
+ dapm_power_widgets(card, SND_SOC_DAPM_STREAM_NOP);
+ mutex_unlock(&card->dapm_mutex);
return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_new_widgets);
@@ -2644,8 +2787,8 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_new_widgets);
int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
+ struct snd_soc_card *card = codec->card;
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
unsigned int reg = mc->reg;
@@ -2653,17 +2796,24 @@ int snd_soc_dapm_get_volsw(struct snd_kcontrol *kcontrol,
int max = mc->max;
unsigned int mask = (1 << fls(max)) - 1;
unsigned int invert = mc->invert;
+ unsigned int val;
if (snd_soc_volsw_is_stereo(mc))
- dev_warn(widget->dapm->dev,
+ dev_warn(codec->dapm.dev,
"ASoC: Control '%s' is stereo, which is not supported\n",
kcontrol->id.name);
- ucontrol->value.integer.value[0] =
- (snd_soc_read(widget->codec, reg) >> shift) & mask;
+ mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+ if (dapm_kcontrol_is_powered(kcontrol))
+ val = (snd_soc_read(codec, reg) >> shift) & mask;
+ else
+ val = dapm_kcontrol_get_value(kcontrol);
+ mutex_unlock(&card->dapm_mutex);
+
if (invert)
- ucontrol->value.integer.value[0] =
- max - ucontrol->value.integer.value[0];
+ ucontrol->value.integer.value[0] = max - val;
+ else
+ ucontrol->value.integer.value[0] = val;
return 0;
}
@@ -2681,9 +2831,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_get_volsw);
int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
- struct snd_soc_codec *codec = widget->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
struct snd_soc_card *card = codec->card;
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
@@ -2695,10 +2843,9 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
unsigned int val;
int connect, change;
struct snd_soc_dapm_update update;
- int wi;
if (snd_soc_volsw_is_stereo(mc))
- dev_warn(widget->dapm->dev,
+ dev_warn(codec->dapm.dev,
"ASoC: Control '%s' is stereo, which is not supported\n",
kcontrol->id.name);
@@ -2707,33 +2854,30 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
if (invert)
val = max - val;
- mask = mask << shift;
- val = val << shift;
mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
- change = snd_soc_test_bits(widget->codec, reg, mask, val);
- if (change) {
- for (wi = 0; wi < wlist->num_widgets; wi++) {
- widget = wlist->widgets[wi];
+ dapm_kcontrol_set_value(kcontrol, val);
+
+ mask = mask << shift;
+ val = val << shift;
- widget->value = val;
+ change = snd_soc_test_bits(codec, reg, mask, val);
+ if (change) {
+ update.kcontrol = kcontrol;
+ update.reg = reg;
+ update.mask = mask;
+ update.val = val;
- update.kcontrol = kcontrol;
- update.widget = widget;
- update.reg = reg;
- update.mask = mask;
- update.val = val;
- widget->dapm->update = &update;
+ card->update = &update;
- soc_dapm_mixer_update_power(widget, kcontrol, connect);
+ soc_dapm_mixer_update_power(card, kcontrol, connect);
- widget->dapm->update = NULL;
- }
+ card->update = NULL;
}
mutex_unlock(&card->dapm_mutex);
- return 0;
+ return change;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_put_volsw);
@@ -2749,12 +2893,11 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_put_volsw);
int snd_soc_dapm_get_enum_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int val;
- val = snd_soc_read(widget->codec, e->reg);
+ val = snd_soc_read(codec, e->reg);
ucontrol->value.enumerated.item[0] = (val >> e->shift_l) & e->mask;
if (e->shift_l != e->shift_r)
ucontrol->value.enumerated.item[1] =
@@ -2776,15 +2919,12 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_get_enum_double);
int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
- struct snd_soc_codec *codec = widget->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
struct snd_soc_card *card = codec->card;
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int val, mux, change;
unsigned int mask;
struct snd_soc_dapm_update update;
- int wi;
if (ucontrol->value.enumerated.item[0] > e->max - 1)
return -EINVAL;
@@ -2800,24 +2940,17 @@ int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
- change = snd_soc_test_bits(widget->codec, e->reg, mask, val);
+ change = snd_soc_test_bits(codec, e->reg, mask, val);
if (change) {
- for (wi = 0; wi < wlist->num_widgets; wi++) {
- widget = wlist->widgets[wi];
-
- widget->value = val;
+ update.kcontrol = kcontrol;
+ update.reg = e->reg;
+ update.mask = mask;
+ update.val = val;
+ card->update = &update;
- update.kcontrol = kcontrol;
- update.widget = widget;
- update.reg = e->reg;
- update.mask = mask;
- update.val = val;
- widget->dapm->update = &update;
+ soc_dapm_mux_update_power(card, kcontrol, mux, e);
- soc_dapm_mux_update_power(widget, kcontrol, mux, e);
-
- widget->dapm->update = NULL;
- }
+ card->update = NULL;
}
mutex_unlock(&card->dapm_mutex);
@@ -2835,11 +2968,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_put_enum_double);
int snd_soc_dapm_get_enum_virt(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
-
- ucontrol->value.enumerated.item[0] = widget->value;
-
+ ucontrol->value.enumerated.item[0] = dapm_kcontrol_get_value(kcontrol);
return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_get_enum_virt);
@@ -2854,34 +2983,25 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_get_enum_virt);
int snd_soc_dapm_put_enum_virt(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
- struct snd_soc_codec *codec = widget->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
struct snd_soc_card *card = codec->card;
+ unsigned int value;
struct soc_enum *e =
(struct soc_enum *)kcontrol->private_value;
int change;
- int ret = 0;
- int wi;
if (ucontrol->value.enumerated.item[0] >= e->max)
return -EINVAL;
mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
- change = widget->value != ucontrol->value.enumerated.item[0];
- if (change) {
- for (wi = 0; wi < wlist->num_widgets; wi++) {
- widget = wlist->widgets[wi];
-
- widget->value = ucontrol->value.enumerated.item[0];
-
- soc_dapm_mux_update_power(widget, kcontrol, widget->value, e);
- }
- }
+ value = ucontrol->value.enumerated.item[0];
+ change = dapm_kcontrol_set_value(kcontrol, value);
+ if (change)
+ soc_dapm_mux_update_power(card, kcontrol, value, e);
mutex_unlock(&card->dapm_mutex);
- return ret;
+ return change;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_put_enum_virt);
@@ -2901,12 +3021,11 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_put_enum_virt);
int snd_soc_dapm_get_value_enum_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
+ struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int reg_val, val, mux;
- reg_val = snd_soc_read(widget->codec, e->reg);
+ reg_val = snd_soc_read(codec, e->reg);
val = (reg_val >> e->shift_l) & e->mask;
for (mux = 0; mux < e->max; mux++) {
if (val == e->values[mux])
@@ -2942,15 +3061,12 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_get_value_enum_double);
int snd_soc_dapm_put_value_enum_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_dapm_widget_list *wlist = snd_kcontrol_chip(kcontrol);
- struct snd_soc_dapm_widget *widget = wlist->widgets[0];
- struct snd_soc_codec *codec = widget->codec;
+ struct snd_soc_codec *codec = snd_soc_dapm_kcontrol_codec(kcontrol);
struct snd_soc_card *card = codec->card;
struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
unsigned int val, mux, change;
unsigned int mask;
struct snd_soc_dapm_update update;
- int wi;
if (ucontrol->value.enumerated.item[0] > e->max - 1)
return -EINVAL;
@@ -2966,24 +3082,17 @@ int snd_soc_dapm_put_value_enum_double(struct snd_kcontrol *kcontrol,
mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
- change = snd_soc_test_bits(widget->codec, e->reg, mask, val);
+ change = snd_soc_test_bits(codec, e->reg, mask, val);
if (change) {
- for (wi = 0; wi < wlist->num_widgets; wi++) {
- widget = wlist->widgets[wi];
-
- widget->value = val;
+ update.kcontrol = kcontrol;
+ update.reg = e->reg;
+ update.mask = mask;
+ update.val = val;
+ card->update = &update;
- update.kcontrol = kcontrol;
- update.widget = widget;
- update.reg = e->reg;
- update.mask = mask;
- update.val = val;
- widget->dapm->update = &update;
+ soc_dapm_mux_update_power(card, kcontrol, mux, e);
- soc_dapm_mux_update_power(widget, kcontrol, mux, e);
-
- widget->dapm->update = NULL;
- }
+ card->update = NULL;
}
mutex_unlock(&card->dapm_mutex);
@@ -3080,7 +3189,7 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
return NULL;
}
- if (w->invert & SND_SOC_DAPM_REGULATOR_BYPASS) {
+ if (w->on_val & SND_SOC_DAPM_REGULATOR_BYPASS) {
ret = regulator_allow_bypass(w->regulator, true);
if (ret != 0)
dev_warn(w->dapm->dev,
@@ -3127,16 +3236,16 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
case snd_soc_dapm_value_mux:
w->power_check = dapm_generic_check_power;
break;
- case snd_soc_dapm_adc:
- case snd_soc_dapm_aif_out:
case snd_soc_dapm_dai_out:
w->power_check = dapm_adc_check_power;
break;
- case snd_soc_dapm_dac:
- case snd_soc_dapm_aif_in:
case snd_soc_dapm_dai_in:
w->power_check = dapm_dac_check_power;
break;
+ case snd_soc_dapm_adc:
+ case snd_soc_dapm_aif_out:
+ case snd_soc_dapm_dac:
+ case snd_soc_dapm_aif_in:
case snd_soc_dapm_pga:
case snd_soc_dapm_out_drv:
case snd_soc_dapm_input:
@@ -3152,6 +3261,7 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
case snd_soc_dapm_supply:
case snd_soc_dapm_regulator_supply:
case snd_soc_dapm_clock_supply:
+ case snd_soc_dapm_kcontrol:
w->power_check = dapm_supply_check_power;
break;
default:
@@ -3416,9 +3526,6 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
{
struct snd_soc_dapm_widget *dai_w, *w;
struct snd_soc_dai *dai;
- struct snd_soc_dapm_route r;
-
- memset(&r, 0, sizeof(r));
/* For each DAI widget... */
list_for_each_entry(dai_w, &card->widgets, list) {
@@ -3445,29 +3552,27 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
break;
}
- if (!w->sname)
+ if (!w->sname || !strstr(w->sname, dai_w->name))
continue;
if (dai->driver->playback.stream_name &&
strstr(w->sname,
dai->driver->playback.stream_name)) {
- r.source = dai->playback_widget->name;
- r.sink = w->name;
dev_dbg(dai->dev, "%s -> %s\n",
- r.source, r.sink);
+ dai->playback_widget->name, w->name);
- snd_soc_dapm_add_route(w->dapm, &r);
+ snd_soc_dapm_add_path(w->dapm,
+ dai->playback_widget, w, NULL, NULL);
}
if (dai->driver->capture.stream_name &&
strstr(w->sname,
dai->driver->capture.stream_name)) {
- r.source = w->name;
- r.sink = dai->capture_widget->name;
dev_dbg(dai->dev, "%s -> %s\n",
- r.source, r.sink);
+ w->name, dai->capture_widget->name);
- snd_soc_dapm_add_route(w->dapm, &r);
+ snd_soc_dapm_add_path(w->dapm, w,
+ dai->capture_widget, NULL, NULL);
}
}
}
@@ -3529,7 +3634,7 @@ static void soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
}
}
- dapm_power_widgets(&rtd->card->dapm, event);
+ dapm_power_widgets(rtd->card, event);
}
/**
@@ -3798,7 +3903,7 @@ static void soc_dapm_shutdown_codec(struct snd_soc_dapm_context *dapm)
if (dapm->bias_level == SND_SOC_BIAS_ON)
snd_soc_dapm_set_bias_level(dapm,
SND_SOC_BIAS_PREPARE);
- dapm_seq_run(dapm, &down_list, 0, false);
+ dapm_seq_run(card, &down_list, 0, false);
if (dapm->bias_level == SND_SOC_BIAS_PREPARE)
snd_soc_dapm_set_bias_level(dapm,
SND_SOC_BIAS_STANDBY);
diff --git a/sound/soc/soc-io.c b/sound/soc/soc-io.c
index 8ca9ecc5ac5..122c0c18b9d 100644
--- a/sound/soc/soc-io.c
+++ b/sound/soc/soc-io.c
@@ -158,7 +158,7 @@ int snd_soc_codec_set_cache_io(struct snd_soc_codec *codec,
return -EINVAL;
}
- return PTR_RET(codec->control_data);
+ return PTR_ERR_OR_ZERO(codec->control_data);
}
EXPORT_SYMBOL_GPL(snd_soc_codec_set_cache_io);
#else
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index 0bb5cccd776..71358e3b54d 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -183,8 +183,6 @@ int snd_soc_jack_add_pins(struct snd_soc_jack *jack, int count,
list_add(&(pins[i].list), &jack->pins);
}
- snd_soc_dapm_new_widgets(&jack->codec->card->dapm);
-
/* Update to reflect the last reported status; canned jack
* implementations are likely to set their state before the
* card has an opportunity to associate pins.
@@ -263,7 +261,7 @@ static irqreturn_t gpio_handler(int irq, void *data)
if (device_may_wakeup(dev))
pm_wakeup_event(dev, gpio->debounce_time + 50);
- schedule_delayed_work(&gpio->work,
+ queue_delayed_work(system_power_efficient_wq, &gpio->work,
msecs_to_jiffies(gpio->debounce_time));
return IRQ_HANDLED;
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index b6c640332a1..330c9a6b5cb 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -411,8 +411,9 @@ static int soc_pcm_close(struct snd_pcm_substream *substream)
} else {
/* start delayed pop wq here for playback streams */
rtd->pop_wait = 1;
- schedule_delayed_work(&rtd->delayed_work,
- msecs_to_jiffies(rtd->pmdown_time));
+ queue_delayed_work(system_power_efficient_wq,
+ &rtd->delayed_work,
+ msecs_to_jiffies(rtd->pmdown_time));
}
} else {
/* capture streams can be powered down now */
@@ -1832,18 +1833,10 @@ static int dpcm_run_old_update(struct snd_soc_pcm_runtime *fe, int stream)
/* Called by DAPM mixer/mux changes to update audio routing between PCMs and
* any DAI links.
*/
-int soc_dpcm_runtime_update(struct snd_soc_dapm_widget *widget)
+int soc_dpcm_runtime_update(struct snd_soc_card *card)
{
- struct snd_soc_card *card;
int i, old, new, paths;
- if (widget->codec)
- card = widget->codec->card;
- else if (widget->platform)
- card = widget->platform->card;
- else
- return -EINVAL;
-
mutex_lock_nested(&card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
for (i = 0; i < card->num_rtd; i++) {
struct snd_soc_dapm_widget_list *list;
@@ -2027,6 +2020,16 @@ int soc_new_pcm(struct snd_soc_pcm_runtime *rtd, int num)
capture = 1;
}
+ if (rtd->dai_link->playback_only) {
+ playback = 1;
+ capture = 0;
+ }
+
+ if (rtd->dai_link->capture_only) {
+ playback = 0;
+ capture = 1;
+ }
+
/* create the PCM */
if (rtd->dai_link->no_pcm) {
snprintf(new_name, sizeof(new_name), "(%s)",
diff --git a/sound/soc/spear/Kconfig b/sound/soc/spear/Kconfig
index 3567d73b218..0a53053495f 100644
--- a/sound/soc/spear/Kconfig
+++ b/sound/soc/spear/Kconfig
@@ -1,6 +1,6 @@
config SND_SPEAR_SOC
tristate
- select SND_SOC_DMAENGINE_PCM
+ select SND_DMAENGINE_PCM
config SND_SPEAR_SPDIF_OUT
tristate
diff --git a/sound/soc/tegra/Kconfig b/sound/soc/tegra/Kconfig
index 995b120c2cd..8fc653ca3ab 100644
--- a/sound/soc/tegra/Kconfig
+++ b/sound/soc/tegra/Kconfig
@@ -1,8 +1,8 @@
config SND_SOC_TEGRA
tristate "SoC Audio for the Tegra System-on-Chip"
- depends on ARCH_TEGRA && TEGRA20_APB_DMA
+ depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST
select REGMAP_MMIO
- select SND_SOC_GENERIC_DMAENGINE_PCM if TEGRA20_APB_DMA
+ select SND_SOC_GENERIC_DMAENGINE_PCM
help
Say Y or M here if you want support for SoC audio on Tegra.
@@ -61,7 +61,7 @@ config SND_SOC_TEGRA30_I2S
config SND_SOC_TEGRA_RT5640
tristate "SoC Audio support for Tegra boards using an RT5640 codec"
- depends on SND_SOC_TEGRA && I2C
+ depends on SND_SOC_TEGRA && I2C && GPIOLIB
select SND_SOC_TEGRA20_I2S if ARCH_TEGRA_2x_SOC
select SND_SOC_TEGRA30_I2S if ARCH_TEGRA_3x_SOC
select SND_SOC_RT5640
@@ -71,7 +71,7 @@ config SND_SOC_TEGRA_RT5640
config SND_SOC_TEGRA_WM8753
tristate "SoC Audio support for Tegra boards using a WM8753 codec"
- depends on SND_SOC_TEGRA && I2C
+ depends on SND_SOC_TEGRA && I2C && GPIOLIB
select SND_SOC_TEGRA20_I2S if ARCH_TEGRA_2x_SOC
select SND_SOC_TEGRA30_I2S if ARCH_TEGRA_3x_SOC
select SND_SOC_WM8753
@@ -81,7 +81,7 @@ config SND_SOC_TEGRA_WM8753
config SND_SOC_TEGRA_WM8903
tristate "SoC Audio support for Tegra boards using a WM8903 codec"
- depends on SND_SOC_TEGRA && I2C
+ depends on SND_SOC_TEGRA && I2C && GPIOLIB
select SND_SOC_TEGRA20_I2S if ARCH_TEGRA_2x_SOC
select SND_SOC_TEGRA30_I2S if ARCH_TEGRA_3x_SOC
select SND_SOC_WM8903
@@ -92,7 +92,7 @@ config SND_SOC_TEGRA_WM8903
config SND_SOC_TEGRA_WM9712
tristate "SoC Audio support for Tegra boards using a WM9712 codec"
- depends on SND_SOC_TEGRA && ARCH_TEGRA_2x_SOC
+ depends on SND_SOC_TEGRA && ARCH_TEGRA_2x_SOC && GPIOLIB
select SND_SOC_TEGRA20_AC97
select SND_SOC_WM9712
help
@@ -110,7 +110,7 @@ config SND_SOC_TEGRA_TRIMSLICE
config SND_SOC_TEGRA_ALC5632
tristate "SoC Audio support for Tegra boards using an ALC5632 codec"
- depends on SND_SOC_TEGRA && I2C
+ depends on SND_SOC_TEGRA && I2C && GPIOLIB
select SND_SOC_TEGRA20_I2S if ARCH_TEGRA_2x_SOC
select SND_SOC_ALC5632
help
diff --git a/sound/soc/tegra/tegra20_ac97.c b/sound/soc/tegra/tegra20_ac97.c
index e58233f7df6..ae27bcd586d 100644
--- a/sound/soc/tegra/tegra20_ac97.c
+++ b/sound/soc/tegra/tegra20_ac97.c
@@ -334,12 +334,6 @@ static int tegra20_ac97_platform_probe(struct platform_device *pdev)
}
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!mem) {
- dev_err(&pdev->dev, "No memory resource\n");
- ret = -ENODEV;
- goto err_clk_put;
- }
-
regs = devm_ioremap_resource(&pdev->dev, mem);
if (IS_ERR(regs)) {
ret = PTR_ERR(regs);
@@ -389,9 +383,9 @@ static int tegra20_ac97_platform_probe(struct platform_device *pdev)
ac97->capture_dma_data.slave_id = of_dma[1];
ac97->playback_dma_data.addr = mem->start + TEGRA20_AC97_FIFO_TX1;
- ac97->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- ac97->capture_dma_data.maxburst = 4;
- ac97->capture_dma_data.slave_id = of_dma[0];
+ ac97->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ ac97->playback_dma_data.maxburst = 4;
+ ac97->playback_dma_data.slave_id = of_dma[1];
ret = tegra_asoc_utils_init(&ac97->util_data, &pdev->dev);
if (ret)
@@ -432,8 +426,6 @@ static int tegra20_ac97_platform_probe(struct platform_device *pdev)
return 0;
-err_unregister_pcm:
- tegra_pcm_platform_unregister(&pdev->dev);
err_unregister_component:
snd_soc_unregister_component(&pdev->dev);
err_asoc_utils_fini:
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
index 5eaa12cdc6e..551b3c93ce9 100644
--- a/sound/soc/tegra/tegra20_spdif.c
+++ b/sound/soc/tegra/tegra20_spdif.c
@@ -323,8 +323,8 @@ static int tegra20_spdif_platform_probe(struct platform_device *pdev)
}
spdif->playback_dma_data.addr = mem->start + TEGRA20_SPDIF_DATA_OUT;
- spdif->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- spdif->capture_dma_data.maxburst = 4;
+ spdif->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ spdif->playback_dma_data.maxburst = 4;
spdif->playback_dma_data.slave_id = dmareq->start;
pm_runtime_enable(&pdev->dev);
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index d04146cad61..47565fd0450 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -228,7 +228,7 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream,
reg = TEGRA30_I2S_CIF_RX_CTRL;
} else {
val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX;
- reg = TEGRA30_I2S_CIF_RX_CTRL;
+ reg = TEGRA30_I2S_CIF_TX_CTRL;
}
regmap_write(i2s->regmap, reg, val);
diff --git a/sound/soc/tegra/tegra_alc5632.c b/sound/soc/tegra/tegra_alc5632.c
index 48d05d9e100..c61ea3a1030 100644
--- a/sound/soc/tegra/tegra_alc5632.c
+++ b/sound/soc/tegra/tegra_alc5632.c
@@ -13,8 +13,6 @@
* published by the Free Software Foundation.
*/
-#include <asm/mach-types.h>
-
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/sound/soc/tegra/tegra_rt5640.c b/sound/soc/tegra/tegra_rt5640.c
index 08794f915a9..4511c5a875e 100644
--- a/sound/soc/tegra/tegra_rt5640.c
+++ b/sound/soc/tegra/tegra_rt5640.c
@@ -99,6 +99,7 @@ static struct snd_soc_jack_gpio tegra_rt5640_hp_jack_gpio = {
static const struct snd_soc_dapm_widget tegra_rt5640_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphones", NULL),
SND_SOC_DAPM_SPK("Speakers", NULL),
+ SND_SOC_DAPM_MIC("Mic Jack", NULL),
};
static const struct snd_kcontrol_new tegra_rt5640_controls[] = {
diff --git a/sound/soc/tegra/tegra_wm8753.c b/sound/soc/tegra/tegra_wm8753.c
index f87fc53e9b8..8e774d1a243 100644
--- a/sound/soc/tegra/tegra_wm8753.c
+++ b/sound/soc/tegra/tegra_wm8753.c
@@ -28,8 +28,6 @@
*
*/
-#include <asm/mach-types.h>
-
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/sound/soc/tegra/trimslice.c b/sound/soc/tegra/trimslice.c
index 05c68aab5cf..734bfcd2114 100644
--- a/sound/soc/tegra/trimslice.c
+++ b/sound/soc/tegra/trimslice.c
@@ -24,8 +24,6 @@
*
*/
-#include <asm/mach-types.h>
-
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/sound/soc/txx9/txx9aclc-ac97.c b/sound/soc/txx9/txx9aclc-ac97.c
index 4bcce8a3cde..e0305a14856 100644
--- a/sound/soc/txx9/txx9aclc-ac97.c
+++ b/sound/soc/txx9/txx9aclc-ac97.c
@@ -184,9 +184,6 @@ static int txx9aclc_ac97_dev_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
- return -EBUSY;
-
drvdata->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(drvdata->base))
return PTR_ERR(drvdata->base);
diff --git a/sound/soc/ux500/mop500.c b/sound/soc/ux500/mop500.c
index 8f5cd00a6e4..178d1bad625 100644
--- a/sound/soc/ux500/mop500.c
+++ b/sound/soc/ux500/mop500.c
@@ -52,6 +52,7 @@ static struct snd_soc_dai_link mop500_dai_links[] = {
static struct snd_soc_card mop500_card = {
.name = "MOP500-card",
+ .owner = THIS_MODULE,
.probe = NULL,
.dai_link = mop500_dai_links,
.num_links = ARRAY_SIZE(mop500_dai_links),
diff --git a/sound/usb/6fire/comm.c b/sound/usb/6fire/comm.c
index 9e6e3ffd86b..23452ee617e 100644
--- a/sound/usb/6fire/comm.c
+++ b/sound/usb/6fire/comm.c
@@ -110,19 +110,37 @@ static int usb6fire_comm_send_buffer(u8 *buffer, struct usb_device *dev)
static int usb6fire_comm_write8(struct comm_runtime *rt, u8 request,
u8 reg, u8 value)
{
- u8 buffer[13]; /* 13: maximum length of message */
+ u8 *buffer;
+ int ret;
+
+ /* 13: maximum length of message */
+ buffer = kmalloc(13, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
usb6fire_comm_init_buffer(buffer, 0x00, request, reg, value, 0x00);
- return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+ ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+
+ kfree(buffer);
+ return ret;
}
static int usb6fire_comm_write16(struct comm_runtime *rt, u8 request,
u8 reg, u8 vl, u8 vh)
{
- u8 buffer[13]; /* 13: maximum length of message */
+ u8 *buffer;
+ int ret;
+
+ /* 13: maximum length of message */
+ buffer = kmalloc(13, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
usb6fire_comm_init_buffer(buffer, 0x00, request, reg, vl, vh);
- return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+ ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
+
+ kfree(buffer);
+ return ret;
}
int usb6fire_comm_init(struct sfire_chip *chip)
@@ -135,6 +153,12 @@ int usb6fire_comm_init(struct sfire_chip *chip)
if (!rt)
return -ENOMEM;
+ rt->receiver_buffer = kzalloc(COMM_RECEIVER_BUFSIZE, GFP_KERNEL);
+ if (!rt->receiver_buffer) {
+ kfree(rt);
+ return -ENOMEM;
+ }
+
urb = &rt->receiver;
rt->serial = 1;
rt->chip = chip;
@@ -153,6 +177,7 @@ int usb6fire_comm_init(struct sfire_chip *chip)
urb->interval = 1;
ret = usb_submit_urb(urb, GFP_KERNEL);
if (ret < 0) {
+ kfree(rt->receiver_buffer);
kfree(rt);
snd_printk(KERN_ERR PREFIX "cannot create comm data receiver.");
return ret;
@@ -171,6 +196,9 @@ void usb6fire_comm_abort(struct sfire_chip *chip)
void usb6fire_comm_destroy(struct sfire_chip *chip)
{
- kfree(chip->comm);
+ struct comm_runtime *rt = chip->comm;
+
+ kfree(rt->receiver_buffer);
+ kfree(rt);
chip->comm = NULL;
}
diff --git a/sound/usb/6fire/comm.h b/sound/usb/6fire/comm.h
index 6a0840b0dcf..780d5ed8e5d 100644
--- a/sound/usb/6fire/comm.h
+++ b/sound/usb/6fire/comm.h
@@ -24,7 +24,7 @@ struct comm_runtime {
struct sfire_chip *chip;
struct urb receiver;
- u8 receiver_buffer[COMM_RECEIVER_BUFSIZE];
+ u8 *receiver_buffer;
u8 serial; /* urb serial */
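
The comm.c and comm.h hunks above move the command and receiver buffers off the stack and out of the embedded struct into separately kmalloc'd memory; buffers handed to the USB core may be DMA-mapped, so they are expected to be heap allocations of their own rather than stack or embedded storage. A minimal sketch of the resulting ownership pattern, with illustrative names only:

    #include <linux/slab.h>
    #include <linux/types.h>

    struct rt_sketch {
            u8 *rx_buf;                     /* separately allocated, DMA-safe */
    };

    static int rt_sketch_init(struct rt_sketch *rt, size_t bufsize)
    {
            rt->rx_buf = kzalloc(bufsize, GFP_KERNEL);
            if (!rt->rx_buf)
                    return -ENOMEM;
            return 0;
    }

    static void rt_sketch_destroy(struct rt_sketch *rt)
    {
            kfree(rt->rx_buf);              /* every error path and destroy() frees it */
            rt->rx_buf = NULL;
    }
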
diff --git a/sound/usb/6fire/firmware.c b/sound/usb/6fire/firmware.c
index b9defcdeb7e..780bf3f62d2 100644
--- a/sound/usb/6fire/firmware.c
+++ b/sound/usb/6fire/firmware.c
@@ -346,10 +346,10 @@ static int usb6fire_fw_check(u8 *version)
if (!memcmp(version, known_fw_versions + i, 2))
return 0;
- snd_printk(KERN_ERR PREFIX "invalid fimware version in device: %*ph. "
+ snd_printk(KERN_ERR PREFIX "invalid fimware version in device: %4ph. "
"please reconnect to power. if this failure "
"still happens, check your firmware installation.",
- 4, version);
+ version);
return -EINVAL;
}
diff --git a/sound/usb/6fire/midi.c b/sound/usb/6fire/midi.c
index 26722423330..f3dd7266c39 100644
--- a/sound/usb/6fire/midi.c
+++ b/sound/usb/6fire/midi.c
@@ -19,6 +19,10 @@
#include "chip.h"
#include "comm.h"
+enum {
+ MIDI_BUFSIZE = 64
+};
+
static void usb6fire_midi_out_handler(struct urb *urb)
{
struct midi_runtime *rt = urb->context;
@@ -156,6 +160,12 @@ int usb6fire_midi_init(struct sfire_chip *chip)
if (!rt)
return -ENOMEM;
+ rt->out_buffer = kzalloc(MIDI_BUFSIZE, GFP_KERNEL);
+ if (!rt->out_buffer) {
+ kfree(rt);
+ return -ENOMEM;
+ }
+
rt->chip = chip;
rt->in_received = usb6fire_midi_in_received;
rt->out_buffer[0] = 0x80; /* 'send midi' command */
@@ -169,6 +179,7 @@ int usb6fire_midi_init(struct sfire_chip *chip)
ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance);
if (ret < 0) {
+ kfree(rt->out_buffer);
kfree(rt);
snd_printk(KERN_ERR PREFIX "unable to create midi.\n");
return ret;
@@ -197,6 +208,9 @@ void usb6fire_midi_abort(struct sfire_chip *chip)
void usb6fire_midi_destroy(struct sfire_chip *chip)
{
- kfree(chip->midi);
+ struct midi_runtime *rt = chip->midi;
+
+ kfree(rt->out_buffer);
+ kfree(rt);
chip->midi = NULL;
}
diff --git a/sound/usb/6fire/midi.h b/sound/usb/6fire/midi.h
index c321006e543..84851b9f555 100644
--- a/sound/usb/6fire/midi.h
+++ b/sound/usb/6fire/midi.h
@@ -16,10 +16,6 @@
#include "common.h"
-enum {
- MIDI_BUFSIZE = 64
-};
-
struct midi_runtime {
struct sfire_chip *chip;
struct snd_rawmidi *instance;
@@ -32,7 +28,7 @@ struct midi_runtime {
struct snd_rawmidi_substream *out;
struct urb out_urb;
u8 out_serial; /* serial number of out packet */
- u8 out_buffer[MIDI_BUFSIZE];
+ u8 *out_buffer;
int buffer_offset;
void (*in_received)(struct midi_runtime *rt, u8 *data, int length);
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
index 2aa4e13063a..b5eb97fdc84 100644
--- a/sound/usb/6fire/pcm.c
+++ b/sound/usb/6fire/pcm.c
@@ -543,7 +543,7 @@ static snd_pcm_uframes_t usb6fire_pcm_pointer(
snd_pcm_uframes_t ret;
if (rt->panic || !sub)
- return SNDRV_PCM_STATE_XRUN;
+ return SNDRV_PCM_POS_XRUN;
spin_lock_irqsave(&sub->lock, flags);
ret = sub->dma_off;
@@ -582,6 +582,33 @@ static void usb6fire_pcm_init_urb(struct pcm_urb *urb,
urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB;
}
+static int usb6fire_pcm_buffers_init(struct pcm_runtime *rt)
+{
+ int i;
+
+ for (i = 0; i < PCM_N_URBS; i++) {
+ rt->out_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
+ * PCM_MAX_PACKET_SIZE, GFP_KERNEL);
+ if (!rt->out_urbs[i].buffer)
+ return -ENOMEM;
+ rt->in_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB
+ * PCM_MAX_PACKET_SIZE, GFP_KERNEL);
+ if (!rt->in_urbs[i].buffer)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void usb6fire_pcm_buffers_destroy(struct pcm_runtime *rt)
+{
+ int i;
+
+ for (i = 0; i < PCM_N_URBS; i++) {
+ kfree(rt->out_urbs[i].buffer);
+ kfree(rt->in_urbs[i].buffer);
+ }
+}
+
int usb6fire_pcm_init(struct sfire_chip *chip)
{
int i;
@@ -593,6 +620,13 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
if (!rt)
return -ENOMEM;
+ ret = usb6fire_pcm_buffers_init(rt);
+ if (ret) {
+ usb6fire_pcm_buffers_destroy(rt);
+ kfree(rt);
+ return ret;
+ }
+
rt->chip = chip;
rt->stream_state = STREAM_DISABLED;
rt->rate = ARRAY_SIZE(rates);
@@ -614,6 +648,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
ret = snd_pcm_new(chip->card, "DMX6FireUSB", 0, 1, 1, &pcm);
if (ret < 0) {
+ usb6fire_pcm_buffers_destroy(rt);
kfree(rt);
snd_printk(KERN_ERR PREFIX "cannot create pcm instance.\n");
return ret;
@@ -625,6 +660,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip)
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_ops);
if (ret) {
+ usb6fire_pcm_buffers_destroy(rt);
kfree(rt);
snd_printk(KERN_ERR PREFIX
"error preallocating pcm buffers.\n");
@@ -669,6 +705,9 @@ void usb6fire_pcm_abort(struct sfire_chip *chip)
void usb6fire_pcm_destroy(struct sfire_chip *chip)
{
- kfree(chip->pcm);
+ struct pcm_runtime *rt = chip->pcm;
+
+ usb6fire_pcm_buffers_destroy(rt);
+ kfree(rt);
chip->pcm = NULL;
}
diff --git a/sound/usb/6fire/pcm.h b/sound/usb/6fire/pcm.h
index 9b01133ee3f..f5779d6182c 100644
--- a/sound/usb/6fire/pcm.h
+++ b/sound/usb/6fire/pcm.h
@@ -32,7 +32,7 @@ struct pcm_urb {
struct urb instance;
struct usb_iso_packet_descriptor packets[PCM_N_PACKETS_PER_URB];
/* END DO NOT SEPARATE */
- u8 buffer[PCM_N_PACKETS_PER_URB * PCM_MAX_PACKET_SIZE];
+ u8 *buffer;
struct pcm_urb *peer;
};
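
The pcm.c and pcm.h hunks follow the same idea for the per-URB isochronous buffers, moving them from embedded arrays to kzalloc'd pointers behind an init/destroy helper pair; the pointer-callback hunk separately switches to SNDRV_PCM_POS_XRUN, the position value a .pointer callback is expected to return to signal an xrun. A sketch of the alloc-all-or-clean-up-all pattern, assuming hypothetical names; since kfree(NULL) is a no-op, destroy() is safe to call even after a partial init:

    #include <linux/slab.h>
    #include <linux/types.h>

    #define N_BUFS 4                        /* stands in for PCM_N_URBS */

    struct urb_buf_sketch {
            u8 *buffer;
    };

    static int buffers_init(struct urb_buf_sketch *b, size_t sz)
    {
            int i;

            for (i = 0; i < N_BUFS; i++) {
                    b[i].buffer = kzalloc(sz, GFP_KERNEL);
                    if (!b[i].buffer)
                            return -ENOMEM; /* leaves earlier buffers allocated */
            }
            return 0;
    }

    static void buffers_destroy(struct urb_buf_sketch *b)
    {
            int i;

            for (i = 0; i < N_BUFS; i++)
                    kfree(b[i].buffer);     /* no-op for never-allocated entries,
                                             * so this also cleans up a partial init */
    }
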
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 7a444b5501d..93e970f2b3c 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -418,6 +418,9 @@ struct snd_usb_endpoint *snd_usb_add_endpoint(struct snd_usb_audio *chip,
struct snd_usb_endpoint *ep;
int is_playback = direction == SNDRV_PCM_STREAM_PLAYBACK;
+ if (WARN_ON(!alts))
+ return NULL;
+
mutex_lock(&chip->mutex);
list_for_each_entry(ep, &chip->ep_list, list) {
@@ -591,17 +594,16 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep,
ep->stride = frame_bits >> 3;
ep->silence_value = pcm_format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0;
- /* calculate max. frequency */
- if (ep->maxpacksize) {
+ /* assume max. frequency is 25% higher than nominal */
+ ep->freqmax = ep->freqn + (ep->freqn >> 2);
+ maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
+ >> (16 - ep->datainterval);
+ /* but wMaxPacketSize might reduce this */
+ if (ep->maxpacksize && ep->maxpacksize < maxsize) {
/* whatever fits into a max. size packet */
maxsize = ep->maxpacksize;
ep->freqmax = (maxsize / (frame_bits >> 3))
<< (16 - ep->datainterval);
- } else {
- /* no max. packet size: just take 25% higher than nominal */
- ep->freqmax = ep->freqn + (ep->freqn >> 2);
- maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
- >> (16 - ep->datainterval);
}
if (ep->fill_max)
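
The reordered logic above first assumes 25% headroom over the nominal rate and only then lets a smaller wMaxPacketSize cap the estimate, instead of letting wMaxPacketSize alone dictate freqmax. A standalone arithmetic sketch of the headroom estimate, with hand-picked values (48 kHz, 16-bit stereo, one packet per 1 ms full-speed frame) rather than the driver's real rate helpers:

    #include <stdio.h>

    int main(void)
    {
            unsigned int frame_bits   = 16 * 2;             /* sample bits * channels  */
            unsigned int datainterval = 0;                  /* one packet per frame    */
            unsigned int freqn   = 48 << 16;                /* 48 frames/ms in Q16.16  */
            unsigned int freqmax = freqn + (freqn >> 2);    /* +25%: 60 frames/ms      */
            unsigned int maxsize = ((freqmax + 0xffff) * (frame_bits >> 3))
                                            >> (16 - datainterval);

            /* prints 243: 60 frames (240 bytes) of headroom plus round-up slack */
            printf("maxsize = %u bytes per packet\n", maxsize);
            return 0;
    }
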
diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
index 6430ed2a9f6..c21a3df9a0d 100644
--- a/sound/usb/hiface/pcm.c
+++ b/sound/usb/hiface/pcm.c
@@ -503,7 +503,7 @@ static snd_pcm_uframes_t hiface_pcm_pointer(struct snd_pcm_substream *alsa_sub)
snd_pcm_uframes_t dma_offset;
if (rt->panic || !sub)
- return SNDRV_PCM_STATE_XRUN;
+ return SNDRV_PCM_POS_XRUN;
spin_lock_irqsave(&sub->lock, flags);
dma_offset = sub->dma_off;
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index d5438083fd6..95558ef4a7a 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -888,6 +888,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
+ case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
case USB_ID(0x046d, 0x0991):
/* Most audio usb devices lie about volume resolution.
* Most Logitech webcams have res = 384.
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 15b151ed489..b375d58871e 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -327,6 +327,137 @@ static int search_roland_implicit_fb(struct usb_device *dev, int ifnum,
return 0;
}
+static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
+ struct usb_device *dev,
+ struct usb_interface_descriptor *altsd,
+ unsigned int attr)
+{
+ struct usb_host_interface *alts;
+ struct usb_interface *iface;
+ unsigned int ep;
+
+ /* Implicit feedback sync EPs consumers are always playback EPs */
+ if (subs->direction != SNDRV_PCM_STREAM_PLAYBACK)
+ return 0;
+
+ switch (subs->stream->chip->usb_id) {
+ case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */
+ case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */
+ ep = 0x81;
+ iface = usb_ifnum_to_if(dev, 3);
+
+ if (!iface || iface->num_altsetting == 0)
+ return -EINVAL;
+
+ alts = &iface->altsetting[1];
+ goto add_sync_ep;
+ break;
+ case USB_ID(0x0763, 0x2080): /* M-Audio FastTrack Ultra */
+ case USB_ID(0x0763, 0x2081):
+ ep = 0x81;
+ iface = usb_ifnum_to_if(dev, 2);
+
+ if (!iface || iface->num_altsetting == 0)
+ return -EINVAL;
+
+ alts = &iface->altsetting[1];
+ goto add_sync_ep;
+ }
+ if (attr == USB_ENDPOINT_SYNC_ASYNC &&
+ altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
+ altsd->bInterfaceProtocol == 2 &&
+ altsd->bNumEndpoints == 1 &&
+ USB_ID_VENDOR(subs->stream->chip->usb_id) == 0x0582 /* Roland */ &&
+ search_roland_implicit_fb(dev, altsd->bInterfaceNumber + 1,
+ altsd->bAlternateSetting,
+ &alts, &ep) >= 0) {
+ goto add_sync_ep;
+ }
+
+ /* No quirk */
+ return 0;
+
+add_sync_ep:
+ subs->sync_endpoint = snd_usb_add_endpoint(subs->stream->chip,
+ alts, ep, !subs->direction,
+ SND_USB_ENDPOINT_TYPE_DATA);
+ if (!subs->sync_endpoint)
+ return -EINVAL;
+
+ subs->data_endpoint->sync_master = subs->sync_endpoint;
+
+ return 0;
+}
+
+static int set_sync_endpoint(struct snd_usb_substream *subs,
+ struct audioformat *fmt,
+ struct usb_device *dev,
+ struct usb_host_interface *alts,
+ struct usb_interface_descriptor *altsd)
+{
+ int is_playback = subs->direction == SNDRV_PCM_STREAM_PLAYBACK;
+ unsigned int ep, attr;
+ bool implicit_fb;
+ int err;
+
+ /* we need a sync pipe in async OUT or adaptive IN mode */
+ /* check the number of EP, since some devices have broken
+ * descriptors which fool us. if it has only one EP,
+ * assume it as adaptive-out or sync-in.
+ */
+ attr = fmt->ep_attr & USB_ENDPOINT_SYNCTYPE;
+
+ err = set_sync_ep_implicit_fb_quirk(subs, dev, altsd, attr);
+ if (err < 0)
+ return err;
+
+ if (altsd->bNumEndpoints < 2)
+ return 0;
+
+ if ((is_playback && attr != USB_ENDPOINT_SYNC_ASYNC) ||
+ (!is_playback && attr != USB_ENDPOINT_SYNC_ADAPTIVE))
+ return 0;
+
+ /* check sync-pipe endpoint */
+ /* ... and check descriptor size before accessing bSynchAddress
+ because there is a version of the SB Audigy 2 NX firmware lacking
+ the audio fields in the endpoint descriptors */
+ if ((get_endpoint(alts, 1)->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_ISOC ||
+ (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
+ get_endpoint(alts, 1)->bSynchAddress != 0)) {
+ snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. bmAttributes %02x, bLength %d, bSynchAddress %02x\n",
+ dev->devnum, fmt->iface, fmt->altsetting,
+ get_endpoint(alts, 1)->bmAttributes,
+ get_endpoint(alts, 1)->bLength,
+ get_endpoint(alts, 1)->bSynchAddress);
+ return -EINVAL;
+ }
+ ep = get_endpoint(alts, 1)->bEndpointAddress;
+ if (get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
+ ((is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) ||
+ (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) {
+ snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. is_playback %d, ep %02x, bSynchAddress %02x\n",
+ dev->devnum, fmt->iface, fmt->altsetting,
+ is_playback, ep, get_endpoint(alts, 0)->bSynchAddress);
+ return -EINVAL;
+ }
+
+ implicit_fb = (get_endpoint(alts, 1)->bmAttributes & USB_ENDPOINT_USAGE_MASK)
+ == USB_ENDPOINT_USAGE_IMPLICIT_FB;
+
+ subs->sync_endpoint = snd_usb_add_endpoint(subs->stream->chip,
+ alts, ep, !subs->direction,
+ implicit_fb ?
+ SND_USB_ENDPOINT_TYPE_DATA :
+ SND_USB_ENDPOINT_TYPE_SYNC);
+ if (!subs->sync_endpoint)
+ return -EINVAL;
+
+ subs->data_endpoint->sync_master = subs->sync_endpoint;
+
+ return 0;
+}
+
/*
* find a matching format and set up the interface
*/
@@ -336,9 +467,7 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
struct usb_host_interface *alts;
struct usb_interface_descriptor *altsd;
struct usb_interface *iface;
- unsigned int ep, attr;
- int is_playback = subs->direction == SNDRV_PCM_STREAM_PLAYBACK;
- int err, implicit_fb = 0;
+ int err;
iface = usb_ifnum_to_if(dev, fmt->iface);
if (WARN_ON(!iface))
@@ -383,118 +512,22 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
subs->data_endpoint = snd_usb_add_endpoint(subs->stream->chip,
alts, fmt->endpoint, subs->direction,
SND_USB_ENDPOINT_TYPE_DATA);
+
if (!subs->data_endpoint)
return -EINVAL;
- /* we need a sync pipe in async OUT or adaptive IN mode */
- /* check the number of EP, since some devices have broken
- * descriptors which fool us. if it has only one EP,
- * assume it as adaptive-out or sync-in.
- */
- attr = fmt->ep_attr & USB_ENDPOINT_SYNCTYPE;
-
- switch (subs->stream->chip->usb_id) {
- case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */
- case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */
- if (is_playback) {
- implicit_fb = 1;
- ep = 0x81;
- iface = usb_ifnum_to_if(dev, 3);
-
- if (!iface || iface->num_altsetting == 0)
- return -EINVAL;
-
- alts = &iface->altsetting[1];
- goto add_sync_ep;
- }
- break;
- case USB_ID(0x0763, 0x2080): /* M-Audio FastTrack Ultra */
- case USB_ID(0x0763, 0x2081):
- if (is_playback) {
- implicit_fb = 1;
- ep = 0x81;
- iface = usb_ifnum_to_if(dev, 2);
-
- if (!iface || iface->num_altsetting == 0)
- return -EINVAL;
-
- alts = &iface->altsetting[1];
- goto add_sync_ep;
- }
- }
- if (is_playback &&
- attr == USB_ENDPOINT_SYNC_ASYNC &&
- altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
- altsd->bInterfaceProtocol == 2 &&
- altsd->bNumEndpoints == 1 &&
- USB_ID_VENDOR(subs->stream->chip->usb_id) == 0x0582 /* Roland */ &&
- search_roland_implicit_fb(dev, altsd->bInterfaceNumber + 1,
- altsd->bAlternateSetting,
- &alts, &ep) >= 0) {
- implicit_fb = 1;
- goto add_sync_ep;
- }
-
- if (((is_playback && attr == USB_ENDPOINT_SYNC_ASYNC) ||
- (!is_playback && attr == USB_ENDPOINT_SYNC_ADAPTIVE)) &&
- altsd->bNumEndpoints >= 2) {
- /* check sync-pipe endpoint */
- /* ... and check descriptor size before accessing bSynchAddress
- because there is a version of the SB Audigy 2 NX firmware lacking
- the audio fields in the endpoint descriptors */
- if ((get_endpoint(alts, 1)->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_ISOC ||
- (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
- get_endpoint(alts, 1)->bSynchAddress != 0 &&
- !implicit_fb)) {
- snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. bmAttributes %02x, bLength %d, bSynchAddress %02x\n",
- dev->devnum, fmt->iface, fmt->altsetting,
- get_endpoint(alts, 1)->bmAttributes,
- get_endpoint(alts, 1)->bLength,
- get_endpoint(alts, 1)->bSynchAddress);
- return -EINVAL;
- }
- ep = get_endpoint(alts, 1)->bEndpointAddress;
- if (!implicit_fb &&
- get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
- (( is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) ||
- (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) {
- snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. is_playback %d, ep %02x, bSynchAddress %02x\n",
- dev->devnum, fmt->iface, fmt->altsetting,
- is_playback, ep, get_endpoint(alts, 0)->bSynchAddress);
- return -EINVAL;
- }
-
- implicit_fb = (get_endpoint(alts, 1)->bmAttributes & USB_ENDPOINT_USAGE_MASK)
- == USB_ENDPOINT_USAGE_IMPLICIT_FB;
-
-add_sync_ep:
- subs->sync_endpoint = snd_usb_add_endpoint(subs->stream->chip,
- alts, ep, !subs->direction,
- implicit_fb ?
- SND_USB_ENDPOINT_TYPE_DATA :
- SND_USB_ENDPOINT_TYPE_SYNC);
- if (!subs->sync_endpoint)
- return -EINVAL;
-
- subs->data_endpoint->sync_master = subs->sync_endpoint;
- }
+ err = set_sync_endpoint(subs, fmt, dev, alts, altsd);
+ if (err < 0)
+ return err;
- if ((err = snd_usb_init_pitch(subs->stream->chip, fmt->iface, alts, fmt)) < 0)
+ err = snd_usb_init_pitch(subs->stream->chip, fmt->iface, alts, fmt);
+ if (err < 0)
return err;
subs->cur_audiofmt = fmt;
snd_usb_set_format_quirk(subs, fmt);
-#if 0
- printk(KERN_DEBUG
- "setting done: format = %d, rate = %d..%d, channels = %d\n",
- fmt->format, fmt->rate_min, fmt->rate_max, fmt->channels);
- printk(KERN_DEBUG
- " datapipe = 0x%0x, syncpipe = 0x%0x\n",
- subs->datapipe, subs->syncpipe);
-#endif
-
return 0;
}
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 1bc45e71f1f..0df9ede99df 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -319,19 +319,19 @@ static int create_auto_midi_quirk(struct snd_usb_audio *chip,
if (altsd->bNumEndpoints < 1)
return -ENODEV;
epd = get_endpoint(alts, 0);
- if (!usb_endpoint_xfer_bulk(epd) ||
+ if (!usb_endpoint_xfer_bulk(epd) &&
!usb_endpoint_xfer_int(epd))
return -ENODEV;
switch (USB_ID_VENDOR(chip->usb_id)) {
case 0x0499: /* Yamaha */
err = create_yamaha_midi_quirk(chip, iface, driver, alts);
- if (err < 0 && err != -ENODEV)
+ if (err != -ENODEV)
return err;
break;
case 0x0582: /* Roland */
err = create_roland_midi_quirk(chip, iface, driver, alts);
- if (err < 0 && err != -ENODEV)
+ if (err != -ENODEV)
return err;
break;
}
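
In the first hunk above, the original test `!usb_endpoint_xfer_bulk(epd) || !usb_endpoint_xfer_int(epd)` is always true, because no endpoint is both bulk and interrupt, so every candidate interface was rejected; the `&&` form rejects only endpoints that are neither. The later hunks let a successful quirk creation (err == 0) return immediately, with only -ENODEV falling through to the generic path. A throwaway truth-table check of the condition fix:

    #include <stdbool.h>
    #include <stdio.h>

    static bool rejected_old(bool bulk, bool intr) { return !bulk || !intr; }
    static bool rejected_new(bool bulk, bool intr) { return !bulk && !intr; }

    int main(void)
    {
            /* a real endpoint is at most one of the two transfer types */
            printf("bulk:    old=%d new=%d\n", rejected_old(true, false),  rejected_new(true, false));
            printf("int:     old=%d new=%d\n", rejected_old(false, true),  rejected_new(false, true));
            printf("neither: old=%d new=%d\n", rejected_old(false, false), rejected_new(false, false));
            return 0;
    }
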
diff --git a/sound/usb/usx2y/usbusx2y.c b/sound/usb/usx2y/usbusx2y.c
index 1f9bbd55553..5a51b18c50f 100644
--- a/sound/usb/usx2y/usbusx2y.c
+++ b/sound/usb/usx2y/usbusx2y.c
@@ -305,11 +305,9 @@ static void usX2Y_unlinkSeq(struct snd_usX2Y_AsyncSeq *S)
{
int i;
for (i = 0; i < URBS_AsyncSeq; ++i) {
- if (S[i].urb) {
- usb_kill_urb(S->urb[i]);
- usb_free_urb(S->urb[i]);
- S->urb[i] = NULL;
- }
+ usb_kill_urb(S->urb[i]);
+ usb_free_urb(S->urb[i]);
+ S->urb[i] = NULL;
}
kfree(S->buffer);
}
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index ca9fa4d32e0..8fd9ec66121 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -79,8 +79,6 @@ enum {
DNS
};
-static char kvp_send_buffer[4096];
-static char kvp_recv_buffer[4096 * 2];
static struct sockaddr_nl addr;
static int in_hand_shake = 1;
@@ -1026,9 +1024,10 @@ kvp_get_ip_info(int family, char *if_name, int op,
if (sn_offset == 0)
strcpy(sn_str, cidr_mask);
- else
+ else {
+ strcat((char *)ip_buffer->sub_net, ";");
strcat(sn_str, cidr_mask);
- strcat((char *)ip_buffer->sub_net, ";");
+ }
sn_offset += strlen(sn_str) + 1;
}
@@ -1300,6 +1299,7 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
}
error = kvp_write_file(file, "HWADDR", "", mac_addr);
+ free(mac_addr);
if (error)
goto setval_error;
@@ -1345,7 +1345,6 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
goto setval_error;
setval_done:
- free(mac_addr);
fclose(file);
/*
@@ -1354,12 +1353,15 @@ setval_done:
*/
snprintf(cmd, sizeof(cmd), "%s %s", "hv_set_ifconfig", if_file);
- system(cmd);
+ if (system(cmd)) {
+ syslog(LOG_ERR, "Failed to execute cmd '%s'; error: %d %s",
+ cmd, errno, strerror(errno));
+ return HV_E_FAIL;
+ }
return 0;
setval_error:
syslog(LOG_ERR, "Failed to write config file");
- free(mac_addr);
fclose(file);
return error;
}
@@ -1390,23 +1392,18 @@ kvp_get_domain_name(char *buffer, int length)
static int
netlink_send(int fd, struct cn_msg *msg)
{
- struct nlmsghdr *nlh;
+ struct nlmsghdr nlh = { .nlmsg_type = NLMSG_DONE };
unsigned int size;
struct msghdr message;
- char buffer[64];
struct iovec iov[2];
- size = NLMSG_SPACE(sizeof(struct cn_msg) + msg->len);
+ size = sizeof(struct cn_msg) + msg->len;
- nlh = (struct nlmsghdr *)buffer;
- nlh->nlmsg_seq = 0;
- nlh->nlmsg_pid = getpid();
- nlh->nlmsg_type = NLMSG_DONE;
- nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh));
- nlh->nlmsg_flags = 0;
+ nlh.nlmsg_pid = getpid();
+ nlh.nlmsg_len = NLMSG_LENGTH(size);
- iov[0].iov_base = nlh;
- iov[0].iov_len = sizeof(*nlh);
+ iov[0].iov_base = &nlh;
+ iov[0].iov_len = sizeof(nlh);
iov[1].iov_base = msg;
iov[1].iov_len = size;
@@ -1436,10 +1433,22 @@ int main(void)
int pool;
char *if_name;
struct hv_kvp_ipaddr_value *kvp_ip_val;
+ char *kvp_send_buffer;
+ char *kvp_recv_buffer;
+ size_t kvp_recv_buffer_len;
- daemon(1, 0);
+ if (daemon(1, 0))
+ return 1;
openlog("KVP", 0, LOG_USER);
syslog(LOG_INFO, "KVP starting; pid is:%d", getpid());
+
+ kvp_recv_buffer_len = NLMSG_HDRLEN + sizeof(struct cn_msg) + sizeof(struct hv_kvp_msg);
+ kvp_send_buffer = calloc(1, kvp_recv_buffer_len);
+ kvp_recv_buffer = calloc(1, kvp_recv_buffer_len);
+ if (!(kvp_send_buffer && kvp_recv_buffer)) {
+ syslog(LOG_ERR, "Failed to allocate netlink buffers");
+ exit(EXIT_FAILURE);
+ }
/*
* Retrieve OS release information.
*/
@@ -1513,7 +1522,7 @@ int main(void)
continue;
}
- len = recvfrom(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0,
+ len = recvfrom(fd, kvp_recv_buffer, kvp_recv_buffer_len, 0,
addr_p, &addr_l);
if (len < 0) {
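
netlink_send() above now builds the nlmsghdr directly on the stack and hands header and connector payload to sendmsg() as a two-element iovec, and the daemon sizes its send/receive buffers from the actual message structures instead of fixed-size globals. A self-contained userspace sketch of the two-iovec send, with error handling trimmed and the payload layout left to the caller:

    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <linux/netlink.h>

    static ssize_t nl_send(int fd, void *payload, size_t payload_len)
    {
            struct nlmsghdr nlh = { .nlmsg_type = NLMSG_DONE };
            struct sockaddr_nl dst = { .nl_family = AF_NETLINK };
            struct iovec iov[2];
            struct msghdr msg;

            nlh.nlmsg_pid = getpid();
            nlh.nlmsg_len = NLMSG_LENGTH(payload_len);

            iov[0].iov_base = &nlh;                 /* header ...        */
            iov[0].iov_len  = sizeof(nlh);
            iov[1].iov_base = payload;              /* ... then the body */
            iov[1].iov_len  = payload_len;

            memset(&msg, 0, sizeof(msg));
            msg.msg_name    = &dst;
            msg.msg_namelen = sizeof(dst);
            msg.msg_iov     = iov;
            msg.msg_iovlen  = 2;

            return sendmsg(fd, &msg, 0);
    }
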
diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
index fea03a3edaf..8611962c672 100644
--- a/tools/hv/hv_vss_daemon.c
+++ b/tools/hv/hv_vss_daemon.c
@@ -38,8 +38,6 @@
#include <linux/netlink.h>
#include <syslog.h>
-static char vss_recv_buffer[4096];
-static char vss_send_buffer[4096];
static struct sockaddr_nl addr;
#ifndef SOL_NETLINK
@@ -107,23 +105,18 @@ static int vss_operate(int operation)
static int netlink_send(int fd, struct cn_msg *msg)
{
- struct nlmsghdr *nlh;
+ struct nlmsghdr nlh = { .nlmsg_type = NLMSG_DONE };
unsigned int size;
struct msghdr message;
- char buffer[64];
struct iovec iov[2];
- size = NLMSG_SPACE(sizeof(struct cn_msg) + msg->len);
+ size = sizeof(struct cn_msg) + msg->len;
- nlh = (struct nlmsghdr *)buffer;
- nlh->nlmsg_seq = 0;
- nlh->nlmsg_pid = getpid();
- nlh->nlmsg_type = NLMSG_DONE;
- nlh->nlmsg_len = NLMSG_LENGTH(size - sizeof(*nlh));
- nlh->nlmsg_flags = 0;
+ nlh.nlmsg_pid = getpid();
+ nlh.nlmsg_len = NLMSG_LENGTH(size);
- iov[0].iov_base = nlh;
- iov[0].iov_len = sizeof(*nlh);
+ iov[0].iov_base = &nlh;
+ iov[0].iov_len = sizeof(nlh);
iov[1].iov_base = msg;
iov[1].iov_len = size;
@@ -147,6 +140,9 @@ int main(void)
struct cn_msg *incoming_cn_msg;
int op;
struct hv_vss_msg *vss_msg;
+ char *vss_send_buffer;
+ char *vss_recv_buffer;
+ size_t vss_recv_buffer_len;
if (daemon(1, 0))
return 1;
@@ -154,9 +150,18 @@ int main(void)
openlog("Hyper-V VSS", 0, LOG_USER);
syslog(LOG_INFO, "VSS starting; pid is:%d", getpid());
+ vss_recv_buffer_len = NLMSG_HDRLEN + sizeof(struct cn_msg) + sizeof(struct hv_vss_msg);
+ vss_send_buffer = calloc(1, vss_recv_buffer_len);
+ vss_recv_buffer = calloc(1, vss_recv_buffer_len);
+ if (!(vss_send_buffer && vss_recv_buffer)) {
+ syslog(LOG_ERR, "Failed to allocate netlink buffers");
+ exit(EXIT_FAILURE);
+ }
+
fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_CONNECTOR);
if (fd < 0) {
- syslog(LOG_ERR, "netlink socket creation failed; error:%d", fd);
+ syslog(LOG_ERR, "netlink socket creation failed; error:%d %s",
+ errno, strerror(errno));
exit(EXIT_FAILURE);
}
addr.nl_family = AF_NETLINK;
@@ -167,12 +172,16 @@ int main(void)
error = bind(fd, (struct sockaddr *)&addr, sizeof(addr));
if (error < 0) {
- syslog(LOG_ERR, "bind failed; error:%d", error);
+ syslog(LOG_ERR, "bind failed; error:%d %s", errno, strerror(errno));
close(fd);
exit(EXIT_FAILURE);
}
nl_group = CN_VSS_IDX;
- setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &nl_group, sizeof(nl_group));
+ if (setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP, &nl_group, sizeof(nl_group)) < 0) {
+ syslog(LOG_ERR, "setsockopt failed; error:%d %s", errno, strerror(errno));
+ close(fd);
+ exit(EXIT_FAILURE);
+ }
/*
* Register ourselves with the kernel.
*/
@@ -187,7 +196,7 @@ int main(void)
len = netlink_send(fd, message);
if (len < 0) {
- syslog(LOG_ERR, "netlink_send failed; error:%d", len);
+ syslog(LOG_ERR, "netlink_send failed; error:%d %s", errno, strerror(errno));
close(fd);
exit(EXIT_FAILURE);
}
@@ -199,9 +208,18 @@ int main(void)
socklen_t addr_l = sizeof(addr);
pfd.events = POLLIN;
pfd.revents = 0;
- poll(&pfd, 1, -1);
- len = recvfrom(fd, vss_recv_buffer, sizeof(vss_recv_buffer), 0,
+ if (poll(&pfd, 1, -1) < 0) {
+ syslog(LOG_ERR, "poll failed; error:%d %s", errno, strerror(errno));
+ if (errno == EINVAL) {
+ close(fd);
+ exit(EXIT_FAILURE);
+ }
+ else
+ continue;
+ }
+
+ len = recvfrom(fd, vss_recv_buffer, vss_recv_buffer_len, 0,
addr_p, &addr_l);
if (len < 0) {
@@ -241,7 +259,8 @@ int main(void)
vss_msg->error = error;
len = netlink_send(fd, incoming_cn_msg);
if (len < 0) {
- syslog(LOG_ERR, "net_link send failed; error:%d", len);
+ syslog(LOG_ERR, "net_link send failed; error:%d %s",
+ errno, strerror(errno));
exit(EXIT_FAILURE);
}
}
diff --git a/tools/lib/lk/Makefile b/tools/lib/lk/Makefile
index 280dd820543..3dba0a4aebb 100644
--- a/tools/lib/lk/Makefile
+++ b/tools/lib/lk/Makefile
@@ -3,21 +3,6 @@ include ../../scripts/Makefile.include
CC = $(CROSS_COMPILE)gcc
AR = $(CROSS_COMPILE)ar
-# Makefiles suck: This macro sets a default value of $(2) for the
-# variable named by $(1), unless the variable has been set by
-# environment or command line. This is necessary for CC and AR
-# because make sets default values, so the simpler ?= approach
-# won't work as expected.
-define allow-override
- $(if $(or $(findstring environment,$(origin $(1))),\
- $(findstring command line,$(origin $(1)))),,\
- $(eval $(1) = $(2)))
-endef
-
-# Allow setting CC and AR, or setting CROSS_COMPILE as a prefix.
-$(call allow-override,CC,$(CROSS_COMPILE)gcc)
-$(call allow-override,AR,$(CROSS_COMPILE)ar)
-
# guard against environment variables
LIB_H=
LIB_OBJS=
diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
index 0b0a90787db..ca6cb779876 100644
--- a/tools/lib/traceevent/Makefile
+++ b/tools/lib/traceevent/Makefile
@@ -39,13 +39,8 @@ bindir_relative = bin
bindir = $(prefix)/$(bindir_relative)
man_dir = $(prefix)/share/man
man_dir_SQ = '$(subst ','\'',$(man_dir))'
-html_install = $(prefix)/share/kernelshark/html
-html_install_SQ = '$(subst ','\'',$(html_install))'
-img_install = $(prefix)/share/kernelshark/html/images
-img_install_SQ = '$(subst ','\'',$(img_install))'
-export man_dir man_dir_SQ html_install html_install_SQ INSTALL
-export img_install img_install_SQ
+export man_dir man_dir_SQ INSTALL
export DESTDIR DESTDIR_SQ
# copy a bit from Linux kbuild
@@ -65,7 +60,7 @@ ifeq ($(BUILD_SRC),)
ifneq ($(BUILD_OUTPUT),)
define build_output
- $(if $(VERBOSE:1=),@)$(MAKE) -C $(BUILD_OUTPUT) \
+ $(if $(VERBOSE:1=),@)+$(MAKE) -C $(BUILD_OUTPUT) \
BUILD_SRC=$(CURDIR) -f $(CURDIR)/Makefile $1
endef
@@ -76,10 +71,7 @@ $(if $(BUILD_OUTPUT),, \
all: sub-make
-gui: force
- $(call build_output, all_cmd)
-
-$(filter-out gui,$(MAKECMDGOALS)): sub-make
+$(MAKECMDGOALS): sub-make
sub-make: force
$(call build_output, $(MAKECMDGOALS))
@@ -189,6 +181,7 @@ $(obj)/%.o: $(src)/%.c
$(Q)$(call do_compile)
PEVENT_LIB_OBJS = event-parse.o trace-seq.o parse-filter.o parse-utils.o
+PEVENT_LIB_OBJS += kbuffer-parse.o
ALL_OBJS = $(PEVENT_LIB_OBJS)
@@ -258,9 +251,6 @@ define check_deps
$(RM) $@.$$$$
endef
-$(gui_deps): ks_version.h
-$(non_gui_deps): tc_version.h
-
$(all_deps): .%.d: $(src)/%.c
$(Q)$(call check_deps)
@@ -300,7 +290,7 @@ define do_install
$(INSTALL) $1 '$(DESTDIR_SQ)$2'
endef
-install_lib: all_cmd install_plugins install_python
+install_lib: all_cmd
$(Q)$(call do_install,$(LIB_FILE),$(bindir_SQ))
install: install_lib
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 82b0606dcb8..d1c2a6a4cd3 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -5450,10 +5450,9 @@ int pevent_register_print_function(struct pevent *pevent,
* If @id is >= 0, then it is used to find the event.
* else @sys_name and @event_name are used.
*/
-int pevent_register_event_handler(struct pevent *pevent,
- int id, char *sys_name, char *event_name,
- pevent_event_handler_func func,
- void *context)
+int pevent_register_event_handler(struct pevent *pevent, int id,
+ const char *sys_name, const char *event_name,
+ pevent_event_handler_func func, void *context)
{
struct event_format *event;
struct event_handler *handle;
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 7be7e89533e..c37b2026d04 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -69,6 +69,7 @@ struct trace_seq {
};
void trace_seq_init(struct trace_seq *s);
+void trace_seq_reset(struct trace_seq *s);
void trace_seq_destroy(struct trace_seq *s);
extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
@@ -399,6 +400,7 @@ struct pevent {
int cpus;
int long_size;
+ int page_size;
struct cmdline *cmdlines;
struct cmdline_list *cmdlist;
@@ -561,7 +563,8 @@ int pevent_print_num_field(struct trace_seq *s, const char *fmt,
struct event_format *event, const char *name,
struct pevent_record *record, int err);
-int pevent_register_event_handler(struct pevent *pevent, int id, char *sys_name, char *event_name,
+int pevent_register_event_handler(struct pevent *pevent, int id,
+ const char *sys_name, const char *event_name,
pevent_event_handler_func func, void *context);
int pevent_register_print_function(struct pevent *pevent,
pevent_func_handler func,
@@ -619,6 +622,16 @@ static inline void pevent_set_long_size(struct pevent *pevent, int long_size)
pevent->long_size = long_size;
}
+static inline int pevent_get_page_size(struct pevent *pevent)
+{
+ return pevent->page_size;
+}
+
+static inline void pevent_set_page_size(struct pevent *pevent, int _page_size)
+{
+ pevent->page_size = _page_size;
+}
+
static inline int pevent_is_file_bigendian(struct pevent *pevent)
{
return pevent->file_bigendian;
diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c
new file mode 100644
index 00000000000..dcc665228c7
--- /dev/null
+++ b/tools/lib/traceevent/kbuffer-parse.c
@@ -0,0 +1,732 @@
+/*
+ * Copyright (C) 2009, 2010 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "kbuffer.h"
+
+#define MISSING_EVENTS (1 << 31)
+#define MISSING_STORED (1 << 30)
+
+#define COMMIT_MASK ((1 << 27) - 1)
+
+enum {
+ KBUFFER_FL_HOST_BIG_ENDIAN = (1<<0),
+ KBUFFER_FL_BIG_ENDIAN = (1<<1),
+ KBUFFER_FL_LONG_8 = (1<<2),
+ KBUFFER_FL_OLD_FORMAT = (1<<3),
+};
+
+#define ENDIAN_MASK (KBUFFER_FL_HOST_BIG_ENDIAN | KBUFFER_FL_BIG_ENDIAN)
+
+/** kbuffer
+ * @timestamp - timestamp of current event
+ * @lost_events - # of lost events between this subbuffer and previous
+ * @flags - special flags of the kbuffer
+ * @subbuffer - pointer to the sub-buffer page
+ * @data - pointer to the start of data on the sub-buffer page
+ * @index - index from @data to the @curr event data
+ * @curr - offset from @data to the start of current event
+ * (includes metadata)
+ * @next - offset from @data to the start of next event
+ * @size - The size of data on @data
+ * @start - The offset from @subbuffer where @data lives
+ *
+ * @read_4 - Function to read 4 raw bytes (may swap)
+ * @read_8 - Function to read 8 raw bytes (may swap)
+ * @read_long - Function to read a long word (4 or 8 bytes with needed swap)
+ */
+struct kbuffer {
+ unsigned long long timestamp;
+ long long lost_events;
+ unsigned long flags;
+ void *subbuffer;
+ void *data;
+ unsigned int index;
+ unsigned int curr;
+ unsigned int next;
+ unsigned int size;
+ unsigned int start;
+
+ unsigned int (*read_4)(void *ptr);
+ unsigned long long (*read_8)(void *ptr);
+ unsigned long long (*read_long)(struct kbuffer *kbuf, void *ptr);
+ int (*next_event)(struct kbuffer *kbuf);
+};
+
+static void *zmalloc(size_t size)
+{
+ return calloc(1, size);
+}
+
+static int host_is_bigendian(void)
+{
+ unsigned char str[] = { 0x1, 0x2, 0x3, 0x4 };
+ unsigned int *ptr;
+
+ ptr = (unsigned int *)str;
+ return *ptr == 0x01020304;
+}
+
+static int do_swap(struct kbuffer *kbuf)
+{
+ return ((kbuf->flags & KBUFFER_FL_HOST_BIG_ENDIAN) + kbuf->flags) &
+ ENDIAN_MASK;
+}
+
+static unsigned long long __read_8(void *ptr)
+{
+ unsigned long long data = *(unsigned long long *)ptr;
+
+ return data;
+}
+
+static unsigned long long __read_8_sw(void *ptr)
+{
+ unsigned long long data = *(unsigned long long *)ptr;
+ unsigned long long swap;
+
+ swap = ((data & 0xffULL) << 56) |
+ ((data & (0xffULL << 8)) << 40) |
+ ((data & (0xffULL << 16)) << 24) |
+ ((data & (0xffULL << 24)) << 8) |
+ ((data & (0xffULL << 32)) >> 8) |
+ ((data & (0xffULL << 40)) >> 24) |
+ ((data & (0xffULL << 48)) >> 40) |
+ ((data & (0xffULL << 56)) >> 56);
+
+ return swap;
+}
+
+static unsigned int __read_4(void *ptr)
+{
+ unsigned int data = *(unsigned int *)ptr;
+
+ return data;
+}
+
+static unsigned int __read_4_sw(void *ptr)
+{
+ unsigned int data = *(unsigned int *)ptr;
+ unsigned int swap;
+
+ swap = ((data & 0xffULL) << 24) |
+ ((data & (0xffULL << 8)) << 8) |
+ ((data & (0xffULL << 16)) >> 8) |
+ ((data & (0xffULL << 24)) >> 24);
+
+ return swap;
+}
+
+static unsigned long long read_8(struct kbuffer *kbuf, void *ptr)
+{
+ return kbuf->read_8(ptr);
+}
+
+static unsigned int read_4(struct kbuffer *kbuf, void *ptr)
+{
+ return kbuf->read_4(ptr);
+}
+
+static unsigned long long __read_long_8(struct kbuffer *kbuf, void *ptr)
+{
+ return kbuf->read_8(ptr);
+}
+
+static unsigned long long __read_long_4(struct kbuffer *kbuf, void *ptr)
+{
+ return kbuf->read_4(ptr);
+}
+
+static unsigned long long read_long(struct kbuffer *kbuf, void *ptr)
+{
+ return kbuf->read_long(kbuf, ptr);
+}
+
+static int calc_index(struct kbuffer *kbuf, void *ptr)
+{
+ return (unsigned long)ptr - (unsigned long)kbuf->data;
+}
+
+static int __next_event(struct kbuffer *kbuf);
+
+/**
+ * kbuffer_alloc - allocate a new kbuffer
+ * @size: enum to denote size of word
+ * @endian: enum to denote endianness
+ *
+ * Allocates and returns a new kbuffer.
+ */
+struct kbuffer *
+kbuffer_alloc(enum kbuffer_long_size size, enum kbuffer_endian endian)
+{
+ struct kbuffer *kbuf;
+ int flags = 0;
+
+ switch (size) {
+ case KBUFFER_LSIZE_4:
+ break;
+ case KBUFFER_LSIZE_8:
+ flags |= KBUFFER_FL_LONG_8;
+ break;
+ default:
+ return NULL;
+ }
+
+ switch (endian) {
+ case KBUFFER_ENDIAN_LITTLE:
+ break;
+ case KBUFFER_ENDIAN_BIG:
+ flags |= KBUFFER_FL_BIG_ENDIAN;
+ break;
+ default:
+ return NULL;
+ }
+
+ kbuf = zmalloc(sizeof(*kbuf));
+ if (!kbuf)
+ return NULL;
+
+ kbuf->flags = flags;
+
+ if (host_is_bigendian())
+ kbuf->flags |= KBUFFER_FL_HOST_BIG_ENDIAN;
+
+ if (do_swap(kbuf)) {
+ kbuf->read_8 = __read_8_sw;
+ kbuf->read_4 = __read_4_sw;
+ } else {
+ kbuf->read_8 = __read_8;
+ kbuf->read_4 = __read_4;
+ }
+
+ if (kbuf->flags & KBUFFER_FL_LONG_8)
+ kbuf->read_long = __read_long_8;
+ else
+ kbuf->read_long = __read_long_4;
+
+ /* May be changed by kbuffer_set_old_format() */
+ kbuf->next_event = __next_event;
+
+ return kbuf;
+}
+
+/** kbuffer_free - free an allocated kbuffer
+ * @kbuf: The kbuffer to free
+ *
+ * Can take NULL as a parameter.
+ */
+void kbuffer_free(struct kbuffer *kbuf)
+{
+ free(kbuf);
+}
+
+static unsigned int type4host(struct kbuffer *kbuf,
+ unsigned int type_len_ts)
+{
+ if (kbuf->flags & KBUFFER_FL_BIG_ENDIAN)
+ return (type_len_ts >> 29) & 3;
+ else
+ return type_len_ts & 3;
+}
+
+static unsigned int len4host(struct kbuffer *kbuf,
+ unsigned int type_len_ts)
+{
+ if (kbuf->flags & KBUFFER_FL_BIG_ENDIAN)
+ return (type_len_ts >> 27) & 7;
+ else
+ return (type_len_ts >> 2) & 7;
+}
+
+static unsigned int type_len4host(struct kbuffer *kbuf,
+ unsigned int type_len_ts)
+{
+ if (kbuf->flags & KBUFFER_FL_BIG_ENDIAN)
+ return (type_len_ts >> 27) & ((1 << 5) - 1);
+ else
+ return type_len_ts & ((1 << 5) - 1);
+}
+
+static unsigned int ts4host(struct kbuffer *kbuf,
+ unsigned int type_len_ts)
+{
+ if (kbuf->flags & KBUFFER_FL_BIG_ENDIAN)
+ return type_len_ts & ((1 << 27) - 1);
+ else
+ return type_len_ts >> 5;
+}
+
+/*
+ * Linux 2.6.30 and earlier (not much earlier) had a different
+ * ring buffer format. It should be obsolete, but we handle it anyway.
+ */
+enum old_ring_buffer_type {
+ OLD_RINGBUF_TYPE_PADDING,
+ OLD_RINGBUF_TYPE_TIME_EXTEND,
+ OLD_RINGBUF_TYPE_TIME_STAMP,
+ OLD_RINGBUF_TYPE_DATA,
+};
+
+static unsigned int old_update_pointers(struct kbuffer *kbuf)
+{
+ unsigned long long extend;
+ unsigned int type_len_ts;
+ unsigned int type;
+ unsigned int len;
+ unsigned int delta;
+ unsigned int length;
+ void *ptr = kbuf->data + kbuf->curr;
+
+ type_len_ts = read_4(kbuf, ptr);
+ ptr += 4;
+
+ type = type4host(kbuf, type_len_ts);
+ len = len4host(kbuf, type_len_ts);
+ delta = ts4host(kbuf, type_len_ts);
+
+ switch (type) {
+ case OLD_RINGBUF_TYPE_PADDING:
+ kbuf->next = kbuf->size;
+ return 0;
+
+ case OLD_RINGBUF_TYPE_TIME_EXTEND:
+ extend = read_4(kbuf, ptr);
+ extend <<= TS_SHIFT;
+ extend += delta;
+ delta = extend;
+ ptr += 4;
+ break;
+
+ case OLD_RINGBUF_TYPE_TIME_STAMP:
+ /* should never happen! */
+ kbuf->curr = kbuf->size;
+ kbuf->next = kbuf->size;
+ kbuf->index = kbuf->size;
+ return -1;
+ default:
+ if (len)
+ length = len * 4;
+ else {
+ length = read_4(kbuf, ptr);
+ length -= 4;
+ ptr += 4;
+ }
+ break;
+ }
+
+ kbuf->timestamp += delta;
+ kbuf->index = calc_index(kbuf, ptr);
+ kbuf->next = kbuf->index + length;
+
+ return type;
+}
+
+static int __old_next_event(struct kbuffer *kbuf)
+{
+ int type;
+
+ do {
+ kbuf->curr = kbuf->next;
+ if (kbuf->next >= kbuf->size)
+ return -1;
+ type = old_update_pointers(kbuf);
+ } while (type == OLD_RINGBUF_TYPE_TIME_EXTEND || type == OLD_RINGBUF_TYPE_PADDING);
+
+ return 0;
+}
+
+static unsigned int
+translate_data(struct kbuffer *kbuf, void *data, void **rptr,
+ unsigned long long *delta, int *length)
+{
+ unsigned long long extend;
+ unsigned int type_len_ts;
+ unsigned int type_len;
+
+ type_len_ts = read_4(kbuf, data);
+ data += 4;
+
+ type_len = type_len4host(kbuf, type_len_ts);
+ *delta = ts4host(kbuf, type_len_ts);
+
+ switch (type_len) {
+ case KBUFFER_TYPE_PADDING:
+ *length = read_4(kbuf, data);
+ data += *length;
+ break;
+
+ case KBUFFER_TYPE_TIME_EXTEND:
+ extend = read_4(kbuf, data);
+ data += 4;
+ extend <<= TS_SHIFT;
+ extend += *delta;
+ *delta = extend;
+ *length = 0;
+ break;
+
+ case KBUFFER_TYPE_TIME_STAMP:
+ data += 12;
+ *length = 0;
+ break;
+ case 0:
+ *length = read_4(kbuf, data) - 4;
+ *length = (*length + 3) & ~3;
+ data += 4;
+ break;
+ default:
+ *length = type_len * 4;
+ break;
+ }
+
+ *rptr = data;
+
+ return type_len;
+}
+
+static unsigned int update_pointers(struct kbuffer *kbuf)
+{
+ unsigned long long delta;
+ unsigned int type_len;
+ int length;
+ void *ptr = kbuf->data + kbuf->curr;
+
+ type_len = translate_data(kbuf, ptr, &ptr, &delta, &length);
+
+ kbuf->timestamp += delta;
+ kbuf->index = calc_index(kbuf, ptr);
+ kbuf->next = kbuf->index + length;
+
+ return type_len;
+}
+
+/**
+ * kbuffer_translate_data - read raw data to get a record
+ * @swap: Set to 1 if bytes in words need to be swapped when read
+ * @data: The raw data to read
+ * @size: Address to store the size of the event data.
+ *
+ * Returns a pointer to the event data. To determine the entire
+ * record size (record metadata + data) just add the difference between
+ * @data and the returned value to @size.
+ */
+void *kbuffer_translate_data(int swap, void *data, unsigned int *size)
+{
+ unsigned long long delta;
+ struct kbuffer kbuf;
+ int type_len;
+ int length;
+ void *ptr;
+
+ if (swap) {
+ kbuf.read_8 = __read_8_sw;
+ kbuf.read_4 = __read_4_sw;
+ kbuf.flags = host_is_bigendian() ? 0 : KBUFFER_FL_BIG_ENDIAN;
+ } else {
+ kbuf.read_8 = __read_8;
+ kbuf.read_4 = __read_4;
+ kbuf.flags = host_is_bigendian() ? KBUFFER_FL_BIG_ENDIAN: 0;
+ }
+
+ type_len = translate_data(&kbuf, data, &ptr, &delta, &length);
+ switch (type_len) {
+ case KBUFFER_TYPE_PADDING:
+ case KBUFFER_TYPE_TIME_EXTEND:
+ case KBUFFER_TYPE_TIME_STAMP:
+ return NULL;
+ };
+
+ *size = length;
+
+ return ptr;
+}
+
+static int __next_event(struct kbuffer *kbuf)
+{
+ int type;
+
+ do {
+ kbuf->curr = kbuf->next;
+ if (kbuf->next >= kbuf->size)
+ return -1;
+ type = update_pointers(kbuf);
+ } while (type == KBUFFER_TYPE_TIME_EXTEND || type == KBUFFER_TYPE_PADDING);
+
+ return 0;
+}
+
+static int next_event(struct kbuffer *kbuf)
+{
+ return kbuf->next_event(kbuf);
+}
+
+/**
+ * kbuffer_next_event - increment the current pointer
+ * @kbuf: The kbuffer to read
+ * @ts: Address to store the next record's timestamp (may be NULL to ignore)
+ *
+ * Increments the pointers into the subbuffer of the kbuffer to point to the
+ * next event so that the next kbuffer_read_event() will return a
+ * new event.
+ *
+ * Returns the data of the next event if a new event exists on the subbuffer,
+ * NULL otherwise.
+ */
+void *kbuffer_next_event(struct kbuffer *kbuf, unsigned long long *ts)
+{
+ int ret;
+
+ if (!kbuf || !kbuf->subbuffer)
+ return NULL;
+
+ ret = next_event(kbuf);
+ if (ret < 0)
+ return NULL;
+
+ if (ts)
+ *ts = kbuf->timestamp;
+
+ return kbuf->data + kbuf->index;
+}
+
+/**
+ * kbuffer_load_subbuffer - load a new subbuffer into the kbuffer
+ * @kbuf: The kbuffer to load
+ * @subbuffer: The subbuffer to load into @kbuf.
+ *
+ * Load a new subbuffer (page) into @kbuf. This will reset all
+ * the pointers and update the @kbuf timestamp. The next read will
+ * return the first event on @subbuffer.
+ *
+ * Returns 0 on success, -1 otherwise.
+ */
+int kbuffer_load_subbuffer(struct kbuffer *kbuf, void *subbuffer)
+{
+ unsigned long long flags;
+ void *ptr = subbuffer;
+
+ if (!kbuf || !subbuffer)
+ return -1;
+
+ kbuf->subbuffer = subbuffer;
+
+ kbuf->timestamp = read_8(kbuf, ptr);
+ ptr += 8;
+
+ kbuf->curr = 0;
+
+ if (kbuf->flags & KBUFFER_FL_LONG_8)
+ kbuf->start = 16;
+ else
+ kbuf->start = 12;
+
+ kbuf->data = subbuffer + kbuf->start;
+
+ flags = read_long(kbuf, ptr);
+ kbuf->size = (unsigned int)flags & COMMIT_MASK;
+
+ if (flags & MISSING_EVENTS) {
+ if (flags & MISSING_STORED) {
+ ptr = kbuf->data + kbuf->size;
+ kbuf->lost_events = read_long(kbuf, ptr);
+ } else
+ kbuf->lost_events = -1;
+ } else
+ kbuf->lost_events = 0;
+
+ kbuf->index = 0;
+ kbuf->next = 0;
+
+ next_event(kbuf);
+
+ return 0;
+}
+
+/**
+ * kbuffer_read_event - read the next event in the kbuffer subbuffer
+ * @kbuf: The kbuffer to read from
+ * @ts: The address to store the timestamp of the event (may be NULL to ignore)
+ *
+ * Returns a pointer to the data part of the current event.
+ * NULL if no event is left on the subbuffer.
+ */
+void *kbuffer_read_event(struct kbuffer *kbuf, unsigned long long *ts)
+{
+ if (!kbuf || !kbuf->subbuffer)
+ return NULL;
+
+ if (kbuf->curr >= kbuf->size)
+ return NULL;
+
+ if (ts)
+ *ts = kbuf->timestamp;
+ return kbuf->data + kbuf->index;
+}
+
+/**
+ * kbuffer_timestamp - Return the timestamp of the current event
+ * @kbuf: The kbuffer to read from
+ *
+ * Returns the timestamp of the current (next) event.
+ */
+unsigned long long kbuffer_timestamp(struct kbuffer *kbuf)
+{
+ return kbuf->timestamp;
+}
+
+/**
+ * kbuffer_read_at_offset - read the event that is at offset
+ * @kbuf: The kbuffer to read from
+ * @offset: The offset into the subbuffer
+ * @ts: The address to store the timestamp of the event (may be NULL to ignore)
+ *
+ * The @offset must be an index from the @kbuf subbuffer beginning.
+ * If @offset is bigger than the stored subbuffer, NULL will be returned.
+ *
+ * Returns the data of the record that is at @offset. Note, @offset does
+ * not need to be the start of the record, the offset just needs to be
+ * in the record (or beginning of it).
+ *
+ * Note, the kbuf timestamp and pointers are updated to the
+ * returned record. That is, kbuffer_read_event() will return the same
+ * data and timestamp, and kbuffer_next_event() will increment from
+ * this record.
+ */
+void *kbuffer_read_at_offset(struct kbuffer *kbuf, int offset,
+ unsigned long long *ts)
+{
+ void *data;
+
+ if (offset < kbuf->start)
+ offset = 0;
+ else
+ offset -= kbuf->start;
+
+ /* Reset the buffer */
+ kbuffer_load_subbuffer(kbuf, kbuf->subbuffer);
+
+ while (kbuf->curr < offset) {
+ data = kbuffer_next_event(kbuf, ts);
+ if (!data)
+ break;
+ }
+
+ return data;
+}
+
+/**
+ * kbuffer_subbuffer_size - the size of the loaded subbuffer
+ * @kbuf: The kbuffer to read from
+ *
+ * Returns the size of the subbuffer. Note, this size is
+ * where the last event resides. The stored subbuffer may actually be
+ * bigger due to padding and such.
+ */
+int kbuffer_subbuffer_size(struct kbuffer *kbuf)
+{
+ return kbuf->size;
+}
+
+/**
+ * kbuffer_curr_index - Return the index of the record
+ * @kbuf: The kbuffer to read from
+ *
+ * Returns the index from the start of the data part of
+ * the subbuffer to the current location. Note this is not
+ * from the start of the subbuffer. An index of zero will
+ * point to the first record. Use kbuffer_curr_offset() for
+ * the actual offset (that can be used by kbuffer_read_at_offset())
+ */
+int kbuffer_curr_index(struct kbuffer *kbuf)
+{
+ return kbuf->curr;
+}
+
+/**
+ * kbuffer_curr_offset - Return the offset of the record
+ * @kbuf: The kbuffer to read from
+ *
+ * Returns the offset from the start of the subbuffer to the
+ * current location.
+ */
+int kbuffer_curr_offset(struct kbuffer *kbuf)
+{
+ return kbuf->curr + kbuf->start;
+}
+
+/**
+ * kbuffer_event_size - return the size of the event data
+ * @kbuf: The kbuffer to read
+ *
+ * Returns the size of the event data (the payload not counting
+ * the meta data of the record) of the current event.
+ */
+int kbuffer_event_size(struct kbuffer *kbuf)
+{
+ return kbuf->next - kbuf->index;
+}
+
+/**
+ * kbuffer_curr_size - return the size of the entire record
+ * @kbuf: The kbuffer to read
+ *
+ * Returns the size of the entire record (meta data and payload)
+ * of the current event.
+ */
+int kbuffer_curr_size(struct kbuffer *kbuf)
+{
+ return kbuf->next - kbuf->curr;
+}
+
+/**
+ * kbuffer_missed_events - return the # of missed events from last event.
+ * @kbuf: The kbuffer to read from
+ *
+ * Returns the # of missed events (if recorded) before the current
+ * event. Note, only events on the beginning of a subbuffer can
+ * have missed events, all other events within the buffer will be
+ * zero.
+ */
+int kbuffer_missed_events(struct kbuffer *kbuf)
+{
+ /* Only the first event can have missed events */
+ if (kbuf->curr)
+ return 0;
+
+ return kbuf->lost_events;
+}
+
+/**
+ * kbuffer_set_old_format - set the kbuffer to use the old format parsing
+ * @kbuf: The kbuffer to set
+ *
+ * This is obsolete (or should be). The first kernels to use the
+ * new ring buffer had a slightly different ring buffer format
+ * (2.6.30 and earlier). It is still somewhat supported by kbuffer,
+ * but should not be counted on in the future.
+ */
+void kbuffer_set_old_format(struct kbuffer *kbuf)
+{
+ kbuf->flags |= KBUFFER_FL_OLD_FORMAT;
+
+ kbuf->next_event = __old_next_event;
+}
diff --git a/tools/lib/traceevent/kbuffer.h b/tools/lib/traceevent/kbuffer.h
new file mode 100644
index 00000000000..c831f64b17a
--- /dev/null
+++ b/tools/lib/traceevent/kbuffer.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2012 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ */
+#ifndef _KBUFFER_H
+#define _KBUFFER_H
+
+#ifndef TS_SHIFT
+#define TS_SHIFT 27
+#endif
+
+enum kbuffer_endian {
+ KBUFFER_ENDIAN_BIG,
+ KBUFFER_ENDIAN_LITTLE,
+};
+
+enum kbuffer_long_size {
+ KBUFFER_LSIZE_4,
+ KBUFFER_LSIZE_8,
+};
+
+enum {
+ KBUFFER_TYPE_PADDING = 29,
+ KBUFFER_TYPE_TIME_EXTEND = 30,
+ KBUFFER_TYPE_TIME_STAMP = 31,
+};
+
+struct kbuffer;
+
+struct kbuffer *kbuffer_alloc(enum kbuffer_long_size size, enum kbuffer_endian endian);
+void kbuffer_free(struct kbuffer *kbuf);
+int kbuffer_load_subbuffer(struct kbuffer *kbuf, void *subbuffer);
+void *kbuffer_read_event(struct kbuffer *kbuf, unsigned long long *ts);
+void *kbuffer_next_event(struct kbuffer *kbuf, unsigned long long *ts);
+unsigned long long kbuffer_timestamp(struct kbuffer *kbuf);
+
+void *kbuffer_translate_data(int swap, void *data, unsigned int *size);
+
+void *kbuffer_read_at_offset(struct kbuffer *kbuf, int offset, unsigned long long *ts);
+
+int kbuffer_curr_index(struct kbuffer *kbuf);
+
+int kbuffer_curr_offset(struct kbuffer *kbuf);
+int kbuffer_curr_size(struct kbuffer *kbuf);
+int kbuffer_event_size(struct kbuffer *kbuf);
+int kbuffer_missed_events(struct kbuffer *kbuf);
+int kbuffer_subbuffer_size(struct kbuffer *kbuf);
+
+void kbuffer_set_old_format(struct kbuffer *kbuf);
+
+#endif /* _K_BUFFER_H */
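
kbuffer-parse.c and kbuffer.h above add a small userspace library for decoding raw ftrace ring-buffer pages (sub-buffers). A hypothetical consumer, assuming the caller has already read one raw per-CPU page into memory (how it is obtained, e.g. from per_cpu/cpuN/trace_pipe_raw, is outside this sketch):

    #include <stdio.h>
    #include "kbuffer.h"

    static void dump_page(void *page)
    {
            struct kbuffer *kbuf;
            unsigned long long ts;
            void *event;

            kbuf = kbuffer_alloc(KBUFFER_LSIZE_8, KBUFFER_ENDIAN_LITTLE);
            if (!kbuf)
                    return;

            if (kbuffer_load_subbuffer(kbuf, page) < 0)
                    goto out;

            if (kbuffer_missed_events(kbuf))
                    printf("events were lost before this page\n");

            /* walk every event on the sub-buffer, oldest first */
            for (event = kbuffer_read_event(kbuf, &ts); event;
                 event = kbuffer_next_event(kbuf, &ts))
                    printf("event at offset %d, %d bytes, ts=%llu\n",
                           kbuffer_curr_offset(kbuf),
                           kbuffer_event_size(kbuf), ts);

    out:
            kbuffer_free(kbuf);
    }
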
diff --git a/tools/lib/traceevent/trace-seq.c b/tools/lib/traceevent/trace-seq.c
index a57db805136..d7f2e68bc5b 100644
--- a/tools/lib/traceevent/trace-seq.c
+++ b/tools/lib/traceevent/trace-seq.c
@@ -49,6 +49,19 @@ void trace_seq_init(struct trace_seq *s)
}
/**
+ * trace_seq_reset - re-initialize the trace_seq structure
+ * @s: a pointer to the trace_seq structure to reset
+ */
+void trace_seq_reset(struct trace_seq *s)
+{
+ if (!s)
+ return;
+ TRACE_SEQ_CHECK(s);
+ s->len = 0;
+ s->readpos = 0;
+}
+
+/**
* trace_seq_destroy - free up memory of a trace_seq
* @s: a pointer to the trace_seq to free the buffer
*
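
trace_seq_reset() above lets a caller rewind a trace_seq and reuse its allocation across many records, rather than pairing trace_seq_destroy() with a fresh trace_seq_init() for each one. A small sketch of the intended usage, with the record loop reduced to a counter:

    #include "event-parse.h"

    static void format_records(int nr_records)
    {
            struct trace_seq s;
            int i;

            trace_seq_init(&s);
            for (i = 0; i < nr_records; i++) {
                    trace_seq_reset(&s);    /* rewind, but keep the buffer */
                    trace_seq_printf(&s, "record %d\n", i);
                    /* ... hand the formatted text to the caller ... */
            }
            trace_seq_destroy(&s);          /* free the buffer once, at the end */
    }
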
diff --git a/tools/perf/Documentation/perf-diff.txt b/tools/perf/Documentation/perf-diff.txt
index 5b3123d5721..fdfceee0ffd 100644
--- a/tools/perf/Documentation/perf-diff.txt
+++ b/tools/perf/Documentation/perf-diff.txt
@@ -3,17 +3,17 @@ perf-diff(1)
NAME
----
-perf-diff - Read two perf.data files and display the differential profile
+perf-diff - Read perf.data files and display the differential profile
SYNOPSIS
--------
[verse]
-'perf diff' [oldfile] [newfile]
+'perf diff' [baseline file] [data file1] [[data file2] ... ]
DESCRIPTION
-----------
-This command displays the performance difference amongst two perf.data files
-captured via perf record.
+This command displays the performance difference amongst two or more perf.data
+files captured via perf record.
If no parameters are passed it will assume perf.data.old and perf.data.
@@ -75,8 +75,6 @@ OPTIONS
-c::
--compute::
Differential computation selection - delta,ratio,wdiff (default is delta).
- If '+' is specified as a first character, the output is sorted based
- on the computation results.
See COMPARISON METHODS section for more info.
-p::
@@ -87,6 +85,63 @@ OPTIONS
--formula::
Show formula for given computation.
+-o::
+--order::
+ Specify compute sorting column number.
+
+COMPARISON
+----------
+The comparison is governed by the baseline file. The baseline perf.data
+file is iterated for samples. All other perf.data files specified on
+the command line are searched for the baseline sample pair. If the pair
+is found, specified computation is made and result is displayed.
+
+All samples from non-baseline perf.data files, that do not match any
+baseline entry, are displayed with empty space within baseline column
+and possible computation results (delta) in their related column.
+
+Example files samples:
+- file A with samples f1, f2, f3, f4, f6
+- file B with samples f2, f4, f5
+- file C with samples f1, f2, f5
+
+Example output:
+ x - computation takes place for pair
+ b - baseline sample percentage
+
+- perf diff A B C
+
+ baseline/A compute/B compute/C samples
+ ---------------------------------------
+ b x f1
+ b x x f2
+ b f3
+ b x f4
+ b f6
+ x x f5
+
+- perf diff B A C
+
+ baseline/B compute/A compute/C samples
+ ---------------------------------------
+ b x x f2
+ b x f4
+ b x f5
+ x x f1
+ x f3
+ x f6
+
+- perf diff C B A
+
+ baseline/C compute/B compute/A samples
+ ---------------------------------------
+ b x f1
+ b x x f2
+ b x f5
+ x f3
+ x x f4
+ x f6
+
COMPARISON METHODS
------------------
delta
@@ -96,7 +151,7 @@ If specified the 'Delta' column is displayed with value 'd' computed as:
d = A->period_percent - B->period_percent
with:
- - A/B being matching hist entry from first/second file specified
+ - A/B being matching hist entry from data/baseline file specified
(or perf.data/perf.data.old) respectively.
- period_percent being the % of the hist entry period value within
@@ -109,24 +164,26 @@ If specified the 'Ratio' column is displayed with value 'r' computed as:
r = A->period / B->period
with:
- - A/B being matching hist entry from first/second file specified
+ - A/B being matching hist entry from data/baseline file specified
(or perf.data/perf.data.old) respectively.
- period being the hist entry period value
-wdiff
-~~~~~
+wdiff:WEIGHT-B,WEIGHT-A
+~~~~~~~~~~~~~~~~~~~~~~~
If specified the 'Weighted diff' column is displayed with value 'd' computed as:
d = B->period * WEIGHT-A - A->period * WEIGHT-B
- - A/B being matching hist entry from first/second file specified
+ - A/B being matching hist entry from data/baseline file specified
(or perf.data/perf.data.old) respectively.
- period being the hist entry period value
- WEIGHT-A/WEIGHT-B being user suplied weights in the the '-c' option
behind ':' separator like '-c wdiff:1,2'.
+ - WEIGHT-A being the weight of the data file
+ - WEIGHT-B being the weight of the baseline data file
SEE ALSO
--------
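
A throwaway illustration of the three comparison methods above, with made-up period values (A is the matching entry from a data file, B the baseline entry, weights as in '-c wdiff:2,1'):

    #include <stdio.h>

    int main(void)
    {
            double a_period = 300.0, a_total = 1000.0;   /* data file     */
            double b_period = 200.0, b_total = 1000.0;   /* baseline file */
            double weight_a = 1.0, weight_b = 2.0;       /* -c wdiff:2,1  */

            double delta = 100.0 * a_period / a_total - 100.0 * b_period / b_total;
            double ratio = a_period / b_period;
            double wdiff = b_period * weight_a - a_period * weight_b;

            printf("delta = %+.2f%%\n", delta);   /* +10.00% */
            printf("ratio = %.2f\n", ratio);      /* 1.50    */
            printf("wdiff = %.2f\n", wdiff);      /* -400.00 */
            return 0;
    }
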
diff --git a/tools/perf/Documentation/perf-kvm.txt b/tools/perf/Documentation/perf-kvm.txt
index 326f2cb333c..ac84db2d233 100644
--- a/tools/perf/Documentation/perf-kvm.txt
+++ b/tools/perf/Documentation/perf-kvm.txt
@@ -13,6 +13,7 @@ SYNOPSIS
{top|record|report|diff|buildid-list}
'perf kvm' [--host] [--guest] [--guestkallsyms=<path> --guestmodules=<path>
| --guestvmlinux=<path>] {top|record|report|diff|buildid-list|stat}
+'perf kvm stat [record|report|live] [<options>]
DESCRIPTION
-----------
@@ -50,6 +51,10 @@ There are a couple of variants of perf kvm:
'perf kvm stat report' reports statistical data which includes events
handled time, samples, and so on.
+ 'perf kvm stat live' reports statistical data in a live mode (similar to
+ record + report but with statistical data updated live at a given display
+ rate).
+
OPTIONS
-------
-i::
@@ -85,13 +90,50 @@ STAT REPORT OPTIONS
--vcpu=<value>::
 analyze events which occur on this vcpu. (default: all vcpus)
---events=<value>::
- events to be analyzed. Possible values: vmexit, mmio, ioport.
+--event=<value>::
+ event to be analyzed. Possible values: vmexit, mmio, ioport.
(default: vmexit)
-k::
--key=<value>::
Sorting key. Possible values: sample (default, sort by samples
number), time (sort by average time).
+-p::
+--pid=::
+ Analyze events only for given process ID(s) (comma separated list).
+
+STAT LIVE OPTIONS
+-----------------
+-d::
+--display::
+ Time in seconds between display updates.
+
+-m::
+--mmap-pages=::
+ Number of mmap data pages. Must be a power of two.
+
+-a::
+--all-cpus::
+ System-wide collection from all CPUs.
+
+-p::
+--pid=::
+ Analyze events only for given process ID(s) (comma separated list).
+
+--vcpu=<value>::
+ analyze events which occur on this vcpu. (default: all vcpus)
+
+--event=<value>::
+ event to be analyzed. Possible values: vmexit, mmio, ioport.
+ (default: vmexit)
+
+-k::
+--key=<value>::
+ Sorting key. Possible values: sample (default, sort by samples
+ number), time (sort by average time).
+
+--duration=<value>::
+ Show events other than HLT that take longer than duration usecs.
SEE ALSO
--------
diff --git a/tools/perf/Documentation/perf-list.txt b/tools/perf/Documentation/perf-list.txt
index d1e39dc8c81..6fce6a62220 100644
--- a/tools/perf/Documentation/perf-list.txt
+++ b/tools/perf/Documentation/perf-list.txt
@@ -8,7 +8,7 @@ perf-list - List all symbolic event types
SYNOPSIS
--------
[verse]
-'perf list' [hw|sw|cache|tracepoint|event_glob]
+'perf list' [hw|sw|cache|tracepoint|pmu|event_glob]
DESCRIPTION
-----------
@@ -29,6 +29,8 @@ counted. The following modifiers exist:
G - guest counting (in KVM guests)
H - host counting (not in KVM guests)
p - precise level
+ S - read sample value (PERF_SAMPLE_READ)
+ D - pin the event to the PMU
The 'p' modifier can be used for specifying how precise the instruction
address should be. The 'p' modifier can be specified multiple times:
@@ -104,6 +106,8 @@ To limit the list use:
'subsys_glob:event_glob' to filter by tracepoint subsystems such as sched,
block, etc.
+. 'pmu' to print the kernel supplied PMU events.
+
. If none of the above is matched, it will apply the supplied glob to all
events, printing the ones that match.
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 66dab7410c1..2b8097ee39d 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -115,7 +115,7 @@ OPTIONS
--dump-raw-trace::
Dump raw trace in ASCII.
--g [type,min[,limit],order]::
+-g [type,min[,limit],order[,key]]::
--call-graph::
Display call chains using type, min percent threshold, optional print
limit and order.
@@ -129,12 +129,21 @@ OPTIONS
- callee: callee based call graph.
- caller: inverted caller based call graph.
- Default: fractal,0.5,callee.
+ key can be:
+ - function: compare on functions
+ - address: compare on individual code addresses
+
+ Default: fractal,0.5,callee,function.
-G::
--inverted::
alias for inverted caller based call graph.
+--ignore-callees=<regex>::
+ Ignore callees of the function(s) matching the given regex.
+ This has the effect of collecting the callers of each such
+ function into one place in the call-graph tree.
+
--pretty=<key>::
Pretty printing style. key: normal, raw
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 2fe87fb558f..73c9759005a 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -132,6 +132,11 @@ is a useful mode to detect imbalance between physical cores. To enable this mod
use --per-core in addition to -a. (system-wide). The output includes the
core number and the number of online logical processors on that physical processor.
+-D msecs::
+--initial-delay msecs::
+After starting the program, wait msecs before measuring. This is useful to
+filter out the startup phase of the program, which is often very different.
+
EXAMPLES
--------
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 7fdd1909e37..58d6598a968 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -155,6 +155,11 @@ Default is to monitor all CPUS.
Default: fractal,0.5,callee.
+--ignore-callees=<regex>::
+ Ignore callees of the function(s) matching the given regex.
+ This has the effect of collecting the callers of each such
+ function into one place in the call-graph tree.
+
--percent-limit::
Do not show entries which have an overhead under that percent.
(Default: 0).
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 68718ccdd17..daccd2c0a48 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -23,25 +23,45 @@ analysis phases.
OPTIONS
-------
+-a::
--all-cpus::
System-wide collection from all CPUs.
+-e::
+--expr::
+ List of events to show, currently only syscall names.
+ Prefixing with ! shows all syscalls but the ones specified. You may
+ need to escape it.
+
+-o::
+--output=::
+ Output file name.
+
-p::
--pid=::
Record events on existing process ID (comma separated list).
+-t::
--tid=::
Record events on existing thread ID (comma separated list).
+-u::
--uid=::
Record events in threads owned by uid. Name or number.
+-v::
+--verbose=::
+ Verbosity level.
+
+-i::
--no-inherit::
Child tasks do not inherit counters.
+-m::
--mmap-pages=::
Number of mmap data pages. Must be a power of two.
+-C::
--cpu::
Collect samples only on the list of CPUs provided. Multiple CPUs can be provided as a
comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2.
@@ -54,6 +74,10 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
--sched:
Accrue thread runtime and provide a summary at the end of the session.
+-i::
+--input=::
+ Process events from a given perf data file.
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-script[1]
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 641fccddb24..c5dc1ad1b8d 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -124,7 +124,7 @@ strip-libs = $(filter-out -l%,$(1))
ifneq ($(OUTPUT),)
TE_PATH=$(OUTPUT)
ifneq ($(subdir),)
- LK_PATH=$(objtree)/lib/lk/
+ LK_PATH=$(OUTPUT)/../lib/lk/
else
LK_PATH=$(OUTPUT)
endif
@@ -281,7 +281,7 @@ LIB_H += util/cpumap.h
LIB_H += util/top.h
LIB_H += $(ARCH_INCLUDE)
LIB_H += util/cgroup.h
-LIB_H += $(TRACE_EVENT_DIR)event-parse.h
+LIB_H += $(LIB_INCLUDE)traceevent/event-parse.h
LIB_H += util/target.h
LIB_H += util/rblist.h
LIB_H += util/intlist.h
@@ -360,6 +360,7 @@ LIB_OBJS += $(OUTPUT)util/rblist.o
LIB_OBJS += $(OUTPUT)util/intlist.o
LIB_OBJS += $(OUTPUT)util/vdso.o
LIB_OBJS += $(OUTPUT)util/stat.o
+LIB_OBJS += $(OUTPUT)util/record.o
LIB_OBJS += $(OUTPUT)ui/setup.o
LIB_OBJS += $(OUTPUT)ui/helpline.o
@@ -389,6 +390,10 @@ LIB_OBJS += $(OUTPUT)tests/bp_signal.o
LIB_OBJS += $(OUTPUT)tests/bp_signal_overflow.o
LIB_OBJS += $(OUTPUT)tests/task-exit.o
LIB_OBJS += $(OUTPUT)tests/sw-clock.o
+ifeq ($(ARCH),x86)
+LIB_OBJS += $(OUTPUT)tests/perf-time-to-tsc.o
+endif
+LIB_OBJS += $(OUTPUT)tests/code-reading.o
BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
@@ -434,6 +439,7 @@ PERFLIBS = $(LIB_FILE) $(LIBLK) $(LIBTRACEEVENT)
ifneq ($(OUTPUT),)
CFLAGS += -I$(OUTPUT)
endif
+LIB_OBJS += $(OUTPUT)tests/sample-parsing.o
ifdef NO_LIBELF
EXTLIBS := $(filter-out -lelf,$(EXTLIBS))
@@ -459,6 +465,7 @@ endif # NO_LIBELF
ifndef NO_LIBUNWIND
LIB_OBJS += $(OUTPUT)util/unwind.o
endif
+LIB_OBJS += $(OUTPUT)tests/keep-tracking.o
ifndef NO_LIBAUDIT
BUILTIN_OBJS += $(OUTPUT)builtin-trace.o
@@ -631,10 +638,10 @@ $(OUTPUT)util/parse-events.o: util/parse-events.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-redundant-decls $<
$(OUTPUT)util/scripting-engines/trace-event-perl.o: util/scripting-engines/trace-event-perl.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-undef -Wno-switch-default $<
$(OUTPUT)scripts/perl/Perf-Trace-Util/Context.o: scripts/perl/Perf-Trace-Util/Context.c $(OUTPUT)PERF-CFLAGS
- $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs $<
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-nested-externs -Wno-undef -Wno-switch-default $<
$(OUTPUT)util/scripting-engines/trace-event-python.o: util/scripting-engines/trace-event-python.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow $<
@@ -762,17 +769,21 @@ check: $(OUTPUT)common-cmds.h
install-bin: all
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'
$(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'
+ $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
+ifndef NO_LIBPERL
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
- $(INSTALL) $(OUTPUT)perf-archive -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)'
$(INSTALL) scripts/perl/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/Perf-Trace-Util/lib/Perf/Trace'
$(INSTALL) scripts/perl/*.pl -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl'
$(INSTALL) scripts/perl/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/perl/bin'
+endif
+ifndef NO_LIBPYTHON
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
$(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace'
$(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python'
$(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin'
+endif
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d'
$(INSTALL) bash_completion '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf'
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'
diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile
index 815841c04eb..8801fe02f20 100644
--- a/tools/perf/arch/x86/Makefile
+++ b/tools/perf/arch/x86/Makefile
@@ -6,3 +6,5 @@ ifndef NO_LIBUNWIND
LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind.o
endif
LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/tsc.o
+LIB_H += arch/$(ARCH)/util/tsc.h
diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c
new file mode 100644
index 00000000000..9570c2b0f83
--- /dev/null
+++ b/tools/perf/arch/x86/util/tsc.c
@@ -0,0 +1,59 @@
+#include <stdbool.h>
+#include <errno.h>
+
+#include <linux/perf_event.h>
+
+#include "../../perf.h"
+#include "../../util/types.h"
+#include "../../util/debug.h"
+#include "tsc.h"
+
+u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc)
+{
+ u64 t, quot, rem;
+
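+ /*
+ * cyc = t * 2^shift / mult, computed via quotient and remainder
+ * w.r.t. mult to limit intermediate overflow.
+ */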
+ t = ns - tc->time_zero;
+ quot = t / tc->time_mult;
+ rem = t % tc->time_mult;
+ return (quot << tc->time_shift) +
+ (rem << tc->time_shift) / tc->time_mult;
+}
+
+u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc)
+{
+ u64 quot, rem;
+
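+ /* inverse: ns = zero + cyc * mult / 2^shift, again split to limit overflow */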
+ quot = cyc >> tc->time_shift;
+ rem = cyc & ((1 << tc->time_shift) - 1);
+ return tc->time_zero + quot * tc->time_mult +
+ ((rem * tc->time_mult) >> tc->time_shift);
+}
+
+int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
+ struct perf_tsc_conversion *tc)
+{
+ bool cap_usr_time_zero;
+ u32 seq;
+ int i = 0;
+
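+ /*
+ * Seqlock-style read: retry if the sequence count changed or is odd,
+ * i.e. the kernel was concurrently updating the page.
+ */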
+ while (1) {
+ seq = pc->lock;
+ rmb();
+ tc->time_mult = pc->time_mult;
+ tc->time_shift = pc->time_shift;
+ tc->time_zero = pc->time_zero;
+ cap_usr_time_zero = pc->cap_usr_time_zero;
+ rmb();
+ if (pc->lock == seq && !(seq & 1))
+ break;
+ if (++i > 10000) {
+ pr_debug("failed to get perf_event_mmap_page lock\n");
+ return -EINVAL;
+ }
+ }
+
+ if (!cap_usr_time_zero)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
diff --git a/tools/perf/arch/x86/util/tsc.h b/tools/perf/arch/x86/util/tsc.h
new file mode 100644
index 00000000000..a24dec81c79
--- /dev/null
+++ b/tools/perf/arch/x86/util/tsc.h
@@ -0,0 +1,20 @@
+#ifndef TOOLS_PERF_ARCH_X86_UTIL_TSC_H__
+#define TOOLS_PERF_ARCH_X86_UTIL_TSC_H__
+
+#include "../../util/types.h"
+
+struct perf_tsc_conversion {
+ u16 time_shift;
+ u32 time_mult;
+ u64 time_zero;
+};
+
+struct perf_event_mmap_page;
+
+int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
+ struct perf_tsc_conversion *tc);
+
+u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc);
+u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc);
+
+#endif /* TOOLS_PERF_ARCH_X86_UTIL_TSC_H__ */
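
The helpers above implement ns = time_zero + cyc * time_mult / 2^time_shift and
its inverse, using the mult/shift/zero parameters the kernel exports through the
perf mmap page. A small standalone sketch (hypothetical parameter values, not
part of the patch) shows the round trip:

  #include <stdio.h>
  #include <stdint.h>
  #include <inttypes.h>

  struct tsc_conv {               /* mirrors struct perf_tsc_conversion */
      uint16_t shift;
      uint32_t mult;
      uint64_t zero;
  };

  static uint64_t time_to_tsc(uint64_t ns, const struct tsc_conv *c)
  {
      uint64_t t = ns - c->zero;
      uint64_t quot = t / c->mult, rem = t % c->mult;

      return (quot << c->shift) + (rem << c->shift) / c->mult;
  }

  static uint64_t tsc_to_time(uint64_t cyc, const struct tsc_conv *c)
  {
      uint64_t quot = cyc >> c->shift;
      uint64_t rem = cyc & ((1ULL << c->shift) - 1);

      return c->zero + quot * c->mult + (rem * c->mult >> c->shift);
  }

  int main(void)
  {
      /* made-up parameters: mult/2^shift ~ 0.39 ns per cycle (~2.5 GHz) */
      struct tsc_conv c = { .shift = 10, .mult = 400, .zero = 123456789ULL };
      uint64_t ns = 1000000000ULL;
      uint64_t cyc = time_to_tsc(ns, &c);

      /* the round trip is exact up to integer truncation */
      printf("ns=%" PRIu64 " cyc=%" PRIu64 " back=%" PRIu64 "\n",
             ns, cyc, tsc_to_time(cyc, &c));
      return 0;
  }

With these parameters the reconstructed timestamp comes back within a nanosecond
of the input; the only loss is integer truncation.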
diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c
index 25fd3f1966f..8cdca43016b 100644
--- a/tools/perf/bench/mem-memcpy.c
+++ b/tools/perf/bench/mem-memcpy.c
@@ -117,6 +117,8 @@ static void alloc_mem(void **dst, void **src, size_t length)
*src = zalloc(length);
if (!*src)
die("memory allocation failed - maybe length is too large?\n");
+ /* Make sure to always replace the zero pages even if MMAP_THRESH is crossed */
+ memset(*src, 0, length);
}
static u64 do_memcpy_cycle(memcpy_t fn, size_t len, bool prefault)
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index db491e9a812..f988d380c52 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -90,8 +90,7 @@ static int process_sample_event(struct perf_tool *tool,
struct perf_annotate *ann = container_of(tool, struct perf_annotate, tool);
struct addr_location al;
- if (perf_event__preprocess_sample(event, machine, &al, sample,
- symbol__annotate_init) < 0) {
+ if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
@@ -195,6 +194,8 @@ static int __cmd_annotate(struct perf_annotate *ann)
if (session == NULL)
return -ENOMEM;
+ machines__set_symbol_filter(&session->machines, symbol__annotate_init);
+
if (ann->cpu_list) {
ret = perf_session__cpu_bitmap(session, ann->cpu_list,
ann->cpu_bitmap);
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 0aac5f3e594..f28799e94f2 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -18,15 +18,53 @@
#include "util/util.h"
#include <stdlib.h>
+#include <math.h>
-static char const *input_old = "perf.data.old",
- *input_new = "perf.data";
-static char diff__default_sort_order[] = "dso,symbol";
-static bool force;
+/* Diff command specific HPP columns. */
+enum {
+ PERF_HPP_DIFF__BASELINE,
+ PERF_HPP_DIFF__PERIOD,
+ PERF_HPP_DIFF__PERIOD_BASELINE,
+ PERF_HPP_DIFF__DELTA,
+ PERF_HPP_DIFF__RATIO,
+ PERF_HPP_DIFF__WEIGHTED_DIFF,
+ PERF_HPP_DIFF__FORMULA,
+
+ PERF_HPP_DIFF__MAX_INDEX
+};
+
+struct diff_hpp_fmt {
+ struct perf_hpp_fmt fmt;
+ int idx;
+ char *header;
+ int header_width;
+};
+
+struct data__file {
+ struct perf_session *session;
+ const char *file;
+ int idx;
+ struct hists *hists;
+ struct diff_hpp_fmt fmt[PERF_HPP_DIFF__MAX_INDEX];
+};
+
+static struct data__file *data__files;
+static int data__files_cnt;
+
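+/*
+ * Iterate over data__files[s .. data__files_cnt - 1]; the _new variant
+ * skips the baseline file at index 0.
+ */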
+#define data__for_each_file_start(i, d, s) \
+ for (i = s, d = &data__files[s]; \
+ i < data__files_cnt; \
+ i++, d = &data__files[i])
+
+#define data__for_each_file(i, d) data__for_each_file_start(i, d, 0)
+#define data__for_each_file_new(i, d) data__for_each_file_start(i, d, 1)
+
+static char diff__default_sort_order[] = "dso,symbol";
+static bool force;
static bool show_period;
static bool show_formula;
static bool show_baseline_only;
-static bool sort_compute;
+static unsigned int sort_compute;
static s64 compute_wdiff_w1;
static s64 compute_wdiff_w2;
@@ -46,6 +84,47 @@ const char *compute_names[COMPUTE_MAX] = {
static int compute;
+static int compute_2_hpp[COMPUTE_MAX] = {
+ [COMPUTE_DELTA] = PERF_HPP_DIFF__DELTA,
+ [COMPUTE_RATIO] = PERF_HPP_DIFF__RATIO,
+ [COMPUTE_WEIGHTED_DIFF] = PERF_HPP_DIFF__WEIGHTED_DIFF,
+};
+
+#define MAX_COL_WIDTH 70
+
+static struct header_column {
+ const char *name;
+ int width;
+} columns[PERF_HPP_DIFF__MAX_INDEX] = {
+ [PERF_HPP_DIFF__BASELINE] = {
+ .name = "Baseline",
+ },
+ [PERF_HPP_DIFF__PERIOD] = {
+ .name = "Period",
+ .width = 14,
+ },
+ [PERF_HPP_DIFF__PERIOD_BASELINE] = {
+ .name = "Base period",
+ .width = 14,
+ },
+ [PERF_HPP_DIFF__DELTA] = {
+ .name = "Delta",
+ .width = 7,
+ },
+ [PERF_HPP_DIFF__RATIO] = {
+ .name = "Ratio",
+ .width = 14,
+ },
+ [PERF_HPP_DIFF__WEIGHTED_DIFF] = {
+ .name = "Weighted diff",
+ .width = 14,
+ },
+ [PERF_HPP_DIFF__FORMULA] = {
+ .name = "Formula",
+ .width = MAX_COL_WIDTH,
+ }
+};
+
static int setup_compute_opt_wdiff(char *opt)
{
char *w1_str = opt;
@@ -109,13 +188,6 @@ static int setup_compute(const struct option *opt, const char *str,
return 0;
}
- if (*str == '+') {
- sort_compute = true;
- cstr = (char *) ++str;
- if (!*str)
- return 0;
- }
-
option = strchr(str, ':');
if (option) {
unsigned len = option++ - str;
@@ -145,42 +217,42 @@ static int setup_compute(const struct option *opt, const char *str,
return -EINVAL;
}
-double perf_diff__period_percent(struct hist_entry *he, u64 period)
+static double period_percent(struct hist_entry *he, u64 period)
{
u64 total = he->hists->stats.total_period;
return (period * 100.0) / total;
}
-double perf_diff__compute_delta(struct hist_entry *he, struct hist_entry *pair)
+static double compute_delta(struct hist_entry *he, struct hist_entry *pair)
{
- double new_percent = perf_diff__period_percent(he, he->stat.period);
- double old_percent = perf_diff__period_percent(pair, pair->stat.period);
+ double old_percent = period_percent(he, he->stat.period);
+ double new_percent = period_percent(pair, pair->stat.period);
- he->diff.period_ratio_delta = new_percent - old_percent;
- he->diff.computed = true;
- return he->diff.period_ratio_delta;
+ pair->diff.period_ratio_delta = new_percent - old_percent;
+ pair->diff.computed = true;
+ return pair->diff.period_ratio_delta;
}
-double perf_diff__compute_ratio(struct hist_entry *he, struct hist_entry *pair)
+static double compute_ratio(struct hist_entry *he, struct hist_entry *pair)
{
- double new_period = he->stat.period;
- double old_period = pair->stat.period;
+ double old_period = he->stat.period ?: 1;
+ double new_period = pair->stat.period;
- he->diff.computed = true;
- he->diff.period_ratio = new_period / old_period;
- return he->diff.period_ratio;
+ pair->diff.computed = true;
+ pair->diff.period_ratio = new_period / old_period;
+ return pair->diff.period_ratio;
}
-s64 perf_diff__compute_wdiff(struct hist_entry *he, struct hist_entry *pair)
+static s64 compute_wdiff(struct hist_entry *he, struct hist_entry *pair)
{
- u64 new_period = he->stat.period;
- u64 old_period = pair->stat.period;
+ u64 old_period = he->stat.period;
+ u64 new_period = pair->stat.period;
- he->diff.computed = true;
- he->diff.wdiff = new_period * compute_wdiff_w2 -
- old_period * compute_wdiff_w1;
+ pair->diff.computed = true;
+ pair->diff.wdiff = new_period * compute_wdiff_w2 -
+ old_period * compute_wdiff_w1;
- return he->diff.wdiff;
+ return pair->diff.wdiff;
}
static int formula_delta(struct hist_entry *he, struct hist_entry *pair,
@@ -189,15 +261,15 @@ static int formula_delta(struct hist_entry *he, struct hist_entry *pair,
return scnprintf(buf, size,
"(%" PRIu64 " * 100 / %" PRIu64 ") - "
"(%" PRIu64 " * 100 / %" PRIu64 ")",
- he->stat.period, he->hists->stats.total_period,
- pair->stat.period, pair->hists->stats.total_period);
+ pair->stat.period, pair->hists->stats.total_period,
+ he->stat.period, he->hists->stats.total_period);
}
static int formula_ratio(struct hist_entry *he, struct hist_entry *pair,
char *buf, size_t size)
{
- double new_period = he->stat.period;
- double old_period = pair->stat.period;
+ double old_period = he->stat.period;
+ double new_period = pair->stat.period;
return scnprintf(buf, size, "%.0F / %.0F", new_period, old_period);
}
@@ -205,16 +277,16 @@ static int formula_ratio(struct hist_entry *he, struct hist_entry *pair,
static int formula_wdiff(struct hist_entry *he, struct hist_entry *pair,
char *buf, size_t size)
{
- u64 new_period = he->stat.period;
- u64 old_period = pair->stat.period;
+ u64 old_period = he->stat.period;
+ u64 new_period = pair->stat.period;
return scnprintf(buf, size,
"(%" PRIu64 " * " "%" PRId64 ") - (%" PRIu64 " * " "%" PRId64 ")",
new_period, compute_wdiff_w2, old_period, compute_wdiff_w1);
}
-int perf_diff__formula(struct hist_entry *he, struct hist_entry *pair,
- char *buf, size_t size)
+static int formula_fprintf(struct hist_entry *he, struct hist_entry *pair,
+ char *buf, size_t size)
{
switch (compute) {
case COMPUTE_DELTA:
@@ -247,7 +319,7 @@ static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
{
struct addr_location al;
- if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) {
+ if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
pr_warning("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
@@ -299,6 +371,29 @@ static void perf_evlist__collapse_resort(struct perf_evlist *evlist)
}
}
+static struct hist_entry*
+get_pair_data(struct hist_entry *he, struct data__file *d)
+{
+ if (hist_entry__has_pairs(he)) {
+ struct hist_entry *pair;
+
+ list_for_each_entry(pair, &he->pairs.head, pairs.node)
+ if (pair->hists == d->hists)
+ return pair;
+ }
+
+ return NULL;
+}
+
+static struct hist_entry*
+get_pair_fmt(struct hist_entry *he, struct diff_hpp_fmt *dfmt)
+{
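+ /*
+ * dfmt is &d->fmt[dfmt->idx]; stepping back idx elements gives &d->fmt[0],
+ * from which container_of() recovers the owning data__file.
+ */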
+ void *ptr = dfmt - dfmt->idx;
+ struct data__file *d = container_of(ptr, struct data__file, fmt);
+
+ return get_pair_data(he, d);
+}
+
static void hists__baseline_only(struct hists *hists)
{
struct rb_root *root;
@@ -333,22 +428,24 @@ static void hists__precompute(struct hists *hists)
next = rb_first(root);
while (next != NULL) {
- struct hist_entry *he = rb_entry(next, struct hist_entry, rb_node_in);
- struct hist_entry *pair = hist_entry__next_pair(he);
+ struct hist_entry *he, *pair;
+ he = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&he->rb_node_in);
+
+ pair = get_pair_data(he, &data__files[sort_compute]);
if (!pair)
continue;
switch (compute) {
case COMPUTE_DELTA:
- perf_diff__compute_delta(he, pair);
+ compute_delta(he, pair);
break;
case COMPUTE_RATIO:
- perf_diff__compute_ratio(he, pair);
+ compute_ratio(he, pair);
break;
case COMPUTE_WEIGHTED_DIFF:
- perf_diff__compute_wdiff(he, pair);
+ compute_wdiff(he, pair);
break;
default:
BUG_ON(1);
@@ -367,7 +464,7 @@ static int64_t cmp_doubles(double l, double r)
}
static int64_t
-hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
+__hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
int c)
{
switch (c) {
@@ -399,6 +496,36 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
return 0;
}
+static int64_t
+hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
+ int c)
+{
+ bool pairs_left = hist_entry__has_pairs(left);
+ bool pairs_right = hist_entry__has_pairs(right);
+ struct hist_entry *p_right, *p_left;
+
+ if (!pairs_left && !pairs_right)
+ return 0;
+
+ if (!pairs_left || !pairs_right)
+ return pairs_left ? -1 : 1;
+
+ p_left = get_pair_data(left, &data__files[sort_compute]);
+ p_right = get_pair_data(right, &data__files[sort_compute]);
+
+ if (!p_left && !p_right)
+ return 0;
+
+ if (!p_left || !p_right)
+ return p_left ? -1 : 1;
+
+ /*
+ * We have 2 entries of same kind, let's
+ * make the data comparison.
+ */
+ return __hist_entry__cmp_compute(p_left, p_right, c);
+}
+
static void insert_hist_entry_by_compute(struct rb_root *root,
struct hist_entry *he,
int c)
@@ -448,75 +575,121 @@ static void hists__compute_resort(struct hists *hists)
}
}
-static void hists__process(struct hists *old, struct hists *new)
+static void hists__process(struct hists *hists)
{
- hists__match(new, old);
-
if (show_baseline_only)
- hists__baseline_only(new);
- else
- hists__link(new, old);
+ hists__baseline_only(hists);
if (sort_compute) {
- hists__precompute(new);
- hists__compute_resort(new);
+ hists__precompute(hists);
+ hists__compute_resort(hists);
} else {
- hists__output_resort(new);
+ hists__output_resort(hists);
}
- hists__fprintf(new, true, 0, 0, 0, stdout);
+ hists__fprintf(hists, true, 0, 0, 0, stdout);
}
-static int __cmd_diff(void)
+static void data__fprintf(void)
{
- int ret, i;
-#define older (session[0])
-#define newer (session[1])
- struct perf_session *session[2];
- struct perf_evlist *evlist_new, *evlist_old;
- struct perf_evsel *evsel;
+ struct data__file *d;
+ int i;
+
+ fprintf(stdout, "# Data files:\n");
+
+ data__for_each_file(i, d)
+ fprintf(stdout, "# [%d] %s %s\n",
+ d->idx, d->file,
+ !d->idx ? "(Baseline)" : "");
+
+ fprintf(stdout, "#\n");
+}
+
+static void data_process(void)
+{
+ struct perf_evlist *evlist_base = data__files[0].session->evlist;
+ struct perf_evsel *evsel_base;
bool first = true;
- older = perf_session__new(input_old, O_RDONLY, force, false,
- &tool);
- newer = perf_session__new(input_new, O_RDONLY, force, false,
- &tool);
- if (session[0] == NULL || session[1] == NULL)
- return -ENOMEM;
+ list_for_each_entry(evsel_base, &evlist_base->entries, node) {
+ struct data__file *d;
+ int i;
- for (i = 0; i < 2; ++i) {
- ret = perf_session__process_events(session[i], &tool);
- if (ret)
- goto out_delete;
- }
+ data__for_each_file_new(i, d) {
+ struct perf_evlist *evlist = d->session->evlist;
+ struct perf_evsel *evsel;
- evlist_old = older->evlist;
- evlist_new = newer->evlist;
+ evsel = evsel_match(evsel_base, evlist);
+ if (!evsel)
+ continue;
- perf_evlist__collapse_resort(evlist_old);
- perf_evlist__collapse_resort(evlist_new);
+ d->hists = &evsel->hists;
- list_for_each_entry(evsel, &evlist_new->entries, node) {
- struct perf_evsel *evsel_old;
+ hists__match(&evsel_base->hists, &evsel->hists);
- evsel_old = evsel_match(evsel, evlist_old);
- if (!evsel_old)
- continue;
+ if (!show_baseline_only)
+ hists__link(&evsel_base->hists,
+ &evsel->hists);
+ }
fprintf(stdout, "%s# Event '%s'\n#\n", first ? "" : "\n",
- perf_evsel__name(evsel));
+ perf_evsel__name(evsel_base));
first = false;
- hists__process(&evsel_old->hists, &evsel->hists);
+ if (verbose || data__files_cnt > 2)
+ data__fprintf();
+
+ hists__process(&evsel_base->hists);
+ }
+}
+
+static void data__free(struct data__file *d)
+{
+ int col;
+
+ for (col = 0; col < PERF_HPP_DIFF__MAX_INDEX; col++) {
+ struct diff_hpp_fmt *fmt = &d->fmt[col];
+
+ free(fmt->header);
}
+}
-out_delete:
- for (i = 0; i < 2; ++i)
- perf_session__delete(session[i]);
+static int __cmd_diff(void)
+{
+ struct data__file *d;
+ int ret = -EINVAL, i;
+
+ data__for_each_file(i, d) {
+ d->session = perf_session__new(d->file, O_RDONLY, force,
+ false, &tool);
+ if (!d->session) {
+ pr_err("Failed to open %s\n", d->file);
+ ret = -ENOMEM;
+ goto out_delete;
+ }
+
+ ret = perf_session__process_events(d->session, &tool);
+ if (ret) {
+ pr_err("Failed to process %s\n", d->file);
+ goto out_delete;
+ }
+
+ perf_evlist__collapse_resort(d->session->evlist);
+ }
+
+ data_process();
+
+ out_delete:
+ data__for_each_file(i, d) {
+ if (d->session)
+ perf_session__delete(d->session);
+
+ data__free(d);
+ }
+
+ free(data__files);
return ret;
-#undef older
-#undef newer
}
static const char * const diff_usage[] = {
@@ -555,61 +728,310 @@ static const struct option options[] = {
"columns '.' is reserved."),
OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
"Look for files with symbols relative to this directory"),
+ OPT_UINTEGER('o', "order", &sort_compute, "Specify compute sorting."),
OPT_END()
};
-static void ui_init(void)
+static double baseline_percent(struct hist_entry *he)
{
- /*
- * Display baseline/delta/ratio
- * formula/periods columns.
- */
- perf_hpp__column_enable(PERF_HPP__BASELINE);
+ struct hists *hists = he->hists;
+ return 100.0 * he->stat.period / hists->stats.total_period;
+}
- switch (compute) {
- case COMPUTE_DELTA:
- perf_hpp__column_enable(PERF_HPP__DELTA);
+static int hpp__color_baseline(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp, struct hist_entry *he)
+{
+ struct diff_hpp_fmt *dfmt =
+ container_of(fmt, struct diff_hpp_fmt, fmt);
+ double percent = baseline_percent(he);
+ char pfmt[20] = " ";
+
+ if (!he->dummy) {
+ scnprintf(pfmt, 20, "%%%d.2f%%%%", dfmt->header_width - 1);
+ return percent_color_snprintf(hpp->buf, hpp->size,
+ pfmt, percent);
+ } else
+ return scnprintf(hpp->buf, hpp->size, "%*s",
+ dfmt->header_width, pfmt);
+}
+
+static int hpp__entry_baseline(struct hist_entry *he, char *buf, size_t size)
+{
+ double percent = baseline_percent(he);
+ const char *fmt = symbol_conf.field_sep ? "%.2f" : "%6.2f%%";
+ int ret = 0;
+
+ if (!he->dummy)
+ ret = scnprintf(buf, size, fmt, percent);
+
+ return ret;
+}
+
+static void
+hpp__entry_unpair(struct hist_entry *he, int idx, char *buf, size_t size)
+{
+ switch (idx) {
+ case PERF_HPP_DIFF__PERIOD_BASELINE:
+ scnprintf(buf, size, "%" PRIu64, he->stat.period);
break;
- case COMPUTE_RATIO:
- perf_hpp__column_enable(PERF_HPP__RATIO);
+
+ default:
break;
- case COMPUTE_WEIGHTED_DIFF:
- perf_hpp__column_enable(PERF_HPP__WEIGHTED_DIFF);
+ }
+}
+
+static void
+hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair,
+ int idx, char *buf, size_t size)
+{
+ double diff;
+ double ratio;
+ s64 wdiff;
+
+ switch (idx) {
+ case PERF_HPP_DIFF__DELTA:
+ if (pair->diff.computed)
+ diff = pair->diff.period_ratio_delta;
+ else
+ diff = compute_delta(he, pair);
+
+ if (fabs(diff) >= 0.01)
+ scnprintf(buf, size, "%+4.2F%%", diff);
+ break;
+
+ case PERF_HPP_DIFF__RATIO:
+ /* No point showing a ratio for a dummy entry. */
+ if (he->dummy)
+ break;
+
+ if (pair->diff.computed)
+ ratio = pair->diff.period_ratio;
+ else
+ ratio = compute_ratio(he, pair);
+
+ if (ratio > 0.0)
+ scnprintf(buf, size, "%14.6F", ratio);
+ break;
+
+ case PERF_HPP_DIFF__WEIGHTED_DIFF:
+ /* No point showing a wdiff for a dummy entry. */
+ if (he->dummy)
+ break;
+
+ if (pair->diff.computed)
+ wdiff = pair->diff.wdiff;
+ else
+ wdiff = compute_wdiff(he, pair);
+
+ if (wdiff != 0)
+ scnprintf(buf, size, "%14ld", wdiff);
+ break;
+
+ case PERF_HPP_DIFF__FORMULA:
+ formula_fprintf(he, pair, buf, size);
+ break;
+
+ case PERF_HPP_DIFF__PERIOD:
+ scnprintf(buf, size, "%" PRIu64, pair->stat.period);
break;
+
default:
BUG_ON(1);
};
+}
+
+static void
+__hpp__entry_global(struct hist_entry *he, struct diff_hpp_fmt *dfmt,
+ char *buf, size_t size)
+{
+ struct hist_entry *pair = get_pair_fmt(he, dfmt);
+ int idx = dfmt->idx;
+
+ /* baseline is special */
+ if (idx == PERF_HPP_DIFF__BASELINE)
+ hpp__entry_baseline(he, buf, size);
+ else {
+ if (pair)
+ hpp__entry_pair(he, pair, idx, buf, size);
+ else
+ hpp__entry_unpair(he, idx, buf, size);
+ }
+}
+
+static int hpp__entry_global(struct perf_hpp_fmt *_fmt, struct perf_hpp *hpp,
+ struct hist_entry *he)
+{
+ struct diff_hpp_fmt *dfmt =
+ container_of(_fmt, struct diff_hpp_fmt, fmt);
+ char buf[MAX_COL_WIDTH] = " ";
+
+ __hpp__entry_global(he, dfmt, buf, MAX_COL_WIDTH);
+
+ if (symbol_conf.field_sep)
+ return scnprintf(hpp->buf, hpp->size, "%s", buf);
+ else
+ return scnprintf(hpp->buf, hpp->size, "%*s",
+ dfmt->header_width, buf);
+}
+
+static int hpp__header(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp)
+{
+ struct diff_hpp_fmt *dfmt =
+ container_of(fmt, struct diff_hpp_fmt, fmt);
- if (show_formula)
- perf_hpp__column_enable(PERF_HPP__FORMULA);
+ BUG_ON(!dfmt->header);
+ return scnprintf(hpp->buf, hpp->size, dfmt->header);
+}
- if (show_period) {
- perf_hpp__column_enable(PERF_HPP__PERIOD);
- perf_hpp__column_enable(PERF_HPP__PERIOD_BASELINE);
+static int hpp__width(struct perf_hpp_fmt *fmt,
+ struct perf_hpp *hpp __maybe_unused)
+{
+ struct diff_hpp_fmt *dfmt =
+ container_of(fmt, struct diff_hpp_fmt, fmt);
+
+ BUG_ON(dfmt->header_width <= 0);
+ return dfmt->header_width;
+}
+
+static void init_header(struct data__file *d, struct diff_hpp_fmt *dfmt)
+{
+#define MAX_HEADER_NAME 100
+ char buf_indent[MAX_HEADER_NAME];
+ char buf[MAX_HEADER_NAME];
+ const char *header = NULL;
+ int width = 0;
+
+ BUG_ON(dfmt->idx >= PERF_HPP_DIFF__MAX_INDEX);
+ header = columns[dfmt->idx].name;
+ width = columns[dfmt->idx].width;
+
+ /* Only our defined HPP fmts should appear here. */
+ BUG_ON(!header);
+
+ if (data__files_cnt > 2)
+ scnprintf(buf, MAX_HEADER_NAME, "%s/%d", header, d->idx);
+
+#define NAME (data__files_cnt > 2 ? buf : header)
+ dfmt->header_width = width;
+ width = (int) strlen(NAME);
+ if (dfmt->header_width < width)
+ dfmt->header_width = width;
+
+ scnprintf(buf_indent, MAX_HEADER_NAME, "%*s",
+ dfmt->header_width, NAME);
+
+ dfmt->header = strdup(buf_indent);
+#undef MAX_HEADER_NAME
+#undef NAME
+}
+
+static void data__hpp_register(struct data__file *d, int idx)
+{
+ struct diff_hpp_fmt *dfmt = &d->fmt[idx];
+ struct perf_hpp_fmt *fmt = &dfmt->fmt;
+
+ dfmt->idx = idx;
+
+ fmt->header = hpp__header;
+ fmt->width = hpp__width;
+ fmt->entry = hpp__entry_global;
+
+ /* TODO more colors */
+ if (idx == PERF_HPP_DIFF__BASELINE)
+ fmt->color = hpp__color_baseline;
+
+ init_header(d, dfmt);
+ perf_hpp__column_register(fmt);
+}
+
+static void ui_init(void)
+{
+ struct data__file *d;
+ int i;
+
+ data__for_each_file(i, d) {
+
+ /*
+ * Baseline or compute related columns:
+ *
+ * PERF_HPP_DIFF__BASELINE
+ * PERF_HPP_DIFF__DELTA
+ * PERF_HPP_DIFF__RATIO
+ * PERF_HPP_DIFF__WEIGHTED_DIFF
+ */
+ data__hpp_register(d, i ? compute_2_hpp[compute] :
+ PERF_HPP_DIFF__BASELINE);
+
+ /*
+ * And the rest:
+ *
+ * PERF_HPP_DIFF__FORMULA
+ * PERF_HPP_DIFF__PERIOD
+ * PERF_HPP_DIFF__PERIOD_BASELINE
+ */
+ if (show_formula && i)
+ data__hpp_register(d, PERF_HPP_DIFF__FORMULA);
+
+ if (show_period)
+ data__hpp_register(d, i ? PERF_HPP_DIFF__PERIOD :
+ PERF_HPP_DIFF__PERIOD_BASELINE);
}
}
-int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
+static int data_init(int argc, const char **argv)
{
- sort_order = diff__default_sort_order;
- argc = parse_options(argc, argv, options, diff_usage, 0);
+ struct data__file *d;
+ static const char *defaults[] = {
+ "perf.data.old",
+ "perf.data",
+ };
+ bool use_default = true;
+ int i;
+
+ data__files_cnt = 2;
+
if (argc) {
- if (argc > 2)
- usage_with_options(diff_usage, options);
- if (argc == 2) {
- input_old = argv[0];
- input_new = argv[1];
- } else
- input_new = argv[0];
+ if (argc == 1)
+ defaults[1] = argv[0];
+ else {
+ data__files_cnt = argc;
+ use_default = false;
+ }
} else if (symbol_conf.default_guest_vmlinux_name ||
symbol_conf.default_guest_kallsyms) {
- input_old = "perf.data.host";
- input_new = "perf.data.guest";
+ defaults[0] = "perf.data.host";
+ defaults[1] = "perf.data.guest";
}
+ if (sort_compute >= (unsigned int) data__files_cnt) {
+ pr_err("Order option out of limit.\n");
+ return -EINVAL;
+ }
+
+ data__files = zalloc(sizeof(*data__files) * data__files_cnt);
+ if (!data__files)
+ return -ENOMEM;
+
+ data__for_each_file(i, d) {
+ d->file = use_default ? defaults[i] : argv[i];
+ d->idx = i;
+ }
+
+ return 0;
+}
+
+int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
+{
+ sort_order = diff__default_sort_order;
+ argc = parse_options(argc, argv, options, diff_usage, 0);
+
if (symbol__init() < 0)
return -1;
+ if (data_init(argc, argv) < 0)
+ return -1;
+
ui_init();
if (setup_sorting() < 0)
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index 84ad6abe425..9b336fdb6f7 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -38,8 +38,7 @@ struct event_entry {
};
static int perf_event__repipe_synth(struct perf_tool *tool,
- union perf_event *event,
- struct machine *machine __maybe_unused)
+ union perf_event *event)
{
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
uint32_t size;
@@ -65,39 +64,28 @@ static int perf_event__repipe_op2_synth(struct perf_tool *tool,
struct perf_session *session
__maybe_unused)
{
- return perf_event__repipe_synth(tool, event, NULL);
+ return perf_event__repipe_synth(tool, event);
}
-static int perf_event__repipe_event_type_synth(struct perf_tool *tool,
- union perf_event *event)
-{
- return perf_event__repipe_synth(tool, event, NULL);
-}
-
-static int perf_event__repipe_tracing_data_synth(union perf_event *event,
- struct perf_session *session
- __maybe_unused)
-{
- return perf_event__repipe_synth(NULL, event, NULL);
-}
-
-static int perf_event__repipe_attr(union perf_event *event,
- struct perf_evlist **pevlist __maybe_unused)
+static int perf_event__repipe_attr(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_evlist **pevlist)
{
int ret;
- ret = perf_event__process_attr(event, pevlist);
+
+ ret = perf_event__process_attr(tool, event, pevlist);
if (ret)
return ret;
- return perf_event__repipe_synth(NULL, event, NULL);
+ return perf_event__repipe_synth(tool, event);
}
static int perf_event__repipe(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
- struct machine *machine)
+ struct machine *machine __maybe_unused)
{
- return perf_event__repipe_synth(tool, event, machine);
+ return perf_event__repipe_synth(tool, event);
}
typedef int (*inject_handler)(struct perf_tool *tool,
@@ -119,7 +107,7 @@ static int perf_event__repipe_sample(struct perf_tool *tool,
build_id__mark_dso_hit(tool, event, sample, evsel, machine);
- return perf_event__repipe_synth(tool, event, machine);
+ return perf_event__repipe_synth(tool, event);
}
static int perf_event__repipe_mmap(struct perf_tool *tool,
@@ -148,13 +136,14 @@ static int perf_event__repipe_fork(struct perf_tool *tool,
return err;
}
-static int perf_event__repipe_tracing_data(union perf_event *event,
+static int perf_event__repipe_tracing_data(struct perf_tool *tool,
+ union perf_event *event,
struct perf_session *session)
{
int err;
- perf_event__repipe_synth(NULL, event, NULL);
- err = perf_event__process_tracing_data(event, session);
+ perf_event__repipe_synth(tool, event);
+ err = perf_event__process_tracing_data(tool, event, session);
return err;
}
@@ -209,7 +198,7 @@ static int perf_event__inject_buildid(struct perf_tool *tool,
cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- thread = machine__findnew_thread(machine, event->ip.pid);
+ thread = machine__findnew_thread(machine, sample->pid, sample->pid);
if (thread == NULL) {
pr_err("problem processing %d event, skipping it.\n",
event->header.type);
@@ -217,7 +206,7 @@ static int perf_event__inject_buildid(struct perf_tool *tool,
}
thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
- event->ip.ip, &al);
+ sample->ip, &al);
if (al.map != NULL) {
if (!al.map->dso->hit) {
@@ -312,7 +301,9 @@ found:
sample_sw.period = sample->period;
sample_sw.time = sample->time;
perf_event__synthesize_sample(event_sw, evsel->attr.sample_type,
- &sample_sw, false);
+ evsel->attr.sample_regs_user,
+ evsel->attr.read_format, &sample_sw,
+ false);
build_id__mark_dso_hit(tool, event_sw, &sample_sw, evsel, machine);
return perf_event__repipe(tool, event_sw, &sample_sw, machine);
}
@@ -407,8 +398,8 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
.throttle = perf_event__repipe,
.unthrottle = perf_event__repipe,
.attr = perf_event__repipe_attr,
- .event_type = perf_event__repipe_event_type_synth,
- .tracing_data = perf_event__repipe_tracing_data_synth,
+ .tracing_data = perf_event__repipe_op2_synth,
+ .finished_round = perf_event__repipe_op2_synth,
.build_id = perf_event__repipe_op2_synth,
},
.input_name = "-",
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 0259502638b..c2dff9cb1f2 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -305,7 +305,8 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
struct perf_evsel *evsel,
struct machine *machine)
{
- struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
+ struct thread *thread = machine__findnew_thread(machine, sample->pid,
+ sample->pid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
@@ -313,7 +314,7 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
return -1;
}
- dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+ dump_printf(" ... thread: %s:%d\n", thread->comm, thread->tid);
if (evsel->handler.func != NULL) {
tracepoint_handler f = evsel->handler.func;
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 24b78aecc92..47b35407c2f 100644
--- a/tools/perf/builtin-kvm.c
+++ b/tools/perf/builtin-kvm.c
@@ -2,22 +2,26 @@
#include "perf.h"
#include "util/evsel.h"
+#include "util/evlist.h"
#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
-
+#include "util/intlist.h"
#include "util/parse-options.h"
#include "util/trace-event.h"
#include "util/debug.h"
#include <lk/debugfs.h>
#include "util/tool.h"
#include "util/stat.h"
+#include "util/top.h"
#include <sys/prctl.h>
+#include <sys/timerfd.h>
+#include <termios.h>
#include <semaphore.h>
#include <pthread.h>
#include <math.h>
@@ -82,6 +86,8 @@ struct exit_reasons_table {
struct perf_kvm_stat {
struct perf_tool tool;
+ struct perf_record_opts opts;
+ struct perf_evlist *evlist;
struct perf_session *session;
const char *file_name;
@@ -96,10 +102,20 @@ struct perf_kvm_stat {
struct kvm_events_ops *events_ops;
key_cmp_fun compare;
struct list_head kvm_events_cache[EVENTS_CACHE_SIZE];
+
u64 total_time;
u64 total_count;
+ u64 lost_events;
+ u64 duration;
+
+ const char *pid_str;
+ struct intlist *pid_list;
struct rb_root result;
+
+ int timerfd;
+ unsigned int display_time;
+ bool live;
};
@@ -320,6 +336,28 @@ static void init_kvm_event_record(struct perf_kvm_stat *kvm)
INIT_LIST_HEAD(&kvm->kvm_events_cache[i]);
}
+static void clear_events_cache_stats(struct list_head *kvm_events_cache)
+{
+ struct list_head *head;
+ struct kvm_event *event;
+ unsigned int i;
+ int j;
+
+ for (i = 0; i < EVENTS_CACHE_SIZE; i++) {
+ head = &kvm_events_cache[i];
+ list_for_each_entry(event, head, hash_entry) {
+ /* reset stats for event */
+ event->total.time = 0;
+ init_stats(&event->total.stats);
+
+ for (j = 0; j < event->max_vcpu; ++j) {
+ event->vcpu[j].time = 0;
+ init_stats(&event->vcpu[j].stats);
+ }
+ }
+ }
+}
+
static int kvm_events_hash_fn(u64 key)
{
return key & (EVENTS_CACHE_SIZE - 1);
@@ -436,7 +474,7 @@ static bool update_kvm_event(struct kvm_event *event, int vcpu_id,
static bool handle_end_event(struct perf_kvm_stat *kvm,
struct vcpu_event_record *vcpu_record,
struct event_key *key,
- u64 timestamp)
+ struct perf_sample *sample)
{
struct kvm_event *event;
u64 time_begin, time_diff;
@@ -472,9 +510,25 @@ static bool handle_end_event(struct perf_kvm_stat *kvm,
vcpu_record->last_event = NULL;
vcpu_record->start_time = 0;
- BUG_ON(timestamp < time_begin);
+ /* seems to happen once in a while during live mode */
+ if (sample->time < time_begin) {
+ pr_debug("End time before begin time; skipping event.\n");
+ return true;
+ }
+
+ time_diff = sample->time - time_begin;
+
+ if (kvm->duration && time_diff > kvm->duration) {
+ char decode[32];
+
+ kvm->events_ops->decode_key(kvm, &event->key, decode);
+ if (strcmp(decode, "HLT")) {
+ pr_info("%" PRIu64 " VM %d, vcpu %d: %s event took %" PRIu64 "usec\n",
+ sample->time, sample->pid, vcpu_record->vcpu_id,
+ decode, time_diff/1000);
+ }
+ }
- time_diff = timestamp - time_begin;
return update_kvm_event(event, vcpu, time_diff);
}
@@ -521,7 +575,7 @@ static bool handle_kvm_event(struct perf_kvm_stat *kvm,
return handle_begin_event(kvm, vcpu_record, &key, sample->time);
if (kvm->events_ops->is_end_event(evsel, sample, &key))
- return handle_end_event(kvm, vcpu_record, &key, sample->time);
+ return handle_end_event(kvm, vcpu_record, &key, sample);
return true;
}
@@ -550,6 +604,8 @@ static int compare_kvm_event_ ## func(struct kvm_event *one, \
GET_EVENT_KEY(time, time);
COMPARE_EVENT_KEY(count, stats.n);
COMPARE_EVENT_KEY(mean, stats.mean);
+GET_EVENT_KEY(max, stats.max);
+GET_EVENT_KEY(min, stats.min);
#define DEF_SORT_NAME_KEY(name, compare_key) \
{ #name, compare_kvm_event_ ## compare_key }
@@ -639,43 +695,81 @@ static struct kvm_event *pop_from_result(struct rb_root *result)
return container_of(node, struct kvm_event, rb);
}
-static void print_vcpu_info(int vcpu)
+static void print_vcpu_info(struct perf_kvm_stat *kvm)
{
+ int vcpu = kvm->trace_vcpu;
+
pr_info("Analyze events for ");
+ if (kvm->live) {
+ if (kvm->opts.target.system_wide)
+ pr_info("all VMs, ");
+ else if (kvm->opts.target.pid)
+ pr_info("pid(s) %s, ", kvm->opts.target.pid);
+ else
+ pr_info("dazed and confused on what is monitored, ");
+ }
+
if (vcpu == -1)
pr_info("all VCPUs:\n\n");
else
pr_info("VCPU %d:\n\n", vcpu);
}
+static void show_timeofday(void)
+{
+ char date[64];
+ struct timeval tv;
+ struct tm ltime;
+
+ gettimeofday(&tv, NULL);
+ if (localtime_r(&tv.tv_sec, &ltime)) {
+ strftime(date, sizeof(date), "%H:%M:%S", &ltime);
+ pr_info("%s.%06ld", date, tv.tv_usec);
+ } else
+ pr_info("00:00:00.000000");
+
+ return;
+}
+
static void print_result(struct perf_kvm_stat *kvm)
{
char decode[20];
struct kvm_event *event;
int vcpu = kvm->trace_vcpu;
+ if (kvm->live) {
+ puts(CONSOLE_CLEAR);
+ show_timeofday();
+ }
+
pr_info("\n\n");
- print_vcpu_info(vcpu);
+ print_vcpu_info(kvm);
pr_info("%20s ", kvm->events_ops->name);
pr_info("%10s ", "Samples");
pr_info("%9s ", "Samples%");
pr_info("%9s ", "Time%");
+ pr_info("%10s ", "Min Time");
+ pr_info("%10s ", "Max Time");
pr_info("%16s ", "Avg time");
pr_info("\n\n");
while ((event = pop_from_result(&kvm->result))) {
- u64 ecount, etime;
+ u64 ecount, etime, max, min;
ecount = get_event_count(event, vcpu);
etime = get_event_time(event, vcpu);
+ max = get_event_max(event, vcpu);
+ min = get_event_min(event, vcpu);
kvm->events_ops->decode_key(kvm, &event->key, decode);
pr_info("%20s ", decode);
pr_info("%10llu ", (unsigned long long)ecount);
pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100);
pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100);
+ pr_info("%8" PRIu64 "us ", min / 1000);
+ pr_info("%8" PRIu64 "us ", max / 1000);
pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount/1e3,
kvm_event_rel_stddev(vcpu, event));
pr_info("\n");
@@ -683,6 +777,29 @@ static void print_result(struct perf_kvm_stat *kvm)
pr_info("\nTotal Samples:%" PRIu64 ", Total events handled time:%.2fus.\n\n",
kvm->total_count, kvm->total_time / 1e3);
+
+ if (kvm->lost_events)
+ pr_info("\nLost events: %" PRIu64 "\n\n", kvm->lost_events);
+}
+
+static int process_lost_event(struct perf_tool *tool,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample __maybe_unused,
+ struct machine *machine __maybe_unused)
+{
+ struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat, tool);
+
+ kvm->lost_events++;
+ return 0;
+}
+
+static bool skip_sample(struct perf_kvm_stat *kvm,
+ struct perf_sample *sample)
+{
+ if (kvm->pid_list && intlist__find(kvm->pid_list, sample->pid) == NULL)
+ return true;
+
+ return false;
}
static int process_sample_event(struct perf_tool *tool,
@@ -691,10 +808,14 @@ static int process_sample_event(struct perf_tool *tool,
struct perf_evsel *evsel,
struct machine *machine)
{
- struct thread *thread = machine__findnew_thread(machine, sample->tid);
+ struct thread *thread;
struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat,
tool);
+ if (skip_sample(kvm, sample))
+ return 0;
+
+ thread = machine__findnew_thread(machine, sample->pid, sample->tid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
event->header.type);
@@ -707,10 +828,20 @@ static int process_sample_event(struct perf_tool *tool,
return 0;
}
-static int get_cpu_isa(struct perf_session *session)
+static int cpu_isa_config(struct perf_kvm_stat *kvm)
{
- char *cpuid = session->header.env.cpuid;
- int isa;
+ char buf[64], *cpuid;
+ int err, isa;
+
+ if (kvm->live) {
+ err = get_cpuid(buf, sizeof(buf));
+ if (err != 0) {
+ pr_err("Failed to look up CPU type (Intel or AMD)\n");
+ return err;
+ }
+ cpuid = buf;
+ } else
+ cpuid = kvm->session->header.env.cpuid;
if (strstr(cpuid, "Intel"))
isa = 1;
@@ -718,10 +849,361 @@ static int get_cpu_isa(struct perf_session *session)
isa = 0;
else {
pr_err("CPU %s is not supported.\n", cpuid);
- isa = -ENOTSUP;
+ return -ENOTSUP;
+ }
+
+ if (isa == 1) {
+ kvm->exit_reasons = vmx_exit_reasons;
+ kvm->exit_reasons_size = ARRAY_SIZE(vmx_exit_reasons);
+ kvm->exit_reasons_isa = "VMX";
+ }
+
+ return 0;
+}
+
+static bool verify_vcpu(int vcpu)
+{
+ if (vcpu != -1 && vcpu < 0) {
+ pr_err("Invalid vcpu:%d.\n", vcpu);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Keep the max events per mmap pass at a modest level so that
+ * sample processing stays smooth.
+ */
+#define PERF_KVM__MAX_EVENTS_PER_MMAP 25
+
+static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
+ u64 *mmap_time)
+{
+ union perf_event *event;
+ struct perf_sample sample;
+ s64 n = 0;
+ int err;
+
+ *mmap_time = ULLONG_MAX;
+ while ((event = perf_evlist__mmap_read(kvm->evlist, idx)) != NULL) {
+ err = perf_evlist__parse_sample(kvm->evlist, event, &sample);
+ if (err) {
+ pr_err("Failed to parse sample\n");
+ return -1;
+ }
+
+ err = perf_session_queue_event(kvm->session, event, &sample, 0);
+ if (err) {
+ pr_err("Failed to enqueue sample: %d\n", err);
+ return -1;
+ }
+
+ /* save time stamp of our first sample for this mmap */
+ if (n == 0)
+ *mmap_time = sample.time;
+
+ /* limit events per mmap handled all at once */
+ n++;
+ if (n == PERF_KVM__MAX_EVENTS_PER_MMAP)
+ break;
+ }
+
+ return n;
+}
+
+static int perf_kvm__mmap_read(struct perf_kvm_stat *kvm)
+{
+ int i, err, throttled = 0;
+ s64 n, ntotal = 0;
+ u64 flush_time = ULLONG_MAX, mmap_time;
+
+ for (i = 0; i < kvm->evlist->nr_mmaps; i++) {
+ n = perf_kvm__mmap_read_idx(kvm, i, &mmap_time);
+ if (n < 0)
+ return -1;
+
+ /* flush time is going to be the minimum of all the individual
+ * mmap times. Essentially, we flush all the samples queued up
+ * from the last pass under our minimal start time -- that leaves
+ * a very small race for samples to come in with a lower timestamp.
+ * The ioctl to return the perf_clock timestamp should close the
+ * race entirely.
+ */
+ if (mmap_time < flush_time)
+ flush_time = mmap_time;
+
+ ntotal += n;
+ if (n == PERF_KVM__MAX_EVENTS_PER_MMAP)
+ throttled = 1;
+ }
+
+ /* flush queue after each round in which we processed events */
+ if (ntotal) {
+ kvm->session->ordered_samples.next_flush = flush_time;
+ err = kvm->tool.finished_round(&kvm->tool, NULL, kvm->session);
+ if (err) {
+ if (kvm->lost_events)
+ pr_info("\nLost events: %" PRIu64 "\n\n",
+ kvm->lost_events);
+ return err;
+ }
+ }
+
+ return throttled;
+}
+
+static volatile int done;
+
+static void sig_handler(int sig __maybe_unused)
+{
+ done = 1;
+}
+
+static int perf_kvm__timerfd_create(struct perf_kvm_stat *kvm)
+{
+ struct itimerspec new_value;
+ int rc = -1;
+
+ kvm->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK);
+ if (kvm->timerfd < 0) {
+ pr_err("timerfd_create failed\n");
+ goto out;
+ }
+
+ new_value.it_value.tv_sec = kvm->display_time;
+ new_value.it_value.tv_nsec = 0;
+ new_value.it_interval.tv_sec = kvm->display_time;
+ new_value.it_interval.tv_nsec = 0;
+
+ if (timerfd_settime(kvm->timerfd, 0, &new_value, NULL) != 0) {
+ pr_err("timerfd_settime failed: %d\n", errno);
+ close(kvm->timerfd);
+ goto out;
+ }
+
+ rc = 0;
+out:
+ return rc;
+}
+
+static int perf_kvm__handle_timerfd(struct perf_kvm_stat *kvm)
+{
+ uint64_t c;
+ int rc;
+
+ rc = read(kvm->timerfd, &c, sizeof(uint64_t));
+ if (rc < 0) {
+ if (errno == EAGAIN)
+ return 0;
+
+ pr_err("Failed to read timer fd: %d\n", errno);
+ return -1;
+ }
+
+ if (rc != sizeof(uint64_t)) {
+ pr_err("Error reading timer fd - invalid size returned\n");
+ return -1;
+ }
+
+ if (c != 1)
+ pr_debug("Missed timer beats: %" PRIu64 "\n", c-1);
+
+ /* update display */
+ sort_result(kvm);
+ print_result(kvm);
+
+ /* reset counts */
+ clear_events_cache_stats(kvm->kvm_events_cache);
+ kvm->total_count = 0;
+ kvm->total_time = 0;
+ kvm->lost_events = 0;
+
+ return 0;
+}
+
+static int fd_set_nonblock(int fd)
+{
+ long arg = 0;
+
+ arg = fcntl(fd, F_GETFL);
+ if (arg < 0) {
+ pr_err("Failed to get current flags for fd %d\n", fd);
+ return -1;
+ }
+
+ if (fcntl(fd, F_SETFL, arg | O_NONBLOCK) < 0) {
+ pr_err("Failed to set non-block option on fd %d\n", fd);
+ return -1;
+ }
+
+ return 0;
+}
+
+static
+int perf_kvm__handle_stdin(struct termios *tc_now, struct termios *tc_save)
+{
+ int c;
+
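+ /*
+ * Temporarily switch stdin to the caller's non-canonical, no-echo
+ * settings, read a single key, then restore the saved terminal state.
+ */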
+ tcsetattr(0, TCSANOW, tc_now);
+ c = getc(stdin);
+ tcsetattr(0, TCSAFLUSH, tc_save);
+
+ if (c == 'q')
+ return 1;
+
+ return 0;
+}
+
+static int kvm_events_live_report(struct perf_kvm_stat *kvm)
+{
+ struct pollfd *pollfds = NULL;
+ int nr_fds, nr_stdin, ret, err = -EINVAL;
+ struct termios tc, save;
+
+ /* live flag must be set first */
+ kvm->live = true;
+
+ ret = cpu_isa_config(kvm);
+ if (ret < 0)
+ return ret;
+
+ if (!verify_vcpu(kvm->trace_vcpu) ||
+ !select_key(kvm) ||
+ !register_kvm_events_ops(kvm)) {
+ goto out;
+ }
+
+ init_kvm_event_record(kvm);
+
+ tcgetattr(0, &save);
+ tc = save;
+ tc.c_lflag &= ~(ICANON | ECHO);
+ tc.c_cc[VMIN] = 0;
+ tc.c_cc[VTIME] = 0;
+
+ signal(SIGINT, sig_handler);
+ signal(SIGTERM, sig_handler);
+
+ /* copy pollfds -- need to add timerfd and stdin */
+ nr_fds = kvm->evlist->nr_fds;
+ pollfds = zalloc(sizeof(struct pollfd) * (nr_fds + 2));
+ if (!pollfds) {
+ err = -ENOMEM;
+ goto out;
}
+ memcpy(pollfds, kvm->evlist->pollfd,
+ sizeof(struct pollfd) * kvm->evlist->nr_fds);
+
+ /* add timer fd */
+ if (perf_kvm__timerfd_create(kvm) < 0) {
+ err = -1;
+ goto out;
+ }
+
+ pollfds[nr_fds].fd = kvm->timerfd;
+ pollfds[nr_fds].events = POLLIN;
+ nr_fds++;
+
+ pollfds[nr_fds].fd = fileno(stdin);
+ pollfds[nr_fds].events = POLLIN;
+ nr_stdin = nr_fds;
+ nr_fds++;
+ if (fd_set_nonblock(fileno(stdin)) != 0)
+ goto out;
+
+ /* everything is good - enable the events and process */
+ perf_evlist__enable(kvm->evlist);
+
+ while (!done) {
+ int rc;
+
+ rc = perf_kvm__mmap_read(kvm);
+ if (rc < 0)
+ break;
+
+ err = perf_kvm__handle_timerfd(kvm);
+ if (err)
+ goto out;
+
+ if (pollfds[nr_stdin].revents & POLLIN)
+ done = perf_kvm__handle_stdin(&tc, &save);
+
+ if (!rc && !done)
+ err = poll(pollfds, nr_fds, 100);
+ }
+
+ perf_evlist__disable(kvm->evlist);
+
+ if (err == 0) {
+ sort_result(kvm);
+ print_result(kvm);
+ }
+
+out:
+ if (kvm->timerfd >= 0)
+ close(kvm->timerfd);
+
+ if (pollfds)
+ free(pollfds);
- return isa;
+ return err;
+}
+
+static int kvm_live_open_events(struct perf_kvm_stat *kvm)
+{
+ int err, rc = -1;
+ struct perf_evsel *pos;
+ struct perf_evlist *evlist = kvm->evlist;
+
+ perf_evlist__config(evlist, &kvm->opts);
+
+ /*
+ * Note: exclude_{guest,host} do not apply here.
+ * This command processes KVM tracepoints from host only
+ */
+ list_for_each_entry(pos, &evlist->entries, node) {
+ struct perf_event_attr *attr = &pos->attr;
+
+ /* make sure these *are* set */
+ attr->sample_type |= PERF_SAMPLE_TID;
+ attr->sample_type |= PERF_SAMPLE_TIME;
+ attr->sample_type |= PERF_SAMPLE_CPU;
+ attr->sample_type |= PERF_SAMPLE_RAW;
+ /* make sure these are *not*; want as small a sample as possible */
+ attr->sample_type &= ~PERF_SAMPLE_PERIOD;
+ attr->sample_type &= ~PERF_SAMPLE_IP;
+ attr->sample_type &= ~PERF_SAMPLE_CALLCHAIN;
+ attr->sample_type &= ~PERF_SAMPLE_ADDR;
+ attr->sample_type &= ~PERF_SAMPLE_READ;
+ attr->mmap = 0;
+ attr->comm = 0;
+ attr->task = 0;
+
+ attr->sample_period = 1;
+
+ attr->watermark = 0;
+ attr->wakeup_events = 1000;
+
+ /* will enable all once we are ready */
+ attr->disabled = 1;
+ }
+
+ err = perf_evlist__open(evlist);
+ if (err < 0) {
+ printf("Couldn't create the events: %s\n", strerror(errno));
+ goto out;
+ }
+
+ if (perf_evlist__mmap(evlist, kvm->opts.mmap_pages, false) < 0) {
+ ui__error("Failed to mmap the events: %s\n", strerror(errno));
+ perf_evlist__close(evlist);
+ goto out;
+ }
+
+ rc = 0;
+
+out:
+ return rc;
}
static int read_events(struct perf_kvm_stat *kvm)
@@ -749,28 +1231,24 @@ static int read_events(struct perf_kvm_stat *kvm)
* Do not use 'isa' recorded in kvm_exit tracepoint since it is not
* traced in the old kernel.
*/
- ret = get_cpu_isa(kvm->session);
-
+ ret = cpu_isa_config(kvm);
if (ret < 0)
return ret;
- if (ret == 1) {
- kvm->exit_reasons = vmx_exit_reasons;
- kvm->exit_reasons_size = ARRAY_SIZE(vmx_exit_reasons);
- kvm->exit_reasons_isa = "VMX";
- }
-
return perf_session__process_events(kvm->session, &kvm->tool);
}
-static bool verify_vcpu(int vcpu)
+static int parse_target_str(struct perf_kvm_stat *kvm)
{
- if (vcpu != -1 && vcpu < 0) {
- pr_err("Invalid vcpu:%d.\n", vcpu);
- return false;
+ if (kvm->pid_str) {
+ kvm->pid_list = intlist__new(kvm->pid_str);
+ if (kvm->pid_list == NULL) {
+ pr_err("Error parsing process id string\n");
+ return -EINVAL;
+ }
}
- return true;
+ return 0;
}
static int kvm_events_report_vcpu(struct perf_kvm_stat *kvm)
@@ -778,6 +1256,9 @@ static int kvm_events_report_vcpu(struct perf_kvm_stat *kvm)
int ret = -EINVAL;
int vcpu = kvm->trace_vcpu;
+ if (parse_target_str(kvm) != 0)
+ goto exit;
+
if (!verify_vcpu(vcpu))
goto exit;
@@ -801,16 +1282,11 @@ exit:
return ret;
}
-static const char * const record_args[] = {
- "record",
- "-R",
- "-f",
- "-m", "1024",
- "-c", "1",
- "-e", "kvm:kvm_entry",
- "-e", "kvm:kvm_exit",
- "-e", "kvm:kvm_mmio",
- "-e", "kvm:kvm_pio",
+static const char * const kvm_events_tp[] = {
+ "kvm:kvm_entry",
+ "kvm:kvm_exit",
+ "kvm:kvm_mmio",
+ "kvm:kvm_pio",
};
#define STRDUP_FAIL_EXIT(s) \
@@ -826,8 +1302,15 @@ kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
{
unsigned int rec_argc, i, j;
const char **rec_argv;
+ const char * const record_args[] = {
+ "record",
+ "-R",
+ "-m", "1024",
+ "-c", "1",
+ };
- rec_argc = ARRAY_SIZE(record_args) + argc + 2;
+ rec_argc = ARRAY_SIZE(record_args) + argc + 2 +
+ 2 * ARRAY_SIZE(kvm_events_tp);
rec_argv = calloc(rec_argc + 1, sizeof(char *));
if (rec_argv == NULL)
@@ -836,6 +1319,11 @@ kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
for (i = 0; i < ARRAY_SIZE(record_args); i++)
rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]);
+ for (j = 0; j < ARRAY_SIZE(kvm_events_tp); j++) {
+ rec_argv[i++] = "-e";
+ rec_argv[i++] = STRDUP_FAIL_EXIT(kvm_events_tp[j]);
+ }
+
rec_argv[i++] = STRDUP_FAIL_EXIT("-o");
rec_argv[i++] = STRDUP_FAIL_EXIT(kvm->file_name);
@@ -856,6 +1344,8 @@ kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
"key for sorting: sample(sort by samples number)"
" time (sort by avg time)"),
+ OPT_STRING('p', "pid", &kvm->pid_str, "pid",
+ "analyze events only for given process id(s)"),
OPT_END()
};
@@ -878,6 +1368,190 @@ kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
return kvm_events_report_vcpu(kvm);
}
+static struct perf_evlist *kvm_live_event_list(void)
+{
+ struct perf_evlist *evlist;
+ char *tp, *name, *sys;
+ unsigned int j;
+ int err = -1;
+
+ evlist = perf_evlist__new();
+ if (evlist == NULL)
+ return NULL;
+
+ for (j = 0; j < ARRAY_SIZE(kvm_events_tp); j++) {
+
+ tp = strdup(kvm_events_tp[j]);
+ if (tp == NULL)
+ goto out;
+
+ /* split tracepoint into subsystem and name */
+ sys = tp;
+ name = strchr(tp, ':');
+ if (name == NULL) {
+ pr_err("Error parsing %s tracepoint: subsystem delimiter not found\n",
+ kvm_events_tp[j]);
+ free(tp);
+ goto out;
+ }
+ *name = '\0';
+ name++;
+
+ if (perf_evlist__add_newtp(evlist, sys, name, NULL)) {
+ pr_err("Failed to add %s tracepoint to the list\n", kvm_events_tp[j]);
+ free(tp);
+ goto out;
+ }
+
+ free(tp);
+ }
+
+ err = 0;
+
+out:
+ if (err) {
+ perf_evlist__delete(evlist);
+ evlist = NULL;
+ }
+
+ return evlist;
+}
+
+static int kvm_events_live(struct perf_kvm_stat *kvm,
+ int argc, const char **argv)
+{
+ char errbuf[BUFSIZ];
+ int err;
+
+ const struct option live_options[] = {
+ OPT_STRING('p', "pid", &kvm->opts.target.pid, "pid",
+ "record events on existing process id"),
+ OPT_UINTEGER('m', "mmap-pages", &kvm->opts.mmap_pages,
+ "number of mmap data pages"),
+ OPT_INCR('v', "verbose", &verbose,
+ "be more verbose (show counter open errors, etc)"),
+ OPT_BOOLEAN('a', "all-cpus", &kvm->opts.target.system_wide,
+ "system-wide collection from all CPUs"),
+ OPT_UINTEGER('d', "display", &kvm->display_time,
+ "time in seconds between display updates"),
+ OPT_STRING(0, "event", &kvm->report_event, "report event",
+ "event for reporting: vmexit, mmio, ioport"),
+ OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
+ "vcpu id to report"),
+ OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
+ "key for sorting: sample(sort by samples number)"
+ " time (sort by avg time)"),
+ OPT_U64(0, "duration", &kvm->duration,
+ "show events other than HALT that take longer than duration usecs"),
+ OPT_END()
+ };
+ const char * const live_usage[] = {
+ "perf kvm stat live [<options>]",
+ NULL
+ };
+
+
+ /* event handling */
+ kvm->tool.sample = process_sample_event;
+ kvm->tool.comm = perf_event__process_comm;
+ kvm->tool.exit = perf_event__process_exit;
+ kvm->tool.fork = perf_event__process_fork;
+ kvm->tool.lost = process_lost_event;
+ kvm->tool.ordered_samples = true;
+ perf_tool__fill_defaults(&kvm->tool);
+
+ /* set defaults */
+ kvm->display_time = 1;
+ kvm->opts.user_interval = 1;
+ kvm->opts.mmap_pages = 512;
+ kvm->opts.target.uses_mmap = false;
+ kvm->opts.target.uid_str = NULL;
+ kvm->opts.target.uid = UINT_MAX;
+
+ symbol__init();
+ disable_buildid_cache();
+
+ use_browser = 0;
+ setup_browser(false);
+
+ if (argc) {
+ argc = parse_options(argc, argv, live_options,
+ live_usage, 0);
+ if (argc)
+ usage_with_options(live_usage, live_options);
+ }
+
+ kvm->duration *= NSEC_PER_USEC; /* convert usec to nsec */
+
+ /*
+ * target related setups
+ */
+ err = perf_target__validate(&kvm->opts.target);
+ if (err) {
+ perf_target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ);
+ ui__warning("%s", errbuf);
+ }
+
+ if (perf_target__none(&kvm->opts.target))
+ kvm->opts.target.system_wide = true;
+
+
+ /*
+ * generate the event list
+ */
+ kvm->evlist = kvm_live_event_list();
+ if (kvm->evlist == NULL) {
+ err = -1;
+ goto out;
+ }
+
+ symbol_conf.nr_events = kvm->evlist->nr_entries;
+
+ if (perf_evlist__create_maps(kvm->evlist, &kvm->opts.target) < 0)
+ usage_with_options(live_usage, live_options);
+
+ /*
+ * perf session
+ */
+ kvm->session = perf_session__new(NULL, O_WRONLY, false, false, &kvm->tool);
+ if (kvm->session == NULL) {
+ err = -ENOMEM;
+ goto out;
+ }
+ kvm->session->evlist = kvm->evlist;
+ perf_session__set_id_hdr_size(kvm->session);
+
+
+ if (perf_target__has_task(&kvm->opts.target))
+ perf_event__synthesize_thread_map(&kvm->tool,
+ kvm->evlist->threads,
+ perf_event__process,
+ &kvm->session->machines.host);
+ else
+ perf_event__synthesize_threads(&kvm->tool, perf_event__process,
+ &kvm->session->machines.host);
+
+
+ err = kvm_live_open_events(kvm);
+ if (err)
+ goto out;
+
+ err = kvm_events_live_report(kvm);
+
+out:
+ exit_browser(0);
+
+ if (kvm->session)
+ perf_session__delete(kvm->session);
+ kvm->session = NULL;
+ if (kvm->evlist) {
+ perf_evlist__delete_maps(kvm->evlist);
+ perf_evlist__delete(kvm->evlist);
+ }
+
+ return err;
+}
+
static void print_kvm_stat_usage(void)
{
printf("Usage: perf kvm stat <command>\n\n");
@@ -885,6 +1559,7 @@ static void print_kvm_stat_usage(void)
printf("# Available commands:\n");
printf("\trecord: record kvm events\n");
printf("\treport: report statistical data of kvm events\n");
+ printf("\tlive: live reporting of statistical data of kvm events\n");
printf("\nOtherwise, it is the alias of 'perf stat':\n");
}
@@ -914,6 +1589,9 @@ static int kvm_cmd_stat(const char *file_name, int argc, const char **argv)
if (!strncmp(argv[1], "rep", 3))
return kvm_events_report(&kvm, argc - 1 , argv + 1);
+ if (!strncmp(argv[1], "live", 4))
+ return kvm_events_live(&kvm, argc - 1 , argv + 1);
+
perf_stat:
return cmd_stat(argc, argv, NULL);
}
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index 1948eceb517..e79f423cc30 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -13,6 +13,7 @@
#include "util/parse-events.h"
#include "util/cache.h"
+#include "util/pmu.h"
int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
{
@@ -37,6 +38,8 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
else if (strcmp(argv[i], "cache") == 0 ||
strcmp(argv[i], "hwcache") == 0)
print_hwcache_events(NULL, false);
+ else if (strcmp(argv[i], "pmu") == 0)
+ print_pmu_events(NULL, false);
else if (strcmp(argv[i], "--raw-dump") == 0)
print_events(NULL, true);
else {
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 76543a4a7a3..ee33ba2f05d 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -805,7 +805,8 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
struct perf_evsel *evsel,
struct machine *machine)
{
- struct thread *thread = machine__findnew_thread(machine, sample->tid);
+ struct thread *thread = machine__findnew_thread(machine, sample->pid,
+ sample->tid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index a8ff6d264e5..791b432df84 100644
--- a/tools/perf/builtin-mem.c
+++ b/tools/perf/builtin-mem.c
@@ -14,7 +14,6 @@ static const char *mem_operation = MEM_OPERATION_LOAD;
struct perf_mem {
struct perf_tool tool;
char const *input_name;
- symbol_filter_t annotate_init;
bool hide_unresolved;
bool dump_raw;
const char *cpu_list;
@@ -69,8 +68,7 @@ dump_raw_samples(struct perf_tool *tool,
struct addr_location al;
const char *fmt;
- if (perf_event__preprocess_sample(event, machine, &al, sample,
- mem->annotate_init) < 0) {
+ if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
fprintf(stderr, "problem processing %d event, skipping it.\n",
event->header.type);
return -1;
@@ -96,7 +94,7 @@ dump_raw_samples(struct perf_tool *tool,
symbol_conf.field_sep,
sample->tid,
symbol_conf.field_sep,
- event->ip.ip,
+ sample->ip,
symbol_conf.field_sep,
sample->addr,
symbol_conf.field_sep,
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index ecca62e27b2..a41ac41546c 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -474,13 +474,6 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
goto out_delete_session;
}
- err = perf_event__synthesize_event_types(tool, process_synthesized_event,
- machine);
- if (err < 0) {
- pr_err("Couldn't synthesize event_types.\n");
- goto out_delete_session;
- }
-
if (have_tracepoints(&evsel_list->entries)) {
/*
* FIXME err <= 0 here actually means that
@@ -904,7 +897,6 @@ const struct option record_options[] = {
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
int err = -ENOMEM;
- struct perf_evsel *pos;
struct perf_evlist *evsel_list;
struct perf_record *rec = &record;
char errbuf[BUFSIZ];
@@ -968,11 +960,6 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
if (perf_evlist__create_maps(evsel_list, &rec->opts.target) < 0)
usage_with_options(record_usage, record_options);
- list_for_each_entry(pos, &evsel_list->entries, node) {
- if (perf_header__push_event(pos->attr.config, perf_evsel__name(pos)))
- goto out_free_fd;
- }
-
if (rec->opts.user_interval != ULLONG_MAX)
rec->opts.default_interval = rec->opts.user_interval;
if (rec->opts.user_freq != UINT_MAX)
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 3662047cc6b..9725aa37541 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -49,7 +49,6 @@ struct perf_report {
bool mem_mode;
struct perf_read_values show_threads_values;
const char *pretty_printing_style;
- symbol_filter_t annotate_init;
const char *cpu_list;
const char *symbol_filter_str;
float min_percent;
@@ -89,7 +88,7 @@ static int perf_report__add_mem_hist_entry(struct perf_tool *tool,
if ((sort__has_parent || symbol_conf.use_callchain) &&
sample->callchain) {
err = machine__resolve_callchain(machine, evsel, al->thread,
- sample, &parent);
+ sample, &parent, al);
if (err)
return err;
}
@@ -180,7 +179,7 @@ static int perf_report__add_branch_hist_entry(struct perf_tool *tool,
if ((sort__has_parent || symbol_conf.use_callchain)
&& sample->callchain) {
err = machine__resolve_callchain(machine, evsel, al->thread,
- sample, &parent);
+ sample, &parent, al);
if (err)
return err;
}
@@ -254,7 +253,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel,
if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) {
err = machine__resolve_callchain(machine, evsel, al->thread,
- sample, &parent);
+ sample, &parent, al);
if (err)
return err;
}
@@ -305,8 +304,7 @@ static int process_sample_event(struct perf_tool *tool,
struct addr_location al;
int ret;
- if (perf_event__preprocess_sample(event, machine, &al, sample,
- rep->annotate_init) < 0) {
+ if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
fprintf(stderr, "problem processing %d event, skipping it.\n",
event->header.type);
return -1;
@@ -367,7 +365,7 @@ static int process_read_event(struct perf_tool *tool,
static int perf_report__setup_sample_type(struct perf_report *rep)
{
struct perf_session *self = rep->session;
- u64 sample_type = perf_evlist__sample_type(self->evlist);
+ u64 sample_type = perf_evlist__combined_sample_type(self->evlist);
if (!self->fd_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) {
if (sort__has_parent) {
@@ -497,7 +495,7 @@ static int __cmd_report(struct perf_report *rep)
ret = perf_session__cpu_bitmap(session, rep->cpu_list,
rep->cpu_bitmap);
if (ret)
- goto out_delete;
+ return ret;
}
if (use_browser <= 0)
@@ -508,11 +506,11 @@ static int __cmd_report(struct perf_report *rep)
ret = perf_report__setup_sample_type(rep);
if (ret)
- goto out_delete;
+ return ret;
ret = perf_session__process_events(session, &rep->tool);
if (ret)
- goto out_delete;
+ return ret;
kernel_map = session->machines.host.vmlinux_maps[MAP__FUNCTION];
kernel_kmap = map__kmap(kernel_map);
@@ -547,7 +545,7 @@ static int __cmd_report(struct perf_report *rep)
if (dump_trace) {
perf_session__fprintf_nr_events(session, stdout);
- goto out_delete;
+ return 0;
}
nr_samples = 0;
@@ -572,7 +570,7 @@ static int __cmd_report(struct perf_report *rep)
if (nr_samples == 0) {
ui__error("The %s file has no samples!\n", session->filename);
- goto out_delete;
+ return 0;
}
list_for_each_entry(pos, &session->evlist->entries, node)
@@ -598,19 +596,6 @@ static int __cmd_report(struct perf_report *rep)
} else
perf_evlist__tty_browse_hists(session->evlist, rep, help);
-out_delete:
- /*
- * Speed up the exit process, for large files this can
- * take quite a while.
- *
- * XXX Enable this when using valgrind or if we ever
- * librarize this command.
- *
- * Also experiment with obstacks to see how much speed
- * up we'll get here.
- *
- * perf_session__delete(session);
- */
return ret;
}
@@ -680,12 +665,23 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset)
}
/* get the call chain order */
- if (!strcmp(tok2, "caller"))
+ if (!strncmp(tok2, "caller", strlen("caller")))
callchain_param.order = ORDER_CALLER;
- else if (!strcmp(tok2, "callee"))
+ else if (!strncmp(tok2, "callee", strlen("callee")))
callchain_param.order = ORDER_CALLEE;
else
return -1;
+
+ /* Get the sort key */
+ tok2 = strtok(NULL, ",");
+ if (!tok2)
+ goto setup;
+ if (!strncmp(tok2, "function", strlen("function")))
+ callchain_param.key = CCKEY_FUNCTION;
+ else if (!strncmp(tok2, "address", strlen("address")))
+ callchain_param.key = CCKEY_ADDRESS;
+ else
+ return -1;
setup:
if (callchain_register_param(&callchain_param) < 0) {
fprintf(stderr, "Can't register callchain params\n");
@@ -694,6 +690,24 @@ setup:
return 0;
}
+int
+report_parse_ignore_callees_opt(const struct option *opt __maybe_unused,
+ const char *arg, int unset __maybe_unused)
+{
+ if (arg) {
+ int err = regcomp(&ignore_callees_regex, arg, REG_EXTENDED);
+ if (err) {
+ char buf[BUFSIZ];
+ regerror(err, &ignore_callees_regex, buf, sizeof(buf));
+ pr_err("Invalid --ignore-callees regex: %s\n%s", arg, buf);
+ return -1;
+ }
+ have_ignore_callees = 1;
+ }
+
+ return 0;
+}
+
static int
parse_branch_mode(const struct option *opt __maybe_unused,
const char *str __maybe_unused, int unset)
@@ -736,7 +750,6 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
.lost = perf_event__process_lost,
.read = process_read_event,
.attr = perf_event__process_attr,
- .event_type = perf_event__process_event_type,
.tracing_data = perf_event__process_tracing_data,
.build_id = perf_event__process_build_id,
.ordered_samples = true,
@@ -780,10 +793,13 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_BOOLEAN('x', "exclude-other", &symbol_conf.exclude_other,
"Only display entries with parent-match"),
OPT_CALLBACK_DEFAULT('g', "call-graph", &report, "output_type,min_percent[,print_limit],call_order",
- "Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold, optional print limit and callchain order. "
- "Default: fractal,0.5,callee", &parse_callchain_opt, callchain_default_opt),
+ "Display callchains using output_type (graph, flat, fractal, or none) , min percent threshold, optional print limit, callchain order, key (function or address). "
+ "Default: fractal,0.5,callee,function", &parse_callchain_opt, callchain_default_opt),
OPT_BOOLEAN('G', "inverted", &report.inverted_callchain,
"alias for inverted call graph"),
+ OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
+ "ignore callees of these functions in call graphs",
+ report_parse_ignore_callees_opt),
OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
"only consider symbols in these dsos"),
OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
@@ -853,7 +869,6 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
setup_browser(true);
else {
use_browser = 0;
- perf_hpp__column_enable(PERF_HPP__OVERHEAD);
perf_hpp__init();
}
@@ -907,7 +922,8 @@ repeat:
*/
if (use_browser == 1 && sort__has_sym) {
symbol_conf.priv_size = sizeof(struct annotation);
- report.annotate_init = symbol__annotate_init;
+ machines__set_symbol_filter(&session->machines,
+ symbol__annotate_init);
/*
* For searching by name on the "Browse map details".
* providing it only in verbose mode not to bloat too
@@ -931,14 +947,6 @@ repeat:
if (parent_pattern != default_parent_pattern) {
if (sort_dimension__add("parent") < 0)
goto error;
-
- /*
- * Only show the parent fields if we explicitly
- * sort that way. If we only use parent machinery
- * for filtering, we don't want it.
- */
- if (!strstr(sort_order, "parent"))
- sort_parent.elide = 1;
}
if (argc) {
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index fed9ae432c1..d8c51b2f263 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -109,8 +109,9 @@ struct trace_sched_handler {
int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel,
struct perf_sample *sample, struct machine *machine);
- int (*fork_event)(struct perf_sched *sched, struct perf_evsel *evsel,
- struct perf_sample *sample);
+ /* PERF_RECORD_FORK event, not sched_process_fork tracepoint */
+ int (*fork_event)(struct perf_sched *sched, union perf_event *event,
+ struct machine *machine);
int (*migrate_task_event)(struct perf_sched *sched,
struct perf_evsel *evsel,
@@ -717,22 +718,31 @@ static int replay_switch_event(struct perf_sched *sched,
return 0;
}
-static int replay_fork_event(struct perf_sched *sched, struct perf_evsel *evsel,
- struct perf_sample *sample)
+static int replay_fork_event(struct perf_sched *sched,
+ union perf_event *event,
+ struct machine *machine)
{
- const char *parent_comm = perf_evsel__strval(evsel, sample, "parent_comm"),
- *child_comm = perf_evsel__strval(evsel, sample, "child_comm");
- const u32 parent_pid = perf_evsel__intval(evsel, sample, "parent_pid"),
- child_pid = perf_evsel__intval(evsel, sample, "child_pid");
+ struct thread *child, *parent;
+
+ child = machine__findnew_thread(machine, event->fork.pid,
+ event->fork.tid);
+ parent = machine__findnew_thread(machine, event->fork.ppid,
+ event->fork.ptid);
+
+ if (child == NULL || parent == NULL) {
+ pr_debug("thread does not exist on fork event: child %p, parent %p\n",
+ child, parent);
+ return 0;
+ }
if (verbose) {
- printf("sched_fork event %p\n", evsel);
- printf("... parent: %s/%d\n", parent_comm, parent_pid);
- printf("... child: %s/%d\n", child_comm, child_pid);
+ printf("fork event\n");
+ printf("... parent: %s/%d\n", parent->comm, parent->tid);
+ printf("... child: %s/%d\n", child->comm, child->tid);
}
- register_pid(sched, parent_pid, parent_comm);
- register_pid(sched, child_pid, child_comm);
+ register_pid(sched, parent->tid, parent->comm);
+ register_pid(sched, child->tid, child->comm);
return 0;
}
@@ -824,14 +834,6 @@ static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread)
return 0;
}
-static int latency_fork_event(struct perf_sched *sched __maybe_unused,
- struct perf_evsel *evsel __maybe_unused,
- struct perf_sample *sample __maybe_unused)
-{
- /* should insert the newcomer */
- return 0;
-}
-
static char sched_out_state(u64 prev_state)
{
const char *str = TASK_STATE_TO_CHAR_STR;
@@ -934,8 +936,8 @@ static int latency_switch_event(struct perf_sched *sched,
return -1;
}
- sched_out = machine__findnew_thread(machine, prev_pid);
- sched_in = machine__findnew_thread(machine, next_pid);
+ sched_out = machine__findnew_thread(machine, 0, prev_pid);
+ sched_in = machine__findnew_thread(machine, 0, next_pid);
out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
if (!out_events) {
@@ -978,7 +980,7 @@ static int latency_runtime_event(struct perf_sched *sched,
{
const u32 pid = perf_evsel__intval(evsel, sample, "pid");
const u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
- struct thread *thread = machine__findnew_thread(machine, pid);
+ struct thread *thread = machine__findnew_thread(machine, 0, pid);
struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
u64 timestamp = sample->time;
int cpu = sample->cpu;
@@ -1016,7 +1018,7 @@ static int latency_wakeup_event(struct perf_sched *sched,
if (!success)
return 0;
- wakee = machine__findnew_thread(machine, pid);
+ wakee = machine__findnew_thread(machine, 0, pid);
atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
if (!atoms) {
if (thread_atoms_insert(sched, wakee))
@@ -1070,12 +1072,12 @@ static int latency_migrate_task_event(struct perf_sched *sched,
if (sched->profile_cpu == -1)
return 0;
- migrant = machine__findnew_thread(machine, pid);
+ migrant = machine__findnew_thread(machine, 0, pid);
atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
if (!atoms) {
if (thread_atoms_insert(sched, migrant))
return -1;
- register_pid(sched, migrant->pid, migrant->comm);
+ register_pid(sched, migrant->tid, migrant->comm);
atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
if (!atoms) {
pr_err("migration-event: Internal tree error");
@@ -1115,7 +1117,7 @@ static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_
sched->all_runtime += work_list->total_runtime;
sched->all_count += work_list->nb_atoms;
- ret = printf(" %s:%d ", work_list->thread->comm, work_list->thread->pid);
+ ret = printf(" %s:%d ", work_list->thread->comm, work_list->thread->tid);
for (i = 0; i < 24 - ret; i++)
printf(" ");
@@ -1131,9 +1133,9 @@ static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_
static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
{
- if (l->thread->pid < r->thread->pid)
+ if (l->thread->tid < r->thread->tid)
return -1;
- if (l->thread->pid > r->thread->pid)
+ if (l->thread->tid > r->thread->tid)
return 1;
return 0;
@@ -1289,8 +1291,8 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
return -1;
}
- sched_out = machine__findnew_thread(machine, prev_pid);
- sched_in = machine__findnew_thread(machine, next_pid);
+ sched_out = machine__findnew_thread(machine, 0, prev_pid);
+ sched_in = machine__findnew_thread(machine, 0, next_pid);
sched->curr_thread[this_cpu] = sched_in;
@@ -1321,7 +1323,7 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
printf("*");
if (sched->curr_thread[cpu]) {
- if (sched->curr_thread[cpu]->pid)
+ if (sched->curr_thread[cpu]->tid)
printf("%2s ", sched->curr_thread[cpu]->shortname);
else
printf(". ");
@@ -1332,7 +1334,7 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
printf(" %12.6f secs ", (double)timestamp/1e9);
if (new_shortname) {
printf("%s => %s:%d\n",
- sched_in->shortname, sched_in->comm, sched_in->pid);
+ sched_in->shortname, sched_in->comm, sched_in->tid);
} else {
printf("\n");
}
@@ -1379,25 +1381,20 @@ static int process_sched_runtime_event(struct perf_tool *tool,
return 0;
}
-static int process_sched_fork_event(struct perf_tool *tool,
- struct perf_evsel *evsel,
- struct perf_sample *sample,
- struct machine *machine __maybe_unused)
+static int perf_sched__process_fork_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
{
struct perf_sched *sched = container_of(tool, struct perf_sched, tool);
- if (sched->tp_handler->fork_event)
- return sched->tp_handler->fork_event(sched, evsel, sample);
+ /* run the fork event through the perf machinery */
+ perf_event__process_fork(tool, event, sample, machine);
- return 0;
-}
+ /* and then run additional processing needed for this command */
+ if (sched->tp_handler->fork_event)
+ return sched->tp_handler->fork_event(sched, event, machine);
-static int process_sched_exit_event(struct perf_tool *tool __maybe_unused,
- struct perf_evsel *evsel,
- struct perf_sample *sample __maybe_unused,
- struct machine *machine __maybe_unused)
-{
- pr_debug("sched_exit event %p\n", evsel);
return 0;
}
@@ -1425,15 +1422,8 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_
struct perf_evsel *evsel,
struct machine *machine)
{
- struct thread *thread = machine__findnew_thread(machine, sample->tid);
int err = 0;
- if (thread == NULL) {
- pr_debug("problem processing %s event, skipping it.\n",
- perf_evsel__name(evsel));
- return -1;
- }
-
evsel->hists.stats.total_period += sample->period;
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
@@ -1445,7 +1435,7 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_
return err;
}
-static int perf_sched__read_events(struct perf_sched *sched, bool destroy,
+static int perf_sched__read_events(struct perf_sched *sched,
struct perf_session **psession)
{
const struct perf_evsel_str_handler handlers[] = {
@@ -1453,8 +1443,6 @@ static int perf_sched__read_events(struct perf_sched *sched, bool destroy,
{ "sched:sched_stat_runtime", process_sched_runtime_event, },
{ "sched:sched_wakeup", process_sched_wakeup_event, },
{ "sched:sched_wakeup_new", process_sched_wakeup_event, },
- { "sched:sched_process_fork", process_sched_fork_event, },
- { "sched:sched_process_exit", process_sched_exit_event, },
{ "sched:sched_migrate_task", process_sched_migrate_task_event, },
};
struct perf_session *session;
@@ -1480,11 +1468,10 @@ static int perf_sched__read_events(struct perf_sched *sched, bool destroy,
sched->nr_lost_chunks = session->stats.nr_events[PERF_RECORD_LOST];
}
- if (destroy)
- perf_session__delete(session);
-
if (psession)
*psession = session;
+ else
+ perf_session__delete(session);
return 0;
@@ -1529,8 +1516,11 @@ static int perf_sched__lat(struct perf_sched *sched)
struct perf_session *session;
setup_pager();
- if (perf_sched__read_events(sched, false, &session))
+
+ /* save session -- references to threads are held in work_list */
+ if (perf_sched__read_events(sched, &session))
return -1;
+
perf_sched__sort_lat(sched);
printf("\n ---------------------------------------------------------------------------------------------------------------\n");
@@ -1565,7 +1555,7 @@ static int perf_sched__map(struct perf_sched *sched)
sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF);
setup_pager();
- if (perf_sched__read_events(sched, true, NULL))
+ if (perf_sched__read_events(sched, NULL))
return -1;
print_bad_events(sched);
return 0;
@@ -1580,7 +1570,7 @@ static int perf_sched__replay(struct perf_sched *sched)
test_calibrations(sched);
- if (perf_sched__read_events(sched, true, NULL))
+ if (perf_sched__read_events(sched, NULL))
return -1;
printf("nr_run_events: %ld\n", sched->nr_run_events);
@@ -1639,7 +1629,6 @@ static int __cmd_record(int argc, const char **argv)
"-e", "sched:sched_stat_sleep",
"-e", "sched:sched_stat_iowait",
"-e", "sched:sched_stat_runtime",
- "-e", "sched:sched_process_exit",
"-e", "sched:sched_process_fork",
"-e", "sched:sched_wakeup",
"-e", "sched:sched_migrate_task",
@@ -1662,28 +1651,29 @@ static int __cmd_record(int argc, const char **argv)
return cmd_record(i, rec_argv, NULL);
}
+static const char default_sort_order[] = "avg, max, switch, runtime";
+static struct perf_sched sched = {
+ .tool = {
+ .sample = perf_sched__process_tracepoint_sample,
+ .comm = perf_event__process_comm,
+ .lost = perf_event__process_lost,
+ .fork = perf_sched__process_fork_event,
+ .ordered_samples = true,
+ },
+ .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
+ .sort_list = LIST_HEAD_INIT(sched.sort_list),
+ .start_work_mutex = PTHREAD_MUTEX_INITIALIZER,
+ .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
+ .curr_pid = { [0 ... MAX_CPUS - 1] = -1 },
+ .sort_order = default_sort_order,
+ .replay_repeat = 10,
+ .profile_cpu = -1,
+ .next_shortname1 = 'A',
+ .next_shortname2 = '0',
+};
+
int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
{
- const char default_sort_order[] = "avg, max, switch, runtime";
- struct perf_sched sched = {
- .tool = {
- .sample = perf_sched__process_tracepoint_sample,
- .comm = perf_event__process_comm,
- .lost = perf_event__process_lost,
- .fork = perf_event__process_fork,
- .ordered_samples = true,
- },
- .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid),
- .sort_list = LIST_HEAD_INIT(sched.sort_list),
- .start_work_mutex = PTHREAD_MUTEX_INITIALIZER,
- .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER,
- .curr_pid = { [0 ... MAX_CPUS - 1] = -1 },
- .sort_order = default_sort_order,
- .replay_repeat = 10,
- .profile_cpu = -1,
- .next_shortname1 = 'A',
- .next_shortname2 = '0',
- };
const struct option latency_options[] = {
OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
"sort by key(s): runtime, switch, avg, max"),
@@ -1729,7 +1719,6 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
.wakeup_event = latency_wakeup_event,
.switch_event = latency_switch_event,
.runtime_event = latency_runtime_event,
- .fork_event = latency_fork_event,
.migrate_task_event = latency_migrate_task_event,
};
struct trace_sched_handler map_ops = {
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 92d4658f56f..93a34cef967 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -24,6 +24,7 @@ static u64 last_timestamp;
static u64 nr_unordered;
extern const struct option record_options[];
static bool no_callchain;
+static bool latency_format;
static bool system_wide;
static const char *cpu_list;
static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
@@ -65,6 +66,7 @@ struct output_option {
static struct {
bool user_set;
bool wildcard_set;
+ unsigned int print_ip_opts;
u64 fields;
u64 invalid_fields;
} output[PERF_TYPE_MAX] = {
@@ -234,6 +236,7 @@ static int perf_session__check_output_opt(struct perf_session *session)
{
int j;
struct perf_evsel *evsel;
+ struct perf_event_attr *attr;
for (j = 0; j < PERF_TYPE_MAX; ++j) {
evsel = perf_session__find_first_evtype(session, j);
@@ -252,6 +255,24 @@ static int perf_session__check_output_opt(struct perf_session *session)
if (evsel && output[j].fields &&
perf_evsel__check_attr(evsel, session))
return -1;
+
+ if (evsel == NULL)
+ continue;
+
+ attr = &evsel->attr;
+
+ output[j].print_ip_opts = 0;
+ if (PRINT_FIELD(IP))
+ output[j].print_ip_opts |= PRINT_IP_OPT_IP;
+
+ if (PRINT_FIELD(SYM))
+ output[j].print_ip_opts |= PRINT_IP_OPT_SYM;
+
+ if (PRINT_FIELD(DSO))
+ output[j].print_ip_opts |= PRINT_IP_OPT_DSO;
+
+ if (PRINT_FIELD(SYMOFFSET))
+ output[j].print_ip_opts |= PRINT_IP_OPT_SYMOFFSET;
}
return 0;
@@ -381,8 +402,8 @@ static void print_sample_bts(union perf_event *event,
else
printf("\n");
perf_evsel__print_ip(evsel, event, sample, machine,
- PRINT_FIELD(SYM), PRINT_FIELD(DSO),
- PRINT_FIELD(SYMOFFSET));
+ output[attr->type].print_ip_opts,
+ PERF_MAX_STACK_DEPTH);
}
printf(" => ");
@@ -396,10 +417,10 @@ static void print_sample_bts(union perf_event *event,
static void process_event(union perf_event *event, struct perf_sample *sample,
struct perf_evsel *evsel, struct machine *machine,
- struct addr_location *al)
+ struct thread *thread,
+ struct addr_location *al __maybe_unused)
{
struct perf_event_attr *attr = &evsel->attr;
- struct thread *thread = al->thread;
if (output[attr->type].fields == 0)
return;
@@ -422,9 +443,10 @@ static void process_event(union perf_event *event, struct perf_sample *sample,
printf(" ");
else
printf("\n");
+
perf_evsel__print_ip(evsel, event, sample, machine,
- PRINT_FIELD(SYM), PRINT_FIELD(DSO),
- PRINT_FIELD(SYMOFFSET));
+ output[attr->type].print_ip_opts,
+ PERF_MAX_STACK_DEPTH);
}
printf("\n");
@@ -479,7 +501,8 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
struct machine *machine)
{
struct addr_location al;
- struct thread *thread = machine__findnew_thread(machine, event->ip.tid);
+ struct thread *thread = machine__findnew_thread(machine, sample->pid,
+ sample->tid);
if (thread == NULL) {
pr_debug("problem processing %d event, skipping it.\n",
@@ -498,7 +521,7 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
return 0;
}
- if (perf_event__preprocess_sample(event, machine, &al, sample, 0) < 0) {
+ if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
pr_err("problem processing %d event, skipping it.\n",
event->header.type);
return -1;
@@ -510,7 +533,7 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
return 0;
- scripting_ops->process_event(event, sample, evsel, machine, &al);
+ scripting_ops->process_event(event, sample, evsel, machine, thread, &al);
evsel->hists.stats.total_period += sample->period;
return 0;
@@ -523,7 +546,6 @@ static struct perf_tool perf_script = {
.exit = perf_event__process_exit,
.fork = perf_event__process_fork,
.attr = perf_event__process_attr,
- .event_type = perf_event__process_event_type,
.tracing_data = perf_event__process_tracing_data,
.build_id = perf_event__process_build_id,
.ordered_samples = true,
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 352fbd7ff4a..f686d5ff594 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -100,6 +100,7 @@ static const char *pre_cmd = NULL;
static const char *post_cmd = NULL;
static bool sync_run = false;
static unsigned int interval = 0;
+static unsigned int initial_delay = 0;
static bool forever = false;
static struct timespec ref_time;
static struct cpu_map *aggr_map;
@@ -254,7 +255,8 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
if (!perf_target__has_task(&target) &&
perf_evsel__is_group_leader(evsel)) {
attr->disabled = 1;
- attr->enable_on_exec = 1;
+ if (!initial_delay)
+ attr->enable_on_exec = 1;
}
return perf_evsel__open_per_thread(evsel, evsel_list->threads);
@@ -414,6 +416,22 @@ static void print_interval(void)
list_for_each_entry(counter, &evsel_list->entries, node)
print_counter_aggr(counter, prefix);
}
+
+ fflush(output);
+}
+
+static void handle_initial_delay(void)
+{
+ struct perf_evsel *counter;
+
+ if (initial_delay) {
+ const int ncpus = cpu_map__nr(evsel_list->cpus),
+ nthreads = thread_map__nr(evsel_list->threads);
+
+ usleep(initial_delay * 1000);
+ list_for_each_entry(counter, &evsel_list->entries, node)
+ perf_evsel__enable(counter, ncpus, nthreads);
+ }
}
static int __run_perf_stat(int argc, const char **argv)
@@ -486,6 +504,7 @@ static int __run_perf_stat(int argc, const char **argv)
if (forks) {
perf_evlist__start_workload(evsel_list);
+ handle_initial_delay();
if (interval) {
while (!waitpid(child_pid, &status, WNOHANG)) {
@@ -497,6 +516,7 @@ static int __run_perf_stat(int argc, const char **argv)
if (WIFSIGNALED(status))
psignal(WTERMSIG(status), argv[0]);
} else {
+ handle_initial_delay();
while (!done) {
nanosleep(&ts, NULL);
if (interval)
@@ -1419,6 +1439,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
"aggregate counts per processor socket", AGGR_SOCKET),
OPT_SET_UINT(0, "per-core", &aggr_mode,
"aggregate counts per physical processor core", AGGR_CORE),
+ OPT_UINTEGER('D', "delay", &initial_delay,
+ "ms to wait before starting measurement after program start"),
OPT_END()
};
const char * const stat_usage[] = {
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 4536a92b18f..c2e02319347 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -12,6 +12,8 @@
* of the License.
*/
+#include <traceevent/event-parse.h>
+
#include "builtin.h"
#include "util/util.h"
@@ -19,6 +21,7 @@
#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
+#include "util/evlist.h"
#include "util/evsel.h"
#include <linux/rbtree.h>
#include "util/symbol.h"
@@ -328,25 +331,6 @@ struct wakeup_entry {
int success;
};
-/*
- * trace_flag_type is an enumeration that holds different
- * states when a trace occurs. These are:
- * IRQS_OFF - interrupts were disabled
- * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
- * NEED_RESCED - reschedule is requested
- * HARDIRQ - inside an interrupt handler
- * SOFTIRQ - inside a softirq handler
- */
-enum trace_flag_type {
- TRACE_FLAG_IRQS_OFF = 0x01,
- TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
- TRACE_FLAG_NEED_RESCHED = 0x04,
- TRACE_FLAG_HARDIRQ = 0x08,
- TRACE_FLAG_SOFTIRQ = 0x10,
-};
-
-
-
struct sched_switch {
struct trace_entry te;
char prev_comm[TASK_COMM_LEN];
@@ -479,6 +463,8 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
}
}
+typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
+ struct perf_sample *sample);
static int process_sample_event(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
@@ -486,8 +472,6 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
struct perf_evsel *evsel,
struct machine *machine __maybe_unused)
{
- struct trace_entry *te;
-
if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
if (!first_time || first_time > sample->time)
first_time = sample->time;
@@ -495,69 +479,90 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
last_time = sample->time;
}
- te = (void *)sample->raw_data;
- if ((evsel->attr.sample_type & PERF_SAMPLE_RAW) && sample->raw_size > 0) {
- char *event_str;
-#ifdef SUPPORT_OLD_POWER_EVENTS
- struct power_entry_old *peo;
- peo = (void *)te;
-#endif
- /*
- * FIXME: use evsel, its already mapped from id to perf_evsel,
- * remove perf_header__find_event infrastructure bits.
- * Mapping all these "power:cpu_idle" strings to the tracepoint
- * ID and then just comparing against evsel->attr.config.
- *
- * e.g.:
- *
- * if (evsel->attr.config == power_cpu_idle_id)
- */
- event_str = perf_header__find_event(te->type);
-
- if (!event_str)
- return 0;
-
- if (sample->cpu > numcpus)
- numcpus = sample->cpu;
-
- if (strcmp(event_str, "power:cpu_idle") == 0) {
- struct power_processor_entry *ppe = (void *)te;
- if (ppe->state == (u32)PWR_EVENT_EXIT)
- c_state_end(ppe->cpu_id, sample->time);
- else
- c_state_start(ppe->cpu_id, sample->time,
- ppe->state);
- }
- else if (strcmp(event_str, "power:cpu_frequency") == 0) {
- struct power_processor_entry *ppe = (void *)te;
- p_state_change(ppe->cpu_id, sample->time, ppe->state);
- }
+ if (sample->cpu > numcpus)
+ numcpus = sample->cpu;
+
+ if (evsel->handler.func != NULL) {
+ tracepoint_handler f = evsel->handler.func;
+ return f(evsel, sample);
+ }
+
+ return 0;
+}
+
+static int
+process_sample_cpu_idle(struct perf_evsel *evsel __maybe_unused,
+ struct perf_sample *sample)
+{
+ struct power_processor_entry *ppe = sample->raw_data;
+
+ if (ppe->state == (u32) PWR_EVENT_EXIT)
+ c_state_end(ppe->cpu_id, sample->time);
+ else
+ c_state_start(ppe->cpu_id, sample->time, ppe->state);
+ return 0;
+}
+
+static int
+process_sample_cpu_frequency(struct perf_evsel *evsel __maybe_unused,
+ struct perf_sample *sample)
+{
+ struct power_processor_entry *ppe = sample->raw_data;
+
+ p_state_change(ppe->cpu_id, sample->time, ppe->state);
+ return 0;
+}
+
+static int
+process_sample_sched_wakeup(struct perf_evsel *evsel __maybe_unused,
+ struct perf_sample *sample)
+{
+ struct trace_entry *te = sample->raw_data;
+
+ sched_wakeup(sample->cpu, sample->time, sample->pid, te);
+ return 0;
+}
- else if (strcmp(event_str, "sched:sched_wakeup") == 0)
- sched_wakeup(sample->cpu, sample->time, sample->pid, te);
+static int
+process_sample_sched_switch(struct perf_evsel *evsel __maybe_unused,
+ struct perf_sample *sample)
+{
+ struct trace_entry *te = sample->raw_data;
- else if (strcmp(event_str, "sched:sched_switch") == 0)
- sched_switch(sample->cpu, sample->time, te);
+ sched_switch(sample->cpu, sample->time, te);
+ return 0;
+}
#ifdef SUPPORT_OLD_POWER_EVENTS
- if (use_old_power_events) {
- if (strcmp(event_str, "power:power_start") == 0)
- c_state_start(peo->cpu_id, sample->time,
- peo->value);
-
- else if (strcmp(event_str, "power:power_end") == 0)
- c_state_end(sample->cpu, sample->time);
-
- else if (strcmp(event_str,
- "power:power_frequency") == 0)
- p_state_change(peo->cpu_id, sample->time,
- peo->value);
- }
-#endif
- }
+static int
+process_sample_power_start(struct perf_evsel *evsel __maybe_unused,
+ struct perf_sample *sample)
+{
+ struct power_entry_old *peo = sample->raw_data;
+
+ c_state_start(peo->cpu_id, sample->time, peo->value);
return 0;
}
+static int
+process_sample_power_end(struct perf_evsel *evsel __maybe_unused,
+ struct perf_sample *sample)
+{
+ c_state_end(sample->cpu, sample->time);
+ return 0;
+}
+
+static int
+process_sample_power_frequency(struct perf_evsel *evsel __maybe_unused,
+ struct perf_sample *sample)
+{
+ struct power_entry_old *peo = sample->raw_data;
+
+ p_state_change(peo->cpu_id, sample->time, peo->value);
+ return 0;
+}
+#endif /* SUPPORT_OLD_POWER_EVENTS */
+
/*
* After the last sample we need to wrap up the current C/P state
* and close out each CPU for these.
@@ -974,6 +979,17 @@ static int __cmd_timechart(const char *output_name)
.sample = process_sample_event,
.ordered_samples = true,
};
+ const struct perf_evsel_str_handler power_tracepoints[] = {
+ { "power:cpu_idle", process_sample_cpu_idle },
+ { "power:cpu_frequency", process_sample_cpu_frequency },
+ { "sched:sched_wakeup", process_sample_sched_wakeup },
+ { "sched:sched_switch", process_sample_sched_switch },
+#ifdef SUPPORT_OLD_POWER_EVENTS
+ { "power:power_start", process_sample_power_start },
+ { "power:power_end", process_sample_power_end },
+ { "power:power_frequency", process_sample_power_frequency },
+#endif
+ };
struct perf_session *session = perf_session__new(input_name, O_RDONLY,
0, false, &perf_timechart);
int ret = -EINVAL;
@@ -984,6 +1000,12 @@ static int __cmd_timechart(const char *output_name)
if (!perf_session__has_traces(session, "timechart record"))
goto out_delete;
+ if (perf_session__set_tracepoints_handlers(session,
+ power_tracepoints)) {
+ pr_err("Initializing session tracepoint handlers failed\n");
+ goto out_delete;
+ }
+
ret = perf_session__process_events(session, &perf_timechart);
if (ret)
goto out_delete;
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index e06c4f86933..212214162bb 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -40,6 +40,7 @@
#include "util/xyarray.h"
#include "util/sort.h"
#include "util/intlist.h"
+#include "arch/common.h"
#include "util/debug.h"
@@ -102,7 +103,8 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
/*
* We can't annotate with just /proc/kallsyms
*/
- if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS) {
+ if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
+ !dso__is_kcore(map->dso)) {
pr_err("Can't annotate %s: No vmlinux file was found in the "
"path\n", sym->name);
sleep(1);
@@ -237,8 +239,6 @@ out_unlock:
pthread_mutex_unlock(&notes->lock);
}
-static const char CONSOLE_CLEAR[] = "";
-
static struct hist_entry *perf_evsel__add_hist_entry(struct perf_evsel *evsel,
struct addr_location *al,
struct perf_sample *sample)
@@ -689,7 +689,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
{
struct perf_top *top = container_of(tool, struct perf_top, tool);
struct symbol *parent = NULL;
- u64 ip = event->ip.ip;
+ u64 ip = sample->ip;
struct addr_location al;
int err;
@@ -699,10 +699,10 @@ static void perf_event__process_sample(struct perf_tool *tool,
if (!seen)
seen = intlist__new(NULL);
- if (!intlist__has_entry(seen, event->ip.pid)) {
+ if (!intlist__has_entry(seen, sample->pid)) {
pr_err("Can't find guest [%d]'s kernel information\n",
- event->ip.pid);
- intlist__add(seen, event->ip.pid);
+ sample->pid);
+ intlist__add(seen, sample->pid);
}
return;
}
@@ -716,8 +716,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
top->exact_samples++;
- if (perf_event__preprocess_sample(event, machine, &al, sample,
- symbol_filter) < 0 ||
+ if (perf_event__preprocess_sample(event, machine, &al, sample) < 0 ||
al.filtered)
return;
@@ -772,8 +771,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
sample->callchain) {
err = machine__resolve_callchain(machine, evsel,
al.thread, sample,
- &parent);
-
+ &parent, &al);
if (err)
return;
}
@@ -838,7 +836,8 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
break;
case PERF_RECORD_MISC_GUEST_KERNEL:
++top->guest_kernel_samples;
- machine = perf_session__find_machine(session, event->ip.pid);
+ machine = perf_session__find_machine(session,
+ sample.pid);
break;
case PERF_RECORD_MISC_GUEST_USER:
++top->guest_us_samples;
@@ -939,6 +938,14 @@ static int __cmd_top(struct perf_top *top)
if (top->session == NULL)
return -ENOMEM;
+ machines__set_symbol_filter(&top->session->machines, symbol_filter);
+
+ if (!objdump_path) {
+ ret = perf_session_env__lookup_objdump(&top->session->header.env);
+ if (ret)
+ goto out_delete;
+ }
+
ret = perf_top__setup_sample_type(top);
if (ret)
goto out_delete;
@@ -1102,6 +1109,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_CALLBACK_DEFAULT('G', "call-graph", &top.record_opts,
"mode[,dump_size]", record_callchain_help,
&parse_callchain_opt, "fp"),
+ OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
+ "ignore callees of these functions in call graphs",
+ report_parse_ignore_callees_opt),
OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
"Show a column with the sum of periods"),
OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
@@ -1114,6 +1124,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
"Interleave source code with assembly code (default)"),
OPT_BOOLEAN(0, "asm-raw", &symbol_conf.annotate_asm_raw,
"Display raw encoding of assembly instructions (default)"),
+ OPT_STRING(0, "objdump", &objdump_path, "path",
+ "objdump binary to use for disassembly and annotations"),
OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
"Specify disassembler style (e.g. -M intel for intel syntax)"),
OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index ab3ed4af146..b6f0725068b 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1,34 +1,331 @@
+#include <traceevent/event-parse.h>
#include "builtin.h"
#include "util/color.h"
+#include "util/debug.h"
#include "util/evlist.h"
#include "util/machine.h"
+#include "util/session.h"
#include "util/thread.h"
#include "util/parse-options.h"
+#include "util/strlist.h"
+#include "util/intlist.h"
#include "util/thread_map.h"
-#include "event-parse.h"
#include <libaudit.h>
#include <stdlib.h>
+#include <sys/mman.h>
+#include <linux/futex.h>
+
+static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
+ unsigned long arg,
+ u8 arg_idx __maybe_unused,
+ u8 *arg_mask __maybe_unused)
+{
+ return scnprintf(bf, size, "%#lx", arg);
+}
+
+#define SCA_HEX syscall_arg__scnprintf_hex
+
+static size_t syscall_arg__scnprintf_whence(char *bf, size_t size,
+ unsigned long arg,
+ u8 arg_idx __maybe_unused,
+ u8 *arg_mask __maybe_unused)
+{
+ int whence = arg;
+
+ switch (whence) {
+#define P_WHENCE(n) case SEEK_##n: return scnprintf(bf, size, #n)
+ P_WHENCE(SET);
+ P_WHENCE(CUR);
+ P_WHENCE(END);
+#ifdef SEEK_DATA
+ P_WHENCE(DATA);
+#endif
+#ifdef SEEK_HOLE
+ P_WHENCE(HOLE);
+#endif
+#undef P_WHENCE
+ default: break;
+ }
+
+ return scnprintf(bf, size, "%#x", whence);
+}
+
+#define SCA_WHENCE syscall_arg__scnprintf_whence
+
+static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
+ unsigned long arg,
+ u8 arg_idx __maybe_unused,
+ u8 *arg_mask __maybe_unused)
+{
+ int printed = 0, prot = arg;
+
+ if (prot == PROT_NONE)
+ return scnprintf(bf, size, "NONE");
+#define P_MMAP_PROT(n) \
+ if (prot & PROT_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ prot &= ~PROT_##n; \
+ }
+
+ P_MMAP_PROT(EXEC);
+ P_MMAP_PROT(READ);
+ P_MMAP_PROT(WRITE);
+#ifdef PROT_SEM
+ P_MMAP_PROT(SEM);
+#endif
+ P_MMAP_PROT(GROWSDOWN);
+ P_MMAP_PROT(GROWSUP);
+#undef P_MMAP_PROT
+
+ if (prot)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", prot);
+
+ return printed;
+}
+
+#define SCA_MMAP_PROT syscall_arg__scnprintf_mmap_prot
+
+static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size,
+ unsigned long arg, u8 arg_idx __maybe_unused,
+ u8 *arg_mask __maybe_unused)
+{
+ int printed = 0, flags = arg;
+
+#define P_MMAP_FLAG(n) \
+ if (flags & MAP_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ flags &= ~MAP_##n; \
+ }
+
+ P_MMAP_FLAG(SHARED);
+ P_MMAP_FLAG(PRIVATE);
+ P_MMAP_FLAG(32BIT);
+ P_MMAP_FLAG(ANONYMOUS);
+ P_MMAP_FLAG(DENYWRITE);
+ P_MMAP_FLAG(EXECUTABLE);
+ P_MMAP_FLAG(FILE);
+ P_MMAP_FLAG(FIXED);
+ P_MMAP_FLAG(GROWSDOWN);
+#ifdef MAP_HUGETLB
+ P_MMAP_FLAG(HUGETLB);
+#endif
+ P_MMAP_FLAG(LOCKED);
+ P_MMAP_FLAG(NONBLOCK);
+ P_MMAP_FLAG(NORESERVE);
+ P_MMAP_FLAG(POPULATE);
+ P_MMAP_FLAG(STACK);
+#ifdef MAP_UNINITIALIZED
+ P_MMAP_FLAG(UNINITIALIZED);
+#endif
+#undef P_MMAP_FLAG
+
+ if (flags)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+ return printed;
+}
+
+#define SCA_MMAP_FLAGS syscall_arg__scnprintf_mmap_flags
+
+static size_t syscall_arg__scnprintf_madvise_behavior(char *bf, size_t size,
+ unsigned long arg, u8 arg_idx __maybe_unused,
+ u8 *arg_mask __maybe_unused)
+{
+ int behavior = arg;
+
+ switch (behavior) {
+#define P_MADV_BHV(n) case MADV_##n: return scnprintf(bf, size, #n)
+ P_MADV_BHV(NORMAL);
+ P_MADV_BHV(RANDOM);
+ P_MADV_BHV(SEQUENTIAL);
+ P_MADV_BHV(WILLNEED);
+ P_MADV_BHV(DONTNEED);
+ P_MADV_BHV(REMOVE);
+ P_MADV_BHV(DONTFORK);
+ P_MADV_BHV(DOFORK);
+ P_MADV_BHV(HWPOISON);
+#ifdef MADV_SOFT_OFFLINE
+ P_MADV_BHV(SOFT_OFFLINE);
+#endif
+ P_MADV_BHV(MERGEABLE);
+ P_MADV_BHV(UNMERGEABLE);
+#ifdef MADV_HUGEPAGE
+ P_MADV_BHV(HUGEPAGE);
+#endif
+#ifdef MADV_NOHUGEPAGE
+ P_MADV_BHV(NOHUGEPAGE);
+#endif
+#ifdef MADV_DONTDUMP
+ P_MADV_BHV(DONTDUMP);
+#endif
+#ifdef MADV_DODUMP
+ P_MADV_BHV(DODUMP);
+#endif
+#undef P_MADV_BHV
+ default: break;
+ }
+
+ return scnprintf(bf, size, "%#x", behavior);
+}
+
+#define SCA_MADV_BHV syscall_arg__scnprintf_madvise_behavior
+
+static size_t syscall_arg__scnprintf_futex_op(char *bf, size_t size, unsigned long arg,
+ u8 arg_idx __maybe_unused, u8 *arg_mask)
+{
+ enum syscall_futex_args {
+ SCF_UADDR = (1 << 0),
+ SCF_OP = (1 << 1),
+ SCF_VAL = (1 << 2),
+ SCF_TIMEOUT = (1 << 3),
+ SCF_UADDR2 = (1 << 4),
+ SCF_VAL3 = (1 << 5),
+ };
+ int op = arg;
+ int cmd = op & FUTEX_CMD_MASK;
+ size_t printed = 0;
+
+ switch (cmd) {
+#define P_FUTEX_OP(n) case FUTEX_##n: printed = scnprintf(bf, size, #n);
+ P_FUTEX_OP(WAIT); *arg_mask |= SCF_VAL3|SCF_UADDR2; break;
+ P_FUTEX_OP(WAKE); *arg_mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
+ P_FUTEX_OP(FD); *arg_mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
+ P_FUTEX_OP(REQUEUE); *arg_mask |= SCF_VAL3|SCF_TIMEOUT; break;
+ P_FUTEX_OP(CMP_REQUEUE); *arg_mask |= SCF_TIMEOUT; break;
+ P_FUTEX_OP(CMP_REQUEUE_PI); *arg_mask |= SCF_TIMEOUT; break;
+ P_FUTEX_OP(WAKE_OP); break;
+ P_FUTEX_OP(LOCK_PI); *arg_mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
+ P_FUTEX_OP(UNLOCK_PI); *arg_mask |= SCF_VAL3|SCF_UADDR2|SCF_TIMEOUT; break;
+ P_FUTEX_OP(TRYLOCK_PI); *arg_mask |= SCF_VAL3|SCF_UADDR2; break;
+ P_FUTEX_OP(WAIT_BITSET); *arg_mask |= SCF_UADDR2; break;
+ P_FUTEX_OP(WAKE_BITSET); *arg_mask |= SCF_UADDR2; break;
+ P_FUTEX_OP(WAIT_REQUEUE_PI); break;
+ default: printed = scnprintf(bf, size, "%#x", cmd); break;
+ }
+
+ if (op & FUTEX_PRIVATE_FLAG)
+ printed += scnprintf(bf + printed, size - printed, "|PRIV");
+
+ if (op & FUTEX_CLOCK_REALTIME)
+ printed += scnprintf(bf + printed, size - printed, "|CLKRT");
+
+ return printed;
+}
+
+#define SCA_FUTEX_OP syscall_arg__scnprintf_futex_op
+
+static size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size,
+ unsigned long arg,
+ u8 arg_idx, u8 *arg_mask)
+{
+ int printed = 0, flags = arg;
+
+ if (!(flags & O_CREAT))
+ *arg_mask |= 1 << (arg_idx + 1); /* Mask the mode parm */
+
+ if (flags == 0)
+ return scnprintf(bf, size, "RDONLY");
+#define P_FLAG(n) \
+ if (flags & O_##n) { \
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+ flags &= ~O_##n; \
+ }
+
+ P_FLAG(APPEND);
+ P_FLAG(ASYNC);
+ P_FLAG(CLOEXEC);
+ P_FLAG(CREAT);
+ P_FLAG(DIRECT);
+ P_FLAG(DIRECTORY);
+ P_FLAG(EXCL);
+ P_FLAG(LARGEFILE);
+ P_FLAG(NOATIME);
+ P_FLAG(NOCTTY);
+#ifdef O_NONBLOCK
+ P_FLAG(NONBLOCK);
+#elif O_NDELAY
+ P_FLAG(NDELAY);
+#endif
+#ifdef O_PATH
+ P_FLAG(PATH);
+#endif
+ P_FLAG(RDWR);
+#ifdef O_DSYNC
+ if ((flags & O_SYNC) == O_SYNC)
+ printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", "SYNC");
+ else {
+ P_FLAG(DSYNC);
+ }
+#else
+ P_FLAG(SYNC);
+#endif
+ P_FLAG(TRUNC);
+ P_FLAG(WRONLY);
+#undef P_FLAG
+
+ if (flags)
+ printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+ return printed;
+}
+
+#define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags
static struct syscall_fmt {
const char *name;
const char *alias;
+ size_t (*arg_scnprintf[6])(char *bf, size_t size, unsigned long arg, u8 arg_idx, u8 *arg_mask);
bool errmsg;
bool timeout;
+ bool hexret;
} syscall_fmts[] = {
{ .name = "access", .errmsg = true, },
{ .name = "arch_prctl", .errmsg = true, .alias = "prctl", },
+ { .name = "brk", .hexret = true,
+ .arg_scnprintf = { [0] = SCA_HEX, /* brk */ }, },
+ { .name = "mmap", .hexret = true, },
+ { .name = "connect", .errmsg = true, },
{ .name = "fstat", .errmsg = true, .alias = "newfstat", },
{ .name = "fstatat", .errmsg = true, .alias = "newfstatat", },
- { .name = "futex", .errmsg = true, },
- { .name = "open", .errmsg = true, },
+ { .name = "futex", .errmsg = true,
+ .arg_scnprintf = { [1] = SCA_FUTEX_OP, /* op */ }, },
+ { .name = "ioctl", .errmsg = true,
+ .arg_scnprintf = { [2] = SCA_HEX, /* arg */ }, },
+ { .name = "lseek", .errmsg = true,
+ .arg_scnprintf = { [2] = SCA_WHENCE, /* whence */ }, },
+ { .name = "lstat", .errmsg = true, .alias = "newlstat", },
+ { .name = "madvise", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_HEX, /* start */
+ [2] = SCA_MADV_BHV, /* behavior */ }, },
+ { .name = "mmap", .hexret = true,
+ .arg_scnprintf = { [0] = SCA_HEX, /* addr */
+ [2] = SCA_MMAP_PROT, /* prot */
+ [3] = SCA_MMAP_FLAGS, /* flags */ }, },
+ { .name = "mprotect", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_HEX, /* start */
+ [2] = SCA_MMAP_PROT, /* prot */ }, },
+ { .name = "mremap", .hexret = true,
+ .arg_scnprintf = { [0] = SCA_HEX, /* addr */
+ [4] = SCA_HEX, /* new_addr */ }, },
+ { .name = "munmap", .errmsg = true,
+ .arg_scnprintf = { [0] = SCA_HEX, /* addr */ }, },
+ { .name = "open", .errmsg = true,
+ .arg_scnprintf = { [1] = SCA_OPEN_FLAGS, /* flags */ }, },
+ { .name = "open_by_handle_at", .errmsg = true,
+ .arg_scnprintf = { [2] = SCA_OPEN_FLAGS, /* flags */ }, },
+ { .name = "openat", .errmsg = true,
+ .arg_scnprintf = { [2] = SCA_OPEN_FLAGS, /* flags */ }, },
{ .name = "poll", .errmsg = true, .timeout = true, },
{ .name = "ppoll", .errmsg = true, .timeout = true, },
+ { .name = "pread", .errmsg = true, .alias = "pread64", },
+ { .name = "pwrite", .errmsg = true, .alias = "pwrite64", },
{ .name = "read", .errmsg = true, },
{ .name = "recvfrom", .errmsg = true, },
{ .name = "select", .errmsg = true, .timeout = true, },
{ .name = "socket", .errmsg = true, },
{ .name = "stat", .errmsg = true, .alias = "newstat", },
+ { .name = "uname", .errmsg = true, .alias = "newuname", },
};
static int syscall_fmt__cmp(const void *name, const void *fmtp)
@@ -46,7 +343,10 @@ static struct syscall_fmt *syscall_fmt__find(const char *name)
struct syscall {
struct event_format *tp_format;
const char *name;
+ bool filtered;
struct syscall_fmt *fmt;
+ size_t (**arg_scnprintf)(char *bf, size_t size,
+ unsigned long arg, u8 arg_idx, u8 *args_mask);
};
static size_t fprintf_duration(unsigned long t, FILE *fp)
@@ -60,7 +360,7 @@ static size_t fprintf_duration(unsigned long t, FILE *fp)
printed += color_fprintf(fp, PERF_COLOR_YELLOW, "%6.3f ms", duration);
else
printed += color_fprintf(fp, PERF_COLOR_NORMAL, "%6.3f ms", duration);
- return printed + fprintf(stdout, "): ");
+ return printed + fprintf(fp, "): ");
}
struct thread_trace {
@@ -77,7 +377,7 @@ static struct thread_trace *thread_trace__new(void)
return zalloc(sizeof(struct thread_trace));
}
-static struct thread_trace *thread__trace(struct thread *thread)
+static struct thread_trace *thread__trace(struct thread *thread, FILE *fp)
{
struct thread_trace *ttrace;
@@ -95,12 +395,13 @@ static struct thread_trace *thread__trace(struct thread *thread)
return ttrace;
fail:
- color_fprintf(stdout, PERF_COLOR_RED,
+ color_fprintf(fp, PERF_COLOR_RED,
"WARNING: not enough memory, dropping samples!\n");
return NULL;
}
struct trace {
+ struct perf_tool tool;
int audit_machine;
struct {
int max;
@@ -109,7 +410,12 @@ struct trace {
struct perf_record_opts opts;
struct machine host;
u64 base_time;
+ FILE *output;
unsigned long nr_events;
+ struct strlist *ev_qualifier;
+ bool not_ev_qualifier;
+ struct intlist *tid_list;
+ struct intlist *pid_list;
bool sched;
bool multiple_threads;
double duration_filter;
@@ -142,18 +448,19 @@ static size_t trace__fprintf_entry_head(struct trace *trace, struct thread *thre
printed += fprintf_duration(duration, fp);
if (trace->multiple_threads)
- printed += fprintf(fp, "%d ", thread->pid);
+ printed += fprintf(fp, "%d ", thread->tid);
return printed;
}
-static int trace__process_event(struct machine *machine, union perf_event *event)
+static int trace__process_event(struct trace *trace, struct machine *machine,
+ union perf_event *event)
{
int ret = 0;
switch (event->header.type) {
case PERF_RECORD_LOST:
- color_fprintf(stdout, PERF_COLOR_RED,
+ color_fprintf(trace->output, PERF_COLOR_RED,
"LOST %" PRIu64 " events!\n", event->lost.lost);
ret = machine__process_lost_event(machine, event);
default:
@@ -164,12 +471,13 @@ static int trace__process_event(struct machine *machine, union perf_event *event
return ret;
}
-static int trace__tool_process(struct perf_tool *tool __maybe_unused,
+static int trace__tool_process(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
struct machine *machine)
{
- return trace__process_event(machine, event);
+ struct trace *trace = container_of(tool, struct trace, tool);
+ return trace__process_event(trace, machine, event);
}
static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
@@ -183,11 +491,11 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
machine__create_kernel_maps(&trace->host);
if (perf_target__has_task(&trace->opts.target)) {
- err = perf_event__synthesize_thread_map(NULL, evlist->threads,
+ err = perf_event__synthesize_thread_map(&trace->tool, evlist->threads,
trace__tool_process,
&trace->host);
} else {
- err = perf_event__synthesize_threads(NULL, trace__tool_process,
+ err = perf_event__synthesize_threads(&trace->tool, trace__tool_process,
&trace->host);
}
@@ -197,6 +505,26 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
return err;
}
+static int syscall__set_arg_fmts(struct syscall *sc)
+{
+ struct format_field *field;
+ int idx = 0;
+
+ sc->arg_scnprintf = calloc(sc->tp_format->format.nr_fields - 1, sizeof(void *));
+ if (sc->arg_scnprintf == NULL)
+ return -1;
+
+ for (field = sc->tp_format->format.fields->next; field; field = field->next) {
+ if (sc->fmt && sc->fmt->arg_scnprintf[idx])
+ sc->arg_scnprintf[idx] = sc->fmt->arg_scnprintf[idx];
+ else if (field->flags & FIELD_IS_POINTER)
+ sc->arg_scnprintf[idx] = syscall_arg__scnprintf_hex;
+ ++idx;
+ }
+
+ return 0;
+}
+
static int trace__read_syscall_info(struct trace *trace, int id)
{
char tp_name[128];
@@ -225,6 +553,20 @@ static int trace__read_syscall_info(struct trace *trace, int id)
sc = trace->syscalls.table + id;
sc->name = name;
+
+ if (trace->ev_qualifier) {
+ bool in = strlist__find(trace->ev_qualifier, name) != NULL;
+
+ if (!(in ^ trace->not_ev_qualifier)) {
+ sc->filtered = true;
+ /*
+ * No need to read the tracepoint information since this will be
+ * filtered out.
+ */
+ return 0;
+ }
+ }
+
sc->fmt = syscall_fmt__find(sc->name);
snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
@@ -235,7 +577,10 @@ static int trace__read_syscall_info(struct trace *trace, int id)
sc->tp_format = event_format__new("syscalls", tp_name);
}
- return sc->tp_format != NULL ? 0 : -1;
+ if (sc->tp_format == NULL)
+ return -1;
+
+ return syscall__set_arg_fmts(sc);
}
static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
@@ -246,11 +591,23 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
if (sc->tp_format != NULL) {
struct format_field *field;
+ u8 mask = 0, bit = 1;
+
+ for (field = sc->tp_format->format.fields->next; field;
+ field = field->next, ++i, bit <<= 1) {
+ if (mask & bit)
+ continue;
- for (field = sc->tp_format->format.fields->next; field; field = field->next) {
printed += scnprintf(bf + printed, size - printed,
- "%s%s: %ld", printed ? ", " : "",
- field->name, args[i++]);
+ "%s%s: ", printed ? ", " : "", field->name);
+
+ if (sc->arg_scnprintf && sc->arg_scnprintf[i]) {
+ printed += sc->arg_scnprintf[i](bf + printed, size - printed,
+ args[i], i, &mask);
+ } else {
+ printed += scnprintf(bf + printed, size - printed,
+ "%ld", args[i]);
+ }
}
} else {
while (i < 6) {
@@ -274,7 +631,22 @@ static struct syscall *trace__syscall_info(struct trace *trace,
int id = perf_evsel__intval(evsel, sample, "id");
if (id < 0) {
- printf("Invalid syscall %d id, skipping...\n", id);
+
+ /*
+ * XXX: Noticed on x86_64, reproduced as far back as 3.0.36; haven't tried
+ * older kernels, so this is left at a higher verbosity level until it is
+ * explained. Reproduced with plain ftrace with:
+ *
+ * echo 1 > /t/events/raw_syscalls/sys_exit/enable
+ * grep "NR -1 " /t/trace_pipe
+ *
+ * After generating some load on the machine.
+ */
+ if (verbose > 1) {
+ static u64 n;
+ fprintf(trace->output, "Invalid syscall %d id, skipping (%s, %" PRIu64 ") ...\n",
+ id, perf_evsel__name(evsel), ++n);
+ }
return NULL;
}
@@ -288,10 +660,12 @@ static struct syscall *trace__syscall_info(struct trace *trace,
return &trace->syscalls.table[id];
out_cant_read:
- printf("Problems reading syscall %d", id);
- if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
- printf("(%s)", trace->syscalls.table[id].name);
- puts(" information");
+ if (verbose) {
+ fprintf(trace->output, "Problems reading syscall %d", id);
+ if (id <= trace->syscalls.max && trace->syscalls.table[id].name != NULL)
+ fprintf(trace->output, "(%s)", trace->syscalls.table[id].name);
+ fputs(" information\n", trace->output);
+ }
return NULL;
}
@@ -301,16 +675,25 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
char *msg;
void *args;
size_t printed = 0;
- struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
+ struct thread *thread;
struct syscall *sc = trace__syscall_info(trace, evsel, sample);
- struct thread_trace *ttrace = thread__trace(thread);
+ struct thread_trace *ttrace;
+
+ if (sc == NULL)
+ return -1;
- if (ttrace == NULL || sc == NULL)
+ if (sc->filtered)
+ return 0;
+
+ thread = machine__findnew_thread(&trace->host, sample->pid,
+ sample->tid);
+ ttrace = thread__trace(thread, trace->output);
+ if (ttrace == NULL)
return -1;
args = perf_evsel__rawptr(evsel, sample, "args");
if (args == NULL) {
- printf("Problems reading syscall arguments\n");
+ fprintf(trace->output, "Problems reading syscall arguments\n");
return -1;
}
@@ -330,8 +713,8 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) {
if (!trace->duration_filter) {
- trace__fprintf_entry_head(trace, thread, 1, sample->time, stdout);
- printf("%-70s\n", ttrace->entry_str);
+ trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
+ fprintf(trace->output, "%-70s\n", ttrace->entry_str);
}
} else
ttrace->entry_pending = true;
@@ -344,11 +727,20 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
{
int ret;
u64 duration = 0;
- struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
- struct thread_trace *ttrace = thread__trace(thread);
+ struct thread *thread;
struct syscall *sc = trace__syscall_info(trace, evsel, sample);
+ struct thread_trace *ttrace;
+
+ if (sc == NULL)
+ return -1;
- if (ttrace == NULL || sc == NULL)
+ if (sc->filtered)
+ return 0;
+
+ thread = machine__findnew_thread(&trace->host, sample->pid,
+ sample->tid);
+ ttrace = thread__trace(thread, trace->output);
+ if (ttrace == NULL)
return -1;
ret = perf_evsel__intval(evsel, sample, "ret");
@@ -364,28 +756,33 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
} else if (trace->duration_filter)
goto out;
- trace__fprintf_entry_head(trace, thread, duration, sample->time, stdout);
+ trace__fprintf_entry_head(trace, thread, duration, sample->time, trace->output);
if (ttrace->entry_pending) {
- printf("%-70s", ttrace->entry_str);
+ fprintf(trace->output, "%-70s", ttrace->entry_str);
} else {
- printf(" ... [");
- color_fprintf(stdout, PERF_COLOR_YELLOW, "continued");
- printf("]: %s()", sc->name);
+ fprintf(trace->output, " ... [");
+ color_fprintf(trace->output, PERF_COLOR_YELLOW, "continued");
+ fprintf(trace->output, "]: %s()", sc->name);
}
- if (ret < 0 && sc->fmt && sc->fmt->errmsg) {
+ if (sc->fmt == NULL) {
+signed_print:
+ fprintf(trace->output, ") = %d", ret);
+ } else if (ret < 0 && sc->fmt->errmsg) {
char bf[256];
const char *emsg = strerror_r(-ret, bf, sizeof(bf)),
*e = audit_errno_to_name(-ret);
- printf(") = -1 %s %s", e, emsg);
- } else if (ret == 0 && sc->fmt && sc->fmt->timeout)
- printf(") = 0 Timeout");
+ fprintf(trace->output, ") = -1 %s %s", e, emsg);
+ } else if (ret == 0 && sc->fmt->timeout)
+ fprintf(trace->output, ") = 0 Timeout");
+ else if (sc->fmt->hexret)
+ fprintf(trace->output, ") = %#x", ret);
else
- printf(") = %d", ret);
+ goto signed_print;
- putchar('\n');
+ fputc('\n', trace->output);
out:
ttrace->entry_pending = false;
@@ -397,8 +794,10 @@ static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evs
{
u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
double runtime_ms = (double)runtime / NSEC_PER_MSEC;
- struct thread *thread = machine__findnew_thread(&trace->host, sample->tid);
- struct thread_trace *ttrace = thread__trace(thread);
+ struct thread *thread = machine__findnew_thread(&trace->host,
+ sample->pid,
+ sample->tid);
+ struct thread_trace *ttrace = thread__trace(thread, trace->output);
if (ttrace == NULL)
goto out_dump;
@@ -408,7 +807,7 @@ static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evs
return 0;
out_dump:
- printf("%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
+ fprintf(trace->output, "%s: comm=%s,pid=%u,runtime=%" PRIu64 ",vruntime=%" PRIu64 ")\n",
evsel->name,
perf_evsel__strval(evsel, sample, "comm"),
(pid_t)perf_evsel__intval(evsel, sample, "pid"),
@@ -417,6 +816,72 @@ out_dump:
return 0;
}
+static bool skip_sample(struct trace *trace, struct perf_sample *sample)
+{
+ if ((trace->pid_list && intlist__find(trace->pid_list, sample->pid)) ||
+ (trace->tid_list && intlist__find(trace->tid_list, sample->tid)))
+ return false;
+
+ if (trace->pid_list || trace->tid_list)
+ return true;
+
+ return false;
+}
+
+static int trace__process_sample(struct perf_tool *tool,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine __maybe_unused)
+{
+ struct trace *trace = container_of(tool, struct trace, tool);
+ int err = 0;
+
+ tracepoint_handler handler = evsel->handler.func;
+
+ if (skip_sample(trace, sample))
+ return 0;
+
+ if (trace->base_time == 0)
+ trace->base_time = sample->time;
+
+ if (handler)
+ handler(trace, evsel, sample);
+
+ return err;
+}
+
+static bool
+perf_session__has_tp(struct perf_session *session, const char *name)
+{
+ struct perf_evsel *evsel;
+
+ evsel = perf_evlist__find_tracepoint_by_name(session->evlist, name);
+
+ return evsel != NULL;
+}
+
+static int parse_target_str(struct trace *trace)
+{
+ if (trace->opts.target.pid) {
+ trace->pid_list = intlist__new(trace->opts.target.pid);
+ if (trace->pid_list == NULL) {
+ pr_err("Error parsing process id string\n");
+ return -EINVAL;
+ }
+ }
+
+ if (trace->opts.target.tid) {
+ trace->tid_list = intlist__new(trace->opts.target.tid);
+ if (trace->tid_list == NULL) {
+ pr_err("Error parsing thread id string\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static int trace__run(struct trace *trace, int argc, const char **argv)
{
struct perf_evlist *evlist = perf_evlist__new();
@@ -426,32 +891,32 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
const bool forks = argc > 0;
if (evlist == NULL) {
- printf("Not enough memory to run!\n");
+ fprintf(trace->output, "Not enough memory to run!\n");
goto out;
}
if (perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_enter", trace__sys_enter) ||
perf_evlist__add_newtp(evlist, "raw_syscalls", "sys_exit", trace__sys_exit)) {
- printf("Couldn't read the raw_syscalls tracepoints information!\n");
+ fprintf(trace->output, "Couldn't read the raw_syscalls tracepoints information!\n");
goto out_delete_evlist;
}
if (trace->sched &&
perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
trace__sched_stat_runtime)) {
- printf("Couldn't read the sched_stat_runtime tracepoint information!\n");
+ fprintf(trace->output, "Couldn't read the sched_stat_runtime tracepoint information!\n");
goto out_delete_evlist;
}
err = perf_evlist__create_maps(evlist, &trace->opts.target);
if (err < 0) {
- printf("Problems parsing the target to trace, check your options!\n");
+ fprintf(trace->output, "Problems parsing the target to trace, check your options!\n");
goto out_delete_evlist;
}
err = trace__symbols_init(trace, evlist);
if (err < 0) {
- printf("Problems initializing symbol libraries!\n");
+ fprintf(trace->output, "Problems initializing symbol libraries!\n");
goto out_delete_maps;
}
@@ -464,20 +929,20 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
err = perf_evlist__prepare_workload(evlist, &trace->opts.target,
argv, false, false);
if (err < 0) {
- printf("Couldn't run the workload!\n");
+ fprintf(trace->output, "Couldn't run the workload!\n");
goto out_delete_maps;
}
}
err = perf_evlist__open(evlist);
if (err < 0) {
- printf("Couldn't create the events: %s\n", strerror(errno));
+ fprintf(trace->output, "Couldn't create the events: %s\n", strerror(errno));
goto out_delete_maps;
}
err = perf_evlist__mmap(evlist, UINT_MAX, false);
if (err < 0) {
- printf("Couldn't mmap the events: %s\n", strerror(errno));
+ fprintf(trace->output, "Couldn't mmap the events: %s\n", strerror(errno));
goto out_close_evlist;
}
@@ -502,7 +967,7 @@ again:
err = perf_evlist__parse_sample(evlist, event, &sample);
if (err) {
- printf("Can't parse sample, err = %d, skipping...\n", err);
+ fprintf(trace->output, "Can't parse sample, err = %d, skipping...\n", err);
continue;
}
@@ -510,18 +975,18 @@ again:
trace->base_time = sample.time;
if (type != PERF_RECORD_SAMPLE) {
- trace__process_event(&trace->host, event);
+ trace__process_event(trace, &trace->host, event);
continue;
}
evsel = perf_evlist__id2evsel(evlist, sample.id);
if (evsel == NULL) {
- printf("Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
+ fprintf(trace->output, "Unknown tp ID %" PRIu64 ", skipping...\n", sample.id);
continue;
}
if (sample.raw_data == NULL) {
- printf("%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
+ fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
perf_evsel__name(evsel), sample.tid,
sample.cpu, sample.raw_size);
continue;
@@ -556,6 +1021,69 @@ out:
return err;
}
+static int trace__replay(struct trace *trace)
+{
+ const struct perf_evsel_str_handler handlers[] = {
+ { "raw_syscalls:sys_enter", trace__sys_enter, },
+ { "raw_syscalls:sys_exit", trace__sys_exit, },
+ };
+
+ struct perf_session *session;
+ int err = -1;
+
+ trace->tool.sample = trace__process_sample;
+ trace->tool.mmap = perf_event__process_mmap;
+ trace->tool.comm = perf_event__process_comm;
+ trace->tool.exit = perf_event__process_exit;
+ trace->tool.fork = perf_event__process_fork;
+ trace->tool.attr = perf_event__process_attr;
+ trace->tool.tracing_data = perf_event__process_tracing_data;
+ trace->tool.build_id = perf_event__process_build_id;
+
+ trace->tool.ordered_samples = true;
+ trace->tool.ordering_requires_timestamps = true;
+
+ /* add tid to output */
+ trace->multiple_threads = true;
+
+ if (symbol__init() < 0)
+ return -1;
+
+ session = perf_session__new(input_name, O_RDONLY, 0, false,
+ &trace->tool);
+ if (session == NULL)
+ return -ENOMEM;
+
+ err = perf_session__set_tracepoints_handlers(session, handlers);
+ if (err)
+ goto out;
+
+ if (!perf_session__has_tp(session, "raw_syscalls:sys_enter")) {
+ pr_err("Data file does not have raw_syscalls:sys_enter events\n");
+ goto out;
+ }
+
+ if (!perf_session__has_tp(session, "raw_syscalls:sys_exit")) {
+ pr_err("Data file does not have raw_syscalls:sys_exit events\n");
+ goto out;
+ }
+
+ err = parse_target_str(trace);
+ if (err != 0)
+ goto out;
+
+ setup_pager();
+
+ err = perf_session__process_events(session, &trace->tool);
+ if (err)
+ pr_err("Failed to process events, error %d", err);
+
+out:
+ perf_session__delete(session);
+
+ return err;
+}
+
static size_t trace__fprintf_threads_header(FILE *fp)
{
size_t printed;
@@ -593,7 +1121,7 @@ static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp)
color = PERF_COLOR_YELLOW;
printed += color_fprintf(fp, color, "%20s", thread->comm);
- printed += fprintf(fp, " - %-5d :%11lu [", thread->pid, ttrace->nr_events);
+ printed += fprintf(fp, " - %-5d :%11lu [", thread->tid, ttrace->nr_events);
printed += color_fprintf(fp, color, "%5.1f%%", ratio);
printed += fprintf(fp, " ] %10.3f ms\n", ttrace->runtime_ms);
}
@@ -610,6 +1138,23 @@ static int trace__set_duration(const struct option *opt, const char *str,
return 0;
}
+static int trace__open_output(struct trace *trace, const char *filename)
+{
+ struct stat st;
+
+ if (!stat(filename, &st) && st.st_size) {
+ char oldname[PATH_MAX];
+
+ scnprintf(oldname, sizeof(oldname), "%s.old", filename);
+ unlink(oldname);
+ rename(filename, oldname);
+ }
+
+ trace->output = fopen(filename, "w");
+
+ return trace->output == NULL ? -errno : 0;
+}
+
int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
{
const char * const trace_usage[] = {
@@ -632,26 +1177,34 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
.no_delay = true,
.mmap_pages = 1024,
},
+ .output = stdout,
};
+ const char *output_name = NULL;
+ const char *ev_qualifier_str = NULL;
const struct option trace_options[] = {
+ OPT_STRING('e', "expr", &ev_qualifier_str, "expr",
+ "list of events to trace"),
+ OPT_STRING('o', "output", &output_name, "file", "output file name"),
+ OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
"trace events on existing process id"),
- OPT_STRING(0, "tid", &trace.opts.target.tid, "tid",
+ OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
"trace events on existing thread id"),
- OPT_BOOLEAN(0, "all-cpus", &trace.opts.target.system_wide,
+ OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
"system-wide collection from all CPUs"),
- OPT_STRING(0, "cpu", &trace.opts.target.cpu_list, "cpu",
+ OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
"list of cpus to monitor"),
OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit,
"child tasks do not inherit counters"),
- OPT_UINTEGER(0, "mmap-pages", &trace.opts.mmap_pages,
+ OPT_UINTEGER('m', "mmap-pages", &trace.opts.mmap_pages,
"number of mmap data pages"),
- OPT_STRING(0, "uid", &trace.opts.target.uid_str, "user",
+ OPT_STRING('u', "uid", &trace.opts.target.uid_str, "user",
"user to profile"),
OPT_CALLBACK(0, "duration", &trace, "float",
"show only events with duration > N.M ms",
trace__set_duration),
OPT_BOOLEAN(0, "sched", &trace.sched, "show blocking scheduler events"),
+ OPT_INCR('v', "verbose", &verbose, "be more verbose"),
OPT_END()
};
int err;
@@ -659,27 +1212,57 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
argc = parse_options(argc, argv, trace_options, trace_usage, 0);
+ if (output_name != NULL) {
+ err = trace__open_output(&trace, output_name);
+ if (err < 0) {
+ perror("failed to create output file");
+ goto out;
+ }
+ }
+
+ if (ev_qualifier_str != NULL) {
+ const char *s = ev_qualifier_str;
+
+ trace.not_ev_qualifier = *s == '!';
+ if (trace.not_ev_qualifier)
+ ++s;
+ trace.ev_qualifier = strlist__new(true, s);
+ if (trace.ev_qualifier == NULL) {
+ fputs("Not enough memory to parse event qualifier",
+ trace.output);
+ err = -ENOMEM;
+ goto out_close;
+ }
+ }
+
err = perf_target__validate(&trace.opts.target);
if (err) {
perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
- printf("%s", bf);
- return err;
+ fprintf(trace.output, "%s", bf);
+ goto out_close;
}
err = perf_target__parse_uid(&trace.opts.target);
if (err) {
perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
- printf("%s", bf);
- return err;
+ fprintf(trace.output, "%s", bf);
+ goto out_close;
}
if (!argc && perf_target__none(&trace.opts.target))
trace.opts.target.system_wide = true;
- err = trace__run(&trace, argc, argv);
+ if (input_name)
+ err = trace__replay(&trace);
+ else
+ err = trace__run(&trace, argc, argv);
if (trace.sched && !err)
- trace__fprintf_thread_summary(&trace, stdout);
+ trace__fprintf_thread_summary(&trace, trace.output);
+out_close:
+ if (output_name != NULL)
+ fclose(trace.output);
+out:
return err;
}
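The event-qualifier handling added above is terse: the '!' prefix on -e flips the sense of the list, and trace__read_syscall_info() marks a syscall as filtered when "in ^ not_ev_qualifier" is false. As a reading aid only, here is a minimal standalone sketch of that decision, with a plain string array standing in for the strlist; the qualifier contents and the is_filtered() helper are illustrative, not part of the patch.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the strlist built from "-e open,close"; purely illustrative. */
static const char *qualifier[] = { "open", "close", NULL };

/*
 * Same decision as in trace__read_syscall_info(): a syscall is filtered out
 * when its presence in the list does not match the sense requested on the
 * command line (a '!' prefix sets negated).
 */
static bool is_filtered(const char *name, bool negated)
{
	bool in = false;
	int i;

	for (i = 0; qualifier[i] != NULL; i++) {
		if (strcmp(qualifier[i], name) == 0) {
			in = true;
			break;
		}
	}

	return !(in ^ negated);
}

int main(void)
{
	/* With "-e open,close": open is traced, write is filtered out. */
	printf("open : filtered=%d\n", is_filtered("open", false));
	printf("write: filtered=%d\n", is_filtered("write", false));
	/* With the negated form the sense flips and open is skipped. */
	printf("open (negated): filtered=%d\n", is_filtered("open", true));
	return 0;
}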
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index b5d9238cb18..214e17e97e5 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -46,6 +46,8 @@ ifneq ($(obj-perf),)
obj-perf := $(abspath $(obj-perf))/
endif
+LIB_INCLUDE := $(srctree)/tools/lib/
+
# include ARCH specific config
-include $(src-perf)/arch/$(ARCH)/Makefile
@@ -121,8 +123,7 @@ endif
CFLAGS += -I$(src-perf)/util
CFLAGS += -I$(src-perf)
-CFLAGS += -I$(TRACE_EVENT_DIR)
-CFLAGS += -I$(srctree)/tools/lib/
+CFLAGS += -I$(LIB_INCLUDE)
CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 32bd102c32b..cf20187eee0 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -125,6 +125,9 @@
#ifndef NSEC_PER_SEC
# define NSEC_PER_SEC 1000000000ULL
#endif
+#ifndef NSEC_PER_USEC
+# define NSEC_PER_USEC 1000ULL
+#endif
static inline unsigned long long rdclock(void)
{
diff --git a/tools/perf/python/twatch.py b/tools/perf/python/twatch.py
index b11cca58423..2225162ee1f 100755
--- a/tools/perf/python/twatch.py
+++ b/tools/perf/python/twatch.py
@@ -21,7 +21,7 @@ def main():
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
- sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
+ sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
diff --git a/tools/perf/tests/attr/test-record-group-sampling b/tools/perf/tests/attr/test-record-group-sampling
new file mode 100644
index 00000000000..658f5d60c87
--- /dev/null
+++ b/tools/perf/tests/attr/test-record-group-sampling
@@ -0,0 +1,36 @@
+[config]
+command = record
+args = -e '{cycles,cache-misses}:S' kill >/dev/null 2>&1
+
+[event-1:base-record]
+fd=1
+group_fd=-1
+sample_type=343
+read_format=12
+inherit=0
+
+[event-2:base-record]
+fd=2
+group_fd=1
+
+# cache-misses
+type=0
+config=3
+
+# default | PERF_SAMPLE_READ
+sample_type=343
+
+# PERF_FORMAT_ID | PERF_FORMAT_GROUP
+read_format=12
+
+mmap=0
+comm=0
+enable_on_exec=0
+disabled=0
+
+# inherit is disabled for group sampling
+inherit=0
+
+# sampling disabled
+sample_freq=0
+sample_period=0
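The two literals in the test above are just OR-ed perf_event_attr bits. A small cross-check of the arithmetic, assuming the usual linux/perf_event.h UAPI definitions; this helper program is a reading aid, not part of the test suite.

#include <stdio.h>
#include <linux/perf_event.h>

int main(void)
{
	/* The record default sample bits plus PERF_SAMPLE_READ for group sampling. */
	unsigned long long sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
					 PERF_SAMPLE_TIME | PERF_SAMPLE_READ |
					 PERF_SAMPLE_ID | PERF_SAMPLE_PERIOD;
	/* The read_format bits needed to tell group members apart. */
	unsigned long long read_format = PERF_FORMAT_ID | PERF_FORMAT_GROUP;

	printf("sample_type = %llu\n", sample_type); /* prints 343 */
	printf("read_format = %llu\n", read_format); /* prints 12 */
	return 0;
}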
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 35b45f1466b..8bbeba322df 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -93,6 +93,24 @@ static struct test {
.desc = "Test software clock events have valid period values",
.func = test__sw_clock_freq,
},
+#if defined(__x86_64__) || defined(__i386__)
+ {
+ .desc = "Test converting perf time to TSC",
+ .func = test__perf_time_to_tsc,
+ },
+#endif
+ {
+ .desc = "Test object code reading",
+ .func = test__code_reading,
+ },
+ {
+ .desc = "Test sample parsing",
+ .func = test__sample_parsing,
+ },
+ {
+ .desc = "Test using a dummy software event to keep tracking",
+ .func = test__keep_tracking,
+ },
{
.func = NULL,
},
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
new file mode 100644
index 00000000000..6fb781d5586
--- /dev/null
+++ b/tools/perf/tests/code-reading.c
@@ -0,0 +1,572 @@
+#include <sys/types.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <inttypes.h>
+#include <ctype.h>
+#include <string.h>
+
+#include "parse-events.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "thread_map.h"
+#include "cpumap.h"
+#include "machine.h"
+#include "event.h"
+#include "thread.h"
+
+#include "tests.h"
+
+#define BUFSZ 1024
+#define READLEN 128
+
+struct state {
+ u64 done[1024];
+ size_t done_cnt;
+};
+
+static unsigned int hex(char c)
+{
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ if (c >= 'a' && c <= 'f')
+ return c - 'a' + 10;
+ return c - 'A' + 10;
+}
+
+static void read_objdump_line(const char *line, size_t line_len, void **buf,
+ size_t *len)
+{
+ const char *p;
+ size_t i;
+
+ /* Skip to a colon */
+ p = strchr(line, ':');
+ if (!p)
+ return;
+ i = p + 1 - line;
+
+ /* Read bytes */
+ while (*len) {
+ char c1, c2;
+
+ /* Skip spaces */
+ for (; i < line_len; i++) {
+ if (!isspace(line[i]))
+ break;
+ }
+ /* Get 2 hex digits */
+ if (i >= line_len || !isxdigit(line[i]))
+ break;
+ c1 = line[i++];
+ if (i >= line_len || !isxdigit(line[i]))
+ break;
+ c2 = line[i++];
+ /* Followed by a space */
+ if (i < line_len && line[i] && !isspace(line[i]))
+ break;
+ /* Store byte */
+ *(unsigned char *)*buf = (hex(c1) << 4) | hex(c2);
+ *buf += 1;
+ *len -= 1;
+ }
+}
+
+static int read_objdump_output(FILE *f, void **buf, size_t *len)
+{
+ char *line = NULL;
+ size_t line_len;
+ ssize_t ret;
+ int err = 0;
+
+ while (1) {
+ ret = getline(&line, &line_len, f);
+ if (feof(f))
+ break;
+ if (ret < 0) {
+ pr_debug("getline failed\n");
+ err = -1;
+ break;
+ }
+ read_objdump_line(line, ret, buf, len);
+ }
+
+ free(line);
+
+ return err;
+}
+
+static int read_via_objdump(const char *filename, u64 addr, void *buf,
+ size_t len)
+{
+ char cmd[PATH_MAX * 2];
+ const char *fmt;
+ FILE *f;
+ int ret;
+
+ fmt = "%s -d --start-address=0x%"PRIx64" --stop-address=0x%"PRIx64" %s";
+ ret = snprintf(cmd, sizeof(cmd), fmt, "objdump", addr, addr + len,
+ filename);
+ if (ret <= 0 || (size_t)ret >= sizeof(cmd))
+ return -1;
+
+ pr_debug("Objdump command is: %s\n", cmd);
+
+ /* Ignore objdump errors */
+ strcat(cmd, " 2>/dev/null");
+
+ f = popen(cmd, "r");
+ if (!f) {
+ pr_debug("popen failed\n");
+ return -1;
+ }
+
+ ret = read_objdump_output(f, &buf, &len);
+ if (len) {
+ pr_debug("objdump read too few bytes\n");
+ if (!ret)
+ ret = len;
+ }
+
+ pclose(f);
+
+ return ret;
+}
+
+static int read_object_code(u64 addr, size_t len, u8 cpumode,
+ struct thread *thread, struct machine *machine,
+ struct state *state)
+{
+ struct addr_location al;
+ unsigned char buf1[BUFSZ];
+ unsigned char buf2[BUFSZ];
+ size_t ret_len;
+ u64 objdump_addr;
+ int ret;
+
+ pr_debug("Reading object code for memory address: %#"PRIx64"\n", addr);
+
+ thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION, addr,
+ &al);
+ if (!al.map || !al.map->dso) {
+ pr_debug("thread__find_addr_map failed\n");
+ return -1;
+ }
+
+ pr_debug("File is: %s\n", al.map->dso->long_name);
+
+ if (al.map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
+ !dso__is_kcore(al.map->dso)) {
+ pr_debug("Unexpected kernel address - skipping\n");
+ return 0;
+ }
+
+ pr_debug("On file address is: %#"PRIx64"\n", al.addr);
+
+ if (len > BUFSZ)
+ len = BUFSZ;
+
+ /* Do not go off the map */
+ if (addr + len > al.map->end)
+ len = al.map->end - addr;
+
+ /* Read the object code using perf */
+ ret_len = dso__data_read_offset(al.map->dso, machine, al.addr, buf1,
+ len);
+ if (ret_len != len) {
+ pr_debug("dso__data_read_offset failed\n");
+ return -1;
+ }
+
+ /*
+ * Converting addresses for use by objdump requires more information.
+ * map__load() does that. See map__rip_2objdump() for details.
+ */
+ if (map__load(al.map, NULL))
+ return -1;
+
+ /* objdump struggles with kcore - try each map only once */
+ if (dso__is_kcore(al.map->dso)) {
+ size_t d;
+
+ for (d = 0; d < state->done_cnt; d++) {
+ if (state->done[d] == al.map->start) {
+ pr_debug("kcore map tested already");
+ pr_debug(" - skipping\n");
+ return 0;
+ }
+ }
+ if (state->done_cnt >= ARRAY_SIZE(state->done)) {
+ pr_debug("Too many kcore maps - skipping\n");
+ return 0;
+ }
+ state->done[state->done_cnt++] = al.map->start;
+ }
+
+ /* Read the object code using objdump */
+ objdump_addr = map__rip_2objdump(al.map, al.addr);
+ ret = read_via_objdump(al.map->dso->long_name, objdump_addr, buf2, len);
+ if (ret > 0) {
+ /*
+ * The kernel maps are inaccurate - assume objdump is right in
+ * that case.
+ */
+ if (cpumode == PERF_RECORD_MISC_KERNEL ||
+ cpumode == PERF_RECORD_MISC_GUEST_KERNEL) {
+ len -= ret;
+ if (len) {
+ pr_debug("Reducing len to %zu\n", len);
+ } else if (dso__is_kcore(al.map->dso)) {
+ /*
+ * objdump cannot handle very large segments
+ * that may be found in kcore.
+ */
+ pr_debug("objdump failed for kcore");
+ pr_debug(" - skipping\n");
+ return 0;
+ } else {
+ return -1;
+ }
+ }
+ }
+ if (ret < 0) {
+ pr_debug("read_via_objdump failed\n");
+ return -1;
+ }
+
+ /* The results should be identical */
+ if (memcmp(buf1, buf2, len)) {
+ pr_debug("Bytes read differ from those read by objdump\n");
+ return -1;
+ }
+ pr_debug("Bytes read match those read by objdump\n");
+
+ return 0;
+}
+
+static int process_sample_event(struct machine *machine,
+ struct perf_evlist *evlist,
+ union perf_event *event, struct state *state)
+{
+ struct perf_sample sample;
+ struct thread *thread;
+ u8 cpumode;
+
+ if (perf_evlist__parse_sample(evlist, event, &sample)) {
+ pr_debug("perf_evlist__parse_sample failed\n");
+ return -1;
+ }
+
+ thread = machine__findnew_thread(machine, sample.pid, sample.pid);
+ if (!thread) {
+ pr_debug("machine__findnew_thread failed\n");
+ return -1;
+ }
+
+ cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+
+ return read_object_code(sample.ip, READLEN, cpumode, thread, machine,
+ state);
+}
+
+static int process_event(struct machine *machine, struct perf_evlist *evlist,
+ union perf_event *event, struct state *state)
+{
+ if (event->header.type == PERF_RECORD_SAMPLE)
+ return process_sample_event(machine, evlist, event, state);
+
+ if (event->header.type < PERF_RECORD_MAX)
+ return machine__process_event(machine, event);
+
+ return 0;
+}
+
+static int process_events(struct machine *machine, struct perf_evlist *evlist,
+ struct state *state)
+{
+ union perf_event *event;
+ int i, ret;
+
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+ ret = process_event(machine, evlist, event, state);
+ if (ret < 0)
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int comp(const void *a, const void *b)
+{
+ return *(int *)a - *(int *)b;
+}
+
+static void do_sort_something(void)
+{
+ int buf[40960], i;
+
+ for (i = 0; i < (int)ARRAY_SIZE(buf); i++)
+ buf[i] = ARRAY_SIZE(buf) - i - 1;
+
+ qsort(buf, ARRAY_SIZE(buf), sizeof(int), comp);
+
+ for (i = 0; i < (int)ARRAY_SIZE(buf); i++) {
+ if (buf[i] != i) {
+ pr_debug("qsort failed\n");
+ break;
+ }
+ }
+}
+
+static void sort_something(void)
+{
+ int i;
+
+ for (i = 0; i < 10; i++)
+ do_sort_something();
+}
+
+static void syscall_something(void)
+{
+ int pipefd[2];
+ int i;
+
+ for (i = 0; i < 1000; i++) {
+ if (pipe(pipefd) < 0) {
+ pr_debug("pipe failed\n");
+ break;
+ }
+ close(pipefd[1]);
+ close(pipefd[0]);
+ }
+}
+
+static void fs_something(void)
+{
+ const char *test_file_name = "temp-perf-code-reading-test-file--";
+ FILE *f;
+ int i;
+
+ for (i = 0; i < 1000; i++) {
+ f = fopen(test_file_name, "w+");
+ if (f) {
+ fclose(f);
+ unlink(test_file_name);
+ }
+ }
+}
+
+static void do_something(void)
+{
+ fs_something();
+
+ sort_something();
+
+ syscall_something();
+}
+
+enum {
+ TEST_CODE_READING_OK,
+ TEST_CODE_READING_NO_VMLINUX,
+ TEST_CODE_READING_NO_KCORE,
+ TEST_CODE_READING_NO_ACCESS,
+ TEST_CODE_READING_NO_KERNEL_OBJ,
+};
+
+static int do_test_code_reading(bool try_kcore)
+{
+ struct machines machines;
+ struct machine *machine;
+ struct thread *thread;
+ struct perf_record_opts opts = {
+ .mmap_pages = UINT_MAX,
+ .user_freq = UINT_MAX,
+ .user_interval = ULLONG_MAX,
+ .freq = 4000,
+ .target = {
+ .uses_mmap = true,
+ },
+ };
+ struct state state = {
+ .done_cnt = 0,
+ };
+ struct thread_map *threads = NULL;
+ struct cpu_map *cpus = NULL;
+ struct perf_evlist *evlist = NULL;
+ struct perf_evsel *evsel = NULL;
+ int err = -1, ret;
+ pid_t pid;
+ struct map *map;
+ bool have_vmlinux, have_kcore, excl_kernel = false;
+
+ pid = getpid();
+
+ machines__init(&machines);
+ machine = &machines.host;
+
+ ret = machine__create_kernel_maps(machine);
+ if (ret < 0) {
+ pr_debug("machine__create_kernel_maps failed\n");
+ goto out_err;
+ }
+
+ /* Force the use of kallsyms instead of vmlinux to try kcore */
+ if (try_kcore)
+ symbol_conf.kallsyms_name = "/proc/kallsyms";
+
+ /* Load kernel map */
+ map = machine->vmlinux_maps[MAP__FUNCTION];
+ ret = map__load(map, NULL);
+ if (ret < 0) {
+ pr_debug("map__load failed\n");
+ goto out_err;
+ }
+ have_vmlinux = dso__is_vmlinux(map->dso);
+ have_kcore = dso__is_kcore(map->dso);
+
+ /* 2nd time through we just try kcore */
+ if (try_kcore && !have_kcore)
+ return TEST_CODE_READING_NO_KCORE;
+
+ /* No point getting kernel events if there is no kernel object */
+ if (!have_vmlinux && !have_kcore)
+ excl_kernel = true;
+
+ threads = thread_map__new_by_tid(pid);
+ if (!threads) {
+ pr_debug("thread_map__new_by_tid failed\n");
+ goto out_err;
+ }
+
+ ret = perf_event__synthesize_thread_map(NULL, threads,
+ perf_event__process, machine);
+ if (ret < 0) {
+ pr_debug("perf_event__synthesize_thread_map failed\n");
+ goto out_err;
+ }
+
+ thread = machine__findnew_thread(machine, pid, pid);
+ if (!thread) {
+ pr_debug("machine__findnew_thread failed\n");
+ goto out_err;
+ }
+
+ cpus = cpu_map__new(NULL);
+ if (!cpus) {
+ pr_debug("cpu_map__new failed\n");
+ goto out_err;
+ }
+
+ while (1) {
+ const char *str;
+
+ evlist = perf_evlist__new();
+ if (!evlist) {
+ pr_debug("perf_evlist__new failed\n");
+ goto out_err;
+ }
+
+ perf_evlist__set_maps(evlist, cpus, threads);
+
+ if (excl_kernel)
+ str = "cycles:u";
+ else
+ str = "cycles";
+ pr_debug("Parsing event '%s'\n", str);
+ ret = parse_events(evlist, str);
+ if (ret < 0) {
+ pr_debug("parse_events failed\n");
+ goto out_err;
+ }
+
+ perf_evlist__config(evlist, &opts);
+
+ evsel = perf_evlist__first(evlist);
+
+ evsel->attr.comm = 1;
+ evsel->attr.disabled = 1;
+ evsel->attr.enable_on_exec = 0;
+
+ ret = perf_evlist__open(evlist);
+ if (ret < 0) {
+ if (!excl_kernel) {
+ excl_kernel = true;
+ perf_evlist__delete(evlist);
+ evlist = NULL;
+ continue;
+ }
+ pr_debug("perf_evlist__open failed\n");
+ goto out_err;
+ }
+ break;
+ }
+
+ ret = perf_evlist__mmap(evlist, UINT_MAX, false);
+ if (ret < 0) {
+ pr_debug("perf_evlist__mmap failed\n");
+ goto out_err;
+ }
+
+ perf_evlist__enable(evlist);
+
+ do_something();
+
+ perf_evlist__disable(evlist);
+
+ ret = process_events(machine, evlist, &state);
+ if (ret < 0)
+ goto out_err;
+
+ if (!have_vmlinux && !have_kcore && !try_kcore)
+ err = TEST_CODE_READING_NO_KERNEL_OBJ;
+ else if (!have_vmlinux && !try_kcore)
+ err = TEST_CODE_READING_NO_VMLINUX;
+ else if (excl_kernel)
+ err = TEST_CODE_READING_NO_ACCESS;
+ else
+ err = TEST_CODE_READING_OK;
+out_err:
+ if (evlist) {
+ perf_evlist__munmap(evlist);
+ perf_evlist__close(evlist);
+ perf_evlist__delete(evlist);
+ }
+ if (cpus)
+ cpu_map__delete(cpus);
+ if (threads)
+ thread_map__delete(threads);
+ machines__destroy_kernel_maps(&machines);
+ machine__delete_threads(machine);
+ machines__exit(&machines);
+
+ return err;
+}
+
+int test__code_reading(void)
+{
+ int ret;
+
+ ret = do_test_code_reading(false);
+ if (!ret)
+ ret = do_test_code_reading(true);
+
+ switch (ret) {
+ case TEST_CODE_READING_OK:
+ return 0;
+ case TEST_CODE_READING_NO_VMLINUX:
+ fprintf(stderr, " (no vmlinux)");
+ return 0;
+ case TEST_CODE_READING_NO_KCORE:
+ fprintf(stderr, " (no kcore)");
+ return 0;
+ case TEST_CODE_READING_NO_ACCESS:
+ fprintf(stderr, " (no access)");
+ return 0;
+ case TEST_CODE_READING_NO_KERNEL_OBJ:
+ fprintf(stderr, " (no kernel obj)");
+ return 0;
+ default:
+ return -1;
+ };
+}
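read_objdump_line() above recovers raw instruction bytes from objdump's text output by skipping to the colon and then consuming space-separated hex-digit pairs. A minimal sketch of that parsing idea on an invented sample line follows; the line contents and buffer size are illustrative only.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Made-up objdump output line: address, byte dump, mnemonic. */
	const char *line = "  4004f1:\t55 48 89 e5  \tpush   %rbp";
	const char *p = strchr(line, ':');
	unsigned char buf[16];
	size_t n = 0;

	for (p = p ? p + 1 : line; n < sizeof(buf); ) {
		/* Skip whitespace between byte pairs. */
		while (*p == ' ' || *p == '\t')
			p++;
		/* Stop at the mnemonic (or end of line): not a hex pair. */
		if (!isxdigit(p[0]) || !isxdigit(p[1]))
			break;
		sscanf(p, "%2hhx", &buf[n++]);
		p += 2;
		/* A byte pair must be followed by whitespace or end of line. */
		if (*p && *p != ' ' && *p != '\t')
			break;
	}

	printf("parsed %zu bytes, first byte %#x\n", n, (unsigned)buf[0]); /* 4 bytes, 0x55 */
	return 0;
}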
diff --git a/tools/perf/tests/dso-data.c b/tools/perf/tests/dso-data.c
index 5eaffa2de9c..dffe0551aca 100644
--- a/tools/perf/tests/dso-data.c
+++ b/tools/perf/tests/dso-data.c
@@ -10,14 +10,6 @@
#include "symbol.h"
#include "tests.h"
-#define TEST_ASSERT_VAL(text, cond) \
-do { \
- if (!(cond)) { \
- pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
- return -1; \
- } \
-} while (0)
-
static char *test_file(int size)
{
static char buf_templ[] = "/tmp/test-XXXXXX";
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index a5d2fcc5ae3..9b98c155483 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -1,6 +1,6 @@
+#include <traceevent/event-parse.h>
#include "evsel.h"
#include "tests.h"
-#include "event-parse.h"
static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
int size, bool should_be_signed)
@@ -49,7 +49,7 @@ int test__perf_evsel__tp_sched_test(void)
if (perf_evsel__test_field(evsel, "prev_prio", 4, true))
ret = -1;
- if (perf_evsel__test_field(evsel, "prev_state", 8, true))
+ if (perf_evsel__test_field(evsel, "prev_state", sizeof(long), true))
ret = -1;
if (perf_evsel__test_field(evsel, "next_comm", 16, true))
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
index 89085a9615e..4228ffc0d96 100644
--- a/tools/perf/tests/hists_link.c
+++ b/tools/perf/tests/hists_link.c
@@ -88,7 +88,8 @@ static struct machine *setup_fake_machine(struct machines *machines)
for (i = 0; i < ARRAY_SIZE(fake_threads); i++) {
struct thread *thread;
- thread = machine__findnew_thread(machine, fake_threads[i].pid);
+ thread = machine__findnew_thread(machine, fake_threads[i].pid,
+ fake_threads[i].pid);
if (thread == NULL)
goto out;
@@ -210,17 +211,15 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
list_for_each_entry(evsel, &evlist->entries, node) {
for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
const union perf_event event = {
- .ip = {
- .header = {
- .misc = PERF_RECORD_MISC_USER,
- },
- .pid = fake_common_samples[k].pid,
- .ip = fake_common_samples[k].ip,
+ .header = {
+ .misc = PERF_RECORD_MISC_USER,
},
};
+ sample.pid = fake_common_samples[k].pid;
+ sample.ip = fake_common_samples[k].ip;
if (perf_event__preprocess_sample(&event, machine, &al,
- &sample, 0) < 0)
+ &sample) < 0)
goto out;
he = __hists__add_entry(&evsel->hists, &al, NULL, 1, 1);
@@ -234,17 +233,15 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
const union perf_event event = {
- .ip = {
- .header = {
- .misc = PERF_RECORD_MISC_USER,
- },
- .pid = fake_samples[i][k].pid,
- .ip = fake_samples[i][k].ip,
+ .header = {
+ .misc = PERF_RECORD_MISC_USER,
},
};
+ sample.pid = fake_samples[i][k].pid;
+ sample.ip = fake_samples[i][k].ip;
if (perf_event__preprocess_sample(&event, machine, &al,
- &sample, 0) < 0)
+ &sample) < 0)
goto out;
he = __hists__add_entry(&evsel->hists, &al, NULL, 1, 1);
diff --git a/tools/perf/tests/keep-tracking.c b/tools/perf/tests/keep-tracking.c
new file mode 100644
index 00000000000..d444ea2c47d
--- /dev/null
+++ b/tools/perf/tests/keep-tracking.c
@@ -0,0 +1,154 @@
+#include <sys/types.h>
+#include <unistd.h>
+#include <sys/prctl.h>
+
+#include "parse-events.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "thread_map.h"
+#include "cpumap.h"
+#include "tests.h"
+
+#define CHECK__(x) { \
+ while ((x) < 0) { \
+ pr_debug(#x " failed!\n"); \
+ goto out_err; \
+ } \
+}
+
+#define CHECK_NOT_NULL__(x) { \
+ while ((x) == NULL) { \
+ pr_debug(#x " failed!\n"); \
+ goto out_err; \
+ } \
+}
+
+static int find_comm(struct perf_evlist *evlist, const char *comm)
+{
+ union perf_event *event;
+ int i, found;
+
+ found = 0;
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+ if (event->header.type == PERF_RECORD_COMM &&
+ (pid_t)event->comm.pid == getpid() &&
+ (pid_t)event->comm.tid == getpid() &&
+ strcmp(event->comm.comm, comm) == 0)
+ found += 1;
+ }
+ }
+ return found;
+}
+
+/**
+ * test__keep_tracking - test using a dummy software event to keep tracking.
+ *
+ * This function implements a test that checks that tracking events continue
+ * when an event is disabled but a dummy software event is not disabled. If the
+ * test passes %0 is returned, otherwise %-1 is returned.
+ */
+int test__keep_tracking(void)
+{
+ struct perf_record_opts opts = {
+ .mmap_pages = UINT_MAX,
+ .user_freq = UINT_MAX,
+ .user_interval = ULLONG_MAX,
+ .freq = 4000,
+ .target = {
+ .uses_mmap = true,
+ },
+ };
+ struct thread_map *threads = NULL;
+ struct cpu_map *cpus = NULL;
+ struct perf_evlist *evlist = NULL;
+ struct perf_evsel *evsel = NULL;
+ int found, err = -1;
+ const char *comm;
+
+ threads = thread_map__new(-1, getpid(), UINT_MAX);
+ CHECK_NOT_NULL__(threads);
+
+ cpus = cpu_map__new(NULL);
+ CHECK_NOT_NULL__(cpus);
+
+ evlist = perf_evlist__new();
+ CHECK_NOT_NULL__(evlist);
+
+ perf_evlist__set_maps(evlist, cpus, threads);
+
+ CHECK__(parse_events(evlist, "dummy:u"));
+ CHECK__(parse_events(evlist, "cycles:u"));
+
+ perf_evlist__config(evlist, &opts);
+
+ evsel = perf_evlist__first(evlist);
+
+ evsel->attr.comm = 1;
+ evsel->attr.disabled = 1;
+ evsel->attr.enable_on_exec = 0;
+
+ if (perf_evlist__open(evlist) < 0) {
+ fprintf(stderr, " (not supported)");
+ err = 0;
+ goto out_err;
+ }
+
+ CHECK__(perf_evlist__mmap(evlist, UINT_MAX, false));
+
+ /*
+ * First, test that a 'comm' event can be found when the event is
+ * enabled.
+ */
+
+ perf_evlist__enable(evlist);
+
+ comm = "Test COMM 1";
+ CHECK__(prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0));
+
+ perf_evlist__disable(evlist);
+
+ found = find_comm(evlist, comm);
+ if (found != 1) {
+ pr_debug("First time, failed to find tracking event.\n");
+ goto out_err;
+ }
+
+ /*
+ * Secondly, test that a 'comm' event can be found when the event is
+ * disabled with the dummy event still enabled.
+ */
+
+ perf_evlist__enable(evlist);
+
+ evsel = perf_evlist__last(evlist);
+
+ CHECK__(perf_evlist__disable_event(evlist, evsel));
+
+ comm = "Test COMM 2";
+ CHECK__(prctl(PR_SET_NAME, (unsigned long)comm, 0, 0, 0));
+
+ perf_evlist__disable(evlist);
+
+ found = find_comm(evlist, comm);
+ if (found != 1) {
+ pr_debug("Seconf time, failed to find tracking event.\n");
+ goto out_err;
+ }
+
+ err = 0;
+
+out_err:
+ if (evlist) {
+ perf_evlist__disable(evlist);
+ perf_evlist__munmap(evlist);
+ perf_evlist__close(evlist);
+ perf_evlist__delete(evlist);
+ }
+ if (cpus)
+ cpu_map__delete(cpus);
+ if (threads)
+ thread_map__delete(threads);
+
+ return err;
+}
diff --git a/tools/perf/tests/make b/tools/perf/tests/make
index c441a287512..2ca0abf1b2b 100644
--- a/tools/perf/tests/make
+++ b/tools/perf/tests/make
@@ -1,6 +1,8 @@
PERF := .
MK := Makefile
+has = $(shell which $1 2>/dev/null)
+
# standard single make variable specified
make_clean_all := clean all
make_python_perf_so := python/perf.so
@@ -25,6 +27,13 @@ make_help := help
make_doc := doc
make_perf_o := perf.o
make_util_map_o := util/map.o
+make_install := install
+make_install_bin := install-bin
+make_install_doc := install-doc
+make_install_man := install-man
+make_install_html := install-html
+make_install_info := install-info
+make_install_pdf := install-pdf
# all the NO_* variable combined
make_minimal := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1
@@ -50,14 +59,27 @@ run += make_no_backtrace
run += make_no_libnuma
run += make_no_libaudit
run += make_no_libbionic
-run += make_tags
-run += make_cscope
run += make_help
run += make_doc
run += make_perf_o
run += make_util_map_o
+run += make_install
+run += make_install_bin
+# FIXME 'install-*' commented out till they're fixed
+# run += make_install_doc
+# run += make_install_man
+# run += make_install_html
+# run += make_install_info
+# run += make_install_pdf
run += make_minimal
+ifneq ($(call has,ctags),)
+run += make_tags
+endif
+ifneq ($(call has,cscope),)
+run += make_cscope
+endif
+
# $(run_O) contains same portion of $(run) tests with '_O' attached
# to distinguish O=... tests
run_O := $(addsuffix _O,$(run))
@@ -84,6 +106,31 @@ test_make_python_perf_so := test -f $(PERF)/python/perf.so
test_make_perf_o := test -f $(PERF)/perf.o
test_make_util_map_o := test -f $(PERF)/util/map.o
+test_make_install := test -x $$TMP_DEST/bin/perf
+test_make_install_O := $(test_make_install)
+test_make_install_bin := $(test_make_install)
+test_make_install_bin_O := $(test_make_install)
+
+# FIXME nothing gets installed
+test_make_install_man := test -f $$TMP_DEST/share/man/man1/perf.1
+test_make_install_man_O := $(test_make_install_man)
+
+# FIXME nothing gets installed
+test_make_install_doc := $(test_ok)
+test_make_install_doc_O := $(test_ok)
+
+# FIXME nothing gets installed
+test_make_install_html := $(test_ok)
+test_make_install_html_O := $(test_ok)
+
+# FIXME nothing gets installed
+test_make_install_info := $(test_ok)
+test_make_install_info_O := $(test_ok)
+
+# FIXME nothing gets installed
+test_make_install_pdf := $(test_ok)
+test_make_install_pdf_O := $(test_ok)
+
# Kbuild tests only
#test_make_python_perf_so_O := test -f $$TMP/tools/perf/python/perf.so
#test_make_perf_o_O := test -f $$TMP/tools/perf/perf.o
@@ -95,7 +142,7 @@ test_make_util_map_o_O := true
test_default = test -x $(PERF)/perf
test = $(if $(test_$1),$(test_$1),$(test_default))
-test_default_O = test -x $$TMP/perf
+test_default_O = test -x $$TMP_O/perf
test_O = $(if $(test_$1),$(test_$1),$(test_default_O))
all:
@@ -111,23 +158,27 @@ clean := @(cd $(PERF); make -s -f $(MK) clean >/dev/null)
$(run):
$(call clean)
- @cmd="cd $(PERF) && make -f $(MK) $($@)"; \
+ @TMP_DEST=$$(mktemp -d); \
+ cmd="cd $(PERF) && make -f $(MK) DESTDIR=$$TMP_DEST $($@)"; \
echo "- $@: $$cmd" && echo $$cmd > $@ && \
( eval $$cmd ) >> $@ 2>&1; \
echo " test: $(call test,$@)"; \
$(call test,$@) && \
- rm -f $@
+	rm -f $@ && \
+ rm -rf $$TMP_DEST
$(run_O):
$(call clean)
- @TMP=$$(mktemp -d); \
- cmd="cd $(PERF) && make -f $(MK) $($(patsubst %_O,%,$@)) O=$$TMP"; \
+ @TMP_O=$$(mktemp -d); \
+ TMP_DEST=$$(mktemp -d); \
+ cmd="cd $(PERF) && make -f $(MK) O=$$TMP_O DESTDIR=$$TMP_DEST $($(patsubst %_O,%,$@))"; \
echo "- $@: $$cmd" && echo $$cmd > $@ && \
( eval $$cmd ) >> $@ 2>&1 && \
echo " test: $(call test_O,$@)"; \
$(call test_O,$@) && \
rm -f $@ && \
- rm -rf $$TMP
+	rm -rf $$TMP_O && \
+ rm -rf $$TMP_DEST
all: $(run) $(run_O)
@echo OK
diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c
index 5b1b5aba722..c4185b9aeb8 100644
--- a/tools/perf/tests/mmap-basic.c
+++ b/tools/perf/tests/mmap-basic.c
@@ -72,7 +72,7 @@ int test__basic_mmap(void)
}
evsels[i]->attr.wakeup_events = 1;
- perf_evsel__set_sample_id(evsels[i]);
+ perf_evsel__set_sample_id(evsels[i], false);
perf_evlist__add(evlist, evsels[i]);
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 0275bab4ea9..48114d164e9 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -7,14 +7,6 @@
#include "tests.h"
#include <linux/hw_breakpoint.h>
-#define TEST_ASSERT_VAL(text, cond) \
-do { \
- if (!(cond)) { \
- pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
- return -1; \
- } \
-} while (0)
-
#define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
@@ -460,6 +452,7 @@ static int test__checkevent_pmu_events(struct perf_evlist *evlist)
evsel->attr.exclude_kernel);
TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong pinned", !evsel->attr.pinned);
return 0;
}
@@ -528,6 +521,7 @@ static int test__group1(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
/* cycles:upp */
evsel = perf_evsel__next(evsel);
@@ -543,6 +537,7 @@ static int test__group1(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2);
TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
return 0;
}
@@ -568,6 +563,7 @@ static int test__group2(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
/* cache-references + :u modifier */
evsel = perf_evsel__next(evsel);
@@ -582,6 +578,7 @@ static int test__group2(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
/* cycles:k */
evsel = perf_evsel__next(evsel);
@@ -595,6 +592,7 @@ static int test__group2(struct perf_evlist *evlist)
TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
return 0;
}
@@ -623,6 +621,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
!strcmp(leader->group_name, "group1"));
TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
/* group1 cycles:kppp */
evsel = perf_evsel__next(evsel);
@@ -639,6 +638,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
/* group2 cycles + G modifier */
evsel = leader = perf_evsel__next(evsel);
@@ -656,6 +656,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
!strcmp(leader->group_name, "group2"));
TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
/* group2 1:3 + G modifier */
evsel = perf_evsel__next(evsel);
@@ -669,6 +670,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
/* instructions:u */
evsel = perf_evsel__next(evsel);
@@ -682,6 +684,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
return 0;
}
@@ -709,6 +712,7 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
/* instructions:kp + p */
evsel = perf_evsel__next(evsel);
@@ -724,6 +728,7 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2);
TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
return 0;
}
@@ -750,6 +755,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
/* instructions + G */
evsel = perf_evsel__next(evsel);
@@ -764,6 +770,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 1);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
/* cycles:G */
evsel = leader = perf_evsel__next(evsel);
@@ -780,6 +787,7 @@ static int test__group5(struct perf_evlist *evlist __maybe_unused)
TEST_ASSERT_VAL("wrong leader", perf_evsel__is_group_leader(evsel));
TEST_ASSERT_VAL("wrong nr_members", evsel->nr_members == 2);
TEST_ASSERT_VAL("wrong group_idx", perf_evsel__group_idx(evsel) == 0);
+ TEST_ASSERT_VAL("wrong sample_read", !evsel->sample_read);
/* instructions:G */
evsel = perf_evsel__next(evsel);
@@ -971,6 +979,142 @@ static int test__group_gh4(struct perf_evlist *evlist)
return 0;
}
+static int test__leader_sample1(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel, *leader;
+
+ TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries);
+
+ /* cycles - sampling group leader */
+ evsel = leader = perf_evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
+
+ /* cache-misses - not sampling */
+ evsel = perf_evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CACHE_MISSES == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
+
+ /* branch-misses - not sampling */
+ evsel = perf_evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_BRANCH_MISSES == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
+
+ return 0;
+}
+
+static int test__leader_sample2(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel, *leader;
+
+ TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries);
+
+ /* instructions - sampling group leader */
+ evsel = leader = perf_evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
+
+ /* branch-misses - not sampling */
+ evsel = perf_evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_BRANCH_MISSES == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
+ TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
+ TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong sample_read", evsel->sample_read);
+
+ return 0;
+}
+
+static int test__checkevent_pinned_modifier(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel = perf_evlist__first(evlist);
+
+ TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
+ TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
+ TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
+ TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip);
+ TEST_ASSERT_VAL("wrong pinned", evsel->attr.pinned);
+
+ return test__checkevent_symbolic_name(evlist);
+}
+
+static int test__pinned_group(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel, *leader;
+
+ TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries);
+
+ /* cycles - group leader */
+ evsel = leader = perf_evlist__first(evlist);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
+ TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
+ TEST_ASSERT_VAL("wrong pinned", evsel->attr.pinned);
+
+ /* cache-misses - can not be pinned, but will go on with the leader */
+ evsel = perf_evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_CACHE_MISSES == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong pinned", !evsel->attr.pinned);
+
+ /* branch-misses - ditto */
+ evsel = perf_evsel__next(evsel);
+ TEST_ASSERT_VAL("wrong config",
+ PERF_COUNT_HW_BRANCH_MISSES == evsel->attr.config);
+ TEST_ASSERT_VAL("wrong pinned", !evsel->attr.pinned);
+
+ return 0;
+}
+
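/*
 * Illustrative sketch, not part of this patch: the checks added above
 * exercise the new 'S' (leader sampling) and 'D' (pinned) event modifiers.
 * Roughly, "{cycles,cache-misses}:S" is expected to parse into a group in
 * which every member has evsel->sample_read set, while ":D" sets
 * attr.pinned on the group leader only.  A hypothetical extra entry
 * combining both modifiers would follow the same pattern as table entries
 * [38]-[41] added below; the entry name and check function here are
 * assumptions of this sketch:
 *
 *	{
 *		.name  = "{cycles,cache-misses}:SD",
 *		.check = test__leader_sample_pinned,	// would assert both
 *	},						// sample_read and pinned
 */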
static int count_tracepoints(void)
{
char events_path[PATH_MAX];
@@ -1187,6 +1331,22 @@ static struct evlist_test test__events[] = {
.name = "{cycles:G,cache-misses:H}:uG",
.check = test__group_gh4,
},
+ [38] = {
+ .name = "{cycles,cache-misses,branch-misses}:S",
+ .check = test__leader_sample1,
+ },
+ [39] = {
+ .name = "{instructions,branch-misses}:Su",
+ .check = test__leader_sample2,
+ },
+ [40] = {
+ .name = "instructions:uDp",
+ .check = test__checkevent_pinned_modifier,
+ },
+ [41] = {
+ .name = "{cycles,cache-misses,branch-misses}:D",
+ .check = test__pinned_group,
+ },
};
static struct evlist_test test__events_pmu[] = {
@@ -1254,24 +1414,20 @@ static int test_events(struct evlist_test *events, unsigned cnt)
static int test_term(struct terms_test *t)
{
- struct list_head *terms;
+ struct list_head terms;
int ret;
- terms = malloc(sizeof(*terms));
- if (!terms)
- return -ENOMEM;
-
- INIT_LIST_HEAD(terms);
+ INIT_LIST_HEAD(&terms);
- ret = parse_events_terms(terms, t->str);
+ ret = parse_events_terms(&terms, t->str);
if (ret) {
pr_debug("failed to parse terms '%s', err %d\n",
t->str , ret);
return ret;
}
- ret = t->check(terms);
- parse_events__free_terms(terms);
+ ret = t->check(&terms);
+ parse_events__free_terms(&terms);
return ret;
}
diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c
new file mode 100644
index 00000000000..0ab61b1f408
--- /dev/null
+++ b/tools/perf/tests/perf-time-to-tsc.c
@@ -0,0 +1,177 @@
+#include <stdio.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <sys/prctl.h>
+
+#include "parse-events.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "thread_map.h"
+#include "cpumap.h"
+#include "tests.h"
+
+#include "../arch/x86/util/tsc.h"
+
+#define CHECK__(x) { \
+ while ((x) < 0) { \
+ pr_debug(#x " failed!\n"); \
+ goto out_err; \
+ } \
+}
+
+#define CHECK_NOT_NULL__(x) { \
+ while ((x) == NULL) { \
+ pr_debug(#x " failed!\n"); \
+ goto out_err; \
+ } \
+}
+
+static u64 rdtsc(void)
+{
+ unsigned int low, high;
+
+ asm volatile("rdtsc" : "=a" (low), "=d" (high));
+
+ return low | ((u64)high) << 32;
+}
+
+/**
+ * test__perf_time_to_tsc - test converting perf time to TSC.
+ *
+ * This function implements a test that checks that the conversion of perf time
+ * to and from TSC is consistent with the order of events. If the test passes
+ * %0 is returned, otherwise %-1 is returned. If TSC conversion is not
+ * supported then the test passes but " (not supported)" is printed.
+ */
+int test__perf_time_to_tsc(void)
+{
+ struct perf_record_opts opts = {
+ .mmap_pages = UINT_MAX,
+ .user_freq = UINT_MAX,
+ .user_interval = ULLONG_MAX,
+ .freq = 4000,
+ .target = {
+ .uses_mmap = true,
+ },
+ .sample_time = true,
+ };
+ struct thread_map *threads = NULL;
+ struct cpu_map *cpus = NULL;
+ struct perf_evlist *evlist = NULL;
+ struct perf_evsel *evsel = NULL;
+ int err = -1, ret, i;
+ const char *comm1, *comm2;
+ struct perf_tsc_conversion tc;
+ struct perf_event_mmap_page *pc;
+ union perf_event *event;
+ u64 test_tsc, comm1_tsc, comm2_tsc;
+ u64 test_time, comm1_time = 0, comm2_time = 0;
+
+ threads = thread_map__new(-1, getpid(), UINT_MAX);
+ CHECK_NOT_NULL__(threads);
+
+ cpus = cpu_map__new(NULL);
+ CHECK_NOT_NULL__(cpus);
+
+ evlist = perf_evlist__new();
+ CHECK_NOT_NULL__(evlist);
+
+ perf_evlist__set_maps(evlist, cpus, threads);
+
+ CHECK__(parse_events(evlist, "cycles:u"));
+
+ perf_evlist__config(evlist, &opts);
+
+ evsel = perf_evlist__first(evlist);
+
+ evsel->attr.comm = 1;
+ evsel->attr.disabled = 1;
+ evsel->attr.enable_on_exec = 0;
+
+ CHECK__(perf_evlist__open(evlist));
+
+ CHECK__(perf_evlist__mmap(evlist, UINT_MAX, false));
+
+ pc = evlist->mmap[0].base;
+ ret = perf_read_tsc_conversion(pc, &tc);
+ if (ret) {
+ if (ret == -EOPNOTSUPP) {
+ fprintf(stderr, " (not supported)");
+ return 0;
+ }
+ goto out_err;
+ }
+
+ perf_evlist__enable(evlist);
+
+ comm1 = "Test COMM 1";
+ CHECK__(prctl(PR_SET_NAME, (unsigned long)comm1, 0, 0, 0));
+
+ test_tsc = rdtsc();
+
+ comm2 = "Test COMM 2";
+ CHECK__(prctl(PR_SET_NAME, (unsigned long)comm2, 0, 0, 0));
+
+ perf_evlist__disable(evlist);
+
+ for (i = 0; i < evlist->nr_mmaps; i++) {
+ while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+ struct perf_sample sample;
+
+ if (event->header.type != PERF_RECORD_COMM ||
+ (pid_t)event->comm.pid != getpid() ||
+ (pid_t)event->comm.tid != getpid())
+ continue;
+
+ if (strcmp(event->comm.comm, comm1) == 0) {
+ CHECK__(perf_evsel__parse_sample(evsel, event,
+ &sample));
+ comm1_time = sample.time;
+ }
+ if (strcmp(event->comm.comm, comm2) == 0) {
+ CHECK__(perf_evsel__parse_sample(evsel, event,
+ &sample));
+ comm2_time = sample.time;
+ }
+ }
+ }
+
+ if (!comm1_time || !comm2_time)
+ goto out_err;
+
+ test_time = tsc_to_perf_time(test_tsc, &tc);
+ comm1_tsc = perf_time_to_tsc(comm1_time, &tc);
+ comm2_tsc = perf_time_to_tsc(comm2_time, &tc);
+
+ pr_debug("1st event perf time %"PRIu64" tsc %"PRIu64"\n",
+ comm1_time, comm1_tsc);
+ pr_debug("rdtsc time %"PRIu64" tsc %"PRIu64"\n",
+ test_time, test_tsc);
+ pr_debug("2nd event perf time %"PRIu64" tsc %"PRIu64"\n",
+ comm2_time, comm2_tsc);
+
+ if (test_time <= comm1_time ||
+ test_time >= comm2_time)
+ goto out_err;
+
+ if (test_tsc <= comm1_tsc ||
+ test_tsc >= comm2_tsc)
+ goto out_err;
+
+ err = 0;
+
+out_err:
+ if (evlist) {
+ perf_evlist__disable(evlist);
+ perf_evlist__munmap(evlist);
+ perf_evlist__close(evlist);
+ perf_evlist__delete(evlist);
+ }
+ if (cpus)
+ cpu_map__delete(cpus);
+ if (threads)
+ thread_map__delete(threads);
+
+ return err;
+}
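/*
 * Illustrative sketch, not part of this patch: the conversion exercised by
 * this test is the one documented for the perf_event_mmap_page
 * cap_user_time_zero capability.  perf_read_tsc_conversion() copies the
 * time_shift, time_mult and time_zero fields out of the mmap page, and
 * tsc_to_perf_time() then maps TSC cycles to perf (sched_clock) time
 * roughly as below.  The helper name and standalone parameters are
 * assumptions of this sketch; the arithmetic follows the ABI documentation.
 */
static u64 example_tsc_to_perf_time(u64 cyc, u16 time_shift, u32 time_mult,
				    u64 time_zero)
{
	u64 quot = cyc >> time_shift;
	u64 rem  = cyc & (((u64)1 << time_shift) - 1);

	/*
	 * Equivalent to time_zero + (cyc * time_mult) >> time_shift, but
	 * split so the 64-bit multiplication does not overflow.
	 */
	return time_zero + quot * time_mult +
	       ((rem * time_mult) >> time_shift);
}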
diff --git a/tools/perf/tests/sample-parsing.c b/tools/perf/tests/sample-parsing.c
new file mode 100644
index 00000000000..77f598dbd97
--- /dev/null
+++ b/tools/perf/tests/sample-parsing.c
@@ -0,0 +1,316 @@
+#include <stdbool.h>
+#include <inttypes.h>
+
+#include "util.h"
+#include "event.h"
+#include "evsel.h"
+
+#include "tests.h"
+
+#define COMP(m) do { \
+ if (s1->m != s2->m) { \
+ pr_debug("Samples differ at '"#m"'\n"); \
+ return false; \
+ } \
+} while (0)
+
+#define MCOMP(m) do { \
+ if (memcmp(&s1->m, &s2->m, sizeof(s1->m))) { \
+ pr_debug("Samples differ at '"#m"'\n"); \
+ return false; \
+ } \
+} while (0)
+
+static bool samples_same(const struct perf_sample *s1,
+ const struct perf_sample *s2, u64 type, u64 regs_user,
+ u64 read_format)
+{
+ size_t i;
+
+ if (type & PERF_SAMPLE_IDENTIFIER)
+ COMP(id);
+
+ if (type & PERF_SAMPLE_IP)
+ COMP(ip);
+
+ if (type & PERF_SAMPLE_TID) {
+ COMP(pid);
+ COMP(tid);
+ }
+
+ if (type & PERF_SAMPLE_TIME)
+ COMP(time);
+
+ if (type & PERF_SAMPLE_ADDR)
+ COMP(addr);
+
+ if (type & PERF_SAMPLE_ID)
+ COMP(id);
+
+ if (type & PERF_SAMPLE_STREAM_ID)
+ COMP(stream_id);
+
+ if (type & PERF_SAMPLE_CPU)
+ COMP(cpu);
+
+ if (type & PERF_SAMPLE_PERIOD)
+ COMP(period);
+
+ if (type & PERF_SAMPLE_READ) {
+ if (read_format & PERF_FORMAT_GROUP)
+ COMP(read.group.nr);
+ else
+ COMP(read.one.value);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ COMP(read.time_enabled);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ COMP(read.time_running);
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ if (read_format & PERF_FORMAT_GROUP) {
+ for (i = 0; i < s1->read.group.nr; i++)
+ MCOMP(read.group.values[i]);
+ } else {
+ COMP(read.one.id);
+ }
+ }
+
+ if (type & PERF_SAMPLE_CALLCHAIN) {
+ COMP(callchain->nr);
+ for (i = 0; i < s1->callchain->nr; i++)
+ COMP(callchain->ips[i]);
+ }
+
+ if (type & PERF_SAMPLE_RAW) {
+ COMP(raw_size);
+ if (memcmp(s1->raw_data, s2->raw_data, s1->raw_size)) {
+ pr_debug("Samples differ at 'raw_data'\n");
+ return false;
+ }
+ }
+
+ if (type & PERF_SAMPLE_BRANCH_STACK) {
+ COMP(branch_stack->nr);
+ for (i = 0; i < s1->branch_stack->nr; i++)
+ MCOMP(branch_stack->entries[i]);
+ }
+
+ if (type & PERF_SAMPLE_REGS_USER) {
+ size_t sz = hweight_long(regs_user) * sizeof(u64);
+
+ COMP(user_regs.abi);
+ if (s1->user_regs.abi &&
+ (!s1->user_regs.regs || !s2->user_regs.regs ||
+ memcmp(s1->user_regs.regs, s2->user_regs.regs, sz))) {
+ pr_debug("Samples differ at 'user_regs'\n");
+ return false;
+ }
+ }
+
+ if (type & PERF_SAMPLE_STACK_USER) {
+ COMP(user_stack.size);
+ if (memcmp(s1->user_stack.data, s2->user_stack.data,
+ s1->user_stack.size)) {
+ pr_debug("Samples differ at 'user_stack'\n");
+ return false;
+ }
+ }
+
+ if (type & PERF_SAMPLE_WEIGHT)
+ COMP(weight);
+
+ if (type & PERF_SAMPLE_DATA_SRC)
+ COMP(data_src);
+
+ return true;
+}
+
+static int do_test(u64 sample_type, u64 sample_regs_user, u64 read_format)
+{
+ struct perf_evsel evsel = {
+ .needs_swap = false,
+ .attr = {
+ .sample_type = sample_type,
+ .sample_regs_user = sample_regs_user,
+ .read_format = read_format,
+ },
+ };
+ union perf_event *event;
+ union {
+ struct ip_callchain callchain;
+ u64 data[64];
+ } callchain = {
+ /* 3 ips */
+ .data = {3, 201, 202, 203},
+ };
+ union {
+ struct branch_stack branch_stack;
+ u64 data[64];
+ } branch_stack = {
+ /* 1 branch_entry */
+ .data = {1, 211, 212, 213},
+ };
+ u64 user_regs[64];
+ const u64 raw_data[] = {0x123456780a0b0c0dULL, 0x1102030405060708ULL};
+ const u64 data[] = {0x2211443366558877ULL, 0, 0xaabbccddeeff4321ULL};
+ struct perf_sample sample = {
+ .ip = 101,
+ .pid = 102,
+ .tid = 103,
+ .time = 104,
+ .addr = 105,
+ .id = 106,
+ .stream_id = 107,
+ .period = 108,
+ .weight = 109,
+ .cpu = 110,
+ .raw_size = sizeof(raw_data),
+ .data_src = 111,
+ .raw_data = (void *)raw_data,
+ .callchain = &callchain.callchain,
+ .branch_stack = &branch_stack.branch_stack,
+ .user_regs = {
+ .abi = PERF_SAMPLE_REGS_ABI_64,
+ .regs = user_regs,
+ },
+ .user_stack = {
+ .size = sizeof(data),
+ .data = (void *)data,
+ },
+ .read = {
+ .time_enabled = 0x030a59d664fca7deULL,
+ .time_running = 0x011b6ae553eb98edULL,
+ },
+ };
+ struct sample_read_value values[] = {{1, 5}, {9, 3}, {2, 7}, {6, 4},};
+ struct perf_sample sample_out;
+ size_t i, sz, bufsz;
+ int err, ret = -1;
+
+ for (i = 0; i < sizeof(user_regs); i++)
+ *(i + (u8 *)user_regs) = i & 0xfe;
+
+ if (read_format & PERF_FORMAT_GROUP) {
+ sample.read.group.nr = 4;
+ sample.read.group.values = values;
+ } else {
+ sample.read.one.value = 0x08789faeb786aa87ULL;
+ sample.read.one.id = 99;
+ }
+
+ sz = perf_event__sample_event_size(&sample, sample_type,
+ sample_regs_user, read_format);
+ bufsz = sz + 4096; /* Add a bit for overrun checking */
+ event = malloc(bufsz);
+ if (!event) {
+ pr_debug("malloc failed\n");
+ return -1;
+ }
+
+ memset(event, 0xff, bufsz);
+ event->header.type = PERF_RECORD_SAMPLE;
+ event->header.misc = 0;
+ event->header.size = sz;
+
+ err = perf_event__synthesize_sample(event, sample_type,
+ sample_regs_user, read_format,
+ &sample, false);
+ if (err) {
+ pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
+ "perf_event__synthesize_sample", sample_type, err);
+ goto out_free;
+ }
+
+ /* The data does not contain 0xff so we use that to check the size */
+ for (i = bufsz; i > 0; i--) {
+ if (*(i - 1 + (u8 *)event) != 0xff)
+ break;
+ }
+ if (i != sz) {
+ pr_debug("Event size mismatch: actual %zu vs expected %zu\n",
+ i, sz);
+ goto out_free;
+ }
+
+ evsel.sample_size = __perf_evsel__sample_size(sample_type);
+
+ err = perf_evsel__parse_sample(&evsel, event, &sample_out);
+ if (err) {
+ pr_debug("%s failed for sample_type %#"PRIx64", error %d\n",
+ "perf_evsel__parse_sample", sample_type, err);
+ goto out_free;
+ }
+
+ if (!samples_same(&sample, &sample_out, sample_type,
+ sample_regs_user, read_format)) {
+ pr_debug("parsing failed for sample_type %#"PRIx64"\n",
+ sample_type);
+ goto out_free;
+ }
+
+ ret = 0;
+out_free:
+ free(event);
+ if (ret && read_format)
+ pr_debug("read_format %#"PRIx64"\n", read_format);
+ return ret;
+}
+
+/**
+ * test__sample_parsing - test sample parsing.
+ *
+ * This function implements a test that synthesizes a sample event, parses it
+ * and then checks that the parsed sample matches the original sample. The test
+ * checks sample format bits separately and together. If the test passes %0 is
+ * returned, otherwise %-1 is returned.
+ */
+int test__sample_parsing(void)
+{
+ const u64 rf[] = {4, 5, 6, 7, 12, 13, 14, 15};
+ u64 sample_type;
+ u64 sample_regs_user;
+ size_t i;
+ int err;
+
+ /*
+ * Fail the test if it has not been updated when new sample format bits
+ * were added.
+ */
+ if (PERF_SAMPLE_MAX > PERF_SAMPLE_IDENTIFIER << 1) {
+ pr_debug("sample format has changed - test needs updating\n");
+ return -1;
+ }
+
+ /* Test each sample format bit separately */
+ for (sample_type = 1; sample_type != PERF_SAMPLE_MAX;
+ sample_type <<= 1) {
+ /* Test read_format variations */
+ if (sample_type == PERF_SAMPLE_READ) {
+ for (i = 0; i < ARRAY_SIZE(rf); i++) {
+ err = do_test(sample_type, 0, rf[i]);
+ if (err)
+ return err;
+ }
+ continue;
+ }
+
+ if (sample_type == PERF_SAMPLE_REGS_USER)
+ sample_regs_user = 0x3fff;
+ else
+ sample_regs_user = 0;
+
+ err = do_test(sample_type, sample_regs_user, 0);
+ if (err)
+ return err;
+ }
+
+ /* Test all sample format bits together */
+ sample_type = PERF_SAMPLE_MAX - 1;
+ sample_regs_user = 0x3fff;
+ for (i = 0; i < ARRAY_SIZE(rf); i++) {
+ err = do_test(sample_type, sample_regs_user, rf[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
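/*
 * Illustrative note, not part of this patch: the rf[] values above are
 * read_format bit combinations from the perf_event_attr ABI, where
 * PERF_FORMAT_TOTAL_TIME_ENABLED = 1, PERF_FORMAT_TOTAL_TIME_RUNNING = 2,
 * PERF_FORMAT_ID = 4 and PERF_FORMAT_GROUP = 8.  Values 4-7 therefore
 * cover the single-counter layouts (ID plus the optional time fields) and
 * 12-15 the group layouts (GROUP | ID plus the optional time fields),
 * matching the "PERF_FORMAT_ID is forced" assumption in samples_same().
 * Spelled out, one of the group cases tested is:
 *
 *	do_test(PERF_SAMPLE_READ, 0,
 *		PERF_FORMAT_GROUP | PERF_FORMAT_ID |
 *		PERF_FORMAT_TOTAL_TIME_ENABLED);	// read_format == 13
 */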
diff --git a/tools/perf/tests/tests.h b/tools/perf/tests/tests.h
index dd7feae2d37..c048b589998 100644
--- a/tools/perf/tests/tests.h
+++ b/tools/perf/tests/tests.h
@@ -1,6 +1,14 @@
#ifndef TESTS_H
#define TESTS_H
+#define TEST_ASSERT_VAL(text, cond) \
+do { \
+ if (!(cond)) { \
+ pr_debug("FAILED %s:%d %s\n", __FILE__, __LINE__, text); \
+ return -1; \
+ } \
+} while (0)
+
enum {
TEST_OK = 0,
TEST_FAIL = -1,
@@ -27,5 +35,9 @@ int test__bp_signal(void);
int test__bp_signal_overflow(void);
int test__task_exit(void);
int test__sw_clock_freq(void);
+int test__perf_time_to_tsc(void);
+int test__code_reading(void);
+int test__sample_parsing(void);
+int test__keep_tracking(void);
#endif /* TESTS_H */
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 7b4c4d26d1b..2bd13edcbc1 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -16,6 +16,8 @@ static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused,
return 0;
}
+#define UM(x) kallsyms_map->unmap_ip(kallsyms_map, (x))
+
int test__vmlinux_matches_kallsyms(void)
{
int err = -1;
@@ -25,6 +27,7 @@ int test__vmlinux_matches_kallsyms(void)
struct machine kallsyms, vmlinux;
enum map_type type = MAP__FUNCTION;
struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };
+ u64 mem_start, mem_end;
/*
* Step 1:
@@ -73,7 +76,7 @@ int test__vmlinux_matches_kallsyms(void)
goto out;
}
- ref_reloc_sym.addr = sym->start;
+ ref_reloc_sym.addr = UM(sym->start);
/*
* Step 5:
@@ -123,10 +126,14 @@ int test__vmlinux_matches_kallsyms(void)
if (sym->start == sym->end)
continue;
- first_pair = machine__find_kernel_symbol(&kallsyms, type, sym->start, NULL, NULL);
+ mem_start = vmlinux_map->unmap_ip(vmlinux_map, sym->start);
+ mem_end = vmlinux_map->unmap_ip(vmlinux_map, sym->end);
+
+ first_pair = machine__find_kernel_symbol(&kallsyms, type,
+ mem_start, NULL, NULL);
pair = first_pair;
- if (pair && pair->start == sym->start) {
+ if (pair && UM(pair->start) == mem_start) {
next_pair:
if (strcmp(sym->name, pair->name) == 0) {
/*
@@ -138,12 +145,20 @@ next_pair:
* off the real size. More than that and we
* _really_ have a problem.
*/
- s64 skew = sym->end - pair->end;
- if (llabs(skew) < page_size)
- continue;
+ s64 skew = mem_end - UM(pair->end);
+ if (llabs(skew) >= page_size)
+ pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
+ mem_start, sym->name, mem_end,
+ UM(pair->end));
+
+ /*
+ * Do not count this as a failure: there are
+ * cases where it is not possible to get the
+ * proper function end from kallsyms.
+ */
+ continue;
- pr_debug("%#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
- sym->start, sym->name, sym->end, pair->end);
} else {
struct rb_node *nnd;
detour:
@@ -152,7 +167,7 @@ detour:
if (nnd) {
struct symbol *next = rb_entry(nnd, struct symbol, rb_node);
- if (next->start == sym->start) {
+ if (UM(next->start) == mem_start) {
pair = next;
goto next_pair;
}
@@ -165,10 +180,11 @@ detour:
}
pr_debug("%#" PRIx64 ": diff name v: %s k: %s\n",
- sym->start, sym->name, pair->name);
+ mem_start, sym->name, pair->name);
}
} else
- pr_debug("%#" PRIx64 ": %s not on kallsyms\n", sym->start, sym->name);
+ pr_debug("%#" PRIx64 ": %s not on kallsyms\n",
+ mem_start, sym->name);
err = -1;
}
@@ -201,16 +217,19 @@ detour:
for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
- pair = map_groups__find(&kallsyms.kmaps, type, pos->start);
+ mem_start = vmlinux_map->unmap_ip(vmlinux_map, pos->start);
+ mem_end = vmlinux_map->unmap_ip(vmlinux_map, pos->end);
+
+ pair = map_groups__find(&kallsyms.kmaps, type, mem_start);
if (pair == NULL || pair->priv)
continue;
- if (pair->start == pos->start) {
+ if (pair->start == mem_start) {
pair->priv = 1;
pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
pos->start, pos->end, pos->pgoff, pos->dso->name);
- if (pos->pgoff != pair->pgoff || pos->end != pair->end)
- pr_info(": \n*%" PRIx64 "-%" PRIx64 " %" PRIx64 "",
+ if (mem_end != pair->end)
+ pr_info(":\n*%" PRIx64 "-%" PRIx64 " %" PRIx64,
pair->start, pair->end, pair->pgoff);
pr_info(" %s\n", pair->dso->name);
pair->priv = 1;
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index cc64d3f7fc3..08545ae4699 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -428,6 +428,14 @@ static void annotate_browser__init_asm_mode(struct annotate_browser *browser)
browser->b.nr_entries = browser->nr_asm_entries;
}
+#define SYM_TITLE_MAX_SIZE (PATH_MAX + 64)
+
+static int sym_title(struct symbol *sym, struct map *map, char *title,
+ size_t sz)
+{
+ return snprintf(title, sz, "%s %s", sym->name, map->dso->long_name);
+}
+
static bool annotate_browser__callq(struct annotate_browser *browser,
struct perf_evsel *evsel,
struct hist_browser_timer *hbt)
@@ -438,6 +446,7 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
struct annotation *notes;
struct symbol *target;
u64 ip;
+ char title[SYM_TITLE_MAX_SIZE];
if (!ins__is_call(dl->ins))
return false;
@@ -461,7 +470,8 @@ static bool annotate_browser__callq(struct annotate_browser *browser,
pthread_mutex_unlock(&notes->lock);
symbol__tui_annotate(target, ms->map, evsel, hbt);
- ui_browser__show_title(&browser->b, sym->name);
+ sym_title(sym, ms->map, title, sizeof(title));
+ ui_browser__show_title(&browser->b, title);
return true;
}
@@ -495,7 +505,7 @@ static bool annotate_browser__jump(struct annotate_browser *browser)
dl = annotate_browser__find_offset(browser, dl->ops.target.offset, &idx);
if (dl == NULL) {
- ui_helpline__puts("Invallid jump offset");
+ ui_helpline__puts("Invalid jump offset");
return true;
}
@@ -653,8 +663,10 @@ static int annotate_browser__run(struct annotate_browser *browser,
const char *help = "Press 'h' for help on key bindings";
int delay_secs = hbt ? hbt->refresh : 0;
int key;
+ char title[SYM_TITLE_MAX_SIZE];
- if (ui_browser__show(&browser->b, sym->name, help) < 0)
+ sym_title(sym, ms->map, title, sizeof(title));
+ if (ui_browser__show(&browser->b, title, help) < 0)
return -1;
annotate_browser__calc_percent(browser, evsel);
@@ -720,7 +732,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
"s Toggle source code view\n"
"/ Search string\n"
"r Run available scripts\n"
- "? Search previous string\n");
+ "? Search string backwards\n");
continue;
case 'r':
{
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index fc0bd3843d3..7ef36c36047 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -685,8 +685,10 @@ static u64 __hpp_get_##_field(struct hist_entry *he) \
return he->stat._field; \
} \
\
-static int hist_browser__hpp_color_##_type(struct perf_hpp *hpp, \
- struct hist_entry *he) \
+static int \
+hist_browser__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused,\
+ struct perf_hpp *hpp, \
+ struct hist_entry *he) \
{ \
return __hpp__color_fmt(hpp, he, __hpp_get_##_field, _cb); \
}
@@ -701,8 +703,6 @@ __HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us, NULL)
void hist_browser__init_hpp(void)
{
- perf_hpp__column_enable(PERF_HPP__OVERHEAD);
-
perf_hpp__init();
perf_hpp__format[PERF_HPP__OVERHEAD].color =
@@ -762,9 +762,9 @@ static int hist_browser__show_entry(struct hist_browser *browser,
first = false;
if (fmt->color) {
- width -= fmt->color(&hpp, entry);
+ width -= fmt->color(fmt, &hpp, entry);
} else {
- width -= fmt->entry(&hpp, entry);
+ width -= fmt->entry(fmt, &hpp, entry);
slsmg_printf("%s", s);
}
}
@@ -1256,7 +1256,7 @@ static int hists__browser_title(struct hists *hists, char *bf, size_t size,
printed += scnprintf(bf + printed, size - printed,
", Thread: %s(%d)",
(thread->comm_set ? thread->comm : ""),
- thread->pid);
+ thread->tid);
if (dso)
printed += scnprintf(bf + printed, size - printed,
", DSO: %s", dso->short_name);
@@ -1579,7 +1579,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
asprintf(&options[nr_options], "Zoom %s %s(%d) thread",
(browser->hists->thread_filter ? "out of" : "into"),
(thread->comm_set ? thread->comm : ""),
- thread->pid) > 0)
+ thread->tid) > 0)
zoom_thread = nr_options++;
if (dso != NULL &&
@@ -1702,7 +1702,7 @@ zoom_out_thread:
} else {
ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s(%d) thread\"",
thread->comm_set ? thread->comm : "",
- thread->pid);
+ thread->tid);
browser->hists->thread_filter = thread;
sort_thread.elide = true;
pstack__push(fstack, &browser->hists->thread_filter);
diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c
index 9708dd5fb8f..2ca66cc1160 100644
--- a/tools/perf/ui/gtk/hists.c
+++ b/tools/perf/ui/gtk/hists.c
@@ -91,7 +91,8 @@ static u64 he_get_##_field(struct hist_entry *he) \
return he->stat._field; \
} \
\
-static int perf_gtk__hpp_color_##_type(struct perf_hpp *hpp, \
+static int perf_gtk__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
+ struct perf_hpp *hpp, \
struct hist_entry *he) \
{ \
return __hpp__color_fmt(hpp, he, he_get_##_field); \
@@ -108,8 +109,6 @@ __HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us)
void perf_gtk__init_hpp(void)
{
- perf_hpp__column_enable(PERF_HPP__OVERHEAD);
-
perf_hpp__init();
perf_hpp__format[PERF_HPP__OVERHEAD].color =
@@ -124,6 +123,81 @@ void perf_gtk__init_hpp(void)
perf_gtk__hpp_color_overhead_guest_us;
}
+static void callchain_list__sym_name(struct callchain_list *cl,
+ char *bf, size_t bfsize)
+{
+ if (cl->ms.sym)
+ scnprintf(bf, bfsize, "%s", cl->ms.sym->name);
+ else
+ scnprintf(bf, bfsize, "%#" PRIx64, cl->ip);
+}
+
+static void perf_gtk__add_callchain(struct rb_root *root, GtkTreeStore *store,
+ GtkTreeIter *parent, int col, u64 total)
+{
+ struct rb_node *nd;
+ bool has_single_node = (rb_first(root) == rb_last(root));
+
+ for (nd = rb_first(root); nd; nd = rb_next(nd)) {
+ struct callchain_node *node;
+ struct callchain_list *chain;
+ GtkTreeIter iter, new_parent;
+ bool need_new_parent;
+ double percent;
+ u64 hits, child_total;
+
+ node = rb_entry(nd, struct callchain_node, rb_node);
+
+ hits = callchain_cumul_hits(node);
+ percent = 100.0 * hits / total;
+
+ new_parent = *parent;
+ need_new_parent = !has_single_node && (node->val_nr > 1);
+
+ list_for_each_entry(chain, &node->val, list) {
+ char buf[128];
+
+ gtk_tree_store_append(store, &iter, &new_parent);
+
+ scnprintf(buf, sizeof(buf), "%5.2f%%", percent);
+ gtk_tree_store_set(store, &iter, 0, buf, -1);
+
+ callchain_list__sym_name(chain, buf, sizeof(buf));
+ gtk_tree_store_set(store, &iter, col, buf, -1);
+
+ if (need_new_parent) {
+ /*
+ * Only show the top-most symbol in a callchain
+ * if it's not the only callchain.
+ */
+ new_parent = iter;
+ need_new_parent = false;
+ }
+ }
+
+ if (callchain_param.mode == CHAIN_GRAPH_REL)
+ child_total = node->children_hit;
+ else
+ child_total = total;
+
+ /* Now 'iter' contains info of the last callchain_list */
+ perf_gtk__add_callchain(&node->rb_root, store, &iter, col,
+ child_total);
+ }
+}
+
+static void on_row_activated(GtkTreeView *view, GtkTreePath *path,
+ GtkTreeViewColumn *col __maybe_unused,
+ gpointer user_data __maybe_unused)
+{
+ bool expanded = gtk_tree_view_row_expanded(view, path);
+
+ if (expanded)
+ gtk_tree_view_collapse_row(view, path);
+ else
+ gtk_tree_view_expand_row(view, path, FALSE);
+}
+
static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
float min_pcnt)
{
@@ -131,10 +205,11 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
GType col_types[MAX_COLUMNS];
GtkCellRenderer *renderer;
struct sort_entry *se;
- GtkListStore *store;
+ GtkTreeStore *store;
struct rb_node *nd;
GtkWidget *view;
int col_idx;
+ int sym_col = -1;
int nr_cols;
char s[512];
@@ -153,10 +228,13 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
if (se->elide)
continue;
+ if (se == &sort_sym)
+ sym_col = nr_cols;
+
col_types[nr_cols++] = G_TYPE_STRING;
}
- store = gtk_list_store_newv(nr_cols, col_types);
+ store = gtk_tree_store_newv(nr_cols, col_types);
view = gtk_tree_view_new();
@@ -165,7 +243,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
col_idx = 0;
perf_hpp__for_each_format(fmt) {
- fmt->header(&hpp);
+ fmt->header(fmt, &hpp);
gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
-1, ltrim(s),
@@ -183,6 +261,18 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
col_idx++, NULL);
}
+ for (col_idx = 0; col_idx < nr_cols; col_idx++) {
+ GtkTreeViewColumn *column;
+
+ column = gtk_tree_view_get_column(GTK_TREE_VIEW(view), col_idx);
+ gtk_tree_view_column_set_resizable(column, TRUE);
+
+ if (col_idx == sym_col) {
+ gtk_tree_view_set_expander_column(GTK_TREE_VIEW(view),
+ column);
+ }
+ }
+
gtk_tree_view_set_model(GTK_TREE_VIEW(view), GTK_TREE_MODEL(store));
g_object_unref(GTK_TREE_MODEL(store));
@@ -199,17 +289,17 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
if (percent < min_pcnt)
continue;
- gtk_list_store_append(store, &iter);
+ gtk_tree_store_append(store, &iter, NULL);
col_idx = 0;
perf_hpp__for_each_format(fmt) {
if (fmt->color)
- fmt->color(&hpp, h);
+ fmt->color(fmt, &hpp, h);
else
- fmt->entry(&hpp, h);
+ fmt->entry(fmt, &hpp, h);
- gtk_list_store_set(store, &iter, col_idx++, s, -1);
+ gtk_tree_store_set(store, &iter, col_idx++, s, -1);
}
list_for_each_entry(se, &hist_entry__sort_list, list) {
@@ -219,10 +309,26 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
se->se_snprintf(h, s, ARRAY_SIZE(s),
hists__col_len(hists, se->se_width_idx));
- gtk_list_store_set(store, &iter, col_idx++, s, -1);
+ gtk_tree_store_set(store, &iter, col_idx++, s, -1);
+ }
+
+ if (symbol_conf.use_callchain && sort__has_sym) {
+ u64 total;
+
+ if (callchain_param.mode == CHAIN_GRAPH_REL)
+ total = h->stat.period;
+ else
+ total = hists->stats.total_period;
+
+ perf_gtk__add_callchain(&h->sorted_chain, store, &iter,
+ sym_col, total);
}
}
+ gtk_tree_view_set_rules_hint(GTK_TREE_VIEW(view), TRUE);
+
+ g_signal_connect(view, "row-activated",
+ G_CALLBACK(on_row_activated), NULL);
gtk_container_add(GTK_CONTAINER(window), view);
}
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 4bf91b09d62..0a193281eba 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -1,4 +1,5 @@
#include <math.h>
+#include <linux/compiler.h>
#include "../util/hist.h"
#include "../util/util.h"
@@ -79,7 +80,8 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
}
#define __HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \
-static int hpp__header_##_type(struct perf_hpp *hpp) \
+static int hpp__header_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
+ struct perf_hpp *hpp) \
{ \
int len = _min_width; \
\
@@ -92,7 +94,8 @@ static int hpp__header_##_type(struct perf_hpp *hpp) \
}
#define __HPP_WIDTH_FN(_type, _min_width, _unit_width) \
-static int hpp__width_##_type(struct perf_hpp *hpp __maybe_unused) \
+static int hpp__width_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
+ struct perf_hpp *hpp __maybe_unused) \
{ \
int len = _min_width; \
\
@@ -110,14 +113,16 @@ static u64 he_get_##_field(struct hist_entry *he) \
return he->stat._field; \
} \
\
-static int hpp__color_##_type(struct perf_hpp *hpp, struct hist_entry *he) \
+static int hpp__color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
+ struct perf_hpp *hpp, struct hist_entry *he) \
{ \
return __hpp__fmt(hpp, he, he_get_##_field, " %6.2f%%", \
(hpp_snprint_fn)percent_color_snprintf, true); \
}
#define __HPP_ENTRY_PERCENT_FN(_type, _field) \
-static int hpp__entry_##_type(struct perf_hpp *hpp, struct hist_entry *he) \
+static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused, \
+ struct perf_hpp *hpp, struct hist_entry *he) \
{ \
const char *fmt = symbol_conf.field_sep ? " %.2f" : " %6.2f%%"; \
return __hpp__fmt(hpp, he, he_get_##_field, fmt, \
@@ -130,7 +135,8 @@ static u64 he_get_raw_##_field(struct hist_entry *he) \
return he->stat._field; \
} \
\
-static int hpp__entry_##_type(struct perf_hpp *hpp, struct hist_entry *he) \
+static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused, \
+ struct perf_hpp *hpp, struct hist_entry *he) \
{ \
const char *fmt = symbol_conf.field_sep ? " %"PRIu64 : " %11"PRIu64; \
return __hpp__fmt(hpp, he, he_get_raw_##_field, fmt, scnprintf, false); \
@@ -157,196 +163,6 @@ HPP_PERCENT_FNS(overhead_guest_us, "guest usr", period_guest_us, 9, 8)
HPP_RAW_FNS(samples, "Samples", nr_events, 12, 12)
HPP_RAW_FNS(period, "Period", period, 12, 12)
-
-static int hpp__header_baseline(struct perf_hpp *hpp)
-{
- return scnprintf(hpp->buf, hpp->size, "Baseline");
-}
-
-static int hpp__width_baseline(struct perf_hpp *hpp __maybe_unused)
-{
- return 8;
-}
-
-static double baseline_percent(struct hist_entry *he)
-{
- struct hist_entry *pair = hist_entry__next_pair(he);
- struct hists *pair_hists = pair ? pair->hists : NULL;
- double percent = 0.0;
-
- if (pair) {
- u64 total_period = pair_hists->stats.total_period;
- u64 base_period = pair->stat.period;
-
- percent = 100.0 * base_period / total_period;
- }
-
- return percent;
-}
-
-static int hpp__color_baseline(struct perf_hpp *hpp, struct hist_entry *he)
-{
- double percent = baseline_percent(he);
-
- if (hist_entry__has_pairs(he) || symbol_conf.field_sep)
- return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%%", percent);
- else
- return scnprintf(hpp->buf, hpp->size, " ");
-}
-
-static int hpp__entry_baseline(struct perf_hpp *hpp, struct hist_entry *he)
-{
- double percent = baseline_percent(he);
- const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%%";
-
- if (hist_entry__has_pairs(he) || symbol_conf.field_sep)
- return scnprintf(hpp->buf, hpp->size, fmt, percent);
- else
- return scnprintf(hpp->buf, hpp->size, " ");
-}
-
-static int hpp__header_period_baseline(struct perf_hpp *hpp)
-{
- const char *fmt = symbol_conf.field_sep ? "%s" : "%12s";
-
- return scnprintf(hpp->buf, hpp->size, fmt, "Period Base");
-}
-
-static int hpp__width_period_baseline(struct perf_hpp *hpp __maybe_unused)
-{
- return 12;
-}
-
-static int hpp__entry_period_baseline(struct perf_hpp *hpp, struct hist_entry *he)
-{
- struct hist_entry *pair = hist_entry__next_pair(he);
- u64 period = pair ? pair->stat.period : 0;
- const char *fmt = symbol_conf.field_sep ? "%" PRIu64 : "%12" PRIu64;
-
- return scnprintf(hpp->buf, hpp->size, fmt, period);
-}
-
-static int hpp__header_delta(struct perf_hpp *hpp)
-{
- const char *fmt = symbol_conf.field_sep ? "%s" : "%7s";
-
- return scnprintf(hpp->buf, hpp->size, fmt, "Delta");
-}
-
-static int hpp__width_delta(struct perf_hpp *hpp __maybe_unused)
-{
- return 7;
-}
-
-static int hpp__entry_delta(struct perf_hpp *hpp, struct hist_entry *he)
-{
- struct hist_entry *pair = hist_entry__next_pair(he);
- const char *fmt = symbol_conf.field_sep ? "%s" : "%7.7s";
- char buf[32] = " ";
- double diff = 0.0;
-
- if (pair) {
- if (he->diff.computed)
- diff = he->diff.period_ratio_delta;
- else
- diff = perf_diff__compute_delta(he, pair);
- } else
- diff = perf_diff__period_percent(he, he->stat.period);
-
- if (fabs(diff) >= 0.01)
- scnprintf(buf, sizeof(buf), "%+4.2F%%", diff);
-
- return scnprintf(hpp->buf, hpp->size, fmt, buf);
-}
-
-static int hpp__header_ratio(struct perf_hpp *hpp)
-{
- const char *fmt = symbol_conf.field_sep ? "%s" : "%14s";
-
- return scnprintf(hpp->buf, hpp->size, fmt, "Ratio");
-}
-
-static int hpp__width_ratio(struct perf_hpp *hpp __maybe_unused)
-{
- return 14;
-}
-
-static int hpp__entry_ratio(struct perf_hpp *hpp, struct hist_entry *he)
-{
- struct hist_entry *pair = hist_entry__next_pair(he);
- const char *fmt = symbol_conf.field_sep ? "%s" : "%14s";
- char buf[32] = " ";
- double ratio = 0.0;
-
- if (pair) {
- if (he->diff.computed)
- ratio = he->diff.period_ratio;
- else
- ratio = perf_diff__compute_ratio(he, pair);
- }
-
- if (ratio > 0.0)
- scnprintf(buf, sizeof(buf), "%+14.6F", ratio);
-
- return scnprintf(hpp->buf, hpp->size, fmt, buf);
-}
-
-static int hpp__header_wdiff(struct perf_hpp *hpp)
-{
- const char *fmt = symbol_conf.field_sep ? "%s" : "%14s";
-
- return scnprintf(hpp->buf, hpp->size, fmt, "Weighted diff");
-}
-
-static int hpp__width_wdiff(struct perf_hpp *hpp __maybe_unused)
-{
- return 14;
-}
-
-static int hpp__entry_wdiff(struct perf_hpp *hpp, struct hist_entry *he)
-{
- struct hist_entry *pair = hist_entry__next_pair(he);
- const char *fmt = symbol_conf.field_sep ? "%s" : "%14s";
- char buf[32] = " ";
- s64 wdiff = 0;
-
- if (pair) {
- if (he->diff.computed)
- wdiff = he->diff.wdiff;
- else
- wdiff = perf_diff__compute_wdiff(he, pair);
- }
-
- if (wdiff != 0)
- scnprintf(buf, sizeof(buf), "%14ld", wdiff);
-
- return scnprintf(hpp->buf, hpp->size, fmt, buf);
-}
-
-static int hpp__header_formula(struct perf_hpp *hpp)
-{
- const char *fmt = symbol_conf.field_sep ? "%s" : "%70s";
-
- return scnprintf(hpp->buf, hpp->size, fmt, "Formula");
-}
-
-static int hpp__width_formula(struct perf_hpp *hpp __maybe_unused)
-{
- return 70;
-}
-
-static int hpp__entry_formula(struct perf_hpp *hpp, struct hist_entry *he)
-{
- struct hist_entry *pair = hist_entry__next_pair(he);
- const char *fmt = symbol_conf.field_sep ? "%s" : "%-70s";
- char buf[96] = " ";
-
- if (pair)
- perf_diff__formula(he, pair, buf, sizeof(buf));
-
- return scnprintf(hpp->buf, hpp->size, fmt, buf);
-}
-
#define HPP__COLOR_PRINT_FNS(_name) \
{ \
.header = hpp__header_ ## _name, \
@@ -363,19 +179,13 @@ static int hpp__entry_formula(struct perf_hpp *hpp, struct hist_entry *he)
}
struct perf_hpp_fmt perf_hpp__format[] = {
- HPP__COLOR_PRINT_FNS(baseline),
HPP__COLOR_PRINT_FNS(overhead),
HPP__COLOR_PRINT_FNS(overhead_sys),
HPP__COLOR_PRINT_FNS(overhead_us),
HPP__COLOR_PRINT_FNS(overhead_guest_sys),
HPP__COLOR_PRINT_FNS(overhead_guest_us),
HPP__PRINT_FNS(samples),
- HPP__PRINT_FNS(period),
- HPP__PRINT_FNS(period_baseline),
- HPP__PRINT_FNS(delta),
- HPP__PRINT_FNS(ratio),
- HPP__PRINT_FNS(wdiff),
- HPP__PRINT_FNS(formula)
+ HPP__PRINT_FNS(period)
};
LIST_HEAD(perf_hpp__list);
@@ -396,6 +206,8 @@ LIST_HEAD(perf_hpp__list);
void perf_hpp__init(void)
{
+ perf_hpp__column_enable(PERF_HPP__OVERHEAD);
+
if (symbol_conf.show_cpu_utilization) {
perf_hpp__column_enable(PERF_HPP__OVERHEAD_SYS);
perf_hpp__column_enable(PERF_HPP__OVERHEAD_US);
@@ -424,46 +236,6 @@ void perf_hpp__column_enable(unsigned col)
perf_hpp__column_register(&perf_hpp__format[col]);
}
-static inline void advance_hpp(struct perf_hpp *hpp, int inc)
-{
- hpp->buf += inc;
- hpp->size -= inc;
-}
-
-int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he,
- bool color)
-{
- const char *sep = symbol_conf.field_sep;
- struct perf_hpp_fmt *fmt;
- char *start = hpp->buf;
- int ret;
- bool first = true;
-
- if (symbol_conf.exclude_other && !he->parent)
- return 0;
-
- perf_hpp__for_each_format(fmt) {
- /*
- * If there's no field_sep, we still need
- * to display initial ' '.
- */
- if (!sep || !first) {
- ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
- advance_hpp(hpp, ret);
- } else
- first = false;
-
- if (color && fmt->color)
- ret = fmt->color(hpp, he);
- else
- ret = fmt->entry(hpp, he);
-
- advance_hpp(hpp, ret);
- }
-
- return hpp->buf - start;
-}
-
int hist_entry__sort_snprintf(struct hist_entry *he, char *s, size_t size,
struct hists *hists)
{
@@ -499,7 +271,7 @@ unsigned int hists__sort_list_width(struct hists *hists)
if (i)
ret += 2;
- ret += fmt->width(&dummy_hpp);
+ ret += fmt->width(fmt, &dummy_hpp);
}
list_for_each_entry(se, &hist_entry__sort_list, list)
diff --git a/tools/perf/ui/setup.c b/tools/perf/ui/setup.c
index ae6a789cb0f..47d9a571f26 100644
--- a/tools/perf/ui/setup.c
+++ b/tools/perf/ui/setup.c
@@ -30,7 +30,6 @@ void setup_browser(bool fallback_to_pager)
if (fallback_to_pager)
setup_pager();
- perf_hpp__column_enable(PERF_HPP__OVERHEAD);
perf_hpp__init();
break;
}
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index ae7a7543224..5b4fb330f65 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -308,6 +308,47 @@ static size_t hist_entry__callchain_fprintf(struct hist_entry *he,
return hist_entry_callchain__fprintf(he, total_period, left_margin, fp);
}
+static inline void advance_hpp(struct perf_hpp *hpp, int inc)
+{
+ hpp->buf += inc;
+ hpp->size -= inc;
+}
+
+static int hist_entry__period_snprintf(struct perf_hpp *hpp,
+ struct hist_entry *he,
+ bool color)
+{
+ const char *sep = symbol_conf.field_sep;
+ struct perf_hpp_fmt *fmt;
+ char *start = hpp->buf;
+ int ret;
+ bool first = true;
+
+ if (symbol_conf.exclude_other && !he->parent)
+ return 0;
+
+ perf_hpp__for_each_format(fmt) {
+ /*
+ * If there's no field_sep, we still need
+ * to display initial ' '.
+ */
+ if (!sep || !first) {
+ ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
+ advance_hpp(hpp, ret);
+ } else
+ first = false;
+
+ if (color && fmt->color)
+ ret = fmt->color(fmt, hpp, he);
+ else
+ ret = fmt->entry(fmt, hpp, he);
+
+ advance_hpp(hpp, ret);
+ }
+
+ return hpp->buf - start;
+}
+
static int hist_entry__fprintf(struct hist_entry *he, size_t size,
struct hists *hists, FILE *fp)
{
@@ -365,7 +406,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
else
first = false;
- fmt->header(&dummy_hpp);
+ fmt->header(fmt, &dummy_hpp);
fprintf(fp, "%s", bf);
}
@@ -410,7 +451,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
else
first = false;
- width = fmt->width(&dummy_hpp);
+ width = fmt->width(fmt, &dummy_hpp);
for (i = 0; i < width; i++)
fprintf(fp, ".");
}
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index d102716c43a..bfc5a27597d 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -110,10 +110,10 @@ static int jump__parse(struct ins_operands *ops)
{
const char *s = strchr(ops->raw, '+');
- ops->target.addr = strtoll(ops->raw, NULL, 16);
+ ops->target.addr = strtoull(ops->raw, NULL, 16);
if (s++ != NULL)
- ops->target.offset = strtoll(s, NULL, 16);
+ ops->target.offset = strtoull(s, NULL, 16);
else
ops->target.offset = UINT64_MAX;
@@ -821,11 +821,55 @@ static int symbol__parse_objdump_line(struct symbol *sym, struct map *map,
if (dl == NULL)
return -1;
+ if (dl->ops.target.offset == UINT64_MAX)
+ dl->ops.target.offset = dl->ops.target.addr -
+ map__rip_2objdump(map, sym->start);
+
+ /*
+ * kcore has no symbols, so add the call target name if it is on the
+ * same map.
+ */
+ if (dl->ins && ins__is_call(dl->ins) && !dl->ops.target.name) {
+ struct symbol *s;
+ u64 ip = dl->ops.target.addr;
+
+ if (ip >= map->start && ip <= map->end) {
+ ip = map->map_ip(map, ip);
+ s = map__find_symbol(map, ip, NULL);
+ if (s && s->start == ip)
+ dl->ops.target.name = strdup(s->name);
+ }
+ }
+
disasm__add(&notes->src->source, dl);
return 0;
}
+static void delete_last_nop(struct symbol *sym)
+{
+ struct annotation *notes = symbol__annotation(sym);
+ struct list_head *list = &notes->src->source;
+ struct disasm_line *dl;
+
+ while (!list_empty(list)) {
+ dl = list_entry(list->prev, struct disasm_line, node);
+
+ if (dl->ins && dl->ins->ops) {
+ if (dl->ins->ops != &nop_ops)
+ return;
+ } else {
+ if (!strstr(dl->line, " nop ") &&
+ !strstr(dl->line, " nopl ") &&
+ !strstr(dl->line, " nopw "))
+ return;
+ }
+
+ list_del(&dl->node);
+ disasm_line__free(dl);
+ }
+}
+
int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize)
{
struct dso *dso = map->dso;
@@ -864,7 +908,8 @@ fallback:
free_filename = false;
}
- if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS) {
+ if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
+ !dso__is_kcore(dso)) {
char bf[BUILD_ID_SIZE * 2 + 16] = " with build id ";
char *build_id_msg = NULL;
@@ -898,7 +943,7 @@ fallback:
snprintf(command, sizeof(command),
"%s %s%s --start-address=0x%016" PRIx64
" --stop-address=0x%016" PRIx64
- " -d %s %s -C %s|grep -v %s|expand",
+ " -d %s %s -C %s 2>/dev/null|grep -v %s|expand",
objdump_path ? objdump_path : "objdump",
disassembler_style ? "-M " : "",
disassembler_style ? disassembler_style : "",
@@ -918,6 +963,13 @@ fallback:
if (symbol__parse_objdump_line(sym, map, file, privsize) < 0)
break;
+ /*
+ * kallsyms does not have symbol sizes so there may be a nop at the end.
+ * Remove it.
+ */
+ if (dso__is_kcore(dso))
+ delete_last_nop(sym);
+
pclose(file);
out_free_filename:
if (free_filename)
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 5295625c0c0..fb584092eb8 100644
--- a/tools/perf/util/build-id.c
+++ b/tools/perf/util/build-id.c
@@ -18,13 +18,14 @@
int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
union perf_event *event,
- struct perf_sample *sample __maybe_unused,
+ struct perf_sample *sample,
struct perf_evsel *evsel __maybe_unused,
struct machine *machine)
{
struct addr_location al;
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
+ struct thread *thread = machine__findnew_thread(machine, sample->pid,
+ sample->pid);
if (thread == NULL) {
pr_err("problem processing %d event, skipping it.\n",
@@ -33,7 +34,7 @@ int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
}
thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
- event->ip.ip, &al);
+ sample->ip, &al);
if (al.map != NULL)
al.map->dso->hit = 1;
@@ -47,7 +48,9 @@ static int perf_event__exit_del_thread(struct perf_tool *tool __maybe_unused,
__maybe_unused,
struct machine *machine)
{
- struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
+ struct thread *thread = machine__findnew_thread(machine,
+ event->fork.pid,
+ event->fork.tid);
dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
event->fork.ppid, event->fork.ptid);
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 42b6a632fe7..482f68081cd 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -15,19 +15,12 @@
#include <errno.h>
#include <math.h>
+#include "hist.h"
#include "util.h"
#include "callchain.h"
__thread struct callchain_cursor callchain_cursor;
-bool ip_callchain__valid(struct ip_callchain *chain,
- const union perf_event *event)
-{
- unsigned int chain_size = event->header.size;
- chain_size -= (unsigned long)&event->ip.__more_data - (unsigned long)event;
- return chain->nr * sizeof(u64) <= chain_size;
-}
-
#define chain_for_each_child(child, parent) \
list_for_each_entry(child, &parent->children, siblings)
@@ -327,7 +320,8 @@ append_chain(struct callchain_node *root,
/*
* Lookup in the current node
* If we have a symbol, then compare the start to match
- * anywhere inside a function.
+ * anywhere inside a function, unless function
+ * mode is disabled.
*/
list_for_each_entry(cnode, &root->val, list) {
struct callchain_cursor_node *node;
@@ -339,7 +333,8 @@ append_chain(struct callchain_node *root,
sym = node->sym;
- if (cnode->ms.sym && sym) {
+ if (cnode->ms.sym && sym &&
+ callchain_param.key == CCKEY_FUNCTION) {
if (cnode->ms.sym->start != sym->start)
break;
} else if (cnode->ip != node->ip)
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 3ee9f67d5af..2b585bc308c 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -41,12 +41,18 @@ struct callchain_param;
typedef void (*sort_chain_func_t)(struct rb_root *, struct callchain_root *,
u64, struct callchain_param *);
+enum chain_key {
+ CCKEY_FUNCTION,
+ CCKEY_ADDRESS
+};
+
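/*
 * Illustrative note, not part of this patch: with CCKEY_FUNCTION, two
 * callchain entries that hit different addresses inside the same symbol
 * are merged into one node (append_chain() compares sym->start), whereas
 * CCKEY_ADDRESS keeps them apart by comparing the raw ips.  A caller that
 * wants address-based matching would set, for example:
 *
 *	callchain_param.key = CCKEY_ADDRESS;
 */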
struct callchain_param {
enum chain_mode mode;
u32 print_limit;
double min_percent;
sort_chain_func_t sort;
enum chain_order order;
+ enum chain_key key;
};
struct callchain_list {
@@ -103,11 +109,6 @@ int callchain_append(struct callchain_root *root,
int callchain_merge(struct callchain_cursor *cursor,
struct callchain_root *dst, struct callchain_root *src);
-struct ip_callchain;
-union perf_event;
-
-bool ip_callchain__valid(struct ip_callchain *chain,
- const union perf_event *event);
/*
* Initialize a cursor before adding entries inside, but keep
* the previously allocated entries as a cache.
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 9bed02e5fb3..b123bb9d6f5 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -41,7 +41,7 @@ static inline int cpu_map__nr(const struct cpu_map *map)
return map ? map->nr : 1;
}
-static inline bool cpu_map__all(const struct cpu_map *map)
+static inline bool cpu_map__empty(const struct cpu_map *map)
{
return map ? map->map[0] == -1 : true;
}
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index c4374f07603..e3c1ff8512c 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -78,6 +78,8 @@ int dso__binary_type_file(struct dso *dso, enum dso_binary_type type,
symbol_conf.symfs, build_id_hex, build_id_hex + 2);
break;
+ case DSO_BINARY_TYPE__VMLINUX:
+ case DSO_BINARY_TYPE__GUEST_VMLINUX:
case DSO_BINARY_TYPE__SYSTEM_PATH_DSO:
snprintf(file, size, "%s%s",
symbol_conf.symfs, dso->long_name);
@@ -93,11 +95,14 @@ int dso__binary_type_file(struct dso *dso, enum dso_binary_type type,
dso->long_name);
break;
+ case DSO_BINARY_TYPE__KCORE:
+ case DSO_BINARY_TYPE__GUEST_KCORE:
+ snprintf(file, size, "%s", dso->long_name);
+ break;
+
default:
case DSO_BINARY_TYPE__KALLSYMS:
- case DSO_BINARY_TYPE__VMLINUX:
case DSO_BINARY_TYPE__GUEST_KALLSYMS:
- case DSO_BINARY_TYPE__GUEST_VMLINUX:
case DSO_BINARY_TYPE__JAVA_JIT:
case DSO_BINARY_TYPE__NOT_FOUND:
ret = -1;
@@ -419,6 +424,7 @@ struct dso *dso__new(const char *name)
dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
dso->data_type = DSO_BINARY_TYPE__NOT_FOUND;
dso->loaded = 0;
+ dso->rel = 0;
dso->sorted_by_name = 0;
dso->has_build_id = 0;
dso->kernel = DSO_TYPE_USER;
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index d51aaf272c6..b793053335d 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -3,6 +3,7 @@
#include <linux/types.h>
#include <linux/rbtree.h>
+#include <stdbool.h>
#include "types.h"
#include "map.h"
@@ -20,6 +21,8 @@ enum dso_binary_type {
DSO_BINARY_TYPE__SYSTEM_PATH_DSO,
DSO_BINARY_TYPE__GUEST_KMODULE,
DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE,
+ DSO_BINARY_TYPE__KCORE,
+ DSO_BINARY_TYPE__GUEST_KCORE,
DSO_BINARY_TYPE__NOT_FOUND,
};
@@ -84,6 +87,7 @@ struct dso {
u8 lname_alloc:1;
u8 sorted_by_name;
u8 loaded;
+ u8 rel;
u8 build_id[BUILD_ID_SIZE];
const char *short_name;
char *long_name;
@@ -146,4 +150,17 @@ size_t dso__fprintf_buildid(struct dso *dso, FILE *fp);
size_t dso__fprintf_symbols_by_name(struct dso *dso,
enum map_type type, FILE *fp);
size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp);
+
+static inline bool dso__is_vmlinux(struct dso *dso)
+{
+ return dso->data_type == DSO_BINARY_TYPE__VMLINUX ||
+ dso->data_type == DSO_BINARY_TYPE__GUEST_VMLINUX;
+}
+
+static inline bool dso__is_kcore(struct dso *dso)
+{
+ return dso->data_type == DSO_BINARY_TYPE__KCORE ||
+ dso->data_type == DSO_BINARY_TYPE__GUEST_KCORE;
+}
+
#endif /* __PERF_DSO */
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 5cd13d768ce..8d51f21107a 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -595,6 +595,7 @@ void thread__find_addr_map(struct thread *self,
struct addr_location *al)
{
struct map_groups *mg = &self->mg;
+ bool load_map = false;
al->thread = self;
al->addr = addr;
@@ -609,11 +610,13 @@ void thread__find_addr_map(struct thread *self,
if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
al->level = 'k';
mg = &machine->kmaps;
+ load_map = true;
} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
al->level = '.';
} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
al->level = 'g';
mg = &machine->kmaps;
+ load_map = true;
} else {
/*
* 'u' means guest os user space.
@@ -654,18 +657,25 @@ try_again:
mg = &machine->kmaps;
goto try_again;
}
- } else
+ } else {
+ /*
+ * Kernel maps might be changed when loading symbols so loading
+ * must be done prior to using kernel maps.
+ */
+ if (load_map)
+ map__load(al->map, machine->symbol_filter);
al->addr = al->map->map_ip(al->map, al->addr);
+ }
}
void thread__find_addr_location(struct thread *thread, struct machine *machine,
u8 cpumode, enum map_type type, u64 addr,
- struct addr_location *al,
- symbol_filter_t filter)
+ struct addr_location *al)
{
thread__find_addr_map(thread, machine, cpumode, type, addr, al);
if (al->map != NULL)
- al->sym = map__find_symbol(al->map, al->addr, filter);
+ al->sym = map__find_symbol(al->map, al->addr,
+ machine->symbol_filter);
else
al->sym = NULL;
}
@@ -673,11 +683,11 @@ void thread__find_addr_location(struct thread *thread, struct machine *machine,
int perf_event__preprocess_sample(const union perf_event *event,
struct machine *machine,
struct addr_location *al,
- struct perf_sample *sample,
- symbol_filter_t filter)
+ struct perf_sample *sample)
{
u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
- struct thread *thread = machine__findnew_thread(machine, event->ip.pid);
+ struct thread *thread = machine__findnew_thread(machine, sample->pid,
+ sample->pid);
if (thread == NULL)
return -1;
@@ -686,7 +696,7 @@ int perf_event__preprocess_sample(const union perf_event *event,
!strlist__has_entry(symbol_conf.comm_list, thread->comm))
goto out_filtered;
- dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
+ dump_printf(" ... thread: %s:%d\n", thread->comm, thread->tid);
/*
* Have we already created the kernel maps for this machine?
*
@@ -699,7 +709,7 @@ int perf_event__preprocess_sample(const union perf_event *event,
machine__create_kernel_maps(machine);
thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
- event->ip.ip, al);
+ sample->ip, al);
dump_printf(" ...... dso: %s\n",
al->map ? al->map->dso->long_name :
al->level == 'H' ? "[hypervisor]" : "<not found>");
@@ -717,7 +727,8 @@ int perf_event__preprocess_sample(const union perf_event *event,
dso->long_name)))))
goto out_filtered;
- al->sym = map__find_symbol(al->map, al->addr, filter);
+ al->sym = map__find_symbol(al->map, al->addr,
+ machine->symbol_filter);
}
if (symbol_conf.sym_list &&
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 181389535c0..93130d856bf 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -8,16 +8,6 @@
#include "map.h"
#include "build-id.h"
-/*
- * PERF_SAMPLE_IP | PERF_SAMPLE_TID | *
- */
-struct ip_event {
- struct perf_event_header header;
- u64 ip;
- u32 pid, tid;
- unsigned char __more_data[];
-};
-
struct mmap_event {
struct perf_event_header header;
u32 pid, tid;
@@ -63,7 +53,8 @@ struct read_event {
(PERF_SAMPLE_IP | PERF_SAMPLE_TID | \
PERF_SAMPLE_TIME | PERF_SAMPLE_ADDR | \
PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | \
- PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD)
+ PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD | \
+ PERF_SAMPLE_IDENTIFIER)
struct sample_event {
struct perf_event_header header;
@@ -71,6 +62,7 @@ struct sample_event {
};
struct regs_dump {
+ u64 abi;
u64 *regs;
};
@@ -80,6 +72,23 @@ struct stack_dump {
char *data;
};
+struct sample_read_value {
+ u64 value;
+ u64 id;
+};
+
+struct sample_read {
+ u64 time_enabled;
+ u64 time_running;
+ union {
+ struct {
+ u64 nr;
+ struct sample_read_value *values;
+ } group;
+ struct sample_read_value one;
+ };
+};
+
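For orientation, the new struct sample_read mirrors the order of a PERF_SAMPLE_READ payload on the ring buffer: group.nr or one.value first, then the optional time_enabled/time_running words, then either nr {value, id} pairs or the single id. The walker below is a minimal sketch of that layout, assuming only the read_format bits from <linux/perf_event.h>; the helper and struct names are illustrative, not part of this patch.

#include <stdint.h>
#include <linux/perf_event.h>

struct sketch_read_value { uint64_t value, id; };

/* Advance past the PERF_SAMPLE_READ data of one sample. */
static const uint64_t *sketch_skip_sample_read(const uint64_t *array,
					       uint64_t read_format)
{
	uint64_t nr = 1;

	if (read_format & PERF_FORMAT_GROUP)
		nr = *array++;			/* group.nr */
	else
		array++;			/* one.value */

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		array++;			/* time_enabled */
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		array++;			/* time_running */

	if (read_format & PERF_FORMAT_GROUP)	/* nr {value, id} pairs */
		array += nr * (sizeof(struct sketch_read_value) / sizeof(uint64_t));
	else
		array++;			/* one.id (PERF_FORMAT_ID) */

	return array;
}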
struct perf_sample {
u64 ip;
u32 pid, tid;
@@ -97,6 +106,7 @@ struct perf_sample {
struct branch_stack *branch_stack;
struct regs_dump user_regs;
struct stack_dump user_stack;
+ struct sample_read read;
};
#define PERF_MEM_DATA_SRC_NONE \
@@ -116,7 +126,7 @@ struct build_id_event {
enum perf_user_event_type { /* above any possible kernel type */
PERF_RECORD_USER_TYPE_START = 64,
PERF_RECORD_HEADER_ATTR = 64,
- PERF_RECORD_HEADER_EVENT_TYPE = 65,
+ PERF_RECORD_HEADER_EVENT_TYPE = 65, /* deprecated */
PERF_RECORD_HEADER_TRACING_DATA = 66,
PERF_RECORD_HEADER_BUILD_ID = 67,
PERF_RECORD_FINISHED_ROUND = 68,
@@ -148,7 +158,6 @@ struct tracing_data_event {
union perf_event {
struct perf_event_header header;
- struct ip_event ip;
struct mmap_event mmap;
struct comm_event comm;
struct fork_event fork;
@@ -216,12 +225,14 @@ struct addr_location;
int perf_event__preprocess_sample(const union perf_event *self,
struct machine *machine,
struct addr_location *al,
- struct perf_sample *sample,
- symbol_filter_t filter);
+ struct perf_sample *sample);
const char *perf_event__name(unsigned int id);
+size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
+ u64 sample_regs_user, u64 read_format);
int perf_event__synthesize_sample(union perf_event *event, u64 type,
+ u64 sample_regs_user, u64 read_format,
const struct perf_sample *sample,
bool swapped);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 8065ce8fa9a..b8727ae45e3 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -14,6 +14,7 @@
#include "target.h"
#include "evlist.h"
#include "evsel.h"
+#include "debug.h"
#include <unistd.h>
#include "parse-events.h"
@@ -48,26 +49,19 @@ struct perf_evlist *perf_evlist__new(void)
return evlist;
}
-void perf_evlist__config(struct perf_evlist *evlist,
- struct perf_record_opts *opts)
+/**
+ * perf_evlist__set_id_pos - set the positions of event ids.
+ * @evlist: selected event list
+ *
+ * Events with compatible sample types all have the same id_pos
+ * and is_pos. For convenience, put a copy on evlist.
+ */
+void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel;
- /*
- * Set the evsel leader links before we configure attributes,
- * since some might depend on this info.
- */
- if (opts->group)
- perf_evlist__set_leader(evlist);
-
- if (evlist->cpus->map[0] < 0)
- opts->no_inherit = true;
-
- list_for_each_entry(evsel, &evlist->entries, node) {
- perf_evsel__config(evsel, opts);
+ struct perf_evsel *first = perf_evlist__first(evlist);
- if (evlist->nr_entries > 1)
- perf_evsel__set_sample_id(evsel);
- }
+ evlist->id_pos = first->id_pos;
+ evlist->is_pos = first->is_pos;
}
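What id_pos/is_pos buy the evlist, in a nutshell: once every event agrees on those positions, the event id can be read straight out of the raw u64 payload without first knowing which evsel produced the record, from the front for PERF_RECORD_SAMPLE and from the back for everything else (this is what perf_evlist__event2id() below does). A hedged, stand-alone sketch of that indexing; the function name and parameters are illustrative, not taken from the patch.

#include <stddef.h>
#include <stdint.h>

/* array/n describe the u64 payload that follows the perf_event_header. */
static int sketch_record_id(const uint64_t *array, size_t n, int is_sample,
			    int id_pos, int is_pos, uint64_t *id)
{
	if (is_sample) {
		if (id_pos < 0 || (size_t)id_pos >= n)
			return -1;
		*id = array[id_pos];		/* counted from the front */
	} else {
		if (is_pos < 1 || (size_t)is_pos > n)
			return -1;
		*id = array[n - is_pos];	/* counted from the back */
	}
	return 0;
}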
static void perf_evlist__purge(struct perf_evlist *evlist)
@@ -100,15 +94,20 @@ void perf_evlist__delete(struct perf_evlist *evlist)
void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
list_add_tail(&entry->node, &evlist->entries);
- ++evlist->nr_entries;
+ if (!evlist->nr_entries++)
+ perf_evlist__set_id_pos(evlist);
}
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
struct list_head *list,
int nr_entries)
{
+ bool set_id_pos = !evlist->nr_entries;
+
list_splice_tail(list, &evlist->entries);
evlist->nr_entries += nr_entries;
+ if (set_id_pos)
+ perf_evlist__set_id_pos(evlist);
}
void __perf_evlist__set_leader(struct list_head *list)
@@ -209,6 +208,21 @@ perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
return NULL;
}
+struct perf_evsel *
+perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
+ const char *name)
+{
+ struct perf_evsel *evsel;
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
+ (strcmp(evsel->name, name) == 0))
+ return evsel;
+ }
+
+ return NULL;
+}
+
int perf_evlist__add_newtp(struct perf_evlist *evlist,
const char *sys, const char *name, void *handler)
{
@@ -232,7 +246,7 @@ void perf_evlist__disable(struct perf_evlist *evlist)
for (cpu = 0; cpu < nr_cpus; cpu++) {
list_for_each_entry(pos, &evlist->entries, node) {
- if (!perf_evsel__is_group_leader(pos))
+ if (!perf_evsel__is_group_leader(pos) || !pos->fd)
continue;
for (thread = 0; thread < nr_threads; thread++)
ioctl(FD(pos, cpu, thread),
@@ -250,7 +264,7 @@ void perf_evlist__enable(struct perf_evlist *evlist)
for (cpu = 0; cpu < nr_cpus; cpu++) {
list_for_each_entry(pos, &evlist->entries, node) {
- if (!perf_evsel__is_group_leader(pos))
+ if (!perf_evsel__is_group_leader(pos) || !pos->fd)
continue;
for (thread = 0; thread < nr_threads; thread++)
ioctl(FD(pos, cpu, thread),
@@ -259,6 +273,44 @@ void perf_evlist__enable(struct perf_evlist *evlist)
}
}
+int perf_evlist__disable_event(struct perf_evlist *evlist,
+ struct perf_evsel *evsel)
+{
+ int cpu, thread, err;
+
+ if (!evsel->fd)
+ return 0;
+
+ for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ for (thread = 0; thread < evlist->threads->nr; thread++) {
+ err = ioctl(FD(evsel, cpu, thread),
+ PERF_EVENT_IOC_DISABLE, 0);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
+int perf_evlist__enable_event(struct perf_evlist *evlist,
+ struct perf_evsel *evsel)
+{
+ int cpu, thread, err;
+
+ if (!evsel->fd)
+ return -EINVAL;
+
+ for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
+ for (thread = 0; thread < evlist->threads->nr; thread++) {
+ err = ioctl(FD(evsel, cpu, thread),
+ PERF_EVENT_IOC_ENABLE, 0);
+ if (err)
+ return err;
+ }
+ }
+ return 0;
+}
+
static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
int nr_cpus = cpu_map__nr(evlist->cpus);
@@ -302,6 +354,24 @@ static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
{
u64 read_data[4] = { 0, };
int id_idx = 1; /* The first entry is the counter value */
+ u64 id;
+ int ret;
+
+ ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
+ if (!ret)
+ goto add;
+
+ if (errno != ENOTTY)
+ return -1;
+
+ /* Legacy way to get event id.. All hail to old kernels! */
+
+ /*
+ * This way does not work with group format read, so bail
+ * out in that case.
+ */
+ if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
+ return -1;
if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
read(fd, &read_data, sizeof(read_data)) == -1)
@@ -312,25 +382,39 @@ static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
++id_idx;
- perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
+ id = read_data[id_idx];
+
+ add:
+ perf_evlist__id_add(evlist, evsel, cpu, thread, id);
return 0;
}
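The legacy fallback above works because PERF_FORMAT_ID places the id after the counter value and the optional enabled/running times in a plain (non-group) read(). A hedged sketch of that layout as a stand-alone helper; the name is an assumption and, as the comment notes, the trick cannot work once PERF_FORMAT_GROUP is set.

#include <stdint.h>
#include <unistd.h>
#include <linux/perf_event.h>

static int sketch_legacy_id(int fd, uint64_t read_format, uint64_t *id)
{
	uint64_t buf[4] = { 0, };	/* value [,enabled] [,running] [,id] */
	int idx = 1;			/* first entry is the counter value */

	if (!(read_format & PERF_FORMAT_ID) ||
	    (read_format & PERF_FORMAT_GROUP))
		return -1;

	if (read(fd, buf, sizeof(buf)) == -1)
		return -1;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		idx++;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		idx++;

	*id = buf[idx];
	return 0;
}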
-struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
+struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
struct hlist_head *head;
struct perf_sample_id *sid;
int hash;
- if (evlist->nr_entries == 1)
- return perf_evlist__first(evlist);
-
hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
head = &evlist->heads[hash];
hlist_for_each_entry(sid, head, node)
if (sid->id == id)
- return sid->evsel;
+ return sid;
+
+ return NULL;
+}
+
+struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
+{
+ struct perf_sample_id *sid;
+
+ if (evlist->nr_entries == 1)
+ return perf_evlist__first(evlist);
+
+ sid = perf_evlist__id2sid(evlist, id);
+ if (sid)
+ return sid->evsel;
if (!perf_evlist__sample_id_all(evlist))
return perf_evlist__first(evlist);
@@ -338,6 +422,55 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
return NULL;
}
+static int perf_evlist__event2id(struct perf_evlist *evlist,
+ union perf_event *event, u64 *id)
+{
+ const u64 *array = event->sample.array;
+ ssize_t n;
+
+ n = (event->header.size - sizeof(event->header)) >> 3;
+
+ if (event->header.type == PERF_RECORD_SAMPLE) {
+ if (evlist->id_pos >= n)
+ return -1;
+ *id = array[evlist->id_pos];
+ } else {
+ if (evlist->is_pos > n)
+ return -1;
+ n -= evlist->is_pos;
+ *id = array[n];
+ }
+ return 0;
+}
+
+static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
+ union perf_event *event)
+{
+ struct hlist_head *head;
+ struct perf_sample_id *sid;
+ int hash;
+ u64 id;
+
+ if (evlist->nr_entries == 1)
+ return perf_evlist__first(evlist);
+
+ if (perf_evlist__event2id(evlist, event, &id))
+ return NULL;
+
+ /* Synthesized events have an id of zero */
+ if (!id)
+ return perf_evlist__first(evlist);
+
+ hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
+ head = &evlist->heads[hash];
+
+ hlist_for_each_entry(sid, head, node) {
+ if (sid->id == id)
+ return sid->evsel;
+ }
+ return NULL;
+}
+
union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
struct perf_mmap *md = &evlist->mmap[idx];
@@ -403,16 +536,20 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
return event;
}
+static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
+{
+ if (evlist->mmap[idx].base != NULL) {
+ munmap(evlist->mmap[idx].base, evlist->mmap_len);
+ evlist->mmap[idx].base = NULL;
+ }
+}
+
void perf_evlist__munmap(struct perf_evlist *evlist)
{
int i;
- for (i = 0; i < evlist->nr_mmaps; i++) {
- if (evlist->mmap[i].base != NULL) {
- munmap(evlist->mmap[i].base, evlist->mmap_len);
- evlist->mmap[i].base = NULL;
- }
- }
+ for (i = 0; i < evlist->nr_mmaps; i++)
+ __perf_evlist__munmap(evlist, i);
free(evlist->mmap);
evlist->mmap = NULL;
@@ -421,7 +558,7 @@ void perf_evlist__munmap(struct perf_evlist *evlist)
static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
- if (cpu_map__all(evlist->cpus))
+ if (cpu_map__empty(evlist->cpus))
evlist->nr_mmaps = thread_map__nr(evlist->threads);
evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
return evlist->mmap != NULL ? 0 : -ENOMEM;
@@ -450,6 +587,7 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
int nr_cpus = cpu_map__nr(evlist->cpus);
int nr_threads = thread_map__nr(evlist->threads);
+ pr_debug2("perf event ring buffer mmapped per cpu\n");
for (cpu = 0; cpu < nr_cpus; cpu++) {
int output = -1;
@@ -477,12 +615,8 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
return 0;
out_unmap:
- for (cpu = 0; cpu < nr_cpus; cpu++) {
- if (evlist->mmap[cpu].base != NULL) {
- munmap(evlist->mmap[cpu].base, evlist->mmap_len);
- evlist->mmap[cpu].base = NULL;
- }
- }
+ for (cpu = 0; cpu < nr_cpus; cpu++)
+ __perf_evlist__munmap(evlist, cpu);
return -1;
}
@@ -492,6 +626,7 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
int thread;
int nr_threads = thread_map__nr(evlist->threads);
+ pr_debug2("perf event ring buffer mmapped per thread\n");
for (thread = 0; thread < nr_threads; thread++) {
int output = -1;
@@ -517,12 +652,8 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
return 0;
out_unmap:
- for (thread = 0; thread < nr_threads; thread++) {
- if (evlist->mmap[thread].base != NULL) {
- munmap(evlist->mmap[thread].base, evlist->mmap_len);
- evlist->mmap[thread].base = NULL;
- }
- }
+ for (thread = 0; thread < nr_threads; thread++)
+ __perf_evlist__munmap(evlist, thread);
return -1;
}
@@ -573,7 +704,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
return -ENOMEM;
}
- if (cpu_map__all(cpus))
+ if (cpu_map__empty(cpus))
return perf_evlist__mmap_per_thread(evlist, prot, mask);
return perf_evlist__mmap_per_cpu(evlist, prot, mask);
@@ -650,20 +781,66 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
+ struct perf_evsel *pos;
+
+ if (evlist->nr_entries == 1)
+ return true;
+
+ if (evlist->id_pos < 0 || evlist->is_pos < 0)
+ return false;
+
+ list_for_each_entry(pos, &evlist->entries, node) {
+ if (pos->id_pos != evlist->id_pos ||
+ pos->is_pos != evlist->is_pos)
+ return false;
+ }
+
+ return true;
+}
+
+u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+
+ if (evlist->combined_sample_type)
+ return evlist->combined_sample_type;
+
+ list_for_each_entry(evsel, &evlist->entries, node)
+ evlist->combined_sample_type |= evsel->attr.sample_type;
+
+ return evlist->combined_sample_type;
+}
+
+u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
+{
+ evlist->combined_sample_type = 0;
+ return __perf_evlist__combined_sample_type(evlist);
+}
+
+bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
+{
struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
+ u64 read_format = first->attr.read_format;
+ u64 sample_type = first->attr.sample_type;
list_for_each_entry_continue(pos, &evlist->entries, node) {
- if (first->attr.sample_type != pos->attr.sample_type)
+ if (read_format != pos->attr.read_format)
return false;
}
+ /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
+ if ((sample_type & PERF_SAMPLE_READ) &&
+ !(read_format & PERF_FORMAT_ID)) {
+ return false;
+ }
+
return true;
}
-u64 perf_evlist__sample_type(struct perf_evlist *evlist)
+u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
struct perf_evsel *first = perf_evlist__first(evlist);
- return first->attr.sample_type;
+ return first->attr.read_format;
}
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
@@ -692,6 +869,9 @@ u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
if (sample_type & PERF_SAMPLE_CPU)
size += sizeof(data->cpu) * 2;
+
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ size += sizeof(data->id);
out:
return size;
}
@@ -783,13 +963,6 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
/*
- * Do a dummy execvp to get the PLT entry resolved,
- * so we avoid the resolver overhead on the real
- * execvp call.
- */
- execvp("", (char **)argv);
-
- /*
* Tell the parent we're ready to go
*/
close(child_ready_pipe[1]);
@@ -838,7 +1011,7 @@ out_close_ready_pipe:
int perf_evlist__start_workload(struct perf_evlist *evlist)
{
if (evlist->workload.cork_fd > 0) {
- char bf;
+ char bf = 0;
int ret;
/*
* Remove the cork, let it rip!
@@ -857,7 +1030,10 @@ int perf_evlist__start_workload(struct perf_evlist *evlist)
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
struct perf_sample *sample)
{
- struct perf_evsel *evsel = perf_evlist__first(evlist);
+ struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
+
+ if (!evsel)
+ return -EFAULT;
return perf_evsel__parse_sample(evsel, event, sample);
}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 0583d36252b..880d7139d2f 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -32,6 +32,9 @@ struct perf_evlist {
int nr_fds;
int nr_mmaps;
int mmap_len;
+ int id_pos;
+ int is_pos;
+ u64 combined_sample_type;
struct {
int cork_fd;
pid_t pid;
@@ -71,6 +74,10 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter);
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id);
+struct perf_evsel *
+perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
+ const char *name);
+
void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
int cpu, int thread, u64 id);
@@ -78,11 +85,15 @@ void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
+struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id);
+
union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx);
int perf_evlist__open(struct perf_evlist *evlist);
void perf_evlist__close(struct perf_evlist *evlist);
+void perf_evlist__set_id_pos(struct perf_evlist *evlist);
+bool perf_can_sample_identifier(void);
void perf_evlist__config(struct perf_evlist *evlist,
struct perf_record_opts *opts);
@@ -99,6 +110,11 @@ void perf_evlist__munmap(struct perf_evlist *evlist);
void perf_evlist__disable(struct perf_evlist *evlist);
void perf_evlist__enable(struct perf_evlist *evlist);
+int perf_evlist__disable_event(struct perf_evlist *evlist,
+ struct perf_evsel *evsel);
+int perf_evlist__enable_event(struct perf_evlist *evlist,
+ struct perf_evsel *evsel);
+
void perf_evlist__set_selected(struct perf_evlist *evlist,
struct perf_evsel *evsel);
@@ -118,7 +134,9 @@ int perf_evlist__apply_filters(struct perf_evlist *evlist);
void __perf_evlist__set_leader(struct list_head *list);
void perf_evlist__set_leader(struct perf_evlist *evlist);
-u64 perf_evlist__sample_type(struct perf_evlist *evlist);
+u64 perf_evlist__read_format(struct perf_evlist *evlist);
+u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist);
+u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist);
bool perf_evlist__sample_id_all(struct perf_evlist *evlist);
u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist);
@@ -127,6 +145,7 @@ int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *even
bool perf_evlist__valid_sample_type(struct perf_evlist *evlist);
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
+bool perf_evlist__valid_read_format(struct perf_evlist *evlist);
void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
struct list_head *list,
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index c9c7494506a..3612183e2cc 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -9,18 +9,20 @@
#include <byteswap.h>
#include <linux/bitops.h>
-#include "asm/bug.h"
#include <lk/debugfs.h>
-#include "event-parse.h"
+#include <traceevent/event-parse.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/perf_event.h>
+#include <sys/resource.h>
+#include "asm/bug.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
-#include <linux/hw_breakpoint.h>
-#include <linux/perf_event.h>
#include "perf_regs.h"
+#include "debug.h"
static struct {
bool sample_id_all;
@@ -29,7 +31,7 @@ static struct {
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
-static int __perf_evsel__sample_size(u64 sample_type)
+int __perf_evsel__sample_size(u64 sample_type)
{
u64 mask = sample_type & PERF_SAMPLE_MASK;
int size = 0;
@@ -45,6 +47,72 @@ static int __perf_evsel__sample_size(u64 sample_type)
return size;
}
+/**
+ * __perf_evsel__calc_id_pos - calculate id_pos.
+ * @sample_type: sample type
+ *
+ * This function returns the position of the event id (PERF_SAMPLE_ID or
+ * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
+ * sample_event.
+ */
+static int __perf_evsel__calc_id_pos(u64 sample_type)
+{
+ int idx = 0;
+
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ return 0;
+
+ if (!(sample_type & PERF_SAMPLE_ID))
+ return -1;
+
+ if (sample_type & PERF_SAMPLE_IP)
+ idx += 1;
+
+ if (sample_type & PERF_SAMPLE_TID)
+ idx += 1;
+
+ if (sample_type & PERF_SAMPLE_TIME)
+ idx += 1;
+
+ if (sample_type & PERF_SAMPLE_ADDR)
+ idx += 1;
+
+ return idx;
+}
+
+/**
+ * __perf_evsel__calc_is_pos - calculate is_pos.
+ * @sample_type: sample type
+ *
+ * This function returns the position (counting backwards) of the event id
+ * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
+ * sample_id_all is used there is an id sample appended to non-sample events.
+ */
+static int __perf_evsel__calc_is_pos(u64 sample_type)
+{
+ int idx = 1;
+
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ return 1;
+
+ if (!(sample_type & PERF_SAMPLE_ID))
+ return -1;
+
+ if (sample_type & PERF_SAMPLE_CPU)
+ idx += 1;
+
+ if (sample_type & PERF_SAMPLE_STREAM_ID)
+ idx += 1;
+
+ return idx;
+}
+
+void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
+{
+ evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
+ evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
+}
+
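A worked example of the two calculations above (illustrative values, not code from the patch): for sample_type = IP | TID | TIME | ID | CPU the sample payload is ip, pid/tid, time, id, cpu/res, so id_pos = 3, while the sample_id block appended to non-sample records is pid/tid, time, id, cpu/res, so is_pos = 2 counted from the end.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* PERF_RECORD_SAMPLE payload: ip, pid/tid, time, id, cpu/res */
	uint64_t sample[5] = { 0, 0, 0, 0x1234, 0 };
	int id_pos = 3;				/* from the front */

	/* sample_id block on other records: pid/tid, time, id, cpu/res */
	uint64_t id_block[4] = { 0, 0, 0x1234, 0 };
	int is_pos = 2;				/* from the back */

	assert(sample[id_pos] == 0x1234);
	assert(id_block[4 - is_pos] == 0x1234);
	return 0;
}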
void hists__init(struct hists *hists)
{
memset(hists, 0, sizeof(*hists));
@@ -61,6 +129,7 @@ void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
if (!(evsel->attr.sample_type & bit)) {
evsel->attr.sample_type |= bit;
evsel->sample_size += sizeof(u64);
+ perf_evsel__calc_id_pos(evsel);
}
}
@@ -70,12 +139,19 @@ void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
if (evsel->attr.sample_type & bit) {
evsel->attr.sample_type &= ~bit;
evsel->sample_size -= sizeof(u64);
+ perf_evsel__calc_id_pos(evsel);
}
}
-void perf_evsel__set_sample_id(struct perf_evsel *evsel)
+void perf_evsel__set_sample_id(struct perf_evsel *evsel,
+ bool can_sample_identifier)
{
- perf_evsel__set_sample_bit(evsel, ID);
+ if (can_sample_identifier) {
+ perf_evsel__reset_sample_bit(evsel, ID);
+ perf_evsel__set_sample_bit(evsel, IDENTIFIER);
+ } else {
+ perf_evsel__set_sample_bit(evsel, ID);
+ }
evsel->attr.read_format |= PERF_FORMAT_ID;
}
@@ -88,6 +164,7 @@ void perf_evsel__init(struct perf_evsel *evsel,
INIT_LIST_HEAD(&evsel->node);
hists__init(&evsel->hists);
evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
+ perf_evsel__calc_id_pos(evsel);
}
struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx)
@@ -246,6 +323,7 @@ const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
"major-faults",
"alignment-faults",
"emulation-faults",
+ "dummy",
};
static const char *__perf_evsel__sw_name(u64 config)
@@ -490,6 +568,7 @@ int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
void perf_evsel__config(struct perf_evsel *evsel,
struct perf_record_opts *opts)
{
+ struct perf_evsel *leader = evsel->leader;
struct perf_event_attr *attr = &evsel->attr;
int track = !evsel->idx; /* only the first counter needs these */
@@ -499,6 +578,25 @@ void perf_evsel__config(struct perf_evsel *evsel,
perf_evsel__set_sample_bit(evsel, IP);
perf_evsel__set_sample_bit(evsel, TID);
+ if (evsel->sample_read) {
+ perf_evsel__set_sample_bit(evsel, READ);
+
+ /*
+ * We need ID even in the case of a single event, because
+ * PERF_SAMPLE_READ processes ID-specific data.
+ */
+ perf_evsel__set_sample_id(evsel, false);
+
+ /*
+ * Apply the group format only if we belong to a group
+ * with more than one member.
+ */
+ if (leader->nr_members > 1) {
+ attr->read_format |= PERF_FORMAT_GROUP;
+ attr->inherit = 0;
+ }
+ }
+
/*
* We default some events to a 1 default interval. But keep
* it a weak assumption overridable by the user.
@@ -514,6 +612,15 @@ void perf_evsel__config(struct perf_evsel *evsel,
}
}
+ /*
+ * Disable sampling for all group members other
+ * than the leader when the leader 'leads' the sampling.
+ */
+ if ((leader != evsel) && leader->sample_read) {
+ attr->sample_freq = 0;
+ attr->sample_period = 0;
+ }
+
if (opts->no_samples)
attr->sample_freq = 0;
@@ -605,15 +712,15 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
return evsel->fd != NULL ? 0 : -ENOMEM;
}
-int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
- const char *filter)
+static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
+ int ioc, void *arg)
{
int cpu, thread;
for (cpu = 0; cpu < ncpus; cpu++) {
for (thread = 0; thread < nthreads; thread++) {
int fd = FD(evsel, cpu, thread),
- err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
+ err = ioctl(fd, ioc, arg);
if (err)
return err;
@@ -623,6 +730,21 @@ int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
return 0;
}
+int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
+ const char *filter)
+{
+ return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
+ PERF_EVENT_IOC_SET_FILTER,
+ (void *)filter);
+}
+
+int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
+{
+ return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
+ PERF_EVENT_IOC_ENABLE,
+ 0);
+}
+
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
@@ -817,12 +939,72 @@ static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
return fd;
}
+#define __PRINT_ATTR(fmt, cast, field) \
+ fprintf(fp, " %-19s "fmt"\n", #field, cast attr->field)
+
+#define PRINT_ATTR_U32(field) __PRINT_ATTR("%u" , , field)
+#define PRINT_ATTR_X32(field) __PRINT_ATTR("%#x", , field)
+#define PRINT_ATTR_U64(field) __PRINT_ATTR("%" PRIu64, (uint64_t), field)
+#define PRINT_ATTR_X64(field) __PRINT_ATTR("%#"PRIx64, (uint64_t), field)
+
+#define PRINT_ATTR2N(name1, field1, name2, field2) \
+ fprintf(fp, " %-19s %u %-19s %u\n", \
+ name1, attr->field1, name2, attr->field2)
+
+#define PRINT_ATTR2(field1, field2) \
+ PRINT_ATTR2N(#field1, field1, #field2, field2)
+
+static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp)
+{
+ size_t ret = 0;
+
+ ret += fprintf(fp, "%.60s\n", graph_dotted_line);
+ ret += fprintf(fp, "perf_event_attr:\n");
+
+ ret += PRINT_ATTR_U32(type);
+ ret += PRINT_ATTR_U32(size);
+ ret += PRINT_ATTR_X64(config);
+ ret += PRINT_ATTR_U64(sample_period);
+ ret += PRINT_ATTR_U64(sample_freq);
+ ret += PRINT_ATTR_X64(sample_type);
+ ret += PRINT_ATTR_X64(read_format);
+
+ ret += PRINT_ATTR2(disabled, inherit);
+ ret += PRINT_ATTR2(pinned, exclusive);
+ ret += PRINT_ATTR2(exclude_user, exclude_kernel);
+ ret += PRINT_ATTR2(exclude_hv, exclude_idle);
+ ret += PRINT_ATTR2(mmap, comm);
+ ret += PRINT_ATTR2(freq, inherit_stat);
+ ret += PRINT_ATTR2(enable_on_exec, task);
+ ret += PRINT_ATTR2(watermark, precise_ip);
+ ret += PRINT_ATTR2(mmap_data, sample_id_all);
+ ret += PRINT_ATTR2(exclude_host, exclude_guest);
+ ret += PRINT_ATTR2N("excl.callchain_kern", exclude_callchain_kernel,
+ "excl.callchain_user", exclude_callchain_user);
+
+ ret += PRINT_ATTR_U32(wakeup_events);
+ ret += PRINT_ATTR_U32(wakeup_watermark);
+ ret += PRINT_ATTR_X32(bp_type);
+ ret += PRINT_ATTR_X64(bp_addr);
+ ret += PRINT_ATTR_X64(config1);
+ ret += PRINT_ATTR_U64(bp_len);
+ ret += PRINT_ATTR_X64(config2);
+ ret += PRINT_ATTR_X64(branch_sample_type);
+ ret += PRINT_ATTR_X64(sample_regs_user);
+ ret += PRINT_ATTR_U32(sample_stack_user);
+
+ ret += fprintf(fp, "%.60s\n", graph_dotted_line);
+
+ return ret;
+}
+
static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
struct thread_map *threads)
{
int cpu, thread;
unsigned long flags = 0;
int pid = -1, err;
+ enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
if (evsel->fd == NULL &&
perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
@@ -840,6 +1022,9 @@ retry_sample_id:
if (perf_missing_features.sample_id_all)
evsel->attr.sample_id_all = 0;
+ if (verbose >= 2)
+ perf_event_attr__fprintf(&evsel->attr, stderr);
+
for (cpu = 0; cpu < cpus->nr; cpu++) {
for (thread = 0; thread < threads->nr; thread++) {
@@ -849,6 +1034,9 @@ retry_sample_id:
pid = threads->map[thread];
group_fd = get_group_fd(evsel, cpu, thread);
+retry_open:
+ pr_debug2("perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n",
+ pid, cpus->map[cpu], group_fd, flags);
FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
pid,
@@ -858,12 +1046,37 @@ retry_sample_id:
err = -errno;
goto try_fallback;
}
+ set_rlimit = NO_CHANGE;
}
}
return 0;
try_fallback:
+ /*
+ * perf stat needs between 5 and 22 fds per CPU. When we run out
+ * of them, try to increase the limits.
+ */
+ if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
+ struct rlimit l;
+ int old_errno = errno;
+
+ if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
+ if (set_rlimit == NO_CHANGE)
+ l.rlim_cur = l.rlim_max;
+ else {
+ l.rlim_cur = l.rlim_max + 1000;
+ l.rlim_max = l.rlim_cur;
+ }
+ if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
+ set_rlimit++;
+ errno = old_errno;
+ goto retry_open;
+ }
+ }
+ errno = old_errno;
+ }
+
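The escalation above is deliberately two-step: first raise the soft limit to the hard limit (always allowed), and only if perf_event_open() still returns EMFILE try to grow both limits, which needs privilege. A hedged, stand-alone sketch of the same pattern; the function name is an assumption.

#include <errno.h>
#include <sys/resource.h>

/* step 0: soft -> hard; step 1: grow both (needs CAP_SYS_RESOURCE). */
static int sketch_bump_nofile(int step)
{
	struct rlimit l;

	if (getrlimit(RLIMIT_NOFILE, &l))
		return -errno;

	if (step == 0) {
		l.rlim_cur = l.rlim_max;
	} else {
		l.rlim_cur = l.rlim_max + 1000;
		l.rlim_max = l.rlim_cur;
	}

	return setrlimit(RLIMIT_NOFILE, &l) ? -errno : 0;
}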
if (err != -EINVAL || cpu > 0 || thread > 0)
goto out_close;
@@ -951,6 +1164,11 @@ static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
array += ((event->header.size -
sizeof(event->header)) / sizeof(u64)) - 1;
+ if (type & PERF_SAMPLE_IDENTIFIER) {
+ sample->id = *array;
+ array--;
+ }
+
if (type & PERF_SAMPLE_CPU) {
u.val64 = *array;
if (swapped) {
@@ -994,24 +1212,30 @@ static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
return 0;
}
-static bool sample_overlap(const union perf_event *event,
- const void *offset, u64 size)
+static inline bool overflow(const void *endp, u16 max_size, const void *offset,
+ u64 size)
{
- const void *base = event;
+ return size > max_size || offset + size > endp;
+}
- if (offset + size > base + event->header.size)
- return true;
+#define OVERFLOW_CHECK(offset, size, max_size) \
+ do { \
+ if (overflow(endp, (max_size), (offset), (size))) \
+ return -EFAULT; \
+ } while (0)
- return false;
-}
+#define OVERFLOW_CHECK_u64(offset) \
+ OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
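These helpers replace sample_overlap(): every variable-length field is now bounds-checked against both the u64 being read and the end of the event before it is consumed. A minimal, hedged illustration of the same pattern on a length-prefixed blob (names are illustrative, not from the patch).

#include <stddef.h>
#include <stdint.h>

static int sketch_parse_blob(const void *event, uint16_t ev_size,
			     const void **blob, uint64_t *blob_sz)
{
	const void *endp = (const char *)event + ev_size;
	const uint64_t *array = event;

	/* OVERFLOW_CHECK_u64(): the size word itself must fit. */
	if (sizeof(uint64_t) > ev_size || (const void *)(array + 1) > endp)
		return -1;
	*blob_sz = *array++;

	/* OVERFLOW_CHECK(): so must the payload it announces. */
	if (*blob_sz > ev_size ||
	    (const char *)array + *blob_sz > (const char *)endp)
		return -1;
	*blob = array;

	return 0;
}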
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
struct perf_sample *data)
{
u64 type = evsel->attr.sample_type;
- u64 regs_user = evsel->attr.sample_regs_user;
bool swapped = evsel->needs_swap;
const u64 *array;
+ u16 max_size = event->header.size;
+ const void *endp = (void *)event + max_size;
+ u64 sz;
/*
* used for cross-endian analysis. See git commit 65014ab3
@@ -1033,11 +1257,22 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
array = event->sample.array;
+ /*
+ * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
+ * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
+ * check the format does not go past the end of the event.
+ */
if (evsel->sample_size + sizeof(event->header) > event->header.size)
return -EFAULT;
+ data->id = -1ULL;
+ if (type & PERF_SAMPLE_IDENTIFIER) {
+ data->id = *array;
+ array++;
+ }
+
if (type & PERF_SAMPLE_IP) {
- data->ip = event->ip.ip;
+ data->ip = *array;
array++;
}
@@ -1066,7 +1301,6 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
array++;
}
- data->id = -1ULL;
if (type & PERF_SAMPLE_ID) {
data->id = *array;
array++;
@@ -1096,25 +1330,62 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
}
if (type & PERF_SAMPLE_READ) {
- fprintf(stderr, "PERF_SAMPLE_READ is unsupported for now\n");
- return -1;
+ u64 read_format = evsel->attr.read_format;
+
+ OVERFLOW_CHECK_u64(array);
+ if (read_format & PERF_FORMAT_GROUP)
+ data->read.group.nr = *array;
+ else
+ data->read.one.value = *array;
+
+ array++;
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+ OVERFLOW_CHECK_u64(array);
+ data->read.time_enabled = *array;
+ array++;
+ }
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+ OVERFLOW_CHECK_u64(array);
+ data->read.time_running = *array;
+ array++;
+ }
+
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ if (read_format & PERF_FORMAT_GROUP) {
+ const u64 max_group_nr = UINT64_MAX /
+ sizeof(struct sample_read_value);
+
+ if (data->read.group.nr > max_group_nr)
+ return -EFAULT;
+ sz = data->read.group.nr *
+ sizeof(struct sample_read_value);
+ OVERFLOW_CHECK(array, sz, max_size);
+ data->read.group.values =
+ (struct sample_read_value *)array;
+ array = (void *)array + sz;
+ } else {
+ OVERFLOW_CHECK_u64(array);
+ data->read.one.id = *array;
+ array++;
+ }
}
if (type & PERF_SAMPLE_CALLCHAIN) {
- if (sample_overlap(event, array, sizeof(data->callchain->nr)))
- return -EFAULT;
-
- data->callchain = (struct ip_callchain *)array;
+ const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
- if (sample_overlap(event, array, data->callchain->nr))
+ OVERFLOW_CHECK_u64(array);
+ data->callchain = (struct ip_callchain *)array++;
+ if (data->callchain->nr > max_callchain_nr)
return -EFAULT;
-
- array += 1 + data->callchain->nr;
+ sz = data->callchain->nr * sizeof(u64);
+ OVERFLOW_CHECK(array, sz, max_size);
+ array = (void *)array + sz;
}
if (type & PERF_SAMPLE_RAW) {
- const u64 *pdata;
-
+ OVERFLOW_CHECK_u64(array);
u.val64 = *array;
if (WARN_ONCE(swapped,
"Endianness of raw data not corrected!\n")) {
@@ -1123,65 +1394,71 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
u.val32[0] = bswap_32(u.val32[0]);
u.val32[1] = bswap_32(u.val32[1]);
}
-
- if (sample_overlap(event, array, sizeof(u32)))
- return -EFAULT;
-
data->raw_size = u.val32[0];
- pdata = (void *) array + sizeof(u32);
+ array = (void *)array + sizeof(u32);
- if (sample_overlap(event, pdata, data->raw_size))
- return -EFAULT;
-
- data->raw_data = (void *) pdata;
-
- array = (void *)array + data->raw_size + sizeof(u32);
+ OVERFLOW_CHECK(array, data->raw_size, max_size);
+ data->raw_data = (void *)array;
+ array = (void *)array + data->raw_size;
}
if (type & PERF_SAMPLE_BRANCH_STACK) {
- u64 sz;
+ const u64 max_branch_nr = UINT64_MAX /
+ sizeof(struct branch_entry);
- data->branch_stack = (struct branch_stack *)array;
- array++; /* nr */
+ OVERFLOW_CHECK_u64(array);
+ data->branch_stack = (struct branch_stack *)array++;
+ if (data->branch_stack->nr > max_branch_nr)
+ return -EFAULT;
sz = data->branch_stack->nr * sizeof(struct branch_entry);
- sz /= sizeof(u64);
- array += sz;
+ OVERFLOW_CHECK(array, sz, max_size);
+ array = (void *)array + sz;
}
if (type & PERF_SAMPLE_REGS_USER) {
- /* First u64 tells us if we have any regs in sample. */
- u64 avail = *array++;
+ OVERFLOW_CHECK_u64(array);
+ data->user_regs.abi = *array;
+ array++;
+
+ if (data->user_regs.abi) {
+ u64 regs_user = evsel->attr.sample_regs_user;
- if (avail) {
+ sz = hweight_long(regs_user) * sizeof(u64);
+ OVERFLOW_CHECK(array, sz, max_size);
data->user_regs.regs = (u64 *)array;
- array += hweight_long(regs_user);
+ array = (void *)array + sz;
}
}
if (type & PERF_SAMPLE_STACK_USER) {
- u64 size = *array++;
+ OVERFLOW_CHECK_u64(array);
+ sz = *array++;
data->user_stack.offset = ((char *)(array - 1)
- (char *) event);
- if (!size) {
+ if (!sz) {
data->user_stack.size = 0;
} else {
+ OVERFLOW_CHECK(array, sz, max_size);
data->user_stack.data = (char *)array;
- array += size / sizeof(*array);
+ array = (void *)array + sz;
+ OVERFLOW_CHECK_u64(array);
data->user_stack.size = *array++;
}
}
data->weight = 0;
if (type & PERF_SAMPLE_WEIGHT) {
+ OVERFLOW_CHECK_u64(array);
data->weight = *array;
array++;
}
data->data_src = PERF_MEM_DATA_SRC_NONE;
if (type & PERF_SAMPLE_DATA_SRC) {
+ OVERFLOW_CHECK_u64(array);
data->data_src = *array;
array++;
}
@@ -1189,12 +1466,105 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
return 0;
}
+size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
+ u64 sample_regs_user, u64 read_format)
+{
+ size_t sz, result = sizeof(struct sample_event);
+
+ if (type & PERF_SAMPLE_IDENTIFIER)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_IP)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_TID)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_TIME)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_ADDR)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_ID)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_STREAM_ID)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_CPU)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_PERIOD)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_READ) {
+ result += sizeof(u64);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ result += sizeof(u64);
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ result += sizeof(u64);
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ if (read_format & PERF_FORMAT_GROUP) {
+ sz = sample->read.group.nr *
+ sizeof(struct sample_read_value);
+ result += sz;
+ } else {
+ result += sizeof(u64);
+ }
+ }
+
+ if (type & PERF_SAMPLE_CALLCHAIN) {
+ sz = (sample->callchain->nr + 1) * sizeof(u64);
+ result += sz;
+ }
+
+ if (type & PERF_SAMPLE_RAW) {
+ result += sizeof(u32);
+ result += sample->raw_size;
+ }
+
+ if (type & PERF_SAMPLE_BRANCH_STACK) {
+ sz = sample->branch_stack->nr * sizeof(struct branch_entry);
+ sz += sizeof(u64);
+ result += sz;
+ }
+
+ if (type & PERF_SAMPLE_REGS_USER) {
+ if (sample->user_regs.abi) {
+ result += sizeof(u64);
+ sz = hweight_long(sample_regs_user) * sizeof(u64);
+ result += sz;
+ } else {
+ result += sizeof(u64);
+ }
+ }
+
+ if (type & PERF_SAMPLE_STACK_USER) {
+ sz = sample->user_stack.size;
+ result += sizeof(u64);
+ if (sz) {
+ result += sz;
+ result += sizeof(u64);
+ }
+ }
+
+ if (type & PERF_SAMPLE_WEIGHT)
+ result += sizeof(u64);
+
+ if (type & PERF_SAMPLE_DATA_SRC)
+ result += sizeof(u64);
+
+ return result;
+}
+
int perf_event__synthesize_sample(union perf_event *event, u64 type,
+ u64 sample_regs_user, u64 read_format,
const struct perf_sample *sample,
bool swapped)
{
u64 *array;
-
+ size_t sz;
/*
* used for cross-endian analysis. See git commit 65014ab3
* for why this goofiness is needed.
@@ -1203,8 +1573,13 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
array = event->sample.array;
+ if (type & PERF_SAMPLE_IDENTIFIER) {
+ *array = sample->id;
+ array++;
+ }
+
if (type & PERF_SAMPLE_IP) {
- event->ip.ip = sample->ip;
+ *array = sample->ip;
array++;
}
@@ -1262,6 +1637,97 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type,
array++;
}
+ if (type & PERF_SAMPLE_READ) {
+ if (read_format & PERF_FORMAT_GROUP)
+ *array = sample->read.group.nr;
+ else
+ *array = sample->read.one.value;
+ array++;
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+ *array = sample->read.time_enabled;
+ array++;
+ }
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+ *array = sample->read.time_running;
+ array++;
+ }
+
+ /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
+ if (read_format & PERF_FORMAT_GROUP) {
+ sz = sample->read.group.nr *
+ sizeof(struct sample_read_value);
+ memcpy(array, sample->read.group.values, sz);
+ array = (void *)array + sz;
+ } else {
+ *array = sample->read.one.id;
+ array++;
+ }
+ }
+
+ if (type & PERF_SAMPLE_CALLCHAIN) {
+ sz = (sample->callchain->nr + 1) * sizeof(u64);
+ memcpy(array, sample->callchain, sz);
+ array = (void *)array + sz;
+ }
+
+ if (type & PERF_SAMPLE_RAW) {
+ u.val32[0] = sample->raw_size;
+ if (WARN_ONCE(swapped,
+ "Endianness of raw data not corrected!\n")) {
+ /*
+ * Inverse of what is done in perf_evsel__parse_sample
+ */
+ u.val32[0] = bswap_32(u.val32[0]);
+ u.val32[1] = bswap_32(u.val32[1]);
+ u.val64 = bswap_64(u.val64);
+ }
+ *array = u.val64;
+ array = (void *)array + sizeof(u32);
+
+ memcpy(array, sample->raw_data, sample->raw_size);
+ array = (void *)array + sample->raw_size;
+ }
+
+ if (type & PERF_SAMPLE_BRANCH_STACK) {
+ sz = sample->branch_stack->nr * sizeof(struct branch_entry);
+ sz += sizeof(u64);
+ memcpy(array, sample->branch_stack, sz);
+ array = (void *)array + sz;
+ }
+
+ if (type & PERF_SAMPLE_REGS_USER) {
+ if (sample->user_regs.abi) {
+ *array++ = sample->user_regs.abi;
+ sz = hweight_long(sample_regs_user) * sizeof(u64);
+ memcpy(array, sample->user_regs.regs, sz);
+ array = (void *)array + sz;
+ } else {
+ *array++ = 0;
+ }
+ }
+
+ if (type & PERF_SAMPLE_STACK_USER) {
+ sz = sample->user_stack.size;
+ *array++ = sz;
+ if (sz) {
+ memcpy(array, sample->user_stack.data, sz);
+ array = (void *)array + sz;
+ *array++ = sz;
+ }
+ }
+
+ if (type & PERF_SAMPLE_WEIGHT) {
+ *array = sample->weight;
+ array++;
+ }
+
+ if (type & PERF_SAMPLE_DATA_SRC) {
+ *array = sample->data_src;
+ array++;
+ }
+
return 0;
}
@@ -1391,6 +1857,7 @@ static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
+ bit_name(IDENTIFIER),
{ .name = NULL, }
};
#undef bit_name
@@ -1482,7 +1949,7 @@ out:
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
char *msg, size_t msgsize)
{
- if ((err == ENOENT || err == ENXIO) &&
+ if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
evsel->attr.type == PERF_TYPE_HARDWARE &&
evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
/*
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 3f156ccc1ac..4a7bdc713ba 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -38,6 +38,9 @@ struct perf_sample_id {
struct hlist_node node;
u64 id;
struct perf_evsel *evsel;
+
+ /* Holds total ID period value for PERF_SAMPLE_READ processing. */
+ u64 period;
};
/** struct perf_evsel - event selector
@@ -45,6 +48,12 @@ struct perf_sample_id {
* @name - Can be set to retain the original event name passed by the user,
* so that when showing results in tools such as 'perf stat', we
* show the name used, not some alias.
+ * @id_pos: the position of the event id (PERF_SAMPLE_ID or
+ * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of
+ * struct sample_event
+ * @is_pos: the position (counting backwards) of the event id (PERF_SAMPLE_ID or
+ * PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if sample_id_all
+ * is used there is an id sample appended to non-sample events
*/
struct perf_evsel {
struct list_head node;
@@ -71,11 +80,14 @@ struct perf_evsel {
} handler;
struct cpu_map *cpus;
unsigned int sample_size;
+ int id_pos;
+ int is_pos;
bool supported;
bool needs_swap;
/* parse modifier helper */
int exclude_GH;
int nr_members;
+ int sample_read;
struct perf_evsel *leader;
char *group_name;
};
@@ -100,6 +112,9 @@ void perf_evsel__delete(struct perf_evsel *evsel);
void perf_evsel__config(struct perf_evsel *evsel,
struct perf_record_opts *opts);
+int __perf_evsel__sample_size(u64 sample_type);
+void perf_evsel__calc_id_pos(struct perf_evsel *evsel);
+
bool perf_evsel__is_cache_op_valid(u8 type, u8 op);
#define PERF_EVSEL__MAX_ALIASES 8
@@ -138,10 +153,12 @@ void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
#define perf_evsel__reset_sample_bit(evsel, bit) \
__perf_evsel__reset_sample_bit(evsel, PERF_SAMPLE_##bit)
-void perf_evsel__set_sample_id(struct perf_evsel *evsel);
+void perf_evsel__set_sample_id(struct perf_evsel *evsel,
+ bool use_sample_identifier);
int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
const char *filter);
+int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads);
int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
struct cpu_map *cpus);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index a4dafbee251..a33197a4fd2 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -25,41 +25,9 @@
static bool no_buildid_cache = false;
-static int trace_event_count;
-static struct perf_trace_event_type *trace_events;
-
static u32 header_argc;
static const char **header_argv;
-int perf_header__push_event(u64 id, const char *name)
-{
- struct perf_trace_event_type *nevents;
-
- if (strlen(name) > MAX_EVENT_NAME)
- pr_warning("Event %s will be truncated\n", name);
-
- nevents = realloc(trace_events, (trace_event_count + 1) * sizeof(*trace_events));
- if (nevents == NULL)
- return -ENOMEM;
- trace_events = nevents;
-
- memset(&trace_events[trace_event_count], 0, sizeof(struct perf_trace_event_type));
- trace_events[trace_event_count].event_id = id;
- strncpy(trace_events[trace_event_count].name, name, MAX_EVENT_NAME - 1);
- trace_event_count++;
- return 0;
-}
-
-char *perf_header__find_event(u64 id)
-{
- int i;
- for (i = 0 ; i < trace_event_count; i++) {
- if (trace_events[i].event_id == id)
- return trace_events[i].name;
- }
- return NULL;
-}
-
/*
* magic2 = "PERFILE2"
* must be a numerical value to let the endianness
@@ -748,18 +716,19 @@ static int build_cpu_topo(struct cpu_topo *tp, int cpu)
char filename[MAXPATHLEN];
char *buf = NULL, *p;
size_t len = 0;
+ ssize_t sret;
u32 i = 0;
int ret = -1;
sprintf(filename, CORE_SIB_FMT, cpu);
fp = fopen(filename, "r");
if (!fp)
- return -1;
-
- if (getline(&buf, &len, fp) <= 0)
- goto done;
+ goto try_threads;
+ sret = getline(&buf, &len, fp);
fclose(fp);
+ if (sret <= 0)
+ goto try_threads;
p = strchr(buf, '\n');
if (p)
@@ -775,7 +744,9 @@ static int build_cpu_topo(struct cpu_topo *tp, int cpu)
buf = NULL;
len = 0;
}
+ ret = 0;
+try_threads:
sprintf(filename, THRD_SIB_FMT, cpu);
fp = fopen(filename, "r");
if (!fp)
@@ -2257,7 +2228,7 @@ static int perf_header__adds_write(struct perf_header *header,
sec_size = sizeof(*feat_sec) * nr_sections;
- sec_start = header->data_offset + header->data_size;
+ sec_start = header->feat_offset;
lseek(fd, sec_start + sec_size, SEEK_SET);
for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
@@ -2304,6 +2275,7 @@ int perf_session__write_header(struct perf_session *session,
struct perf_file_attr f_attr;
struct perf_header *header = &session->header;
struct perf_evsel *evsel;
+ u64 attr_offset;
int err;
lseek(fd, sizeof(f_header), SEEK_SET);
@@ -2317,7 +2289,7 @@ int perf_session__write_header(struct perf_session *session,
}
}
- header->attr_offset = lseek(fd, 0, SEEK_CUR);
+ attr_offset = lseek(fd, 0, SEEK_CUR);
list_for_each_entry(evsel, &evlist->entries, node) {
f_attr = (struct perf_file_attr){
@@ -2334,17 +2306,8 @@ int perf_session__write_header(struct perf_session *session,
}
}
- header->event_offset = lseek(fd, 0, SEEK_CUR);
- header->event_size = trace_event_count * sizeof(struct perf_trace_event_type);
- if (trace_events) {
- err = do_write(fd, trace_events, header->event_size);
- if (err < 0) {
- pr_debug("failed to write perf header events\n");
- return err;
- }
- }
-
header->data_offset = lseek(fd, 0, SEEK_CUR);
+ header->feat_offset = header->data_offset + header->data_size;
if (at_exit) {
err = perf_header__adds_write(header, evlist, fd);
@@ -2357,17 +2320,14 @@ int perf_session__write_header(struct perf_session *session,
.size = sizeof(f_header),
.attr_size = sizeof(f_attr),
.attrs = {
- .offset = header->attr_offset,
+ .offset = attr_offset,
.size = evlist->nr_entries * sizeof(f_attr),
},
.data = {
.offset = header->data_offset,
.size = header->data_size,
},
- .event_types = {
- .offset = header->event_offset,
- .size = header->event_size,
- },
+ /* event_types is ignored, store zeros */
};
memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));
@@ -2417,7 +2377,7 @@ int perf_header__process_sections(struct perf_header *header, int fd,
sec_size = sizeof(*feat_sec) * nr_sections;
- lseek(fd, header->data_offset + header->data_size, SEEK_SET);
+ lseek(fd, header->feat_offset, SEEK_SET);
err = perf_header__getbuffer64(header, fd, feat_sec, sec_size);
if (err < 0)
@@ -2523,6 +2483,7 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz,
/* check for legacy format */
ret = memcmp(&magic, __perf_magic1, sizeof(magic));
if (ret == 0) {
+ ph->version = PERF_HEADER_VERSION_1;
pr_debug("legacy perf.data format\n");
if (is_pipe)
return try_all_pipe_abis(hdr_sz, ph);
@@ -2544,6 +2505,7 @@ static int check_magic_endian(u64 magic, uint64_t hdr_sz,
return -1;
ph->needs_swap = true;
+ ph->version = PERF_HEADER_VERSION_2;
return 0;
}
@@ -2614,10 +2576,9 @@ int perf_file_header__read(struct perf_file_header *header,
memcpy(&ph->adds_features, &header->adds_features,
sizeof(ph->adds_features));
- ph->event_offset = header->event_types.offset;
- ph->event_size = header->event_types.size;
ph->data_offset = header->data.offset;
ph->data_size = header->data.size;
+ ph->feat_offset = header->data.offset + header->data.size;
return 0;
}
@@ -2666,19 +2627,17 @@ static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
return 0;
}
-static int perf_header__read_pipe(struct perf_session *session, int fd)
+static int perf_header__read_pipe(struct perf_session *session)
{
struct perf_header *header = &session->header;
struct perf_pipe_file_header f_header;
- if (perf_file_header__read_pipe(&f_header, header, fd,
+ if (perf_file_header__read_pipe(&f_header, header, session->fd,
session->repipe) < 0) {
pr_debug("incompatible file format\n");
return -EINVAL;
}
- session->fd = fd;
-
return 0;
}
@@ -2772,20 +2731,21 @@ static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist,
return 0;
}
-int perf_session__read_header(struct perf_session *session, int fd)
+int perf_session__read_header(struct perf_session *session)
{
struct perf_header *header = &session->header;
struct perf_file_header f_header;
struct perf_file_attr f_attr;
u64 f_id;
int nr_attrs, nr_ids, i, j;
+ int fd = session->fd;
session->evlist = perf_evlist__new();
if (session->evlist == NULL)
return -ENOMEM;
if (session->fd_pipe)
- return perf_header__read_pipe(session, fd);
+ return perf_header__read_pipe(session);
if (perf_file_header__read(&f_header, header, fd) < 0)
return -EINVAL;
@@ -2839,22 +2799,9 @@ int perf_session__read_header(struct perf_session *session, int fd)
symbol_conf.nr_events = nr_attrs;
- if (f_header.event_types.size) {
- lseek(fd, f_header.event_types.offset, SEEK_SET);
- trace_events = malloc(f_header.event_types.size);
- if (trace_events == NULL)
- return -ENOMEM;
- if (perf_header__getbuffer64(header, fd, trace_events,
- f_header.event_types.size))
- goto out_errno;
- trace_event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
- }
-
perf_header__process_sections(header, fd, &session->pevent,
perf_file_section__process);
- lseek(fd, header->data_offset, SEEK_SET);
-
if (perf_evlist__prepare_tracepoint_events(session->evlist,
session->pevent))
goto out_delete_evlist;
@@ -2922,7 +2869,8 @@ int perf_event__synthesize_attrs(struct perf_tool *tool,
return err;
}
-int perf_event__process_attr(union perf_event *event,
+int perf_event__process_attr(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
struct perf_evlist **pevlist)
{
u32 i, ids, n_ids;
@@ -2961,64 +2909,6 @@ int perf_event__process_attr(union perf_event *event,
return 0;
}
-int perf_event__synthesize_event_type(struct perf_tool *tool,
- u64 event_id, char *name,
- perf_event__handler_t process,
- struct machine *machine)
-{
- union perf_event ev;
- size_t size = 0;
- int err = 0;
-
- memset(&ev, 0, sizeof(ev));
-
- ev.event_type.event_type.event_id = event_id;
- memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
- strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);
-
- ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
- size = strlen(ev.event_type.event_type.name);
- size = PERF_ALIGN(size, sizeof(u64));
- ev.event_type.header.size = sizeof(ev.event_type) -
- (sizeof(ev.event_type.event_type.name) - size);
-
- err = process(tool, &ev, NULL, machine);
-
- return err;
-}
-
-int perf_event__synthesize_event_types(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine)
-{
- struct perf_trace_event_type *type;
- int i, err = 0;
-
- for (i = 0; i < trace_event_count; i++) {
- type = &trace_events[i];
-
- err = perf_event__synthesize_event_type(tool, type->event_id,
- type->name, process,
- machine);
- if (err) {
- pr_debug("failed to create perf header event type\n");
- return err;
- }
- }
-
- return err;
-}
-
-int perf_event__process_event_type(struct perf_tool *tool __maybe_unused,
- union perf_event *event)
-{
- if (perf_header__push_event(event->event_type.event_type.event_id,
- event->event_type.event_type.name) < 0)
- return -ENOMEM;
-
- return 0;
-}
-
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
struct perf_evlist *evlist,
perf_event__handler_t process)
@@ -3065,7 +2955,8 @@ int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd,
return aligned_size;
}
-int perf_event__process_tracing_data(union perf_event *event,
+int perf_event__process_tracing_data(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
struct perf_session *session)
{
ssize_t size_read, padding, size = event->tracing_data.size;
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 16a3e83c584..307c9aed972 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -34,6 +34,11 @@ enum {
HEADER_FEAT_BITS = 256,
};
+enum perf_header_version {
+ PERF_HEADER_VERSION_1,
+ PERF_HEADER_VERSION_2,
+};
+
struct perf_file_section {
u64 offset;
u64 size;
@@ -45,6 +50,7 @@ struct perf_file_header {
u64 attr_size;
struct perf_file_section attrs;
struct perf_file_section data;
+ /* event_types is ignored */
struct perf_file_section event_types;
DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
};
@@ -84,28 +90,24 @@ struct perf_session_env {
};
struct perf_header {
- bool needs_swap;
- s64 attr_offset;
- u64 data_offset;
- u64 data_size;
- u64 event_offset;
- u64 event_size;
+ enum perf_header_version version;
+ bool needs_swap;
+ u64 data_offset;
+ u64 data_size;
+ u64 feat_offset;
DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS);
- struct perf_session_env env;
+ struct perf_session_env env;
};
struct perf_evlist;
struct perf_session;
-int perf_session__read_header(struct perf_session *session, int fd);
+int perf_session__read_header(struct perf_session *session);
int perf_session__write_header(struct perf_session *session,
struct perf_evlist *evlist,
int fd, bool at_exit);
int perf_header__write_pipe(int fd);
-int perf_header__push_event(u64 id, const char *name);
-char *perf_header__find_event(u64 id);
-
void perf_header__set_feat(struct perf_header *header, int feat);
void perf_header__clear_feat(struct perf_header *header, int feat);
bool perf_header__has_feat(const struct perf_header *header, int feat);
@@ -130,22 +132,14 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
int perf_event__synthesize_attrs(struct perf_tool *tool,
struct perf_session *session,
perf_event__handler_t process);
-int perf_event__process_attr(union perf_event *event, struct perf_evlist **pevlist);
-
-int perf_event__synthesize_event_type(struct perf_tool *tool,
- u64 event_id, char *name,
- perf_event__handler_t process,
- struct machine *machine);
-int perf_event__synthesize_event_types(struct perf_tool *tool,
- perf_event__handler_t process,
- struct machine *machine);
-int perf_event__process_event_type(struct perf_tool *tool,
- union perf_event *event);
+int perf_event__process_attr(struct perf_tool *tool, union perf_event *event,
+ struct perf_evlist **pevlist);
int perf_event__synthesize_tracing_data(struct perf_tool *tool,
int fd, struct perf_evlist *evlist,
perf_event__handler_t process);
-int perf_event__process_tracing_data(union perf_event *event,
+int perf_event__process_tracing_data(struct perf_tool *tool,
+ union perf_event *event,
struct perf_session *session);
int perf_event__synthesize_build_id(struct perf_tool *tool,
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index b11a6cfdb41..46a0d35a05e 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -24,7 +24,8 @@ enum hist_filter {
struct callchain_param callchain_param = {
.mode = CHAIN_GRAPH_REL,
.min_percent = 0.5,
- .order = ORDER_CALLEE
+ .order = ORDER_CALLEE,
+ .key = CCKEY_FUNCTION
};
u16 hists__col_len(struct hists *hists, enum hist_column col)
@@ -912,6 +913,7 @@ static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color(&he->rb_node_in, root);
hists__inc_nr_entries(hists, he);
+ he->dummy = true;
}
out:
return he;
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 2d3790fd99b..1329b6b6ffe 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -141,10 +141,12 @@ struct perf_hpp {
};
struct perf_hpp_fmt {
- int (*header)(struct perf_hpp *hpp);
- int (*width)(struct perf_hpp *hpp);
- int (*color)(struct perf_hpp *hpp, struct hist_entry *he);
- int (*entry)(struct perf_hpp *hpp, struct hist_entry *he);
+ int (*header)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp);
+ int (*width)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp);
+ int (*color)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he);
+ int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
+ struct hist_entry *he);
struct list_head list;
};
@@ -157,7 +159,7 @@ extern struct list_head perf_hpp__list;
extern struct perf_hpp_fmt perf_hpp__format[];
enum {
- PERF_HPP__BASELINE,
+ /* Matches perf_hpp__format array. */
PERF_HPP__OVERHEAD,
PERF_HPP__OVERHEAD_SYS,
PERF_HPP__OVERHEAD_US,
@@ -165,11 +167,6 @@ enum {
PERF_HPP__OVERHEAD_GUEST_US,
PERF_HPP__SAMPLES,
PERF_HPP__PERIOD,
- PERF_HPP__PERIOD_BASELINE,
- PERF_HPP__DELTA,
- PERF_HPP__RATIO,
- PERF_HPP__WEIGHTED_DIFF,
- PERF_HPP__FORMULA,
PERF_HPP__MAX_INDEX
};
@@ -177,8 +174,6 @@ enum {
void perf_hpp__init(void);
void perf_hpp__column_register(struct perf_hpp_fmt *format);
void perf_hpp__column_enable(unsigned col);
-int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he,
- bool color);
struct perf_evlist;
@@ -245,11 +240,4 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __maybe_unused,
#endif
unsigned int hists__sort_list_width(struct hists *self);
-
-double perf_diff__compute_delta(struct hist_entry *he, struct hist_entry *pair);
-double perf_diff__compute_ratio(struct hist_entry *he, struct hist_entry *pair);
-s64 perf_diff__compute_wdiff(struct hist_entry *he, struct hist_entry *pair);
-int perf_diff__formula(struct hist_entry *he, struct hist_entry *pair,
- char *buf, size_t size);
-double perf_diff__period_percent(struct hist_entry *he, u64 period);
#endif /* __PERF_HIST_H */
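
The perf_hpp_fmt callbacks now receive the format descriptor itself, so a single callback can back several columns. A minimal sketch of a column using the new signatures, assuming the perf_hpp buffer fields (buf, size) and perf's scnprintf helper:

/* Sketch only: a column header using the new callback signature. */
static int my_header(struct perf_hpp_fmt *fmt __maybe_unused,
		     struct perf_hpp *hpp)
{
	return scnprintf(hpp->buf, hpp->size, "%8s", "Overhead");
}

static struct perf_hpp_fmt my_fmt = {
	.header = my_header,
};
/* then: perf_hpp__column_register(&my_fmt); */
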
diff --git a/tools/perf/util/include/linux/string.h b/tools/perf/util/include/linux/string.h
index 6f19c548ecc..97a80073822 100644
--- a/tools/perf/util/include/linux/string.h
+++ b/tools/perf/util/include/linux/string.h
@@ -1,3 +1,4 @@
#include <string.h>
void *memdup(const void *src, size_t len);
+int str_append(char **s, int *len, const char *a);
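
str_append() is only declared here; its assumed contract, based on how parse_events_fixup() in parse-events.c uses it below, is to grow a possibly-NULL string while tracking the allocated length. A minimal sketch, ignoring the int error return for brevity:

/* Sketch only: *s may start as NULL, *len as 0; caller frees *s. */
char *s = NULL;
int len = 0;

str_append(&s, &len, "cpu/");
str_append(&s, &len, "cycles");
str_append(&s, &len, "/");
/* s now holds "cpu/cycles/" */
free(s);
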
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index b2ecad6ec46..1dca61f0512 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -25,12 +25,15 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
machine->kmaps.machine = machine;
machine->pid = pid;
+ machine->symbol_filter = NULL;
+
machine->root_dir = strdup(root_dir);
if (machine->root_dir == NULL)
return -ENOMEM;
if (pid != HOST_KERNEL_ID) {
- struct thread *thread = machine__findnew_thread(machine, pid);
+ struct thread *thread = machine__findnew_thread(machine, 0,
+ pid);
char comm[64];
if (thread == NULL)
@@ -95,6 +98,7 @@ void machines__init(struct machines *machines)
{
machine__init(&machines->host, "", HOST_KERNEL_ID);
machines->guests = RB_ROOT;
+ machines->symbol_filter = NULL;
}
void machines__exit(struct machines *machines)
@@ -118,6 +122,8 @@ struct machine *machines__add(struct machines *machines, pid_t pid,
return NULL;
}
+ machine->symbol_filter = machines->symbol_filter;
+
while (*p != NULL) {
parent = *p;
pos = rb_entry(parent, struct machine, rb_node);
@@ -133,6 +139,21 @@ struct machine *machines__add(struct machines *machines, pid_t pid,
return machine;
}
+void machines__set_symbol_filter(struct machines *machines,
+ symbol_filter_t symbol_filter)
+{
+ struct rb_node *nd;
+
+ machines->symbol_filter = symbol_filter;
+ machines->host.symbol_filter = symbol_filter;
+
+ for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+ struct machine *machine = rb_entry(nd, struct machine, rb_node);
+
+ machine->symbol_filter = symbol_filter;
+ }
+}
+
struct machine *machines__find(struct machines *machines, pid_t pid)
{
struct rb_node **p = &machines->guests.rb_node;
@@ -233,7 +254,8 @@ void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
return;
}
-static struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid,
+static struct thread *__machine__findnew_thread(struct machine *machine,
+ pid_t pid, pid_t tid,
bool create)
{
struct rb_node **p = &machine->threads.rb_node;
@@ -241,23 +263,28 @@ static struct thread *__machine__findnew_thread(struct machine *machine, pid_t p
struct thread *th;
/*
- * Font-end cache - PID lookups come in blocks,
+ * Front-end cache - TID lookups come in blocks,
* so most of the time we don't have to look up
* the full rbtree:
*/
- if (machine->last_match && machine->last_match->pid == pid)
+ if (machine->last_match && machine->last_match->tid == tid) {
+ if (pid && pid != machine->last_match->pid_)
+ machine->last_match->pid_ = pid;
return machine->last_match;
+ }
while (*p != NULL) {
parent = *p;
th = rb_entry(parent, struct thread, rb_node);
- if (th->pid == pid) {
+ if (th->tid == tid) {
machine->last_match = th;
+ if (pid && pid != th->pid_)
+ th->pid_ = pid;
return th;
}
- if (pid < th->pid)
+ if (tid < th->tid)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
@@ -266,7 +293,7 @@ static struct thread *__machine__findnew_thread(struct machine *machine, pid_t p
if (!create)
return NULL;
- th = thread__new(pid);
+ th = thread__new(pid, tid);
if (th != NULL) {
rb_link_node(&th->rb_node, parent, p);
rb_insert_color(&th->rb_node, &machine->threads);
@@ -276,19 +303,22 @@ static struct thread *__machine__findnew_thread(struct machine *machine, pid_t p
return th;
}
-struct thread *machine__findnew_thread(struct machine *machine, pid_t pid)
+struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
+ pid_t tid)
{
- return __machine__findnew_thread(machine, pid, true);
+ return __machine__findnew_thread(machine, pid, tid, true);
}
-struct thread *machine__find_thread(struct machine *machine, pid_t pid)
+struct thread *machine__find_thread(struct machine *machine, pid_t tid)
{
- return __machine__findnew_thread(machine, pid, false);
+ return __machine__findnew_thread(machine, 0, tid, false);
}
int machine__process_comm_event(struct machine *machine, union perf_event *event)
{
- struct thread *thread = machine__findnew_thread(machine, event->comm.tid);
+ struct thread *thread = machine__findnew_thread(machine,
+ event->comm.pid,
+ event->comm.tid);
if (dump_trace)
perf_event__fprintf_comm(event, stdout);
@@ -628,10 +658,8 @@ int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
struct map *map = machine->vmlinux_maps[type];
int ret = dso__load_vmlinux_path(map->dso, map, filter);
- if (ret > 0) {
+ if (ret > 0)
dso__set_loaded(map->dso, type);
- map__reloc_vmlinux(map);
- }
return ret;
}
@@ -808,7 +836,10 @@ static int machine__create_modules(struct machine *machine)
free(line);
fclose(file);
- return machine__set_modules_path(machine);
+ if (machine__set_modules_path(machine) < 0) {
+ pr_debug("Problems setting modules path maps, continuing anyway...\n");
+ }
+ return 0;
out_delete_line:
free(line);
@@ -858,6 +889,18 @@ static void machine__set_kernel_mmap_len(struct machine *machine,
}
}
+static bool machine__uses_kcore(struct machine *machine)
+{
+ struct dso *dso;
+
+ list_for_each_entry(dso, &machine->kernel_dsos, node) {
+ if (dso__is_kcore(dso))
+ return true;
+ }
+
+ return false;
+}
+
static int machine__process_kernel_mmap_event(struct machine *machine,
union perf_event *event)
{
@@ -866,6 +909,10 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
enum dso_kernel_type kernel_type;
bool is_kernel_mmap;
+ /* If we have maps from kcore then we do not need or want any others */
+ if (machine__uses_kcore(machine))
+ return 0;
+
machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix));
if (machine__is_host(machine))
kernel_type = DSO_TYPE_KERNEL;
@@ -969,7 +1016,8 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
return 0;
}
- thread = machine__findnew_thread(machine, event->mmap.pid);
+ thread = machine__findnew_thread(machine, event->mmap.pid,
+ event->mmap.pid);
if (thread == NULL)
goto out_problem;
@@ -994,11 +1042,30 @@ out_problem:
return 0;
}
+static void machine__remove_thread(struct machine *machine, struct thread *th)
+{
+ machine->last_match = NULL;
+ rb_erase(&th->rb_node, &machine->threads);
+ /*
+ * We may have references to this thread, for instance in some hist_entry
+ * instances, so just move them to a separate list.
+ */
+ list_add_tail(&th->node, &machine->dead_threads);
+}
+
int machine__process_fork_event(struct machine *machine, union perf_event *event)
{
- struct thread *thread = machine__findnew_thread(machine, event->fork.tid);
- struct thread *parent = machine__findnew_thread(machine, event->fork.ptid);
+ struct thread *thread = machine__find_thread(machine, event->fork.tid);
+ struct thread *parent = machine__findnew_thread(machine,
+ event->fork.ppid,
+ event->fork.ptid);
+ /* if a thread currently exists for the thread id remove it */
+ if (thread != NULL)
+ machine__remove_thread(machine, thread);
+
+ thread = machine__findnew_thread(machine, event->fork.pid,
+ event->fork.tid);
if (dump_trace)
perf_event__fprintf_task(event, stdout);
@@ -1011,18 +1078,8 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
return 0;
}
-static void machine__remove_thread(struct machine *machine, struct thread *th)
-{
- machine->last_match = NULL;
- rb_erase(&th->rb_node, &machine->threads);
- /*
- * We may have references to this thread, for instance in some hist_entry
- * instances, so just move them to a separate list.
- */
- list_add_tail(&th->node, &machine->dead_threads);
-}
-
-int machine__process_exit_event(struct machine *machine, union perf_event *event)
+int machine__process_exit_event(struct machine *machine __maybe_unused,
+ union perf_event *event)
{
struct thread *thread = machine__find_thread(machine, event->fork.tid);
@@ -1030,7 +1087,7 @@ int machine__process_exit_event(struct machine *machine, union perf_event *event
perf_event__fprintf_task(event, stdout);
if (thread != NULL)
- machine__remove_thread(machine, thread);
+ thread__exited(thread);
return 0;
}
@@ -1058,11 +1115,10 @@ int machine__process_event(struct machine *machine, union perf_event *event)
return ret;
}
-static bool symbol__match_parent_regex(struct symbol *sym)
+static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
- if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
+ if (sym->name && !regexec(regex, sym->name, 0, NULL, 0))
return 1;
-
return 0;
}
@@ -1094,7 +1150,7 @@ static void ip__resolve_ams(struct machine *machine, struct thread *thread,
* or else, the symbol is unknown
*/
thread__find_addr_location(thread, machine, m, MAP__FUNCTION,
- ip, &al, NULL);
+ ip, &al);
if (al.sym)
goto found;
}
@@ -1112,8 +1168,8 @@ static void ip__resolve_data(struct machine *machine, struct thread *thread,
memset(&al, 0, sizeof(al));
- thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr, &al,
- NULL);
+ thread__find_addr_location(thread, machine, m, MAP__VARIABLE, addr,
+ &al);
ams->addr = addr;
ams->al_addr = al.addr;
ams->sym = al.sym;
@@ -1159,8 +1215,8 @@ struct branch_info *machine__resolve_bstack(struct machine *machine,
static int machine__resolve_callchain_sample(struct machine *machine,
struct thread *thread,
struct ip_callchain *chain,
- struct symbol **parent)
-
+ struct symbol **parent,
+ struct addr_location *root_al)
{
u8 cpumode = PERF_RECORD_MISC_USER;
unsigned int i;
@@ -1208,11 +1264,18 @@ static int machine__resolve_callchain_sample(struct machine *machine,
al.filtered = false;
thread__find_addr_location(thread, machine, cpumode,
- MAP__FUNCTION, ip, &al, NULL);
+ MAP__FUNCTION, ip, &al);
if (al.sym != NULL) {
if (sort__has_parent && !*parent &&
- symbol__match_parent_regex(al.sym))
+ symbol__match_regex(al.sym, &parent_regex))
*parent = al.sym;
+ else if (have_ignore_callees && root_al &&
+ symbol__match_regex(al.sym, &ignore_callees_regex)) {
+ /* Treat this symbol as the root,
+ forgetting its callees. */
+ *root_al = al;
+ callchain_cursor_reset(&callchain_cursor);
+ }
if (!symbol_conf.use_callchain)
break;
}
@@ -1237,15 +1300,13 @@ int machine__resolve_callchain(struct machine *machine,
struct perf_evsel *evsel,
struct thread *thread,
struct perf_sample *sample,
- struct symbol **parent)
-
+ struct symbol **parent,
+ struct addr_location *root_al)
{
int ret;
- callchain_cursor_reset(&callchain_cursor);
-
ret = machine__resolve_callchain_sample(machine, thread,
- sample->callchain, parent);
+ sample->callchain, parent, root_al);
if (ret)
return ret;
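
Thread lookup is now keyed on tid, with the pid carried alongside and refreshed on later lookups. A minimal sketch of resolving the thread for a sample with the new signature, assuming a struct perf_sample *sample in scope:

/* Sketch only: tid selects the thread; the pid is recorded on it. */
struct thread *thread;

thread = machine__findnew_thread(machine, sample->pid, sample->tid);
if (thread == NULL)
	return -1;

/* lookups that only know the tid go through machine__find_thread() */
thread = machine__find_thread(machine, sample->tid);
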
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 77940680f1f..0df925ba6a4 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -5,6 +5,7 @@
#include <linux/rbtree.h>
#include "map.h"
+struct addr_location;
struct branch_stack;
struct perf_evsel;
struct perf_sample;
@@ -28,6 +29,7 @@ struct machine {
struct list_head kernel_dsos;
struct map_groups kmaps;
struct map *vmlinux_maps[MAP__NR_TYPES];
+ symbol_filter_t symbol_filter;
};
static inline
@@ -36,7 +38,7 @@ struct map *machine__kernel_map(struct machine *machine, enum map_type type)
return machine->vmlinux_maps[type];
}
-struct thread *machine__find_thread(struct machine *machine, pid_t pid);
+struct thread *machine__find_thread(struct machine *machine, pid_t tid);
int machine__process_comm_event(struct machine *machine, union perf_event *event);
int machine__process_exit_event(struct machine *machine, union perf_event *event);
@@ -50,6 +52,7 @@ typedef void (*machine__process_t)(struct machine *machine, void *data);
struct machines {
struct machine host;
struct rb_root guests;
+ symbol_filter_t symbol_filter;
};
void machines__init(struct machines *machines);
@@ -67,6 +70,9 @@ struct machine *machines__findnew(struct machines *machines, pid_t pid);
void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size);
char *machine__mmap_name(struct machine *machine, char *bf, size_t size);
+void machines__set_symbol_filter(struct machines *machines,
+ symbol_filter_t symbol_filter);
+
int machine__init(struct machine *machine, const char *root_dir, pid_t pid);
void machine__exit(struct machine *machine);
void machine__delete_dead_threads(struct machine *machine);
@@ -83,7 +89,8 @@ int machine__resolve_callchain(struct machine *machine,
struct perf_evsel *evsel,
struct thread *thread,
struct perf_sample *sample,
- struct symbol **parent);
+ struct symbol **parent,
+ struct addr_location *root_al);
/*
* Default guest kernel is defined by parameter --guestkallsyms
@@ -99,7 +106,8 @@ static inline bool machine__is_host(struct machine *machine)
return machine ? machine->pid == HOST_KERNEL_ID : false;
}
-struct thread *machine__findnew_thread(struct machine *machine, pid_t pid);
+struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
+ pid_t tid);
size_t machine__fprintf(struct machine *machine, FILE *fp);
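
machines__set_symbol_filter() propagates one filter to the host and all current guests, and machines__add() copies it onto guests created later. A minimal sketch, assuming symbol_filter_t is the int (*)(struct map *, struct symbol *) callback from symbol.h and a struct perf_session *session in scope:

/* Sketch only: hypothetical filter; a non-zero return is assumed here
 * to drop the symbol. */
static int skip_internal_syms(struct map *map __maybe_unused,
			      struct symbol *sym)
{
	return sym->name && sym->name[0] == '$';
}

	machines__set_symbol_filter(&session->machines, skip_internal_syms);
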
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 8bcdf9e5408..9e8304ca343 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -182,12 +182,6 @@ int map__load(struct map *map, symbol_filter_t filter)
#endif
return -1;
}
- /*
- * Only applies to the kernel, as its symtabs aren't relative like the
- * module ones.
- */
- if (map->dso->kernel)
- map__reloc_vmlinux(map);
return 0;
}
@@ -254,14 +248,18 @@ size_t map__fprintf_dsoname(struct map *map, FILE *fp)
/*
* objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN.
- * map->dso->adjust_symbols==1 for ET_EXEC-like cases.
+ * map->dso->adjust_symbols==1 for ET_EXEC-like cases except ET_REL which is
+ * relative to section start.
*/
u64 map__rip_2objdump(struct map *map, u64 rip)
{
- u64 addr = map->dso->adjust_symbols ?
- map->unmap_ip(map, rip) : /* RIP -> IP */
- rip;
- return addr;
+ if (!map->dso->adjust_symbols)
+ return rip;
+
+ if (map->dso->rel)
+ return rip - map->pgoff;
+
+ return map->unmap_ip(map, rip);
}
void map_groups__init(struct map_groups *mg)
@@ -513,35 +511,6 @@ int map_groups__clone(struct map_groups *mg,
return 0;
}
-static u64 map__reloc_map_ip(struct map *map, u64 ip)
-{
- return ip + (s64)map->pgoff;
-}
-
-static u64 map__reloc_unmap_ip(struct map *map, u64 ip)
-{
- return ip - (s64)map->pgoff;
-}
-
-void map__reloc_vmlinux(struct map *map)
-{
- struct kmap *kmap = map__kmap(map);
- s64 reloc;
-
- if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr)
- return;
-
- reloc = (kmap->ref_reloc_sym->unrelocated_addr -
- kmap->ref_reloc_sym->addr);
-
- if (!reloc)
- return;
-
- map->map_ip = map__reloc_map_ip;
- map->unmap_ip = map__reloc_unmap_ip;
- map->pgoff = reloc;
-}
-
void maps__insert(struct rb_root *maps, struct map *map)
{
struct rb_node **p = &maps->rb_node;
@@ -586,3 +555,21 @@ struct map *maps__find(struct rb_root *maps, u64 ip)
return NULL;
}
+
+struct map *maps__first(struct rb_root *maps)
+{
+ struct rb_node *first = rb_first(maps);
+
+ if (first)
+ return rb_entry(first, struct map, rb_node);
+ return NULL;
+}
+
+struct map *maps__next(struct map *map)
+{
+ struct rb_node *next = rb_next(&map->rb_node);
+
+ if (next)
+ return rb_entry(next, struct map, rb_node);
+ return NULL;
+}
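
maps__first()/maps__next() give an in-order walk of an rbtree of maps (the map_groups wrappers added in map.h, next, build on them). A minimal sketch listing a group's function maps, assuming a struct map_groups *mg in scope:

/* Sketch only: iterate the MAP__FUNCTION maps in address order. */
struct map *map;

for (map = maps__first(&mg->maps[MAP__FUNCTION]); map;
     map = maps__next(map))
	printf("%#" PRIx64 "-%#" PRIx64 " %s\n",
	       map->start, map->end, map->dso->short_name);
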
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index a887f2c9dfb..2cc93cbf0e1 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -112,6 +112,8 @@ size_t __map_groups__fprintf_maps(struct map_groups *mg,
void maps__insert(struct rb_root *maps, struct map *map);
void maps__remove(struct rb_root *maps, struct map *map);
struct map *maps__find(struct rb_root *maps, u64 addr);
+struct map *maps__first(struct rb_root *maps);
+struct map *maps__next(struct map *map);
void map_groups__init(struct map_groups *mg);
void map_groups__exit(struct map_groups *mg);
int map_groups__clone(struct map_groups *mg,
@@ -139,6 +141,17 @@ static inline struct map *map_groups__find(struct map_groups *mg,
return maps__find(&mg->maps[type], addr);
}
+static inline struct map *map_groups__first(struct map_groups *mg,
+ enum map_type type)
+{
+ return maps__first(&mg->maps[type]);
+}
+
+static inline struct map *map_groups__next(struct map *map)
+{
+ return maps__next(map);
+}
+
struct symbol *map_groups__find_symbol(struct map_groups *mg,
enum map_type type, u64 addr,
struct map **mapp,
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 995fc25db8c..98125319b15 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -6,7 +6,7 @@
#include "parse-options.h"
#include "parse-events.h"
#include "exec_cmd.h"
-#include "string.h"
+#include "linux/string.h"
#include "symbol.h"
#include "cache.h"
#include "header.h"
@@ -15,6 +15,7 @@
#define YY_EXTRA_TYPE int
#include "parse-events-flex.h"
#include "pmu.h"
+#include "thread_map.h"
#define MAX_NAME_LEN 100
@@ -108,6 +109,10 @@ static struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
.symbol = "emulation-faults",
.alias = "",
},
+ [PERF_COUNT_SW_DUMMY] = {
+ .symbol = "dummy",
+ .alias = "",
+ },
};
#define __PERF_EVENT_FIELD(config, name) \
@@ -217,6 +222,29 @@ struct tracepoint_path *tracepoint_id_to_path(u64 config)
return NULL;
}
+struct tracepoint_path *tracepoint_name_to_path(const char *name)
+{
+ struct tracepoint_path *path = zalloc(sizeof(*path));
+ char *str = strchr(name, ':');
+
+ if (path == NULL || str == NULL) {
+ free(path);
+ return NULL;
+ }
+
+ path->system = strndup(name, str - name);
+ path->name = strdup(str+1);
+
+ if (path->system == NULL || path->name == NULL) {
+ free(path->system);
+ free(path->name);
+ free(path);
+ path = NULL;
+ }
+
+ return path;
+}
+
const char *event_type(int type)
{
switch (type) {
@@ -241,40 +269,29 @@ const char *event_type(int type)
-static int __add_event(struct list_head **_list, int *idx,
+static int __add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr,
char *name, struct cpu_map *cpus)
{
struct perf_evsel *evsel;
- struct list_head *list = *_list;
-
- if (!list) {
- list = malloc(sizeof(*list));
- if (!list)
- return -ENOMEM;
- INIT_LIST_HEAD(list);
- }
event_attr_init(attr);
evsel = perf_evsel__new(attr, (*idx)++);
- if (!evsel) {
- free(list);
+ if (!evsel)
return -ENOMEM;
- }
evsel->cpus = cpus;
if (name)
evsel->name = strdup(name);
list_add_tail(&evsel->node, list);
- *_list = list;
return 0;
}
-static int add_event(struct list_head **_list, int *idx,
+static int add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr, char *name)
{
- return __add_event(_list, idx, attr, name, NULL);
+ return __add_event(list, idx, attr, name, NULL);
}
static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size)
@@ -295,7 +312,7 @@ static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES]
return -1;
}
-int parse_events_add_cache(struct list_head **list, int *idx,
+int parse_events_add_cache(struct list_head *list, int *idx,
char *type, char *op_result1, char *op_result2)
{
struct perf_event_attr attr;
@@ -356,31 +373,21 @@ int parse_events_add_cache(struct list_head **list, int *idx,
return add_event(list, idx, &attr, name);
}
-static int add_tracepoint(struct list_head **listp, int *idx,
+static int add_tracepoint(struct list_head *list, int *idx,
char *sys_name, char *evt_name)
{
struct perf_evsel *evsel;
- struct list_head *list = *listp;
-
- if (!list) {
- list = malloc(sizeof(*list));
- if (!list)
- return -ENOMEM;
- INIT_LIST_HEAD(list);
- }
evsel = perf_evsel__newtp(sys_name, evt_name, (*idx)++);
- if (!evsel) {
- free(list);
+ if (!evsel)
return -ENOMEM;
- }
list_add_tail(&evsel->node, list);
- *listp = list;
+
return 0;
}
-static int add_tracepoint_multi_event(struct list_head **list, int *idx,
+static int add_tracepoint_multi_event(struct list_head *list, int *idx,
char *sys_name, char *evt_name)
{
char evt_path[MAXPATHLEN];
@@ -412,7 +419,7 @@ static int add_tracepoint_multi_event(struct list_head **list, int *idx,
return ret;
}
-static int add_tracepoint_event(struct list_head **list, int *idx,
+static int add_tracepoint_event(struct list_head *list, int *idx,
char *sys_name, char *evt_name)
{
return strpbrk(evt_name, "*?") ?
@@ -420,7 +427,7 @@ static int add_tracepoint_event(struct list_head **list, int *idx,
add_tracepoint(list, idx, sys_name, evt_name);
}
-static int add_tracepoint_multi_sys(struct list_head **list, int *idx,
+static int add_tracepoint_multi_sys(struct list_head *list, int *idx,
char *sys_name, char *evt_name)
{
struct dirent *events_ent;
@@ -452,7 +459,7 @@ static int add_tracepoint_multi_sys(struct list_head **list, int *idx,
return ret;
}
-int parse_events_add_tracepoint(struct list_head **list, int *idx,
+int parse_events_add_tracepoint(struct list_head *list, int *idx,
char *sys, char *event)
{
int ret;
@@ -507,7 +514,7 @@ do { \
return 0;
}
-int parse_events_add_breakpoint(struct list_head **list, int *idx,
+int parse_events_add_breakpoint(struct list_head *list, int *idx,
void *ptr, char *type)
{
struct perf_event_attr attr;
@@ -588,7 +595,7 @@ static int config_attr(struct perf_event_attr *attr,
return 0;
}
-int parse_events_add_numeric(struct list_head **list, int *idx,
+int parse_events_add_numeric(struct list_head *list, int *idx,
u32 type, u64 config,
struct list_head *head_config)
{
@@ -621,7 +628,7 @@ static char *pmu_event_name(struct list_head *head_terms)
return NULL;
}
-int parse_events_add_pmu(struct list_head **list, int *idx,
+int parse_events_add_pmu(struct list_head *list, int *idx,
char *name, struct list_head *head_config)
{
struct perf_event_attr attr;
@@ -664,6 +671,7 @@ void parse_events__set_leader(char *name, struct list_head *list)
leader->group_name = name ? strdup(name) : NULL;
}
+/* list_event is assumed to point to malloc'ed memory */
void parse_events_update_lists(struct list_head *list_event,
struct list_head *list_all)
{
@@ -684,6 +692,8 @@ struct event_modifier {
int eG;
int precise;
int exclude_GH;
+ int sample_read;
+ int pinned;
};
static int get_event_modifier(struct event_modifier *mod, char *str,
@@ -695,6 +705,8 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
int eH = evsel ? evsel->attr.exclude_host : 0;
int eG = evsel ? evsel->attr.exclude_guest : 0;
int precise = evsel ? evsel->attr.precise_ip : 0;
+ int sample_read = 0;
+ int pinned = evsel ? evsel->attr.pinned : 0;
int exclude = eu | ek | eh;
int exclude_GH = evsel ? evsel->exclude_GH : 0;
@@ -727,6 +739,10 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
/* use of precise requires exclude_guest */
if (!exclude_GH)
eG = 1;
+ } else if (*str == 'S') {
+ sample_read = 1;
+ } else if (*str == 'D') {
+ pinned = 1;
} else
break;
@@ -753,6 +769,9 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
mod->eG = eG;
mod->precise = precise;
mod->exclude_GH = exclude_GH;
+ mod->sample_read = sample_read;
+ mod->pinned = pinned;
+
return 0;
}
@@ -765,7 +784,7 @@ static int check_modifier(char *str)
char *p = str;
/* The sizeof includes 0 byte as well. */
- if (strlen(str) > (sizeof("ukhGHppp") - 1))
+ if (strlen(str) > (sizeof("ukhGHpppSD") - 1))
return -1;
while (*p) {
@@ -803,6 +822,10 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add)
evsel->attr.exclude_host = mod.eH;
evsel->attr.exclude_guest = mod.eG;
evsel->exclude_GH = mod.exclude_GH;
+ evsel->sample_read = mod.sample_read;
+
+ if (perf_evsel__is_group_leader(evsel))
+ evsel->attr.pinned = mod.pinned;
}
return 0;
@@ -820,6 +843,32 @@ int parse_events_name(struct list_head *list, char *name)
return 0;
}
+static int parse_events__scanner(const char *str, void *data, int start_token);
+
+static int parse_events_fixup(int ret, const char *str, void *data,
+ int start_token)
+{
+ char *o = strdup(str);
+ char *s = NULL;
+ char *t = o;
+ char *p;
+ int len = 0;
+
+ if (!o)
+ return ret;
+ while ((p = strsep(&t, ",")) != NULL) {
+ if (s)
+ str_append(&s, &len, ",");
+ str_append(&s, &len, "cpu/");
+ str_append(&s, &len, p);
+ str_append(&s, &len, "/");
+ }
+ free(o);
+ if (!s)
+ return -ENOMEM;
+ return parse_events__scanner(s, data, start_token);
+}
+
static int parse_events__scanner(const char *str, void *data, int start_token)
{
YY_BUFFER_STATE buffer;
@@ -840,6 +889,8 @@ static int parse_events__scanner(const char *str, void *data, int start_token)
parse_events__flush_buffer(buffer, scanner);
parse_events__delete_buffer(buffer, scanner);
parse_events_lex_destroy(scanner);
+ if (ret && !strchr(str, '/'))
+ ret = parse_events_fixup(ret, str, data, start_token);
return ret;
}
@@ -1026,6 +1077,33 @@ int is_valid_tracepoint(const char *event_string)
return 0;
}
+static bool is_event_supported(u8 type, unsigned config)
+{
+ bool ret = true;
+ struct perf_evsel *evsel;
+ struct perf_event_attr attr = {
+ .type = type,
+ .config = config,
+ .disabled = 1,
+ .exclude_kernel = 1,
+ };
+ struct {
+ struct thread_map map;
+ int threads[1];
+ } tmap = {
+ .map.nr = 1,
+ .threads = { 0 },
+ };
+
+ evsel = perf_evsel__new(&attr, 0);
+ if (evsel) {
+ ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0;
+ perf_evsel__delete(evsel);
+ }
+
+ return ret;
+}
+
static void __print_events_type(u8 type, struct event_symbol *syms,
unsigned max)
{
@@ -1033,14 +1111,16 @@ static void __print_events_type(u8 type, struct event_symbol *syms,
unsigned i;
for (i = 0; i < max ; i++, syms++) {
+ if (!is_event_supported(type, i))
+ continue;
+
if (strlen(syms->alias))
snprintf(name, sizeof(name), "%s OR %s",
syms->symbol, syms->alias);
else
snprintf(name, sizeof(name), "%s", syms->symbol);
- printf(" %-50s [%s]\n", name,
- event_type_descriptors[type]);
+ printf(" %-50s [%s]\n", name, event_type_descriptors[type]);
}
}
@@ -1069,6 +1149,10 @@ int print_hwcache_events(const char *event_glob, bool name_only)
if (event_glob != NULL && !strglobmatch(name, event_glob))
continue;
+ if (!is_event_supported(PERF_TYPE_HW_CACHE,
+ type | (op << 8) | (i << 16)))
+ continue;
+
if (name_only)
printf("%s ", name);
else
@@ -1079,6 +1163,8 @@ int print_hwcache_events(const char *event_glob, bool name_only)
}
}
+ if (printed)
+ printf("\n");
return printed;
}
@@ -1096,6 +1182,9 @@ static void print_symbol_events(const char *event_glob, unsigned type,
(syms->alias && strglobmatch(syms->alias, event_glob))))
continue;
+ if (!is_event_supported(type, i))
+ continue;
+
if (name_only) {
printf("%s ", syms->symbol);
continue;
@@ -1133,11 +1222,12 @@ void print_events(const char *event_glob, bool name_only)
print_hwcache_events(event_glob, name_only);
+ print_pmu_events(event_glob, name_only);
+
if (event_glob != NULL)
return;
if (!name_only) {
- printf("\n");
printf(" %-50s [%s]\n",
"rNNN",
event_type_descriptors[PERF_TYPE_RAW]);
@@ -1237,6 +1327,4 @@ void parse_events__free_terms(struct list_head *terms)
list_for_each_entry_safe(term, h, terms, list)
free(term);
-
- free(terms);
}
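
The new tracepoint_name_to_path() splits a "sys:name" spec into its system and event parts; the S and D event modifiers introduced above can be exercised simply by parsing strings such as "cycles:S" (leader sample-read) or "instructions:D" (pinned leader). A minimal sketch of the path helper, assuming a valid spec:

/* Sketch only: split "sched:sched_switch" into system/event. */
struct tracepoint_path *path = tracepoint_name_to_path("sched:sched_switch");

if (path) {
	printf("system=%s event=%s\n", path->system, path->name);
	free(path->system);
	free(path->name);
	free(path);
}
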
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 8a4859315fd..f1cb4c4b3c7 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -23,6 +23,7 @@ struct tracepoint_path {
};
extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
+extern struct tracepoint_path *tracepoint_name_to_path(const char *name);
extern bool have_tracepoints(struct list_head *evlist);
const char *event_type(int type);
@@ -84,16 +85,16 @@ void parse_events__free_terms(struct list_head *terms);
int parse_events__modifier_event(struct list_head *list, char *str, bool add);
int parse_events__modifier_group(struct list_head *list, char *event_mod);
int parse_events_name(struct list_head *list, char *name);
-int parse_events_add_tracepoint(struct list_head **list, int *idx,
+int parse_events_add_tracepoint(struct list_head *list, int *idx,
char *sys, char *event);
-int parse_events_add_numeric(struct list_head **list, int *idx,
+int parse_events_add_numeric(struct list_head *list, int *idx,
u32 type, u64 config,
struct list_head *head_config);
-int parse_events_add_cache(struct list_head **list, int *idx,
+int parse_events_add_cache(struct list_head *list, int *idx,
char *type, char *op_result1, char *op_result2);
-int parse_events_add_breakpoint(struct list_head **list, int *idx,
+int parse_events_add_breakpoint(struct list_head *list, int *idx,
void *ptr, char *type);
-int parse_events_add_pmu(struct list_head **list, int *idx,
+int parse_events_add_pmu(struct list_head *list, int *idx,
char *pmu , struct list_head *head_config);
void parse_events__set_leader(char *name, struct list_head *list);
void parse_events_update_lists(struct list_head *list_event,
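
With this interface change the caller owns the event list: the bison actions allocate it (via ALLOC_LIST in parse-events.y below) and the add_* helpers only append to it. A minimal sketch outside the parser, assuming an int idx counter from the surrounding parse state:

/* Sketch only: caller-allocated list passed to the add_* helpers. */
struct list_head *list = malloc(sizeof(*list));

if (!list)
	return -ENOMEM;
INIT_LIST_HEAD(list);
if (parse_events_add_numeric(list, &idx, PERF_TYPE_HARDWARE,
			     PERF_COUNT_HW_CPU_CYCLES, NULL))
	return -1;	/* the grammar aborts here instead */
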
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index e9d1134c2c6..91346b75396 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -82,7 +82,8 @@ num_hex 0x[a-fA-F0-9]+
num_raw_hex [a-fA-F0-9]+
name [a-zA-Z_*?][a-zA-Z0-9_*?]*
name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?]*
-modifier_event [ukhpGH]+
+/* If you add a modifier you need to update check_modifier() */
+modifier_event [ukhpGHSD]+
modifier_bp [rwx]{1,3}
%%
@@ -144,6 +145,7 @@ context-switches|cs { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW
cpu-migrations|migrations { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_CPU_MIGRATIONS); }
alignment-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_ALIGNMENT_FAULTS); }
emulation-faults { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_EMULATION_FAULTS); }
+dummy { return sym(yyscanner, PERF_TYPE_SOFTWARE, PERF_COUNT_SW_DUMMY); }
L1-dcache|l1-d|l1d|L1-data |
L1-icache|l1-i|l1i|L1-instruction |
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index afc44c18dfe..4eb67ec333f 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -22,6 +22,13 @@ do { \
YYABORT; \
} while (0)
+#define ALLOC_LIST(list) \
+do { \
+ list = malloc(sizeof(*list)); \
+ ABORT_ON(!list); \
+ INIT_LIST_HEAD(list); \
+} while (0)
+
static inc_group_count(struct list_head *list,
struct parse_events_evlist *data)
{
@@ -196,9 +203,10 @@ event_pmu:
PE_NAME '/' event_config '/'
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
- ABORT_ON(parse_events_add_pmu(&list, &data->idx, $1, $3));
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_pmu(list, &data->idx, $1, $3));
parse_events__free_terms($3);
$$ = list;
}
@@ -212,11 +220,12 @@ event_legacy_symbol:
value_sym '/' event_config '/'
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
int type = $1 >> 16;
int config = $1 & 255;
- ABORT_ON(parse_events_add_numeric(&list, &data->idx,
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_numeric(list, &data->idx,
type, config, $3));
parse_events__free_terms($3);
$$ = list;
@@ -225,11 +234,12 @@ value_sym '/' event_config '/'
value_sym sep_slash_dc
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
int type = $1 >> 16;
int config = $1 & 255;
- ABORT_ON(parse_events_add_numeric(&list, &data->idx,
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_numeric(list, &data->idx,
type, config, NULL));
$$ = list;
}
@@ -238,27 +248,30 @@ event_legacy_cache:
PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
- ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, $5));
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_cache(list, &data->idx, $1, $3, $5));
$$ = list;
}
|
PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
- ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, $3, NULL));
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_cache(list, &data->idx, $1, $3, NULL));
$$ = list;
}
|
PE_NAME_CACHE_TYPE
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
- ABORT_ON(parse_events_add_cache(&list, &data->idx, $1, NULL, NULL));
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_cache(list, &data->idx, $1, NULL, NULL));
$$ = list;
}
@@ -266,9 +279,10 @@ event_legacy_mem:
PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
- ABORT_ON(parse_events_add_breakpoint(&list, &data->idx,
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
(void *) $2, $4));
$$ = list;
}
@@ -276,9 +290,10 @@ PE_PREFIX_MEM PE_VALUE ':' PE_MODIFIER_BP sep_dc
PE_PREFIX_MEM PE_VALUE sep_dc
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
- ABORT_ON(parse_events_add_breakpoint(&list, &data->idx,
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_breakpoint(list, &data->idx,
(void *) $2, NULL));
$$ = list;
}
@@ -287,9 +302,10 @@ event_legacy_tracepoint:
PE_NAME ':' PE_NAME
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
- ABORT_ON(parse_events_add_tracepoint(&list, &data->idx, $1, $3));
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_tracepoint(list, &data->idx, $1, $3));
$$ = list;
}
@@ -297,9 +313,10 @@ event_legacy_numeric:
PE_VALUE ':' PE_VALUE
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
- ABORT_ON(parse_events_add_numeric(&list, &data->idx, (u32)$1, $3, NULL));
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_numeric(list, &data->idx, (u32)$1, $3, NULL));
$$ = list;
}
@@ -307,9 +324,10 @@ event_legacy_raw:
PE_RAW
{
struct parse_events_evlist *data = _data;
- struct list_head *list = NULL;
+ struct list_head *list;
- ABORT_ON(parse_events_add_numeric(&list, &data->idx,
+ ALLOC_LIST(list);
+ ABORT_ON(parse_events_add_numeric(list, &data->idx,
PERF_TYPE_RAW, $1, NULL));
$$ = list;
}
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 4c6f9c490a8..bc9d8069d37 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -73,7 +73,7 @@ int perf_pmu__format_parse(char *dir, struct list_head *head)
* located at:
* /sys/bus/event_source/devices/<dev>/format as sysfs group attributes.
*/
-static int pmu_format(char *name, struct list_head *format)
+static int pmu_format(const char *name, struct list_head *format)
{
struct stat st;
char path[PATH_MAX];
@@ -162,7 +162,7 @@ static int pmu_aliases_parse(char *dir, struct list_head *head)
* Reading the pmu event aliases definition, which should be located at:
* /sys/bus/event_source/devices/<dev>/events as sysfs group attributes.
*/
-static int pmu_aliases(char *name, struct list_head *head)
+static int pmu_aliases(const char *name, struct list_head *head)
{
struct stat st;
char path[PATH_MAX];
@@ -208,7 +208,7 @@ static int pmu_alias_terms(struct perf_pmu_alias *alias,
* located at:
* /sys/bus/event_source/devices/<dev>/type as sysfs attribute.
*/
-static int pmu_type(char *name, __u32 *type)
+static int pmu_type(const char *name, __u32 *type)
{
struct stat st;
char path[PATH_MAX];
@@ -266,7 +266,7 @@ static void pmu_read_sysfs(void)
closedir(dir);
}
-static struct cpu_map *pmu_cpumask(char *name)
+static struct cpu_map *pmu_cpumask(const char *name)
{
struct stat st;
char path[PATH_MAX];
@@ -293,7 +293,7 @@ static struct cpu_map *pmu_cpumask(char *name)
return cpus;
}
-static struct perf_pmu *pmu_lookup(char *name)
+static struct perf_pmu *pmu_lookup(const char *name)
{
struct perf_pmu *pmu;
LIST_HEAD(format);
@@ -330,7 +330,7 @@ static struct perf_pmu *pmu_lookup(char *name)
return pmu;
}
-static struct perf_pmu *pmu_find(char *name)
+static struct perf_pmu *pmu_find(const char *name)
{
struct perf_pmu *pmu;
@@ -356,7 +356,7 @@ struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu)
return NULL;
}
-struct perf_pmu *perf_pmu__find(char *name)
+struct perf_pmu *perf_pmu__find(const char *name)
{
struct perf_pmu *pmu;
@@ -564,3 +564,76 @@ void perf_pmu__set_format(unsigned long *bits, long from, long to)
for (b = from; b <= to; b++)
set_bit(b, bits);
}
+
+static char *format_alias(char *buf, int len, struct perf_pmu *pmu,
+ struct perf_pmu_alias *alias)
+{
+ snprintf(buf, len, "%s/%s/", pmu->name, alias->name);
+ return buf;
+}
+
+static char *format_alias_or(char *buf, int len, struct perf_pmu *pmu,
+ struct perf_pmu_alias *alias)
+{
+ snprintf(buf, len, "%s OR %s/%s/", alias->name, pmu->name, alias->name);
+ return buf;
+}
+
+static int cmp_string(const void *a, const void *b)
+{
+ const char * const *as = a;
+ const char * const *bs = b;
+ return strcmp(*as, *bs);
+}
+
+void print_pmu_events(const char *event_glob, bool name_only)
+{
+ struct perf_pmu *pmu;
+ struct perf_pmu_alias *alias;
+ char buf[1024];
+ int printed = 0;
+ int len, j;
+ char **aliases;
+
+ pmu = NULL;
+ len = 0;
+ while ((pmu = perf_pmu__scan(pmu)) != NULL)
+ list_for_each_entry(alias, &pmu->aliases, list)
+ len++;
+ aliases = malloc(sizeof(char *) * len);
+ if (!aliases)
+ return;
+ pmu = NULL;
+ j = 0;
+ while ((pmu = perf_pmu__scan(pmu)) != NULL)
+ list_for_each_entry(alias, &pmu->aliases, list) {
+ char *name = format_alias(buf, sizeof(buf), pmu, alias);
+ bool is_cpu = !strcmp(pmu->name, "cpu");
+
+ if (event_glob != NULL &&
+ !(strglobmatch(name, event_glob) ||
+ (!is_cpu && strglobmatch(alias->name,
+ event_glob))))
+ continue;
+ aliases[j] = name;
+ if (is_cpu && !name_only)
+ aliases[j] = format_alias_or(buf, sizeof(buf),
+ pmu, alias);
+ aliases[j] = strdup(aliases[j]);
+ j++;
+ }
+ len = j;
+ qsort(aliases, len, sizeof(char *), cmp_string);
+ for (j = 0; j < len; j++) {
+ if (name_only) {
+ printf("%s ", aliases[j]);
+ continue;
+ }
+ printf(" %-50s [Kernel PMU event]\n", aliases[j]);
+ free(aliases[j]);
+ printed++;
+ }
+ if (printed)
+ printf("\n");
+ free(aliases);
+}
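
print_pmu_events() gathers every PMU alias, sorts the formatted names, and prints core-PMU ("cpu") aliases in both spellings. A minimal usage sketch with a hypothetical alias name:

/* Sketch only: lists e.g. "branch-misses OR cpu/branch-misses/  [Kernel PMU event]",
 * sorted via cmp_string(); pass an event glob to filter. */
print_pmu_events(NULL, false);
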
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 32fe55b659f..6b2cbe2d4cc 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -3,6 +3,7 @@
#include <linux/bitops.h>
#include <linux/perf_event.h>
+#include <stdbool.h>
enum {
PERF_PMU_FORMAT_VALUE_CONFIG,
@@ -21,7 +22,7 @@ struct perf_pmu {
struct list_head list;
};
-struct perf_pmu *perf_pmu__find(char *name);
+struct perf_pmu *perf_pmu__find(const char *name);
int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
struct list_head *head_terms);
int perf_pmu__config_terms(struct list_head *formats,
@@ -40,5 +41,7 @@ int perf_pmu__format_parse(char *dir, struct list_head *head);
struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu);
+void print_pmu_events(const char *event_glob, bool name_only);
+
int perf_pmu__test(void);
#endif /* __PMU_H */
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 925e0c3e6d9..71b5412bbbb 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -8,6 +8,26 @@
#include "cpumap.h"
#include "thread_map.h"
+/*
+ * Support debug printing even though util/debug.c is not linked. That means
+ * implementing 'verbose' and 'eprintf'.
+ */
+int verbose;
+
+int eprintf(int level, const char *fmt, ...)
+{
+ va_list args;
+ int ret = 0;
+
+ if (verbose >= level) {
+ va_start(args, fmt);
+ ret = vfprintf(stderr, fmt, args);
+ va_end(args);
+ }
+
+ return ret;
+}
+
/* Define PyVarObject_HEAD_INIT for python 2.5 */
#ifndef PyVarObject_HEAD_INIT
# define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size,
@@ -967,6 +987,7 @@ static struct {
{ "COUNT_SW_PAGE_FAULTS_MAJ", PERF_COUNT_SW_PAGE_FAULTS_MAJ },
{ "COUNT_SW_ALIGNMENT_FAULTS", PERF_COUNT_SW_ALIGNMENT_FAULTS },
{ "COUNT_SW_EMULATION_FAULTS", PERF_COUNT_SW_EMULATION_FAULTS },
+ { "COUNT_SW_DUMMY", PERF_COUNT_SW_DUMMY },
{ "SAMPLE_IP", PERF_SAMPLE_IP },
{ "SAMPLE_TID", PERF_SAMPLE_TID },
diff --git a/tools/perf/util/record.c b/tools/perf/util/record.c
new file mode 100644
index 00000000000..18d73aa2f0f
--- /dev/null
+++ b/tools/perf/util/record.c
@@ -0,0 +1,108 @@
+#include "evlist.h"
+#include "evsel.h"
+#include "cpumap.h"
+#include "parse-events.h"
+
+typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);
+
+static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
+{
+ struct perf_evlist *evlist;
+ struct perf_evsel *evsel;
+ int err = -EAGAIN, fd;
+
+ evlist = perf_evlist__new();
+ if (!evlist)
+ return -ENOMEM;
+
+ if (parse_events(evlist, str))
+ goto out_delete;
+
+ evsel = perf_evlist__first(evlist);
+
+ fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0);
+ if (fd < 0)
+ goto out_delete;
+ close(fd);
+
+ fn(evsel);
+
+ fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0);
+ if (fd < 0) {
+ if (errno == EINVAL)
+ err = -EINVAL;
+ goto out_delete;
+ }
+ close(fd);
+ err = 0;
+
+out_delete:
+ perf_evlist__delete(evlist);
+ return err;
+}
+
+static bool perf_probe_api(setup_probe_fn_t fn)
+{
+ const char *try[] = {"cycles:u", "instructions:u", "cpu-clock", NULL};
+ struct cpu_map *cpus;
+ int cpu, ret, i = 0;
+
+ cpus = cpu_map__new(NULL);
+ if (!cpus)
+ return false;
+ cpu = cpus->map[0];
+ cpu_map__delete(cpus);
+
+ do {
+ ret = perf_do_probe_api(fn, cpu, try[i++]);
+ if (!ret)
+ return true;
+ } while (ret == -EAGAIN && try[i]);
+
+ return false;
+}
+
+static void perf_probe_sample_identifier(struct perf_evsel *evsel)
+{
+ evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
+}
+
+bool perf_can_sample_identifier(void)
+{
+ return perf_probe_api(perf_probe_sample_identifier);
+}
+
+void perf_evlist__config(struct perf_evlist *evlist,
+ struct perf_record_opts *opts)
+{
+ struct perf_evsel *evsel;
+ bool use_sample_identifier = false;
+
+ /*
+ * Set the evsel leader links before we configure attributes,
+ * since some might depend on this info.
+ */
+ if (opts->group)
+ perf_evlist__set_leader(evlist);
+
+ if (evlist->cpus->map[0] < 0)
+ opts->no_inherit = true;
+
+ list_for_each_entry(evsel, &evlist->entries, node)
+ perf_evsel__config(evsel, opts);
+
+ if (evlist->nr_entries > 1) {
+ struct perf_evsel *first = perf_evlist__first(evlist);
+
+ list_for_each_entry(evsel, &evlist->entries, node) {
+ if (evsel->attr.sample_type == first->attr.sample_type)
+ continue;
+ use_sample_identifier = perf_can_sample_identifier();
+ break;
+ }
+ list_for_each_entry(evsel, &evlist->entries, node)
+ perf_evsel__set_sample_id(evsel, use_sample_identifier);
+ }
+
+ perf_evlist__set_id_pos(evlist);
+}
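
perf_can_sample_identifier() probes the running kernel by opening a throwaway event, so callers can fall back gracefully on older kernels. A minimal sketch mirroring perf_evlist__config() above:

/* Sketch only: request PERF_SAMPLE_IDENTIFIER only when supported,
 * applied to each evsel as perf_evlist__config() does. */
if (evlist->nr_entries > 1 && perf_can_sample_identifier())
	perf_evsel__set_sample_id(evsel, true);
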
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index eacec859f29..a85e4ae5f3a 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -261,7 +261,8 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine __maybe_unused,
- struct addr_location *al)
+ struct thread *thread,
+ struct addr_location *al)
{
struct format_field *field;
static char handler[256];
@@ -272,7 +273,6 @@ static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused,
int cpu = sample->cpu;
void *data = sample->raw_data;
unsigned long long nsecs = sample->time;
- struct thread *thread = al->thread;
char *comm = thread->comm;
dSP;
@@ -351,7 +351,8 @@ static void perl_process_event_generic(union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine __maybe_unused,
- struct addr_location *al __maybe_unused)
+ struct thread *thread __maybe_unused,
+ struct addr_location *al __maybe_unused)
{
dSP;
@@ -377,10 +378,11 @@ static void perl_process_event(union perf_event *event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine,
- struct addr_location *al)
+ struct thread *thread,
+ struct addr_location *al)
{
- perl_process_tracepoint(event, sample, evsel, machine, al);
- perl_process_event_generic(event, sample, evsel, machine, al);
+ perl_process_tracepoint(event, sample, evsel, machine, thread, al);
+ perl_process_event_generic(event, sample, evsel, machine, thread, al);
}
static void run_start_sub(void)
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index e87aa5d9696..cc75a3cef38 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -225,6 +225,7 @@ static void python_process_tracepoint(union perf_event *perf_event
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine __maybe_unused,
+ struct thread *thread,
struct addr_location *al)
{
PyObject *handler, *retval, *context, *t, *obj, *dict = NULL;
@@ -238,7 +239,6 @@ static void python_process_tracepoint(union perf_event *perf_event
int cpu = sample->cpu;
void *data = sample->raw_data;
unsigned long long nsecs = sample->time;
- struct thread *thread = al->thread;
char *comm = thread->comm;
t = PyTuple_New(MAX_FIELDS);
@@ -345,12 +345,12 @@ static void python_process_general_event(union perf_event *perf_event
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine __maybe_unused,
+ struct thread *thread,
struct addr_location *al)
{
PyObject *handler, *retval, *t, *dict;
static char handler_name[64];
unsigned n = 0;
- struct thread *thread = al->thread;
/*
* Use the MAX_FIELDS to make the function expandable, though
@@ -404,17 +404,18 @@ static void python_process_event(union perf_event *perf_event,
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine,
+ struct thread *thread,
struct addr_location *al)
{
switch (evsel->attr.type) {
case PERF_TYPE_TRACEPOINT:
python_process_tracepoint(perf_event, sample, evsel,
- machine, al);
+ machine, thread, al);
break;
/* Reserve for future process_hw/sw/raw APIs */
default:
python_process_general_event(perf_event, sample, evsel,
- machine, al);
+ machine, thread, al);
}
}
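
Scripting-engine hooks now receive the thread explicitly instead of digging it out of the addr_location. A minimal sketch of a handler with the new signature, assuming the perf-internal headers and a hypothetical my_process_event name:

/* Sketch only: handler matching the new scripting hook signature. */
static void my_process_event(union perf_event *event __maybe_unused,
			     struct perf_sample *sample __maybe_unused,
			     struct perf_evsel *evsel __maybe_unused,
			     struct machine *machine __maybe_unused,
			     struct thread *thread,
			     struct addr_location *al __maybe_unused)
{
	pr_debug("comm=%s\n", thread->comm);
}
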
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index cf1fe01b7e8..1fc0c628683 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1,4 +1,5 @@
#include <linux/kernel.h>
+#include <traceevent/event-parse.h>
#include <byteswap.h>
#include <unistd.h>
@@ -12,7 +13,6 @@
#include "sort.h"
#include "util.h"
#include "cpumap.h"
-#include "event-parse.h"
#include "perf_regs.h"
#include "vdso.h"
@@ -24,7 +24,7 @@ static int perf_session__open(struct perf_session *self, bool force)
self->fd_pipe = true;
self->fd = STDIN_FILENO;
- if (perf_session__read_header(self, self->fd) < 0)
+ if (perf_session__read_header(self) < 0)
pr_err("incompatible file format (rerun with -v to learn more)");
return 0;
@@ -56,7 +56,7 @@ static int perf_session__open(struct perf_session *self, bool force)
goto out_close;
}
- if (perf_session__read_header(self, self->fd) < 0) {
+ if (perf_session__read_header(self) < 0) {
pr_err("incompatible file format (rerun with -v to learn more)");
goto out_close;
}
@@ -71,6 +71,11 @@ static int perf_session__open(struct perf_session *self, bool force)
goto out_close;
}
+ if (!perf_evlist__valid_read_format(self->evlist)) {
+ pr_err("non matching read_format");
+ goto out_close;
+ }
+
self->size = input_stat.st_size;
return 0;
@@ -193,7 +198,9 @@ void perf_session__delete(struct perf_session *self)
vdso__exit();
}
-static int process_event_synth_tracing_data_stub(union perf_event *event
+static int process_event_synth_tracing_data_stub(struct perf_tool *tool
+ __maybe_unused,
+ union perf_event *event
__maybe_unused,
struct perf_session *session
__maybe_unused)
@@ -202,7 +209,8 @@ static int process_event_synth_tracing_data_stub(union perf_event *event
return 0;
}
-static int process_event_synth_attr_stub(union perf_event *event __maybe_unused,
+static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
struct perf_evlist **pevlist
__maybe_unused)
{
@@ -238,18 +246,11 @@ static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
return 0;
}
-static int process_event_type_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused)
-{
- dump_printf(": unhandled!\n");
- return 0;
-}
-
static int process_finished_round(struct perf_tool *tool,
union perf_event *event,
struct perf_session *session);
-static void perf_tool__fill_defaults(struct perf_tool *tool)
+void perf_tool__fill_defaults(struct perf_tool *tool)
{
if (tool->sample == NULL)
tool->sample = process_event_sample_stub;
@@ -271,8 +272,6 @@ static void perf_tool__fill_defaults(struct perf_tool *tool)
tool->unthrottle = process_event_stub;
if (tool->attr == NULL)
tool->attr = process_event_synth_attr_stub;
- if (tool->event_type == NULL)
- tool->event_type = process_event_type_stub;
if (tool->tracing_data == NULL)
tool->tracing_data = process_event_synth_tracing_data_stub;
if (tool->build_id == NULL)
@@ -496,7 +495,7 @@ static int perf_session_deliver_event(struct perf_session *session,
u64 file_offset);
static int flush_sample_queue(struct perf_session *s,
- struct perf_tool *tool)
+ struct perf_tool *tool)
{
struct ordered_samples *os = &s->ordered_samples;
struct list_head *head = &os->samples;
@@ -644,7 +643,7 @@ static void __queue_event(struct sample_queue *new, struct perf_session *s)
#define MAX_SAMPLE_BUFFER (64 * 1024 / sizeof(struct sample_queue))
-static int perf_session_queue_event(struct perf_session *s, union perf_event *event,
+int perf_session_queue_event(struct perf_session *s, union perf_event *event,
struct perf_sample *sample, u64 file_offset)
{
struct ordered_samples *os = &s->ordered_samples;
@@ -740,7 +739,7 @@ static void perf_session__print_tstamp(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample)
{
- u64 sample_type = perf_evlist__sample_type(session->evlist);
+ u64 sample_type = __perf_evlist__combined_sample_type(session->evlist);
if (event->header.type != PERF_RECORD_SAMPLE &&
!perf_evlist__sample_id_all(session->evlist)) {
@@ -755,6 +754,36 @@ static void perf_session__print_tstamp(struct perf_session *session,
printf("%" PRIu64 " ", sample->time);
}
+static void sample_read__printf(struct perf_sample *sample, u64 read_format)
+{
+ printf("... sample_read:\n");
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ printf("...... time enabled %016" PRIx64 "\n",
+ sample->read.time_enabled);
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ printf("...... time running %016" PRIx64 "\n",
+ sample->read.time_running);
+
+ if (read_format & PERF_FORMAT_GROUP) {
+ u64 i;
+
+ printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
+
+ for (i = 0; i < sample->read.group.nr; i++) {
+ struct sample_read_value *value;
+
+ value = &sample->read.group.values[i];
+ printf("..... id %016" PRIx64
+ ", value %016" PRIx64 "\n",
+ value->id, value->value);
+ }
+ } else
+ printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
+ sample->read.one.id, sample->read.one.value);
+}
+
static void dump_event(struct perf_session *session, union perf_event *event,
u64 file_offset, struct perf_sample *sample)
{
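
How sample->read is laid out depends on the evsel's read_format: PERF_FORMAT_GROUP carries an array of (id, value) pairs, otherwise a single pair. A minimal sketch of consuming it, with use_value() standing in for whatever the caller does with each counter:

/* Sketch only: walk sample->read as sample_read__printf() above does. */
if (evsel->attr.read_format & PERF_FORMAT_GROUP) {
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++)
		use_value(sample->read.group.values[i].id,
			  sample->read.group.values[i].value);
} else {
	use_value(sample->read.one.id, sample->read.one.value);
}
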
@@ -804,11 +833,15 @@ static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
if (sample_type & PERF_SAMPLE_DATA_SRC)
printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
+
+ if (sample_type & PERF_SAMPLE_READ)
+ sample_read__printf(sample, evsel->attr.read_format);
}
static struct machine *
perf_session__find_machine_for_cpumode(struct perf_session *session,
- union perf_event *event)
+ union perf_event *event,
+ struct perf_sample *sample)
{
const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
@@ -820,7 +853,7 @@ static struct machine *
if (event->header.type == PERF_RECORD_MMAP)
pid = event->mmap.pid;
else
- pid = event->ip.pid;
+ pid = sample->pid;
return perf_session__findnew_machine(session, pid);
}
@@ -828,6 +861,75 @@ static struct machine *
return &session->machines.host;
}
+static int deliver_sample_value(struct perf_session *session,
+ struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct sample_read_value *v,
+ struct machine *machine)
+{
+ struct perf_sample_id *sid;
+
+ sid = perf_evlist__id2sid(session->evlist, v->id);
+ if (sid) {
+ sample->id = v->id;
+ sample->period = v->value - sid->period;
+ sid->period = v->value;
+ }
+
+ if (!sid || sid->evsel == NULL) {
+ ++session->stats.nr_unknown_id;
+ return 0;
+ }
+
+ return tool->sample(tool, event, sample, sid->evsel, machine);
+}
+
+static int deliver_sample_group(struct perf_session *session,
+ struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct machine *machine)
+{
+ int ret = -EINVAL;
+ u64 i;
+
+ for (i = 0; i < sample->read.group.nr; i++) {
+ ret = deliver_sample_value(session, tool, event, sample,
+ &sample->read.group.values[i],
+ machine);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+static int
+perf_session__deliver_sample(struct perf_session *session,
+ struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine)
+{
+ /* We know evsel != NULL. */
+ u64 sample_type = evsel->attr.sample_type;
+ u64 read_format = evsel->attr.read_format;
+
+ /* Standard sample delivery. */
+ if (!(sample_type & PERF_SAMPLE_READ))
+ return tool->sample(tool, event, sample, evsel, machine);
+
+ /* For PERF_SAMPLE_READ we have either single or group mode. */
+ if (read_format & PERF_FORMAT_GROUP)
+ return deliver_sample_group(session, tool, event, sample,
+ machine);
+ else
+ return deliver_sample_value(session, tool, event, sample,
+ &sample->read.one, machine);
+}
+
static int perf_session_deliver_event(struct perf_session *session,
union perf_event *event,
struct perf_sample *sample,
@@ -857,7 +959,8 @@ static int perf_session_deliver_event(struct perf_session *session,
hists__inc_nr_events(&evsel->hists, event->header.type);
}
- machine = perf_session__find_machine_for_cpumode(session, event);
+ machine = perf_session__find_machine_for_cpumode(session, event,
+ sample);
switch (event->header.type) {
case PERF_RECORD_SAMPLE:
@@ -870,7 +973,8 @@ static int perf_session_deliver_event(struct perf_session *session,
++session->stats.nr_unprocessable_samples;
return 0;
}
- return tool->sample(tool, event, sample, evsel, machine);
+ return perf_session__deliver_sample(session, tool, event,
+ sample, evsel, machine);
case PERF_RECORD_MMAP:
return tool->mmap(tool, event, sample, machine);
case PERF_RECORD_COMM:
@@ -895,22 +999,6 @@ static int perf_session_deliver_event(struct perf_session *session,
}
}
-static int perf_session__preprocess_sample(struct perf_session *session,
- union perf_event *event, struct perf_sample *sample)
-{
- if (event->header.type != PERF_RECORD_SAMPLE ||
- !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN))
- return 0;
-
- if (!ip_callchain__valid(sample->callchain, event)) {
- pr_debug("call-chain problem with event, skipping it.\n");
- ++session->stats.nr_invalid_chains;
- session->stats.total_invalid_chains += sample->period;
- return -EINVAL;
- }
- return 0;
-}
-
static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
struct perf_tool *tool, u64 file_offset)
{
@@ -921,16 +1009,14 @@ static int perf_session__process_user_event(struct perf_session *session, union
/* These events are processed right away */
switch (event->header.type) {
case PERF_RECORD_HEADER_ATTR:
- err = tool->attr(event, &session->evlist);
+ err = tool->attr(tool, event, &session->evlist);
if (err == 0)
perf_session__set_id_hdr_size(session);
return err;
- case PERF_RECORD_HEADER_EVENT_TYPE:
- return tool->event_type(tool, event);
case PERF_RECORD_HEADER_TRACING_DATA:
/* setup for reading amidst mmap */
lseek(session->fd, file_offset, SEEK_SET);
- return tool->tracing_data(event, session);
+ return tool->tracing_data(tool, event, session);
case PERF_RECORD_HEADER_BUILD_ID:
return tool->build_id(tool, event, session);
case PERF_RECORD_FINISHED_ROUND:
@@ -975,10 +1061,6 @@ static int perf_session__process_event(struct perf_session *session,
if (ret)
return ret;
- /* Preprocess sample records - precheck callchains */
- if (perf_session__preprocess_sample(session, event, &sample))
- return 0;
-
if (tool->ordered_samples) {
ret = perf_session_queue_event(session, event, &sample,
file_offset);
@@ -999,7 +1081,7 @@ void perf_event_header__bswap(struct perf_event_header *self)
struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
- return machine__findnew_thread(&session->machines.host, pid);
+ return machine__findnew_thread(&session->machines.host, 0, pid);
}
static struct thread *perf_session__register_idle_thread(struct perf_session *self)
@@ -1091,8 +1173,10 @@ more:
perf_event_header__bswap(&event->header);
size = event->header.size;
- if (size == 0)
- size = 8;
+ if (size < sizeof(struct perf_event_header)) {
+ pr_err("bad event header size\n");
+ goto out_err;
+ }
if (size > cur_size) {
void *new = realloc(buf, size);
@@ -1161,8 +1245,12 @@ fetch_mmaped_event(struct perf_session *session,
if (session->header.needs_swap)
perf_event_header__bswap(&event->header);
- if (head + event->header.size > mmap_size)
+ if (head + event->header.size > mmap_size) {
+ /* We're not fetching the event so swap back again */
+ if (session->header.needs_swap)
+ perf_event_header__bswap(&event->header);
return NULL;
+ }
return event;
}
@@ -1242,7 +1330,7 @@ more:
size = event->header.size;
- if (size == 0 ||
+ if (size < sizeof(struct perf_event_header) ||
perf_session__process_event(session, event, tool, file_pos) < 0) {
pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
file_offset + head, event->header.size,
@@ -1295,12 +1383,15 @@ int perf_session__process_events(struct perf_session *self,
bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
- if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) {
- pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
- return false;
+ struct perf_evsel *evsel;
+
+ list_for_each_entry(evsel, &session->evlist->entries, node) {
+ if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
+ return true;
}
- return true;
+ pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
+ return false;
}
int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
@@ -1383,13 +1474,18 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
struct perf_sample *sample, struct machine *machine,
- int print_sym, int print_dso, int print_symoffset)
+ unsigned int print_opts, unsigned int stack_depth)
{
struct addr_location al;
struct callchain_cursor_node *node;
-
- if (perf_event__preprocess_sample(event, machine, &al, sample,
- NULL) < 0) {
+ int print_ip = print_opts & PRINT_IP_OPT_IP;
+ int print_sym = print_opts & PRINT_IP_OPT_SYM;
+ int print_dso = print_opts & PRINT_IP_OPT_DSO;
+ int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
+ int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
+ char s = print_oneline ? ' ' : '\t';
+
+ if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
error("problem processing %d event, skipping it.\n",
event->header.type);
return;
@@ -1397,37 +1493,50 @@ void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
if (symbol_conf.use_callchain && sample->callchain) {
-
if (machine__resolve_callchain(machine, evsel, al.thread,
- sample, NULL) != 0) {
+ sample, NULL, NULL) != 0) {
if (verbose)
error("Failed to resolve callchain. Skipping\n");
return;
}
callchain_cursor_commit(&callchain_cursor);
- while (1) {
+ while (stack_depth) {
node = callchain_cursor_current(&callchain_cursor);
if (!node)
break;
- printf("\t%16" PRIx64, node->ip);
+ if (print_ip)
+ printf("%c%16" PRIx64, s, node->ip);
+
if (print_sym) {
printf(" ");
- symbol__fprintf_symname(node->sym, stdout);
+ if (print_symoffset) {
+ al.addr = node->ip;
+ al.map = node->map;
+ symbol__fprintf_symname_offs(node->sym, &al, stdout);
+ } else
+ symbol__fprintf_symname(node->sym, stdout);
}
+
if (print_dso) {
printf(" (");
map__fprintf_dsoname(node->map, stdout);
printf(")");
}
- printf("\n");
+
+ if (!print_oneline)
+ printf("\n");
callchain_cursor_advance(&callchain_cursor);
+
+ stack_depth--;
}
} else {
- printf("%16" PRIx64, sample->ip);
+ if (print_ip)
+ printf("%16" PRIx64, sample->ip);
+
if (print_sym) {
printf(" ");
if (print_symoffset)
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index f3b235ec7bf..3aa75fb2225 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -37,11 +37,16 @@ struct perf_session {
int fd;
bool fd_pipe;
bool repipe;
- char *cwd;
struct ordered_samples ordered_samples;
char filename[1];
};
+#define PRINT_IP_OPT_IP (1<<0)
+#define PRINT_IP_OPT_SYM (1<<1)
+#define PRINT_IP_OPT_DSO (1<<2)
+#define PRINT_IP_OPT_SYMOFFSET (1<<3)
+#define PRINT_IP_OPT_ONELINE (1<<4)
+
struct perf_tool;
struct perf_session *perf_session__new(const char *filename, int mode,
@@ -57,6 +62,11 @@ int __perf_session__process_events(struct perf_session *self,
int perf_session__process_events(struct perf_session *self,
struct perf_tool *tool);
+int perf_session_queue_event(struct perf_session *s, union perf_event *event,
+ struct perf_sample *sample, u64 file_offset);
+
+void perf_tool__fill_defaults(struct perf_tool *tool);
+
int perf_session__resolve_callchain(struct perf_session *self, struct perf_evsel *evsel,
struct thread *thread,
struct ip_callchain *chain,
@@ -99,7 +109,7 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event,
struct perf_sample *sample, struct machine *machine,
- int print_sym, int print_dso, int print_symoffset);
+ unsigned int print_opts, unsigned int stack_depth);
int perf_session__cpu_bitmap(struct perf_session *session,
const char *cpu_list, unsigned long *cpu_bitmap);
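Purely for illustration (not part of the patch), a hypothetical caller combining the new PRINT_IP_OPT_* flags with a stack-depth limit for the reworked perf_evsel__print_ip(); evsel, event, sample and machine are assumed to come from the caller and the depth of 16 is an arbitrary example value:

	unsigned int print_opts = PRINT_IP_OPT_IP | PRINT_IP_OPT_SYM |
				  PRINT_IP_OPT_DSO | PRINT_IP_OPT_ONELINE;

	/* Print up to 16 resolved callchain entries on one line per sample. */
	perf_evsel__print_ip(evsel, event, sample, machine, print_opts, 16);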
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 313a5a73011..5f118a08951 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -7,6 +7,8 @@ const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char default_sort_order[] = "comm,dso,symbol";
const char *sort_order = default_sort_order;
+regex_t ignore_callees_regex;
+int have_ignore_callees = 0;
int sort__need_collapse = 0;
int sort__has_parent = 0;
int sort__has_sym = 0;
@@ -55,14 +57,14 @@ static int64_t cmp_null(void *l, void *r)
static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
- return right->thread->pid - left->thread->pid;
+ return right->thread->tid - left->thread->tid;
}
static int hist_entry__thread_snprintf(struct hist_entry *self, char *bf,
size_t size, unsigned int width)
{
return repsep_snprintf(bf, size, "%*s:%5d", width - 6,
- self->thread->comm ?: "", self->thread->pid);
+ self->thread->comm ?: "", self->thread->tid);
}
struct sort_entry sort_thread = {
@@ -77,7 +79,7 @@ struct sort_entry sort_thread = {
static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
- return right->thread->pid - left->thread->pid;
+ return right->thread->tid - left->thread->tid;
}
static int64_t
@@ -872,6 +874,8 @@ static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_PARENT, "parent", sort_parent),
DIM(SORT_CPU, "cpu", sort_cpu),
DIM(SORT_SRCLINE, "srcline", sort_srcline),
+ DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
+ DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
};
#undef DIM
@@ -891,8 +895,6 @@ static struct sort_dimension bstack_sort_dimensions[] = {
#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
static struct sort_dimension memory_sort_dimensions[] = {
- DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
- DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 45ac84c1e03..4e80dbd271e 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -29,6 +29,8 @@ extern const char *sort_order;
extern const char default_parent_pattern[];
extern const char *parent_pattern;
extern const char default_sort_order[];
+extern regex_t ignore_callees_regex;
+extern int have_ignore_callees;
extern int sort__need_collapse;
extern int sort__has_parent;
extern int sort__has_sym;
@@ -87,6 +89,9 @@ struct hist_entry {
struct hist_entry_diff diff;
+ /* Set when this entry is added by hists__add_dummy_entry(). */
+ bool dummy;
+
/* XXX These two should move to some tree widget lib */
u16 row_offset;
u16 nr_rows;
@@ -138,6 +143,8 @@ enum sort_type {
SORT_PARENT,
SORT_CPU,
SORT_SRCLINE,
+ SORT_LOCAL_WEIGHT,
+ SORT_GLOBAL_WEIGHT,
/* branch stack specific sort keys */
__SORT_BRANCH_STACK,
@@ -149,9 +156,7 @@ enum sort_type {
/* memory mode specific sort keys */
__SORT_MEMORY_MODE,
- SORT_LOCAL_WEIGHT = __SORT_MEMORY_MODE,
- SORT_GLOBAL_WEIGHT,
- SORT_MEM_DADDR_SYMBOL,
+ SORT_MEM_DADDR_SYMBOL = __SORT_MEMORY_MODE,
SORT_MEM_DADDR_DSO,
SORT_MEM_LOCKED,
SORT_MEM_TLB,
@@ -183,4 +188,6 @@ int setup_sorting(void);
extern int sort_dimension__add(const char *);
void sort__setup_elide(FILE *fp);
+int report_parse_ignore_callees_opt(const struct option *opt, const char *arg, int unset);
+
#endif /* __PERF_SORT_H */
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 7c59c28afcc..6506b3dfb60 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -10,6 +10,12 @@ void update_stats(struct stats *stats, u64 val)
delta = val - stats->mean;
stats->mean += delta / stats->n;
stats->M2 += delta*(val - stats->mean);
+
+ if (val > stats->max)
+ stats->max = val;
+
+ if (val < stats->min)
+ stats->min = val;
}
double avg_stats(struct stats *stats)
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index 588367c3c76..ae8ccd7227c 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -6,6 +6,7 @@
struct stats
{
double n, mean, M2;
+ u64 max, min;
};
void update_stats(struct stats *stats, u64 val);
@@ -13,4 +14,12 @@ double avg_stats(struct stats *stats);
double stddev_stats(struct stats *stats);
double rel_stddev_stats(double stddev, double avg);
+static inline void init_stats(struct stats *stats)
+{
+ stats->n = 0.0;
+ stats->mean = 0.0;
+ stats->M2 = 0.0;
+ stats->min = (u64) -1;
+ stats->max = 0;
+}
#endif
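A minimal usage sketch of the extended stats helpers (values invented; ARRAY_SIZE is assumed to be the usual perf util macro):

	struct stats st;
	u64 vals[] = { 120, 95, 240 };
	size_t i;

	init_stats(&st);		/* min starts at (u64)-1, max at 0 */
	for (i = 0; i < ARRAY_SIZE(vals); i++)
		update_stats(&st, vals[i]);

	/* st.min == 95, st.max == 240, avg_stats(&st) ~= 151.7 */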
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index 29c7b2cb252..f0b0c008c50 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -387,3 +387,27 @@ void *memdup(const void *src, size_t len)
return p;
}
+
+/**
+ * str_append - reallocate string and append another
+ * @s: pointer to string pointer
+ * @len: pointer to the allocated length (caller-initialized, updated on growth)
+ * @a: string to append.
+ */
+int str_append(char **s, int *len, const char *a)
+{
+ int olen = *s ? strlen(*s) : 0;
+ int nlen = olen + strlen(a) + 1;
+ if (*len < nlen) {
+ *len = *len * 2;
+ if (*len < nlen)
+ *len = nlen;
+ *s = realloc(*s, *len);
+ if (!*s)
+ return -ENOMEM;
+ if (olen == 0)
+ **s = 0;
+ }
+ strcat(*s, a);
+ return 0;
+}
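An illustrative caller of str_append() (not from the patch); the pointer and length start out zeroed and are reused across calls, so the buffer grows geometrically:

	char *buf = NULL;
	int buflen = 0;

	if (str_append(&buf, &buflen, "cycles") ||
	    str_append(&buf, &buflen, ",instructions"))
		return -ENOMEM;

	/* buf == "cycles,instructions", buflen >= strlen(buf) + 1 */
	free(buf);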
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 4b12bf85032..a7b9ab55738 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -599,11 +599,13 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
if (dso->kernel == DSO_TYPE_USER) {
GElf_Shdr shdr;
ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
+ ehdr.e_type == ET_REL ||
elf_section_by_name(elf, &ehdr, &shdr,
".gnu.prelink_undo",
NULL) != NULL);
} else {
- ss->adjust_symbols = 0;
+ ss->adjust_symbols = ehdr.e_type == ET_EXEC ||
+ ehdr.e_type == ET_REL;
}
ss->name = strdup(name);
@@ -624,6 +626,37 @@ out_close:
return err;
}
+/**
+ * ref_reloc_sym_not_found - check if the kernel relocation symbol has not been found.
+ * @kmap: kernel maps and relocation reference symbol
+ *
+ * This function returns %true if we are dealing with the kernel maps and the
+ * relocation reference symbol has not yet been found. Otherwise %false is
+ * returned.
+ */
+static bool ref_reloc_sym_not_found(struct kmap *kmap)
+{
+ return kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
+ !kmap->ref_reloc_sym->unrelocated_addr;
+}
+
+/**
+ * ref_reloc - kernel relocation offset.
+ * @kmap: kernel maps and relocation reference symbol
+ *
+ * This function returns the offset of kernel addresses as determined by using
+ * the relocation reference symbol i.e. if the kernel has not been relocated
+ * then the return value is zero.
+ */
+static u64 ref_reloc(struct kmap *kmap)
+{
+ if (kmap && kmap->ref_reloc_sym &&
+ kmap->ref_reloc_sym->unrelocated_addr)
+ return kmap->ref_reloc_sym->addr -
+ kmap->ref_reloc_sym->unrelocated_addr;
+ return 0;
+}
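A worked example of the offset ref_reloc() yields, with invented addresses:

	/*
	 * ref_reloc_sym->unrelocated_addr == 0xffffffff81000000  (from the ELF image)
	 * ref_reloc_sym->addr             == 0xffffffff81200000  (from kallsyms)
	 * ref_reloc() returns 0x200000, the amount by which ELF section
	 * addresses must be shifted to match the running kernel.
	 */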
+
int dso__load_sym(struct dso *dso, struct map *map,
struct symsrc *syms_ss, struct symsrc *runtime_ss,
symbol_filter_t filter, int kmodule)
@@ -642,8 +675,17 @@ int dso__load_sym(struct dso *dso, struct map *map,
Elf_Scn *sec, *sec_strndx;
Elf *elf;
int nr = 0;
+ bool remap_kernel = false, adjust_kernel_syms = false;
dso->symtab_type = syms_ss->type;
+ dso->rel = syms_ss->ehdr.e_type == ET_REL;
+
+ /*
+ * Modules may already have symbols from kallsyms, but those symbols
+ * have the wrong values for the dso maps, so remove them.
+ */
+ if (kmodule && syms_ss->symtab)
+ symbols__delete(&dso->symbols[map->type]);
if (!syms_ss->symtab) {
syms_ss->symtab = syms_ss->dynsym;
@@ -681,7 +723,31 @@ int dso__load_sym(struct dso *dso, struct map *map,
nr_syms = shdr.sh_size / shdr.sh_entsize;
memset(&sym, 0, sizeof(sym));
- dso->adjust_symbols = runtime_ss->adjust_symbols;
+
+ /*
+ * The kernel relocation symbol is needed in advance in order to adjust
+ * kernel maps correctly.
+ */
+ if (ref_reloc_sym_not_found(kmap)) {
+ elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
+ const char *elf_name = elf_sym__name(&sym, symstrs);
+
+ if (strcmp(elf_name, kmap->ref_reloc_sym->name))
+ continue;
+ kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
+ break;
+ }
+ }
+
+ dso->adjust_symbols = runtime_ss->adjust_symbols || ref_reloc(kmap);
+ /*
+ * Initial kernel and module mappings do not map to the dso. For
+ * function mappings, flag the fixups.
+ */
+ if (map->type == MAP__FUNCTION && (dso->kernel || kmodule)) {
+ remap_kernel = true;
+ adjust_kernel_syms = dso->adjust_symbols;
+ }
elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) {
struct symbol *f;
const char *elf_name = elf_sym__name(&sym, symstrs);
@@ -690,10 +756,6 @@ int dso__load_sym(struct dso *dso, struct map *map,
const char *section_name;
bool used_opd = false;
- if (kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name &&
- strcmp(elf_name, kmap->ref_reloc_sym->name) == 0)
- kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
-
if (!is_label && !elf_sym__is_a(&sym, map->type))
continue;
@@ -745,20 +807,55 @@ int dso__load_sym(struct dso *dso, struct map *map,
(sym.st_value & 1))
--sym.st_value;
- if (dso->kernel != DSO_TYPE_USER || kmodule) {
+ if (dso->kernel || kmodule) {
char dso_name[PATH_MAX];
+ /* Adjust symbol to map to file offset */
+ if (adjust_kernel_syms)
+ sym.st_value -= shdr.sh_addr - shdr.sh_offset;
+
if (strcmp(section_name,
(curr_dso->short_name +
dso->short_name_len)) == 0)
goto new_symbol;
if (strcmp(section_name, ".text") == 0) {
+ /*
+ * The initial kernel mapping is based on
+ * kallsyms and identity maps. Overwrite it to
+ * map to the kernel dso.
+ */
+ if (remap_kernel && dso->kernel) {
+ remap_kernel = false;
+ map->start = shdr.sh_addr +
+ ref_reloc(kmap);
+ map->end = map->start + shdr.sh_size;
+ map->pgoff = shdr.sh_offset;
+ map->map_ip = map__map_ip;
+ map->unmap_ip = map__unmap_ip;
+ /* Ensure maps are correctly ordered */
+ map_groups__remove(kmap->kmaps, map);
+ map_groups__insert(kmap->kmaps, map);
+ }
+
+ /*
+ * The initial module mapping is based on
+ * /proc/modules mapped to offset zero.
+ * Overwrite it to map to the module dso.
+ */
+ if (remap_kernel && kmodule) {
+ remap_kernel = false;
+ map->pgoff = shdr.sh_offset;
+ }
+
curr_map = map;
curr_dso = dso;
goto new_symbol;
}
+ if (!kmap)
+ goto new_symbol;
+
snprintf(dso_name, sizeof(dso_name),
"%s%s", dso->short_name, section_name);
@@ -781,8 +878,16 @@ int dso__load_sym(struct dso *dso, struct map *map,
dso__delete(curr_dso);
goto out_elf_end;
}
- curr_map->map_ip = identity__map_ip;
- curr_map->unmap_ip = identity__map_ip;
+ if (adjust_kernel_syms) {
+ curr_map->start = shdr.sh_addr +
+ ref_reloc(kmap);
+ curr_map->end = curr_map->start +
+ shdr.sh_size;
+ curr_map->pgoff = shdr.sh_offset;
+ } else {
+ curr_map->map_ip = identity__map_ip;
+ curr_map->unmap_ip = identity__map_ip;
+ }
curr_dso->symtab_type = dso->symtab_type;
map_groups__insert(kmap->kmaps, curr_map);
dsos__add(&dso->node, curr_dso);
@@ -846,6 +951,57 @@ out_elf_end:
return err;
}
+static int elf_read_maps(Elf *elf, bool exe, mapfn_t mapfn, void *data)
+{
+ GElf_Phdr phdr;
+ size_t i, phdrnum;
+ int err;
+ u64 sz;
+
+ if (elf_getphdrnum(elf, &phdrnum))
+ return -1;
+
+ for (i = 0; i < phdrnum; i++) {
+ if (gelf_getphdr(elf, i, &phdr) == NULL)
+ return -1;
+ if (phdr.p_type != PT_LOAD)
+ continue;
+ if (exe) {
+ if (!(phdr.p_flags & PF_X))
+ continue;
+ } else {
+ if (!(phdr.p_flags & PF_R))
+ continue;
+ }
+ sz = min(phdr.p_memsz, phdr.p_filesz);
+ if (!sz)
+ continue;
+ err = mapfn(phdr.p_vaddr, sz, phdr.p_offset, data);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
+ bool *is_64_bit)
+{
+ int err;
+ Elf *elf;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL)
+ return -1;
+
+ if (is_64_bit)
+ *is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
+
+ err = elf_read_maps(elf, exe, mapfn, data);
+
+ elf_end(elf);
+ return err;
+}
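A hypothetical mapfn_t callback for the new file__read_maps() entry point; the function name and debug output are illustrative only:

	static int print_segment(u64 start, u64 len, u64 pgoff, void *data __maybe_unused)
	{
		/* Called once per PT_LOAD segment that passed the exe/data filter. */
		pr_debug("segment: vaddr %#" PRIx64 " len %#" PRIx64 " offset %#" PRIx64 "\n",
			 start, len, pgoff);
		return 0;	/* non-zero aborts the walk and is returned to the caller */
	}

	/* usage: err = file__read_maps(fd, true, print_segment, NULL, &is_64_bit); */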
+
void symbol__elf_init(void)
{
elf_version(EV_CURRENT);
diff --git a/tools/perf/util/symbol-minimal.c b/tools/perf/util/symbol-minimal.c
index a7390cde63b..3a802c300fc 100644
--- a/tools/perf/util/symbol-minimal.c
+++ b/tools/perf/util/symbol-minimal.c
@@ -301,6 +301,13 @@ int dso__load_sym(struct dso *dso, struct map *map __maybe_unused,
return 0;
}
+int file__read_maps(int fd __maybe_unused, bool exe __maybe_unused,
+ mapfn_t mapfn __maybe_unused, void *data __maybe_unused,
+ bool *is_64_bit __maybe_unused)
+{
+ return -1;
+}
+
void symbol__elf_init(void)
{
}
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index d5528e1cc03..7eb0362f4ff 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -87,6 +87,7 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
{
s64 a;
s64 b;
+ size_t na, nb;
/* Prefer a symbol with non zero length */
a = syma->end - syma->start;
@@ -120,11 +121,21 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
else if (a > b)
return SYMBOL_B;
- /* If all else fails, choose the symbol with the longest name */
- if (strlen(syma->name) >= strlen(symb->name))
+ /* Choose the symbol with the longest name */
+ na = strlen(syma->name);
+ nb = strlen(symb->name);
+ if (na > nb)
return SYMBOL_A;
- else
+ else if (na < nb)
+ return SYMBOL_B;
+
+ /* Avoid "SyS" kernel syscall aliases */
+ if (na >= 3 && !strncmp(syma->name, "SyS", 3))
+ return SYMBOL_B;
+ if (na >= 10 && !strncmp(syma->name, "compat_SyS", 10))
return SYMBOL_B;
+
+ return SYMBOL_A;
}
void symbols__fixup_duplicate(struct rb_root *symbols)
@@ -248,7 +259,10 @@ size_t symbol__fprintf_symname_offs(const struct symbol *sym,
if (sym && sym->name) {
length = fprintf(fp, "%s", sym->name);
if (al) {
- offset = al->addr - sym->start;
+ if (al->addr < sym->end)
+ offset = al->addr - sym->start;
+ else
+ offset = al->addr - al->map->start - sym->start;
length += fprintf(fp, "+0x%lx", offset);
}
return length;
@@ -316,6 +330,16 @@ static struct symbol *symbols__find(struct rb_root *symbols, u64 ip)
return NULL;
}
+static struct symbol *symbols__first(struct rb_root *symbols)
+{
+ struct rb_node *n = rb_first(symbols);
+
+ if (n)
+ return rb_entry(n, struct symbol, rb_node);
+
+ return NULL;
+}
+
struct symbol_name_rb_node {
struct rb_node rb_node;
struct symbol sym;
@@ -386,6 +410,11 @@ struct symbol *dso__find_symbol(struct dso *dso,
return symbols__find(&dso->symbols[type], addr);
}
+struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
+{
+ return symbols__first(&dso->symbols[type]);
+}
+
struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
const char *name)
{
@@ -522,6 +551,53 @@ static int dso__load_all_kallsyms(struct dso *dso, const char *filename,
return kallsyms__parse(filename, &args, map__process_kallsym_symbol);
}
+static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
+ symbol_filter_t filter)
+{
+ struct map_groups *kmaps = map__kmap(map)->kmaps;
+ struct map *curr_map;
+ struct symbol *pos;
+ int count = 0, moved = 0;
+ struct rb_root *root = &dso->symbols[map->type];
+ struct rb_node *next = rb_first(root);
+
+ while (next) {
+ char *module;
+
+ pos = rb_entry(next, struct symbol, rb_node);
+ next = rb_next(&pos->rb_node);
+
+ module = strchr(pos->name, '\t');
+ if (module)
+ *module = '\0';
+
+ curr_map = map_groups__find(kmaps, map->type, pos->start);
+
+ if (!curr_map || (filter && filter(curr_map, pos))) {
+ rb_erase(&pos->rb_node, root);
+ symbol__delete(pos);
+ } else {
+ pos->start -= curr_map->start - curr_map->pgoff;
+ if (pos->end)
+ pos->end -= curr_map->start - curr_map->pgoff;
+ if (curr_map != map) {
+ rb_erase(&pos->rb_node, root);
+ symbols__insert(
+ &curr_map->dso->symbols[curr_map->type],
+ pos);
+ ++moved;
+ } else {
+ ++count;
+ }
+ }
+ }
+
+ /* Symbols have been adjusted */
+ dso->adjust_symbols = 1;
+
+ return count + moved;
+}
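A worked example (numbers invented) of the adjustment dso__split_kallsyms_for_kcore() applies above:

	/*
	 * curr_map->start == 0xffffffff81000000, curr_map->pgoff == 0x10000
	 * pos->start      == 0xffffffff81001234  (virtual address from kallsyms)
	 * pos->start -= (start - pgoff)  ->  0x11234, i.e. an offset into the
	 * kcore-backed dso rather than a kernel virtual address.
	 */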
+
/*
* Split the symbols into maps, making sure there are no overlaps, i.e. the
* kernel range is broken in several maps, named [kernel].N, as we don't have
@@ -663,6 +739,161 @@ bool symbol__restricted_filename(const char *filename,
return restricted;
}
+struct kcore_mapfn_data {
+ struct dso *dso;
+ enum map_type type;
+ struct list_head maps;
+};
+
+static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
+{
+ struct kcore_mapfn_data *md = data;
+ struct map *map;
+
+ map = map__new2(start, md->dso, md->type);
+ if (map == NULL)
+ return -ENOMEM;
+
+ map->end = map->start + len;
+ map->pgoff = pgoff;
+
+ list_add(&map->node, &md->maps);
+
+ return 0;
+}
+
+/*
+ * If kallsyms is referenced by name then we look for kcore in the same
+ * directory.
+ */
+static bool kcore_filename_from_kallsyms_filename(char *kcore_filename,
+ const char *kallsyms_filename)
+{
+ char *name;
+
+ strcpy(kcore_filename, kallsyms_filename);
+ name = strrchr(kcore_filename, '/');
+ if (!name)
+ return false;
+
+ if (!strcmp(name, "/kallsyms")) {
+ strcpy(name, "/kcore");
+ return true;
+ }
+
+ return false;
+}
+
+static int dso__load_kcore(struct dso *dso, struct map *map,
+ const char *kallsyms_filename)
+{
+ struct map_groups *kmaps = map__kmap(map)->kmaps;
+ struct machine *machine = kmaps->machine;
+ struct kcore_mapfn_data md;
+ struct map *old_map, *new_map, *replacement_map = NULL;
+ bool is_64_bit;
+ int err, fd;
+ char kcore_filename[PATH_MAX];
+ struct symbol *sym;
+
+ /* This function requires that the map is the kernel map */
+ if (map != machine->vmlinux_maps[map->type])
+ return -EINVAL;
+
+ if (!kcore_filename_from_kallsyms_filename(kcore_filename,
+ kallsyms_filename))
+ return -EINVAL;
+
+ md.dso = dso;
+ md.type = map->type;
+ INIT_LIST_HEAD(&md.maps);
+
+ fd = open(kcore_filename, O_RDONLY);
+ if (fd < 0)
+ return -EINVAL;
+
+ /* Read new maps into temporary lists */
+ err = file__read_maps(fd, md.type == MAP__FUNCTION, kcore_mapfn, &md,
+ &is_64_bit);
+ if (err)
+ goto out_err;
+
+ if (list_empty(&md.maps)) {
+ err = -EINVAL;
+ goto out_err;
+ }
+
+ /* Remove old maps */
+ old_map = map_groups__first(kmaps, map->type);
+ while (old_map) {
+ struct map *next = map_groups__next(old_map);
+
+ if (old_map != map)
+ map_groups__remove(kmaps, old_map);
+ old_map = next;
+ }
+
+ /* Find the kernel map using the first symbol */
+ sym = dso__first_symbol(dso, map->type);
+ list_for_each_entry(new_map, &md.maps, node) {
+ if (sym && sym->start >= new_map->start &&
+ sym->start < new_map->end) {
+ replacement_map = new_map;
+ break;
+ }
+ }
+
+ if (!replacement_map)
+ replacement_map = list_entry(md.maps.next, struct map, node);
+
+ /* Add new maps */
+ while (!list_empty(&md.maps)) {
+ new_map = list_entry(md.maps.next, struct map, node);
+ list_del(&new_map->node);
+ if (new_map == replacement_map) {
+ map->start = new_map->start;
+ map->end = new_map->end;
+ map->pgoff = new_map->pgoff;
+ map->map_ip = new_map->map_ip;
+ map->unmap_ip = new_map->unmap_ip;
+ map__delete(new_map);
+ /* Ensure maps are correctly ordered */
+ map_groups__remove(kmaps, map);
+ map_groups__insert(kmaps, map);
+ } else {
+ map_groups__insert(kmaps, new_map);
+ }
+ }
+
+ /*
+ * Set the data type and long name so that kcore can be read via
+ * dso__data_read_addr().
+ */
+ if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+ dso->data_type = DSO_BINARY_TYPE__GUEST_KCORE;
+ else
+ dso->data_type = DSO_BINARY_TYPE__KCORE;
+ dso__set_long_name(dso, strdup(kcore_filename));
+
+ close(fd);
+
+ if (map->type == MAP__FUNCTION)
+ pr_debug("Using %s for kernel object code\n", kcore_filename);
+ else
+ pr_debug("Using %s for kernel data\n", kcore_filename);
+
+ return 0;
+
+out_err:
+ while (!list_empty(&md.maps)) {
+ map = list_entry(md.maps.next, struct map, node);
+ list_del(&map->node);
+ map__delete(map);
+ }
+ close(fd);
+ return -EINVAL;
+}
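The kcore filename derivation above is purely textual; a sketch of what it accepts (paths other than /proc/kallsyms are invented):

	/*
	 * "/proc/kallsyms"        -> "/proc/kcore"        (accepted)
	 * "/tmp/cap/kallsyms"     -> "/tmp/cap/kcore"     (accepted)
	 * "vmlinux", "/tmp/ksyms" -> rejected: no trailing "/kallsyms",
	 *                            so plain dso__split_kallsyms() is used instead.
	 */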
+
int dso__load_kallsyms(struct dso *dso, const char *filename,
struct map *map, symbol_filter_t filter)
{
@@ -680,7 +911,10 @@ int dso__load_kallsyms(struct dso *dso, const char *filename,
else
dso->symtab_type = DSO_BINARY_TYPE__KALLSYMS;
- return dso__split_kallsyms(dso, map, filter);
+ if (!dso__load_kcore(dso, map, filename))
+ return dso__split_kallsyms_for_kcore(dso, map, filter);
+ else
+ return dso__split_kallsyms(dso, map, filter);
}
static int dso__load_perf_map(struct dso *dso, struct map *map,
@@ -843,10 +1077,15 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
if (!runtime_ss && syms_ss)
runtime_ss = syms_ss;
- if (syms_ss)
- ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, 0);
- else
+ if (syms_ss) {
+ int km;
+
+ km = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
+ dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
+ ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, km);
+ } else {
ret = -1;
+ }
if (ret > 0) {
int nr_plt;
@@ -888,8 +1127,11 @@ int dso__load_vmlinux(struct dso *dso, struct map *map,
char symfs_vmlinux[PATH_MAX];
enum dso_binary_type symtab_type;
- snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s",
- symbol_conf.symfs, vmlinux);
+ if (vmlinux[0] == '/')
+ snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s", vmlinux);
+ else
+ snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s",
+ symbol_conf.symfs, vmlinux);
if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
@@ -903,6 +1145,10 @@ int dso__load_vmlinux(struct dso *dso, struct map *map,
symsrc__destroy(&ss);
if (err > 0) {
+ if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+ dso->data_type = DSO_BINARY_TYPE__GUEST_VMLINUX;
+ else
+ dso->data_type = DSO_BINARY_TYPE__VMLINUX;
dso__set_long_name(dso, (char *)vmlinux);
dso__set_loaded(dso, map->type);
pr_debug("Using %s for symbols\n", symfs_vmlinux);
@@ -975,7 +1221,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map,
dso__set_long_name(dso,
strdup(symbol_conf.vmlinux_name));
dso->lname_alloc = 1;
- goto out_fixup;
+ return err;
}
return err;
}
@@ -983,7 +1229,7 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map,
if (vmlinux_path != NULL) {
err = dso__load_vmlinux_path(dso, map, filter);
if (err > 0)
- goto out_fixup;
+ return err;
}
/* do not try local files if a symfs was given */
@@ -1042,9 +1288,8 @@ do_kallsyms:
pr_debug("Using %s for symbols\n", kallsyms_filename);
free(kallsyms_allocated_filename);
- if (err > 0) {
+ if (err > 0 && !dso__is_kcore(dso)) {
dso__set_long_name(dso, strdup("[kernel.kallsyms]"));
-out_fixup:
map__fixup_start(map);
map__fixup_end(map);
}
@@ -1075,7 +1320,7 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
if (symbol_conf.default_guest_vmlinux_name != NULL) {
err = dso__load_vmlinux(dso, map,
symbol_conf.default_guest_vmlinux_name, filter);
- goto out_try_fixup;
+ return err;
}
kallsyms_filename = symbol_conf.default_guest_kallsyms;
@@ -1089,13 +1334,9 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
err = dso__load_kallsyms(dso, kallsyms_filename, map, filter);
if (err > 0)
pr_debug("Using %s for symbols\n", kallsyms_filename);
-
-out_try_fixup:
- if (err > 0) {
- if (kallsyms_filename != NULL) {
- machine__mmap_name(machine, path, sizeof(path));
- dso__set_long_name(dso, strdup(path));
- }
+ if (err > 0 && !dso__is_kcore(dso)) {
+ machine__mmap_name(machine, path, sizeof(path));
+ dso__set_long_name(dso, strdup(path));
map__fixup_start(map);
map__fixup_end(map);
}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 5f720dc076d..fd5b70ea298 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -215,6 +215,7 @@ struct symbol *dso__find_symbol(struct dso *dso, enum map_type type,
u64 addr);
struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
const char *name);
+struct symbol *dso__first_symbol(struct dso *dso, enum map_type type);
int filename__read_build_id(const char *filename, void *bf, size_t size);
int sysfs__read_build_id(const char *filename, void *bf, size_t size);
@@ -247,4 +248,8 @@ void symbols__fixup_duplicate(struct rb_root *symbols);
void symbols__fixup_end(struct rb_root *symbols);
void __map_groups__fixup_end(struct map_groups *mg, enum map_type type);
+typedef int (*mapfn_t)(u64 start, u64 len, u64 pgoff, void *data);
+int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
+ bool *is_64_bit);
+
#endif /* __PERF_SYMBOL */
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 40399cbcca7..e3d4a550a70 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -7,17 +7,18 @@
#include "util.h"
#include "debug.h"
-struct thread *thread__new(pid_t pid)
+struct thread *thread__new(pid_t pid, pid_t tid)
{
struct thread *self = zalloc(sizeof(*self));
if (self != NULL) {
map_groups__init(&self->mg);
- self->pid = pid;
+ self->pid_ = pid;
+ self->tid = tid;
self->ppid = -1;
self->comm = malloc(32);
if (self->comm)
- snprintf(self->comm, 32, ":%d", self->pid);
+ snprintf(self->comm, 32, ":%d", self->tid);
}
return self;
@@ -57,7 +58,7 @@ int thread__comm_len(struct thread *self)
size_t thread__fprintf(struct thread *thread, FILE *fp)
{
- return fprintf(fp, "Thread %d %s\n", thread->pid, thread->comm) +
+ return fprintf(fp, "Thread %d %s\n", thread->tid, thread->comm) +
map_groups__fprintf(&thread->mg, verbose, fp);
}
@@ -84,7 +85,7 @@ int thread__fork(struct thread *self, struct thread *parent)
if (map_groups__clone(&self->mg, &parent->mg, i) < 0)
return -ENOMEM;
- self->ppid = parent->pid;
+ self->ppid = parent->tid;
return 0;
}
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index eeb7ac62b9e..4ebbb40d46d 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -12,10 +12,12 @@ struct thread {
struct list_head node;
};
struct map_groups mg;
- pid_t pid;
+ pid_t pid_; /* Not all tools update this */
+ pid_t tid;
pid_t ppid;
char shortname[3];
bool comm_set;
+ bool dead; /* if set thread has exited */
char *comm;
int comm_len;
@@ -24,8 +26,12 @@ struct thread {
struct machine;
-struct thread *thread__new(pid_t pid);
+struct thread *thread__new(pid_t pid, pid_t tid);
void thread__delete(struct thread *self);
+static inline void thread__exited(struct thread *thread)
+{
+ thread->dead = true;
+}
int thread__set_comm(struct thread *self, const char *comm);
int thread__comm_len(struct thread *self);
@@ -45,6 +51,15 @@ void thread__find_addr_map(struct thread *thread, struct machine *machine,
void thread__find_addr_location(struct thread *thread, struct machine *machine,
u8 cpumode, enum map_type type, u64 addr,
- struct addr_location *al,
- symbol_filter_t filter);
+ struct addr_location *al);
+
+static inline void *thread__priv(struct thread *thread)
+{
+ return thread->priv;
+}
+
+static inline void thread__set_priv(struct thread *thread, void *p)
+{
+ thread->priv = p;
+}
#endif /* __PERF_THREAD_H */
diff --git a/tools/perf/util/tool.h b/tools/perf/util/tool.h
index b0e1aadba8d..62b16b6165b 100644
--- a/tools/perf/util/tool.h
+++ b/tools/perf/util/tool.h
@@ -18,12 +18,9 @@ typedef int (*event_sample)(struct perf_tool *tool, union perf_event *event,
typedef int (*event_op)(struct perf_tool *tool, union perf_event *event,
struct perf_sample *sample, struct machine *machine);
-typedef int (*event_attr_op)(union perf_event *event,
+typedef int (*event_attr_op)(struct perf_tool *tool,
+ union perf_event *event,
struct perf_evlist **pevlist);
-typedef int (*event_simple_op)(struct perf_tool *tool, union perf_event *event);
-
-typedef int (*event_synth_op)(union perf_event *event,
- struct perf_session *session);
typedef int (*event_op2)(struct perf_tool *tool, union perf_event *event,
struct perf_session *session);
@@ -39,8 +36,7 @@ struct perf_tool {
throttle,
unthrottle;
event_attr_op attr;
- event_synth_op tracing_data;
- event_simple_op event_type;
+ event_op2 tracing_data;
event_op2 finished_round,
build_id;
bool ordered_samples;
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index df46be93d90..b554ffc462b 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -39,6 +39,8 @@ struct perf_top {
float min_percent;
};
+#define CONSOLE_CLEAR "\033[H\033[2J"
+
size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size);
void perf_top__reset_sample_counters(struct perf_top *top);
#endif /* __PERF_TOP_H */
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index 3917eb9a847..f3c9e551bd3 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -46,65 +46,6 @@
static int output_fd;
-static const char *find_debugfs(void)
-{
- const char *path = perf_debugfs_mount(NULL);
-
- if (!path)
- pr_debug("Your kernel does not support the debugfs filesystem");
-
- return path;
-}
-
-/*
- * Finds the path to the debugfs/tracing
- * Allocates the string and stores it.
- */
-static const char *find_tracing_dir(void)
-{
- static char *tracing;
- static int tracing_found;
- const char *debugfs;
-
- if (tracing_found)
- return tracing;
-
- debugfs = find_debugfs();
- if (!debugfs)
- return NULL;
-
- tracing = malloc(strlen(debugfs) + 9);
- if (!tracing)
- return NULL;
-
- sprintf(tracing, "%s/tracing", debugfs);
-
- tracing_found = 1;
- return tracing;
-}
-
-static char *get_tracing_file(const char *name)
-{
- const char *tracing;
- char *file;
-
- tracing = find_tracing_dir();
- if (!tracing)
- return NULL;
-
- file = malloc(strlen(tracing) + strlen(name) + 2);
- if (!file)
- return NULL;
-
- sprintf(file, "%s/%s", tracing, name);
- return file;
-}
-
-static void put_tracing_file(char *file)
-{
- free(file);
-}
-
int bigendian(void)
{
unsigned char str[] = { 0x1, 0x2, 0x3, 0x4, 0x0, 0x0, 0x0, 0x0};
@@ -160,7 +101,7 @@ out:
return err;
}
-static int read_header_files(void)
+static int record_header_files(void)
{
char *path;
struct stat st;
@@ -299,7 +240,7 @@ out:
return err;
}
-static int read_ftrace_files(struct tracepoint_path *tps)
+static int record_ftrace_files(struct tracepoint_path *tps)
{
char *path;
int ret;
@@ -328,7 +269,7 @@ static bool system_in_tp_list(char *sys, struct tracepoint_path *tps)
return false;
}
-static int read_event_files(struct tracepoint_path *tps)
+static int record_event_files(struct tracepoint_path *tps)
{
struct dirent *dent;
struct stat st;
@@ -403,7 +344,7 @@ out:
return err;
}
-static int read_proc_kallsyms(void)
+static int record_proc_kallsyms(void)
{
unsigned int size;
const char *path = "/proc/kallsyms";
@@ -421,7 +362,7 @@ static int read_proc_kallsyms(void)
return record_file(path, 4);
}
-static int read_ftrace_printk(void)
+static int record_ftrace_printk(void)
{
unsigned int size;
char *path;
@@ -473,12 +414,27 @@ get_tracepoints_path(struct list_head *pattrs)
if (pos->attr.type != PERF_TYPE_TRACEPOINT)
continue;
++nr_tracepoints;
+
+ if (pos->name) {
+ ppath->next = tracepoint_name_to_path(pos->name);
+ if (ppath->next)
+ goto next;
+
+ if (strchr(pos->name, ':') == NULL)
+ goto try_id;
+
+ goto error;
+ }
+
+try_id:
ppath->next = tracepoint_id_to_path(pos->attr.config);
if (!ppath->next) {
+error:
pr_debug("No memory to alloc tracepoints list\n");
put_tracepoints_path(&path);
return NULL;
}
+next:
ppath = ppath->next;
}
@@ -520,8 +476,6 @@ static int tracing_data_header(void)
else
buf[0] = 0;
- read_trace_init(buf[0], buf[0]);
-
if (write(output_fd, buf, 1) != 1)
return -1;
@@ -583,19 +537,19 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs,
err = tracing_data_header();
if (err)
goto out;
- err = read_header_files();
+ err = record_header_files();
if (err)
goto out;
- err = read_ftrace_files(tps);
+ err = record_ftrace_files(tps);
if (err)
goto out;
- err = read_event_files(tps);
+ err = record_event_files(tps);
if (err)
goto out;
- err = read_proc_kallsyms();
+ err = record_proc_kallsyms();
if (err)
goto out;
- err = read_ftrace_printk();
+ err = record_ftrace_printk();
out:
/*
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 4454835a9eb..fe7a27d67d2 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -28,12 +28,6 @@
#include "util.h"
#include "trace-event.h"
-int header_page_size_size;
-int header_page_ts_size;
-int header_page_data_offset;
-
-bool latency_format;
-
struct pevent *read_trace_init(int file_bigendian, int host_bigendian)
{
struct pevent *pevent = pevent_alloc();
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index af215c0d237..f2112270c66 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -39,10 +39,6 @@
static int input_fd;
-int file_bigendian;
-int host_bigendian;
-static int long_size;
-
static ssize_t trace_data_size;
static bool repipe;
@@ -216,7 +212,7 @@ static int read_ftrace_printk(struct pevent *pevent)
static int read_header_files(struct pevent *pevent)
{
unsigned long long size;
- char *header_event;
+ char *header_page;
char buf[BUFSIZ];
int ret = 0;
@@ -229,13 +225,26 @@ static int read_header_files(struct pevent *pevent)
}
size = read8(pevent);
- skip(size);
- /*
- * The size field in the page is of type long,
- * use that instead, since it represents the kernel.
- */
- long_size = header_page_size_size;
+ header_page = malloc(size);
+ if (header_page == NULL)
+ return -1;
+
+ if (do_read(header_page, size) < 0) {
+ pr_debug("did not read header page");
+ free(header_page);
+ return -1;
+ }
+
+ if (!pevent_parse_header_page(pevent, header_page, size,
+ pevent_get_long_size(pevent))) {
+ /*
+ * The commit field in the page is of type long,
+ * use that instead, since it represents the kernel.
+ */
+ pevent_set_long_size(pevent, pevent->header_page_size_size);
+ }
+ free(header_page);
if (do_read(buf, 13) < 0)
return -1;
@@ -246,14 +255,8 @@ static int read_header_files(struct pevent *pevent)
}
size = read8(pevent);
- header_event = malloc(size);
- if (header_event == NULL)
- return -1;
-
- if (do_read(header_event, size) < 0)
- ret = -1;
+ skip(size);
- free(header_event);
return ret;
}
@@ -349,6 +352,10 @@ ssize_t trace_report(int fd, struct pevent **ppevent, bool __repipe)
int show_funcs = 0;
int show_printk = 0;
ssize_t size = -1;
+ int file_bigendian;
+ int host_bigendian;
+ int file_long_size;
+ int file_page_size;
struct pevent *pevent;
int err;
@@ -391,12 +398,15 @@ ssize_t trace_report(int fd, struct pevent **ppevent, bool __repipe)
if (do_read(buf, 1) < 0)
goto out;
- long_size = buf[0];
+ file_long_size = buf[0];
- page_size = read4(pevent);
- if (!page_size)
+ file_page_size = read4(pevent);
+ if (!file_page_size)
goto out;
+ pevent_set_long_size(pevent, file_long_size);
+ pevent_set_page_size(pevent, file_page_size);
+
err = read_header_files(pevent);
if (err)
goto out;
diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c
index 8715a1006d0..95199e4eea9 100644
--- a/tools/perf/util/trace-event-scripting.c
+++ b/tools/perf/util/trace-event-scripting.c
@@ -39,7 +39,8 @@ static void process_event_unsupported(union perf_event *event __maybe_unused,
struct perf_sample *sample __maybe_unused,
struct perf_evsel *evsel __maybe_unused,
struct machine *machine __maybe_unused,
- struct addr_location *al __maybe_unused)
+ struct thread *thread __maybe_unused,
+ struct addr_location *al __maybe_unused)
{
}
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index 1978c398ad8..fafe1a40444 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -1,32 +1,18 @@
#ifndef _PERF_UTIL_TRACE_EVENT_H
#define _PERF_UTIL_TRACE_EVENT_H
+#include <traceevent/event-parse.h>
#include "parse-events.h"
-#include "event-parse.h"
#include "session.h"
struct machine;
struct perf_sample;
union perf_event;
struct perf_tool;
+struct thread;
-extern int header_page_size_size;
-extern int header_page_ts_size;
-extern int header_page_data_offset;
-
-extern bool latency_format;
extern struct pevent *perf_pevent;
-enum {
- RINGBUF_TYPE_PADDING = 29,
- RINGBUF_TYPE_TIME_EXTEND = 30,
- RINGBUF_TYPE_TIME_STAMP = 31,
-};
-
-#ifndef TS_SHIFT
-#define TS_SHIFT 27
-#endif
-
int bigendian(void);
struct pevent *read_trace_init(int file_bigendian, int host_bigendian);
@@ -83,7 +69,8 @@ struct scripting_ops {
struct perf_sample *sample,
struct perf_evsel *evsel,
struct machine *machine,
- struct addr_location *al);
+ struct thread *thread,
+ struct addr_location *al);
int (*generate_script) (struct pevent *pevent, const char *outfile);
};
diff --git a/tools/perf/util/unwind.c b/tools/perf/util/unwind.c
index 958723ba3d2..2f891f7e70b 100644
--- a/tools/perf/util/unwind.c
+++ b/tools/perf/util/unwind.c
@@ -473,7 +473,7 @@ static int entry(u64 ip, struct thread *thread, struct machine *machine,
thread__find_addr_location(thread, machine,
PERF_RECORD_MISC_USER,
- MAP__FUNCTION, ip, &al, NULL);
+ MAP__FUNCTION, ip, &al);
e.ip = ip;
e.map = al.map;
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 59d868add27..6d17b18e915 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -269,3 +269,95 @@ void perf_debugfs_set_path(const char *mntpt)
snprintf(debugfs_mountpoint, strlen(debugfs_mountpoint), "%s", mntpt);
set_tracing_events_path(mntpt);
}
+
+static const char *find_debugfs(void)
+{
+ const char *path = perf_debugfs_mount(NULL);
+
+ if (!path)
+ fprintf(stderr, "Your kernel does not support the debugfs filesystem");
+
+ return path;
+}
+
+/*
+ * Finds the path to the debugfs tracing directory.
+ * Allocates the string and stores it.
+ */
+const char *find_tracing_dir(void)
+{
+ static char *tracing;
+ static int tracing_found;
+ const char *debugfs;
+
+ if (tracing_found)
+ return tracing;
+
+ debugfs = find_debugfs();
+ if (!debugfs)
+ return NULL;
+
+ tracing = malloc(strlen(debugfs) + 9);
+ if (!tracing)
+ return NULL;
+
+ sprintf(tracing, "%s/tracing", debugfs);
+
+ tracing_found = 1;
+ return tracing;
+}
+
+char *get_tracing_file(const char *name)
+{
+ const char *tracing;
+ char *file;
+
+ tracing = find_tracing_dir();
+ if (!tracing)
+ return NULL;
+
+ file = malloc(strlen(tracing) + strlen(name) + 2);
+ if (!file)
+ return NULL;
+
+ sprintf(file, "%s/%s", tracing, name);
+ return file;
+}
+
+void put_tracing_file(char *file)
+{
+ free(file);
+}
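A minimal sketch of the intended pairing for the now-exported helpers (the path shown is the common debugfs mount point; the real prefix comes from perf_debugfs_mount()):

	char *path = get_tracing_file("events/sched/sched_switch/format");

	if (path) {
		/* e.g. "/sys/kernel/debug/tracing/events/sched/sched_switch/format" */
		pr_debug("reading %s\n", path);
		put_tracing_file(path);		/* frees the string allocated above */
	}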
+
+int parse_nsec_time(const char *str, u64 *ptime)
+{
+ u64 time_sec, time_nsec;
+ char *end;
+
+ time_sec = strtoul(str, &end, 10);
+ if (*end != '.' && *end != '\0')
+ return -1;
+
+ if (*end == '.') {
+ int i;
+ char nsec_buf[10];
+
+ if (strlen(++end) > 9)
+ return -1;
+
+ strncpy(nsec_buf, end, 9);
+ nsec_buf[9] = '\0';
+
+ /* make it nsec precision */
+ for (i = strlen(nsec_buf); i < 9; i++)
+ nsec_buf[i] = '0';
+
+ time_nsec = strtoul(nsec_buf, &end, 10);
+ if (*end != '\0')
+ return -1;
+ } else
+ time_nsec = 0;
+
+ *ptime = time_sec * NSEC_PER_SEC + time_nsec;
+ return 0;
+}
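Example inputs and results for parse_nsec_time(), worked out from the code above (not taken from the patch):

	u64 t;

	parse_nsec_time("5", &t);           /* t == 5000000000                           */
	parse_nsec_time("5.5", &t);         /* t == 5500000000 (".5" padded to 9 digits) */
	parse_nsec_time("5.000000001", &t); /* t == 5000000001                           */
	/* "5.0000000001" (10 fractional digits) and "5x" both return -1. */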
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 2732fad0390..a5353594904 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -80,6 +80,9 @@ extern char buildid_dir[];
extern char tracing_events_path[];
extern void perf_debugfs_set_path(const char *mountpoint);
const char *perf_debugfs_mount(const char *mountpoint);
+const char *find_tracing_dir(void);
+char *get_tracing_file(const char *name);
+void put_tracing_file(char *file);
/* On most systems <limits.h> would have given us this, but
* not on some systems (e.g. GNU/Hurd).
@@ -205,6 +208,8 @@ static inline int has_extension(const char *filename, const char *ext)
#define NSEC_PER_MSEC 1000000L
#endif
+int parse_nsec_time(const char *str, u64 *ptime);
+
extern unsigned char sane_ctype[256];
#define GIT_SPACE 0x01
#define GIT_DIGIT 0x02
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 17c5ac7d10e..685fc72fc75 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -149,7 +149,7 @@ static u32 *vgic_bytemap_get_reg(struct vgic_bytemap *x, int cpuid, u32 offset)
{
offset >>= 2;
BUG_ON(offset > (VGIC_NR_IRQS / 4));
- if (offset < 4)
+ if (offset < 8)
return x->percpu[cpuid] + offset;
else
return x->shared + offset - 8;
@@ -432,19 +432,13 @@ static bool handle_mmio_priority_reg(struct kvm_vcpu *vcpu,
static u32 vgic_get_target_reg(struct kvm *kvm, int irq)
{
struct vgic_dist *dist = &kvm->arch.vgic;
- struct kvm_vcpu *vcpu;
- int i, c;
- unsigned long *bmap;
+ int i;
u32 val = 0;
irq -= VGIC_NR_PRIVATE_IRQS;
- kvm_for_each_vcpu(c, vcpu, kvm) {
- bmap = vgic_bitmap_get_shared_map(&dist->irq_spi_target[c]);
- for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
- if (test_bit(irq + i, bmap))
- val |= 1 << (c + i * 8);
- }
+ for (i = 0; i < GICD_IRQS_PER_ITARGETSR; i++)
+ val |= 1 << (dist->irq_spi_cpu[irq + i] + i * 8);
return val;
}
@@ -547,8 +541,12 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
struct kvm_exit_mmio *mmio, phys_addr_t offset)
{
u32 val;
- u32 *reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
- vcpu->vcpu_id, offset >> 1);
+ u32 *reg;
+
+ offset >>= 1;
+ reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
+ vcpu->vcpu_id, offset);
+
if (offset & 2)
val = *reg >> 16;
else
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 1550637d1b1..abe4d6043b3 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -291,7 +291,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
struct kvm_irq_routing_table *irq_rt;
struct _irqfd *irqfd, *tmp;
- struct file *file = NULL;
+ struct fd f;
struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
int ret;
unsigned int events;
@@ -306,13 +306,13 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
INIT_WORK(&irqfd->inject, irqfd_inject);
INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
- file = eventfd_fget(args->fd);
- if (IS_ERR(file)) {
- ret = PTR_ERR(file);
- goto fail;
+ f = fdget(args->fd);
+ if (!f.file) {
+ ret = -EBADF;
+ goto out;
}
- eventfd = eventfd_ctx_fileget(file);
+ eventfd = eventfd_ctx_fileget(f.file);
if (IS_ERR(eventfd)) {
ret = PTR_ERR(eventfd);
goto fail;
@@ -391,7 +391,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
lockdep_is_held(&kvm->irqfds.lock));
irqfd_update(kvm, irqfd, irq_rt);
- events = file->f_op->poll(file, &irqfd->pt);
+ events = f.file->f_op->poll(f.file, &irqfd->pt);
list_add_tail(&irqfd->list, &kvm->irqfds.items);
@@ -408,7 +408,7 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
* do not drop the file until the irqfd is fully initialized, otherwise
* we might race against the POLLHUP
*/
- fput(file);
+ fdput(f);
return 0;
@@ -422,9 +422,9 @@ fail:
if (eventfd && !IS_ERR(eventfd))
eventfd_ctx_put(eventfd);
- if (!IS_ERR(file))
- fput(file);
+ fdput(f);
+out:
kfree(irqfd);
return ret;
}
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1580dd4ace4..bf040c4e02b 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -102,28 +102,8 @@ static bool largepages_enabled = true;
bool kvm_is_mmio_pfn(pfn_t pfn)
{
- if (pfn_valid(pfn)) {
- int reserved;
- struct page *tail = pfn_to_page(pfn);
- struct page *head = compound_trans_head(tail);
- reserved = PageReserved(head);
- if (head != tail) {
- /*
- * "head" is not a dangling pointer
- * (compound_trans_head takes care of that)
- * but the hugepage may have been splitted
- * from under us (and we may not hold a
- * reference count on the head page so it can
- * be reused before we run PageReferenced), so
- * we've to check PageTail before returning
- * what we just read.
- */
- smp_rmb();
- if (PageTail(tail))
- return reserved;
- }
- return PageReserved(tail);
- }
+ if (pfn_valid(pfn))
+ return PageReserved(pfn_to_page(pfn));
return true;
}
@@ -731,7 +711,10 @@ static struct kvm_memslots *install_new_memslots(struct kvm *kvm,
update_memslots(slots, new, kvm->memslots->generation);
rcu_assign_pointer(kvm->memslots, slots);
synchronize_srcu_expedited(&kvm->srcu);
- return old_memslots;
+
+ kvm_arch_memslots_updated(kvm);
+
+ return old_memslots;
}
/*
@@ -1893,7 +1876,7 @@ static struct file_operations kvm_vcpu_fops = {
*/
static int create_vcpu_fd(struct kvm_vcpu *vcpu)
{
- return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR);
+ return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
}
/*
@@ -2302,7 +2285,7 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
return ret;
}
- ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR);
+ ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
if (ret < 0) {
ops->destroy(dev);
return ret;
@@ -2586,7 +2569,7 @@ static int kvm_dev_ioctl_create_vm(unsigned long type)
return r;
}
#endif
- r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
+ r = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR | O_CLOEXEC);
if (r < 0)
kvm_put_kvm(kvm);
@@ -2812,11 +2795,9 @@ static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
kfree(bus);
}
-static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
+static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
+ const struct kvm_io_range *r2)
{
- const struct kvm_io_range *r1 = p1;
- const struct kvm_io_range *r2 = p2;
-
if (r1->addr < r2->addr)
return -1;
if (r1->addr + r1->len > r2->addr + r2->len)
@@ -2824,6 +2805,11 @@ static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
return 0;
}
+static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
+{
+ return kvm_io_bus_cmp(p1, p2);
+}
+
static int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
gpa_t addr, int len)
{
@@ -2857,17 +2843,54 @@ static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
off = range - bus->range;
- while (off > 0 && kvm_io_bus_sort_cmp(&key, &bus->range[off-1]) == 0)
+ while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
off--;
return off;
}
+static int __kvm_io_bus_write(struct kvm_io_bus *bus,
+ struct kvm_io_range *range, const void *val)
+{
+ int idx;
+
+ idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
+ if (idx < 0)
+ return -EOPNOTSUPP;
+
+ while (idx < bus->dev_count &&
+ kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
+ if (!kvm_iodevice_write(bus->range[idx].dev, range->addr,
+ range->len, val))
+ return idx;
+ idx++;
+ }
+
+ return -EOPNOTSUPP;
+}
+
/* kvm_io_bus_write - called under kvm->slots_lock */
int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, const void *val)
{
- int idx;
+ struct kvm_io_bus *bus;
+ struct kvm_io_range range;
+ int r;
+
+ range = (struct kvm_io_range) {
+ .addr = addr,
+ .len = len,
+ };
+
+ bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
+ r = __kvm_io_bus_write(bus, &range, val);
+ return r < 0 ? r : 0;
+}
+
+/* kvm_io_bus_write_cookie - called under kvm->slots_lock */
+int kvm_io_bus_write_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ int len, const void *val, long cookie)
+{
struct kvm_io_bus *bus;
struct kvm_io_range range;
@@ -2877,14 +2900,35 @@ int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
};
bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
- idx = kvm_io_bus_get_first_dev(bus, addr, len);
+
+ /* First try the device referenced by cookie. */
+ if ((cookie >= 0) && (cookie < bus->dev_count) &&
+ (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
+ if (!kvm_iodevice_write(bus->range[cookie].dev, addr, len,
+ val))
+ return cookie;
+
+ /*
+ * cookie contained garbage; fall back to search and return the
+ * correct cookie value.
+ */
+ return __kvm_io_bus_write(bus, &range, val);
+}
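An illustrative caller pattern for the cookie variants (hypothetical: kvm, addr, len, val and cached_cookie stand in for a real fast-path user, which would keep the cookie per device). The cookie is just the matching device index from the previous call, so a stale value only costs the fallback search:

	int r;

	r = kvm_io_bus_write_cookie(kvm, KVM_MMIO_BUS, addr, len, val,
				    cached_cookie);
	if (r >= 0)
		cached_cookie = r;	/* remember the matching bus index */
	else
		return r;		/* -EOPNOTSUPP: no device claimed the range */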
+
+static int __kvm_io_bus_read(struct kvm_io_bus *bus, struct kvm_io_range *range,
+ void *val)
+{
+ int idx;
+
+ idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
if (idx < 0)
return -EOPNOTSUPP;
while (idx < bus->dev_count &&
- kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) {
- if (!kvm_iodevice_write(bus->range[idx].dev, addr, len, val))
- return 0;
+ kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
+ if (!kvm_iodevice_read(bus->range[idx].dev, range->addr,
+ range->len, val))
+ return idx;
idx++;
}
@@ -2895,9 +2939,9 @@ int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, void *val)
{
- int idx;
struct kvm_io_bus *bus;
struct kvm_io_range range;
+ int r;
range = (struct kvm_io_range) {
.addr = addr,
@@ -2905,18 +2949,36 @@ int kvm_io_bus_read(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
};
bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
- idx = kvm_io_bus_get_first_dev(bus, addr, len);
- if (idx < 0)
- return -EOPNOTSUPP;
+ r = __kvm_io_bus_read(bus, &range, val);
+ return r < 0 ? r : 0;
+}
- while (idx < bus->dev_count &&
- kvm_io_bus_sort_cmp(&range, &bus->range[idx]) == 0) {
- if (!kvm_iodevice_read(bus->range[idx].dev, addr, len, val))
- return 0;
- idx++;
- }
+/* kvm_io_bus_read_cookie - called under kvm->slots_lock */
+int kvm_io_bus_read_cookie(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
+ int len, void *val, long cookie)
+{
+ struct kvm_io_bus *bus;
+ struct kvm_io_range range;
- return -EOPNOTSUPP;
+ range = (struct kvm_io_range) {
+ .addr = addr,
+ .len = len,
+ };
+
+ bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
+
+ /* First try the device referenced by cookie. */
+ if ((cookie >= 0) && (cookie < bus->dev_count) &&
+ (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
+ if (!kvm_iodevice_read(bus->range[cookie].dev, addr, len,
+ val))
+ return cookie;
+
+ /*
+ * cookie contained garbage; fall back to search and return the
+ * correct cookie value.
+ */
+ return __kvm_io_bus_read(bus, &range, val);
}
/* Caller must hold slots_lock. */